commit 1739a20efc4acb55fd1dc53dcc66057b70c2613c Author: Andy Ritger Date: Mon May 9 13:18:59 2022 -0700 515.43.04 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..be30c9c30 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,26 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**NVIDIA Driver Version** +Please write the version of the NVIDIA driver you are using. + +**GPU** +Please write the particular model of NVIDIA GPU you are using. + +**Describe the bug** +Please write a clear and concise description of what the bug is. + +**To Reproduce** +Please write the steps to reproduce the behavior. + +**Expected behavior** +Please write a clear and concise description of what you expected to happen. + +**Please reproduce the problem, run nvidia-bug-report.sh, and attach the resulting nvidia-bug-report.log.gz.** + diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..75434ed8d --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +*.o +*.o_binary +*.o.cmd +*.o.d +_out/ diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..643ae6dc9 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,141 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contribute to a positive environment for our +community include: + +* Using welcoming and inclusive language +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when +an individual is representing the project or its community. Examples of representing +our community include using an official e-mail address, posting via an official +social media account, or acting as an appointed representative at an online or +offline event. Representation of a project may be further defined and clarified +by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders and moderators responsible for enforcement at +GitHub_Conduct@nvidia.com. +All complaints will be reviewed and investigated and will result in a response +that is deemed necessary and appropriate to the circumstances. Leaders and moderators +are obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Moderators who do not follow or enforce the Code of Conduct in good faith +may face temporary or permanent repercussions as determined by other members of the +community’s leadership. + +## Enforcement Guidelines + +Community leaders and moderators will follow these Community Impact Guidelines +in determining the consequences for any action they deem in violation of this +Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community moderators, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating an egregious single violation, or a pattern of +violation of community standards, including sustained inappropriate behavior, +harassment of an individual, or aggression toward or disparagement of classes of +individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/COPYING b/COPYING new file mode 100644 index 000000000..84a3c3289 --- /dev/null +++ b/COPYING @@ -0,0 +1,369 @@ + +Except where noted otherwise, the individual files within this package are +licensed as MIT: + + Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +However, when linked together to form a Linux kernel module, the resulting Linux +kernel module is dual licensed as MIT/GPLv2. + + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. 
+ +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
+ diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..0ad5aa90a --- /dev/null +++ b/Makefile @@ -0,0 +1,76 @@ +########################################################################### +# This is the top level makefile for the NVIDIA Linux kernel module source +# package. +# +# To build: run `make modules` +# To install the build kernel modules: run (as root) `make modules_install` +########################################################################### + +include utils.mk + +all: modules + +nv_kernel_o = src/nvidia/$(OUTPUTDIR)/nv-kernel.o +nv_kernel_o_binary = kernel-open/nvidia/nv-kernel.o_binary + +nv_modeset_kernel_o = src/nvidia-modeset/$(OUTPUTDIR)/nv-modeset-kernel.o +nv_modeset_kernel_o_binary = kernel-open/nvidia-modeset/nv-modeset-kernel.o_binary + +.PHONY: $(nv_kernel_o) $(nv_modeset_kernel_o) modules modules_install + + +########################################################################### +# nv-kernel.o is the OS agnostic portion of nvidia.ko +########################################################################### + +$(nv_kernel_o): + $(MAKE) -C src/nvidia + +$(nv_kernel_o_binary): $(nv_kernel_o) + cd $(dir $@) && ln -sf ../../$^ $(notdir $@) + + +########################################################################### +# nv-modeset-kernel.o is the OS agnostic portion of nvidia-modeset.ko +########################################################################### + +$(nv_modeset_kernel_o): + $(MAKE) -C src/nvidia-modeset + +$(nv_modeset_kernel_o_binary): $(nv_modeset_kernel_o) + cd $(dir $@) && ln -sf ../../$^ $(notdir $@) + + +########################################################################### +# After the OS agnostic portions are built, descend into kernel-open/ and build +# the kernel modules with kbuild. +########################################################################### + +modules: $(nv_kernel_o_binary) $(nv_modeset_kernel_o_binary) + $(MAKE) -C kernel-open modules + + +########################################################################### +# Install the built kernel modules using kbuild. +########################################################################### + +modules_install: + $(MAKE) -C kernel-open modules_install + + +########################################################################### +# clean +########################################################################### + +.PHONY: clean nvidia.clean nvidia-modeset.clean kernel-open.clean + +clean: nvidia.clean nvidia-modeset.clean kernel-open.clean + +nvidia.clean: + $(MAKE) -C src/nvidia clean + +nvidia-modeset.clean: + $(MAKE) -C src/nvidia-modeset clean + +kernel-open.clean: + $(MAKE) -C kernel-open clean diff --git a/README.md b/README.md new file mode 100644 index 000000000..492369774 --- /dev/null +++ b/README.md @@ -0,0 +1,164 @@ +# NVIDIA Linux Open GPU Kernel Module Source + +This is the source release of the NVIDIA Linux open GPU kernel modules, +version 515.43.04. + + +## How to Build + +To build: + + make modules -j`nproc` + +To install, first uninstall any existing NVIDIA kernel modules. Then, +as root: + + make modules_install -j`nproc` + +Note that the kernel modules built here must be used with gsp.bin +firmware and user-space NVIDIA GPU driver components from a corresponding +515.43.04 driver release. This can be achieved by installing +the NVIDIA GPU driver from the .run file using the `--no-kernel-modules` +option. 
E.g., + + sh ./NVIDIA-Linux-[...].run --no-kernel-modules + + +## Supported Target CPU Architectures + +Currently, the kernel modules can be built for x86_64 or aarch64. +If cross-compiling, set these variables on the make command line: + + TARGET_ARCH=aarch64|x86_64 + CC + LD + AR + CXX + OBJCOPY + +E.g., + + # compile on x86_64 for aarch64 + make modules -j`nproc` \ + TARGET_ARCH=aarch64 \ + CC=aarch64-linux-gnu-gcc \ + LD=aarch64-linux-gnu-ld \ + AR=aarch64-linux-gnu-ar \ + CXX=aarch64-linux-gnu-g++ \ + OBJCOPY=aarch64-linux-gnu-objcopy + + +## Other Build Knobs + +NV_VERBOSE - Set this to "1" to print each complete command executed; + otherwise, a succinct "CC" line is printed. + +DEBUG - Set this to "1" to build the kernel modules as debug. By default, the + build compiles without debugging information. This also enables + various debug log messages in the kernel modules. + +These variables can be set on the make command line. E.g., + + make modules -j`nproc` NV_VERBOSE=1 + + +## Supported Toolchains + +Any reasonably modern version of gcc or clang can be used to build the +kernel modules. Note that the kernel interface layers of the kernel +modules must be built with the toolchain that was used to build the +kernel. + + +## Supported Linux Kernel Versions + +The NVIDIA open kernel modules support the same range of Linux kernel +versions that are supported with the proprietary NVIDIA kernel modules. +This is currently Linux kernel 3.10 or newer. + + +## How to Contribute + +Contributions can be made by creating a pull request on +https://github.com/NVIDIA/open-gpu-kernel-modules +We'll respond via github. + +Note that when submitting a pull request, you will be prompted to accept +a Contributor License Agreement. + +This code base is shared with NVIDIA's proprietary drivers, and various +processing is performed on the shared code to produce the source code that is +published here. This has several implications for the foreseeable future: + +* The github repository will function mostly as a snapshot of each driver + release. + +* We do not expect to be able to provide revision history for individual + changes that were made to NVIDIA's shared code base. There will likely + only be one git commit per driver release. + +* We may not be able to reflect individual contributions as separate + git commits in the github repository. + +* Because the code undergoes various processing prior to publishing here, + contributions made here require manual merging to be applied to the shared + code base. Therefore, large refactoring changes made here may be difficult to + merge and accept back into the shared code base. If you have large + refactoring to suggest, please contact us in advance, so we can coordinate. + + +## How to Report Issues + +Problems specific to the Open GPU Kernel Modules can be reported in the +Issues section of the https://github.com/NVIDIA/open-gpu-kernel-modules +repository. + +Further, any of the existing bug reporting venues can be used to communicate +problems to NVIDIA, such as our forum: + +https://forums.developer.nvidia.com/c/gpu-graphics/linux/148 + +or linux-bugs@nvidia.com. + +Please see the 'NVIDIA Contact Info and Additional Resources' section +of the NVIDIA GPU Driver README for details. + +Please see the separate [SECURITY.md](SECURITY.md) document if you +believe you have discovered a security vulnerability in this software. 
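
As a practical supplement to the "Other Build Knobs" and "Supported Toolchains"
sections above, the following sketch is illustrative only: `gcc-12` stands in
for whichever compiler was used to build the running kernel, and the install
step assumes root access and that any existing NVIDIA kernel modules have
already been uninstalled.

    # The compiler used to build the running kernel is recorded in /proc/version
    cat /proc/version

    # Verbose debug build, using a matching compiler for the kernel interface layers
    make modules -j`nproc` CC=gcc-12 DEBUG=1 NV_VERBOSE=1

    # Then, as root, install the freshly built modules
    make modules_install -j`nproc`
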
+ + +## Kernel Interface and OS-Agnostic Components of Kernel Modules + +Most of NVIDIA's kernel modules are split into two components: + +* An "OS-agnostic" component: this is the component of each kernel module + that is independent of operating system. + +* A "kernel interface layer": this is the component of each kernel module + that is specific to the Linux kernel version and configuration. + +When packaged in the NVIDIA .run installation package, the OS-agnostic +component is provided as a binary: it is large and time-consuming to +compile, so pre-built versions are provided so that the user does +not have to compile it during every driver installation. For the +nvidia.ko kernel module, this component is named "nv-kernel.o_binary". +For the nvidia-modeset.ko kernel module, this component is named +"nv-modeset-kernel.o_binary". Neither nvidia-drm.ko nor nvidia-uvm.ko +have OS-agnostic components. + +The kernel interface layer component for each kernel module must be built +for the target kernel. + + +## Directory Structure Layout + +- `kernel-open/` The kernel interface layer +- `kernel-open/nvidia/` The kernel interface layer for nvidia.ko +- `kernel-open/nvidia-drm/` The kernel interface layer for nvidia-drm.ko +- `kernel-open/nvidia-modeset/` The kernel interface layer for nvidia-modeset.ko +- `kernel-open/nvidia-uvm/` The kernel interface layer for nvidia-uvm.ko + +- `src/` The OS-agnostic code +- `src/nvidia/` The OS-agnostic code for nvidia.ko +- `src/nvidia-modeset/` The OS-agnostic code for nvidia-modeset.ko +- `src/common/` Utility code used by one or more of nvidia.ko and nvidia-modeset.ko diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..9926a4c09 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,16 @@ +# Report a Security Vulnerability + +To report a potential security vulnerability in any NVIDIA product, please use either: +* this web form: [Security Vulnerability Submission Form](https://www.nvidia.com/object/submit-security-vulnerability.html), or +* send email to: [NVIDIA PSIRT](mailto:psirt@nvidia.com) + +**OEM Partners should contact their NVIDIA Customer Program Manager** + +If reporting a potential vulnerability via email, please encrypt it using NVIDIA’s public PGP key ([see PGP Key page](https://www.nvidia.com/en-us/security/pgp-key/)) and include the following information: +* Product/Driver name and version/branch that contains the vulnerability +* Type of vulnerability (code execution, denial of service, buffer overflow, etc.) +* Instructions to reproduce the vulnerability +* Proof-of-concept or exploit code +* Potential impact of the vulnerability, including how an attacker could exploit the vulnerability + +See https://www.nvidia.com/en-us/security/ for past NVIDIA Security Bulletins and Notices. diff --git a/kernel-open/.gitignore b/kernel-open/.gitignore new file mode 100644 index 000000000..ab5508a8c --- /dev/null +++ b/kernel-open/.gitignore @@ -0,0 +1,9 @@ +.*.cmd +*.ko +*.mod +*.mod.c +conftest/ +conftest[0-9]*.c +modules.order +Module.symvers +nv_compiler.h diff --git a/kernel-open/Kbuild b/kernel-open/Kbuild new file mode 100644 index 000000000..032dd9c02 --- /dev/null +++ b/kernel-open/Kbuild @@ -0,0 +1,245 @@ +########################################################################### +# Kbuild file for NVIDIA Linux GPU driver kernel modules +########################################################################### + +# +# The parent makefile is expected to define: +# +# NV_KERNEL_SOURCES : The root of the kernel source tree. 
+# NV_KERNEL_OUTPUT : The kernel's output tree. +# NV_KERNEL_MODULES : A whitespace-separated list of modules to build. +# ARCH : The target CPU architecture: x86_64|arm64|powerpc +# +# Kbuild provides the variables: +# +# $(src) : The directory containing this Kbuild file. +# $(obj) : The directory where the output from this build is written. +# + +NV_BUILD_TYPE ?= release + +# +# Utility macro ASSIGN_PER_OBJ_CFLAGS: to control CFLAGS on a +# per-object basis, Kbuild honors the 'CFLAGS_$(object)' variable. +# E.g., "CFLAGS_nv.o" for CFLAGS that are specific to nv.o. Use this +# macro to assign 'CFLAGS_$(object)' variables for multiple object +# files. +# +# $(1): The object files. +# $(2): The CFLAGS to add for those object files. +# +# With kernel git commit 54b8ae66ae1a3454a7645d159a482c31cd89ab33, the +# handling of object-specific CFLAGs, CFLAGS_$(object) has changed. Prior to +# this commit, the CFLAGS_$(object) variable was required to be defined with +# only the the object name (). With the aforementioned git +# commit, it is now required to give Kbuild relative paths along-with the +# object name (CFLAGS_/somefile.o>). As a result, CFLAGS_$(object) +# is set twice, once with a relative path to the object files and once with +# just the object files. +# +ASSIGN_PER_OBJ_CFLAGS = \ + $(foreach _cflags_variable, \ + $(notdir $(1)) $(1), \ + $(eval $(addprefix CFLAGS_,$(_cflags_variable)) += $(2))) + + +# +# Include the specifics of the individual NVIDIA kernel modules. +# +# Each of these should: +# - Append to 'obj-m', to indicate the kernel module that should be built. +# - Define the object files that should get built to produce the kernel module. +# - Tie into conftest (see the description below). +# + +NV_UNDEF_BEHAVIOR_SANITIZER ?= +ifeq ($(NV_UNDEF_BEHAVIOR_SANITIZER),1) + UBSAN_SANITIZE := y +endif + +$(foreach _module, $(NV_KERNEL_MODULES), \ + $(eval include $(src)/$(_module)/$(_module).Kbuild)) + + +# +# Define CFLAGS that apply to all the NVIDIA kernel modules. EXTRA_CFLAGS +# is deprecated since 2.6.24 in favor of ccflags-y, but we need to support +# older kernels which do not have ccflags-y. Newer kernels append +# $(EXTRA_CFLAGS) to ccflags-y for compatibility. +# + +EXTRA_CFLAGS += -I$(src)/common/inc +EXTRA_CFLAGS += -I$(src) +EXTRA_CFLAGS += -Wall -MD $(DEFINES) $(INCLUDES) -Wno-cast-qual -Wno-error -Wno-format-extra-args +EXTRA_CFLAGS += -D__KERNEL__ -DMODULE -DNVRM +EXTRA_CFLAGS += -DNV_VERSION_STRING=\"515.43.04\" + +EXTRA_CFLAGS += -Wno-unused-function + +ifneq ($(NV_BUILD_TYPE),debug) + EXTRA_CFLAGS += -Wuninitialized +endif + +EXTRA_CFLAGS += -fno-strict-aliasing + +ifeq ($(ARCH),arm64) + EXTRA_CFLAGS += -mstrict-align +endif + +ifeq ($(NV_BUILD_TYPE),debug) + EXTRA_CFLAGS += -g -gsplit-dwarf +endif + +EXTRA_CFLAGS += -ffreestanding + +ifeq ($(ARCH),arm64) + EXTRA_CFLAGS += -mgeneral-regs-only -march=armv8-a +endif + +ifeq ($(ARCH),x86_64) + EXTRA_CFLAGS += -mno-red-zone -mcmodel=kernel +endif + +ifeq ($(ARCH),powerpc) + EXTRA_CFLAGS += -mlittle-endian -mno-strict-align -mno-altivec +endif + +EXTRA_CFLAGS += -DNV_UVM_ENABLE +EXTRA_CFLAGS += $(call cc-option,-Werror=undef,) +EXTRA_CFLAGS += -DNV_SPECTRE_V2=$(NV_SPECTRE_V2) +EXTRA_CFLAGS += -DNV_KERNEL_INTERFACE_LAYER + +# +# Detect SGI UV systems and apply system-specific optimizations. +# + +ifneq ($(wildcard /proc/sgi_uv),) + EXTRA_CFLAGS += -DNV_CONFIG_X86_UV +endif + + +# +# The conftest.sh script tests various aspects of the target kernel. 
+# The per-module Kbuild files included above should: +# +# - Append to the NV_CONFTEST_*_COMPILE_TESTS variables to indicate +# which conftests they require. +# - Append to the NV_OBJECTS_DEPEND_ON_CONFTEST variable any object files +# that depend on conftest. +# +# The conftest machinery below will run the requested tests and +# generate the appropriate header files. +# + +CC ?= cc +LD ?= ld + +NV_CONFTEST_SCRIPT := $(src)/conftest.sh +NV_CONFTEST_HEADER := $(obj)/conftest/headers.h + +NV_CONFTEST_CMD := /bin/sh $(NV_CONFTEST_SCRIPT) \ + "$(CC)" $(ARCH) $(NV_KERNEL_SOURCES) $(NV_KERNEL_OUTPUT) + +NV_CFLAGS_FROM_CONFTEST := $(shell $(NV_CONFTEST_CMD) build_cflags) + +NV_CONFTEST_CFLAGS = $(NV_CFLAGS_FROM_CONFTEST) $(EXTRA_CFLAGS) -fno-pie + +NV_CONFTEST_COMPILE_TEST_HEADERS := $(obj)/conftest/macros.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/functions.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/symbols.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/types.h +NV_CONFTEST_COMPILE_TEST_HEADERS += $(obj)/conftest/generic.h + +NV_CONFTEST_HEADERS := $(obj)/conftest/patches.h +NV_CONFTEST_HEADERS += $(obj)/conftest/headers.h +NV_CONFTEST_HEADERS += $(NV_CONFTEST_COMPILE_TEST_HEADERS) + + +# +# Generate a header file for a single conftest compile test. Each compile test +# header depends on conftest.sh, as well as the generated conftest/headers.h +# file, which is included in the compile test preamble. +# + +$(obj)/conftest/compile-tests/%.h: $(NV_CONFTEST_SCRIPT) $(NV_CONFTEST_HEADER) + @mkdir -p $(obj)/conftest/compile-tests + @echo " CONFTEST: $(notdir $*)" + @$(NV_CONFTEST_CMD) compile_tests '$(NV_CONFTEST_CFLAGS)' \ + $(notdir $*) > $@ + +# +# Concatenate a conftest/*.h header from its constituent compile test headers +# +# $(1): The name of the concatenated header +# $(2): The list of compile tests that make up the header +# + +define NV_GENERATE_COMPILE_TEST_HEADER + $(obj)/conftest/$(1).h: $(addprefix $(obj)/conftest/compile-tests/,$(addsuffix .h,$(2))) + @mkdir -p $(obj)/conftest + @# concatenate /dev/null to prevent cat from hanging when $$^ is empty + @cat $$^ /dev/null > $$@ +endef + +# +# Generate the conftest compile test headers from the lists of compile tests +# provided by the module-specific Kbuild files. +# + +NV_CONFTEST_FUNCTION_COMPILE_TESTS ?= +NV_CONFTEST_GENERIC_COMPILE_TESTS ?= +NV_CONFTEST_MACRO_COMPILE_TESTS ?= +NV_CONFTEST_SYMBOL_COMPILE_TESTS ?= +NV_CONFTEST_TYPE_COMPILE_TESTS ?= + +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,functions,$(NV_CONFTEST_FUNCTION_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,generic,$(NV_CONFTEST_GENERIC_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,macros,$(NV_CONFTEST_MACRO_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,symbols,$(NV_CONFTEST_SYMBOL_COMPILE_TESTS))) +$(eval $(call NV_GENERATE_COMPILE_TEST_HEADER,types,$(NV_CONFTEST_TYPE_COMPILE_TESTS))) + +$(obj)/conftest/patches.h: $(NV_CONFTEST_SCRIPT) + @mkdir -p $(obj)/conftest + @$(NV_CONFTEST_CMD) patch_check > $@ + +$(obj)/conftest/headers.h: $(NV_CONFTEST_SCRIPT) + @mkdir -p $(obj)/conftest + @$(NV_CONFTEST_CMD) test_kernel_headers '$(NV_CONFTEST_CFLAGS)' > $@ + +clean-dirs := $(obj)/conftest + + +# For any object files that depend on conftest, declare the dependency here. 
+$(addprefix $(obj)/,$(NV_OBJECTS_DEPEND_ON_CONFTEST)): | $(NV_CONFTEST_HEADERS) + +# Sanity checks of the build environment and target system/kernel + +BUILD_SANITY_CHECKS = \ + cc_sanity_check \ + cc_version_check \ + dom0_sanity_check \ + xen_sanity_check \ + preempt_rt_sanity_check \ + vgpu_kvm_sanity_check \ + module_symvers_sanity_check + +.PHONY: $(BUILD_SANITY_CHECKS) + +$(BUILD_SANITY_CHECKS): + @$(NV_CONFTEST_CMD) $@ full_output + +# Perform all sanity checks before generating the conftest headers + +$(NV_CONFTEST_HEADERS): | $(BUILD_SANITY_CHECKS) + +# Make the conftest headers depend on the kernel version string + +$(obj)/conftest/uts_release: NV_GENERATE_UTS_RELEASE + @mkdir -p $(dir $@) + @NV_UTS_RELEASE="// Kernel version: `$(NV_CONFTEST_CMD) compile_tests '$(NV_CONFTEST_CFLAGS)' uts_release`"; \ + if ! [ -f "$@" ] || [ "$$NV_UTS_RELEASE" != "`cat $@`" ]; \ + then echo "$$NV_UTS_RELEASE" > $@; fi + +.PHONY: NV_GENERATE_UTS_RELEASE + +$(NV_CONFTEST_HEADERS): $(obj)/conftest/uts_release diff --git a/kernel-open/Makefile b/kernel-open/Makefile new file mode 100644 index 000000000..e0ed5e955 --- /dev/null +++ b/kernel-open/Makefile @@ -0,0 +1,126 @@ +# +# This Makefile was automatically generated; do not edit. +# + +########################################################################### +# Makefile for NVIDIA Linux GPU driver kernel modules +########################################################################### + +# This makefile is read twice: when a user or nvidia-installer invokes +# 'make', this file is read. It then invokes the Linux kernel's +# Kbuild. Modern versions of Kbuild will then read the Kbuild file in +# this directory. However, old versions of Kbuild will instead read +# this Makefile. For backwards compatibility, when read by Kbuild +# (recognized by KERNELRELEASE not being empty), do nothing but +# include the Kbuild file in this directory. + +ifneq ($(KERNELRELEASE),) + include $(src)/Kbuild +else + + # Determine the location of the Linux kernel source tree, and of the + # kernel's output tree. Use this to invoke Kbuild, and pass the paths + # to the source and output trees to NVIDIA's Kbuild file via + # NV_KERNEL_{SOURCES,OUTPUT}. 
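  # As an illustrative example (the path below is only a placeholder), the
  # kernel source tree can be selected explicitly on the make command line:
  #
  #   make modules SYSSRC=/usr/src/linux-headers-5.15.0
  #
  # SYSOUT can likewise name a separate kernel output tree. When neither
  # variable is given, both locations are derived from the running kernel's
  # directories under /lib/modules/`uname -r`, as computed below.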
+ + ifdef SYSSRC + KERNEL_SOURCES := $(SYSSRC) + else + KERNEL_UNAME ?= $(shell uname -r) + KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME) + KERNEL_SOURCES := $(shell test -d $(KERNEL_MODLIB)/source && echo $(KERNEL_MODLIB)/source || echo $(KERNEL_MODLIB)/build) + endif + + KERNEL_OUTPUT := $(KERNEL_SOURCES) + KBUILD_PARAMS := + + ifdef SYSOUT + ifneq ($(SYSOUT), $(KERNEL_SOURCES)) + KERNEL_OUTPUT := $(SYSOUT) + KBUILD_PARAMS := KBUILD_OUTPUT=$(KERNEL_OUTPUT) + endif + else + KERNEL_UNAME ?= $(shell uname -r) + KERNEL_MODLIB := /lib/modules/$(KERNEL_UNAME) + ifeq ($(KERNEL_SOURCES), $(KERNEL_MODLIB)/source) + KERNEL_OUTPUT := $(KERNEL_MODLIB)/build + KBUILD_PARAMS := KBUILD_OUTPUT=$(KERNEL_OUTPUT) + endif + endif + + CC ?= cc + LD ?= ld + OBJDUMP ?= objdump + + ifndef ARCH + ARCH := $(shell uname -m | sed -e 's/i.86/i386/' \ + -e 's/armv[0-7]\w\+/arm/' \ + -e 's/aarch64/arm64/' \ + -e 's/ppc64le/powerpc/' \ + ) + endif + + NV_KERNEL_MODULES ?= $(wildcard nvidia nvidia-uvm nvidia-vgpu-vfio nvidia-modeset nvidia-drm nvidia-peermem) + NV_KERNEL_MODULES := $(filter-out $(NV_EXCLUDE_KERNEL_MODULES), \ + $(NV_KERNEL_MODULES)) + NV_VERBOSE ?= + SPECTRE_V2_RETPOLINE ?= 0 + + ifeq ($(NV_VERBOSE),1) + KBUILD_PARAMS += V=1 + endif + KBUILD_PARAMS += -C $(KERNEL_SOURCES) M=$(CURDIR) + KBUILD_PARAMS += ARCH=$(ARCH) + KBUILD_PARAMS += NV_KERNEL_SOURCES=$(KERNEL_SOURCES) + KBUILD_PARAMS += NV_KERNEL_OUTPUT=$(KERNEL_OUTPUT) + KBUILD_PARAMS += NV_KERNEL_MODULES="$(NV_KERNEL_MODULES)" + KBUILD_PARAMS += INSTALL_MOD_DIR=kernel/drivers/video + KBUILD_PARAMS += NV_SPECTRE_V2=$(SPECTRE_V2_RETPOLINE) + + .PHONY: modules module clean clean_conftest modules_install + modules clean modules_install: + @$(MAKE) "LD=$(LD)" "CC=$(CC)" "OBJDUMP=$(OBJDUMP)" $(KBUILD_PARAMS) $@ + @if [ "$@" = "modules" ]; then \ + for module in $(NV_KERNEL_MODULES); do \ + if [ -x split-object-file.sh ]; then \ + ./split-object-file.sh $$module.ko; \ + fi; \ + done; \ + fi + + # Compatibility target for scripts that may be directly calling the + # "module" target from the old build system. + + module: modules + + # Check if the any of kernel module linker scripts exist. If they do, pass + # them as linker options (via variable NV_MODULE_LD_SCRIPTS) while building + # the kernel interface object files. These scripts do some processing on the + # module symbols on which the Linux kernel's module resolution is dependent + # and hence must be used whenever present. + + LD_SCRIPT ?= $(KERNEL_SOURCES)/scripts/module-common.lds \ + $(KERNEL_SOURCES)/arch/$(ARCH)/kernel/module.lds \ + $(KERNEL_OUTPUT)/scripts/module.lds + NV_MODULE_COMMON_SCRIPTS := $(foreach s, $(wildcard $(LD_SCRIPT)), -T $(s)) + + # Use $* to match the stem % in the kernel interface file %-linux.o. Replace + # "nv" with "nvidia" in $* as appropriate: e.g. nv-modeset-linux.o links + # nvidia-modeset.mod.o and nvidia-modeset/nv-modeset-interface.o. The kernel + # interface file must have the .mod.o object linked into it: otherwise, the + # kernel module produced by linking the interface against its corresponding + # core object file will not be loadable. The .mod.o file is built as part of + # the MODPOST process (stage 2), so the rule to build the kernel interface + # cannot be defined in the *Kbuild files, which are only used during stage 1. 
+ + %-linux.o: modules + $(LD) $(NV_MODULE_COMMON_SCRIPTS) -r -o $@ \ + $(subst nv,nvidia,$*).mod.o $(subst nv,nvidia,$*)/$*-interface.o + + # Kbuild's "clean" rule won't clean up the conftest headers on its own, and + # clean-dirs doesn't appear to work as advertised. + clean_conftest: + $(RM) -r conftest + clean: clean_conftest + +endif # KERNELRELEASE diff --git a/kernel-open/common/inc/conftest.h b/kernel-open/common/inc/conftest.h new file mode 100644 index 000000000..dd05144c4 --- /dev/null +++ b/kernel-open/common/inc/conftest.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _CONFTEST_H +#define _CONFTEST_H + +#include "conftest/headers.h" +#include "conftest/functions.h" +#include "conftest/generic.h" +#include "conftest/macros.h" +#include "conftest/symbols.h" +#include "conftest/types.h" + +#endif diff --git a/kernel-open/common/inc/cpuopsys.h b/kernel-open/common/inc/cpuopsys.h new file mode 100644 index 000000000..4b2ef6fb5 --- /dev/null +++ b/kernel-open/common/inc/cpuopsys.h @@ -0,0 +1,459 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
\brief + * Define compile time symbols for CPU type and operating system type. + * This file should only contain preprocessor commands so that + * there are no dependencies on other files. + * + * cpuopsys.h + * + * Copyright (c) 2001, Nvidia Corporation. All rights reserved. + */ + +/*! + * Uniform names are defined for compile time options to distinguish + * CPU types and Operating systems. + * Distinctions between CPU and OpSys should be orthogonal. + * + * These uniform names have initially been defined by keying off the + * makefile/build names defined for builds in the OpenGL group. + * Getting the uniform names defined for other builds may require + * different qualifications. + * + * The file is placed here to allow for the possibility of all driver + * components using the same naming convention for conditional compilation. + */ + +#ifndef CPUOPSYS_H +#define CPUOPSYS_H + +/*****************************************************************************/ +/* Define all OS/CPU-Chip related symbols */ + +/* ***** WINDOWS variations */ +#if defined(_WIN32) || defined(_WIN16) +# define NV_WINDOWS + +# if defined(_WIN32_WINNT) +# define NV_WINDOWS_NT +# elif defined(_WIN32_WCE) +# define NV_WINDOWS_CE +# elif !defined(NV_MODS) +# define NV_WINDOWS_9X +# endif +#endif /* _WIN32 || defined(_WIN16) */ + +/* ***** Unix variations */ +#if defined(__linux__) && !defined(NV_LINUX) && !defined(NV_VMWARE) +# define NV_LINUX +#endif /* defined(__linux__) */ + +#if defined(__VMWARE__) && !defined(NV_VMWARE) +# define NV_VMWARE +#endif /* defined(__VMWARE__) */ + +/* SunOS + gcc */ +#if defined(__sun__) && defined(__svr4__) && !defined(NV_SUNOS) +# define NV_SUNOS +#endif /* defined(__sun__) && defined(__svr4__) */ + +/* SunOS + Sun Compiler (named SunPro, Studio or Forte) */ +#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) +# define NV_SUNPRO_C +# define NV_SUNOS +#endif /* defined(_SUNPRO_C) || defined(__SUNPRO_CC) */ + +#if defined(__FreeBSD__) && !defined(NV_BSD) +# define NV_BSD +#endif /* defined(__FreeBSD__) */ + +/* XXXar don't define NV_UNIX on MacOSX or vxworks or QNX */ +#if (defined(__unix__) || defined(__unix) || defined(__INTEGRITY) ) && !defined(nvmacosx) && !defined(vxworks) && !defined(NV_UNIX) && !defined(__QNX__) && !defined(__QNXNTO__)/* XXX until removed from Makefiles */ +# define NV_UNIX +#endif /* defined(__unix__) */ + +#if (defined(__QNX__) || defined(__QNXNTO__)) && !defined(NV_QNX) +# define NV_QNX +#endif + +#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(NV_ANDROID) +# define NV_ANDROID +#endif + + + + + + + + +#if defined(DceCore) && !defined(NV_DCECORE) +# define NV_DCECORE +#endif + +/* ***** Apple variations */ +#if defined(macintosh) || defined(__APPLE__) +# define NV_MACINTOSH +# if defined(__MACH__) +# define NV_MACINTOSH_OSX +# else +# define NV_MACINTOSH_OS9 +# endif +# if defined(__LP64__) +# define NV_MACINTOSH_64 +# endif +#endif /* defined(macintosh) */ + +/* ***** VxWorks */ +/* Tornado 2.21 is gcc 2.96 and #defines __vxworks. */ +/* Tornado 2.02 is gcc 2.7.2 and doesn't define any OS symbol, so we rely on */ +/* the build system #defining vxworks. 
*/ +#if defined(__vxworks) || defined(vxworks) +# define NV_VXWORKS +#endif + +/* ***** Integrity OS */ +#if defined(__INTEGRITY) +# if !defined(NV_INTEGRITY) +# define NV_INTEGRITY +# endif +#endif + +/* ***** Processor type variations */ +/* Note: The prefix NV_CPU_* is taken by Nvcm.h */ + +#if ((defined(_M_IX86) || defined(__i386__) || defined(__i386)) && !defined(NVCPU_X86)) /* XXX until removed from Makefiles */ +/* _M_IX86 for windows, __i386__ for Linux (or any x86 using gcc) */ +/* __i386 for Studio compiler on Solaris x86 */ +# define NVCPU_X86 /* any IA32 machine (not x86-64) */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(_WIN32) && defined(_M_IA64) +# define NVCPU_IA64_WINDOWS /* any IA64 for Windows opsys */ +#endif +#if defined(NV_LINUX) && defined(__ia64__) +# define NVCPU_IA64_LINUX /* any IA64 for Linux opsys */ +#endif +#if defined(NVCPU_IA64_WINDOWS) || defined(NVCPU_IA64_LINUX) || defined(IA64) +# define NVCPU_IA64 /* any IA64 for any opsys */ +#endif + +#if (defined(NV_MACINTOSH) && !(defined(__i386__) || defined(__x86_64__))) || defined(__PPC__) || defined(__ppc) +# if defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) +# ifndef NVCPU_PPC64LE +# define NVCPU_PPC64LE /* PPC 64-bit little endian */ +# endif +# else +# ifndef NVCPU_PPC +# define NVCPU_PPC /* any non-PPC64LE PowerPC architecture */ +# endif +# ifndef NV_BIG_ENDIAN +# define NV_BIG_ENDIAN +# endif +# endif +# define NVCPU_FAMILY_PPC +#endif + +#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +# define NVCPU_X86_64 /* any x86-64 for any opsys */ +#endif + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) +# define NVCPU_FAMILY_X86 +#endif + +#if defined(__riscv) && (__riscv_xlen==64) +# define NVCPU_RISCV64 +# if defined(__nvriscv) +# define NVCPU_NVRISCV64 +# endif +#endif + +#if defined(__arm__) || defined(_M_ARM) +/* + * 32-bit instruction set on, e.g., ARMv7 or AArch32 execution state + * on ARMv8 + */ +# define NVCPU_ARM +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(__aarch64__) || defined(__ARM64__) || defined(_M_ARM64) +# define NVCPU_AARCH64 /* 64-bit A64 instruction set on ARMv8 */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NVCPU_ARM) || defined(NVCPU_AARCH64) +# define NVCPU_FAMILY_ARM +#endif + +#if defined(__SH4__) +# ifndef NVCPU_SH4 +# define NVCPU_SH4 /* Renesas (formerly Hitachi) SH4 */ +# endif +# if defined NV_WINDOWS_CE +# define NVCPU_MIN_PAGE_SHIFT 12 +# endif +#endif + +/* For Xtensa processors */ +#if defined(__XTENSA__) +# define NVCPU_XTENSA +# if defined(__XTENSA_EB__) +# define NV_BIG_ENDIAN +# endif +#endif + + +/* + * Other flavors of CPU type should be determined at run-time. + * For example, an x86 architecture with/without SSE. + * If it can compile, then there's no need for a compile time option. + * For some current GCC limitations, these may be fixed by using the Intel + * compiler for certain files in a Linux build. + */ + +/* The minimum page size can be determined from the minimum page shift */ +#if defined(NVCPU_MIN_PAGE_SHIFT) +#define NVCPU_MIN_PAGE_SIZE (1 << NVCPU_MIN_PAGE_SHIFT) +#endif + +#if defined(NVCPU_IA64) || defined(NVCPU_X86_64) || \ + defined(NV_MACINTOSH_64) || defined(NVCPU_AARCH64) || \ + defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64) +# define NV_64_BITS /* all architectures where pointers are 64 bits */ +#else +/* we assume 32 bits. I don't see a need for NV_16_BITS. 
*/ +#endif + +/* For verification-only features not intended to be included in normal drivers */ +#if (defined(NV_MODS) || defined(NV_GSP_MODS)) && defined(DEBUG) && !defined(DISABLE_VERIF_FEATURES) +#define NV_VERIF_FEATURES +#endif + +/* + * New, safer family of #define's -- these ones use 0 vs. 1 rather than + * defined/!defined. This is advantageous because if you make a typo, + * say misspelled ENDIAN: + * + * #if NVCPU_IS_BIG_ENDAIN + * + * ...some compilers can give you a warning telling you that you screwed up. + * The compiler can also give you a warning if you forget to #include + * "cpuopsys.h" in your code before the point where you try to use these + * conditionals. + * + * Also, the names have been prefixed in more cases with "CPU" or "OS" for + * increased clarity. You can tell the names apart from the old ones because + * they all use "_IS_" in the name. + * + * Finally, these can be used in "if" statements and not just in #if's. For + * example: + * + * if (NVCPU_IS_BIG_ENDIAN) x = Swap32(x); + * + * Maybe some day in the far-off future these can replace the old #define's. + */ + +#if defined(NV_MODS) +#define NV_IS_MODS 1 +#else +#define NV_IS_MODS 0 +#endif + +#if defined(NV_GSP_MODS) +#define NV_IS_GSP_MODS 1 +#else +#define NV_IS_GSP_MODS 0 +#endif + +#if defined(NV_WINDOWS) +#define NVOS_IS_WINDOWS 1 +#else +#define NVOS_IS_WINDOWS 0 +#endif +#if defined(NV_WINDOWS_CE) +#define NVOS_IS_WINDOWS_CE 1 +#else +#define NVOS_IS_WINDOWS_CE 0 +#endif +#if defined(NV_LINUX) +#define NVOS_IS_LINUX 1 +#else +#define NVOS_IS_LINUX 0 +#endif +#if defined(NV_UNIX) +#define NVOS_IS_UNIX 1 +#else +#define NVOS_IS_UNIX 0 +#endif +#if defined(NV_BSD) +#define NVOS_IS_FREEBSD 1 +#else +#define NVOS_IS_FREEBSD 0 +#endif +#if defined(NV_SUNOS) +#define NVOS_IS_SOLARIS 1 +#else +#define NVOS_IS_SOLARIS 0 +#endif +#if defined(NV_VMWARE) +#define NVOS_IS_VMWARE 1 +#else +#define NVOS_IS_VMWARE 0 +#endif +#if defined(NV_QNX) +#define NVOS_IS_QNX 1 +#else +#define NVOS_IS_QNX 0 +#endif +#if defined(NV_ANDROID) +#define NVOS_IS_ANDROID 1 +#else +#define NVOS_IS_ANDROID 0 +#endif +#if defined(NV_MACINTOSH) +#define NVOS_IS_MACINTOSH 1 +#else +#define NVOS_IS_MACINTOSH 0 +#endif +#if defined(NV_VXWORKS) +#define NVOS_IS_VXWORKS 1 +#else +#define NVOS_IS_VXWORKS 0 +#endif +#if defined(NV_LIBOS) +#define NVOS_IS_LIBOS 1 +#else +#define NVOS_IS_LIBOS 0 +#endif +#if defined(NV_INTEGRITY) +#define NVOS_IS_INTEGRITY 1 +#else +#define NVOS_IS_INTEGRITY 0 +#endif + + + + + + + + + + +#if defined(NVCPU_X86) +#define NVCPU_IS_X86 1 +#else +#define NVCPU_IS_X86 0 +#endif +#if defined(NVCPU_RISCV64) +#define NVCPU_IS_RISCV64 1 +#else +#define NVCPU_IS_RISCV64 0 +#endif +#if defined(NVCPU_NVRISCV64) +#define NVCPU_IS_NVRISCV64 1 +#else +#define NVCPU_IS_NVRISCV64 0 +#endif +#if defined(NVCPU_IA64) +#define NVCPU_IS_IA64 1 +#else +#define NVCPU_IS_IA64 0 +#endif +#if defined(NVCPU_X86_64) +#define NVCPU_IS_X86_64 1 +#else +#define NVCPU_IS_X86_64 0 +#endif +#if defined(NVCPU_FAMILY_X86) +#define NVCPU_IS_FAMILY_X86 1 +#else +#define NVCPU_IS_FAMILY_X86 0 +#endif +#if defined(NVCPU_PPC) +#define NVCPU_IS_PPC 1 +#else +#define NVCPU_IS_PPC 0 +#endif +#if defined(NVCPU_PPC64LE) +#define NVCPU_IS_PPC64LE 1 +#else +#define NVCPU_IS_PPC64LE 0 +#endif +#if defined(NVCPU_FAMILY_PPC) +#define NVCPU_IS_FAMILY_PPC 1 +#else +#define NVCPU_IS_FAMILY_PPC 0 +#endif +#if defined(NVCPU_ARM) +#define NVCPU_IS_ARM 1 +#else +#define NVCPU_IS_ARM 0 +#endif +#if defined(NVCPU_AARCH64) +#define NVCPU_IS_AARCH64 1 +#else +#define 
NVCPU_IS_AARCH64 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_FAMILY_ARM 1 +#else +#define NVCPU_IS_FAMILY_ARM 0 +#endif +#if defined(NVCPU_SH4) +#define NVCPU_IS_SH4 1 +#else +#define NVCPU_IS_SH4 0 +#endif +#if defined(NVCPU_XTENSA) +#define NVCPU_IS_XTENSA 1 +#else +#define NVCPU_IS_XTENSA 0 +#endif +#if defined(NV_BIG_ENDIAN) +#define NVCPU_IS_BIG_ENDIAN 1 +#else +#define NVCPU_IS_BIG_ENDIAN 0 +#endif +#if defined(NV_64_BITS) +#define NVCPU_IS_64_BITS 1 +#else +#define NVCPU_IS_64_BITS 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_PCIE_CACHE_COHERENT 0 +#else +#define NVCPU_IS_PCIE_CACHE_COHERENT 1 +#endif +#if defined(NV_DCECORE) +#define NVOS_IS_DCECORE 1 +#else +#define NVOS_IS_DCECORE 0 +#endif +/*****************************************************************************/ + +#endif /* CPUOPSYS_H */ diff --git a/kernel-open/common/inc/nv-caps.h b/kernel-open/common/inc/nv-caps.h new file mode 100644 index 000000000..35bbf7c0c --- /dev/null +++ b/kernel-open/common/inc/nv-caps.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_CAPS_H_ +#define _NV_CAPS_H_ + +#include + +/* + * Opaque OS-specific struct; on Linux, this has member + * 'struct proc_dir_entry'. + */ +typedef struct nv_cap nv_cap_t; + +/* + * Creates directory named "capabilities" under the provided path. + * + * @param[in] path Absolute path + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_init(const char *path); + +/* + * Creates capability directory entry + * + * @param[in] parent_cap Parent capability directory + * @param[in] name Capability directory's name + * @param[in] mode Capability directory's access mode + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode); + +/* + * Creates capability file entry + * + * @param[in] parent_cap Parent capability directory + * @param[in] name Capability file's name + * @param[in] mode Capability file's access mode + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. 
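A minimal sketch of how a caller might use nv_cap_init() and nv_cap_create_dir_entry(), together with nv_cap_create_file_entry() whose prototype follows just below. The path, entry names, and modes here are hypothetical, and error handling is abbreviated:

/* Hypothetical example -- not part of nv-caps.h. */
#include "nv-caps.h"

static nv_cap_t *example_root;
static nv_cap_t *example_dir;
static nv_cap_t *example_file;

int nv_example_caps_setup(void)
{
    /* Creates a "capabilities" directory under the given absolute path. */
    example_root = nv_cap_init("/driver/example");
    if (example_root == NULL)
        return -1;

    example_dir = nv_cap_create_dir_entry(example_root, "mig", 0555);
    if (example_dir == NULL)
        return -1;

    example_file = nv_cap_create_file_entry(example_dir, "config", 0400);
    return (example_file != NULL) ? 0 : -1;
}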
+ */ +nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode); + +/* + * Destroys capability entry + * + * @param[in] cap Capability entry + */ +void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap); + +/* + * Validates and duplicates the provided file descriptor + * + * @param[in] cap Capability entry + * @param[in] fd File descriptor to be validated + * + * Returns duplicate fd upon success. Otherwise, returns -1. + */ +int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd); + +/* + * Closes file descriptor + * + * This function should be used to close duplicate file descriptors + * returned by nv_cap_validate_and_dup_fd. + * + * @param[in] fd File descriptor to be validated + * + */ +void NV_API_CALL nv_cap_close_fd(int fd); + +#endif /* _NV_CAPS_H_ */ diff --git a/kernel-open/common/inc/nv-dmabuf.h b/kernel-open/common/inc/nv-dmabuf.h new file mode 100644 index 000000000..ab794df9f --- /dev/null +++ b/kernel-open/common/inc/nv-dmabuf.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_DMABUF_H_ +#define _NV_DMABUF_H_ + +#include "nv-linux.h" + +NV_STATUS nv_dma_buf_export(nv_state_t *, nv_ioctl_export_to_dma_buf_fd_t *); + +#endif // _NV_DMABUF_H_ diff --git a/kernel-open/common/inc/nv-gpu-info.h b/kernel-open/common/inc/nv-gpu-info.h new file mode 100644 index 000000000..a8c0c0a1f --- /dev/null +++ b/kernel-open/common/inc/nv-gpu-info.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
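The two fd helpers above are meant to be used as a pair: every duplicate returned by nv_cap_validate_and_dup_fd() should eventually be released with nv_cap_close_fd(). A minimal sketch (hypothetical function name, trimmed error handling):

/* Hypothetical example -- not part of nv-caps.h. */
#include "nv-caps.h"

int nv_example_check_client_fd(const nv_cap_t *cap, int client_fd)
{
    /* Validate the fd the client passed in; on success we own a duplicate. */
    int dup_fd = nv_cap_validate_and_dup_fd(cap, client_fd);

    if (dup_fd < 0)
        return -1;   /* client does not hold this capability */

    /* ... use the capability ... */

    /* Duplicates must be closed with nv_cap_close_fd(), not a generic close. */
    nv_cap_close_fd(dup_fd);
    return 0;
}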
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_GPU_INFO_H_ +#define _NV_GPU_INFO_H_ + +typedef struct { + NvU32 gpu_id; + + struct { + NvU32 domain; + NvU8 bus, slot, function; + } pci_info; + + /* + * opaque OS-specific pointer; on Linux, this is a pointer to the + * 'struct device' for the GPU. + */ + void *os_device_ptr; +} nv_gpu_info_t; + +#define NV_MAX_GPUS 32 + +#endif /* _NV_GPU_INFO_H_ */ diff --git a/kernel-open/common/inc/nv-hash.h b/kernel-open/common/inc/nv-hash.h new file mode 100644 index 000000000..97dbb5ddc --- /dev/null +++ b/kernel-open/common/inc/nv-hash.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_HASH_H__ +#define __NV_HASH_H__ + +#include "conftest.h" +#include "nv-list-helpers.h" +#include +#include +#include + +#if defined(NV_LINUX_STRINGHASH_H_PRESENT) +#include /* full_name_hash() */ +#else +#include +#endif + +#if (NV_FULL_NAME_HASH_ARGUMENT_COUNT == 3) +#define nv_string_hash(_str) full_name_hash(NULL, _str, strlen(_str)) +#else +#define nv_string_hash(_str) full_name_hash(_str, strlen(_str)) +#endif + +/** + * This naive hashtable was introduced by commit d9b482c8ba19 (v3.7, 2012-10-31). + * To support older kernels import necessary functionality from + * . + */ + +#define NV_HASH_SIZE(name) (ARRAY_SIZE(name)) +#define NV_HASH_BITS(name) ilog2(NV_HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define NV_HASH_MIN(val, bits) \ + (sizeof(val) <= 4 ? 
hash_32(val, bits) : hash_long(val, bits)) + +#define NV_DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +static inline void _nv_hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + { + INIT_HLIST_HEAD(&ht[i]); + } +} + +/** + * nv_hash_init - initialize a hash table + * @hashtable: hashtable to be initialized + */ +#define nv_hash_init(hashtable) _nv_hash_init(hashtable, NV_HASH_SIZE(hashtable)) + +/** + * nv_hash_add - add an object to a hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define nv_hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[NV_HASH_MIN(key, NV_HASH_BITS(hashtable))]) + +/** + * nv_hash_for_each_possible - iterate over all possible objects hashing to the + * same bucket + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define nv_hash_for_each_possible(name, obj, member, key) \ + nv_hlist_for_each_entry(obj, &name[NV_HASH_MIN(key, NV_HASH_BITS(name))], member) + +#endif // __NV_HASH_H__ diff --git a/kernel-open/common/inc/nv-hypervisor.h b/kernel-open/common/inc/nv-hypervisor.h new file mode 100644 index 000000000..ddc6a9134 --- /dev/null +++ b/kernel-open/common/inc/nv-hypervisor.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_HYPERVISOR_H_ +#define _NV_HYPERVISOR_H_ + +#include + +// Enums for supported hypervisor types. 
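Pulling the nv-hash.h helpers above together, here is a minimal sketch of a string-keyed table: declare the buckets, initialize them once, insert with nv_hash_add(), and walk one bucket with nv_hash_for_each_possible(). The entry type and function names are hypothetical:

/* Hypothetical example -- not part of nv-hash.h. */
#include "nv-hash.h"
#include <linux/string.h>   /* strcmp */

struct nv_example_entry
{
    struct hlist_node node;
    const char *name;
};

/* 2^4 = 16 buckets. */
static NV_DECLARE_HASHTABLE(nv_example_table, 4);

static void nv_example_table_setup(void)
{
    nv_hash_init(nv_example_table);
}

static void nv_example_insert(struct nv_example_entry *entry)
{
    nv_hash_add(nv_example_table, &entry->node, nv_string_hash(entry->name));
}

static struct nv_example_entry *nv_example_lookup(const char *name)
{
    struct nv_example_entry *entry;

    nv_hash_for_each_possible(nv_example_table, entry, node, nv_string_hash(name))
    {
        if (strcmp(entry->name, name) == 0)
            return entry;
    }
    return NULL;
}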
+// New hypervisor type should be added before OS_HYPERVISOR_CUSTOM_FORCED +typedef enum _HYPERVISOR_TYPE +{ + OS_HYPERVISOR_XEN = 0, + OS_HYPERVISOR_VMWARE, + OS_HYPERVISOR_HYPERV, + OS_HYPERVISOR_KVM, + OS_HYPERVISOR_PARALLELS, + OS_HYPERVISOR_CUSTOM_FORCED, + OS_HYPERVISOR_UNKNOWN +} HYPERVISOR_TYPE; + +#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE 0 +#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1 +#define CMD_VGPU_VFIO_REGISTER_MDEV 2 +#define CMD_VGPU_VFIO_PRESENT 3 + +#define MAX_VF_COUNT_PER_GPU 64 + +typedef enum _VGPU_TYPE_INFO +{ + VGPU_TYPE_NAME = 0, + VGPU_TYPE_DESCRIPTION, + VGPU_TYPE_INSTANCES, +} VGPU_TYPE_INFO; + +typedef struct +{ + void *vgpuVfioRef; + void *waitQueue; + void *nv; + NvU32 *vgpuTypeIds; + NvU32 numVgpuTypes; + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU8 function; + NvBool is_virtfn; +} vgpu_vfio_info; + +typedef struct +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU8 function; + NvBool isNvidiaAttached; + NvBool isMdevAttached; +} vgpu_vf_pci_info; + +typedef enum VGPU_CMD_PROCESS_VF_INFO_E +{ + NV_VGPU_SAVE_VF_INFO = 0, + NV_VGPU_REMOVE_VF_PCI_INFO = 1, + NV_VGPU_REMOVE_VF_MDEV_INFO = 2, + NV_VGPU_GET_VF_INFO = 3 +} VGPU_CMD_PROCESS_VF_INFO; + +typedef enum VGPU_DEVICE_STATE_E +{ + NV_VGPU_DEV_UNUSED = 0, + NV_VGPU_DEV_OPENED = 1, + NV_VGPU_DEV_IN_USE = 2 +} VGPU_DEVICE_STATE; + +typedef enum _VMBUS_CMD_TYPE +{ + VMBUS_CMD_TYPE_INVALID = 0, + VMBUS_CMD_TYPE_SETUP = 1, + VMBUS_CMD_TYPE_SENDPACKET = 2, + VMBUS_CMD_TYPE_CLEANUP = 3, +} VMBUS_CMD_TYPE; + +typedef struct +{ + NvU32 request_id; + NvU32 page_count; + NvU64 *pPfns; + void *buffer; + NvU32 bufferlen; +} vmbus_send_packet_cmd_params; + + +typedef struct +{ + NvU32 override_sint; + NvU8 *nv_guid; +} vmbus_setup_cmd_params; + +/* + * Function prototypes + */ + +HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void); + +#endif // _NV_HYPERVISOR_H_ diff --git a/kernel-open/common/inc/nv-ioctl-numa.h b/kernel-open/common/inc/nv-ioctl-numa.h new file mode 100644 index 000000000..3fad82092 --- /dev/null +++ b/kernel-open/common/inc/nv-ioctl-numa.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
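For the hypervisor header just above, nv_get_hypervisor_type() returns one of the HYPERVISOR_TYPE enumerators. A minimal sketch of dispatching on the result (hypothetical helper, string labels are illustrative):

/* Hypothetical example -- not part of nv-hypervisor.h. */
#include "nv-hypervisor.h"

const char *nv_example_hypervisor_name(void)
{
    switch (nv_get_hypervisor_type())
    {
        case OS_HYPERVISOR_XEN:       return "Xen";
        case OS_HYPERVISOR_VMWARE:    return "VMware";
        case OS_HYPERVISOR_HYPERV:    return "Hyper-V";
        case OS_HYPERVISOR_KVM:       return "KVM";
        case OS_HYPERVISOR_PARALLELS: return "Parallels";
        default:                      return "unknown";
    }
}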
+ */ + + +#ifndef NV_IOCTL_NUMA_H +#define NV_IOCTL_NUMA_H + +#if defined(NV_LINUX) + +#include + +#if defined(NV_KERNEL_INTERFACE_LAYER) + +#include + +#else + +#include + +#if !defined(__aligned) +#define __aligned(n) __attribute__((aligned(n))) +#endif + +#endif + +#define NV_ESC_NUMA_INFO (NV_IOCTL_BASE + 15) +#define NV_ESC_SET_NUMA_STATUS (NV_IOCTL_BASE + 16) + +#define NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES 64 +typedef struct offline_addresses +{ + uint64_t addresses[NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES] __aligned(8); + uint32_t numEntries; +} nv_offline_addresses_t; + + +/* per-device NUMA memory info as assigned by the system */ +typedef struct nv_ioctl_numa_info +{ + int32_t nid; + int32_t status; + uint64_t memblock_size __aligned(8); + uint64_t numa_mem_addr __aligned(8); + uint64_t numa_mem_size __aligned(8); + nv_offline_addresses_t offline_addresses __aligned(8); +} nv_ioctl_numa_info_t; + +/* set the status of the device NUMA memory */ +typedef struct nv_ioctl_set_numa_status +{ + int32_t status; +} nv_ioctl_set_numa_status_t; + +#define NV_IOCTL_NUMA_STATUS_DISABLED 0 +#define NV_IOCTL_NUMA_STATUS_OFFLINE 1 +#define NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS 2 +#define NV_IOCTL_NUMA_STATUS_ONLINE 3 +#define NV_IOCTL_NUMA_STATUS_ONLINE_FAILED 4 +#define NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS 5 +#define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6 + +#endif + +#endif diff --git a/kernel-open/common/inc/nv-ioctl-numbers.h b/kernel-open/common/inc/nv-ioctl-numbers.h new file mode 100644 index 000000000..cb0b6a246 --- /dev/null +++ b/kernel-open/common/inc/nv-ioctl-numbers.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_NUMBERS_H +#define NV_IOCTL_NUMBERS_H + +/* NOTE: using an ioctl() number > 55 will overflow! 
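For the per-device NUMA info structure defined above, the status field takes one of the NV_IOCTL_NUMA_STATUS_* values. A minimal sketch of interpreting it; treating only ONLINE as usable is an assumption of this example, not something the header states:

/* Hypothetical example -- not part of nv-ioctl-numa.h. */
#include "nv-ioctl-numa.h"

/* Returns 1 if the device's NUMA memory can currently be used. */
static int nv_example_numa_usable(const nv_ioctl_numa_info_t *info)
{
    switch (info->status)
    {
        case NV_IOCTL_NUMA_STATUS_ONLINE:
            return 1;
        case NV_IOCTL_NUMA_STATUS_DISABLED:
        case NV_IOCTL_NUMA_STATUS_OFFLINE:
        case NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS:
        case NV_IOCTL_NUMA_STATUS_ONLINE_FAILED:
        case NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS:
        case NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED:
        default:
            return 0;
    }
}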
*/ +#define NV_IOCTL_MAGIC 'F' +#define NV_IOCTL_BASE 200 +#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0) +#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1) +#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6) +#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7) +#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9) +#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10) +#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11) +#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12) +#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13) +#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14) +#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17) + +#endif diff --git a/kernel-open/common/inc/nv-ioctl.h b/kernel-open/common/inc/nv-ioctl.h new file mode 100644 index 000000000..ffd1dee87 --- /dev/null +++ b/kernel-open/common/inc/nv-ioctl.h @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_H +#define NV_IOCTL_H + +#include +#include + +typedef struct { + NvU32 domain; /* PCI domain number */ + NvU8 bus; /* PCI bus number */ + NvU8 slot; /* PCI slot number */ + NvU8 function; /* PCI function number */ + NvU16 vendor_id; /* PCI vendor ID */ + NvU16 device_id; /* PCI device ID */ +} nv_pci_info_t; + +/* + * ioctl()'s with parameter structures too large for the + * _IOC cmd layout use the nv_ioctl_xfer_t structure + * and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual + * size and user argument pointer into the RM, which + * will then copy it to/from kernel space in separate steps. 
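A minimal sketch of populating the nv_ioctl_xfer structure defined just below for an oversized command. The helper name is hypothetical, the NvP64 cast assumes the usual integer-sized pointer carrier from nvtypes.h, and obtaining the fd and issuing the outer NV_ESC_IOCTL_XFER_CMD ioctl are outside the scope of this sketch:

/* Hypothetical example -- not part of nv-ioctl.h. */
#include "nv-ioctl.h"

static void nv_example_fill_xfer(nv_ioctl_xfer_t *xfer,
                                 NvU32 cmd, void *params, NvU32 size)
{
    /* The real command number and parameter size travel in the payload,
     * while the outer ioctl() uses NV_ESC_IOCTL_XFER_CMD. */
    xfer->cmd  = cmd;
    xfer->size = size;
    xfer->ptr  = (NvP64)(NvUPtr)params;   /* assumed cast convention */
}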
+ */ +typedef struct nv_ioctl_xfer +{ + NvU32 cmd; + NvU32 size; + NvP64 ptr NV_ALIGN_BYTES(8); +} nv_ioctl_xfer_t; + +typedef struct nv_ioctl_card_info +{ + NvBool valid; + nv_pci_info_t pci_info; /* PCI config information */ + NvU32 gpu_id; + NvU16 interrupt_line; + NvU64 reg_address NV_ALIGN_BYTES(8); + NvU64 reg_size NV_ALIGN_BYTES(8); + NvU64 fb_address NV_ALIGN_BYTES(8); + NvU64 fb_size NV_ALIGN_BYTES(8); + NvU32 minor_number; + NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */ +} nv_ioctl_card_info_t; + +/* alloc event */ +typedef struct nv_ioctl_alloc_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_alloc_os_event_t; + +/* free event */ +typedef struct nv_ioctl_free_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_free_os_event_t; + +/* status code */ +typedef struct nv_ioctl_status_code +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU32 status; +} nv_ioctl_status_code_t; + +/* check version string */ +#define NV_RM_API_VERSION_STRING_LENGTH 64 + +typedef struct nv_ioctl_rm_api_version +{ + NvU32 cmd; + NvU32 reply; + char versionString[NV_RM_API_VERSION_STRING_LENGTH]; +} nv_ioctl_rm_api_version_t; + +#define NV_RM_API_VERSION_CMD_STRICT 0 +#define NV_RM_API_VERSION_CMD_RELAXED '1' +#define NV_RM_API_VERSION_CMD_OVERRIDE '2' + +#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0 +#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1 + +typedef struct nv_ioctl_query_device_intr +{ + NvU32 intrStatus NV_ALIGN_BYTES(4); + NvU32 status; +} nv_ioctl_query_device_intr; + +/* system parameters that the kernel driver may use for configuration */ +typedef struct nv_ioctl_sys_params +{ + NvU64 memblock_size NV_ALIGN_BYTES(8); +} nv_ioctl_sys_params_t; + +typedef struct nv_ioctl_register_fd +{ + int ctl_fd; +} nv_ioctl_register_fd_t; + +#define NV_DMABUF_EXPORT_MAX_HANDLES 128 + +typedef struct nv_ioctl_export_to_dma_buf_fd +{ + int fd; + NvHandle hClient; + NvU32 totalObjects; + NvU32 numObjects; + NvU32 index; + NvU64 totalSize NV_ALIGN_BYTES(8); + NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES]; + NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU32 status; +} nv_ioctl_export_to_dma_buf_fd_t; + +#endif diff --git a/kernel-open/common/inc/nv-kernel-interface-api.h b/kernel-open/common/inc/nv-kernel-interface-api.h new file mode 100644 index 000000000..183f9b431 --- /dev/null +++ b/kernel-open/common/inc/nv-kernel-interface-api.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_KERNEL_INTERFACE_API_H +#define _NV_KERNEL_INTERFACE_API_H +/************************************************************************************************************** +* +* File: nv-kernel-interface-api.h +* +* Description: +* Defines the NV API related macros. +* +**************************************************************************************************************/ + +#if NVOS_IS_UNIX && NVCPU_IS_X86_64 && defined(__use_altstack__) +#define NV_API_CALL __attribute__((altstack(0))) +#else +#define NV_API_CALL +#endif + +#endif /* _NV_KERNEL_INTERFACE_API_H */ diff --git a/kernel-open/common/inc/nv-kref.h b/kernel-open/common/inc/nv-kref.h new file mode 100644 index 000000000..7e28ce236 --- /dev/null +++ b/kernel-open/common/inc/nv-kref.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_KREF_H__ +#define __NV_KREF_H__ + +#include + +typedef struct nv_kref +{ + atomic_t refcount; +} nv_kref_t; + +static inline void nv_kref_init(nv_kref_t *nv_kref) +{ + atomic_set(&nv_kref->refcount, 1); +} + +static inline void nv_kref_get(nv_kref_t *nv_kref) +{ + atomic_inc(&nv_kref->refcount); +} + +static inline int nv_kref_put(nv_kref_t *nv_kref, + void (*release)(nv_kref_t *nv_kref)) +{ + if (atomic_dec_and_test(&nv_kref->refcount)) + { + release(nv_kref); + return 1; + } + + return 0; +} + +static inline unsigned int nv_kref_read(const nv_kref_t *nv_kref) +{ + return atomic_read(&nv_kref->refcount); +} + +#endif // __NV_KREF_H__ diff --git a/kernel-open/common/inc/nv-kthread-q.h b/kernel-open/common/inc/nv-kthread-q.h new file mode 100644 index 000000000..82a8a6b8a --- /dev/null +++ b/kernel-open/common/inc/nv-kthread-q.h @@ -0,0 +1,255 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
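The nv-kref.h helpers above follow the usual embedded-refcount pattern: initialize to one, take references with nv_kref_get(), and drop them with nv_kref_put(), which invokes the release callback when the count reaches zero. A minimal sketch with a hypothetical containing object:

/* Hypothetical example -- not part of nv-kref.h. */
#include "nv-kref.h"
#include <linux/slab.h>      /* kzalloc/kfree */
#include <linux/kernel.h>    /* container_of */

struct nv_example_object
{
    nv_kref_t refcount;
    int payload;
};

static void nv_example_release(nv_kref_t *ref)
{
    struct nv_example_object *obj =
        container_of(ref, struct nv_example_object, refcount);
    kfree(obj);
}

static struct nv_example_object *nv_example_create(void)
{
    struct nv_example_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (obj != NULL)
        nv_kref_init(&obj->refcount);   /* count starts at 1 */
    return obj;
}

static void nv_example_put(struct nv_example_object *obj)
{
    /* Calls nv_example_release() when the count drops to zero. */
    nv_kref_put(&obj->refcount, nv_example_release);
}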
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_KTHREAD_QUEUE_H__ +#define __NV_KTHREAD_QUEUE_H__ + +#include // atomic_t +#include // list +#include // task_struct +#include // NUMA_NO_NODE + +#include "conftest.h" + +#if defined(NV_LINUX_SEMAPHORE_H_PRESENT) + #include +#else + #include +#endif + +//////////////////////////////////////////////////////////////////////////////// +// nv_kthread_q: +// +// 1. API and overview +// +// This "nv_kthread_q" system implements a simple queuing system for deferred +// work. The nv_kthread_q system has goals and use cases that are similar to +// the named workqueues in the Linux kernel, but nv_kthread_q is much (10x or +// so) smaller, simpler--and correspondingly less general. Deferred work +// items are put into a queue, and run within the context of a dedicated set +// of kernel threads (kthread). +// +// In order to avoid confusion with the Linux workqueue system, I have +// avoided using the term "work", and instead refer to "queues" (also called +// "q's") and "queue items" (also called "q_items"), in both variable names +// and comments. +// +// This module depends only upon the Linux kernel. +// +// Queue items that are submitted to separate nv_kthread_q instances are +// guaranteed to be run in different kthreads. +// +// Queue items that are submitted to the same nv_kthread_q are not guaranteed +// to be serialized, nor are they guaranteed to run concurrently. +// +// 2. Allocations +// +// The caller allocates queues and queue items. The nv_kthread_q APIs do +// the initialization (zeroing and setup) of queues and queue items. +// Allocation is handled that way, because one of the first use cases is a +// bottom half interrupt handler, and for that, queue items should be +// pre-allocated (for example, one per GPU), so that no allocation is +// required in the top-half interrupt handler. Relevant API calls: +// +// 3. Queue initialization +// +// nv_kthread_q_init() initializes a queue on the current NUMA node. +// +// or +// +// nv_kthread_q_init_on_node() initializes a queue on a specific NUMA node. +// +// 3. Scheduling things for the queue to run +// +// The nv_kthread_q_schedule_q_item() routine will schedule a q_item to run. +// +// 4. Stopping the queue(s) +// +// The nv_kthread_q_stop() routine will flush the queue, and safely stop +// the kthread, before returning. 
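Putting the overview above into a minimal end-to-end sketch: initialize a queue, initialize a q_item with its callback, schedule it, and stop the queue at teardown. The names and the no-op callback are hypothetical; see the per-function documentation below for the full rules (return codes, flush semantics, reuse after stop):

/* Hypothetical example -- not part of nv-kthread-q.h. */
#include "nv-kthread-q.h"

static nv_kthread_q_t      example_q;
static nv_kthread_q_item_t example_q_item;

static void example_callback(void *args)
{
    /* Deferred work runs here, in the queue's dedicated kthread. */
    (void)args;
}

static int example_setup(void)
{
    /* Returns a negative errno on failure, 0 on success. */
    int ret = nv_kthread_q_init(&example_q, "nv_example");
    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&example_q_item, example_callback, NULL);
    return 0;
}

static void example_defer_work(void)
{
    /* Non-zero only if the item was actually queued; safe from
     * interrupt context, including top-half ISRs. */
    nv_kthread_q_schedule_q_item(&example_q, &example_q_item);
}

static void example_teardown(void)
{
    /* Flushes the queue and stops the kthread before returning. */
    nv_kthread_q_stop(&example_q);
}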
+// +//////////////////////////////////////////////////////////////////////////////// + +typedef struct nv_kthread_q nv_kthread_q_t; +typedef struct nv_kthread_q_item nv_kthread_q_item_t; + +typedef void (*nv_q_func_t)(void *args); + +struct nv_kthread_q +{ + struct list_head q_list_head; + spinlock_t q_lock; + + // This is a counting semaphore. It gets incremented and decremented + // exactly once for each item that is added to the queue. + struct semaphore q_sem; + atomic_t main_loop_should_exit; + + struct task_struct *q_kthread; +}; + +struct nv_kthread_q_item +{ + struct list_head q_list_node; + nv_q_func_t function_to_run; + void *function_args; +}; + +#if defined(NV_KTHREAD_CREATE_ON_NODE_PRESENT) + #define NV_KTHREAD_Q_SUPPORTS_AFFINITY() 1 +#else + #define NV_KTHREAD_Q_SUPPORTS_AFFINITY() 0 +#endif + +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + +#define NV_KTHREAD_NO_NODE NUMA_NO_NODE + +// +// The queue must not be used before calling this routine. +// +// The caller allocates an nv_kthread_q_t item. This routine initializes +// the queue, and starts up a kernel thread ("kthread") to service the queue. +// The queue will initially be empty; there is intentionally no way to +// pre-initialize the queue with items to run. +// +// In order to avoid external dependencies (specifically, NV_STATUS codes), this +// returns a Linux kernel (negative) errno on failure, and zero on success. It +// is safe to call nv_kthread_q_stop() on a queue that nv_kthread_q_init() +// failed for. +// +// A short prefix of the qname arg will show up in []'s, via the ps(1) utility. +// +// The kernel thread stack is preferably allocated on the specified NUMA node if +// NUMA-affinity (NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1) is supported, but +// fallback to another node is possible because kernel allocators do not +// guarantee affinity. Note that NUMA-affinity applies only to +// the kthread stack. This API does not do anything about limiting the CPU +// affinity of the kthread. That is left to the caller. +// +// On kernels, which do not support NUMA-aware kthread stack allocations +// (NV_KTHTREAD_Q_SUPPORTS_AFFINITY() == 0), the API will return -ENOTSUPP +// if the value supplied for 'preferred_node' is anything other than +// NV_KTHREAD_NO_NODE. +// +// Reusing a queue: once a queue is initialized, it must be safely shut down +// (see "Stopping the queue(s)", below), before it can be reused. So, for +// a simple queue use case, the following will work: +// +// nv_kthread_q_init_on_node(&some_q, "display_name", preferred_node); +// nv_kthread_q_stop(&some_q); +// nv_kthread_q_init_on_node(&some_q, "reincarnated", preferred_node); +// nv_kthread_q_stop(&some_q); +// +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, + const char *qname, + int preferred_node); + +// +// This routine is the same as nv_kthread_q_init_on_node() with the exception +// that the queue stack will be allocated on the NUMA node of the caller. +// +static inline int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname) +{ + return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE); +} + +// +// The caller is responsible for stopping all queues, by calling this routine +// before, for example, kernel module unloading. This nv_kthread_q_stop() +// routine will flush the queue, and safely stop the kthread, before returning. +// +// You may ONLY call nv_kthread_q_stop() once, unless you reinitialize the +// queue in between, as shown in the nv_kthread_q_init() documentation, above. 
+// +// Do not add any more items to the queue after calling nv_kthread_q_stop. +// +// Calling nv_kthread_q_stop() on a queue which has been zero-initialized or +// for which nv_kthread_q_init() failed, is a no-op. +// +void nv_kthread_q_stop(nv_kthread_q_t *q); + +// +// All items that were in the queue before nv_kthread_q_flush was called, and +// all items scheduled by those items, will get run before this function +// returns. +// +// You may NOT call nv_kthread_q_flush() after having called nv_kthread_q_stop. +// +// This actually flushes the queue twice. That ensures that the queue is fully +// flushed, for an important use case: rescheduling from within one's own +// callback. In order to do that safely, you need to: +// +// -- set a flag that tells the callback to stop rescheduling itself. +// +// -- call either nv_kthread_q_flush or nv_kthread_q_stop (which internally +// calls nv_kthread_q_flush). The nv_kthread_q_flush, in turn, actually +// flushes the queue *twice*. The first flush waits for any callbacks +// to finish, that missed seeing the "stop_rescheduling" flag. The +// second flush waits for callbacks that were already scheduled when the +// first flush finished. +// +void nv_kthread_q_flush(nv_kthread_q_t *q); + +// Assigns function_to_run and function_args to the q_item. +// +// This must be called before calling nv_kthread_q_schedule_q_item. +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args); + +// +// The caller must have already set up the queue, via nv_kthread_q_init(). +// The caller owns the lifetime of the q_item. The nv_kthread_q system runs +// q_items, and adds or removes them from the queue. However, due to the first +// law of q-dynamics, it neither creates nor destroys q_items. +// +// When the callback (the function_to_run argument) is actually run, it is OK +// to free the q_item from within that routine. The nv_kthread_q system +// promises to be done with the q_item before that point. +// +// nv_kthread_q_schedule_q_item may be called from multiple threads at once, +// without danger of corrupting anything. This routine may also be safely +// called from interrupt context, including top-half ISRs. +// +// It is OK to reschedule the same q_item from within its own callback function. +// +// It is also OK to attempt to reschedule the same q_item, if that q_item is +// already pending in the queue. The q_item will not be rescheduled if it is +// already pending. +// +// Returns true (non-zero) if the item was actually scheduled. Returns false if +// the item was not scheduled, which can happen if: +// +// -- The q_item was already pending in a queue, or +// -- The queue is shutting down (or not yet started up). +// +int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item); + +// Built-in test. Returns -1 if any subtest failed, or 0 upon success. +int nv_kthread_q_run_self_test(void); + +#endif // __NV_KTHREAD_QUEUE_H__ diff --git a/kernel-open/common/inc/nv-linux.h b/kernel-open/common/inc/nv-linux.h new file mode 100644 index 000000000..d7f7377c3 --- /dev/null +++ b/kernel-open/common/inc/nv-linux.h @@ -0,0 +1,2068 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_LINUX_H_ +#define _NV_LINUX_H_ + +#include "nvstatus.h" +#include "nv.h" +#include "nv-ioctl-numa.h" +#include "conftest.h" + +#include "nv-lock.h" +#include "nv-pgprot.h" +#include "nv-mm.h" +#include "os-interface.h" +#include "nv-timer.h" +#include "nv-time.h" + +#define NV_KERNEL_NAME "Linux" + +#ifndef AUTOCONF_INCLUDED +#if defined(NV_GENERATED_AUTOCONF_H_PRESENT) +#include +#else +#include +#endif +#endif + +#if defined(NV_GENERATED_UTSRELEASE_H_PRESENT) + #include +#endif + +#if defined(NV_GENERATED_COMPILE_H_PRESENT) + #include +#endif + +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) +#error "This driver does not support kernels older than 2.6.32!" +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) +# define KERNEL_2_6 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) +# define KERNEL_3 +#else +#error "This driver does not support development kernels!" 
+#endif + +#if defined (CONFIG_SMP) && !defined (__SMP__) +#define __SMP__ +#endif + +#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) +# define MODVERSIONS +#endif + +#include +#include +#include +#include + +#include + +#if !defined(VM_RESERVED) +#define VM_RESERVED 0x00000000 +#endif +#if !defined(VM_DONTEXPAND) +#define VM_DONTEXPAND 0x00000000 +#endif +#if !defined(VM_DONTDUMP) +#define VM_DONTDUMP 0x00000000 +#endif + +#include /* module_init, module_exit */ +#include /* pic_t, size_t, __u32, etc */ +#include /* error codes */ +#include /* circular linked list */ +#include /* NULL, offsetof */ +#include /* wait queues */ +#include /* strchr(), strpbrk() */ + +#include /* isspace(), etc */ +#include /* acquire_console_sem(), etc */ +#include /* cpufreq_get */ + +#include /* kmalloc, kfree, etc */ +#include /* vmalloc, vfree, etc */ + +#include /* poll_wait */ +#include /* mdelay, udelay */ + +#include /* suser(), capable() replacement */ + +#include /* get_random_bytes() */ + +#if defined(NV_LINUX_DMA_BUF_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_AVAILABLE) +#if defined(NV_DRM_DRM_DEVICE_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_DRV_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_GEM_H_PRESENT) +#include +#endif +#endif /* NV_DRM_AVAILABLE */ + +/* + * sched.h was refactored with this commit (as part of Linux 4.11) + * 2017-03-03 1827adb11ad26b2290dc9fe2aaf54976b2439865 + */ +#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT) +#include /* task_lock(), task_unlock() */ +#endif + +#if defined(NV_LINUX_SCHED_TASK_H_PRESENT) +#include /* task_lock(), task_unlock() */ +#endif + +/* task and signal-related items, for kernels < 4.11: */ +#include /* task_lock(), task_unlock() */ + +#include /* module_param() */ +#include /* flush_tlb(), flush_tlb_all() */ + +#include /* pci_find_class, etc */ +#include /* tasklets, interrupt helpers */ +#include +#include /* fget(), fput() */ +#include +#include /* CPU hotplug support */ + +#include /* pm_runtime_* */ +#include /* files_fdtable, etc */ + +#include /* do_div() */ +#if defined(NV_ASM_SYSTEM_H_PRESENT) +#include /* cli, sli, save_flags */ +#endif +#include /* ioremap, virt_to_phys */ +#include /* access_ok */ +#include /* PAGE_OFFSET */ +#include /* pte bit definitions */ +#include /* __set_bit() */ + +#if defined(NV_LINUX_TIME_H_PRESENT) +#include /* FD_SET() */ +#endif + +#include "nv-list-helpers.h" + +/* + * Use current->cred->euid, instead of calling current_euid(). + * The latter can pull in the GPL-only debug_lockdep_rcu_enabled() + * symbol when CONFIG_PROVE_RCU. That is only used for debugging. + * + * The Linux kernel relies on the assumption that only the current process + * is permitted to change its cred structure. Therefore, current_euid() + * does not require the RCU's read lock on current->cred. 
+ */ +#define NV_CURRENT_EUID() (__kuid_val(current->cred->euid)) + +#if !defined(NV_KUID_T_PRESENT) +static inline uid_t __kuid_val(uid_t uid) +{ + return uid; +} +#endif + +#if defined(CONFIG_VGA_ARB) +#include +#endif + +#include +#include + +#if defined(NV_LINUX_DMA_MAP_OPS_H_PRESENT) +#include +#endif + +#if defined(CONFIG_SWIOTLB) && defined(NVCPU_AARCH64) +#include +#endif + +#include +#include +#include + +#include + +#include /* workqueue */ +#include "nv-kthread-q.h" /* kthread based queue */ + +#if defined(NV_LINUX_EFI_H_PRESENT) +#include /* efi_enabled */ +#endif + +#include /* fb_info struct */ + +#if !defined(CONFIG_PCI) +#warning "Attempting to build driver for a platform with no PCI support!" +#include +#endif + +#if defined(NV_EFI_ENABLED_PRESENT) && defined(NV_EFI_ENABLED_ARGUMENT_COUNT) +#if (NV_EFI_ENABLED_ARGUMENT_COUNT == 1) +#define NV_EFI_ENABLED() efi_enabled(EFI_BOOT) +#else +#error "NV_EFI_ENABLED_ARGUMENT_COUNT value unrecognized!" +#endif +#elif (defined(NV_EFI_ENABLED_PRESENT) || defined(efi_enabled)) +#define NV_EFI_ENABLED() efi_enabled +#else +#define NV_EFI_ENABLED() 0 +#endif + +#if defined(CONFIG_CRAY_XT) +#include +NV_STATUS nvos_forward_error_to_cray(struct pci_dev *, NvU32, + const char *, va_list); +#endif + +#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH) +#include +#define NV_PCI_ERROR_RECOVERY_ENABLED() eeh_enabled() +#define NV_PCI_ERROR_RECOVERY +#endif + +#if defined(NV_ASM_SET_MEMORY_H_PRESENT) +#include +#endif + +#if defined(NV_SET_MEMORY_UC_PRESENT) +#undef NV_SET_PAGES_UC_PRESENT +#endif + +#if !defined(NVCPU_AARCH64) && !defined(NVCPU_PPC64LE) +#if !defined(NV_SET_MEMORY_UC_PRESENT) && !defined(NV_SET_PAGES_UC_PRESENT) +#error "This driver requires the ability to change memory types!" +#endif +#endif + +/* + * Traditionally, CONFIG_XEN indicated that the target kernel was + * built exclusively for use under a Xen hypervisor, requiring + * modifications to or disabling of a variety of NVIDIA graphics + * driver code paths. As of the introduction of CONFIG_PARAVIRT + * and support for Xen hypervisors within the CONFIG_PARAVIRT_GUEST + * architecture, CONFIG_XEN merely indicates that the target + * kernel can run under a Xen hypervisor, but not that it will. + * + * If CONFIG_XEN and CONFIG_PARAVIRT are defined, the old Xen + * specific code paths are disabled. If the target kernel executes + * stand-alone, the NVIDIA graphics driver will work fine. If the + * kernels executes under a Xen (or other) hypervisor, however, the + * NVIDIA graphics driver has no way of knowing and is unlikely + * to work correctly. 
+ */ +#if defined(CONFIG_XEN) && !defined(CONFIG_PARAVIRT) +#include +#include +#define NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL +#endif + +#ifdef CONFIG_KDB +#include +#include +#endif + +#if defined(CONFIG_X86_REMOTE_DEBUG) +#include +#endif + +#if defined(DEBUG) && defined(CONFIG_KGDB) && \ + defined(NVCPU_AARCH64) +#include +#endif + +#if defined(NVCPU_X86_64) && !defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) +#define NV_ENABLE_PAT_SUPPORT +#endif + +#define NV_PAT_MODE_DISABLED 0 +#define NV_PAT_MODE_KERNEL 1 +#define NV_PAT_MODE_BUILTIN 2 + +extern int nv_pat_mode; + +#if defined(CONFIG_HOTPLUG_CPU) +#define NV_ENABLE_HOTPLUG_CPU +#include /* struct notifier_block, etc */ +#endif + +#if (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)) +#include +#endif + +#if defined(CONFIG_ACPI) +#include +#define NV_LINUX_ACPI_EVENTS_SUPPORTED 1 +#endif + +#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED) +#define NV_ACPI_WALK_NAMESPACE(type, start_object, max_depth, \ + user_function, args...) \ + acpi_walk_namespace(type, start_object, max_depth, \ + user_function, NULL, args) +#endif + +#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL) +#define NV_CONFIG_PREEMPT_RT 1 +#endif + +#if defined(NV_WRITE_CR4_PRESENT) +#define NV_READ_CR4() read_cr4() +#define NV_WRITE_CR4(cr4) write_cr4(cr4) +#else +#define NV_READ_CR4() __read_cr4() +#define NV_WRITE_CR4(cr4) __write_cr4(cr4) +#endif + +#ifndef get_cpu +#define get_cpu() smp_processor_id() +#define put_cpu() +#endif + +#if !defined(unregister_hotcpu_notifier) +#define unregister_hotcpu_notifier unregister_cpu_notifier +#endif +#if !defined(register_hotcpu_notifier) +#define register_hotcpu_notifier register_cpu_notifier +#endif + +#if defined(NVCPU_X86_64) +#if !defined(pmd_large) +#define pmd_large(_pmd) \ + ((pmd_val(_pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) +#endif +#endif /* defined(NVCPU_X86_64) */ + +#define NV_PAGE_COUNT(page) \ + ((unsigned int)page_count(page)) +#define NV_GET_PAGE_COUNT(page_ptr) \ + (NV_PAGE_COUNT(NV_GET_PAGE_STRUCT(page_ptr->phys_addr))) +#define NV_GET_PAGE_FLAGS(page_ptr) \ + (NV_GET_PAGE_STRUCT(page_ptr->phys_addr)->flags) + +/* + * Before the introduction of VM_PFNMAP, there was an VM_UNPAGED flag. + * Drivers which wanted to call remap_pfn_range on normal pages had to use this + * VM_UNPAGED flag *and* set PageReserved. With the introduction of VM_PFNMAP, + * that restriction went away. This is described in commit + * + * 2005-10-28 6aab341e0a28aff100a09831c5300a2994b8b986 + * ("mm: re-architect the VM_UNPAGED logic") + * + * , which added VM_PFNMAP and vm_normal_page. Therefore, if VM_PFNMAP is + * defined, then we do *not* need to mark a page as reserved, in order to + * call remap_pfn_range(). 
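A minimal sketch of the pattern described above: on kernels with VM_PFNMAP, remap_pfn_range() can be called without first marking the page reserved, which is exactly what the NV_MAYBE_RESERVE_PAGE()/NV_MAYBE_UNRESERVE_PAGE() macros defined just below compile down to. The helper name is hypothetical and the driver's per-page bookkeeping object is omitted:

/* Hypothetical example -- not part of nv-linux.h. */
#include <linux/mm.h>

static int nv_example_map_one_page(struct vm_area_struct *vma,
                                   unsigned long user_va,
                                   unsigned long pfn)
{
    /* On pre-VM_PFNMAP kernels the driver would first reserve the page
     * (NV_MAYBE_RESERVE_PAGE); on modern kernels that macro is empty. */
    return remap_pfn_range(vma, user_va, pfn, PAGE_SIZE, vma->vm_page_prot);
}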
+ */ +#if !defined(VM_PFNMAP) +#define NV_MAYBE_RESERVE_PAGE(ptr_ptr) \ + SetPageReserved(NV_GET_PAGE_STRUCT(page_ptr->phys_addr)) +#define NV_MAYBE_UNRESERVE_PAGE(page_ptr) \ + ClearPageReserved(NV_GET_PAGE_STRUCT(page_ptr->phys_addr)) +#else +#define NV_MAYBE_RESERVE_PAGE(ptr_ptr) +#define NV_MAYBE_UNRESERVE_PAGE(page_ptr) +#endif /* defined(VM_PFNMAP) */ + +#if !defined(__GFP_COMP) +#define __GFP_COMP 0 +#endif + +#if !defined(DEBUG) && defined(__GFP_NOWARN) +#define NV_GFP_KERNEL (GFP_KERNEL | __GFP_NOWARN) +#define NV_GFP_ATOMIC (GFP_ATOMIC | __GFP_NOWARN) +#else +#define NV_GFP_KERNEL (GFP_KERNEL) +#define NV_GFP_ATOMIC (GFP_ATOMIC) +#endif + +#if defined(GFP_DMA32) +/* + * GFP_DMA32 is similar to GFP_DMA, but instructs the Linux zone + * allocator to allocate memory from the first 4GB on platforms + * such as Linux/x86-64; the alternative is to use an IOMMU such + * as the one implemented with the K8 GART, if available. + */ +#define NV_GFP_DMA32 (NV_GFP_KERNEL | GFP_DMA32) +#else +#define NV_GFP_DMA32 (NV_GFP_KERNEL) +#endif + +extern NvBool nvos_is_chipset_io_coherent(void); + +#if defined(NVCPU_X86_64) +#define CACHE_FLUSH() asm volatile("wbinvd":::"memory") +#define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory") +#elif defined(NVCPU_AARCH64) + static inline void nv_flush_cache_cpu(void *info) + { + if (!nvos_is_chipset_io_coherent()) + { +#if defined(NV_FLUSH_CACHE_ALL_PRESENT) + flush_cache_all(); +#else + WARN_ONCE(0, "NVRM: kernel does not support flush_cache_all()\n"); +#endif + } + } +#define CACHE_FLUSH() nv_flush_cache_cpu(NULL) +#define CACHE_FLUSH_ALL() on_each_cpu(nv_flush_cache_cpu, NULL, 1) +#define WRITE_COMBINE_FLUSH() mb() +#elif defined(NVCPU_PPC64LE) +#define CACHE_FLUSH() asm volatile("sync; \n" \ + "isync; \n" ::: "memory") +#define WRITE_COMBINE_FLUSH() CACHE_FLUSH() +#endif + +typedef enum +{ + NV_MEMORY_TYPE_SYSTEM, /* Memory mapped for ROM, SBIOS and physical RAM. */ + NV_MEMORY_TYPE_REGISTERS, + NV_MEMORY_TYPE_FRAMEBUFFER, + NV_MEMORY_TYPE_DEVICE_MMIO, /* All kinds of MMIO referred by NVRM e.g. 
BARs and MCFG of device */ +} nv_memory_type_t; + +#if defined(NVCPU_AARCH64) || defined(NVCPU_PPC64LE) +#define NV_ALLOW_WRITE_COMBINING(mt) 1 +#elif defined(NVCPU_X86_64) +#if defined(NV_ENABLE_PAT_SUPPORT) +#define NV_ALLOW_WRITE_COMBINING(mt) \ + ((nv_pat_mode != NV_PAT_MODE_DISABLED) && \ + ((mt) != NV_MEMORY_TYPE_REGISTERS)) +#else +#define NV_ALLOW_WRITE_COMBINING(mt) 0 +#endif +#endif + +#if !defined(IRQF_SHARED) +#define IRQF_SHARED SA_SHIRQ +#endif + +#define NV_MAX_RECURRING_WARNING_MESSAGES 10 + +/* various memory tracking/debugging techniques + * disabled for retail builds, enabled for debug builds + */ + +// allow an easy way to convert all debug printfs related to memory +// management back and forth between 'info' and 'errors' +#if defined(NV_DBG_MEM) +#define NV_DBG_MEMINFO NV_DBG_ERRORS +#else +#define NV_DBG_MEMINFO NV_DBG_INFO +#endif + +#define NV_MEM_TRACKING_PAD_SIZE(size) \ + (size) = NV_ALIGN_UP((size + sizeof(void *)), sizeof(void *)) + +#define NV_MEM_TRACKING_HIDE_SIZE(ptr, size) \ + if ((ptr != NULL) && (*(ptr) != NULL)) \ + { \ + NvU8 *__ptr; \ + *(unsigned long *) *(ptr) = (size); \ + __ptr = *(ptr); __ptr += sizeof(void *); \ + *(ptr) = (void *) __ptr; \ + } +#define NV_MEM_TRACKING_RETRIEVE_SIZE(ptr, size) \ + { \ + NvU8 *__ptr = (ptr); __ptr -= sizeof(void *); \ + (ptr) = (void *) __ptr; \ + (size) = *(unsigned long *) (ptr); \ + } + +/* keep track of memory usage */ +#include "nv-memdbg.h" + +static inline void *nv_vmalloc(unsigned long size) +{ +#if defined(NV_VMALLOC_HAS_PGPROT_T_ARG) + void *ptr = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); +#else + void *ptr = __vmalloc(size, GFP_KERNEL); +#endif + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +} + +static inline void nv_vfree(void *ptr, NvU32 size) +{ + NV_MEMDBG_REMOVE(ptr, size); + vfree(ptr); +} + +static inline void *nv_ioremap(NvU64 phys, NvU64 size) +{ + void *ptr = ioremap(phys, size); + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +} + +static inline void *nv_ioremap_nocache(NvU64 phys, NvU64 size) +{ + return nv_ioremap(phys, size); +} + +static inline void *nv_ioremap_cache(NvU64 phys, NvU64 size) +{ +#if defined(NV_IOREMAP_CACHE_PRESENT) + void *ptr = ioremap_cache(phys, size); + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +#elif defined(NVCPU_PPC64LE) + // + // ioremap_cache() has been only implemented correctly for ppc64le with + // commit f855b2f544d6 in April 2017 (kernel 4.12+). Internally, the kernel + // does provide a default implementation of ioremap_cache() that would be + // incorrect for our use (creating an uncached mapping) before the + // referenced commit, but that implementation is not exported and the + // NV_IOREMAP_CACHE_PRESENT conftest doesn't pick it up, and we end up in + // this #elif branch. + // + // At the same time, ppc64le have supported ioremap_prot() since May 2011 + // (commit 40f1ce7fb7e8, kernel 3.0+) and that covers all kernels we + // support on power. 
+ // + void *ptr = ioremap_prot(phys, size, pgprot_val(PAGE_KERNEL)); + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +#else + return nv_ioremap(phys, size); +#endif +} + +static inline void *nv_ioremap_wc(NvU64 phys, NvU64 size) +{ +#if defined(NV_IOREMAP_WC_PRESENT) + void *ptr = ioremap_wc(phys, size); + if (ptr) + NV_MEMDBG_ADD(ptr, size); + return ptr; +#else + return nv_ioremap_nocache(phys, size); +#endif +} + +static inline void nv_iounmap(void *ptr, NvU64 size) +{ + NV_MEMDBG_REMOVE(ptr, size); + iounmap(ptr); +} + +static NvBool nv_numa_node_has_memory(int node_id) +{ + if (node_id < 0 || node_id >= MAX_NUMNODES) + return NV_FALSE; +#if defined(NV_NODE_STATES_N_MEMORY_PRESENT) + return node_state(node_id, N_MEMORY) ? NV_TRUE : NV_FALSE; +#else + return node_state(node_id, N_HIGH_MEMORY) ? NV_TRUE : NV_FALSE; +#endif +} + +#define NV_KMALLOC(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_KERNEL); \ + if (ptr) \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#define NV_KMALLOC_ATOMIC(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_ATOMIC); \ + if (ptr) \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#if defined(__GFP_RETRY_MAYFAIL) +#define NV_GFP_NO_OOM (NV_GFP_KERNEL | __GFP_RETRY_MAYFAIL) +#elif defined(__GFP_NORETRY) +#define NV_GFP_NO_OOM (NV_GFP_KERNEL | __GFP_NORETRY) +#else +#define NV_GFP_NO_OOM (NV_GFP_KERNEL) +#endif + +#define NV_KMALLOC_NO_OOM(ptr, size) \ + { \ + (ptr) = kmalloc(size, NV_GFP_NO_OOM); \ + if (ptr) \ + NV_MEMDBG_ADD(ptr, size); \ + } + +#define NV_KFREE(ptr, size) \ + { \ + NV_MEMDBG_REMOVE(ptr, size); \ + kfree((void *) (ptr)); \ + } + +#define NV_ALLOC_PAGES_NODE(ptr, nid, order, gfp_mask) \ + { \ + (ptr) = (unsigned long)page_address(alloc_pages_node(nid, gfp_mask, order)); \ + } + +#define NV_GET_FREE_PAGES(ptr, order, gfp_mask) \ + { \ + (ptr) = __get_free_pages(gfp_mask, order); \ + } + +#define NV_FREE_PAGES(ptr, order) \ + { \ + free_pages(ptr, order); \ + } + +#if defined(PAGE_KERNEL_NOENC) +#if defined(__pgprot_mask) +#define NV_PAGE_KERNEL_NOCACHE_NOENC __pgprot_mask(__PAGE_KERNEL_NOCACHE) +#elif defined(default_pgprot) +#define NV_PAGE_KERNEL_NOCACHE_NOENC default_pgprot(__PAGE_KERNEL_NOCACHE) +#elif defined( __pgprot) +#define NV_PAGE_KERNEL_NOCACHE_NOENC __pgprot(__PAGE_KERNEL_NOCACHE) +#else +#error "Unsupported kernel!!!" +#endif +#endif + +static inline NvUPtr nv_vmap(struct page **pages, NvU32 page_count, + NvBool cached, NvBool unencrypted) +{ + void *ptr; + pgprot_t prot = PAGE_KERNEL; +#if defined(NVCPU_X86_64) +#if defined(PAGE_KERNEL_NOENC) + if (unencrypted) + { + prot = cached ? PAGE_KERNEL_NOENC : NV_PAGE_KERNEL_NOCACHE_NOENC; + } + else +#endif + { + prot = cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE; + } +#elif defined(NVCPU_AARCH64) + prot = cached ? PAGE_KERNEL : NV_PGPROT_UNCACHED(PAGE_KERNEL); +#endif + /* All memory cached in PPC64LE; can't honor 'cached' input. 
*/ + ptr = vmap(pages, page_count, VM_MAP, prot); + if (ptr) + NV_MEMDBG_ADD(ptr, page_count * PAGE_SIZE); + return (NvUPtr)ptr; +} + +static inline void nv_vunmap(NvUPtr vaddr, NvU32 page_count) +{ + vunmap((void *)vaddr); + NV_MEMDBG_REMOVE((void *)vaddr, page_count * PAGE_SIZE); +} + +#if defined(NV_GET_NUM_PHYSPAGES_PRESENT) +#define NV_NUM_PHYSPAGES get_num_physpages() +#else +#define NV_NUM_PHYSPAGES num_physpages +#endif +#define NV_GET_CURRENT_PROCESS() current->tgid +#define NV_IN_ATOMIC() in_atomic() +#define NV_LOCAL_BH_DISABLE() local_bh_disable() +#define NV_LOCAL_BH_ENABLE() local_bh_enable() +#define NV_COPY_TO_USER(to, from, n) copy_to_user(to, from, n) +#define NV_COPY_FROM_USER(to, from, n) copy_from_user(to, from, n) + +#define NV_IS_SUSER() capable(CAP_SYS_ADMIN) +#define NV_PCI_DEVICE_NAME(pci_dev) ((pci_dev)->pretty_name) +#define NV_CLI() local_irq_disable() +#define NV_SAVE_FLAGS(eflags) local_save_flags(eflags) +#define NV_RESTORE_FLAGS(eflags) local_irq_restore(eflags) +#define NV_MAY_SLEEP() (!irqs_disabled() && !in_interrupt() && !NV_IN_ATOMIC()) +#define NV_MODULE_PARAMETER(x) module_param(x, int, 0) +#define NV_MODULE_STRING_PARAMETER(x) module_param(x, charp, 0) +#undef MODULE_PARM + +#define NV_NUM_CPUS() num_possible_cpus() + +static inline dma_addr_t nv_phys_to_dma(struct device *dev, NvU64 pa) +{ +#if defined(NV_PHYS_TO_DMA_PRESENT) + return phys_to_dma(dev, pa); +#elif defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) + return phys_to_machine(pa); +#else + return (dma_addr_t)pa; +#endif +} + +#define NV_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) +#define NV_VMA_PGOFF(vma) ((vma)->vm_pgoff) +#define NV_VMA_SIZE(vma) ((vma)->vm_end - (vma)->vm_start) +#define NV_VMA_OFFSET(vma) (((NvU64)(vma)->vm_pgoff) << PAGE_SHIFT) +#define NV_VMA_PRIVATE(vma) ((vma)->vm_private_data) +#define NV_VMA_FILE(vma) ((vma)->vm_file) + +#define NV_DEVICE_MINOR_NUMBER(x) minor((x)->i_rdev) +#define NV_CONTROL_DEVICE_MINOR 255 + +#define NV_PCI_DISABLE_DEVICE(pci_dev) \ + { \ + NvU16 __cmd[2]; \ + pci_read_config_word((pci_dev), PCI_COMMAND, &__cmd[0]); \ + pci_disable_device(pci_dev); \ + pci_read_config_word((pci_dev), PCI_COMMAND, &__cmd[1]); \ + __cmd[1] |= PCI_COMMAND_MEMORY; \ + pci_write_config_word((pci_dev), PCI_COMMAND, \ + (__cmd[1] | (__cmd[0] & PCI_COMMAND_IO))); \ + } + +#define NV_PCI_RESOURCE_START(pci_dev, bar) pci_resource_start(pci_dev, (bar)) +#define NV_PCI_RESOURCE_SIZE(pci_dev, bar) pci_resource_len(pci_dev, (bar)) +#define NV_PCI_RESOURCE_FLAGS(pci_dev, bar) pci_resource_flags(pci_dev, (bar)) + +#define NV_PCI_RESOURCE_VALID(pci_dev, bar) \ + ((NV_PCI_RESOURCE_START(pci_dev, bar) != 0) && \ + (NV_PCI_RESOURCE_SIZE(pci_dev, bar) != 0)) + +#define NV_PCI_DOMAIN_NUMBER(pci_dev) (NvU32)pci_domain_nr(pci_dev->bus) +#define NV_PCI_BUS_NUMBER(pci_dev) (pci_dev)->bus->number +#define NV_PCI_DEVFN(pci_dev) (pci_dev)->devfn +#define NV_PCI_SLOT_NUMBER(pci_dev) PCI_SLOT(NV_PCI_DEVFN(pci_dev)) + +#if defined(CONFIG_X86_UV) && defined(NV_CONFIG_X86_UV) +#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus,devfn) \ + ({ \ + struct pci_dev *__dev = NULL; \ + while ((__dev = pci_get_device(PCI_VENDOR_ID_NVIDIA, \ + PCI_ANY_ID, __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_class((PCI_CLASS_BRIDGE_HOST << 8), \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == 
domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_class((PCI_CLASS_BRIDGE_PCI << 8), \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + if (__dev == NULL) \ + { \ + while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + } \ + __dev; \ + }) +#elif defined(NV_PCI_GET_DOMAIN_BUS_AND_SLOT_PRESENT) +#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus, devfn) \ + pci_get_domain_bus_and_slot(domain, bus, devfn) +#else +#define NV_GET_DOMAIN_BUS_AND_SLOT(domain,bus,devfn) \ + ({ \ + struct pci_dev *__dev = NULL; \ + while ((__dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, \ + __dev)) != NULL) \ + { \ + if ((NV_PCI_DOMAIN_NUMBER(__dev) == domain) && \ + (NV_PCI_BUS_NUMBER(__dev) == bus) && \ + (NV_PCI_DEVFN(__dev) == devfn)) \ + { \ + break; \ + } \ + } \ + __dev; \ + }) +#endif + +#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE_PRESENT) // introduced in 3.4.9 +#define NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(pci_dev) pci_stop_and_remove_bus_device(pci_dev) +#elif defined(NV_PCI_REMOVE_BUS_DEVICE_PRESENT) // introduced in 2.6 +#define NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(pci_dev) pci_remove_bus_device(pci_dev) +#endif + +#define NV_PRINT_AT(nv_debug_level,at) \ + { \ + nv_printf(nv_debug_level, \ + "NVRM: VM: %s:%d: 0x%p, %d page(s), count = %d, flags = 0x%08x, " \ + "page_table = 0x%p\n", __FUNCTION__, __LINE__, at, \ + at->num_pages, NV_ATOMIC_READ(at->usage_count), \ + at->flags, at->page_table); \ + } + +#define NV_PRINT_VMA(nv_debug_level,vma) \ + { \ + nv_printf(nv_debug_level, \ + "NVRM: VM: %s:%d: 0x%lx - 0x%lx, 0x%08x bytes @ 0x%016llx, 0x%p, 0x%p\n", \ + __FUNCTION__, __LINE__, vma->vm_start, vma->vm_end, NV_VMA_SIZE(vma), \ + NV_VMA_OFFSET(vma), NV_VMA_PRIVATE(vma), NV_VMA_FILE(vma)); \ + } + +#ifndef minor +# define minor(x) MINOR(x) +#endif + +#if defined(cpu_relax) +#define NV_CPU_RELAX() cpu_relax() +#else +#define NV_CPU_RELAX() barrier() +#endif + +#ifndef IRQ_RETVAL +typedef void irqreturn_t; +#define IRQ_RETVAL(a) +#endif + +#if !defined(PCI_COMMAND_SERR) +#define PCI_COMMAND_SERR 0x100 +#endif +#if !defined(PCI_COMMAND_INTX_DISABLE) +#define PCI_COMMAND_INTX_DISABLE 0x400 +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +/* + * On Linux on PPC64LE enable basic support for Linux PCI error recovery (see + * Documentation/PCI/pci-error-recovery.txt). Currently RM only supports error + * notification and data collection, not actual recovery of the device. + */ +#if defined(NVCPU_PPC64LE) && defined(CONFIG_EEH) +#include +#define NV_PCI_ERROR_RECOVERY +#endif + +/* + * If the host OS has page sizes larger than 4KB, we may have a security + * problem. Registers are typically grouped in 4KB pages, but if there are + * larger pages, then the smallest userspace mapping possible (e.g., a page) + * may give more access than intended to the user. 
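+ *
+ * For example, on a kernel built with 64 KiB pages a single OS page
+ * covers sixteen 4 KiB register regions, so the smallest possible user
+ * mapping would expose fifteen neighbouring regions in addition to the
+ * one that was actually requested.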
+ */ +#define NV_4K_PAGE_ISOLATION_REQUIRED(addr, size) \ + ((PAGE_SIZE > NV_RM_PAGE_SIZE) && \ + ((size) <= NV_RM_PAGE_SIZE) && \ + (((addr) >> NV_RM_PAGE_SHIFT) == \ + (((addr) + (size) - 1) >> NV_RM_PAGE_SHIFT))) + +/* + * The kernel may have a workaround for this, by providing a method to isolate + * a single 4K page in a given mapping. + */ +#if (PAGE_SIZE > NV_RM_PAGE_SIZE) && defined(NVCPU_PPC64LE) && defined(NV_PAGE_4K_PFN) + #define NV_4K_PAGE_ISOLATION_PRESENT + #define NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr) \ + ((NvP64)((void*)(((addr) >> NV_RM_PAGE_SHIFT) << PAGE_SHIFT))) + #define NV_4K_PAGE_ISOLATION_MMAP_LEN(size) PAGE_SIZE + #define NV_4K_PAGE_ISOLATION_ACCESS_START(addr) \ + ((NvP64)((void*)((addr) & ~NV_RM_PAGE_MASK))) + #define NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size) \ + ((((addr) & NV_RM_PAGE_MASK) + size + NV_RM_PAGE_MASK) & \ + ~NV_RM_PAGE_MASK) + #define NV_PROT_4K_PAGE_ISOLATION NV_PAGE_4K_PFN +#endif + +static inline int nv_remap_page_range(struct vm_area_struct *vma, + unsigned long virt_addr, NvU64 phys_addr, NvU64 size, pgprot_t prot) +{ + int ret = -1; + +#if defined(NV_4K_PAGE_ISOLATION_PRESENT) && defined(NV_PROT_4K_PAGE_ISOLATION) + if ((size == PAGE_SIZE) && + ((pgprot_val(prot) & NV_PROT_4K_PAGE_ISOLATION) != 0)) + { + /* + * remap_4k_pfn() hardcodes the length to a single OS page, and checks + * whether applying the page isolation workaround will cause PTE + * corruption (in which case it will fail, and this is an unsupported + * configuration). + */ +#if defined(NV_HASH__REMAP_4K_PFN_PRESENT) + ret = hash__remap_4k_pfn(vma, virt_addr, (phys_addr >> PAGE_SHIFT), prot); +#else + ret = remap_4k_pfn(vma, virt_addr, (phys_addr >> PAGE_SHIFT), prot); +#endif + } + else +#endif + { + ret = remap_pfn_range(vma, virt_addr, (phys_addr >> PAGE_SHIFT), size, + prot); + } + + return ret; +} + +static inline pgprot_t nv_adjust_pgprot(pgprot_t vm_prot, NvU32 extra) +{ + pgprot_t prot = __pgprot(pgprot_val(vm_prot) | extra); +#if defined(CONFIG_AMD_MEM_ENCRYPT) && defined(NV_PGPROT_DECRYPTED_PRESENT) + /* + * When AMD memory encryption is enabled, device memory mappings with the + * C-bit set read as 0xFF, so ensure the bit is cleared for user mappings. + * + * If cc_mkdec() is present, then pgprot_decrypted() can't be used. + */ +#if defined(NV_CC_MKDEC_PRESENT) + prot = __pgprot(__sme_clr(pgprot_val(vm_prot))); +#else + prot = pgprot_decrypted(prot); +#endif +#endif + + return prot; +} + +static inline int nv_io_remap_page_range(struct vm_area_struct *vma, + NvU64 phys_addr, NvU64 size, NvU32 extra_prot) +{ + int ret = -1; +#if !defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) + ret = nv_remap_page_range(vma, vma->vm_start, phys_addr, size, + nv_adjust_pgprot(vma->vm_page_prot, extra_prot)); +#else + ret = io_remap_pfn_range(vma, vma->vm_start, (phys_addr >> PAGE_SHIFT), + size, nv_adjust_pgprot(vma->vm_page_prot, extra_prot)); +#endif + return ret; +} + +static inline vm_fault_t nv_insert_pfn(struct vm_area_struct *vma, + NvU64 virt_addr, NvU64 pfn, NvU32 extra_prot) +{ + /* + * vm_insert_pfn{,_prot} replaced with vmf_insert_pfn{,_prot} in Linux 4.20 + */ +#if defined(NV_VMF_INSERT_PFN_PROT_PRESENT) + return vmf_insert_pfn_prot(vma, virt_addr, pfn, + __pgprot(pgprot_val(vma->vm_page_prot) | extra_prot)); +#else + int ret = -EINVAL; + /* + * Only PPC64LE (NV_4K_PAGE_ISOLATION_PRESENT) requires extra_prot to be + * used when remapping. + * + * vm_insert_pfn_prot() was added in Linux 4.4, whereas POWER9 support + * was added in Linux 4.8. 
+ * + * Rather than tampering with the vma to make use of extra_prot with + * vm_insert_pfn() on older kernels, for now, just fail in this case, as + * it's not expected to be used currently. + */ +#if defined(NV_VM_INSERT_PFN_PROT_PRESENT) + ret = vm_insert_pfn_prot(vma, virt_addr, pfn, + __pgprot(pgprot_val(vma->vm_page_prot) | extra_prot)); +#elif !defined(NV_4K_PAGE_ISOLATION_PRESENT) + ret = vm_insert_pfn(vma, virt_addr, pfn); +#endif + switch (ret) + { + case 0: + case -EBUSY: + /* + * EBUSY indicates that another thread already handled + * the faulted range. + */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + default: + break; + } +#endif /* defined(NV_VMF_INSERT_PFN_PROT_PRESENT) */ + return VM_FAULT_SIGBUS; +} + + +#define NV_PAGE_MASK (NvU64)(long)PAGE_MASK + +extern void *nvidia_stack_t_cache; + +/* + * On Linux, when a kmem cache is created, a new sysfs entry is created for the + * same unless it's merged with an existing cache. Upstream Linux kernel commit + * 3b7b314053d021601940c50b07f5f1423ae67e21 (version 4.12+) made cache + * destruction asynchronous which creates a race between cache destroy and + * create. A new cache created with attributes as a previous cache, which is + * scheduled for destruction, can try to create a sysfs entry with the same + * conflicting name. Upstream Linux kernel commit + * d50d82faa0c964e31f7a946ba8aba7c715ca7ab0 (4.18) fixes this issue by cleaning + * up sysfs entry within slab_mutex, so the entry is deleted before a cache with + * the same attributes could be created. + * + * To workaround this kernel issue, we take two steps: + * - Create unmergeable caches: a kmem_cache with a constructor is unmergeable. + * So, we define an empty contructor for the same. Creating an unmergeable + * cache ensures that the kernel doesn't generate an internal name and always + * uses our name instead. + * + * - Generate a unique cache name by appending the current timestamp (ns). We + * wait for the timestamp to increment by at least one to ensure that we do + * not hit a name conflict in cache create -> destroy (async) -> create cycle. + */ +#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) +static inline void nv_kmem_ctor_dummy(void *arg) +{ + (void)arg; +} +#else +#define nv_kmem_ctor_dummy NULL +#endif + +#define NV_KMEM_CACHE_CREATE(name, type) \ + nv_kmem_cache_create(name, sizeof(type), 0) + +/* The NULL pointer check is required for kernels older than 4.3 */ +#define NV_KMEM_CACHE_DESTROY(kmem_cache) \ + if (kmem_cache != NULL) \ + { \ + kmem_cache_destroy(kmem_cache); \ + } + +#define NV_KMEM_CACHE_ALLOC(kmem_cache) \ + kmem_cache_alloc(kmem_cache, GFP_KERNEL) +#define NV_KMEM_CACHE_FREE(ptr, kmem_cache) \ + kmem_cache_free(kmem_cache, ptr) + +static inline void *nv_kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) +{ +#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) + /* + * We cannot call kmem_cache_zalloc directly as it adds the __GFP_ZERO + * flag. This flag together with the presence of a slab constructor is + * flagged as a potential bug by the Linux kernel since it is the role + * of a constructor to fill an allocated object with the desired + * pattern. In our case, we specified a (dummy) constructor as a + * workaround for a bug and not to zero-initialize objects. So, we take + * the pain here to memset allocated object ourselves. 
+ */ + void *object = kmem_cache_alloc(k, flags); + if (object) + memset(object, 0, kmem_cache_size(k)); + return object; +#else + return kmem_cache_zalloc(k, flags); +#endif +} + +static inline int nv_kmem_cache_alloc_stack(nvidia_stack_t **stack) +{ + nvidia_stack_t *sp = NULL; +#if defined(NVCPU_X86_64) + sp = NV_KMEM_CACHE_ALLOC(nvidia_stack_t_cache); + if (sp == NULL) + return -ENOMEM; + sp->size = sizeof(sp->stack); + sp->top = sp->stack + sp->size; +#endif + *stack = sp; + return 0; +} + +static inline void nv_kmem_cache_free_stack(nvidia_stack_t *stack) +{ +#if defined(NVCPU_X86_64) + if (stack != NULL) + { + NV_KMEM_CACHE_FREE(stack, nvidia_stack_t_cache); + } +#endif +} + +#if defined(NVCPU_X86_64) +/* + * RAM is cached on Linux by default, we can assume there's + * nothing to be done here. This is not the case for the + * other memory spaces: we will have made an attempt to add + * a WC MTRR for the frame buffer. + * + * If a WC MTRR is present, we can't satisfy the WB mapping + * attempt here, since the achievable effective memory + * types in that case are WC and UC, if not it's typically + * UC (MTRRdefType is UC); we could only satisfy WB mapping + * requests with a WB MTRR. + */ +#define NV_ALLOW_CACHING(mt) ((mt) == NV_MEMORY_TYPE_SYSTEM) +#else +#define NV_ALLOW_CACHING(mt) ((mt) != NV_MEMORY_TYPE_REGISTERS) +#endif + +typedef struct nvidia_pte_s { + NvU64 phys_addr; + unsigned long virt_addr; + NvU64 dma_addr; +#ifdef CONFIG_XEN + unsigned int guest_pfn; +#endif + unsigned int page_count; +} nvidia_pte_t; + + + + + + + + + + + +typedef struct nv_alloc_s { + struct nv_alloc_s *next; + struct device *dev; + atomic_t usage_count; + struct { + NvBool contig : 1; + NvBool guest : 1; + NvBool zeroed : 1; + NvBool aliased : 1; + NvBool user : 1; + NvBool node0 : 1; + NvBool peer_io : 1; + NvBool physical : 1; + NvBool unencrypted : 1; + NvBool coherent : 1; + } flags; + unsigned int cache_type; + unsigned int num_pages; + unsigned int order; + unsigned int size; + nvidia_pte_t **page_table; /* list of physical pages allocated */ + unsigned int pid; + struct page **user_pages; + NvU64 guest_id; /* id of guest VM */ + void *import_priv; + struct sg_table *import_sgt; +} nv_alloc_t; + +/** + * nv_is_dma_direct - return true if direct_dma is enabled + * + * Starting with the 5.0 kernel, SWIOTLB is merged into + * direct_dma, so systems without an IOMMU use direct_dma. We + * need to know if this is the case, so that we can use a + * different check for SWIOTLB enablement. + */ +static inline NvBool nv_is_dma_direct(struct device *dev) +{ + NvBool is_direct = NV_FALSE; + +#if defined(NV_DMA_IS_DIRECT_PRESENT) + if (dma_is_direct(get_dma_ops(dev))) + is_direct = NV_TRUE; +#endif + + return is_direct; +} + +/** + * nv_dma_maps_swiotlb - return NV_TRUE if swiotlb is enabled + * + * SWIOTLB creates bounce buffers for the DMA mapping layer to + * use if a driver asks the kernel to map a DMA buffer that is + * outside of the device's addressable range. The driver does + * not function correctly if bounce buffers are enabled for the + * device. So if SWIOTLB is enabled, we should avoid making + * mapping calls. 
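+ *
+ * For illustration only (a hypothetical caller, not code from this
+ * header), a mapping path would typically bail out early when bounce
+ * buffers are in play:
+ *
+ *     if (nv_dma_maps_swiotlb(dev))
+ *         return NV_ERR_NOT_SUPPORTED;  // or otherwise skip the mapping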
+ */ +static inline NvBool +nv_dma_maps_swiotlb(struct device *dev) +{ + NvBool swiotlb_in_use = NV_FALSE; +#if defined(CONFIG_SWIOTLB) + #if defined(NV_DMA_OPS_PRESENT) || defined(NV_GET_DMA_OPS_PRESENT) || \ + defined(NV_SWIOTLB_DMA_OPS_PRESENT) + /* + * We only use the 'dma_ops' symbol on older x86_64 kernels; later kernels, + * including those for other architectures, have converged on the + * get_dma_ops() interface. + */ + #if defined(NV_GET_DMA_OPS_PRESENT) + /* + * The __attribute__ ((unused)) is necessary because in at least one + * case, *none* of the preprocessor branches below are taken, and + * so the ops variable ends up never being referred to at all. This can + * happen with the (NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 1) + * case. + */ + const struct dma_map_ops *ops __attribute__ ((unused)) = get_dma_ops(dev); + #else + const struct dma_mapping_ops *ops __attribute__ ((unused)) = dma_ops; + #endif + + /* + * The switch from dma_mapping_ops -> dma_map_ops coincided with the + * switch from swiotlb_map_sg -> swiotlb_map_sg_attrs. + */ + #if defined(NVCPU_AARCH64) && \ + defined(NV_NONCOHERENT_SWIOTLB_DMA_OPS_PRESENT) + /* AArch64 exports these symbols directly */ + swiotlb_in_use = ((ops == &noncoherent_swiotlb_dma_ops) || + (ops == &coherent_swiotlb_dma_ops)); + #elif NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs != 0 + swiotlb_in_use = (ops->map_sg == swiotlb_map_sg_attrs); + #elif NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops != 0 + swiotlb_in_use = (ops == &swiotlb_dma_ops); + #endif + /* + * The "else" case that is not shown + * (for NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 0 || + * NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops == 0) does + * nothing, and ends up dropping us out to the last line of this function, + * effectively returning false. The nearly-human-readable version of that + * case is "struct swiotlb_dma_ops is present (NV_SWIOTLB_DMA_OPS_PRESENT + * is defined) but neither swiotlb_map_sg_attrs nor swiotlb_dma_ops is + * present". + * + * That can happen on kernels that fall within below range: + * + * 2017-12-24 4bd89ed39b2ab8dc4ac4b6c59b07d420b0213bec + * ("swiotlb: remove various exports") + * 2018-06-28 210d0797c97d0e8f3b1a932a0dc143f4c57008a3 + * ("swiotlb: export swiotlb_dma_ops") + * + * Related to this: Between above two commits, this driver has no way of + * detecting whether or not the SWIOTLB is in use. Furthermore, the + * driver cannot support DMA remapping. That leads to the following + * point: "swiotlb=force" is not supported for kernels falling in above + * range. + * + * The other "else" case that is not shown: + * Starting with the 5.0 kernel, swiotlb is integrated into dma_direct, + * which is used when there's no IOMMU. In these kernels, ops == NULL, + * swiotlb_dma_ops no longer exists, and we do not support swiotlb=force + * (doing so would require detecting when swiotlb=force is enabled and + * then returning NV_TRUE even when dma_direct is in use). So for now, + * we just return NV_FALSE and in nv_compute_gfp_mask() we check for + * whether swiotlb could possibly be used (outside of swiotlb=force). + */ + #endif + + /* + * Commit 2017-11-07 d7b417fa08d ("x86/mm: Add DMA support for + * SEV memory encryption") forces SWIOTLB to be enabled when AMD SEV + * is active in all cases. + */ + if (os_sev_enabled) + swiotlb_in_use = NV_TRUE; +#endif + + return swiotlb_in_use; +} + +/* + * TODO: Bug 1522381 will allow us to move these mapping relationships into + * common code. 
+ */ + +/* + * Bug 1606851: the Linux kernel scatterlist code doesn't work for regions + * greater than or equal to 4GB, due to regular use of unsigned int + * throughout. So we need to split our mappings into 4GB-minus-1-page-or-less + * chunks and manage them separately. + */ +typedef struct nv_dma_submap_s { + NvU32 page_count; + NvU32 sg_map_count; + struct sg_table sgt; + NvBool imported; +} nv_dma_submap_t; + +typedef struct nv_dma_map_s { + struct page **pages; + NvU64 page_count; + NvBool contiguous; + NvU32 cache_type; + struct sg_table *import_sgt; + + union + { + struct + { + NvU32 submap_count; + nv_dma_submap_t *submaps; + } discontig; + + struct + { + NvU64 dma_addr; + } contig; + } mapping; + + struct device *dev; +} nv_dma_map_t; + +#define NV_FOR_EACH_DMA_SUBMAP(dm, sm, i) \ + for (i = 0, sm = &dm->mapping.discontig.submaps[0]; \ + i < dm->mapping.discontig.submap_count; \ + i++, sm = &dm->mapping.discontig.submaps[i]) + +#define NV_DMA_SUBMAP_MAX_PAGES ((NvU32)(NV_U32_MAX >> PAGE_SHIFT)) +#define NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(s) (s * NV_DMA_SUBMAP_MAX_PAGES) + +/* + * DO NOT use sg_alloc_table_from_pages on Xen Server, even if it's available. + * This will glom multiple pages into a single sg element, which + * xen_swiotlb_map_sg_attrs may try to route to the SWIOTLB. We must only use + * single-page sg elements on Xen Server. + */ +#if defined(NV_SG_ALLOC_TABLE_FROM_PAGES_PRESENT) && \ + !defined(NV_DOM0_KERNEL_PRESENT) + #define NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dm, sm, i) \ + ((sg_alloc_table_from_pages(&sm->sgt, \ + &dm->pages[NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(i)], \ + sm->page_count, 0, \ + sm->page_count * PAGE_SIZE, NV_GFP_KERNEL) == 0) ? NV_OK : \ + NV_ERR_OPERATING_SYSTEM) +#else + #define NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dm, sm, i) \ + ((sg_alloc_table(&sm->sgt, sm->page_count, NV_GFP_KERNEL)) == \ + 0 ? NV_OK : NV_ERR_OPERATING_SYSTEM) +#endif + +typedef struct nv_ibmnpu_info nv_ibmnpu_info_t; + +typedef struct nv_work_s { + struct work_struct task; + void *data; +} nv_work_t; + +#define NV_MAX_REGISTRY_KEYS_LENGTH 512 + +typedef enum +{ + NV_DEV_STACK_TIMER, + NV_DEV_STACK_ISR, + NV_DEV_STACK_ISR_BH, + NV_DEV_STACK_ISR_BH_UNLOCKED, + NV_DEV_STACK_GPU_WAKEUP, + NV_DEV_STACK_COUNT +} nvidia_linux_dev_stack_t; + +/* Linux version of the opaque type used for os_queue_work_item() */ +struct os_work_queue { + nv_kthread_q_t nvk; +}; + +/* Linux version of the opaque type used for os_wait_*() */ +struct os_wait_queue { + struct completion q; +}; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +/* + * To report error in msi/msix when unhandled count reaches a threshold + */ + +typedef struct nv_irq_count_info_s +{ + int irq; + NvU64 unhandled; + NvU64 total; + NvU64 last_unhandled; +} nv_irq_count_info_t; + +/* Linux-specific version of nv_dma_device_t */ +struct nv_dma_device { + struct { + NvU64 start; + NvU64 limit; + } addressable_range; + + struct device *dev; + NvBool nvlink; +}; + + + + + + + + + + + + + + +/* linux-specific version of old nv_state_t */ +/* this is a general os-specific state structure. 
the first element *must* be + the general state structure, for the generic unix-based code */ +typedef struct nv_linux_state_s { + nv_state_t nv_state; + + atomic_t usage_count; + NvU32 suspend_count; + + struct device *dev; + struct pci_dev *pci_dev; + + /* IBM-NPU info associated with this GPU */ + nv_ibmnpu_info_t *npu; + + + + + + + /* NUMA node information for the platforms where GPU memory is presented + * as a NUMA node to the kernel */ + struct { + /* NUMA node id >=0 when the platform supports GPU memory as NUMA node + * otherwise it holds the value of NUMA_NO_NODE */ + NvS32 node_id; + + /* NUMA online/offline status for platforms that support GPU memory as + * NUMA node */ + atomic_t status; + } numa_info; + + nvidia_stack_t *sp[NV_DEV_STACK_COUNT]; + + char registry_keys[NV_MAX_REGISTRY_KEYS_LENGTH]; + + nv_work_t work; + + /* get a timer callback every second */ + struct nv_timer rc_timer; + + /* lock for linux-specific data, not used by core rm */ + struct semaphore ldata_lock; + + /* proc directory information */ + struct proc_dir_entry *proc_dir; + + NvU32 minor_num; + struct nv_linux_state_s *next; + + /* DRM private information */ + struct drm_device *drm; + + /* kthread based bottom half servicing queue and elements */ + nv_kthread_q_t bottom_half_q; + nv_kthread_q_item_t bottom_half_q_item; + + /* Lock for unlocked bottom half protecting common allocated stack */ + void *isr_bh_unlocked_mutex; + + NvBool tce_bypass_enabled; + + NvU32 num_intr; + + /* Lock serializing ISRs for different MSI-X vectors */ + nv_spinlock_t msix_isr_lock; + + /* Lock serializing bottom halves for different MSI-X vectors */ + void *msix_bh_mutex; + + struct msix_entry *msix_entries; + + NvU64 numa_memblock_size; + + struct { + struct backlight_device *dev; + NvU32 displayId; + const char *device_name; + } backlight; + + /* + * file handle for pci sysfs config file (/sys/bus/pci/devices/.../config) + * which will be opened during device probe + */ + struct file *sysfs_config_file; + + /* Per-GPU queue */ + struct os_work_queue queue; + + /* GPU user mapping revocation/remapping (only for non-CTL device) */ + struct semaphore mmap_lock; /* Protects all fields in this category */ + struct list_head open_files; + NvBool all_mappings_revoked; + NvBool safe_to_mmap; + NvBool gpu_wakeup_callback_needed; + + /* Per-device notifier block for ACPI events */ + struct notifier_block acpi_nb; + + + + + + + + + + + + + + + + + + + /* Lock serializing ISRs for different SOC vectors */ + nv_spinlock_t soc_isr_lock; + + struct nv_timer snapshot_timer; + nv_spinlock_t snapshot_timer_lock; + void (*snapshot_callback)(void *context); + + /* count for unhandled, total and timestamp of irq */ + nv_irq_count_info_t *irq_count; + + /* Max number of irq triggered and are getting tracked */ + NvU16 current_num_irq_tracked; + + NvBool is_forced_shutdown; + + struct nv_dma_device dma_dev; + struct nv_dma_device niso_dma_dev; +} nv_linux_state_t; + +extern nv_linux_state_t *nv_linux_devices; + +/* + * Macros to protect operations on nv_linux_devices list + * Lock acquisition order while using the nv_linux_devices list + * 1. LOCK_NV_LINUX_DEVICES() + * 2. Traverse the list + * If the list is traversed to search for an element say nvl, + * acquire the nvl->ldata_lock before step 3 + * 3. UNLOCK_NV_LINUX_DEVICES() + * 4. 
Release nvl->ldata_lock after any read/write access to the + * nvl element is complete + */ +extern struct semaphore nv_linux_devices_lock; +#define LOCK_NV_LINUX_DEVICES() down(&nv_linux_devices_lock) +#define UNLOCK_NV_LINUX_DEVICES() up(&nv_linux_devices_lock) + +/* + * Lock to synchronize system power management transitions, + * and to protect the global system PM state. The procfs power + * management interface acquires this lock in write mode for + * the duration of the sleep operation, any other paths accessing + * device state must acquire the lock in read mode. + */ +extern struct rw_semaphore nv_system_pm_lock; + +extern NvBool nv_ats_supported; + +#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED) +/* + * acpi data storage structure + * + * This structure retains the pointer to the device, + * and any other baggage we want to carry along + * + */ +#define NV_MAXNUM_DISPLAY_DEVICES 8 + +typedef struct +{ + acpi_handle dev_handle; + int dev_id; +} nv_video_t; + +typedef struct +{ + nvidia_stack_t *sp; + struct acpi_device *device; + + nv_video_t pNvVideo[NV_MAXNUM_DISPLAY_DEVICES]; + + int notify_handler_installed; + int default_display_mask; +} nv_acpi_t; + +#endif + +/* + * file-private data + * hide a pointer to our data structures in a file-private ptr + * there are times we need to grab this data back from the file + * data structure.. + */ + +typedef struct nvidia_event +{ + struct nvidia_event *next; + nv_event_t event; +} nvidia_event_t; + +typedef enum +{ + NV_FOPS_STACK_INDEX_MMAP, + NV_FOPS_STACK_INDEX_IOCTL, + NV_FOPS_STACK_INDEX_COUNT +} nvidia_entry_point_index_t; + +typedef struct +{ + nv_file_private_t nvfp; + + nvidia_stack_t *sp; + nvidia_stack_t *fops_sp[NV_FOPS_STACK_INDEX_COUNT]; + struct semaphore fops_sp_lock[NV_FOPS_STACK_INDEX_COUNT]; + nv_alloc_t *free_list; + void *nvptr; + nvidia_event_t *event_data_head, *event_data_tail; + NvBool dataless_event_pending; + nv_spinlock_t fp_lock; + wait_queue_head_t waitqueue; + nv_kthread_q_item_t deferred_close_q_item; + NvU32 *attached_gpus; + size_t num_attached_gpus; + nv_alloc_mapping_context_t mmap_context; + struct address_space mapping; + + struct list_head entry; +} nv_linux_file_private_t; + +static inline nv_linux_file_private_t *nv_get_nvlfp_from_nvfp(nv_file_private_t *nvfp) +{ + return container_of(nvfp, nv_linux_file_private_t, nvfp); +} + +#define NV_SET_FILE_PRIVATE(filep,data) ((filep)->private_data = (data)) +#define NV_GET_LINUX_FILE_PRIVATE(filep) ((nv_linux_file_private_t *)(filep)->private_data) + +/* for the card devices */ +#define NV_GET_NVL_FROM_FILEP(filep) (NV_GET_LINUX_FILE_PRIVATE(filep)->nvptr) +#define NV_GET_NVL_FROM_NV_STATE(nv) ((nv_linux_state_t *)nv->os_state) + +#define NV_STATE_PTR(nvl) &(((nv_linux_state_t *)(nvl))->nv_state) + + +#define NV_ATOMIC_READ(data) atomic_read(&(data)) +#define NV_ATOMIC_SET(data,val) atomic_set(&(data), (val)) +#define NV_ATOMIC_INC(data) atomic_inc(&(data)) +#define NV_ATOMIC_DEC(data) atomic_dec(&(data)) +#define NV_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) + +static inline struct kmem_cache *nv_kmem_cache_create(const char *name, unsigned int size, + unsigned int align) +{ + char *name_unique; + struct kmem_cache *cache; + +#if defined(NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK) && !defined(NV_SYSFS_SLAB_UNLINK_PRESENT) + size_t len; + NvU64 tm_ns = nv_ktime_get_raw_ns(); + + /* + * Wait for timer to change at least once. This ensures + * that the name generated below is always unique. 
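+     *
+     * The generated name is simply the caller-supplied name with the
+     * timestamp appended ("%s-%llu" below), e.g. a hypothetical
+     * "nv_foo_cache-1652112000000000000".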
+ */ + while (tm_ns == nv_ktime_get_raw_ns()); + tm_ns = nv_ktime_get_raw_ns(); + + /* 20 is the max length of a 64-bit integer printed in decimal */ + len = strlen(name) + 20 + 1; + name_unique = kzalloc(len, GFP_KERNEL); + if (!name_unique) + return NULL; + + if (snprintf(name_unique, len, "%s-%llu", name, tm_ns) >= len) + { + WARN(1, "kmem cache name too long: %s\n", name); + kfree(name_unique); + return NULL; + } +#else + name_unique = (char *)name; +#endif + cache = kmem_cache_create(name_unique, size, align, 0, nv_kmem_ctor_dummy); + if (name_unique != name) + kfree(name_unique); + + return cache; +} + + +#if defined(CONFIG_PCI_IOV) +#define NV_PCI_SRIOV_SUPPORT +#endif /* CONFIG_PCI_IOV */ + + +#define NV_PCIE_CFG_MAX_OFFSET 0x1000 + +#include "nv-proto.h" + +/* + * Check if GPU is present on the bus by checking flag + * NV_FLAG_IN_SURPRISE_REMOVAL(set when eGPU is removed from TB3). + */ +static inline NV_STATUS nv_check_gpu_state(nv_state_t *nv) +{ +#if !defined(NVCPU_PPC64LE) + if (NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) + { + return NV_ERR_GPU_IS_LOST; + } +#endif + + return NV_OK; +} + +extern NvU32 NVreg_EnableUserNUMAManagement; +extern NvU32 NVreg_RegisterPCIDriver; + +extern NvU32 num_probed_nv_devices; +extern NvU32 num_nv_devices; + +#define NV_FILE_INODE(file) (file)->f_inode + +#if defined(NV_DOM0_KERNEL_PRESENT) || defined(NV_VGPU_KVM_BUILD) +#define NV_VGX_HYPER +#if defined(NV_XEN_IOEMU_INJECT_MSI) +#include +#endif +#endif + +static inline NvU64 nv_pci_bus_address(struct pci_dev *dev, NvU8 bar_index) +{ + NvU64 bus_addr = 0; +#if defined(NV_PCI_BUS_ADDRESS_PRESENT) + bus_addr = pci_bus_address(dev, bar_index); +#elif defined(CONFIG_PCI) + struct pci_bus_region region; + + pcibios_resource_to_bus(dev, ®ion, &dev->resource[bar_index]); + bus_addr = region.start; +#endif + return bus_addr; +} + +/* + * Decrements the usage count of the allocation, and moves the allocation to + * the given nvlfp's free list if the usage count drops to zero. + * + * Returns NV_TRUE if the allocation is moved to the nvlfp's free list. + */ +static inline NvBool nv_alloc_release(nv_linux_file_private_t *nvlfp, nv_alloc_t *at) +{ + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + if (NV_ATOMIC_DEC_AND_TEST(at->usage_count)) + { + NV_ATOMIC_INC(at->usage_count); + + at->next = nvlfp->free_list; + nvlfp->free_list = at; + return NV_TRUE; + } + + return NV_FALSE; +} + +/* + * RB_EMPTY_ROOT was added in 2.6.18 by this commit: + * 2006-06-21 dd67d051529387f6e44d22d1d5540ef281965fdd + */ +#if !defined(RB_EMPTY_ROOT) +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) +#endif + +/* + * Starting on Power9 systems, DMA addresses for NVLink are no longer + * the same as used over PCIe. + * + * Power9 supports a 56-bit Real Address. This address range is compressed + * when accessed over NVLink to allow the GPU to access all of memory using + * its 47-bit Physical address. + * + * If there is an NPU device present on the system, it implies that NVLink + * sysmem links are present and we need to apply the required address + * conversion for NVLink within the driver. + * + * See Bug 1920398 for further background and details. + * + * Note, a deviation from the documented compression scheme is that the + * upper address bits (i.e. bit 56-63) instead of being set to zero are + * preserved during NVLink address compression so the orignal PCIe DMA + * address can be reconstructed on expansion. These bits can be safely + * ignored on NVLink since they are truncated by the GPU. 
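+ *
+ * Concretely, the compression below passes bits [42:0] of the PCIe DMA
+ * address through unchanged, moves bits [46:45] down to bits [44:43],
+ * moves bits [50:49] down to bits [46:45], and carries bits [63:56]
+ * through untouched; nv_expand_nvlink_addr() applies the inverse shifts
+ * to reconstruct the original address.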
+ * + * Bug 1968345: As a performance enhancement it is the responsibility of + * the caller on PowerPC platforms to check for presence of an NPU device + * before the address transformation is applied. + */ +static inline NvU64 nv_compress_nvlink_addr(NvU64 addr) +{ + NvU64 addr47 = addr; + +#if defined(NVCPU_PPC64LE) + addr47 = addr & ((1ULL << 43) - 1); + addr47 |= (addr & (0x3ULL << 45)) >> 2; + WARN_ON(addr47 & (1ULL << 44)); + addr47 |= (addr & (0x3ULL << 49)) >> 4; + addr47 |= addr & ~((1ULL << 56) - 1); +#endif + + return addr47; +} + +static inline NvU64 nv_expand_nvlink_addr(NvU64 addr47) +{ + NvU64 addr = addr47; + +#if defined(NVCPU_PPC64LE) + addr = addr47 & ((1ULL << 43) - 1); + addr |= (addr47 & (3ULL << 43)) << 2; + addr |= (addr47 & (3ULL << 45)) << 4; + addr |= addr47 & ~((1ULL << 56) - 1); +#endif + + return addr; +} + +// Default flags for ISRs +static inline NvU32 nv_default_irq_flags(nv_state_t *nv) +{ + NvU32 flags = 0; + + /* + * Request IRQs to be disabled in our ISRs to keep consistency across the + * supported kernel versions. + * + * IRQF_DISABLED has been made the default in 2.6.35 with commit e58aa3d2d0cc + * from March 2010. And it has been later completely removed in 4.1 with commit + * d8bf368d0631 from March 2015. Add it to our flags if it's defined to get the + * same behaviour on pre-2.6.35 kernels as on recent ones. + */ +#if defined(IRQF_DISABLED) + flags |= IRQF_DISABLED; +#endif + + /* + * For legacy interrupts, also allow sharing. Sharing doesn't make sense + * for MSI(-X) as on Linux they are never shared across different devices + * and we only register one ISR today. + */ + if ((nv->flags & (NV_FLAG_USES_MSI | NV_FLAG_USES_MSIX)) == 0) + flags |= IRQF_SHARED; + + return flags; +} + +/* + * From v3.7-rc1 kernel have stopped exporting get_unused_fd() and started + * exporting get_unused_fd_flags(), as of this commit: + * 2012-09-26 1a7bd2265fc ("make get_unused_fd_flags() a function") + */ +#if NV_IS_EXPORT_SYMBOL_PRESENT_get_unused_fd + #define NV_GET_UNUSED_FD() get_unused_fd() +#else + #define NV_GET_UNUSED_FD() get_unused_fd_flags(0) +#endif + +#if NV_IS_EXPORT_SYMBOL_PRESENT_get_unused_fd_flags + #define NV_GET_UNUSED_FD_FLAGS(flags) get_unused_fd_flags(flags) +#else + #define NV_GET_UNUSED_FD_FLAGS(flags) (-1) +#endif + +#if defined(NV_SET_CLOSE_ON_EXEC_PRESENT) + #define NV_SET_CLOSE_ON_EXEC(fd, fdt) __set_close_on_exec(fd, fdt) +#elif defined(NV_LINUX_TIME_H_PRESENT) && defined(FD_SET) + #define NV_SET_CLOSE_ON_EXEC(fd, fdt) FD_SET(fd, fdt->close_on_exec) +#else + #define NV_SET_CLOSE_ON_EXEC(fd, fdt) __set_bit(fd, fdt->close_on_exec) +#endif + +#define MODULE_BASE_NAME "nvidia" +#define MODULE_INSTANCE_NUMBER 0 +#define MODULE_INSTANCE_STRING "" +#define MODULE_NAME MODULE_BASE_NAME MODULE_INSTANCE_STRING + +NvS32 nv_request_soc_irq(nv_linux_state_t *, NvU32, nv_soc_irq_type_t, NvU32, NvU32); + + + + + + +static inline void nv_mutex_destroy(struct mutex *lock) +{ + mutex_destroy(lock); +} + +static inline NvBool nv_platform_supports_numa(nv_linux_state_t *nvl) +{ + return nvl->numa_info.node_id != NUMA_NO_NODE; +} + +static inline int nv_get_numa_status(nv_linux_state_t *nvl) +{ + if (!nv_platform_supports_numa(nvl)) + { + return NV_IOCTL_NUMA_STATUS_DISABLED; + } + + return NV_ATOMIC_READ(nvl->numa_info.status); +} + +static inline int nv_set_numa_status(nv_linux_state_t *nvl, int status) +{ + if (!nv_platform_supports_numa(nvl)) + { + return -EINVAL; + } + + NV_ATOMIC_SET(nvl->numa_info.status, status); + return 0; +} + +typedef enum +{ 
+ NV_NUMA_STATUS_DISABLED = 0, + NV_NUMA_STATUS_OFFLINE = 1, + NV_NUMA_STATUS_ONLINE_IN_PROGRESS = 2, + NV_NUMA_STATUS_ONLINE = 3, + NV_NUMA_STATUS_ONLINE_FAILED = 4, + NV_NUMA_STATUS_OFFLINE_IN_PROGRESS = 5, + NV_NUMA_STATUS_OFFLINE_FAILED = 6, + NV_NUMA_STATUS_COUNT +} nv_numa_status_t; + +#if defined(NV_LINUX_PLATFORM_DEVICE_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_MUTEX_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_RESET_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_DMA_BUF_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_GPIO_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_OF_GPIO_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_OF_DEVICE_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_OF_PLATFORM_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_INTERCONNECT_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_PM_RUNTIME_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_CLK_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_CLK_PROVIDER_H_PRESENT) +#include +#endif + +#endif /* _NV_LINUX_H_ */ diff --git a/kernel-open/common/inc/nv-list-helpers.h b/kernel-open/common/inc/nv-list-helpers.h new file mode 100644 index 000000000..a241d358d --- /dev/null +++ b/kernel-open/common/inc/nv-list-helpers.h @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_LIST_HELPERS_H__ +#define __NV_LIST_HELPERS_H__ + +#include +#include "conftest.h" + +/* + * list_first_entry_or_null added by commit 6d7581e62f8b ("list: introduce + * list_first_entry_or_null") in v3.10 (2013-05-29). + */ +#if !defined(list_first_entry_or_null) + #define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) +#endif + +/* + * Added by commit 93be3c2eb337 ("list: introduce list_last_entry(), use + * list_{first,last}_entry()") in v3.13 (2013-11-12). + */ +#if !defined(list_last_entry) + #define list_last_entry(ptr, type, member) \ + list_entry((ptr)->prev, type, member) +#endif + +/* list_last_entry_or_null() doesn't actually exist in the kernel */ +#if !defined(list_last_entry_or_null) + #define list_last_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_last_entry(ptr, type, member) : NULL) +#endif + +/* + * list_prev_entry() and list_next_entry added by commit 008208c6b26f + * ("list: introduce list_next_entry() and list_prev_entry()") in + * v3.13 (2013-11-12). + */ +#if !defined(list_prev_entry) + #define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if !defined(list_next_entry) + #define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif + +#if !defined(NV_LIST_IS_FIRST_PRESENT) + static inline int list_is_first(const struct list_head *list, + const struct list_head *head) + { + return list->prev == head; + } +#endif + +#if defined(NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT) +#if NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT == 3 +#define nv_hlist_for_each_entry(pos, head, member) \ + hlist_for_each_entry(pos, head, member) +#else +#if !defined(hlist_entry_safe) +#define hlist_entry_safe(ptr, type, member) \ + (ptr) ? hlist_entry(ptr, type, member) : NULL +#endif + +#define nv_hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) +#endif +#endif // NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT + +#endif // __NV_LIST_HELPERS_H__ diff --git a/kernel-open/common/inc/nv-lock.h b/kernel-open/common/inc/nv-lock.h new file mode 100644 index 000000000..34f593d0a --- /dev/null +++ b/kernel-open/common/inc/nv-lock.h @@ -0,0 +1,92 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_LOCK_H_ +#define _NV_LOCK_H_ + +#include "conftest.h" + +#include +#include +#include /* signal_pending, cond_resched */ + +#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT) +#include /* signal_pending for kernels >= 4.11 */ +#endif + +#if defined(NV_LINUX_SEMAPHORE_H_PRESENT) +#include +#else +#include +#endif + +#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL) +typedef raw_spinlock_t nv_spinlock_t; +#define NV_SPIN_LOCK_INIT(lock) raw_spin_lock_init(lock) +#define NV_SPIN_LOCK_IRQ(lock) raw_spin_lock_irq(lock) +#define NV_SPIN_UNLOCK_IRQ(lock) raw_spin_unlock_irq(lock) +#define NV_SPIN_LOCK_IRQSAVE(lock,flags) raw_spin_lock_irqsave(lock,flags) +#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) raw_spin_unlock_irqrestore(lock,flags) +#define NV_SPIN_LOCK(lock) raw_spin_lock(lock) +#define NV_SPIN_UNLOCK(lock) raw_spin_unlock(lock) +#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock) +#else +typedef spinlock_t nv_spinlock_t; +#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock) +#define NV_SPIN_LOCK_IRQ(lock) spin_lock_irq(lock) +#define NV_SPIN_UNLOCK_IRQ(lock) spin_unlock_irq(lock) +#define NV_SPIN_LOCK_IRQSAVE(lock,flags) spin_lock_irqsave(lock,flags) +#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) spin_unlock_irqrestore(lock,flags) +#define NV_SPIN_LOCK(lock) spin_lock(lock) +#define NV_SPIN_UNLOCK(lock) spin_unlock(lock) +#define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock) +#endif + +#if defined(NV_CONFIG_PREEMPT_RT) +#define NV_INIT_SEMA(sema, val) sema_init(sema,val) +#else +#if !defined(__SEMAPHORE_INITIALIZER) && defined(__COMPAT_SEMAPHORE_INITIALIZER) +#define __SEMAPHORE_INITIALIZER __COMPAT_SEMAPHORE_INITIALIZER +#endif +#define NV_INIT_SEMA(sema, val) \ + { \ + struct semaphore __sema = \ + __SEMAPHORE_INITIALIZER(*(sema), val); \ + *(sema) = __sema; \ + } +#endif +#define NV_INIT_MUTEX(mutex) NV_INIT_SEMA(mutex, 1) + +static inline int nv_down_read_interruptible(struct rw_semaphore *lock) +{ + while (!down_read_trylock(lock)) + { + if (signal_pending(current)) + return -EINTR; + cond_resched(); + } + return 0; +} + + +#endif /* _NV_LOCK_H_ */ diff --git a/kernel-open/common/inc/nv-memdbg.h b/kernel-open/common/inc/nv-memdbg.h new file mode 100644 index 000000000..a7495710c --- /dev/null +++ b/kernel-open/common/inc/nv-memdbg.h @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVMEMDBG_H_ +#define _NVMEMDBG_H_ + +#include + +void nv_memdbg_init(void); +void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line); +void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line); +void nv_memdbg_exit(void); + +#if defined(NV_MEM_LOGGER) + +#define NV_MEMDBG_ADD(ptr, size) \ + nv_memdbg_add(ptr, size, __FILE__, __LINE__) + +#define NV_MEMDBG_REMOVE(ptr, size) \ + nv_memdbg_remove(ptr, size, __FILE__, __LINE__) + +#else + +#define NV_MEMDBG_ADD(ptr, size) +#define NV_MEMDBG_REMOVE(ptr, size) + +#endif /* NV_MEM_LOGGER */ + +#endif /* _NVMEMDBG_H_ */ diff --git a/kernel-open/common/inc/nv-mm.h b/kernel-open/common/inc/nv-mm.h new file mode 100644 index 000000000..0b70f30ca --- /dev/null +++ b/kernel-open/common/inc/nv-mm.h @@ -0,0 +1,273 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_MM_H__ +#define __NV_MM_H__ + +#include "conftest.h" + +#if !defined(NV_VM_FAULT_T_IS_PRESENT) +typedef int vm_fault_t; +#endif + +/* get_user_pages + * + * The 8-argument version of get_user_pages was deprecated by commit + * (2016 Feb 12: cde70140fed8429acf7a14e2e2cbd3e329036653)for the non-remote case + * (calling get_user_pages with current and current->mm). 
+ * + * Completely moved to the 6 argument version of get_user_pages - + * 2016 Apr 4: c12d2da56d0e07d230968ee2305aaa86b93a6832 + * + * write and force parameters were replaced with gup_flags by - + * 2016 Oct 12: 768ae309a96103ed02eb1e111e838c87854d8b51 + * + * A 7-argument version of get_user_pages was introduced into linux-4.4.y by + * commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40 which cherry-picked the + * replacement of the write and force parameters with gup_flags + * + */ + +#if defined(NV_GET_USER_PAGES_HAS_TASK_STRUCT) + #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS) + #define NV_GET_USER_PAGES(start, nr_pages, write, force, pages, vmas) \ + get_user_pages(current, current->mm, start, nr_pages, write, force, pages, vmas) + #else + #include + #include + + static inline long NV_GET_USER_PAGES(unsigned long start, + unsigned long nr_pages, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) + { + unsigned int flags = 0; + + if (write) + flags |= FOLL_WRITE; + if (force) + flags |= FOLL_FORCE; + + return get_user_pages(current, current->mm, start, nr_pages, flags, + pages, vmas); + } + #endif +#else + #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS) + #define NV_GET_USER_PAGES get_user_pages + #else + #include + + static inline long NV_GET_USER_PAGES(unsigned long start, + unsigned long nr_pages, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) + { + unsigned int flags = 0; + + if (write) + flags |= FOLL_WRITE; + if (force) + flags |= FOLL_FORCE; + + return get_user_pages(start, nr_pages, flags, pages, vmas); + } + #endif +#endif + +/* + * get_user_pages_remote() was added by commit 1e9877902dc7 + * ("mm/gup: Introduce get_user_pages_remote()") in v4.6 (2016-02-12). + * + * The very next commit cde70140fed8 ("mm/gup: Overload get_user_pages() + * functions") deprecated the 8-argument version of get_user_pages for the + * non-remote case (calling get_user_pages with current and current->mm). + * + * The guidelines are: call NV_GET_USER_PAGES_REMOTE if you need the 8-argument + * version that uses something other than current and current->mm. Use + * NV_GET_USER_PAGES if you are refering to current and current->mm. + * + * Note that get_user_pages_remote() requires the caller to hold a reference on + * the task_struct (if non-NULL and if this API has tsk argument) and the mm_struct. + * This will always be true when using current and current->mm. If the kernel passes + * the driver a vma via driver callback, the kernel holds a reference on vma->vm_mm + * over that callback. + * + * get_user_pages_remote() write/force parameters were replaced + * with gup_flags by commit 9beae1ea8930 ("mm: replace get_user_pages_remote() + * write/force parameters with gup_flags") in v4.9 (2016-10-13). + * + * get_user_pages_remote() added 'locked' parameter by commit 5b56d49fc31d + * ("mm: add locked parameter to get_user_pages_remote()") in + * v4.10 (2016-12-14). + * + * get_user_pages_remote() removed 'tsk' parameter by + * commit 64019a2e467a ("mm/gup: remove task_struct pointer for + * all gup code") in v5.9-rc1 (2020-08-11). 
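+ *
+ * Regardless of which of these variants the running kernel provides, the
+ * NV_GET_USER_PAGES_REMOTE() wrapper below keeps the original 8-argument
+ * (tsk, mm, start, nr_pages, write, force, pages, vmas) calling
+ * convention and translates write/force into FOLL_WRITE/FOLL_FORCE
+ * gup_flags where the kernel expects the flags-based interface.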
+ * + */ + +#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT) + #if defined(NV_GET_USER_PAGES_REMOTE_HAS_WRITE_AND_FORCE_ARGS) + #define NV_GET_USER_PAGES_REMOTE get_user_pages_remote + #else + static inline long NV_GET_USER_PAGES_REMOTE(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) + { + unsigned int flags = 0; + + if (write) + flags |= FOLL_WRITE; + if (force) + flags |= FOLL_FORCE; + + #if defined(NV_GET_USER_PAGES_REMOTE_HAS_LOCKED_ARG) + #if defined (NV_GET_USER_PAGES_REMOTE_HAS_TSK_ARG) + return get_user_pages_remote(tsk, mm, start, nr_pages, flags, + pages, vmas, NULL); + #else + return get_user_pages_remote(mm, start, nr_pages, flags, + pages, vmas, NULL); + #endif + + #else + + return get_user_pages_remote(tsk, mm, start, nr_pages, flags, + pages, vmas); + + #endif + + } + #endif +#else + #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS) + #define NV_GET_USER_PAGES_REMOTE get_user_pages + #else + #include + #include + + static inline long NV_GET_USER_PAGES_REMOTE(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) + { + unsigned int flags = 0; + + if (write) + flags |= FOLL_WRITE; + if (force) + flags |= FOLL_FORCE; + + return get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas); + } + #endif +#endif + + +/* + * The .virtual_address field was effectively renamed to .address, by these + * two commits: + * + * struct vm_fault: .address was added by: + * 2016-12-14 82b0f8c39a3869b6fd2a10e180a862248736ec6f + * + * struct vm_fault: .virtual_address was removed by: + * 2016-12-14 1a29d85eb0f19b7d8271923d8917d7b4f5540b3e + */ +static inline unsigned long nv_page_fault_va(struct vm_fault *vmf) +{ +#if defined(NV_VM_FAULT_HAS_ADDRESS) + return vmf->address; +#else + return (unsigned long)(vmf->virtual_address); +#endif +} + +static inline void nv_mmap_read_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_read_lock(mm); +#else + down_read(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_read_unlock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_read_unlock(mm); +#else + up_read(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_write_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_write_lock(mm); +#else + down_write(&mm->mmap_sem); +#endif +} + +static inline void nv_mmap_write_unlock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + mmap_write_unlock(mm); +#else + up_write(&mm->mmap_sem); +#endif +} + +static inline int nv_mm_rwsem_is_locked(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + return rwsem_is_locked(&mm->mmap_lock); +#else + return rwsem_is_locked(&mm->mmap_sem); +#endif +} + +static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm) +{ +#if defined(NV_MM_HAS_MMAP_LOCK) + return &mm->mmap_lock; +#else + return &mm->mmap_sem; +#endif +} + +#endif // __NV_MM_H__ diff --git a/kernel-open/common/inc/nv-modeset-interface.h b/kernel-open/common/inc/nv-modeset-interface.h new file mode 100644 index 000000000..e2e303f5f --- /dev/null +++ b/kernel-open/common/inc/nv-modeset-interface.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_MODESET_INTERFACE_H_ +#define _NV_MODESET_INTERFACE_H_ + +/* + * This file defines the interface between the nvidia and + * nvidia-modeset UNIX kernel modules. + * + * The nvidia-modeset kernel module calls the nvidia kernel module's + * nvidia_get_rm_ops() function to get the RM API function pointers + * which it will need. + */ + +#include "nvstatus.h" + +#include "nv-gpu-info.h" + +/* + * nvidia_stack_s is defined in nv.h, which pulls in a lot of other + * dependencies. The nvidia-modeset kernel module doesn't need to + * dereference the nvidia_stack_s pointer, so just treat is as an + * opaque pointer for purposes of this API definition. + */ +typedef struct nvidia_stack_s *nvidia_modeset_stack_ptr; + +/* + * Callback functions from the RM OS interface layer into the NVKMS OS interface + * layer. + * + * These functions should be called without the RM lock held, using the kernel's + * native calling convention. + */ +typedef struct { + /* + * Suspend & resume callbacks. Note that these are called once per GPU. + */ + void (*suspend)(NvU32 gpu_id); + void (*resume)(NvU32 gpu_id); +} nvidia_modeset_callbacks_t; + +/* + * The RM API entry points which the nvidia-modeset kernel module should + * call in the nvidia kernel module. + */ + +typedef struct { + /* + * The nvidia-modeset kernel module should assign version_string + * before passing the structure to the nvidia kernel module, so + * that a version match can be confirmed: it is not supported to + * mix nvidia and nvidia-modeset kernel modules from different + * releases. + */ + const char *version_string; + + /* + * Return system information. + */ + struct { + /* Availability of write combining support for video memory */ + NvBool allow_write_combining; + } system_info; + + /* + * Allocate and free an nvidia_stack_t to pass into + * nvidia_modeset_rm_ops_t::op(). An nvidia_stack_t must only be + * used by one thread at a time. + * + * Note that on architectures where an alternate stack is not + * used, alloc_stack() will set sp=NULL even when it returns 0 + * (success). I.e., check the return value, not the sp value. + */ + int (*alloc_stack)(nvidia_modeset_stack_ptr *sp); + void (*free_stack)(nvidia_modeset_stack_ptr sp); + + /* + * Enumerate list of gpus probed by nvidia driver. + * + * gpu_info is an array of NVIDIA_MAX_GPUS elements. The number of GPUs + * in the system is returned. 
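+ *
+ * Illustrative call pattern (an assumption, not part of this interface
+ * definition), once nvidia_get_rm_ops() has filled in "rm_ops" and
+ * "gpu_info" points to an array of NVIDIA_MAX_GPUS elements:
+ *
+ *   NvU32 gpu_count = rm_ops->enumerate_gpus(gpu_info);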
+ */ + NvU32 (*enumerate_gpus)(nv_gpu_info_t *gpu_info); + + /* + * {open,close}_gpu() raise and lower the reference count of the + * specified GPU. This is equivalent to opening and closing a + * /dev/nvidiaN device file from user-space. + */ + int (*open_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp); + void (*close_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp); + + void (*op)(nvidia_modeset_stack_ptr sp, void *ops_cmd); + + int (*set_callbacks)(const nvidia_modeset_callbacks_t *cb); + +} nvidia_modeset_rm_ops_t; + +NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops); + +#endif /* _NV_MODESET_INTERFACE_H_ */ diff --git a/kernel-open/common/inc/nv-msi.h b/kernel-open/common/inc/nv-msi.h new file mode 100644 index 000000000..55861d5a5 --- /dev/null +++ b/kernel-open/common/inc/nv-msi.h @@ -0,0 +1,115 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_MSI_H_ +#define _NV_MSI_H_ + +#include "nv-linux.h" + +#if (defined(CONFIG_X86_LOCAL_APIC) || defined(NVCPU_AARCH64) || \ + defined(NVCPU_PPC64LE)) && \ + (defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)) +#define NV_LINUX_PCIE_MSI_SUPPORTED +#endif + +#if !defined(NV_LINUX_PCIE_MSI_SUPPORTED) || !defined(CONFIG_PCI_MSI) +#define NV_PCI_DISABLE_MSI(pci_dev) +#else +#define NV_PCI_DISABLE_MSI(pci_dev) pci_disable_msi(pci_dev) +#endif + +irqreturn_t nvidia_isr (int, void *); +irqreturn_t nvidia_isr_msix (int, void *); +irqreturn_t nvidia_isr_kthread_bh (int, void *); +irqreturn_t nvidia_isr_msix_kthread_bh(int, void *); + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) +void NV_API_CALL nv_init_msi (nv_state_t *); +void NV_API_CALL nv_init_msix (nv_state_t *); +NvS32 NV_API_CALL nv_request_msix_irq (nv_linux_state_t *); + +#define NV_PCI_MSIX_FLAGS 2 +#define NV_PCI_MSIX_FLAGS_QSIZE 0x7FF + +static inline void nv_free_msix_irq(nv_linux_state_t *nvl) +{ + int i; + + for (i = 0; i < nvl->num_intr; i++) + { + free_irq(nvl->msix_entries[i].vector, (void *)nvl); + } +} + +static inline int nv_get_max_irq(struct pci_dev *pci_dev) +{ + int nvec; + int cap_ptr; + NvU16 ctrl; + + cap_ptr = pci_find_capability(pci_dev, PCI_CAP_ID_MSIX); + /* + * The 'PCI_MSIX_FLAGS' was added in 2.6.21-rc3 by: + * 2007-03-05 f5f2b13129a6541debf8851bae843cbbf48298b7 + */ +#if defined(PCI_MSIX_FLAGS) + pci_read_config_word(pci_dev, cap_ptr + PCI_MSIX_FLAGS, &ctrl); + nvec = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; +#else + pci_read_config_word(pci_dev, cap_ptr + NV_PCI_MSIX_FLAGS, &ctrl); + nvec = (ctrl & NV_PCI_MSIX_FLAGS_QSIZE) + 1; +#endif + + return nvec; +} + +static inline int nv_pci_enable_msix(nv_linux_state_t *nvl, int nvec) +{ + int rc = 0; + + /* + * pci_enable_msix_range() replaced pci_enable_msix() in 3.14-rc1: + * 2014-01-03 302a2523c277bea0bbe8340312b09507905849ed + */ + +#if defined(NV_PCI_ENABLE_MSIX_RANGE_PRESENT) + // We require all the vectors we are requesting so use the same min and max + rc = pci_enable_msix_range(nvl->pci_dev, nvl->msix_entries, nvec, nvec); + if (rc < 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + WARN_ON(nvec != rc); +#else + rc = pci_enable_msix(nvl->pci_dev, nvl->msix_entries, nvec); + if (rc != 0) + { + return NV_ERR_OPERATING_SYSTEM; + } +#endif + + nvl->num_intr = nvec; + return NV_OK; +} +#endif +#endif /* _NV_MSI_H_ */ diff --git a/kernel-open/common/inc/nv-pci-types.h b/kernel-open/common/inc/nv-pci-types.h new file mode 100644 index 000000000..9706d0e3b --- /dev/null +++ b/kernel-open/common/inc/nv-pci-types.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_PCI_TYPES_H_ +#define _NV_PCI_TYPES_H_ + +#include +#include "conftest.h" + +#if defined(NV_PCI_CHANNEL_STATE_PRESENT) +typedef enum pci_channel_state nv_pci_channel_state_t; +#else +typedef pci_channel_state_t nv_pci_channel_state_t; +#endif + +#endif diff --git a/kernel-open/common/inc/nv-pci.h b/kernel-open/common/inc/nv-pci.h new file mode 100644 index 000000000..84c0f5d93 --- /dev/null +++ b/kernel-open/common/inc/nv-pci.h @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_PCI_H_ +#define _NV_PCI_H_ + +#include +#include "nv-linux.h" + +#if defined(NV_DEV_IS_PCI_PRESENT) +#define nv_dev_is_pci(dev) dev_is_pci(dev) +#else +/* + * Non-PCI devices are only supported on kernels which expose the + * dev_is_pci() function. For older kernels, we only support PCI + * devices, hence returning true to take all the PCI code paths. + */ +#define nv_dev_is_pci(dev) (true) +#endif + +int nv_pci_register_driver(void); +void nv_pci_unregister_driver(void); +int nv_pci_count_devices(void); +NvU8 nv_find_pci_capability(struct pci_dev *, NvU8); +int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *); +nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8); + +#endif diff --git a/kernel-open/common/inc/nv-pgprot.h b/kernel-open/common/inc/nv-pgprot.h new file mode 100644 index 000000000..b56d95611 --- /dev/null +++ b/kernel-open/common/inc/nv-pgprot.h @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_PGPROT_H__ + +#define __NV_PGPROT_H__ + +#include "cpuopsys.h" + +#include + +#if !defined(NV_VMWARE) +#if defined(NVCPU_X86_64) +/* mark memory UC-, rather than UC (don't use _PAGE_PWT) */ +static inline pgprot_t pgprot_noncached_weak(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD); + return new_prot; + } + +#if !defined (pgprot_noncached) +static inline pgprot_t pgprot_noncached(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD | _PAGE_PWT); + return new_prot; + } +#endif +static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_PCD | _PAGE_PWT); + new_prot = __pgprot(pgprot_val(new_prot) | _PAGE_PWT); + return new_prot; + } +#endif /* defined(NVCPU_X86_64) */ +#endif /* !defined(NV_VMWARE) */ + +#if defined(NVCPU_AARCH64) +/* + * Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit + * ARM that's not for system memory, but device memory instead. For I/O cache + * coherent systems, use cached mappings instead of uncached. + */ +#define NV_PGPROT_UNCACHED(old_prot) \ + ((nvos_is_chipset_io_coherent()) ? 
\ + (old_prot) : \ + __pgprot_modify((old_prot), PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))) +#elif defined(NVCPU_PPC64LE) +/* Don't attempt to mark sysmem pages as uncached on ppc64le */ +#define NV_PGPROT_UNCACHED(old_prot) old_prot +#else +#define NV_PGPROT_UNCACHED(old_prot) pgprot_noncached(old_prot) +#endif + +#define NV_PGPROT_UNCACHED_DEVICE(old_prot) pgprot_noncached(old_prot) +#if defined(NVCPU_AARCH64) +#if defined(NV_MT_DEVICE_GRE_PRESENT) +#define NV_PROT_WRITE_COMBINED_DEVICE (PROT_DEFAULT | PTE_PXN | PTE_UXN | \ + PTE_ATTRINDX(MT_DEVICE_GRE)) +#else +#define NV_PROT_WRITE_COMBINED_DEVICE (PROT_DEFAULT | PTE_PXN | PTE_UXN | \ + PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#endif +#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \ + __pgprot_modify(old_prot, PTE_ATTRINDX_MASK, NV_PROT_WRITE_COMBINED_DEVICE) +#define NV_PGPROT_WRITE_COMBINED(old_prot) NV_PGPROT_UNCACHED(old_prot) +#define NV_PGPROT_READ_ONLY(old_prot) \ + __pgprot_modify(old_prot, 0, PTE_RDONLY) +#elif defined(NVCPU_X86_64) +#define NV_PGPROT_UNCACHED_WEAK(old_prot) pgprot_noncached_weak(old_prot) +#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \ + pgprot_modify_writecombine(old_prot) +#define NV_PGPROT_WRITE_COMBINED(old_prot) \ + NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) +#define NV_PGPROT_READ_ONLY(old_prot) \ + __pgprot(pgprot_val((old_prot)) & ~_PAGE_RW) +#elif defined(NVCPU_PPC64LE) +/* + * Some kernels use H_PAGE instead of _PAGE + */ +#if defined(_PAGE_RW) +#define NV_PAGE_RW _PAGE_RW +#elif defined(H_PAGE_RW) +#define NV_PAGE_RW H_PAGE_RW +#else +#warning "The kernel does not provide page protection defines!" +#endif + +#if defined(_PAGE_4K_PFN) +#define NV_PAGE_4K_PFN _PAGE_4K_PFN +#elif defined(H_PAGE_4K_PFN) +#define NV_PAGE_4K_PFN H_PAGE_4K_PFN +#else +#undef NV_PAGE_4K_PFN +#endif + +#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \ + pgprot_writecombine(old_prot) +/* Don't attempt to mark sysmem pages as write combined on ppc64le */ +#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot +#define NV_PGPROT_READ_ONLY(old_prot) \ + __pgprot(pgprot_val((old_prot)) & ~NV_PAGE_RW) +#else +/* Writecombine is not supported */ +#undef NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) +#undef NV_PGPROT_WRITE_COMBINED(old_prot) +#define NV_PGPROT_READ_ONLY(old_prot) +#endif + +#endif /* __NV_PGPROT_H__ */ diff --git a/kernel-open/common/inc/nv-procfs-utils.h b/kernel-open/common/inc/nv-procfs-utils.h new file mode 100644 index 000000000..5911d2d44 --- /dev/null +++ b/kernel-open/common/inc/nv-procfs-utils.h @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_PROCFS_UTILS_H +#define _NV_PROCFS_UTILS_H + +#include "conftest.h" + +#ifdef CONFIG_PROC_FS +#include +#include + +/* + * Allow procfs to create file to exercise error forwarding. + * This is supported by CRAY platforms. + */ +#if defined(CONFIG_CRAY_XT) +#define EXERCISE_ERROR_FORWARDING NV_TRUE +#else +#define EXERCISE_ERROR_FORWARDING NV_FALSE +#endif + +#define IS_EXERCISE_ERROR_FORWARDING_ENABLED() (EXERCISE_ERROR_FORWARDING) + +#if defined(NV_PROC_OPS_PRESENT) +typedef struct proc_ops nv_proc_ops_t; + +#define NV_PROC_OPS_SET_OWNER() + +#define NV_PROC_OPS_OPEN proc_open +#define NV_PROC_OPS_READ proc_read +#define NV_PROC_OPS_WRITE proc_write +#define NV_PROC_OPS_LSEEK proc_lseek +#define NV_PROC_OPS_RELEASE proc_release +#else +typedef struct file_operations nv_proc_ops_t; + +#define NV_PROC_OPS_SET_OWNER() .owner = THIS_MODULE, + +#define NV_PROC_OPS_OPEN open +#define NV_PROC_OPS_READ read +#define NV_PROC_OPS_WRITE write +#define NV_PROC_OPS_LSEEK llseek +#define NV_PROC_OPS_RELEASE release +#endif + +#define NV_CREATE_PROC_FILE(filename,parent,__name,__data) \ + ({ \ + struct proc_dir_entry *__entry; \ + int mode = (S_IFREG | S_IRUGO); \ + const nv_proc_ops_t *fops = &nv_procfs_##__name##_fops; \ + if (fops->NV_PROC_OPS_WRITE != 0) \ + mode |= S_IWUSR; \ + __entry = proc_create_data(filename, mode, parent, fops, __data);\ + __entry; \ + }) + +/* + * proc_mkdir_mode exists in Linux 2.6.9, but isn't exported until Linux 3.0. + * Use the older interface instead unless the newer interface is necessary. 
+ */ +#if defined(NV_PROC_REMOVE_PRESENT) +# define NV_PROC_MKDIR_MODE(name, mode, parent) \ + proc_mkdir_mode(name, mode, parent) +#else +# define NV_PROC_MKDIR_MODE(name, mode, parent) \ + ({ \ + struct proc_dir_entry *__entry; \ + __entry = create_proc_entry(name, mode, parent); \ + __entry; \ + }) +#endif + +#define NV_CREATE_PROC_DIR(name,parent) \ + ({ \ + struct proc_dir_entry *__entry; \ + int mode = (S_IFDIR | S_IRUGO | S_IXUGO); \ + __entry = NV_PROC_MKDIR_MODE(name, mode, parent); \ + __entry; \ + }) + +#if defined(NV_PDE_DATA_LOWER_CASE_PRESENT) +#define NV_PDE_DATA(inode) pde_data(inode) +#else +#define NV_PDE_DATA(inode) PDE_DATA(inode) +#endif + +#if defined(NV_PROC_REMOVE_PRESENT) +# define NV_REMOVE_PROC_ENTRY(entry) \ + proc_remove(entry); +#else +# define NV_REMOVE_PROC_ENTRY(entry) \ + remove_proc_entry(entry->name, entry->parent); +#endif + +void nv_procfs_unregister_all(struct proc_dir_entry *entry, + struct proc_dir_entry *delimiter); +#define NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \ + static int nv_procfs_open_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + int ret; \ + ret = single_open(filep, nv_procfs_read_##name, \ + NV_PDE_DATA(inode)); \ + if (ret < 0) \ + { \ + return ret; \ + } \ + ret = nv_down_read_interruptible(&lock); \ + if (ret < 0) \ + { \ + single_release(inode, filep); \ + } \ + return ret; \ + } \ + \ + static int nv_procfs_release_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + up_read(&lock); \ + return single_release(inode, filep); \ + } + +#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, lock) \ + NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \ + \ + static const nv_proc_ops_t nv_procfs_##name##_fops = { \ + NV_PROC_OPS_SET_OWNER() \ + .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \ + .NV_PROC_OPS_READ = seq_read, \ + .NV_PROC_OPS_LSEEK = seq_lseek, \ + .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \ + }; + + +#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(name, lock, \ +write_callback) \ + NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \ + \ + static ssize_t nv_procfs_write_##name( \ + struct file *file, \ + const char __user *buf, \ + size_t size, \ + loff_t *ppos \ + ) \ + { \ + ssize_t ret; \ + struct seq_file *s; \ + \ + s = file->private_data; \ + if (s == NULL) \ + { \ + return -EIO; \ + } \ + \ + ret = write_callback(s, buf + *ppos, size - *ppos); \ + if (ret == 0) \ + { \ + /* avoid infinite loop */ \ + ret = -EIO; \ + } \ + return ret; \ + } \ + \ + static const nv_proc_ops_t nv_procfs_##name##_fops = { \ + NV_PROC_OPS_SET_OWNER() \ + .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \ + .NV_PROC_OPS_READ = seq_read, \ + .NV_PROC_OPS_WRITE = nv_procfs_write_##name, \ + .NV_PROC_OPS_LSEEK = seq_lseek, \ + .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \ + }; + +#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY_WITHOUT_LOCK(name) \ + static int nv_procfs_open_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + int ret; \ + ret = single_open(filep, nv_procfs_read_##name, \ + NV_PDE_DATA(inode)); \ + return ret; \ + } \ + \ + static int nv_procfs_release_##name( \ + struct inode *inode, \ + struct file *filep \ + ) \ + { \ + return single_release(inode, filep); \ + } \ + \ + static const nv_proc_ops_t nv_procfs_##name##_fops = { \ + NV_PROC_OPS_SET_OWNER() \ + .NV_PROC_OPS_OPEN = nv_procfs_open_##name, \ + .NV_PROC_OPS_READ = seq_read, \ + .NV_PROC_OPS_LSEEK = seq_lseek, \ + .NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \ + }; + +#endif /* CONFIG_PROC_FS */ + 
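+
+/*
+ * Illustrative usage sketch (not part of this header): a read-only procfs
+ * file is typically built from these macros by first providing a seq_file
+ * show callback named nv_procfs_read_<name>(), then instantiating the
+ * open/release handlers and proc_ops, and finally registering the file.
+ * The names "foo", "foo_lock" (a struct rw_semaphore) and "parent_dir"
+ * below are hypothetical:
+ *
+ *   static int nv_procfs_read_foo(struct seq_file *s, void *v)
+ *   {
+ *       seq_printf(s, "example\n");
+ *       return 0;
+ *   }
+ *
+ *   NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(foo, foo_lock);
+ *
+ *   NV_CREATE_PROC_FILE("foo", parent_dir, foo, NULL);
+ */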
+#endif /* _NV_PROCFS_UTILS_H */ diff --git a/kernel-open/common/inc/nv-procfs.h b/kernel-open/common/inc/nv-procfs.h new file mode 100644 index 000000000..11f95854b --- /dev/null +++ b/kernel-open/common/inc/nv-procfs.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_PROCFS_H +#define _NV_PROCFS_H + +#include "nv-procfs-utils.h" + +#endif /* _NV_PROCFS_H */ diff --git a/kernel-open/common/inc/nv-proto.h b/kernel-open/common/inc/nv-proto.h new file mode 100644 index 000000000..c1443e2ee --- /dev/null +++ b/kernel-open/common/inc/nv-proto.h @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_PROTO_H_ +#define _NV_PROTO_H_ + +#include "nv-pci.h" +#include "nv-register-module.h" + + + + +extern const char *nv_device_name; +extern nvidia_module_t nv_fops; + +void nv_acpi_register_notifier (nv_linux_state_t *); +void nv_acpi_unregister_notifier (nv_linux_state_t *); +int nv_acpi_init (void); +int nv_acpi_uninit (void); + +NvU8 nv_find_pci_capability (struct pci_dev *, NvU8); + +int nv_procfs_init (void); +void nv_procfs_exit (void); +void nv_procfs_add_warning (const char *, const char *); +int nv_procfs_add_gpu (nv_linux_state_t *); +void nv_procfs_remove_gpu (nv_linux_state_t *); + +int nvidia_mmap (struct file *, struct vm_area_struct *); +int nvidia_mmap_helper (nv_state_t *, nv_linux_file_private_t *, nvidia_stack_t *, struct vm_area_struct *, void *); +int nv_encode_caching (pgprot_t *, NvU32, NvU32); +void nv_revoke_gpu_mappings_locked(nv_state_t *); + +NvUPtr nv_vm_map_pages (struct page **, NvU32, NvBool, NvBool); +void nv_vm_unmap_pages (NvUPtr, NvU32); + +NV_STATUS nv_alloc_contig_pages (nv_state_t *, nv_alloc_t *); +void nv_free_contig_pages (nv_alloc_t *); +NV_STATUS nv_alloc_system_pages (nv_state_t *, nv_alloc_t *); +void nv_free_system_pages (nv_alloc_t *); + +void nv_address_space_init_once (struct address_space *mapping); + +int nv_uvm_init (void); +void nv_uvm_exit (void); +NV_STATUS nv_uvm_suspend (void); +NV_STATUS nv_uvm_resume (void); +void nv_uvm_notify_start_device (const NvU8 *uuid); +void nv_uvm_notify_stop_device (const NvU8 *uuid); +NV_STATUS nv_uvm_event_interrupt (const NvU8 *uuid); + +/* Move these to nv.h once implemented by other UNIX platforms */ +NvBool nvidia_get_gpuid_list (NvU32 *gpu_ids, NvU32 *gpu_count); +int nvidia_dev_get (NvU32, nvidia_stack_t *); +void nvidia_dev_put (NvU32, nvidia_stack_t *); +int nvidia_dev_get_uuid (const NvU8 *, nvidia_stack_t *); +void nvidia_dev_put_uuid (const NvU8 *, nvidia_stack_t *); +int nvidia_dev_block_gc6 (const NvU8 *, nvidia_stack_t *); +int nvidia_dev_unblock_gc6 (const NvU8 *, nvidia_stack_t *); + +#if defined(CONFIG_PM) +NV_STATUS nv_set_system_power_state (nv_power_state_t, nv_pm_action_depth_t); +#endif + +void nvidia_modeset_suspend (NvU32 gpuId); +void nvidia_modeset_resume (NvU32 gpuId); +NvBool nv_is_uuid_in_gpu_exclusion_list (const char *); + +NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp); +nv_linux_state_t * find_uuid(const NvU8 *uuid); +void nv_report_error(struct pci_dev *dev, NvU32 error_number, const char *format, va_list ap); +void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state_t *); +void nv_dev_free_stacks(nv_linux_state_t *); +NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *); +void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *); +void nv_linux_add_device_locked(nv_linux_state_t *); +void nv_linux_remove_device_locked(nv_linux_state_t *); +NvBool nv_acpi_power_resource_method_present(struct pci_dev *); + +#endif /* _NV_PROTO_H_ */ diff --git a/kernel-open/common/inc/nv-register-module.h b/kernel-open/common/inc/nv-register-module.h new file mode 100644 index 000000000..bd4545f60 --- /dev/null +++ b/kernel-open/common/inc/nv-register-module.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_REGISTER_MODULE_H_ +#define _NV_REGISTER_MODULE_H_ + +#include +#include +#include + +#include "nvtypes.h" + +typedef struct nvidia_module_s { + struct module *owner; + + /* nvidia0, nvidia1 ..*/ + const char *module_name; + + /* module instance */ + NvU32 instance; + + /* file operations */ + int (*open)(struct inode *, struct file *filp); + int (*close)(struct inode *, struct file *filp); + int (*mmap)(struct file *filp, struct vm_area_struct *vma); + int (*ioctl)(struct inode *, struct file * file, unsigned int cmd, unsigned long arg); + unsigned int (*poll)(struct file * file, poll_table *wait); + +} nvidia_module_t; + +int nvidia_register_module(nvidia_module_t *); +int nvidia_unregister_module(nvidia_module_t *); + +#endif diff --git a/kernel-open/common/inc/nv-retpoline.h b/kernel-open/common/inc/nv-retpoline.h new file mode 100644 index 000000000..249550336 --- /dev/null +++ b/kernel-open/common/inc/nv-retpoline.h @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_RETPOLINE_H_ +#define _NV_RETPOLINE_H_ + +#include "cpuopsys.h" + +#if (NV_SPECTRE_V2 == 0) +#define NV_RETPOLINE_THUNK NV_SPEC_THUNK +#else +#define NV_RETPOLINE_THUNK NV_NOSPEC_THUNK +#endif + +#if defined(NVCPU_X86_64) +#define NV_SPEC_THUNK(REG) \ + __asm__( \ + ".weak __x86_indirect_thunk_" #REG ";" \ + ".type __x86_indirect_thunk_" #REG ", @function;" \ + "__x86_indirect_thunk_" #REG ":" \ + " .cfi_startproc;" \ + " jmp *%" #REG ";" \ + " .cfi_endproc;" \ + ".size __x86_indirect_thunk_" #REG ", .-__x86_indirect_thunk_" #REG) + +#define NV_NOSPEC_THUNK(REG) \ + __asm__( \ + ".weak __x86_indirect_thunk_" #REG ";" \ + ".type __x86_indirect_thunk_" #REG ", @function;" \ + "__x86_indirect_thunk_" #REG ":" \ + " .cfi_startproc;" \ + " call .Lnv_no_fence_" #REG ";" \ + ".Lnv_fence_" #REG ":" \ + " pause;" \ + " lfence;" \ + " jmp .Lnv_fence_" #REG ";" \ + ".Lnv_no_fence_" #REG ":" \ + " mov %" #REG ", (%rsp);" \ + " ret;" \ + " .cfi_endproc;" \ + ".size __x86_indirect_thunk_" #REG ", .-__x86_indirect_thunk_" #REG) + + __asm__(".pushsection .text"); + NV_RETPOLINE_THUNK(rax); + NV_RETPOLINE_THUNK(rbx); + NV_RETPOLINE_THUNK(rcx); + NV_RETPOLINE_THUNK(rdx); + NV_RETPOLINE_THUNK(rsi); + NV_RETPOLINE_THUNK(rdi); + NV_RETPOLINE_THUNK(rbp); + NV_RETPOLINE_THUNK(r8); + NV_RETPOLINE_THUNK(r9); + NV_RETPOLINE_THUNK(r10); + NV_RETPOLINE_THUNK(r11); + NV_RETPOLINE_THUNK(r12); + NV_RETPOLINE_THUNK(r13); + NV_RETPOLINE_THUNK(r14); + NV_RETPOLINE_THUNK(r15); + __asm__(".popsection"); +#endif + +#endif /* _NV_RETPOLINE_H_ */ diff --git a/kernel-open/common/inc/nv-time.h b/kernel-open/common/inc/nv-time.h new file mode 100644 index 000000000..7c3f512e0 --- /dev/null +++ b/kernel-open/common/inc/nv-time.h @@ -0,0 +1,251 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NV_TIME_H__ +#define __NV_TIME_H__ + +#include "conftest.h" +#include +#include +#include +#include + +#include + +#define NV_MAX_ISR_DELAY_US 20000 +#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000) +#define NV_NSECS_TO_JIFFIES(nsec) ((nsec) * HZ / 1000000000) + +#if !defined(NV_TIMESPEC64_PRESENT) +struct timespec64 { + __s64 tv_sec; + long tv_nsec; +}; +#endif + +#if !defined(NV_KTIME_GET_RAW_TS64_PRESENT) +static inline void ktime_get_raw_ts64(struct timespec64 *ts64) +{ + struct timespec ts; + getrawmonotonic(&ts); + ts64->tv_sec = ts.tv_sec; + ts64->tv_nsec = ts.tv_nsec; +} +#endif + +#if !defined(NV_KTIME_GET_REAL_TS64_PRESENT) +static inline void ktime_get_real_ts64(struct timespec64 *ts64) +{ + struct timeval tv; + do_gettimeofday(&tv); + ts64->tv_sec = tv.tv_sec; + ts64->tv_nsec = tv.tv_usec * (NvU64) NSEC_PER_USEC; +} +#endif + +static NvBool nv_timer_less_than +( + const struct timespec64 *a, + const struct timespec64 *b +) +{ + return (a->tv_sec == b->tv_sec) ? (a->tv_nsec < b->tv_nsec) + : (a->tv_sec < b->tv_sec); +} + +#if !defined(NV_TIMESPEC64_PRESENT) +static inline struct timespec64 timespec64_add +( + const struct timespec64 a, + const struct timespec64 b +) +{ + struct timespec64 result; + + result.tv_sec = a.tv_sec + b.tv_sec; + result.tv_nsec = a.tv_nsec + b.tv_nsec; + while (result.tv_nsec >= NSEC_PER_SEC) + { + ++result.tv_sec; + result.tv_nsec -= NSEC_PER_SEC; + } + return result; +} + +static inline struct timespec64 timespec64_sub +( + const struct timespec64 a, + const struct timespec64 b +) +{ + struct timespec64 result; + + result.tv_sec = a.tv_sec - b.tv_sec; + result.tv_nsec = a.tv_nsec - b.tv_nsec; + while (result.tv_nsec < 0) + { + --(result.tv_sec); + result.tv_nsec += NSEC_PER_SEC; + } + return result; +} + +static inline s64 timespec64_to_ns(struct timespec64 *ts) +{ + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; +} +#endif + +static inline NvU64 nv_ktime_get_raw_ns(void) +{ + struct timespec64 ts; + ktime_get_raw_ts64(&ts); + return (NvU64)timespec64_to_ns(&ts); +} + +// #define NV_CHECK_DELAY_ACCURACY 1 + +/* + * It is generally a bad idea to use udelay() to wait for more than + * a few milliseconds. Since the caller is most likely not aware of + * this, we use mdelay() for any full millisecond to be safe. + */ +static inline NV_STATUS nv_sleep_us(unsigned int us) +{ + + unsigned long mdelay_safe_msec; + unsigned long usec; + +#ifdef NV_CHECK_DELAY_ACCURACY + struct timespec64 tm1, tm2, tm_diff; + + ktime_get_raw_ts64(&tm1); +#endif + + if (in_irq() && (us > NV_MAX_ISR_DELAY_US)) + return NV_ERR_GENERIC; + + mdelay_safe_msec = us / 1000; + if (mdelay_safe_msec) + mdelay(mdelay_safe_msec); + + usec = us % 1000; + if (usec) + udelay(usec); + +#ifdef NV_CHECK_DELAY_ACCURACY + ktime_get_raw_ts64(&tm2); + tm_diff = timespec64_sub(tm2, tm1); + pr_info("NVRM: delay of %d usec results in actual delay of 0x%llu nsec\n", + us, timespec64_to_ns(&tm_diff)); +#endif + return NV_OK; +} + +/* + * Sleep for specified milliseconds. Yields the CPU to scheduler. + * + * On Linux, a jiffie represents the time passed in between two timer + * interrupts. The number of jiffies per second (HZ) varies across the + * supported platforms. On i386, where HZ is 100, a timer interrupt is + * generated every 10ms. NV_MSECS_TO_JIFFIES should be accurate independent of + * the actual value of HZ; any partial jiffies will be 'floor'ed, the + * remainder will be accounted for with mdelay(). 
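+ *
+ * Worked example (illustrative, assuming HZ = 250, i.e. a 4 ms jiffy):
+ * nv_sleep_ms(7) converts the request to 7,000,000 ns, which
+ * NV_NSECS_TO_JIFFIES floors to 1 jiffy. The function schedules out for
+ * that jiffy, recomputes how much of the request remains (roughly 3 ms,
+ * depending on when the task is actually rescheduled), and covers the
+ * remainder with mdelay() for any full milliseconds and ndelay() for the
+ * rest.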
+ */ +static inline NV_STATUS nv_sleep_ms(unsigned int ms) +{ + NvU64 ns; + unsigned long jiffies; + unsigned long mdelay_safe_msec; + struct timespec64 tm_end, tm_aux; +#ifdef NV_CHECK_DELAY_ACCURACY + struct timespec64 tm_start; +#endif + + ktime_get_raw_ts64(&tm_aux); +#ifdef NV_CHECK_DELAY_ACCURACY + tm_start = tm_aux; +#endif + + if (in_irq() && (ms > NV_MAX_ISR_DELAY_MS)) + { + return NV_ERR_GENERIC; + } + + if (irqs_disabled() || in_interrupt() || in_atomic()) + { + mdelay(ms); + return NV_OK; + } + + ns = ms * (NvU64) NSEC_PER_MSEC; + tm_end.tv_nsec = ns; + tm_end.tv_sec = 0; + tm_end = timespec64_add(tm_aux, tm_end); + + /* do we have a full jiffie to wait? */ + jiffies = NV_NSECS_TO_JIFFIES(ns); + + if (jiffies) + { + // + // If we have at least one full jiffy to wait, give up + // up the CPU; since we may be rescheduled before + // the requested timeout has expired, loop until less + // than a jiffie of the desired delay remains. + // + set_current_state(TASK_INTERRUPTIBLE); + do + { + schedule_timeout(jiffies); + ktime_get_raw_ts64(&tm_aux); + if (nv_timer_less_than(&tm_aux, &tm_end)) + { + tm_aux = timespec64_sub(tm_end, tm_aux); + ns = (NvU64) timespec64_to_ns(&tm_aux); + } + else + ns = 0; + } while ((jiffies = NV_NSECS_TO_JIFFIES(ns)) != 0); + } + + if (ns > (NvU64) NSEC_PER_MSEC) + { + mdelay_safe_msec = ns / (NvU64) NSEC_PER_MSEC; + mdelay(mdelay_safe_msec); + ns %= (NvU64) NSEC_PER_MSEC; + } + if (ns) + { + ndelay(ns); + } +#ifdef NV_CHECK_DELAY_ACCURACY + ktime_get_raw_ts64(&tm_aux); + tm_aux = timespec64_sub(tm_aux, tm_start); + pr_info("NVRM: delay of %d msec results in actual delay of %lld.%09ld sec\n", + ms, tm_aux.tv_sec, tm_aux.tv_nsec); +#endif + return NV_OK; +} + +#endif // __NV_TIME_H__ diff --git a/kernel-open/common/inc/nv-timer.h b/kernel-open/common/inc/nv-timer.h new file mode 100644 index 000000000..6af49fb67 --- /dev/null +++ b/kernel-open/common/inc/nv-timer.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __NV_TIMER_H__ +#define __NV_TIMER_H__ + +#include +#include // For container_of + +#include "conftest.h" + +struct nv_timer +{ + struct timer_list kernel_timer; + void (*nv_timer_callback)(struct nv_timer *nv_timer); +}; + +static inline void nv_timer_callback_typed_data(struct timer_list *timer) +{ + struct nv_timer *nv_timer = + container_of(timer, struct nv_timer, kernel_timer); + + nv_timer->nv_timer_callback(nv_timer); +} + +static inline void nv_timer_callback_anon_data(unsigned long arg) +{ + struct nv_timer *nv_timer = (struct nv_timer *)arg; + + nv_timer->nv_timer_callback(nv_timer); +} + +static inline void nv_timer_setup(struct nv_timer *nv_timer, + void (*callback)(struct nv_timer *nv_timer)) +{ + nv_timer->nv_timer_callback = callback; + +#if defined(NV_TIMER_SETUP_PRESENT) + timer_setup(&nv_timer->kernel_timer, nv_timer_callback_typed_data, 0); +#else + init_timer(&nv_timer->kernel_timer); + nv_timer->kernel_timer.function = nv_timer_callback_anon_data; + nv_timer->kernel_timer.data = (unsigned long)nv_timer; +#endif +} + +#endif // __NV_TIMER_H__ diff --git a/kernel-open/common/inc/nv.h b/kernel-open/common/inc/nv.h new file mode 100644 index 000000000..76fa10414 --- /dev/null +++ b/kernel-open/common/inc/nv.h @@ -0,0 +1,1081 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_H_ +#define _NV_H_ + + + +#include + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__) + #include // NULL +#elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) + #include // NULL +#else + #include // NULL +#endif + +#include +#include "nv_stdarg.h" +#include +#include +#include + +extern nv_cap_t *nvidia_caps_root; + +extern const NvBool nv_is_rm_firmware_supported_os; + + + + + + +#include + +/* NVIDIA's reserved major character device number (Linux). */ +#define NV_MAJOR_DEVICE_NUMBER 195 + +#define GPU_UUID_LEN (16) + +/* + * Buffer size for an ASCII UUID: We need 2 digits per byte, plus space + * for "GPU", 5 dashes, and '\0' termination: + */ +#define GPU_UUID_ASCII_LEN (GPU_UUID_LEN * 2 + 9) + +/* + * #define an absolute maximum used as a sanity check for the + * NV_ESC_IOCTL_XFER_CMD ioctl() size argument. 
+ */ +#define NV_ABSOLUTE_MAX_IOCTL_SIZE 16384 + +/* + * Solaris provides no more than 8 bits for the argument size in + * the ioctl() command encoding; make sure we don't exceed this + * limit. + */ +#define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 1 : -1) +#define __NV_IOWR(nr, type) ({ \ + typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)]; \ + _IOWR(NV_IOCTL_MAGIC, (nr), type); \ +}) + +#define NV_PCI_DEV_FMT "%04x:%02x:%02x.%x" +#define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \ + (nv)->pci_info.slot, (nv)->pci_info.function + +#define NV_RM_DEVICE_INTR_ADDRESS 0x100 + +/*! + * @brief The order of the display clocks in the below defined enum + * should be synced with below mapping array and macro. + * All four should be updated simultaneously in case + * of removal or addition of clocks in below order. + * Also, TEGRASOC_WHICH_CLK_MAX is used in various places + * in below mentioned files. + * arch/nvalloc/unix/Linux/nv-linux.h + * + * arch/nvalloc/unix/src/os.c + * dispClkMapRmToOsArr[] = {...}; + * + * arch/nvalloc/unix/Linux/nv-clk.c + * osMapClk[] = {...}; + * + */ +typedef enum _TEGRASOC_WHICH_CLK +{ + TEGRASOC_WHICH_CLK_NVDISPLAYHUB, + TEGRASOC_WHICH_CLK_NVDISPLAY_DISP, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0, + TEGRASOC_WHICH_CLK_NVDISPLAY_P1, + TEGRASOC_WHICH_CLK_DPAUX0, + TEGRASOC_WHICH_CLK_FUSE, + TEGRASOC_WHICH_CLK_DSIPLL_VCO, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_VCO, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB, + TEGRASOC_WHICH_CLK_SPPLL0_DIV10, + TEGRASOC_WHICH_CLK_SPPLL0_DIV25, + TEGRASOC_WHICH_CLK_SPPLL0_DIV27, + TEGRASOC_WHICH_CLK_SPPLL1_VCO, + TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL1_DIV27, + TEGRASOC_WHICH_CLK_VPLL0_REF, + TEGRASOC_WHICH_CLK_VPLL0, + TEGRASOC_WHICH_CLK_VPLL1, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF, + TEGRASOC_WHICH_CLK_RG0, + TEGRASOC_WHICH_CLK_RG1, + TEGRASOC_WHICH_CLK_DISPPLL, + TEGRASOC_WHICH_CLK_DISPHUBPLL, + TEGRASOC_WHICH_CLK_DSI_LP, + TEGRASOC_WHICH_CLK_DSI_CORE, + TEGRASOC_WHICH_CLK_DSI_PIXEL, + TEGRASOC_WHICH_CLK_PRE_SOR0, + TEGRASOC_WHICH_CLK_PRE_SOR1, + TEGRASOC_WHICH_CLK_DP_LINK_REF, + TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M, + TEGRASOC_WHICH_CLK_RG0_M, + TEGRASOC_WHICH_CLK_RG1_M, + TEGRASOC_WHICH_CLK_SOR0_M, + TEGRASOC_WHICH_CLK_SOR1_M, + TEGRASOC_WHICH_CLK_PLLHUB, + TEGRASOC_WHICH_CLK_SOR0, + TEGRASOC_WHICH_CLK_SOR1, + TEGRASOC_WHICH_CLK_SOR_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SF0, + TEGRASOC_WHICH_CLK_SF0, + TEGRASOC_WHICH_CLK_SF1, + TEGRASOC_WHICH_CLK_DSI_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SOR0_REF, + TEGRASOC_WHICH_CLK_PRE_SOR1_REF, + TEGRASOC_WHICH_CLK_SOR0_PLL_REF, + TEGRASOC_WHICH_CLK_SOR1_PLL_REF, + TEGRASOC_WHICH_CLK_SOR0_REF, + TEGRASOC_WHICH_CLK_SOR1_REF, + TEGRASOC_WHICH_CLK_OSC, + TEGRASOC_WHICH_CLK_DSC, + TEGRASOC_WHICH_CLK_MAUD, + TEGRASOC_WHICH_CLK_AZA_2XBIT, + TEGRASOC_WHICH_CLK_AZA_BIT, + TEGRA234_CLK_MIPI_CAL, + TEGRA234_CLK_UART_FST_MIPI_CAL, + TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only. 
+} TEGRASOC_WHICH_CLK; + +#ifdef NVRM + +extern const char *pNVRM_ID; + +/* + * ptr arithmetic convenience + */ + +typedef union +{ + volatile NvV8 Reg008[1]; + volatile NvV16 Reg016[1]; + volatile NvV32 Reg032[1]; +} nv_hwreg_t, * nv_phwreg_t; + + +#define NVRM_PCICFG_NUM_BARS 6 +#define NVRM_PCICFG_BAR_OFFSET(i) (0x10 + (i) * 4) +#define NVRM_PCICFG_BAR_REQTYPE_MASK 0x00000001 +#define NVRM_PCICFG_BAR_REQTYPE_MEMORY 0x00000000 +#define NVRM_PCICFG_BAR_MEMTYPE_MASK 0x00000006 +#define NVRM_PCICFG_BAR_MEMTYPE_64BIT 0x00000004 +#define NVRM_PCICFG_BAR_ADDR_MASK 0xfffffff0 + +#define NVRM_PCICFG_NUM_DWORDS 16 + +#define NV_GPU_NUM_BARS 3 +#define NV_GPU_BAR_INDEX_REGS 0 +#define NV_GPU_BAR_INDEX_FB 1 +#define NV_GPU_BAR_INDEX_IMEM 2 + +typedef struct +{ + NvU64 cpu_address; + NvU64 size; + NvU32 offset; + NvU32 *map; + nv_phwreg_t map_u; +} nv_aperture_t; + +typedef struct +{ + char *name; + NvU32 *data; +} nv_parm_t; + +#define NV_RM_PAGE_SHIFT 12 +#define NV_RM_PAGE_SIZE (1 << NV_RM_PAGE_SHIFT) +#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1) + +#define NV_RM_TO_OS_PAGE_SHIFT (os_page_shift - NV_RM_PAGE_SHIFT) +#define NV_RM_PAGES_PER_OS_PAGE (1U << NV_RM_TO_OS_PAGE_SHIFT) +#define NV_RM_PAGES_TO_OS_PAGES(count) \ + ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \ + ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0)) + +#if defined(NVCPU_X86_64) +#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3) +#else +#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2) +#endif + +typedef struct nvidia_stack_s +{ + NvU32 size; + void *top; + NvU8 stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16))); +} nvidia_stack_t; + +/* + * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t + */ +typedef nvidia_stack_t nv_stack_t; + +typedef struct nv_file_private_t nv_file_private_t; + +/* + * this is a wrapper for unix events + * unlike the events that will be returned to clients, this includes + * kernel-specific data, such as file pointer, etc.. 
+ */ +typedef struct nv_event_s +{ + NvHandle hParent; + NvHandle hObject; + NvU32 index; + NvU32 info32; + NvU16 info16; + nv_file_private_t *nvfp; /* per file-descriptor data pointer */ + NvU32 fd; + NvBool active; /* whether the event should be signaled */ + NvU32 refcount; /* count of associated RM events */ + struct nv_event_s *next; +} nv_event_t; + +typedef struct nv_kern_mapping_s +{ + void *addr; + NvU64 size; + NvU32 modeFlag; + struct nv_kern_mapping_s *next; +} nv_kern_mapping_t; + +typedef struct nv_usermap_access_params_s +{ + NvU64 addr; + NvU64 size; + NvU64 offset; + NvU64 *page_array; + NvU64 num_pages; + NvU64 mmap_start; + NvU64 mmap_size; + NvU64 access_start; + NvU64 access_size; + NvU64 remap_prot_extra; + NvBool contig; +} nv_usermap_access_params_t; + +/* + * It stores mapping context per mapping + */ +typedef struct nv_alloc_mapping_context_s { + void *alloc; + NvU64 page_index; + NvU64 *page_array; + NvU64 num_pages; + NvU64 mmap_start; + NvU64 mmap_size; + NvU64 access_start; + NvU64 access_size; + NvU64 remap_prot_extra; + NvU32 prot; + NvBool valid; +} nv_alloc_mapping_context_t; + +typedef enum +{ + NV_SOC_IRQ_DISPLAY_TYPE, + NV_SOC_IRQ_DPAUX_TYPE, + NV_SOC_IRQ_GPIO_TYPE, + NV_SOC_IRQ_HDACODEC_TYPE, + NV_SOC_IRQ_INVALID_TYPE +} nv_soc_irq_type_t; + +/* + * It stores interrupt numbers and interrupt type and private data + */ +typedef struct nv_soc_irq_info_s { + NvU32 irq_num; + nv_soc_irq_type_t irq_type; + NvBool bh_pending; + union { + NvU32 gpio_num; + NvU32 dpaux_instance; + } irq_data; +} nv_soc_irq_info_t; + +#define NV_MAX_SOC_IRQS 6 +#define NV_MAX_DPAUX_NUM_DEVICES 4 +#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING + +/* + * per device state + */ + +/* DMA-capable device data, defined by kernel interface layer */ +typedef struct nv_dma_device nv_dma_device_t; + +typedef struct nv_state_t +{ + void *priv; /* private data */ + void *os_state; /* os-specific device state */ + + int flags; + + /* PCI config info */ + nv_pci_info_t pci_info; + NvU16 subsystem_id; + NvU16 subsystem_vendor; + NvU32 gpu_id; + NvU32 iovaspace_id; + struct + { + NvBool valid; + NvU8 uuid[GPU_UUID_LEN]; + } nv_uuid_cache; + void *handle; + + NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS]; + + /* physical characteristics */ + nv_aperture_t bars[NV_GPU_NUM_BARS]; + nv_aperture_t *regs; + nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES]; + nv_aperture_t *hdacodec_regs; + nv_aperture_t *mipical_regs; + nv_aperture_t *fb, ud; + + NvU32 num_dpaux_instance; + NvU32 interrupt_line; + NvU32 dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES]; + nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS]; + NvS32 current_soc_irq; + NvU32 num_soc_irqs; + NvU32 hdacodec_irq; + NvU8 *soc_dcb_blob; + NvU32 soc_dcb_size; + NvU32 disp_sw_soc_chip_id; + + NvBool primary_vga; + + NvU32 sim_env; + + NvU32 rc_timer_enabled; + + /* list of events allocated for this device */ + nv_event_t *event_list; + + /* lock to protect event_list */ + void *event_spinlock; + + nv_kern_mapping_t *kern_mappings; + + /* Kernel interface DMA device data */ + nv_dma_device_t *dma_dev; + nv_dma_device_t *niso_dma_dev; + + /* + * Per-GPU queue. The actual queue object is usually allocated in the + * arch-specific parent structure (e.g. nv_linux_state_t), and this + * pointer just points to it. 
+ */ + struct os_work_queue *queue; + + /* For loading RM as a firmware (DCE or GSP) client */ + NvBool request_firmware; /* request firmware from the OS */ + NvBool request_fw_client_rm; /* attempt to init RM as FW a client */ + NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */ + NvBool enable_firmware_logs; /* attempt to enable firmware log decoding/printing */ + + /* Variable to track, if nvidia_remove is called */ + NvBool removed; + + NvBool console_device; + + /* Variable to track, if GPU is external GPU */ + NvBool is_external_gpu; + + /* Variable to track, if regkey PreserveVideoMemoryAllocations is set */ + NvBool preserve_vidmem_allocations; + + /* Variable to force allocation of 32-bit addressable memory */ + NvBool force_dma32_alloc; + + /* Variable to track if device has entered dynamic power state */ + NvBool dynamic_power_entered; + + /* PCI power state should be D0 during system suspend */ + NvBool d0_state_in_suspend; + + /* Current cyclestats client and context */ + NvU32 profiler_owner; + void *profiler_context; + + /* + * RMAPI objects to use in the OS layer to talk to core RM. + * + * Note that we only need to store one subdevice handle: in SLI, we will + * have a separate nv_state_t per physical GPU. + */ + struct { + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubDevice; + NvHandle hI2C; + NvHandle hDisp; + } rmapi; + + /* Bool to check if ISO iommu enabled */ + NvBool iso_iommu_present; + + /* Bool to check if dma-buf is supported */ + NvBool dma_buf_supported; + + NvBool printed_openrm_enable_unsupported_gpus_error; + +} nv_state_t; + +// These define need to be in sync with defines in system.h +#define OS_TYPE_LINUX 0x1 +#define OS_TYPE_FREEBSD 0x2 +#define OS_TYPE_SUNOS 0x3 +#define OS_TYPE_VMWARE 0x4 + +struct nv_file_private_t +{ + NvHandle *handles; + NvU16 maxHandles; + NvU32 deviceInstance; + NvU8 metadata[64]; + + nv_file_private_t *ctl_nvfp; + void *ctl_nvfp_priv; +}; + +// Forward define the gpu ops structures +typedef struct gpuSession *nvgpuSessionHandle_t; +typedef struct gpuDevice *nvgpuDeviceHandle_t; +typedef struct gpuAddressSpace *nvgpuAddressSpaceHandle_t; +typedef struct gpuChannel *nvgpuChannelHandle_t; +typedef struct UvmGpuChannelInfo_tag *nvgpuChannelInfo_t; +typedef struct UvmGpuChannelAllocParams_tag nvgpuChannelAllocParams_t; +typedef struct UvmGpuCaps_tag *nvgpuCaps_t; +typedef struct UvmGpuCopyEnginesCaps_tag *nvgpuCesCaps_t; +typedef struct UvmGpuAddressSpaceInfo_tag *nvgpuAddressSpaceInfo_t; +typedef struct UvmGpuAllocInfo_tag *nvgpuAllocInfo_t; +typedef struct UvmGpuP2PCapsParams_tag *nvgpuP2PCapsParams_t; +typedef struct UvmGpuFbInfo_tag *nvgpuFbInfo_t; +typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t; +typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t; +typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t; +typedef struct UvmGpuAccessCntrConfig_tag *nvgpuAccessCntrConfig_t; +typedef struct UvmGpuInfo_tag nvgpuInfo_t; +typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t; +typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t; +typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t; +typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t; +typedef struct UvmGpuExternalMappingInfo_tag *nvgpuExternalMappingInfo_t; +typedef struct UvmGpuChannelResourceInfo_tag *nvgpuChannelResourceInfo_t; +typedef struct UvmGpuChannelInstanceInfo_tag *nvgpuChannelInstanceInfo_t; +typedef struct UvmGpuChannelResourceBindParams_tag 
*nvgpuChannelResourceBindParams_t; +typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocParams_t; +typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t; +typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t; +typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU32, NvU64 *, NvU32, NvU64, NvU64); +typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64); + +/* + * flags + */ + +#define NV_FLAG_OPEN 0x0001 +#define NV_FLAG_EXCLUDE 0x0002 +#define NV_FLAG_CONTROL 0x0004 +// Unused 0x0008 +#define NV_FLAG_SOC_DISPLAY 0x0010 +#define NV_FLAG_USES_MSI 0x0020 +#define NV_FLAG_USES_MSIX 0x0040 +#define NV_FLAG_PASSTHRU 0x0080 +#define NV_FLAG_SUSPENDED 0x0100 +// Unused 0x0200 +// Unused 0x0400 +#define NV_FLAG_PERSISTENT_SW_STATE 0x0800 +#define NV_FLAG_IN_RECOVERY 0x1000 +// Unused 0x2000 +#define NV_FLAG_UNBIND_LOCK 0x4000 +/* To be set when GPU is not present on the bus, to help device teardown */ +#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000 + +typedef enum +{ + NV_PM_ACTION_HIBERNATE, + NV_PM_ACTION_STANDBY, + NV_PM_ACTION_RESUME +} nv_pm_action_t; + +typedef enum +{ + NV_PM_ACTION_DEPTH_DEFAULT, + NV_PM_ACTION_DEPTH_MODESET, + NV_PM_ACTION_DEPTH_UVM +} nv_pm_action_depth_t; + +typedef enum +{ + NV_DYNAMIC_PM_NEVER, + NV_DYNAMIC_PM_COARSE, + NV_DYNAMIC_PM_FINE +} nv_dynamic_power_mode_t; + +typedef enum +{ + NV_POWER_STATE_IN_HIBERNATE, + NV_POWER_STATE_IN_STANDBY, + NV_POWER_STATE_RUNNING +} nv_power_state_t; + +typedef enum +{ + NV_FIRMWARE_GSP, + NV_FIRMWARE_GSP_LOG +} nv_firmware_t; + +#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga) + +#define NV_IS_CTL_DEVICE(nv) ((nv)->flags & NV_FLAG_CONTROL) +#define NV_IS_SOC_DISPLAY_DEVICE(nv) \ + ((nv)->flags & NV_FLAG_SOC_DISPLAY) + +#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \ + (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0) + +#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \ + ((nv)->iso_iommu_present) + +/* + * NVIDIA ACPI event IDs to be passed into the core NVIDIA + * driver for various events like display switch events, + * AC/battery events, etc.. + */ +#define NV_SYSTEM_ACPI_DISPLAY_SWITCH_EVENT 0x8001 +#define NV_SYSTEM_ACPI_BATTERY_POWER_EVENT 0x8002 + +/* + * GPU add/remove events + */ +#define NV_SYSTEM_GPU_ADD_EVENT 0x9001 +#define NV_SYSTEM_GPU_REMOVE_EVENT 0x9002 + +/* + * Status bit definitions for display switch hotkey events. + */ +#define NV_HOTKEY_STATUS_DISPLAY_ENABLE_LCD 0x01 +#define NV_HOTKEY_STATUS_DISPLAY_ENABLE_CRT 0x02 +#define NV_HOTKEY_STATUS_DISPLAY_ENABLE_TV 0x04 +#define NV_HOTKEY_STATUS_DISPLAY_ENABLE_DFP 0x08 + +/* + * NVIDIA ACPI sub-event IDs (event types) to be passed into + * to core NVIDIA driver for ACPI events. 
+ */ +#define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_AC 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY 1 +#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED 1 + +#define NV_ACPI_NVIF_HANDLE_PRESENT 0x01 +#define NV_ACPI_DSM_HANDLE_PRESENT 0x02 +#define NV_ACPI_WMMX_HANDLE_PRESENT 0x04 + +#define NV_EVAL_ACPI_METHOD_NVIF 0x01 +#define NV_EVAL_ACPI_METHOD_WMMX 0x02 + +#define NV_I2C_CMD_READ 1 +#define NV_I2C_CMD_WRITE 2 +#define NV_I2C_CMD_SMBUS_READ 3 +#define NV_I2C_CMD_SMBUS_WRITE 4 +#define NV_I2C_CMD_SMBUS_QUICK_WRITE 5 +#define NV_I2C_CMD_SMBUS_QUICK_READ 6 +#define NV_I2C_CMD_SMBUS_BLOCK_READ 7 +#define NV_I2C_CMD_SMBUS_BLOCK_WRITE 8 + +// Flags needed by OSAllocPagesNode +#define NV_ALLOC_PAGES_NODE_NONE 0x0 +#define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1 + +/* +** where we hide our nv_state_t * ... +*/ +#define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p)) +#define NV_GET_NV_STATE(pGpu) \ + (nv_state_t *)((pGpu) ? (pGpu)->pOsGpuInfo : NULL) + +#define IS_REG_OFFSET(nv, offset, length) \ + (((offset) >= (nv)->regs->cpu_address) && \ + (((offset) + ((length)-1)) <= \ + (nv)->regs->cpu_address + ((nv)->regs->size-1))) + +#define IS_FB_OFFSET(nv, offset, length) \ + (((nv)->fb) && ((offset) >= (nv)->fb->cpu_address) && \ + (((offset) + ((length)-1)) <= (nv)->fb->cpu_address + ((nv)->fb->size-1))) + +#define IS_UD_OFFSET(nv, offset, length) \ + (((nv)->ud.cpu_address != 0) && ((nv)->ud.size != 0) && \ + ((offset) >= (nv)->ud.cpu_address) && \ + (((offset) + ((length)-1)) <= (nv)->ud.cpu_address + ((nv)->ud.size-1))) + +#define IS_IMEM_OFFSET(nv, offset, length) \ + (((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) && \ + ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) && \ + ((offset) >= (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) && \ + (((offset) + ((length) - 1)) <= \ + (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + \ + ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))) + +#define NV_RM_MAX_MSIX_LINES 8 + +#define NV_MAX_ISR_DELAY_US 20000 +#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000) + +#define NV_TIMERCMP(a, b, CMP) \ + (((a)->tv_sec == (b)->tv_sec) ? \ + ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec)) + +#define NV_TIMERADD(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ + if ((result)->tv_usec >= 1000000) \ + { \ + ++(result)->tv_sec; \ + (result)->tv_usec -= 1000000; \ + } \ + } + +#define NV_TIMERSUB(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ + if ((result)->tv_usec < 0) \ + { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ + } + +#define NV_TIMEVAL_TO_US(tv) ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec) + +#ifndef NV_ALIGN_UP +#define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1)) +#endif +#ifndef NV_ALIGN_DOWN +#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1)) +#endif + +/* + * driver internal interfaces + */ + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for UNIX specific OS interface. 
+ * + * --------------------------------------------------------------------------- + */ + +NvU32 NV_API_CALL nv_get_dev_minor (nv_state_t *); +void* NV_API_CALL nv_alloc_kernel_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, void **); +NV_STATUS NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *); +NV_STATUS NV_API_CALL nv_alloc_user_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *); +NV_STATUS NV_API_CALL nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32); + +NvU64 NV_API_CALL nv_get_kern_phys_address (NvU64); +NvU64 NV_API_CALL nv_get_user_phys_address (NvU64); +nv_state_t* NV_API_CALL nv_get_adapter_state (NvU32, NvU8, NvU8); +nv_state_t* NV_API_CALL nv_get_ctl_state (void); + +void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 ); + +NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *); + +NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **); +void NV_API_CALL nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **); + +NV_STATUS NV_API_CALL nv_register_peer_io_mem (nv_state_t *, NvU64 *, NvU64, void **); +void NV_API_CALL nv_unregister_peer_io_mem(nv_state_t *, void *); + +struct sg_table; + +NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **, struct sg_table *, void *); +void NV_API_CALL nv_unregister_sgt (nv_state_t *, struct sg_table **, void **, void *); +NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64, NvU32, void **); +void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *); + +NV_STATUS NV_API_CALL nv_dma_map_sgt (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **); +NV_STATUS NV_API_CALL nv_dma_map_pages (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **); +NV_STATUS NV_API_CALL nv_dma_unmap_pages (nv_dma_device_t *, NvU64, NvU64 *, void **); + +NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **); +NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **); + +NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64); + +NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64); + +void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *); +void NV_API_CALL nv_dma_enable_nvlink (nv_dma_device_t *); + +NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *); +NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *); + +void NV_API_CALL nv_post_event (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool); +NvS32 NV_API_CALL nv_get_event (nv_file_private_t *, nv_event_t *, NvU32 *); + +void* NV_API_CALL nv_i2c_add_adapter (nv_state_t *, NvU32); +void NV_API_CALL nv_i2c_del_adapter (nv_state_t *, void *); + +void NV_API_CALL nv_acpi_methods_init (NvU32 *); +void NV_API_CALL nv_acpi_methods_uninit (void); + +NV_STATUS NV_API_CALL nv_acpi_method (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_dsm_method (nv_state_t *, NvU8 *, 
NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_ddc_method (nv_state_t *, void *, NvU32 *, NvBool); +NV_STATUS NV_API_CALL nv_acpi_dod_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_rom_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_get_powersource (NvU32 *); +NvBool NV_API_CALL nv_acpi_is_battery_present(void); + +NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32, const char *); + +NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list); + +NvU64 NV_API_CALL nv_get_dma_start_address (nv_state_t *); +NV_STATUS NV_API_CALL nv_set_primary_vga_status(nv_state_t *); +NV_STATUS NV_API_CALL nv_pci_trigger_recovery (nv_state_t *); +NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *); + +NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *); +const void*NV_API_CALL nv_get_firmware(nv_state_t *, nv_firmware_t, const void **, NvU32 *); +void NV_API_CALL nv_put_firmware(const void *); + +nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **); +void NV_API_CALL nv_put_file_private(void *); + +NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvU32 *, NvS32 *); + +NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**); +NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode); + +void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv); + +void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64); + +void NV_API_CALL nv_p2p_free_platform_data(void *data); + +#if defined(NVCPU_PPC64LE) +NV_STATUS NV_API_CALL nv_get_nvlink_line_rate (nv_state_t *, NvU32 *); +#endif + +NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *); +void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *); +void NV_API_CALL nv_release_mmap_lock (nv_state_t *); +NvBool NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *); +void NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool); + +NV_STATUS NV_API_CALL nv_indicate_idle (nv_state_t *); +NV_STATUS NV_API_CALL nv_indicate_not_idle (nv_state_t *); +void NV_API_CALL nv_idle_holdoff (nv_state_t *); + +NvBool NV_API_CALL nv_dynamic_power_available (nv_state_t *); +void NV_API_CALL nv_audio_dynamic_power (nv_state_t *); + +void NV_API_CALL nv_control_soc_irqs (nv_state_t *, NvBool bEnable); +NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *, NvU32 *); + +NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*); +int NV_API_CALL nv_cap_drv_init(void); +void NV_API_CALL nv_cap_drv_exit(void); +NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *); + +NvU32 NV_API_CALL nv_get_os_type(void); + +void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end); +struct dma_buf; +typedef struct nv_dma_buf nv_dma_buf_t; +struct drm_gem_object; + +NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *); +void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *); +NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **); +NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **); +void NV_API_CALL nv_dma_release_dma_buf (void *, nv_dma_buf_t *); + +void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *); + + +NvBool NV_API_CALL nv_platform_supports_s0ix (void); 
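
Editorial sketch (not part of the original diff): the two platform queries declared just above and just below this point, nv_platform_supports_s0ix() and nv_s2idle_pm_configured(), both take no arguments and return NvBool. The snippet below is a minimal sketch of how a caller might combine them to gate s0ix-style suspend handling; the gating policy and the example_* helper name are assumptions for illustration only, and the driver's real decision logic is not part of this header.

/* Assumes this header (with its NvBool/NV_API_CALL definitions) is included. */
static NvBool example_s0ix_suspend_usable(void)
{
    /*
     * Hypothetical policy: treat s0ix-based suspend as usable only when the
     * platform advertises s0ix support and s2idle is configured in the OS.
     */
    return nv_platform_supports_s0ix() && nv_s2idle_pm_configured();
}
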
+NvBool NV_API_CALL nv_s2idle_pm_configured (void); + + +NvBool NV_API_CALL nv_is_chassis_notebook (void); +void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv); +void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv); + +typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *); + +NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *); +NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *); + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for Resource Manager interface. + * + * --------------------------------------------------------------------------- + */ + +NvBool NV_API_CALL rm_init_rm (nvidia_stack_t *); +void NV_API_CALL rm_shutdown_rm (nvidia_stack_t *); +NvBool NV_API_CALL rm_init_private_state (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_free_private_state (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_init_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_disable_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_shutdown_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_exclude_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_acquire_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_release_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32); +NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *); +void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t); +NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_save_low_res_mode (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_vbios_version (nvidia_stack_t *, nv_state_t *, char *); +char* NV_API_CALL rm_get_gpu_uuid (nvidia_stack_t *, nv_state_t *); +const NvU8* NV_API_CALL rm_get_gpu_uuid_raw (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_set_rm_firmware_requested(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_firmware_version (nvidia_stack_t *, nv_state_t *, char *, NvLength); +void NV_API_CALL rm_cleanup_file_private (nvidia_stack_t *, nv_state_t *, nv_file_private_t *); +void NV_API_CALL rm_unbind_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_read_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *); +NV_STATUS NV_API_CALL rm_write_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32); +void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *); +char* NV_API_CALL rm_remove_spaces (const char *); +char* NV_API_CALL rm_string_token (char **, const char); 
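
Editorial sketch (not part of the original diff): the rm_* prototypes above only declare the Resource Manager entry points. Below is one plausible per-adapter bring-up and teardown ordering suggested by their names and signatures; the sequencing and the example_* wrappers are assumptions for illustration, since the actual ordering is driven by the OS layer, which also supplies the nvidia_stack_t and nv_state_t arguments.

/* Hypothetical wrappers; 'sp' and 'nv' are assumed to come from the OS layer. */
static NvBool example_adapter_bring_up(nvidia_stack_t *sp, nv_state_t *nv)
{
    if (!rm_init_private_state(sp, nv))   /* allocate core RM per-GPU state */
        return NV_FALSE;

    if (!rm_init_adapter(sp, nv))         /* initialize the GPU itself */
    {
        rm_free_private_state(sp, nv);
        return NV_FALSE;
    }
    return NV_TRUE;
}

static void example_adapter_tear_down(nvidia_stack_t *sp, nv_state_t *nv)
{
    rm_disable_adapter(sp, nv);           /* quiesce before shutting down */
    rm_shutdown_adapter(sp, nv);
    rm_free_private_state(sp, nv);
}
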
+ +NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *); +const char* NV_API_CALL rm_get_device_name (NvU16, NvU16, NvU16); + +NV_STATUS NV_API_CALL rm_is_supported_device (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_supported_pci_device(NvU8 pci_class, + NvU8 pci_subclass, + NvU16 vendor, + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_legacy_warning); + +void NV_API_CALL rm_i2c_remove_adapters (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_i2c_is_smbus_capable (nvidia_stack_t *, nv_state_t *, void *); +NV_STATUS NV_API_CALL rm_i2c_transfer (nvidia_stack_t *, nv_state_t *, void *, NvU8, NvU8, NvU8, NvU32, NvU8 *); + +NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU32); + +NV_STATUS NV_API_CALL rm_system_event (nvidia_stack_t *, NvU32, NvU32); + +void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *); +NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64); +NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *); +NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **); +NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *); +NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *); +NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU32, NvU32, NvU64 *, void **); +NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *); +void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle); +NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, NvU64 *); +NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64); +NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **); +void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *); +NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *); + +void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd); +NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id); +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(nvidia_stack_t *, nv_state_t *, NvU32 *); +NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *); +NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *); +NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL 
rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, NvS32 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool); +NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_disable_iomap_wc(void); + +void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool); +void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool); +const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *); +const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *); +const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool); + +void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32); +NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *); + +/* vGPU VFIO specific functions */ +NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32); +NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16); +NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 **, NvBool); +NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8); +NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *); +NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32); +NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *); +NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *); +NV_STATUS NV_API_CALL nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *); +NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *); + +NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*); +nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*); +void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size); + +/* Callbacks should occur roughly every 10ms. */ +#define NV_SNAPSHOT_TIMER_HZ 100 +void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context)); +void NV_API_CALL nv_flush_snapshot_timer(void); +void NV_API_CALL nv_stop_snapshot_timer(void); + +static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv) +{ + return nv->nv_uuid_cache.valid ? 
nv->nv_uuid_cache.uuid : NULL; +} + + + + + + + + + + + + + +#if defined(NVCPU_X86_64) + +static inline NvU64 nv_rdtsc(void) +{ + NvU64 val; + __asm__ __volatile__ ("rdtsc \t\n" + "shlq $0x20,%%rdx \t\n" + "orq %%rdx,%%rax \t\n" + : "=A" (val)); + return val; +} + +#endif + +#endif /* NVRM */ + +static inline int nv_count_bits(NvU64 word) +{ + NvU64 bits; + + bits = (word & 0x5555555555555555ULL) + ((word >> 1) & 0x5555555555555555ULL); + bits = (bits & 0x3333333333333333ULL) + ((bits >> 2) & 0x3333333333333333ULL); + bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >> 4) & 0x0f0f0f0f0f0f0f0fULL); + bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >> 8) & 0x00ff00ff00ff00ffULL); + bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL); + bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL); + + return (int)(bits); +} + +#endif diff --git a/kernel-open/common/inc/nvCpuUuid.h b/kernel-open/common/inc/nvCpuUuid.h new file mode 100644 index 000000000..0ab546b7c --- /dev/null +++ b/kernel-open/common/inc/nvCpuUuid.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_CPU_UUID_H_ +#define _NV_CPU_UUID_H_ + +#define NV_UUID_LEN 16 + +typedef struct nv_uuid +{ + NvU8 uuid[NV_UUID_LEN]; + +} NvUuid; + +#define NV_UUID_HI(pUuid) (*((NvU64*)((pUuid)->uuid + (NV_UUID_LEN >> 1)))) +#define NV_UUID_LO(pUuid) (*((NvU64*)((pUuid)->uuid + 0))) + +typedef NvUuid NvSystemUuid; + +typedef NvUuid NvProcessorUuid; + +extern const NvProcessorUuid NV_PROCESSOR_UUID_CPU_DEFAULT; + +#endif // _NV_CPU_UUID_H_ diff --git a/kernel-open/common/inc/nv_firmware_types.h b/kernel-open/common/inc/nv_firmware_types.h new file mode 100644 index 000000000..90dd93f1e --- /dev/null +++ b/kernel-open/common/inc/nv_firmware_types.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_FIRMWARE_TYPES_H +#define NV_FIRMWARE_TYPES_H + +typedef enum { + NV_FIRMWARE_MODE_DISABLED = 0, + NV_FIRMWARE_MODE_ENABLED = 1, + NV_FIRMWARE_MODE_DEFAULT = 2, + NV_FIRMWARE_MODE_INVALID = 0xFF +} NvFirmwareMode; + +#endif // NV_FIRMWARE_TYPES_H diff --git a/kernel-open/common/inc/nv_speculation_barrier.h b/kernel-open/common/inc/nv_speculation_barrier.h new file mode 100644 index 000000000..20b32bdaf --- /dev/null +++ b/kernel-open/common/inc/nv_speculation_barrier.h @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * NVIDIA GPZ vulnerability mitigation definitions. + */ + +/* + * There are two copies of this file for legacy reasons: + * + * P4: <$NV_SOURCE/>drivers/common/inc/nv_speculation_barrier.h + * Git: include/nv_speculation_barrier.h + * + * Both files need to be kept in sync if any changes are required. 
+ */ + +#ifndef _NV_SPECULATION_BARRIER_H_ +#define _NV_SPECULATION_BARRIER_H_ + +#define NV_SPECULATION_BARRIER_VERSION 2 + +/* + * GNU-C/MSC/clang - x86/x86_64 : x86_64, __i386, __i386__ + * GNU-C - THUMB mode : __GNUC__, __thumb__ + * GNU-C - ARM modes : __GNUC__, __arm__, __aarch64__ + * armclang - THUMB mode : __ARMCC_VERSION, __thumb__ + * armclang - ARM modes : __ARMCC_VERSION, __arm__, __aarch64__ + * GHS - THUMB mode : __ghs__, __THUMB__ + * GHS - ARM modes : __ghs__, __ARM__, __ARM64__ + */ + +#if defined(_M_IX86) || defined(__i386__) || defined(__i386) \ + || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) + /* All x86 */ + #define NV_SPECULATION_BARRIER_x86 + +#elif defined(macintosh) || defined(__APPLE__) \ + || defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) \ + || defined(__POWERPC__) || defined(__ppc) || defined(__ppc__) \ + || defined(__ppc64__) || defined(__PPC__) \ + || defined(__PPC64__) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) + /* All PowerPC */ + #define NV_SPECULATION_BARRIER_PPC + +#elif (defined(__GNUC__) && defined(__thumb__)) \ + || (defined(__ARMCC_VERSION) && defined(__thumb__)) \ + || (defined(__ghs__) && defined(__THUMB__)) + /* ARM-thumb mode(<=ARMv7)/T32 (ARMv8) */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst.w 0xf3af8014\n" + +#elif (defined(__GNUC__) && defined(__arm__)) \ + || (defined(__ARMCC_VERSION) && defined(__arm__)) \ + || (defined(__ghs__) && defined(__ARM__)) + /* aarch32(ARMv8) / arm(<=ARMv7) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst 0xe320f014\n" + +#elif (defined(__GNUC__) && defined(__aarch64__)) \ + || (defined(__ARMCC_VERSION) && defined(__aarch64__)) \ + || (defined(__ghs__) && defined(__ARM64__)) + /* aarch64(ARMv8) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB "HINT #20\n" +#elif (defined(_MSC_VER) && ( defined(_M_ARM64) || defined(_M_ARM)) ) + /* Not currently implemented for MSVC/ARM64. See bug 3366890. */ +# define nv_speculation_barrier() +# define speculation_barrier() nv_speculation_barrier() +#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS +# define nv_speculation_barrier() +#else + #error "Unknown compiler/chip family" +#endif + +/* + * nv_speculation_barrier -- General-purpose speculation barrier + * + * This approach provides full protection against variant-1 vulnerability. + * However, the recommended approach is detailed below (See: + * nv_array_index_no_speculate) + * + * Semantics: + * Any memory read that is sequenced after a nv_speculation_barrier(), + * and contained directly within the scope of nv_speculation_barrier() or + * directly within a nested scope, will not speculatively execute until all + * conditions for entering that scope have been architecturally resolved. + * + * Example: + * if (untrusted_index_from_user < bound) { + * ... + * nv_speculation_barrier(); + * ... 
+ * x = array1[untrusted_index_from_user]; + * bit = x & 1; + * y = array2[0x100 * bit]; + * } + */ + +#if defined(NV_SPECULATION_BARRIER_x86) +// Delete after all references are changed to nv_speculation_barrier +#define speculation_barrier() nv_speculation_barrier() + +static inline void nv_speculation_barrier(void) +{ + +#if defined(_MSC_VER) && !defined(__clang__) + _mm_lfence(); +#endif + +#if defined(__GNUC__) || defined(__clang__) + __asm__ __volatile__ ("lfence" : : : "memory"); +#endif + +} + +#elif defined(NV_SPECULATION_BARRIER_PPC) + +static inline void nv_speculation_barrier(void) +{ + asm volatile("ori 31,31,0"); +} + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + +/* Note: Cortex-A9 GNU-assembler seems to complain about DSB SY */ + #define nv_speculation_barrier() \ + asm volatile \ + ( \ + "DSB sy\n" \ + "ISB\n" \ + : : : "memory" \ + ) +#endif + +/* + * nv_array_index_no_speculate -- Recommended variant-1 mitigation approach + * + * The array-index-no-speculate approach "de-speculates" an array index that + * has already been bounds-checked. + * + * This approach is preferred over nv_speculation_barrier due to the following + * reasons: + * - It is just as effective as the general-purpose speculation barrier. + * - It clearly identifies what array index is being de-speculated and is thus + * self-commenting, whereas the general-purpose speculation barrier requires + * an explanation of what array index is being de-speculated. + * - It performs substantially better than the general-purpose speculation + * barrier on ARM Cortex-A cores (the difference is expected to be tens of + * cycles per invocation). Within tight loops, this difference may become + * noticeable. + * + * Semantics: + * Provided count is non-zero and the caller has already validated or otherwise + * established that index < count, any speculative use of the return value will + * use a speculative value that is less than count. + * + * Example: + * if (untrusted_index_from_user < bound) { + * untrusted_index_from_user = nv_array_index_no_speculate( + * untrusted_index_from_user, bound); + * ... + * x = array1[untrusted_index_from_user]; + * ... + * } + * + * The use of nv_array_index_no_speculate() in the above example ensures that + * subsequent uses of untrusted_index_from_user will not execute speculatively + * (they will wait for the bounds check to complete). + */ + +static inline unsigned long nv_array_index_no_speculate(unsigned long index, + unsigned long count) +{ +#if defined(NV_SPECULATION_BARRIER_x86) && (defined(__GNUC__) || defined(__clang__)) + unsigned long mask; + + __asm__ __volatile__ + ( + "CMP %2, %1 \n" + "SBB %0, %0 \n" + : "=r"(mask) : "r"(index), "r"(count) : "cc" + ); + + return (index & mask); + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + unsigned long mask; + + asm volatile + ( + "CMP %[ind], %[cnt] \n" + "SBC %[res], %[cnt], %[cnt] \n" + NV_SPEC_BARRIER_CSDB + : [res] "=r" (mask) : [ind] "r" (index), [cnt] "r" (count): "cc" + ); + + return (index & mask); + +/* Fallback to generic speculation barrier for unsupported platforms */ +#else + nv_speculation_barrier(); + + return index; +#endif +} + +#endif //_NV_SPECULATION_BARRIER_H_ diff --git a/kernel-open/common/inc/nv_stdarg.h b/kernel-open/common/inc/nv_stdarg.h new file mode 100644 index 000000000..b23f7f7b8 --- /dev/null +++ b/kernel-open/common/inc/nv_stdarg.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_STDARG_H_ +#define _NV_STDARG_H_ + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) + #include "conftest.h" + #if defined(NV_LINUX_STDARG_H_PRESENT) + #include + #else + #include + #endif +#else + #include +#endif + +#endif // _NV_STDARG_H_ diff --git a/kernel-open/common/inc/nv_uvm_interface.h b/kernel-open/common/inc/nv_uvm_interface.h new file mode 100644 index 000000000..86a6b2e07 --- /dev/null +++ b/kernel-open/common/inc/nv_uvm_interface.h @@ -0,0 +1,1517 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// This file provides the interface that RM exposes to UVM. +// + +#ifndef _NV_UVM_INTERFACE_H_ +#define _NV_UVM_INTERFACE_H_ + +// Forward references, to break circular header file dependencies: +struct UvmOpsUvmEvents; + +#if defined(NVIDIA_UVM_ENABLED) + +// We are in the UVM build system, for a Linux target. +#include "uvm_linux.h" + +#else + +// We are in the RM build system, for a Linux target: +#include "nv-linux.h" + +#endif // NVIDIA_UVM_ENABLED + +#include "nvgputypes.h" +#include "nvstatus.h" +#include "nv_uvm_types.h" + + +// Define the type here as it's Linux specific, used only by the Linux specific +// nvUvmInterfaceRegisterGpu() API. 
+typedef struct +{ + struct pci_dev *pci_dev; + + // DMA addressable range of the device, mirrors fields in nv_state_t. + NvU64 dma_addressable_start; + NvU64 dma_addressable_limit; +} UvmGpuPlatformInfo; + +/******************************************************************************* + nvUvmInterfaceRegisterGpu + + Registers the GPU with the provided UUID for use. A GPU must be registered + before its UUID can be used with any other API. This call is ref-counted so + every nvUvmInterfaceRegisterGpu must be paired with a corresponding + nvUvmInterfaceUnregisterGpu. + + You don't need to call nvUvmInterfaceSessionCreate before calling this. + + Error codes: + NV_ERR_GPU_UUID_NOT_FOUND + NV_ERR_NO_MEMORY + NV_ERR_GENERIC +*/ +NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo); + +/******************************************************************************* + nvUvmInterfaceUnregisterGpu + + Unregisters the GPU with the provided UUID. This drops the ref count from + nvUvmInterfaceRegisterGpu. Once the reference count goes to 0 the device may + no longer be accessible until the next nvUvmInterfaceRegisterGpu call. No + automatic resource freeing is performed, so only make the last unregister + call after destroying all your allocations associated with that UUID (such + as those from nvUvmInterfaceAddressSpaceCreate). + + If the UUID is not found, no operation is performed. +*/ +void nvUvmInterfaceUnregisterGpu(const NvProcessorUuid *gpuUuid); + +/******************************************************************************* + nvUvmInterfaceSessionCreate + + TODO: Creates session object. All allocations are tied to the session. + + The platformInfo parameter is filled by the callee with miscellaneous system + information. Refer to the UvmPlatformInfo struct for details. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session, + UvmPlatformInfo *platformInfo); + +/******************************************************************************* + nvUvmInterfaceSessionDestroy + + Destroys a session object. All allocations are tied to the session will + be destroyed. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session); + +/******************************************************************************* + nvUvmInterfaceDeviceCreate + + Creates a device object under the given session for the GPU with the given + UUID. Also creates a partition object for the device iff bCreateSmcPartition + is true and pGpuInfo->smcEnabled is true. pGpuInfo->smcUserClientInfo will + be used to determine the SMC partition in this case. A device handle is + returned in the device output parameter. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY + NV_ERR_INVALID_ARGUMENT + NV_ERR_INSUFFICIENT_RESOURCES + NV_ERR_OBJECT_NOT_FOUND +*/ +NV_STATUS nvUvmInterfaceDeviceCreate(uvmGpuSessionHandle session, + const UvmGpuInfo *pGpuInfo, + const NvProcessorUuid *gpuUuid, + uvmGpuDeviceHandle *device, + NvBool bCreateSmcPartition); + +/******************************************************************************* + nvUvmInterfaceDeviceDestroy + + Destroys the device object for the given handle. The handle must have been + obtained in a prior call to nvUvmInterfaceDeviceCreate. 
+*/ +void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device); + +/******************************************************************************* + nvUvmInterfaceAddressSpaceCreate + + This function creates an address space. + This virtual address space is created on the GPU specified + by device. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device, + unsigned long long vaBase, + unsigned long long vaSize, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +/******************************************************************************* + nvUvmInterfaceDupAddressSpace + + This function will dup the given vaspace from the users client to the + kernel client was created as an ops session. + + By duping the vaspace it is guaranteed that RM will refcount the vaspace object. + + Error codes: + NV_ERR_GENERIC +*/ +NV_STATUS nvUvmInterfaceDupAddressSpace(uvmGpuDeviceHandle device, + NvHandle hUserClient, + NvHandle hUserVASpace, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +/******************************************************************************* + nvUvmInterfaceAddressSpaceDestroy + + Destroys an address space that was previously created via + nvUvmInterfaceAddressSpaceCreate. +*/ + +void nvUvmInterfaceAddressSpaceDestroy(uvmGpuAddressSpaceHandle vaSpace); + +/******************************************************************************* + nvUvmInterfaceMemoryAllocFB + + This function will allocate video memory and provide a mapped Gpu + virtual address to this allocation. It also returns the Gpu physical + offset if contiguous allocations are requested. + + This function will allocate a minimum page size if the length provided is 0 + and will return a unique GPU virtual address. + + The default page size will be the small page size (as returned by query + caps). The Alignment will also be enforced to small page size(64K/128K). + + Arguments: + vaSpace[IN] - Pointer to vaSpace object + length [IN] - Length of the allocation + gpuPointer[OUT] - GPU VA mapping + allocInfo[IN/OUT] - Pointer to allocation info structure which + contains below given fields + + allocInfo Members: + rangeBegin[IN] - Allocation will be made between rangeBegin + rangeEnd[IN] and rangeEnd(both inclusive). Default will be + no-range limitation. + gpuPhysOffset[OUT] - Physical offset of allocation returned only + if contiguous allocation is requested. + bContiguousPhysAlloc[IN] - Flag to request contiguous allocation. Default + will follow the vidHeapControl default policy. + bHandleProvided [IN] - Flag to signify that the client has provided + the handle for phys allocation. + hPhysHandle[IN/OUT] - The handle will be used in allocation if provided. + If not provided; allocator will return the handle + it used eventually. 
+ Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_NO_MEMORY - Not enough physical memory to service + allocation request with provided constraints + NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request + NV_ERR_INVALID_OWNER - Target memory not accessible by specified owner + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB + +*/ +NV_STATUS nvUvmInterfaceMemoryAllocFB(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, + UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo); + +/******************************************************************************* + nvUvmInterfaceMemoryAllocSys + + This function will allocate system memory and provide a mapped Gpu + virtual address to this allocation. + + This function will allocate a minimum page size if the length provided is 0 + and will return a unique GPU virtual address. + + The default page size will be the small page size (as returned by query caps) + The Alignment will also be enforced to small page size. + + Arguments: + vaSpace[IN] - Pointer to vaSpace object + length [IN] - Length of the allocation + gpuPointer[OUT] - GPU VA mapping + allocInfo[IN/OUT] - Pointer to allocation info structure which + contains below given fields + + allocInfo Members: + rangeBegin[IN] - Allocation will be made between rangeBegin + rangeEnd[IN] and rangeEnd(both inclusive). Default will be + no-range limitation. + gpuPhysOffset[OUT] - Physical offset of allocation returned only + if contiguous allocation is requested. + bContiguousPhysAlloc[IN] - Flag to request contiguous allocation. Default + will follow the vidHeapControl default policy. + bHandleProvided [IN] - Flag to signify that the client has provided + the handle for phys allocation. + hPhysHandle[IN/OUT] - The handle will be used in allocation if provided. + If not provided; allocator will return the handle + it used eventually. + Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_NO_MEMORY - Not enough physical memory to service + allocation request with provided constraints + NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request + NV_ERR_INVALID_OWNER - Target memory not accessible by specified owner + NV_ERR_NOT_SUPPORTED - Operation not supported +*/ +NV_STATUS nvUvmInterfaceMemoryAllocSys(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, + UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo); + +/******************************************************************************* + nvUvmInterfaceGetP2PCaps + + Obtain the P2P capabilities between two devices. + + Arguments: + device1[IN] - Device handle of the first GPU (required) + device2[IN] - Device handle of the second GPU (required) + p2pCapsParams [OUT] - P2P capabilities between the two GPUs + + Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_GENERIC: + Unexpected error. We try hard to avoid returning this error + code,because it is not very informative. + +*/ +NV_STATUS nvUvmInterfaceGetP2PCaps(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + UvmGpuP2PCapsParams * p2pCapsParams); + +/******************************************************************************* + nvUvmInterfaceGetPmaObject + + This function will return pointer to PMA object for the given GPU. This + PMA object handle is required for page allocation. 
+ + Arguments: + device [IN] - Device handle allocated in + nvUvmInterfaceDeviceCreate + pPma [OUT] - Pointer to PMA object + pPmaPubStats [OUT] - Pointer to UvmPmaStatistics object + + Error codes: + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB + NV_ERR_GENERIC: + Unexpected error. We try hard to avoid returning this error + code,because it is not very informative. +*/ +NV_STATUS nvUvmInterfaceGetPmaObject(uvmGpuDeviceHandle device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats); + +// Mirrors pmaEvictPagesCb_t, see its documentation in pma.h. +typedef NV_STATUS (*uvmPmaEvictPagesCallback)(void *callbackData, + NvU32 pageSize, + NvU64 *pPages, + NvU32 count, + NvU64 physBegin, + NvU64 physEnd); + +// Mirrors pmaEvictRangeCb_t, see its documentation in pma.h. +typedef NV_STATUS (*uvmPmaEvictRangeCallback)(void *callbackData, NvU64 physBegin, NvU64 physEnd); + +/******************************************************************************* + nvUvmInterfacePmaRegisterEvictionCallbacks + + Simple wrapper for pmaRegisterEvictionCb(), see its documentation in pma.h. +*/ +NV_STATUS nvUvmInterfacePmaRegisterEvictionCallbacks(void *pPma, + uvmPmaEvictPagesCallback evictPages, + uvmPmaEvictRangeCallback evictRange, + void *callbackData); + +/****************************************************************************** + nvUvmInterfacePmaUnregisterEvictionCallbacks + + Simple wrapper for pmaUnregisterEvictionCb(), see its documentation in pma.h. +*/ +void nvUvmInterfacePmaUnregisterEvictionCallbacks(void *pPma); + +/******************************************************************************* + nvUvmInterfacePmaAllocPages + + @brief Synchronous API for allocating pages from the PMA. + PMA will decide which pma regions to allocate from based on the provided + flags. PMA will also initiate UVM evictions to make room for this + allocation unless prohibited by PMA_FLAGS_DONT_EVICT. UVM callers must pass + this flag to avoid deadlock. Only UVM may allocated unpinned memory from + this API. + + For broadcast methods, PMA will guarantee the same physical frames are + allocated on multiple GPUs, specified by the PMA objects passed in. + + If allocation is contiguous, only one page in pPages will be filled. + Also, contiguous flag must be passed later to nvUvmInterfacePmaFreePages. + + Arguments: + pPma[IN] - Pointer to PMA object + pageCount [IN] - Number of pages required to be allocated. + pageSize [IN] - 64kb, 128kb or 2mb. No other values are permissible. + pPmaAllocOptions[IN] - Pointer to PMA allocation info structure. + pPages[OUT] - Array of pointers, containing the PA base + address of each page. + + Error codes: + NV_ERR_NO_MEMORY: + Internal memory allocation failed. + NV_ERR_GENERIC: + Unexpected error. We try hard to avoid returning this error + code,because it is not very informative. +*/ +NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma, + NvLength pageCount, + NvU32 pageSize, + UvmPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages); + +/******************************************************************************* + nvUvmInterfacePmaPinPages + + This function will pin the physical memory allocated using PMA. The pages + passed as input must be unpinned else this function will return an error and + rollback any change if any page is not previously marked "unpinned". + + Arguments: + pPma[IN] - Pointer to PMA object. + pPages[IN] - Array of pointers, containing the PA base + address of each page to be pinned. 
+ pageCount [IN] - Number of pages required to be pinned. + pageSize [IN] - Page size of each page to be pinned. + flags [IN] - UVM_PMA_CALLED_FROM_PMA_EVICTION if called from + PMA eviction, 0 otherwise. + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid input arguments. + NV_ERR_GENERIC - Unexpected error. We try hard to avoid + returning this error code as is not very + informative. + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB +*/ +NV_STATUS nvUvmInterfacePmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags); + +/******************************************************************************* + nvUvmInterfacePmaUnpinPages + + This function will unpin the physical memory allocated using PMA. The pages + passed as input must be already pinned, else this function will return an + error and rollback any change if any page is not previously marked "pinned". + Behaviour is undefined if any blacklisted pages are unpinned. + + Arguments: + pPma[IN] - Pointer to PMA object. + pPages[IN] - Array of pointers, containing the PA base + address of each page to be unpinned. + pageCount [IN] - Number of pages required to be unpinned. + pageSize [IN] - Page size of each page to be unpinned. + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid input arguments. + NV_ERR_GENERIC - Unexpected error. We try hard to avoid + returning this error code as is not very + informative. + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB +*/ +NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize); + +/******************************************************************************* + nvUvmInterfaceMemoryFree + + Free up a GPU allocation +*/ +void nvUvmInterfaceMemoryFree(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer); + +/******************************************************************************* + nvUvmInterfacePmaFreePages + + This function will free physical memory allocated using PMA. It marks a list + of pages as free. This operation is also used by RM to mark pages as "scrubbed" + for the initial ECC sweep. This function does not fail. + + When allocation was contiguous, an appropriate flag needs to be passed. + + Arguments: + pPma[IN] - Pointer to PMA object + pPages[IN] - Array of pointers, containing the PA base + address of each page. + pageCount [IN] - Number of pages required to be allocated. + pageSize [IN] - Page size of each page + flags [IN] - Flags with information about allocation type + with the same meaning as flags in options for + nvUvmInterfacePmaAllocPages. When called from PMA + eviction, UVM_PMA_CALLED_FROM_PMA_EVICTION needs + to be added to flags. + Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_NO_MEMORY - Not enough physical memory to service + allocation request with provided constraints + NV_ERR_INSUFFICIENT_RESOURCES - Not enough available resources to satisfy allocation request + NV_ERR_INVALID_OWNER - Target memory not accessible by specified owner + NV_ERR_NOT_SUPPORTED - Operation not supported on broken FB +*/ +void nvUvmInterfacePmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags); + +/******************************************************************************* + nvUvmInterfaceMemoryCpuMap + + This function creates a CPU mapping to the provided GPU address. + If the address is not the same as what is returned by the Alloc + function, then the function will map it from the address provided. 
+ This offset will be relative to the gpu offset obtained from the + memory alloc functions. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer, + NvLength length, void **cpuPtr, + NvU32 pageSize); + +/******************************************************************************* + uvmGpuMemoryCpuUnmap + + Unmaps the cpuPtr provided from the process virtual address space. +*/ +void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace, + void *cpuPtr); + +/******************************************************************************* + nvUvmInterfaceChannelAllocate + + This function will allocate a channel bound to a copy engine + + allocParams must contain an engineIndex as channels need to be bound to an + engine type at allocation time. The possible values are [0, + UVM_COPY_ENGINE_COUNT_MAX), but notably only the copy engines that have + UvmGpuCopyEngineCaps::supported set to true can be allocated. This struct + also contains information relative to GPFIFO and GPPut. + + channel is filled with the address of the corresponding channel handle. + + channelInfo is filled out with channel get/put. The errorNotifier is filled + out when the channel hits an RC error. On Volta+ devices, it also computes + the work submission token and the work submission offset to be used in the + Host channel submission doorbell. + + Arguments: + vaSpace[IN] - VA space linked to a client and a device under which + the channel will be allocated + allocParams[IN] - structure with allocation settings + channel[OUT] - pointer to the new channel handle + channelInfo[OUT] - structure filled with channel information + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceChannelAllocate(uvmGpuAddressSpaceHandle vaSpace, + const UvmGpuChannelAllocParams *allocParams, + uvmGpuChannelHandle *channel, + UvmGpuChannelInfo *channelInfo); + +/******************************************************************************* + nvUvmInterfaceChannelDestroy + + This function destroys a given channel + + Arguments: + channel[IN] - channel handle +*/ +void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel); + +/******************************************************************************* + nvUvmInterfaceQueryCaps + + Return capabilities for the provided GPU. + If GPU does not exist, an error will be returned. + + If the client is only interested in the capabilities of the Copy Engines of + the given GPU, use nvUvmInterfaceQueryCopyEnginesCaps instead. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device, + UvmGpuCaps * caps); + +/******************************************************************************* + nvUvmInterfaceQueryCopyEnginesCaps + + Return the capabilities of all the Copy Engines for the provided GPU. + If the GPU does not exist, an error will be returned. + + Error codes: + NV_ERR_GENERIC + NV_ERR_NO_MEMORY +*/ +NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device, + UvmGpuCopyEnginesCaps *caps); + +/******************************************************************************* + nvUvmInterfaceGetGpuInfo + + Return various gpu info, refer to the UvmGpuInfo struct for details. + If no gpu matching the uuid is found, an error will be returned. + + On Ampere+ GPUs, pGpuClientInfo contains SMC information provided by the + client regarding the partition targeted in this operation. 
+
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_INSUFFICIENT_RESOURCES
+*/
+NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid,
+ const UvmGpuClientInfo *pGpuClientInfo,
+ UvmGpuInfo *pGpuInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceServiceDeviceInterruptsRM
+
+ Tells RM to service all pending interrupts. This is helpful in ECC error
+ conditions when the ECC error interrupt is set and the error can be
+ determined only after the ECC notifier has been set or reset.
+
+ Error codes:
+ NV_ERR_GENERIC
+ UVM_INVALID_ARGUMENTS
+*/
+NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device);
+
+/*******************************************************************************
+ nvUvmInterfaceSetPageDirectory
+ Sets pageDirectory in the provided location. Also moves the existing PDE to
+ the provided pageDirectory.
+
+ RM will propagate the update to all channels using the provided VA space.
+ All channels must be idle when this call is made.
+
+ Arguments:
+ vaSpace[IN] - VASpace Object
+ physAddress[IN] - Physical address of new page directory
+ numEntries[IN] - Number of entries, including the previous PDE, which will be copied
+ bVidMemAperture[IN] - If set, pageDirectory will reside in the VidMem aperture, else sysmem
+ pasid[IN] - PASID (Process Address Space IDentifier) of the process
+ corresponding to the VA space. Ignored unless the VA space
+ object has ATS enabled.
+
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace,
+ NvU64 physAddress, unsigned numEntries,
+ NvBool bVidMemAperture, NvU32 pasid);
+
+/*******************************************************************************
+ nvUvmInterfaceUnsetPageDirectory
+ Unsets/Restores pageDirectory to RM's defined location.
+
+ Arguments:
+ vaSpace[IN] - VASpace Object
+
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceUnsetPageDirectory(uvmGpuAddressSpaceHandle vaSpace);
+
+/*******************************************************************************
+ nvUvmInterfaceDupAllocation
+
+ Duplicate the given allocation in a different VA space.
+
+ The physical handle backing the source allocation is duplicated in
+ the GPU device associated with the destination VA space, and a new mapping
+ is created in that VA space.
+
+ The input allocation can be located in sysmem (i.e. allocated using
+ nvUvmInterfaceMemoryAllocSys) or vidmem (i.e. allocated using
+ nvUvmInterfaceMemoryAllocFB). If located in vidmem, duplication across
+ GPUs is not supported.
+
+ For duplication of physical memory use nvUvmInterfaceDupMemory.
+
+ Arguments:
+ srcVaSpace[IN] - Source VA space.
+ srcAddress[IN] - GPU VA in the source VA space. The provided address
+ should match one previously returned by
+ nvUvmInterfaceMemoryAllocFB or
+ nvUvmInterfaceMemoryAllocSys.
+ dstVaSpace[IN] - Destination VA space where the new mapping will be
+ created.
+ dstAddress[OUT] - Pointer to the GPU VA in the destination VA space.
+
+ Error codes:
+ NV_ERR_INVALID_ARGUMENT - If any of the inputs is invalid, or the source
+ and destination VA spaces are identical.
+ NV_ERR_OBJECT_NOT_FOUND - If the input allocation is not found under
+ the provided VA space.
+ NV_ERR_NO_MEMORY - If there is no memory to back the duplicate,
+ or the associated metadata.
+ NV_ERR_NOT_SUPPORTED - If trying to duplicate vidmem across GPUs.
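+
+ As an illustration only (srcVaSpace, srcAddress and dstVaSpace are assumed
+ to have been set up by the caller, with srcAddress returned by one of the
+ alloc functions listed above), a sysmem allocation might be duplicated as:
+
+ NvU64 dupAddress = 0;
+ NV_STATUS status = nvUvmInterfaceDupAllocation(srcVaSpace, srcAddress,
+ dstVaSpace, &dupAddress);
+ if (status == NV_OK)
+ {
+ // dupAddress is a GPU VA in dstVaSpace backed by the same physical
+ // memory as srcAddress.
+ }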
+*/
+NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace,
+ NvU64 srcAddress,
+ uvmGpuAddressSpaceHandle dstVaSpace,
+ NvU64 *dstAddress);
+
+/*******************************************************************************
+ nvUvmInterfaceDupMemory
+
+ Duplicates a physical memory allocation. If requested, provides information
+ about the allocation.
+
+ Arguments:
+ device[IN] - Device linked to a client under which
+ the phys memory needs to be duped.
+ hClient[IN] - Client owning the memory.
+ hPhysMemory[IN] - Phys memory which is to be duped.
+ hDupMemory[OUT] - Handle of the duped memory object.
+ pGpuMemoryInfo[OUT] - see nv_uvm_types.h for more information.
+ This parameter can be NULL. (optional)
+ Error codes:
+ NV_ERR_INVALID_ARGUMENT - If any of the parameters is invalid.
+ NV_ERR_NOT_SUPPORTED - If the allocation is not a physical allocation.
+ NV_ERR_OBJECT_NOT_FOUND - If the allocation is not found under the provided client.
+*/
+NV_STATUS nvUvmInterfaceDupMemory(uvmGpuDeviceHandle device,
+ NvHandle hClient,
+ NvHandle hPhysMemory,
+ NvHandle *hDupMemory,
+ UvmGpuMemoryInfo *pGpuMemoryInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceFreeDupedHandle
+
+ Free the allocation represented by the physical handle used to create the
+ duped allocation.
+
+ Arguments:
+ device[IN] - Device handle used to dup the memory.
+ hPhysHandle[IN] - Handle representing the phys allocation.
+
+ Error codes:
+ NV_ERROR
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceFreeDupedHandle(uvmGpuDeviceHandle device,
+ NvHandle hPhysHandle);
+
+/*******************************************************************************
+ nvUvmInterfaceGetFbInfo
+
+ Gets FB information from RM.
+
+ Arguments:
+ device[IN] - GPU device handle
+ fbInfo [OUT] - Pointer to FbInfo structure which contains
+ reservedHeapSize & heapSize
+ Error codes:
+ NV_ERROR
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceGetFbInfo(uvmGpuDeviceHandle device,
+ UvmGpuFbInfo * fbInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceGetEccInfo
+
+ Gets ECC information from RM.
+
+ Arguments:
+ device[IN] - GPU device handle
+ eccInfo [OUT] - Pointer to EccInfo structure
+
+ Error codes:
+ NV_ERROR
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device,
+ UvmGpuEccInfo * eccInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceOwnPageFaultIntr
+
+ This function transfers ownership of the replayable page fault interrupt
+ between RM and UVM for a particular GPU.
+
+ bOwnInterrupts == NV_TRUE: UVM is taking ownership from the RM. This causes
+ the following: RM will not service, enable or disable this interrupt and it
+ is up to the UVM driver to handle this interrupt. In this case, replayable
+ page fault interrupts are disabled by this function, before it returns.
+
+ bOwnInterrupts == NV_FALSE: UVM is returning ownership to the RM: in this
+ case, replayable page fault interrupts MUST BE DISABLED BEFORE CALLING this
+ function.
+
+ The cases above both result in transferring ownership of a GPU that has its
+ replayable page fault interrupts disabled. Doing otherwise would make it
+ very difficult to control which driver handles any interrupts that build up
+ during the hand-off.
+
+ The calling pattern should look like this:
+
+ UVM setting up a new GPU for operation:
+ UVM GPU LOCK
+ nvUvmInterfaceOwnPageFaultIntr(..., NV_TRUE)
+ UVM GPU UNLOCK
+
+ Enable replayable page faults for that GPU
+
+ UVM tearing down a GPU:
+
+ Disable replayable page faults for that GPU
+
+ UVM GPU LOCK
+ nvUvmInterfaceOwnPageFaultIntr(..., NV_FALSE)
+ UVM GPU UNLOCK
+
+ Arguments:
+ device[IN] - Device handle of the GPU to operate on
+ bOwnInterrupts - Set to NV_TRUE for UVM to take ownership of the
+ replayable page fault interrupts. Set to NV_FALSE
+ to return ownership of the page fault interrupts
+ to RM.
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceOwnPageFaultIntr(uvmGpuDeviceHandle device, NvBool bOwnInterrupts);
+
+/*******************************************************************************
+ nvUvmInterfaceInitFaultInfo
+
+ This function obtains the fault buffer address, size and a few register
+ mappings for replayable faults, and creates a shadow buffer to store
+ non-replayable faults if the GPU supports it.
+
+ Arguments:
+ device[IN] - Device handle associated with the gpu
+ pFaultInfo[OUT] - information provided by RM for fault handling
+
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_NO_MEMORY
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device,
+ UvmGpuFaultInfo *pFaultInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceDestroyFaultInfo
+
+ This function destroys and unmaps the fault buffer and clears faultInfo
+ for replayable faults, and frees the shadow buffer for non-replayable faults.
+
+ Arguments:
+ device[IN] - Device handle associated with the gpu
+ pFaultInfo[OUT] - information provided by RM for fault handling
+
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device,
+ UvmGpuFaultInfo *pFaultInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceHasPendingNonReplayableFaults
+
+ This function tells whether there are pending non-replayable faults in the
+ client shadow fault buffer ready to be consumed.
+
+ NOTES:
+ - This function uses a pre-allocated stack per GPU (stored in the
+ UvmGpuFaultInfo object) for calls related to non-replayable faults from the
+ top half.
+ - Concurrent calls to this function using the same pFaultInfo are not
+ thread-safe due to the pre-allocated stack. Therefore, locking is the
+ caller's responsibility.
+ - This function DOES NOT acquire the RM API or GPU locks. That is because
+ it is called during fault servicing, which could produce deadlocks.
+
+ Arguments:
+ pFaultInfo[IN] - information provided by RM for fault handling.
+ Contains a pointer to the shadow fault buffer
+ hasPendingFaults[OUT] - return value that tells if there are
+ non-replayable faults ready to be consumed by
+ the client
+
+ Error codes:
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceHasPendingNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
+ NvBool *hasPendingFaults);
+
+/*******************************************************************************
+ nvUvmInterfaceGetNonReplayableFaults
+
+ This function consumes all the non-replayable fault packets in the client
+ shadow fault buffer and copies them to the given buffer. It also returns the
+ number of faults that have been copied.
+
+ NOTES:
+ - This function uses a pre-allocated stack per GPU (stored in the
+ UvmGpuFaultInfo object) for calls from the bottom half that handles
+ non-replayable faults.
+ - See nvUvmInterfaceHasPendingNonReplayableFaults for the implications of
+ using a shared stack.
+ - This function DOES NOT acquire the RM API or GPU locks. That is because
+ it is called during fault servicing, which could produce deadlocks.
+
+ Arguments:
+ pFaultInfo[IN] - information provided by RM for fault handling.
+ Contains a pointer to the shadow fault buffer
+ pFaultBuffer[OUT] - buffer provided by the client where fault buffers
+ are copied when they are popped out of the shadow
+ fault buffer (which is a circular queue).
+ numFaults[OUT] - return value that tells the number of faults copied
+ to the client's buffer
+
+ Error codes:
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo,
+ void *pFaultBuffer,
+ NvU32 *numFaults);
+
+/*******************************************************************************
+ nvUvmInterfaceInitAccessCntrInfo
+
+ This function obtains the access counter buffer address, size and a few
+ register mappings.
+
+ Arguments:
+ device[IN] - Device handle associated with the gpu
+ pAccessCntrInfo[OUT] - Information provided by RM for access counter handling
+
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device,
+ UvmGpuAccessCntrInfo *pAccessCntrInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceDestroyAccessCntrInfo
+
+ This function destroys and unmaps the access counter buffer and clears
+ accessCntrInfo.
+
+ Arguments:
+ device[IN] - Device handle associated with the gpu
+ pAccessCntrInfo[IN] - Information provided by RM for access counter handling
+
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device,
+ UvmGpuAccessCntrInfo *pAccessCntrInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceEnableAccessCntr
+
+ This function enables access counters using the given configuration.
+ UVM also takes ownership from the RM.
+ This causes the following: RM will not service, enable or disable this
+ interrupt and it is up to the UVM driver to handle this interrupt. In
+ this case, access counter notification interrupts are enabled by this
+ function before it returns.
+
+ Arguments:
+ device[IN] - Device handle associated with the gpu
+ pAccessCntrInfo[IN] - Pointer to structure filled out by nvUvmInterfaceInitAccessCntrInfo
+ pAccessCntrConfig[IN] - Configuration for access counters
+
+ Error codes:
+ NV_ERR_GENERIC
+ NV_ERR_INVALID_ARGUMENT
+*/
+NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device,
+ UvmGpuAccessCntrInfo *pAccessCntrInfo,
+ UvmGpuAccessCntrConfig *pAccessCntrConfig);
+
+/*******************************************************************************
+ nvUvmInterfaceDisableAccessCntr
+
+ This function disables access counters.
+ UVM also returns ownership to the RM: RM can service, enable or
+ disable this interrupt. In this case, access counter notification interrupts
+ are disabled by this function before it returns.
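+
+ Typical pairing with nvUvmInterfaceEnableAccessCntr, as a sketch only
+ (device, accessCntrInfo and config are assumed to have been prepared by the
+ caller, with accessCntrInfo filled out by nvUvmInterfaceInitAccessCntrInfo):
+
+ status = nvUvmInterfaceEnableAccessCntr(device, &accessCntrInfo, &config);
+ // ... consume access counter notifications ...
+ status = nvUvmInterfaceDisableAccessCntr(device, &accessCntrInfo);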
+ + Arguments: + device[IN] - Device handle associated with the gpu + pAccessCntrInfo[IN] - Pointer to structure filled out by nvUvmInterfaceInitAccessCntrInfo + + Error codes: + NV_ERR_GENERIC + NV_ERR_INVALID_ARGUMENT +*/ +NV_STATUS nvUvmInterfaceDisableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo); + +// +// Called by the UVM driver to register operations with RM. Only one set of +// callbacks can be registered by any driver at a time. If another set of +// callbacks was already registered, NV_ERR_IN_USE is returned. +// +NV_STATUS nvUvmInterfaceRegisterUvmCallbacks(struct UvmOpsUvmEvents *importedUvmOps); + +// +// Counterpart to nvUvmInterfaceRegisterUvmCallbacks. This must only be called +// if nvUvmInterfaceRegisterUvmCallbacks returned NV_OK. +// +// Upon return, the caller is guaranteed that any outstanding callbacks are done +// and no new ones will be invoked. +// +void nvUvmInterfaceDeRegisterUvmOps(void); + +/******************************************************************************* + nvUvmInterfaceP2pObjectCreate + + This API creates an NV50_P2P object for the GPUs with the given device + handles, and returns the handle to the object. + + Arguments: + device1[IN] - first GPU device handle + device2[IN] - second GPU device handle + hP2pObject[OUT] - handle to the created P2p object. + + Error codes: + NV_ERR_INVALID_ARGUMENT + NV_ERR_OBJECT_NOT_FOUND : If device object associated with the uuids aren't found. +*/ +NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + NvHandle *hP2pObject); + +/******************************************************************************* + nvUvmInterfaceP2pObjectDestroy + + This API destroys the NV50_P2P associated with the passed handle. + + Arguments: + session[IN] - Session handle. + hP2pObject[IN] - handle to an P2p object. + + Error codes: NONE +*/ +void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session, + NvHandle hP2pObject); + +/******************************************************************************* + nvUvmInterfaceGetExternalAllocPtes + + The interface builds the RM PTEs using the provided input parameters. + + Arguments: + vaSpace[IN] - vaSpace handle. + hMemory[IN] - Memory handle. + offset [IN] - Offset from the beginning of the allocation + where PTE mappings should begin. + Should be aligned with pagesize associated + with the allocation. + size [IN] - Length of the allocation for which PTEs + should be built. + Should be aligned with pagesize associated + with the allocation. + size = 0 will be interpreted as the total size + of the allocation. + gpuExternalMappingInfo[IN/OUT] - See nv_uvm_types.h for more information. + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter/s is passed. + NV_ERR_INVALID_OBJECT_HANDLE - Invalid memory handle is passed. + NV_ERR_NOT_SUPPORTED - Functionality is not supported (see comments in nv_gpu_ops.c) + NV_ERR_INVALID_BASE - offset is beyond the allocation size + NV_ERR_INVALID_LIMIT - (offset + size) is beyond the allocation size. + NV_ERR_BUFFER_TOO_SMALL - gpuExternalMappingInfo.pteBufferSize is insufficient to + store single PTE. + NV_ERR_NOT_READY - Returned when querying the PTEs requires a deferred setup + which has not yet completed. It is expected that the caller + will reattempt the call until a different code is returned. 
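+
+ As an illustration only (vaSpace, hMemory and the buffer size below are
+ assumptions made for the example, not requirements of this interface), the
+ PTEs for a whole allocation might be queried like this, retrying while the
+ deferred setup completes:
+
+ NvU64 pteBuffer[128];
+ UvmGpuExternalMappingInfo mappingInfo = {0};
+ NV_STATUS status;
+
+ mappingInfo.pteBuffer = pteBuffer;
+ mappingInfo.pteBufferSize = sizeof(pteBuffer);
+
+ do
+ {
+ status = nvUvmInterfaceGetExternalAllocPtes(vaSpace, hMemory,
+ 0, 0, &mappingInfo);
+ } while (status == NV_ERR_NOT_READY);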
+
+
+
+
+*/
+NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace,
+ NvHandle hMemory,
+ NvU64 offset,
+ NvU64 size,
+ UvmGpuExternalMappingInfo *gpuExternalMappingInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceRetainChannel
+
+ Validates and returns information about the user's channel and its resources
+ (local CTX buffers + global CTX buffers). The state is refcounted and must be
+ released by calling nvUvmInterfaceReleaseChannel.
+
+ Arguments:
+ vaSpace[IN] - vaSpace handle.
+ hClient[IN] - Client handle
+ hChannel[IN] - Channel handle
+ retainedChannel[OUT] - Opaque pointer to use to refer to this
+ channel in other nvUvmInterface APIs.
+ channelInstanceInfo[OUT] - Channel instance information to be filled out.
+ See nv_uvm_types.h for details.
+
+ Error codes:
+ NV_ERR_INVALID_ARGUMENT : If any of the parameters is invalid.
+ NV_ERR_OBJECT_NOT_FOUND : If the object associated with the handle isn't found.
+ NV_ERR_INVALID_CHANNEL : If the channel verification fails.
+ NV_ERR_INSUFFICIENT_RESOURCES : If no memory is available to store the resource information.
+ */
+NV_STATUS nvUvmInterfaceRetainChannel(uvmGpuAddressSpaceHandle vaSpace,
+ NvHandle hClient,
+ NvHandle hChannel,
+ void **retainedChannel,
+ UvmGpuChannelInstanceInfo *channelInstanceInfo);
+
+/*******************************************************************************
+ nvUvmInterfaceBindChannelResources
+
+ Associates the mapping address of the channel resources (VAs) provided by the
+ caller with the channel.
+
+ Arguments:
+ retainedChannel[IN] - Channel pointer returned by nvUvmInterfaceRetainChannel
+ channelResourceBindParams[IN] - Buffer of initialized UvmGpuChannelInstanceInfo::resourceCount
+ entries. See nv_uvm_types.h for details.
+
+ Error codes:
+ NV_ERR_INVALID_ARGUMENT : If any of the parameters is invalid.
+ NV_ERR_OBJECT_NOT_FOUND : If the object associated with the handle isn't found.
+ NV_ERR_INSUFFICIENT_RESOURCES : If no memory is available to store the resource information.
+ */
+NV_STATUS nvUvmInterfaceBindChannelResources(void *retainedChannel,
+ UvmGpuChannelResourceBindParams *channelResourceBindParams);
+
+/*******************************************************************************
+ nvUvmInterfaceReleaseChannel
+
+ Releases state retained by nvUvmInterfaceRetainChannel.
+ */
+void nvUvmInterfaceReleaseChannel(void *retainedChannel);
+
+/*******************************************************************************
+ nvUvmInterfaceStopChannel
+
+ Idles the channel and takes it off the runlist.
+
+ Arguments:
+ retainedChannel[IN] - Channel pointer returned by nvUvmInterfaceRetainChannel
+ bImmediate[IN] - If true, kill the channel without attempting to wait for it to go idle.
+*/
+void nvUvmInterfaceStopChannel(void *retainedChannel, NvBool bImmediate);
+
+/*******************************************************************************
+ nvUvmInterfaceGetChannelResourcePtes
+
+ The interface builds the RM PTEs using the provided input parameters.
+
+ Arguments:
+ vaSpace[IN] - vaSpace handle.
+ resourceDescriptor[IN] - The channel resource descriptor returned by
+ nvUvmInterfaceRetainChannelResources.
+ offset[IN] - Offset from the beginning of the allocation
+ where PTE mappings should begin.
+ Should be aligned with pagesize associated
+ with the allocation.
+ size[IN] - Length of the allocation for which PTEs
+ should be built.
+ Should be aligned with pagesize associated
+ with the allocation.
+ size = 0 will be interpreted as the total size + of the allocation. + gpuExternalMappingInfo[IN/OUT] - See nv_uvm_types.h for more information. + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter/s is passed. + NV_ERR_INVALID_OBJECT_HANDLE - Invalid memory handle is passed. + NV_ERR_NOT_SUPPORTED - Functionality is not supported. + NV_ERR_INVALID_BASE - offset is beyond the allocation size + NV_ERR_INVALID_LIMIT - (offset + size) is beyond the allocation size. + NV_ERR_BUFFER_TOO_SMALL - gpuExternalMappingInfo.pteBufferSize is insufficient to + store single PTE. +*/ +NV_STATUS nvUvmInterfaceGetChannelResourcePtes(uvmGpuAddressSpaceHandle vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *externalMappingInfo); + +/******************************************************************************* + nvUvmInterfaceReportNonReplayableFault + + The interface communicates a nonreplayable fault packet from UVM to RM, which + will log the fault, notify the clients and then trigger RC on the channel. + + Arguments: + device[IN] - The device where the fault happened. + pFaultPacket[IN] - The opaque pointer from UVM that will be later + converted to a MMU_FAULT_PACKET type. + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter/s is passed. + NV_ERR_NOT_SUPPORTED - Functionality is not supported. +*/ +NV_STATUS nvUvmInterfaceReportNonReplayableFault(uvmGpuDeviceHandle device, + const void *pFaultPacket); + +/******************************************************************************* + nvUvmInterfacePagingChannelAllocate + + In SR-IOV heavy, this function requests the allocation of a paging channel + (i.e. a privileged CE channel) bound to a specified copy engine. Unlike + channels allocated via nvUvmInterfaceChannelAllocate, the caller cannot push + methods to a paging channel directly, but instead relies on the + nvUvmInterfacePagingChannelPushStream API to do so. + + SR-IOV heavy only. The implementation of this interface can acquire + RM or GPU locks. + + Arguments: + device[IN] - device under which the paging channel will be allocated + allocParams[IN] - structure with allocation settings + channel[OUT] - pointer to the allocated paging channel handle + channelInfo[OUT] - structure filled with channel information + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter/s is passed. + NV_ERR_NO_MEMORY - Not enough memory to allocate + paging channel/shadow notifier. + NV_ERR_NOT_SUPPORTED - SR-IOV heavy mode is disabled. + + */ +NV_STATUS nvUvmInterfacePagingChannelAllocate(uvmGpuDeviceHandle device, + const UvmGpuPagingChannelAllocParams *allocParams, + UvmGpuPagingChannelHandle *channel, + UvmGpuPagingChannelInfo *channelInfo); + +/******************************************************************************* + nvUvmInterfacePagingChannelDestroy + + This function destroys a given paging channel. + + SR-IOV heavy only. The implementation of this interface can acquire + RM or GPU locks. + + Arguments: + channel[IN] - paging channel handle. If the passed handle is + the NULL pointer, the function returns immediately. +*/ +void nvUvmInterfacePagingChannelDestroy(UvmGpuPagingChannelHandle channel); + +/******************************************************************************* + + nvUvmInterfacePagingChannelsMap + + Map a guest allocation in the address space associated with all the paging + channels allocated under the given device. + + SR-IOV heavy only. The implementation of this interface can acquire + RM or GPU locks. 
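+
+ Typical map/unmap pairing, as a sketch only (srcVaSpace, srcAddress and
+ device are assumed to have been set up by the caller, with srcAddress
+ returned by nvUvmInterfaceMemoryAllocFB or nvUvmInterfaceMemoryAllocSys):
+
+ NvU64 dstAddress = 0;
+ status = nvUvmInterfacePagingChannelsMap(srcVaSpace, srcAddress,
+ device, &dstAddress);
+ // ... reference dstAddress from paging channel method streams ...
+ nvUvmInterfacePagingChannelsUnmap(srcVaSpace, srcAddress, device);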
+
+ Arguments:
+ srcVaSpace[IN] - VA space handle used to allocate the input pointer
+ srcAddress.
+ srcAddress[IN] - virtual address returned by nvUvmInterfaceMemoryAllocFB
+ or nvUvmInterfaceMemoryAllocSys. The entire allocation
+ backing this guest VA is mapped.
+ device[IN] - device under which paging channels were allocated
+ dstAddress[OUT] - a virtual address that is valid (i.e. is mapped) in
+ all the paging channels allocated under the given vaSpace.
+
+ Error codes:
+ NV_ERR_INVALID_ARGUMENT - Invalid parameter/s is passed.
+ NV_ERR_NOT_SUPPORTED - SR-IOV heavy mode is disabled.
+*/
+NV_STATUS nvUvmInterfacePagingChannelsMap(uvmGpuAddressSpaceHandle srcVaSpace,
+ UvmGpuPointer srcAddress,
+ uvmGpuDeviceHandle device,
+ NvU64 *dstAddress);
+
+/*******************************************************************************
+
+ nvUvmInterfacePagingChannelsUnmap
+
+ Unmap a VA returned by nvUvmInterfacePagingChannelsMap.
+
+ SR-IOV heavy only. The implementation of this interface can acquire
+ RM or GPU locks.
+
+ Arguments:
+ srcVaSpace[IN] - VA space handle that was passed to the previous mapping.
+ srcAddress[IN] - virtual address that was passed to the previous mapping.
+ device[IN] - device under which paging channels were allocated.
+ */
+void nvUvmInterfacePagingChannelsUnmap(uvmGpuAddressSpaceHandle srcVaSpace,
+ UvmGpuPointer srcAddress,
+ uvmGpuDeviceHandle device);
+
+
+/*******************************************************************************
+ nvUvmInterfacePagingChannelPushStream
+
+ Used for remote execution of the passed methods; the UVM driver uses this
+ interface to ask the vGPU plugin to execute certain HW methods on its
+ behalf. The callee should push the methods in the specified order, i.e. it
+ is not allowed to do any reordering.
+
+ The API is asynchronous. The UVM driver can wait on the remote execution by
+ inserting a semaphore release method at the end of the method stream, and
+ then loop until the semaphore value reaches the completion value indicated
+ in the release method.
+
+ The valid HW methods that can be passed by the UVM driver follow; the source
+ functions listed contain the exact formatting (encoding) of the HW method
+ used by the UVM driver for Ampere.
+
+ - TLB invalidation targeting a VA range. See
+ uvm_hal_volta_host_tlb_invalidate_va.
+
+ - TLB invalidation targeting certain levels in the page tree (including
+ the possibility of invalidating everything).
+ See uvm_hal_pascal_host_tlb_invalidate_all.
+
+ - Replayable fault replay. See uvm_hal_volta_replay_faults.
+
+ - Replayable fault cancellation targeting a guest virtual address. See
+ uvm_hal_volta_cancel_faults_va.
+
+ - Membar, scoped to the device or to the entire system. See
+ uvm_hal_pascal_host_membar_gpu and uvm_hal_pascal_host_membar_sys.
+
+ - Host semaphore acquire, see uvm_hal_turing_host_semaphore_acquire. The
+ virtual address specified in the semaphore operation must lie within a
+ buffer previously mapped by nvUvmInterfacePagingChannelsMap.
+
+ - CE semaphore release, see uvm_hal_pascal_ce_semaphore_release. The
+ virtual address specified in the semaphore operation must lie within a
+ buffer previously mapped by nvUvmInterfacePagingChannelsMap.
+
+ - 64 bits-wide memset, see uvm_hal_kepler_ce_memset_8. The destination
+ address is a physical address in vidmem.
+
+ - No-op, see uvm_hal_kepler_host_noop. Used to store the source buffer
+ of a memcopy method within the input stream itself.
+
+ - Memcopy, see uvm_hal_kepler_ce_memcopy.
The destination address is a + physical address in vidmem. The source address is an offset within + methodStream, in bytes, indicating the location of the (inlined) source + buffer. The copy size does not exceed 4KB. + + - CE semaphore release with timestamp, see + uvm_hal_kepler_ce_semaphore_timestamp. The virtual address specified in + the semaphore operation must lie within a buffer previously mapped by + nvUvmInterfacePagingChannelsMap. + + - CE semaphore reduction, see uvm_hal_kepler_ce_semaphore_reduction_inc. + The virtual address specified in the semaphore operation must lie within + a buffer previously mapped by nvUvmInterfacePagingChannelsMap. + + Only invoked in SR-IOV heavy mode. + + NOTES: + - This function uses a pre-allocated stack per paging channel + (stored in the UvmGpuPagingChannel object) + - This function DOES NOT acquire the RM API or GPU locks. That is because + it is called during fault servicing, which could produce deadlocks. + - Concurrent calls to this function using channels under same device are not + allowed due to: + a. pre-allocated stack + b. the fact that internal RPC infrastructure doesn't acquire GPU lock. + Therefore, locking is the caller's responsibility. + - This function DOES NOT sleep (does not allocate memory or acquire locks) + so it can be invoked while holding a spinlock. + + Arguments: + channel[IN] - paging channel handle obtained via + nvUvmInterfacePagingChannelAllocate + + methodStream[IN] - HW methods to be pushed to the paging channel. + + methodStreamSize[IN] - Size of methodStream, in bytes. The maximum push + size is 128KB. + + + Error codes: + NV_ERR_INVALID_ARGUMENT - Invalid parameter/s is passed. + NV_ERR_NOT_SUPPORTED - SR-IOV heavy mode is disabled. +*/ +NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channel, + char *methodStream, + NvU32 methodStreamSize); + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#endif // _NV_UVM_INTERFACE_H_ diff --git a/kernel-open/common/inc/nv_uvm_types.h b/kernel-open/common/inc/nv_uvm_types.h new file mode 100644 index 000000000..865bf7ef0 --- /dev/null +++ b/kernel-open/common/inc/nv_uvm_types.h @@ -0,0 +1,970 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// This file provides common types for both UVM driver and RM's UVM interface. +// + +#ifndef _NV_UVM_TYPES_H_ +#define _NV_UVM_TYPES_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvgputypes.h" +#include "nvCpuUuid.h" + + +// +// Default Page Size if left "0" because in RM BIG page size is default & there +// are multiple BIG page sizes in RM. These defines are used as flags to "0" +// should be OK when user is not sure which pagesize allocation it wants +// +#define UVM_PAGE_SIZE_DEFAULT 0x0 +#define UVM_PAGE_SIZE_4K 0x1000 +#define UVM_PAGE_SIZE_64K 0x10000 +#define UVM_PAGE_SIZE_128K 0x20000 +#define UVM_PAGE_SIZE_2M 0x200000 +#define UVM_PAGE_SIZE_512M 0x20000000 + +// +// When modifying flags, make sure they are compatible with the mirrored +// PMA_* flags in phys_mem_allocator.h. +// +// Input flags +#define UVM_PMA_ALLOCATE_DONT_EVICT NVBIT(0) +#define UVM_PMA_ALLOCATE_PINNED NVBIT(1) +#define UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED NVBIT(2) +#define UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE NVBIT(3) +#define UVM_PMA_ALLOCATE_SPECIFY_REGION_ID NVBIT(4) +#define UVM_PMA_ALLOCATE_PREFER_SLOWEST NVBIT(5) +#define UVM_PMA_ALLOCATE_CONTIGUOUS NVBIT(6) +#define UVM_PMA_ALLOCATE_PERSISTENT NVBIT(7) +#define UVM_PMA_ALLOCATE_PROTECTED_REGION NVBIT(8) +#define UVM_PMA_ALLOCATE_FORCE_ALIGNMENT NVBIT(9) +#define UVM_PMA_ALLOCATE_NO_ZERO NVBIT(10) +#define UVM_PMA_ALLOCATE_TURN_BLACKLIST_OFF NVBIT(11) +#define UVM_PMA_ALLOCATE_ALLOW_PARTIAL NVBIT(12) + +// Output flags +#define UVM_PMA_ALLOCATE_RESULT_IS_ZERO NVBIT(0) + +// Input flags to pmaFree +#define UVM_PMA_FREE_IS_ZERO NVBIT(0) + +// +// Indicate that the PMA operation is being done from one of the PMA eviction +// callbacks. +// +// Notably this flag is currently used only by the UVM/RM interface and not +// mirrored in PMA. +// +#define UVM_PMA_CALLED_FROM_PMA_EVICTION 16384 + +#define UVM_UUID_LEN 16 +#define UVM_SW_OBJ_SUBCHANNEL 5 + +typedef unsigned long long UvmGpuPointer; + +// +// The following typedefs serve to explain the resources they point to. +// The actual resources remain RM internal and not exposed. +// +typedef struct uvmGpuSession_tag *uvmGpuSessionHandle; // gpuSessionHandle +typedef struct uvmGpuDevice_tag *uvmGpuDeviceHandle; // gpuDeviceHandle +typedef struct uvmGpuAddressSpace_tag *uvmGpuAddressSpaceHandle; // gpuAddressSpaceHandle +typedef struct uvmGpuChannel_tag *uvmGpuChannelHandle; // gpuChannelHandle +typedef struct uvmGpuCopyEngine_tag *uvmGpuCopyEngineHandle; // gpuObjectHandle + +typedef struct UvmGpuMemoryInfo_tag +{ + // Out: Memory layout. + NvU32 kind; + + // Out: Set to TRUE, if the allocation is in sysmem. + NvBool sysmem; + + // Out: Set to TRUE, if the allocation is a constructed + // under a Device or Subdevice. + // All permutations of sysmem and deviceDescendant are valid. + // !sysmem && !deviceDescendant implies a fabric allocation. + NvBool deviceDescendant; + + // Out: Page size associated with the phys alloc. + NvU32 pageSize; + + // Out: Set to TRUE, if the allocation is contiguous. + NvBool contig; + + // Out: Starting Addr if the allocation is contiguous. + // This is only valid if contig is NV_TRUE. + NvU64 physAddr; + + // Out: Total size of the allocation. 
+ NvU64 size; + + // Out: Uuid of the GPU to which the allocation belongs. + // This is only valid if deviceDescendant is NV_TRUE. + // Note: If the allocation is owned by a device in + // an SLI group and the allocation is broadcast + // across the SLI group, this UUID will be any one + // of the subdevices in the SLI group. + NvProcessorUuid uuid; +} UvmGpuMemoryInfo; + +// Some resources must share the same virtual mappings across channels. A mapped +// resource must be shared by a channel iff: +// +// 1) The channel belongs to a TSG (UvmGpuChannelInstanceInfo::bTsgChannel is +// NV_TRUE). +// +// 2) The channel is in the same TSG as all other channels sharing that mapping +// (UvmGpuChannelInstanceInfo::tsgId matches among channels). +// +// 3) The channel is in the same GPU address space as the other channels +// sharing that mapping. +// +// 4) The resource handle(s) match those of the shared mapping +// (UvmGpuChannelResourceInfo::resourceDescriptor and +// UvmGpuChannelResourceInfo::resourceId). +typedef struct UvmGpuChannelResourceInfo_tag +{ + // Out: Ptr to the RM memDesc of the channel resource. + NvP64 resourceDescriptor; + + // Out: RM ID of the channel resource. + NvU32 resourceId; + + // Out: Alignment needed for the resource allocation. + NvU64 alignment; + + // Out: Info about the resource allocation. + UvmGpuMemoryInfo resourceInfo; +} UvmGpuChannelResourceInfo; + +typedef struct UvmGpuPagingChannelInfo_tag +{ + // Pointer to a shadown buffer mirroring the contents of the error notifier + // for the paging channel + NvNotification *shadowErrorNotifier; +} UvmGpuPagingChannelInfo; + +typedef enum +{ + UVM_GPU_CHANNEL_ENGINE_TYPE_GR = 1, + UVM_GPU_CHANNEL_ENGINE_TYPE_CE = 2, + UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2 = 3, +} UVM_GPU_CHANNEL_ENGINE_TYPE; + +#define UVM_GPU_CHANNEL_MAX_RESOURCES 13 + +typedef struct UvmGpuChannelInstanceInfo_tag +{ + // Out: Starting address of the channel instance. + NvU64 base; + + // Out: Set to NV_TRUE, if the instance is in sysmem. + // Set to NV_FALSE, if the instance is in vidmem. + NvBool sysmem; + + // Out: Hardware runlist ID. + NvU32 runlistId; + + // Out: Hardware channel ID. + NvU32 chId; + + // Out: NV_TRUE if the channel belongs to a subcontext or NV_FALSE if it + // belongs to a regular context. + NvBool bInSubctx; + + // Out: ID of the subcontext to which the channel belongs. + NvU32 subctxId; + + // Out: Whether the channel belongs to a TSG or not + NvBool bTsgChannel; + + // Out: ID of the TSG to which the channel belongs + NvU32 tsgId; + + // Out: Maximum number of subcontexts in the TSG to which the channel belongs + NvU32 tsgMaxSubctxCount; + + // Out: Info of channel resources associated with the channel. + UvmGpuChannelResourceInfo resourceInfo[UVM_GPU_CHANNEL_MAX_RESOURCES]; + + // Out: Number of valid entries in resourceInfo array. 
+ NvU32 resourceCount; + + // Out: Type of the engine the channel is bound to + NvU32 channelEngineType; + + // Out: Channel handle required to ring the doorbell + NvU32 workSubmissionToken; + + // Out: Address of the doorbell + volatile NvU32 *workSubmissionOffset; + + // Out: Channel handle to be used in the CLEAR_FAULTED method + NvU32 clearFaultedToken; + + // Out: Address of the NV_CHRAM_CHANNEL register required to clear the + // ENG_FAULTED/PBDMA_FAULTED bits after servicing non-replayable faults on + // Ampere+ GPUs + volatile NvU32 *pChramChannelRegister; + + // Out: SMC engine id to which the GR channel is bound, or zero if the GPU + // does not support SMC or it is a CE channel + NvU32 smcEngineId; + + // Out: Start of the VEID range assigned to the SMC engine the GR channel + // is bound to, or zero if the GPU does not support SMC or it is a CE + // channel + NvU32 smcEngineVeIdOffset; +} UvmGpuChannelInstanceInfo; + +typedef struct UvmGpuChannelResourceBindParams_tag +{ + // In: RM ID of the channel resource. + NvU32 resourceId; + + // In: Starting VA at which the channel resource is mapped. + NvU64 resourceVa; +} UvmGpuChannelResourceBindParams; + +typedef struct UvmGpuChannelInfo_tag +{ + volatile unsigned *gpGet; + volatile unsigned *gpPut; + UvmGpuPointer *gpFifoEntries; + unsigned numGpFifoEntries; + unsigned channelClassNum; + + // The errorNotifier is filled out when the channel hits an RC error. + NvNotification *errorNotifier; + + NvU32 hwRunlistId; + NvU32 hwChannelId; + + volatile unsigned *dummyBar1Mapping; + + // These values are filled by nvUvmInterfaceCopyEngineAlloc. The work + // submission token requires the channel to be bound to a runlist and that + // happens after CE allocation. + volatile NvU32 *workSubmissionOffset; + + // To be deprecated. See pWorkSubmissionToken below. + NvU32 workSubmissionToken; + + // + // This is the memory location where the most recently updated work + // submission token for this channel will be written to. After submitting + // new work and updating GP_PUT with the appropriate fence, the token must + // be read from this location before writing it to the workSubmissionOffset + // to kick off the new work. + // + volatile NvU32 *pWorkSubmissionToken; +} UvmGpuChannelInfo; + +typedef enum +{ + // This value must be passed by Pascal and pre-Pascal GPUs for those + // allocations for which a specific location cannot be enforced. + UVM_BUFFER_LOCATION_DEFAULT = 0, + + UVM_BUFFER_LOCATION_SYS = 1, + UVM_BUFFER_LOCATION_VID = 2, +} UVM_BUFFER_LOCATION; + +typedef struct UvmGpuChannelAllocParams_tag +{ + NvU32 numGpFifoEntries; + + // The next two fields store UVM_BUFFER_LOCATION values + NvU32 gpFifoLoc; + NvU32 gpPutLoc; + + // Index of the engine the channel will be bound to + // ignored if engineType is anything other than UVM_GPU_CHANNEL_ENGINE_TYPE_CE + NvU32 engineIndex; + + // interpreted as UVM_GPU_CHANNEL_ENGINE_TYPE + NvU32 engineType; +} UvmGpuChannelAllocParams; + +typedef struct UvmGpuPagingChannelAllocParams_tag +{ + // Index of the LCE engine the channel will be bound to, a zero-based offset + // from NV2080_ENGINE_TYPE_COPY0. + NvU32 engineIndex; +} UvmGpuPagingChannelAllocParams; + +// The max number of Copy Engines supported by a GPU. +// The gpu ops build has a static assert that this is the correct number. 
+#define UVM_COPY_ENGINE_COUNT_MAX 10 + +typedef struct +{ + // True if the CE is supported at all + NvBool supported:1; + + // True if the CE is synchronous with GR + NvBool grce:1; + + // True if the CE shares physical CEs with any other CE + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it + // again each time a GPU is registered. + NvBool shared:1; + + // True if the CE can give enhanced performance for SYSMEM reads over other CEs + NvBool sysmemRead:1; + + // True if the CE can give enhanced performance for SYSMEM writes over other CEs + NvBool sysmemWrite:1; + + // True if the CE can be used for SYSMEM transactions + NvBool sysmem:1; + + // True if the CE can be used for P2P transactions using NVLINK + NvBool nvlinkP2p:1; + + // True if the CE can be used for P2P transactions + NvBool p2p:1; + + // Mask of physical CEs assigned to this LCE + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it + // again each time a GPU is registered. + NvU32 cePceMask; +} UvmGpuCopyEngineCaps; + +typedef struct UvmGpuCopyEnginesCaps_tag +{ + // Supported CEs may not be contiguous + UvmGpuCopyEngineCaps copyEngineCaps[UVM_COPY_ENGINE_COUNT_MAX]; +} UvmGpuCopyEnginesCaps; + +typedef enum +{ + UVM_LINK_TYPE_NONE, + UVM_LINK_TYPE_PCIE, + UVM_LINK_TYPE_NVLINK_1, + UVM_LINK_TYPE_NVLINK_2, + UVM_LINK_TYPE_NVLINK_3, + + + + +} UVM_LINK_TYPE; + +typedef struct UvmGpuCaps_tag +{ + NvU32 sysmemLink; // UVM_LINK_TYPE + NvU32 sysmemLinkRateMBps; // See UvmGpuP2PCapsParams::totalLinkLineRateMBps + NvBool numaEnabled; + NvU32 numaNodeId; + + // On ATS systems, GPUs connected to different CPU sockets can have peer + // traffic. They are called indirect peers. However, indirect peers are + // mapped using sysmem aperture. In order to disambiguate the location of a + // specific memory address, each GPU maps its memory to a different window + // in the System Physical Address (SPA) space. The following fields contain + // the base + size of such window for the GPU. systemMemoryWindowSize + // different than 0 indicates that the window is valid. + // + // - If the window is valid, then we can map GPU memory to the CPU as + // cache-coherent by adding the GPU address to the window start. + // - If numaEnabled is NV_TRUE, then we can also convert the system + // addresses of allocated GPU memory to struct pages. + // + // TODO: Bug 1986868: fix window start computation for SIMICS + NvU64 systemMemoryWindowStart; + NvU64 systemMemoryWindowSize; + + // This tells if the GPU is connected to NVSwitch. On systems with NVSwitch + // all GPUs are connected to it. If connectedToSwitch is NV_TRUE, + // nvswitchMemoryWindowStart tells the base address for the GPU in the + // NVSwitch address space. It is used when creating PTEs of memory mappings + // to NVSwitch peers. 
+ NvBool connectedToSwitch; + NvU64 nvswitchMemoryWindowStart; +} UvmGpuCaps; + +typedef struct UvmGpuAddressSpaceInfo_tag +{ + NvU32 bigPageSize; + + NvBool atsEnabled; + + // Mapped registers that contain the current GPU time + volatile NvU32 *time0Offset; + volatile NvU32 *time1Offset; + + // Maximum number of subcontexts supported under this GPU address space + NvU32 maxSubctxCount; + + NvBool smcEnabled; + + NvU32 smcSwizzId; + + NvU32 smcGpcCount; +} UvmGpuAddressSpaceInfo; + +typedef struct UvmGpuAllocInfo_tag +{ + NvU64 rangeBegin; // Allocation will be made between + NvU64 rangeEnd; // rangeBegin & rangeEnd both included + NvU64 gpuPhysOffset; // Returns gpuPhysOffset if contiguous requested + NvU32 pageSize; // default is RM big page size - 64K or 128 K" else use 4K or 2M + NvU64 alignment; // Alignment of allocation + NvBool bContiguousPhysAlloc; // Flag to request contiguous physical allocation + NvBool bMemGrowsDown; // Causes RM to reserve physical heap from top of FB + NvBool bPersistentVidmem; // Causes RM to allocate persistent video memory + NvHandle hPhysHandle; // Handle for phys allocation either provided or retrieved + + + + +} UvmGpuAllocInfo; + +typedef enum +{ + UVM_VIRT_MODE_NONE = 0, // Baremetal or passthrough virtualization + UVM_VIRT_MODE_LEGACY = 1, // Virtualization without SRIOV support + UVM_VIRT_MODE_SRIOV_HEAVY = 2, // Virtualization with SRIOV Heavy configured + UVM_VIRT_MODE_SRIOV_STANDARD = 3, // Virtualization with SRIOV Standard configured + UVM_VIRT_MODE_COUNT = 4, +} UVM_VIRT_MODE; + +// !!! The following enums (with UvmRm prefix) are defined and documented in +// mm/uvm/interface/uvm_types.h and must be mirrored. Please refer to that file +// for more details. + +// UVM GPU mapping types +typedef enum +{ + UvmRmGpuMappingTypeDefault = 0, + UvmRmGpuMappingTypeReadWriteAtomic = 1, + UvmRmGpuMappingTypeReadWrite = 2, + UvmRmGpuMappingTypeReadOnly = 3, + UvmRmGpuMappingTypeCount = 4 +} UvmRmGpuMappingType; + +// UVM GPU caching types +typedef enum +{ + UvmRmGpuCachingTypeDefault = 0, + UvmRmGpuCachingTypeForceUncached = 1, + UvmRmGpuCachingTypeForceCached = 2, + UvmRmGpuCachingTypeCount = 3 +} UvmRmGpuCachingType; + +// UVM GPU format types +typedef enum { + UvmRmGpuFormatTypeDefault = 0, + UvmRmGpuFormatTypeBlockLinear = 1, + UvmRmGpuFormatTypeCount = 2 +} UvmRmGpuFormatType; + +// UVM GPU Element bits types +typedef enum { + UvmRmGpuFormatElementBitsDefault = 0, + UvmRmGpuFormatElementBits8 = 1, + UvmRmGpuFormatElementBits16 = 2, + // Cuda does not support 24-bit width + UvmRmGpuFormatElementBits32 = 4, + UvmRmGpuFormatElementBits64 = 5, + UvmRmGpuFormatElementBits128 = 6, + UvmRmGpuFormatElementBitsCount = 7 +} UvmRmGpuFormatElementBits; + +// UVM GPU Compression types +typedef enum { + UvmRmGpuCompressionTypeDefault = 0, + UvmRmGpuCompressionTypeEnabledNoPlc = 1, + UvmRmGpuCompressionTypeCount = 2 +} UvmRmGpuCompressionType; + +typedef struct UvmGpuExternalMappingInfo_tag +{ + // In: GPU caching ability. + UvmRmGpuCachingType cachingType; + + // In: Virtual permissions. + UvmRmGpuMappingType mappingType; + + // In: RM virtual mapping memory format + UvmRmGpuFormatType formatType; + + // In: RM virtual mapping element bits + UvmRmGpuFormatElementBits elementBits; + + // In: RM virtual compression type + UvmRmGpuCompressionType compressionType; + + // In: Size of the buffer to store PTEs (in bytes). + NvU64 pteBufferSize; + + // In: Pointer to a buffer to store PTEs. 
+ // Out: The interface will fill the buffer with PTEs + NvU64 *pteBuffer; + + // Out: Number of PTEs filled in to the buffer. + NvU64 numWrittenPtes; + + // Out: Number of PTEs remaining to be filled + // if the buffer is not sufficient to accommodate + // requested PTEs. + NvU64 numRemainingPtes; + + // Out: PTE size (in bytes) + NvU32 pteSize; +} UvmGpuExternalMappingInfo; + +typedef struct UvmGpuP2PCapsParams_tag +{ + // Out: peerId[i] contains gpu[i]'s peer id of gpu[1 - i]. Only defined if + // the GPUs are direct peers. + NvU32 peerIds[2]; + + // Out: UVM_LINK_TYPE + NvU32 p2pLink; + + // Out: optimalNvlinkWriteCEs[i] contains gpu[i]'s optimal CE for writing to + // gpu[1 - i]. The CE indexes are valid only if the GPUs are NVLink peers. + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it again + // each time a GPU is registered. + NvU32 optimalNvlinkWriteCEs[2]; + + // Out: Maximum unidirectional bandwidth between the peers in megabytes per + // second, not taking into account the protocols overhead. The reported + // bandwidth for indirect peers is zero. + NvU32 totalLinkLineRateMBps; + + // Out: True if the peers have a indirect link to communicate. On P9 + // systems, this is true if peers are connected to different NPUs that + // forward the requests between them. + NvU32 indirectAccess : 1; +} UvmGpuP2PCapsParams; + +// Platform-wide information +typedef struct UvmPlatformInfo_tag +{ + // Out: ATS (Address Translation Services) is supported + NvBool atsSupported; + + + + + +} UvmPlatformInfo; + +typedef struct UvmGpuClientInfo_tag +{ + NvHandle hClient; + + NvHandle hSmcPartRef; +} UvmGpuClientInfo; + + + + + + + + + + + + + + + + + + + +#define UVM_GPU_NAME_LENGTH 0x40 + +typedef struct UvmGpuInfo_tag +{ + // Printable gpu name + char name[UVM_GPU_NAME_LENGTH]; + + // Uuid of this gpu + NvProcessorUuid uuid; + + // Gpu architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_* + NvU32 gpuArch; + + // Gpu implementation; NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_* + NvU32 gpuImplementation; + + // Host (gpfifo) class; *_CHANNEL_GPFIFO_*, e.g. KEPLER_CHANNEL_GPFIFO_A + NvU32 hostClass; + + // Copy engine (dma) class; *_DMA_COPY_*, e.g. KEPLER_DMA_COPY_A + NvU32 ceClass; + + // Compute class; *_COMPUTE_*, e.g. KEPLER_COMPUTE_A + NvU32 computeClass; + + // Set if GPU supports TCC Mode & is in TCC mode. + NvBool gpuInTcc; + + // Number of subdevices in SLI group. + NvU32 subdeviceCount; + + // Virtualization mode of this gpu. + NvU32 virtMode; // UVM_VIRT_MODE + + // NV_TRUE if this is a simulated/emulated GPU. NV_FALSE, otherwise. + NvBool isSimulated; + + // Number of GPCs + // If SMC is enabled, this is the currently configured number of GPCs for + // the given partition (also see the smcSwizzId field below). + NvU32 gpcCount; + + // Maximum number of GPCs; NV_SCAL_LITTER_NUM_GPCS + // This number is independent of the partition configuration, and can be + // used to conservatively size GPU-global constructs. + NvU32 maxGpcCount; + + // Number of TPCs + NvU32 tpcCount; + + // Maximum number of TPCs per GPC + NvU32 maxTpcPerGpcCount; + + // NV_TRUE if SMC is enabled on this GPU. + NvBool smcEnabled; + + // SMC partition ID (unique per GPU); note: valid when first looked up in + // nvUvmInterfaceGetGpuInfo(), but not guaranteed to remain valid. + // nvUvmInterfaceDeviceCreate() re-verifies the swizzId and fails if it is + // no longer valid. 
+ NvU32 smcSwizzId; + + UvmGpuClientInfo smcUserClientInfo; + + + + + +} UvmGpuInfo; + +typedef struct UvmGpuFbInfo_tag +{ + // Max physical address that can be allocated by UVM. This excludes internal + // RM regions that are not registered with PMA either. + NvU64 maxAllocatableAddress; + + NvU32 heapSize; // RAM in KB available for user allocations + NvU32 reservedHeapSize; // RAM in KB reserved for internal RM allocation + NvBool bZeroFb; // Zero FB mode enabled. +} UvmGpuFbInfo; + +typedef struct UvmGpuEccInfo_tag +{ + unsigned eccMask; + unsigned eccOffset; + void *eccReadLocation; + NvBool *eccErrorNotifier; + NvBool bEccEnabled; +} UvmGpuEccInfo; + +typedef struct UvmPmaAllocationOptions_tag +{ + NvU32 flags; + NvU32 minimumSpeed; // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_MININUM_SPEED + NvU64 physBegin, physEnd; // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE + NvU32 regionId; // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_REGION_ID + NvU64 alignment; // valid if flags & UVM_PMA_ALLOCATE_FORCE_ALIGNMENT + NvLength numPagesAllocated; // valid if flags & UVM_PMA_ALLOCATE_ALLOW_PARTIAL + + NvU32 resultFlags; // valid if the allocation function returns NV_OK +} UvmPmaAllocationOptions; + +// +// Mirrored in PMA (PMA_STATS) +// +typedef struct UvmPmaStatistics_tag +{ + volatile NvU64 numPages2m; // PMA-wide 2MB pages count across all regions + volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions + volatile NvU64 numFreePages2m; // PMA-wide free 2MB pages count across all regions + + + + + +} UvmPmaStatistics; + +/******************************************************************************* + uvmEventSuspend + This function will be called by the GPU driver to signal to UVM that the + system is about to enter a sleep state. When it is called, the + following assumptions/guarantees are valid/made: + + * User channels have been preempted and disabled + * UVM channels are still running normally and will continue to do + so until after this function returns control + * User threads are still running, but can no longer issue system + system calls to the GPU driver + * Until exit from this function, UVM is allowed to make full use of + the GPUs under its control, as well as of the GPU driver + + Upon return from this function, UVM may not access GPUs under its control + until the GPU driver calls uvmEventResume(). It may still receive + calls to uvmEventIsrTopHalf() during this time, which it should return + NV_ERR_NO_INTR_PENDING from. It will not receive any other calls. +*/ +typedef NV_STATUS (*uvmEventSuspend_t) (void); + +/******************************************************************************* + uvmEventResume + This function will be called by the GPU driver to signal to UVM that the + system has exited a previously entered sleep state. When it is called, + the following assumptions/guarantees are valid/made: + + * UVM is again allowed to make full use of the GPUs under its + control, as well as of the GPU driver + * UVM channels are running normally + * User channels are still preempted and disabled + * User threads are again running, but still cannot issue system + calls to the GPU driver, nor submit new work + + Upon return from this function, UVM is expected to be fully functional. 
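+
+ A minimal conforming callback might look like the following sketch (the
+ function name is hypothetical; a real UVM driver would re-enable its use of
+ the GPUs here):
+
+ static NV_STATUS uvmExampleEventResume(void)
+ {
+ return NV_OK;
+ }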
+*/ +typedef NV_STATUS (*uvmEventResume_t) (void); + +/******************************************************************************* + uvmEventStartDevice + This function will be called by the GPU driver once it has finished its + initialization to tell the UVM driver that this GPU has come up. +*/ +typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct); + +/******************************************************************************* + uvmEventStopDevice + This function will be called by the GPU driver to let UVM know that a GPU + is going down. +*/ +typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct); + +#if defined (_WIN32) +/******************************************************************************* + uvmEventWddmResetDuringTimeout + This function will be called by KMD in a TDR servicing path to unmap channel + resources and to destroy channels. This is a Windows specific event. +*/ +typedef NV_STATUS (*uvmEventWddmResetDuringTimeout_t) (const NvProcessorUuid *pGpuUuidStruct); + +/******************************************************************************* + uvmEventWddmRestartAfterTimeout + This function will be called by KMD in a TDR servicing path to map channel + resources and to create channels. This is a Windows specific event. +*/ +typedef NV_STATUS (*uvmEventWddmRestartAfterTimeout_t) (const NvProcessorUuid *pGpuUuidStruct); + +/******************************************************************************* + uvmEventServiceInterrupt + This function gets called from RM's intr service routine when an interrupt + to service a page fault is triggered. +*/ +typedef NV_STATUS (*uvmEventServiceInterrupt_t) (void *pDeviceObject, + NvU32 deviceId, NvU32 subdeviceId); +#endif + +/******************************************************************************* + uvmEventIsrTopHalf_t + This function will be called by the GPU driver to let UVM know + that an interrupt has occurred. + + Returns: + NV_OK if the UVM driver handled the interrupt + NV_ERR_NO_INTR_PENDING if the interrupt is not for the UVM driver +*/ +#if defined (__linux__) +typedef NV_STATUS (*uvmEventIsrTopHalf_t) (const NvProcessorUuid *pGpuUuidStruct); +#else +typedef void (*uvmEventIsrTopHalf_t) (void); +#endif + +struct UvmOpsUvmEvents +{ + uvmEventSuspend_t suspend; + uvmEventResume_t resume; + uvmEventStartDevice_t startDevice; + uvmEventStopDevice_t stopDevice; + uvmEventIsrTopHalf_t isrTopHalf; +#if defined (_WIN32) + uvmEventWddmResetDuringTimeout_t wddmResetDuringTimeout; + uvmEventWddmRestartAfterTimeout_t wddmRestartAfterTimeout; + uvmEventServiceInterrupt_t serviceInterrupt; +#endif +}; + +typedef struct UvmGpuFaultInfo_tag +{ + struct + { + // Register mappings obtained from RM + volatile NvU32* pFaultBufferGet; + volatile NvU32* pFaultBufferPut; + // Note: this variable is deprecated since buffer overflow is not a separate + // register from future chips. + volatile NvU32* pFaultBufferInfo; + volatile NvU32* pPmcIntr; + volatile NvU32* pPmcIntrEnSet; + volatile NvU32* pPmcIntrEnClear; + volatile NvU32* pPrefetchCtrl; + NvU32 replayableFaultMask; + // fault buffer cpu mapping and size + void* bufferAddress; + NvU32 bufferSize; + } replayable; + struct + { + // Shadow buffer for non-replayable faults on cpu memory. 
Resman copies + // here the non-replayable faults that need to be handled by UVM + void* shadowBufferAddress; + + // Execution context for the queue associated with the fault buffer + void* shadowBufferContext; + + // Fault buffer size + NvU32 bufferSize; + + // Preallocated stack for functions called from the UVM isr top half + void *isr_sp; + + // Preallocated stack for functions called from the UVM isr bottom half + void *isr_bh_sp; + } nonReplayable; + NvHandle faultBufferHandle; +} UvmGpuFaultInfo; + +typedef struct UvmGpuPagingChannel_tag +{ + struct gpuDevice *device; + NvNotification *errorNotifier; + NvHandle channelHandle; + NvHandle errorNotifierHandle; + void *pushStreamSp; +} UvmGpuPagingChannel, *UvmGpuPagingChannelHandle; + +typedef struct UvmGpuAccessCntrInfo_tag +{ + // Register mappings obtained from RM + // pointer to the Get register for the access counter buffer + volatile NvU32* pAccessCntrBufferGet; + // pointer to the Put register for the access counter buffer + volatile NvU32* pAccessCntrBufferPut; + // pointer to the Full register for the access counter buffer + volatile NvU32* pAccessCntrBufferFull; + // pointer to the hub interrupt + volatile NvU32* pHubIntr; + // pointer to interrupt enable register + volatile NvU32* pHubIntrEnSet; + // pointer to interrupt disable register + volatile NvU32* pHubIntrEnClear; + // mask for the access counter buffer + NvU32 accessCounterMask; + // access counter buffer cpu mapping and size + void* bufferAddress; + NvU32 bufferSize; + NvHandle accessCntrBufferHandle; + + // The Notification address in the access counter notification msg does not + // contain the correct upper bits 63-47 for GPA-based notifications. RM + // provides us with the correct offset to be added. + // See Bug 1803015 + NvU64 baseDmaSysmemAddr; +} UvmGpuAccessCntrInfo; + +typedef enum +{ + UVM_ACCESS_COUNTER_GRANULARITY_64K = 1, + UVM_ACCESS_COUNTER_GRANULARITY_2M = 2, + UVM_ACCESS_COUNTER_GRANULARITY_16M = 3, + UVM_ACCESS_COUNTER_GRANULARITY_16G = 4, +} UVM_ACCESS_COUNTER_GRANULARITY; + +typedef enum +{ + UVM_ACCESS_COUNTER_USE_LIMIT_NONE = 1, + UVM_ACCESS_COUNTER_USE_LIMIT_QTR = 2, + UVM_ACCESS_COUNTER_USE_LIMIT_HALF = 3, + UVM_ACCESS_COUNTER_USE_LIMIT_FULL = 4, +} UVM_ACCESS_COUNTER_USE_LIMIT; + +typedef struct UvmGpuAccessCntrConfig_tag +{ + NvU32 mimcGranularity; + + NvU32 momcGranularity; + + NvU32 mimcUseLimit; + + NvU32 momcUseLimit; + + NvU32 threshold; +} UvmGpuAccessCntrConfig; + +typedef UvmGpuChannelInfo gpuChannelInfo; +typedef UvmGpuChannelAllocParams gpuChannelAllocParams; +typedef UvmGpuCaps gpuCaps; +typedef UvmGpuCopyEngineCaps gpuCeCaps; +typedef UvmGpuCopyEnginesCaps gpuCesCaps; +typedef UvmGpuP2PCapsParams getP2PCapsParams; +typedef UvmGpuAddressSpaceInfo gpuAddressSpaceInfo; +typedef UvmGpuAllocInfo gpuAllocInfo; +typedef UvmGpuInfo gpuInfo; +typedef UvmGpuClientInfo gpuClientInfo; +typedef UvmGpuAccessCntrInfo gpuAccessCntrInfo; +typedef UvmGpuAccessCntrConfig gpuAccessCntrConfig; +typedef UvmGpuFaultInfo gpuFaultInfo; +typedef UvmGpuMemoryInfo gpuMemoryInfo; +typedef UvmGpuExternalMappingInfo gpuExternalMappingInfo; +typedef UvmGpuChannelResourceInfo gpuChannelResourceInfo; +typedef UvmGpuChannelInstanceInfo gpuChannelInstanceInfo; +typedef UvmGpuChannelResourceBindParams gpuChannelResourceBindParams; +typedef UvmGpuFbInfo gpuFbInfo; +typedef UvmGpuEccInfo gpuEccInfo; +typedef UvmGpuPagingChannel *gpuPagingChannelHandle; +typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo; +typedef UvmGpuPagingChannelAllocParams 
gpuPagingChannelAllocParams; +typedef UvmPmaAllocationOptions gpuPmaAllocationOptions; + + + + + + + +#endif // _NV_UVM_TYPES_H_ diff --git a/kernel-open/common/inc/nvgputypes.h b/kernel-open/common/inc/nvgputypes.h new file mode 100644 index 000000000..59ba45b3d --- /dev/null +++ b/kernel-open/common/inc/nvgputypes.h @@ -0,0 +1,179 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2006 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + /***************************************************************************\ +|* *| +|* NV GPU Types *| +|* *| +|* This header contains definitions describing NVIDIA's GPU hardware state. *| +|* *| + \***************************************************************************/ + + +#ifndef NVGPUTYPES_INCLUDED +#define NVGPUTYPES_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + + /***************************************************************************\ +|* NvNotification *| + \***************************************************************************/ + +/***** NvNotification Structure *****/ +/* + * NV objects return information about method completion to clients via an + * array of notification structures in main memory. + * + * The client sets the status field to NV???_NOTIFICATION_STATUS_IN_PROGRESS. + * NV fills in the NvNotification[] data structure in the following order: + * timeStamp, otherInfo32, otherInfo16, and then status. + */ + +/* memory data structures */ +typedef volatile struct NvNotificationRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvV32 info32; /* info returned depends on method 0008-000b*/ + NvV16 info16; /* info returned depends on method 000c-000d*/ + NvV16 status; /* user sets bit 15, NV sets status 000e-000f*/ +} NvNotification; + + /***************************************************************************\ +|* NvGpuSemaphore *| + \***************************************************************************/ + +/***** NvGpuSemaphore Structure *****/ +/* + * NvGpuSemaphore objects are used by the GPU to synchronize multiple + * command-streams. + * + * Please refer to class documentation for details regarding the content of + * the data[] field. 
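+ *
+ * Purely as a layout illustration (the mapping helper is hypothetical, and
+ * the meaning of data[] is defined by the class in use):
+ *
+ *     volatile NvGpuSemaphore *sem = exampleMapSemaphoreMemory();
+ *     NvV32 payload = sem->data[0];   // 32-bit payload/report word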
+ */ + +/* memory data structures */ +typedef volatile struct NvGpuSemaphoreRec { + NvV32 data[2]; /* Payload/Report data 0000-0007*/ + struct { /* 0008- */ + NvV32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 8- f*/ + } timeStamp; /* -000f*/ +} NvGpuSemaphore; + + /***************************************************************************\ +|* NvGetReport *| + \***************************************************************************/ + +/* + * NV objects, starting with Kelvin, return information such as pixel counts to + * the user via the NV*_GET_REPORT method. + * + * The client fills in the "zero" field to any nonzero value and waits until it + * becomes zero. NV fills in the timeStamp, value, and zero fields. + */ +typedef volatile struct NVGetReportRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 value; /* info returned depends on method 0008-000b*/ + NvU32 zero; /* always written to zero 000c-000f*/ +} NvGetReport; + + /***************************************************************************\ +|* NvRcNotification *| + \***************************************************************************/ + +/* + * NV robust channel notification information is reported to clients via + * standard NV01_EVENT objects bound to instance of the NV*_CHANNEL_DMA and + * NV*_CHANNEL_GPFIFO objects. + */ +typedef struct NvRcNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 exceptLevel; /* exception level 000c-000f*/ + NvU32 exceptType; /* exception type 0010-0013*/ +} NvRcNotification; + + /***************************************************************************\ +|* NvSyncPointFence *| + \***************************************************************************/ + +/***** NvSyncPointFence Structure *****/ +/* + * NvSyncPointFence objects represent a syncpoint event. The syncPointID + * identifies the syncpoint register and the value is the value that the + * register will contain right after the event occurs. + * + * If syncPointID contains NV_INVALID_SYNCPOINT_ID then this is an invalid + * event. This is often used to indicate an event in the past (i.e. no need to + * wait). + * + * For more info on syncpoints refer to Mobile channel and syncpoint + * documentation. + */ +typedef struct NvSyncPointFenceRec { + NvU32 syncPointID; + NvU32 value; +} NvSyncPointFence; + +#define NV_INVALID_SYNCPOINT_ID ((NvU32)-1) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +#if !defined(XAPIGEN) /* NvOffset is XAPIGEN builtin type, so skip typedef */ +typedef NvU64 NvOffset; /* GPU address */ +#endif + +#define NvOffset_HI32(n) ((NvU32)(((NvU64)(n)) >> 32)) +#define NvOffset_LO32(n) ((NvU32)((NvU64)(n))) + +/* +* There are two types of GPU-UUIDs available: +* +* (1) a SHA-256 based 32 byte ID, formatted as a 64 character +* hexadecimal string as "GPU-%16x-%08x-%08x-%08x-%024x"; this is +* deprecated. +* +* (2) a SHA-1 based 16 byte ID, formatted as a 32 character +* hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the +* canonical format of a UUID); this is the default. 
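+*
+* Purely as a sizing illustration for the SHA-1 based format: the string is
+* "GPU-" plus 32 hexadecimal digits and 4 dashes, so a 41 byte buffer
+* (including the terminating NUL) is enough. The snippet below assumes
+* <stdio.h> and hypothetical integer fields g1..g5 holding the five
+* dash-separated groups (the last group is 48 bits wide):
+*
+*     char uuidStr[4 + 32 + 4 + 1];
+*     snprintf(uuidStr, sizeof(uuidStr), "GPU-%08x-%04x-%04x-%04x-%012llx",
+*              g1, g2, g3, g4, (unsigned long long)g5);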
+*/ +#define NV_GPU_UUID_SHA1_LEN (16) +#define NV_GPU_UUID_SHA256_LEN (32) +#define NV_GPU_UUID_LEN NV_GPU_UUID_SHA1_LEN + +#ifdef __cplusplus +}; +#endif + +#endif /* NVGPUTYPES_INCLUDED */ diff --git a/kernel-open/common/inc/nvkms-api-types.h b/kernel-open/common/inc/nvkms-api-types.h new file mode 100644 index 000000000..0f59c8304 --- /dev/null +++ b/kernel-open/common/inc/nvkms-api-types.h @@ -0,0 +1,533 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_API_TYPES_H) +#define NVKMS_API_TYPES_H + +#include +#include +#include + +#define NVKMS_MAX_SUBDEVICES NV_MAX_SUBDEVICES + +#define NVKMS_LEFT 0 +#define NVKMS_RIGHT 1 +#define NVKMS_MAX_EYES 2 + +#define NVKMS_MAIN_LAYER 0 +#define NVKMS_OVERLAY_LAYER 1 +#define NVKMS_MAX_LAYERS_PER_HEAD 8 + +#define NVKMS_MAX_PLANES_PER_SURFACE 3 + +#define NVKMS_DP_ADDRESS_STRING_LENGTH 64 + +#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff + +typedef NvU32 NvKmsDeviceHandle; +typedef NvU32 NvKmsDispHandle; +typedef NvU32 NvKmsConnectorHandle; +typedef NvU32 NvKmsSurfaceHandle; +typedef NvU32 NvKmsFrameLockHandle; +typedef NvU32 NvKmsDeferredRequestFifoHandle; +typedef NvU32 NvKmsSwapGroupHandle; +typedef NvU32 NvKmsVblankSyncObjectHandle; + +struct NvKmsSize { + NvU16 width; + NvU16 height; +}; + +struct NvKmsPoint { + NvU16 x; + NvU16 y; +}; + +struct NvKmsSignedPoint { + NvS16 x; + NvS16 y; +}; + +struct NvKmsRect { + NvU16 x; + NvU16 y; + NvU16 width; + NvU16 height; +}; + +/* + * A 3x3 row-major matrix. + * + * The elements are 32-bit single-precision IEEE floating point values. The + * floating point bit pattern should be stored in NvU32s to be passed into the + * kernel. 
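+ *
+ * As an illustrative sketch only (the helper name is hypothetical,
+ * <string.h> is assumed, and the initializer belongs in a function body
+ * since the calls are not constant expressions):
+ *
+ *     static inline NvU32 exampleF32Bits(float f)
+ *     {
+ *         NvU32 bits;
+ *         memcpy(&bits, &f, sizeof(bits));   // reinterpret the IEEE-754 bits
+ *         return bits;
+ *     }
+ *
+ *     struct NvKmsMatrix identity = {{
+ *         { exampleF32Bits(1.0f), exampleF32Bits(0.0f), exampleF32Bits(0.0f) },
+ *         { exampleF32Bits(0.0f), exampleF32Bits(1.0f), exampleF32Bits(0.0f) },
+ *         { exampleF32Bits(0.0f), exampleF32Bits(0.0f), exampleF32Bits(1.0f) },
+ *     }};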
+ */ +struct NvKmsMatrix { + NvU32 m[3][3]; +}; + +typedef enum { + NVKMS_CONNECTOR_TYPE_DP = 0, + NVKMS_CONNECTOR_TYPE_VGA = 1, + NVKMS_CONNECTOR_TYPE_DVI_I = 2, + NVKMS_CONNECTOR_TYPE_DVI_D = 3, + NVKMS_CONNECTOR_TYPE_ADC = 4, + NVKMS_CONNECTOR_TYPE_LVDS = 5, + NVKMS_CONNECTOR_TYPE_HDMI = 6, + NVKMS_CONNECTOR_TYPE_USBC = 7, + NVKMS_CONNECTOR_TYPE_DSI = 8, + NVKMS_CONNECTOR_TYPE_DP_SERIALIZER = 9, + NVKMS_CONNECTOR_TYPE_UNKNOWN = 10, + NVKMS_CONNECTOR_TYPE_MAX = NVKMS_CONNECTOR_TYPE_UNKNOWN, +} NvKmsConnectorType; + +static inline +const char *NvKmsConnectorTypeString(const NvKmsConnectorType connectorType) +{ + switch (connectorType) { + case NVKMS_CONNECTOR_TYPE_DP: return "DP"; + case NVKMS_CONNECTOR_TYPE_VGA: return "VGA"; + case NVKMS_CONNECTOR_TYPE_DVI_I: return "DVI-I"; + case NVKMS_CONNECTOR_TYPE_DVI_D: return "DVI-D"; + case NVKMS_CONNECTOR_TYPE_ADC: return "ADC"; + case NVKMS_CONNECTOR_TYPE_LVDS: return "LVDS"; + case NVKMS_CONNECTOR_TYPE_HDMI: return "HDMI"; + case NVKMS_CONNECTOR_TYPE_USBC: return "USB-C"; + case NVKMS_CONNECTOR_TYPE_DSI: return "DSI"; + case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: return "DP-SERIALIZER"; + default: break; + } + return "Unknown"; +} + +typedef enum { + NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA = 0, + NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS = 1, + NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS = 2, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DP = 3, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI = 4, + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN = 5, + NVKMS_CONNECTOR_SIGNAL_FORMAT_MAX = + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN, +} NvKmsConnectorSignalFormat; + +/*! + * Description of Notifiers and Semaphores (Non-isochronous (NISO) surfaces). + * + * When flipping, the client can optionally specify a notifier and/or + * a semaphore to use with the flip. The surfaces used for these + * should be registered with NVKMS to get an NvKmsSurfaceHandle. + * + * NvKmsNIsoSurface::offsetInWords indicates the starting location, in + * 32-bit words, within the surface where EVO should write the + * notifier or semaphore. Note that only the first 4096 bytes of a + * surface can be used by semaphores or notifiers; offsetInWords must + * allow for the semaphore or notifier to be written within the first + * 4096 bytes of the surface. I.e., this must be satisfied: + * + * ((offsetInWords * 4) + elementSizeInBytes) <= 4096 + * + * Where elementSizeInBytes is: + * + * if NISO_FORMAT_FOUR_WORD*, elementSizeInBytes = 16 + * if NISO_FORMAT_LEGACY, + * if overlay && notifier, elementSizeInBytes = 16 + * else, elementSizeInBytes = 4 + * + * Note that different GPUs support different semaphore and notifier formats. + * Check NvKmsAllocDeviceReply::validNIsoFormatMask to determine which are + * valid for the given device. + * + * Note also that FOUR_WORD and FOUR_WORD_NVDISPLAY are the same size, but + * FOUR_WORD uses a format compatible with display class 907[ce], and + * FOUR_WORD_NVDISPLAY uses a format compatible with c37e (actually defined by + * the NV_DISP_NOTIFIER definition in clc37d.h). 
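+ *
+ * As a purely illustrative check, using the 16 byte element size of the
+ * FOUR_WORD formats described above:
+ *
+ *     NvBool offsetOk = ((offsetInWords * 4) + 16) <= 4096;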
+ */ +enum NvKmsNIsoFormat { + NVKMS_NISO_FORMAT_LEGACY, + NVKMS_NISO_FORMAT_FOUR_WORD, + NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY, +}; + +enum NvKmsEventType { + NVKMS_EVENT_TYPE_DPY_CHANGED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED, + NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FLIP_OCCURRED, +}; + +typedef enum { + NV_EVO_SCALER_1TAP = 0, + NV_EVO_SCALER_2TAPS = 1, + NV_EVO_SCALER_3TAPS = 2, + NV_EVO_SCALER_5TAPS = 3, + NV_EVO_SCALER_8TAPS = 4, + NV_EVO_SCALER_TAPS_MIN = NV_EVO_SCALER_1TAP, + NV_EVO_SCALER_TAPS_MAX = NV_EVO_SCALER_8TAPS, +} NVEvoScalerTaps; + +/* This structure describes the scaling bounds for a given layer. */ +struct NvKmsScalingUsageBounds { + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 maxVDownscaleFactor; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for maxVDownscaleFactor. + */ + NvU16 maxHDownscaleFactor; + + /* Maximum vertical taps allowed */ + NVEvoScalerTaps vTaps; + + /* Whether vertical upscaling is allowed */ + NvBool vUpscalingAllowed; +}; + +struct NvKmsUsageBounds { + struct { + NvBool usable; + struct NvKmsScalingUsageBounds scaling; + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +/* + * A 3x4 row-major colorspace conversion matrix. + * + * The output color C' is the CSC matrix M times the column vector + * [ R, G, B, 1 ]. + * + * Each entry in the matrix is a signed 2's-complement fixed-point number with + * 3 integer bits and 16 fractional bits. + */ +struct NvKmsCscMatrix { + NvS32 m[3][4]; +}; + +#define NVKMS_IDENTITY_CSC_MATRIX \ + (struct NvKmsCscMatrix){{ \ + { 0x10000, 0, 0, 0 }, \ + { 0, 0x10000, 0, 0 }, \ + { 0, 0, 0x10000, 0 } \ + }} + +/*! + * A color key match bit used in the blend equations and one can select the src + * or dst Color Key when blending. Assert key bit means match, de-assert key + * bit means nomatch. + * + * The src Color Key means using the key bit from the current layer, the dst + * Color Key means using key bit from the previous layer composition stage. The + * src or dst key bit will be inherited by blended pixel for the preparation of + * next blending, as dst Color Key. + * + * src: Forward the color key match bit from the current layer pixel to next layer + * composition stage. + * + * dst: Forward the color key match bit from the previous composition stage + * pixel to next layer composition stage. + * + * disable: Forward “1” to the next layer composition stage as the color key. + */ +enum NvKmsCompositionColorKeySelect { + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE = 0, + NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC, + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST, +}; + +#define NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS 3 + +/*! + * Composition modes used for surfaces in general. + * The various types of composition are: + * + * Opaque: source pixels are opaque regardless of alpha, + * and will occlude the destination pixel. + * + * Alpha blending: aka opacity, which could be specified + * for a surface in its entirety, or on a per-pixel basis. + * + * Non-premultiplied: alpha value applies to source pixel, + * and also counter-weighs the destination pixel. + * Premultiplied: alpha already applied to source pixel, + * so it only counter-weighs the destination pixel. 
+ * + * Color keying: use a color key structure to decide + * the criteria for matching and compositing. + * (See NVColorKey below.) + */ +enum NvKmsCompositionBlendingMode { + /*! + * Modes that use no other parameters. + */ + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE, + + /*! + * Mode that ignores both per-pixel alpha provided + * by client and the surfaceAlpha, makes source pixel + * totally transparent. + */ + NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT, + + /*! + * Modes that use per-pixel alpha provided by client, + * and the surfaceAlpha must be set to 0. + */ + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA, + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA, + + /*! + * These use both the surface-wide and per-pixel alpha values. + * surfaceAlpha is treated as numerator ranging from 0 to 255 + * of a fraction whose denominator is 255. + */ + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA, + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA, +}; + +static inline NvBool +NvKmsIsCompositionModeUseAlpha(enum NvKmsCompositionBlendingMode mode) +{ + return mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; +} + +/*! + * Abstract description of a color key. + * + * a, r, g, and b are component values in the same width as the framebuffer + * values being scanned out. + * + * match[ARGB] defines whether that component is considered when matching the + * color key -- TRUE means that the value of the corresponding component must + * match the given value for the given pixel to be considered a 'key match'; + * FALSE means that the value of that component is not a key match criterion. + */ +typedef struct { + NvU16 a, r, g, b; + NvBool matchA, matchR, matchG, matchB; +} NVColorKey; + +/*! + * Describes the composition parameters for the single layer. + */ +struct NvKmsCompositionParams { + enum NvKmsCompositionColorKeySelect colorKeySelect; + NVColorKey colorKey; + /* + * It is possible to assign different blending mode for match pixels and + * nomatch pixels. blendingMode[0] is used to blend a pixel with the color key + * match bit "0", and blendingMode[1] is used to blend a pixel with the color + * key match bit "1". + * + * But because of the hardware restrictions match and nomatch pixels can + * not use blending mode PREMULT_ALPHA, NON_PREMULT_ALPHA, + * PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA at once. + */ + enum NvKmsCompositionBlendingMode blendingMode[2]; + NvU8 surfaceAlpha; /* Applies to all pixels of entire surface */ + /* + * Defines the composition order. A smaller value moves the layer closer to + * the top (away from the background). No need to pick consecutive values, + * requirements are that the value should be different for each of the + * layers owned by the head and the value for the main layer should be + * the greatest one. + * + * Cursor always remains at the top of all other layers, this parameter + * has no effect on cursor. NVKMS assigns default depth to each of the + * supported layers, by default depth of the layer is calculated as + * (NVKMS_MAX_LAYERS_PER_HEAD - index of the layer). If depth is set to + * '0' then default depth value will get used. + */ + NvU8 depth; +}; + +/*! + * Describes the composition capabilities supported by the hardware for + * cursor or layer. 
It describes supported the color key selects and for each + * of the supported color key selects it describes supported blending modes + * for match and nomatch pixles. + */ +struct NvKmsCompositionCapabilities { + + struct { + /* + * A bitmask of the supported blending modes for match and nomatch + * pixels. It should be the bitwise 'or' of one or more + * NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_*) values. + */ + NvU32 supportedBlendModes[2]; + } colorKeySelect[NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS]; + + /* + * A bitmask of the supported color key selects. + * + * It should be the bitwise 'or' of one or more + * NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_*) + * values. + */ + NvU32 supportedColorKeySelects; +}; + +struct NvKmsLayerCapabilities { + /*! + * Whether Layer supports the window mode. If window mode is supported, + * then clients can set the layer's dimensions so that they're smaller than + * the viewport, and can also change the output position of the layer to a + * non-(0, 0) position. + * + * NOTE: Dimension changes are currently unsupported for the main layer, + * and output position changes for the main layer are currently only + * supported via IOCTL_SET_LAYER_POSITION but not via flips. Support for + * these is coming soon, via changes to flip code. + */ + NvBool supportsWindowMode :1; + + /*! + * Whether layer supports HDR pipe. + */ + NvBool supportsHDR :1; + + + /*! + * Describes the supported Color Key selects and blending modes for + * match and nomatch layer pixels. + */ + struct NvKmsCompositionCapabilities composition; + + /*! + * Which NvKmsSurfaceMemoryFormat enum values are supported by the NVKMS + * device on the given scanout surface layer. + * + * Iff a particular enum NvKmsSurfaceMemoryFormat 'value' is supported, + * then (1 << value) will be set in the appropriate bitmask. + * + * Note that these bitmasks just report the static SW/HW capabilities, + * and are a superset of the formats that IMP may allow. Clients are + * still expected to honor the NvKmsUsageBounds for each head. + */ + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); +}; + +/*! + * Surface layouts. + * + * BlockLinear is the NVIDIA GPU native tiling format, arranging pixels into + * blocks or tiles for better locality during common GPU operations. + * + * Pitch is the naive "linear" surface layout with pixels laid out sequentially + * in memory line-by-line, optionally with some padding at the end of each line + * for alignment purposes. + */ +enum NvKmsSurfaceMemoryLayout { + NvKmsSurfaceMemoryLayoutBlockLinear = 0, + NvKmsSurfaceMemoryLayoutPitch = 1, +}; + +static inline const char *NvKmsSurfaceMemoryLayoutToString( + enum NvKmsSurfaceMemoryLayout layout) +{ + switch (layout) { + default: + return "Unknown"; + case NvKmsSurfaceMemoryLayoutBlockLinear: + return "BlockLinear"; + case NvKmsSurfaceMemoryLayoutPitch: + return "Pitch"; + } +} + +typedef enum { + MUX_STATE_GET = 0, + MUX_STATE_INTEGRATED = 1, + MUX_STATE_DISCRETE = 2, + MUX_STATE_UNKNOWN = 3, +} NvMuxState; + +enum NvKmsRotation { + NVKMS_ROTATION_0 = 0, + NVKMS_ROTATION_90 = 1, + NVKMS_ROTATION_180 = 2, + NVKMS_ROTATION_270 = 3, + NVKMS_ROTATION_MIN = NVKMS_ROTATION_0, + NVKMS_ROTATION_MAX = NVKMS_ROTATION_270, +}; + +struct NvKmsRRParams { + enum NvKmsRotation rotation; + NvBool reflectionX; + NvBool reflectionY; +}; + +/*! + * Convert each possible NvKmsRRParams to a unique integer [0..15], + * so that we can describe possible NvKmsRRParams with an NvU16 bitmask. + * + * E.g. 
+ * rotation = 0, reflectionX = F, reflectionY = F == 0|0|0 == 0 + * ... + * rotation = 270, reflectionX = T, reflectionY = T == 3|4|8 == 15 + */ +static inline NvU8 NvKmsRRParamsToCapBit(const struct NvKmsRRParams *rrParams) +{ + NvU8 bitPosition = (NvU8)rrParams->rotation; + if (rrParams->reflectionX) { + bitPosition |= NVBIT(2); + } + if (rrParams->reflectionY) { + bitPosition |= NVBIT(3); + } + return bitPosition; +} + +/* + * NVKMS_MEMORY_ISO is used to tag surface memory that will be accessed via + * display's isochronous interface. Examples of this type of memory are pixel + * data and LUT entries. + * + * NVKMS_MEMORY_NISO is used to tag surface memory that will be accessed via + * display's non-isochronous interface. Examples of this type of memory are + * semaphores and notifiers. + */ +typedef enum { + NVKMS_MEMORY_ISO = 0, + NVKMS_MEMORY_NISO = 1, +} NvKmsMemoryIsoType; + +typedef struct { + NvBool coherent; + NvBool noncoherent; +} NvKmsDispIOCoherencyModes; + +#endif /* NVKMS_API_TYPES_H */ diff --git a/kernel-open/common/inc/nvkms-format.h b/kernel-open/common/inc/nvkms-format.h new file mode 100644 index 000000000..d1483f875 --- /dev/null +++ b/kernel-open/common/inc/nvkms-format.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_FORMAT_H) +#define NVKMS_FORMAT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* + * In order to interpret these pixel format namings, please take note of these + * conventions: + * - The Y8_U8__Y8_V8_N422 and U8_Y8__V8_Y8_N422 formats are both packed formats + * that have an interleaved chroma component across every two pixels. The + * double-underscore is a separator between these two pixel groups. + * - The triple-underscore is a separator between planes. + * - The 'N' suffix is a delimiter for the chroma decimation factor. + * + * As examples of the above rules: + * - The Y8_U8__Y8_V8_N422 format has one 8-bit luma component (Y8) and one + * 8-bit chroma component (U8) in pixel N, and one 8-bit luma component (Y8) + * and one 8-bit chroma component (V8) in pixel (N + 1). This format is + * 422-decimated since the U and V chroma samples are shared between each + * pair of adjacent pixels per line. 
+ * - The Y10___U10V10_N444 format has one plane of 10-bit luma (Y10) components, + * and another plane of 10-bit chroma components (U10V10). This format has no + * chroma decimation since the luma and chroma components are sampled at the + * same rate. + */ +enum NvKmsSurfaceMemoryFormat { + NvKmsSurfaceMemoryFormatI8 = 0, + NvKmsSurfaceMemoryFormatA1R5G5B5 = 1, + NvKmsSurfaceMemoryFormatX1R5G5B5 = 2, + NvKmsSurfaceMemoryFormatR5G6B5 = 3, + NvKmsSurfaceMemoryFormatA8R8G8B8 = 4, + NvKmsSurfaceMemoryFormatX8R8G8B8 = 5, + NvKmsSurfaceMemoryFormatA2B10G10R10 = 6, + NvKmsSurfaceMemoryFormatX2B10G10R10 = 7, + NvKmsSurfaceMemoryFormatA8B8G8R8 = 8, + NvKmsSurfaceMemoryFormatX8B8G8R8 = 9, + NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 = 10, + NvKmsSurfaceMemoryFormatR16G16B16A16 = 11, + NvKmsSurfaceMemoryFormatRF32GF32BF32AF32 = 12, + NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422 = 13, + NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422 = 14, + NvKmsSurfaceMemoryFormatY8___U8V8_N444 = 15, + NvKmsSurfaceMemoryFormatY8___V8U8_N444 = 16, + NvKmsSurfaceMemoryFormatY8___U8V8_N422 = 17, + NvKmsSurfaceMemoryFormatY8___V8U8_N422 = 18, + NvKmsSurfaceMemoryFormatY8___U8V8_N420 = 19, + NvKmsSurfaceMemoryFormatY8___V8U8_N420 = 20, + NvKmsSurfaceMemoryFormatY10___U10V10_N444 = 21, + NvKmsSurfaceMemoryFormatY10___V10U10_N444 = 22, + NvKmsSurfaceMemoryFormatY10___U10V10_N422 = 23, + NvKmsSurfaceMemoryFormatY10___V10U10_N422 = 24, + NvKmsSurfaceMemoryFormatY10___U10V10_N420 = 25, + NvKmsSurfaceMemoryFormatY10___V10U10_N420 = 26, + NvKmsSurfaceMemoryFormatY12___U12V12_N444 = 27, + NvKmsSurfaceMemoryFormatY12___V12U12_N444 = 28, + NvKmsSurfaceMemoryFormatY12___U12V12_N422 = 29, + NvKmsSurfaceMemoryFormatY12___V12U12_N422 = 30, + NvKmsSurfaceMemoryFormatY12___U12V12_N420 = 31, + NvKmsSurfaceMemoryFormatY12___V12U12_N420 = 32, + NvKmsSurfaceMemoryFormatY8___U8___V8_N444 = 33, + NvKmsSurfaceMemoryFormatY8___U8___V8_N420 = 34, + NvKmsSurfaceMemoryFormatMin = NvKmsSurfaceMemoryFormatI8, + NvKmsSurfaceMemoryFormatMax = NvKmsSurfaceMemoryFormatY8___U8___V8_N420, +}; + +typedef struct NvKmsSurfaceMemoryFormatInfo { + enum NvKmsSurfaceMemoryFormat format; + const char *name; + NvU8 depth; + NvBool isYUV; + NvU8 numPlanes; + + union { + struct { + NvU8 bytesPerPixel; + NvU8 bitsPerPixel; + } rgb; + + struct { + NvU8 depthPerComponent; + NvU8 storageBitsPerComponent; + NvU8 horizChromaDecimationFactor; + NvU8 vertChromaDecimationFactor; + } yuv; + }; +} NvKmsSurfaceMemoryFormatInfo; + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format); + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_FORMAT_H */ diff --git a/kernel-open/common/inc/nvkms-kapi.h b/kernel-open/common/inc/nvkms-kapi.h new file mode 100644 index 000000000..b9fdd1b7c --- /dev/null +++ b/kernel-open/common/inc/nvkms-kapi.h @@ -0,0 +1,1061 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__NVKMS_KAPI_H__) + +#include "nvtypes.h" + +#include "nv-gpu-info.h" +#include "nvkms-api-types.h" +#include "nvkms-format.h" + +#define __NVKMS_KAPI_H__ + +#define NVKMS_KAPI_MAX_HEADS 4 + +#define NVKMS_KAPI_MAX_CONNECTORS 16 +#define NVKMS_KAPI_MAX_CLONE_DISPLAYS 16 + +#define NVKMS_KAPI_EDID_BUFFER_SIZE 2048 + +#define NVKMS_KAPI_MODE_NAME_LEN 32 + +/** + * \defgroup Objects + * @{ + */ + +struct NvKmsKapiDevice; +struct NvKmsKapiMemory; +struct NvKmsKapiSurface; +struct NvKmsKapiChannelEvent; + +typedef NvU32 NvKmsKapiConnector; +typedef NvU32 NvKmsKapiDisplay; + +/** @} */ + +/** + * \defgroup FuncPtrs + * @{ + */ + +/* + * Note: The channel event proc should not call back into NVKMS-KAPI driver. + * The callback into NVKMS-KAPI from the channel event proc, may cause + * deadlock. 
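+ *
+ * Illustrative sketch only (the deferral helper is hypothetical): an event
+ * proc should hand the real work off to another context rather than calling
+ * back into NVKMS-KAPI directly, e.g.:
+ *
+ *     static void exampleChannelEventProc(void *dataPtr, NvU32 dataU32)
+ *     {
+ *         // Queue the handling elsewhere; no NVKMS-KAPI calls from here.
+ *         exampleScheduleDeferredWork(dataPtr, dataU32);
+ *     }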
+ */ +typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32); + +/** @} */ + +/** + * \defgroup Structs + * @{ + */ + +struct NvKmsKapiDisplayModeTimings { + + NvU32 refreshRate; + NvU32 pixelClockHz; + NvU32 hVisible; + NvU32 hSyncStart; + NvU32 hSyncEnd; + NvU32 hTotal; + NvU32 hSkew; + NvU32 vVisible; + NvU32 vSyncStart; + NvU32 vSyncEnd; + NvU32 vTotal; + + struct { + + NvU32 interlaced : 1; + NvU32 doubleScan : 1; + NvU32 hSyncPos : 1; + NvU32 hSyncNeg : 1; + NvU32 vSyncPos : 1; + NvU32 vSyncNeg : 1; + + } flags; + + NvU32 widthMM; + NvU32 heightMM; + +}; + +struct NvKmsKapiDisplayMode { + struct NvKmsKapiDisplayModeTimings timings; + char name[NVKMS_KAPI_MODE_NAME_LEN]; +}; + +#define NVKMS_KAPI_LAYER_MAX 8 + +#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff +#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0 + +struct NvKmsKapiDeviceResourcesInfo { + + NvU32 numHeads; + NvU32 numLayers[NVKMS_KAPI_MAX_HEADS]; + + NvU32 numConnectors; + NvKmsKapiConnector connectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + + struct { + NvU32 validCursorCompositionModes; + NvU64 supportedCursorSurfaceMemoryFormats; + + struct { + NvU16 validRRTransforms; + NvU32 validCompositionModes; + } layer[NVKMS_KAPI_LAYER_MAX]; + + NvU32 minWidthInPixels; + NvU32 maxWidthInPixels; + + NvU32 minHeightInPixels; + NvU32 maxHeightInPixels; + + NvU32 maxCursorSizeInPixels; + + NvU32 pitchAlignment; + + NvU32 hasVideoMemory; + + NvU8 genericPageKind; + + NvBool supportsSyncpts; + } caps; + + NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX]; +}; + +#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType)) + +typedef enum NvKmsKapiMappingTypeRec { + NVKMS_KAPI_MAPPING_TYPE_USER = 1, + NVKMS_KAPI_MAPPING_TYPE_KERNEL = 2, +} NvKmsKapiMappingType; + +struct NvKmsKapiConnectorInfo { + + NvKmsKapiConnector handle; + + NvU32 physicalIndex; + + NvU32 headMask; + + NvKmsConnectorSignalFormat signalFormat; + NvKmsConnectorType type; + + /* + * List of connectors, not possible to serve together with this connector + * because they are competing for same resources. + */ + NvU32 numIncompatibleConnectors; + NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + +}; + +struct NvKmsKapiStaticDisplayInfo { + + NvKmsKapiDisplay handle; + + NvKmsKapiConnector connectorHandle; + + /* Set for DisplayPort MST displays (dynamic displays) */ + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + + NvBool internal; + + /* List of potential sibling display for cloning */ + NvU32 numPossibleClones; + NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + +}; + +struct NvKmsKapiSyncpt { + + /*! + * Possible syncpt use case in kapi. + * For pre-syncpt, use only id and value + * and for post-syncpt, use only fd. 
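+     *
+     * Illustrative usage sketch (the id/value below are hypothetical); when a
+     * post-syncpt is requested, its file descriptor is returned separately
+     * through NvKmsKapiLayerReplyConfig::postSyncptFd:
+     *
+     *     struct NvKmsKapiSyncpt s = { 0 };
+     *     s.preSyncptSpecified  = NV_TRUE;   // wait for id/value first
+     *     s.preSyncptId         = exampleId;
+     *     s.preSyncptValue      = exampleThreshold;
+     *     s.postSyncptRequested = NV_TRUE;   // ask for a post-syncpt fd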
+ */ + NvBool preSyncptSpecified; + NvU32 preSyncptId; + NvU32 preSyncptValue; + + NvBool postSyncptRequested; +}; + +struct NvKmsKapiLayerConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + struct NvKmsRRParams rrParams; + struct NvKmsKapiSyncpt syncptParams; + + NvU8 minPresentInterval; + NvBool tearing; + + NvU16 srcX, srcY; + NvU16 srcWidth, srcHeight; + + NvS16 dstX, dstY; + NvU16 dstWidth, dstHeight; +}; + +struct NvKmsKapiLayerRequestedConfig { + struct NvKmsKapiLayerConfig config; + struct { + NvBool surfaceChanged : 1; + NvBool srcXYChanged : 1; + NvBool srcWHChanged : 1; + NvBool dstXYChanged : 1; + NvBool dstWHChanged : 1; + } flags; +}; + +struct NvKmsKapiCursorRequestedConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + + NvS16 dstX, dstY; + + struct { + NvBool surfaceChanged : 1; + NvBool dstXYChanged : 1; + } flags; +}; + +struct NvKmsKapiHeadModeSetConfig { + /* + * DRM distinguishes between the head state "enabled" (the specified + * configuration for the head is valid, its resources are allocated, + * etc, but the head may not necessarily be currently driving pixels + * to its output resource) and the head state "active" (the head is + * "enabled" _and_ the head is actively driving pixels to its output + * resource). + * + * This distinction is for DPMS: + * + * DPMS On : enabled=true, active=true + * DPMS Off : enabled=true, active=false + * + * "Enabled" state is indicated by numDisplays != 0. + * "Active" state is indicated by bActive == true. + */ + NvBool bActive; + + NvU32 numDisplays; + NvKmsKapiDisplay displays[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + + struct NvKmsKapiDisplayMode mode; +}; + +struct NvKmsKapiHeadRequestedConfig { + struct NvKmsKapiHeadModeSetConfig modeSetConfig; + struct { + NvBool activeChanged : 1; + NvBool displaysChanged : 1; + NvBool modeChanged : 1; + } flags; + + struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig; + + struct NvKmsKapiLayerRequestedConfig + layerRequestedConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiRequestedModeSetConfig { + NvU32 headsMask; + struct NvKmsKapiHeadRequestedConfig + headRequestedConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiLayerReplyConfig { + int postSyncptFd; +}; + +struct NvKmsKapiHeadReplyConfig { + struct NvKmsKapiLayerReplyConfig + layerReplyConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiModeSetReplyConfig { + struct NvKmsKapiHeadReplyConfig + headReplyConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiEventDisplayChanged { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventDynamicDisplayConnected { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventFlipOccurred { + NvU32 head; + NvU32 layer; +}; + +struct NvKmsKapiDpyCRC32 { + NvU32 value; + NvBool supported; +}; + +struct NvKmsKapiCrcs { + struct NvKmsKapiDpyCRC32 compositorCrc32; + struct NvKmsKapiDpyCRC32 rasterGeneratorCrc32; + struct NvKmsKapiDpyCRC32 outputCrc32; +}; + +struct NvKmsKapiEvent { + enum NvKmsEventType type; + + struct NvKmsKapiDevice *device; + + void *privateData; + + union { + struct NvKmsKapiEventDisplayChanged displayChanged; + struct NvKmsKapiEventDynamicDisplayConnected dynamicDisplayConnected; + struct NvKmsKapiEventFlipOccurred flipOccurred; + } u; +}; + +struct NvKmsKapiAllocateDeviceParams { + /* [IN] GPU ID obtained from enumerateGpus() */ + NvU32 gpuId; + + /* [IN] Private data of device allocator */ + void 
*privateData; + /* [IN] Event callback */ + void (*eventCallback)(const struct NvKmsKapiEvent *event); +}; + +struct NvKmsKapiDynamicDisplayParams { + /* [IN] Display Handle returned by getDisplays() */ + NvKmsKapiDisplay handle; + + /* [OUT] Connection status */ + NvU32 connected; + + /* [IN/OUT] EDID of connected monitor/ Input to override EDID */ + struct { + NvU16 bufferSize; + NvU8 buffer[NVKMS_KAPI_EDID_BUFFER_SIZE]; + } edid; + + /* [IN] Set true to override EDID */ + NvBool overrideEdid; + + /* [IN] Set true to force connected status */ + NvBool forceConnected; + + /* [IN] Set true to force disconnect status */ + NvBool forceDisconnected; +}; + +struct NvKmsKapiCreateSurfaceParams { + + /* [IN] Parameter of each plane */ + struct { + /* [IN] Memory allocated for plane, using allocateMemory() */ + struct NvKmsKapiMemory *memory; + /* [IN] Offsets within the memory object */ + NvU32 offset; + /* [IN] Byte pitch of plane */ + NvU32 pitch; + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + /* [IN] Width of the surface, in pixels */ + NvU32 width; + /* [IN] Height of the surface, in pixels */ + NvU32 height; + + /* [IN] The format describing number of planes and their content */ + enum NvKmsSurfaceMemoryFormat format; + + /* [IN] Whether to override the surface objects memory layout parameters + * with those provided here. */ + NvBool explicit_layout; + /* [IN] Whether the surface layout is block-linear or pitch. Used only + * if explicit_layout is NV_TRUE */ + enum NvKmsSurfaceMemoryLayout layout; + /* [IN] block-linear block height of surface. Used only when + * explicit_layout is NV_TRUE and layout is + * NvKmsSurfaceMemoryLayoutBlockLinear */ + NvU8 log2GobsPerBlockY; +}; + +struct NvKmsKapiFunctionsTable { + + /*! + * NVIDIA Driver version string. + */ + const char *versionString; + + /*! + * System Information. + */ + struct { + /* Availability of write combining support for video memory */ + NvBool bAllowWriteCombining; + } systemInfo; + + /*! + * Enumerate the available physical GPUs that can be used with NVKMS. + * + * \param [out] gpuInfo The information of the enumerated GPUs. + * It is an array of NVIDIA_MAX_GPUS elements. + * + * \return Count of enumerated gpus. + */ + NvU32 (*enumerateGpus)(nv_gpu_info_t *gpuInfo); + + /*! + * Allocate an NVK device using which you can query/allocate resources on + * GPU and do modeset. + * + * \param [in] params Parameters required for device allocation. + * + * \return An valid device handle on success, NULL on failure. + */ + struct NvKmsKapiDevice* (*allocateDevice) + ( + const struct NvKmsKapiAllocateDeviceParams *params + ); + + /*! + * Frees a device allocated by allocateDevice() and all its resources. + * + * \param [in] device A device returned by allocateDevice(). + * This function is a no-op if device is not valid. + */ + void (*freeDevice)(struct NvKmsKapiDevice *device); + + /*! + * Grab ownership of device, ownership is required to do modeset. + * + * \param [in] device A device returned by allocateDevice(). + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*grabOwnership)(struct NvKmsKapiDevice *device); + + /*! + * Release ownership of device. + * + * \param [in] device A device returned by allocateDevice(). + */ + void (*releaseOwnership)(struct NvKmsKapiDevice *device); + + /*! + * Registers for notification, via + * NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified + * in interestMask. + * + * This call does nothing if eventCallback is NULL when NvKmsKapiDevice + * is allocated. 
+ * + * Supported events are DPY_CHANGED and DYNAMIC_DPY_CONNECTED. + * + * \param [in] device A device returned by allocateDevice(). + * + * \param [in] interestMask A mask of events requested to listen. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*declareEventInterest) + ( + const struct NvKmsKapiDevice *device, + const NvU32 interestMask + ); + + /*! + * Retrieve various static resources like connector, head etc. present on + * device and capacities. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in/out] info A pointer to an NvKmsKapiDeviceResourcesInfo + * struct that the call will fill out with number + * of resources and their handles. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getDeviceResourcesInfo) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDeviceResourcesInfo *info + ); + + /*! + * Retrieve the number of displays on a device and an array of handles to + * those displays. + * + * \param [in] device A device allocated using + * allocateDevice(). + * + * \param [in/out] displayCount The caller should set this to the size + * of the displayHandles array it passed + * in. The function will set it to the + * number of displays returned, or the + * total number of displays on the device + * if displayHandles is NULL or array size + * of less than number of number of displays. + * + * \param [out] displayHandles An array of display handles with + * displayCount entries. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getDisplays) + ( + struct NvKmsKapiDevice *device, + NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles + ); + + /*! + * Retrieve information about a specified connector. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] connector Which connector to query, handle return by + * getDeviceResourcesInfo(). + * + * \param [out] info A pointer to an NvKmsKapiConnectorInfo struct + * that the call will fill out with information + * about connector. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getConnectorInfo) + ( + struct NvKmsKapiDevice *device, + NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info + ); + + /*! + * Retrieve information about a specified display. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] display Which connector to query, handle return by + * getDisplays(). + * + * \param [out] info A pointer to an NvKmsKapiStaticDisplayInfo struct + * that the call will fill out with information + * about display. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getStaticDisplayInfo) + ( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info + ); + + /*! + * Detect/force connection status/EDID of display. + * + * \param [in/out] params Parameters containing display + * handle, EDID and flags to force connection + * status. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getDynamicDisplayInfo) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDynamicDisplayParams *params + ); + + /*! + * Allocate some unformatted video memory of the specified size. + * + * This function allocates video memory on the specified GPU. + * It should be suitable for mapping on the CPU as a pitch + * linear or block-linear surface. + * + * \param [in] device A device allocated using allocateDevice(). 
+ * + * \param [in] layout BlockLinear or Pitch. + * + * \param [in] size Size, in bytes, of the memory to allocate. + * + * \param [in/out] compressible For input, non-zero if compression + * backing store should be allocated for + * the memory, for output, non-zero if + * compression backing store was + * allocated for the memory. + * + * \return An valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*allocateVideoMemory) + ( + struct NvKmsKapiDevice *device, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + NvU8 *compressible + ); + + /*! + * Allocate some unformatted system memory of the specified size. + * + * This function allocates system memory . It should be suitable + * for mapping on the CPU as a pitch linear or block-linear surface. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] layout BlockLinear or Pitch. + * + * \param [in] size Size, in bytes, of the memory to allocate. + * + * \param [in/out] compressible For input, non-zero if compression + * backing store should be allocated for + * the memory, for output, non-zero if + * compression backing store was + * allocated for the memory. + * + * \return An valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*allocateSystemMemory) + ( + struct NvKmsKapiDevice *device, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + NvU8 *compressible + ); + + /*! + * Import some unformatted memory of the specified size. + * + * This function accepts a driver-specific parameter structure representing + * memory allocated elsewhere and imports it to a NVKMS KAPI memory object + * of the specified size. + * + * \param [in] device A device allocated using allocateDevice(). The + * memory being imported must have been allocated + * against the same physical device this device object + * represents. + * + * \param [in] size Size, in bytes, of the memory being imported. + * + * \param [in] nvKmsParamsUser Userspace pointer to driver-specific + * parameters describing the memory object being + * imported. + * + * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct. + * + * \return A valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*importMemory) + ( + struct NvKmsKapiDevice *device, NvU64 size, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize + ); + + /*! + * Duplicate an existing NVKMS KAPI memory object, taking a reference on the + * underlying memory. + * + * \param [in] device A device allocated using allocateDevice(). The + * memory being imported need not have been allocated + * against the same physical device this device object + * represents. + * + * \param [in] srcDevice The device associated with srcMemory. + * + * \param [in] srcMemory The memory object to duplicate. + * + * \return A valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*dupMemory) + ( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiDevice *srcDevice, + const struct NvKmsKapiMemory *srcMemory + ); + + /*! + * Export the specified memory object to a userspace object handle. + * + * This function accepts a driver-specific parameter structure representing + * a new handle to be assigned to an existing NVKMS KAPI memory object. + * + * \param [in] device A device allocated using allocateDevice(). 
The + * memory being exported must have been created against + * or imported to the same device object, and the + * destination object handle must be valid for this + * device as well. + * + * \param [in] memory The memory object to export. + * + * \param [in] nvKmsParamsUser Userspace pointer to driver-specific + * parameters specifying a handle to add to the + * memory object being exported. + * + * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*exportMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize + ); + + /*! + * Free memory allocated using allocateMemory() + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory(). + * + * \return NV_TRUE on success, NV_FALSE if memory is in use. + */ + void (*freeMemory) + ( + struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory + ); + + /*! + * Create MMIO mappings for a memory object allocated using + * allocateMemory(). + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory() + * + * \param [in] type Userspace or kernelspace mapping + * + * \param [out] ppLinearAddress The MMIO address where memory object is + * mapped. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*mapMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + void **ppLinearAddress + ); + + /*! + * Destroy MMIO mappings created for a memory object allocated using + * allocateMemory(). + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory() + * + * \param [in] type Userspace or kernelspace mapping + * + * \param [in] pLinearAddress The MMIO address return by mapMemory() + */ + void (*unmapMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + const void *pLinearAddress + ); + + /*! + * Create a formatted surface from an NvKmsKapiMemory object. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] params Parameters to the surface creation. + * + * \return An valid surface handle on success. NULL on failure. + */ + struct NvKmsKapiSurface* (*createSurface) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiCreateSurfaceParams *params + ); + + /*! + * Destroy a surface created by createSurface(). + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] surface A surface created using createSurface() + */ + void (*destroySurface) + ( + struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface + ); + + /*! + * Enumerate the mode timings available on a given display. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] display A display handle returned by getDisplays(). + * + * \param [in] modeIndex A mode index (Any integer >= 0). + * + * \param [out] mode A pointer to an NvKmsKapiDisplayMode struct that + * the call will fill out with mode-timings of mode + * at index modeIndex. + * + * \param [out] valid Returns TRUE in this param if mode-timings of + * mode at index modeIndex are valid on display. 
+ * + * \param [out] preferredMode Returns TRUE if this mode is marked as + * "preferred" by the EDID. + * + * \return Value >= 1 if more modes are available, 0 if no more modes are + * available, and Value < 0 on failure. + */ + int (*getDisplayMode) + ( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, NvU32 modeIndex, + struct NvKmsKapiDisplayMode *mode, NvBool *valid, + NvBool *preferredMode + ); + + /*! + * Validate given mode timings available on a given display. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] display A display handle returned by getDisplays(). + * + * \param [in] mode A pointer to an NvKmsKapiDisplayMode struct that + * filled with mode-timings to validate. + * + * \return NV_TRUE if mode-timings are valid, NV_FALSE on failure. + */ + NvBool (*validateDisplayMode) + ( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode + ); + + /*! + * Apply a mode configuration to the device. + * + * Client can describe damaged part of configuration but still it is must + * to describe entire configuration. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] requestedConfig Parameters describing a device-wide + * display configuration. + * + * \param [in] commit If set to 0 them call will only validate + * mode configuration, will not apply it. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*applyModeSetConfig) + ( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsKapiModeSetReplyConfig *replyConfig, + const NvBool commit + ); + + /*! + * Return status of flip. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] head A head returned by getDeviceResourcesInfo(). + * + * \param [in] layer A layer index. + * + * \param [out] pending Return TRUE if head has pending flip for + * given layer. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getFlipPendingStatus) + ( + const struct NvKmsKapiDevice *device, + const NvU32 head, + const NvU32 layer, + NvBool *pending + ); + + /*! + * Allocate an event callback. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] proc Function pointer to call when triggered. + * + * \param [in] data Argument to pass into function. + * + * \param [in] nvKmsParamsUser Userspace pointer to driver-specific + * parameters describing the event callback + * being created. + * + * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct. + * + * \return struct NvKmsKapiChannelEvent* on success, NULL on failure. + */ + struct NvKmsKapiChannelEvent* (*allocateChannelEvent) + ( + struct NvKmsKapiDevice *device, + NvKmsChannelEventProc *proc, + void *data, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize + ); + + /*! + * Free an event callback. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] cb struct NvKmsKapiChannelEvent* returned from + * allocateChannelEvent() + */ + void (*freeChannelEvent) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiChannelEvent *cb + ); + + /*! + * Get 32-bit CRC value for the last contents presented on the specified + * head. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] head A head returned by getDeviceResourcesInfo(). 
+ * + * \param [out] crc32 The CRC32 generated from the content currently + * presented onto the given head + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getCRC32) + ( + struct NvKmsKapiDevice *device, + NvU32 head, + struct NvKmsKapiCrcs *crc32 + ); + + /*! + * Get the list allocation pages corresponding to the specified memory object. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory The memory object for which we need to find the + * list of allocation pages and number of pages. + * + * \param [out] pPages A pointer to the list of NvU64 pointers. Caller + * should free pPages on success using freeMemoryPages(). + * + * \param [out] pNumPages It gives the total number of NvU64 pointers + * returned in pPages. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getMemoryPages) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 **pPages, + NvU32 *pNumPages + ); + + /*! + * Free the list of allocation pages returned by getMemoryPages() + * + * \param [in] pPages A list of NvU64 pointers allocated by getMemoryPages(). + * + */ + void (*freeMemoryPages) + ( + NvU64 *pPages + ); + + /* + * Import SGT as a memory handle. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] sgt SGT pointer. + * \param [in] gem GEM pointer that pinned SGT, to be refcounted. + * + * \param [in] limit Size, in bytes, of the memory backed by the SGT. + * + * \return A valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* + (*getSystemMemoryHandleFromSgt)(struct NvKmsKapiDevice *device, + NvP64 sgt, + NvP64 gem, + NvU32 limit); + + /* + * Import dma-buf in the memory handle. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] dmaBuf DMA-BUF pointer. + * + * \param [in] limit Size, in bytes, of the dma-buf. + * + * \return An valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* + (*getSystemMemoryHandleFromDmaBuf)(struct NvKmsKapiDevice *device, + NvP64 dmaBuf, + NvU32 limit); + +}; + +/** @} */ + +/** + * \defgroup Functions + * @{ + */ + +NvBool nvKmsKapiGetFunctionsTable +( + struct NvKmsKapiFunctionsTable *funcsTable +); + +/** @} */ + +#endif /* defined(__NVKMS_KAPI_H__) */ diff --git a/kernel-open/common/inc/nvlimits.h b/kernel-open/common/inc/nvlimits.h new file mode 100644 index 000000000..e119f676b --- /dev/null +++ b/kernel-open/common/inc/nvlimits.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: nvlimits.finn +// + + + + + + + + + + + +/* + * This is the maximum number of GPUs supported in a single system. + */ +#define NV_MAX_DEVICES 32 + +/* + * This is the maximum number of subdevices within a single device. + */ +#define NV_MAX_SUBDEVICES 8 + +/* + * This is the maximum length of the process name string. + */ +#define NV_PROC_NAME_MAX_LENGTH 100U + +/* + * This is the maximum number of heads per GPU. + */ +#define NV_MAX_HEADS 4 diff --git a/kernel-open/common/inc/nvmisc.h b/kernel-open/common/inc/nvmisc.h new file mode 100644 index 000000000..210e23798 --- /dev/null +++ b/kernel-open/common/inc/nvmisc.h @@ -0,0 +1,915 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * nvmisc.h + */ +#ifndef __NV_MISC_H +#define __NV_MISC_H + +#ifdef __cplusplus +extern "C" { +#endif //__cplusplus + +#include "nvtypes.h" + +#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS) +// +// Miscellaneous macros useful for bit field manipulations +// +// STUPID HACK FOR CL 19434692. Will revert when fix CL is delivered bfm -> chips_a. +#ifndef BIT +#define BIT(b) (1U<<(b)) +#endif +#ifndef BIT32 +#define BIT32(b) ((NvU32)1U<<(b)) +#endif +#ifndef BIT64 +#define BIT64(b) ((NvU64)1U<<(b)) +#endif + +#endif + +// +// It is recommended to use the following bit macros to avoid macro name +// collisions with other src code bases. 
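/*
 * Illustrative sketch (not part of the header): a per-head bitmask sized
 * with NV_MAX_HEADS from nvlimits.h above and manipulated with the NVBIT*
 * and BIT_IDX_32 macros defined just below.
 */
static NvU32 exampleBuildHeadMask(void)
{
    NvU32 headMask = 0;
    NvU32 head;

    for (head = 0; head < NV_MAX_HEADS; head++)
    {
        headMask |= NVBIT32(head);      /* set bit 'head' */
    }

    /* BIT_IDX_32() recovers the index of a single set bit:      */
    /*   BIT_IDX_32(NVBIT32(3)) == 3                             */

    return headMask;                    /* 0xF when NV_MAX_HEADS == 4 */
}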
+// +#ifndef NVBIT +#define NVBIT(b) (1U<<(b)) +#endif +#ifndef NVBIT_TYPE +#define NVBIT_TYPE(b, t) (((t)1U)<<(b)) +#endif +#ifndef NVBIT32 +#define NVBIT32(b) NVBIT_TYPE(b, NvU32) +#endif +#ifndef NVBIT64 +#define NVBIT64(b) NVBIT_TYPE(b, NvU64) +#endif + +// Helper macro's for 32 bit bitmasks +#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3) +#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5) +#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F)) +#define NV_BITMASK32_SET(pChannelMask, chId) \ + (pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId)) +#define NV_BITMASK32_GET(pChannelMask, chId) \ + ((pChannelMask)[NV_BITMASK32_IDX(chId)] & NVBIT(NV_BITMASK32_OFFSET(chId))) + + +// Index of the 'on' bit (assuming that there is only one). +// Even if multiple bits are 'on', result is in range of 0-31. +#define BIT_IDX_32(n) \ + (((((n) & 0xFFFF0000U) != 0U) ? 0x10U: 0U) | \ + ((((n) & 0xFF00FF00U) != 0U) ? 0x08U: 0U) | \ + ((((n) & 0xF0F0F0F0U) != 0U) ? 0x04U: 0U) | \ + ((((n) & 0xCCCCCCCCU) != 0U) ? 0x02U: 0U) | \ + ((((n) & 0xAAAAAAAAU) != 0U) ? 0x01U: 0U) ) + +// Index of the 'on' bit (assuming that there is only one). +// Even if multiple bits are 'on', result is in range of 0-63. +#define BIT_IDX_64(n) \ + (((((n) & 0xFFFFFFFF00000000ULL) != 0U) ? 0x20U: 0U) | \ + ((((n) & 0xFFFF0000FFFF0000ULL) != 0U) ? 0x10U: 0U) | \ + ((((n) & 0xFF00FF00FF00FF00ULL) != 0U) ? 0x08U: 0U) | \ + ((((n) & 0xF0F0F0F0F0F0F0F0ULL) != 0U) ? 0x04U: 0U) | \ + ((((n) & 0xCCCCCCCCCCCCCCCCULL) != 0U) ? 0x02U: 0U) | \ + ((((n) & 0xAAAAAAAAAAAAAAAAULL) != 0U) ? 0x01U: 0U) ) + +/*! + * DRF MACRO README: + * + * Glossary: + * DRF: Device, Register, Field + * FLD: Field + * REF: Reference + * + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA 0xDEADBEEF + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_GAMMA 27:0 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA 31:28 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ZERO 0x00000000 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ONE 0x00000001 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_TWO 0x00000002 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_THREE 0x00000003 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FOUR 0x00000004 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FIVE 0x00000005 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SIX 0x00000006 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SEVEN 0x00000007 + * + * + * Device = _DEVICE_OMEGA + * This is the common "base" that a group of registers in a manual share + * + * Register = _REGISTER_ALPHA + * Register for a given block of defines is the common root for one or more fields and constants + * + * Field(s) = _FIELD_GAMMA, _FIELD_ZETA + * These are the bit ranges for a given field within the register + * Fields are not required to have defined constant values (enumerations) + * + * Constant(s) = _ZERO, _ONE, _TWO, ... 
+ * These are named values (enums) a field can contain; the width of the constants should not be larger than the field width + * + * MACROS: + * + * DRF_SHIFT: + * Bit index of the lower bound of a field + * DRF_SHIFT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 28 + * + * DRF_SHIFT_RT: + * Bit index of the higher bound of a field + * DRF_SHIFT_RT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 31 + * + * DRF_MASK: + * Produces a mask of 1-s equal to the width of a field + * DRF_MASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF (four 1s starting at bit 0) + * + * DRF_SHIFTMASK: + * Produces a mask of 1s equal to the width of a field at the location of the field + * DRF_SHIFTMASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF0000000 + * + * DRF_DEF: + * Shifts a field constant's value to the correct field offset + * DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE) == 0x30000000 + * + * DRF_NUM: + * Shifts a number to the location of a particular field + * DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 3) == 0x30000000 + * NOTE: If the value passed in is wider than the field, the value's high bits will be truncated + * + * DRF_SIZE: + * Provides the width of the field in bits + * DRF_SIZE(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 4 + * + * DRF_VAL: + * Provides the value of an input within the field specified + * DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xABCD1234) == 0xA + * This is sort of like the inverse of DRF_NUM + * + * DRF_IDX...: + * These macros are similar to the above but for fields that accept an index argumment + * + * FLD_SET_DRF: + * Set the field bits in a given value with the given field constant + * NvU32 x = 0x00001234; + * x = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, x); + * x == 0x30001234; + * + * FLD_SET_DRF_NUM: + * Same as FLD_SET_DRF but instead of using a field constant a literal/variable is passed in + * NvU32 x = 0x00001234; + * x = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xF, x); + * x == 0xF0001234; + * + * FLD_IDX...: + * These macros are similar to the above but for fields that accept an index argumment + * + * FLD_TEST_DRF: + * Test if location specified by drf in 'v' has the same value as NV_drfc + * FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE + * + * FLD_TEST_DRF_NUM: + * Test if locations specified by drf in 'v' have the same value as n + * FLD_TEST_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0x3, 0x3000ABCD) == NV_TRUE + * + * REF_DEF: + * Like DRF_DEF but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_DEF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE) == 0x30000000 + * + * REF_VAL: + * Like DRF_VAL but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_VAL(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xABCD1234) == 0xA + * + * REF_NUM: + * Like DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xA) == 0xA00000000 + * + * FLD_SET_REF_NUM: + * Like FLD_SET_DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * NvU32 x = 0x00001234; + * x = FLD_SET_REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xF, x); + * x == 0xF0001234; + * + * FLD_TEST_REF: + * Like FLD_TEST_DRF but maintains full symbol name (use in cases where "NV" is not 
prefixed to the field) + * FLD_TEST_REF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE + * + * Other macros: + * There a plethora of other macros below that extend the above (notably Multi-Word (MW), 64-bit, and some + * reg read/write variations). I hope these are self explanatory. If you have a need to use them, you + * probably have some knowledge of how they work. + */ + +// tegra mobile uses nvmisc_macros.h and can't access nvmisc.h... and sometimes both get included. +#ifndef _NVMISC_MACROS_H +// Use Coverity Annotation to mark issues as false positives/ignore when using single bit defines. +#define DRF_ISBIT(bitval,drf) \ + ( /* coverity[identical_branches] */ \ + (bitval != 0) ? drf ) +#define DEVICE_BASE(d) (0?d) // what's up with this name? totally non-parallel to the macros below +#define DEVICE_EXTENT(d) (1?d) // what's up with this name? totally non-parallel to the macros below +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +#ifdef MISRA_14_3 +#define DRF_BASE(drf) (drf##_LOW_FIELD) +#define DRF_EXTENT(drf) (drf##_HIGH_FIELD) +#define DRF_SHIFT(drf) ((drf##_LOW_FIELD) % 32U) +#define DRF_SHIFT_RT(drf) ((drf##_HIGH_FIELD) % 32U) +#define DRF_MASK(drf) (0xFFFFFFFFU >> (31U - ((drf##_HIGH_FIELD) % 32U) + ((drf##_LOW_FIELD) % 32U))) +#else +#define DRF_BASE(drf) (NV_FALSE?drf) // much better +#define DRF_EXTENT(drf) (NV_TRUE?drf) // much better +#define DRF_SHIFT(drf) (((NvU32)DRF_BASE(drf)) % 32U) +#define DRF_SHIFT_RT(drf) (((NvU32)DRF_EXTENT(drf)) % 32U) +#define DRF_MASK(drf) (0xFFFFFFFFU>>(31U - DRF_SHIFT_RT(drf) + DRF_SHIFT(drf))) +#endif +#define DRF_DEF(d,r,f,c) (((NvU32)(NV ## d ## r ## f ## c))<>(31-((DRF_ISBIT(1,drf)) % 32)+((DRF_ISBIT(0,drf)) % 32))) +#define DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f)) +#endif + +// Signed version of DRF_VAL, which takes care of extending sign bit. 
+#define DRF_VAL_SIGNED(d,r,f,v) (((DRF_VAL(d,r,f,(v)) ^ (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))) - (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U))) +#define DRF_IDX_DEF(d,r,f,i,c) ((NV ## d ## r ## f ## c)<>DRF_SHIFT(NV##d##r##f(i)))&DRF_MASK(NV##d##r##f(i))) +#define DRF_IDX_OFFSET_VAL(d,r,f,i,o,v) (((v)>>DRF_SHIFT(NV##d##r##f(i,o)))&DRF_MASK(NV##d##r##f(i,o))) +// Fractional version of DRF_VAL which reads Fx.y fixed point number (x.y)*z +#define DRF_VAL_FRAC(d,r,x,y,v,z) ((DRF_VAL(d,r,x,(v))*z) + ((DRF_VAL(d,r,y,v)*z) / (1<>(63-((DRF_ISBIT(1,drf)) % 64)+((DRF_ISBIT(0,drf)) % 64))) +#define DRF_SHIFTMASK64(drf) (DRF_MASK64(drf)<<(DRF_SHIFT64(drf))) + +#define DRF_DEF64(d,r,f,c) (((NvU64)(NV ## d ## r ## f ## c))<>DRF_SHIFT64(NV ## d ## r ## f))&DRF_MASK64(NV ## d ## r ## f)) + +#define DRF_VAL_SIGNED64(d,r,f,v) (((DRF_VAL64(d,r,f,(v)) ^ (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1))) +#define DRF_IDX_DEF64(d,r,f,i,c) (((NvU64)(NV ## d ## r ## f ## c))<>DRF_SHIFT64(NV##d##r##f(i)))&DRF_MASK64(NV##d##r##f(i))) +#define DRF_IDX_OFFSET_VAL64(d,r,f,i,o,v) (((NvU64)(v)>>DRF_SHIFT64(NV##d##r##f(i,o)))&DRF_MASK64(NV##d##r##f(i,o))) + +#define FLD_SET_DRF64(d,r,f,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c)) +#define FLD_SET_DRF_NUM64(d,r,f,n,v) ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_NUM64(d,r,f,n)) +#define FLD_IDX_SET_DRF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c)) +#define FLD_IDX_OFFSET_SET_DRF64(d,r,f,i,o,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c)) +#define FLD_IDX_SET_DRF_DEF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c)) +#define FLD_IDX_SET_DRF_NUM64(d,r,f,i,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_NUM64(d,r,f,i,n)) +#define FLD_SET_DRF_IDX64(d,r,f,c,i,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c(i))) + +#define FLD_TEST_DRF64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) == NV##d##r##f##c) +#define FLD_TEST_DRF_AND64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) & NV##d##r##f##c) +#define FLD_TEST_DRF_NUM64(d,r,f,n,v) (DRF_VAL64(d, r, f, (v)) == (n)) +#define FLD_IDX_TEST_DRF64(d,r,f,i,c,v) (DRF_IDX_VAL64(d, r, f, i, (v)) == NV##d##r##f##c) +#define FLD_IDX_OFFSET_TEST_DRF64(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL64(d, r, f, i, o, (v)) == NV##d##r##f##c) + +#define REF_DEF64(drf,d) (((drf ## d)&DRF_MASK64(drf))<>DRF_SHIFT64(drf))&DRF_MASK64(drf)) +#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3) +#define REF_NUM64(drf,n) (((NvU64)(n)&(0xFFFFFFFFFFFFFFFFU>>(63U-((drf##_HIGH_FIELD) % 63U)+((drf##_LOW_FIELD) % 63U)))) << ((drf##_LOW_FIELD) % 63U)) +#else +#define REF_NUM64(drf,n) (((NvU64)(n)&DRF_MASK64(drf))<>DRF_SHIFT(drf))&DRF_MASK(drf)) +#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3) +#define REF_NUM(drf,n) (((n)&(0xFFFFFFFFU>>(31U-((drf##_HIGH_FIELD) % 32U)+((drf##_LOW_FIELD) % 32U)))) << ((drf##_LOW_FIELD) % 32U)) +#else +#define REF_NUM(drf,n) (((n)&DRF_MASK(drf))<>DRF_SHIFT(CR ## d ## r ## f))&DRF_MASK(CR ## d ## r ## f)) + +// Multi-word (MW) field manipulations. For multi-word structures (e.g., Fermi SPH), +// fields may have bit numbers beyond 32. To avoid errors using "classic" multi-word macros, +// all the field extents are defined as "MW(X)". For example, MW(127:96) means +// the field is in bits 0-31 of word number 3 of the structure. 
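/*
 * Illustrative sketch (not part of the header): exercising the 32-bit DRF
 * macros with the example register from the README comment above.  The
 * NV_DEVICE_OMEGA defines below simply restate that documentation example;
 * they do not describe real hardware.
 */
#define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA        31:28
#define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_THREE  0x00000003

static void exampleDrfUsage(void)
{
    NvU32 defVal, numVal, zeta;
    NvU32 reg = 0x00001234U;

    /* Shift a field constant / literal into place: both are 0x30000000. */
    defVal = DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE);
    numVal = DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 3);

    /* Read a field back out: ZETA of 0xABCD1234 is 0xA. */
    zeta = DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xABCD1234U);

    /* Update only the ZETA field of 'reg': 0x00001234 -> 0x30001234. */
    reg = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, reg);

    /* Test a field against a named constant. */
    if (FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, reg))
    {
        /* ZETA is _THREE here. */
    }

    (void)defVal; (void)numVal; (void)zeta;
}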
+// +// DRF_VAL_MW() macro is meant to be used for native endian 32-bit aligned 32-bit word data, +// not for byte stream data. +// +// DRF_VAL_BS() macro is for byte stream data used in fbQueryBIOS_XXX(). +// +#define DRF_EXPAND_MW(drf) drf // used to turn "MW(a:b)" into "a:b" +#define DRF_PICK_MW(drf,v) ((v)? DRF_EXPAND_##drf) // picks low or high bits +#define DRF_WORD_MW(drf) (DRF_PICK_MW(drf,0)/32) // which word in a multi-word array +#define DRF_BASE_MW(drf) (DRF_PICK_MW(drf,0)%32) // which start bit in the selected word? +#define DRF_EXTENT_MW(drf) (DRF_PICK_MW(drf,1)%32) // which end bit in the selected word +#define DRF_SHIFT_MW(drf) (DRF_PICK_MW(drf,0)%32) +#define DRF_MASK_MW(drf) (0xFFFFFFFFU>>((31-(DRF_EXTENT_MW(drf))+(DRF_BASE_MW(drf)))%32)) +#define DRF_SHIFTMASK_MW(drf) ((DRF_MASK_MW(drf))<<(DRF_SHIFT_MW(drf))) +#define DRF_SIZE_MW(drf) (DRF_EXTENT_MW(drf)-DRF_BASE_MW(drf)+1) + +#define DRF_DEF_MW(d,r,f,c) ((NV##d##r##f##c) << DRF_SHIFT_MW(NV##d##r##f)) +#define DRF_NUM_MW(d,r,f,n) (((n)&DRF_MASK_MW(NV##d##r##f))<>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f)) +#define DRF_SPANS(drf) ((DRF_PICK_MW(drf,0)/32) != (DRF_PICK_MW(drf,1)/32)) +#define DRF_WORD_MW_LOW(drf) (DRF_PICK_MW(drf,0)/32) +#define DRF_WORD_MW_HIGH(drf) (DRF_PICK_MW(drf,1)/32) +#define DRF_MASK_MW_LOW(drf) (0xFFFFFFFFU) +#define DRF_MASK_MW_HIGH(drf) (0xFFFFFFFFU>>(31-(DRF_EXTENT_MW(drf)))) +#define DRF_SHIFT_MW_LOW(drf) (DRF_PICK_MW(drf,0)%32) +#define DRF_SHIFT_MW_HIGH(drf) (0) +#define DRF_MERGE_SHIFT(drf) ((32-((DRF_PICK_MW(drf,0)%32)))%32) +#define DRF_VAL_MW_2WORD(d,r,f,v) (((((v)[DRF_WORD_MW_LOW(NV##d##r##f)])>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \ + (((((v)[DRF_WORD_MW_HIGH(NV##d##r##f)])>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f))) +#define DRF_VAL_MW(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_MW_2WORD(d,r,f,v) : DRF_VAL_MW_1WORD(d,r,f,v) ) + +#define DRF_IDX_DEF_MW(d,r,f,i,c) ((NV##d##r##f##c)<>DRF_SHIFT_MW(NV##d##r##f(i)))&DRF_MASK_MW(NV##d##r##f(i))) + +// +// Logically OR all DRF_DEF constants indexed from zero to s (semiinclusive). +// Caution: Target variable v must be pre-initialized. 
+// +#define FLD_IDX_OR_DRF_DEF(d,r,f,c,s,v) \ +do \ +{ NvU32 idx; \ + for (idx = 0; idx < (NV ## d ## r ## f ## s); ++idx)\ + { \ + v |= DRF_IDX_DEF(d,r,f,idx,c); \ + } \ +} while(0) + + +#define FLD_MERGE_MW(drf,n,v) (((v)[DRF_WORD_MW(drf)] & ~DRF_SHIFTMASK_MW(drf)) | n) +#define FLD_ASSIGN_MW(drf,n,v) ((v)[DRF_WORD_MW(drf)] = FLD_MERGE_MW(drf, n, v)) +#define FLD_IDX_MERGE_MW(drf,i,n,v) (((v)[DRF_WORD_MW(drf(i))] & ~DRF_SHIFTMASK_MW(drf(i))) | n) +#define FLD_IDX_ASSIGN_MW(drf,i,n,v) ((v)[DRF_WORD_MW(drf(i))] = FLD_MERGE_MW(drf(i), n, v)) + +#define FLD_SET_DRF_MW(d,r,f,c,v) FLD_MERGE_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v) +#define FLD_SET_DRF_NUM_MW(d,r,f,n,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_NUM_MW(d,r,f,n), v) +#define FLD_SET_DRF_DEF_MW(d,r,f,c,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v) +#define FLD_IDX_SET_DRF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v) +#define FLD_IDX_SET_DRF_DEF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v) +#define FLD_IDX_SET_DRF_NUM_MW(d,r,f,i,n,v) FLD_IDX_ASSIGN_MW(NV##d##r##f, i, DRF_IDX_NUM_MW(d,r,f,i,n), v) + +#define FLD_TEST_DRF_MW(d,r,f,c,v) ((DRF_VAL_MW(d, r, f, (v)) == NV##d##r##f##c)) +#define FLD_TEST_DRF_NUM_MW(d,r,f,n,v) ((DRF_VAL_MW(d, r, f, (v)) == n)) +#define FLD_IDX_TEST_DRF_MW(d,r,f,i,c,v) ((DRF_IDX_VAL_MW(d, r, f, i, (v)) == NV##d##r##f##c)) + +#define DRF_VAL_BS(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_BS_2WORD(d,r,f,(v)) : DRF_VAL_BS_1WORD(d,r,f,(v)) ) + +//------------------------------------------------------------------------// +// // +// Common defines for engine register reference wrappers // +// // +// New engine addressing can be created like: // +// \#define ENG_REG_PMC(o,d,r) NV##d##r // +// \#define ENG_IDX_REG_CE(o,d,i,r) CE_MAP(o,r,i) // +// // +// See FB_FBPA* for more examples // +//------------------------------------------------------------------------// + +#define ENG_RD_REG(g,o,d,r) GPU_REG_RD32(g, ENG_REG##d(o,d,r)) +#define ENG_WR_REG(g,o,d,r,v) GPU_REG_WR32(g, ENG_REG##d(o,d,r), (v)) +#define ENG_RD_DRF(g,o,d,r,f) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define ENG_WR_DRF_DEF(g,o,d,r,f,c) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define ENG_TEST_IDX_DRF_DEF(g,o,d,r,f,c,i) (ENG_RD_IDX_DRF(g, o, d, r, f, (i)) == NV##d##r##f##c) + +#define ENG_IDX_RD_REG(g,o,d,i,r) GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r)) +#define ENG_IDX_WR_REG(g,o,d,i,r,v) GPU_REG_WR32(g, ENG_IDX_REG##d(o,d,i,r), (v)) + +#define ENG_IDX_RD_DRF(g,o,d,i,r,f) ((GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) + +// +// DRF_READ_1WORD_BS() and DRF_READ_1WORD_BS_HIGH() do not read beyond the bytes that contain +// the requested value. Reading beyond the actual data causes a page fault panic when the +// immediately following page happened to be protected or not mapped. 
+// +#define DRF_VAL_BS_1WORD(d,r,f,v) ((DRF_READ_1WORD_BS(d,r,f,v)>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f)) +#define DRF_VAL_BS_2WORD(d,r,f,v) (((DRF_READ_4BYTE_BS(NV##d##r##f,v)>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \ + (((DRF_READ_1WORD_BS_HIGH(d,r,f,v)>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f))) + +#define DRF_READ_1BYTE_BS(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4])) +#define DRF_READ_2BYTE_BS(drf,v) (DRF_READ_1BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+1])<<8)) +#define DRF_READ_3BYTE_BS(drf,v) (DRF_READ_2BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+2])<<16)) +#define DRF_READ_4BYTE_BS(drf,v) (DRF_READ_3BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+3])<<24)) + +#define DRF_READ_1BYTE_BS_HIGH(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4])) +#define DRF_READ_2BYTE_BS_HIGH(drf,v) (DRF_READ_1BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+1])<<8)) +#define DRF_READ_3BYTE_BS_HIGH(drf,v) (DRF_READ_2BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+2])<<16)) +#define DRF_READ_4BYTE_BS_HIGH(drf,v) (DRF_READ_3BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+3])<<24)) + +// Calculate 2^n - 1 and avoid shift counter overflow +// +// On Windows amd64, 64 << 64 => 1 +// +#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1) + +#define DRF_READ_1WORD_BS(d,r,f,v) \ + ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS(NV##d##r##f,(v)): \ + DRF_READ_4BYTE_BS(NV##d##r##f,(v))))) + +#define DRF_READ_1WORD_BS_HIGH(d,r,f,v) \ + ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS_HIGH(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS_HIGH(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS_HIGH(NV##d##r##f,(v)): \ + DRF_READ_4BYTE_BS_HIGH(NV##d##r##f,(v))))) + +#define LOWESTBIT(x) ( (x) & (((x) - 1U) ^ (x)) ) +// Destructive operation on n32 +#define HIGHESTBIT(n32) \ +{ \ + HIGHESTBITIDX_32(n32); \ + n32 = NVBIT(n32); \ +} +#define ONEBITSET(x) ( ((x) != 0U) && (((x) & ((x) - 1U)) == 0U) ) + +// Destructive operation on n32 +#define NUMSETBITS_32(n32) \ +{ \ + n32 = n32 - ((n32 >> 1) & 0x55555555); \ + n32 = (n32 & 0x33333333) + ((n32 >> 2) & 0x33333333); \ + n32 = (((n32 + (n32 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; \ +} + +/*! + * Calculate number of bits set in a 32-bit unsigned integer. + * Pure typesafe alternative to @ref NUMSETBITS_32. + */ +static NV_FORCEINLINE NvU32 +nvPopCount32(const NvU32 x) +{ + NvU32 temp = x; + temp = temp - ((temp >> 1) & 0x55555555U); + temp = (temp & 0x33333333U) + ((temp >> 2) & 0x33333333U); + temp = (((temp + (temp >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24; + return temp; +} + +/*! + * Calculate number of bits set in a 64-bit unsigned integer. + */ +static NV_FORCEINLINE NvU32 +nvPopCount64(const NvU64 x) +{ + NvU64 temp = x; + temp = temp - ((temp >> 1) & 0x5555555555555555ULL); + temp = (temp & 0x3333333333333333ULL) + ((temp >> 2) & 0x3333333333333333ULL); + temp = (temp + (temp >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + temp = (temp * 0x0101010101010101ULL) >> 56; + return (NvU32)temp; +} + +/*! + * Determine how many bits are set below a bit index within a mask. 
+ * This assigns a dense ordering to the set bits in the mask. + * + * For example the mask 0xCD contains 5 set bits: + * nvMaskPos32(0xCD, 0) == 0 + * nvMaskPos32(0xCD, 2) == 1 + * nvMaskPos32(0xCD, 3) == 2 + * nvMaskPos32(0xCD, 6) == 3 + * nvMaskPos32(0xCD, 7) == 4 + */ +static NV_FORCEINLINE NvU32 +nvMaskPos32(const NvU32 mask, const NvU32 bitIdx) +{ + return nvPopCount32(mask & (NVBIT32(bitIdx) - 1U)); +} + +// Destructive operation on n32 +#define LOWESTBITIDX_32(n32) \ +{ \ + n32 = BIT_IDX_32(LOWESTBIT(n32));\ +} + +// Destructive operation on n32 +#define HIGHESTBITIDX_32(n32) \ +{ \ + NvU32 count = 0; \ + while (n32 >>= 1) \ + { \ + count++; \ + } \ + n32 = count; \ +} + +// Destructive operation on n32 +#define ROUNDUP_POW2(n32) \ +{ \ + n32--; \ + n32 |= n32 >> 1; \ + n32 |= n32 >> 2; \ + n32 |= n32 >> 4; \ + n32 |= n32 >> 8; \ + n32 |= n32 >> 16; \ + n32++; \ +} + +/*! + * Round up a 32-bit unsigned integer to the next power of 2. + * Pure typesafe alternative to @ref ROUNDUP_POW2. + * + * param[in] x must be in range [0, 2^31] to avoid overflow. + */ +static NV_FORCEINLINE NvU32 +nvNextPow2_U32(const NvU32 x) +{ + NvU32 y = x; + y--; + y |= y >> 1; + y |= y >> 2; + y |= y >> 4; + y |= y >> 8; + y |= y >> 16; + y++; + return y; +} + + +static NV_FORCEINLINE NvU32 +nvPrevPow2_U32(const NvU32 x ) +{ + NvU32 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + return y - (y >> 1); +} + +static NV_FORCEINLINE NvU64 +nvPrevPow2_U64(const NvU64 x ) +{ + NvU64 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + y |= (y >> 32); + return y - (y >> 1); +} + +// Destructive operation on n64 +#define ROUNDUP_POW2_U64(n64) \ +{ \ + n64--; \ + n64 |= n64 >> 1; \ + n64 |= n64 >> 2; \ + n64 |= n64 >> 4; \ + n64 |= n64 >> 8; \ + n64 |= n64 >> 16; \ + n64 |= n64 >> 32; \ + n64++; \ +} + +#define NV_SWAP_U8(a,b) \ +{ \ + NvU8 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +#define NV_SWAP_U32(a,b) \ +{ \ + NvU32 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +/*! + * @brief Macros allowing simple iteration over bits set in a given mask. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * + * @param[in,out] index lvalue that is used as a bit index in the loop + * (can be declared as any NvU* or NvS* variable) + * @param[in] mask expression, loop will iterate over set bits only + */ +#define FOR_EACH_INDEX_IN_MASK(maskWidth,index,mask) \ +{ \ + NvU##maskWidth lclMsk = (NvU##maskWidth)(mask); \ + for ((index) = 0U; lclMsk != 0U; (index)++, lclMsk >>= 1U)\ + { \ + if (((NvU##maskWidth)NVBIT64(0) & lclMsk) == 0U) \ + { \ + continue; \ + } +#define FOR_EACH_INDEX_IN_MASK_END \ + } \ +} + +// +// Size to use when declaring variable-sized arrays +// +#define NV_ANYSIZE_ARRAY 1 + +// +// Returns ceil(a/b) +// +#define NV_CEIL(a,b) (((a)+(b)-1)/(b)) + +// Clearer name for NV_CEIL +#ifndef NV_DIV_AND_CEIL +#define NV_DIV_AND_CEIL(a, b) NV_CEIL(a,b) +#endif + +#ifndef NV_MIN +#define NV_MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + +#ifndef NV_MAX +#define NV_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + +// +// Returns absolute value of provided integer expression +// +#define NV_ABS(a) ((a)>=0?(a):(-(a))) + +// +// Returns 1 if input number is positive, 0 if 0 and -1 if negative. Avoid +// macro parameter as function call which will have side effects. +// +#define NV_SIGN(s) ((NvS8)(((s) > 0) - ((s) < 0))) + +// +// Returns 1 if input number is >= 0 or -1 otherwise. 
This assumes 0 has a +// positive sign. +// +#define NV_ZERO_SIGN(s) ((NvS8)((((s) >= 0) * 2) - 1)) + +// Returns the offset (in bytes) of 'member' in struct 'type'. +#ifndef NV_OFFSETOF + #if defined(__GNUC__) && (__GNUC__ > 3) + #define NV_OFFSETOF(type, member) ((NvU32)__builtin_offsetof(type, member)) + #else + #define NV_OFFSETOF(type, member) ((NvU32)(NvU64)&(((type *)0)->member)) // shouldn't we use PtrToUlong? But will need to include windows header. + #endif +#endif + +// +// Performs a rounded division of b into a (unsigned). For SIGNED version of +// NV_ROUNDED_DIV() macro check the comments in bug 769777. +// +#define NV_UNSIGNED_ROUNDED_DIV(a,b) (((a) + ((b) / 2U)) / (b)) + +/*! + * Performs a ceiling division of b into a (unsigned). A "ceiling" division is + * a division is one with rounds up result up if a % b != 0. + * + * @param[in] a Numerator + * @param[in] b Denominator + * + * @return a / b + a % b != 0 ? 1 : 0. + */ +#define NV_UNSIGNED_DIV_CEIL(a, b) (((a) + (b - 1)) / (b)) + +/*! + * Performs subtraction where a negative difference is raised to zero. + * Can be used to avoid underflowing an unsigned subtraction. + * + * @param[in] a Minuend + * @param[in] b Subtrahend + * + * @return a > b ? a - b : 0. + */ +#define NV_SUBTRACT_NO_UNDERFLOW(a, b) ((a)>(b) ? (a)-(b) : 0) + +/*! + * Performs a rounded right-shift of 32-bit unsigned value "a" by "shift" bits. + * Will round result away from zero. + * + * @param[in] a 32-bit unsigned value to shift. + * @param[in] shift Number of bits by which to shift. + * + * @return Resulting shifted value rounded away from zero. + */ +#define NV_RIGHT_SHIFT_ROUNDED(a, shift) \ + (((a) >> (shift)) + !!((NVBIT((shift) - 1) & (a)) == NVBIT((shift) - 1))) + +// +// Power of 2 alignment. +// (Will give unexpected results if 'gran' is not a power of 2.) +// +#ifndef NV_ALIGN_DOWN +// +// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type. +// Otherwise, if aligning a NVU64 with NVU32 granularity, the top 4 bytes get zeroed. +// +#define NV_ALIGN_DOWN(v, gran) ((v) & ~((v) - (v) + (gran) - 1)) +#endif + +#ifndef NV_ALIGN_UP +// +// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type. +// Otherwise, if aligning a NVU64 with NVU32 granularity, the top 4 bytes get zeroed. 
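/*
 * Illustrative sketch (not part of the header): a few worked values for
 * the bit-mask and rounding helpers above (NV_ALIGN_UP is defined just
 * below).  All operands are unsigned.
 */
static NvU32 exampleBitAndRoundingHelpers(void)
{
    NvU32 mask = 0xCDU;     /* bits 0, 2, 3, 6 and 7 set */
    NvU32 index;
    NvU32 visited = 0;
    NvU32 total = nvPopCount32(mask);   /* nvPopCount32(0xCD) == 5 */

    /* Visit each set bit index, lowest to highest. */
    FOR_EACH_INDEX_IN_MASK(32, index, mask)
    {
        visited++;
    }
    FOR_EACH_INDEX_IN_MASK_END;

    /* Rounded/ceiling arithmetic and power-of-2 alignment:      */
    /*   NV_UNSIGNED_DIV_CEIL(10, 4)     == 3                    */
    /*   NV_UNSIGNED_ROUNDED_DIV(10, 4)  == 3                    */
    /*   NV_SUBTRACT_NO_UNDERFLOW(3, 5)  == 0                    */
    /*   NV_RIGHT_SHIFT_ROUNDED(6, 2)    == 2                    */
    /*   NV_ALIGN_DOWN(0x1234, 0x1000)   == 0x1000               */

    /* e.g. number of bytes needed to hold 'total' bits: */
    return (visited == total) ? NV_CEIL(total, 8U) : 0U;
}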
+// +#define NV_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((v) - (v) + (gran) - 1)) +#endif + +#ifndef NV_ALIGN_DOWN64 +#define NV_ALIGN_DOWN64(v, gran) ((v) & ~(((NvU64)gran) - 1)) +#endif + +#ifndef NV_ALIGN_UP64 +#define NV_ALIGN_UP64(v, gran) (((v) + ((gran) - 1)) & ~(((NvU64)gran)-1)) +#endif + +#ifndef NV_IS_ALIGNED +#define NV_IS_ALIGNED(v, gran) (0U == ((v) & ((gran) - 1U))) +#endif + +#ifndef NV_IS_ALIGNED64 +#define NV_IS_ALIGNED64(v, gran) (0U == ((v) & (((NvU64)gran) - 1U))) +#endif + +#ifndef NVMISC_MEMSET +static NV_FORCEINLINE void *NVMISC_MEMSET(void *s, NvU8 c, NvLength n) +{ + NvU8 *b = (NvU8 *) s; + NvLength i; + + for (i = 0; i < n; i++) + { + b[i] = c; + } + + return s; +} +#endif + +#ifndef NVMISC_MEMCPY +static NV_FORCEINLINE void *NVMISC_MEMCPY(void *dest, const void *src, NvLength n) +{ + NvU8 *destByte = (NvU8 *) dest; + const NvU8 *srcByte = (const NvU8 *) src; + NvLength i; + + for (i = 0; i < n; i++) + { + destByte[i] = srcByte[i]; + } + + return dest; +} +#endif + +static NV_FORCEINLINE char *NVMISC_STRNCPY(char *dest, const char *src, NvLength n) +{ + NvLength i; + + for (i = 0; i < n; i++) + { + dest[i] = src[i]; + if (src[i] == '\0') + { + break; + } + } + + for (; i < n; i++) + { + dest[i] = '\0'; + } + + return dest; +} + +/*! + * Convert a void* to an NvUPtr. This is used when MISRA forbids us from doing a direct cast. + * + * @param[in] ptr Pointer to be converted + * + * @return Resulting NvUPtr + */ +static NV_FORCEINLINE NvUPtr NV_PTR_TO_NVUPTR(void *ptr) +{ + union + { + NvUPtr v; + void *p; + } uAddr; + + uAddr.p = ptr; + return uAddr.v; +} + +/*! + * Convert an NvUPtr to a void*. This is used when MISRA forbids us from doing a direct cast. + * + * @param[in] ptr Pointer to be converted + * + * @return Resulting void * + */ +static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address) +{ + union + { + NvUPtr v; + void *p; + } uAddr; + + uAddr.v = address; + return uAddr.p; +} + +#ifdef __cplusplus +} +#endif //__cplusplus + +#endif // __NV_MISC_H + diff --git a/kernel-open/common/inc/nvstatus.h b/kernel-open/common/inc/nvstatus.h new file mode 100644 index 000000000..f90cbf520 --- /dev/null +++ b/kernel-open/common/inc/nvstatus.h @@ -0,0 +1,130 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
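/*
 * Illustrative sketch (not part of any header): NVMISC_STRNCPY(), defined
 * in nvmisc.h above, behaves like strncpy(): it NUL-pads the remainder of
 * 'dest', but does not add a terminator when 'src' occupies all 'n'
 * bytes, so this caller terminates explicitly.  'destSize' is assumed to
 * be non-zero.
 */
static void exampleCopyName(char *dest, NvLength destSize, const char *src)
{
    NVMISC_STRNCPY(dest, src, destSize);
    dest[destSize - 1U] = '\0';     /* force termination */
}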
+ */ + +#ifndef SDK_NVSTATUS_H +#define SDK_NVSTATUS_H + +/* XAPIGEN - this file is not suitable for (nor needed by) xapigen. */ +/* Rather than #ifdef out every such include in every sdk */ +/* file, punt here. */ +#if !defined(XAPIGEN) /* rest of file */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +typedef NvU32 NV_STATUS; + +#define NV_STATUS_CODE( name, code, string ) name = (code), + +enum +{ + #include "nvstatuscodes.h" +}; + +#undef NV_STATUS_CODE + +/*! + * @def NV_STATUS_LEVEL_OK + * @see NV_STATUS_LEVEL + * @brief Success: No error or special condition + */ +#define NV_STATUS_LEVEL_OK 0 + +/*! + * @def NV_STATUS_LEVEL_WARN + * @see NV_STATUS_LEVEL + * @brief Success, but there is an special condition + * + * @details In general, NV_STATUS_LEVEL_WARN status codes are handled the + * same as NV_STATUS_LEVEL_OK, but are usefil to indicate that + * there is a condition that may be specially handled. + * + * Therefore, in most cases, client function should test for + * status <= NV_STATUS_LEVEL_WARN or status > NV_STATUS_LEVEL_WARN + * to determine success v. failure of a call. + */ +#define NV_STATUS_LEVEL_WARN 1 + +/*! + * @def NV_STATUS_LEVEL_ERR + * @see NV_STATUS_LEVEL + * @brief Unrecoverable error condition + */ +#define NV_STATUS_LEVEL_ERR 3 + +/*! + * @def NV_STATUS_LEVEL + * @see NV_STATUS_LEVEL_OK + * @see NV_STATUS_LEVEL_WARN + * @see NV_STATUS_LEVEL_ERR + * @brief Level of the status code + * + * @warning IMPORTANT: When comparing NV_STATUS_LEVEL(_S) against one of + * these constants, it is important to use '<=' or '>' (rather + * than '<' or '>='). + * + * For example. do: + * if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN) + * rather than: + * if (NV_STATUS_LEVEL(status) < NV_STATUS_LEVEL_ERR) + * + * By being consistent in this manner, it is easier to systematically + * add additional level constants. New levels are likely to lower + * (rather than raise) the severity of _ERR codes. For example, + * if we were to add NV_STATUS_LEVEL_RETRY to indicate hardware + * failures that may be recoverable (e.g. RM_ERR_TIMEOUT_RETRY + * or RM_ERR_BUSY_RETRY), it would be less severe than + * NV_STATUS_LEVEL_ERR the level to which these status codes now + * belong. Using '<=' and '>' ensures your code is not broken in + * cases like this. + */ +#define NV_STATUS_LEVEL(_S) \ + ((_S) == NV_OK? NV_STATUS_LEVEL_OK: \ + ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? NV_STATUS_LEVEL_WARN: \ + NV_STATUS_LEVEL_ERR)) + +/*! + * @def NV_STATUS_LEVEL + * @see NV_STATUS_LEVEL_OK + * @see NV_STATUS_LEVEL_WARN + * @see NV_STATUS_LEVEL_ERR + * @brief Character representing status code level + */ +#define NV_STATUS_LEVEL_CHAR(_S) \ + ((_S) == NV_OK? '0': \ + ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? 'W': \ + 'E')) + +// Function definitions +const char *nvstatusToString(NV_STATUS nvStatusIn); + +#ifdef __cplusplus +} +#endif + +#endif // XAPIGEN + +#endif /* SDK_NVSTATUS_H */ diff --git a/kernel-open/common/inc/nvstatuscodes.h b/kernel-open/common/inc/nvstatuscodes.h new file mode 100644 index 000000000..09256bf70 --- /dev/null +++ b/kernel-open/common/inc/nvstatuscodes.h @@ -0,0 +1,169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
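/*
 * Illustrative sketch (not part of the header): treating warnings as
 * success, as recommended in nvstatus.h above, and logging failures by
 * name.  "NV_LOG" stands in for whatever printf-style logging function
 * the client actually uses; it is a placeholder, not part of this SDK.
 */
static NvBool exampleCheckStatus(NV_STATUS status)
{
    if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN)
    {
        return NV_TRUE;     /* NV_OK or a recoverable warning */
    }

    NV_LOG("request failed: %s (0x%08x)\n", nvstatusToString(status), status);
    return NV_FALSE;
}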
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef SDK_NVSTATUSCODES_H +#define SDK_NVSTATUSCODES_H + +/* XAPIGEN - this file is not suitable for (nor needed by) xapigen. */ +/* Rather than #ifdef out every such include in every sdk */ +/* file, punt here. */ +#if !defined(XAPIGEN) /* rest of file */ + +NV_STATUS_CODE(NV_OK, 0x00000000, "Success") +NV_STATUS_CODE(NV_ERR_GENERIC, 0x0000FFFF, "Failure: Generic Error") + +NV_STATUS_CODE(NV_ERR_BROKEN_FB, 0x00000001, "Frame-Buffer broken") +NV_STATUS_CODE(NV_ERR_BUFFER_TOO_SMALL, 0x00000002, "Buffer passed in is too small") +NV_STATUS_CODE(NV_ERR_BUSY_RETRY, 0x00000003, "System is busy, retry later") +NV_STATUS_CODE(NV_ERR_CALLBACK_NOT_SCHEDULED, 0x00000004, "The requested callback API not scheduled") +NV_STATUS_CODE(NV_ERR_CARD_NOT_PRESENT, 0x00000005, "Card not detected") +NV_STATUS_CODE(NV_ERR_CYCLE_DETECTED, 0x00000006, "Call cycle detected") +NV_STATUS_CODE(NV_ERR_DMA_IN_USE, 0x00000007, "Requested DMA is in use") +NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_LOCKED, 0x00000008, "Requested DMA memory is not locked") +NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_UNLOCKED, 0x00000009, "Requested DMA memory is not unlocked") +NV_STATUS_CODE(NV_ERR_DUAL_LINK_INUSE, 0x0000000A, "Dual-Link is in use") +NV_STATUS_CODE(NV_ERR_ECC_ERROR, 0x0000000B, "Generic ECC error") +NV_STATUS_CODE(NV_ERR_FIFO_BAD_ACCESS, 0x0000000C, "FIFO: Invalid access") +NV_STATUS_CODE(NV_ERR_FREQ_NOT_SUPPORTED, 0x0000000D, "Requested frequency is not supported") +NV_STATUS_CODE(NV_ERR_GPU_DMA_NOT_INITIALIZED, 0x0000000E, "Requested DMA not initialized") +NV_STATUS_CODE(NV_ERR_GPU_IS_LOST, 0x0000000F, "GPU lost from the bus") +NV_STATUS_CODE(NV_ERR_GPU_IN_FULLCHIP_RESET, 0x00000010, "GPU currently in full-chip reset") +NV_STATUS_CODE(NV_ERR_GPU_NOT_FULL_POWER, 0x00000011, "GPU not in full power") +NV_STATUS_CODE(NV_ERR_GPU_UUID_NOT_FOUND, 0x00000012, "GPU UUID not found") +NV_STATUS_CODE(NV_ERR_HOT_SWITCH, 0x00000013, "System in hot switch") +NV_STATUS_CODE(NV_ERR_I2C_ERROR, 0x00000014, "I2C Error") +NV_STATUS_CODE(NV_ERR_I2C_SPEED_TOO_HIGH, 0x00000015, "I2C Error: Speed too high") +NV_STATUS_CODE(NV_ERR_ILLEGAL_ACTION, 0x00000016, "Current action is not allowed") +NV_STATUS_CODE(NV_ERR_IN_USE, 0x00000017, "Generic busy error") +NV_STATUS_CODE(NV_ERR_INFLATE_COMPRESSED_DATA_FAILED, 0x00000018, "Failed to inflate compressed data") +NV_STATUS_CODE(NV_ERR_INSERT_DUPLICATE_NAME, 0x00000019, "Found a duplicate entry in the 
requested btree") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_RESOURCES, 0x0000001A, "Ran out of a critical resource, other than memory") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_PERMISSIONS, 0x0000001B, "The requester does not have sufficient permissions") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_POWER, 0x0000001C, "Generic Error: Low power") +NV_STATUS_CODE(NV_ERR_INVALID_ACCESS_TYPE, 0x0000001D, "This type of access is not allowed") +NV_STATUS_CODE(NV_ERR_INVALID_ADDRESS, 0x0000001E, "Address not valid") +NV_STATUS_CODE(NV_ERR_INVALID_ARGUMENT, 0x0000001F, "Invalid argument to call") +NV_STATUS_CODE(NV_ERR_INVALID_BASE, 0x00000020, "Invalid base") +NV_STATUS_CODE(NV_ERR_INVALID_CHANNEL, 0x00000021, "Given channel-id not valid") +NV_STATUS_CODE(NV_ERR_INVALID_CLASS, 0x00000022, "Given class-id not valid") +NV_STATUS_CODE(NV_ERR_INVALID_CLIENT, 0x00000023, "Given client not valid") +NV_STATUS_CODE(NV_ERR_INVALID_COMMAND, 0x00000024, "Command passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_DATA, 0x00000025, "Invalid data passed") +NV_STATUS_CODE(NV_ERR_INVALID_DEVICE, 0x00000026, "Current device is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_DMA_SPECIFIER, 0x00000027, "The requested DMA specifier is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_EVENT, 0x00000028, "Invalid event occurred") +NV_STATUS_CODE(NV_ERR_INVALID_FLAGS, 0x00000029, "Invalid flags passed") +NV_STATUS_CODE(NV_ERR_INVALID_FUNCTION, 0x0000002A, "Called function is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_HEAP, 0x0000002B, "Heap corrupted") +NV_STATUS_CODE(NV_ERR_INVALID_INDEX, 0x0000002C, "Index invalid") +NV_STATUS_CODE(NV_ERR_INVALID_IRQ_LEVEL, 0x0000002D, "Requested IRQ level is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_LIMIT, 0x0000002E, "Generic Error: Invalid limit") +NV_STATUS_CODE(NV_ERR_INVALID_LOCK_STATE, 0x0000002F, "Requested lock state not valid") +NV_STATUS_CODE(NV_ERR_INVALID_METHOD, 0x00000030, "Requested method not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT, 0x00000031, "Object not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_BUFFER, 0x00000032, "Object buffer passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_HANDLE, 0x00000033, "Object handle is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_NEW, 0x00000034, "New object is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_OLD, 0x00000035, "Old object is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_PARENT, 0x00000036, "Object parent is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OFFSET, 0x00000037, "The offset passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OPERATION, 0x00000038, "Requested operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OWNER, 0x00000039, "Owner not valid") +NV_STATUS_CODE(NV_ERR_INVALID_PARAM_STRUCT, 0x0000003A, "Invalid structure parameter") +NV_STATUS_CODE(NV_ERR_INVALID_PARAMETER, 0x0000003B, "At least one of the parameters passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_PATH, 0x0000003C, "The requested path is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_POINTER, 0x0000003D, "Pointer not valid") +NV_STATUS_CODE(NV_ERR_INVALID_REGISTRY_KEY, 0x0000003E, "Found an invalid registry key") +NV_STATUS_CODE(NV_ERR_INVALID_REQUEST, 0x0000003F, "Generic Error: Invalid request") +NV_STATUS_CODE(NV_ERR_INVALID_STATE, 0x00000040, "Generic Error: Invalid state") +NV_STATUS_CODE(NV_ERR_INVALID_STRING_LENGTH, 0x00000041, "The string length is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_READ, 0x00000042, "The requested read operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_WRITE, 0x00000043, "The requested write 
operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_XLATE, 0x00000044, "The requested translate operation is not valid") +NV_STATUS_CODE(NV_ERR_IRQ_NOT_FIRING, 0x00000045, "Requested IRQ is not firing") +NV_STATUS_CODE(NV_ERR_IRQ_EDGE_TRIGGERED, 0x00000046, "IRQ is edge triggered") +NV_STATUS_CODE(NV_ERR_MEMORY_TRAINING_FAILED, 0x00000047, "Failed memory training sequence") +NV_STATUS_CODE(NV_ERR_MISMATCHED_SLAVE, 0x00000048, "Slave mismatch") +NV_STATUS_CODE(NV_ERR_MISMATCHED_TARGET, 0x00000049, "Target mismatch") +NV_STATUS_CODE(NV_ERR_MISSING_TABLE_ENTRY, 0x0000004A, "Requested entry missing not found in the table") +NV_STATUS_CODE(NV_ERR_MODULE_LOAD_FAILED, 0x0000004B, "Failed to load the requested module") +NV_STATUS_CODE(NV_ERR_MORE_DATA_AVAILABLE, 0x0000004C, "There is more data available") +NV_STATUS_CODE(NV_ERR_MORE_PROCESSING_REQUIRED, 0x0000004D, "More processing required for the given call") +NV_STATUS_CODE(NV_ERR_MULTIPLE_MEMORY_TYPES, 0x0000004E, "Multiple memory types found") +NV_STATUS_CODE(NV_ERR_NO_FREE_FIFOS, 0x0000004F, "No more free FIFOs found") +NV_STATUS_CODE(NV_ERR_NO_INTR_PENDING, 0x00000050, "No interrupt pending") +NV_STATUS_CODE(NV_ERR_NO_MEMORY, 0x00000051, "Out of memory") +NV_STATUS_CODE(NV_ERR_NO_SUCH_DOMAIN, 0x00000052, "Requested domain does not exist") +NV_STATUS_CODE(NV_ERR_NO_VALID_PATH, 0x00000053, "Caller did not specify a valid path") +NV_STATUS_CODE(NV_ERR_NOT_COMPATIBLE, 0x00000054, "Generic Error: Incompatible types") +NV_STATUS_CODE(NV_ERR_NOT_READY, 0x00000055, "Generic Error: Not ready") +NV_STATUS_CODE(NV_ERR_NOT_SUPPORTED, 0x00000056, "Call not supported") +NV_STATUS_CODE(NV_ERR_OBJECT_NOT_FOUND, 0x00000057, "Requested object not found") +NV_STATUS_CODE(NV_ERR_OBJECT_TYPE_MISMATCH, 0x00000058, "Specified objects do not match") +NV_STATUS_CODE(NV_ERR_OPERATING_SYSTEM, 0x00000059, "Generic operating system error") +NV_STATUS_CODE(NV_ERR_OTHER_DEVICE_FOUND, 0x0000005A, "Found other device instead of the requested one") +NV_STATUS_CODE(NV_ERR_OUT_OF_RANGE, 0x0000005B, "The specified value is out of bounds") +NV_STATUS_CODE(NV_ERR_OVERLAPPING_UVM_COMMIT, 0x0000005C, "Overlapping unified virtual memory commit") +NV_STATUS_CODE(NV_ERR_PAGE_TABLE_NOT_AVAIL, 0x0000005D, "Requested page table not available") +NV_STATUS_CODE(NV_ERR_PID_NOT_FOUND, 0x0000005E, "Process-Id not found") +NV_STATUS_CODE(NV_ERR_PROTECTION_FAULT, 0x0000005F, "Protection fault") +NV_STATUS_CODE(NV_ERR_RC_ERROR, 0x00000060, "Generic RC error") +NV_STATUS_CODE(NV_ERR_REJECTED_VBIOS, 0x00000061, "Given Video BIOS rejected/invalid") +NV_STATUS_CODE(NV_ERR_RESET_REQUIRED, 0x00000062, "Reset required") +NV_STATUS_CODE(NV_ERR_STATE_IN_USE, 0x00000063, "State in use") +NV_STATUS_CODE(NV_ERR_SIGNAL_PENDING, 0x00000064, "Signal pending") +NV_STATUS_CODE(NV_ERR_TIMEOUT, 0x00000065, "Call timed out") +NV_STATUS_CODE(NV_ERR_TIMEOUT_RETRY, 0x00000066, "Call timed out, please retry later") +NV_STATUS_CODE(NV_ERR_TOO_MANY_PRIMARIES, 0x00000067, "Too many primaries") +NV_STATUS_CODE(NV_ERR_UVM_ADDRESS_IN_USE, 0x00000068, "Unified virtual memory requested address already in use") +NV_STATUS_CODE(NV_ERR_MAX_SESSION_LIMIT_REACHED, 0x00000069, "Maximum number of sessions reached") +NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH, 0x0000006A, "Library version doesn't match driver version") //Contained within the RMAPI library +NV_STATUS_CODE(NV_ERR_PRIV_SEC_VIOLATION, 0x0000006B, "Priv security violation") +NV_STATUS_CODE(NV_ERR_GPU_IN_DEBUG_MODE, 0x0000006C, "GPU currently in debug mode") 
+NV_STATUS_CODE(NV_ERR_FEATURE_NOT_ENABLED, 0x0000006D, "Requested Feature functionality is not enabled") +NV_STATUS_CODE(NV_ERR_RESOURCE_LOST, 0x0000006E, "Requested resource has been destroyed") +NV_STATUS_CODE(NV_ERR_PMU_NOT_READY, 0x0000006F, "PMU is not ready or has not yet been initialized") +NV_STATUS_CODE(NV_ERR_FLCN_ERROR, 0x00000070, "Generic falcon assert or halt") +NV_STATUS_CODE(NV_ERR_FATAL_ERROR, 0x00000071, "Fatal/unrecoverable error") +NV_STATUS_CODE(NV_ERR_MEMORY_ERROR, 0x00000072, "Generic memory error") +NV_STATUS_CODE(NV_ERR_INVALID_LICENSE, 0x00000073, "License provided is rejected or invalid") +NV_STATUS_CODE(NV_ERR_NVLINK_INIT_ERROR, 0x00000074, "Nvlink Init Error") +NV_STATUS_CODE(NV_ERR_NVLINK_MINION_ERROR, 0x00000075, "Nvlink Minion Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CLOCK_ERROR, 0x00000076, "Nvlink Clock Error") +NV_STATUS_CODE(NV_ERR_NVLINK_TRAINING_ERROR, 0x00000077, "Nvlink Training Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Configuration Error") +NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt") + +// Warnings: +NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch") +NV_STATUS_CODE(NV_WARN_INCORRECT_PERFMON_DATA, 0x00010002, "WARNING Incorrect performance monitor data") +NV_STATUS_CODE(NV_WARN_MISMATCHED_SLAVE, 0x00010003, "WARNING Slave mismatch") +NV_STATUS_CODE(NV_WARN_MISMATCHED_TARGET, 0x00010004, "WARNING Target mismatch") +NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED, 0x00010005, "WARNING More processing required for the call") +NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO, 0x00010006, "WARNING Nothing to do") +NV_STATUS_CODE(NV_WARN_NULL_OBJECT, 0x00010007, "WARNING NULL object found") +NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE, 0x00010008, "WARNING value out of range") + +#endif // XAPIGEN + +#endif /* SDK_NVSTATUSCODES_H */ diff --git a/kernel-open/common/inc/nvtypes.h b/kernel-open/common/inc/nvtypes.h new file mode 100644 index 000000000..c349199cf --- /dev/null +++ b/kernel-open/common/inc/nvtypes.h @@ -0,0 +1,662 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
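/*
 * Illustrative sketch (not part of any header): a caller translating a few
 * of the status codes listed above into its own error domain.  The
 * negative errno-style return values are placeholders chosen for this
 * example, not something this SDK defines.
 */
static int exampleMapStatus(NV_STATUS status)
{
    switch (status)
    {
        case NV_OK:                return 0;
        case NV_ERR_NO_MEMORY:     return -12;   /* e.g. -ENOMEM     */
        case NV_ERR_NOT_SUPPORTED: return -95;   /* e.g. -EOPNOTSUPP */
        case NV_ERR_TIMEOUT:       return -110;  /* e.g. -ETIMEDOUT  */
        default:                   return -22;   /* e.g. -EINVAL     */
    }
}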
+ */ + +#ifndef NVTYPES_INCLUDED +#define NVTYPES_INCLUDED + +#ifdef __cplusplus +extern "C" { +#endif + +#include "cpuopsys.h" + +#ifndef NVTYPES_USE_STDINT +#define NVTYPES_USE_STDINT 0 +#endif + +#if NVTYPES_USE_STDINT +#ifdef __cplusplus +#include +#include +#else +#include +#include +#endif // __cplusplus +#endif // NVTYPES_USE_STDINT + +#ifndef __cplusplus +// Header includes to make sure wchar_t is defined for C-file compilation +// (C++ is not affected as it is a fundamental type there) +// _MSC_VER is a hack to avoid failures for old setup of UEFI builds which are +// currently set to msvc100 but do not properly set the include paths +#if defined(NV_WINDOWS) && (!defined(_MSC_VER) || (_MSC_VER > 1600)) +#include +#define NV_HAS_WCHAR_T_TYPEDEF 1 +#endif +#endif // __cplusplus + +#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__) +// ensure or force 8-bytes alignment of NV 64-bit types +#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8))) +#else +// nothing needed +#define OPTIONAL_ALIGN8_ATTR +#endif // MAKE_NV64TYPES_8BYTES_ALIGNED && i386 + + /***************************************************************************\ +|* Typedefs *| + \***************************************************************************/ + +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +//Typedefs for MISRA COMPLIANCE +typedef unsigned long long UInt64; +typedef signed long long Int64; +typedef unsigned int UInt32; +typedef signed int Int32; +typedef unsigned short UInt16; +typedef signed short Int16; +typedef unsigned char UInt8 ; +typedef signed char Int8 ; + +typedef void Void; +typedef float float32_t; +typedef double float64_t; +#endif + + +// Floating point types +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef float32_t NvF32; /* IEEE Single Precision (S1E8M23) */ +typedef float64_t NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */ +#else +typedef float NvF32; /* IEEE Single Precision (S1E8M23) */ +typedef double NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */ +#endif + + +// 8-bit: 'char' is the only 8-bit in the C89 standard and after. +#if NVTYPES_USE_STDINT +typedef uint8_t NvV8; /* "void": enumerated or multiple fields */ +typedef uint8_t NvU8; /* 0 to 255 */ +typedef int8_t NvS8; /* -128 to 127 */ +#else +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt8 NvV8; /* "void": enumerated or multiple fields */ +typedef UInt8 NvU8; /* 0 to 255 */ +typedef Int8 NvS8; /* -128 to 127 */ +#else +typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ +typedef unsigned char NvU8; /* 0 to 255 */ +typedef signed char NvS8; /* -128 to 127 */ +#endif +#endif // NVTYPES_USE_STDINT + + +#if NVTYPES_USE_STDINT +typedef uint16_t NvV16; /* "void": enumerated or multiple fields */ +typedef uint16_t NvU16; /* 0 to 65535 */ +typedef int16_t NvS16; /* -32768 to 32767 */ +#else +// 16-bit: If the compiler tells us what we can use, then use it. 
+#ifdef __INT16_TYPE__ +typedef unsigned __INT16_TYPE__ NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned __INT16_TYPE__ NvU16; /* 0 to 65535 */ +typedef signed __INT16_TYPE__ NvS16; /* -32768 to 32767 */ + +// The minimal standard for C89 and after +#else // __INT16_TYPE__ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt16 NvV16; /* "void": enumerated or multiple fields */ +typedef UInt16 NvU16; /* 0 to 65535 */ +typedef Int16 NvS16; /* -32768 to 32767 */ +#else +typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned short NvU16; /* 0 to 65535 */ +typedef signed short NvS16; /* -32768 to 32767 */ +#endif +#endif // __INT16_TYPE__ +#endif // NVTYPES_USE_STDINT + +// wchar type (fixed size types consistent across Linux/Windows boundaries) +#if defined(NV_HAS_WCHAR_T_TYPEDEF) + typedef wchar_t NvWchar; +#else + typedef NvV16 NvWchar; +#endif + +// Macro to build an NvU32 from four bytes, listed from msb to lsb +#define NvU32_BUILD(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d)) + +#if NVTYPES_USE_STDINT +typedef uint32_t NvV32; /* "void": enumerated or multiple fields */ +typedef uint32_t NvU32; /* 0 to 4294967295 */ +typedef int32_t NvS32; /* -2147483648 to 2147483647 */ +#else +// 32-bit: If the compiler tells us what we can use, then use it. +#ifdef __INT32_TYPE__ +typedef unsigned __INT32_TYPE__ NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned __INT32_TYPE__ NvU32; /* 0 to 4294967295 */ +typedef signed __INT32_TYPE__ NvS32; /* -2147483648 to 2147483647 */ + +// Older compilers +#else // __INT32_TYPE__ + +// For historical reasons, NvU32/NvV32 are defined to different base intrinsic +// types than NvS32 on some platforms. +// Mainly for 64-bit linux, where long is 64 bits and win9x, where int is 16 bit. +#if (defined(NV_UNIX) || defined(vxworks) || defined(NV_WINDOWS_CE) || \ + defined(__arm) || defined(__IAR_SYSTEMS_ICC__) || defined(NV_QNX) || \ + defined(NV_INTEGRITY) || defined(NV_MODS) || \ + defined(__GNUC__) || defined(__clang__) || defined(NV_MACINTOSH_64)) && \ + (!defined(NV_MACINTOSH) || defined(NV_MACINTOSH_64)) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt32 NvV32; /* "void": enumerated or multiple fields */ +typedef UInt32 NvU32; /* 0 to 4294967295 */ +#else +typedef unsigned int NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned int NvU32; /* 0 to 4294967295 */ +#endif + +// The minimal standard for C89 and after +#else // (defined(NV_UNIX) || defined(vxworks) || ... +typedef unsigned long NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned long NvU32; /* 0 to 4294967295 */ +#endif // (defined(NV_UNIX) || defined(vxworks) || ... 
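/*
 * Illustrative sketch (not part of the header): NvU32_BUILD() above packs
 * four bytes, most significant first.
 */
static void exampleBuildWord(void)
{
    NvU32 word = NvU32_BUILD(0x12, 0x34, 0x56, 0x78);  /* 0x12345678 */
    (void)word;
}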
+ +// Mac OS 32-bit still needs this +#if defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64) +typedef signed long NvS32; /* -2147483648 to 2147483647 */ +#else +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef Int32 NvS32; /* -2147483648 to 2147483647 */ +#else +typedef signed int NvS32; /* -2147483648 to 2147483647 */ +#endif +#endif // defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64) +#endif // __INT32_TYPE__ +#endif // NVTYPES_USE_STDINT + + + +#if NVTYPES_USE_STDINT +typedef uint64_t NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef int64_t NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ + +#define NvU64_fmtX PRIX64 +#define NvU64_fmtx PRIx64 +#define NvU64_fmtu PRIu64 +#define NvU64_fmto PRIo64 +#define NvS64_fmtd PRId64 +#define NvS64_fmti PRIi64 +#else +// 64-bit types for compilers that support them, plus some obsolete variants +#if defined(__GNUC__) || defined(__clang__) || defined(__arm) || \ + defined(__IAR_SYSTEMS_ICC__) || defined(__ghs__) || defined(_WIN64) || \ + defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined (__xlC__) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef Int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ +#else +typedef unsigned long long NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef long long NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ +#endif + +#define NvU64_fmtX "llX" +#define NvU64_fmtx "llx" +#define NvU64_fmtu "llu" +#define NvU64_fmto "llo" +#define NvS64_fmtd "lld" +#define NvS64_fmti "lli" + +// Microsoft since 2003 -- https://msdn.microsoft.com/en-us/library/29dh1w7z.aspx +#else +typedef unsigned __int64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef __int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ + +#define NvU64_fmtX "I64X" +#define NvU64_fmtx "I64x" +#define NvU64_fmtu "I64u" +#define NvU64_fmto "I64o" +#define NvS64_fmtd "I64d" +#define NvS64_fmti "I64i" + +#endif +#endif // NVTYPES_USE_STDINT + +#ifdef NV_TYPESAFE_HANDLES +/* + * Can't use opaque pointer as clients might be compiled with mismatched + * pointer sizes. TYPESAFE check will eventually be removed once all clients + * have transistioned safely to NvHandle. + * The plan is to then eventually scale up the handle to be 64-bits. + */ +typedef struct +{ + NvU32 val; +} NvHandle; +#else +/* + * For compatibility with modules that haven't moved typesafe handles. 
+ */ +typedef NvU32 NvHandle; +#endif // NV_TYPESAFE_HANDLES + +/* Boolean type */ +typedef NvU8 NvBool; +#define NV_TRUE ((NvBool)(0 == 0)) +#define NV_FALSE ((NvBool)(0 != 0)) + +/* Tristate type: NV_TRISTATE_FALSE, NV_TRISTATE_TRUE, NV_TRISTATE_INDETERMINATE */ +typedef NvU8 NvTristate; +#define NV_TRISTATE_FALSE ((NvTristate) 0) +#define NV_TRISTATE_TRUE ((NvTristate) 1) +#define NV_TRISTATE_INDETERMINATE ((NvTristate) 2) + +/* Macros to extract the low and high parts of a 64-bit unsigned integer */ +/* Also designed to work if someone happens to pass in a 32-bit integer */ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffffU)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffffU)) +#else +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffff)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffff)) +#endif +#define NvU40_HI32(n) ((NvU32)((((NvU64)(n)) >> 8) & 0xffffffffU)) +#define NvU40_HI24of32(n) ((NvU32)( (NvU64)(n) & 0xffffff00U)) + +/* Macros to get the MSB and LSB of a 32 bit unsigned number */ +#define NvU32_HI16(n) ((NvU16)((((NvU32)(n)) >> 16) & 0xffffU)) +#define NvU32_LO16(n) ((NvU16)(( (NvU32)(n)) & 0xffffU)) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +#if defined(NV_64_BITS) + +typedef void* NvP64; /* 64 bit void pointer */ +typedef NvU64 NvUPtr; /* pointer sized unsigned int */ +typedef NvS64 NvSPtr; /* pointer sized signed int */ +typedef NvU64 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) (n) +#define NvP64_fmt "%p" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(v)) +#define NvP64_PLUS_OFFSET(p,o) (NvP64)((NvU64)(p) + (NvU64)(o)) + +#define NvUPtr_fmtX NvU64_fmtX +#define NvUPtr_fmtx NvU64_fmtx +#define NvUPtr_fmtu NvU64_fmtu +#define NvUPtr_fmto NvU64_fmto +#define NvSPtr_fmtd NvS64_fmtd +#define NvSPtr_fmti NvS64_fmti + +#else + +typedef NvU64 NvP64; /* 64 bit void pointer */ +typedef NvU32 NvUPtr; /* pointer sized unsigned int */ +typedef NvS32 NvSPtr; /* pointer sized signed int */ +typedef NvU32 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) ((void *)(NvUPtr)(n)) +#define NvP64_fmt "0x%llx" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(NvUPtr)(v)) +#define NvP64_PLUS_OFFSET(p,o) ((p) + (NvU64)(o)) + +#define NvUPtr_fmtX "X" +#define NvUPtr_fmtx "x" +#define NvUPtr_fmtu "u" +#define NvUPtr_fmto "o" +#define NvSPtr_fmtd "d" +#define NvSPtr_fmti "i" + +#endif + +#define NvP64_NULL (NvP64)0 + +/*! + * Helper macro to pack an @ref NvU64_ALIGN32 structure from a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64_ALIGN32 structure to pack + * @param[in] pSrc Pointer to NvU64 with which to pack + */ +#define NvU64_ALIGN32_PACK(pDst, pSrc) \ +do { \ + (pDst)->lo = NvU64_LO32(*(pSrc)); \ + (pDst)->hi = NvU64_HI32(*(pSrc)); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure into a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64 in which to unpack + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure from which to unpack + */ +#define NvU64_ALIGN32_UNPACK(pDst, pSrc) \ +do { \ + (*(pDst)) = NvU64_ALIGN32_VAL(pSrc); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure as a @ref NvU64. 
+ * + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure to unpack + */ +#define NvU64_ALIGN32_VAL(pSrc) \ + ((NvU64) ((NvU64)((pSrc)->lo) | (((NvU64)(pSrc)->hi) << 32U))) + +/*! + * Helper macro to check whether the 32 bit aligned 64 bit number is zero. + * + * @param[in] _pU64 Pointer to NvU64_ALIGN32 structure. + * + * @return + * NV_TRUE _pU64 is zero. + * NV_FALSE otherwise. + */ +#define NvU64_ALIGN32_IS_ZERO(_pU64) \ + (((_pU64)->lo == 0U) && ((_pU64)->hi == 0U)) + +/*! + * Helper macro to sub two 32 aligned 64 bit numbers on 64 bit processor. + * + * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure. + * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure. + * @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure. + */ +#define NvU64_ALIGN32_ADD(pDst, pSrc1, pSrc2) \ +do { \ + NvU64 __dst, __src1, __scr2; \ + \ + NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \ + NvU64_ALIGN32_UNPACK(&__scr2, (pSrc2)); \ + __dst = __src1 + __scr2; \ + NvU64_ALIGN32_PACK((pDst), &__dst); \ +} while (NV_FALSE) + +/*! + * Helper macro to sub two 32 aligned 64 bit numbers on 64 bit processor. + * + * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure. + * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure. + * @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure. + */ +#define NvU64_ALIGN32_SUB(pDst, pSrc1, pSrc2) \ +do { \ + NvU64 __dst, __src1, __scr2; \ + \ + NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \ + NvU64_ALIGN32_UNPACK(&__scr2, (pSrc2)); \ + __dst = __src1 - __scr2; \ + NvU64_ALIGN32_PACK((pDst), &__dst); \ +} while (NV_FALSE) + +/*! + * Structure for representing 32 bit aligned NvU64 (64-bit unsigned integer) + * structures. This structure must be used because the 32 bit processor and + * 64 bit processor compilers will pack/align NvU64 differently. + * + * One use case is RM being 64 bit proc whereas PMU being 32 bit proc, this + * alignment difference will result in corrupted transactions between the RM + * and PMU. + * + * See the @ref NvU64_ALIGN32_PACK and @ref NvU64_ALIGN32_UNPACK macros for + * packing and unpacking these structures. + * + * @note The intention of this structure is to provide a datatype which will + * packed/aligned consistently and efficiently across all platforms. + * We don't want to use "NV_DECLARE_ALIGNED(NvU64, 8)" because that + * leads to memory waste on our 32-bit uprocessors (e.g. FALCONs) where + * DMEM efficiency is vital. + */ +typedef struct +{ + /*! + * Low 32 bits. + */ + NvU32 lo; + /*! + * High 32 bits. + */ + NvU32 hi; +} NvU64_ALIGN32; + +/* Useful macro to hide required double cast */ +#define NV_PTR_TO_NvP64(n) (NvP64)(NvUPtr)(n) +#define NV_SIGN_EXT_PTR_TO_NvP64(p) ((NvP64)(NvS64)(NvSPtr)(p)) +#define KERNEL_POINTER_TO_NvP64(p) ((NvP64)(uintptr_t)(p)) + + /***************************************************************************\ +|* *| +|* Limits for common types. *| +|* *| + \***************************************************************************/ + +/* Explanation of the current form of these limits: + * + * - Decimal is used, as hex values are by default positive. + * - Casts are not used, as usage in the preprocessor itself (#if) ends poorly. + * - The subtraction of 1 for some MIN values is used to get around the fact + * that the C syntax actually treats -x as NEGATE(x) instead of a distinct + * number. Since 214748648 isn't a valid positive 32-bit signed value, we + * take the largest valid positive signed number, negate it, and subtract 1. 
+ */ +#define NV_S8_MIN (-128) +#define NV_S8_MAX (+127) +#define NV_U8_MIN (0U) +#define NV_U8_MAX (+255U) +#define NV_S16_MIN (-32768) +#define NV_S16_MAX (+32767) +#define NV_U16_MIN (0U) +#define NV_U16_MAX (+65535U) +#define NV_S32_MIN (-2147483647 - 1) +#define NV_S32_MAX (+2147483647) +#define NV_U32_MIN (0U) +#define NV_U32_MAX (+4294967295U) +#define NV_S64_MIN (-9223372036854775807LL - 1LL) +#define NV_S64_MAX (+9223372036854775807LL) +#define NV_U64_MIN (0ULL) +#define NV_U64_MAX (+18446744073709551615ULL) + +/* Aligns fields in structs so they match up between 32 and 64 bit builds */ +#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX) +#define NV_ALIGN_BYTES(size) __attribute__ ((aligned (size))) +#elif defined(__arm) +#define NV_ALIGN_BYTES(size) __align(ALIGN) +#else +// XXX This is dangerously nonportable! We really shouldn't provide a default +// version of this that doesn't do anything. +#define NV_ALIGN_BYTES(size) +#endif + +// NV_DECLARE_ALIGNED() can be used on all platforms. +// This macro form accounts for the fact that __declspec on Windows is required +// before the variable type, +// and NV_ALIGN_BYTES is required after the variable name. +#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX) +#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) TYPE_VAR __attribute__ ((aligned (ALIGN))) +#elif defined(_MSC_VER) +#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __declspec(align(ALIGN)) TYPE_VAR +#elif defined(__arm) +#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __align(ALIGN) TYPE_VAR +#endif + + /***************************************************************************\ +|* Function Declaration Types *| + \***************************************************************************/ + +// stretching the meaning of "nvtypes", but this seems to least offensive +// place to re-locate these from nvos.h which cannot be included by a number +// of builds that need them + +#if defined(_MSC_VER) + + #if _MSC_VER >= 1310 + #define NV_NOINLINE __declspec(noinline) + #else + #define NV_NOINLINE + #endif + + #define NV_INLINE __inline + + #if _MSC_VER >= 1200 + #define NV_FORCEINLINE __forceinline + #else + #define NV_FORCEINLINE __inline + #endif + + #define NV_APIENTRY __stdcall + #define NV_FASTCALL __fastcall + #define NV_CDECLCALL __cdecl + #define NV_STDCALL __stdcall + + #define NV_FORCERESULTCHECK + + #define NV_ATTRIBUTE_UNUSED + + #define NV_FORMAT_PRINTF(_f, _a) + +#else // ! defined(_MSC_VER) + + #if defined(__GNUC__) + #if (__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) && (__GNUC_PATCHLEVEL__ >= 1)) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + #elif defined(__clang__) + #if __has_attribute(noinline) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + #elif defined(__arm) && (__ARMCC_VERSION >= 300000) + #define NV_NOINLINE __attribute__((__noinline__)) + #elif (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) ||\ + (defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x590)) + #define NV_NOINLINE __attribute__((__noinline__)) + #elif defined (__INTEL_COMPILER) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + + #if !defined(NV_NOINLINE) + #define NV_NOINLINE + #endif + + /* GreenHills compiler defines __GNUC__, but doesn't support + * __inline__ keyword. 
*/ + #if defined(__ghs__) + #define NV_INLINE inline + #elif defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_INLINE __inline__ + #elif defined (macintosh) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) + #define NV_INLINE inline + #elif defined(__arm) + #define NV_INLINE __inline + #else + #define NV_INLINE + #endif + + /* Don't force inline on DEBUG builds -- it's annoying for debuggers. */ + #if !defined(DEBUG) + /* GreenHills compiler defines __GNUC__, but doesn't support + * __attribute__ or __inline__ keyword. */ + #if defined(__ghs__) + #define NV_FORCEINLINE inline + #elif defined(__GNUC__) + // GCC 3.1 and beyond support the always_inline function attribute. + #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__clang__) + #if __has_attribute(always_inline) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__arm) && (__ARMCC_VERSION >= 220000) + // RVDS 2.2 also supports forceinline, but ADS 1.2 does not + #define NV_FORCEINLINE __forceinline + #else /* defined(__GNUC__) */ + #define NV_FORCEINLINE NV_INLINE + #endif + #else + #define NV_FORCEINLINE NV_INLINE + #endif + + #define NV_APIENTRY + #define NV_FASTCALL + #define NV_CDECLCALL + #define NV_STDCALL + + /* + * The 'warn_unused_result' function attribute prompts GCC to issue a + * warning if the result of a function tagged with this attribute + * is ignored by a caller. In combination with '-Werror', it can be + * used to enforce result checking in RM code; at this point, this + * is only done on UNIX. + */ + #if defined(__GNUC__) && defined(NV_UNIX) + #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #elif defined(__clang__) + #if __has_attribute(warn_unused_result) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #else /* defined(__GNUC__) */ + #define NV_FORCERESULTCHECK + #endif + + #if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_ATTRIBUTE_UNUSED __attribute__((__unused__)) + #else + #define NV_ATTRIBUTE_UNUSED + #endif + + /* + * Functions decorated with NV_FORMAT_PRINTF(f, a) have a format string at + * parameter number 'f' and variadic arguments start at parameter number 'a'. + * (Note that for C++ methods, there is an implicit 'this' parameter so + * explicit parameters are numbered from 2.) + */ + #if defined(__GNUC__) + #define NV_FORMAT_PRINTF(_f, _a) __attribute__((format(printf, _f, _a))) + #else + #define NV_FORMAT_PRINTF(_f, _a) + #endif + +#endif // defined(_MSC_VER) + +#ifdef __cplusplus +} +#endif + +#endif /* NVTYPES_INCLUDED */ diff --git a/kernel-open/common/inc/os-interface.h b/kernel-open/common/inc/os-interface.h new file mode 100644 index 000000000..7163bea0e --- /dev/null +++ b/kernel-open/common/inc/os-interface.h @@ -0,0 +1,255 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* + * Os interface definitions needed by os-interface.c + */ + +#ifndef OS_INTERFACE_H +#define OS_INTERFACE_H + +/******************* Operating System Interface Routines *******************\ +* * +* Operating system wrapper functions used to abstract the OS. * +* * +\***************************************************************************/ + +#include +#include +#include "nv_stdarg.h" +#include +#include +#include + + + +typedef struct +{ + NvU32 os_major_version; + NvU32 os_minor_version; + NvU32 os_build_number; + const char * os_build_version_str; + const char * os_build_date_plus_str; +}os_version_info; + +/* Each OS defines its own version of this opaque type */ +struct os_work_queue; + +/* Each OS defines its own version of this opaque type */ +typedef struct os_wait_queue os_wait_queue; + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for OS interface. 
+ * + * --------------------------------------------------------------------------- + */ + +NvU64 NV_API_CALL os_get_num_phys_pages (void); +NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64); +void NV_API_CALL os_free_mem (void *); +NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *); +NvU64 NV_API_CALL os_get_current_tick (void); +NvU64 NV_API_CALL os_get_current_tick_hr (void); +NvU64 NV_API_CALL os_get_tick_resolution (void); +NV_STATUS NV_API_CALL os_delay (NvU32); +NV_STATUS NV_API_CALL os_delay_us (NvU32); +NvU64 NV_API_CALL os_get_cpu_frequency (void); +NvU32 NV_API_CALL os_get_current_process (void); +void NV_API_CALL os_get_current_process_name (char *, NvU32); +NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *); +char* NV_API_CALL os_string_copy (char *, const char *); +NvU32 NV_API_CALL os_string_length (const char *); +NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32); +NvS32 NV_API_CALL os_string_compare (const char *, const char *); +NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...); +NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list); +void NV_API_CALL os_log_error (const char *, va_list); +void* NV_API_CALL os_mem_copy (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32); +void* NV_API_CALL os_mem_set (void *, NvU8, NvU32); +NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32); +void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *); +NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8); +NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16); +NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32); +NvBool NV_API_CALL os_pci_remove_supported (void); +void NV_API_CALL os_pci_remove (void *); +void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32); +void NV_API_CALL os_unmap_kernel_space (void *, NvU64); +void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **); +void NV_API_CALL os_unmap_user_space (void *, NvU64, void *); +NV_STATUS NV_API_CALL os_flush_cpu_cache (void); +NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void); +NV_STATUS NV_API_CALL os_flush_user_cache (void); +void NV_API_CALL os_flush_cpu_write_combine_buffer(void); +NvU8 NV_API_CALL os_io_read_byte (NvU32); +NvU16 NV_API_CALL os_io_read_word (NvU32); +NvU32 NV_API_CALL os_io_read_dword (NvU32); +void NV_API_CALL os_io_write_byte (NvU32, NvU8); +void NV_API_CALL os_io_write_word (NvU32, NvU16); +void NV_API_CALL os_io_write_dword (NvU32, NvU32); +NvBool NV_API_CALL os_is_administrator (void); +NvBool NV_API_CALL os_allow_priority_override (void); +void NV_API_CALL os_dbg_init (void); +void NV_API_CALL os_dbg_breakpoint (void); +void NV_API_CALL os_dbg_set_level (NvU32); +NvU32 NV_API_CALL os_get_cpu_count (void); +NvU32 NV_API_CALL os_get_cpu_number (void); +void NV_API_CALL os_disable_console_access (void); +void NV_API_CALL os_enable_console_access (void); +NV_STATUS NV_API_CALL os_registry_init (void); +NV_STATUS NV_API_CALL os_schedule (void); +NV_STATUS NV_API_CALL os_alloc_spinlock (void **); +void NV_API_CALL os_free_spinlock (void *); +NvU64 NV_API_CALL os_acquire_spinlock (void *); +void NV_API_CALL os_release_spinlock (void *, NvU64); 
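An illustrative usage sketch (not part of the header) for the spinlock wrappers declared just above: os_acquire_spinlock() returns an NvU64 cookie, presumably saved interrupt state, which is handed back to os_release_spinlock(). The function names and the global lock variable are hypothetical, and error handling is omitted.

static void *g_lock;                           /* hypothetical global */

static NV_STATUS example_init(void)
{
    return os_alloc_spinlock(&g_lock);         /* allocate the OS-specific lock */
}

static void example_critical_section(void)
{
    NvU64 flags = os_acquire_spinlock(g_lock);
    /* ... touch data shared with interrupt or other CPU contexts ... */
    os_release_spinlock(g_lock, flags);        /* pass the cookie back */
}

static void example_shutdown(void)
{
    os_free_spinlock(g_lock);
}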
+NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *); +NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *); +NV_STATUS NV_API_CALL os_alloc_mutex (void **); +void NV_API_CALL os_free_mutex (void *); +NV_STATUS NV_API_CALL os_acquire_mutex (void *); +NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *); +void NV_API_CALL os_release_mutex (void *); +void* NV_API_CALL os_alloc_semaphore (NvU32); +void NV_API_CALL os_free_semaphore (void *); +NV_STATUS NV_API_CALL os_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_release_semaphore (void *); +NvBool NV_API_CALL os_semaphore_may_sleep (void); +NV_STATUS NV_API_CALL os_get_version_info (os_version_info*); +NvBool NV_API_CALL os_is_isr (void); +NvBool NV_API_CALL os_pat_supported (void); +void NV_API_CALL os_dump_stack (void); +NvBool NV_API_CALL os_is_efi_enabled (void); +NvBool NV_API_CALL os_is_xen_dom0 (void); +NvBool NV_API_CALL os_is_vgx_hyper (void); +NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32); +NvBool NV_API_CALL os_is_grid_supported (void); +NvU32 NV_API_CALL os_get_grid_csp_support (void); +void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64); +void NV_API_CALL os_bug_check (NvU32, const char *); +NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32); +NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**); +NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *); +NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *); +NV_STATUS NV_API_CALL os_get_euid (NvU32 *); +NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr); +NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *); +void NV_API_CALL os_add_record_for_crashLog (void *, NvU32); +void NV_API_CALL os_delete_record_for_crashLog (void *); +NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32); +NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *); +NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *); +NV_STATUS NV_API_CALL os_get_page (NvU64 address); +NV_STATUS NV_API_CALL os_put_page (NvU64 address); +NvU32 NV_API_CALL os_get_page_refcount (NvU64 address); +NvU32 NV_API_CALL os_count_tail_pages (NvU64 address); +void NV_API_CALL os_free_pages_phys (NvU64, NvU32); +NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *); +NV_STATUS NV_API_CALL os_open_temporary_file (void **); +void NV_API_CALL os_close_file (void *); +NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **); +NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64); +NvBool NV_API_CALL os_is_nvswitch_present (void); +void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16); +NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **); +void NV_API_CALL os_free_wait_queue (os_wait_queue *); +void NV_API_CALL os_wait_uninterruptible (os_wait_queue *); +void NV_API_CALL os_wait_interruptible (os_wait_queue *); +void NV_API_CALL os_wake_up (os_wait_queue *); +nv_cap_t* NV_API_CALL os_nv_cap_init (const char *); +nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int); +nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int); +void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *); +int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int); 
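A minimal sketch (not part of the header) of how the wait-queue helpers declared above pair up: one context blocks on the queue while another signals it with os_wake_up(). The function names are hypothetical and error handling is omitted.

static os_wait_queue *g_wq;                    /* hypothetical global */

static NV_STATUS example_setup(void)
{
    return os_alloc_wait_queue(&g_wq);
}

static void example_waiter(void)
{
    os_wait_uninterruptible(g_wq);             /* sleep until os_wake_up() is called */
}

static void example_signaller(void)
{
    os_wake_up(g_wq);                          /* release the sleeping waiter */
}

static void example_teardown(void)
{
    os_free_wait_queue(g_wq);
}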
+void NV_API_CALL os_nv_cap_close_fd (int); + + + + + + + + + + + + + + + +extern NvU32 os_page_size; +extern NvU64 os_page_mask; +extern NvU8 os_page_shift; +extern NvU32 os_sev_status; +extern NvBool os_sev_enabled; +extern NvBool os_dma_buf_enabled; + +/* + * --------------------------------------------------------------------------- + * + * Debug macros. + * + * --------------------------------------------------------------------------- + */ + +#define NV_DBG_INFO 0x0 +#define NV_DBG_SETUP 0x1 +#define NV_DBG_USERERRORS 0x2 +#define NV_DBG_WARNINGS 0x3 +#define NV_DBG_ERRORS 0x4 + + +void NV_API_CALL out_string(const char *str); +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...); + +#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__) + +#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status) + +/* + * Fields for os_lock_user_pages flags parameter + */ +#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001 + + + + + + + + +#endif /* OS_INTERFACE_H */ diff --git a/kernel-open/common/inc/os/nv_memory_type.h b/kernel-open/common/inc/os/nv_memory_type.h new file mode 100644 index 000000000..34255c758 --- /dev/null +++ b/kernel-open/common/inc/os/nv_memory_type.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NV_MEMORY_TYPE_H +#define NV_MEMORY_TYPE_H + +#define NV_MEMORY_NONCONTIGUOUS 0 +#define NV_MEMORY_CONTIGUOUS 1 + +#define NV_MEMORY_CACHED 0 +#define NV_MEMORY_UNCACHED 1 +#define NV_MEMORY_WRITECOMBINED 2 +#define NV_MEMORY_WRITEBACK 5 +#define NV_MEMORY_DEFAULT 6 +#define NV_MEMORY_UNCACHED_WEAK 7 + +#define NV_PROTECT_READABLE 1 +#define NV_PROTECT_WRITEABLE 2 +#define NV_PROTECT_READ_WRITE (NV_PROTECT_READABLE | NV_PROTECT_WRITEABLE) + +#endif /* NV_MEMORY_TYPE_H */ diff --git a/kernel-open/common/inc/rm-gpu-ops.h b/kernel-open/common/inc/rm-gpu-ops.h new file mode 100644 index 000000000..2e97de475 --- /dev/null +++ b/kernel-open/common/inc/rm-gpu-ops.h @@ -0,0 +1,110 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _RM_GPU_OPS_H_ +#define _RM_GPU_OPS_H_ + + + +#include +#include +#include "nv_stdarg.h" +#include +#include + +NV_STATUS NV_API_CALL rm_gpu_ops_create_session (nvidia_stack_t *, nvgpuSessionHandle_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (nvidia_stack_t *, nvgpuSessionHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_device_create (nvidia_stack_t *, nvgpuSessionHandle_t, const nvgpuInfo_t *, const NvProcessorUuid *, nvgpuDeviceHandle_t *, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (nvidia_stack_t *, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *, nvgpuAddressSpaceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(nvidia_stack_t *, void *, NvLength, NvU32 , nvgpuPmaAllocationOptions_t, NvU64 *); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength , NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(nvidia_stack_t *, nvgpuDeviceHandle_t, void **, const nvgpuPmaStatistics_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(nvidia_stack_t *sp, void *, nvPmaEvictPagesCallback, nvPmaEvictRangeCallback, void *); +void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(nvidia_stack_t *sp, void *); + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_sys(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_get_p2p_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, nvgpuP2PCapsParams_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, NvLength, void **, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, void*); +NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, const nvgpuChannelAllocParams_t *, nvgpuChannelHandle_t *, nvgpuChannelInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t *, nvgpuChannelHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_memory_free(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64); +NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuCaps_t); +NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp, nvgpuDeviceHandle_t, nvgpuCesCaps_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *, const NvProcessorUuid *pUuid, const nvgpuClientInfo_t *, nvgpuInfo_t *); +NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuAddressSpaceHandle_t, NvU64 *); + +NV_STATUS NV_API_CALL rm_gpu_ops_dup_memory (nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, NvHandle *, 
nvgpuMemoryInfo_t); + +NV_STATUS NV_API_CALL rm_gpu_ops_free_duped_handle(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle); +NV_STATUS NV_API_CALL rm_gpu_ops_get_fb_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFbInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_ecc_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuEccInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *, nvgpuDeviceHandle_t, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, void *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool *); +NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_own_access_cntr_intr(nvidia_stack_t *, nvgpuSessionHandle_t, nvgpuAccessCntrInfo_t, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, nvgpuAccessCntrConfig_t); +NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32); +NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_p2p_object_create(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, NvHandle *); +void NV_API_CALL rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *, nvgpuSessionHandle_t, NvHandle); +NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalMappingInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_retain_channel(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvHandle, NvHandle, void **, nvgpuChannelInstanceInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_bind_channel_resources(nvidia_stack_t *, void *, nvgpuChannelResourceBindParams_t); +void NV_API_CALL rm_gpu_ops_release_channel(nvidia_stack_t *, void *); +void NV_API_CALL rm_gpu_ops_stop_channel(nvidia_stack_t *, void *, NvBool); +NV_STATUS NV_API_CALL rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvP64, NvU64, NvU64, nvgpuExternalMappingInfo_t); +NV_STATUS NV_API_CALL rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *, nvgpuDeviceHandle_t, const void *); + +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *, nvgpuDeviceHandle_t, const nvgpuPagingChannelAllocParams_t *, nvgpuPagingChannelHandle_t *, nvgpuPagingChannelInfo_t); +void NV_API_CALL rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *, nvgpuPagingChannelHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channels_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t, NvU64 *); +void NV_API_CALL rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t); +NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, nvgpuPagingChannelHandle_t, char *, NvU32); + + + + + + + + + + +#endif diff --git a/kernel-open/conftest.sh 
b/kernel-open/conftest.sh new file mode 100755 index 000000000..27ecf500b --- /dev/null +++ b/kernel-open/conftest.sh @@ -0,0 +1,5759 @@ +#!/bin/sh + +PATH="${PATH}:/bin:/sbin:/usr/bin" + +# make sure we are in the directory containing this script +SCRIPTDIR=`dirname $0` +cd $SCRIPTDIR + +CC="$1" +ARCH=$2 +ISYSTEM=`$CC -print-file-name=include 2> /dev/null` +SOURCES=$3 +HEADERS=$SOURCES/include +OUTPUT=$4 +XEN_PRESENT=1 +PREEMPT_RT_PRESENT=0 +KERNEL_ARCH="$ARCH" + +if [ "$ARCH" = "i386" -o "$ARCH" = "x86_64" ]; then + if [ -d "$SOURCES/arch/x86" ]; then + KERNEL_ARCH="x86" + fi +fi + +# VGX_BUILD parameter defined only for VGX builds (vGPU Host driver) +# VGX_KVM_BUILD parameter defined only vGPU builds on KVM hypervisor +# GRID_BUILD parameter defined only for GRID builds (GRID Guest driver) +# GRID_BUILD_CSP parameter defined only for GRID CSP builds (GRID Guest driver for CSPs) + +test_xen() { + # + # Determine if the target kernel is a Xen kernel. It used to be + # sufficient to check for CONFIG_XEN, but the introduction of + # modular para-virtualization (CONFIG_PARAVIRT, etc.) and + # Xen guest support, it is no longer possible to determine the + # target environment at build time. Therefore, if both + # CONFIG_XEN and CONFIG_PARAVIRT are present, text_xen() treats + # the kernel as a stand-alone kernel. + # + if ! test_configuration_option CONFIG_XEN || + test_configuration_option CONFIG_PARAVIRT; then + XEN_PRESENT=0 + fi +} + +append_conftest() { + # + # Echo data from stdin: this is a transitional function to make it easier + # to port conftests from drivers with parallel conftest generation to + # older driver versions + # + + while read LINE; do + echo ${LINE} + done +} + +translate_and_preprocess_header_files() { + # Inputs: + # $1: list of relative file paths + # + # This routine creates an upper case, underscore version of each of the + # relative file paths, and uses that as the token to either define or + # undefine in a C header file. For example, linux/fence.h becomes + # NV_LINUX_FENCE_H_PRESENT, and that is either defined or undefined, in the + # output (which goes to stdout, just like the rest of this file). + + # -MG or -MD can interfere with the use of -M and -M -MG for testing file + # existence; filter out any occurrences from CFLAGS. CFLAGS is intentionally + # wrapped with whitespace in the input to sed(1) so the regex can match zero + # or more occurrences of "-MD" or "-MG", surrounded by whitespace to avoid + # accidental matches with tokens that happen to contain either of those + # strings, without special handling of the beginning or the end of the line. + TEST_CFLAGS=`echo "-E -M $CFLAGS " | sed -e 's/\( -M[DG]\)* / /g'` + + for file in $@; do + local file_define=NV_`echo $file | tr '/.' '_' | tr '-' '_' | tr 'a-z' 'A-Z'`_PRESENT + + CODE="#include <$file>" + + if echo "$CODE" | $CC $TEST_CFLAGS - > /dev/null 2>&1; then + echo "#define $file_define" + else + # If preprocessing failed, it could have been because the header + # file under test is not present, or because it is present but + # depends upon the inclusion of other header files. Attempting + # preprocessing again with -MG will ignore a missing header file + # but will still fail if the header file is present. + if echo "$CODE" | $CC $TEST_CFLAGS -MG - > /dev/null 2>&1; then + echo "#undef $file_define" + else + echo "#define $file_define" + fi + fi + done +} + +test_headers() { + # + # Determine which header files (of a set that may or may not be + # present) are provided by the target kernel. 
+ # + FILES="asm/system.h" + FILES="$FILES drm/drmP.h" + FILES="$FILES drm/drm_auth.h" + FILES="$FILES drm/drm_gem.h" + FILES="$FILES drm/drm_crtc.h" + FILES="$FILES drm/drm_atomic.h" + FILES="$FILES drm/drm_atomic_helper.h" + FILES="$FILES drm/drm_encoder.h" + FILES="$FILES drm/drm_atomic_uapi.h" + FILES="$FILES drm/drm_drv.h" + FILES="$FILES drm/drm_framebuffer.h" + FILES="$FILES drm/drm_connector.h" + FILES="$FILES drm/drm_probe_helper.h" + FILES="$FILES drm/drm_blend.h" + FILES="$FILES drm/drm_fourcc.h" + FILES="$FILES drm/drm_prime.h" + FILES="$FILES drm/drm_plane.h" + FILES="$FILES drm/drm_vblank.h" + FILES="$FILES drm/drm_file.h" + FILES="$FILES drm/drm_ioctl.h" + FILES="$FILES drm/drm_device.h" + FILES="$FILES drm/drm_mode_config.h" + FILES="$FILES dt-bindings/interconnect/tegra_icc_id.h" + FILES="$FILES generated/autoconf.h" + FILES="$FILES generated/compile.h" + FILES="$FILES generated/utsrelease.h" + FILES="$FILES linux/efi.h" + FILES="$FILES linux/kconfig.h" + FILES="$FILES linux/platform/tegra/mc_utils.h" + FILES="$FILES linux/semaphore.h" + FILES="$FILES linux/printk.h" + FILES="$FILES linux/ratelimit.h" + FILES="$FILES linux/prio_tree.h" + FILES="$FILES linux/log2.h" + FILES="$FILES linux/of.h" + FILES="$FILES linux/bug.h" + FILES="$FILES linux/sched/signal.h" + FILES="$FILES linux/sched/task.h" + FILES="$FILES linux/sched/task_stack.h" + FILES="$FILES xen/ioemu.h" + FILES="$FILES linux/fence.h" + FILES="$FILES linux/dma-resv.h" + FILES="$FILES soc/tegra/chip-id.h" + FILES="$FILES soc/tegra/fuse.h" + FILES="$FILES soc/tegra/tegra_bpmp.h" + FILES="$FILES video/nv_internal.h" + FILES="$FILES linux/platform/tegra/dce/dce-client-ipc.h" + FILES="$FILES linux/nvhost.h" + FILES="$FILES linux/nvhost_t194.h" + FILES="$FILES asm/book3s/64/hash-64k.h" + FILES="$FILES asm/set_memory.h" + FILES="$FILES asm/prom.h" + FILES="$FILES asm/powernv.h" + FILES="$FILES linux/atomic.h" + FILES="$FILES asm/barrier.h" + FILES="$FILES asm/opal-api.h" + FILES="$FILES sound/hdaudio.h" + FILES="$FILES asm/pgtable_types.h" + FILES="$FILES linux/stringhash.h" + FILES="$FILES linux/dma-map-ops.h" + FILES="$FILES rdma/peer_mem.h" + FILES="$FILES sound/hda_codec.h" + FILES="$FILES linux/dma-buf.h" + FILES="$FILES linux/time.h" + FILES="$FILES linux/platform_device.h" + FILES="$FILES linux/mutex.h" + FILES="$FILES linux/reset.h" + FILES="$FILES linux/of_platform.h" + FILES="$FILES linux/of_device.h" + FILES="$FILES linux/of_gpio.h" + FILES="$FILES linux/gpio.h" + FILES="$FILES linux/gpio/consumer.h" + FILES="$FILES linux/interconnect.h" + FILES="$FILES linux/pm_runtime.h" + FILES="$FILES linux/clk.h" + FILES="$FILES linux/clk-provider.h" + FILES="$FILES linux/ioasid.h" + FILES="$FILES linux/stdarg.h" + FILES="$FILES linux/iosys-map.h" + FILES="$FILES asm/coco.h" + + translate_and_preprocess_header_files $FILES +} + +build_cflags() { + BASE_CFLAGS="-O2 -D__KERNEL__ \ +-DKBUILD_BASENAME=\"#conftest$$\" -DKBUILD_MODNAME=\"#conftest$$\" \ +-nostdinc -isystem $ISYSTEM" + + if [ "$OUTPUT" != "$SOURCES" ]; then + OUTPUT_CFLAGS="-I$OUTPUT/include2 -I$OUTPUT/include" + if [ -f "$OUTPUT/include/generated/autoconf.h" ]; then + AUTOCONF_FILE="$OUTPUT/include/generated/autoconf.h" + else + AUTOCONF_FILE="$OUTPUT/include/linux/autoconf.h" + fi + else + if [ -f "$HEADERS/generated/autoconf.h" ]; then + AUTOCONF_FILE="$HEADERS/generated/autoconf.h" + else + AUTOCONF_FILE="$HEADERS/linux/autoconf.h" + fi + fi + + test_xen + + if [ "$XEN_PRESENT" != "0" ]; then + MACH_CFLAGS="-I$HEADERS/asm/mach-xen" + fi + + 
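An illustrative aside (not part of the script): each path in the FILES list above is turned by translate_and_preprocess_header_files() into an NV_<PATH>_H_PRESENT macro, for example drm/drm_device.h becomes NV_DRM_DRM_DEVICE_H_PRESENT, and driver sources test that macro before including the header. The fallback shown below is a hypothetical pairing chosen for illustration, not necessarily the one the driver actually uses.

#include "conftest/headers.h"                  /* generated by conftest.sh */

#if defined(NV_DRM_DRM_DEVICE_H_PRESENT)
#include <drm/drm_device.h>                    /* newer kernels: split DRM headers  */
#else
#include <drm/drmP.h>                          /* older kernels: monolithic header  */
#endif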
SOURCE_HEADERS="$HEADERS" + SOURCE_ARCH_HEADERS="$SOURCES/arch/$KERNEL_ARCH/include" + OUTPUT_HEADERS="$OUTPUT/include" + OUTPUT_ARCH_HEADERS="$OUTPUT/arch/$KERNEL_ARCH/include" + + # Look for mach- directories on this arch, and add it to the list of + # includes if that platform is enabled in the configuration file, which + # may have a definition like this: + # #define CONFIG_ARCH_ 1 + for _mach_dir in `ls -1d $SOURCES/arch/$KERNEL_ARCH/mach-* 2>/dev/null`; do + _mach=`echo $_mach_dir | \ + sed -e "s,$SOURCES/arch/$KERNEL_ARCH/mach-,," | \ + tr 'a-z' 'A-Z'` + grep "CONFIG_ARCH_$_mach \+1" $AUTOCONF_FILE > /dev/null 2>&1 + if [ $? -eq 0 ]; then + MACH_CFLAGS="$MACH_CFLAGS -I$_mach_dir/include" + fi + done + + if [ "$ARCH" = "arm" ]; then + MACH_CFLAGS="$MACH_CFLAGS -D__LINUX_ARM_ARCH__=7" + fi + + # Add the mach-default includes (only found on x86/older kernels) + MACH_CFLAGS="$MACH_CFLAGS -I$SOURCE_HEADERS/asm-$KERNEL_ARCH/mach-default" + MACH_CFLAGS="$MACH_CFLAGS -I$SOURCE_ARCH_HEADERS/asm/mach-default" + + CFLAGS="$BASE_CFLAGS $MACH_CFLAGS $OUTPUT_CFLAGS -include $AUTOCONF_FILE" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS/uapi" + CFLAGS="$CFLAGS -I$SOURCE_HEADERS/xen" + CFLAGS="$CFLAGS -I$OUTPUT_HEADERS/generated/uapi" + CFLAGS="$CFLAGS -I$SOURCE_ARCH_HEADERS" + CFLAGS="$CFLAGS -I$SOURCE_ARCH_HEADERS/uapi" + CFLAGS="$CFLAGS -I$OUTPUT_ARCH_HEADERS/generated" + CFLAGS="$CFLAGS -I$OUTPUT_ARCH_HEADERS/generated/uapi" + + if [ -n "$BUILD_PARAMS" ]; then + CFLAGS="$CFLAGS -D$BUILD_PARAMS" + fi + + # Check if gcc supports asm goto and set CC_HAVE_ASM_GOTO if it does. + # Older kernels perform this check and set this flag in Kbuild, and since + # conftest.sh runs outside of Kbuild it ends up building without this flag. + # Starting with commit e9666d10a5677a494260d60d1fa0b73cc7646eb3 this test + # is done within Kconfig, and the preprocessor flag is no longer needed. + + GCC_GOTO_SH="$SOURCES/build/gcc-goto.sh" + + if [ -f "$GCC_GOTO_SH" ]; then + # Newer versions of gcc-goto.sh don't print anything on success, but + # this is okay, since it's no longer necessary to set CC_HAVE_ASM_GOTO + # based on the output of those versions of gcc-goto.sh. + if [ `/bin/sh "$GCC_GOTO_SH" "$CC"` = "y" ]; then + CFLAGS="$CFLAGS -DCC_HAVE_ASM_GOTO" + fi + fi + + # + # If CONFIG_HAVE_FENTRY is enabled and gcc supports -mfentry flags then set + # CC_USING_FENTRY and add -mfentry into cflags. + # + # linux/ftrace.h file indirectly gets included into the conftest source and + # fails to get compiled, because conftest.sh runs outside of Kbuild it ends + # up building without -mfentry and CC_USING_FENTRY flags. + # + grep "CONFIG_HAVE_FENTRY \+1" $AUTOCONF_FILE > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + echo "" > conftest$$.c + + $CC -mfentry -c -x c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + CFLAGS="$CFLAGS -mfentry -DCC_USING_FENTRY" + fi + fi +} + +CONFTEST_PREAMBLE="#include \"conftest/headers.h\" + #if defined(NV_LINUX_KCONFIG_H_PRESENT) + #include + #endif + #if defined(NV_GENERATED_AUTOCONF_H_PRESENT) + #include + #else + #include + #endif + #if defined(CONFIG_XEN) && \ + defined(CONFIG_XEN_INTERFACE_VERSION) && !defined(__XEN_INTERFACE_VERSION__) + #define __XEN_INTERFACE_VERSION__ CONFIG_XEN_INTERFACE_VERSION + #endif + #if defined(CONFIG_KASAN) && defined(CONFIG_ARM64) + #if defined(CONFIG_KASAN_SW_TAGS) + #define KASAN_SHADOW_SCALE_SHIFT 4 + #else + #define KASAN_SHADOW_SCALE_SHIFT 3 + #endif + #endif" + +test_configuration_option() { + # + # Check to see if the given configuration option is defined + # + + get_configuration_option $1 >/dev/null 2>&1 + + return $? + +} + +set_configuration() { + # + # Set a specific configuration option. This function is called to always + # enable a configuration, in order to verify whether the test code for that + # configuration is no longer required and the corresponding + # conditionally-compiled code in the driver can be removed. + # + DEF="$1" + + if [ "$3" = "" ] + then + VAL="" + CAT="$2" + else + VAL="$2" + CAT="$3" + fi + + echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}" +} + +unset_configuration() { + # + # Un-set a specific configuration option. This function is called to + # always disable a configuration, in order to verify whether the test + # code for that configuration is no longer required and the corresponding + # conditionally-compiled code in the driver can be removed. + # + DEF="$1" + CAT="$2" + + echo "#undef ${DEF}" | append_conftest "${CAT}" +} + +compile_check_conftest() { + # + # Compile the current conftest C file and check+output the result + # + CODE="$1" + DEF="$2" + VAL="$3" + CAT="$4" + + echo "$CONFTEST_PREAMBLE + $CODE" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + if [ "${CAT}" = "functions" ]; then + # + # The logic for "functions" compilation tests is inverted compared to + # other compilation steps: if the function is present, the code + # snippet will fail to compile because the function call won't match + # the prototype. If the function is not present, the code snippet + # will produce an object file with the function as an unresolved + # symbol. + # + echo "#undef ${DEF}" | append_conftest "${CAT}" + else + echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}" + fi + return + else + if [ "${CAT}" = "functions" ]; then + echo "#define ${DEF} ${VAL}" | append_conftest "${CAT}" + else + echo "#undef ${DEF}" | append_conftest "${CAT}" + fi + return + fi +} + +export_symbol_present_conftest() { + # + # Check Module.symvers to see whether the given symbol is present. + # + + SYMBOL="$1" + TAB=' ' + + if grep -e "${TAB}${SYMBOL}${TAB}.*${TAB}EXPORT_SYMBOL.*\$" \ + "$OUTPUT/Module.symvers" >/dev/null 2>&1; then + echo "#define NV_IS_EXPORT_SYMBOL_PRESENT_$SYMBOL 1" | + append_conftest "symbols" + else + # May be a false negative if Module.symvers is absent or incomplete, + # or if the Module.symvers format changes. 
+ echo "#define NV_IS_EXPORT_SYMBOL_PRESENT_$SYMBOL 0" | + append_conftest "symbols" + fi +} + +export_symbol_gpl_conftest() { + # + # Check Module.symvers to see whether the given symbol is present and its + # export type is GPL-only (including deprecated GPL-only symbols). + # + + SYMBOL="$1" + TAB=' ' + + if grep -e "${TAB}${SYMBOL}${TAB}.*${TAB}EXPORT_\(UNUSED_\)*SYMBOL_GPL\$" \ + "$OUTPUT/Module.symvers" >/dev/null 2>&1; then + echo "#define NV_IS_EXPORT_SYMBOL_GPL_$SYMBOL 1" | + append_conftest "symbols" + else + # May be a false negative if Module.symvers is absent or incomplete, + # or if the Module.symvers format changes. + echo "#define NV_IS_EXPORT_SYMBOL_GPL_$SYMBOL 0" | + append_conftest "symbols" + fi +} + +get_configuration_option() { + # + # Print the value of given configuration option, if defined + # + RET=1 + OPTION=$1 + + OLD_FILE="linux/autoconf.h" + NEW_FILE="generated/autoconf.h" + FILE="" + + if [ -f $HEADERS/$NEW_FILE -o -f $OUTPUT/include/$NEW_FILE ]; then + FILE=$NEW_FILE + elif [ -f $HEADERS/$OLD_FILE -o -f $OUTPUT/include/$OLD_FILE ]; then + FILE=$OLD_FILE + fi + + if [ -n "$FILE" ]; then + # + # We are looking at a configured source tree; verify + # that its configuration includes the given option + # via a compile check, and print the option's value. + # + + if [ -f $HEADERS/$FILE ]; then + INCLUDE_DIRECTORY=$HEADERS + elif [ -f $OUTPUT/include/$FILE ]; then + INCLUDE_DIRECTORY=$OUTPUT/include + else + return 1 + fi + + echo "#include <$FILE> + #ifndef $OPTION + #error $OPTION not defined! + #endif + + $OPTION + " > conftest$$.c + + $CC -E -P -I$INCLUDE_DIRECTORY -o conftest$$ conftest$$.c > /dev/null 2>&1 + + if [ -e conftest$$ ]; then + tr -d '\r\n\t ' < conftest$$ + RET=$? + fi + + rm -f conftest$$.c conftest$$ + else + CONFIG=$OUTPUT/.config + if [ -f $CONFIG ] && grep "^$OPTION=" $CONFIG; then + grep "^$OPTION=" $CONFIG | cut -f 2- -d "=" + RET=$? + fi + fi + + return $RET + +} + +check_for_ib_peer_memory_symbols() { + local kernel_dir="$1" + local module_symvers="${kernel_dir}/Module.symvers" + + local sym_ib_register="ib_register_peer_memory_client" + local sym_ib_unregister="ib_unregister_peer_memory_client" + local tab=' ' + + # Return 0 for true(no errors), 1 for false + if [ ! -f "${module_symvers}" ]; then + return 1 + fi + + if grep -e "${tab}${sym_ib_register}${tab}.*${tab}EXPORT_SYMBOL.*\$" \ + "${module_symvers}" > /dev/null 2>&1 && + grep -e "${tab}${sym_ib_unregister}${tab}.*${tab}EXPORT_SYMBOL.*\$" \ + "${module_symvers}" > /dev/null 2>&1; then + return 0 + else + return 1 + fi +} + +compile_test() { + case "$1" in + set_memory_uc) + # + # Determine if the set_memory_uc() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #else + #include + #endif + void conftest_set_memory_uc(void) { + set_memory_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_MEMORY_UC_PRESENT" "" "functions" + ;; + + set_memory_array_uc) + # + # Determine if the set_memory_array_uc() function is present. + # It does not exist on all architectures. 
+ # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #else + #include + #endif + void conftest_set_memory_array_uc(void) { + set_memory_array_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_MEMORY_ARRAY_UC_PRESENT" "" "functions" + ;; + + sysfs_slab_unlink) + # + # Determine if the sysfs_slab_unlink() function is present. + # + # This test is useful to check for the presence a fix for the deferred + # kmem_cache destroy feature (see nvbug: 2543505). + # + # Added by commit d50d82faa0c9 ("slub: fix failure when we delete and + # create a slab cache") in 4.18 (2018-06-27). + # + CODE=" + #include + void conftest_sysfs_slab_unlink(void) { + sysfs_slab_unlink(); + }" + + compile_check_conftest "$CODE" "NV_SYSFS_SLAB_UNLINK_PRESENT" "" "functions" + ;; + + list_is_first) + # + # Determine if the list_is_first() function is present. + # + # Added by commit 70b44595eafe ("mm, compaction: use free lists + # to quickly locate a migration source") in 5.1 (2019-03-05) + # + CODE=" + #include + void conftest_list_is_first(void) { + list_is_first(); + }" + + compile_check_conftest "$CODE" "NV_LIST_IS_FIRST_PRESENT" "" "functions" + ;; + + set_pages_uc) + # + # Determine if the set_pages_uc() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #else + #include + #endif + void conftest_set_pages_uc(void) { + set_pages_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_PAGES_UC_PRESENT" "" "functions" + ;; + + set_pages_array_uc) + # + # Determine if the set_pages_array_uc() function is present. + # It does not exist on all architectures. + # + # set_pages_array_uc() was added by commit + # 0f3507555f6fa4acbc85a646d6e8766230db38fc ("x86, CPA: Add + # set_pages_arrayuc and set_pages_array_wb") in v2.6.30-rc1 (Thu Mar + # 19 14:51:15 2009) + # + CODE=" + #include + #if defined(NV_ASM_SET_MEMORY_H_PRESENT) + #if defined(NV_ASM_PGTABLE_TYPES_H_PRESENT) + #include + #endif + #include + #else + #include + #endif + void conftest_set_pages_array_uc(void) { + set_pages_array_uc(); + }" + + compile_check_conftest "$CODE" "NV_SET_PAGES_ARRAY_UC_PRESENT" "" "functions" + ;; + + flush_cache_all) + # + # Determine if flush_cache_all() function is present + # + # flush_cache_all() was removed by commit id + # 68234df4ea79 ("arm64: kill flush_cache_all()") in 4.2 (2015-04-20) + # for aarch64 + # + CODE=" + #include + int conftest_flush_cache_all(void) { + return flush_cache_all(); + }" + compile_check_conftest "$CODE" "NV_FLUSH_CACHE_ALL_PRESENT" "" "functions" + ;; + + pci_get_domain_bus_and_slot) + # + # Determine if the pci_get_domain_bus_and_slot() function + # is present. + # + # Added by commit 3c299dc22635 ("PCI: add + # pci_get_domain_bus_and_slot function") in 2.6.33. + # + CODE=" + #include + void conftest_pci_get_domain_bus_and_slot(void) { + pci_get_domain_bus_and_slot(); + }" + + compile_check_conftest "$CODE" "NV_PCI_GET_DOMAIN_BUS_AND_SLOT_PRESENT" "" "functions" + ;; + + pci_bus_address) + # + # Determine if the pci_bus_address() function is + # present. 
+ # + # Added by commit 06cf56e497c8 ("PCI: Add pci_bus_address() to + # get bus address of a BAR") in v3.14 + # + CODE=" + #include + void conftest_pci_bus_address(void) { + pci_bus_address(); + }" + + compile_check_conftest "$CODE" "NV_PCI_BUS_ADDRESS_PRESENT" "" "functions" + ;; + + hash__remap_4k_pfn) + # + # Determine if the hash__remap_4k_pfn() function is + # present. + # + # Added by commit 6cc1a0ee4ce2 ("powerpc/mm/radix: Add radix + # callback for pmd accessors") in v4.7 (committed 2016-04-29). + # Present only in arch/powerpc + # + CODE=" + #if defined(NV_ASM_BOOK3S_64_HASH_64K_H_PRESENT) + #include + #include + #endif + void conftest_hash__remap_4k_pfn(void) { + hash__remap_4k_pfn(); + }" + + compile_check_conftest "$CODE" "NV_HASH__REMAP_4K_PFN_PRESENT" "" "functions" + ;; + + register_cpu_notifier) + # + # Determine if register_cpu_notifier() is present + # + # Removed by commit 530e9b76ae8f ("cpu/hotplug: Remove obsolete + # cpu hotplug register/unregister functions") in v4.10 + # (2016-12-21) + # + CODE=" + #include + void conftest_register_cpu_notifier(void) { + register_cpu_notifier(); + }" > conftest$$.c + compile_check_conftest "$CODE" "NV_REGISTER_CPU_NOTIFIER_PRESENT" "" "functions" + ;; + + cpuhp_setup_state) + # + # Determine if cpuhp_setup_state() is present + # + # Added by commit 5b7aa87e0482 ("cpu/hotplug: Implement + # setup/removal interface") in v4.6 (commited 2016-02-26) + # + # It is used as a replacement for register_cpu_notifier + CODE=" + #include + void conftest_cpuhp_setup_state(void) { + cpuhp_setup_state(); + }" > conftest$$.c + compile_check_conftest "$CODE" "NV_CPUHP_SETUP_STATE_PRESENT" "" "functions" + ;; + + ioremap_cache) + # + # Determine if the ioremap_cache() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_ioremap_cache(void) { + ioremap_cache(); + }" + + compile_check_conftest "$CODE" "NV_IOREMAP_CACHE_PRESENT" "" "functions" + ;; + + ioremap_wc) + # + # Determine if the ioremap_wc() function is present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_ioremap_wc(void) { + ioremap_wc(); + }" + + compile_check_conftest "$CODE" "NV_IOREMAP_WC_PRESENT" "" "functions" + ;; + + file_operations) + # 'ioctl' field removed by commit b19dd42faf41 + # ("bkl: Remove locked .ioctl file operation") in v2.6.36 + CODE=" + #include + int conftest_file_operations(void) { + return offsetof(struct file_operations, ioctl); + }" + + compile_check_conftest "$CODE" "NV_FILE_OPERATIONS_HAS_IOCTL" "" "types" + ;; + + sg_alloc_table) + # + # sg_alloc_table_from_pages added by commit efc42bc98058 + # ("scatterlist: add sg_alloc_table_from_pages function") in v3.6 + # + CODE=" + #include + void conftest_sg_alloc_table_from_pages(void) { + sg_alloc_table_from_pages(); + }" + + compile_check_conftest "$CODE" "NV_SG_ALLOC_TABLE_FROM_PAGES_PRESENT" "" "functions" + ;; + + efi_enabled) + # + # Added in 2.6.12 as a variable + # + # Determine if the efi_enabled symbol is present (as a variable), + # or if the efi_enabled() function is present and how many + # arguments it takes. 
+ # + # Converted from a variable to a function by commit 83e68189745a + # ("efi: Make 'efi_enabled' a function to query EFI facilities") + # in v3.8 + # + echo "$CONFTEST_PREAMBLE + #if defined(NV_LINUX_EFI_H_PRESENT) + #include + #endif + int conftest_efi_enabled(void) { + return efi_enabled(0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_EFI_ENABLED_PRESENT" | append_conftest "functions" + echo "#define NV_EFI_ENABLED_ARGUMENT_COUNT 1" | append_conftest "functions" + rm -f conftest$$.o + return + else + echo "#define NV_EFI_ENABLED_PRESENT" | append_conftest "symbols" + return + fi + ;; + + dom0_kernel_present) + # Add config parameter if running on DOM0. + if [ -n "$VGX_BUILD" ]; then + echo "#define NV_DOM0_KERNEL_PRESENT" | append_conftest "generic" + else + echo "#undef NV_DOM0_KERNEL_PRESENT" | append_conftest "generic" + fi + return + ;; + + nvidia_vgpu_kvm_build) + # Add config parameter if running on KVM host. + if [ -n "$VGX_KVM_BUILD" ]; then + echo "#define NV_VGPU_KVM_BUILD" | append_conftest "generic" + else + echo "#undef NV_VGPU_KVM_BUILD" | append_conftest "generic" + fi + return + ;; + + vfio_register_notifier) + # + # Check number of arguments required. + # + # New parameters added by commit 22195cbd3451 ("vfio: + # vfio_register_notifier: classify iommu notifier") in v4.10 + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_vfio_register_notifier(void) { + return vfio_register_notifier((struct device *) NULL, (struct notifier_block *) NULL); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_VFIO_NOTIFIER_ARGUMENT_COUNT 2" | append_conftest "functions" + rm -f conftest$$.o + return + else + echo "#define NV_VFIO_NOTIFIER_ARGUMENT_COUNT 4" | append_conftest "functions" + return + fi + ;; + + vfio_info_add_capability_has_cap_type_id_arg) + # + # Check if vfio_info_add_capability() has cap_type_id parameter. + # + # Removed by commit dda01f787df9 ("vfio: Simplify capability + # helper") in v4.16 (2017-12-12) + # + CODE=" + #include + int vfio_info_add_capability(struct vfio_info_cap *caps, + int cap_type_id, + void *cap_type) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_VFIO_INFO_ADD_CAPABILITY_HAS_CAP_TYPE_ID_ARGS" "" "types" + ;; + + vmbus_channel_has_ringbuffer_page) + # + # Check if ringbuffer_page field exist in vmbus_channel structure + # + # Changed in commit 52a42c2a90226dc61c99bbd0cb096deeb52c334b + # ("vmbus: keep pointer to ring buffer page") in v5.0 (2018-09-14) + # + + CODE=" + #include + + int conftest_vmbus_channel_has_ringbuffer_page(void) { + return offsetof(struct vmbus_channel, ringbuffer_page); + }" + + compile_check_conftest "$CODE" "NV_VMBUS_CHANNEL_HAS_RING_BUFFER_PAGE" "" "types" + ;; + + nvidia_grid_build) + if [ -n "$GRID_BUILD" ]; then + echo "#define NV_GRID_BUILD" | append_conftest "generic" + else + echo "#undef NV_GRID_BUILD" | append_conftest "generic" + fi + return + ;; + + nvidia_grid_csp_build) + if [ -n "$GRID_BUILD_CSP" ]; then + echo "#define NV_GRID_BUILD_CSP $GRID_BUILD_CSP" | append_conftest "generic" + else + echo "#undef NV_GRID_BUILD_CSP" | append_conftest "generic" + fi + return + ;; + + vm_fault_has_address) + # + # Determine if the 'vm_fault' structure has an 'address', or a + # 'virtual_address' field. 
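+            #
+            # Illustrative sketch with a hypothetical foo_register(): the
+            # argument-count probing used above for efi_enabled() and
+            # vfio_register_notifier().  Candidate calls are compiled in
+            # turn, and the first one that builds decides which count is
+            # recorded in the generated header.
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/kernel.h>
+            int conftest_foo_register(void) {
+                return foo_register((struct device *) NULL, (struct notifier_block *) NULL);
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_FOO_REGISTER_ARGUMENT_COUNT 2" | append_conftest "functions"
+                rm -f conftest$$.o
+            else
+                echo "#define NV_FOO_REGISTER_ARGUMENT_COUNT 4" | append_conftest "functions"
+            fi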
The .virtual_address field was + # effectively renamed to .address: + # + # 'address' added by commit 82b0f8c39a38 ("mm: join + # struct fault_env and vm_fault") in v4.10 (2016-12-14) + # + # 'virtual_address' removed by commit 1a29d85eb0f1 ("mm: use + # vmf->address instead of of vmf->virtual_address") in v4.10 + # (2016-12-14) + # + CODE=" + #include + int conftest_vm_fault_has_address(void) { + return offsetof(struct vm_fault, address); + }" + + compile_check_conftest "$CODE" "NV_VM_FAULT_HAS_ADDRESS" "" "types" + ;; + + kmem_cache_has_kobj_remove_work) + # + # Determine if the 'kmem_cache' structure has 'kobj_remove_work'. + # + # 'kobj_remove_work' was added by commit 3b7b314053d02 ("slub: make + # sysfs file removal asynchronous") in v4.12 (2017-06-23). This + # commit introduced a race between kmem_cache destroy and create + # which we need to workaround in our driver (see nvbug: 2543505). + # Also see comment for sysfs_slab_unlink conftest. + # + CODE=" + #include + #include + #include + int conftest_kmem_cache_has_kobj_remove_work(void) { + return offsetof(struct kmem_cache, kobj_remove_work); + }" + + compile_check_conftest "$CODE" "NV_KMEM_CACHE_HAS_KOBJ_REMOVE_WORK" "" "types" + ;; + + mdev_uuid) + # + # Determine if mdev_uuid() function is present or not + # + # Added by commit 99e3123e3d72 ("vfio-mdev: Make mdev_device + # private and abstract interfaces") in v4.10 + # + CODE=" + #include + #include + void conftest_mdev_uuid() { + mdev_uuid(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_UUID_PRESENT" "" "functions" + + # + # Determine if mdev_uuid() returns 'const guid_t *'. + # + # mdev_uuid() function prototype updated to return 'const guid_t *' + # by commit 278bca7f318e ("vfio-mdev: Switch to use new generic UUID + # API") in v5.1 (2019-01-10). + # + CODE=" + #include + #include + const guid_t *conftest_mdev_uuid_return_guid_ptr(struct mdev_device *mdev) { + return mdev_uuid(mdev); + }" + + compile_check_conftest "$CODE" "NV_MDEV_UUID_RETURN_GUID_PTR" "" "types" + ;; + + mdev_dev) + # + # Determine if mdev_dev() function is present or not + # + # Added by commit 99e3123e3d72 ("vfio-mdev: Make mdev_device + # private and abstract interfaces") in v4.10 + # + CODE=" + #include + #include + void conftest_mdev_dev() { + mdev_dev(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_DEV_PRESENT" "" "functions" + ;; + + mdev_get_type_group_id) + # + # Determine if mdev_get_type_group_id() function is present or not + # + # Added by commit 15fcc44be0c7a ("vfio/mdev: Add + # mdev/mtype_get_type_group_id()") in v5.13 + # + CODE=" + #include + #include + void conftest_mdev_get_type_group_id() { + mdev_get_type_group_id(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_GET_TYPE_GROUP_ID_PRESENT" "" "functions" + ;; + + mdev_parent) + # + # Determine if the struct mdev_parent type is present. 
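+            #
+            # Illustrative sketch with hypothetical names: the return-type
+            # probe used above for mdev_uuid().  A wrapper is defined with
+            # the candidate return type and simply returns the probed call;
+            # the probe relies on the conftest compile rejecting the wrapper
+            # when the kernel's prototype returns an incompatible type.
+            #
+            CODE="
+            #include <linux/kernel.h>
+            const char *conftest_foo_name_return_const_char_ptr(struct foo_device *dev) {
+                return foo_name(dev);
+            }"
+
+            compile_check_conftest "$CODE" "NV_FOO_NAME_RETURN_CONST_CHAR_PTR" "" "types"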
+ # + # Added by commit 42930553a7c1 ("vfio-mdev: de-polute the + # namespace, rename parent_device & parent_ops") in v4.10 + # + CODE=" + #include + #include + struct mdev_parent_ops conftest_mdev_parent; + " + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_OPS_STRUCT_PRESENT" "" "types" + ;; + + mdev_parent_dev) + # + # Determine if mdev_parent_dev() function is present or not + # + # Added by commit 9372e6feaafb ("vfio-mdev: Make mdev_parent + # private") in v4.10 + # + CODE=" + #include + #include + void conftest_mdev_parent_dev() { + mdev_parent_dev(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_PARENT_DEV_PRESENT" "" "functions" + ;; + + mdev_from_dev) + # + # Determine if mdev_from_dev() function is present or not. + # + # Added by commit 99e3123e3d72 ("vfio-mdev: Make mdev_device + # private and abstract interfaces") in v4.10 (2016-12-30) + # + CODE=" + #include + #include + void conftest_mdev_from_dev() { + mdev_from_dev(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_FROM_DEV_PRESENT" "" "functions" + ;; + + mdev_set_iommu_device) + # + # Determine if mdev_set_iommu_device() function is present or not. + # + # Added by commit 8ac13175cbe9 ("vfio/mdev: Add iommu related member + # in mdev_device) in v5.1 (2019-04-12) + # + CODE=" + #include + #include + void conftest_mdev_set_iommu_device() { + mdev_set_iommu_device(); + }" + + compile_check_conftest "$CODE" "NV_MDEV_SET_IOMMU_DEVICE_PRESENT" "" "functions" + ;; + + pci_irq_vector_helpers) + # + # Determine if pci_alloc_irq_vectors(), pci_free_irq_vectors() + # functions are present or not. + # + # Added by commit aff171641d181ea573 (PCI: Provide sensible IRQ + # vector alloc/free routines) (2016-07-12) + # + CODE=" + #include + #include + void conftest_pci_irq_vector_helpers() { + pci_alloc_irq_vectors(); + pci_free_irq_vectors (); + }" + + compile_check_conftest "$CODE" "NV_PCI_IRQ_VECTOR_HELPERS_PRESENT" "" "functions" + ;; + + + vfio_device_gfx_plane_info) + # + # determine if the 'struct vfio_device_gfx_plane_info' type is present. + # + # Added by commit e20eaa2382e7 ("vfio: ABI for mdev display + # dma-buf operation") in v4.16 (2017-11-23) + # + CODE=" + #include + struct vfio_device_gfx_plane_info info;" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_GFX_PLANE_INFO_PRESENT" "" "types" + ;; + + vfio_device_migration_info) + # + # determine if the 'struct vfio_device_migration_info' type is present. + # + # Proposed interface for vGPU Migration + # ("[PATCH v3 0/5] Add migration support for VFIO device ") + # https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg05176.html + # Upstreamed commit a8a24f3f6e38 (vfio: UAPI for migration interface + # for device state) in v5.8 (2020-05-29) + # + CODE=" + #include + struct vfio_device_migration_info info;" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_MIGRATION_INFO_PRESENT" "" "types" + ;; + + vfio_device_migration_has_start_pfn) + # + # Determine if the 'vfio_device_migration_info' structure has + # a 'start_pfn' field. 
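+            #
+            # Illustrative sketch with a hypothetical type: the type-presence
+            # checks above (for example vfio_device_gfx_plane_info) simply
+            # declare a variable of the structure in question; the translation
+            # unit compiles only if the included headers define that type.
+            #
+            CODE="
+            #include <linux/vfio.h>
+            struct foo_plane_info conftest_foo_plane_info;"
+
+            compile_check_conftest "$CODE" "NV_FOO_PLANE_INFO_PRESENT" "" "types"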
+ # + # This member was present in proposed interface for vGPU Migration + # ("[PATCH v3 0/5] Add migration support for VFIO device ") + # https://lists.gnu.org/archive/html/qemu-devel/2019-02/msg05176.html + # which is not present in upstreamed commit a8a24f3f6e38 (vfio: UAPI + # for migration interface for device state) in v5.8 (2020-05-29) + # + CODE=" + #include + int conftest_vfio_device_migration_has_start_pfn(void) { + return offsetof(struct vfio_device_migration_info, start_pfn); + }" + + compile_check_conftest "$CODE" "NV_VFIO_DEVICE_MIGRATION_HAS_START_PFN" "" "types" + ;; + + drm_available) + # Determine if the DRM subsystem is usable + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + #if !defined(CONFIG_DRM) && !defined(CONFIG_DRM_MODULE) + #error DRM not enabled + #endif + + void conftest_drm_available(void) { + struct drm_driver drv; + + /* 2013-10-02 1bb72532ac260a2d3982b40bdd4c936d779d0d16 */ + (void)drm_dev_alloc; + + /* 2013-10-02 c22f0ace1926da399d9a16dfaf09174c1b03594c */ + (void)drm_dev_register; + + /* 2013-10-02 c3a49737ef7db0bdd4fcf6cf0b7140a883e32b2a */ + (void)drm_dev_unregister; + }" + + compile_check_conftest "$CODE" "NV_DRM_AVAILABLE" "" "generic" + ;; + + drm_dev_unref) + # + # Determine if drm_dev_unref() is present. + # If it isn't, we use drm_dev_free() instead. + # + # drm_dev_free was added by commit 0dc8fe5985e0 ("drm: introduce + # drm_dev_free() to fix error paths") in v3.13 (2013-10-02) + # + # Renamed to drm_dev_unref by commit 099d1c290e2e + # ("drm: provide device-refcount") in v3.15 (2014-01-29) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + void conftest_drm_dev_unref(void) { + drm_dev_unref(); + }" + + compile_check_conftest "$CODE" "NV_DRM_DEV_UNREF_PRESENT" "" "functions" + ;; + + pde_data) + # + # Determine if the pde_data() function is present. + # + # The commit c28198889c15 removed the function + # 'PDE_DATA()', and replaced it with 'pde_data()' + # ("proc: remove PDE_DATA() completely") in v5.17-rc1. + # + CODE=" + #include + void conftest_pde_data(void) { + pde_data(); + }" + + compile_check_conftest "$CODE" "NV_PDE_DATA_LOWER_CASE_PRESENT" "" "functions" + ;; + + get_num_physpages) + # + # Determine if the get_num_physpages() function is + # present. + # + # Added by commit 7ee3d4e8cd56 ("mm: introduce helper function + # mem_init_print_info() to simplify mem_init()") in v3.11 + # + CODE=" + #include + void conftest_get_num_physpages(void) { + get_num_physpages(NULL); + }" + + compile_check_conftest "$CODE" "NV_GET_NUM_PHYSPAGES_PRESENT" "" "functions" + ;; + + proc_remove) + # + # Determine if the proc_remove() function is present. + # + # Added by commit a8ca16ea7b0a ("proc: Supply a function to + # remove a proc entry by PDE") in v3.10 + # + CODE=" + #include + void conftest_proc_remove(void) { + proc_remove(); + }" + + compile_check_conftest "$CODE" "NV_PROC_REMOVE_PRESENT" "" "functions" + ;; + + backing_dev_info) + # + # Determine if the 'address_space' structure has + # a 'backing_dev_info' field. + # + # Removed by commit b83ae6d42143 ("fs: remove + # mapping->backing_dev_info") in v4.0 + # + CODE=" + #include + int conftest_backing_dev_info(void) { + return offsetof(struct address_space, backing_dev_info); + }" + + compile_check_conftest "$CODE" "NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO" "" "types" + ;; + + address_space) + # + # Determine if the 'address_space' structure has + # a 'tree_lock' field of type rwlock_t. 
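+            #
+            # Illustrative sketch with hypothetical names: the "generic"
+            # availability checks, such as drm_available above, combine a
+            # kernel CONFIG gate with references to the entry points the
+            # driver needs, so a single macro records whether the whole
+            # subsystem is usable for this build.
+            #
+            CODE="
+            #if !defined(CONFIG_FOO) && !defined(CONFIG_FOO_MODULE)
+            #error FOO not enabled
+            #endif
+            #include <linux/kernel.h>
+            void conftest_foo_available(void) {
+                (void)foo_dev_alloc;
+                (void)foo_dev_register;
+            }"
+
+            compile_check_conftest "$CODE" "NV_FOO_AVAILABLE" "" "generic"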
+ # + # 'tree_lock' was changed to spinlock_t by commit 19fd6231279b + # ("mm: spinlock tree_lock") in v2.6.27 + # + # It was removed altogether by commit b93b016313b3 ("page cache: + # use xa_lock") in v4.17 + # + CODE=" + #include + int conftest_address_space(void) { + struct address_space as; + rwlock_init(&as.tree_lock); + return offsetof(struct address_space, tree_lock); + }" + + compile_check_conftest "$CODE" "NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK" "" "types" + ;; + + address_space_init_once) + # + # Determine if address_space_init_once is present. + # + # Added by commit 2aa15890f3c1 ("mm: prevent concurrent + # unmap_mapping_range() on the same inode") in v2.6.38 + # + # If not present, it will be defined in uvm-linux.h. + # + CODE=" + #include + void conftest_address_space_init_once(void) { + address_space_init_once(); + }" + + compile_check_conftest "$CODE" "NV_ADDRESS_SPACE_INIT_ONCE_PRESENT" "" "functions" + ;; + + kuid_t) + # + # Determine if the 'kuid_t' type is present. + # + # Added by commit 7a4e7408c5ca ("userns: Add kuid_t and kgid_t + # and associated infrastructure in uidgid.h") in v3.5 + # + CODE=" + #include + kuid_t conftest_kuid_t; + " + + compile_check_conftest "$CODE" "NV_KUID_T_PRESENT" "" "types" + ;; + + pm_vt_switch_required) + # + # Determine if the pm_vt_switch_required() function is present. + # + # Added by commit f43f627d2f17 ("PM: make VT switching to the + # suspend console optional v3") in v3.10 + # + CODE=" + #include + void conftest_pm_vt_switch_required(void) { + pm_vt_switch_required(); + }" + + compile_check_conftest "$CODE" "NV_PM_VT_SWITCH_REQUIRED_PRESENT" "" "functions" + ;; + + xen_ioemu_inject_msi) + # Determine if the xen_ioemu_inject_msi() function is present. + CODE=" + #if defined(NV_XEN_IOEMU_H_PRESENT) + #include + #include + #include + #include + #endif + void conftest_xen_ioemu_inject_msi(void) { + xen_ioemu_inject_msi(); + }" + + compile_check_conftest "$CODE" "NV_XEN_IOEMU_INJECT_MSI" "" "functions" + ;; + + phys_to_dma) + # + # Determine if the phys_to_dma function is present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_phys_to_dma(void) { + phys_to_dma(); + }" + + compile_check_conftest "$CODE" "NV_PHYS_TO_DMA_PRESENT" "" "functions" + ;; + + + dma_attr_macros) + # + # Determine if the NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT macro present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_dma_attr_macros(void) { + int ret; + ret = DMA_ATTR_SKIP_CPU_SYNC(); + }" + compile_check_conftest "$CODE" "NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT" "" "functions" + ;; + + dma_map_page_attrs) + # + # Determine if the dma_map_page_attrs function is present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_dma_map_page_attrs(void) { + dma_map_page_attrs(); + }" + + compile_check_conftest "$CODE" "NV_DMA_MAP_PAGE_ATTRS_PRESENT" "" "functions" + ;; + + dma_ops) + # + # Determine if the 'dma_ops' structure is present. + # It does not exist on all architectures. + # + CODE=" + #include + void conftest_dma_ops(void) { + (void)dma_ops; + }" + + compile_check_conftest "$CODE" "NV_DMA_OPS_PRESENT" "" "symbols" + ;; + + swiotlb_dma_ops) + # + # Determine if the 'swiotlb_dma_ops' structure is present. + # It does not exist on all architectures. 
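+            #
+            # Illustrative sketch with a hypothetical symbol: the "symbols"
+            # checks, such as dma_ops above and swiotlb_dma_ops below, probe
+            # for an exported variable rather than a function by simply
+            # referencing it; the snippet compiles only when the headers
+            # declare the symbol.
+            #
+            CODE="
+            #include <linux/dma-mapping.h>
+            void conftest_foo_ops_symbol(void) {
+                (void)foo_ops;
+            }"
+
+            compile_check_conftest "$CODE" "NV_FOO_OPS_PRESENT" "" "symbols"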
+ # + CODE=" + #include + void conftest_dma_ops(void) { + (void)swiotlb_dma_ops; + }" + + compile_check_conftest "$CODE" "NV_SWIOTLB_DMA_OPS_PRESENT" "" "symbols" + ;; + + get_dma_ops) + # + # Determine if the get_dma_ops() function is present. + # + # The structure was made available to all architectures by commit + # e1c7e324539a ("dma-mapping: always provide the dma_map_ops + # based implementation") in v4.5 + # + # Commit 0a0f0d8be76d ("dma-mapping: split ") + # in v5.10-rc1 (2020-09-22), moved get_dma_ops() function + # prototype from to . + # + CODE=" + #if defined(NV_LINUX_DMA_MAP_OPS_H_PRESENT) + #include + #else + #include + #endif + void conftest_get_dma_ops(void) { + get_dma_ops(); + }" + + compile_check_conftest "$CODE" "NV_GET_DMA_OPS_PRESENT" "" "functions" + ;; + + noncoherent_swiotlb_dma_ops) + # + # Determine if the 'noncoherent_swiotlb_dma_ops' symbol is present. + # This API only exists on ARM64. + # + # Added by commit 7363590d2c46 ("arm64: Implement coherent DMA API + # based on swiotlb") in v3.15 + # + # Removed by commit 9d3bfbb4df58 ("arm64: Combine coherent and + # non-coherent swiotlb dma_ops") in v4.0 + # + CODE=" + #include + void conftest_noncoherent_swiotlb_dma_ops(void) { + (void)noncoherent_swiotlb_dma_ops; + }" + + compile_check_conftest "$CODE" "NV_NONCOHERENT_SWIOTLB_DMA_OPS_PRESENT" "" "symbols" + ;; + + dma_map_resource) + # + # Determine if the dma_map_resource() function is present. + # + # Added by commit 6f3d87968f9c ("dma-mapping: add + # dma_{map,unmap}_resource") in v4.9 (2016-08-10) + # + CODE=" + #include + void conftest_dma_map_resource(void) { + dma_map_resource(); + }" + + compile_check_conftest "$CODE" "NV_DMA_MAP_RESOURCE_PRESENT" "" "functions" + ;; + + write_cr4) + # + # Determine if the write_cr4() function is present. + # + CODE=" + #include + void conftest_write_cr4(void) { + write_cr4(); + }" + + compile_check_conftest "$CODE" "NV_WRITE_CR4_PRESENT" "" "functions" + ;; + + nvhost_dma_fence_unpack) + # + # Determine if the nvhost_dma_fence_unpack function is present. + # This is only present in NVIDIA Tegra downstream kernels. + # + CODE=" + #if defined(NV_LINUX_NVHOST_H_PRESENT) + #include + #endif + void conftest_nvhost_dma_fence_unpack(void) { + nvhost_dma_fence_unpack(); + }" + + compile_check_conftest "$CODE" "NV_NVHOST_DMA_FENCE_UNPACK_PRESENT" "" "functions" + ;; + + of_get_property) + # + # Determine if the of_get_property function is present. + # + # Support for kernels without CONFIG_OF defined added by commit + # 89272b8c0d42 ("dt: add empty of_get_property for non-dt") in v3.1 + # + # Test if linux/of.h header file inclusion is successful or not and + # define/undefine NV_LINUX_OF_H_USABLE depending upon status of inclusion + # + echo "$CONFTEST_PREAMBLE + #include + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_LINUX_OF_H_USABLE" | append_conftest "generic" + CODE=" + #include + void conftest_of_get_property() { + of_get_property(); + }" + + compile_check_conftest "$CODE" "NV_OF_GET_PROPERTY_PRESENT" "" "functions" + else + echo "#undef NV_LINUX_OF_H_USABLE" | append_conftest "generic" + echo "#undef NV_OF_GET_PROPERTY_PRESENT" | append_conftest "functions" + fi + ;; + + of_find_node_by_phandle) + # + # Determine if the of_find_node_by_phandle function is present. 
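+            #
+            # Illustrative sketch with a hypothetical header: the
+            # of_get_property check above first verifies that the header can
+            # be included at all, records that as a *_H_USABLE macro, and only
+            # then probes the function; when the include fails, both macros
+            # are explicitly #undef'd.
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/foo.h>
+            " > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                rm -f conftest$$.o
+                echo "#define NV_LINUX_FOO_H_USABLE" | append_conftest "generic"
+
+                CODE="
+                #include <linux/foo.h>
+                void conftest_foo_get_property() {
+                    foo_get_property();
+                }"
+
+                compile_check_conftest "$CODE" "NV_FOO_GET_PROPERTY_PRESENT" "" "functions"
+            else
+                echo "#undef NV_LINUX_FOO_H_USABLE" | append_conftest "generic"
+                echo "#undef NV_FOO_GET_PROPERTY_PRESENT" | append_conftest "functions"
+            fi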
+ # + # Support for kernels without CONFIG_OF defined added by commit + # ce16b9d23561 ("of: define of_find_node_by_phandle for + # !CONFIG_OF") in v4.2 + # + # Test if linux/of.h header file inclusion is successful or not and + # define/undefine NV_LINUX_OF_H_USABLE depending upon status of inclusion. + # + echo "$CONFTEST_PREAMBLE + #include + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_LINUX_OF_H_USABLE" | append_conftest "generic" + CODE=" + #include + void conftest_of_find_node_by_phandle() { + of_find_node_by_phandle(); + }" + + compile_check_conftest "$CODE" "NV_OF_FIND_NODE_BY_PHANDLE_PRESENT" "" "functions" + else + echo "#undef NV_LINUX_OF_H_USABLE" | append_conftest "generic" + echo "#undef NV_OF_FIND_NODE_BY_PHANDLE_PRESENT" | append_conftest "functions" + fi + ;; + + of_node_to_nid) + # + # Determine if of_node_to_nid is present + # + # Dummy implementation added by commit 559e2b7ee7a1 + # ("of: Provide default of_node_to_nid() implementation.") in v2.6.36 + # + # Real implementation added by commit 298535c00a2c + # ("of, numa: Add NUMA of binding implementation.") in v4.7 + # + # Test if linux/of.h header file inclusion is successful or not and + # define/undefine NV_LINUX_OF_H_USABLE depending upon status of inclusion. + # + echo "$CONFTEST_PREAMBLE + #include + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_LINUX_OF_H_USABLE" | append_conftest "generic" + CODE=" + #include + #include + #include + void conftest_of_node_to_nid() { + of_node_to_nid(); + }" + + compile_check_conftest "$CODE" "NV_OF_NODE_TO_NID_PRESENT" "" "functions" + else + echo "#undef NV_LINUX_OF_H_USABLE" | append_conftest "generic" + echo "#undef NV_OF_NODE_TO_NID_PRESENT" | append_conftest "functions" + fi + ;; + + pnv_pci_get_npu_dev) + # + # Determine if the pnv_pci_get_npu_dev function is present. + # + # Added by commit 5d2aa710e697 ("powerpc/powernv: Add support + # for Nvlink NPUs") in v4.5 + # + CODE=" + #include + void conftest_pnv_pci_get_npu_dev() { + pnv_pci_get_npu_dev(); + }" + + compile_check_conftest "$CODE" "NV_PNV_PCI_GET_NPU_DEV_PRESENT" "" "functions" + ;; + + kernel_write) + # + # Determine if the function kernel_write() is present. + # + # First exported by commit 7bb307e894d5 ("export kernel_write(), + # convert open-coded instances") in v3.9 + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_kernel_write(void) { + kernel_write(); + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#undef NV_KERNEL_WRITE_PRESENT" | append_conftest "function" + rm -f conftest$$.o + else + echo "#define NV_KERNEL_WRITE_PRESENT" | append_conftest "function" + + # + # Determine the pos argument type, which was changed by + # commit e13ec939e96b1 (fs: fix kernel_write prototype) on + # 9/1/2017. 
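+            #
+            # Illustrative sketch with a hypothetical foo_write(): parameter
+            # type probes like the one below define the function with the
+            # candidate prototype; the translation unit builds only when that
+            # prototype agrees with the declaration in the kernel headers,
+            # because a conflicting redeclaration is a hard compile error.
+            #
+            echo "$CONFTEST_PREAMBLE
+            #include <linux/fs.h>
+            ssize_t foo_write(struct file *file, const void *buf,
+                              size_t count, loff_t *pos)
+            {
+                return 0;
+            }" > conftest$$.c
+
+            $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1
+            rm -f conftest$$.c
+
+            if [ -f conftest$$.o ]; then
+                echo "#define NV_FOO_WRITE_HAS_POINTER_POS_ARG" | append_conftest "function"
+                rm -f conftest$$.o
+            else
+                echo "#undef NV_FOO_WRITE_HAS_POINTER_POS_ARG" | append_conftest "function"
+            fi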
+ # + echo "$CONFTEST_PREAMBLE + #include + ssize_t kernel_write(struct file *file, const void *buf, + size_t count, loff_t *pos) + { + return 0; + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_KERNEL_WRITE_HAS_POINTER_POS_ARG" | append_conftest "function" + rm -f conftest$$.o + else + echo "#undef NV_KERNEL_WRITE_HAS_POINTER_POS_ARG" | append_conftest "function" + fi + fi + ;; + + kernel_read_has_pointer_pos_arg) + # + # Determine the pos argument type, which was changed by + # commit bdd1d2d3d251c (fs: fix kernel_read prototype) on + # 9/1/2017. + # + echo "$CONFTEST_PREAMBLE + #include + ssize_t kernel_read(struct file *file, void *buf, size_t count, + loff_t *pos) + { + return 0; + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_KERNEL_READ_HAS_POINTER_POS_ARG" | append_conftest "function" + rm -f conftest$$.o + else + echo "#undef NV_KERNEL_READ_HAS_POINTER_POS_ARG" | append_conftest "function" + fi + ;; + + vm_insert_pfn_prot) + # + # Determine if vm_insert_pfn_prot function is present + # + # Added by commit 1745cbc5d0de ("mm: Add vm_insert_pfn_prot()") in + # v3.16.59 + # + # Removed by commit f5e6d1d5f8f3 ("mm: introduce + # vmf_insert_pfn_prot()") in v4.20. + # + CODE=" + #include + void conftest_vm_insert_pfn_prot() { + vm_insert_pfn_prot(); + }" + + compile_check_conftest "$CODE" "NV_VM_INSERT_PFN_PROT_PRESENT" "" "functions" + ;; + + vmf_insert_pfn_prot) + # + # Determine if vmf_insert_pfn_prot function is present + # + # Added by commit f5e6d1d5f8f3 ("mm: introduce + # vmf_insert_pfn_prot()") in v4.20. + # + CODE=" + #include + void conftest_vmf_insert_pfn_prot() { + vmf_insert_pfn_prot(); + }" + + compile_check_conftest "$CODE" "NV_VMF_INSERT_PFN_PROT_PRESENT" "" "functions" + ;; + + drm_atomic_available) + # + # Determine if the DRM atomic modesetting subsystem is usable + # + # Added by commit 036ef5733ba4 + # ("drm/atomic: Allow drivers to subclass drm_atomic_state, v3") in + # v4.2 (2018-05-18). + # + # Make conftest more robust by adding test for + # drm_atomic_set_mode_prop_for_crtc(), this function added by + # commit 955f3c334f0f ("drm/atomic: Add MODE_ID property") in v4.2 + # (2015-05-25). If the DRM atomic modesetting subsystem is + # back ported to Linux kernel older than v4.2, then commit + # 955f3c334f0f must be back ported in order to get NVIDIA-DRM KMS + # support. + # Commit 72fdb40c1a4b ("drm: extract drm_atomic_uapi.c") in v4.20 + # (2018-09-05), moved drm_atomic_set_mode_prop_for_crtc() function + # prototype from drm/drm_atomic.h to drm/drm_atomic_uapi.h. 
+ # + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + #if !defined(CONFIG_DRM) && !defined(CONFIG_DRM_MODULE) + #error DRM not enabled + #endif + void conftest_drm_atomic_modeset_available(void) { + size_t a; + + a = offsetof(struct drm_mode_config_funcs, atomic_state_alloc); + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #include + #if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT) + #include + #endif + void conftest_drm_atomic_set_mode_prop_for_crtc(void) { + drm_atomic_set_mode_prop_for_crtc(); + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#undef NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + else + echo "#define NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + fi + else + echo "#undef NV_DRM_ATOMIC_MODESET_AVAILABLE" | append_conftest "generic" + fi + ;; + + drm_bus_present) + # + # Determine if the 'struct drm_bus' type is present. + # + # Added by commit 8410ea3b95d1 ("drm: rework PCI/platform driver + # interface.") in v2.6.39 (2010-12-15) + # + # Removed by commit c5786fe5f1c5 ("drm: Goody bye, drm_bus!") + # in v3.18 (2014-08-29) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + void conftest_drm_bus_present(void) { + struct drm_bus bus; + }" + + compile_check_conftest "$CODE" "NV_DRM_BUS_PRESENT" "" "types" + ;; + + drm_bus_has_bus_type) + # + # Determine if the 'drm_bus' structure has a 'bus_type' field. + # + # Added by commit 8410ea3b95d1 ("drm: rework PCI/platform driver + # interface.") in v2.6.39 (2010-12-15) + # + # Removed by commit 42b21049fc26 ("drm: kill drm_bus->bus_type") + # in v3.16 (2013-11-03) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_bus_has_bus_type(void) { + return offsetof(struct drm_bus, bus_type); + }" + + compile_check_conftest "$CODE" "NV_DRM_BUS_HAS_BUS_TYPE" "" "types" + ;; + + drm_bus_has_get_irq) + # + # Determine if the 'drm_bus' structure has a 'get_irq' field. + # + # Added by commit 8410ea3b95d1 ("drm: rework PCI/platform + # driver interface.") in v2.6.39 (2010-12-15) + # + # Removed by commit b2a21aa25a39 ("drm: remove bus->get_irq + # implementations") in v3.16 (2013-11-03) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_bus_has_get_irq(void) { + return offsetof(struct drm_bus, get_irq); + }" + + compile_check_conftest "$CODE" "NV_DRM_BUS_HAS_GET_IRQ" "" "types" + ;; + + drm_bus_has_get_name) + # + # Determine if the 'drm_bus' structure has a 'get_name' field. + # + # Added by commit 8410ea3b95d1 ("drm: rework PCI/platform driver + # interface.") in v2.6.39 (2010-12-15) + # + # removed by commit 9de1b51f1fae ("drm: remove drm_bus->get_name") + # in v3.16 (2013-11-03) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_bus_has_get_name(void) { + return offsetof(struct drm_bus, get_name); + }" + + compile_check_conftest "$CODE" "NV_DRM_BUS_HAS_GET_NAME" "" "types" + ;; + + drm_driver_has_device_list) + # + # Determine if the 'drm_driver' structure has a 'device_list' field. 
+ # + # Renamed from device_list to legacy_device_list by commit + # b3f2333de8e8 ("drm: restrict the device list for shadow + # attached drivers") in v3.14 (2013-12-11) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_device_list(void) { + return offsetof(struct drm_driver, device_list); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_DEVICE_LIST" "" "types" + ;; + + + drm_driver_has_legacy_dev_list) + # + # Determine if the 'drm_driver' structure has a 'legacy_dev_list' field. + # + # Renamed from device_list to legacy_device_list by commit + # b3f2333de8e8 ("drm: restrict the device list for shadow + # attached drivers") in v3.14 (2013-12-11) + # + # The commit 57bb1ee60340 ("drm: Compile out legacy chunks from + # struct drm_device") compiles out the legacy chunks like + # drm_driver::legacy_dev_list. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_legacy_dev_list(void) { + return offsetof(struct drm_driver, legacy_dev_list); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST" "" "types" + ;; + + jiffies_to_timespec) + # + # Determine if jiffies_to_timespec() is present + # + # removed by commit 751addac78b6 + # ("y2038: remove obsolete jiffies conversion functions") + # in v5.6-rc1 (2019-12-13). + CODE=" + #include + void conftest_jiffies_to_timespec(void){ + jiffies_to_timespec(); + }" + compile_check_conftest "$CODE" "NV_JIFFIES_TO_TIMESPEC_PRESENT" "" "functions" + ;; + + drm_init_function_args) + # + # Determine if these functions: + # drm_universal_plane_init() + # drm_crtc_init_with_planes() + # drm_encoder_init() + # have a 'name' argument, which was added by these commits: + # drm_universal_plane_init: 2015-12-09 b0b3b7951114315d65398c27648705ca1c322faa + # drm_crtc_init_with_planes: 2015-12-09 f98828769c8838f526703ef180b3088a714af2f9 + # drm_encoder_init: 2015-12-09 13a3d91f17a5f7ed2acd275d18b6acfdb131fb15 + # + # Additionally determine whether drm_universal_plane_init() has a + # 'format_modifiers' argument, which was added by: + # 2017-07-23 e6fc3b68558e4c6d8d160b5daf2511b99afa8814 + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_CRTC_H_PRESENT) + #include + #endif + + int conftest_drm_crtc_init_with_planes_has_name_arg(void) { + return + drm_crtc_init_with_planes( + NULL, /* struct drm_device *dev */ + NULL, /* struct drm_crtc *crtc */ + NULL, /* struct drm_plane *primary */ + NULL, /* struct drm_plane *cursor */ + NULL, /* const struct drm_crtc_funcs *funcs */ + NULL); /* const char *name */ + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_INIT_WITH_PLANES_HAS_NAME_ARG" "" "types" + + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_ENCODER_H_PRESENT) + #include + #endif + + int conftest_drm_encoder_init_has_name_arg(void) { + return + drm_encoder_init( + NULL, /* struct drm_device *dev */ + NULL, /* struct drm_encoder *encoder */ + NULL, /* const struct drm_encoder_funcs *funcs */ + DRM_MODE_ENCODER_NONE, /* int encoder_type */ + NULL); /* const char *name */ + }" + + compile_check_conftest "$CODE" "NV_DRM_ENCODER_INIT_HAS_NAME_ARG" "" "types" + + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_PLANE_H_PRESENT) + #include + #endif + + int 
conftest_drm_universal_plane_init_has_format_modifiers_arg(void) { + return + drm_universal_plane_init( + NULL, /* struct drm_device *dev */ + NULL, /* struct drm_plane *plane */ + 0, /* unsigned long possible_crtcs */ + NULL, /* const struct drm_plane_funcs *funcs */ + NULL, /* const uint32_t *formats */ + 0, /* unsigned int format_count */ + NULL, /* const uint64_t *format_modifiers */ + DRM_PLANE_TYPE_PRIMARY, + NULL); /* const char *name */ + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + echo "#define NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG" | append_conftest "types" + echo "#define NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG" | append_conftest "types" + else + echo "#undef NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG" | append_conftest "types" + + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_PLANE_H_PRESENT) + #include + #endif + + int conftest_drm_universal_plane_init_has_name_arg(void) { + return + drm_universal_plane_init( + NULL, /* struct drm_device *dev */ + NULL, /* struct drm_plane *plane */ + 0, /* unsigned long possible_crtcs */ + NULL, /* const struct drm_plane_funcs *funcs */ + NULL, /* const uint32_t *formats */ + 0, /* unsigned int format_count */ + DRM_PLANE_TYPE_PRIMARY, + NULL); /* const char *name */ + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + + echo "#define NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG" | append_conftest "types" + else + echo "#undef NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG" | append_conftest "types" + fi + fi + + ;; + + vzalloc) + # + # Determine if the vzalloc function is present + # + # Added by commit e1ca7788dec6 ("mm: add vzalloc() and + # vzalloc_node() helpers") in v2.6.37 (2010-10-26) + # + CODE=" + #include + void conftest_vzalloc() { + vzalloc(); + }" + + compile_check_conftest "$CODE" "NV_VZALLOC_PRESENT" "" "functions" + ;; + + drm_driver_has_set_busid) + # + # Determine if the drm_driver structure has a 'set_busid' callback + # field. + # + # Added by commit 915b4d11b8b9 ("drm: add driver->set_busid() + # callback") in v3.18 (2014-08-29) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_set_busid(void) { + return offsetof(struct drm_driver, set_busid); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_SET_BUSID" "" "types" + ;; + + drm_driver_has_gem_prime_res_obj) + # + # Determine if the drm_driver structure has a 'gem_prime_res_obj' + # callback field. + # + # Added by commit 3aac4502fd3f ("dma-buf: use reservation + # objects") in v3.17 (2014-07-01). + # + # Removed by commit 51c98747113e (drm/prime: Ditch + # gem_prime_res_obj hook) in v5.4. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_gem_prime_res_obj(void) { + return offsetof(struct drm_driver, gem_prime_res_obj); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ" "" "types" + ;; + + drm_crtc_state_has_connectors_changed) + # + # Determine if the crtc_state has a 'connectors_changed' field. 
+ # + # Added by commit fc596660dd4e ("drm/atomic: add + # connectors_changed to separate it from mode_changed, v2") + # in v4.3 (2015-07-21) + # + CODE=" + #include + void conftest_drm_crtc_state_has_connectors_changed(void) { + struct drm_crtc_state foo; + (void)foo.connectors_changed; + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_CONNECTORS_CHANGED" "" "types" + ;; + + drm_reinit_primary_mode_group) + # + # Determine if the function drm_reinit_primary_mode_group() is + # present. + # + # Added by commit 2390cd11bfbe ("drm/crtc: add interface to + # reinitialise the legacy mode group") in v3.17 (2014-06-05) + # + # Removed by commit 3fdefa399e46 ("drm: gc now dead + # mode_group code") in v4.3 (2015-07-09) + # + CODE=" + #if defined(NV_DRM_DRM_CRTC_H_PRESENT) + #include + #endif + void conftest_drm_reinit_primary_mode_group(void) { + drm_reinit_primary_mode_group(); + }" + + compile_check_conftest "$CODE" "NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT" "" "functions" + ;; + + wait_on_bit_lock_argument_count) + # + # Determine how many arguments wait_on_bit_lock takes. + # + # Changed by commit 743162013d40 ("sched: Remove proliferation + # of wait_on_bit() action functions") in v3.17 (2014-07-07) + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_wait_on_bit_lock(void) { + wait_on_bit_lock(NULL, 0, 0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_WAIT_ON_BIT_LOCK_ARGUMENT_COUNT 3" | append_conftest "functions" + return + fi + + echo "$CONFTEST_PREAMBLE + #include + void conftest_wait_on_bit_lock(void) { + wait_on_bit_lock(NULL, 0, NULL, 0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_WAIT_ON_BIT_LOCK_ARGUMENT_COUNT 4" | append_conftest "functions" + return + fi + echo "#error wait_on_bit_lock() conftest failed!" | append_conftest "functions" + ;; + + bitmap_clear) + # + # Determine if the bitmap_clear function is present + # + # Added by commit c1a2a962a2ad ("bitmap: introduce bitmap_set, + # bitmap_clear, bitmap_find_next_zero_area") in v2.6.33 + # (2009-12-15) + # + CODE=" + #include + void conftest_bitmap_clear() { + bitmap_clear(); + }" + + compile_check_conftest "$CODE" "NV_BITMAP_CLEAR_PRESENT" "" "functions" + ;; + + pci_stop_and_remove_bus_device) + # + # Determine if the pci_stop_and_remove_bus_device() function is present. + # + # Added by commit 210647af897a ("PCI: Rename pci_remove_bus_device + # to pci_stop_and_remove_bus_device") in v3.4 (2012-02-25) + # + CODE=" + #include + #include + void conftest_pci_stop_and_remove_bus_device() { + pci_stop_and_remove_bus_device(); + }" + + compile_check_conftest "$CODE" "NV_PCI_STOP_AND_REMOVE_BUS_DEVICE_PRESENT" "" "functions" + ;; + + pci_remove_bus_device) + # + # Determine if the pci_remove_bus_device() function is present. + # Added before Linux-2.6.12-rc2 2005-04-16 + # Because we support builds on non-PCI platforms, we still need + # to check for this function's presence. + # + CODE=" + #include + #include + void conftest_pci_remove_bus_device() { + pci_remove_bus_device(); + }" + + compile_check_conftest "$CODE" "NV_PCI_REMOVE_BUS_DEVICE_PRESENT" "" "functions" + ;; + + drm_helper_mode_fill_fb_struct | drm_helper_mode_fill_fb_struct_has_const_mode_cmd_arg) + # + # Determine if the drm_helper_mode_fill_fb_struct function takes + # 'dev' argument. 
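+            #
+            # Illustrative sketch: many DRM checks in this script include
+            # whichever of several headers is available by testing guards such
+            # as NV_DRM_DRMP_H_PRESENT and NV_DRM_DRM_DRV_H_PRESENT, which are
+            # presumably emitted by header-presence tests elsewhere in this
+            # script and presumably correspond to drm/drmP.h and drm/drm_drv.h.
+            # With a hypothetical member, the structure looks like:
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+            #if defined(NV_DRM_DRM_DRV_H_PRESENT)
+            #include <drm/drm_drv.h>
+            #endif
+            int conftest_drm_driver_has_foo_member(void) {
+                return offsetof(struct drm_driver, foo_member);
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_FOO_MEMBER" "" "types"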
+ # + # The drm_helper_mode_fill_fb_struct() has been updated to + # take 'dev' parameter by commit a3f913ca9892 ("drm: Pass 'dev' + # to drm_helper_mode_fill_fb_struct()") in v4.11 (2016-12-14) + # + echo "$CONFTEST_PREAMBLE + #include + void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, + const struct drm_mode_fb_cmd2 *mode_cmd) + { + return; + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG" | append_conftest "function" + echo "#define NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG" | append_conftest "function" + rm -f conftest$$.o + else + echo "#undef NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG" | append_conftest "function" + + # + # Determine if the drm_mode_fb_cmd2 pointer argument is const in + # drm_mode_config_funcs::fb_create and drm_helper_mode_fill_fb_struct(). + # + # The drm_mode_fb_cmd2 pointer through this call chain was made + # const by commit 1eb83451ba55 ("drm: Pass the user drm_mode_fb_cmd2 + # as const to .fb_create()") in v4.5 (2015-11-11) + # + echo "$CONFTEST_PREAMBLE + #include + void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, + const struct drm_mode_fb_cmd2 *mode_cmd) + { + return; + }" > conftest$$.c; + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG" | append_conftest "function" + rm -f conftest$$.o + else + echo "#undef NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG" | append_conftest "function" + fi + fi + ;; + + mm_context_t) + # + # Determine if the 'mm_context_t' data type is present + # and if it has an 'id' member. + # It does not exist on all architectures. + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_mm_context_t(void) { + return offsetof(mm_context_t, id); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_MM_CONTEXT_T_HAS_ID" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_MM_CONTEXT_T_HAS_ID" | append_conftest "types" + return + fi + ;; + + pci_dev_has_ats_enabled) + # + # Determine if the 'pci_dev' data type has a 'ats_enabled' member. + # + # Added by commit d544d75ac96aa ("PCI: Embed ATS info directly + # into struct pci_dev") in v4.3-rc1 (2015-08-14) + # + CODE=" + #include + int conftest_pci_dev_ats_enabled_t(void) { + return ((struct pci_dev *)0)->ats_enabled; + }" + + compile_check_conftest "$CODE" "NV_PCI_DEV_HAS_ATS_ENABLED" "" "types" + ;; + + mt_device_gre) + # + # Determine if MT_DEVICE_GRE flag is present. + # + # MT_DEVICE_GRE flag is removed by commit 58cc6b72a21274 + # ("arm64: mm: Remove unused support for Device-GRE memory type") in v5.14-rc1 + # (2021-06-01). 
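+            #
+            # Illustrative sketch with a hypothetical flag: constants such as
+            # MT_DEVICE_GRE below are probed the same way as functions; simply
+            # returning the value compiles only while the kernel still defines
+            # the macro or enumerator.
+            #
+            CODE="
+            #include <linux/kernel.h>
+            unsigned int conftest_foo_device_flag(void) {
+                return FOO_DEVICE_FLAG;
+            }"
+
+            compile_check_conftest "$CODE" "NV_FOO_DEVICE_FLAG_PRESENT" "" "types"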
+ # + CODE=" + #include + unsigned int conftest_mt_device_gre(void) { + return MT_DEVICE_GRE; + }" + + compile_check_conftest "$CODE" "NV_MT_DEVICE_GRE_PRESENT" "" "types" + ;; + + get_user_pages) + # + # Conftest for get_user_pages() + # + # Use long type for get_user_pages and unsigned long for nr_pages + # by commit 28a35716d317 ("mm: use long type for page counts + # in mm_populate() and get_user_pages()") in v3.9 (2013-02-22) + # + # Removed struct task_struct *tsk & struct mm_struct *mm from + # get_user_pages by commit cde70140fed8 ("mm/gup: Overload + # get_user_pages() functions") in v4.6 (2016-02-12) + # + # Replaced get_user_pages6 with get_user_pages by commit + # c12d2da56d0e ("mm/gup: Remove the macro overload API migration + # helpers from the get_user*() APIs") in v4.6 (2016-04-04) + # + # Replaced write and force parameters with gup_flags by + # commit 768ae309a961 ("mm: replace get_user_pages() write/force + # parameters with gup_flags") in v4.9 (2016-10-13) + # + # linux-4.4.168 cherry-picked commit 768ae309a961 without + # c12d2da56d0e which is covered in Conftest #3. + # + # Conftest #1: Check if get_user_pages accepts 6 arguments. + # Return if true. + # Fall through to conftest #2 on failure. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages(unsigned long start, + unsigned long nr_pages, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + if [ -f conftest$$.o ]; then + echo "#define NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS" | append_conftest "functions" + echo "#undef NV_GET_USER_PAGES_HAS_TASK_STRUCT" | append_conftest "functions" + rm -f conftest$$.o + return + fi + + # Conftest #2: Check if get_user_pages has gup_flags instead of + # write and force parameters. And that gup doesn't accept a + # task_struct and mm_struct as its first arguments. + # Return if available. + # Fall through to conftest #3 on failure. + + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages(unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#undef NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS" | append_conftest "functions" + echo "#undef NV_GET_USER_PAGES_HAS_TASK_STRUCT" | append_conftest "functions" + rm -f conftest$$.o + return + fi + + # Conftest #3: Check if get_user_pages has gup_flags instead of + # write and force parameters AND that gup has task_struct and + # mm_struct as its first arguments. + # Return if available. + # Fall through to default case if absent. 
+ + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#undef NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS" | append_conftest "functions" + echo "#define NV_GET_USER_PAGES_HAS_TASK_STRUCT" | append_conftest "functions" + rm -f conftest$$.o + return + fi + + echo "#define NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS" | append_conftest "functions" + echo "#define NV_GET_USER_PAGES_HAS_TASK_STRUCT" | append_conftest "functions" + + return + ;; + + get_user_pages_remote) + # + # Determine if the function get_user_pages_remote() is + # present and has write/force/locked/tsk parameters. + # + # get_user_pages_remote() was added by commit 1e9877902dc7 + # ("mm/gup: Introduce get_user_pages_remote()") in v4.6 (2016-02-12) + # + # get_user_pages[_remote]() write/force parameters + # replaced with gup_flags by commits 768ae309a961 ("mm: replace + # get_user_pages() write/force parameters with gup_flags") and + # commit 9beae1ea8930 ("mm: replace get_user_pages_remote() + # write/force parameters with gup_flags") in v4.9 (2016-10-13) + # + # get_user_pages_remote() added 'locked' parameter by + # commit 5b56d49fc31d ("mm: add locked parameter to + # get_user_pages_remote()") in v4.10 (2016-12-14) + # + # get_user_pages_remote() removed 'tsk' parameter by + # commit 64019a2e467a ("mm/gup: remove task_struct pointer for + # all gup code") in v5.9-rc1 (2020-08-11). + # + # conftest #1: check if get_user_pages_remote() is available + # return if not available. + # Fall through to conftest #2 if it is present + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_get_user_pages_remote(void) { + get_user_pages_remote(); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#undef NV_GET_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions" + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_TSK_ARG" | append_conftest "functions" + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_WRITE_AND_FORCE_ARGS" | append_conftest "functions" + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_LOCKED_ARG" | append_conftest "functions" + rm -f conftest$$.o + return + fi + + # + # conftest #2: check if get_user_pages_remote() has write and + # force arguments. Return if these arguments are present + # Fall through to conftest #3 if these args are absent. 
+ # + echo "#define NV_GET_USER_PAGES_REMOTE_PRESENT" | append_conftest "functions" + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_TSK_ARG" | append_conftest "functions" + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_WRITE_AND_FORCE_ARGS" | append_conftest "functions" + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_LOCKED_ARG" | append_conftest "functions" + rm -f conftest$$.o + return + fi + + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_WRITE_AND_FORCE_ARGS" | append_conftest "functions" + + # + # conftest #3: check if get_user_pages_remote() has locked argument + # Return if these arguments are present. Fall through to conftest #4 + # if these args are absent. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_TSK_ARG" | append_conftest "functions" + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_LOCKED_ARG" | append_conftest "functions" + rm -f conftest$$.o + return + fi + + # + # conftest #4: check if get_user_pages_remote() does not take + # tsk argument. + # + echo "$CONFTEST_PREAMBLE + #include + long get_user_pages_remote(struct mm_struct *mm, + unsigned long start, + unsigned long nr_pages, + unsigned int gup_flags, + struct page **pages, + struct vm_area_struct **vmas, + int *locked) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_TSK_ARG" | append_conftest "functions" + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_LOCKED_ARG" | append_conftest "functions" + rm -f conftest$$.o + else + + echo "#define NV_GET_USER_PAGES_REMOTE_HAS_TSK_ARG" | append_conftest "functions" + echo "#undef NV_GET_USER_PAGES_REMOTE_HAS_LOCKED_ARG" | append_conftest "functions" + fi + ;; + + usleep_range) + # + # Determine if the function usleep_range() is present. + # + # Added by commit 5e7f5a178bba ("timer: Added usleep_range timer") + # in v2.6.36 (2010-08-04) + # + CODE=" + #include + void conftest_usleep_range(void) { + usleep_range(); + }" + + compile_check_conftest "$CODE" "NV_USLEEP_RANGE_PRESENT" "" "functions" + ;; + + radix_tree_empty) + # + # Determine if the function radix_tree_empty() is present. + # + # Added by commit e9256efcc8e3 ("radix-tree: introduce + # radix_tree_empty") in v4.7 (2016-05-20) + # + CODE=" + #include + int conftest_radix_tree_empty(void) { + radix_tree_empty(); + }" + + compile_check_conftest "$CODE" "NV_RADIX_TREE_EMPTY_PRESENT" "" "functions" + ;; + + drm_gem_object_lookup) + # + # Determine the number of arguments of drm_gem_object_lookup(). 
+ # + # First argument of type drm_device removed by commit + # a8ad0bd84f98 ("drm: Remove unused drm_device from + # drm_gem_object_lookup()") in v4.7 (2016-05-09) + # + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #if defined(NV_DRM_DRM_GEM_H_PRESENT) + #include + #endif + void conftest_drm_gem_object_lookup(void) { + drm_gem_object_lookup(NULL, NULL, 0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT 3" | append_conftest "functions" + rm -f conftest$$.o + return + else + echo "#define NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT 2" | append_conftest "functions" + fi + ;; + + drm_master_drop_has_from_release_arg) + # + # Determine if drm_driver::master_drop() has 'from_release' argument. + # + # Last argument 'bool from_release' has been removed by commit + # d6ed682eba54 ("drm: Refactor drop/set master code a bit") + # in v4.8 (2016-06-21) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + void conftest_drm_master_drop_has_from_release_arg(struct drm_driver *drv) { + drv->master_drop(NULL, NULL, false); + }" + + compile_check_conftest "$CODE" "NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG" "" "types" + ;; + + drm_atomic_state_ref_counting) + # + # Determine if functions drm_atomic_state_get/put() are + # present. + # + # Added by commit 0853695c3ba4 ("drm: Add reference counting to + # drm_atomic_state") in v4.10 (2016-10-14) + # + CODE=" + #if defined(NV_DRM_DRM_ATOMIC_H_PRESENT) + #include + #endif + void conftest_drm_atomic_state_get(void) { + drm_atomic_state_get(); + }" + + compile_check_conftest "$CODE" "NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT" "" "functions" + ;; + + vm_ops_fault_removed_vma_arg) + # + # Determine if vma.vm_ops.fault takes (vma, vmf), or just (vmf) + # args. Acronym key: + # vma: struct vm_area_struct + # vm_ops: struct vm_operations_struct + # vmf: struct vm_fault + # + # The redundant vma arg was removed from BOTH vma.vm_ops.fault and + # vma.vm_ops.page_mkwrite by commit 11bac8000449 ("mm, fs: reduce + # fault, page_mkwrite, and pfn_mkwrite to take only vmf") in + # v4.11 (2017-02-24) + # + CODE=" + #include + void conftest_vm_ops_fault_removed_vma_arg(void) { + struct vm_operations_struct vm_ops; + struct vm_fault *vmf; + (void)vm_ops.fault(vmf); + }" + + compile_check_conftest "$CODE" "NV_VM_OPS_FAULT_REMOVED_VMA_ARG" "" "types" + ;; + + pnv_npu2_init_context) + # + # Determine if the pnv_npu2_init_context() function is + # present and the signature of its callback. + # + # Added by commit 1ab66d1fbada ("powerpc/powernv: Introduce + # address translation services for Nvlink2") in v4.12 + # (2017-04-03). 
+ # + echo "$CONFTEST_PREAMBLE + #if defined(NV_ASM_POWERNV_H_PRESENT) + #include + #include + #endif + void conftest_pnv_npu2_init_context(void) { + pnv_npu2_init_context(); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + if [ -f conftest$$.o ]; then + echo "#undef NV_PNV_NPU2_INIT_CONTEXT_PRESENT" | append_conftest "functions" + echo "#undef NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions" + rm -f conftest$$.o + return + fi + + echo "#define NV_PNV_NPU2_INIT_CONTEXT_PRESENT" | append_conftest "functions" + + # Check the callback signature + echo "$CONFTEST_PREAMBLE + #if defined(NV_ASM_POWERNV_H_PRESENT) + #include + #include + #endif + + struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, + unsigned long flags, + void (*cb)(struct npu_context *, void *), + void *priv) { + return NULL; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + if [ -f conftest$$.o ]; then + echo "#define NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions" + rm -f conftest$$.o + return + fi + + echo "#undef NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID" | append_conftest "functions" + ;; + + of_get_ibm_chip_id) + # + # Determine if the of_get_ibm_chip_id() function is present. + # + # Added by commit b130e7c04f11 ("powerpc: export + # of_get_ibm_chip_id function") in v4.2 (2015-05-07) + # + CODE=" + #include + #if defined(NV_ASM_PROM_H_PRESENT) + #include + #endif + void conftest_of_get_ibm_chip_id(void) { + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) + of_get_ibm_chip_id(); + #endif + }" + + compile_check_conftest "$CODE" "NV_OF_GET_IBM_CHIP_ID_PRESENT" "" "functions" + ;; + + drm_driver_unload_has_int_return_type) + # + # Determine if drm_driver::unload() returns integer value + # + # Changed to void by commit 11b3c20bdd15 ("drm: Change the return + # type of the unload hook to void") in v4.11 (2017-01-06) + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + int conftest_drm_driver_unload_has_int_return_type(struct drm_driver *drv) { + return drv->unload(NULL /* dev */); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_UNLOAD_HAS_INT_RETURN_TYPE" "" "types" + ;; + + is_export_symbol_present_*) + export_symbol_present_conftest $(echo $1 | cut -f5- -d_) + ;; + + is_export_symbol_gpl_*) + export_symbol_gpl_conftest $(echo $1 | cut -f5- -d_) + ;; + + drm_atomic_helper_crtc_destroy_state_has_crtc_arg) + # + # Determine if __drm_atomic_helper_crtc_destroy_state() has 'crtc' + # argument. + # + # 'crtc' argument removed by commit ec2dc6a0fe38 ("drm: Drop crtc + # argument from __drm_atomic_helper_crtc_destroy_state") in v4.7 + # (2016-05-09) + # + CODE=" + #if defined(NV_DRM_DRM_ATOMIC_HELPER_H_PRESENT) + #include + #endif + void conftest_drm_atomic_helper_crtc_destroy_state_has_crtc_arg(void) { + __drm_atomic_helper_crtc_destroy_state(NULL, NULL); + }" + + compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_CRTC_DESTROY_STATE_HAS_CRTC_ARG" "" "types" + ;; + + drm_atomic_helper_plane_destroy_state_has_plane_arg) + # + # Determine if __drm_atomic_helper_plane_destroy_state has + # 'plane' argument. 
+ # + # 'plane' argument removed by commit 2f701695fd3a (drm: Drop plane + # argument from __drm_atomic_helper_plane_destroy_state") in v4.7 + # (2016-05-09) + # + CODE=" + #if defined(NV_DRM_DRM_ATOMIC_HELPER_H_PRESENT) + #include + #endif + void conftest_drm_atomic_helper_plane_destroy_state_has_plane_arg(void) { + __drm_atomic_helper_plane_destroy_state(NULL, NULL); + }" + + compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_PLANE_DESTROY_STATE_HAS_PLANE_ARG" "" "types" + ;; + + drm_atomic_helper_connector_dpms) + # + # Determine if the function drm_atomic_helper_connector_dpms() is present. + # + # Removed by commit 7d902c05b480 ("drm: Nuke + # drm_atomic_helper_connector_dpms") in v4.14 (2017-07-25) + # + CODE=" + #if defined(NV_DRM_DRM_ATOMIC_HELPER_H_PRESENT) + #include + #endif + void conftest_drm_atomic_helper_connector_dpms(void) { + drm_atomic_helper_connector_dpms(); + }" + + compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT" "" "functions" + ;; + + get_backlight_device_by_name) + # + # Determine if the get_backlight_device_by_name() function is present + # + CODE=" + #include + int conftest_get_backlight_device_by_name(void) { + return get_backlight_device_by_name(); + }" + compile_check_conftest "$CODE" "NV_GET_BACKLIGHT_DEVICE_BY_NAME_PRESENT" "" "functions" + ;; + + timer_setup) + # + # Determine if the function timer_setup() is present. + # + # Added by commit 686fef928bba ("timer: Prepare to change timer + # callback argument type") in v4.14 (2017-09-28) + # + CODE=" + #include + int conftest_timer_setup(void) { + return timer_setup(); + }" + compile_check_conftest "$CODE" "NV_TIMER_SETUP_PRESENT" "" "functions" + ;; + + radix_tree_replace_slot) + # + # Determine if the radix_tree_replace_slot() function is + # present and how many arguments it takes. + # + # root parameter added to radix_tree_replace_slot (but the symbol + # was not exported) by commit 6d75f366b924 ("lib: radix-tree: + # check accounting of existing slot replacement users") in v4.10 + # (2016-12-12) + # + # radix_tree_replace_slot symbol export added by commit + # 10257d719686 ("EXPORT_SYMBOL radix_tree_replace_slot") in v4.11 + # (2017-01-11) + # + CODE=" + #include + #include + void conftest_radix_tree_replace_slot(void) { + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) + radix_tree_replace_slot(); + #endif + }" + compile_check_conftest "$CODE" "NV_RADIX_TREE_REPLACE_SLOT_PRESENT" "" "functions" + + echo "$CONFTEST_PREAMBLE + #include + void conftest_radix_tree_replace_slot(void) { + radix_tree_replace_slot(NULL, NULL); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_RADIX_TREE_REPLACE_SLOT_ARGUMENT_COUNT 2" | append_conftest "functions" + return + fi + + echo "$CONFTEST_PREAMBLE + #include + void conftest_radix_tree_replace_slot(void) { + radix_tree_replace_slot(NULL, NULL, NULL); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_RADIX_TREE_REPLACE_SLOT_ARGUMENT_COUNT 3" | append_conftest "functions" + return + else + echo "#error radix_tree_replace_slot() conftest failed!" 
| append_conftest "functions" + fi + ;; + + kthread_create_on_node) + # + # Determine if kthread_create_on_node is available + # + # kthread_create_on_node was added in by commit 207205a2ba26 + # ("kthread: NUMA aware kthread_create_on_node()") in v2.6.39 + # (2011-03-22). + # + CODE=" + #include + void kthread_create_on_node_conftest(void) { + (void)kthread_create_on_node(); + }" + + compile_check_conftest "$CODE" "NV_KTHREAD_CREATE_ON_NODE_PRESENT" "" "functions" + ;; + + cpumask_of_node) + # + # Determine whether cpumask_of_node is available. + # + # ARM support for cpumask_of_node() lagged until commit 1a2db300348b + # ("arm64, numa: Add NUMA support for arm64 platforms.") in v4.7 + # (2016-04-08) + # + CODE=" + #include + void conftest_cpumask_of_node(void) { + (void)cpumask_of_node(); + }" + + compile_check_conftest "$CODE" "NV_CPUMASK_OF_NODE_PRESENT" "" "functions" + ;; + + drm_mode_object_find_has_file_priv_arg) + # + # Determine if drm_mode_object_find() has 'file_priv' arguments. + # + # Updated to take 'file_priv' argument by commit 418da17214ac + # ("drm: Pass struct drm_file * to __drm_mode_object_find [v2]") + # in v4.15 (2017-03-14) + # + CODE=" + #include + void conftest_drm_mode_object_find_has_file_priv_arg( + struct drm_device *dev, + struct drm_file *file_priv, + uint32_t id, + uint32_t type) { + (void)drm_mode_object_find(dev, file_priv, id, type); + }" + + compile_check_conftest "$CODE" "NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG" | append_conftest "types" + ;; + + pci_enable_msix_range) + # + # Determine if the pci_enable_msix_range() function is present. + # + # Added by commit 302a2523c277 ("PCI/MSI: Add + # pci_enable_msi_range() and pci_enable_msix_range()") in v3.14 + # (2013-12-30) + # + CODE=" + #include + void conftest_pci_enable_msix_range(void) { + pci_enable_msix_range(); + }" + + compile_check_conftest "$CODE" "NV_PCI_ENABLE_MSIX_RANGE_PRESENT" "" "functions" + ;; + + dma_buf_owner) + # + # Determine if the dma_buf struct has an owner member. + # + # Added by commit 9abdffe286c1 ("dma-buf: add ref counting for + # module as exporter") in v4.2 (2015-05-05) + # + CODE=" + #include + int conftest_dma_buf_owner(void) { + return offsetof(struct dma_buf, owner); + }" + + compile_check_conftest "$CODE" "NV_DMA_BUF_OWNER_PRESENT" "" "types" + ;; + + dma_buf_export_args) + # + # Determine argument count for dma_buf_export(). + # + # 4 arguments added by commit d15bd7ee445d + # ("dma-buf: Introduce dma buffer sharing mechanism") + # in v3.3 (2011-12-26) + # + # Additional argument added by commit 3aac4502fd3f + # ("dma-buf: use reservation objects") in v3.17 (2014-07-01). + # + # Parameters wrapped in a single struct dma_buf_export_info by commit: + # d8fbe341beb6("dma-buf: cleanup dma_buf_export() to make it easily extensible") + # in v4.1 (2015-01-23). 
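Illustrative sketch, not taken from the driver source: one way the NV_DMA_BUF_EXPORT_ARGUMENT_COUNT value generated by this conftest might be consumed. The names 'priv', 'size' and 'nv_dma_buf_ops' are hypothetical placeholders supplied by the caller.

    /* Pick the dma_buf_export() call form that matches the detected API. */
    #if (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 1)
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        exp_info.ops   = &nv_dma_buf_ops;
        exp_info.size  = size;
        exp_info.flags = O_RDWR;
        exp_info.priv  = priv;
        buf = dma_buf_export(&exp_info);
    #elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 4)
        buf = dma_buf_export(priv, &nv_dma_buf_ops, size, O_RDWR);
    #else /* NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 5 */
        buf = dma_buf_export(priv, &nv_dma_buf_ops, size, O_RDWR, NULL);
    #endif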
+ # + echo "$CONFTEST_PREAMBLE + #include + struct dma_buf* conftest_dma_buf_export(void) { + return dma_buf_export(NULL); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_DMA_BUF_EXPORT_ARGUMENT_COUNT 1" | append_conftest "functions" + return + fi + + echo "$CONFTEST_PREAMBLE + #include + struct dma_buf* conftest_dma_buf_export(void) { + return dma_buf_export(NULL, NULL, 0, 0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_DMA_BUF_EXPORT_ARGUMENT_COUNT 4" | append_conftest "functions" + return + fi + + echo "$CONFTEST_PREAMBLE + #include + struct dma_buf* conftest_dma_buf_export(void) { + return dma_buf_export(NULL, NULL, 0, 0, NULL); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_DMA_BUF_EXPORT_ARGUMENT_COUNT 5" | append_conftest "functions" + return + fi + echo "#error dma_buf_export() conftest failed!" | append_conftest "functions" + ;; + + dma_buf_ops_has_kmap) + # + # Determine if .kmap exists in dma_buf_ops. + # In some kernels, this is a mandatory callback. + # + # Added by commit fc13020e086b + # ("dma-buf: add support for kernel cpu access") in v3.4 (2012-03-20) + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_dma_buf_ops_has_kmap(void) { + return offsetof(struct dma_buf_ops, kmap); + } + int conftest_dma_buf_ops_has_kunmap(void) { + return offsetof(struct dma_buf_ops, kunmap); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_OPS_HAS_KMAP" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_OPS_HAS_KMAP" | append_conftest "types" + return + fi + ;; + + dma_buf_ops_has_kmap_atomic) + # + # Determine if .kmap_atomic exists in dma_buf_ops. + # In some kernels, this is a mandatory callback. + # + # Added by commit fc13020e086b + # ("dma-buf: add support for kernel cpu access")in v3.4 (2012-03-20) + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_dma_buf_ops_has_kmap_atomic(void) { + return offsetof(struct dma_buf_ops, kmap_atomic); + } + int conftest_dma_buf_ops_has_kunmap_atomic(void) { + return offsetof(struct dma_buf_ops, kunmap_atomic); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC" | append_conftest "types" + return + fi + ;; + + dma_buf_ops_has_map) + # + # Determine if .map exists in dma_buf_ops. + # In some kernels, this is a mandatory callback. 
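Illustrative sketch, not taken from the driver source: how the NV_DMA_BUF_OPS_HAS_KMAP and NV_DMA_BUF_OPS_HAS_MAP macros (the latter generated just below) might gate initialization of an exporter's dma_buf_ops table; the nv_dma_buf_* callback names are hypothetical.

    static const struct dma_buf_ops nv_dma_buf_ops = {
        /* ... mandatory callbacks (attach, map_dma_buf, etc.) ... */
    #if defined(NV_DMA_BUF_OPS_HAS_KMAP)
        .kmap   = nv_dma_buf_kmap,
        .kunmap = nv_dma_buf_kunmap,
    #endif
    #if defined(NV_DMA_BUF_OPS_HAS_MAP)
        .map    = nv_dma_buf_map,
        .unmap  = nv_dma_buf_unmap,
    #endif
    };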
+ # + # Added by commit f9b67f0014cb + # ("dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic macro") + # in v4.12 (2017-04-19) + # + # Removed as a mandatory callback by commit f82aab2d521e + # ("dma-buf: Remove requirement for ops->map() from dma_buf_export") + # in v4.20 (2018-08-07) + # + # Completely removed from dma-buf by commit 4337ebbbbda3 + # ("dma-buf: Remove kernel map/unmap hooks") in v5.6 (2019-11-18) + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_dma_buf_ops_has_map(void) { + return offsetof(struct dma_buf_ops, map); + } + int conftest_dma_buf_ops_has_unmap(void) { + return offsetof(struct dma_buf_ops, unmap); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_OPS_HAS_MAP" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_OPS_HAS_MAP" | append_conftest "types" + return + fi + ;; + + dma_buf_ops_has_map_atomic) + # + # Determine if map_atomic/unmap_atomic exists in dma_buf_ops. + # In some kernels, this is a mandatory callback. + # + # Added by commit f9b67f0014cb + # ("dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic macro") + # in v4.12 (2017-04-19) + # + # Removed by commit f664a5269542 + # ("dma-buf: remove kmap_atomic interface") in v4.19 (2018-05-28) + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_dma_buf_ops_has_map_atomic(void) { + return offsetof(struct dma_buf_ops, map_atomic); + } + int conftest_dma_buf_ops_has_unmap_atomic(void) { + return offsetof(struct dma_buf_ops, unmap_atomic); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_OPS_HAS_MAP_ATOMIC" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_OPS_HAS_MAP_ATOMIC" | append_conftest "types" + return + fi + ;; + + dma_buf_has_dynamic_attachment) + # + # Determine if the function dma_buf_attachment_is_dynamic() + # is present. + # + # Added by commit: 15fd552d186c + # ("dma-buf: change DMA-buf locking convention v3") in v5.5 (2018-07-03) + # + echo "$CONFTEST_PREAMBLE + #include + bool conftest_dma_buf_attachment_is_dynamic(void) { + return dma_buf_attachment_is_dynamic(NULL); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT" | append_conftest "functions" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT" | append_conftest "functions" + return + fi + ;; + + dma_buf_attachment_has_peer2peer) + # + # Determine if peer2peer is present in struct dma_buf_attachment. + # peer2peer being true indicates that a dma-buf importer is able + # to handle peer resources not backed by struct page. 
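Illustrative sketch, not taken from the driver source: an exporter using the NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER macro (generated just below) to decide whether an importer can accept peer resources that are not backed by struct page; the function name is hypothetical.

    static int nv_check_importer(struct dma_buf_attachment *attach)
    {
        bool peer_ok = false;
    #if defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER)
        peer_ok = attach->peer2peer;
    #endif
        /* Without peer2peer support, refuse PFN-only backing. */
        return peer_ok ? 0 : -EOPNOTSUPP;
    }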
+ # + # Added by commit: 09606b5446c2 + # ("dma-buf: add peer2peer flag") in v5.8 (2018-03-22) + # + echo "$CONFTEST_PREAMBLE + #include + int conftest_dma_buf_peer2peer(void) { + return offsetof(struct dma_buf_attachment, peer2peer); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER" | append_conftest "types" + rm -f conftest$$.o + return + else + echo "#undef NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER" | append_conftest "types" + return + fi + ;; + + drm_connector_funcs_have_mode_in_name) + # + # Determine if _mode_ is present in connector function names. We + # only test drm_mode_connector_attach_encoder() and assume the + # other functions are changed in sync. + # + # drm_mode_connector_attach_encoder() was renamed to + # drm_connector_attach_encoder() by commit cde4c44d8769 ("drm: + # drop _mode_ from drm_mode_connector_attach_encoder") in v4.19 + # (2018-07-09) + # + # drm_mode_connector_update_edid_property() was renamed by commit + # c555f02371c3 ("drm: drop _mode_ from update_edit_property()") + # in v4.19 (2018-07-09). + # + # The other DRM functions were renamed by commit 97e14fbeb53f + # ("drm: drop _mode_ from remaining connector functions") in v4.19 + # (2018-07-09) + # + # Note that drm_connector.h by introduced by commit 522171951761 + # ("drm: Extract drm_connector.[hc]") in v4.9 (2016-08-12) + # + CODE=" + #include + void conftest_drm_connector_funcs_have_mode_in_name(void) { + drm_mode_connector_attach_encoder(); + }" + + compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME" "" "functions" + ;; + + + node_states_n_memory) + # + # Determine if the N_MEMORY constant exists. + # + # Added by commit 8219fc48adb3 ("mm: node_states: introduce + # N_MEMORY") in v3.8 (2012-12-12). + # + CODE=" + #include + int conftest_node_states_n_memory(void) { + return N_MEMORY; + }" + + compile_check_conftest "$CODE" "NV_NODE_STATES_N_MEMORY_PRESENT" "" "types" + ;; + + vm_fault_t) + # + # Determine if vm_fault_t is present + # + # Added by commit 1c8f422059ae5da07db7406ab916203f9417e396 ("mm: + # change return type to vm_fault_t") in v4.17 (2018-04-05) + # + CODE=" + #include + vm_fault_t conftest_vm_fault_t; + " + compile_check_conftest "$CODE" "NV_VM_FAULT_T_IS_PRESENT" "" "types" + ;; + + vmf_insert_pfn) + # + # Determine if the function vmf_insert_pfn() is + # present. + # + # Added by commit 1c8f422059ae5da07db7406ab916203f9417e396 ("mm: + # change return type to vm_fault_t") in v4.17 (2018-04-05) + # + CODE=" + #include + void conftest_vmf_insert_pfn(void) { + vmf_insert_pfn(); + }" + + compile_check_conftest "$CODE" "NV_VMF_INSERT_PFN_PRESENT" "" "functions" + ;; + + drm_framebuffer_get) + # + # Determine if the function drm_framebuffer_get() is present. + # + # Added by commit a4a69da06bc1 ("drm: Introduce + # drm_framebuffer_{get,put}()") in v4.12 (2017-02-28). + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_FRAMEBUFFER_H_PRESENT) + #include + #endif + + void conftest_drm_framebuffer_get(void) { + drm_framebuffer_get(); + }" + + compile_check_conftest "$CODE" "NV_DRM_FRAMEBUFFER_GET_PRESENT" "" "functions" + ;; + + drm_gem_object_get) + # + # Determine if the function drm_gem_object_get() is present. + # + # Added by commit e6b62714e87c ("drm: Introduce + # drm_gem_object_{get,put}()") in v4.12 (2017-02-28). 
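Illustrative sketch, not taken from the driver source: consuming NV_DRM_GEM_OBJECT_GET_PRESENT (generated just below) to take a GEM object reference with whichever helper the kernel provides; 'gem' is a hypothetical struct drm_gem_object pointer.

    #if defined(NV_DRM_GEM_OBJECT_GET_PRESENT)
        drm_gem_object_get(gem);        /* v4.12+ name */
    #else
        drm_gem_object_reference(gem);  /* pre-rename equivalent */
    #endif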
+ # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_GEM_H_PRESENT) + #include + #endif + void conftest_drm_gem_object_get(void) { + drm_gem_object_get(); + }" + + compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_GET_PRESENT" "" "functions" + ;; + + drm_dev_put) + # + # Determine if the function drm_dev_put() is present. + # + # Added by commit 9a96f55034e4 ("drm: introduce drm_dev_{get/put} + # functions") in v4.15 (2017-09-26). + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + void conftest_drm_dev_put(void) { + drm_dev_put(); + }" + + compile_check_conftest "$CODE" "NV_DRM_DEV_PUT_PRESENT" "" "functions" + ;; + + drm_connector_list_iter) + # + # Determine if the drm_connector_list_iter struct is present. + # + # Added by commit 613051dac40da1751ab269572766d3348d45a197 ("drm: + # locking&new iterators for connector_list") in v4.11 (2016-12-14). + # + CODE=" + #include + int conftest_drm_connector_list_iter(void) { + struct drm_connector_list_iter conn_iter; + }" + + compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_LIST_ITER_PRESENT" "" "types" + + # + # Determine if the function drm_connector_list_iter_get() is + # renamed to drm_connector_list_iter_begin(). + # + # Renamed by b982dab1e66d2b998e80a97acb6eaf56518988d3 (drm: Rename + # connector list iterator API) in v4.12 (2017-02-28). + # + CODE=" + #if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT) + #include + #endif + void conftest_drm_connector_list_iter_begin(void) { + drm_connector_list_iter_begin(); + }" + + compile_check_conftest "$CODE" "NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT" "" "functions" + ;; + + drm_atomic_helper_swap_state_has_stall_arg) + # + # Determine if drm_atomic_helper_swap_state() has 'stall' argument. + # + # drm_atomic_helper_swap_state() function prototype updated to take + # 'state' and 'stall' arguments by commit + # 5e84c2690b805caeff3b4c6c9564c7b8de54742d (drm/atomic-helper: + # Massage swap_state signature somewhat) + # in v4.8 (2016-06-10). + # + CODE=" + #include + void conftest_drm_atomic_helper_swap_state_has_stall_arg( + struct drm_atomic_state *state, + bool stall) { + (void)drm_atomic_helper_swap_state(state, stall); + }" + + compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG" | append_conftest "types" + + # + # Determine if drm_atomic_helper_swap_state() returns int. + # + # drm_atomic_helper_swap_state() function prototype + # updated to return int by commit + # c066d2310ae9bbc695c06e9237f6ea741ec35e43 (drm/atomic: Change + # drm_atomic_helper_swap_state to return an error.) in v4.14 + # (2017-07-11). + # + CODE=" + #include + int conftest_drm_atomic_helper_swap_state_return_int( + struct drm_atomic_state *state, + bool stall) { + return drm_atomic_helper_swap_state(state, stall); + }" + + compile_check_conftest "$CODE" "NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT" | append_conftest "types" + ;; + + pm_runtime_available) + # + # Determine if struct dev_pm_info has the 'usage_count' field. + # + # This was added to the kernel in commit 5e928f77a09a0 in v2.6.32 + # (2008-08-18), but originally were dependent on CONFIG_PM_RUNTIME, + # which was folded into the more generic CONFIG_PM in commit + # d30d819dc8310 in v3.19 (2014-11-27). + # Rather than attempt to select the appropriate CONFIG option, + # simply check if this member is present. 
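Illustrative sketch, not taken from the driver source: how NV_PM_RUNTIME_AVAILABLE (generated just below) might gate runtime-PM calls; 'dev' is a hypothetical struct device pointer.

    #if defined(NV_PM_RUNTIME_AVAILABLE)
        pm_runtime_get_sync(dev);
        /* ... access the hardware ... */
        pm_runtime_put(dev);
    #else
        /* Runtime PM not usable on this kernel; keep the device powered. */
    #endif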
+ # + CODE=" + #include + void pm_runtime_conftest(void) { + struct dev_pm_info dpmi; + atomic_set(&dpmi.usage_count, 1); + }" + + compile_check_conftest "$CODE" "NV_PM_RUNTIME_AVAILABLE" "" "generic" + ;; + + device_driver_of_match_table) + # + # Determine if the device_driver struct has an of_match_table member. + # + # of_match_table was added by commit 597b9d1e44e9 ("drivercore: + # Add of_match_table to the common device drivers") in v2.6.35 + # (2010-04-13). + # + CODE=" + #include + int conftest_device_driver_of_match_table(void) { + return offsetof(struct device_driver, of_match_table); + }" + + compile_check_conftest "$CODE" "NV_DEVICE_DRIVER_OF_MATCH_TABLE_PRESENT" "" "types" + ;; + + device_of_node) + # + # Determine if the device struct has an of_node member. + # + # of_node member was added by commit d706c1b05027 ("driver-core: + # Add device node pointer to struct device") in v2.6.35 + # (2010-04-13). + # + CODE=" + #include + int conftest_device_of_node(void) { + return offsetof(struct device, of_node); + }" + + compile_check_conftest "$CODE" "NV_DEVICE_OF_NODE_PRESENT" "" "types" + ;; + + dev_is_pci) + # + # Determine if the dev_is_pci() macro is present. + # + # dev_is_pci() macro was added by commit fb8a0d9d1bfd ("pci: Add + # SR-IOV convenience functions and macros") in v2.6.34 + # (2010-02-10). + # + CODE=" + #include + void conftest_dev_is_pci(void) { + if(dev_is_pci()) {} + } + " + + compile_check_conftest "$CODE" "NV_DEV_IS_PCI_PRESENT" "" "functions" + ;; + + of_find_matching_node) + # + # Determine if the of_find_matching_node() function is present. + # + # Test if linux/of.h header file inclusion is successful or not and + # define/undefine NV_LINUX_OF_H_USABLE depending upon status of inclusion. + # + # of_find_matching_node was added by commit 283029d16a88 + # ("[POWERPC] Add of_find_matching_node() helper function") in + # v2.6.25 (2008-01-09). + # + echo "$CONFTEST_PREAMBLE + #include + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_LINUX_OF_H_USABLE" | append_conftest "generic" + CODE=" + #include + void conftest_of_find_matching_node() { + of_find_matching_node(); + }" + + compile_check_conftest "$CODE" "NV_OF_FIND_MATCHING_NODE_PRESENT" "" "functions" + else + echo "#undef NV_LINUX_OF_H_USABLE" | append_conftest "generic" + echo "#undef NV_OF_FIND_MATCHING_NODE_PRESENT" | append_conftest "functions" + fi + ;; + + dma_direct_map_resource) + # + # Determine whether dma_is_direct() exists. + # + # dma_is_direct() was added by commit 356da6d0cde3 ("dma-mapping: + # bypass indirect calls for dma-direct") in 5.1 (2018-12-06). + # + # If dma_is_direct() does exist, then we assume that + # dma_direct_map_resource() exists. Both functions were added + # as part of the same patchset. + # + # The presence of dma_is_direct() and dma_direct_map_resource() + # means that dma_direct can perform DMA mappings itself. 
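Illustrative sketch, not taken from the driver source: consuming NV_DMA_IS_DIRECT_PRESENT (generated just below) to detect at runtime whether the device uses the direct DMA-mapping path; the helper name is hypothetical.

    static bool nv_dma_mapping_is_direct(struct device *dev)
    {
    #if defined(NV_DMA_IS_DIRECT_PRESENT)
        return dma_is_direct(get_dma_ops(dev));
    #else
        return false; /* assume indirect dma_map_ops on older kernels */
    #endif
    }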
+ # + CODE=" + #include + void conftest_dma_is_direct(void) { + dma_is_direct(); + }" + + compile_check_conftest "$CODE" "NV_DMA_IS_DIRECT_PRESENT" "" "functions" + ;; + + tegra_get_platform) + # + # Determine if tegra_get_platform() function is present + # + CODE=" + #if defined NV_SOC_TEGRA_CHIP_ID_H_PRESENT + #include + #elif defined(NV_SOC_TEGRA_FUSE_H_PRESENT) + #include + #endif + void conftest_tegra_get_platform(void) { + tegra_get_platform(0); + } + " + + compile_check_conftest "$CODE" "NV_TEGRA_GET_PLATFORM_PRESENT" "" "functions" + ;; + + tegra_bpmp_send_receive) + # + # Determine if tegra_bpmp_send_receive() function is present + # + CODE=" + #if defined NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT + #include + #endif + int conftest_tegra_bpmp_send_receive( + int mrq, + void *ob_data, + int ob_sz, + void *ib_data, + int ib_sz) { + return tegra_bpmp_send_receive(mrq, ob_data, ob_sz, ib_data, ib_sz); + } + " + + compile_check_conftest "$CODE" "NV_TEGRA_BPMP_SEND_RECEIVE" "" "functions" + ;; + + drm_alpha_blending_available) + # + # Determine if the DRM subsystem supports alpha blending + # + # This conftest using "generic" rather than "functions" because + # with the logic of "functions" the presence of + # *either*_alpha_property or _blend_mode_property would be enough + # to cause NV_DRM_ALPHA_BLENDING_AVAILABLE to be defined. + # + CODE=" + #if defined(NV_DRM_DRM_BLEND_H_PRESENT) + #include + #endif + void conftest_drm_alpha_blending_available(void) { + /* 2018-04-11 ae0e28265e216dad11d4cbde42fc15e92919af78 */ + (void)drm_plane_create_alpha_property; + + /* 2018-08-23 a5ec8332d4280500544e316f76c04a7adc02ce03 */ + (void)drm_plane_create_blend_mode_property; + }" + + compile_check_conftest "$CODE" "NV_DRM_ALPHA_BLENDING_AVAILABLE" "" "generic" + ;; + + drm_rotation_available) + # + # Determine if the DRM subsystem supports rotation. + # + # drm_plane_create_rotation_property() was added on 2016-09-26 by + # d138dd3c0c70979215f3184cf36f95875e37932e (drm: Add support for + # optional per-plane rotation property) in linux kernel. Presence + # of it is sufficient to say that DRM subsystem support rotation. + # + CODE=" + #if defined(NV_DRM_DRM_BLEND_H_PRESENT) + #include + #endif + void conftest_drm_rotation_available(void) { + drm_plane_create_rotation_property(); + }" + + compile_check_conftest "$CODE" "NV_DRM_ROTATION_AVAILABLE" "" "functions" + ;; + + drm_driver_prime_flag_present) + # + # Determine whether driver feature flag DRIVER_PRIME is present. + # + # The DRIVER_PRIME flag was added by commit 3248877ea179 (drm: + # base prime/dma-buf support (v5)) in v3.4 (2011-11-25) and is + # removed by commit 0424fdaf883a (drm/prime: Actually remove + # DRIVER_PRIME everywhere) on 2019-06-17. + # + # DRIVER_PRIME definition moved from drmP.h to drm_drv.h by + # commit 85e634bce01a (drm: Extract drm_drv.h) in v4.10 + # (2016-11-14). + # + # DRIVER_PRIME define is changed to enum value by commit + # 0e2a933b02c9 (drm: Switch DRIVER_ flags to an enum) in v5.1 + # (2019-01-29). + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + unsigned int drm_driver_prime_flag_present_conftest(void) { + return DRIVER_PRIME; + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_PRIME_FLAG_PRESENT" "" "types" + ;; + + drm_connector_for_each_possible_encoder) + # + # Determine the number of arguments of the + # drm_connector_for_each_possible_encoder() macro. 
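Illustrative sketch, not taken from the driver source: iterating possible encoders with whichever macro arity this conftest reports; 'connector' is a hypothetical struct drm_connector pointer.

    struct drm_encoder *encoder;
    #if (NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT == 3)
        int i;
        drm_connector_for_each_possible_encoder(connector, encoder, i) {
            /* inspect 'encoder' */
        }
    #else
        drm_connector_for_each_possible_encoder(connector, encoder) {
            /* inspect 'encoder' */
        }
    #endif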
+ # + # drm_connector_for_each_possible_encoder() is added by commit + # 83aefbb887b5 (drm: Add drm_connector_for_each_possible_encoder()) + # in v4.19. The definition and prorotype is changed to take only + # two arguments connector and encoder, by commit 62afb4ad425a + # (drm/connector: Allow max possible encoders to attach to a + # connector) in v5.5rc1. + # + echo "$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_CONNECTOR_H_PRESENT) + #include + #endif + + void conftest_drm_connector_for_each_possible_encoder( + struct drm_connector *connector, + struct drm_encoder *encoder, + int i) { + + drm_connector_for_each_possible_encoder(connector, encoder, i) { + } + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + echo "#define NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT 3" | append_conftest "functions" + rm -f conftest$$.o + return + else + echo "#define NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT 2" | append_conftest "functions" + fi + ;; + + mmu_notifier_ops_invalidate_range) + # + # Determine if the mmu_notifier_ops struct has the + # 'invalidate_range' member. + # + # struct mmu_notifier_ops.invalidate_range was added by commit + # 0f0a327fa12cd55de5e7f8c05a70ac3d047f405e ("mmu_notifier: add the + # callback for mmu_notifier_invalidate_range()") in v3.19 + # (2014-11-13). + CODE=" + #include + int conftest_mmu_notifier_ops_invalidate_range(void) { + return offsetof(struct mmu_notifier_ops, invalidate_range); + }" + + compile_check_conftest "$CODE" "NV_MMU_NOTIFIER_OPS_HAS_INVALIDATE_RANGE" "" "types" + ;; + + drm_format_num_planes) + # + # Determine if drm_format_num_planes() function is present. + # + # The drm_format_num_planes() function was added by commit + # d0d110e09629 drm: Add drm_format_num_planes() utility function in + # v3.3 (2011-12-20). Prototype was moved from drm_crtc.h to + # drm_fourcc.h by commit ae4df11a0f53 (drm: Move format-related + # helpers to drm_fourcc.c) in v4.8 (2016-06-09). + # drm_format_num_planes() has been removed by commit 05c452c115bf + # (drm: Remove users of drm_format_num_planes) removed v5.3 + # (2019-05-16). + # + CODE=" + + #if defined(NV_DRM_DRM_CRTC_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_FOURCC_H_PRESENT) + #include + #endif + + void conftest_drm_format_num_planes(void) { + drm_format_num_planes(); + } + " + + compile_check_conftest "$CODE" "NV_DRM_FORMAT_NUM_PLANES_PRESENT" "" "functions" + ;; + + drm_gem_object_has_resv) + # + # Determine if the 'drm_gem_object' structure has a 'resv' field. + # + # A 'resv' filed in the 'drm_gem_object' structure, is added by + # commit 1ba627148ef5 (drm: Add reservation_object to + # drm_gem_object) in v5.2. + # + CODE="$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRM_GEM_H_PRESENT) + #include + #endif + + int conftest_drm_gem_object_has_resv(void) { + return offsetof(struct drm_gem_object, resv); + }" + + compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_HAS_RESV" "" "types" + ;; + + proc_ops) + # + # Determine if the 'struct proc_ops' type is present. + # + # Added by commit d56c0d45f0e2 ("proc: decouple proc from VFS with + # "struct proc_ops"") in 5.6-rc1 + # + CODE=" + #include + + struct proc_ops p_ops; + " + + compile_check_conftest "$CODE" "NV_PROC_OPS_PRESENT" "" "types" + ;; + + drm_crtc_state_has_async_flip) + # + # Determine if the 'drm_crtc_state' structure has a 'async_flip' + # field. 
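Illustrative sketch, not taken from the driver source: how the async_flip/pageflip_flags macros generated by this pair of conftests might be consumed to determine whether a flip was requested as asynchronous; the function name is hypothetical.

    static bool nv_flip_is_async(const struct drm_crtc_state *crtc_state)
    {
    #if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP)
        return crtc_state->async_flip;
    #elif defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS)
        return (crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
    #else
        return false;
    #endif
    }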
+ # + # Commit 4d85f45c73a2 (drm/atomic: Rename crtc_state->pageflip_flags + # to async_flip) replaced 'pageflip_flags' by 'async_flip' in v5.4. + # + CODE=" + #if defined(NV_DRM_DRM_CRTC_H_PRESENT) + #include + #endif + + int conftest_drm_crtc_state_has_async_flip(void) { + return offsetof(struct drm_crtc_state, async_flip); + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP" "" "types" + ;; + + drm_crtc_state_has_pageflip_flags) + # + # Determine if the 'drm_crtc_state' structure has a + # 'pageflip_flags' field. + # + # 'pageflip_flags' added by commit 6cbe5c466d73 (drm/atomic: Save + # flip flags in drm_crtc_state) in v4.12. Commit 4d85f45c73a2 + # (drm/atomic: Rename crtc_state->pageflip_flags to async_flip) + # replaced 'pageflip_flags' by 'async_flip' in v5.4. + # + CODE=" + #if defined(NV_DRM_DRM_CRTC_H_PRESENT) + #include + #endif + + int conftest_drm_crtc_state_has_pageflip_flags(void) { + return offsetof(struct drm_crtc_state, pageflip_flags); + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS" "" "types" + ;; + + ktime_get_raw_ts64) + # + # Determine if ktime_get_raw_ts64() is present + # + # Added by commit fb7fcc96a86cf ("timekeeping: Standardize on + # ktime_get_*() naming") in 4.18 (2018-04-27) + # + CODE=" + #include + void conftest_ktime_get_raw_ts64(void){ + ktime_get_raw_ts64(); + }" + compile_check_conftest "$CODE" "NV_KTIME_GET_RAW_TS64_PRESENT" "" "functions" + ;; + + ktime_get_real_ts64) + # + # Determine if ktime_get_real_ts64() is present + # + # Added by commit d6d29896c665d ("timekeeping: Provide timespec64 + # based interfaces") in 3.17 (2014-07-16) + # + CODE=" + #include + void conftest_ktime_get_real_ts64(void){ + ktime_get_real_ts64(); + }" + compile_check_conftest "$CODE" "NV_KTIME_GET_REAL_TS64_PRESENT" "" "functions" + ;; + + drm_format_modifiers_present) + # + # Determine whether the base DRM format modifier support is present. + # + # This will show up in a few places: + # + # -Definition of the format modifier constructor macro, which + # we can use to reconstruct our bleeding-edge format modifiers + # when the local kernel headers don't include them. + # + # -The first set of format modifier vendor macros, including the + # poorly named "NV" vendor, which was later renamed "NVIDIA". + # + # -the "modifier[]" member of the AddFB2 ioctl's parameter + # structure. + # + # All these were added by commit e3eb3250d84e (drm: add support for + # tiled/compressed/etc modifier in addfb2) in 4.1-rc1 (2015-02-05). + CODE=" + #include + #include + int conftest_fourcc_fb_modifiers(void) { + u64 my_fake_mod = fourcc_mod_code(INTEL, 0); + (void)my_fake_mod; + return offsetof(struct drm_mode_fb_cmd2, modifier); + }" + + compile_check_conftest "$CODE" "NV_DRM_FORMAT_MODIFIERS_PRESENT" "" "types" + + ;; + + timespec64) + # + # Determine if struct timespec64 is present + # Added by commit 361a3bf00582 ("time64: Add time64.h header and + # define struct timespec64") in 3.17 (2014-07-16) + # + CODE=" + #include + + struct timespec64 ts64; + " + compile_check_conftest "$CODE" "NV_TIMESPEC64_PRESENT" "" "types" + + ;; + + vmalloc_has_pgprot_t_arg) + # + # Determine if __vmalloc has the 'pgprot' argument. + # + # The third argument to __vmalloc, page protection + # 'pgprot_t prot', was removed by commit 88dca4ca5a93 + # (mm: remove the pgprot argument to __vmalloc) + # in v5.8-rc1 (2020-06-01). 
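Illustrative sketch, not taken from the driver source: calling __vmalloc() with or without the page-protection argument, depending on the NV_VMALLOC_HAS_PGPROT_T_ARG result of this conftest; 'ptr' and 'size' are hypothetical.

    #if defined(NV_VMALLOC_HAS_PGPROT_T_ARG)
        ptr = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
    #else
        ptr = __vmalloc(size, GFP_KERNEL);  /* v5.8+: prot argument removed */
    #endif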
+ CODE=" + #include + + void conftest_vmalloc_has_pgprot_t_arg(void) { + pgprot_t prot; + (void)__vmalloc(0, 0, prot); + }" + + compile_check_conftest "$CODE" "NV_VMALLOC_HAS_PGPROT_T_ARG" "" "types" + + ;; + + mm_has_mmap_lock) + # + # Determine if the 'mm_struct' structure has a 'mmap_lock' field. + # + # Kernel commit da1c55f1b272 ("mmap locking API: rename mmap_sem + # to mmap_lock") replaced the field 'mmap_sem' by 'mmap_lock' + # in v5.8-rc1 (2020-06-08). + CODE=" + #include + + int conftest_mm_has_mmap_lock(void) { + return offsetof(struct mm_struct, mmap_lock); + }" + + compile_check_conftest "$CODE" "NV_MM_HAS_MMAP_LOCK" "" "types" + ;; + + full_name_hash) + # + # Determine how many arguments full_name_hash takes. + # + # Changed by commit 8387ff2577e ("vfs: make the string hashes salt + # the hash") in v4.8 (2016-06-10) + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_full_name_hash(void) { + full_name_hash(NULL, NULL, 0); + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_FULL_NAME_HASH_ARGUMENT_COUNT 3" | append_conftest "functions" + else + echo "#define NV_FULL_NAME_HASH_ARGUMENT_COUNT 2" | append_conftest "functions" + fi + ;; + + hlist_for_each_entry) + # + # Determine how many arguments hlist_for_each_entry takes. + # + # Changed by commit b67bfe0d42c ("hlist: drop the node parameter + # from iterators") in v3.9 (2013-02-28) + # + echo "$CONFTEST_PREAMBLE + #include + void conftest_hlist_for_each_entry(void) { + struct hlist_head *head; + struct dummy + { + struct hlist_node hlist; + }; + struct dummy *pos; + hlist_for_each_entry(pos, head, hlist) {} + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT 3" | append_conftest "functions" + else + echo "#define NV_HLIST_FOR_EACH_ENTRY_ARGUMENT_COUNT 4" | append_conftest "functions" + fi + ;; + + drm_vma_offset_exact_lookup_locked) + # + # Determine if the drm_vma_offset_exact_lookup_locked() function + # is present. + # + # Added by commit 2225cfe46bcc ("drm/gem: Use kref_get_unless_zero + # for the weak mmap references") in v4.4 + # + CODE=" + #include + void conftest_drm_vma_offset_exact_lookup_locked(void) { + drm_vma_offset_exact_lookup_locked(); + }" + + compile_check_conftest "$CODE" "NV_DRM_VMA_OFFSET_EXACT_LOOKUP_LOCKED_PRESENT" "" "functions" + ;; + + drm_vma_node_is_allowed_has_tag_arg) + # + # Determine if drm_vma_node_is_allowed() has 'tag' arguments of + # 'struct drm_file *' type. + # + # Updated to take 'tag' argument by commit d9a1f0b4eb60 ("drm: use + # drm_file to tag vm-bos") in v4.9 + # + CODE=" + #include + bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, + struct drm_file *tag) { + return true; + }" + + compile_check_conftest "$CODE" "NV_DRM_VMA_NODE_IS_ALLOWED_HAS_TAG_ARG" | append_conftest "types" + ;; + + drm_vma_offset_node_has_readonly) + # + # Determine if the 'drm_vma_offset_node' structure has a 'readonly' + # field. + # + # Added by commit 3e977ac6179b ("drm/i915: Prevent writing into a + # read-only object via a GGTT mmap") in v4.19. 
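Illustrative sketch, not taken from the driver source: using NV_DRM_VMA_OFFSET_NODE_HAS_READONLY (generated just below) in an mmap path; 'node' and 'vma' are hypothetical.

    #if defined(NV_DRM_VMA_OFFSET_NODE_HAS_READONLY)
        /* Refuse a writable mapping of an object marked read-only. */
        if (node->readonly && (vma->vm_flags & VM_WRITE))
            return -EACCES;
    #endif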
+ # + CODE=" + #include + + int conftest_drm_vma_offset_node_has_readonly(void) { + return offsetof(struct drm_vma_offset_node, readonly); + }" + + compile_check_conftest "$CODE" "NV_DRM_VMA_OFFSET_NODE_HAS_READONLY" "" "types" + + ;; + + pci_enable_atomic_ops_to_root) + # pci_enable_atomic_ops_to_root was added by + # commit 430a23689dea ("PCI: Add pci_enable_atomic_ops_to_root()") + # in v4.16-rc1 (2018-01-05) + # + CODE=" + #include + void conftest_pci_enable_atomic_ops_to_root(void) { + pci_enable_atomic_ops_to_root(); + }" + compile_check_conftest "$CODE" "NV_PCI_ENABLE_ATOMIC_OPS_TO_ROOT_PRESENT" "" "functions" + ;; + + kvmalloc) + # + # Determine if kvmalloc() is present + # + # Added by commit a7c3e901a46ff54c016d040847eda598a9e3e653 ("mm: + # introduce kv[mz]alloc helpers") in v4.12 (2017-05-08). + # + CODE=" + #include + void conftest_kvmalloc(void){ + kvmalloc(); + }" + compile_check_conftest "$CODE" "NV_KVMALLOC_PRESENT" "" "functions" + + ;; + + drm_gem_object_put_unlocked) + # + # Determine if the function drm_gem_object_put_unlocked() is present. + # + # In v5.9-rc1, commit 2f4dd13d4bb8 ("drm/gem: add + # drm_gem_object_put helper") removes drm_gem_object_put_unlocked() + # function and replace its definition by transient macro. Commit + # ab15d56e27be ("drm: remove transient + # drm_gem_object_put_unlocked()") finally removes + # drm_gem_object_put_unlocked() macro. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_GEM_H_PRESENT) + #include + #endif + void conftest_drm_gem_object_put_unlocked(void) { + drm_gem_object_put_unlocked(); + }" + + compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT" "" "functions" + ;; + + drm_display_mode_has_vrefresh) + # + # Determine if the 'drm_display_mode' structure has a 'vrefresh' + # field. + # + # Removed by commit 0425662fdf05 ("drm: Nuke mode->vrefresh") in + # v5.9-rc1. + # + CODE=" + #include + + int conftest_drm_display_mode_has_vrefresh(void) { + return offsetof(struct drm_display_mode, vrefresh); + }" + + compile_check_conftest "$CODE" "NV_DRM_DISPLAY_MODE_HAS_VREFRESH" "types" + + ;; + + drm_driver_master_set_has_int_return_type) + # + # Determine if drm_driver::master_set() returns integer value + # + # Changed to void by commit 907f53200f98 ("drm: vmwgfx: remove + # drm_driver::master_set() return type") in v5.9-rc1. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + int conftest_drm_driver_master_set_has_int_return_type(struct drm_driver *drv, + struct drm_device *dev, struct drm_file *file_priv, bool from_open) { + + return drv->master_set(dev, file_priv, from_open); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_SET_MASTER_HAS_INT_RETURN_TYPE" "" "types" + ;; + + drm_driver_has_gem_free_object) + # + # Determine if the 'drm_driver' structure has a 'gem_free_object' + # function pointer. + # + # drm_driver::gem_free_object is removed by commit 1a9458aeb8eb + # ("drm: remove drm_driver::gem_free_object") in v5.9-rc1. 
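Illustrative sketch, not taken from the driver source: conditionally populating the drm_driver field checked by this conftest; nv_drm_driver and nv_drm_gem_free are hypothetical names.

    static struct drm_driver nv_drm_driver = {
        /* ... */
    #if defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT)
        .gem_free_object = nv_drm_gem_free,  /* hypothetical callback */
    #endif
    };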
+ # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + int conftest_drm_driver_has_gem_free_object(void) { + return offsetof(struct drm_driver, gem_free_object); + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT" "" "types" + ;; + + vga_tryget) + # + # Determine if vga_tryget() is present + # + # vga_tryget() was removed by commit f369bc3f9096 ("vgaarb: mark + # vga_tryget static") in v5.9-rc1 (2020-08-01). + # + CODE=" + #include + void conftest_vga_tryget(void) { + vga_tryget(); + }" + + compile_check_conftest "$CODE" "NV_VGA_TRYGET_PRESENT" "" "functions" + ;; + + pci_channel_state) + # + # Determine if pci_channel_state enum type is present. + # + # pci_channel_state was removed by commit 16d79cd4e23b ("PCI: Use + # 'pci_channel_state_t' instead of 'enum pci_channel_state'") in + # v5.9-rc1 (2020-07-02). + # + CODE=" + #include + + enum pci_channel_state state; + " + + compile_check_conftest "$CODE" "NV_PCI_CHANNEL_STATE_PRESENT" "" "types" + ;; + + pgprot_decrypted) + # + # Determine if the macro 'pgprot_decrypted()' is present. + # + # Added by commit 21729f81ce8a ("x86/mm: Provide general kernel + # support for memory encryption") in v4.14 (2017-07-18) + CODE=" + #include + + void conftest_pgprot_decrypted(void) + if(pgprot_decrypted()) {} + }" + + compile_check_conftest "$CODE" "NV_PGPROT_DECRYPTED_PRESENT" "" "functions" + + ;; + + cc_mkdec) + # + # Determine if cc_mkdec() is present. + # + # cc_mkdec() by commit b577f542f93c ("x86/coco: Add API to handle + # encryption mask) in v5.18-rc1 (2022-02-22). + # + CODE=" + #if defined(NV_ASM_COCO_H_PRESENT) + #include + #endif + + void conftest_cc_mkdec(void) { + cc_mkdec(); + }" + + compile_check_conftest "$CODE" "NV_CC_MKDEC_PRESENT" "" "functions" + ;; + + drm_prime_pages_to_sg_has_drm_device_arg) + # + # Determine if drm_prime_pages_to_sg() has 'dev' argument. + # + # drm_prime_pages_to_sg() is updated to take 'dev' argument by commit + # 707d561f77b5 ("drm: allow limiting the scatter list size."). + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + #if defined(NV_DRM_DRM_PRIME_H_PRESENT) + #include + #endif + + struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, + unsigned int nr_pages) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG" "" "types" + ;; + + drm_driver_has_gem_prime_callbacks) + # + # Determine if drm_driver structure has the GEM and PRIME callback + # function pointers. + # + # The GEM and PRIME callback are removed from drm_driver + # structure, by commit d693def4fd1c ("drm: Remove obsolete GEM and + # PRIME callbacks from struct drm_driver"). + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DRV_H_PRESENT) + #include + #endif + + void conftest_drm_driver_has_gem_and_prime_callbacks(void) { + struct drm_driver drv; + + drv.gem_prime_pin = 0; + drv.gem_prime_get_sg_table = 0; + drv.gem_prime_vmap = 0; + drv.gem_prime_vunmap = 0; + drv.gem_vm_ops = 0; + }" + + compile_check_conftest "$CODE" "NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS" "" "types" + ;; + + drm_crtc_atomic_check_has_atomic_state_arg) + # + # Determine if drm_crtc_helper_funcs::atomic_check takes 'state' + # argument of 'struct drm_atomic_state' type. 
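Illustrative sketch, not taken from the driver source: declaring a CRTC atomic_check implementation with whichever signature this conftest detects; the function name is hypothetical.

    #if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG)
    static int nv_crtc_atomic_check(struct drm_crtc *crtc,
                                    struct drm_atomic_state *state);
    #else
    static int nv_crtc_atomic_check(struct drm_crtc *crtc,
                                    struct drm_crtc_state *crtc_state);
    #endif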
+ # + # The commit 29b77ad7b9ca ("drm/atomic: Pass the full state to CRTC + # atomic_check") passed the full atomic state to + # drm_crtc_helper_funcs::atomic_check() + # + # To test the signature of drm_crtc_helper_funcs::atomic_check(), + # declare a function prototype with typeof ::atomic_check(), and then + # define the corresponding function implementation with the expected + # signature. Successful compilation indicates that ::atomic_check() + # has the expected signature. + # + echo "$CONFTEST_PREAMBLE + #include + + static const struct drm_crtc_helper_funcs *funcs; + typeof(*funcs->atomic_check) conftest_drm_crtc_atomic_check_has_atomic_state_arg; + + int conftest_drm_crtc_atomic_check_has_atomic_state_arg( + struct drm_crtc *crtc, struct drm_atomic_state *state) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types" + else + echo "#undef NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types" + fi + ;; + + drm_gem_object_vmap_has_map_arg) + # + # Determine if drm_gem_object_funcs::vmap takes 'map' + # argument of 'struct dma_buf_map' type. + # + # The commit 49a3f51dfeee ("drm/gem: Use struct dma_buf_map in GEM + # vmap ops and convert GEM backends") update + # drm_gem_object_funcs::vmap to take 'map' argument. + # + CODE=" + #include + int conftest_drm_gem_object_vmap_has_map_arg( + struct drm_gem_object *obj, struct dma_buf_map *map) { + return obj->funcs->vmap(obj, map); + }" + + compile_check_conftest "$CODE" "NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG" "" "types" + ;; + + set_close_on_exec) + # + # __set_close_on_exec(() was added by + # commit 1dce27c5aa67 ("Wrap accesses to the fd_sets") + # in v3.4-rc1 (2012-02-19) + # + CODE=" + #include + #include + void conftest_set_close_on_exec(void) { + __set_close_on_exec(); + }" + + compile_check_conftest "$CODE" "NV_SET_CLOSE_ON_EXEC_PRESENT" "" "functions" + ;; + + iterate_fd) + # + # iterate_fd() was added by + # commit c3c073f808b2 ("new helper: iterate_fd()") + # in v3.7-rc1 (2012-09-26) + # + CODE=" + #include + #include + void conftest_iterate_fd(void) { + iterate_fd(); + }" + + compile_check_conftest "$CODE" "NV_ITERATE_FD_PRESENT" "" "functions" + ;; + + seq_read_iter) + # + # Determine if seq_read_iter() is present + # + # seq_read_iter() was added by commit d4d50710a8b4 ("seq_file: + # add seq_read_iter") in v5.10-rc1 (2020-11-04). + # + CODE=" + #include + void conftest_seq_read_iter(void) { + seq_read_iter(); + }" + + compile_check_conftest "$CODE" "NV_SEQ_READ_ITER_PRESENT" "" "functions" + ;; + + pci_class_multimedia_hd_audio) + # + # Determine if 'PCI_CLASS_MULTIMEDIA_HD_AUDIO' macro is present + # in . + # + # The commit 07f4f97d7b4b ("vga_switcheroo: Use device link for HDA + # controller") has moved 'PCI_CLASS_MULTIMEDIA_HD_AUDIO' macro from + # to in v4.17-rc1 (2018-03-03). + # + CODE=" + #include + unsigned int conftest_pci_class_multimedia_hd_audio(void) { + return PCI_CLASS_MULTIMEDIA_HD_AUDIO; + }" + + compile_check_conftest "$CODE" "NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT" "" "generic" + ;; + + sg_page_iter_page) + # + # Determine if sg_page_iter_page() is present + # + # sg_page_iter_page() was added by commit 2db76d7c3c6db + # ("lib/scatterlist: sg_page_iter: support sg lists w/o backing + # pages") in v3.10-rc1 (2013-05-11). 
+ # + CODE=" + #include + void conftest_sg_page_iter_page(void) { + sg_page_iter_page(); + }" + + compile_check_conftest "$CODE" "NV_SG_PAGE_ITER_PAGE_PRESENT" "" "functions" + ;; + + unsafe_follow_pfn) + # + # Determine if unsafe_follow_pfn() is present. + # + # unsafe_follow_pfn() was added by commit 69bacee7f9ad + # ("mm: Add unsafe_follow_pfn") in v5.13-rc1. + # + CODE=" + #include + void conftest_unsafe_follow_pfn(void) { + unsafe_follow_pfn(); + }" + + compile_check_conftest "$CODE" "NV_UNSAFE_FOLLOW_PFN_PRESENT" "" "functions" + ;; + + drm_plane_atomic_check_has_atomic_state_arg) + # + # Determine if drm_plane_helper_funcs::atomic_check takes 'state' + # argument of 'struct drm_atomic_state' type. + # + # The commit 7c11b99a8e58 ("drm/atomic: Pass the full state to + # planes atomic_check") passed the full atomic state to + # drm_plane_helper_funcs::atomic_check() + # + # To test the signature of drm_plane_helper_funcs::atomic_check(), + # declare a function prototype with typeof ::atomic_check(), and then + # define the corresponding function implementation with the expected + # signature. Successful compilation indicates that ::atomic_check() + # has the expected signature. + # + echo "$CONFTEST_PREAMBLE + #include + + static const struct drm_plane_helper_funcs *funcs; + typeof(*funcs->atomic_check) conftest_drm_plane_atomic_check_has_atomic_state_arg; + + int conftest_drm_plane_atomic_check_has_atomic_state_arg( + struct drm_plane *plane, struct drm_atomic_state *state) { + return 0; + }" > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + echo "#define NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types" + else + echo "#undef NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG" | append_conftest "types" + fi + ;; + + ib_peer_memory_symbols) + # + # Determine if the following symbols exist in Module.symvers: + # 1. ib_register_peer_memory_client + # 2. ib_unregister_peer_memory_client + # The conftest first checks in the kernel's own Module.symvers in + # the regular path. If the symbols are not found there, it's possible + # that MOFED is installed and check for these symbols in MOFED's + # Module.symvers whose path is different from the kernel's symvers. + # + # Note: KERNELRELEASE and ARCH are defined by Kbuild and automatically + # passed down to conftest.sh as env vars. 
+ + MLNX_OFED_KERNEL_DIR=/usr/src/ofa_kernel + VAR_DKMS_SOURCES_DIR=$(test -d /var/lib/dkms/mlnx-ofed-kernel && + ls -d /var/lib/dkms/mlnx-ofed-kernel/*/build 2>/dev/null) + + if check_for_ib_peer_memory_symbols "$OUTPUT" || \ + check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/$ARCH/$KERNELRELEASE" || \ + check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/$KERNELRELEASE" || \ + check_for_ib_peer_memory_symbols "$MLNX_OFED_KERNEL_DIR/default" || \ + check_for_ib_peer_memory_symbols "$VAR_DKMS_SOURCES_DIR"; then + echo "#define NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT" | append_conftest "symbols" + else + echo "#undef NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT" | append_conftest "symbols" + fi + ;; + + add_memory_driver_managed) + # + # Determine if the add_memory_driver_managed function is present + # + # Added by commit 7b7b27214bba ("mm/memory_hotplug: introduce + # add_memory_driver_managed()") in v5.8-rc1 (2020-06-05) + # + CODE=" + #include + void conftest_add_memory_driver_managed() { + add_memory_driver_managed(); + }" + + compile_check_conftest "$CODE" "NV_ADD_MEMORY_DRIVER_MANAGED_PRESENT" "" "functions" + ;; + + add_memory_driver_managed_has_mhp_flags_arg) + # + # Check if add_memory_driver_managed() has mhp_flags arg. + # + # Added by commit b6117199787c ("mm/memory_hotplug: prepare passing flags to + # add_memory() and friends") in v5.10-rc1 (2020-10-16) + # + CODE=" + #include + int add_memory_driver_managed(int nid, u64 start, u64 size, + const char *resource_name, + mhp_t mhp_flags) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_ADD_MEMORY_DRIVER_MANAGED_HAS_MHP_FLAGS_ARG" "" "types" + ;; + + remove_memory_has_nid_arg) + # + # Check if remove_memory() has nid parameter. + # + # Removed by commit e1c158e4956612e7 ("mm/memory_hotplug: remove nid + # parameter from remove_memory() and friends") in v5.15-rc1 (2021-09-09) + # + CODE=" + #include + int remove_memory(int nid, u64 start, u64 size) { + return 0; + }" + + compile_check_conftest "$CODE" "NV_REMOVE_MEMORY_HAS_NID_ARG" "" "types" + ;; + + + device_property_read_u64) + # + # Determine if the device_property_read_u64 function is present + # + # Added by commit b31384fa5de37a1 ("Driver core: Unified device + # properties interface for platform firmware") in v3.19-rc1 (2014-11-05) + # + CODE=" + #include + void conftest_device_property_read_u64() { + device_property_read_u64(); + }" + + compile_check_conftest "$CODE" "NV_DEVICE_PROPERTY_READ_U64_PRESENT" "" "functions" + ;; + + of_property_count_elems_of_size) + # + # Determine if of_property_count_elems_of_size is present + # + # Added by commit 1df09bcof (" Move OF property and graph API from + # base.c to property.c" + # + # Test if linux/of.h header file inclusion is successful or not, + # depending on that check, for of_property_count_elems_of_size + # presence + # + echo "$CONFTEST_PREAMBLE + #include + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + CODE=" + #include + void conftest_of_property_count_elems_of_size() { + of_property_count_elems_of_size(); + }" + + compile_check_conftest "$CODE" "NV_OF_PROPERTY_COUNT_ELEMS_OF_SIZE_PRESENT" "" "functions" + else + echo "#undef NV_OF_PROPERTY_COUNT_ELEMS_OF_SIZE_PRESENT" | append_conftest "functions" + fi + ;; + + of_property_read_variable_u8_array) + # + # Determine if of_property_read_variable_u8_array is present + # + # Added by commit 1df09bcof (" Move OF property and graph API from + # base.c to property.c" + # + 
# Test if linux/of.h header file inclusion is successful or not, + # depending on that, check for of_property_read_variable_u8_array + # presence + # + echo "$CONFTEST_PREAMBLE + #include + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + CODE=" + #include + void conftest_of_property_read_variable_u8_array() { + of_property_read_variable_u8_array(); + }" + + compile_check_conftest "$CODE" "NV_OF_PROPERTY_READ_VARIABLE_U8_ARRAY_PRESENT" "" "functions" + else + echo "#undef NV_OF_PROPERTY_READ_VARIABLE_U8_ARRAY_PRESENT" | append_conftest "functions" + fi + ;; + + devm_of_platform_populate) + # + # Determine if devm_of_platform_populate() function is present + # + # Added by commit 38b0b21of (add devm_ functions for populate and + # depopulate") + # + CODE=" + #if defined(NV_LINUX_OF_PLATFORM_H_PRESENT) + #include + #endif + void conftest_devm_of_platform_populate(void) + { + devm_of_platform_populate(NULL, NULL); + } + " + compile_check_conftest "$CODE" "NV_DEVM_OF_PLATFORM_POPULATE_PRESENT" "" "functions" + ;; + + of_dma_configure) + # + # Determine if of_dma_configure() function is present + # + # Added by commit 591c1eeof ("configure the platform device + # dma parameters") + # + CODE=" + #if defined(NV_LINUX_OF_DEVICE_H_PRESENT) + #include + #endif + void conftest_of_dma_configure(void) + { + of_dma_configure(); + } + " + + compile_check_conftest "$CODE" "NV_OF_DMA_CONFIGURE_PRESENT" "" "functions" + ;; + + icc_get) + # + # Determine if icc_get() function is present + # + # Added by commit 11f1cec ("interconnect: Add generic on-chip + # interconnect API") + # + CODE=" + #if defined(NV_LINUX_INTERCONNECT_H_PRESENT) + #include + #endif + void conftest_icc_get(void) + { + icc_get(); + } + " + + compile_check_conftest "$CODE" "NV_ICC_GET_PRESENT" "" "functions" + ;; + + icc_set_bw) + # + # Determine if icc_set_bw() function is present + # + # Added by commit 11f1cec ("interconnect: Add generic on-chip + # interconnect API") + # + CODE=" + #if defined(NV_LINUX_INTERCONNECT_H_PRESENT) + #include + #endif + void conftest_icc_set_bw(void) + { + icc_set_bw(); + } + " + + compile_check_conftest "$CODE" "NV_ICC_SET_BW_PRESENT" "" "functions" + ;; + + icc_put) + # + # Determine if icc_put() function is present + # + # Added by commit 11f1cec ("interconnect: Add generic on-chip + # interconnect API") + # + CODE=" + #if defined(NV_LINUX_INTERCONNECT_H_PRESENT) + #include + #endif + void conftest_icc_put(void) + { + icc_put(); + } + " + + compile_check_conftest "$CODE" "NV_ICC_PUT_PRESENT" "" "functions" + ;; + + i2c_new_client_device) + # + # Determine if i2c_new_client_device() function is present + # + # Added by commit 390fd04i2c ("remove deprecated i2c_new_device API") + # + CODE=" + #include + void conftest_i2c_new_client_device(void) + { + i2c_new_client_device(); + } + " + + compile_check_conftest "$CODE" "NV_I2C_NEW_CLIENT_DEVICE_PRESENT" "" "functions" + ;; + + i2c_unregister_device) + # + # Determine if i2c_unregister_device() function is present + # + # Added by commit 9c1600ei2c ("Add i2c_board_info and i2c_new_device()") + # + CODE=" + #include + void conftest_i2c_unregister_device(void) + { + i2c_unregister_device(); + } + " + + compile_check_conftest "$CODE" "NV_I2C_UNREGISTER_DEVICE_PRESENT" "" "functions" + ;; + + of_get_named_gpio) + # + # Determine if of_get_named_gpio() function is present + # + # Added by commit a6b0919 ("of/gpio: Add new method for getting gpios + # under different 
property names") + # + CODE=" + #if defined(NV_LINUX_OF_GPIO_H_PRESENT) + #include + #endif + void conftest_of_get_named_gpio(void) + { + of_get_named_gpio(); + } + " + + compile_check_conftest "$CODE" "NV_OF_GET_NAME_GPIO_PRESENT" "" "functions" + ;; + + devm_gpio_request_one) + # + # Determine if devm_gpio_request_one() function is present + # + # Added by commit 09d71ff (gpiolib: Implement devm_gpio_request_one()") + # + CODE=" + #if defined(NV_LINUX_GPIO_H_PRESENT) + #include + #endif + void conftest_devm_gpio_request_one(void) + { + devm_gpio_request_one(); + } + " + + compile_check_conftest "$CODE" "NV_DEVM_GPIO_REQUEST_ONE_PRESENT" "" "functions" + ;; + + gpio_direction_input) + # + # Determine if gpio_direction_input() function is present + # + # Added by commit c7caf86 (gpio: remove gpio_ensure_requested()") + # + CODE=" + #if defined(NV_LINUX_GPIO_H_PRESENT) + #include + #endif + void conftest_gpio_direction_input(void) + { + gpio_direction_input(); + } + " + + compile_check_conftest "$CODE" "NV_GPIO_DIRECTION_INPUT_PRESENT" "" "functions" + ;; + + gpio_direction_output) + # + # Determine if gpio_direction_output() function is present + # + # Added by commit c7caf86 (gpio: remove gpio_ensure_requested()") + # + CODE=" + #if defined(NV_LINUX_GPIO_H_PRESENT) + #include + #endif + void conftest_gpio_direction_output(void) + { + gpio_direction_output(); + } + " + + compile_check_conftest "$CODE" "NV_GPIO_DIRECTION_OUTPUT_PRESENT" "" "functions" + ;; + + gpio_get_value) + # + # Determine if gpio_get_value() function is present + # + # Added by commit 7563bbf ("gpiolib/arches: Centralise bolierplate + # asm/gpio.h") + # + CODE=" + #if defined(NV_LINUX_GPIO_H_PRESENT) + #include + #endif + void conftest_gpio_get_value(void) + { + gpio_get_value(); + } + " + + compile_check_conftest "$CODE" "NV_GPIO_GET_VALUE_PRESENT" "" "functions" + ;; + + gpio_set_value) + # + # Determine if gpio_set_value() function is present + # + # Added by commit 7563bbf ("gpiolib/arches: Centralise bolierplate + # asm/gpio.h") + # + CODE=" + #if defined(NV_LINUX_GPIO_H_PRESENT) + #include + #endif + void conftest_gpio_set_value(void) + { + gpio_set_value(); + } + " + + compile_check_conftest "$CODE" "NV_GPIO_SET_VALUE_PRESENT" "" "functions" + ;; + + gpio_to_irq) + # + # Determine if gpio_to_irq() function is present + # + # Added by commit 7563bbf ("gpiolib/arches: Centralise bolierplate + # asm/gpio.h") + # + CODE=" + #if defined(NV_LINUX_GPIO_H_PRESENT) + #include + #endif + void conftest_gpio_to_irq(void) + { + gpio_to_irq(); + } + " + + compile_check_conftest "$CODE" "NV_GPIO_TO_IRQ_PRESENT" "" "functions" + ;; + + migrate_vma_setup) + # + # Determine if migrate_vma_setup() function is present + # + # migrate_vma_setup() function was added by commit + # a7d1f22bb74f32cf3cd93f52776007e161f1a738 ("mm: turn migrate_vma + # upside down) in v5.4. + # (2019-08-20). + CODE=" + #include + int conftest_migrate_vma_setup(void) { + migrate_vma_setup(); + }" + + compile_check_conftest "$CODE" "NV_MIGRATE_VMA_SETUP_PRESENT" "" "functions" + ;; + + migrate_vma_added_flags) + # + # Determine if migrate_vma structure has flags + # + # flags were added to struct migrate_vma by commit + # 5143192cd410c4fc83be09a2e73423765aee072b ("mm/migrate: add a flags + # parameter to_migrate_vma) in v5.9. + # (2020-07-28). 
+ CODE=" + #include + int conftest_migrate_vma_added_flags(void) { + return offsetof(struct migrate_vma, flags); + }" + + compile_check_conftest "$CODE" "NV_MIGRATE_VMA_FLAGS_PRESENT" "" "types" + ;; + + drm_device_has_pdev) + # + # Determine if the 'drm_device' structure has a 'pdev' field. + # + # Removed by commit b347e04452ff ("drm: Remove pdev field from + # struct drm_device") in v5.14-rc1. + # + CODE=" + #if defined(NV_DRM_DRMP_H_PRESENT) + #include + #endif + + #if defined(NV_DRM_DRM_DEVICE_H_PRESENT) + #include + #endif + + int conftest_drm_device_has_pdev(void) { + return offsetof(struct drm_device, pdev); + }" + + compile_check_conftest "$CODE" "NV_DRM_DEVICE_HAS_PDEV" "" "types" + ;; + + make_device_exclusive_range) + # + # Determine if the make_device_exclusive_range() function is present + # + # make_device_exclusive_range() function was added by commit + # b756a3b5e7ead ("mm: device exclusive memory access") in v5.14 + # (2021-06-30). + CODE=" + #include + int conftest_make_device_exclusive_range(void) { + make_device_exclusive_range(); + }" + + compile_check_conftest "$CODE" "NV_MAKE_DEVICE_EXCLUSIVE_RANGE_PRESENT" "" "functions" + ;; + + ioasid_get) + # + # Determine if ioasid_get() function is present + # + # ioasid_get() function was added by commit + # cb4789b0d19ff231ce9f73376a023341300aed96 (iommu/ioasid: Add ioasidreferences) in v5.11. + # (2020-11-23). + CODE=" + #if defined(NV_LINUX_IOASID_H_PRESENT) + #include + #endif + void conftest_ioasid_get(void) { + ioasid_get(); + }" + + compile_check_conftest "$CODE" "NV_IOASID_GET_PRESENT" "" "functions" + ;; + + drm_crtc_state_has_no_vblank) + # + # Determine if the 'drm_crtc_state' structure has 'no_vblank'. + # + # drm_crtc_state::no_vblank was added by commit b25c60af7a877 + # ("drm/crtc: Add a generic infrastructure to fake VBLANK events") + # in 4.18.0-rc3 (2018-07-03). + # + CODE=" + #include + void conftest_drm_crtc_state_has_no_vblank(void) { + struct drm_crtc_state foo; + (void)foo.no_vblank; + }" + + compile_check_conftest "$CODE" "NV_DRM_CRTC_STATE_HAS_NO_VBLANK" "" "types" + ;; + + drm_mode_config_has_allow_fb_modifiers) + # + # Determine if the 'drm_mode_config' structure has + # an 'allow_fb_modifiers' field. + # + # an 'allow_fb_modifiers' field in the 'drm_mode_config' structure, + # is added by commit e3eb3250d84e ("drm: add support for + # tiled/compressed/etc modifier in addfb2") in v4.1, and removed by + # commit 3d082157a242 ("drm: remove allow_fb_modifiers") in v5.18-rc1. + # + # The 'struct drm_mode_config' definition, is moved to + # drm_mode_config.h file by commit 28575f165d36 ("drm: Extract + # drm_mode_config.[hc]") in v4.10. + # + CODE="$CONFTEST_PREAMBLE + #if defined(NV_DRM_DRM_MODE_CONFIG_H_PRESENT) + #include + #else + #include + #endif + int conftest_drm_mode_config_has_allow_fb_modifiers(void) { + return offsetof(struct drm_mode_config, allow_fb_modifiers); + }" + + compile_check_conftest "$CODE" "NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS" "" "types" + ;; + + dma_set_mask_and_coherent) + # + # Determine if dma_set_mask_and_coherent function is present. + # Added by commit 4aa806b771d1 ("DMA-API: provide a helper to set both DMA + # and coherent DMA masks") in v3.13 (2013-06-26). + # + CODE=" + #include + void conftest_dma_set_mask_and_coherent(void) { + dma_set_mask_and_coherent(); + }" + + compile_check_conftest "$CODE" "NV_DMA_SET_MASK_AND_COHERENT_PRESENT" "" "functions" + ;; + + drm_has_hdr_output_metadata) + # + # Determine if drm_mode.h has 'hdr_output_metadata' structure. 
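Illustrative sketch, not taken from the driver source: setting the DMA masks with or without the combined helper, based on NV_DMA_SET_MASK_AND_COHERENT_PRESENT; 'dev' and 'ret' are hypothetical.

    #if defined(NV_DMA_SET_MASK_AND_COHERENT_PRESENT)
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
    #else
        ret = dma_set_mask(dev, DMA_BIT_MASK(64));
        if (ret == 0)
            ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
    #endif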
+ # + # struct hdr_output_metadata was added by commit fbb5d0353c62d + # ("drm: Add HDR source metadata property") in 5.1.0-rc5 + # (2019-05-16) + # + CODE=" + #include + void conftest_drm_has_hdr_output_metadata(void) { + struct hdr_output_metadata foo; + (void)foo; + }" + + compile_check_conftest "$CODE" "NV_DRM_HAS_HDR_OUTPUT_METADATA" "" "types" + ;; + + uts_release) + # + # print the kernel's UTS_RELEASE string. + # + echo "#include + UTS_RELEASE" > conftest$$.c + + $CC $CFLAGS -E -P conftest$$.c + rm -f conftest$$.c + ;; + + acpi_bus_get_device) + # + # Determine if the acpi_bus_get_device() function is present + # + # acpi_bus_get_device() was removed by commit ac2a3feefad5 + # ("ACPI: bus: Eliminate acpi_bus_get_device()") in + # v5.18-rc2 (2022-04-05). + # + CODE=" + #include + int conftest_acpi_bus_get_device(void) { + return acpi_bus_get_device(); + }" + compile_check_conftest "$CODE" "NV_ACPI_BUS_GET_DEVICE_PRESENT" "" "functions" + ;; + + # When adding a new conftest entry, please use the correct format for + # specifying the relevant upstream Linux kernel commit. + # + # was added|removed|etc by commit (" (). + + *) + # Unknown test name given + echo "Error: unknown conftest '$1' requested" >&2 + exit 1 + ;; + esac +} + +case "$5" in + cc_sanity_check) + # + # Check if the selected compiler can create object files + # in the current environment. + # + VERBOSE=$6 + + echo "int cc_sanity_check(void) { + return 0; + }" > conftest$$.c + + $CC -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ ! -f conftest$$.o ]; then + if [ "$VERBOSE" = "full_output" ]; then + echo ""; + fi + if [ "$CC" != "cc" ]; then + echo "The C compiler '$CC' does not appear to be able to" + echo "create object files. Please make sure you have " + echo "your Linux distribution's libc development package" + echo "installed and that '$CC' is a valid C compiler"; + echo "name." + else + echo "The C compiler '$CC' does not appear to be able to" + echo "create executables. Please make sure you have " + echo "your Linux distribution's gcc and libc development" + echo "packages installed." + fi + if [ "$VERBOSE" = "full_output" ]; then + echo ""; + echo "*** Failed CC sanity check. Bailing out! ***"; + echo ""; + fi + exit 1 + else + rm -f conftest$$.o + exit 0 + fi + ;; + + cc_version_check) + # + # Verify that the same compiler major and minor version is + # used for the kernel and kernel module. A mismatch condition is + # not considered fatal, so this conftest returns a success status + # code, even if it fails. Failure of the test can be distinguished + # by testing for empty (success) versus non-empty (failure) output. + # + # Some gcc version strings that have proven problematic for parsing + # in the past: + # + # gcc.real (GCC) 3.3 (Debian) + # gcc-Version 3.3 (Debian) + # gcc (GCC) 3.1.1 20020606 (Debian prerelease) + # version gcc 3.2.3 + # + # As of this writing, GCC uses a version number as x.y.z and below + # are the typical version strings seen with various distributions. 
+ # gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-23) + # gcc version 4.8.5 20150623 (Red Hat 4.8.5-39) (GCC) + # gcc (GCC) 8.3.1 20190507 (Red Hat 8.3.1-4) + # gcc (GCC) 10.2.1 20200723 (Red Hat 10.2.1-1) + # gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0 + # gcc (Ubuntu 7.5.0-3ubuntu1~16.04) 7.5.0 + # gcc (Debian 8.3.0-6) 8.3.0 + # aarch64-linux-gcc.br_real (Buildroot 2020.08-14-ge5a2a90) 9.3.0, GNU ld (GNU Binutils) 2.33.1 + # + # In order to extract GCC version correctly for version strings + # like the last one above, we first check for x.y.z and if that + # fails, we fallback to x.y format. + VERBOSE=$6 + + kernel_compile_h=$OUTPUT/include/generated/compile.h + + if [ ! -f ${kernel_compile_h} ]; then + # The kernel's compile.h file is not present, so there + # isn't a convenient way to identify the compiler version + # used to build the kernel. + IGNORE_CC_MISMATCH=1 + fi + + if [ -n "$IGNORE_CC_MISMATCH" ]; then + exit 0 + fi + + kernel_cc_string=`cat ${kernel_compile_h} | \ + grep LINUX_COMPILER | cut -f 2 -d '"'` + + kernel_cc_version=`echo ${kernel_cc_string} | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+' | head -n 1` + if [ -z "${kernel_cc_version}" ]; then + kernel_cc_version=`echo ${kernel_cc_string} | grep -o '[0-9]\+\.[0-9]\+' | head -n 1` + fi + kernel_cc_major=`echo ${kernel_cc_version} | cut -d '.' -f 1` + kernel_cc_minor=`echo ${kernel_cc_version} | cut -d '.' -f 2` + + echo " + #if (__GNUC__ != ${kernel_cc_major}) || (__GNUC_MINOR__ != ${kernel_cc_minor}) + #error \"cc version mismatch\" + #endif + " > conftest$$.c + + $CC $CFLAGS -c conftest$$.c > /dev/null 2>&1 + rm -f conftest$$.c + + if [ -f conftest$$.o ]; then + rm -f conftest$$.o + exit 0; + else + # + # The gcc version check failed + # + + if [ "$VERBOSE" = "full_output" ]; then + echo ""; + echo "Warning: Compiler version check failed:"; + echo ""; + echo "The major and minor number of the compiler used to"; + echo "compile the kernel:"; + echo ""; + echo "${kernel_cc_string}"; + echo ""; + echo "does not match the compiler used here:"; + echo ""; + $CC --version + echo ""; + echo "It is recommended to set the CC environment variable"; + echo "to the compiler that was used to compile the kernel."; + echo "" + echo "To skip the test and silence this warning message, set"; + echo "the IGNORE_CC_MISMATCH environment variable to \"1\"."; + echo "However, mixing compiler versions between the kernel"; + echo "and kernel modules can result in subtle bugs that are"; + echo "difficult to diagnose."; + echo ""; + echo "*** Failed CC version check. ***"; + echo ""; + elif [ "$VERBOSE" = "just_msg" ]; then + echo "Warning: The kernel was built with ${kernel_cc_string}, but the" \ + "current compiler version is `$CC --version | head -n 1`."; + fi + exit 0; + fi + ;; + + xen_sanity_check) + # + # Check if the target kernel is a Xen kernel. If so, exit, since + # the RM doesn't currently support Xen. + # + VERBOSE=$6 + + if [ -n "$IGNORE_XEN_PRESENCE" -o -n "$VGX_BUILD" ]; then + exit 0 + fi + + test_xen + + if [ "$XEN_PRESENT" != "0" ]; then + echo "The kernel you are installing for is a Xen kernel!"; + echo ""; + echo "The NVIDIA driver does not currently support Xen kernels. If "; + echo "you are using a stock distribution kernel, please install "; + echo "a variant of this kernel without Xen support; if this is a "; + echo "custom kernel, please install a standard Linux kernel. Then "; + echo "try installing the NVIDIA kernel module again."; + echo ""; + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed Xen sanity check. Bailing out! 
***"; + echo ""; + fi + exit 1 + else + exit 0 + fi + ;; + + preempt_rt_sanity_check) + # + # Check if the target kernel has the PREEMPT_RT patch set applied. If + # so, exit, since the RM doesn't support this configuration. + # + VERBOSE=$6 + + if [ -n "$IGNORE_PREEMPT_RT_PRESENCE" ]; then + exit 0 + fi + + if test_configuration_option CONFIG_PREEMPT_RT; then + PREEMPT_RT_PRESENT=1 + elif test_configuration_option CONFIG_PREEMPT_RT_FULL; then + PREEMPT_RT_PRESENT=1 + fi + + if [ "$PREEMPT_RT_PRESENT" != "0" ]; then + echo "The kernel you are installing for is a PREEMPT_RT kernel!"; + echo ""; + echo "The NVIDIA driver does not support real-time kernels. If you "; + echo "are using a stock distribution kernel, please install "; + echo "a variant of this kernel that does not have the PREEMPT_RT "; + echo "patch set applied; if this is a custom kernel, please "; + echo "install a standard Linux kernel. Then try installing the "; + echo "NVIDIA kernel module again."; + echo ""; + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed PREEMPT_RT sanity check. Bailing out! ***"; + echo ""; + fi + exit 1 + else + exit 0 + fi + ;; + + patch_check) + # + # Check for any "official" patches that may have been applied and + # construct a description table for reporting purposes. + # + PATCHES="" + + for PATCH in patch-*.h; do + if [ -f $PATCH ]; then + echo "#include \"$PATCH\"" + PATCHES="$PATCHES "`echo $PATCH | sed -s 's/patch-\(.*\)\.h/\1/'` + fi + done + + echo "static struct { + const char *short_description; + const char *description; + } __nv_patches[] = {" + for i in $PATCHES; do + echo "{ \"$i\", NV_PATCH_${i}_DESCRIPTION }," + done + echo "{ NULL, NULL } };" + + exit 0 + ;; + + compile_tests) + # + # Run a series of compile tests to determine the set of interfaces + # and features available in the target kernel. + # + shift 5 + + CFLAGS=$1 + shift + + for i in $*; do compile_test $i; done + + for file in conftest*.d; do + rm -f $file > /dev/null 2>&1 + done + + exit 0 + ;; + + dom0_sanity_check) + # + # Determine whether running in DOM0. + # + VERBOSE=$6 + + if [ -n "$VGX_BUILD" ]; then + if [ -f /proc/xen/capabilities ]; then + if [ "`cat /proc/xen/capabilities`" == "control_d" ]; then + exit 0 + fi + else + echo "The kernel is not running in DOM0."; + echo ""; + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed DOM0 sanity check. Bailing out! ***"; + echo ""; + fi + fi + exit 1 + fi + ;; + vgpu_kvm_sanity_check) + # + # Determine whether we are running a vGPU on KVM host. 
+ # + VERBOSE=$6 + iommu=CONFIG_VFIO_IOMMU_TYPE1 + mdev=CONFIG_VFIO_MDEV + kvm=CONFIG_KVM_VFIO + VFIO_IOMMU_PRESENT=0 + VFIO_MDEV_PRESENT=0 + KVM_PRESENT=0 + + if [ -n "$VGX_KVM_BUILD" ]; then + if (test_configuration_option ${iommu} || test_configuration_option ${iommu}_MODULE); then + VFIO_IOMMU_PRESENT=1 + fi + + if (test_configuration_option ${mdev} || test_configuration_option ${mdev}_MODULE); then + VFIO_MDEV_PRESENT=1 + fi + + if (test_configuration_option ${kvm} || test_configuration_option ${kvm}_MODULE); then + KVM_PRESENT=1 + fi + + if [ "$VFIO_IOMMU_PRESENT" != "0" ] && + [ "$VFIO_MDEV_PRESENT" != "0" ] && + [ "$KVM_PRESENT" != "0" ] ; then + exit 0 + else + echo "Below CONFIG options are missing on the kernel for installing"; + echo "NVIDIA vGPU driver on KVM host"; + if [ "$VFIO_IOMMU_PRESENT" = "0" ]; then + echo "CONFIG_VFIO_IOMMU_TYPE1"; + fi + + if [ "$VFIO_MDEV_PRESENT" = "0" ]; then + echo "CONFIG_VFIO_MDEV"; + fi + + if [ "$KVM_PRESENT" = "0" ]; then + echo "CONFIG_KVM"; + fi + echo "Please install the kernel with above CONFIG options set, then"; + echo "try installing again"; + echo ""; + + if [ "$VERBOSE" = "full_output" ]; then + echo "*** Failed vGPU on KVM sanity check. Bailing out! ***"; + echo ""; + fi + fi + exit 1 + else + exit 0 + fi + ;; + test_configuration_option) + # + # Check to see if the given config option is set. + # + OPTION=$6 + + test_configuration_option $OPTION + exit $? + ;; + + get_configuration_option) + # + # Get the value of the given config option. + # + OPTION=$6 + + get_configuration_option $OPTION + exit $? + ;; + + + guess_module_signing_hash) + # + # Determine the best cryptographic hash to use for module signing, + # to the extent that is possible. + # + + HASH=$(get_configuration_option CONFIG_MODULE_SIG_HASH) + + if [ $? -eq 0 ] && [ -n $HASH ]; then + echo $HASH + exit 0 + else + for SHA in 512 384 256 224 1; do + if test_configuration_option CONFIG_MODULE_SIG_SHA$SHA; then + echo sha$SHA + exit 0 + fi + done + fi + exit 1 + ;; + + + test_kernel_headers) + # + # Check for the availability of certain kernel headers + # + + CFLAGS=$6 + + test_headers + + for file in conftest*.d; do + rm -f $file > /dev/null 2>&1 + done + + exit $? + ;; + + + build_cflags) + # + # Generate CFLAGS for use in the compile tests + # + + build_cflags + echo $CFLAGS + exit 0 + ;; + + module_symvers_sanity_check) + # + # Check whether Module.symvers exists and contains at least one + # EXPORT_SYMBOL* symbol from vmlinux + # + + if [ -n "$IGNORE_MISSING_MODULE_SYMVERS" ]; then + exit 0 + fi + + TAB=' ' + + if [ -f "$OUTPUT/Module.symvers" ] && \ + grep -e "^[^${TAB}]*${TAB}[^${TAB}]*${TAB}\+vmlinux" \ + "$OUTPUT/Module.symvers" >/dev/null 2>&1; then + exit 0 + fi + + echo "The Module.symvers file is missing, or does not contain any" + echo "symbols exported from the kernel. This could cause the NVIDIA" + echo "kernel modules to be built against a configuration that does" + echo "not accurately reflect the actual target kernel." + echo "The Module.symvers file check can be disabled by setting the" + echo "environment variable IGNORE_MISSING_MODULE_SYMVERS to 1." 
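[Editorial aside] The Module.symvers grep above looks for any row whose third (module) column is "vmlinux", i.e. a symbol exported by the kernel image itself. A rough userspace C analogue of that scan, assuming the conventional tab-separated Module.symvers layout (the file path is an example only):

#include <stdio.h>
#include <string.h>

int main(void)
{
    FILE *f = fopen("Module.symvers", "r");
    char line[512];
    int found = 0;

    if (f == NULL)
        return 1;   /* missing file, like the failing shell check */

    while (fgets(line, sizeof(line), f) != NULL) {
        /* columns: crc \t symbol \t module \t ...; built-ins use "vmlinux" */
        char *crc = strtok(line, "\t");
        char *sym = crc ? strtok(NULL, "\t") : NULL;
        char *mod = sym ? strtok(NULL, "\t\n") : NULL;

        if (mod != NULL && strcmp(mod, "vmlinux") == 0) {
            found = 1;
            break;
        }
    }
    fclose(f);
    return found ? 0 : 1;   /* non-zero exit mirrors the failing sanity check */
}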
+ + exit 1 + ;; +esac diff --git a/kernel-open/dkms.conf b/kernel-open/dkms.conf new file mode 100644 index 000000000..aef54d352 --- /dev/null +++ b/kernel-open/dkms.conf @@ -0,0 +1,12 @@ +PACKAGE_NAME="nvidia" +PACKAGE_VERSION="__VERSION_STRING" +AUTOINSTALL="yes" + +# By default, DKMS will add KERNELRELEASE to the make command line; however, +# this will cause the kernel module build to infer that it was invoked via +# Kbuild directly instead of DKMS. The dkms(8) manual page recommends quoting +# the 'make' command name to suppress this behavior. +MAKE[0]="'make' -j__JOBS NV_EXCLUDE_BUILD_MODULES='__EXCLUDE_MODULES' KERNEL_UNAME=${kernelver} modules" + +# The list of kernel modules will be generated by nvidia-installer at runtime. +__DKMS_MODULES diff --git a/kernel-open/nvidia-drm/nv-pci-table.c b/kernel-open/nvidia-drm/nv-pci-table.c new file mode 100644 index 000000000..e3494735a --- /dev/null +++ b/kernel-open/nvidia-drm/nv-pci-table.c @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +#include "nv-pci-table.h" + +/* Devices supported by RM */ +struct pci_device_id nv_pci_table[] = { + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_VGA << 8), + .class_mask = ~0 + }, + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_3D << 8), + .class_mask = ~0 + }, + { } +}; + +/* Devices supported by all drivers in nvidia.ko */ +struct pci_device_id nv_module_device_table[] = { + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_VGA << 8), + .class_mask = ~0 + }, + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_3D << 8), + .class_mask = ~0 + }, + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_BRIDGE_OTHER << 8), + .class_mask = ~0 + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, nv_module_device_table); diff --git a/kernel-open/nvidia-drm/nv-pci-table.h b/kernel-open/nvidia-drm/nv-pci-table.h new file mode 100644 index 000000000..b28483bb6 --- /dev/null +++ b/kernel-open/nvidia-drm/nv-pci-table.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_PCI_TABLE_H_ +#define _NV_PCI_TABLE_H_ + +#include + +extern struct pci_device_id nv_pci_table[]; + +#endif /* _NV_PCI_TABLE_H_ */ diff --git a/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h b/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h new file mode 100644 index 000000000..a09ab7678 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-dma-fence-helper.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
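[Editorial aside] The PCI ID tables in nv-pci-table.c above match by class code rather than by device ID: each entry compares the device's 24-bit class word (base class, sub-class, prog-if) against .class after applying .class_mask, mirroring how the PCI core matches entries. A small self-contained sketch of that comparison; the example class word 0x030000 is a VGA-compatible controller with prog-if 0:

#include <stdio.h>

#define PCI_CLASS_DISPLAY_VGA 0x0300
#define PCI_CLASS_DISPLAY_3D  0x0302

struct id { unsigned int class_word, class_mask; };

static int class_matches(const struct id *id, unsigned int dev_class)
{
    return (dev_class & id->class_mask) == id->class_word;
}

int main(void)
{
    struct id vga = { PCI_CLASS_DISPLAY_VGA << 8, ~0u };
    struct id d3d = { PCI_CLASS_DISPLAY_3D  << 8, ~0u };
    unsigned int dev_class = 0x030000;  /* example VGA-compatible controller */

    printf("VGA entry matches: %d\n", class_matches(&vga, dev_class));  /* 1 */
    printf("3D  entry matches: %d\n", class_matches(&d3d, dev_class));  /* 0 */
    return 0;
}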
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DMA_FENCE_HELPER_H__ +#define __NVIDIA_DMA_FENCE_HELPER_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_FENCE_AVAILABLE) + +/* + * Fence headers are moved to file dma-fence.h and struct fence has + * been renamed to dma_fence by commit - + * + * 2016-10-25 : f54d1867005c3323f5d8ad83eed823e84226c429 + */ + +#if defined(NV_LINUX_FENCE_H_PRESENT) +#include +#else +#include +#endif + +#if defined(NV_LINUX_FENCE_H_PRESENT) +typedef struct fence nv_dma_fence_t; +typedef struct fence_ops nv_dma_fence_ops_t; +#else +typedef struct dma_fence nv_dma_fence_t; +typedef struct dma_fence_ops nv_dma_fence_ops_t; +#endif + +#if defined(NV_LINUX_FENCE_H_PRESENT) +#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT +#else +#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT +#endif + +static inline bool nv_dma_fence_is_signaled(nv_dma_fence_t *fence) { +#if defined(NV_LINUX_FENCE_H_PRESENT) + return fence_is_signaled(fence); +#else + return dma_fence_is_signaled(fence); +#endif +} + +static inline nv_dma_fence_t *nv_dma_fence_get(nv_dma_fence_t *fence) +{ +#if defined(NV_LINUX_FENCE_H_PRESENT) + return fence_get(fence); +#else + return dma_fence_get(fence); +#endif +} + +static inline void nv_dma_fence_put(nv_dma_fence_t *fence) { +#if defined(NV_LINUX_FENCE_H_PRESENT) + fence_put(fence); +#else + dma_fence_put(fence); +#endif +} + +static inline signed long +nv_dma_fence_default_wait(nv_dma_fence_t *fence, + bool intr, signed long timeout) { +#if defined(NV_LINUX_FENCE_H_PRESENT) + return fence_default_wait(fence, intr, timeout); +#else + return dma_fence_default_wait(fence, intr, timeout); +#endif +} + +static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) { +#if defined(NV_LINUX_FENCE_H_PRESENT) + return fence_signal(fence); +#else + return dma_fence_signal(fence); +#endif +} + +static inline u64 nv_dma_fence_context_alloc(unsigned num) { +#if defined(NV_LINUX_FENCE_H_PRESENT) + return fence_context_alloc(num); +#else + return dma_fence_context_alloc(num); +#endif +} + +static inline void +nv_dma_fence_init(nv_dma_fence_t *fence, + const nv_dma_fence_ops_t *ops, + spinlock_t *lock, u64 context, unsigned seqno) { +#if defined(NV_LINUX_FENCE_H_PRESENT) + fence_init(fence, ops, lock, context, seqno); +#else + dma_fence_init(fence, ops, lock, context, seqno); +#endif +} + +#endif /* defined(NV_DRM_FENCE_AVAILABLE) */ + +#endif /* 
__NVIDIA_DMA_FENCE_HELPER_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h b/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h new file mode 100644 index 000000000..ad8800d25 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-dma-resv-helper.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DMA_RESV_HELPER_H__ +#define __NVIDIA_DMA_RESV_HELPER_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_FENCE_AVAILABLE) + +/* + * linux/reservation.h is renamed to linux/dma-resv.h, by commit + * 52791eeec1d9 (dma-buf: rename reservation_object to dma_resv) + * in v5.4. + */ + +#if defined(NV_LINUX_DMA_RESV_H_PRESENT) +#include +#else +#include +#endif + +#include + +#if defined(NV_LINUX_DMA_RESV_H_PRESENT) +typedef struct dma_resv nv_dma_resv_t; +#else +typedef struct reservation_object nv_dma_resv_t; +#endif + +static inline void nv_dma_resv_init(nv_dma_resv_t *obj) +{ +#if defined(NV_LINUX_DMA_RESV_H_PRESENT) + dma_resv_init(obj); +#else + reservation_object_init(obj); +#endif +} + +static inline void nv_dma_resv_fini(nv_dma_resv_t *obj) +{ +#if defined(NV_LINUX_DMA_RESV_H_PRESENT) + dma_resv_fini(obj); +#else + reservation_object_init(obj); +#endif +} + +static inline void nv_dma_resv_add_excl_fence(nv_dma_resv_t *obj, + nv_dma_fence_t *fence) +{ +#if defined(NV_LINUX_DMA_RESV_H_PRESENT) + dma_resv_add_excl_fence(obj, fence); +#else + reservation_object_add_excl_fence(obj, fence); +#endif +} + +#endif /* defined(NV_DRM_FENCE_AVAILABLE) */ + +#endif /* __NVIDIA_DMA_RESV_HELPER_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-conftest.h b/kernel-open/nvidia-drm/nvidia-drm-conftest.h new file mode 100644 index 000000000..bed8d8126 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-conftest.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
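[Editorial aside] nvidia-dma-resv-helper.h above (like the fence helper before it) hides a kernel API rename behind a single typedef plus thin static inline wrappers, selected at compile time by a conftest macro. The following is only a userspace mock of that shape, with invented names (NV_NEW_API_PRESENT, old_obj/new_obj), not the actual kernel types:

#include <stdio.h>

/* Pretend the conftest found the newer header, as NV_LINUX_DMA_RESV_H_PRESENT
 * would indicate in the real build. */
#define NV_NEW_API_PRESENT

struct new_obj { int initialized; };
struct old_obj { int initialized; };

#if defined(NV_NEW_API_PRESENT)
typedef struct new_obj nv_obj_t;
static inline void nv_obj_init(nv_obj_t *obj) { obj->initialized = 1; /* newer API call here */ }
#else
typedef struct old_obj nv_obj_t;
static inline void nv_obj_init(nv_obj_t *obj) { obj->initialized = 1; /* older API call here */ }
#endif

/* Driver-side code is written once against the nv_ alias. */
int main(void)
{
    nv_obj_t obj;
    nv_obj_init(&obj);
    printf("initialized = %d\n", obj.initialized);
    return 0;
}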
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_CONFTEST_H__ +#define __NVIDIA_DRM_CONFTEST_H__ + +#include "conftest.h" + +/* + * NOTE: This file is expected to get included at the top before including any + * of linux/drm headers. + * + * The goal is to redefine refcount_dec_and_test and refcount_inc before + * including drm header files, so that the drm macro/inline calls to + * refcount_dec_and_test* and refcount_inc get redirected to + * alternate implementation in this file. + */ + +#if NV_IS_EXPORT_SYMBOL_GPL_refcount_inc + +#include + +#define refcount_inc(__ptr) \ + do { \ + atomic_inc(&(__ptr)->refs); \ + } while(0) + +#endif + +#if NV_IS_EXPORT_SYMBOL_GPL_refcount_dec_and_test + +#include + +#define refcount_dec_and_test(__ptr) atomic_dec_and_test(&(__ptr)->refs) + +#endif + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) || \ + defined(NV_DRM_GEM_OBJECT_HAS_RESV) +#define NV_DRM_FENCE_AVAILABLE +#else +#undef NV_DRM_FENCE_AVAILABLE +#endif + +#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-connector.c b/kernel-open/nvidia-drm/nvidia-drm-connector.c new file mode 100644 index 000000000..6fbcd6372 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-connector.c @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
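[Editorial aside] The refcount_inc()/refcount_dec_and_test() overrides in nvidia-drm-conftest.h above work because refcount_t keeps its counter in an atomic 'refs' field, so the plain atomic primitives can reproduce the needed semantics. A userspace sketch of the dec-and-test half using C11 atomics; the struct and field names are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

struct ref_obj { atomic_int refs; };

/* Stand-in for atomic_dec_and_test(): decrement and report "hit zero". */
#define refcount_dec_and_test(ptr) \
    (atomic_fetch_sub(&(ptr)->refs, 1) == 1)

int main(void)
{
    struct ref_obj obj = { .refs = 2 };

    printf("reached zero? %d\n", refcount_dec_and_test(&obj));  /* 0 */
    printf("reached zero? %d\n", refcount_dec_and_test(&obj));  /* 1 */
    return 0;
}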
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-helper.h" +#include "nvidia-drm-priv.h" +#include "nvidia-drm-connector.h" +#include "nvidia-drm-utils.h" +#include "nvidia-drm-encoder.h" + +/* + * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h") + * moves a number of helper function definitions from + * drm/drm_crtc_helper.h to a new drm_probe_helper.h. + */ +#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT) +#include +#endif +#include + +#include +#include + +static void nv_drm_connector_destroy(struct drm_connector *connector) +{ + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + drm_connector_unregister(connector); + + drm_connector_cleanup(connector); + + if (nv_connector->edid != NULL) { + nv_drm_free(nv_connector->edid); + } + + nv_drm_free(nv_connector); +} + +static bool +__nv_drm_detect_encoder(struct NvKmsKapiDynamicDisplayParams *pDetectParams, + struct drm_connector *connector, + struct drm_encoder *encoder) +{ + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + struct drm_device *dev = connector->dev; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_encoder *nv_encoder; + + /* + * DVI-I connectors can drive both digital and analog + * encoders. If a digital connection has been forced then + * skip analog encoders. + */ + + if (connector->connector_type == DRM_MODE_CONNECTOR_DVII && + connector->force == DRM_FORCE_ON_DIGITAL && + encoder->encoder_type == DRM_MODE_ENCODER_DAC) { + return false; + } + + nv_encoder = to_nv_encoder(encoder); + + memset(pDetectParams, 0, sizeof(*pDetectParams)); + + pDetectParams->handle = nv_encoder->hDisplay; + + switch (connector->force) { + case DRM_FORCE_ON: + case DRM_FORCE_ON_DIGITAL: + pDetectParams->forceConnected = NV_TRUE; + break; + case DRM_FORCE_OFF: + pDetectParams->forceDisconnected = NV_TRUE; + break; + case DRM_FORCE_UNSPECIFIED: + break; + } + + if (connector->override_edid) { + const struct drm_property_blob *edid = connector->edid_blob_ptr; + + if (edid->length <= sizeof(pDetectParams->edid.buffer)) { + memcpy(pDetectParams->edid.buffer, edid->data, edid->length); + pDetectParams->edid.bufferSize = edid->length; + pDetectParams->overrideEdid = NV_TRUE; + } else { + WARN_ON(edid->length > + sizeof(pDetectParams->edid.buffer)); + } + } + + if (!nvKms->getDynamicDisplayInfo(nv_dev->pDevice, pDetectParams)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to detect display state"); + return false; + } + + if (pDetectParams->connected) { + if (!pDetectParams->overrideEdid && pDetectParams->edid.bufferSize) { + + if ((nv_connector->edid = nv_drm_calloc( + 1, + pDetectParams->edid.bufferSize)) != NULL) { + + memcpy(nv_connector->edid, + pDetectParams->edid.buffer, + pDetectParams->edid.bufferSize); + } else { + NV_DRM_LOG_ERR("Out of Memory"); + } + } + + return true; + } + + return false; +} + +static enum drm_connector_status __nv_drm_connector_detect_internal( + struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + enum drm_connector_status status = connector_status_disconnected; + + struct drm_encoder *detected_encoder = NULL; + struct nv_drm_encoder *nv_detected_encoder = NULL; + struct drm_encoder *encoder; + + struct NvKmsKapiDynamicDisplayParams *pDetectParams = NULL; + + BUG_ON(!mutex_is_locked(&dev->mode_config.mutex)); + + if (nv_connector->edid != NULL) { + 
nv_drm_free(nv_connector->edid); + nv_connector->edid = NULL; + } + + if ((pDetectParams = nv_drm_calloc( + 1, + sizeof(*pDetectParams))) == NULL) { + WARN_ON(pDetectParams == NULL); + goto done; + } + + nv_drm_connector_for_each_possible_encoder(connector, encoder) { + if (__nv_drm_detect_encoder(pDetectParams, connector, encoder)) { + detected_encoder = encoder; + break; + } + } nv_drm_connector_for_each_possible_encoder_end; + + if (detected_encoder == NULL) { + goto done; + } + + nv_detected_encoder = to_nv_encoder(detected_encoder); + + status = connector_status_connected; + + nv_connector->nv_detected_encoder = nv_detected_encoder; + + if (nv_connector->type == NVKMS_CONNECTOR_TYPE_DVI_I) { + drm_object_property_set_value( + &connector->base, + dev->mode_config.dvi_i_subconnector_property, + detected_encoder->encoder_type == DRM_MODE_ENCODER_DAC ? + DRM_MODE_SUBCONNECTOR_DVIA : + DRM_MODE_SUBCONNECTOR_DVID); + } + +done: + + nv_drm_free(pDetectParams); + + return status; +} + +static void __nv_drm_connector_force(struct drm_connector *connector) +{ + __nv_drm_connector_detect_internal(connector); +} + +static enum drm_connector_status +nv_drm_connector_detect(struct drm_connector *connector, bool force) +{ + return __nv_drm_connector_detect_internal(connector); +} + +static struct drm_connector_funcs nv_connector_funcs = { +#if defined NV_DRM_ATOMIC_HELPER_CONNECTOR_DPMS_PRESENT + .dpms = drm_atomic_helper_connector_dpms, +#endif + .destroy = nv_drm_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .force = __nv_drm_connector_force, + .detect = nv_drm_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int nv_drm_connector_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + struct nv_drm_encoder *nv_detected_encoder = + nv_connector->nv_detected_encoder; + NvU32 modeIndex = 0; + int count = 0; + + + if (nv_connector->edid != NULL) { + nv_drm_connector_update_edid_property(connector, nv_connector->edid); + } + + while (1) { + struct drm_display_mode *mode; + struct NvKmsKapiDisplayMode displayMode; + NvBool valid = 0; + NvBool preferredMode = NV_FALSE; + int ret; + + ret = nvKms->getDisplayMode(nv_dev->pDevice, + nv_detected_encoder->hDisplay, + modeIndex++, &displayMode, &valid, + &preferredMode); + + if (ret < 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to get mode at modeIndex %d of NvKmsKapiDisplay 0x%08x", + modeIndex, nv_detected_encoder->hDisplay); + break; + } + + /* Is end of mode-list */ + + if (ret == 0) { + break; + } + + /* Ignore invalid modes */ + + if (!valid) { + continue; + } + + mode = drm_mode_create(connector->dev); + + if (mode == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create mode for NvKmsKapiDisplay 0x%08x", + nv_detected_encoder->hDisplay); + continue; + } + + nvkms_display_mode_to_drm_mode(&displayMode, mode); + + if (preferredMode) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + } + + /* Add a mode to a connector's probed_mode list */ + + drm_mode_probed_add(connector, mode); + + count++; + } + + return count; +} + +static int nv_drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_device *dev = connector->dev; + struct nv_drm_device *nv_dev 
= to_nv_device(dev); + struct nv_drm_encoder *nv_detected_encoder = + to_nv_connector(connector)->nv_detected_encoder; + struct NvKmsKapiDisplayMode displayMode; + + if (nv_detected_encoder == NULL) { + return MODE_BAD; + } + + drm_mode_to_nvkms_display_mode(mode, &displayMode); + + if (!nvKms->validateDisplayMode(nv_dev->pDevice, + nv_detected_encoder->hDisplay, + &displayMode)) { + return MODE_BAD; + } + + return MODE_OK; +} + +static struct drm_encoder* +nv_drm_connector_best_encoder(struct drm_connector *connector) +{ + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (nv_connector->nv_detected_encoder != NULL) { + return &nv_connector->nv_detected_encoder->base; + } + + return NULL; +} + +static const struct drm_connector_helper_funcs nv_connector_helper_funcs = { + .get_modes = nv_drm_connector_get_modes, + .mode_valid = nv_drm_connector_mode_valid, + .best_encoder = nv_drm_connector_best_encoder, +}; + +static struct drm_connector* +nv_drm_connector_new(struct drm_device *dev, + NvU32 physicalIndex, NvKmsConnectorType type, + NvBool internal, + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_connector *nv_connector = NULL; + int ret = -ENOMEM; + + if ((nv_connector = nv_drm_calloc(1, sizeof(*nv_connector))) == NULL) { + goto failed; + } + + if ((nv_connector->base.state = + nv_drm_calloc(1, sizeof(*nv_connector->base.state))) == NULL) { + goto failed_state_alloc; + } + nv_connector->base.state->connector = &nv_connector->base; + + nv_connector->physicalIndex = physicalIndex; + nv_connector->type = type; + nv_connector->internal = internal; + + strcpy(nv_connector->dpAddress, dpAddress); + + ret = drm_connector_init( + dev, + &nv_connector->base, &nv_connector_funcs, + nvkms_connector_type_to_drm_connector_type(type, internal)); + + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to initialize connector created from physical index %u", + nv_connector->physicalIndex); + goto failed_connector_init; + } + + drm_connector_helper_add(&nv_connector->base, &nv_connector_helper_funcs); + + nv_connector->base.polled = DRM_CONNECTOR_POLL_HPD; + + if (nv_connector->type == NVKMS_CONNECTOR_TYPE_VGA) { + nv_connector->base.polled = + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + } + + /* Register connector with DRM subsystem */ + + ret = drm_connector_register(&nv_connector->base); + + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to register connector created from physical index %u", + nv_connector->physicalIndex); + goto failed_connector_register; + } + + return &nv_connector->base; + +failed_connector_register: + drm_connector_cleanup(&nv_connector->base); + +failed_connector_init: + nv_drm_free(nv_connector->base.state); + +failed_state_alloc: + nv_drm_free(nv_connector); + +failed: + return ERR_PTR(ret); +} + +/* + * Get connector with given physical index one exists. Otherwise, create and + * return a new connector. 
+ */ +struct drm_connector* +nv_drm_get_connector(struct drm_device *dev, + NvU32 physicalIndex, NvKmsConnectorType type, + NvBool internal, + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]) +{ + struct drm_connector *connector = NULL; +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) + struct drm_connector_list_iter conn_iter; + nv_drm_connector_list_iter_begin(dev, &conn_iter); +#else + struct drm_mode_config *config = &dev->mode_config; + mutex_lock(&config->mutex); +#endif + + /* Lookup for existing connector with same physical index */ + nv_drm_for_each_connector(connector, &conn_iter, dev) { + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (nv_connector->physicalIndex == physicalIndex) { + BUG_ON(nv_connector->type != type || + nv_connector->internal != internal); + + if (strcmp(nv_connector->dpAddress, dpAddress) == 0) { + goto done; + } + } + } + connector = NULL; + +done: +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) + nv_drm_connector_list_iter_end(&conn_iter); +#else + mutex_unlock(&config->mutex); +#endif + + if (!connector) { + connector = nv_drm_connector_new(dev, + physicalIndex, type, internal, + dpAddress); + } + + return connector; +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-connector.h b/kernel-open/nvidia-drm/nvidia-drm-connector.h new file mode 100644 index 000000000..fd83d7a56 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-connector.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
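[Editorial aside] For context on the get_modes loop in nvidia-drm-connector.c above: the driver keeps asking NVKMS for mode index 0, 1, 2, ... until the backend reports end-of-list, skips entries flagged invalid, and tags the preferred one. A hedged userspace mock of that control flow; get_display_mode() and its return convention are stand-ins for nvKms->getDisplayMode(), not the real API:

#include <stdio.h>

struct mode { int w, h, preferred; };

/* Mock backend: returns 1 and fills *out while modes remain, 0 at end of
 * list, negative on error; *valid mirrors the NvBool out-parameter above. */
static int get_display_mode(unsigned int index, struct mode *out, int *valid)
{
    static const struct mode list[] = {
        { 1920, 1080, 1 },
        {  640,  480, 0 },   /* pretend this one fails validation */
        { 1280,  720, 0 },
    };

    if (index >= sizeof(list) / sizeof(list[0]))
        return 0;            /* end of mode list */

    *out = list[index];
    *valid = (index != 1);
    return 1;
}

int main(void)
{
    unsigned int index = 0;
    int count = 0;

    while (1) {
        struct mode m;
        int valid = 0;
        int ret = get_display_mode(index++, &m, &valid);

        if (ret <= 0)        /* error or end of mode list */
            break;
        if (!valid)          /* ignore invalid modes */
            continue;

        printf("%dx%d%s\n", m.w, m.h, m.preferred ? " (preferred)" : "");
        count++;
    }
    printf("probed %d modes\n", count);
    return 0;
}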
+ */ + +#ifndef __NVIDIA_DRM_CONNECTOR_H__ +#define __NVIDIA_DRM_CONNECTOR_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT) +#include +#endif + +#include "nvtypes.h" +#include "nvkms-api-types.h" + +struct nv_drm_connector { + NvU32 physicalIndex; + + NvBool internal; + NvKmsConnectorType type; + + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + + struct nv_drm_encoder *nv_detected_encoder; + struct edid *edid; + + atomic_t connection_status_dirty; + + struct drm_connector base; +}; + +static inline struct nv_drm_connector *to_nv_connector( + struct drm_connector *connector) +{ + if (connector == NULL) { + return NULL; + } + return container_of(connector, struct nv_drm_connector, base); +} + +static inline void nv_drm_connector_mark_connection_status_dirty( + struct nv_drm_connector *nv_connector) +{ + atomic_cmpxchg(&nv_connector->connection_status_dirty, false, true); +} + +static inline bool nv_drm_connector_check_connection_status_dirty_and_clear( + struct nv_drm_connector *nv_connector) +{ + return atomic_cmpxchg( + &nv_connector->connection_status_dirty, + true, + false) == true; +} + +struct drm_connector* +nv_drm_get_connector(struct drm_device *dev, + NvU32 physicalIndex, NvKmsConnectorType type, + NvBool internal, + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_CONNECTOR_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-crtc.c b/kernel-open/nvidia-drm/nvidia-drm-crtc.c new file mode 100644 index 000000000..6380fe7b1 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-crtc.c @@ -0,0 +1,1235 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
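[Editorial aside] The connection_status_dirty helpers just above in nvidia-drm-connector.h use compare-and-swap so that "test and clear" is a single atomic step, and only the caller that actually clears the flag observes it as having been set. A userspace equivalent with C11 atomics; the names here are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool status_dirty;   /* starts out false */

static void mark_dirty(void)
{
    bool expected = false;
    /* No-op if already dirty, mirroring atomic_cmpxchg(..., false, true). */
    atomic_compare_exchange_strong(&status_dirty, &expected, true);
}

static bool check_dirty_and_clear(void)
{
    bool expected = true;
    /* Returns true only for the caller that flips the flag back to false. */
    return atomic_compare_exchange_strong(&status_dirty, &expected, false);
}

int main(void)
{
    mark_dirty();
    printf("first check:  %d\n", check_dirty_and_clear());   /* 1 */
    printf("second check: %d\n", check_dirty_and_clear());   /* 0 */
    return 0;
}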
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-helper.h" +#include "nvidia-drm-priv.h" +#include "nvidia-drm-crtc.h" +#include "nvidia-drm-connector.h" +#include "nvidia-drm-encoder.h" +#include "nvidia-drm-utils.h" +#include "nvidia-drm-fb.h" +#include "nvidia-drm-ioctl.h" +#include "nvidia-drm-format.h" + +#include "nvmisc.h" + +#include +#include + +#include +#include + +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) +#include +#endif + +static void nv_drm_plane_destroy(struct drm_plane *plane) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + + /* plane->state gets freed here */ + drm_plane_cleanup(plane); + + nv_drm_free(nv_plane); +} + +static inline void +plane_req_config_disable(struct NvKmsKapiLayerRequestedConfig *req_config) +{ + /* Clear layer config */ + memset(&req_config->config, 0, sizeof(req_config->config)); + + /* Set flags to get cleared layer config applied */ + req_config->flags.surfaceChanged = NV_TRUE; + req_config->flags.srcXYChanged = NV_TRUE; + req_config->flags.srcWHChanged = NV_TRUE; + req_config->flags.dstXYChanged = NV_TRUE; + req_config->flags.dstWHChanged = NV_TRUE; +} + +static inline void +cursor_req_config_disable(struct NvKmsKapiCursorRequestedConfig *req_config) +{ + req_config->surface = NULL; + req_config->flags.surfaceChanged = NV_TRUE; +} + +static void +cursor_plane_req_config_update(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct NvKmsKapiCursorRequestedConfig *req_config) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + struct NvKmsKapiCursorRequestedConfig old_config = *req_config; + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(plane_state); + + if (plane_state->fb == NULL) { + cursor_req_config_disable(req_config); + return; + } + + *req_config = (struct NvKmsKapiCursorRequestedConfig) { + .surface = to_nv_framebuffer(plane_state->fb)->pSurface, + + .dstX = plane_state->crtc_x, + .dstY = plane_state->crtc_y, + }; + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). + */ + WARN_ON("Unsupported blending mode"); + break; + + } + + req_config->compParams.surfaceAlpha = + plane_state->alpha >> 8; + + } else if (plane->blend_mode_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). 
+ */ + WARN_ON("Unsupported blending mode"); + break; + + } + + } else { + req_config->compParams.compMode = + nv_plane->defaultCompositionMode; + } +#else + req_config->compParams.compMode = nv_plane->defaultCompositionMode; +#endif + + /* + * Unconditionally mark the surface as changed, even if nothing changed, + * so that we always get a flip event: a DRM client may flip with + * the same surface and wait for a flip event. + */ + req_config->flags.surfaceChanged = NV_TRUE; + + if (old_config.surface == NULL && + old_config.surface != req_config->surface) { + req_config->flags.dstXYChanged = NV_TRUE; + return; + } + + req_config->flags.dstXYChanged = + old_config.dstX != req_config->dstX || + old_config.dstY != req_config->dstY; +} + +static int +plane_req_config_update(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct NvKmsKapiLayerRequestedConfig *req_config) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + struct NvKmsKapiLayerConfig old_config = req_config->config; + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(plane_state); + int ret = 0; + + if (plane_state->fb == NULL) { + plane_req_config_disable(req_config); + return 0; + } + + *req_config = (struct NvKmsKapiLayerRequestedConfig) { + .config = { + .surface = to_nv_framebuffer(plane_state->fb)->pSurface, + + /* Source values are 16.16 fixed point */ + .srcX = plane_state->src_x >> 16, + .srcY = plane_state->src_y >> 16, + .srcWidth = plane_state->src_w >> 16, + .srcHeight = plane_state->src_h >> 16, + + .dstX = plane_state->crtc_x, + .dstY = plane_state->crtc_y, + .dstWidth = plane_state->crtc_w, + .dstHeight = plane_state->crtc_h, + }, + }; + +#if defined(NV_DRM_ROTATION_AVAILABLE) + /* + * plane_state->rotation is only valid when plane->rotation_property + * is non-NULL. + */ + if (plane->rotation_property != NULL) { + if (plane_state->rotation & DRM_MODE_REFLECT_X) { + req_config->config.rrParams.reflectionX = true; + } + + if (plane_state->rotation & DRM_MODE_REFLECT_Y) { + req_config->config.rrParams.reflectionY = true; + } + + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_0: + req_config->config.rrParams.rotation = NVKMS_ROTATION_0; + break; + case DRM_MODE_ROTATE_90: + req_config->config.rrParams.rotation = NVKMS_ROTATION_90; + break; + case DRM_MODE_ROTATE_180: + req_config->config.rrParams.rotation = NVKMS_ROTATION_180; + break; + case DRM_MODE_ROTATE_270: + req_config->config.rrParams.rotation = NVKMS_ROTATION_270; + break; + default: + /* + * We should not hit this, because + * plane_state->rotation should only have values + * registered in + * __nv_drm_plane_create_rotation_property(). + */ + WARN_ON("Unsupported rotation"); + break; + } + } +#endif + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if (plane->blend_mode_property != NULL && plane->alpha_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). 
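[Editorial aside] To make the two blend modes selected above concrete: with DRM_MODE_BLEND_PREMULTI the plane's color channels already include per-pixel alpha, while with DRM_MODE_BLEND_COVERAGE they do not, and the 16-bit plane-wide alpha is reduced to 8 bits (alpha >> 8) before being handed to NVKMS. The sketch below shows the standard per-channel blend equations as an illustration of the difference; it is an assumption about generic alpha blending, not NVKMS internals:

#include <stdint.h>
#include <stdio.h>

/* Composite one 8-bit channel of a plane over the background. */
static uint8_t blend_channel(uint8_t fg, uint8_t bg, uint8_t a, int premult)
{
    if (premult) {
        /* Premultiplied: fg already carries alpha -> fg + bg * (1 - a). */
        return (uint8_t)(fg + (bg * (255 - a)) / 255);
    }
    /* Coverage (non-premultiplied): fg * a + bg * (1 - a). */
    return (uint8_t)((fg * a + bg * (255 - a)) / 255);
}

int main(void)
{
    uint16_t plane_alpha_16 = 0x8000;              /* DRM plane-wide alpha */
    uint8_t  surface_alpha  = plane_alpha_16 >> 8; /* as passed to NVKMS */

    printf("surfaceAlpha = %u\n", surface_alpha);
    printf("coverage: %u\n", blend_channel(200, 40, surface_alpha, 0));
    printf("premult:  %u\n", blend_channel(100, 40, surface_alpha, 1));
    return 0;
}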
+ */ + WARN_ON("Unsupported blending mode"); + break; + + } + + req_config->config.compParams.surfaceAlpha = + plane_state->alpha >> 8; + + } else if (plane->blend_mode_property != NULL) { + + switch (plane_state->pixel_blend_mode) { + case DRM_MODE_BLEND_PREMULTI: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + break; + case DRM_MODE_BLEND_COVERAGE: + req_config->config.compParams.compMode = + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA; + break; + default: + /* + * We should not hit this, because + * plane_state->pixel_blend_mode should only have values + * registered in + * __nv_drm_plane_create_alpha_blending_properties(). + */ + WARN_ON("Unsupported blending mode"); + break; + + } + + } else { + req_config->config.compParams.compMode = + nv_plane->defaultCompositionMode; + } +#else + req_config->config.compParams.compMode = + nv_plane->defaultCompositionMode; +#endif + + req_config->config.syncptParams.preSyncptSpecified = false; + req_config->config.syncptParams.postSyncptRequested = false; + + if (plane_state->fence != NULL || nv_drm_plane_state->fd_user_ptr) { + if (!nv_dev->supportsSyncpts) { + return -1; + } + +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) +#if defined(NV_NVHOST_DMA_FENCE_UNPACK_PRESENT) + if (plane_state->fence != NULL) { + ret = nvhost_dma_fence_unpack( + plane_state->fence, + &req_config->config.syncptParams.preSyncptId, + &req_config->config.syncptParams.preSyncptValue); + if (ret != 0) { + return ret; + } + req_config->config.syncptParams.preSyncptSpecified = true; + } +#endif + + if (nv_drm_plane_state->fd_user_ptr) { + req_config->config.syncptParams.postSyncptRequested = true; + } +#else + return -1; +#endif + } + + /* + * Unconditionally mark the surface as changed, even if nothing changed, + * so that we always get a flip event: a DRM client may flip with + * the same surface and wait for a flip event. 
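[Editorial aside] A note on the source-rectangle conversion in plane_req_config_update() above: DRM plane src_x/src_y/src_w/src_h are 16.16 fixed point, so shifting right by 16 extracts the integer pixel value and discards the fractional part. A tiny standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 100.5 expressed as 16.16 fixed point, as DRM stores plane src_x. */
    uint32_t src_x_fixed = (100u << 16) | 0x8000u;

    uint32_t integer_part  = src_x_fixed >> 16;                       /* 100 */
    uint32_t fraction_x100 = ((src_x_fixed & 0xffffu) * 100u) >> 16;  /*  50 */

    printf("src_x = %u.%02u -> %u used by the driver\n",
           integer_part, fraction_x100, src_x_fixed >> 16);
    return 0;
}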
+ */ + req_config->flags.surfaceChanged = NV_TRUE; + + if (old_config.surface == NULL && + old_config.surface != req_config->config.surface) { + req_config->flags.srcXYChanged = NV_TRUE; + req_config->flags.srcWHChanged = NV_TRUE; + req_config->flags.dstXYChanged = NV_TRUE; + req_config->flags.dstWHChanged = NV_TRUE; + return 0; + } + + req_config->flags.srcXYChanged = + old_config.srcX != req_config->config.srcX || + old_config.srcY != req_config->config.srcY; + + req_config->flags.srcWHChanged = + old_config.srcWidth != req_config->config.srcWidth || + old_config.srcHeight != req_config->config.srcHeight; + + req_config->flags.dstXYChanged = + old_config.dstX != req_config->config.dstX || + old_config.dstY != req_config->config.dstY; + + req_config->flags.dstWHChanged = + old_config.dstWidth != req_config->config.dstWidth || + old_config.dstHeight != req_config->config.dstHeight; + + return 0; +} + +static bool __is_async_flip_requested(const struct drm_plane *plane, + const struct drm_crtc_state *crtc_state) +{ + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { +#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) + return crtc_state->async_flip; +#elif defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS) + return !!(crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC); +#endif + } + + return false; +} + +static int __nv_drm_cursor_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + WARN_ON(nv_plane->layer_idx != NVKMS_KAPI_LAYER_INVALID_IDX); + + nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) { + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *head_req_config = + &nv_crtc_state->req_config; + struct NvKmsKapiCursorRequestedConfig *cursor_req_config = + &head_req_config->cursorRequestedConfig; + + if (plane->state->crtc == crtc && + plane->state->crtc != plane_state->crtc) { + cursor_req_config_disable(cursor_req_config); + continue; + } + + if (plane_state->crtc == crtc) { + cursor_plane_req_config_update(plane, plane_state, + cursor_req_config); + } + } + + return 0; +} + +#if defined(NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) +static int nv_drm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +#else +static int nv_drm_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +#endif +{ + struct nv_drm_plane *nv_plane = to_nv_plane(plane); +#if defined(NV_DRM_PLANE_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) + struct drm_plane_state *plane_state = + drm_atomic_get_new_plane_state(state, plane); +#endif + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int ret; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + return __nv_drm_cursor_atomic_check(plane, plane_state); + } + + WARN_ON(nv_plane->layer_idx == NVKMS_KAPI_LAYER_INVALID_IDX); + + nv_drm_for_each_crtc_in_state(plane_state->state, crtc, crtc_state, i) { + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *head_req_config = + &nv_crtc_state->req_config; + struct NvKmsKapiLayerRequestedConfig *plane_requested_config = + &head_req_config->layerRequestedConfig[nv_plane->layer_idx]; + + if (plane->state->crtc == crtc && + plane->state->crtc != plane_state->crtc) { + plane_req_config_disable(plane_requested_config); + continue; + } + + if (plane_state->crtc == crtc) { + ret = 
plane_req_config_update(plane, + plane_state, + plane_requested_config); + if (ret != 0) { + return ret; + } + + if (__is_async_flip_requested(plane, crtc_state)) { + /* + * Async flip requests that the flip happen 'as soon as + * possible', meaning that it not delay waiting for vblank. + * This may cause tearing on the screen. + */ + plane_requested_config->config.minPresentInterval = 0; + plane_requested_config->config.tearing = NV_TRUE; + } else { + plane_requested_config->config.minPresentInterval = 1; + plane_requested_config->config.tearing = NV_FALSE; + } + } + } + + return 0; +} + +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG) +static bool nv_drm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + /* All supported modifiers are compatible with all supported formats */ + return true; +} +#endif + + +static int nv_drm_plane_atomic_set_property( + struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + struct nv_drm_plane_state *nv_drm_plane_state = + to_nv_drm_plane_state(state); + + if (property == nv_dev->nv_out_fence_property) { +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) + nv_drm_plane_state->fd_user_ptr = u64_to_user_ptr(val); +#endif + return 0; + } else { + return -EINVAL; + } +} + +static int nv_drm_plane_atomic_get_property( + struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + + if (property == nv_dev->nv_out_fence_property) { + return 0; + } else { + return -EINVAL; + } +} + +static struct drm_plane_state * +nv_drm_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct nv_drm_plane_state *nv_old_plane_state = + to_nv_drm_plane_state(plane->state); + struct nv_drm_plane_state *nv_plane_state = + nv_drm_calloc(1, sizeof(*nv_plane_state)); + + if (nv_plane_state == NULL) { + return NULL; + } + + __drm_atomic_helper_plane_duplicate_state(plane, &nv_plane_state->base); + + nv_plane_state->fd_user_ptr = nv_old_plane_state->fd_user_ptr; + + return &nv_plane_state->base; +} + +static inline void __nv_drm_plane_atomic_destroy_state( + struct drm_plane *plane, + struct drm_plane_state *state) +{ +#if defined(NV_DRM_ATOMIC_HELPER_PLANE_DESTROY_STATE_HAS_PLANE_ARG) + __drm_atomic_helper_plane_destroy_state(plane, state); +#else + __drm_atomic_helper_plane_destroy_state(state); +#endif +} + +static void nv_drm_plane_atomic_destroy_state( + struct drm_plane *plane, + struct drm_plane_state *state) +{ + __nv_drm_plane_atomic_destroy_state(plane, state); + + nv_drm_free(to_nv_drm_plane_state(state)); +} + +static const struct drm_plane_funcs nv_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = nv_drm_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_get_property = nv_drm_plane_atomic_get_property, + .atomic_set_property = nv_drm_plane_atomic_set_property, + .atomic_duplicate_state = nv_drm_plane_atomic_duplicate_state, + .atomic_destroy_state = nv_drm_plane_atomic_destroy_state, +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG) + .format_mod_supported = nv_drm_plane_format_mod_supported, +#endif +}; + +static const struct drm_plane_helper_funcs nv_plane_helper_funcs = { + .atomic_check = nv_drm_plane_atomic_check, +}; + +static void 
nv_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+    struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc);
+
+    drm_crtc_cleanup(crtc);
+
+    nv_drm_free(nv_crtc);
+}
+
+static inline void
+__nv_drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+                                          struct drm_crtc_state *crtc_state)
+{
+#if defined(NV_DRM_ATOMIC_HELPER_CRTC_DESTROY_STATE_HAS_CRTC_ARG)
+    __drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
+#else
+    __drm_atomic_helper_crtc_destroy_state(crtc_state);
+#endif
+}
+
+static inline void nv_drm_crtc_duplicate_req_head_modeset_config(
+    const struct NvKmsKapiHeadRequestedConfig *old,
+    struct NvKmsKapiHeadRequestedConfig *new)
+{
+    uint32_t i;
+
+    /*
+     * Do not duplicate flags like 'modeChanged', which express the delta
+     * between the new and the previous/old configuration: at this point the
+     * new configuration has no changes yet with respect to the old one.
+     */
+    *new = (struct NvKmsKapiHeadRequestedConfig) {
+        .modeSetConfig = old->modeSetConfig,
+    };
+
+    for (i = 0; i < ARRAY_SIZE(old->layerRequestedConfig); i++) {
+        new->layerRequestedConfig[i] = (struct NvKmsKapiLayerRequestedConfig) {
+            .config = old->layerRequestedConfig[i].config,
+        };
+    }
+}
+
+/**
+ * nv_drm_atomic_crtc_duplicate_state - crtc state duplicate hook
+ * @crtc: DRM crtc
+ *
+ * Allocate flip state and associate it with the DRM crtc state; this flip
+ * state will be consumed when the atomic update is committed to hardware by
+ * nv_drm_atomic_helper_commit_tail().
+ */
+static struct drm_crtc_state*
+nv_drm_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+    struct nv_drm_crtc_state *nv_state = nv_drm_calloc(1, sizeof(*nv_state));
+
+    if (nv_state == NULL) {
+        return NULL;
+    }
+
+    if ((nv_state->nv_flip =
+            nv_drm_calloc(1, sizeof(*(nv_state->nv_flip)))) == NULL) {
+        nv_drm_free(nv_state);
+        return NULL;
+    }
+
+    __drm_atomic_helper_crtc_duplicate_state(crtc, &nv_state->base);
+
+    INIT_LIST_HEAD(&nv_state->nv_flip->list_entry);
+    INIT_LIST_HEAD(&nv_state->nv_flip->deferred_flip_list);
+
+    nv_drm_crtc_duplicate_req_head_modeset_config(
+        &(to_nv_crtc_state(crtc->state)->req_config),
+        &nv_state->req_config);
+
+    return &nv_state->base;
+}
+
+/**
+ * nv_drm_atomic_crtc_destroy_state - crtc state destroy hook
+ * @crtc: DRM crtc
+ * @state: DRM crtc state object to destroy
+ *
+ * Destroy the flip state associated with the given crtc state if it has not
+ * been consumed because the atomic commit failed.
+ */
+static void nv_drm_atomic_crtc_destroy_state(struct drm_crtc *crtc,
+                                             struct drm_crtc_state *state)
+{
+    struct nv_drm_crtc_state *nv_state = to_nv_crtc_state(state);
+
+    if (nv_state->nv_flip != NULL) {
+        nv_drm_free(nv_state->nv_flip);
+        nv_state->nv_flip = NULL;
+    }
+
+    __nv_drm_atomic_helper_crtc_destroy_state(crtc, &nv_state->base);
+
+    nv_drm_free(nv_state);
+}
+
+static struct drm_crtc_funcs nv_crtc_funcs = {
+    .set_config             = drm_atomic_helper_set_config,
+    .page_flip              = drm_atomic_helper_page_flip,
+    .reset                  = drm_atomic_helper_crtc_reset,
+    .destroy                = nv_drm_crtc_destroy,
+    .atomic_duplicate_state = nv_drm_atomic_crtc_duplicate_state,
+    .atomic_destroy_state   = nv_drm_atomic_crtc_destroy_state,
+};
+
+/*
+ * In kernel versions before the addition of
+ * drm_crtc_state::connectors_changed, connector changes were
+ * reflected in drm_crtc_state::mode_changed.
+ */ +static inline bool +nv_drm_crtc_state_connectors_changed(struct drm_crtc_state *crtc_state) +{ +#if defined(NV_DRM_CRTC_STATE_HAS_CONNECTORS_CHANGED) + return crtc_state->connectors_changed; +#else + return crtc_state->mode_changed; +#endif +} + +static int head_modeset_config_attach_connector( + struct nv_drm_connector *nv_connector, + struct NvKmsKapiHeadModeSetConfig *head_modeset_config) +{ + struct nv_drm_encoder *nv_encoder = nv_connector->nv_detected_encoder; + + if (NV_DRM_WARN(nv_encoder == NULL || + head_modeset_config->numDisplays >= + ARRAY_SIZE(head_modeset_config->displays))) { + return -EINVAL; + } + head_modeset_config->displays[head_modeset_config->numDisplays++] = + nv_encoder->hDisplay; + return 0; +} + +/** + * nv_drm_crtc_atomic_check() can fail after it has modified + * the 'nv_drm_crtc_state::req_config', that is fine because 'nv_drm_crtc_state' + * will be discarded if ->atomic_check() fails. + */ +#if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) +static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +#else +static int nv_drm_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +#endif +{ +#if defined(NV_DRM_CRTC_ATOMIC_CHECK_HAS_ATOMIC_STATE_ARG) + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); +#endif + struct nv_drm_crtc_state *nv_crtc_state = to_nv_crtc_state(crtc_state); + struct NvKmsKapiHeadRequestedConfig *req_config = + &nv_crtc_state->req_config; + int ret = 0; + + if (crtc_state->mode_changed) { + drm_mode_to_nvkms_display_mode(&crtc_state->mode, + &req_config->modeSetConfig.mode); + req_config->flags.modeChanged = NV_TRUE; + } + + if (nv_drm_crtc_state_connectors_changed(crtc_state)) { + struct NvKmsKapiHeadModeSetConfig *config = &req_config->modeSetConfig; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + int j; + + config->numDisplays = 0; + + memset(config->displays, 0, sizeof(config->displays)); + + req_config->flags.displaysChanged = NV_TRUE; + + nv_drm_for_each_connector_in_state(crtc_state->state, + connector, connector_state, j) { + if (connector_state->crtc != crtc) { + continue; + } + + if ((ret = head_modeset_config_attach_connector( + to_nv_connector(connector), + config)) != 0) { + return ret; + } + } + } + + if (crtc_state->active_changed) { + req_config->modeSetConfig.bActive = crtc_state->active; + req_config->flags.activeChanged = NV_TRUE; + } + + return ret; +} + +static bool +nv_drm_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_crtc_helper_funcs nv_crtc_helper_funcs = { + .atomic_check = nv_drm_crtc_atomic_check, + .mode_fixup = nv_drm_crtc_mode_fixup, +}; + +static void nv_drm_plane_install_properties( + struct drm_plane *plane) +{ + struct nv_drm_device *nv_dev = to_nv_device(plane->dev); + + if (nv_dev->nv_out_fence_property) { + drm_object_attach_property( + &plane->base, nv_dev->nv_out_fence_property, 0); + } +} + +static void +__nv_drm_plane_create_alpha_blending_properties(struct drm_plane *plane, + NvU32 validCompModes) +{ +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) + if ((validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA)) != 0x0 && + (validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA)) != 0x0) { + + drm_plane_create_alpha_property(plane); + drm_plane_create_blend_mode_property(plane, + 
NVBIT(DRM_MODE_BLEND_PREMULTI) | + NVBIT(DRM_MODE_BLEND_COVERAGE)); + } else if ((validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) != 0x0 && + (validCompModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA)) != 0x0) { + + drm_plane_create_blend_mode_property(plane, + NVBIT(DRM_MODE_BLEND_PREMULTI) | + NVBIT(DRM_MODE_BLEND_COVERAGE)); + } +#endif +} + +static void +__nv_drm_plane_create_rotation_property(struct drm_plane *plane, + NvU16 validLayerRRTransforms) +{ +#if defined(NV_DRM_ROTATION_AVAILABLE) + enum NvKmsRotation curRotation; + NvU32 supported_rotations = 0; + struct NvKmsRRParams rrParams = { + .rotation = NVKMS_ROTATION_0, + .reflectionX = true, + .reflectionY = true, + }; + + if ((NVBIT(NvKmsRRParamsToCapBit(&rrParams)) & + validLayerRRTransforms) != 0) { + supported_rotations |= DRM_MODE_REFLECT_X; + supported_rotations |= DRM_MODE_REFLECT_Y; + } + + rrParams.reflectionX = false; + rrParams.reflectionY = false; + + for (curRotation = NVKMS_ROTATION_MIN; + curRotation <= NVKMS_ROTATION_MAX; curRotation++) { + rrParams.rotation = curRotation; + if ((NVBIT(NvKmsRRParamsToCapBit(&rrParams)) & + validLayerRRTransforms) == 0) { + continue; + } + + switch (curRotation) { + case NVKMS_ROTATION_0: + supported_rotations |= DRM_MODE_ROTATE_0; + break; + case NVKMS_ROTATION_90: + supported_rotations |= DRM_MODE_ROTATE_90; + break; + case NVKMS_ROTATION_180: + supported_rotations |= DRM_MODE_ROTATE_180; + break; + case NVKMS_ROTATION_270: + supported_rotations |= DRM_MODE_ROTATE_270; + break; + default: + break; + } + + } + + if (supported_rotations != 0) { + drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, + supported_rotations); + } +#endif +} + +static struct drm_plane* +nv_drm_plane_create(struct drm_device *dev, + enum drm_plane_type plane_type, + uint32_t layer_idx, + NvU32 head, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo) +{ +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG) + struct nv_drm_device *nv_dev = to_nv_device(dev); + const NvU64 linear_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, + }; +#endif + enum NvKmsCompositionBlendingMode defaultCompositionMode; + struct nv_drm_plane *nv_plane = NULL; + struct nv_drm_plane_state *nv_plane_state = NULL; + struct drm_plane *plane = NULL; + int ret = -ENOMEM; + uint32_t *formats = NULL; + unsigned int formats_count = 0; + const NvU32 validCompositionModes = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + pResInfo->caps.validCursorCompositionModes : + pResInfo->caps.layer[layer_idx].validCompositionModes; + const long unsigned int nvkms_formats_mask = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + pResInfo->caps.supportedCursorSurfaceMemoryFormats : + pResInfo->supportedSurfaceMemoryFormats[layer_idx]; + const NvU16 validLayerRRTransforms = + (plane_type == DRM_PLANE_TYPE_CURSOR) ? 
+ 0x0 : pResInfo->caps.layer[layer_idx].validRRTransforms; + + if ((validCompositionModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) != 0x0) { + defaultCompositionMode = NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE; + } else if ((validCompositionModes & + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) != 0x0) { + defaultCompositionMode = NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA; + } else { + goto failed; + } + + formats = + nv_drm_format_array_alloc(&formats_count, + nvkms_formats_mask); + if (formats == NULL) { + goto failed; + } + + if ((nv_plane = nv_drm_calloc(1, sizeof(*nv_plane))) == NULL) { + goto failed_plane_alloc; + } + plane = &nv_plane->base; + + nv_plane->defaultCompositionMode = defaultCompositionMode; + nv_plane->layer_idx = layer_idx; + + if ((nv_plane_state = + nv_drm_calloc(1, sizeof(*nv_plane_state))) == NULL) { + goto failed_state_alloc; + } + + plane->state = &nv_plane_state->base; + plane->state->plane = plane; + + /* + * Possible_crtcs for primary and cursor plane is zero because + * drm_crtc_init_with_planes() will assign the plane's possible_crtcs + * after the crtc is successfully initialized. + */ + ret = drm_universal_plane_init( + dev, + plane, + (plane_type == DRM_PLANE_TYPE_OVERLAY) ? + (1 << head) : 0, + &nv_plane_funcs, + formats, formats_count, +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_FORMAT_MODIFIERS_ARG) + (plane_type == DRM_PLANE_TYPE_CURSOR) ? + linear_modifiers : nv_dev->modifiers, +#endif + plane_type +#if defined(NV_DRM_UNIVERSAL_PLANE_INIT_HAS_NAME_ARG) + , NULL +#endif + ); + + if (ret != 0) { + goto failed_plane_init; + } + + drm_plane_helper_add(plane, &nv_plane_helper_funcs); + + if (plane_type != DRM_PLANE_TYPE_CURSOR) { + nv_drm_plane_install_properties(plane); + } + + __nv_drm_plane_create_alpha_blending_properties( + plane, + validCompositionModes); + + __nv_drm_plane_create_rotation_property( + plane, + validLayerRRTransforms); + + return plane; + +failed_plane_init: + nv_drm_free(nv_plane_state); + +failed_state_alloc: + nv_drm_free(nv_plane); + +failed_plane_alloc: + nv_drm_free(formats); + +failed: + return ERR_PTR(ret); +} + +/* + * Add drm crtc for given head and supported enum NvKmsSurfaceMemoryFormats. 
+ */ +static struct drm_crtc *__nv_drm_crtc_create(struct nv_drm_device *nv_dev, + struct drm_plane *primary_plane, + struct drm_plane *cursor_plane, + unsigned int head) +{ + struct nv_drm_crtc *nv_crtc = NULL; + struct nv_drm_crtc_state *nv_state = NULL; + int ret = -ENOMEM; + + if ((nv_crtc = nv_drm_calloc(1, sizeof(*nv_crtc))) == NULL) { + goto failed; + } + + nv_state = nv_drm_calloc(1, sizeof(*nv_state)); + if (nv_state == NULL) { + goto failed_state_alloc; + } + + nv_crtc->base.state = &nv_state->base; + nv_crtc->base.state->crtc = &nv_crtc->base; + + nv_crtc->head = head; + INIT_LIST_HEAD(&nv_crtc->flip_list); + spin_lock_init(&nv_crtc->flip_list_lock); + + ret = drm_crtc_init_with_planes(nv_dev->dev, + &nv_crtc->base, + primary_plane, cursor_plane, + &nv_crtc_funcs +#if defined(NV_DRM_CRTC_INIT_WITH_PLANES_HAS_NAME_ARG) + , NULL +#endif + ); + + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to init crtc for head %u with planes", head); + goto failed_init_crtc; + } + + /* Add crtc to drm sub-system */ + + drm_crtc_helper_add(&nv_crtc->base, &nv_crtc_helper_funcs); + + return &nv_crtc->base; + +failed_init_crtc: + nv_drm_free(nv_state); + +failed_state_alloc: + nv_drm_free(nv_crtc); + +failed: + return ERR_PTR(ret); +} + +void nv_drm_enumerate_crtcs_and_planes( + struct nv_drm_device *nv_dev, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo) +{ + unsigned int i; + + for (i = 0; i < pResInfo->numHeads; i++) { + struct drm_plane *primary_plane = NULL, *cursor_plane = NULL; + NvU32 layer; + + if (pResInfo->numLayers[i] <= NVKMS_KAPI_LAYER_PRIMARY_IDX) { + continue; + } + + primary_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_PRIMARY, + NVKMS_KAPI_LAYER_PRIMARY_IDX, + i, + pResInfo); + + if (IS_ERR(primary_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create primary plane for head %u, error = %ld", + i, PTR_ERR(primary_plane)); + continue; + } + + cursor_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_CURSOR, + NVKMS_KAPI_LAYER_INVALID_IDX, + i, + pResInfo); + if (IS_ERR(cursor_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create cursor plane for head %u, error = %ld", + i, PTR_ERR(cursor_plane)); + cursor_plane = NULL; + } + + /* Create crtc with the primary and cursor planes */ + { + struct drm_crtc *crtc = + __nv_drm_crtc_create(nv_dev, + primary_plane, cursor_plane, + i); + if (IS_ERR(crtc)) { + nv_drm_plane_destroy(primary_plane); + + if (cursor_plane != NULL) { + nv_drm_plane_destroy(cursor_plane); + } + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to add DRM CRTC for head %u, error = %ld", + i, PTR_ERR(crtc)); + continue; + } + } + + for (layer = 0; layer < pResInfo->numLayers[i]; layer++) { + if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) { + continue; + } + + struct drm_plane *overlay_plane = + nv_drm_plane_create(nv_dev->dev, + DRM_PLANE_TYPE_OVERLAY, + layer, + i, + pResInfo); + + if (IS_ERR(overlay_plane)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to create plane for layer-%u of head %u, error = %ld", + layer, i, PTR_ERR(overlay_plane)); + } + } + + } +} +/* + * Helper function to convert NvKmsKapiCrcs to drm_nvidia_crtc_crc32_out. 
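+ *
+ * The converted values are returned to userspace by the GET_CRTC_CRC32
+ * ioctls below. A hedged usage sketch (the DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2
+ * wrapper name is assumed to come from nvidia-drm-ioctl.h; error handling
+ * omitted):
+ *
+ *     struct drm_nvidia_get_crtc_crc32_v2_params params = { .crtc_id = id };
+ *     if (drmIoctl(fd, DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2, &params) == 0 &&
+ *         params.crc32.outputCrc32.supported) {
+ *         printf("output CRC32: 0x%08x\n", params.crc32.outputCrc32.value);
+ *     }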
+ */ +static void NvKmsKapiCrcsToDrm(const struct NvKmsKapiCrcs *crcs, + struct drm_nvidia_crtc_crc32_v2_out *drmCrcs) +{ + drmCrcs->outputCrc32.value = crcs->outputCrc32.value; + drmCrcs->outputCrc32.supported = crcs->outputCrc32.supported; + drmCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value; + drmCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported; + drmCrcs->compositorCrc32.value = crcs->compositorCrc32.value; + drmCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported; +} + +int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_crtc_crc32_v2_params *params = data; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_crtc *crtc = NULL; + struct nv_drm_crtc *nv_crtc = NULL; + struct NvKmsKapiCrcs crc32; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -ENOENT; + } + + crtc = nv_drm_crtc_find(dev, params->crtc_id); + if (!crtc) { + return -ENOENT; + } + + nv_crtc = to_nv_crtc(crtc); + + if (!nvKms->getCRC32(nv_dev->pDevice, nv_crtc->head, &crc32)) { + return -ENODEV; + } + NvKmsKapiCrcsToDrm(&crc32, ¶ms->crc32); + + return 0; +} + +int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_crtc_crc32_params *params = data; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_crtc *crtc = NULL; + struct nv_drm_crtc *nv_crtc = NULL; + struct NvKmsKapiCrcs crc32; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -ENOENT; + } + + crtc = nv_drm_crtc_find(dev, params->crtc_id); + if (!crtc) { + return -ENOENT; + } + + nv_crtc = to_nv_crtc(crtc); + + if (!nvKms->getCRC32(nv_dev->pDevice, nv_crtc->head, &crc32)) { + return -ENODEV; + } + params->crc32 = crc32.outputCrc32.value; + + return 0; +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-crtc.h b/kernel-open/nvidia-drm/nvidia-drm-crtc.h new file mode 100644 index 000000000..061b235c7 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-crtc.h @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_CRTC_H__ +#define __NVIDIA_DRM_CRTC_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-helper.h" + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#include + +#if defined(NV_DRM_ALPHA_BLENDING_AVAILABLE) || defined(NV_DRM_ROTATION_AVAILABLE) +/* For DRM_ROTATE_* , DRM_REFLECT_* */ +#include +#endif + +#if defined(NV_DRM_ROTATION_AVAILABLE) +/* For DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* */ +#include +#endif + +#include "nvtypes.h" +#include "nvkms-kapi.h" + +#if defined(NV_DRM_ROTATION_AVAILABLE) +/* + * 19-05-2017 c2c446ad29437bb92b157423c632286608ebd3ec has added + * DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* to UAPI and removed + * DRM_ROTATE_* and DRM_MODE_REFLECT_* + */ +#if !defined(DRM_MODE_ROTATE_0) +#define DRM_MODE_ROTATE_0 DRM_ROTATE_0 +#define DRM_MODE_ROTATE_90 DRM_ROTATE_90 +#define DRM_MODE_ROTATE_180 DRM_ROTATE_180 +#define DRM_MODE_ROTATE_270 DRM_ROTATE_270 +#define DRM_MODE_REFLECT_X DRM_REFLECT_X +#define DRM_MODE_REFLECT_Y DRM_REFLECT_Y +#define DRM_MODE_ROTATE_MASK DRM_ROTATE_MASK +#define DRM_MODE_REFLECT_MASK DRM_REFLECT_MASK +#endif + +#endif //NV_DRM_ROTATION_AVAILABLE + +struct nv_drm_crtc { + NvU32 head; + + /** + * @flip_list: + * + * List of flips pending to get processed by __nv_drm_handle_flip_event(). + * Protected by @flip_list_lock. + */ + struct list_head flip_list; + + /** + * @flip_list_lock: + * + * Spinlock to protect @flip_list. + */ + spinlock_t flip_list_lock; + + struct drm_crtc base; +}; + +/** + * struct nv_drm_flip - flip state + * + * This state is getting used to consume DRM completion event associated + * with each crtc state from atomic commit. + * + * Function nv_drm_atomic_apply_modeset_config() consumes DRM completion + * event, save it into flip state associated with crtc and queue flip state into + * crtc's flip list and commits atomic update to hardware. + */ +struct nv_drm_flip { + /** + * @event: + * + * Optional pointer to a DRM event to signal upon completion of + * the state update. + */ + struct drm_pending_vblank_event *event; + + /** + * @pending_events + * + * Number of HW events pending to signal completion of the state + * update. + */ + uint32_t pending_events; + + /** + * @list_entry: + * + * Entry on the per-CRTC &nv_drm_crtc.flip_list. Protected by + * &nv_drm_crtc.flip_list_lock. + */ + struct list_head list_entry; + + /** + * @deferred_flip_list + * + * List flip objects whose processing is deferred until processing of + * this flip object. Protected by &nv_drm_crtc.flip_list_lock. + * nv_drm_atomic_commit() gets last flip object from + * nv_drm_crtc:flip_list and add deferred flip objects into + * @deferred_flip_list, __nv_drm_handle_flip_event() processes + * @deferred_flip_list. + */ + struct list_head deferred_flip_list; +}; + +struct nv_drm_crtc_state { + /** + * @base: + * + * Base DRM crtc state object for this. + */ + struct drm_crtc_state base; + + /** + * @head_req_config: + * + * Requested head's modeset configuration corresponding to this crtc state. + */ + struct NvKmsKapiHeadRequestedConfig req_config; + + /** + * @nv_flip: + * + * Flip state associated with this crtc state, gets allocated + * by nv_drm_atomic_crtc_duplicate_state(), on successful commit it gets + * consumed and queued into flip list by + * nv_drm_atomic_apply_modeset_config() and finally gets destroyed + * by __nv_drm_handle_flip_event() after getting processed. 
+ * + * In case of failure of atomic commit, this flip state getting destroyed by + * nv_drm_atomic_crtc_destroy_state(). + */ + struct nv_drm_flip *nv_flip; +}; + +static inline struct nv_drm_crtc_state *to_nv_crtc_state(struct drm_crtc_state *state) +{ + return container_of(state, struct nv_drm_crtc_state, base); +} + +struct nv_drm_plane { + /** + * @base: + * + * Base DRM plane object for this plane. + */ + struct drm_plane base; + + /** + * @defaultCompositionMode: + * + * Default composition blending mode of this plane. + */ + enum NvKmsCompositionBlendingMode defaultCompositionMode; + + /** + * @layer_idx + * + * Index of this plane in the per head array of layers. + */ + uint32_t layer_idx; +}; + +static inline struct nv_drm_plane *to_nv_plane(struct drm_plane *plane) +{ + if (plane == NULL) { + return NULL; + } + return container_of(plane, struct nv_drm_plane, base); +} + +struct nv_drm_plane_state { + struct drm_plane_state base; + s32 __user *fd_user_ptr; +}; + +static inline struct nv_drm_plane_state *to_nv_drm_plane_state(struct drm_plane_state *state) +{ + return container_of(state, struct nv_drm_plane_state, base); +} + +static inline struct nv_drm_crtc *to_nv_crtc(struct drm_crtc *crtc) +{ + if (crtc == NULL) { + return NULL; + } + return container_of(crtc, struct nv_drm_crtc, base); +} + +/* + * CRTCs are static objects, list does not change once after initialization and + * before teardown of device. Initialization/teardown paths are single + * threaded, so no locking required. + */ +static inline +struct nv_drm_crtc *nv_drm_crtc_lookup(struct nv_drm_device *nv_dev, NvU32 head) +{ + struct drm_crtc *crtc; + nv_drm_for_each_crtc(crtc, nv_dev->dev) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + if (nv_crtc->head == head) { + return nv_crtc; + } + } + return NULL; +} + +/** + * nv_drm_crtc_enqueue_flip - Enqueue nv_drm_flip object to flip_list of crtc. + */ +static inline void nv_drm_crtc_enqueue_flip(struct nv_drm_crtc *nv_crtc, + struct nv_drm_flip *nv_flip) +{ + spin_lock(&nv_crtc->flip_list_lock); + list_add(&nv_flip->list_entry, &nv_crtc->flip_list); + spin_unlock(&nv_crtc->flip_list_lock); +} + +/** + * nv_drm_crtc_dequeue_flip - Dequeue nv_drm_flip object to flip_list of crtc. + */ +static inline +struct nv_drm_flip *nv_drm_crtc_dequeue_flip(struct nv_drm_crtc *nv_crtc) +{ + struct nv_drm_flip *nv_flip = NULL; + uint32_t pending_events = 0; + + spin_lock(&nv_crtc->flip_list_lock); + nv_flip = list_first_entry_or_null(&nv_crtc->flip_list, + struct nv_drm_flip, list_entry); + if (likely(nv_flip != NULL)) { + /* + * Decrement pending_event count and dequeue flip object if + * pending_event count becomes 0. 
+ */ + pending_events = --nv_flip->pending_events; + if (!pending_events) { + list_del(&nv_flip->list_entry); + } + } + spin_unlock(&nv_crtc->flip_list_lock); + + if (WARN_ON(nv_flip == NULL) || pending_events) { + return NULL; + } + + return nv_flip; +} + +void nv_drm_enumerate_crtcs_and_planes( + struct nv_drm_device *nv_dev, + const struct NvKmsKapiDeviceResourcesInfo *pResInfo); + +int nv_drm_get_crtc_crc32_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_get_crtc_crc32_v2_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_CRTC_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-drv.c b/kernel-open/nvidia-drm/nvidia-drm-drv.c new file mode 100644 index 000000000..cf2080db3 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-drv.c @@ -0,0 +1,1024 @@ +/* + * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE and NV_DRM_DRM_GEM_H_PRESENT */ + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-drv.h" +#include "nvidia-drm-fb.h" +#include "nvidia-drm-modeset.h" +#include "nvidia-drm-encoder.h" +#include "nvidia-drm-connector.h" +#include "nvidia-drm-gem.h" +#include "nvidia-drm-crtc.h" +#include "nvidia-drm-prime-fence.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-gem-nvkms-memory.h" +#include "nvidia-drm-gem-user-memory.h" +#include "nvidia-drm-gem-dma-buf.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-ioctl.h" + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_VBLANK_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_FILE_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_PRIME_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_IOCTL_H_PRESENT) +#include +#endif + +#include + +/* + * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h") + * moves a number of helper function definitions from + * drm/drm_crtc_helper.h to a new drm_probe_helper.h. 
+ */ +#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT) +#include +#endif +#include + +#if defined(NV_DRM_DRM_GEM_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_AUTH_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) +#include +#endif + +static struct nv_drm_device *dev_list = NULL; + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +static void nv_drm_output_poll_changed(struct drm_device *dev) +{ + struct drm_connector *connector = NULL; + struct drm_mode_config *config = &dev->mode_config; +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) + struct drm_connector_list_iter conn_iter; + nv_drm_connector_list_iter_begin(dev, &conn_iter); +#endif + /* + * Here drm_mode_config::mutex has been acquired unconditionally: + * + * - In the non-NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must + * be held for the duration of walking over the connectors. + * + * - In the NV_DRM_CONNECTOR_LIST_ITER_PRESENT case, the mutex must be + * held for the duration of a fill_modes() call chain: + * connector->funcs->fill_modes() + * |-> drm_helper_probe_single_connector_modes() + * + * It is easiest to always acquire the mutext for the entire connector + * loop. + */ + mutex_lock(&config->mutex); + + nv_drm_for_each_connector(connector, &conn_iter, dev) { + + struct nv_drm_connector *nv_connector = to_nv_connector(connector); + + if (!nv_drm_connector_check_connection_status_dirty_and_clear( + nv_connector)) { + continue; + } + + connector->funcs->fill_modes( + connector, + dev->mode_config.max_width, dev->mode_config.max_height); + } + + mutex_unlock(&config->mutex); +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) + nv_drm_connector_list_iter_end(&conn_iter); +#endif +} + +static struct drm_framebuffer *nv_drm_framebuffer_create( + struct drm_device *dev, + struct drm_file *file, + #if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG) + const struct drm_mode_fb_cmd2 *cmd + #else + struct drm_mode_fb_cmd2 *cmd + #endif +) +{ + struct drm_mode_fb_cmd2 local_cmd; + struct drm_framebuffer *fb; + + local_cmd = *cmd; + + fb = nv_drm_internal_framebuffer_create( + dev, + file, + &local_cmd); + + #if !defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_CONST_MODE_CMD_ARG) + *cmd = local_cmd; + #endif + + return fb; +} + +static const struct drm_mode_config_funcs nv_mode_config_funcs = { + .fb_create = nv_drm_framebuffer_create, + + .atomic_state_alloc = nv_drm_atomic_state_alloc, + .atomic_state_clear = nv_drm_atomic_state_clear, + .atomic_state_free = nv_drm_atomic_state_free, + .atomic_check = nv_drm_atomic_check, + .atomic_commit = nv_drm_atomic_commit, + + .output_poll_changed = nv_drm_output_poll_changed, +}; + +static void nv_drm_event_callback(const struct NvKmsKapiEvent *event) +{ + struct nv_drm_device *nv_dev = event->privateData; + + mutex_lock(&nv_dev->lock); + + if (!atomic_read(&nv_dev->enable_event_handling)) { + goto done; + } + + switch (event->type) { + case NVKMS_EVENT_TYPE_DPY_CHANGED: + nv_drm_handle_display_change( + nv_dev, + event->u.displayChanged.display); + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: + nv_drm_handle_dynamic_display_connected( + nv_dev, + event->u.dynamicDisplayConnected.display); + break; + case NVKMS_EVENT_TYPE_FLIP_OCCURRED: + nv_drm_handle_flip_occurred( + nv_dev, + event->u.flipOccurred.head, + event->u.flipOccurred.layer); + break; + default: + break; + } + +done: + + mutex_unlock(&nv_dev->lock); +} + +/* + * Helper function to initialize drm_device::mode_config from + * NvKmsKapiDevice's resource information. 
+ */
+static void
+nv_drm_init_mode_config(struct nv_drm_device *nv_dev,
+                        const struct NvKmsKapiDeviceResourcesInfo *pResInfo)
+{
+    struct drm_device *dev = nv_dev->dev;
+
+    drm_mode_config_init(dev);
+    drm_mode_create_dvi_i_properties(dev);
+
+    dev->mode_config.funcs = &nv_mode_config_funcs;
+
+    dev->mode_config.min_width  = pResInfo->caps.minWidthInPixels;
+    dev->mode_config.min_height = pResInfo->caps.minHeightInPixels;
+
+    dev->mode_config.max_width  = pResInfo->caps.maxWidthInPixels;
+    dev->mode_config.max_height = pResInfo->caps.maxHeightInPixels;
+
+    dev->mode_config.cursor_width  = pResInfo->caps.maxCursorSizeInPixels;
+    dev->mode_config.cursor_height = pResInfo->caps.maxCursorSizeInPixels;
+
+    /*
+     * NVIDIA GPUs have no preferred depth. Arbitrarily report 24, to be
+     * consistent with other DRM drivers.
+     */
+
+    dev->mode_config.preferred_depth = 24;
+    dev->mode_config.prefer_shadow = 1;
+
+    /* Currently unused. Update when needed. */
+
+    dev->mode_config.fb_base = 0;
+
+#if defined(NV_DRM_CRTC_STATE_HAS_ASYNC_FLIP) || \
+    defined(NV_DRM_CRTC_STATE_HAS_PAGEFLIP_FLAGS)
+    dev->mode_config.async_page_flip = true;
+#else
+    dev->mode_config.async_page_flip = false;
+#endif
+
+#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) && \
+    defined(NV_DRM_MODE_CONFIG_HAS_ALLOW_FB_MODIFIERS)
+    /* Allow clients to define framebuffer layouts using DRM format modifiers */
+    dev->mode_config.allow_fb_modifiers = true;
+#endif
+
+    /* Initialize output polling support */
+
+    drm_kms_helper_poll_init(dev);
+
+    /* Disable output polling, because we don't support it yet */
+
+    drm_kms_helper_poll_disable(dev);
+}
+
+/*
+ * Helper function to enumerate encoders/connectors from NvKmsKapiDevice.
+ */
+static void nv_drm_enumerate_encoders_and_connectors
+(
+    struct nv_drm_device *nv_dev
+)
+{
+    struct drm_device *dev = nv_dev->dev;
+    NvU32 nDisplays = 0;
+
+    if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, NULL)) {
+        NV_DRM_DEV_LOG_ERR(
+            nv_dev,
+            "Failed to enumerate NvKmsKapiDisplay count");
+    }
+
+    if (nDisplays != 0) {
+        NvKmsKapiDisplay *hDisplays =
+            nv_drm_calloc(nDisplays, sizeof(*hDisplays));
+
+        if (hDisplays != NULL) {
+            if (!nvKms->getDisplays(nv_dev->pDevice, &nDisplays, hDisplays)) {
+                NV_DRM_DEV_LOG_ERR(
+                    nv_dev,
+                    "Failed to enumerate NvKmsKapiDisplay handles");
+            } else {
+                NvU32 i;
+
+                for (i = 0; i < nDisplays; i++) {
+                    struct drm_encoder *encoder =
+                        nv_drm_add_encoder(dev, hDisplays[i]);
+
+                    if (IS_ERR(encoder)) {
+                        NV_DRM_DEV_LOG_ERR(
+                            nv_dev,
+                            "Failed to add connector for NvKmsKapiDisplay 0x%08x",
+                            hDisplays[i]);
+                    }
+                }
+            }
+
+            nv_drm_free(hDisplays);
+        } else {
+            NV_DRM_DEV_LOG_ERR(
+                nv_dev,
+                "Failed to allocate memory for NvKmsKapiDisplay array");
+        }
+    }
+}
+
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+
+/*!
+ * 'NV_DRM_OUT_FENCE_PTR' is an atomic per-plane property that clients can use
+ * to request an out-fence fd for a particular plane that's being flipped.
+ * 'NV_DRM_OUT_FENCE_PTR' does NOT have the same behavior as the standard
+ * 'OUT_FENCE_PTR' property - the fd that's returned via 'NV_DRM_OUT_FENCE_PTR'
+ * will only be signaled once the buffers in the corresponding flip are flipped
+ * away from.
+ * In order to use this property, the client needs to call the set property
+ * function with a user-mode pointer as the value. Once the driver has the
+ * post syncpt fd from the flip reply, it copies the post syncpt fd to the
+ * location pointed to by the user-mode pointer.
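+ *
+ * A minimal userspace sketch (illustrative only; it assumes a libdrm atomic
+ * request 'req', a plane 'plane_id' that exposes this property, and that
+ * 'prop_id' was looked up by name through drmModeObjectGetProperties()):
+ *
+ *     int32_t out_fence_fd = -1;
+ *     drmModeAtomicAddProperty(req, plane_id, prop_id,
+ *                              (uint64_t)(uintptr_t)&out_fence_fd);
+ *     drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
+ *     ... after the flip completes, out_fence_fd holds the post syncpt fd.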
+ */ +static int nv_drm_create_properties(struct nv_drm_device *nv_dev) +{ +#if defined(NV_LINUX_NVHOST_H_PRESENT) && defined(CONFIG_TEGRA_GRHOST) + if (!nv_dev->supportsSyncpts) { + return 0; + } + + nv_dev->nv_out_fence_property = + drm_property_create_range(nv_dev->dev, DRM_MODE_PROP_ATOMIC, + "NV_DRM_OUT_FENCE_PTR", 0, U64_MAX); + if (nv_dev->nv_out_fence_property == NULL) { + return -ENOMEM; + } +#endif + + return 0; +} + +static int nv_drm_load(struct drm_device *dev, unsigned long flags) +{ +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + struct NvKmsKapiDevice *pDevice; + + struct NvKmsKapiAllocateDeviceParams allocateDeviceParams; + struct NvKmsKapiDeviceResourcesInfo resInfo; +#endif +#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) + NvU64 kind; + NvU64 gen; + int i; +#endif + int ret; + + struct nv_drm_device *nv_dev = to_nv_device(dev); + + NV_DRM_DEV_LOG_INFO(nv_dev, "Loading driver"); + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return 0; + } + + /* Allocate NvKmsKapiDevice from GPU ID */ + + memset(&allocateDeviceParams, 0, sizeof(allocateDeviceParams)); + + allocateDeviceParams.gpuId = nv_dev->gpu_info.gpu_id; + + allocateDeviceParams.privateData = nv_dev; + allocateDeviceParams.eventCallback = nv_drm_event_callback; + + pDevice = nvKms->allocateDevice(&allocateDeviceParams); + + if (pDevice == NULL) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate NvKmsKapiDevice"); + return -ENODEV; + } + + /* Query information of resources available on device */ + + if (!nvKms->getDeviceResourcesInfo(pDevice, &resInfo)) { + + nvKms->freeDevice(pDevice); + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to query NvKmsKapiDevice resources info"); + return -ENODEV; + } + + mutex_lock(&nv_dev->lock); + + /* Set NvKmsKapiDevice */ + + nv_dev->pDevice = pDevice; + + nv_dev->pitchAlignment = resInfo.caps.pitchAlignment; + + nv_dev->hasVideoMemory = resInfo.caps.hasVideoMemory; + + nv_dev->genericPageKind = resInfo.caps.genericPageKind; + + // Fermi-Volta use generation 0, Turing+ uses generation 2. + nv_dev->pageKindGeneration = (nv_dev->genericPageKind == 0x06) ? 
2 : 0; + + // Desktop GPUs and mobile GPUs Xavier and later use the same sector layout + nv_dev->sectorLayout = 1; + + nv_dev->supportsSyncpts = resInfo.caps.supportsSyncpts; + +#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) + gen = nv_dev->pageKindGeneration; + kind = nv_dev->genericPageKind; + + for (i = 0; i <= 5; i++) { + nv_dev->modifiers[i] = + /* Log2(block height) ----------------------------------+ * + * Page Kind ------------------------------------+ | * + * Gob Height/Page Kind Generation --------+ | | * + * Sector layout ---------------------+ | | | * + * Compression --------------------+ | | | | * + * | | | | | */ + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, gen, kind, 5 - i); + } + + nv_dev->modifiers[i++] = DRM_FORMAT_MOD_LINEAR; + nv_dev->modifiers[i++] = DRM_FORMAT_MOD_INVALID; +#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */ + + /* Initialize drm_device::mode_config */ + + nv_drm_init_mode_config(nv_dev, &resInfo); + + ret = nv_drm_create_properties(nv_dev); + if (ret < 0) { + return -ENODEV; + } + + if (!nvKms->declareEventInterest( + nv_dev->pDevice, + ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) | + (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) | + (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED)))) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register event mask"); + } + + /* Add crtcs */ + + nv_drm_enumerate_crtcs_and_planes(nv_dev, &resInfo); + + /* Add connectors and encoders */ + + nv_drm_enumerate_encoders_and_connectors(nv_dev); + +#if !defined(NV_DRM_CRTC_STATE_HAS_NO_VBLANK) + drm_vblank_init(dev, dev->mode_config.num_crtc); +#endif + + /* + * Trigger hot-plug processing, to update connection status of + * all HPD supported connectors. + */ + + drm_helper_hpd_irq_event(dev); + + /* Enable event handling */ + + atomic_set(&nv_dev->enable_event_handling, true); + + init_waitqueue_head(&nv_dev->flip_event_wq); + + mutex_unlock(&nv_dev->lock); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + + return 0; +} + +static void __nv_drm_unload(struct drm_device *dev) +{ +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + struct NvKmsKapiDevice *pDevice = NULL; +#endif + + struct nv_drm_device *nv_dev = to_nv_device(dev); + + NV_DRM_DEV_LOG_INFO(nv_dev, "Unloading driver"); + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return; + } + + mutex_lock(&nv_dev->lock); + + /* Disable event handling */ + + atomic_set(&nv_dev->enable_event_handling, false); + + /* Clean up output polling */ + + drm_kms_helper_poll_fini(dev); + + /* Clean up mode configuration */ + + drm_mode_config_cleanup(dev); + + if (!nvKms->declareEventInterest(nv_dev->pDevice, 0x0)) { + NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to stop event listening"); + } + + /* Unset NvKmsKapiDevice */ + + pDevice = nv_dev->pDevice; + nv_dev->pDevice = NULL; + + mutex_unlock(&nv_dev->lock); + + nvKms->freeDevice(pDevice); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ +} + +#if defined(NV_DRM_DRIVER_UNLOAD_HAS_INT_RETURN_TYPE) +static int nv_drm_unload(struct drm_device *dev) +{ + __nv_drm_unload(dev); + + return 0; +} +#else +static void nv_drm_unload(struct drm_device *dev) +{ + __nv_drm_unload(dev); +} +#endif + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +static int __nv_drm_master_set(struct drm_device *dev, + struct drm_file *file_priv, bool from_open) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + + if (!nvKms->grabOwnership(nv_dev->pDevice)) { + return -EINVAL; + } + + return 0; +} + +#if defined(NV_DRM_DRIVER_SET_MASTER_HAS_INT_RETURN_TYPE) +static int 
nv_drm_master_set(struct drm_device *dev, + struct drm_file *file_priv, bool from_open) +{ + return __nv_drm_master_set(dev, file_priv, from_open); +} +#else +static void nv_drm_master_set(struct drm_device *dev, + struct drm_file *file_priv, bool from_open) +{ + if (__nv_drm_master_set(dev, file_priv, from_open) != 0) { + NV_DRM_DEV_LOG_ERR(to_nv_device(dev), "Failed to grab modeset ownership"); + } +} +#endif + + +#if defined(NV_DRM_MASTER_DROP_HAS_FROM_RELEASE_ARG) +static +void nv_drm_master_drop(struct drm_device *dev, + struct drm_file *file_priv, bool from_release) +#else +static +void nv_drm_master_drop(struct drm_device *dev, struct drm_file *file_priv) +#endif +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + int err; + + /* + * After dropping nvkms modeset onwership, it is not guaranteed that + * drm and nvkms modeset state will remain in sync. Therefore, disable + * all outputs and crtcs before dropping nvkms modeset ownership. + * + * First disable all active outputs atomically and then disable each crtc one + * by one, there is not helper function available to disable all crtcs + * atomically. + */ + + drm_modeset_lock_all(dev); + + if ((err = nv_drm_atomic_helper_disable_all( + dev, + dev->mode_config.acquire_ctx)) != 0) { + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "nv_drm_atomic_helper_disable_all failed with error code %d !", + err); + } + + drm_modeset_unlock_all(dev); + + nvKms->releaseOwnership(nv_dev->pDevice); +} +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_BUS_PRESENT) || defined(NV_DRM_DRIVER_HAS_SET_BUSID) +static int nv_drm_pci_set_busid(struct drm_device *dev, + struct drm_master *master) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + + master->unique = nv_drm_asprintf("pci:%04x:%02x:%02x.%d", + nv_dev->gpu_info.pci_info.domain, + nv_dev->gpu_info.pci_info.bus, + nv_dev->gpu_info.pci_info.slot, + nv_dev->gpu_info.pci_info.function); + + if (master->unique == NULL) { + return -ENOMEM; + } + + master->unique_len = strlen(master->unique); + + return 0; +} +#endif + +static int nv_drm_get_dev_info_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_get_dev_info_params *params = data; + + if (dev->primary == NULL) { + return -ENOENT; + } + + params->gpu_id = nv_dev->gpu_info.gpu_id; + params->primary_index = dev->primary->index; +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + params->generic_page_kind = nv_dev->genericPageKind; + params->page_kind_generation = nv_dev->pageKindGeneration; + params->sector_layout = nv_dev->sectorLayout; +#else + params->generic_page_kind = 0; + params->page_kind_generation = 0; + params->sector_layout = 0; +#endif + + return 0; +} + +static +int nv_drm_get_client_capability_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_get_client_capability_params *params = data; + + switch (params->capability) { +#if defined(DRM_CLIENT_CAP_STEREO_3D) + case DRM_CLIENT_CAP_STEREO_3D: + params->value = filep->stereo_allowed; + break; +#endif +#if defined(DRM_CLIENT_CAP_UNIVERSAL_PLANES) + case DRM_CLIENT_CAP_UNIVERSAL_PLANES: + params->value = filep->universal_planes; + break; +#endif +#if defined(DRM_CLIENT_CAP_ATOMIC) + case DRM_CLIENT_CAP_ATOMIC: + params->value = filep->atomic; + break; +#endif + default: + return -EINVAL; + } + + return 0; +} + +#if defined(NV_DRM_BUS_PRESENT) + +#if defined(NV_DRM_BUS_HAS_GET_IRQ) +static int nv_drm_bus_get_irq(struct drm_device *dev) +{ + return 0; 
+} +#endif + +#if defined(NV_DRM_BUS_HAS_GET_NAME) +static const char *nv_drm_bus_get_name(struct drm_device *dev) +{ + return "nvidia-drm"; +} +#endif + +static struct drm_bus nv_drm_bus = { +#if defined(NV_DRM_BUS_HAS_BUS_TYPE) + .bus_type = DRIVER_BUS_PCI, +#endif +#if defined(NV_DRM_BUS_HAS_GET_IRQ) + .get_irq = nv_drm_bus_get_irq, +#endif +#if defined(NV_DRM_BUS_HAS_GET_NAME) + .get_name = nv_drm_bus_get_name, +#endif + .set_busid = nv_drm_pci_set_busid, +}; + +#endif /* NV_DRM_BUS_PRESENT */ + +static const struct file_operations nv_drm_fops = { + .owner = THIS_MODULE, + + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = drm_compat_ioctl, +#endif + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + .mmap = nv_drm_mmap, +#endif + + .poll = drm_poll, + .read = drm_read, + + .llseek = noop_llseek, +}; + +static const struct drm_ioctl_desc nv_drm_ioctls[] = { +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_NVKMS_MEMORY, + nv_drm_gem_import_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IMPORT_USERSPACE_MEMORY, + nv_drm_gem_import_userspace_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_MAP_OFFSET, + nv_drm_gem_map_offset_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_DEV_INFO, + nv_drm_get_dev_info_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + +#if defined(NV_DRM_FENCE_AVAILABLE) + DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_SUPPORTED, + nv_drm_fence_supported_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_FENCE_CONTEXT_CREATE, + nv_drm_fence_context_create_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_FENCE_ATTACH, + nv_drm_gem_fence_attach_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), +#endif + + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CLIENT_CAPABILITY, + nv_drm_get_client_capability_ioctl, + 0), +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32, + nv_drm_get_crtc_crc32_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GET_CRTC_CRC32_V2, + nv_drm_get_crtc_crc32_v2_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_NVKMS_MEMORY, + nv_drm_gem_export_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_ALLOC_NVKMS_MEMORY, + nv_drm_gem_alloc_nvkms_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_EXPORT_DMABUF_MEMORY, + nv_drm_gem_export_dmabuf_memory_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(NVIDIA_GEM_IDENTIFY_OBJECT, + nv_drm_gem_identify_object_ioctl, + DRM_RENDER_ALLOW|DRM_UNLOCKED), +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ +}; + +static struct drm_driver nv_drm_driver = { + + .driver_features = +#if defined(NV_DRM_DRIVER_PRIME_FLAG_PRESENT) + DRIVER_PRIME | +#endif + DRIVER_GEM | DRIVER_RENDER, + +#if defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) + .gem_free_object = nv_drm_gem_free, +#endif + + .ioctls = nv_drm_ioctls, + .num_ioctls = ARRAY_SIZE(nv_drm_ioctls), + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import = nv_drm_gem_prime_import, + .gem_prime_import_sg_table = nv_drm_gem_prime_import_sg_table, + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = nv_drm_gem_prime_get_sg_table, + .gem_prime_vmap = 
nv_drm_gem_prime_vmap,
+    .gem_prime_vunmap = nv_drm_gem_prime_vunmap,
+
+    .gem_vm_ops = &nv_drm_gem_vma_ops,
+#endif
+
+#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+    .gem_prime_res_obj = nv_drm_gem_prime_res_obj,
+#endif
+
+#if defined(NV_DRM_DRIVER_HAS_SET_BUSID)
+    .set_busid = nv_drm_pci_set_busid,
+#endif
+
+    .load = nv_drm_load,
+    .unload = nv_drm_unload,
+
+    .fops = &nv_drm_fops,
+
+#if defined(NV_DRM_BUS_PRESENT)
+    .bus = &nv_drm_bus,
+#endif
+
+    .name = "nvidia-drm",
+
+    .desc = "NVIDIA DRM driver",
+    .date = "20160202",
+
+#if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST)
+    .device_list = LIST_HEAD_INIT(nv_drm_driver.device_list),
+#elif defined(NV_DRM_DRIVER_HAS_LEGACY_DEV_LIST)
+    .legacy_dev_list = LIST_HEAD_INIT(nv_drm_driver.legacy_dev_list),
+#endif
+};
+
+
+/*
+ * Update the global nv_drm_driver for the intended features.
+ *
+ * It defaults to PRIME-only, but is upgraded to atomic modeset if the
+ * kernel supports atomic modeset and the 'modeset' kernel module
+ * parameter is true.
+ */
+static void nv_drm_update_drm_driver_features(void)
+{
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+
+    if (!nv_drm_modeset_module_param) {
+        return;
+    }
+
+    nv_drm_driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
+
+    nv_drm_driver.master_set = nv_drm_master_set;
+    nv_drm_driver.master_drop = nv_drm_master_drop;
+
+    nv_drm_driver.dumb_create = nv_drm_dumb_create;
+    nv_drm_driver.dumb_map_offset = nv_drm_dumb_map_offset;
+    nv_drm_driver.dumb_destroy = nv_drm_dumb_destroy;
+#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */
+}
+
+
+
+/*
+ * Helper function to allocate and register a DRM device for the given
+ * NVIDIA GPU ID.
+ */
+static void nv_drm_register_drm_device(const nv_gpu_info_t *gpu_info)
+{
+    struct nv_drm_device *nv_dev = NULL;
+    struct drm_device *dev = NULL;
+    struct device *device = gpu_info->os_device_ptr;
+
+    DRM_DEBUG(
+        "Registering device for NVIDIA GPU ID 0x%08x",
+        gpu_info->gpu_id);
+
+    /* Allocate NVIDIA-DRM device */
+
+    nv_dev = nv_drm_calloc(1, sizeof(*nv_dev));
+
+    if (nv_dev == NULL) {
+        NV_DRM_LOG_ERR(
+            "Failed to allocate memory for NVIDIA-DRM device object");
+        return;
+    }
+
+    nv_dev->gpu_info = *gpu_info;
+
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+    mutex_init(&nv_dev->lock);
+#endif
+
+    /* Allocate DRM device */
+
+    dev = drm_dev_alloc(&nv_drm_driver, device);
+
+    if (dev == NULL) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to allocate device");
+        goto failed_drm_alloc;
+    }
+
+    dev->dev_private = nv_dev;
+    nv_dev->dev = dev;
+
+#if defined(NV_DRM_DEVICE_HAS_PDEV)
+    if (device->bus == &pci_bus_type) {
+        dev->pdev = to_pci_dev(device);
+    }
+#endif
+
+    /* Register DRM device to DRM sub-system */
+
+    if (drm_dev_register(dev, 0) != 0) {
+        NV_DRM_DEV_LOG_ERR(nv_dev, "Failed to register device");
+        goto failed_drm_register;
+    }
+
+    /* Add NVIDIA-DRM device into list */
+
+    nv_dev->next = dev_list;
+    dev_list = nv_dev;
+
+    return; /* Success */
+
+failed_drm_register:
+
+    nv_drm_dev_free(dev);
+
+failed_drm_alloc:
+
+    nv_drm_free(nv_dev);
+}
+
+/*
+ * Enumerate NVIDIA GPUs and allocate/register a DRM device for each of them.
+ */ +int nv_drm_probe_devices(void) +{ + nv_gpu_info_t *gpu_info = NULL; + NvU32 gpu_count = 0; + NvU32 i; + + int ret = 0; + + nv_drm_update_drm_driver_features(); + + /* Enumerate NVIDIA GPUs */ + + gpu_info = nv_drm_calloc(NV_MAX_GPUS, sizeof(*gpu_info)); + + if (gpu_info == NULL) { + ret = -ENOMEM; + + NV_DRM_LOG_ERR("Failed to allocate gpu ids arrays"); + goto done; + } + + gpu_count = nvKms->enumerateGpus(gpu_info); + + if (gpu_count == 0) { + NV_DRM_LOG_INFO("Not found NVIDIA GPUs"); + goto done; + } + + WARN_ON(gpu_count > NV_MAX_GPUS); + + /* Register DRM device for each NVIDIA GPU */ + + for (i = 0; i < gpu_count; i++) { + nv_drm_register_drm_device(&gpu_info[i]); + } + +done: + + nv_drm_free(gpu_info); + + return ret; +} + +/* + * Unregister all NVIDIA DRM devices. + */ +void nv_drm_remove_devices(void) +{ + while (dev_list != NULL) { + struct nv_drm_device *next = dev_list->next; + + drm_dev_unregister(dev_list->dev); + nv_drm_dev_free(dev_list->dev); + + nv_drm_free(dev_list); + + dev_list = next; + } +} + +#endif /* NV_DRM_AVAILABLE */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-drv.h b/kernel-open/nvidia-drm/nvidia-drm-drv.h new file mode 100644 index 000000000..cd20ec93f --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-drv.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_DRV_H__ +#define __NVIDIA_DRM_DRV_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +int nv_drm_probe_devices(void); + +void nv_drm_remove_devices(void); + +#endif /* defined(NV_DRM_AVAILABLE) */ + +#endif /* __NVIDIA_DRM_DRV_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-encoder.c b/kernel-open/nvidia-drm/nvidia-drm-encoder.c new file mode 100644 index 000000000..653b43254 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-encoder.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-encoder.h" +#include "nvidia-drm-utils.h" +#include "nvidia-drm-connector.h" +#include "nvidia-drm-crtc.h" +#include "nvidia-drm-helper.h" + +#include "nvmisc.h" + +/* + * Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h") + * moves a number of helper function definitions from + * drm/drm_crtc_helper.h to a new drm_probe_helper.h. + */ +#if defined(NV_DRM_DRM_PROBE_HELPER_H_PRESENT) +#include +#endif +#include + +#include +#include + +static void nv_drm_encoder_destroy(struct drm_encoder *encoder) +{ + struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder); + + drm_encoder_cleanup(encoder); + + nv_drm_free(nv_encoder); +} + +static const struct drm_encoder_funcs nv_encoder_funcs = { + .destroy = nv_drm_encoder_destroy, +}; + +static bool nv_drm_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void nv_drm_encoder_prepare(struct drm_encoder *encoder) +{ + +} + +static void nv_drm_encoder_commit(struct drm_encoder *encoder) +{ + +} + +static void nv_drm_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + +} + +static const struct drm_encoder_helper_funcs nv_encoder_helper_funcs = { + .mode_fixup = nv_drm_encoder_mode_fixup, + .prepare = nv_drm_encoder_prepare, + .commit = nv_drm_encoder_commit, + .mode_set = nv_drm_encoder_mode_set, +}; + +static uint32_t get_crtc_mask(struct drm_device *dev, uint32_t headMask) +{ + struct drm_crtc *crtc = NULL; + uint32_t crtc_mask = 0x0; + + nv_drm_for_each_crtc(crtc, dev) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + if (headMask & NVBIT(nv_crtc->head)) { + crtc_mask |= drm_crtc_mask(crtc); + } + } + + return crtc_mask; +} + +/* + * Helper function to create new encoder for given NvKmsKapiDisplay + * with given signal format. 
+ */ +static struct drm_encoder* +nv_drm_encoder_new(struct drm_device *dev, + NvKmsKapiDisplay hDisplay, + NvKmsConnectorSignalFormat format, + unsigned int crtc_mask) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + + struct nv_drm_encoder *nv_encoder = NULL; + + int ret = 0; + + /* Allocate an NVIDIA encoder object */ + + nv_encoder = nv_drm_calloc(1, sizeof(*nv_encoder)); + + if (nv_encoder == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to allocate memory for NVIDIA-DRM encoder object"); + return ERR_PTR(-ENOMEM); + } + + nv_encoder->hDisplay = hDisplay; + + /* Initialize the base encoder object and add it to the drm subsystem */ + + ret = drm_encoder_init(dev, + &nv_encoder->base, &nv_encoder_funcs, + nvkms_connector_signal_to_drm_encoder_signal(format) +#if defined(NV_DRM_ENCODER_INIT_HAS_NAME_ARG) + , NULL +#endif + ); + + if (ret != 0) { + nv_drm_free(nv_encoder); + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to initialize encoder created from NvKmsKapiDisplay 0x%08x", + hDisplay); + return ERR_PTR(ret); + } + + nv_encoder->base.possible_crtcs = crtc_mask; + + drm_encoder_helper_add(&nv_encoder->base, &nv_encoder_helper_funcs); + + return &nv_encoder->base; +} + +/* + * Add encoder for given NvKmsKapiDisplay + */ +struct drm_encoder* +nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + + struct NvKmsKapiStaticDisplayInfo *displayInfo = NULL; + struct NvKmsKapiConnectorInfo *connectorInfo = NULL; + + struct drm_encoder *encoder = NULL; + struct nv_drm_encoder *nv_encoder = NULL; + + struct drm_connector *connector = NULL; + + int ret = 0; + + /* Query NvKmsKapiStaticDisplayInfo and NvKmsKapiConnectorInfo */ + + if ((displayInfo = nv_drm_calloc(1, sizeof(*displayInfo))) == NULL) { + ret = -ENOMEM; + goto done; + } + + if (!nvKms->getStaticDisplayInfo(nv_dev->pDevice, hDisplay, displayInfo)) { + ret = -EINVAL; + goto done; + } + + connectorInfo = nvkms_get_connector_info(nv_dev->pDevice, + displayInfo->connectorHandle); + + if (IS_ERR(connectorInfo)) { + ret = PTR_ERR(connectorInfo); + goto done; + } + + /* Create and add drm encoder */ + + encoder = nv_drm_encoder_new(dev, + displayInfo->handle, + connectorInfo->signalFormat, + get_crtc_mask(dev, connectorInfo->headMask)); + + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + goto done; + } + + /* Get connector from respective physical index */ + + connector = + nv_drm_get_connector(dev, + connectorInfo->physicalIndex, + connectorInfo->type, + displayInfo->internal, displayInfo->dpAddress); + + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + goto failed_connector_encoder_attach; + } + + /* Attach encoder and connector */ + + ret = nv_drm_connector_attach_encoder(connector, encoder); + + if (ret != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to attach encoder created from NvKmsKapiDisplay 0x%08x " + "to connector", + hDisplay); + goto failed_connector_encoder_attach; + } + + nv_encoder = to_nv_encoder(encoder); + + mutex_lock(&dev->mode_config.mutex); + + nv_encoder->nv_connector = to_nv_connector(connector); + + nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector); + + mutex_unlock(&dev->mode_config.mutex); + + goto done; + +failed_connector_encoder_attach: + + drm_encoder_cleanup(encoder); + + nv_drm_free(encoder); + +done: + + nv_drm_free(displayInfo); + + nv_drm_free(connectorInfo); + + return ret != 0 ? 
ERR_PTR(ret) : encoder; +} + +static inline struct nv_drm_encoder* +get_nv_encoder_from_nvkms_display(struct drm_device *dev, + NvKmsKapiDisplay hDisplay) +{ + struct drm_encoder *encoder; + + nv_drm_for_each_encoder(encoder, dev) { + struct nv_drm_encoder *nv_encoder = to_nv_encoder(encoder); + + if (nv_encoder->hDisplay == hDisplay) { + return nv_encoder; + } + } + + return NULL; +} + +void nv_drm_handle_display_change(struct nv_drm_device *nv_dev, + NvKmsKapiDisplay hDisplay) +{ + struct drm_device *dev = nv_dev->dev; + struct nv_drm_encoder *nv_encoder = NULL; + + mutex_lock(&dev->mode_config.mutex); + + nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay); + + mutex_unlock(&dev->mode_config.mutex); + + if (nv_encoder == NULL) { + return; + } + + nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector); + + drm_kms_helper_hotplug_event(dev); +} + +void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev, + NvKmsKapiDisplay hDisplay) +{ + struct drm_device *dev = nv_dev->dev; + + struct drm_encoder *encoder = NULL; + struct nv_drm_encoder *nv_encoder = NULL; + + /* + * Look for an existing encoder with the same hDisplay and + * use it if available. + */ + + nv_encoder = get_nv_encoder_from_nvkms_display(dev, hDisplay); + + if (nv_encoder != NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Encoder with NvKmsKapiDisplay 0x%08x already exists.", + hDisplay); + return; + } + + encoder = nv_drm_add_encoder(dev, hDisplay); + + if (IS_ERR(encoder)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to add encoder for NvKmsKapiDisplay 0x%08x", + hDisplay); + return; + } + + /* + * On some kernels, DRM has the notion of a "primary group" that + * tracks the global mode setting state for the device. + * + * On kernels where DRM has a primary group, we need to reinitialize + * after adding encoders and connectors. + */ +#if defined(NV_DRM_REINIT_PRIMARY_MODE_GROUP_PRESENT) + drm_reinit_primary_mode_group(dev); +#endif + + drm_kms_helper_hotplug_event(dev); +} +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-encoder.h b/kernel-open/nvidia-drm/nvidia-drm-encoder.h new file mode 100644 index 000000000..bbaf9868c --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-encoder.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_ENCODER_H__ +#define __NVIDIA_DRM_ENCODER_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-priv.h" + +#if defined(NV_DRM_DRM_ENCODER_H_PRESENT) +#include +#else +#include +#endif + +#include "nvkms-kapi.h" + +struct nv_drm_encoder { + NvKmsKapiDisplay hDisplay; + + struct nv_drm_connector *nv_connector; + + struct drm_encoder base; +}; + +static inline struct nv_drm_encoder *to_nv_encoder( + struct drm_encoder *encoder) +{ + if (encoder == NULL) { + return NULL; + } + return container_of(encoder, struct nv_drm_encoder, base); +} + +struct drm_encoder* +nv_drm_add_encoder(struct drm_device *dev, NvKmsKapiDisplay hDisplay); + +void nv_drm_handle_display_change(struct nv_drm_device *nv_dev, + NvKmsKapiDisplay hDisplay); + +void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev, + NvKmsKapiDisplay hDisplay); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_ENCODER_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-fb.c b/kernel-open/nvidia-drm/nvidia-drm-fb.c new file mode 100644 index 000000000..d119e7c92 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-fb.c @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-ioctl.h" +#include "nvidia-drm-fb.h" +#include "nvidia-drm-utils.h" +#include "nvidia-drm-gem.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-format.h" + +#include + +static void __nv_drm_framebuffer_free(struct nv_drm_framebuffer *nv_fb) +{ + uint32_t i; + + /* Unreference gem object */ + for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) { + if (nv_fb->nv_gem[i] != NULL) { + nv_drm_gem_object_unreference_unlocked(nv_fb->nv_gem[i]); + } + } + + /* Free framebuffer */ + nv_drm_free(nv_fb); +} + +static void nv_drm_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct nv_drm_device *nv_dev = to_nv_device(fb->dev); + struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb); + + /* Cleaup core framebuffer object */ + + drm_framebuffer_cleanup(fb); + + /* Free NvKmsKapiSurface associated with this framebuffer object */ + + nvKms->destroySurface(nv_dev->pDevice, nv_fb->pSurface); + + __nv_drm_framebuffer_free(nv_fb); +} + +static int +nv_drm_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file, unsigned int *handle) +{ + struct nv_drm_framebuffer *nv_fb = to_nv_framebuffer(fb); + + return nv_drm_gem_handle_create(file, + nv_fb->nv_gem[0], + handle); +} + +static struct drm_framebuffer_funcs nv_framebuffer_funcs = { + .destroy = nv_drm_framebuffer_destroy, + .create_handle = nv_drm_framebuffer_create_handle, +}; + +static struct nv_drm_framebuffer *nv_drm_framebuffer_alloc( + struct drm_device *dev, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_framebuffer *nv_fb; + const int num_planes = nv_drm_format_num_planes(cmd->pixel_format); + uint32_t i; + + /* Allocate memory for the framebuffer object */ + nv_fb = nv_drm_calloc(1, sizeof(*nv_fb)); + + if (nv_fb == NULL) { + NV_DRM_DEV_DEBUG_DRIVER( + nv_dev, + "Failed to allocate memory for framebuffer object"); + return ERR_PTR(-ENOMEM); + } + + if (num_planes > ARRAY_SIZE(nv_fb->nv_gem)) { + NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Unsupported number of planes"); + goto failed; + } + + for (i = 0; i < num_planes; i++) { + if ((nv_fb->nv_gem[i] = nv_drm_gem_object_lookup( + dev, + file, + cmd->handles[i])) == NULL) { + NV_DRM_DEV_DEBUG_DRIVER( + nv_dev, + "Failed to find gem object of type nvkms memory"); + goto failed; + } + } + + return nv_fb; + +failed: + + __nv_drm_framebuffer_free(nv_fb); + + return ERR_PTR(-ENOENT); +} + +static int nv_drm_framebuffer_init(struct drm_device *dev, + struct nv_drm_framebuffer *nv_fb, + enum NvKmsSurfaceMemoryFormat format, + bool have_modifier, + uint64_t modifier) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct NvKmsKapiCreateSurfaceParams params = { }; + uint32_t i; + int ret; + + /* Initialize the base framebuffer object and add it to drm subsystem */ + + ret = drm_framebuffer_init(dev, &nv_fb->base, &nv_framebuffer_funcs); + if (ret != 0) { + NV_DRM_DEV_DEBUG_DRIVER( + nv_dev, + "Failed to initialize framebuffer object"); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(nv_fb->nv_gem); i++) { + if (nv_fb->nv_gem[i] != NULL) { + params.planes[i].memory = nv_fb->nv_gem[i]->pMemory; + params.planes[i].offset = nv_fb->base.offsets[i]; + params.planes[i].pitch = nv_fb->base.pitches[i]; + } + } + params.height = nv_fb->base.height; + params.width = nv_fb->base.width; + params.format = format; + + if 
(have_modifier) {
+ params.explicit_layout = true;
+ params.layout = (modifier & 0x10) ?
+ NvKmsSurfaceMemoryLayoutBlockLinear :
+ NvKmsSurfaceMemoryLayoutPitch;
+ params.log2GobsPerBlockY = modifier & 0xf;
+ } else {
+ params.explicit_layout = false;
+ }
+
+ /* Create NvKmsKapiSurface */
+
+ nv_fb->pSurface = nvKms->createSurface(nv_dev->pDevice, &params);
+ if (nv_fb->pSurface == NULL) {
+ NV_DRM_DEV_DEBUG_DRIVER(nv_dev, "Failed to create NvKmsKapiSurface");
+ drm_framebuffer_cleanup(&nv_fb->base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct drm_framebuffer *nv_drm_internal_framebuffer_create(
+ struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_mode_fb_cmd2 *cmd)
+{
+ struct nv_drm_device *nv_dev = to_nv_device(dev);
+ struct nv_drm_framebuffer *nv_fb;
+ uint64_t modifier = 0;
+ int ret;
+ enum NvKmsSurfaceMemoryFormat format;
+#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
+ int i;
+#endif
+ bool have_modifier = false;
+
+ /* Check whether NvKms supports the given pixel format */
+ if (!nv_drm_format_to_nvkms_format(cmd->pixel_format, &format)) {
+ NV_DRM_DEV_DEBUG_DRIVER(
+ nv_dev,
+ "Unsupported drm pixel format 0x%08x", cmd->pixel_format);
+ return ERR_PTR(-EINVAL);
+ }
+
+#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT)
+ if (cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ have_modifier = true;
+ modifier = cmd->modifier[0];
+
+ for (i = 0; nv_dev->modifiers[i] != DRM_FORMAT_MOD_INVALID; i++) {
+ if (nv_dev->modifiers[i] == modifier) {
+ break;
+ }
+ }
+
+ if (nv_dev->modifiers[i] == DRM_FORMAT_MOD_INVALID) {
+ NV_DRM_DEV_DEBUG_DRIVER(
+ nv_dev,
+ "Invalid format modifier for framebuffer object: 0x%016llx",
+ modifier);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+#endif
+
+ nv_fb = nv_drm_framebuffer_alloc(dev, file, cmd);
+ if (IS_ERR(nv_fb)) {
+ return (struct drm_framebuffer *)nv_fb;
+ }
+
+ /* Fill out framebuffer metadata from the userspace fb creation request */
+
+ drm_helper_mode_fill_fb_struct(
+ #if defined(NV_DRM_HELPER_MODE_FILL_FB_STRUCT_HAS_DEV_ARG)
+ dev,
+ #endif
+ &nv_fb->base,
+ cmd);
+
+ /*
+ * Finish up FB initialization by creating the backing NVKMS surface and
+ * publishing the DRM fb
+ */
+
+ ret = nv_drm_framebuffer_init(dev, nv_fb, format, have_modifier, modifier);
+
+ if (ret != 0) {
+ __nv_drm_framebuffer_free(nv_fb);
+ return ERR_PTR(ret);
+ }
+
+ return &nv_fb->base;
+}
+
+#endif
diff --git a/kernel-open/nvidia-drm/nvidia-drm-fb.h b/kernel-open/nvidia-drm/nvidia-drm-fb.h
new file mode 100644
index 000000000..cf477cc73
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-fb.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_FB_H__ +#define __NVIDIA_DRM_FB_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_FRAMEBUFFER_H_PRESENT) +#include +#endif + +#include "nvidia-drm-gem-nvkms-memory.h" +#include "nvkms-kapi.h" + +struct nv_drm_framebuffer { + struct NvKmsKapiSurface *pSurface; + + struct nv_drm_gem_object* + nv_gem[NVKMS_MAX_PLANES_PER_SURFACE]; + + struct drm_framebuffer base; +}; + +static inline struct nv_drm_framebuffer *to_nv_framebuffer( + struct drm_framebuffer *fb) +{ + if (fb == NULL) { + return NULL; + } + return container_of(fb, struct nv_drm_framebuffer, base); +} + +struct drm_framebuffer *nv_drm_internal_framebuffer_create( + struct drm_device *dev, + struct drm_file *file, + struct drm_mode_fb_cmd2 *cmd); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_FB_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-format.c b/kernel-open/nvidia-drm/nvidia-drm-format.c new file mode 100644 index 000000000..c8a295936 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-format.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif +#include +#include + +#include "nvidia-drm-format.h" +#include "nvidia-drm-os-interface.h" + +static const u32 nvkms_to_drm_format[] = { + /* RGB formats */ + [NvKmsSurfaceMemoryFormatA1R5G5B5] = DRM_FORMAT_ARGB1555, + [NvKmsSurfaceMemoryFormatX1R5G5B5] = DRM_FORMAT_XRGB1555, + [NvKmsSurfaceMemoryFormatR5G6B5] = DRM_FORMAT_RGB565, + [NvKmsSurfaceMemoryFormatA8R8G8B8] = DRM_FORMAT_ARGB8888, + [NvKmsSurfaceMemoryFormatX8R8G8B8] = DRM_FORMAT_XRGB8888, + [NvKmsSurfaceMemoryFormatA2B10G10R10] = DRM_FORMAT_ABGR2101010, + [NvKmsSurfaceMemoryFormatX2B10G10R10] = DRM_FORMAT_XBGR2101010, + [NvKmsSurfaceMemoryFormatA8B8G8R8] = DRM_FORMAT_ABGR8888, + + [NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422] = DRM_FORMAT_YUYV, + [NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422] = DRM_FORMAT_UYVY, + + /* YUV semi-planar formats + * + * NVKMS YUV semi-planar formats are MSB aligned. Yx__UxVx means + * that the UV components are packed like UUUUUVVVVV (MSB to LSB) + * and Yx_VxUx means VVVVVUUUUU (MSB to LSB). + */ + + /* + * 2 plane YCbCr + * index 0 = Y plane, [7:0] Y + * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian + * or + * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian + */ + [NvKmsSurfaceMemoryFormatY8___V8U8_N444] = DRM_FORMAT_NV24, /* non-subsampled Cr:Cb plane */ + [NvKmsSurfaceMemoryFormatY8___U8V8_N444] = DRM_FORMAT_NV42, /* non-subsampled Cb:Cr plane */ + [NvKmsSurfaceMemoryFormatY8___V8U8_N422] = DRM_FORMAT_NV16, /* 2x1 subsampled Cr:Cb plane */ + [NvKmsSurfaceMemoryFormatY8___U8V8_N422] = DRM_FORMAT_NV61, /* 2x1 subsampled Cb:Cr plane */ + [NvKmsSurfaceMemoryFormatY8___V8U8_N420] = DRM_FORMAT_NV12, /* 2x2 subsampled Cr:Cb plane */ + [NvKmsSurfaceMemoryFormatY8___U8V8_N420] = DRM_FORMAT_NV21, /* 2x2 subsampled Cb:Cr plane */ + +#if defined(DRM_FORMAT_P210) + /* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [10:6] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian + * + * 2x1 subsampled Cr:Cb plane, 10 bit per channel + */ + [NvKmsSurfaceMemoryFormatY10___V10U10_N422] = DRM_FORMAT_P210, +#endif + +#if defined(DRM_FORMAT_P010) + /* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [10:6] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian + * + * 2x2 subsampled Cr:Cb plane 10 bits per channel + */ + [NvKmsSurfaceMemoryFormatY10___V10U10_N420] = DRM_FORMAT_P010, +#endif + +#if defined(DRM_FORMAT_P012) + /* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [12:4] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian + * + * 2x2 subsampled Cr:Cb plane 12 bits per channel + */ + [NvKmsSurfaceMemoryFormatY12___V12U12_N420] = DRM_FORMAT_P012, +#endif +}; + +bool nv_drm_format_to_nvkms_format(u32 format, + enum NvKmsSurfaceMemoryFormat *nvkms_format) +{ + enum NvKmsSurfaceMemoryFormat i; + for (i = 0; i < ARRAY_SIZE(nvkms_to_drm_format); i++) { + /* + * Note nvkms_to_drm_format[] is sparsely populated: it doesn't + * handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0 + * entries when iterating through it. 
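+ * (DRM fourcc codes are always non-zero, so a zero entry can never match
+ * a valid 'format' argument.)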
+ */ + if (nvkms_to_drm_format[i] != 0 && nvkms_to_drm_format[i] == format) { + *nvkms_format = i; + return true; + } + } + return false; +} + +uint32_t *nv_drm_format_array_alloc( + unsigned int *count, + const long unsigned int nvkms_format_mask) +{ + enum NvKmsSurfaceMemoryFormat i; + unsigned int max_count = hweight64(nvkms_format_mask); + uint32_t *array = nv_drm_calloc(1, sizeof(uint32_t) * max_count); + + if (array == NULL) { + return NULL; + } + + *count = 0; + for_each_set_bit(i, &nvkms_format_mask, + sizeof(nvkms_format_mask) * BITS_PER_BYTE) { + + if (i >= ARRAY_SIZE(nvkms_to_drm_format)) { + break; + } + + /* + * Note nvkms_to_drm_format[] is sparsely populated: it doesn't + * handle all NvKmsSurfaceMemoryFormat values, so be sure to skip 0 + * entries when iterating through it. + */ + if (nvkms_to_drm_format[i] == 0) { + continue; + } + array[(*count)++] = nvkms_to_drm_format[i]; + } + + if (*count == 0) { + nv_drm_free(array); + return NULL; + } + + return array; +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-format.h b/kernel-open/nvidia-drm/nvidia-drm-format.h new file mode 100644 index 000000000..d1650967f --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-format.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_FORMAT_H__ +#define __NVIDIA_DRM_FORMAT_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include + +#include "nvkms-format.h" + +bool nv_drm_format_to_nvkms_format(u32 format, + enum NvKmsSurfaceMemoryFormat *nvkms_format); + +uint32_t *nv_drm_format_array_alloc( + unsigned int *count, + const long unsigned int nvkms_format_mask); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_FORMAT_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c b/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c new file mode 100644 index 000000000..fccde05d0 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.c @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#if defined(NV_DRM_DRM_PRIME_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_DRV_H_PRESENT) +#include +#endif + +#include "nvidia-drm-gem-dma-buf.h" +#include "nvidia-drm-ioctl.h" + +#include "linux/dma-buf.h" + +static inline +void __nv_drm_gem_dma_buf_free(struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_device *nv_dev = nv_gem->nv_dev; + struct nv_drm_gem_dma_buf *nv_dma_buf = to_nv_dma_buf(nv_gem); + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + if (nv_dma_buf->base.pMemory) { + /* Free NvKmsKapiMemory handle associated with this gem object */ + nvKms->freeMemory(nv_dev->pDevice, nv_dma_buf->base.pMemory); + } +#endif + + drm_prime_gem_destroy(&nv_gem->base, nv_dma_buf->sgt); + + nv_drm_free(nv_dma_buf); +} + +static int __nv_drm_gem_dma_buf_create_mmap_offset( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + (void)nv_dev; + return nv_drm_gem_create_mmap_offset(nv_gem, offset); +} + +static int __nv_drm_gem_dma_buf_mmap(struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma) +{ + struct dma_buf_attachment *attach = nv_gem->base.import_attach; + struct dma_buf *dma_buf = attach->dmabuf; + struct file *old_file; + int ret; + + /* check if buffer supports mmap */ + if (!dma_buf->file->f_op->mmap) + return -EINVAL; + + /* readjust the vma */ + get_file(dma_buf->file); + old_file = vma->vm_file; + vma->vm_file = dma_buf->file; + vma->vm_pgoff -= drm_vma_node_start(&nv_gem->base.vma_node);; + + ret = dma_buf->file->f_op->mmap(dma_buf->file, vma); + + if (ret) { + /* restore old parameters on failure */ + vma->vm_file = old_file; + fput(dma_buf->file); + } else { + if (old_file) + fput(old_file); + } + + return ret; +} + +const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops = { + .free = __nv_drm_gem_dma_buf_free, + .create_mmap_offset = __nv_drm_gem_dma_buf_create_mmap_offset, + .mmap = __nv_drm_gem_dma_buf_mmap, +}; + +struct drm_gem_object* +nv_drm_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct dma_buf *dma_buf = attach->dmabuf; + struct nv_drm_gem_dma_buf *nv_dma_buf; + struct NvKmsKapiMemory *pMemory; + + if ((nv_dma_buf = + nv_drm_calloc(1, sizeof(*nv_dma_buf))) == NULL) { + return NULL; + 
} + + // dma_buf->size must be a multiple of PAGE_SIZE + BUG_ON(dma_buf->size % PAGE_SIZE); + + pMemory = NULL; +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + pMemory = nvKms->getSystemMemoryHandleFromDmaBuf(nv_dev->pDevice, + (NvP64)(NvUPtr)dma_buf, + dma_buf->size - 1); + } +#endif + + nv_drm_gem_object_init(nv_dev, &nv_dma_buf->base, + &__nv_gem_dma_buf_ops, dma_buf->size, pMemory); + + nv_dma_buf->sgt = sgt; + + return &nv_dma_buf->base.base; +} + +int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_export_dmabuf_memory_params *p = data; + struct nv_drm_gem_dma_buf *nv_dma_buf = NULL; + int ret = 0; + struct NvKmsKapiMemory *pTmpMemory = NULL; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EINVAL; + goto done; + } + + if (p->__pad != 0) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed"); + goto done; + } + + if ((nv_dma_buf = nv_drm_gem_object_dma_buf_lookup( + dev, filep, p->handle)) == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup DMA-BUF GEM object for export: 0x%08x", + p->handle); + goto done; + } + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (!nv_dma_buf->base.pMemory) { + /* + * Get RM system memory handle from SGT - RM will take a reference + * on this GEM object to prevent the DMA-BUF from being unpinned + * prematurely. + */ + pTmpMemory = nvKms->getSystemMemoryHandleFromSgt( + nv_dev->pDevice, + (NvP64)(NvUPtr)nv_dma_buf->sgt, + (NvP64)(NvUPtr)&nv_dma_buf->base.base, + nv_dma_buf->base.base.size - 1); + } + } +#endif + + if (!nv_dma_buf->base.pMemory && !pTmpMemory) { + ret = -ENOMEM; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to get memory to export from DMA-BUF GEM object: 0x%08x", + p->handle); + goto done; + } + + if (!nvKms->exportMemory(nv_dev->pDevice, + nv_dma_buf->base.pMemory ? + nv_dma_buf->base.pMemory : pTmpMemory, + p->nvkms_params_ptr, + p->nvkms_params_size)) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to export memory from DMA-BUF GEM object: 0x%08x", + p->handle); + goto done; + } + +done: + if (pTmpMemory) { + /* + * Release reference on RM system memory to prevent circular + * refcounting. Another refcount will still be held by RM FD. + */ + nvKms->freeMemory(nv_dev->pDevice, pTmpMemory); + } + + if (nv_dma_buf != NULL) { + nv_drm_gem_object_unreference_unlocked(&nv_dma_buf->base); + } + + return ret; +} +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h b/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h new file mode 100644 index 000000000..05b16fc3c --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-dma-buf.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_DMA_BUF_H__ +#define __NVIDIA_DRM_GEM_DMA_BUF_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_dma_buf { + struct nv_drm_gem_object base; + struct sg_table *sgt; +}; + +extern const struct nv_drm_gem_object_funcs __nv_gem_dma_buf_ops; + +static inline struct nv_drm_gem_dma_buf *to_nv_dma_buf( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_dma_buf, base); + } + + return NULL; +} + +static inline +struct nv_drm_gem_dma_buf *nv_drm_gem_object_dma_buf_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(dev, filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &__nv_gem_dma_buf_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_dma_buf(nv_gem); +} + +struct drm_gem_object* +nv_drm_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + +int nv_drm_gem_export_dmabuf_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif + +#endif /* __NVIDIA_DRM_GEM_DMA_BUF_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c new file mode 100644 index 000000000..1d047f2b1 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.c @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-gem-nvkms-memory.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-ioctl.h" + +#if defined(NV_DRM_DRM_DRV_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_PRIME_H_PRESENT) +#include +#endif + +#include + +#include "nv-mm.h" + +static void __nv_drm_gem_nvkms_memory_free(struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_device *nv_dev = nv_gem->nv_dev; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + + if (nv_nvkms_memory->physically_mapped) { + if (nv_nvkms_memory->pWriteCombinedIORemapAddress != NULL) { + iounmap(nv_nvkms_memory->pWriteCombinedIORemapAddress); + } + + nvKms->unmapMemory(nv_dev->pDevice, + nv_nvkms_memory->base.pMemory, + NVKMS_KAPI_MAPPING_TYPE_USER, + nv_nvkms_memory->pPhysicalAddress); + } + + if (nv_nvkms_memory->pages_count != 0) { + nvKms->freeMemoryPages((NvU64 *)nv_nvkms_memory->pages); + } + + /* Free NvKmsKapiMemory handle associated with this gem object */ + + nvKms->freeMemory(nv_dev->pDevice, nv_nvkms_memory->base.pMemory); + + nv_drm_free(nv_nvkms_memory); +} + +static int __nv_drm_gem_nvkms_mmap(struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma) +{ + return drm_gem_mmap_obj(&nv_gem->base, + drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma); +} + +static vm_fault_t __nv_drm_gem_nvkms_handle_vma_fault( + struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + unsigned long address = nv_page_fault_va(vmf); + struct drm_gem_object *gem = vma->vm_private_data; + unsigned long page_offset, pfn; + vm_fault_t ret; + + page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node); + + if (nv_nvkms_memory->pages_count == 0) { + pfn = (unsigned long)(uintptr_t)nv_nvkms_memory->pPhysicalAddress; + pfn >>= PAGE_SHIFT; + pfn += page_offset; + } else { + BUG_ON(page_offset > nv_nvkms_memory->pages_count); + pfn = page_to_pfn(nv_nvkms_memory->pages[page_offset]); + } + +#if defined(NV_VMF_INSERT_PFN_PRESENT) + ret = vmf_insert_pfn(vma, address, pfn); +#else + ret = vm_insert_pfn(vma, address, pfn); + switch (ret) { + case 0: + case -EBUSY: + /* + * EBUSY indicates that another thread already handled + * the faulted range. 
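+ * In either case the PFN is now mapped, so report VM_FAULT_NOPAGE.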
+ */ + ret = VM_FAULT_NOPAGE; + break; + case -ENOMEM: + ret = VM_FAULT_OOM; + break; + default: + WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret); + ret = VM_FAULT_SIGBUS; + break; + } +#endif /* defined(NV_VMF_INSERT_PFN_PRESENT) */ + return ret; +#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */ + return VM_FAULT_SIGBUS; +} + +static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup( + struct drm_device *dev, + const struct nv_drm_gem_object *nv_gem_src); + +static int __nv_drm_gem_nvkms_map( + struct nv_drm_device *nv_dev, + struct NvKmsKapiMemory *pMemory, + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory, + uint64_t size) +{ + if (!nv_dev->hasVideoMemory) { + return 0; + } + + if (!nvKms->mapMemory(nv_dev->pDevice, + pMemory, + NVKMS_KAPI_MAPPING_TYPE_USER, + &nv_nvkms_memory->pPhysicalAddress)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to map NvKmsKapiMemory 0x%p", + pMemory); + return -ENOMEM; + } + + nv_nvkms_memory->pWriteCombinedIORemapAddress = ioremap_wc( + (uintptr_t)nv_nvkms_memory->pPhysicalAddress, + size); + + if (!nv_nvkms_memory->pWriteCombinedIORemapAddress) { + NV_DRM_DEV_LOG_INFO( + nv_dev, + "Failed to ioremap_wc NvKmsKapiMemory 0x%p", + pMemory); + } + + nv_nvkms_memory->physically_mapped = true; + + return 0; +} + +static int __nv_drm_gem_map_nvkms_memory_offset( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + + if (!nv_nvkms_memory->physically_mapped) { + int ret = __nv_drm_gem_nvkms_map(nv_dev, + nv_nvkms_memory->base.pMemory, + nv_nvkms_memory, + nv_nvkms_memory->base.base.size); + if (ret) { + return ret; + } + } + + return nv_drm_gem_create_mmap_offset(&nv_nvkms_memory->base, offset); +} + +static struct sg_table *__nv_drm_gem_nvkms_memory_prime_get_sg_table( + struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_device *nv_dev = nv_gem->nv_dev; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = + to_nv_nvkms_memory(nv_gem); + struct sg_table *sg_table; + + if (nv_nvkms_memory->pages_count == 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Cannot create sg_table for NvKmsKapiMemory 0x%p", + nv_gem->pMemory); + return NULL; + } + + sg_table = nv_drm_prime_pages_to_sg(nv_dev->dev, + nv_nvkms_memory->pages, + nv_nvkms_memory->pages_count); + + return sg_table; +} + +const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops = { + .free = __nv_drm_gem_nvkms_memory_free, + .prime_dup = __nv_drm_gem_nvkms_prime_dup, + .mmap = __nv_drm_gem_nvkms_mmap, + .handle_vma_fault = __nv_drm_gem_nvkms_handle_vma_fault, + .create_mmap_offset = __nv_drm_gem_map_nvkms_memory_offset, + .prime_get_sg_table = __nv_drm_gem_nvkms_memory_prime_get_sg_table, +}; + +static int __nv_drm_nvkms_gem_obj_init( + struct nv_drm_device *nv_dev, + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory, + struct NvKmsKapiMemory *pMemory, + uint64_t size) +{ + NvU64 *pages = NULL; + NvU32 numPages = 0; + + nv_nvkms_memory->pPhysicalAddress = NULL; + nv_nvkms_memory->pWriteCombinedIORemapAddress = NULL; + nv_nvkms_memory->physically_mapped = false; + + if (!nvKms->getMemoryPages(nv_dev->pDevice, + pMemory, + &pages, + &numPages) && + !nv_dev->hasVideoMemory) { + /* GetMemoryPages may fail for vidmem allocations, + * but it should not fail for sysmem allocations. 
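+ * The failure is only treated as fatal when the device has no video
+ * memory, per the !nv_dev->hasVideoMemory check in the condition above.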
*/ + NV_DRM_DEV_LOG_ERR(nv_dev, + "Failed to get memory pages for NvKmsKapiMemory 0x%p", + pMemory); + return -ENOMEM; + } + nv_nvkms_memory->pages_count = numPages; + nv_nvkms_memory->pages = (struct page **)pages; + + nv_drm_gem_object_init(nv_dev, + &nv_nvkms_memory->base, + &nv_gem_nvkms_memory_ops, + size, + pMemory); + + return 0; +} + +int nv_drm_dumb_create( + struct drm_file *file_priv, + struct drm_device *dev, struct drm_mode_create_dumb *args) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + uint8_t compressible = 0; + struct NvKmsKapiMemory *pMemory; + int ret = 0; + + args->pitch = roundup(args->width * ((args->bpp + 7) >> 3), + nv_dev->pitchAlignment); + + args->size = args->height * args->pitch; + + /* Core DRM requires gem object size to be aligned with PAGE_SIZE */ + + args->size = roundup(args->size, PAGE_SIZE); + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + ret = -ENOMEM; + goto fail; + } + + if (nv_dev->hasVideoMemory) { + pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice, + NvKmsSurfaceMemoryLayoutPitch, + args->size, + &compressible); + } else { + pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice, + NvKmsSurfaceMemoryLayoutPitch, + args->size, + &compressible); + } + + if (pMemory == NULL) { + ret = -ENOMEM; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to allocate NvKmsKapiMemory for dumb object of size %llu", + args->size); + goto nvkms_alloc_memory_failed; + } + + ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, args->size); + if (ret) { + goto nvkms_gem_obj_init_failed; + } + + /* Always map dumb buffer memory up front. Clients are only expected + * to use dumb buffers for software rendering, so they're not much use + * without a CPU mapping. 
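+ * (__nv_drm_gem_nvkms_map() only creates the physical mapping and
+ * write-combined ioremap when the allocation is in video memory; on
+ * systems without video memory it is a no-op.)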
+ */ + ret = __nv_drm_gem_nvkms_map(nv_dev, pMemory, nv_nvkms_memory, args->size); + if (ret) { + nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base); + goto fail; + } + + return nv_drm_gem_handle_create_drop_reference(file_priv, + &nv_nvkms_memory->base, + &args->handle); + +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_alloc_memory_failed: + nv_drm_free(nv_nvkms_memory); + +fail: + return ret; +} + +int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_import_nvkms_memory_params *p = data; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + struct NvKmsKapiMemory *pMemory; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EINVAL; + goto failed; + } + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + ret = -ENOMEM; + goto failed; + } + + pMemory = nvKms->importMemory(nv_dev->pDevice, + p->mem_size, + p->nvkms_params_ptr, + p->nvkms_params_size); + + if (pMemory == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to import NVKMS memory to GEM object"); + goto nvkms_import_memory_failed; + } + + ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, p->mem_size); + if (ret) { + goto nvkms_gem_obj_init_failed; + } + + return nv_drm_gem_handle_create_drop_reference(filep, + &nv_nvkms_memory->base, + &p->handle); +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_import_memory_failed: + nv_drm_free(nv_nvkms_memory); + +failed: + return ret; +} + +int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_export_nvkms_memory_params *p = data; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EINVAL; + goto done; + } + + if (p->__pad != 0) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, "Padding fields must be zeroed"); + goto done; + } + + if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup( + dev, + filep, + p->handle)) == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup NVKMS gem object for export: 0x%08x", + p->handle); + goto done; + } + + if (!nvKms->exportMemory(nv_dev->pDevice, + nv_nvkms_memory->base.pMemory, + p->nvkms_params_ptr, + p->nvkms_params_size)) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to export memory from NVKMS GEM object: 0x%08x", p->handle); + goto done; + } + +done: + if (nv_nvkms_memory != NULL) { + nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base); + } + + return ret; +} + +int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_alloc_nvkms_memory_params *p = data; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory = NULL; + struct NvKmsKapiMemory *pMemory; + enum NvKmsSurfaceMemoryLayout layout; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = -EINVAL; + goto failed; + } + + if (p->__pad != 0) { + NV_DRM_DEV_LOG_ERR(nv_dev, "non-zero value in padding field"); + goto failed; + } + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + ret = -ENOMEM; + goto failed; + } + + layout = p->block_linear ? 
+ NvKmsSurfaceMemoryLayoutBlockLinear : NvKmsSurfaceMemoryLayoutPitch; + + if (nv_dev->hasVideoMemory) { + pMemory = nvKms->allocateVideoMemory(nv_dev->pDevice, + layout, + p->memory_size, + &p->compressible); + } else { + pMemory = nvKms->allocateSystemMemory(nv_dev->pDevice, + layout, + p->memory_size, + &p->compressible); + } + + if (pMemory == NULL) { + ret = -EINVAL; + NV_DRM_DEV_LOG_ERR(nv_dev, + "Failed to allocate NVKMS memory for GEM object"); + goto nvkms_alloc_memory_failed; + } + + ret = __nv_drm_nvkms_gem_obj_init(nv_dev, nv_nvkms_memory, pMemory, + p->memory_size); + if (ret) { + goto nvkms_gem_obj_init_failed; + } + + return nv_drm_gem_handle_create_drop_reference(filep, + &nv_nvkms_memory->base, + &p->handle); + +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_alloc_memory_failed: + nv_drm_free(nv_nvkms_memory); + +failed: + return ret; +} + +static struct drm_gem_object *__nv_drm_gem_nvkms_prime_dup( + struct drm_device *dev, + const struct nv_drm_gem_object *nv_gem_src) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + const struct nv_drm_device *nv_dev_src; + const struct nv_drm_gem_nvkms_memory *nv_nvkms_memory_src; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + struct NvKmsKapiMemory *pMemory; + + BUG_ON(nv_gem_src == NULL || nv_gem_src->ops != &nv_gem_nvkms_memory_ops); + + nv_dev_src = to_nv_device(nv_gem_src->base.dev); + nv_nvkms_memory_src = to_nv_nvkms_memory_const(nv_gem_src); + + if ((nv_nvkms_memory = + nv_drm_calloc(1, sizeof(*nv_nvkms_memory))) == NULL) { + return NULL; + } + + pMemory = nvKms->dupMemory(nv_dev->pDevice, + nv_dev_src->pDevice, nv_gem_src->pMemory); + if (pMemory == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to import NVKMS memory to GEM object"); + goto nvkms_dup_memory_failed; + } + + if (__nv_drm_nvkms_gem_obj_init(nv_dev, + nv_nvkms_memory, + pMemory, + nv_gem_src->base.size)) { + goto nvkms_gem_obj_init_failed; + } + + return &nv_nvkms_memory->base.base; + +nvkms_gem_obj_init_failed: + nvKms->freeMemory(nv_dev->pDevice, pMemory); + +nvkms_dup_memory_failed: + nv_drm_free(nv_nvkms_memory); + + return NULL; +} + +int nv_drm_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, uint32_t handle, + uint64_t *offset) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + int ret = -EINVAL; + + if ((nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup( + dev, + file, + handle)) == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for mapping: 0x%08x", + handle); + return ret; + } + + ret = __nv_drm_gem_map_nvkms_memory_offset(nv_dev, + &nv_nvkms_memory->base, offset); + + nv_drm_gem_object_unreference_unlocked(&nv_nvkms_memory->base); + + return ret; +} + +int nv_drm_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file, handle); +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h new file mode 100644 index 000000000..7ecbb94d6 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-nvkms-memory.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ +#define __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_nvkms_memory { + struct nv_drm_gem_object base; + + bool physically_mapped; + + void *pPhysicalAddress; + void *pWriteCombinedIORemapAddress; + + struct page **pages; + unsigned long pages_count; +}; + +extern const struct nv_drm_gem_object_funcs nv_gem_nvkms_memory_ops; + +static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base); + } + + return NULL; +} + +static inline struct nv_drm_gem_nvkms_memory *to_nv_nvkms_memory_const( + const struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_nvkms_memory, base); + } + + return NULL; +} + +static inline +struct nv_drm_gem_nvkms_memory *nv_drm_gem_object_nvkms_memory_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(dev, filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &nv_gem_nvkms_memory_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_nvkms_memory(nv_gem); +} + +int nv_drm_dumb_create( + struct drm_file *file_priv, + struct drm_device *dev, struct drm_mode_create_dumb *args); + +int nv_drm_gem_import_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_export_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_alloc_nvkms_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_dumb_map_offset(struct drm_file *file, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); + +int nv_drm_dumb_destroy(struct drm_file *file, + struct drm_device *dev, + uint32_t handle); + +struct drm_gem_object *nv_drm_gem_nvkms_prime_import( + struct drm_device *dev, + struct drm_gem_object *gem); + +#endif + +#endif /* __NVIDIA_DRM_GEM_NVKMS_MEMORY_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c new file mode 100644 index 000000000..e554adc27 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.c @@ -0,0 
+1,217 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#if defined(NV_DRM_DRM_PRIME_H_PRESENT) +#include +#endif + +#include "nvidia-drm-gem-user-memory.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-ioctl.h" + +#include "linux/dma-buf.h" +#include "linux/mm.h" +#include "nv-mm.h" + +static inline +void __nv_drm_gem_user_memory_free(struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem); + + nv_drm_unlock_user_pages(nv_user_memory->pages_count, + nv_user_memory->pages); + + nv_drm_free(nv_user_memory); +} + +static struct sg_table *__nv_drm_gem_user_memory_prime_get_sg_table( + struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem); + struct drm_gem_object *gem = &nv_gem->base; + + return nv_drm_prime_pages_to_sg(gem->dev, + nv_user_memory->pages, + nv_user_memory->pages_count); +} + +static void *__nv_drm_gem_user_memory_prime_vmap( + struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem); + + return nv_drm_vmap(nv_user_memory->pages, + nv_user_memory->pages_count); +} + +static void __nv_drm_gem_user_memory_prime_vunmap( + struct nv_drm_gem_object *gem, + void *address) +{ + nv_drm_vunmap(address); +} + +static int __nv_drm_gem_user_memory_mmap(struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma) +{ + int ret = drm_gem_mmap_obj(&nv_gem->base, + drm_vma_node_size(&nv_gem->base.vma_node) << PAGE_SHIFT, vma); + + if (ret < 0) { + return ret; + } + + /* + * Enforce that user-memory GEM mappings are MAP_SHARED, to prevent COW + * with MAP_PRIVATE and VM_MIXEDMAP + */ + if (!(vma->vm_flags & VM_SHARED)) { + return -EINVAL; + } + + vma->vm_flags &= ~VM_PFNMAP; + vma->vm_flags &= ~VM_IO; + vma->vm_flags |= VM_MIXEDMAP; + + return 0; +} + +static vm_fault_t __nv_drm_gem_user_memory_handle_vma_fault( + struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem); + unsigned long address = nv_page_fault_va(vmf); + struct drm_gem_object *gem = vma->vm_private_data; + unsigned long page_offset; + vm_fault_t ret; + + page_offset = vmf->pgoff - drm_vma_node_start(&gem->vma_node); + + BUG_ON(page_offset > nv_user_memory->pages_count); + 
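+ /* Insert the pinned user page backing this offset into the faulting VMA. */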
+ ret = vm_insert_page(vma, address, nv_user_memory->pages[page_offset]);
+ switch (ret) {
+ case 0:
+ case -EBUSY:
+ /*
+ * EBUSY indicates that another thread already handled
+ * the faulted range.
+ */
+ ret = VM_FAULT_NOPAGE;
+ break;
+ case -ENOMEM:
+ ret = VM_FAULT_OOM;
+ break;
+ default:
+ WARN_ONCE(1, "Unhandled error in %s: %d\n", __FUNCTION__, ret);
+ ret = VM_FAULT_SIGBUS;
+ break;
+ }
+
+ return ret;
+}
+
+static int __nv_drm_gem_user_create_mmap_offset(
+ struct nv_drm_device *nv_dev,
+ struct nv_drm_gem_object *nv_gem,
+ uint64_t *offset)
+{
+ (void)nv_dev;
+ return nv_drm_gem_create_mmap_offset(nv_gem, offset);
+}
+
+const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops = {
+ .free = __nv_drm_gem_user_memory_free,
+ .prime_get_sg_table = __nv_drm_gem_user_memory_prime_get_sg_table,
+ .prime_vmap = __nv_drm_gem_user_memory_prime_vmap,
+ .prime_vunmap = __nv_drm_gem_user_memory_prime_vunmap,
+ .mmap = __nv_drm_gem_user_memory_mmap,
+ .handle_vma_fault = __nv_drm_gem_user_memory_handle_vma_fault,
+ .create_mmap_offset = __nv_drm_gem_user_create_mmap_offset,
+};
+
+int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *filep)
+{
+ struct nv_drm_device *nv_dev = to_nv_device(dev);
+
+ struct drm_nvidia_gem_import_userspace_memory_params *params = data;
+ struct nv_drm_gem_user_memory *nv_user_memory;
+
+ struct page **pages = NULL;
+ unsigned long pages_count = 0;
+
+ int ret = 0;
+
+ if ((params->size % PAGE_SIZE) != 0) {
+ NV_DRM_DEV_LOG_ERR(
+ nv_dev,
+ "Userspace memory 0x%llx size should be in a multiple of page "
+ "size to create a gem object",
+ params->address);
+ return -EINVAL;
+ }
+
+ pages_count = params->size / PAGE_SIZE;
+
+ ret = nv_drm_lock_user_pages(params->address, pages_count, &pages);
+
+ if (ret != 0) {
+ NV_DRM_DEV_LOG_ERR(
+ nv_dev,
+ "Failed to lock user pages for address 0x%llx: %d",
+ params->address, ret);
+ return ret;
+ }
+
+ if ((nv_user_memory =
+ nv_drm_calloc(1, sizeof(*nv_user_memory))) == NULL) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ nv_user_memory->pages = pages;
+ nv_user_memory->pages_count = pages_count;
+
+ nv_drm_gem_object_init(nv_dev,
+ &nv_user_memory->base,
+ &__nv_gem_user_memory_ops,
+ params->size,
+ NULL /* pMemory */);
+
+ return nv_drm_gem_handle_create_drop_reference(filep,
+ &nv_user_memory->base,
+ &params->handle);
+
+failed:
+ nv_drm_unlock_user_pages(pages_count, pages);
+
+ return ret;
+}
+
+#endif
diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
new file mode 100644
index 000000000..275c083fc
--- /dev/null
+++ b/kernel-open/nvidia-drm/nvidia-drm-gem-user-memory.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_USER_MEMORY_H__ +#define __NVIDIA_DRM_GEM_USER_MEMORY_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-gem.h" + +struct nv_drm_gem_user_memory { + struct nv_drm_gem_object base; + struct page **pages; + unsigned long pages_count; +}; + +extern const struct nv_drm_gem_object_funcs __nv_gem_user_memory_ops; + +static inline struct nv_drm_gem_user_memory *to_nv_user_memory( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_user_memory, base); + } + + return NULL; +} + +int nv_drm_gem_import_userspace_memory_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +static inline +struct nv_drm_gem_user_memory *nv_drm_gem_object_user_memory_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(dev, filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &__nv_gem_user_memory_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_nv_user_memory(nv_gem); +} + +#endif + +#endif /* __NVIDIA_DRM_GEM_USER_MEMORY_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem.c b/kernel-open/nvidia-drm/nvidia-drm-gem.c new file mode 100644 index 000000000..92d61a6ee --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem.c @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-ioctl.h" +#include "nvidia-drm-prime-fence.h" +#include "nvidia-drm-gem.h" +#include "nvidia-drm-gem-nvkms-memory.h" +#include "nvidia-drm-gem-user-memory.h" +#include "nvidia-dma-resv-helper.h" +#include "nvidia-drm-helper.h" +#include "nvidia-drm-gem-dma-buf.h" +#include "nvidia-drm-gem-nvkms-memory.h" + +#if defined(NV_DRM_DRM_DRV_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_PRIME_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_FILE_H_PRESENT) +#include +#endif + +#include "linux/dma-buf.h" + +#include "nv-mm.h" + +void nv_drm_gem_free(struct drm_gem_object *gem) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + /* Cleanup core gem object */ + drm_gem_object_release(&nv_gem->base); + +#if defined(NV_DRM_FENCE_AVAILABLE) && !defined(NV_DRM_GEM_OBJECT_HAS_RESV) + nv_dma_resv_fini(&nv_gem->resv); +#endif + + nv_gem->ops->free(nv_gem); +} + +#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) && \ + defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG) + +/* + * The 'dma_buf_map' structure is renamed to 'iosys_map' by the commit + * 7938f4218168 ("dma-buf-map: Rename to iosys-map"). + */ +#if defined(NV_LINUX_IOSYS_MAP_H_PRESENT) +typedef struct iosys_map nv_sysio_map_t; +#else +typedef struct dma_buf_map nv_sysio_map_t; +#endif + +static int nv_drm_gem_vmap(struct drm_gem_object *gem, + nv_sysio_map_t *map) +{ + map->vaddr = nv_drm_gem_prime_vmap(gem); + if (map->vaddr == NULL) { + return -ENOMEM; + } + map->is_iomem = true; + return 0; +} + +static void nv_drm_gem_vunmap(struct drm_gem_object *gem, + nv_sysio_map_t *map) +{ + nv_drm_gem_prime_vunmap(gem, map->vaddr); + map->vaddr = NULL; +} +#endif + +#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) || \ + !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) +static struct drm_gem_object_funcs nv_drm_gem_funcs = { + .free = nv_drm_gem_free, + .get_sg_table = nv_drm_gem_prime_get_sg_table, + +#if !defined(NV_DRM_DRIVER_HAS_GEM_PRIME_CALLBACKS) + .export = drm_gem_prime_export, +#if defined(NV_DRM_GEM_OBJECT_VMAP_HAS_MAP_ARG) + .vmap = nv_drm_gem_vmap, + .vunmap = nv_drm_gem_vunmap, +#else + .vmap = nv_drm_gem_prime_vmap, + .vunmap = nv_drm_gem_prime_vunmap, +#endif + .vm_ops = &nv_drm_gem_vma_ops, +#endif +}; +#endif + +void nv_drm_gem_object_init(struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + const struct nv_drm_gem_object_funcs * const ops, + size_t size, + struct NvKmsKapiMemory *pMemory) +{ + struct drm_device *dev = nv_dev->dev; + + nv_gem->nv_dev = nv_dev; + nv_gem->ops = ops; + + nv_gem->pMemory = pMemory; + + /* Initialize the gem object */ + +#if defined(NV_DRM_FENCE_AVAILABLE) + nv_dma_resv_init(&nv_gem->resv); + +#if defined(NV_DRM_GEM_OBJECT_HAS_RESV) + nv_gem->base.resv = &nv_gem->resv; +#endif + +#endif + +#if !defined(NV_DRM_DRIVER_HAS_GEM_FREE_OBJECT) + nv_gem->base.funcs = &nv_drm_gem_funcs; +#endif + + drm_gem_private_object_init(dev, &nv_gem->base, size); +} + +struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ +#if defined(NV_DMA_BUF_OWNER_PRESENT) + struct drm_gem_object *gem_dst; + struct nv_drm_gem_object *nv_gem_src; + + if (dma_buf->owner == dev->driver->fops->owner) { + nv_gem_src = to_nv_gem_object(dma_buf->priv); + + if (nv_gem_src->base.dev != dev && + nv_gem_src->ops->prime_dup != NULL) { + /* + * If we're importing from another NV device, try to handle the + * import internally rather than 
attaching through the dma-buf + * mechanisms. Importing from the same device is even easier, + * and drm_gem_prime_import() handles that just fine. + */ + gem_dst = nv_gem_src->ops->prime_dup(dev, nv_gem_src); + + if (gem_dst) + return gem_dst; + } + } +#endif /* NV_DMA_BUF_OWNER_PRESENT */ + + return drm_gem_prime_import(dev, dma_buf); +} + +struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_get_sg_table != NULL) { + return nv_gem->ops->prime_get_sg_table(nv_gem); + } + + return ERR_PTR(-ENOTSUPP); +} + +void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_vmap != NULL) { + return nv_gem->ops->prime_vmap(nv_gem); + } + + return ERR_PTR(-ENOTSUPP); +} + +void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (nv_gem->ops->prime_vunmap != NULL) { + nv_gem->ops->prime_vunmap(nv_gem, address); + } +} + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) +nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(obj); + + return &nv_gem->resv; +} +#endif + +int nv_drm_gem_map_offset_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_map_offset_params *params = data; + struct nv_drm_gem_object *nv_gem; + int ret; + + if ((nv_gem = nv_drm_gem_object_lookup(dev, + filep, + params->handle)) == NULL) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for map: 0x%08x", + params->handle); + return -EINVAL; + } + + if (nv_gem->ops->create_mmap_offset) { + ret = nv_gem->ops->create_mmap_offset(nv_dev, nv_gem, ¶ms->offset); + } else { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Gem object type does not support mapping: 0x%08x", + params->handle); + ret = -EINVAL; + } + + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) +int nv_drm_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct drm_file *priv = file->private_data; + struct drm_device *dev = priv->minor->dev; + struct drm_gem_object *obj = NULL; + struct drm_vma_offset_node *node; + int ret = 0; + struct nv_drm_gem_object *nv_gem; + + drm_vma_offset_lock_lookup(dev->vma_offset_manager); + node = nv_drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, + vma->vm_pgoff, vma_pages(vma)); + if (likely(node)) { + obj = container_of(node, struct drm_gem_object, vma_node); + /* + * When the object is being freed, after it hits 0-refcnt it proceeds + * to tear down the object. In the process it will attempt to remove + * the VMA offset and so acquire this mgr->vm_lock. Therefore if we + * find an object with a 0-refcnt that matches our range, we know it is + * in the process of being destroyed and will be freed as soon as we + * release the lock - so we have to check for the 0-refcnted object and + * treat it as invalid. 
+ */ + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + drm_vma_offset_unlock_lookup(dev->vma_offset_manager); + + if (!obj) + return -EINVAL; + + nv_gem = to_nv_gem_object(obj); + if (nv_gem->ops->mmap == NULL) { + ret = -EINVAL; + goto done; + } + + if (!nv_drm_vma_node_is_allowed(node, file)) { + ret = -EACCES; + goto done; + } + +#if defined(NV_DRM_VMA_OFFSET_NODE_HAS_READONLY) + if (node->readonly) { + if (vma->vm_flags & VM_WRITE) { + ret = -EINVAL; + goto done; + } + vma->vm_flags &= ~VM_MAYWRITE; + } +#endif + + ret = nv_gem->ops->mmap(nv_gem, vma); + +done: + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} +#endif + +int nv_drm_gem_identify_object_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct drm_nvidia_gem_identify_object_params *p = data; + struct nv_drm_gem_dma_buf *nv_dma_buf; + struct nv_drm_gem_nvkms_memory *nv_nvkms_memory; + struct nv_drm_gem_user_memory *nv_user_memory; + struct nv_drm_gem_object *nv_gem = NULL; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + return -EINVAL; + } + + nv_dma_buf = nv_drm_gem_object_dma_buf_lookup(dev, filep, p->handle); + if (nv_dma_buf) { + p->object_type = NV_GEM_OBJECT_DMABUF; + nv_gem = &nv_dma_buf->base; + goto done; + } + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + nv_nvkms_memory = nv_drm_gem_object_nvkms_memory_lookup(dev, filep, p->handle); + if (nv_nvkms_memory) { + p->object_type = NV_GEM_OBJECT_NVKMS; + nv_gem = &nv_nvkms_memory->base; + goto done; + } +#endif + + nv_user_memory = nv_drm_gem_object_user_memory_lookup(dev, filep, p->handle); + if (nv_user_memory) { + p->object_type = NV_GEM_OBJECT_USERMEMORY; + nv_gem = &nv_user_memory->base; + goto done; + } + + p->object_type = NV_GEM_OBJECT_UNKNOWN; + +done: + if (nv_gem) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + } + return 0; +} + +/* XXX Move these vma operations to os layer */ + +static vm_fault_t __nv_drm_vma_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + struct drm_gem_object *gem = vma->vm_private_data; + struct nv_drm_gem_object *nv_gem = to_nv_gem_object(gem); + + if (!nv_gem) { + return VM_FAULT_SIGBUS; + } + + return nv_gem->ops->handle_vma_fault(nv_gem, vma, vmf); +} + +/* + * Note that nv_drm_vma_fault() can be called for different or same + * ranges of the same drm_gem_object simultaneously. + */ + +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) +static vm_fault_t nv_drm_vma_fault(struct vm_fault *vmf) +{ + return __nv_drm_vma_fault(vmf->vma, vmf); +} +#else +static vm_fault_t nv_drm_vma_fault(struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + return __nv_drm_vma_fault(vma, vmf); +} +#endif + +const struct vm_operations_struct nv_drm_gem_vma_ops = { + .open = drm_gem_vm_open, + .fault = nv_drm_vma_fault, + .close = drm_gem_vm_close, +}; + +#endif /* NV_DRM_AVAILABLE */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-gem.h b/kernel-open/nvidia-drm/nvidia-drm-gem.h new file mode 100644 index 000000000..a27c2e996 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-gem.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_GEM_H__ +#define __NVIDIA_DRM_GEM_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-priv.h" + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_GEM_H_PRESENT) +#include +#endif + +#include "nvkms-kapi.h" +#include "nv-mm.h" + +#if defined(NV_DRM_FENCE_AVAILABLE) +#include "nvidia-dma-fence-helper.h" +#include "nvidia-dma-resv-helper.h" +#endif + +struct nv_drm_gem_object; + +struct nv_drm_gem_object_funcs { + void (*free)(struct nv_drm_gem_object *nv_gem); + struct sg_table *(*prime_get_sg_table)(struct nv_drm_gem_object *nv_gem); + void *(*prime_vmap)(struct nv_drm_gem_object *nv_gem); + void (*prime_vunmap)(struct nv_drm_gem_object *nv_gem, void *address); + struct drm_gem_object *(*prime_dup)(struct drm_device *dev, + const struct nv_drm_gem_object *nv_gem_src); + int (*mmap)(struct nv_drm_gem_object *nv_gem, struct vm_area_struct *vma); + vm_fault_t (*handle_vma_fault)(struct nv_drm_gem_object *nv_gem, + struct vm_area_struct *vma, + struct vm_fault *vmf); + int (*create_mmap_offset)(struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + uint64_t *offset); +}; + +struct nv_drm_gem_object { + struct drm_gem_object base; + + struct nv_drm_device *nv_dev; + const struct nv_drm_gem_object_funcs *ops; + + struct NvKmsKapiMemory *pMemory; + +#if defined(NV_DRM_FENCE_AVAILABLE) + nv_dma_resv_t resv; +#endif +}; + +static inline struct nv_drm_gem_object *to_nv_gem_object( + struct drm_gem_object *gem) +{ + if (gem != NULL) { + return container_of(gem, struct nv_drm_gem_object, base); + } + + return NULL; +} + +/* + * drm_gem_object_{get/put}() added by commit + * e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and + * drm_gem_object_{reference/unreference}() removed by commit + * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15). 
+ */ + +static inline void +nv_drm_gem_object_unreference_unlocked(struct nv_drm_gem_object *nv_gem) +{ +#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT) + +#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT) + drm_gem_object_put_unlocked(&nv_gem->base); +#else + drm_gem_object_put(&nv_gem->base); +#endif + +#else + drm_gem_object_unreference_unlocked(&nv_gem->base); +#endif +} + +static inline void +nv_drm_gem_object_unreference(struct nv_drm_gem_object *nv_gem) +{ +#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT) + drm_gem_object_put(&nv_gem->base); +#else + drm_gem_object_unreference(&nv_gem->base); +#endif +} + +static inline int nv_drm_gem_handle_create_drop_reference( + struct drm_file *file_priv, + struct nv_drm_gem_object *nv_gem, + uint32_t *handle) +{ + int ret = drm_gem_handle_create(file_priv, &nv_gem->base, handle); + + /* drop reference from allocate - handle holds it now */ + + nv_drm_gem_object_unreference_unlocked(nv_gem); + + return ret; +} + +static inline int nv_drm_gem_create_mmap_offset( + struct nv_drm_gem_object *nv_gem, + uint64_t *offset) +{ + int ret; + + if ((ret = drm_gem_create_mmap_offset(&nv_gem->base)) < 0) { + NV_DRM_DEV_LOG_ERR( + nv_gem->nv_dev, + "drm_gem_create_mmap_offset failed with error code %d", + ret); + goto done; + } + + *offset = drm_vma_node_offset_addr(&nv_gem->base.vma_node); + +done: + + return ret; +} + +void nv_drm_gem_free(struct drm_gem_object *gem); + +static inline struct nv_drm_gem_object *nv_drm_gem_object_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ +#if (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 3) + return to_nv_gem_object(drm_gem_object_lookup(dev, filp, handle)); +#elif (NV_DRM_GEM_OBJECT_LOOKUP_ARGUMENT_COUNT == 2) + return to_nv_gem_object(drm_gem_object_lookup(filp, handle)); +#else + #error "Unknown argument count of drm_gem_object_lookup()" +#endif +} + +static inline int nv_drm_gem_handle_create(struct drm_file *filp, + struct nv_drm_gem_object *nv_gem, + uint32_t *handle) +{ + return drm_gem_handle_create(filp, &nv_gem->base, handle); +} + +void nv_drm_gem_object_init(struct nv_drm_device *nv_dev, + struct nv_drm_gem_object *nv_gem, + const struct nv_drm_gem_object_funcs * const ops, + size_t size, + struct NvKmsKapiMemory *pMemory); + +struct drm_gem_object *nv_drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); + +struct sg_table *nv_drm_gem_prime_get_sg_table(struct drm_gem_object *gem); + +void *nv_drm_gem_prime_vmap(struct drm_gem_object *gem); + +void nv_drm_gem_prime_vunmap(struct drm_gem_object *gem, void *address); + +#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) +nv_dma_resv_t* nv_drm_gem_prime_res_obj(struct drm_gem_object *obj); +#endif + +extern const struct vm_operations_struct nv_drm_gem_vma_ops; + +int nv_drm_gem_map_offset_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_mmap(struct file *file, struct vm_area_struct *vma); + +int nv_drm_gem_identify_object_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif /* NV_DRM_AVAILABLE */ + +#endif /* __NVIDIA_DRM_GEM_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-helper.c b/kernel-open/nvidia-drm/nvidia-drm-helper.c new file mode 100644 index 000000000..3831180e0 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-helper.c @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains snapshots of DRM helper functions from the + * Linux kernel which are used by nvidia-drm.ko if the target kernel + * predates the helper function. Having these functions consistently + * present simplifies nvidia-drm.ko source. + */ + +#include "nvidia-drm-helper.h" + +#include "nvmisc.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_ATOMIC_UAPI_H_PRESENT) +#include +#endif + +static void __nv_drm_framebuffer_put(struct drm_framebuffer *fb) +{ +#if defined(NV_DRM_FRAMEBUFFER_GET_PRESENT) + drm_framebuffer_put(fb); +#else + drm_framebuffer_unreference(fb); +#endif + +} + +/* + * drm_atomic_helper_disable_all() has been added by commit + * 1494276000db789c6d2acd85747be4707051c801, which is Signed-off-by: + * Thierry Reding + * Daniel Vetter + * + * drm_atomic_helper_disable_all() is copied from + * linux/drivers/gpu/drm/drm_atomic_helper.c and modified to use + * nv_drm_for_each_crtc instead of drm_for_each_crtc to loop over all crtcs, + * use nv_drm_for_each_*_in_state instead of for_each_connector_in_state to loop + * over all modeset object states, and use drm_atomic_state_free() if + * drm_atomic_state_put() is not available. + * + * drm_atomic_helper_disable_all() is copied from + * linux/drivers/gpu/drm/drm_atomic_helper.c @ + * 49d70aeaeca8f62b72b7712ecd1e29619a445866, which has the following + * copyright and license information: + * + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark + * Daniel Vetter + */ +int nv_drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_connector_state *conn_state; + struct drm_connector *conn; + struct drm_plane_state *plane_state; + struct drm_plane *plane; + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + unsigned plane_mask = 0; + int ret, i; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + state->acquire_ctx = ctx; + + nv_drm_for_each_crtc(crtc, dev) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto free; + } + + crtc_state->active = false; + + ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL); + if (ret < 0) + goto free; + + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret < 0) + goto free; + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret < 0) + goto free; + } + + nv_drm_for_each_connector_in_state(state, conn, conn_state, i) { + ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); + if (ret < 0) + goto free; + } + + nv_drm_for_each_plane_in_state(state, plane, plane_state, i) { + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret < 0) + goto free; + + drm_atomic_set_fb_for_plane(plane_state, NULL); + plane_mask |= NVBIT(drm_plane_index(plane)); + plane->old_fb = plane->fb; + } + + ret = drm_atomic_commit(state); +free: + if (plane_mask) { + drm_for_each_plane_mask(plane, dev, plane_mask) { + if (ret == 0) { + plane->fb = NULL; + plane->crtc = NULL; + + WARN_ON(plane->state->fb); + WARN_ON(plane->state->crtc); + + if (plane->old_fb) + __nv_drm_framebuffer_put(plane->old_fb); + } + plane->old_fb = NULL; + } + } + +#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT) + drm_atomic_state_put(state); +#else + if (ret != 0) { + drm_atomic_state_free(state); + } else { + /* + * In case of success, drm_atomic_commit() takes care to cleanup and + * free @state. + * + * Comment placed above drm_atomic_commit() says: The caller must not + * free or in any other way access @state. If the function fails then + * the caller must clean up @state itself. + */ + } +#endif + return ret; +} + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-helper.h b/kernel-open/nvidia-drm/nvidia-drm-helper.h new file mode 100644 index 000000000..ecc5ecfdb --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-helper.h @@ -0,0 +1,584 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_HELPER_H__ +#define __NVIDIA_DRM_HELPER_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_DRV_H_PRESENT) +#include +#endif + +/* + * drm_dev_put() is added by commit 9a96f55034e41b4e002b767e9218d55f03bdff7d + * (2017-09-26) and drm_dev_unref() is removed by + * ba1d345401476a5f7fbad622607c5a1f95e59b31 (2018-11-15). + * + * drm_dev_unref() has been added and drm_dev_free() removed by commit - + * + * 2014-01-29: 099d1c290e2ebc3b798961a6c177c3aef5f0b789 + */ +static inline void nv_drm_dev_free(struct drm_device *dev) +{ +#if defined(NV_DRM_DEV_PUT_PRESENT) + drm_dev_put(dev); +#elif defined(NV_DRM_DEV_UNREF_PRESENT) + drm_dev_unref(dev); +#else + drm_dev_free(dev); +#endif +} + +#if defined(NV_DRM_DRM_PRIME_H_PRESENT) +#include +#endif + +static inline struct sg_table* +nv_drm_prime_pages_to_sg(struct drm_device *dev, + struct page **pages, unsigned int nr_pages) +{ +#if defined(NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG) + return drm_prime_pages_to_sg(dev, pages, nr_pages); +#else + return drm_prime_pages_to_sg(pages, nr_pages); +#endif +} + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +/* + * drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(), + * drm_for_each_encoder and drm_for_each_plane() were added by kernel + * commit 6295d607ad34ee4e43aab3f20714c2ef7a6adea1 which was + * Signed-off-by: + * Daniel Vetter + * drm_for_each_connector(), drm_for_each_crtc(), drm_for_each_fb(), + * drm_for_each_encoder and drm_for_each_plane() are copied from + * include/drm/drm_crtc @ + * 6295d607ad34ee4e43aab3f20714c2ef7a6adea1 + * which has the following copyright and license information: + * + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include + +#if defined(drm_for_each_plane) +#define nv_drm_for_each_plane(plane, dev) \ + drm_for_each_plane(plane, dev) +#else +#define nv_drm_for_each_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) +#endif + +#if defined(drm_for_each_crtc) +#define nv_drm_for_each_crtc(crtc, dev) \ + drm_for_each_crtc(crtc, dev) +#else +#define nv_drm_for_each_crtc(crtc, dev) \ + list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) +#endif + +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) +#define nv_drm_for_each_connector(connector, conn_iter, dev) \ + drm_for_each_connector_iter(connector, conn_iter) +#elif defined(drm_for_each_connector) +#define nv_drm_for_each_connector(connector, conn_iter, dev) \ + drm_for_each_connector(connector, dev) +#else +#define nv_drm_for_each_connector(connector, conn_iter, dev) \ + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); \ + list_for_each_entry(connector, &(dev)->mode_config.connector_list, head) +#endif + +#if defined(drm_for_each_encoder) +#define nv_drm_for_each_encoder(encoder, dev) \ + drm_for_each_encoder(encoder, dev) +#else +#define nv_drm_for_each_encoder(encoder, dev) \ + list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head) +#endif + +#if defined(drm_for_each_fb) +#define nv_drm_for_each_fb(fb, dev) \ + drm_for_each_fb(fb, dev) +#else +#define nv_drm_for_each_fb(fb, dev) \ + list_for_each_entry(fb, &(dev)->mode_config.fb_list, head) +#endif + +#include +#include + +int nv_drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); + +/* + * for_each_connector_in_state(), for_each_crtc_in_state() and + * for_each_plane_in_state() were added by kernel commit + * df63b9994eaf942afcdb946d27a28661d7dfbf2a which was Signed-off-by: + * Ander Conselvan de Oliveira + * Daniel Vetter + * + * for_each_connector_in_state(), for_each_crtc_in_state() and + * for_each_plane_in_state() were copied from + * include/drm/drm_atomic.h @ + * 21a01abbe32a3cbeb903378a24e504bfd9fe0648 + * which has the following copyright and license information: + * + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Rob Clark + * Daniel Vetter + */ + +/** + * nv_drm_for_each_connector_in_state - iterate over all connectors in an + * atomic update + * @__state: &struct drm_atomic_state pointer + * @connector: &struct drm_connector iteration cursor + * @connector_state: &struct drm_connector_state iteration cursor + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all connectors in an atomic update. Note that before the + * software state is committed (by calling drm_atomic_helper_swap_state(), this + * points to the new state, while afterwards it points to the old state. Due to + * this tricky confusion this macro is deprecated. + */ +#if !defined(for_each_connector_in_state) +#define nv_drm_for_each_connector_in_state(__state, \ + connector, connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_connector && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (connector_state) = (__state)->connectors[__i].state, 1); \ + (__i)++) \ + for_each_if (connector) +#else +#define nv_drm_for_each_connector_in_state(__state, \ + connector, connector_state, __i) \ + for_each_connector_in_state(__state, connector, connector_state, __i) +#endif + + +/** + * nv_drm_for_each_crtc_in_state - iterate over all CRTCs in an atomic update + * @__state: &struct drm_atomic_state pointer + * @crtc: &struct drm_crtc iteration cursor + * @crtc_state: &struct drm_crtc_state iteration cursor + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all CRTCs in an atomic update. Note that before the + * software state is committed (by calling drm_atomic_helper_swap_state(), this + * points to the new state, while afterwards it points to the old state. Due to + * this tricky confusion this macro is deprecated. + */ +#if !defined(for_each_crtc_in_state) +#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_crtc && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (crtc_state) = (__state)->crtcs[__i].state, 1); \ + (__i)++) \ + for_each_if (crtc_state) +#else +#define nv_drm_for_each_crtc_in_state(__state, crtc, crtc_state, __i) \ + for_each_crtc_in_state(__state, crtc, crtc_state, __i) +#endif + +/** + * nv_drm_for_each_plane_in_state - iterate over all planes in an atomic update + * @__state: &struct drm_atomic_state pointer + * @plane: &struct drm_plane iteration cursor + * @plane_state: &struct drm_plane_state iteration cursor + * @__i: int iteration cursor, for macro-internal use + * + * This iterates over all planes in an atomic update. Note that before the + * software state is committed (by calling drm_atomic_helper_swap_state(), this + * points to the new state, while afterwards it points to the old state. Due to + * this tricky confusion this macro is deprecated. 
+ */ +#if !defined(for_each_plane_in_state) +#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->dev->mode_config.num_total_plane && \ + ((plane) = (__state)->planes[__i].ptr, \ + (plane_state) = (__state)->planes[__i].state, 1); \ + (__i)++) \ + for_each_if (plane_state) +#else +#define nv_drm_for_each_plane_in_state(__state, plane, plane_state, __i) \ + for_each_plane_in_state(__state, plane, plane_state, __i) +#endif + +static inline struct drm_crtc *nv_drm_crtc_find(struct drm_device *dev, + uint32_t id) +{ +#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG) + return drm_crtc_find(dev, NULL /* file_priv */, id); +#else + return drm_crtc_find(dev, id); +#endif +} + +static inline struct drm_encoder *nv_drm_encoder_find(struct drm_device *dev, + uint32_t id) +{ +#if defined(NV_DRM_MODE_OBJECT_FIND_HAS_FILE_PRIV_ARG) + return drm_encoder_find(dev, NULL /* file_priv */, id); +#else + return drm_encoder_find(dev, id); +#endif +} + +/* + * drm_connector_for_each_possible_encoder() is added by commit + * 83aefbb887b59df0b3520965c3701e01deacfc52 which was Signed-off-by: + * Ville Syrjälä + * + * drm_connector_for_each_possible_encoder() is copied from + * include/drm/drm_connector.h and modified to use nv_drm_encoder_find() + * instead of drm_encoder_find(). + * + * drm_connector_for_each_possible_encoder() is copied from + * include/drm/drm_connector.h @ + * 83aefbb887b59df0b3520965c3701e01deacfc52 + * which has the following copyright and license information: + * + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. 
+ */ + +#if defined(NV_DRM_DRM_CONNECTOR_H_PRESENT) +#include +#endif + +/** + * nv_drm_connector_for_each_possible_encoder - iterate connector's possible + * encoders + * @connector: &struct drm_connector pointer + * @encoder: &struct drm_encoder pointer used as cursor + * @__i: int iteration cursor, for macro-internal use + */ +#if !defined(drm_connector_for_each_possible_encoder) + +#if !defined(for_each_if) +#define for_each_if(condition) if (!(condition)) {} else +#endif + +#define __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i) \ + for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \ + (connector)->encoder_ids[(__i)] != 0; (__i)++) \ + for_each_if((encoder) = \ + nv_drm_encoder_find((connector)->dev, \ + (connector)->encoder_ids[(__i)])) + +#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \ + { \ + unsigned int __i; \ + __nv_drm_connector_for_each_possible_encoder(connector, encoder, __i) + +#define nv_drm_connector_for_each_possible_encoder_end \ + } + +#else + +#if NV_DRM_CONNECTOR_FOR_EACH_POSSIBLE_ENCODER_ARGUMENT_COUNT == 3 + +#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \ + { \ + unsigned int __i; \ + drm_connector_for_each_possible_encoder(connector, encoder, __i) + +#define nv_drm_connector_for_each_possible_encoder_end \ + } + +#else + +#define nv_drm_connector_for_each_possible_encoder(connector, encoder) \ + drm_connector_for_each_possible_encoder(connector, encoder) + +#define nv_drm_connector_for_each_possible_encoder_end + +#endif + +#endif + +static inline int +nv_drm_connector_attach_encoder(struct drm_connector *connector, + struct drm_encoder *encoder) +{ +#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME) + return drm_mode_connector_attach_encoder(connector, encoder); +#else + return drm_connector_attach_encoder(connector, encoder); +#endif +} + +static inline int +nv_drm_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid) +{ +#if defined(NV_DRM_CONNECTOR_FUNCS_HAVE_MODE_IN_NAME) + return drm_mode_connector_update_edid_property(connector, edid); +#else + return drm_connector_update_edid_property(connector, edid); +#endif +} + +#if defined(NV_DRM_CONNECTOR_LIST_ITER_PRESENT) +#include + +static inline +void nv_drm_connector_list_iter_begin(struct drm_device *dev, + struct drm_connector_list_iter *iter) +{ +#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT) + drm_connector_list_iter_begin(dev, iter); +#else + drm_connector_list_iter_get(dev, iter); +#endif +} + +static inline +void nv_drm_connector_list_iter_end(struct drm_connector_list_iter *iter) +{ +#if defined(NV_DRM_CONNECTOR_LIST_ITER_BEGIN_PRESENT) + drm_connector_list_iter_end(iter); +#else + drm_connector_list_iter_put(iter); +#endif +} +#endif + +/* + * The drm_format_num_planes() function was added by commit d0d110e09629 drm: + * Add drm_format_num_planes() utility function in v3.3 (2011-12-20). Prototype + * was moved from drm_crtc.h to drm_fourcc.h by commit ae4df11a0f53 (drm: Move + * format-related helpers to drm_fourcc.c) in v4.8 (2016-06-09). + * drm_format_num_planes() has been removed by commit 05c452c115bf (drm: Remove + * users of drm_format_num_planes) in v5.3 (2019-05-16). + * + * drm_format_info() is available only from v4.10 (2016-10-18), added by commit + * 84770cc24f3a (drm: Centralize format information). 
+ */ +#include +#include + +static inline int nv_drm_format_num_planes(uint32_t format) +{ +#if defined(NV_DRM_FORMAT_NUM_PLANES_PRESENT) + return drm_format_num_planes(format); +#else + const struct drm_format_info *info = drm_format_info(format); + return info != NULL ? info->num_planes : 1; +#endif +} + +#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) +/* + * DRM_FORMAT_MOD_LINEAR was also defined after the original modifier support + * was added to the kernel, as a more explicit alias of DRM_FORMAT_MOD_NONE + */ +#if !defined(DRM_FORMAT_MOD_VENDOR_NONE) +#define DRM_FORMAT_MOD_VENDOR_NONE 0 +#endif + +#if !defined(DRM_FORMAT_MOD_LINEAR) +#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) +#endif + +/* + * DRM_FORMAT_MOD_INVALID was defined after the original modifier support was + * added to the kernel, for use as a sentinel value. + */ +#if !defined(DRM_FORMAT_RESERVED) +#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) +#endif + +#if !defined(DRM_FORMAT_MOD_INVALID) +#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED) +#endif + +/* + * DRM_FORMAT_MOD_VENDOR_NVIDIA was previously called + * DRM_FORMAT_MOD_VNEDOR_NV. + */ +#if !defined(DRM_FORMAT_MOD_VENDOR_NVIDIA) +#define DRM_FORMAT_MOD_VENDOR_NVIDIA DRM_FORMAT_MOD_VENDOR_NV +#endif + +/* + * DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D is a relatively new addition to the + * upstream kernel headers compared to the other format modifiers. + */ +#if !defined(DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D) +#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \ + fourcc_mod_code(NVIDIA, (0x10 | \ + ((h) & 0xf) | \ + (((k) & 0xff) << 12) | \ + (((g) & 0x3) << 20) | \ + (((s) & 0x1) << 22) | \ + (((c) & 0x7) << 23))) +#endif + +#endif /* defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) */ + +/* + * drm_vma_offset_exact_lookup_locked() were added + * by kernel commit 2225cfe46bcc which was Signed-off-by: + * Daniel Vetter + * + * drm_vma_offset_exact_lookup_locked() were copied from + * include/drm/drm_vma_manager.h @ 2225cfe46bcc + * which has the following copyright and license information: + * + * Copyright (c) 2013 David Herrmann + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include + +/** + * nv_drm_vma_offset_exact_lookup_locked() - Look up node by exact address + * @mgr: Manager object + * @start: Start address (page-based, not byte-based) + * @pages: Size of object (page-based) + * + * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node. 
+ * It only returns the exact object with the given start address. + * + * RETURNS: + * Node at exact start address @start. + */ +static inline struct drm_vma_offset_node * +nv_drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages) +{ +#if defined(NV_DRM_VMA_OFFSET_EXACT_LOOKUP_LOCKED_PRESENT) + return drm_vma_offset_exact_lookup_locked(mgr, start, pages); +#else + struct drm_vma_offset_node *node; + + node = drm_vma_offset_lookup_locked(mgr, start, pages); + return (node && node->vm_node.start == start) ? node : NULL; +#endif +} + +static inline bool +nv_drm_vma_node_is_allowed(struct drm_vma_offset_node *node, + struct file *filp) +{ +#if defined(NV_DRM_VMA_NODE_IS_ALLOWED_HAS_TAG_ARG) + return drm_vma_node_is_allowed(node, filp->private_data); +#else + return drm_vma_node_is_allowed(node, filp); +#endif +} + +#endif /* defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) */ + +#endif /* defined(NV_DRM_AVAILABLE) */ + +#endif /* __NVIDIA_DRM_HELPER_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-ioctl.h b/kernel-open/nvidia-drm/nvidia-drm-ioctl.h new file mode 100644 index 000000000..1a377b11f --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-ioctl.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _UAPI_NVIDIA_DRM_IOCTL_H_ +#define _UAPI_NVIDIA_DRM_IOCTL_H_ + +#include + +/* + * We should do our best to keep these values constant. 
Any change to these will + * be backwards incompatible with client applications that might be using them + */ +#define DRM_NVIDIA_GET_CRTC_CRC32 0x00 +#define DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY 0x01 +#define DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY 0x02 +#define DRM_NVIDIA_GET_DEV_INFO 0x03 +#define DRM_NVIDIA_FENCE_SUPPORTED 0x04 +#define DRM_NVIDIA_FENCE_CONTEXT_CREATE 0x05 +#define DRM_NVIDIA_GEM_FENCE_ATTACH 0x06 +#define DRM_NVIDIA_GET_CLIENT_CAPABILITY 0x08 +#define DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY 0x09 +#define DRM_NVIDIA_GEM_MAP_OFFSET 0x0a +#define DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY 0x0b +#define DRM_NVIDIA_GET_CRTC_CRC32_V2 0x0c +#define DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY 0x0d +#define DRM_NVIDIA_GEM_IDENTIFY_OBJECT 0x0e + +#define DRM_IOCTL_NVIDIA_GEM_IMPORT_NVKMS_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_NVKMS_MEMORY), \ + struct drm_nvidia_gem_import_nvkms_memory_params) + +#define DRM_IOCTL_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IMPORT_USERSPACE_MEMORY), \ + struct drm_nvidia_gem_import_userspace_memory_params) + +#define DRM_IOCTL_NVIDIA_GET_DEV_INFO \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_DEV_INFO), \ + struct drm_nvidia_get_dev_info_params) + +/* + * XXX Solaris compiler has issues with DRM_IO. None of this is supported on + * Solaris anyway, so just skip it. + * + * 'warning: suggest parentheses around arithmetic in operand of |' + */ +#if defined(NV_LINUX) +#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED \ + DRM_IO(DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_SUPPORTED) +#else +#define DRM_IOCTL_NVIDIA_FENCE_SUPPORTED 0 +#endif + +#define DRM_IOCTL_NVIDIA_FENCE_CONTEXT_CREATE \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_FENCE_CONTEXT_CREATE), \ + struct drm_nvidia_fence_context_create_params) + +#define DRM_IOCTL_NVIDIA_GEM_FENCE_ATTACH \ + DRM_IOW((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_FENCE_ATTACH), \ + struct drm_nvidia_gem_fence_attach_params) + +#define DRM_IOCTL_NVIDIA_GET_CLIENT_CAPABILITY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CLIENT_CAPABILITY), \ + struct drm_nvidia_get_client_capability_params) + +#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32 \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32), \ + struct drm_nvidia_get_crtc_crc32_params) + +#define DRM_IOCTL_NVIDIA_GET_CRTC_CRC32_V2 \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GET_CRTC_CRC32_V2), \ + struct drm_nvidia_get_crtc_crc32_v2_params) + +#define DRM_IOCTL_NVIDIA_GEM_EXPORT_NVKMS_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_NVKMS_MEMORY), \ + struct drm_nvidia_gem_export_nvkms_memory_params) + +#define DRM_IOCTL_NVIDIA_GEM_MAP_OFFSET \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_MAP_OFFSET), \ + struct drm_nvidia_gem_map_offset_params) + +#define DRM_IOCTL_NVIDIA_GEM_ALLOC_NVKMS_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_ALLOC_NVKMS_MEMORY), \ + struct drm_nvidia_gem_alloc_nvkms_memory_params) + +#define DRM_IOCTL_NVIDIA_GEM_EXPORT_DMABUF_MEMORY \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_EXPORT_DMABUF_MEMORY), \ + struct drm_nvidia_gem_export_dmabuf_memory_params) + +#define DRM_IOCTL_NVIDIA_GEM_IDENTIFY_OBJECT \ + DRM_IOWR((DRM_COMMAND_BASE + DRM_NVIDIA_GEM_IDENTIFY_OBJECT), \ + struct drm_nvidia_gem_identify_object_params) + +struct drm_nvidia_gem_import_nvkms_memory_params { + uint64_t mem_size; /* IN */ + + uint64_t nvkms_params_ptr; /* IN */ + uint64_t nvkms_params_size; /* IN */ + + uint32_t handle; /* OUT */ + + uint32_t __pad; +}; + +struct drm_nvidia_gem_import_userspace_memory_params { + uint64_t 
size; /* IN Size of memory in bytes */ + uint64_t address; /* IN Virtual address of userspace memory */ + uint32_t handle; /* OUT Handle to gem object */ +}; + +struct drm_nvidia_get_dev_info_params { + uint32_t gpu_id; /* OUT */ + uint32_t primary_index; /* OUT; the "card%d" value */ + + /* See DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D definitions of these */ + uint32_t generic_page_kind; /* OUT */ + uint32_t page_kind_generation; /* OUT */ + uint32_t sector_layout; /* OUT */ +}; + +struct drm_nvidia_fence_context_create_params { + uint32_t handle; /* OUT GEM handle to fence context */ + + uint32_t index; /* IN Index of semaphore to use for fencing */ + uint64_t size; /* IN Size of semaphore surface in bytes */ + + /* Params for importing userspace semaphore surface */ + uint64_t import_mem_nvkms_params_ptr; /* IN */ + uint64_t import_mem_nvkms_params_size; /* IN */ + + /* Params for creating software signaling event */ + uint64_t event_nvkms_params_ptr; /* IN */ + uint64_t event_nvkms_params_size; /* IN */ +}; + +struct drm_nvidia_gem_fence_attach_params { + uint32_t handle; /* IN GEM handle to attach fence to */ + uint32_t fence_context_handle; /* IN GEM handle to fence context on which fence is run on */ + uint32_t sem_thresh; /* IN Semaphore value to reach before signal */ +}; + +struct drm_nvidia_get_client_capability_params { + uint64_t capability; /* IN Client capability enum */ + uint64_t value; /* OUT Client capability value */ +}; + +/* Struct that stores Crc value and if it is supported by hardware */ +struct drm_nvidia_crtc_crc32 { + uint32_t value; /* Read value, undefined if supported is false */ + uint8_t supported; /* Supported boolean, true if readable by hardware */ +}; + +struct drm_nvidia_crtc_crc32_v2_out { + struct drm_nvidia_crtc_crc32 compositorCrc32; /* OUT compositor hardware CRC32 value */ + struct drm_nvidia_crtc_crc32 rasterGeneratorCrc32; /* OUT raster generator CRC32 value */ + struct drm_nvidia_crtc_crc32 outputCrc32; /* OUT SF/SOR CRC32 value */ +}; + +struct drm_nvidia_get_crtc_crc32_v2_params { + uint32_t crtc_id; /* IN CRTC identifier */ + struct drm_nvidia_crtc_crc32_v2_out crc32; /* OUT Crc32 output structure */ +}; + +struct drm_nvidia_get_crtc_crc32_params { + uint32_t crtc_id; /* IN CRTC identifier */ + uint32_t crc32; /* OUT CRC32 value */ +}; + +struct drm_nvidia_gem_export_nvkms_memory_params { + uint32_t handle; /* IN */ + uint32_t __pad; + + uint64_t nvkms_params_ptr; /* IN */ + uint64_t nvkms_params_size; /* IN */ +}; + +struct drm_nvidia_gem_map_offset_params { + uint32_t handle; /* IN Handle to gem object */ + uint32_t __pad; + + uint64_t offset; /* OUT Fake offset */ +}; + +struct drm_nvidia_gem_alloc_nvkms_memory_params { + uint32_t handle; /* OUT */ + uint8_t block_linear; /* IN */ + uint8_t compressible; /* IN/OUT */ + uint16_t __pad; + + uint64_t memory_size; /* IN */ +}; + +struct drm_nvidia_gem_export_dmabuf_memory_params { + uint32_t handle; /* IN GEM Handle*/ + uint32_t __pad; + + uint64_t nvkms_params_ptr; /* IN */ + uint64_t nvkms_params_size; /* IN */ +}; + +typedef enum { + NV_GEM_OBJECT_NVKMS, + NV_GEM_OBJECT_DMABUF, + NV_GEM_OBJECT_USERMEMORY, + + NV_GEM_OBJECT_UNKNOWN = 0x7fffffff /* Force size of 32-bits. 
*/ +} drm_nvidia_gem_object_type; + +struct drm_nvidia_gem_identify_object_params { + uint32_t handle; /* IN GEM handle*/ + drm_nvidia_gem_object_type object_type; /* OUT GEM object type */ +}; + +#endif /* _UAPI_NVIDIA_DRM_IOCTL_H_ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-linux.c b/kernel-open/nvidia-drm/nvidia-drm-linux.c new file mode 100644 index 000000000..6bdf40a44 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-linux.c @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +#include "nvidia-drm-os-interface.h" +#include "nvidia-drm.h" + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#include + +#include "nv-mm.h" + +MODULE_PARM_DESC( + modeset, + "Enable atomic kernel modesetting (1 = enable, 0 = disable (default))"); +bool nv_drm_modeset_module_param = false; +module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400); + +void *nv_drm_calloc(size_t nmemb, size_t size) +{ + return kzalloc(nmemb * size, GFP_KERNEL); +} + +void nv_drm_free(void *ptr) +{ + if (IS_ERR(ptr)) { + return; + } + + kfree(ptr); +} + +char *nv_drm_asprintf(const char *fmt, ...) 
+{ + va_list ap; + char *p; + + va_start(ap, fmt); + p = kvasprintf(GFP_KERNEL, fmt, ap); + va_end(ap); + + return p; +} + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) + #define WRITE_COMBINE_FLUSH() asm volatile("sfence":::"memory") +#elif defined(NVCPU_FAMILY_ARM) + #if defined(NVCPU_ARM) + #define WRITE_COMBINE_FLUSH() { dsb(); outer_sync(); } + #elif defined(NVCPU_AARCH64) + #define WRITE_COMBINE_FLUSH() mb() + #endif +#elif defined(NVCPU_PPC64LE) + #define WRITE_COMBINE_FLUSH() asm volatile("sync":::"memory") +#endif + +void nv_drm_write_combine_flush(void) +{ + WRITE_COMBINE_FLUSH(); +} + +int nv_drm_lock_user_pages(unsigned long address, + unsigned long pages_count, struct page ***pages) +{ + struct mm_struct *mm = current->mm; + struct page **user_pages; + const int write = 1; + const int force = 0; + int pages_pinned; + + user_pages = nv_drm_calloc(pages_count, sizeof(*user_pages)); + + if (user_pages == NULL) { + return -ENOMEM; + } + + nv_mmap_read_lock(mm); + + pages_pinned = NV_GET_USER_PAGES(address, pages_count, write, force, + user_pages, NULL); + nv_mmap_read_unlock(mm); + + if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) { + goto failed; + } + + *pages = user_pages; + + return 0; + +failed: + + if (pages_pinned > 0) { + int i; + + for (i = 0; i < pages_pinned; i++) { + put_page(user_pages[i]); + } + } + + nv_drm_free(user_pages); + + return (pages_pinned < 0) ? pages_pinned : -EINVAL; +} + +void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages) +{ + unsigned long i; + + for (i = 0; i < pages_count; i++) { + set_page_dirty_lock(pages[i]); + + put_page(pages[i]); + } + + nv_drm_free(pages); +} + +void *nv_drm_vmap(struct page **pages, unsigned long pages_count) +{ + return vmap(pages, pages_count, VM_USERMAP, PAGE_KERNEL); +} + +void nv_drm_vunmap(void *address) +{ + vunmap(address); +} + +#endif /* NV_DRM_AVAILABLE */ + +/************************************************************************* + * Linux loading support code. + *************************************************************************/ + +static int __init nv_linux_drm_init(void) +{ + return nv_drm_init(); +} + +static void __exit nv_linux_drm_exit(void) +{ + nv_drm_exit(); +} + +module_init(nv_linux_drm_init); +module_exit(nv_linux_drm_exit); + +#if defined(MODULE_LICENSE) + + MODULE_LICENSE("Dual MIT/GPL"); + + + +#endif +#if defined(MODULE_INFO) + MODULE_INFO(supported, "external"); +#endif +#if defined(MODULE_VERSION) + MODULE_VERSION(NV_VERSION_STRING); +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-modeset.c b/kernel-open/nvidia-drm/nvidia-drm-modeset.c new file mode 100644 index 000000000..9132af9ad --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-modeset.c @@ -0,0 +1,577 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-modeset.h" +#include "nvidia-drm-crtc.h" +#include "nvidia-drm-os-interface.h" +#include "nvidia-drm-helper.h" + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_VBLANK_H_PRESENT) +#include +#endif + +#include +#include +#include + +struct nv_drm_atomic_state { + struct NvKmsKapiRequestedModeSetConfig config; + struct drm_atomic_state base; +}; + +static inline struct nv_drm_atomic_state *to_nv_atomic_state( + struct drm_atomic_state *state) +{ + return container_of(state, struct nv_drm_atomic_state, base); +} + +struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev) +{ + struct nv_drm_atomic_state *nv_state = + nv_drm_calloc(1, sizeof(*nv_state)); + + if (nv_state == NULL || drm_atomic_state_init(dev, &nv_state->base) < 0) { + nv_drm_free(nv_state); + return NULL; + } + + return &nv_state->base; +} + +void nv_drm_atomic_state_clear(struct drm_atomic_state *state) +{ + drm_atomic_state_default_clear(state); +} + +void nv_drm_atomic_state_free(struct drm_atomic_state *state) +{ + struct nv_drm_atomic_state *nv_state = + to_nv_atomic_state(state); + drm_atomic_state_default_release(state); + nv_drm_free(nv_state); +} + +/** + * __will_generate_flip_event - Check whether event is going to be generated by + * hardware when it flips from old crtc/plane state to current one. This + * function is called after drm_atomic_helper_swap_state(), therefore new state + * is swapped into current state. + */ +static bool __will_generate_flip_event(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct drm_crtc_state *new_crtc_state = crtc->state; + struct nv_drm_crtc_state *nv_new_crtc_state = + to_nv_crtc_state(new_crtc_state); + struct drm_plane_state *old_plane_state = NULL; + struct drm_plane *plane = NULL; + struct drm_plane *primary_plane = crtc->primary; + bool primary_event = false; + bool overlay_event = false; + int i; + + if (!old_crtc_state->active && !new_crtc_state->active) { + /* + * crtc is not active in old and new states therefore all planes are + * disabled, hardware can not generate flip events. + */ + return false; + } + + /* Find out whether primary & overlay flip done events will be generated. */ + nv_drm_for_each_plane_in_state(old_crtc_state->state, + plane, old_plane_state, i) { + if (old_plane_state->crtc != crtc) { + continue; + } + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + continue; + } + + /* + * Hardware generates flip event for only those + * planes which were active previously. 
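+ * (A plane therefore contributes to pending_events only when the old CRTC
+ * state was active and the plane had a framebuffer attached, as checked
+ * just below; a plane that was not scanning anything out has no flip to
+ * complete, so no event will arrive for it.)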
+ */ + if (old_crtc_state->active && old_plane_state->fb != NULL) { + nv_new_crtc_state->nv_flip->pending_events++; + } + } + + return nv_new_crtc_state->nv_flip->pending_events != 0; +} + +static int __nv_drm_put_back_post_fence_fd( + struct nv_drm_plane_state *plane_state, + const struct NvKmsKapiLayerReplyConfig *layer_reply_config) +{ + int fd = layer_reply_config->postSyncptFd; + + if ((fd >= 0) && (plane_state->fd_user_ptr != NULL)) { + if (put_user(fd, plane_state->fd_user_ptr)) { + return -EFAULT; + } + + /*! set back to Null and let set_property specify it again */ + plane_state->fd_user_ptr = NULL; + } + return 0; +} + +static int __nv_drm_get_syncpt_data( + struct nv_drm_device *nv_dev, + struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state, + struct NvKmsKapiRequestedModeSetConfig *requested_config, + struct NvKmsKapiModeSetReplyConfig *reply_config) +{ + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + struct NvKmsKapiHeadReplyConfig *head_reply_config; + struct nv_drm_plane_state *plane_state; + struct drm_crtc_state *new_crtc_state = crtc->state; + struct drm_plane_state *old_plane_state = NULL; + struct drm_plane_state *new_plane_state = NULL; + struct drm_plane *plane = NULL; + int i, ret; + + if (!old_crtc_state->active && !new_crtc_state->active) { + /* + * crtc is not active in old and new states therefore all planes are + * disabled, exit early. + */ + return 0; + } + + head_reply_config = &reply_config->headReplyConfig[nv_crtc->head]; + + nv_drm_for_each_plane_in_state(old_crtc_state->state, plane, old_plane_state, i) { + struct nv_drm_plane *nv_plane = to_nv_plane(plane); + + if (plane->type == DRM_PLANE_TYPE_CURSOR || old_plane_state->crtc != crtc) { + continue; + } + + new_plane_state = plane->state; + + if (new_plane_state->crtc != crtc) { + continue; + } + + plane_state = to_nv_drm_plane_state(new_plane_state); + + ret = __nv_drm_put_back_post_fence_fd( + plane_state, + &head_reply_config->layerReplyConfig[nv_plane->layer_idx]); + + if (ret != 0) { + return ret; + } + } + + return 0; +} + +/** + * nv_drm_atomic_commit - validate/commit modeset config + * @dev: DRM device + * @state: atomic state tracking atomic update + * @commit: commit/check modeset config associated with atomic update + * + * @state tracks atomic update and modeset objects affected + * by the atomic update, but the state of the modeset objects it contains + * depends on the current stage of the update. + * At the commit stage, the proposed state is already stored in the current + * state, and @state contains old state for all affected modeset objects. + * At the check/validation stage, @state contains the proposed state for + * all affected objects. + * + * Sequence of atomic update - + * 1. The check/validation of proposed atomic state, + * 2. Do any other steps that might fail, + * 3. Put the proposed state into the current state pointers, + * 4. Actually commit the hardware state, + * 5. Cleanup old state. + * + * The function nv_drm_atomic_apply_modeset_config() is getting called + * at stages (1) and (4) after drm_atomic_helper_swap_state(). 
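+ *
+ * A rough sketch of how the two invocations line up with the entry points
+ * defined later in this file (the drm_atomic_helper_* calls are the core
+ * DRM helpers):
+ *
+ *   nv_drm_atomic_check()
+ *     |-> drm_atomic_helper_check()
+ *     |-> nv_drm_atomic_apply_modeset_config(commit = false)   stage (1)
+ *
+ *   nv_drm_atomic_commit()
+ *     |-> drm_atomic_helper_swap_state()                       stage (3)
+ *     |-> nv_drm_atomic_apply_modeset_config(commit = true)    stage (4)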
+ */ +static int +nv_drm_atomic_apply_modeset_config(struct drm_device *dev, + struct drm_atomic_state *state, + bool commit) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct NvKmsKapiRequestedModeSetConfig *requested_config = + &(to_nv_atomic_state(state)->config); + struct NvKmsKapiModeSetReplyConfig reply_config = { }; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i; + int ret; + + memset(requested_config, 0, sizeof(*requested_config)); + + /* Loop over affected crtcs and construct NvKmsKapiRequestedModeSetConfig */ + nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) { + /* + * When committing a state, the new state is already stored in + * crtc->state. When checking a proposed state, the proposed state is + * stored in crtc_state. + */ + struct drm_crtc_state *new_crtc_state = + commit ? crtc->state : crtc_state; + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + requested_config->headRequestedConfig[nv_crtc->head] = + to_nv_crtc_state(new_crtc_state)->req_config; + + requested_config->headsMask |= 1 << nv_crtc->head; + + if (commit) { + struct drm_crtc_state *old_crtc_state = crtc_state; + struct nv_drm_crtc_state *nv_new_crtc_state = + to_nv_crtc_state(new_crtc_state); + + nv_new_crtc_state->nv_flip->event = new_crtc_state->event; + nv_new_crtc_state->nv_flip->pending_events = 0; + new_crtc_state->event = NULL; + + /* + * If flip event will be generated by hardware + * then defer flip object processing to flip event from hardware. + */ + if (__will_generate_flip_event(crtc, old_crtc_state)) { + nv_drm_crtc_enqueue_flip(nv_crtc, + nv_new_crtc_state->nv_flip); + + nv_new_crtc_state->nv_flip = NULL; + } + } + } + + if (commit && nvKms->systemInfo.bAllowWriteCombining) { + /* + * XXX This call is required only if dumb buffer is going + * to be presented. + */ + nv_drm_write_combine_flush(); + } + + if (!nvKms->applyModeSetConfig(nv_dev->pDevice, + requested_config, + &reply_config, + commit)) { + return -EINVAL; + } + + if (commit && nv_dev->supportsSyncpts) { + nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) { + /*! loop over affected crtcs and get NvKmsKapiModeSetReplyConfig */ + ret = __nv_drm_get_syncpt_data( + nv_dev, crtc, crtc_state, requested_config, &reply_config); + if (ret != 0) { + return ret; + } + } + } + + return 0; +} + +int nv_drm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int ret = 0; + + if ((ret = drm_atomic_helper_check(dev, state)) != 0) { + goto done; + } + + ret = nv_drm_atomic_apply_modeset_config(dev, + state, false /* commit */); + +done: + return ret; +} + +/** + * __nv_drm_handle_flip_event - handle flip occurred event + * @nv_crtc: crtc on which flip has been occurred + * + * This handler dequeues the first nv_drm_flip from the crtc's flip_list, + * generates an event if requested at flip time, and frees the nv_drm_flip. + */ +static void __nv_drm_handle_flip_event(struct nv_drm_crtc *nv_crtc) +{ + struct drm_device *dev = nv_crtc->base.dev; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct nv_drm_flip *nv_flip; + + /* + * Acquire event_lock before nv_flip object dequeue, otherwise immediate + * flip event delivery from nv_drm_atomic_commit() races ahead and + * messes up with event delivery order. 
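+ * Holding dev->event_lock across the dequeue and the
+ * drm_crtc_send_vblank_event() calls below keeps them atomic with respect
+ * to the direct event delivery that nv_drm_atomic_commit() performs under
+ * the same lock, so completion events always go out in queue order.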
+ */ + spin_lock(&dev->event_lock); + nv_flip = nv_drm_crtc_dequeue_flip(nv_crtc); + if (likely(nv_flip != NULL)) { + struct nv_drm_flip *nv_deferred_flip, *nv_next_deferred_flip; + + if (nv_flip->event != NULL) { + drm_crtc_send_vblank_event(&nv_crtc->base, nv_flip->event); + } + + /* + * Process flips that were deferred until processing of this nv_flip + * object. + */ + list_for_each_entry_safe(nv_deferred_flip, + nv_next_deferred_flip, + &nv_flip->deferred_flip_list, list_entry) { + + if (nv_deferred_flip->event != NULL) { + drm_crtc_send_vblank_event(&nv_crtc->base, + nv_deferred_flip->event); + } + list_del(&nv_deferred_flip->list_entry); + + nv_drm_free(nv_deferred_flip); + } + } + spin_unlock(&dev->event_lock); + + wake_up_all(&nv_dev->flip_event_wq); + + nv_drm_free(nv_flip); +} + +int nv_drm_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool nonblock) +{ + int ret = -EBUSY; + + int i; + struct drm_crtc *crtc = NULL; + struct drm_crtc_state *crtc_state = NULL; + struct nv_drm_device *nv_dev = to_nv_device(dev); + + /* + * drm_mode_config_funcs::atomic_commit() mandates to return -EBUSY + * for nonblocking commit if previous updates (commit tasks/flip event) are + * pending. In case of blocking commits it mandates to wait for previous + * updates to complete. + */ + if (nonblock) { + nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + + /* + * Here you aren't required to hold nv_drm_crtc::flip_list_lock + * because: + * + * The core DRM driver acquires lock for all affected crtcs before + * calling into ->commit() hook, therefore it is not possible for + * other threads to call into ->commit() hook affecting same crtcs + * and enqueue flip objects into flip_list - + * + * nv_drm_atomic_commit_internal() + * |-> nv_drm_atomic_apply_modeset_config(commit=true) + * |-> nv_drm_crtc_enqueue_flip() + * + * Only possibility is list_empty check races with code path + * dequeuing flip object - + * + * __nv_drm_handle_flip_event() + * |-> nv_drm_crtc_dequeue_flip() + * + * But this race condition can't lead list_empty() to return + * incorrect result. nv_drm_crtc_dequeue_flip() in the middle of + * updating the list could not trick us into thinking the list is + * empty when it isn't. + */ + if (!list_empty(&nv_crtc->flip_list)) { + return -EBUSY; + } + } + } + +#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_HAS_STALL_ARG) + + /* + * nv_drm_atomic_commit_internal() + * implements blocking/non-blocking atomic commit using + * nv_drm_crtc::flip_list, it does not require any help from core DRM + * helper functions to stall commit processing. Therefore passing false to + * 'stall' parameter. + * In this context, failure from drm_atomic_helper_swap_state() is not + * expected. + */ + +#if defined(NV_DRM_ATOMIC_HELPER_SWAP_STATE_RETURN_INT) + ret = drm_atomic_helper_swap_state(state, false /* stall */); + if (WARN_ON(ret != 0)) { + return ret; + } +#else + drm_atomic_helper_swap_state(state, false /* stall */); +#endif + +#else + drm_atomic_helper_swap_state(dev, state); +#endif + + /* + * nv_drm_atomic_commit_internal() must not return failure after + * calling drm_atomic_helper_swap_state(). + */ + + if ((ret = nv_drm_atomic_apply_modeset_config( + dev, + state, true /* commit */)) != 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to apply atomic modeset. 
Error code: %d", + ret); + + goto done; + } + + nv_drm_for_each_crtc_in_state(state, crtc, crtc_state, i) { + struct nv_drm_crtc *nv_crtc = to_nv_crtc(crtc); + struct nv_drm_crtc_state *nv_new_crtc_state = + to_nv_crtc_state(crtc->state); + + /* + * If nv_drm_atomic_apply_modeset_config() hasn't consumed the flip + * object, no event will be generated for this flip, and we need process + * it: + */ + + if (nv_new_crtc_state->nv_flip != NULL) { + /* + * First, defer processing of all pending flips for this crtc until + * last flip in the queue has been processed. This is to ensure a + * correct order in event delivery. + */ + spin_lock(&nv_crtc->flip_list_lock); + if (!list_empty(&nv_crtc->flip_list)) { + struct nv_drm_flip *nv_last_flip = + list_last_entry(&nv_crtc->flip_list, + struct nv_drm_flip, list_entry); + + list_add(&nv_new_crtc_state->nv_flip->list_entry, + &nv_last_flip->deferred_flip_list); + + nv_new_crtc_state->nv_flip = NULL; + } + spin_unlock(&nv_crtc->flip_list_lock); + } + + if (nv_new_crtc_state->nv_flip != NULL) { + /* + * Then, if no more pending flips for this crtc, deliver event for the + * current flip. + */ + if (nv_new_crtc_state->nv_flip->event != NULL) { + spin_lock(&dev->event_lock); + drm_crtc_send_vblank_event(crtc, + nv_new_crtc_state->nv_flip->event); + spin_unlock(&dev->event_lock); + } + + nv_drm_free(nv_new_crtc_state->nv_flip); + nv_new_crtc_state->nv_flip = NULL; + } + + if (!nonblock) { + /* + * Here you aren't required to hold nv_drm_crtc::flip_list_lock + * because: + * + * The core DRM driver acquires lock for all affected crtcs before + * calling into ->commit() hook, therefore it is not possible for + * other threads to call into ->commit() hook affecting same crtcs + * and enqueue flip objects into flip_list - + * + * nv_drm_atomic_commit_internal() + * |-> nv_drm_atomic_apply_modeset_config(commit=true) + * |-> nv_drm_crtc_enqueue_flip() + * + * Only possibility is list_empty check races with code path + * dequeuing flip object - + * + * __nv_drm_handle_flip_event() + * |-> nv_drm_crtc_dequeue_flip() + * + * But this race condition can't lead list_empty() to return + * incorrect result. nv_drm_crtc_dequeue_flip() in the middle of + * updating the list could not trick us into thinking the list is + * empty when it isn't. + */ + if (wait_event_timeout( + nv_dev->flip_event_wq, + list_empty(&nv_crtc->flip_list), + 3 * HZ /* 3 second */) == 0) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Flip event timeout on head %u", nv_crtc->head); + } + } + } + +done: + +#if defined(NV_DRM_ATOMIC_STATE_REF_COUNTING_PRESENT) + /* + * If ref counting is present, state will be freed when the caller + * drops its reference after we return. + */ +#else + drm_atomic_state_free(state); +#endif + + return 0; +} + +void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev, + NvU32 head, NvU32 plane) +{ + struct nv_drm_crtc *nv_crtc = nv_drm_crtc_lookup(nv_dev, head); + + if (NV_DRM_WARN(nv_crtc == NULL)) { + return; + } + + __nv_drm_handle_flip_event(nv_crtc); +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-modeset.h b/kernel-open/nvidia-drm/nvidia-drm-modeset.h new file mode 100644 index 000000000..40df6314b --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-modeset.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_MODESET_H__ +#define __NVIDIA_DRM_MODESET_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvkms-kapi.h" + +struct drm_device; +struct drm_atomic_state; + +struct drm_atomic_state *nv_drm_atomic_state_alloc(struct drm_device *dev); +void nv_drm_atomic_state_clear(struct drm_atomic_state *state); +void nv_drm_atomic_state_free(struct drm_atomic_state *state); + +int nv_drm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state); + +int nv_drm_atomic_commit(struct drm_device *dev, + struct drm_atomic_state *state, bool nonblock); + + +void nv_drm_handle_flip_occurred(struct nv_drm_device *nv_dev, + NvU32 head, NvU32 plane); + +int nv_drm_shut_down_all_crtcs(struct drm_device *dev); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_MODESET_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-os-interface.h b/kernel-open/nvidia-drm/nvidia-drm-os-interface.h new file mode 100644 index 000000000..ac527528a --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-os-interface.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_OS_INTERFACE_H__ +#define __NVIDIA_DRM_OS_INTERFACE_H__ + +#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */ + +#include "nvtypes.h" + +#if defined(NV_DRM_AVAILABLE) + +struct page; + +/* Set to true when the atomic modeset feature is enabled. */ +extern bool nv_drm_modeset_module_param; + +void *nv_drm_calloc(size_t nmemb, size_t size); + +void nv_drm_free(void *ptr); + +char *nv_drm_asprintf(const char *fmt, ...); + +void nv_drm_write_combine_flush(void); + +int nv_drm_lock_user_pages(unsigned long address, + unsigned long pages_count, struct page ***pages); + +void nv_drm_unlock_user_pages(unsigned long pages_count, struct page **pages); + +void *nv_drm_vmap(struct page **pages, unsigned long pages_count); + +void nv_drm_vunmap(void *address); + +#endif + +#endif /* __NVIDIA_DRM_OS_INTERFACE_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c b/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c new file mode 100644 index 000000000..d76ecf372 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-prime-fence.c @@ -0,0 +1,518 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-ioctl.h" +#include "nvidia-drm-gem.h" +#include "nvidia-drm-prime-fence.h" +#include "nvidia-dma-resv-helper.h" + +#if defined(NV_DRM_FENCE_AVAILABLE) + +#include "nvidia-dma-fence-helper.h" + +struct nv_drm_fence_context { + struct nv_drm_device *nv_dev; + + uint32_t context; + + NvU64 fenceSemIndex; /* Index into semaphore surface */ + + /* Mapped semaphore surface */ + struct NvKmsKapiMemory *pSemSurface; + NvU32 *pLinearAddress; + + /* Protects nv_drm_fence_context::{pending, last_seqno} */ + spinlock_t lock; + + /* + * Software signaling structures. __nv_drm_fence_context_new() + * allocates channel event and __nv_drm_fence_context_destroy() frees it. + * There are no simultaneous read/write access to 'cb', therefore it does + * not require spin-lock protection. 
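+ * ('cb' is written once in __nv_drm_fence_context_new() and once in
+ * __nv_drm_fence_context_destroy(); only 'pending' and 'last_seqno' are
+ * accessed concurrently, and those are covered by 'lock' above.)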
+ */ + struct NvKmsKapiChannelEvent *cb; + + /* List of pending fences which are not yet signaled */ + struct list_head pending; + + unsigned last_seqno; +}; + +struct nv_drm_prime_fence { + struct list_head list_entry; + nv_dma_fence_t base; + spinlock_t lock; +}; + +static inline +struct nv_drm_prime_fence *to_nv_drm_prime_fence(nv_dma_fence_t *fence) +{ + return container_of(fence, struct nv_drm_prime_fence, base); +} + +static const char* +nv_drm_gem_prime_fence_op_get_driver_name(nv_dma_fence_t *fence) +{ + return "NVIDIA"; +} + +static const char* +nv_drm_gem_prime_fence_op_get_timeline_name(nv_dma_fence_t *fence) +{ + return "nvidia.prime"; +} + +static bool nv_drm_gem_prime_fence_op_enable_signaling(nv_dma_fence_t *fence) +{ + // DO NOTHING + return true; +} + +static void nv_drm_gem_prime_fence_op_release(nv_dma_fence_t *fence) +{ + struct nv_drm_prime_fence *nv_fence = to_nv_drm_prime_fence(fence); + nv_drm_free(nv_fence); +} + +static signed long +nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence, + bool intr, signed long timeout) +{ + /* + * If the waiter requests to wait with no timeout, force a timeout to ensure + * that it won't get stuck forever in the kernel if something were to go + * wrong with signaling, such as a malicious userspace not releasing the + * semaphore. + * + * 96 ms (roughly 6 frames @ 60 Hz) is arbitrarily chosen to be long enough + * that it should never get hit during normal operation, but not so long + * that the system becomes unresponsive. + */ + return nv_dma_fence_default_wait(fence, intr, + (timeout == MAX_SCHEDULE_TIMEOUT) ? + msecs_to_jiffies(96) : timeout); +} + +static const nv_dma_fence_ops_t nv_drm_gem_prime_fence_ops = { + .get_driver_name = nv_drm_gem_prime_fence_op_get_driver_name, + .get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name, + .enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling, + .release = nv_drm_gem_prime_fence_op_release, + .wait = nv_drm_gem_prime_fence_op_wait, +}; + +static inline void +__nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence) +{ + list_del(&nv_fence->list_entry); + nv_dma_fence_signal(&nv_fence->base); + nv_dma_fence_put(&nv_fence->base); +} + +static void nv_drm_gem_prime_force_fence_signal( + struct nv_drm_fence_context *nv_fence_context) +{ + WARN_ON(!spin_is_locked(&nv_fence_context->lock)); + + while (!list_empty(&nv_fence_context->pending)) { + struct nv_drm_prime_fence *nv_fence = list_first_entry( + &nv_fence_context->pending, + typeof(*nv_fence), + list_entry); + + __nv_drm_prime_fence_signal(nv_fence); + } +} + +static void nv_drm_gem_prime_fence_event +( + void *dataPtr, + NvU32 dataU32 +) +{ + struct nv_drm_fence_context *nv_fence_context = dataPtr; + + spin_lock(&nv_fence_context->lock); + + while (!list_empty(&nv_fence_context->pending)) { + struct nv_drm_prime_fence *nv_fence = list_first_entry( + &nv_fence_context->pending, + typeof(*nv_fence), + list_entry); + + /* Index into surface with 16 byte stride */ + unsigned int seqno = *((nv_fence_context->pLinearAddress) + + (nv_fence_context->fenceSemIndex * 4)); + + if (nv_fence->base.seqno > seqno) { + /* + * Fences in list are placed in increasing order of sequence + * number, breaks a loop once found first fence not + * ready to signal. 
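+ * (Fences are appended in __nv_drm_fence_context_create_fence() as they
+ * are created, so once the semaphore value read above is still below a
+ * fence's seqno, every later entry is unsignaled as well and the walk can
+ * stop here.)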
+ */ + break; + } + + __nv_drm_prime_fence_signal(nv_fence); + } + + spin_unlock(&nv_fence_context->lock); +} + +static inline struct nv_drm_fence_context *__nv_drm_fence_context_new( + struct nv_drm_device *nv_dev, + struct drm_nvidia_fence_context_create_params *p) +{ + struct nv_drm_fence_context *nv_fence_context; + struct NvKmsKapiMemory *pSemSurface; + NvU32 *pLinearAddress; + + /* Allocate backup nvkms resources */ + + pSemSurface = nvKms->importMemory(nv_dev->pDevice, + p->size, + p->import_mem_nvkms_params_ptr, + p->import_mem_nvkms_params_size); + if (!pSemSurface) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to import fence semaphore surface"); + + goto failed; + } + + if (!nvKms->mapMemory(nv_dev->pDevice, + pSemSurface, + NVKMS_KAPI_MAPPING_TYPE_KERNEL, + (void **) &pLinearAddress)) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to map fence semaphore surface"); + + goto failed_to_map_memory; + } + + /* + * Allocate a fence context object, initialize it and allocate channel + * event for it. + */ + + if ((nv_fence_context = nv_drm_calloc( + 1, + sizeof(*nv_fence_context))) == NULL) { + goto failed_alloc_fence_context; + } + + /* + * nv_dma_fence_context_alloc() cannot fail, so we do not need + * to check a return value. + */ + + *nv_fence_context = (struct nv_drm_fence_context) { + .nv_dev = nv_dev, + .context = nv_dma_fence_context_alloc(1), + .pSemSurface = pSemSurface, + .pLinearAddress = pLinearAddress, + .fenceSemIndex = p->index, + }; + + INIT_LIST_HEAD(&nv_fence_context->pending); + + spin_lock_init(&nv_fence_context->lock); + + /* + * Except 'cb', the fence context should be completely initialized + * before channel event allocation because the fence context may start + * receiving events immediately after allocation. + * + * There are no simultaneous read/write access to 'cb', therefore it does + * not require spin-lock protection. + */ + nv_fence_context->cb = + nvKms->allocateChannelEvent(nv_dev->pDevice, + nv_drm_gem_prime_fence_event, + nv_fence_context, + p->event_nvkms_params_ptr, + p->event_nvkms_params_size); + if (!nv_fence_context->cb) { + NV_DRM_DEV_LOG_ERR(nv_dev, + "Failed to allocate fence signaling event"); + goto failed_to_allocate_channel_event; + } + + return nv_fence_context; + +failed_to_allocate_channel_event: + nv_drm_free(nv_fence_context); + +failed_alloc_fence_context: + + nvKms->unmapMemory(nv_dev->pDevice, + pSemSurface, + NVKMS_KAPI_MAPPING_TYPE_KERNEL, + (void *) pLinearAddress); + +failed_to_map_memory: + nvKms->freeMemory(nv_dev->pDevice, pSemSurface); + +failed: + return NULL; +} + +static void __nv_drm_fence_context_destroy( + struct nv_drm_fence_context *nv_fence_context) +{ + struct nv_drm_device *nv_dev = nv_fence_context->nv_dev; + + /* + * Free channel event before destroying the fence context, otherwise event + * callback continue to get called. 
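+ * That is, nvKms->freeChannelEvent() must come first so that
+ * nv_drm_gem_prime_fence_event() can no longer run against a context whose
+ * pending list and semaphore mapping are about to be torn down below.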
+ */ + nvKms->freeChannelEvent(nv_dev->pDevice, nv_fence_context->cb); + + /* Force signal all pending fences and empty pending list */ + spin_lock(&nv_fence_context->lock); + + nv_drm_gem_prime_force_fence_signal(nv_fence_context); + + spin_unlock(&nv_fence_context->lock); + + /* Free nvkms resources */ + + nvKms->unmapMemory(nv_dev->pDevice, + nv_fence_context->pSemSurface, + NVKMS_KAPI_MAPPING_TYPE_KERNEL, + (void *) nv_fence_context->pLinearAddress); + + nvKms->freeMemory(nv_dev->pDevice, nv_fence_context->pSemSurface); + + nv_drm_free(nv_fence_context); +} + +static nv_dma_fence_t *__nv_drm_fence_context_create_fence( + struct nv_drm_fence_context *nv_fence_context, + unsigned int seqno) +{ + struct nv_drm_prime_fence *nv_fence; + int ret = 0; + + if ((nv_fence = nv_drm_calloc(1, sizeof(*nv_fence))) == NULL) { + ret = -ENOMEM; + goto out; + } + + spin_lock(&nv_fence_context->lock); + + /* + * If seqno wrapped, force signal fences to make sure none of them + * get stuck. + */ + if (seqno < nv_fence_context->last_seqno) { + nv_drm_gem_prime_force_fence_signal(nv_fence_context); + } + + INIT_LIST_HEAD(&nv_fence->list_entry); + + spin_lock_init(&nv_fence->lock); + + nv_dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops, + &nv_fence->lock, nv_fence_context->context, + seqno); + + list_add_tail(&nv_fence->list_entry, &nv_fence_context->pending); + + nv_fence_context->last_seqno = seqno; + + spin_unlock(&nv_fence_context->lock); + +out: + return ret != 0 ? ERR_PTR(ret) : &nv_fence->base; +} + +int nv_drm_fence_supported_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + return nv_dev->pDevice ? 0 : -EINVAL; +} + +struct nv_drm_gem_fence_context { + struct nv_drm_gem_object base; + struct nv_drm_fence_context *nv_fence_context; +}; + +static inline struct nv_drm_gem_fence_context *to_gem_fence_context( + struct nv_drm_gem_object *nv_gem) +{ + if (nv_gem != NULL) { + return container_of(nv_gem, struct nv_drm_gem_fence_context, base); + } + + return NULL; +} + +/* + * Tear down of the 'struct nv_drm_gem_fence_context' object is not expected + * to be happen from any worker thread, if that happen it causes dead-lock + * because tear down sequence calls to flush all existing + * worker thread. 
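+ * In other words, the final GEM reference must not be dropped from one of
+ * those worker threads: the flush performed during teardown would then
+ * wait on the very thread that is executing the teardown.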
+ */ +static void __nv_drm_gem_fence_context_free(struct nv_drm_gem_object *nv_gem) +{ + struct nv_drm_gem_fence_context *nv_gem_fence_context = + to_gem_fence_context(nv_gem); + + __nv_drm_fence_context_destroy(nv_gem_fence_context->nv_fence_context); + + nv_drm_free(nv_gem_fence_context); +} + +const struct nv_drm_gem_object_funcs nv_gem_fence_context_ops = { + .free = __nv_drm_gem_fence_context_free, +}; + +static inline +struct nv_drm_gem_fence_context *__nv_drm_gem_object_fence_context_lookup( + struct drm_device *dev, + struct drm_file *filp, + u32 handle) +{ + struct nv_drm_gem_object *nv_gem = + nv_drm_gem_object_lookup(dev, filp, handle); + + if (nv_gem != NULL && nv_gem->ops != &nv_gem_fence_context_ops) { + nv_drm_gem_object_unreference_unlocked(nv_gem); + return NULL; + } + + return to_gem_fence_context(nv_gem); +} + +int nv_drm_fence_context_create_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_fence_context_create_params *p = data; + struct nv_drm_gem_fence_context *nv_gem_fence_context = NULL; + + if ((nv_gem_fence_context = nv_drm_calloc( + 1, + sizeof(struct nv_drm_gem_fence_context))) == NULL) { + goto done; + } + + if ((nv_gem_fence_context->nv_fence_context = + __nv_drm_fence_context_new(nv_dev, p)) == NULL) { + goto fence_context_new_failed; + } + + nv_drm_gem_object_init(nv_dev, + &nv_gem_fence_context->base, + &nv_gem_fence_context_ops, + 0 /* size */, + NULL /* pMemory */); + + return nv_drm_gem_handle_create_drop_reference(filep, + &nv_gem_fence_context->base, + &p->handle); + +fence_context_new_failed: + nv_drm_free(nv_gem_fence_context); + +done: + return -ENOMEM; +} + +int nv_drm_gem_fence_attach_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep) +{ + int ret = -EINVAL; + struct nv_drm_device *nv_dev = to_nv_device(dev); + struct drm_nvidia_gem_fence_attach_params *p = data; + + struct nv_drm_gem_object *nv_gem; + struct nv_drm_gem_fence_context *nv_gem_fence_context; + + nv_dma_fence_t *fence; + + nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle); + + if (!nv_gem) { + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for fence attach: 0x%08x", + p->handle); + + goto done; + } + + if((nv_gem_fence_context = __nv_drm_gem_object_fence_context_lookup( + nv_dev->dev, + filep, + p->fence_context_handle)) == NULL) { + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to lookup gem object for fence context: 0x%08x", + p->fence_context_handle); + + goto fence_context_lookup_failed; + } + + if (IS_ERR(fence = __nv_drm_fence_context_create_fence( + nv_gem_fence_context->nv_fence_context, + p->sem_thresh))) { + ret = PTR_ERR(fence); + + NV_DRM_DEV_LOG_ERR( + nv_dev, + "Failed to allocate fence: 0x%08x", p->handle); + + goto fence_context_create_fence_failed; + } + + nv_dma_resv_add_excl_fence(&nv_gem->resv, fence); + + ret = 0; + +fence_context_create_fence_failed: + nv_drm_gem_object_unreference_unlocked(&nv_gem_fence_context->base); + +fence_context_lookup_failed: + nv_drm_gem_object_unreference_unlocked(nv_gem); + +done: + return ret; +} + +#endif /* NV_DRM_FENCE_AVAILABLE */ + +#endif /* NV_DRM_AVAILABLE */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h b/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h new file mode 100644 index 000000000..5afa2ae95 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-prime-fence.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_PRIME_FENCE_H__ +#define __NVIDIA_DRM_PRIME_FENCE_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_AVAILABLE) + +struct drm_file; +struct drm_device; + +#if defined(NV_DRM_FENCE_AVAILABLE) + +int nv_drm_fence_supported_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_fence_context_create_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +int nv_drm_gem_fence_attach_ioctl(struct drm_device *dev, + void *data, struct drm_file *filep); + +#endif /* NV_DRM_FENCE_AVAILABLE */ + +#endif /* NV_DRM_AVAILABLE */ + +#endif /* __NVIDIA_DRM_PRIME_FENCE_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-priv.h b/kernel-open/nvidia-drm/nvidia-drm-priv.h new file mode 100644 index 000000000..dce6a531f --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-priv.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_PRIV_H__ +#define __NVIDIA_DRM_PRIV_H__ + +#include "nvidia-drm-conftest.h" /* NV_DRM_AVAILABLE */ + +#if defined(NV_DRM_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_DEVICE_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_GEM_H_PRESENT) +#include +#endif + +#include "nvidia-drm-os-interface.h" + +#include "nvkms-kapi.h" + +#define NV_DRM_LOG_ERR(__fmt, ...) \ + DRM_ERROR("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__) + +#define NV_DRM_LOG_INFO(__fmt, ...) \ + DRM_INFO("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__) + +#define NV_DRM_DEV_LOG_INFO(__dev, __fmt, ...) \ + NV_DRM_LOG_INFO("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__) + +#define NV_DRM_DEV_LOG_ERR(__dev, __fmt, ...) \ + NV_DRM_LOG_ERR("[GPU ID 0x%08x] " __fmt, __dev->gpu_info.gpu_id, ##__VA_ARGS__) + +#define NV_DRM_WARN(__condition) WARN_ON((__condition)) + +#define NV_DRM_DEBUG_DRIVER(__fmt, ...) \ + DRM_DEBUG_DRIVER("[nvidia-drm] " __fmt "\n", ##__VA_ARGS__) + +#define NV_DRM_DEV_DEBUG_DRIVER(__dev, __fmt, ...) \ + DRM_DEBUG_DRIVER("[GPU ID 0x%08x] " __fmt, \ + __dev->gpu_info.gpu_id, ##__VA_ARGS__) + +struct nv_drm_device { + nv_gpu_info_t gpu_info; + + struct drm_device *dev; + + struct NvKmsKapiDevice *pDevice; + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + /* + * Lock to protect drm-subsystem and fields of this structure + * from concurrent access. + * + * Do not hold this lock if some lock from core drm-subsystem + * is already held, locking order should be like this - + * + * mutex_lock(nv_drm_device::lock); + * .... + * mutex_lock(drm_device::mode_config::lock); + * .... + * ....... + * mutex_unlock(drm_device::mode_config::lock); + * ........ + * .. + * mutex_lock(drm_device::struct_mutex); + * .... + * ........ + * mutex_unlock(drm_device::struct_mutex); + * .. + * mutex_unlock(nv_drm_device::lock); + */ + struct mutex lock; + + NvU32 pitchAlignment; + + NvU8 genericPageKind; + NvU8 pageKindGeneration; + NvU8 sectorLayout; +#if defined(NV_DRM_FORMAT_MODIFIERS_PRESENT) + NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */]; +#endif + + atomic_t enable_event_handling; + + /** + * @flip_event_wq: + * + * The wait queue on which nv_drm_atomic_commit_internal() sleeps until + * next flip event occurs. + */ + wait_queue_head_t flip_event_wq; + +#endif + + NvBool hasVideoMemory; + + NvBool supportsSyncpts; + + struct drm_property *nv_out_fence_property; + + struct nv_drm_device *next; +}; + +static inline struct nv_drm_device *to_nv_device( + struct drm_device *dev) +{ + return dev->dev_private; +} + +extern const struct NvKmsKapiFunctionsTable* const nvKms; + +#endif /* defined(NV_DRM_AVAILABLE) */ + +#endif /* __NVIDIA_DRM_PRIV_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm-utils.c b/kernel-open/nvidia-drm/nvidia-drm-utils.c new file mode 100644 index 000000000..42fb0cd65 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-utils.c @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm-conftest.h" /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#if defined(NV_DRM_DRMP_H_PRESENT) +#include +#endif + +#if defined(NV_DRM_DRM_PLANE_H_PRESENT) +#include +#endif + +#include +#include + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-utils.h" + +struct NvKmsKapiConnectorInfo* +nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice, + NvKmsKapiConnector hConnector) +{ + struct NvKmsKapiConnectorInfo *connectorInfo = + nv_drm_calloc(1, sizeof(*connectorInfo)); + + if (connectorInfo == NULL) { + return ERR_PTR(-ENOMEM); + } + + if (!nvKms->getConnectorInfo(pDevice, hConnector, connectorInfo)) { + nv_drm_free(connectorInfo); + + return ERR_PTR(-EINVAL); + } + + return connectorInfo; +} + +int +nvkms_connector_signal_to_drm_encoder_signal(NvKmsConnectorSignalFormat format) +{ + switch (format) { + default: + case NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN: + return DRM_MODE_ENCODER_NONE; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS: + case NVKMS_CONNECTOR_SIGNAL_FORMAT_DP: + return DRM_MODE_ENCODER_TMDS; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS: + return DRM_MODE_ENCODER_LVDS; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA: + return DRM_MODE_ENCODER_DAC; + case NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI: + return DRM_MODE_ENCODER_DSI; + } +} + +int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type, + NvBool internal) +{ + switch (type) { + default: + case NVKMS_CONNECTOR_TYPE_UNKNOWN: + return DRM_MODE_CONNECTOR_Unknown; + case NVKMS_CONNECTOR_TYPE_DP: + return + internal ? 
+ DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort; + case NVKMS_CONNECTOR_TYPE_HDMI: + return DRM_MODE_CONNECTOR_HDMIA; + case NVKMS_CONNECTOR_TYPE_DVI_D: + return DRM_MODE_CONNECTOR_DVID; + case NVKMS_CONNECTOR_TYPE_DVI_I: + return DRM_MODE_CONNECTOR_DVII; + case NVKMS_CONNECTOR_TYPE_LVDS: + return DRM_MODE_CONNECTOR_LVDS; + case NVKMS_CONNECTOR_TYPE_VGA: + return DRM_MODE_CONNECTOR_VGA; + case NVKMS_CONNECTOR_TYPE_DSI: + return DRM_MODE_CONNECTOR_DSI; + case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: + return DRM_MODE_CONNECTOR_DisplayPort; + } +} + +void +nvkms_display_mode_to_drm_mode(const struct NvKmsKapiDisplayMode *displayMode, + struct drm_display_mode *mode) +{ +#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH) + mode->vrefresh = (displayMode->timings.refreshRate + 500) / 1000; /* In Hz */ +#endif + + mode->clock = (displayMode->timings.pixelClockHz + 500) / 1000; /* In Hz */ + + mode->hdisplay = displayMode->timings.hVisible; + mode->hsync_start = displayMode->timings.hSyncStart; + mode->hsync_end = displayMode->timings.hSyncEnd; + mode->htotal = displayMode->timings.hTotal; + mode->hskew = displayMode->timings.hSkew; + + mode->vdisplay = displayMode->timings.vVisible; + mode->vsync_start = displayMode->timings.vSyncStart; + mode->vsync_end = displayMode->timings.vSyncEnd; + mode->vtotal = displayMode->timings.vTotal; + + if (displayMode->timings.flags.interlaced) { + mode->flags |= DRM_MODE_FLAG_INTERLACE; + } + + if (displayMode->timings.flags.doubleScan) { + mode->flags |= DRM_MODE_FLAG_DBLSCAN; + } + + if (displayMode->timings.flags.hSyncPos) { + mode->flags |= DRM_MODE_FLAG_PHSYNC; + } + + if (displayMode->timings.flags.hSyncNeg) { + mode->flags |= DRM_MODE_FLAG_NHSYNC; + } + + if (displayMode->timings.flags.vSyncPos) { + mode->flags |= DRM_MODE_FLAG_PVSYNC; + } + + if (displayMode->timings.flags.vSyncNeg) { + mode->flags |= DRM_MODE_FLAG_NVSYNC; + } + + mode->width_mm = displayMode->timings.widthMM; + mode->height_mm = displayMode->timings.heightMM; + + if (strlen(displayMode->name) != 0) { + memcpy( + mode->name, displayMode->name, + min(sizeof(mode->name), sizeof(displayMode->name))); + + mode->name[sizeof(mode->name) - 1] = '\0'; + } else { + drm_mode_set_name(mode); + } +} + +void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src, + struct NvKmsKapiDisplayMode *dst) +{ +#if defined(NV_DRM_DISPLAY_MODE_HAS_VREFRESH) + dst->timings.refreshRate = src->vrefresh * 1000; +#else + dst->timings.refreshRate = drm_mode_vrefresh(src) * 1000; +#endif + + dst->timings.pixelClockHz = src->clock * 1000; /* In Hz */ + + dst->timings.hVisible = src->hdisplay; + dst->timings.hSyncStart = src->hsync_start; + dst->timings.hSyncEnd = src->hsync_end; + dst->timings.hTotal = src->htotal; + dst->timings.hSkew = src->hskew; + + dst->timings.vVisible = src->vdisplay; + dst->timings.vSyncStart = src->vsync_start; + dst->timings.vSyncEnd = src->vsync_end; + dst->timings.vTotal = src->vtotal; + + if (src->flags & DRM_MODE_FLAG_INTERLACE) { + dst->timings.flags.interlaced = NV_TRUE; + } else { + dst->timings.flags.interlaced = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_DBLSCAN) { + dst->timings.flags.doubleScan = NV_TRUE; + } else { + dst->timings.flags.doubleScan = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_PHSYNC) { + dst->timings.flags.hSyncPos = NV_TRUE; + } else { + dst->timings.flags.hSyncPos = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_NHSYNC) { + dst->timings.flags.hSyncNeg = NV_TRUE; + } else { + dst->timings.flags.hSyncNeg = NV_FALSE; + } + + if (src->flags 
& DRM_MODE_FLAG_PVSYNC) { + dst->timings.flags.vSyncPos = NV_TRUE; + } else { + dst->timings.flags.vSyncPos = NV_FALSE; + } + + if (src->flags & DRM_MODE_FLAG_NVSYNC) { + dst->timings.flags.vSyncNeg = NV_TRUE; + } else { + dst->timings.flags.vSyncNeg = NV_FALSE; + } + + dst->timings.widthMM = src->width_mm; + dst->timings.heightMM = src->height_mm; + + memcpy(dst->name, src->name, min(sizeof(dst->name), sizeof(src->name))); +} + +#endif diff --git a/kernel-open/nvidia-drm/nvidia-drm-utils.h b/kernel-open/nvidia-drm/nvidia-drm-utils.h new file mode 100644 index 000000000..2c0588a8c --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm-utils.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVIDIA_DRM_UTILS_H__ +#define __NVIDIA_DRM_UTILS_H__ + +#include "nvidia-drm-conftest.h" + +#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE) + +#include "nvkms-kapi.h" + +enum drm_plane_type; +struct drm_display_mode; + +struct NvKmsKapiConnectorInfo* +nvkms_get_connector_info(struct NvKmsKapiDevice *pDevice, + NvKmsKapiConnector hConnector); + +int nvkms_connector_signal_to_drm_encoder_signal( + NvKmsConnectorSignalFormat format); + +int nvkms_connector_type_to_drm_connector_type(NvKmsConnectorType type, + NvBool internal); + +void nvkms_display_mode_to_drm_mode( + const struct NvKmsKapiDisplayMode *displayMode, + struct drm_display_mode *mode); + +void drm_mode_to_nvkms_display_mode(const struct drm_display_mode *src, + struct NvKmsKapiDisplayMode *dst); + +#endif /* NV_DRM_ATOMIC_MODESET_AVAILABLE */ + +#endif /* __NVIDIA_DRM_UTILS_H__ */ diff --git a/kernel-open/nvidia-drm/nvidia-drm.Kbuild b/kernel-open/nvidia-drm/nvidia-drm.Kbuild new file mode 100644 index 000000000..46615d66d --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm.Kbuild @@ -0,0 +1,117 @@ +########################################################################### +# Kbuild fragment for nvidia-drm.ko +########################################################################### + +# +# Define NVIDIA_DRM_{SOURCES,OBJECTS} +# + +NVIDIA_DRM_SOURCES = +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-drv.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-utils.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-crtc.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-encoder.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-connector.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-fb.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-modeset.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-prime-fence.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-linux.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-helper.c +NVIDIA_DRM_SOURCES += nvidia-drm/nv-pci-table.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-nvkms-memory.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-user-memory.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-gem-dma-buf.c +NVIDIA_DRM_SOURCES += nvidia-drm/nvidia-drm-format.c + +NVIDIA_DRM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_DRM_SOURCES)) + +obj-m += nvidia-drm.o +nvidia-drm-y := $(NVIDIA_DRM_OBJECTS) + +NVIDIA_DRM_KO = nvidia-drm/nvidia-drm.ko + +NV_KERNEL_MODULE_TARGETS += $(NVIDIA_DRM_KO) + +# +# Define nvidia-drm.ko-specific CFLAGS. 
+# + +NVIDIA_DRM_CFLAGS += -I$(src)/nvidia-drm +NVIDIA_DRM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0 + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_DRM_OBJECTS), $(NVIDIA_DRM_CFLAGS)) + +# +# Register the conftests needed by nvidia-drm.ko +# + +NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_DRM_OBJECTS) + +NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available +NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_atomic_available +NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_inc +NV_CONFTEST_GENERIC_COMPILE_TESTS += is_export_symbol_gpl_refcount_dec_and_test +NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_alpha_blending_available + +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_unref +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_reinit_primary_mode_group +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages_remote +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_user_pages +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_lookup +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_state_ref_counting +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_driver_has_gem_prime_res_obj +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_atomic_helper_connector_dpms +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_funcs_have_mode_in_name +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_framebuffer_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_dev_put +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_format_num_planes +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_connector_for_each_possible_encoder +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_rotation_available +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_vma_offset_exact_lookup_locked +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked +NV_CONFTEST_FUNCTION_COMPILE_TESTS += nvhost_dma_fence_unpack + +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_present +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_bus_type +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_irq +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_bus_has_get_name +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_device_list +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_legacy_dev_list +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_set_busid +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_connectors_changed +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_init_function_args +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_helper_mode_fill_fb_struct +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_master_drop_has_from_release_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_unload_has_int_return_type +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_crtc_destroy_state_has_crtc_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_plane_destroy_state_has_plane_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_object_find_has_file_priv_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += dma_buf_owner +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_connector_list_iter +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_atomic_helper_swap_state_has_stall_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_prime_flag_present +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_async_flip +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_pageflip_flags +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_format_modifiers_present 
+NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_node_is_allowed_has_tag_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_vma_offset_node_has_readonly +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_display_mode_has_vrefresh +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_master_set_has_int_return_type +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_free_object +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_driver_has_gem_prime_callbacks +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_atomic_check_has_atomic_state_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_vmap_has_map_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_plane_atomic_check_has_atomic_state_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_device_has_pdev +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_crtc_state_has_no_vblank +NV_CONFTEST_TYPE_COMPILE_TESTS += drm_mode_config_has_allow_fb_modifiers diff --git a/kernel-open/nvidia-drm/nvidia-drm.c b/kernel-open/nvidia-drm/nvidia-drm.c new file mode 100644 index 000000000..a191ccf7c --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-drm.h" + +#if defined(NV_DRM_AVAILABLE) + +#include "nvidia-drm-priv.h" +#include "nvidia-drm-drv.h" + +static struct NvKmsKapiFunctionsTable nvKmsFuncsTable = { + .versionString = NV_VERSION_STRING, +}; + +const struct NvKmsKapiFunctionsTable* const nvKms = &nvKmsFuncsTable; + +#endif + +int nv_drm_init(void) +{ +#if defined(NV_DRM_AVAILABLE) + if (!nvKmsKapiGetFunctionsTable(&nvKmsFuncsTable)) { + NV_DRM_LOG_ERR( + "Version mismatch: nvidia-modeset.ko(%s) nvidia-drm.ko(%s)", + nvKmsFuncsTable.versionString, NV_VERSION_STRING); + return -EINVAL; + } + + return nv_drm_probe_devices(); +#else + return 0; +#endif +} + +void nv_drm_exit(void) +{ +#if defined(NV_DRM_AVAILABLE) + nv_drm_remove_devices(); +#endif +} diff --git a/kernel-open/nvidia-drm/nvidia-drm.h b/kernel-open/nvidia-drm/nvidia-drm.h new file mode 100644 index 000000000..9f1c31ce3 --- /dev/null +++ b/kernel-open/nvidia-drm/nvidia-drm.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_DRM_H__ +#define __NVIDIA_DRM_H__ + +#include "nvidia-drm-conftest.h" + +int nv_drm_init(void); +void nv_drm_exit(void); + +#endif /* __NVIDIA_DRM_H__ */ diff --git a/kernel-open/nvidia-modeset/nv-kthread-q.c b/kernel-open/nvidia-modeset/nv-kthread-q.c new file mode 100644 index 000000000..5a95f4a40 --- /dev/null +++ b/kernel-open/nvidia-modeset/nv-kthread-q.c @@ -0,0 +1,335 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-kthread-q.h" +#include "nv-list-helpers.h" + +#include +#include +#include +#include +#include + +#if defined(NV_LINUX_BUG_H_PRESENT) + #include +#else + #include +#endif + +// Today's implementation is a little simpler and more limited than the +// API description allows for in nv-kthread-q.h. Details include: +// +// 1. Each nv_kthread_q instance is a first-in, first-out queue. +// +// 2. Each nv_kthread_q instance is serviced by exactly one kthread. +// +// You can create any number of queues, each of which gets its own +// named kernel thread (kthread). You can then insert arbitrary functions +// into the queue, and those functions will be run in the context of the +// queue's kthread. + +#ifndef WARN + // Only *really* old kernels (2.6.9) end up here. 
Just use a simple printk + // to implement this, because such kernels won't be supported much longer. + #define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + printk(KERN_ERR format); \ + unlikely(__ret_warn_on); \ + }) +#endif + +#define NVQ_WARN(fmt, ...) \ + do { \ + if (in_interrupt()) { \ + WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \ + ##__VA_ARGS__); \ + } \ + else { \ + WARN(1, "nv_kthread_q: task: %s: " fmt, \ + current->comm, \ + ##__VA_ARGS__); \ + } \ + } while (0) + +static int _main_loop(void *args) +{ + nv_kthread_q_t *q = (nv_kthread_q_t *)args; + nv_kthread_q_item_t *q_item = NULL; + unsigned long flags; + + while (1) { + // Normally this thread is never interrupted. However, + // down_interruptible (instead of down) is called here, + // in order to avoid being classified as a potentially + // hung task, by the kernel watchdog. + while (down_interruptible(&q->q_sem)) + NVQ_WARN("Interrupted during semaphore wait\n"); + + if (atomic_read(&q->main_loop_should_exit)) + break; + + spin_lock_irqsave(&q->q_lock, flags); + + // The q_sem semaphore prevents us from getting here unless there is + // at least one item in the list, so an empty list indicates a bug. + if (unlikely(list_empty(&q->q_list_head))) { + spin_unlock_irqrestore(&q->q_lock, flags); + NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q); + continue; + } + + // Consume one item from the queue + q_item = list_first_entry(&q->q_list_head, + nv_kthread_q_item_t, + q_list_node); + + list_del_init(&q_item->q_list_node); + + spin_unlock_irqrestore(&q->q_lock, flags); + + // Run the item + q_item->function_to_run(q_item->function_args); + + // Make debugging a little simpler by clearing this between runs: + q_item = NULL; + } + + while (!kthread_should_stop()) + schedule(); + + return 0; +} + +void nv_kthread_q_stop(nv_kthread_q_t *q) +{ + // check if queue has been properly initialized + if (unlikely(!q->q_kthread)) + return; + + nv_kthread_q_flush(q); + + // If this assertion fires, then a caller likely either broke the API rules, + // by adding items after calling nv_kthread_q_stop, or possibly messed up + // with inadequate flushing of self-rescheduling q_items. + if (unlikely(!list_empty(&q->q_list_head))) + NVQ_WARN("list not empty after flushing\n"); + + if (likely(!atomic_read(&q->main_loop_should_exit))) { + + atomic_set(&q->main_loop_should_exit, 1); + + // Wake up the kthread so that it can see that it needs to stop: + up(&q->q_sem); + + kthread_stop(q->q_kthread); + q->q_kthread = NULL; + } +} + +// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by +// kthread_create_on_node relies on a 2 entry, per-core cache to minimize +// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the +// stack location ends up being a function of the core assigned to the current +// thread, instead of being a function of the specified NUMA node. The cache was +// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0 +// ("fork: Optimize task creation by caching two thread stacks per CPU if +// CONFIG_VMAP_STACK=y") +// +// To work around the problematic cache, we create up to three kernel threads +// -If the first thread's stack is resident on the preferred node, return this +// thread. +// -Otherwise, create a second thread. If its stack is resident on the +// preferred node, stop the first thread and return this one. +// -Otherwise, create a third thread. 
The stack allocator does not find a +// cached stack, and so falls back to vmalloc, which takes the NUMA hint into +// consideration. The first two threads are then stopped. +// +// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned. +// +// This function is never invoked when there is no NUMA preference (preferred +// node is NUMA_NO_NODE). +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 +static struct task_struct *thread_create_on_node(int (*threadfn)(void *data), + nv_kthread_q_t *q, + int preferred_node, + const char *q_name) +{ + + unsigned i, j; + const static unsigned attempts = 3; + struct task_struct *thread[3]; + + for (i = 0;; i++) { + struct page *stack; + + thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name); + + if (unlikely(IS_ERR(thread[i]))) { + + // Instead of failing, pick the previous thread, even if its + // stack is not allocated on the preferred node. + if (i > 0) + i--; + + break; + } + + // vmalloc is not used to allocate the stack, so simply return the + // thread, even if its stack may not be allocated on the preferred node + if (!is_vmalloc_addr(thread[i]->stack)) + break; + + // Ran out of attempts - return thread even if its stack may not be + // allocated on the preferred node + if ((i == (attempts - 1))) + break; + + // Get the NUMA node where the first page of the stack is resident. If + // it is the preferred node, select this thread. + stack = vmalloc_to_page(thread[i]->stack); + if (page_to_nid(stack) == preferred_node) + break; + } + + for (j = i; j > 0; j--) + kthread_stop(thread[j - 1]); + + return thread[i]; +} +#endif + +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node) +{ + memset(q, 0, sizeof(*q)); + + INIT_LIST_HEAD(&q->q_list_head); + spin_lock_init(&q->q_lock); + sema_init(&q->q_sem, 0); + + if (preferred_node == NV_KTHREAD_NO_NODE) { + q->q_kthread = kthread_create(_main_loop, q, q_name); + } + else { +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 + q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name); +#else + return -ENOTSUPP; +#endif + } + + if (IS_ERR(q->q_kthread)) { + int err = PTR_ERR(q->q_kthread); + + // Clear q_kthread before returning so that nv_kthread_q_stop() can be + // safely called on it making error handling easier. + q->q_kthread = NULL; + + return err; + } + + wake_up_process(q->q_kthread); + + return 0; +} + +// Returns true (non-zero) if the item was actually scheduled, and false if the +// item was already pending in a queue. +static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&q->q_lock, flags); + + if (likely(list_empty(&q_item->q_list_node))) + list_add_tail(&q_item->q_list_node, &q->q_list_head); + else + ret = 0; + + spin_unlock_irqrestore(&q->q_lock, flags); + + if (likely(ret)) + up(&q->q_sem); + + return ret; +} + +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args) +{ + INIT_LIST_HEAD(&q_item->q_list_node); + q_item->function_to_run = function_to_run; + q_item->function_args = function_args; +} + +// Returns true (non-zero) if the q_item got scheduled, false otherwise. 
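Taken together with nv_kthread_q_schedule_q_item(), nv_kthread_q_flush() and nv_kthread_q_stop() defined just below, these primitives give the usage pattern described at the top of this file: create a queue backed by a single kthread, then hand it items whose functions run in that kthread's process context. A minimal caller sketch; the example_* names are illustrative, and nv_kthread_q_init() is the no-NUMA-preference entry point used elsewhere in this change (see nvkms_init() in nvidia-modeset-linux.c):

#include "nv-kthread-q.h"

static nv_kthread_q_t example_q;
static nv_kthread_q_item_t example_item;

/* Runs on example_q's kthread, in process context. */
static void example_work(void *args)
{
    (void)args;
}

static int example_start(void)
{
    int ret = nv_kthread_q_init(&example_q, "example_q");
    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&example_item, example_work, NULL);

    /* Non-zero return: newly scheduled; 0: the item was already pending. */
    nv_kthread_q_schedule_q_item(&example_q, &example_item);
    return 0;
}

static void example_shutdown(void)
{
    nv_kthread_q_flush(&example_q);  /* wait for queued items to finish */
    nv_kthread_q_stop(&example_q);   /* flushes again, then stops the kthread */
}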
+int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was " + "called with a non-alive q: 0x%p\n", q); + return 0; + } + + return _raw_q_schedule(q, q_item); +} + +static void _q_flush_function(void *args) +{ + struct completion *completion = (struct completion *)args; + complete(completion); +} + + +static void _raw_q_flush(nv_kthread_q_t *q) +{ + nv_kthread_q_item_t q_item; + DECLARE_COMPLETION(completion); + + nv_kthread_q_item_init(&q_item, _q_flush_function, &completion); + + _raw_q_schedule(q, &q_item); + + // Wait for the flush item to run. Once it has run, then all of the + // previously queued items in front of it will have run, so that means + // the flush is complete. + wait_for_completion(&completion); +} + +void nv_kthread_q_flush(nv_kthread_q_t *q) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_flush was called after " + "nv_kthread_q_stop. q: 0x%p\n", q); + return; + } + + // This 2x flush is not a typing mistake. The queue really does have to be + // flushed twice, in order to take care of the case of a q_item that + // reschedules itself. + _raw_q_flush(q); + _raw_q_flush(q); +} diff --git a/kernel-open/nvidia-modeset/nvidia-modeset-linux.c b/kernel-open/nvidia-modeset/nvidia-modeset-linux.c new file mode 100644 index 000000000..75a84712b --- /dev/null +++ b/kernel-open/nvidia-modeset/nvidia-modeset-linux.c @@ -0,0 +1,1734 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-21 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include /* do_div() */ +#include +#include +#include +#include +#include +#include + +#include "nvstatus.h" + +#include "nv-register-module.h" +#include "nv-modeset-interface.h" +#include "nv-kref.h" + +#include "nvidia-modeset-os-interface.h" +#include "nvkms.h" +#include "nvkms-ioctl.h" + +#include "conftest.h" +#include "nv-procfs.h" +#include "nv-kthread-q.h" +#include "nv-time.h" +#include "nv-lock.h" + +#if !defined(CONFIG_RETPOLINE) +#include "nv-retpoline.h" +#endif + +#include + +#define NVKMS_LOG_PREFIX "nvidia-modeset: " + +/* These parameters are used for fault injection tests. Normally the defaults + * should be used. 
*/ +MODULE_PARM_DESC(fail_malloc, "Fail the Nth call to nvkms_alloc"); +static int fail_malloc_num = -1; +module_param_named(fail_malloc, fail_malloc_num, int, 0400); + +MODULE_PARM_DESC(malloc_verbose, "Report information about malloc calls on module unload"); +static bool malloc_verbose = false; +module_param_named(malloc_verbose, malloc_verbose, bool, 0400); + +static atomic_t nvkms_alloc_called_count; + + +#define NVKMS_SYNCPT_STUBS_NEEDED + +/************************************************************************* + * NVKMS interface for nvhost unit for sync point APIs. + *************************************************************************/ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#ifdef NVKMS_SYNCPT_STUBS_NEEDED +/* Unsupported STUB for nvkms_syncpt APIs */ +NvBool nvkms_syncpt_op( + enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params) +{ + return NV_FALSE; +} +#endif + +#define NVKMS_MAJOR_DEVICE_NUMBER 195 +#define NVKMS_MINOR_DEVICE_NUMBER 254 + +/* + * Convert from microseconds to jiffies. The conversion is: + * ((usec) * HZ / 1000000) + * + * Use do_div() to avoid gcc-generated references to __udivdi3(). + * Note that the do_div() macro divides the first argument in place. + */ +static inline unsigned long NVKMS_USECS_TO_JIFFIES(NvU64 usec) +{ + unsigned long result = usec * HZ; + do_div(result, 1000000); + return result; +} + + +/************************************************************************* + * NVKMS uses a global lock, nvkms_lock. The lock is taken in the + * file operation callback functions when calling into core NVKMS. + *************************************************************************/ + +static struct semaphore nvkms_lock; + +/************************************************************************* + * User clients of NVKMS may need to be synchronized with suspend/resume + * operations. This depends on the state of the system when the NVKMS + * suspend/resume callbacks are invoked. NVKMS uses a single + * RW lock, nvkms_pm_lock, for this synchronization. + *************************************************************************/ + +static struct rw_semaphore nvkms_pm_lock; + +/************************************************************************* + * NVKMS executes almost all of its queued work items on a single + * kthread. The exception are deferred close() handlers, which typically + * block for long periods of time and stall their queue. + *************************************************************************/ + +static struct nv_kthread_q nvkms_kthread_q; +static struct nv_kthread_q nvkms_deferred_close_kthread_q; + +/************************************************************************* + * The nvkms_per_open structure tracks data that is specific to a + * single open. + *************************************************************************/ + +struct nvkms_per_open { + void *data; + + enum NvKmsClientType type; + + union { + struct { + struct { + atomic_t available; + wait_queue_head_t wait_queue; + } events; + } user; + + struct { + struct { + nv_kthread_q_item_t nv_kthread_q_item; + } events; + } kernel; + } u; + + nv_kthread_q_item_t deferred_close_q_item; +}; + +/************************************************************************* + * nvkms_pm_lock helper functions. 
Since no down_read_interruptible() + * or equivalent interface is available, it needs to be approximated with + * down_read_trylock() to enable the kernel's freezer to round up user + * threads going into suspend. + *************************************************************************/ + +static inline int nvkms_read_trylock_pm_lock(void) +{ + return !down_read_trylock(&nvkms_pm_lock); +} + +static inline void nvkms_read_lock_pm_lock(void) +{ + down_read(&nvkms_pm_lock); +} + +static inline void nvkms_read_unlock_pm_lock(void) +{ + up_read(&nvkms_pm_lock); +} + +static inline void nvkms_write_lock_pm_lock(void) +{ + down_write(&nvkms_pm_lock); +} + +static inline void nvkms_write_unlock_pm_lock(void) +{ + up_write(&nvkms_pm_lock); +} + +/************************************************************************* + * nvidia-modeset-os-interface.h functions. It is assumed that these + * are called while nvkms_lock is held. + *************************************************************************/ + +/* Don't use kmalloc for allocations larger than one page */ +#define KMALLOC_LIMIT PAGE_SIZE + +void* nvkms_alloc(size_t size, NvBool zero) +{ + void *p; + + if (malloc_verbose || fail_malloc_num >= 0) { + int this_alloc = atomic_inc_return(&nvkms_alloc_called_count) - 1; + if (fail_malloc_num >= 0 && fail_malloc_num == this_alloc) { + printk(KERN_WARNING NVKMS_LOG_PREFIX "Failing alloc %d\n", + fail_malloc_num); + return NULL; + } + } + + if (size <= KMALLOC_LIMIT) { + p = kmalloc(size, GFP_KERNEL); + } else { + p = vmalloc(size); + } + + if (zero && (p != NULL)) { + memset(p, 0, size); + } + + return p; +} + +void nvkms_free(void *ptr, size_t size) +{ + if (size <= KMALLOC_LIMIT) { + kfree(ptr); + } else { + vfree(ptr); + } +} + +void* nvkms_memset(void *ptr, NvU8 c, size_t size) +{ + return memset(ptr, c, size); +} + +void* nvkms_memcpy(void *dest, const void *src, size_t n) +{ + return memcpy(dest, src, n); +} + +void* nvkms_memmove(void *dest, const void *src, size_t n) +{ + return memmove(dest, src, n); +} + +int nvkms_memcmp(const void *s1, const void *s2, size_t n) +{ + return memcmp(s1, s2, n); +} + +size_t nvkms_strlen(const char *s) +{ + return strlen(s); +} + +int nvkms_strcmp(const char *s1, const char *s2) +{ + return strcmp(s1, s2); +} + +char* nvkms_strncpy(char *dest, const char *src, size_t n) +{ + return strncpy(dest, src, n); +} + +void nvkms_usleep(NvU64 usec) +{ + if (usec < 1000) { + /* + * If the period to wait is less than one millisecond, sleep + * using udelay(); note this is a busy wait. + */ + udelay(usec); + } else { + /* + * Otherwise, sleep with millisecond precision. Clamp the + * time to ~4 seconds (0xFFF/1000 => 4.09 seconds). + * + * Note that the do_div() macro divides the first argument in + * place. + */ + + int msec; + NvU64 tmp = usec + 500; + do_div(tmp, 1000); + msec = (int) (tmp & 0xFFF); + + /* + * XXX NVKMS TODO: this may need to be msleep_interruptible(), + * though the callers would need to be made to handle + * returning early. 
+ */ + msleep(msec); + } +} + +NvU64 nvkms_get_usec(void) +{ + struct timespec64 ts; + NvU64 ns; + + ktime_get_real_ts64(&ts); + + ns = timespec64_to_ns(&ts); + return ns / 1000; +} + +int nvkms_copyin(void *kptr, NvU64 uaddr, size_t n) +{ + if (!nvKmsNvU64AddressIsSafe(uaddr)) { + return -EINVAL; + } + + if (copy_from_user(kptr, nvKmsNvU64ToPointer(uaddr), n) != 0) { + return -EFAULT; + } + + return 0; +} + +int nvkms_copyout(NvU64 uaddr, const void *kptr, size_t n) +{ + if (!nvKmsNvU64AddressIsSafe(uaddr)) { + return -EINVAL; + } + + if (copy_to_user(nvKmsNvU64ToPointer(uaddr), kptr, n) != 0) { + return -EFAULT; + } + + return 0; +} + +void nvkms_yield(void) +{ + schedule(); +} + +void nvkms_dump_stack(void) +{ + dump_stack(); +} + +int nvkms_snprintf(char *str, size_t size, const char *format, ...) +{ + int ret; + va_list ap; + + va_start(ap, format); + ret = vsnprintf(str, size, format, ap); + va_end(ap); + + return ret; +} + +int nvkms_vsnprintf(char *str, size_t size, const char *format, va_list ap) +{ + return vsnprintf(str, size, format, ap); +} + +void nvkms_log(const int level, const char *gpuPrefix, const char *msg) +{ + const char *levelString; + const char *levelPrefix; + + switch (level) { + default: + case NVKMS_LOG_LEVEL_INFO: + levelPrefix = ""; + levelString = KERN_INFO; + break; + case NVKMS_LOG_LEVEL_WARN: + levelPrefix = "WARNING: "; + levelString = KERN_WARNING; + break; + case NVKMS_LOG_LEVEL_ERROR: + levelPrefix = "ERROR: "; + levelString = KERN_ERR; + break; + } + + printk("%s%s%s%s%s\n", + levelString, NVKMS_LOG_PREFIX, levelPrefix, gpuPrefix, msg); +} + +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable) +{ + struct nvkms_per_open *popen = pOpenKernel; + + switch (popen->type) { + case NVKMS_CLIENT_USER_SPACE: + /* + * Write popen->events.available atomically, to avoid any races or + * memory barrier issues interacting with nvkms_poll(). + */ + atomic_set(&popen->u.user.events.available, eventsAvailable); + + wake_up_interruptible(&popen->u.user.events.wait_queue); + + break; + case NVKMS_CLIENT_KERNEL_SPACE: + if (eventsAvailable) { + nv_kthread_q_schedule_q_item( + &nvkms_kthread_q, + &popen->u.kernel.events.nv_kthread_q_item); + } + + break; + } +} + +static void nvkms_suspend(NvU32 gpuId) +{ + if (gpuId == 0) { + nvkms_write_lock_pm_lock(); + } + + down(&nvkms_lock); + nvKmsSuspend(gpuId); + up(&nvkms_lock); +} + +static void nvkms_resume(NvU32 gpuId) +{ + down(&nvkms_lock); + nvKmsResume(gpuId); + up(&nvkms_lock); + + if (gpuId == 0) { + nvkms_write_unlock_pm_lock(); + } +} + + +/************************************************************************* + * Interface with resman. 
+ *************************************************************************/ + +static nvidia_modeset_rm_ops_t __rm_ops = { 0 }; +static nvidia_modeset_callbacks_t nvkms_rm_callbacks = { + .suspend = nvkms_suspend, + .resume = nvkms_resume +}; + +static int nvkms_alloc_rm(void) +{ + NV_STATUS nvstatus; + int ret; + + __rm_ops.version_string = NV_VERSION_STRING; + + nvstatus = nvidia_get_rm_ops(&__rm_ops); + + if (nvstatus != NV_OK) { + printk(KERN_ERR NVKMS_LOG_PREFIX "Version mismatch: " + "nvidia.ko(%s) nvidia-modeset.ko(%s)\n", + __rm_ops.version_string, NV_VERSION_STRING); + return -EINVAL; + } + + ret = __rm_ops.set_callbacks(&nvkms_rm_callbacks); + if (ret < 0) { + printk(KERN_ERR NVKMS_LOG_PREFIX "Failed to register callbacks\n"); + return ret; + } + + return 0; +} + +static void nvkms_free_rm(void) +{ + __rm_ops.set_callbacks(NULL); +} + +void nvkms_call_rm(void *ops) +{ + nvidia_modeset_stack_ptr stack = NULL; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return; + } + + __rm_ops.op(stack, ops); + + __rm_ops.free_stack(stack); +} + +/************************************************************************* + * ref_ptr implementation. + *************************************************************************/ + +struct nvkms_ref_ptr { + nv_kref_t refcnt; + // Access to ptr is guarded by the nvkms_lock. + void *ptr; +}; + +struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr) +{ + struct nvkms_ref_ptr *ref_ptr = nvkms_alloc(sizeof(*ref_ptr), NV_FALSE); + if (ref_ptr) { + // The ref_ptr owner counts as a reference on the ref_ptr itself. + nv_kref_init(&ref_ptr->refcnt); + ref_ptr->ptr = ptr; + } + return ref_ptr; +} + +void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr) +{ + if (ref_ptr) { + ref_ptr->ptr = NULL; + // Release the owner's reference of the ref_ptr. + nvkms_dec_ref(ref_ptr); + } +} + +void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr) +{ + nv_kref_get(&ref_ptr->refcnt); +} + +static void ref_ptr_free(nv_kref_t *ref) +{ + struct nvkms_ref_ptr *ref_ptr = container_of(ref, struct nvkms_ref_ptr, + refcnt); + nvkms_free(ref_ptr, sizeof(*ref_ptr)); +} + +void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr) +{ + void *ptr = ref_ptr->ptr; + nv_kref_put(&ref_ptr->refcnt, ref_ptr_free); + return ptr; +} + +/************************************************************************* + * Timer support + * + * Core NVKMS needs to be able to schedule work to execute in the + * future, within process context. + * + * To achieve this, use struct timer_list to schedule a timer + * callback, nvkms_timer_callback(). This will execute in softirq + * context, so from there schedule an nv_kthread_q item, + * nvkms_kthread_q_callback(), which will execute in process context. + *************************************************************************/ + +struct nvkms_timer_t { + nv_kthread_q_item_t nv_kthread_q_item; + struct timer_list kernel_timer; + NvBool cancel; + NvBool complete; + NvBool isRefPtr; + NvBool kernel_timer_created; + nvkms_timer_proc_t *proc; + void *dataPtr; + NvU32 dataU32; + struct list_head timers_list; +}; + +/* + * Global list with pending timers, any change requires acquiring lock + */ +static struct { + spinlock_t lock; + struct list_head list; +} nvkms_timers; + +static void nvkms_kthread_q_callback(void *arg) +{ + struct nvkms_timer_t *timer = arg; + void *dataPtr; + unsigned long flags = 0; + + /* + * We can delete this timer from pending timers list - it's being + * processed now. 
+ */ + spin_lock_irqsave(&nvkms_timers.lock, flags); + list_del(&timer->timers_list); + spin_unlock_irqrestore(&nvkms_timers.lock, flags); + + /* + * After kthread_q_callback we want to be sure that timer_callback + * for this timer also have finished. It's important during module + * unload - this way we can safely unload this module by first deleting + * pending timers and than waiting for workqueue callbacks. + */ + if (timer->kernel_timer_created) { + del_timer_sync(&timer->kernel_timer); + } + + /* + * Block the kthread during system suspend & resume in order to defer + * handling of events such as DP_IRQ and hotplugs until after resume. + */ + nvkms_read_lock_pm_lock(); + + down(&nvkms_lock); + + if (timer->isRefPtr) { + // If the object this timer refers to was destroyed, treat the timer as + // canceled. + dataPtr = nvkms_dec_ref(timer->dataPtr); + if (!dataPtr) { + timer->cancel = NV_TRUE; + } + } else { + dataPtr = timer->dataPtr; + } + + if (!timer->cancel) { + timer->proc(dataPtr, timer->dataU32); + timer->complete = NV_TRUE; + } + + if (timer->isRefPtr) { + // ref_ptr-based timers are allocated with kmalloc(GFP_ATOMIC). + kfree(timer); + } else if (timer->cancel) { + nvkms_free(timer, sizeof(*timer)); + } + + up(&nvkms_lock); + + nvkms_read_unlock_pm_lock(); +} + +static void nvkms_queue_work(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + int ret = nv_kthread_q_schedule_q_item(q, q_item); + /* + * nv_kthread_q_schedule_q_item should only fail (which it indicates by + * returning false) if the item is already scheduled or the queue is + * stopped. Neither of those should happen in NVKMS. + */ + WARN_ON(!ret); +} + +static void _nvkms_timer_callback_internal(struct nvkms_timer_t *nvkms_timer) +{ + /* In softirq context, so schedule nvkms_kthread_q_callback(). */ + nvkms_queue_work(&nvkms_kthread_q, &nvkms_timer->nv_kthread_q_item); +} + +/* + * Why the "inline" keyword? Because only one of these next two functions will + * be used, thus leading to a "defined but not used function" warning. The + * "inline" keyword is redefined in the Kbuild system + * (see: /include/linux/compiler-gcc.h) so as to suppress that warning. + */ +inline static void nvkms_timer_callback_typed_data(struct timer_list *timer) +{ + struct nvkms_timer_t *nvkms_timer = + container_of(timer, struct nvkms_timer_t, kernel_timer); + + _nvkms_timer_callback_internal(nvkms_timer); +} + +inline static void nvkms_timer_callback_anon_data(unsigned long arg) +{ + struct nvkms_timer_t *nvkms_timer = (struct nvkms_timer_t *) arg; + _nvkms_timer_callback_internal(nvkms_timer); +} + +static void +nvkms_init_timer(struct nvkms_timer_t *timer, nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, NvBool isRefPtr, NvU64 usec) +{ + unsigned long flags = 0; + + memset(timer, 0, sizeof(*timer)); + timer->cancel = NV_FALSE; + timer->complete = NV_FALSE; + timer->isRefPtr = isRefPtr; + + timer->proc = proc; + timer->dataPtr = dataPtr; + timer->dataU32 = dataU32; + + nv_kthread_q_item_init(&timer->nv_kthread_q_item, nvkms_kthread_q_callback, + timer); + + /* + * After adding timer to timers_list we need to finish referencing it + * (calling nvkms_queue_work() or mod_timer()) before releasing the lock. + * Otherwise, if the code to free the timer were ever updated to + * run in parallel with this, it could race against nvkms_init_timer() + * and free the timer before its initialization is complete. 
+ */ + spin_lock_irqsave(&nvkms_timers.lock, flags); + list_add(&timer->timers_list, &nvkms_timers.list); + + if (usec == 0) { + timer->kernel_timer_created = NV_FALSE; + nvkms_queue_work(&nvkms_kthread_q, &timer->nv_kthread_q_item); + } else { +#if defined(NV_TIMER_SETUP_PRESENT) + timer_setup(&timer->kernel_timer, nvkms_timer_callback_typed_data, 0); +#else + init_timer(&timer->kernel_timer); + timer->kernel_timer.function = nvkms_timer_callback_anon_data; + timer->kernel_timer.data = (unsigned long) timer; +#endif + + timer->kernel_timer_created = NV_TRUE; + mod_timer(&timer->kernel_timer, jiffies + NVKMS_USECS_TO_JIFFIES(usec)); + } + spin_unlock_irqrestore(&nvkms_timers.lock, flags); +} + +nvkms_timer_handle_t* +nvkms_alloc_timer(nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec) +{ + // nvkms_alloc_timer cannot be called from an interrupt context. + struct nvkms_timer_t *timer = nvkms_alloc(sizeof(*timer), NV_FALSE); + if (timer) { + nvkms_init_timer(timer, proc, dataPtr, dataU32, NV_FALSE, usec); + } + return timer; +} + +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec) +{ + // nvkms_alloc_timer_with_ref_ptr is called from an interrupt bottom half + // handler, which runs in a tasklet (i.e. atomic) context. + struct nvkms_timer_t *timer = kmalloc(sizeof(*timer), GFP_ATOMIC); + if (timer) { + // Reference the ref_ptr to make sure that it doesn't get freed before + // the timer fires. + nvkms_inc_ref(ref_ptr); + nvkms_init_timer(timer, proc, ref_ptr, dataU32, NV_TRUE, usec); + } + + return timer != NULL; +} + +void nvkms_free_timer(nvkms_timer_handle_t *handle) +{ + struct nvkms_timer_t *timer = handle; + + if (timer == NULL) { + return; + } + + if (timer->complete) { + nvkms_free(timer, sizeof(*timer)); + return; + } + + timer->cancel = NV_TRUE; +} + +void* nvkms_get_per_open_data(int fd) +{ + struct file *filp = fget(fd); + struct nvkms_per_open *popen = NULL; + dev_t rdev = 0; + void *data = NULL; + + if (filp == NULL) { + return NULL; + } + + if (filp->f_inode == NULL) { + goto done; + } + rdev = filp->f_inode->i_rdev; + + if ((MAJOR(rdev) != NVKMS_MAJOR_DEVICE_NUMBER) || + (MINOR(rdev) != NVKMS_MINOR_DEVICE_NUMBER)) { + goto done; + } + + popen = filp->private_data; + if (popen == NULL) { + goto done; + } + + data = popen->data; + +done: + /* + * fget() incremented the struct file's reference count, which + * needs to be balanced with a call to fput(). It is safe to + * decrement the reference count before returning + * filp->private_data because core NVKMS is currently holding the + * nvkms_lock, which prevents the nvkms_close() => nvKmsClose() + * call chain from freeing the file out from under the caller of + * nvkms_get_per_open_data(). 
+ */ + fput(filp); + + return data; +} + +NvBool nvkms_fd_is_nvidia_chardev(int fd) +{ + struct file *filp = fget(fd); + dev_t rdev = 0; + NvBool ret = NV_FALSE; + + if (filp == NULL) { + return ret; + } + + if (filp->f_inode == NULL) { + goto done; + } + rdev = filp->f_inode->i_rdev; + + if (MAJOR(rdev) == NVKMS_MAJOR_DEVICE_NUMBER) { + ret = NV_TRUE; + } + +done: + fput(filp); + + return ret; +} + +NvBool nvkms_open_gpu(NvU32 gpuId) +{ + nvidia_modeset_stack_ptr stack = NULL; + NvBool ret; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return NV_FALSE; + } + + ret = __rm_ops.open_gpu(gpuId, stack) == 0; + + __rm_ops.free_stack(stack); + + return ret; +} + +void nvkms_close_gpu(NvU32 gpuId) +{ + nvidia_modeset_stack_ptr stack = NULL; + + if (__rm_ops.alloc_stack(&stack) != 0) { + return; + } + + __rm_ops.close_gpu(gpuId, stack); + + __rm_ops.free_stack(stack); +} + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info) +{ + return __rm_ops.enumerate_gpus(gpu_info); +} + +NvBool nvkms_allow_write_combining(void) +{ + return __rm_ops.system_info.allow_write_combining; +} + +/************************************************************************* + * Implementation of sysfs interface to control backlight + *************************************************************************/ + +struct nvkms_backlight_device { + NvU32 gpu_id; + NvU32 display_id; + + void *drv_priv; + + struct backlight_device * dev; +}; + +static int nvkms_update_backlight_status(struct backlight_device *bd) +{ + struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd); + NvBool status; + int ret; + + ret = down_interruptible(&nvkms_lock); + + if (ret != 0) { + return ret; + } + + status = nvKmsSetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv, + bd->props.brightness); + + up(&nvkms_lock); + + return status ? 0 : -EINVAL; +} + +static int nvkms_get_backlight_brightness(struct backlight_device *bd) +{ + struct nvkms_backlight_device *nvkms_bd = bl_get_data(bd); + NvU32 brightness = 0; + NvBool status; + int ret; + + ret = down_interruptible(&nvkms_lock); + + if (ret != 0) { + return ret; + } + + status = nvKmsGetBacklight(nvkms_bd->display_id, nvkms_bd->drv_priv, + &brightness); + + up(&nvkms_lock); + + return status ? 
brightness : -1; +} + +static const struct backlight_ops nvkms_backlight_ops = { + .update_status = nvkms_update_backlight_status, + .get_brightness = nvkms_get_backlight_brightness, +}; + +struct nvkms_backlight_device* +nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv, + NvU32 current_brightness) +{ + char name[18]; + struct backlight_properties props = { + .brightness = current_brightness, + .max_brightness = 100, + .type = BACKLIGHT_RAW, + }; + nv_gpu_info_t *gpu_info = NULL; + NvU32 gpu_count = 0; + struct nvkms_backlight_device *nvkms_bd = NULL; + int i; + + gpu_info = nvkms_alloc(NV_MAX_GPUS * sizeof(*gpu_info), NV_TRUE); + if (gpu_info == NULL) { + return NULL; + } + + gpu_count = __rm_ops.enumerate_gpus(gpu_info); + if (gpu_count == 0) { + goto done; + } + + for (i = 0; i < gpu_count; i++) { + if (gpu_info[i].gpu_id == gpu_id) { + break; + } + } + + if (i == gpu_count) { + goto done; + } + + nvkms_bd = nvkms_alloc(sizeof(*nvkms_bd), NV_TRUE); + if (nvkms_bd == NULL) { + goto done; + } + + snprintf(name, sizeof(name), "nvidia_%d", i); + name[sizeof(name) - 1] = '\0'; + + nvkms_bd->gpu_id = gpu_id; + nvkms_bd->display_id = display_id; + nvkms_bd->drv_priv = drv_priv; + + nvkms_bd->dev = + backlight_device_register(name, + gpu_info[i].os_device_ptr, + nvkms_bd, + &nvkms_backlight_ops, + &props); + +done: + nvkms_free(gpu_info, NV_MAX_GPUS * sizeof(*gpu_info)); + + return nvkms_bd; +} + +void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd) +{ + if (nvkms_bd->dev) { + backlight_device_unregister(nvkms_bd->dev); + } + + nvkms_free(nvkms_bd, sizeof(*nvkms_bd)); +} + +/************************************************************************* + * Common to both user-space and kapi NVKMS interfaces + *************************************************************************/ + +static void nvkms_kapi_event_kthread_q_callback(void *arg) +{ + struct NvKmsKapiDevice *device = arg; + + nvKmsKapiHandleEventQueueChange(device); +} + +struct nvkms_per_open *nvkms_open_common(enum NvKmsClientType type, + struct NvKmsKapiDevice *device, + int *status) +{ + struct nvkms_per_open *popen = NULL; + + popen = nvkms_alloc(sizeof(*popen), NV_TRUE); + + if (popen == NULL) { + *status = -ENOMEM; + goto failed; + } + + popen->type = type; + + *status = down_interruptible(&nvkms_lock); + + if (*status != 0) { + goto failed; + } + + popen->data = nvKmsOpen(current->tgid, type, popen); + + up(&nvkms_lock); + + if (popen->data == NULL) { + *status = -EPERM; + goto failed; + } + + switch (popen->type) { + case NVKMS_CLIENT_USER_SPACE: + init_waitqueue_head(&popen->u.user.events.wait_queue); + break; + case NVKMS_CLIENT_KERNEL_SPACE: + nv_kthread_q_item_init(&popen->u.kernel.events.nv_kthread_q_item, + nvkms_kapi_event_kthread_q_callback, + device); + break; + } + + *status = 0; + + return popen; + +failed: + + nvkms_free(popen, sizeof(*popen)); + + return NULL; +} + +void nvkms_close_common(struct nvkms_per_open *popen) +{ + /* + * Don't use down_interruptible(): we need to free resources + * during close, so we have no choice but to wait to take the + * mutex. + */ + + down(&nvkms_lock); + + nvKmsClose(popen->data); + + popen->data = NULL; + + up(&nvkms_lock); + + if (popen->type == NVKMS_CLIENT_KERNEL_SPACE) { + /* + * Flush any outstanding nvkms_kapi_event_kthread_q_callback() work + * items before freeing popen. 
+ * + * Note that this must be done after the above nvKmsClose() call, to + * guarantee that no more nvkms_kapi_event_kthread_q_callback() work + * items get scheduled. + * + * Also, note that though popen->data is freed above, any subsequent + * nvkms_kapi_event_kthread_q_callback()'s for this popen should be + * safe: if any nvkms_kapi_event_kthread_q_callback()-initiated work + * attempts to call back into NVKMS, the popen->data==NULL check in + * nvkms_ioctl_common() should reject the request. + */ + + nv_kthread_q_flush(&nvkms_kthread_q); + } + + nvkms_free(popen, sizeof(*popen)); +} + +static void nvkms_close_deferred(void *data) +{ + struct nvkms_per_open *popen = data; + + nvkms_read_lock_pm_lock(); + + nvkms_close_common(popen); + + nvkms_read_unlock_pm_lock(); +} + +static void nvkms_close_popen(struct nvkms_per_open *popen) +{ + if (nvkms_read_trylock_pm_lock() == 0) { + nvkms_close_common(popen); + nvkms_read_unlock_pm_lock(); + } else { + nv_kthread_q_item_init(&popen->deferred_close_q_item, + nvkms_close_deferred, + popen); + nvkms_queue_work(&nvkms_deferred_close_kthread_q, + &popen->deferred_close_q_item); + } +} + +int nvkms_ioctl_common +( + struct nvkms_per_open *popen, + NvU32 cmd, NvU64 address, const size_t size +) +{ + int status; + NvBool ret; + + status = down_interruptible(&nvkms_lock); + if (status != 0) { + return status; + } + + if (popen->data != NULL) { + ret = nvKmsIoctl(popen->data, cmd, address, size); + } else { + ret = NV_FALSE; + } + + up(&nvkms_lock); + + return ret ? 0 : -EPERM; +} + +/************************************************************************* + * NVKMS interface for kernel space NVKMS clients like KAPI + *************************************************************************/ + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +) +{ + int status = 0; + struct nvkms_per_open *ret; + + nvkms_read_lock_pm_lock(); + ret = nvkms_open_common(NVKMS_CLIENT_KERNEL_SPACE, device, &status); + nvkms_read_unlock_pm_lock(); + + return ret; +} + +void nvkms_close_from_kapi(struct nvkms_per_open *popen) +{ + nvkms_close_popen(popen); +} + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t param_size +) +{ + NvBool ret; + + nvkms_read_lock_pm_lock(); + ret = nvkms_ioctl_common(popen, + cmd, + (NvU64)(NvUPtr)params_address, param_size) == 0; + nvkms_read_unlock_pm_lock(); + + return ret; +} + +/************************************************************************* + * APIs for locking. + *************************************************************************/ + +struct nvkms_sema_t { + struct semaphore os_sema; +}; + +nvkms_sema_handle_t* nvkms_sema_alloc(void) +{ + nvkms_sema_handle_t *sema = nvkms_alloc(sizeof(*sema), NV_TRUE); + + if (sema != NULL) { + sema_init(&sema->os_sema, 1); + } + + return sema; +} + +void nvkms_sema_free(nvkms_sema_handle_t *sema) +{ + nvkms_free(sema, sizeof(*sema)); +} + +void nvkms_sema_down(nvkms_sema_handle_t *sema) +{ + down(&sema->os_sema); +} + +void nvkms_sema_up(nvkms_sema_handle_t *sema) +{ + up(&sema->os_sema); +} + +/************************************************************************* + * Procfs files support code. 
+ *************************************************************************/ + +#if defined(CONFIG_PROC_FS) + +#define NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(name) \ + NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nvkms_pm_lock) + +#define NVKMS_PROCFS_FOLDER "driver/nvidia-modeset" + +struct proc_dir_entry *nvkms_proc_dir; + +static void nv_procfs_out_string(void *data, const char *str) +{ + struct seq_file *s = data; + + seq_puts(s, str); +} + +static int nv_procfs_read_nvkms_proc(struct seq_file *s, void *arg) +{ + char *buffer; + nvkms_procfs_proc_t *func; + +#define NVKMS_PROCFS_STRING_SIZE 8192 + + func = s->private; + if (func == NULL) { + return 0; + } + + buffer = nvkms_alloc(NVKMS_PROCFS_STRING_SIZE, NV_TRUE); + + if (buffer != NULL) { + int status = down_interruptible(&nvkms_lock); + + if (status != 0) { + nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE); + return status; + } + + func(s, buffer, NVKMS_PROCFS_STRING_SIZE, &nv_procfs_out_string); + + up(&nvkms_lock); + + nvkms_free(buffer, NVKMS_PROCFS_STRING_SIZE); + } + + return 0; +} + +NV_DEFINE_SINGLE_NVKMS_PROCFS_FILE(nvkms_proc); + +static NvBool +nvkms_add_proc_file(const nvkms_procfs_file_t *file) +{ + struct proc_dir_entry *new_proc_dir; + + if (nvkms_proc_dir == NULL) { + return NV_FALSE; + } + + new_proc_dir = proc_create_data(file->name, 0, nvkms_proc_dir, + &nv_procfs_nvkms_proc_fops, file->func); + return (new_proc_dir != NULL); +} + +#endif /* defined(CONFIG_PROC_FS) */ + +static void nvkms_proc_init(void) +{ +#if defined(CONFIG_PROC_FS) + const nvkms_procfs_file_t *file; + + nvkms_proc_dir = NULL; + nvKmsGetProcFiles(&file); + + if (file == NULL || file->name == NULL) { + return; + } + + nvkms_proc_dir = NV_CREATE_PROC_DIR(NVKMS_PROCFS_FOLDER, NULL); + if (nvkms_proc_dir == NULL) { + return; + } + + while (file->name != NULL) { + if (!nvkms_add_proc_file(file)) { + nvkms_log(NVKMS_LOG_LEVEL_WARN, NVKMS_LOG_PREFIX, + "Failed to create proc file"); + break; + } + file++; + } +#endif +} + +static void nvkms_proc_exit(void) +{ +#if defined(CONFIG_PROC_FS) + if (nvkms_proc_dir == NULL) { + return; + } + +#if defined(NV_PROC_REMOVE_PRESENT) + proc_remove(nvkms_proc_dir); +#else + /* + * On kernel versions without proc_remove(), we need to explicitly + * remove each proc file beneath nvkms_proc_dir. + * nvkms_proc_init() only creates files directly under + * nvkms_proc_dir, so those are the only files we need to remove + * here: warn if there is any deeper directory nesting. + */ + { + struct proc_dir_entry *entry = nvkms_proc_dir->subdir; + + while (entry != NULL) { + struct proc_dir_entry *next = entry->next; + WARN_ON(entry->subdir != NULL); + remove_proc_entry(entry->name, entry->parent); + entry = next; + } + } + + remove_proc_entry(nvkms_proc_dir->name, nvkms_proc_dir->parent); +#endif /* NV_PROC_REMOVE_PRESENT */ +#endif /* CONFIG_PROC_FS */ +} + +/************************************************************************* + * NVKMS KAPI functions + ************************************************************************/ + +NvBool nvKmsKapiGetFunctionsTable +( + struct NvKmsKapiFunctionsTable *funcsTable +) +{ + return nvKmsKapiGetFunctionsTableInternal(funcsTable); +} +EXPORT_SYMBOL(nvKmsKapiGetFunctionsTable); + +/************************************************************************* + * File operation callback functions. 
+ *************************************************************************/ + +static int nvkms_open(struct inode *inode, struct file *filp) +{ + int status; + + status = nv_down_read_interruptible(&nvkms_pm_lock); + if (status != 0) { + return status; + } + + filp->private_data = + nvkms_open_common(NVKMS_CLIENT_USER_SPACE, NULL, &status); + + nvkms_read_unlock_pm_lock(); + + return status; +} + +static int nvkms_close(struct inode *inode, struct file *filp) +{ + struct nvkms_per_open *popen = filp->private_data; + + if (popen == NULL) { + return -EINVAL; + } + + nvkms_close_popen(popen); + return 0; +} + +static int nvkms_mmap(struct file *filp, struct vm_area_struct *vma) +{ + return -EPERM; +} + +static int nvkms_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + size_t size; + unsigned int nr; + int status; + struct NvKmsIoctlParams params; + struct nvkms_per_open *popen = filp->private_data; + + if ((popen == NULL) || (popen->data == NULL)) { + return -EINVAL; + } + + size = _IOC_SIZE(cmd); + nr = _IOC_NR(cmd); + + /* The only supported ioctl is NVKMS_IOCTL_CMD. */ + + if ((nr != NVKMS_IOCTL_CMD) || (size != sizeof(struct NvKmsIoctlParams))) { + return -ENOTTY; + } + + status = copy_from_user(¶ms, (void *) arg, size); + if (status != 0) { + return -EFAULT; + } + + status = nv_down_read_interruptible(&nvkms_pm_lock); + if (status != 0) { + return status; + } + + status = nvkms_ioctl_common(popen, + params.cmd, + params.address, + params.size); + + nvkms_read_unlock_pm_lock(); + + return status; +} + +static unsigned int nvkms_poll(struct file *filp, poll_table *wait) +{ + unsigned int mask = 0; + struct nvkms_per_open *popen = filp->private_data; + + if ((popen == NULL) || (popen->data == NULL)) { + return mask; + } + + BUG_ON(popen->type != NVKMS_CLIENT_USER_SPACE); + + if ((filp->f_flags & O_NONBLOCK) == 0) { + poll_wait(filp, &popen->u.user.events.wait_queue, wait); + } + + if (atomic_read(&popen->u.user.events.available)) { + mask = POLLPRI | POLLIN; + } + + return mask; +} + + +/************************************************************************* + * Module loading support code. 
+ *************************************************************************/ + +static nvidia_module_t nvidia_modeset_module = { + .owner = THIS_MODULE, + .module_name = "nvidia-modeset", + .instance = 1, /* minor number: 255-1=254 */ + .open = nvkms_open, + .close = nvkms_close, + .mmap = nvkms_mmap, + .ioctl = nvkms_ioctl, + .poll = nvkms_poll, +}; + +static int __init nvkms_init(void) +{ + int ret; + + atomic_set(&nvkms_alloc_called_count, 0); + + ret = nvkms_alloc_rm(); + + if (ret != 0) { + return ret; + } + + sema_init(&nvkms_lock, 1); + init_rwsem(&nvkms_pm_lock); + + ret = nv_kthread_q_init(&nvkms_kthread_q, + "nvidia-modeset/kthread_q"); + if (ret != 0) { + goto fail_kthread; + } + + ret = nv_kthread_q_init(&nvkms_deferred_close_kthread_q, + "nvidia-modeset/deferred_close_kthread_q"); + if (ret != 0) { + goto fail_deferred_close_kthread; + } + + INIT_LIST_HEAD(&nvkms_timers.list); + spin_lock_init(&nvkms_timers.lock); + + ret = nvidia_register_module(&nvidia_modeset_module); + + if (ret != 0) { + goto fail_register_module; + } + + down(&nvkms_lock); + if (!nvKmsModuleLoad()) { + ret = -ENOMEM; + } + up(&nvkms_lock); + if (ret != 0) { + goto fail_module_load; + } + + nvkms_proc_init(); + + return 0; + +fail_module_load: + nvidia_unregister_module(&nvidia_modeset_module); +fail_register_module: + nv_kthread_q_stop(&nvkms_deferred_close_kthread_q); +fail_deferred_close_kthread: + nv_kthread_q_stop(&nvkms_kthread_q); +fail_kthread: + nvkms_free_rm(); + + return ret; +} + +static void __exit nvkms_exit(void) +{ + struct nvkms_timer_t *timer, *tmp_timer; + unsigned long flags = 0; + + nvkms_proc_exit(); + + down(&nvkms_lock); + nvKmsModuleUnload(); + up(&nvkms_lock); + + /* + * At this point, any pending tasks should be marked canceled, but + * we still need to drain them, so that nvkms_kthread_q_callback() doesn't + * get called after the module is unloaded. + */ +restart: + spin_lock_irqsave(&nvkms_timers.lock, flags); + + list_for_each_entry_safe(timer, tmp_timer, &nvkms_timers.list, timers_list) { + if (timer->kernel_timer_created) { + /* + * We delete pending timers and check whether it was being executed + * (returns 0) or we have deactivated it before execution (returns 1). + * If it began execution, the kthread_q callback will wait for timer + * completion, and we wait for queue completion with + * nv_kthread_q_stop below. + */ + if (del_timer_sync(&timer->kernel_timer) == 1) { + /* We've deactivated timer so we need to clean after it */ + list_del(&timer->timers_list); + + /* We need to unlock spinlock because we are freeing memory which + * may sleep */ + spin_unlock_irqrestore(&nvkms_timers.lock, flags); + + if (timer->isRefPtr) { + nvkms_dec_ref(timer->dataPtr); + kfree(timer); + } else { + nvkms_free(timer, sizeof(*timer)); + } + + /* List could change when we were freeing memory. 
*/ + goto restart; + } + } + } + + spin_unlock_irqrestore(&nvkms_timers.lock, flags); + + nv_kthread_q_stop(&nvkms_deferred_close_kthread_q); + nv_kthread_q_stop(&nvkms_kthread_q); + + nvidia_unregister_module(&nvidia_modeset_module); + nvkms_free_rm(); + + if (malloc_verbose) { + printk(KERN_INFO NVKMS_LOG_PREFIX "Total allocations: %d\n", + atomic_read(&nvkms_alloc_called_count)); + } +} + +module_init(nvkms_init); +module_exit(nvkms_exit); + +#if defined(MODULE_LICENSE) + + MODULE_LICENSE("Dual MIT/GPL"); + + + +#endif +#if defined(MODULE_INFO) + MODULE_INFO(supported, "external"); +#endif +#if defined(MODULE_VERSION) + MODULE_VERSION(NV_VERSION_STRING); +#endif diff --git a/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h b/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h new file mode 100644 index 000000000..36685a026 --- /dev/null +++ b/kernel-open/nvidia-modeset/nvidia-modeset-os-interface.h @@ -0,0 +1,363 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Define the entry points which the NVKMS kernel interface layer + * provides to core NVKMS. 
+ */ + +#if !defined(_NVIDIA_MODESET_OS_INTERFACE_H_) +#define _NVIDIA_MODESET_OS_INTERFACE_H_ + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) +#include /* size_t */ +#else +#include /* size_t */ +#endif +#include "nvtypes.h" /* NvU8 */ + +#include "nvkms.h" +#include "nv_stdarg.h" + +enum NvKmsSyncPtOp { + NVKMS_SYNCPT_OP_ALLOC, + NVKMS_SYNCPT_OP_GET, + NVKMS_SYNCPT_OP_PUT, + NVKMS_SYNCPT_OP_INCR_MAX, + NVKMS_SYNCPT_OP_CPU_INCR, + NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH, + NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD, + NVKMS_SYNCPT_OP_READ_MINVAL, + NVKMS_SYNCPT_OP_READ_MAXVAL, + NVKMS_SYNCPT_OP_SET_MIN_EQ_MAX, + NVKMS_SYNCPT_OP_SET_MAXVAL, +}; + +typedef struct { + + struct { + const char *syncpt_name; /* in */ + NvU32 id; /* out */ + } alloc; + + struct { + NvU32 id; /* in */ + } get; + + struct { + NvU32 id; /* in */ + } put; + + struct { + NvU32 id; /* in */ + NvU32 incr; /* in */ + NvU32 value; /* out */ + } incr_max; + + struct { + NvU32 id; /* in */ + } cpu_incr; + + struct { + NvS32 fd; /* in */ + NvU32 id; /* out */ + NvU32 thresh; /* out */ + } fd_to_id_and_thresh; + + struct { + NvU32 id; /* in */ + NvU32 thresh; /* in */ + NvS32 fd; /* out */ + } id_and_thresh_to_fd; + + struct { + NvU32 id; /* in */ + NvU32 minval; /* out */ + } read_minval; + + struct { + NvU32 id; /* in */ + NvU32 maxval; /* out */ + } read_maxval; + + struct { + NvU32 id; /* in */ + } set_min_eq_max; + + struct { + NvU32 id; /* in */ + NvU32 val; /* in */ + } set_maxval; +} NvKmsSyncPtOpParams; + + +void nvkms_call_rm (void *ops); +void* nvkms_alloc (size_t size, + NvBool zero); +void nvkms_free (void *ptr, + size_t size); +void* nvkms_memset (void *ptr, + NvU8 c, + size_t size); +void* nvkms_memcpy (void *dest, + const void *src, + size_t n); +void* nvkms_memmove (void *dest, + const void *src, + size_t n); +int nvkms_memcmp (const void *s1, + const void *s2, + size_t n); +size_t nvkms_strlen (const char *s); +int nvkms_strcmp (const char *s1, + const char *s2); +char* nvkms_strncpy (char *dest, + const char *src, + size_t n); +void nvkms_usleep (NvU64 usec); +NvU64 nvkms_get_usec (void); +int nvkms_copyin (void *kptr, + NvU64 uaddr, + size_t n); +int nvkms_copyout (NvU64 uaddr, + const void *kptr, + size_t n); +void nvkms_yield (void); +void nvkms_dump_stack (void); +NvBool nvkms_syncpt_op (enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params); +int nvkms_snprintf (char *str, + size_t size, + const char *format, ...) + __attribute__((format (printf, 3, 4))); + +int nvkms_vsnprintf (char *str, + size_t size, + const char *format, + va_list ap); + +#define NVKMS_LOG_LEVEL_INFO 0 +#define NVKMS_LOG_LEVEL_WARN 1 +#define NVKMS_LOG_LEVEL_ERROR 2 + +void nvkms_log (const int level, + const char *gpuPrefix, + const char *msg); + +/*! + * Refcounted pointer to an object that may be freed while references still + * exist. + * + * This structure is intended to be used for nvkms timers to refer to objects + * that may be freed while timers with references to the object are still + * pending. + * + * When the owner of an nvkms_ref_ptr is freed, the teardown code should call + * nvkms_free_ref_ptr(). That marks the pointer as invalid so that later calls + * to nvkms_dec_ref() (i.e. from a workqueue callback) return NULL rather than + * the pointer originally passed to nvkms_alloc_ref_ptr(). + */ +struct nvkms_ref_ptr; + +/*! + * Allocate and initialize a ref_ptr. + * + * The pointer stored in the ref_ptr is initialized to ptr, and its refcount is + * initialized to 1. 
+ */ +struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr); + +/*! + * Clear a ref_ptr. + * + * This function sets the pointer stored in the ref_ptr to NULL and drops the + * reference created by nvkms_alloc_ref_ptr(). This function should be called + * when the object pointed to by the ref_ptr is freed. + * + * A caller should make sure that no code that can call nvkms_inc_ref() can + * execute after nvkms_free_ref_ptr() is called. + */ +void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Increment the refcount of a ref_ptr. + * + * This function should be used when a pointer to the ref_ptr is stored + * somewhere. For example, when the ref_ptr is used as the argument to + * nvkms_alloc_timer. + * + * This may be called outside of the nvkms_lock, for example by an RM callback. + */ +void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Decrement the refcount of a ref_ptr and extract the embedded pointer. + * + * This should be used by code that needs to atomically determine whether the + * object pointed to by the ref_ptr still exists. To prevent the object from + * being destroyed while the current thread is executing, this should be called + * from inside the nvkms_lock. + */ +void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr); + +typedef void nvkms_timer_proc_t(void *dataPtr, NvU32 dataU32); +typedef struct nvkms_timer_t nvkms_timer_handle_t; + +/*! + * Schedule a callback function to be called in the future. + * + * The callback function 'proc' will be called with the arguments + * 'dataPtr' and 'dataU32' at 'usec' (or later) microseconds from now. + * If usec==0, the callback will be scheduled to be called as soon as + * possible. + * + * The callback function is guaranteed to be called back with the + * nvkms_lock held, and in process context. + * + * Returns an opaque handle, nvkms_timer_handle_t*, or NULL on + * failure. If non-NULL, the caller is responsible for caching the + * handle and eventually calling nvkms_free_timer() to free the + * memory. + * + * The nvkms_lock may be held when nvkms_alloc_timer() is called, but + * the nvkms_lock is not required. + */ +nvkms_timer_handle_t* nvkms_alloc_timer (nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec); + +/*! + * Schedule a callback function to be called in the future. + * + * This function is like nvkms_alloc_timer() except that instead of returning a + * pointer to a structure that the caller should free later, the timer will free + * itself after executing the callback function. This is only intended for + * cases where the caller cannot cache the nvkms_alloc_timer() return value. + */ +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec); + +/*! + * Free the nvkms_timer_t object. If the callback function has not + * yet been called, freeing the nvkms_timer_handle_t will guarantee + * that it is not called. + * + * The nvkms_lock must be held when calling nvkms_free_timer(). + */ +void nvkms_free_timer (nvkms_timer_handle_t *handle); + + + +/*! + * Notify the NVKMS kernel interface that the event queue has changed. + * + * \param[in] pOpenKernel This indicates the file descriptor + * ("per-open") of the client whose event queue + * has been updated. This is the pointer + * passed by the kernel interface to nvKmsOpen(). + * \param[in] eventsAvailable If TRUE, a new event has been added to the + * event queue. If FALSE, the last event has + * been removed from the event queue. 
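[Editor's note: to make the ref_ptr/timer contract above concrete, here is a hedged usage sketch. MyObject and the helper functions are hypothetical; they only illustrate the documented pattern: nvkms_inc_ref() when the ref_ptr is handed to a timer, nvkms_dec_ref() under the nvkms_lock in the callback, and nvkms_free_ref_ptr() at teardown.]

struct MyObject {
    struct nvkms_ref_ptr *refPtr;  /* obtained from nvkms_alloc_ref_ptr(obj) */
};

static void MyTimerFired(void *dataPtr, NvU32 dataU32)
{
    /* Runs with the nvkms_lock held. dataPtr is the ref_ptr we scheduled
     * with; nvkms_dec_ref() drops the timer's reference and yields the
     * object, or NULL if the object was already torn down. */
    struct MyObject *obj = nvkms_dec_ref(dataPtr);

    if (obj == NULL) {
        return;
    }

    /* ... operate on obj ... */
}

static nvkms_timer_handle_t *MyScheduleTimer(struct MyObject *obj, NvU64 usec)
{
    nvkms_timer_handle_t *handle;

    /* Take a reference: the pending timer stores a pointer to the ref_ptr. */
    nvkms_inc_ref(obj->refPtr);

    handle = nvkms_alloc_timer(MyTimerFired, obj->refPtr, 0 /* dataU32 */, usec);
    if (handle == NULL) {
        /* Allocation failed; drop the reference we just took. */
        nvkms_dec_ref(obj->refPtr);
    }

    /* Per the contract above, the caller must cache 'handle' and eventually
     * release it with nvkms_free_timer() while holding the nvkms_lock. */
    return handle;
}

static void MyTeardownObject(struct MyObject *obj)
{
    /* Invalidate the ref_ptr so a timer that fires later observes NULL from
     * nvkms_dec_ref(); this also drops the allocation-time reference. */
    nvkms_free_ref_ptr(obj->refPtr);
    /* ... free obj ... */
}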
+ */ +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable); + + +/*! + * Get the "per-open" data (the pointer returned by nvKmsOpen()) + * associated with this fd. + */ +void* nvkms_get_per_open_data(int fd); + + +/*! + * Raise and lower the reference count of the specified GPU. + */ +NvBool nvkms_open_gpu(NvU32 gpuId); +void nvkms_close_gpu(NvU32 gpuId); + + +/*! + * Enumerate nvidia gpus. + */ + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info); + +/*! + * Availability of write combining support for video memory. + */ + +NvBool nvkms_allow_write_combining(void); + +/*! + * Checks whether the fd is associated with an nvidia character device. + */ +NvBool nvkms_fd_is_nvidia_chardev(int fd); + +/*! + * NVKMS interface for kernel space NVKMS clients like KAPI + */ + +struct nvkms_per_open; + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +); + +void nvkms_close_from_kapi(struct nvkms_per_open *popen); + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t params_size +); + +/*! + * APIs for locking. + */ + +typedef struct nvkms_sema_t nvkms_sema_handle_t; + +nvkms_sema_handle_t* + nvkms_sema_alloc (void); +void nvkms_sema_free (nvkms_sema_handle_t *sema); +void nvkms_sema_down (nvkms_sema_handle_t *sema); +void nvkms_sema_up (nvkms_sema_handle_t *sema); + +/*! + * APIs to register/unregister backlight device. + */ +struct nvkms_backlight_device; + +struct nvkms_backlight_device* +nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv, + NvU32 current_brightness); + +void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd); + +#endif /* _NVIDIA_MODESET_OS_INTERFACE_H_ */ + diff --git a/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild b/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild new file mode 100644 index 000000000..0475f26cf --- /dev/null +++ b/kernel-open/nvidia-modeset/nvidia-modeset.Kbuild @@ -0,0 +1,99 @@ +########################################################################### +# Kbuild fragment for nvidia-modeset.ko +########################################################################### + +# +# Define NVIDIA_MODESET_{SOURCES,OBJECTS} +# + +NVIDIA_MODESET_SOURCES = nvidia-modeset/nvidia-modeset-linux.c +NVIDIA_MODESET_SOURCES += nvidia-modeset/nv-kthread-q.c + +NVIDIA_MODESET_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_MODESET_SOURCES)) + +obj-m += nvidia-modeset.o +nvidia-modeset-y := $(NVIDIA_MODESET_OBJECTS) + +NVIDIA_MODESET_KO = nvidia-modeset/nvidia-modeset.ko + +NV_KERNEL_MODULE_TARGETS += $(NVIDIA_MODESET_KO) + + +# +# nv-modeset-kernel.o_binary is the core binary component of nvidia-modeset.ko, +# shared across all UNIX platforms. Create a symlink, "nv-modeset-kernel.o" +# that points to nv-modeset-kernel.o_binary, and add nv-modeset-kernel.o to the +# list of objects to link into nvidia-modeset.ko. +# +# Note that: +# - The kbuild "clean" rule will delete all objects in nvidia-modeset-y (which +# is why we use a symlink instead of just adding nv-modeset-kernel.o_binary +# to nvidia-modeset-y). +# - kbuild normally uses the naming convention of ".o_shipped" for +# binary files. That is not used here, because the kbuild rule to +# create the "normal" object file from ".o_shipped" does a copy, not +# a symlink. This file is quite large, so a symlink is preferred. +# - The file added to nvidia-modeset-y should be relative to gmake's cwd. 
+# But, the target for the symlink rule should be prepended with $(obj). +# + +NVIDIA_MODESET_BINARY_OBJECT := $(src)/nvidia-modeset/nv-modeset-kernel.o_binary +NVIDIA_MODESET_BINARY_OBJECT_O := nvidia-modeset/nv-modeset-kernel.o + +quiet_cmd_symlink = SYMLINK $@ +cmd_symlink = ln -sf $< $@ + +targets += $(NVIDIA_MODESET_BINARY_OBJECT_O) + +$(obj)/$(NVIDIA_MODESET_BINARY_OBJECT_O): $(NVIDIA_MODESET_BINARY_OBJECT) FORCE + $(call if_changed,symlink) + +nvidia-modeset-y += $(NVIDIA_MODESET_BINARY_OBJECT_O) + + +# +# Define nvidia-modeset.ko-specific CFLAGS. +# + +NVIDIA_MODESET_CFLAGS += -I$(src)/nvidia-modeset +NVIDIA_MODESET_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0 + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_MODESET_OBJECTS), $(NVIDIA_MODESET_CFLAGS)) + + +# +# Build nv-modeset-interface.o from the kernel interface layer +# objects, suitable for further processing by the installer and +# inclusion as a precompiled kernel interface file. +# + +NVIDIA_MODESET_INTERFACE := nvidia-modeset/nv-modeset-interface.o + +# Linux kernel v5.12 and later looks at "always-y", Linux kernel versions +# before v5.6 looks at "always"; kernel versions between v5.12 and v5.6 +# look at both. + +always += $(NVIDIA_MODESET_INTERFACE) +always-y += $(NVIDIA_MODESET_INTERFACE) + +$(obj)/$(NVIDIA_MODESET_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_MODESET_OBJECTS)) + $(LD) -r -o $@ $^ + +# +# Register the conftests needed by nvidia-modeset.ko +# + +NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_MODESET_OBJECTS) + +NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations +NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory +NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64 +NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data +NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove +NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup +NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node +NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64 +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_kthread_create_on_node diff --git a/kernel-open/nvidia-modeset/nvkms-ioctl.h b/kernel-open/nvidia-modeset/nvkms-ioctl.h new file mode 100644 index 000000000..cb2757313 --- /dev/null +++ b/kernel-open/nvidia-modeset/nvkms-ioctl.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_IOCTL_H) +#define NVKMS_IOCTL_H + +#include "nvtypes.h" + +/*! + * Some of the NVKMS ioctl parameter data structures are quite large + * and would exceed the parameter size constraints on at least SunOS. + * + * Redirect ioctls through a level of indirection: user-space assigns + * NvKmsIoctlParams with the real command, size, and pointer, and + * passes the NvKmsIoctlParams through the ioctl. + */ + +struct NvKmsIoctlParams { + NvU32 cmd; + NvU32 size; + NvU64 address NV_ALIGN_BYTES(8); +}; + +#define NVKMS_IOCTL_MAGIC 'm' +#define NVKMS_IOCTL_CMD 0 + +#define NVKMS_IOCTL_IOWR \ + _IOWR(NVKMS_IOCTL_MAGIC, NVKMS_IOCTL_CMD, struct NvKmsIoctlParams) + +/*! + * User-space pointers are always passed to NVKMS in an NvU64. + * This user-space address is eventually passed into the platform's + * copyin/copyout functions, in a void* argument. + * + * This utility function converts from an NvU64 to a pointer. + */ + +static inline void *nvKmsNvU64ToPointer(NvU64 value) +{ + return (void *)(NvUPtr)value; +} + +/*! + * Before casting the NvU64 to a void*, check that casting to a pointer + * size within the kernel does not lose any precision in the current + * environment. + */ +static inline NvBool nvKmsNvU64AddressIsSafe(NvU64 address) +{ + return address == (NvU64)(NvUPtr)address; +} + +#endif /* NVKMS_IOCTL_H */ diff --git a/kernel-open/nvidia-modeset/nvkms.h b/kernel-open/nvidia-modeset/nvkms.h new file mode 100644 index 000000000..1276186ce --- /dev/null +++ b/kernel-open/nvidia-modeset/nvkms.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
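[Editor's note: pulling the ioctl indirection described above together, a user-space client wraps every real request in an NvKmsIoctlParams and issues the single NVKMS_IOCTL_IOWR ioctl. The sketch below is illustrative only; the wrapper name and error handling are not part of the driver. On the kernel side, the header's nvKmsNvU64AddressIsSafe() and nvKmsNvU64ToPointer() helpers exist to validate and convert the user-space address before copyin/copyout.]

#include <sys/ioctl.h>

/* Illustrative wrapper: 'fd' is an open NVKMS device file descriptor. */
static int nvkms_issue_ioctl(int fd, NvU32 cmd, void *request, NvU32 requestSize)
{
    struct NvKmsIoctlParams params = {
        .cmd     = cmd,
        .size    = requestSize,
        .address = (NvU64)(NvUPtr)request,
    };

    return ioctl(fd, NVKMS_IOCTL_IOWR, &params);
}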
+ */ + +#ifndef __NV_KMS_H__ +#define __NV_KMS_H__ + +#include "nvtypes.h" +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) +#include /* size_t */ +#else +#include /* size_t */ +#endif + +#include "nvkms-kapi.h" + +typedef struct nvkms_per_open nvkms_per_open_handle_t; + +typedef void nvkms_procfs_out_string_func_t(void *data, + const char *str); + +typedef void nvkms_procfs_proc_t(void *data, + char *buffer, size_t size, + nvkms_procfs_out_string_func_t *outString); + +typedef struct { + const char *name; + nvkms_procfs_proc_t *func; +} nvkms_procfs_file_t; + +enum NvKmsClientType { + NVKMS_CLIENT_USER_SPACE, + NVKMS_CLIENT_KERNEL_SPACE, +}; + +NvBool nvKmsIoctl( + void *pOpenVoid, + NvU32 cmd, + NvU64 paramsAddress, + const size_t paramSize); + +void nvKmsClose(void *pOpenVoid); + +void* nvKmsOpen( + NvU32 pid, + enum NvKmsClientType clientType, + nvkms_per_open_handle_t *pOpenKernel); + +NvBool nvKmsModuleLoad(void); + +void nvKmsModuleUnload(void); + +void nvKmsSuspend(NvU32 gpuId); +void nvKmsResume(NvU32 gpuId); + +void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles); + +void nvKmsKapiHandleEventQueueChange +( + struct NvKmsKapiDevice *device +); + +NvBool nvKmsKapiGetFunctionsTableInternal +( + struct NvKmsKapiFunctionsTable *funcsTable +); + +NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness); +NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness); + +#endif /* __NV_KMS_H__ */ diff --git a/kernel-open/nvidia-peermem/nv-p2p.h b/kernel-open/nvidia-peermem/nv-p2p.h new file mode 100644 index 000000000..c2059145d --- /dev/null +++ b/kernel-open/nvidia-peermem/nv-p2p.h @@ -0,0 +1,427 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_P2P_H_ +#define _NV_P2P_H_ + +/* + * NVIDIA P2P Structure Versioning + * + * For the nvidia_p2p_*_t structures allocated by the NVIDIA driver, it will + * set the version field of the structure according to the definition used by + * the NVIDIA driver. The "major" field of the version is defined as the upper + * 16 bits, and the "minor" field of the version is defined as the lower 16 + * bits. 
The version field will always be the first 4 bytes of the structure, + * and third-party drivers should check the value of this field in structures + * allocated by the NVIDIA driver to ensure runtime compatibility. + * + * In general, version numbers will be incremented as follows: + * - When a backwards-compatible change is made to the structure layout, the + * minor version for that structure will be incremented. Third-party drivers + * built against an older minor version will continue to work with the newer + * minor version used by the NVIDIA driver, without recompilation. + * - When a breaking change is made to the structure layout, the major version + * will be incremented. Third-party drivers built against an older major + * version require at least recompilation and potentially additional updates + * to use the new API. + */ +#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000 +#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff + +#define NVIDIA_P2P_MAJOR_VERSION(v) \ + (((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16) + +#define NVIDIA_P2P_MINOR_VERSION(v) \ + (((v) & NVIDIA_P2P_MINOR_VERSION_MASK)) + +#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \ + (NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v)) + +#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \ + (NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \ + (NVIDIA_P2P_MINOR_VERSION((p)->version) >= (NVIDIA_P2P_MINOR_VERSION(v)))) + +enum { + NVIDIA_P2P_ARCHITECTURE_TESLA = 0, + NVIDIA_P2P_ARCHITECTURE_FERMI, + NVIDIA_P2P_ARCHITECTURE_CURRENT = NVIDIA_P2P_ARCHITECTURE_FERMI +}; + +#define NVIDIA_P2P_PARAMS_VERSION 0x00010001 + +enum { + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_GPU = 0, + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE, + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX = \ + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE +}; + +#define NVIDIA_P2P_GPU_UUID_LEN 16 + +typedef +struct nvidia_p2p_params { + uint32_t version; + uint32_t architecture; + union nvidia_p2p_mailbox_addresses { + struct { + uint64_t wmb_addr; + uint64_t wmb_data; + uint64_t rreq_addr; + uint64_t rcomp_addr; + uint64_t reserved[2]; + } fermi; + } addresses[NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX+1]; +} nvidia_p2p_params_t; + +/* + * Capability flag for users to detect + * driver support for persistent pages. + */ +extern int nvidia_p2p_cap_persistent_pages; +#define NVIDIA_P2P_CAP_PERSISTENT_PAGES + +/* + * This API is not supported. + */ +int nvidia_p2p_init_mapping(uint64_t p2p_token, + struct nvidia_p2p_params *params, + void (*destroy_callback)(void *data), + void *data); + +/* + * This API is not supported. 
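[Editor's note: as a concrete reading of the versioning rules above, a third-party kernel driver might gate its use of a driver-allocated structure as sketched below. The function name is illustrative, and NVIDIA_P2P_PARAMS_VERSION stands in for whichever structure version the caller actually consumes.]

static int my_check_params_version(const nvidia_p2p_params_t *params)
{
    /* Major versions must match exactly; the driver's minor version must be
     * at least the one this code was built against. */
    if (!NVIDIA_P2P_VERSION_COMPATIBLE(params, NVIDIA_P2P_PARAMS_VERSION)) {
        printk(KERN_ERR "incompatible p2p params version 0x%08x (built for %u.%u)\n",
               params->version,
               NVIDIA_P2P_MAJOR_VERSION(NVIDIA_P2P_PARAMS_VERSION),
               NVIDIA_P2P_MINOR_VERSION(NVIDIA_P2P_PARAMS_VERSION));
        return -EINVAL;
    }
    return 0;
}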
+ */ +int nvidia_p2p_destroy_mapping(uint64_t p2p_token); + +enum nvidia_p2p_page_size_type { + NVIDIA_P2P_PAGE_SIZE_4KB = 0, + NVIDIA_P2P_PAGE_SIZE_64KB, + NVIDIA_P2P_PAGE_SIZE_128KB, + NVIDIA_P2P_PAGE_SIZE_COUNT +}; + +typedef +struct nvidia_p2p_page { + uint64_t physical_address; + union nvidia_p2p_request_registers { + struct { + uint32_t wreqmb_h; + uint32_t rreqmb_h; + uint32_t rreqmb_0; + uint32_t reserved[3]; + } fermi; + } registers; +} nvidia_p2p_page_t; + +#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00010002 + +#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION) + +typedef +struct nvidia_p2p_page_table { + uint32_t version; + uint32_t page_size; /* enum nvidia_p2p_page_size_type */ + struct nvidia_p2p_page **pages; + uint32_t entries; + uint8_t *gpu_uuid; +} nvidia_p2p_page_table_t; + +/* + * @brief + * Make the pages underlying a range of GPU virtual memory + * accessible to a third-party device. + * + * This API only supports pinned, GPU-resident memory, such as that provided + * by cudaMalloc(). + * + * This API may sleep. + * + * @param[in] p2p_token + * A token that uniquely identifies the P2P mapping. + * @param[in] va_space + * A GPU virtual address space qualifier. + * @param[in] virtual_address + * The start address in the specified virtual address space. + * Address must be aligned to the 64KB boundary. + * @param[in] length + * The length of the requested P2P mapping. + * Length must be a multiple of 64KB. + * @param[out] page_table + * A pointer to an array of structures with P2P PTEs. + * @param[in] free_callback + * A pointer to the function to be invoked when the pages + * underlying the virtual address range are freed + * implicitly. + * If NULL, persistent pages will be returned. + * This means the pages underlying the range of GPU virtual memory + * will persist until explicitly freed by nvidia_p2p_put_pages(). + * Persistent GPU memory mappings are not supported on PowerPC, + + + + * MIG-enabled devices and vGPU. + + * @param[in] data + * A non-NULL opaque pointer to private data to be passed to the + * callback function. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -ENOMEM if the driver failed to allocate memory or if + * insufficient resources were available to complete the operation. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space, + uint64_t virtual_address, + uint64_t length, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void *data), + void *data); + +#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003 + +#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION) + +struct pci_dev; + +typedef +struct nvidia_p2p_dma_mapping { + uint32_t version; + enum nvidia_p2p_page_size_type page_size_type; + uint32_t entries; + uint64_t *dma_addresses; + void *private; + struct pci_dev *pci_dev; +} nvidia_p2p_dma_mapping_t; + +/* + * @brief + * Make the physical pages retrieved using nvidia_p2p_get_pages accessible to + * a third-party device. + * + * @param[in] peer + * The struct pci_dev * of the peer device that needs to DMA to/from the + * mapping. + * @param[in] page_table + * The page table outlining the physical pages underlying the mapping, as + * retrieved with nvidia_p2p_get_pages(). 
+ * @param[out] dma_mapping + * The DMA mapping containing the DMA addresses to use on the third-party + * device. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_dma_map_pages(struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping **dma_mapping); + +/* + * @brief + * Unmap the physical pages previously mapped to the third-party device by + * nvidia_p2p_dma_map_pages(). + * + * @param[in] peer + * The struct pci_dev * of the peer device that the DMA mapping belongs to. + * @param[in] page_table + * The page table backing the DMA mapping to be unmapped. + * @param[in] dma_mapping + * The DMA mapping containing the DMA addresses used by the third-party + * device, as retrieved with nvidia_p2p_dma_map_pages(). After this call + * returns, neither this struct nor the addresses contained within will be + * valid for use by the third-party device. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping *dma_mapping); + +/* + * @brief + * Release a set of pages previously made accessible to + * a third-party device. + * + * @param[in] p2p_token + * A token that uniquely identifies the P2P mapping. + * @param[in] va_space + * A GPU virtual address space qualifier. + * @param[in] virtual_address + * The start address in the specified virtual address space. + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space, + uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Free a third-party P2P page table. (This function is a no-op.) + * + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + */ +int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Free a third-party P2P DMA mapping. (This function is a no-op.) + * + * @param[in] dma_mapping + * A pointer to the DMA mapping structure. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + */ +int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping); + +#define NVIDIA_P2P_RSYNC_DRIVER_VERSION 0x00010001 + +#define NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_DRIVER_VERSION) + +typedef +struct nvidia_p2p_rsync_driver { + uint32_t version; + int (*get_relaxed_ordering_mode)(int *mode, void *data); + void (*put_relaxed_ordering_mode)(int mode, void *data); + void (*wait_for_rsync)(struct pci_dev *gpu, void *data); +} nvidia_p2p_rsync_driver_t; + +/* + * @brief + * Registers the rsync driver. + * + * @param[in] driver + * A pointer to the rsync driver structure. The NVIDIA driver would use, + * + * get_relaxed_ordering_mode to obtain a reference to the current relaxed + * ordering mode (treated as a boolean) from the rsync driver. 
+ * + * put_relaxed_ordering_mode to release a reference to the current relaxed + * ordering mode back to the rsync driver. The NVIDIA driver will call this + * function once for each successful call to get_relaxed_ordering_mode, and + * the relaxed ordering mode must not change until the last reference is + * released. + * + * wait_for_rsync to call into the rsync module to issue RSYNC. This callback + * can't sleep or re-schedule as it may arrive under spinlocks. + * @param[in] data + * A pointer to the rsync driver's private data. + * + * @Returns + * 0 upon successful completion. + * -EINVAL parameters are incorrect. + * -EBUSY if a module is already registered or GPU devices are in use. + */ +int nvidia_p2p_register_rsync_driver(nvidia_p2p_rsync_driver_t *driver, + void *data); + +/* + * @brief + * Unregisters the rsync driver. + * + * @param[in] driver + * A pointer to the rsync driver structure. + * @param[in] data + * A pointer to the rsync driver's private data. + */ +void nvidia_p2p_unregister_rsync_driver(nvidia_p2p_rsync_driver_t *driver, + void *data); + +#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION 0x00020001 + +#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_REG_INFO_VERSION) + +typedef struct nvidia_p2p_rsync_reg { + void *ptr; + size_t size; + struct pci_dev *ibmnpu; + struct pci_dev *gpu; + uint32_t cluster_id; + uint32_t socket_id; +} nvidia_p2p_rsync_reg_t; + +typedef struct nvidia_p2p_rsync_reg_info { + uint32_t version; + nvidia_p2p_rsync_reg_t *regs; + size_t entries; +} nvidia_p2p_rsync_reg_info_t; + +/* + * @brief + * Gets rsync (GEN-ID) register information associated with the supported + * NPUs. + * + * The caller would use the returned information {GPU device, NPU device, + * socket-id, cluster-id} to pick the optimal generation registers to issue + * RSYNC (NVLink HW flush). + * + * The interface allocates structures to return the information, hence + * nvidia_p2p_put_rsync_registers() must be called to free the structures. + * + * Note, cluster-id is hardcoded to zero as early system configurations would + * only support cluster mode i.e. all devices would share the same cluster-id + * (0). In the future, appropriate kernel support would be needed to query + * cluster-ids. + * + * @param[out] reg_info + * A pointer to the rsync reg info structure. + * + * @Returns + * 0 Upon successful completion. Otherwise, returns negative value. + */ +int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info); + +/* + * @brief + * Frees the structures allocated by nvidia_p2p_get_rsync_registers(). + * + * @param[in] reg_info + * A pointer to the rsync reg info structure. 
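[Editor's note: for reference, a minimal sketch of what an rsync driver registration might look like under the contract above. All names here are hypothetical; the only hard requirements restated from the documentation are the callback signatures and that wait_for_rsync must not sleep, since it can be invoked under spinlocks.]

static int my_get_relaxed_ordering_mode(int *mode, void *data)
{
    *mode = 1;   /* report relaxed ordering currently enabled (illustrative) */
    return 0;
}

static void my_put_relaxed_ordering_mode(int mode, void *data)
{
    /* Release the reference handed out by my_get_relaxed_ordering_mode(). */
}

static void my_wait_for_rsync(struct pci_dev *gpu, void *data)
{
    /* Issue RSYNC for 'gpu'; must not sleep or reschedule. */
}

static nvidia_p2p_rsync_driver_t my_rsync_driver = {
    .version                   = NVIDIA_P2P_RSYNC_DRIVER_VERSION,
    .get_relaxed_ordering_mode = my_get_relaxed_ordering_mode,
    .put_relaxed_ordering_mode = my_put_relaxed_ordering_mode,
    .wait_for_rsync            = my_wait_for_rsync,
};

/* int ret = nvidia_p2p_register_rsync_driver(&my_rsync_driver, NULL);
 * ...
 * nvidia_p2p_unregister_rsync_driver(&my_rsync_driver, NULL);
 */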
+ */ +void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info); + +#endif /* _NV_P2P_H_ */ diff --git a/kernel-open/nvidia-peermem/nvidia-peermem.Kbuild b/kernel-open/nvidia-peermem/nvidia-peermem.Kbuild new file mode 100644 index 000000000..3204180a7 --- /dev/null +++ b/kernel-open/nvidia-peermem/nvidia-peermem.Kbuild @@ -0,0 +1,61 @@ +########################################################################### +# Kbuild fragment for nvidia-peermem.ko +########################################################################### + +# +# Define NVIDIA_PEERMEM_{SOURCES,OBJECTS} +# + +NVIDIA_PEERMEM_SOURCES = +NVIDIA_PEERMEM_SOURCES += nvidia-peermem/nvidia-peermem.c + +NVIDIA_PEERMEM_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_PEERMEM_SOURCES)) + +obj-m += nvidia-peermem.o +nvidia-peermem-y := $(NVIDIA_PEERMEM_OBJECTS) + +NVIDIA_PEERMEM_KO = nvidia-peermem/nvidia-peermem.ko + +NV_KERNEL_MODULE_TARGETS += $(NVIDIA_PEERMEM_KO) + +# +# Define nvidia-peermem.ko-specific CFLAGS. +# +NVIDIA_PEERMEM_CFLAGS += -I$(src)/nvidia-peermem +NVIDIA_PEERMEM_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_BUILD_MODULE_INSTANCES=0 + +# +# In case of MOFED installation, nvidia-peermem compilation +# needs paths to the MOFED headers in CFLAGS. +# MOFED's Module.symvers is needed for the build +# to find the additional ib_* symbols. +# +OFA_DIR := /usr/src/ofa_kernel +OFA_CANDIDATES = $(OFA_DIR)/$(ARCH)/$(KERNELRELEASE) $(OFA_DIR)/$(KERNELRELEASE) $(OFA_DIR)/default /var/lib/dkms/mlnx-ofed-kernel +MLNX_OFED_KERNEL := $(shell for d in $(OFA_CANDIDATES); do \ + if [ -d "$$d" ]; then \ + echo "$$d"; \ + exit 0; \ + fi; \ + done; \ + echo $(OFA_DIR) \ + ) + +ifneq ($(shell test -d $(MLNX_OFED_KERNEL) && echo "true" || echo "" ),) + NVIDIA_PEERMEM_CFLAGS += -I$(MLNX_OFED_KERNEL)/include -I$(MLNX_OFED_KERNEL)/include/rdma + KBUILD_EXTRA_SYMBOLS := $(MLNX_OFED_KERNEL)/Module.symvers +endif + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_PEERMEM_OBJECTS), $(NVIDIA_PEERMEM_CFLAGS)) + +# +# Register the conftests needed by nvidia-peermem.ko +# + +NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_PEERMEM_OBJECTS) + +NV_CONFTEST_GENERIC_COMPILE_TESTS += ib_peer_memory_symbols + +NV_CONFTEST_FUNCTION_COMPILE_TESTS += + +NV_CONFTEST_TYPE_COMPILE_TESTS += diff --git a/kernel-open/nvidia-peermem/nvidia-peermem.c b/kernel-open/nvidia-peermem/nvidia-peermem.c new file mode 100644 index 000000000..699d1cf17 --- /dev/null +++ b/kernel-open/nvidia-peermem/nvidia-peermem.c @@ -0,0 +1,523 @@ +/* SPDX-License-Identifier: Linux-OpenIB */ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nv-p2p.h" +#include "peer_mem.h" +#include "conftest.h" + +#define DRV_NAME "nv_mem" +#define DRV_VERSION NV_VERSION_STRING + +MODULE_AUTHOR("Yishai Hadas"); +MODULE_DESCRIPTION("NVIDIA GPU memory plug-in"); +MODULE_LICENSE("Linux-OpenIB"); +MODULE_VERSION(DRV_VERSION); +enum { + NV_MEM_PEERDIRECT_SUPPORT_DEFAULT = 0, + NV_MEM_PEERDIRECT_SUPPORT_LEGACY = 1, +}; +static int peerdirect_support = NV_MEM_PEERDIRECT_SUPPORT_DEFAULT; +module_param(peerdirect_support, int, S_IRUGO); +MODULE_PARM_DESC(peerdirect_support, "Set level of support for Peer-direct, 0 [default] or 1 [legacy, for example MLNX_OFED 4.9 LTS]"); + +#define peer_err(FMT, ARGS...) printk(KERN_ERR "nvidia-peermem" " %s:%d " FMT, __FUNCTION__, __LINE__, ## ARGS) + +#if defined(NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT) + +#ifndef READ_ONCE +#define READ_ONCE(x) ACCESS_ONCE(x) +#endif + +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif + +#define GPU_PAGE_SHIFT 16 +#define GPU_PAGE_SIZE ((u64)1 << GPU_PAGE_SHIFT) +#define GPU_PAGE_OFFSET (GPU_PAGE_SIZE-1) +#define GPU_PAGE_MASK (~GPU_PAGE_OFFSET) + +invalidate_peer_memory mem_invalidate_callback; +static void *reg_handle = NULL; +static void *reg_handle_nc = NULL; + +struct nv_mem_context { + struct nvidia_p2p_page_table *page_table; + struct nvidia_p2p_dma_mapping *dma_mapping; + u64 core_context; + u64 page_virt_start; + u64 page_virt_end; + size_t mapped_size; + unsigned long npages; + unsigned long page_size; + struct task_struct *callback_task; + int sg_allocated; + struct sg_table sg_head; +}; + + +static void nv_get_p2p_free_callback(void *data) +{ + int ret = 0; + struct nv_mem_context *nv_mem_context = (struct nv_mem_context *)data; + struct nvidia_p2p_page_table *page_table = NULL; + struct nvidia_p2p_dma_mapping *dma_mapping = NULL; + + __module_get(THIS_MODULE); + if (!nv_mem_context) { + peer_err("nv_get_p2p_free_callback -- invalid nv_mem_context\n"); + goto out; + } + + if (!nv_mem_context->page_table) { + peer_err("nv_get_p2p_free_callback -- invalid page_table\n"); + goto out; + } + + /* Save page_table locally to prevent it being freed as part of nv_mem_release + * in case it's called internally by that callback. + */ + page_table = nv_mem_context->page_table; + + if (!nv_mem_context->dma_mapping) { + peer_err("nv_get_p2p_free_callback -- invalid dma_mapping\n"); + goto out; + } + dma_mapping = nv_mem_context->dma_mapping; + + /* For now don't set nv_mem_context->page_table to NULL, + * confirmed by NVIDIA that inflight put_pages with valid pointer will fail gracefully. 
+ */ + + nv_mem_context->callback_task = current; + (*mem_invalidate_callback) (reg_handle, nv_mem_context->core_context); + nv_mem_context->callback_task = NULL; + + ret = nvidia_p2p_free_dma_mapping(dma_mapping); + if (ret) + peer_err("nv_get_p2p_free_callback -- error %d while calling nvidia_p2p_free_dma_mapping()\n", ret); + + ret = nvidia_p2p_free_page_table(page_table); + if (ret) + peer_err("nv_get_p2p_free_callback -- error %d while calling nvidia_p2p_free_page_table()\n", ret); + +out: + module_put(THIS_MODULE); + return; + +} + +/* At that function we don't call IB core - no ticket exists */ +static void nv_mem_dummy_callback(void *data) +{ + struct nv_mem_context *nv_mem_context = (struct nv_mem_context *)data; + int ret = 0; + + __module_get(THIS_MODULE); + + ret = nvidia_p2p_free_page_table(nv_mem_context->page_table); + if (ret) + peer_err("nv_mem_dummy_callback -- error %d while calling nvidia_p2p_free_page_table()\n", ret); + + module_put(THIS_MODULE); + return; +} + +/* acquire return code: 1 mine, 0 - not mine */ +static int nv_mem_acquire(unsigned long addr, size_t size, void *peer_mem_private_data, + char *peer_mem_name, void **client_context) +{ + + int ret = 0; + struct nv_mem_context *nv_mem_context; + + nv_mem_context = kzalloc(sizeof *nv_mem_context, GFP_KERNEL); + if (!nv_mem_context) + /* Error case handled as not mine */ + return 0; + + nv_mem_context->page_virt_start = addr & GPU_PAGE_MASK; + nv_mem_context->page_virt_end = (addr + size + GPU_PAGE_SIZE - 1) & GPU_PAGE_MASK; + nv_mem_context->mapped_size = nv_mem_context->page_virt_end - nv_mem_context->page_virt_start; + + ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size, + &nv_mem_context->page_table, nv_mem_dummy_callback, nv_mem_context); + + if (ret < 0) + goto err; + + ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start, + nv_mem_context->page_table); + if (ret < 0) { + /* Not expected, however in case callback was called on that buffer just before + put pages we'll expect to fail gracefully (confirmed by NVIDIA) and return an error. 
+ */ + peer_err("nv_mem_acquire -- error %d while calling nvidia_p2p_put_pages()\n", ret); + goto err; + } + + /* 1 means mine */ + *client_context = nv_mem_context; + __module_get(THIS_MODULE); + return 1; + +err: + kfree(nv_mem_context); + + /* Error case handled as not mine */ + return 0; +} + +static int nv_dma_map(struct sg_table *sg_head, void *context, + struct device *dma_device, int dmasync, + int *nmap) +{ + int i, ret; + struct scatterlist *sg; + struct nv_mem_context *nv_mem_context = + (struct nv_mem_context *) context; + struct nvidia_p2p_page_table *page_table = nv_mem_context->page_table; + struct nvidia_p2p_dma_mapping *dma_mapping; + struct pci_dev *pdev = to_pci_dev(dma_device); + + if (page_table->page_size != NVIDIA_P2P_PAGE_SIZE_64KB) { + peer_err("nv_dma_map -- assumption of 64KB pages failed size_id=%u\n", + nv_mem_context->page_table->page_size); + return -EINVAL; + } + + if (!pdev) { + peer_err("nv_dma_map -- invalid pci_dev\n"); + return -EINVAL; + } + + ret = nvidia_p2p_dma_map_pages(pdev, page_table, &dma_mapping); + if (ret) { + peer_err("nv_dma_map -- error %d while calling nvidia_p2p_dma_map_pages()\n", ret); + return ret; + } + + if (!NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(dma_mapping)) { + peer_err("error, incompatible dma mapping version 0x%08x\n", + dma_mapping->version); + nvidia_p2p_dma_unmap_pages(pdev, page_table, dma_mapping); + return -EINVAL; + } + + nv_mem_context->npages = dma_mapping->entries; + + ret = sg_alloc_table(sg_head, dma_mapping->entries, GFP_KERNEL); + if (ret) { + nvidia_p2p_dma_unmap_pages(pdev, page_table, dma_mapping); + return ret; + } + + nv_mem_context->dma_mapping = dma_mapping; + nv_mem_context->sg_allocated = 1; + for_each_sg(sg_head->sgl, sg, nv_mem_context->npages, i) { + sg_set_page(sg, NULL, nv_mem_context->page_size, 0); + sg->dma_address = dma_mapping->dma_addresses[i]; + sg->dma_length = nv_mem_context->page_size; + } + nv_mem_context->sg_head = *sg_head; + *nmap = nv_mem_context->npages; + + return 0; +} + +static int nv_dma_unmap(struct sg_table *sg_head, void *context, + struct device *dma_device) +{ + struct pci_dev *pdev = to_pci_dev(dma_device); + struct nv_mem_context *nv_mem_context = + (struct nv_mem_context *)context; + + if (!nv_mem_context) { + peer_err("nv_dma_unmap -- invalid nv_mem_context\n"); + return -EINVAL; + } + + if (WARN_ON(0 != memcmp(sg_head, &nv_mem_context->sg_head, sizeof(*sg_head)))) + return -EINVAL; + + if (nv_mem_context->callback_task == current) + goto out; + + if (nv_mem_context->dma_mapping) + nvidia_p2p_dma_unmap_pages(pdev, nv_mem_context->page_table, + nv_mem_context->dma_mapping); + +out: + return 0; +} + + +static void nv_mem_put_pages(struct sg_table *sg_head, void *context) +{ + int ret = 0; + struct nv_mem_context *nv_mem_context = + (struct nv_mem_context *) context; + + if (!nv_mem_context) { + peer_err("nv_mem_put_pages -- invalid nv_mem_context\n"); + return; + } + + if (WARN_ON(0 != memcmp(sg_head, &nv_mem_context->sg_head, sizeof(*sg_head)))) + return; + + if (nv_mem_context->callback_task == current) + return; + + ret = nvidia_p2p_put_pages(0, 0, nv_mem_context->page_virt_start, + nv_mem_context->page_table); + +#ifdef _DEBUG_ONLY_ + /* Here we expect an error in real life cases that should be ignored - not printed. + * (e.g. 
concurrent callback with that call) + */ + if (ret < 0) { + printk(KERN_ERR "error %d while calling nvidia_p2p_put_pages, page_table=%p \n", + ret, nv_mem_context->page_table); + } +#endif + + return; +} + +static void nv_mem_release(void *context) +{ + struct nv_mem_context *nv_mem_context = + (struct nv_mem_context *) context; + if (nv_mem_context->sg_allocated) { + sg_free_table(&nv_mem_context->sg_head); + nv_mem_context->sg_allocated = 0; + } + kfree(nv_mem_context); + module_put(THIS_MODULE); + return; +} + +static int nv_mem_get_pages(unsigned long addr, + size_t size, int write, int force, + struct sg_table *sg_head, + void *client_context, + u64 core_context) +{ + int ret; + struct nv_mem_context *nv_mem_context; + + nv_mem_context = (struct nv_mem_context *)client_context; + if (!nv_mem_context) + return -EINVAL; + + nv_mem_context->core_context = core_context; + nv_mem_context->page_size = GPU_PAGE_SIZE; + + ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size, + &nv_mem_context->page_table, nv_get_p2p_free_callback, nv_mem_context); + if (ret < 0) { + peer_err("error %d while calling nvidia_p2p_get_pages()\n", ret); + return ret; + } + + /* No extra access to nv_mem_context->page_table here as we are + called not under a lock and may race with inflight invalidate callback on that buffer. + Extra handling was delayed to be done under nv_dma_map. + */ + return 0; +} + +static unsigned long nv_mem_get_page_size(void *context) +{ + struct nv_mem_context *nv_mem_context = + (struct nv_mem_context *)context; + + return nv_mem_context->page_size; +} + + +static struct peer_memory_client_ex nv_mem_client_ex = { .client = { + .acquire = nv_mem_acquire, + .get_pages = nv_mem_get_pages, + .dma_map = nv_dma_map, + .dma_unmap = nv_dma_unmap, + .put_pages = nv_mem_put_pages, + .get_page_size = nv_mem_get_page_size, + .release = nv_mem_release, +}}; + +static int nv_mem_get_pages_nc(unsigned long addr, + size_t size, int write, int force, + struct sg_table *sg_head, + void *client_context, + u64 core_context) +{ + int ret; + struct nv_mem_context *nv_mem_context; + + nv_mem_context = (struct nv_mem_context *)client_context; + if (!nv_mem_context) + return -EINVAL; + + nv_mem_context->core_context = core_context; + nv_mem_context->page_size = GPU_PAGE_SIZE; + + ret = nvidia_p2p_get_pages(0, 0, nv_mem_context->page_virt_start, nv_mem_context->mapped_size, + &nv_mem_context->page_table, NULL, NULL); + if (ret < 0) { + peer_err("error %d while calling nvidia_p2p_get_pages() with NULL callback\n", ret); + return ret; + } + + return 0; +} + +static struct peer_memory_client nv_mem_client_nc = { + .acquire = nv_mem_acquire, + .get_pages = nv_mem_get_pages_nc, + .dma_map = nv_dma_map, + .dma_unmap = nv_dma_unmap, + .put_pages = nv_mem_put_pages, + .get_page_size = nv_mem_get_page_size, + .release = nv_mem_release, +}; + +#endif /* NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT */ + +static int nv_mem_param_conf_check(void) +{ + int rc = 0; + switch (peerdirect_support) { + case NV_MEM_PEERDIRECT_SUPPORT_DEFAULT: + case NV_MEM_PEERDIRECT_SUPPORT_LEGACY: + break; + default: + peer_err("invalid peerdirect_support param value %d\n", peerdirect_support); + rc = -EINVAL; + break; + } + return rc; +} + +static int __init nv_mem_client_init(void) +{ + int rc; + rc = nv_mem_param_conf_check(); + if (rc) { + return rc; + } + +#if defined (NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT) + int status = 0; + + // off by one, to leave space for the trailing '1' which is flagging + // the new 
client type + BUG_ON(strlen(DRV_NAME) > IB_PEER_MEMORY_NAME_MAX-1); + strcpy(nv_mem_client_ex.client.name, DRV_NAME); + + // [VER_MAX-1]=1 <-- last byte is used as flag + // [VER_MAX-2]=0 <-- version string terminator + BUG_ON(strlen(DRV_VERSION) > IB_PEER_MEMORY_VER_MAX-2); + strcpy(nv_mem_client_ex.client.version, DRV_VERSION); + + nv_mem_client_ex.client.version[IB_PEER_MEMORY_VER_MAX-1] = 1; + + if (peerdirect_support != NV_MEM_PEERDIRECT_SUPPORT_LEGACY) { + nv_mem_client_ex.ex_size = sizeof(struct peer_memory_client_ex); + // PEER_MEM_INVALIDATE_UNMAPS allow clients to opt out of + // unmap/put_pages during invalidation, i.e. the client tells the + // infiniband layer that it does not need to call + // unmap/put_pages in the invalidation callback + nv_mem_client_ex.flags = PEER_MEM_INVALIDATE_UNMAPS; + } else { + nv_mem_client_ex.ex_size = 0; + nv_mem_client_ex.flags = 0; + } + + reg_handle = ib_register_peer_memory_client(&nv_mem_client_ex.client, + &mem_invalidate_callback); + if (!reg_handle) { + peer_err("nv_mem_client_init -- error while registering traditional client\n"); + status = -EINVAL; + goto out; + } + + // The nc client enables support for persistent pages. + // Thanks to this check, nvidia-peermem requires the new symbol from nvidia.ko, which + // prevents users to unintentionally load this module with unsupported nvidia.ko. + BUG_ON(!nvidia_p2p_cap_persistent_pages); + strcpy(nv_mem_client_nc.name, DRV_NAME "_nc"); + strcpy(nv_mem_client_nc.version, DRV_VERSION); + reg_handle_nc = ib_register_peer_memory_client(&nv_mem_client_nc, NULL); + if (!reg_handle_nc) { + peer_err("nv_mem_client_init -- error while registering nc client\n"); + status = -EINVAL; + goto out; + } + +out: + if (status) { + if (reg_handle) { + ib_unregister_peer_memory_client(reg_handle); + reg_handle = NULL; + } + + if (reg_handle_nc) { + ib_unregister_peer_memory_client(reg_handle_nc); + reg_handle_nc = NULL; + } + } + + return status; +#else + return -EINVAL; +#endif +} + +static void __exit nv_mem_client_cleanup(void) +{ +#if defined (NV_MLNX_IB_PEER_MEM_SYMBOLS_PRESENT) + if (reg_handle) + ib_unregister_peer_memory_client(reg_handle); + + if (reg_handle_nc) + ib_unregister_peer_memory_client(reg_handle_nc); +#endif +} + +module_init(nv_mem_client_init); +module_exit(nv_mem_client_cleanup); diff --git a/kernel-open/nvidia-peermem/peer_mem.h b/kernel-open/nvidia-peermem/peer_mem.h new file mode 100644 index 000000000..64156d476 --- /dev/null +++ b/kernel-open/nvidia-peermem/peer_mem.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: Linux-OpenIB */ +/* + * Copyright (c) 2014-2020, Mellanox Technologies. All rights reserved. + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef RDMA_PEER_MEM_H +#define RDMA_PEER_MEM_H + +#include + +#define IB_PEER_MEMORY_NAME_MAX 64 +#define IB_PEER_MEMORY_VER_MAX 16 + +/* + * Prior versions used a void * for core_context, at some point this was + * switched to use u64. Be careful if compiling this as 32 bit. To help the + * value of core_context is limited to u32 so it should work OK despite the + * type change. + */ +#define PEER_MEM_U64_CORE_CONTEXT + +struct device; + +/** + * struct peer_memory_client - registration information for user virtual + * memory handlers + * + * The peer_memory_client scheme allows a driver to register with the ib_umem + * system that it has the ability to understand user virtual address ranges + * that are not compatible with get_user_pages(). For instance VMAs created + * with io_remap_pfn_range(), or other driver special VMA. + * + * For ranges the interface understands it can provide a DMA mapped sg_table + * for use by the ib_umem, allowing user virtual ranges that cannot be + * supported by get_user_pages() to be used as umems. + */ +struct peer_memory_client { + char name[IB_PEER_MEMORY_NAME_MAX]; + char version[IB_PEER_MEMORY_VER_MAX]; + + /** + * acquire - Begin working with a user space virtual address range + * + * @addr - Virtual address to be checked whether belongs to peer. + * @size - Length of the virtual memory area starting at addr. + * @peer_mem_private_data - Obsolete, always NULL + * @peer_mem_name - Obsolete, always NULL + * @client_context - Returns an opaque value for this acquire use in + * other APIs + * + * Returns 1 if the peer_memory_client supports the entire virtual + * address range, 0 or -ERRNO otherwise. If 1 is returned then + * release() will be called to release the acquire(). + */ + int (*acquire)(unsigned long addr, size_t size, + void *peer_mem_private_data, char *peer_mem_name, + void **client_context); + /** + * get_pages - Fill in the first part of a sg_table for a virtual + * address range + * + * @addr - Virtual address to be checked whether belongs to peer. + * @size - Length of the virtual memory area starting at addr. + * @write - Always 1 + * @force - 1 if write is required + * @sg_head - Obsolete, always NULL + * @client_context - Value returned by acquire() + * @core_context - Value to be passed to invalidate_peer_memory for + * this get + * + * addr/size are passed as the raw virtual address range requested by + * the user, it is not aligned to any page size. get_pages() is always + * followed by dma_map(). + * + * Upon return the caller can call the invalidate_callback(). + * + * Returns 0 on success, -ERRNO on failure. After success put_pages() + * will be called to return the pages. + */ + int (*get_pages)(unsigned long addr, size_t size, int write, int force, + struct sg_table *sg_head, void *client_context, + u64 core_context); + /** + * dma_map - Create a DMA mapped sg_table + * + * @sg_head - The sg_table to allocate + * @client_context - Value returned by acquire() + * @dma_device - The device that will be doing DMA from these addresses + * @dmasync - Obsolete, always 0 + * @nmap - Returns the number of dma mapped entries in the sg_head + * + * Must be called after get_pages(). This must fill in the sg_head with + * DMA mapped SGLs for dma_device. 
Each SGL start and end must meet a + * minimum alignment of at least PAGE_SIZE, though individual sgls can + * be multiples of PAGE_SIZE, in any mixture. Since the user virtual + * address/size are not page aligned, the implementation must increase + * it to the logical alignment when building the SGLs. + * + * Returns 0 on success, -ERRNO on failure. After success dma_unmap() + * will be called to unmap the pages. On failure sg_head must be left + * untouched or point to a valid sg_table. + */ + int (*dma_map)(struct sg_table *sg_head, void *client_context, + struct device *dma_device, int dmasync, int *nmap); + /** + * dma_unmap - Unmap a DMA mapped sg_table + * + * @sg_head - The sg_table to unmap + * @client_context - Value returned by acquire() + * @dma_device - The device that will be doing DMA from these addresses + * + * sg_head will not be touched after this function returns. + * + * Must return 0. + */ + int (*dma_unmap)(struct sg_table *sg_head, void *client_context, + struct device *dma_device); + /** + * put_pages - Unpin a SGL + * + * @sg_head - The sg_table to unpin + * @client_context - Value returned by acquire() + * + * sg_head must be freed on return. + */ + void (*put_pages)(struct sg_table *sg_head, void *client_context); + /* Obsolete, not used */ + unsigned long (*get_page_size)(void *client_context); + /** + * release - Undo acquire + * + * @client_context - Value returned by acquire() + * + * If acquire() returns 1 then release() must be called. All + * get_pages() and dma_map()'s must be undone before calling this + * function. + */ + void (*release)(void *client_context); +}; + +enum { + PEER_MEM_INVALIDATE_UNMAPS = 1 << 0, +}; + +struct peer_memory_client_ex { + struct peer_memory_client client; + size_t ex_size; + u32 flags; +}; + +/* + * If invalidate_callback() is non-NULL then the client will only support + * umems which can be invalidated. The caller may call the + * invalidate_callback() after acquire() on return the range will no longer + * have DMA active, and release() will have been called. + * + * Note: The implementation locking must ensure that get_pages(), and + * dma_map() do not have locking dependencies with invalidate_callback(). The + * ib_core will wait until any concurrent get_pages() or dma_map() completes + * before returning. + * + * Similarly, this can call dma_unmap(), put_pages() and release() from within + * the callback, or will wait for another thread doing those operations to + * complete. + * + * For these reasons the user of invalidate_callback() must be careful with + * locking. 
+ */ +typedef int (*invalidate_peer_memory)(void *reg_handle, u64 core_context); + +void * +ib_register_peer_memory_client(const struct peer_memory_client *peer_client, + invalidate_peer_memory *invalidate_callback); +void ib_unregister_peer_memory_client(void *reg_handle); + +#endif diff --git a/kernel-open/nvidia-uvm/cla06fsubch.h b/kernel-open/nvidia-uvm/cla06fsubch.h new file mode 100644 index 000000000..930a0d343 --- /dev/null +++ b/kernel-open/nvidia-uvm/cla06fsubch.h @@ -0,0 +1,29 @@ +/******************************************************************************* + Copyright (c) 2013 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __cla06fsubch_h__ +#define __cla06fsubch_h__ + +#define NVA06F_SUBCHANNEL_COPY_ENGINE 4 + +#endif // {__cla06fsubch_h__} diff --git a/kernel-open/nvidia-uvm/cla16f.h b/kernel-open/nvidia-uvm/cla16f.h new file mode 100644 index 000000000..356bfe67f --- /dev/null +++ b/kernel-open/nvidia-uvm/cla16f.h @@ -0,0 +1,155 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef _cla16f_h_ +#define _cla16f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define KEPLER_CHANNEL_GPFIFO_B (0x0000A16F) + +#define NVA16F_SET_OBJECT (0x00000000) +#define NVA16F_NOP (0x00000008) +#define NVA16F_NOP_HANDLE 31:0 +#define NVA16F_SEMAPHOREA (0x00000010) +#define NVA16F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVA16F_SEMAPHOREB (0x00000014) +#define NVA16F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVA16F_SEMAPHOREC (0x00000018) +#define NVA16F_SEMAPHOREC_PAYLOAD 31:0 +#define NVA16F_SEMAPHORED (0x0000001C) +#define NVA16F_SEMAPHORED_OPERATION 4:0 +#define NVA16F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVA16F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVA16F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVA16F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVA16F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVA16F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVA16F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVA16F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVA16F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVA16F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVA16F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVA16F_SEMAPHORED_REDUCTION 30:27 +#define NVA16F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVA16F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVA16F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVA16F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVA16F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVA16F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVA16F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVA16F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVA16F_SEMAPHORED_FORMAT 31:31 +#define NVA16F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVA16F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVA16F_NON_STALL_INTERRUPT (0x00000020) +#define NVA16F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVA16F_FB_FLUSH (0x00000024) +#define NVA16F_FB_FLUSH_HANDLE 31:0 +#define NVA16F_MEM_OP_A (0x00000028) +#define NVA16F_MEM_OP_A_OPERAND_LOW 31:2 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_ADDR 29:2 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET 31:30 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NVA16F_MEM_OP_B (0x0000002c) +#define NVA16F_MEM_OP_B_OPERAND_HIGH 7:0 +#define NVA16F_MEM_OP_B_OPERATION 31:27 +#define NVA16F_MEM_OP_B_OPERATION_SYSMEMBAR_FLUSH 0x00000005 +#define NVA16F_MEM_OP_B_OPERATION_SOFT_FLUSH 0x00000006 +#define NVA16F_MEM_OP_B_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVA16F_MEM_OP_B_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVA16F_MEM_OP_B_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +#define NVA16F_MEM_OP_B_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVA16F_MEM_OP_B_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB 0:0 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC 1:1 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_DISABLE 0x00000001 
+#define NVA16F_SET_REFERENCE (0x00000050) +#define NVA16F_SET_REFERENCE_COUNT 31:0 +#define NVA16F_WFI (0x00000078) +#define NVA16F_WFI_HANDLE 31:0 + +/* GPFIFO entry format */ +#define NVA16F_GP_ENTRY__SIZE 8 +#define NVA16F_GP_ENTRY0_FETCH 0:0 +#define NVA16F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVA16F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVA16F_GP_ENTRY0_GET 31:2 +#define NVA16F_GP_ENTRY0_OPERAND 31:0 +#define NVA16F_GP_ENTRY1_GET_HI 7:0 +#define NVA16F_GP_ENTRY1_PRIV 8:8 +#define NVA16F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVA16F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVA16F_GP_ENTRY1_LEVEL 9:9 +#define NVA16F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVA16F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVA16F_GP_ENTRY1_LENGTH 30:10 + +/* dma method formats */ +#define NVA16F_DMA_METHOD_ADDRESS 11:0 +#define NVA16F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVA16F_DMA_METHOD_COUNT 28:16 +#define NVA16F_DMA_SEC_OP 31:29 +#define NVA16F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVA16F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) + +/* dma incrementing method format */ +#define NVA16F_DMA_INCR_ADDRESS 11:0 +#define NVA16F_DMA_INCR_SUBCHANNEL 15:13 +#define NVA16F_DMA_INCR_COUNT 28:16 +#define NVA16F_DMA_INCR_OPCODE 31:29 +#define NVA16F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVA16F_DMA_INCR_DATA 31:0 + +/* dma non-incrementing method format */ +#define NVA16F_DMA_NONINCR_ADDRESS 11:0 +#define NVA16F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVA16F_DMA_NONINCR_COUNT 28:16 +#define NVA16F_DMA_NONINCR_OPCODE 31:29 +#define NVA16F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVA16F_DMA_NONINCR_DATA 31:0 + +/* dma immediate-data format */ +#define NVA16F_DMA_IMMD_ADDRESS 11:0 +#define NVA16F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVA16F_DMA_IMMD_DATA 28:16 +#define NVA16F_DMA_IMMD_OPCODE 31:29 +#define NVA16F_DMA_IMMD_OPCODE_VALUE (0x00000004) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cla16F_h_ */ diff --git a/kernel-open/nvidia-uvm/clb069.h b/kernel-open/nvidia-uvm/clb069.h new file mode 100644 index 000000000..9e1f3a267 --- /dev/null +++ b/kernel-open/nvidia-uvm/clb069.h @@ -0,0 +1,62 @@ +/******************************************************************************* + Copyright (c) 2014 NVidia Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
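A note on notation that applies to cla16f.h above and to all of the class headers below: a field is written as a single HIGH:LOW token, for example NVA16F_SEMAPHORED_OPERATION expands to 4:0, meaning bits 4 through 0 of the method data word. Such tokens are normally consumed with a ternary trick, since (0 ? 4:0) evaluates to 0 and (1 ? 4:0) evaluates to 4. The helpers below are a minimal illustrative sketch in that style, modeled on the driver's DRF-type macros; the EX_* names are hypothetical and not part of this commit.

/* Turn a HI:LO field token into a shift, a mask, and pack/extract helpers. */
#define EX_FIELD_LOW(f)    ((0 ? f) % 32)   /* low bit index  */
#define EX_FIELD_HIGH(f)   ((1 ? f) % 32)   /* high bit index */
#define EX_FIELD_MASK(f)   (0xFFFFFFFFu >> (31 - EX_FIELD_HIGH(f) + EX_FIELD_LOW(f)))
#define EX_FIELD_NUM(f, n) (((NvU32)(n) & EX_FIELD_MASK(f)) << EX_FIELD_LOW(f))
#define EX_FIELD_VAL(f, v) (((NvU32)(v) >> EX_FIELD_LOW(f)) & EX_FIELD_MASK(f))

/* Example: the NVA16F_SEMAPHORED data word for a 4-byte semaphore release. */
static inline NvU32 example_a16f_semaphored_release(void)
{
        return EX_FIELD_NUM(NVA16F_SEMAPHORED_OPERATION,
                            NVA16F_SEMAPHORED_OPERATION_RELEASE) |
               EX_FIELD_NUM(NVA16F_SEMAPHORED_RELEASE_SIZE,
                            NVA16F_SEMAPHORED_RELEASE_SIZE_4BYTE);
}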
+*******************************************************************************/ +#ifndef _clb069_h_ +#define _clb069_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAXWELL_FAULT_BUFFER_A (0xb069) + +#define NVB069_FAULT_BUF_ENTRY 0x0000001f:0x00000000 +#define NVB069_FAULT_BUF_SIZE 32 +#define NVB069_FAULT_BUF_ENTRY_INST_APERTURE MW((9+0*32):(0*32+8)) +#define NVB069_FAULT_BUF_ENTRY_INST_APERTURE_VID_MEM 0x00000000 +#define NVB069_FAULT_BUF_ENTRY_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVB069_FAULT_BUF_ENTRY_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVB069_FAULT_BUF_ENTRY_INST_LO MW((31+0*32):(0*32+12)) +#define NVB069_FAULT_BUF_ENTRY_INST_HI MW((31+1*32):(1*32+0)) +#define NVB069_FAULT_BUF_ENTRY_INST MW((31+1*32):(0*32+12)) +#define NVB069_FAULT_BUF_ENTRY_ADDR_LO MW((31+2*32):(2*32+0)) +#define NVB069_FAULT_BUF_ENTRY_ADDR_HI MW((31+3*32):(3*32+0)) +#define NVB069_FAULT_BUF_ENTRY_ADDR MW((31+3*32):(2*32+0)) +#define NVB069_FAULT_BUF_ENTRY_TIMESTAMP_LO MW((31+4*32):(4*32+0)) +#define NVB069_FAULT_BUF_ENTRY_TIMESTAMP_HI MW((31+5*32):(5*32+0)) +#define NVB069_FAULT_BUF_ENTRY_TIMESTAMP MW((31+5*32):(4*32+0)) +#define NVB069_FAULT_BUF_ENTRY_RESERVED MW((31+6*32):(6*32+0)) +#define NVB069_FAULT_BUF_ENTRY_FAULT_TYPE MW((4+7*32):(7*32+0)) +#define NVB069_FAULT_BUF_ENTRY_CLIENT MW((14+7*32):(7*32+8)) +#define NVB069_FAULT_BUF_ENTRY_ACCESS_TYPE MW((18+7*32):(7*32+16)) +#define NVB069_FAULT_BUF_ENTRY_MMU_CLIENT_TYPE MW((20+7*32):(7*32+20)) +#define NVB069_FAULT_BUF_ENTRY_GPC_ID MW((28+7*32):(7*32+24)) +#define NVB069_FAULT_BUF_ENTRY_VALID MW((31+7*32):(7*32+31)) +#define NVB069_FAULT_BUF_ENTRY_VALID_FALSE 0x00000000 +#define NVB069_FAULT_BUF_ENTRY_VALID_TRUE 0x00000001 +#define NVB069_NOTIFIERS_REPLAYABLE_FAULT (0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clb069_h_ */ + diff --git a/kernel-open/nvidia-uvm/clb06f.h b/kernel-open/nvidia-uvm/clb06f.h new file mode 100644 index 000000000..67f521bb5 --- /dev/null +++ b/kernel-open/nvidia-uvm/clb06f.h @@ -0,0 +1,140 @@ +/******************************************************************************* + Copyright (c) 2014 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
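In clb069.h above, the MW(high:low) ranges index bits across the entire 32-byte fault buffer entry (eight 32-bit words) rather than a single register: NVB069_FAULT_BUF_ENTRY_VALID, for example, is bit 31 of word 7, and the fault address occupies words 2 and 3. The MW() macro itself is not defined in this excerpt, so the sketch below, which is illustrative only and not part of this commit, hand-computes those positions while reading an entry as an array of NvU32.

/* Read selected fields from one Maxwell replayable fault buffer entry,
 * treated as NVB069_FAULT_BUF_SIZE / 4 = 8 little-endian 32-bit words. */
static inline NvBool example_b069_entry_valid(const NvU32 entry[8])
{
        return (entry[7] >> 31) & 0x1;               /* NVB069_FAULT_BUF_ENTRY_VALID */
}

static inline NvU64 example_b069_entry_addr(const NvU32 entry[8])
{
        return ((NvU64)entry[3] << 32) | entry[2];   /* NVB069_FAULT_BUF_ENTRY_ADDR  */
}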
+ +*******************************************************************************/ + +#ifndef _clB06f_h_ +#define _clB06f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define MAXWELL_CHANNEL_GPFIFO_A (0x0000B06F) + +/* class MAXWELL_CHANNEL_GPFIFO */ +#define NVB06F_SET_OBJECT (0x00000000) +#define NVB06F_NOP (0x00000008) +#define NVB06F_NOP_HANDLE 31:0 +#define NVB06F_SEMAPHOREA (0x00000010) +#define NVB06F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVB06F_SEMAPHOREB (0x00000014) +#define NVB06F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVB06F_SEMAPHOREC (0x00000018) +#define NVB06F_SEMAPHOREC_PAYLOAD 31:0 +#define NVB06F_SEMAPHORED (0x0000001C) +#define NVB06F_SEMAPHORED_OPERATION 4:0 +#define NVB06F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVB06F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVB06F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVB06F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVB06F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVB06F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVB06F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVB06F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVB06F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 + +#define NVB06F_NON_STALL_INTERRUPT (0x00000020) +// NOTE - MEM_OP_A and MEM_OP_B have been removed for gm20x to make room for +// possible future MEM_OP features. MEM_OP_C/D have identical functionality +// to the previous MEM_OP_A/B methods. +#define NVB06F_MEM_OP_C (0x00000030) +#define NVB06F_MEM_OP_C_OPERAND_LOW 31:2 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET 11:10 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_ADDR_LO 31:12 +#define NVB06F_MEM_OP_D (0x00000034) +#define NVB06F_MEM_OP_D_OPERAND_HIGH 7:0 +#define NVB06F_MEM_OP_D_OPERATION 31:27 +#define NVB06F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVB06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVB06F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVB06F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +#define NVB06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVB06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVB06F_MEM_OP_D_TLB_INVALIDATE_ADDR_HI 7:0 +#define NVB06F_WFI (0x00000078) + +/* GPFIFO entry format */ +#define NVB06F_GP_ENTRY__SIZE 8 +#define NVB06F_GP_ENTRY0_GET 31:2 +#define NVB06F_GP_ENTRY0_OPERAND 31:0 +#define NVB06F_GP_ENTRY1_GET_HI 7:0 +#define NVB06F_GP_ENTRY1_PRIV 8:8 +#define NVB06F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVB06F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVB06F_GP_ENTRY1_LEVEL 9:9 +#define NVB06F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVB06F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVB06F_GP_ENTRY1_LENGTH 30:10 + +/* dma method formats */ +#define NVB06F_DMA_SEC_OP 31:29 +#define NVB06F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define 
NVB06F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +/* dma incrementing method format */ +#define NVB06F_DMA_INCR_ADDRESS 11:0 +#define NVB06F_DMA_INCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_INCR_COUNT 28:16 +#define NVB06F_DMA_INCR_OPCODE 31:29 +#define NVB06F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVB06F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVB06F_DMA_NONINCR_ADDRESS 11:0 +#define NVB06F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_NONINCR_COUNT 28:16 +#define NVB06F_DMA_NONINCR_OPCODE 31:29 +#define NVB06F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVB06F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVB06F_DMA_ONEINCR_ADDRESS 11:0 +#define NVB06F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_ONEINCR_COUNT 28:16 +#define NVB06F_DMA_ONEINCR_OPCODE 31:29 +#define NVB06F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVB06F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVB06F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVB06F_DMA_IMMD_ADDRESS 11:0 +#define NVB06F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVB06F_DMA_IMMD_DATA 28:16 +#define NVB06F_DMA_IMMD_OPCODE 31:29 +#define NVB06F_DMA_IMMD_OPCODE_VALUE (0x00000004) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clB06F_h_ */ diff --git a/kernel-open/nvidia-uvm/clb0b5.h b/kernel-open/nvidia-uvm/clb0b5.h new file mode 100644 index 000000000..26ba85103 --- /dev/null +++ b/kernel-open/nvidia-uvm/clb0b5.h @@ -0,0 +1,191 @@ +/******************************************************************************* + Copyright (c) 2014 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
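For clb06f.h above, a brief illustration of the GPFIFO entry format: the sketch below, illustrative only and not part of this commit, packs one entry for MAXWELL_CHANNEL_GPFIFO_A, assuming (as the field layout suggests) that GP_ENTRY0_GET holds bits 31:2 of a 4-byte-aligned pushbuffer GPU VA, GP_ENTRY1_GET_HI holds bits 39:32, and GP_ENTRY1_LENGTH is the segment length in 4-byte methods. It reuses the hypothetical EX_FIELD_NUM helper sketched after cla16f.h.

static inline void example_b06f_gpfifo_entry(NvU64 pushbuf_va, NvU32 size_bytes,
                                             NvU32 *entry0, NvU32 *entry1)
{
        /* Word 0: pushbuffer VA bits 31:2. */
        *entry0 = EX_FIELD_NUM(NVB06F_GP_ENTRY0_GET, (NvU32)(pushbuf_va >> 2));
        /* Word 1: VA bits 39:32 plus the length of the segment in methods. */
        *entry1 = EX_FIELD_NUM(NVB06F_GP_ENTRY1_GET_HI, (NvU32)(pushbuf_va >> 32)) |
                  EX_FIELD_NUM(NVB06F_GP_ENTRY1_LENGTH, size_bytes / 4);
}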
+ +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clb0b5_h_ +#define _clb0b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAXWELL_DMA_COPY_A (0x0000B0B5) + +#define NVB0B5_SET_SEMAPHORE_A (0x00000240) +#define NVB0B5_SET_SEMAPHORE_A_UPPER 7:0 +#define NVB0B5_SET_SEMAPHORE_B (0x00000244) +#define NVB0B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVB0B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVB0B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVB0B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVB0B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVB0B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVB0B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVB0B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVB0B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVB0B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVB0B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVB0B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVB0B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVB0B5_SET_DST_PHYS_MODE (0x00000264) +#define NVB0B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVB0B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVB0B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVB0B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVB0B5_LAUNCH_DMA (0x00000300) +#define NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVB0B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVB0B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVB0B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVB0B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVB0B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVB0B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVB0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVB0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVB0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVB0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVB0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVB0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVB0B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVB0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVB0B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVB0B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define 
NVB0B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVB0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVB0B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVB0B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVB0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVB0B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_BYPASS_L2 20:20 +#define NVB0B5_LAUNCH_DMA_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVB0B5_LAUNCH_DMA_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVB0B5_OFFSET_IN_UPPER (0x00000400) +#define NVB0B5_OFFSET_IN_UPPER_UPPER 7:0 +#define NVB0B5_OFFSET_IN_LOWER (0x00000404) +#define NVB0B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVB0B5_OFFSET_OUT_UPPER (0x00000408) +#define NVB0B5_OFFSET_OUT_UPPER_UPPER 7:0 +#define NVB0B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVB0B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVB0B5_PITCH_IN (0x00000410) +#define NVB0B5_PITCH_IN_VALUE 31:0 +#define NVB0B5_PITCH_OUT (0x00000414) +#define NVB0B5_PITCH_OUT_VALUE 31:0 +#define NVB0B5_LINE_LENGTH_IN (0x00000418) +#define NVB0B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVB0B5_LINE_COUNT (0x0000041C) +#define NVB0B5_LINE_COUNT_VALUE 31:0 +#define NVB0B5_SET_REMAP_CONST_A (0x00000700) +#define NVB0B5_SET_REMAP_CONST_A_V 31:0 +#define NVB0B5_SET_REMAP_CONST_B (0x00000704) +#define NVB0B5_SET_REMAP_CONST_B_V 31:0 +#define NVB0B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X 
(0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) + + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clb0b5_h diff --git a/kernel-open/nvidia-uvm/clc06f.h b/kernel-open/nvidia-uvm/clc06f.h new file mode 100644 index 000000000..285d041b5 --- /dev/null +++ b/kernel-open/nvidia-uvm/clc06f.h @@ -0,0 +1,173 @@ +/******************************************************************************* + Copyright (c) 2014 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
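For clb0b5.h above, the LAUNCH_DMA fields combine into a single method data word describing a copy. The sketch below is illustrative only and not part of this commit: it builds the value for a plain virtual-to-virtual, pitch-linear, single-line copy that flushes and releases a one-word semaphore, reusing the hypothetical EX_FIELD_NUM helper sketched after cla16f.h. The copy itself would additionally be described with NVB0B5_OFFSET_IN_UPPER/LOWER, NVB0B5_OFFSET_OUT_UPPER/LOWER and NVB0B5_LINE_LENGTH_IN; those pushes are omitted here.

static inline NvU32 example_b0b5_launch_dma_value(void)
{
        return EX_FIELD_NUM(NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE,
                            NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED) |
               EX_FIELD_NUM(NVB0B5_LAUNCH_DMA_FLUSH_ENABLE,
                            NVB0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE) |
               EX_FIELD_NUM(NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE,
                            NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE) |
               EX_FIELD_NUM(NVB0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT,
                            NVB0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH) |
               EX_FIELD_NUM(NVB0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT,
                            NVB0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH) |
               EX_FIELD_NUM(NVB0B5_LAUNCH_DMA_MULTI_LINE_ENABLE,
                            NVB0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE) |
               EX_FIELD_NUM(NVB0B5_LAUNCH_DMA_SRC_TYPE,
                            NVB0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL) |
               EX_FIELD_NUM(NVB0B5_LAUNCH_DMA_DST_TYPE,
                            NVB0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL);
}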
+ +*******************************************************************************/ + +#ifndef _clc06f_h_ +#define _clc06f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define PASCAL_CHANNEL_GPFIFO_A (0x0000C06F) + +/* class PASCAL_CHANNEL_GPFIFO_A */ +#define NVC06F_SET_OBJECT (0x00000000) +#define NVC06F_NOP (0x00000008) +#define NVC06F_NOP_HANDLE 31:0 +#define NVC06F_SEMAPHOREA (0x00000010) +#define NVC06F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC06F_SEMAPHOREB (0x00000014) +#define NVC06F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC06F_SEMAPHOREC (0x00000018) +#define NVC06F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC06F_SEMAPHORED (0x0000001C) +#define NVC06F_SEMAPHORED_OPERATION 4:0 +#define NVC06F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC06F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC06F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 + + +/* GPFIFO entry format */ +#define NVC06F_GP_ENTRY__SIZE 8 +#define NVC06F_GP_ENTRY0_GET 31:2 +#define NVC06F_GP_ENTRY0_OPERAND 31:0 +#define NVC06F_GP_ENTRY1_GET_HI 7:0 +#define NVC06F_GP_ENTRY1_PRIV 8:8 +#define NVC06F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVC06F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVC06F_GP_ENTRY1_LEVEL 9:9 +#define NVC06F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC06F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC06F_GP_ENTRY1_LENGTH 30:10 + +/* dma incrementing method format */ +#define NVC06F_DMA_INCR_ADDRESS 11:0 +#define NVC06F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC06F_DMA_INCR_COUNT 28:16 +#define NVC06F_DMA_INCR_OPCODE 31:29 +#define NVC06F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC06F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC06F_DMA_NONINCR_ADDRESS 11:0 +#define NVC06F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC06F_DMA_NONINCR_COUNT 28:16 +#define NVC06F_DMA_NONINCR_OPCODE 31:29 +#define NVC06F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC06F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVC06F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC06F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC06F_DMA_ONEINCR_COUNT 28:16 +#define NVC06F_DMA_ONEINCR_OPCODE 31:29 +#define NVC06F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC06F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC06F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC06F_DMA_IMMD_ADDRESS 11:0 +#define NVC06F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC06F_DMA_IMMD_DATA 28:16 +#define NVC06F_DMA_IMMD_OPCODE 31:29 +#define NVC06F_DMA_IMMD_OPCODE_VALUE (0x00000004) + +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. +// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. 
+#define NVC06F_MEM_OP_A (0x00000028) +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC06F_MEM_OP_B (0x0000002c) +#define NVC06F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC06F_MEM_OP_C (0x00000030) +#define NVC06F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC06F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC06F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +// MEM_OP_D MUST be preceded by MEM_OPs A-C. 
+#define NVC06F_MEM_OP_D (0x00000034) +#define NVC06F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC06F_MEM_OP_D_OPERATION 31:27 +#define NVC06F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC06F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC06F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC06F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC06F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC06F_SET_REFERENCE (0x00000050) +#define NVC06F_SET_REFERENCE_COUNT 31:0 + +#define NVC06F_WFI (0x00000078) +#define NVC06F_WFI_SCOPE 0:0 +#define NVC06F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC06F_WFI_SCOPE_ALL 0x00000001 + + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc06f_h_ */ diff --git a/kernel-open/nvidia-uvm/clc076.h b/kernel-open/nvidia-uvm/clc076.h new file mode 100644 index 000000000..284c57a23 --- /dev/null +++ b/kernel-open/nvidia-uvm/clc076.h @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc076_h_ +#define _clc076_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define GP100_UVM_SW (0x0000c076) + +#define NVC076_SET_OBJECT (0x00000000) +#define NVC076_NO_OPERATION (0x00000100) + +/* Method data fields to support gpu fault cancel. 
These are pushed in order by UVM */ + +#define NVC076_FAULT_CANCEL_A (0x00000104) +#define NVC076_FAULT_CANCEL_A_INST_APERTURE 1:0 +#define NVC076_FAULT_CANCEL_A_INST_APERTURE_VID_MEM 0x00000000 +#define NVC076_FAULT_CANCEL_A_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC076_FAULT_CANCEL_A_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 + +/* instance pointer is 4k aligned so those bits are reused to store the aperture */ +#define NVC076_FAULT_CANCEL_A_INST_LOW 31:12 + +#define NVC076_FAULT_CANCEL_B (0x00000108) +#define NVC076_FAULT_CANCEL_B_INST_HI 31:0 + +#define NVC076_FAULT_CANCEL_C (0x0000010c) +#define NVC076_FAULT_CANCEL_C_CLIENT_ID 5:0 +#define NVC076_FAULT_CANCEL_C_GPC_ID 10:6 +#define NVC076_FAULT_CANCEL_C_MODE 31:30 +#define NVC076_FAULT_CANCEL_C_MODE_TARGETED 0x00000000 +#define NVC076_FAULT_CANCEL_C_MODE_GLOBAL 0x00000001 + +/* Method data fields to support clearing faulted bit. These are pushed in order by UVM */ + +#define NVC076_CLEAR_FAULTED_A (0x00000110) + +#define NVC076_CLEAR_FAULTED_A_INST_APERTURE 1:0 +#define NVC076_CLEAR_FAULTED_A_INST_APERTURE_VID_MEM 0x00000000 +#define NVC076_CLEAR_FAULTED_A_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC076_CLEAR_FAULTED_A_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 + +#define NVC076_CLEAR_FAULTED_A_TYPE 2:2 +#define NVC076_CLEAR_FAULTED_A_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC076_CLEAR_FAULTED_A_TYPE_ENG_FAULTED 0x00000001 + +/* instance pointer is 4k aligned */ +#define NVC076_CLEAR_FAULTED_A_INST_LOW 31:12 + +#define NVC076_CLEAR_FAULTED_B (0x00000114) +#define NVC076_CLEAR_FAULTED_B_INST_HI 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc076_h_ */ diff --git a/kernel-open/nvidia-uvm/clc0b5.h b/kernel-open/nvidia-uvm/clc0b5.h new file mode 100644 index 000000000..3059a980c --- /dev/null +++ b/kernel-open/nvidia-uvm/clc0b5.h @@ -0,0 +1,191 @@ +/******************************************************************************* + Copyright (c) 2014 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
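For clc076.h above, the CLEAR_FAULTED method data can be illustrated as follows. This sketch is illustrative only and not part of this commit: it assumes, as the 4 KiB-alignment comment suggests, that CLEAR_FAULTED_A carries bits 31:12 of the instance block physical address alongside the aperture and type fields, while CLEAR_FAULTED_B carries bits 63:32. It reuses the hypothetical EX_FIELD_NUM helper sketched after cla16f.h; how UVM actually obtains and pushes these values is not shown in this excerpt.

static inline void example_c076_clear_faulted(NvU64 inst_pa_4k_aligned,
                                              NvU32 *method_a, NvU32 *method_b)
{
        /* Word A: aperture, faulted-bit type, and the low instance bits. */
        *method_a = EX_FIELD_NUM(NVC076_CLEAR_FAULTED_A_INST_APERTURE,
                                 NVC076_CLEAR_FAULTED_A_INST_APERTURE_VID_MEM) |
                    EX_FIELD_NUM(NVC076_CLEAR_FAULTED_A_TYPE,
                                 NVC076_CLEAR_FAULTED_A_TYPE_ENG_FAULTED) |
                    EX_FIELD_NUM(NVC076_CLEAR_FAULTED_A_INST_LOW,
                                 (NvU32)(inst_pa_4k_aligned >> 12));
        /* Word B: the upper 32 bits of the instance pointer. */
        *method_b = (NvU32)(inst_pa_4k_aligned >> 32); /* NVC076_CLEAR_FAULTED_B_INST_HI */
}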
+ +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc0b5_h_ +#define _clc0b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define PASCAL_DMA_COPY_A (0x0000C0B5) + +#define NVC0B5_SET_SEMAPHORE_A (0x00000240) +#define NVC0B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC0B5_SET_SEMAPHORE_B (0x00000244) +#define NVC0B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC0B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC0B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC0B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC0B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC0B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC0B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC0B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC0B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC0B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC0B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC0B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC0B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC0B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC0B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC0B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC0B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC0B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC0B5_LAUNCH_DMA (0x00000300) +#define NVC0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC0B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC0B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC0B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC0B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC0B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC0B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC0B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC0B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC0B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define 
NVC0B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC0B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC0B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC0B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2 20:20 +#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC0B5_OFFSET_IN_UPPER (0x00000400) +#define NVC0B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC0B5_OFFSET_IN_LOWER (0x00000404) +#define NVC0B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC0B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC0B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC0B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC0B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC0B5_PITCH_IN (0x00000410) +#define NVC0B5_PITCH_IN_VALUE 31:0 +#define NVC0B5_PITCH_OUT (0x00000414) +#define NVC0B5_PITCH_OUT_VALUE 31:0 +#define NVC0B5_LINE_LENGTH_IN (0x00000418) +#define NVC0B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC0B5_LINE_COUNT (0x0000041C) +#define NVC0B5_LINE_COUNT_VALUE 31:0 +#define NVC0B5_SET_REMAP_CONST_A (0x00000700) +#define NVC0B5_SET_REMAP_CONST_A_V 31:0 +#define NVC0B5_SET_REMAP_CONST_B (0x00000704) +#define NVC0B5_SET_REMAP_CONST_B_V 31:0 +#define NVC0B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define 
NVC0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc0b5_h + diff --git a/kernel-open/nvidia-uvm/clc1b5.h b/kernel-open/nvidia-uvm/clc1b5.h new file mode 100644 index 000000000..e95f14ac9 --- /dev/null +++ b/kernel-open/nvidia-uvm/clc1b5.h @@ -0,0 +1,191 @@ +/******************************************************************************* + Copyright (c) 2014 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc1b5_h_ +#define _clc1b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define PASCAL_DMA_COPY_B (0x0000C1B5) + +#define NVC1B5_SET_SEMAPHORE_A (0x00000240) +#define NVC1B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC1B5_SET_SEMAPHORE_B (0x00000244) +#define NVC1B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC1B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC1B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC1B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC1B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC1B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC1B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC1B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC1B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC1B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC1B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC1B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC1B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC1B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC1B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC1B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC1B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC1B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC1B5_LAUNCH_DMA (0x00000300) +#define NVC1B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC1B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC1B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC1B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC1B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC1B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC1B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC1B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC1B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC1B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC1B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC1B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC1B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC1B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC1B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC1B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC1B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC1B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC1B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC1B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define 
NVC1B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC1B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC1B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC1B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC1B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC1B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2 20:20 +#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC1B5_OFFSET_IN_UPPER (0x00000400) +#define NVC1B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC1B5_OFFSET_IN_LOWER (0x00000404) +#define NVC1B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC1B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC1B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC1B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC1B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC1B5_PITCH_IN (0x00000410) +#define NVC1B5_PITCH_IN_VALUE 31:0 +#define NVC1B5_PITCH_OUT (0x00000414) +#define NVC1B5_PITCH_OUT_VALUE 31:0 +#define NVC1B5_LINE_LENGTH_IN (0x00000418) +#define NVC1B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC1B5_LINE_COUNT (0x0000041C) +#define NVC1B5_LINE_COUNT_VALUE 31:0 +#define NVC1B5_SET_REMAP_CONST_A (0x00000700) +#define NVC1B5_SET_REMAP_CONST_A_V 31:0 +#define NVC1B5_SET_REMAP_CONST_B (0x00000704) +#define NVC1B5_SET_REMAP_CONST_B_V 31:0 +#define NVC1B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define 
NVC1B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc1b5_h + diff --git a/kernel-open/nvidia-uvm/clc365.h b/kernel-open/nvidia-uvm/clc365.h new file mode 100644 index 000000000..10d286bed --- /dev/null +++ b/kernel-open/nvidia-uvm/clc365.h @@ -0,0 +1,93 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + +// AUTO GENERATED -- DO NOT EDIT - this file automatically generated by refhdr2class.pl +// Command: ../../../bin/manuals/refhdr2class.pl clc365.h c365 ACCESS_COUNTER_NOTIFY_BUFFER --search_str=NV_ACCESS_COUNTER --input_file=nv_ref_dev_access_counter.h + + +#ifndef _clc365_h_ +#define _clc365_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define ACCESS_COUNTER_NOTIFY_BUFFER (0xc365) + +#define NVC365_NOTIFY_BUF +#define NVC365_NOTIFY_BUF_ENTRY 0x0000001f:0x00000000 +#define NVC365_NOTIFY_BUF_SIZE 32 +#define NVC365_NOTIFY_BUF_ENTRY_TYPE MW((0+0*32):(0*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_TYPE_CPU 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_TYPE_GPU 0x00000001 +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_TYPE MW((1+0*32):(0*32+1)) +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_TYPE_GVA 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_TYPE_GPA 0x00000001 +#define NVC365_NOTIFY_BUF_ENTRY_BANK MW((5+0*32):(0*32+2)) +#define NVC365_NOTIFY_BUF_ENTRY_BANK_0 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_1 0x00000001 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_2 0x00000002 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_3 0x00000003 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_4 0x00000004 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_5 0x00000005 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_6 0x00000006 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_7 0x00000007 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_8 0x00000008 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_9 0x00000009 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_10 0x0000000a +#define NVC365_NOTIFY_BUF_ENTRY_BANK_11 0x0000000b +#define NVC365_NOTIFY_BUF_ENTRY_BANK_12 0x0000000c +#define NVC365_NOTIFY_BUF_ENTRY_BANK_13 0x0000000d +#define NVC365_NOTIFY_BUF_ENTRY_BANK_14 0x0000000e +#define NVC365_NOTIFY_BUF_ENTRY_BANK_15 0x0000000f +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE MW((9+0*32):(0*32+8)) +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE_VID_MEM 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE_PEER_MEM 0x00000001 +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC365_NOTIFY_BUF_ENTRY_INST_APERTURE MW((11+0*32):(0*32+10)) +#define NVC365_NOTIFY_BUF_ENTRY_INST_APERTURE_VID_MEM 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC365_NOTIFY_BUF_ENTRY_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC365_NOTIFY_BUF_ENTRY_INST_LO MW((31+0*32):(0*32+12)) +#define NVC365_NOTIFY_BUF_ENTRY_INST_HI MW((31+1*32):(1*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_INST MW((31+1*32):(0*32+12)) +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_LO MW((31+2*32):(2*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_HI MW((31+3*32):(3*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_ADDR MW((31+3*32):(2*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_SUB_GRANULARITY MW((31+4*32):(4*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_NOTIFY_TAG MW((19+5*32):(5*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_COUNTER_VAL MW((15+6*32):(6*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_PEER_ID MW((2+7*32):(7*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_MMU_ENGINE_ID MW((28+7*32):(7*32+20)) +#define NVC365_NOTIFY_BUF_ENTRY_VALID MW((31+7*32):(7*32+31)) +#define NVC365_NOTIFY_BUF_ENTRY_VALID_FALSE 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_VALID_TRUE 0x00000001 +#define NVC365_NOTIFIERS_ACCESS_COUNTER (0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc365_h_ */ diff --git a/kernel-open/nvidia-uvm/clc369.h b/kernel-open/nvidia-uvm/clc369.h new file 
mode 100644 index 000000000..5b35067d8 --- /dev/null +++ b/kernel-open/nvidia-uvm/clc369.h @@ -0,0 +1,82 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +// AUTO GENERATED -- DO NOT EDIT - this file automatically generated by refhdr2class.pl +// Command: ../../../bin/manuals/refhdr2class.pl clc369.h c369 MMU_FAULT_BUFFER --search_str=NV_MMU_FAULT --input_file=nv_ref_dev_mmu_fault.h + + +#ifndef _clc369_h_ +#define _clc369_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MMU_FAULT_BUFFER (0xc369) + +#define NVC369_BUF +#define NVC369_BUF_ENTRY 0x0000001f:0x00000000 +#define NVC369_BUF_SIZE 32 +#define NVC369_BUF_ENTRY_INST_APERTURE MW((9+0*32):(0*32+8)) +#define NVC369_BUF_ENTRY_INST_APERTURE_VID_MEM 0x00000000 +#define NVC369_BUF_ENTRY_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC369_BUF_ENTRY_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC369_BUF_ENTRY_INST_LO MW((31+0*32):(0*32+12)) +#define NVC369_BUF_ENTRY_INST_HI MW((31+1*32):(1*32+0)) +#define NVC369_BUF_ENTRY_INST MW((31+1*32):(0*32+12)) +#define NVC369_BUF_ENTRY_ADDR_PHYS_APERTURE MW((1+2*32):(2*32+0)) +#define NVC369_BUF_ENTRY_ADDR_LO MW((31+2*32):(2*32+12)) +#define NVC369_BUF_ENTRY_ADDR_HI MW((31+3*32):(3*32+0)) +#define NVC369_BUF_ENTRY_ADDR MW((31+3*32):(2*32+12)) +#define NVC369_BUF_ENTRY_TIMESTAMP_LO MW((31+4*32):(4*32+0)) +#define NVC369_BUF_ENTRY_TIMESTAMP_HI MW((31+5*32):(5*32+0)) +#define NVC369_BUF_ENTRY_TIMESTAMP MW((31+5*32):(4*32+0)) +#define NVC369_BUF_ENTRY_ENGINE_ID MW((8+6*32):(6*32+0)) +#define NVC369_BUF_ENTRY_FAULT_TYPE MW((4+7*32):(7*32+0)) +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT MW((7+7*32):(7*32+7)) +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_FALSE 0x00000000 +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_TRUE 0x00000001 +#define NVC369_BUF_ENTRY_CLIENT MW((14+7*32):(7*32+8)) +#define NVC369_BUF_ENTRY_ACCESS_TYPE MW((19+7*32):(7*32+16)) +#define NVC369_BUF_ENTRY_MMU_CLIENT_TYPE MW((20+7*32):(7*32+20)) +#define NVC369_BUF_ENTRY_GPC_ID MW((28+7*32):(7*32+24)) +#define NVC369_BUF_ENTRY_PROTECTED_MODE MW((29+7*32):(7*32+29)) +#define NVC369_BUF_ENTRY_PROTECTED_MODE_FALSE 0x00000000 +#define NVC369_BUF_ENTRY_PROTECTED_MODE_TRUE 0x00000001 +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_EN MW((30+7*32):(7*32+30)) +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_EN_FALSE 0x00000000 +#define 
NVC369_BUF_ENTRY_REPLAYABLE_FAULT_EN_TRUE 0x00000001 +#define NVC369_BUF_ENTRY_VALID MW((31+7*32):(7*32+31)) +#define NVC369_BUF_ENTRY_VALID_FALSE 0x00000000 +#define NVC369_BUF_ENTRY_VALID_TRUE 0x00000001 +#define NVC369_NOTIFIER_MMU_FAULT_NON_REPLAYABLE 0 +#define NVC369_NOTIFIER_MMU_FAULT_REPLAYABLE 1 +#define NVC369_NOTIFIER_MMU_FAULT_ERROR 2 +#define NVC369_NOTIFIER_MMU_FAULT_NON_REPLAYABLE_IN_PRIV 3 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc369_h_ */ diff --git a/kernel-open/nvidia-uvm/clc36f.h b/kernel-open/nvidia-uvm/clc36f.h new file mode 100644 index 000000000..92d8c0d77 --- /dev/null +++ b/kernel-open/nvidia-uvm/clc36f.h @@ -0,0 +1,368 @@ +/******************************************************************************* + Copyright (c) 2012-2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef _clc36f_h_ +#define _clc36f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class VOLTA_CHANNEL_GPFIFO */ +/* + * Documentation for VOLTA_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + * Note there is no .mfs file for this class. 
+ */ +#define VOLTA_CHANNEL_GPFIFO_A (0x0000C36F) + +#define NVC36F_TYPEDEF VOLTA_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct Nvc36fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc36fControl, VoltaAControlGPFifo; + +/* fields and values */ +#define NVC36F_NUMBER_OF_SUBCHANNELS (8) +#define NVC36F_SET_OBJECT (0x00000000) +#define NVC36F_SET_OBJECT_NVCLASS 15:0 +#define NVC36F_SET_OBJECT_ENGINE 20:16 +#define NVC36F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVC36F_ILLEGAL (0x00000004) +#define NVC36F_ILLEGAL_HANDLE 31:0 +#define NVC36F_NOP (0x00000008) +#define NVC36F_NOP_HANDLE 31:0 +#define NVC36F_SEMAPHOREA (0x00000010) +#define NVC36F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC36F_SEMAPHOREB (0x00000014) +#define NVC36F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC36F_SEMAPHOREC (0x00000018) +#define NVC36F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC36F_SEMAPHORED (0x0000001C) +#define NVC36F_SEMAPHORED_OPERATION 4:0 +#define NVC36F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC36F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC36F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC36F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVC36F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC36F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC36F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC36F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC36F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC36F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC36F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC36F_SEMAPHORED_REDUCTION 30:27 +#define NVC36F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC36F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC36F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC36F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC36F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC36F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC36F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC36F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVC36F_SEMAPHORED_FORMAT 31:31 +#define NVC36F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC36F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC36F_NON_STALL_INTERRUPT (0x00000020) +#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC36F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC36F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. 
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC36F_MEM_OP_A (0x00000028) +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC36F_MEM_OP_B (0x0000002c) +#define NVC36F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC36F_MEM_OP_C (0x00000030) +#define NVC36F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC36F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC36F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 
0x00000004 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. +#define NVC36F_MEM_OP_D (0x00000034) +#define NVC36F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_D_OPERATION 31:27 +#define NVC36F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC36F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC36F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC36F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC36F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC36F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC36F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC36F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC36F_SET_REFERENCE (0x00000050) +#define NVC36F_SET_REFERENCE_COUNT 31:0 +#define NVC36F_SEM_ADDR_LO (0x0000005c) +#define NVC36F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC36F_SEM_ADDR_HI (0x00000060) +#define NVC36F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC36F_SEM_PAYLOAD_LO (0x00000064) +#define NVC36F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC36F_SEM_PAYLOAD_HI (0x00000068) +#define NVC36F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC36F_SEM_EXECUTE (0x0000006c) +#define NVC36F_SEM_EXECUTE_OPERATION 2:0 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC36F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC36F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define 
NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC36F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC36F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC36F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC36F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC36F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC36F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC36F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC36F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC36F_WFI (0x00000078) +#define NVC36F_WFI_SCOPE 0:0 +#define NVC36F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC36F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC36F_WFI_SCOPE_ALL 0x00000001 +#define NVC36F_CRC_CHECK (0x0000007c) +#define NVC36F_CRC_CHECK_VALUE 31:0 +#define NVC36F_YIELD (0x00000080) +#define NVC36F_YIELD_OP 1:0 +#define NVC36F_YIELD_OP_NOP 0x00000000 +#define NVC36F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVC36F_YIELD_OP_TSG 0x00000003 +#define NVC36F_CLEAR_FAULTED (0x00000084) +#define NVC36F_CLEAR_FAULTED_CHID 11:0 +#define NVC36F_CLEAR_FAULTED_TYPE 31:31 +#define NVC36F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC36F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 +#define NVC36F_QUADRO_VERIFY (0x000000a0) + + +/* GPFIFO entry format */ +#define NVC36F_GP_ENTRY__SIZE 8 +#define NVC36F_GP_ENTRY0_FETCH 0:0 +#define NVC36F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC36F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC36F_GP_ENTRY0_GET 31:2 +#define NVC36F_GP_ENTRY0_OPERAND 31:0 +#define NVC36F_GP_ENTRY1_GET_HI 7:0 +#define NVC36F_GP_ENTRY1_PRIV 8:8 +#define NVC36F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVC36F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVC36F_GP_ENTRY1_LEVEL 9:9 +#define NVC36F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC36F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC36F_GP_ENTRY1_LENGTH 30:10 +#define NVC36F_GP_ENTRY1_SYNC 31:31 +#define NVC36F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC36F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC36F_GP_ENTRY1_OPCODE 7:0 +#define NVC36F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC36F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC36F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC36F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC36F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC36F_DMA_METHOD_ADDRESS 11:0 +#define NVC36F_DMA_SUBDEVICE_MASK 15:4 +#define NVC36F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC36F_DMA_TERT_OP 17:16 +#define NVC36F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC36F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC36F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC36F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC36F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC36F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC36F_DMA_METHOD_COUNT 28:16 +#define NVC36F_DMA_IMMD_DATA 28:16 +#define NVC36F_DMA_SEC_OP 31:29 +#define NVC36F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC36F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC36F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) 
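// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated header: the DMA method format
// fields above (METHOD_ADDRESS, METHOD_SUBCHANNEL, METHOD_COUNT, SEC_OP) are
// the pieces of a single 32-bit pushbuffer method header. The helper below is
// a hypothetical illustration of how an incrementing-method header could be
// packed by hand; in-tree code builds the same word with its DRF/HW* helper
// macros, and it is assumed here that METHOD_ADDRESS carries the dword-aligned
// method offset (byte offset >> 2). The remaining SEC_OP encodings continue
// below.
static inline NvU32 nvc36f_inc_method_header(NvU32 subch, NvU32 method, NvU32 count)
{
    return (0x1u << 29)              |  // NVC36F_DMA_SEC_OP (31:29) = INC_METHOD
           ((count & 0x1FFFu) << 16) |  // NVC36F_DMA_METHOD_COUNT (28:16)
           ((subch & 0x7u) << 13)    |  // NVC36F_DMA_METHOD_SUBCHANNEL (15:13)
           ((method >> 2) & 0xFFFu);    // NVC36F_DMA_METHOD_ADDRESS (11:0)
}
// ---------------------------------------------------------------------------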
+#define NVC36F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVC36F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC36F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC36F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC36F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC36F_DMA_INCR_ADDRESS 11:0 +#define NVC36F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_INCR_COUNT 28:16 +#define NVC36F_DMA_INCR_OPCODE 31:29 +#define NVC36F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC36F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC36F_DMA_NONINCR_ADDRESS 11:0 +#define NVC36F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_NONINCR_COUNT 28:16 +#define NVC36F_DMA_NONINCR_OPCODE 31:29 +#define NVC36F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC36F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVC36F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC36F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_ONEINCR_COUNT 28:16 +#define NVC36F_DMA_ONEINCR_OPCODE 31:29 +#define NVC36F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC36F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC36F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC36F_DMA_IMMD_ADDRESS 11:0 +#define NVC36F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC36F_DMA_IMMD_DATA 28:16 +#define NVC36F_DMA_IMMD_OPCODE 31:29 +#define NVC36F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC36F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC36F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC36F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC36F_DMA_ENDSEG_OPCODE 31:29 +#define NVC36F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC36F_DMA_ADDRESS 12:2 +#define NVC36F_DMA_SUBCH 15:13 +#define NVC36F_DMA_OPCODE3 17:16 +#define NVC36F_DMA_OPCODE3_NONE (0x00000000) +#define NVC36F_DMA_COUNT 28:18 +#define NVC36F_DMA_OPCODE 31:29 +#define NVC36F_DMA_OPCODE_METHOD (0x00000000) +#define NVC36F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC36F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc36f_h_ */ diff --git a/kernel-open/nvidia-uvm/clc3b5.h b/kernel-open/nvidia-uvm/clc3b5.h new file mode 100644 index 000000000..1e79c7526 --- /dev/null +++ b/kernel-open/nvidia-uvm/clc3b5.h @@ -0,0 +1,203 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc3b5_h_ +#define _clc3b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define VOLTA_DMA_COPY_A (0x0000C3B5) + +#define NVC3B5_SET_SEMAPHORE_A (0x00000240) +#define NVC3B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC3B5_SET_SEMAPHORE_B (0x00000244) +#define NVC3B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC3B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC3B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC3B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC3B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC3B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC3B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC3B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC3B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC3B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC3B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC3B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC3B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC3B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2 +#define NVC3B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC3B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC3B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC3B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC3B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC3B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2 +#define NVC3B5_LAUNCH_DMA (0x00000300) +#define NVC3B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC3B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC3B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC3B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC3B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC3B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC3B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC3B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC3B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC3B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC3B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC3B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC3B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 
(0x00000000) +#define NVC3B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC3B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC3B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC3B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC3B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC3B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC3B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC3B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC3B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC3B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_SRC_BYPASS_L2 20:20 +#define NVC3B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC3B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC3B5_LAUNCH_DMA_DST_BYPASS_L2 21:21 +#define NVC3B5_LAUNCH_DMA_DST_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC3B5_LAUNCH_DMA_DST_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC3B5_LAUNCH_DMA_VPRMODE 23:22 +#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS (0x00000002) +#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID (0x00000003) +#define NVC3B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC3B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC3B5_OFFSET_IN_UPPER (0x00000400) +#define NVC3B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC3B5_OFFSET_IN_LOWER (0x00000404) +#define NVC3B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC3B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC3B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC3B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC3B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC3B5_PITCH_IN (0x00000410) +#define NVC3B5_PITCH_IN_VALUE 31:0 +#define NVC3B5_PITCH_OUT (0x00000414) +#define NVC3B5_PITCH_OUT_VALUE 31:0 +#define NVC3B5_LINE_LENGTH_IN (0x00000418) +#define NVC3B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC3B5_LINE_COUNT (0x0000041C) +#define NVC3B5_LINE_COUNT_VALUE 31:0 +#define NVC3B5_SET_REMAP_CONST_A (0x00000700) +#define NVC3B5_SET_REMAP_CONST_A_V 31:0 +#define NVC3B5_SET_REMAP_CONST_B (0x00000704) +#define NVC3B5_SET_REMAP_CONST_B_V 31:0 
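// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated header: OFFSET_IN/OUT,
// LINE_LENGTH_IN and LAUNCH_DMA above are the methods a basic 1D copy
// programs on this class. The hypothetical helper below only shows how the
// published LAUNCH_DMA bit ranges combine for a pitch-linear,
// virtual-to-virtual, non-pipelined copy; real code composes this value with
// DRF-style macros rather than hand-rolled shifts.
static inline NvU32 nvc3b5_simple_copy_launch_value(void)
{
    // Methods expected before LAUNCH_DMA for a 1D copy (MULTI_LINE left FALSE):
    //   NVC3B5_OFFSET_IN_UPPER / _LOWER   <- source virtual address
    //   NVC3B5_OFFSET_OUT_UPPER / _LOWER  <- destination virtual address
    //   NVC3B5_LINE_LENGTH_IN             <- copy size in bytes
    // Writing NVC3B5_LAUNCH_DMA with this value then starts the transfer.
    return (0x2u << 0) |   // DATA_TRANSFER_TYPE (1:0) = NON_PIPELINED
           (0x1u << 2) |   // FLUSH_ENABLE       (2:2) = TRUE
           (0x1u << 7) |   // SRC_MEMORY_LAYOUT  (7:7) = PITCH
           (0x1u << 8);    // DST_MEMORY_LAYOUT  (8:8) = PITCH
                           // SRC_TYPE/DST_TYPE (12:12, 13:13) stay 0 = VIRTUAL
}
// ---------------------------------------------------------------------------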
+#define NVC3B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc3b5_h + diff --git a/kernel-open/nvidia-uvm/clc46f.h b/kernel-open/nvidia-uvm/clc46f.h new file mode 100644 index 000000000..a452ec43e --- /dev/null +++ b/kernel-open/nvidia-uvm/clc46f.h @@ -0,0 +1,367 @@ +/******************************************************************************* + Copyright (c) 2012-2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software 
and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef _clc46f_h_ +#define _clc46f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class TURING_CHANNEL_GPFIFO */ +/* + * Documentation for TURING_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + * Note there is no .mfs file for this class. + */ +#define TURING_CHANNEL_GPFIFO_A (0x0000C46F) + +#define NVC46F_TYPEDEF TURING_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct Nvc46fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc46fControl, TuringAControlGPFifo; + +/* fields and values */ +#define NVC46F_NUMBER_OF_SUBCHANNELS (8) +#define NVC46F_SET_OBJECT (0x00000000) +#define NVC46F_SET_OBJECT_NVCLASS 15:0 +#define NVC46F_SET_OBJECT_ENGINE 20:16 +#define NVC46F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVC46F_ILLEGAL (0x00000004) +#define NVC46F_ILLEGAL_HANDLE 31:0 +#define NVC46F_NOP (0x00000008) +#define NVC46F_NOP_HANDLE 31:0 +#define NVC46F_SEMAPHOREA (0x00000010) +#define NVC46F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC46F_SEMAPHOREB (0x00000014) +#define NVC46F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC46F_SEMAPHOREC (0x00000018) +#define NVC46F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC46F_SEMAPHORED (0x0000001C) +#define NVC46F_SEMAPHORED_OPERATION 4:0 +#define NVC46F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC46F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC46F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC46F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define 
NVC46F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC46F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC46F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC46F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC46F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC46F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC46F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC46F_SEMAPHORED_REDUCTION 30:27 +#define NVC46F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC46F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC46F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC46F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC46F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC46F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC46F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC46F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVC46F_SEMAPHORED_FORMAT 31:31 +#define NVC46F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC46F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC46F_NON_STALL_INTERRUPT (0x00000020) +#define NVC46F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC46F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC46F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. +// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC46F_MEM_OP_A (0x00000028) +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC46F_MEM_OP_B (0x0000002c) +#define NVC46F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC46F_MEM_OP_C (0x00000030) +#define NVC46F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC46F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC46F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 
+#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. 
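// ---------------------------------------------------------------------------
// Editorial sketch, not part of the generated header: because MEM_OP_D must
// be preceded by MEM_OP_A..C, even a plain system membar is pushed as four
// consecutive method data words. The hypothetical helper below shows one
// plausible packing of those four words using the bit ranges published here;
// real code emits them through its pushbuffer helpers and DRF-style macros.
static inline void nvc46f_sys_membar_mem_op_data(NvU32 data[4])
{
    data[0] = 0;           // NVC46F_MEM_OP_A (0x28): no TLB-invalidate address payload
    data[1] = 0;           // NVC46F_MEM_OP_B (0x2c): unused for a membar
    data[2] = 0x0u << 0;   // NVC46F_MEM_OP_C (0x30): MEMBAR_TYPE (2:0) = SYS_MEMBAR
    data[3] = 0x5u << 27;  // NVC46F_MEM_OP_D (0x34): OPERATION (31:27) = MEMBAR
}
// ---------------------------------------------------------------------------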
+#define NVC46F_MEM_OP_D (0x00000034) +#define NVC46F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_D_OPERATION 31:27 +#define NVC46F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC46F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC46F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC46F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC46F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC46F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC46F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC46F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC46F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC46F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC46F_SET_REFERENCE (0x00000050) +#define NVC46F_SET_REFERENCE_COUNT 31:0 +#define NVC46F_SEM_ADDR_LO (0x0000005c) +#define NVC46F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC46F_SEM_ADDR_HI (0x00000060) +#define NVC46F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC46F_SEM_PAYLOAD_LO (0x00000064) +#define NVC46F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC46F_SEM_PAYLOAD_HI (0x00000068) +#define NVC46F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC46F_SEM_EXECUTE (0x0000006c) +#define NVC46F_SEM_EXECUTE_OPERATION 2:0 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC46F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC46F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define NVC46F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC46F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC46F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC46F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC46F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC46F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC46F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC46F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC46F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC46F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define 
NVC46F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC46F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC46F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC46F_WFI (0x00000078) +#define NVC46F_WFI_SCOPE 0:0 +#define NVC46F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC46F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC46F_WFI_SCOPE_ALL 0x00000001 +#define NVC46F_CRC_CHECK (0x0000007c) +#define NVC46F_CRC_CHECK_VALUE 31:0 +#define NVC46F_YIELD (0x00000080) +#define NVC46F_YIELD_OP 1:0 +#define NVC46F_YIELD_OP_NOP 0x00000000 +#define NVC46F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVC46F_YIELD_OP_TSG 0x00000003 +#define NVC46F_CLEAR_FAULTED (0x00000084) +// Note: RM provides the HANDLE as an opaque value; the internal detail fields +// are intentionally not exposed to the driver through these defines. +#define NVC46F_CLEAR_FAULTED_HANDLE 30:0 +#define NVC46F_CLEAR_FAULTED_TYPE 31:31 +#define NVC46F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC46F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 +#define NVC46F_QUADRO_VERIFY (0x000000a0) + + +/* GPFIFO entry format */ +#define NVC46F_GP_ENTRY__SIZE 8 +#define NVC46F_GP_ENTRY0_FETCH 0:0 +#define NVC46F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC46F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC46F_GP_ENTRY0_GET 31:2 +#define NVC46F_GP_ENTRY0_OPERAND 31:0 +#define NVC46F_GP_ENTRY1_GET_HI 7:0 +#define NVC46F_GP_ENTRY1_LEVEL 9:9 +#define NVC46F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC46F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC46F_GP_ENTRY1_LENGTH 30:10 +#define NVC46F_GP_ENTRY1_SYNC 31:31 +#define NVC46F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC46F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC46F_GP_ENTRY1_OPCODE 7:0 +#define NVC46F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC46F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC46F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC46F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC46F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC46F_DMA_METHOD_ADDRESS 11:0 +#define NVC46F_DMA_SUBDEVICE_MASK 15:4 +#define NVC46F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC46F_DMA_TERT_OP 17:16 +#define NVC46F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC46F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC46F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC46F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC46F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC46F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC46F_DMA_METHOD_COUNT 28:16 +#define NVC46F_DMA_IMMD_DATA 28:16 +#define NVC46F_DMA_SEC_OP 31:29 +#define NVC46F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC46F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC46F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVC46F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVC46F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC46F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC46F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC46F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC46F_DMA_INCR_ADDRESS 11:0 +#define NVC46F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC46F_DMA_INCR_COUNT 28:16 +#define NVC46F_DMA_INCR_OPCODE 31:29 +#define NVC46F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC46F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC46F_DMA_NONINCR_ADDRESS 11:0 +#define NVC46F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC46F_DMA_NONINCR_COUNT 28:16 +#define 
NVC46F_DMA_NONINCR_OPCODE 31:29 +#define NVC46F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC46F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVC46F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC46F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC46F_DMA_ONEINCR_COUNT 28:16 +#define NVC46F_DMA_ONEINCR_OPCODE 31:29 +#define NVC46F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC46F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC46F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC46F_DMA_IMMD_ADDRESS 11:0 +#define NVC46F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC46F_DMA_IMMD_DATA 28:16 +#define NVC46F_DMA_IMMD_OPCODE 31:29 +#define NVC46F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC46F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC46F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC46F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC46F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC46F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC46F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC46F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC46F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC46F_DMA_ENDSEG_OPCODE 31:29 +#define NVC46F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC46F_DMA_ADDRESS 12:2 +#define NVC46F_DMA_SUBCH 15:13 +#define NVC46F_DMA_OPCODE3 17:16 +#define NVC46F_DMA_OPCODE3_NONE (0x00000000) +#define NVC46F_DMA_COUNT 28:18 +#define NVC46F_DMA_OPCODE 31:29 +#define NVC46F_DMA_OPCODE_METHOD (0x00000000) +#define NVC46F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC46F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc46f_h_ */ diff --git a/kernel-open/nvidia-uvm/clc56f.h b/kernel-open/nvidia-uvm/clc56f.h new file mode 100644 index 000000000..818da2dc1 --- /dev/null +++ b/kernel-open/nvidia-uvm/clc56f.h @@ -0,0 +1,369 @@ +/******************************************************************************* + Copyright (c) 2012-2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + +#ifndef _clc56f_h_ +#define _clc56f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class AMPERE_CHANNEL_GPFIFO */ +/* + * Documentation for AMPERE_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + * Note there is no .mfs file for this class. + */ +#define AMPERE_CHANNEL_GPFIFO_A (0x0000C56F) + +#define NVC56F_TYPEDEF AMPERE_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct Nvc56fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc56fControl, AmpereAControlGPFifo; + +/* fields and values */ +#define NVC56F_NUMBER_OF_SUBCHANNELS (8) +#define NVC56F_SET_OBJECT (0x00000000) +#define NVC56F_SET_OBJECT_NVCLASS 15:0 +#define NVC56F_SET_OBJECT_ENGINE 20:16 +#define NVC56F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVC56F_ILLEGAL (0x00000004) +#define NVC56F_ILLEGAL_HANDLE 31:0 +#define NVC56F_NOP (0x00000008) +#define NVC56F_NOP_HANDLE 31:0 +#define NVC56F_SEMAPHOREA (0x00000010) +#define NVC56F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC56F_SEMAPHOREB (0x00000014) +#define NVC56F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC56F_SEMAPHOREC (0x00000018) +#define NVC56F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC56F_SEMAPHORED (0x0000001C) +#define NVC56F_SEMAPHORED_OPERATION 4:0 +#define NVC56F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC56F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC56F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC56F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVC56F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC56F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC56F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC56F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC56F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC56F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC56F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC56F_SEMAPHORED_REDUCTION 30:27 +#define NVC56F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC56F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC56F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC56F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC56F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC56F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC56F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC56F_SEMAPHORED_REDUCTION_DEC 
0x00000007 +#define NVC56F_SEMAPHORED_FORMAT 31:31 +#define NVC56F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC56F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC56F_NON_STALL_INTERRUPT (0x00000020) +#define NVC56F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC56F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC56F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. +// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC56F_MEM_OP_A (0x00000028) +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE 7:6 // only relevant for invalidates with NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE for invalidating link TLB only, or non-link TLB only or all TLBs +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_ALL_TLBS 0 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_LINK_TLBS 1 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_NON_LINK_TLBS 2 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_RSVRVD 3 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC56F_MEM_OP_B (0x0000002c) +#define NVC56F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC56F_MEM_OP_C (0x00000030) +#define NVC56F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC56F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC56F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 
+#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. 
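The MEM_OP_A..D methods are consumed as a group: A and B carry an optional target address, C carries the qualifier bits, and the OPERATION field of MEM_OP_D (defined just below) selects how the other three are interpreted. The following sketch is illustrative only; every EX_* and ex_* name is invented here and the in-tree code uses its own push and field macros, but it shows how a system-scope membar could be encoded with a single incrementing method header of the format described under "dma method formats" later in this file:

/* Illustrative only: helpers for the HI:LO bit-range notation used in
 * this header (e.g. 31:27). "(0 ? 31:27)" evaluates to 27 and
 * "(1 ? 31:27)" to 31; the driver's own DRF/HW macros rely on the same
 * trick. NvU32 comes from nvtypes.h, which this header already includes. */
#define EX_LO(range)        ((0 ? range) % 32)
#define EX_MASK(range)      (0xFFFFFFFFU >> (31 - ((1 ? range) % 32) + EX_LO(range)))
#define EX_NUM(range, val)  ((((NvU32)(val)) & EX_MASK(range)) << EX_LO(range))

/* Hypothetical sketch: emit a system-scope membar into a pushbuffer.
 * One incrementing method header (subchannel 0; MEM_OP is a host method)
 * covers MEM_OP_A..MEM_OP_D at 0x28..0x34, followed by the four data
 * words; MEM_OP_D's OPERATION field tells host how to read A..C. */
static void ex_host_membar_sys(NvU32 *pb, NvU32 *idx)
{
    pb[(*idx)++] = EX_NUM(NVC56F_DMA_INCR_OPCODE,  NVC56F_DMA_INCR_OPCODE_VALUE) |
                   EX_NUM(NVC56F_DMA_INCR_COUNT,   4)                            |
                   EX_NUM(NVC56F_DMA_INCR_ADDRESS, NVC56F_MEM_OP_A >> 2);
    pb[(*idx)++] = 0;                                   /* MEM_OP_A: unused for MEMBAR */
    pb[(*idx)++] = 0;                                   /* MEM_OP_B: unused for MEMBAR */
    pb[(*idx)++] = EX_NUM(NVC56F_MEM_OP_C_MEMBAR_TYPE,  /* MEM_OP_C: barrier scope */
                          NVC56F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR);
    pb[(*idx)++] = EX_NUM(NVC56F_MEM_OP_D_OPERATION,    /* MEM_OP_D: the operation */
                          NVC56F_MEM_OP_D_OPERATION_MEMBAR);
}

A targeted TLB invalidate is assembled the same way, with MEM_OP_A/B carrying the page address and MEM_OP_C/D selecting MMU_TLB_INVALIDATE and its qualifiers.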
+#define NVC56F_MEM_OP_D (0x00000034) +#define NVC56F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_D_OPERATION 31:27 +#define NVC56F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC56F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC56F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC56F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC56F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC56F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC56F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC56F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC56F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC56F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC56F_SET_REFERENCE (0x00000050) +#define NVC56F_SET_REFERENCE_COUNT 31:0 +#define NVC56F_SEM_ADDR_LO (0x0000005c) +#define NVC56F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC56F_SEM_ADDR_HI (0x00000060) +#define NVC56F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC56F_SEM_PAYLOAD_LO (0x00000064) +#define NVC56F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC56F_SEM_PAYLOAD_HI (0x00000068) +#define NVC56F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC56F_SEM_EXECUTE (0x0000006c) +#define NVC56F_SEM_EXECUTE_OPERATION 2:0 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC56F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC56F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC56F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC56F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC56F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC56F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC56F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC56F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC56F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC56F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define 
NVC56F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC56F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC56F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC56F_WFI (0x00000078) +#define NVC56F_WFI_SCOPE 0:0 +#define NVC56F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC56F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC56F_WFI_SCOPE_ALL 0x00000001 +#define NVC56F_YIELD (0x00000080) +#define NVC56F_YIELD_OP 1:0 +#define NVC56F_YIELD_OP_NOP 0x00000000 +#define NVC56F_YIELD_OP_TSG 0x00000003 +#define NVC56F_CLEAR_FAULTED (0x00000084) +// Note: RM provides the HANDLE as an opaque value; the internal detail fields +// are intentionally not exposed to the driver through these defines. +#define NVC56F_CLEAR_FAULTED_HANDLE 30:0 +#define NVC56F_CLEAR_FAULTED_TYPE 31:31 +#define NVC56F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC56F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 +#define NVC56F_QUADRO_VERIFY (0x000000a0) + + +/* GPFIFO entry format */ +#define NVC56F_GP_ENTRY__SIZE 8 +#define NVC56F_GP_ENTRY0_FETCH 0:0 +#define NVC56F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC56F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC56F_GP_ENTRY0_GET 31:2 +#define NVC56F_GP_ENTRY0_OPERAND 31:0 +#define NVC56F_GP_ENTRY1_GET_HI 7:0 +#define NVC56F_GP_ENTRY1_LEVEL 9:9 +#define NVC56F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC56F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC56F_GP_ENTRY1_LENGTH 30:10 +#define NVC56F_GP_ENTRY1_SYNC 31:31 +#define NVC56F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC56F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC56F_GP_ENTRY1_OPCODE 7:0 +#define NVC56F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC56F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC56F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC56F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC56F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC56F_DMA_METHOD_ADDRESS 11:0 +#define NVC56F_DMA_SUBDEVICE_MASK 15:4 +#define NVC56F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC56F_DMA_TERT_OP 17:16 +#define NVC56F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC56F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC56F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC56F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC56F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC56F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC56F_DMA_METHOD_COUNT 28:16 +#define NVC56F_DMA_IMMD_DATA 28:16 +#define NVC56F_DMA_SEC_OP 31:29 +#define NVC56F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC56F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC56F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVC56F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVC56F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC56F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC56F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC56F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC56F_DMA_INCR_ADDRESS 11:0 +#define NVC56F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_INCR_COUNT 28:16 +#define NVC56F_DMA_INCR_OPCODE 31:29 +#define NVC56F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC56F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC56F_DMA_NONINCR_ADDRESS 11:0 +#define NVC56F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_NONINCR_COUNT 28:16 +#define NVC56F_DMA_NONINCR_OPCODE 31:29 +#define NVC56F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC56F_DMA_NONINCR_DATA 31:0 +/* dma increment-once 
method format */ +#define NVC56F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC56F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_ONEINCR_COUNT 28:16 +#define NVC56F_DMA_ONEINCR_OPCODE 31:29 +#define NVC56F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC56F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC56F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC56F_DMA_IMMD_ADDRESS 11:0 +#define NVC56F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC56F_DMA_IMMD_DATA 28:16 +#define NVC56F_DMA_IMMD_OPCODE 31:29 +#define NVC56F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC56F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC56F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC56F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC56F_DMA_ENDSEG_OPCODE 31:29 +#define NVC56F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC56F_DMA_ADDRESS 12:2 +#define NVC56F_DMA_SUBCH 15:13 +#define NVC56F_DMA_OPCODE3 17:16 +#define NVC56F_DMA_OPCODE3_NONE (0x00000000) +#define NVC56F_DMA_COUNT 28:18 +#define NVC56F_DMA_OPCODE 31:29 +#define NVC56F_DMA_OPCODE_METHOD (0x00000000) +#define NVC56F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC56F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc56f_h_ */ diff --git a/kernel-open/nvidia-uvm/clc5b5.h b/kernel-open/nvidia-uvm/clc5b5.h new file mode 100644 index 000000000..71fb5ec4d --- /dev/null +++ b/kernel-open/nvidia-uvm/clc5b5.h @@ -0,0 +1,352 @@ +/******************************************************************************* + Copyright (c) 1993-2004 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + + +#include "nvtypes.h" + +#ifndef _clc5b5_h_ +#define _clc5b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define TURING_DMA_COPY_A (0x0000C5B5) + +typedef volatile struct _clc5b5_tag0 { + NvV32 Reserved00[0x40]; + NvV32 Nop; // 0x00000100 - 0x00000103 + NvV32 Reserved01[0xF]; + NvV32 PmTrigger; // 0x00000140 - 0x00000143 + NvV32 Reserved02[0x3F]; + NvV32 SetSemaphoreA; // 0x00000240 - 0x00000243 + NvV32 SetSemaphoreB; // 0x00000244 - 0x00000247 + NvV32 SetSemaphorePayload; // 0x00000248 - 0x0000024B + NvV32 Reserved03[0x2]; + NvV32 SetRenderEnableA; // 0x00000254 - 0x00000257 + NvV32 SetRenderEnableB; // 0x00000258 - 0x0000025B + NvV32 SetRenderEnableC; // 0x0000025C - 0x0000025F + NvV32 SetSrcPhysMode; // 0x00000260 - 0x00000263 + NvV32 SetDstPhysMode; // 0x00000264 - 0x00000267 + NvV32 Reserved04[0x6]; + NvV32 SetGlobalCounterUpper; // 0x00000280 - 0x00000283 + NvV32 SetGlobalCounterLower; // 0x00000284 - 0x00000287 + NvV32 SetPageoutStartPAUpper; // 0x00000288 - 0x0000028B + NvV32 SetPageoutStartPALower; // 0x0000028C - 0x0000028F + NvV32 Reserved05[0x1C]; + NvV32 LaunchDma; // 0x00000300 - 0x00000303 + NvV32 Reserved06[0x3F]; + NvV32 OffsetInUpper; // 0x00000400 - 0x00000403 + NvV32 OffsetInLower; // 0x00000404 - 0x00000407 + NvV32 OffsetOutUpper; // 0x00000408 - 0x0000040B + NvV32 OffsetOutLower; // 0x0000040C - 0x0000040F + NvV32 PitchIn; // 0x00000410 - 0x00000413 + NvV32 PitchOut; // 0x00000414 - 0x00000417 + NvV32 LineLengthIn; // 0x00000418 - 0x0000041B + NvV32 LineCount; // 0x0000041C - 0x0000041F + NvV32 Reserved07[0xB8]; + NvV32 SetRemapConstA; // 0x00000700 - 0x00000703 + NvV32 SetRemapConstB; // 0x00000704 - 0x00000707 + NvV32 SetRemapComponents; // 0x00000708 - 0x0000070B + NvV32 SetDstBlockSize; // 0x0000070C - 0x0000070F + NvV32 SetDstWidth; // 0x00000710 - 0x00000713 + NvV32 SetDstHeight; // 0x00000714 - 0x00000717 + NvV32 SetDstDepth; // 0x00000718 - 0x0000071B + NvV32 SetDstLayer; // 0x0000071C - 0x0000071F + NvV32 SetDstOrigin; // 0x00000720 - 0x00000723 + NvV32 Reserved08[0x1]; + NvV32 SetSrcBlockSize; // 0x00000728 - 0x0000072B + NvV32 SetSrcWidth; // 0x0000072C - 0x0000072F + NvV32 SetSrcHeight; // 0x00000730 - 0x00000733 + NvV32 SetSrcDepth; // 0x00000734 - 0x00000737 + NvV32 SetSrcLayer; // 0x00000738 - 0x0000073B + NvV32 SetSrcOrigin; // 0x0000073C - 0x0000073F + NvV32 Reserved09[0x1]; + NvV32 SrcOriginX; // 0x00000744 - 0x00000747 + NvV32 SrcOriginY; // 0x00000748 - 0x0000074B + NvV32 DstOriginX; // 0x0000074C - 0x0000074F + NvV32 DstOriginY; // 0x00000750 - 0x00000753 + NvV32 Reserved10[0x270]; + NvV32 PmTriggerEnd; // 0x00001114 - 0x00001117 + NvV32 Reserved11[0x3BA]; +} turing_dma_copy_aControlPio; + +#define NVC5B5_NOP (0x00000100) +#define NVC5B5_NOP_PARAMETER 31:0 +#define NVC5B5_PM_TRIGGER (0x00000140) +#define NVC5B5_PM_TRIGGER_V 31:0 +#define NVC5B5_SET_SEMAPHORE_A (0x00000240) +#define NVC5B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC5B5_SET_SEMAPHORE_B (0x00000244) +#define NVC5B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC5B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC5B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC5B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC5B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC5B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC5B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC5B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define 
NVC5B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC5B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC5B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC5B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC5B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC5B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC5B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2 +#define NVC5B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC5B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC5B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC5B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC5B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC5B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2 +#define NVC5B5_SET_GLOBAL_COUNTER_UPPER (0x00000280) +#define NVC5B5_SET_GLOBAL_COUNTER_UPPER_V 31:0 +#define NVC5B5_SET_GLOBAL_COUNTER_LOWER (0x00000284) +#define NVC5B5_SET_GLOBAL_COUNTER_LOWER_V 31:0 +#define NVC5B5_SET_PAGEOUT_START_PAUPPER (0x00000288) +#define NVC5B5_SET_PAGEOUT_START_PAUPPER_V 4:0 +#define NVC5B5_SET_PAGEOUT_START_PALOWER (0x0000028C) +#define NVC5B5_SET_PAGEOUT_START_PALOWER_V 31:0 +#define NVC5B5_LAUNCH_DMA (0x00000300) +#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC5B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC5B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_FLUSH_TYPE 25:25 +#define NVC5B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000) +#define NVC5B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003) +#define NVC5B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC5B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC5B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC5B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC5B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC5B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC5B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC5B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC5B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC5B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC5B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC5B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC5B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC5B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC5B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) 
+#define NVC5B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC5B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC5B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC5B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_SRC_BYPASS_L2 20:20 +#define NVC5B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC5B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC5B5_LAUNCH_DMA_DST_BYPASS_L2 21:21 +#define NVC5B5_LAUNCH_DMA_DST_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC5B5_LAUNCH_DMA_DST_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC5B5_LAUNCH_DMA_VPRMODE 23:22 +#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS (0x00000002) +#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID (0x00000003) +#define NVC5B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC5B5_LAUNCH_DMA_DISABLE_PLC 26:26 +#define NVC5B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC5B5_OFFSET_IN_UPPER (0x00000400) +#define NVC5B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC5B5_OFFSET_IN_LOWER (0x00000404) +#define NVC5B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC5B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC5B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC5B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC5B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC5B5_PITCH_IN (0x00000410) +#define NVC5B5_PITCH_IN_VALUE 31:0 +#define NVC5B5_PITCH_OUT (0x00000414) +#define NVC5B5_PITCH_OUT_VALUE 31:0 +#define NVC5B5_LINE_LENGTH_IN (0x00000418) +#define NVC5B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC5B5_LINE_COUNT (0x0000041C) +#define NVC5B5_LINE_COUNT_VALUE 31:0 +#define NVC5B5_SET_REMAP_CONST_A (0x00000700) +#define NVC5B5_SET_REMAP_CONST_A_V 31:0 +#define NVC5B5_SET_REMAP_CONST_B (0x00000704) +#define NVC5B5_SET_REMAP_CONST_B_V 31:0 +#define NVC5B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE 
(0x00000006) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC5B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC5B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC5B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 
(0x00000005) +#define NVC5B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC5B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC5B5_SET_DST_WIDTH (0x00000710) +#define NVC5B5_SET_DST_WIDTH_V 31:0 +#define NVC5B5_SET_DST_HEIGHT (0x00000714) +#define NVC5B5_SET_DST_HEIGHT_V 31:0 +#define NVC5B5_SET_DST_DEPTH (0x00000718) +#define NVC5B5_SET_DST_DEPTH_V 31:0 +#define NVC5B5_SET_DST_LAYER (0x0000071C) +#define NVC5B5_SET_DST_LAYER_V 31:0 +#define NVC5B5_SET_DST_ORIGIN (0x00000720) +#define NVC5B5_SET_DST_ORIGIN_X 15:0 +#define NVC5B5_SET_DST_ORIGIN_Y 31:16 +#define NVC5B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC5B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC5B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC5B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC5B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC5B5_SET_SRC_WIDTH (0x0000072C) +#define NVC5B5_SET_SRC_WIDTH_V 31:0 +#define NVC5B5_SET_SRC_HEIGHT (0x00000730) +#define NVC5B5_SET_SRC_HEIGHT_V 31:0 +#define NVC5B5_SET_SRC_DEPTH (0x00000734) +#define NVC5B5_SET_SRC_DEPTH_V 31:0 +#define NVC5B5_SET_SRC_LAYER (0x00000738) +#define NVC5B5_SET_SRC_LAYER_V 31:0 +#define NVC5B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC5B5_SET_SRC_ORIGIN_X 15:0 +#define NVC5B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC5B5_SRC_ORIGIN_X (0x00000744) +#define NVC5B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC5B5_SRC_ORIGIN_Y (0x00000748) +#define NVC5B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC5B5_DST_ORIGIN_X (0x0000074C) +#define NVC5B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC5B5_DST_ORIGIN_Y (0x00000750) +#define NVC5B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC5B5_PM_TRIGGER_END (0x00001114) +#define NVC5B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc5b5_h + diff --git a/kernel-open/nvidia-uvm/clc6b5.h b/kernel-open/nvidia-uvm/clc6b5.h new file mode 100644 index 000000000..7d4871fd6 --- /dev/null +++ b/kernel-open/nvidia-uvm/clc6b5.h @@ -0,0 +1,352 @@ +/******************************************************************************* + Copyright (c) 1993-2004 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial 
portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + + +#include "nvtypes.h" + +#ifndef _clc6b5_h_ +#define _clc6b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define AMPERE_DMA_COPY_A (0x0000C6B5) + +typedef volatile struct _clc6b5_tag0 { + NvV32 Reserved00[0x40]; + NvV32 Nop; // 0x00000100 - 0x00000103 + NvV32 Reserved01[0xF]; + NvV32 PmTrigger; // 0x00000140 - 0x00000143 + NvV32 Reserved02[0x3F]; + NvV32 SetSemaphoreA; // 0x00000240 - 0x00000243 + NvV32 SetSemaphoreB; // 0x00000244 - 0x00000247 + NvV32 SetSemaphorePayload; // 0x00000248 - 0x0000024B + NvV32 Reserved03[0x2]; + NvV32 SetRenderEnableA; // 0x00000254 - 0x00000257 + NvV32 SetRenderEnableB; // 0x00000258 - 0x0000025B + NvV32 SetRenderEnableC; // 0x0000025C - 0x0000025F + NvV32 SetSrcPhysMode; // 0x00000260 - 0x00000263 + NvV32 SetDstPhysMode; // 0x00000264 - 0x00000267 + NvV32 Reserved04[0x6]; + NvV32 SetGlobalCounterUpper; // 0x00000280 - 0x00000283 + NvV32 SetGlobalCounterLower; // 0x00000284 - 0x00000287 + NvV32 SetPageoutStartPAUpper; // 0x00000288 - 0x0000028B + NvV32 SetPageoutStartPALower; // 0x0000028C - 0x0000028F + NvV32 Reserved05[0x1C]; + NvV32 LaunchDma; // 0x00000300 - 0x00000303 + NvV32 Reserved06[0x3F]; + NvV32 OffsetInUpper; // 0x00000400 - 0x00000403 + NvV32 OffsetInLower; // 0x00000404 - 0x00000407 + NvV32 OffsetOutUpper; // 0x00000408 - 0x0000040B + NvV32 OffsetOutLower; // 0x0000040C - 0x0000040F + NvV32 PitchIn; // 0x00000410 - 0x00000413 + NvV32 PitchOut; // 0x00000414 - 0x00000417 + NvV32 LineLengthIn; // 0x00000418 - 0x0000041B + NvV32 LineCount; // 0x0000041C - 0x0000041F + NvV32 Reserved07[0xB8]; + NvV32 SetRemapConstA; // 0x00000700 - 0x00000703 + NvV32 SetRemapConstB; // 0x00000704 - 0x00000707 + NvV32 SetRemapComponents; // 0x00000708 - 0x0000070B + NvV32 SetDstBlockSize; // 0x0000070C - 0x0000070F + NvV32 SetDstWidth; // 0x00000710 - 0x00000713 + NvV32 SetDstHeight; // 0x00000714 - 0x00000717 + NvV32 SetDstDepth; // 0x00000718 - 0x0000071B + NvV32 SetDstLayer; // 0x0000071C - 0x0000071F + NvV32 SetDstOrigin; // 0x00000720 - 0x00000723 + NvV32 Reserved08[0x1]; + NvV32 SetSrcBlockSize; // 0x00000728 - 0x0000072B + NvV32 SetSrcWidth; // 0x0000072C - 0x0000072F + NvV32 SetSrcHeight; // 0x00000730 - 0x00000733 + NvV32 SetSrcDepth; // 0x00000734 - 0x00000737 + NvV32 SetSrcLayer; // 0x00000738 - 0x0000073B + NvV32 SetSrcOrigin; // 0x0000073C - 0x0000073F + NvV32 Reserved09[0x1]; + NvV32 SrcOriginX; // 0x00000744 - 0x00000747 + NvV32 SrcOriginY; // 0x00000748 - 0x0000074B + NvV32 DstOriginX; // 0x0000074C - 0x0000074F + NvV32 DstOriginY; // 0x00000750 - 0x00000753 + NvV32 Reserved10[0x270]; + NvV32 PmTriggerEnd; // 0x00001114 - 0x00001117 + NvV32 Reserved11[0x3BA]; +} ampere_dma_copy_aControlPio; + +#define NVC6B5_NOP (0x00000100) +#define NVC6B5_NOP_PARAMETER 31:0 +#define NVC6B5_PM_TRIGGER (0x00000140) +#define NVC6B5_PM_TRIGGER_V 31:0 +#define NVC6B5_SET_SEMAPHORE_A (0x00000240) +#define NVC6B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC6B5_SET_SEMAPHORE_B (0x00000244) 
+#define NVC6B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC6B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC6B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC6B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC6B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC6B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC6B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC6B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC6B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET_PEERMEM (0x00000003) +#define NVC6B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2 +#define NVC6B5_SET_SRC_PHYS_MODE_PEER_ID 8:6 +#define NVC6B5_SET_SRC_PHYS_MODE_FLA 9:9 +#define NVC6B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC6B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC6B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC6B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC6B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC6B5_SET_DST_PHYS_MODE_TARGET_PEERMEM (0x00000003) +#define NVC6B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2 +#define NVC6B5_SET_DST_PHYS_MODE_PEER_ID 8:6 +#define NVC6B5_SET_DST_PHYS_MODE_FLA 9:9 +#define NVC6B5_SET_GLOBAL_COUNTER_UPPER (0x00000280) +#define NVC6B5_SET_GLOBAL_COUNTER_UPPER_V 31:0 +#define NVC6B5_SET_GLOBAL_COUNTER_LOWER (0x00000284) +#define NVC6B5_SET_GLOBAL_COUNTER_LOWER_V 31:0 +#define NVC6B5_SET_PAGEOUT_START_PAUPPER (0x00000288) +#define NVC6B5_SET_PAGEOUT_START_PAUPPER_V 4:0 +#define NVC6B5_SET_PAGEOUT_START_PALOWER (0x0000028C) +#define NVC6B5_SET_PAGEOUT_START_PALOWER_V 31:0 +#define NVC6B5_LAUNCH_DMA (0x00000300) +#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC6B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC6B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_FLUSH_TYPE 25:25 +#define NVC6B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000) +#define NVC6B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003) +#define NVC6B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC6B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC6B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC6B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC6B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC6B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC6B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH 
(0x00000001) +#define NVC6B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC6B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC6B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC6B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC6B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC6B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC6B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC6B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC6B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC6B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC6B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC6B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_VPRMODE 23:22 +#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS (0x00000002) +#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID (0x00000003) +#define NVC6B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC6B5_LAUNCH_DMA_DISABLE_PLC 26:26 +#define NVC6B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC6B5_OFFSET_IN_UPPER (0x00000400) +#define NVC6B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC6B5_OFFSET_IN_LOWER (0x00000404) +#define NVC6B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC6B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC6B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC6B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC6B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC6B5_PITCH_IN (0x00000410) +#define NVC6B5_PITCH_IN_VALUE 31:0 +#define NVC6B5_PITCH_OUT (0x00000414) +#define NVC6B5_PITCH_OUT_VALUE 31:0 +#define NVC6B5_LINE_LENGTH_IN (0x00000418) +#define NVC6B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC6B5_LINE_COUNT (0x0000041C) +#define NVC6B5_LINE_COUNT_VALUE 31:0 +#define NVC6B5_SET_REMAP_CONST_A (0x00000700) +#define NVC6B5_SET_REMAP_CONST_A_V 31:0 +#define NVC6B5_SET_REMAP_CONST_B (0x00000704) +#define NVC6B5_SET_REMAP_CONST_B_V 31:0 +#define NVC6B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X 
2:0 +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC6B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC6B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC6B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 
(0x00000005) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC6B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC6B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC6B5_SET_DST_WIDTH (0x00000710) +#define NVC6B5_SET_DST_WIDTH_V 31:0 +#define NVC6B5_SET_DST_HEIGHT (0x00000714) +#define NVC6B5_SET_DST_HEIGHT_V 31:0 +#define NVC6B5_SET_DST_DEPTH (0x00000718) +#define NVC6B5_SET_DST_DEPTH_V 31:0 +#define NVC6B5_SET_DST_LAYER (0x0000071C) +#define NVC6B5_SET_DST_LAYER_V 31:0 +#define NVC6B5_SET_DST_ORIGIN (0x00000720) +#define NVC6B5_SET_DST_ORIGIN_X 15:0 +#define NVC6B5_SET_DST_ORIGIN_Y 31:16 +#define NVC6B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC6B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC6B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC6B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC6B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC6B5_SET_SRC_WIDTH (0x0000072C) +#define NVC6B5_SET_SRC_WIDTH_V 31:0 +#define NVC6B5_SET_SRC_HEIGHT (0x00000730) +#define NVC6B5_SET_SRC_HEIGHT_V 31:0 +#define NVC6B5_SET_SRC_DEPTH (0x00000734) +#define NVC6B5_SET_SRC_DEPTH_V 31:0 +#define NVC6B5_SET_SRC_LAYER (0x00000738) +#define NVC6B5_SET_SRC_LAYER_V 31:0 +#define NVC6B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC6B5_SET_SRC_ORIGIN_X 15:0 +#define NVC6B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC6B5_SRC_ORIGIN_X (0x00000744) +#define NVC6B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC6B5_SRC_ORIGIN_Y (0x00000748) +#define NVC6B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC6B5_DST_ORIGIN_X (0x0000074C) +#define NVC6B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC6B5_DST_ORIGIN_Y (0x00000750) +#define NVC6B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC6B5_PM_TRIGGER_END (0x00001114) +#define NVC6B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc6b5_h + diff --git a/kernel-open/nvidia-uvm/clc7b5.h b/kernel-open/nvidia-uvm/clc7b5.h new file mode 100644 index 000000000..fa72af41e --- /dev/null +++ b/kernel-open/nvidia-uvm/clc7b5.h @@ -0,0 +1,379 @@ +/******************************************************************************* + Copyright (c) 1993-2004 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated 
documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + + +#include "nvtypes.h" + +#ifndef _clc7b5_h_ +#define _clc7b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define AMPERE_DMA_COPY_B (0x0000C7B5) + +typedef volatile struct _clc7b5_tag0 { + NvV32 Reserved00[0x40]; + NvV32 Nop; // 0x00000100 - 0x00000103 + NvV32 Reserved01[0xF]; + NvV32 PmTrigger; // 0x00000140 - 0x00000143 + NvV32 Reserved02[0x36]; + NvV32 SetMonitoredFenceType; // 0x0000021C - 0x0000021F + NvV32 SetMonitoredFenceSignalAddrBaseUpper; // 0x00000220 - 0x00000223 + NvV32 SetMonitoredFenceSignalAddrBaseLower; // 0x00000224 - 0x00000227 + NvV32 Reserved03[0x6]; + NvV32 SetSemaphoreA; // 0x00000240 - 0x00000243 + NvV32 SetSemaphoreB; // 0x00000244 - 0x00000247 + NvV32 SetSemaphorePayload; // 0x00000248 - 0x0000024B + NvV32 SetSemaphorePayloadUpper; // 0x0000024C - 0x0000024F + NvV32 Reserved04[0x1]; + NvV32 SetRenderEnableA; // 0x00000254 - 0x00000257 + NvV32 SetRenderEnableB; // 0x00000258 - 0x0000025B + NvV32 SetRenderEnableC; // 0x0000025C - 0x0000025F + NvV32 SetSrcPhysMode; // 0x00000260 - 0x00000263 + NvV32 SetDstPhysMode; // 0x00000264 - 0x00000267 + NvV32 Reserved05[0x6]; + NvV32 SetGlobalCounterUpper; // 0x00000280 - 0x00000283 + NvV32 SetGlobalCounterLower; // 0x00000284 - 0x00000287 + NvV32 SetPageoutStartPAUpper; // 0x00000288 - 0x0000028B + NvV32 SetPageoutStartPALower; // 0x0000028C - 0x0000028F + NvV32 Reserved06[0x1C]; + NvV32 LaunchDma; // 0x00000300 - 0x00000303 + NvV32 Reserved07[0x3F]; + NvV32 OffsetInUpper; // 0x00000400 - 0x00000403 + NvV32 OffsetInLower; // 0x00000404 - 0x00000407 + NvV32 OffsetOutUpper; // 0x00000408 - 0x0000040B + NvV32 OffsetOutLower; // 0x0000040C - 0x0000040F + NvV32 PitchIn; // 0x00000410 - 0x00000413 + NvV32 PitchOut; // 0x00000414 - 0x00000417 + NvV32 LineLengthIn; // 0x00000418 - 0x0000041B + NvV32 LineCount; // 0x0000041C - 0x0000041F + NvV32 Reserved08[0xB8]; + NvV32 SetRemapConstA; // 0x00000700 - 0x00000703 + NvV32 SetRemapConstB; // 0x00000704 - 0x00000707 + NvV32 SetRemapComponents; // 0x00000708 - 0x0000070B + NvV32 SetDstBlockSize; // 0x0000070C - 0x0000070F + NvV32 SetDstWidth; // 0x00000710 - 0x00000713 + NvV32 SetDstHeight; // 0x00000714 - 0x00000717 + NvV32 SetDstDepth; // 0x00000718 - 0x0000071B + NvV32 SetDstLayer; // 0x0000071C - 0x0000071F + NvV32 SetDstOrigin; // 0x00000720 - 0x00000723 + NvV32 Reserved09[0x1]; + NvV32 SetSrcBlockSize; // 0x00000728 - 0x0000072B + NvV32 SetSrcWidth; // 0x0000072C - 0x0000072F + NvV32 SetSrcHeight; // 0x00000730 - 0x00000733 + NvV32 SetSrcDepth; // 0x00000734 - 
0x00000737 + NvV32 SetSrcLayer; // 0x00000738 - 0x0000073B + NvV32 SetSrcOrigin; // 0x0000073C - 0x0000073F + NvV32 Reserved10[0x1]; + NvV32 SrcOriginX; // 0x00000744 - 0x00000747 + NvV32 SrcOriginY; // 0x00000748 - 0x0000074B + NvV32 DstOriginX; // 0x0000074C - 0x0000074F + NvV32 DstOriginY; // 0x00000750 - 0x00000753 + NvV32 Reserved11[0x270]; + NvV32 PmTriggerEnd; // 0x00001114 - 0x00001117 + NvV32 Reserved12[0x3BA]; +} ampere_dma_copy_bControlPio; + +#define NVC7B5_NOP (0x00000100) +#define NVC7B5_NOP_PARAMETER 31:0 +#define NVC7B5_PM_TRIGGER (0x00000140) +#define NVC7B5_PM_TRIGGER_V 31:0 +#define NVC7B5_SET_MONITORED_FENCE_TYPE (0x0000021C) +#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE 0:0 +#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE (0x00000000) +#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE_EXT (0x00000001) +#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER (0x00000220) +#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER_UPPER 16:0 +#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER (0x00000224) +#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER_LOWER 31:0 +#define NVC7B5_SET_SEMAPHORE_A (0x00000240) +#define NVC7B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC7B5_SET_SEMAPHORE_B (0x00000244) +#define NVC7B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC7B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC7B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC7B5_SET_SEMAPHORE_PAYLOAD_UPPER (0x0000024C) +#define NVC7B5_SET_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD 31:0 +#define NVC7B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC7B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC7B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC7B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC7B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC7B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_PEERMEM (0x00000003) +#define NVC7B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2 +#define NVC7B5_SET_SRC_PHYS_MODE_PEER_ID 8:6 +#define NVC7B5_SET_SRC_PHYS_MODE_FLA 9:9 +#define NVC7B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC7B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC7B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC7B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC7B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC7B5_SET_DST_PHYS_MODE_TARGET_PEERMEM (0x00000003) +#define NVC7B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2 +#define NVC7B5_SET_DST_PHYS_MODE_PEER_ID 8:6 +#define NVC7B5_SET_DST_PHYS_MODE_FLA 9:9 +#define NVC7B5_SET_GLOBAL_COUNTER_UPPER (0x00000280) +#define NVC7B5_SET_GLOBAL_COUNTER_UPPER_V 31:0 +#define NVC7B5_SET_GLOBAL_COUNTER_LOWER (0x00000284) +#define NVC7B5_SET_GLOBAL_COUNTER_LOWER_V 31:0 +#define NVC7B5_SET_PAGEOUT_START_PAUPPER (0x00000288) +#define NVC7B5_SET_PAGEOUT_START_PAUPPER_V 4:0 +#define NVC7B5_SET_PAGEOUT_START_PALOWER (0x0000028C) +#define NVC7B5_SET_PAGEOUT_START_PALOWER_V 31:0 
+#define NVC7B5_LAUNCH_DMA (0x00000300) +#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE 25:25 +#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000) +#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_NO_TIMESTAMP (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_WITH_TIMESTAMP (0x00000002) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003) +#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC7B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC7B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC7B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC7B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC7B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDA (0x00000008) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDB (0x00000009) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN (0x0000000B) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX (0x0000000C) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDC (0x0000000D) 
+#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDD (0x0000000E) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDE (0x0000000F) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_VPRMODE 23:22 +#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2SYS (0x00000002) +#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_SYS2VID (0x00000003) +#define NVC7B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC7B5_LAUNCH_DMA_DISABLE_PLC 26:26 +#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE 27:27 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_ONE_WORD (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_TWO_WORD (0x00000001) +#define NVC7B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC7B5_OFFSET_IN_UPPER (0x00000400) +#define NVC7B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC7B5_OFFSET_IN_LOWER (0x00000404) +#define NVC7B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC7B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC7B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC7B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC7B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC7B5_PITCH_IN (0x00000410) +#define NVC7B5_PITCH_IN_VALUE 31:0 +#define NVC7B5_PITCH_OUT (0x00000414) +#define NVC7B5_PITCH_OUT_VALUE 31:0 +#define NVC7B5_LINE_LENGTH_IN (0x00000418) +#define NVC7B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC7B5_LINE_COUNT (0x0000041C) +#define NVC7B5_LINE_COUNT_VALUE 31:0 +#define NVC7B5_SET_REMAP_CONST_A (0x00000700) +#define NVC7B5_SET_REMAP_CONST_A_V 31:0 +#define NVC7B5_SET_REMAP_CONST_B (0x00000704) +#define NVC7B5_SET_REMAP_CONST_B_V 31:0 +#define NVC7B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define 
NVC7B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC7B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC7B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC7B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC7B5_SET_DST_WIDTH (0x00000710) +#define NVC7B5_SET_DST_WIDTH_V 31:0 +#define NVC7B5_SET_DST_HEIGHT (0x00000714) +#define NVC7B5_SET_DST_HEIGHT_V 31:0 +#define NVC7B5_SET_DST_DEPTH (0x00000718) +#define NVC7B5_SET_DST_DEPTH_V 31:0 +#define NVC7B5_SET_DST_LAYER (0x0000071C) +#define NVC7B5_SET_DST_LAYER_V 31:0 +#define NVC7B5_SET_DST_ORIGIN (0x00000720) +#define NVC7B5_SET_DST_ORIGIN_X 15:0 +#define NVC7B5_SET_DST_ORIGIN_Y 31:16 +#define NVC7B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC7B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC7B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define 
NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC7B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC7B5_SET_SRC_WIDTH (0x0000072C) +#define NVC7B5_SET_SRC_WIDTH_V 31:0 +#define NVC7B5_SET_SRC_HEIGHT (0x00000730) +#define NVC7B5_SET_SRC_HEIGHT_V 31:0 +#define NVC7B5_SET_SRC_DEPTH (0x00000734) +#define NVC7B5_SET_SRC_DEPTH_V 31:0 +#define NVC7B5_SET_SRC_LAYER (0x00000738) +#define NVC7B5_SET_SRC_LAYER_V 31:0 +#define NVC7B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC7B5_SET_SRC_ORIGIN_X 15:0 +#define NVC7B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC7B5_SRC_ORIGIN_X (0x00000744) +#define NVC7B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC7B5_SRC_ORIGIN_Y (0x00000748) +#define NVC7B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC7B5_DST_ORIGIN_X (0x0000074C) +#define NVC7B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC7B5_DST_ORIGIN_Y (0x00000750) +#define NVC7B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC7B5_PM_TRIGGER_END (0x00001114) +#define NVC7B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc7b5_h + diff --git a/kernel-open/nvidia-uvm/ctrl2080mc.h b/kernel-open/nvidia-uvm/ctrl2080mc.h new file mode 100644 index 000000000..d9815affd --- /dev/null +++ b/kernel-open/nvidia-uvm/ctrl2080mc.h @@ -0,0 +1,51 @@ +/******************************************************************************* + Copyright (c) 2013-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef _ctrl2080mc_h_ +#define _ctrl2080mc_h_ + +/* valid architecture values */ +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_T13X (0xE0000013) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM000 (0x00000110) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM200 (0x00000120) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100 (0x00000130) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100 (0x00000140) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100 (0x00000160) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100 (0x00000170) + + + + + + + + + + +/* valid ARCHITECTURE_GP10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GP100 (0x00000000) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GP000 (0x00000001) + +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA100 (0x00000000) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA000 (0x00000001) +#endif /* _ctrl2080mc_h_ */ diff --git a/kernel-open/nvidia-uvm/hwref/ampere/ga100/dev_fault.h b/kernel-open/nvidia-uvm/hwref/ampere/ga100/dev_fault.h new file mode 100644 index 000000000..1c8ebde45 --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/ampere/ga100/dev_fault.h @@ -0,0 +1,480 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef __ga100_dev_fault_h__ +#define __ga100_dev_fault_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_PFAULT /* ----G */ +#define NV_PFAULT_MMU_ENG_ID_GRAPHICS 64 /* */ +#define NV_PFAULT_MMU_ENG_ID_DISPLAY 1 /* */ +#define NV_PFAULT_MMU_ENG_ID_GSP 2 /* */ +#define NV_PFAULT_MMU_ENG_ID_IFB 9 /* */ +#define NV_PFAULT_MMU_ENG_ID_FLA 4 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1 128 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2 192 /* */ +#define NV_PFAULT_MMU_ENG_ID_SEC 14 /* */ +#define NV_PFAULT_MMU_ENG_ID_PERF 8 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC 25 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC0 25 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC1 26 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC2 27 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC3 28 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC4 29 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVJPG0 30 /* */ +#define NV_PFAULT_MMU_ENG_ID_GRCOPY 15 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE0 15 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE1 16 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE2 17 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE3 18 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE4 19 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE5 20 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE6 21 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE7 22 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE8 23 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE9 24 /* */ +#define NV_PFAULT_MMU_ENG_ID_PWR_PMU 6 /* */ +#define NV_PFAULT_MMU_ENG_ID_PTP 3 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC0 11 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC1 12 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC2 13 /* */ +#define NV_PFAULT_MMU_ENG_ID_OFA0 10 /* */ +#define NV_PFAULT_MMU_ENG_ID_PHYSICAL 31 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST0 32 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST1 33 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST2 34 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST3 35 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST4 36 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST5 37 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST6 38 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST7 39 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST8 40 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST9 41 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST10 42 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST11 43 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST12 44 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST13 45 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST14 46 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST15 47 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST16 48 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST17 49 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST18 50 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST19 51 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST20 52 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST21 53 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST22 54 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST23 55 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST24 56 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST25 57 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST26 58 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST27 59 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST28 60 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST29 61 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST30 62 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST31 63 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN0 128 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN1 129 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN2 130 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN3 131 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN4 132 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN5 133 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN6 134 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN7 135 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN8 136 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN9 137 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN10 138 
/* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN11 139 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN12 140 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN13 141 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN14 142 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN15 143 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN16 144 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN17 145 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN18 146 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN19 147 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN20 148 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN21 149 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN22 150 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN23 151 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN24 152 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN25 153 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN26 154 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN27 155 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN28 156 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN29 157 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN30 158 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN31 159 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN32 160 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN33 161 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN34 162 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN35 163 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN36 164 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN37 165 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN38 166 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN39 167 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN40 168 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN41 169 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN42 170 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN43 171 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN44 172 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN45 173 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN46 174 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN47 175 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN48 176 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN49 177 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN50 178 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN51 179 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN52 180 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN53 181 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN54 182 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN55 183 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN56 184 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN57 185 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN58 186 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN59 187 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN60 188 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN61 189 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN62 190 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN63 191 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN0 192 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN1 193 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN2 194 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN3 195 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN4 196 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN5 197 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN6 198 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN7 199 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN8 200 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN9 201 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN10 202 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN11 203 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN12 204 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN13 205 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN14 206 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN15 207 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN16 208 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN17 209 /* */ +#define 
NV_PFAULT_MMU_ENG_ID_BAR2_FN18 210 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN19 211 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN20 212 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN21 213 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN22 214 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN23 215 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN24 216 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN25 217 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN26 218 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN27 219 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN28 220 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN29 221 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN30 222 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN31 223 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN32 224 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN33 225 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN34 226 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN35 227 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN36 228 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN37 229 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN38 230 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN39 231 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN40 232 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN41 233 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN42 234 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN43 235 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN44 236 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN45 237 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN46 238 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN47 239 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN48 240 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN49 241 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN50 242 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN51 243 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN52 244 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN53 245 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN54 246 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN55 247 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN56 248 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN57 249 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN58 250 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN59 251 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN60 252 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN61 253 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN62 254 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN63 255 /* */ +#define NV_PFAULT_FAULT_TYPE 4:0 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE 0x00000000 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE_SIZE 0x00000001 /* */ +#define NV_PFAULT_FAULT_TYPE_PTE 0x00000002 /* */ +#define NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION 0x00000003 /* */ +#define NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK 0x00000004 /* */ +#define NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION 0x00000005 /* */ +#define NV_PFAULT_FAULT_TYPE_RO_VIOLATION 0x00000006 /* */ +#define NV_PFAULT_FAULT_TYPE_WO_VIOLATION 0x00000007 /* */ +#define NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION 0x00000008 /* */ +#define NV_PFAULT_FAULT_TYPE_WORK_CREATION 0x00000009 /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE 0x0000000a /* */ +#define NV_PFAULT_FAULT_TYPE_COMPRESSION_FAILURE 0x0000000b /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND 0x0000000c /* */ +#define NV_PFAULT_FAULT_TYPE_REGION_VIOLATION 0x0000000d /* */ +#define NV_PFAULT_FAULT_TYPE_POISONED 0x0000000e /* */ +#define NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION 0x0000000f /* */ +#define NV_PFAULT_CLIENT 14:8 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_0 0x00000000 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_1 0x00000001 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_2 0x00000002 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_3 0x00000003 
/* */ +#define NV_PFAULT_CLIENT_GPC_T1_4 0x00000004 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_5 0x00000005 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_6 0x00000006 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_7 0x00000007 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_0 0x00000008 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_1 0x00000009 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_2 0x0000000A /* */ +#define NV_PFAULT_CLIENT_GPC_PE_3 0x0000000B /* */ +#define NV_PFAULT_CLIENT_GPC_PE_4 0x0000000C /* */ +#define NV_PFAULT_CLIENT_GPC_PE_5 0x0000000D /* */ +#define NV_PFAULT_CLIENT_GPC_PE_6 0x0000000E /* */ +#define NV_PFAULT_CLIENT_GPC_PE_7 0x0000000F /* */ +#define NV_PFAULT_CLIENT_GPC_RAST 0x00000010 /* */ +#define NV_PFAULT_CLIENT_GPC_GCC 0x00000011 /* */ +#define NV_PFAULT_CLIENT_GPC_GPCCS 0x00000012 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_0 0x00000013 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_1 0x00000014 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_2 0x00000015 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_3 0x00000016 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_8 0x00000021 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_9 0x00000022 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_10 0x00000023 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_11 0x00000024 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_12 0x00000025 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_13 0x00000026 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_14 0x00000027 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_15 0x00000028 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_0 0x00000029 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_1 0x0000002A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_2 0x0000002B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_3 0x0000002C /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_4 0x0000002D /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_5 0x0000002E /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_6 0x0000002F /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_7 0x00000030 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_8 0x00000031 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_9 0x00000032 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_8 0x00000033 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_9 0x00000034 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_16 0x00000035 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_17 0x00000036 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_18 0x00000037 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_19 0x00000038 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_10 0x00000039 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_11 0x0000003A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_10 0x0000003B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_11 0x0000003C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_20 0x0000003D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_21 0x0000003E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_22 0x0000003F /* */ +#define NV_PFAULT_CLIENT_GPC_T1_23 0x00000040 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_12 0x00000041 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_13 0x00000042 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_12 0x00000043 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_13 0x00000044 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_24 0x00000045 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_25 0x00000046 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_26 0x00000047 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_27 0x00000048 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_14 0x00000049 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_15 0x0000004A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_14 0x0000004B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_15 0x0000004C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_28 0x0000004D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_29 0x0000004E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_30 0x0000004F /* */ +#define 
NV_PFAULT_CLIENT_GPC_T1_31 0x00000050 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_16 0x00000051 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_17 0x00000052 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_16 0x00000053 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_17 0x00000054 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_32 0x00000055 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_33 0x00000056 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_34 0x00000057 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_35 0x00000058 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_18 0x00000059 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_19 0x0000005A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_18 0x0000005B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_19 0x0000005C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_36 0x0000005D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_37 0x0000005E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_38 0x0000005F /* */ +#define NV_PFAULT_CLIENT_GPC_T1_39 0x00000060 /* */ +#define NV_PFAULT_CLIENT_GPC_ROP_0 0x00000070 /* */ +#define NV_PFAULT_CLIENT_GPC_ROP_1 0x00000071 /* */ +#define NV_PFAULT_CLIENT_GPC_ROP_2 0x00000072 /* */ +#define NV_PFAULT_CLIENT_GPC_ROP_3 0x00000073 /* */ +#define NV_PFAULT_CLIENT_GPC_GPM 0x00000017 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_0 0x00000018 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_1 0x00000019 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_2 0x0000001A /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_3 0x0000001B /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_4 0x0000001C /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_5 0x0000001D /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_6 0x0000001E /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_7 0x0000001F /* */ +#define NV_PFAULT_CLIENT_GPC_RGG_UTLB 0x00000020 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_8 0x00000031 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_9 0x00000032 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_10 0x00000033 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_11 0x00000034 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_12 0x00000035 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_13 0x00000036 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_14 0x00000037 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_15 0x00000038 /* */ +#define NV_PFAULT_CLIENT_HUB_VIP 0x00000000 /* */ +#define NV_PFAULT_CLIENT_HUB_CE0 0x00000001 /* */ +#define NV_PFAULT_CLIENT_HUB_CE1 0x00000002 /* */ +#define NV_PFAULT_CLIENT_HUB_DNISO 0x00000003 /* */ +#define NV_PFAULT_CLIENT_HUB_DISPNISO 0x00000003 /* */ +#define NV_PFAULT_CLIENT_HUB_FE0 0x00000004 /* */ +#define NV_PFAULT_CLIENT_HUB_FE 0x00000004 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS0 0x00000005 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS 0x00000005 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST 0x00000006 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST_CPU 0x00000007 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST_CPU_NB 0x00000008 /* */ +#define NV_PFAULT_CLIENT_HUB_ISO 0x00000009 /* */ +#define NV_PFAULT_CLIENT_HUB_MMU 0x0000000A /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC0 0x0000000B /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC 0x0000000B /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC1 0x0000000D /* */ +#define NV_PFAULT_CLIENT_HUB_NISO 0x0000000E /* */ +#define NV_PFAULT_CLIENT_HUB_ACTRS 0x0000000E /* */ +#define NV_PFAULT_CLIENT_HUB_P2P 0x0000000F /* */ +#define NV_PFAULT_CLIENT_HUB_PD 0x00000010 /* */ +#define NV_PFAULT_CLIENT_HUB_PERF0 0x00000011 /* */ +#define NV_PFAULT_CLIENT_HUB_PERF 0x00000011 /* */ +#define NV_PFAULT_CLIENT_HUB_PMU 0x00000012 /* */ +#define NV_PFAULT_CLIENT_HUB_RASTERTWOD 0x00000013 /* */ +#define NV_PFAULT_CLIENT_HUB_SCC 0x00000014 /* */ +#define 
NV_PFAULT_CLIENT_HUB_SCC_NB 0x00000015 /* */ +#define NV_PFAULT_CLIENT_HUB_SEC 0x00000016 /* */ +#define NV_PFAULT_CLIENT_HUB_SSYNC 0x00000017 /* */ +#define NV_PFAULT_CLIENT_HUB_GRCOPY 0x00000018 /* */ +#define NV_PFAULT_CLIENT_HUB_CE2 0x00000018 /* */ +#define NV_PFAULT_CLIENT_HUB_XV 0x00000019 /* */ +#define NV_PFAULT_CLIENT_HUB_MMU_NB 0x0000001A /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC0 0x0000001B /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC 0x0000001B /* */ +#define NV_PFAULT_CLIENT_HUB_DFALCON 0x0000001C /* */ +#define NV_PFAULT_CLIENT_HUB_SKED0 0x0000001D /* */ +#define NV_PFAULT_CLIENT_HUB_SKED 0x0000001D /* */ +#define NV_PFAULT_CLIENT_HUB_AFALCON 0x0000001E /* */ +#define NV_PFAULT_CLIENT_HUB_DONT_CARE 0x0000001F /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE0 0x00000020 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE1 0x00000021 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE2 0x00000022 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE3 0x00000023 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE4 0x00000024 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE5 0x00000025 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE6 0x00000026 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE7 0x00000027 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE8 0x00000028 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE9 0x00000029 /* */ +#define NV_PFAULT_CLIENT_HUB_HSHUB 0x0000002A /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X0 0x0000002B /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X1 0x0000002C /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X2 0x0000002D /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X3 0x0000002E /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X4 0x0000002F /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X5 0x00000030 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X6 0x00000031 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X7 0x00000032 /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC2 0x00000033 /* */ +#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER0 0x00000034 /* */ +#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER1 0x00000035 /* */ +#define NV_PFAULT_CLIENT_HUB_DWBIF 0x00000036 /* */ +#define NV_PFAULT_CLIENT_HUB_FBFALCON 0x00000037 /* */ +#define NV_PFAULT_CLIENT_HUB_CE_SHIM 0x00000038 /* */ +#define NV_PFAULT_CLIENT_HUB_GSP 0x00000039 /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC1 0x0000003A /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC2 0x0000003B /* */ +#define NV_PFAULT_CLIENT_HUB_NVJPG0 0x0000003C /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC3 0x0000003D /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC4 0x0000003E /* */ +#define NV_PFAULT_CLIENT_HUB_OFA0 0x0000003F /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE10 0x00000040 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE11 0x00000041 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE12 0x00000042 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE13 0x00000043 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE14 0x00000044 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE15 0x00000045 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X8 0x00000046 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X9 0x00000047 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X10 0x00000048 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X11 0x00000049 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X12 0x0000004A /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X13 0x0000004B /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X14 0x0000004C /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X15 0x0000004D /* */ +#define NV_PFAULT_CLIENT_HUB_FE1 0x0000004E /* */ +#define NV_PFAULT_CLIENT_HUB_FE2 0x0000004F /* */ +#define NV_PFAULT_CLIENT_HUB_FE3 0x00000050 /* */ +#define NV_PFAULT_CLIENT_HUB_FE4 0x00000051 /* */ +#define NV_PFAULT_CLIENT_HUB_FE5 0x00000052 /* */ +#define NV_PFAULT_CLIENT_HUB_FE6 0x00000053 /* */ +#define 
NV_PFAULT_CLIENT_HUB_FE7 0x00000054 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS1 0x00000055 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS2 0x00000056 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS3 0x00000057 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS4 0x00000058 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS5 0x00000059 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS6 0x0000005A /* */ +#define NV_PFAULT_CLIENT_HUB_FECS7 0x0000005B /* */ +#define NV_PFAULT_CLIENT_HUB_SKED1 0x0000005C /* */ +#define NV_PFAULT_CLIENT_HUB_SKED2 0x0000005D /* */ +#define NV_PFAULT_CLIENT_HUB_SKED3 0x0000005E /* */ +#define NV_PFAULT_CLIENT_HUB_SKED4 0x0000005F /* */ +#define NV_PFAULT_CLIENT_HUB_SKED5 0x00000060 /* */ +#define NV_PFAULT_CLIENT_HUB_SKED6 0x00000061 /* */ +#define NV_PFAULT_CLIENT_HUB_SKED7 0x00000062 /* */ +#define NV_PFAULT_CLIENT_HUB_ESC 0x00000063 /* */ +#define NV_PFAULT_ACCESS_TYPE 19:16 /* */ +#define NV_PFAULT_ACCESS_TYPE_READ 0x00000000 /* */ +#define NV_PFAULT_ACCESS_TYPE_WRITE 0x00000001 /* */ +#define NV_PFAULT_ACCESS_TYPE_ATOMIC 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_PREFETCH 0x00000003 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_READ 0x00000000 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_WRITE 0x00000001 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_STRONG 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_PREFETCH 0x00000003 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_WEAK 0x00000004 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_READ 0x00000008 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_WRITE 0x00000009 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_ATOMIC 0x0000000a /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_PREFETCH 0x0000000b /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE 20:20 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE_GPC 0x00000000 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE_HUB 0x00000001 /* */ +#define NV_PFAULT_GPC_ID 28:24 /* */ +#define NV_PFAULT_PROTECTED_MODE 29:29 /* */ +#define NV_PFAULT_REPLAYABLE_FAULT_EN 30:30 /* */ +#define NV_PFAULT_VALID 31:31 /* */ +#endif // __ga100_dev_fault_h__ diff --git a/kernel-open/nvidia-uvm/hwref/ampere/ga100/dev_runlist.h b/kernel-open/nvidia-uvm/hwref/ampere/ga100/dev_runlist.h new file mode 100644 index 000000000..e38e49dda --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/ampere/ga100/dev_runlist.h @@ -0,0 +1,782 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + +#ifndef __ga100_dev_runlist_h__ +#define __ga100_dev_runlist_h__ +/* This file is autogenerated. Do not edit */ +#define NV_RUNLIST 0x000003ff:0x00000000 /* RW--D */ +#define NV_CHRAM 0x00001fff:0x00000000 /* RW--D */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK 0x040 /* RW-4R */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION 3:0 /* RWIVF */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_DEFAULT_PRIV_LEVEL 15 /* RWI-V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_ENABLED 0x0000000F /* RW--V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_ONLY_LEVEL3_ENABLED 0x00000008 /* RW--V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1 1:1 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2 2:2 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL3 3:3 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL3_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL3_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION 7:4 /* RWIVF */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_DEFAULT_PRIV_LEVEL 8 /* RWI-V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_ENABLED 0x0F /* RW--V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_ONLY_LEVEL3_ENABLED 0x08 /* RW--V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0 4:4 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1 5:5 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2 6:6 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL3 7:7 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL3_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL3_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_VIOLATION 8:8 /* RWIVF */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_READ_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_VIOLATION 9:9 /* RWIVF */ +#define 
NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_WRITE_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_SOURCE_READ_CONTROL 10:10 /* RWIVF */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_SOURCE_READ_CONTROL_BLOCKED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_SOURCE_READ_CONTROL_LOWERED 0x00000000 /* RW--V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_SOURCE_WRITE_CONTROL 11:11 /* RWIVF */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_SOURCE_WRITE_CONTROL_BLOCKED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_SOURCE_WRITE_CONTROL_LOWERED 0x00000000 /* RW--V */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_SOURCE_ENABLE 31:12 /* RWIVF */ +#define NV_RUNLIST_INT_CYA_PRIV_LEVEL_MASK_SOURCE_ENABLE_ALL_SOURCES_ENABLED 0x000FFFFF /* RWI-V */ +#define NV_RUNLIST_INT_CYA_SPARE 0x044 /* RW-4R */ +#define NV_RUNLIST_INT_CYA_SPARE__PRIV_LEVEL_MASK 0x040 /* */ +#define NV_RUNLIST_INT_CYA_SPARE_DATA 31:0 /* RWIUF */ +#define NV_RUNLIST_INT_CYA_SPARE_DATA_INIT 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INT_CYA_SPARE_FORCE_FE_MTHD_THROTTLE 0:0 /* */ +#define NV_RUNLIST_INT_CYA_SPARE_FORCE_FE_MTHD_THROTTLE_VAL 1:1 /* */ +#define NV_RUNLIST_INT_CYA_SPARE_FORCE_FE_MTHD_THROTTLE_VAL_1MTHD 0x00000000 /* */ +#define NV_RUNLIST_INT_CYA_SPARE_FORCE_FE_MTHD_THROTTLE_VAL_2MTHD 0x00000001 /* */ +#define NV_RUNLIST_CONFIG 0x000 /* RW-4R */ +#define NV_RUNLIST_CONFIG_SEM_ACQ_STRENGTH 0:0 /* RWIVF */ +#define NV_RUNLIST_CONFIG_SEM_ACQ_STRENGTH_WEAK 0x00000000 /* RWI-V */ +#define NV_RUNLIST_CONFIG_SEM_ACQ_STRENGTH_STRONG 0x00000001 /* RW--V */ +#define NV_RUNLIST_CONFIG_SEM_REL_STRENGTH 4:4 /* RWIVF */ +#define NV_RUNLIST_CONFIG_SEM_REL_STRENGTH_WEAK 0x00000000 /* RW--V */ +#define NV_RUNLIST_CONFIG_SEM_REL_STRENGTH_STRONG 0x00000001 /* RWI-V */ +#define NV_RUNLIST_CONFIG_L2_EVICT 9:8 /* RWIVF */ +#define NV_RUNLIST_CONFIG_L2_EVICT_FIRST 0x00000000 /* RWI-V */ +#define NV_RUNLIST_CONFIG_L2_EVICT_NORMAL 0x00000001 /* RW--V */ +#define NV_RUNLIST_CONFIG_L2_EVICT_LAST 0x00000002 /* RW--V */ +#define NV_RUNLIST_CONFIG_SUBCH4 10:10 /* RWXVF */ +#define NV_RUNLIST_CONFIG_SUBCH4_INACTIVE 0x00000000 /* RW--V */ +#define NV_RUNLIST_CONFIG_SUBCH4_ACTIVE 0x00000001 /* RW--V */ +#define NV_RUNLIST_CHANNEL_CONFIG 0x004 /* R--4R */ +#define NV_RUNLIST_CHANNEL_CONFIG_NUM_CHANNELS_LOG2 3:0 /* C--UF */ +#define NV_RUNLIST_CHANNEL_CONFIG_NUM_CHANNELS_LOG2_2K 11 /* C---V */ +#define NV_RUNLIST_CHANNEL_CONFIG_CHRAM_BAR0_OFFSET 31:4 /* R-XVF */ +#define NV_RUNLIST_DOORBELL_CONFIG 0x008 /* R--4R */ +#define NV_RUNLIST_DOORBELL_CONFIG_ID 31:16 /* R-XVF */ +#define NV_RUNLIST_FB_CONFIG 0x00C /* R--4R */ +#define NV_RUNLIST_FB_CONFIG_FB_THREAD_ID 7:0 /* R-XVF */ +#define NV_RUNLIST_FB_CONFIG_ESC_ID 15:8 /* R-XVF */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG(i) (0x300+(i)*4) /* RW-4A */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG__SIZE_1 64 /* */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG_MASK 11:0 /* */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG_MASK_HW 10:0 /* RWIVF */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG_MASK_HW_INIT 2047 /* RWI-V */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG_SET 27:16 /* */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG_SET_HW 26:16 /* RWIVF */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG_SET_HW_INIT 0x0 /* RWI-V */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG_PENDING_ENABLE 31:31 /* RWIVF */ +#define NV_RUNLIST_VIRTUAL_CHANNEL_CFG_PENDING_ENABLE_TRUE 1 /* RW--V */ +#define 
NV_RUNLIST_VIRTUAL_CHANNEL_CFG_PENDING_ENABLE_FALSE 0 /* RWI-V */ +#define NV_RUNLIST_PBDMA_CONFIG(i) (0x010+(i)*4) /* R--4A */ +#define NV_RUNLIST_PBDMA_CONFIG__SIZE_1 2 /* */ +#define NV_RUNLIST_PBDMA_CONFIG_PBDMA_ID 7:0 /* R-XUF */ +#define NV_RUNLIST_PBDMA_CONFIG_PBDMA_BAR0_OFFSET 25:10 /* R-XUF */ +#define NV_RUNLIST_PBDMA_CONFIG_VALID 31:31 /* R-XUF */ +#define NV_RUNLIST_PBDMA_CONFIG_VALID_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_PBDMA_CONFIG_VALID_FALSE 0x00000000 /* R---V */ +#define NV_RUNLIST_ACQ_PRETEST 0x020 /* RW-4R */ +#define NV_RUNLIST_ACQ_PRETEST_TIMEOUT 7:0 /* RWIUF */ +#define NV_RUNLIST_ACQ_PRETEST_TIMEOUT_8 0x00000008 /* RWI-V */ +#define NV_RUNLIST_ACQ_PRETEST_TIMESCALE 15:12 /* RWIUF */ +#define NV_RUNLIST_ACQ_PRETEST_TIMESCALE_0 0x00000000 /* RWI-V */ +#define NV_RUNLIST_ACQ_PRETEST_TIMESCALE_10 0x0000000a /* RW--V */ +#define NV_RUNLIST_IDLE_FILTER 0x024 /* RW-4R */ +#define NV_RUNLIST_IDLE_FILTER_PERIOD 7:0 /* RWIUF */ +#define NV_RUNLIST_IDLE_FILTER_PERIOD_INIT 0x00000050 /* RWI-V */ +#define NV_RUNLIST_IDLE_FILTER_PERIOD__PROD 0x00000064 /* RW--V */ +#define NV_RUNLIST_IDLE_FILTER_PERIOD_8 0x00000008 /* RW--V */ +#define NV_RUNLIST_IDLE_FILTER_PERIOD_32 0x00000020 /* RW--V */ +#define NV_RUNLIST_USERD_WRITEBACK 0x028 /* RW-4R */ +#define NV_RUNLIST_USERD_WRITEBACK_TIMER 7:0 /* RWIUF */ +#define NV_RUNLIST_USERD_WRITEBACK_TIMER_DISABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_USERD_WRITEBACK_TIMER_SHORT 0x00000003 /* RW--V */ +#define NV_RUNLIST_USERD_WRITEBACK_TIMER_100US 0x00000064 /* RWI-V */ +#define NV_RUNLIST_USERD_WRITEBACK_TIMESCALE 15:12 /* RWIUF */ +#define NV_RUNLIST_USERD_WRITEBACK_TIMESCALE_0 0x00000000 /* RWI-V */ +#define NV_RUNLIST_USERD_WRITEBACK_TIMESCALE_SHORT 0x00000000 /* */ +#define NV_RUNLIST_USERD_WRITEBACK_TIMESCALE_100US 0x00000000 /* */ +#define NV_RUNLIST_ESCHED_CONFIG 0x02c /* C--4R */ +#define NV_RUNLIST_ESCHED_CONFIG_ESCHED_CLASS_ID 15:0 /* C--UF */ +#define NV_RUNLIST_ESCHED_CONFIG_ESCHED_CLASS_ID_VALUE 50543 /* C---V */ +#define NV_CHRAM_CHANNEL(i) (0x000+(i)*4) /* RW-4A */ +#define NV_CHRAM_CHANNEL__SIZE_1 2048 /* */ +#define NV_CHRAM_CHANNEL_WRITE_CONTROL 0:0 /* -WIVF */ +#define NV_CHRAM_CHANNEL_WRITE_CONTROL_ONES_SET_BITS 0x00000000 /* -WI-V */ +#define NV_CHRAM_CHANNEL_WRITE_CONTROL_ONES_CLEAR_BITS 0x00000001 /* -W--V */ +#define NV_CHRAM_CHANNEL_ENABLE 1:1 /* RWIVF */ +#define NV_CHRAM_CHANNEL_ENABLE_NOT_IN_USE 0x00000000 /* RWI-V */ +#define NV_CHRAM_CHANNEL_ENABLE_IN_USE 0x00000001 /* RW--V */ +#define NV_CHRAM_CHANNEL_NEXT 2:2 /* RWIVF */ +#define NV_CHRAM_CHANNEL_NEXT_FALSE 0x00000000 /* RWI-V */ +#define NV_CHRAM_CHANNEL_NEXT_TRUE 0x00000001 /* RW--V */ +#define NV_CHRAM_CHANNEL_BUSY 3:3 /* R-IVF */ +#define NV_CHRAM_CHANNEL_BUSY_FALSE 0x00000000 /* R-I-V */ +#define NV_CHRAM_CHANNEL_BUSY_TRUE 0x00000001 /* R---V */ +#define NV_CHRAM_CHANNEL_PBDMA_FAULTED 4:4 /* RWIVF */ +#define NV_CHRAM_CHANNEL_PBDMA_FAULTED_FALSE 0x00000000 /* RWI-V */ +#define NV_CHRAM_CHANNEL_PBDMA_FAULTED_TRUE 0x00000001 /* RW--V */ +#define NV_CHRAM_CHANNEL_ENG_FAULTED 5:5 /* RWIVF */ +#define NV_CHRAM_CHANNEL_ENG_FAULTED_FALSE 0x00000000 /* RWI-V */ +#define NV_CHRAM_CHANNEL_ENG_FAULTED_TRUE 0x00000001 /* RW--V */ +#define NV_CHRAM_CHANNEL_ON_PBDMA 6:6 /* R-IVF */ +#define NV_CHRAM_CHANNEL_ON_PBDMA_FALSE 0x00000000 /* R-I-V */ +#define NV_CHRAM_CHANNEL_ON_PBDMA_TRUE 0x00000001 /* R---V */ +#define NV_CHRAM_CHANNEL_ON_ENG 7:7 /* R-IVF */ +#define NV_CHRAM_CHANNEL_ON_ENG_FALSE 0x00000000 /* R-I-V */ +#define NV_CHRAM_CHANNEL_ON_ENG_TRUE 
0x00000001 /* R---V */ +#define NV_CHRAM_CHANNEL_PENDING 8:8 /* RWIVF */ +#define NV_CHRAM_CHANNEL_PENDING_FALSE 0x00000000 /* RWI-V */ +#define NV_CHRAM_CHANNEL_PENDING_TRUE 0x00000001 /* RW--V */ +#define NV_CHRAM_CHANNEL_CTX_RELOAD 9:9 /* RWIVF */ +#define NV_CHRAM_CHANNEL_CTX_RELOAD_FALSE 0x00000000 /* RWI-V */ +#define NV_CHRAM_CHANNEL_CTX_RELOAD_TRUE 0x00000001 /* RW--V */ +#define NV_CHRAM_CHANNEL_PBDMA_BUSY 10:10 /* R-IVF */ +#define NV_CHRAM_CHANNEL_PBDMA_BUSY_FALSE 0x00000000 /* R-I-V */ +#define NV_CHRAM_CHANNEL_PBDMA_BUSY_TRUE 0x00000001 /* R---V */ +#define NV_CHRAM_CHANNEL_ENG_BUSY 11:11 /* R-IVF */ +#define NV_CHRAM_CHANNEL_ENG_BUSY_FALSE 0x00000000 /* R-I-V */ +#define NV_CHRAM_CHANNEL_ENG_BUSY_TRUE 0x00000001 /* R---V */ +#define NV_CHRAM_CHANNEL_ACQUIRE_FAIL 12:12 /* RWIVF */ +#define NV_CHRAM_CHANNEL_ACQUIRE_FAIL_FALSE 0x00000000 /* RWI-V */ +#define NV_CHRAM_CHANNEL_ACQUIRE_FAIL_TRUE 0x00000001 /* RW--V */ +#define NV_CHRAM_CHANNEL_STATUS 12:8 /* */ +#define NV_CHRAM_CHANNEL_STATUS_IDLE 0x00000000 /* */ +#define NV_CHRAM_CHANNEL_STATUS_PENDING 0x00000001 /* */ +#define NV_CHRAM_CHANNEL_STATUS_PENDING_CTX_RELOAD 0x00000003 /* */ +#define NV_CHRAM_CHANNEL_STATUS_PENDING_ACQUIRE_FAIL 0x00000011 /* */ +#define NV_CHRAM_CHANNEL_STATUS_PENDING_ACQUIRE_FAIL_CTX_RELOAD 0x00000013 /* */ +#define NV_CHRAM_CHANNEL_STATUS_PBDMA_BUSY 0x00000004 /* */ +#define NV_CHRAM_CHANNEL_STATUS_PBDMA_BUSY_AND_ENG_BUSY 0x0000000C /* */ +#define NV_CHRAM_CHANNEL_STATUS_ENG_BUSY 0x00000008 /* */ +#define NV_CHRAM_CHANNEL_STATUS_ENG_BUSY_PENDING_ACQUIRE_FAIL 0x00000019 /* */ +#define NV_CHRAM_CHANNEL_STATUS_ENG_BUSY_PENDING 0x00000009 /* */ +#define NV_CHRAM_CHANNEL_STATUS_PBDMA_BUSY_CTX_RELOAD 0x00000006 /* */ +#define NV_CHRAM_CHANNEL_STATUS_PBDMA_BUSY_ENG_BUSY_CTX_RELOAD 0x0000000E /* */ +#define NV_CHRAM_CHANNEL_STATUS_ENG_BUSY_CTX_RELOAD 0x0000000A /* */ +#define NV_CHRAM_CHANNEL_STATUS_ENG_BUSY_PENDING_CTX_RELOAD 0x0000000B /* */ +#define NV_CHRAM_CHANNEL_STATUS_ENG_BUSY_PENDING_ACQUIRE_FAIL_CTX_RELOAD 0x0000001B /* */ +#define NV_CHRAM_CHANNEL_UPDATE 31:0 /* */ +#define NV_CHRAM_CHANNEL_UPDATE_ENABLE_CHANNEL 0x00000002 /* */ +#define NV_CHRAM_CHANNEL_UPDATE_DISABLE_CHANNEL 0x00000003 /* */ +#define NV_CHRAM_CHANNEL_UPDATE_FORCE_CTX_RELOAD 0x00000200 /* */ +#define NV_CHRAM_CHANNEL_UPDATE_RESET_PBDMA_FAULTED 0x00000011 /* */ +#define NV_CHRAM_CHANNEL_UPDATE_RESET_ENG_FAULTED 0x00000021 /* */ +#define NV_CHRAM_CHANNEL_UPDATE_CLEAR_CHANNEL 0xFFFFFFFF /* */ +#define NV_RUNLIST_SUBMIT_BASE_LO 0x080 /* RW-4R */ +#define NV_RUNLIST_SUBMIT_BASE_LO_PTR_LO 31:12 /* RWIUF */ +#define NV_RUNLIST_SUBMIT_BASE_LO_PTR_LO_NULL 0x00000000 /* RWI-V */ +#define NV_RUNLIST_SUBMIT_BASE_LO_TARGET 1:0 /* RWIVF */ +#define NV_RUNLIST_SUBMIT_BASE_LO_TARGET_VID_MEM 0x0 /* RWI-V */ +#define NV_RUNLIST_SUBMIT_BASE_LO_TARGET_SYS_MEM_COHERENT 0x2 /* RW--V */ +#define NV_RUNLIST_SUBMIT_BASE_LO_TARGET_SYS_MEM_NONCOHERENT 0x3 /* RW--V */ +#define NV_RUNLIST_SUBMIT_BASE_LO_PTR_ALIGN_SHIFT 12 /* */ +#define NV_RUNLIST_SUBMIT_BASE_HI 0x084 /* RW-4R */ +#define NV_RUNLIST_SUBMIT_BASE_HI_PTR_HI 7:0 /* RWIUF */ +#define NV_RUNLIST_SUBMIT_BASE_HI_PTR_HI_NULL 0x00000000 /* RWI-V */ +#define NV_RUNLIST_SUBMIT 0x088 /* RW-4R */ +#define NV_RUNLIST_SUBMIT_LENGTH 15:0 /* RWIUF */ +#define NV_RUNLIST_SUBMIT_LENGTH_ZERO 0x00000000 /* RWI-V */ +#define NV_RUNLIST_SUBMIT_LENGTH_MAX 0x0000ffff /* RW--V */ +#define NV_RUNLIST_SUBMIT_OFFSET 31:16 /* RWIVF */ +#define NV_RUNLIST_SUBMIT_OFFSET_ZERO 0x00000000 /* RWI-V */ +#define 
NV_RUNLIST_SUBMIT_INFO 0x08C /* R--4R */ +#define NV_RUNLIST_SUBMIT_INFO_PREEMPTED_TSGID 13:0 /* */ +#define NV_RUNLIST_SUBMIT_INFO_PREEMPTED_TSGID_HW 10:0 /* R-IUF */ +#define NV_RUNLIST_SUBMIT_INFO_PREEMPTED_TSGID_HW_DEFAULT 0x00000000 /* R-I-V */ +#define NV_RUNLIST_SUBMIT_INFO_PREEMPTED_TSGID_VALID 14:14 /* R-IUF */ +#define NV_RUNLIST_SUBMIT_INFO_PREEMPTED_TSGID_VALID_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_SUBMIT_INFO_PREEMPTED_TSGID_VALID_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_SUBMIT_INFO_PENDING 15:15 /* R-IVF */ +#define NV_RUNLIST_SUBMIT_INFO_PENDING_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_SUBMIT_INFO_PENDING_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_SUBMIT_INFO_PREEMPTED_OFFSET 31:16 /* R-IVF */ +#define NV_RUNLIST_SUBMIT_INFO_PREEMPTED_OFFSET_ZERO 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK(i) (0x190+(i)*4) /* RW-4A */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION 3:0 /* RWIVF */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_DEFAULT_PRIV_LEVEL 15 /* RWI-V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_ENABLED 0x0000000F /* RW--V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_ONLY_LEVEL3_ENABLED 0x00000008 /* RW--V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1 1:1 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2 2:2 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL3 3:3 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL3_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL3_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION 7:4 /* RWIVF */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_DEFAULT_PRIV_LEVEL 15 /* RWI-V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_ENABLED 0x0F /* RW--V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_ONLY_LEVEL3_ENABLED 0x08 /* RW--V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0 4:4 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1 5:5 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2 6:6 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL3 7:7 /* */ +#define 
NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL3_ENABLE 0x00000001 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL3_DISABLE 0x00000000 /* */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_VIOLATION 8:8 /* RWIVF */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_READ_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_VIOLATION 9:9 /* RWIVF */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_WRITE_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_SOURCE_READ_CONTROL 10:10 /* RWIVF */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_SOURCE_READ_CONTROL_BLOCKED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_SOURCE_READ_CONTROL_LOWERED 0x00000000 /* RW--V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_SOURCE_WRITE_CONTROL 11:11 /* RWIVF */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_SOURCE_WRITE_CONTROL_BLOCKED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_SOURCE_WRITE_CONTROL_LOWERED 0x00000000 /* RW--V */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_SOURCE_ENABLE 31:12 /* RWIVF */ +#define NV_RUNLIST_INTR_PRIV_LEVEL_MASK_SOURCE_ENABLE_ALL_SOURCES_ENABLED 0x000FFFFF /* RWI-V */ +#define NV_RUNLIST_INTR_VECTORID(i) (0x160+(i)*4) /* RW-4A */ +#define NV_RUNLIST_INTR_VECTORID__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_VECTORID__PRIV_LEVEL_MASK "NV_RUNLIST_INTR_PRIV_LEVEL_MASK" /* */ +#define NV_RUNLIST_INTR_VECTORID_VECTOR 11:0 /* RWXUF */ +#define NV_RUNLIST_INTR_VECTORID_GSP 30:30 /* RWIUF */ +#define NV_RUNLIST_INTR_VECTORID_GSP_DISABLE 0 /* RW--V */ +#define NV_RUNLIST_INTR_VECTORID_GSP_ENABLE 1 /* RWI-V */ +#define NV_RUNLIST_INTR_VECTORID_CPU 31:31 /* RWIUF */ +#define NV_RUNLIST_INTR_VECTORID_CPU_DISABLE 0 /* RW--V */ +#define NV_RUNLIST_INTR_VECTORID_CPU_ENABLE 1 /* RWI-V */ +#define NV_RUNLIST_INTR_RETRIGGER(i) (0x180+(i)*4) /* -W-4A */ +#define NV_RUNLIST_INTR_RETRIGGER__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_RETRIGGER__PRIV_LEVEL_MASK "NV_RUNLIST_INTR_PRIV_LEVEL_MASK" /* */ +#define NV_RUNLIST_INTR_RETRIGGER_TRIGGER 0:0 /* -W-VF */ +#define NV_RUNLIST_INTR_RETRIGGER_TRIGGER_TRUE 1 /* -W--V */ +#define NV_RUNLIST_INTR_0 0x100 /* RW-4R */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG0 0:0 /* RWIVF */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG0_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG0_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG0_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG1 1:1 /* RWIVF */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG1_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG1_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG1_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG2 2:2 /* RWIVF */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG2_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG2_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG2_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG(i) (i):(i) /* */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG__SIZE_1 3 /* */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG_NOT_PENDING 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG_PENDING 0x00000001 /* */ +#define 
NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_ENG_RESET 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_GRCE(i) ((i)+1):((i)+1) /* */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_GRCE__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_GRCE_NOT_PENDING 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_GRCE_PENDING 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_CTXSW_TIMEOUT_GRCE_RESET 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_RUNLIST_IDLE 4:4 /* RWIVF */ +#define NV_RUNLIST_INTR_0_RUNLIST_IDLE_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INTR_0_RUNLIST_IDLE_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_RUNLIST_IDLE_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_RUNLIST_AND_ENG_IDLE 5:5 /* RWXVF */ +#define NV_RUNLIST_INTR_0_RUNLIST_AND_ENG_IDLE_NOT_PENDING 0x00000000 /* R---V */ +#define NV_RUNLIST_INTR_0_RUNLIST_AND_ENG_IDLE_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_RUNLIST_AND_ENG_IDLE_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_RUNLIST_ACQUIRE 6:6 /* RWXVF */ +#define NV_RUNLIST_INTR_0_RUNLIST_ACQUIRE_NOT_PENDING 0x00000000 /* R---V */ +#define NV_RUNLIST_INTR_0_RUNLIST_ACQUIRE_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_RUNLIST_ACQUIRE_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_RUNLIST_ACQUIRE_AND_ENG_IDLE 7:7 /* RWXVF */ +#define NV_RUNLIST_INTR_0_RUNLIST_ACQUIRE_AND_ENG_IDLE_NOT_PENDING 0x00000000 /* R---V */ +#define NV_RUNLIST_INTR_0_RUNLIST_ACQUIRE_AND_ENG_IDLE_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_RUNLIST_ACQUIRE_AND_ENG_IDLE_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_BAD_TSG 12:12 /* RWIVF */ +#define NV_RUNLIST_INTR_0_BAD_TSG_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INTR_0_BAD_TSG_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_BAD_TSG_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_TSG_PREEMPT_COMPLETE 8:8 /* RWIVF */ +#define NV_RUNLIST_INTR_0_TSG_PREEMPT_COMPLETE_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INTR_0_TSG_PREEMPT_COMPLETE_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_TSG_PREEMPT_COMPLETE_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_PBDMA0_INTR_TREE_0 16:16 /* R-XVF */ +#define NV_RUNLIST_INTR_0_PBDMA0_INTR_TREE_0_NOT_PENDING 0x00000000 /* R---V */ +#define NV_RUNLIST_INTR_0_PBDMA0_INTR_TREE_0_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_PBDMA1_INTR_TREE_0 17:17 /* R-XVF */ +#define NV_RUNLIST_INTR_0_PBDMA1_INTR_TREE_0_NOT_PENDING 0x00000000 /* R---V */ +#define NV_RUNLIST_INTR_0_PBDMA1_INTR_TREE_0_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_PBDMA0_INTR_TREE_1 18:18 /* R-XVF */ +#define NV_RUNLIST_INTR_0_PBDMA0_INTR_TREE_1_NOT_PENDING 0x00000000 /* R---V */ +#define NV_RUNLIST_INTR_0_PBDMA0_INTR_TREE_1_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_PBDMA1_INTR_TREE_1 19:19 /* R-XVF */ +#define NV_RUNLIST_INTR_0_PBDMA1_INTR_TREE_1_NOT_PENDING 0x00000000 /* R---V */ +#define NV_RUNLIST_INTR_0_PBDMA1_INTR_TREE_1_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_PBDMAi_INTR_TREE_j(i,j) (16+(i)+(j)*2):(16+(i)+(j)*2) /* */ +#define NV_RUNLIST_INTR_0_PBDMAi_INTR_TREE_j__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_0_PBDMAi_INTR_TREE_j__SIZE_2 2 /* */ +#define NV_RUNLIST_INTR_0_PBDMAi_INTR_TREE_j_NOT_PENDING 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_PBDMAi_INTR_TREE_j_PENDING 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_RUNLIST_PREEMPT_COMPLETE 9:9 /* RWIVF */ +#define NV_RUNLIST_INTR_0_RUNLIST_PREEMPT_COMPLETE_NOT_PENDING 0x00000000 /* R-I-V */ 
+#define NV_RUNLIST_INTR_0_RUNLIST_PREEMPT_COMPLETE_PENDING 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_0_RUNLIST_PREEMPT_COMPLETE_RESET 0x00000001 /* -W--V */ +#define NV_RUNLIST_INTR_0_RUNLIST_EVENT 9:9 /* */ +#define NV_RUNLIST_INTR_0_MASK_SET 0x110 /* RW-4R */ +#define NV_RUNLIST_INTR_0_MASK_SET_TSG_PREEMPT_COMPLETE 8:8 /* RWIVF */ +#define NV_RUNLIST_INTR_0_MASK_SET_TSG_PREEMPT_COMPLETE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_MASK_SET_TSG_PREEMPT_COMPLETE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_MASK_CLEAR 0x118 /* RW-4R */ +#define NV_RUNLIST_INTR_0_MASK_CLEAR_TSG_PREEMPT_COMPLETE 8:8 /* RWIVF */ +#define NV_RUNLIST_INTR_0_MASK_CLEAR_TSG_PREEMPT_COMPLETE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_MASK_CLEAR_TSG_PREEMPT_COMPLETE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE(i) (0x120+(i)*8) /* RW-4A */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE__PRIV_LEVEL_MASK "NV_RUNLIST_INTR_PRIV_LEVEL_MASK" /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG0 0:0 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG0_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG0_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG1 1:1 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG1_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG1_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG2 2:2 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG2_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG2_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG(i) (i):(i) /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG__SIZE_1 3 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_ENG_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_GRCE(i) ((i)+1):((i)+1) /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_GRCE__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_GRCE_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_CTXSW_TIMEOUT_GRCE_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_IDLE 4:4 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_IDLE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_IDLE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_AND_ENG_IDLE 5:5 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_AND_ENG_IDLE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_AND_ENG_IDLE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_ACQUIRE 6:6 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_ACQUIRE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_ACQUIRE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_ACQUIRE_AND_ENG_IDLE 7:7 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_ACQUIRE_AND_ENG_IDLE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_ACQUIRE_AND_ENG_IDLE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_TSG_PREEMPT_COMPLETE 8:8 /* RWIVF */ +#define 
NV_RUNLIST_INTR_0_EN_SET_TREE_TSG_PREEMPT_COMPLETE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_TSG_PREEMPT_COMPLETE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_PREEMPT_COMPLETE 9:9 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_PREEMPT_COMPLETE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_RUNLIST_PREEMPT_COMPLETE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_BAD_TSG 12:12 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_BAD_TSG_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_BAD_TSG_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA0_INTR_TREE_0 16:16 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA0_INTR_TREE_0_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA0_INTR_TREE_0_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA1_INTR_TREE_0 17:17 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA1_INTR_TREE_0_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA1_INTR_TREE_0_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA0_INTR_TREE_1 18:18 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA0_INTR_TREE_1_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA0_INTR_TREE_1_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA1_INTR_TREE_1 19:19 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA1_INTR_TREE_1_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMA1_INTR_TREE_1_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMAi_INTR_TREE_j(i,j) (16+(i)+(j)*2):(16+(i)+(j)*2) /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMAi_INTR_TREE_j__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMAi_INTR_TREE_j__SIZE_2 2 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMAi_INTR_TREE_j_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_EN_SET_TREE_PBDMAi_INTR_TREE_j_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE(i) (0x140+(i)*8) /* RW-4A */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE__PRIV_LEVEL_MASK "NV_RUNLIST_INTR_PRIV_LEVEL_MASK" /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG0 0:0 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG0_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG0_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG1 1:1 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG1_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG1_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG2 2:2 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG2_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG2_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG(i) (i):(i) /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG__SIZE_1 3 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_ENG_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_GRCE(i) ((i)+1):((i)+1) /* */ +#define 
NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_GRCE__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_GRCE_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_CTXSW_TIMEOUT_GRCE_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_IDLE 4:4 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_IDLE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_IDLE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_AND_ENG_IDLE 5:5 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_AND_ENG_IDLE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_AND_ENG_IDLE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_ACQUIRE 6:6 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_ACQUIRE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_ACQUIRE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_ACQUIRE_AND_ENG_IDLE 7:7 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_ACQUIRE_AND_ENG_IDLE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_ACQUIRE_AND_ENG_IDLE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_TSG_PREEMPT_COMPLETE 8:8 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_TSG_PREEMPT_COMPLETE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_TSG_PREEMPT_COMPLETE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_PREEMPT_COMPLETE 9:9 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_PREEMPT_COMPLETE_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_RUNLIST_PREEMPT_COMPLETE_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_BAD_TSG 12:12 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_BAD_TSG_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_BAD_TSG_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA0_INTR_TREE_0 16:16 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA0_INTR_TREE_0_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA0_INTR_TREE_0_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA1_INTR_TREE_0 17:17 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA1_INTR_TREE_0_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA1_INTR_TREE_0_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA0_INTR_TREE_1 18:18 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA0_INTR_TREE_1_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA0_INTR_TREE_1_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA1_INTR_TREE_1 19:19 /* RWIVF */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA1_INTR_TREE_1_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMA1_INTR_TREE_1_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMAi_INTR_TREE_j(i,j) (16+(i)+(j)*2):(16+(i)+(j)*2) /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMAi_INTR_TREE_j__SIZE_1 2 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMAi_INTR_TREE_j__SIZE_2 2 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMAi_INTR_TREE_j_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_INTR_0_EN_CLEAR_TREE_PBDMAi_INTR_TREE_j_ENABLED 0x00000001 /* */ +#define 
NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO(i) (0x224+(i)*64) /* R--4A */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_PREV_TSGID 13:0 /* R-IUF */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_PREV_TSGID_DEFAULT 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_CTXSW_STATE 15:14 /* R-IUF */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_CTXSW_STATE_LOAD 0x00000001 /* R-I-V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_CTXSW_STATE_SAVE 0x00000002 /* R---V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_CTXSW_STATE_SWITCH 0x00000003 /* R---V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_NEXT_TSGID 29:16 /* R-IUF */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_NEXT_TSGID_DEFAULT 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_STATUS 31:30 /* R-IUF */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_STATUS_AWAITING_ACK 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_STATUS_ENG_WAS_RESET 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_STATUS_ACK_RECEIVED 0x00000002 /* R---V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_INFO_STATUS_DROPPED_TIMEOUT 0x00000003 /* R---V */ +#define NV_RUNLIST_INFO 0x108 /* R--4R */ +#define NV_RUNLIST_INFO_RUNLIST_IDLE_INTR_ARM 0:0 /* R-IUF */ +#define NV_RUNLIST_INFO_RUNLIST_IDLE_INTR_ARM_UNARMED 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INFO_RUNLIST_IDLE_INTR_ARM_ARMED 0x00000001 /* R---V */ +#define NV_RUNLIST_INFO_RUNLIST_ACQUIRE_INTR_ARM 1:1 /* R-IUF */ +#define NV_RUNLIST_INFO_RUNLIST_ACQUIRE_INTR_ARM_UNARMED 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INFO_RUNLIST_ACQUIRE_INTR_ARM_ARMED 0x00000001 /* R---V */ +#define NV_RUNLIST_INFO_RUNLIST_AND_ENG_IDLE_INTR_ARM 4:4 /* R-IUF */ +#define NV_RUNLIST_INFO_RUNLIST_AND_ENG_IDLE_INTR_ARM_UNARMED 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INFO_RUNLIST_AND_ENG_IDLE_INTR_ARM_ARMED 0x00000001 /* R---V */ +#define NV_RUNLIST_INFO_RUNLIST_ACQUIRE_AND_ENG_IDLE_INTR_ARM 5:5 /* R-IUF */ +#define NV_RUNLIST_INFO_RUNLIST_ACQUIRE_AND_ENG_IDLE_INTR_ARM_UNARMED 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INFO_RUNLIST_ACQUIRE_AND_ENG_IDLE_INTR_ARM_ARMED 0x00000001 /* R---V */ +#define NV_RUNLIST_INFO_ENG_IDLE 8:8 /* R-IUF */ +#define NV_RUNLIST_INFO_ENG_IDLE_FALSE 0x00000000 /* R---V */ +#define NV_RUNLIST_INFO_ENG_IDLE_TRUE 0x00000001 /* R-I-V */ +#define NV_RUNLIST_INFO_RUNLIST_IDLE 9:9 /* R-IUF */ +#define NV_RUNLIST_INFO_RUNLIST_IDLE_FALSE 0x00000000 /* R---V */ +#define NV_RUNLIST_INFO_RUNLIST_IDLE_TRUE 0x00000001 /* R-I-V */ +#define NV_RUNLIST_INFO_RUNLIST_FETCH_STATUS 10:10 /* R-IVF */ +#define NV_RUNLIST_INFO_RUNLIST_FETCH_STATUS_IDLE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INFO_RUNLIST_FETCH_STATUS_BUSY 0x00000001 /* R---V */ +#define NV_RUNLIST_INFO_ACQUIRE_STILL_PENDING 12:12 /* R-IUF */ +#define NV_RUNLIST_INFO_ACQUIRE_STILL_PENDING_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INFO_ACQUIRE_STILL_PENDING_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_INFO_RUNLIST_FETCH_NACKED 13:13 /* R-IUF */ +#define NV_RUNLIST_INFO_RUNLIST_FETCH_NACKED_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INFO_RUNLIST_FETCH_NACKED_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_BAD_TSG 0x174 /* R--4R */ +#define NV_RUNLIST_INTR_BAD_TSG_CODE 3:0 /* R-IVF */ +#define NV_RUNLIST_INTR_BAD_TSG_CODE_NO_ERROR 0x00000000 /* R-I-V */ +#define NV_RUNLIST_INTR_BAD_TSG_CODE_ZERO_LENGTH_TSG 0x00000001 /* R---V */ +#define NV_RUNLIST_INTR_BAD_TSG_CODE_MAX_LENGTH_EXCEEDED 0x00000002 /* R---V */ 
+#define NV_RUNLIST_INTR_BAD_TSG_CODE_RUNLIST_OVERFLOW 0x00000003 /* R---V */ +#define NV_RUNLIST_INTR_BAD_TSG_CODE_EXPECTED_A_CHID_ENTRY 0x00000004 /* R---V */ +#define NV_RUNLIST_INTR_BAD_TSG_CODE_EXPECTED_A_TSG_HEADER 0x00000005 /* R---V */ +#define NV_RUNLIST_INTR_BAD_TSG_CODE_INVALID_RUNQUEUE 0x00000006 /* R---V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_CONFIG(i) (0x220+(i)*64) /* RW-4A */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_CONFIG__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_CONFIG_PERIOD 30:0 /* RWIVF */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_CONFIG_PERIOD_INIT 0x003fffff /* RWI-V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_CONFIG_PERIOD_MAX 0x7fffffff /* RW--V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_CONFIG_DETECTION 31:31 /* RWIVF */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_CONFIG_DETECTION_DISABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_ENGINE_CTXSW_TIMEOUT_CONFIG_DETECTION_ENABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG 0x050 /* RW-4R */ +#define NV_RUNLIST_BLKCG_IDLE_CG_DLY_CNT 5:0 /* RWIVF */ +#define NV_RUNLIST_BLKCG_IDLE_CG_DLY_CNT_INIT 0x00000000 /* RWI-V */ +#define NV_RUNLIST_BLKCG_IDLE_CG_DLY_CNT__PROD 0x00000002 /* RW--V */ +#define NV_RUNLIST_BLKCG_IDLE_CG_EN 6:6 /* RWIVF */ +#define NV_RUNLIST_BLKCG_IDLE_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_BLKCG_IDLE_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_BLKCG_IDLE_CG_EN__PROD 0x00000001 /* RW--V */ +#define NV_RUNLIST_BLKCG_STATE_CG_EN 7:7 /* */ +#define NV_RUNLIST_BLKCG_STATE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_BLKCG_STATE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_STATE_CG_EN__PROD 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_STALL_CG_DLY_CNT 13:8 /* */ +#define NV_RUNLIST_BLKCG_STALL_CG_DLY_CNT_INIT 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_STALL_CG_DLY_CNT__PROD 0x00000002 /* */ +#define NV_RUNLIST_BLKCG_STALL_CG_EN 14:14 /* RWIVF */ +#define NV_RUNLIST_BLKCG_STALL_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_BLKCG_STALL_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_BLKCG_STALL_CG_EN__PROD 0x00000001 /* RW--V */ +#define NV_RUNLIST_BLKCG_QUIESCENT_CG_EN 15:15 /* */ +#define NV_RUNLIST_BLKCG_QUIESCENT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_BLKCG_QUIESCENT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_QUIESCENT_CG_EN__PROD 0x00000001 /* */ +#define NV_RUNLIST_BLKCG_WAKEUP_DLY_CNT 19:16 /* RWIVF */ +#define NV_RUNLIST_BLKCG_WAKEUP_DLY_CNT_INIT 0x00000000 /* RWI-V */ +#define NV_RUNLIST_BLKCG_WAKEUP_DLY_CNT__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG_THROT_CLK_CNT 23:20 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_CNT_INIT 0x0000000f /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_CNT_FULLSPEED 0x0000000f /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_CNT__PROD 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_DI_DT_SKEW_VAL 27:24 /* */ +#define NV_RUNLIST_BLKCG_DI_DT_SKEW_VAL_INIT 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_DI_DT_SKEW_VAL__PROD 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_EN 28:28 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_EN_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_EN_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_EN__PROD 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_SW_OVER 29:29 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_SW_OVER_EN 0x00000001 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_SW_OVER_DIS 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_THROT_CLK_SW_OVER__PROD 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_PAUSE_CG_EN 30:30 /* 
*/ +#define NV_RUNLIST_BLKCG_PAUSE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_BLKCG_PAUSE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_PAUSE_CG_EN__PROD 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_HALT_CG_EN 31:31 /* */ +#define NV_RUNLIST_BLKCG_HALT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_RUNLIST_BLKCG_HALT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_RUNLIST_BLKCG_HALT_CG_EN__PROD 0x00000000 /* */ +#define NV_RUNLIST_BLKCG1 0x054 /* RW-4R */ +#define NV_RUNLIST_BLKCG1_MONITOR_CG_EN 0:0 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_MONITOR_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_BLKCG1_MONITOR_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG 16:1 /* */ +#define NV_RUNLIST_BLKCG1_SLCG_ENABLED 0x00000000 /* */ +#define NV_RUNLIST_BLKCG1_SLCG_DISABLED 0x0000FFFF /* */ +#define NV_RUNLIST_BLKCG1_SLCG__PROD 0x00000001 /* */ +#define NV_RUNLIST_BLKCG1_SLCG_RLP 1:1 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_RLP_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_RLP_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_RLP__PROD 0x00000001 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_EVH 3:3 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_EVH_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_EVH_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_EVH__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_EISM 7:7 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_EISM_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_EISM_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_EISM__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_LB 8:8 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_LB_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_LB_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_LB__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_CTL 9:9 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_CTL_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_CTL_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_CTL__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_GP 10:10 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_GP_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_GP_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_GP__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_PB 11:11 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_PB_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_PB_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_PBDMA_PB__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_PRI 13:13 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_PRI_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_PRI_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_PRI__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_CHSW 14:14 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_CHSW_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_CHSW_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_CHSW__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_XBAR 15:15 /* RWIVF */ +#define NV_RUNLIST_BLKCG1_SLCG_XBAR_ENABLED 0x00000000 /* RW--V */ +#define NV_RUNLIST_BLKCG1_SLCG_XBAR_DISABLED 0x00000001 /* RWI-V */ +#define NV_RUNLIST_BLKCG1_SLCG_XBAR__PROD 0x00000000 /* RW--V */ +#define NV_RUNLIST_SLCG_MISC 0x05C /* RW-4R */ +#define 
NV_RUNLIST_SLCG_MISC_EXTRA_BUSY_CLKS 3:0 /* RWIVF */ +#define NV_RUNLIST_SLCG_MISC_EXTRA_BUSY_CLKS_ZERO 0x00000000 /* RWI-V */ +#define NV_RUNLIST_INTERNAL_DOORBELL 0x090 /* -W-4R */ +#define NV_RUNLIST_INTERNAL_DOORBELL_CHID 11:0 /* */ +#define NV_RUNLIST_INTERNAL_DOORBELL_CHID_HW 10:0 /* -WXUF */ +#define NV_RUNLIST_INTERNAL_DOORBELL_GFID 21:16 /* -WXUF */ +#define NV_RUNLIST_SCHED_DISABLE 0x094 /* RW-4R */ +#define NV_RUNLIST_SCHED_DISABLE_RUNLIST 0:0 /* RWIVF */ +#define NV_RUNLIST_SCHED_DISABLE_RUNLIST_ENABLED 0x00000000 /* RWI-V */ +#define NV_RUNLIST_SCHED_DISABLE_RUNLIST_DISABLED 0x00000001 /* RW--V */ +#define NV_RUNLIST_SCHED_DISABLE_RUNLIST_FALSE 0x00000000 /* */ +#define NV_RUNLIST_SCHED_DISABLE_RUNLIST_TRUE 0x00000001 /* */ +#define NV_RUNLIST_PREEMPT 0x098 /* RW-4R */ +#define NV_RUNLIST_PREEMPT_ID 11:0 /* */ +#define NV_RUNLIST_PREEMPT_ID_HW 10:0 /* RWIUF */ +#define NV_RUNLIST_PREEMPT_ID_HW_NULL 0x00000000 /* RWI-V */ +#define NV_RUNLIST_PREEMPT_TSG_PREEMPT_PENDING 20:20 /* R-IVF */ +#define NV_RUNLIST_PREEMPT_TSG_PREEMPT_PENDING_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_PREEMPT_TSG_PREEMPT_PENDING_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_PREEMPT_RUNLIST_PREEMPT_PENDING 21:21 /* R-IVF */ +#define NV_RUNLIST_PREEMPT_RUNLIST_PREEMPT_PENDING_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_PREEMPT_RUNLIST_PREEMPT_PENDING_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_PREEMPT_TYPE 25:24 /* RWIVF */ +#define NV_RUNLIST_PREEMPT_TYPE_RUNLIST 0x00000000 /* RWI-V */ +#define NV_RUNLIST_PREEMPT_TYPE_TSG 0x00000001 /* RW--V */ +#define NV_RUNLIST_ENGINE_STATUS0(i) (0x200+(i)*64) /* R--4A */ +#define NV_RUNLIST_ENGINE_STATUS0__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_TSGID 11:0 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_TSGID_HW 10:0 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX_STATUS 15:13 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX_STATUS_INVALID 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX_STATUS_VALID 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX_STATUS_CTXSW_SAVE 0x00000005 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX_STATUS_CTXSW_LOAD 0x00000006 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX_STATUS_CTXSW_SWITCH 0x00000007 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX 13:13 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX_INVALID 0x00000000 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_CTX_VALID 0x00000001 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_CTXLOAD 14:14 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_CTXLOAD_NOT_IN_PROGRESS 0x00000000 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_CTXLOAD_IN_PROGRESS 0x00000001 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_CTXSW 15:15 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_CTXSW_NOT_IN_PROGRESS 0x00000000 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_CTXSW_IN_PROGRESS 0x00000001 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_NEXT_TSGID 27:16 /* */ +#define NV_RUNLIST_ENGINE_STATUS0_NEXT_TSGID_HW 26:16 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS0_ENG_RELOAD 29:29 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS0_ENG_RELOAD_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS0_ENG_RELOAD_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS0_FAULTED 30:30 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS0_FAULTED_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS0_FAULTED_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS0_ENGINE 31:31 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS0_ENGINE_IDLE 0x00000000 /* R-I-V */ +#define 
NV_RUNLIST_ENGINE_STATUS0_ENGINE_BUSY 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS1(i) (0x204+(i)*64) /* R--4A */ +#define NV_RUNLIST_ENGINE_STATUS1__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_STATUS1_GFID 5:0 /* R-XVF */ +#define NV_RUNLIST_ENGINE_STATUS1_NEXT_GFID 13:8 /* R-XVF */ +#define NV_RUNLIST_ENGINE_STATUS1_INTR_ID 20:16 /* R-XVF */ +#define NV_RUNLIST_ENGINE_STATUS1_GFID_VALID 30:30 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS1_GFID_VALID_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS1_GFID_VALID_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS1_NEXT_GFID_VALID 31:31 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS1_NEXT_GFID_VALID_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS1_NEXT_GFID_VALID_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL(i,j) (0x208+(i)*64+(j)*4) /* R--4A */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL__SIZE_2 2 /* */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_CHID 11:0 /* */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_CHID_HW 10:0 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_VALID 15:15 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_VALID_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_VALID_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_NEXT_CHID 27:16 /* */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_NEXT_CHID_HW 26:16 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_NEXT_VALID 31:31 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_NEXT_VALID_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_CHANNEL_NEXT_VALID_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG(i) (0x228+(i)*64) /* R--4A */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_IF_EN 0:0 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_IF_EN_DISABLED 0x00000000 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_IF_EN_ENABLED 0x00000001 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE0_NO_CREDITS 8:8 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE0_NO_CREDITS_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE0_NO_CREDITS_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE0_WFI 12:12 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE0_WFI_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE0_WFI_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE1_NO_CREDITS 16:16 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE1_NO_CREDITS_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE1_NO_CREDITS_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE1_WFI 20:20 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE1_WFI_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_PIPE1_WFI_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_DEBUG_ENGINE_ID 29:24 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_INST(i) (0x210+(i)*64) /* R--4A */ +#define NV_RUNLIST_ENGINE_STATUS_INST__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_STATUS_INST_TARGET 1:0 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_INST_TARGET_VID_MEM 0x00000000 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_INST_TARGET_SYS_MEM_COHERENT 0x00000002 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_INST_TARGET_SYS_MEM_NONCOHERENT 0x00000003 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_INST_VALID 11:11 /* 
R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_INST_VALID_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_INST_VALID_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_INST_PTR_LO 31:12 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_INST_HI(i) (0x214+(i)*64) /* R--4A */ +#define NV_RUNLIST_ENGINE_STATUS_INST_HI__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_STATUS_INST_HI_PTR_HI 31:0 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_INST_HI_PTR_HI_ZERO 0x00000000 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST(i) (0x218+(i)*64) /* R--4A */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_TARGET 1:0 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_TARGET_VID_MEM 0x00000000 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_TARGET_SYS_MEM_COHERENT 0x00000002 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_TARGET_SYS_MEM_NONCOHERENT 0x00000003 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_VALID 11:11 /* R-IVF */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_VALID_FALSE 0x00000000 /* R-I-V */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_VALID_TRUE 0x00000001 /* R---V */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_PTR_LO 31:12 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_HI(i) (0x21C+(i)*64) /* R--4A */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_HI__SIZE_1 3 /* */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_HI_PTR_HI 31:0 /* R-XUF */ +#define NV_RUNLIST_ENGINE_STATUS_NEXT_INST_HI_PTR_HI_ZERO 0x00000000 /* R---V */ +#endif // __ga100_dev_runlist_h__ diff --git a/kernel-open/nvidia-uvm/hwref/maxwell/gm107/dev_mmu.h b/kernel-open/nvidia-uvm/hwref/maxwell/gm107/dev_mmu.h new file mode 100644 index 000000000..28c47cfb9 --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/maxwell/gm107/dev_mmu.h @@ -0,0 +1,339 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef __gm107_dev_mmu_h__ +#define __gm107_dev_mmu_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_MMU_PDE /* ----G */ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE /* ----G */ +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 
0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+28):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_COMPTAGS_NONE 0x0 /* */ +#define NV_MMU_PTE_COMPTAGS_1 0x1 /* */ +#define NV_MMU_PTE_COMPTAGS_2 0x2 /* */ +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0xff /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_Z16 0x01 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2C 0x02 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2C 0x03 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2C 0x04 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2C 0x05 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2C 0x06 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2Z 0x07 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2Z 0x08 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2Z 0x09 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2Z 0x0a /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2Z 0x0b /* R---V */ +#define NV_MMU_PTE_KIND_Z16_4CZ 0x0c /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_4CZ 0x0d /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_4CZ 0x0e /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_4CZ 0x0f /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_4CZ 0x10 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24 0x11 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_1Z 0x12 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_1Z 0x13 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_1Z 0x14 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_1Z 0x15 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_1Z 0x16 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_2CZ 0x17 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_2CZ 0x18 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_2CZ 0x19 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_2CZ 0x1a /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_2CZ 0x1b /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_2CS 0x1c /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_2CS 0x1d /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_2CS 0x1e /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_2CS 0x1f /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_2CS 0x20 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_4CSZV 0x21 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_4CSZV 0x22 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_4CSZV 0x23 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_4CSZV 0x24 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_4CSZV 0x25 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12 0x26 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4 0x27 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8 0x28 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24 0x29 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_1ZV 
0x2e /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_1ZV 0x2f /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_1ZV 0x30 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_1ZV 0x31 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2CS 0x32 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2CS 0x33 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2CS 0x34 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2CS 0x35 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2CZV 0x3a /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2CZV 0x3b /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2CZV 0x3c /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2CZV 0x3d /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2ZV 0x3e /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2ZV 0x3f /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2ZV 0x40 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2ZV 0x41 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_4CSZV 0x42 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_4CSZV 0x43 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_4CSZV 0x44 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_4CSZV 0x45 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8 0x46 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_1Z 0x47 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_1Z 0x48 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_1Z 0x49 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_1Z 0x4a /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_1Z 0x4b /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_2CS 0x4c /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_2CS 0x4d /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_2CS 0x4e /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_2CS 0x4f /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_2CS 0x50 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_2CZ 0x51 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_2CZ 0x52 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_2CZ 0x53 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_2CZ 0x54 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_2CZ 0x55 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_4CSZV 0x56 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_4CSZV 0x57 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_4CSZV 0x58 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_4CSZV 0x59 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_4CSZV 0x5a /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12 0x5b /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4 0x5c /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8 0x5d /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24 0x5e /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_1ZV 0x63 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_1ZV 0x64 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_1ZV 0x65 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_1ZV 0x66 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2CS 0x67 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2CS 0x68 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2CS 0x69 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2CS 0x6a /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2CZV 0x6f /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2CZV 0x70 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2CZV 0x71 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2CZV 0x72 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2ZV 0x73 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2ZV 0x74 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2ZV 0x75 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2ZV 0x76 /* R---V */ +#define 
NV_MMU_PTE_KIND_Z24V8_MS4_VC12_4CSZV 0x77 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_4CSZV 0x78 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_4CSZV 0x79 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_4CSZV 0x7a /* R---V */ +#define NV_MMU_PTE_KIND_ZF32 0x7b /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_1Z 0x7c /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_1Z 0x7d /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_1Z 0x7e /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_1Z 0x7f /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_1Z 0x80 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_2CS 0x81 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_2CS 0x82 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_2CS 0x83 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_2CS 0x84 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_2CS 0x85 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_2CZ 0x86 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_2CZ 0x87 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_2CZ 0x88 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_2CZ 0x89 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_2CZ 0x8a /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12 0x8b /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4 0x8c /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8 0x8d /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24 0x8e /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1CS 0x8f /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1CS 0x90 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1CS 0x91 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1CS 0x92 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1ZV 0x97 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1ZV 0x98 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1ZV 0x99 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1ZV 0x9a /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1CZV 0x9b /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1CZV 0x9c /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1CZV 0x9d /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1CZV 0x9e /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_2CS 0x9f /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_2CS 0xa0 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_2CS 0xa1 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_2CS 0xa2 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_2CSZV 0xa3 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_2CSZV 0xa4 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_2CSZV 0xa5 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_2CSZV 0xa6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12 0xa7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4 0xa8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8 0xa9 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24 0xaa /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1CS 0xab /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1CS 0xac /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1CS 0xad /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1CS 0xae /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1ZV 0xb3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1ZV 0xb4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1ZV 0xb5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1ZV 0xb6 /* R---V */ +#define 
NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1CZV 0xb7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1CZV 0xb8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1CZV 0xb9 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1CZV 0xba /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_2CS 0xbb /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_2CS 0xbc /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_2CS 0xbd /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_2CS 0xbe /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_2CSZV 0xbf /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_2CSZV 0xc0 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_2CSZV 0xc1 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_2CSZV 0xc2 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8 0xc3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_1CS 0xc4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_1CS 0xc5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_1CS 0xc6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_1CS 0xc7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_1CS 0xc8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_2CSZV 0xce /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_2CSZV 0xcf /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_2CSZV 0xd0 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_2CSZV 0xd1 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_2CSZV 0xd2 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_2CS 0xd3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_2CS 0xd4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_2CS 0xd5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_2CS 0xd6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_2CS 0xd7 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_16BX2 0xfe /* R---V */ +#define NV_MMU_PTE_KIND_C32_2C 0xd8 /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CBR 0xd9 /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CBA 0xda /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CRA 0xdb /* R---V */ +#define NV_MMU_PTE_KIND_C32_2BRA 0xdc /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2C 0xdd /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2CBR 0xde /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2CRA 0xcc /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2C 0xdf /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBR 0xe0 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBA 0xe1 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CRA 0xe2 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2BRA 0xe3 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2C 0xe4 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2CRA 0xe5 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2C 0xe6 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CBR 0xe7 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CBA 0xe8 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CRA 0xe9 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2BRA 0xea /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2C 0xeb /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2CBR 0xec /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2CRA 0xcd /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2C 0xed /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBR 0xee /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBA 0xef /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CRA 0xf0 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2BRA 0xf1 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2C 0xf2 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2CRA 0xf3 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2C 0xf4 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2CR 0xf5 /* R---V */ 
+#define NV_MMU_PTE_KIND_C128_MS2_2C 0xf6 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS2_2CR 0xf7 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2C 0xf8 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2CR 0xf9 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2C 0xfa /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2CR 0xfb /* R---V */ +#define NV_MMU_PTE_KIND_X8C24 0xfc /* R---V */ +#define NV_MMU_PTE_KIND_PITCH_NO_SWIZZLE 0xfd /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xca /* R---V */ +#define NV_MMU_PTE_KIND_SMHOST_MESSAGE 0xcb /* R---V */ +#endif // __gm107_dev_mmu_h__ diff --git a/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_fault.h b/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_fault.h new file mode 100644 index 000000000..72ff29eea --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_fault.h @@ -0,0 +1,203 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef __gp100_dev_fault_h__ +#define __gp100_dev_fault_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_PFAULT_MMU_ENG_ID_GRAPHICS 0 /* */ +#define NV_PFAULT_MMU_ENG_ID_DISPLAY 1 /* */ +#define NV_PFAULT_MMU_ENG_ID_IFB 3 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1 4 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2 5 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST0 6 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST1 7 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST2 8 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST3 9 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST4 10 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST5 11 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST6 12 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST7 13 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST8 14 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST9 15 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST10 16 /* */ +#define NV_PFAULT_MMU_ENG_ID_SEC 18 /* */ +#define NV_PFAULT_MMU_ENG_ID_PERF 19 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC 2 /* */ +#define NV_PFAULT_MMU_ENG_ID_GRCOPY 27 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE0 21 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE1 22 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE2 27 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE3 28 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE4 29 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE5 30 /* */ +#define NV_PFAULT_MMU_ENG_ID_PWR_PMU 23 /* */ +#define NV_PFAULT_MMU_ENG_ID_PTP 24 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC 25 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC0 25 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC1 17 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC2 20 /* */ +#define NV_PFAULT_MMU_ENG_ID_PHYSICAL 31 /* */ +#define NV_PFAULT_FAULT_TYPE 4:0 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE 0x00000000 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE_SIZE 0x00000001 /* */ +#define NV_PFAULT_FAULT_TYPE_PTE 0x00000002 /* */ +#define NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION 0x00000003 /* */ +#define NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK 0x00000004 /* */ +#define NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION 0x00000005 /* */ +#define NV_PFAULT_FAULT_TYPE_RO_VIOLATION 0x00000006 /* */ +#define NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION 0x00000008 /* */ +#define NV_PFAULT_FAULT_TYPE_WORK_CREATION 0x00000009 /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE 0x0000000a /* */ +#define NV_PFAULT_FAULT_TYPE_COMPRESSION_FAILURE 0x0000000b /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND 0x0000000c /* */ +#define NV_PFAULT_FAULT_TYPE_REGION_VIOLATION 0x0000000d /* */ +#define NV_PFAULT_FAULT_TYPE_POISONED 0x0000000e /* */ +#define NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION 0x0000000f /* */ +#define NV_PFAULT_CLIENT 14:8 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_0 0x00000000 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_0 0x00000001 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_0 0x00000002 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_1 0x00000003 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_1 0x00000004 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_1 0x00000005 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_2 0x00000006 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_2 0x00000007 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_2 0x00000008 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_3 0x00000009 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_3 0x0000000A /* */ +#define NV_PFAULT_CLIENT_GPC_PE_3 0x0000000B /* */ +#define NV_PFAULT_CLIENT_GPC_RAST 0x0000000C /* */ +#define NV_PFAULT_CLIENT_GPC_GCC 0x0000000D /* */ +#define NV_PFAULT_CLIENT_GPC_GPCCS 0x0000000E /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_0 0x0000000F /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_1 0x00000010 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_2 0x00000011 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_3 0x00000012 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_4 0x00000014 /* */ +#define 
NV_PFAULT_CLIENT_GPC_T1_4 0x00000015 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_4 0x00000016 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_5 0x00000017 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_5 0x00000018 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_5 0x00000019 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_6 0x0000001A /* */ +#define NV_PFAULT_CLIENT_GPC_T1_6 0x0000001B /* */ +#define NV_PFAULT_CLIENT_GPC_PE_6 0x0000001C /* */ +#define NV_PFAULT_CLIENT_GPC_L1_7 0x0000001D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_7 0x0000001E /* */ +#define NV_PFAULT_CLIENT_GPC_PE_7 0x0000001F /* */ +#define NV_PFAULT_CLIENT_GPC_L1_8 0x00000020 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_8 0x00000021 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_8 0x00000022 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_9 0x00000023 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_9 0x00000024 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_9 0x00000025 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_10 0x00000026 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_10 0x00000027 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_10 0x00000028 /* */ +#define NV_PFAULT_CLIENT_GPC_L1_11 0x00000029 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_11 0x0000002A /* */ +#define NV_PFAULT_CLIENT_GPC_PE_11 0x0000002B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_0 0x00000030 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_1 0x00000031 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_2 0x00000032 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_3 0x00000033 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_4 0x00000034 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_5 0x00000035 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_6 0x00000036 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_7 0x00000037 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_8 0x00000038 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_9 0x00000039 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_10 0x0000003A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_11 0x0000003B /* */ +#define NV_PFAULT_CLIENT_GPC_GPM 0x00000013 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_0 0x00000014 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_1 0x00000015 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_2 0x00000016 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_3 0x00000017 /* */ +#define NV_PFAULT_CLIENT_GPC_RGG_UTLB 0x00000018 /* */ +#define NV_PFAULT_CLIENT_HUB_CE0 0x00000001 /* */ +#define NV_PFAULT_CLIENT_HUB_CE1 0x00000002 /* */ +#define NV_PFAULT_CLIENT_HUB_DNISO 0x00000003 /* */ +#define NV_PFAULT_CLIENT_HUB_FE 0x00000004 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS 0x00000005 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST 0x00000006 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST_CPU 0x00000007 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST_CPU_NB 0x00000008 /* */ +#define NV_PFAULT_CLIENT_HUB_ISO 0x00000009 /* */ +#define NV_PFAULT_CLIENT_HUB_MMU 0x0000000A /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC 0x0000000B /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC1 0x0000000D /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC2 0x00000033 /* */ +#define NV_PFAULT_CLIENT_HUB_NISO 0x0000000E /* */ +#define NV_PFAULT_CLIENT_HUB_P2P 0x0000000F /* */ +#define NV_PFAULT_CLIENT_HUB_PD 0x00000010 /* */ +#define NV_PFAULT_CLIENT_HUB_PERF 0x00000011 /* */ +#define NV_PFAULT_CLIENT_HUB_PMU 0x00000012 /* */ +#define NV_PFAULT_CLIENT_HUB_RASTERTWOD 0x00000013 /* */ +#define NV_PFAULT_CLIENT_HUB_SCC 0x00000014 /* */ +#define NV_PFAULT_CLIENT_HUB_SCC_NB 0x00000015 /* */ +#define NV_PFAULT_CLIENT_HUB_SEC 0x00000016 /* */ +#define NV_PFAULT_CLIENT_HUB_SSYNC 0x00000017 /* */ +#define NV_PFAULT_CLIENT_HUB_VIP 0x00000000 /* */ +#define NV_PFAULT_CLIENT_HUB_GRCOPY 0x00000018 /* */ +#define NV_PFAULT_CLIENT_HUB_CE2 0x00000018 
/* */ +#define NV_PFAULT_CLIENT_HUB_XV 0x00000019 /* */ +#define NV_PFAULT_CLIENT_HUB_MMU_NB 0x0000001A /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC 0x0000001B /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC0 0x0000001B /* */ +#define NV_PFAULT_CLIENT_HUB_DFALCON 0x0000001C /* */ +#define NV_PFAULT_CLIENT_HUB_SKED 0x0000001D /* */ +#define NV_PFAULT_CLIENT_HUB_AFALCON 0x0000001E /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE0 0x00000020 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE1 0x00000021 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE2 0x00000022 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE3 0x00000023 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE4 0x00000024 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE5 0x00000025 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE6 0x00000026 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE7 0x00000027 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE8 0x00000028 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE9 0x00000029 /* */ +#define NV_PFAULT_CLIENT_HUB_HSHUB 0x0000002A /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X0 0x0000002B /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X1 0x0000002C /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X2 0x0000002D /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X3 0x0000002E /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X4 0x0000002F /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X5 0x00000030 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X6 0x00000031 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X7 0x00000032 /* */ +#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER0 0x00000034 /* */ +#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER1 0x00000035 /* */ +#define NV_PFAULT_CLIENT_HUB_DONT_CARE 0x0000001F /* */ +#define NV_PFAULT_ACCESS_TYPE 18:16 /* */ +#define NV_PFAULT_ACCESS_TYPE_READ 0x00000000 /* */ +#define NV_PFAULT_ACCESS_TYPE_WRITE 0x00000001 /* */ +#define NV_PFAULT_ACCESS_TYPE_ATOMIC 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_PREFETCH 0x00000003 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE 20:20 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE_GPC 0x00000000 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE_HUB 0x00000001 /* */ +#define NV_PFAULT_GPC_ID 28:24 /* */ +#endif // __gp100_dev_fault_h__ diff --git a/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_fb.h b/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_fb.h new file mode 100644 index 000000000..faced40e2 --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_fb.h @@ -0,0 +1,71 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +// Excerpt of gp100/dev_fb.h + +#ifndef __dev_fb_h__ +#define __dev_fb_h__ + +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_VA 0:0 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_VA_FALSE 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_VA_TRUE 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_PDB 1:1 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_PDB_FALSE 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_PDB_TRUE 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_REPLAY 5:3 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_REPLAY_NONE 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_REPLAY_START 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_REPLAY_CANCEL 0x00000004 /* */ +#define NV_PFB_PRI_MMU_INVALIDATE_SYS_MEMBAR 6:6 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_SYS_MEMBAR_FALSE 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_SYS_MEMBAR_TRUE 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ACK 8:7 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_ACK_NONE_REQUIRED 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ACK_INTRANODE 0x00000002 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ACK_GLOBALLY 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CANCEL_CLIENT_ID 14:9 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_CANCEL_GPC_ID 19:15 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_CANCEL_CLIENT_TYPE 20:20 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_CANCEL_CLIENT_TYPE_GPC 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CANCEL_CLIENT_TYPE_HUB 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL 26:24 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL_ALL 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL_PTE_ONLY 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE0 0x00000002 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE1 0x00000003 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE2 0x00000004 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE3 0x00000005 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE4 0x00000006 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE5 0x00000007 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_TRIGGER 31:31 /* -WEVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_TRIGGER_FALSE 0x00000000 /* -WE-V */ +#define NV_PFB_PRI_MMU_INVALIDATE_TRIGGER_TRUE 0x00000001 /* -W--T */ + +#define NV_PFB_PRI_MMU_PAGE_FAULT_CTRL_PRF_FILTER 1:0 /* RWEVF */ +#define NV_PFB_PRI_MMU_PAGE_FAULT_CTRL_PRF_FILTER_SEND_ALL 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_PAGE_FAULT_CTRL_PRF_FILTER_SEND_NONE 0x00000003 /* RW--V */ + +#endif // __dev_fb_h__ diff --git a/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_mmu.h b/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_mmu.h new file mode 100644 index 000000000..cb1131e8d --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/pascal/gp100/dev_mmu.h @@ -0,0 +1,625 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a 
copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef __gp100_dev_mmu_h__ +#define __gp100_dev_mmu_h__ +/* This file is autogenerated. Do not edit */ +#define NV_MMU_PDE /* ----G */ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE /* ----G */ +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define 
NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ATOMIC_DISABLE (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+18+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_COMPTAGS_NONE 0x0 /* */ +#define NV_MMU_PTE_COMPTAGS_1 0x1 /* */ +#define NV_MMU_PTE_COMPTAGS_2 0x2 /* */ +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0xff /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_Z16 0x01 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2C 0x02 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2C 0x03 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2C 0x04 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2C 0x05 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2C 0x06 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2Z 0x07 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2Z 0x08 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2Z 0x09 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2Z 0x0a /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2Z 0x0b /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2CZ 0x36 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2CZ 0x37 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2CZ 0x38 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2CZ 0x39 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2CZ 0x5f /* R---V */ +#define NV_MMU_PTE_KIND_Z16_4CZ 0x0c /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_4CZ 
0x0d /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_4CZ 0x0e /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_4CZ 0x0f /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_4CZ 0x10 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24 0x11 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_1Z 0x12 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_1Z 0x13 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_1Z 0x14 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_1Z 0x15 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_1Z 0x16 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_2CZ 0x17 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_2CZ 0x18 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_2CZ 0x19 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_2CZ 0x1a /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_2CZ 0x1b /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_2CS 0x1c /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_2CS 0x1d /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_2CS 0x1e /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_2CS 0x1f /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_2CS 0x20 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_4CSZV 0x21 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_4CSZV 0x22 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_4CSZV 0x23 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_4CSZV 0x24 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_4CSZV 0x25 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12 0x26 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4 0x27 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8 0x28 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24 0x29 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_1ZV 0x2e /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_1ZV 0x2f /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_1ZV 0x30 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_1ZV 0x31 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2CS 0x32 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2CS 0x33 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2CS 0x34 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2CS 0x35 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2CZV 0x3a /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2CZV 0x3b /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2CZV 0x3c /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2CZV 0x3d /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2ZV 0x3e /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2ZV 0x3f /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2ZV 0x40 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2ZV 0x41 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_4CSZV 0x42 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_4CSZV 0x43 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_4CSZV 0x44 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_4CSZV 0x45 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8 0x46 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_1Z 0x47 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_1Z 0x48 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_1Z 0x49 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_1Z 0x4a /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_1Z 0x4b /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_2CS 0x4c /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_2CS 0x4d /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_2CS 0x4e /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_2CS 0x4f /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_2CS 0x50 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_2CZ 0x51 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_2CZ 0x52 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_2CZ 0x53 /* R---V */ 
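(Aside, not part of the diff.) The colon-separated field definitions above, e.g. NV_MMU_PTE_KIND spanning (1*32+11):(1*32+4), i.e. bits 43:36 of the 8-byte PTE, follow the usual device-header convention where a macro expands to a high:low pair that is split apart with the ternary operator. Below is a minimal illustrative sketch of that idea, assuming the 8-byte PTE is read back as a single little-endian 64-bit value; the FIELD_* helpers and the sample PTE value are assumptions for demonstration, not the driver's own accessors.

```c
#include <stdint.h>
#include <stdio.h>

/* Split a "high:low" field macro with the classic ternary trick:
 * (1 ? 43:36) evaluates to 43, (0 ? 43:36) evaluates to 36. */
#define FIELD_HI(f)       (1 ? f)
#define FIELD_LO(f)       (0 ? f)
#define FIELD_WIDTH(f)    (FIELD_HI(f) - FIELD_LO(f) + 1)
#define FIELD_GET(f, v)   (((uint64_t)(v) >> FIELD_LO(f)) & \
                           ((1ULL << FIELD_WIDTH(f)) - 1))

/* Field layouts copied from the dev_mmu.h excerpt above. */
#define NV_MMU_PTE_APERTURE    (1*32+2):(1*32+1)
#define NV_MMU_PTE_KIND        (1*32+11):(1*32+4)
#define NV_MMU_PTE_KIND_PITCH  0x00

int main(void)
{
    /* Made-up PTE value, as if the two 32-bit PTE words had been read
     * back as one little-endian 64-bit quantity. */
    uint64_t pte = 0x0000001000000001ULL;

    printf("aperture = %llu\n",
           (unsigned long long)FIELD_GET(NV_MMU_PTE_APERTURE, pte));
    printf("kind     = 0x%02llx (pitch? %d)\n",
           (unsigned long long)FIELD_GET(NV_MMU_PTE_KIND, pte),
           FIELD_GET(NV_MMU_PTE_KIND, pte) == NV_MMU_PTE_KIND_PITCH);
    return 0;
}
```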
+#define NV_MMU_PTE_KIND_Z24S8_MS8_2CZ 0x54 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_2CZ 0x55 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_4CSZV 0x56 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_4CSZV 0x57 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_4CSZV 0x58 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_4CSZV 0x59 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_4CSZV 0x5a /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12 0x5b /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4 0x5c /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8 0x5d /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24 0x5e /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_1ZV 0x63 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_1ZV 0x64 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_1ZV 0x65 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_1ZV 0x66 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2CS 0x67 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2CS 0x68 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2CS 0x69 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2CS 0x6a /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2CZV 0x6f /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2CZV 0x70 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2CZV 0x71 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2CZV 0x72 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2ZV 0x73 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2ZV 0x74 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2ZV 0x75 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2ZV 0x76 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_4CSZV 0x77 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_4CSZV 0x78 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_4CSZV 0x79 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_4CSZV 0x7a /* R---V */ +#define NV_MMU_PTE_KIND_ZF32 0x7b /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_1Z 0x7c /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_1Z 0x7d /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_1Z 0x7e /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_1Z 0x7f /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_1Z 0x80 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_2CS 0x81 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_2CS 0x82 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_2CS 0x83 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_2CS 0x84 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_2CS 0x85 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_2CZ 0x86 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_2CZ 0x87 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_2CZ 0x88 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_2CZ 0x89 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_2CZ 0x8a /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12 0x8b /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4 0x8c /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8 0x8d /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24 0x8e /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1CS 0x8f /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1CS 0x90 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1CS 0x91 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1CS 0x92 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1ZV 0x97 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1ZV 0x98 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1ZV 0x99 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1ZV 0x9a /* R---V */ +#define 
NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1CZV 0x9b /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1CZV 0x9c /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1CZV 0x9d /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1CZV 0x9e /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_2CS 0x9f /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_2CS 0xa0 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_2CS 0xa1 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_2CS 0xa2 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_2CSZV 0xa3 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_2CSZV 0xa4 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_2CSZV 0xa5 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_2CSZV 0xa6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12 0xa7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4 0xa8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8 0xa9 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24 0xaa /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1CS 0xab /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1CS 0xac /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1CS 0xad /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1CS 0xae /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1ZV 0xb3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1ZV 0xb4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1ZV 0xb5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1ZV 0xb6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1CZV 0xb7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1CZV 0xb8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1CZV 0xb9 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1CZV 0xba /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_2CS 0xbb /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_2CS 0xbc /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_2CS 0xbd /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_2CS 0xbe /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_2CSZV 0xbf /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_2CSZV 0xc0 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_2CSZV 0xc1 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_2CSZV 0xc2 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8 0xc3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_1CS 0xc4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_1CS 0xc5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_1CS 0xc6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_1CS 0xc7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_1CS 0xc8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_2CSZV 0xce /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_2CSZV 0xcf /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_2CSZV 0xd0 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_2CSZV 0xd1 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_2CSZV 0xd2 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_2CS 0xd3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_2CS 0xd4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_2CS 0xd5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_2CS 0xd6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_2CS 0xd7 /* R---V */ +#define NV_MMU_PTE_KIND_S8 0x2a /* R---V */ +#define NV_MMU_PTE_KIND_S8_2S 0x2b /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_16BX2 0xfe /* 
R---V */ +#define NV_MMU_PTE_KIND_C32_2C 0xd8 /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CBR 0xd9 /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CBA 0xda /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CRA 0xdb /* R---V */ +#define NV_MMU_PTE_KIND_C32_2BRA 0xdc /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2C 0xdd /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2CBR 0xde /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2CRA 0xcc /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2C 0xdf /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBR 0xe0 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBA 0xe1 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CRA 0xe2 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2BRA 0xe3 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_4CBRA 0x2c /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2C 0xe4 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2CRA 0xe5 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2C 0xe6 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CBR 0xe7 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CBA 0xe8 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CRA 0xe9 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2BRA 0xea /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2C 0xeb /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2CBR 0xec /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2CRA 0xcd /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2C 0xed /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBR 0xee /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBA 0xef /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CRA 0xf0 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2BRA 0xf1 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_4CBRA 0x2d /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2C 0xf2 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2CRA 0xf3 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2C 0xf4 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2CR 0xf5 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS2_2C 0xf6 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS2_2CR 0xf7 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2C 0xf8 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2CR 0xf9 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2C 0xfa /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2CR 0xfb /* R---V */ +#define NV_MMU_PTE_KIND_X8C24 0xfc /* R---V */ +#define NV_MMU_PTE_KIND_PITCH_NO_SWIZZLE 0xfd /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xca /* R---V */ +#define NV_MMU_PTE_KIND_SMHOST_MESSAGE 0xcb /* R---V */ +#define NV_MMU_VER1_PDE /* ----G */ +#define NV_MMU_VER1_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_VER1_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ 
+#define NV_MMU_VER1_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_VER1_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_VER1_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER1_PDE__SIZE 8 +#define NV_MMU_VER1_PTE /* ----G */ +#define NV_MMU_VER1_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_VER1_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_VER1_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_VER1_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_VER1_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_VER1_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_VER1_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_VER1_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_VER1_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_COMPTAGLINE (1*32+18+11):(1*32+12) /* RWXVF */ +#define NV_MMU_VER1_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER1_PTE__SIZE 8 +#define NV_MMU_VER1_PTE_COMPTAGS_NONE 0x0 /* */ +#define 
NV_MMU_VER1_PTE_COMPTAGS_1 0x1 /* */ +#define NV_MMU_VER1_PTE_COMPTAGS_2 0x2 /* */ +#define NV_MMU_NEW_PDE /* ----G */ +#define NV_MMU_NEW_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_NEW_PDE_APERTURE_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PDE_VOL 3:3 /* RWXVF */ +#define NV_MMU_NEW_PDE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PDE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_PDE__SIZE 8 +#define NV_MMU_NEW_DUAL_PDE /* ----G */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */ +#define 
NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID_PEER 99:(100-3) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */ +#define NV_MMU_NEW_DUAL_PDE__SIZE 16 +#define NV_MMU_NEW_PTE /* ----G */ +#define NV_MMU_NEW_PTE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_NEW_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PTE_VOL 3:3 /* RWXVF */ +#define NV_MMU_NEW_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_ENCRYPTED 4:4 /* RWXVF */ +#define NV_MMU_NEW_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_NEW_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_NEW_PTE_PRIVILEGE 5:5 /* RWXVF */ +#define NV_MMU_NEW_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_READ_ONLY 6:6 /* RWXVF */ +#define NV_MMU_NEW_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_NEW_PTE_COMPTAGLINE (18+35):36 /* RWXVF */ +#define NV_MMU_NEW_PTE_KIND 63:56 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_PTE__SIZE 8 +#define NV_MMU_VER2_PDE /* ----G */ +#define NV_MMU_VER2_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER2_PDE_APERTURE_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PDE_VOL 3:3 /* RWXVF */ +#define NV_MMU_VER2_PDE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PDE_VOL_FALSE 0x00000000 /* 
RW--V */ +#define NV_MMU_VER2_PDE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_PDE__SIZE 8 +#define NV_MMU_VER2_DUAL_PDE /* ----G */ +#define NV_MMU_VER2_DUAL_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID_PEER 99:(100-3) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */ +#define NV_MMU_VER2_DUAL_PDE__SIZE 16 +#define NV_MMU_VER2_PTE /* ----G */ +#define NV_MMU_VER2_PTE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER2_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PTE_VOL 3:3 /* RWXVF */ +#define NV_MMU_VER2_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_ENCRYPTED 4:4 /* 
RWXVF */ +#define NV_MMU_VER2_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_VER2_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_VER2_PTE_PRIVILEGE 5:5 /* RWXVF */ +#define NV_MMU_VER2_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_READ_ONLY 6:6 /* RWXVF */ +#define NV_MMU_VER2_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_VER2_PTE_COMPTAGLINE (18+35):36 /* RWXVF */ +#define NV_MMU_VER2_PTE_KIND 63:56 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_PTE__SIZE 8 +#endif // __gp100_dev_mmu_h__ diff --git a/kernel-open/nvidia-uvm/hwref/turing/tu102/dev_fault.h b/kernel-open/nvidia-uvm/hwref/turing/tu102/dev_fault.h new file mode 100644 index 000000000..b0263e7e2 --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/turing/tu102/dev_fault.h @@ -0,0 +1,400 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef __tu102_dev_fault_h__ +#define __tu102_dev_fault_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_PFAULT_MMU_ENG_ID_GRAPHICS 64 /* */ +#define NV_PFAULT_MMU_ENG_ID_DISPLAY 1 /* */ +#define NV_PFAULT_MMU_ENG_ID_GSP 2 /* */ +#define NV_PFAULT_MMU_ENG_ID_IFB 9 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1 128 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2 192 /* */ +#define NV_PFAULT_MMU_ENG_ID_SEC 14 /* */ +#define NV_PFAULT_MMU_ENG_ID_PERF 8 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC 10 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC0 10 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC1 25 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC2 26 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVJPG0 24 /* */ +#define NV_PFAULT_MMU_ENG_ID_GRCOPY 15 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE0 15 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE1 16 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE2 17 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE3 18 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE4 19 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE5 20 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE6 21 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE7 22 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE8 23 /* */ +#define NV_PFAULT_MMU_ENG_ID_PWR_PMU 6 /* */ +#define NV_PFAULT_MMU_ENG_ID_PTP 3 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC0 11 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC1 12 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC2 13 /* */ +#define NV_PFAULT_MMU_ENG_ID_PHYSICAL 31 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST0 32 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST1 33 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST2 34 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST3 35 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST4 36 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST5 37 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST6 38 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST7 39 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST8 40 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST9 41 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST10 42 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST11 43 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST12 44 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST13 45 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST14 46 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN0 128 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN1 129 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN2 130 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN3 131 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN4 132 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN5 133 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN6 134 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN7 135 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN8 136 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN9 137 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN10 138 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN11 139 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN12 140 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN13 141 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN14 142 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN15 143 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN16 144 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN17 145 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN18 146 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN19 147 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN20 148 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN21 149 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN22 150 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN23 151 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN24 152 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN25 153 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN26 154 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN27 155 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN28 156 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN29 157 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN30 158 /* */ +#define 
NV_PFAULT_MMU_ENG_ID_BAR1_FN31 159 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN32 160 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN33 161 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN34 162 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN35 163 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN36 164 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN37 165 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN38 166 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN39 167 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN40 168 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN41 169 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN42 170 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN43 171 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN44 172 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN45 173 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN46 174 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN47 175 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN48 176 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN49 177 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN50 178 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN51 179 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN52 180 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN53 181 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN54 182 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN55 183 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN56 184 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN57 185 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN58 186 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN59 187 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN60 188 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN61 189 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN62 190 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1_FN63 191 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN0 192 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN1 193 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN2 194 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN3 195 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN4 196 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN5 197 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN6 198 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN7 199 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN8 200 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN9 201 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN10 202 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN11 203 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN12 204 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN13 205 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN14 206 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN15 207 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN16 208 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN17 209 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN18 210 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN19 211 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN20 212 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN21 213 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN22 214 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN23 215 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN24 216 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN25 217 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN26 218 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN27 219 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN28 220 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN29 221 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN30 222 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN31 223 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN32 224 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN33 225 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN34 226 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN35 227 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN36 228 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN37 229 /* */ +#define 
NV_PFAULT_MMU_ENG_ID_BAR2_FN38 230 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN39 231 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN40 232 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN41 233 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN42 234 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN43 235 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN44 236 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN45 237 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN46 238 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN47 239 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN48 240 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN49 241 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN50 242 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN51 243 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN52 244 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN53 245 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN54 246 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN55 247 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN56 248 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN57 249 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN58 250 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN59 251 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN60 252 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN61 253 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN62 254 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2_FN63 255 /* */ +#define NV_PFAULT_FAULT_TYPE 4:0 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE 0x00000000 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE_SIZE 0x00000001 /* */ +#define NV_PFAULT_FAULT_TYPE_PTE 0x00000002 /* */ +#define NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION 0x00000003 /* */ +#define NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK 0x00000004 /* */ +#define NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION 0x00000005 /* */ +#define NV_PFAULT_FAULT_TYPE_RO_VIOLATION 0x00000006 /* */ +#define NV_PFAULT_FAULT_TYPE_WO_VIOLATION 0x00000007 /* */ +#define NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION 0x00000008 /* */ +#define NV_PFAULT_FAULT_TYPE_WORK_CREATION 0x00000009 /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE 0x0000000a /* */ +#define NV_PFAULT_FAULT_TYPE_COMPRESSION_FAILURE 0x0000000b /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND 0x0000000c /* */ +#define NV_PFAULT_FAULT_TYPE_REGION_VIOLATION 0x0000000d /* */ +#define NV_PFAULT_FAULT_TYPE_POISONED 0x0000000e /* */ +#define NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION 0x0000000f /* */ +#define NV_PFAULT_CLIENT 14:8 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_0 0x00000000 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_1 0x00000001 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_2 0x00000002 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_3 0x00000003 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_4 0x00000004 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_5 0x00000005 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_6 0x00000006 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_7 0x00000007 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_0 0x00000008 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_1 0x00000009 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_2 0x0000000A /* */ +#define NV_PFAULT_CLIENT_GPC_PE_3 0x0000000B /* */ +#define NV_PFAULT_CLIENT_GPC_PE_4 0x0000000C /* */ +#define NV_PFAULT_CLIENT_GPC_PE_5 0x0000000D /* */ +#define NV_PFAULT_CLIENT_GPC_PE_6 0x0000000E /* */ +#define NV_PFAULT_CLIENT_GPC_PE_7 0x0000000F /* */ +#define NV_PFAULT_CLIENT_GPC_RAST 0x00000010 /* */ +#define NV_PFAULT_CLIENT_GPC_GCC 0x00000011 /* */ +#define NV_PFAULT_CLIENT_GPC_GPCCS 0x00000012 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_0 0x00000013 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_1 0x00000014 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_2 0x00000015 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_3 0x00000016 /* 
*/ +#define NV_PFAULT_CLIENT_GPC_T1_8 0x00000021 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_9 0x00000022 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_10 0x00000023 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_11 0x00000024 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_12 0x00000025 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_13 0x00000026 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_14 0x00000027 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_15 0x00000028 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_0 0x00000029 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_1 0x0000002A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_2 0x0000002B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_3 0x0000002C /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_4 0x0000002D /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_5 0x0000002E /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_6 0x0000002F /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_7 0x00000030 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_8 0x00000031 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_9 0x00000032 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_8 0x00000033 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_9 0x00000034 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_16 0x00000035 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_17 0x00000036 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_18 0x00000037 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_19 0x00000038 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_10 0x00000039 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_11 0x0000003A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_10 0x0000003B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_11 0x0000003C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_20 0x0000003D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_21 0x0000003E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_22 0x0000003F /* */ +#define NV_PFAULT_CLIENT_GPC_T1_23 0x00000040 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_12 0x00000041 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_13 0x00000042 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_12 0x00000043 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_13 0x00000044 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_24 0x00000045 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_25 0x00000046 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_26 0x00000047 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_27 0x00000048 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_14 0x00000049 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_15 0x0000004A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_14 0x0000004B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_15 0x0000004C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_28 0x0000004D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_29 0x0000004E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_30 0x0000004F /* */ +#define NV_PFAULT_CLIENT_GPC_T1_31 0x00000050 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_16 0x00000051 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_17 0x00000052 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_16 0x00000053 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_17 0x00000054 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_32 0x00000055 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_33 0x00000056 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_34 0x00000057 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_35 0x00000058 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_18 0x00000059 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_19 0x0000005A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_18 0x0000005B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_19 0x0000005C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_36 0x0000005D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_37 0x0000005E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_38 0x0000005F /* */ +#define NV_PFAULT_CLIENT_GPC_T1_39 0x00000060 /* */ +#define NV_PFAULT_CLIENT_GPC_GPM 0x00000017 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_0 0x00000018 /* */ 
+#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_1 0x00000019 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_2 0x0000001A /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_3 0x0000001B /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_4 0x0000001C /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_5 0x0000001D /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_6 0x0000001E /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_7 0x0000001F /* */ +#define NV_PFAULT_CLIENT_GPC_RGG_UTLB 0x00000020 /* */ +#define NV_PFAULT_CLIENT_HUB_CE0 0x00000001 /* */ +#define NV_PFAULT_CLIENT_HUB_CE1 0x00000002 /* */ +#define NV_PFAULT_CLIENT_HUB_DNISO 0x00000003 /* */ +#define NV_PFAULT_CLIENT_HUB_FE 0x00000004 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS 0x00000005 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST 0x00000006 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST_CPU 0x00000007 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST_CPU_NB 0x00000008 /* */ +#define NV_PFAULT_CLIENT_HUB_ISO 0x00000009 /* */ +#define NV_PFAULT_CLIENT_HUB_MMU 0x0000000A /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC 0x0000000B /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC0 0x0000000B /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC1 0x0000000D /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC2 0x00000033 /* */ +#define NV_PFAULT_CLIENT_HUB_NISO 0x0000000E /* */ +#define NV_PFAULT_CLIENT_HUB_P2P 0x0000000F /* */ +#define NV_PFAULT_CLIENT_HUB_PD 0x00000010 /* */ +#define NV_PFAULT_CLIENT_HUB_PERF 0x00000011 /* */ +#define NV_PFAULT_CLIENT_HUB_PMU 0x00000012 /* */ +#define NV_PFAULT_CLIENT_HUB_RASTERTWOD 0x00000013 /* */ +#define NV_PFAULT_CLIENT_HUB_SCC 0x00000014 /* */ +#define NV_PFAULT_CLIENT_HUB_SCC_NB 0x00000015 /* */ +#define NV_PFAULT_CLIENT_HUB_SEC 0x00000016 /* */ +#define NV_PFAULT_CLIENT_HUB_SSYNC 0x00000017 /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC1 0x0000003A /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC2 0x0000003B /* */ +#define NV_PFAULT_CLIENT_HUB_NVJPG0 0x0000003C /* */ +#define NV_PFAULT_CLIENT_HUB_VIP 0x00000000 /* */ +#define NV_PFAULT_CLIENT_HUB_GRCOPY 0x00000018 /* */ +#define NV_PFAULT_CLIENT_HUB_CE2 0x00000018 /* */ +#define NV_PFAULT_CLIENT_HUB_XV 0x00000019 /* */ +#define NV_PFAULT_CLIENT_HUB_MMU_NB 0x0000001A /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC 0x0000001B /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC0 0x0000001B /* */ +#define NV_PFAULT_CLIENT_HUB_DFALCON 0x0000001C /* */ +#define NV_PFAULT_CLIENT_HUB_SKED 0x0000001D /* */ +#define NV_PFAULT_CLIENT_HUB_AFALCON 0x0000001E /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE0 0x00000020 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE1 0x00000021 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE2 0x00000022 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE3 0x00000023 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE4 0x00000024 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE5 0x00000025 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE6 0x00000026 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE7 0x00000027 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE8 0x00000028 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE9 0x00000029 /* */ +#define NV_PFAULT_CLIENT_HUB_HSHUB 0x0000002A /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X0 0x0000002B /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X1 0x0000002C /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X2 0x0000002D /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X3 0x0000002E /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X4 0x0000002F /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X5 0x00000030 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X6 0x00000031 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X7 0x00000032 /* */ +#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER0 0x00000034 /* */ +#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER1 0x00000035 /* */ 
+#define NV_PFAULT_CLIENT_HUB_DWBIF 0x00000036 /* */ +#define NV_PFAULT_CLIENT_HUB_FBFALCON 0x00000037 /* */ +#define NV_PFAULT_CLIENT_HUB_CE_SHIM 0x00000038 /* */ +#define NV_PFAULT_CLIENT_HUB_GSP 0x00000039 /* */ +#define NV_PFAULT_CLIENT_HUB_DONT_CARE 0x0000001F /* */ +#define NV_PFAULT_ACCESS_TYPE 19:16 /* */ +#define NV_PFAULT_ACCESS_TYPE_READ 0x00000000 /* */ +#define NV_PFAULT_ACCESS_TYPE_WRITE 0x00000001 /* */ +#define NV_PFAULT_ACCESS_TYPE_ATOMIC 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_PREFETCH 0x00000003 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_READ 0x00000000 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_WRITE 0x00000001 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_STRONG 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_PREFETCH 0x00000003 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_WEAK 0x00000004 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_READ 0x00000008 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_WRITE 0x00000009 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_ATOMIC 0x0000000a /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_PREFETCH 0x0000000b /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE 20:20 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE_GPC 0x00000000 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE_HUB 0x00000001 /* */ +#define NV_PFAULT_GPC_ID 28:24 /* */ +#define NV_PFAULT_PROTECTED_MODE 29:29 /* */ +#define NV_PFAULT_REPLAYABLE_FAULT_EN 30:30 /* */ +#define NV_PFAULT_VALID 31:31 /* */ +#endif // __tu102_dev_fault_h__ diff --git a/kernel-open/nvidia-uvm/hwref/turing/tu102/dev_mmu.h b/kernel-open/nvidia-uvm/hwref/turing/tu102/dev_mmu.h new file mode 100644 index 000000000..ce9464c2c --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/turing/tu102/dev_mmu.h @@ -0,0 +1,649 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef __tu102_dev_mmu_h__ +#define __tu102_dev_mmu_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_MMU_PDE /* ----G */ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE /* ----G */ +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 
0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ATOMIC_DISABLE (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+20+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_COMPTAGS_NONE 0x0 /* */ +#define NV_MMU_PTE_COMPTAGS_1 0x1 /* */ +#define NV_MMU_PTE_COMPTAGS_2 0x2 /* */ +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x06 /* R---V */ +#define NV_MMU_PTE_KIND_Z16 0x01 /* R---V */ +#define NV_MMU_PTE_KIND_S8 0x02 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24 0x03 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8 0x04 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8 0x05 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x08 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x09 /* R---V */ +#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0x0A /* R---V */ +#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0x0B /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0x0C /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0x0D /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0x0E /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0x0F /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2C 0x2a /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2C 0x11 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2C 0xC3 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2C 0x46 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2C 0x6c /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2Z 0x6b /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2Z 0x10 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2Z 0x60 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2Z 0x61 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2Z 0x62 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2CZ 0x36 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2CZ 0x37 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2CZ 0x38 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2CZ 0x39 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2CZ 0x5f /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_1Z 0x12 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_1Z 0x13 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_1Z 0x14 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_1Z 0x15 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_1Z 0x16 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_2CZ 0x17 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_2CZ 0x18 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_2CZ 0x19 
/* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_2CZ 0x1a /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_2CZ 0x1b /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_2CS 0x1c /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_2CS 0x1d /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_2CS 0x1e /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_2CS 0x1f /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_2CS 0x20 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_4CSZV 0x21 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_4CSZV 0x22 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_4CSZV 0x23 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_4CSZV 0x24 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_4CSZV 0x25 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12 0x26 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4 0x27 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8 0x28 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24 0x29 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_1ZV 0x2e /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_1ZV 0x2f /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_1ZV 0x30 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_1ZV 0x31 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2CS 0x32 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2CS 0x33 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2CS 0x34 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2CS 0x35 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2CZV 0x3a /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2CZV 0x3b /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2CZV 0x3c /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2CZV 0x3d /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2ZV 0x3e /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2ZV 0x3f /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2ZV 0x40 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2ZV 0x41 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_4CSZV 0x42 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_4CSZV 0x43 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_4CSZV 0x44 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_4CSZV 0x45 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_1Z 0x47 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_1Z 0x48 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_1Z 0x49 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_1Z 0x4a /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_1Z 0x4b /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_2CS 0x4c /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_2CS 0x4d /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_2CS 0x4e /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_2CS 0x4f /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_2CS 0x50 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_2CZ 0x51 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_2CZ 0x52 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_2CZ 0x53 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_2CZ 0x54 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_2CZ 0x55 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_4CSZV 0x56 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_4CSZV 0x57 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_4CSZV 0x58 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_4CSZV 0x59 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_4CSZV 0x5a /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12 0x5b /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4 0x5c /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8 0x5d /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24 0x5e /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_1ZV 0x63 /* R---V */ +#define 
NV_MMU_PTE_KIND_Z24V8_MS4_VC4_1ZV 0x64 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_1ZV 0x65 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_1ZV 0x66 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2CS 0x67 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2CS 0x68 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2CS 0x69 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2CS 0x6a /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2CZV 0x6f /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2CZV 0x70 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2CZV 0x71 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2CZV 0x72 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2ZV 0x73 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2ZV 0x74 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2ZV 0x75 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2ZV 0x76 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_4CSZV 0x77 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_4CSZV 0x78 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_4CSZV 0x79 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_4CSZV 0x7a /* R---V */ +#define NV_MMU_PTE_KIND_ZF32 0x7b /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_1Z 0x7c /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_1Z 0x7d /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_1Z 0x7e /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_1Z 0x7f /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_1Z 0x80 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_2CS 0x81 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_2CS 0x82 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_2CS 0x83 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_2CS 0x84 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_2CS 0x85 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_2CZ 0x86 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_2CZ 0x87 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_2CZ 0x88 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_2CZ 0x89 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_2CZ 0x8a /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12 0x8b /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4 0x8c /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8 0x8d /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24 0x8e /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1CS 0x8f /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1CS 0x90 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1CS 0x91 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1CS 0x92 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1ZV 0x97 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1ZV 0x98 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1ZV 0x99 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1ZV 0x9a /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1CZV 0x9b /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1CZV 0x9c /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1CZV 0x9d /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1CZV 0x9e /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_2CS 0x9f /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_2CS 0xa0 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_2CS 0xa1 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_2CS 0xa2 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_2CSZV 0xa3 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_2CSZV 0xa4 /* R---V */ +#define 
NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_2CSZV 0xa5 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_2CSZV 0xa6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12 0xa7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4 0xa8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8 0xa9 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24 0xaa /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1CS 0xab /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1CS 0xac /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1CS 0xad /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1CS 0xae /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1ZV 0xb3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1ZV 0xb4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1ZV 0xb5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1ZV 0xb6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1CZV 0xb7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1CZV 0xb8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1CZV 0xb9 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1CZV 0xba /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_2CS 0xbb /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_2CS 0xbc /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_2CS 0xbd /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_2CS 0xbe /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_2CSZV 0xbf /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_2CSZV 0xc0 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_2CSZV 0xc1 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_2CSZV 0xc2 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_1CS 0xc4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_1CS 0xc5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_1CS 0xc6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_1CS 0xc7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_1CS 0xc8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_2CSZV 0xce /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_2CSZV 0xcf /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_2CSZV 0xd0 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_2CSZV 0xd1 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_2CSZV 0xd2 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_2CS 0xd3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_2CS 0xd4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_2CS 0xd5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_2CS 0xd6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_2CS 0xd7 /* R---V */ +#define NV_MMU_PTE_KIND_S8_2S 0x2b /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_16BX2 0xfe /* R---V */ +#define NV_MMU_PTE_KIND_C32_2C 0xd8 /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CBR 0xd9 /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CBA 0xda /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CRA 0xdb /* R---V */ +#define NV_MMU_PTE_KIND_C32_2BRA 0xdc /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2C 0xdd /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2CBR 0xde /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_4CBRA 0xcc /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2C 0xdf /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBR 0xe0 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBA 0xe1 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CRA 0xe2 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2BRA 0xe3 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_4CBRA 0x2c /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2C 
0xe4 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2CRA 0xe5 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2C 0xe6 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CBR 0xe7 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CBA 0xe8 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CRA 0xe9 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2BRA 0xea /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2C 0xeb /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2CBR 0xec /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_4CBRA 0xcd /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2C 0xed /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBR 0xee /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBA 0xef /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CRA 0xf0 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2BRA 0xf1 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_4CBRA 0x2d /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2C 0xf2 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2CRA 0xf3 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2C 0xf4 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2CR 0xf5 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS2_2C 0xf6 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS2_2CR 0xf7 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2C 0xf8 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2CR 0xf9 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2C 0xfa /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2CR 0xfb /* R---V */ +#define NV_MMU_PTE_KIND_X8C24 0xfc /* R---V */ +#define NV_MMU_PTE_KIND_PITCH_NO_SWIZZLE 0xfd /* R---V */ +#define NV_MMU_PTE_KIND_SMHOST_MESSAGE 0xcb /* R---V */ +#define NV_MMU_VER1_PDE /* ----G */ +#define NV_MMU_VER1_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_VER1_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_VER1_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_VER1_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* 
RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER1_PDE__SIZE 8 +#define NV_MMU_VER1_PTE /* ----G */ +#define NV_MMU_VER1_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_VER1_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_VER1_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_VER1_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_VER1_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_VER1_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_VER1_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_VER1_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_VER1_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_COMPTAGLINE (1*32+20+11):(1*32+12) /* RWXVF */ +#define NV_MMU_VER1_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER1_PTE__SIZE 8 +#define NV_MMU_VER1_PTE_COMPTAGS_NONE 0x0 /* */ +#define NV_MMU_VER1_PTE_COMPTAGS_1 0x1 /* */ +#define NV_MMU_VER1_PTE_COMPTAGS_2 0x2 /* */ +#define NV_MMU_NEW_PDE /* ----G */ +#define NV_MMU_NEW_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_NEW_PDE_APERTURE_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define 
NV_MMU_NEW_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PDE_VOL 3:3 /* RWXVF */ +#define NV_MMU_NEW_PDE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PDE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_NO_ATS 5:5 /* RWXVF */ +#define NV_MMU_NEW_PDE_NO_ATS_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_NO_ATS_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_PDE__SIZE 8 +#define NV_MMU_NEW_DUAL_PDE /* ----G */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_NO_ATS 5:5 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_NO_ATS_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_NO_ATS_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID_PEER 99:(100-3) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */ +#define NV_MMU_NEW_DUAL_PDE__SIZE 16 +#define NV_MMU_NEW_PTE /* ----G */ +#define NV_MMU_NEW_PTE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE 2:1 /* RWXVF */ +#define 
NV_MMU_NEW_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PTE_VOL 3:3 /* RWXVF */ +#define NV_MMU_NEW_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_ENCRYPTED 4:4 /* RWXVF */ +#define NV_MMU_NEW_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_NEW_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_NEW_PTE_PRIVILEGE 5:5 /* RWXVF */ +#define NV_MMU_NEW_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_READ_ONLY 6:6 /* RWXVF */ +#define NV_MMU_NEW_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_NEW_PTE_COMPTAGLINE (20+35):36 /* RWXVF */ +#define NV_MMU_NEW_PTE_KIND 63:56 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_PTE__SIZE 8 +#define NV_MMU_VER2_PDE /* ----G */ +#define NV_MMU_VER2_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER2_PDE_APERTURE_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PDE_VOL 3:3 /* RWXVF */ +#define NV_MMU_VER2_PDE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PDE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_NO_ATS 5:5 /* RWXVF */ +#define NV_MMU_VER2_PDE_NO_ATS_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_NO_ATS_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_PDE__SIZE 8 +#define NV_MMU_VER2_DUAL_PDE /* ----G */ 
+#define NV_MMU_VER2_DUAL_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_NO_ATS 5:5 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_NO_ATS_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_NO_ATS_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID_PEER 99:(100-3) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */ +#define NV_MMU_VER2_DUAL_PDE__SIZE 16 +#define NV_MMU_VER2_PTE /* ----G */ +#define NV_MMU_VER2_PTE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER2_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PTE_VOL 3:3 /* RWXVF */ +#define NV_MMU_VER2_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_ENCRYPTED 4:4 /* RWXVF */ +#define NV_MMU_VER2_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_VER2_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_VER2_PTE_PRIVILEGE 5:5 /* RWXVF */ +#define 
NV_MMU_VER2_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_READ_ONLY 6:6 /* RWXVF */ +#define NV_MMU_VER2_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_VER2_PTE_COMPTAGLINE (20+35):36 /* RWXVF */ +#define NV_MMU_VER2_PTE_KIND 63:56 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_PTE__SIZE 8 +#define NV_MMU_CLIENT /* ----G */ +#define NV_MMU_CLIENT_KIND 2:0 /* RWXVF */ +#define NV_MMU_CLIENT_KIND_Z16 0x1 /* R---V */ +#define NV_MMU_CLIENT_KIND_S8 0x2 /* R---V */ +#define NV_MMU_CLIENT_KIND_S8Z24 0x3 /* R---V */ +#define NV_MMU_CLIENT_KIND_ZF32_X24S8 0x4 /* R---V */ +#define NV_MMU_CLIENT_KIND_Z24S8 0x5 /* R---V */ +#define NV_MMU_CLIENT_KIND_GENERIC_MEMORY 0x6 /* R---V */ +#define NV_MMU_CLIENT_KIND_INVALID 0x7 /* R---V */ +#endif // __tu102_dev_mmu_h__ diff --git a/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_fault.h b/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_fault.h new file mode 100644 index 000000000..6981215ac --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_fault.h @@ -0,0 +1,263 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +#ifndef __gv100_dev_fault_h__ +#define __gv100_dev_fault_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_PFAULT_MMU_ENG_ID_GRAPHICS 64 /* */ +#define NV_PFAULT_MMU_ENG_ID_DISPLAY 1 /* */ +#define NV_PFAULT_MMU_ENG_ID_GSP 2 /* */ +#define NV_PFAULT_MMU_ENG_ID_IFB 8 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR1 4 /* */ +#define NV_PFAULT_MMU_ENG_ID_BAR2 5 /* */ +#define NV_PFAULT_MMU_ENG_ID_SEC 14 /* */ +#define NV_PFAULT_MMU_ENG_ID_PERF 9 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVDEC 10 /* */ +#define NV_PFAULT_MMU_ENG_ID_GRCOPY 15 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE0 15 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE1 16 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE2 17 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE3 18 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE4 19 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE5 20 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE6 21 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE7 22 /* */ +#define NV_PFAULT_MMU_ENG_ID_CE8 23 /* */ +#define NV_PFAULT_MMU_ENG_ID_PWR_PMU 6 /* */ +#define NV_PFAULT_MMU_ENG_ID_PTP 3 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC0 11 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC1 12 /* */ +#define NV_PFAULT_MMU_ENG_ID_NVENC2 13 /* */ +#define NV_PFAULT_MMU_ENG_ID_PHYSICAL 31 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST0 32 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST1 33 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST2 34 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST3 35 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST4 36 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST5 37 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST6 38 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST7 39 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST8 40 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST9 41 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST10 42 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST11 43 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST12 44 /* */ +#define NV_PFAULT_MMU_ENG_ID_HOST13 45 /* */ +#define NV_PFAULT_FAULT_TYPE 4:0 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE 0x00000000 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE_SIZE 0x00000001 /* */ +#define NV_PFAULT_FAULT_TYPE_PTE 0x00000002 /* */ +#define NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION 0x00000003 /* */ +#define NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK 0x00000004 /* */ +#define NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION 0x00000005 /* */ +#define NV_PFAULT_FAULT_TYPE_RO_VIOLATION 0x00000006 /* */ +#define NV_PFAULT_FAULT_TYPE_WO_VIOLATION 0x00000007 /* */ +#define NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION 0x00000008 /* */ +#define NV_PFAULT_FAULT_TYPE_WORK_CREATION 0x00000009 /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE 0x0000000a /* */ +#define NV_PFAULT_FAULT_TYPE_COMPRESSION_FAILURE 0x0000000b /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND 0x0000000c /* */ +#define NV_PFAULT_FAULT_TYPE_REGION_VIOLATION 0x0000000d /* */ +#define NV_PFAULT_FAULT_TYPE_POISONED 0x0000000e /* */ +#define NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION 0x0000000f /* */ +#define NV_PFAULT_CLIENT 14:8 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_0 0x00000000 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_1 0x00000001 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_2 0x00000002 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_3 0x00000003 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_4 0x00000004 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_5 0x00000005 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_6 0x00000006 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_7 0x00000007 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_0 0x00000008 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_1 0x00000009 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_2 0x0000000A /* */ +#define NV_PFAULT_CLIENT_GPC_PE_3 0x0000000B /* */ +#define NV_PFAULT_CLIENT_GPC_PE_4 0x0000000C /* */ +#define NV_PFAULT_CLIENT_GPC_PE_5 0x0000000D /* */ +#define 
NV_PFAULT_CLIENT_GPC_PE_6 0x0000000E /* */ +#define NV_PFAULT_CLIENT_GPC_PE_7 0x0000000F /* */ +#define NV_PFAULT_CLIENT_GPC_RAST 0x00000010 /* */ +#define NV_PFAULT_CLIENT_GPC_GCC 0x00000011 /* */ +#define NV_PFAULT_CLIENT_GPC_GPCCS 0x00000012 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_0 0x00000013 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_1 0x00000014 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_2 0x00000015 /* */ +#define NV_PFAULT_CLIENT_GPC_PROP_3 0x00000016 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_8 0x00000021 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_9 0x00000022 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_10 0x00000023 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_11 0x00000024 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_12 0x00000025 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_13 0x00000026 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_14 0x00000027 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_15 0x00000028 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_0 0x00000029 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_1 0x0000002A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_2 0x0000002B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_3 0x0000002C /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_4 0x0000002D /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_5 0x0000002E /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_6 0x0000002F /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_7 0x00000030 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_8 0x00000031 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_9 0x00000032 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_8 0x00000033 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_9 0x00000034 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_16 0x00000035 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_17 0x00000036 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_18 0x00000037 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_19 0x00000038 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_10 0x00000039 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_11 0x0000003A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_10 0x0000003B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_11 0x0000003C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_20 0x0000003D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_21 0x0000003E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_22 0x0000003F /* */ +#define NV_PFAULT_CLIENT_GPC_T1_23 0x00000040 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_12 0x00000041 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_13 0x00000042 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_12 0x00000043 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_13 0x00000044 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_24 0x00000045 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_25 0x00000046 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_26 0x00000047 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_27 0x00000048 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_14 0x00000049 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_15 0x0000004A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_14 0x0000004B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_15 0x0000004C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_28 0x0000004D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_29 0x0000004E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_30 0x0000004F /* */ +#define NV_PFAULT_CLIENT_GPC_T1_31 0x00000050 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_16 0x00000051 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_17 0x00000052 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_16 0x00000053 /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_17 0x00000054 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_32 0x00000055 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_33 0x00000056 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_34 0x00000057 /* */ +#define NV_PFAULT_CLIENT_GPC_T1_35 0x00000058 /* */ +#define NV_PFAULT_CLIENT_GPC_PE_18 0x00000059 /* */ +#define 
NV_PFAULT_CLIENT_GPC_PE_19 0x0000005A /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_18 0x0000005B /* */ +#define NV_PFAULT_CLIENT_GPC_TPCCS_19 0x0000005C /* */ +#define NV_PFAULT_CLIENT_GPC_T1_36 0x0000005D /* */ +#define NV_PFAULT_CLIENT_GPC_T1_37 0x0000005E /* */ +#define NV_PFAULT_CLIENT_GPC_T1_38 0x0000005F /* */ +#define NV_PFAULT_CLIENT_GPC_T1_39 0x00000060 /* */ +#define NV_PFAULT_CLIENT_GPC_GPM 0x00000017 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_0 0x00000018 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_1 0x00000019 /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_2 0x0000001A /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_3 0x0000001B /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_4 0x0000001C /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_5 0x0000001D /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_6 0x0000001E /* */ +#define NV_PFAULT_CLIENT_GPC_LTP_UTLB_7 0x0000001F /* */ +#define NV_PFAULT_CLIENT_GPC_RGG_UTLB 0x00000020 /* */ +#define NV_PFAULT_CLIENT_HUB_CE0 0x00000001 /* */ +#define NV_PFAULT_CLIENT_HUB_CE1 0x00000002 /* */ +#define NV_PFAULT_CLIENT_HUB_DNISO 0x00000003 /* */ +#define NV_PFAULT_CLIENT_HUB_FE 0x00000004 /* */ +#define NV_PFAULT_CLIENT_HUB_FECS 0x00000005 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST 0x00000006 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST_CPU 0x00000007 /* */ +#define NV_PFAULT_CLIENT_HUB_HOST_CPU_NB 0x00000008 /* */ +#define NV_PFAULT_CLIENT_HUB_ISO 0x00000009 /* */ +#define NV_PFAULT_CLIENT_HUB_MMU 0x0000000A /* */ +#define NV_PFAULT_CLIENT_HUB_NVDEC 0x0000000B /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC1 0x0000000D /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC2 0x00000033 /* */ +#define NV_PFAULT_CLIENT_HUB_NISO 0x0000000E /* */ +#define NV_PFAULT_CLIENT_HUB_P2P 0x0000000F /* */ +#define NV_PFAULT_CLIENT_HUB_PD 0x00000010 /* */ +#define NV_PFAULT_CLIENT_HUB_PERF 0x00000011 /* */ +#define NV_PFAULT_CLIENT_HUB_PMU 0x00000012 /* */ +#define NV_PFAULT_CLIENT_HUB_RASTERTWOD 0x00000013 /* */ +#define NV_PFAULT_CLIENT_HUB_SCC 0x00000014 /* */ +#define NV_PFAULT_CLIENT_HUB_SCC_NB 0x00000015 /* */ +#define NV_PFAULT_CLIENT_HUB_SEC 0x00000016 /* */ +#define NV_PFAULT_CLIENT_HUB_SSYNC 0x00000017 /* */ +#define NV_PFAULT_CLIENT_HUB_VIP 0x00000000 /* */ +#define NV_PFAULT_CLIENT_HUB_GRCOPY 0x00000018 /* */ +#define NV_PFAULT_CLIENT_HUB_CE2 0x00000018 /* */ +#define NV_PFAULT_CLIENT_HUB_XV 0x00000019 /* */ +#define NV_PFAULT_CLIENT_HUB_MMU_NB 0x0000001A /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC 0x0000001B /* */ +#define NV_PFAULT_CLIENT_HUB_NVENC0 0x0000001B /* */ +#define NV_PFAULT_CLIENT_HUB_DFALCON 0x0000001C /* */ +#define NV_PFAULT_CLIENT_HUB_SKED 0x0000001D /* */ +#define NV_PFAULT_CLIENT_HUB_AFALCON 0x0000001E /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE0 0x00000020 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE1 0x00000021 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE2 0x00000022 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE3 0x00000023 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE4 0x00000024 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE5 0x00000025 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE6 0x00000026 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE7 0x00000027 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE8 0x00000028 /* */ +#define NV_PFAULT_CLIENT_HUB_HSCE9 0x00000029 /* */ +#define NV_PFAULT_CLIENT_HUB_HSHUB 0x0000002A /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X0 0x0000002B /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X1 0x0000002C /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X2 0x0000002D /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X3 0x0000002E /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X4 0x0000002F /* */ +#define 
NV_PFAULT_CLIENT_HUB_PTP_X5 0x00000030 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X6 0x00000031 /* */ +#define NV_PFAULT_CLIENT_HUB_PTP_X7 0x00000032 /* */ +#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER0 0x00000034 /* */ +#define NV_PFAULT_CLIENT_HUB_VPR_SCRUBBER1 0x00000035 /* */ +#define NV_PFAULT_CLIENT_HUB_DWBIF 0x00000036 /* */ +#define NV_PFAULT_CLIENT_HUB_FBFALCON 0x00000037 /* */ +#define NV_PFAULT_CLIENT_HUB_CE_SHIM 0x00000038 /* */ +#define NV_PFAULT_CLIENT_HUB_GSP 0x00000039 /* */ +#define NV_PFAULT_CLIENT_HUB_DONT_CARE 0x0000001F /* */ +#define NV_PFAULT_ACCESS_TYPE 19:16 /* */ +#define NV_PFAULT_ACCESS_TYPE_READ 0x00000000 /* */ +#define NV_PFAULT_ACCESS_TYPE_WRITE 0x00000001 /* */ +#define NV_PFAULT_ACCESS_TYPE_ATOMIC 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_PREFETCH 0x00000003 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_READ 0x00000000 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_WRITE 0x00000001 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_STRONG 0x00000002 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_PREFETCH 0x00000003 /* */ +#define NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_WEAK 0x00000004 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_READ 0x00000008 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_WRITE 0x00000009 /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_ATOMIC 0x0000000a /* */ +#define NV_PFAULT_ACCESS_TYPE_PHYS_PREFETCH 0x0000000b /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE 20:20 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE_GPC 0x00000000 /* */ +#define NV_PFAULT_MMU_CLIENT_TYPE_HUB 0x00000001 /* */ +#define NV_PFAULT_GPC_ID 28:24 /* */ +#define NV_PFAULT_PROTECTED_MODE 29:29 /* */ +#define NV_PFAULT_ATS_FAULT 30:30 /* */ +#define NV_PFAULT_VALID 31:31 /* */ +#endif // __gv100_dev_fault_h__ diff --git a/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_fb.h b/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_fb.h new file mode 100644 index 000000000..ed1c3bfe2 --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_fb.h @@ -0,0 +1,103 @@ +/******************************************************************************* + Copyright (c) 2017 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +// Excerpt of gv100/dev_fb.h + +#ifndef __dev_fb_h__ +#define __dev_fb_h__ + +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO(i) (0x00100E24+(i)*20) /* RW-4A */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO__SIZE_1 2 /* */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_ADDR_MODE 0:0 /* RW-VF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_ADDR_MODE_VIRTUAL 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_ADDR_MODE_PHYSICAL 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_PHYS_APERTURE 2:1 /* RW-VF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_PHYS_APERTURE_LOCAL 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_PHYS_APERTURE_SYS_COH 0x00000002 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_PHYS_APERTURE_SYS_NCOH 0x00000003 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_PHYS_VOL 3:3 /* RW-VF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_LO_ADDR 31:12 /* RW-VF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_HI(i) (0x00100E28+(i)*20) /* RW-4A */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_HI__SIZE_1 2 /* */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_HI_ADDR 31:0 /* RW-VF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET(i) (0x00100E2C+(i)*20) /* RW-4A */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET__SIZE_1 2 /* */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_PTR 19:0 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_PTR_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED 30:30 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_NO 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_YES 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_OVERFLOW 31:31 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_OVERFLOW_NO 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_OVERFLOW_YES 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_OVERFLOW_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT(i) (0x00100E30+(i)*20) /* R--4A */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT__SIZE_1 2 /* */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_PTR 19:0 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_PTR_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED 30:30 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED_NO 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED_YES 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_OVERFLOW 31:31 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_OVERFLOW_NO 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_OVERFLOW_YES 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE(i) (0x00100E34+(i)*20) /* RW-4A */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE__SIZE_1 2 /* */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_VAL 19:0 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_VAL_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_OVERFLOW_INTR 29:29 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_OVERFLOW_INTR_DISABLE 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_OVERFLOW_INTR_ENABLE 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_SET_DEFAULT 30:30 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_SET_DEFAULT_NO 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_SET_DEFAULT_YES 0x00000001 /* RW--V 
*/ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_ENABLE 31:31 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_ENABLE_FALSE 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_SIZE_ENABLE_TRUE 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_LO 0x00100E4C /* R--4R */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_LO_PHYS_APERTURE 1:0 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_LO_PHYS_APERTURE_LOCAL 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_LO_PHYS_APERTURE_PEER 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_LO_PHYS_APERTURE_SYS_COH 0x00000002 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_LO_PHYS_APERTURE_SYS_NCOH 0x00000003 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_LO_ADDR 31:12 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_LO_ADDR_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_HI 0x00100E50 /* R--4R */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_HI_ADDR 31:0 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_ADDR_HI_ADDR_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO 0x00100E54 /* R--4R */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_ENGINE_ID 8:0 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_ENGINE_ID_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_APERTURE 11:10 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_APERTURE_VID_MEM 0x00000000 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_APERTURE_SYS_MEM_COHERENT 0x00000002 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_APERTURE_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_ADDR 31:12 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_INST_LO_ADDR_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_INST_HI 0x00100E58 /* R--4R */ +#define NV_PFB_PRI_MMU_FAULT_INST_HI_ADDR 31:0 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_INST_HI_ADDR_RESET 0x00000000 /* R-E-V */ + +#endif diff --git a/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_mmu.h b/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_mmu.h new file mode 100644 index 000000000..690383957 --- /dev/null +++ b/kernel-open/nvidia-uvm/hwref/volta/gv100/dev_mmu.h @@ -0,0 +1,661 @@ +/******************************************************************************* + Copyright (c) 2003-2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + +#ifndef __gv100_dev_mmu_h__ +#define __gv100_dev_mmu_h__ +/* This file is autogenerated. Do not edit */ +#define NV_MMU_PDE /* ----G */ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE /* ----G */ +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define 
NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ATOMIC_DISABLE (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+18+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_COMPTAGS_NONE 0x0 /* */ +#define NV_MMU_PTE_COMPTAGS_1 0x1 /* */ +#define NV_MMU_PTE_COMPTAGS_2 0x2 /* */ +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0xff /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_Z16 0x01 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2C 0x02 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2C 0x03 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2C 0x04 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2C 0x05 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2C 0x06 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2Z 0x07 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2Z 0x08 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2Z 0x09 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2Z 0x0a /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2Z 0x0b /* R---V */ +#define NV_MMU_PTE_KIND_Z16_2CZ 0x36 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_2CZ 0x37 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_2CZ 0x38 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_2CZ 0x39 /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_2CZ 0x5f /* R---V */ +#define NV_MMU_PTE_KIND_Z16_4CZ 0x0c /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS2_4CZ 0x0d /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS4_4CZ 0x0e /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS8_4CZ 0x0f /* R---V */ +#define NV_MMU_PTE_KIND_Z16_MS16_4CZ 0x10 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24 0x11 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_1Z 0x12 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_1Z 0x13 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_1Z 0x14 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_1Z 0x15 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_1Z 0x16 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_2CZ 0x17 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_2CZ 0x18 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_2CZ 0x19 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_2CZ 0x1a /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_2CZ 0x1b /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_2CS 0x1c /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_2CS 0x1d /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_2CS 0x1e /* R---V */ +#define 
NV_MMU_PTE_KIND_S8Z24_MS8_2CS 0x1f /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_2CS 0x20 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_4CSZV 0x21 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS2_4CSZV 0x22 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS4_4CSZV 0x23 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS8_4CSZV 0x24 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_MS16_4CSZV 0x25 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12 0x26 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4 0x27 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8 0x28 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24 0x29 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_1ZV 0x2e /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_1ZV 0x2f /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_1ZV 0x30 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_1ZV 0x31 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2CS 0x32 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2CS 0x33 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2CS 0x34 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2CS 0x35 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2CZV 0x3a /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2CZV 0x3b /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2CZV 0x3c /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2CZV 0x3d /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_2ZV 0x3e /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_2ZV 0x3f /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_2ZV 0x40 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_2ZV 0x41 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC12_4CSZV 0x42 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS4_VC4_4CSZV 0x43 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC8_4CSZV 0x44 /* R---V */ +#define NV_MMU_PTE_KIND_V8Z24_MS8_VC24_4CSZV 0x45 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8 0x46 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_1Z 0x47 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_1Z 0x48 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_1Z 0x49 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_1Z 0x4a /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_1Z 0x4b /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_2CS 0x4c /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_2CS 0x4d /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_2CS 0x4e /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_2CS 0x4f /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_2CS 0x50 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_2CZ 0x51 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_2CZ 0x52 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_2CZ 0x53 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_2CZ 0x54 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_2CZ 0x55 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_4CSZV 0x56 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS2_4CSZV 0x57 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS4_4CSZV 0x58 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS8_4CSZV 0x59 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_MS16_4CSZV 0x5a /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12 0x5b /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4 0x5c /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8 0x5d /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24 0x5e /* R---V */ +#define NV_MMU_PTE_KIND_YUV_B8C1_2Y 0x60 /* R---V */ +#define NV_MMU_PTE_KIND_YUV_B8C2_2Y 0x61 /* R---V */ +#define NV_MMU_PTE_KIND_YUV_B10C1_2Y 0x62 /* R---V */ +#define NV_MMU_PTE_KIND_YUV_B10C2_2Y 0x6b /* R---V */ +#define NV_MMU_PTE_KIND_YUV_B12C1_2Y 0x6c /* R---V */ +#define NV_MMU_PTE_KIND_YUV_B12C2_2Y 0x6d /* R---V */ 
+#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_1ZV 0x63 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_1ZV 0x64 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_1ZV 0x65 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_1ZV 0x66 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2CS 0x67 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2CS 0x68 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2CS 0x69 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2CS 0x6a /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2CZV 0x6f /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2CZV 0x70 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2CZV 0x71 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2CZV 0x72 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_2ZV 0x73 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_2ZV 0x74 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_2ZV 0x75 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_2ZV 0x76 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC12_4CSZV 0x77 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS4_VC4_4CSZV 0x78 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC8_4CSZV 0x79 /* R---V */ +#define NV_MMU_PTE_KIND_Z24V8_MS8_VC24_4CSZV 0x7a /* R---V */ +#define NV_MMU_PTE_KIND_ZF32 0x7b /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_1Z 0x7c /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_1Z 0x7d /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_1Z 0x7e /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_1Z 0x7f /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_1Z 0x80 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_2CS 0x81 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_2CS 0x82 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_2CS 0x83 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_2CS 0x84 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_2CS 0x85 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_2CZ 0x86 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS2_2CZ 0x87 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS4_2CZ 0x88 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS8_2CZ 0x89 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_MS16_2CZ 0x8a /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12 0x8b /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4 0x8c /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8 0x8d /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24 0x8e /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1CS 0x8f /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1CS 0x90 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1CS 0x91 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1CS 0x92 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1ZV 0x97 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1ZV 0x98 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1ZV 0x99 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1ZV 0x9a /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_1CZV 0x9b /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_1CZV 0x9c /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_1CZV 0x9d /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_1CZV 0x9e /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_2CS 0x9f /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_2CS 0xa0 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_2CS 0xa1 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_2CS 0xa2 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC12_2CSZV 0xa3 /* R---V */ +#define 
NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS4_VC4_2CSZV 0xa4 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC8_2CSZV 0xa5 /* R---V */ +#define NV_MMU_PTE_KIND_X8Z24_X16V8S8_MS8_VC24_2CSZV 0xa6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12 0xa7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4 0xa8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8 0xa9 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24 0xaa /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1CS 0xab /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1CS 0xac /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1CS 0xad /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1CS 0xae /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1ZV 0xb3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1ZV 0xb4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1ZV 0xb5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1ZV 0xb6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_1CZV 0xb7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_1CZV 0xb8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_1CZV 0xb9 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_1CZV 0xba /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_2CS 0xbb /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_2CS 0xbc /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_2CS 0xbd /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_2CS 0xbe /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC12_2CSZV 0xbf /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS4_VC4_2CSZV 0xc0 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC8_2CSZV 0xc1 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X16V8S8_MS8_VC24_2CSZV 0xc2 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8 0xc3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_1CS 0xc4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_1CS 0xc5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_1CS 0xc6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_1CS 0xc7 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_1CS 0xc8 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_2CSZV 0xce /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_2CSZV 0xcf /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_2CSZV 0xd0 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_2CSZV 0xd1 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_2CSZV 0xd2 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_2CS 0xd3 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS2_2CS 0xd4 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS4_2CS 0xd5 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS8_2CS 0xd6 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_MS16_2CS 0xd7 /* R---V */ +#define NV_MMU_PTE_KIND_S8 0x2a /* R---V */ +#define NV_MMU_PTE_KIND_S8_2S 0x2b /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_16BX2 0xfe /* R---V */ +#define NV_MMU_PTE_KIND_C32_2C 0xd8 /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CBR 0xd9 /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CBA 0xda /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CRA 0xdb /* R---V */ +#define NV_MMU_PTE_KIND_C32_2BRA 0xdc /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2C 0xdd /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_2CBR 0xde /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS2_4CBRA 0xcc /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2C 0xdf /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBR 0xe0 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBA 0xe1 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CRA 
0xe2 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2BRA 0xe3 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_4CBRA 0x2c /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2C 0xe4 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2CRA 0xe5 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2C 0xe6 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CBR 0xe7 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CBA 0xe8 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CRA 0xe9 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2BRA 0xea /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2C 0xeb /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_2CBR 0xec /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS2_4CBRA 0xcd /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2C 0xed /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBR 0xee /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBA 0xef /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CRA 0xf0 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2BRA 0xf1 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_4CBRA 0x2d /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2C 0xf2 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2CRA 0xf3 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2C 0xf4 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2CR 0xf5 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS2_2C 0xf6 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS2_2CR 0xf7 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2C 0xf8 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2CR 0xf9 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2C 0xfa /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2CR 0xfb /* R---V */ +#define NV_MMU_PTE_KIND_X8C24 0xfc /* R---V */ +#define NV_MMU_PTE_KIND_PITCH_NO_SWIZZLE 0xfd /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0xca /* R---V */ +#define NV_MMU_PTE_KIND_SMHOST_MESSAGE 0xcb /* R---V */ +#define NV_MMU_VER1_PDE /* ----G */ +#define NV_MMU_VER1_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_VER1_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_VER1_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_VER1_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ 
+#define NV_MMU_VER1_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID_PEER (1*32+31):(1*32+32-3) /* RWXVF */ +#define NV_MMU_VER1_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER1_PDE__SIZE 8 +#define NV_MMU_VER1_PTE /* ----G */ +#define NV_MMU_VER1_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_VER1_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_VER1_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_VER1_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_VER1_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_VER1_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_VER1_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_VER1_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_VER1_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_VER1_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER1_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER1_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER1_PTE_COMPTAGLINE (1*32+18+11):(1*32+12) /* RWXVF */ +#define NV_MMU_VER1_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_VER1_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER1_PTE__SIZE 8 +#define NV_MMU_VER1_PTE_COMPTAGS_NONE 0x0 /* */ +#define NV_MMU_VER1_PTE_COMPTAGS_1 0x1 /* */ +#define NV_MMU_VER1_PTE_COMPTAGS_2 0x2 /* */ +#define NV_MMU_NEW_PDE /* ----G */ +#define NV_MMU_NEW_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE 2:1 /* RWXVF */ +#define 
NV_MMU_NEW_PDE_APERTURE_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PDE_VOL 3:3 /* RWXVF */ +#define NV_MMU_NEW_PDE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PDE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_NO_ATS 5:5 /* RWXVF */ +#define NV_MMU_NEW_PDE_NO_ATS_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PDE_NO_ATS_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PDE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_PDE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_PDE__SIZE 8 +#define NV_MMU_NEW_DUAL_PDE /* ----G */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_NO_ATS 5:5 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_NO_ATS_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_NO_ATS_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID_PEER 99:(100-3) /* RWXVF */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */ +#define NV_MMU_NEW_DUAL_PDE__SIZE 16 +#define NV_MMU_NEW_PTE /* ----G */ 
+#define NV_MMU_NEW_PTE_VALID 0:0 /* RWXVF */ +#define NV_MMU_NEW_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_NEW_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_NEW_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PTE_VOL 3:3 /* RWXVF */ +#define NV_MMU_NEW_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_ENCRYPTED 4:4 /* RWXVF */ +#define NV_MMU_NEW_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_NEW_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_NEW_PTE_PRIVILEGE 5:5 /* RWXVF */ +#define NV_MMU_NEW_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_READ_ONLY 6:6 /* RWXVF */ +#define NV_MMU_NEW_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_NEW_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_NEW_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_NEW_PTE_COMPTAGLINE (18+35):36 /* RWXVF */ +#define NV_MMU_NEW_PTE_KIND 63:56 /* RWXVF */ +#define NV_MMU_NEW_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_NEW_PTE__SIZE 8 +#define NV_MMU_VER2_PDE /* ----G */ +#define NV_MMU_VER2_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER2_PDE_APERTURE_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PDE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PDE_VOL 3:3 /* RWXVF */ +#define NV_MMU_VER2_PDE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PDE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_NO_ATS 5:5 /* RWXVF */ +#define NV_MMU_VER2_PDE_NO_ATS_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PDE_NO_ATS_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PDE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID_PEER 35:(36-3) /* 
RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_PDE__SIZE 8 +#define NV_MMU_VER2_DUAL_PDE /* ----G */ +#define NV_MMU_VER2_DUAL_PDE_IS_PTE 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_IS_PTE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PTE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE_TRUE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_IS_PDE_FALSE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_NO_ATS 5:5 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_NO_ATS_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_NO_ATS_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID_PEER 99:(100-3) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */ +#define NV_MMU_VER2_DUAL_PDE__SIZE 16 +#define NV_MMU_VER2_PTE /* ----G */ +#define NV_MMU_VER2_PTE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER2_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PTE_VOL 3:3 /* RWXVF */ +#define NV_MMU_VER2_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_ENCRYPTED 4:4 /* RWXVF */ 
+#define NV_MMU_VER2_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_VER2_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_VER2_PTE_PRIVILEGE 5:5 /* RWXVF */ +#define NV_MMU_VER2_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_READ_ONLY 6:6 /* RWXVF */ +#define NV_MMU_VER2_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_VER2_PTE_COMPTAGLINE (18+35):36 /* RWXVF */ +#define NV_MMU_VER2_PTE_KIND 63:56 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_PTE__SIZE 8 +#define NV_MMU_BASIC /* ----G */ +#define NV_MMU_BASIC_KIND 3:0 /* RWXVF */ +#define NV_MMU_BASIC_KIND_TRANSPARENT 0x0 /* R---V */ +#define NV_MMU_BASIC_KIND_GENERIC 0x1 /* R---V */ +#define NV_MMU_BASIC_KIND_S8 0x2 /* R---V */ +#define NV_MMU_BASIC_KIND_Z16 0x3 /* R---V */ +#define NV_MMU_BASIC_KIND_Z24S8 0x4 /* R---V */ +#define NV_MMU_BASIC_KIND_ZF32 0x5 /* R---V */ +#define NV_MMU_BASIC_KIND_ZF32_X24S8 0x6 /* R---V */ +#define NV_MMU_BASIC_KIND_RSVRD0 0x7 /* R---V */ +#define NV_MMU_BASIC_KIND_PITCH 0x8 /* R---V */ +#define NV_MMU_BASIC_KIND_GENERIC_C 0x9 /* R---V */ +#define NV_MMU_BASIC_KIND_S8_C 0xa /* R---V */ +#define NV_MMU_BASIC_KIND_Z16_C 0xb /* R---V */ +#define NV_MMU_BASIC_KIND_Z24S8_C 0xc /* R---V */ +#define NV_MMU_BASIC_KIND_ZF32_C 0xd /* R---V */ +#define NV_MMU_BASIC_KIND_ZF32_X24S8_C 0xe /* R---V */ +#define NV_MMU_BASIC_KIND_INVALID 0xf /* R---V */ +#endif // __gv100_dev_mmu_h__ diff --git a/kernel-open/nvidia-uvm/nv-kthread-q-selftest.c b/kernel-open/nvidia-uvm/nv-kthread-q-selftest.c new file mode 100644 index 000000000..bb67754e7 --- /dev/null +++ b/kernel-open/nvidia-uvm/nv-kthread-q-selftest.c @@ -0,0 +1,577 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "nv-kthread-q.h" +#include +#include +#include +#include +#include +#include +#include + +// If NV_BUILD_MODULE_INSTANCES is not defined, do it here in order to avoid +// build warnings/errors when including nv-linux.h as it expects the definition +// to be present. +#ifndef NV_BUILD_MODULE_INSTANCES +#define NV_BUILD_MODULE_INSTANCES 1 +#endif +#include "nv-linux.h" + +// Below are just a very few lines of printing and test assertion support. +// It is important to avoid dependencies on other modules, because nv-kthread-q +// and its self test are supposed to only require: +// +// -- Linux kernel functions and macros +// +// In order to avoid external dependencies (specifically, NV_STATUS codes), all +// functions in this file return non-zero upon failure, and zero upon success. + +#ifndef NVIDIA_PRETTY_PRINTING_PREFIX + #define NVIDIA_PRETTY_PRINTING_PREFIX "nvidia: " +#endif + +// This prints even on release builds: +#define NVQ_TEST_PRINT(fmt, ...) \ + printk(KERN_INFO NVIDIA_PRETTY_PRINTING_PREFIX "%s:%u[pid:%d] " fmt, \ + __FUNCTION__, \ + __LINE__, \ + current->pid, \ + ##__VA_ARGS__) + +// Caution: This macro will return out of the current scope +#define TEST_CHECK_RET(cond) \ + do { \ + if (unlikely(!(cond))) { \ + NVQ_TEST_PRINT("Test check failed, condition '%s' not true\n", \ + #cond); \ + on_nvq_assert(); \ + return -1; \ + } \ + } while(0) + +// Most test failures will do things such as just hang or crash. However, in +// order to detect bugs that are less fatal, simply count how many queue items +// actually ran. 
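// Illustrative sketch (editorial addition, not part of the original selftest
// file): the minimal nv_kthread_q lifecycle that the tests below exercise,
// restricted to the calls already used in this file (init, item_init,
// schedule_q_item, flush, stop). The example_* names are hypothetical.
static void example_counting_callback(void *args)
{
    atomic_inc((atomic_t *)args);
}

static int example_q_lifecycle(void)
{
    nv_kthread_q_t q;
    nv_kthread_q_item_t item;
    atomic_t counter;

    atomic_set(&counter, 0);

    // nv_kthread_q_init() returns 0 on success.
    if (nv_kthread_q_init(&q, "example_q") != 0)
        return -1;

    nv_kthread_q_item_init(&item, example_counting_callback, &counter);

    // nv_kthread_q_schedule_q_item() returns nonzero if the item was queued.
    if (!nv_kthread_q_schedule_q_item(&q, &item)) {
        nv_kthread_q_stop(&q);
        return -1;
    }

    // Flushing waits for pending callbacks; stop also flushes before teardown.
    nv_kthread_q_flush(&q);
    nv_kthread_q_stop(&q);

    return (atomic_read(&counter) == 1) ? 0 : -1;
}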
+ +#define NUM_Q_ITEMS_IN_BASIC_TEST 6 +#define NUM_RESCHEDULE_CALLBACKS 10 +#define NUM_TEST_Q_ITEMS (100 * 1000) +#define NUM_TEST_KTHREADS 8 +#define NUM_Q_ITEMS_IN_MULTITHREAD_TEST (NUM_TEST_Q_ITEMS * NUM_TEST_KTHREADS) + +// This exists in order to have a function to place a breakpoint on: +void on_nvq_assert(void) +{ + (void)NULL; +} + +//////////////////////////////////////////////////////////////////////////////// +// Basic start-stop test + +typedef struct basic_start_stop_args +{ + int value_to_write; + int *where_to_write; +} basic_start_stop_args_t; + +static void _basic_start_stop_callback(void *args) +{ + basic_start_stop_args_t *start_stop_args = (basic_start_stop_args_t*)args; + + *start_stop_args->where_to_write = start_stop_args->value_to_write; +} + +static int _basic_start_stop_test(void) +{ + int i, was_scheduled; + int result = 0; + nv_kthread_q_item_t q_item[NUM_Q_ITEMS_IN_BASIC_TEST]; + int callback_values_written[NUM_Q_ITEMS_IN_BASIC_TEST]; + basic_start_stop_args_t start_stop_args[NUM_Q_ITEMS_IN_BASIC_TEST]; + nv_kthread_q_t local_q; + + // Do a redundant stop to ensure stop is supported on zero-initialized memory. + // No crash should occur. + memset(&local_q, 0, sizeof(nv_kthread_q_t)); + nv_kthread_q_stop(&local_q); + + // Do a quick start-stop cycle first: + result = nv_kthread_q_init(&local_q, "q_to_stop"); + TEST_CHECK_RET(result == 0); + nv_kthread_q_stop(&local_q); + + // Call nv_kthread_q_stop again; it should neither crash nor misbehave. + nv_kthread_q_stop(&local_q); + + memset(&start_stop_args, 0, sizeof(start_stop_args)); + memset(callback_values_written, 0, sizeof(callback_values_written)); + + // Give each q_item its own args instance and destination slot: + for (i = 0; i < NUM_Q_ITEMS_IN_BASIC_TEST; ++i) { + start_stop_args[i].value_to_write = i; + start_stop_args[i].where_to_write = &callback_values_written[i]; + } + + result = nv_kthread_q_init(&local_q, "basic_q"); + TEST_CHECK_RET(result == 0); + + // Launch 3 items, then flush the queue. + // + // Each iteration sends a different instance of args to the callback + // function.
+ for (i = 0; i < 3; ++i) { + nv_kthread_q_item_init(&q_item[i], + _basic_start_stop_callback, + &start_stop_args[i]); + + was_scheduled = nv_kthread_q_schedule_q_item(&local_q, &q_item[i]); + result |= (!was_scheduled); + } + + // It is legal to flush more than once, so flush twice in a row: + nv_kthread_q_flush(&local_q); + nv_kthread_q_flush(&local_q); + + // Launch the remaining items, then stop (which flushes) the queue: + for (i = 3; i < NUM_Q_ITEMS_IN_BASIC_TEST; ++i) { + nv_kthread_q_item_init(&q_item[i], + _basic_start_stop_callback, + &start_stop_args[i]); + + was_scheduled = nv_kthread_q_schedule_q_item(&local_q, &q_item[i]); + result |= (!was_scheduled); + } + + nv_kthread_q_stop(&local_q); + + // Verify that all the callbacks ran and wrote their values: + for (i = 0; i < NUM_Q_ITEMS_IN_BASIC_TEST; ++i) + TEST_CHECK_RET(callback_values_written[i] == i); + + return result; +} + +//////////////////////////////////////////////////////////////////////////////// +// Multithreaded test + +typedef struct multithread_args +{ + nv_kthread_q_t *test_q; + atomic_t *test_wide_accumulator; + atomic_t per_thread_accumulator; +} multithread_args_t; + +static void _multithread_callback(void *args) +{ + multithread_args_t *multithread_args = (multithread_args_t*)(args); + atomic_inc(multithread_args->test_wide_accumulator); + atomic_inc(&multithread_args->per_thread_accumulator); +} + +// +// Return values: +// 0: Success +// -ENOMEM: vmalloc failed +// -EINVAL: test failed due to mismatched accumulator counts +// +static int _multithreaded_q_kthread_function(void *args) +{ + int i, was_scheduled; + int result = 0; + int per_thread_count; + int test_wide_count; + multithread_args_t *multithread_args = (multithread_args_t*)args; + nv_kthread_q_item_t *q_items; + size_t alloc_size = NUM_TEST_Q_ITEMS * sizeof(nv_kthread_q_item_t); + + q_items = vmalloc(alloc_size); + if (!q_items) { + result = -ENOMEM; + goto done; + } + + memset(q_items, 0, alloc_size); + + for (i = 0; i < NUM_TEST_Q_ITEMS; ++i) { + nv_kthread_q_item_init(&q_items[i], + _multithread_callback, + multithread_args); + + was_scheduled = nv_kthread_q_schedule_q_item(multithread_args->test_q, + &q_items[i]); + result |= (!was_scheduled); + } + + nv_kthread_q_flush(multithread_args->test_q); + + per_thread_count = atomic_read(&multithread_args->per_thread_accumulator); + if (per_thread_count != NUM_TEST_Q_ITEMS) { + NVQ_TEST_PRINT("per_thread_count: Expected: %d, actual: %d\n", + NUM_TEST_Q_ITEMS, per_thread_count); + goto done; + } + + test_wide_count = atomic_read(multithread_args->test_wide_accumulator); + if (test_wide_count < NUM_TEST_Q_ITEMS) { + NVQ_TEST_PRINT("test_wide_count: Expected: >= %d, actual: %d\n", + NUM_TEST_Q_ITEMS, test_wide_count); + goto done; + } + +done: + if (q_items) + vfree(q_items); + + while (!kthread_should_stop()) + schedule(); + + return result; +} + +static int _multithreaded_q_test(void) +{ + int i, j; + int result = 0; + struct task_struct *kthreads[NUM_TEST_KTHREADS]; + multithread_args_t multithread_args[NUM_TEST_KTHREADS]; + nv_kthread_q_t local_q; + atomic_t local_accumulator; + + memset(multithread_args, 0, sizeof(multithread_args)); + memset(kthreads, 0, sizeof(kthreads)); + atomic_set(&local_accumulator, 0); + + result = nv_kthread_q_init(&local_q, "multithread_test_q"); + TEST_CHECK_RET(result == 0); + + for (i = 0; i < NUM_TEST_KTHREADS; ++i) { + multithread_args[i].test_q = &local_q; + multithread_args[i].test_wide_accumulator = &local_accumulator; + + kthreads[i] = 
kthread_run(_multithreaded_q_kthread_function, + &multithread_args[i], + "nvq_test_kthread"); + + if (IS_ERR(kthreads[i])) + goto failed; + } + + // Stop all of the test kthreads, then stop the queue. Collect any + // non-zero (failure) return values from the kthreads, and use those + // later to report a test failure. + for (i = 0; i < NUM_TEST_KTHREADS; ++i) { + result |= kthread_stop(kthreads[i]); + } + + nv_kthread_q_stop(&local_q); + + TEST_CHECK_RET(atomic_read(&local_accumulator) == + NUM_Q_ITEMS_IN_MULTITHREAD_TEST); + return result; + +failed: + NVQ_TEST_PRINT("kthread_run[%d] failed: errno: %ld\n", + i, PTR_ERR(kthreads[i])); + + // Stop any threads that had successfully started: + for (j = 0; j < i; ++j) + kthread_stop(kthreads[j]); + + nv_kthread_q_stop(&local_q); + return -1; +} + +//////////////////////////////////////////////////////////////////////////////// +// Self-rescheduling test + +typedef struct resched_args +{ + nv_kthread_q_t test_q; + nv_kthread_q_item_t q_item; + atomic_t accumulator; + atomic_t stop_rescheduling_callbacks; + int test_failure; +} resched_args_t; + +static void _reschedule_callback(void *args) +{ + int was_scheduled; + resched_args_t *resched_args = (resched_args_t*)args; + + // This test promises to add one to accumulator, for each time through. + atomic_inc(&resched_args->accumulator); + + if (atomic_read(&resched_args->stop_rescheduling_callbacks) == 0) { + nv_kthread_q_item_init(&resched_args->q_item, + _reschedule_callback, + resched_args); + + was_scheduled = nv_kthread_q_schedule_q_item(&resched_args->test_q, + &resched_args->q_item); + if (!was_scheduled) { + resched_args->test_failure = 1; + } + } + + // Ensure thread relinquishes control else we hang in single-core environments + schedule(); +} + +// Verify that re-scheduling the same q_item, from within its own +// callback, works. +static int _reschedule_same_item_from_its_own_callback_test(void) +{ + int was_scheduled; + int result = 0; + resched_args_t resched_args; + + memset(&resched_args, 0, sizeof(resched_args)); + + result = nv_kthread_q_init(&resched_args.test_q, "resched_test_q"); + TEST_CHECK_RET(result == 0); + + nv_kthread_q_item_init(&resched_args.q_item, + _reschedule_callback, + &resched_args); + + was_scheduled = nv_kthread_q_schedule_q_item(&resched_args.test_q, + &resched_args.q_item); + result |= (!was_scheduled); + + // Wait for a few callback items to run + while(atomic_read(&resched_args.accumulator) < NUM_RESCHEDULE_CALLBACKS) + schedule(); + + // Stop the callbacks from rescheduling themselves. This requires two + // levels of flushing: one flush to wait for any callbacks that missed + // the .stop_rescheduling_callbacks change, and another for any pending + // callbacks that were scheduled from within the callback. + atomic_set(&resched_args.stop_rescheduling_callbacks, 1); + + // Stop the queue. This is guaranteed to do a (double) flush, and that + // flush takes care of any pending callbacks that we rescheduled from + // within the callback function. 
+ nv_kthread_q_stop(&resched_args.test_q); + + return (result || resched_args.test_failure); +} + +//////////////////////////////////////////////////////////////////////////////// +// Rescheduling the exact same q_item test + +typedef struct same_q_item_args +{ + atomic_t test_accumulator; +} same_q_item_args_t; + +static void _same_q_item_callback(void *args) +{ + same_q_item_args_t *same_q_item_args = (same_q_item_args_t*)(args); + atomic_inc(&same_q_item_args->test_accumulator); +} + +static int _same_q_item_test(void) +{ + int result, i; + int num_scheduled = 0; + same_q_item_args_t same_q_item_args; + nv_kthread_q_t local_q; + nv_kthread_q_item_t q_item; + + memset(&same_q_item_args, 0, sizeof(same_q_item_args)); + + result = nv_kthread_q_init(&local_q, "same_q_item_test_q"); + TEST_CHECK_RET(result == 0); + + nv_kthread_q_item_init(&q_item, + _same_q_item_callback, + &same_q_item_args); + + // Attempt to queue up many copies of the same q_item, then stop the queue. + // This is an attempt to launch enough q_items that at least some of them + // end up being pending in the queue, and exercise the "if already pending" + // logic. + // + // Some manual testing indicates that launching 1000 q_items in a tight loop + // causes between 1 and 20 copies to run. Obviously this is extremely + // dependent on the particular test machine and kernel and more, but it + // shows that 1000 is not unreasonable. + for (i = 0; i < 1000; ++i) + num_scheduled += nv_kthread_q_schedule_q_item(&local_q, &q_item); + + nv_kthread_q_stop(&local_q); + + // At least one item will have run, but not necessarily any more than that. + TEST_CHECK_RET(atomic_read(&same_q_item_args.test_accumulator) >= 1); + TEST_CHECK_RET(atomic_read(&same_q_item_args.test_accumulator) == num_scheduled); + + return 0; +} + +// Returns true if any of the stack pages are not resident on the indicated node. +static bool stack_mismatch(const struct task_struct *thread, int preferred_node) +{ + unsigned num_stack_pages, i; + char* stack = (char*) thread->stack; + + // If the stack has not been allocated using vmalloc, the physical pages + // are all on the same node, so just check the first page + if (!is_vmalloc_addr(stack)) { + struct page *page = virt_to_page(stack); + int node = page_to_nid(page); + + return node != preferred_node; + } + + num_stack_pages = THREAD_SIZE >> PAGE_SHIFT; + + // The physical pages backing the stack may be discontiguous, so check them + // all. + for (i = 0; i < num_stack_pages; i++) { + char *curr_stack_page = stack + i * PAGE_SIZE; + struct page *page = vmalloc_to_page(curr_stack_page); + int node = page_to_nid(page); + + if (node != preferred_node) + return true; + } + + return false; +} + +static void _check_cpu_affinity_callback(void *args) +{ + struct task_struct *thread = get_current(); + int *preferred_node = (int *) args; + int *ret = preferred_node + 1; + + *ret = stack_mismatch(thread, *preferred_node); +} + +// Verify that the stack of the kernel thread created by +// nv_kthread_q_init_on_node is resident on the specified NUMA node. +// +// nv_kthread_q_init_on_node does not guarantee that the thread's stack +// will be resident on the passed node, but in practice the preference is mostly +// honored so we invoke the function multiple times and allow a percentage of +// failures per node. +static int _check_cpu_affinity_test(void) +{ + int result, node; + nv_kthread_q_t local_q; + + // If the API does not support CPU affinity, check whether the correct + // error code is returned. 
+ // Non-affinitized queue allocation has been verified by previous test + // so just ensure that the affinitized version also works. + if (!NV_KTHREAD_Q_SUPPORTS_AFFINITY()) { + result = nv_kthread_q_init_on_node(&local_q, "should_fail", 0); + TEST_CHECK_RET(result == -ENOTSUPP); + return 0; + } + + for_each_online_node(node) { + unsigned i; + const unsigned max_i = 100; + unsigned stack_mismatches = 0; + + // Allow up to 20% of the stacks to be resident on a node different from + // the one requested. + const int alloc_mismatch_percentage = 20; + + // Only test on CPU nodes which have memory + if (!nv_numa_node_has_memory(node) || !node_state(node, N_CPU)) + continue; + + for (i = 0; i < max_i; i++) { + unsigned j; + int thread_args[2]; + nv_kthread_q_item_t q_item; + char q_name[64]; + + nv_kthread_q_item_init(&q_item, _check_cpu_affinity_callback, thread_args); + snprintf(q_name, sizeof(q_name), "test_q_%d", node); + result = nv_kthread_q_init_on_node(&local_q, q_name, node); + TEST_CHECK_RET(result == 0); + + // The second entry contains the value returned by the callback: + // 0 if no mismatch found, and 1 otherwise. + thread_args[0] = node; + thread_args[1] = 0; + + // Run several iterations to ensure that the thread's stack does + // not migrate after initialization. + for (j = 0; j < 25; j++) { + result = nv_kthread_q_schedule_q_item(&local_q, &q_item); + + // nv_kthread_q_schedule_q_item() returns non-zero value if the + // item was successfully scheduled. + if (result == 0) { + nv_kthread_q_stop(&local_q); + TEST_CHECK_RET(false); + } + + nv_kthread_q_flush(&local_q); + + // Count as failure if any of the stack pages is resident on a + // another node on any iteration. + if (thread_args[1] == 1) { + stack_mismatches++; + break; + } + } + + nv_kthread_q_stop(&local_q); + + if ((100 * stack_mismatches / max_i) > alloc_mismatch_percentage) + TEST_CHECK_RET(false); + } + } + return 0; +} + +//////////////////////////////////////////////////////////////////////////////// +// Top-level test entry point + +int nv_kthread_q_run_self_test(void) +{ + int result; + + result = _basic_start_stop_test(); + TEST_CHECK_RET(result == 0); + + result = _reschedule_same_item_from_its_own_callback_test(); + TEST_CHECK_RET(result == 0); + + result = _multithreaded_q_test(); + TEST_CHECK_RET(result == 0); + + result = _same_q_item_test(); + TEST_CHECK_RET(result == 0); + + result = _check_cpu_affinity_test(); + TEST_CHECK_RET(result == 0); + + return 0; +} diff --git a/kernel-open/nvidia-uvm/nv-kthread-q.c b/kernel-open/nvidia-uvm/nv-kthread-q.c new file mode 100644 index 000000000..5a95f4a40 --- /dev/null +++ b/kernel-open/nvidia-uvm/nv-kthread-q.c @@ -0,0 +1,335 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-kthread-q.h" +#include "nv-list-helpers.h" + +#include +#include +#include +#include +#include + +#if defined(NV_LINUX_BUG_H_PRESENT) + #include +#else + #include +#endif + +// Today's implementation is a little simpler and more limited than the +// API description allows for in nv-kthread-q.h. Details include: +// +// 1. Each nv_kthread_q instance is a first-in, first-out queue. +// +// 2. Each nv_kthread_q instance is serviced by exactly one kthread. +// +// You can create any number of queues, each of which gets its own +// named kernel thread (kthread). You can then insert arbitrary functions +// into the queue, and those functions will be run in the context of the +// queue's kthread. + +#ifndef WARN + // Only *really* old kernels (2.6.9) end up here. Just use a simple printk + // to implement this, because such kernels won't be supported much longer. + #define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + printk(KERN_ERR format); \ + unlikely(__ret_warn_on); \ + }) +#endif + +#define NVQ_WARN(fmt, ...) \ + do { \ + if (in_interrupt()) { \ + WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \ + ##__VA_ARGS__); \ + } \ + else { \ + WARN(1, "nv_kthread_q: task: %s: " fmt, \ + current->comm, \ + ##__VA_ARGS__); \ + } \ + } while (0) + +static int _main_loop(void *args) +{ + nv_kthread_q_t *q = (nv_kthread_q_t *)args; + nv_kthread_q_item_t *q_item = NULL; + unsigned long flags; + + while (1) { + // Normally this thread is never interrupted. However, + // down_interruptible (instead of down) is called here, + // in order to avoid being classified as a potentially + // hung task, by the kernel watchdog. + while (down_interruptible(&q->q_sem)) + NVQ_WARN("Interrupted during semaphore wait\n"); + + if (atomic_read(&q->main_loop_should_exit)) + break; + + spin_lock_irqsave(&q->q_lock, flags); + + // The q_sem semaphore prevents us from getting here unless there is + // at least one item in the list, so an empty list indicates a bug. + if (unlikely(list_empty(&q->q_list_head))) { + spin_unlock_irqrestore(&q->q_lock, flags); + NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q); + continue; + } + + // Consume one item from the queue + q_item = list_first_entry(&q->q_list_head, + nv_kthread_q_item_t, + q_list_node); + + list_del_init(&q_item->q_list_node); + + spin_unlock_irqrestore(&q->q_lock, flags); + + // Run the item + q_item->function_to_run(q_item->function_args); + + // Make debugging a little simpler by clearing this between runs: + q_item = NULL; + } + + while (!kthread_should_stop()) + schedule(); + + return 0; +} + +void nv_kthread_q_stop(nv_kthread_q_t *q) +{ + // check if queue has been properly initialized + if (unlikely(!q->q_kthread)) + return; + + nv_kthread_q_flush(q); + + // If this assertion fires, then a caller likely either broke the API rules, + // by adding items after calling nv_kthread_q_stop, or possibly messed up + // with inadequate flushing of self-rescheduling q_items. 
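// For reference, a caller that uses self-rescheduling q_items is expected to
// quiesce them before stopping the queue, roughly as below (illustrative
// sketch only; the stop_rescheduling flag is hypothetical and not part of
// this file):
//
//     atomic_set(&stop_rescheduling, 1); // callbacks check this and stop re-adding
//     nv_kthread_q_stop(&q);             // flushes twice, then joins the kthread
//
// Scheduling new items after nv_kthread_q_stop() has been called violates the
// API contract described above.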
+ if (unlikely(!list_empty(&q->q_list_head))) + NVQ_WARN("list not empty after flushing\n"); + + if (likely(!atomic_read(&q->main_loop_should_exit))) { + + atomic_set(&q->main_loop_should_exit, 1); + + // Wake up the kthread so that it can see that it needs to stop: + up(&q->q_sem); + + kthread_stop(q->q_kthread); + q->q_kthread = NULL; + } +} + +// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by +// kthread_create_on_node relies on a 2 entry, per-core cache to minimize +// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the +// stack location ends up being a function of the core assigned to the current +// thread, instead of being a function of the specified NUMA node. The cache was +// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0 +// ("fork: Optimize task creation by caching two thread stacks per CPU if +// CONFIG_VMAP_STACK=y") +// +// To work around the problematic cache, we create up to three kernel threads +// -If the first thread's stack is resident on the preferred node, return this +// thread. +// -Otherwise, create a second thread. If its stack is resident on the +// preferred node, stop the first thread and return this one. +// -Otherwise, create a third thread. The stack allocator does not find a +// cached stack, and so falls back to vmalloc, which takes the NUMA hint into +// consideration. The first two threads are then stopped. +// +// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned. +// +// This function is never invoked when there is no NUMA preference (preferred +// node is NUMA_NO_NODE). +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 +static struct task_struct *thread_create_on_node(int (*threadfn)(void *data), + nv_kthread_q_t *q, + int preferred_node, + const char *q_name) +{ + + unsigned i, j; + const static unsigned attempts = 3; + struct task_struct *thread[3]; + + for (i = 0;; i++) { + struct page *stack; + + thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name); + + if (unlikely(IS_ERR(thread[i]))) { + + // Instead of failing, pick the previous thread, even if its + // stack is not allocated on the preferred node. + if (i > 0) + i--; + + break; + } + + // vmalloc is not used to allocate the stack, so simply return the + // thread, even if its stack may not be allocated on the preferred node + if (!is_vmalloc_addr(thread[i]->stack)) + break; + + // Ran out of attempts - return thread even if its stack may not be + // allocated on the preferred node + if ((i == (attempts - 1))) + break; + + // Get the NUMA node where the first page of the stack is resident. If + // it is the preferred node, select this thread. 
+ stack = vmalloc_to_page(thread[i]->stack); + if (page_to_nid(stack) == preferred_node) + break; + } + + for (j = i; j > 0; j--) + kthread_stop(thread[j - 1]); + + return thread[i]; +} +#endif + +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node) +{ + memset(q, 0, sizeof(*q)); + + INIT_LIST_HEAD(&q->q_list_head); + spin_lock_init(&q->q_lock); + sema_init(&q->q_sem, 0); + + if (preferred_node == NV_KTHREAD_NO_NODE) { + q->q_kthread = kthread_create(_main_loop, q, q_name); + } + else { +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 + q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name); +#else + return -ENOTSUPP; +#endif + } + + if (IS_ERR(q->q_kthread)) { + int err = PTR_ERR(q->q_kthread); + + // Clear q_kthread before returning so that nv_kthread_q_stop() can be + // safely called on it making error handling easier. + q->q_kthread = NULL; + + return err; + } + + wake_up_process(q->q_kthread); + + return 0; +} + +// Returns true (non-zero) if the item was actually scheduled, and false if the +// item was already pending in a queue. +static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&q->q_lock, flags); + + if (likely(list_empty(&q_item->q_list_node))) + list_add_tail(&q_item->q_list_node, &q->q_list_head); + else + ret = 0; + + spin_unlock_irqrestore(&q->q_lock, flags); + + if (likely(ret)) + up(&q->q_sem); + + return ret; +} + +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args) +{ + INIT_LIST_HEAD(&q_item->q_list_node); + q_item->function_to_run = function_to_run; + q_item->function_args = function_args; +} + +// Returns true (non-zero) if the q_item got scheduled, false otherwise. +int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was " + "called with a non-alive q: 0x%p\n", q); + return 0; + } + + return _raw_q_schedule(q, q_item); +} + +static void _q_flush_function(void *args) +{ + struct completion *completion = (struct completion *)args; + complete(completion); +} + + +static void _raw_q_flush(nv_kthread_q_t *q) +{ + nv_kthread_q_item_t q_item; + DECLARE_COMPLETION(completion); + + nv_kthread_q_item_init(&q_item, _q_flush_function, &completion); + + _raw_q_schedule(q, &q_item); + + // Wait for the flush item to run. Once it has run, then all of the + // previously queued items in front of it will have run, so that means + // the flush is complete. + wait_for_completion(&completion); +} + +void nv_kthread_q_flush(nv_kthread_q_t *q) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_flush was called after " + "nv_kthread_q_stop. q: 0x%p\n", q); + return; + } + + // This 2x flush is not a typing mistake. The queue really does have to be + // flushed twice, in order to take care of the case of a q_item that + // reschedules itself. 
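// Concretely (illustrative walk-through): _raw_q_flush() works by queuing a
// marker item and waiting for it to run. If some q_item A runs ahead of that
// marker and re-adds itself from its own callback, the re-added copy lands
// behind the marker, so the first flush can return while it is still pending.
// The second flush queues a new marker behind that re-added copy and therefore
// waits for it as well. This is sufficient once the caller has stopped further
// rescheduling; a q_item that keeps rescheduling itself forever can never be
// flushed out.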
+ _raw_q_flush(q); + _raw_q_flush(q); +} diff --git a/kernel-open/nvidia-uvm/nvCpuUuid.c b/kernel-open/nvidia-uvm/nvCpuUuid.c new file mode 100644 index 000000000..f2e102571 --- /dev/null +++ b/kernel-open/nvidia-uvm/nvCpuUuid.c @@ -0,0 +1,34 @@ +/******************************************************************************* + Copyright (c) 2015-2018 NVidia Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "nvtypes.h" +#include "nvCpuUuid.h" + +const NvProcessorUuid NV_PROCESSOR_UUID_CPU_DEFAULT = +{ + { + // Produced via uuidgen(1): 73772a14-2c41-4750-a27b-d4d74e0f5ea6: + 0xa6, 0x5e, 0x0f, 0x4e, 0xd7, 0xd4, 0x7b, 0xa2, + 0x50, 0x47, 0x41, 0x2c, 0x14, 0x2a, 0x77, 0x73 + } +}; + diff --git a/kernel-open/nvidia-uvm/nvidia-uvm-sources.Kbuild b/kernel-open/nvidia-uvm/nvidia-uvm-sources.Kbuild new file mode 100644 index 000000000..f8666478f --- /dev/null +++ b/kernel-open/nvidia-uvm/nvidia-uvm-sources.Kbuild @@ -0,0 +1,113 @@ +NVIDIA_UVM_SOURCES ?= +NVIDIA_UVM_SOURCES_CXX ?= + +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_common.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_linux.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_debug_optimized.c +NVIDIA_UVM_SOURCES += nvidia-uvm/nvstatus.c +NVIDIA_UVM_SOURCES += nvidia-uvm/nvCpuUuid.c +NVIDIA_UVM_SOURCES += nvidia-uvm/nv-kthread-q.c +NVIDIA_UVM_SOURCES += nvidia-uvm/nv-kthread-q-selftest.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_tools.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_global.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_gpu.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_gpu_isr.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_procfs.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_space.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_space_mm.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_gpu_semaphore.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_mem.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rm_mem.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_channel.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_lock.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hal.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rb_tree.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_allocator.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_range.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_policy.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_block.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_group.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_gpu_replayable_faults.c +NVIDIA_UVM_SOURCES += 
nvidia-uvm/uvm_gpu_non_replayable_faults.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_gpu_access_counters.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_events.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_module.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_mmu.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pte_batch.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_tlb_batch.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_push.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pushbuffer.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_thread_context.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_tracker.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_host.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_ce.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_mmu.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_fault_buffer.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_maxwell_access_counter_buffer.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal_ce.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal_host.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal_mmu.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pascal_fault_buffer.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_host.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_mmu.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_fault_buffer.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_volta_access_counter_buffer.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_access_counter_buffer.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_fault_buffer.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_mmu.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_turing_host.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_ce.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_host.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ampere_mmu.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_policy.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_utils.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_kvmalloc.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pmm_sysmem.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pmm_gpu.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_migrate.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_populate_pageable.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_migrate_pageable.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_map_external.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_user_channel.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hmm.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_heuristics.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_thrashing.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_prefetch.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_ibm.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ats_faults.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_test_rng.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_tree_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_allocator_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_gpu_semaphore_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_hmm_sanity_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_mem_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rm_mem_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_page_tree_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_tracker_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_push_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_channel_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_ce_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_host_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_lock_test.c 
+NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_utils_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_kvmalloc_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pmm_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_pmm_sysmem_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_events_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_perf_module_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_get_rm_ptes_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_fault_buffer_flush_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_peer_identity_mappings_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_va_block_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_range_group_tree_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_thread_context_test.c +NVIDIA_UVM_SOURCES += nvidia-uvm/uvm_rb_tree_test.c diff --git a/kernel-open/nvidia-uvm/nvidia-uvm.Kbuild b/kernel-open/nvidia-uvm/nvidia-uvm.Kbuild new file mode 100644 index 000000000..e1af6db88 --- /dev/null +++ b/kernel-open/nvidia-uvm/nvidia-uvm.Kbuild @@ -0,0 +1,112 @@ +########################################################################### +# Kbuild fragment for nvidia-uvm.ko +########################################################################### + +UVM_BUILD_TYPE = release + +# +# Define NVIDIA_UVM_{SOURCES,OBJECTS} +# + +NVIDIA_UVM_OBJECTS = + +include $(src)/nvidia-uvm/nvidia-uvm-sources.Kbuild +NVIDIA_UVM_OBJECTS += $(patsubst %.c,%.o,$(NVIDIA_UVM_SOURCES)) + +# Some linux kernel functions rely on being built with optimizations on and +# to work around this we put wrappers for them in a separate file that's built +# with optimizations on in debug builds and skipped in other builds. +# Notably gcc 4.4 supports per function optimization attributes that would be +# easier to use, but is too recent to rely on for now. +NVIDIA_UVM_DEBUG_OPTIMIZED_SOURCE := nvidia-uvm/uvm_debug_optimized.c +NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT := $(patsubst %.c,%.o,$(NVIDIA_UVM_DEBUG_OPTIMIZED_SOURCE)) + +ifneq ($(UVM_BUILD_TYPE),debug) + # Only build the wrappers on debug builds + NVIDIA_UVM_OBJECTS := $(filter-out $(NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT), $(NVIDIA_UVM_OBJECTS)) +endif + +obj-m += nvidia-uvm.o +nvidia-uvm-y := $(NVIDIA_UVM_OBJECTS) + +NVIDIA_UVM_KO = nvidia-uvm/nvidia-uvm.ko + +# +# Define nvidia-uvm.ko-specific CFLAGS. 
+# + +ifeq ($(UVM_BUILD_TYPE),debug) + NVIDIA_UVM_CFLAGS += -DDEBUG $(call cc-option,-Og,-O0) -g +else + ifeq ($(UVM_BUILD_TYPE),develop) + # -DDEBUG is required, in order to allow pr_devel() print statements to + # work: + NVIDIA_UVM_CFLAGS += -DDEBUG + NVIDIA_UVM_CFLAGS += -DNVIDIA_UVM_DEVELOP + endif + NVIDIA_UVM_CFLAGS += -O2 +endif + +NVIDIA_UVM_CFLAGS += -DNVIDIA_UVM_ENABLED +NVIDIA_UVM_CFLAGS += -DNVIDIA_UNDEF_LEGACY_BIT_MACROS + +NVIDIA_UVM_CFLAGS += -DLinux +NVIDIA_UVM_CFLAGS += -D__linux__ +NVIDIA_UVM_CFLAGS += -I$(src)/nvidia-uvm + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_UVM_OBJECTS), $(NVIDIA_UVM_CFLAGS)) + +ifeq ($(UVM_BUILD_TYPE),debug) + # Force optimizations on for the wrappers + $(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_UVM_DEBUG_OPTIMIZED_OBJECT), $(NVIDIA_UVM_CFLAGS) -O2) +endif + +# +# Register the conftests needed by nvidia-uvm.ko +# + +NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_UVM_OBJECTS) + +NV_CONFTEST_FUNCTION_COMPILE_TESTS += address_space_init_once +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vzalloc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += wait_on_bit_lock_argument_count +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data +NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove +NV_CONFTEST_FUNCTION_COMPILE_TESTS += bitmap_clear +NV_CONFTEST_FUNCTION_COMPILE_TESTS += usleep_range +NV_CONFTEST_FUNCTION_COMPILE_TESTS += radix_tree_empty +NV_CONFTEST_FUNCTION_COMPILE_TESTS += radix_tree_replace_slot +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_npu2_init_context +NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpumask_of_node +NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first +NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioasid_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += migrate_vma_setup + +NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations +NV_CONFTEST_TYPE_COMPILE_TESTS += kuid_t +NV_CONFTEST_TYPE_COMPILE_TESTS += address_space +NV_CONFTEST_TYPE_COMPILE_TESTS += backing_dev_info +NV_CONFTEST_TYPE_COMPILE_TESTS += mm_context_t +NV_CONFTEST_TYPE_COMPILE_TESTS += get_user_pages_remote +NV_CONFTEST_TYPE_COMPILE_TESTS += get_user_pages +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory +NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work +NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t +NV_CONFTEST_TYPE_COMPILE_TESTS += mmu_notifier_ops_invalidate_range +NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64 +NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock +NV_CONFTEST_TYPE_COMPILE_TESTS += migrate_vma_added_flags +NV_CONFTEST_TYPE_COMPILE_TESTS += make_device_exclusive_range + +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_int_active_memcg diff --git a/kernel-open/nvidia-uvm/nvstatus.c b/kernel-open/nvidia-uvm/nvstatus.c new file mode 100644 index 000000000..e377dd9f9 --- /dev/null +++ b/kernel-open/nvidia-uvm/nvstatus.c @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvstatus.h" + +#if !defined(NV_PRINTF_STRING_SECTION) +#if defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION __attribute__ ((section (".logging"))) +#else // defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION +#endif // defined(NVRM) && NVCPU_IS_RISCV64 +#endif // !defined(NV_PRINTF_STRING_SECTION) + +/* + * Include nvstatuscodes.h twice. Once for creating constant strings in + * the NV_PRINTF_STRING_SECTION section of the executable, and once to build + * the g_StatusCodeList table. + */ +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) static NV_PRINTF_STRING_SECTION \ + const char rm_pvt_##name##_str[] = string " [" #name "]"; +#include "nvstatuscodes.h" + +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) { name, rm_pvt_##name##_str }, +static struct NvStatusCodeString +{ + NV_STATUS statusCode; + const char *statusString; +} g_StatusCodeList[] = { + #include "nvstatuscodes.h" + { 0xffffffff, "Unknown error code!" } // Some compilers don't like the trailing ',' +}; +#undef NV_STATUS_CODE + +/*! + * @brief Given an NV_STATUS code, returns the corresponding status string.
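 *
 * (Illustration, using a made-up entry: for NV_STATUS_CODE(NV_ERR_EXAMPLE,
 *  0x42, "Example error"), the first include pass above expands, with
 *  NV_PRINTF_STRING_SECTION empty, to
 *      static const char rm_pvt_NV_ERR_EXAMPLE_str[] = "Example error [NV_ERR_EXAMPLE]";
 *  and the second pass expands to the table row
 *      { NV_ERR_EXAMPLE, rm_pvt_NV_ERR_EXAMPLE_str },
 *  which is the array this function searches.)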
+ * + * @param[in] nvStatusIn NV_STATUS code for which the string is required + * + * @returns Corresponding status string from the nvstatuscodes.h + * + * TODO: Bug 200025711: convert this to an array-indexed lookup, instead of a linear search + * +*/ +const char *nvstatusToString(NV_STATUS nvStatusIn) +{ + static NV_PRINTF_STRING_SECTION const char rm_pvt_UNKNOWN_str[] = "Unknown error code!"; + NvU32 i; + NvU32 n = ((NvU32)(sizeof(g_StatusCodeList))/(NvU32)(sizeof(g_StatusCodeList[0]))); + for (i = 0; i < n; i++) + { + if (g_StatusCodeList[i].statusCode == nvStatusIn) + { + return g_StatusCodeList[i].statusString; + } + } + + return rm_pvt_UNKNOWN_str; +} diff --git a/kernel-open/nvidia-uvm/uvm.c b/kernel-open/nvidia-uvm/uvm.c new file mode 100644 index 000000000..476c12548 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm.c @@ -0,0 +1,1149 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_api.h" +#include "uvm_global.h" +#include "uvm_gpu_replayable_faults.h" +#include "uvm_tools_init.h" +#include "uvm_lock.h" +#include "uvm_test.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_va_block.h" +#include "uvm_tools.h" +#include "uvm_common.h" +#include "uvm_linux_ioctl.h" +#include "uvm_hmm.h" +#include "uvm_mem.h" + +#define NVIDIA_UVM_DEVICE_NAME "nvidia-uvm" + +static dev_t g_uvm_base_dev; +static struct cdev g_uvm_cdev; + +// List of fault service contexts for CPU faults +static LIST_HEAD(g_cpu_service_block_context_list); + +static uvm_spinlock_t g_cpu_service_block_context_list_lock; + +NV_STATUS uvm_service_block_context_init(void) +{ + unsigned num_preallocated_contexts = 4; + + uvm_spin_lock_init(&g_cpu_service_block_context_list_lock, UVM_LOCK_ORDER_LEAF); + + // Pre-allocate some fault service contexts for the CPU and add them to the global list + while (num_preallocated_contexts-- > 0) { + uvm_service_block_context_t *service_context = uvm_kvmalloc(sizeof(*service_context)); + if (!service_context) + return NV_ERR_NO_MEMORY; + + list_add(&service_context->cpu_fault.service_context_list, &g_cpu_service_block_context_list); + } + + return NV_OK; +} + +void uvm_service_block_context_exit(void) +{ + uvm_service_block_context_t *service_context, *service_context_tmp; + + // Free fault service contexts for the CPU and add clear the global list + list_for_each_entry_safe(service_context, service_context_tmp, &g_cpu_service_block_context_list, + cpu_fault.service_context_list) { + uvm_kvfree(service_context); + } + INIT_LIST_HEAD(&g_cpu_service_block_context_list); +} + +// Get a fault service context from the global list or allocate a new one if there are no +// available entries +static uvm_service_block_context_t *uvm_service_block_context_cpu_alloc(void) +{ + uvm_service_block_context_t *service_context; + + uvm_spin_lock(&g_cpu_service_block_context_list_lock); + + service_context = list_first_entry_or_null(&g_cpu_service_block_context_list, uvm_service_block_context_t, + cpu_fault.service_context_list); + + if (service_context) + list_del(&service_context->cpu_fault.service_context_list); + + uvm_spin_unlock(&g_cpu_service_block_context_list_lock); + + if (!service_context) + service_context = uvm_kvmalloc(sizeof(*service_context)); + + return service_context; +} + +// Put a fault service context in the global list +static void uvm_service_block_context_cpu_free(uvm_service_block_context_t *service_context) +{ + uvm_spin_lock(&g_cpu_service_block_context_list_lock); + + list_add(&service_context->cpu_fault.service_context_list, &g_cpu_service_block_context_list); + + uvm_spin_unlock(&g_cpu_service_block_context_list_lock); +} + +static int uvm_open(struct inode *inode, struct file *filp) +{ + NV_STATUS status = uvm_global_get_status(); + + if (status == NV_OK) { + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + status = uvm_va_space_create(inode, filp); + + uvm_up_read(&g_uvm_global.pm.lock); + } + + return -nv_status_to_errno(status); +} + +static int uvm_open_entry(struct inode *inode, struct file *filp) +{ + UVM_ENTRY_RET(uvm_open(inode, filp)); +} + +static void uvm_release_deferred(void *data) +{ + uvm_va_space_t *va_space = data; + + // Since this function is only scheduled to run when uvm_release() fails + // to trylock-acquire the pm.lock, the following acquisition attempt + // is expected to block this thread, and 
cause it to remain blocked until + // uvm_resume() releases the lock. As a result, the deferred release + // kthread queue may stall for long periods of time. + uvm_down_read(&g_uvm_global.pm.lock); + + uvm_va_space_destroy(va_space); + + uvm_up_read(&g_uvm_global.pm.lock); +} + +static int uvm_release(struct inode *inode, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + int ret; + + filp->private_data = NULL; + filp->f_mapping = NULL; + + // Because the kernel discards the status code returned from this release + // callback, early exit in case of a pm.lock acquisition failure is not + // an option. Instead, the teardown work normally performed synchronously + // needs to be scheduled to run after uvm_resume() releases the lock. + if (uvm_down_read_trylock(&g_uvm_global.pm.lock)) { + uvm_va_space_destroy(va_space); + uvm_up_read(&g_uvm_global.pm.lock); + } + else { + // Remove references to this inode from the address_space. This isn't + // strictly necessary, as any CPU mappings of this file have already + // been destroyed, and va_space->mapping won't be used again. Still, + // the va_space survives the inode if its destruction is deferred, in + // which case the references are rendered stale. + address_space_init_once(&va_space->mapping); + + nv_kthread_q_item_init(&va_space->deferred_release_q_item, uvm_release_deferred, va_space); + ret = nv_kthread_q_schedule_q_item(&g_uvm_global.deferred_release_q, &va_space->deferred_release_q_item); + UVM_ASSERT(ret != 0); + } + + return 0; +} + +static int uvm_release_entry(struct inode *inode, struct file *filp) +{ + UVM_ENTRY_RET(uvm_release(inode, filp)); +} + +static void uvm_destroy_vma_managed(struct vm_area_struct *vma, bool make_zombie) +{ + uvm_va_range_t *va_range, *va_range_next; + NvU64 size = 0; + + uvm_assert_rwsem_locked_write(&uvm_va_space_get(vma->vm_file)->lock); + uvm_for_each_va_range_in_vma_safe(va_range, va_range_next, vma) { + // On exit_mmap (process teardown), current->mm is cleared so + // uvm_va_range_vma_current would return NULL. + UVM_ASSERT(uvm_va_range_vma(va_range) == vma); + UVM_ASSERT(va_range->node.start >= vma->vm_start); + UVM_ASSERT(va_range->node.end < vma->vm_end); + size += uvm_va_range_size(va_range); + if (make_zombie) + uvm_va_range_zombify(va_range); + else + uvm_va_range_destroy(va_range, NULL); + } + + if (vma->vm_private_data) { + uvm_vma_wrapper_destroy(vma->vm_private_data); + vma->vm_private_data = NULL; + } + UVM_ASSERT(size == vma->vm_end - vma->vm_start); +} + +static void uvm_destroy_vma_semaphore_pool(struct vm_area_struct *vma) +{ + uvm_va_space_t *va_space; + uvm_va_range_t *va_range; + + va_space = uvm_va_space_get(vma->vm_file); + uvm_assert_rwsem_locked(&va_space->lock); + va_range = uvm_va_range_find(va_space, vma->vm_start); + UVM_ASSERT(va_range && + va_range->node.start == vma->vm_start && + va_range->node.end + 1 == vma->vm_end && + va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL); + + uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem); +} + +// If a fault handler is not set, paths like handle_pte_fault in older kernels +// assume the memory is anonymous. That would make debugging this failure harder +// so we force it to fail instead. 
+static vm_fault_t uvm_vm_fault_sigbus(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + UVM_DBG_PRINT_RL("Fault to address 0x%lx in disabled vma\n", nv_page_fault_va(vmf)); + return VM_FAULT_SIGBUS; +} + +static vm_fault_t uvm_vm_fault_sigbus_entry(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + UVM_ENTRY_RET(uvm_vm_fault_sigbus(vma, vmf)); +} + +static vm_fault_t uvm_vm_fault_sigbus_wrapper(struct vm_fault *vmf) +{ +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + return uvm_vm_fault_sigbus(vmf->vma, vmf); +#else + return uvm_vm_fault_sigbus(NULL, vmf); +#endif +} + +static vm_fault_t uvm_vm_fault_sigbus_wrapper_entry(struct vm_fault *vmf) +{ + UVM_ENTRY_RET(uvm_vm_fault_sigbus_wrapper(vmf)); +} + +static struct vm_operations_struct uvm_vm_ops_disabled = +{ +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + .fault = uvm_vm_fault_sigbus_wrapper_entry +#else + .fault = uvm_vm_fault_sigbus_entry +#endif +}; + +static void uvm_disable_vma(struct vm_area_struct *vma) +{ + // In the case of fork, the kernel has already copied the old PTEs over to + // the child process, so an access in the child might succeed instead of + // causing a fault. To force a fault we'll unmap it directly here. + // + // Note that since the unmap works on file offset, not virtual address, this + // unmaps both the old and new vmas. + // + // In the case of a move (mremap), the kernel will copy the PTEs over later, + // so it doesn't matter if we unmap here. However, the new vma's open will + // immediately be followed by a close on the old vma. We call + // unmap_mapping_range for the close, which also unmaps the new vma because + // they have the same file offset. + unmap_mapping_range(vma->vm_file->f_mapping, + vma->vm_pgoff << PAGE_SHIFT, + vma->vm_end - vma->vm_start, + 1); + + vma->vm_ops = &uvm_vm_ops_disabled; + + if (vma->vm_private_data) { + uvm_vma_wrapper_destroy(vma->vm_private_data); + vma->vm_private_data = NULL; + } +} + +// We can't return an error from uvm_vm_open so on failed splits +// we'll disable *both* vmas. This isn't great behavior for the +// user, but we don't have many options. We could leave the old VA +// range in place but that breaks the model of vmas always +// completely covering VA ranges. We'd have to be very careful +// handling later splits and closes of both that partially-covered +// VA range, and of the vmas which might or might not cover it any +// more. +// +// A failure likely means we're in OOM territory, so this should not +// be common by any means, and the process might die anyway. +static void uvm_vm_open_failure(struct vm_area_struct *original, + struct vm_area_struct *new) +{ + uvm_va_space_t *va_space = uvm_va_space_get(new->vm_file); + static const bool make_zombie = false; + + UVM_ASSERT(va_space == uvm_va_space_get(original->vm_file)); + uvm_assert_rwsem_locked_write(&va_space->lock); + + uvm_destroy_vma_managed(original, make_zombie); + uvm_disable_vma(original); + uvm_disable_vma(new); +} + +// vm_ops->open cases: +// +// 1) Parent vma is dup'd (fork) +// This is undefined behavior in the UVM Programming Model. For convenience +// the parent will continue operating properly, but the child is not +// guaranteed access to the range. +// +// 2) Original vma is split (munmap, mprotect, mremap, mbind, etc) +// The UVM Programming Model supports mbind always and supports mprotect if +// HMM is present. Supporting either of those means all such splitting cases +// must be handled. This involves splitting the va_range covering the split +// location. 
Note that the kernel will never merge us back on two counts: we +// set VM_MIXEDMAP and we have a ->close callback. +// +// 3) Original vma is moved (mremap) +// This is undefined behavior in the UVM Programming Model. We'll get an open +// on the new vma in which we disable operations on the new vma, then a close +// on the old vma. +// +// Note that since we set VM_DONTEXPAND on the vma we're guaranteed that the vma +// will never increase in size, only shrink/split. +static void uvm_vm_open_managed(struct vm_area_struct *vma) +{ + uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file); + uvm_va_range_t *va_range; + struct vm_area_struct *original; + NV_STATUS status; + NvU64 new_end; + + // This is slightly ugly. We need to know the parent vma of this new one, + // but we can't use the range tree to look up the original because that + // doesn't handle a vma move operation. + // + // However, all of the old vma's fields have been copied into the new vma, + // and open of the new vma is always called before close of the old (in + // cases where close will be called immediately afterwards, like move). + // vma->vm_private_data will thus still point to the original vma that we + // set in mmap or open. + // + // Things to watch out for here: + // - For splits, the old vma hasn't been adjusted yet so its vm_start and + // vm_end region will overlap with this vma's start and end. + // + // - For splits and moves, the new vma has not yet been inserted into the + // mm's list so vma->vm_prev and vma->vm_next cannot be used, nor will + // the new vma show up in find_vma and friends. + original = ((uvm_vma_wrapper_t*)vma->vm_private_data)->vma; + vma->vm_private_data = NULL; + // On fork or move we want to simply disable the new vma + if (vma->vm_mm != original->vm_mm || + (vma->vm_start != original->vm_start && vma->vm_end != original->vm_end)) { + uvm_disable_vma(vma); + return; + } + + // At this point we are guaranteed that the mmap_lock is held in write + // mode. + uvm_record_lock_mmap_lock_write(current->mm); + + // Split vmas should always fall entirely within the old one, and be on one + // side. + UVM_ASSERT(vma->vm_start >= original->vm_start && vma->vm_end <= original->vm_end); + UVM_ASSERT(vma->vm_start == original->vm_start || vma->vm_end == original->vm_end); + + // The vma is splitting, so create a new range under this vma if necessary. + // The kernel handles splits in the middle of the vma by doing two separate + // splits so we just have to handle one vma splitting in two here. + if (vma->vm_start == original->vm_start) + new_end = vma->vm_end - 1; // Left split (new_end is inclusive) + else + new_end = vma->vm_start - 1; // Right split (new_end is inclusive) + + uvm_va_space_down_write(va_space); + + vma->vm_private_data = uvm_vma_wrapper_alloc(vma); + if (!vma->vm_private_data) { + uvm_vm_open_failure(original, vma); + goto out; + } + + // There can be multiple va_ranges under the vma already. Check if one spans + // the new split boundary. If so, split it. + va_range = uvm_va_range_find(va_space, new_end); + UVM_ASSERT(va_range); + UVM_ASSERT(uvm_va_range_vma_current(va_range) == original); + if (va_range->node.end != new_end) { + status = uvm_va_range_split(va_range, new_end, NULL); + if (status != NV_OK) { + UVM_DBG_PRINT("Failed to split VA range, destroying both: %s. 
" + "original vma [0x%lx, 0x%lx) new vma [0x%lx, 0x%lx)\n", + nvstatusToString(status), + original->vm_start, original->vm_end, + vma->vm_start, vma->vm_end); + uvm_vm_open_failure(original, vma); + goto out; + } + } + + // Point va_ranges to the new vma + uvm_for_each_va_range_in_vma(va_range, vma) { + UVM_ASSERT(uvm_va_range_vma_current(va_range) == original); + va_range->managed.vma_wrapper = vma->vm_private_data; + } + +out: + uvm_va_space_up_write(va_space); + uvm_record_unlock_mmap_lock_write(current->mm); +} + +static void uvm_vm_open_managed_entry(struct vm_area_struct *vma) +{ + UVM_ENTRY_VOID(uvm_vm_open_managed(vma)); +} + +static void uvm_vm_close_managed(struct vm_area_struct *vma) +{ + uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file); + uvm_gpu_t *gpu; + bool make_zombie = false; + + if (current->mm != NULL) + uvm_record_lock_mmap_lock_write(current->mm); + + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + + // current->mm will be NULL on process teardown, in which case we have + // special handling. + if (current->mm == NULL) { + make_zombie = (va_space->initialization_flags & UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE); + if (!make_zombie) { + // If we're not in multi-process mode, then we want to stop all user + // channels before unmapping the managed allocations to avoid + // spurious MMU faults in the system log. If we have a va_space_mm + // then this must've already happened as part of + // uvm_va_space_mm_shutdown. Otherwise we need to handle it here. + if (uvm_va_space_mm_enabled(va_space) && current->mm == va_space->va_space_mm.mm) { + UVM_ASSERT(atomic_read(&va_space->user_channels_stopped)); + } + else { + // Stopping channels involves making RM calls, so we have to do + // that with the VA space lock in read mode. + uvm_va_space_down_read_rm(va_space); + if (!atomic_read(&va_space->user_channels_stopped)) + uvm_va_space_stop_all_user_channels(va_space); + uvm_va_space_up_read_rm(va_space); + } + } + } + + // See uvm_mmap for why we need this in addition to mmap_lock + uvm_va_space_down_write(va_space); + + uvm_destroy_vma_managed(vma, make_zombie); + + // Notify GPU address spaces that the fault buffer needs to be flushed to avoid finding stale entries + // that can be attributed to new VA ranges reallocated at the same address + for_each_va_space_gpu_in_mask(gpu, va_space, &va_space->registered_gpu_va_spaces) { + uvm_gpu_va_space_t *gpu_va_space = uvm_gpu_va_space_get(va_space, gpu); + UVM_ASSERT(gpu_va_space); + + gpu_va_space->needs_fault_buffer_flush = true; + } + uvm_va_space_up_write(va_space); + + if (current->mm != NULL) + uvm_record_unlock_mmap_lock_write(current->mm); +} + +static void uvm_vm_close_managed_entry(struct vm_area_struct *vma) +{ + UVM_ENTRY_VOID(uvm_vm_close_managed(vma)); +} + +static vm_fault_t uvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file); + uvm_va_block_t *va_block; + NvU64 fault_addr = nv_page_fault_va(vmf); + bool is_write = vmf->flags & FAULT_FLAG_WRITE; + NV_STATUS status = uvm_global_get_status(); + bool tools_enabled; + bool major_fault = false; + uvm_service_block_context_t *service_context; + uvm_global_processor_mask_t gpus_to_check_for_ecc; + + if (status != NV_OK) + goto convert_error; + + // TODO: Bug 2583279: Lock tracking is disabled for the power management + // lock in order to suppress reporting of a lock policy violation. 
+ // The violation consists in acquiring the power management lock multiple + // times, and it is manifested as an error during release. The + // re-acquisition of the power management locks happens upon re-entry in the + // UVM module, and it is benign on itself, but when combined with certain + // power management scenarios, it is indicative of a potential deadlock. + // Tracking will be re-enabled once the power management locking strategy is + // modified to avoid deadlocks. + if (!uvm_down_read_trylock_no_tracking(&g_uvm_global.pm.lock)) { + status = NV_ERR_BUSY_RETRY; + goto convert_error; + } + + service_context = uvm_service_block_context_cpu_alloc(); + if (!service_context) { + status = NV_ERR_NO_MEMORY; + goto unlock; + } + + service_context->cpu_fault.wakeup_time_stamp = 0; + + // The mmap_lock might be held in write mode, but the mode doesn't matter + // for the purpose of lock ordering and we don't rely on it being in write + // anywhere so just record it as read mode in all cases. + uvm_record_lock_mmap_lock_read(vma->vm_mm); + + do { + bool do_sleep = false; + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) { + NvU64 now = NV_GETTIME(); + if (now < service_context->cpu_fault.wakeup_time_stamp) + do_sleep = true; + + if (do_sleep) + uvm_tools_record_throttling_start(va_space, fault_addr, UVM_ID_CPU); + + // Drop the VA space lock while we sleep + uvm_va_space_up_read(va_space); + + // usleep_range is preferred because msleep has a 20ms granularity + // and udelay uses a busy-wait loop. usleep_range uses high-resolution + // timers and, by adding a range, the Linux scheduler may coalesce + // our wakeup with others, thus saving some interrupts. + if (do_sleep) { + unsigned long nap_us = (service_context->cpu_fault.wakeup_time_stamp - now) / 1000; + + usleep_range(nap_us, nap_us + nap_us / 2); + } + } + + uvm_va_space_down_read(va_space); + + if (do_sleep) + uvm_tools_record_throttling_end(va_space, fault_addr, UVM_ID_CPU); + + status = uvm_va_block_find_create_managed(va_space, fault_addr, &va_block); + if (status != NV_OK) { + UVM_ASSERT_MSG(status == NV_ERR_NO_MEMORY, "status: %s\n", nvstatusToString(status)); + break; + } + + // Watch out, current->mm might not be vma->vm_mm + UVM_ASSERT(vma == uvm_va_range_vma(va_block->va_range)); + + // Loop until thrashing goes away. + status = uvm_va_block_cpu_fault(va_block, fault_addr, is_write, service_context); + } while (status == NV_WARN_MORE_PROCESSING_REQUIRED); + + if (status != NV_OK) { + UvmEventFatalReason reason; + + reason = uvm_tools_status_to_fatal_fault_reason(status); + UVM_ASSERT(reason != UvmEventFatalReasonInvalid); + + uvm_tools_record_cpu_fatal_fault(va_space, fault_addr, is_write, reason); + } + + tools_enabled = va_space->tools.enabled; + + if (status == NV_OK) { + uvm_va_space_global_gpus_in_mask(va_space, + &gpus_to_check_for_ecc, + &service_context->cpu_fault.gpus_to_check_for_ecc); + uvm_global_mask_retain(&gpus_to_check_for_ecc); + } + + uvm_va_space_up_read(va_space); + uvm_record_unlock_mmap_lock_read(vma->vm_mm); + + if (status == NV_OK) { + status = uvm_global_mask_check_ecc_error(&gpus_to_check_for_ecc); + uvm_global_mask_release(&gpus_to_check_for_ecc); + } + + if (tools_enabled) + uvm_tools_flush_events(); + + // Major faults involve I/O in order to resolve the fault. + // If any pages were DMA'ed between the GPU and host memory, that makes it a major fault. + // A process can also get statistics for major and minor faults by calling readproc(). 
+ major_fault = service_context->cpu_fault.did_migrate; + uvm_service_block_context_cpu_free(service_context); + +unlock: + // TODO: Bug 2583279: See the comment above the matching lock acquisition + uvm_up_read_no_tracking(&g_uvm_global.pm.lock); + +convert_error: + switch (status) { + case NV_OK: + case NV_ERR_BUSY_RETRY: + return VM_FAULT_NOPAGE | (major_fault ? VM_FAULT_MAJOR : 0); + case NV_ERR_NO_MEMORY: + return VM_FAULT_OOM; + default: + return VM_FAULT_SIGBUS; + } +} + + +static vm_fault_t uvm_vm_fault_entry(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + UVM_ENTRY_RET(uvm_vm_fault(vma, vmf)); +} + +static vm_fault_t uvm_vm_fault_wrapper(struct vm_fault *vmf) +{ +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + return uvm_vm_fault(vmf->vma, vmf); +#else + return uvm_vm_fault(NULL, vmf); +#endif +} + +static vm_fault_t uvm_vm_fault_wrapper_entry(struct vm_fault *vmf) +{ + UVM_ENTRY_RET(uvm_vm_fault_wrapper(vmf)); +} + +static struct vm_operations_struct uvm_vm_ops_managed = +{ + .open = uvm_vm_open_managed_entry, + .close = uvm_vm_close_managed_entry, + +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + .fault = uvm_vm_fault_wrapper_entry, + .page_mkwrite = uvm_vm_fault_wrapper_entry, +#else + .fault = uvm_vm_fault_entry, + .page_mkwrite = uvm_vm_fault_entry, +#endif +}; + +// vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs, +// freeing the allocation, and destroying the va_range are handled by UVM_FREE. +static void uvm_vm_open_semaphore_pool(struct vm_area_struct *vma) +{ + struct vm_area_struct *origin_vma = (struct vm_area_struct *)vma->vm_private_data; + uvm_va_space_t *va_space = uvm_va_space_get(origin_vma->vm_file); + uvm_va_range_t *va_range; + bool is_fork = (vma->vm_mm != origin_vma->vm_mm); + NV_STATUS status; + + uvm_record_lock_mmap_lock_write(current->mm); + + uvm_va_space_down_write(va_space); + + va_range = uvm_va_range_find(va_space, origin_vma->vm_start); + UVM_ASSERT(va_range); + UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL && + va_range->node.start == origin_vma->vm_start && + va_range->node.end + 1 == origin_vma->vm_end, + "origin vma [0x%llx, 0x%llx); va_range [0x%llx, 0x%llx) type %d\n", + (NvU64)origin_vma->vm_start, (NvU64)origin_vma->vm_end, va_range->node.start, + va_range->node.end + 1, va_range->type); + + // Semaphore pool vmas do not have vma wrappers, but some functions will + // assume vm_private_data is a wrapper. + vma->vm_private_data = NULL; + + if (is_fork) { + // If we forked, leave the parent vma alone. + uvm_disable_vma(vma); + + // uvm_disable_vma unmaps in the parent as well; clear the uvm_mem CPU + // user mapping metadata and then remap. 
+ uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem); + + status = uvm_mem_map_cpu_user(va_range->semaphore_pool.mem, va_range->va_space, origin_vma); + if (status != NV_OK) { + UVM_DBG_PRINT("Failed to remap semaphore pool to CPU for parent after fork; status = %d (%s)", + status, nvstatusToString(status)); + origin_vma->vm_ops = &uvm_vm_ops_disabled; + } + } + else { + origin_vma->vm_private_data = NULL; + origin_vma->vm_ops = &uvm_vm_ops_disabled; + vma->vm_ops = &uvm_vm_ops_disabled; + uvm_mem_unmap_cpu_user(va_range->semaphore_pool.mem); + } + + uvm_va_space_up_write(va_space); + + uvm_record_unlock_mmap_lock_write(current->mm); +} + +static void uvm_vm_open_semaphore_pool_entry(struct vm_area_struct *vma) +{ + UVM_ENTRY_VOID(uvm_vm_open_semaphore_pool(vma)); +} + +// vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs, +// freeing the allocation, and destroying the va_range are handled by UVM_FREE. +static void uvm_vm_close_semaphore_pool(struct vm_area_struct *vma) +{ + uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file); + + if (current->mm != NULL) + uvm_record_lock_mmap_lock_write(current->mm); + + uvm_va_space_down_read(va_space); + + uvm_destroy_vma_semaphore_pool(vma); + + uvm_va_space_up_read(va_space); + + if (current->mm != NULL) + uvm_record_unlock_mmap_lock_write(current->mm); +} + +static void uvm_vm_close_semaphore_pool_entry(struct vm_area_struct *vma) +{ + UVM_ENTRY_VOID(uvm_vm_close_semaphore_pool(vma)); +} + +static struct vm_operations_struct uvm_vm_ops_semaphore_pool = +{ + .open = uvm_vm_open_semaphore_pool_entry, + .close = uvm_vm_close_semaphore_pool_entry, + +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + .fault = uvm_vm_fault_sigbus_wrapper_entry, +#else + .fault = uvm_vm_fault_sigbus_entry, +#endif +}; + +static int uvm_mmap(struct file *filp, struct vm_area_struct *vma) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_range_t *va_range; + NV_STATUS status = uvm_global_get_status(); + int ret = 0; + bool vma_wrapper_allocated = false; + + if (status != NV_OK) + return -nv_status_to_errno(status); + + status = uvm_va_space_initialized(va_space); + if (status != NV_OK) + return -EBADFD; + + // When the VA space is associated with an mm, all vmas under the VA space + // must come from that mm. + if (uvm_va_space_mm_enabled(va_space)) { + UVM_ASSERT(va_space->va_space_mm.mm); + if (va_space->va_space_mm.mm != current->mm) + return -EOPNOTSUPP; + } + + // UVM mappings are required to set offset == VA. This simplifies things + // since we don't have to worry about address aliasing (except for fork, + // handled separately) and it makes unmap_mapping_range simpler. + if (vma->vm_start != (vma->vm_pgoff << PAGE_SHIFT)) { + UVM_DBG_PRINT_RL("vm_start 0x%lx != vm_pgoff 0x%lx\n", vma->vm_start, vma->vm_pgoff << PAGE_SHIFT); + return -EINVAL; + } + + // Enforce shared read/writable mappings so we get all fault callbacks + // without the kernel doing COW behind our backs. The user can still call + // mprotect to change protections, but that will only hurt user space. + if ((vma->vm_flags & (VM_SHARED|VM_READ|VM_WRITE)) != + (VM_SHARED|VM_READ|VM_WRITE)) { + UVM_DBG_PRINT_RL("User requested non-shared or non-writable mapping\n"); + return -EINVAL; + } + + // If the PM lock cannot be acquired, disable the VMA and report success + // to the caller. The caller is expected to determine whether the + // map operation succeeded via an ioctl() call. 
This is necessary to + // safely handle MAP_FIXED, which needs to complete atomically to prevent + // the loss of the virtual address range. + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) { + uvm_disable_vma(vma); + return 0; + } + + uvm_record_lock_mmap_lock_write(current->mm); + + // VM_MIXEDMAP Required to use vm_insert_page + // + // VM_DONTEXPAND mremap can grow a vma in place without giving us any + // callback. We need to prevent this so our ranges stay + // up-to-date with the vma. This flag doesn't prevent + // mremap from moving the mapping elsewhere, nor from + // shrinking it. We can detect both of those cases however + // with vm_ops->open() and vm_ops->close() callbacks. + // + // Using VM_DONTCOPY would be nice, but madvise(MADV_DOFORK) can reset that + // so we have to handle vm_open on fork anyway. We could disable MADV_DOFORK + // with VM_IO, but that causes other mapping issues. + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + + vma->vm_ops = &uvm_vm_ops_managed; + + // This identity assignment is needed so uvm_vm_open can find its parent vma + vma->vm_private_data = uvm_vma_wrapper_alloc(vma); + if (!vma->vm_private_data) { + ret = -ENOMEM; + goto out; + } + vma_wrapper_allocated = true; + + // The kernel has taken mmap_lock in write mode, but that doesn't prevent + // this va_space from being modified by the GPU fault path or from the ioctl + // path where we don't have this mm for sure, so we have to lock the VA + // space directly. + uvm_va_space_down_write(va_space); + + // uvm_va_range_create_mmap will catch collisions. Below are some example + // cases which can cause collisions. There may be others. + // 1) An overlapping range was previously created with an ioctl, for example + // for an external mapping. + // 2) This file was passed to another process via a UNIX domain socket + status = uvm_va_range_create_mmap(va_space, current->mm, vma->vm_private_data, NULL); + + if (status == NV_ERR_UVM_ADDRESS_IN_USE) { + // If the mmap is for a semaphore pool, the VA range will have been + // allocated by a previous ioctl, and the mmap just creates the CPU + // mapping. 
+ va_range = uvm_va_range_find(va_space, vma->vm_start); + if (va_range && va_range->node.start == vma->vm_start && + va_range->node.end + 1 == vma->vm_end && + va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL) { + uvm_vma_wrapper_destroy(vma->vm_private_data); + vma_wrapper_allocated = false; + vma->vm_private_data = vma; + vma->vm_ops = &uvm_vm_ops_semaphore_pool; + status = uvm_mem_map_cpu_user(va_range->semaphore_pool.mem, va_range->va_space, vma); + } + } + + if (status != NV_OK) { + UVM_DBG_PRINT_RL("Failed to create or map VA range for vma [0x%lx, 0x%lx): %s\n", + vma->vm_start, vma->vm_end, nvstatusToString(status)); + ret = -nv_status_to_errno(status); + } + + uvm_va_space_up_write(va_space); + +out: + if (ret != 0 && vma_wrapper_allocated) + uvm_vma_wrapper_destroy(vma->vm_private_data); + + uvm_record_unlock_mmap_lock_write(current->mm); + + uvm_up_read(&g_uvm_global.pm.lock); + + return ret; +} + +static int uvm_mmap_entry(struct file *filp, struct vm_area_struct *vma) +{ + UVM_ENTRY_RET(uvm_mmap(filp, vma)); +} + +static NV_STATUS uvm_api_initialize(UVM_INITIALIZE_PARAMS *params, struct file *filp) +{ + return uvm_va_space_initialize(uvm_va_space_get(filp), params->flags); +} + +static NV_STATUS uvm_api_pageable_mem_access(UVM_PAGEABLE_MEM_ACCESS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + params->pageableMemAccess = uvm_va_space_pageable_mem_access_supported(va_space) ? NV_TRUE : NV_FALSE; + return NV_OK; +} + +static long uvm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + switch (cmd) + { + case UVM_DEINITIALIZE: + return 0; + + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_INITIALIZE, uvm_api_initialize); + + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_PAGEABLE_MEM_ACCESS, uvm_api_pageable_mem_access); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_PAGEABLE_MEM_ACCESS_ON_GPU, uvm_api_pageable_mem_access_on_gpu); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_REGISTER_GPU, uvm_api_register_gpu); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNREGISTER_GPU, uvm_api_unregister_gpu); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_CREATE_RANGE_GROUP, uvm_api_create_range_group); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_DESTROY_RANGE_GROUP, uvm_api_destroy_range_group); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ENABLE_PEER_ACCESS, uvm_api_enable_peer_access); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_DISABLE_PEER_ACCESS, uvm_api_disable_peer_access); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_SET_RANGE_GROUP, uvm_api_set_range_group); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_CREATE_EXTERNAL_RANGE, uvm_api_create_external_range); + UVM_ROUTE_CMD_ALLOC_INIT_CHECK(UVM_MAP_EXTERNAL_ALLOCATION, uvm_api_map_external_allocation); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_MAP_EXTERNAL_SPARSE, uvm_api_map_external_sparse); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_FREE, uvm_api_free); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_PREVENT_MIGRATION_RANGE_GROUPS, uvm_api_prevent_migration_range_groups); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ALLOW_MIGRATION_RANGE_GROUPS, uvm_api_allow_migration_range_groups); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_SET_PREFERRED_LOCATION, uvm_api_set_preferred_location); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNSET_PREFERRED_LOCATION, uvm_api_unset_preferred_location); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_SET_ACCESSED_BY, uvm_api_set_accessed_by); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNSET_ACCESSED_BY, uvm_api_unset_accessed_by); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_REGISTER_GPU_VASPACE, uvm_api_register_gpu_va_space); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNREGISTER_GPU_VASPACE, 
uvm_api_unregister_gpu_va_space); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_REGISTER_CHANNEL, uvm_api_register_channel); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNREGISTER_CHANNEL, uvm_api_unregister_channel); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ENABLE_READ_DUPLICATION, uvm_api_enable_read_duplication); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_DISABLE_READ_DUPLICATION, uvm_api_disable_read_duplication); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_MIGRATE, uvm_api_migrate); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_ENABLE_SYSTEM_WIDE_ATOMICS, uvm_api_enable_system_wide_atomics); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_DISABLE_SYSTEM_WIDE_ATOMICS, uvm_api_disable_system_wide_atomics); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_READ_PROCESS_MEMORY, uvm_api_tools_read_process_memory); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_WRITE_PROCESS_MEMORY, uvm_api_tools_write_process_memory); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_GET_PROCESSOR_UUID_TABLE, uvm_api_tools_get_processor_uuid_table); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_MAP_DYNAMIC_PARALLELISM_REGION, uvm_api_map_dynamic_parallelism_region); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_UNMAP_EXTERNAL, uvm_api_unmap_external); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_MIGRATE_RANGE_GROUP, uvm_api_migrate_range_group); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TOOLS_FLUSH_EVENTS, uvm_api_tools_flush_events); + UVM_ROUTE_CMD_ALLOC_INIT_CHECK(UVM_ALLOC_SEMAPHORE_POOL, uvm_api_alloc_semaphore_pool); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_CLEAN_UP_ZOMBIE_RESOURCES, uvm_api_clean_up_zombie_resources); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_POPULATE_PAGEABLE, uvm_api_populate_pageable); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_VALIDATE_VA_RANGE, uvm_api_validate_va_range); + } + + // Try the test ioctls if none of the above matched + return uvm_test_ioctl(filp, cmd, arg); +} + +static long uvm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + long ret; + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + ret = uvm_ioctl(filp, cmd, arg); + + uvm_up_read(&g_uvm_global.pm.lock); + + uvm_thread_assert_all_unlocked(); + + return ret; +} + +static long uvm_unlocked_ioctl_entry(struct file *filp, unsigned int cmd, unsigned long arg) +{ + UVM_ENTRY_RET(uvm_unlocked_ioctl(filp, cmd, arg)); +} + +static const struct file_operations uvm_fops = +{ + .open = uvm_open_entry, + .release = uvm_release_entry, + .mmap = uvm_mmap_entry, + .unlocked_ioctl = uvm_unlocked_ioctl_entry, +#if NVCPU_IS_X86_64 + .compat_ioctl = uvm_unlocked_ioctl_entry, +#endif + .owner = THIS_MODULE, +}; + +bool uvm_file_is_nvidia_uvm(struct file *filp) +{ + return (filp != NULL) && (filp->f_op == &uvm_fops); +} + +NV_STATUS uvm_test_register_unload_state_buffer(UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER_PARAMS *params, struct file *filp) +{ + long ret; + int write = 1; + int force = 0; + struct page *page; + NV_STATUS status = NV_OK; + + if (!IS_ALIGNED(params->unload_state_buf, sizeof(NvU64))) + return NV_ERR_INVALID_ADDRESS; + + // Hold mmap_lock to call get_user_pages(), the UVM locking helper functions + // are not used because unload_state_buf may be a managed memory pointer and + // therefore a locking assertion from the CPU fault handler could be fired. 
+ nv_mmap_read_lock(current->mm); + ret = NV_GET_USER_PAGES(params->unload_state_buf, 1, write, force, &page, NULL); + nv_mmap_read_unlock(current->mm); + + if (ret < 0) + return errno_to_nv_status(ret); + UVM_ASSERT(ret == 1); + + uvm_mutex_lock(&g_uvm_global.global_lock); + + if (g_uvm_global.unload_state.ptr) { + put_page(page); + status = NV_ERR_IN_USE; + goto error; + } + + g_uvm_global.unload_state.page = page; + g_uvm_global.unload_state.ptr = (NvU64 *)((char *)kmap(page) + (params->unload_state_buf & ~PAGE_MASK)); + *g_uvm_global.unload_state.ptr = 0; + +error: + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return status; +} + +static void uvm_test_unload_state_exit(void) +{ + if (g_uvm_global.unload_state.ptr) { + kunmap(g_uvm_global.unload_state.page); + put_page(g_uvm_global.unload_state.page); + } +} + +static int uvm_chardev_create(void) +{ + dev_t uvm_dev; + + int ret = alloc_chrdev_region(&g_uvm_base_dev, + 0, + NVIDIA_UVM_NUM_MINOR_DEVICES, + NVIDIA_UVM_DEVICE_NAME); + if (ret != 0) { + UVM_ERR_PRINT("alloc_chrdev_region failed: %d\n", ret); + return ret; + } + uvm_dev = MKDEV(MAJOR(g_uvm_base_dev), NVIDIA_UVM_PRIMARY_MINOR_NUMBER); + + uvm_init_character_device(&g_uvm_cdev, &uvm_fops); + ret = cdev_add(&g_uvm_cdev, uvm_dev, 1); + if (ret != 0) { + UVM_ERR_PRINT("cdev_add (major %u, minor %u) failed: %d\n", MAJOR(uvm_dev), MINOR(uvm_dev), ret); + unregister_chrdev_region(g_uvm_base_dev, NVIDIA_UVM_NUM_MINOR_DEVICES); + return ret; + } + + return 0; +} + +static void uvm_chardev_exit(void) +{ + cdev_del(&g_uvm_cdev); + unregister_chrdev_region(g_uvm_base_dev, NVIDIA_UVM_NUM_MINOR_DEVICES); +} + +static int uvm_init(void) +{ + bool initialized_globals = false; + bool added_device = false; + int ret; + + NV_STATUS status = uvm_global_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_global_init() failed: %s\n", nvstatusToString(status)); + ret = -ENODEV; + goto error; + } + initialized_globals = true; + + ret = uvm_chardev_create(); + if (ret != 0) { + UVM_ERR_PRINT("uvm_chardev_create failed: %d\n", ret); + goto error; + } + added_device = true; + + ret = uvm_tools_init(g_uvm_base_dev); + if (ret != 0) { + UVM_ERR_PRINT("uvm_tools_init() failed: %d\n", ret); + goto error; + } + + pr_info("Loaded the UVM driver, major device number %d.\n", MAJOR(g_uvm_base_dev)); + + if (uvm_enable_builtin_tests) + pr_info("Built-in UVM tests are enabled. This is a security risk.\n"); + + + // After Open RM is released, both the enclosing "#if" and this comment + // block should be removed, because the uvm_hmm_is_enabled_system_wide() + // check is both necessary and sufficient for reporting functionality. + // Until that time, however, we need to avoid advertisting UVM's ability to + // enable HMM functionality. 
+ + if (uvm_hmm_is_enabled_system_wide()) + UVM_INFO_PRINT("HMM (Heterogeneous Memory Management) is enabled in the UVM driver.\n"); + + + return 0; + +error: + if (added_device) + uvm_chardev_exit(); + + if (initialized_globals) + uvm_global_exit(); + + UVM_ERR_PRINT("uvm init failed: %d\n", ret); + + return ret; +} + +static int __init uvm_init_entry(void) +{ + UVM_ENTRY_RET(uvm_init()); +} + +static void uvm_exit(void) +{ + uvm_tools_exit(); + uvm_chardev_exit(); + + uvm_global_exit(); + + uvm_test_unload_state_exit(); + + pr_info("Unloaded the UVM driver.\n"); +} + +static void __exit uvm_exit_entry(void) +{ + UVM_ENTRY_VOID(uvm_exit()); +} + +module_init(uvm_init_entry); +module_exit(uvm_exit_entry); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_INFO(supported, "external"); + diff --git a/kernel-open/nvidia-uvm/uvm.h b/kernel-open/nvidia-uvm/uvm.h new file mode 100644 index 000000000..32922fb7d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm.h @@ -0,0 +1,3902 @@ +/******************************************************************************* + Copyright (c) 2013-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +// +// uvm.h +// +// This file contains the UVM API declarations, for the userspace-to-kernel +// calls. For legacy API definitions that are in use on Windows, see +// uvm_legacy.h. +// + +// UVM API signature modification steps +// In order to change API signature for any of the APIs defined in this file, a +// particular sequence of steps has to be followed since the consumer of this +// API (i.e. CUDA) belongs to a different module branch than the one for this +// file. Here are the steps to change the signature for a hypothetical API named +// UvmExampleApi. The assumption being made here is that this file is being +// modified in chips_a. +// 1) Increment the value of UVM_API_LATEST_REVISION defined in this file. +// 2) Use the macro UVM_API_REV_IS_AT_MOST to define the two revisions of the +// API as follows: +// #if UVM_API_REV_IS_AT_MOST() +// // Old UvmExampleApi declaration +// #else +// // New UvmExampleApi declaration +// #endif +// 3) Do the same thing for the function definition, and for any structs that +// are taken as arguments to these functions. +// 4) Let this change propagate over to cuda_a, so that the CUDA driver can +// start using the new API by bumping up the API version number its using. +// This can be found in gpgpu/cuda/cuda.nvmk. 
+// 5) Once the cuda_a changes have made it back into chips_a, remove the old API +// declaration, definition, and any old structs that were in use. + +#ifndef _UVM_H_ +#define _UVM_H_ + +#define UVM_API_LATEST_REVISION 7 + +#if !defined(UVM_API_REVISION) +#error "please define UVM_API_REVISION macro to a desired version number or UVM_API_LATEST_REVISION macro" +#endif + +#define UVM_API_REV_IS_AT_MOST(rev) (UVM_API_REVISION <= rev) + +#include "uvm_types.h" +#include "uvm_user_types.h" +#include "uvm_legacy.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// UvmSetDriverVersion +// +// Informs the user-mode layer which kernel driver version is running. The user- +// mode layer uses this information to know what flavor to use when calling +// kernel APIs. +// +// If this API is not called, the user-mode layer assumes that the kernel +// version is the same as the user-mode layer version. +// +// The last UvmDeinitialize will reset this state. +// +// If this API is called, it must be called before UvmInitialize. It is an error +// to call this API after UvmInitialize and before the last UvmDeinitialize, or +// to call this API more than once before the last UvmDeinitialize. +// +// Arguments: +// major: (INPUT) +// The kernel driver's major version number, such as 384. +// +// changelist: (INPUT) +// The changelist at which the kernel driver was built. +// +// Error codes: +// NV_ERR_INVALID_STATE: +// UvmInitialize or UvmSetDriverVersion has already been called. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmSetDriverVersion(NvU32 major, NvU32 changelist); + +//------------------------------------------------------------------------------ +// UvmInitialize +// +// This must be called before any other UVM functions except for +// UvmSetDriverVersion. Repeated calls to UvmInitialize increment a refcount, +// which is decremented by calls to UvmDeinitialize. UVM deinitilization occurs +// when the refcount reaches zero. +// +// The UVM file descriptor passed in can either be UVM_AUTO_FD or a valid file +// descriptor created during a prior call to UvmInitialize. If UVM_AUTO_FD is +// passed and the refcount is zero, a new file descriptor is created. Subsequent +// calls must either also specify UVM_AUTO_FD or use the current file +// descriptor. If the first call to UvmInitialize did not specify UVM_AUTO_FD, +// all subsequent calls must use the same file descriptor used in the initial +// call. The file descriptor that is currently in use can be retrieved using +// UvmGetFileDescriptor. +// +// If flags does not contain UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE, the +// UvmInitialize call which creates the file descriptor will associate the +// calling process with that file descriptor when the Operating System can +// support such an association. In that case UvmInitialize may be called using +// the same file in other processes, but internally the file remains associated +// with the original process. +// +// Arguments: +// fd: (INPUT) +// The UVM file descriptor to initialize UVM with. Passing in +// UVM_AUTO_FD creates a new file descriptor on the first call to +// UvmInitialize. 
+// +// flags: (INPUT) +// Must be a combination of 0 or more of following flags: +// +// - UVM_INIT_FLAGS_DISABLE_HMM +// Specifying this flag will only have an effect if the system +// allows GPUs to read/write system (CPU) pageable memory and the +// GPUs do not have hardware support to do it transparently, and the +// UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE flag is not specified. +// In such cases pageable access from the GPU will be disabled. +// +// Pageable memory here refers to memory allocated by the Operating +// System for the process's globals, stack variables, heap +// allocations, etc. that has not been registered for CUDA access +// using cudaHostRegister. +// +// - UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE +// Specifying this flag will prevent UVM from creating any +// association between this process and the UVM file descriptor. +// Pageable memory access of any kind will be disabled (regardless +// of whether UVM_INIT_FLAGS_DISABLE_HMM was specified) and the GPU +// resources used by the UVM file descriptor will be freed when the +// last reference to the file is dropped rather than when this +// process exits. +// +// If this flag is not specified, calling UvmMemMap or +// UvmAllocSemaphorePool on the same file from a different process +// may return an error. +// +// If UvmInitialize is called multiple times on the same file, even from +// different processes, the flags to each call must match. +// +// Error codes: +// NV_ERR_NOT_SUPPORTED: +// The Linux kernel is not able to support UVM. This could be because +// the kernel is too old, or because it lacks a feature that UVM +// requires. The kernel log will have details. +// +// NV_ERR_INVALID_ARGUMENT: +// The file descriptor passed in is neither UVM_AUTO_FD nor a valid file +// descriptor created during a prior call to UvmInitialize, or the flags +// do not match a prior call to UvmInitialize. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +#if UVM_API_REV_IS_AT_MOST(4) +NV_STATUS UvmInitialize(UvmFileDescriptor fd); +#else +NV_STATUS UvmInitialize(UvmFileDescriptor fd, + NvU64 flags); +#endif + +//------------------------------------------------------------------------------ +// UvmDeinitialize +// +// Releases the reference implicitly obtained by UvmInitialize. If the refcount +// reaches zero, cleans up all UVM resources associated with the calling +// process. Any channels that are still registered will be unregistered prior to +// unmapping any managed allocations. Any resources that have been shared with +// other processes and are still being used will continue to remain valid. +// +// Error codes: +// NV_ERR_INVALID_STATE: +// Refcount is zero. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmDeinitialize(void); + +//------------------------------------------------------------------------------ +// UvmReopen +// +// Reinitializes the UVM driver after checking for minimal user-mode state. +// Before calling this function, all GPUs must be unregistered with +// UvmUnregisterGpu() and all allocated VA ranges must be freed with UvmFree(). 
+// Note that it is not required to release VA ranges that were reserved with +// UvmReserveVa(). +// + + + + + + +// UvmReopen() closes the open file returned by UvmGetFileDescriptor() and +// replaces it with a new open file with the same name. +// +// Arguments: +// flags: (INPUT) +// Must be zero. UVM will be reinitialized with the +// Same flags that were passed to UvmInitialize() originally. +// +// Error codes: +// NV_ERR_INVALID_STATE: +// UVM was not initialized before calling this function. +// +// NV_ERR_UVM_ADDRESS_IN_USE: +// Not all allocated VA ranges were freed before calling this function. +// +// NV_ERR_IN_USE: +// Not all GPUs were unregistered before calling this function. +// +// NV_ERR_INVALID_FLAGS: +// Flags is not zero. +// +// NV_ERR_OPERATING_SYSTEM: +// Replacing the original UVM file descriptor failed. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmReopen(NvU64 flags); + +//------------------------------------------------------------------------------ +// UvmIsPageableMemoryAccessSupported +// +// Returns true only if pageable memory access from GPUs is supported by the +// system and that support was not explicitly disabled via UvmInitialize. +// +// Pageable memory here refers to memory allocated by the Operating System for +// the process's globals, stack variables, heap allocations, etc that has not +// been registered for CUDA access using cudaHostRegister. +// +// Note that this does not check whether GPUs are present which can make use of +// this feature, just whether system support exists. If +// UvmIsPageableMemoryAccessSupported reports that pageable memory access is +// supported, UvmIsPageableMemoryAccessSupportedOnGpu can be used for querying +// per-GPU support. +// +// Arguments: +// pageableMemAccess: (OUTPUT) +// Returns true (non-zero) if the system supports pageable memory access +// from GPUs and that support was not explicitly disabled via +// UvmInitialize, and false (zero) otherwise. +// +// Error codes: +// NV_ERR_INVALID_STATE: +// UVM was not initialized. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmIsPageableMemoryAccessSupported(NvBool *pageableMemAccess); + +//------------------------------------------------------------------------------ +// UvmIsPageableMemoryAccessSupportedOnGpu +// +// Returns whether pageable memory access is supported from the given GPU on +// this system and that support was not explicitly disabled via UvmInitialize. +// The GPU must have been previously registered with UvmRegisterGpu first. +// +// Pageable memory here refers to memory allocated by the Operating System for +// the process's globals, stack variables, heap allocations, etc that has not +// been registered for CUDA access using cudaHostRegister. +// +// Arguments: +// gpuUuid: (INPUT) +// UUID of the GPU for which pageable memory access support is queried. +// +// pageableMemAccess: (OUTPUT) +// Returns true (non-zero) if the GPU represented by gpuUuid supports +// pageable memory access and that support was not explicitly disabled +// via UvmInitialize, and false (zero) otherwise. +// +// Error codes: +// NV_ERR_INVALID_STATE: +// UVM was not initialized. 
+// +// NV_ERR_INVALID_DEVICE: +// The given GPU has not been registered. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmIsPageableMemoryAccessSupportedOnGpu(const NvProcessorUuid *gpuUuid, + NvBool *pageableMemAccess); + +//------------------------------------------------------------------------------ +// UvmRegisterGpu +// +// Registers a GPU with UVM. If this is the first process to register this GPU, +// the UVM driver initializes resources on the GPU and prepares it for CUDA +// usage. Calling UvmRegisterGpu multiple times on the same GPU from the same +// process results in an error. +// +// Arguments: +// gpuUuid: (INPUT) +// UUID of the GPU to register. +// +// Error codes: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_INSUFFICIENT_RESOURCES +// Internal client or object allocation failed. +// +// NV_ERR_INVALID_DEVICE: +// The GPU referred to by pGpuUuid has already been registered by this +// process. +// +// The GPU referred to by pGpuUuid doesn't have a NVLINK2 link to the +// CPU but a GPU with such a link has already been registered by this +// process, or vice-versa. +// +// NV_ERR_NOT_SUPPORTED: +// The GPU referred to by pGpuUuid is not supported by UVM or the GPU +// is configured to run in virtualization mode without SRIOV support. +// +// NV_ERR_GPU_UUID_NOT_FOUND: +// The GPU referred to by pGpuUuid was not found. +// +// NV_ERR_PAGE_TABLE_NOT_AVAIL: +// The system requires that the UVM file descriptor be associated with a +// single process, and that process has exited. +// +// NV_ERR_INVALID_ARGUMENT: +// OS state required to register the GPU is not present. +// +// NV_ERR_OBJECT_NOT_FOUND: +// OS state required to register the GPU is not present. +// +// NV_ERR_INVALID_STATE: +// OS state required to register the GPU is malformed. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmRegisterGpu(const NvProcessorUuid *gpuUuid); + +//------------------------------------------------------------------------------ +// UvmRegisterGpuSmc +// +// The same as UvmRegisterGpu, but takes additional parameters to specify the +// GPU partition being registered if SMC is enabled. +// +// TODO: Bug 2844714: Merge UvmRegisterGpuSmc() with UvmRegisterGpu() once +// the initial SMC support is in place. +// +// Arguments: +// gpuUuid: (INPUT) +// UUID of the parent GPU of the SMC partition to register. +// +// platformParams: (INPUT) +// User handles identifying the partition to register. +// +// Error codes (see UvmRegisterGpu also): +// +// NV_ERR_INVALID_STATE: +// SMC was not enabled, or the partition identified by the user +// handles or its configuration changed. +// +NV_STATUS UvmRegisterGpuSmc(const NvProcessorUuid *gpuUuid, + const UvmGpuPlatformParams *platformParams); + +//------------------------------------------------------------------------------ +// UvmUnregisterGpu +// +// Unregisters a GPU from UVM. If this is the last process to unregister this +// GPU, the UVM driver frees all resources allocated on the GPU when the GPU +// was first registered. Any pages on the GPU allocated by the UVM driver will +// be migrated to CPU memory before the GPU resources are freed. 
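+//
+// As an illustrative sketch (not part of this change), a user-space client
+// typically brackets GPU use with this register/unregister pair. The example
+// below assumes UVM_API_REVISION > 4 (so UvmInitialize takes a flags
+// argument) and that gpuUuid was obtained from the RM/CUDA driver APIs:
+//
+//     NV_STATUS status = UvmInitialize(UVM_AUTO_FD, 0);
+//     if (status == NV_OK)
+//         status = UvmRegisterGpu(&gpuUuid);
+//
+//     // ... allocate, map and use managed memory ...
+//
+//     UvmUnregisterGpu(&gpuUuid);
+//     UvmDeinitialize();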
+// +// Any GPU VA spaces or channels that were registered on this GPU using +// UvmRegisterGpuVaSpace or UvmRegisterChannel respectively, will be +// unregistered. Any state that was set by calling UvmSetPreferredLocation or +// UvmSetAccessedBy for this GPU will be cleared. Any pages that were associated +// with a non-migratable range group and had this GPU as their preferred +// location will have their range group association changed to +// UVM_RANGE_GROUP_ID_NONE. +// + + + + + + + + +// Arguments: +// gpuUuid: (INPUT) +// UUID of the GPU to unregister. +// +// Error codes: +// NV_ERR_INVALID_DEVICE: +// The GPU referred to by pGpuUuid was not registered by this process. +// +// NV_ERR_GPU_UUID_NOT_FOUND: +// The GPU referred to by pGpuUuid was not found. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmUnregisterGpu(const NvProcessorUuid *gpuUuid); + +//------------------------------------------------------------------------------ +// UvmRegisterGpuVaSpace +// +// Registers a GPU's VA (virtual address) space for use with UVM. Only one GPU +// VA space can be registered for a given GPU at a time. Once a VA space has +// been registered for a GPU, all page table updates for that VA space on that +// GPU will be managed by the UVM driver. +// +// The GPU must have been registered using UvmRegisterGpu prior to making this +// call. +// +// On systems with GPUs that support transparent access to pageable memory, this +// feature is enabled per GPU VA space. This setting must match for all +// registered GPU VA spaces. +// +// Any VA ranges that were allocated using UvmAllocSemaphorePool will be mapped +// on this GPU with the mapping and caching attributes as specified during that +// call, or with default attributes if none were specified. +// +// Any VA ranges that had a preferred location set to this GPU will be mapped on +// this GPU only if this GPU is not fault-capable and the VA range belongs to a +// non-migratable range group. If such a mapping cannot be established, an error +// is returned. +// +// Any VA ranges which have accessed-by set for this GPU will be mapped on this +// GPU. If that VA range resides in a PCIe peer GPU's memory and P2P support +// between the two GPUs has not been enabled via UvmEnablePeerAccess, then a +// mapping won't be established. Also, if read duplication is enabled for this +// VA range, or its preferred location is set to this GPU, and this GPU is a +// fault-capable GPU, then a mapping will not be established. If this is a +// non-fault-capable GPU and a mapping cannot be established, then an error is +// returned. +// +// If P2P support has been enabled between this GPU and another GPU that also +// has a GPU VA space registered, then the two GPU VA spaces must support the +// same set of page sizes for GPU mappings. Otherwise, an error is returned. +// +// Note that all the aforementioned VA ranges must lie within the largest +// possible virtual address supported by this GPU. +// +// Arguments: +// gpuUuid: (INPUT) +// UUID of the GPU to register. +// +// platformParams: (INPUT) +// On Linux: RM ctrl fd, hClient and hVaSpace. +// +// Error codes: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_OUT_OF_RANGE: +// A VA range that needs to be mapped on this GPU exceeds the largest +// virtual address supported by the GPU. 
+// +// NV_ERR_INVALID_DEVICE: +// The GPU referred to by gpuUuid was not registered or a VA space has +// already been registered for this GPU. Or this is a non-fault-capable +// GPU that is present in the accessed-by list of a VA range that +// resides on another non-fault-capable GPU, and P2P support between +// both GPUs is not enabled. +// +// NV_ERR_OTHER_DEVICE_FOUND: +// The UUID does not match the UUID of the device that is associated +// with the VA space handles in the platformParams argument. +// +// NV_ERR_INVALID_FLAGS: +// The VA space was originally allocated with UVM-incompatible flags. +// This includes the case in which the value for the setting to enable +// transparent access to pageable memory for the given GPU VA space does +// not match the value in previously-registered GPU VA spaces, or that +// value is set but pageable memory access has been disabled via +// UvmInitialize. +// +// NV_ERR_NOT_COMPATIBLE: +// The GPU referred to by gpuUuid has P2P support enabled with another +// GPU and the set of page sizes supported by the specified VA space +// doesn't match that of the VA space registered on the peer GPU. +// +// NV_ERR_INVALID_ARGUMENT: +// Some problem with the platform specific arguments was detected. +// +// NV_ERR_NOT_SUPPORTED: +// A GPU VA space has already been registered using a different UVM file +// descriptor in this process and this platform does not support that +// operation, or a GPU VA space has already been registered on this UVM +// file descriptor by a different process and this platform does not +// support that operation. +// +// NV_ERR_PAGE_TABLE_NOT_AVAIL: +// The system requires that the UVM file descriptor be associated with a +// single process, and that process has exited. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmRegisterGpuVaSpace(const NvProcessorUuid *gpuUuid, + const UvmGpuVaSpacePlatformParams *platformParams); + +//------------------------------------------------------------------------------ +// UvmUnregisterGpuVaSpace +// +// Unregisters the GPU VA space that was previously registered via a call to +// UvmRegisterGpuVaSpace. +// +// Any page table mappings created by UVM on that GPU for that VA space will be +// unmapped. Any channels that were registered on this GPU using +// UvmRegisterChannel will be unregistered. +// +// Arguments: +// gpuUuid: (INPUT) +// UUID of the GPU whose VA space should be unregistered. +// +// Error codes: +// NV_ERR_INVALID_DEVICE: +// The GPU referred to by gpuUuid was not registered or no VA space has +// been registered for this GPU. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmUnregisterGpuVaSpace(const NvProcessorUuid *gpuUuid); + +//------------------------------------------------------------------------------ +// UvmEnablePeerAccess +// +// Enables P2P (peer to peer) support in the UVM driver between two GPUs +// connected via PCIe. NVLink peers are automatically discovered/enabled in the +// driver at UvmRegisterGpu time. Enabling P2P support between two GPUs allows +// peer mappings to be created as part of fault servicing, memory allocation, +// etc. The P2P support is bidirectional i.e. 
enabling P2P between GPU A and +// GPU B also enables P2P support between GPU B and GPU A. +// +// The two GPUs must have been registered via UvmRegisterGpu prior to making +// this call. An error is returned if P2P support has already been enabled +// between these two GPUs in this process. +// +// The two GPUs must be connected via PCIe. An error is returned if the GPUs are +// not connected or are connected over an interconnect different than PCIe +// (NVLink, for example). +// +// If both GPUs have GPU VA spaces registered for them, the two GPU VA spaces +// must support the same set of page sizes for GPU mappings. +// +// If any VA range resides in one GPU's memory, and the peer GPU is in the +// accessed-by list of that VA range, then a peer mapping will be established +// unless the VA space for the peer GPU has not been registered, or read +// duplication is enabled for the VA range, or the preferred location of the VA +// range is the peer GPU. +// +// Arguments: +// gpuUuidA: (INPUT) +// UUID of GPU A. +// +// gpuUuidB: (INPUT) +// UUID of GPU B. +// +// Error codes: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_INVALID_DEVICE: +// At least one GPU has not been registered, P2P support has already +// been enabled between the two GPUs, or the GPUs are connected via an +// interconnect other than PCIe. +// +// NV_ERR_NOT_SUPPORTED: +// The two GPUs are not peer capable. +// +// NV_ERR_NOT_COMPATIBLE: +// Both GPUs have a GPU VA space registered for them and the two VA +// spaces don't support the same set of page sizes for GPU mappings. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmEnablePeerAccess(const NvProcessorUuid *gpuUuidA, + const NvProcessorUuid *gpuUuidB); + +//------------------------------------------------------------------------------ +// UvmDisablePeerAccess +// +// Disables P2P (peer to peer) support in the UVM driver between two GPUs. +// connected via PCIe. NVLink peers are automatically disabled in the driver +// at UvmUnregisterGpu time. Disabling P2P support between two GPUs removes all +// existing peer mappings from either GPU to the other, and also prevents new +// peer mappings from being established between the two GPUs. +// +// The two GPUs must be connected via PCIe. An error is returned if the GPUs are +// not connected or are connected over an interconnect different than PCIe +// (NVLink, for example). +// +// If one of the two GPUs is present in the accessed-by list of a non-migratable +// VA range that has a preferred location set to the other GPU, and the two GPUs +// are not fault-capable, then the GPU is removed from the accessed-by list of +// the range. +// +// Arguments: +// gpuUuidA: (INPUT) +// UUID of GPU A. +// +// gpuUuidB: (INPUT) +// UUID of GPU B. +// +// Error codes: +// NV_ERR_INVALID_DEVICE: +// At least one GPU has not been registered, or P2P support has not been +// enabled between the two GPUs, or the GPUs are connected via an +// interconnect other than PCIe. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. 
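+//
+// Illustrative sketch (not part of this change): peer access is enabled and
+// disabled symmetrically once both GPUs have been registered. uuidA and uuidB
+// below are assumed to be the UUIDs of two PCIe-connected, peer-capable GPUs:
+//
+//     NV_STATUS status = UvmEnablePeerAccess(&uuidA, &uuidB);
+//     if (status == NV_OK) {
+//         // ... peer mappings may now be created during fault servicing ...
+//         status = UvmDisablePeerAccess(&uuidA, &uuidB);
+//     }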
+// +//------------------------------------------------------------------------------ +NV_STATUS UvmDisablePeerAccess(const NvProcessorUuid *gpuUuidA, + const NvProcessorUuid *gpuUuidB); + +//------------------------------------------------------------------------------ +// UvmRegisterChannel +// +// Register a channel for use with UVM. Any faults that occur on this channel +// will be handled by the UVM driver. +// +// A GPU VA space must have been registered on this GPU via +// UvmRegisterGpuVaSpace prior to making this call. +// +// For channels that require associated mappings, the base and length of a +// virtual address region that was reserved via UvmReserveVa must be supplied to +// this call in order to map those allocations. The size and alignment of this +// region can be obtained by calling the appropriate platform specific API. For +// example, on RM, an RM control call has to be made with the control type as +// NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE. If no region needs to be reserved for +// this channel, the base and length arguments are ignored. +// +// Using the same VA region for multiple UvmRegisterChannel calls is allowed, +// provided all allocations required by all of those calls fit within the +// region. +// +// Registering the same channel on multiple subdevices of an SLI group is +// disallowed. +// +// On any errors, the channel may be reset, thereby terminating any pending +// work on that channel. +// +// Arguments: +// gpuUuid: (INPUT) +// UUID of the GPU that the channel is associated with. +// +// platformParams: (INPUT) +// On Linux: RM ctrl fd, hClient and hChannel. +// +// base: (INPUT) +// Base address (starting point) of the VA (virtual address) range +// reserved for mapping the allocations associated with this channel. +// If this channel does not have associated allocations, this argument +// is ignored. +// +// length: (INPUT) +// Length, in bytes, of the range. If this channel does not have +// associated allocations, this argument is ignored. +// +// Error codes: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_OTHER_DEVICE_FOUND: +// The UUID does not match the UUID of the device that is associated +// with the channel identifier in the platformParams argument. +// +// NV_ERR_GPU_INVALID_DEVICE: +// The GPU referred to by pGpuUuid was not registered or no VA space +// has been registered for this GPU. +// +// NV_ERR_INVALID_CHANNEL: +// The given channel identifier is invalid or has already been +// registered. +// +// NV_ERR_INVALID_ADDRESS: +// The channel has allocations which need to be mapped but the base +// address is invalid, or the VA range specified by base and length +// is too small. +// +// NV_ERR_INVALID_ARGUMENT: +// Either some problem with the platform-specific arguments was detected +// or the channel has allocations which need to be mapped but length is +// invalid. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmRegisterChannel(const NvProcessorUuid *gpuUuid, + const UvmChannelPlatformParams *platformParams, + void *base, + NvLength length); + +//------------------------------------------------------------------------------ +// UvmUnregisterChannel +// +// Unregisters a channel from UVM. The channel must have been previously +// registered via a call to UvmRegisterChannel. 
The channel will be reset, +// thereby terminating any pending work on that channel. +// +// Since channels may share virtual mappings, a call to UvmUnregisterChannel is +// not guaranteed to unmap the VA range passed into the corresponding +// UvmRegisterChannel call because other still-registered channels may be using +// allocations in that VA range. Only channels which share the same TSG can +// share allocations, so a channel's VA range can only be considered released +// after UvmUnregisterChannel has been called on all channels under that TSG. +// +// Arguments: +// platformParams: (INPUT) +// On Linux: RM ctrl fd, hClient and hChannel. +// +// Error codes: +// NV_ERR_INVALID_CHANNEL: +// The given channel identifier was not registered. +// +// NV_ERR_INVALID_ARGUMENT: +// Some problem with the platform specific arguments was detected. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmUnregisterChannel(const UvmChannelPlatformParams *platformParams); + +//------------------------------------------------------------------------------ +// UvmReserveVa +// +// Reserves VA space on the CPU for future use. Multiple, non-contiguous VA +// ranges can be reserved via this API. +// +// The starting address for the VA reservation can be either explicitly +// specified or left NULL to let the API implementation select one. When the +// starting address is specified, it must be aligned to the smallest CPU page +// size. When the starting address is not specified, the bounds of the search +// space within which the VA range should be reserved must be specified. The +// specified lower bound of the search space is rounded up to the nearest +// non-zero multiple of the requested alignment. The total size of the search +// space taking into consideration the rounded up lower bound cannot be less +// than the requested length for the VA reservation. The starting address chosen +// by the API implementation is guaranteed to be aligned to the requested +// alignment. +// +// The requested alignment must be either a power of two that is at least the +// smallest CPU page size or left zero to indicate default alignment which is +// the smallest CPU page size. +// +// The length of the VA reservation must be a multiple of the smallest CPU page +// size. +// +// Arguments: +// base: (INPUT/OUTPUT) +// Contains the starting address of the VA reservation when the call +// returns successfully. If *base is NULL when this API is invoked, a VA +// range that falls within the requested bounds is reserved. Note that +// the lower bound will be rounded up to the nearest non-zero multiple +// of the requested alignment. If *base is non-NULL when this API +// is invoked, then that address is chosen as the starting address of +// the VA reservation. +// +// length: (INPUT) +// Length in bytes of the region. Must be a multiple of the smallest CPU +// page size. +// +// minVa: (INPUT) +// Lower limit for the search space within which the VA range must be +// reserved. Will be rounded up to the nearest non-zero multiple of the +// requested alignment. Ignored if *base is non-NULL when the API is +// invoked. +// +// maxVa: (INPUT) +// Upper limit for the search space within which the VA range must be +// reserved. Ignored if *base is non-NULL when the API is invoked. 
+// +// alignment: (INPUT) +// Alignment required for the starting address of the reservation. Must +// either be zero to indicate default alignment which is smallest CPU +// page size or a power of two that is at least the smallest CPU page +// size. Ignored if *base is non-NULL when the API is invoked. +// +// Error codes: +// NV_ERR_NO_MEMORY: +// Either *base is NULL and no suitable VA reservation could be made or +// some other internal memory allocation failed. +// +// NV_ERR_UVM_ADDRESS_IN_USE: +// *base is non-NULL and reserving the VA range at that address failed. +// +// NV_ERR_INVALID_ADDRESS: +// One of the following occurred: +// - base is NULL. +// - *base is non-NULL and is not aligned to the smallest CPU page size. +// - *base is NULL and one of the following occurred: +// - the rounded up minVa is not less than maxVa. +// - the region covered by the rounded up minVa and maxVa is not big +// enough to contain a VA reservation of the requested length. +// - alignment is non-zero and is either not a power of two or is less +// than the smallest CPU size. +// - length is zero or is not a multiple of the smallest CPU page size. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmReserveVa(void **base, + NvLength length, + void *minVa, + void *maxVa, + NvLength alignment); + +//------------------------------------------------------------------------------ +// UvmReleaseVa +// +// Releases all pages within the VA range. If any of the pages were committed, +// they are automatically decomitted as well. +// +// The release may encompass more than a single reserve VA or commit call, but +// must not partially release any regions that were either reserved or +// committed previously. +// +// Arguments: +// base: (INPUT) +// Base address (starting point) of the VA (virtual address) range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// Error codes: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. There is likely more than one +// possible cause of this error. +// +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned or the range was not +// previously reserved via UvmReserveVa. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmReleaseVa(void *base, + NvLength length); + +//------------------------------------------------------------------------------ +// UvmCreateRangeGroup +// +// Creates a new range group. Virtual address ranges can be associated with +// this range group as outlined in UvmSetRangeGroup. +// +// Arguments: +// rangeGroupId: (OUTPUT) +// Id of the newly created range group. +// +// Error codes: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_INVALID_ARGUMENT: +// A NULL pointer was passed in the rangeGroupId argument. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. 
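+//
+// Illustrative sketch (not part of this change): a range group is created,
+// associated with managed VA ranges via UvmSetRangeGroup (documented below),
+// and destroyed when no longer needed. base and length below are assumed to
+// describe a range allocated with UvmAlloc or UvmMemMap:
+//
+//     NvU64 groupId;
+//     NV_STATUS status = UvmCreateRangeGroup(&groupId);
+//     if (status == NV_OK) {
+//         status = UvmSetRangeGroup(base, length, groupId);
+//         // ... optionally UvmPreventMigrationRangeGroups(&groupId, 1) ...
+//         UvmDestroyRangeGroup(groupId);
+//     }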
+// +//------------------------------------------------------------------------------ +NV_STATUS UvmCreateRangeGroup(NvU64 *rangeGroupId); + +//------------------------------------------------------------------------------ +// UvmDestroyRangeGroup +// +// Destroys a previously created range group. If there are any pages associated +// with this range group, that association is cleared. i.e. the behavior is the +// same as associating those pages with UVM_RANGE_GROUP_ID_NONE via a call to +// UvmSetRangeGroup. +// +// Arguments: +// rangeGroupId: (INPUT) +// Id of the range group to be destroyed. +// +// Error codes: +// NV_ERR_OBJECT_NOT_FOUND: +// rangeGroupId was not created by a previous call to +// UvmCreateRangeGroup. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmDestroyRangeGroup(NvU64 rangeGroupId); + +//------------------------------------------------------------------------------ +// UvmSetRangeGroup +// +// Associates the pages in a virtual address (VA) range with the specified +// range group. The base address and length of the VA range must be aligned to +// the smallest page size supported by the CPU. If any pages in that VA range +// were associated with another range group, that association is changed to +// this range group. The VA range must have been allocated via either UvmAlloc +// or UvmMemMap. +// +// If the range group was made non-migratable by a previous call to +// UvmPreventMigrationRangeGroups, then all pages in the VA range are migrated +// to their preferred location if they are not already located there. If any +// page does not have a preferred location or if the preferred location is a +// fault-capable GPU, an error is returned. +// +// If rangeGroupId is UVM_RANGE_GROUP_ID_NONE, then all pages in the VA range +// will have their range group association removed. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// rangeGroupId: (INPUT) +// Id of the range group to associate the VA range with. +// +// Errors: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned or don't represent a valid +// address range. +// +// NV_ERR_INVALID_DEVICE: +// The range group is non-migratable and at least one page in the VA +// range either does not have a preferred location or its preferred +// location is a fault-capable GPU. +// +// NV_ERR_OBJECT_NOT_FOUND: +// rangeGroupId was not created by a previous call to +// UvmCreateRangeGroup. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmSetRangeGroup(void *base, + NvLength length, + NvU64 rangeGroupId); + +//------------------------------------------------------------------------------ +// UvmPreventMigrationRangeGroups +// +// Migrates all pages associated with the specified range groups to their +// preferred location and prevents them from being migrated on faults from +// either the CPU or the GPU. Any unpopulated pages are populated at the +// preferred location. 
If any page does not have a preferred location or if the +// preferred location is a fault-capable GPU, an error is returned. All the +// specified range groups must be valid range groups allocated using +// UvmCreateRangeGroup. +// +// All pages associated with the specified range groups are mapped at the +// preferred location and from all the GPUs present in the accessed-by list of +// those pages, provided establishing a mapping is possible. If any page +// associated with any of the specified range groups has a preferred location +// set to a non-fault-capable GPU, and another non-fault-capable GPU is in the +// accessed-by list of the page but P2P support between both GPUs is not +// enabled, an error is returned. +// +// GPUs are allowed to map any pages belonging to these range groups on faults. +// If establishing such a mapping is not possible, the fault is fatal. +// +// Existing CPU mappings to any pages belonging to these range groups are +// revoked, even if the pages are in system memory and even if the CPU is in +// the accessed-by list of those pages. The CPU is not allowed to map these +// pages on faults even if they are located in system memory and so, CPU faults +// to these pages are always fatal. +// +// Multiple calls to UvmPreventMigrationRangeGroups are not refcounted. i.e. +// calling UvmPreventMigrationRangeGroups on a range group on which +// UvmPreventMigrationRangeGroups has already been called results in a no-op. +// +// Arguments: +// rangeGroupIds: (INPUT) +// An array of range group IDs. +// +// numGroupIds: (INPUT) +// Number of items in the rangeGroupIds array. +// +// Errors: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_OBJECT_NOT_FOUND: +// One or more rangeGroupIds was not found. +// +// NV_ERR_INVALID_ARGUMENT: +// A NULL pointer was passed in for rangeGroupIds or numGroupIds was +// zero. +// +// NV_ERR_INVALID_DEVICE: +// At least one page in one of the VA ranges associated with these range +// groups does not have a preferred location or its preferred location +// is a fault-capable GPU. Or the preferred location has been set to a +// non-fault-capable GPU, and another non-fault-capable GPU is present +// in the accessed-by list of a page but P2P support between both GPUs +// has not been enabled. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmPreventMigrationRangeGroups(const NvU64 *rangeGroupIds, + NvLength numGroupIds); + +//------------------------------------------------------------------------------ +// UvmAllowMigrationRangeGroups +// +// Undoes the effect of UvmPreventMigrationRangeGroups. Pages associated with +// these range groups are now allowed to migrate at any time, and CPU or GPU +// faults to these pages are no longer fatal. All the specified range groups +// must be valid range groups allocated using UvmCreateRangeGroup. +// +// Multiple calls to UvmAllowMigrationRangeGroups are not refcounted. i.e. +// calling UvmAllowMigrationRangeGroups on a range group on which +// UvmAllowMigrationRangeGroups has already been called results in a no-op. +// +// Arguments: +// rangeGroupIds: (INPUT) +// An array of range group IDs. +// +// numGroupIds: (INPUT) +// Number of items in the rangeGroupIds array. +// +// Errors: +// NV_ERR_OBJECT_NOT_FOUND: +// One or more rangeGroupIds was not found. 
+// +// NV_ERR_INVALID_ARGUMENT: +// A NULL pointer was passed in for rangeGroupIds or numGroupIds was +// zero. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmAllowMigrationRangeGroups(const NvU64 *rangeGroupIds, + NvLength numGroupIds); + +//------------------------------------------------------------------------------ +// UvmAlloc +// +// Creates a new mapping in the virtual address space of the process, populates +// it at the specified preferred location, maps it on the provided list of +// processors if feasible and associates the range with the given range group. +// +// This API is equivalent to the following code sequence: +// UvmMemMap(base, length); +// UvmSetPreferredLocation(base, length, preferredLocationUuid); +// for (i = 0; i < accessedByCount; i++) { +// UvmSetAccessedBy(base, length, &accessedByUuids[i]); +// } +// UvmSetRangeGroup(base, length, rangeGroupId); +// UvmMigrate(base, length, preferredLocationUuid, 0); +// +// Please see those APIs for further details on their behavior. If an error is +// encountered during any part of the sequence, the completed portion will be +// undone. +// +// The VA range can be unmapped and freed via a call to UvmFree. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// preferredLocationUuid: (INPUT) +// UUID of the preferred location for this VA range. +// +// accessedByUuids: (INPUT) +// UUIDs of all processors that should have persistent mappings to this +// VA range. +// +// accessedByCount: (INPUT) +// Number of elements in the accessedByUuids array. +// +// rangeGroupId: (INPUT) +// ID of the range group to associate this VA range with. +// +// Errors: +// NV_ERR_UVM_ADDRESS_IN_USE: +// The requested address range overlaps with an existing allocation. +// +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned or the range was not +// previously reserved via UvmReserveVa. +// +// NV_ERR_INVALID_DEVICE: +// Either preferredLocationUuid or one of the UUIDs in the +// accessedByUuids array was not registered or the UUID represents a GPU +// that has no VA space registered for it. +// +// NV_ERR_OBJECT_NOT_FOUND: +// rangeGroupId was not found. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmAlloc(void *base, + NvLength length, + const NvProcessorUuid *preferredLocationUuid, + const NvProcessorUuid *accessedByUuids, + NvLength accessedByCount, + NvU64 rangeGroupId); + +//------------------------------------------------------------------------------ +// UvmFree +// +// Frees a VA range previously allocated via one of the UVM allocator APIs, +// namely either UvmAlloc, UvmMemMap, UvmCreateExternalRange, +// UvmMapDynamicParallelismRegion or UvmAllocSemaphorePool. +// +// For VA ranges allocated via UvmAlloc, UvmMemMap or UvmAllocSemaphorePool, all +// CPU and GPU page table mappings are cleared and all allocated pages are +// freed. +// +// For VA ranges allocated via UvmCreateExternalRange, all GPU page table +// mappings are cleared. 
No CPU page table mappings for this range are affected,
+// and no physical pages for this range are freed.
+//
+// For VA ranges allocated via UvmMapDynamicParallelismRegion, all GPU page
+// table mappings are cleared. No CPU page table mappings for this range are
+// affected.
+//
+// The base address of the VA range to be freed must match the base address used
+// when allocating the range. If the VA range came from a region previously
+// reserved via UvmReserveVa, then this VA range is put back in the reserved
+// state.
+//
+// Note that the reason this API does not take a length argument is because this
+// API is modeled after the C library free() API. Partial frees are not allowed
+// and the UVM usermode layer tracks the base and length of each allocated
+// range, so having a length argument would be redundant. This also eliminates
+// the need for the caller to track the length of each allocation.
+//
+// Arguments:
+// base: (INPUT)
+// Starting address of the range to be freed. This must match an
+// address that was obtained via a UVM allocator API.
+//
+// Errors:
+// NV_ERR_INVALID_ADDRESS:
+// base does not match an address that was passed into a UVM allocator
+// API.
+//
+// NV_ERR_GENERIC:
+// Unexpected error. We try hard to avoid returning this error code,
+// because it is not very informative.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmFree(void *base);
+
+//------------------------------------------------------------------------------
+// UvmCleanUpZombieResources
+//
+// Clean up resources left by processes that specify
+// UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE. Resources not freed before
+// termination by such processes are not immediately freed by UVM if another
+// process is using the same UVM file.
+//
+// Errors:
+// NV_ERR_GENERIC:
+// Unexpected error. We try hard to avoid returning this error code,
+// because it is not very informative.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmCleanUpZombieResources(void);
+
+//------------------------------------------------------------------------------
+// UvmAllocSemaphorePool
+//
+// Allocates memory from which semaphores can be suballocated and used to order
+// work between UVM and CUDA as described in UvmMigrateAsync.
+//
+// The virtual address range specified by (base, length) must have been
+// previously reserved via a call to UvmReserveVa. Both base and length must be
+// aligned to the smallest page size supported by the CPU.
+//
+// The pages are populated in CPU memory and zero initialized. They are mapped
+// on the CPU and in all registered GPU VA spaces. They will also be mapped in
+// any GPU VA spaces registered after this call. The pages are non-migratable
+// and the GPU mappings are persistent, which makes them safe to access from
+// non-fault-capable HW engines.
+//
+// By default, all mappings to this VA range have read, write and atomic access
+// and are uncached. This behavior can be overridden for GPUs by explicitly
+// specifying the mapping and caching attributes through this API. At most one
+// GPU may cache the allocation, in which case no other processor should write
+// to it. These GPUs must have been registered via UvmRegisterGpu. These GPUs
+// do not need to have a GPU VA space registered at the time of this API call.
+// Overriding default mapping and caching attributes for the CPU is disallowed.
+// If a new GPU is registered or a currently registered GPU is unregistered via +// UvmUnregisterGpu and then re-registered, default mapping and caching +// attributes will be applied for that GPU. +// +// The VA range must lie within the largest possible virtual address supported +// by all GPUs that currently have a GPU VA space registered for them. Also, if +// a GPU VA space is registered in the future for a GPU which is unable to map +// this allocation, that GPU VA space registration will fail. +// +// The pages in this VA range cannot be associated with range groups, cannot be +// the target for read duplication, cannot have a preferred location set, and +// cannot have any accessed-by processors. +// +// The VA range can be unmapped and freed via a call to UvmFree. +// + + + + + + + + +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// perGpuAttribs: (INPUT) +// List of per GPU mapping and caching attributes. GPUs not in the list +// are mapped with default attributes. +// +// gpuAttribsCount: (INPUT) +// Number of entries in the perGpuAttribs array. +// +// Errors: +// NV_ERR_UVM_ADDRESS_IN_USE: +// The requested address range overlaps with an existing allocation. +// +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned or the range was not +// previously reserved via UvmReserveVa. +// +// NV_ERR_OUT_OF_RANGE: +// The VA range exceeds the largest virtual address supported by one or +// more registered GPUs. +// +// NV_ERR_INVALID_DEVICE: +// At least one of the UUIDs in the perGpuAttribs list was either not +// registered or is the UUID of the CPU. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_INVALID_ARGUMENT: +// perGpuAttribs is NULL but gpuAttribsCount is non-zero or vice-versa, +// or caching is requested on more than one GPU. + + + + +// +// NV_ERR_NOT_SUPPORTED: +// The current process is not the one which called UvmInitialize, and +// UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE was not specified to +// UvmInitialize. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmAllocSemaphorePool(void *base, + NvLength length, + const UvmGpuMappingAttributes *perGpuAttribs, + NvLength gpuAttribsCount); + +//------------------------------------------------------------------------------ +// UvmMigrate +// +// Migrates the backing of a given virtual address range to the specified +// destination processor. If any page in the VA range is unpopulated, it is +// populated at the destination processor. The migrated pages in the VA range +// are also mapped on the destination processor. +// +// Both base and length must be aligned to the smallest page size supported by +// the CPU. The VA range must lie within the largest possible virtual address +// supported by the specified processor. +// +// The virtual address range specified by (base, length) must have been +// allocated via a call to either UvmAlloc or UvmMemMap, or be supported +// system-allocated pageable memory. +// +// If the input virtual range corresponds to system-allocated pageable memory, +// and there is at least one GPU in the system that supports transparent access +// to pageable memory, the behavior described in the next paragraphs does not +// take effect. 
Instead, the driver will first populate any unpopulated pages
+// according to the memory policy defined by the calling process and address
+// range. Then, pages will be migrated to the requested processor. If the
+// destination processor is the CPU, and the memory policy has not defined
+// preferred CPU memory nodes or the given preferredCpuMemoryNode is in the
+// mask of preferred memory nodes, the driver will try to migrate memory to
+// preferredCpuMemoryNode first, and will fall back to the rest of the CPU nodes
+// if it doesn't succeed. If pages were already resident on any CPU memory node,
+// they will not be migrated.
+//
+// If the input virtual range corresponds to system-allocated pageable memory,
+// and UvmIsPageableMemoryAccessSupported reports that pageable memory access
+// is supported, then the driver will populate any unpopulated pages at the
+// destination processor and migrate the data from any source location to the
+// destination. Pages in the VA range are migrated even if their preferred
+// location is set to a processor other than the destination processor.
+// If the accessed-by list of any of the pages in the VA range is not empty,
+// then mappings to those pages from all the appropriate processors are updated
+// to refer to the new location if establishing such a mapping is possible.
+// Otherwise, those mappings are cleared.
+// Note that in this case, software managed pageable memory does not support
+// migration of MAP_SHARED, file-backed, or PROT_NONE mappings.
+//
+// If any pages in the given VA range are associated with a range group which
+// has been made non-migratable via UvmPreventMigrationRangeGroups, then those
+// pages are not migrated and the mappings on the destination processor for
+// those pages are left unmodified. If the VA range is associated with a
+// migratable range group and the destination processor is a non-fault-capable
+// GPU, then an error is returned if that GPU is in the accessed-by list of the
+// VA range but that GPU is not the preferred location.
+//
+// If read duplication is enabled on any pages in the VA range, then those pages
+// are read duplicated at the destination processor, leaving the source copy, if
+// present, intact with only its mapping changed to read-only if it wasn't
+// already mapped that way.
+//
+// Pages in the VA range are migrated even if their preferred location is set to
+// a processor other than the destination processor.
+//
+// If the accessed-by list of any of the pages in the VA range is not empty,
+// then mappings to those pages from all the appropriate processors are updated
+// to refer to the new location if establishing such a mapping is possible.
+// Otherwise, those mappings are cleared.
+//
+// If fewer than the number of requested pages were migrated,
+// NV_WARN_MORE_PROCESSING_REQUIRED is returned. An example scenario where this
+// could happen is when UvmPreventMigrationRangeGroups has been called on a
+// range group associated with some pages in this range. If fewer than the
+// number of requested pages were migrated due to insufficient memory to
+// allocate physical pages or page tables, then NV_ERR_NO_MEMORY is returned.
+//
+// Arguments:
+// base: (INPUT)
+// Base address of the virtual address range.
+//
+// length: (INPUT)
+// Length, in bytes, of the range.
+//
+// destinationUuid: (INPUT)
+// UUID of the destination processor to migrate pages to.
+// +// preferredCpuMemoryNode: (INPUT) +// Preferred CPU NUMA memory node used if the destination processor is +// the CPU. This argument is ignored if the given virtual address range +// corresponds to managed memory. +// +// Error codes: +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned, or the range does not +// represent a migratable allocation created via UvmMemMap, or the +// range is pageable memory and the system does not support accessing +// pageable memory, or the range does not represent a supported +// Operating System allocation. +// +// NV_ERR_OUT_OF_RANGE: +// The VA range exceeds the largest virtual address supported by the +// destination processor. +// +// NV_ERR_INVALID_DEVICE: +// destinationUuid does not represent a valid processor such as a CPU or +// a GPU with a GPU VA space registered for it. Or destinationUuid is a +// non-fault-capable GPU, and that GPU is present in the accessed-by +// list of the VA range but that GPU is not the preferred location. +// +// NV_ERR_NO_MEMORY: +// There was insufficient memory to allocate physical pages or page +// tables to complete the migration. Or internal memory allocation +// failed. +// +// NV_ERR_NOT_SUPPORTED: +// The UVM file descriptor is associated with another process and the +// input virtual range corresponds to system-allocated pageable memory +// that cannot be migrated from this process. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +// NV_WARN_MORE_PROCESSING_REQUIRED: +// Fewer than the number of requested pages were migrated because some +// pages were associated with a non-migratable range group. +// +//------------------------------------------------------------------------------ +#if UVM_API_REV_IS_AT_MOST(5) +NV_STATUS UvmMigrate(void *base, + NvLength length, + const NvProcessorUuid *destinationUuid); +#else +NV_STATUS UvmMigrate(void *base, + NvLength length, + const NvProcessorUuid *destinationUuid, + NvU32 preferredCpuMemoryNode); +#endif + +//------------------------------------------------------------------------------ +// UvmMigrateAsync +// +// Migrates the backing of a given virtual address range to the specified +// destination processor. The behavior of this API is exactly the same as that +// of UvmMigrate except for the differences outlined below. +// +// When this call returns NV_OK, the migration operation is considered to be +// in-flight and can be synchronized upon by waiting for the specified payload +// to be written at the given semaphore address. The semaphore address must be +// 4-byte aligned and must fall within a VA range allocated using +// UvmAllocSemaphorePool. It is up to the caller to ensure that the payload has +// been written before reusing the address in a subsequent UvmMigrateAsync call. +// Specifying a semaphore address is optional. If the semaphore address is NULL +// the payload must be zero. +// +// The API makes no guarantees about how many pages will be migrated, and there +// is no provision to detect errors that occur during the in-flight operations. +// However, the API does guarantee that the semaphore will eventually be +// released regardless of errors during in-flight operations, as long as the API +// call itself returned NV_OK. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. 
+// +// destinationUuid: (INPUT) +// UUID of the destination processor to migrate pages to. +// +// preferredCpuMemoryNode: (INPUT) +// Preferred CPU NUMA memory node used if the destination processor is +// the CPU. This argument is ignored if the given virtual address range +// corresponds to managed memory. +// +// semaphoreAddress: (INPUT) +// Base address of the semaphore. +// +// semaphorePayload: (INPUT) +// Payload to be written at semaphoreAddress when the operation +// completes. Must be zero if semaphoreAddress is NULL. +// +// Error codes: +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned, or the range does not +// represent a migratable allocation created via UvmMemMap, or the +// range is pageable memory and the system does not support accessing +// pageable memory, or the range does not represent a supported +// Operating System allocation, or the semaphoreAddress isn't properly +// aligned, or isn't suballocated from a semaphore pool. +// +// NV_ERR_OUT_OF_RANGE: +// The VA range exceeds the largest virtual address supported by the +// destination processor. +// +// NV_ERR_INVALID_DEVICE: +// destinationUuid does not represent a valid processor such as a CPU or +// a GPU with a GPU VA space registered for it. Or destinationUuid is a +// non-fault-capable GPU, and that GPU is present in the accessed-by +// list of the VA range but that GPU is not the preferred location. +// +// NV_ERR_INVALID_ARGUMENT: +// semaphoreAddress is NULL and semaphorePayload is not zero. +// +// NV_ERR_NO_MEMORY: +// There was insufficient memory to allocate physical pages or page +// tables to complete the migration. Or internal memory allocation +// failed. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +// NV_WARN_MORE_PROCESSING_REQUIRED: +// Fewer than the number of requested pages were migrated because some +// pages were associated with a non-migratable range group. +// +//------------------------------------------------------------------------------ +#if UVM_API_REV_IS_AT_MOST(5) +NV_STATUS UvmMigrateAsync(void *base, + NvLength length, + const NvProcessorUuid *destinationUuid, + void *semaphoreAddress, + NvU32 semaphorePayload); +#else +NV_STATUS UvmMigrateAsync(void *base, + NvLength length, + const NvProcessorUuid *destinationUuid, + NvU32 preferredCpuMemoryNode, + void *semaphoreAddress, + NvU32 semaphorePayload); +#endif + +//------------------------------------------------------------------------------ +// UvmMigrateRangeGroup +// +// Migrates the backing of all virtual address ranges associated with the given +// range group to the specified destination processor. The behavior of this API +// is equivalent to calling UvmMigrate on each VA range associated with this +// range group. The value for the preferredCpuMemoryNode is irrelevant in this +// case as it only applies to migrations of pageable address, which cannot be +// used to create range groups. +// +// Any errors encountered during migration are returned immediately. No attempt +// is made to migrate the remaining unmigrated ranges and the ranges that are +// already migrated are not rolled back to their previous location. +// +// The range group id specified must have been allocated via +// UvmCreateRangeGroup. +// +// Arguments: +// rangeGroupId: (INPUT) +// Id of the range group whose associated VA ranges have to be migrated. 
+// +// destinationUuid: (INPUT) +// UUID of the destination processor to migrate pages to. +// +// Error codes: +// NV_ERR_OBJECT_NOT_FOUND: +// Either UVM_RANGE_GROUP_ID_NONE was specified or the rangeGroupId was +// not found. +// +// NV_ERR_INVALID_DEVICE: +// destinationUuid does not represent a valid processor such as a CPU or +// a GPU with a GPU VA space registered for it. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_OUT_OF_RANGE: +// One or more of the VA ranges exceeds the largest virtual address +// supported by the destination processor. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +// NV_WARN_MORE_PROCESSING_REQUIRED: +// Fewer than requested pages were migrated because for example, the +// range group was non-migratable. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmMigrateRangeGroup(NvU64 rangeGroupId, + const NvProcessorUuid *destinationUuid); + +//------------------------------------------------------------------------------ +// UvmPopulatePageable +// +// Forces the population of the given virtual address range. Memory will be +// populated by the system according to the memory policy defined by the calling +// process and address range. +// +// This function only supports pageable memory. None of the pages within the +// virtual address range specified by (base, length) may belong to a virtual +// address range allocated or registered using any of the UVM +// allocation/mapping APIs. Also, all pages must be mapped with at least read +// permissions. +// +// If fewer than the number of requested pages were populated, NV_ERR_NO_MEMORY +// is returned. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned, the range does not +// represent a supported Operating System allocation, or the range +// contains pages not mapped with at least read permissions. +// +// NV_ERR_NO_MEMORY: +// Fewer than the number of requested pages were populated, likely +// because the system ran out of memory. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmPopulatePageable(void *base, + NvLength length); + +//------------------------------------------------------------------------------ +// UvmMemMap +// +// Creates a new mapping in the virtual address space of the process that is +// valid for access from any fault-capable CPU or GPU. +// +// The virtual address range specified by (base, length) must have been +// previously reserved via a call to UvmReserveVa. Both base and length must be +// aligned to the smallest page size supported by the CPU. Note that using a +// larger alignment for base and length, such as the largest GPU page size, may +// result in higher performance. +// +// The pages in the VA range are zero initialized. They are typically populated +// on demand, for example, through CPU or GPU faults. +// +// The VA range can be unmapped and freed via a call to UvmFree. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. 
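+//
+// As a rough usage sketch (illustrative only, error handling omitted; the
+// 64KB length and the search bounds passed to UvmReserveVa are arbitrary
+// example values):
+//
+//     void *base = NULL;
+//     NvLength length = 64 * 1024;
+//     NV_STATUS status = UvmReserveVa(&base, length, (void *)0x1000,
+//                                     (void *)0x7fffffffffff, 0);
+//     if (status == NV_OK)
+//         status = UvmMemMap(base, length);  // pages populate on demand
+//     // ... access the range from the CPU or fault-capable GPUs ...
+//     UvmFree(base);              // unmap and free; the VA stays reserved
+//     UvmReleaseVa(base, length); // release the VA reservation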
+// +// Errors: +// NV_ERR_UVM_ADDRESS_IN_USE: +// The requested address range overlaps with an existing allocation. +// +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned or the range was not +// previously reserved via UvmReserveVa. +// +// NV_ERR_NOT_SUPPORTED: +// The current process is not the one which called UvmInitialize, and +// UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE was not specified to +// UvmInitialize. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmMemMap(void *base, + NvLength length); + +//------------------------------------------------------------------------------ +// UvmCreateExternalRange +// +// Create a VA range within the process's address space reserved for external +// allocations. The VA range is not mapped to any physical allocation at the +// time of creation. Once an external VA range has been created using this API, +// the user is free to map any number of physical allocations within the VA +// range (see UvmMapExternalAllocation and UvmMapExternalSparse for more +// details). +// +// The virtual address range, itself, does not impose any restrictions on the +// alignment of the physical allocations mapped within it. However, both base +// and length must be aligned to 4K. +// +// The VA range must not overlap with an existing VA range, irrespective of +// whether the existing range corresponds to a UVM allocation or an external +// allocation. +// +// It is allowed (but not required) for the VA range to come from a region +// previously reserved via UvmReserveVa. +// +// Any mappings created within this VA range are considered non-migratable. +// Consequently, pages cannot be associated with range groups, cannot be +// the target for read duplication, cannot have a preferred location set, +// cannot have any accessed-by processors, and any GPU faults within this range +// are fatal. +// +// Mappings within this range neither create nor modify any CPU mappings, even +// if the mappings came from a region previously reserved via UvmReserveVa. +// This implies that CPU accesses to any mappings within this range will cause +// a fatal fault if it's not mapped. +// +// The VA range is not reclaimed until UvmFree is called on it even if it is +// fully unmapped from all GPUs either explicitly via UvmUnmapExternal or +// implicitly via APIs such as UvmUnregisterGpu, UvmUnregisterGpuVaSpace, +// UvmDisablePeerAccess, etc. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// base is NULL or length is zero or at least one of base and length is +// not aligned to 4K. +// +// NV_ERR_UVM_ADDRESS_IN_USE: +// The requested address range overlaps with an existing allocation. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmCreateExternalRange(void *base, + NvLength length); + +//------------------------------------------------------------------------------ +// UvmMapExternalAllocation +// +// Maps an allocation that was allocated outside of UVM on the specified list of +// GPUs. The external allocation can be unmapped from a specific GPU using +// UvmUnmapExternal or from all GPUs using UvmFree. 
+// +// The virtual address range specified by (base, length) must be aligned to the +// allocation's physical page size and must fall within a VA range previously +// created with UvmCreateExternalRange. A GPU VA space must have been registered +// for each GPU in the list. The offset in the physical allocation at which the +// allocation must be mapped should also be aligned to the allocation's physical +// page size. The (base, length) range must lie within the largest possible +// virtual address supported by the specified GPUs. +// +// If the range specified by (base, length) falls within any existing mappings, +// the behavior is the same as if UvmUnmapExternal with the range specified by +// (base, length) had been called first. +// +// If the allocation resides in GPU memory, that GPU must have been registered +// via UvmRegisterGpu. If the allocation resides in GPU memory and a mapping is +// requested for a different GPU, then P2P support should have been enabled via +// UvmEnablePeerAccess between the two GPUs if connected by PCIe. +// +// The allocation can be mapped with different access permissions and +// cacheability settings on different GPUs. The settings to use for each GPU are +// specified in the perGpuAttribs array. It is also legal to map the allocation +// multiple times on the same GPU with different access permissions and +// cacheability settings as long as all of the mappings are fully contained +// within the VA range. Calling this API with the same GPU appearing multiple +// times in the list is equivalent to calling the API multiple times on the same +// GPU. +// +// Access permissions control which of 3 types of accesses (reads, writes and +// atomics) are allowed for this VA range. Any GPU accesses of a disallowed kind +// result in a fatal fault. If UvmGpuMappingTypeDefault is specified, the UVM +// driver chooses the appropriate access permissions. On non-fault-capable GPUs, +// specifying either UvmGpuMappingTypeReadOnly or UvmGpuMappingTypeReadWrite is +// disallowed. +// +// Caching can be forced on or off, or can be left to the UVM driver to manage +// by specifying UvmGpuCachingTypeDefault. Specifying UvmGpuCachingTypeDefault +// will result in a cached mapping only if the allocation is physically located +// in that GPU's memory. Note that caching here only refers to GPU L2 caching +// and not GPU L1 caching as the latter is controlled via instruction opcode +// modifiers and not through page table attributes. +// +// Format and element bits can be forced, or can be left to the UVM driver to +// manage by specifying UvmGpuFormatTypeDefault and +// UvmGpuFormatElementBitsDefault respectively. UvmGpuFormatTypeDefault and +// UvmGpuFormatElementBitsDefault are mutually inclusive, meaning that if one +// of them is specified then the other one must be specified too. +// +// Compression type of the specified virtual address range can be specified with +// UvmGpuCompressionType mapping attribute. +// +// The UVM driver retains a reference on the external allocation as long as at +// least one GPU has any portion of that allocation mapped. +// +// The pages in this mapping are not zero initialized or modified in any way. +// +// Note that calling UvmUnregisterGpuVaSpace will also unmap all mappings +// created via this API on the GPU that the GPU VA space is associated with. 
+// Also, if a mapping has to be created on a GPU for a physical allocation that +// resides on a PCIe peer GPU, then peer-to-peer support must have been enabled +// between those two GPUs via UvmEnablePeerAccess. Disabling peer-to-peer +// support via UvmDisablePeerAccess will tear down all peer mappings between the +// two GPUs. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// offset: (INPUT) +// Offset, in bytes, in the physical allocation at which the VA range +// must be mapped. +// +// perGpuAttribs: (INPUT) +// List of per GPU mapping and caching attributes. GPUs not in the list +// are not affected. +// +// gpuAttribsCount: (INPUT) +// Number of entries in the perGpuAttribs array. +// +// platformParams: (INPUT) +// Platform specific parameters that identify the allocation. +// On Linux: RM ctrl fd, hClient and hMemory. +// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// One of the following occurred: +// - base is NULL. +// - length is zero. +// - The requested address range does not fall entirely within an +// existing external VA range created with a single call to +// UvmCreateExternalRange. +// - At least one of base and length is not aligned to the allocation's +// physical page size. +// - base or base + length fall within an existing mapping but are not +// aligned to that mapping's page size. +// +// NV_ERR_OUT_OF_RANGE: +// The range specified by (base, length) exceeds the largest virtual +// address supported by one or more of the specified GPUs. +// +// NV_ERR_INVALID_OFFSET: +// offset is not aligned to the allocation's physical page size or +// offset+length exceeds the allocation size. +// +// NV_ERR_INVALID_DEVICE: +// One of the following occurred: +// - The allocation resides in GPU memory whose UUID was not registered. +// - One or more of the UUIDs in the perGpuAttribs list was either not +// registered or has no GPU VA space registered for it. +// - The allocation resides in GPU memory and a mapping was requested +// for a different GPU and P2P support was not enabled between them. +// - The UUID of the CPU was specified in the perGpuAttribs list. +// - UvmGpuCompressionTypeEnabledNoPlc compression type was used on one +// or more GPUs that don't support PLC. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_INVALID_ARGUMENT: +// One of the following occurred: +// - perGpuAttribs is NULL. +// - gpuAttribsCount is zero. +// - an invalid mapping type was specified. +// - an invalid caching type was specified. +// - an invalid format/element bits combination was specified. +// - an invalid compression type was specified. +// - UvmGpuCompressionTypeEnabledNoPlc compression type was used with a +// non-compressible physical allocation. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmMapExternalAllocation(void *base, + NvLength length, + NvLength offset, + const UvmGpuMappingAttributes *perGpuAttribs, + NvLength gpuAttribsCount, + const UvmAllocationPlatformParams *platformParams); + +//------------------------------------------------------------------------------ +// UvmMapExternalSparse +// +// Create a Sparse mapping for the virtual address range specified by (base, +// length). 
The mapping does not have any physical backing, rather the PTEs use +// a special pattern. The virtual address range specified by (base, length) must +// be fully contained within a virtual address range previously created with +// UvmCreateExternalRange. +// +// Virtual address ranges with Sparse mappings will not generate any faults when +// accessed. Instead, writes will be discarded and reads will return 0. +// +// Sparse mappings are supported only on fault-capable GPUs and only for 64K +// pages, so the virtual address range specified by (base, length) must be +// aligned to 64K. +// +// If the range specified by (base, length) falls within any existing mappings, +// the behavior is the same as if UvmUnmapExternal with the range specified by +// (base, length) had been called first. +// +// Note that calling UvmUnregisterGpuVaSpace will also unmap all mappings +// created via this API on the GPU that the GPU VA space is associated with. +// Notably the mappings won't be recreated when the GPU VA space is +// re-registered. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. The address must be +// aligned on a 64K boundary. +// +// length: (INPUT) +// Length, in bytes, of the range. The length must be 64K aligned. +// +// +// gpuUuid: (INPUT) +// UUID of the GPU to map the sparse region on. +// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// One of the following occurred: +// - base is NULL. +// - length is zero. +// - The requested address range does not fall entirely within an +// existing external VA range created with a single call to +// UvmCreateExternalRange. +// - At least one of base and length is not aligned to a 64K +// boundary. +// +// NV_ERR_OUT_OF_RANGE: +// The range specified by (base, length) exceeds the largest virtual +// address supported by the specified GPU. +// +// NV_ERR_INVALID_DEVICE: +// One of the following occurred: +// - The specified GPU was not registered. +// - The GPU specified has no VA space registered for it. +// - The UUID of the CPU was specified. +// - Sparse mappings are not supported on the specified GPU. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +//------------------------------------------------------------------------------ +NV_STATUS UvmMapExternalSparse(void *base, + NvLength length, + const NvProcessorUuid *gpuUuid); + +//------------------------------------------------------------------------------ +// UvmUnmapExternal +// +// Unmaps a virtual address range that was mapped using UvmMapExternalAllocation +// or UvmMapExternalSparse from the specified GPU. The range specified by (base, +// length) must be fully contained within a single External VA range created +// with UvmCreateExternalRange. +// +// If the range specified by (base, length) range partially overlaps existing +// mappings, the overlapping portion of the existing mappings will be unmapped +// provided that the split points are aligned to the mappings' respective page +// sizes. Otherwise, the overlapping portions of the existing mappings will be +// left in an undefined state. +// +// Note that the VA range is not reclaimed until UvmFree is called on it even if +// all mappings in the created range have been unmapped from all GPUs via this +// API. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// The length of the virtual address range. +// +// gpuUuid: (INPUT) +// UUID of the GPU to unmap the VA range from. 
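+//
+// A rough end-to-end sketch for external mappings (illustrative only, error
+// handling omitted; "offset", "attribs", "attribsCount", "params" and
+// "gpuUuid" are assumed to have been set up as described under
+// UvmMapExternalAllocation):
+//
+//     UvmCreateExternalRange(base, length);
+//     UvmMapExternalAllocation(base, length, offset,
+//                              attribs, attribsCount, &params);
+//     // ... GPU work uses the mapping ...
+//     UvmUnmapExternal(base, length, &gpuUuid);
+//     UvmFree(base);  // the VA range itself is only reclaimed by UvmFree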
+// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// One of the following has occurred: +// - base is NULL. +// - The requested address range does not fall entirely within an +// existing external VA range created with a single call to +// UvmCreateExternalRange. +// - base or base + length fall within an existing mapping but are not +// aligned to that mapping's page size. +// +// NV_ERR_INVALID_DEVICE: +// Either gpuUuid does not represent a valid registered GPU or the VA +// range corresponding to the given base address is not mapped on the +// specified GPU. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmUnmapExternal(void *base, + NvLength length, + const NvProcessorUuid *gpuUuid); + +// TODO: Bug 2732305: Remove this declaration when the new external APIs have +// been implemented. +NV_STATUS UvmUnmapExternalAllocation(void *base, + const NvProcessorUuid *gpuUuid); + +//------------------------------------------------------------------------------ +// UvmMapDynamicParallelismRegion +// +// Creates a special mapping required for dynamic parallelism. The mapping +// doesn't have any physical backing, it's just a PTE with a special kind. +// +// The virtual address range specified by (base, length) must cover exactly one +// GPU page, so length must be a page size supported by the GPU and base must be +// aligned to that page size. The VA range must not overlap with an existing +// mapping for the GPU. A GPU VA space must have been registered for the GPU and +// the GPU must support dynamic parallelism. +// +// The mapping is created immediately and not modified until a call to UvmFree +// Calling UvmFree frees the GPU page table mapping. The range cannot be +// associated with range groups and any GPU faults within this range are fatal. +// Also, the pages cannot be the target for read duplication, cannot have a +// preferred location set, and cannot have any accessed-by processors. +// +// Note that calling UvmUnregisterGpuVaSpace will also unmap all mappings +// created via this API on the GPU that the GPU VA space is associated with. +// Notably the mappings won't be recreated when the GPU VA space is +// re-registered, but the range should still be destroyed with UvmFree. +// +// This call neither creates nor modifies any CPU mappings, even if the VA range +// came from a region previously reserved via UvmReserveVa. This implies that +// CPU accesses to this range will cause a fatal fault if it's not mapped. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. Must be equal to a page size +// supported by the GPU. +// +// gpuUuid: (INPUT) +// UUID of the GPU to map the dynamic parallelism region on. +// +// Errors: +// NV_ERR_UVM_ADDRESS_IN_USE: +// The requested address range overlaps with an existing allocation. +// +// NV_ERR_INVALID_ADDRESS: +// base is NULL or not aligned to length or length is not a page size +// supported by the GPU. +// +// NV_ERR_OUT_OF_RANGE: +// The VA range exceeds the largest virtual address supported by one or +// more of the specified GPUs. +// +// NV_ERR_INVALID_DEVICE: +// The gpuUuid was either not registered, has no GPU VA space +// registered for it, or the GPU doesn't support dynamic parallelism. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. 
+// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmMapDynamicParallelismRegion(void *base, + NvLength length, + const NvProcessorUuid *gpuUuid); + +//------------------------------------------------------------------------------ +// UvmEnableReadDuplication +// +// Enables read duplication on the specified virtual address range, overriding +// the UVM driver's default migration and mapping policy on read faults. +// +// The virtual address range specified by (base, length) must have been +// allocated via a call to either UvmAlloc or UvmMemMap, or be supported +// system-allocated pageable memory. If the input virtual range corresponds to +// system-allocated pageable memory and UvmIsPageableMemoryAccessSupported +// reports that pageable memory access is supported, the behavior described +// below does not take effect, and read duplication will not be enabled for +// the input range. +// +// Both base and length must be aligned to the smallest page size supported by +// the CPU. +// +// On a read fault from a processor on a page in this range, any existing +// mapping to that page from all other processors will be made read-only. If the +// page does not reside in the faulting processor's memory, a duplicate copy of +// the page will be created there. The copy of the page in the faulting +// processor's memory will then be mapped as read-only on that processor. Note +// that a write to this page from any processor will collapse the duplicated +// copies. +// +// If UvmMigrate, UvmMigrateAsync or UvmMigrateRangeGroup is called on any pages +// in this VA range, then those pages will also be read duplicated on the +// destination processor for the migration. +// +// Enabling read duplication on a VA range requires the CPU and all GPUs with +// registered VA spaces to be fault-capable. Otherwise, the migration and +// mapping policies outlined above are not applied until all the +// non-fault-capable GPUs are unregistered via UvmUnregisterGpu. If a +// non-fault-capable GPU is registered after a page has already been +// read-duplicated, then the copies of that page will be collapsed into a single +// page. +// +// If UvmPreventMigrationRangeGroups has been called on the range group that +// this VA range is associated with, then the migration and mapping policies +// outlined above don't take effect until UvmAllowMigrationRangeGroups is called +// for that range group. +// +// If any page in the VA range has a preferred location, then the migration and +// mapping policies associated with this API take precedence over those related +// to the preferred location. +// +// If any pages in this VA range have any processors present in their +// accessed-by list, the migration and mapping policies associated with this +// API override those associated with the accessed-by list. +// +// Multiple calls to this API for the same VA range and the same processor are +// not refcounted, i.e. calling this API on a VA range after it has already been +// called for that same VA range results in a no-op. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. 
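+//
+// As a rough usage sketch (illustrative only, error handling omitted; "base"
+// and "length" are assumed to describe a range created with UvmAlloc or
+// UvmMemMap):
+//
+//     UvmEnableReadDuplication(base, length);
+//     // Read faults from different processors now each get a local,
+//     // read-only copy; a write from any processor collapses the copies.
+//     UvmDisableReadDuplication(base, length);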
+//
+// Errors:
+// NV_ERR_INVALID_ADDRESS:
+// base and length are not properly aligned, or the range does not
+// represent a valid UVM allocation, or the range is pageable memory and
+// the system does not support accessing pageable memory, or the range
+// does not represent a supported Operating System allocation.
+//
+// NV_ERR_GENERIC:
+// Unexpected error. We try hard to avoid returning this error code,
+// because it is not very informative.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmEnableReadDuplication(void *base,
+ NvLength length);
+
+//------------------------------------------------------------------------------
+// UvmDisableReadDuplication
+//
+// Disables read duplication on the specified virtual address range, and reverts
+// the associated policies. This also disables any default read duplication
+// heuristics employed by the kernel driver.
+//
+// The virtual address range specified by (base, length) must have been
+// allocated via a call to either UvmAlloc or UvmMemMap, or be supported
+// system-allocated pageable memory. If the input virtual range corresponds to
+// system-allocated pageable memory and UvmIsPageableMemoryAccessSupported
+// reports that pageable memory access is supported, the behavior described
+// below does not take effect, and read duplication will not be disabled for
+// the input range.
+//
+// Both base and length must be aligned to the smallest page size supported by
+// the CPU.
+//
+// Any pages in the VA range that are currently read duplicated will be
+// collapsed into a single copy. The location for the collapsed copy will be the
+// preferred location if the page has a preferred location and was resident at
+// that location when this API was called. Otherwise, the location will be
+// chosen arbitrarily.
+//
+// It is ok to call this API only on a subset of the VA range on which
+// UvmEnableReadDuplication was called or for a VA range on which
+// UvmEnableReadDuplication was never called.
+//
+// Arguments:
+// base: (INPUT)
+// Base address of the virtual address range.
+//
+// length: (INPUT)
+// Length, in bytes, of the range.
+//
+// Errors:
+// NV_ERR_INVALID_ADDRESS:
+// base and length are not properly aligned, or the range does not
+// represent a valid UVM allocation, or the range is pageable memory and
+// the system does not support accessing pageable memory, or the range
+// does not represent a supported Operating System allocation.
+//
+// NV_ERR_GENERIC:
+// Unexpected error. We try hard to avoid returning this error code,
+// because it is not very informative.
+//
+//-----------------------------------------------------------------------------
+NV_STATUS UvmDisableReadDuplication(void *base,
+ NvLength length);
+
+//------------------------------------------------------------------------------
+// UvmSetPreferredLocation
+//
+// Sets the preferred location for the given virtual address range to be the
+// specified processor's memory.
+//
+// Both base and length must be aligned to the smallest page size supported by
+// the CPU. The VA range must lie within the largest possible virtual address
+// supported by the specified processor.
+//
+// The virtual address range specified by (base, length) must have been
+// allocated via a call to either UvmAlloc or UvmMemMap, or be supported
+// system-allocated pageable memory. 
If the input range is pageable memory and
+// at least one GPU in the system supports transparent access to pageable
+// memory, the behavior described below does not take effect and the preferred
+// location of the pages in the given range does not change.
+//
+// If any pages in the VA range are associated with a range group that was made
+// non-migratable via UvmPreventMigrationRangeGroups, then those pages are
+// migrated immediately to the specified preferred location and mapped according
+// to policies specified in UvmPreventMigrationRangeGroups. Otherwise, this API
+// neither migrates pages nor does it populate unpopulated pages. Note that if
+// the specified preferred location is a fault-capable GPU and at least one page
+// in the VA range is associated with a non-migratable range group, then an
+// error is returned. Additionally, if the specified preferred location is a
+// non-fault capable GPU and at least one page in the VA range is associated
+// with a non-migratable range group, an error is returned if another
+// non-fault-capable GPU is present in the accessed-by list of that page but P2P
+// support has not been enabled between both GPUs.
+//
+// When a page is in its preferred location, a fault from another processor will
+// not cause a migration if a mapping for that page from that processor can be
+// established without migrating the page.
+//
+// When a page migrates away from its preferred location, the mapping on the
+// preferred location's processor is cleared so that the next access from that
+// processor will cause a fault and migrate the page back to its preferred
+// location. In other words, a page is mapped on the preferred location's
+// processor only if the page is in its preferred location. Thus, when the
+// preferred location changes, mappings to pages in the given range are removed
+// from the new preferred location if the pages are resident in a different
+// processor. Note that if the preferred location's processor is a GPU, then a
+// mapping from that GPU to a page in the VA range is only created if a GPU VA
+// space has been registered for that GPU and the page is in its preferred
+// location.
+//
+// If read duplication has been enabled for any pages in this VA range and
+// UvmPreventMigrationRangeGroups has not been called on the range group that
+// those pages are associated with, then the migration and mapping policies
+// associated with UvmEnableReadDuplication override the policies outlined
+// above. Note that enabling read duplication on any pages in this VA range
+// does not clear the state set by this API for those pages. It merely overrides
+// the policies associated with this state until read duplication is disabled
+// for those pages.
+//
+// If the preferred location processor is present in the accessed-by list of any
+// of the pages in this VA range, then the migration and mapping policies
+// associated with this API override those associated with the accessed-by
+// list.
+//
+// The state set by this API can be cleared either by calling
+// UvmUnsetPreferredLocation for the same VA range or by calling
+// UvmUnregisterGpu on this processor if the processor is a GPU. Note that
+// calling UvmUnregisterGpuVaSpace will not clear the state set by this API.
+// Multiple calls to this API for the same VA range and the same processor are
+// not refcounted, i.e. calling this API on a VA range and processor after it
+// has already been called for that same VA range and processor results in a
+// no-op.
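+//
+// As a rough usage sketch (illustrative only, error handling omitted;
+// "gpuUuid" is assumed to be the UUID of a GPU registered via UvmRegisterGpu,
+// and "base"/"length" to describe a range created with UvmAlloc or UvmMemMap;
+// the newer UvmMigrate prototype is shown, whose preferredCpuMemoryNode
+// argument is only used when the destination is the CPU):
+//
+//     UvmSetPreferredLocation(base, length, &gpuUuid);
+//     // Setting the preferred location does not by itself migrate or
+//     // populate pages, so optionally move them there now:
+//     UvmMigrate(base, length, &gpuUuid, 0);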
+// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// preferredLocationUuid: (INPUT) +// UUID of the preferred location. +// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned, or the range does not +// represent a valid UVM allocation, or the range is pageable memory and +// the system does not support accessing pageable memory, or the range +// does not represent a supported Operating System allocation. +// +// NV_ERR_OUT_OF_RANGE: +// The VA range exceeds the largest virtual address supported by the +// specified processor. +// +// NV_ERR_INVALID_DEVICE: +// preferredLocationUuid is neither the UUID of the CPU nor the UUID of +// a GPU that was registered by this process. Or at least one page in +// VA range belongs to a non-migratable range group and the specified +// UUID represents a fault-capable GPU. Or preferredLocationUuid is the +// UUID of a non-fault-capable GPU and at least one page in the VA range +// belongs to a non-migratable range group and another non-fault-capable +// GPU is in the accessed-by list of the same page but P2P support +// between both GPUs has not been enabled. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmSetPreferredLocation(void *base, + NvLength length, + const NvProcessorUuid *preferredLocationUuid); + +//------------------------------------------------------------------------------ +// UvmUnsetPreferredLocation +// +// Unsets the preferred location associated with all pages in the specified +// virtual address range, reverting the migration and mapping policies outlined +// in UvmSetPreferredLocation. +// +// Both base and length must be aligned to the smallest page size supported by +// the CPU. +// +// The virtual address range specified by (base, length) must have been +// allocated via a call to either UvmAlloc or UvmMemMap, or be supported +// system-allocated pageable memory. If the input range is pageable memory and +// at least one GPU in the system supports transparent access to pageable +// memory, the behavior described below does not take effect and the preferred +// location of the pages in the given range does not change. +// +// If the VA range is associated with a non-migratable range group, then that +// association is cleared. i.e. the pages in this VA range have their range +// group association changed to UVM_RANGE_GROUP_ID_NONE. +// +// It is ok to call this API only on a subset of the VA range on which +// UvmSetPreferredLocation was called or for a VA range on which +// UvmSetPreferredLocation was never called. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned or the range does not +// represent a valid UVM allocation, or the range is pageable memory and +// the system does not support accessing pageable memory, or the range +// does not represent a supported Operating System allocation. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. 
+// +//------------------------------------------------------------------------------ +NV_STATUS UvmUnsetPreferredLocation(void *base, + NvLength length); + +//------------------------------------------------------------------------------ +// UvmSetAccessedBy +// +// Indicates to the UVM driver that the pages in the given virtual address range +// should be mapped on the specified processor whenever establishing such a +// mapping is possible. The purpose of this API is to prevent faults from the +// specified processor to the given VA range as much as possible. +// +// Both base and length must be aligned to the smallest page size supported by +// the CPU. The VA range must lie within the largest possible virtual address +// supported by the specified processor. +// +// The virtual address range specified by (base, length) must have been +// allocated via a call to either UvmAlloc or UvmMemMap, or be supported +// system-allocated pageable memory. If the input range is pageable memory and +// at least one GPU in the system supports transparent access to pageable +// memory, the behavior described below does not take effect and the accessed-by +// processor list of the VA range does not change. +// +// If a page in the VA range is not populated or its current location doesn't +// permit a mapping to be established, then no mapping is created for that page. +// If a page in the VA range migrates to a new location, then the mapping is +// updated to point to the new location if establishing such a mapping is +// possible. If a page in the VA range is associated with a non-migratable range +// group and the specified processor is a non-fault-capable GPU, then an error +// is returned if the mapping cannot be established. +// +// If the specified processor is a GPU and no GPU VA space has been registered +// for it or if the registered GPU VA space gets unregistered, then the policies +// outlined above will take effect the next time a GPU VA space gets registered +// for this GPU. +// +// If read duplication is enabled in any pages in this VA range, then the page +// mapping policy associated with read duplication overrides the mapping policy +// associated with this API. +// +// Similarly, if any page in this VA range has a preferred location, and the +// UUID of the preferred location is the same as the UUID passed in to this API, +// then the mapping policy associated with having a preferred location overrides +// the mapping policy associated with this API. +// +// Note that enabling read duplication or setting a preferred location on any +// pages in this VA range does not clear the state set by this API for those +// pages. It merely overrides the policies associated with this state until read +// duplication is disabled on those pages or their preferred location is +// cleared. +// +// The state set by this API can be cleared either by calling UvmUnsetAccessedBy +// for the same VA range and processor or by calling UvmUnregisterGpu on this +// processor if the processor is a GPU. It is also cleared if the processor is a +// non-fault-capable GPU and the VA range has a preferred location set to a peer +// GPU and peer access is disabled via UvmDisablePeerAccess. Note however that +// calling UvmUnregisterGpuVaSpace will not clear the state set by this API. +// +// Multiple calls to this API for the same VA range and the same processor are +// not refcounted. i.e. 
calling this API on a VA range and processor after it +// has already been called for that same VA range and processor results in a +// no-op. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// accessedByUuid: (INPUT) +// UUID of the processor that should have pages in the the VA range +// mapped when possible. +// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned or the range does not +// represent a valid UVM allocation, or the range is pageable memory and +// the system does not support accessing pageable memory, or the range +// does not represent a supported Operating System allocation. +// +// NV_ERR_OUT_OF_RANGE: +// The VA range exceeds the largest virtual address supported by the +// specified processor. +// +// NV_ERR_INVALID_DEVICE: +// accessedByUuid is neither the UUID of the CPU nor the UUID of a GPU +// that was registered by this process. Or accessedByUuid is the UUID of +// a non-fault-capable GPU and the VA range is associated with a +// non-migratable range group with a preferred location set to another +// non-fault-capable GPU that doesn't have P2P support enabled with this +// GPU. +// +// NV_ERR_NO_MEMORY: +// accessedByUuid is a non-fault-capable GPU and there was insufficient +// memory to create the mapping. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmSetAccessedBy(void *base, + NvLength length, + const NvProcessorUuid *accessedByUuid); + +//------------------------------------------------------------------------------ +// UvmUnsetAccessedBy +// +// Undoes the effect of UvmSetAccessedBy for the given virtual address range on +// the specified processor, thereby reverting the mapping policies imposed by +// UvmSetAccessedBy. +// +// Both base and length must be aligned to the smallest page size supported by +// the CPU +// +// The virtual address range specified by (base, length) must have been +// allocated via a call to either UvmAlloc or UvmMemMap, or be supported +// system-allocated pageable memory. If the input range is pageable memory and +// at least one GPU in the system supports transparent access to pageable +// memory, the behavior described below does not take effect and the accessed-by +// processor list of the VA range does not change. + +// +// Existing mappings to this VA range from the given processor are not affected. +// If any page in the VA range migrates to a different location however, the +// mapping may be cleared or updated based on other mapping policies that are in +// effect. +// +// It is ok to call this API for a subset of a VA range with a accessed-by list +// containing this processor, or for a VA range with an empty accessed-by list. +// +// Arguments: +// base: (INPUT) +// Base address of the virtual address range. +// +// length: (INPUT) +// Length, in bytes, of the range. +// +// accessedByUuid: (INPUT) +// UUID of the processor from which any policies set by +// UvmSetAccessedBy should be revoked for the given VA range. 
+// +// Errors: +// NV_ERR_INVALID_ADDRESS: +// base and length are not properly aligned or the range does not +// represent a valid UVM allocation, or the range is pageable memory and +// the system does not support accessing pageable memory, or the range +// does not represent a supported Operating System allocation. +// +// NV_ERR_INVALID_DEVICE: +// accessedByUuid is neither the UUID of the CPU nor the UUID of a GPU +// that was registered by this process. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmUnsetAccessedBy(void *base, + NvLength length, + const NvProcessorUuid *accessedByUuid); + +//------------------------------------------------------------------------------ +// UvmEnableSystemWideAtomics +// +// Enables software-assisted system-wide atomics support on the specified GPU. +// Any system-wide atomic operation issued from this GPU is now guaranteed to be +// atomic with respect to all accesses from other processors that also support +// system-wide atomics regardless of whether that support is enabled on those +// other processors or not. +// +// The class of atomic operations from the GPU that are considered system-wide +// is GPU architecture dependent. All atomic operations from the CPU are always +// considered to be system-wide and support for system-wide atomics on the CPU +// is always considered to be enabled. +// +// System-wide atomics which cannot be natively supported in hardware are +// emulated using virtual mappings and page faults. For example, assume a +// virtual address which is resident in CPU memory and has CPU memory as its +// preferred location. A GPU with system-wide atomics enabled but without native +// atomics support to CPU memory will not have atomics enabled in its virtual +// mapping of the page that contains that address. If that GPU performs an +// atomic operation, the access will fault, all other processors' mappings to +// that page will have their write permissions revoked, the faulting GPU will be +// granted atomic permissions in its virtual mapping, and the faulting GPU will +// retry its access. Further atomic accesses from that GPU will not cause page +// faults until another processor attempts a write access to the same page. +// +// Multiple calls to this API for the same GPU are not refcounted, i.e. calling +// this API for a GPU for which software-assisted system-wide atomics support +// has already been enabled results in a no-op. +// +// The GPU must have been registered using UvmRegisterGpu prior to making this +// call. By default, software-assisted system-wide atomics support is enabled +// when a GPU is registered. +// +// Arguments: +// gpuUuid: (INPUT) +// UUID of the GPU to enable software-assisted system-wide atomics on. +// +// Error codes: +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +// NV_ERR_INVALID_DEVICE: +// The GPU referred to by gpuUuid was not registered. +// +// NV_ERR_NOT_SUPPORTED: +// The GPU does not support system-wide atomic operations, or the GPU +// has hardware support for scoped atomic operations. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. 
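The following sketch (editorial illustration, not part of the original header) shows the typical call pattern for the API documented above: the GPU must already have been registered with UvmRegisterGpu, and NV_ERR_NOT_SUPPORTED is treated as a benign outcome on GPUs that either lack system-wide atomics or handle scoped atomics in hardware. The helper name is an assumption of this sketch.

#include <stdio.h>
#include "uvm.h"   /* assumed include path */

/* Illustrative sketch: (re-)enable software-assisted system-wide atomics on a
 * previously registered GPU, tolerating GPUs that do not support the feature. */
static NV_STATUS enable_atomics_if_possible(const NvProcessorUuid *gpuUuid)
{
    NV_STATUS status = UvmEnableSystemWideAtomics(gpuUuid);
    if (status == NV_ERR_NOT_SUPPORTED) {
        /* Documented above: the GPU either has no system-wide atomics or has
         * hardware support for scoped atomics; nothing further to do. */
        return NV_OK;
    }
    if (status != NV_OK)
        fprintf(stderr, "UvmEnableSystemWideAtomics failed: %d\n", (int)status);
    return status;
}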
+// +//------------------------------------------------------------------------------ +NV_STATUS UvmEnableSystemWideAtomics(const NvProcessorUuid *gpuUuid); + +//------------------------------------------------------------------------------ +// UvmDisableSystemWideAtomics +// +// Disables software-assisted system-wide atomics support on the specified GPU. +// Any atomic operation from this GPU is no longer guaranteed to be atomic with +// respect to accesses from other processors in the system, even if the +// operation has system-wide scope at the instruction level. +// +// The GPU must have been registered using UvmRegisterGpu prior to making this +// call. It is however ok to call this API for GPUs that do not have support for +// system-wide atomic operations enabled. If the GPU is unregistered via +// UvmUnregisterGpu and then registered again via UvmRegisterGpu, support for +// software-assisted system-wide atomics will be enabled. +// +// Arguments: +// gpuUuid: (INPUT) +// UUID of the GPU to disable software-assisted system-wide atomics on. +// +// Error codes: +// NV_ERR_INVALID_DEVICE: +// The GPU referred to by gpuUuid was not registered. +// +// NV_ERR_NOT_SUPPORTED: +// The GPU does not support system-wide atomic operations, or the GPU +// has hardware support for scoped atomic operations. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmDisableSystemWideAtomics(const NvProcessorUuid *gpuUuid); + +//------------------------------------------------------------------------------ +// UvmGetFileDescriptor +// +// Returns the UVM file descriptor currently being used to call into the UVM +// kernel mode driver. The data type of the returned file descriptor is platform +// specific. +// +// If UvmInitialize has not yet been called, an error is returned. If +// UvmInitialize was called with UVM_AUTO_FD, then the file created during +// UvmInitialize is returned. If UvmInitialize was called with an existing UVM +// file descriptor, then that file descriptor is returned. +// +// Arguments: +// returnedFd: (OUTPUT) +// A platform specific file descriptor. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// returnedFd is NULL. +// +// NV_ERR_INVALID_STATE: +// UVM was not initialized before calling this function. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmGetFileDescriptor(UvmFileDescriptor *returnedFd); + +//------------------------------------------------------------------------------ +// UvmIs8Supported +// +// Returns whether the kernel driver has been loaded in UVM 8 mode or not. +// +// Argument: +// is8Supported: (OUTPUT) +// Will be set to true (nonzero) if the driver was loaded as UVM 8, or +// false (zero) if it was loaded as UVM Lite. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// is8Supported is NULL. +// +// NV_ERR_GENERIC: +// Unexpected error. We try hard to avoid returning this error code, +// because it is not very informative. 
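Looping back to UvmGetFileDescriptor above: the descriptor it returns is the natural input for the tools API declared later in this header, which requires a duplicated handle because UvmToolsDestroySession closes the descriptor it is given. The sketch below is an editorial illustration for Linux only and assumes that UvmFileDescriptor is a plain POSIX file descriptor there; the helper name is hypothetical.

#include <unistd.h>   /* dup() */
#include "uvm.h"      /* assumed include path */

/* Illustrative sketch (Linux): fetch the UVM file descriptor of the current
 * process and hand a duplicate of it to UvmToolsCreateSession (declared further
 * below), so that destroying the session does not close UVM's own descriptor. */
static NV_STATUS create_tools_session_for_self(UvmToolsSessionHandle *session)
{
    UvmFileDescriptor fd;
    NV_STATUS status = UvmGetFileDescriptor(&fd);
    if (status != NV_OK)
        return status;

    int dupFd = dup((int)fd);   /* assumption: UvmFileDescriptor is an int here */
    if (dupFd < 0)
        return NV_ERR_GENERIC;

    return UvmToolsCreateSession((UvmFileDescriptor)dupFd, session);
}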
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmIs8Supported(NvU32 *is8Supported);
+
+//------------------------------------------------------------------------------
+// Tools API
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// UvmDebugGetVersion
+//
+// Returns the version number of the UVM debug library.
+// See uvm_types.h for valid version numbers, e.g. UVM_DEBUG_V1
+//
+//------------------------------------------------------------------------------
+unsigned UvmDebugVersion(void);
+
+//------------------------------------------------------------------------------
+// UvmDebugCreateSession
+//
+// Creates a handle for a debugging session.
+//
+// When the client initializes, it will pass in a process handle and get a
+// session ID for itself. Subsequent calls to the UVM API will take in that
+// session ID.
+//
+// There are security requirements to this call.
+// One of the following must be true:
+// 1. The session owner must be running as an elevated user
+// 2. The session owner and target must belong to the same user and the
+// session owner is at least as privileged as the target.
+//
+// For CUDA 6.0 we can create at most 64 sessions per debugger process.
+//
+// Arguments:
+// pid: (INPUT)
+// Process id for which the debugging session will be created
+//
+// session: (OUTPUT)
+// Handle to the debugging session associated to that pid.
+//
+// Error codes:
+// NV_ERR_PID_NOT_FOUND:
+// pid is invalid or not associated with UVM.
+//
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// Function fails the security check.
+//
+// NV_ERR_INSUFFICIENT_RESOURCES:
+// Attempt is made to allocate more than 64 sessions per process.
+//
+// NV_ERR_BUSY_RETRY:
+// internal resources are blocked by other threads.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmDebugCreateSession(unsigned pid,
+ UvmDebugSession *session);
+
+//------------------------------------------------------------------------------
+// UvmDebugDestroySession
+//
+// Destroys a debugging session.
+//
+// Arguments:
+// session: (INPUT)
+// Handle to the debugging session associated to that pid.
+//
+// Error codes:
+// NV_ERR_INVALID_ARGUMENT:
+// session is invalid.
+//
+// NV_ERR_BUSY_RETRY:
+// Debug session is in use by some other thread.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmDebugDestroySession(UvmDebugSession session);
+
+//------------------------------------------------------------------------------
+// UvmDebugCountersEnable
+//
+// Enables the counters following the user specified configuration.
+//
+// The user must fill a list with the configuration of the counters they need to
+// either enable or disable. Each entry can enable or disable only one counter.
+//
+// The structure (UvmCounterConfig) has several fields:
+// - scope: Please see the UvmCounterScope enum (above), for details.
+// - name: Name of the counter. Please check UvmCounterName for list.
+// - gpuid: Identifies the GPU for which the counter will be enabled/disabled.
+// This parameter is ignored in AllGpu scopes.
+// - state: A value of 0 will disable the counter, a value of 1 will enable
+// the counter.
+//
+// Note: All counters are refcounted, which means that a counter will only be
+// disabled when its refcount reaches zero.
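Before any of the counter or event calls that follow can be used, a debugging session has to be created and eventually destroyed. The sketch below is an editorial illustration of that lifecycle using only the two functions declared above; the helper name is hypothetical and the target pid is assumed to come from elsewhere, for example a debugger attach request.

#include "uvm.h"   /* assumed include path */

/* Illustrative sketch: open a debugging session for a target pid, run some
 * caller-provided work against it, then tear the session down again. */
static NV_STATUS with_debug_session(unsigned pid,
                                    NV_STATUS (*work)(UvmDebugSession session))
{
    UvmDebugSession session;
    NV_STATUS status = UvmDebugCreateSession(pid, &session);
    if (status != NV_OK)
        return status;

    status = work(session);

    /* Destroy the session regardless of the work's outcome, but do not let a
     * cleanup failure mask an earlier error. */
    NV_STATUS destroyStatus = UvmDebugDestroySession(session);
    return (status != NV_OK) ? status : destroyStatus;
}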
+// +// Arguments: +// session: (INPUT) +// Handle to the debugging session. +// +// config: (INPUT) +// pointer to configuration list as per above. +// +// count: (INPUT) +// number of entries in the config list. +// +// Error codes: +// NV_ERR_INSUFFICIENT_PERMISSIONS: +// Function fails the security check +// +// RM_INVALID_ARGUMENT: +// debugging session is invalid or one of the counter lines is invalid. +// If call returns this value, no action specified by the config list +// will have taken effect. +// +// NV_ERR_NOT_SUPPORTED: +// UvmCounterScopeGlobalSingleGpu is not supported for CUDA 6.0 +// +// NV_ERR_BUSY_RETRY: +// the debug session is in use by some other thread. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmDebugCountersEnable(UvmDebugSession session, + UvmCounterConfig *config, + unsigned count); + +//------------------------------------------------------------------------------ +// UvmDebugGetCounterHandle +// +// Returns handle to a particular counter. This is an opaque handle that the +// implementation uses in order to find your counter, later. This handle can be +// used in subsequent calls to UvmDebugGetCounterVal(). +// +// Arguments: +// session: (INPUT) +// Handle to the debugging session. +// +// scope: (INPUT) +// Scope that will be mapped. +// +// counterName: (INPUT) +// Name of the counter in that scope. +// +// gpu: (INPUT) +// Gpuid of the scoped GPU. This parameter is ignored in AllGpu scopes. +// +// pCounterHandle: (OUTPUT) +// Handle to the counter address. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// Specified scope/gpu pair or session id is invalid +// +// NV_ERR_NOT_SUPPORTED: +// UvmCounterScopeGlobalSingleGpu is not supported for CUDA 6.0 +// +// NV_ERR_BUSY_RETRY: +// debug session is in use by some other thread. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmDebugGetCounterHandle(UvmDebugSession session, + UvmCounterScope scope, + UvmCounterName counterName, + NvProcessorUuid gpu, + NvUPtr *pCounterHandle); + +//------------------------------------------------------------------------------ +// UvmDebugGetCounterVal +// +// Returns the counter value specified by the counter name. +// +// Arguments: +// session: (INPUT) +// Handle to the debugging session. +// +// counterHandleArray: (INPUT) +// Array of counter handles +// +// handleCount: (INPUT) +// Number of handles in the pPCounterHandle array. +// +// counterValArray: (OUTPUT) +// Array of counter values corresponding to the handles. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// one of the specified handles is invalid. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmDebugGetCounterVal(UvmDebugSession session, + NvUPtr *counterHandleArray, + unsigned handleCount, + unsigned long long *counterValArray); + +//------------------------------------------------------------------------------ +// UvmEventQueueCreate +// +// This call creates an event queue of the given size. +// No events are added in the queue till they are enabled by the user. +// Event queue data is visible to the user even after the target process dies +// if the session is active and queue is not freed. +// +// User doesn't need to serialize multiple UvmEventQueueCreate calls as +// each call creates a new queue state associated with the returned queue +// handle. +// +// Arguments: +// sessionHandle: (INPUT) +// Handle to the debugging session. 
+//
+// queueHandle: (OUTPUT)
+// Handle to created queue.
+//
+// queueSize: (INPUT)
+// Size of the event queue buffer in units of UvmEventEntry's.
+// This quantity must be > 1.
+//
+// notificationCount: (INPUT)
+// Number of entries after which the user should be notified that
+// there are events to fetch.
+// User is notified when queueEntries >= notification count.
+//
+// Error codes:
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// Function fails the security check.
+//
+// NV_ERR_INVALID_ARGUMENT:
+// One of the arguments is invalid.
+//
+// NV_ERR_INSUFFICIENT_RESOURCES:
+// It is not possible to allocate a queue of the requested size.
+//
+// NV_ERR_BUSY_RETRY:
+// internal resources are blocked by other threads.
+//
+// NV_ERR_PID_NOT_FOUND:
+// queue create call is made on a session after the target dies.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmEventQueueCreate(UvmDebugSession sessionHandle,
+ UvmEventQueueHandle *queueHandle,
+ NvS64 queueSize,
+ NvU64 notificationCount,
+ UvmEventTimeStampType timeStampType);
+
+//------------------------------------------------------------------------------
+// UvmEventQueueDestroy
+//
+// This call frees all internal resources associated with the queue, including
+// unpinning of the memory associated with that queue. Freeing the user buffer
+// is the responsibility of the caller. The event queue might also be destroyed
+// as a side effect of destroying the session associated with this queue.
+//
+// User needs to ensure that a queue handle is not deleted while some other
+// thread is using the same queue handle.
+//
+// Arguments:
+// sessionHandle: (INPUT)
+// Handle to the debugging session.
+//
+// queueHandle: (INPUT)
+// Handle to the queue which is to be freed
+//
+// Error codes:
+// RM_ERR_NOT_PERMITTED:
+// Function fails the security check.
+//
+// NV_ERR_INVALID_ARGUMENT:
+// One of the arguments is invalid.
+//
+// NV_ERR_BUSY_RETRY:
+// internal resources are blocked by other threads.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmEventQueueDestroy(UvmDebugSession sessionHandle,
+ UvmEventQueueHandle queueHandle);
+
+//------------------------------------------------------------------------------
+// UvmEventEnable
+//
+// This call enables a particular event type in the event queue.
+// All events are disabled by default when a queue is created.
+//
+// This API does not access the queue state maintained in the user
+// library so the user doesn't need to acquire a lock to protect the queue
+// state.
+//
+// Arguments:
+// sessionHandle: (INPUT)
+// Handle to the debugging session.
+//
+// queueHandle: (INPUT)
+// Handle to the queue where events are to be enabled
+//
+// eventTypeFlags: (INPUT)
+// This field specifies the event types to be enabled. For example:
+// To enable migration events and memory violations: pass flags
+// "UVM_EVENT_ENABLE_MEMORY_VIOLATION | UVM_EVENT_ENABLE_MIGRATION"
+//
+// Error codes:
+// RM_ERR_NOT_PERMITTED:
+// Function fails the security check.
+//
+// NV_ERR_INVALID_ARGUMENT:
+// One of the arguments is invalid.
+//
+// NV_ERR_PID_NOT_FOUND:
+// this call is made after the target process dies
+//
+// NV_ERR_BUSY_RETRY:
+// internal resources are blocked by other threads.
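Putting the two calls above together, here is a sketch (editorial illustration, not part of the original header) that creates a queue and turns on migration and memory-violation events. The timestamp type is left as a parameter because the available UvmEventTimeStampType values live in uvm_types.h, which is not shown here; the queue and notification sizes are arbitrary example values and the helper name is hypothetical.

#include "uvm.h"   /* assumed include path */

/* Illustrative sketch: create an event queue on an existing debug session and
 * enable migration and memory-violation events on it. */
static NV_STATUS setup_event_queue(UvmDebugSession session,
                                   UvmEventTimeStampType timeStampType,
                                   UvmEventQueueHandle *queue)
{
    /* Example sizes: room for 512 entries, notify once 64 are pending. */
    NV_STATUS status = UvmEventQueueCreate(session, queue, 512, 64, timeStampType);
    if (status != NV_OK)
        return status;

    status = UvmEventEnable(session,
                            *queue,
                            UVM_EVENT_ENABLE_MIGRATION |
                            UVM_EVENT_ENABLE_MEMORY_VIOLATION);
    if (status != NV_OK)
        (void)UvmEventQueueDestroy(session, *queue);   /* best-effort cleanup */
    return status;
}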
+// +//------------------------------------------------------------------------------ +NV_STATUS UvmEventEnable(UvmDebugSession sessionHandle, + UvmEventQueueHandle queueHandle, + unsigned eventTypeFlags); + +//------------------------------------------------------------------------------ +// UvmEventDisable +// +// This call disables a particular event type in the queue. +// +// This API does not access the queue state maintained in the user +// library so the user doesn't need to acquire a lock to protect the queue +// state. +// +// Arguments: +// sessionHandle: (INPUT) +// Handle to the debugging session. +// +// queueHandle: (INPUT) +// Handle to the queue where events are to be enabled +// +// eventTypeFlags: (INPUT) +// This field specifies the event types to be enabled +// For example: To enable migration events and memory violations: +// pass "UVM_EVENT_ENABLE_MEMORY_VIOLATION |UVM_EVENT_ENABLE_MIGRATION" +// as flags +// +// Error codes: +// RM_ERR_NOT_PERMITTED: +// Function fails the security check. +// +// NV_ERR_INVALID_ARGUMENT: +// One of the arguments is invalid. +// +// NV_ERR_PID_NOT_FOUND: +// this call is made after the target process dies +// +// NV_ERR_BUSY_RETRY: +// internal resources are blocked by other threads. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmEventDisable(UvmDebugSession sessionHandle, + UvmEventQueueHandle queueHandle, + unsigned eventTypeFlags); + +//------------------------------------------------------------------------------ +// UvmEventWaitOnQueueHandles +// +// User is notified when queueEntries >= notification count. +// This call does a blocking wait for this notification. It returns when +// at least one of the queue handles has events to be fetched or if it timeouts +// +// This API accesses constant data maintained in the queue state. Hence, +// the user doesn't need to acquire a lock to protect the queue state. +// +// Arguments: +// queueHandles: (INPUT) +// array of queue handles. +// +// arraySize: (INPUT) +// number of handles in array. +// +// timeout: (INPUT) +// timeout in msec +// +// pNotificationFlags: (OUTPUT) +// If a particular queue handle in the input array is notified then +// the respective bit flag is set in pNotificationFlags. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// one of the queueHandles is invalid. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmEventWaitOnQueueHandles(UvmEventQueueHandle *queueHandleArray, + unsigned arraySize, + NvU64 timeout, + unsigned *pNotificationFlags); + +//------------------------------------------------------------------------------ +// UvmEventGetNotificationHandles +// +// User is notified when queueEntries >= notification count. +// The user can directly get the queue notification handles rather than using +// a UVM API to wait on queue handles. This helps the user to wait on other +// objects (apart from queue notification) along with queue notification +// handles in the same thread. The user can safely use this call along with the +// library supported wait call UvmEventWaitOnQueueHandles. +// +// This API reads constant data maintained in the queue state. Hence, +// the user doesn't need to acquire a lock to protect the queue state. +// +// Arguments: +// queueHandles: (INPUT) +// array of queue handles. +// +// arraySize: (INPUT) +// number of handles in array. 
+// +// notificationHandles: (OUTPUT) +// Windows: Output of this call contains an array of 'windows event +// handles' corresponding to the queue handles passes as input. +// Linux: All queues belonging to the same process share the same +// file descriptor(fd) for notification. If the user chooses to use +// UvmEventGetNotificationHandles then he should check all queues +// for new events (by calling UvmEventFetch) when notified on +// the fd. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// One of the arguments is invalid. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmEventGetNotificationHandles(UvmEventQueueHandle *queueHandleArray, + unsigned arraySize, + void **notificationHandleArray); + +//------------------------------------------------------------------------------ +// UvmEventGetGpuUuidTable +// +// Each migration event entry contains the gpu index to/from where data is +// migrated. This index maps to a corresponding gpu UUID in the gpuUuidTable. +// Using indices saves on the size of each event entry. This API provides the +// gpuIndex to gpuUuid relation to the user. +// +// This API does not access the queue state maintained in the user +// library and so the user doesn't need to acquire a lock to protect the +// queue state. +// +// Arguments: +// gpuUuidTable: (OUTPUT) +// The return value is an array of UUIDs. The array index is the +// corresponding gpuIndex. There can be at max 32 gpus associated with +// UVM, so array size is 32. +// +// validCount: (OUTPUT) +// The system doesn't normally contain 32 GPUs. This field gives the +// count of entries that are valid in the returned gpuUuidTable. +// +// Error codes: +// NV_ERR_BUSY_RETRY: +// internal resources are blocked by other threads. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmEventGetGpuUuidTable(NvProcessorUuid *gpuUuidTable, + unsigned *validCount); + +//------------------------------------------------------------------------------ +// UvmEventFetch +// +// This call is used to fetch the queue entries in a user buffer. +// +// This API updates the queue state. Hence simultaneous calls to fetch/skip +// events should be avoided as that might corrupt the queue state. +// +// Arguments: +// sessionHandle: (INPUT) +// Handle to the debugging session. +// +// queueHandle: (INPUT) +// queue from where to fetch the events. +// +// pBuffer: (OUTPUT) +// Pointer to the buffer where the API will copy the events. User +// shall ensure the size is enough. +// +// nEntries: (INPUT/OUTPUT) +// It provides the maximum number of entries that will be fetched +// from the queue. If this number is larger than the size of the +// queue it will be internally capped to that value. +// As output it returns the actual number of entries copies to the +// buffer. +// +// Error codes: +// RM_ERR_NOT_PERMITTED: +// Function fails the security check. +// +// NV_ERR_INVALID_ARGUMENT: +// One of the arguments is invalid. +// +// NV_ERR_INVALID_INDEX: +// The indices of the queue have been corrupted. +// +// NV_ERR_BUFFER_TOO_SMALL: +// The event queue buffer provided by the caller was too small to +// contain all of the events that occurred during this run. +// Events were therefore dropped (not recorded). +// Please re-run with a larger buffer. 
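The following loop-body sketch (editorial illustration, not part of the original header) combines UvmEventWaitOnQueueHandles with UvmEventFetch as described above; the caller supplies the entry buffer and decides what to do with each fetched UvmEventEntry. The 1000 ms timeout is an arbitrary example value and the helper name is hypothetical.

#include "uvm.h"   /* assumed include path */

/* Illustrative sketch: block until the queue is notified, then drain whatever
 * events are available into the caller's buffer. */
static NV_STATUS wait_and_fetch(UvmDebugSession session,
                                UvmEventQueueHandle queue,
                                UvmEventEntry *entries,
                                NvU64 capacity,
                                NvU64 *numFetched)
{
    unsigned notificationFlags = 0;
    NV_STATUS status = UvmEventWaitOnQueueHandles(&queue, 1, 1000 /* ms */,
                                                  &notificationFlags);
    if (status != NV_OK)
        return status;

    /* nEntries is in/out: pass the buffer capacity, get back the count copied. */
    *numFetched = capacity;
    return UvmEventFetch(session, queue, entries, numFetched);
}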
+// +//------------------------------------------------------------------------------ +NV_STATUS UvmEventFetch(UvmDebugSession sessionHandle, + UvmEventQueueHandle queueHandle, + UvmEventEntry *pBuffer, + NvU64 *nEntries); + +//------------------------------------------------------------------------------ +// UvmEventSkipAll +// +// This API drops all event entries from the queue. +// +// This API updates the queue state. Hence simultaneous calls to fetch/ +// skip events should be avoided as that might corrupt the queue state. +// +// Arguments: +// sessionHandle: (INPUT) +// Handle to the debugging session. +// +// queueHandle: (INPUT) +// target queue. +// +// Error codes: +// RM_ERR_NOT_PERMITTED: +// Function fails the security check. +// +// NV_ERR_INVALID_ARGUMENT: +// One of the arguments is invalid. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmEventSkipAll(UvmDebugSession sessionHandle, + UvmEventQueueHandle queueHandle); + +//------------------------------------------------------------------------------ +// UvmEventQueryTimeStampType +// +// This API returns the type of time stamp used in an event entry for a given +// queue. +// +// This API reads constant data maintained in the queue state. Hence, +// the user doesn't need to acquire a lock to protect the queue state. +// +// Arguments: +// sessionHandle: (INPUT) +// Handle to the debugging session. +// +// queueHandle: (INPUT) +// target queue. +// +// timeStampType: (OUTPUT) +// type of time stamp used in event entry. See UvmEventTimestampType +// for supported types of time stamps. +// +// Error codes: +// RM_ERR_NOT_PERMITTED: +// Function fails the security check. +// +// NV_ERR_INVALID_ARGUMENT: +// One of the arguments is invalid. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmEventQueryTimeStampType(UvmDebugSession sessionHandle, + UvmEventQueueHandle queueHandle, + UvmEventTimeStampType *timeStampType); + +//------------------------------------------------------------------------------ +// UvmDebugAccessMemory +// +// This call can be used by the debugger to read/write memory range. UVM driver +// may not be aware of all the pages in this range. A bit per page is set by the +// driver if it is read/written by UVM. +// +// Arguments: +// session: (INPUT) +// Handle to the debugging session. +// +// baseAddress: (INPUT) +// base address from where memory is to be accessed +// +// sizeInBytes: (INPUT) +// Number of bytes to be accessed +// +// accessType: (INPUT) +// Read or write access request +// +// buffer: (INPUT/OUTPUT) +// This buffer would be read or written to by the driver. +// User needs to allocate a big enough buffer to fit sizeInBytes. +// +// isBitmaskSet: (INPUT/OUTPUT) +// Set to 1, if any field in bitmask is set +// NULL(INPUT) if unused +// +// bitmask: (INPUT/OUTPUT) +// One bit per page is set if UVM reads or writes to it. +// User should allocate a bitmask big enough to fit one bit per page +// covered by baseAddress + sizeInBytes: +// (baseAlignmentBytes + sizeInBytes + pageSize - 1)/pageSize number +// of bits. +// NULL(IN) if unused. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// One of the arguments is invalid. 
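A small read sketch (editorial illustration, not part of the original header) for the call documented above: the access type value comes from the UvmDebugAccessType enum in uvm_types.h and is left to the caller, and the per-page bitmask outputs are skipped by passing NULL, which the argument descriptions above permit. The helper name is hypothetical.

#include <stddef.h>   /* NULL */
#include "uvm.h"      /* assumed include path */

/* Illustrative sketch: copy sizeInBytes bytes of the target's memory at
 * baseAddress into a caller-provided buffer, ignoring the per-page bitmask. */
static NV_STATUS debug_read(UvmDebugSession session,
                            void *baseAddress,
                            NvU64 sizeInBytes,
                            UvmDebugAccessType readAccess,
                            void *buffer)
{
    /* NULL for isBitmaskSet and bitmask is allowed when the per-page
     * accounting is not needed (see the argument descriptions above). */
    return UvmDebugAccessMemory(session, baseAddress, sizeInBytes, readAccess,
                                buffer, NULL, NULL);
}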
+// +//------------------------------------------------------------------------------ +NV_STATUS UvmDebugAccessMemory(UvmDebugSession session, + void *baseAddress, + NvU64 sizeInBytes, + UvmDebugAccessType accessType, + void *buffer, + NvBool *isBitmaskSet, + NvU64 *bitmask); + +// +// Uvm Tools uvm API +// + + +//------------------------------------------------------------------------------ +// UvmToolsCreateSession +// +// Creates a handle for a tools session. +// +// When the client initializes, it will pass a duplicated Uvm file handle from +// target's process UvmGetFileDescriptor API, e.g. by calling DuplicateHandle, +// dup2, share file descriptor over Unix Socket Domains. Returned session +// handle is required to create other Tool's objects, e.g. events, counters. +// +// In order to guarantee that session persists the lifetime of a target process, +// callee is responsible for passing a duplicate file descriptor. This is also +// required for correctness in case of out-of-process session. +// +// Passing non duplicated Uvm file handle results in undefined behaviour. The +// least that you should expect is that all your session related objects will +// become useless once target process closes Uvm file handle. +// +// +// There are security requirements for this call to be successful. Fortunately, +// after validating a file descriptor, one of the following conditions must +// hold: +// 1. The session owner is running as an elevated user +// 2. The session owner and target belong to the same user and the +// session owner is at least as privileged as the target. +// +// Arguments: +// fd: (INPUT) +// Duplicated file handle from target process. +// +// session: (OUTPUT) +// Handle to the tools session associated to fd above. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// fd is either closed or points to non uvm device. +// +// NV_ERR_NO_MEMORY: +// Internal memory allocation failed. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmToolsCreateSession(UvmFileDescriptor fd, + UvmToolsSessionHandle *session); + +//------------------------------------------------------------------------------ +// UvmToolsDestroySession +// +// Destroys a tools session. This also has a side-effect of closing fd +// associated with this session during UvmToolsCreateSession. +// +// All resources associated with this session (counters, event queues) are also +// destroyed. +// +// Arguments: +// session: (INPUT) +// Handle associated with a Tool's session. +// +// Error codes: +// NV_ERR_INVALID_ARGUMENT: +// session handle does not refer to a valid session. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmToolsDestroySession(UvmToolsSessionHandle session); + +// +// Events subsystem +// +// Events subsystem is useful for a tools process to track target process +// behaviour. Every event refers to a single process using Unified memory. +// +// The most typical use case is as follows: +// 1. Create event Queue using UvmToolsCreateEventQueue +// 2. Start capture of interesting event types using +// UvmToolsEventQueueEnableEvents +// 3. poll / Loop using Get/Put pointer +// - Consume existing events from user's buffer +// - exit loop based on some condition (e.g. timeout, target process exit) +// - pause (Stop) capture of some of the events +// 4. 
Destroy event Queue using UvmToolsDestroyEventQueue +// + + +NvLength UvmToolsGetEventControlSize(void); + +NvLength UvmToolsGetEventEntrySize(void); + +NvLength UvmToolsGetNumberOfCounters(void); + +//------------------------------------------------------------------------------ +// UvmToolsCreateEventQueue +// +// This call creates an event queue that can hold the given number of events. +// All events are disabled by default. Event queue data persists lifetime of the +// target process. +// +// Arguments: +// session: (INPUT) +// Handle to the tools session. +// +// event_buffer: (INPUT) +// User allocated buffer. Must be page-aligned. Must be large enough to +// hold at least event_buffer_size events. Gets pinned until queue is +// destroyed. +// +// event_buffer_size: (INPUT) +// Size of the event queue buffer in units of UvmEventEntry's. Must be +// a power of two, and greater than 1. +// +// event_control (INPUT) +// User allocated buffer. Must be page-aligned. Must be large enough to +// hold UvmToolsEventControlData (although single page-size allocation +// should be more than enough). One could call +// UvmToolsGetEventControlSize() function to find out current size of +// UvmToolsEventControlData. Gets pinned until queue is destroyed. +// +// queue: (OUTPUT) +// Handle to the created queue. +// +// Error codes: +// NV_ERR_INSUFFICIENT_PERMISSIONS: +// Session handle does not refer to a valid session +// +// NV_ERR_INVALID_ARGUMENT: +// One of the parameters: event_buffer, event_buffer_size, event_control +// is not valid +// +// NV_ERR_INSUFFICIENT_RESOURCES: +// There could be multiple reasons for this error. One would be that it's +// not possible to allocate a queue of requested size. Another would be +// that either event_buffer or event_control memory couldn't be pinned +// (e.g. because of OS limitation of pinnable memory). Also it could not +// have been possible to create UvmToolsEventQueueDescriptor. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmToolsCreateEventQueue(UvmToolsSessionHandle session, + void *event_buffer, + NvLength event_buffer_size, + void *event_control, + UvmToolsEventQueueHandle *queue); + +UvmToolsEventQueueDescriptor UvmToolsGetEventQueueDescriptor(UvmToolsEventQueueHandle queue); + + +//------------------------------------------------------------------------------ +// UvmToolsSetNotificationThreshold +// +// Sets a custom notification threshold in number of events for a given queue. +// Polling subsystem will notify user about this queue if and only if number +// of unconsumed events is greater or equal notification_threshold. Default +// threshold upon creating an event queue is floor(N / 2), where N represents +// maximum number of events that this queue can fit. +// +// Consequently, if notifications_threshold is greater than queue size, there +// will be no notification. +// +// Arguments: +// queue: (INPUT) +// Handle to the queue, for which events are supposed to be enabled +// +// notification_threshold: (INPUT) +// A new threshold, in number of events, to be set for this queue. +// +// Error codes: +// NV_ERR_INSUFFICIENT_PERMISSIONS: +// Event Queue might be corrupted (associated session is not valid). +// +// NV_ERR_INVALID_ARGUMENT: +// Queue handle does not refer to a valid queue. 
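To make the buffer requirements of UvmToolsCreateEventQueue concrete, here is a sketch (editorial illustration, not part of the original header) that sizes and aligns the two buffers with posix_memalign and then creates the queue. It assumes a POSIX system, that 256 entries (a power of two) is an acceptable queue size, and that the caller keeps both buffers allocated until the queue is destroyed; the helper name is hypothetical.

#include <stdlib.h>    /* posix_memalign, free */
#include <unistd.h>    /* sysconf */
#include "uvm.h"       /* assumed include path */

/* Illustrative sketch: allocate page-aligned event and control buffers and
 * create a tools event queue of 256 entries on an existing tools session. */
static NV_STATUS create_tools_event_queue(UvmToolsSessionHandle session,
                                          void **eventBuffer,
                                          void **eventControl,
                                          UvmToolsEventQueueHandle *queue)
{
    const NvLength numEntries = 256;   /* must be a power of two and > 1 */
    size_t pageSize = (size_t)sysconf(_SC_PAGESIZE);

    if (posix_memalign(eventBuffer, pageSize,
                       numEntries * UvmToolsGetEventEntrySize()) != 0)
        return NV_ERR_NO_MEMORY;

    if (posix_memalign(eventControl, pageSize, UvmToolsGetEventControlSize()) != 0) {
        free(*eventBuffer);
        *eventBuffer = NULL;
        return NV_ERR_NO_MEMORY;
    }

    return UvmToolsCreateEventQueue(session, *eventBuffer, numEntries,
                                    *eventControl, queue);
}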
+//------------------------------------------------------------------------------
+NV_STATUS UvmToolsSetNotificationThreshold(UvmToolsEventQueueHandle queue,
+ NvLength notification_threshold);
+
+//------------------------------------------------------------------------------
+// UvmToolsDestroyEventQueue
+//
+// Destroys all internal resources associated with the queue. It unpins the
+// buffers provided in UvmToolsCreateEventQueue. The event queue is also
+// automatically destroyed when the corresponding session gets destroyed.
+//
+// Arguments:
+// queue: (INPUT)
+// Handle to the queue to be destroyed
+//
+// Error codes:
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// Event Queue might be corrupted (associated session is not valid).
+//
+// NV_ERR_INVALID_ARGUMENT:
+// Queue handle does not refer to a valid queue.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmToolsDestroyEventQueue(UvmToolsEventQueueHandle queue);
+
+//------------------------------------------------------------------------------
+// UvmToolsEventQueueEnableEvents
+//
+// This call enables a particular event type in the event queue. All events are
+// disabled by default. Any event type is considered listed if and only if its
+// corresponding value is equal to 1 (in other words, bit is set). Disabled
+// events listed in eventTypeFlags are going to be enabled. Enabled events and
+// events not listed in eventTypeFlags are not affected by this call.
+//
+// It is not an error to call this function multiple times with the same
+// arguments.
+//
+// Arguments:
+// queue: (INPUT)
+// Handle to the queue, for which events are supposed to be enabled
+//
+// eventTypeFlags: (INPUT)
+// This bit field specifies the event types to be enabled. Events not
+// specified in this field do not change their state. For example to
+// enable migration and memory violations events pass flags
+// "UVM_EVENT_ENABLE_MEMORY_VIOLATION | UVM_EVENT_ENABLE_MIGRATION"
+//
+// Error codes:
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// Event Queue might be corrupted (associated session is not valid).
+//
+// NV_ERR_INVALID_ARGUMENT:
+// Queue handle does not refer to a valid queue.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmToolsEventQueueEnableEvents(UvmToolsEventQueueHandle queue,
+ NvU64 eventTypeFlags);
+
+//------------------------------------------------------------------------------
+// UvmToolsEventQueueDisableEvents
+//
+// This call disables a particular event type in the event queue. Any event type
+// is considered listed if and only if its corresponding value is equal to 1
+// (in other words, bit is set). Enabled events listed in eventTypeFlags are
+// going to be disabled. Disabled events and events not listed in eventTypeFlags
+// are not affected by this call.
+//
+// It is not an error to call this function multiple times with the same
+// arguments.
+//
+// Arguments:
+// queue: (INPUT)
+// Handle to the queue, for which events are supposed to be disabled
+//
+// eventTypeFlags: (INPUT)
+// This bit field specifies the event types to be disabled. Events not
+// specified in this field do not change their state. For example to
+// disable migration and memory violations events pass flags
+// "UVM_EVENT_ENABLE_MEMORY_VIOLATION | UVM_EVENT_ENABLE_MIGRATION"
+//
+// Error codes:
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// Event Queue might be corrupted (associated session is not valid).
+//
+// NV_ERR_INVALID_ARGUMENT:
+// Queue handle does not refer to a valid event queue.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmToolsEventQueueDisableEvents(UvmToolsEventQueueHandle queue,
+ NvU64 eventTypeFlags);
+
+
+//------------------------------------------------------------------------------
+// UvmToolsCreateProcessAggregateCounters
+//
+// Creates the counters structure for tracking aggregate process counters.
+// These counters are enabled by default.
+//
+// Counter positions follow the layout of the memory that the UVM driver decides
+// to use. To obtain a particular counter value, the user should perform
+// consecutive atomic reads at a given buffer + offset address.
+//
+// The initial value of a counter is not defined. The user should rely on the
+// difference between snapshots.
+//
+// Arguments:
+// session: (INPUT)
+// Handle to the tools session.
+//
+// counters_buffer: (INPUT)
+// User allocated buffer. Must be aligned to the OS's page size. Must
+// be large enough to hold all possible counter types. In practice, a 4kB
+// system page (the minimal granularity) should be sufficient. This memory
+// gets pinned until counters are destroyed.
+//
+// counters: (OUTPUT)
+// Handle to the created counters.
+//
+// Error codes:
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// Provided session is not valid.
+//
+// NV_ERR_INSUFFICIENT_RESOURCES:
+// There could be multiple reasons for this error. One would be that it's
+// not possible to allocate the counters structure. Another would be that
+// the counters_buffer memory couldn't be pinned
+// (e.g. because of OS limitation of pinnable memory)
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmToolsCreateProcessAggregateCounters(UvmToolsSessionHandle session,
+ void *counters_buffer,
+ UvmToolsCountersHandle *counters);
+
+//------------------------------------------------------------------------------
+// UvmToolsCreateProcessorCounters
+//
+// Creates the counters structure for tracking per-processor counters.
+// These counters are disabled by default.
+//
+// Counter positions follow the layout of the memory that the UVM driver decides
+// to use. To obtain a particular counter value, the user should perform
+// consecutive atomic reads at a given buffer + offset address.
+//
+// The initial value of a counter is not defined. The user should rely on the
+// difference between snapshots.
+//
+// Arguments:
+// session: (INPUT)
+// Handle to the tools session.
+//
+// counters_buffer: (INPUT)
+// User allocated buffer. Must be aligned to the OS's page size. Must
+// be large enough to hold all possible counter types. In practice, a 4kB
+// system page should be sufficient. This memory gets pinned until
+// counters are destroyed.
+//
+// processorUuid: (INPUT)
+// UUID of the resource, for which counters will provide statistic data.
+//
+// counters: (OUTPUT)
+// Handle to the created counters.
+//
+// Error codes:
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// session handle does not refer to a valid tools session
+//
+// NV_ERR_INSUFFICIENT_RESOURCES:
+// There could be multiple reasons for this error. One would be that it's
+// not possible to allocate the counters structure. Another would be that
+// the counters_buffer memory couldn't be pinned
+// (e.g.
because of OS limitation of pinnable memory)
+//
+// NV_ERR_INVALID_ARGUMENT:
+// processorUuid does not refer to any known resource in the UVM driver
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmToolsCreateProcessorCounters(UvmToolsSessionHandle session,
+ void *counters_buffer,
+ const NvProcessorUuid *processorUuid,
+ UvmToolsCountersHandle *counters);
+
+//------------------------------------------------------------------------------
+// UvmToolsDestroyCounters
+//
+// Destroys all internal resources associated with this counters structure.
+// It unpins the buffer provided in UvmToolsCreate*Counters. The counters
+// structure is also destroyed when the corresponding session is destroyed.
+//
+// Arguments:
+// counters: (INPUT)
+// Handle to the counters structure.
+//
+// Error codes:
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// State of the counters has been corrupted.
+//
+// NV_ERR_INVALID_ARGUMENT:
+// Counters handle does not refer to a valid Counters structure.
+//
+//------------------------------------------------------------------------------
+NV_STATUS UvmToolsDestroyCounters(UvmToolsCountersHandle counters);
+
+//------------------------------------------------------------------------------
+// UvmToolsEnableCounters
+//
+// This call enables certain counter types in the counters structure. Any
+// counter type is considered listed if and only if its corresponding value is
+// equal to 1 (in other words, bit is set). Disabled counter types listed in
+// counterTypeFlags are going to be enabled. Already enabled counter types and
+// counter types not listed in counterTypeFlags are not affected by this call.
+//
+// It is not an error to call this function multiple times with the same
+// arguments.
+//
+// Arguments:
+// counters: (INPUT)
+// Handle to the counters structure.
+//
+// counterTypeFlags: (INPUT)
+// This bit field specifies the counter types to be enabled.
+// For example, to enable faults number accounting and number of bytes
+// transferred into a given resource (or aggregate) pass flags
+// "UVM_COUNTER_ENABLE_FAULTS_NUMBER |
+// UVM_COUNTER_ENABLE_BYTES_TRANSFERRED_IN"
+//
+// Error codes:
+// NV_ERR_INSUFFICIENT_PERMISSIONS:
+// Counters structure might be corrupted (associated session is not
+// valid).
+//
+// NV_ERR_INVALID_ARGUMENT:
+// Counters handle does not refer to a valid counters structure.
+//------------------------------------------------------------------------------
+NV_STATUS UvmToolsEnableCounters(UvmToolsCountersHandle counters,
+ NvU64 counterTypeFlags);
+
+//------------------------------------------------------------------------------
+// UvmToolsDisableCounters
+//
+// This call disables certain counter types in the counters structure. Any
+// counter type is considered listed if and only if its corresponding value is
+// equal to 1 (in other words, bit is set). Enabled counter types listed in
+// counterTypeFlags are going to be disabled. Already disabled counter types and
+// counter types not listed in counterTypeFlags are not affected by this call.
+//
+// It is not an error to call this function multiple times with the same
+// arguments.
+//
+// Arguments:
+// counters: (INPUT)
+// Handle to the counters structure.
+//
+// counterTypeFlags: (INPUT)
+// This bit field specifies the counter types to be disabled.
+// For example, to disable faults number accounting and number of bytes +// transferred into a given resource (or aggregate) pass flags +// "UVM_COUNTER_ENABLE_FAULTS_NUMBER | +// UVM_COUNTER_ENABLE_BYTES_TRANSFERRED_IN" +// +// Error codes: +// NV_ERR_INSUFFICIENT_PERMISSIONS: +// Counters structure mighe be corrupted (associated session is not +// valid). +// +// NV_ERR_INVALID_ARGUMENT: +// Counters handle does not refer to a valid counters structure. +//------------------------------------------------------------------------------ +NV_STATUS UvmToolsDisableCounters(UvmToolsCountersHandle counters, + NvU64 counterTypeFlags); + +//------------------------------------------------------------------------------ +// UvmToolsReadProcessMemory +// +// Reads up to size bytes from a given target process's virtual address. +// If size is 0, function should successfully return size of the largest size +// that can be read starting at a given target process's virtual memory. This +// might be used to discover size of user's allocation. +// +// Upon successful execution and size greater than 0, user should have a copy of +// target's process memory in a given buffer. Result is unspecified in case of +// In-process scenario when targetVa address + size overlaps with buffer + size. +// +// This is essentially a UVM version of RM ctrl call +// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY. For implementation constraints (and more +// information), please refer to the documentation: +// //sw/docs/resman/components/compute/UVM/subsystems/UVM_8_Tools_API_Design.docx +// +// Arguments: +// session: (INPUT) +// Handle to the tools session. +// +// buffer: (INPUT) +// User buffer (destination) address, where requested memory shall be +// copied. +// +// size: (INPUT) +// Number of bytes requested to be copied. If user's buffer is not large +// enough to fit size bytes, result is unspecified. If this is 0, +// function should return largest chunk of memory available to read. +// +// targetVa: (INPUT) +// Target process's (source) address, from which memory should be +// copied. +// +// bytes_read: (OUTPUT) +// Either number of bytes successfully read or the largest chunk of +// memory available to read, depending on size parameter. +// +// Error codes: +// NV_ERR_INSUFFICIENT_PERMISSIONS: +// session handle does not refer to a valid tools session +// +// NV_ERR_INVALID_ADDRESS: +// UVM driver has no knowledge of targetVa address. +// +// NV_ERR_INVALID_ARGUMENT: +// Read spans more than a single target process allocation. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmToolsReadProcessMemory(UvmToolsSessionHandle session, + void *buffer, + NvLength size, + void *targetVa, + NvLength *bytes_read); + +//------------------------------------------------------------------------------ +// UvmToolsWriteProcessMemory +// +// Writes up to size bytes from a given target process's virtual address. +// If size is 0, function should successfully return size of the largest size +// that can be written starting at a given target process's virtual address. +// This might be used to discover size of user's allocation. +// +// Upon successful execution and size greater than 0, target process should have +// a copy of buffer starting at targetVa address. Result is unspecified in case +// of In-process scenario when targetVa address + size overlaps with +// buffer + size. +// +// This is essentially a UVM version of RM ctrl call +// NV83DE_CTRL_CMD_DEBUG_READ_MEMORY. 
For implementation constraints (and more +// information), please refer to the documentation: +// //sw/docs/resman/components/compute/UVM/subsystems/UVM_8_Tools_API_Design.docx +// +// Arguments: +// session: (INPUT) +// Handle to the tools session. +// +// buffer: (INPUT) +// User buffer (source) address, from which requested memory shall be +// copied. +// +// size: (INPUT) +// Number of bytes requested to be copied. If user's buffer is not large +// enough to fit size bytes, result is unspecified. If this is 0, +// function should return largest chunk of memory available to write. +// +// targetVa: (INPUT) +// Target process's (destination) address, where memory should be +// copied. +// +// bytes_read: (OUTPUT) +// Either number of bytes successfully written or the largest chunk of +// memory available to write, depending on size parameter. +// +// Error codes: +// NV_ERR_INSUFFICIENT_PERMISSIONS: +// session handle does not refer to a valid tools session +// +// NV_ERR_INVALID_ADDRESS: +// UVM driver has no knowledge of targetVa address. +// +// NV_ERR_INVALID_ARGUMENT: +// Write spans more than a single target process allocation. +// +//------------------------------------------------------------------------------ +NV_STATUS UvmToolsWriteProcessMemory(UvmToolsSessionHandle session, + void *buffer, + NvLength size, + void *targetVa, + NvLength *bytes_read); + +//------------------------------------------------------------------------------ +// UvmToolsGetProcessorUuidTable +// +// Populate a table with the UUIDs of all the currently registered processors +// in the target process. When a GPU is registered, it is added to the table. +// When a GPU is unregistered, it is removed. As long as a GPU remains registered, +// its index in the table does not change. New registrations obtain the first +// unused index. +// +// Arguments: +// session: (INPUT) +// Handle to the tools session. +// +// table: (OUTPUT) +// Array of processor UUIDs, including the CPU's UUID which is always +// at index zero. The srcIndex and dstIndex fields of the +// UvmEventMigrationInfo struct index this array. Unused indices will +// have a UUID of zero. +// +// count: (OUTPUT) +// Set by UVM to the number of UUIDs written, including any gaps in +// the table due to unregistered GPUs. +// +// Error codes: +// NV_ERR_INVALID_ADDRESS: +// writing to table failed. +//------------------------------------------------------------------------------ +NV_STATUS UvmToolsGetProcessorUuidTable(UvmToolsSessionHandle session, + NvProcessorUuid *table, + NvLength *count); + +//------------------------------------------------------------------------------ +// UvmToolsFlushEvents +// +// Some events, like migrations, which have end timestamps are not immediately +// submitted to queues when they are completed. This call enqueues any +// completed but unenqueued events associated with the session. +// +// Arguments: +// session: (INPUT) +// Handle to the tools session. 
+// +// Error codes: +// NV_ERR_INSUFFICIENT_PERMISSIONS: +// Session handle does not refer to a valid session +//------------------------------------------------------------------------------ +NV_STATUS UvmToolsFlushEvents(UvmToolsSessionHandle session); + +#ifdef __cplusplus +} +#endif + +#endif // _UVM_H_ diff --git a/kernel-open/nvidia-uvm/uvm_ampere.c b/kernel-open/nvidia-uvm/uvm_ampere.c new file mode 100644 index 000000000..6cbb8c8fa --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ampere.c @@ -0,0 +1,102 @@ +/******************************************************************************* + Copyright (c) 2018-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_gpu.h" +#include "uvm_mem.h" +#include "uvm_ampere_fault_buffer.h" + +void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu) +{ + parent_gpu->tlb_batch.va_invalidate_supported = true; + + parent_gpu->tlb_batch.va_range_invalidate_supported = true; + + // TODO: Bug 1767241: Run benchmarks to figure out a good number + parent_gpu->tlb_batch.max_ranges = 8; + + parent_gpu->utlb_per_gpc_count = uvm_ampere_get_utlbs_per_gpc(parent_gpu); + + parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.maxGpcCount * parent_gpu->utlb_per_gpc_count; + { + uvm_fault_buffer_entry_t *dummy; + UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8))); + } + + // A single top level PDE on Ampere covers 128 TB and that's the minimum + // size that can be used. + parent_gpu->rm_va_base = 0; + parent_gpu->rm_va_size = 128ull * 1024 * 1024 * 1024 * 1024; + + parent_gpu->uvm_mem_va_base = 384ull * 1024 * 1024 * 1024 * 1024; + parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE; + + // See uvm_mmu.h for mapping placement + parent_gpu->flat_vidmem_va_base = 132ull * 1024 * 1024 * 1024 * 1024; + parent_gpu->flat_sysmem_va_base = 256ull * 1024 * 1024 * 1024 * 1024; + + parent_gpu->peer_copy_mode = g_uvm_global.peer_copy_mode; + + // Not all units on Ampere support 49-bit addressing, including those which + // access channel buffers. 
+ parent_gpu->max_channel_va = 1ULL << 40; + + parent_gpu->max_host_va = 1ULL << 40; + + // Ampere can map sysmem with any page size + parent_gpu->can_map_sysmem_with_large_pages = true; + + // Prefetch instructions will generate faults + parent_gpu->prefetch_fault_supported = true; + + // Ampere can place GPFIFO in vidmem + parent_gpu->gpfifo_in_vidmem_supported = true; + + parent_gpu->replayable_faults_supported = true; + + parent_gpu->non_replayable_faults_supported = true; + + parent_gpu->access_counters_supported = true; + + parent_gpu->fault_cancel_va_supported = true; + + parent_gpu->scoped_atomics_supported = true; + + parent_gpu->has_clear_faulted_channel_sw_method = true; + + parent_gpu->has_clear_faulted_channel_method = false; + + parent_gpu->smc.supported = true; + + parent_gpu->sparse_mappings_supported = true; + + UVM_ASSERT(parent_gpu->rm_info.gpuArch == NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100); + if (parent_gpu->rm_info.gpuImplementation == NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA100 || + parent_gpu->rm_info.gpuImplementation == NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA000) + parent_gpu->map_remap_larger_page_promotion = true; + else + parent_gpu->map_remap_larger_page_promotion = false; + + parent_gpu->plc_supported = true; +} diff --git a/kernel-open/nvidia-uvm/uvm_ampere_ce.c b/kernel-open/nvidia-uvm/uvm_ampere_ce.c new file mode 100644 index 000000000..fdac5bdbd --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ampere_ce.c @@ -0,0 +1,230 @@ +/******************************************************************************* + Copyright (c) 2018-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_hal_types.h" +#include "clc6b5.h" +#include "clc7b5.h" +#include "clc56f.h" // Needed because HAL ce_init pushes SET_OBJECT + +bool uvm_hal_ampere_ce_method_validate_c6b5(uvm_push_t *push, NvU32 method_address, NvU32 method_data) +{ + if (!uvm_channel_is_proxy(push->channel)) + return true; + + switch (method_address) { + case NVC56F_SET_OBJECT: + case NVC6B5_SET_SEMAPHORE_A: + case NVC6B5_SET_SEMAPHORE_B: + case NVC6B5_SET_SEMAPHORE_PAYLOAD: + case NVC6B5_SET_SRC_PHYS_MODE: + case NVC6B5_SET_DST_PHYS_MODE: + case NVC6B5_LAUNCH_DMA: + case NVC6B5_OFFSET_IN_UPPER: + case NVC6B5_OFFSET_IN_LOWER: + case NVC6B5_OFFSET_OUT_UPPER: + case NVC6B5_OFFSET_OUT_LOWER: + case NVC6B5_LINE_LENGTH_IN: + case NVC6B5_SET_REMAP_CONST_A: + case NVC6B5_SET_REMAP_CONST_B: + case NVC6B5_SET_REMAP_COMPONENTS: + return true; + } + + UVM_ERR_PRINT("Unsupported CE method 0x%x\n", method_address); + return false; +} + +static NvU32 ce_aperture(uvm_aperture_t aperture) +{ + BUILD_BUG_ON(HWCONST(C6B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB) != + HWCONST(C6B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB)); + BUILD_BUG_ON(HWCONST(C6B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM) != + HWCONST(C6B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM)); + BUILD_BUG_ON(HWCONST(C6B5, SET_SRC_PHYS_MODE, TARGET, PEERMEM) != + HWCONST(C6B5, SET_DST_PHYS_MODE, TARGET, PEERMEM)); + + if (aperture == UVM_APERTURE_SYS) { + return HWCONST(C6B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM); + } + else if (aperture == UVM_APERTURE_VID) { + return HWCONST(C6B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB); + } + else { + return HWCONST(C6B5, SET_SRC_PHYS_MODE, TARGET, PEERMEM) | + HWVALUE(C6B5, SET_SRC_PHYS_MODE, FLA, 0) | + HWVALUE(C6B5, SET_SRC_PHYS_MODE, PEER_ID, UVM_APERTURE_PEER_ID(aperture)); + } +} + +// Push SET_{SRC,DST}_PHYS mode if needed and return LAUNCH_DMA_{SRC,DST}_TYPE +// flags +NvU32 uvm_hal_ampere_ce_phys_mode(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src) +{ + NvU32 launch_dma_src_dst_type = 0; + + if (src.is_virtual) + launch_dma_src_dst_type |= HWCONST(C6B5, LAUNCH_DMA, SRC_TYPE, VIRTUAL); + else + launch_dma_src_dst_type |= HWCONST(C6B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL); + + if (dst.is_virtual) + launch_dma_src_dst_type |= HWCONST(C6B5, LAUNCH_DMA, DST_TYPE, VIRTUAL); + else + launch_dma_src_dst_type |= HWCONST(C6B5, LAUNCH_DMA, DST_TYPE, PHYSICAL); + + if (!src.is_virtual && !dst.is_virtual) { + NV_PUSH_2U(C6B5, SET_SRC_PHYS_MODE, ce_aperture(src.aperture), + SET_DST_PHYS_MODE, ce_aperture(dst.aperture)); + } + else if (!src.is_virtual) { + NV_PUSH_1U(C6B5, SET_SRC_PHYS_MODE, ce_aperture(src.aperture)); + } + else if (!dst.is_virtual) { + NV_PUSH_1U(C6B5, SET_DST_PHYS_MODE, ce_aperture(dst.aperture)); + } + + return launch_dma_src_dst_type; +} + +NvU32 uvm_hal_ampere_ce_plc_mode_c7b5(void) +{ + return HWCONST(C7B5, LAUNCH_DMA, DISABLE_PLC, TRUE); +} + +bool uvm_hal_ampere_ce_memcopy_validate_c6b5(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src) +{ + NvU64 push_begin_gpu_va; + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + if (!uvm_gpu_is_virt_mode_sriov_heavy(gpu)) + return true; + + if (uvm_channel_is_proxy(push->channel)) { + if (dst.is_virtual) { + UVM_ERR_PRINT("Destination address of memcopy must be physical, not virtual\n"); + return false; + } + + if (dst.aperture != UVM_APERTURE_VID) { + UVM_ERR_PRINT("Destination address of memcopy must be in vidmem\n"); + return false; + } + + // 
The source address is irrelevant, since it is a pushbuffer offset + if (!IS_ALIGNED(dst.address, 8)){ + UVM_ERR_PRINT("Destination address of memcopy is not 8-byte aligned"); + return false; + } + + if (!src.is_virtual) { + UVM_ERR_PRINT("Source address of memcopy must be virtual\n"); + return false; + } + + push_begin_gpu_va = uvm_pushbuffer_get_gpu_va_for_push(push->channel->pool->manager->pushbuffer, push); + + if ((src.address < push_begin_gpu_va) || (src.address >= push_begin_gpu_va + uvm_push_get_size(push))) { + UVM_ERR_PRINT("Source address of memcopy must point to pushbuffer\n"); + return false; + } + } + else { + // TODO: Bug 3429418: When in SR-IOV heavy, a memcopy/memset pushed to a + // UVM internal channel cannot use peer physical addresses. + if (!dst.is_virtual && !uvm_aperture_is_peer(dst.aperture)) { + UVM_ERR_PRINT("Destination address of memcopy must be virtual, not physical (aperture: %s)\n", + uvm_gpu_address_aperture_string(dst)); + return false; + } + + if (!src.is_virtual && !uvm_aperture_is_peer(src.aperture)) { + UVM_ERR_PRINT("Source address of memcopy must be virtual, not physical (aperture: %s)\n", + uvm_gpu_address_aperture_string(src)); + return false; + } + } + + return true; +} + +// In SR-IOV heavy (GA100 only), the UVM driver is expected to push a patched +// version of an inlined memcopy to the proxy channels. The patching consists in +// passing the offset of the inlined data within the push as the source virtual +// address, instead of passing its GPU VA. +// +// Copies pushed to internal channels use the GPU VA of the inlined data, +// irrespective of the virtualization mode. +void uvm_hal_ampere_ce_memcopy_patch_src_c6b5(uvm_push_t *push, uvm_gpu_address_t *src) +{ + if (!uvm_channel_is_proxy(push->channel)) + return; + + src->address -= uvm_pushbuffer_get_gpu_va_for_push(push->channel->pool->manager->pushbuffer, push); +} + +bool uvm_hal_ampere_ce_memset_validate_c6b5(uvm_push_t *push, uvm_gpu_address_t dst, size_t element_size) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + if (!uvm_gpu_is_virt_mode_sriov_heavy(gpu)) + return true; + + if (uvm_channel_is_proxy(push->channel)) { + if (dst.is_virtual) { + UVM_ERR_PRINT("Destination address of memset must be physical, not virtual\n"); + return false; + } + + if (dst.aperture != UVM_APERTURE_VID) { + UVM_ERR_PRINT("Destination address of memset must be in vidmem\n"); + return false; + } + + if (!IS_ALIGNED(dst.address, 8)){ + UVM_ERR_PRINT("Destination address of memset is not 8-byte aligned"); + return false; + } + + // Disallow memsets that don't match the page table/directory entry + // size. PDE0 entries are 16 bytes wide, but those are written using a + // memcopy. + // + // The memset size is not checked to be a multiple of the element size + // because the check is not exclusive of SR-IOV heavy, and it is already + // present in the uvm_hal_*_memset_* functions. + if (element_size != 8) { + UVM_ERR_PRINT("Memset data must be 8 bytes wide, but found %zu instead\n", element_size); + return false; + } + } + // TODO: Bug 3429418: When in SR-IOV heavy, a memcopy/memset pushed to a + // UVM internal channel cannot use peer physical addresses. 
+ else if (!dst.is_virtual && !uvm_aperture_is_peer(dst.aperture)) { + UVM_ERR_PRINT("Destination address of memset must be virtual, not physical (aperture: %s)\n", + uvm_gpu_address_aperture_string(dst)); + return false; + } + + return true; +} diff --git a/kernel-open/nvidia-uvm/uvm_ampere_fault_buffer.h b/kernel-open/nvidia-uvm/uvm_ampere_fault_buffer.h new file mode 100644 index 000000000..405033fed --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ampere_fault_buffer.h @@ -0,0 +1,88 @@ +/******************************************************************************* + Copyright (c) 2018-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_HAL_AMPERE_FAULT_BUFFER_H__ +#define __UVM_HAL_AMPERE_FAULT_BUFFER_H__ + +#include "nvtypes.h" +#include "uvm_common.h" +#include "uvm_gpu.h" + +// There are up to 8 TPCs per GPC in Ampere, and there are 2 LTP uTLB per TPC. +// Besides, there is one RGG uTLB per GPC. Each TPC has a number of clients +// that can make requests to its uTLBs: 1xTPCCS, 1xPE, 2xT1. Requests from +// these units are routed as follows to the 2 LTP uTLBs: +// +// -------- --------- +// | T1_0 | -----------------> | uTLB0 | +// -------- --------- +// +// -------- --------- +// | T1_1 | -----------------> | uTLB1 | +// -------- --------> --------- +// | ^ +// ------- | | +// | PE | ----------- | +// ------- | +// | +// --------- | +// | TPCCS | ----------------------- +// --------- +// +// +// The client ids are local to their GPC and the id mapping is linear across +// TPCs: TPC_n has TPCCS_n, PE_n, T1_p, and T1_q, where p=2*n and q=p+1. +// +// NV_PFAULT_CLIENT_GPC_LTP_UTLB_n and NV_PFAULT_CLIENT_GPC_RGG_UTLB enums can +// be ignored. These will never be reported in a fault message, and should +// never be used in an invalidate. Therefore, we define our own values. 
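+//
+// As a worked example of the mapping rule above (consistent with the client
+// switch in uvm_hal_ampere_mmu_client_id_to_utlb_id() in uvm_ampere_mmu.c):
+// the clients of TPC_3 are TPCCS_3, PE_3, T1_6, and T1_7. T1_6 reports
+// through UVM_AMPERE_GPC_UTLB_ID_LTP6, while T1_7, PE_3, and TPCCS_3 all
+// report through UVM_AMPERE_GPC_UTLB_ID_LTP7.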
+typedef enum { + UVM_AMPERE_GPC_UTLB_ID_RGG = 0, + UVM_AMPERE_GPC_UTLB_ID_LTP0 = 1, + UVM_AMPERE_GPC_UTLB_ID_LTP1 = 2, + UVM_AMPERE_GPC_UTLB_ID_LTP2 = 3, + UVM_AMPERE_GPC_UTLB_ID_LTP3 = 4, + UVM_AMPERE_GPC_UTLB_ID_LTP4 = 5, + UVM_AMPERE_GPC_UTLB_ID_LTP5 = 6, + UVM_AMPERE_GPC_UTLB_ID_LTP6 = 7, + UVM_AMPERE_GPC_UTLB_ID_LTP7 = 8, + UVM_AMPERE_GPC_UTLB_ID_LTP8 = 9, + UVM_AMPERE_GPC_UTLB_ID_LTP9 = 10, + UVM_AMPERE_GPC_UTLB_ID_LTP10 = 11, + UVM_AMPERE_GPC_UTLB_ID_LTP11 = 12, + UVM_AMPERE_GPC_UTLB_ID_LTP12 = 13, + UVM_AMPERE_GPC_UTLB_ID_LTP13 = 14, + UVM_AMPERE_GPC_UTLB_ID_LTP14 = 15, + UVM_AMPERE_GPC_UTLB_ID_LTP15 = 16, + + UVM_AMPERE_GPC_UTLB_COUNT, +} uvm_ampere_gpc_utlb_id_t; + +static NvU32 uvm_ampere_get_utlbs_per_gpc(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 utlbs = parent_gpu->rm_info.maxTpcPerGpcCount * 2 + 1; + UVM_ASSERT(utlbs <= UVM_AMPERE_GPC_UTLB_COUNT); + return utlbs; +} + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_ampere_host.c b/kernel-open/nvidia-uvm/uvm_ampere_host.c new file mode 100644 index 000000000..dc1ec0638 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ampere_host.c @@ -0,0 +1,435 @@ +/******************************************************************************* + Copyright (c) 2018-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_global.h" +#include "uvm_user_channel.h" +#include "uvm_push_macros.h" +#include "hwref/ampere/ga100/dev_runlist.h" +#include "clc56f.h" +#include "clc076.h" + +bool uvm_hal_ampere_host_method_validate(uvm_push_t *push, NvU32 method_address, NvU32 method_data) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + if (!uvm_gpu_is_virt_mode_sriov_heavy(gpu)) + return true; + + if (uvm_channel_is_privileged(push->channel)) { + switch (method_address) { + case NVC56F_SET_OBJECT: + case NVC56F_NON_STALL_INTERRUPT: + case NVC56F_MEM_OP_A: + case NVC56F_MEM_OP_B: + case NVC56F_MEM_OP_C: + case NVC56F_MEM_OP_D: + case NVC56F_SEM_ADDR_LO: + case NVC56F_SEM_ADDR_HI: + case NVC56F_SEM_PAYLOAD_LO: + case NVC56F_SEM_PAYLOAD_HI: + case NVC56F_SEM_EXECUTE: + case NVC56F_WFI: + case NVC56F_NOP: + return true; + } + + UVM_ERR_PRINT("Unsupported Host method 0x%x\n", method_address); + return false; + } + else if (method_address == NVC56F_MEM_OP_D) { + NvU32 operation = READ_HWVALUE(method_data, C56F, MEM_OP_D, OPERATION); + + // Prohibit privileged operations from being pushed to non-privileged + // channels. 
+ + // TLB invalidations. + if ((operation == NVC56F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE) || + (operation == NVC56F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED)) { + UVM_ERR_PRINT("Pushed privileged operation 0x%x to non-privileged channel\n", operation); + return false; + } + + // Access counter clearing is a privileged operation. But access + // counters are not supported on SR-IOV heavy, so the presence of the + // operation indicates a missing check for access counters support. + if (operation == NVC56F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR) { + UVM_ERR_PRINT("Pushed access counters operation 0x%x, but access counters are not supported\n", operation); + return false; + } + } + + return true; +} + +bool uvm_hal_ampere_host_sw_method_validate(uvm_push_t *push, NvU32 method_address, NvU32 method_data) +{ + if (!uvm_channel_is_proxy(push->channel)) + return true; + + switch (method_address) { + case NVC076_SET_OBJECT: + case NVC076_CLEAR_FAULTED_A: + case NVC076_CLEAR_FAULTED_B: + case NVC076_FAULT_CANCEL_A: + case NVC076_FAULT_CANCEL_B: + case NVC076_FAULT_CANCEL_C: + return true; + } + + UVM_ERR_PRINT("Unsupported SW method 0x%x\n", method_address); + return false; +} + +void uvm_hal_ampere_host_clear_faulted_channel_register(uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *fault) +{ + uvm_spin_loop_t spin; + NvU32 channel_faulted_mask = 0; + NvU32 clear_type_value = 0; + + UVM_ASSERT(!user_channel->gpu->parent->has_clear_faulted_channel_method); + + if (fault->fault_source.mmu_engine_type == UVM_MMU_ENGINE_TYPE_HOST) { + clear_type_value = NV_CHRAM_CHANNEL_UPDATE_RESET_PBDMA_FAULTED; + channel_faulted_mask = HWCONST(_CHRAM, CHANNEL, PBDMA_FAULTED, TRUE); + } + else if (fault->fault_source.mmu_engine_type == UVM_MMU_ENGINE_TYPE_CE) { + clear_type_value = NV_CHRAM_CHANNEL_UPDATE_RESET_ENG_FAULTED; + channel_faulted_mask = HWCONST(_CHRAM, CHANNEL, ENG_FAULTED, TRUE); + } + else { + UVM_ASSERT_MSG(false, "Unsupported MMU engine type %s\n", + uvm_mmu_engine_type_string(fault->fault_source.mmu_engine_type)); + } + + // Wait for the channel to have the FAULTED bit set as this can race with + // interrupt notification + UVM_SPIN_WHILE(!(UVM_GPU_READ_ONCE(*user_channel->chram_channel_register) & channel_faulted_mask), &spin); + + UVM_GPU_WRITE_ONCE(*user_channel->chram_channel_register, clear_type_value); + + wmb(); + + UVM_GPU_WRITE_ONCE(*user_channel->work_submission_offset, user_channel->work_submission_token); +} + +static NvU32 instance_ptr_aperture_type_to_hw_value(uvm_aperture_t aperture) +{ + switch (aperture) { + case UVM_APERTURE_SYS: + return HWCONST(C076, CLEAR_FAULTED_A, INST_APERTURE, SYS_MEM_COHERENT); + case UVM_APERTURE_VID: + return HWCONST(C076, CLEAR_FAULTED_A, INST_APERTURE, VID_MEM); + default: + UVM_ASSERT_MSG(false, "Invalid aperture_type %d\n", aperture); + } + + return 0; +} + +static void instance_ptr_address_to_hw_values(NvU64 instance_ptr_address, + NvU32 *instance_ptr_lo, + NvU32 *instance_ptr_hi) +{ + // instance_ptr must be 4K aligned + UVM_ASSERT_MSG(IS_ALIGNED(instance_ptr_address, 1 << 12), "instance_ptr 0x%llx\n", instance_ptr_address); + instance_ptr_address >>= 12; + + *instance_ptr_lo = instance_ptr_address & HWMASK(C076, CLEAR_FAULTED_A, INST_LOW); + *instance_ptr_hi = instance_ptr_address >> HWSIZE(C076, CLEAR_FAULTED_A, INST_LOW); +} + +static NvU32 mmu_engine_type_to_hw_value(uvm_mmu_engine_type_t mmu_engine_type) +{ + switch (mmu_engine_type) { + case UVM_MMU_ENGINE_TYPE_HOST: + return HWCONST(C076, CLEAR_FAULTED_A, TYPE, 
PBDMA_FAULTED); + case UVM_MMU_ENGINE_TYPE_CE: + return HWCONST(C076, CLEAR_FAULTED_A, TYPE, ENG_FAULTED); + default: + UVM_ASSERT_MSG(false, "Unsupported MMU engine type %s\n", + uvm_mmu_engine_type_string(mmu_engine_type)); + } + + return 0; +} + +void uvm_hal_ampere_host_clear_faulted_channel_sw_method(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *fault) +{ + NvU32 clear_type_value; + NvU32 aperture_type_value; + NvU32 instance_ptr_lo, instance_ptr_hi; + uvm_gpu_phys_address_t instance_ptr = user_channel->instance_ptr.addr; + + UVM_ASSERT(user_channel->gpu->parent->has_clear_faulted_channel_sw_method); + + clear_type_value = mmu_engine_type_to_hw_value(fault->fault_source.mmu_engine_type); + aperture_type_value = instance_ptr_aperture_type_to_hw_value(instance_ptr.aperture); + + instance_ptr_address_to_hw_values(instance_ptr.address, &instance_ptr_lo, &instance_ptr_hi); + + NV_PUSH_2U(C076, CLEAR_FAULTED_A, HWVALUE(C076, CLEAR_FAULTED_A, INST_LOW, instance_ptr_lo) | + aperture_type_value | + clear_type_value, + CLEAR_FAULTED_B, HWVALUE(C076, CLEAR_FAULTED_B, INST_HI, instance_ptr_hi)); +} + +// Copy from Pascal, this version sets TLB_INVALIDATE_INVAL_SCOPE. +void uvm_hal_ampere_host_tlb_invalidate_all(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + uvm_membar_t membar) +{ + NvU32 aperture_value; + NvU32 page_table_level; + NvU32 pdb_lo; + NvU32 pdb_hi; + NvU32 ack_value = 0; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + // PDE3 is the highest level on Pascal, see the comment in uvm_pascal_mmu.c + // for details. + UVM_ASSERT_MSG(depth < NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3, "depth %u", depth); + page_table_level = NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 - depth; + + if (membar != UVM_MEMBAR_NONE) { + // If a GPU or SYS membar is needed, ACK_TYPE needs to be set to + // GLOBALLY to make sure all the pending accesses can be picked up by + // the membar. + ack_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_ACK_TYPE, GLOBALLY); + } + + NV_PUSH_4U(C56F, MEM_OP_A, HWCONST(C56F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWCONST(C56F, MEM_OP_A, TLB_INVALIDATE_INVAL_SCOPE, NON_LINK_TLBS), + MEM_OP_B, 0, + MEM_OP_C, HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C56F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C56F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE) | + HWVALUE(C56F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + + uvm_hal_tlb_invalidate_membar(push, membar); +} + +// Copy from Volta, this version sets TLB_INVALIDATE_INVAL_SCOPE. 
+void uvm_hal_ampere_host_tlb_invalidate_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + NvU64 base, + NvU64 size, + NvU32 page_size, + uvm_membar_t membar) +{ + NvU32 aperture_value; + NvU32 page_table_level; + NvU32 pdb_lo; + NvU32 pdb_hi; + NvU32 ack_value = 0; + NvU32 va_lo; + NvU32 va_hi; + NvU64 end; + NvU64 actual_base; + NvU64 actual_size; + NvU64 actual_end; + NvU32 log2_invalidation_size; + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + UVM_ASSERT_MSG(IS_ALIGNED(page_size, 1 << 12), "page_size 0x%x\n", page_size); + UVM_ASSERT_MSG(IS_ALIGNED(base, page_size), "base 0x%llx page_size 0x%x\n", base, page_size); + UVM_ASSERT_MSG(IS_ALIGNED(size, page_size), "size 0x%llx page_size 0x%x\n", size, page_size); + UVM_ASSERT_MSG(size > 0, "size 0x%llx\n", size); + + // The invalidation size must be a power-of-two number of pages containing + // the passed interval + end = base + size - 1; + log2_invalidation_size = __fls((unsigned long)(end ^ base)) + 1; + + if (log2_invalidation_size == 64) { + // Invalidate everything + gpu->parent->host_hal->tlb_invalidate_all(push, pdb, depth, membar); + return; + } + + // The hardware aligns the target address down to the invalidation size. + actual_size = 1ULL << log2_invalidation_size; + actual_base = UVM_ALIGN_DOWN(base, actual_size); + actual_end = actual_base + actual_size - 1; + UVM_ASSERT(actual_end >= end); + + // The invalidation size field expects log2(invalidation size in 4K), not + // log2(invalidation size in bytes) + log2_invalidation_size -= 12; + + // Address to invalidate, as a multiple of 4K. + base >>= 12; + va_lo = base & HWMASK(C56F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + va_hi = base >> HWSIZE(C56F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + // PDE3 is the highest level on Pascal-Ampere , see the comment in + // uvm_pascal_mmu.c for details. + UVM_ASSERT_MSG(depth < NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3, "depth %u", depth); + page_table_level = NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 - depth; + + if (membar != UVM_MEMBAR_NONE) { + // If a GPU or SYS membar is needed, ACK_TYPE needs to be set to + // GLOBALLY to make sure all the pending accesses can be picked up by + // the membar. 
+ ack_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_ACK_TYPE, GLOBALLY); + } + + NV_PUSH_4U(C56F, MEM_OP_A, HWVALUE(C56F, MEM_OP_A, TLB_INVALIDATE_INVALIDATION_SIZE, log2_invalidation_size) | + HWCONST(C56F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWVALUE(C56F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO, va_lo) | + HWCONST(C56F, MEM_OP_A, TLB_INVALIDATE_INVAL_SCOPE, NON_LINK_TLBS), + MEM_OP_B, HWVALUE(C56F, MEM_OP_B, TLB_INVALIDATE_TARGET_ADDR_HI, va_hi), + MEM_OP_C, HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C56F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C56F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE_TARGETED) | + HWVALUE(C56F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + + uvm_hal_tlb_invalidate_membar(push, membar); +} + +// Copy from Pascal, this version sets TLB_INVALIDATE_INVAL_SCOPE. +void uvm_hal_ampere_host_tlb_invalidate_test(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + UVM_TEST_INVALIDATE_TLB_PARAMS *params) +{ + NvU32 ack_value = 0; + NvU32 invalidate_gpc_value = 0; + NvU32 aperture_value = 0; + NvU32 pdb_lo = 0; + NvU32 pdb_hi = 0; + NvU32 page_table_level = 0; + uvm_membar_t membar; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + if (params->page_table_level != UvmInvalidatePageTableLevelAll) { + // PDE3 is the highest level on Pascal, see the comment in + // uvm_pascal_mmu.c for details. + page_table_level = min((NvU32)UvmInvalidatePageTableLevelPde3, params->page_table_level) - 1; + } + + if (params->membar != UvmInvalidateTlbMemBarNone) { + // If a GPU or SYS membar is needed, ack_value needs to be set to + // GLOBALLY to make sure all the pending accesses can be picked up by + // the membar. 
+ ack_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_ACK_TYPE, GLOBALLY); + } + + if (params->disable_gpc_invalidate) + invalidate_gpc_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_GPC, DISABLE); + else + invalidate_gpc_value = HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE); + + if (params->target_va_mode == UvmTargetVaModeTargeted) { + NvU64 va = params->va >> 12; + + NvU32 va_lo = va & HWMASK(C56F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + NvU32 va_hi = va >> HWSIZE(C56F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + NV_PUSH_4U(C56F, MEM_OP_A, HWCONST(C56F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWVALUE(C56F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO, va_lo) | + HWCONST(C56F, MEM_OP_A, TLB_INVALIDATE_INVAL_SCOPE, NON_LINK_TLBS), + MEM_OP_B, HWVALUE(C56F, MEM_OP_B, TLB_INVALIDATE_TARGET_ADDR_HI, va_hi), + MEM_OP_C, HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C56F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + invalidate_gpc_value | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C56F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE_TARGETED) | + HWVALUE(C56F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + } + else { + NV_PUSH_4U(C56F, MEM_OP_A, HWCONST(C56F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWCONST(C56F, MEM_OP_A, TLB_INVALIDATE_INVAL_SCOPE, NON_LINK_TLBS), + MEM_OP_B, 0, + MEM_OP_C, HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C56F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + HWCONST(C56F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C56F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + invalidate_gpc_value | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C56F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE) | + HWVALUE(C56F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + } + + if (params->membar == UvmInvalidateTlbMemBarSys) + membar = UVM_MEMBAR_SYS; + else if (params->membar == UvmInvalidateTlbMemBarLocal) + membar = UVM_MEMBAR_GPU; + else + membar = UVM_MEMBAR_NONE; + + uvm_hal_tlb_invalidate_membar(push, membar); +} diff --git a/kernel-open/nvidia-uvm/uvm_ampere_mmu.c b/kernel-open/nvidia-uvm/uvm_ampere_mmu.c new file mode 100644 index 000000000..8f35f5a35 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ampere_mmu.c @@ -0,0 +1,162 @@ +/******************************************************************************* + Copyright (c) 2018-2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +// For Ampere, UVM page tree 'depth' maps to hardware as follows: +// +// UVM depth HW level VA bits +// 0 PDE3 48:47 +// 1 PDE2 46:38 +// 2 PDE1 (or 512M PTE) 37:29 +// 3 PDE0 (dual 64k/4k PDE, or 2M PTE) 28:21 +// 4 PTE_64K / PTE_4K 20:16 / 20:12 + +#include "uvm_types.h" +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_ampere_fault_buffer.h" +#include "hwref/ampere/ga100/dev_fault.h" + +uvm_mmu_engine_type_t uvm_hal_ampere_mmu_engine_id_to_type(NvU16 mmu_engine_id) +{ + if (mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_HOST0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_HOST31) + return UVM_MMU_ENGINE_TYPE_HOST; + + if (mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_CE0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_CE9) + return UVM_MMU_ENGINE_TYPE_CE; + + // We shouldn't be servicing faults from any other engines + UVM_ASSERT_MSG(mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_GRAPHICS && mmu_engine_id < NV_PFAULT_MMU_ENG_ID_BAR1, + "Unexpected engine ID: 0x%x\n", mmu_engine_id); + + return UVM_MMU_ENGINE_TYPE_GRAPHICS; +} + +static NvU32 page_table_depth_ampere(NvU32 page_size) +{ + // The common-case is page_size == UVM_PAGE_SIZE_2M, hence the first check + if (page_size == UVM_PAGE_SIZE_2M) + return 3; + else if (page_size == UVM_PAGE_SIZE_512M) + return 2; + else + return 4; +} + +static NvU32 page_sizes_ampere(void) +{ + return UVM_PAGE_SIZE_512M | UVM_PAGE_SIZE_2M | UVM_PAGE_SIZE_64K | UVM_PAGE_SIZE_4K; +} + +static uvm_mmu_mode_hal_t ampere_mmu_mode_hal; + +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_ampere(NvU32 big_page_size) +{ + static bool initialized = false; + + UVM_ASSERT(big_page_size == UVM_PAGE_SIZE_64K || big_page_size == UVM_PAGE_SIZE_128K); + + // TODO: Bug 1789555: RM should reject the creation of GPU VA spaces with + // 128K big page size for Pascal+ GPUs + if (big_page_size == UVM_PAGE_SIZE_128K) + return NULL; + + if (!initialized) { + uvm_mmu_mode_hal_t *turing_mmu_mode_hal = uvm_hal_mmu_mode_turing(big_page_size); + UVM_ASSERT(turing_mmu_mode_hal); + + // The assumption made is that arch_hal->mmu_mode_hal() will be + // called under the global lock the first time, so check it here. 
+        uvm_assert_mutex_locked(&g_uvm_global.global_lock);
+
+        ampere_mmu_mode_hal = *turing_mmu_mode_hal;
+        ampere_mmu_mode_hal.page_table_depth = page_table_depth_ampere;
+        ampere_mmu_mode_hal.page_sizes = page_sizes_ampere;
+
+        initialized = true;
+    }
+
+    return &ampere_mmu_mode_hal;
+}
+
+NvU16 uvm_hal_ampere_mmu_client_id_to_utlb_id(NvU16 client_id)
+{
+    switch (client_id) {
+        case NV_PFAULT_CLIENT_GPC_RAST:
+        case NV_PFAULT_CLIENT_GPC_GCC:
+        case NV_PFAULT_CLIENT_GPC_GPCCS:
+            return UVM_AMPERE_GPC_UTLB_ID_RGG;
+        case NV_PFAULT_CLIENT_GPC_T1_0:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP0;
+        case NV_PFAULT_CLIENT_GPC_T1_1:
+        case NV_PFAULT_CLIENT_GPC_PE_0:
+        case NV_PFAULT_CLIENT_GPC_TPCCS_0:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP1;
+        case NV_PFAULT_CLIENT_GPC_T1_2:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP2;
+        case NV_PFAULT_CLIENT_GPC_T1_3:
+        case NV_PFAULT_CLIENT_GPC_PE_1:
+        case NV_PFAULT_CLIENT_GPC_TPCCS_1:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP3;
+        case NV_PFAULT_CLIENT_GPC_T1_4:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP4;
+        case NV_PFAULT_CLIENT_GPC_T1_5:
+        case NV_PFAULT_CLIENT_GPC_PE_2:
+        case NV_PFAULT_CLIENT_GPC_TPCCS_2:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP5;
+        case NV_PFAULT_CLIENT_GPC_T1_6:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP6;
+        case NV_PFAULT_CLIENT_GPC_T1_7:
+        case NV_PFAULT_CLIENT_GPC_PE_3:
+        case NV_PFAULT_CLIENT_GPC_TPCCS_3:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP7;
+        case NV_PFAULT_CLIENT_GPC_T1_8:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP8;
+        case NV_PFAULT_CLIENT_GPC_T1_9:
+        case NV_PFAULT_CLIENT_GPC_PE_4:
+        case NV_PFAULT_CLIENT_GPC_TPCCS_4:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP9;
+        case NV_PFAULT_CLIENT_GPC_T1_10:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP10;
+        case NV_PFAULT_CLIENT_GPC_T1_11:
+        case NV_PFAULT_CLIENT_GPC_PE_5:
+        case NV_PFAULT_CLIENT_GPC_TPCCS_5:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP11;
+        case NV_PFAULT_CLIENT_GPC_T1_12:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP12;
+        case NV_PFAULT_CLIENT_GPC_T1_13:
+        case NV_PFAULT_CLIENT_GPC_PE_6:
+        case NV_PFAULT_CLIENT_GPC_TPCCS_6:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP13;
+        case NV_PFAULT_CLIENT_GPC_T1_14:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP14;
+        case NV_PFAULT_CLIENT_GPC_T1_15:
+        case NV_PFAULT_CLIENT_GPC_PE_7:
+        case NV_PFAULT_CLIENT_GPC_TPCCS_7:
+            return UVM_AMPERE_GPC_UTLB_ID_LTP15;
+
+        default:
+            UVM_ASSERT_MSG(false, "Invalid client value: 0x%x\n", client_id);
+    }
+
+    return 0;
+}
diff --git a/kernel-open/nvidia-uvm/uvm_api.h b/kernel-open/nvidia-uvm/uvm_api.h
new file mode 100644
index 000000000..7f8d31194
--- /dev/null
+++ b/kernel-open/nvidia-uvm/uvm_api.h
@@ -0,0 +1,256 @@
+/*******************************************************************************
+    Copyright (c) 2015-2019 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+        The above copyright notice and this permission notice shall be
+        included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_API_H__ +#define __UVM_API_H__ + +#include "uvm_types.h" +#include "uvm_ioctl.h" +#include "uvm_linux.h" +#include "uvm_lock.h" +#include "uvm_thread_context.h" +#include "uvm_kvmalloc.h" +#include "uvm_va_space.h" +#include "nv_uvm_types.h" + +// This weird number comes from UVM_PREVENT_MIGRATION_RANGE_GROUPS_PARAMS. That +// ioctl is called frequently so we don't want to allocate a copy every time. +// It's a little over 256 bytes in size. +#define UVM_MAX_IOCTL_PARAM_STACK_SIZE 288 + +// The UVM_ROUTE_CMD_* macros are only intended for use in the ioctl routines + +// If the BUILD_BUG_ON fires, use __UVM_ROUTE_CMD_ALLOC instead. +#define __UVM_ROUTE_CMD_STACK(cmd, params_type, function_name, do_init_check) \ + case cmd: \ + { \ + params_type params; \ + BUILD_BUG_ON(sizeof(params) > UVM_MAX_IOCTL_PARAM_STACK_SIZE); \ + if (nv_copy_from_user(¶ms, (void __user*)arg, sizeof(params))) \ + return -EFAULT; \ + \ + params.rmStatus = uvm_global_get_status(); \ + if (params.rmStatus == NV_OK) { \ + if (do_init_check) \ + params.rmStatus = uvm_va_space_initialized(uvm_va_space_get(filp)); \ + if (likely(params.rmStatus == NV_OK)) \ + params.rmStatus = function_name(¶ms, filp); \ + } \ + \ + if (nv_copy_to_user((void __user*)arg, ¶ms, sizeof(params))) \ + return -EFAULT; \ + \ + return 0; \ + } + +// We need to concatenate cmd##_PARAMS here to avoid the preprocessor's argument +// prescan. Attempting concatenation in the lower-level macro will fail because +// it will have been expanded to a literal by then. +#define UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(cmd, function_name) \ + __UVM_ROUTE_CMD_STACK(cmd, cmd##_PARAMS, function_name, false) + +#define UVM_ROUTE_CMD_STACK_INIT_CHECK(cmd, function_name) \ + __UVM_ROUTE_CMD_STACK(cmd, cmd##_PARAMS, function_name, true) + +// If the BUILD_BUG_ON fires, use __UVM_ROUTE_CMD_STACK instead +#define __UVM_ROUTE_CMD_ALLOC(cmd, params_type, function_name, do_init_check) \ + case cmd: \ + { \ + int ret = 0; \ + params_type *params = uvm_kvmalloc(sizeof(*params)); \ + if (!params) \ + return -ENOMEM; \ + BUILD_BUG_ON(sizeof(*params) <= UVM_MAX_IOCTL_PARAM_STACK_SIZE); \ + if (nv_copy_from_user(params, (void __user*)arg, sizeof(*params))) { \ + uvm_kvfree(params); \ + return -EFAULT; \ + } \ + \ + params->rmStatus = uvm_global_get_status(); \ + if (params->rmStatus == NV_OK) { \ + if (do_init_check) \ + params->rmStatus = uvm_va_space_initialized(uvm_va_space_get(filp)); \ + if (likely(params->rmStatus == NV_OK)) \ + params->rmStatus = function_name(params, filp); \ + } \ + \ + if (nv_copy_to_user((void __user*)arg, params, sizeof(*params))) \ + ret = -EFAULT; \ + \ + uvm_kvfree(params); \ + return ret; \ + } + +#define UVM_ROUTE_CMD_ALLOC_NO_INIT_CHECK(cmd, function_name) \ + __UVM_ROUTE_CMD_ALLOC(cmd, cmd##_PARAMS, function_name, false) + +#define UVM_ROUTE_CMD_ALLOC_INIT_CHECK(cmd, function_name) \ + __UVM_ROUTE_CMD_ALLOC(cmd, cmd##_PARAMS, function_name, true) + +// Wrap an entry point into the UVM module. +// +// An entry function with signature +// +// return_type foo(...); +// +// is required to have a counterpart of the form +// +// return_type foo_entry(...) 
{ +// UVM_ENTRY_RET(foo(...)); +// } +// +// An entry function with signature +// +// void foo(...); +// +// is required to have a counterpart of the form +// +// void foo_entry(...) { +// UVM_ENTRY_VOID(foo(...)); +// } +// +// Invocations of foo must be replaced by invocations of foo_entry at the entry +// points. +#define UVM_ENTRY_WRAP(line) \ + do { \ + bool added; \ + \ + if (in_interrupt()) { \ + line; \ + } \ + else if (uvm_thread_context_wrapper_is_used()) { \ + uvm_thread_context_wrapper_t thread_context_wrapper; \ + \ + added = uvm_thread_context_add(&thread_context_wrapper.context); \ + line; \ + if (added) \ + uvm_thread_context_remove(&thread_context_wrapper.context); \ + } \ + else { \ + uvm_thread_context_t thread_context; \ + \ + added = uvm_thread_context_add(&thread_context); \ + line; \ + if (added) \ + uvm_thread_context_remove(&thread_context); \ + } \ + } while (0) \ + +// Wrapper for non-void functions +#define UVM_ENTRY_RET(func_call) \ + do { \ + typeof(func_call) ret; \ + UVM_ENTRY_WRAP((ret = (func_call))); \ + return ret; \ + } while (0) \ + +// Wrapper for void functions +#define UVM_ENTRY_VOID UVM_ENTRY_WRAP + +// Validate input ranges from the user with specific alignment requirement +static bool uvm_api_range_invalid_aligned(NvU64 base, NvU64 length, NvU64 alignment) +{ + return !IS_ALIGNED(base, alignment) || + !IS_ALIGNED(length, alignment) || + base == 0 || + length == 0 || + base + length < base; // Overflow +} + +// Most APIs require PAGE_SIZE alignment +static bool uvm_api_range_invalid(NvU64 base, NvU64 length) +{ + return uvm_api_range_invalid_aligned(base, length, PAGE_SIZE); +} + +// Some APIs can only enforce 4K alignment as it's the smallest GPU page size +// even when the smallest host page is larger (e.g. 64K on ppc64le). +static bool uvm_api_range_invalid_4k(NvU64 base, NvU64 length) +{ + return uvm_api_range_invalid_aligned(base, length, UVM_PAGE_SIZE_4K); +} + +// Verify alignment on a 64K boundary. +static bool uvm_api_range_invalid_64k(NvU64 base, NvU64 length) +{ + return uvm_api_range_invalid_aligned(base, length, UVM_PAGE_SIZE_64K); +} + +// Returns true if the interval [start, start + length -1] is entirely covered +// by vmas. +// +// LOCKING: mm->mmap_lock must be held in at least read mode. +bool uvm_is_valid_vma_range(struct mm_struct *mm, NvU64 start, NvU64 length); + +// Check that the interval [base, base + length) is fully covered by UVM +// managed ranges (NV_OK is returned), or (if ATS is enabled and mm != NULL) +// fully covered by valid vmas (NV_WARN_NOTHING_TO_DO is returned), or (if HMM +// is enabled and mm != NULL) fully covered by valid vmas (NV_OK is returned). +// Any other input results in a return status of NV_ERR_INVALID_ADDRESS. +// +// LOCKING: va_space->lock must be held in at least read mode. If mm != NULL, +// mm->mmap_lock must also be held in at least read mode. 
+NV_STATUS uvm_api_range_type_check(uvm_va_space_t *va_space, struct mm_struct *mm, NvU64 base, NvU64 length); + +NV_STATUS uvm_api_pageable_mem_access_on_gpu(UVM_PAGEABLE_MEM_ACCESS_ON_GPU_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_register_gpu(UVM_REGISTER_GPU_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_unregister_gpu(UVM_UNREGISTER_GPU_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_create_range_group(UVM_CREATE_RANGE_GROUP_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_destroy_range_group(UVM_DESTROY_RANGE_GROUP_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_enable_peer_access(UVM_ENABLE_PEER_ACCESS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_disable_peer_access(UVM_DISABLE_PEER_ACCESS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_set_range_group(UVM_SET_RANGE_GROUP_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_create_external_range(UVM_CREATE_EXTERNAL_RANGE_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_map_external_allocation(UVM_MAP_EXTERNAL_ALLOCATION_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_map_external_sparse(UVM_MAP_EXTERNAL_SPARSE_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_free(UVM_FREE_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_prevent_migration_range_groups(UVM_PREVENT_MIGRATION_RANGE_GROUPS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_allow_migration_range_groups(UVM_ALLOW_MIGRATION_RANGE_GROUPS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_set_preferred_location(const UVM_SET_PREFERRED_LOCATION_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_unset_preferred_location(const UVM_UNSET_PREFERRED_LOCATION_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_set_accessed_by(const UVM_SET_ACCESSED_BY_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_unset_accessed_by(const UVM_UNSET_ACCESSED_BY_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_register_gpu_va_space(UVM_REGISTER_GPU_VASPACE_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_unregister_gpu_va_space(UVM_UNREGISTER_GPU_VASPACE_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_register_channel(UVM_REGISTER_CHANNEL_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_unregister_channel(UVM_UNREGISTER_CHANNEL_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_enable_read_duplication(const UVM_ENABLE_READ_DUPLICATION_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_disable_read_duplication(const UVM_DISABLE_READ_DUPLICATION_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_migrate(UVM_MIGRATE_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_enable_system_wide_atomics(UVM_ENABLE_SYSTEM_WIDE_ATOMICS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_disable_system_wide_atomics(UVM_DISABLE_SYSTEM_WIDE_ATOMICS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_init_event_tracker(UVM_TOOLS_INIT_EVENT_TRACKER_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_set_notification_threshold(UVM_TOOLS_SET_NOTIFICATION_THRESHOLD_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_event_queue_enable_events(UVM_TOOLS_EVENT_QUEUE_ENABLE_EVENTS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_event_queue_disable_events(UVM_TOOLS_EVENT_QUEUE_DISABLE_EVENTS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_enable_counters(UVM_TOOLS_ENABLE_COUNTERS_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_disable_counters(UVM_TOOLS_DISABLE_COUNTERS_PARAMS *params, struct file *filp); +NV_STATUS 
uvm_api_tools_read_process_memory(UVM_TOOLS_READ_PROCESS_MEMORY_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_write_process_memory(UVM_TOOLS_WRITE_PROCESS_MEMORY_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_map_dynamic_parallelism_region(UVM_MAP_DYNAMIC_PARALLELISM_REGION_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_unmap_external(UVM_UNMAP_EXTERNAL_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_migrate_range_group(UVM_MIGRATE_RANGE_GROUP_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_alloc_semaphore_pool(UVM_ALLOC_SEMAPHORE_POOL_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_populate_pageable(const UVM_POPULATE_PAGEABLE_PARAMS *params, struct file *filp); + +#endif // __UVM_API_H__ diff --git a/kernel-open/nvidia-uvm/uvm_ats.c b/kernel-open/nvidia-uvm/uvm_ats.c new file mode 100644 index 000000000..1b5d6c5e3 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ats.c @@ -0,0 +1,193 @@ +/******************************************************************************* + Copyright (c) 2018-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_va_space.h" +#include "uvm_ats.h" +#include "uvm_global.h" +#include "uvm_gpu.h" + +static int uvm_ats_mode = 1; +module_param(uvm_ats_mode, int, S_IRUGO); +MODULE_PARM_DESC(uvm_ats_mode, "Set to 0 to disable ATS (Address Translation Services). " + "Any other value is ignored. Has no effect unless the " + "platform supports ATS."); + +void uvm_ats_init(const UvmPlatformInfo *platform_info) +{ + g_uvm_global.ats.supported = platform_info->atsSupported; + + g_uvm_global.ats.enabled = uvm_ats_mode && + g_uvm_global.ats.supported && + UVM_ATS_SUPPORTED() && + uvm_va_space_mm_enabled_system(); +} + +void uvm_ats_init_va_space(uvm_va_space_t *va_space) +{ + if (UVM_ATS_IBM_SUPPORTED()) + uvm_ats_ibm_init_va_space(va_space); +} + +NV_STATUS uvm_ats_add_gpu(uvm_parent_gpu_t *parent_gpu) +{ + if (UVM_ATS_IBM_SUPPORTED()) { + // uvm_ibm_add_gpu() needs to be called even if ATS is disabled since it + // sets parent_gpu->npu. Not setting parent_gpu->npu will result in + // incorrect NVLink addresses. See dma_addr_to_gpu_addr(). 
+ + return uvm_ats_ibm_add_gpu(parent_gpu); + } + + + + + + + + return NV_OK; +} + +void uvm_ats_remove_gpu(uvm_parent_gpu_t *parent_gpu) +{ + if (UVM_ATS_IBM_SUPPORTED()) { + // uvm_ibm_remove_gpu() needs to be called even if ATS is disabled since + // uvm_ibm_add_gpu() is called even in that case and + // uvm_ibm_remove_gpu() needs to undo the work done by + // uvm_ats_add_gpu() (gpu retained_count etc.). + + uvm_ats_ibm_remove_gpu(parent_gpu); + } + + + + + + +} + +NV_STATUS uvm_ats_bind_gpu(uvm_gpu_va_space_t *gpu_va_space) +{ + NV_STATUS status = NV_OK; + + UVM_ASSERT(gpu_va_space); + + if (!gpu_va_space->ats.enabled) + return status; + + uvm_assert_lockable_order(UVM_LOCK_ORDER_MMAP_LOCK); + uvm_assert_lockable_order(UVM_LOCK_ORDER_VA_SPACE); + + if (UVM_ATS_IBM_SUPPORTED()) + status = uvm_ats_ibm_bind_gpu(gpu_va_space); + + + + + + return status; +} + +void uvm_ats_unbind_gpu(uvm_gpu_va_space_t *gpu_va_space) +{ + UVM_ASSERT(gpu_va_space); + + if (!gpu_va_space->ats.enabled) + return; + + if (UVM_ATS_IBM_SUPPORTED()) + uvm_ats_ibm_unbind_gpu(gpu_va_space); + + + + +} + +NV_STATUS uvm_ats_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space; + uvm_gpu_id_t gpu_id; + + UVM_ASSERT(gpu_va_space); + + if (!gpu_va_space->ats.enabled) + return status; + + va_space = gpu_va_space->va_space; + UVM_ASSERT(va_space); + + uvm_assert_rwsem_locked_write(&va_space->lock); + + gpu_id = gpu_va_space->gpu->id; + + // Prevent multiple registrations of the same gpu_va_space for ATS access. + if (uvm_processor_mask_test(&va_space->ats.registered_gpu_va_spaces, gpu_id)) + return NV_ERR_INVALID_DEVICE; + + if (UVM_ATS_IBM_SUPPORTED()) + status = uvm_ats_ibm_register_gpu_va_space(gpu_va_space); + + + + + + if (status == NV_OK) + uvm_processor_mask_set(&va_space->ats.registered_gpu_va_spaces, gpu_id); + + return status; +} + +void uvm_ats_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_gpu_id_t gpu_id; + uvm_va_space_t *va_space; + + UVM_ASSERT(gpu_va_space); + + if (!gpu_va_space->ats.enabled) + return; + + va_space = gpu_va_space->va_space; + gpu_id = gpu_va_space->gpu->id; + + if (UVM_ATS_IBM_SUPPORTED()) + uvm_ats_ibm_unregister_gpu_va_space(gpu_va_space); + + + + + + uvm_va_space_down_write(va_space); + uvm_processor_mask_clear(&va_space->ats.registered_gpu_va_spaces, gpu_id); + uvm_va_space_up_write(va_space); +} + +void uvm_ats_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ + // We can only reach here from the mmu_notifier callbacks and these callbacks + // wouldn't have been registered if ATS wasn't enabled. 
+ UVM_ASSERT(g_uvm_global.ats.enabled); + + if (UVM_ATS_IBM_SUPPORTED()) + uvm_ats_ibm_invalidate(va_space, start, end); +} diff --git a/kernel-open/nvidia-uvm/uvm_ats.h b/kernel-open/nvidia-uvm/uvm_ats.h new file mode 100644 index 000000000..ad8c514aa --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ats.h @@ -0,0 +1,152 @@ +/******************************************************************************* + Copyright (c) 2018-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_ATS_H__ +#define __UVM_ATS_H__ + +#include "uvm_linux.h" +#include "uvm_forward_decl.h" +#include "uvm_ats_ibm.h" +#include "nv_uvm_types.h" + + + + + + + #define UVM_ATS_SUPPORTED() (UVM_ATS_IBM_SUPPORTED()) + + +typedef struct +{ + // Mask of gpu_va_spaces which are registered for ATS access. The mask is + // indexed by gpu->id. This mask is protected by the VA space lock. + uvm_processor_mask_t registered_gpu_va_spaces; + + union + { + uvm_ibm_va_space_t ibm; + + + + + + + }; +} uvm_ats_va_space_t; + +typedef struct +{ + // Each GPU VA space can have ATS enabled or disabled in its hardware + // state. This is controlled by user space when it allocates that GPU VA + // space object from RM. This flag indicates the mode user space + // requested when allocating this GPU VA space. + bool enabled; + + NvU32 pasid; + + union + { + uvm_ibm_gpu_va_space_t ibm; + + + + + + + }; +} uvm_ats_gpu_va_space_t; + +// Initializes driver-wide ATS state +// +// LOCKING: None +void uvm_ats_init(const UvmPlatformInfo *platform_info); + +// Initializes ATS specific GPU state +// +// LOCKING: None +void uvm_ats_init_va_space(uvm_va_space_t *va_space); + +// Enables ATS feature on the GPU. +// +// LOCKING: g_uvm_global.global lock mutex must be held. +NV_STATUS uvm_ats_add_gpu(uvm_parent_gpu_t *parent_gpu); + +// Disables ATS feature on the GPU. The caller is responsible for ensuring +// that the GPU won't issue ATS requests anymore prior to calling this function. +// +// LOCKING: g_uvm_global.global lock mutex must be held. +void uvm_ats_remove_gpu(uvm_parent_gpu_t *parent_gpu); + +// Creates a binding on the GPU for the mm associated with the VA space +// (va_space_mm). Multiple calls to this function are tracked and refcounted for +// the specific {gpu, mm} pair. A successful uvm_ats_add_gpu() must precede a +// call to this function. +// +// LOCKING: mmap_lock must be lockable. +// VA space lock must be lockable. 
+// gpu_va_space->gpu must be retained. + + + + +NV_STATUS uvm_ats_bind_gpu(uvm_gpu_va_space_t *gpu_va_space); + +// Decrements the refcount on the {gpu, mm} pair. Removes the binding from the +// mm (va_space_mm) to this GPU when the refcount reaches zero. +// +// LOCKING: None +void uvm_ats_unbind_gpu(uvm_gpu_va_space_t *gpu_va_space); + +// Enables ATS access on the GPU for the mm_struct associated with the VA space +// (va_space_mm) and assigns a PASID. A successful uvm_ats_bind_gpu() must +// precede a call to this function. Returns NV_ERR_INVALID_DEVICE if the +// gpu_va_space is already registered for ATS access. +// +// LOCKING: The VA space lock must be held in write mode. +// mm has to be retained prior to calling this function. +// current->mm->mmap_lock must be held in write mode iff +// UVM_ATS_IBM_SUPPORTED_IN_KERNEL() is 1. +NV_STATUS uvm_ats_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space); + +// Disables ATS access for the gpu_va_space. Prior to calling this function, +// the caller must guarantee that the GPU will no longer make any ATS +// accesses in this GPU VA space, and that no ATS fault handling for this +// GPU will be attempted. +// +// LOCKING: This function may block on mmap_lock and will acquire the VA space +// lock, so neither lock must be held. +void uvm_ats_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space); + +// Synchronously invalidate ATS translations cached by GPU TLBs. The +// invalidate applies to all GPUs with active GPU VA spaces in va_space, and +// covers all pages touching any part of the given range. end is inclusive. +// +// GMMU translations in the given range are not guaranteed to be +// invalidated. +// +// LOCKING: No locks are required, but this function may be called with +// interrupts disabled. +void uvm_ats_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end); + +#endif // __UVM_ATS_H__ diff --git a/kernel-open/nvidia-uvm/uvm_ats_faults.c b/kernel-open/nvidia-uvm/uvm_ats_faults.c new file mode 100644 index 000000000..7b7ee646c --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ats_faults.c @@ -0,0 +1,232 @@ +/******************************************************************************* + Copyright (c) 2018 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "uvm_tools.h" +#include "uvm_va_range.h" +#include "uvm_ats_faults.h" +#include "uvm_migrate_pageable.h" + +static NV_STATUS uvm_ats_service_fault(uvm_gpu_va_space_t *gpu_va_space, + NvU64 fault_addr, + uvm_fault_access_type_t access_type) +{ + uvm_va_space_t *va_space = gpu_va_space->va_space; + struct mm_struct *mm = va_space->va_space_mm.mm; + bool write = (access_type >= UVM_FAULT_ACCESS_TYPE_WRITE); + NV_STATUS status; + NvU64 start; + NvU64 length; + + // Request uvm_migrate_pageable() to touch the corresponding page after + // population. + // Under virtualization ATS provides two translations: + // 1) guest virtual -> guest physical + // 2) guest physical -> host physical + // + // The overall ATS translation will fault if either of those translations is + // invalid. The get_user_pages() call above handles translation #1, but not + // #2. We don't know if we're running as a guest, but in case we are we can + // force that translation to be valid by touching the guest physical address + // from the CPU. If the translation is not valid then the access will cause + // a hypervisor fault. Note that dma_map_page() can't establish mappings + // used by GPU ATS SVA translations. GPU accesses to host physical addresses + // obtained as a result of the address translation request uses the CPU + // address space instead of the IOMMU address space since the translated + // host physical address isn't necessarily an IOMMU address. The only way to + // establish guest physical to host physical mapping in the CPU address + // space is to touch the page from the CPU. + // + // We assume that the hypervisor mappings are all VM_PFNMAP, VM_SHARED, and + // VM_WRITE, meaning that the mappings are all granted write access on any + // fault and that the kernel will never revoke them. + // drivers/vfio/pci/vfio_pci_nvlink2.c enforces this. Thus we can assume + // that a read fault is always sufficient to also enable write access on the + // guest translation. + + uvm_migrate_args_t uvm_migrate_args = + { + .va_space = va_space, + .mm = mm, + .start = fault_addr, + .length = PAGE_SIZE, + .dst_id = gpu_va_space->gpu->parent->id, + .dst_node_id = -1, + .populate_permissions = write ? UVM_POPULATE_PERMISSIONS_WRITE : UVM_POPULATE_PERMISSIONS_ANY, + .touch = true, + .skip_mapped = true, + .user_space_start = &start, + .user_space_length = &length, + }; + + UVM_ASSERT(uvm_ats_can_service_faults(gpu_va_space, mm)); + + // TODO: Bug 2103669: Service more than a single fault at a time + // + // We are trying to use migrate_vma API in the kernel (if it exists) to + // populate and map the faulting region on the GPU. We want to do this only + // on the first touch. That is, pages which are not already mapped. So, we + // set skip_mapped to true. For pages already mapped, this will only handle + // PTE upgrades if needed. 
+ status = uvm_migrate_pageable(&uvm_migrate_args); + if (status == NV_WARN_NOTHING_TO_DO) + status = NV_OK; + + UVM_ASSERT(status != NV_ERR_MORE_PROCESSING_REQUIRED); + + return status; +} + +NV_STATUS uvm_ats_service_fault_entry(uvm_gpu_va_space_t *gpu_va_space, + uvm_fault_buffer_entry_t *current_entry, + uvm_ats_fault_invalidate_t *ats_invalidate) +{ + NvU64 gmmu_region_base; + bool in_gmmu_region; + NV_STATUS status = NV_OK; + uvm_fault_access_type_t service_access_type; + + UVM_ASSERT(g_uvm_global.ats.enabled); + UVM_ASSERT(gpu_va_space->ats.enabled); + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + + UVM_ASSERT(current_entry->fault_access_type == + uvm_fault_access_type_mask_highest(current_entry->access_type_mask)); + + service_access_type = current_entry->fault_access_type; + + // ATS lookups are disabled on all addresses within the same + // UVM_GMMU_ATS_GRANULARITY as existing GMMU mappings (see documentation in + // uvm_mmu.h). User mode is supposed to reserve VAs as appropriate to + // prevent any system memory allocations from falling within the NO_ATS + // range of other GMMU mappings, so this shouldn't happen during normal + // operation. However, since this scenario may lead to infinite fault loops, + // we handle it by canceling the fault. + // + // TODO: Bug 2103669: Remove redundant VA range lookups + gmmu_region_base = UVM_ALIGN_DOWN(current_entry->fault_address, UVM_GMMU_ATS_GRANULARITY); + in_gmmu_region = !uvm_va_space_range_empty(current_entry->va_space, + gmmu_region_base, + gmmu_region_base + UVM_GMMU_ATS_GRANULARITY - 1); + if (in_gmmu_region) { + status = NV_ERR_INVALID_ADDRESS; + } + else { + // TODO: Bug 2103669: Service more than a single fault at a time + status = uvm_ats_service_fault(gpu_va_space, current_entry->fault_address, service_access_type); + } + + // Do not flag prefetch faults as fatal unless something fatal happened + if (status == NV_ERR_INVALID_ADDRESS) { + if (current_entry->fault_access_type != UVM_FAULT_ACCESS_TYPE_PREFETCH) { + current_entry->is_fatal = true; + current_entry->fatal_reason = uvm_tools_status_to_fatal_fault_reason(status); + + // Compute cancel mode for replayable faults + if (current_entry->is_replayable) { + if (service_access_type == UVM_FAULT_ACCESS_TYPE_READ || in_gmmu_region) + current_entry->replayable.cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL; + else + current_entry->replayable.cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_WRITE_AND_ATOMIC; + + // If there are pending read accesses on the same page, we have to + // service them before we can cancel the write/atomic faults. So we + // retry with read fault access type. + if (!in_gmmu_region && + current_entry->fault_access_type > UVM_FAULT_ACCESS_TYPE_READ && + uvm_fault_access_type_mask_test(current_entry->access_type_mask, UVM_FAULT_ACCESS_TYPE_READ)) { + status = uvm_ats_service_fault(gpu_va_space, + current_entry->fault_address, + UVM_FAULT_ACCESS_TYPE_READ); + + // If read accesses are also invalid, cancel the fault. If a + // different error code is returned, exit + if (status == NV_ERR_INVALID_ADDRESS) + current_entry->replayable.cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL; + else if (status != NV_OK) + return status; + } + } + } + else { + current_entry->is_invalid_prefetch = true; + } + + // Do not fail overall fault servicing due to logical errors + status = NV_OK; + } + + // The Linux kernel never invalidates TLB entries on mapping permission + // upgrade. 
This is a problem if the GPU has cached entries with the old + // permission. The GPU will re-fetch the entry if the PTE is invalid and + // page size is not 4K (this is the case on P9). However, if a page gets + // upgraded from R/O to R/W and GPU has the PTEs cached with R/O + // permissions we will enter an infinite loop because we just forward the + // fault to the Linux kernel and it will see that the permissions in the + // page table are correct. Therefore, we flush TLB entries on ATS write + // faults. + if (!current_entry->is_fatal && current_entry->fault_access_type > UVM_FAULT_ACCESS_TYPE_READ) { + if (!ats_invalidate->write_faults_in_batch) { + uvm_tlb_batch_begin(&gpu_va_space->page_tables, &ats_invalidate->write_faults_tlb_batch); + ats_invalidate->write_faults_in_batch = true; + } + + uvm_tlb_batch_invalidate(&ats_invalidate->write_faults_tlb_batch, + current_entry->fault_address, + PAGE_SIZE, + PAGE_SIZE, + UVM_MEMBAR_NONE); + } + + return status; +} + +NV_STATUS uvm_ats_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space, + uvm_ats_fault_invalidate_t *ats_invalidate, + uvm_tracker_t *out_tracker) +{ + NV_STATUS status; + uvm_push_t push; + + if (!ats_invalidate->write_faults_in_batch) + return NV_OK; + + UVM_ASSERT(gpu_va_space); + UVM_ASSERT(gpu_va_space->ats.enabled); + + status = uvm_push_begin(gpu_va_space->gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &push, + "Invalidate ATS entries"); + + if (status == NV_OK) { + uvm_tlb_batch_end(&ats_invalidate->write_faults_tlb_batch, &push, UVM_MEMBAR_NONE); + uvm_push_end(&push); + + // Add this push to the GPU's tracker so that fault replays/clears can + // wait on it + status = uvm_tracker_add_push_safe(out_tracker, &push); + } + + ats_invalidate->write_faults_in_batch = false; + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_ats_faults.h b/kernel-open/nvidia-uvm/uvm_ats_faults.h new file mode 100644 index 000000000..db8784d6c --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ats_faults.h @@ -0,0 +1,47 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_forward_decl.h" +#include "uvm_lock.h" +#include "uvm_global.h" +#include "uvm_va_space.h" + +NV_STATUS uvm_ats_service_fault_entry(uvm_gpu_va_space_t *gpu_va_space, + uvm_fault_buffer_entry_t *current_entry, + uvm_ats_fault_invalidate_t *ats_invalidate); + +// This function performs pending TLB invalidations for ATS and clears the +// ats_invalidate->write_faults_in_batch flag +NV_STATUS uvm_ats_invalidate_tlbs(uvm_gpu_va_space_t *gpu_va_space, + uvm_ats_fault_invalidate_t *ats_invalidate, + uvm_tracker_t *out_tracker); + +static bool uvm_ats_can_service_faults(uvm_gpu_va_space_t *gpu_va_space, struct mm_struct *mm) +{ + if (mm) + uvm_assert_mmap_lock_locked(mm); + if (gpu_va_space->ats.enabled) + UVM_ASSERT(g_uvm_global.ats.enabled); + + return gpu_va_space->ats.enabled && mm; +} diff --git a/kernel-open/nvidia-uvm/uvm_ats_ibm.c b/kernel-open/nvidia-uvm/uvm_ats_ibm.c new file mode 100644 index 000000000..e2f15cf0f --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ats_ibm.c @@ -0,0 +1,715 @@ +/******************************************************************************* + Copyright (c) 2018-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_api.h" +#include "uvm_lock.h" +#include "uvm_kvmalloc.h" +#include "uvm_global.h" +#include "uvm_va_space.h" +#include "uvm_va_space_mm.h" +#include "uvm_ats_ibm.h" +#include "uvm_common.h" + +#include + +#if UVM_IBM_NPU_SUPPORTED() + +#include +#include +#include +#include +#include +#include + +#define NPU_ATSD_REG_MAP_SIZE 32 + +// There are three 8-byte registers in each ATSD mapping: +#define NPU_ATSD_REG_LAUNCH 0 +#define NPU_ATSD_REG_AVA 1 +#define NPU_ATSD_REG_STAT 2 + +// Fields within the NPU_ATSD_REG_LAUNCH register: + +// "PRS" (process-scoped) bit. 1 means to limit invalidates to the specified +// PASID. +#define NPU_ATSD_REG_LAUNCH_PASID_ENABLE 13 + +// "PID" field. This specifies the PASID target of the invalidate. +#define NPU_ATSD_REG_LAUNCH_PASID_VAL 38 + +// "IS" bit. 0 means the specified virtual address range will be invalidated. 1 +// means all entries will be invalidated. +#define NPU_ATSD_REG_LAUNCH_INVAL_ALL 12 + +// "AP" field. This encodes the size of a range-based invalidate. +#define NPU_ATSD_REG_LAUNCH_INVAL_SIZE 17 + +// "No flush" bit. 
0 will trigger a flush (membar) from the GPU following the +// invalidate, 1 will not. +#define NPU_ATSD_REG_LAUNCH_FLUSH_DISABLE 39 + +// Helper to iterate over the active NPUs in the given VA space (all NPUs with +// GPUs that have GPU VA spaces registered in this VA space). +#define for_each_npu_index_in_va_space(npu_index, va_space) \ + for (({uvm_assert_rwlock_locked(&(va_space)->ats.ibm.rwlock); \ + (npu_index) = find_first_bit((va_space)->ats.ibm.npu_active_mask, NV_MAX_NPUS);}); \ + (npu_index) < NV_MAX_NPUS; \ + (npu_index) = find_next_bit((va_space)->ats.ibm.npu_active_mask, NV_MAX_NPUS, (npu_index) + 1)) + +// An invalidate requires operating on one set of registers in each NPU. This +// struct tracks which register set (id) is in use per NPU for a given +// operation. +typedef struct +{ + NvU8 ids[NV_MAX_NPUS]; +} uvm_atsd_regs_t; + +// Get the index of the input npu pointer within UVM's global npus array +static size_t uvm_ibm_npu_index(uvm_ibm_npu_t *npu) +{ + size_t npu_index = npu - &g_uvm_global.npus[0]; + UVM_ASSERT(npu_index < ARRAY_SIZE(g_uvm_global.npus)); + return npu_index; +} + +// Find an existing NPU matching pci_domain, or return an empty NPU slot if none +// is found. Returns NULL if no slots are available. +static uvm_ibm_npu_t *uvm_ibm_npu_find(int pci_domain) +{ + size_t i; + uvm_ibm_npu_t *npu, *first_free = NULL; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + for (i = 0; i < ARRAY_SIZE(g_uvm_global.npus); i++) { + npu = &g_uvm_global.npus[i]; + if (npu->num_retained_gpus == 0) { + if (!first_free) + first_free = npu; + } + else if (npu->pci_domain == pci_domain) { + return npu; + } + } + + return first_free; +} + +static void uvm_ibm_npu_destroy(uvm_ibm_npu_t *npu) +{ + size_t i; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + UVM_ASSERT(npu->num_retained_gpus == 0); + UVM_ASSERT(bitmap_empty(npu->atsd_regs.locks, UVM_MAX_ATSD_REGS)); + + for (i = 0; i < npu->atsd_regs.count; i++) { + UVM_ASSERT(npu->atsd_regs.io_addrs[i]); + iounmap(npu->atsd_regs.io_addrs[i]); + } + + memset(npu, 0, sizeof(*npu)); +} + +static NV_STATUS uvm_ibm_npu_init(uvm_ibm_npu_t *npu, struct pci_dev *npu_dev) +{ + struct pci_controller *hose; + size_t i, reg_count, reg_size = sizeof(npu->atsd_regs.io_addrs[0]); + int ret; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + UVM_ASSERT(npu->num_retained_gpus == 0); + UVM_ASSERT(bitmap_empty(npu->atsd_regs.locks, UVM_MAX_ATSD_REGS)); + + npu->pci_domain = pci_domain_nr(npu_dev->bus); + + if (!UVM_ATS_IBM_SUPPORTED_IN_DRIVER()) + return NV_OK; + + hose = pci_bus_to_host(npu_dev->bus); + + ret = of_property_count_elems_of_size(hose->dn, "ibm,mmio-atsd", reg_size); + if (ret < 0) { + UVM_ERR_PRINT("Failed to query NPU %d ATSD register count: %d\n", npu->pci_domain, ret); + return errno_to_nv_status(ret); + } + + // For ATS to be enabled globally, we must have NPU ATSD registers + reg_count = ret; + if (reg_count == 0 || reg_count > UVM_MAX_ATSD_REGS) { + UVM_ERR_PRINT("NPU %d has invalid ATSD register count: %zu\n", npu->pci_domain, reg_count); + return NV_ERR_INVALID_STATE; + } + + // Map the ATSD registers + for (i = 0; i < reg_count; i++) { + u64 phys_addr; + __be64 __iomem *io_addr; + ret = of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", i, &phys_addr); + UVM_ASSERT(ret == 0); + + io_addr = ioremap(phys_addr, NPU_ATSD_REG_MAP_SIZE); + if (!io_addr) { + uvm_ibm_npu_destroy(npu); + return NV_ERR_NO_MEMORY; + } + + npu->atsd_regs.io_addrs[npu->atsd_regs.count++] = io_addr; + } + + return NV_OK; +} + 
+NV_STATUS uvm_ats_ibm_add_gpu(uvm_parent_gpu_t *parent_gpu) +{ + struct pci_dev *npu_dev = pnv_pci_get_npu_dev(parent_gpu->pci_dev, 0); + uvm_ibm_npu_t *npu; + NV_STATUS status; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (!npu_dev) + return NV_OK; + + npu = uvm_ibm_npu_find(pci_domain_nr(npu_dev->bus)); + if (!npu) { + // If this happens then we can't support the system configuation until + // NV_MAX_NPUS is updated. Return the same error as when the number of + // GPUs exceeds UVM_MAX_GPUS. + UVM_ERR_PRINT("No more NPU slots available, update NV_MAX_NPUS\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (npu->num_retained_gpus == 0) { + status = uvm_ibm_npu_init(npu, npu_dev); + if (status != NV_OK) + return status; + } + + // This npu field could be read concurrently by a thread in the ATSD + // invalidate path. We don't need to provide ordering with those threads + // because those invalidates won't apply to the GPU being added until a GPU + // VA space on this GPU is registered. + npu->atsd_regs.num_membars = max(npu->atsd_regs.num_membars, parent_gpu->num_hshub_tlb_invalidate_membars); + + parent_gpu->npu = npu; + ++npu->num_retained_gpus; + return NV_OK; +} + +void uvm_ats_ibm_remove_gpu(uvm_parent_gpu_t *parent_gpu) +{ + uvm_ibm_npu_t *npu = parent_gpu->npu; + uvm_parent_gpu_t *other_parent_gpu; + NvU32 num_membars_new = 0; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (!npu) + return; + + UVM_ASSERT(npu->num_retained_gpus > 0); + if (--npu->num_retained_gpus == 0) { + uvm_ibm_npu_destroy(npu); + } + else { + // Re-calculate the membar count + for_each_parent_gpu(other_parent_gpu) { + // The current GPU being removed should've already been removed from + // the global list. + UVM_ASSERT(other_parent_gpu != parent_gpu); + if (other_parent_gpu->npu == npu) + num_membars_new = max(num_membars_new, other_parent_gpu->num_hshub_tlb_invalidate_membars); + } + + UVM_ASSERT(num_membars_new > 0); + npu->atsd_regs.num_membars = num_membars_new; + } +} + +#if UVM_ATS_IBM_SUPPORTED() + +void uvm_ats_ibm_init_va_space(uvm_va_space_t *va_space) +{ + uvm_ibm_va_space_t *ibm_va_space; + + UVM_ASSERT(va_space); + ibm_va_space = &va_space->ats.ibm; + + uvm_rwlock_irqsave_init(&ibm_va_space->rwlock, UVM_LOCK_ORDER_LEAF); +} + +#if UVM_ATS_IBM_SUPPORTED_IN_KERNEL() +static void npu_release_dummy(struct npu_context *npu_context, void *va_mm) +{ + // See the comment on the call to pnv_npu2_init_context() +} + +static NV_STATUS uvm_ats_ibm_register_gpu_va_space_kernel(uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_va_space_t *va_space = gpu_va_space->va_space; + uvm_ibm_gpu_va_space_t *ibm_gpu_va_space = &gpu_va_space->ats.ibm; + struct npu_context *npu_context; + + // pnv_npu2_init_context() registers current->mm with + // mmu_notifier_register(). We need that to match the mm we passed to our + // own mmu_notifier_register() for this VA space. + if (current->mm != va_space->va_space_mm.mm) + return NV_ERR_NOT_SUPPORTED; + + uvm_assert_mmap_lock_locked_write(current->mm); + uvm_assert_rwsem_locked_write(&va_space->lock); + + // pnv_npu2_init_context() doesn't handle being called multiple times for + // the same GPU under the same mm, which could happen if multiple VA spaces + // are created in this process. To handle that we pass the VA space pointer + // as the callback parameter: the callback values are shared by all devices + // under this mm, so pnv_npu2_init_context() enforces that the values match + // the ones already registered to the mm. 
+ // + // Otherwise we don't use the callback, since we have our own callback + // registered under the va_space_mm that will be called at the same point + // (mmu_notifier release). + npu_context = pnv_npu2_init_context(gpu_va_space->gpu->parent->pci_dev, + (MSR_DR | MSR_PR | MSR_HV), + npu_release_dummy, + va_space); + if (IS_ERR(npu_context)) { + int err = PTR_ERR(npu_context); + + // We'll get -EINVAL if the callback value (va_space) differs from the + // one already registered to the npu_context associated with this mm. + // That can only happen when multiple VA spaces attempt registration + // within the same process, which is disallowed and should return + // NV_ERR_NOT_SUPPORTED. + if (err == -EINVAL) + return NV_ERR_NOT_SUPPORTED; + return errno_to_nv_status(err); + } + + ibm_gpu_va_space->npu_context = npu_context; + + return NV_OK; +} + +static void uvm_ats_ibm_unregister_gpu_va_space_kernel(uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_gpu_va_space_state_t state; + uvm_va_space_t *va_space = gpu_va_space->va_space; + uvm_ibm_va_space_t *ibm_va_space; + uvm_ibm_gpu_va_space_t *ibm_gpu_va_space = &gpu_va_space->ats.ibm; + + if (!ibm_gpu_va_space->npu_context) + return; + + // va_space is guaranteed to not be NULL if ibm_gpu_va_space->npu_context is + // not NULL. + UVM_ASSERT(va_space); + + state = uvm_gpu_va_space_state(gpu_va_space); + UVM_ASSERT(state == UVM_GPU_VA_SPACE_STATE_INIT || state == UVM_GPU_VA_SPACE_STATE_DEAD); + + ibm_va_space = &va_space->ats.ibm; + + // pnv_npu2_destroy_context() may in turn call mmu_notifier_unregister(). + // If uvm_va_space_mm_shutdown() is concurrently executing in another + // thread, mmu_notifier_unregister() will wait for + // uvm_va_space_mm_shutdown() to finish. uvm_va_space_mm_shutdown() takes + // mmap_lock and the VA space lock, so we can't be holding those locks on + // this path. + uvm_assert_unlocked_order(UVM_LOCK_ORDER_MMAP_LOCK); + uvm_assert_unlocked_order(UVM_LOCK_ORDER_VA_SPACE); + + pnv_npu2_destroy_context(ibm_gpu_va_space->npu_context, gpu_va_space->gpu->parent->pci_dev); + ibm_gpu_va_space->npu_context = NULL; +} + +#else + +static void uvm_ats_ibm_register_gpu_va_space_driver(uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_va_space_t *va_space = gpu_va_space->va_space; + uvm_ibm_gpu_va_space_t *ibm_gpu_va_space = &gpu_va_space->ats.ibm; + uvm_gpu_t *gpu = gpu_va_space->gpu; + size_t npu_index = uvm_ibm_npu_index(gpu->parent->npu); + uvm_ibm_va_space_t *ibm_va_space; + + UVM_ASSERT(va_space); + ibm_va_space = &va_space->ats.ibm; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + uvm_write_lock_irqsave(&ibm_va_space->rwlock); + + // If this is the first GPU VA space to use this NPU in the VA space, mark + // the NPU as active so invalidates are issued to it. + if (ibm_va_space->npu_ref_counts[npu_index] == 0) { + // If this is the first active NPU in the entire VA space, we have to + // tell the kernel to send TLB invalidations to the IOMMU. See kernel + // commit 03b8abedf4f4965e7e9e0d4f92877c42c07ce19f for background. + // + // This is safe to do without holding mm_users high or mmap_lock. 
+ if (bitmap_empty(ibm_va_space->npu_active_mask, NV_MAX_NPUS)) + mm_context_add_copro(va_space->va_space_mm.mm); + + UVM_ASSERT(!test_bit(npu_index, ibm_va_space->npu_active_mask)); + __set_bit(npu_index, ibm_va_space->npu_active_mask); + } + else { + UVM_ASSERT(test_bit(npu_index, ibm_va_space->npu_active_mask)); + } + + ++ibm_va_space->npu_ref_counts[npu_index]; + + // As soon as this lock is dropped, invalidates on this VA space's mm may + // begin issuing ATSDs to this NPU. + uvm_write_unlock_irqrestore(&ibm_va_space->rwlock); + + ibm_gpu_va_space->did_ibm_driver_init = true; +} + +static void uvm_ats_ibm_unregister_gpu_va_space_driver(uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_va_space_t *va_space = gpu_va_space->va_space; + uvm_gpu_t *gpu = gpu_va_space->gpu; + size_t npu_index = uvm_ibm_npu_index(gpu->parent->npu); + bool do_remove = false; + uvm_ibm_va_space_t *ibm_va_space; + uvm_ibm_gpu_va_space_t *ibm_gpu_va_space = &gpu_va_space->ats.ibm; + + if (!ibm_gpu_va_space->did_ibm_driver_init) + return; + + UVM_ASSERT(va_space); + ibm_va_space = &va_space->ats.ibm; + + // Note that we aren't holding the VA space lock here, so another thread + // could be in uvm_ats_ibm_register_gpu_va_space() for this same GPU right + // now. The write lock and ref counts below will handle that case. + + // Once we return from this function with a bit cleared in the + // npu_active_mask, we have to guarantee that this VA space no longer + // accesses that NPU's ATSD registers. This is needed in case GPU unregister + // needs to unmap those registers. We use the reader/writer lock to + // guarantee this, which means that invalidations must not access the ATSD + // registers outside of the lock. + // + // Future work: if we could synchronize_srcu() on the mmu_notifier SRCU we + // might do that here instead to flush out all invalidates. That would allow + // us to avoid taking a read lock in the invalidate path, though we'd have + // to be careful when clearing the mask bit relative to the synchronize, and + // we'd have to be careful in cases where this thread doesn't hold a + // reference to mm_users. + uvm_write_lock_irqsave(&ibm_va_space->rwlock); + + UVM_ASSERT(ibm_va_space->npu_ref_counts[npu_index] > 0); + UVM_ASSERT(test_bit(npu_index, ibm_va_space->npu_active_mask)); + + --ibm_va_space->npu_ref_counts[npu_index]; + if (ibm_va_space->npu_ref_counts[npu_index] == 0) { + __clear_bit(npu_index, ibm_va_space->npu_active_mask); + if (bitmap_empty(ibm_va_space->npu_active_mask, NV_MAX_NPUS)) + do_remove = true; + } + + uvm_write_unlock_irqrestore(&ibm_va_space->rwlock); + + if (do_remove) { + // mm_context_remove_copro() must be called outside of the spinlock + // because it may issue invalidates across CPUs in this mm. The + // coprocessor count is atomically refcounted by that function, so it's + // safe to call here even if another thread jumps in with a register and + // calls mm_context_add_copro() between this thread's unlock and this + // call. 
+ UVM_ASSERT(va_space->va_space_mm.mm); + mm_context_remove_copro(va_space->va_space_mm.mm); + } +} + +#endif // UVM_ATS_IBM_SUPPORTED_IN_KERNEL() + +static mm_context_id_t va_space_pasid(uvm_va_space_t *va_space) +{ + struct mm_struct *mm = va_space->va_space_mm.mm; + UVM_ASSERT(mm); + return mm->context.id; +} + +NV_STATUS uvm_ats_ibm_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_va_space_t *va_space = gpu_va_space->va_space; + NV_STATUS status = NV_OK; + + UVM_ASSERT(gpu_va_space->ats.enabled); + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_INIT); + UVM_ASSERT(va_space->va_space_mm.mm); + uvm_assert_rwsem_locked_write(&va_space->lock); + +#if UVM_ATS_IBM_SUPPORTED_IN_KERNEL() + status = uvm_ats_ibm_register_gpu_va_space_kernel(gpu_va_space); +#else + uvm_ats_ibm_register_gpu_va_space_driver(gpu_va_space); +#endif + + gpu_va_space->ats.pasid = (NvU32) va_space_pasid(gpu_va_space->va_space); + + return status; +} + +void uvm_ats_ibm_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) +{ +#if UVM_ATS_IBM_SUPPORTED_IN_KERNEL() + uvm_ats_ibm_unregister_gpu_va_space_kernel(gpu_va_space); +#else + uvm_ats_ibm_unregister_gpu_va_space_driver(gpu_va_space); +#endif + + gpu_va_space->ats.pasid = -1U; +} + +#if UVM_ATS_IBM_SUPPORTED_IN_DRIVER() + +// Find any available ATSD register set in this NPU and return that index. This +// will busy wait until a register set is free. +static NvU8 atsd_reg_acquire(uvm_ibm_npu_t *npu) +{ + uvm_spin_loop_t spin; + size_t i; + bool first = true; + + while (1) { + // Using for_each_clear_bit is racy, since the bits could change at any + // point. That's ok since we'll either just retry or use a real atomic + // to lock the bit. Checking for clear bits first avoids spamming + // atomics in the contended case. + for_each_clear_bit(i, npu->atsd_regs.locks, npu->atsd_regs.count) { + if (!test_and_set_bit_lock(i, npu->atsd_regs.locks)) + return (NvU8)i; + } + + // Back off and try again, avoiding the overhead of initializing the + // tracking timers unless we need them. + if (first) { + uvm_spin_loop_init(&spin); + first = false; + } + else { + UVM_SPIN_LOOP(&spin); + } + } +} + +static void atsd_reg_release(uvm_ibm_npu_t *npu, NvU8 reg) +{ + UVM_ASSERT(reg < npu->atsd_regs.count); + UVM_ASSERT(test_bit(reg, npu->atsd_regs.locks)); + clear_bit_unlock(reg, npu->atsd_regs.locks); +} + +static __be64 atsd_reg_read(uvm_ibm_npu_t *npu, NvU8 reg, size_t offset) +{ + __be64 __iomem *io_addr = npu->atsd_regs.io_addrs[reg] + offset; + UVM_ASSERT(reg < npu->atsd_regs.count); + return __raw_readq(io_addr); +} + +static void atsd_reg_write(uvm_ibm_npu_t *npu, NvU8 reg, size_t offset, NvU64 val) +{ + __be64 __iomem *io_addr = npu->atsd_regs.io_addrs[reg] + offset; + UVM_ASSERT(reg < npu->atsd_regs.count); + __raw_writeq_be(val, io_addr); +} + +// Acquire a set of registers in each NPU which is active in va_space +static void atsd_regs_acquire(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs) +{ + size_t i; + for_each_npu_index_in_va_space(i, va_space) + regs->ids[i] = atsd_reg_acquire(&g_uvm_global.npus[i]); +} + +static void atsd_regs_release(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs) +{ + size_t i; + for_each_npu_index_in_va_space(i, va_space) + atsd_reg_release(&g_uvm_global.npus[i], regs->ids[i]); +} + +// Write the provided value to each NPU active in va_space at the provided +// register offset. 
+static void atsd_regs_write(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs, size_t offset, NvU64 val) +{ + size_t i; + for_each_npu_index_in_va_space(i, va_space) + atsd_reg_write(&g_uvm_global.npus[i], regs->ids[i], offset, val); +} + +// Wait for all prior operations issued to active NPUs in va_space on the given +// registers to finish. +static void atsd_regs_wait(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs) +{ + uvm_spin_loop_t spin; + size_t i; + for_each_npu_index_in_va_space(i, va_space) { + UVM_SPIN_WHILE(atsd_reg_read(&g_uvm_global.npus[i], regs->ids[i], NPU_ATSD_REG_STAT), &spin) + ; + } +} + +// Encode an invalidate targeting the given pasid and the given size for the +// NPU_ATSD_REG_LAUNCH register. The target address is encoded separately. +// +// psize must be one of the MMU_PAGE_* values defined in powerpc's asm/mmu.h. A +// psize of MMU_PAGE_COUNT means to invalidate the entire address space. +static NvU64 atsd_get_launch_val(mm_context_id_t pasid, int psize) +{ + NvU64 val = 0; + + val |= PPC_BIT(NPU_ATSD_REG_LAUNCH_PASID_ENABLE); + val |= pasid << PPC_BITLSHIFT(NPU_ATSD_REG_LAUNCH_PASID_VAL); + + if (psize == MMU_PAGE_COUNT) { + val |= PPC_BIT(NPU_ATSD_REG_LAUNCH_INVAL_ALL); + } + else { + // The NPU registers do not support arbitrary sizes + UVM_ASSERT(psize == MMU_PAGE_64K || psize == MMU_PAGE_2M || psize == MMU_PAGE_1G); + val |= (NvU64)mmu_get_ap(psize) << PPC_BITLSHIFT(NPU_ATSD_REG_LAUNCH_INVAL_SIZE); + } + + return val; +} + +// Return the encoded size to use for an ATSD targeting the given range, in one +// of the MMU_PAGE_* values defined in powerpc's asm/mmu.h. A return value of +// MMU_PAGE_COUNT means the entire address space must be invalidated. +// +// start is an in/out parameter. On return start will be set to the aligned +// starting address to use for the ATSD. end is inclusive. +static int atsd_calc_size(NvU64 *start, NvU64 end) +{ + // ATSDs have high latency, so we prefer to over-invalidate rather than + // issue multiple precise invalidates. Supported sizes are only 64K, 2M, and + // 1G. + + *start = UVM_ALIGN_DOWN(*start, SZ_64K); + end = UVM_ALIGN_DOWN(end, SZ_64K); + if (*start == end) + return MMU_PAGE_64K; + + *start = UVM_ALIGN_DOWN(*start, SZ_2M); + end = UVM_ALIGN_DOWN(end, SZ_2M); + if (*start == end) + return MMU_PAGE_2M; + + *start = UVM_ALIGN_DOWN(*start, SZ_1G); + end = UVM_ALIGN_DOWN(end, SZ_1G); + if (*start == end) + return MMU_PAGE_1G; + + return MMU_PAGE_COUNT; +} + +// Issue an ATSD to all NPUs and wait for completion +static void atsd_launch_wait(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs, NvU64 val) +{ + atsd_regs_write(va_space, regs, NPU_ATSD_REG_LAUNCH, val); + atsd_regs_wait(va_space, regs); +} + +// Issue and wait for the required membars following an invalidate +static void atsd_issue_membars(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs) +{ + size_t i; + NvU32 num_membars = 0; + + // These membars are issued using ATSDs which target a reserved PASID of 0. + // That PASID is valid on the GPU in order for the membar to be valid, but + // 0 will never be used by the kernel for an actual address space so the + // ATSD won't actually invalidate any entries. 
+ NvU64 val = atsd_get_launch_val(0, MMU_PAGE_COUNT); + + for_each_npu_index_in_va_space(i, va_space) { + uvm_ibm_npu_t *npu = &g_uvm_global.npus[i]; + num_membars = max(num_membars, npu->atsd_regs.num_membars); + } + + for (i = 0; i < num_membars; i++) + atsd_launch_wait(va_space, regs, val); +} + +static void uvm_ats_ibm_invalidate_all(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs) +{ + NvU64 val = atsd_get_launch_val(va_space_pasid(va_space), MMU_PAGE_COUNT); + atsd_launch_wait(va_space, regs, val); + atsd_issue_membars(va_space, regs); +} + +static void uvm_ats_ibm_invalidate_range(uvm_va_space_t *va_space, uvm_atsd_regs_t *regs, NvU64 start, int psize) +{ + NvU64 val = atsd_get_launch_val(va_space_pasid(va_space), psize); + + // Barriers are expensive, so write all address registers first then do a + // single barrier for all of them. + atsd_regs_write(va_space, regs, NPU_ATSD_REG_AVA, start); + eieio(); + atsd_launch_wait(va_space, regs, val); + atsd_issue_membars(va_space, regs); +} + +#endif // UVM_ATS_IBM_SUPPORTED_IN_DRIVER() + +void uvm_ats_ibm_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ +#if UVM_ATS_IBM_SUPPORTED_IN_DRIVER() + unsigned long irq_flags; + uvm_atsd_regs_t regs; + NvU64 atsd_start = start; + int psize = atsd_calc_size(&atsd_start, end); + uvm_ibm_va_space_t *ibm_va_space = &va_space->ats.ibm; + + BUILD_BUG_ON(order_base_2(UVM_MAX_ATSD_REGS) > 8*sizeof(regs.ids[0])); + + // We must hold this lock in at least read mode when accessing NPU + // registers. See the comment in uvm_ats_ibm_unregister_gpu_va_space_driver. + uvm_read_lock_irqsave(&ibm_va_space->rwlock, irq_flags); + + if (!bitmap_empty(ibm_va_space->npu_active_mask, NV_MAX_NPUS)) { + atsd_regs_acquire(va_space, ®s); + + if (psize == MMU_PAGE_COUNT) + uvm_ats_ibm_invalidate_all(va_space, ®s); + else + uvm_ats_ibm_invalidate_range(va_space, ®s, atsd_start, psize); + + atsd_regs_release(va_space, ®s); + } + + uvm_read_unlock_irqrestore(&ibm_va_space->rwlock, irq_flags); +#else + UVM_ASSERT_MSG(0, "This function should not be called on this kernel version\n"); +#endif // UVM_ATS_IBM_SUPPORTED_IN_DRIVER() +} + +#endif // UVM_ATS_IBM_SUPPORTED +#endif // UVM_IBM_NPU_SUPPORTED diff --git a/kernel-open/nvidia-uvm/uvm_ats_ibm.h b/kernel-open/nvidia-uvm/uvm_ats_ibm.h new file mode 100644 index 000000000..4ceba0cce --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ats_ibm.h @@ -0,0 +1,266 @@ +/******************************************************************************* + Copyright (c) 2018-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_ATS_IBM_H__ +#define __UVM_ATS_IBM_H__ + +#include "uvm_linux.h" +#include "uvm_forward_decl.h" +#include "uvm_hal_types.h" + +#if defined(NVCPU_PPC64LE) && defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT) + #include + #if defined(NV_MAX_NPUS) + #define UVM_IBM_NPU_SUPPORTED() 1 + #else + #define UVM_IBM_NPU_SUPPORTED() 0 + #endif +#else + #define UVM_IBM_NPU_SUPPORTED() 0 +#endif + +#if defined(NV_ASM_OPAL_API_H_PRESENT) + // For OPAL_NPU_INIT_CONTEXT + #include +#endif + +// Timeline of kernel changes: +// +// 0) Before 1ab66d1fbadad86b1f4a9c7857e193af0ee0022c +// - No NPU-ATS code existed, nor did the OPAL_NPU_INIT_CONTEXT firmware +// call. +// - NV_PNV_NPU2_INIT_CONTEXT_PRESENT Not defined +// - NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID Not defined +// - OPAL_NPU_INIT_CONTEXT Not defined +// - ATS support type None +// +// 1) NPU ATS code added: 1ab66d1fbadad86b1f4a9c7857e193af0ee0022c, v4.12 +// (2017-04-03) +// - This commit added initial support for NPU ATS, including the necessary +// OPAL firmware calls. This support was developmental and required +// several bug fixes before it could be used in production. +// - NV_PNV_NPU2_INIT_CONTEXT_PRESENT Defined +// - NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID Not defined +// - OPAL_NPU_INIT_CONTEXT Defined +// - ATS support type None +// +// 2) NPU ATS code fixed: a1409adac748f0db655e096521bbe6904aadeb98, v4.17 +// (2018-04-11) +// - This commit changed the function signature for pnv_npu2_init_context's +// callback parameter. Since all required bug fixes went in prior to this +// change, we can use the callback signature as a flag to indicate +// whether the PPC arch layer in the kernel supports ATS in production. +// - NV_PNV_NPU2_INIT_CONTEXT_PRESENT Defined +// - NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID Defined +// - OPAL_NPU_INIT_CONTEXT Defined +// - ATS support type Kernel +// +// 3) NPU ATS code removed: 7eb3cf761927b2687164e182efa675e6c09cfe44, v5.3 +// (2019-06-25) +// - This commit removed NPU-ATS support from the PPC arch layer, so the +// driver needs to handle things instead. pnv_npu2_init_context is no +// longer present, so we use OPAL_NPU_INIT_CONTEXT to differentiate +// between this state and scenario #0. 
+// - NV_PNV_NPU2_INIT_CONTEXT_PRESENT Not defined +// - NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID Not defined +// - OPAL_NPU_INIT_CONTEXT Defined +// - ATS support type Driver +// +#if defined(NV_PNV_NPU2_INIT_CONTEXT_CALLBACK_RETURNS_VOID) + #define UVM_ATS_IBM_SUPPORTED_IN_KERNEL() 1 + #define UVM_ATS_IBM_SUPPORTED_IN_DRIVER() 0 +#elif !defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT) && defined(OPAL_NPU_INIT_CONTEXT) && UVM_CAN_USE_MMU_NOTIFIERS() + #define UVM_ATS_IBM_SUPPORTED_IN_KERNEL() 0 + #define UVM_ATS_IBM_SUPPORTED_IN_DRIVER() 1 +#else + #define UVM_ATS_IBM_SUPPORTED_IN_KERNEL() 0 + #define UVM_ATS_IBM_SUPPORTED_IN_DRIVER() 0 +#endif + +#define UVM_ATS_IBM_SUPPORTED() (UVM_ATS_IBM_SUPPORTED_IN_KERNEL() || UVM_ATS_IBM_SUPPORTED_IN_DRIVER()) + +// Maximum number of parallel ATSD register sets per NPU +#define UVM_MAX_ATSD_REGS 16 + +typedef struct +{ +#if UVM_IBM_NPU_SUPPORTED() + // These are the active NPUs in this VA space, that is, all NPUs with + // GPUs that have GPU VA spaces registered in this VA space. + // + // If a bit is clear in npu_active_mask then the corresponding entry of + // npu_ref_counts is 0. If a bit is set then the corresponding entry of + // npu_ref_counts is greater than 0. + NvU32 npu_ref_counts[NV_MAX_NPUS]; + DECLARE_BITMAP(npu_active_mask, NV_MAX_NPUS); +#endif + + // Lock protecting npu_ref_counts and npu_active_mask. Invalidations + // take this lock for read. GPU VA space register and unregister take + // this lock for write. Since all invalidations take the lock for read + // for the duration of the invalidate, taking the lock for write also + // flushes all invalidates. + // + // This is a spinlock because the invalidation code paths may be called + // with interrupts disabled, so those paths can't take the VA space + // lock. We could use a normal exclusive spinlock instead, but a reader/ + // writer lock is preferred to allow concurrent invalidates in the same + // VA space. + uvm_rwlock_irqsave_t rwlock; +} uvm_ibm_va_space_t; + +typedef struct +{ +#if UVM_ATS_IBM_SUPPORTED_IN_KERNEL() + struct npu_context *npu_context; +#endif + + // Used on the teardown path to know what to clean up. npu_context acts + // as the equivalent flag for kernel-provided support. + bool did_ibm_driver_init; +} uvm_ibm_gpu_va_space_t; + +struct uvm_ibm_npu_struct +{ + // Number of retained GPUs under this NPU. The other fields in this struct + // are only valid if this is non-zero. + unsigned int num_retained_gpus; + + // PCI domain containing this NPU. This acts as a unique system-wide ID for + // this UVM NPU. + int pci_domain; + + // The ATS-related fields are only valid when ATS support is enabled and + // UVM_ATS_IBM_SUPPORTED_IN_DRIVER() is 1. + struct + { + // Mapped addresses of the ATSD trigger registers. There may be more + // than one set of identical registers per NPU to enable concurrent + // invalidates. + // + // These will not be accessed unless there is a GPU VA space registered + // on a GPU under this NPU. They are protected by bit locks in the locks + // field. + __be64 __iomem *io_addrs[UVM_MAX_ATSD_REGS]; + + // Actual number of registers in the io_addrs array + size_t count; + + // Bitmask for allocation and locking of the registers. Bit index n + // corresponds to io_addrs[n]. A set bit means that index is in use + // (locked). + DECLARE_BITMAP(locks, UVM_MAX_ATSD_REGS); + + // Max value of any uvm_parent_gpu_t::num_hshub_tlb_invalidate_membars + // for all retained GPUs under this NPU. 
+ NvU32 num_membars; + } atsd_regs; +}; + +#if UVM_IBM_NPU_SUPPORTED() + NV_STATUS uvm_ats_ibm_add_gpu(uvm_parent_gpu_t *parent_gpu); + void uvm_ats_ibm_remove_gpu(uvm_parent_gpu_t *parent_gpu); +#else + static NV_STATUS uvm_ats_ibm_add_gpu(uvm_parent_gpu_t *parent_gpu) + { + return NV_OK; + } + + static void uvm_ats_ibm_remove_gpu(uvm_parent_gpu_t *parent_gpu) + { + + } +#endif // UVM_IBM_NPU_SUPPORTED + +#if UVM_ATS_IBM_SUPPORTED() + // Initializes IBM specific GPU state. + // + // LOCKING: None + void uvm_ats_ibm_init_va_space(uvm_va_space_t *va_space); + + // Enables ATS access for the gpu_va_space on the mm_struct associated with + // the VA space (va_space_mm). + // + // If UVM_ATS_IBM_SUPPORTED_IN_KERNEL() is 1, NV_ERR_NOT_SUPPORTED is + // returned if current->mm does not match va_space_mm.mm or if a GPU VA + // space within another VA space has already called this function on the + // same mm. + // + // If UVM_ATS_IBM_SUPPORTED_IN_DRIVER() is 1 there are no such restrictions. + // + // LOCKING: The VA space lock must be held in write mode. + // current->mm->mmap_lock must be held in write mode iff + // UVM_ATS_IBM_SUPPORTED_IN_KERNEL() is 1. + NV_STATUS uvm_ats_ibm_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space); + + // Disables ATS access for the gpu_va_space. Prior to calling this function, + // the caller must guarantee that the GPU will no longer make any ATS + // accesses in this GPU VA space, and that no ATS fault handling for this + // GPU will be attempted. + // + // LOCKING: This function may block on mmap_lock and the VA space lock, so + // neither must be held. + void uvm_ats_ibm_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space); + + // Synchronously invalidate ATS translations cached by GPU TLBs. The + // invalidate applies to all GPUs with active GPU VA spaces in va_space, and + // covers all pages touching any part of the given range. end is inclusive. + // + // GMMU translations in the given range are not guaranteed to be + // invalidated. + // + // LOCKING: No locks are required, but this function may be called with + // interrupts disabled. 
+ void uvm_ats_ibm_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end); +#else + static void uvm_ats_ibm_init_va_space(uvm_va_space_t *va_space) + { + + } + static NV_STATUS uvm_ats_ibm_register_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) + { + return NV_OK; + } + + static void uvm_ats_ibm_unregister_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) + { + + } + + static void uvm_ats_ibm_invalidate(uvm_va_space_t *va_space, NvU64 start, NvU64 end) + { + + } +#endif // UVM_ATS_IBM_SUPPORTED + +static NV_STATUS uvm_ats_ibm_bind_gpu(uvm_gpu_va_space_t *gpu_va_space) +{ + return NV_OK; +} + +static void uvm_ats_ibm_unbind_gpu(uvm_gpu_va_space_t *gpu_va_space) +{ + +} + +#endif // __UVM_ATS_IBM_H__ diff --git a/kernel-open/nvidia-uvm/uvm_ce_test.c b/kernel-open/nvidia-uvm/uvm_ce_test.c new file mode 100644 index 000000000..dd1ba52dd --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ce_test.c @@ -0,0 +1,680 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_channel.h" +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "uvm_test.h" +#include "uvm_tracker.h" +#include "uvm_va_space.h" +#include "uvm_rm_mem.h" +#include "uvm_mem.h" + +#define CE_TEST_MEM_SIZE (2 * 1024 * 1024) +#define CE_TEST_MEM_END_SIZE 32 +#define CE_TEST_MEM_BEGIN_SIZE 32 +#define CE_TEST_MEM_MIDDLE_SIZE (CE_TEST_MEM_SIZE - CE_TEST_MEM_BEGIN_SIZE - CE_TEST_MEM_END_SIZE) +#define CE_TEST_MEM_MIDDLE_OFFSET (CE_TEST_MEM_BEGIN_SIZE) +#define CE_TEST_MEM_END_OFFSET (CE_TEST_MEM_SIZE - CE_TEST_MEM_BEGIN_SIZE) +#define CE_TEST_MEM_COUNT 5 + +static NV_STATUS test_non_pipelined(uvm_gpu_t *gpu) +{ + NvU32 i; + NV_STATUS status; + uvm_rm_mem_t *mem[CE_TEST_MEM_COUNT] = { NULL }; + uvm_rm_mem_t *host_mem = NULL; + NvU32 *host_ptr; + NvU64 host_mem_gpu_va, mem_gpu_va; + NvU64 dst_va; + NvU64 src_va; + uvm_push_t push; + bool is_proxy; + + status = uvm_rm_mem_alloc_and_map_cpu(gpu, UVM_RM_MEM_TYPE_SYS, CE_TEST_MEM_SIZE, &host_mem); + TEST_CHECK_GOTO(status == NV_OK, done); + host_ptr = (NvU32 *)uvm_rm_mem_get_cpu_va(host_mem); + memset(host_ptr, 0, CE_TEST_MEM_SIZE); + + for (i = 0; i < CE_TEST_MEM_COUNT; ++i) { + status = uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_GPU, CE_TEST_MEM_SIZE, &mem[i]); + TEST_CHECK_GOTO(status == NV_OK, done); + } + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "Non-pipelined test"); + TEST_CHECK_GOTO(status == NV_OK, done); + + is_proxy = uvm_channel_is_proxy(push.channel); + host_mem_gpu_va = uvm_rm_mem_get_gpu_va(host_mem, gpu, is_proxy); + + // All of the following CE transfers are done from a single (L)CE and + // disabling pipelining is enough to order them when needed. Only push_end + // needs a MEMBAR SYS to order everything with the CPU. + + // Initialize to a bad value + for (i = 0; i < CE_TEST_MEM_COUNT; ++i) { + mem_gpu_va = uvm_rm_mem_get_gpu_va(mem[i], gpu, is_proxy); + + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + gpu->parent->ce_hal->memset_v_4(&push, mem_gpu_va, 1337 + i, CE_TEST_MEM_SIZE); + } + + // Set the first buffer to 1 + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + mem_gpu_va = uvm_rm_mem_get_gpu_va(mem[0], gpu, is_proxy); + gpu->parent->ce_hal->memset_v_4(&push, mem_gpu_va, 1, CE_TEST_MEM_SIZE); + + for (i = 0; i < CE_TEST_MEM_COUNT; ++i) { + NvU32 dst = i + 1; + if (dst == CE_TEST_MEM_COUNT) + dst_va = host_mem_gpu_va; + else + dst_va = uvm_rm_mem_get_gpu_va(mem[dst], gpu, is_proxy); + + src_va = uvm_rm_mem_get_gpu_va(mem[i], gpu, is_proxy); + + // The first memcpy needs to be non-pipelined as otherwise the previous + // memset/memcpy to the source may not be done yet. 
+ + // Alternate the order of copying the beginning and the end + if (i % 2 == 0) { + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + gpu->parent->ce_hal->memcopy_v_to_v(&push, dst_va + CE_TEST_MEM_END_OFFSET, src_va + CE_TEST_MEM_END_OFFSET, CE_TEST_MEM_END_SIZE); + + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + gpu->parent->ce_hal->memcopy_v_to_v(&push, + dst_va + CE_TEST_MEM_MIDDLE_OFFSET, + src_va + CE_TEST_MEM_MIDDLE_OFFSET, + CE_TEST_MEM_MIDDLE_SIZE); + + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + gpu->parent->ce_hal->memcopy_v_to_v(&push, dst_va, src_va, CE_TEST_MEM_BEGIN_SIZE); + } + else { + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + gpu->parent->ce_hal->memcopy_v_to_v(&push, dst_va, src_va, CE_TEST_MEM_BEGIN_SIZE); + + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + gpu->parent->ce_hal->memcopy_v_to_v(&push, + dst_va + CE_TEST_MEM_MIDDLE_OFFSET, + src_va + CE_TEST_MEM_MIDDLE_OFFSET, + CE_TEST_MEM_MIDDLE_SIZE); + + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + gpu->parent->ce_hal->memcopy_v_to_v(&push, + dst_va + CE_TEST_MEM_END_OFFSET, + src_va + CE_TEST_MEM_END_OFFSET, + CE_TEST_MEM_END_SIZE); + } + } + + status = uvm_push_end_and_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, done); + + + for (i = 0; i < CE_TEST_MEM_SIZE / sizeof(NvU32); ++i) { + if (host_ptr[i] != 1) { + UVM_TEST_PRINT("host_ptr[%u] = %u instead of 1\n", i, host_ptr[i]); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + +done: + for (i = 0; i < CE_TEST_MEM_COUNT; ++i) { + uvm_rm_mem_free(mem[i]); + } + uvm_rm_mem_free(host_mem); + + return status; +} + +#define REDUCTIONS 32 + +static NV_STATUS test_membar(uvm_gpu_t *gpu) +{ + NvU32 i; + NV_STATUS status; + uvm_rm_mem_t *host_mem = NULL; + NvU32 *host_ptr; + NvU64 host_mem_gpu_va; + uvm_push_t push; + NvU32 value; + + status = uvm_rm_mem_alloc_and_map_cpu(gpu, UVM_RM_MEM_TYPE_SYS, sizeof(NvU32), &host_mem); + TEST_CHECK_GOTO(status == NV_OK, done); + host_ptr = (NvU32 *)uvm_rm_mem_get_cpu_va(host_mem); + *host_ptr = 0; + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_TO_CPU, &push, "Membar test"); + TEST_CHECK_GOTO(status == NV_OK, done); + + host_mem_gpu_va = uvm_rm_mem_get_gpu_va(host_mem, gpu, uvm_channel_is_proxy(push.channel)); + + for (i = 0; i < REDUCTIONS; ++i) { + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + gpu->parent->ce_hal->semaphore_reduction_inc(&push, host_mem_gpu_va, REDUCTIONS + 1); + } + + // Without a sys membar the channel tracking semaphore can and does complete + // before all the reductions. 
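+    // Each reduction above was pushed with UVM_PUSH_FLAG_NEXT_MEMBAR_NONE, so
+    // the only system-scope membar in this push is presumably the one implied
+    // by ending the push; that is what lets the CPU read of *host_ptr below
+    // observe all REDUCTIONS increments.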
+ status = uvm_push_end_and_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, done); + + value = *host_ptr; + if (value != REDUCTIONS) { + UVM_TEST_PRINT("Value = %u instead of %u, GPU %s\n", value, REDUCTIONS, uvm_gpu_name(gpu)); + status = NV_ERR_INVALID_STATE; + goto done; + } + +done: + uvm_rm_mem_free(host_mem); + + return status; +} + +static void push_memset(uvm_push_t *push, uvm_gpu_address_t dst, NvU64 value, size_t element_size, size_t size) +{ + switch (element_size) { + case 1: + uvm_push_get_gpu(push)->parent->ce_hal->memset_1(push, dst, (NvU8)value, size); + break; + case 4: + uvm_push_get_gpu(push)->parent->ce_hal->memset_4(push, dst, (NvU32)value, size); + break; + case 8: + uvm_push_get_gpu(push)->parent->ce_hal->memset_8(push, dst, value, size); + break; + default: + UVM_ASSERT(0); + } +} + +static NV_STATUS test_unaligned_memset(uvm_gpu_t *gpu, + uvm_gpu_address_t gpu_verif_addr, + NvU8 *cpu_verif_addr, + size_t size, + size_t element_size, + size_t offset) +{ + uvm_push_t push; + NV_STATUS status; + size_t i; + NvU64 value64 = (offset + 2) * (1ull << 32) + (offset + 1); + NvU64 test_value, expected_value = 0; + uvm_gpu_address_t dst; + + // Copy a single element at an unaligned position and make sure it doesn't + // clobber anything else + TEST_CHECK_RET(gpu_verif_addr.address % element_size == 0); + TEST_CHECK_RET(offset + element_size <= size); + dst = gpu_verif_addr; + dst.address += offset; + + memset(cpu_verif_addr, (NvU8)(~value64), size); + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, + "memset_%zu offset %zu", + element_size, offset); + TEST_CHECK_RET(status == NV_OK); + + push_memset(&push, dst, value64, element_size, element_size); + status = uvm_push_end_and_wait(&push); + TEST_CHECK_RET(status == NV_OK); + + // Make sure all bytes of element are present + test_value = 0; + memcpy(&test_value, cpu_verif_addr + offset, element_size); + + switch (element_size) { + case 1: + expected_value = (NvU8)value64; + break; + case 4: + expected_value = (NvU32)value64; + break; + case 8: + expected_value = value64; + break; + default: + UVM_ASSERT(0); + } + + if (test_value != expected_value) { + UVM_TEST_PRINT("memset_%zu offset %zu failed, written value is 0x%llx instead of 0x%llx\n", + element_size, offset, test_value, expected_value); + return NV_ERR_INVALID_STATE; + } + + // Make sure all other bytes are unchanged + for (i = 0; i < size; i++) { + if (i >= offset && i < offset + element_size) + continue; + if (cpu_verif_addr[i] != (NvU8)(~value64)) { + UVM_TEST_PRINT("memset_%zu offset %zu failed, immutable byte %zu changed value from 0x%x to 0x%x\n", + element_size, offset, i, (NvU8)(~value64), + cpu_verif_addr[i]); + return NV_ERR_INVALID_STATE; + } + } + + return NV_OK; +} + +static NV_STATUS test_memcpy_and_memset_inner(uvm_gpu_t *gpu, + uvm_gpu_address_t dst, + uvm_gpu_address_t src, + size_t size, + size_t element_size, + uvm_gpu_address_t gpu_verif_addr, + void *cpu_verif_addr, + int test_iteration) +{ + uvm_push_t push; + size_t i; + const char *src_type = src.is_virtual ? "virtual" : "physical"; + const char *src_loc = src.aperture == UVM_APERTURE_SYS ? "sysmem" : "vidmem"; + const char *dst_type = dst.is_virtual ? "virtual" : "physical"; + const char *dst_loc = dst.aperture == UVM_APERTURE_SYS ? 
"sysmem" : "vidmem"; + + NvU64 value64 = (test_iteration + 2) * (1ull << 32) + (test_iteration + 1); + NvU64 test_value = 0, expected_value = 0; + + TEST_NV_CHECK_RET(uvm_push_begin(gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_INTERNAL, + &push, + "Memset %s %s (0x%llx) and memcopy to %s %s (0x%llx), iter %d", + src_type, + src_loc, + src.address, + dst_type, + dst_loc, + dst.address, + test_iteration)); + + // Waive if any of the input addresses is physical but the channel does not + // support physical addressing + if (!uvm_channel_is_privileged(push.channel) && (!dst.is_virtual || !src.is_virtual)) { + TEST_NV_CHECK_RET(uvm_push_end_and_wait(&push)); + return NV_OK; + } + + // The input virtual addresses exist in UVM's internal address space, not + // the proxy address space + if (uvm_channel_is_proxy(push.channel)) { + TEST_NV_CHECK_RET(uvm_push_end_and_wait(&push)); + return NV_ERR_INVALID_STATE; + } + + // Memset src with the appropriate element size, then memcpy to dst and from + // dst to the verif location (physical sysmem). + + push_memset(&push, src, value64, element_size, size); + gpu->parent->ce_hal->memcopy(&push, dst, src, size); + gpu->parent->ce_hal->memcopy(&push, gpu_verif_addr, dst, size); + + TEST_NV_CHECK_RET(uvm_push_end_and_wait(&push)); + + for (i = 0; i < size / element_size; i++) { + switch (element_size) { + case 1: + expected_value = (NvU8)value64; + test_value = ((NvU8 *)cpu_verif_addr)[i]; + break; + case 4: + expected_value = (NvU32)value64; + test_value = ((NvU32 *)cpu_verif_addr)[i]; + break; + case 8: + expected_value = value64; + test_value = ((NvU64 *)cpu_verif_addr)[i]; + break; + default: + UVM_ASSERT(0); + } + + if (test_value != expected_value) { + UVM_TEST_PRINT("memset_%zu of %s %s and memcpy into %s %s failed, value[%zu] = 0x%llx instead of 0x%llx\n", + element_size, src_type, src_loc, dst_type, dst_loc, + i, test_value, expected_value); + return NV_ERR_INVALID_STATE; + } + } + + return NV_OK; +} + +static NV_STATUS test_memcpy_and_memset(uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + bool is_proxy_va_space; + uvm_gpu_address_t gpu_verif_addr; + void *cpu_verif_addr; + uvm_mem_t *verif_mem = NULL; + uvm_mem_t *sys_uvm_mem = NULL; + uvm_mem_t *gpu_uvm_mem = NULL; + uvm_rm_mem_t *sys_rm_mem = NULL; + uvm_rm_mem_t *gpu_rm_mem = NULL; + uvm_gpu_address_t gpu_addresses[4]; + NvU64 gpu_va; + size_t size; + static const size_t element_sizes[] = {1, 4, 8}; + const size_t iterations = 4; + size_t i, j, k, s; + uvm_mem_alloc_params_t mem_params = {0}; + + size = gpu->big_page.internal_size; + + TEST_NV_CHECK_GOTO(uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, current->mm, &verif_mem), done); + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(verif_mem, gpu), done); + + gpu_verif_addr = uvm_mem_gpu_address_virtual_kernel(verif_mem, gpu); + cpu_verif_addr = uvm_mem_get_cpu_addr_kernel(verif_mem); + + for (i = 0; i < iterations; ++i) { + for (s = 0; s < ARRAY_SIZE(element_sizes); s++) { + TEST_NV_CHECK_GOTO(test_unaligned_memset(gpu, + gpu_verif_addr, + cpu_verif_addr, + size, + element_sizes[s], + i), + done); + } + } + + // Using a page size equal to the allocation size ensures that the UVM + // memories about to be allocated are physically contiguous. And since the + // size is a valid GPU page size, the memories can be virtually mapped on + // the GPU if needed. 
+ mem_params.size = size; + mem_params.page_size = size; + mem_params.mm = current->mm; + + // Physical address in sysmem + TEST_NV_CHECK_GOTO(uvm_mem_alloc(&mem_params, &sys_uvm_mem), done); + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_phys(sys_uvm_mem, gpu), done); + gpu_addresses[0] = uvm_mem_gpu_address_physical(sys_uvm_mem, gpu, 0, size); + + // Physical address in vidmem + mem_params.backing_gpu = gpu; + TEST_NV_CHECK_GOTO(uvm_mem_alloc(&mem_params, &gpu_uvm_mem), done); + gpu_addresses[1] = uvm_mem_gpu_address_physical(gpu_uvm_mem, gpu, 0, size); + + // Virtual address (in UVM's internal address space) backed by vidmem + TEST_NV_CHECK_GOTO(uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_GPU, size, &gpu_rm_mem), done); + is_proxy_va_space = false; + gpu_va = uvm_rm_mem_get_gpu_va(gpu_rm_mem, gpu, is_proxy_va_space); + gpu_addresses[2] = uvm_gpu_address_virtual(gpu_va); + + // Virtual address (in UVM's internal address space) backed by sysmem + TEST_NV_CHECK_GOTO(uvm_rm_mem_alloc(gpu, UVM_RM_MEM_TYPE_SYS, size, &sys_rm_mem), done); + gpu_va = uvm_rm_mem_get_gpu_va(sys_rm_mem, gpu, is_proxy_va_space); + gpu_addresses[3] = uvm_gpu_address_virtual(gpu_va); + + for (i = 0; i < iterations; ++i) { + for (j = 0; j < ARRAY_SIZE(gpu_addresses); ++j) { + for (k = 0; k < ARRAY_SIZE(gpu_addresses); ++k) { + for (s = 0; s < ARRAY_SIZE(element_sizes); s++) { + TEST_NV_CHECK_GOTO(test_memcpy_and_memset_inner(gpu, + gpu_addresses[k], + gpu_addresses[j], + size, + element_sizes[s], + gpu_verif_addr, + cpu_verif_addr, + i), + done); + } + } + } + } + +done: + uvm_rm_mem_free(sys_rm_mem); + uvm_rm_mem_free(gpu_rm_mem); + uvm_mem_free(gpu_uvm_mem); + uvm_mem_free(sys_uvm_mem); + uvm_mem_free(verif_mem); + + return status; +} + +static NV_STATUS test_semaphore_alloc_sem(uvm_gpu_t *gpu, size_t size, uvm_mem_t **mem_out) +{ + NvU64 gpu_va; + NV_STATUS status = NV_OK; + uvm_mem_t *mem = NULL; + + TEST_NV_CHECK_RET(uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, current->mm, &mem)); + + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(mem, gpu), error); + + gpu_va = uvm_mem_get_gpu_va_kernel(mem, gpu); + + // This semaphore resides in the uvm_mem region, i.e., it has the GPU VA + // MSbit set. The intent is to validate semaphore operations when the + // semaphore's VA is in the high-end of the GPU effective virtual address + // space spectrum, i.e., its VA upper-bit is set. + TEST_CHECK_GOTO(gpu_va & (1ULL << (gpu->address_space_tree.hal->num_va_bits() - 1)), error); + + *mem_out = mem; + + return NV_OK; + +error: + uvm_mem_free(mem); + return status; +} + +// test_semaphore_reduction_inc is similar in concept to test_membar(). It uses +// uvm_mem (instead of uvm_rm_mem) as the semaphore, i.e., it assumes that the +// CE HAL has been validated, since uvm_mem needs the CE memset/memcopy to be +// operational as a pre-requisite for GPU PTE writes. The purpose of +// test_semaphore_reduction_inc is to validate the reduction inc operation on +// semaphores with their VA's upper-bit set. +static NV_STATUS test_semaphore_reduction_inc(uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_push_t push; + uvm_mem_t *mem; + NvU64 gpu_va; + NvU32 i; + NvU32 *host_ptr = NULL; + NvU32 value; + + // Semaphore reduction needs 1 word (4 bytes). + const size_t size = sizeof(NvU32); + + status = test_semaphore_alloc_sem(gpu, size, &mem); + TEST_CHECK_RET(status == NV_OK); + + // Initialize the counter of reductions. 
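+    // The GPU bumps this word REDUCTIONS times via semaphore_reduction_inc
+    // (with payloads 1..REDUCTIONS) in the loop below, so starting the
+    // CPU-visible counter at zero lets the final check simply compare against
+    // REDUCTIONS.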
+ host_ptr = uvm_mem_get_cpu_addr_kernel(mem); + TEST_CHECK_GOTO(host_ptr != NULL, done); + *host_ptr = 0; + + gpu_va = uvm_mem_get_gpu_va_kernel(mem, gpu); + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "semaphore_reduction_inc test"); + TEST_CHECK_GOTO(status == NV_OK, done); + + for (i = 0; i < REDUCTIONS; i++) { + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + gpu->parent->ce_hal->semaphore_reduction_inc(&push, gpu_va, i+1); + } + + status = uvm_push_end_and_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, done); + + value = *host_ptr; + if (value != REDUCTIONS) { + UVM_TEST_PRINT("Value = %u instead of %u, GPU %s\n", value, REDUCTIONS, uvm_gpu_name(gpu)); + status = NV_ERR_INVALID_STATE; + goto done; + } + +done: + uvm_mem_free(mem); + + return status; +} + +static NV_STATUS test_semaphore_release(uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_push_t push; + uvm_mem_t *mem; + NvU64 gpu_va; + NvU32 value; + NvU32 *host_ptr = NULL; + NvU32 payload = 0xA5A55A5A; + + // Semaphore release needs 1 word (4 bytes). + const size_t size = sizeof(NvU32); + + status = test_semaphore_alloc_sem(gpu, size, &mem); + TEST_CHECK_RET(status == NV_OK); + + // Initialize the payload. + host_ptr = uvm_mem_get_cpu_addr_kernel(mem); + TEST_CHECK_GOTO(host_ptr != NULL, done); + *host_ptr = 0; + + gpu_va = uvm_mem_get_gpu_va_kernel(mem, gpu); + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "semaphore_release test"); + TEST_CHECK_GOTO(status == NV_OK, done); + + gpu->parent->ce_hal->semaphore_release(&push, gpu_va, payload); + + status = uvm_push_end_and_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, done); + + value = *host_ptr; + if (value != payload) { + UVM_TEST_PRINT("Semaphore payload = %u instead of %u, GPU %s\n", value, payload, uvm_gpu_name(gpu)); + status = NV_ERR_INVALID_STATE; + goto done; + } + +done: + uvm_mem_free(mem); + + return status; +} + +static NV_STATUS test_semaphore_timestamp(uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_push_t push; + uvm_mem_t *mem; + NvU64 gpu_va; + NvU32 i; + NvU64 *timestamp; + NvU64 last_timestamp = 0; + + // 2 iterations: + // 1: compare retrieved timestamp with 0; + // 2: compare retrieved timestamp with previous timestamp (obtained in 1). + const NvU32 iterations = 2; + + // The semaphore is 4 words long (16 bytes). + const size_t size = 16; + + status = test_semaphore_alloc_sem(gpu, size, &mem); + TEST_CHECK_RET(status == NV_OK); + + timestamp = uvm_mem_get_cpu_addr_kernel(mem); + TEST_CHECK_GOTO(timestamp != NULL, done); + memset(timestamp, 0, size); + + // Shift the timestamp pointer to where the semaphore timestamp info is. 
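+    // timestamp is an NvU64 *, so advancing it by one skips the first 8 bytes
+    // of the 16-byte semaphore; the assumption here is that the GPU writes the
+    // timestamp into the second 64-bit word of the semaphore structure.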
+ timestamp += 1; + + gpu_va = uvm_mem_get_gpu_va_kernel(mem, gpu); + + for (i = 0; i < iterations; i++) { + status = uvm_push_begin(gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_INTERNAL, + &push, + "semaphore_timestamp test, iter: %u", + i); + TEST_CHECK_GOTO(status == NV_OK, done); + + gpu->parent->ce_hal->semaphore_timestamp(&push, gpu_va); + + status = uvm_push_end_and_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, done); + + TEST_CHECK_GOTO(*timestamp != 0, done); + TEST_CHECK_GOTO(*timestamp >= last_timestamp, done); + last_timestamp = *timestamp; + } + +done: + uvm_mem_free(mem); + + return status; +} + +static NV_STATUS test_ce(uvm_va_space_t *va_space, bool skipTimestampTest) +{ + uvm_gpu_t *gpu; + + for_each_va_space_gpu(gpu, va_space) { + TEST_NV_CHECK_RET(test_non_pipelined(gpu)); + TEST_NV_CHECK_RET(test_membar(gpu)); + TEST_NV_CHECK_RET(test_memcpy_and_memset(gpu)); + TEST_NV_CHECK_RET(test_semaphore_reduction_inc(gpu)); + TEST_NV_CHECK_RET(test_semaphore_release(gpu)); + if (!skipTimestampTest) + TEST_NV_CHECK_RET(test_semaphore_timestamp(gpu)); + } + + return NV_OK; +} + +NV_STATUS uvm_test_ce_sanity(UVM_TEST_CE_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_read_rm(va_space); + + status = test_ce(va_space, params->skipTimestampTest); + if (status != NV_OK) + goto done; + +done: + uvm_va_space_up_read_rm(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_channel.c b/kernel-open/nvidia-uvm/uvm_channel.c new file mode 100644 index 000000000..5eaf3f14e --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_channel.c @@ -0,0 +1,1792 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_channel.h" + +#include "uvm_api.h" +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_procfs.h" +#include "uvm_push.h" +#include "uvm_gpu_semaphore.h" +#include "uvm_lock.h" +#include "uvm_kvmalloc.h" + +#include "nv_uvm_interface.h" +#include "clb06f.h" + +#define UVM_CHANNEL_NUM_GPFIFO_ENTRIES_DEFAULT 1024 +#define UVM_CHANNEL_NUM_GPFIFO_ENTRIES_MIN 32 +#define UVM_CHANNEL_NUM_GPFIFO_ENTRIES_MAX (1024 * 1024) + +static unsigned uvm_channel_num_gpfifo_entries = UVM_CHANNEL_NUM_GPFIFO_ENTRIES_DEFAULT; + +#define UVM_CHANNEL_GPFIFO_LOC_DEFAULT "auto" + +static char *uvm_channel_gpfifo_loc = UVM_CHANNEL_GPFIFO_LOC_DEFAULT; + +#define UVM_CHANNEL_GPPUT_LOC_DEFAULT "auto" + +static char *uvm_channel_gpput_loc = UVM_CHANNEL_GPPUT_LOC_DEFAULT; + +#define UVM_CHANNEL_PUSHBUFFER_LOC_DEFAULT "auto" + +static char *uvm_channel_pushbuffer_loc = UVM_CHANNEL_PUSHBUFFER_LOC_DEFAULT; + +module_param(uvm_channel_num_gpfifo_entries, uint, S_IRUGO); +module_param(uvm_channel_gpfifo_loc, charp, S_IRUGO); +module_param(uvm_channel_gpput_loc, charp, S_IRUGO); +module_param(uvm_channel_pushbuffer_loc, charp, S_IRUGO); + +static NV_STATUS manager_create_procfs_dirs(uvm_channel_manager_t *manager); +static NV_STATUS manager_create_procfs(uvm_channel_manager_t *manager); +static NV_STATUS channel_create_procfs(uvm_channel_t *channel); + +typedef enum +{ + // Only remove completed GPFIFO entries from the pushbuffer + UVM_CHANNEL_UPDATE_MODE_COMPLETED, + + // Remove all remaining GPFIFO entries from the pushbuffer, regardless of + // whether they're actually done yet. + UVM_CHANNEL_UPDATE_MODE_FORCE_ALL +} uvm_channel_update_mode_t; + +// Update channel progress, completing up to max_to_complete entries +static NvU32 uvm_channel_update_progress_with_max(uvm_channel_t *channel, + NvU32 max_to_complete, + uvm_channel_update_mode_t mode) +{ + NvU32 gpu_get; + NvU32 cpu_put; + NvU32 completed_count = 0; + NvU32 pending_gpfifos; + + NvU64 completed_value = uvm_channel_update_completed_value(channel); + + uvm_spin_lock(&channel->pool->lock); + + cpu_put = channel->cpu_put; + gpu_get = channel->gpu_get; + + while (gpu_get != cpu_put && completed_count < max_to_complete) { + uvm_gpfifo_entry_t *entry = &channel->gpfifo_entries[gpu_get]; + + if (mode == UVM_CHANNEL_UPDATE_MODE_COMPLETED && entry->tracking_semaphore_value > completed_value) + break; + + uvm_pushbuffer_mark_completed(channel->pool->manager->pushbuffer, entry); + list_add_tail(&entry->push_info->available_list_node, &channel->available_push_infos); + gpu_get = (gpu_get + 1) % channel->num_gpfifo_entries; + ++completed_count; + } + + channel->gpu_get = gpu_get; + + uvm_spin_unlock(&channel->pool->lock); + + if (cpu_put >= gpu_get) + pending_gpfifos = cpu_put - gpu_get; + else + pending_gpfifos = channel->num_gpfifo_entries - gpu_get + cpu_put; + + return pending_gpfifos; +} + +NvU32 uvm_channel_update_progress(uvm_channel_t *channel) +{ + // By default, don't complete too many entries at a time to spread the cost + // of doing so across callers and avoid holding a spin lock for too long. + return uvm_channel_update_progress_with_max(channel, 8, UVM_CHANNEL_UPDATE_MODE_COMPLETED); +} + +// Update progress for all pending GPFIFO entries. This might take a longer time +// and should be only used in exceptional circumstances like when a channel +// error is encountered. Otherwise, uvm_chanel_update_progress() should be used. 
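+// (Editor's illustrative note: the normal path is uvm_channel_update_progress(),
+// which callers typically poll in a spin loop, roughly:
+//
+//     uvm_spin_loop_init(&spin);
+//     while (uvm_channel_manager_update_progress(manager) > 0 && status == NV_OK) {
+//         UVM_SPIN_LOOP(&spin);
+//         status = uvm_channel_manager_check_errors(manager);
+//     }
+//
+// as done in uvm_channel_manager_wait() further below.)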
+static NvU32 channel_update_progress_all(uvm_channel_t *channel, uvm_channel_update_mode_t mode) +{ + return uvm_channel_update_progress_with_max(channel, channel->num_gpfifo_entries, mode); +} + +NvU32 uvm_channel_update_progress_all(uvm_channel_t *channel) +{ + return channel_update_progress_all(channel, UVM_CHANNEL_UPDATE_MODE_COMPLETED); +} + +NvU32 uvm_channel_manager_update_progress(uvm_channel_manager_t *channel_manager) +{ + NvU32 pending_gpfifos = 0; + uvm_channel_pool_t *pool; + + uvm_for_each_pool(pool, channel_manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) + pending_gpfifos += uvm_channel_update_progress(channel); + } + + return pending_gpfifos; +} + +static bool channel_is_available(uvm_channel_t *channel) +{ + NvU32 next_put; + + uvm_assert_spinlock_locked(&channel->pool->lock); + + next_put = (channel->cpu_put + channel->current_pushes_count + 1) % channel->num_gpfifo_entries; + + return (next_put != channel->gpu_get); +} + +static bool try_claim_channel(uvm_channel_t *channel) +{ + bool claimed = false; + + uvm_spin_lock(&channel->pool->lock); + + if (channel_is_available(channel)) { + ++channel->current_pushes_count; + claimed = true; + } + + uvm_spin_unlock(&channel->pool->lock); + + return claimed; +} + +static void lock_push(uvm_channel_t *channel) +{ + + + + +} + +static void unlock_push(uvm_channel_t *channel) +{ + + + + +} + +static bool trylock_push(uvm_channel_t *channel) +{ + + + + + return true; +} + +// Reserve a channel in the specified pool +static NV_STATUS channel_reserve_in_pool(uvm_channel_pool_t *pool, uvm_channel_t **channel_out) +{ + uvm_channel_t *channel; + uvm_spin_loop_t spin; + + UVM_ASSERT(pool); + + uvm_for_each_channel_in_pool(channel, pool) { + // TODO: Bug 1764953: Prefer idle/less busy channels + + + + + + if (trylock_push(channel)) { + if (try_claim_channel(channel)) { + *channel_out = channel; + return NV_OK; + } + else { + unlock_push(channel); + } + } + } + + uvm_spin_loop_init(&spin); + while (1) { + uvm_for_each_channel_in_pool(channel, pool) { + NV_STATUS status; + + uvm_channel_update_progress(channel); + + if (try_claim_channel(channel)) { + lock_push(channel); + *channel_out = channel; + + return NV_OK; + } + + status = uvm_channel_check_errors(channel); + if (status != NV_OK) + return status; + + UVM_SPIN_LOOP(&spin); + } + } + + UVM_ASSERT_MSG(0, "Cannot get here?!\n"); + return NV_ERR_GENERIC; +} + +NV_STATUS uvm_channel_reserve_type(uvm_channel_manager_t *manager, uvm_channel_type_t type, uvm_channel_t **channel_out) +{ + UVM_ASSERT(type < UVM_CHANNEL_TYPE_COUNT); + return channel_reserve_in_pool(manager->pool_to_use.default_for_type[type], channel_out); +} + +NV_STATUS uvm_channel_reserve_gpu_to_gpu(uvm_channel_manager_t *manager, + uvm_gpu_t *dst_gpu, + uvm_channel_t **channel_out) +{ + const NvU32 dst_gpu_index = uvm_id_gpu_index(dst_gpu->id); + uvm_channel_pool_t *pool = manager->pool_to_use.gpu_to_gpu[dst_gpu_index]; + + // If there is no recommended pool for the given GPU pair, use default + if (pool == NULL) + pool = manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_GPU_TO_GPU]; + + UVM_ASSERT(pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE); + + return channel_reserve_in_pool(pool, channel_out); +} + +NV_STATUS uvm_channel_manager_wait(uvm_channel_manager_t *manager) +{ + NV_STATUS status = NV_OK; + uvm_spin_loop_t spin; + + if (uvm_channel_manager_update_progress(manager) == 0) + return uvm_channel_manager_check_errors(manager); + + uvm_spin_loop_init(&spin); + while 
(uvm_channel_manager_update_progress(manager) > 0 && status == NV_OK) { + UVM_SPIN_LOOP(&spin); + status = uvm_channel_manager_check_errors(manager); + } + + return status; +} + +static NvU32 channel_get_available_push_info_index(uvm_channel_t *channel) +{ + uvm_push_info_t *push_info; + + uvm_spin_lock(&channel->pool->lock); + + push_info = list_first_entry_or_null(&channel->available_push_infos, uvm_push_info_t, available_list_node); + UVM_ASSERT(push_info != NULL); + UVM_ASSERT(push_info->on_complete == NULL && push_info->on_complete_data == NULL); + list_del(&push_info->available_list_node); + + uvm_spin_unlock(&channel->pool->lock); + + return push_info - channel->push_infos; +} + +NV_STATUS uvm_channel_begin_push(uvm_channel_t *channel, uvm_push_t *push) +{ + NV_STATUS status; + uvm_channel_manager_t *manager; + + UVM_ASSERT(channel); + UVM_ASSERT(push); + + manager = channel->pool->manager; + + + + + + + + + status = uvm_pushbuffer_begin_push(manager->pushbuffer, push); + if (status != NV_OK) + return status; + + push->channel = channel; + push->channel_tracking_value = 0; + push->push_info_index = channel_get_available_push_info_index(channel); + + return NV_OK; +} + +static void internal_channel_submit_work(uvm_push_t *push, NvU32 push_size, NvU32 new_gpu_put) +{ + NvU64 *gpfifo_entry; + NvU64 pushbuffer_va; + uvm_channel_t *channel = push->channel; + uvm_channel_manager_t *channel_manager = channel->pool->manager; + uvm_pushbuffer_t *pushbuffer = channel_manager->pushbuffer; + uvm_gpu_t *gpu = channel_manager->gpu; + + BUILD_BUG_ON(sizeof(*gpfifo_entry) != NVB06F_GP_ENTRY__SIZE); + UVM_ASSERT(!uvm_channel_is_proxy(channel)); + + gpfifo_entry = (NvU64*)channel->channel_info.gpFifoEntries + channel->cpu_put; + pushbuffer_va = uvm_pushbuffer_get_gpu_va_for_push(pushbuffer, push); + + gpu->parent->host_hal->set_gpfifo_entry(gpfifo_entry, pushbuffer_va, push_size); + + // Need to make sure all the pushbuffer and the GPFIFO entries writes + // complete before updating GPPUT. We also don't want any reads to be moved + // after the GPPut write as the GPU might modify the data they read as soon + // as the GPPut write happens. + mb(); + + gpu->parent->host_hal->write_gpu_put(channel, new_gpu_put); +} + +static void proxy_channel_submit_work(uvm_push_t *push, NvU32 push_size) +{ + NV_STATUS status; + uvm_channel_t *channel = push->channel; + + UVM_ASSERT(uvm_channel_is_proxy(channel)); + + // nvUvmInterfacePagingChannelPushStream should not sleep, because a + // spinlock is currently held. + uvm_assert_spinlock_locked(&channel->pool->lock); + + status = nvUvmInterfacePagingChannelPushStream(channel->proxy.handle, (char *) push->begin, push_size); + + if (status != NV_OK) { + uvm_push_info_t *push_info = uvm_push_info_from_push(push); + + // If the RM invocation fails, there is no clean recovery available + // (for example, the vGPU plugin may have crashed), so swallow the error + // but print a descriptive message about the failed push. 
+ UVM_ASSERT_MSG(status == NV_OK, + "nvUvmInterfacePagingChannelPushStream() failed: %s, GPU %s, push '%s' started at %s:%d in %s()\n", + nvstatusToString(status), + uvm_gpu_name(uvm_channel_get_gpu(channel)), + push_info->description, + push_info->filename, + push_info->line, + push_info->function); + } +} + +static void uvm_channel_semaphore_release(uvm_push_t *push, NvU64 semaphore_va, NvU32 new_payload) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + if (uvm_channel_is_ce(push->channel)) + gpu->parent->ce_hal->semaphore_release(push, semaphore_va, new_payload); + + + + + else + UVM_ASSERT_MSG(0, "Semaphore release on an unsupported channel.\n"); +} + +void uvm_channel_end_push(uvm_push_t *push) +{ + uvm_channel_t *channel = push->channel; + uvm_channel_manager_t *channel_manager = channel->pool->manager; + uvm_pushbuffer_t *pushbuffer = channel_manager->pushbuffer; + uvm_gpfifo_entry_t *entry; + NvU64 semaphore_va; + NvU64 new_tracking_value; + NvU32 new_payload; + NvU32 push_size; + NvU32 cpu_put; + NvU32 new_cpu_put; + + uvm_spin_lock(&channel->pool->lock); + + new_tracking_value = ++channel->tracking_sem.queued_value; + new_payload = (NvU32)new_tracking_value; + + semaphore_va = uvm_channel_tracking_semaphore_get_gpu_va(channel); + uvm_channel_semaphore_release(push, semaphore_va, new_payload); + + push_size = uvm_push_get_size(push); + UVM_ASSERT_MSG(push_size <= UVM_MAX_PUSH_SIZE, "push size %u\n", push_size); + + cpu_put = channel->cpu_put; + new_cpu_put = (cpu_put + 1) % channel->num_gpfifo_entries; + + entry = &channel->gpfifo_entries[cpu_put]; + entry->tracking_semaphore_value = new_tracking_value; + entry->pushbuffer_offset = uvm_pushbuffer_get_offset_for_push(pushbuffer, push); + entry->pushbuffer_size = push_size; + entry->push_info = &channel->push_infos[push->push_info_index]; + + UVM_ASSERT(channel->current_pushes_count > 0); + --channel->current_pushes_count; + + if (uvm_channel_is_proxy(channel)) + proxy_channel_submit_work(push, push_size); + else + internal_channel_submit_work(push, push_size, new_cpu_put); + + channel->cpu_put = new_cpu_put; + + uvm_pushbuffer_end_push(pushbuffer, push, entry); + + // The moment the channel is unlocked uvm_channel_update_progress_with_max() + // may notice the GPU work to be completed and hence all state tracking the + // push must be updated before that. Notably uvm_pushbuffer_end_push() has + // to be called first. + uvm_spin_unlock(&channel->pool->lock); + unlock_push(channel); + + // This memory barrier is borrowed from CUDA, as it supposedly fixes perf + // issues on some systems. Comment from CUDA: "fixes throughput-related + // performance problems, e.g. bugs 626179, 593841. This may be related to + // bug 124888, which GL works around by doing a clflush" + wmb(); + + push->push_info_index = channel->num_gpfifo_entries; + push->channel_tracking_value = new_tracking_value; +} + +NV_STATUS uvm_channel_reserve(uvm_channel_t *channel) +{ + NV_STATUS status = NV_OK; + uvm_spin_loop_t spin; + + if (try_claim_channel(channel)) + goto out; + + uvm_channel_update_progress(channel); + + uvm_spin_loop_init(&spin); + while (!try_claim_channel(channel) && status == NV_OK) { + UVM_SPIN_LOOP(&spin); + status = uvm_channel_check_errors(channel); + uvm_channel_update_progress(channel); + } + +out: + if (status == NV_OK) + lock_push(channel); + + return status; +} + +// Get the first pending GPFIFO entry, if any. +// This doesn't stop the entry from being reused. 
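+// In particular, the returned pointer may refer to an entry that is recycled
+// as soon as the pool lock is dropped, so callers such as
+// uvm_channel_get_fatal_entry() should treat it as best-effort diagnostic
+// information only.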
+static uvm_gpfifo_entry_t *uvm_channel_get_first_pending_entry(uvm_channel_t *channel) +{ + uvm_gpfifo_entry_t *entry = NULL; + NvU32 pending_count = channel_update_progress_all(channel, UVM_CHANNEL_UPDATE_MODE_COMPLETED); + + if (pending_count == 0) + return NULL; + + uvm_spin_lock(&channel->pool->lock); + + if (channel->gpu_get != channel->cpu_put) + entry = &channel->gpfifo_entries[channel->gpu_get]; + + uvm_spin_unlock(&channel->pool->lock); + + return entry; +} + +NV_STATUS uvm_channel_get_status(uvm_channel_t *channel) +{ + uvm_gpu_t *gpu; + NvNotification *errorNotifier; + + if (uvm_channel_is_proxy(channel)) + errorNotifier = channel->proxy.channel_info.shadowErrorNotifier; + else + errorNotifier = channel->channel_info.errorNotifier; + + if (errorNotifier->status == 0) + return NV_OK; + + // In case we hit a channel error, check the ECC error notifier as well so + // that a more precise ECC error can be returned in case there is indeed an + // ECC error. + // + // Notably this might be racy depending on the ordering of the notifications, + // but we can't always call RM to service interrupts from this context. + gpu = uvm_channel_get_gpu(channel); + if (gpu->ecc.enabled && *gpu->ecc.error_notifier) + return NV_ERR_ECC_ERROR; + + return NV_ERR_RC_ERROR; +} + +uvm_gpfifo_entry_t *uvm_channel_get_fatal_entry(uvm_channel_t *channel) +{ + UVM_ASSERT(uvm_channel_get_status(channel) != NV_OK); + + return uvm_channel_get_first_pending_entry(channel); +} + +NV_STATUS uvm_channel_check_errors(uvm_channel_t *channel) +{ + uvm_gpfifo_entry_t *fatal_entry; + NV_STATUS status = uvm_channel_get_status(channel); + + if (status == NV_OK) + return NV_OK; + + UVM_ERR_PRINT("Detected a channel error, channel %s GPU %s\n", + channel->name, + uvm_gpu_name(uvm_channel_get_gpu(channel))); + + fatal_entry = uvm_channel_get_fatal_entry(channel); + if (fatal_entry != NULL) { + uvm_push_info_t *push_info = fatal_entry->push_info; + UVM_ERR_PRINT("Channel error likely caused by push '%s' started at %s:%d in %s()\n", + push_info->description, push_info->filename, push_info->line, push_info->function); + } + + uvm_global_set_fatal_error(status); + return status; +} + +NV_STATUS uvm_channel_manager_check_errors(uvm_channel_manager_t *channel_manager) +{ + uvm_channel_pool_t *pool; + NV_STATUS status = uvm_global_get_status(); + + if (status != NV_OK) + return status; + + uvm_for_each_pool(pool, channel_manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) { + status = uvm_channel_check_errors(channel); + if (status != NV_OK) + return status; + } + } + + return status; +} + +bool uvm_channel_is_value_completed(uvm_channel_t *channel, NvU64 value) +{ + return uvm_gpu_tracking_semaphore_is_value_completed(&channel->tracking_sem, value); +} + +NvU64 uvm_channel_update_completed_value(uvm_channel_t *channel) +{ + return uvm_gpu_tracking_semaphore_update_completed_value(&channel->tracking_sem); +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +static void channel_destroy(uvm_channel_pool_t *pool, uvm_channel_t *channel) +{ + UVM_ASSERT(pool->num_channels > 0); + + if (channel->tracking_sem.queued_value > 0) { + // The channel should have been idled before being destroyed, unless an + // error was triggered. We need to check both error cases (global and + // channel) to handle the UVM_TEST_CHANNEL_SANITY unit test. 
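+        // A non-zero queued value means at least one push was submitted on
+        // this channel; channels torn down early (for example when creation
+        // fails partway through) skip this block entirely.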
+ if (uvm_global_get_status() == NV_OK && uvm_channel_get_status(channel) == NV_OK) + UVM_ASSERT(uvm_gpu_tracking_semaphore_is_completed(&channel->tracking_sem)); + + // Remove all remaining GPFIFOs from their pushbuffer chunk, since the + // pushbuffer has a longer lifetime. + channel_update_progress_all(channel, UVM_CHANNEL_UPDATE_MODE_FORCE_ALL); + } + + uvm_procfs_destroy_entry(channel->procfs.pushes); + uvm_procfs_destroy_entry(channel->procfs.info); + uvm_procfs_destroy_entry(channel->procfs.dir); + + uvm_kvfree(channel->push_acquire_infos); + uvm_kvfree(channel->push_infos); + + uvm_kvfree(channel->gpfifo_entries); + + + + + + if (uvm_channel_is_proxy(channel)) + uvm_rm_locked_call_void(nvUvmInterfacePagingChannelDestroy(channel->proxy.handle)); + else + uvm_rm_locked_call_void(nvUvmInterfaceChannelDestroy(channel->handle)); + + uvm_gpu_tracking_semaphore_free(&channel->tracking_sem); + + UVM_ASSERT(list_empty(&channel->tools.channel_list_node)); + UVM_ASSERT(channel->tools.pending_event_count == 0); + + pool->num_channels--; +} + +static NV_STATUS internal_channel_create(uvm_channel_t *channel, unsigned engine_index) +{ + NV_STATUS status; + UvmGpuChannelAllocParams channel_alloc_params; + UvmGpuChannelInfo *channel_info = &channel->channel_info; + uvm_channel_manager_t *manager = channel->pool->manager; + uvm_gpu_t *gpu = manager->gpu; + + if (uvm_channel_is_ce(channel)) { + UVM_ASSERT(channel->pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE); + + + + + + } + + memset(&channel_alloc_params, 0, sizeof(channel_alloc_params)); + channel_alloc_params.numGpFifoEntries = manager->conf.num_gpfifo_entries; + channel_alloc_params.gpFifoLoc = manager->conf.gpfifo_loc; + channel_alloc_params.gpPutLoc = manager->conf.gpput_loc; + channel_alloc_params.engineIndex = engine_index; + + if (uvm_channel_is_ce(channel)) + channel_alloc_params.engineType = UVM_GPU_CHANNEL_ENGINE_TYPE_CE; + + + + + + status = uvm_rm_locked_call(nvUvmInterfaceChannelAllocate(gpu->rm_address_space, + &channel_alloc_params, + &channel->handle, + channel_info)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfaceChannelAllocate() failed: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + snprintf(channel->name, + sizeof(channel->name), + "ID %u:%u (0x%x:0x%x) %s %u", + channel_info->hwRunlistId, + channel_info->hwChannelId, + channel_info->hwRunlistId, + channel_info->hwChannelId, + + + + "CE", + + engine_index); + + return NV_OK; +} + +static NV_STATUS proxy_channel_create(uvm_channel_t *channel, unsigned ce_index) +{ + NV_STATUS status; + unsigned proxy_index; + UvmGpuPagingChannelAllocParams channel_alloc_params; + uvm_channel_manager_t *manager = channel->pool->manager; + uvm_gpu_t *gpu = manager->gpu; + + UVM_ASSERT(uvm_channel_is_proxy(channel)); + + memset(&channel_alloc_params, 0, sizeof(channel_alloc_params)); + channel_alloc_params.engineIndex = ce_index; + + status = uvm_rm_locked_call(nvUvmInterfacePagingChannelAllocate(uvm_gpu_device_handle(gpu), + &channel_alloc_params, + &channel->proxy.handle, + &channel->proxy.channel_info)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfacePagingChannelAllocate() failed: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + proxy_index = uvm_channel_index_in_pool(channel); + snprintf(channel->name, sizeof(channel->name), "Proxy %u CE %u", proxy_index, ce_index); + + return NV_OK; +} + +static NV_STATUS channel_create(uvm_channel_pool_t *pool, uvm_channel_t *channel) +{ + NV_STATUS status; 
+ uvm_channel_manager_t *manager = pool->manager; + uvm_gpu_t *gpu = manager->gpu; + unsigned int i; + + UVM_ASSERT(channel != NULL); + + channel->pool = pool; + pool->num_channels++; + INIT_LIST_HEAD(&channel->available_push_infos); + channel->tools.pending_event_count = 0; + INIT_LIST_HEAD(&channel->tools.channel_list_node); + + status = uvm_gpu_tracking_semaphore_alloc(gpu->semaphore_pool, &channel->tracking_sem); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_gpu_tracking_semaphore_alloc() failed: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + goto error; + } + + if (uvm_channel_is_proxy(channel)) + status = proxy_channel_create(channel, pool->engine_index); + else + status = internal_channel_create(channel, pool->engine_index); + + if (status != NV_OK) + goto error; + + + + + + + + channel->num_gpfifo_entries = manager->conf.num_gpfifo_entries; + channel->gpfifo_entries = uvm_kvmalloc_zero(sizeof(*channel->gpfifo_entries) * channel->num_gpfifo_entries); + if (channel->gpfifo_entries == NULL) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + channel->push_infos = uvm_kvmalloc_zero(sizeof(*channel->push_infos) * channel->num_gpfifo_entries); + if (channel->push_infos == NULL) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + if (uvm_push_info_is_tracking_acquires()) { + channel->push_acquire_infos = uvm_kvmalloc_zero(sizeof(*channel->push_acquire_infos) * channel->num_gpfifo_entries); + if (channel->push_acquire_infos == NULL) { + status = NV_ERR_NO_MEMORY; + goto error; + } + } + + for (i = 0; i < channel->num_gpfifo_entries; i++) + list_add_tail(&channel->push_infos[i].available_list_node, &channel->available_push_infos); + + status = channel_create_procfs(channel); + if (status != NV_OK) + goto error; + + return NV_OK; + +error: + channel_destroy(pool, channel); + + return status; +} + +NvU64 uvm_channel_tracking_semaphore_get_gpu_va_in_channel(uvm_channel_t *semaphore_channel, + uvm_channel_t *access_channel) +{ + uvm_gpu_semaphore_t *semaphore = &semaphore_channel->tracking_sem.semaphore; + uvm_gpu_t *gpu = uvm_channel_get_gpu(access_channel); + + return uvm_gpu_semaphore_get_gpu_va(semaphore, gpu, uvm_channel_is_proxy(access_channel)); +} + +static NV_STATUS init_channel(uvm_channel_t *channel) +{ + uvm_push_t push; + uvm_gpu_t *gpu = uvm_channel_get_gpu(channel); + NV_STATUS status = uvm_push_begin_on_channel(channel, &push, "Init channel"); + + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to begin push on channel: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + if (uvm_channel_is_ce(channel)) + gpu->parent->ce_hal->init(&push); + + + + + + gpu->parent->host_hal->init(&push); + + status = uvm_push_end_and_wait(&push); + if (status != NV_OK) + UVM_ERR_PRINT("Channel init failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + + return status; +} + +static bool channel_manager_uses_proxy_pool(uvm_channel_manager_t *manager) +{ + return uvm_gpu_is_virt_mode_sriov_heavy(manager->gpu); +} + +// Number of channels to create in a pool of the given type. +// +// TODO: Bug 1764958: Tweak this function after benchmarking real workloads. 
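+//
+// channel_pool_add() below allocates this many uvm_channel_t entries per pool
+// and creates and initializes each of them before the pool is handed out.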
+static unsigned channel_pool_type_num_channels(uvm_channel_pool_type_t pool_type) +{ + // TODO: Bug 3387454: The vGPU plugin implementation supports a single + // proxy channel per GPU + if (pool_type == UVM_CHANNEL_POOL_TYPE_CE_PROXY) + return 1; + + return 2; +} + +static void channel_pool_destroy(uvm_channel_pool_t *pool) +{ + UVM_ASSERT(pool->manager->num_channel_pools > 0); + + while (pool->num_channels > 0) + channel_destroy(pool, pool->channels + pool->num_channels - 1); + + uvm_kvfree(pool->channels); + pool->manager->num_channel_pools--; +} + +static NV_STATUS channel_pool_add(uvm_channel_manager_t *channel_manager, + uvm_channel_pool_type_t pool_type, + unsigned engine_index, + uvm_channel_pool_t **pool_out) +{ + NV_STATUS status; + unsigned i; + unsigned num_channels; + uvm_channel_pool_t *pool; + + pool = channel_manager->channel_pools + channel_manager->num_channel_pools; + pool->manager = channel_manager; + pool->engine_index = engine_index; + pool->pool_type = pool_type; + + uvm_spin_lock_init(&pool->lock, UVM_LOCK_ORDER_CHANNEL); + + num_channels = channel_pool_type_num_channels(pool_type); + + pool->channels = uvm_kvmalloc_zero(sizeof(*pool->channels) * num_channels); + if (!pool->channels) + return NV_ERR_NO_MEMORY; + + channel_manager->num_channel_pools++; + + for (i = 0; i < num_channels; i++) { + uvm_channel_t *channel = pool->channels + i; + + status = channel_create(pool, channel); + if (status != NV_OK) + goto error; + + status = init_channel(channel); + if (status != NV_OK) + goto error; + } + + *pool_out = pool; + return NV_OK; + + error: + channel_pool_destroy(pool); + return status; +} + +static bool ce_usable_for_channel_type(uvm_channel_type_t type, const UvmGpuCopyEngineCaps *cap) +{ + if (!cap->supported || cap->grce) + return false; + + switch (type) { + case UVM_CHANNEL_TYPE_CPU_TO_GPU: + case UVM_CHANNEL_TYPE_GPU_TO_CPU: + return cap->sysmem; + case UVM_CHANNEL_TYPE_GPU_INTERNAL: + case UVM_CHANNEL_TYPE_MEMOPS: + return true; + case UVM_CHANNEL_TYPE_GPU_TO_GPU: + return cap->p2p; + default: + UVM_ASSERT_MSG(false, "Unexpected channel type 0x%x\n", type); + return false; + } +} + +static unsigned ce_usage_count(NvU32 ce, const unsigned *preferred_ce) +{ + unsigned i; + unsigned count = 0; + + UVM_ASSERT(ce < UVM_COPY_ENGINE_COUNT_MAX); + + for (i = 0; i < UVM_CHANNEL_TYPE_CE_COUNT; i++) { + if (ce == preferred_ce[i]) + count++; + } + + return count; +} + +// Returns negative if the first CE should be considered better than the second +static int compare_ce_for_channel_type(const UvmGpuCopyEngineCaps *ce_caps, + uvm_channel_type_t type, + NvU32 ce_index0, + NvU32 ce_index1, + NvU32 *preferred_ce) +{ + unsigned ce0_usage, ce1_usage; + const UvmGpuCopyEngineCaps *cap0 = ce_caps + ce_index0; + const UvmGpuCopyEngineCaps *cap1 = ce_caps + ce_index1; + + UVM_ASSERT(ce_usable_for_channel_type(type, cap0)); + UVM_ASSERT(ce_usable_for_channel_type(type, cap1)); + UVM_ASSERT(ce_index0 < UVM_COPY_ENGINE_COUNT_MAX); + UVM_ASSERT(ce_index1 < UVM_COPY_ENGINE_COUNT_MAX); + UVM_ASSERT(ce_index0 != ce_index1); + + switch (type) { + case UVM_CHANNEL_TYPE_CPU_TO_GPU: + // For CPU to GPU fast sysmem read is the most important + if (cap0->sysmemRead != cap1->sysmemRead) + return cap1->sysmemRead - cap0->sysmemRead; + + // Prefer not to take up the CEs for nvlink P2P + if (cap0->nvlinkP2p != cap1->nvlinkP2p) + return cap0->nvlinkP2p - cap1->nvlinkP2p; + + break; + + case UVM_CHANNEL_TYPE_GPU_TO_CPU: + // For GPU to CPU fast sysmem write is the most important + if 
(cap0->sysmemWrite != cap1->sysmemWrite) + return cap1->sysmemWrite - cap0->sysmemWrite; + + // Prefer not to take up the CEs for nvlink P2P + if (cap0->nvlinkP2p != cap1->nvlinkP2p) + return cap0->nvlinkP2p - cap1->nvlinkP2p; + + break; + + case UVM_CHANNEL_TYPE_GPU_TO_GPU: + // Prefer the LCE with the most PCEs + { + int pce_diff = (int)hweight32(cap1->cePceMask) - (int)hweight32(cap0->cePceMask); + + if (pce_diff != 0) + return pce_diff; + } + + break; + + case UVM_CHANNEL_TYPE_GPU_INTERNAL: + // We want the max possible bandwidth for CEs used for GPU_INTERNAL, + // for now assume that the number of PCEs is a good measure. + // TODO: Bug 1735254: Add a direct CE query for local FB bandwidth + { + int pce_diff = (int)hweight32(cap1->cePceMask) - (int)hweight32(cap0->cePceMask); + + if (pce_diff != 0) + return pce_diff; + } + + // Leave P2P CEs to the GPU_TO_GPU channel type, when possible + if (cap0->nvlinkP2p != cap1->nvlinkP2p) + return cap0->nvlinkP2p - cap1->nvlinkP2p; + + break; + + case UVM_CHANNEL_TYPE_MEMOPS: + // For MEMOPS we mostly care about latency which should be better + // with less used CEs (although we only know about our own usage and + // not system-wide) so just break out to get the default ordering + // which prioritizes usage count. + break; + + default: + UVM_ASSERT_MSG(false, "Unexpected channel type 0x%x\n", type); + return 0; + } + + // By default, prefer less used CEs (within the UVM driver at least) + ce0_usage = ce_usage_count(ce_index0, preferred_ce); + ce1_usage = ce_usage_count(ce_index1, preferred_ce); + + if (ce0_usage != ce1_usage) + return ce0_usage - ce1_usage; + + // And CEs that don't share PCEs + if (cap0->shared != cap1->shared) + return cap0->shared - cap1->shared; + + // Last resort, just order by index + return ce_index0 - ce_index1; +} + +// Identify usable CEs, and select the preferred CE for a given channel type. +static NV_STATUS pick_ce_for_channel_type(uvm_channel_manager_t *manager, + const UvmGpuCopyEngineCaps *ce_caps, + uvm_channel_type_t type, + unsigned *preferred_ce) +{ + NvU32 i; + NvU32 best_ce = UVM_COPY_ENGINE_COUNT_MAX; + + UVM_ASSERT(type < UVM_CHANNEL_TYPE_CE_COUNT); + + for (i = 0; i < UVM_COPY_ENGINE_COUNT_MAX; ++i) { + const UvmGpuCopyEngineCaps *cap = ce_caps + i; + + if (!ce_usable_for_channel_type(type, cap)) + continue; + + __set_bit(i, manager->ce_mask); + + if (best_ce == UVM_COPY_ENGINE_COUNT_MAX) { + best_ce = i; + continue; + } + + if (compare_ce_for_channel_type(ce_caps, type, i, best_ce, preferred_ce) < 0) + best_ce = i; + } + + if (best_ce == UVM_COPY_ENGINE_COUNT_MAX) { + UVM_ERR_PRINT("Failed to find a suitable CE for channel type %s\n", uvm_channel_type_to_string(type)); + return NV_ERR_NOT_SUPPORTED; + } + + preferred_ce[type] = best_ce; + return NV_OK; +} + +static NV_STATUS channel_manager_pick_copy_engines(uvm_channel_manager_t *manager, unsigned *preferred_ce) +{ + NV_STATUS status; + unsigned i; + UvmGpuCopyEnginesCaps ces_caps; + uvm_channel_type_t types[] = {UVM_CHANNEL_TYPE_CPU_TO_GPU, + UVM_CHANNEL_TYPE_GPU_TO_CPU, + UVM_CHANNEL_TYPE_GPU_INTERNAL, + UVM_CHANNEL_TYPE_GPU_TO_GPU, + UVM_CHANNEL_TYPE_MEMOPS}; + + memset(&ces_caps, 0, sizeof(ces_caps)); + status = uvm_rm_locked_call(nvUvmInterfaceQueryCopyEnginesCaps(uvm_gpu_device_handle(manager->gpu), &ces_caps)); + if (status != NV_OK) + return status; + + // The order of picking CEs for each type matters as it's affected by the + // usage count of each CE and it increases every time a CE is selected. 
+ // MEMOPS has the least priority as it only cares about low usage of the + // CE to improve latency + for (i = 0; i < ARRAY_SIZE(types); ++i) { + status = pick_ce_for_channel_type(manager, ces_caps.copyEngineCaps, types[i], preferred_ce); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +// Return the pool corresponding to the given CE index +// +// This function cannot be used to access the proxy pool in SR-IOV heavy. +static uvm_channel_pool_t *channel_manager_ce_pool(uvm_channel_manager_t *manager, NvU32 ce) +{ + uvm_channel_pool_t *pool; + + UVM_ASSERT(test_bit(ce, manager->ce_mask)); + + // The index of the pool associated with 'ce' is the number of usable CEs + // in [0, ce) + pool = manager->channel_pools + bitmap_weight(manager->ce_mask, ce); + + UVM_ASSERT(pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE); + UVM_ASSERT(pool->engine_index == ce); + + return pool; +} + +void uvm_channel_manager_set_p2p_ce(uvm_channel_manager_t *manager, uvm_gpu_t *peer, NvU32 optimal_ce) +{ + const NvU32 peer_gpu_index = uvm_id_gpu_index(peer->id); + + UVM_ASSERT(manager->gpu != peer); + UVM_ASSERT(optimal_ce < UVM_COPY_ENGINE_COUNT_MAX); + + manager->pool_to_use.gpu_to_gpu[peer_gpu_index] = channel_manager_ce_pool(manager, optimal_ce); +} + +static bool is_string_valid_location(const char *loc) +{ + return strcmp(uvm_channel_gpfifo_loc, "sys") == 0 || + strcmp(uvm_channel_gpfifo_loc, "vid") == 0 || + strcmp(uvm_channel_gpfifo_loc, "auto") == 0; +} + +static UVM_BUFFER_LOCATION string_to_buffer_location(const char *loc) +{ + UVM_ASSERT(is_string_valid_location(loc)); + + if (strcmp(loc, "sys") == 0) + return UVM_BUFFER_LOCATION_SYS; + else if (strcmp(loc, "vid") == 0) + return UVM_BUFFER_LOCATION_VID; + else + return UVM_BUFFER_LOCATION_DEFAULT; +} + +static const char *buffer_location_to_string(UVM_BUFFER_LOCATION loc) +{ + if (loc == UVM_BUFFER_LOCATION_SYS) + return "sys"; + else if (loc == UVM_BUFFER_LOCATION_VID) + return "vid"; + else if (loc == UVM_BUFFER_LOCATION_DEFAULT) + return "auto"; + + UVM_ASSERT_MSG(false, "Invalid buffer locationvalue %d\n", loc); + return NULL; +} + +static void init_channel_manager_conf(uvm_channel_manager_t *manager) +{ + const char *gpfifo_loc_value; + const char *gpput_loc_value; + const char *pushbuffer_loc_value; + + uvm_gpu_t *gpu = manager->gpu; + + // 1- Number of GPFIFO entries + manager->conf.num_gpfifo_entries = uvm_channel_num_gpfifo_entries; + + if (uvm_channel_num_gpfifo_entries < UVM_CHANNEL_NUM_GPFIFO_ENTRIES_MIN) + manager->conf.num_gpfifo_entries = UVM_CHANNEL_NUM_GPFIFO_ENTRIES_MIN; + else if (uvm_channel_num_gpfifo_entries > UVM_CHANNEL_NUM_GPFIFO_ENTRIES_MAX) + manager->conf.num_gpfifo_entries = UVM_CHANNEL_NUM_GPFIFO_ENTRIES_MAX; + + if (!is_power_of_2(manager->conf.num_gpfifo_entries)) + manager->conf.num_gpfifo_entries = UVM_CHANNEL_NUM_GPFIFO_ENTRIES_DEFAULT; + + if (manager->conf.num_gpfifo_entries != uvm_channel_num_gpfifo_entries) { + pr_info("Invalid value for uvm_channel_num_gpfifo_entries = %u, using %u instead\n", + uvm_channel_num_gpfifo_entries, + manager->conf.num_gpfifo_entries); + } + + // 2- Allocation locations + + // Override if the GPU doesn't have memory + if (gpu->mem_info.size == 0) { + manager->conf.pushbuffer_loc = UVM_BUFFER_LOCATION_SYS; + manager->conf.gpfifo_loc = UVM_BUFFER_LOCATION_SYS; + manager->conf.gpput_loc = UVM_BUFFER_LOCATION_SYS; + return; + } + + manager->conf.pushbuffer_loc = UVM_BUFFER_LOCATION_SYS; + + pushbuffer_loc_value = uvm_channel_pushbuffer_loc; + if 
(!is_string_valid_location(pushbuffer_loc_value)) { + pushbuffer_loc_value = UVM_CHANNEL_PUSHBUFFER_LOC_DEFAULT; + pr_info("Invalid value for uvm_channel_pushbuffer_loc = %s, using %s instead\n", + uvm_channel_pushbuffer_loc, + pushbuffer_loc_value); + } + + // Override the default value if requested by the user + if (strcmp(pushbuffer_loc_value, "vid") == 0) { + // aarch64 requires memset_io/memcpy_io instead of memset/memcpy for + // mapped GPU memory. The existing push paths only use memset/memcpy, + // so force the location to sys for now. + // TODO: Bug 2904133: Remove the following "if" after the bug is fixed. + if (NVCPU_IS_AARCH64) { + pr_info("uvm_channel_pushbuffer_loc = %s is not supported on AARCH64, using sys instead\n", + pushbuffer_loc_value); + manager->conf.pushbuffer_loc = UVM_BUFFER_LOCATION_SYS; + } + else { + manager->conf.pushbuffer_loc = UVM_BUFFER_LOCATION_VID; + } + } + + // 3- GPFIFO/GPPut location + // Only support the knobs for GPFIFO/GPPut on Volta+ + if (!gpu->parent->gpfifo_in_vidmem_supported) { + if (manager->conf.gpput_loc == UVM_BUFFER_LOCATION_SYS) { + pr_info("CAUTION: allocating GPPut in sysmem is NOT supported and may crash the system, using %s instead\n", + buffer_location_to_string(UVM_BUFFER_LOCATION_DEFAULT)); + } + + manager->conf.gpfifo_loc = UVM_BUFFER_LOCATION_DEFAULT; + manager->conf.gpput_loc = UVM_BUFFER_LOCATION_DEFAULT; + + return; + } + + gpfifo_loc_value = uvm_channel_gpfifo_loc; + if (!is_string_valid_location(gpfifo_loc_value)) { + gpfifo_loc_value = UVM_CHANNEL_GPFIFO_LOC_DEFAULT; + pr_info("Invalid value for uvm_channel_gpfifo_loc = %s, using %s instead\n", + uvm_channel_gpfifo_loc, + gpfifo_loc_value); + } + + gpput_loc_value = uvm_channel_gpput_loc; + if (!is_string_valid_location(gpput_loc_value)) { + gpput_loc_value = UVM_CHANNEL_GPPUT_LOC_DEFAULT; + pr_info("Invalid value for uvm_channel_gpput_loc = %s, using %s instead\n", + uvm_channel_gpput_loc, + gpput_loc_value); + } + + // By default we place GPFIFO and GPPUT on vidmem as it potentially has + // lower latency. + manager->conf.gpfifo_loc = UVM_BUFFER_LOCATION_VID; + manager->conf.gpput_loc = UVM_BUFFER_LOCATION_VID; + + // TODO: Bug 1766129: However, this will likely be different on P9 systems. + // Leaving GPFIFO on sysmem for now. GPPut on sysmem is not supported in + // production, so we keep it on vidmem, too. + if (gpu->parent->sysmem_link >= UVM_GPU_LINK_NVLINK_2) + manager->conf.gpfifo_loc = UVM_BUFFER_LOCATION_SYS; + + // Override defaults + if (string_to_buffer_location(gpfifo_loc_value) != UVM_BUFFER_LOCATION_DEFAULT) + manager->conf.gpfifo_loc = string_to_buffer_location(gpfifo_loc_value); + + if (string_to_buffer_location(gpput_loc_value) != UVM_BUFFER_LOCATION_DEFAULT) + manager->conf.gpput_loc = string_to_buffer_location(gpput_loc_value); +} + +// A pool is created for each usable CE, even if it has not been selected as the +// preferred CE for any type, because as more information is discovered (for +// example, a pair of peer GPUs is added) we may start using the previously idle +// channels. 
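+//
+// Note that channel_manager_ce_pool() relies on pools being created in ce_mask
+// bit order: the pool for a given CE sits at index bitmap_weight(ce_mask, ce),
+// i.e. the number of usable CEs below it, which is exactly the order produced
+// by the for_each_set_bit() loop below.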
+static NV_STATUS channel_manager_create_pools(uvm_channel_manager_t *manager) +{ + NV_STATUS status; + unsigned ce, type; + unsigned num_channel_pools; + unsigned preferred_ce[UVM_CHANNEL_TYPE_CE_COUNT]; + uvm_channel_pool_t *pool = NULL; + + for (type = 0; type < ARRAY_SIZE(preferred_ce); type++) + preferred_ce[type] = UVM_COPY_ENGINE_COUNT_MAX; + + status = channel_manager_pick_copy_engines(manager, preferred_ce); + if (status != NV_OK) + return status; + + // CE channel pools + num_channel_pools = bitmap_weight(manager->ce_mask, UVM_COPY_ENGINE_COUNT_MAX); + + // CE proxy channel pool. + if (uvm_gpu_uses_proxy_channel_pool(manager->gpu)) + num_channel_pools++; + + + + + + + + manager->channel_pools = uvm_kvmalloc_zero(sizeof(*manager->channel_pools) * num_channel_pools); + if (!manager->channel_pools) + return NV_ERR_NO_MEMORY; + + for_each_set_bit(ce, manager->ce_mask, UVM_COPY_ENGINE_COUNT_MAX) { + status = channel_pool_add(manager, UVM_CHANNEL_POOL_TYPE_CE, ce, &pool); + if (status != NV_OK) + return status; + } + + // Assign channel types to pools + for (type = 0; type < ARRAY_SIZE(preferred_ce); type++) { + unsigned ce = preferred_ce[type]; + + UVM_ASSERT(test_bit(ce, manager->ce_mask)); + + manager->pool_to_use.default_for_type[type] = channel_manager_ce_pool(manager, ce); + } + + // In SR-IOV heavy, add an additional, single-channel, pool that is + // dedicated to the MEMOPS type. + if (uvm_gpu_uses_proxy_channel_pool(manager->gpu)) { + uvm_channel_type_t channel_type = uvm_channel_proxy_channel_type(); + status = channel_pool_add(manager, UVM_CHANNEL_POOL_TYPE_CE_PROXY, preferred_ce[channel_type], &pool); + if (status != NV_OK) + return status; + + manager->pool_to_use.default_for_type[channel_type] = pool; + } + + + + + + + + + + + + + return NV_OK; +} + +NV_STATUS uvm_channel_manager_create(uvm_gpu_t *gpu, uvm_channel_manager_t **channel_manager_out) +{ + NV_STATUS status = NV_OK; + uvm_channel_manager_t *channel_manager; + + channel_manager = uvm_kvmalloc_zero(sizeof(*channel_manager)); + if (!channel_manager) + return NV_ERR_NO_MEMORY; + + channel_manager->gpu = gpu; + init_channel_manager_conf(channel_manager); + status = uvm_pushbuffer_create(channel_manager, &channel_manager->pushbuffer); + if (status != NV_OK) + goto error; + + status = manager_create_procfs_dirs(channel_manager); + if (status != NV_OK) + goto error; + + status = channel_manager_create_pools(channel_manager); + if (status != NV_OK) + goto error; + + status = manager_create_procfs(channel_manager); + if (status != NV_OK) + goto error; + + *channel_manager_out = channel_manager; + + return status; + +error: + uvm_channel_manager_destroy(channel_manager); + return status; +} + +static void channel_manager_destroy_pools(uvm_channel_manager_t *manager) +{ + while (manager->num_channel_pools > 0) + channel_pool_destroy(manager->channel_pools + manager->num_channel_pools - 1); + + uvm_kvfree(manager->channel_pools); +} + +void uvm_channel_manager_destroy(uvm_channel_manager_t *channel_manager) +{ + if (channel_manager == NULL) + return; + + uvm_procfs_destroy_entry(channel_manager->procfs.pending_pushes); + + channel_manager_destroy_pools(channel_manager); + + uvm_procfs_destroy_entry(channel_manager->procfs.channels_dir); + + uvm_pushbuffer_destroy(channel_manager->pushbuffer); + + uvm_kvfree(channel_manager); +} + +bool uvm_channel_is_privileged(uvm_channel_t *channel) +{ + if (uvm_gpu_is_virt_mode_sriov_heavy(uvm_channel_get_gpu(channel))) + return uvm_channel_is_proxy(channel); + + return true; +} + +// 
Return the first channel pool of the given type(s) starting at begin_pool +// (included). +// +// The pool type mask must be a non empty mask of uvm_channel_pool_type_t +// values. +static uvm_channel_pool_t *channel_pool_first_from(uvm_channel_manager_t *manager, + uvm_channel_pool_t *begin_pool, + NvU32 pool_type_mask) +{ + uvm_channel_pool_t *curr_pool, *end_pool; + + UVM_ASSERT(manager->channel_pools != NULL); + UVM_ASSERT(begin_pool != NULL); + UVM_ASSERT(begin_pool >= manager->channel_pools); + UVM_ASSERT(pool_type_mask > 0); + UVM_ASSERT(pool_type_mask <= UVM_CHANNEL_POOL_TYPE_MASK); + + end_pool = manager->channel_pools + manager->num_channel_pools; + UVM_ASSERT(begin_pool <= end_pool); + + for (curr_pool = begin_pool; curr_pool != end_pool; curr_pool++) { + if (curr_pool->pool_type & pool_type_mask) + return curr_pool; + } + + return NULL; +} + +uvm_channel_pool_t *uvm_channel_pool_first(uvm_channel_manager_t *manager, NvU32 pool_type_mask) +{ + return channel_pool_first_from(manager, manager->channel_pools, pool_type_mask); +} + +uvm_channel_pool_t *uvm_channel_pool_next(uvm_channel_manager_t *manager, + uvm_channel_pool_t *pool, + NvU32 pool_type_mask) +{ + return channel_pool_first_from(manager, pool + 1, pool_type_mask); +} + +uvm_channel_t *uvm_channel_any_of_type(uvm_channel_manager_t *manager, NvU32 pool_type_mask) +{ + uvm_channel_pool_t *pool = uvm_channel_pool_first(manager, pool_type_mask); + + if (pool == NULL) + return NULL; + + UVM_ASSERT(pool->channels); + + return pool->channels; +} + +const char *uvm_channel_type_to_string(uvm_channel_type_t channel_type) +{ + + + + BUILD_BUG_ON(UVM_CHANNEL_TYPE_COUNT != 5); + + + switch (channel_type) { + UVM_ENUM_STRING_CASE(UVM_CHANNEL_TYPE_CPU_TO_GPU); + UVM_ENUM_STRING_CASE(UVM_CHANNEL_TYPE_GPU_TO_CPU); + UVM_ENUM_STRING_CASE(UVM_CHANNEL_TYPE_GPU_INTERNAL); + UVM_ENUM_STRING_CASE(UVM_CHANNEL_TYPE_MEMOPS); + UVM_ENUM_STRING_CASE(UVM_CHANNEL_TYPE_GPU_TO_GPU); + + + + UVM_ENUM_STRING_DEFAULT(); + } +} + +const char *uvm_channel_pool_type_to_string(uvm_channel_pool_type_t channel_pool_type) +{ + + + + BUILD_BUG_ON(UVM_CHANNEL_POOL_TYPE_COUNT != 2); + + + switch (channel_pool_type) { + UVM_ENUM_STRING_CASE(UVM_CHANNEL_POOL_TYPE_CE); + UVM_ENUM_STRING_CASE(UVM_CHANNEL_POOL_TYPE_CE_PROXY); + + + + UVM_ENUM_STRING_DEFAULT(); + } +} + +static void uvm_channel_print_info(uvm_channel_t *channel, struct seq_file *s) +{ + uvm_channel_manager_t *manager = channel->pool->manager; + UVM_SEQ_OR_DBG_PRINT(s, "Channel %s\n", channel->name); + + uvm_spin_lock(&channel->pool->lock); + + UVM_SEQ_OR_DBG_PRINT(s, "completed %llu\n", uvm_channel_update_completed_value(channel)); + UVM_SEQ_OR_DBG_PRINT(s, "queued %llu\n", channel->tracking_sem.queued_value); + UVM_SEQ_OR_DBG_PRINT(s, "GPFIFO count %u\n", channel->num_gpfifo_entries); + UVM_SEQ_OR_DBG_PRINT(s, "GPFIFO location %s\n", buffer_location_to_string(manager->conf.gpfifo_loc)); + UVM_SEQ_OR_DBG_PRINT(s, "GPPUT location %s\n", buffer_location_to_string(manager->conf.gpput_loc)); + UVM_SEQ_OR_DBG_PRINT(s, "get %u\n", channel->gpu_get); + UVM_SEQ_OR_DBG_PRINT(s, "put %u\n", channel->cpu_put); + UVM_SEQ_OR_DBG_PRINT(s, "Semaphore GPU VA 0x%llx\n", uvm_channel_tracking_semaphore_get_gpu_va(channel)); + + uvm_spin_unlock(&channel->pool->lock); +} + +static void channel_print_push_acquires(uvm_push_acquire_info_t *push_acquire_info, struct seq_file *seq) +{ + NvU32 i; + NvU32 valid_entries; + + UVM_ASSERT(uvm_push_info_is_tracking_acquires()); + UVM_ASSERT(push_acquire_info); + + if 
(push_acquire_info->num_values == 0) + return; + + valid_entries = min(push_acquire_info->num_values, (NvU32)UVM_PUSH_ACQUIRE_INFO_MAX_ENTRIES); + + for (i = 0; i < valid_entries; ++i) { + bool is_proxy = push_acquire_info->values[i].is_proxy; + + UVM_SEQ_OR_DBG_PRINT(seq, + "%s (gpu %u, channel %d:%u, value %llu)", + i == 0? " acquiring values" : "", + uvm_id_value(push_acquire_info->values[i].gpu_id), + is_proxy? -1 : push_acquire_info->values[i].runlist_id, + is_proxy? push_acquire_info->values[i].proxy.pool_index : + push_acquire_info->values[i].channel_id, + push_acquire_info->values[i].value); + } + + if (push_acquire_info->num_values > valid_entries) + UVM_SEQ_OR_DBG_PRINT(seq, " (missing %u entries)", push_acquire_info->num_values - valid_entries); + + UVM_SEQ_OR_DBG_PRINT(seq, "\n"); +} + +// Print all pending pushes and up to finished_pushes_count completed if their +// GPFIFO entries haven't been reused yet. +static void channel_print_pushes(uvm_channel_t *channel, NvU32 finished_pushes_count, struct seq_file *seq) +{ + NvU32 gpu_get; + NvU32 cpu_put; + + NvU64 completed_value = uvm_channel_update_completed_value(channel); + + uvm_spin_lock(&channel->pool->lock); + + cpu_put = channel->cpu_put; + + for (gpu_get = channel->gpu_get; gpu_get != cpu_put; gpu_get = (gpu_get + 1) % channel->num_gpfifo_entries) { + uvm_gpfifo_entry_t *entry = &channel->gpfifo_entries[gpu_get]; + uvm_push_info_t *push_info = entry->push_info; + uvm_push_acquire_info_t *push_acquire_info = NULL; + + if (entry->tracking_semaphore_value + finished_pushes_count <= completed_value) + continue; + + // Obtain the value acquire tracking information from the push_info index + if (uvm_push_info_is_tracking_acquires()) { + NvU32 push_info_index = push_info - channel->push_infos; + UVM_ASSERT(push_info_index < channel->num_gpfifo_entries); + + push_acquire_info = &channel->push_acquire_infos[push_info_index]; + } + + UVM_SEQ_OR_DBG_PRINT(seq, + " %s push '%s' started at %s:%d in %s() releasing value %llu%s", + entry->tracking_semaphore_value <= completed_value ? "finished" : "pending", + push_info->description, + push_info->filename, + push_info->line, + push_info->function, + entry->tracking_semaphore_value, + !push_acquire_info || push_acquire_info->num_values == 0? 
"\n" : ""); + + if (push_acquire_info) + channel_print_push_acquires(push_acquire_info, seq); + } + uvm_spin_unlock(&channel->pool->lock); +} + +void uvm_channel_print_pending_pushes(uvm_channel_t *channel) +{ + channel_print_pushes(channel, 0, NULL); +} + +static void channel_manager_print_pending_pushes(uvm_channel_manager_t *manager, struct seq_file *seq) +{ + uvm_channel_pool_t *pool; + + uvm_for_each_pool(pool, manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) { + UVM_SEQ_OR_DBG_PRINT(seq, "Channel %s, pending pushes:\n", channel->name); + + channel_print_pushes(channel, 0, seq); + } + } +} + +static NV_STATUS manager_create_procfs_dirs(uvm_channel_manager_t *manager) +{ + uvm_gpu_t *gpu = manager->gpu; + + // The channel manager procfs files are debug only + if (!uvm_procfs_is_debug_enabled()) + return NV_OK; + + manager->procfs.channels_dir = NV_CREATE_PROC_DIR("channels", gpu->procfs.dir); + if (manager->procfs.channels_dir == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static int nv_procfs_read_manager_pending_pushes(struct seq_file *s, void *v) +{ + uvm_channel_manager_t *manager = (uvm_channel_manager_t *)s->private; + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + channel_manager_print_pending_pushes(manager, s); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_manager_pending_pushes_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_manager_pending_pushes(s, v)); +} + +UVM_DEFINE_SINGLE_PROCFS_FILE(manager_pending_pushes_entry); + +static NV_STATUS manager_create_procfs(uvm_channel_manager_t *manager) +{ + uvm_gpu_t *gpu = manager->gpu; + + // The channel manager procfs files are debug only + if (!uvm_procfs_is_debug_enabled()) + return NV_OK; + + manager->procfs.pending_pushes = NV_CREATE_PROC_FILE("pending_pushes", + gpu->procfs.dir, + manager_pending_pushes_entry, + manager); + if (manager->procfs.pending_pushes == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static int nv_procfs_read_channel_info(struct seq_file *s, void *v) +{ + uvm_channel_t *channel = (uvm_channel_t *)s->private; + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + uvm_channel_print_info(channel, s); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_channel_info_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_channel_info(s, v)); +} + +UVM_DEFINE_SINGLE_PROCFS_FILE(channel_info_entry); + +static int nv_procfs_read_channel_pushes(struct seq_file *s, void *v) +{ + uvm_channel_t *channel = (uvm_channel_t *)s->private; + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + // Include up to 5 finished pushes for some context + channel_print_pushes(channel, 5, s); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_channel_pushes_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_channel_pushes(s, v)); +} + +UVM_DEFINE_SINGLE_PROCFS_FILE(channel_pushes_entry); + +static NV_STATUS channel_create_procfs(uvm_channel_t *channel) +{ + char dirname[16]; + uvm_channel_manager_t *manager = channel->pool->manager; + + // The channel procfs files are debug only + if (!uvm_procfs_is_debug_enabled()) + return NV_OK; + + // For internal channels, the directory name contains the HW IDs. Those are + // not available for proxy channels, so use -1: instead. 
+ if (uvm_channel_is_proxy(channel)) + snprintf(dirname, sizeof(dirname), "-1:%u", uvm_channel_index_in_pool(channel)); + else + snprintf(dirname, sizeof(dirname), "%u:%u", channel->channel_info.hwRunlistId, channel->channel_info.hwChannelId); + + channel->procfs.dir = NV_CREATE_PROC_DIR(dirname, manager->procfs.channels_dir); + if (channel->procfs.dir == NULL) + return NV_ERR_OPERATING_SYSTEM; + + channel->procfs.info = NV_CREATE_PROC_FILE("info", channel->procfs.dir, channel_info_entry, channel); + if (channel->procfs.info == NULL) + return NV_ERR_OPERATING_SYSTEM; + + channel->procfs.pushes = NV_CREATE_PROC_FILE("pushes", channel->procfs.dir, channel_pushes_entry, channel); + if (channel->procfs.pushes == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_channel.h b/kernel-open/nvidia-uvm/uvm_channel.h new file mode 100644 index 000000000..7e5add260 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_channel.h @@ -0,0 +1,487 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_CHANNEL_H__ +#define __UVM_CHANNEL_H__ + +#include "nv_uvm_types.h" +#include "uvm_forward_decl.h" +#include "uvm_gpu_semaphore.h" +#include "uvm_pushbuffer.h" +#include "uvm_tracker.h" + +// +// UVM channels +// +// A channel manager is created as part of the GPU addition. This involves +// creating channels for each of the supported types (uvm_channel_type_t) in +// separate channel pools possibly using different CE instances in the HW. Each +// channel has a uvm_gpu_tracking_semaphore_t and a set of uvm_gpfifo_entry_t +// (one per each HW GPFIFO entry) allowing to track completion of pushes on the +// channel. +// +// Beginning a push on a channel implies reserving a GPFIFO entry in that +// channel and hence there can only be as many on-going pushes per channel as +// there are free GPFIFO entries. This ensures that ending a push won't have to +// wait for a GPFIFO entry to free up. 
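[Editor's note] As a concrete illustration of the push lifecycle described in the comment above, the following minimal, hypothetical sketch shows how a caller begins a push (which reserves a free GPFIFO entry on a channel of the requested type), queues CE work, ends the push, and waits for the channel manager to idle. It is based on the uvm_push_*(), CE HAL, and uvm_channel_manager_wait() calls that appear elsewhere in this patch; the helper name, error handling, and surrounding driver context are illustrative only and not part of the commit.

    #include "uvm_channel.h"
    #include "uvm_hal.h"
    #include "uvm_push.h"

    static NV_STATUS example_memset_push(uvm_gpu_t *gpu, NvU64 gpu_va, size_t size)
    {
        uvm_push_t push;
        NV_STATUS status;

        // Beginning the push reserves a free GPFIFO entry on a channel of the
        // requested type, so ending the push later will not have to wait.
        status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "example memset");
        if (status != NV_OK)
            return status;

        // Queue the work through the CE HAL; as in the channel tests, the size
        // passed to this method is a byte count that must be a multiple of 4.
        gpu->parent->ce_hal->memset_v_4(&push, gpu_va, 0, size);

        // Ending the push submits it; completion is signalled through the
        // channel's tracking semaphore value for this GPFIFO entry.
        uvm_push_end(&push);

        // Wait for everything pending on this GPU's channels; a tracker could
        // be used instead to wait for just this one push.
        return uvm_channel_manager_wait(gpu->channel_manager);
    }
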
+// + +// Channel types +typedef enum +{ + // CPU to GPU copies + UVM_CHANNEL_TYPE_CPU_TO_GPU, + + // GPU to CPU copies + UVM_CHANNEL_TYPE_GPU_TO_CPU, + + // Memsets and copies within the GPU + UVM_CHANNEL_TYPE_GPU_INTERNAL, + + // Memops and small memsets/copies for writing PTEs + UVM_CHANNEL_TYPE_MEMOPS, + + // GPU to GPU peer copies + UVM_CHANNEL_TYPE_GPU_TO_GPU, + + UVM_CHANNEL_TYPE_CE_COUNT, + + // ^^^^^^ + // Channel types backed by a CE. + + + + + + + + + + + UVM_CHANNEL_TYPE_COUNT = UVM_CHANNEL_TYPE_CE_COUNT, + +} uvm_channel_type_t; + +typedef enum +{ + // A pool that contains CE channels owned by UVM. + UVM_CHANNEL_POOL_TYPE_CE = (1 << 0), + + // A proxy pool contains only proxy channels, so it only exists in SR-IOV + // heavy. The pool is only used for UVM_CHANNEL_TYPE_MEMOPS pushes. + // + // A proxy channel is a privileged CE channel owned by the vGPU plugin. A + // proxy channel cannot be manipulated directly by the UVM driver, who + // instead can only submit work to it by invoking an RM API. + // + // There is a single proxy pool and channel per GPU. + UVM_CHANNEL_POOL_TYPE_CE_PROXY = (1 << 1), + + + + + + + + + UVM_CHANNEL_POOL_TYPE_COUNT = 2, + + + // A mask used to select pools of any type. + UVM_CHANNEL_POOL_TYPE_MASK = ((1U << UVM_CHANNEL_POOL_TYPE_COUNT) - 1) +} uvm_channel_pool_type_t; + +struct uvm_gpfifo_entry_struct +{ + // Offset of the pushbuffer in the pushbuffer allocation used by this entry + NvU32 pushbuffer_offset; + + // Size of the pushbuffer used for this entry + NvU32 pushbuffer_size; + + // List node used by the pushbuffer tracking + struct list_head pending_list_node; + + // Channel tracking semaphore value that indicates completion of this entry + NvU64 tracking_semaphore_value; + + // Push info for the pending push that used this GPFIFO entry + uvm_push_info_t *push_info; +}; + +// A channel pool is a set of channels that use the same engine. For example, +// all channels in a CE pool share the same (logical) Copy Engine. +typedef struct +{ + // Owning channel manager + uvm_channel_manager_t *manager; + + // Channels in this pool + uvm_channel_t *channels; + + // Number of elements in the channel array + NvU32 num_channels; + + // Index of the engine associated with the pool (index is an offset from the + // first engine of the same engine type.) + unsigned engine_index; + + // Pool type: Refer to the uvm_channel_pool_type_t enum. + uvm_channel_pool_type_t pool_type; + + // Lock protecting the state of channels in the pool + uvm_spinlock_t lock; +} uvm_channel_pool_t; + +struct uvm_channel_struct +{ + // Owning pool + uvm_channel_pool_t *pool; + + // The channel name contains the CE index, and (for UVM internal channels) + // the HW runlist and channel IDs. + char name[64]; + + // Array of gpfifo entries, one per each HW GPFIFO + uvm_gpfifo_entry_t *gpfifo_entries; + + // Number of GPFIFO entries in gpfifo_entries + NvU32 num_gpfifo_entries; + + // Latest GPFIFO entry submitted to the GPU + // Updated when new pushes are submitted to the GPU in + // uvm_channel_end_push(). + NvU32 cpu_put; + + // Latest GPFIFO entry completed by the GPU + // Updated by uvm_channel_update_progress() after checking pending GPFIFOs + // for completion. + NvU32 gpu_get; + + // Number of currently on-going pushes on this channel + // A new push is only allowed to begin on the channel if there is a free + // GPFIFO entry for it. 
+ NvU32 current_pushes_count; + + // Array of uvm_push_info_t for all pending pushes on the channel + uvm_push_info_t *push_infos; + + // Array of uvm_push_acquire_info_t for all pending pushes on the channel. + // Each entry corresponds to the push_infos entry with the same index. + uvm_push_acquire_info_t *push_acquire_infos; + + // List of uvm_push_info_entry_t that are currently available. A push info + // entry is not available if it has been assigned to a push + // (uvm_push_begin), and the GPFIFO entry associated with the push has not + // been marked as completed. + struct list_head available_push_infos; + + // GPU tracking semaphore tracking the work in the channel + // Each push on the channel increments the semaphore, see + // uvm_channel_end_push(). + uvm_gpu_tracking_semaphore_t tracking_sem; + + + + + + + + + + + + + + + + + + + + + + // RM channel information + union + { + // UVM internal channels + struct + { + // UVM-RM interface handle + uvmGpuChannelHandle handle; + + // Channel state populated by RM. Includes the GPFIFO, error + // notifier, work submission information etc. + UvmGpuChannelInfo channel_info; + }; + + // Proxy channels (SR-IOV heavy only) + struct + { + // UVM-RM interface handle + UvmGpuPagingChannelHandle handle; + + // Channel state populated by RM. Includes the error notifier. + UvmGpuPagingChannelInfo channel_info; + } proxy; + }; + + struct + { + struct proc_dir_entry *dir; + struct proc_dir_entry *info; + struct proc_dir_entry *pushes; + } procfs; + + // Information managed by the tools event notification mechanism. Mainly + // used to keep a list of channels with pending events, which is needed + // to collect the timestamps of asynchronous operations. + struct + { + struct list_head channel_list_node; + NvU32 pending_event_count; + } tools; +}; + +struct uvm_channel_manager_struct +{ + // The owning GPU + uvm_gpu_t *gpu; + + // The pushbuffer used for all pushes done with this channel manager + uvm_pushbuffer_t *pushbuffer; + + // Array of channel pools. + uvm_channel_pool_t *channel_pools; + + // Number of elements in the pool array + unsigned num_channel_pools; + + // Mask containing the indexes of the usable Copy Engines. Each usable CE + // has a pool associated with it, see channel_manager_ce_pool + DECLARE_BITMAP(ce_mask, UVM_COPY_ENGINE_COUNT_MAX); + + struct + { + // Pools to be used by each channel type by default. + // + // Transfers of a given type may use a pool different from that in + // default_for_type[type]. For example, transfers to NvLink GPU + // peers may instead use the more optimal pool stored in the gpu_to_gpu + // array + uvm_channel_pool_t *default_for_type[UVM_CHANNEL_TYPE_COUNT]; + + // Optimal pools to use when writing from the owning GPU to its NvLink + // peers. + // If there is no optimal pool (the entry is NULL), use default pool + // default_for_type[UVM_CHANNEL_GPU_TO_GPU] instead. 
+ uvm_channel_pool_t *gpu_to_gpu[UVM_ID_MAX_GPUS]; + } pool_to_use; + + struct + { + struct proc_dir_entry *channels_dir; + struct proc_dir_entry *pending_pushes; + } procfs; + + struct + { + NvU32 num_gpfifo_entries; + UVM_BUFFER_LOCATION gpfifo_loc; + UVM_BUFFER_LOCATION gpput_loc; + UVM_BUFFER_LOCATION pushbuffer_loc; + } conf; +}; + +// Create a channel manager for the GPU +NV_STATUS uvm_channel_manager_create(uvm_gpu_t *gpu, uvm_channel_manager_t **manager_out); + +static bool uvm_channel_is_proxy(uvm_channel_t *channel) +{ + UVM_ASSERT(channel->pool->pool_type < UVM_CHANNEL_POOL_TYPE_MASK); + return channel->pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE_PROXY; +} + +static bool uvm_channel_is_ce(uvm_channel_t *channel) +{ + UVM_ASSERT(channel->pool->pool_type < UVM_CHANNEL_POOL_TYPE_MASK); + return (channel->pool->pool_type == UVM_CHANNEL_POOL_TYPE_CE) || uvm_channel_is_proxy(channel); +} + + + + + + + + + +// Proxy channels are used to push page tree related methods, so their channel +// type is UVM_CHANNEL_TYPE_MEMOPS. +static uvm_channel_type_t uvm_channel_proxy_channel_type(void) +{ + return UVM_CHANNEL_TYPE_MEMOPS; +} + +// Privileged channels support all the Host and engine methods, while +// non-privileged channels don't support privileged methods. +// +// A major limitation of non-privileged CE channels is lack of physical +// addressing support. +bool uvm_channel_is_privileged(uvm_channel_t *channel); + +// Destroy the channel manager +void uvm_channel_manager_destroy(uvm_channel_manager_t *channel_manager); + +// Get the current status of the channel +// Returns NV_OK if the channel is in a good state and NV_ERR_RC_ERROR +// otherwise. Notably this never sets the global fatal error. +NV_STATUS uvm_channel_get_status(uvm_channel_t *channel); + +// Check for channel errors +// Checks for channel errors by calling uvm_channel_get_status(). If an error +// occurred, sets the global fatal error and prints errors. +NV_STATUS uvm_channel_check_errors(uvm_channel_t *channel); + +// Check errors on all channels in the channel manager +// Also includes uvm_global_get_status +NV_STATUS uvm_channel_manager_check_errors(uvm_channel_manager_t *channel_manager); + +// Retrieve the GPFIFO entry that caused a channel error +// The channel has to be in error state prior to calling this function. +uvm_gpfifo_entry_t *uvm_channel_get_fatal_entry(uvm_channel_t *channel); + +// Update progress of a specific channel +// Returns the number of still pending GPFIFO entries for that channel. +// Notably some of the pending GPFIFO entries might be already completed, but +// the update early-outs after completing a fixed number of them to spread the +// cost of the updates across calls. +NvU32 uvm_channel_update_progress(uvm_channel_t *channel); + +// Update progress of all channels +// Returns the number of still pending GPFIFO entries for all channels. +// Notably some of the pending GPFIFO entries might be already completed, but +// the update early-outs after completing a fixed number of them to spread the +// cost of the updates across calls. +NvU32 uvm_channel_manager_update_progress(uvm_channel_manager_t *channel_manager); + +// Wait for all channels to idle +// It waits for anything that is running, but doesn't prevent new work from +// beginning. +NV_STATUS uvm_channel_manager_wait(uvm_channel_manager_t *manager); + +// Get the GPU VA of semaphore_channel's tracking semaphore within the VA space +// associated with access_channel. 
+// +// The channels can belong to different GPUs, the same GPU, or even be +// identical, in which case uvm_channel_tracking_semaphore_get_gpu_va can be +// used instead. +NvU64 uvm_channel_tracking_semaphore_get_gpu_va_in_channel(uvm_channel_t *semaphore_channel, + uvm_channel_t *access_channel); + +// See above. +static NvU64 uvm_channel_tracking_semaphore_get_gpu_va(uvm_channel_t *channel) +{ + return uvm_channel_tracking_semaphore_get_gpu_va_in_channel(channel, channel); +} + +// Check whether the channel completed a value +bool uvm_channel_is_value_completed(uvm_channel_t *channel, NvU64 value); + +// Update and get the latest completed value by the channel +NvU64 uvm_channel_update_completed_value(uvm_channel_t *channel); + +// Select and reserve a channel with the specified type for a push +NV_STATUS uvm_channel_reserve_type(uvm_channel_manager_t *manager, + uvm_channel_type_t type, + uvm_channel_t **channel_out); + +// Select and reserve a channel for a transfer from channel_manager->gpu to +// dst_gpu. +NV_STATUS uvm_channel_reserve_gpu_to_gpu(uvm_channel_manager_t *channel_manager, + uvm_gpu_t *dst_gpu, + uvm_channel_t **channel_out); + +// Reserve a specific channel for a push +NV_STATUS uvm_channel_reserve(uvm_channel_t *channel); + +// Set optimal CE for P2P transfers between manager->gpu and peer +void uvm_channel_manager_set_p2p_ce(uvm_channel_manager_t *manager, uvm_gpu_t *peer, NvU32 optimal_ce); + +// Begin a push on a previously reserved channel +// Should be used by uvm_push_*() only. +NV_STATUS uvm_channel_begin_push(uvm_channel_t *channel, uvm_push_t *push); + +// End a push +// Should be used by uvm_push_end() only. +void uvm_channel_end_push(uvm_push_t *push); + +const char *uvm_channel_type_to_string(uvm_channel_type_t channel_type); +const char *uvm_channel_pool_type_to_string(uvm_channel_pool_type_t channel_pool_type); + +void uvm_channel_print_pending_pushes(uvm_channel_t *channel); + +static uvm_gpu_t *uvm_channel_get_gpu(uvm_channel_t *channel) +{ + return channel->pool->manager->gpu; +} + +// Index of a channel within the owning pool +static unsigned uvm_channel_index_in_pool(const uvm_channel_t *channel) +{ + return channel - channel->pool->channels; +} + +NvU32 uvm_channel_update_progress_all(uvm_channel_t *channel); + +// Return an arbitrary channel of the given type(s) +uvm_channel_t *uvm_channel_any_of_type(uvm_channel_manager_t *manager, NvU32 pool_type_mask); + +// Return an arbitrary channel of any type +static uvm_channel_t *uvm_channel_any(uvm_channel_manager_t *manager) +{ + return uvm_channel_any_of_type(manager, UVM_CHANNEL_POOL_TYPE_MASK); +} + +// Helper to iterate over all the channels in a pool. +#define uvm_for_each_channel_in_pool(channel, pool) \ + for (({UVM_ASSERT(pool->channels); \ + channel = pool->channels;}); \ + channel != pool->channels + pool->num_channels; \ + channel++) + +uvm_channel_pool_t *uvm_channel_pool_first(uvm_channel_manager_t *manager, NvU32 pool_type_mask); +uvm_channel_pool_t *uvm_channel_pool_next(uvm_channel_manager_t *manager, + uvm_channel_pool_t *curr_pool, + NvU32 pool_type_mask); + +// Helper to iterate over all the channel pools of the given type(s) in a GPU. +// The pool mask must not be zero. 
+#define uvm_for_each_pool_of_type(pool, manager, pool_type_mask) \ + for (pool = uvm_channel_pool_first(manager, pool_type_mask); \ + pool != NULL; \ + pool = uvm_channel_pool_next(manager, pool, pool_type_mask)) + +#define uvm_for_each_pool(pool, manager) uvm_for_each_pool_of_type(pool, manager, UVM_CHANNEL_POOL_TYPE_MASK) + +#endif // __UVM_CHANNEL_H__ diff --git a/kernel-open/nvidia-uvm/uvm_channel_test.c b/kernel-open/nvidia-uvm/uvm_channel_test.c new file mode 100644 index 000000000..c7f31d059 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_channel_test.c @@ -0,0 +1,844 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_global.h" +#include "uvm_channel.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "uvm_test.h" +#include "uvm_test_rng.h" +#include "uvm_va_space.h" +#include "uvm_tracker.h" +#include "uvm_thread_context.h" +#include "uvm_gpu_semaphore.h" +#include "uvm_kvmalloc.h" + +#define TEST_ORDERING_ITERS_PER_CHANNEL_TYPE_PER_GPU 1024 +#define TEST_ORDERING_ITERS_PER_CHANNEL_TYPE_PER_GPU_EMU 64 + +// Schedule pushes one after another on all GPUs and channel types that copy and +// increment a counter into an adjacent memory location in a buffer. And then +// verify that all the values are correct on the CPU. +static NV_STATUS test_ordering(uvm_va_space_t *va_space) +{ + NV_STATUS status; + uvm_gpu_t *gpu; + bool exclude_proxy_channel_type; + NvU32 i, j; + uvm_rm_mem_t *mem = NULL; + NvU32 *host_mem; + uvm_push_t push; + NvU64 gpu_va; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + NvU32 value = 0; + const NvU32 iters_per_channel_type_per_gpu = g_uvm_global.num_simulated_devices > 0 ? 
+ TEST_ORDERING_ITERS_PER_CHANNEL_TYPE_PER_GPU_EMU : + TEST_ORDERING_ITERS_PER_CHANNEL_TYPE_PER_GPU; + const NvU32 values_count = iters_per_channel_type_per_gpu; + const size_t buffer_size = sizeof(NvU32) * values_count; + + gpu = uvm_va_space_find_first_gpu(va_space); + TEST_CHECK_RET(gpu != NULL); + + status = uvm_rm_mem_alloc_and_map_all(gpu, UVM_RM_MEM_TYPE_SYS, buffer_size, &mem); + TEST_CHECK_GOTO(status == NV_OK, done); + + host_mem = (NvU32*)uvm_rm_mem_get_cpu_va(mem); + memset(host_mem, 0, buffer_size); + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_TO_CPU, &push, "Initial memset"); + TEST_CHECK_GOTO(status == NV_OK, done); + + gpu_va = uvm_rm_mem_get_gpu_va(mem, gpu, uvm_channel_is_proxy(push.channel)); + + // Semaphore release as part of uvm_push_end() will do the membar + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + gpu->parent->ce_hal->memset_v_4(&push, gpu_va, 0, buffer_size); + + uvm_push_end(&push); + + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + + exclude_proxy_channel_type = uvm_gpu_uses_proxy_channel_pool(gpu); + + for (i = 0; i < iters_per_channel_type_per_gpu; ++i) { + for (j = 0; j < UVM_CHANNEL_TYPE_CE_COUNT; ++j) { + uvm_channel_type_t channel_type = j; + + // Proxy channels don't support the virtual memcopies that are about + // to be pushed, so don't test the proxy channel type in any of the + // GPUs. + if (exclude_proxy_channel_type && (channel_type == uvm_channel_proxy_channel_type())) + continue; + + for_each_va_space_gpu(gpu, va_space) { + NvU64 gpu_va_base; + NvU64 gpu_va_src; + NvU64 gpu_va_dst; + + status = uvm_push_begin_acquire(gpu->channel_manager, + channel_type, + &tracker, + &push, + "memcpy and inc to %u", + value + 1); + TEST_CHECK_GOTO(status == NV_OK, done); + + gpu_va_base = uvm_rm_mem_get_gpu_va(mem, gpu, uvm_channel_is_proxy(push.channel)); + gpu_va_src = gpu_va_base + (value % values_count) * sizeof(NvU32); + gpu_va_dst = gpu_va_base + ((value + 1) % values_count) * sizeof(NvU32); + + // The semaphore reduction will do a membar before the reduction + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + gpu->parent->ce_hal->memcopy_v_to_v(&push, gpu_va_dst, gpu_va_src, sizeof(NvU32)); + + // The following reduction is done from the same GPU, but the + // previous memcpy is to uncached sysmem and that bypasses L2 + // and hence requires a SYSMEMBAR to be ordered. 
+ gpu->parent->ce_hal->semaphore_reduction_inc(&push, gpu_va_dst, ++value); + + uvm_push_end(&push); + + uvm_tracker_clear(&tracker); + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + } + } + } + status = uvm_tracker_wait(&tracker); + TEST_CHECK_GOTO(status == NV_OK, done); + + // At this moment, this should hold: + // mem[value % values_count] == value + // mem[(value + 1) % values_count] == value + 1 - values_count + // And in general, for i=[0, values_count): + // mem[(value + 1 + i) % values_count] == value + 1 - values_count + i + // Verify that + + for (i = 0; i < values_count; ++i) { + NvU32 index = (value + 1 + i) % values_count; + NvU32 expected = (value + 1 + i) - values_count; + if (host_mem[index] != expected) { + UVM_TEST_PRINT("Bad value at host_mem[%u] = %u instead of %u\n", index, host_mem[index], expected); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + +done: + uvm_tracker_wait(&tracker); + uvm_rm_mem_free(mem); + + return status; +} + +static NV_STATUS uvm_test_rc_for_gpu(uvm_gpu_t *gpu) +{ + uvm_push_t push; + uvm_channel_pool_t *pool; + uvm_gpfifo_entry_t *fatal_entry; + uvm_push_info_t *push_info; + int fatal_line; + uvm_tracker_entry_t tracker_entry; + NV_STATUS status; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + uvm_channel_manager_t *manager = gpu->channel_manager; + + // Submit a bunch of successful pushes on each channel first so that the + // fatal one is behind a bunch of work (notably more than + // uvm_channel_update_progress() completes by default). + uvm_for_each_pool(pool, manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) { + NvU32 i; + for (i = 0; i < 512; ++i) { + status = uvm_push_begin_on_channel(channel, &push, "Non-faulting push"); + TEST_CHECK_RET(status == NV_OK); + + uvm_push_end(&push); + } + } + } + + // Check RC on a proxy channel (SR-IOV heavy) or internal channel (any other + // mode). It is not allowed to use a virtual address in a memset pushed to + // a proxy channel, so we use a physical address instead. + if (uvm_gpu_uses_proxy_channel_pool(gpu)) { + uvm_gpu_address_t dst_address; + + // Save the line number the push that's supposed to fail was started on + fatal_line = __LINE__ + 1; + TEST_NV_CHECK_RET(uvm_push_begin(manager, uvm_channel_proxy_channel_type(), &push, "Fatal push 0x%X", 0xBAD)); + + // Memset targeting a physical address beyond the vidmem size. The + // passed physical address is not the vidmem size reported by RM + // because the reported size can be smaller than the actual physical + // size, such that accessing a GPA at the reported size may be allowed + // by VMMU. 
+ // + // GA100 GPUs have way less than UVM_GPU_MAX_PHYS_MEM vidmem, so using + // that value as physical address should result on an error + dst_address = uvm_gpu_address_physical(UVM_APERTURE_VID, UVM_GPU_MAX_PHYS_MEM - 8); + gpu->parent->ce_hal->memset_8(&push, dst_address, 0, 8); + } + else { + fatal_line = __LINE__ + 1; + TEST_NV_CHECK_RET(uvm_push_begin(manager, UVM_CHANNEL_TYPE_GPU_TO_CPU, &push, "Fatal push 0x%X", 0xBAD)); + + // Memset that should fault on 0xFFFFFFFF + gpu->parent->ce_hal->memset_v_4(&push, 0xFFFFFFFF, 0, 4); + } + + uvm_push_end(&push); + + uvm_push_get_tracker_entry(&push, &tracker_entry); + uvm_tracker_overwrite_with_push(&tracker, &push); + + status = uvm_channel_manager_wait(manager); + TEST_CHECK_RET(status == NV_ERR_RC_ERROR); + + TEST_CHECK_RET(uvm_channel_get_status(push.channel) == NV_ERR_RC_ERROR); + fatal_entry = uvm_channel_get_fatal_entry(push.channel); + TEST_CHECK_RET(fatal_entry != NULL); + + push_info = fatal_entry->push_info; + TEST_CHECK_RET(push_info != NULL); + TEST_CHECK_RET(push_info->line == fatal_line); + TEST_CHECK_RET(strcmp(push_info->function, __FUNCTION__) == 0); + TEST_CHECK_RET(strcmp(push_info->filename, kbasename(__FILE__)) == 0); + if (uvm_push_info_is_tracking_descriptions()) + TEST_CHECK_RET(strcmp(push_info->description, "Fatal push 0xBAD") == 0); + + TEST_CHECK_RET(uvm_global_get_status() == NV_ERR_RC_ERROR); + + // Check that waiting for an entry after a global fatal error makes the + // entry completed. + TEST_CHECK_RET(!uvm_tracker_is_entry_completed(&tracker_entry)); + TEST_CHECK_RET(uvm_tracker_wait_for_entry(&tracker_entry) == NV_ERR_RC_ERROR); + TEST_CHECK_RET(uvm_tracker_is_entry_completed(&tracker_entry)); + + // Check that waiting for a tracker after a global fatal error, clears all + // the entries from the tracker. + TEST_CHECK_RET(!uvm_tracker_is_empty(&tracker)); + TEST_CHECK_RET(uvm_tracker_wait(&tracker) == NV_ERR_RC_ERROR); + TEST_CHECK_RET(uvm_tracker_is_empty(&tracker)); + + TEST_CHECK_RET(uvm_global_reset_fatal_error() == NV_ERR_RC_ERROR); + + return NV_OK; +} + +static NV_STATUS test_rc(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + for_each_va_space_gpu(gpu, va_space) { + NV_STATUS test_status, create_status; + + // The GPU channel manager is destroyed and then re-created after + // testing RC, so this test requires exclusive access to the GPU. 
+ TEST_CHECK_RET(uvm_gpu_retained_count(gpu) == 1); + + g_uvm_global.disable_fatal_error_assert = true; + test_status = uvm_test_rc_for_gpu(gpu); + g_uvm_global.disable_fatal_error_assert = false; + + uvm_channel_manager_destroy(gpu->channel_manager); + create_status = uvm_channel_manager_create(gpu, &gpu->channel_manager); + + TEST_NV_CHECK_RET(test_status); + TEST_NV_CHECK_RET(create_status); + } + + return NV_OK; +} + + +typedef struct +{ + uvm_push_t push; + uvm_tracker_t tracker; + uvm_gpu_semaphore_t semaphore; + NvU32 queued_counter_value; + NvU32 queued_counter_repeat; + uvm_rm_mem_t *counter_mem; + uvm_rm_mem_t *counter_snapshots_mem; + uvm_rm_mem_t *other_stream_counter_snapshots_mem; + NvU32 *counter_snapshots; + NvU32 *other_stream_counter_snapshots; + NvU32 *other_stream_counter_expected; +} uvm_test_stream_t; + +#define MAX_COUNTER_REPEAT_COUNT 10 * 1024 +// For each iter, snapshot the first and last counter value +#define TEST_SNAPSHOT_SIZE(it) (2 * it * sizeof(NvU32)) + +static void snapshot_counter(uvm_push_t *push, + uvm_rm_mem_t *counter_mem, + uvm_rm_mem_t *snapshot_mem, + NvU32 index, + NvU32 counters_count) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + NvU64 counter_gpu_va; + NvU64 snapshot_gpu_va; + bool is_proxy_channel; + NvU32 last_counter_offset = (counters_count - 1) * sizeof(NvU32); + + if (counters_count == 0) + return; + + is_proxy_channel = uvm_channel_is_proxy(push->channel); + counter_gpu_va = uvm_rm_mem_get_gpu_va(counter_mem, gpu, is_proxy_channel); + snapshot_gpu_va = uvm_rm_mem_get_gpu_va(snapshot_mem, gpu, is_proxy_channel) + index * 2 * sizeof(NvU32); + + // Copy the last and first counter to a snapshot for later verification. + + // Membar will be done by uvm_push_end() + uvm_push_set_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + uvm_push_set_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + gpu->parent->ce_hal->memcopy_v_to_v(push, + snapshot_gpu_va + sizeof(NvU32), + counter_gpu_va + last_counter_offset, + sizeof(NvU32)); + + // Membar will be done by uvm_push_end() + uvm_push_set_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + uvm_push_set_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + gpu->parent->ce_hal->memcopy_v_to_v(push, snapshot_gpu_va, counter_gpu_va, sizeof(NvU32)); +} + +static void set_counter(uvm_push_t *push, uvm_rm_mem_t *counter_mem, NvU32 value, NvU32 count) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + NvU64 counter_gpu_va; + bool is_proxy_channel; + + is_proxy_channel = uvm_channel_is_proxy(push->channel); + counter_gpu_va = uvm_rm_mem_get_gpu_va(counter_mem, gpu, is_proxy_channel); + + gpu->parent->ce_hal->memset_v_4(push, counter_gpu_va, value, count * sizeof(NvU32)); +} + +static uvm_channel_type_t random_ce_channel_type(uvm_test_rng_t *rng) +{ + return (uvm_channel_type_t)uvm_test_rng_range_32(rng, 0, UVM_CHANNEL_TYPE_CE_COUNT - 1); +} + +static uvm_channel_type_t random_ce_channel_type_except(uvm_test_rng_t *rng, uvm_channel_type_t exception) +{ + uvm_channel_type_t channel_type; + + UVM_ASSERT(exception < UVM_CHANNEL_TYPE_CE_COUNT); + + channel_type = (uvm_channel_type_t)uvm_test_rng_range_32(rng, 0, UVM_CHANNEL_TYPE_CE_COUNT - 2); + + if (channel_type >= exception) + channel_type++; + + UVM_ASSERT(channel_type < UVM_CHANNEL_TYPE_CE_COUNT); + + return channel_type; +} + +static uvm_channel_type_t gpu_random_internal_ce_channel_type(uvm_gpu_t *gpu, uvm_test_rng_t *rng) +{ + if (uvm_gpu_uses_proxy_channel_pool(gpu)) + return random_ce_channel_type_except(rng, uvm_channel_proxy_channel_type()); + + return 
random_ce_channel_type(rng); +} + +static uvm_gpu_t *random_va_space_gpu(uvm_test_rng_t *rng, uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + NvU32 gpu_count = uvm_processor_mask_get_gpu_count(&va_space->registered_gpus); + NvU32 gpu_index = uvm_test_rng_range_32(rng, 0, gpu_count - 1); + + UVM_ASSERT(gpu_count > 0); + + for_each_va_space_gpu(gpu, va_space) { + if (gpu_index-- == 0) + return gpu; + } + + UVM_ASSERT(0); + return NULL; +} + + +static void test_memset_rm_mem(uvm_push_t *push, uvm_rm_mem_t *rm_mem, NvU32 value) +{ + uvm_gpu_t *gpu; + NvU64 gpu_va; + + UVM_ASSERT(rm_mem->size % 4 == 0); + + gpu = uvm_push_get_gpu(push); + gpu_va = uvm_rm_mem_get_gpu_va(rm_mem, gpu, uvm_channel_is_proxy(push->channel)); + + gpu->parent->ce_hal->memset_v_4(push, gpu_va, value, rm_mem->size); +} + +// This test schedules a randomly sized memset on a random channel and GPU in a +// "stream" that has operations ordered by acquiring the tracker of the previous +// operation. It also snapshots the memset done by the previous operation in the +// stream to verify it later on the CPU. Each iteration also optionally acquires +// a different stream and snapshots its memset. +// The test ioctl is expected to be called at the same time from multiple +// threads and contains some schedule() calls to help get as many threads +// through the init phase before other threads continue. It also has a random +// schedule() call in the main loop scheduling GPU work. +static NV_STATUS stress_test_all_gpus_in_va(uvm_va_space_t *va_space, + NvU32 num_streams, + NvU32 iterations_per_stream, + NvU32 seed, + NvU32 verbose) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + NvU32 i, j; + uvm_test_stream_t *streams; + uvm_test_rng_t rng; + + uvm_test_rng_init(&rng, seed); + + gpu = uvm_va_space_find_first_gpu(va_space); + TEST_CHECK_RET(gpu != NULL); + + streams = uvm_kvmalloc_zero(sizeof(*streams) * num_streams); + TEST_CHECK_RET(streams != NULL); + + // Initialize all the trackers first so that clean up on error can always + // wait for them. 
+ for (i = 0; i < num_streams; ++i) { + uvm_test_stream_t *stream = &streams[i]; + uvm_tracker_init(&stream->tracker); + } + + for (i = 0; i < num_streams; ++i) { + uvm_test_stream_t *stream = &streams[i]; + + status = uvm_gpu_semaphore_alloc(gpu->semaphore_pool, &stream->semaphore); + if (status != NV_OK) + goto done; + + stream->queued_counter_value = 0; + + status = uvm_rm_mem_alloc_and_map_all(gpu, + UVM_RM_MEM_TYPE_SYS, + MAX_COUNTER_REPEAT_COUNT * sizeof(NvU32), + &stream->counter_mem); + TEST_CHECK_GOTO(status == NV_OK, done); + + status = uvm_rm_mem_alloc_and_map_all(gpu, + UVM_RM_MEM_TYPE_SYS, + TEST_SNAPSHOT_SIZE(iterations_per_stream), + &stream->counter_snapshots_mem); + TEST_CHECK_GOTO(status == NV_OK, done); + + stream->counter_snapshots = (NvU32*)uvm_rm_mem_get_cpu_va(stream->counter_snapshots_mem); + + status = uvm_rm_mem_alloc_and_map_all(gpu, + UVM_RM_MEM_TYPE_SYS, + TEST_SNAPSHOT_SIZE(iterations_per_stream), + &stream->other_stream_counter_snapshots_mem); + TEST_CHECK_GOTO(status == NV_OK, done); + + stream->other_stream_counter_snapshots = (NvU32*)uvm_rm_mem_get_cpu_va(stream->other_stream_counter_snapshots_mem); + + stream->other_stream_counter_expected = uvm_kvmalloc_zero(sizeof(NvU32) * iterations_per_stream); + if (stream->other_stream_counter_expected == NULL) { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_CPU_TO_GPU, &stream->push, "stream %u init", i); + TEST_CHECK_GOTO(status == NV_OK, done); + + test_memset_rm_mem(&stream->push, stream->counter_mem, 0); + test_memset_rm_mem(&stream->push, stream->counter_snapshots_mem, 0); + test_memset_rm_mem(&stream->push, stream->other_stream_counter_snapshots_mem, 0); + + status = uvm_push_end_and_wait(&stream->push); + TEST_CHECK_GOTO(status == NV_OK, done); + + if (fatal_signal_pending(current)) { + status = NV_ERR_SIGNAL_PENDING; + goto done; + } + + // Let other threads run + schedule(); + } + + if (verbose > 0) { + UVM_TEST_PRINT("Init done, seed %u, GPUs:\n", seed); + for_each_va_space_gpu(gpu, va_space) { + UVM_TEST_PRINT(" GPU %s\n", uvm_gpu_name(gpu)); + } + } + + for (i = 0; i < iterations_per_stream; ++i) { + for (j = 0; j < num_streams; ++j) { + uvm_test_stream_t *stream = &streams[j]; + uvm_channel_type_t channel_type; + gpu = random_va_space_gpu(&rng, va_space); + + if (fatal_signal_pending(current)) { + status = NV_ERR_SIGNAL_PENDING; + goto done; + } + + // Select a random channel type. 
In SR-IOV heavy the selection has + // to exclude the type associated with proxy channels, because they + // do not support the virtual memcopies/memsets pushed by + // snapshot_counter and set_counter + channel_type = gpu_random_internal_ce_channel_type(gpu, &rng); + + status = uvm_push_begin_acquire(gpu->channel_manager, + channel_type, + &stream->tracker, + &stream->push, + "stream %u payload %u gid %u channel_type %u", + j, + stream->queued_counter_value, + uvm_id_value(gpu->id), + channel_type); + TEST_CHECK_GOTO(status == NV_OK, done); + + snapshot_counter(&stream->push, + stream->counter_mem, + stream->counter_snapshots_mem, + i, + stream->queued_counter_repeat); + // Set a random number [2, MAX_COUNTER_REPEAT_COUNT] of counters + stream->queued_counter_repeat = uvm_test_rng_range_32(&rng, 2, MAX_COUNTER_REPEAT_COUNT); + set_counter(&stream->push, + stream->counter_mem, + ++stream->queued_counter_value, + stream->queued_counter_repeat); + + if (uvm_test_rng_range_32(&rng, 0, 1) == 0) { + NvU32 random_stream_index = uvm_test_rng_range_32(&rng, 0, num_streams - 1); + uvm_test_stream_t *random_stream = &streams[random_stream_index]; + uvm_push_acquire_tracker(&stream->push, &random_stream->tracker); + snapshot_counter(&stream->push, + random_stream->counter_mem, + stream->other_stream_counter_snapshots_mem, + i, + random_stream->queued_counter_repeat); + } + + uvm_push_end(&stream->push); + uvm_tracker_clear(&stream->tracker); + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&stream->tracker, &stream->push), done); + } + + // Randomly schedule other threads + if (uvm_test_rng_range_32(&rng, 0, 9) == 0) + schedule(); + } + + if (verbose > 0) + UVM_TEST_PRINT("All work scheduled\n"); + + // Let other threads run + schedule(); + + for (i = 0; i < num_streams; ++i) { + uvm_test_stream_t *stream = &streams[i]; + status = uvm_tracker_wait(&stream->tracker); + if (status != NV_OK) { + UVM_TEST_PRINT("Failed to wait for the tracker for stream %u: %s\n", i, nvstatusToString(status)); + goto done; + } + for (j = 0; j < iterations_per_stream; ++j) { + NvU32 snapshot_last = stream->counter_snapshots[j * 2]; + NvU32 snapshot_first = stream->counter_snapshots[j * 2 + 1]; + if (snapshot_last != j || snapshot_first != j) { + UVM_TEST_PRINT("Stream %u counter snapshot[%u] = %u,%u instead of %u,%u\n", + i, + j, + snapshot_last, + snapshot_first, + j, + j); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + for (j = 0; j < iterations_per_stream; ++j) { + NvU32 snapshot_last = stream->other_stream_counter_snapshots[j * 2]; + NvU32 snapshot_first = stream->other_stream_counter_snapshots[j * 2 + 1]; + NvU32 expected = stream->other_stream_counter_expected[j]; + if (snapshot_last < expected || snapshot_first < expected) { + UVM_TEST_PRINT("Stream %u other_counter snapshot[%u] = %u,%u which is < of %u,%u\n", + i, + j, + snapshot_last, + snapshot_first, + expected, + expected); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + } + + if (verbose > 0) + UVM_TEST_PRINT("Verification done\n"); + + schedule(); + +done: + // Wait for all the trackers first before freeing up memory as streams + // references each other's buffers. 
+ for (i = 0; i < num_streams; ++i) { + uvm_test_stream_t *stream = &streams[i]; + uvm_tracker_wait(&stream->tracker); + } + + for (i = 0; i < num_streams; ++i) { + uvm_test_stream_t *stream = &streams[i]; + uvm_gpu_semaphore_free(&stream->semaphore); + uvm_rm_mem_free(stream->other_stream_counter_snapshots_mem); + uvm_rm_mem_free(stream->counter_snapshots_mem); + uvm_rm_mem_free(stream->counter_mem); + uvm_tracker_deinit(&stream->tracker); + uvm_kvfree(stream->other_stream_counter_expected); + } + uvm_kvfree(streams); + + if (verbose > 0) + UVM_TEST_PRINT("Cleanup done\n"); + + return status; +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +NV_STATUS uvm_test_channel_sanity(UVM_TEST_CHANNEL_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_va_space_down_read_rm(va_space); + + status = test_ordering(va_space); + if (status != NV_OK) + goto done; + + + + + + + + if (g_uvm_global.num_simulated_devices == 0) { + status = test_rc(va_space); + if (status != NV_OK) + goto done; + } + +done: + uvm_va_space_up_read_rm(va_space); + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return status; +} + +static NV_STATUS uvm_test_channel_stress_stream(uvm_va_space_t *va_space, + const UVM_TEST_CHANNEL_STRESS_PARAMS *params) +{ + NV_STATUS status; + + if (params->iterations == 0 || params->num_streams == 0) + return NV_ERR_INVALID_PARAMETER; + + // TODO: Bug 1764963: Rework the test to not rely on the global lock as that + // serializes all the threads calling this at the same time. + uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_va_space_down_read_rm(va_space); + + status = stress_test_all_gpus_in_va(va_space, + params->num_streams, + params->iterations, + params->seed, + params->verbose); + if (status != NV_OK) + goto done; + +done: + uvm_va_space_up_read_rm(va_space); + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return status; +} + +static NV_STATUS uvm_test_channel_stress_update_channels(uvm_va_space_t *va_space, + const UVM_TEST_CHANNEL_STRESS_PARAMS *params) +{ + NV_STATUS status = NV_OK; + uvm_test_rng_t rng; + NvU32 i; + + uvm_test_rng_init(&rng, params->seed); + + uvm_va_space_down_read(va_space); + + for (i = 0; i < params->iterations; ++i) { + uvm_gpu_t *gpu = random_va_space_gpu(&rng, va_space); + uvm_channel_manager_update_progress(gpu->channel_manager); + + if (fatal_signal_pending(current)) { + status = NV_ERR_SIGNAL_PENDING; + goto done; + } + } + +done: + uvm_va_space_up_read(va_space); + + return status; +} + +static NV_STATUS uvm_test_channel_noop_push(uvm_va_space_t *va_space, + const UVM_TEST_CHANNEL_STRESS_PARAMS *params) +{ + NV_STATUS status = NV_OK; + uvm_push_t push; + uvm_test_rng_t rng; + uvm_gpu_t *gpu; + NvU32 i; + + uvm_test_rng_init(&rng, params->seed); + + uvm_va_space_down_read(va_space); + + for (i = 0; i < params->iterations; ++i) { + uvm_channel_type_t channel_type = random_ce_channel_type(&rng); + gpu = random_va_space_gpu(&rng, va_space); + + status = uvm_push_begin(gpu->channel_manager, channel_type, &push, "noop push"); + if (status != NV_OK) + goto done; + + // Push an actual noop method so that the push doesn't get optimized + // away if we ever detect empty pushes. 
+ gpu->parent->host_hal->noop(&push, UVM_METHOD_SIZE); + + uvm_push_end(&push); + + if (fatal_signal_pending(current)) { + status = NV_ERR_SIGNAL_PENDING; + goto done; + } + } + if (params->verbose > 0) + UVM_TEST_PRINT("Noop pushes: completed %u pushes seed: %u\n", i, params->seed); + + for_each_va_space_gpu_in_mask(gpu, va_space, &va_space->registered_gpu_va_spaces) { + NV_STATUS wait_status = uvm_channel_manager_wait(gpu->channel_manager); + if (status == NV_OK) + status = wait_status; + } + +done: + uvm_va_space_up_read(va_space); + + return status; +} + +NV_STATUS uvm_test_channel_stress(UVM_TEST_CHANNEL_STRESS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + switch (params->mode) { + case UVM_TEST_CHANNEL_STRESS_MODE_STREAM: + return uvm_test_channel_stress_stream(va_space, params); + case UVM_TEST_CHANNEL_STRESS_MODE_UPDATE_CHANNELS: + return uvm_test_channel_stress_update_channels(va_space, params); + case UVM_TEST_CHANNEL_STRESS_MODE_NOOP_PUSH: + return uvm_test_channel_noop_push(va_space, params); + default: + return NV_ERR_INVALID_PARAMETER; + } +} diff --git a/kernel-open/nvidia-uvm/uvm_common.c b/kernel-open/nvidia-uvm/uvm_common.c new file mode 100644 index 000000000..f46761eb5 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_common.c @@ -0,0 +1,322 @@ +/******************************************************************************* + Copyright (c) 2013-2021 NVIDIA Corporation + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_forward_decl.h" + +// TODO: Bug 1710855: Tweak this number through benchmarks +#define UVM_SPIN_LOOP_SCHEDULE_TIMEOUT_NS (10*1000ULL) +#define UVM_SPIN_LOOP_PRINT_TIMEOUT_SEC 30ULL + +// Default to debug prints being enabled for debug and develop builds and +// disabled for release builds. +static int uvm_debug_prints = UVM_IS_DEBUG() || UVM_IS_DEVELOP(); + +// Make the module param writable so that prints can be enabled or disabled at +// any time by modifying the module parameter. +module_param(uvm_debug_prints, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(uvm_debug_prints, "Enable uvm debug prints."); + +bool uvm_debug_prints_enabled() +{ + return uvm_debug_prints != 0; +} + +// This parameter allows a program in user mode to call the kernel tests +// defined in this module. This parameter should only be used for testing and +// must not be set to true otherwise since it breaks security when it is +// enabled. By default and for safety reasons this parameter is set to false. +int uvm_enable_builtin_tests __read_mostly = 0; +module_param(uvm_enable_builtin_tests, int, S_IRUGO); +MODULE_PARM_DESC(uvm_enable_builtin_tests, + "Enable the UVM built-in tests. 
(This is a security risk)"); + +// +// Convert kernel errno codes to corresponding NV_STATUS +// +NV_STATUS errno_to_nv_status(int errnoCode) +{ + if (errnoCode < 0) + errnoCode = -errnoCode; + + switch (errnoCode) + { + case 0: + return NV_OK; + + case E2BIG: + case EINVAL: + return NV_ERR_INVALID_ARGUMENT; + + case EACCES: + return NV_ERR_INVALID_ACCESS_TYPE; + + case EADDRINUSE: + case EADDRNOTAVAIL: + return NV_ERR_UVM_ADDRESS_IN_USE; + + case EFAULT: + return NV_ERR_INVALID_ADDRESS; + + case EOVERFLOW: + return NV_ERR_OUT_OF_RANGE; + + case EINTR: + case EBUSY: + case EAGAIN: + return NV_ERR_BUSY_RETRY; + + case ENXIO: + case ENODEV: + return NV_ERR_MODULE_LOAD_FAILED; + + case ENOMEM: + return NV_ERR_NO_MEMORY; + + case EPERM: + return NV_ERR_INSUFFICIENT_PERMISSIONS; + + case ESRCH: + return NV_ERR_PID_NOT_FOUND; + + case ETIMEDOUT: + return NV_ERR_TIMEOUT; + + case EEXIST: + return NV_ERR_IN_USE; + + case ENOSYS: + case EOPNOTSUPP: + return NV_ERR_NOT_SUPPORTED; + + case ENOENT: + return NV_ERR_NO_VALID_PATH; + + case EIO: + return NV_ERR_RC_ERROR; + + case ENODATA: + return NV_ERR_OBJECT_NOT_FOUND; + + default: + return NV_ERR_GENERIC; + }; +} + +// Returns POSITIVE errno +int nv_status_to_errno(NV_STATUS status) +{ + switch (status) { + case NV_OK: + return 0; + + case NV_ERR_BUSY_RETRY: + return EAGAIN; + + case NV_ERR_INSUFFICIENT_PERMISSIONS: + return EPERM; + + case NV_ERR_GPU_UUID_NOT_FOUND: + return ENODEV; + + case NV_ERR_INSUFFICIENT_RESOURCES: + case NV_ERR_NO_MEMORY: + return ENOMEM; + + case NV_ERR_INVALID_ACCESS_TYPE: + return EACCES; + + case NV_ERR_INVALID_ADDRESS: + return EFAULT; + + case NV_ERR_INVALID_ARGUMENT: + case NV_ERR_INVALID_DEVICE: + case NV_ERR_INVALID_PARAMETER: + case NV_ERR_INVALID_REQUEST: + case NV_ERR_INVALID_STATE: + return EINVAL; + + case NV_ERR_NOT_SUPPORTED: + return ENOSYS; + + case NV_ERR_OBJECT_NOT_FOUND: + return ENODATA; + + case NV_ERR_MODULE_LOAD_FAILED: + return ENXIO; + + case NV_ERR_OVERLAPPING_UVM_COMMIT: + case NV_ERR_UVM_ADDRESS_IN_USE: + return EADDRINUSE; + + case NV_ERR_PID_NOT_FOUND: + return ESRCH; + + case NV_ERR_TIMEOUT: + case NV_ERR_TIMEOUT_RETRY: + return ETIMEDOUT; + + case NV_ERR_IN_USE: + return EEXIST; + + case NV_ERR_NO_VALID_PATH: + return ENOENT; + + case NV_ERR_RC_ERROR: + case NV_ERR_ECC_ERROR: + return EIO; + + case NV_ERR_OUT_OF_RANGE: + return EOVERFLOW; + + default: + UVM_ASSERT_MSG(0, "No errno conversion set up for NV_STATUS %s\n", nvstatusToString(status)); + return EINVAL; + } +} + +// +// This routine retrieves the process ID of current, but makes no attempt to +// refcount or lock the pid in place. +// +unsigned uvm_get_stale_process_id(void) +{ + return (unsigned)task_tgid_vnr(current); +} + +unsigned uvm_get_stale_thread_id(void) +{ + return (unsigned)task_pid_vnr(current); +} + +// +// A simple security rule for allowing access to UVM user space memory: if you +// are the same user as the owner of the memory, or if you are root, then you +// are granted access. The idea is to allow debuggers and profilers to work, but +// without opening up any security holes. 
+// +NvBool uvm_user_id_security_check(uid_t euidTarget) +{ + return (NV_CURRENT_EUID() == euidTarget) || + (UVM_ROOT_UID == euidTarget); +} + +void on_uvm_test_fail(void) +{ + (void)NULL; +} + +void on_uvm_assert(void) +{ + (void)NULL; +#ifdef __COVERITY__ + __coverity_panic__() +#endif +} + +NV_STATUS uvm_spin_loop(uvm_spin_loop_t *spin) +{ + NvU64 curr = NV_GETTIME(); + + // This schedule() is required for functionality, not just system + // performance. It allows RM to run and unblock the UVM driver: + // + // - UVM must service faults in order for RM to idle/preempt a context + // - RM must service interrupts which stall UVM (SW methods, stalling CE + // interrupts, etc) in order for UVM to service faults + // + // Even though UVM's bottom half is preemptable, we have encountered cases + // in which a user thread running in RM won't preempt the UVM driver's + // thread unless the UVM driver thread gives up its timeslice. This is also + // theoretically possible if the RM thread has a low nice priority. + // + // TODO: Bug 1710855: Look into proper prioritization of these threads as a longer-term + // solution. + if (curr - spin->start_time_ns >= UVM_SPIN_LOOP_SCHEDULE_TIMEOUT_NS && NV_MAY_SLEEP()) { + schedule(); + curr = NV_GETTIME(); + } + + cpu_relax(); + + // TODO: Bug 1710855: Also check fatal_signal_pending() here if the caller can handle it. + + if (curr - spin->print_time_ns >= 1000*1000*1000*UVM_SPIN_LOOP_PRINT_TIMEOUT_SEC) { + spin->print_time_ns = curr; + return NV_ERR_TIMEOUT_RETRY; + } + + return NV_OK; +} + +// This formats a GPU UUID, in a UVM-friendly way. That is, nearly the same as +// what nvidia-smi reports. It will always prefix the UUID with UVM-GPU so +// that we know that we have a real, binary formatted UUID that will work in +// the UVM APIs. +// +// It comes out like this: +// +// UVM-GPU-d802726c-df8d-a3c3-ec53-48bdec201c27 +// +// This routine will always null-terminate the string for you. This is true +// even if the buffer was too small! +// +// Return value is the number of non-null characters written. +// +// Note that if you were to let the NV2080_CTRL_CMD_GPU_GET_GID_INFO command +// return it's default format, which is ascii, not binary, then you would get +// this back: +// +// GPU-d802726c-df8d-a3c3-ec53-48bdec201c27 +// +// ...which is actually a character string, and won't work for UVM API calls. +// So it's very important to be able to see the difference. 
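//
// Illustrative usage (editorial sketch, not part of the original driver
// source; "uuidPtr" stands for any NvProcessorUuid pointer):
//
//   char buf[UVM_GPU_UUID_TEXT_BUFFER_LENGTH];
//   int len = format_uuid_to_buffer(buf, sizeof(buf), uuidPtr);
//
// On success len is 44 (the 8-character "UVM-GPU-" prefix plus 32 hex digits
// and 4 dashes); if the buffer is too small the routine stores a single null
// byte and returns 0.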
+// +static char uvm_digit_to_hex(unsigned value) +{ + if (value >= 10) + return value - 10 + 'a'; + else + return value + '0'; +} + +int format_uuid_to_buffer(char *buffer, unsigned bufferLength, const NvProcessorUuid *pUuidStruct) +{ + char *str = buffer+8; + unsigned i; + unsigned dashMask = 1 << 4 | 1 << 6 | 1 << 8 | 1 << 10; + + memcpy(buffer, "UVM-GPU-", 8); + if (bufferLength < (8 /*prefix*/+ 16 * 2 /*digits*/ + 4 * 1 /*dashes*/ + 1 /*null*/)) + return *buffer = 0; + + for (i = 0; i < 16; i++) { + *str++ = uvm_digit_to_hex(pUuidStruct->uuid[i] >> 4); + *str++ = uvm_digit_to_hex(pUuidStruct->uuid[i] & 0xF); + + if (dashMask & (1 << (i+1))) + *str++ = '-'; + } + + *str = 0; + + return (int)(str-buffer); +} + diff --git a/kernel-open/nvidia-uvm/uvm_common.h b/kernel-open/nvidia-uvm/uvm_common.h new file mode 100644 index 000000000..1b93e2303 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_common.h @@ -0,0 +1,357 @@ +/******************************************************************************* + Copyright (c) 2013-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef _UVM_COMMON_H +#define _UVM_COMMON_H + +#ifdef DEBUG + #define UVM_IS_DEBUG() 1 +#else + #define UVM_IS_DEBUG() 0 +#endif + +// NVIDIA_UVM_DEVELOP implies DEBUG, but not vice-versa +// TODO Bug 1773100: Figure out the right distinction between develop and debug +// builds. +#ifdef NVIDIA_UVM_DEVELOP + #define UVM_IS_DEVELOP() 1 +#else + #define UVM_IS_DEVELOP() 0 +#endif + +#include "uvm_types.h" +#include "uvm_linux.h" + +enum { + NVIDIA_UVM_PRIMARY_MINOR_NUMBER = 0, + NVIDIA_UVM_TOOLS_MINOR_NUMBER = 1, + // to ensure backward-compatiblity and correct counting, please insert any + // new minor devices just above the following field: + NVIDIA_UVM_NUM_MINOR_DEVICES +}; + +#define UVM_GPU_UUID_TEXT_BUFFER_LENGTH (8+16*2+4+1) + +int format_uuid_to_buffer(char *buffer, unsigned bufferLength, const NvProcessorUuid *pGpuUuid); + +#define UVM_PRINT_FUNC_PREFIX(func, prefix, fmt, ...) \ + func(prefix "%s:%u %s[pid:%d]" fmt, \ + kbasename(__FILE__), \ + __LINE__, \ + __FUNCTION__, \ + current->pid, \ + ##__VA_ARGS__) + +#define UVM_PRINT_FUNC(func, fmt, ...) \ + UVM_PRINT_FUNC_PREFIX(func, "", fmt, ##__VA_ARGS__) + +// Check whether UVM_{ERR,DBG,INFO)_PRINT* should be enabled +bool uvm_debug_prints_enabled(void); + +// A printing helper like UVM_PRINT_FUNC_PREFIX that only prints if +// uvm_debug_prints_enabled() returns true. 
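//
// For example (editorial note, not part of the original driver source), a
// call through one of the wrappers defined below, such as
//
//   UVM_ERR_PRINT("unexpected status: %s\n", nvstatusToString(status));
//
// only reaches printk when debug prints are enabled, and the message is
// automatically prefixed with the caller's file name, line number, function
// name and pid.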
+#define UVM_PRINT_FUNC_PREFIX_CHECK(func, prefix, fmt, ...) \ + do { \ + if (uvm_debug_prints_enabled()) { \ + UVM_PRINT_FUNC_PREFIX(func, prefix, fmt, ##__VA_ARGS__); \ + } \ + } while (0) + +#define UVM_ASSERT_PRINT(fmt, ...) \ + UVM_PRINT_FUNC_PREFIX(printk, KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__) + +#define UVM_ERR_PRINT(fmt, ...) \ + UVM_PRINT_FUNC_PREFIX_CHECK(printk, KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__) + +#define UVM_ERR_PRINT_RL(fmt, ...) \ + UVM_PRINT_FUNC_PREFIX_CHECK(printk_ratelimited, KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__) + +#define UVM_DBG_PRINT(fmt, ...) \ + UVM_PRINT_FUNC_PREFIX_CHECK(printk, KERN_DEBUG NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__) + +#define UVM_DBG_PRINT_RL(fmt, ...) \ + UVM_PRINT_FUNC_PREFIX_CHECK(printk_ratelimited, KERN_DEBUG NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__) + +#define UVM_INFO_PRINT(fmt, ...) \ + UVM_PRINT_FUNC_PREFIX_CHECK(printk, KERN_INFO NVIDIA_UVM_PRETTY_PRINTING_PREFIX, " " fmt, ##__VA_ARGS__) + +// +// Please see the documentation of format_uuid_to_buffer, for details on what +// this routine prints for you. +// +#define UVM_DBG_PRINT_UUID(msg, uuidPtr) \ + do { \ + char uuidBuffer[UVM_GPU_UUID_TEXT_BUFFER_LENGTH]; \ + format_uuid_to_buffer(uuidBuffer, sizeof(uuidBuffer), uuidPtr); \ + UVM_DBG_PRINT("%s: %s\n", msg, uuidBuffer); \ + } while (0) + +#define UVM_ERR_PRINT_NV_STATUS(msg, rmStatus, ...) \ + UVM_ERR_PRINT("ERROR: %s : " msg "\n", nvstatusToString(rmStatus), ##__VA_ARGS__) + +#define UVM_ERR_PRINT_UUID(msg, uuidPtr, ...) \ + do { \ + char uuidBuffer[UVM_GPU_UUID_TEXT_BUFFER_LENGTH]; \ + format_uuid_to_buffer(uuidBuffer, sizeof(uuidBuffer), uuidPtr); \ + UVM_ERR_PRINT("ERROR: %s : " msg "\n", uuidBuffer, ##__VA_ARGS__); \ + } while (0) + +#define UVM_PANIC() UVM_PRINT_FUNC(panic, "\n") +#define UVM_PANIC_MSG(fmt, ...) UVM_PRINT_FUNC(panic, ": " fmt, ##__VA_ARGS__) + +#define UVM_PANIC_ON_MSG(cond, fmt, ...) \ + do { \ + if (unlikely(cond)) \ + UVM_PANIC_MSG(fmt, ##__VA_ARGS__); \ + } while (0) + +#define UVM_PANIC_ON(cond) UVM_PANIC_ON_MSG(cond, "failed cond %s\n", #cond) + +// expr may include function calls. Use sizeof to prevent it from being +// evaluated while also preventing unused variable warnings. sizeof() can't be +// used on a bitfield however, so use ! to force the expression to evaluate as +// an int. +#define UVM_IGNORE_EXPR(expr) ((void)sizeof(!(expr))) + +#define UVM_IGNORE_EXPR2(expr1, expr2) \ + do { \ + UVM_IGNORE_EXPR(expr1); \ + UVM_IGNORE_EXPR(expr2); \ + } while (0) + +// NO-OP function to break on_uvm_test_fail - that is just to set a breakpoint +void on_uvm_test_fail(void); + +// NO-OP function to break on_uvm_assert - that is just to set a breakpoint +// Unlike on_uvm_test_fail it provides 'panic' coverity semantics +void on_uvm_assert(void); + +// UVM_ASSERT_RELEASE and UVM_ASSERT_MSG_RELEASE are always enabled, even on +// release builds. +#define _UVM_ASSERT_MSG_RELEASE(expr, cond, fmt, ...) \ + do { \ + if (unlikely(!(expr))) { \ + UVM_ASSERT_PRINT("Assert failed, condition %s not true" fmt, cond, ##__VA_ARGS__); \ + dump_stack(); \ + on_uvm_assert(); \ + } \ + } while (0) + +#define UVM_ASSERT_MSG_RELEASE(expr, fmt, ...) _UVM_ASSERT_MSG_RELEASE(expr, #expr, ": " fmt, ##__VA_ARGS__) +#define UVM_ASSERT_RELEASE(expr) _UVM_ASSERT_MSG_RELEASE(expr, #expr, "\n") + +// Prevent function calls in expr and the print argument list from being +// evaluated. 
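//
// (Editorial note, not part of the original driver source.) On release builds
// this means an assertion such as
//
//   UVM_ASSERT(refresh_state(gpu) == NV_OK);
//
// reduces the condition to a sizeof() and never calls the hypothetical
// refresh_state(), so assertion expressions must not be relied on for
// functional side effects.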
+#define UVM_ASSERT_MSG_IGNORE(expr, fmt, ...) \ + do { \ + UVM_IGNORE_EXPR(expr); \ + UVM_NO_PRINT(fmt, ##__VA_ARGS__); \ + } while (0) + +// UVM_ASSERT and UVM_ASSERT_MSG are only enabled on non-release and Coverity builds +#if UVM_IS_DEBUG() || defined __COVERITY__ + #define UVM_ASSERT_MSG UVM_ASSERT_MSG_RELEASE + #define UVM_ASSERT UVM_ASSERT_RELEASE +#else + #define UVM_ASSERT_MSG(expr, fmt, ...) UVM_ASSERT_MSG_IGNORE(expr, fmt, ##__VA_ARGS__) + #define UVM_ASSERT(expr) UVM_ASSERT_MSG_IGNORE(expr, "\n") +#endif + +// Provide a short form of UUID's, typically for use in debug printing: +#define ABBREV_UUID(uuid) (unsigned)(uuid) + +static inline NvBool uvm_uuid_is_cpu(const NvProcessorUuid *uuid) +{ + return memcmp(uuid, &NV_PROCESSOR_UUID_CPU_DEFAULT, sizeof(*uuid)) == 0; +} + +#define UVM_ALIGN_DOWN(x, a) ({ \ + typeof(x) _a = a; \ + UVM_ASSERT(is_power_of_2(_a)); \ + (x) & ~(_a - 1); \ + }) + +#define UVM_ALIGN_UP(x, a) ({ \ + typeof(x) _a = a; \ + UVM_ASSERT(is_power_of_2(_a)); \ + ((x) + _a - 1) & ~(_a - 1); \ + }) + +#define UVM_PAGE_ALIGN_UP(value) UVM_ALIGN_UP(value, PAGE_SIZE) +#define UVM_PAGE_ALIGN_DOWN(value) UVM_ALIGN_DOWN(value, PAGE_SIZE) + +// These macros provide a convenient way to string-ify enum values. +#define UVM_ENUM_STRING_CASE(value) case value: return #value +#define UVM_ENUM_STRING_DEFAULT() default: return "UNKNOWN" + +// Divide by a dynamic value known at runtime to be a power of 2. ilog2 is +// optimized as a single instruction in many processors, whereas integer +// division is always slow. +static inline NvU32 uvm_div_pow2_32(NvU32 numerator, NvU32 denominator_pow2) +{ + UVM_ASSERT(is_power_of_2(denominator_pow2)); + UVM_ASSERT(denominator_pow2); + return numerator >> ilog2(denominator_pow2); +} + +static inline NvU64 uvm_div_pow2_64(NvU64 numerator, NvU64 denominator_pow2) +{ + UVM_ASSERT(is_power_of_2(denominator_pow2)); + UVM_ASSERT(denominator_pow2); + return numerator >> ilog2(denominator_pow2); +} + +#define SUM_FROM_0_TO_N(n) (((n) * ((n) + 1)) / 2) + +// Start and end are inclusive +static inline NvBool uvm_ranges_overlap(NvU64 a_start, NvU64 a_end, NvU64 b_start, NvU64 b_end) +{ + // De Morgan's of: !(a_end < b_start || b_end < a_start) + return a_end >= b_start && b_end >= a_start; +} + +static int debug_mode(void) +{ +#ifdef DEBUG + return 1; +#else + return 0; +#endif +} + +static inline void kmem_cache_destroy_safe(struct kmem_cache **ppCache) +{ + if (ppCache) + { + if (*ppCache) + kmem_cache_destroy(*ppCache); + + *ppCache = NULL; + } +} + +static const uid_t UVM_ROOT_UID = 0; + + +typedef struct +{ + NvU64 start_time_ns; + NvU64 print_time_ns; +} uvm_spin_loop_t; + +static inline void uvm_spin_loop_init(uvm_spin_loop_t *spin) +{ + NvU64 curr = NV_GETTIME(); + spin->start_time_ns = curr; + spin->print_time_ns = curr; +} + +// Periodically yields the CPU when not called from interrupt context. Returns +// NV_ERR_TIMEOUT_RETRY if the caller should print a warning that we've been +// waiting too long, and NV_OK otherwise. 
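//
// Typical usage (editorial sketch, not part of the original driver source;
// wait_condition() stands for whatever the caller is polling on):
//
//   uvm_spin_loop_t spin;
//
//   uvm_spin_loop_init(&spin);
//   while (!wait_condition())
//       UVM_SPIN_LOOP(&spin);
//
// The UVM_SPIN_WHILE helper defined below wraps the same pattern and performs
// the uvm_spin_loop_init() itself.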
+NV_STATUS uvm_spin_loop(uvm_spin_loop_t *spin); + +static NvU64 uvm_spin_loop_elapsed(const uvm_spin_loop_t *spin) +{ + NvU64 curr = NV_GETTIME(); + return curr - spin->start_time_ns; +} + +#define UVM_SPIN_LOOP(__spin) ({ \ + NV_STATUS __status = uvm_spin_loop(__spin); \ + if (__status == NV_ERR_TIMEOUT_RETRY) { \ + UVM_DBG_PRINT("Warning: stuck waiting for %llus\n", \ + uvm_spin_loop_elapsed(__spin) / (1000*1000*1000)); \ + \ + if (uvm_debug_prints_enabled()) \ + dump_stack(); \ + } \ + __status; \ +}) + +// Execute the loop code while cond is true. Invokes uvm_spin_loop_iter at the +// end of each iteration. +#define UVM_SPIN_WHILE(cond, spin) \ + if (cond) \ + for (uvm_spin_loop_init(spin); (cond); UVM_SPIN_LOOP(spin)) + +// +// Documentation for the internal routines listed below may be found in the +// implementation file(s). +// +NV_STATUS errno_to_nv_status(int errnoCode); +int nv_status_to_errno(NV_STATUS status); +unsigned uvm_get_stale_process_id(void); +unsigned uvm_get_stale_thread_id(void); +NvBool uvm_user_id_security_check(uid_t euidTarget); + +extern int uvm_enable_builtin_tests; + +static inline void uvm_init_character_device(struct cdev *cdev, const struct file_operations *fops) +{ + cdev_init(cdev, fops); + cdev->owner = THIS_MODULE; +} + +typedef struct +{ + int rm_control_fd; + NvHandle user_client; + NvHandle user_object; +} uvm_rm_user_object_t; + +// Macro used to compare two values for types that support less than operator. +// It returns -1 if a < b, 1 if a > b and 0 if a == 0 +#define UVM_CMP_DEFAULT(a,b) \ +({ \ + typeof(a) _a = a; \ + typeof(b) _b = b; \ + int __ret; \ + BUILD_BUG_ON(sizeof(a) != sizeof(b)); \ + if (_a < _b) \ + __ret = -1; \ + else if (_b < _a) \ + __ret = 1; \ + else \ + __ret = 0; \ + \ + __ret; \ +}) + +// Returns whether the input file was opened against the UVM character device +// file. A NULL input returns false. +bool uvm_file_is_nvidia_uvm(struct file *filp); + +// Reads the first word in the supplied struct page. +static inline void uvm_touch_page(struct page *page) +{ + char *mapping; + + UVM_ASSERT(page); + + mapping = (char *) kmap(page); + (void)UVM_READ_ONCE(*mapping); + kunmap(page); +} + +#endif /* _UVM_COMMON_H */ diff --git a/kernel-open/nvidia-uvm/uvm_debug_optimized.c b/kernel-open/nvidia-uvm/uvm_debug_optimized.c new file mode 100644 index 000000000..af4a4cfbc --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_debug_optimized.c @@ -0,0 +1,53 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +// This file provides simple wrappers that are always built with optimizations +// turned on to WAR issues with functions that don't build correctly otherwise. + +#include "uvm_linux.h" + +int nv_atomic_xchg(atomic_t *val, int new) +{ + return atomic_xchg(val, new); +} + +int nv_atomic_cmpxchg(atomic_t *val, int old, int new) +{ + return atomic_cmpxchg(val, old, new); +} + +long nv_atomic_long_cmpxchg(atomic_long_t *val, long old, long new) +{ + return atomic_long_cmpxchg(val, old, new); +} + +unsigned long nv_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return copy_from_user(to, from, n); +} + +unsigned long nv_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return copy_to_user(to, from, n); +} + diff --git a/kernel-open/nvidia-uvm/uvm_extern_decl.h b/kernel-open/nvidia-uvm/uvm_extern_decl.h new file mode 100644 index 000000000..9ff9339b1 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_extern_decl.h @@ -0,0 +1,38 @@ +/******************************************************************************* + Copyright (c) 2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_EXTERN_DECL_H__ +#define __UVM_EXTERN_DECL_H__ + +#include "uvm_linux.h" +#include "uvm_forward_decl.h" + +extern int uvm_enable_debug_procfs; + +extern unsigned uvm_perf_map_remote_on_native_atomics_fault; + +extern uvm_global_t g_uvm_global; + +extern bool uvm_global_is_suspended(void); + +#endif //__UVM_EXTERN_DECL_H__ diff --git a/kernel-open/nvidia-uvm/uvm_fault_buffer_flush_test.c b/kernel-open/nvidia-uvm/uvm_fault_buffer_flush_test.c new file mode 100644 index 000000000..d300650ce --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_fault_buffer_flush_test.c @@ -0,0 +1,69 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_gpu_replayable_faults.h" +#include "uvm_test.h" +#include "uvm_va_space.h" + +NV_STATUS uvm_test_fault_buffer_flush(UVM_TEST_FAULT_BUFFER_FLUSH_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu; + uvm_global_processor_mask_t retained_gpus; + NvU64 i; + + uvm_global_processor_mask_zero(&retained_gpus); + + uvm_va_space_down_read(va_space); + + for_each_va_space_gpu(gpu, va_space) { + if (gpu->parent->replayable_faults_supported) + uvm_global_processor_mask_set(&retained_gpus, gpu->global_id); + } + + uvm_global_mask_retain(&retained_gpus); + + uvm_va_space_up_read(va_space); + + if (uvm_global_processor_mask_empty(&retained_gpus)) + return NV_ERR_INVALID_DEVICE; + + for (i = 0; i < params->iterations; i++) { + if (fatal_signal_pending(current)) { + status = NV_ERR_SIGNAL_PENDING; + break; + } + + for_each_global_gpu_in_mask(gpu, &retained_gpus) + TEST_CHECK_GOTO(uvm_gpu_fault_buffer_flush(gpu) == NV_OK, out); + } + +out: + uvm_global_mask_release(&retained_gpus); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_forward_decl.h b/kernel-open/nvidia-uvm/uvm_forward_decl.h new file mode 100644 index 000000000..a2a28608b --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_forward_decl.h @@ -0,0 +1,98 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_FORWARD_DECL_H__ +#define __UVM_FORWARD_DECL_H__ + +typedef struct uvm_global_struct uvm_global_t; + +typedef struct uvm_gpu_struct uvm_gpu_t; +typedef struct uvm_parent_gpu_struct uvm_parent_gpu_t; +typedef struct uvm_rm_mem_struct uvm_rm_mem_t; +typedef struct uvm_mem_struct uvm_mem_t; +typedef struct uvm_host_hal_struct uvm_host_hal_t; +typedef struct uvm_ce_hal_struct uvm_ce_hal_t; +typedef struct uvm_arch_hal_struct uvm_arch_hal_t; +typedef struct uvm_fault_buffer_hal_struct uvm_fault_buffer_hal_t; +typedef struct uvm_access_counter_buffer_hal_struct uvm_access_counter_buffer_hal_t; + + + +typedef struct uvm_gpu_semaphore_struct uvm_gpu_semaphore_t; +typedef struct uvm_gpu_tracking_semaphore_struct uvm_gpu_tracking_semaphore_t; +typedef struct uvm_gpu_semaphore_pool_struct uvm_gpu_semaphore_pool_t; +typedef struct uvm_gpu_semaphore_pool_page_struct uvm_gpu_semaphore_pool_page_t; +typedef struct uvm_gpu_peer_struct uvm_gpu_peer_t; +typedef struct uvm_mmu_mode_hal_struct uvm_mmu_mode_hal_t; + +typedef struct uvm_channel_manager_struct uvm_channel_manager_t; +typedef struct uvm_channel_struct uvm_channel_t; +typedef struct uvm_user_channel_struct uvm_user_channel_t; +typedef struct uvm_push_struct uvm_push_t; +typedef struct uvm_push_info_struct uvm_push_info_t; +typedef struct uvm_push_acquire_info_struct uvm_push_acquire_info_t; +typedef struct uvm_pushbuffer_struct uvm_pushbuffer_t; +typedef struct uvm_gpfifo_entry_struct uvm_gpfifo_entry_t; + +typedef struct uvm_va_policy_struct uvm_va_policy_t; +typedef struct uvm_va_range_struct uvm_va_range_t; +typedef struct uvm_va_block_struct uvm_va_block_t; +typedef struct uvm_va_block_test_struct uvm_va_block_test_t; +typedef struct uvm_va_block_wrapper_struct uvm_va_block_wrapper_t; +typedef struct uvm_va_space_struct uvm_va_space_t; +typedef struct uvm_va_space_mm_struct uvm_va_space_mm_t; + +typedef struct uvm_make_resident_context_struct uvm_make_resident_context_t; + +typedef struct uvm_gpu_va_space_struct uvm_gpu_va_space_t; + +typedef struct uvm_thread_context_lock_struct uvm_thread_context_lock_t; +typedef struct uvm_thread_context_struct uvm_thread_context_t; +typedef struct uvm_thread_context_wrapper_struct uvm_thread_context_wrapper_t; + +typedef struct uvm_perf_module_struct uvm_perf_module_t; + +typedef struct uvm_page_table_range_vec_struct uvm_page_table_range_vec_t; +typedef struct uvm_page_table_range_struct uvm_page_table_range_t; +typedef struct uvm_page_tree_struct uvm_page_tree_t; + +typedef struct uvm_fault_buffer_entry_struct uvm_fault_buffer_entry_t; + +typedef struct uvm_pte_batch_struct uvm_pte_batch_t; +typedef struct uvm_tlb_batch_struct uvm_tlb_batch_t; + +typedef struct uvm_fault_service_batch_context_struct uvm_fault_service_batch_context_t; +typedef struct uvm_service_block_context_struct uvm_service_block_context_t; + +typedef struct uvm_ats_fault_invalidate_struct uvm_ats_fault_invalidate_t; + +typedef struct uvm_replayable_fault_buffer_info_struct uvm_replayable_fault_buffer_info_t; +typedef struct uvm_non_replayable_fault_buffer_info_struct uvm_non_replayable_fault_buffer_info_t; +typedef struct uvm_access_counter_buffer_entry_struct uvm_access_counter_buffer_entry_t; +typedef struct uvm_access_counter_service_batch_context_struct uvm_access_counter_service_batch_context_t; +typedef struct uvm_pmm_sysmem_mappings_struct uvm_pmm_sysmem_mappings_t; + +typedef struct uvm_reverse_map_struct uvm_reverse_map_t; + 
+typedef struct uvm_ibm_npu_struct uvm_ibm_npu_t; +#endif //__UVM_FORWARD_DECL_H__ diff --git a/kernel-open/nvidia-uvm/uvm_get_rm_ptes_test.c b/kernel-open/nvidia-uvm/uvm_get_rm_ptes_test.c new file mode 100644 index 000000000..b92b039b3 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_get_rm_ptes_test.c @@ -0,0 +1,352 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVidia Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "uvm_test.h" +#include "uvm_test_ioctl.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" +#include "uvm_va_space.h" +#include "uvm_mmu.h" +#include "nv_uvm_types.h" +#include "nv_uvm_interface.h" +#include "uvm_common.h" + +#define get_rm_ptes(offset, size, ext_map_info) \ + uvm_rm_locked_call( \ + nvUvmInterfaceGetExternalAllocPtes(gpu_va_space, \ + duped_memory, \ + offset, \ + size, \ + ext_map_info)) + +static uvm_aperture_t get_aperture(uvm_va_space_t *va_space, + uvm_gpu_t *memory_owning_gpu, + uvm_gpu_t *memory_mapping_gpu, + UvmGpuMemoryInfo *memory_info, + bool sli_supported) +{ + if (memory_info->sysmem) { + return UVM_APERTURE_SYS; + } + else { + if (memory_mapping_gpu != memory_owning_gpu && !sli_supported) + return uvm_gpu_peer_aperture(memory_mapping_gpu, memory_owning_gpu); + return UVM_APERTURE_VID; + } +} + +static bool is_cacheable(UvmGpuExternalMappingInfo *ext_mapping_info, uvm_aperture_t aperture) +{ + if (ext_mapping_info->cachingType == UvmRmGpuCachingTypeForceCached) + return true; + else if (ext_mapping_info->cachingType == UvmRmGpuCachingTypeForceUncached) + return false; + else if (aperture == UVM_APERTURE_VID) + return true; + + return false; +} + +static NvU32 get_protection(UvmGpuExternalMappingInfo *ext_mapping_info) +{ + if (ext_mapping_info->mappingType == UvmRmGpuMappingTypeReadWriteAtomic || + ext_mapping_info->mappingType == UvmRmGpuMappingTypeDefault) + return UVM_PROT_READ_WRITE_ATOMIC; + else if (ext_mapping_info->mappingType == UvmRmGpuMappingTypeReadWrite) + return UVM_PROT_READ_WRITE; + else + return UVM_PROT_READ_ONLY; +} + +static NV_STATUS verify_mapping_info(uvm_va_space_t *va_space, + uvm_gpu_t *memory_mapping_gpu, + NvU64 mapping_offset, + NvU64 mapping_size, + UvmGpuExternalMappingInfo *ext_mapping_info, + UvmGpuMemoryInfo *memory_info, + bool sli_supported) +{ + NvU32 index = 0, total_pte_count = 0, skip = 0, page_size = 0; + uvm_aperture_t aperture 
= 0; + NvU32 prot; + NvU64 phys_offset, pte; + uvm_mmu_mode_hal_t *hal; + NvU64 pte_flags = UVM_MMU_PTE_FLAGS_ACCESS_COUNTERS_DISABLED; + uvm_gpu_t *memory_owning_gpu = NULL; + + TEST_CHECK_RET(memory_info->contig); + + hal = uvm_gpu_va_space_get(va_space, memory_mapping_gpu)->page_tables.hal; + + page_size = memory_info->pageSize; + + // Verify that make_pte supports this page size + TEST_CHECK_RET(page_size & hal->page_sizes()); + + total_pte_count = mapping_size ? (mapping_size / page_size) : (memory_info->size / page_size); + + TEST_CHECK_RET(total_pte_count); + + TEST_CHECK_RET(ext_mapping_info->numWrittenPtes <= total_pte_count); + + TEST_CHECK_RET(ext_mapping_info->numRemainingPtes == (total_pte_count - ext_mapping_info->numWrittenPtes)); + + skip = ext_mapping_info->pteSize / sizeof(NvU64); + + TEST_CHECK_RET(skip); + + memory_owning_gpu = uvm_va_space_get_gpu_by_uuid(va_space, &memory_info->uuid); + if (memory_owning_gpu == NULL) + return NV_ERR_INVALID_DEVICE; + + // TODO: Bug 1903234: Once RM supports indirect peer mappings, we'll need to + // update this test since the aperture will be SYS. Depending on how + // RM implements things, we might not be able to compare the physical + // addresses either. + aperture = get_aperture(va_space, memory_owning_gpu, memory_mapping_gpu, memory_info, sli_supported); + + if (is_cacheable(ext_mapping_info, aperture)) + pte_flags |= UVM_MMU_PTE_FLAGS_CACHED; + + prot = get_protection(ext_mapping_info); + + phys_offset = mapping_offset; + + // Add the physical offset for nvswitch connected peer mappings + if (uvm_aperture_is_peer(aperture) && uvm_gpus_are_nvswitch_connected(memory_mapping_gpu, memory_owning_gpu)) + phys_offset += memory_owning_gpu->parent->nvswitch_info.fabric_memory_window_start; + + for (index = 0; index < ext_mapping_info->numWrittenPtes; index++) { + + pte = hal->make_pte(aperture, + memory_info->physAddr + phys_offset, + prot, + pte_flags); + + TEST_CHECK_RET(pte == ext_mapping_info->pteBuffer[index * skip]); + + phys_offset += page_size; + } + + return NV_OK; +} + +static NV_STATUS test_get_rm_ptes_single_gpu(uvm_va_space_t *va_space, UVM_TEST_GET_RM_PTES_PARAMS *params) +{ + NV_STATUS status = NV_OK; + NV_STATUS free_status; + uvm_gpu_t *memory_mapping_gpu; + NvHandle duped_memory; + UvmGpuExternalMappingInfo ext_mapping_info; + UvmGpuMemoryInfo memory_info; + NvU64 pte_buffer[16] = {0}; + NvU32 size = 0; + uvmGpuAddressSpaceHandle gpu_va_space; + uvmGpuDeviceHandle rm_device; + NvHandle client, memory; + + client = params->hClient; + memory = params->hMemory; + + // Note: This check is safe as single GPU test does not run on SLI enabled devices. 
+ memory_mapping_gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, ¶ms->gpu_uuid); + if (!memory_mapping_gpu) + return NV_ERR_INVALID_DEVICE; + + gpu_va_space = memory_mapping_gpu->rm_address_space; + rm_device = uvm_gpu_device_handle(memory_mapping_gpu); + + status = uvm_rm_locked_call(nvUvmInterfaceDupMemory(rm_device, client, memory, &duped_memory, &memory_info)); + if (status != NV_OK) + return status; + + TEST_CHECK_GOTO(uvm_processor_uuid_eq(&memory_info.uuid, ¶ms->gpu_uuid), done); + + TEST_CHECK_GOTO((memory_info.size == params->size), done); + + size = params->size; + + memset(&ext_mapping_info, 0, sizeof(ext_mapping_info)); + + ext_mapping_info.pteBuffer = pte_buffer; + + ext_mapping_info.pteBufferSize = 1; + + TEST_CHECK_GOTO((get_rm_ptes(size + 1, 0, &ext_mapping_info) == NV_ERR_INVALID_BASE), done); + + TEST_CHECK_GOTO((get_rm_ptes(0, size + 1, &ext_mapping_info) == NV_ERR_INVALID_LIMIT), done); + + TEST_CHECK_GOTO((get_rm_ptes(1, 0, &ext_mapping_info) == NV_ERR_INVALID_ARGUMENT), done); + + TEST_CHECK_GOTO((get_rm_ptes(0, size - 1, &ext_mapping_info) == NV_ERR_INVALID_ARGUMENT), done); + + TEST_CHECK_GOTO((get_rm_ptes(0, 0, &ext_mapping_info) == NV_ERR_BUFFER_TOO_SMALL), done); + + ext_mapping_info.pteBufferSize = sizeof(pte_buffer); + TEST_CHECK_GOTO(get_rm_ptes(0, 0, &ext_mapping_info) == NV_OK, done); + TEST_CHECK_GOTO(verify_mapping_info(va_space, + memory_mapping_gpu, + 0, + 0, + &ext_mapping_info, + &memory_info, + false) == NV_OK, done); + + TEST_CHECK_GOTO(get_rm_ptes(memory_info.pageSize, 0, &ext_mapping_info) == NV_OK, done); + TEST_CHECK_GOTO(verify_mapping_info(va_space, + memory_mapping_gpu, + memory_info.pageSize, + 0, + &ext_mapping_info, + &memory_info, + false) == NV_OK, done); + + TEST_CHECK_GOTO(get_rm_ptes(0, size - memory_info.pageSize, &ext_mapping_info) == NV_OK, done); + TEST_CHECK_GOTO(verify_mapping_info(va_space, + memory_mapping_gpu, + 0, + size - memory_info.pageSize, + &ext_mapping_info, + &memory_info, + false) == NV_OK, done); + + ext_mapping_info.mappingType = UvmRmGpuMappingTypeReadWrite; + ext_mapping_info.cachingType = UvmRmGpuCachingTypeForceCached; + TEST_CHECK_GOTO(get_rm_ptes(memory_info.pageSize, size - memory_info.pageSize, &ext_mapping_info) == NV_OK, done); + TEST_CHECK_GOTO(verify_mapping_info(va_space, + memory_mapping_gpu, + memory_info.pageSize, + size - memory_info.pageSize, + &ext_mapping_info, + &memory_info, + false) == NV_OK, done); + + ext_mapping_info.mappingType = UvmRmGpuMappingTypeReadOnly; + ext_mapping_info.cachingType = UvmRmGpuCachingTypeForceUncached; + TEST_CHECK_GOTO(get_rm_ptes(size - memory_info.pageSize, memory_info.pageSize, &ext_mapping_info) == NV_OK, done); + TEST_CHECK_GOTO(verify_mapping_info(va_space, + memory_mapping_gpu, + size - memory_info.pageSize, + memory_info.pageSize, + &ext_mapping_info, + &memory_info, + false) == NV_OK, done); + +done: + free_status = uvm_rm_locked_call(nvUvmInterfaceFreeDupedHandle(rm_device, duped_memory)); + if (status == NV_OK) + status = free_status; + + return status; +} + +static NV_STATUS test_get_rm_ptes_multi_gpu(uvm_va_space_t *va_space, UVM_TEST_GET_RM_PTES_PARAMS *params) +{ + NV_STATUS status = NV_OK; + NV_STATUS free_status; + uvm_gpu_t *memory_mapping_gpu; + NvHandle duped_memory; + UvmGpuExternalMappingInfo ext_mapping_info; + UvmGpuMemoryInfo memory_info; + uvmGpuDeviceHandle rm_device; + NvU64 pte_buffer[16] = {0}; + uvmGpuAddressSpaceHandle gpu_va_space; + + memory_mapping_gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if 
(!memory_mapping_gpu) + return NV_ERR_INVALID_DEVICE; + + gpu_va_space = memory_mapping_gpu->rm_address_space; + rm_device = uvm_gpu_device_handle(memory_mapping_gpu); + + status = uvm_rm_locked_call(nvUvmInterfaceDupMemory(rm_device, + params->hClient, + params->hMemory, + &duped_memory, + &memory_info)); + if (status != NV_OK) + return status; + + memset(&ext_mapping_info, 0, sizeof(ext_mapping_info)); + + memset(pte_buffer, 0, sizeof(pte_buffer)); + + ext_mapping_info.pteBuffer = pte_buffer; + + ext_mapping_info.pteBufferSize = sizeof(pte_buffer); + + switch (params->test_mode) { + case UVM_TEST_GET_RM_PTES_MULTI_GPU_SUPPORTED: + case UVM_TEST_GET_RM_PTES_MULTI_GPU_SLI_SUPPORTED: + TEST_CHECK_GOTO(get_rm_ptes(0, 0, &ext_mapping_info) == NV_OK, done); + TEST_CHECK_GOTO(verify_mapping_info(va_space, + memory_mapping_gpu, + 0, + 0, + &ext_mapping_info, + &memory_info, + (params->test_mode == + UVM_TEST_GET_RM_PTES_MULTI_GPU_SLI_SUPPORTED)) == NV_OK, done); + break; + + case UVM_TEST_GET_RM_PTES_MULTI_GPU_NOT_SUPPORTED: + TEST_CHECK_GOTO(get_rm_ptes(0, 0, &ext_mapping_info) == NV_ERR_NOT_SUPPORTED, done); + break; + + default: + status = NV_ERR_INVALID_PARAMETER; + } + +done: + free_status = uvm_rm_locked_call(nvUvmInterfaceFreeDupedHandle(rm_device, duped_memory)); + if (status == NV_OK) + status = free_status; + + return status; +} + +NV_STATUS uvm_test_get_rm_ptes(UVM_TEST_GET_RM_PTES_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_read_rm(va_space); + + switch (params->test_mode) { + case UVM_TEST_GET_RM_PTES_SINGLE_GPU: + status = test_get_rm_ptes_single_gpu(va_space, params); + break; + + case UVM_TEST_GET_RM_PTES_MULTI_GPU_SUPPORTED: + case UVM_TEST_GET_RM_PTES_MULTI_GPU_SLI_SUPPORTED: + case UVM_TEST_GET_RM_PTES_MULTI_GPU_NOT_SUPPORTED: + status = test_get_rm_ptes_multi_gpu(va_space, params); + break; + + default: + status = NV_ERR_INVALID_PARAMETER; + } + uvm_va_space_up_read_rm(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_global.c b/kernel-open/nvidia-uvm/uvm_global.c new file mode 100644 index 000000000..6759a54ff --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_global.c @@ -0,0 +1,473 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_api.h" +#include "uvm_ats.h" +#include "uvm_global.h" +#include "uvm_gpu_replayable_faults.h" +#include "uvm_mem.h" +#include "uvm_perf_events.h" +#include "uvm_procfs.h" +#include "uvm_thread_context.h" +#include "uvm_va_range.h" +#include "uvm_kvmalloc.h" +#include "uvm_mmu.h" +#include "uvm_perf_heuristics.h" +#include "uvm_pmm_sysmem.h" +#include "uvm_migrate.h" +#include "uvm_gpu_access_counters.h" +#include "uvm_va_space_mm.h" +#include "nv_uvm_interface.h" + +uvm_global_t g_uvm_global; +static struct UvmOpsUvmEvents g_exported_uvm_ops; +static bool g_ops_registered = false; + +static NV_STATUS uvm_register_callbacks(void) +{ + NV_STATUS status = NV_OK; + + g_exported_uvm_ops.suspend = uvm_suspend_entry; + g_exported_uvm_ops.resume = uvm_resume_entry; + g_exported_uvm_ops.startDevice = NULL; + g_exported_uvm_ops.stopDevice = NULL; + g_exported_uvm_ops.isrTopHalf = uvm_isr_top_half_entry; + + // Register the UVM callbacks with the main GPU driver: + status = uvm_rm_locked_call(nvUvmInterfaceRegisterUvmCallbacks(&g_exported_uvm_ops)); + if (status != NV_OK) + return status; + + g_ops_registered = true; + return NV_OK; +} + +// Calling this function more than once is harmless: +static void uvm_unregister_callbacks(void) +{ + if (g_ops_registered) { + uvm_rm_locked_call_void(nvUvmInterfaceDeRegisterUvmOps()); + g_ops_registered = false; + } +} + + + + + + + + +NV_STATUS uvm_global_init(void) +{ + NV_STATUS status; + UvmPlatformInfo platform_info; + + // Initialization of thread contexts happened already, during registration + // (addition) of the thread context associated with the UVM module entry + // point that is calling this function. + UVM_ASSERT(uvm_thread_context_global_initialized()); + + uvm_mutex_init(&g_uvm_global.global_lock, UVM_LOCK_ORDER_GLOBAL); + uvm_init_rwsem(&g_uvm_global.pm.lock, UVM_LOCK_ORDER_GLOBAL_PM); + uvm_spin_lock_irqsave_init(&g_uvm_global.gpu_table_lock, UVM_LOCK_ORDER_LEAF); + uvm_mutex_init(&g_uvm_global.va_spaces.lock, UVM_LOCK_ORDER_VA_SPACES_LIST); + INIT_LIST_HEAD(&g_uvm_global.va_spaces.list); + + status = uvm_kvmalloc_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_kvmalloc_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = errno_to_nv_status(nv_kthread_q_init(&g_uvm_global.global_q, "UVM global queue")); + if (status != NV_OK) { + UVM_DBG_PRINT("nv_kthread_q_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = errno_to_nv_status(nv_kthread_q_init(&g_uvm_global.deferred_release_q, "UVM deferred release queue")); + if (status != NV_OK) { + UVM_DBG_PRINT("nv_kthread_q_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_procfs_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_procfs_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_rm_locked_call(nvUvmInterfaceSessionCreate(&g_uvm_global.rm_session_handle, &platform_info)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfaceSessionCreate() failed: %s\n", nvstatusToString(status)); + return status; + } + + uvm_ats_init(&platform_info); + g_uvm_global.num_simulated_devices = 0; + + + + + + status = uvm_gpu_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_gpu_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_pmm_sysmem_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_pmm_sysmem_init() failed: %s\n", nvstatusToString(status)); + 
goto error; + } + + status = uvm_mmu_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_mmu_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_mem_global_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_mem_gloal_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_va_policy_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_va_policy_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_va_range_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_va_range_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_range_group_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_range_group_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_migrate_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_migrate_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_perf_events_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_perf_events_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_perf_heuristics_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_perf_heuristics_init() failed: %s\n", nvstatusToString(status)); + goto error; + } + + status = uvm_service_block_context_init(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_service_block_context_init failed: %s\n", nvstatusToString(status)); + goto error; + } + + // This sets up the ISR (interrupt service routine), by hooking into RM's top-half ISR callback. As soon as this + // call completes, GPU interrupts will start arriving, so it's important to be prepared to receive interrupts before + // this point: + status = uvm_register_callbacks(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_register_callbacks failed: %s\n", nvstatusToString(status)); + goto error; + } + + return NV_OK; + +error: + uvm_global_exit(); + return status; +} + +void uvm_global_exit(void) +{ + uvm_assert_mutex_unlocked(&g_uvm_global.global_lock); + + // Guarantee completion of any release callbacks scheduled after the flush + // in uvm_resume(). + nv_kthread_q_flush(&g_uvm_global.deferred_release_q); + + uvm_unregister_callbacks(); + uvm_service_block_context_exit(); + + uvm_perf_heuristics_exit(); + uvm_perf_events_exit(); + uvm_migrate_exit(); + uvm_range_group_exit(); + uvm_va_range_exit(); + uvm_va_policy_exit(); + uvm_mem_global_exit(); + uvm_pmm_sysmem_exit(); + uvm_gpu_exit(); + + if (g_uvm_global.rm_session_handle != 0) + uvm_rm_locked_call_void(nvUvmInterfaceSessionDestroy(g_uvm_global.rm_session_handle)); + + uvm_procfs_exit(); + + nv_kthread_q_stop(&g_uvm_global.deferred_release_q); + nv_kthread_q_stop(&g_uvm_global.global_q); + + uvm_assert_mutex_unlocked(&g_uvm_global.va_spaces.lock); + UVM_ASSERT(list_empty(&g_uvm_global.va_spaces.list)); + + uvm_thread_context_global_exit(); + uvm_kvmalloc_exit(); +} + +// Signal to the top-half ISR whether calls from the RM's top-half ISR are to +// be completed without processing. 
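//
// (Editorial sketch, not part of the original driver source.) A top-half
// handler built on this flag would take the same interrupts_lock, test
// gpu->parent->isr.is_suspended, and return to RM without scheduling a
// bottom half while it is set, roughly:
//
//   uvm_spin_lock_irqsave(&gpu->parent->isr.interrupts_lock);
//   if (gpu->parent->isr.is_suspended) {
//       uvm_spin_unlock_irqrestore(&gpu->parent->isr.interrupts_lock);
//       return;
//   }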
+static void uvm_gpu_set_isr_suspended(uvm_gpu_t *gpu, bool is_suspended) +{ + uvm_spin_lock_irqsave(&gpu->parent->isr.interrupts_lock); + + gpu->parent->isr.is_suspended = is_suspended; + + uvm_spin_unlock_irqrestore(&gpu->parent->isr.interrupts_lock); +} + +static NV_STATUS uvm_suspend(void) +{ + uvm_va_space_t *va_space = NULL; + uvm_global_gpu_id_t gpu_id; + uvm_gpu_t *gpu; + + // Upon entry into this function, the following is true: + // * GPU interrupts are enabled + // * Any number of fault or access counter notifications could + // be pending + // * No new fault notifications will appear, but new access + // counter notifications could + // * Any of the bottom halves could be running + // * New bottom halves of all types could be scheduled as GPU + // interrupts are handled + // Due to this, the sequence of suspend operations for each GPU is the + // following: + // * Flush the fault buffer to prevent fault interrupts when + // the top-half ISR is suspended + // * Suspend access counter processing + // * Suspend the top-half ISR + // * Flush relevant kthread queues (bottom half, etc.) + + // Some locks acquired by this function, such as pm.lock, are released + // by uvm_resume(). This is contrary to the lock tracking code's + // expectations, so lock tracking is disabled. + uvm_thread_context_lock_disable_tracking(); + + // Take the global power management lock in write mode to lock out + // most user-facing entry points. + uvm_down_write(&g_uvm_global.pm.lock); + + nv_kthread_q_flush(&g_uvm_global.global_q); + + // Though global_lock isn't held here, pm.lock indirectly prevents the + // addition and removal of GPUs, since these operations can currently + // only occur in response to ioctl() calls. + for_each_global_gpu_id_in_mask(gpu_id, &g_uvm_global.retained_gpus) { + gpu = uvm_gpu_get(gpu_id); + + // Since fault buffer state may be lost across sleep cycles, UVM must + // ensure any outstanding replayable faults are dismissed. The RM + // guarantees that all user channels have been preempted before + // uvm_suspend() is called, which implies that no user channels can be + // stalled on faults when this point is reached. + if (gpu->parent->replayable_faults_supported) + uvm_gpu_fault_buffer_flush(gpu); + + // TODO: Bug 2535118: flush the non-replayable fault buffer + + // Stop access counter interrupt processing for the duration of this + // sleep cycle to defend against potential interrupt storms in + // the suspend path: if rate limiting is applied to access counter + // interrupts in the bottom half in the future, the bottom half flush + // below will no longer be able to guarantee that all outstanding + // notifications have been handled. + uvm_gpu_access_counters_set_ignore(gpu, true); + + uvm_gpu_set_isr_suspended(gpu, true); + + nv_kthread_q_flush(&gpu->parent->isr.bottom_half_q); + + if (gpu->parent->isr.non_replayable_faults.handling) + nv_kthread_q_flush(&gpu->parent->isr.kill_channel_q); + } + + // Acquire each VA space's lock in write mode to lock out VMA open and + // release callbacks. These entry points do not have feasible early exit + // options, and so aren't suitable for synchronization with pm.lock. 
+ uvm_mutex_lock(&g_uvm_global.va_spaces.lock); + + list_for_each_entry(va_space, &g_uvm_global.va_spaces.list, list_node) + uvm_va_space_down_write(va_space); + + uvm_mutex_unlock(&g_uvm_global.va_spaces.lock); + + uvm_thread_context_lock_enable_tracking(); + + g_uvm_global.pm.is_suspended = true; + + return NV_OK; +} + +NV_STATUS uvm_suspend_entry(void) +{ + UVM_ENTRY_RET(uvm_suspend()); +} + +static NV_STATUS uvm_resume(void) +{ + uvm_va_space_t *va_space = NULL; + uvm_global_gpu_id_t gpu_id; + uvm_gpu_t *gpu; + + g_uvm_global.pm.is_suspended = false; + + // Some locks released by this function, such as pm.lock, were acquired + // by uvm_suspend(). This is contrary to the lock tracking code's + // expectations, so lock tracking is disabled. + uvm_thread_context_lock_disable_tracking(); + + // Release each VA space's lock. + uvm_mutex_lock(&g_uvm_global.va_spaces.lock); + + list_for_each_entry(va_space, &g_uvm_global.va_spaces.list, list_node) + uvm_va_space_up_write(va_space); + + uvm_mutex_unlock(&g_uvm_global.va_spaces.lock); + + // pm.lock is held in lieu of global_lock to prevent GPU addition/removal + for_each_global_gpu_id_in_mask(gpu_id, &g_uvm_global.retained_gpus) { + gpu = uvm_gpu_get(gpu_id); + + // Bring the fault buffer software state back in sync with the + // hardware state. + uvm_gpu_fault_buffer_resume(gpu->parent); + + uvm_gpu_set_isr_suspended(gpu, false); + + // Reenable access counter interrupt processing unless notifications + // have been set to be suppressed. + uvm_gpu_access_counters_set_ignore(gpu, false); + } + + uvm_up_write(&g_uvm_global.pm.lock); + + uvm_thread_context_lock_enable_tracking(); + + // Force completion of any release callbacks successfully queued for + // deferred completion while suspended. The deferred release + // queue is not guaranteed to remain empty following this flush since + // some threads that failed to acquire pm.lock in uvm_release() may + // not have scheduled their handlers yet. 
+ nv_kthread_q_flush(&g_uvm_global.deferred_release_q); + + return NV_OK; +} + +NV_STATUS uvm_resume_entry(void) +{ + UVM_ENTRY_RET(uvm_resume()); +} + +bool uvm_global_is_suspended(void) +{ + return g_uvm_global.pm.is_suspended; +} + +void uvm_global_set_fatal_error_impl(NV_STATUS error) +{ + NV_STATUS previous_error; + + UVM_ASSERT(error != NV_OK); + + previous_error = nv_atomic_cmpxchg(&g_uvm_global.fatal_error, NV_OK, error); + + if (previous_error == NV_OK) { + UVM_ERR_PRINT("Encountered a global fatal error: %s\n", nvstatusToString(error)); + } + else { + UVM_ERR_PRINT("Encountered a global fatal error: %s after a global error has been already set: %s\n", + nvstatusToString(error), nvstatusToString(previous_error)); + } +} + +NV_STATUS uvm_global_reset_fatal_error(void) +{ + if (!uvm_enable_builtin_tests) { + UVM_ASSERT_MSG(0, "Resetting global fatal error without tests being enabled\n"); + return NV_ERR_INVALID_STATE; + } + + return nv_atomic_xchg(&g_uvm_global.fatal_error, NV_OK); +} + +void uvm_global_mask_retain(const uvm_global_processor_mask_t *mask) +{ + uvm_gpu_t *gpu; + for_each_global_gpu_in_mask(gpu, mask) + uvm_gpu_retain(gpu); +} + +void uvm_global_mask_release(const uvm_global_processor_mask_t *mask) +{ + uvm_global_gpu_id_t gpu_id; + + if (uvm_global_processor_mask_empty(mask)) + return; + + uvm_mutex_lock(&g_uvm_global.global_lock); + + // Do not use for_each_global_gpu_in_mask as it reads the GPU state and it + // might get destroyed + for_each_global_gpu_id_in_mask(gpu_id, mask) + uvm_gpu_release_locked(uvm_gpu_get(gpu_id)); + + uvm_mutex_unlock(&g_uvm_global.global_lock); +} + +NV_STATUS uvm_global_mask_check_ecc_error(uvm_global_processor_mask_t *gpus) +{ + uvm_gpu_t *gpu; + + for_each_global_gpu_in_mask(gpu, gpus) { + NV_STATUS status = uvm_gpu_check_ecc_error(gpu); + if (status != NV_OK) + return status; + } + + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_global.h b/kernel-open/nvidia-uvm/uvm_global.h new file mode 100644 index 000000000..ae31d8ea3 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_global.h @@ -0,0 +1,416 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_GLOBAL_H__ +#define __UVM_GLOBAL_H__ + +#include "nv_uvm_types.h" +#include "uvm_extern_decl.h" +#include "uvm_linux.h" +#include "uvm_common.h" +#include "uvm_processors.h" +#include "uvm_gpu.h" +#include "uvm_lock.h" +#include "uvm_ats_ibm.h" + +// Global state of the uvm driver +struct uvm_global_struct +{ + // Mask of retained GPUs. + // Note that GPUs are added to this mask as the last step of add_gpu() and + // removed from it as the first step of remove_gpu() implying that a GPU + // that's being initialized or deinitialized will not be in it. + uvm_global_processor_mask_t retained_gpus; + + // Array of the parent GPUs registered with UVM. Note that GPUs will have + // ids offset by 1 to accomodate the UVM_GLOBAL_ID_CPU so e.g. + // parent_gpus[0] will have GPU id = 1. A GPU entry is unused iff it does + // not exist (is a NULL pointer) in this table. + uvm_parent_gpu_t *parent_gpus[UVM_MAX_GPUS]; + + // A global RM session (RM client) + // Created on module load and destroyed on module unload + uvmGpuSessionHandle rm_session_handle; + + // peer-to-peer table + // peer info is added and removed from this table when usermode + // driver calls UvmEnablePeerAccess and UvmDisablePeerAccess + // respectively. + uvm_gpu_peer_t peers[UVM_MAX_UNIQUE_GPU_PAIRS]; + + // peer-to-peer copy mode + // Pascal+ GPUs support virtual addresses in p2p copies. + // Ampere+ GPUs add support for physical addresses in p2p copies. + uvm_gpu_peer_copy_mode_t peer_copy_mode; + + // Stores an NV_STATUS, once it becomes != NV_OK, the driver should refuse to + // do most anything other than try and clean up as much as possible. + // An example of a fatal error is an unrecoverable ECC error on one of the + // GPUs. + atomic_t fatal_error; + + // A flag to disable the assert on fatal error + // To be used by tests and only consulted if tests are enabled. + bool disable_fatal_error_assert; + + // Lock protecting the global state + uvm_mutex_t global_lock; + + struct + { + // Lock synchronizing user threads with power management activity + uvm_rw_semaphore_t lock; + + // Power management state flag; tested by UVM_GPU_WRITE_ONCE() + // and UVM_GPU_READ_ONCE() to detect accesses to GPUs when + // UVM is suspended. + bool is_suspended; + } pm; + + // This lock synchronizes addition and removal of GPUs from UVM's global + // table. It must be held whenever g_uvm_global.parent_gpus[] is written. In + // order to read from this table, you must hold either the gpu_table_lock, + // or the global_lock. + // + // This is a leaf lock. + uvm_spinlock_irqsave_t gpu_table_lock; + + // Number of simulated/emulated devices that have registered with UVM + unsigned num_simulated_devices; + + // A single queue for deferred work that is non-GPU-specific. + nv_kthread_q_t global_q; + + // A single queue for deferred f_ops->release() handling. Items scheduled to + // run on it may block for the duration of system sleep cycles, stalling + // the queue and preventing any other items from running. + nv_kthread_q_t deferred_release_q; + + struct + { + // Indicates whether the system HW supports ATS. This field is set once + // during global initialization (uvm_global_init), and can be read + // afterwards without acquiring any locks. + bool supported; + + // On top of HW platform support, ATS support can be overridden using + // the module parameter uvm_ats_mode. 
This field is set once during + // global initialization (uvm_global_init), and can be read afterwards + // without acquiring any locks. + bool enabled; + } ats; + +#if UVM_IBM_NPU_SUPPORTED() + // On IBM systems this array tracks the active NPUs (the NPUs which are + // attached to retained GPUs). + uvm_ibm_npu_t npus[NV_MAX_NPUS]; +#endif + + // List of all active VA spaces + struct + { + uvm_mutex_t lock; + struct list_head list; + } va_spaces; + + // Notify a registered process about the driver state after it's unloaded. + // The intent is to systematically report any error during the driver + // teardown. unload_state is used for testing only. + struct + { + // ptr points to a 8-byte buffer within page. + NvU64 *ptr; + struct page *page; + } unload_state; + + + + + + + +}; + +// Initialize global uvm state +NV_STATUS uvm_global_init(void); + +// Deinitialize global state (called from module exit) +void uvm_global_exit(void); + +// Prepare for entry into a system sleep state +NV_STATUS uvm_suspend_entry(void); + +// Recover after exit from a system sleep state +NV_STATUS uvm_resume_entry(void); + +// Add parent GPU to the global table. +// +// LOCKING: requires that you hold the global lock and gpu_table_lock +static void uvm_global_add_parent_gpu(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 gpu_index = uvm_id_gpu_index(parent_gpu->id); + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + uvm_assert_spinlock_locked(&g_uvm_global.gpu_table_lock); + + UVM_ASSERT(!g_uvm_global.parent_gpus[gpu_index]); + g_uvm_global.parent_gpus[gpu_index] = parent_gpu; +} + +// Remove parent GPU from the global table. +// +// LOCKING: requires that you hold the global lock and gpu_table_lock +static void uvm_global_remove_parent_gpu(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 gpu_index = uvm_id_gpu_index(parent_gpu->id); + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + uvm_assert_spinlock_locked(&g_uvm_global.gpu_table_lock); + + UVM_ASSERT(g_uvm_global.parent_gpus[gpu_index]); + UVM_ASSERT(g_uvm_global.parent_gpus[gpu_index] == parent_gpu); + + g_uvm_global.parent_gpus[gpu_index] = NULL; +} + +// Get a gpu by its global id. +// Returns a pointer to the GPU object, or NULL if not found. +// +// LOCKING: requires that you hold the gpu_table_lock, the global_lock, or have +// retained the gpu. +static uvm_gpu_t *uvm_gpu_get(uvm_global_gpu_id_t global_gpu_id) +{ + uvm_parent_gpu_t *parent_gpu; + + parent_gpu = g_uvm_global.parent_gpus[uvm_id_gpu_index_from_global_gpu_id(global_gpu_id)]; + if (!parent_gpu) + return NULL; + + return parent_gpu->gpus[uvm_global_id_sub_processor_index(global_gpu_id)]; +} + +// Get a gpu by its processor id. +// Returns a pointer to the GPU object, or NULL if not found. +// +// LOCKING: requires that you hold the gpu_table_lock, the global_lock, or have +// retained the gpu. +static uvm_gpu_t *uvm_gpu_get_by_processor_id(uvm_processor_id_t id) +{ + uvm_global_gpu_id_t global_id = uvm_global_gpu_id_from_gpu_id(id); + uvm_gpu_t *gpu = uvm_gpu_get(global_id); + + if (gpu) + UVM_ASSERT(!gpu->parent->smc.enabled); + + return gpu; +} + +static uvmGpuSessionHandle uvm_gpu_session_handle(uvm_gpu_t *gpu) +{ + if (gpu->parent->smc.enabled) + return gpu->smc.rm_session_handle; + return g_uvm_global.rm_session_handle; +} + +// Use these READ_ONCE()/WRITE_ONCE() wrappers when accessing GPU resources +// in BAR0/BAR1 to detect cases in which GPUs are accessed when UVM is +// suspended. 
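// Illustrative use only (not part of this header): gpu_info_print_common() in
// uvm_gpu.c reads the access counter buffer pointers through this wrapper, so
// an access made while the driver is suspended trips the assert instead of
// silently touching BAR0/BAR1:
//
//     get = UVM_GPU_READ_ONCE(*gpu->parent->access_counter_buffer_info.rm_info.pAccessCntrBufferGet);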
+#define UVM_GPU_WRITE_ONCE(x, val) do { \ + UVM_ASSERT(!uvm_global_is_suspended()); \ + UVM_WRITE_ONCE(x, val); \ + } while (0) + +#define UVM_GPU_READ_ONCE(x) ({ \ + UVM_ASSERT(!uvm_global_is_suspended()); \ + UVM_READ_ONCE(x); \ + }) + +static bool global_is_fatal_error_assert_disabled(void) +{ + // Only allow the assert to be disabled if tests are enabled + if (!uvm_enable_builtin_tests) + return false; + + return g_uvm_global.disable_fatal_error_assert; +} + +// Set a global fatal error +// Once that happens the the driver should refuse to do anything other than try +// and clean up as much as possible. +// An example of a fatal error is an unrecoverable ECC error on one of the +// GPUs. +// Use a macro so that the assert below provides precise file and line info and +// a backtrace. +#define uvm_global_set_fatal_error(error) \ + do { \ + if (!global_is_fatal_error_assert_disabled()) \ + UVM_ASSERT_MSG(0, "Fatal error: %s\n", nvstatusToString(error)); \ + uvm_global_set_fatal_error_impl(error); \ + } while (0) +void uvm_global_set_fatal_error_impl(NV_STATUS error); + +// Get the global status +static NV_STATUS uvm_global_get_status(void) +{ + return atomic_read(&g_uvm_global.fatal_error); +} + +// Reset global fatal error +// This is to be used by tests triggering the global error on purpose only. +// Returns the value of the global error field that existed just before this +// reset call was made. +NV_STATUS uvm_global_reset_fatal_error(void); + +static uvm_gpu_t *uvm_global_processor_mask_find_first_gpu(const uvm_global_processor_mask_t *global_gpus) +{ + uvm_gpu_t *gpu; + uvm_global_gpu_id_t gpu_id = uvm_global_processor_mask_find_first_gpu_id(global_gpus); + + if (UVM_GLOBAL_ID_IS_INVALID(gpu_id)) + return NULL; + + gpu = uvm_gpu_get(gpu_id); + + // If there is valid GPU id in the mask, assert that the corresponding + // uvm_gpu_t is present. Otherwise it would stop a + // for_each_global_gpu_in_mask() loop pre-maturely. Today, this could only + // happen in remove_gpu() because the GPU being removed is deleted from the + // global table very early. + UVM_ASSERT_MSG(gpu, "gpu_id %u\n", uvm_global_id_value(gpu_id)); + + return gpu; +} + +static uvm_gpu_t *__uvm_global_processor_mask_find_next_gpu(const uvm_global_processor_mask_t *global_gpus, uvm_gpu_t *gpu) +{ + uvm_global_gpu_id_t gpu_id; + + UVM_ASSERT(gpu); + + gpu_id = uvm_global_processor_mask_find_next_id(global_gpus, uvm_global_gpu_id_next(gpu->global_id)); + if (UVM_GLOBAL_ID_IS_INVALID(gpu_id)) + return NULL; + + gpu = uvm_gpu_get(gpu_id); + + // See comment in uvm_global_processor_mask_find_first_gpu(). 
+ UVM_ASSERT_MSG(gpu, "gpu_id %u\n", uvm_global_id_value(gpu_id)); + + return gpu; +} + +// Helper to iterate over all GPUs in the input mask +#define for_each_global_gpu_in_mask(gpu, global_mask) \ + for (gpu = uvm_global_processor_mask_find_first_gpu(global_mask); \ + gpu != NULL; \ + gpu = __uvm_global_processor_mask_find_next_gpu(global_mask, gpu)) + +// Helper to iterate over all GPUs retained by the UVM driver (across all va spaces) +#define for_each_global_gpu(gpu) \ + for (({uvm_assert_mutex_locked(&g_uvm_global.global_lock); \ + gpu = uvm_global_processor_mask_find_first_gpu(&g_uvm_global.retained_gpus);}); \ + gpu != NULL; \ + gpu = __uvm_global_processor_mask_find_next_gpu(&g_uvm_global.retained_gpus, gpu)) + +// LOCKING: Must hold either the global_lock or the gpu_table_lock +static uvm_parent_gpu_t *uvm_global_find_next_parent_gpu(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 i; + + if (parent_gpu) { + NvU32 gpu_index = uvm_id_gpu_index(parent_gpu->id); + i = gpu_index + 1; + } + else { + i = 0; + } + + parent_gpu = NULL; + + while (i < UVM_MAX_GPUS) { + if (g_uvm_global.parent_gpus[i]) { + parent_gpu = g_uvm_global.parent_gpus[i]; + break; + } + + i++; + } + + return parent_gpu; +} + +// LOCKING: Must hold the global_lock +static uvm_gpu_t *uvm_gpu_find_next_valid_gpu_in_parent(uvm_parent_gpu_t *parent_gpu, uvm_gpu_t *cur_gpu) +{ + uvm_gpu_t *gpu = NULL; + uvm_global_gpu_id_t global_gpu_id; + NvU32 sub_processor_index; + NvU32 cur_sub_processor_index; + + UVM_ASSERT(parent_gpu); + + global_gpu_id = uvm_global_gpu_id_from_gpu_id(parent_gpu->id); + cur_sub_processor_index = cur_gpu ? uvm_global_id_sub_processor_index(cur_gpu->global_id) : -1; + + sub_processor_index = find_next_bit(parent_gpu->valid_gpus, UVM_ID_MAX_SUB_PROCESSORS, cur_sub_processor_index + 1); + if (sub_processor_index < UVM_ID_MAX_SUB_PROCESSORS) { + gpu = uvm_gpu_get(uvm_global_id_from_value(uvm_global_id_value(global_gpu_id) + sub_processor_index)); + UVM_ASSERT(gpu != NULL); + } + + return gpu; +} + +// LOCKING: Must hold either the global_lock or the gpu_table_lock +#define for_each_parent_gpu(parent_gpu) \ + for ((parent_gpu) = uvm_global_find_next_parent_gpu(NULL); \ + (parent_gpu) != NULL; \ + (parent_gpu) = uvm_global_find_next_parent_gpu((parent_gpu))) + +// LOCKING: Must hold the global_lock +#define for_each_gpu_in_parent(parent_gpu, gpu) \ + for (({uvm_assert_mutex_locked(&g_uvm_global.global_lock); \ + (gpu) = uvm_gpu_find_next_valid_gpu_in_parent((parent_gpu), NULL);}); \ + (gpu) != NULL; \ + (gpu) = uvm_gpu_find_next_valid_gpu_in_parent((parent_gpu), (gpu))) + +// Helper which calls uvm_gpu_retain on each GPU in mask +void uvm_global_mask_retain(const uvm_global_processor_mask_t *mask); + +// Helper which calls uvm_gpu_release_locked on each GPU in mask. +// +// LOCKING: this function takes and releases the global lock if the input mask +// is not empty +void uvm_global_mask_release(const uvm_global_processor_mask_t *mask); + +// Check for ECC errors for all GPUs in a mask +// Notably this check cannot be performed where it's not safe to call into RM. +NV_STATUS uvm_global_mask_check_ecc_error(uvm_global_processor_mask_t *gpus); + +// Pre-allocate fault service contexts. +NV_STATUS uvm_service_block_context_init(void); + +// Release fault service contexts if any exist. 
+void uvm_service_block_context_exit(void); + +#endif // __UVM_GLOBAL_H__ diff --git a/kernel-open/nvidia-uvm/uvm_gpu.c b/kernel-open/nvidia-uvm/uvm_gpu.c new file mode 100644 index 000000000..6ed161022 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu.c @@ -0,0 +1,3316 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "nv_uvm_interface.h" +#include "uvm_api.h" +#include "uvm_channel.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_gpu_semaphore.h" +#include "uvm_hal.h" +#include "uvm_procfs.h" +#include "uvm_pmm_gpu.h" +#include "uvm_pmm_sysmem.h" +#include "uvm_va_space.h" +#include "uvm_user_channel.h" +#include "uvm_perf_events.h" +#include "uvm_perf_heuristics.h" +#include "uvm_common.h" +#include "ctrl2080mc.h" +#include "nv-kthread-q.h" +#include "uvm_gpu_access_counters.h" +#include "uvm_ats.h" +#include "uvm_test.h" + + + + +#include "uvm_linux.h" + +#define UVM_PROC_GPUS_PEER_DIR_NAME "peers" + +// The uvm_peer_copy module parameter enables to choose from "phys" or "virt". +// It determines the addressing mode for P2P copies. +#define UVM_PARAM_PEER_COPY_VIRTUAL "virt" +#define UVM_PARAM_PEER_COPY_PHYSICAL "phys" +static char *uvm_peer_copy = UVM_PARAM_PEER_COPY_PHYSICAL; +module_param(uvm_peer_copy, charp, S_IRUGO); +MODULE_PARM_DESC(uvm_peer_copy, "Choose the addressing mode for peer copying, options: " + UVM_PARAM_PEER_COPY_PHYSICAL " [default] or " UVM_PARAM_PEER_COPY_VIRTUAL ". 
" + "Valid for Ampere+ GPUs."); + +static void remove_gpu(uvm_gpu_t *gpu); +static void disable_peer_access(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1); +static NV_STATUS discover_nvlink_peers(uvm_gpu_t *gpu); +static void destroy_nvlink_peers(uvm_gpu_t *gpu); + +static uvm_user_channel_t *get_user_channel(uvm_rb_tree_node_t *node) +{ + return container_of(node, uvm_user_channel_t, instance_ptr.node); +} + +static void fill_gpu_info(uvm_parent_gpu_t *parent_gpu, const UvmGpuInfo *gpu_info) +{ + char uuid_buffer[UVM_GPU_UUID_TEXT_BUFFER_LENGTH]; + + parent_gpu->rm_info = *gpu_info; + + format_uuid_to_buffer(uuid_buffer, sizeof(uuid_buffer), &parent_gpu->uuid); + snprintf(parent_gpu->name, + sizeof(parent_gpu->name), + "ID %u: %s: %s", + uvm_id_value(parent_gpu->id), + parent_gpu->rm_info.name, + uuid_buffer); +} + +static uvm_gpu_link_type_t get_gpu_link_type(UVM_LINK_TYPE link_type) +{ + switch (link_type) { + case UVM_LINK_TYPE_PCIE: + return UVM_GPU_LINK_PCIE; + case UVM_LINK_TYPE_NVLINK_1: + return UVM_GPU_LINK_NVLINK_1; + case UVM_LINK_TYPE_NVLINK_2: + return UVM_GPU_LINK_NVLINK_2; + case UVM_LINK_TYPE_NVLINK_3: + return UVM_GPU_LINK_NVLINK_3; + + + + + + + default: + return UVM_GPU_LINK_INVALID; + } +} + +static NV_STATUS get_gpu_caps(uvm_parent_gpu_t *parent_gpu) +{ + NV_STATUS status; + UvmGpuCaps gpu_caps; + + memset(&gpu_caps, 0, sizeof(gpu_caps)); + + status = uvm_rm_locked_call(nvUvmInterfaceQueryCaps(parent_gpu->rm_device, &gpu_caps)); + if (status != NV_OK) + return status; + + parent_gpu->sysmem_link = get_gpu_link_type(gpu_caps.sysmemLink); + UVM_ASSERT(parent_gpu->sysmem_link != UVM_GPU_LINK_INVALID); + + parent_gpu->sysmem_link_rate_mbyte_per_s = gpu_caps.sysmemLinkRateMBps; + parent_gpu->nvswitch_info.is_nvswitch_connected = gpu_caps.connectedToSwitch; + + // nvswitch is routed via physical pages, where the upper 13-bits of the + // 47-bit address space holds the routing information for each peer. + // Currently, this is limited to a 16GB framebuffer window size. 
+ if (parent_gpu->nvswitch_info.is_nvswitch_connected) + parent_gpu->nvswitch_info.fabric_memory_window_start = gpu_caps.nvswitchMemoryWindowStart; + + if (gpu_caps.numaEnabled) { + parent_gpu->numa_info.enabled = true; + parent_gpu->numa_info.node_id = gpu_caps.numaNodeId; + parent_gpu->numa_info.system_memory_window_start = gpu_caps.systemMemoryWindowStart; + parent_gpu->numa_info.system_memory_window_end = gpu_caps.systemMemoryWindowStart + + gpu_caps.systemMemoryWindowSize - + 1; + } + else { + UVM_ASSERT(!g_uvm_global.ats.enabled); + } + + return NV_OK; +} + +static NV_STATUS alloc_and_init_address_space(uvm_gpu_t *gpu) +{ + NV_STATUS status; + UvmGpuAddressSpaceInfo gpu_address_space_info = {0}; + + status = uvm_rm_locked_call(nvUvmInterfaceAddressSpaceCreate(uvm_gpu_device_handle(gpu), + gpu->parent->rm_va_base, + gpu->parent->rm_va_size, + &gpu->rm_address_space, + &gpu_address_space_info)); + if (status != NV_OK) + return status; + + gpu->big_page.internal_size = gpu_address_space_info.bigPageSize; + + gpu->time.time0_register = gpu_address_space_info.time0Offset; + gpu->time.time1_register = gpu_address_space_info.time1Offset; + + gpu->max_subcontexts = gpu_address_space_info.maxSubctxCount; + + return NV_OK; +} + +static NV_STATUS get_gpu_fb_info(uvm_gpu_t *gpu) +{ + NV_STATUS status; + UvmGpuFbInfo fb_info = {0}; + + status = uvm_rm_locked_call(nvUvmInterfaceGetFbInfo(uvm_gpu_device_handle(gpu), &fb_info)); + if (status != NV_OK) + return status; + + if (!fb_info.bZeroFb) { + gpu->mem_info.size = ((NvU64)fb_info.heapSize + fb_info.reservedHeapSize) * 1024; + gpu->mem_info.max_allocatable_address = fb_info.maxAllocatableAddress; + } + + return NV_OK; +} + +static NV_STATUS get_gpu_ecc_info(uvm_gpu_t *gpu) +{ + NV_STATUS status; + UvmGpuEccInfo ecc_info = {0}; + + status = uvm_rm_locked_call(nvUvmInterfaceGetEccInfo(uvm_gpu_device_handle(gpu), &ecc_info)); + if (status != NV_OK) + return status; + + gpu->ecc.enabled = ecc_info.bEccEnabled; + if (gpu->ecc.enabled) { + gpu->ecc.hw_interrupt_tree_location = (volatile NvU32*)((char*)ecc_info.eccReadLocation + ecc_info.eccOffset); + UVM_ASSERT(gpu->ecc.hw_interrupt_tree_location != NULL); + + gpu->ecc.mask = ecc_info.eccMask; + UVM_ASSERT(gpu->ecc.mask != 0); + + gpu->ecc.error_notifier = ecc_info.eccErrorNotifier; + UVM_ASSERT(gpu->ecc.error_notifier != NULL); + } + + return NV_OK; +} + +static bool gpu_supports_uvm(uvm_parent_gpu_t *parent_gpu) +{ + // TODO: Bug 1757136: Add Linux SLI support. Until then, explicitly disable + // UVM on SLI. + return parent_gpu->rm_info.subdeviceCount == 1; +} + +static bool parent_gpu_uses_canonical_form_address(uvm_parent_gpu_t *parent_gpu) +{ + NvU64 gpu_addr_shift; + NvU64 cpu_addr_shift; + + // PPC64LE doesn't use canonical form addresses. + if (NVCPU_IS_PPC64LE) + return false; + + // We use big_page_size as UVM_PAGE_SIZE_64K because num_va_bits() is + // big_page_size invariant in the MMU HAL. + UVM_ASSERT(!parent_gpu->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_128K) || + (parent_gpu->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_64K)->num_va_bits() == + parent_gpu->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_128K)->num_va_bits())); + + gpu_addr_shift = parent_gpu->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_64K)->num_va_bits(); + cpu_addr_shift = fls64(TASK_SIZE - 1) + 1; + + // Refer to the comments and diagram in uvm_gpu.c:uvm_gpu_can_address(). 
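    // Worked example with illustrative values (assuming an x86-64 kernel with
    // 4-level paging and a GPU exposing a 49-bit VA): fls64(TASK_SIZE - 1) + 1
    // evaluates to 48, so gpu_addr_shift (49) >= cpu_addr_shift (48) and this
    // GPU falls in the "GPU MAX VA >= CPU MAX VA" column of the diagram in
    // uvm_gpu_can_address().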
+ return gpu_addr_shift >= cpu_addr_shift; + +} + +bool uvm_gpu_can_address(uvm_gpu_t *gpu, NvU64 addr, NvU64 size) +{ + // Lower and upper address spaces are typically found in platforms that use + // the canonical address form. + NvU64 max_va_lower; + NvU64 addr_end = addr + size - 1; + + // Watch out for calling this too early in init + UVM_ASSERT(gpu->address_space_tree.hal); + UVM_ASSERT(gpu->address_space_tree.hal->num_va_bits() < 64); + UVM_ASSERT(addr <= addr_end); + UVM_ASSERT(size > 0); + + // Pascal+ GPUs are capable of accessing kernel pointers in various modes + // by applying the same upper-bit checks that x86, ARM, and Power + // processors do. x86 and ARM use canonical form addresses. For ARM, even + // with Top-Byte Ignore enabled, the following logic validates addresses + // from the kernel VA range. PowerPC does not use canonical form address. + // The following diagram illustrates the valid (V) VA regions that can be + // mapped (or addressed) by the GPU/CPU when the CPU uses canonical form. + // (C) regions are only accessible by the CPU. Similarly, (G) regions + // are only accessible by the GPU. (X) regions are not addressible. + // + // GPU MAX VA < CPU MAX VA GPU MAX VA >= CPU MAX VA + // 0xF..F +----------------+ 0xF..F +----------------+ + // |CCCCCCCCCCCCCCCC| |VVVVVVVVVVVVVVVV| + // |CCCCCCCCCCCCCCCC| |VVVVVVVVVVVVVVVV| + // |CCCCCCCCCCCCCCCC| |VVVVVVVVVVVVVVVV| + // |CCCCCCCCCCCCCCCC| CPU MIN UPPER VA|----------------| + // |CCCCCCCCCCCCCCCC| |GGGGGGGGGGGGGGGG| + // |CCCCCCCCCCCCCCCC| |GGGGGGGGGGGGGGGG| + // CPU MIN UPPER VA|----------------| GPU MIN UPPER VA|----------------| + // |XXXXXXXXXXXXXXXX| |XXXXXXXXXXXXXXXX| + // |XXXXXXXXXXXXXXXX| |XXXXXXXXXXXXXXXX| + // CPU MAX LOWER VA|----------------| GPU MAX LOWER VA|----------------| + // |CCCCCCCCCCCCCCCC| |GGGGGGGGGGGGGGGG| + // |CCCCCCCCCCCCCCCC| |GGGGGGGGGGGGGGGG| + // GPU MAX VA|----------------| CPU MAX LOWER VA|----------------| + // |VVVVVVVVVVVVVVVV| |VVVVVVVVVVVVVVVV| + // |VVVVVVVVVVVVVVVV| |VVVVVVVVVVVVVVVV| + // |VVVVVVVVVVVVVVVV| |VVVVVVVVVVVVVVVV| + // 0 +----------------+ 0 +----------------+ + + if (parent_gpu_uses_canonical_form_address(gpu->parent)) { + NvU64 min_va_upper = (NvU64)((NvS64)(1ULL << 63) >> (64 - gpu->address_space_tree.hal->num_va_bits())); + max_va_lower = 1ULL << (gpu->address_space_tree.hal->num_va_bits() - 1); + return (addr_end < max_va_lower) || (addr >= min_va_upper); + } + else { + max_va_lower = 1ULL << gpu->address_space_tree.hal->num_va_bits(); + return addr_end < max_va_lower; + } +} + +NvU64 uvm_parent_gpu_canonical_address(uvm_parent_gpu_t *parent_gpu, NvU64 addr) +{ + NvU32 gpu_va_bits; + NvU32 shift; + + if (parent_gpu_uses_canonical_form_address(parent_gpu)) { + gpu_va_bits = parent_gpu->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_64K)->num_va_bits(); + shift = 64 - gpu_va_bits; + addr = (NvU64)((NvS64)(addr << shift) >> shift); + } + + return addr; +} + +static void gpu_info_print_ce_caps(uvm_gpu_t *gpu, struct seq_file *s) +{ + NvU32 i; + UvmGpuCopyEnginesCaps ces_caps; + NV_STATUS status; + + memset(&ces_caps, 0, sizeof(ces_caps)); + status = uvm_rm_locked_call(nvUvmInterfaceQueryCopyEnginesCaps(uvm_gpu_device_handle(gpu), &ces_caps)); + + if (status != NV_OK) { + UVM_SEQ_OR_DBG_PRINT(s, "supported_ces: unavailable (query failed)\n"); + return; + } + + UVM_SEQ_OR_DBG_PRINT(s, "supported_ces:\n"); + for (i = 0; i < UVM_COPY_ENGINE_COUNT_MAX; ++i) { + UvmGpuCopyEngineCaps *ce_caps = ces_caps.copyEngineCaps + i; + + if (!ce_caps->supported) + continue; + + 
UVM_SEQ_OR_DBG_PRINT(s, " ce %u pce mask 0x%08x grce %u shared %u sysmem read %u sysmem write %u sysmem %u nvlink p2p %u " + "p2p %u\n", + i, + ce_caps->cePceMask, + ce_caps->grce, + ce_caps->shared, + ce_caps->sysmemRead, + ce_caps->sysmemWrite, + ce_caps->sysmem, + ce_caps->nvlinkP2p, + ce_caps->p2p); + } +} + +static const char *uvm_gpu_virt_type_string(UVM_VIRT_MODE virtMode) +{ + BUILD_BUG_ON(UVM_VIRT_MODE_COUNT != 4); + + switch (virtMode) { + UVM_ENUM_STRING_CASE(UVM_VIRT_MODE_NONE); + UVM_ENUM_STRING_CASE(UVM_VIRT_MODE_LEGACY); + UVM_ENUM_STRING_CASE(UVM_VIRT_MODE_SRIOV_HEAVY); + UVM_ENUM_STRING_CASE(UVM_VIRT_MODE_SRIOV_STANDARD); + UVM_ENUM_STRING_DEFAULT(); + } +} + +static const char *uvm_gpu_link_type_string(uvm_gpu_link_type_t link_type) +{ + + + + BUILD_BUG_ON(UVM_GPU_LINK_MAX != 5); + + + switch (link_type) { + UVM_ENUM_STRING_CASE(UVM_GPU_LINK_INVALID); + UVM_ENUM_STRING_CASE(UVM_GPU_LINK_PCIE); + UVM_ENUM_STRING_CASE(UVM_GPU_LINK_NVLINK_1); + UVM_ENUM_STRING_CASE(UVM_GPU_LINK_NVLINK_2); + UVM_ENUM_STRING_CASE(UVM_GPU_LINK_NVLINK_3); + + + + + UVM_ENUM_STRING_DEFAULT(); + } +} + +static void gpu_info_print_common(uvm_gpu_t *gpu, struct seq_file *s) +{ + const UvmGpuInfo *gpu_info = &gpu->parent->rm_info; + uvm_numa_info_t *numa_info = &gpu->parent->numa_info; + NvU64 num_pages_in; + NvU64 num_pages_out; + NvU64 mapped_cpu_pages_size; + NvU32 get, put; + unsigned int cpu; + + UVM_SEQ_OR_DBG_PRINT(s, "GPU %s\n", uvm_gpu_name(gpu)); + UVM_SEQ_OR_DBG_PRINT(s, "retained_count %llu\n", uvm_gpu_retained_count(gpu)); + UVM_SEQ_OR_DBG_PRINT(s, "ecc %s\n", gpu->ecc.enabled ? "enabled" : "disabled"); + if (gpu->parent->closest_cpu_numa_node == -1) + UVM_SEQ_OR_DBG_PRINT(s, "closest_cpu_numa_node n/a\n"); + else + UVM_SEQ_OR_DBG_PRINT(s, "closest_cpu_numa_node %d\n", gpu->parent->closest_cpu_numa_node); + + if (!uvm_procfs_is_debug_enabled()) + return; + + UVM_SEQ_OR_DBG_PRINT(s, "CPU link type %s\n", + uvm_gpu_link_type_string(gpu->parent->sysmem_link)); + UVM_SEQ_OR_DBG_PRINT(s, "CPU link bandwidth %uMBps\n", + gpu->parent->sysmem_link_rate_mbyte_per_s); + + UVM_SEQ_OR_DBG_PRINT(s, "architecture 0x%X\n", gpu_info->gpuArch); + UVM_SEQ_OR_DBG_PRINT(s, "implementation 0x%X\n", gpu_info->gpuImplementation); + UVM_SEQ_OR_DBG_PRINT(s, "gpcs %u\n", gpu_info->gpcCount); + UVM_SEQ_OR_DBG_PRINT(s, "max_gpcs %u\n", gpu_info->maxGpcCount); + UVM_SEQ_OR_DBG_PRINT(s, "tpcs %u\n", gpu_info->tpcCount); + UVM_SEQ_OR_DBG_PRINT(s, "max_tpcs_per_gpc %u\n", gpu_info->maxTpcPerGpcCount); + UVM_SEQ_OR_DBG_PRINT(s, "host_class 0x%X\n", gpu_info->hostClass); + UVM_SEQ_OR_DBG_PRINT(s, "ce_class 0x%X\n", gpu_info->ceClass); + UVM_SEQ_OR_DBG_PRINT(s, "virtualization_mode %s\n", + uvm_gpu_virt_type_string(gpu_info->virtMode)); + UVM_SEQ_OR_DBG_PRINT(s, "big_page_size %u\n", gpu->big_page.internal_size); + UVM_SEQ_OR_DBG_PRINT(s, "rm_va_base 0x%llx\n", gpu->parent->rm_va_base); + UVM_SEQ_OR_DBG_PRINT(s, "rm_va_size 0x%llx\n", gpu->parent->rm_va_size); + UVM_SEQ_OR_DBG_PRINT(s, "vidmem_size %llu (%llu MBs)\n", + gpu->mem_info.size, + gpu->mem_info.size / (1024 * 1024)); + UVM_SEQ_OR_DBG_PRINT(s, "vidmem_max_allocatable 0x%llx (%llu MBs)\n", + gpu->mem_info.max_allocatable_address, + gpu->mem_info.max_allocatable_address / (1024 * 1024)); + + if (numa_info->enabled) { + NvU64 window_size = numa_info->system_memory_window_end - numa_info->system_memory_window_start + 1; + UVM_SEQ_OR_DBG_PRINT(s, "numa_node_id %u\n", numa_info->node_id); + UVM_SEQ_OR_DBG_PRINT(s, "system_memory_window_start 0x%llx\n", + 
numa_info->system_memory_window_start); + UVM_SEQ_OR_DBG_PRINT(s, "system_memory_window_end 0x%llx\n", + numa_info->system_memory_window_end); + UVM_SEQ_OR_DBG_PRINT(s, "system_memory_window_size 0x%llx (%llu MBs)\n", + window_size, + window_size / (1024 * 1024)); + } + + if (gpu->parent->npu) + UVM_SEQ_OR_DBG_PRINT(s, "npu_domain %d\n", gpu->parent->npu->pci_domain); + + UVM_SEQ_OR_DBG_PRINT(s, "interrupts %llu\n", gpu->parent->isr.interrupt_count); + + if (gpu->parent->isr.replayable_faults.handling) { + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_bh %llu\n", + gpu->parent->isr.replayable_faults.stats.bottom_half_count); + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_bh/cpu\n"); + for_each_cpu(cpu, &gpu->parent->isr.replayable_faults.stats.cpus_used_mask) { + UVM_SEQ_OR_DBG_PRINT(s, " cpu%02u %llu\n", + cpu, + gpu->parent->isr.replayable_faults.stats.cpu_exec_count[cpu]); + } + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_buffer_entries %u\n", + gpu->parent->fault_buffer_info.replayable.max_faults); + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_cached_get %u\n", + gpu->parent->fault_buffer_info.replayable.cached_get); + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_cached_put %u\n", + gpu->parent->fault_buffer_info.replayable.cached_put); + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_get %u\n", + gpu->parent->fault_buffer_hal->read_get(gpu->parent)); + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_put %u\n", + gpu->parent->fault_buffer_hal->read_put(gpu->parent)); + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_fault_batch_size %u\n", + gpu->parent->fault_buffer_info.max_batch_size); + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_replay_policy %s\n", + uvm_perf_fault_replay_policy_string(gpu->parent->fault_buffer_info.replayable.replay_policy)); + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults_num_faults %llu\n", + gpu->parent->stats.num_replayable_faults); + } + if (gpu->parent->isr.non_replayable_faults.handling) { + UVM_SEQ_OR_DBG_PRINT(s, "non_replayable_faults_bh %llu\n", + gpu->parent->isr.non_replayable_faults.stats.bottom_half_count); + UVM_SEQ_OR_DBG_PRINT(s, "non_replayable_faults_bh/cpu\n"); + for_each_cpu(cpu, &gpu->parent->isr.non_replayable_faults.stats.cpus_used_mask) { + UVM_SEQ_OR_DBG_PRINT(s, " cpu%02u %llu\n", + cpu, + gpu->parent->isr.non_replayable_faults.stats.cpu_exec_count[cpu]); + } + UVM_SEQ_OR_DBG_PRINT(s, "non_replayable_faults_buffer_entries %u\n", + gpu->parent->fault_buffer_info.non_replayable.max_faults); + UVM_SEQ_OR_DBG_PRINT(s, "non_replayable_faults_num_faults %llu\n", + gpu->parent->stats.num_non_replayable_faults); + } + + if (gpu->parent->isr.access_counters.handling_ref_count > 0) { + UVM_SEQ_OR_DBG_PRINT(s, "access_counters_bh %llu\n", + gpu->parent->isr.access_counters.stats.bottom_half_count); + UVM_SEQ_OR_DBG_PRINT(s, "access_counters_bh/cpu\n"); + for_each_cpu(cpu, &gpu->parent->isr.access_counters.stats.cpus_used_mask) { + UVM_SEQ_OR_DBG_PRINT(s, " cpu%02u %llu\n", + cpu, + gpu->parent->isr.access_counters.stats.cpu_exec_count[cpu]); + } + UVM_SEQ_OR_DBG_PRINT(s, "access_counters_buffer_entries %u\n", + gpu->parent->access_counter_buffer_info.max_notifications); + UVM_SEQ_OR_DBG_PRINT(s, "access_counters_cached_get %u\n", + gpu->parent->access_counter_buffer_info.cached_get); + UVM_SEQ_OR_DBG_PRINT(s, "access_counters_cached_put %u\n", + gpu->parent->access_counter_buffer_info.cached_put); + + get = UVM_GPU_READ_ONCE(*gpu->parent->access_counter_buffer_info.rm_info.pAccessCntrBufferGet); + put = 
UVM_GPU_READ_ONCE(*gpu->parent->access_counter_buffer_info.rm_info.pAccessCntrBufferPut); + + UVM_SEQ_OR_DBG_PRINT(s, "access_counters_get %u\n", get); + UVM_SEQ_OR_DBG_PRINT(s, "access_counters_put %u\n", put); + } + + num_pages_out = atomic64_read(&gpu->parent->stats.num_pages_out); + num_pages_in = atomic64_read(&gpu->parent->stats.num_pages_in); + mapped_cpu_pages_size = atomic64_read(&gpu->parent->mapped_cpu_pages_size); + + UVM_SEQ_OR_DBG_PRINT(s, "migrated_pages_in %llu (%llu MB)\n", + num_pages_in, + (num_pages_in * (NvU64)PAGE_SIZE) / (1024u * 1024u)); + UVM_SEQ_OR_DBG_PRINT(s, "migrated_pages_out %llu (%llu MB)\n", + num_pages_out, + (num_pages_out * (NvU64)PAGE_SIZE) / (1024u * 1024u)); + UVM_SEQ_OR_DBG_PRINT(s, "mapped_cpu_pages_dma %llu (%llu MB)\n", + mapped_cpu_pages_size / PAGE_SIZE, + mapped_cpu_pages_size / (1024u * 1024u)); + + gpu_info_print_ce_caps(gpu, s); + + + + + + + +} + +static void +gpu_fault_stats_print_common(uvm_parent_gpu_t *parent_gpu, struct seq_file *s) +{ + NvU64 num_pages_in; + NvU64 num_pages_out; + + UVM_ASSERT(uvm_procfs_is_debug_enabled()); + + UVM_SEQ_OR_DBG_PRINT(s, "replayable_faults %llu\n", parent_gpu->stats.num_replayable_faults); + UVM_SEQ_OR_DBG_PRINT(s, "duplicates %llu\n", + parent_gpu->fault_buffer_info.replayable.stats.num_duplicate_faults); + UVM_SEQ_OR_DBG_PRINT(s, "faults_by_access_type:\n"); + UVM_SEQ_OR_DBG_PRINT(s, " prefetch %llu\n", + parent_gpu->fault_buffer_info.replayable.stats.num_prefetch_faults); + UVM_SEQ_OR_DBG_PRINT(s, " read %llu\n", + parent_gpu->fault_buffer_info.replayable.stats.num_read_faults); + UVM_SEQ_OR_DBG_PRINT(s, " write %llu\n", + parent_gpu->fault_buffer_info.replayable.stats.num_write_faults); + UVM_SEQ_OR_DBG_PRINT(s, " atomic %llu\n", + parent_gpu->fault_buffer_info.replayable.stats.num_atomic_faults); + num_pages_out = atomic64_read(&parent_gpu->fault_buffer_info.replayable.stats.num_pages_out); + num_pages_in = atomic64_read(&parent_gpu->fault_buffer_info.replayable.stats.num_pages_in); + UVM_SEQ_OR_DBG_PRINT(s, "migrations:\n"); + UVM_SEQ_OR_DBG_PRINT(s, " num_pages_in %llu (%llu MB)\n", num_pages_in, + (num_pages_in * (NvU64)PAGE_SIZE) / (1024u * 1024u)); + UVM_SEQ_OR_DBG_PRINT(s, " num_pages_out %llu (%llu MB)\n", num_pages_out, + (num_pages_out * (NvU64)PAGE_SIZE) / (1024u * 1024u)); + UVM_SEQ_OR_DBG_PRINT(s, "replays:\n"); + UVM_SEQ_OR_DBG_PRINT(s, " start %llu\n", + parent_gpu->fault_buffer_info.replayable.stats.num_replays); + UVM_SEQ_OR_DBG_PRINT(s, " start_ack_all %llu\n", + parent_gpu->fault_buffer_info.replayable.stats.num_replays_ack_all); + UVM_SEQ_OR_DBG_PRINT(s, "non_replayable_faults %llu\n", parent_gpu->stats.num_non_replayable_faults); + UVM_SEQ_OR_DBG_PRINT(s, "faults_by_access_type:\n"); + UVM_SEQ_OR_DBG_PRINT(s, " read %llu\n", + parent_gpu->fault_buffer_info.non_replayable.stats.num_read_faults); + UVM_SEQ_OR_DBG_PRINT(s, " write %llu\n", + parent_gpu->fault_buffer_info.non_replayable.stats.num_write_faults); + UVM_SEQ_OR_DBG_PRINT(s, " atomic %llu\n", + parent_gpu->fault_buffer_info.non_replayable.stats.num_atomic_faults); + UVM_SEQ_OR_DBG_PRINT(s, "faults_by_addressing:\n"); + UVM_SEQ_OR_DBG_PRINT(s, " virtual %llu\n", + parent_gpu->stats.num_non_replayable_faults - + parent_gpu->fault_buffer_info.non_replayable.stats.num_physical_faults); + UVM_SEQ_OR_DBG_PRINT(s, " physical %llu\n", + parent_gpu->fault_buffer_info.non_replayable.stats.num_physical_faults); + num_pages_out = atomic64_read(&parent_gpu->fault_buffer_info.non_replayable.stats.num_pages_out); + num_pages_in = 
atomic64_read(&parent_gpu->fault_buffer_info.non_replayable.stats.num_pages_in); + UVM_SEQ_OR_DBG_PRINT(s, "migrations:\n"); + UVM_SEQ_OR_DBG_PRINT(s, " num_pages_in %llu (%llu MB)\n", num_pages_in, + (num_pages_in * (NvU64)PAGE_SIZE) / (1024u * 1024u)); + UVM_SEQ_OR_DBG_PRINT(s, " num_pages_out %llu (%llu MB)\n", num_pages_out, + (num_pages_out * (NvU64)PAGE_SIZE) / (1024u * 1024u)); +} + +static void gpu_access_counters_print_common(uvm_parent_gpu_t *parent_gpu, struct seq_file *s) +{ + NvU64 num_pages_in; + NvU64 num_pages_out; + + UVM_ASSERT(uvm_procfs_is_debug_enabled()); + + num_pages_out = atomic64_read(&parent_gpu->access_counter_buffer_info.stats.num_pages_out); + num_pages_in = atomic64_read(&parent_gpu->access_counter_buffer_info.stats.num_pages_in); + UVM_SEQ_OR_DBG_PRINT(s, "migrations:\n"); + UVM_SEQ_OR_DBG_PRINT(s, " num_pages_in %llu (%llu MB)\n", num_pages_in, + (num_pages_in * (NvU64)PAGE_SIZE) / (1024u * 1024u)); + UVM_SEQ_OR_DBG_PRINT(s, " num_pages_out %llu (%llu MB)\n", num_pages_out, + (num_pages_out * (NvU64)PAGE_SIZE) / (1024u * 1024u)); +} + +void uvm_gpu_print(uvm_gpu_t *gpu) +{ + gpu_info_print_common(gpu, NULL); +} + +static void gpu_peer_caps_print(uvm_gpu_t **gpu_pair, struct seq_file *s) +{ + bool nvswitch_connected; + uvm_aperture_t aperture; + uvm_gpu_peer_t *peer_caps; + uvm_gpu_t *local; + uvm_gpu_t *remote; + + UVM_ASSERT(uvm_procfs_is_debug_enabled()); + + local = gpu_pair[0]; + remote = gpu_pair[1]; + peer_caps = uvm_gpu_peer_caps(local, remote); + aperture = uvm_gpu_peer_aperture(local, remote); + nvswitch_connected = uvm_gpus_are_nvswitch_connected(local, remote); + UVM_SEQ_OR_DBG_PRINT(s, "Link type %s\n", uvm_gpu_link_type_string(peer_caps->link_type)); + UVM_SEQ_OR_DBG_PRINT(s, "Bandwidth %uMBps\n", peer_caps->total_link_line_rate_mbyte_per_s); + UVM_SEQ_OR_DBG_PRINT(s, "Aperture %s\n", uvm_aperture_string(aperture)); + UVM_SEQ_OR_DBG_PRINT(s, "Connected through NVSWITCH %s\n", nvswitch_connected ? 
"True" : "False"); + UVM_SEQ_OR_DBG_PRINT(s, "Refcount %llu\n", UVM_READ_ONCE(peer_caps->ref_count)); +} + +static int nv_procfs_read_gpu_info(struct seq_file *s, void *v) +{ + uvm_gpu_t *gpu = (uvm_gpu_t *)s->private; + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + gpu_info_print_common(gpu, s); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_gpu_info_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_gpu_info(s, v)); +} + +static int nv_procfs_read_gpu_fault_stats(struct seq_file *s, void *v) +{ + uvm_parent_gpu_t *parent_gpu = (uvm_parent_gpu_t *)s->private; + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + gpu_fault_stats_print_common(parent_gpu, s); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_gpu_fault_stats_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_gpu_fault_stats(s, v)); +} + +static int nv_procfs_read_gpu_access_counters(struct seq_file *s, void *v) +{ + uvm_parent_gpu_t *parent_gpu = (uvm_parent_gpu_t *)s->private; + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + gpu_access_counters_print_common(parent_gpu, s); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_gpu_access_counters_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_gpu_access_counters(s, v)); +} + +UVM_DEFINE_SINGLE_PROCFS_FILE(gpu_info_entry); +UVM_DEFINE_SINGLE_PROCFS_FILE(gpu_fault_stats_entry); +UVM_DEFINE_SINGLE_PROCFS_FILE(gpu_access_counters_entry); + +static NV_STATUS init_parent_procfs_dir(uvm_parent_gpu_t *parent_gpu) +{ + struct proc_dir_entry *gpu_base_dir_entry; + char uuid_text_buffer[UVM_GPU_UUID_TEXT_BUFFER_LENGTH]; + char gpu_dir_name[sizeof(uuid_text_buffer) + 1]; + + if (!uvm_procfs_is_enabled()) + return NV_OK; + + gpu_base_dir_entry = uvm_procfs_get_gpu_base_dir(); + + format_uuid_to_buffer(uuid_text_buffer, sizeof(uuid_text_buffer), &parent_gpu->uuid); + + // Create UVM-GPU-${UUID} directory + snprintf(gpu_dir_name, sizeof(gpu_dir_name), "%s", uuid_text_buffer); + + parent_gpu->procfs.dir = NV_CREATE_PROC_DIR(gpu_dir_name, gpu_base_dir_entry); + if (parent_gpu->procfs.dir == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static void deinit_parent_procfs_dir(uvm_parent_gpu_t *parent_gpu) +{ + uvm_procfs_destroy_entry(parent_gpu->procfs.dir); +} + +static NV_STATUS init_parent_procfs_files(uvm_parent_gpu_t *parent_gpu) +{ + // Fault and access counter files are debug only + if (!uvm_procfs_is_debug_enabled()) + return NV_OK; + + parent_gpu->procfs.fault_stats_file = NV_CREATE_PROC_FILE("fault_stats", + parent_gpu->procfs.dir, + gpu_fault_stats_entry, + parent_gpu); + if (parent_gpu->procfs.fault_stats_file == NULL) + return NV_ERR_OPERATING_SYSTEM; + + parent_gpu->procfs.access_counters_file = NV_CREATE_PROC_FILE("access_counters", + parent_gpu->procfs.dir, + gpu_access_counters_entry, + parent_gpu); + if (parent_gpu->procfs.access_counters_file == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static void deinit_parent_procfs_files(uvm_parent_gpu_t *parent_gpu) +{ + uvm_procfs_destroy_entry(parent_gpu->procfs.access_counters_file); + uvm_procfs_destroy_entry(parent_gpu->procfs.fault_stats_file); +} + +static NV_STATUS init_procfs_dirs(uvm_gpu_t *gpu) +{ + struct proc_dir_entry *gpu_base_dir_entry; + char symlink_name[16]; // Hold a global_gpu_id_t value in decimal. 
+ char uuid_text_buffer[UVM_GPU_UUID_TEXT_BUFFER_LENGTH]; + char gpu_dir_name[sizeof(symlink_name) + sizeof(uuid_text_buffer) + 1]; + + if (!uvm_procfs_is_enabled()) + return NV_OK; + + format_uuid_to_buffer(uuid_text_buffer, sizeof(uuid_text_buffer), uvm_gpu_uuid(gpu)); + + gpu_base_dir_entry = uvm_procfs_get_gpu_base_dir(); + + // Create UVM-GPU-${UUID}/${sub_processor_index} directory + snprintf(gpu_dir_name, sizeof(gpu_dir_name), "%u", uvm_global_id_sub_processor_index(gpu->global_id)); + + gpu->procfs.dir = NV_CREATE_PROC_DIR(gpu_dir_name, gpu->parent->procfs.dir); + if (gpu->procfs.dir == NULL) + return NV_ERR_OPERATING_SYSTEM; + + // Create symlink from ${global_gpu_id} to + // gpus/UVM-GPU-${UUID}/${sub_processor_index} + snprintf(symlink_name, sizeof(symlink_name), "%u", uvm_global_id_value(gpu->global_id)); + snprintf(gpu_dir_name, + sizeof(gpu_dir_name), + "%s/%u", + uuid_text_buffer, + uvm_global_id_sub_processor_index(gpu->global_id)); + + gpu->procfs.dir_symlink = proc_symlink(symlink_name, gpu_base_dir_entry, gpu_dir_name); + if (gpu->procfs.dir_symlink == NULL) + return NV_ERR_OPERATING_SYSTEM; + + // GPU peer files are debug only + if (!uvm_procfs_is_debug_enabled()) + return NV_OK; + + gpu->procfs.dir_peers = NV_CREATE_PROC_DIR(UVM_PROC_GPUS_PEER_DIR_NAME, gpu->procfs.dir); + if (gpu->procfs.dir_peers == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +// The kernel waits on readers to finish before returning from those calls +static void deinit_procfs_dirs(uvm_gpu_t *gpu) +{ + uvm_procfs_destroy_entry(gpu->procfs.dir_peers); + uvm_procfs_destroy_entry(gpu->procfs.dir_symlink); + uvm_procfs_destroy_entry(gpu->procfs.dir); +} + +static NV_STATUS init_procfs_files(uvm_gpu_t *gpu) +{ + gpu->procfs.info_file = NV_CREATE_PROC_FILE("info", gpu->procfs.dir, gpu_info_entry, gpu); + if (gpu->procfs.info_file == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static void deinit_procfs_files(uvm_gpu_t *gpu) +{ + uvm_procfs_destroy_entry(gpu->procfs.info_file); +} + +static void deinit_procfs_peer_cap_files(uvm_gpu_peer_t *peer_caps) +{ + uvm_procfs_destroy_entry(peer_caps->procfs.peer_symlink_file[0]); + uvm_procfs_destroy_entry(peer_caps->procfs.peer_symlink_file[1]); + uvm_procfs_destroy_entry(peer_caps->procfs.peer_file[0]); + uvm_procfs_destroy_entry(peer_caps->procfs.peer_file[1]); +} + +static NV_STATUS init_semaphore_pool(uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_gpu_t *other_gpu; + + status = uvm_gpu_semaphore_pool_create(gpu, &gpu->semaphore_pool); + if (status != NV_OK) + return status; + + for_each_global_gpu(other_gpu) { + if (other_gpu == gpu) + continue; + status = uvm_gpu_semaphore_pool_map_gpu(other_gpu->semaphore_pool, gpu); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +static void deinit_semaphore_pool(uvm_gpu_t *gpu) +{ + uvm_gpu_t *other_gpu; + + for_each_global_gpu(other_gpu) { + if (other_gpu == gpu) + continue; + uvm_gpu_semaphore_pool_unmap_gpu(other_gpu->semaphore_pool, gpu); + } + + uvm_gpu_semaphore_pool_destroy(gpu->semaphore_pool); +} + +static NV_STATUS find_unused_global_gpu_id(uvm_parent_gpu_t *parent_gpu, uvm_global_gpu_id_t *out_id) +{ + NvU32 i; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (!parent_gpu) { + for (i = 0; i < UVM_MAX_GPUS; i++) { + if (!g_uvm_global.parent_gpus[i]) { + *out_id = uvm_global_gpu_id_from_parent_index(i); + return NV_OK; + } + } + } + else { + NvU32 sub_processor_index = find_first_zero_bit(parent_gpu->valid_gpus, UVM_ID_MAX_SUB_PROCESSORS); + if 
(sub_processor_index < UVM_ID_MAX_SUB_PROCESSORS) { + *out_id = uvm_global_gpu_id_from_sub_processor_index(parent_gpu->id, sub_processor_index); + return NV_OK; + } + } + + return NV_ERR_INSUFFICIENT_RESOURCES; +} + +// Allocates a uvm_parent_gpu_t, assigns the GPU ID, and sets up basic data +// structures, but leaves all other initialization up to the caller. +static NV_STATUS alloc_parent_gpu(const NvProcessorUuid *gpu_uuid, + uvm_gpu_id_t gpu_id, + uvm_parent_gpu_t **parent_gpu_out) +{ + uvm_parent_gpu_t *parent_gpu; + + parent_gpu = uvm_kvmalloc_zero(sizeof(*parent_gpu)); + if (!parent_gpu) + return NV_ERR_NO_MEMORY; + + parent_gpu->id = gpu_id; + + uvm_processor_uuid_copy(&parent_gpu->uuid, gpu_uuid); + uvm_sema_init(&parent_gpu->isr.replayable_faults.service_lock, 1, UVM_LOCK_ORDER_ISR); + uvm_sema_init(&parent_gpu->isr.non_replayable_faults.service_lock, 1, UVM_LOCK_ORDER_ISR); + uvm_sema_init(&parent_gpu->isr.access_counters.service_lock, 1, UVM_LOCK_ORDER_ISR); + uvm_spin_lock_irqsave_init(&parent_gpu->isr.interrupts_lock, UVM_LOCK_ORDER_LEAF); + uvm_spin_lock_init(&parent_gpu->instance_ptr_table_lock, UVM_LOCK_ORDER_LEAF); + uvm_rb_tree_init(&parent_gpu->instance_ptr_table); + uvm_rb_tree_init(&parent_gpu->tsg_table); + + nv_kref_init(&parent_gpu->gpu_kref); + + *parent_gpu_out = parent_gpu; + + return NV_OK; +} + +// Allocates a uvm_gpu_t struct and initializes the basic fields and leaves all +// other initialization up to the caller. +static uvm_gpu_t *alloc_gpu(uvm_parent_gpu_t *parent_gpu, uvm_global_gpu_id_t global_gpu_id) +{ + NvU32 sub_processor_index; + uvm_gpu_t *gpu; + + gpu = uvm_kvmalloc_zero(sizeof(*gpu)); + if (!gpu) + return gpu; + + gpu->id = parent_gpu->id; + gpu->global_id = global_gpu_id; + gpu->parent = parent_gpu; + + // Initialize enough of the gpu struct for remove_gpu to be called + gpu->magic = UVM_GPU_MAGIC_VALUE; + uvm_spin_lock_init(&gpu->peer_info.peer_gpus_lock, UVM_LOCK_ORDER_LEAF); + + sub_processor_index = uvm_global_id_sub_processor_index(global_gpu_id); + parent_gpu->gpus[sub_processor_index] = gpu; + + return gpu; +} + +static NV_STATUS configure_address_space(uvm_gpu_t *gpu) +{ + NV_STATUS status; + NvU32 num_entries; + NvU64 va_size; + NvU64 va_per_entry; + + status = uvm_page_tree_init(gpu, + NULL, + UVM_PAGE_TREE_TYPE_KERNEL, + gpu->big_page.internal_size, + uvm_gpu_page_tree_init_location(gpu), + &gpu->address_space_tree); + if (status != NV_OK) { + UVM_ERR_PRINT("Initializing the page tree failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + num_entries = uvm_mmu_page_tree_entries(&gpu->address_space_tree, 0, UVM_PAGE_SIZE_AGNOSTIC); + + UVM_ASSERT(gpu->address_space_tree.hal->num_va_bits() < 64); + va_size = 1ull << gpu->address_space_tree.hal->num_va_bits(); + va_per_entry = va_size / num_entries; + + // Make sure that RM's part of the VA is aligned to the VA covered by a + // single top level PDE. 
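    // For example (hypothetical numbers, for illustration only): with a 49-bit
    // VA and 512 top-level entries, va_per_entry would be 2^49 / 512 = 2^40
    // bytes (1TB), so rm_va_base and rm_va_size would each have to be whole
    // multiples of 1TB for the asserts below to hold.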
+ UVM_ASSERT_MSG(gpu->parent->rm_va_base % va_per_entry == 0, + "va_base 0x%llx va_per_entry 0x%llx\n", gpu->parent->rm_va_base, va_per_entry); + UVM_ASSERT_MSG(gpu->parent->rm_va_size % va_per_entry == 0, + "va_size 0x%llx va_per_entry 0x%llx\n", gpu->parent->rm_va_size, va_per_entry); + + status = uvm_rm_locked_call(nvUvmInterfaceSetPageDirectory(gpu->rm_address_space, + uvm_page_tree_pdb(&gpu->address_space_tree)->addr.address, num_entries, + uvm_page_tree_pdb(&gpu->address_space_tree)->addr.aperture == UVM_APERTURE_VID, + -1U /* Invalid PASID for internal RM address space */)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfaceSetPageDirectory() failed: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + gpu->rm_address_space_moved_to_page_tree = true; + + return NV_OK; +} + +static void deconfigure_address_space(uvm_gpu_t *gpu) +{ + if (gpu->rm_address_space_moved_to_page_tree) + uvm_rm_locked_call_void(nvUvmInterfaceUnsetPageDirectory(gpu->rm_address_space)); + + if (gpu->address_space_tree.root) + uvm_page_tree_deinit(&gpu->address_space_tree); +} + +static NV_STATUS service_interrupts(uvm_parent_gpu_t *parent_gpu) +{ + // Asking RM to service interrupts from top half interrupt handler would + // very likely deadlock. + UVM_ASSERT(!in_interrupt()); + + return uvm_rm_locked_call(nvUvmInterfaceServiceDeviceInterruptsRM(parent_gpu->rm_device)); +} + +NV_STATUS uvm_gpu_check_ecc_error(uvm_gpu_t *gpu) +{ + NV_STATUS status = uvm_gpu_check_ecc_error_no_rm(gpu); + + if (status == NV_OK || status != NV_WARN_MORE_PROCESSING_REQUIRED) + return status; + + // An interrupt that might mean an ECC error needs to be serviced. + UVM_ASSERT(status == NV_WARN_MORE_PROCESSING_REQUIRED); + + status = service_interrupts(gpu->parent); + if (status != NV_OK) { + UVM_ERR_PRINT("Servicing interrupts failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + // After servicing interrupts the ECC error notifier should be current. + if (*gpu->ecc.error_notifier) { + UVM_ERR_PRINT("ECC error encountered, GPU %s\n", uvm_gpu_name(gpu)); + uvm_global_set_fatal_error(NV_ERR_ECC_ERROR); + return NV_ERR_ECC_ERROR; + } + + return NV_OK; +} + +static NV_STATUS init_parent_gpu(uvm_parent_gpu_t *parent_gpu, + const NvProcessorUuid *gpu_uuid, + const UvmGpuInfo *gpu_info, + const UvmGpuPlatformInfo *gpu_platform_info) +{ + NV_STATUS status; + + status = uvm_rm_locked_call(nvUvmInterfaceDeviceCreate(g_uvm_global.rm_session_handle, + gpu_info, + gpu_uuid, + &parent_gpu->rm_device, + NV_FALSE)); + if (status != NV_OK) { + UVM_ERR_PRINT("Creating RM device failed: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name); + return status; + } + + + + + + + + + + + parent_gpu->pci_dev = gpu_platform_info->pci_dev; + parent_gpu->closest_cpu_numa_node = dev_to_node(&parent_gpu->pci_dev->dev); + parent_gpu->dma_addressable_start = gpu_platform_info->dma_addressable_start; + parent_gpu->dma_addressable_limit = gpu_platform_info->dma_addressable_limit; + + parent_gpu->sli_enabled = (gpu_info->subdeviceCount > 1); + + parent_gpu->virt_mode = gpu_info->virtMode; + if (parent_gpu->virt_mode == UVM_VIRT_MODE_LEGACY) { + UVM_ERR_PRINT("Failed to init GPU %s. 
UVM is not supported in legacy virtualization mode\n", parent_gpu->name); + return NV_ERR_NOT_SUPPORTED; + } + + if (gpu_info->isSimulated) + ++g_uvm_global.num_simulated_devices; + + status = init_parent_procfs_dir(parent_gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init parent procfs dir: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name); + return status; + } + + status = uvm_hal_init_gpu(parent_gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init GPU hal: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name); + return status; + } + + uvm_hal_init_properties(parent_gpu); + + UVM_ASSERT(!parent_gpu->rm_info.smcEnabled || parent_gpu->smc.supported); + parent_gpu->smc.enabled = !!parent_gpu->rm_info.smcEnabled; + + uvm_mmu_init_gpu_chunk_sizes(parent_gpu); + + status = get_gpu_caps(parent_gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to get GPU caps: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name); + return status; + } + + status = uvm_ats_add_gpu(parent_gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_ats_add_gpu failed: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name); + return status; + } + + status = init_parent_procfs_files(parent_gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init parent procfs files: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name); + return status; + } + + status = uvm_gpu_init_isr(parent_gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init ISR: %s, GPU %s\n", nvstatusToString(status), parent_gpu->name); + return status; + } + + return NV_OK; +} + +static NV_STATUS init_gpu(uvm_gpu_t *gpu, const UvmGpuInfo *gpu_info) +{ + NV_STATUS status; + + // Presently, an RM client can only subscribe to a single partition per + // GPU. Therefore, UVM needs to create several RM clients. For simplicity, + // and since P2P is not supported when SMC partitions are created, we + // create a client (session) per GPU partition. + if (gpu->parent->smc.enabled) { + UvmPlatformInfo platform_info; + status = uvm_rm_locked_call(nvUvmInterfaceSessionCreate(&gpu->smc.rm_session_handle, &platform_info)); + if (status != NV_OK) { + UVM_ERR_PRINT("Creating RM session failed: %s\n", nvstatusToString(status)); + return status; + } + + status = uvm_rm_locked_call(nvUvmInterfaceDeviceCreate(uvm_gpu_session_handle(gpu), + gpu_info, + uvm_gpu_uuid(gpu), + &gpu->smc.rm_device, + NV_TRUE)); + if (status != NV_OK) { + UVM_ERR_PRINT("Creating RM device failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + } + + gpu->smc.swizz_id = gpu_info->smcSwizzId; + + // Initialize the per-GPU procfs dirs as early as possible so that other + // parts of the driver can add files in them as part of their per-GPU init. 
+ status = init_procfs_dirs(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init procfs dirs: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + uvm_mmu_init_gpu_peer_addresses(gpu); + + status = alloc_and_init_address_space(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Creating RM address space failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + status = get_gpu_fb_info(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to get GPU FB info: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + status = get_gpu_ecc_info(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to get GPU ECC info: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + status = uvm_pmm_gpu_init(&gpu->pmm); + if (status != NV_OK) { + UVM_ERR_PRINT("PMM initialization failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + status = uvm_pmm_sysmem_mappings_init(gpu, &gpu->pmm_reverse_sysmem_mappings); + if (status != NV_OK) { + UVM_ERR_PRINT("CPU PMM MMIO initialization failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + status = init_semaphore_pool(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to initialize the semaphore pool: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + status = uvm_channel_manager_create(gpu, &gpu->channel_manager); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to initialize the channel manager: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + status = configure_address_space(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to configure the GPU address space: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + status = uvm_mmu_create_flat_mappings(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Creating flat mappings failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + + + + + + + + + + + status = init_procfs_files(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init procfs files: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + status = uvm_perf_heuristics_add_gpu(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init heuristics: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + return NV_OK; +} + +// Add a new gpu and register it with RM +// TODO: Bug 2844714: Split parent-specific parts of this function out into a +// separate add_parent_gpu() function. 
+static NV_STATUS add_gpu(const NvProcessorUuid *gpu_uuid, + const uvm_global_gpu_id_t global_gpu_id, + const UvmGpuInfo *gpu_info, + const UvmGpuPlatformInfo *gpu_platform_info, + uvm_parent_gpu_t *parent_gpu, + uvm_gpu_t **gpu_out) +{ + NV_STATUS status; + bool alloc_parent = (parent_gpu == NULL); + uvm_gpu_t *gpu = NULL; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (alloc_parent) { + status = alloc_parent_gpu(gpu_uuid, uvm_gpu_id_from_global_gpu_id(global_gpu_id), &parent_gpu); + if (status != NV_OK) + return status; + } + + gpu = alloc_gpu(parent_gpu, global_gpu_id); + if (!gpu) { + if (alloc_parent) + uvm_parent_gpu_kref_put(parent_gpu); + + return NV_ERR_NO_MEMORY; + } + + parent_gpu->num_retained_gpus++; + + if (alloc_parent) + fill_gpu_info(parent_gpu, gpu_info); + + // After this point all error clean up should be handled by remove_gpu() + + if (!gpu_supports_uvm(parent_gpu)) { + UVM_DBG_PRINT("Registration of non-UVM-capable GPU attempted: GPU %s\n", uvm_gpu_name(gpu)); + status = NV_ERR_NOT_SUPPORTED; + goto error; + } + + if (alloc_parent) { + status = init_parent_gpu(parent_gpu, gpu_uuid, gpu_info, gpu_platform_info); + if (status != NV_OK) + goto error; + } + + status = init_gpu(gpu, gpu_info); + if (status != NV_OK) + goto error; + + status = uvm_gpu_check_ecc_error(gpu); + if (status != NV_OK) + goto error; + + atomic64_set(&gpu->retained_count, 1); + uvm_global_processor_mask_set(&g_uvm_global.retained_gpus, gpu->global_id); + + uvm_spin_lock_irqsave(&g_uvm_global.gpu_table_lock); + + if (alloc_parent) + uvm_global_add_parent_gpu(parent_gpu); + + // Mark the GPU as valid in the parent GPU's GPU table. + UVM_ASSERT(!test_bit(uvm_global_id_sub_processor_index(gpu->global_id), parent_gpu->valid_gpus)); + __set_bit(uvm_global_id_sub_processor_index(gpu->global_id), parent_gpu->valid_gpus); + + // Although locking correctness does not, at this early point (before the + // GPU is visible in the table) strictly require holding the gpu_table_lock + // in order to read gpu->isr.replayable_faults.handling, nor to enable page + // fault interrupts (this could have been done earlier), it is best to do it + // here, in order to avoid an interrupt storm. That way, we take advantage + // of the spinlock_irqsave side effect of turning off local CPU interrupts, + // part of holding the gpu_table_lock. That means that the local CPU won't + // receive any of these interrupts, until the GPU is safely added to the + // table (where the top half ISR can find it). + // + // As usual with spinlock_irqsave behavior, *other* CPUs can still handle + // these interrupts, but the local CPU will not be slowed down (interrupted) + // by such handling, and can quickly release the gpu_table_lock, thus + // unblocking any other CPU's top half (which waits for the gpu_table_lock). 
+ if (alloc_parent && parent_gpu->isr.replayable_faults.handling) { + parent_gpu->fault_buffer_hal->enable_replayable_faults(parent_gpu); + + // Clear the interrupt bit and force the re-evaluation of the interrupt + // condition to ensure that we don't miss any pending interrupt + parent_gpu->fault_buffer_hal->clear_replayable_faults(parent_gpu, + parent_gpu->fault_buffer_info.replayable.cached_get); + } + + // Access counters are enabled on demand + + uvm_spin_unlock_irqrestore(&g_uvm_global.gpu_table_lock); + + if (alloc_parent) { + status = discover_nvlink_peers(gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to discover NVLINK peers: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + + // Nobody can have retained the GPU yet, since we still hold the global + // lock. + UVM_ASSERT(uvm_gpu_retained_count(gpu) == 1); + atomic64_set(&gpu->retained_count, 0); + goto error; + } + } + + *gpu_out = gpu; + + return NV_OK; + +error: + remove_gpu(gpu); + + return status; +} + +static void sync_parent_gpu_trackers(uvm_parent_gpu_t *parent_gpu, + bool sync_replay_tracker, + bool sync_clear_faulted_tracker) +{ + NV_STATUS status; + + // Sync the replay tracker since it inherits dependencies from the VA block + // trackers. + if (sync_replay_tracker) { + uvm_gpu_replayable_faults_isr_lock(parent_gpu); + status = uvm_tracker_wait(&parent_gpu->fault_buffer_info.replayable.replay_tracker); + uvm_gpu_replayable_faults_isr_unlock(parent_gpu); + + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); + } + + // Sync the clear_faulted tracker since it inherits dependencies from the + // VA block trackers, too. + if (sync_clear_faulted_tracker) { + uvm_gpu_non_replayable_faults_isr_lock(parent_gpu); + status = uvm_tracker_wait(&parent_gpu->fault_buffer_info.non_replayable.clear_faulted_tracker); + uvm_gpu_non_replayable_faults_isr_unlock(parent_gpu); + + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); + } +} + +// Remove all references the given GPU has to other GPUs, since one of those +// other GPUs is getting removed. This involves waiting for any unfinished +// trackers contained by this GPU. +static void remove_gpus_from_gpu(uvm_gpu_t *gpu) +{ + sync_parent_gpu_trackers(gpu->parent, + gpu->parent->isr.replayable_faults.handling, + gpu->parent->isr.non_replayable_faults.handling); + + // Sync all trackers in PMM + uvm_pmm_gpu_sync(&gpu->pmm); + + + + + +} + +// Remove all references to the given GPU from its parent, since it is being +// removed. This involves waiting for any unfinished trackers contained +// by the parent GPU. +static void remove_gpu_from_parent_gpu(uvm_gpu_t *gpu) +{ + // We use *.was_handling instead of *.handling here since this function is + // called after uvm_gpu_disable_isr(), and the *.handling flags will + // already have been copied to *.was_handling, and then set to false. + sync_parent_gpu_trackers(gpu->parent, + gpu->parent->isr.replayable_faults.was_handling, + gpu->parent->isr.non_replayable_faults.was_handling); +} + +static void deinit_parent_gpu(uvm_parent_gpu_t *parent_gpu) +{ + // All channels should have been removed before the retained count went to 0 + UVM_ASSERT(uvm_rb_tree_empty(&parent_gpu->instance_ptr_table)); + UVM_ASSERT(uvm_rb_tree_empty(&parent_gpu->tsg_table)); + + // Access counters should have been disabled when the GPU is no longer + // registered in any VA space. 
+ UVM_ASSERT(parent_gpu->isr.access_counters.handling_ref_count == 0); + + // Return ownership to RM + uvm_gpu_deinit_isr(parent_gpu); + + deinit_parent_procfs_files(parent_gpu); + + uvm_ats_remove_gpu(parent_gpu); + + UVM_ASSERT(atomic64_read(&parent_gpu->mapped_cpu_pages_size) == 0); + + // After calling nvUvmInterfaceUnregisterGpu() the reference to pci_dev may + // not be valid any more so clear it ahead of time. + parent_gpu->pci_dev = NULL; + + deinit_parent_procfs_dir(parent_gpu); + + if (parent_gpu->rm_info.isSimulated) + --g_uvm_global.num_simulated_devices; + + if (parent_gpu->rm_device != 0) + uvm_rm_locked_call_void(nvUvmInterfaceDeviceDestroy(parent_gpu->rm_device)); + + uvm_parent_gpu_kref_put(parent_gpu); +} + +static void deinit_gpu(uvm_gpu_t *gpu) +{ + uvm_gpu_t *other_gpu; + + // Remove any pointers to this GPU from other GPUs' trackers. + for_each_global_gpu(other_gpu) { + UVM_ASSERT(other_gpu != gpu); + remove_gpus_from_gpu(other_gpu); + } + + // Further, remove any pointers to this GPU from its parent's trackers. + remove_gpu_from_parent_gpu(gpu); + + uvm_perf_heuristics_remove_gpu(gpu); + + deinit_procfs_files(gpu); + + // TODO Bug 3429163: [UVM] Move uvm_mmu_destroy_flat_mapping() to the + // correct spot + uvm_mmu_destroy_flat_mappings(gpu); + + // Wait for any deferred frees and their associated trackers to be finished + // before tearing down channels. + uvm_pmm_gpu_sync(&gpu->pmm); + + uvm_channel_manager_destroy(gpu->channel_manager); + + // Deconfigure the address space only after destroying all the channels as + // in case any of them hit fatal errors, RM will assert that they are not + // idle during nvUvmInterfaceUnsetPageDirectory() and that's an unnecessary + // pain during development. + deconfigure_address_space(gpu); + + deinit_semaphore_pool(gpu); + + uvm_pmm_sysmem_mappings_deinit(&gpu->pmm_reverse_sysmem_mappings); + + uvm_pmm_gpu_deinit(&gpu->pmm); + + if (gpu->rm_address_space != 0) + uvm_rm_locked_call_void(nvUvmInterfaceAddressSpaceDestroy(gpu->rm_address_space)); + + deinit_procfs_dirs(gpu); + + if (gpu->parent->smc.enabled) { + if (gpu->smc.rm_device != 0) + uvm_rm_locked_call_void(nvUvmInterfaceDeviceDestroy(gpu->smc.rm_device)); + + if (gpu->smc.rm_session_handle != 0) + uvm_rm_locked_call_void(nvUvmInterfaceSessionDestroy(gpu->smc.rm_session_handle)); + } + + gpu->magic = 0; +} + +// Remove a gpu and unregister it from RM +// Note that this is also used in most error paths in add_gpu() +static void remove_gpu(uvm_gpu_t *gpu) +{ + NvU32 sub_processor_index; + uvm_parent_gpu_t *parent_gpu; + bool free_parent; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + sub_processor_index = uvm_global_id_sub_processor_index(gpu->global_id); + parent_gpu = gpu->parent; + + UVM_ASSERT_MSG(uvm_gpu_retained_count(gpu) == 0, + "gpu_id %u retained_count %llu\n", + uvm_id_value(gpu->id), + uvm_gpu_retained_count(gpu)); + + UVM_ASSERT(parent_gpu->num_retained_gpus > 0); + parent_gpu->num_retained_gpus--; + + free_parent = (parent_gpu->num_retained_gpus == 0); + + // NVLINK peers must be removed and the relevant access counter buffers must + // be flushed before removing this GPU from the global table. See the + // comment on discover_nvlink_peers in add_gpu. + if (free_parent) + destroy_nvlink_peers(gpu); + + + + + + + + + // TODO: Bug 2844714: If the parent is not being freed, the following + // gpu_table_lock is only needed to protect concurrent + // find_first_valid_gpu() in BH from the __clear_bit here. 
After
+    // find_first_valid_gpu() is removed, gpu_table_lock should only be acquired
+    // and released in the free_parent case.
+    //
+    // In the free_parent case, gpu_table_lock protects the top half from the
+    // uvm_global_remove_parent_gpu()
+    uvm_spin_lock_irqsave(&g_uvm_global.gpu_table_lock);
+
+    // Mark the GPU as invalid in the parent GPU's GPU table.
+    __clear_bit(sub_processor_index, parent_gpu->valid_gpus);
+
+    // Remove the GPU from the table.
+    if (free_parent)
+        uvm_global_remove_parent_gpu(parent_gpu);
+
+    uvm_spin_unlock_irqrestore(&g_uvm_global.gpu_table_lock);
+
+    uvm_global_processor_mask_clear(&g_uvm_global.retained_gpus, gpu->global_id);
+
+    // If the parent is being freed, stop scheduling new bottom halves and
+    // update relevant software state. Else flush any pending bottom halves
+    // before continuing.
+    if (free_parent)
+        uvm_gpu_disable_isr(parent_gpu);
+    else
+        uvm_gpu_flush_bottom_halves(parent_gpu);
+
+    deinit_gpu(gpu);
+
+    UVM_ASSERT(parent_gpu->gpus[sub_processor_index] == gpu);
+    parent_gpu->gpus[sub_processor_index] = NULL;
+    uvm_kvfree(gpu);
+
+    if (free_parent)
+        deinit_parent_gpu(parent_gpu);
+}
+
+// Do not call this directly. It is called by nv_kref_put, when the
+// GPU's ref count drops to zero.
+static void uvm_parent_gpu_destroy(nv_kref_t *nv_kref)
+{
+    uvm_parent_gpu_t *parent_gpu = container_of(nv_kref, uvm_parent_gpu_t, gpu_kref);
+    NvU32 sub_processor_index;
+
+    UVM_ASSERT(parent_gpu->num_retained_gpus == 0);
+    UVM_ASSERT(bitmap_empty(parent_gpu->valid_gpus, UVM_ID_MAX_SUB_PROCESSORS));
+
+    for (sub_processor_index = 0; sub_processor_index < UVM_ID_MAX_SUB_PROCESSORS; sub_processor_index++)
+        UVM_ASSERT(!parent_gpu->gpus[sub_processor_index]);
+
+    uvm_kvfree(parent_gpu);
+}
+
+void uvm_parent_gpu_kref_put(uvm_parent_gpu_t *parent_gpu)
+{
+    nv_kref_put(&parent_gpu->gpu_kref, uvm_parent_gpu_destroy);
+}
+
+static void update_stats_gpu_fault_instance(uvm_gpu_t *gpu,
+                                            const uvm_fault_buffer_entry_t *fault_entry,
+                                            bool is_duplicate)
+{
+    if (!fault_entry->is_replayable) {
+        switch (fault_entry->fault_access_type)
+        {
+            case UVM_FAULT_ACCESS_TYPE_READ:
+                ++gpu->parent->fault_buffer_info.non_replayable.stats.num_read_faults;
+                break;
+            case UVM_FAULT_ACCESS_TYPE_WRITE:
+                ++gpu->parent->fault_buffer_info.non_replayable.stats.num_write_faults;
+                break;
+            case UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK:
+            case UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG:
+                ++gpu->parent->fault_buffer_info.non_replayable.stats.num_atomic_faults;
+                break;
+            default:
+                UVM_ASSERT_MSG(false, "Invalid access type for non-replayable faults\n");
+                break;
+        }
+
+        if (!fault_entry->is_virtual)
+            ++gpu->parent->fault_buffer_info.non_replayable.stats.num_physical_faults;
+
+        ++gpu->parent->stats.num_non_replayable_faults;
+
+        return;
+    }
+
+    UVM_ASSERT(fault_entry->is_virtual);
+
+    switch (fault_entry->fault_access_type)
+    {
+        case UVM_FAULT_ACCESS_TYPE_PREFETCH:
+            ++gpu->parent->fault_buffer_info.replayable.stats.num_prefetch_faults;
+            break;
+        case UVM_FAULT_ACCESS_TYPE_READ:
+            ++gpu->parent->fault_buffer_info.replayable.stats.num_read_faults;
+            break;
+        case UVM_FAULT_ACCESS_TYPE_WRITE:
+            ++gpu->parent->fault_buffer_info.replayable.stats.num_write_faults;
+            break;
+        case UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK:
+        case UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG:
+            ++gpu->parent->fault_buffer_info.replayable.stats.num_atomic_faults;
+            break;
+        default:
+            break;
+    }
+    if (is_duplicate || fault_entry->filtered)
+        ++gpu->parent->fault_buffer_info.replayable.stats.num_duplicate_faults;
+
+    
++gpu->parent->stats.num_replayable_faults; +} + +static void update_stats_fault_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + uvm_gpu_t *gpu; + const uvm_fault_buffer_entry_t *fault_entry, *fault_instance; + + UVM_ASSERT(event_id == UVM_PERF_EVENT_FAULT); + + if (UVM_ID_IS_CPU(event_data->fault.proc_id)) + return; + + // The reported fault entry must be the "representative" fault entry + UVM_ASSERT(!event_data->fault.gpu.buffer_entry->filtered); + + gpu = uvm_va_space_get_gpu(event_data->fault.space, event_data->fault.proc_id); + + fault_entry = event_data->fault.gpu.buffer_entry; + + // Update the stats using the representative fault entry and the rest of + // instances + update_stats_gpu_fault_instance(gpu, fault_entry, event_data->fault.gpu.is_duplicate); + + list_for_each_entry(fault_instance, &fault_entry->merged_instances_list, merged_instances_list) + update_stats_gpu_fault_instance(gpu, fault_instance, event_data->fault.gpu.is_duplicate); +} + +static void update_stats_migration_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + uvm_gpu_t *gpu_dst = NULL; + uvm_gpu_t *gpu_src = NULL; + NvU64 pages; + bool is_replayable_fault; + bool is_non_replayable_fault; + bool is_access_counter; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(event_data->migration.block); + + UVM_ASSERT(event_id == UVM_PERF_EVENT_MIGRATION); + + if (UVM_ID_IS_GPU(event_data->migration.dst)) + gpu_dst = uvm_va_space_get_gpu(va_space, event_data->migration.dst); + + if (UVM_ID_IS_GPU(event_data->migration.src)) + gpu_src = uvm_va_space_get_gpu(va_space, event_data->migration.src); + + if (!gpu_dst && !gpu_src) + return; + + // Page prefetching is also triggered by faults + is_replayable_fault = + event_data->migration.make_resident_context->cause == UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT; + is_non_replayable_fault = + event_data->migration.make_resident_context->cause == UVM_MAKE_RESIDENT_CAUSE_NON_REPLAYABLE_FAULT; + is_access_counter = + event_data->migration.make_resident_context->cause == UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER; + + pages = event_data->migration.bytes / PAGE_SIZE; + UVM_ASSERT(event_data->migration.bytes % PAGE_SIZE == 0); + UVM_ASSERT(pages > 0); + + if (gpu_dst) { + atomic64_add(pages, &gpu_dst->parent->stats.num_pages_in); + if (is_replayable_fault) + atomic64_add(pages, &gpu_dst->parent->fault_buffer_info.replayable.stats.num_pages_in); + else if (is_non_replayable_fault) + atomic64_add(pages, &gpu_dst->parent->fault_buffer_info.non_replayable.stats.num_pages_in); + else if (is_access_counter) + atomic64_add(pages, &gpu_dst->parent->access_counter_buffer_info.stats.num_pages_in); + } + if (gpu_src) { + atomic64_add(pages, &gpu_src->parent->stats.num_pages_out); + if (is_replayable_fault) + atomic64_add(pages, &gpu_src->parent->fault_buffer_info.replayable.stats.num_pages_out); + else if (is_non_replayable_fault) + atomic64_add(pages, &gpu_src->parent->fault_buffer_info.non_replayable.stats.num_pages_out); + else if (is_access_counter) + atomic64_add(pages, &gpu_src->parent->access_counter_buffer_info.stats.num_pages_out); + } +} + +// Override the UVM driver and GPU settings from the module loader +static void uvm_param_conf(void) +{ + // uvm_peer_copy: Valid entries are "phys" and "virt" for Ampere+ GPUs. 
+ // No effect in pre-Ampere GPUs + if (strcmp(uvm_peer_copy, UVM_PARAM_PEER_COPY_VIRTUAL) == 0) { + g_uvm_global.peer_copy_mode = UVM_GPU_PEER_COPY_MODE_VIRTUAL; + } + else { + if (strcmp(uvm_peer_copy, UVM_PARAM_PEER_COPY_PHYSICAL) != 0) { + pr_info("Invalid value for uvm_peer_copy = %s, using %s instead.\n", + uvm_peer_copy, UVM_PARAM_PEER_COPY_PHYSICAL); + } + + g_uvm_global.peer_copy_mode = UVM_GPU_PEER_COPY_MODE_PHYSICAL; + } +} + +NV_STATUS uvm_gpu_init(void) +{ + NV_STATUS status; + + uvm_param_conf(); + + status = uvm_hal_init_table(); + if (status != NV_OK) { + UVM_ERR_PRINT("uvm_hal_init_table() failed: %s\n", nvstatusToString(status)); + return status; + } + + return NV_OK; +} + +void uvm_gpu_exit(void) +{ + uvm_parent_gpu_t *parent_gpu; + + for_each_parent_gpu(parent_gpu) + UVM_ASSERT_MSG(false, "GPU still present: %s\n", parent_gpu->name); + + // CPU should never be in the retained GPUs mask + UVM_ASSERT(!uvm_global_processor_mask_test(&g_uvm_global.retained_gpus, UVM_GLOBAL_ID_CPU)); +} + +NV_STATUS uvm_gpu_init_va_space(uvm_va_space_t *va_space) +{ + NV_STATUS status; + + if (uvm_procfs_is_debug_enabled()) { + status = uvm_perf_register_event_callback(&va_space->perf_events, + UVM_PERF_EVENT_FAULT, + update_stats_fault_cb); + if (status != NV_OK) + return status; + + status = uvm_perf_register_event_callback(&va_space->perf_events, + UVM_PERF_EVENT_MIGRATION, + update_stats_migration_cb); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +uvm_parent_gpu_t *uvm_parent_gpu_get_by_uuid_locked(const NvProcessorUuid *gpu_uuid) +{ + uvm_parent_gpu_t *parent_gpu; + + for_each_parent_gpu(parent_gpu) { + if (uvm_processor_uuid_eq(&parent_gpu->uuid, gpu_uuid)) + return parent_gpu; + } + + return NULL; +} + +uvm_parent_gpu_t *uvm_parent_gpu_get_by_uuid(const NvProcessorUuid *gpu_uuid) +{ + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + return uvm_parent_gpu_get_by_uuid_locked(gpu_uuid); +} + +static uvm_gpu_t *uvm_gpu_get_by_uuid_locked(const NvProcessorUuid *gpu_uuid) +{ + uvm_gpu_id_t gpu_id; + uvm_global_gpu_id_t global_gpu_id; + uvm_gpu_t *gpu; + + for_each_gpu_id(gpu_id) { + global_gpu_id = uvm_global_gpu_id_from_gpu_id(gpu_id); + gpu = uvm_gpu_get(global_gpu_id); + if (gpu) { + if (uvm_processor_uuid_eq(uvm_gpu_uuid(gpu), gpu_uuid)) { + UVM_ASSERT(!gpu->parent->smc.enabled); + return gpu; + } + } + } + + return NULL; +} + +uvm_gpu_t *uvm_gpu_get_by_uuid(const NvProcessorUuid *gpu_uuid) +{ + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + return uvm_gpu_get_by_uuid_locked(gpu_uuid); +} + +uvm_gpu_t *uvm_gpu_get_by_parent_and_swizz_id_locked(uvm_parent_gpu_t *parent_gpu, NvU32 swizz_id) +{ + uvm_gpu_t *gpu; + + UVM_ASSERT(parent_gpu); + + for_each_gpu_in_parent(parent_gpu, gpu) { + if (gpu->smc.swizz_id == swizz_id) + return gpu; + } + + return NULL; +} + +uvm_gpu_t *uvm_gpu_get_by_parent_and_swizz_id(uvm_parent_gpu_t *parent_gpu, NvU32 swizz_id) +{ + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + return uvm_gpu_get_by_parent_and_swizz_id_locked(parent_gpu, swizz_id); +} + +// Increment the refcount for the GPU with the given UUID. If this is the first +// time that this UUID is retained, the GPU is added to UVM. +// When SMC partitioning is enabled, user_rm_device contains the user handles +// that were created by the caller, and that can be used to identify and +// obtain information about the partition. nvUvmInterfaceGetGpuInfo returns, in +// gpu_info, whether SMC is enabled and the swizzId corresponding to the +// partition. 
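+//
+// [Editor's note] Minimal caller-side sketch (illustrative only, not part of
+// the original change; "uuid" and "user_rm_device" are hypothetical locals).
+// The public wrapper uvm_gpu_retain_by_uuid() takes the global lock, calls
+// this function, and a successful retain is balanced later by uvm_gpu_release():
+//
+//     uvm_gpu_t *gpu;
+//     NV_STATUS status = uvm_gpu_retain_by_uuid(&uuid, &user_rm_device, &gpu);
+//
+//     if (status == NV_OK) {
+//         // ... use the GPU ...
+//         uvm_gpu_release(gpu);
+//     }
+//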
+static NV_STATUS gpu_retain_by_uuid_locked(const NvProcessorUuid *gpu_uuid, + const uvm_rm_user_object_t *user_rm_device, + uvm_gpu_t **gpu_out) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu = NULL; + uvm_parent_gpu_t *parent_gpu; + UvmGpuInfo *gpu_info = NULL; + UvmGpuClientInfo client_info = {0}; + UvmGpuPlatformInfo gpu_platform_info = {0}; + uvm_global_gpu_id_t global_gpu_id; + + client_info.hClient = user_rm_device->user_client; + client_info.hSmcPartRef = user_rm_device->user_object; + + gpu_info = uvm_kvmalloc_zero(sizeof(*gpu_info)); + if (!gpu_info) + return NV_ERR_NO_MEMORY; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + parent_gpu = uvm_parent_gpu_get_by_uuid(gpu_uuid); + + if (parent_gpu == NULL) { + // If this is the first time the UUID is seen, register it on RM + status = uvm_rm_locked_call(nvUvmInterfaceRegisterGpu(gpu_uuid, &gpu_platform_info)); + if (status != NV_OK) + goto error_free_gpu_info; + } + + status = uvm_rm_locked_call(nvUvmInterfaceGetGpuInfo(gpu_uuid, &client_info, gpu_info)); + if (status != NV_OK) + goto error_unregister; + + if (parent_gpu != NULL) { + // If the UUID has been seen before, and if SMC is enabled, then check + // if this specific partition has been seen previously. The UUID-based + // look-up above may have succeeded for a different partition with the + // same parent GPU. + if (gpu_info->smcEnabled) { + gpu = uvm_gpu_get_by_parent_and_swizz_id(parent_gpu, gpu_info->smcSwizzId); + } + else { + gpu = parent_gpu->gpus[0]; + UVM_ASSERT(gpu != NULL); + } + } + + if (gpu == NULL) { + status = find_unused_global_gpu_id(parent_gpu, &global_gpu_id); + if (status != NV_OK) + goto error_unregister; + + status = add_gpu(gpu_uuid, global_gpu_id, gpu_info, &gpu_platform_info, parent_gpu, &gpu); + if (status != NV_OK) + goto error_unregister; + } + else { + atomic64_inc(&gpu->retained_count); + } + + *gpu_out = gpu; + + uvm_kvfree(gpu_info); + + return status; + +error_unregister: + if (parent_gpu == NULL) + uvm_rm_locked_call_void(nvUvmInterfaceUnregisterGpu(gpu_uuid)); +error_free_gpu_info: + uvm_kvfree(gpu_info); + + return status; +} + +NV_STATUS uvm_gpu_retain_by_uuid(const NvProcessorUuid *gpu_uuid, + const uvm_rm_user_object_t *user_rm_device, + uvm_gpu_t **gpu_out) +{ + NV_STATUS status; + uvm_mutex_lock(&g_uvm_global.global_lock); + status = gpu_retain_by_uuid_locked(gpu_uuid, user_rm_device, gpu_out); + uvm_mutex_unlock(&g_uvm_global.global_lock); + return status; +} + +void uvm_gpu_retain(uvm_gpu_t *gpu) +{ + UVM_ASSERT(uvm_gpu_retained_count(gpu) > 0); + atomic64_inc(&gpu->retained_count); +} + +void uvm_gpu_release_locked(uvm_gpu_t *gpu) +{ + uvm_parent_gpu_t *parent_gpu = gpu->parent; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + UVM_ASSERT(uvm_gpu_retained_count(gpu) > 0); + + if (atomic64_dec_and_test(&gpu->retained_count)) { + nv_kref_get(&parent_gpu->gpu_kref); + remove_gpu(gpu); + if (parent_gpu->num_retained_gpus == 0) + uvm_rm_locked_call_void(nvUvmInterfaceUnregisterGpu(&parent_gpu->uuid)); + uvm_parent_gpu_kref_put(parent_gpu); + } +} + +void uvm_gpu_release(uvm_gpu_t *gpu) +{ + uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_gpu_release_locked(gpu); + uvm_mutex_unlock(&g_uvm_global.global_lock); +} + +// Note: Peer table is an upper triangular matrix packed into a flat array. +// This function converts an index of 2D array of size [N x N] into an index +// of upper triangular array of size [((N - 1) * ((N - 1) + 1)) / 2] which +// does not include diagonal elements. 
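+//
+// [Editor's note] Worked example (illustrative only; it assumes a hypothetical
+// UVM_ID_MAX_GPUS of 8 and that a GPU's 0-based index is its id value minus 1,
+// per the "[1, N)" note inside the function). For GPU indices (1, 3), i.e. GPU
+// id values (2, 4):
+//
+//     square_index     = 1 * 8 + 3               = 11
+//     triangular_index = 11 - SUM_FROM_0_TO_N(2) = 11 - 3 = 8
+//
+// which matches a row-by-row enumeration of the strictly-upper-triangular
+// pairs: row 0 holds 7 pairs (flat indices 0..6), so (1, 2) is 7 and (1, 3) is 8.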
+NvU32 uvm_gpu_peer_table_index(const uvm_gpu_id_t gpu_id0, const uvm_gpu_id_t gpu_id1) +{ + NvU32 square_index, triangular_index; + NvU32 gpu_index0 = uvm_id_gpu_index(gpu_id0); + NvU32 gpu_index1 = uvm_id_gpu_index(gpu_id1); + + UVM_ASSERT(!uvm_id_equal(gpu_id0, gpu_id1)); + + // Calculate an index of 2D array by re-ordering indices to always point + // to the same entry. + square_index = min(gpu_index0, gpu_index1) * UVM_ID_MAX_GPUS + + max(gpu_index0, gpu_index1); + + // Calculate and subtract number of lower triangular matrix elements till + // the current row (which includes diagonal elements) to get the correct + // index in an upper triangular matrix. + // Note: As gpu_id can be [1, N), no extra logic is needed to calculate + // diagonal elements. + triangular_index = square_index - SUM_FROM_0_TO_N(min(uvm_id_value(gpu_id0), uvm_id_value(gpu_id1))); + + UVM_ASSERT(triangular_index < UVM_MAX_UNIQUE_GPU_PAIRS); + + return triangular_index; +} + +NV_STATUS uvm_gpu_check_ecc_error_no_rm(uvm_gpu_t *gpu) +{ + // We may need to call service_interrupts() which cannot be done in the top + // half interrupt handler so assert here as well to catch improper use as + // early as possible. + UVM_ASSERT(!in_interrupt()); + + if (!gpu->ecc.enabled) + return NV_OK; + + // Early out If a global ECC error is already set to not spam the logs with + // the same error. + if (uvm_global_get_status() == NV_ERR_ECC_ERROR) + return NV_ERR_ECC_ERROR; + + if (*gpu->ecc.error_notifier) { + UVM_ERR_PRINT("ECC error encountered, GPU %s\n", uvm_gpu_name(gpu)); + uvm_global_set_fatal_error(NV_ERR_ECC_ERROR); + return NV_ERR_ECC_ERROR; + } + + // RM hasn't seen an ECC error yet, check whether there is a pending + // interrupt that might indicate one. We might get false positives because + // the interrupt bits we read are not ECC-specific. They're just the + // top-level bits for any interrupt on all engines which support ECC. On + // Pascal for example, RM returns us a mask with the bits for GR, L2, and + // FB, because any of those might raise an ECC interrupt. So if they're set + // we have to ask RM to check whether it was really an ECC error (and a + // double-bit ECC error at that), in which case it sets the notifier. + if ((*gpu->ecc.hw_interrupt_tree_location & gpu->ecc.mask) == 0) { + // No pending interrupts. + return NV_OK; + } + + // An interrupt that might mean an ECC error needs to be serviced, signal + // that to the caller. 
+ return NV_WARN_MORE_PROCESSING_REQUIRED; +} + +static NV_STATUS get_p2p_caps(uvm_gpu_t *gpu0, + uvm_gpu_t *gpu1, + UvmGpuP2PCapsParams *p2p_caps_params) +{ + NV_STATUS status; + uvmGpuDeviceHandle rm_device0, rm_device1; + + if (uvm_id_value(gpu0->id) < uvm_id_value(gpu1->id)) { + rm_device0 = uvm_gpu_device_handle(gpu0); + rm_device1 = uvm_gpu_device_handle(gpu1); + } + else { + rm_device0 = uvm_gpu_device_handle(gpu1); + rm_device1 = uvm_gpu_device_handle(gpu0); + } + + memset(p2p_caps_params, 0, sizeof(*p2p_caps_params)); + status = uvm_rm_locked_call(nvUvmInterfaceGetP2PCaps(rm_device0, rm_device1, p2p_caps_params)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfaceGetP2PCaps() failed with error: %s, for GPU0:%s and GPU1:%s\n", + nvstatusToString(status), + uvm_gpu_name(gpu0), + uvm_gpu_name(gpu1)); + return status; + } + + if (p2p_caps_params->p2pLink != UVM_LINK_TYPE_NONE) { + // P2P is not supported under SMC partitioning + UVM_ASSERT(!gpu0->parent->smc.enabled); + UVM_ASSERT(!gpu1->parent->smc.enabled); + } + + return NV_OK; +} + +static NV_STATUS create_p2p_object(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1, NvHandle *p2p_handle) +{ + NV_STATUS status; + uvmGpuDeviceHandle rm_device0, rm_device1; + + if (uvm_id_value(gpu0->id) < uvm_id_value(gpu1->id)) { + rm_device0 = uvm_gpu_device_handle(gpu0); + rm_device1 = uvm_gpu_device_handle(gpu1); + } + else { + rm_device0 = uvm_gpu_device_handle(gpu1); + rm_device1 = uvm_gpu_device_handle(gpu0); + } + + *p2p_handle = 0; + + status = uvm_rm_locked_call(nvUvmInterfaceP2pObjectCreate(rm_device0, rm_device1, p2p_handle)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfaceP2pObjectCreate() failed with error: %s, for GPU0:%s and GPU1:%s\n", + nvstatusToString(status), + uvm_gpu_name(gpu0), + uvm_gpu_name(gpu1)); + return status; + } + + UVM_ASSERT(*p2p_handle); + return NV_OK; +} + +static void set_optimal_p2p_write_ces(const UvmGpuP2PCapsParams *p2p_caps_params, + const uvm_gpu_peer_t *peer_caps, + uvm_gpu_t *gpu0, + uvm_gpu_t *gpu1) +{ + bool sorted; + NvU32 ce0, ce1; + + if (peer_caps->link_type < UVM_GPU_LINK_NVLINK_1) + return; + + sorted = uvm_id_value(gpu0->id) < uvm_id_value(gpu1->id); + ce0 = p2p_caps_params->optimalNvlinkWriteCEs[sorted ? 0 : 1]; + ce1 = p2p_caps_params->optimalNvlinkWriteCEs[sorted ? 
1 : 0]; + + // Indirect peers communicate through the CPU, so the optimal CE + // should match the one selected for writing to system memory + if (peer_caps->is_indirect_peer) { + uvm_channel_pool_t *pool; + + pool = gpu0->channel_manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_GPU_TO_CPU]; + UVM_ASSERT(ce0 == pool->engine_index); + + pool = gpu1->channel_manager->pool_to_use.default_for_type[UVM_CHANNEL_TYPE_GPU_TO_CPU]; + UVM_ASSERT(ce1 == pool->engine_index); + } + + uvm_channel_manager_set_p2p_ce(gpu0->channel_manager, gpu1, ce0); + uvm_channel_manager_set_p2p_ce(gpu1->channel_manager, gpu0, ce1); +} + +static int nv_procfs_read_gpu_peer_caps(struct seq_file *s, void *v) +{ + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + gpu_peer_caps_print((uvm_gpu_t **)s->private, s); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_gpu_peer_caps_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_gpu_peer_caps(s, v)); +} + +UVM_DEFINE_SINGLE_PROCFS_FILE(gpu_peer_caps_entry); + +static NV_STATUS init_procfs_peer_cap_files(uvm_gpu_t *local, uvm_gpu_t *remote, size_t local_idx) +{ + // This needs to hold a gpu_id_t in decimal + char gpu_dir_name[16]; + + // This needs to hold a GPU UUID + char symlink_name[UVM_GPU_UUID_TEXT_BUFFER_LENGTH]; + uvm_gpu_peer_t *peer_caps; + + if (!uvm_procfs_is_enabled()) + return NV_OK; + + peer_caps = uvm_gpu_peer_caps(local, remote); + peer_caps->procfs.pairs[local_idx][0] = local; + peer_caps->procfs.pairs[local_idx][1] = remote; + + // Create gpus/gpuA/peers/gpuB + snprintf(gpu_dir_name, sizeof(gpu_dir_name), "%u", uvm_id_value(remote->id)); + peer_caps->procfs.peer_file[local_idx] = NV_CREATE_PROC_FILE(gpu_dir_name, + local->procfs.dir_peers, + gpu_peer_caps_entry, + &peer_caps->procfs.pairs[local_idx]); + + if (peer_caps->procfs.peer_file[local_idx] == NULL) + return NV_ERR_OPERATING_SYSTEM; + + // Create a symlink from UVM GPU UUID (UVM-GPU-...) to the UVM GPU ID gpuB + format_uuid_to_buffer(symlink_name, sizeof(symlink_name), uvm_gpu_uuid(remote)); + peer_caps->procfs.peer_symlink_file[local_idx] = proc_symlink(symlink_name, + local->procfs.dir_peers, + gpu_dir_name); + if (peer_caps->procfs.peer_symlink_file[local_idx] == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static NV_STATUS init_peer_access(uvm_gpu_t *gpu0, + uvm_gpu_t *gpu1, + const UvmGpuP2PCapsParams *p2p_caps_params, + uvm_gpu_peer_t *peer_caps) +{ + NV_STATUS status; + + + + + + // check for peer-to-peer compatibility (PCI-E or NvLink). 
+    peer_caps->link_type = get_gpu_link_type(p2p_caps_params->p2pLink);
+    if (peer_caps->link_type == UVM_GPU_LINK_INVALID)
+        return NV_ERR_NOT_SUPPORTED;
+
+    peer_caps->total_link_line_rate_mbyte_per_s = p2p_caps_params->totalLinkLineRateMBps;
+
+    // Initialize peer ids and establish peer mappings
+    peer_caps->is_indirect_peer = (p2p_caps_params->indirectAccess == NV_TRUE);
+
+    if (peer_caps->is_indirect_peer) {
+        UVM_ASSERT(gpu0->parent->numa_info.enabled);
+        UVM_ASSERT(gpu1->parent->numa_info.enabled);
+
+        status = uvm_pmm_gpu_indirect_peer_init(&gpu0->pmm, gpu1);
+        if (status != NV_OK)
+            return status;
+
+        status = uvm_pmm_gpu_indirect_peer_init(&gpu1->pmm, gpu0);
+        if (status != NV_OK)
+            return status;
+
+        set_optimal_p2p_write_ces(p2p_caps_params, peer_caps, gpu0, gpu1);
+        UVM_ASSERT(peer_caps->total_link_line_rate_mbyte_per_s == 0);
+    }
+    else {
+        // Peer id from min(gpu_id0, gpu_id1) -> max(gpu_id0, gpu_id1)
+        peer_caps->peer_ids[0] = p2p_caps_params->peerIds[0];
+
+        // Peer id from max(gpu_id0, gpu_id1) -> min(gpu_id0, gpu_id1)
+        peer_caps->peer_ids[1] = p2p_caps_params->peerIds[1];
+
+        // Establish peer mappings from each GPU to the other. Indirect peers
+        // do not require identity mappings since they use sysmem aperture to
+        // communicate.
+        status = uvm_mmu_create_peer_identity_mappings(gpu0, gpu1);
+        if (status != NV_OK)
+            return status;
+
+        status = uvm_mmu_create_peer_identity_mappings(gpu1, gpu0);
+        if (status != NV_OK)
+            return status;
+
+        set_optimal_p2p_write_ces(p2p_caps_params, peer_caps, gpu0, gpu1);
+
+        UVM_ASSERT(uvm_gpu_get(gpu0->global_id) == gpu0);
+        UVM_ASSERT(uvm_gpu_get(gpu1->global_id) == gpu1);
+
+        // In the case of NVLINK peers, this initialization will happen during
+        // add_gpu. As soon as the peer info table is assigned below, the access
+        // counter bottom half could start operating on the GPU being newly
+        // added and inspecting the peer caps, so all of the appropriate
+        // initialization must happen before this point.
+        uvm_spin_lock(&gpu0->peer_info.peer_gpus_lock);
+
+        uvm_processor_mask_set(&gpu0->peer_info.peer_gpu_mask, gpu1->id);
+        UVM_ASSERT(gpu0->peer_info.peer_gpus[uvm_id_gpu_index(gpu1->id)] == NULL);
+        gpu0->peer_info.peer_gpus[uvm_id_gpu_index(gpu1->id)] = gpu1;
+
+        uvm_spin_unlock(&gpu0->peer_info.peer_gpus_lock);
+        uvm_spin_lock(&gpu1->peer_info.peer_gpus_lock);
+
+        uvm_processor_mask_set(&gpu1->peer_info.peer_gpu_mask, gpu0->id);
+        UVM_ASSERT(gpu1->peer_info.peer_gpus[uvm_id_gpu_index(gpu0->id)] == NULL);
+        gpu1->peer_info.peer_gpus[uvm_id_gpu_index(gpu0->id)] = gpu0;
+
+        uvm_spin_unlock(&gpu1->peer_info.peer_gpus_lock);
+    }
+
+    if (!uvm_procfs_is_debug_enabled())
+        return NV_OK;
+
+    status = init_procfs_peer_cap_files(gpu0, gpu1, 0);
+    if (status != NV_OK)
+        return status;
+
+    status = init_procfs_peer_cap_files(gpu1, gpu0, 1);
+    if (status != NV_OK)
+        return status;
+
+    return NV_OK;
+}
+
+static NV_STATUS enable_pcie_peer_access(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1)
+{
+    NV_STATUS status = NV_OK;
+    UvmGpuP2PCapsParams p2p_caps_params;
+    uvm_gpu_peer_t *peer_caps;
+    NvHandle p2p_handle;
+
+    UVM_ASSERT(gpu0);
+    UVM_ASSERT(gpu1);
+    uvm_assert_mutex_locked(&g_uvm_global.global_lock);
+
+    peer_caps = uvm_gpu_peer_caps(gpu0, gpu1);
+    UVM_ASSERT(peer_caps->link_type == UVM_GPU_LINK_INVALID);
+    UVM_ASSERT(peer_caps->ref_count == 0);
+
+    status = create_p2p_object(gpu0, gpu1, &p2p_handle);
+    if (status != NV_OK)
+        return status;
+
+    // Store the handle in the global table.
+ peer_caps->p2p_handle = p2p_handle; + + status = get_p2p_caps(gpu0, gpu1, &p2p_caps_params); + if (status != NV_OK) + goto cleanup; + + // Sanity checks + UVM_ASSERT(p2p_caps_params.indirectAccess == NV_FALSE); + UVM_ASSERT(p2p_caps_params.p2pLink == UVM_LINK_TYPE_PCIE); + + status = init_peer_access(gpu0, gpu1, &p2p_caps_params, peer_caps); + if (status != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + disable_peer_access(gpu0, gpu1); + return status; +} + +static NV_STATUS enable_nvlink_peer_access(uvm_gpu_t *gpu0, + uvm_gpu_t *gpu1, + UvmGpuP2PCapsParams *p2p_caps_params) +{ + NV_STATUS status = NV_OK; + NvHandle p2p_handle; + uvm_gpu_peer_t *peer_caps; + + UVM_ASSERT(gpu0); + UVM_ASSERT(gpu1); + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + peer_caps = uvm_gpu_peer_caps(gpu0, gpu1); + UVM_ASSERT(peer_caps->ref_count == 0); + peer_caps->ref_count = 1; + + if (!p2p_caps_params->indirectAccess) { + // Create P2P object for direct NVLink peers + status = create_p2p_object(gpu0, gpu1, &p2p_handle); + if (status != NV_OK) { + UVM_ERR_PRINT("failed to create a P2P object with error: %s, for GPU1:%s and GPU2:%s \n", + nvstatusToString(status), + uvm_gpu_name(gpu0), + uvm_gpu_name(gpu1)); + return status; + } + + UVM_ASSERT(p2p_handle != 0); + + // Store the handle in the global table. + peer_caps->p2p_handle = p2p_handle; + + // Update p2p caps after p2p object creation as it generates the peer + // ids + status = get_p2p_caps(gpu0, gpu1, p2p_caps_params); + if (status != NV_OK) + goto cleanup; + } + + status = init_peer_access(gpu0, gpu1, p2p_caps_params, peer_caps); + if (status != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + disable_peer_access(gpu0, gpu1); + return status; +} + +static NV_STATUS discover_nvlink_peers(uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *other_gpu; + + UVM_ASSERT(gpu); + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (gpu->parent->smc.enabled) + return NV_OK; + + for_each_global_gpu(other_gpu) { + UvmGpuP2PCapsParams p2p_caps_params; + + if ((other_gpu == gpu) || other_gpu->parent->smc.enabled) + continue; + + status = get_p2p_caps(gpu, other_gpu, &p2p_caps_params); + if (status != NV_OK) + goto cleanup; + + // PCIe peers need to be explicitly enabled via UvmEnablePeerAccess + if (p2p_caps_params.p2pLink == UVM_LINK_TYPE_NONE || p2p_caps_params.p2pLink == UVM_LINK_TYPE_PCIE) + continue; + + // Indirect peers are only supported when onlined as NUMA nodes, because + // we want to use vm_insert_page and dma_map_page. 
+ if (p2p_caps_params.indirectAccess && + (!gpu->parent->numa_info.enabled || !other_gpu->parent->numa_info.enabled)) + continue; + + status = enable_nvlink_peer_access(gpu, other_gpu, &p2p_caps_params); + if (status != NV_OK) + goto cleanup; + } + + return NV_OK; + +cleanup: + destroy_nvlink_peers(gpu); + + return status; +} + +static void destroy_nvlink_peers(uvm_gpu_t *gpu) +{ + uvm_gpu_t *other_gpu; + + UVM_ASSERT(gpu); + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (gpu->parent->smc.enabled) + return; + + for_each_global_gpu(other_gpu) { + uvm_gpu_peer_t *peer_caps; + + if ((other_gpu == gpu) || other_gpu->parent->smc.enabled) + continue; + + peer_caps = uvm_gpu_peer_caps(gpu, other_gpu); + + // PCIe peers need to be explicitly destroyed via UvmDisablePeerAccess + if (peer_caps->link_type == UVM_GPU_LINK_INVALID || peer_caps->link_type == UVM_GPU_LINK_PCIE) + continue; + + disable_peer_access(gpu, other_gpu); + } +} + +NV_STATUS uvm_gpu_retain_pcie_peer_access(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + NV_STATUS status = NV_OK; + uvm_gpu_peer_t *peer_caps; + + UVM_ASSERT(gpu0); + UVM_ASSERT(gpu1); + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + peer_caps = uvm_gpu_peer_caps(gpu0, gpu1); + + // Insert an entry into global peer table, if not present. + if (peer_caps->link_type == UVM_GPU_LINK_INVALID) { + UVM_ASSERT(peer_caps->ref_count == 0); + + status = enable_pcie_peer_access(gpu0, gpu1); + if (status != NV_OK) + return status; + } + else if (peer_caps->link_type != UVM_GPU_LINK_PCIE) { + return NV_ERR_INVALID_DEVICE; + } + + // GPUs can't be destroyed until their peer pairings have also been + // destroyed. + uvm_gpu_retain(gpu0); + uvm_gpu_retain(gpu1); + + peer_caps->ref_count++; + + return status; +} + +static void disable_peer_access(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + uvm_gpu_peer_t *peer_caps; + NvHandle p2p_handle = 0; + + UVM_ASSERT(gpu0); + UVM_ASSERT(gpu1); + + // P2P is not supported under SMC partitioning + UVM_ASSERT(!gpu0->parent->smc.enabled); + UVM_ASSERT(!gpu1->parent->smc.enabled); + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + peer_caps = uvm_gpu_peer_caps(gpu0, gpu1); + + if (uvm_procfs_is_debug_enabled()) + deinit_procfs_peer_cap_files(peer_caps); + + p2p_handle = peer_caps->p2p_handle; + + if (peer_caps->is_indirect_peer) { + uvm_pmm_gpu_indirect_peer_destroy(&gpu0->pmm, gpu1); + uvm_pmm_gpu_indirect_peer_destroy(&gpu1->pmm, gpu0); + } + else { + UVM_ASSERT(p2p_handle); + + uvm_mmu_destroy_peer_identity_mappings(gpu0, gpu1); + uvm_mmu_destroy_peer_identity_mappings(gpu1, gpu0); + + uvm_rm_locked_call_void(nvUvmInterfaceP2pObjectDestroy(uvm_gpu_session_handle(gpu0), p2p_handle)); + + UVM_ASSERT(uvm_gpu_get(gpu0->global_id) == gpu0); + UVM_ASSERT(uvm_gpu_get(gpu1->global_id) == gpu1); + + uvm_spin_lock(&gpu0->peer_info.peer_gpus_lock); + uvm_processor_mask_clear(&gpu0->peer_info.peer_gpu_mask, gpu1->id); + gpu0->peer_info.peer_gpus[uvm_id_gpu_index(gpu1->id)] = NULL; + uvm_spin_unlock(&gpu0->peer_info.peer_gpus_lock); + + uvm_spin_lock(&gpu1->peer_info.peer_gpus_lock); + uvm_processor_mask_clear(&gpu1->peer_info.peer_gpu_mask, gpu0->id); + gpu1->peer_info.peer_gpus[uvm_id_gpu_index(gpu0->id)] = NULL; + uvm_spin_unlock(&gpu1->peer_info.peer_gpus_lock); + } + + // Flush the access counter buffer to avoid getting stale notifications for + // accesses to GPUs to which peer access is being disabled. 
This is also + // needed in the case of disabling automatic (NVLINK) peers on GPU + // unregister, because access counter processing might still be using GPU + // IDs queried from the peer table above which are about to be removed from + // the global table. + if (gpu0->parent->access_counters_supported) + uvm_gpu_access_counter_buffer_flush(gpu0); + if (gpu1->parent->access_counters_supported) + uvm_gpu_access_counter_buffer_flush(gpu1); + + memset(peer_caps, 0, sizeof(*peer_caps)); +} + +void uvm_gpu_release_pcie_peer_access(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + uvm_gpu_peer_t *peer_caps; + UVM_ASSERT(gpu0); + UVM_ASSERT(gpu1); + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + peer_caps = uvm_gpu_peer_caps(gpu0, gpu1); + + UVM_ASSERT(peer_caps->ref_count > 0); + UVM_ASSERT(peer_caps->link_type == UVM_GPU_LINK_PCIE); + peer_caps->ref_count--; + + if (peer_caps->ref_count == 0) + disable_peer_access(gpu0, gpu1); + + uvm_gpu_release_locked(gpu0); + uvm_gpu_release_locked(gpu1); +} + +static uvm_aperture_t uvm_gpu_peer_caps_aperture(uvm_gpu_peer_t *peer_caps, uvm_gpu_t *local_gpu, uvm_gpu_t *remote_gpu) +{ + size_t peer_index; + UVM_ASSERT(peer_caps->link_type != UVM_GPU_LINK_INVALID); + + // Indirect peers are accessed as sysmem addresses + if (peer_caps->is_indirect_peer) + return UVM_APERTURE_SYS; + + if (uvm_id_value(local_gpu->id) < uvm_id_value(remote_gpu->id)) + peer_index = 0; + else + peer_index = 1; + + return UVM_APERTURE_PEER(peer_caps->peer_ids[peer_index]); +} + +uvm_aperture_t uvm_gpu_peer_aperture(uvm_gpu_t *local_gpu, uvm_gpu_t *remote_gpu) +{ + uvm_gpu_peer_t *peer_caps = uvm_gpu_peer_caps(local_gpu, remote_gpu); + return uvm_gpu_peer_caps_aperture(peer_caps, local_gpu, remote_gpu); +} + +uvm_aperture_t uvm_gpu_page_tree_init_location(const uvm_gpu_t *gpu) +{ + // See comment in page_tree_set_location + return uvm_gpu_is_virt_mode_sriov_heavy(gpu)? UVM_APERTURE_VID : UVM_APERTURE_DEFAULT; +} + +uvm_processor_id_t uvm_gpu_get_processor_id_by_address(uvm_gpu_t *gpu, uvm_gpu_phys_address_t addr) +{ + uvm_processor_id_t id = UVM_ID_INVALID; + + // TODO: Bug 1899622: On P9 systems with multiple CPU sockets, SYS aperture + // is also reported for accesses to remote GPUs connected to a different CPU + // NUMA domain. We will need to determine the actual processor id using the + // reported physical address. + if (addr.aperture == UVM_APERTURE_SYS) + return UVM_ID_CPU; + else if (addr.aperture == UVM_APERTURE_VID) + return gpu->id; + + uvm_spin_lock(&gpu->peer_info.peer_gpus_lock); + + for_each_gpu_id_in_mask(id, &gpu->peer_info.peer_gpu_mask) { + uvm_gpu_t *other_gpu = gpu->peer_info.peer_gpus[uvm_id_gpu_index(id)]; + UVM_ASSERT(other_gpu); + + if (uvm_gpus_are_nvswitch_connected(gpu, other_gpu)) { + // NVSWITCH connected systems use an extended physical address to + // map to peers. 
Find the physical memory 'slot' containing the + // given physical address to find the peer gpu that owns the + // physical address + NvU64 fabric_window_end = other_gpu->parent->nvswitch_info.fabric_memory_window_start + + other_gpu->mem_info.max_allocatable_address; + + if (other_gpu->parent->nvswitch_info.fabric_memory_window_start <= addr.address && + fabric_window_end >= addr.address) + break; + } + else if (uvm_gpu_peer_aperture(gpu, other_gpu) == addr.aperture) { + break; + } + } + + uvm_spin_unlock(&gpu->peer_info.peer_gpus_lock); + + return id; +} + +uvm_gpu_peer_t *uvm_gpu_index_peer_caps(const uvm_gpu_id_t gpu_id1, const uvm_gpu_id_t gpu_id2) +{ + NvU32 table_index = uvm_gpu_peer_table_index(gpu_id1, gpu_id2); + return &g_uvm_global.peers[table_index]; +} + +static NvU64 instance_ptr_to_key(uvm_gpu_phys_address_t instance_ptr) +{ + NvU64 key; + int is_sys = (instance_ptr.aperture == UVM_APERTURE_SYS); + + // Instance pointers must be 4k aligned and they must have either VID or SYS + // apertures. Compress them as much as we can both to guarantee that the key + // fits within 64 bits, and to make the table as shallow as possible. + UVM_ASSERT(IS_ALIGNED(instance_ptr.address, UVM_PAGE_SIZE_4K)); + UVM_ASSERT(instance_ptr.aperture == UVM_APERTURE_VID || instance_ptr.aperture == UVM_APERTURE_SYS); + + key = (instance_ptr.address >> 11) | is_sys; + + return key; +} + +static NV_STATUS gpu_add_user_channel_subctx_info(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel) +{ + uvm_gpu_phys_address_t instance_ptr = user_channel->instance_ptr.addr; + NV_STATUS status = NV_OK; + uvm_rb_tree_node_t *channel_tree_node; + uvm_user_channel_subctx_info_t *channel_subctx_info; + uvm_user_channel_subctx_info_t *new_channel_subctx_info = NULL; + uvm_va_space_t *va_space = user_channel->gpu_va_space->va_space; + + if (!user_channel->in_subctx) + return NV_OK; + + // Pre-allocate a subcontext info descriptor out of the lock, in case we + // need to add a new entry to the tree + new_channel_subctx_info = uvm_kvmalloc_zero(sizeof(*new_channel_subctx_info)); + + // Don't check for the result of the allocation since it is only needed + // if the TSG has not been registered yet, and we do that under the lock + // below + if (new_channel_subctx_info) { + new_channel_subctx_info->subctxs = + uvm_kvmalloc_zero(sizeof(*new_channel_subctx_info->subctxs) * user_channel->tsg.max_subctx_count); + } + + uvm_spin_lock(&gpu->parent->instance_ptr_table_lock); + + // Check if the subcontext information for the channel already exists + channel_tree_node = uvm_rb_tree_find(&gpu->parent->tsg_table, user_channel->tsg.id); + + if (!channel_tree_node) { + // We could not allocate the descriptor before taking the lock. 
Exiting + if (!new_channel_subctx_info || !new_channel_subctx_info->subctxs) { + status = NV_ERR_NO_MEMORY; + goto exit_unlock; + } + + // Insert the new subcontext information descriptor + new_channel_subctx_info->node.key = user_channel->tsg.id; + status = uvm_rb_tree_insert(&gpu->parent->tsg_table, &new_channel_subctx_info->node); + UVM_ASSERT(status == NV_OK); + + channel_subctx_info = new_channel_subctx_info; + channel_subctx_info->smc_engine_id = user_channel->smc_engine_id; + } + else { + channel_subctx_info = container_of(channel_tree_node, uvm_user_channel_subctx_info_t, node); + UVM_ASSERT(channel_subctx_info->smc_engine_id == user_channel->smc_engine_id); + } + + user_channel->subctx_info = channel_subctx_info; + + // Register the VA space of the channel subcontext info descriptor, or + // check that the existing one matches the channel's + if (channel_subctx_info->subctxs[user_channel->subctx_id].refcount++ > 0) { + UVM_ASSERT_MSG(channel_subctx_info->subctxs[user_channel->subctx_id].va_space == va_space, + "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: expected VA space 0x%llx but got 0x%llx instead\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id, + (NvU64)va_space, + (NvU64)channel_subctx_info->subctxs[user_channel->subctx_id].va_space); + UVM_ASSERT_MSG(channel_subctx_info->subctxs[user_channel->subctx_id].va_space != NULL, + "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: VA space is NULL\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id); + UVM_ASSERT_MSG(channel_subctx_info->total_refcount > 0, + "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: TSG refcount is 0\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id); + } + else { + UVM_ASSERT_MSG(channel_subctx_info->subctxs[user_channel->subctx_id].va_space == NULL, + "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: expected VA space NULL but got 0x%llx instead\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id, + (NvU64)channel_subctx_info->subctxs[user_channel->subctx_id].va_space); + + channel_subctx_info->subctxs[user_channel->subctx_id].va_space = va_space; + } + + ++channel_subctx_info->total_refcount; + +exit_unlock: + uvm_spin_unlock(&gpu->parent->instance_ptr_table_lock); + + // Remove the pre-allocated per-TSG subctx information struct if there was + // some error or it was not used + if (status != NV_OK || user_channel->subctx_info != new_channel_subctx_info) { + if (new_channel_subctx_info) + uvm_kvfree(new_channel_subctx_info->subctxs); + + uvm_kvfree(new_channel_subctx_info); + } + + return status; +} + +static void gpu_remove_user_channel_subctx_info_locked(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel) +{ + uvm_gpu_phys_address_t instance_ptr = user_channel->instance_ptr.addr; + uvm_va_space_t *va_space = user_channel->gpu_va_space->va_space; + + uvm_assert_spinlock_locked(&gpu->parent->instance_ptr_table_lock); + + // Channel subcontext info descriptor may not have been registered in + // tsg_table since this function is called in some 
teardown paths during + // channel creation + if (!user_channel->subctx_info) + return; + + UVM_ASSERT_MSG(&user_channel->subctx_info->node == + uvm_rb_tree_find(&gpu->parent->tsg_table, user_channel->subctx_info->node.key), + "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: SubCTX not found in TSG table\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id); + + UVM_ASSERT_MSG(user_channel->subctx_info->subctxs[user_channel->subctx_id].refcount > 0, + "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: SubCTX refcount is 0\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id); + + UVM_ASSERT_MSG(user_channel->subctx_info->subctxs[user_channel->subctx_id].va_space == va_space, + "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: expected VA space 0x%llx but got 0x%llx instead\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id, + (NvU64)va_space, + (NvU64)user_channel->subctx_info->subctxs[user_channel->subctx_id].va_space); + + UVM_ASSERT_MSG(user_channel->subctx_info->total_refcount > 0, + "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: TSG refcount is 0\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id); + + // Decrement VA space refcount. If it gets to zero, unregister the pointer + if (--user_channel->subctx_info->subctxs[user_channel->subctx_id].refcount == 0) + user_channel->subctx_info->subctxs[user_channel->subctx_id].va_space = NULL; + + if (--user_channel->subctx_info->total_refcount == 0) { + uvm_rb_tree_remove(&gpu->parent->tsg_table, &user_channel->subctx_info->node); + uvm_kvfree(user_channel->subctx_info->subctxs); + uvm_kvfree(user_channel->subctx_info); + } + + user_channel->subctx_info = NULL; +} + +static void gpu_remove_user_channel_subctx_info(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel) +{ + uvm_spin_lock(&gpu->parent->instance_ptr_table_lock); + gpu_remove_user_channel_subctx_info_locked(gpu, user_channel); + uvm_spin_unlock(&gpu->parent->instance_ptr_table_lock); +} + +static void gpu_add_user_channel_instance_ptr(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel) +{ + uvm_gpu_phys_address_t instance_ptr = user_channel->instance_ptr.addr; + NvU64 instance_ptr_key = instance_ptr_to_key(instance_ptr); + NV_STATUS status; + + uvm_spin_lock(&gpu->parent->instance_ptr_table_lock); + + // Insert the instance_ptr -> user_channel mapping + user_channel->instance_ptr.node.key = instance_ptr_key; + status = uvm_rb_tree_insert(&gpu->parent->instance_ptr_table, &user_channel->instance_ptr.node); + + uvm_spin_unlock(&gpu->parent->instance_ptr_table_lock); + + UVM_ASSERT_MSG(status == NV_OK, "CH %u:%u instance_ptr {0x%llx:%s} SubCTX %u in TSG %u: error %s\n", + user_channel->hw_runlist_id, + user_channel->hw_channel_id, + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + user_channel->subctx_id, + user_channel->tsg.id, + nvstatusToString(status)); +} + +static void gpu_remove_user_channel_instance_ptr_locked(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel) +{ + 
uvm_assert_spinlock_locked(&gpu->parent->instance_ptr_table_lock);
+
+    if (UVM_RB_TREE_EMPTY_NODE(&user_channel->instance_ptr.node))
+        return;
+
+    uvm_rb_tree_remove(&gpu->parent->instance_ptr_table, &user_channel->instance_ptr.node);
+}
+
+NV_STATUS uvm_gpu_add_user_channel(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel)
+{
+    uvm_va_space_t *va_space;
+    uvm_gpu_va_space_t *gpu_va_space = user_channel->gpu_va_space;
+    NV_STATUS status;
+
+    UVM_ASSERT(user_channel->rm_retained_channel);
+    UVM_ASSERT(gpu_va_space);
+    UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE);
+    va_space = gpu_va_space->va_space;
+    uvm_assert_rwsem_locked(&va_space->lock);
+
+    status = gpu_add_user_channel_subctx_info(gpu, user_channel);
+    if (status != NV_OK)
+        return status;
+
+    gpu_add_user_channel_instance_ptr(gpu, user_channel);
+
+    return NV_OK;
+}
+
+static uvm_user_channel_t *instance_ptr_to_user_channel(uvm_gpu_t *gpu, uvm_gpu_phys_address_t instance_ptr)
+{
+    NvU64 key = instance_ptr_to_key(instance_ptr);
+    uvm_rb_tree_node_t *instance_node;
+
+    uvm_assert_spinlock_locked(&gpu->parent->instance_ptr_table_lock);
+
+    instance_node = uvm_rb_tree_find(&gpu->parent->instance_ptr_table, key);
+    if (!instance_node)
+        return NULL;
+
+    return get_user_channel(instance_node);
+}
+
+static uvm_va_space_t *user_channel_and_subctx_to_va_space(uvm_user_channel_t *user_channel, NvU32 subctx_id)
+{
+    uvm_user_channel_subctx_info_t *channel_subctx_info;
+
+    UVM_ASSERT(user_channel);
+    UVM_ASSERT(user_channel->in_subctx);
+    UVM_ASSERT(user_channel->subctx_info);
+
+    uvm_assert_spinlock_locked(&user_channel->gpu->parent->instance_ptr_table_lock);
+
+    channel_subctx_info = user_channel->subctx_info;
+
+    UVM_ASSERT_MSG(subctx_id < user_channel->tsg.max_subctx_count,
+                   "instance_ptr {0x%llx:%s} in TSG %u. Invalid SubCTX %u\n",
+                   user_channel->instance_ptr.addr.address,
+                   uvm_aperture_string(user_channel->instance_ptr.addr.aperture),
+                   user_channel->tsg.id,
+                   subctx_id);
+    UVM_ASSERT_MSG(channel_subctx_info->total_refcount > 0,
+                   "instance_ptr {0x%llx:%s} in TSG %u: TSG refcount is 0\n",
+                   user_channel->instance_ptr.addr.address,
+                   uvm_aperture_string(user_channel->instance_ptr.addr.aperture),
+                   user_channel->tsg.id);
+
+    // A subcontext's refcount can be zero if that subcontext is torn down
+    // uncleanly and work from that subcontext continues running with work from
+    // other subcontexts.
+    if (channel_subctx_info->subctxs[subctx_id].refcount == 0) {
+        UVM_ASSERT(channel_subctx_info->subctxs[subctx_id].va_space == NULL);
+    }
+    else {
+        UVM_ASSERT_MSG(channel_subctx_info->subctxs[subctx_id].va_space,
+                       "instance_ptr {0x%llx:%s} in TSG %u: no VA space for SubCTX %u\n",
+                       user_channel->instance_ptr.addr.address,
+                       uvm_aperture_string(user_channel->instance_ptr.addr.aperture),
+                       user_channel->tsg.id,
+                       subctx_id);
+    }
+
+    return channel_subctx_info->subctxs[subctx_id].va_space;
+}
+
+NV_STATUS uvm_gpu_fault_entry_to_va_space(uvm_gpu_t *gpu,
+                                          uvm_fault_buffer_entry_t *fault,
+                                          uvm_va_space_t **out_va_space)
+{
+    uvm_user_channel_t *user_channel;
+    NV_STATUS status = NV_OK;
+
+    *out_va_space = NULL;
+
+    uvm_spin_lock(&gpu->parent->instance_ptr_table_lock);
+
+    user_channel = instance_ptr_to_user_channel(gpu, fault->instance_ptr);
+    if (!user_channel) {
+        status = NV_ERR_INVALID_CHANNEL;
+        goto exit_unlock;
+    }
+
+    // Faults from HUB clients will always report VEID 0 even if the channel
+    // belongs to a TSG with many subcontexts.
Therefore, we cannot use the per-TSG + // subctx table and we need to directly return the channel's VA space + if (!user_channel->in_subctx || (fault->fault_source.client_type == UVM_FAULT_CLIENT_TYPE_HUB)) { + UVM_ASSERT_MSG(fault->fault_source.ve_id == 0, + "Fault packet contains SubCTX %u for channel not in subctx\n", + fault->fault_source.ve_id); + + // We can safely access user_channel->gpu_va_space under the + // instance_ptr_table_lock since gpu_va_space is set to NULL after this + // function is called in uvm_user_channel_detach + UVM_ASSERT(uvm_gpu_va_space_state(user_channel->gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + *out_va_space = user_channel->gpu_va_space->va_space; + } + else { + NvU32 ve_id = fault->fault_source.ve_id; + + // Compute the SMC engine-local VEID + UVM_ASSERT(ve_id >= user_channel->smc_engine_ve_id_offset); + + ve_id -= user_channel->smc_engine_ve_id_offset; + + *out_va_space = user_channel_and_subctx_to_va_space(user_channel, ve_id); + + // Instance pointer is valid but the fault targets a non-existent + // subcontext. + if (!*out_va_space) + status = NV_ERR_PAGE_TABLE_NOT_AVAIL; + } + +exit_unlock: + uvm_spin_unlock(&gpu->parent->instance_ptr_table_lock); + + if (status == NV_OK) + UVM_ASSERT(uvm_va_space_initialized(*out_va_space) == NV_OK); + + return status; +} + +NV_STATUS uvm_gpu_access_counter_entry_to_va_space(uvm_gpu_t *gpu, + uvm_access_counter_buffer_entry_t *entry, + uvm_va_space_t **out_va_space) +{ + uvm_user_channel_t *user_channel; + NV_STATUS status = NV_OK; + + *out_va_space = NULL; + UVM_ASSERT(entry->address.is_virtual); + + uvm_spin_lock(&gpu->parent->instance_ptr_table_lock); + + user_channel = instance_ptr_to_user_channel(gpu, entry->virtual_info.instance_ptr); + if (!user_channel) { + status = NV_ERR_INVALID_CHANNEL; + goto exit_unlock; + } + + if (!user_channel->in_subctx) { + UVM_ASSERT_MSG(entry->virtual_info.ve_id == 0, + "Access counter packet contains SubCTX %u for channel not in subctx\n", + entry->virtual_info.ve_id); + + UVM_ASSERT(uvm_gpu_va_space_state(user_channel->gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + *out_va_space = user_channel->gpu_va_space->va_space; + } + else { + *out_va_space = user_channel_and_subctx_to_va_space(user_channel, entry->virtual_info.ve_id); + if (!*out_va_space) + status = NV_ERR_PAGE_TABLE_NOT_AVAIL; + } + +exit_unlock: + uvm_spin_unlock(&gpu->parent->instance_ptr_table_lock); + + if (status == NV_OK) + UVM_ASSERT(uvm_va_space_initialized(*out_va_space) == NV_OK); + + return status; +} + +void uvm_gpu_remove_user_channel(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel) +{ + uvm_va_space_t *va_space; + uvm_gpu_va_space_t *gpu_va_space = user_channel->gpu_va_space; + + UVM_ASSERT(user_channel->rm_retained_channel); + UVM_ASSERT(gpu_va_space); + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + va_space = gpu_va_space->va_space; + uvm_assert_rwsem_locked_write(&va_space->lock); + + uvm_spin_lock(&gpu->parent->instance_ptr_table_lock); + gpu_remove_user_channel_subctx_info_locked(gpu, user_channel); + gpu_remove_user_channel_instance_ptr_locked(gpu, user_channel); + uvm_spin_unlock(&gpu->parent->instance_ptr_table_lock); +} + +static NvU64 gpu_addr_to_dma_addr(uvm_parent_gpu_t *parent_gpu, NvU64 gpu_addr) +{ + NvU64 dma_addr = gpu_addr; + UVM_ASSERT(dma_addr <= dma_addr + parent_gpu->dma_addressable_start); + + if (parent_gpu->npu) + dma_addr = nv_expand_nvlink_addr(dma_addr); + + dma_addr += parent_gpu->dma_addressable_start; + + return dma_addr; 
+} + +// The GPU has its NV_PFB_XV_UPPER_ADDR register set by RM to +// dma_addressable_start (in bifSetupDmaWindow_IMPL()) and hence when +// referencing sysmem from the GPU, dma_addressable_start should be +// subtracted from the DMA address we get from the OS. +static NvU64 dma_addr_to_gpu_addr(uvm_parent_gpu_t *parent_gpu, NvU64 dma_addr) +{ + NvU64 gpu_addr = dma_addr - parent_gpu->dma_addressable_start; + UVM_ASSERT(dma_addr >= gpu_addr); + + // See Bug 1920398 for background and details about NVLink DMA address + // transformations being applied here. + if (parent_gpu->npu) + gpu_addr = nv_compress_nvlink_addr(gpu_addr); + + return gpu_addr; +} + +void *uvm_gpu_dma_alloc_page(uvm_parent_gpu_t *parent_gpu, gfp_t gfp_flags, NvU64 *dma_address_out) +{ + NvU64 dma_addr; + void *cpu_addr; + + cpu_addr = dma_alloc_coherent(&parent_gpu->pci_dev->dev, PAGE_SIZE, &dma_addr, gfp_flags); + + if (!cpu_addr) + return cpu_addr; + + *dma_address_out = dma_addr_to_gpu_addr(parent_gpu, dma_addr); + atomic64_add(PAGE_SIZE, &parent_gpu->mapped_cpu_pages_size); + return cpu_addr; +} + +void uvm_gpu_dma_free_page(uvm_parent_gpu_t *parent_gpu, void *va, NvU64 dma_address) +{ + dma_address = gpu_addr_to_dma_addr(parent_gpu, dma_address); + dma_free_coherent(&parent_gpu->pci_dev->dev, PAGE_SIZE, va, dma_address); + atomic64_sub(PAGE_SIZE, &parent_gpu->mapped_cpu_pages_size); +} + +NV_STATUS uvm_gpu_map_cpu_pages(uvm_gpu_t *gpu, struct page *page, size_t size, NvU64 *dma_address_out) +{ + NvU64 dma_addr; + + UVM_ASSERT(PAGE_ALIGNED(size)); + + dma_addr = dma_map_page(&gpu->parent->pci_dev->dev, page, 0, size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&gpu->parent->pci_dev->dev, dma_addr)) + return NV_ERR_OPERATING_SYSTEM; + + if (dma_addr < gpu->parent->dma_addressable_start || + dma_addr + size - 1 > gpu->parent->dma_addressable_limit) { + dma_unmap_page(&gpu->parent->pci_dev->dev, dma_addr, size, DMA_BIDIRECTIONAL); + UVM_ERR_PRINT_RL("PCI mapped range [0x%llx, 0x%llx) not in the addressable range [0x%llx, 0x%llx), GPU %s\n", + dma_addr, + dma_addr + (NvU64)size, + gpu->parent->dma_addressable_start, + gpu->parent->dma_addressable_limit + 1, + uvm_gpu_name(gpu)); + return NV_ERR_INVALID_ADDRESS; + } + + atomic64_add(size, &gpu->parent->mapped_cpu_pages_size); + *dma_address_out = dma_addr_to_gpu_addr(gpu->parent, dma_addr); + + return NV_OK; +} + +void uvm_gpu_unmap_cpu_pages(uvm_gpu_t *gpu, NvU64 dma_address, size_t size) +{ + UVM_ASSERT(PAGE_ALIGNED(size)); + + dma_address = gpu_addr_to_dma_addr(gpu->parent, dma_address); + dma_unmap_page(&gpu->parent->pci_dev->dev, dma_address, size, DMA_BIDIRECTIONAL); + atomic64_sub(size, &gpu->parent->mapped_cpu_pages_size); +} + +// This function implements the UvmRegisterGpu API call, as described in uvm.h. +// Notes: +// +// 1. The UVM VA space has a 1-to-1 relationship with an open instance of +// /dev/nvidia-uvm. That, in turn, has a 1-to-1 relationship with a process, +// because the user-level UVM code (os-user-linux.c, for example) enforces an +// "open /dev/nvidia-uvm only once per process" policy. So a UVM VA space is +// very close to a process's VA space. +// +// If that user space code fails or is not used, then the relationship is no +// longer 1-to-1. That situation requires that this code should avoid crashing, +// leaking resources, exhibiting security holes, etc, but it does not have to +// provide correct UVM API behavior. Correct UVM API behavior requires doing +// the right things in user space before calling into the kernel. +// +// 2. 
The uvm_api*() routines are invoked directly from the top-level ioctl +// handler. They are considered "API routing routines", because they are +// responsible for providing the behavior that is described in the UVM +// user-to-kernel API documentation, in uvm.h. +// +// 3. A GPU VA space, which you'll see in other parts of the driver, +// is something different: there may be more than one +// GPU VA space within a process, and therefore within a UVM VA space. +// +NV_STATUS uvm_api_register_gpu(UVM_REGISTER_GPU_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_rm_user_object_t user_rm_va_space = { + .rm_control_fd = params->rmCtrlFd, + .user_client = params->hClient, + .user_object = params->hSmcPartRef, + }; + + return uvm_va_space_register_gpu(va_space, + ¶ms->gpu_uuid, + &user_rm_va_space, + ¶ms->numaEnabled, + ¶ms->numaNodeId); +} + +NV_STATUS uvm_api_unregister_gpu(UVM_UNREGISTER_GPU_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + return uvm_va_space_unregister_gpu(va_space, ¶ms->gpu_uuid); +} + +NV_STATUS uvm_api_register_gpu_va_space(UVM_REGISTER_GPU_VASPACE_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_rm_user_object_t user_rm_va_space = { + .rm_control_fd = params->rmCtrlFd, + .user_client = params->hClient, + .user_object = params->hVaSpace + }; + return uvm_va_space_register_gpu_va_space(va_space, &user_rm_va_space, ¶ms->gpuUuid); +} + +NV_STATUS uvm_api_unregister_gpu_va_space(UVM_UNREGISTER_GPU_VASPACE_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + return uvm_va_space_unregister_gpu_va_space(va_space, ¶ms->gpuUuid); +} + +NV_STATUS uvm_api_pageable_mem_access_on_gpu(UVM_PAGEABLE_MEM_ACCESS_ON_GPU_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu; + + uvm_va_space_down_read(va_space); + gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + + if (!gpu) { + uvm_va_space_up_read(va_space); + return NV_ERR_INVALID_DEVICE; + } + + if (uvm_va_space_pageable_mem_access_supported(va_space) && gpu->parent->replayable_faults_supported) + params->pageableMemAccess = NV_TRUE; + else + params->pageableMemAccess = NV_FALSE; + + uvm_va_space_up_read(va_space); + return NV_OK; +} + +NV_STATUS uvm_test_set_prefetch_filtering(UVM_TEST_SET_PREFETCH_FILTERING_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu = NULL; + NV_STATUS status = NV_OK; + + uvm_mutex_lock(&g_uvm_global.global_lock); + + uvm_va_space_down_read(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + + if (!gpu) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + if (!gpu->parent->isr.replayable_faults.handling || !gpu->parent->prefetch_fault_supported) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + switch (params->filtering_mode) { + case UVM_TEST_PREFETCH_FILTERING_MODE_FILTER_ALL: + uvm_gpu_disable_prefetch_faults(gpu->parent); + break; + case UVM_TEST_PREFETCH_FILTERING_MODE_FILTER_NONE: + uvm_gpu_enable_prefetch_faults(gpu->parent); + break; + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + } + +done: + uvm_va_space_up_read(va_space); + + uvm_mutex_unlock(&g_uvm_global.global_lock); + return status; +} + +NV_STATUS uvm_test_get_gpu_time(UVM_TEST_GET_GPU_TIME_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu = 
NULL; + NV_STATUS status = NV_OK; + + uvm_va_space_down_read(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + + if (gpu) + params->timestamp_ns = gpu->parent->host_hal->get_time(gpu); + else + status = NV_ERR_INVALID_DEVICE; + + uvm_va_space_up_read(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_gpu.h b/kernel-open/nvidia-uvm/uvm_gpu.h new file mode 100644 index 000000000..0048326ad --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu.h @@ -0,0 +1,1429 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_GPU_H__ +#define __UVM_GPU_H__ + +#include "nvtypes.h" +#include "nvmisc.h" +#include "uvm_types.h" +#include "nv_uvm_types.h" +#include "uvm_linux.h" +#include "nv-kref.h" +#include "uvm_common.h" +#include "ctrl2080mc.h" +#include "uvm_forward_decl.h" +#include "uvm_processors.h" +#include "uvm_pmm_gpu.h" +#include "uvm_pmm_sysmem.h" +#include "uvm_mmu.h" +#include "uvm_gpu_replayable_faults.h" +#include "uvm_gpu_isr.h" +#include "uvm_hal_types.h" +#include "uvm_hmm.h" +#include "uvm_va_block_types.h" +#include "uvm_perf_module.h" +#include "uvm_rb_tree.h" +#include "nv-kthread-q.h" + + + + +// Buffer length to store uvm gpu id, RM device name and gpu uuid. +#define UVM_GPU_NICE_NAME_BUFFER_LENGTH (sizeof("ID 999: : ") + \ + UVM_GPU_NAME_LENGTH + UVM_GPU_UUID_TEXT_BUFFER_LENGTH) + +#define UVM_GPU_MAGIC_VALUE 0xc001d00d12341993ULL + +typedef struct +{ + // Number of faults from this uTLB that have been fetched but have not been serviced yet + NvU32 num_pending_faults; + + // Whether the uTLB contains fatal faults + bool has_fatal_faults; + + // We have issued a replay of type START_ACK_ALL while containing fatal faults. This puts + // the uTLB in lockdown mode and no new translations are accepted + bool in_lockdown; + + // We have issued a cancel on this uTLB + bool cancelled; + + uvm_fault_buffer_entry_t prev_fatal_fault; + + // Last fetched fault that was originated from this uTLB. Used for fault + // filtering. 
+ uvm_fault_buffer_entry_t *last_fault; +} uvm_fault_utlb_info_t; + +struct uvm_service_block_context_struct +{ + // + // Fields initialized by CPU/GPU fault handling and access counter routines + // + + // Whether the information refers to replayable/non-replayable faults or + // access counters + uvm_service_operation_t operation; + + // Processors that will be the residency of pages after the operation has + // been serviced + uvm_processor_mask_t resident_processors; + + // VA block region that contains all the pages affected by the operation + uvm_va_block_region_t region; + + // Array of type uvm_fault_access_type_t that contains the type of the + // access that caused the fault/access_counter notification to be serviced + // for each page. + NvU8 access_type[PAGES_PER_UVM_VA_BLOCK]; + + // Number of times the service operation has been retried + unsigned num_retries; + + // Pages that need to be pinned due to thrashing + uvm_page_mask_t thrashing_pin_mask; + + // Number of pages that need to be pinned due to thrashing. This is the same + // value as the result of bitmap_weight(thrashing_pin_mask) + unsigned thrashing_pin_count; + + // Pages that can be read-duplicated + uvm_page_mask_t read_duplicate_mask; + + // Number of pages that can be read-duplicated. This is the same value as + // the result of bitmap_weight(read_duplicate_count_mask) + unsigned read_duplicate_count; + + // + // Fields used by the CPU fault handling routine + // + + struct + { + // Node of the list of fault service contexts used by the CPU + struct list_head service_context_list; + + // A mask of GPUs that need to be checked for ECC errors before the CPU + // fault handler returns, but after the VA space lock has been unlocked to + // avoid the RM/UVM VA space lock deadlocks. + uvm_processor_mask_t gpus_to_check_for_ecc; + + // This is set to throttle page fault thrashing. + NvU64 wakeup_time_stamp; + + // This is set if the page migrated to/from the GPU and CPU. + bool did_migrate; + } cpu_fault; + + // + // Fields managed by the common operation servicing routine + // + + uvm_prot_page_mask_array_t mappings_by_prot; + + // Mask with the pages that did not migrate to the processor (they were + // already resident) in the last call to uvm_va_block_make_resident. + // This is used to compute the pages that need to revoke mapping permissions + // from other processors. + uvm_page_mask_t did_not_migrate_mask; + + // Pages whose permissions need to be revoked from other processors + uvm_page_mask_t revocation_mask; + + struct + { + // Per-processor mask with the pages that will be resident after servicing. + // We need one mask per processor because we may coalesce faults that + // trigger migrations to different processors. + uvm_page_mask_t new_residency; + } per_processor_masks[UVM_ID_MAX_PROCESSORS]; + + // State used by the VA block routines called by the servicing routine + uvm_va_block_context_t block_context; +}; + +struct uvm_fault_service_batch_context_struct +{ + // Array of elements fetched from the GPU fault buffer. The number of + // elements in this array is exactly max_batch_size + uvm_fault_buffer_entry_t *fault_cache; + + // Array of pointers to elements in fault cache used for fault + // preprocessing. The number of elements in this array is exactly + // max_batch_size + uvm_fault_buffer_entry_t **ordered_fault_cache; + + // Per uTLB fault information. 
Used for replay policies and fault + // cancellation on Pascal + uvm_fault_utlb_info_t *utlbs; + + // Largest uTLB id seen in a GPU fault + NvU32 max_utlb_id; + + NvU32 num_cached_faults; + + NvU32 num_coalesced_faults; + + bool has_fatal_faults; + + bool has_throttled_faults; + + NvU32 num_invalid_prefetch_faults; + + NvU32 num_duplicate_faults; + + NvU32 num_replays; + + // Unique id (per-GPU) generated for tools events recording + NvU32 batch_id; + + uvm_tracker_t tracker; + + // Boolean used to avoid sorting the fault batch by instance_ptr if we + // determine at fetch time that all the faults in the batch report the same + // instance_ptr + bool is_single_instance_ptr; + + // Last fetched fault. Used for fault filtering. + uvm_fault_buffer_entry_t *last_fault; +}; + +struct uvm_ats_fault_invalidate_struct +{ + // Whether the TLB batch contains any information + bool write_faults_in_batch; + + // Batch of TLB entries to be invalidated + uvm_tlb_batch_t write_faults_tlb_batch; +}; + +typedef struct +{ + // Fault buffer information and structures provided by RM + UvmGpuFaultInfo rm_info; + + // Maximum number of faults to be processed in batch before fetching new + // entries from the GPU buffer + NvU32 max_batch_size; + + struct uvm_replayable_fault_buffer_info_struct + { + // Maximum number of faults entries that can be stored in the buffer + NvU32 max_faults; + + // Cached value of the GPU GET register to minimize the round-trips + // over PCIe + NvU32 cached_get; + + // Cached value of the GPU PUT register to minimize the round-trips over + // PCIe + NvU32 cached_put; + + // Policy that determines when GPU replays are issued during normal + // fault servicing + uvm_perf_fault_replay_policy_t replay_policy; + + // Tracker used to aggregate replay operations, needed for fault cancel + // and GPU removal + uvm_tracker_t replay_tracker; + + // If there is a ratio larger than replay_update_put_ratio of duplicate + // faults in a batch, PUT pointer is updated before flushing the buffer + // that comes before the replay method. + NvU32 replay_update_put_ratio; + + // Fault statistics. These fields are per-GPU and most of them are only + // updated during fault servicing, and can be safely incremented. + // Migrations may be triggered by different GPUs and need to be + // incremented using atomics + struct + { + NvU64 num_prefetch_faults; + + NvU64 num_read_faults; + + NvU64 num_write_faults; + + NvU64 num_atomic_faults; + + NvU64 num_duplicate_faults; + + atomic64_t num_pages_out; + + atomic64_t num_pages_in; + + NvU64 num_replays; + + NvU64 num_replays_ack_all; + } stats; + + // Number of uTLBs in the chip + NvU32 utlb_count; + + // Context structure used to service a GPU fault batch + uvm_fault_service_batch_context_t batch_service_context; + + // Structure used to coalesce fault servicing in a VA block + uvm_service_block_context_t block_service_context; + + // Information required to invalidate stale ATS PTEs from the GPU TLBs + uvm_ats_fault_invalidate_t ats_invalidate; + } replayable; + + struct uvm_non_replayable_fault_buffer_info_struct + { + // Maximum number of faults entries that can be stored in the buffer + NvU32 max_faults; + + // Tracker used to aggregate clear faulted operations, needed for GPU + // removal + uvm_tracker_t clear_faulted_tracker; + + // Buffer used to store elements popped out from the queue shared with + // RM for fault servicing. + void *shadow_buffer_copy; + + // Array of elements fetched from the GPU fault buffer. 
The number of + // elements in this array is exactly max_batch_size + uvm_fault_buffer_entry_t *fault_cache; + + // Fault statistics. See replayable fault stats for more details. + struct + { + NvU64 num_read_faults; + + NvU64 num_write_faults; + + NvU64 num_atomic_faults; + + NvU64 num_physical_faults; + + atomic64_t num_pages_out; + + atomic64_t num_pages_in; + } stats; + + // Tracker which temporarily holds the work pushed to service faults + uvm_tracker_t fault_service_tracker; + + // Structure used to coalesce fault servicing in a VA block + uvm_service_block_context_t block_service_context; + + // Unique id (per-GPU) generated for tools events recording + NvU32 batch_id; + + // Information required to invalidate stale ATS PTEs from the GPU TLBs + uvm_ats_fault_invalidate_t ats_invalidate; + } non_replayable; + + // Flag that tells if prefetch faults are enabled in HW + bool prefetch_faults_enabled; + + // Timestamp when prefetch faults where disabled last time + NvU64 disable_prefetch_faults_timestamp; +} uvm_fault_buffer_info_t; + +typedef struct +{ + // True if the platform supports HW coherence (P9) and RM has exposed the + // GPU's memory as a NUMA node to the kernel. + bool enabled; + + // Range in the system physical address space where the memory of this GPU + // is mapped + NvU64 system_memory_window_start; + NvU64 system_memory_window_end; + + NvU64 memblock_size; + + unsigned node_id; +} uvm_numa_info_t; + +struct uvm_access_counter_service_batch_context_struct +{ + uvm_access_counter_buffer_entry_t *notification_cache; + + NvU32 num_cached_notifications; + + struct + { + uvm_access_counter_buffer_entry_t **notifications; + + NvU32 num_notifications; + + // Boolean used to avoid sorting the fault batch by instance_ptr if we + // determine at fetch time that all the access counter notifications in the + // batch report the same instance_ptr + bool is_single_instance_ptr; + } virt; + + struct + { + uvm_access_counter_buffer_entry_t **notifications; + uvm_reverse_map_t *translations; + + NvU32 num_notifications; + + // Boolean used to avoid sorting the fault batch by aperture if we + // determine at fetch time that all the access counter notifications in the + // batch report the same aperture + bool is_single_aperture; + } phys; + + // Helper page mask to compute the accessed pages within a VA block + uvm_page_mask_t accessed_pages; + + // Structure used to coalesce access counter servicing in a VA block + uvm_service_block_context_t block_service_context; + + // Unique id (per-GPU) generated for tools events recording + NvU32 batch_id; +}; + +typedef struct +{ + // Values used to configure access counters in RM + struct + { + UVM_ACCESS_COUNTER_GRANULARITY granularity; + UVM_ACCESS_COUNTER_USE_LIMIT use_limit; + } rm; + + // The following values are precomputed by the access counter notification + // handling code. See comments for UVM_MAX_TRANSLATION_SIZE in + // uvm_gpu_access_counters.c for more details. 
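+    //
+    // For example (illustrative numbers): with a 16GB tracking granularity
+    // and a 2MB maximum translation size, a single notification is covered
+    // by 16GB / 2MB = 8192 reverse map translation requests.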
+ NvU64 translation_size; + + NvU64 translations_per_counter; + + NvU64 sub_granularity_region_size; + + NvU64 sub_granularity_regions_per_translation; +} uvm_gpu_access_counter_type_config_t; + +typedef struct +{ + UvmGpuAccessCntrInfo rm_info; + + NvU32 max_notifications; + + NvU32 max_batch_size; + + // Cached value of the GPU GET register to minimize the round-trips + // over PCIe + NvU32 cached_get; + + // Cached value of the GPU PUT register to minimize the round-trips over + // PCIe + NvU32 cached_put; + + // Tracker used to aggregate access counters clear operations, needed for + // GPU removal + uvm_tracker_t clear_tracker; + + // Current access counter configuration. During normal operation this + // information is computed once during GPU initialization. However, tests + // may override it to try different configuration values. + struct + { + uvm_gpu_access_counter_type_config_t mimc; + uvm_gpu_access_counter_type_config_t momc; + + NvU32 threshold; + } current_config; + + // Access counter statistics + struct + { + atomic64_t num_pages_out; + + atomic64_t num_pages_in; + } stats; + + // Ignoring access counters means that notifications are left in the HW + // buffer without being serviced. Requests to ignore access counters + // are counted since the suspend path inhibits access counter interrupts, + // and the resume path needs to know whether to reenable them. + NvU32 notifications_ignored_count; + + // Context structure used to service a GPU access counter batch + uvm_access_counter_service_batch_context_t batch_service_context; + + // VA space that reconfigured the access counters configuration, if any. + // Used in builtin tests only, to avoid reconfigurations from different + // processes + // + // Locking: both readers and writers must hold the access counters ISR lock + uvm_va_space_t *reconfiguration_owner; +} uvm_access_counter_buffer_info_t; + +typedef struct +{ + // VA where the identity mapping should be mapped in the internal VA + // space managed by uvm_gpu_t.address_space_tree (see below). + NvU64 base; + + // Page tables with the mapping. + uvm_page_table_range_vec_t *range_vec; +} uvm_gpu_identity_mapping_t; + +// Root chunk mapping +typedef struct +{ + // Page table range representation of the mapping. Because a root chunk + // fits into a single 2MB page, in practice the range consists of a single + // 2MB PTE. + uvm_page_table_range_t *range; + + // Number of mapped pages of size PAGE_SIZE. + NvU32 num_mapped_pages; +} uvm_gpu_root_chunk_mapping_t; + +typedef enum +{ + UVM_GPU_LINK_INVALID = 0, + UVM_GPU_LINK_PCIE, + UVM_GPU_LINK_NVLINK_1, + UVM_GPU_LINK_NVLINK_2, + UVM_GPU_LINK_NVLINK_3, + + + + + UVM_GPU_LINK_MAX +} uvm_gpu_link_type_t; + +// UVM does not support P2P copies on pre-Pascal GPUs. Pascal+ GPUs only +// support virtual addresses in P2P copies. Therefore, a peer identity mapping +// needs to be created. +// Ampere+ GPUs support physical peer copies, too, so identity mappings are not +// needed +typedef enum +{ + UVM_GPU_PEER_COPY_MODE_UNSUPPORTED, + UVM_GPU_PEER_COPY_MODE_VIRTUAL, + UVM_GPU_PEER_COPY_MODE_PHYSICAL, + UVM_GPU_PEER_COPY_MODE_COUNT +} uvm_gpu_peer_copy_mode_t; + +struct uvm_gpu_struct +{ + uvm_parent_gpu_t *parent; + + // Refcount of the gpu, i.e. how many times it has been retained. This is + // roughly a count of how many times it has been registered with a VA space, + // except that some paths retain the GPU temporarily without a VA space. + // + // While this is >0, the GPU can't be removed. 
This differs from gpu_kref, + // which merely prevents the uvm_gpu_t object from being freed. + // + // In most cases this count is protected by the global lock: retaining a GPU + // from a UUID and any release require the global lock to be taken. But it's + // also useful for a caller to retain a GPU they've already retained, in + // which case there's no need to take the global lock. This can happen when + // an operation needs to drop the VA space lock but continue operating on a + // GPU. This is an atomic variable to handle those cases. + // + // Security note: keep it as a 64-bit counter to prevent overflow cases (a + // user can create a lot of va spaces and register the gpu with them). + atomic64_t retained_count; + + // A unique uvm gpu id in range [1, UVM_ID_MAX_PROCESSORS); this is a copy + // of the parent's id. + uvm_gpu_id_t id; + + // A unique uvm global_gpu id in range [1, UVM_GLOBAL_ID_MAX_PROCESSORS) + uvm_global_gpu_id_t global_id; + + // Should be UVM_GPU_MAGIC_VALUE. Used for memory checking. + NvU64 magic; + + struct + { + // The amount of memory the GPU has in total, in bytes. If the GPU is in + // ZeroFB testing mode, this will be 0. + NvU64 size; + + // Max (inclusive) physical address of this GPU's memory that the driver + // can allocate through PMM (PMA). + NvU64 max_allocatable_address; + } mem_info; + + struct + { + // Big page size used by the internal UVM VA space + // Notably it may be different than the big page size used by a user's VA + // space in general. + NvU32 internal_size; + } big_page; + + // Mapped registers needed to obtain the current GPU timestamp + struct + { + volatile NvU32 *time0_register; + volatile NvU32 *time1_register; + } time; + + // Identity peer mappings are only defined when + // peer_copy_mode == UVM_GPU_PEER_COPY_MODE_VIRTUAL + uvm_gpu_identity_mapping_t peer_mappings[UVM_ID_MAX_GPUS]; + + struct + { + // Mask of peer_gpus set + // + // We can use a regular processor id because P2P is not allowed between + // partitioned GPUs when SMC is enabled + uvm_processor_mask_t peer_gpu_mask; + + // lazily-populated array of peer GPUs, indexed by the peer's GPU index + uvm_gpu_t *peer_gpus[UVM_ID_MAX_GPUS]; + + // Leaf spinlock used to synchronize access to the peer_gpus table so that + // it can be safely accessed from the access counters bottom half + uvm_spinlock_t peer_gpus_lock; + } peer_info; + + // Maximum number of subcontexts supported + NvU32 max_subcontexts; + + // RM address space handle used in many of the UVM/RM APIs + // Represents a GPU VA space within rm_device. + // + // In SR-IOV heavy, proxy channels are not associated with this address + // space. + uvmGpuAddressSpaceHandle rm_address_space; + + // Page tree used for the internal UVM VA space shared with RM + uvm_page_tree_t address_space_tree; + + // Set to true during add_gpu() as soon as the RM's address space is moved + // to the address_space_tree. + bool rm_address_space_moved_to_page_tree; + + uvm_gpu_semaphore_pool_t *semaphore_pool; + + uvm_channel_manager_t *channel_manager; + + uvm_pmm_gpu_t pmm; + + // Flat linear mapping covering vidmem. This is a kernel mapping that is + // only created in certain configurations. + // + // There are two mutually exclusive versions of the mapping. The simplest + // version covers the entire GPU memory, and it is created during GPU + // initialization. The dynamic version is a partial vidmem mapping that + // creates and destroys mappings to GPU root chunks on demand. 
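+    //
+    // In either version, the kernel VA used for a vidmem physical address pa
+    // is flat_vidmem_va_base + pa; see
+    // uvm_gpu_address_virtual_from_vidmem_phys().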
+ union + { + // Static mapping covering the whole GPU memory. + uvm_gpu_identity_mapping_t static_flat_mapping; + + // Dynamic mapping of GPU memory. + struct + { + // Array of root chunk mappings. + uvm_gpu_root_chunk_mapping_t *array; + + // Number of elements in the array. + size_t count; + + // Each bit in the bitlock protects a single root chunk mapping. + uvm_bit_locks_t bitlocks; + + } root_chunk_mappings; + }; + + // Linear sysmem mappings. Mappings are added on demand, and removed upon + // GPU deinitialization. The mappings are added to UVM's internal address + // space i.e. they are kernel mappings. + // + // Only used in SR-IOV heavy. + struct + { + // Size of each mapping, in bytes. + NvU64 mapping_size; + + // Array of sysmem mappings. + uvm_gpu_identity_mapping_t *array; + + // Number of elements in the array. + size_t count; + + // Each bit in the bitlock protects a sysmem mapping. + uvm_bit_locks_t bitlocks; + } sysmem_mappings; + + // Reverse lookup table used to query the user mapping associated with a + // sysmem (DMA) physical address. + // + // The system memory mapping information referred to by this field is + // different from that of sysmem_mappings, because it relates to user + // mappings (instead of kernel), and it is used in most configurations. + uvm_pmm_sysmem_mappings_t pmm_reverse_sysmem_mappings; + + + + + + // ECC handling + // In order to trap ECC errors as soon as possible the driver has the hw + // interrupt register mapped directly. If an ECC interrupt is ever noticed + // to be pending, then the UVM driver needs to: + // + // 1) ask RM to service interrupts, and then + // 2) inspect the ECC error notifier state. + // + // Notably, checking for channel errors is not enough, because ECC errors + // can be pending, even after a channel has become idle. + // + // See more details in uvm_gpu_check_ecc_error(). + struct + { + // Does the GPU have ECC enabled? + bool enabled; + + // Direct mapping of the 32-bit part of the hw interrupt tree that has + // the ECC bits. + volatile NvU32 *hw_interrupt_tree_location; + + // Mask to get the ECC interrupt bits from the 32-bits above. + NvU32 mask; + + // Set to true by RM when a fatal ECC error is encountered (requires + // asking RM to service pending interrupts to be current). + NvBool *error_notifier; + } ecc; + + struct + { + NvU32 swizz_id; + + uvmGpuSessionHandle rm_session_handle; + + // RM device handle used in many of the UVM/RM APIs. + // + // Do not read this field directly, use uvm_gpu_device_handle instead. + uvmGpuDeviceHandle rm_device; + } smc; + + struct + { + struct proc_dir_entry *dir; + + struct proc_dir_entry *dir_symlink; + + struct proc_dir_entry *info_file; + + struct proc_dir_entry *dir_peers; + } procfs; + + // Placeholder for per-GPU performance heuristics information + uvm_perf_module_data_desc_t perf_modules_data[UVM_PERF_MODULE_TYPE_COUNT]; +}; + +struct uvm_parent_gpu_struct +{ + // Reference count for how many places are holding on to a parent GPU + // (internal to the UVM driver). This includes any GPUs we know about, not + // just GPUs that are registered with a VA space. Most GPUs end up being + // registered, but there are brief periods when they are not registered, + // such as during interrupt handling, and in add_gpu() or remove_gpu(). + nv_kref_t gpu_kref; + + // The number of uvm_gpu_ts referencing this uvm_parent_gpu_t. + NvU32 num_retained_gpus; + + uvm_gpu_t *gpus[UVM_ID_MAX_SUB_PROCESSORS]; + + // Bitmap of valid child entries in the gpus[] table. 
Used to retrieve a + // usable child GPU in bottom-halves. + DECLARE_BITMAP(valid_gpus, UVM_ID_MAX_SUB_PROCESSORS); + + // The gpu's uuid + NvProcessorUuid uuid; + + // Nice printable name including the uvm gpu id, ascii name from RM and uuid + char name[UVM_GPU_NICE_NAME_BUFFER_LENGTH]; + + // GPU information and provided by RM (architecture, implementation, + // hardware classes, etc.). + UvmGpuInfo rm_info; + + // A unique uvm gpu id in range [1, UVM_ID_MAX_PROCESSORS) + uvm_gpu_id_t id; + + // Reference to the Linux PCI device + // + // The reference to the PCI device remains valid as long as the GPU is + // registered with RM's Linux layer (between nvUvmInterfaceRegisterGpu() and + // nvUvmInterfaceUnregisterGpu()). + struct pci_dev *pci_dev; + + // NVLINK Processing Unit (NPU) on PowerPC platforms. The NPU is a + // collection of CPU-side PCI devices which bridge GPU NVLINKs and the CPU + // memory bus. + // + // There is one PCI device per NVLINK. A set of NVLINKs connects to a single + // GPU, and all NVLINKs for a given socket are collected logically under + // this UVM NPU because some resources (such as register mappings) are + // shared by all those NVLINKs. This means multiple GPUs may connect to the + // same UVM NPU. + uvm_ibm_npu_t *npu; + + // On kernels with NUMA support, this entry contains the closest CPU NUMA + // node to this GPU. Otherwise, the value will be -1. + int closest_cpu_numa_node; + + // RM device handle used in many of the UVM/RM APIs. + // + // Do not read this field directly, use uvm_gpu_device_handle instead. + uvmGpuDeviceHandle rm_device; + + // The physical address range addressable by the GPU + // + // The GPU has its NV_PFB_XV_UPPER_ADDR register set by RM to + // dma_addressable_start (in bifSetupDmaWindow_IMPL()) and hence when + // referencing sysmem from the GPU, dma_addressable_start should be + // subtracted from the physical address. The DMA mapping helpers like + // uvm_gpu_map_cpu_pages() and uvm_gpu_dma_alloc_page() take care of that. + NvU64 dma_addressable_start; + NvU64 dma_addressable_limit; + + // Total size (in bytes) of physically mapped (with uvm_gpu_map_cpu_pages) + // sysmem pages, used for leak detection. + atomic64_t mapped_cpu_pages_size; + + // Hardware Abstraction Layer + uvm_host_hal_t *host_hal; + uvm_ce_hal_t *ce_hal; + uvm_arch_hal_t *arch_hal; + uvm_fault_buffer_hal_t *fault_buffer_hal; + uvm_access_counter_buffer_hal_t *access_counter_buffer_hal; + + + + + uvm_gpu_peer_copy_mode_t peer_copy_mode; + + // Virtualization mode of the GPU. + UVM_VIRT_MODE virt_mode; + + // Whether the GPU can trigger faults on prefetch instructions + bool prefetch_fault_supported; + + // Number of membars required to flush out HSHUB following a TLB invalidate + NvU32 num_hshub_tlb_invalidate_membars; + + // Whether the channels can configure GPFIFO in vidmem + bool gpfifo_in_vidmem_supported; + + bool replayable_faults_supported; + + bool non_replayable_faults_supported; + + bool access_counters_supported; + + bool fault_cancel_va_supported; + + // True if the GPU has hardware support for scoped atomics + bool scoped_atomics_supported; + + // If true, a HW method can be used to clear a faulted channel. + // If false, then the GPU supports clearing faulted channels using registers + // instead of a HW method. + // This value is only defined for GPUs that support non-replayable faults. + bool has_clear_faulted_channel_method; + + // If true, a SW method can be used to clear a faulted channel. 
+ // If false, the HW method or the registers (whichever is available + // according to has_clear_faulted_channel_method) needs to be used. + // + // This value is only defined for GPUs that support non-replayable faults. + bool has_clear_faulted_channel_sw_method; + + bool sparse_mappings_supported; + + // Ampere(GA100) requires map->invalidate->remap->invalidate for page size + // promotion + bool map_remap_larger_page_promotion; + + bool plc_supported; + + // Parameters used by the TLB batching API + struct + { + // Is the targeted (single page) VA invalidate supported at all? + NvBool va_invalidate_supported; + + // Is the VA range invalidate supported? + NvBool va_range_invalidate_supported; + + union + { + // Maximum (inclusive) number of single page invalidations before + // falling back to invalidate all + NvU32 max_pages; + + // Maximum (inclusive) number of range invalidations before falling + // back to invalidate all + NvU32 max_ranges; + }; + } tlb_batch; + + // Largest VA (exclusive) which can be used for channel buffer mappings + NvU64 max_channel_va; + + // Largest VA (exclusive) which Host can operate. + NvU64 max_host_va; + + // Indicates whether the GPU can map sysmem with pages larger than 4k + bool can_map_sysmem_with_large_pages; + + // VA base and size of the RM managed part of the internal UVM VA space. + // + // The internal UVM VA is shared with RM by RM controlling some of the top + // level PDEs and leaving the rest for UVM to control. + // On Pascal a single top level PDE covers 128 TB of VA and given that + // semaphores and other allocations limited to 40bit are currently allocated + // through RM, RM needs to control the [0, 128TB) VA range at least for now. + // On Maxwell, limit RMs VA to [0, 128GB) that should easily fit + // all RM allocations and leave enough space for UVM. + NvU64 rm_va_base; + NvU64 rm_va_size; + + // Base and size of the GPU VA used for uvm_mem_t allocations mapped in the + // internal address_space_tree. + NvU64 uvm_mem_va_base; + NvU64 uvm_mem_va_size; + + // Base of the GPU VAs used for the vidmem and sysmem flat mappings. + NvU64 flat_vidmem_va_base; + NvU64 flat_sysmem_va_base; + + // Bitmap of allocation sizes for user memory supported by a GPU. PAGE_SIZE + // is guaranteed to be both present and the smallest size. + uvm_chunk_sizes_mask_t mmu_user_chunk_sizes; + + // Bitmap of allocation sizes that could be requested by the page tree for + // a GPU + uvm_chunk_sizes_mask_t mmu_kernel_chunk_sizes; + + struct + { + struct proc_dir_entry *dir; + + struct proc_dir_entry *fault_stats_file; + + struct proc_dir_entry *access_counters_file; + } procfs; + + // Interrupt handling state and locks + uvm_isr_info_t isr; + + // Fault buffer info. This is only valid if supports_replayable_faults is set to true + uvm_fault_buffer_info_t fault_buffer_info; + + // NUMA info, mainly for ATS + uvm_numa_info_t numa_info; + + // Access counter buffer info. This is only valid if supports_access_counters is set to true + uvm_access_counter_buffer_info_t access_counter_buffer_info; + + // Number of uTLBs per GPC. This information is only valid on Pascal+ GPUs. + NvU32 utlb_per_gpc_count; + + // In order to service GPU faults, UVM must be able to obtain the VA + // space for each reported fault. The fault packet contains the + // instance_ptr of the channel that was bound when the SMs triggered + // the fault. On fault any instance pointer in the TSG may be + // reported. 
This is a problem on Volta, which allow different channels + // in the TSG to be bound to different VA spaces in order to support + // subcontexts. In order to be able to obtain the correct VA space, HW + // provides the subcontext id (or VEID) in addition to the instance_ptr. + // + // Summary: + // + // 1) Channels in a TSG may be in different VA spaces, identified by their + // subcontext ID. + // 2) Different subcontext IDs may map to the same or different VA spaces. + // 3) On fault, any instance pointer in the TSG may be reported. The + // reported subcontext ID identifies which VA space within the TSG actually + // encountered the fault. + // + // Thus, UVM needs to keep track of all the instance pointers that belong + // to the same TSG. We use two tables: + // + // - instance_ptr_table (instance_ptr -> subctx_info) this table maps + // instance pointers to the subcontext info descriptor for the channel. If + // the channel belongs to a subcontext, this descriptor will contain all + // the VA spaces for the subcontexts in the same TSG. If the channel does + // not belong to a subcontext, it will only contain a pointer to its VA + // space. + // - tsg_table (tsg_id -> subctx_info): this table also stores the + // subctx information, but in this case it is indexed by TSG ID. Thus, + // when a new channel bound to a subcontext is registered, it will check + // first in this table if the subcontext information descriptor for its TSG + // already exists, otherwise it will create it. Channels not bound to + // subcontexts will not use this table. + // + // The bottom half reads the tables under + // isr.replayable_faults_handler.lock, but a separate lock is necessary + // because entries are added and removed from the table under the va_space + // lock, and we can't take isr.replayable_faults_handler.lock while holding + // the va_space lock. + uvm_rb_tree_t tsg_table; + + uvm_rb_tree_t instance_ptr_table; + uvm_spinlock_t instance_ptr_table_lock; + + // This is set to true if the GPU belongs to an SLI group. Else, set to false. + bool sli_enabled; + + struct + { + bool supported; + + bool enabled; + } smc; + + // Global statistics. These fields are per-GPU and most of them are only + // updated during fault servicing, and can be safely incremented. + struct + { + NvU64 num_replayable_faults; + + NvU64 num_non_replayable_faults; + + atomic64_t num_pages_out; + + atomic64_t num_pages_in; + } stats; + + // Structure to hold nvswitch specific information. In an nvswitch + // environment, rather than using the peer-id field of the PTE (which can + // only address 8 gpus), all gpus are assigned a 47-bit physical address + // space by the fabric manager. Any physical address access to these + // physical address spaces are routed through the switch to the corresponding + // peer. 
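+    //
+    // Illustrative example: to read offset pa within a peer's vidmem, the
+    // local GPU accesses (that peer's fabric_memory_window_start + pa) and
+    // the switch routes the request to the owning peer.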
+ struct + { + bool is_nvswitch_connected; + + // 47-bit fabric memory physical offset that peer gpus need to access + // to read a peer's memory + NvU64 fabric_memory_window_start; + } nvswitch_info; + + uvm_gpu_link_type_t sysmem_link; + NvU32 sysmem_link_rate_mbyte_per_s; +}; + +static const char *uvm_gpu_name(uvm_gpu_t *gpu) +{ + return gpu->parent->name; +} + +static const NvProcessorUuid *uvm_gpu_uuid(uvm_gpu_t *gpu) +{ + return &gpu->parent->uuid; +} + +static uvmGpuDeviceHandle uvm_gpu_device_handle(uvm_gpu_t *gpu) +{ + if (gpu->parent->smc.enabled) + return gpu->smc.rm_device; + return gpu->parent->rm_device; +} + +struct uvm_gpu_peer_struct +{ + // The fields in this global structure can only be inspected under one of + // the following conditions: + // + // - The VA space lock is held for either read or write, both GPUs are + // registered in the VA space, and the corresponding bit in the + // va_space.enabled_peers bitmap is set. + // + // - The global lock is held. + // + // - While the global lock was held in the past, the two GPUs were detected + // to be NVLINK peers and were both retained. + // + // - While the global lock was held in the past, the two GPUs were detected + // to be PCIe peers and uvm_gpu_retain_pcie_peer_access() was called. + // + // - The peer_gpus_lock is held on one of the GPUs. In this case, the other + // GPU must be read from the original GPU's peer_gpus table. The fields + // will not change while the lock is held, but they may no longer be valid + // because the other GPU might be in teardown. + + // Peer Id associated with this device w.r.t. to a peer GPU. + // Note: peerId (A -> B) != peerId (B -> A) + // peer_id[0] from min(gpu_id_1, gpu_id_2) -> max(gpu_id_1, gpu_id_2) + // peer_id[1] from max(gpu_id_1, gpu_id_2) -> min(gpu_id_1, gpu_id_2) + NvU8 peer_ids[2]; + + // Indirect peers are GPUs which can coherently access each others' memory + // over NVLINK, but are routed through the CPU using the SYS aperture rather + // than a PEER aperture + NvU8 is_indirect_peer : 1; + + // The link type between the peer GPUs, currently either PCIe or NVLINK. + // This field is used to determine the when this peer struct has been + // initialized (link_type != UVM_GPU_LINK_INVALID). NVLink peers are + // initialized at GPU registration time. PCIe peers are initialized when + // the refcount below goes from 0 to 1. + uvm_gpu_link_type_t link_type; + + // Maximum unidirectional bandwidth between the peers in megabytes per + // second, not taking into account the protocols' overhead. The reported + // bandwidth for indirect peers is zero. See UvmGpuP2PCapsParams. + NvU32 total_link_line_rate_mbyte_per_s; + + // For PCIe, the number of times that this has been retained by a VA space. + // For NVLINK this will always be 1. + NvU64 ref_count; + + // This handle gets populated when enable_peer_access successfully creates + // an NV50_P2P object. disable_peer_access resets the same on the object + // deletion. + NvHandle p2p_handle; + + struct { + struct proc_dir_entry *peer_file[2]; + struct proc_dir_entry *peer_symlink_file[2]; + + // GPU-A <-> GPU-B link is bidirectional, pairs[x][0] is always the + // local GPU, while pairs[x][1] is the remote GPU. The table shall be + // filled like so: [[GPU-A, GPU-B], [GPU-B, GPU-A]]. 
+ uvm_gpu_t *pairs[2][2]; + } procfs; +}; + +// Initialize global gpu state +NV_STATUS uvm_gpu_init(void); + +// Deinitialize global state (called from module exit) +void uvm_gpu_exit(void); + +NV_STATUS uvm_gpu_init_va_space(uvm_va_space_t *va_space); + +void uvm_gpu_exit_va_space(uvm_va_space_t *va_space); + +static uvm_numa_info_t *uvm_gpu_numa_info(uvm_gpu_t *gpu) +{ + UVM_ASSERT(gpu->parent->numa_info.enabled); + + return &gpu->parent->numa_info; +} + +static uvm_gpu_phys_address_t uvm_gpu_page_to_phys_address(uvm_gpu_t *gpu, struct page *page) +{ + uvm_numa_info_t *numa_info = uvm_gpu_numa_info(gpu); + + unsigned long sys_addr = page_to_pfn(page) << PAGE_SHIFT; + unsigned long gpu_offset = sys_addr - numa_info->system_memory_window_start; + + UVM_ASSERT(page_to_nid(page) == numa_info->node_id); + UVM_ASSERT(sys_addr >= numa_info->system_memory_window_start); + UVM_ASSERT(sys_addr + PAGE_SIZE - 1 <= numa_info->system_memory_window_end); + + return uvm_gpu_phys_address(UVM_APERTURE_VID, gpu_offset); +} + +// Note that there is a uvm_gpu_get() function defined in uvm_global.h to break +// a circular dep between global and gpu modules. + +// Get a uvm_gpu_t by UUID. This returns NULL if the GPU is not present. This +// is the general purpose call that should be used normally. +// That is, unless a uvm_gpu_t for a specific SMC partition needs to be +// retrieved, in which case uvm_gpu_get_by_parent_and_swizz_id() must be used +// instead. +// +// LOCKING: requires the global lock to be held +uvm_gpu_t *uvm_gpu_get_by_uuid(const NvProcessorUuid *gpu_uuid); + +// Get a uvm_parent_gpu_t by UUID. Like uvm_gpu_get_by_uuid(), this function +// returns NULL if the GPU has not been registered. +// +// LOCKING: requires the global lock to be held +uvm_parent_gpu_t *uvm_parent_gpu_get_by_uuid(const NvProcessorUuid *gpu_uuid); + +// Like uvm_parent_gpu_get_by_uuid(), but this variant does not assertion-check +// that the caller is holding the global_lock. This is a narrower-purpose +// function, and is only intended for use by the top-half ISR, or other very +// limited cases. +uvm_parent_gpu_t *uvm_parent_gpu_get_by_uuid_locked(const NvProcessorUuid *gpu_uuid); + +// Get the uvm_gpu_t for a partition by parent and swizzId. This returns NULL if +// the partition hasn't been registered. This call needs to be used instead of +// uvm_gpu_get_by_uuid() when a specific partition is targeted. +// +// LOCKING: requires the global lock to be held +uvm_gpu_t *uvm_gpu_get_by_parent_and_swizz_id(uvm_parent_gpu_t *parent_gpu, NvU32 swizz_id); + +// Retain a gpu by uuid +// Returns the retained uvm_gpu_t in gpu_out on success +// +// LOCKING: Takes and releases the global lock for the caller. +NV_STATUS uvm_gpu_retain_by_uuid(const NvProcessorUuid *gpu_uuid, + const uvm_rm_user_object_t *user_rm_device, + uvm_gpu_t **gpu_out); + +// Retain a gpu which is known to already be retained. Does NOT require the +// global lock to be held. +void uvm_gpu_retain(uvm_gpu_t *gpu); + +// Release a gpu +// LOCKING: requires the global lock to be held +void uvm_gpu_release_locked(uvm_gpu_t *gpu); + +// Like uvm_gpu_release_locked, but takes and releases the global lock for the +// caller. +void uvm_gpu_release(uvm_gpu_t *gpu); + +static NvU64 uvm_gpu_retained_count(uvm_gpu_t *gpu) +{ + return atomic64_read(&gpu->retained_count); +} + +// Decrease the refcount on the parent GPU object, and actually delete the object +// if the refcount hits zero. 
+void uvm_parent_gpu_kref_put(uvm_parent_gpu_t *gpu); + +// Calculates peer table index using GPU ids. +NvU32 uvm_gpu_peer_table_index(uvm_gpu_id_t gpu_id1, uvm_gpu_id_t gpu_id2); + +// Either retains an existing PCIe peer entry or creates a new one. In both +// cases the two GPUs are also each retained. +// LOCKING: requires the global lock to be held +NV_STATUS uvm_gpu_retain_pcie_peer_access(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1); + +// Releases a PCIe peer entry and the two GPUs. +// LOCKING: requires the global lock to be held +void uvm_gpu_release_pcie_peer_access(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1); + +// Get the aperture for local_gpu to use to map memory resident on remote_gpu. +// They must not be the same gpu. +uvm_aperture_t uvm_gpu_peer_aperture(uvm_gpu_t *local_gpu, uvm_gpu_t *remote_gpu); + +// Get the processor id accessible by the given GPU for the given physical address +uvm_processor_id_t uvm_gpu_get_processor_id_by_address(uvm_gpu_t *gpu, uvm_gpu_phys_address_t addr); + +// Get the P2P capabilities between the gpus with the given indexes +uvm_gpu_peer_t *uvm_gpu_index_peer_caps(uvm_gpu_id_t gpu_id1, uvm_gpu_id_t gpu_id2); + +// Get the P2P capabilities between the given gpus +static uvm_gpu_peer_t *uvm_gpu_peer_caps(const uvm_gpu_t *gpu0, const uvm_gpu_t *gpu1) +{ + return uvm_gpu_index_peer_caps(gpu0->id, gpu1->id); +} + +static bool uvm_gpus_are_nvswitch_connected(uvm_gpu_t *gpu1, uvm_gpu_t *gpu2) +{ + if (gpu1->parent->nvswitch_info.is_nvswitch_connected && gpu2->parent->nvswitch_info.is_nvswitch_connected) { + UVM_ASSERT(uvm_gpu_peer_caps(gpu1, gpu2)->link_type >= UVM_GPU_LINK_NVLINK_2); + return true; + } + + return false; +} + +static bool uvm_gpus_are_indirect_peers(uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + uvm_gpu_peer_t *peer_caps = uvm_gpu_peer_caps(gpu0, gpu1); + + if (peer_caps->link_type != UVM_GPU_LINK_INVALID && peer_caps->is_indirect_peer) { + UVM_ASSERT(gpu0->parent->numa_info.enabled); + UVM_ASSERT(gpu1->parent->numa_info.enabled); + UVM_ASSERT(peer_caps->link_type != UVM_GPU_LINK_PCIE); + UVM_ASSERT(!uvm_gpus_are_nvswitch_connected(gpu0, gpu1)); + return true; + } + + return false; +} + +// Retrieve the virtual address corresponding to the given vidmem physical +// address, according to the linear vidmem mapping in the GPU kernel address +// space. +// +// The actual GPU mapping only exists if a full flat mapping, or a partial flat +// mapping covering the passed address, has been previously created. +static uvm_gpu_address_t uvm_gpu_address_virtual_from_vidmem_phys(uvm_gpu_t *gpu, NvU64 pa) +{ + UVM_ASSERT(uvm_mmu_gpu_needs_static_vidmem_mapping(gpu) || uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)); + UVM_ASSERT(pa <= gpu->mem_info.max_allocatable_address); + + return uvm_gpu_address_virtual(gpu->parent->flat_vidmem_va_base + pa); +} + +// Retrieve the virtual address corresponding to the given sysmem physical +// address, according to the linear sysmem mapping in the GPU kernel address +// space. +// +// The actual GPU mapping only exists if a linear mapping covering the passed +// address has been previously created. 
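+//
+// For example, a DMA address returned by uvm_gpu_map_cpu_pages() can be
+// accessed through this mapping at flat_sysmem_va_base + that address.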
+static uvm_gpu_address_t uvm_gpu_address_virtual_from_sysmem_phys(uvm_gpu_t *gpu, NvU64 pa) +{ + UVM_ASSERT(uvm_mmu_gpu_needs_dynamic_sysmem_mapping(gpu)); + UVM_ASSERT(pa <= (gpu->parent->dma_addressable_limit - gpu->parent->dma_addressable_start)); + + return uvm_gpu_address_virtual(gpu->parent->flat_sysmem_va_base + pa); +} + +static uvm_gpu_identity_mapping_t *uvm_gpu_get_peer_mapping(uvm_gpu_t *gpu, uvm_gpu_id_t peer_id) +{ + return &gpu->peer_mappings[uvm_id_gpu_index(peer_id)]; +} + +// Check for ECC errors +// +// Notably this check cannot be performed where it's not safe to call into RM. +NV_STATUS uvm_gpu_check_ecc_error(uvm_gpu_t *gpu); + +// Check for ECC errors without calling into RM +// +// Calling into RM is problematic in many places, this check is always safe to do. +// Returns NV_WARN_MORE_PROCESSING_REQUIRED if there might be an ECC error and +// it's required to call uvm_gpu_check_ecc_error() to be sure. +NV_STATUS uvm_gpu_check_ecc_error_no_rm(uvm_gpu_t *gpu); + +// Map size bytes of contiguous sysmem on the GPU for physical access +// +// size has to be aligned to PAGE_SIZE. +// +// Returns the physical address of the pages that can be used to access them on +// the GPU. +NV_STATUS uvm_gpu_map_cpu_pages(uvm_gpu_t *gpu, struct page *page, size_t size, NvU64 *dma_address_out); + +// Unmap num_pages pages previously mapped with uvm_gpu_map_cpu_pages(). +void uvm_gpu_unmap_cpu_pages(uvm_gpu_t *gpu, NvU64 dma_address, size_t size); + +static NV_STATUS uvm_gpu_map_cpu_page(uvm_gpu_t *gpu, struct page *page, NvU64 *dma_address_out) +{ + return uvm_gpu_map_cpu_pages(gpu, page, PAGE_SIZE, dma_address_out); +} + +static void uvm_gpu_unmap_cpu_page(uvm_gpu_t *gpu, NvU64 dma_address) +{ + uvm_gpu_unmap_cpu_pages(gpu, dma_address, PAGE_SIZE); +} + +// Allocate and map a page of system DMA memory on the GPU for physical access +// +// Returns +// - the address of the page that can be used to access them on +// the GPU in the dma_address_out parameter. +// - the address of allocated memory in CPU virtual address space. +void *uvm_gpu_dma_alloc_page(uvm_parent_gpu_t *parent_gpu, + gfp_t gfp_flags, + NvU64 *dma_address_out); + +// Unmap and free size bytes of contiguous sysmem DMA previously allocated +// with uvm_gpu_map_cpu_pages(). +void uvm_gpu_dma_free_page(uvm_parent_gpu_t *parent_gpu, void *va, NvU64 dma_address); + +// Returns whether the given range is within the GPU's addressable VA ranges. +// It requires the input 'addr' to be in canonical form for platforms compliant +// to canonical form addresses, i.e., ARM64, and x86. +// Warning: This only checks whether the GPU's MMU can support the given +// address. Some HW units on that GPU might only support a smaller range. +// +// The GPU must be initialized before calling this function. +bool uvm_gpu_can_address(uvm_gpu_t *gpu, NvU64 addr, NvU64 size); + +// Returns addr's canonical form for host systems that use canonical form +// addresses. 
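+//
+// (Background: on x86-64 with 48-bit virtual addresses, an address is
+// canonical when bits 63:48 are copies of bit 47, i.e. the upper bits are a
+// sign extension of the implemented VA range.)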
+NvU64 uvm_parent_gpu_canonical_address(uvm_parent_gpu_t *parent_gpu, NvU64 addr); + +static bool uvm_gpu_supports_eviction(uvm_gpu_t *gpu) +{ + + + + + + + // Eviction is supported only if the GPU supports replayable faults + return gpu->parent->replayable_faults_supported; +} + +static bool uvm_gpu_is_virt_mode_sriov_heavy(const uvm_gpu_t *gpu) +{ + return gpu->parent->virt_mode == UVM_VIRT_MODE_SRIOV_HEAVY; +} + +static bool uvm_gpu_is_virt_mode_sriov_standard(const uvm_gpu_t *gpu) +{ + return gpu->parent->virt_mode == UVM_VIRT_MODE_SRIOV_STANDARD; +} + +// Returns true if the virtualization mode is SR-IOV heavy or SR-IOV standard. +static bool uvm_gpu_is_virt_mode_sriov(const uvm_gpu_t *gpu) +{ + return uvm_gpu_is_virt_mode_sriov_heavy(gpu) || uvm_gpu_is_virt_mode_sriov_standard(gpu); +} + +static bool uvm_gpu_uses_proxy_channel_pool(const uvm_gpu_t *gpu) +{ + return uvm_gpu_is_virt_mode_sriov_heavy(gpu); +} + +uvm_aperture_t uvm_gpu_page_tree_init_location(const uvm_gpu_t *gpu); + +// Debug print of GPU properties +void uvm_gpu_print(uvm_gpu_t *gpu); + +// Add the given instance pointer -> user_channel mapping to this GPU. The bottom +// half GPU page fault handler uses this to look up the VA space for GPU faults. +NV_STATUS uvm_gpu_add_user_channel(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel); +void uvm_gpu_remove_user_channel(uvm_gpu_t *gpu, uvm_user_channel_t *user_channel); + +// Looks up an entry added by uvm_gpu_add_user_channel. Return codes: +// NV_OK Translation successful +// NV_ERR_INVALID_CHANNEL Entry's instance pointer was not found +// NV_ERR_PAGE_TABLE_NOT_AVAIL Entry's instance pointer is valid but the entry +// targets an invalid subcontext +// +// out_va_space is valid if NV_OK is returned, otherwise it's NULL. The caller +// is responsibile for ensuring that the returned va_space can't be destroyed, +// so these functions should only be called from the bottom half. +NV_STATUS uvm_gpu_fault_entry_to_va_space(uvm_gpu_t *gpu, + uvm_fault_buffer_entry_t *fault, + uvm_va_space_t **out_va_space); + +NV_STATUS uvm_gpu_access_counter_entry_to_va_space(uvm_gpu_t *gpu, + uvm_access_counter_buffer_entry_t *entry, + uvm_va_space_t **out_va_space); + +typedef enum +{ + UVM_GPU_BUFFER_FLUSH_MODE_CACHED_PUT, + UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT, +} uvm_gpu_buffer_flush_mode_t; + +#endif // __UVM_GPU_H__ diff --git a/kernel-open/nvidia-uvm/uvm_gpu_access_counters.c b/kernel-open/nvidia-uvm/uvm_gpu_access_counters.c new file mode 100644 index 000000000..05d3273be --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_access_counters.c @@ -0,0 +1,1822 @@ +/******************************************************************************* + Copyright (c) 2017-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "linux/sort.h" +#include "nv_uvm_interface.h" +#include "uvm_gpu_access_counters.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" +#include "uvm_kvmalloc.h" +#include "uvm_tools.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_va_space_mm.h" +#include "uvm_pmm_sysmem.h" +#include "uvm_perf_module.h" + +#define UVM_PERF_ACCESS_COUNTER_BATCH_COUNT_MIN 1 +#define UVM_PERF_ACCESS_COUNTER_BATCH_COUNT_DEFAULT 256 +#define UVM_PERF_ACCESS_COUNTER_GRANULARITY_DEFAULT "2m" +#define UVM_PERF_ACCESS_COUNTER_THRESHOLD_MIN 1 +#define UVM_PERF_ACCESS_COUNTER_THRESHOLD_MAX ((1 << 16) - 1) +#define UVM_PERF_ACCESS_COUNTER_THRESHOLD_DEFAULT 256 + +// Each page in a tracked physical range may belong to a different VA Block. We +// preallocate an array of reverse map translations. However, access counter +// granularity can be set to up to 16G, which would require an array too large +// to hold all possible translations. Thus, we set an upper bound for reverse +// map translations, and we perform as many translation requests as needed to +// cover the whole tracked range. +#define UVM_MAX_TRANSLATION_SIZE (2 * 1024 * 1024ULL) +#define UVM_SUB_GRANULARITY_REGIONS 32 + +// The GPU offers the following tracking granularities: 64K, 2M, 16M, 16G +// +// Use the largest granularity to minimize the number of access counter +// notifications. This is fine because we simply drop the notifications during +// normal operation, and tests override these values. +static UVM_ACCESS_COUNTER_GRANULARITY g_uvm_access_counter_granularity; +static unsigned g_uvm_access_counter_threshold; + +// Per-VA space access counters information +typedef struct +{ + // VA space-specific configuration settings. These override the global + // settings + struct + { + atomic_t enable_mimc_migrations; + + atomic_t enable_momc_migrations; + } params; + + uvm_va_space_t *va_space; +} va_space_access_counters_info_t; + +// Enable/disable access-counter-guided migrations +// +static int uvm_perf_access_counter_mimc_migration_enable = -1; +static int uvm_perf_access_counter_momc_migration_enable = -1; + +// Number of entries that are fetched from the GPU access counter notification +// buffer and serviced in batch +static unsigned uvm_perf_access_counter_batch_count = UVM_PERF_ACCESS_COUNTER_BATCH_COUNT_DEFAULT; + +// See module param documentation below +static char *uvm_perf_access_counter_granularity = UVM_PERF_ACCESS_COUNTER_GRANULARITY_DEFAULT; +static unsigned uvm_perf_access_counter_threshold = UVM_PERF_ACCESS_COUNTER_THRESHOLD_DEFAULT; + +// Module parameters for the tunables +module_param(uvm_perf_access_counter_mimc_migration_enable, int, S_IRUGO); +MODULE_PARM_DESC(uvm_perf_access_counter_mimc_migration_enable, + "Whether MIMC access counters will trigger migrations." + "Valid values: <= -1 (default policy), 0 (off), >= 1 (on)"); +module_param(uvm_perf_access_counter_momc_migration_enable, int, S_IRUGO); +MODULE_PARM_DESC(uvm_perf_access_counter_momc_migration_enable, + "Whether MOMC access counters will trigger migrations." 
+ "Valid values: <= -1 (default policy), 0 (off), >= 1 (on)"); +module_param(uvm_perf_access_counter_batch_count, uint, S_IRUGO); +module_param(uvm_perf_access_counter_granularity, charp, S_IRUGO); +MODULE_PARM_DESC(uvm_perf_access_counter_granularity, + "Size of the physical memory region tracked by each counter. Valid values as" + "of Volta: 64k, 2m, 16m, 16g"); +module_param(uvm_perf_access_counter_threshold, uint, S_IRUGO); +MODULE_PARM_DESC(uvm_perf_access_counter_threshold, + "Number of remote accesses on a region required to trigger a notification." + "Valid values: [1, 65535]"); + +static void access_counter_buffer_flush_locked(uvm_gpu_t *gpu, uvm_gpu_buffer_flush_mode_t flush_mode); + +static uvm_perf_module_event_callback_desc_t g_callbacks_access_counters[] = {}; + +// Performance heuristics module for access_counters +static uvm_perf_module_t g_module_access_counters; + +// Get the access counters tracking struct for the given VA space if it exists. +// This information is allocated at VA space creation and freed during VA space +// destruction. +static va_space_access_counters_info_t *va_space_access_counters_info_get_or_null(uvm_va_space_t *va_space) +{ + return uvm_perf_module_type_data(va_space->perf_modules_data, UVM_PERF_MODULE_TYPE_ACCESS_COUNTERS); +} + +// Get the access counters tracking struct for the given VA space. It asserts +// that the information has been previously created. +static va_space_access_counters_info_t *va_space_access_counters_info_get(uvm_va_space_t *va_space) +{ + va_space_access_counters_info_t *va_space_access_counters = va_space_access_counters_info_get_or_null(va_space); + UVM_ASSERT(va_space_access_counters); + + return va_space_access_counters; +} + +// Whether access counter migrations are enabled or not. 
The policy is as +// follows: +// - MIMC migrations are enabled by default on P9 systems with ATS support +// - MOMC migrations are disabled by default on all systems +// - Users can override this policy by specifying on/off +static bool is_migration_enabled(uvm_access_counter_type_t type) +{ + int val; + if (type == UVM_ACCESS_COUNTER_TYPE_MIMC) { + val = uvm_perf_access_counter_mimc_migration_enable; + } + else { + val = uvm_perf_access_counter_momc_migration_enable; + + UVM_ASSERT(type == UVM_ACCESS_COUNTER_TYPE_MOMC); + } + + if (val == 0) + return false; + else if (val > 0) + return true; + + if (type == UVM_ACCESS_COUNTER_TYPE_MOMC) + return false; + + return g_uvm_global.ats.supported; +} + +// Create the access counters tracking struct for the given VA space +// +// VA space lock needs to be held in write mode +static va_space_access_counters_info_t *va_space_access_counters_info_create(uvm_va_space_t *va_space) +{ + va_space_access_counters_info_t *va_space_access_counters; + uvm_assert_rwsem_locked_write(&va_space->lock); + + UVM_ASSERT(va_space_access_counters_info_get_or_null(va_space) == NULL); + + va_space_access_counters = uvm_kvmalloc_zero(sizeof(*va_space_access_counters)); + if (va_space_access_counters) { + uvm_perf_module_type_set_data(va_space->perf_modules_data, + va_space_access_counters, + UVM_PERF_MODULE_TYPE_ACCESS_COUNTERS); + + // Snap the access_counters parameters so that they can be tuned per VA space + atomic_set(&va_space_access_counters->params.enable_mimc_migrations, + is_migration_enabled(UVM_ACCESS_COUNTER_TYPE_MIMC)); + atomic_set(&va_space_access_counters->params.enable_momc_migrations, + is_migration_enabled(UVM_ACCESS_COUNTER_TYPE_MOMC)); + va_space_access_counters->va_space = va_space; + } + + return va_space_access_counters; +} + +// Destroy the access counters tracking struct for the given VA space +// +// VA space lock needs to be in write mode +static void va_space_access_counters_info_destroy(uvm_va_space_t *va_space) +{ + va_space_access_counters_info_t *va_space_access_counters = va_space_access_counters_info_get_or_null(va_space); + uvm_assert_rwsem_locked_write(&va_space->lock); + + if (va_space_access_counters) { + uvm_perf_module_type_unset_data(va_space->perf_modules_data, UVM_PERF_MODULE_TYPE_ACCESS_COUNTERS); + uvm_kvfree(va_space_access_counters); + } +} + +static NV_STATUS config_granularity_to_bytes(UVM_ACCESS_COUNTER_GRANULARITY granularity, NvU64 *bytes) +{ + switch (granularity) { + case UVM_ACCESS_COUNTER_GRANULARITY_64K: + *bytes = 64 * 1024ULL; + break; + case UVM_ACCESS_COUNTER_GRANULARITY_2M: + *bytes = 2 * 1024 * 1024ULL; + break; + case UVM_ACCESS_COUNTER_GRANULARITY_16M: + *bytes = 16 * 1024 * 1024ULL; + break; + case UVM_ACCESS_COUNTER_GRANULARITY_16G: + *bytes = 16 * 1024 * 1024 * 1024ULL; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +// Clear the given access counter and add it to the per-GPU clear tracker +static NV_STATUS access_counter_clear_targeted(uvm_gpu_t *gpu, + const uvm_access_counter_buffer_entry_t *entry) +{ + NV_STATUS status; + uvm_push_t push; + uvm_access_counter_buffer_info_t *access_counters = &gpu->parent->access_counter_buffer_info; + + if (entry->address.is_virtual) { + status = uvm_push_begin(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &push, + "Clear access counter with virtual address: 0x%llx", + entry->address.address); + } + else { + status = uvm_push_begin(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &push, + "Clear access counter with 
physical address: 0x%llx:%s", + entry->address.address, + uvm_aperture_string(entry->address.aperture)); + } + + if (status != NV_OK) { + UVM_ERR_PRINT("Error creating push to clear access counters: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + gpu->parent->host_hal->access_counter_clear_targeted(&push, entry); + + uvm_push_end(&push); + + uvm_tracker_remove_completed(&access_counters->clear_tracker); + + return uvm_tracker_add_push_safe(&access_counters->clear_tracker, &push); +} + +// Clear all access counters and add the operation to the per-GPU clear tracker +static NV_STATUS access_counter_clear_all(uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_push_t push; + uvm_access_counter_buffer_info_t *access_counters = &gpu->parent->access_counter_buffer_info; + + status = uvm_push_begin(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &push, + "Clear access counter: all"); + if (status != NV_OK) { + UVM_ERR_PRINT("Error creating push to clear access counters: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + gpu->parent->host_hal->access_counter_clear_all(&push); + + uvm_push_end(&push); + + uvm_tracker_remove_completed(&access_counters->clear_tracker); + + return uvm_tracker_add_push_safe(&access_counters->clear_tracker, &push); +} + +static const uvm_gpu_access_counter_type_config_t * +get_config_for_type(const uvm_access_counter_buffer_info_t *access_counters, uvm_access_counter_type_t counter_type) +{ + return counter_type == UVM_ACCESS_COUNTER_TYPE_MIMC? &(access_counters)->current_config.mimc : + &(access_counters)->current_config.momc; +} + +bool uvm_gpu_access_counters_pending(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(parent_gpu->access_counters_supported); + + // Fast path 1: we left some notifications unserviced in the buffer in the last pass + if (parent_gpu->access_counter_buffer_info.cached_get != parent_gpu->access_counter_buffer_info.cached_put) + return true; + + // Fast path 2: read the valid bit of the notification buffer entry pointed by the cached get pointer + if (!parent_gpu->access_counter_buffer_hal->entry_is_valid(parent_gpu, + parent_gpu->access_counter_buffer_info.cached_get)) { + // Slow path: read the put pointer from the GPU register via BAR0 over PCIe + parent_gpu->access_counter_buffer_info.cached_put = + UVM_GPU_READ_ONCE(*parent_gpu->access_counter_buffer_info.rm_info.pAccessCntrBufferPut); + + // No interrupt pending + if (parent_gpu->access_counter_buffer_info.cached_get == parent_gpu->access_counter_buffer_info.cached_put) + return false; + } + + return true; +} + +// Initialize the configuration and pre-compute some required values for the +// given access counter type +static void init_access_counter_types_config(const UvmGpuAccessCntrConfig *config, + uvm_access_counter_type_t counter_type, + uvm_gpu_access_counter_type_config_t *counter_type_config) +{ + NV_STATUS status; + NvU64 tracking_size = 0; + UVM_ACCESS_COUNTER_GRANULARITY granularity = counter_type == UVM_ACCESS_COUNTER_TYPE_MIMC? config->mimcGranularity: + config->momcGranularity; + UVM_ACCESS_COUNTER_USE_LIMIT use_limit = counter_type == UVM_ACCESS_COUNTER_TYPE_MIMC? config->mimcUseLimit: + config->momcUseLimit; + + counter_type_config->rm.granularity = granularity; + counter_type_config->rm.use_limit = use_limit; + + // Precompute the maximum size to use in reverse map translations and the + // number of translations that are required per access counter notification. 
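+    // For example, with the default 2M tracking granularity each of the 32
+    // bits in a notification's sub_granularity mask covers a 64K region
+    // (2M / UVM_SUB_GRANULARITY_REGIONS), and a single translation of
+    // UVM_MAX_TRANSLATION_SIZE covers the whole tracked range. With the 64K
+    // granularity there is no sub-granularity tracking, so the entry is
+    // treated as a single region.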
+ status = config_granularity_to_bytes(granularity, &tracking_size); + UVM_ASSERT(status == NV_OK); + + // sub_granularity field is only filled for tracking granularities larger + // than 64K + if (granularity == UVM_ACCESS_COUNTER_GRANULARITY_64K) + counter_type_config->sub_granularity_region_size = tracking_size; + else + counter_type_config->sub_granularity_region_size = tracking_size / UVM_SUB_GRANULARITY_REGIONS; + + counter_type_config->translation_size = min(UVM_MAX_TRANSLATION_SIZE, tracking_size); + counter_type_config->translations_per_counter = + max(counter_type_config->translation_size / UVM_MAX_TRANSLATION_SIZE, 1ULL); + counter_type_config->sub_granularity_regions_per_translation = + max(counter_type_config->translation_size / counter_type_config->sub_granularity_region_size, 1ULL); + UVM_ASSERT(counter_type_config->sub_granularity_regions_per_translation <= UVM_SUB_GRANULARITY_REGIONS); +} + +NV_STATUS uvm_gpu_init_access_counters(uvm_parent_gpu_t *parent_gpu) +{ + NV_STATUS status = NV_OK; + uvm_access_counter_buffer_info_t *access_counters = &parent_gpu->access_counter_buffer_info; + uvm_access_counter_service_batch_context_t *batch_context = &access_counters->batch_service_context; + NvU64 granularity_bytes = 0; + + if (uvm_perf_access_counter_threshold < UVM_PERF_ACCESS_COUNTER_THRESHOLD_MIN) { + g_uvm_access_counter_threshold = UVM_PERF_ACCESS_COUNTER_THRESHOLD_MIN; + pr_info("Value %u too small for uvm_perf_access_counter_threshold, using %u instead\n", + uvm_perf_access_counter_threshold, + g_uvm_access_counter_threshold); + } + else if (uvm_perf_access_counter_threshold > UVM_PERF_ACCESS_COUNTER_THRESHOLD_MAX) { + g_uvm_access_counter_threshold = UVM_PERF_ACCESS_COUNTER_THRESHOLD_MAX; + pr_info("Value %u too large for uvm_perf_access_counter_threshold, using %u instead\n", + uvm_perf_access_counter_threshold, + g_uvm_access_counter_threshold); + } + else { + g_uvm_access_counter_threshold = uvm_perf_access_counter_threshold; + } + + if (strcmp(uvm_perf_access_counter_granularity, "64k") == 0) { + g_uvm_access_counter_granularity = UVM_ACCESS_COUNTER_GRANULARITY_64K; + } + else if (strcmp(uvm_perf_access_counter_granularity, "2m") == 0) { + g_uvm_access_counter_granularity = UVM_ACCESS_COUNTER_GRANULARITY_2M; + } + else if (strcmp(uvm_perf_access_counter_granularity, "16m") == 0) { + g_uvm_access_counter_granularity = UVM_ACCESS_COUNTER_GRANULARITY_16M; + } + else if (strcmp(uvm_perf_access_counter_granularity, "16g") == 0) { + g_uvm_access_counter_granularity = UVM_ACCESS_COUNTER_GRANULARITY_16G; + } + else { + g_uvm_access_counter_granularity = UVM_ACCESS_COUNTER_GRANULARITY_2M; + pr_info("Invalid value '%s' for uvm_perf_access_counter_granularity, using '%s' instead", + uvm_perf_access_counter_granularity, + UVM_PERF_ACCESS_COUNTER_GRANULARITY_DEFAULT); + } + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + UVM_ASSERT(parent_gpu->access_counter_buffer_hal != NULL); + + status = uvm_rm_locked_call(nvUvmInterfaceInitAccessCntrInfo(parent_gpu->rm_device, + &access_counters->rm_info)); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init notify buffer info from RM: %s, GPU %s\n", + nvstatusToString(status), + parent_gpu->name); + + // nvUvmInterfaceInitAccessCntrInfo may leave fields in rm_info + // populated when it returns an error. Set the buffer handle to zero as + // it is used by the deinitialization logic to determine if it was + // correctly initialized. 
+ access_counters->rm_info.accessCntrBufferHandle = 0; + goto fail; + } + + UVM_ASSERT(access_counters->rm_info.bufferSize % + parent_gpu->access_counter_buffer_hal->entry_size(parent_gpu) == 0); + + status = config_granularity_to_bytes(g_uvm_access_counter_granularity, &granularity_bytes); + UVM_ASSERT(status == NV_OK); + if (granularity_bytes > UVM_MAX_TRANSLATION_SIZE) + UVM_ASSERT(granularity_bytes % UVM_MAX_TRANSLATION_SIZE == 0); + + parent_gpu->access_counter_buffer_info.notifications_ignored_count = 0; + parent_gpu->access_counter_buffer_info.reconfiguration_owner = NULL; + + uvm_tracker_init(&access_counters->clear_tracker); + + access_counters->max_notifications = parent_gpu->access_counter_buffer_info.rm_info.bufferSize / + parent_gpu->access_counter_buffer_hal->entry_size(parent_gpu); + + // Check provided module parameter value + access_counters->max_batch_size = max(uvm_perf_access_counter_batch_count, + (NvU32)UVM_PERF_ACCESS_COUNTER_BATCH_COUNT_MIN); + access_counters->max_batch_size = min(access_counters->max_batch_size, + access_counters->max_notifications); + + if (access_counters->max_batch_size != uvm_perf_access_counter_batch_count) { + pr_info("Invalid uvm_perf_access_counter_batch_count value on GPU %s: %u. Valid range [%u:%u] Using %u instead\n", + parent_gpu->name, + uvm_perf_access_counter_batch_count, + UVM_PERF_ACCESS_COUNTER_BATCH_COUNT_MIN, + access_counters->max_notifications, + access_counters->max_batch_size); + } + + batch_context->notification_cache = uvm_kvmalloc_zero(access_counters->max_notifications * + sizeof(*batch_context->notification_cache)); + if (!batch_context->notification_cache) { + status = NV_ERR_NO_MEMORY; + goto fail; + } + + batch_context->virt.notifications = uvm_kvmalloc_zero(access_counters->max_notifications * + sizeof(*batch_context->virt.notifications)); + if (!batch_context->virt.notifications) { + status = NV_ERR_NO_MEMORY; + goto fail; + } + + batch_context->phys.notifications = uvm_kvmalloc_zero(access_counters->max_notifications * + sizeof(*batch_context->phys.notifications)); + if (!batch_context->phys.notifications) { + status = NV_ERR_NO_MEMORY; + goto fail; + } + + batch_context->phys.translations = uvm_kvmalloc_zero((UVM_MAX_TRANSLATION_SIZE / PAGE_SIZE) * + sizeof(*batch_context->phys.translations)); + if (!batch_context->phys.translations) { + status = NV_ERR_NO_MEMORY; + goto fail; + } + + return NV_OK; + +fail: + uvm_gpu_deinit_access_counters(parent_gpu); + + return status; +} + +void uvm_gpu_deinit_access_counters(uvm_parent_gpu_t *parent_gpu) +{ + uvm_access_counter_buffer_info_t *access_counters = &parent_gpu->access_counter_buffer_info; + uvm_access_counter_service_batch_context_t *batch_context = &access_counters->batch_service_context; + + UVM_ASSERT(parent_gpu->isr.access_counters.handling_ref_count == 0); + + if (access_counters->rm_info.accessCntrBufferHandle) { + NV_STATUS status = uvm_rm_locked_call(nvUvmInterfaceDestroyAccessCntrInfo(parent_gpu->rm_device, + &access_counters->rm_info)); + UVM_ASSERT(status == NV_OK); + + access_counters->rm_info.accessCntrBufferHandle = 0; + uvm_tracker_deinit(&access_counters->clear_tracker); + } + + uvm_kvfree(batch_context->notification_cache); + uvm_kvfree(batch_context->virt.notifications); + uvm_kvfree(batch_context->phys.notifications); + uvm_kvfree(batch_context->phys.translations); + batch_context->notification_cache = NULL; + batch_context->virt.notifications = NULL; + batch_context->phys.notifications = NULL; + batch_context->phys.translations = NULL; +} + 
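+// Illustrative usage note: the tunables validated in
+// uvm_gpu_init_access_counters() above are standard module parameters, so
+// they can be set at module load time, e.g.
+//
+//   modprobe nvidia-uvm uvm_perf_access_counter_granularity=2m \
+//                       uvm_perf_access_counter_threshold=256
+//
+// (assuming the UVM kernel module is loaded as nvidia-uvm). Out-of-range or
+// unrecognized values are clamped or replaced with the defaults, as
+// implemented above.
+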
+bool uvm_gpu_access_counters_required(const uvm_parent_gpu_t *parent_gpu) +{ + if (!parent_gpu->access_counters_supported) + return false; + + if (parent_gpu->rm_info.isSimulated) + return true; + + return is_migration_enabled(UVM_ACCESS_COUNTER_TYPE_MIMC) || is_migration_enabled(UVM_ACCESS_COUNTER_TYPE_MOMC); +} + +// This function enables access counters with the given configuration and takes +// ownership from RM. The function also stores the new configuration within the +// uvm_gpu_t struct. +static NV_STATUS access_counters_take_ownership(uvm_gpu_t *gpu, UvmGpuAccessCntrConfig *config) +{ + NV_STATUS status, disable_status; + uvm_access_counter_buffer_info_t *access_counters = &gpu->parent->access_counter_buffer_info; + + UVM_ASSERT(gpu->parent->access_counters_supported); + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.access_counters.service_lock)); + + status = uvm_rm_locked_call(nvUvmInterfaceEnableAccessCntr(gpu->parent->rm_device, + &access_counters->rm_info, + config)); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to enable access counter notification from RM: %s, GPU %s\n", + nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + status = access_counter_clear_all(gpu); + if (status != NV_OK) + goto error; + + status = uvm_tracker_wait(&access_counters->clear_tracker); + if (status != NV_OK) + goto error; + + // Read current get pointer as this might not be the first time we have + // taken control of the notify buffer since the GPU was initialized. Then + // flush old notifications. This will update the cached_put pointer. + access_counters->cached_get = UVM_GPU_READ_ONCE(*access_counters->rm_info.pAccessCntrBufferGet); + access_counter_buffer_flush_locked(gpu, UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT); + + access_counters->current_config.threshold = config->threshold; + + init_access_counter_types_config(config, UVM_ACCESS_COUNTER_TYPE_MIMC, &access_counters->current_config.mimc); + init_access_counter_types_config(config, UVM_ACCESS_COUNTER_TYPE_MOMC, &access_counters->current_config.momc); + + return NV_OK; + +error: + disable_status = uvm_rm_locked_call(nvUvmInterfaceDisableAccessCntr(gpu->parent->rm_device, + &access_counters->rm_info)); + UVM_ASSERT(disable_status == NV_OK); + + return status; +} + +// If ownership is yielded as part of reconfiguration, the access counters +// handling refcount may not be 0 +static void access_counters_yield_ownership(uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_access_counter_buffer_info_t *access_counters = &gpu->parent->access_counter_buffer_info; + + UVM_ASSERT(gpu->parent->access_counters_supported); + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.access_counters.service_lock)); + + // Wait for any pending clear operation befor releasing ownership + status = uvm_tracker_wait(&access_counters->clear_tracker); + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); + + status = uvm_rm_locked_call(nvUvmInterfaceDisableAccessCntr(gpu->parent->rm_device, + &access_counters->rm_info)); + UVM_ASSERT(status == NV_OK); +} + +// Increment the refcount of access counter enablement. If this is the first +// reference, enable the HW feature. 
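+//
+// For example, if two VA spaces enable access counters on the same GPU, only
+// the first call takes ownership from RM and programs the configuration; the
+// second call just increments the refcount. The HW feature is disabled again
+// only once both VA spaces have invoked the corresponding disable path.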
+static NV_STATUS gpu_access_counters_enable(uvm_gpu_t *gpu, UvmGpuAccessCntrConfig *config) +{ + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.access_counters.service_lock)); + UVM_ASSERT(gpu->parent->access_counters_supported); + UVM_ASSERT(gpu->parent->access_counter_buffer_info.rm_info.accessCntrBufferHandle); + + // There cannot be a concurrent modification of the handling count, since + // the only two writes of that field happen in the enable/disable functions + // and those are protected by the access counters ISR lock. + if (gpu->parent->isr.access_counters.handling_ref_count == 0) { + NV_STATUS status = access_counters_take_ownership(gpu, config); + + if (status != NV_OK) + return status; + } + + ++gpu->parent->isr.access_counters.handling_ref_count; + return NV_OK; +} + +// Decrement the refcount of access counter enablement. If this is the last +// reference, disable the HW feature. +static void gpu_access_counters_disable(uvm_gpu_t *gpu) +{ + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.access_counters.service_lock)); + UVM_ASSERT(gpu->parent->access_counters_supported); + UVM_ASSERT(gpu->parent->isr.access_counters.handling_ref_count > 0); + + if (--gpu->parent->isr.access_counters.handling_ref_count == 0) + access_counters_yield_ownership(gpu); +} + +// Invoked during registration of the GPU in the VA space +NV_STATUS uvm_gpu_access_counters_enable(uvm_gpu_t *gpu, uvm_va_space_t *va_space) +{ + NV_STATUS status; + + UVM_ASSERT(gpu->parent->access_counters_supported); + + uvm_gpu_access_counters_isr_lock(gpu->parent); + + if (uvm_processor_mask_test(&va_space->access_counters_enabled_processors, gpu->id)) { + status = NV_ERR_INVALID_DEVICE; + } + else { + UvmGpuAccessCntrConfig default_config = + { + .mimcGranularity = g_uvm_access_counter_granularity, + .momcGranularity = g_uvm_access_counter_granularity, + .mimcUseLimit = UVM_ACCESS_COUNTER_USE_LIMIT_FULL, + .momcUseLimit = UVM_ACCESS_COUNTER_USE_LIMIT_FULL, + .threshold = g_uvm_access_counter_threshold, + }; + status = gpu_access_counters_enable(gpu, &default_config); + + // No VA space lock is currently held, so the mask is atomically + // modified to protect from concurrent enablement of access counters in + // another GPU + if (status == NV_OK) + uvm_processor_mask_set_atomic(&va_space->access_counters_enabled_processors, gpu->id); + } + + // If this is the first reference taken on access counters, dropping the + // ISR lock will enable interrupts. + uvm_gpu_access_counters_isr_unlock(gpu->parent); + + return status; +} + +void uvm_gpu_access_counters_disable(uvm_gpu_t *gpu, uvm_va_space_t *va_space) +{ + UVM_ASSERT(gpu->parent->access_counters_supported); + + uvm_gpu_access_counters_isr_lock(gpu->parent); + + if (uvm_processor_mask_test_and_clear_atomic(&va_space->access_counters_enabled_processors, gpu->id)) { + gpu_access_counters_disable(gpu); + + // If this is VA space reconfigured access counters, clear the + // ownership to allow for other processes to invoke the reconfiguration + if (gpu->parent->access_counter_buffer_info.reconfiguration_owner == va_space) + gpu->parent->access_counter_buffer_info.reconfiguration_owner = NULL; + } + + uvm_gpu_access_counters_isr_unlock(gpu->parent); +} + +static void write_get(uvm_parent_gpu_t *parent_gpu, NvU32 get) +{ + uvm_access_counter_buffer_info_t *access_counters = &parent_gpu->access_counter_buffer_info; + + UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.access_counters.service_lock)); + + // Write get on the GPU only if it's changed. 
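+    // (skipping the write avoids a redundant GPU register update when the
+    // cached GET value is already current)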
+ if (access_counters->cached_get == get) + return; + + access_counters->cached_get = get; + + // Update get pointer on the GPU + UVM_GPU_WRITE_ONCE(*access_counters->rm_info.pAccessCntrBufferGet, get); +} + +static void access_counter_buffer_flush_locked(uvm_gpu_t *gpu, uvm_gpu_buffer_flush_mode_t flush_mode) +{ + NvU32 get; + NvU32 put; + uvm_spin_loop_t spin; + uvm_access_counter_buffer_info_t *access_counters = &gpu->parent->access_counter_buffer_info; + + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.access_counters.service_lock)); + UVM_ASSERT(gpu->parent->access_counters_supported); + + // Read PUT pointer from the GPU if requested + if (flush_mode == UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT) + access_counters->cached_put = UVM_GPU_READ_ONCE(*access_counters->rm_info.pAccessCntrBufferPut); + + get = access_counters->cached_get; + put = access_counters->cached_put; + + while (get != put) { + // Wait until valid bit is set + UVM_SPIN_WHILE(!gpu->parent->access_counter_buffer_hal->entry_is_valid(gpu->parent, get), &spin); + + gpu->parent->access_counter_buffer_hal->entry_clear_valid(gpu->parent, get); + ++get; + if (get == access_counters->max_notifications) + get = 0; + } + + write_get(gpu->parent, get); +} + +void uvm_gpu_access_counter_buffer_flush(uvm_gpu_t *gpu) +{ + UVM_ASSERT(gpu->parent->access_counters_supported); + + // Disables access counter interrupts and notification servicing + uvm_gpu_access_counters_isr_lock(gpu->parent); + + if (gpu->parent->isr.access_counters.handling_ref_count > 0) + access_counter_buffer_flush_locked(gpu, UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT); + + uvm_gpu_access_counters_isr_unlock(gpu->parent); +} + +static inline int cmp_access_counter_instance_ptr(const uvm_access_counter_buffer_entry_t *a, + const uvm_access_counter_buffer_entry_t *b) +{ + int result; + + result = uvm_gpu_phys_addr_cmp(a->virtual_info.instance_ptr, b->virtual_info.instance_ptr); + // On Volta+ we need to sort by {instance_ptr + subctx_id} pair since it can + // map to a different VA space + if (result != 0) + return result; + return UVM_CMP_DEFAULT(a->virtual_info.ve_id, b->virtual_info.ve_id); +} + +// Sort comparator for pointers to GVA access counter notification buffer +// entries that sorts by instance pointer +static int cmp_sort_virt_notifications_by_instance_ptr(const void *_a, const void *_b) +{ + const uvm_access_counter_buffer_entry_t *a = *(const uvm_access_counter_buffer_entry_t **)_a; + const uvm_access_counter_buffer_entry_t *b = *(const uvm_access_counter_buffer_entry_t **)_b; + + UVM_ASSERT(a->address.is_virtual); + UVM_ASSERT(b->address.is_virtual); + + return cmp_access_counter_instance_ptr(a, b); +} + +// Sort comparator for pointers to GPA access counter notification buffer +// entries that sorts by physical address' aperture +static int cmp_sort_phys_notifications_by_processor_id(const void *_a, const void *_b) +{ + const uvm_access_counter_buffer_entry_t *a = *(const uvm_access_counter_buffer_entry_t **)_a; + const uvm_access_counter_buffer_entry_t *b = *(const uvm_access_counter_buffer_entry_t **)_b; + + UVM_ASSERT(!a->address.is_virtual); + UVM_ASSERT(!b->address.is_virtual); + + return uvm_id_cmp(a->physical_info.resident_id, b->physical_info.resident_id); +} + +typedef enum +{ + // Fetch a batch of notifications from the buffer. Stop at the first entry + // that is not ready yet + NOTIFICATION_FETCH_MODE_BATCH_READY, + + // Fetch all notifications in the buffer before PUT. 
Wait for all + // notifications to become ready + NOTIFICATION_FETCH_MODE_ALL, +} notification_fetch_mode_t; + +static NvU32 fetch_access_counter_buffer_entries(uvm_gpu_t *gpu, + uvm_access_counter_service_batch_context_t *batch_context, + notification_fetch_mode_t fetch_mode) +{ + NvU32 get; + NvU32 put; + NvU32 notification_index; + uvm_access_counter_buffer_entry_t *notification_cache; + uvm_spin_loop_t spin; + uvm_access_counter_buffer_info_t *access_counters = &gpu->parent->access_counter_buffer_info; + NvU32 last_instance_ptr_idx = 0; + uvm_aperture_t last_aperture = UVM_APERTURE_PEER_MAX; + + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.access_counters.service_lock)); + UVM_ASSERT(gpu->parent->access_counters_supported); + + notification_cache = batch_context->notification_cache; + + get = access_counters->cached_get; + + // Read put pointer from GPU and cache it + if (get == access_counters->cached_put) { + access_counters->cached_put = UVM_GPU_READ_ONCE(*access_counters->rm_info.pAccessCntrBufferPut); + } + + put = access_counters->cached_put; + + if (get == put) + return 0; + + batch_context->phys.num_notifications = 0; + batch_context->virt.num_notifications = 0; + + batch_context->virt.is_single_instance_ptr = true; + batch_context->phys.is_single_aperture = true; + + notification_index = 0; + + // Parse until get != put and have enough space to cache. + while ((get != put) && + (fetch_mode == NOTIFICATION_FETCH_MODE_ALL || notification_index < access_counters->max_batch_size)) { + uvm_access_counter_buffer_entry_t *current_entry = ¬ification_cache[notification_index]; + + // We cannot just wait for the last entry (the one pointed by put) to become valid, we have to do it + // individually since entries can be written out of order + UVM_SPIN_WHILE(!gpu->parent->access_counter_buffer_hal->entry_is_valid(gpu->parent, get), &spin) { + // We have some entry to work on. Let's do the rest later. + if (fetch_mode != NOTIFICATION_FETCH_MODE_ALL && notification_index > 0) + goto done; + } + + // Prevent later accesses being moved above the read of the valid bit + smp_mb__after_atomic(); + + // Got valid bit set. Let's cache. 
+ gpu->parent->access_counter_buffer_hal->parse_entry(gpu->parent, get, current_entry); + + if (current_entry->address.is_virtual) { + batch_context->virt.notifications[batch_context->virt.num_notifications++] = current_entry; + + if (batch_context->virt.is_single_instance_ptr) { + if (batch_context->virt.num_notifications == 1) { + last_instance_ptr_idx = notification_index; + } + else if (cmp_access_counter_instance_ptr(¬ification_cache[last_instance_ptr_idx], + current_entry) != 0) { + batch_context->virt.is_single_instance_ptr = false; + } + } + } + else { + const NvU64 translation_size = get_config_for_type(access_counters, current_entry->counter_type)->translation_size; + current_entry->address.address = UVM_ALIGN_DOWN(current_entry->address.address, translation_size); + + batch_context->phys.notifications[batch_context->phys.num_notifications++] = current_entry; + + current_entry->physical_info.resident_id = + uvm_gpu_get_processor_id_by_address(gpu, uvm_gpu_phys_address(current_entry->address.aperture, + current_entry->address.address)); + + if (batch_context->phys.is_single_aperture) { + if (batch_context->phys.num_notifications == 1) + last_aperture = current_entry->address.aperture; + else if (current_entry->address.aperture != last_aperture) + batch_context->phys.is_single_aperture = false; + } + + if (current_entry->counter_type == UVM_ACCESS_COUNTER_TYPE_MOMC) + UVM_ASSERT(uvm_id_equal(current_entry->physical_info.resident_id, gpu->id)); + else + UVM_ASSERT(!uvm_id_equal(current_entry->physical_info.resident_id, gpu->id)); + } + + ++notification_index; + ++get; + if (get == access_counters->max_notifications) + get = 0; + } + +done: + write_get(gpu->parent, get); + + return notification_index; +} + +static void translate_virt_notifications_instance_ptrs(uvm_gpu_t *gpu, + uvm_access_counter_service_batch_context_t *batch_context) +{ + NvU32 i; + NV_STATUS status; + + for (i = 0; i < batch_context->virt.num_notifications; ++i) { + uvm_access_counter_buffer_entry_t *current_entry = batch_context->virt.notifications[i]; + + if (i == 0 || + cmp_access_counter_instance_ptr(current_entry, batch_context->virt.notifications[i - 1]) != 0) { + // If instance_ptr is different, make a new translation. If the + // translation fails then va_space will be NULL and the entry will + // simply be ignored in subsequent processing. + status = uvm_gpu_access_counter_entry_to_va_space(gpu, + current_entry, + ¤t_entry->virtual_info.va_space); + if (status != NV_OK) + UVM_ASSERT(current_entry->virtual_info.va_space == NULL); + } + else { + current_entry->virtual_info.va_space = batch_context->virt.notifications[i - 1]->virtual_info.va_space; + } + } +} + +// GVA notifications provide an instance_ptr and ve_id that can be directly +// translated to a VA space. In order to minimize translations, we sort the +// entries by instance_ptr. +static void preprocess_virt_notifications(uvm_gpu_t *gpu, + uvm_access_counter_service_batch_context_t *batch_context) +{ + if (!batch_context->virt.is_single_instance_ptr) { + // Sort by instance_ptr + sort(batch_context->virt.notifications, + batch_context->virt.num_notifications, + sizeof(*batch_context->virt.notifications), + cmp_sort_virt_notifications_by_instance_ptr, + NULL); + } + + translate_virt_notifications_instance_ptrs(gpu, batch_context); +} + +static NV_STATUS service_virt_notifications(uvm_gpu_t *gpu, + uvm_access_counter_service_batch_context_t *batch_context) +{ + // TODO: Bug 1990466: Service virtual notifications. 
Entries with NULL + // va_space are simply dropped. + if (uvm_enable_builtin_tests) { + NvU32 i; + + preprocess_virt_notifications(gpu, batch_context); + + for (i = 0; i < batch_context->virt.num_notifications; ++i) { + const bool on_managed = false; + uvm_tools_broadcast_access_counter(gpu, batch_context->virt.notifications[i], on_managed); + } + } + + return NV_OK; +} + +// GPA notifications provide a physical address and an aperture. Sort +// accesses by aperture to try to coalesce operations on the same target +// processor. +static void preprocess_phys_notifications(uvm_access_counter_service_batch_context_t *batch_context) +{ + if (!batch_context->phys.is_single_aperture) { + // Sort by instance_ptr + sort(batch_context->phys.notifications, + batch_context->phys.num_notifications, + sizeof(*batch_context->phys.notifications), + cmp_sort_phys_notifications_by_processor_id, + NULL); + } +} + +static NV_STATUS service_va_block_locked(uvm_processor_id_t processor, + uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_service_block_context_t *service_context, + uvm_page_mask_t *accessed_pages) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_range_group_range_iter_t iter; + uvm_page_index_t page_index; + uvm_page_index_t first_page_index; + uvm_page_index_t last_page_index; + NvU32 page_count = 0; + const uvm_page_mask_t *residency_mask; + + uvm_assert_mutex_locked(&va_block->lock); + + // GPU VA space could be gone since we received the notification. We handle + // this case by skipping service if processor is not in the mapped mask. + // Using this approach we also filter out notifications for pages that + // moved since they were reported by the GPU. This is fine because: + // - If the GPU is still accessing them, it should have faulted + // - If the GPU gets remote mappings in the future, we will get new + // notifications and we will act accordingly + // - If the GPU does not access the pages again, we do not want to migrate + // them + if (!uvm_processor_mask_test(&va_block->mapped, processor)) + return NV_OK; + + if (uvm_processor_mask_test(&va_block->resident, processor)) + residency_mask = uvm_va_block_resident_mask_get(va_block, processor); + else + residency_mask = NULL; + + first_page_index = PAGES_PER_UVM_VA_BLOCK; + last_page_index = 0; + + // Initialize fault service block context + uvm_processor_mask_zero(&service_context->resident_processors); + service_context->read_duplicate_count = 0; + service_context->thrashing_pin_count = 0; + + // If the page is already resident on the accessing processor, the + // notification for this page is stale. Skip it. 
+ if (residency_mask) + uvm_page_mask_andnot(accessed_pages, accessed_pages, residency_mask); + + uvm_range_group_range_migratability_iter_first(va_space, va_block->start, va_block->end, &iter); + + for_each_va_block_page_in_mask(page_index, accessed_pages, va_block) { + uvm_perf_thrashing_hint_t thrashing_hint; + NvU64 address = uvm_va_block_cpu_page_address(va_block, page_index); + bool read_duplicate = false; + uvm_processor_id_t new_residency; + + // Ensure that the migratability iterator covers the current address + while (iter.end < address) + uvm_range_group_range_migratability_iter_next(va_space, &iter, va_block->end); + + UVM_ASSERT(iter.start <= address && iter.end >= address); + + // If the range is not migratable, skip the page + if (!iter.migratable) + continue; + + thrashing_hint = uvm_perf_thrashing_get_hint(va_block, address, processor); + if (thrashing_hint.type == UVM_PERF_THRASHING_HINT_TYPE_THROTTLE) { + // If the page is throttling, ignore the access counter + // notification + continue; + } + else if (thrashing_hint.type == UVM_PERF_THRASHING_HINT_TYPE_PIN) { + if (service_context->thrashing_pin_count++ == 0) + uvm_page_mask_zero(&service_context->thrashing_pin_mask); + + uvm_page_mask_set(&service_context->thrashing_pin_mask, page_index); + } + + service_context->block_context.policy = uvm_va_policy_get(va_block, address); + + new_residency = uvm_va_block_select_residency(va_block, + page_index, + processor, + uvm_fault_access_type_mask_bit(UVM_FAULT_ACCESS_TYPE_PREFETCH), + service_context->block_context.policy, + &thrashing_hint, + UVM_SERVICE_OPERATION_ACCESS_COUNTERS, + &read_duplicate); + + if (!uvm_processor_mask_test_and_set(&service_context->resident_processors, new_residency)) + uvm_page_mask_zero(&service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency); + + uvm_page_mask_set(&service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency, page_index); + + if (page_index < first_page_index) + first_page_index = page_index; + if (page_index > last_page_index) + last_page_index = page_index; + + ++page_count; + + service_context->access_type[page_index] = UVM_FAULT_ACCESS_TYPE_PREFETCH; + } + + // Apply the changes computed in the service block context, if there are + // pages to be serviced + if (page_count > 0) { + uvm_processor_id_t id; + uvm_processor_mask_t update_processors; + + uvm_processor_mask_and(&update_processors, &va_block->resident, &service_context->resident_processors); + + // Remove pages that are already resident in the destination processors + for_each_id_in_mask(id, &update_processors) { + bool migrate_pages; + uvm_page_mask_t *residency_mask = uvm_va_block_resident_mask_get(va_block, id); + UVM_ASSERT(residency_mask); + + migrate_pages = uvm_page_mask_andnot(&service_context->per_processor_masks[uvm_id_value(id)].new_residency, + &service_context->per_processor_masks[uvm_id_value(id)].new_residency, + residency_mask); + + if (!migrate_pages) + uvm_processor_mask_clear(&service_context->resident_processors, id); + } + + if (!uvm_processor_mask_empty(&service_context->resident_processors)) { + while (first_page_index <= last_page_index) { + uvm_page_index_t outer = last_page_index + 1; + + if (uvm_va_block_is_hmm(va_block)) { + status = uvm_hmm_find_policy_vma_and_outer(va_block, + &service_context->block_context, + first_page_index, + &outer); + if (status != NV_OK) + break; + } + + service_context->region = uvm_va_block_region(first_page_index, outer); + first_page_index = outer; + + status = 
uvm_va_block_service_locked(processor, va_block, va_block_retry, service_context); + if (status != NV_OK) + break; + } + } + } + + ++service_context->num_retries; + + return status; +} + +static void reverse_mappings_to_va_block_page_mask(uvm_va_block_t *va_block, + const uvm_reverse_map_t *reverse_mappings, + size_t num_reverse_mappings, + uvm_page_mask_t *page_mask) +{ + NvU32 index; + + UVM_ASSERT(page_mask); + + if (num_reverse_mappings > 0) + UVM_ASSERT(reverse_mappings); + + uvm_page_mask_zero(page_mask); + + // Populate the mask of accessed pages within the VA Block + for (index = 0; index < num_reverse_mappings; ++index) { + const uvm_reverse_map_t *reverse_map = &reverse_mappings[index]; + uvm_va_block_region_t region = reverse_map->region; + + UVM_ASSERT(reverse_map->va_block == va_block); + + // The VA Block could have been split since we obtained the reverse + // mappings. Clamp the region to the current VA block size, to handle + // the case in which it was split. + region.outer = min(region.outer, (uvm_page_index_t)uvm_va_block_num_cpu_pages(va_block)); + region.first = min(region.first, region.outer); + + uvm_page_mask_region_fill(page_mask, region); + } +} + +static NV_STATUS service_phys_single_va_block(uvm_gpu_t *gpu, + uvm_access_counter_service_batch_context_t *batch_context, + const uvm_access_counter_buffer_entry_t *current_entry, + const uvm_reverse_map_t *reverse_mappings, + size_t num_reverse_mappings, + bool *clear_counter) +{ + size_t index; + uvm_va_block_t *va_block = reverse_mappings[0].va_block; + uvm_va_space_t *va_space = NULL; + struct mm_struct *mm = NULL; + NV_STATUS status = NV_OK; + const uvm_processor_id_t processor = current_entry->counter_type == UVM_ACCESS_COUNTER_TYPE_MIMC? + gpu->id: UVM_ID_CPU; + + *clear_counter = false; + + UVM_ASSERT(num_reverse_mappings > 0); + + uvm_mutex_lock(&va_block->lock); + va_space = uvm_va_block_get_va_space_maybe_dead(va_block); + uvm_mutex_unlock(&va_block->lock); + + if (va_space) { + uvm_va_block_retry_t va_block_retry; + va_space_access_counters_info_t *va_space_access_counters; + uvm_service_block_context_t *service_context = &batch_context->block_service_context; + uvm_page_mask_t *accessed_pages = &batch_context->accessed_pages; + + // If an mm is registered with the VA space, we have to retain it + // in order to lock it before locking the VA space. + mm = uvm_va_space_mm_retain_lock(va_space); + + uvm_va_space_down_read(va_space); + + // Re-check that the VA block is valid after taking the VA block lock. 
+ if (uvm_va_block_is_dead(va_block)) + goto done; + + va_space_access_counters = va_space_access_counters_info_get(va_space); + if (UVM_ID_IS_CPU(processor) && !atomic_read(&va_space_access_counters->params.enable_momc_migrations)) + goto done; + + if (!UVM_ID_IS_CPU(processor) && !atomic_read(&va_space_access_counters->params.enable_mimc_migrations)) + goto done; + + service_context->operation = UVM_SERVICE_OPERATION_ACCESS_COUNTERS; + service_context->num_retries = 0; + service_context->block_context.mm = mm; + + uvm_mutex_lock(&va_block->lock); + + reverse_mappings_to_va_block_page_mask(va_block, reverse_mappings, num_reverse_mappings, accessed_pages); + + status = UVM_VA_BLOCK_RETRY_LOCKED(va_block, &va_block_retry, + service_va_block_locked(processor, + va_block, + &va_block_retry, + service_context, + accessed_pages)); + + uvm_mutex_unlock(&va_block->lock); + + if (status == NV_OK) + *clear_counter = true; + } + +done: + if (va_space) { + uvm_va_space_up_read(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + } + + // Drop the refcounts taken by the reverse map translation routines + for (index = 0; index < num_reverse_mappings; ++index) + uvm_va_block_release(va_block); + + return status; +} + +static NV_STATUS service_phys_va_blocks(uvm_gpu_t *gpu, + uvm_access_counter_service_batch_context_t *batch_context, + const uvm_access_counter_buffer_entry_t *current_entry, + const uvm_reverse_map_t *reverse_mappings, + size_t num_reverse_mappings, + bool *clear_counter) +{ + NV_STATUS status = NV_OK; + size_t index; + + *clear_counter = false; + + for (index = 0; index < num_reverse_mappings; ++index) { + bool clear_counter_local = false; + status = service_phys_single_va_block(gpu, + batch_context, + current_entry, + reverse_mappings + index, + 1, + &clear_counter_local); + if (status != NV_OK) + break; + + *clear_counter = *clear_counter || clear_counter_local; + } + + // In the case of failure, drop the refcounts for the remaining reverse mappings + while (++index < num_reverse_mappings) + uvm_va_block_release(reverse_mappings[index].va_block); + + return status; +} + +// Iterate over all regions set in the given sub_granularity mask +#define for_each_sub_granularity_region(region_start, region_end, sub_granularity, config) \ + for ((region_start) = find_first_bit(&(sub_granularity), (config)->sub_granularity_regions_per_translation), \ + (region_end) = find_next_zero_bit(&(sub_granularity), \ + (config)->sub_granularity_regions_per_translation, \ + (region_start) + 1); \ + (region_start) < config->sub_granularity_regions_per_translation; \ + (region_start) = find_next_bit(&(sub_granularity), \ + (config)->sub_granularity_regions_per_translation, \ + (region_end) + 1), \ + (region_end) = find_next_zero_bit(&(sub_granularity), \ + (config)->sub_granularity_regions_per_translation, \ + (region_start) + 1)) + +static bool are_reverse_mappings_on_single_block(const uvm_reverse_map_t *reverse_mappings, size_t num_reverse_mappings) +{ + size_t index; + uvm_va_block_t *prev_va_block = NULL; + + for (index = 0; index < num_reverse_mappings; ++index) { + uvm_va_block_t *va_block = reverse_mappings[index].va_block; + UVM_ASSERT(va_block); + + if (prev_va_block && prev_va_block != va_block) + return false; + + prev_va_block = va_block; + } + + return true; +} + +// Service the given translation range. It will return the count of the reverse +// mappings found during servicing in num_reverse_mappings, even if the function +// doesn't return NV_OK. 
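+//
+// Illustrative example: with a 2M translation split into 64K sub-granularity
+// regions, a sub_granularity mask of 0xf causes only the first 256K
+// (4 regions x 64K) of the range to be reverse-mapped and serviced, rather
+// than the whole 2M.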
+static NV_STATUS service_phys_notification_translation(uvm_gpu_t *gpu, + uvm_gpu_t *resident_gpu, + uvm_access_counter_service_batch_context_t *batch_context, + const uvm_gpu_access_counter_type_config_t *config, + const uvm_access_counter_buffer_entry_t *current_entry, + NvU64 address, + unsigned long sub_granularity, + size_t *num_reverse_mappings, + bool *clear_counter) +{ + NV_STATUS status; + NvU32 region_start, region_end; + + *num_reverse_mappings = 0; + + // Get the reverse_map translations for all the regions set in the + // sub_granularity field of the counter. + for_each_sub_granularity_region(region_start, region_end, sub_granularity, config) { + NvU64 local_address = address + region_start * config->sub_granularity_region_size; + NvU32 local_translation_size = (region_end - region_start) * config->sub_granularity_region_size; + uvm_reverse_map_t *local_reverse_mappings = batch_context->phys.translations + *num_reverse_mappings; + + // Obtain the virtual addresses of the pages within the reported + // DMA range + if (resident_gpu) { + *num_reverse_mappings += uvm_pmm_gpu_phys_to_virt(&resident_gpu->pmm, + local_address, + local_translation_size, + local_reverse_mappings); + } + else { + *num_reverse_mappings += uvm_pmm_sysmem_mappings_dma_to_virt(&gpu->pmm_reverse_sysmem_mappings, + local_address, + local_translation_size, + local_reverse_mappings, + local_translation_size / PAGE_SIZE); + } + } + + if (*num_reverse_mappings == 0) + return NV_OK; + + // Service all the translations + if (are_reverse_mappings_on_single_block(batch_context->phys.translations, *num_reverse_mappings)) { + status = service_phys_single_va_block(gpu, + batch_context, + current_entry, + batch_context->phys.translations, + *num_reverse_mappings, + clear_counter); + } + else { + status = service_phys_va_blocks(gpu, + batch_context, + current_entry, + batch_context->phys.translations, + *num_reverse_mappings, + clear_counter); + } + + return status; +} + +static NV_STATUS service_phys_notification(uvm_gpu_t *gpu, + uvm_access_counter_service_batch_context_t *batch_context, + const uvm_access_counter_buffer_entry_t *current_entry) +{ + NvU64 address; + NvU64 translation_index; + uvm_access_counter_buffer_info_t *access_counters = &gpu->parent->access_counter_buffer_info; + uvm_access_counter_type_t counter_type = current_entry->counter_type; + const uvm_gpu_access_counter_type_config_t *config = get_config_for_type(access_counters, counter_type); + unsigned long sub_granularity; + size_t total_reverse_mappings = 0; + uvm_gpu_t *resident_gpu = NULL; + NV_STATUS status = NV_OK; + bool clear_counter = false; + + address = current_entry->address.address; + UVM_ASSERT(address % config->translation_size == 0); + sub_granularity = current_entry->sub_granularity; + + if (config->rm.granularity == UVM_ACCESS_COUNTER_GRANULARITY_64K) + sub_granularity = 1; + + if (UVM_ID_IS_GPU(current_entry->physical_info.resident_id)) { + resident_gpu = uvm_gpu_get_by_processor_id(current_entry->physical_info.resident_id); + UVM_ASSERT(resident_gpu != NULL); + + if (gpu != resident_gpu && uvm_gpus_are_nvswitch_connected(gpu, resident_gpu)) { + UVM_ASSERT(address >= resident_gpu->parent->nvswitch_info.fabric_memory_window_start); + address -= resident_gpu->parent->nvswitch_info.fabric_memory_window_start; + } + + // On P9 systems, the CPU accesses the reserved heap on vidmem via + // coherent NVLINK mappings. This can trigger notifications that + // fall outside of the allocatable address range. We just drop + // them. 
+ if (address >= resident_gpu->mem_info.max_allocatable_address) + return NV_OK; + } + + for (translation_index = 0; translation_index < config->translations_per_counter; ++translation_index) { + size_t num_reverse_mappings; + bool clear_counter_local = false; + status = service_phys_notification_translation(gpu, + resident_gpu, + batch_context, + config, + current_entry, + address, + sub_granularity, + &num_reverse_mappings, + &clear_counter_local); + total_reverse_mappings += num_reverse_mappings; + clear_counter = clear_counter || clear_counter_local; + + if (status != NV_OK) + break; + + address += config->translation_size; + sub_granularity = sub_granularity >> config->sub_granularity_regions_per_translation; + } + + // TODO: Bug 1990466: Here we already have virtual addresses and + // address spaces. Merge virtual and physical notification handling + + // Currently we only report events for our tests, not for tools + if (uvm_enable_builtin_tests) { + const bool on_managed = total_reverse_mappings != 0; + uvm_tools_broadcast_access_counter(gpu, current_entry, on_managed); + } + + if (status == NV_OK && clear_counter) + status = access_counter_clear_targeted(gpu, current_entry); + + return status; +} + +// TODO: Bug 2018899: Add statistics for dropped access counter notifications +static NV_STATUS service_phys_notifications(uvm_gpu_t *gpu, + uvm_access_counter_service_batch_context_t *batch_context) +{ + NvU32 i; + preprocess_phys_notifications(batch_context); + + for (i = 0; i < batch_context->phys.num_notifications; ++i) { + NV_STATUS status; + uvm_access_counter_buffer_entry_t *current_entry = batch_context->phys.notifications[i]; + + if (!UVM_ID_IS_VALID(current_entry->physical_info.resident_id)) + continue; + + status = service_phys_notification(gpu, batch_context, current_entry); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +void uvm_gpu_service_access_counters(uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + uvm_access_counter_service_batch_context_t *batch_context = &gpu->parent->access_counter_buffer_info.batch_service_context; + + UVM_ASSERT(gpu->parent->access_counters_supported); + + if (gpu->parent->access_counter_buffer_info.notifications_ignored_count > 0) + return; + + while (1) { + batch_context->num_cached_notifications = fetch_access_counter_buffer_entries(gpu, + batch_context, + NOTIFICATION_FETCH_MODE_BATCH_READY); + if (batch_context->num_cached_notifications == 0) + break; + + ++batch_context->batch_id; + + status = service_virt_notifications(gpu, batch_context); + if (status != NV_OK) + break; + + status = service_phys_notifications(gpu, batch_context); + if (status != NV_OK) + break; + } + + if (status != NV_OK) { + UVM_DBG_PRINT("Error %s servicing access counter notifications on GPU: %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + } +} + +static const NvU32 g_uvm_access_counters_threshold_max = (1 << 15) - 1; + +static NV_STATUS access_counters_config_from_test_params(const UVM_TEST_RECONFIGURE_ACCESS_COUNTERS_PARAMS *params, + UvmGpuAccessCntrConfig *config) +{ + NvU64 tracking_size; + memset(config, 0, sizeof(*config)); + + if (params->threshold == 0 || params->threshold > g_uvm_access_counters_threshold_max) + return NV_ERR_INVALID_ARGUMENT; + + if (config_granularity_to_bytes(params->mimc_granularity, &tracking_size) != NV_OK) + return NV_ERR_INVALID_ARGUMENT; + + if (config_granularity_to_bytes(params->momc_granularity, &tracking_size) != NV_OK) + return NV_ERR_INVALID_ARGUMENT; + + // Since values for granularity/use 
limit are shared between tests and + // nv_uvm_types.h, the value will be checked in the call to + // nvUvmInterfaceEnableAccessCntr + config->mimcGranularity = params->mimc_granularity; + config->momcGranularity = params->momc_granularity; + + config->mimcUseLimit = params->mimc_use_limit; + config->momcUseLimit = params->momc_use_limit; + + config->threshold = params->threshold; + + return NV_OK; +} + +bool uvm_va_space_has_access_counter_migrations(uvm_va_space_t *va_space) +{ + va_space_access_counters_info_t *va_space_access_counters = va_space_access_counters_info_get(va_space); + + return atomic_read(&va_space_access_counters->params.enable_mimc_migrations); +} + +NV_STATUS uvm_perf_access_counters_init() +{ + uvm_perf_module_init("perf_access_counters", + UVM_PERF_MODULE_TYPE_ACCESS_COUNTERS, + g_callbacks_access_counters, + ARRAY_SIZE(g_callbacks_access_counters), + &g_module_access_counters); + + return NV_OK; +} + +void uvm_perf_access_counters_exit() +{ +} + +NV_STATUS uvm_perf_access_counters_load(uvm_va_space_t *va_space) +{ + va_space_access_counters_info_t *va_space_access_counters; + NV_STATUS status; + + status = uvm_perf_module_load(&g_module_access_counters, va_space); + if (status != NV_OK) + return status; + + va_space_access_counters = va_space_access_counters_info_create(va_space); + if (!va_space_access_counters) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_perf_access_counters_unload(uvm_va_space_t *va_space) +{ + uvm_perf_module_unload(&g_module_access_counters, va_space); + + va_space_access_counters_info_destroy(va_space); +} + +NV_STATUS uvm_test_access_counters_enabled_by_default(UVM_TEST_ACCESS_COUNTERS_ENABLED_BY_DEFAULT_PARAMS *params, + struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu = NULL; + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + params->enabled = uvm_gpu_access_counters_required(gpu->parent); + + uvm_gpu_release(gpu); + + return NV_OK; +} + +NV_STATUS uvm_test_reconfigure_access_counters(UVM_TEST_RECONFIGURE_ACCESS_COUNTERS_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu = NULL; + UvmGpuAccessCntrConfig config = {0}; + va_space_access_counters_info_t *va_space_access_counters; + uvm_va_space_t *va_space_reconfiguration_owner; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + status = access_counters_config_from_test_params(params, &config); + if (status != NV_OK) + return status; + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + if (!gpu->parent->access_counters_supported) { + status = NV_ERR_NOT_SUPPORTED; + goto exit_release_gpu; + } + + // ISR lock ensures that we own GET/PUT registers. It disables interrupts + // and ensures that no other thread (nor the top half) will be able to + // re-enable interrupts during reconfiguration. + uvm_gpu_access_counters_isr_lock(gpu->parent); + + uvm_va_space_down_read_rm(va_space); + + if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) { + status = NV_ERR_INVALID_STATE; + goto exit_isr_unlock; + } + + // Unregistration already started. 
Fail to avoid an interleaving in which
+    // access counters end up being enabled on an unregistered GPU:
+    // (thread 0) uvm_va_space_unregister_gpu disables access counters
+    // (thread 1) assuming no VA space lock is held yet by the unregistration,
+    //            this function enables access counters and runs to completion,
+    //            returning NV_OK
+    // (thread 0) uvm_va_space_unregister_gpu takes the VA space lock and
+    //            completes the unregistration
+    if (uvm_processor_mask_test(&va_space->gpu_unregister_in_progress, gpu->id)) {
+        status = NV_ERR_INVALID_STATE;
+        goto exit_isr_unlock;
+    }
+
+    va_space_access_counters = va_space_access_counters_info_get(va_space);
+
+    va_space_reconfiguration_owner = gpu->parent->access_counter_buffer_info.reconfiguration_owner;
+
+    // If any other VA space has reconfigured access counters on this GPU,
+    // return an error to avoid overwriting its configuration.
+    if (va_space_reconfiguration_owner && (va_space_reconfiguration_owner != va_space)) {
+        status = NV_ERR_INVALID_STATE;
+        goto exit_isr_unlock;
+    }
+
+    if (!uvm_processor_mask_test(&va_space->access_counters_enabled_processors, gpu->id)) {
+        status = gpu_access_counters_enable(gpu, &config);
+
+        if (status == NV_OK)
+            uvm_processor_mask_set_atomic(&va_space->access_counters_enabled_processors, gpu->id);
+        else
+            goto exit_isr_unlock;
+    }
+
+    UVM_ASSERT(gpu->parent->isr.access_counters.handling_ref_count > 0);
+
+    // Disable the counters and re-enable them with the new configuration.
+    // Note that we are yielding ownership even though access counters are
+    // still enabled on this GPU. This inconsistent state is not visible to
+    // other threads or VA spaces because of the ISR lock, and it is
+    // immediately rectified by retaking ownership.
+    access_counters_yield_ownership(gpu);
+    status = access_counters_take_ownership(gpu, &config);
+
+    // Retaking ownership failed, so RM owns the interrupt.
+    if (status != NV_OK) {
+        // The state of any other VA space with access counters enabled is
+        // corrupt
+        // TODO: Bug 2419290: Fail reconfiguration if access
+        // counters are enabled on a different VA space.
+ if (gpu->parent->isr.access_counters.handling_ref_count > 1) { + UVM_ASSERT_MSG(status == NV_OK, + "Access counters interrupt still owned by RM, other VA spaces may experience failures"); + } + + uvm_processor_mask_clear_atomic(&va_space->access_counters_enabled_processors, gpu->id); + gpu_access_counters_disable(gpu); + goto exit_isr_unlock; + } + + gpu->parent->access_counter_buffer_info.reconfiguration_owner = va_space; + + uvm_va_space_up_read_rm(va_space); + uvm_va_space_down_write(va_space); + atomic_set(&va_space_access_counters->params.enable_mimc_migrations, !!params->enable_mimc_migrations); + atomic_set(&va_space_access_counters->params.enable_momc_migrations, !!params->enable_momc_migrations); + uvm_va_space_up_write(va_space); + +exit_isr_unlock: + if (status != NV_OK) + uvm_va_space_up_read_rm(va_space); + + uvm_gpu_access_counters_isr_unlock(gpu->parent); + +exit_release_gpu: + uvm_gpu_release(gpu); + + return status; +} + +NV_STATUS uvm_test_reset_access_counters(UVM_TEST_RESET_ACCESS_COUNTERS_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu = NULL; + uvm_access_counter_buffer_info_t *access_counters; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + if (params->mode >= UVM_TEST_ACCESS_COUNTER_RESET_MODE_MAX) + return NV_ERR_INVALID_ARGUMENT; + + if (params->mode == UVM_TEST_ACCESS_COUNTER_RESET_MODE_TARGETED && + params->counter_type >= UVM_TEST_ACCESS_COUNTER_TYPE_MAX) { + return NV_ERR_INVALID_ARGUMENT; + } + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + if (!gpu->parent->access_counters_supported) { + status = NV_ERR_NOT_SUPPORTED; + goto exit_release_gpu; + } + + uvm_gpu_access_counters_isr_lock(gpu->parent); + + // Access counters not enabled. Nothing to reset + if (gpu->parent->isr.access_counters.handling_ref_count == 0) + goto exit_isr_unlock; + + access_counters = &gpu->parent->access_counter_buffer_info; + + if (params->mode == UVM_TEST_ACCESS_COUNTER_RESET_MODE_ALL) { + status = access_counter_clear_all(gpu); + } + else { + uvm_access_counter_buffer_entry_t entry = { 0 }; + + if (params->counter_type == UVM_TEST_ACCESS_COUNTER_TYPE_MIMC) + entry.counter_type = UVM_ACCESS_COUNTER_TYPE_MIMC; + else + entry.counter_type = UVM_ACCESS_COUNTER_TYPE_MOMC; + + entry.bank = params->bank; + entry.tag = params->tag; + + status = access_counter_clear_targeted(gpu, &entry); + } + + if (status == NV_OK) + status = uvm_tracker_wait(&access_counters->clear_tracker); + +exit_isr_unlock: + uvm_gpu_access_counters_isr_unlock(gpu->parent); + +exit_release_gpu: + uvm_gpu_release(gpu); + + return status; +} + +void uvm_gpu_access_counters_set_ignore(uvm_gpu_t *gpu, bool do_ignore) +{ + bool change_intr_state = false; + + if (!gpu->parent->access_counters_supported) + return; + + uvm_gpu_access_counters_isr_lock(gpu->parent); + + if (do_ignore) { + if (gpu->parent->access_counter_buffer_info.notifications_ignored_count++ == 0) + change_intr_state = true; + } + else { + UVM_ASSERT(gpu->parent->access_counter_buffer_info.notifications_ignored_count >= 1); + if (--gpu->parent->access_counter_buffer_info.notifications_ignored_count == 0) + change_intr_state = true; + } + + if (change_intr_state) { + // We need to avoid an interrupt storm while ignoring notifications. We + // just disable the interrupt. 
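+        // Both the disable (when starting to ignore) and the later re-enable
+        // (when un-ignoring) are performed under the interrupts spinlock
+        // taken below.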
+ uvm_spin_lock_irqsave(&gpu->parent->isr.interrupts_lock); + + if (do_ignore) + uvm_gpu_access_counters_intr_disable(gpu->parent); + else + uvm_gpu_access_counters_intr_enable(gpu->parent); + + uvm_spin_unlock_irqrestore(&gpu->parent->isr.interrupts_lock); + + if (!do_ignore) + access_counter_buffer_flush_locked(gpu, UVM_GPU_BUFFER_FLUSH_MODE_CACHED_PUT); + } + + uvm_gpu_access_counters_isr_unlock(gpu->parent); +} + +NV_STATUS uvm_test_set_ignore_access_counters(UVM_TEST_SET_IGNORE_ACCESS_COUNTERS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu = NULL; + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + if (gpu->parent->access_counters_supported) + uvm_gpu_access_counters_set_ignore(gpu, params->ignore); + else + status = NV_ERR_NOT_SUPPORTED; + + uvm_gpu_release(gpu); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_gpu_access_counters.h b/kernel-open/nvidia-uvm/uvm_gpu_access_counters.h new file mode 100644 index 000000000..09d318b77 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_access_counters.h @@ -0,0 +1,88 @@ +/******************************************************************************* + Copyright (c) 2017 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef __UVM_GPU_ACCESS_COUNTERS_H__ +#define __UVM_GPU_ACCESS_COUNTERS_H__ + +#include "uvm_common.h" +#include "uvm_forward_decl.h" +#include "uvm_test_ioctl.h" + +NV_STATUS uvm_gpu_init_access_counters(uvm_parent_gpu_t *parent_gpu); +void uvm_gpu_deinit_access_counters(uvm_parent_gpu_t *parent_gpu); +bool uvm_gpu_access_counters_pending(uvm_parent_gpu_t *parent_gpu); + +void uvm_gpu_service_access_counters(uvm_gpu_t *gpu); + +void uvm_gpu_access_counter_buffer_flush(uvm_gpu_t *gpu); + +// Ignore or unignore access counters notifications. Ignoring means that the +// bottom half is a no-op which just leaves notifications in the HW buffer +// without being serviced and without inspecting any SW state. +// +// To avoid interrupt storms, access counter interrupts will be disabled while +// ignored. Access counter bottom halves may still be scheduled in the top half +// when other interrupts arrive and the top half sees that there are also +// pending access counter notifications. 
+// +// When uningoring, the interrupt conditions will be re-evaluated to trigger +// processing of buffered notifications, if any exist. +void uvm_gpu_access_counters_set_ignore(uvm_gpu_t *gpu, bool do_ignore); + +// Return whether the VA space has access counter migrations enabled. The +// caller must ensure that the VA space cannot go away. +bool uvm_va_space_has_access_counter_migrations(uvm_va_space_t *va_space); + +// Global perf initialization/cleanup functions +NV_STATUS uvm_perf_access_counters_init(void); +void uvm_perf_access_counters_exit(void); + +// VA space Initialization/cleanup functions. See comments in +// uvm_perf_heuristics.h +NV_STATUS uvm_perf_access_counters_load(uvm_va_space_t *va_space); +void uvm_perf_access_counters_unload(uvm_va_space_t *va_space); + +// Check whether access counters should be enabled when the given GPU is +// registered on any VA space. +bool uvm_gpu_access_counters_required(const uvm_parent_gpu_t *parent_gpu); + +// Functions used to enable/disable access counters on a GPU in the given VA +// space. +// +// A per-GPU reference counter tracks the number of VA spaces in which access +// counters are currently enabled. The hardware notifications and interrupts on +// the GPU are enabled the first time any VA space invokes +// uvm_gpu_access_counters_enable, and disabled when the last VA space invokes +// uvm_gpu_access_counters_disable +// +// Locking: the VA space lock must not be held by the caller since these +// functions may take the access counters ISR lock. +NV_STATUS uvm_gpu_access_counters_enable(uvm_gpu_t *gpu, uvm_va_space_t *va_space); +void uvm_gpu_access_counters_disable(uvm_gpu_t *gpu, uvm_va_space_t *va_space); + +NV_STATUS uvm_test_access_counters_enabled_by_default(UVM_TEST_ACCESS_COUNTERS_ENABLED_BY_DEFAULT_PARAMS *params, + struct file *filp); +NV_STATUS uvm_test_reconfigure_access_counters(UVM_TEST_RECONFIGURE_ACCESS_COUNTERS_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_reset_access_counters(UVM_TEST_RESET_ACCESS_COUNTERS_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_set_ignore_access_counters(UVM_TEST_SET_IGNORE_ACCESS_COUNTERS_PARAMS *params, struct file *filp); + +#endif // __UVM_GPU_ACCESS_COUNTERS_H__ diff --git a/kernel-open/nvidia-uvm/uvm_gpu_isr.c b/kernel-open/nvidia-uvm/uvm_gpu_isr.c new file mode 100644 index 000000000..2a5cdac82 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_isr.c @@ -0,0 +1,774 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#include "uvm_api.h"
+#include "uvm_global.h"
+#include "uvm_gpu_isr.h"
+#include "uvm_hal.h"
+#include "uvm_gpu.h"
+#include "uvm_gpu_access_counters.h"
+#include "uvm_gpu_non_replayable_faults.h"
+#include "uvm_thread_context.h"
+
+// Level-based vs pulse-based interrupts
+// =====================================
+// Turing switches to pulse-based interrupts for replayable/non-replayable
+// faults and access counter notifications. Prior GPUs use level-based
+// interrupts.
+//
+// Level-based interrupts are rearmed automatically as long as the interrupt
+// condition is set. Pulse-based interrupts, on the other hand, are
+// re-triggered by clearing their interrupt line and forcing the interrupt
+// condition to be re-evaluated. However, RM re-triggers all top-level
+// interrupts when exiting its top half. Thus, both level-based and pulse-based
+// interrupts need to be disabled at interrupt handling boundaries, in order to
+// avoid interrupt storms.
+//
+// Moreover, in order to make sure that pulse-based interrupts are not missed,
+// we need to clear the interrupt bit and force an interrupt condition
+// re-evaluation after interrupts are re-enabled. In the case of replayable
+// faults and access counter notifications the interrupt condition is
+// re-evaluated by writing to GET. Non-replayable faults work the same way, but
+// they are currently owned by RM, so UVM doesn't have to do anything.
+
+// For use by the nv_kthread_q that is servicing the replayable fault bottom
+// half, only.
+static void replayable_faults_isr_bottom_half_entry(void *args);
+
+// For use by the nv_kthread_q that is servicing the non-replayable fault
+// bottom half, only.
+static void non_replayable_faults_isr_bottom_half_entry(void *args);
+
+// For use by the nv_kthread_q that is servicing the access counter
+// notification bottom half, only.
+static void access_counters_isr_bottom_half_entry(void *args);
+
+// Increments the reference count tracking whether replayable page fault
+// interrupts should be disabled. The caller is guaranteed that replayable page
+// faults are disabled upon return. Interrupts might already be disabled prior
+// to making this call. Each call is ref-counted, so this must be paired with a
+// call to uvm_gpu_replayable_faults_intr_enable().
+//
+// parent_gpu->isr.interrupts_lock must be held to call this function.
+static void uvm_gpu_replayable_faults_intr_disable(uvm_parent_gpu_t *parent_gpu);
+
+// Decrements the reference count tracking whether replayable page fault
+// interrupts should be disabled. Only once the count reaches 0 are the HW
+// interrupts actually enabled, so this call does not guarantee that the
+// interrupts have been re-enabled upon return.
+//
+// uvm_gpu_replayable_faults_intr_disable() must have been called prior to
+// calling this function.
+//
+// parent_gpu->isr.interrupts_lock must be held to call this function.
+static void uvm_gpu_replayable_faults_intr_enable(uvm_parent_gpu_t *parent_gpu);
+
+static unsigned schedule_replayable_faults_handler(uvm_parent_gpu_t *parent_gpu)
+{
+ // handling gets set to false for all handlers during removal, so quit if
+ // the GPU is in the process of being removed.
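// A standalone model (own types, unrelated to the function above) of why
// pulse-based interrupts need the explicit rearm described in the comment at
// the top of this file. "get"/"put" stand in for the fault buffer pointers;
// nothing here is driver code.

#include <stdbool.h>

typedef struct
{
    unsigned get;              // entries consumed by SW
    unsigned put;              // entries produced by HW
    bool intr_pending;         // latched pulse state
} sketch_pulse_intr_t;

// Level-based semantics: the line stays asserted while work is pending, so
// simply re-enabling the interrupt is enough to resume taking faults.
static bool sketch_level_intr_asserted(const sketch_pulse_intr_t *i)
{
    return i->get != i->put;
}

// Pulse-based semantics: the latched state must be cleared and the condition
// re-evaluated (the driver does this by writing GET); otherwise a notification
// that arrived while interrupts were disabled would be lost, or a stale
// pending bit would retrigger with no work left to do.
static void sketch_pulse_intr_rearm(sketch_pulse_intr_t *i)
{
    i->intr_pending = false;
    if (i->get != i->put)
        i->intr_pending = true;
}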
+ if (parent_gpu->isr.replayable_faults.handling) { + + // Use raw call instead of UVM helper. Ownership will be recorded in the + // bottom half. See comment replayable_faults_isr_bottom_half(). + if (down_trylock(&parent_gpu->isr.replayable_faults.service_lock.sem) == 0) { + if (uvm_gpu_replayable_faults_pending(parent_gpu)) { + nv_kref_get(&parent_gpu->gpu_kref); + + // Interrupts need to be disabled here to avoid an interrupt + // storm + uvm_gpu_replayable_faults_intr_disable(parent_gpu); + + // Schedule a bottom half, but do *not* release the GPU ISR + // lock. The bottom half releases the GPU ISR lock as part of + // its cleanup. + nv_kthread_q_schedule_q_item(&parent_gpu->isr.bottom_half_q, + &parent_gpu->isr.replayable_faults.bottom_half_q_item); + return 1; + } + else { + up(&parent_gpu->isr.replayable_faults.service_lock.sem); + } + } + } + + return 0; +} + +static unsigned schedule_non_replayable_faults_handler(uvm_parent_gpu_t *parent_gpu) +{ + // handling gets set to false for all handlers during removal, so quit if + // the GPU is in the process of being removed. + if (parent_gpu->isr.non_replayable_faults.handling) { + // Non-replayable_faults are stored in a synchronized circular queue + // shared by RM/UVM. Therefore, we can query the number of pending + // faults. This type of faults are not replayed and since RM advances + // GET to PUT when copying the fault packets to the queue, no further + // interrupts will be triggered by the gpu and faults may stay + // unserviced. Therefore, if there is a fault in the queue, we schedule + // a bottom half unconditionally. + if (uvm_gpu_non_replayable_faults_pending(parent_gpu)) { + bool scheduled; + nv_kref_get(&parent_gpu->gpu_kref); + + scheduled = nv_kthread_q_schedule_q_item(&parent_gpu->isr.bottom_half_q, + &parent_gpu->isr.non_replayable_faults.bottom_half_q_item) != 0; + + // If the q_item did not get scheduled because it was already + // queued, that instance will handle the pending faults. Just + // drop the GPU kref. + if (!scheduled) + uvm_parent_gpu_kref_put(parent_gpu); + + return 1; + } + } + + return 0; +} + +static unsigned schedule_access_counters_handler(uvm_parent_gpu_t *parent_gpu) +{ + uvm_assert_spinlock_locked(&parent_gpu->isr.interrupts_lock); + + if (!parent_gpu->isr.access_counters.handling_ref_count) + return 0; + + if (down_trylock(&parent_gpu->isr.access_counters.service_lock.sem)) + return 0; + + if (!uvm_gpu_access_counters_pending(parent_gpu)) { + up(&parent_gpu->isr.access_counters.service_lock.sem); + return 0; + } + + nv_kref_get(&parent_gpu->gpu_kref); + + // Interrupts need to be disabled to avoid an interrupt storm + uvm_gpu_access_counters_intr_disable(parent_gpu); + + nv_kthread_q_schedule_q_item(&parent_gpu->isr.bottom_half_q, + &parent_gpu->isr.access_counters.bottom_half_q_item); + + return 1; +} + +// This is called from RM's top-half ISR (see: the nvidia_isr() function), and UVM is given a +// chance to handle the interrupt, before most of the RM processing. UVM communicates what it +// did, back to RM, via the return code: +// +// NV_OK: +// UVM handled an interrupt. +// +// NV_WARN_MORE_PROCESSING_REQUIRED: +// UVM did not schedule a bottom half, because it was unable to get the locks it +// needed, but there is still UVM work to be done. RM will return "not handled" to the +// Linux kernel, *unless* RM handled other faults in its top half. In that case, the +// fact that UVM did not handle its interrupt is lost. 
However, life and interrupt +// processing continues anyway: the GPU will soon raise another interrupt, because +// that's what it does when there are replayable page faults remaining (GET != PUT in +// the fault buffer). +// +// NV_ERR_NO_INTR_PENDING: +// UVM did not find any work to do. Currently this is handled in RM in exactly the same +// way as NV_WARN_MORE_PROCESSING_REQUIRED is handled. However, the extra precision is +// available for the future. RM's interrupt handling tends to evolve as new chips and +// new interrupts get created. + +static NV_STATUS uvm_isr_top_half(const NvProcessorUuid *gpu_uuid) +{ + uvm_parent_gpu_t *parent_gpu; + unsigned num_handlers_scheduled = 0; + NV_STATUS status; + + if (!in_interrupt() && in_atomic()) { + // Early-out if we're not in interrupt context, but memory allocations + // require GFP_ATOMIC. This happens with CONFIG_DEBUG_SHIRQ enabled, + // where the interrupt handler is called as part of its removal to make + // sure it's prepared for being called even when it's being freed. + // This breaks the assumption that the UVM driver is called in atomic + // context only in the interrupt context, which the thread context + // management relies on. + return NV_OK; + } + + if (!gpu_uuid) { + // This can happen early in the main GPU driver initialization, because + // that involves testing interrupts before the GPU is fully set up. + return NV_ERR_NO_INTR_PENDING; + } + + uvm_spin_lock_irqsave(&g_uvm_global.gpu_table_lock); + + parent_gpu = uvm_parent_gpu_get_by_uuid_locked(gpu_uuid); + + if (parent_gpu == NULL) { + uvm_spin_unlock_irqrestore(&g_uvm_global.gpu_table_lock); + return NV_ERR_NO_INTR_PENDING; + } + + // We take a reference during the top half, and an additional reference for + // each scheduled bottom. References are dropped at the end of the bottom + // halves. + nv_kref_get(&parent_gpu->gpu_kref); + uvm_spin_unlock_irqrestore(&g_uvm_global.gpu_table_lock); + + // Now that we got a GPU object, lock it so that it can't be removed without us noticing. 
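// A hedged, standalone sketch (hypothetical names, not RM code) of how a
// caller could fold the three status codes documented above into a Linux
// style handled / not-handled result. The real dispatch lives in RM; this
// only restates the contract in code form, assuming the NV_STATUS values
// used in this file.

static int sketch_fold_top_half_status(NV_STATUS uvm_status, bool rm_handled_something)
{
    switch (uvm_status) {
        case NV_OK:
            // UVM scheduled at least one bottom half
            return 1;                               // "handled"

        case NV_WARN_MORE_PROCESSING_REQUIRED:
            // UVM has pending work but could not take its locks; the GPU
            // will re-raise the interrupt, so only report "handled" if RM
            // itself serviced something in this top half.
            return rm_handled_something ? 1 : 0;

        case NV_ERR_NO_INTR_PENDING:
        default:
            // No UVM work was found
            return rm_handled_something ? 1 : 0;
    }
}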
+ uvm_spin_lock_irqsave(&parent_gpu->isr.interrupts_lock); + + ++parent_gpu->isr.interrupt_count; + + if (parent_gpu->isr.is_suspended) { + status = NV_ERR_NO_INTR_PENDING; + } + else { + num_handlers_scheduled += schedule_replayable_faults_handler(parent_gpu); + num_handlers_scheduled += schedule_non_replayable_faults_handler(parent_gpu); + num_handlers_scheduled += schedule_access_counters_handler(parent_gpu); + + if (num_handlers_scheduled == 0) + status = NV_WARN_MORE_PROCESSING_REQUIRED; + else + status = NV_OK; + } + + uvm_spin_unlock_irqrestore(&parent_gpu->isr.interrupts_lock); + + uvm_parent_gpu_kref_put(parent_gpu); + + return status; +} + +NV_STATUS uvm_isr_top_half_entry(const NvProcessorUuid *gpu_uuid) +{ + UVM_ENTRY_RET(uvm_isr_top_half(gpu_uuid)); +} + +static NV_STATUS init_queue_on_node(nv_kthread_q_t *queue, const char *name, int node) +{ +#if UVM_THREAD_AFFINITY_SUPPORTED() + if (node != -1 && !cpumask_empty(uvm_cpumask_of_node(node))) { + NV_STATUS status; + + status = errno_to_nv_status(nv_kthread_q_init_on_node(queue, name, node)); + if (status != NV_OK) + return status; + + return errno_to_nv_status(set_cpus_allowed_ptr(queue->q_kthread, uvm_cpumask_of_node(node))); + } +#endif + + return errno_to_nv_status(nv_kthread_q_init(queue, name)); +} + +NV_STATUS uvm_gpu_init_isr(uvm_parent_gpu_t *parent_gpu) +{ + NV_STATUS status = NV_OK; + char kthread_name[TASK_COMM_LEN + 1]; + + if (parent_gpu->replayable_faults_supported) { + status = uvm_gpu_fault_buffer_init(parent_gpu); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to initialize GPU fault buffer: %s, GPU: %s\n", + nvstatusToString(status), + parent_gpu->name); + return status; + } + + nv_kthread_q_item_init(&parent_gpu->isr.replayable_faults.bottom_half_q_item, + replayable_faults_isr_bottom_half_entry, + parent_gpu); + + parent_gpu->isr.replayable_faults.stats.cpu_exec_count = + uvm_kvmalloc_zero(sizeof(*parent_gpu->isr.replayable_faults.stats.cpu_exec_count) * num_possible_cpus()); + if (!parent_gpu->isr.replayable_faults.stats.cpu_exec_count) + return NV_ERR_NO_MEMORY; + + parent_gpu->isr.replayable_faults.handling = true; + + snprintf(kthread_name, sizeof(kthread_name), "UVM GPU%u BH", uvm_id_value(parent_gpu->id)); + status = init_queue_on_node(&parent_gpu->isr.bottom_half_q, kthread_name, parent_gpu->closest_cpu_numa_node); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed in nv_kthread_q_init for bottom_half_q: %s, GPU %s\n", + nvstatusToString(status), + parent_gpu->name); + return status; + } + + if (parent_gpu->non_replayable_faults_supported) { + nv_kthread_q_item_init(&parent_gpu->isr.non_replayable_faults.bottom_half_q_item, + non_replayable_faults_isr_bottom_half_entry, + parent_gpu); + + parent_gpu->isr.non_replayable_faults.stats.cpu_exec_count = + uvm_kvmalloc_zero(sizeof(*parent_gpu->isr.non_replayable_faults.stats.cpu_exec_count) * + num_possible_cpus()); + if (!parent_gpu->isr.non_replayable_faults.stats.cpu_exec_count) + return NV_ERR_NO_MEMORY; + + parent_gpu->isr.non_replayable_faults.handling = true; + + snprintf(kthread_name, sizeof(kthread_name), "UVM GPU%u KC", uvm_id_value(parent_gpu->id)); + status = init_queue_on_node(&parent_gpu->isr.kill_channel_q, + kthread_name, + parent_gpu->closest_cpu_numa_node); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed in nv_kthread_q_init for kill_channel_q: %s, GPU %s\n", + nvstatusToString(status), + parent_gpu->name); + return status; + } + } + + if (parent_gpu->access_counters_supported) { + status = uvm_gpu_init_access_counters(parent_gpu); + if 
(status != NV_OK) { + UVM_ERR_PRINT("Failed to initialize GPU access counters: %s, GPU: %s\n", + nvstatusToString(status), + parent_gpu->name); + return status; + } + + nv_kthread_q_item_init(&parent_gpu->isr.access_counters.bottom_half_q_item, + access_counters_isr_bottom_half_entry, + parent_gpu); + + // Access counters interrupts are initially disabled. They are + // dynamically enabled when the GPU is registered on a VA space. + parent_gpu->isr.access_counters.handling_ref_count = 0; + parent_gpu->isr.access_counters.stats.cpu_exec_count = + uvm_kvmalloc_zero(sizeof(*parent_gpu->isr.access_counters.stats.cpu_exec_count) * num_possible_cpus()); + if (!parent_gpu->isr.access_counters.stats.cpu_exec_count) + return NV_ERR_NO_MEMORY; + } + } + + return NV_OK; +} + +void uvm_gpu_flush_bottom_halves(uvm_parent_gpu_t *parent_gpu) +{ + nv_kthread_q_flush(&parent_gpu->isr.bottom_half_q); + nv_kthread_q_flush(&parent_gpu->isr.kill_channel_q); +} + +void uvm_gpu_disable_isr(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(parent_gpu->isr.access_counters.handling_ref_count == 0); + + // Now that the GPU is safely out of the global table, lock the GPU and mark + // it as no longer handling interrupts so the top half knows not to schedule + // any more bottom halves. + uvm_spin_lock_irqsave(&parent_gpu->isr.interrupts_lock); + + uvm_gpu_replayable_faults_intr_disable(parent_gpu); + + parent_gpu->isr.replayable_faults.was_handling = parent_gpu->isr.replayable_faults.handling; + parent_gpu->isr.non_replayable_faults.was_handling = parent_gpu->isr.non_replayable_faults.handling; + + parent_gpu->isr.replayable_faults.handling = false; + parent_gpu->isr.non_replayable_faults.handling = false; + + uvm_spin_unlock_irqrestore(&parent_gpu->isr.interrupts_lock); + + // Flush all bottom half ISR work items and stop the nv_kthread_q that is + // servicing this GPU's bottom halves. Note that this requires that the + // bottom half never take the global lock, since we're holding it here. + // + // Note that it's safe to call nv_kthread_q_stop() even if + // nv_kthread_q_init() failed in uvm_gpu_init_isr(). + nv_kthread_q_stop(&parent_gpu->isr.bottom_half_q); + nv_kthread_q_stop(&parent_gpu->isr.kill_channel_q); +} + +void uvm_gpu_deinit_isr(uvm_parent_gpu_t *parent_gpu) +{ + // Return ownership to RM: + if (parent_gpu->isr.replayable_faults.was_handling) { + // No user threads could have anything left on + // replayable_faults.disable_intr_ref_count since they must retain the + // GPU across uvm_gpu_replayable_faults_isr_lock/ + // uvm_gpu_replayable_faults_isr_unlock. This means the + // uvm_gpu_replayable_faults_disable_intr above could only have raced + // with bottom halves. + // + // If we cleared replayable_faults.handling before the bottom half got + // to its uvm_gpu_replayable_faults_isr_unlock, when it eventually + // reached uvm_gpu_replayable_faults_isr_unlock it would have skipped + // the disable, leaving us with extra ref counts here. + // + // In any case we're guaranteed that replayable faults interrupts are + // disabled and can't get re-enabled, so we can safely ignore the ref + // count value and just clean things up. 
+ UVM_ASSERT_MSG(parent_gpu->isr.replayable_faults.disable_intr_ref_count > 0, + "%s replayable_faults.disable_intr_ref_count: %llu\n", + parent_gpu->name, + parent_gpu->isr.replayable_faults.disable_intr_ref_count); + + uvm_gpu_fault_buffer_deinit(parent_gpu); + } + + if (parent_gpu->access_counters_supported) { + // It is safe to deinitialize access counters even if they have not been + // successfully initialized. + uvm_gpu_deinit_access_counters(parent_gpu); + } + + uvm_kvfree(parent_gpu->isr.replayable_faults.stats.cpu_exec_count); + uvm_kvfree(parent_gpu->isr.non_replayable_faults.stats.cpu_exec_count); + uvm_kvfree(parent_gpu->isr.access_counters.stats.cpu_exec_count); +} + +static uvm_gpu_t *find_first_valid_gpu(uvm_parent_gpu_t *parent_gpu) +{ + uvm_global_gpu_id_t global_gpu_id = uvm_global_gpu_id_from_gpu_id(parent_gpu->id); + uvm_gpu_t *gpu; + + // When SMC is enabled, there's no longer a 1:1 relationship between the + // parent and the partitions. But because all relevant interrupt paths + // are shared, as is the fault reporting logic, it's sufficient here + // to proceed with any valid uvm_gpu_t, even if the corresponding partition + // didn't cause all, or even any of the interrupts. + // The bottom half handlers will later find the appropriate partitions by + // attributing the notifications to VA spaces as necessary. + if (parent_gpu->smc.enabled) { + NvU32 sub_processor_index; + + uvm_spin_lock_irqsave(&g_uvm_global.gpu_table_lock); + + sub_processor_index = find_first_bit(parent_gpu->valid_gpus, UVM_ID_MAX_SUB_PROCESSORS); + + if (sub_processor_index < UVM_ID_MAX_SUB_PROCESSORS) { + gpu = uvm_gpu_get(uvm_global_id_from_value(uvm_global_id_value(global_gpu_id) + sub_processor_index)); + UVM_ASSERT(gpu != NULL); + } + else { + gpu = NULL; + } + + uvm_spin_unlock_irqrestore(&g_uvm_global.gpu_table_lock); + } + else { + gpu = uvm_gpu_get(global_gpu_id); + UVM_ASSERT(gpu != NULL); + } + + return gpu; +} + +static void replayable_faults_isr_bottom_half(void *args) +{ + uvm_parent_gpu_t *parent_gpu = (uvm_parent_gpu_t *)args; + uvm_gpu_t *gpu; + unsigned int cpu; + + gpu = find_first_valid_gpu(parent_gpu); + if (gpu == NULL) + goto put_kref; + + UVM_ASSERT(parent_gpu->replayable_faults_supported); + + // Record the lock ownership + // The service_lock semaphore is taken in the top half using a raw + // semaphore call (down_trylock()). Here, the lock "ownership" is recorded, + // using a direct call to uvm_record_lock(). The pair of the two raw calls + // result in an ownership "transfer" between the top and bottom halves. + // Due to this ownership transfer, other usages of the service_lock can + // use the UVM (un)lock helpers to handle lock ownership and record keeping. + uvm_record_lock(&parent_gpu->isr.replayable_faults.service_lock, UVM_LOCK_FLAGS_MODE_SHARED); + + // Multiple bottom halves for replayable faults can be running + // concurrently, but only one can be running this function for a given GPU + // since we enter with the replayable_faults.service_lock held. 
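// A standalone sketch of the top-half/bottom-half ownership transfer
// described above, modeled with a plain POSIX semaphore instead of the
// driver's service_lock and uvm_record_lock() book-keeping. Names are
// placeholders; the point is that the producer only schedules work if it
// wins the trylock, and the worker, not the producer, eventually releases it.

#include <stdbool.h>
#include <semaphore.h>

static sem_t sketch_service_lock;  // initialized elsewhere: sem_init(&sketch_service_lock, 0, 1)

// Models the top half: take the lock without blocking, then hand it off to
// the bottom half together with the queued work item.
static bool sketch_top_half_try_schedule(void)
{
    if (sem_trywait(&sketch_service_lock) != 0)
        return false;              // servicing is already owned elsewhere

    // ... queue the bottom half here; lock ownership transfers with it ...
    return true;
}

// Models the bottom half: it starts with the lock already held and releases
// it once servicing is done.
static void sketch_bottom_half(void)
{
    // ... service the pending work ...
    sem_post(&sketch_service_lock);
}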
+ cpu = get_cpu(); + ++parent_gpu->isr.replayable_faults.stats.bottom_half_count; + cpumask_set_cpu(cpu, &parent_gpu->isr.replayable_faults.stats.cpus_used_mask); + ++parent_gpu->isr.replayable_faults.stats.cpu_exec_count[cpu]; + put_cpu(); + + uvm_gpu_service_replayable_faults(gpu); + + uvm_gpu_replayable_faults_isr_unlock(parent_gpu); + +put_kref: + uvm_parent_gpu_kref_put(parent_gpu); +} + +static void replayable_faults_isr_bottom_half_entry(void *args) +{ + UVM_ENTRY_VOID(replayable_faults_isr_bottom_half(args)); +} + +static void non_replayable_faults_isr_bottom_half(void *args) +{ + uvm_parent_gpu_t *parent_gpu = (uvm_parent_gpu_t *)args; + uvm_gpu_t *gpu; + unsigned int cpu; + + gpu = find_first_valid_gpu(parent_gpu); + if (gpu == NULL) + goto put_kref; + + UVM_ASSERT(parent_gpu->non_replayable_faults_supported); + + uvm_gpu_non_replayable_faults_isr_lock(parent_gpu); + + // Multiple bottom halves for non-replayable faults can be running + // concurrently, but only one can enter this section for a given GPU + // since we acquired the non_replayable_faults.service_lock + cpu = get_cpu(); + ++parent_gpu->isr.non_replayable_faults.stats.bottom_half_count; + cpumask_set_cpu(cpu, &parent_gpu->isr.non_replayable_faults.stats.cpus_used_mask); + ++parent_gpu->isr.non_replayable_faults.stats.cpu_exec_count[cpu]; + put_cpu(); + + uvm_gpu_service_non_replayable_fault_buffer(gpu); + + uvm_gpu_non_replayable_faults_isr_unlock(parent_gpu); + +put_kref: + uvm_parent_gpu_kref_put(parent_gpu); +} + +static void non_replayable_faults_isr_bottom_half_entry(void *args) +{ + UVM_ENTRY_VOID(non_replayable_faults_isr_bottom_half(args)); +} + +static void access_counters_isr_bottom_half(void *args) +{ + uvm_parent_gpu_t *parent_gpu = (uvm_parent_gpu_t *)args; + uvm_gpu_t *gpu; + unsigned int cpu; + + gpu = find_first_valid_gpu(parent_gpu); + if (gpu == NULL) + goto put_kref; + + UVM_ASSERT(parent_gpu->access_counters_supported); + + uvm_record_lock(&parent_gpu->isr.access_counters.service_lock, UVM_LOCK_FLAGS_MODE_SHARED); + + // Multiple bottom halves for counter notifications can be running + // concurrently, but only one can be running this function for a given GPU + // since we enter with the access_counters_isr_lock held. + cpu = get_cpu(); + ++parent_gpu->isr.access_counters.stats.bottom_half_count; + cpumask_set_cpu(cpu, &parent_gpu->isr.access_counters.stats.cpus_used_mask); + ++parent_gpu->isr.access_counters.stats.cpu_exec_count[cpu]; + put_cpu(); + + uvm_gpu_service_access_counters(gpu); + + uvm_gpu_access_counters_isr_unlock(parent_gpu); + +put_kref: + uvm_parent_gpu_kref_put(parent_gpu); +} + +static void access_counters_isr_bottom_half_entry(void *args) +{ + UVM_ENTRY_VOID(access_counters_isr_bottom_half(args)); +} + +void uvm_gpu_replayable_faults_isr_lock(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(nv_kref_read(&parent_gpu->gpu_kref) > 0); + + uvm_spin_lock_irqsave(&parent_gpu->isr.interrupts_lock); + + // Bump the disable ref count. This guarantees that the bottom half or + // another thread trying to take the replayable_faults.service_lock won't + // inadvertently re-enable interrupts during this locking sequence. + uvm_gpu_replayable_faults_intr_disable(parent_gpu); + + uvm_spin_unlock_irqrestore(&parent_gpu->isr.interrupts_lock); + + // Now that we know replayable fault interrupts can't get enabled, take the + // lock. 
+ uvm_down(&parent_gpu->isr.replayable_faults.service_lock); +} + +void uvm_gpu_replayable_faults_isr_unlock(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(nv_kref_read(&parent_gpu->gpu_kref) > 0); + + uvm_spin_lock_irqsave(&parent_gpu->isr.interrupts_lock); + + // The following sequence is delicate: + // + // 1) Enable replayable page fault interrupts + // 2) Rearm pulse based interrupts + // 3) Unlock GPU isr.replayable_faults.service_lock (mutex) + // 4) Unlock isr.interrupts_lock (spin lock) + // + // ...because the moment that page fault interrupts are reenabled, a top + // half might start receiving them. A top-half cannot run on the core + // executing this code as interrupts are disabled as long as the + // interrupts_lock is held. If it runs on a different core, it's going to + // spin waiting for the interrupts_lock to be released by this core before + // attempting to acquire the service_lock mutex. Hence there is no risk of + // the top-half missing interrupts after they are reenabled, but before the + // service_lock mutex is released. + + if (parent_gpu->isr.replayable_faults.handling) { + // Turn page fault interrupts back on, unless remove_gpu() has already removed this GPU + // from the GPU table. remove_gpu() indicates that situation by setting + // gpu->replayable_faults.handling to false. + // + // This path can only be taken from the bottom half. User threads + // calling this function must have previously retained the GPU, so they + // can't race with remove_gpu. + // + // TODO: Bug 1766600: Assert that we're in a bottom half thread, once + // that's tracked by the lock assertion code. + // + // Note that if we're in the bottom half and the GPU was removed before + // we checked replayable_faults.handling, we won't drop our interrupt + // disable ref count from the corresponding top-half call to + // uvm_gpu_replayable_faults_intr_disable. That's ok because remove_gpu + // ignores the refcount after waiting for the bottom half to finish. + uvm_gpu_replayable_faults_intr_enable(parent_gpu); + + // Rearm pulse interrupts. This guarantees that the state of the pending + // interrupt is current and the top level rearm performed by RM is only + // going to trigger it if necessary. This avoids both of the possible + // bad cases: + // 1) GET != PUT but interrupt state is not pending + // This could lead to the interrupt being lost. + // 2) GET == PUT but interrupt state is pending + // This could lead to an interrupt storm as the top-half would see + // no work to be done, but the interrupt would get constantly + // retriggered by RM's top level rearm. + // clear_replayable_faults is a no-op for architectures that don't + // support pulse-based interrupts. + parent_gpu->fault_buffer_hal->clear_replayable_faults(parent_gpu, + parent_gpu->fault_buffer_info.replayable.cached_get); + } + + // This unlock call has to be out-of-order unlock due to interrupts_lock + // still being held. Otherwise, it would result in a lock order violation. 
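// A condensed, standalone sketch of the lock/unlock ordering implemented by
// this pair of functions, using a pthread mutex for the interrupts lock and
// a POSIX semaphore for the service lock. All names are placeholders; the
// point is only the ordering: ref-counted disable before taking the service
// lock, and on unlock re-enable + rearm, release the service lock, and only
// then drop the interrupts lock, so a top half cannot slip in between.

#include <pthread.h>
#include <semaphore.h>

typedef struct
{
    pthread_mutex_t interrupts_lock;  // models isr.interrupts_lock
    sem_t service_lock;               // models the service_lock semaphore
    unsigned disable_ref_count;       // models disable_intr_ref_count
} sketch_isr_state_t;

static void sketch_hw_intr_disable(void)          { /* would touch HW */ }
static void sketch_hw_intr_enable_and_rearm(void) { /* would touch HW */ }

static void sketch_isr_lock(sketch_isr_state_t *s)
{
    pthread_mutex_lock(&s->interrupts_lock);
    if (s->disable_ref_count++ == 0)
        sketch_hw_intr_disable();
    pthread_mutex_unlock(&s->interrupts_lock);

    sem_wait(&s->service_lock);       // taken only after interrupts are off
}

static void sketch_isr_unlock(sketch_isr_state_t *s)
{
    pthread_mutex_lock(&s->interrupts_lock);
    if (--s->disable_ref_count == 0)
        sketch_hw_intr_enable_and_rearm();

    sem_post(&s->service_lock);       // released before the interrupts lock,
                                      // mirroring the out-of-order unlock
    pthread_mutex_unlock(&s->interrupts_lock);
}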
+ uvm_up_out_of_order(&parent_gpu->isr.replayable_faults.service_lock); + + uvm_spin_unlock_irqrestore(&parent_gpu->isr.interrupts_lock); +} + +void uvm_gpu_non_replayable_faults_isr_lock(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(nv_kref_read(&parent_gpu->gpu_kref) > 0); + + uvm_down(&parent_gpu->isr.non_replayable_faults.service_lock); +} + +void uvm_gpu_non_replayable_faults_isr_unlock(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(nv_kref_read(&parent_gpu->gpu_kref) > 0); + + uvm_up(&parent_gpu->isr.non_replayable_faults.service_lock); +} + +void uvm_gpu_access_counters_isr_lock(uvm_parent_gpu_t *parent_gpu) +{ + // See comments in uvm_gpu_replayable_faults_isr_lock + + uvm_spin_lock_irqsave(&parent_gpu->isr.interrupts_lock); + + uvm_gpu_access_counters_intr_disable(parent_gpu); + + uvm_spin_unlock_irqrestore(&parent_gpu->isr.interrupts_lock); + + uvm_down(&parent_gpu->isr.access_counters.service_lock); +} + +void uvm_gpu_access_counters_isr_unlock(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(nv_kref_read(&parent_gpu->gpu_kref) > 0); + + // See comments in uvm_gpu_replayable_faults_isr_unlock + + uvm_spin_lock_irqsave(&parent_gpu->isr.interrupts_lock); + + uvm_gpu_access_counters_intr_enable(parent_gpu); + + if (parent_gpu->isr.access_counters.handling_ref_count > 0) { + parent_gpu->access_counter_buffer_hal->clear_access_counter_notifications(parent_gpu, + parent_gpu->access_counter_buffer_info.cached_get); + } + + // This unlock call has to be out-of-order unlock due to interrupts_lock + // still being held. Otherwise, it would result in a lock order violation. + uvm_up_out_of_order(&parent_gpu->isr.access_counters.service_lock); + + uvm_spin_unlock_irqrestore(&parent_gpu->isr.interrupts_lock); +} + +static void uvm_gpu_replayable_faults_intr_disable(uvm_parent_gpu_t *parent_gpu) +{ + uvm_assert_spinlock_locked(&parent_gpu->isr.interrupts_lock); + + if (parent_gpu->isr.replayable_faults.handling && parent_gpu->isr.replayable_faults.disable_intr_ref_count == 0) + parent_gpu->fault_buffer_hal->disable_replayable_faults(parent_gpu); + + ++parent_gpu->isr.replayable_faults.disable_intr_ref_count; +} + +static void uvm_gpu_replayable_faults_intr_enable(uvm_parent_gpu_t *parent_gpu) +{ + uvm_assert_spinlock_locked(&parent_gpu->isr.interrupts_lock); + UVM_ASSERT(parent_gpu->isr.replayable_faults.disable_intr_ref_count > 0); + + --parent_gpu->isr.replayable_faults.disable_intr_ref_count; + if (parent_gpu->isr.replayable_faults.handling && parent_gpu->isr.replayable_faults.disable_intr_ref_count == 0) + parent_gpu->fault_buffer_hal->enable_replayable_faults(parent_gpu); +} + +void uvm_gpu_access_counters_intr_disable(uvm_parent_gpu_t *parent_gpu) +{ + uvm_assert_spinlock_locked(&parent_gpu->isr.interrupts_lock); + + // The read of handling_ref_count could race with a write from + // gpu_access_counters_enable/disable, since here we may not hold the + // ISR lock. 
But those functions are invoked with the interrupt disabled + // (disable_intr_ref_count > 0), so the check always returns false when the + // race occurs + if (parent_gpu->isr.access_counters.handling_ref_count > 0 && + parent_gpu->isr.access_counters.disable_intr_ref_count == 0) { + parent_gpu->access_counter_buffer_hal->disable_access_counter_notifications(parent_gpu); + } + + ++parent_gpu->isr.access_counters.disable_intr_ref_count; +} + +void uvm_gpu_access_counters_intr_enable(uvm_parent_gpu_t *parent_gpu) +{ + uvm_assert_spinlock_locked(&parent_gpu->isr.interrupts_lock); + UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.access_counters.service_lock)); + UVM_ASSERT(parent_gpu->isr.access_counters.disable_intr_ref_count > 0); + + --parent_gpu->isr.access_counters.disable_intr_ref_count; + + if (parent_gpu->isr.access_counters.handling_ref_count > 0 && + parent_gpu->isr.access_counters.disable_intr_ref_count == 0) { + parent_gpu->access_counter_buffer_hal->enable_access_counter_notifications(parent_gpu); + } +} diff --git a/kernel-open/nvidia-uvm/uvm_gpu_isr.h b/kernel-open/nvidia-uvm/uvm_gpu_isr.h new file mode 100644 index 000000000..e11f2bdda --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_isr.h @@ -0,0 +1,196 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_GPU_ISR_H__ +#define __UVM_GPU_ISR_H__ + +#include "nv-kthread-q.h" +#include "uvm_common.h" +#include "uvm_lock.h" +#include "uvm_forward_decl.h" + +// ISR handling state for a specific interrupt type +typedef struct +{ + // Protects against changes to the GPU data structures used by the handling + // routines of this interrupt type. + uvm_semaphore_t service_lock; + + // Bottom-half to be executed for this interrupt. There is one bottom-half + // per interrupt type. + nv_kthread_q_item_t bottom_half_q_item; + + union + { + // Used for replayable and non-replayable faults. + struct + { + // This is set to true during add_gpu(), if the GPU supports the + // interrupt. It is set back to false during remove_gpu(). + // interrupts_lock must be held in order to write this variable. + bool handling; + + // Variable set in uvm_gpu_disable_isr() during remove_gpu() to + // indicate if this type of interrupt was being handled by the + // driver. + bool was_handling; + }; + + // Used for access counters. 
+ // + // If the GPU does not support access counters, the ref count is always + // zero. Otherwise, the refcount is incremented when the GPU is + // registered in a VA space for the first time, and decremented when + // unregistered or the VA space is destroyed. + // + // Locking: protected by the GPU access counters ISR lock. Naked + // accesses are allowed during GPU addition and removal. + NvU64 handling_ref_count; + }; + + struct + { + // Number of the bottom-half invocations for this interrupt on a GPU over + // its lifetime + NvU64 bottom_half_count; + + // A bitmask of the CPUs on which the bottom half has executed. The + // corresponding bit gets set once the bottom half executes on that + // CPU. + // This mask is useful when testing that the bottom half is getting + // executed on the correct set of CPUs. + struct cpumask cpus_used_mask; + + // An array (one per possible CPU), which holds the number of times the + // bottom half has executed on that CPU. + NvU64 *cpu_exec_count; + } stats; + + // This is the number of times the function that disables this type of + // interrupt has been called without a corresponding call to the function + // that enables it. If this is > 0, interrupts are disabled. This field is + // protected by interrupts_lock. This field is only valid for interrupts + // directly owned by UVM: + // - replayable_faults + // - access_counters + NvU64 disable_intr_ref_count; +} uvm_intr_handler_t; + +// State for all ISR handling in UVM +typedef struct +{ + // This is set by uvm_suspend() and uvm_resume() to indicate whether + // top-half ISR processing is suspended for power management. Calls from + // the RM's top-half are to be completed without processing when this + // flag is set to true. + bool is_suspended; + + // There is exactly one nv_kthread_q per GPU. It is used for the ISR bottom + // halves. So N CPUs will be servicing M GPUs, in general. There is one + // bottom-half per interrupt type. + nv_kthread_q_t bottom_half_q; + + // Protects the state of interrupts (enabled/disabled) and whether the GPU is + // currently handling them. Taken in both interrupt and process context. + uvm_spinlock_irqsave_t interrupts_lock; + + uvm_intr_handler_t replayable_faults; + uvm_intr_handler_t non_replayable_faults; + uvm_intr_handler_t access_counters; + + // Kernel thread used to kill channels on fatal non-replayable faults. + // This is needed because we cannot call into RM from the bottom-half to + // avoid deadlocks. + nv_kthread_q_t kill_channel_q; + + // Number of top-half ISRs called for this GPU over its lifetime + NvU64 interrupt_count; +} uvm_isr_info_t; + +// Entry point for interrupt handling. This is called from RM's top half +NV_STATUS uvm_isr_top_half_entry(const NvProcessorUuid *gpu_uuid); + +// Initialize ISR handling state +NV_STATUS uvm_gpu_init_isr(uvm_parent_gpu_t *parent_gpu); + +// Flush any currently scheduled bottom halves. This is called during GPU +// removal. +void uvm_gpu_flush_bottom_halves(uvm_parent_gpu_t *parent_gpu); + +// Prevent new bottom halves from being scheduled. This is called during parent +// GPU removal. +void uvm_gpu_disable_isr(uvm_parent_gpu_t *parent_gpu); + +// Destroy ISR handling state and return interrupt ownership to RM. This is +// called during parent GPU removal +void uvm_gpu_deinit_isr(uvm_parent_gpu_t *parent_gpu); + +// Take parent_gpu->isr.replayable_faults.service_lock from a non-top/bottom +// half thread. 
This will also disable replayable page fault interrupts (if +// supported by the GPU) because the top half attempts to take this lock, and we +// would cause an interrupt storm if we didn't disable them first. +// +// At least one GPU under the parent must have been previously retained. +void uvm_gpu_replayable_faults_isr_lock(uvm_parent_gpu_t *parent_gpu); + +// Unlock parent_gpu->isr.replayable_faults.service_lock. This call may +// re-enable replayable page fault interrupts. Unlike +// uvm_gpu_replayable_faults_isr_lock(), which should only called from +// non-top/bottom half threads, this can be called by any thread. +void uvm_gpu_replayable_faults_isr_unlock(uvm_parent_gpu_t *parent_gpu); + +// Lock/unlock routines for non-replayable faults. These do not need to prevent +// interrupt storms since the GPU fault buffers for non-replayable faults are +// managed by RM. Unlike uvm_gpu_replayable_faults_isr_lock, no GPUs under +// the parent need to have been previously retained. +void uvm_gpu_non_replayable_faults_isr_lock(uvm_parent_gpu_t *parent_gpu); +void uvm_gpu_non_replayable_faults_isr_unlock(uvm_parent_gpu_t *parent_gpu); + +// See uvm_gpu_replayable_faults_isr_lock/unlock +void uvm_gpu_access_counters_isr_lock(uvm_parent_gpu_t *parent_gpu); +void uvm_gpu_access_counters_isr_unlock(uvm_parent_gpu_t *parent_gpu); + +// Increments the reference count tracking whether access counter interrupts +// should be disabled. The caller is guaranteed that access counter interrupts +// are disabled upon return. Interrupts might already be disabled prior to +// making this call. Each call is ref-counted, so this must be paired with a +// call to uvm_gpu_access_counters_intr_enable(). +// +// parent_gpu->isr.interrupts_lock must be held to call this function. +void uvm_gpu_access_counters_intr_disable(uvm_parent_gpu_t *parent_gpu); + +// Decrements the reference count tracking whether access counter interrupts +// should be disabled. Only once the count reaches 0 are the HW interrupts +// actually enabled, so this call does not guarantee that the interrupts have +// been re-enabled upon return. +// +// uvm_gpu_access_counters_intr_disable() must have been called prior to calling +// this function. +// +// NOTE: For pulse-based interrupts, the caller is responsible for re-arming +// the interrupt. +// +// parent_gpu->isr.interrupts_lock must be held to call this function. +void uvm_gpu_access_counters_intr_enable(uvm_parent_gpu_t *parent_gpu); + +#endif // __UVM_GPU_ISR_H__ diff --git a/kernel-open/nvidia-uvm/uvm_gpu_non_replayable_faults.c b/kernel-open/nvidia-uvm/uvm_gpu_non_replayable_faults.c new file mode 100644 index 000000000..3be8a73b2 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_non_replayable_faults.c @@ -0,0 +1,683 @@ +/******************************************************************************* + Copyright (c) 2017-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#include "nv_uvm_interface.h" +#include "uvm_common.h" +#include "uvm_api.h" +#include "uvm_gpu_non_replayable_faults.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" +#include "uvm_lock.h" +#include "uvm_tools.h" +#include "uvm_user_channel.h" +#include "uvm_va_space_mm.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_kvmalloc.h" +#include "uvm_ats_faults.h" + +// In the context of a CUDA application using Unified Memory, it is sometimes +// assumed that there is a single type of fault, originated by a memory +// load/store in a SM (Graphics Engine), which itself can be traced back to a +// memory access in a CUDA kernel written by a developer. In reality, faults can +// also be triggered by other parts of the GPU i.e. by other engines, as the +// result of developer-facing APIs, or operations initiated by a user-mode +// driver. The Graphics Engine faults are called replayable faults, while the +// rest are called non-replayable. The differences between the two types of +// faults go well beyond the engine originating the fault. +// +// A non-replayable fault originates in an engine other than Graphics. UVM +// services non-replayable faults from the Copy and PBDMA (Host/ESCHED) Engines. +// Non-replayable faults originated in other engines are considered fatal, and +// do not reach the UVM driver. While UVM can distinguish between faults +// originated in the Copy Engine and faults originated in the PBDMA Engine, in +// practice they are all processed in the same way. Replayable fault support in +// Graphics was introduced in Pascal, and non-replayable fault support in CE and +// PBDMA Engines was introduced in Volta; all non-replayable faults were fatal +// before Volta. +// +// An example of a Copy Engine non-replayable fault is a memory copy between two +// virtual addresses on a GPU, in which either the source or destination +// pointers are not currently mapped to a physical address in the page tables of +// the GPU. An example of a PBDMA non-replayable fault is a semaphore acquire in +// which the semaphore virtual address passed as argument is currently not +// mapped to any physical address. +// +// Non-replayable faults originated in the CE and PBDMA Engines result in HW +// preempting the channel associated with the fault, a mechanism called "fault +// and switch". More precisely, the switching out affects not only the channel +// that caused the fault, but all the channels in the same Time Slice Group +// (TSG). SW intervention is required so all the channels in the TSG can be +// scheduled again, but channels in other TSGs can be scheduled and resume their +// normal execution. In the case of the non-replayable faults serviced by UVM, +// the driver clears a channel's faulted bit upon successful servicing, but it +// is only when the servicing has completed for all the channels in the TSG that +// they are all allowed to be switched in. 
Non-replayable faults originated in +// engines other than CE and PBDMA are fatal because these other units lack +// hardware support for the "fault and switch" and restart mechanisms just +// described. +// On the other hand, replayable faults block preemption of the channel until +// software (UVM) services the fault. This is sometimes known as "fault and +// stall". Note that replayable faults prevent the execution of other channels, +// which are stalled until the fault is serviced. +// +// The "non-replayable" naming alludes to the fact that, historically, these +// faults indicated a fatal condition so there was no recovery ("replay") +// process, and SW could not ignore or drop the fault. As discussed before, this +// is no longer the case and while at times the hardware documentation uses the +// "fault and replay" expression for CE and PBDMA faults, we reserve that +// expression for Graphics faults and favor the term "fault and reschedule" +// instead. Replaying a fault does not necessarily imply that UVM has serviced +// it. For example, the UVM driver may choose to ignore the replayable faults +// associated with a GPU for some period of time if it detects that there is +// thrashing going on, and the GPU needs to be throttled. The fault entries +// corresponding to the ignored faults are never saved by UVM, but new entries +// (and new interrupts) will be generated by hardware each time after UVM issues +// a replay. +// +// While replayable faults are always the responsibility of UVM, the servicing +// of non-replayable faults is split between RM and UVM. In the case of +// replayable faults, UVM has sole SW ownership of the hardware buffer +// containing the faults, and it is responsible for updating the GET pointer to +// signal the hardware that a number of faults have been read. UVM also reads +// the PUT pointer value written by hardware. But in the case of non-replayable +// faults, UVM reads the fault entries out of a regular CPU buffer, shared with +// RM, called "shadow buffer". RM is responsible for accessing the actual +// non-replayable hardware buffer, reading the PUT pointer, updating the GET +// pointer, and moving CE and PBDMA faults from the hardware buffer to the +// shadow buffer. Because the Resource Manager owns the HW buffer, UVM needs to +// call RM when servicing a non-replayable fault, first to figure out if there +// is a pending fault, and then to read entries from the shadow buffer. +// +// Once UVM has parsed a non-replayable fault entry corresponding to managed +// memory, and identified the VA block associated with it, the servicing logic +// for that block is identical to that of a replayable fault, see +// uvm_va_block_service_locked. Another similarity between the two types of +// faults is that they use the same entry format, uvm_fault_buffer_entry_t. + + +// There is no error handling in this function. The caller is in charge of +// calling uvm_gpu_fault_buffer_deinit_non_replayable_faults on failure. 
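// Before the implementation below, a minimal standalone sketch of that
// init/deinit contract: init may fail part-way and leave partially
// initialized state, and the caller is expected to invoke the deinit path,
// which must therefore tolerate NULL/partial state. All names and fields
// here are simplified placeholders, not the driver's types.

#include <stdlib.h>

typedef struct
{
    void *shadow_buffer_copy;
    void *fault_cache;
} sketch_nr_faults_t;

static int sketch_nr_faults_init(sketch_nr_faults_t *f, size_t buf_size, size_t cache_size)
{
    f->shadow_buffer_copy = calloc(1, buf_size);
    if (!f->shadow_buffer_copy)
        return -1;                    // partial state is left for deinit

    f->fault_cache = calloc(1, cache_size);
    if (!f->fault_cache)
        return -1;

    return 0;
}

static void sketch_nr_faults_deinit(sketch_nr_faults_t *f)
{
    free(f->shadow_buffer_copy);      // free(NULL) is a no-op, so this is
    free(f->fault_cache);             // safe on partially initialized state
    f->shadow_buffer_copy = NULL;
    f->fault_cache = NULL;
}

// Caller pattern:
//     if (sketch_nr_faults_init(&f, buf_size, cache_size) != 0)
//         sketch_nr_faults_deinit(&f);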
+NV_STATUS uvm_gpu_fault_buffer_init_non_replayable_faults(uvm_parent_gpu_t *parent_gpu) +{ + uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &parent_gpu->fault_buffer_info.non_replayable; + + UVM_ASSERT(parent_gpu->non_replayable_faults_supported); + + non_replayable_faults->shadow_buffer_copy = NULL; + non_replayable_faults->fault_cache = NULL; + + non_replayable_faults->max_faults = parent_gpu->fault_buffer_info.rm_info.nonReplayable.bufferSize / + parent_gpu->fault_buffer_hal->entry_size(parent_gpu); + + non_replayable_faults->shadow_buffer_copy = + uvm_kvmalloc_zero(parent_gpu->fault_buffer_info.rm_info.nonReplayable.bufferSize); + if (!non_replayable_faults->shadow_buffer_copy) + return NV_ERR_NO_MEMORY; + + non_replayable_faults->fault_cache = uvm_kvmalloc_zero(non_replayable_faults->max_faults * + sizeof(*non_replayable_faults->fault_cache)); + if (!non_replayable_faults->fault_cache) + return NV_ERR_NO_MEMORY; + + uvm_tracker_init(&non_replayable_faults->clear_faulted_tracker); + uvm_tracker_init(&non_replayable_faults->fault_service_tracker); + + return NV_OK; +} + +void uvm_gpu_fault_buffer_deinit_non_replayable_faults(uvm_parent_gpu_t *parent_gpu) +{ + uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &parent_gpu->fault_buffer_info.non_replayable; + + if (non_replayable_faults->fault_cache) { + UVM_ASSERT(uvm_tracker_is_empty(&non_replayable_faults->clear_faulted_tracker)); + uvm_tracker_deinit(&non_replayable_faults->clear_faulted_tracker); + + UVM_ASSERT(uvm_tracker_is_empty(&non_replayable_faults->fault_service_tracker)); + uvm_tracker_deinit(&non_replayable_faults->fault_service_tracker); + } + + uvm_kvfree(non_replayable_faults->shadow_buffer_copy); + uvm_kvfree(non_replayable_faults->fault_cache); + non_replayable_faults->shadow_buffer_copy = NULL; + non_replayable_faults->fault_cache = NULL; +} + +bool uvm_gpu_non_replayable_faults_pending(uvm_parent_gpu_t *parent_gpu) +{ + NV_STATUS status; + NvBool has_pending_faults; + + UVM_ASSERT(parent_gpu->isr.non_replayable_faults.handling); + + status = nvUvmInterfaceHasPendingNonReplayableFaults(&parent_gpu->fault_buffer_info.rm_info, + &has_pending_faults); + UVM_ASSERT(status == NV_OK); + + return has_pending_faults == NV_TRUE; +} + +static NvU32 fetch_non_replayable_fault_buffer_entries(uvm_gpu_t *gpu) +{ + NV_STATUS status; + NvU32 i = 0; + NvU32 cached_faults = 0; + uvm_fault_buffer_entry_t *fault_cache; + NvU32 entry_size = gpu->parent->fault_buffer_hal->entry_size(gpu->parent); + uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable; + char *current_hw_entry = (char *)non_replayable_faults->shadow_buffer_copy; + + fault_cache = non_replayable_faults->fault_cache; + + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.non_replayable_faults.service_lock)); + UVM_ASSERT(gpu->parent->non_replayable_faults_supported); + + status = nvUvmInterfaceGetNonReplayableFaults(&gpu->parent->fault_buffer_info.rm_info, + non_replayable_faults->shadow_buffer_copy, + &cached_faults); + UVM_ASSERT(status == NV_OK); + + // Parse all faults + for (i = 0; i < cached_faults; ++i) { + uvm_fault_buffer_entry_t *fault_entry = &non_replayable_faults->fault_cache[i]; + + gpu->parent->fault_buffer_hal->parse_non_replayable_entry(gpu->parent, current_hw_entry, fault_entry); + + // The GPU aligns the fault addresses to 4k, but all of our tracking is + // done in PAGE_SIZE chunks which might be larger. 
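// A small standalone illustration of that align-down, assuming a 64KiB
// kernel PAGE_SIZE while the GPU reports 4KiB-aligned fault addresses.
// UVM_PAGE_ALIGN_DOWN is the driver's macro; the stand-in below shows the
// usual power-of-two mask arithmetic.

#include <stdint.h>

#define SKETCH_PAGE_SIZE        0x10000ull                  // 64KiB
#define SKETCH_ALIGN_DOWN(addr) ((addr) & ~(SKETCH_PAGE_SIZE - 1))

// SKETCH_ALIGN_DOWN(0x7f001234f000ull) == 0x7f0012340000ull, so every fault
// reported within the same 64KiB page collapses onto a single tracked page.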
+ fault_entry->fault_address = UVM_PAGE_ALIGN_DOWN(fault_entry->fault_address); + + // Make sure that all fields in the entry are properly initialized + fault_entry->va_space = NULL; + fault_entry->is_fatal = (fault_entry->fault_type >= UVM_FAULT_TYPE_FATAL); + fault_entry->filtered = false; + + fault_entry->num_instances = 1; + fault_entry->access_type_mask = uvm_fault_access_type_mask_bit(fault_entry->fault_access_type); + INIT_LIST_HEAD(&fault_entry->merged_instances_list); + fault_entry->non_replayable.buffer_index = i; + + if (fault_entry->is_fatal) { + // Record the fatal fault event later as we need the va_space locked + fault_entry->fatal_reason = UvmEventFatalReasonInvalidFaultType; + } + else { + fault_entry->fatal_reason = UvmEventFatalReasonInvalid; + } + + current_hw_entry += entry_size; + } + + return cached_faults; +} + +// In SRIOV, the UVM (guest) driver does not have access to the privileged +// registers used to clear the faulted bit. Instead, UVM requests host RM to do +// the clearing on its behalf, using a SW method. +static bool use_clear_faulted_channel_sw_method(uvm_gpu_t *gpu) +{ + if (uvm_gpu_is_virt_mode_sriov(gpu)) { + UVM_ASSERT(gpu->parent->has_clear_faulted_channel_sw_method); + return true; + } + + return false; +} + +static NV_STATUS clear_faulted_method_on_gpu(uvm_gpu_t *gpu, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *fault_entry, + NvU32 batch_id, + uvm_tracker_t *tracker) +{ + NV_STATUS status; + uvm_push_t push; + uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable; + + UVM_ASSERT(!fault_entry->is_fatal); + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + tracker, + &push, + "Clearing set bit for address 0x%llx", + fault_entry->fault_address); + if (status != NV_OK) { + UVM_ERR_PRINT("Error acquiring tracker before clearing faulted: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + if (use_clear_faulted_channel_sw_method(gpu)) + gpu->parent->host_hal->clear_faulted_channel_sw_method(&push, user_channel, fault_entry); + else + gpu->parent->host_hal->clear_faulted_channel_method(&push, user_channel, fault_entry); + + uvm_tools_broadcast_replay(gpu, &push, batch_id, fault_entry->fault_source.client_type); + + uvm_push_end(&push); + + // Add this push to the GPU's clear_faulted_tracker so GPU removal can wait + // on it. + status = uvm_tracker_add_push_safe(&non_replayable_faults->clear_faulted_tracker, &push); + + // Add this push to the channel's clear_faulted_tracker so user channel + // removal can wait on it instead of using the per-GPU tracker, which would + // require a lock. 
+ if (status == NV_OK) + status = uvm_tracker_add_push_safe(&user_channel->clear_faulted_tracker, &push); + + return status; +} + +static NV_STATUS clear_faulted_register_on_gpu(uvm_gpu_t *gpu, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *fault_entry, + NvU32 batch_id, + uvm_tracker_t *tracker) +{ + NV_STATUS status; + + UVM_ASSERT(!gpu->parent->has_clear_faulted_channel_method); + + // We need to wait for all pending work before writing to the channel + // register + status = uvm_tracker_wait(tracker); + if (status != NV_OK) + return status; + + gpu->parent->host_hal->clear_faulted_channel_register(user_channel, fault_entry); + + uvm_tools_broadcast_replay_sync(gpu, batch_id, fault_entry->fault_source.client_type); + + return NV_OK; +} + +static NV_STATUS clear_faulted_on_gpu(uvm_gpu_t *gpu, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *fault_entry, + NvU32 batch_id, + uvm_tracker_t *tracker) +{ + if (gpu->parent->has_clear_faulted_channel_method || use_clear_faulted_channel_sw_method(gpu)) + return clear_faulted_method_on_gpu(gpu, user_channel, fault_entry, batch_id, tracker); + + return clear_faulted_register_on_gpu(gpu, user_channel, fault_entry, batch_id, tracker); +} + +static NV_STATUS service_managed_fault_in_block_locked(uvm_gpu_t *gpu, + uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_fault_buffer_entry_t *fault_entry, + uvm_service_block_context_t *service_context) +{ + NV_STATUS status = NV_OK; + uvm_page_index_t page_index; + uvm_perf_thrashing_hint_t thrashing_hint; + uvm_processor_id_t new_residency; + bool read_duplicate; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_va_range_t *va_range = va_block->va_range; + uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable; + + UVM_ASSERT(!fault_entry->is_fatal); + + uvm_assert_rwsem_locked(&va_space->lock); + + UVM_ASSERT(fault_entry->va_space == va_space); + UVM_ASSERT(fault_entry->fault_address >= va_block->start); + UVM_ASSERT(fault_entry->fault_address <= va_block->end); + + service_context->block_context.policy = uvm_va_policy_get(va_block, fault_entry->fault_address); + + if (service_context->num_retries == 0) { + // notify event to tools/performance heuristics. For now we use a + // unique batch id per fault, since we clear the faulted channel for + // each fault. 
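+        // The num_retries check above means the event is emitted only on the
+        // first pass: UVM_VA_BLOCK_RETRY_LOCKED may re-invoke this function
+        // after an allocation retry (num_retries is incremented at the bottom
+        // of this function), and re-notifying on every retry would
+        // double-count the fault.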
+ uvm_perf_event_notify_gpu_fault(&va_space->perf_events, + va_block, + gpu->id, + service_context->block_context.policy->preferred_location, + fault_entry, + ++non_replayable_faults->batch_id, + false); + } + + // Check logical permissions + status = uvm_va_range_check_logical_permissions(va_range, + gpu->id, + fault_entry->fault_access_type, + uvm_range_group_address_migratable(va_space, + fault_entry->fault_address)); + if (status != NV_OK) { + fault_entry->is_fatal = true; + fault_entry->fatal_reason = uvm_tools_status_to_fatal_fault_reason(status); + return NV_OK; + } + + // TODO: Bug 1880194: Revisit thrashing detection + thrashing_hint.type = UVM_PERF_THRASHING_HINT_TYPE_NONE; + + service_context->read_duplicate_count = 0; + service_context->thrashing_pin_count = 0; + + page_index = uvm_va_block_cpu_page_index(va_block, fault_entry->fault_address); + + // Compute new residency and update the masks + new_residency = uvm_va_block_select_residency(va_block, + page_index, + gpu->id, + fault_entry->access_type_mask, + service_context->block_context.policy, + &thrashing_hint, + UVM_SERVICE_OPERATION_NON_REPLAYABLE_FAULTS, + &read_duplicate); + + // Initialize the minimum necessary state in the fault service context + uvm_processor_mask_zero(&service_context->resident_processors); + + // Set new residency and update the masks + uvm_processor_mask_set(&service_context->resident_processors, new_residency); + + // The masks need to be fully zeroed as the fault region may grow due to prefetching + uvm_page_mask_zero(&service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency); + uvm_page_mask_set(&service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency, page_index); + + if (read_duplicate) { + uvm_page_mask_zero(&service_context->read_duplicate_mask); + uvm_page_mask_set(&service_context->read_duplicate_mask, page_index); + service_context->read_duplicate_count = 1; + } + + service_context->access_type[page_index] = fault_entry->fault_access_type; + + service_context->region = uvm_va_block_region_for_page(page_index); + + status = uvm_va_block_service_locked(gpu->id, va_block, va_block_retry, service_context); + + ++service_context->num_retries; + + return status; +} + +static NV_STATUS service_managed_fault_in_block(uvm_gpu_t *gpu, + struct mm_struct *mm, + uvm_va_block_t *va_block, + uvm_fault_buffer_entry_t *fault_entry) +{ + NV_STATUS status, tracker_status; + uvm_va_block_retry_t va_block_retry; + uvm_service_block_context_t *service_context = &gpu->parent->fault_buffer_info.non_replayable.block_service_context; + + service_context->operation = UVM_SERVICE_OPERATION_NON_REPLAYABLE_FAULTS; + service_context->num_retries = 0; + service_context->block_context.mm = mm; + + uvm_mutex_lock(&va_block->lock); + + status = UVM_VA_BLOCK_RETRY_LOCKED(va_block, &va_block_retry, + service_managed_fault_in_block_locked(gpu, + va_block, + &va_block_retry, + fault_entry, + service_context)); + + tracker_status = uvm_tracker_add_tracker_safe(&gpu->parent->fault_buffer_info.non_replayable.fault_service_tracker, + &va_block->tracker); + + uvm_mutex_unlock(&va_block->lock); + + return status == NV_OK? tracker_status: status; +} + +// See uvm_unregister_channel for comments on the the channel destruction +// sequence. 
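+// In short, the bottom half never tears a channel down synchronously:
+// schedule_kill_channel() retains the channel, stashes the raw fault packet,
+// and queues kill_channel_delayed() on the kill_channel_q kthread queue. That
+// work item forwards the packet to RM, which performs the actual reporting
+// and channel teardown, and then drops the channel reference.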
+static void kill_channel_delayed(void *_user_channel) +{ + uvm_user_channel_t *user_channel = (uvm_user_channel_t *)_user_channel; + uvm_va_space_t *va_space = user_channel->kill_channel.va_space; + + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + + uvm_va_space_down_read_rm(va_space); + if (user_channel->gpu_va_space) { + // RM handles the fault, which will do the correct fault reporting in the + // kernel logs and will initiate channel teardown + NV_STATUS status = nvUvmInterfaceReportNonReplayableFault(uvm_gpu_device_handle(user_channel->gpu), + user_channel->kill_channel.fault_packet); + UVM_ASSERT(status == NV_OK); + } + uvm_va_space_up_read_rm(va_space); + + uvm_user_channel_release(user_channel); +} + +static void kill_channel_delayed_entry(void *user_channel) +{ + UVM_ENTRY_VOID(kill_channel_delayed(user_channel)); +} + +static void schedule_kill_channel(uvm_gpu_t *gpu, + uvm_fault_buffer_entry_t *fault_entry, + uvm_user_channel_t *user_channel) +{ + uvm_va_space_t *va_space = fault_entry->va_space; + uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable; + void *packet = (char *)non_replayable_faults->shadow_buffer_copy + + (fault_entry->non_replayable.buffer_index * gpu->parent->fault_buffer_hal->entry_size(gpu->parent)); + + UVM_ASSERT(gpu); + UVM_ASSERT(va_space); + UVM_ASSERT(user_channel); + + if (user_channel->kill_channel.scheduled) + return; + + user_channel->kill_channel.scheduled = true; + user_channel->kill_channel.va_space = va_space; + + // Save the packet to be handled by RM in the channel structure + memcpy(user_channel->kill_channel.fault_packet, packet, gpu->parent->fault_buffer_hal->entry_size(gpu->parent)); + + // Retain the channel here so it is not prematurely destroyed. It will be + // released after forwarding the fault to RM in kill_channel_delayed. + uvm_user_channel_retain(user_channel); + + // Schedule a work item to kill the channel + nv_kthread_q_item_init(&user_channel->kill_channel.kill_channel_q_item, + kill_channel_delayed_entry, + user_channel); + + nv_kthread_q_schedule_q_item(&gpu->parent->isr.kill_channel_q, + &user_channel->kill_channel.kill_channel_q_item); +} + +static NV_STATUS service_non_managed_fault(uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm, + uvm_fault_buffer_entry_t *fault_entry, + NV_STATUS lookup_status) +{ + uvm_gpu_t *gpu = gpu_va_space->gpu; + uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable; + uvm_ats_fault_invalidate_t *ats_invalidate = &non_replayable_faults->ats_invalidate; + NV_STATUS status = lookup_status; + + UVM_ASSERT(!fault_entry->is_fatal); + + // Avoid dropping fault events when the VA block is not found or cannot be created + uvm_perf_event_notify_gpu_fault(&fault_entry->va_space->perf_events, + NULL, + gpu->id, + UVM_ID_INVALID, + fault_entry, + ++non_replayable_faults->batch_id, + false); + + if (status != NV_ERR_INVALID_ADDRESS) + return status; + + if (uvm_ats_can_service_faults(gpu_va_space, mm)) { + ats_invalidate->write_faults_in_batch = false; + + // The VA isn't managed. See if ATS knows about it. 
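+        // (For example, an address that came from plain malloc() on an
+        // ATS-enabled system.) uvm_ats_service_fault_entry() resolves the
+        // fault against the process mm; ats_invalidate, reset just above,
+        // accumulates any ATS TLB invalidation work the servicing requires,
+        // and uvm_ats_invalidate_tlbs() below issues it and adds it to the
+        // fault_service_tracker.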
+ status = uvm_ats_service_fault_entry(gpu_va_space, fault_entry, ats_invalidate); + + // Invalidate ATS TLB entries if needed + if (status == NV_OK) { + status = uvm_ats_invalidate_tlbs(gpu_va_space, + ats_invalidate, + &non_replayable_faults->fault_service_tracker); + } + } + else { + UVM_ASSERT(fault_entry->fault_access_type != UVM_FAULT_ACCESS_TYPE_PREFETCH); + fault_entry->is_fatal = true; + fault_entry->fatal_reason = uvm_tools_status_to_fatal_fault_reason(status); + + // Do not return error due to logical errors in the application + status = NV_OK; + } + + return status; +} + +static NV_STATUS service_fault(uvm_gpu_t *gpu, uvm_fault_buffer_entry_t *fault_entry) +{ + NV_STATUS status; + uvm_user_channel_t *user_channel; + uvm_va_block_t *va_block; + uvm_va_space_t *va_space = NULL; + struct mm_struct *mm; + uvm_gpu_va_space_t *gpu_va_space; + uvm_non_replayable_fault_buffer_info_t *non_replayable_faults = &gpu->parent->fault_buffer_info.non_replayable; + uvm_va_block_context_t *va_block_context = + &gpu->parent->fault_buffer_info.non_replayable.block_service_context.block_context; + + status = uvm_gpu_fault_entry_to_va_space(gpu, fault_entry, &va_space); + if (status != NV_OK) { + // The VA space lookup will fail if we're running concurrently with + // removal of the channel from the VA space (channel unregister, GPU VA + // space unregister, VA space destroy, etc). The other thread will stop + // the channel and remove the channel from the table, so the faulting + // condition will be gone. In the case of replayable faults we need to + // flush the buffer, but here we can just ignore the entry and proceed + // on. + // + // Note that we can't have any subcontext issues here, since non- + // replayable faults only use the address space of their channel. + UVM_ASSERT(status == NV_ERR_INVALID_CHANNEL); + UVM_ASSERT(!va_space); + return NV_OK; + } + + UVM_ASSERT(va_space); + + // If an mm is registered with the VA space, we have to retain it + // in order to lock it before locking the VA space. It is guaranteed + // to remain valid until we release. If no mm is registered, we + // can only service managed faults, not ATS/HMM faults. + mm = uvm_va_space_mm_retain_lock(va_space); + + uvm_va_space_down_read(va_space); + + gpu_va_space = uvm_gpu_va_space_get_by_parent_gpu(va_space, gpu->parent); + + if (!gpu_va_space) { + // The va_space might have gone away. See the comment above. + status = NV_OK; + goto exit_no_channel; + } + + fault_entry->va_space = va_space; + + user_channel = uvm_gpu_va_space_get_user_channel(gpu_va_space, fault_entry->instance_ptr); + if (!user_channel) { + // The channel might have gone away. See the comment above. 
+ status = NV_OK; + goto exit_no_channel; + } + + fault_entry->fault_source.channel_id = user_channel->hw_channel_id; + + if (!fault_entry->is_fatal) { + status = uvm_va_block_find_create(fault_entry->va_space, + mm, + fault_entry->fault_address, + va_block_context, + &va_block); + if (status == NV_OK) + status = service_managed_fault_in_block(gpu_va_space->gpu, mm, va_block, fault_entry); + else + status = service_non_managed_fault(gpu_va_space, mm, fault_entry, status); + + // We are done, we clear the faulted bit on the channel, so it can be + // re-scheduled again + if (status == NV_OK && !fault_entry->is_fatal) { + status = clear_faulted_on_gpu(gpu, + user_channel, + fault_entry, + non_replayable_faults->batch_id, + &non_replayable_faults->fault_service_tracker); + uvm_tracker_clear(&non_replayable_faults->fault_service_tracker); + } + } + + if (fault_entry->is_fatal) + uvm_tools_record_gpu_fatal_fault(gpu->parent->id, fault_entry->va_space, fault_entry, fault_entry->fatal_reason); + + if (status != NV_OK || fault_entry->is_fatal) + schedule_kill_channel(gpu, fault_entry, user_channel); + +exit_no_channel: + uvm_va_space_up_read(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + + return status; +} + +void uvm_gpu_service_non_replayable_fault_buffer(uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + NvU32 cached_faults; + + // If this handler is modified to handle fewer than all of the outstanding + // faults, then special handling will need to be added to uvm_suspend() + // to guarantee that fault processing has completed before control is + // returned to the RM. + while ((cached_faults = fetch_non_replayable_fault_buffer_entries(gpu)) > 0) { + NvU32 i; + + // Differently to replayable faults, we do not batch up and preprocess + // non-replayable faults since getting multiple faults on the same + // memory region is not very likely + for (i = 0; i < cached_faults; ++i) { + status = service_fault(gpu, &gpu->parent->fault_buffer_info.non_replayable.fault_cache[i]); + if (status != NV_OK) + break; + } + } + + if (status != NV_OK) + UVM_DBG_PRINT("Error servicing non-replayable faults on GPU: %s\n", uvm_gpu_name(gpu)); +} diff --git a/kernel-open/nvidia-uvm/uvm_gpu_non_replayable_faults.h b/kernel-open/nvidia-uvm/uvm_gpu_non_replayable_faults.h new file mode 100644 index 000000000..54267625d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_non_replayable_faults.h @@ -0,0 +1,37 @@ +/******************************************************************************* + Copyright (c) 2017 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef __UVM_GPU_NON_REPLAYABLE_FAULTS_H__ +#define __UVM_GPU_NON_REPLAYABLE_FAULTS_H__ + +#include +#include "uvm_forward_decl.h" + +bool uvm_gpu_non_replayable_faults_pending(uvm_parent_gpu_t *parent_gpu); + +void uvm_gpu_service_non_replayable_fault_buffer(uvm_gpu_t *gpu); + +NV_STATUS uvm_gpu_fault_buffer_init_non_replayable_faults(uvm_parent_gpu_t *parent_gpu); + +void uvm_gpu_fault_buffer_deinit_non_replayable_faults(uvm_parent_gpu_t *parent_gpu); + +#endif // __UVM_GPU_NON_REPLAYABLE_FAULTS_H__ diff --git a/kernel-open/nvidia-uvm/uvm_gpu_replayable_faults.c b/kernel-open/nvidia-uvm/uvm_gpu_replayable_faults.c new file mode 100644 index 000000000..2a1b15095 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_replayable_faults.c @@ -0,0 +1,2396 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "linux/sort.h" +#include "nv_uvm_interface.h" +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_gpu_replayable_faults.h" +#include "uvm_hal.h" +#include "uvm_kvmalloc.h" +#include "uvm_tools.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_va_space.h" +#include "uvm_va_space_mm.h" +#include "uvm_procfs.h" +#include "uvm_perf_thrashing.h" +#include "uvm_gpu_non_replayable_faults.h" +#include "uvm_ats_faults.h" +#include "uvm_test.h" + +// The documentation at the beginning of uvm_gpu_non_replayable_faults.c +// provides some background for understanding replayable faults, non-replayable +// faults, and how UVM services each fault type. + +#define UVM_PERF_REENABLE_PREFETCH_FAULTS_LAPSE_MSEC_DEFAULT 1000 + +// Lapse of time in milliseconds after which prefetch faults can be re-enabled. 
+// 0 means it is never disabled +static unsigned uvm_perf_reenable_prefetch_faults_lapse_msec = UVM_PERF_REENABLE_PREFETCH_FAULTS_LAPSE_MSEC_DEFAULT; +module_param(uvm_perf_reenable_prefetch_faults_lapse_msec, uint, S_IRUGO); + +#define UVM_PERF_FAULT_BATCH_COUNT_MIN 1 +#define UVM_PERF_FAULT_BATCH_COUNT_DEFAULT 256 + +// Number of entries that are fetched from the GPU fault buffer and serviced in +// batch +static unsigned uvm_perf_fault_batch_count = UVM_PERF_FAULT_BATCH_COUNT_DEFAULT; +module_param(uvm_perf_fault_batch_count, uint, S_IRUGO); + +#define UVM_PERF_FAULT_REPLAY_POLICY_DEFAULT UVM_PERF_FAULT_REPLAY_POLICY_BATCH_FLUSH + +// Policy that determines when to issue fault replays +static uvm_perf_fault_replay_policy_t uvm_perf_fault_replay_policy = UVM_PERF_FAULT_REPLAY_POLICY_DEFAULT; +module_param(uvm_perf_fault_replay_policy, uint, S_IRUGO); + +#define UVM_PERF_FAULT_REPLAY_UPDATE_PUT_RATIO_DEFAULT 50 + +// Reading fault buffer GET/PUT pointers from the CPU is expensive. However, +// updating PUT before flushing the buffer helps minimizing the number of +// duplicates in the buffer as it discards faults that were not processed +// because of the batch size limit or because they arrived during servicing. +// If PUT is not updated, the replay operation will make them show up again +// in the buffer as duplicates. +// +// We keep track of the number of duplicates in each batch and we use +// UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT for the fault buffer flush after if the +// percentage of duplicate faults in a batch is greater than the ratio defined +// in the following module parameter. UVM_GPU_BUFFER_FLUSH_MODE_CACHED_PUT is +// used, otherwise. +static unsigned uvm_perf_fault_replay_update_put_ratio = UVM_PERF_FAULT_REPLAY_UPDATE_PUT_RATIO_DEFAULT; +module_param(uvm_perf_fault_replay_update_put_ratio, uint, S_IRUGO); + +#define UVM_PERF_FAULT_MAX_BATCHES_PER_SERVICE_DEFAULT 20 + +#define UVM_PERF_FAULT_MAX_THROTTLE_PER_SERVICE_DEFAULT 5 + +// Maximum number of batches to be processed per execution of the bottom-half +static unsigned uvm_perf_fault_max_batches_per_service = UVM_PERF_FAULT_MAX_BATCHES_PER_SERVICE_DEFAULT; +module_param(uvm_perf_fault_max_batches_per_service, uint, S_IRUGO); + +// Maximum number of batches with thrashing pages per execution of the bottom-half +static unsigned uvm_perf_fault_max_throttle_per_service = UVM_PERF_FAULT_MAX_THROTTLE_PER_SERVICE_DEFAULT; +module_param(uvm_perf_fault_max_throttle_per_service, uint, S_IRUGO); + +static unsigned uvm_perf_fault_coalesce = 1; +module_param(uvm_perf_fault_coalesce, uint, S_IRUGO); + +// This function is used for both the initial fault buffer initialization and +// the power management resume path. +static void fault_buffer_reinit_replayable_faults(uvm_parent_gpu_t *parent_gpu) +{ + uvm_replayable_fault_buffer_info_t *replayable_faults = &parent_gpu->fault_buffer_info.replayable; + + // Read the current get/put pointers, as this might not be the first time + // we take control of the fault buffer since the GPU was initialized, + // or since we may need to bring UVM's cached copies back in sync following + // a sleep cycle. 
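+    // For example, after a suspend/resume cycle the hardware GET/PUT values
+    // may no longer match our cached copies (they can have been reset while
+    // the GPU was powered down), so both are re-read here before fault
+    // servicing restarts.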
+ replayable_faults->cached_get = parent_gpu->fault_buffer_hal->read_get(parent_gpu); + replayable_faults->cached_put = parent_gpu->fault_buffer_hal->read_put(parent_gpu); + + // (Re-)enable fault prefetching + if (parent_gpu->fault_buffer_info.prefetch_faults_enabled) + parent_gpu->arch_hal->enable_prefetch_faults(parent_gpu); + else + parent_gpu->arch_hal->disable_prefetch_faults(parent_gpu); +} + +// There is no error handling in this function. The caller is in charge of +// calling fault_buffer_deinit_replayable_faults on failure. +static NV_STATUS fault_buffer_init_replayable_faults(uvm_parent_gpu_t *parent_gpu) +{ + NV_STATUS status = NV_OK; + uvm_replayable_fault_buffer_info_t *replayable_faults = &parent_gpu->fault_buffer_info.replayable; + uvm_fault_service_batch_context_t *batch_context = &replayable_faults->batch_service_context; + + UVM_ASSERT(parent_gpu->fault_buffer_info.rm_info.replayable.bufferSize % + parent_gpu->fault_buffer_hal->entry_size(parent_gpu) == 0); + + replayable_faults->max_faults = parent_gpu->fault_buffer_info.rm_info.replayable.bufferSize / + parent_gpu->fault_buffer_hal->entry_size(parent_gpu); + + // Check provided module parameter value + parent_gpu->fault_buffer_info.max_batch_size = max(uvm_perf_fault_batch_count, + (NvU32)UVM_PERF_FAULT_BATCH_COUNT_MIN); + parent_gpu->fault_buffer_info.max_batch_size = min(parent_gpu->fault_buffer_info.max_batch_size, + replayable_faults->max_faults); + + if (parent_gpu->fault_buffer_info.max_batch_size != uvm_perf_fault_batch_count) { + pr_info("Invalid uvm_perf_fault_batch_count value on GPU %s: %u. Valid range [%u:%u] Using %u instead\n", + parent_gpu->name, + uvm_perf_fault_batch_count, + UVM_PERF_FAULT_BATCH_COUNT_MIN, + replayable_faults->max_faults, + parent_gpu->fault_buffer_info.max_batch_size); + } + + batch_context->fault_cache = uvm_kvmalloc_zero(replayable_faults->max_faults * sizeof(*batch_context->fault_cache)); + if (!batch_context->fault_cache) + return NV_ERR_NO_MEMORY; + + // fault_cache is used to signal that the tracker was initialized. + uvm_tracker_init(&replayable_faults->replay_tracker); + + batch_context->ordered_fault_cache = uvm_kvmalloc_zero(replayable_faults->max_faults * + sizeof(*batch_context->ordered_fault_cache)); + if (!batch_context->ordered_fault_cache) + return NV_ERR_NO_MEMORY; + + // This value must be initialized by HAL + UVM_ASSERT(replayable_faults->utlb_count > 0); + + batch_context->utlbs = uvm_kvmalloc_zero(replayable_faults->utlb_count * sizeof(*batch_context->utlbs)); + if (!batch_context->utlbs) + return NV_ERR_NO_MEMORY; + + batch_context->max_utlb_id = 0; + + status = uvm_rm_locked_call(nvUvmInterfaceOwnPageFaultIntr(parent_gpu->rm_device, NV_TRUE)); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to take page fault ownership from RM: %s, GPU %s\n", + nvstatusToString(status), + parent_gpu->name); + return status; + } + + replayable_faults->replay_policy = uvm_perf_fault_replay_policy < UVM_PERF_FAULT_REPLAY_POLICY_MAX? + uvm_perf_fault_replay_policy: + UVM_PERF_FAULT_REPLAY_POLICY_DEFAULT; + + if (replayable_faults->replay_policy != uvm_perf_fault_replay_policy) { + pr_info("Invalid uvm_perf_fault_replay_policy value on GPU %s: %d. 
Using %d instead\n", + parent_gpu->name, + uvm_perf_fault_replay_policy, + replayable_faults->replay_policy); + } + + replayable_faults->replay_update_put_ratio = min(uvm_perf_fault_replay_update_put_ratio, 100u); + if (replayable_faults->replay_update_put_ratio != uvm_perf_fault_replay_update_put_ratio) { + pr_info("Invalid uvm_perf_fault_replay_update_put_ratio value on GPU %s: %u. Using %u instead\n", + parent_gpu->name, + uvm_perf_fault_replay_update_put_ratio, + replayable_faults->replay_update_put_ratio); + } + + // Re-enable fault prefetching just in case it was disabled in a previous run + parent_gpu->fault_buffer_info.prefetch_faults_enabled = parent_gpu->prefetch_fault_supported; + + fault_buffer_reinit_replayable_faults(parent_gpu); + + return NV_OK; +} + +static void fault_buffer_deinit_replayable_faults(uvm_parent_gpu_t *parent_gpu) +{ + uvm_replayable_fault_buffer_info_t *replayable_faults = &parent_gpu->fault_buffer_info.replayable; + uvm_fault_service_batch_context_t *batch_context = &replayable_faults->batch_service_context; + + if (batch_context->fault_cache) { + UVM_ASSERT(uvm_tracker_is_empty(&replayable_faults->replay_tracker)); + uvm_tracker_deinit(&replayable_faults->replay_tracker); + } + + if (parent_gpu->fault_buffer_info.rm_info.faultBufferHandle) { + // Re-enable prefetch faults in case we disabled them + if (parent_gpu->prefetch_fault_supported && !parent_gpu->fault_buffer_info.prefetch_faults_enabled) + parent_gpu->arch_hal->enable_prefetch_faults(parent_gpu); + } + + uvm_kvfree(batch_context->fault_cache); + uvm_kvfree(batch_context->ordered_fault_cache); + uvm_kvfree(batch_context->utlbs); + batch_context->fault_cache = NULL; + batch_context->ordered_fault_cache = NULL; + batch_context->utlbs = NULL; +} + +NV_STATUS uvm_gpu_fault_buffer_init(uvm_parent_gpu_t *parent_gpu) +{ + NV_STATUS status = NV_OK; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + UVM_ASSERT(parent_gpu->replayable_faults_supported); + + status = uvm_rm_locked_call(nvUvmInterfaceInitFaultInfo(parent_gpu->rm_device, + &parent_gpu->fault_buffer_info.rm_info)); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init fault buffer info from RM: %s, GPU %s\n", + nvstatusToString(status), + parent_gpu->name); + + // nvUvmInterfaceInitFaultInfo may leave fields in rm_info populated + // when it returns an error. Set the buffer handle to zero as it is + // used by the deinitialization logic to determine if it was correctly + // initialized. + parent_gpu->fault_buffer_info.rm_info.faultBufferHandle = 0; + goto fail; + } + + status = fault_buffer_init_replayable_faults(parent_gpu); + if (status != NV_OK) + goto fail; + + if (parent_gpu->non_replayable_faults_supported) { + status = uvm_gpu_fault_buffer_init_non_replayable_faults(parent_gpu); + if (status != NV_OK) + goto fail; + } + + return NV_OK; + +fail: + uvm_gpu_fault_buffer_deinit(parent_gpu); + + return status; +} + +// Reinitialize state relevant to replayable fault handling after returning +// from a power management cycle. 
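+// Only the cached GET/PUT pointers and the prefetch-fault enable state are
+// refreshed (see fault_buffer_reinit_replayable_faults()); the fault caches,
+// trackers and RM fault buffer ownership established at init time are kept.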
+void uvm_gpu_fault_buffer_resume(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(parent_gpu->replayable_faults_supported); + + fault_buffer_reinit_replayable_faults(parent_gpu); +} + +void uvm_gpu_fault_buffer_deinit(uvm_parent_gpu_t *parent_gpu) +{ + NV_STATUS status = NV_OK; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (parent_gpu->non_replayable_faults_supported) + uvm_gpu_fault_buffer_deinit_non_replayable_faults(parent_gpu); + + fault_buffer_deinit_replayable_faults(parent_gpu); + + if (parent_gpu->fault_buffer_info.rm_info.faultBufferHandle) { + status = uvm_rm_locked_call(nvUvmInterfaceOwnPageFaultIntr(parent_gpu->rm_device, NV_FALSE)); + UVM_ASSERT(status == NV_OK); + + uvm_rm_locked_call_void(nvUvmInterfaceDestroyFaultInfo(parent_gpu->rm_device, + &parent_gpu->fault_buffer_info.rm_info)); + + parent_gpu->fault_buffer_info.rm_info.faultBufferHandle = 0; + } +} + +bool uvm_gpu_replayable_faults_pending(uvm_parent_gpu_t *parent_gpu) +{ + uvm_replayable_fault_buffer_info_t *replayable_faults = &parent_gpu->fault_buffer_info.replayable; + + UVM_ASSERT(parent_gpu->replayable_faults_supported); + + // Fast path 1: we left some faults unserviced in the buffer in the last pass + if (replayable_faults->cached_get != replayable_faults->cached_put) + return true; + + // Fast path 2: read the valid bit of the fault buffer entry pointed by the + // cached get pointer + if (!parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, replayable_faults->cached_get)) { + // Slow path: read the put pointer from the GPU register via BAR0 + // over PCIe + replayable_faults->cached_put = parent_gpu->fault_buffer_hal->read_put(parent_gpu); + + // No interrupt pending + if (replayable_faults->cached_get == replayable_faults->cached_put) + return false; + } + + return true; +} + +// Push a fault cancel method on the given client. Any failure during this +// operation may lead to application hang (requiring manual Ctrl+C from the +// user) or system crash (requiring reboot). +// In that case we log an error message. +// +// gpc_id and client_id aren't used if global_cancel is true. 
+// +// This function acquires both the given tracker and the replay tracker +static NV_STATUS push_cancel_on_gpu(uvm_gpu_t *gpu, + uvm_gpu_phys_address_t instance_ptr, + bool global_cancel, + NvU32 gpc_id, + NvU32 client_id, + uvm_tracker_t *tracker) +{ + NV_STATUS status; + uvm_push_t push; + uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable; + + if (global_cancel) { + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &replayable_faults->replay_tracker, + &push, + "Cancel targeting instance_ptr {0x%llx:%s}\n", + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture)); + } else { + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &replayable_faults->replay_tracker, + &push, + "Cancel targeting instance_ptr {0x%llx:%s} gpc %u client %u\n", + instance_ptr.address, + uvm_aperture_string(instance_ptr.aperture), + gpc_id, + client_id); + } + + UVM_ASSERT(status == NV_OK); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to create push and acquire replay tracker before pushing cancel: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + uvm_push_acquire_tracker(&push, tracker); + + if (global_cancel) + gpu->parent->host_hal->cancel_faults_global(&push, instance_ptr); + else + gpu->parent->host_hal->cancel_faults_targeted(&push, instance_ptr, gpc_id, client_id); + + // We don't need to put the cancel in the GPU replay tracker since we wait + // on it immediately. + status = uvm_push_end_and_wait(&push); + + UVM_ASSERT(status == NV_OK); + if (status != NV_OK) + UVM_ERR_PRINT("Failed to wait for pushed cancel: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + + uvm_tracker_clear(&replayable_faults->replay_tracker); + + return status; +} + +static NV_STATUS push_cancel_on_gpu_targeted(uvm_gpu_t *gpu, + uvm_gpu_phys_address_t instance_ptr, + NvU32 gpc_id, + NvU32 client_id, + uvm_tracker_t *tracker) +{ + return push_cancel_on_gpu(gpu, instance_ptr, false, gpc_id, client_id, tracker); +} + +static NV_STATUS push_cancel_on_gpu_global(uvm_gpu_t *gpu, uvm_gpu_phys_address_t instance_ptr, uvm_tracker_t *tracker) +{ + UVM_ASSERT(!gpu->parent->smc.enabled); + + return push_cancel_on_gpu(gpu, instance_ptr, true, 0, 0, tracker); +} + +// Volta implements a targeted VA fault cancel that simplifies the fault cancel +// process. You only need to specify the address, type, and mmu_engine_id for +// the access to be cancelled. Caller must hold the VA space lock for the access +// to be cancelled. 
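+// Note that, because the fault address was aligned down to PAGE_SIZE at fetch
+// time, the function below re-issues the cancel for every 4K offset within
+// the page (for example, 16 cancels on a 64KB PAGE_SIZE system) so that the
+// address originally reported in the fault packet is guaranteed to be
+// covered.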
+static NV_STATUS cancel_fault_precise_va(uvm_gpu_t *gpu, + uvm_fault_buffer_entry_t *fault_entry, + uvm_fault_cancel_va_mode_t cancel_va_mode) +{ + NV_STATUS status; + uvm_gpu_va_space_t *gpu_va_space; + uvm_gpu_phys_address_t pdb; + uvm_push_t push; + uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable; + NvU64 offset; + + UVM_ASSERT(gpu->parent->replayable_faults_supported); + UVM_ASSERT(fault_entry->fatal_reason != UvmEventFatalReasonInvalid); + UVM_ASSERT(!fault_entry->filtered); + + gpu_va_space = uvm_gpu_va_space_get_by_parent_gpu(fault_entry->va_space, gpu->parent); + UVM_ASSERT(gpu_va_space); + pdb = uvm_page_tree_pdb(&gpu_va_space->page_tables)->addr; + + // Record fatal fault event + uvm_tools_record_gpu_fatal_fault(gpu->id, fault_entry->va_space, fault_entry, fault_entry->fatal_reason); + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &replayable_faults->replay_tracker, + &push, + "Precise cancel targeting PDB {0x%llx:%s} VA 0x%llx VEID %u with access type %s", + pdb.address, + uvm_aperture_string(pdb.aperture), + fault_entry->fault_address, + fault_entry->fault_source.ve_id, + uvm_fault_access_type_string(fault_entry->fault_access_type)); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to create push and acquire replay tracker before pushing cancel: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + // UVM aligns fault addresses to PAGE_SIZE as it is the smallest mapping + // and coherence tracking granularity. However, the cancel method requires + // the original address (4K-aligned) reported in the packet, which is lost + // at this point. Since the access permissions are the same for the whole + // 64K page, we issue a cancel per 4K range to make sure that the HW sees + // the address reported in the packet. + for (offset = 0; offset < PAGE_SIZE; offset += UVM_PAGE_SIZE_4K) { + gpu->parent->host_hal->cancel_faults_va(&push, pdb, fault_entry, cancel_va_mode); + fault_entry->fault_address += UVM_PAGE_SIZE_4K; + } + fault_entry->fault_address = UVM_PAGE_ALIGN_DOWN(fault_entry->fault_address - 1); + + // We don't need to put the cancel in the GPU replay tracker since we wait + // on it immediately. + status = uvm_push_end_and_wait(&push); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to wait for pushed VA global fault cancel: %s, GPU %s\n", + nvstatusToString(status), uvm_gpu_name(gpu)); + } + + uvm_tracker_clear(&replayable_faults->replay_tracker); + + return status; +} + +static NV_STATUS push_replay_on_gpu(uvm_gpu_t *gpu, uvm_fault_replay_type_t type, uvm_fault_service_batch_context_t *batch_context) +{ + NV_STATUS status; + uvm_push_t push; + uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable; + uvm_tracker_t *tracker = NULL; + + if (batch_context) + tracker = &batch_context->tracker; + + status = uvm_push_begin_acquire(gpu->channel_manager, UVM_CHANNEL_TYPE_MEMOPS, tracker, &push, + "Replaying faults"); + if (status != NV_OK) + return status; + + gpu->parent->host_hal->replay_faults(&push, type); + + // Do not count REPLAY_TYPE_START_ACK_ALL's toward the replay count. + // REPLAY_TYPE_START_ACK_ALL's are issued for cancels, and the cancel + // algorithm checks to make sure that no REPLAY_TYPE_START's have been + // issued using batch_context->replays. 
+ if (batch_context && type != UVM_FAULT_REPLAY_TYPE_START_ACK_ALL) { + uvm_tools_broadcast_replay(gpu, &push, batch_context->batch_id, UVM_FAULT_CLIENT_TYPE_GPC); + ++batch_context->num_replays; + } + + uvm_push_end(&push); + + // Add this push to the GPU's replay_tracker so cancel can wait on it. + status = uvm_tracker_add_push_safe(&replayable_faults->replay_tracker, &push); + + if (uvm_procfs_is_debug_enabled()) { + if (type == UVM_FAULT_REPLAY_TYPE_START) + ++replayable_faults->stats.num_replays; + else + ++replayable_faults->stats.num_replays_ack_all; + } + + return status; +} + +static void write_get(uvm_parent_gpu_t *parent_gpu, NvU32 get) +{ + uvm_replayable_fault_buffer_info_t *replayable_faults = &parent_gpu->fault_buffer_info.replayable; + + UVM_ASSERT(uvm_sem_is_locked(&parent_gpu->isr.replayable_faults.service_lock)); + + // Write get on the GPU only if it's changed. + if (replayable_faults->cached_get == get) + return; + + replayable_faults->cached_get = get; + + // Update get pointer on the GPU + parent_gpu->fault_buffer_hal->write_get(parent_gpu, get); +} + +static NV_STATUS fault_buffer_flush_locked(uvm_gpu_t *gpu, + uvm_gpu_buffer_flush_mode_t flush_mode, + uvm_fault_replay_type_t fault_replay, + uvm_fault_service_batch_context_t *batch_context) +{ + NvU32 get; + NvU32 put; + uvm_spin_loop_t spin; + uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable; + + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.replayable_faults.service_lock)); + UVM_ASSERT(gpu->parent->replayable_faults_supported); + + // Read PUT pointer from the GPU if requested + if (flush_mode == UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT) + replayable_faults->cached_put = gpu->parent->fault_buffer_hal->read_put(gpu->parent); + + get = replayable_faults->cached_get; + put = replayable_faults->cached_put; + + while (get != put) { + // Wait until valid bit is set + UVM_SPIN_WHILE(!gpu->parent->fault_buffer_hal->entry_is_valid(gpu->parent, get), &spin); + + gpu->parent->fault_buffer_hal->entry_clear_valid(gpu->parent, get); + ++get; + if (get == replayable_faults->max_faults) + get = 0; + } + + write_get(gpu->parent, get); + + // Issue fault replay + return push_replay_on_gpu(gpu, fault_replay, batch_context); +} + +NV_STATUS uvm_gpu_fault_buffer_flush(uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + + UVM_ASSERT(gpu->parent->replayable_faults_supported); + + // Disables replayable fault interrupts and fault servicing + uvm_gpu_replayable_faults_isr_lock(gpu->parent); + + status = fault_buffer_flush_locked(gpu, + UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT, + UVM_FAULT_REPLAY_TYPE_START, + NULL); + + // This will trigger the top half to start servicing faults again, if the + // replay brought any back in + uvm_gpu_replayable_faults_isr_unlock(gpu->parent); + return status; +} + +static inline int cmp_fault_instance_ptr(const uvm_fault_buffer_entry_t *a, + const uvm_fault_buffer_entry_t *b) +{ + int result = uvm_gpu_phys_addr_cmp(a->instance_ptr, b->instance_ptr); + // On Volta+ we need to sort by {instance_ptr + subctx_id} pair since it can + // map to a different VA space + if (result != 0) + return result; + return UVM_CMP_DEFAULT(a->fault_source.ve_id, b->fault_source.ve_id); +} + +// Compare two VA spaces +static inline int cmp_va_space(const uvm_va_space_t *a, const uvm_va_space_t *b) +{ + return UVM_CMP_DEFAULT(a, b); +} + +// Compare two virtual addresses +static inline int cmp_addr(NvU64 a, NvU64 b) +{ + return UVM_CMP_DEFAULT(a, b); +} + +// Compare two fault access 
types +static inline int cmp_access_type(uvm_fault_access_type_t a, uvm_fault_access_type_t b) +{ + UVM_ASSERT(a >= 0 && a < UVM_FAULT_ACCESS_TYPE_COUNT); + UVM_ASSERT(b >= 0 && b < UVM_FAULT_ACCESS_TYPE_COUNT); + + // Check that fault access type enum values are ordered by "intrusiveness" + BUILD_BUG_ON(UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG <= UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK); + BUILD_BUG_ON(UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK <= UVM_FAULT_ACCESS_TYPE_WRITE); + BUILD_BUG_ON(UVM_FAULT_ACCESS_TYPE_WRITE <= UVM_FAULT_ACCESS_TYPE_READ); + BUILD_BUG_ON(UVM_FAULT_ACCESS_TYPE_READ <= UVM_FAULT_ACCESS_TYPE_PREFETCH); + + return b - a; +} + +typedef enum +{ + // Fetch a batch of faults from the buffer. + FAULT_FETCH_MODE_BATCH_ALL, + + // Fetch a batch of faults from the buffer. Stop at the first entry that is + // not ready yet + FAULT_FETCH_MODE_BATCH_READY, + + // Fetch all faults in the buffer before PUT. Wait for all faults to become + // ready + FAULT_FETCH_MODE_ALL, +} fault_fetch_mode_t; + +static void fetch_fault_buffer_merge_entry(uvm_fault_buffer_entry_t *current_entry, + uvm_fault_buffer_entry_t *last_entry) +{ + UVM_ASSERT(last_entry->num_instances > 0); + + ++last_entry->num_instances; + uvm_fault_access_type_mask_set(&last_entry->access_type_mask, current_entry->fault_access_type); + + if (current_entry->fault_access_type > last_entry->fault_access_type) { + // If the new entry has a higher access type, it becomes the + // fault to be serviced. Add the previous one to the list of instances + current_entry->access_type_mask = last_entry->access_type_mask; + current_entry->num_instances = last_entry->num_instances; + last_entry->filtered = true; + + // We only merge faults from different uTLBs if the new fault has an + // access type with the same or lower level of intrusiveness. + UVM_ASSERT(current_entry->fault_source.utlb_id == last_entry->fault_source.utlb_id); + + list_replace(&last_entry->merged_instances_list, ¤t_entry->merged_instances_list); + list_add(&last_entry->merged_instances_list, ¤t_entry->merged_instances_list); + } + else { + // Add the new entry to the list of instances for reporting purposes + current_entry->filtered = true; + list_add(¤t_entry->merged_instances_list, &last_entry->merged_instances_list); + } +} + +static bool fetch_fault_buffer_try_merge_entry(uvm_fault_buffer_entry_t *current_entry, + uvm_fault_service_batch_context_t *batch_context, + uvm_fault_utlb_info_t *current_tlb, + bool is_same_instance_ptr) +{ + uvm_fault_buffer_entry_t *last_tlb_entry = current_tlb->last_fault; + uvm_fault_buffer_entry_t *last_global_entry = batch_context->last_fault; + + // Check the last coalesced fault and the coalesced fault that was + // originated from this uTLB + const bool is_last_tlb_fault = current_tlb->num_pending_faults > 0 && + cmp_fault_instance_ptr(current_entry, last_tlb_entry) == 0 && + current_entry->fault_address == last_tlb_entry->fault_address; + + // We only merge faults from different uTLBs if the new fault has an + // access type with the same or lower level of intrusiveness. This is to + // avoid having to update num_pending_faults on both uTLBs and recomputing + // last_fault. 
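+    // Example (hypothetical faults): if this uTLB's previous entry was a
+    // WRITE at VA 0x7000 and the current entry is an ATOMIC_STRONG at the
+    // same VA and instance pointer, the two are merged and the ATOMIC entry
+    // becomes the representative, being more intrusive. A subsequent READ at
+    // that VA from the same uTLB is merged in as a filtered instance and the
+    // representative is left unchanged.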
+ const bool is_last_fault = is_same_instance_ptr && + current_entry->fault_address == last_global_entry->fault_address && + current_entry->fault_access_type <= last_global_entry->fault_access_type; + + if (is_last_tlb_fault) { + fetch_fault_buffer_merge_entry(current_entry, last_tlb_entry); + if (current_entry->fault_access_type > last_tlb_entry->fault_access_type) + current_tlb->last_fault = current_entry; + + return true; + } + else if (is_last_fault) { + fetch_fault_buffer_merge_entry(current_entry, last_global_entry); + if (current_entry->fault_access_type > last_global_entry->fault_access_type) + batch_context->last_fault = current_entry; + + return true; + } + + return false; +} + +// Fetch entries from the fault buffer, decode them and store them in the batch +// context. We implement the fetch modes described above. +// +// When possible, we coalesce duplicate entries to minimize the fault handling +// overhead. Basically, we merge faults with the same instance pointer and page +// virtual address. We keep track of the last fault per uTLB to detect +// duplicates due to local reuse and the last fault in the whole batch to +// detect reuse across CTAs. +// +// We will service the first fault entry with the most "intrusive" (atomic > +// write > read > prefetch) access type*. That fault entry is called the +// "representative". The rest of filtered faults have the "filtered" flag set +// and are added to a list in the representative fault entry for reporting +// purposes. The representative fault entry also contains a mask with all the +// access types that produced a fault on the page. +// +// *We only merge faults from different uTLBs if the new fault has an access +// type with the same or lower level of intrusiveness. +// +// This optimization cannot be performed during fault cancel on Pascal GPUs +// (fetch_mode == FAULT_FETCH_MODE_ALL) since we need accurate tracking of all +// the faults in each uTLB in order to guarantee precise fault attribution. +static void fetch_fault_buffer_entries(uvm_gpu_t *gpu, + uvm_fault_service_batch_context_t *batch_context, + fault_fetch_mode_t fetch_mode) +{ + NvU32 get; + NvU32 put; + NvU32 fault_index; + NvU32 num_coalesced_faults; + NvU32 utlb_id; + uvm_fault_buffer_entry_t *fault_cache; + uvm_spin_loop_t spin; + uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable; + const bool in_pascal_cancel_path = (!gpu->parent->fault_cancel_va_supported && fetch_mode == FAULT_FETCH_MODE_ALL); + const bool may_filter = uvm_perf_fault_coalesce && !in_pascal_cancel_path; + + UVM_ASSERT(uvm_sem_is_locked(&gpu->parent->isr.replayable_faults.service_lock)); + UVM_ASSERT(gpu->parent->replayable_faults_supported); + + fault_cache = batch_context->fault_cache; + + get = replayable_faults->cached_get; + + // Read put pointer from GPU and cache it + if (get == replayable_faults->cached_put) + replayable_faults->cached_put = gpu->parent->fault_buffer_hal->read_put(gpu->parent); + + put = replayable_faults->cached_put; + + batch_context->is_single_instance_ptr = true; + batch_context->last_fault = NULL; + + fault_index = 0; + num_coalesced_faults = 0; + + // Clear uTLB counters + for (utlb_id = 0; utlb_id <= batch_context->max_utlb_id; ++utlb_id) { + batch_context->utlbs[utlb_id].num_pending_faults = 0; + batch_context->utlbs[utlb_id].has_fatal_faults = false; + } + batch_context->max_utlb_id = 0; + + if (get == put) + goto done; + + // Parse until get != put and have enough space to cache. 
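+    // The fault buffer is a ring: for example, with max_faults == 512,
+    // get == 510 and put == 2, the loop below consumes entries 510, 511, 0
+    // and 1, with get wrapping back to 0 once it reaches max_faults.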
+ while ((get != put) && + (fetch_mode == FAULT_FETCH_MODE_ALL || fault_index < gpu->parent->fault_buffer_info.max_batch_size)) { + bool is_same_instance_ptr = true; + uvm_fault_buffer_entry_t *current_entry = &fault_cache[fault_index]; + uvm_fault_utlb_info_t *current_tlb; + + // We cannot just wait for the last entry (the one pointed by put) to + // become valid, we have to do it individually since entries can be + // written out of order + UVM_SPIN_WHILE(!gpu->parent->fault_buffer_hal->entry_is_valid(gpu->parent, get), &spin) { + // We have some entry to work on. Let's do the rest later. + if (fetch_mode != FAULT_FETCH_MODE_ALL && + fetch_mode != FAULT_FETCH_MODE_BATCH_ALL && + fault_index > 0) + goto done; + } + + // Prevent later accesses being moved above the read of the valid bit + smp_mb__after_atomic(); + + // Got valid bit set. Let's cache. + gpu->parent->fault_buffer_hal->parse_entry(gpu->parent, get, current_entry); + + // The GPU aligns the fault addresses to 4k, but all of our tracking is + // done in PAGE_SIZE chunks which might be larger. + current_entry->fault_address = UVM_PAGE_ALIGN_DOWN(current_entry->fault_address); + + // Make sure that all fields in the entry are properly initialized + current_entry->is_fatal = (current_entry->fault_type >= UVM_FAULT_TYPE_FATAL); + + if (current_entry->is_fatal) { + // Record the fatal fault event later as we need the va_space locked + current_entry->fatal_reason = UvmEventFatalReasonInvalidFaultType; + } + else { + current_entry->fatal_reason = UvmEventFatalReasonInvalid; + } + + current_entry->va_space = NULL; + current_entry->filtered = false; + + if (current_entry->fault_source.utlb_id > batch_context->max_utlb_id) { + UVM_ASSERT(current_entry->fault_source.utlb_id < replayable_faults->utlb_count); + batch_context->max_utlb_id = current_entry->fault_source.utlb_id; + } + + current_tlb = &batch_context->utlbs[current_entry->fault_source.utlb_id]; + + if (fault_index > 0) { + UVM_ASSERT(batch_context->last_fault); + is_same_instance_ptr = cmp_fault_instance_ptr(current_entry, batch_context->last_fault) == 0; + + // Coalesce duplicate faults when possible + if (may_filter && !current_entry->is_fatal) { + bool merged = fetch_fault_buffer_try_merge_entry(current_entry, + batch_context, + current_tlb, + is_same_instance_ptr); + if (merged) + goto next_fault; + } + } + + if (batch_context->is_single_instance_ptr && !is_same_instance_ptr) + batch_context->is_single_instance_ptr = false; + + current_entry->num_instances = 1; + current_entry->access_type_mask = uvm_fault_access_type_mask_bit(current_entry->fault_access_type); + INIT_LIST_HEAD(¤t_entry->merged_instances_list); + + ++current_tlb->num_pending_faults; + current_tlb->last_fault = current_entry; + batch_context->last_fault = current_entry; + + ++num_coalesced_faults; + + next_fault: + ++fault_index; + ++get; + if (get == replayable_faults->max_faults) + get = 0; + } + +done: + write_get(gpu->parent, get); + + batch_context->num_cached_faults = fault_index; + batch_context->num_coalesced_faults = num_coalesced_faults; +} + +// Sort comparator for pointers to fault buffer entries that sorts by +// instance pointer +static int cmp_sort_fault_entry_by_instance_ptr(const void *_a, const void *_b) +{ + const uvm_fault_buffer_entry_t **a = (const uvm_fault_buffer_entry_t **)_a; + const uvm_fault_buffer_entry_t **b = (const uvm_fault_buffer_entry_t **)_b; + + return cmp_fault_instance_ptr(*a, *b); +} + +// Sort comparator for pointers to fault buffer entries that sorts by va_space, +// 
fault address and fault access type +static int cmp_sort_fault_entry_by_va_space_address_access_type(const void *_a, const void *_b) +{ + const uvm_fault_buffer_entry_t **a = (const uvm_fault_buffer_entry_t **)_a; + const uvm_fault_buffer_entry_t **b = (const uvm_fault_buffer_entry_t **)_b; + + int result; + + result = cmp_va_space((*a)->va_space, (*b)->va_space); + if (result != 0) + return result; + + result = cmp_addr((*a)->fault_address, (*b)->fault_address); + if (result != 0) + return result; + + return cmp_access_type((*a)->fault_access_type, (*b)->fault_access_type); +} + +// Translate all instance pointers to VA spaces. Since the buffer is ordered by +// instance_ptr, we minimize the number of translations +// +// This function returns NV_WARN_MORE_PROCESSING_REQUIRED if a fault buffer +// flush occurred and executed successfully, or the error code if it failed. +// NV_OK otherwise. +static NV_STATUS translate_instance_ptrs(uvm_gpu_t *gpu, + uvm_fault_service_batch_context_t *batch_context) +{ + NvU32 i; + NV_STATUS status; + + for (i = 0; i < batch_context->num_coalesced_faults; ++i) { + uvm_fault_buffer_entry_t *current_entry; + + current_entry = batch_context->ordered_fault_cache[i]; + + // If this instance pointer matches the previous instance pointer, just + // copy over the already-translated va_space and move on. + if (i != 0 && cmp_fault_instance_ptr(current_entry, batch_context->ordered_fault_cache[i - 1]) == 0) { + current_entry->va_space = batch_context->ordered_fault_cache[i - 1]->va_space; + continue; + } + + status = uvm_gpu_fault_entry_to_va_space(gpu, current_entry, ¤t_entry->va_space); + if (status != NV_OK) { + if (status == NV_ERR_PAGE_TABLE_NOT_AVAIL) { + // The channel is valid but the subcontext is not. This can only + // happen if the subcontext is torn down before its work is + // complete while other subcontexts in the same TSG are still + // executing. This is a violation of the programming model. We + // have limited options since the VA space is gone, meaning we + // can't target the PDB for cancel even if we wanted to. So + // we'll just throw away precise attribution and cancel this + // fault using the SW method, which validates that the intended + // context (TSG) is still running so we don't cancel an innocent + // context. + UVM_ASSERT(!current_entry->va_space); + UVM_ASSERT(gpu->max_subcontexts > 0); + + if (gpu->parent->smc.enabled) { + status = push_cancel_on_gpu_targeted(gpu, + current_entry->instance_ptr, + current_entry->fault_source.gpc_id, + current_entry->fault_source.client_id, + &batch_context->tracker); + } + else { + status = push_cancel_on_gpu_global(gpu, current_entry->instance_ptr, &batch_context->tracker); + } + + if (status != NV_OK) + return status; + + // Fall through and let the flush restart fault processing + } + else { + UVM_ASSERT(status == NV_ERR_INVALID_CHANNEL); + } + + // If the channel is gone then we're looking at a stale fault entry. + // The fault must have been resolved already (serviced or + // cancelled), so we can just flush the fault buffer. 
+ status = fault_buffer_flush_locked(gpu, + UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT, + UVM_FAULT_REPLAY_TYPE_START, + batch_context); + if (status != NV_OK) + return status; + + return NV_WARN_MORE_PROCESSING_REQUIRED; + } + else { + UVM_ASSERT(current_entry->va_space); + } + } + + return NV_OK; +} + +// Fault cache preprocessing for fault coalescing +// +// This function generates an ordered view of the given fault_cache in which +// faults are sorted by VA space, fault address (aligned to 4K) and access type +// "intrusiveness". In order to minimize the number of instance_ptr to VA space +// translations we perform a first sort by instance_ptr. +// +// This function returns NV_WARN_MORE_PROCESSING_REQUIRED if a fault buffer +// flush occurred during instance_ptr translation and executed successfully, or +// the error code if it failed. NV_OK otherwise. +// +// Current scheme: +// 1) sort by instance_ptr +// 2) translate all instance_ptrs to VA spaces +// 3) sort by va_space, fault address (fault_address is page-aligned at this +// point) and access type +static NV_STATUS preprocess_fault_batch(uvm_gpu_t *gpu, uvm_fault_service_batch_context_t *batch_context) +{ + NV_STATUS status; + NvU32 i, j; + uvm_fault_buffer_entry_t **ordered_fault_cache = batch_context->ordered_fault_cache; + + UVM_ASSERT(batch_context->num_coalesced_faults > 0); + UVM_ASSERT(batch_context->num_cached_faults >= batch_context->num_coalesced_faults); + + // Generate an ordered view of the fault cache in ordered_fault_cache. + // We sort the pointers, not the entries in fault_cache + + // Initialize pointers before they are sorted. We only sort one instance per + // coalesced fault + for (i = 0, j = 0; i < batch_context->num_cached_faults; ++i) { + if (!batch_context->fault_cache[i].filtered) + ordered_fault_cache[j++] = &batch_context->fault_cache[i]; + } + UVM_ASSERT(j == batch_context->num_coalesced_faults); + + // 1) if the fault batch contains more than one, sort by instance_ptr + if (!batch_context->is_single_instance_ptr) { + sort(ordered_fault_cache, + batch_context->num_coalesced_faults, + sizeof(*ordered_fault_cache), + cmp_sort_fault_entry_by_instance_ptr, + NULL); + } + + // 2) translate all instance_ptrs to VA spaces + status = translate_instance_ptrs(gpu, batch_context); + if (status != NV_OK) + return status; + + // 3) sort by va_space, fault address (GPU already reports 4K-aligned + // address) and access type + sort(ordered_fault_cache, + batch_context->num_coalesced_faults, + sizeof(*ordered_fault_cache), + cmp_sort_fault_entry_by_va_space_address_access_type, + NULL); + + return NV_OK; +} + +// This function computes the maximum access type that can be serviced for the +// reported fault instances given the logical permissions of the VA range. If +// none of the fault instances can be serviced UVM_FAULT_ACCESS_TYPE_COUNT is +// returned instead. +// +// In the case that there are faults that cannot be serviced, this function +// also sets the flags required for fault cancellation. Prefetch faults do not +// need to be cancelled since they disappear on replay. +// +// The UVM driver considers two scenarios for logical permissions violation: +// - All access types are invalid. For example, when faulting from a processor +// that doesn't have access to the preferred location of a range group when it +// is not migratable. In this case all accesses to the page must be cancelled. +// - Write/atomic accesses are invalid. Basically, when trying to modify a +// read-only VA range. 
In this case we restrict fault cancelling to those types +// of accesses. +// +// Return values: +// - service_access_type: highest access type that can be serviced. +static uvm_fault_access_type_t check_fault_access_permissions(uvm_gpu_t *gpu, + uvm_va_block_t *va_block, + uvm_fault_buffer_entry_t *fault_entry, + bool allow_migration) +{ + NV_STATUS perm_status; + + perm_status = uvm_va_range_check_logical_permissions(va_block->va_range, + gpu->id, + fault_entry->fault_access_type, + allow_migration); + if (perm_status == NV_OK) + return fault_entry->fault_access_type; + + if (fault_entry->fault_access_type == UVM_FAULT_ACCESS_TYPE_PREFETCH) { + fault_entry->is_invalid_prefetch = true; + return UVM_FAULT_ACCESS_TYPE_COUNT; + } + + // At this point we know that some fault instances cannot be serviced + fault_entry->is_fatal = true; + fault_entry->fatal_reason = uvm_tools_status_to_fatal_fault_reason(perm_status); + + if (fault_entry->fault_access_type > UVM_FAULT_ACCESS_TYPE_READ) { + fault_entry->replayable.cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_WRITE_AND_ATOMIC; + + // If there are pending read accesses on the same page, we have to + // service them before we can cancel the write/atomic faults. So we + // retry with read fault access type. + if (uvm_fault_access_type_mask_test(fault_entry->access_type_mask, UVM_FAULT_ACCESS_TYPE_READ)) { + perm_status = uvm_va_range_check_logical_permissions(va_block->va_range, + gpu->id, + UVM_FAULT_ACCESS_TYPE_READ, + allow_migration); + if (perm_status == NV_OK) + return UVM_FAULT_ACCESS_TYPE_READ; + + // If that didn't succeed, cancel all faults + fault_entry->replayable.cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL; + fault_entry->fatal_reason = uvm_tools_status_to_fatal_fault_reason(perm_status); + } + } + else { + fault_entry->replayable.cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL; + } + + return UVM_FAULT_ACCESS_TYPE_COUNT; +} + +// We notify the fault event for all faults within the block so that the +// performance heuristics are updated. Then, all required actions for the block +// data are performed by the performance heuristics code. +// +// Fatal faults are flagged as fatal for later cancellation. Servicing is not +// interrupted on fatal faults due to insufficient permissions or invalid +// addresses. 
+// +// Return codes: +// - NV_OK if all faults were handled (both fatal and non-fatal) +// - NV_ERR_MORE_PROCESSING_REQUIRED if servicing needs allocation retry +// - NV_ERR_NO_MEMORY if the faults could not be serviced due to OOM +// - Any other value is a UVM-global error +static NV_STATUS service_batch_managed_faults_in_block_locked(uvm_gpu_t *gpu, + uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + NvU32 first_fault_index, + uvm_fault_service_batch_context_t *batch_context, + NvU32 *block_faults) +{ + NV_STATUS status = NV_OK; + NvU32 i; + uvm_page_index_t first_page_index; + uvm_page_index_t last_page_index; + NvU32 page_fault_count = 0; + uvm_range_group_range_iter_t iter; + uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable; + uvm_fault_buffer_entry_t **ordered_fault_cache = batch_context->ordered_fault_cache; + uvm_service_block_context_t *block_context = &replayable_faults->block_service_context; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + NvU64 end; + + // Check that all uvm_fault_access_type_t values can fit into an NvU8 + BUILD_BUG_ON(UVM_FAULT_ACCESS_TYPE_COUNT > (int)(NvU8)-1); + + uvm_assert_mutex_locked(&va_block->lock); + + *block_faults = 0; + + first_page_index = PAGES_PER_UVM_VA_BLOCK; + last_page_index = 0; + + // Initialize fault service block context + uvm_processor_mask_zero(&block_context->resident_processors); + block_context->thrashing_pin_count = 0; + block_context->read_duplicate_count = 0; + + uvm_range_group_range_migratability_iter_first(va_space, va_block->start, va_block->end, &iter); + + // The first entry is guaranteed to fall within this block + UVM_ASSERT(ordered_fault_cache[first_fault_index]->va_space == va_space); + UVM_ASSERT(ordered_fault_cache[first_fault_index]->fault_address >= va_block->start); + UVM_ASSERT(ordered_fault_cache[first_fault_index]->fault_address <= va_block->end); + + end = va_block->end; + if (uvm_va_block_is_hmm(va_block)) + uvm_hmm_find_policy_end(va_block, + &block_context->block_context, + ordered_fault_cache[first_fault_index]->fault_address, + &end); + else + block_context->block_context.policy = uvm_va_range_get_policy(va_block->va_range); + + // Scan the sorted array and notify the fault event for all fault entries + // in the block + for (i = first_fault_index; + i < batch_context->num_coalesced_faults && + ordered_fault_cache[i]->va_space == va_space && + ordered_fault_cache[i]->fault_address <= end; + ++i) { + uvm_fault_buffer_entry_t *current_entry = ordered_fault_cache[i]; + const uvm_fault_buffer_entry_t *previous_entry = NULL; + bool read_duplicate; + uvm_processor_id_t new_residency; + uvm_perf_thrashing_hint_t thrashing_hint; + uvm_page_index_t page_index = uvm_va_block_cpu_page_index(va_block, current_entry->fault_address); + bool is_duplicate = false; + uvm_fault_access_type_t service_access_type; + NvU32 service_access_type_mask; + + UVM_ASSERT(current_entry->fault_access_type == + uvm_fault_access_type_mask_highest(current_entry->access_type_mask)); + + current_entry->is_fatal = false; + current_entry->is_throttled = false; + current_entry->is_invalid_prefetch = false; + + if (i > first_fault_index) { + previous_entry = ordered_fault_cache[i - 1]; + is_duplicate = current_entry->fault_address == previous_entry->fault_address; + } + + if (block_context->num_retries == 0) { + uvm_perf_event_notify_gpu_fault(&va_space->perf_events, + va_block, + gpu->id, + block_context->block_context.policy->preferred_location, + 
current_entry, + batch_context->batch_id, + is_duplicate); + } + + // Service the most intrusive fault per page, only. Waive the rest + if (is_duplicate) { + // Propagate the is_invalid_prefetch flag across all prefetch + // faults on the page + current_entry->is_invalid_prefetch = previous_entry->is_invalid_prefetch; + + // If a page is throttled, all faults on the page must be skipped + current_entry->is_throttled = previous_entry->is_throttled; + + // The previous fault was non-fatal so the page has been already + // serviced + if (!previous_entry->is_fatal) + goto next; + } + + // Ensure that the migratability iterator covers the current fault + // address + while (iter.end < current_entry->fault_address) + uvm_range_group_range_migratability_iter_next(va_space, &iter, va_block->end); + + UVM_ASSERT(iter.start <= current_entry->fault_address && iter.end >= current_entry->fault_address); + + service_access_type = check_fault_access_permissions(gpu, va_block, current_entry, iter.migratable); + + // Do not exit early due to logical errors such as access permission + // violation. + if (service_access_type == UVM_FAULT_ACCESS_TYPE_COUNT) + goto next; + + if (service_access_type != current_entry->fault_access_type) { + // Some of the fault instances cannot be serviced due to invalid + // access permissions. Recompute the access type service mask to + // service the rest. + UVM_ASSERT(service_access_type < current_entry->fault_access_type); + service_access_type_mask = uvm_fault_access_type_mask_bit(service_access_type); + } + else { + service_access_type_mask = current_entry->access_type_mask; + } + + // If the GPU already has the necessary access permission, the fault + // does not need to be serviced + if (uvm_va_block_page_is_gpu_authorized(va_block, + page_index, + gpu->id, + uvm_fault_access_type_to_prot(service_access_type))) + goto next; + + thrashing_hint = uvm_perf_thrashing_get_hint(va_block, current_entry->fault_address, gpu->id); + if (thrashing_hint.type == UVM_PERF_THRASHING_HINT_TYPE_THROTTLE) { + // Throttling is implemented by sleeping in the fault handler on + // the CPU and by continuing to process faults on other pages on + // the GPU + current_entry->is_throttled = true; + goto next; + } + else if (thrashing_hint.type == UVM_PERF_THRASHING_HINT_TYPE_PIN) { + if (block_context->thrashing_pin_count++ == 0) + uvm_page_mask_zero(&block_context->thrashing_pin_mask); + + uvm_page_mask_set(&block_context->thrashing_pin_mask, page_index); + } + + // Compute new residency and update the masks + new_residency = uvm_va_block_select_residency(va_block, + page_index, + gpu->id, + service_access_type_mask, + block_context->block_context.policy, + &thrashing_hint, + UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS, + &read_duplicate); + + if (!uvm_processor_mask_test_and_set(&block_context->resident_processors, new_residency)) + uvm_page_mask_zero(&block_context->per_processor_masks[uvm_id_value(new_residency)].new_residency); + + uvm_page_mask_set(&block_context->per_processor_masks[uvm_id_value(new_residency)].new_residency, page_index); + + if (read_duplicate) { + if (block_context->read_duplicate_count++ == 0) + uvm_page_mask_zero(&block_context->read_duplicate_mask); + + uvm_page_mask_set(&block_context->read_duplicate_mask, page_index); + } + + ++page_fault_count; + + block_context->access_type[page_index] = service_access_type; + + if (page_index < first_page_index) + first_page_index = page_index; + if (page_index > last_page_index) + last_page_index = page_index; + + next: + // Only 
update counters the first time since logical permissions cannot
+        // change while we hold the VA space lock
+        // TODO: Bug 1750144: That might not be true with HMM.
+        if (block_context->num_retries == 0) {
+            uvm_fault_utlb_info_t *utlb = &batch_context->utlbs[current_entry->fault_source.utlb_id];
+
+            if (current_entry->is_invalid_prefetch)
+                batch_context->num_invalid_prefetch_faults += current_entry->num_instances;
+
+            if (is_duplicate)
+                batch_context->num_duplicate_faults += current_entry->num_instances;
+            else
+                batch_context->num_duplicate_faults += current_entry->num_instances - 1;
+
+            if (current_entry->is_throttled)
+                batch_context->has_throttled_faults = true;
+
+            if (current_entry->is_fatal) {
+                utlb->has_fatal_faults = true;
+                batch_context->has_fatal_faults = true;
+            }
+        }
+    }
+
+    // Apply the changes computed in the fault service block context, if there
+    // are pages to be serviced
+    if (page_fault_count > 0) {
+        block_context->region = uvm_va_block_region(first_page_index, last_page_index + 1);
+        status = uvm_va_block_service_locked(gpu->id, va_block, va_block_retry, block_context);
+    }
+
+    *block_faults = i - first_fault_index;
+
+    ++block_context->num_retries;
+
+    if (status == NV_OK && batch_context->has_fatal_faults)
+        status = uvm_va_block_set_cancel(va_block, &block_context->block_context, gpu);
+
+    return status;
+}
+
+// We notify the fault event for all faults within the block so that the
+// performance heuristics are updated. The VA block lock is taken for the whole
+// fault servicing although it might be temporarily dropped and re-taken if
+// memory eviction is required.
+//
+// See the comments for function service_batch_managed_faults_in_block_locked
+// for implementation details and error codes.
+static NV_STATUS service_batch_managed_faults_in_block(uvm_gpu_t *gpu,
+                                                       struct mm_struct *mm,
+                                                       uvm_va_block_t *va_block,
+                                                       NvU32 first_fault_index,
+                                                       uvm_fault_service_batch_context_t *batch_context,
+                                                       NvU32 *block_faults)
+{
+    NV_STATUS status;
+    uvm_va_block_retry_t va_block_retry;
+    NV_STATUS tracker_status;
+    uvm_service_block_context_t *fault_block_context = &gpu->parent->fault_buffer_info.replayable.block_service_context;
+
+    fault_block_context->operation = UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS;
+    fault_block_context->num_retries = 0;
+    fault_block_context->block_context.mm = mm;
+
+    uvm_mutex_lock(&va_block->lock);
+
+    status = UVM_VA_BLOCK_RETRY_LOCKED(va_block, &va_block_retry,
+                                       service_batch_managed_faults_in_block_locked(gpu,
+                                                                                    va_block,
+                                                                                    &va_block_retry,
+                                                                                    first_fault_index,
+                                                                                    batch_context,
+                                                                                    block_faults));
+
+    tracker_status = uvm_tracker_add_tracker_safe(&batch_context->tracker, &va_block->tracker);
+
+    uvm_mutex_unlock(&va_block->lock);
+
+    return status == NV_OK? tracker_status: status;
+}
+
+typedef enum
+{
+    // Use this mode when calling from the normal fault servicing path
+    FAULT_SERVICE_MODE_REGULAR,
+
+    // Use this mode when servicing faults from the fault cancelling algorithm.
+    // In this mode no replays are issued
+    FAULT_SERVICE_MODE_CANCEL,
+} fault_service_mode_t;
+
+static NV_STATUS service_non_managed_fault(uvm_fault_buffer_entry_t *current_entry,
+                                           const uvm_fault_buffer_entry_t *previous_entry,
+                                           NV_STATUS lookup_status,
+                                           uvm_gpu_va_space_t *gpu_va_space,
+                                           struct mm_struct *mm,
+                                           uvm_fault_service_batch_context_t *batch_context,
+                                           uvm_ats_fault_invalidate_t *ats_invalidate,
+                                           uvm_fault_utlb_info_t *utlb)
+{
+    NV_STATUS status = lookup_status;
+    bool is_duplicate = false;
+    UVM_ASSERT(utlb->num_pending_faults > 0);
+    UVM_ASSERT(lookup_status != NV_OK);
+
+    if (previous_entry) {
+        is_duplicate = (current_entry->va_space == previous_entry->va_space) &&
+                       (current_entry->fault_address == previous_entry->fault_address);
+
+        if (is_duplicate) {
+            // Propagate the is_invalid_prefetch flag across all prefetch faults
+            // on the page
+            if (previous_entry->is_invalid_prefetch)
+                current_entry->is_invalid_prefetch = true;
+
+            // If a page is throttled, all faults on the page must be skipped
+            if (previous_entry->is_throttled)
+                current_entry->is_throttled = true;
+        }
+    }
+
+    // Generate fault events for all fault packets
+    uvm_perf_event_notify_gpu_fault(&current_entry->va_space->perf_events,
+                                    NULL,
+                                    gpu_va_space->gpu->id,
+                                    UVM_ID_INVALID,
+                                    current_entry,
+                                    batch_context->batch_id,
+                                    is_duplicate);
+
+    if (status != NV_ERR_INVALID_ADDRESS)
+        return status;
+
+    if (uvm_ats_can_service_faults(gpu_va_space, mm)) {
+        // The VA isn't managed. See if ATS knows about it, unless it is a
+        // duplicate and the previous fault was non-fatal so the page has
+        // already been serviced
+        if (!is_duplicate || previous_entry->is_fatal)
+            status = uvm_ats_service_fault_entry(gpu_va_space, current_entry, ats_invalidate);
+        else
+            status = NV_OK;
+    }
+    else {
+        // If the VA block cannot be found, set the fatal fault flag,
+        // unless it is a prefetch fault
+        if (current_entry->fault_access_type == UVM_FAULT_ACCESS_TYPE_PREFETCH) {
+            current_entry->is_invalid_prefetch = true;
+        }
+        else {
+            current_entry->is_fatal = true;
+            current_entry->fatal_reason = uvm_tools_status_to_fatal_fault_reason(status);
+            current_entry->replayable.cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL;
+        }
+
+        // Do not fail due to logical errors
+        status = NV_OK;
+    }
+
+    if (is_duplicate)
+        batch_context->num_duplicate_faults += current_entry->num_instances;
+    else
+        batch_context->num_duplicate_faults += current_entry->num_instances - 1;
+
+    if (current_entry->is_invalid_prefetch)
+        batch_context->num_invalid_prefetch_faults += current_entry->num_instances;
+
+    if (current_entry->is_fatal) {
+        utlb->has_fatal_faults = true;
+        batch_context->has_fatal_faults = true;
+    }
+
+    if (current_entry->is_throttled)
+        batch_context->has_throttled_faults = true;
+
+    return status;
+}
+
+// Scan the ordered view of faults and group them by different va_blocks.
+// Service faults for each va_block, in batch.
+// +// This function returns NV_WARN_MORE_PROCESSING_REQUIRED if the fault buffer +// was flushed because the needs_fault_buffer_flush flag was set on some GPU VA +// space +static NV_STATUS service_fault_batch(uvm_gpu_t *gpu, + fault_service_mode_t service_mode, + uvm_fault_service_batch_context_t *batch_context) +{ + NV_STATUS status = NV_OK; + NvU32 i; + uvm_va_space_t *va_space = NULL; + uvm_gpu_va_space_t *gpu_va_space = NULL; + uvm_ats_fault_invalidate_t *ats_invalidate = &gpu->parent->fault_buffer_info.replayable.ats_invalidate; + const bool replay_per_va_block = service_mode != FAULT_SERVICE_MODE_CANCEL && + gpu->parent->fault_buffer_info.replayable.replay_policy == UVM_PERF_FAULT_REPLAY_POLICY_BLOCK; + struct mm_struct *mm = NULL; + uvm_va_block_context_t *va_block_context = + &gpu->parent->fault_buffer_info.replayable.block_service_context.block_context; + + UVM_ASSERT(gpu->parent->replayable_faults_supported); + + ats_invalidate->write_faults_in_batch = false; + + for (i = 0; i < batch_context->num_coalesced_faults;) { + uvm_va_block_t *va_block; + NvU32 block_faults; + uvm_fault_buffer_entry_t *current_entry = batch_context->ordered_fault_cache[i]; + uvm_fault_utlb_info_t *utlb = &batch_context->utlbs[current_entry->fault_source.utlb_id]; + + UVM_ASSERT(current_entry->va_space); + + if (current_entry->va_space != va_space) { + // Fault on a different va_space, drop the lock of the old one... + if (va_space != NULL) { + // TLB entries are invalidated per GPU VA space + status = uvm_ats_invalidate_tlbs(gpu_va_space, ats_invalidate, &batch_context->tracker); + if (status != NV_OK) + goto fail; + + uvm_va_space_up_read(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + mm = NULL; + } + + va_space = current_entry->va_space; + + // ... and take the lock of the new one + + // If an mm is registered with the VA space, we have to retain it + // in order to lock it before locking the VA space. It is guaranteed + // to remain valid until we release. If no mm is registered, we + // can only service managed faults, not ATS/HMM faults. + mm = uvm_va_space_mm_retain_lock(va_space); + + uvm_va_space_down_read(va_space); + + gpu_va_space = uvm_gpu_va_space_get_by_parent_gpu(va_space, gpu->parent); + if (gpu_va_space && gpu_va_space->needs_fault_buffer_flush) { + // flush if required and clear the flush flag + status = fault_buffer_flush_locked(gpu, + UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT, + UVM_FAULT_REPLAY_TYPE_START, + batch_context); + gpu_va_space->needs_fault_buffer_flush = false; + + if (status == NV_OK) + status = NV_WARN_MORE_PROCESSING_REQUIRED; + + break; + } + + // The case where there is no valid GPU VA space for the GPU in this + // VA space is handled next + } + + // Some faults could be already fatal if they cannot be handled by + // the UVM driver + if (current_entry->is_fatal) { + ++i; + batch_context->has_fatal_faults = true; + utlb->has_fatal_faults = true; + UVM_ASSERT(utlb->num_pending_faults > 0); + continue; + } + + if (!uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->parent->id)) { + // If there is no GPU VA space for the GPU, ignore the fault. This + // can happen if a GPU VA space is destroyed without explicitly + // freeing all memory ranges (destroying the VA range triggers a + // flush of the fault buffer) and there are stale entries in the + // buffer that got fixed by the servicing in a previous batch. 
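+            // Skip only this fault; later faults in the batch may belong to
+            // VA spaces that still have a GPU VA space registered and are
+            // serviced normally.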
+ ++i; + continue; + } + + // TODO: Bug 2103669: Service more than one ATS fault at a time so we + // don't do an unconditional VA range lookup for every ATS fault. + status = uvm_va_block_find_create(va_space, + mm, + current_entry->fault_address, + va_block_context, + &va_block); + if (status == NV_OK) { + status = service_batch_managed_faults_in_block(gpu_va_space->gpu, + mm, + va_block, + i, + batch_context, + &block_faults); + + // When service_batch_managed_faults_in_block returns != NV_OK + // something really bad happened + if (status != NV_OK) + goto fail; + + i += block_faults; + } + else { + const uvm_fault_buffer_entry_t *previous_entry = i == 0? NULL : batch_context->ordered_fault_cache[i - 1]; + + status = service_non_managed_fault(current_entry, + previous_entry, + status, + gpu_va_space, + mm, + batch_context, + ats_invalidate, + utlb); + + // When service_non_managed_fault returns != NV_OK something really + // bad happened + if (status != NV_OK) + goto fail; + + ++i; + continue; + } + + // Don't issue replays in cancel mode + if (replay_per_va_block) { + status = push_replay_on_gpu(gpu, UVM_FAULT_REPLAY_TYPE_START, batch_context); + if (status != NV_OK) + goto fail; + + // Increment the batch id if UVM_PERF_FAULT_REPLAY_POLICY_BLOCK + // is used, as we issue a replay after servicing each VA block + // and we can service a number of VA blocks before returning. + ++batch_context->batch_id; + } + } + + // Only clobber status if invalidate_status != NV_OK, since status may also + // contain NV_WARN_MORE_PROCESSING_REQUIRED. + if (va_space != NULL) { + NV_STATUS invalidate_status = uvm_ats_invalidate_tlbs(gpu_va_space, ats_invalidate, &batch_context->tracker); + if (invalidate_status != NV_OK) + status = invalidate_status; + } + +fail: + if (va_space != NULL) { + uvm_va_space_up_read(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + } + + return status; +} + +// Tells if the given fault entry is the first one in its uTLB +static bool is_first_fault_in_utlb(uvm_fault_service_batch_context_t *batch_context, NvU32 fault_index) +{ + NvU32 i; + NvU32 utlb_id = batch_context->fault_cache[fault_index].fault_source.utlb_id; + + for (i = 0; i < fault_index; ++i) { + uvm_fault_buffer_entry_t *current_entry = &batch_context->fault_cache[i]; + + // We have found a prior fault in the same uTLB + if (current_entry->fault_source.utlb_id == utlb_id) + return false; + } + + return true; +} + +// Compute the number of fatal and non-fatal faults for a page in the given uTLB +static void faults_for_page_in_utlb(uvm_fault_service_batch_context_t *batch_context, + uvm_va_space_t *va_space, + NvU64 addr, + NvU32 utlb_id, + NvU32 *fatal_faults, + NvU32 *non_fatal_faults) +{ + NvU32 i; + + *fatal_faults = 0; + *non_fatal_faults = 0; + + // Fault filtering is not allowed in the TLB-based fault cancel path + UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults); + + for (i = 0; i < batch_context->num_cached_faults; ++i) { + uvm_fault_buffer_entry_t *current_entry = &batch_context->fault_cache[i]; + + if (current_entry->fault_source.utlb_id == utlb_id && + current_entry->va_space == va_space && current_entry->fault_address == addr) { + // We have found the page + if (current_entry->is_fatal) + ++(*fatal_faults); + else + ++(*non_fatal_faults); + } + } +} + +// Function that tells if there are addresses (reminder: they are aligned to 4K) +// with non-fatal faults only +static bool no_fatal_pages_in_utlb(uvm_fault_service_batch_context_t *batch_context, + NvU32 
start_index, + NvU32 utlb_id) +{ + NvU32 i; + + // Fault filtering is not allowed in the TLB-based fault cancel path + UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults); + + for (i = start_index; i < batch_context->num_cached_faults; ++i) { + uvm_fault_buffer_entry_t *current_entry = &batch_context->fault_cache[i]; + + if (current_entry->fault_source.utlb_id == utlb_id) { + // We have found a fault for the uTLB + NvU32 fatal_faults; + NvU32 non_fatal_faults; + + faults_for_page_in_utlb(batch_context, + current_entry->va_space, + current_entry->fault_address, + utlb_id, + &fatal_faults, + &non_fatal_faults); + + if (non_fatal_faults > 0 && fatal_faults == 0) + return true; + } + } + + return false; +} + +static void record_fatal_fault_helper(uvm_gpu_t *gpu, uvm_fault_buffer_entry_t *entry, UvmEventFatalReason reason) +{ + uvm_va_space_t *va_space; + + va_space = entry->va_space; + UVM_ASSERT(va_space); + uvm_va_space_down_read(va_space); + // Record fatal fault event + uvm_tools_record_gpu_fatal_fault(gpu->parent->id, va_space, entry, reason); + uvm_va_space_up_read(va_space); +} + +// This function tries to find and issue a cancel for each uTLB that meets +// the requirements to guarantee precise fault attribution: +// - No new faults can arrive on the uTLB (uTLB is in lockdown) +// - The first fault in the buffer for a specific uTLB is fatal +// - There are no other addresses in the uTLB with non-fatal faults only +// +// This function and the related helpers iterate over faults as read from HW, +// not through the ordered fault view +// +// TODO: Bug 1766754 +// This is very costly, although not critical for performance since we are +// cancelling. +// - Build a list with all the faults within a uTLB +// - Sort by uTLB id +static NV_STATUS try_to_cancel_utlbs(uvm_gpu_t *gpu, uvm_fault_service_batch_context_t *batch_context) +{ + NvU32 i; + + // Fault filtering is not allowed in the TLB-based fault cancel path + UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults); + + for (i = 0; i < batch_context->num_cached_faults; ++i) { + uvm_fault_buffer_entry_t *current_entry = &batch_context->fault_cache[i]; + uvm_fault_utlb_info_t *utlb = &batch_context->utlbs[current_entry->fault_source.utlb_id]; + NvU32 gpc_id = current_entry->fault_source.gpc_id; + NvU32 utlb_id = current_entry->fault_source.utlb_id; + NvU32 client_id = current_entry->fault_source.client_id; + + // Only fatal faults are considered + if (!current_entry->is_fatal) + continue; + + // Only consider uTLBs in lock-down + if (!utlb->in_lockdown) + continue; + + // Issue a single cancel per uTLB + if (utlb->cancelled) + continue; + + if (is_first_fault_in_utlb(batch_context, i) && + !no_fatal_pages_in_utlb(batch_context, i + 1, utlb_id)) { + NV_STATUS status; + + record_fatal_fault_helper(gpu, current_entry, current_entry->fatal_reason); + + status = push_cancel_on_gpu_targeted(gpu, + current_entry->instance_ptr, + gpc_id, + client_id, + &batch_context->tracker); + if (status != NV_OK) + return status; + + utlb->cancelled = true; + } + } + + return NV_OK; +} + +static NvU32 find_fatal_fault_in_utlb(uvm_fault_service_batch_context_t *batch_context, + NvU32 utlb_id) +{ + NvU32 i; + + // Fault filtering is not allowed in the TLB-based fault cancel path + UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults); + + for (i = 0; i < batch_context->num_cached_faults; ++i) { + if (batch_context->fault_cache[i].is_fatal && + 
batch_context->fault_cache[i].fault_source.utlb_id == utlb_id) + return i; + } + + return i; +} + +static NvU32 is_fatal_fault_in_buffer(uvm_fault_service_batch_context_t *batch_context, + uvm_fault_buffer_entry_t *fault) +{ + NvU32 i; + + // Fault filtering is not allowed in the TLB-based fault cancel path + UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults); + + for (i = 0; i < batch_context->num_cached_faults; ++i) { + uvm_fault_buffer_entry_t *current_entry = &batch_context->fault_cache[i]; + if (cmp_fault_instance_ptr(current_entry, fault) == 0 && + current_entry->fault_address == fault->fault_address && + current_entry->fault_access_type == fault->fault_access_type && + current_entry->fault_source.utlb_id == fault->fault_source.utlb_id) { + return true; + } + } + + return false; +} + +typedef enum +{ + // Only cancel faults flagged as fatal + FAULT_CANCEL_MODE_FATAL, + + // Cancel all faults in the batch unconditionally + FAULT_CANCEL_MODE_ALL, +} fault_cancel_mode_t; + +// Cancel faults in the given fault service batch context. The function provides +// two different modes depending on the value of cancel_mode: +// - If cancel_mode == FAULT_CANCEL_MODE_FATAL, only faults flagged as fatal +// will be cancelled. In this case, the reason reported to tools is the one +// contained in the fault entry itself. +// - If cancel_mode == FAULT_CANCEL_MODE_ALL, all faults will be cancelled +// unconditionally. In this case, the reason reported to tools for non-fatal +// faults is the one passed to this function. +static NV_STATUS cancel_faults_precise_va(uvm_gpu_t *gpu, + uvm_fault_service_batch_context_t *batch_context, + fault_cancel_mode_t cancel_mode, + UvmEventFatalReason reason) +{ + NV_STATUS status = NV_OK; + NV_STATUS fault_status; + uvm_va_space_t *va_space = NULL; + NvU32 i; + + UVM_ASSERT(gpu->parent->fault_cancel_va_supported); + if (cancel_mode == FAULT_CANCEL_MODE_ALL) + UVM_ASSERT(reason != UvmEventFatalReasonInvalid); + + for (i = 0; i < batch_context->num_coalesced_faults; ++i) { + uvm_fault_buffer_entry_t *current_entry = batch_context->ordered_fault_cache[i]; + + UVM_ASSERT(current_entry->va_space); + + if (current_entry->va_space != va_space) { + // Fault on a different va_space, drop the lock of the old one... + if (va_space != NULL) + uvm_va_space_up_read(va_space); + + va_space = current_entry->va_space; + + // ... and take the lock of the new one + uvm_va_space_down_read(va_space); + + // We don't need to check whether a buffer flush is required + // (due to VA range destruction). + // - For cancel_mode == FAULT_CANCEL_MODE_FATAL, once a fault is + // flagged as fatal we need to cancel it, even if its VA range no + // longer exists. + // - For cancel_mode == FAULT_CANCEL_MODE_ALL we don't care about + // any of this, we just want to trigger RC in RM. + } + + if (!uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->parent->id)) { + // If there is no GPU VA space for the GPU, ignore the fault. + // This can happen if the GPU VA did not exist in + // service_fault_batch(), or it was destroyed since then. + // This is to avoid targetting a PDB that might have been reused + // by another process. 
+ continue; + } + + // Cancel the fault + if (cancel_mode == FAULT_CANCEL_MODE_ALL || current_entry->is_fatal) { + uvm_fault_cancel_va_mode_t cancel_va_mode = current_entry->replayable.cancel_va_mode; + + // If cancelling unconditionally and the fault was not fatal, + // set the cancel reason passed to this function + if (!current_entry->is_fatal) { + current_entry->fatal_reason = reason; + cancel_va_mode = UVM_FAULT_CANCEL_VA_MODE_ALL; + } + + status = cancel_fault_precise_va(gpu, current_entry, cancel_va_mode); + if (status != NV_OK) + break; + } + } + + if (va_space != NULL) + uvm_va_space_up_read(va_space); + + // After cancelling the fatal faults, the fault buffer is flushed to remove + // any potential duplicated fault that may have been added while processing + // the faults in this batch. This flush also avoids doing unnecessary + // processing after the fatal faults have been cancelled, so all the rest + // are unlikely to remain after a replay because the context is probably in + // the process of dying. + fault_status = fault_buffer_flush_locked(gpu, + UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT, + UVM_FAULT_REPLAY_TYPE_START, + batch_context); + + // We report the first encountered error. + if (status == NV_OK) + status = fault_status; + + return status; +} + +// Function called when the system has found a global error and needs to +// trigger RC in RM. +// We cancel one entry per uTLB +static void cancel_fault_batch_tlb(uvm_gpu_t *gpu, + uvm_fault_service_batch_context_t *batch_context, + UvmEventFatalReason reason) +{ + NvU32 i; + + // Fault filtering is not allowed in the TLB-based fault cancel path + UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults); + + for (i = 0; i < batch_context->num_cached_faults; ++i) { + NV_STATUS status; + uvm_fault_buffer_entry_t *current_entry; + uvm_fault_utlb_info_t *utlb; + + current_entry = &batch_context->fault_cache[i]; + utlb = &batch_context->utlbs[current_entry->fault_source.utlb_id]; + + // If this uTLB has been already cancelled, skip it + if (utlb->cancelled) + continue; + + record_fatal_fault_helper(gpu, current_entry, reason); + + // Although the global cancellation method can be used here instead of + // targeted, we still use the targeted method since this function is + // only invoked in GPUs without support for VA fault cancellation, for + // which the targeted version is already required in + // cancel_faults_precise_tlb(). To maintain consistency, we use the + // targeted variant in both cases. + status = push_cancel_on_gpu_targeted(gpu, + current_entry->instance_ptr, + current_entry->fault_source.gpc_id, + current_entry->fault_source.client_id, + &batch_context->tracker); + if (status != NV_OK) + break; + + utlb->cancelled = true; + } +} + +static void cancel_fault_batch(uvm_gpu_t *gpu, + uvm_fault_service_batch_context_t *batch_context, + UvmEventFatalReason reason) +{ + if (gpu->parent->fault_cancel_va_supported) { + cancel_faults_precise_va(gpu, batch_context, FAULT_CANCEL_MODE_ALL, reason); + return; + } + + cancel_fault_batch_tlb(gpu, batch_context, reason); +} + + +// Current fault cancel algorithm +// +// 1- Disable prefetching to avoid new requests keep coming and flooding the +// buffer. 
+// LOOP
+// 2- Record one fatal fault per uTLB to check if it shows up after the replay
+// 3- Flush fault buffer (REPLAY_TYPE_START_ACK_ALL to prevent new faults from
+//    coming to TLBs with pending faults)
+// 4- Wait for replay to finish
+// 5- Fetch all faults from buffer
+// 6- Check what uTLBs are in lockdown mode and can be cancelled
+// 7- Preprocess faults (order per va_space, fault address, access type)
+// 8- Service all non-fatal faults and mark all non-serviceable faults as fatal
+//    8.1- If fatal faults are not found, we are done
+// 9- Search for a uTLB which can be targeted for cancel, as described in
+//    try_to_cancel_utlbs. If found, cancel it.
+// END LOOP
+// 10- Re-enable prefetching
+//
+// NOTE: prefetch faults MUST NOT trigger fault cancel. We make sure that no
+// prefetch faults are left in the buffer by disabling prefetching and
+// flushing the fault buffer afterwards (prefetch faults are not replayed and,
+// therefore, will not show up again)
+static NV_STATUS cancel_faults_precise_tlb(uvm_gpu_t *gpu, uvm_fault_service_batch_context_t *batch_context)
+{
+    NV_STATUS status;
+    NV_STATUS tracker_status;
+    uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable;
+    bool first = true;
+
+    UVM_ASSERT(gpu->parent->replayable_faults_supported);
+
+    // 1) Disable prefetching to avoid new requests keep coming and flooding
+    // the buffer
+    if (gpu->parent->fault_buffer_info.prefetch_faults_enabled)
+        gpu->parent->arch_hal->disable_prefetch_faults(gpu->parent);
+
+    while (1) {
+        NvU32 utlb_id;
+
+        // 2) Record one fatal fault per uTLB to check if it shows up after
+        // the replay. This is used to handle the case in which the uTLB is
+        // being cancelled from behind our backs by RM. See the comment in
+        // step 6.
+        for (utlb_id = 0; utlb_id <= batch_context->max_utlb_id; ++utlb_id) {
+            uvm_fault_utlb_info_t *utlb = &batch_context->utlbs[utlb_id];
+
+            if (!first && utlb->has_fatal_faults) {
+                NvU32 idx = find_fatal_fault_in_utlb(batch_context, utlb_id);
+                UVM_ASSERT(idx < batch_context->num_cached_faults);
+
+                utlb->prev_fatal_fault = batch_context->fault_cache[idx];
+            }
+            else {
+                utlb->prev_fatal_fault.fault_address = (NvU64)-1;
+            }
+        }
+        first = false;
+
+        // 3) Flush fault buffer. After this call, all faults from any of the
+        // faulting uTLBs are before PUT. New faults from other uTLBs can keep
+        // arriving.
Therefore, in each iteration we just try to cancel faults + // from uTLBs that contained fatal faults in the previous iterations + // and will cause the TLB to stop generating new page faults after the + // following replay with type UVM_FAULT_REPLAY_TYPE_START_ACK_ALL + status = fault_buffer_flush_locked(gpu, + UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT, + UVM_FAULT_REPLAY_TYPE_START_ACK_ALL, + batch_context); + if (status != NV_OK) + break; + + // 4) Wait for replay to finish + status = uvm_tracker_wait(&replayable_faults->replay_tracker); + if (status != NV_OK) + break; + + batch_context->num_invalid_prefetch_faults = 0; + batch_context->num_replays = 0; + batch_context->has_fatal_faults = false; + batch_context->has_throttled_faults = false; + + // 5) Fetch all faults from buffer + fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_ALL); + ++batch_context->batch_id; + + UVM_ASSERT(batch_context->num_cached_faults == batch_context->num_coalesced_faults); + + // No more faults left, we are done + if (batch_context->num_cached_faults == 0) + break; + + // 6) Check what uTLBs are in lockdown mode and can be cancelled + for (utlb_id = 0; utlb_id <= batch_context->max_utlb_id; ++utlb_id) { + uvm_fault_utlb_info_t *utlb = &batch_context->utlbs[utlb_id]; + + utlb->in_lockdown = false; + utlb->cancelled = false; + + if (utlb->prev_fatal_fault.fault_address != (NvU64)-1) { + // If a previously-reported fault shows up again we can "safely" + // assume that the uTLB that contains it is in lockdown mode + // and no new translations will show up before cancel. + // A fatal fault could only be removed behind our backs by RM + // issuing a cancel, which only happens when RM is resetting the + // engine. That means the instance pointer can't generate any + // new faults, so we won't have an ABA problem where a new + // fault arrives with the same state. + if (is_fatal_fault_in_buffer(batch_context, &utlb->prev_fatal_fault)) + utlb->in_lockdown = true; + } + } + + // 7) Preprocess faults + status = preprocess_fault_batch(gpu, batch_context); + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + continue; + else if (status != NV_OK) + break; + + // 8) Service all non-fatal faults and mark all non-serviceable faults + // as fatal + status = service_fault_batch(gpu, FAULT_SERVICE_MODE_CANCEL, batch_context); + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + continue; + + UVM_ASSERT(batch_context->num_replays == 0); + if (status == NV_ERR_NO_MEMORY) + continue; + else if (status != NV_OK) + break; + + // No more fatal faults left, we are done + if (!batch_context->has_fatal_faults) + break; + + // 9) Search for uTLBs that contain fatal faults and meet the + // requirements to be cancelled + try_to_cancel_utlbs(gpu, batch_context); + } + + // 10) Re-enable prefetching + if (gpu->parent->fault_buffer_info.prefetch_faults_enabled) + gpu->parent->arch_hal->enable_prefetch_faults(gpu->parent); + + if (status == NV_OK) + status = push_replay_on_gpu(gpu, UVM_FAULT_REPLAY_TYPE_START, batch_context); + + tracker_status = uvm_tracker_wait(&batch_context->tracker); + + return status == NV_OK? 
tracker_status: status; +} + +static NV_STATUS cancel_faults_precise(uvm_gpu_t *gpu, uvm_fault_service_batch_context_t *batch_context) +{ + UVM_ASSERT(batch_context->has_fatal_faults); + if (gpu->parent->fault_cancel_va_supported) { + return cancel_faults_precise_va(gpu, + batch_context, + FAULT_CANCEL_MODE_FATAL, + UvmEventFatalReasonInvalid); + } + + return cancel_faults_precise_tlb(gpu, batch_context); +} + +static void enable_disable_prefetch_faults(uvm_parent_gpu_t *parent_gpu, uvm_fault_service_batch_context_t *batch_context) +{ + if (!parent_gpu->prefetch_fault_supported) + return; + + // If more than 66% of faults are invalid prefetch accesses, disable + // prefetch faults for a while. + // Some tests rely on this logic (and ratio) to correctly disable prefetch + // fault reporting. If the logic changes, the tests will have to be changed. + if (parent_gpu->fault_buffer_info.prefetch_faults_enabled && + uvm_perf_reenable_prefetch_faults_lapse_msec > 0 && + ((batch_context->num_invalid_prefetch_faults * 3 > parent_gpu->fault_buffer_info.max_batch_size * 2) || + (uvm_enable_builtin_tests && + parent_gpu->rm_info.isSimulated && + batch_context->num_invalid_prefetch_faults > 5))) { + uvm_gpu_disable_prefetch_faults(parent_gpu); + } + else if (!parent_gpu->fault_buffer_info.prefetch_faults_enabled) { + NvU64 lapse = NV_GETTIME() - parent_gpu->fault_buffer_info.disable_prefetch_faults_timestamp; + + // Reenable prefetch faults after some time + if (lapse > ((NvU64)uvm_perf_reenable_prefetch_faults_lapse_msec * (1000 * 1000))) + uvm_gpu_enable_prefetch_faults(parent_gpu); + } +} + +void uvm_gpu_service_replayable_faults(uvm_gpu_t *gpu) +{ + NvU32 num_replays = 0; + NvU32 num_batches = 0; + NvU32 num_throttled = 0; + NV_STATUS status = NV_OK; + uvm_replayable_fault_buffer_info_t *replayable_faults = &gpu->parent->fault_buffer_info.replayable; + uvm_fault_service_batch_context_t *batch_context = &replayable_faults->batch_service_context; + + UVM_ASSERT(gpu->parent->replayable_faults_supported); + + uvm_tracker_init(&batch_context->tracker); + + // Process all faults in the buffer + while (1) { + if (num_throttled >= uvm_perf_fault_max_throttle_per_service || + num_batches >= uvm_perf_fault_max_batches_per_service) { + break; + } + + batch_context->num_invalid_prefetch_faults = 0; + batch_context->num_duplicate_faults = 0; + batch_context->num_replays = 0; + batch_context->has_fatal_faults = false; + batch_context->has_throttled_faults = false; + + fetch_fault_buffer_entries(gpu, batch_context, FAULT_FETCH_MODE_BATCH_READY); + if (batch_context->num_cached_faults == 0) + break; + + ++batch_context->batch_id; + + status = preprocess_fault_batch(gpu, batch_context); + + num_replays += batch_context->num_replays; + + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + continue; + else if (status != NV_OK) + break; + + status = service_fault_batch(gpu, FAULT_SERVICE_MODE_REGULAR, batch_context); + + // We may have issued replays even if status != NV_OK if + // UVM_PERF_FAULT_REPLAY_POLICY_BLOCK is being used or the fault buffer + // was flushed + num_replays += batch_context->num_replays; + + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + continue; + + enable_disable_prefetch_faults(gpu->parent, batch_context); + + if (status != NV_OK) { + // Unconditionally cancel all faults to trigger RC. This will not + // provide precise attribution, but this case handles global + // errors such as OOM or ECC where it's not reasonable to + // guarantee precise attribution. 
We ignore the return value of + // the cancel operation since this path is already returning an + // error code. + cancel_fault_batch(gpu, batch_context, uvm_tools_status_to_fatal_fault_reason(status)); + break; + } + + if (batch_context->has_fatal_faults) { + status = uvm_tracker_wait(&batch_context->tracker); + if (status == NV_OK) + status = cancel_faults_precise(gpu, batch_context); + + break; + } + + if (replayable_faults->replay_policy == UVM_PERF_FAULT_REPLAY_POLICY_BATCH) { + status = push_replay_on_gpu(gpu, UVM_FAULT_REPLAY_TYPE_START, batch_context); + if (status != NV_OK) + break; + ++num_replays; + } + else if (replayable_faults->replay_policy == UVM_PERF_FAULT_REPLAY_POLICY_BATCH_FLUSH) { + uvm_gpu_buffer_flush_mode_t flush_mode = UVM_GPU_BUFFER_FLUSH_MODE_CACHED_PUT; + + if (batch_context->num_duplicate_faults * 100 > + batch_context->num_cached_faults * replayable_faults->replay_update_put_ratio) { + flush_mode = UVM_GPU_BUFFER_FLUSH_MODE_UPDATE_PUT; + } + + status = fault_buffer_flush_locked(gpu, flush_mode, UVM_FAULT_REPLAY_TYPE_START, batch_context); + if (status != NV_OK) + break; + ++num_replays; + status = uvm_tracker_wait(&replayable_faults->replay_tracker); + if (status != NV_OK) + break; + } + + if (batch_context->has_throttled_faults) + ++num_throttled; + + ++num_batches; + } + + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + status = NV_OK; + + // Make sure that we issue at least one replay if no replay has been + // issued yet to avoid dropping faults that do not show up in the buffer + if ((status == NV_OK && replayable_faults->replay_policy == UVM_PERF_FAULT_REPLAY_POLICY_ONCE) || + num_replays == 0) + status = push_replay_on_gpu(gpu, UVM_FAULT_REPLAY_TYPE_START, batch_context); + + uvm_tracker_deinit(&batch_context->tracker); + + if (status != NV_OK) + UVM_DBG_PRINT("Error servicing replayable faults on GPU: %s\n", uvm_gpu_name(gpu)); +} + +void uvm_gpu_enable_prefetch_faults(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(parent_gpu->isr.replayable_faults.handling); + UVM_ASSERT(parent_gpu->prefetch_fault_supported); + + if (!parent_gpu->fault_buffer_info.prefetch_faults_enabled) { + parent_gpu->arch_hal->enable_prefetch_faults(parent_gpu); + parent_gpu->fault_buffer_info.prefetch_faults_enabled = true; + } +} + +void uvm_gpu_disable_prefetch_faults(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT(parent_gpu->isr.replayable_faults.handling); + UVM_ASSERT(parent_gpu->prefetch_fault_supported); + + if (parent_gpu->fault_buffer_info.prefetch_faults_enabled) { + parent_gpu->arch_hal->disable_prefetch_faults(parent_gpu); + parent_gpu->fault_buffer_info.prefetch_faults_enabled = false; + parent_gpu->fault_buffer_info.disable_prefetch_faults_timestamp = NV_GETTIME(); + } +} + +const char *uvm_perf_fault_replay_policy_string(uvm_perf_fault_replay_policy_t replay_policy) +{ + BUILD_BUG_ON(UVM_PERF_FAULT_REPLAY_POLICY_MAX != 4); + + switch (replay_policy) { + UVM_ENUM_STRING_CASE(UVM_PERF_FAULT_REPLAY_POLICY_BLOCK); + UVM_ENUM_STRING_CASE(UVM_PERF_FAULT_REPLAY_POLICY_BATCH); + UVM_ENUM_STRING_CASE(UVM_PERF_FAULT_REPLAY_POLICY_BATCH_FLUSH); + UVM_ENUM_STRING_CASE(UVM_PERF_FAULT_REPLAY_POLICY_ONCE); + UVM_ENUM_STRING_DEFAULT(); + } +} + +NV_STATUS uvm_test_get_prefetch_faults_reenable_lapse(UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE_PARAMS *params, + struct file *filp) +{ + params->reenable_lapse = uvm_perf_reenable_prefetch_faults_lapse_msec; + + return NV_OK; +} + +NV_STATUS uvm_test_set_prefetch_faults_reenable_lapse(UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE_PARAMS 
*params,
+                                                        struct file *filp)
+{
+    uvm_perf_reenable_prefetch_faults_lapse_msec = params->reenable_lapse;
+
+    return NV_OK;
+}
+
+NV_STATUS uvm_test_drain_replayable_faults(UVM_TEST_DRAIN_REPLAYABLE_FAULTS_PARAMS *params, struct file *filp)
+{
+    uvm_gpu_t *gpu;
+    NV_STATUS status = NV_OK;
+    uvm_spin_loop_t spin;
+    bool pending = true;
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+
+    gpu = uvm_va_space_retain_gpu_by_uuid(va_space, &params->gpu_uuid);
+    if (!gpu)
+        return NV_ERR_INVALID_DEVICE;
+
+    uvm_spin_loop_init(&spin);
+
+    do {
+        uvm_gpu_replayable_faults_isr_lock(gpu->parent);
+        pending = uvm_gpu_replayable_faults_pending(gpu->parent);
+        uvm_gpu_replayable_faults_isr_unlock(gpu->parent);
+
+        if (!pending)
+            break;
+
+        if (fatal_signal_pending(current)) {
+            status = NV_ERR_SIGNAL_PENDING;
+            break;
+        }
+
+        UVM_SPIN_LOOP(&spin);
+    } while (uvm_spin_loop_elapsed(&spin) < params->timeout_ns);
+
+    if (pending && status == NV_OK)
+        status = NV_ERR_TIMEOUT;
+
+    uvm_gpu_release(gpu);
+
+    return status;
+}
diff --git a/kernel-open/nvidia-uvm/uvm_gpu_replayable_faults.h b/kernel-open/nvidia-uvm/uvm_gpu_replayable_faults.h
new file mode 100644
index 000000000..2e10f6ebc
--- /dev/null
+++ b/kernel-open/nvidia-uvm/uvm_gpu_replayable_faults.h
@@ -0,0 +1,78 @@
+/*******************************************************************************
+    Copyright (c) 2015 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+        The above copyright notice and this permission notice shall be
+        included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef __UVM_GPU_PAGE_FAULT_H__
+#define __UVM_GPU_PAGE_FAULT_H__
+
+#include "nvtypes.h"
+#include "uvm_types.h"
+#include "uvm_hal_types.h"
+#include "uvm_tracker.h"
+
+typedef enum
+{
+    // Issue a fault replay after all faults for a block within a batch have been serviced
+    UVM_PERF_FAULT_REPLAY_POLICY_BLOCK = 0,
+
+    // Issue a fault replay after each fault batch has been serviced
+    UVM_PERF_FAULT_REPLAY_POLICY_BATCH,
+
+    // Like UVM_PERF_FAULT_REPLAY_POLICY_BATCH but only one batch of faults is serviced. The fault buffer is flushed
+    // before issuing the replay. The potential benefit is that we can resume execution of some SMs earlier, if SMs
+    // are faulting on different sets of pages.
+ UVM_PERF_FAULT_REPLAY_POLICY_BATCH_FLUSH, + + // Issue a fault replay after all faults in the buffer have been serviced + UVM_PERF_FAULT_REPLAY_POLICY_ONCE, + + // TODO: Bug 1768226: Implement uTLB-aware fault replay policy + + UVM_PERF_FAULT_REPLAY_POLICY_MAX, +} uvm_perf_fault_replay_policy_t; + +const char *uvm_perf_fault_replay_policy_string(uvm_perf_fault_replay_policy_t fault_replay); + +NV_STATUS uvm_gpu_fault_buffer_init(uvm_parent_gpu_t *parent_gpu); +void uvm_gpu_fault_buffer_deinit(uvm_parent_gpu_t *parent_gpu); + +void uvm_gpu_fault_buffer_resume(uvm_parent_gpu_t *parent_gpu); + +bool uvm_gpu_replayable_faults_pending(uvm_parent_gpu_t *parent_gpu); + +// Clear valid bit for all remaining unserviced faults in the buffer, set GET to +// PUT, and push a fault replay of type UVM_FAULT_REPLAY_TYPE_START. It does not +// wait for the replay to complete before returning. The pushed replay is added +// to the GPU's replay_tracker. +// +// LOCKING: Takes gpu->isr_lock +NV_STATUS uvm_gpu_fault_buffer_flush(uvm_gpu_t *gpu); + +// Enable/disable HW support for prefetch-initiated faults +void uvm_gpu_enable_prefetch_faults(uvm_parent_gpu_t *parent_gpu); +void uvm_gpu_disable_prefetch_faults(uvm_parent_gpu_t *parent_gpu); + +// Service pending replayable faults on the given GPU. This function must be +// only called from the ISR bottom half +void uvm_gpu_service_replayable_faults(uvm_gpu_t *gpu); + +#endif // __UVM_GPU_PAGE_FAULT_H__ diff --git a/kernel-open/nvidia-uvm/uvm_gpu_semaphore.c b/kernel-open/nvidia-uvm/uvm_gpu_semaphore.c new file mode 100644 index 000000000..f649de09a --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_semaphore.c @@ -0,0 +1,551 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_gpu_semaphore.h" +#include "uvm_lock.h" +#include "uvm_global.h" +#include "uvm_kvmalloc.h" + +#define UVM_SEMAPHORE_SIZE 4 +#define UVM_SEMAPHORE_PAGE_SIZE PAGE_SIZE +#define UVM_SEMAPHORE_COUNT_PER_PAGE (PAGE_SIZE / UVM_SEMAPHORE_SIZE) + +// The top nibble of the canary base is intentionally 0. The rest of the value +// is arbitrary. See the comments below on make_canary. 
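+// UVM_SEMAPHORE_CANARY_MASK selects that top nibble; the low 28 bits always
+// hold UVM_SEMAPHORE_CANARY_BASE, which is how is_canary() recognizes a freed
+// payload regardless of which quadrant make_canary() placed it in.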
+#define UVM_SEMAPHORE_CANARY_BASE 0x0badc0de +#define UVM_SEMAPHORE_CANARY_MASK 0xf0000000 + +struct uvm_gpu_semaphore_pool_struct +{ + // The GPU owning the pool + uvm_gpu_t *gpu; + + // List of all the semaphore pages belonging to the pool + struct list_head pages; + + // Count of free semaphores among all the pages + NvU32 free_semaphores_count; + + // Lock protecting the state of the pool + uvm_mutex_t mutex; +}; + +struct uvm_gpu_semaphore_pool_page_struct +{ + // Allocation backing the page + uvm_rm_mem_t *memory; + + // Pool the page is part of + uvm_gpu_semaphore_pool_t *pool; + + // Node in the list of all pages in a semaphore pool + struct list_head all_pages_node; + + // Mask indicating free semaphore indices within the page + DECLARE_BITMAP(free_semaphores, UVM_SEMAPHORE_COUNT_PER_PAGE); +}; + +static NvU32 get_index(uvm_gpu_semaphore_t *semaphore) +{ + NvU32 offset; + NvU32 index; + + UVM_ASSERT(semaphore->payload != NULL); + UVM_ASSERT(semaphore->page != NULL); + + offset = (char*)semaphore->payload - (char*)uvm_rm_mem_get_cpu_va(semaphore->page->memory); + UVM_ASSERT(offset % UVM_SEMAPHORE_SIZE == 0); + + index = offset / UVM_SEMAPHORE_SIZE; + UVM_ASSERT(index < UVM_SEMAPHORE_COUNT_PER_PAGE); + + return index; +} + +// Use canary values on debug builds to catch semaphore use-after-free. We can +// catch release-after-free by simply setting the payload to a known value at +// free then checking it on alloc or pool free, but catching acquire-after-free +// is a little trickier. +// +// In order to make still-pending GEQ acquires stall indefinitely we need to +// reduce the current payload as much as we can, subject to two restrictions: +// +// 1) The pending acquires could be comparing against values much less than and +// much greater than the current payload, so we have to set the payload to a +// value reasonably less than the acquires which we might expect to be +// pending. +// +// 2) Going over halfway past a pending acquire on the 32-bit number wheel will +// cause Host to wrap and think the acquire succeeded. So we shouldn't reduce +// by more than 2^31. +// +// To handle these restrictions we'll deal with quadrants of 2^32, under the +// assumption that it's unlikely for a payload to outpace a pending acquire by +// more than 2^30. +// +// We also need for the base value to have some 0s in the upper significant +// bits, otherwise those bits might carry us past the quadrant boundary when we +// OR them in. 
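+//
+// Illustrative example (values assumed for exposition, not taken from a real
+// workload): make_canary(0) computes 0 - 2^30 = 0xC0000000, keeps its top
+// nibble and ORs in the base, producing 0xCBADC0DE. A stale GEQ acquire
+// waiting for payload 1 still sees a value legitimately below it on the
+// 32-bit wheel, so it keeps stalling instead of spuriously succeeding.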
+static NvU32 make_canary(NvU32 payload) +{ + NvU32 prev_quadrant = payload - (1 << 30); + return (prev_quadrant & UVM_SEMAPHORE_CANARY_MASK) | UVM_SEMAPHORE_CANARY_BASE; +} + +static bool is_canary(NvU32 val) +{ + return (val & ~UVM_SEMAPHORE_CANARY_MASK) == UVM_SEMAPHORE_CANARY_BASE; +} + +static NV_STATUS pool_alloc_page(uvm_gpu_semaphore_pool_t *pool) +{ + NV_STATUS status; + uvm_gpu_semaphore_pool_page_t *pool_page; + NvU32 *payloads; + size_t i; + + uvm_assert_mutex_locked(&pool->mutex); + + pool_page = uvm_kvmalloc_zero(sizeof(*pool_page)); + + if (!pool_page) + return NV_ERR_NO_MEMORY; + + pool_page->pool = pool; + + status = uvm_rm_mem_alloc_and_map_all(pool->gpu, UVM_RM_MEM_TYPE_SYS, UVM_SEMAPHORE_PAGE_SIZE, &pool_page->memory); + if (status != NV_OK) + goto error; + + // All semaphores are initially free + bitmap_fill(pool_page->free_semaphores, UVM_SEMAPHORE_COUNT_PER_PAGE); + + list_add(&pool_page->all_pages_node, &pool->pages); + pool->free_semaphores_count += UVM_SEMAPHORE_COUNT_PER_PAGE; + + // Initialize the semaphore payloads to known values + if (UVM_IS_DEBUG()) { + payloads = uvm_rm_mem_get_cpu_va(pool_page->memory); + for (i = 0; i < UVM_SEMAPHORE_COUNT_PER_PAGE; i++) + payloads[i] = make_canary(0); + } + + return NV_OK; + +error: + uvm_kvfree(pool_page); + return status; +} + +static void pool_free_page(uvm_gpu_semaphore_pool_page_t *page) +{ + uvm_gpu_semaphore_pool_t *pool; + NvU32 *payloads; + size_t i; + + UVM_ASSERT(page); + pool = page->pool; + + uvm_assert_mutex_locked(&pool->mutex); + + // Assert that no semaphores are still allocated + UVM_ASSERT(bitmap_full(page->free_semaphores, UVM_SEMAPHORE_COUNT_PER_PAGE)); + UVM_ASSERT_MSG(pool->free_semaphores_count >= UVM_SEMAPHORE_COUNT_PER_PAGE, + "count: %u\n", + pool->free_semaphores_count); + + // Check for semaphore release-after-free + if (UVM_IS_DEBUG()) { + payloads = uvm_rm_mem_get_cpu_va(page->memory); + for (i = 0; i < UVM_SEMAPHORE_COUNT_PER_PAGE; i++) + UVM_ASSERT(is_canary(payloads[i])); + } + + pool->free_semaphores_count -= UVM_SEMAPHORE_COUNT_PER_PAGE; + list_del(&page->all_pages_node); + uvm_rm_mem_free(page->memory); + uvm_kvfree(page); +} + +NV_STATUS uvm_gpu_semaphore_alloc(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_semaphore_t *semaphore) +{ + NV_STATUS status = NV_OK; + uvm_gpu_semaphore_pool_page_t *page; + + memset(semaphore, 0, sizeof(*semaphore)); + + uvm_mutex_lock(&pool->mutex); + + if (pool->free_semaphores_count == 0) + status = pool_alloc_page(pool); + + if (status != NV_OK) + goto done; + + list_for_each_entry(page, &pool->pages, all_pages_node) { + NvU32 semaphore_index = find_first_bit(page->free_semaphores, UVM_SEMAPHORE_COUNT_PER_PAGE); + if (semaphore_index == UVM_SEMAPHORE_COUNT_PER_PAGE) + continue; + + semaphore->payload = (NvU32*)((char*)uvm_rm_mem_get_cpu_va(page->memory) + semaphore_index * UVM_SEMAPHORE_SIZE); + semaphore->page = page; + + // Check for semaphore release-after-free + UVM_ASSERT(is_canary(uvm_gpu_semaphore_get_payload(semaphore))); + + uvm_gpu_semaphore_set_payload(semaphore, 0); + + __clear_bit(semaphore_index, page->free_semaphores); + --pool->free_semaphores_count; + + goto done; + } + + UVM_ASSERT_MSG(0, "Failed to find a semaphore after allocating a new page\n"); + status = NV_ERR_GENERIC; + +done: + uvm_mutex_unlock(&pool->mutex); + + return status; +} + +void uvm_gpu_semaphore_free(uvm_gpu_semaphore_t *semaphore) +{ + uvm_gpu_semaphore_pool_page_t *page; + uvm_gpu_semaphore_pool_t *pool; + NvU32 index; + + UVM_ASSERT(semaphore); + + // 
uvm_gpu_semaphore_t is to be embedded in other structures so it should always + // be accessible, but it may not be initialized in error cases. Early out if + // page is NULL indicating the semaphore hasn't been allocated successfully. + page = semaphore->page; + if (page == NULL) + return; + + pool = page->pool; + index = get_index(semaphore); + + // Write a known value lower than the current payload in an attempt to catch + // release-after-free and acquire-after-free. + if (UVM_IS_DEBUG()) + uvm_gpu_semaphore_set_payload(semaphore, make_canary(uvm_gpu_semaphore_get_payload(semaphore))); + + uvm_mutex_lock(&pool->mutex); + + semaphore->page = NULL; + semaphore->payload = NULL; + + ++pool->free_semaphores_count; + __set_bit(index, page->free_semaphores); + + uvm_mutex_unlock(&pool->mutex); +} + +NV_STATUS uvm_gpu_semaphore_pool_create(uvm_gpu_t *gpu, uvm_gpu_semaphore_pool_t **pool_out) +{ + uvm_gpu_semaphore_pool_t *pool; + pool = uvm_kvmalloc_zero(sizeof(*pool)); + + if (!pool) + return NV_ERR_NO_MEMORY; + + uvm_mutex_init(&pool->mutex, UVM_LOCK_ORDER_GPU_SEMAPHORE_POOL); + + INIT_LIST_HEAD(&pool->pages); + + pool->free_semaphores_count = 0; + pool->gpu = gpu; + + *pool_out = pool; + + return NV_OK; +} + +void uvm_gpu_semaphore_pool_destroy(uvm_gpu_semaphore_pool_t *pool) +{ + uvm_gpu_semaphore_pool_page_t *page; + uvm_gpu_semaphore_pool_page_t *next_page; + + if (!pool) + return; + + // No other thread should be touching the pool once it's being destroyed + uvm_assert_mutex_unlocked(&pool->mutex); + + // Keep pool_free_page happy + uvm_mutex_lock(&pool->mutex); + + list_for_each_entry_safe(page, next_page, &pool->pages, all_pages_node) + pool_free_page(page); + + UVM_ASSERT_MSG(pool->free_semaphores_count == 0, "unused: %u", pool->free_semaphores_count); + UVM_ASSERT(list_empty(&pool->pages)); + + uvm_mutex_unlock(&pool->mutex); + + uvm_kvfree(pool); +} + +NV_STATUS uvm_gpu_semaphore_pool_map_gpu(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + uvm_gpu_semaphore_pool_page_t *page; + + UVM_ASSERT(pool); + UVM_ASSERT(gpu); + + uvm_mutex_lock(&pool->mutex); + + list_for_each_entry(page, &pool->pages, all_pages_node) { + status = uvm_rm_mem_map_gpu(page->memory, gpu); + if (status != NV_OK) + goto done; + } + +done: + uvm_mutex_unlock(&pool->mutex); + + return status; +} + +void uvm_gpu_semaphore_pool_unmap_gpu(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_t *gpu) +{ + uvm_gpu_semaphore_pool_page_t *page; + + UVM_ASSERT(pool); + UVM_ASSERT(gpu); + + uvm_mutex_lock(&pool->mutex); + + list_for_each_entry(page, &pool->pages, all_pages_node) + uvm_rm_mem_unmap_gpu(page->memory, gpu); + + uvm_mutex_unlock(&pool->mutex); +} + +NvU64 uvm_gpu_semaphore_get_gpu_uvm_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu) +{ + return uvm_gpu_semaphore_get_gpu_va(semaphore, gpu, false); +} + +NvU64 uvm_gpu_semaphore_get_gpu_proxy_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu) +{ + return uvm_gpu_semaphore_get_gpu_va(semaphore, gpu, true); +} + +NvU64 uvm_gpu_semaphore_get_gpu_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu, bool is_proxy_va_space) +{ + NvU32 index = get_index(semaphore); + NvU64 base_va = uvm_rm_mem_get_gpu_va(semaphore->page->memory, gpu, is_proxy_va_space); + + return base_va + UVM_SEMAPHORE_SIZE * index; +} + +NvU32 uvm_gpu_semaphore_get_payload(uvm_gpu_semaphore_t *semaphore) +{ + return UVM_GPU_READ_ONCE(*semaphore->payload); +} + +void uvm_gpu_semaphore_set_payload(uvm_gpu_semaphore_t *semaphore, NvU32 payload) +{ + // Provide a guarantee that all 
memory accesses prior to setting the payload + // won't be moved past it. + // Use a big hammer mb() as set_payload() is not used in any performance path + // today. + // This could likely be optimized to be either an smp_store_release() or use + // an smp_mb__before_atomic() barrier. The former is a recent addition to + // kernel though, and it's not clear whether combining the latter with a + // regular 32bit store is well defined in all cases. Both also seem to risk + // being optimized out on non-SMP configs (we need them for interacting with + // the GPU correctly even on non-SMP). + mb(); + UVM_GPU_WRITE_ONCE(*semaphore->payload, payload); +} + +// This function is intended to catch channels which have been left dangling in +// trackers after their owning GPUs have been destroyed. +static bool tracking_semaphore_check_gpu(uvm_gpu_tracking_semaphore_t *tracking_sem) +{ + uvm_gpu_t *gpu = tracking_sem->semaphore.page->pool->gpu; + uvm_gpu_t *table_gpu; + + UVM_ASSERT_MSG(gpu->magic == UVM_GPU_MAGIC_VALUE, "Corruption detected: magic number is 0x%llx\n", gpu->magic); + + // It's ok for the GPU to not be in the global table, since add_gpu operates + // on trackers before adding the GPU to the table, and remove_gpu operates + // on trackers after removing the GPU. We rely on the magic value to catch + // those cases. + // + // But if a pointer is in the table it must match. + table_gpu = uvm_gpu_get(gpu->global_id); + if (table_gpu) + UVM_ASSERT(table_gpu == gpu); + + // Return a boolean so this function can be used in assertions for + // conditional compilation + return true; +} + +NV_STATUS uvm_gpu_tracking_semaphore_alloc(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_tracking_semaphore_t *tracking_sem) +{ + NV_STATUS status; + + memset(tracking_sem, 0, sizeof(*tracking_sem)); + + status = uvm_gpu_semaphore_alloc(pool, &tracking_sem->semaphore); + if (status != NV_OK) + return status; + + UVM_ASSERT(uvm_gpu_semaphore_get_payload(&tracking_sem->semaphore) == 0); + + uvm_spin_lock_init(&tracking_sem->lock, UVM_LOCK_ORDER_LEAF); + atomic64_set(&tracking_sem->completed_value, 0); + tracking_sem->queued_value = 0; + + return NV_OK; +} + +void uvm_gpu_tracking_semaphore_free(uvm_gpu_tracking_semaphore_t *tracking_sem) +{ + uvm_gpu_semaphore_free(&tracking_sem->semaphore); +} + +static NvU64 update_completed_value_locked(uvm_gpu_tracking_semaphore_t *tracking_semaphore) +{ + NvU64 old_value = atomic64_read(&tracking_semaphore->completed_value); + // The semaphore value is the bottom 32 bits of completed_value + NvU32 old_sem_value = (NvU32)old_value; + NvU32 new_sem_value = uvm_gpu_semaphore_get_payload(&tracking_semaphore->semaphore); + NvU64 new_value; + + uvm_assert_spinlock_locked(&tracking_semaphore->lock); + + // The following logic to update the completed value is very subtle, it + // helps to read https://www.kernel.org/doc/Documentation/memory-barriers.txt + // before going through this code. + + if (old_sem_value == new_sem_value) { + // No progress since the last update. + // No additional memory barrier required in this case as completed_value + // is always updated under the spinlock that this thread just acquired. + // That guarantees full ordering with all the accesses the thread that + // updated completed_value did under the lock including the GPU + // semaphore read. 
+ return old_value; + } + + // Replace the bottom 32-bits with the new semaphore value + new_value = (old_value & 0xFFFFFFFF00000000ull) | new_sem_value; + + // If we've wrapped around, add 2^32 to the value + // Notably the user of the GPU tracking semaphore needs to guarantee that + // the value is updated often enough to notice the wrap around each time it + // happens. In case of a channel tracking semaphore that's released for each + // push, it's easily guaranteed because of the small number of GPFIFO + // entries available per channel (there could be at most as many pending + // pushes as GPFIFO entries). + if (new_sem_value < old_sem_value) + new_value += 1ULL << 32; + + // Use an atomic write even though the spinlock is held so that the value can + // be (carefully) read atomically outside of the lock. + // + // atomic64_set() on its own doesn't imply any memory barriers and we need + // prior memory accesses (in particular the read of the GPU semaphore + // payload) by this thread to be visible to other threads that see the newly + // set completed_value. smp_mb__before_atomic() provides that ordering. + // + // Also see the comment and matching smp_mb__after_atomic() barrier in + // uvm_gpu_tracking_semaphore_is_value_completed(). + // + // Notably as of 4.3, atomic64_set_release() and atomic64_read_acquire() + // have been added that are exactly what we need and could be slightly + // faster on arm and powerpc than the implementation below. But at least in + // 4.3 the implementation looks broken for arm32 (it maps directly to + // smp_load_acquire() and that doesn't support 64-bit reads on 32-bit + // architectures) so instead of dealing with that just use a slightly bigger + // hammer. + smp_mb__before_atomic(); + atomic64_set(&tracking_semaphore->completed_value, new_value); + + // For this thread, we don't want any later accesses to be ordered above the + // GPU semaphore read. This could be accomplished by using a + // smp_load_acquire() for reading it, but given that it's also a pretty + // recent addition to the kernel, just leverage smp_mb__after_atomic() that + // guarantees that no accesses will be ordered above the atomic (and hence + // the GPU semaphore read). + // + // Notably the soon following uvm_spin_unlock() is a release barrier that + // allows later memory accesses to be reordered above it and hence doesn't + // provide the necessary ordering with the GPU semaphore read. + // + // Also notably this would still need to be handled if we ever switch to + // atomic64_set_release() and atomic64_read_acquire() for accessing + // completed_value. 
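+    // Editorial illustration (not part of the original source): suppose
+    // completed_value was 0x1FFFFFFF0 (payload 0xFFFFFFF0) and the GPU
+    // semaphore now reads back 0x10. The code above then computes
+    //   new_value = (0x100000000 | 0x10) + (1ULL << 32) = 0x200000010
+    // because the 32-bit payload wrapped around since the last update.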
+ smp_mb__after_atomic(); + + return new_value; +} + +NvU64 uvm_gpu_tracking_semaphore_update_completed_value(uvm_gpu_tracking_semaphore_t *tracking_semaphore) +{ + NvU64 completed; + + // Check that the GPU which owns the semaphore is still present + UVM_ASSERT(tracking_semaphore_check_gpu(tracking_semaphore)); + + uvm_spin_lock(&tracking_semaphore->lock); + + completed = update_completed_value_locked(tracking_semaphore); + + uvm_spin_unlock(&tracking_semaphore->lock); + + return completed; +} + +bool uvm_gpu_tracking_semaphore_is_value_completed(uvm_gpu_tracking_semaphore_t *tracking_sem, NvU64 value) +{ + NvU64 completed = atomic64_read(&tracking_sem->completed_value); + + // Check that the GPU which owns the semaphore is still present + UVM_ASSERT(tracking_semaphore_check_gpu(tracking_sem)); + + if (completed >= value) { + // atomic64_read() doesn't imply any memory barriers and we need all + // subsequent memory accesses in this thread to be ordered after the + // atomic read of the completed value above as that will also order them + // with any accesses (in particular the GPU semaphore read) performed by + // the other thread prior to it setting the completed_value we read. + // smp_mb__after_atomic() provides that ordering. + // + // Also see the comment in update_completed_value_locked(). + smp_mb__after_atomic(); + + return true; + } + + return uvm_gpu_tracking_semaphore_update_completed_value(tracking_sem) >= value; +} diff --git a/kernel-open/nvidia-uvm/uvm_gpu_semaphore.h b/kernel-open/nvidia-uvm/uvm_gpu_semaphore.h new file mode 100644 index 000000000..50931f840 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_semaphore.h @@ -0,0 +1,181 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_GPU_SEMAPHORE_H__ +#define __UVM_GPU_SEMAPHORE_H__ + +#include "uvm_forward_decl.h" +#include "uvm_lock.h" +#include "uvm_rm_mem.h" +#include "uvm_linux.h" + +// A GPU semaphore is a memory location accessible by the GPUs and the CPU +// that's used for synchronization among them. +// The GPU has primitives to acquire (wait for) and release (set) 4-byte memory +// locations. The same memory can be accessed by multiple GPUs and the CPU +// allowing for different synchronization schemes. 
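+//
+// Illustrative sketch (editorial addition, not from the original header): with
+// the accessors declared below, a CPU-side wait for a GPU release could look
+// roughly like
+//
+//     while (uvm_gpu_semaphore_get_payload(&sem) < expected_payload)
+//         cpu_relax();
+//
+// although real users should go through the uvm_gpu_tracking_semaphore_*
+// helpers further down, which extend the 32-bit payload to a 64-bit counter
+// and handle wrap-around.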
+// +// The UVM driver maintains a per-GPU semaphore pool that grows on demand as +// semaphores are allocated out of it. +// +// TODO: Bug 200194638: Add support for timestamps (the GPU also supports +// releasing 16-byte semaphores that include an 8-byte timestamp). +struct uvm_gpu_semaphore_struct +{ + // The semaphore pool page the semaphore came from + uvm_gpu_semaphore_pool_page_t *page; + + // Pointer to the memory location + NvU32 *payload; +}; + +// A primitive used for tracking progress of the GPU +// Whenever a stream of GPU operations needs to be synchronized it increments +// the semaphore's payload as the last step so that other processors +// can acquire (wait for) it. +// The primitive maintains a 64-bit counter on top of the 32-bit GPU semaphore +// to support 2^64 synchronization points instead of just 2^32. The logic relies +// on being able to notice every time the 32-bit counter wraps around (see +// update_completed_value()). +struct uvm_gpu_tracking_semaphore_struct +{ + uvm_gpu_semaphore_t semaphore; + + // Last completed value + // The bottom 32-bits will always match the latest semaphore payload seen in + // update_completed_value_locked(). + atomic64_t completed_value; + + // Lock protecting updates to the completed_value + uvm_spinlock_t lock; + + // Last queued value + // All accesses to the queued value should be handled by the user of the GPU + // tracking semaphore. + NvU64 queued_value; +}; + +// Create a semaphore pool for a GPU. +NV_STATUS uvm_gpu_semaphore_pool_create(uvm_gpu_t *gpu, uvm_gpu_semaphore_pool_t **pool_out); + +// Destroy a semaphore pool +// Locking: +// - Global lock needs to be held in read mode (for unmapping from all GPUs) +// - Internally acquires: +// - GPU semaphore pool lock +// - RM API lock +// - RM GPUs lock +void uvm_gpu_semaphore_pool_destroy(uvm_gpu_semaphore_pool_t *pool); + +// Allocate a semaphore from the pool. +// The semaphore will be mapped on all GPUs currently registered with the UVM +// driver, and on all new GPUs which will be registered in the future. +// The mappings are added to UVM's internal address space, and (in SR-IOV heavy) +// to the proxy address space. +// +// The semaphore's payload will be initially set to 0. +// +// Locking: +// - Global lock needs to be held in read mode (for mapping on all GPUs) +// - Internally synchronized and hence safe to be called from multiple threads +// - Internally acquires: +// - GPU semaphore pool lock +// - RM API lock +// - RM GPUs lock +NV_STATUS uvm_gpu_semaphore_alloc(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_semaphore_t *semaphore); + +// Free a semaphore +// Locking: +// - Internally synchronized and hence safe to be called from multiple threads +void uvm_gpu_semaphore_free(uvm_gpu_semaphore_t *semaphore); + +// Map all the semaphores from the pool on a GPU +// +// The mappings are added to UVM's internal address space, and (in SR-IOV heavy) +// to the proxy address space. +NV_STATUS uvm_gpu_semaphore_pool_map_gpu(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_t *gpu); + +// Unmap all the semaphores from the pool from a GPU +// +// The unmapping affects all the VA spaces where the semaphores are currently +// mapped. +void uvm_gpu_semaphore_pool_unmap_gpu(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_t *gpu); + +// Get the GPU VA of a semaphore in UVM's internal address space. +NvU64 uvm_gpu_semaphore_get_gpu_uvm_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu); + +// Get the GPU VA of a semaphore in the proxy address space. 
+NvU64 uvm_gpu_semaphore_get_gpu_proxy_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu); + +NvU64 uvm_gpu_semaphore_get_gpu_va(uvm_gpu_semaphore_t *semaphore, uvm_gpu_t *gpu, bool is_proxy_va_space); + +// Read the 32-bit payload of the semaphore +// Notably doesn't provide any memory ordering guarantees and needs to be used with +// care. For an example of what needs to be considered see +// uvm_gpu_tracking_semaphore_update_completed_value(). +NvU32 uvm_gpu_semaphore_get_payload(uvm_gpu_semaphore_t *semaphore); + +// Set the 32-bit payload of the semaphore +// Guarantees that all memory accesses preceding setting the payload won't be +// moved past it. +void uvm_gpu_semaphore_set_payload(uvm_gpu_semaphore_t *semaphore, NvU32 payload); + +// Allocate a GPU tracking semaphore from the pool +// Locking same as uvm_gpu_semaphore_alloc() +NV_STATUS uvm_gpu_tracking_semaphore_alloc(uvm_gpu_semaphore_pool_t *pool, uvm_gpu_tracking_semaphore_t *tracking_sem); + +// Free a GPU tracking semaphore +// Locking same as uvm_gpu_semaphore_free() +void uvm_gpu_tracking_semaphore_free(uvm_gpu_tracking_semaphore_t *tracking_sem); + +// Check whether a specific value has been completed +// +// If true is returned, guarantees that all operations ordered prior to a +// processor (commonly a GPU) completing the specific value will be visible to +// the caller. +// +// In case a GPU is supposed to complete a value, care needs to be taken for all GPU +// operations to be ordered correctly with the semaphore release that sets the value. +// In case it's the CPU completing the value, uvm_gpu_semaphore_set_payload() +// should be used that provides the necessary ordering guarantees. +// +// Locking: this operation is internally synchronized and hence safe to be +// called from multiple threads. +bool uvm_gpu_tracking_semaphore_is_value_completed(uvm_gpu_tracking_semaphore_t *tracking_sem, NvU64 value); + +// Update and return the completed value +// +// Provides the same guarantees as if uvm_gpu_tracking_semaphore_is_value_completed() +// returned true for the returned completed value. +// +// Locking: this operation is internally synchronized and hence safe to be +// called from multiple threads. 
+NvU64 uvm_gpu_tracking_semaphore_update_completed_value(uvm_gpu_tracking_semaphore_t *tracking_sem); + +// See the comments for uvm_gpu_tracking_semaphore_is_value_completed +static bool uvm_gpu_tracking_semaphore_is_completed(uvm_gpu_tracking_semaphore_t *tracking_sem) +{ + return uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, tracking_sem->queued_value); +} + +#endif // __UVM_GPU_SEMAPHORE_H__ diff --git a/kernel-open/nvidia-uvm/uvm_gpu_semaphore_test.c b/kernel-open/nvidia-uvm/uvm_gpu_semaphore_test.c new file mode 100644 index 000000000..220d0a46b --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_gpu_semaphore_test.c @@ -0,0 +1,165 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_global.h" +#include "uvm_gpu_semaphore.h" +#include "uvm_test.h" +#include "uvm_va_space.h" +#include "uvm_kvmalloc.h" + +static NV_STATUS add_and_test(uvm_gpu_tracking_semaphore_t *tracking_sem, NvU32 increment_by) +{ + NvU64 new_value; + NvU64 completed = uvm_gpu_tracking_semaphore_update_completed_value(tracking_sem); + new_value = completed + increment_by; + tracking_sem->queued_value = new_value; + + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_update_completed_value(tracking_sem) == completed); + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, 0)); + if (completed > 0) + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, completed - 1)); + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, completed)); + TEST_CHECK_RET(!uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, completed + 1)); + TEST_CHECK_RET(!uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, new_value)); + TEST_CHECK_RET(!uvm_gpu_tracking_semaphore_is_completed(tracking_sem)); + + uvm_gpu_semaphore_set_payload(&tracking_sem->semaphore, (NvU32)new_value); + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_update_completed_value(tracking_sem) == new_value); + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, completed)); + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, new_value)); + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, new_value - 1)); + TEST_CHECK_RET(!uvm_gpu_tracking_semaphore_is_value_completed(tracking_sem, new_value + 1)); + TEST_CHECK_RET(uvm_gpu_tracking_semaphore_is_completed(tracking_sem)); + + return NV_OK; +} + +static NV_STATUS test_tracking(uvm_va_space_t *va_space) +{ + NV_STATUS status; + uvm_gpu_tracking_semaphore_t tracking_sem; + int i; + uvm_gpu_t *gpu = uvm_va_space_find_first_gpu(va_space); + + if (gpu == NULL) + return NV_ERR_INVALID_STATE; + + status = uvm_gpu_tracking_semaphore_alloc(gpu->semaphore_pool, &tracking_sem); + if (status != NV_OK) + return status; + + status = add_and_test(&tracking_sem, 1); + if (status != NV_OK) + goto done; + + for (i = 0; i < 100; ++i) { + status = add_and_test(&tracking_sem, UINT_MAX - 1); + if (status != NV_OK) + goto done; + } + +done: + uvm_gpu_tracking_semaphore_free(&tracking_sem); + return status; +} + +#define NUM_SEMAPHORES_PER_GPU 4096 + +static NV_STATUS test_alloc(uvm_va_space_t *va_space) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + uvm_gpu_semaphore_t *semaphores; + int i; + NvU32 semaphore_count; + NvU32 gpu_count = uvm_processor_mask_get_gpu_count(&va_space->registered_gpus); + NvU32 current_semaphore = 0; + + if (gpu_count == 0) + return NV_ERR_INVALID_STATE; + + semaphore_count = gpu_count * NUM_SEMAPHORES_PER_GPU; + + semaphores = uvm_kvmalloc_zero(semaphore_count * sizeof(*semaphores)); + if (semaphores == NULL) + return NV_ERR_NO_MEMORY; + + for (i = 0; i < NUM_SEMAPHORES_PER_GPU; ++i) { + for_each_va_space_gpu(gpu, va_space) { + status = uvm_gpu_semaphore_alloc(gpu->semaphore_pool, &semaphores[current_semaphore++]); + if (status != NV_OK) + goto done; + } + } + + for (i = 0; i < current_semaphore; ++i) { + for_each_va_space_gpu(gpu, va_space) { + NvU64 gpu_va; + + gpu_va = uvm_gpu_semaphore_get_gpu_uvm_va(&semaphores[i], gpu); + TEST_CHECK_GOTO(gpu_va != 0, done); + + // In SR-IOV heavy, there should be a mapping in the proxy VA space + // too. 
+ if (uvm_gpu_uses_proxy_channel_pool(gpu)) { + gpu_va = uvm_gpu_semaphore_get_gpu_proxy_va(&semaphores[i], gpu); + TEST_CHECK_GOTO(gpu_va != 0, done); + } + + uvm_gpu_semaphore_set_payload(&semaphores[i], 1); + TEST_CHECK_GOTO(uvm_gpu_semaphore_get_payload(&semaphores[i]) == 1, done); + } + } + +done: + for (i = 0; i < current_semaphore; ++i) + uvm_gpu_semaphore_free(&semaphores[i]); + + uvm_kvfree(semaphores); + + return status; +} + + +NV_STATUS uvm_test_gpu_semaphore_sanity(UVM_TEST_GPU_SEMAPHORE_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_va_space_down_read_rm(va_space); + + status = test_alloc(va_space); + if (status != NV_OK) + goto done; + + status = test_tracking(va_space); + if (status != NV_OK) + goto done; + +done: + uvm_va_space_up_read_rm(va_space); + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_hal.c b/kernel-open/nvidia-uvm/uvm_hal.c new file mode 100644 index 000000000..c27fbe112 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_hal.c @@ -0,0 +1,991 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_kvmalloc.h" + +#include "cla16f.h" +#include "clb069.h" +#include "clb06f.h" +#include "clb0b5.h" +#include "clc06f.h" +#include "clc0b5.h" +#include "clc1b5.h" +#include "ctrl2080mc.h" +#include "clc3b5.h" +#include "clc36f.h" +#include "clc369.h" +#include "clc365.h" +#include "clc46f.h" +#include "clc5b5.h" +#include "clc6b5.h" +#include "clc56f.h" +#include "clc7b5.h" + + + + + +#define CE_OP_COUNT (sizeof(uvm_ce_hal_t) / sizeof(void *)) +#define HOST_OP_COUNT (sizeof(uvm_host_hal_t) / sizeof(void *)) +#define ARCH_OP_COUNT (sizeof(uvm_arch_hal_t) / sizeof(void *)) +#define FAULT_BUFFER_OP_COUNT (sizeof(uvm_fault_buffer_hal_t) / sizeof(void *)) +#define ACCESS_COUNTER_BUFFER_OP_COUNT (sizeof(uvm_access_counter_buffer_hal_t) / sizeof(void *)) + + + + +// Table for copy engine functions. +// Each entry is associated with a copy engine class through the 'class' field. +// By setting the 'parent_class' field, a class will inherit the parent class's +// functions for any fields left NULL when uvm_hal_init_table() runs upon module +// load. 
The parent class must appear earlier in the array than the child. +static uvm_hal_class_ops_t ce_table[] = +{ + { + .id = MAXWELL_DMA_COPY_A, + .u.ce_ops = { + .init = uvm_hal_maxwell_ce_init, + .method_validate = uvm_hal_method_validate_stub, + .semaphore_release = uvm_hal_maxwell_ce_semaphore_release, + .semaphore_timestamp = uvm_hal_maxwell_ce_semaphore_timestamp, + .semaphore_reduction_inc = uvm_hal_maxwell_ce_semaphore_reduction_inc, + .offset_out = uvm_hal_maxwell_ce_offset_out, + .offset_in_out = uvm_hal_maxwell_ce_offset_in_out, + .phys_mode = uvm_hal_maxwell_ce_phys_mode, + .plc_mode = uvm_hal_maxwell_ce_plc_mode, + .memcopy_validate = uvm_hal_ce_memcopy_validate_stub, + .memcopy_patch_src = uvm_hal_ce_memcopy_patch_src_stub, + .memcopy = uvm_hal_maxwell_ce_memcopy, + .memcopy_v_to_v = uvm_hal_maxwell_ce_memcopy_v_to_v, + .memset_validate = uvm_hal_ce_memset_validate_stub, + .memset_1 = uvm_hal_maxwell_ce_memset_1, + .memset_4 = uvm_hal_maxwell_ce_memset_4, + .memset_8 = uvm_hal_maxwell_ce_memset_8, + .memset_v_4 = uvm_hal_maxwell_ce_memset_v_4, + } + }, + { + .id = PASCAL_DMA_COPY_A, + .parent_id = MAXWELL_DMA_COPY_A, + .u.ce_ops = { + .semaphore_release = uvm_hal_pascal_ce_semaphore_release, + .semaphore_timestamp = uvm_hal_pascal_ce_semaphore_timestamp, + .semaphore_reduction_inc = uvm_hal_pascal_ce_semaphore_reduction_inc, + .offset_out = uvm_hal_pascal_ce_offset_out, + .offset_in_out = uvm_hal_pascal_ce_offset_in_out, + } + }, + { + .id = PASCAL_DMA_COPY_B, + .parent_id = PASCAL_DMA_COPY_A, + .u.ce_ops = {} + }, + { + .id = VOLTA_DMA_COPY_A, + .parent_id = PASCAL_DMA_COPY_B, + .u.ce_ops = {}, + }, + { + .id = TURING_DMA_COPY_A, + .parent_id = VOLTA_DMA_COPY_A, + .u.ce_ops = {}, + }, + { + .id = AMPERE_DMA_COPY_A, + .parent_id = TURING_DMA_COPY_A, + .u.ce_ops = { + .method_validate = uvm_hal_ampere_ce_method_validate_c6b5, + .phys_mode = uvm_hal_ampere_ce_phys_mode, + .memcopy_validate = uvm_hal_ampere_ce_memcopy_validate_c6b5, + .memcopy_patch_src = uvm_hal_ampere_ce_memcopy_patch_src_c6b5, + .memset_validate = uvm_hal_ampere_ce_memset_validate_c6b5, + }, + }, + { + .id = AMPERE_DMA_COPY_B, + .parent_id = AMPERE_DMA_COPY_A, + .u.ce_ops = { + .method_validate = uvm_hal_method_validate_stub, + .plc_mode = uvm_hal_ampere_ce_plc_mode_c7b5, + .memcopy_validate = uvm_hal_ce_memcopy_validate_stub, + .memcopy_patch_src = uvm_hal_ce_memcopy_patch_src_stub, + .memset_validate = uvm_hal_ce_memset_validate_stub, + }, + }, + + + + + + + + + + + + + + + + +}; + +// Table for GPFIFO functions. Same idea as the copy engine table. +static uvm_hal_class_ops_t host_table[] = +{ + { + // This host class is reported for GM10x + .id = KEPLER_CHANNEL_GPFIFO_B, + .u.host_ops = { + .init = uvm_hal_maxwell_host_init_noop, + .method_validate = uvm_hal_method_validate_stub, + .sw_method_validate = uvm_hal_method_validate_stub, + .wait_for_idle = uvm_hal_maxwell_host_wait_for_idle, + .membar_sys = uvm_hal_maxwell_host_membar_sys, + // No MEMBAR GPU until Pascal, just do a MEMBAR SYS. 
+ .membar_gpu = uvm_hal_maxwell_host_membar_sys, + .noop = uvm_hal_maxwell_host_noop, + .interrupt = uvm_hal_maxwell_host_interrupt, + .semaphore_acquire = uvm_hal_maxwell_host_semaphore_acquire, + .semaphore_release = uvm_hal_maxwell_host_semaphore_release, + .semaphore_timestamp = uvm_hal_maxwell_host_semaphore_timestamp, + .set_gpfifo_entry = uvm_hal_maxwell_host_set_gpfifo_entry, + .write_gpu_put = uvm_hal_maxwell_host_write_gpu_put, + .tlb_invalidate_all = uvm_hal_maxwell_host_tlb_invalidate_all_a16f, + .tlb_invalidate_va = uvm_hal_maxwell_host_tlb_invalidate_va, + .tlb_invalidate_test = uvm_hal_maxwell_host_tlb_invalidate_test, + .replay_faults = uvm_hal_maxwell_replay_faults_unsupported, + .cancel_faults_global = uvm_hal_maxwell_cancel_faults_global_unsupported, + .cancel_faults_targeted = uvm_hal_maxwell_cancel_faults_targeted_unsupported, + .cancel_faults_va = uvm_hal_maxwell_cancel_faults_va_unsupported, + .clear_faulted_channel_sw_method = uvm_hal_maxwell_host_clear_faulted_channel_sw_method_unsupported, + .clear_faulted_channel_method = uvm_hal_maxwell_host_clear_faulted_channel_method_unsupported, + .clear_faulted_channel_register = uvm_hal_maxwell_host_clear_faulted_channel_register_unsupported, + .access_counter_clear_all = uvm_hal_maxwell_access_counter_clear_all_unsupported, + .access_counter_clear_type = uvm_hal_maxwell_access_counter_clear_type_unsupported, + .access_counter_clear_targeted = uvm_hal_maxwell_access_counter_clear_targeted_unsupported, + .get_time = uvm_hal_maxwell_get_time, + } + }, + { + // This host class is reported for GM20x + .id = MAXWELL_CHANNEL_GPFIFO_A, + .parent_id = KEPLER_CHANNEL_GPFIFO_B, + .u.host_ops = { + .tlb_invalidate_all = uvm_hal_maxwell_host_tlb_invalidate_all_b06f, + } + }, + { + .id = PASCAL_CHANNEL_GPFIFO_A, + .parent_id = MAXWELL_CHANNEL_GPFIFO_A, + .u.host_ops = { + .init = uvm_hal_pascal_host_init, + .membar_sys = uvm_hal_pascal_host_membar_sys, + .membar_gpu = uvm_hal_pascal_host_membar_gpu, + .tlb_invalidate_all = uvm_hal_pascal_host_tlb_invalidate_all, + .tlb_invalidate_va = uvm_hal_pascal_host_tlb_invalidate_va, + .tlb_invalidate_test = uvm_hal_pascal_host_tlb_invalidate_test, + .replay_faults = uvm_hal_pascal_replay_faults, + .cancel_faults_global = uvm_hal_pascal_cancel_faults_global, + .cancel_faults_targeted = uvm_hal_pascal_cancel_faults_targeted, + } + }, + { + .id = VOLTA_CHANNEL_GPFIFO_A, + .parent_id = PASCAL_CHANNEL_GPFIFO_A, + .u.host_ops = { + .write_gpu_put = uvm_hal_volta_host_write_gpu_put, + .tlb_invalidate_va = uvm_hal_volta_host_tlb_invalidate_va, + .replay_faults = uvm_hal_volta_replay_faults, + .cancel_faults_va = uvm_hal_volta_cancel_faults_va, + .clear_faulted_channel_method = uvm_hal_volta_host_clear_faulted_channel_method, + .access_counter_clear_all = uvm_hal_volta_access_counter_clear_all, + .access_counter_clear_type = uvm_hal_volta_access_counter_clear_type, + .access_counter_clear_targeted = uvm_hal_volta_access_counter_clear_targeted, + .semaphore_timestamp = uvm_hal_volta_host_semaphore_timestamp, + } + }, + { + .id = TURING_CHANNEL_GPFIFO_A, + .parent_id = VOLTA_CHANNEL_GPFIFO_A, + .u.host_ops = { + .semaphore_acquire = uvm_hal_turing_host_semaphore_acquire, + .semaphore_release = uvm_hal_turing_host_semaphore_release, + .clear_faulted_channel_method = uvm_hal_turing_host_clear_faulted_channel_method, + .set_gpfifo_entry = uvm_hal_turing_host_set_gpfifo_entry, + } + }, + { + .id = AMPERE_CHANNEL_GPFIFO_A, + .parent_id = TURING_CHANNEL_GPFIFO_A, + .u.host_ops = { + .method_validate = 
uvm_hal_ampere_host_method_validate, + .sw_method_validate = uvm_hal_ampere_host_sw_method_validate, + .clear_faulted_channel_sw_method = uvm_hal_ampere_host_clear_faulted_channel_sw_method, + .clear_faulted_channel_register = uvm_hal_ampere_host_clear_faulted_channel_register, + .tlb_invalidate_all = uvm_hal_ampere_host_tlb_invalidate_all, + .tlb_invalidate_va = uvm_hal_ampere_host_tlb_invalidate_va, + .tlb_invalidate_test = uvm_hal_ampere_host_tlb_invalidate_test, + } + }, + + + + + + + + + + + + + + + + + +}; + +static uvm_hal_class_ops_t arch_table[] = +{ + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM000, + .u.arch_ops = { + .init_properties = uvm_hal_maxwell_arch_init_properties, + .mmu_mode_hal = uvm_hal_mmu_mode_maxwell, + .enable_prefetch_faults = uvm_hal_maxwell_mmu_enable_prefetch_faults_unsupported, + .disable_prefetch_faults = uvm_hal_maxwell_mmu_disable_prefetch_faults_unsupported, + .mmu_engine_id_to_type = uvm_hal_maxwell_mmu_engine_id_to_type_unsupported, + .mmu_client_id_to_utlb_id = uvm_hal_maxwell_mmu_client_id_to_utlb_id_unsupported, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM200, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM000, + .u.arch_ops = {} + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM200, + .u.arch_ops = { + .init_properties = uvm_hal_pascal_arch_init_properties, + .mmu_mode_hal = uvm_hal_mmu_mode_pascal, + .enable_prefetch_faults = uvm_hal_pascal_mmu_enable_prefetch_faults, + .disable_prefetch_faults = uvm_hal_pascal_mmu_disable_prefetch_faults, + .mmu_client_id_to_utlb_id = uvm_hal_pascal_mmu_client_id_to_utlb_id, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100, + .u.arch_ops = { + .init_properties = uvm_hal_volta_arch_init_properties, + .mmu_mode_hal = uvm_hal_mmu_mode_volta, + .mmu_engine_id_to_type = uvm_hal_volta_mmu_engine_id_to_type, + .mmu_client_id_to_utlb_id = uvm_hal_volta_mmu_client_id_to_utlb_id, + }, + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100, + .u.arch_ops = { + .init_properties = uvm_hal_turing_arch_init_properties, + .mmu_mode_hal = uvm_hal_mmu_mode_turing, + .mmu_engine_id_to_type = uvm_hal_turing_mmu_engine_id_to_type, + }, + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100, + .u.arch_ops = { + .init_properties = uvm_hal_ampere_arch_init_properties, + .mmu_mode_hal = uvm_hal_mmu_mode_ampere, + .mmu_engine_id_to_type = uvm_hal_ampere_mmu_engine_id_to_type, + .mmu_client_id_to_utlb_id = uvm_hal_ampere_mmu_client_id_to_utlb_id, + }, + }, + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +}; + +static uvm_hal_class_ops_t fault_buffer_table[] = +{ + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM000, + .u.fault_buffer_ops = { + .enable_replayable_faults = uvm_hal_maxwell_enable_replayable_faults_unsupported, + .disable_replayable_faults = uvm_hal_maxwell_disable_replayable_faults_unsupported, + .clear_replayable_faults = uvm_hal_maxwell_clear_replayable_faults_unsupported, + .read_put = uvm_hal_maxwell_fault_buffer_read_put_unsupported, + .read_get = uvm_hal_maxwell_fault_buffer_read_get_unsupported, + .write_get = uvm_hal_maxwell_fault_buffer_write_get_unsupported, + .get_ve_id = uvm_hal_maxwell_fault_buffer_get_ve_id_unsupported, + .parse_entry = 
uvm_hal_maxwell_fault_buffer_parse_entry_unsupported, + .entry_is_valid = uvm_hal_maxwell_fault_buffer_entry_is_valid_unsupported, + .entry_clear_valid = uvm_hal_maxwell_fault_buffer_entry_clear_valid_unsupported, + .entry_size = uvm_hal_maxwell_fault_buffer_entry_size_unsupported, + .parse_non_replayable_entry = uvm_hal_maxwell_fault_buffer_parse_non_replayable_entry_unsupported, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM200, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM000, + .u.fault_buffer_ops = {} + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM200, + .u.fault_buffer_ops = { + .enable_replayable_faults = uvm_hal_pascal_enable_replayable_faults, + .disable_replayable_faults = uvm_hal_pascal_disable_replayable_faults, + .clear_replayable_faults = uvm_hal_pascal_clear_replayable_faults, + .read_put = uvm_hal_pascal_fault_buffer_read_put, + .read_get = uvm_hal_pascal_fault_buffer_read_get, + .write_get = uvm_hal_pascal_fault_buffer_write_get, + .parse_entry = uvm_hal_pascal_fault_buffer_parse_entry, + .entry_is_valid = uvm_hal_pascal_fault_buffer_entry_is_valid, + .entry_clear_valid = uvm_hal_pascal_fault_buffer_entry_clear_valid, + .entry_size = uvm_hal_pascal_fault_buffer_entry_size, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100, + .u.fault_buffer_ops = { + .read_put = uvm_hal_volta_fault_buffer_read_put, + .read_get = uvm_hal_volta_fault_buffer_read_get, + .write_get = uvm_hal_volta_fault_buffer_write_get, + .get_ve_id = uvm_hal_volta_fault_buffer_get_ve_id, + .parse_entry = uvm_hal_volta_fault_buffer_parse_entry, + .parse_non_replayable_entry = uvm_hal_volta_fault_buffer_parse_non_replayable_entry, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100, + .u.fault_buffer_ops = { + .disable_replayable_faults = uvm_hal_turing_disable_replayable_faults, + .clear_replayable_faults = uvm_hal_turing_clear_replayable_faults, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100, + .u.fault_buffer_ops = {} + }, + + + + + + + + + + + + + + + + + + + + + + + + + + + +}; + +static uvm_hal_class_ops_t access_counter_buffer_table[] = +{ + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM000, + .u.access_counter_buffer_ops = { + .enable_access_counter_notifications = uvm_hal_maxwell_enable_access_counter_notifications_unsupported, + .disable_access_counter_notifications = uvm_hal_maxwell_disable_access_counter_notifications_unsupported, + .clear_access_counter_notifications = uvm_hal_maxwell_clear_access_counter_notifications_unsupported, + .parse_entry = uvm_hal_maxwell_access_counter_buffer_parse_entry_unsupported, + .entry_is_valid = uvm_hal_maxwell_access_counter_buffer_entry_is_valid_unsupported, + .entry_clear_valid = uvm_hal_maxwell_access_counter_buffer_entry_clear_valid_unsupported, + .entry_size = uvm_hal_maxwell_access_counter_buffer_entry_size_unsupported, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM200, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM000, + .u.access_counter_buffer_ops = {} + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM200, + .u.access_counter_buffer_ops = {} + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100, + .parent_id = 
NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100, + .u.access_counter_buffer_ops = { + .enable_access_counter_notifications = uvm_hal_volta_enable_access_counter_notifications, + .disable_access_counter_notifications = uvm_hal_volta_disable_access_counter_notifications, + .clear_access_counter_notifications = uvm_hal_volta_clear_access_counter_notifications, + .parse_entry = uvm_hal_volta_access_counter_buffer_parse_entry, + .entry_is_valid = uvm_hal_volta_access_counter_buffer_entry_is_valid, + .entry_clear_valid = uvm_hal_volta_access_counter_buffer_entry_clear_valid, + .entry_size = uvm_hal_volta_access_counter_buffer_entry_size, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100, + .u.access_counter_buffer_ops = { + .disable_access_counter_notifications = uvm_hal_turing_disable_access_counter_notifications, + .clear_access_counter_notifications = uvm_hal_turing_clear_access_counter_notifications, + } + }, + { + .id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100, + .parent_id = NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100, + .u.access_counter_buffer_ops = {} + }, + + + + + + + + + + + + + + + + + + + + + + + + + +}; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +static inline uvm_hal_class_ops_t *ops_find_by_id(uvm_hal_class_ops_t *table, NvU32 row_count, NvU32 id) +{ + NvLength i; + + // go through array and match on class. + for (i = 0; i < row_count; i++) { + if (table[i].id == id) + return table + i; + } + + return NULL; +} + +// use memcmp to check for function pointer assignment in a well defined, +// general way. +static inline bool op_is_null(uvm_hal_class_ops_t *row, NvLength op_idx, NvLength op_offset) +{ + void *temp = NULL; + return memcmp(&temp, (char *)row + op_offset + sizeof(void *) * op_idx, sizeof(void *)) == 0; +} + +// use memcpy to copy function pointers in a well defined, general way. 
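+// For example (editorial note, not from the original source): with
+// op_offset == offsetof(uvm_hal_class_ops_t, u.ce_ops) and op_idx == 1, the
+// bytes copied are exactly the second pointer-sized slot inside u.ce_ops.
+// Treating every ops struct as a flat array of void * this way lets the same
+// helpers service the CE, host, arch, fault buffer and access counter tables.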
+static inline void op_copy(uvm_hal_class_ops_t *dst, uvm_hal_class_ops_t *src, NvLength op_idx, NvLength op_offset) +{ + void *m_dst = (char *)dst + op_offset + sizeof(void *) * op_idx; + void *m_src = (char *)src + op_offset + sizeof(void *) * op_idx; + memcpy(m_dst, m_src, sizeof(void *)); +} + +static inline NV_STATUS ops_init_from_parent(uvm_hal_class_ops_t *table, + NvU32 row_count, + NvLength op_count, + NvLength op_offset) +{ + NvLength i; + + for (i = 0; i < row_count; i++) { + NvLength j; + uvm_hal_class_ops_t *parent = NULL; + + if (table[i].parent_id != 0) { + parent = ops_find_by_id(table, i, table[i].parent_id); + if (parent == NULL) + return NV_ERR_INVALID_CLASS; + + // Go through all the ops and assign from parent's corresponding op + // if NULL + for (j = 0; j < op_count; j++) { + if (op_is_null(table + i, j, op_offset)) + op_copy(table + i, parent, j, op_offset); + } + } + + // At this point, it is an error to have missing HAL operations + for (j = 0; j < op_count; j++) { + if (op_is_null(table + i, j, op_offset)) + return NV_ERR_INVALID_STATE; + } + } + + return NV_OK; +} + +NV_STATUS uvm_hal_init_table(void) +{ + NV_STATUS status; + + status = ops_init_from_parent(ce_table, ARRAY_SIZE(ce_table), CE_OP_COUNT, offsetof(uvm_hal_class_ops_t, u.ce_ops)); + if (status != NV_OK) { + UVM_ERR_PRINT("ops_init_from_parent(ce_table) failed: %s\n", nvstatusToString(status)); + return status; + } + + status = ops_init_from_parent(host_table, ARRAY_SIZE(host_table), HOST_OP_COUNT, offsetof(uvm_hal_class_ops_t, u.host_ops)); + if (status != NV_OK) { + UVM_ERR_PRINT("ops_init_from_parent(host_table) failed: %s\n", nvstatusToString(status)); + return status; + } + + status = ops_init_from_parent(arch_table, ARRAY_SIZE(arch_table), ARCH_OP_COUNT, offsetof(uvm_hal_class_ops_t, u.arch_ops)); + if (status != NV_OK) { + UVM_ERR_PRINT("ops_init_from_parent(arch_table) failed: %s\n", nvstatusToString(status)); + return status; + } + + status = ops_init_from_parent(fault_buffer_table, + ARRAY_SIZE(fault_buffer_table), + FAULT_BUFFER_OP_COUNT, + offsetof(uvm_hal_class_ops_t, u.fault_buffer_ops)); + if (status != NV_OK) { + UVM_ERR_PRINT("ops_init_from_parent(fault_buffer_table) failed: %s\n", nvstatusToString(status)); + return status; + } + + status = ops_init_from_parent(access_counter_buffer_table, + ARRAY_SIZE(access_counter_buffer_table), + ACCESS_COUNTER_BUFFER_OP_COUNT, + offsetof(uvm_hal_class_ops_t, u.access_counter_buffer_ops)); + if (status != NV_OK) { + UVM_ERR_PRINT("ops_init_from_parent(access_counter_buffer_table) failed: %s\n", nvstatusToString(status)); + return status; + } + + + + + + + + + + + + + return NV_OK; +} + +NV_STATUS uvm_hal_init_gpu(uvm_parent_gpu_t *parent_gpu) +{ + const UvmGpuInfo *gpu_info = &parent_gpu->rm_info; + uvm_hal_class_ops_t *class_ops; + + class_ops = ops_find_by_id(ce_table, ARRAY_SIZE(ce_table), gpu_info->ceClass); + if (class_ops == NULL) { + UVM_ERR_PRINT("Unsupported ce class: 0x%X, GPU %s\n", gpu_info->ceClass, parent_gpu->name); + return NV_ERR_INVALID_CLASS; + } + + parent_gpu->ce_hal = &class_ops->u.ce_ops; + + class_ops = ops_find_by_id(host_table, ARRAY_SIZE(host_table), gpu_info->hostClass); + if (class_ops == NULL) { + UVM_ERR_PRINT("Unsupported host class: 0x%X, GPU %s\n", gpu_info->hostClass, parent_gpu->name); + return NV_ERR_INVALID_CLASS; + } + + parent_gpu->host_hal = &class_ops->u.host_ops; + + class_ops = ops_find_by_id(arch_table, ARRAY_SIZE(arch_table), gpu_info->gpuArch); + if (class_ops == NULL) { + UVM_ERR_PRINT("Unsupported 
GPU architecture: 0x%X, GPU %s\n", gpu_info->gpuArch, parent_gpu->name); + return NV_ERR_INVALID_CLASS; + } + + parent_gpu->arch_hal = &class_ops->u.arch_ops; + + class_ops = ops_find_by_id(fault_buffer_table, ARRAY_SIZE(fault_buffer_table), gpu_info->gpuArch); + if (class_ops == NULL) { + UVM_ERR_PRINT("Fault buffer HAL not found, GPU %s, arch: 0x%X\n", parent_gpu->name, gpu_info->gpuArch); + return NV_ERR_INVALID_CLASS; + } + + parent_gpu->fault_buffer_hal = &class_ops->u.fault_buffer_ops; + + class_ops = ops_find_by_id(access_counter_buffer_table, + ARRAY_SIZE(access_counter_buffer_table), + gpu_info->gpuArch); + if (class_ops == NULL) { + UVM_ERR_PRINT("Access counter HAL not found, GPU %s, arch: 0x%X\n", parent_gpu->name, gpu_info->gpuArch); + return NV_ERR_INVALID_CLASS; + } + + parent_gpu->access_counter_buffer_hal = &class_ops->u.access_counter_buffer_ops; + + + + + + + + + + + + return NV_OK; +} + +void uvm_hal_init_properties(uvm_parent_gpu_t *parent_gpu) +{ + parent_gpu->arch_hal->init_properties(parent_gpu); + + // Override the HAL when in non-passthrough virtualization + // TODO: Bug 200692962: [UVM] Add support for access counters in UVM on SR-IOV configurations + if (parent_gpu->virt_mode != UVM_VIRT_MODE_NONE) + parent_gpu->access_counters_supported = false; +} + +void uvm_hal_tlb_invalidate_membar(uvm_push_t *push, uvm_membar_t membar) +{ + uvm_gpu_t *gpu; + NvU32 i; + + if (membar == UVM_MEMBAR_NONE) + return; + + gpu = uvm_push_get_gpu(push); + + for (i = 0; i < gpu->parent->num_hshub_tlb_invalidate_membars; i++) + gpu->parent->host_hal->membar_gpu(push); + + uvm_hal_membar(gpu, push, membar); +} + +const char *uvm_aperture_string(uvm_aperture_t aperture) +{ + BUILD_BUG_ON(UVM_APERTURE_MAX != 12); + + switch (aperture) { + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_0); + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_1); + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_2); + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_3); + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_4); + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_5); + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_6); + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_7); + UVM_ENUM_STRING_CASE(UVM_APERTURE_PEER_MAX); + UVM_ENUM_STRING_CASE(UVM_APERTURE_SYS); + UVM_ENUM_STRING_CASE(UVM_APERTURE_VID); + UVM_ENUM_STRING_CASE(UVM_APERTURE_DEFAULT); + UVM_ENUM_STRING_DEFAULT(); + } +} + +const char *uvm_prot_string(uvm_prot_t prot) +{ + BUILD_BUG_ON(UVM_PROT_MAX != 4); + + switch (prot) { + UVM_ENUM_STRING_CASE(UVM_PROT_NONE); + UVM_ENUM_STRING_CASE(UVM_PROT_READ_ONLY); + UVM_ENUM_STRING_CASE(UVM_PROT_READ_WRITE); + UVM_ENUM_STRING_CASE(UVM_PROT_READ_WRITE_ATOMIC); + UVM_ENUM_STRING_DEFAULT(); + } +} + +const char *uvm_membar_string(uvm_membar_t membar) +{ + switch (membar) { + UVM_ENUM_STRING_CASE(UVM_MEMBAR_SYS); + UVM_ENUM_STRING_CASE(UVM_MEMBAR_GPU); + UVM_ENUM_STRING_CASE(UVM_MEMBAR_NONE); + } + + return "UNKNOWN"; +} + +const char *uvm_fault_access_type_string(uvm_fault_access_type_t fault_access_type) +{ + BUILD_BUG_ON(UVM_FAULT_ACCESS_TYPE_COUNT != 5); + + switch (fault_access_type) { + UVM_ENUM_STRING_CASE(UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG); + UVM_ENUM_STRING_CASE(UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK); + UVM_ENUM_STRING_CASE(UVM_FAULT_ACCESS_TYPE_WRITE); + UVM_ENUM_STRING_CASE(UVM_FAULT_ACCESS_TYPE_READ); + UVM_ENUM_STRING_CASE(UVM_FAULT_ACCESS_TYPE_PREFETCH); + UVM_ENUM_STRING_DEFAULT(); + } +} + +const char *uvm_fault_type_string(uvm_fault_type_t fault_type) +{ + BUILD_BUG_ON(UVM_FAULT_TYPE_COUNT != 16); + + switch (fault_type) { + 
UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_INVALID_PDE); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_INVALID_PTE); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_ATOMIC); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_WRITE); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_READ); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_PDE_SIZE); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_VA_LIMIT_VIOLATION); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_UNBOUND_INST_BLOCK); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_PRIV_VIOLATION); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_PITCH_MASK_VIOLATION); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_WORK_CREATION); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_UNSUPPORTED_APERTURE); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_COMPRESSION_FAILURE); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_UNSUPPORTED_KIND); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_REGION_VIOLATION); + UVM_ENUM_STRING_CASE(UVM_FAULT_TYPE_POISONED); + UVM_ENUM_STRING_DEFAULT(); + } +} + +const char *uvm_fault_client_type_string(uvm_fault_client_type_t fault_client_type) +{ + BUILD_BUG_ON(UVM_FAULT_CLIENT_TYPE_COUNT != 2); + + switch (fault_client_type) { + UVM_ENUM_STRING_CASE(UVM_FAULT_CLIENT_TYPE_GPC); + UVM_ENUM_STRING_CASE(UVM_FAULT_CLIENT_TYPE_HUB); + UVM_ENUM_STRING_DEFAULT(); + } +} + +const char *uvm_mmu_engine_type_string(uvm_mmu_engine_type_t mmu_engine_type) +{ + BUILD_BUG_ON(UVM_MMU_ENGINE_TYPE_COUNT != 3); + + switch (mmu_engine_type) { + UVM_ENUM_STRING_CASE(UVM_MMU_ENGINE_TYPE_GRAPHICS); + UVM_ENUM_STRING_CASE(UVM_MMU_ENGINE_TYPE_HOST); + UVM_ENUM_STRING_CASE(UVM_MMU_ENGINE_TYPE_CE); + UVM_ENUM_STRING_DEFAULT(); + } +} + +void uvm_hal_print_fault_entry(const uvm_fault_buffer_entry_t *entry) +{ + UVM_DBG_PRINT("fault_address: 0x%llx\n", entry->fault_address); + UVM_DBG_PRINT(" fault_instance_ptr: {0x%llx:%s}\n", entry->instance_ptr.address, + uvm_aperture_string(entry->instance_ptr.aperture)); + UVM_DBG_PRINT(" fault_type: %s\n", uvm_fault_type_string(entry->fault_type)); + UVM_DBG_PRINT(" fault_access_type: %s\n", uvm_fault_access_type_string(entry->fault_access_type)); + UVM_DBG_PRINT(" is_replayable: %s\n", entry->is_replayable? "true": "false"); + UVM_DBG_PRINT(" is_virtual: %s\n", entry->is_virtual? "true": "false"); + UVM_DBG_PRINT(" in_protected_mode: %s\n", entry->in_protected_mode? 
"true": "false"); + UVM_DBG_PRINT(" fault_source.client_type: %s\n", uvm_fault_client_type_string(entry->fault_source.client_type)); + UVM_DBG_PRINT(" fault_source.client_id: %d\n", entry->fault_source.client_id); + UVM_DBG_PRINT(" fault_source.gpc_id: %d\n", entry->fault_source.gpc_id); + UVM_DBG_PRINT(" fault_source.mmu_engine_id: %d\n", entry->fault_source.mmu_engine_id); + UVM_DBG_PRINT(" fault_source.mmu_engine_type: %s\n", + uvm_mmu_engine_type_string(entry->fault_source.mmu_engine_type)); + UVM_DBG_PRINT(" timestamp: %llu\n", entry->timestamp); +} + +const char *uvm_access_counter_type_string(uvm_access_counter_type_t access_counter_type) +{ + BUILD_BUG_ON(UVM_ACCESS_COUNTER_TYPE_MAX != 2); + + switch (access_counter_type) { + UVM_ENUM_STRING_CASE(UVM_ACCESS_COUNTER_TYPE_MIMC); + UVM_ENUM_STRING_CASE(UVM_ACCESS_COUNTER_TYPE_MOMC); + UVM_ENUM_STRING_DEFAULT(); + } +} + +void uvm_hal_print_access_counter_buffer_entry(const uvm_access_counter_buffer_entry_t *entry) +{ + if (!entry->address.is_virtual) { + UVM_DBG_PRINT("physical address: {0x%llx:%s}\n", entry->address.address, + uvm_aperture_string(entry->address.aperture)); + } + else { + UVM_DBG_PRINT("virtual address: 0x%llx\n", entry->address.address); + UVM_DBG_PRINT(" instance_ptr {0x%llx:%s}\n", entry->virtual_info.instance_ptr.address, + uvm_aperture_string(entry->virtual_info.instance_ptr.aperture)); + UVM_DBG_PRINT(" mmu_engine_type %s\n", uvm_mmu_engine_type_string(entry->virtual_info.mmu_engine_type)); + UVM_DBG_PRINT(" mmu_engine_id %u\n", entry->virtual_info.mmu_engine_id); + UVM_DBG_PRINT(" ve_id %u\n", entry->virtual_info.ve_id); + } + + UVM_DBG_PRINT(" is_virtual %u\n", entry->address.is_virtual); + UVM_DBG_PRINT(" counter_type %s\n", uvm_access_counter_type_string(entry->counter_type)); + UVM_DBG_PRINT(" counter_value %u\n", entry->counter_value); + UVM_DBG_PRINT(" subgranularity 0x%08x\n", entry->sub_granularity); + UVM_DBG_PRINT(" bank %u\n", entry->bank); + UVM_DBG_PRINT(" tag %x\n", entry->tag); +} + +bool uvm_hal_method_validate_stub(uvm_push_t *push, NvU32 method_address, NvU32 method_data) +{ + return true; +} + +bool uvm_hal_ce_memcopy_validate_stub(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src) +{ + return true; +} + +void uvm_hal_ce_memcopy_patch_src_stub(uvm_push_t *push, uvm_gpu_address_t *src) +{ +} + +bool uvm_hal_ce_memset_validate_stub(uvm_push_t *push, uvm_gpu_address_t dst, size_t element_size) +{ + return true; +} diff --git a/kernel-open/nvidia-uvm/uvm_hal.h b/kernel-open/nvidia-uvm/uvm_hal.h new file mode 100644 index 000000000..2ddfce9df --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_hal.h @@ -0,0 +1,818 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_HAL_H__ +#define __UVM_HAL_H__ + +#include "uvm_types.h" +#include "uvm_common.h" +#include "uvm_forward_decl.h" +#include "uvm_hal_types.h" +#include "uvm_push.h" +#include "uvm_gpu.h" +#include "uvm_test_ioctl.h" + +// A dummy method validation that always returns true; it can be used to skip +// CE/Host/SW method validations for a given architecture +bool uvm_hal_method_validate_stub(uvm_push_t *push, NvU32 method_address, NvU32 method_data); + +typedef void (*uvm_hal_init_t)(uvm_push_t *push); +void uvm_hal_maxwell_ce_init(uvm_push_t *push); +void uvm_hal_maxwell_host_init_noop(uvm_push_t *push); +void uvm_hal_pascal_host_init(uvm_push_t *push); + + + + + +// Host method validation +typedef bool (*uvm_hal_host_method_validate)(uvm_push_t *push, NvU32 method_address, NvU32 method_data); +bool uvm_hal_ampere_host_method_validate(uvm_push_t *push, NvU32 method_address, NvU32 method_data); + +// SW method validation +typedef bool (*uvm_hal_host_sw_method_validate)(uvm_push_t *push, NvU32 method_address, NvU32 method_data); +bool uvm_hal_ampere_host_sw_method_validate(uvm_push_t *push, NvU32 method_address, NvU32 method_data); + +// Wait for idle +typedef void (*uvm_hal_wait_for_idle_t)(uvm_push_t *push); +void uvm_hal_maxwell_host_wait_for_idle(uvm_push_t *push); + +// Membar SYS +typedef void (*uvm_hal_membar_sys_t)(uvm_push_t *push); +void uvm_hal_maxwell_host_membar_sys(uvm_push_t *push); +void uvm_hal_pascal_host_membar_sys(uvm_push_t *push); + +// Membar GPU +typedef void (*uvm_hal_membar_gpu_t)(uvm_push_t *push); +void uvm_hal_pascal_host_membar_gpu(uvm_push_t *push); + +// Put a noop in the pushbuffer of the given size in bytes. +// The size needs to be a multiple of 4. +typedef void (*uvm_hal_noop_t)(uvm_push_t *push, NvU32 size); +void uvm_hal_maxwell_host_noop(uvm_push_t *push, NvU32 size); + +// Host-generated interrupt method. This will generate a call to +// uvm_isr_top_half_entry. +// +// This is a non-stalling interrupt, which means that it's fire-and-forget. Host +// will not stall method processing nor stop channel switching, which means that +// we cannot directly identify in software which channel generated the +// interrupt. +// +// We must set up software state before pushing the interrupt, and check any +// possible interrupt condition on receiving an interrupt callback. +typedef void (*uvm_hal_interrupt_t)(uvm_push_t *push); +void uvm_hal_maxwell_host_interrupt(uvm_push_t *push); + +// Issue a TLB invalidate applying to all VAs in a PDB. +// +// The PTE caches (TLBs) are always invalidated. The PDE caches for all VAs in +// the PDB are invalidated from the specified depth down to the PTEs. This +// allows for optimizations if the caller isn't writing all levels of the PDEs. +// Depth follows the MMU code convention where depth 0 is the top level and here +// means to invalidate everything. See uvm_pascal_mmu.c for an example of depth +// mapping to HW PDE levels. 
Notably 2M PTEs are considered PDEs as far as the +// TLBs are concerned and hence on Pascal the depth needs to be at most 3 for +// them to be included in the invalidation. +// +// If the membar parameter is not UVM_MEMBAR_NONE, the specified membar is +// performed logically after the TLB invalidate such that all physical memory +// accesses using the old translations are ordered to the scope of the membar. +typedef void (*uvm_hal_host_tlb_invalidate_all_t)(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + uvm_membar_t membar); +void uvm_hal_maxwell_host_tlb_invalidate_all_a16f(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + uvm_membar_t membar); +void uvm_hal_maxwell_host_tlb_invalidate_all_b06f(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + uvm_membar_t membar); +void uvm_hal_pascal_host_tlb_invalidate_all(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + uvm_membar_t membar); +void uvm_hal_ampere_host_tlb_invalidate_all(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + uvm_membar_t membar); + + + + + + + +// Issue a TLB invalidate applying to the specified VA range in a PDB. +// +// The PTE caches (TLBs) for each page size aligned VA within the VA range +// are always invalidated. The PDE caches covering the specified VA +// range in the PDB are invalidated from the specified depth down to the PTEs. +// Specifying the depth allows for optimizations if the caller isn't writing all +// levels of the PDEs. Specifying the page size allows for optimizations if +// the caller can guarantee caches for smaller page sizes don't need to be +// invalidated. +// +// Depth follows the MMU code convention where depth 0 is the top level and here +// means to invalidate all levels. See uvm_pascal_mmu.c for an example of depth +// mapping to HW PDE levels. Notably 2M PTEs are considered PDEs as far as the +// TLBs are concerned and hence on Pascal the depth needs to be at most 3 for +// them to be included in the invalidation. +// +// If the membar parameter is not UVM_MEMBAR_NONE, the specified membar is +// performed logically after the TLB invalidate such that all physical memory +// accesses using the old translations are ordered to the scope of the membar. +// +// Note that this can end up pushing a lot of methods for big ranges so it's +// better not to use it directly. Instead, uvm_tlb_batch* APIs should be used +// that automatically switch between targeted VA invalidates and invalidate all. 
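+//
+// Illustrative call (editorial sketch, not from the original header; the
+// UVM_PAGE_SIZE_2M constant is assumed from elsewhere in the driver):
+// invalidating a single 2M mapping on Pascal, where 2M PTEs sit at depth 3,
+// could look roughly like
+//
+//     gpu->parent->host_hal->tlb_invalidate_va(push, pdb, 3, base,
+//                                              2 * 1024 * 1024,
+//                                              UVM_PAGE_SIZE_2M,
+//                                              UVM_MEMBAR_NONE);
+//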
+typedef void (*uvm_hal_host_tlb_invalidate_va_t)(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + NvU64 base, + NvU64 size, + NvU32 page_size, + uvm_membar_t membar); +void uvm_hal_maxwell_host_tlb_invalidate_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + NvU64 base, + NvU64 size, + NvU32 page_size, + uvm_membar_t membar); +void uvm_hal_pascal_host_tlb_invalidate_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + NvU64 base, + NvU64 size, + NvU32 page_size, + uvm_membar_t membar); +void uvm_hal_volta_host_tlb_invalidate_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + NvU64 base, + NvU64 size, + NvU32 page_size, + uvm_membar_t membar); +void uvm_hal_ampere_host_tlb_invalidate_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + NvU64 base, + NvU64 size, + NvU32 page_size, + uvm_membar_t membar); + + + + + + + + + + +typedef void (*uvm_hal_host_tlb_invalidate_test_t)(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + UVM_TEST_INVALIDATE_TLB_PARAMS *params); +void uvm_hal_maxwell_host_tlb_invalidate_test(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + UVM_TEST_INVALIDATE_TLB_PARAMS *params); +void uvm_hal_pascal_host_tlb_invalidate_test(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + UVM_TEST_INVALIDATE_TLB_PARAMS *params); +void uvm_hal_ampere_host_tlb_invalidate_test(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + UVM_TEST_INVALIDATE_TLB_PARAMS *params); + + + + + + +// By default all semaphore release operations include a membar sys before the +// operation. This can be affected by using UVM_PUSH_FLAG_NEXT_* flags with +// uvm_push_set_flag(). +typedef void (*uvm_hal_semaphore_release_t)(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); +void uvm_hal_maxwell_host_semaphore_release(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); +void uvm_hal_maxwell_ce_semaphore_release(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); + + + +void uvm_hal_pascal_ce_semaphore_release(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); +void uvm_hal_turing_host_semaphore_release(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); + + + + + + + + +// Release a semaphore including a timestamp at the specific GPU VA. +// +// This operation writes 16 bytes of memory and the VA needs to be 16-byte +// aligned. The value of the released payload is unspecified and shouldn't be +// relied on, only the timestamp should be of interest. 
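Relating to the semaphore release entry points above (the timestamp typedef follows below), the snippet here shows how the default membar sys could be suppressed for a single release. It is only a sketch: UVM_PUSH_FLAG_NEXT_MEMBAR_NONE and uvm_push_set_flag() are assumed to come from uvm_push.h rather than this header, and the ce_hal pointer is assumed to exist on the parent GPU alongside the host_hal pointer used later in this file.

// Illustrative sketch only: release a semaphore from a CE push without the
// default membar sys, e.g. when only GPU-local ordering is needed.
static void example_release_no_membar(uvm_push_t *push,
                                      uvm_gpu_t *gpu,
                                      NvU64 sema_gpu_va,
                                      NvU32 payload)
{
    uvm_push_set_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); // assumed flag name
    gpu->parent->ce_hal->semaphore_release(push, sema_gpu_va, payload);
}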
+typedef void (*uvm_hal_semaphore_timestamp_t)(uvm_push_t *push, NvU64 gpu_va); +void uvm_hal_maxwell_ce_semaphore_timestamp(uvm_push_t *push, NvU64 gpu_va); +void uvm_hal_pascal_ce_semaphore_timestamp(uvm_push_t *push, NvU64 gpu_va); + + + + +void uvm_hal_maxwell_host_semaphore_timestamp(uvm_push_t *push, NvU64 gpu_va); +void uvm_hal_volta_host_semaphore_timestamp(uvm_push_t *push, NvU64 gpu_va); + + + + + + + + + +typedef void (*uvm_hal_semaphore_acquire_t)(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); +void uvm_hal_maxwell_host_semaphore_acquire(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); +void uvm_hal_turing_host_semaphore_acquire(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); + + + + +typedef void (*uvm_hal_host_set_gpfifo_entry_t)(NvU64 *fifo_entry, NvU64 pushbuffer_va, NvU32 pushbuffer_length); +void uvm_hal_maxwell_host_set_gpfifo_entry(NvU64 *fifo_entry, NvU64 pushbuffer_va, NvU32 pushbuffer_length); +void uvm_hal_turing_host_set_gpfifo_entry(NvU64 *fifo_entry, NvU64 pushbuffer_va, NvU32 pushbuffer_length); + +typedef void (*uvm_hal_host_write_gpu_put_t)(uvm_channel_t *channel, NvU32 gpu_put); +void uvm_hal_maxwell_host_write_gpu_put(uvm_channel_t *channel, NvU32 gpu_put); +void uvm_hal_volta_host_write_gpu_put(uvm_channel_t *channel, NvU32 gpu_put); + +// Return the current GPU time in nanoseconds +typedef NvU64 (*uvm_hal_get_time_t)(uvm_gpu_t *gpu); +NvU64 uvm_hal_maxwell_get_time(uvm_gpu_t *gpu); + +// Internal helpers used by the CE hal +// Used to handle the offset encoding differences between architectures +typedef void (*uvm_hal_ce_offset_out_t)(uvm_push_t *push, NvU64 offset); +void uvm_hal_maxwell_ce_offset_out(uvm_push_t *push, NvU64 offset); +void uvm_hal_pascal_ce_offset_out(uvm_push_t *push, NvU64 offset); + + + + +typedef void (*uvm_hal_ce_offset_in_out_t)(uvm_push_t *push, NvU64 offset_in, NvU64 offset_out); +void uvm_hal_maxwell_ce_offset_in_out(uvm_push_t *push, NvU64 offset_in, NvU64 offset_out); +void uvm_hal_pascal_ce_offset_in_out(uvm_push_t *push, NvU64 offset_in, NvU64 offset_out); + + + + +typedef NvU32 (*uvm_hal_ce_phys_mode_t)(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src); +NvU32 uvm_hal_maxwell_ce_phys_mode(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src); +NvU32 uvm_hal_ampere_ce_phys_mode(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src); + +typedef NvU32 (*uvm_hal_ce_plc_mode_t)(void); +NvU32 uvm_hal_maxwell_ce_plc_mode(void); +NvU32 uvm_hal_ampere_ce_plc_mode_c7b5(void); + +// CE method validation +typedef bool (*uvm_hal_ce_method_validate)(uvm_push_t *push, NvU32 method_address, NvU32 method_data); +bool uvm_hal_ampere_ce_method_validate_c6b5(uvm_push_t *push, NvU32 method_address, NvU32 method_data); + +// Memcopy validation. +// The validation happens at the start of the memcopy (uvm_hal_memcopy_t) +// execution. Use uvm_hal_ce_memcopy_validate_stub to skip the validation for +// a given architecture. 
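The method and memcopy validation hooks above (the memcopy validation typedef follows below) only do real work on architectures that need to restrict the methods UVM pushes; Ampere provides specific validators, and everything else can plug in the stubs. The fragment here is a hypothetical table entry using the uvm_hal_class_ops_t type defined later in this header; the real tables live outside this header and the 0xc6b5 class value is assumed.

// Illustrative sketch only: a CE ops entry keyed by a copy-engine class id.
static uvm_hal_class_ops_t example_ce_entry =
{
    .id = 0xc6b5,                                   // assumed class id value
    .u.ce_ops = {
        .method_validate   = uvm_hal_ampere_ce_method_validate_c6b5,
        .memcopy_validate  = uvm_hal_ampere_ce_memcopy_validate_c6b5,
        .memcopy_patch_src = uvm_hal_ampere_ce_memcopy_patch_src_c6b5,
        // Remaining fields would presumably be filled in from the parent
        // class entry identified by parent_id.
    },
};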
+typedef bool (*uvm_hal_ce_memcopy_validate)(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src); +bool uvm_hal_ce_memcopy_validate_stub(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src); +bool uvm_hal_ampere_ce_memcopy_validate_c6b5(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src); + +// Patching of the memcopy source; if not needed for a given architecture use +// the (empty) uvm_hal_ce_memcopy_patch_src_stub implementation +typedef void (*uvm_hal_ce_memcopy_patch_src)(uvm_push_t *push, uvm_gpu_address_t *src); +void uvm_hal_ce_memcopy_patch_src_stub(uvm_push_t *push, uvm_gpu_address_t *src); +void uvm_hal_ampere_ce_memcopy_patch_src_c6b5(uvm_push_t *push, uvm_gpu_address_t *src); + +// Memcopy size bytes from src to dst. +// +// By default all CE transfer operations include a membar sys after the +// operation and are not pipelined. This can be affected by using +// UVM_PUSH_FLAG_NEXT_CE_* flags with uvm_push_set_flag(). +typedef void (*uvm_hal_memcopy_t)(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src, size_t size); +void uvm_hal_maxwell_ce_memcopy(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src, size_t size); + +// Simple wrapper for uvm_hal_memcopy_t with both addresses being virtual +typedef void (*uvm_hal_memcopy_v_to_v_t)(uvm_push_t *push, NvU64 dst, NvU64 src, size_t size); +void uvm_hal_maxwell_ce_memcopy_v_to_v(uvm_push_t *push, NvU64 dst, NvU64 src, size_t size); + +// Memset validation. +// The validation happens at the start of the memset (uvm_hal_memset_*_t) +// execution. Use uvm_hal_ce_memset_validate_stub to skip the validation for +// a given architecture. +typedef bool (*uvm_hal_ce_memset_validate)(uvm_push_t *push, uvm_gpu_address_t dst, size_t element_size); +bool uvm_hal_ce_memset_validate_stub(uvm_push_t *push, uvm_gpu_address_t dst, size_t element_size); +bool uvm_hal_ampere_ce_memset_validate_c6b5(uvm_push_t *push, uvm_gpu_address_t dst, size_t element_size); + +// Memset size bytes at dst to a given N-byte input value. +// +// Size has to be a multiple of the element size. For example, the size passed +// to uvm_hal_memset_4_t must be a multiple of 4 bytes. +// +// By default all CE transfer operations include a membar sys after the +// operation and are not pipelined. This can be affected by using +// UVM_PUSH_FLAG_NEXT_CE_* flags with uvm_push_set_flag(). +typedef void (*uvm_hal_memset_1_t)(uvm_push_t *push, uvm_gpu_address_t dst, NvU8 value, size_t size); +typedef void (*uvm_hal_memset_4_t)(uvm_push_t *push, uvm_gpu_address_t dst, NvU32 value, size_t size); +typedef void (*uvm_hal_memset_8_t)(uvm_push_t *push, uvm_gpu_address_t dst, NvU64 value, size_t size); + +// Simple wrapper for uvm_hal_memset_4_t with the address being virtual. +typedef void (*uvm_hal_memset_v_4_t)(uvm_push_t *push, NvU64 dst_va, NvU32 value, size_t size); + +void uvm_hal_maxwell_ce_memset_1(uvm_push_t *push, uvm_gpu_address_t dst, NvU8 value, size_t size); +void uvm_hal_maxwell_ce_memset_4(uvm_push_t *push, uvm_gpu_address_t dst, NvU32 value, size_t size); +void uvm_hal_maxwell_ce_memset_8(uvm_push_t *push, uvm_gpu_address_t dst, NvU64 value, size_t size); +void uvm_hal_maxwell_ce_memset_v_4(uvm_push_t *push, NvU64 dst_va, NvU32 value, size_t size); + + + + + + + +// Increments the semaphore by 1, or resets to 0 if the incremented value would +// exceed the payload. +// +// By default all CE semaphore operations include a membar sys before the +// semaphore operation. 
This can be affected by using UVM_PUSH_FLAG_NEXT_CE_* +// flags with uvm_push_set_flag(). +typedef void (*uvm_hal_semaphore_reduction_inc_t)(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); +void uvm_hal_maxwell_ce_semaphore_reduction_inc(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); +void uvm_hal_pascal_ce_semaphore_reduction_inc(uvm_push_t *push, NvU64 gpu_va, NvU32 payload); + + + + +// Initialize GPU architecture dependent properties +typedef void (*uvm_hal_arch_init_properties_t)(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_maxwell_arch_init_properties(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_pascal_arch_init_properties(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_volta_arch_init_properties(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_turing_arch_init_properties(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_ampere_arch_init_properties(uvm_parent_gpu_t *parent_gpu); + + + + + + + +// Retrieve the page-tree HAL for a given big page size +typedef uvm_mmu_mode_hal_t *(*uvm_hal_lookup_mode_hal_t)(NvU32 big_page_size); +typedef void (*uvm_hal_mmu_enable_prefetch_faults_t)(uvm_parent_gpu_t *parent_gpu); +typedef void (*uvm_hal_mmu_disable_prefetch_faults_t)(uvm_parent_gpu_t *parent_gpu); +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_maxwell(NvU32 big_page_size); +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_pascal(NvU32 big_page_size); +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_volta(NvU32 big_page_size); +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_turing(NvU32 big_page_size); +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_ampere(NvU32 big_page_size); + + + +void uvm_hal_maxwell_mmu_enable_prefetch_faults_unsupported(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_maxwell_mmu_disable_prefetch_faults_unsupported(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_pascal_mmu_enable_prefetch_faults(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_pascal_mmu_disable_prefetch_faults(uvm_parent_gpu_t *parent_gpu); + +// Convert a faulted MMU engine ID to a UVM engine type. Only engines which have +// faults serviced by UVM are handled. On Pascal the only such engine is +// GRAPHICS, so no translation is provided. 
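Looking back at the CE memset interfaces above, the size passed in must be a multiple of the element size. A minimal sketch follows, with hypothetical names and an assumed ce_hal pointer on the parent GPU (the MMU engine type conversion declarations continue below).

// Illustrative sketch only: zero a 4KB page of vidmem using the 4-byte memset.
static void example_zero_page(uvm_push_t *push, uvm_gpu_t *gpu, NvU64 page_pa)
{
    uvm_gpu_address_t dst = uvm_gpu_address_physical(UVM_APERTURE_VID, page_pa);

    // 4096 is a multiple of the 4-byte element size, as required. By default
    // the operation is not pipelined and a membar sys follows it.
    gpu->parent->ce_hal->memset_4(push, dst, 0, 4096);
}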
+typedef uvm_mmu_engine_type_t (*uvm_hal_mmu_engine_id_to_type_t)(NvU16 mmu_engine_id); +uvm_mmu_engine_type_t uvm_hal_maxwell_mmu_engine_id_to_type_unsupported(NvU16 mmu_engine_id); +uvm_mmu_engine_type_t uvm_hal_volta_mmu_engine_id_to_type(NvU16 mmu_engine_id); +uvm_mmu_engine_type_t uvm_hal_turing_mmu_engine_id_to_type(NvU16 mmu_engine_id); +uvm_mmu_engine_type_t uvm_hal_ampere_mmu_engine_id_to_type(NvU16 mmu_engine_id); + + + + +typedef NvU16 (*uvm_hal_mmu_client_id_to_utlb_id_t)(NvU16 client_id); +NvU16 uvm_hal_maxwell_mmu_client_id_to_utlb_id_unsupported(NvU16 client_id); +NvU16 uvm_hal_pascal_mmu_client_id_to_utlb_id(NvU16 client_id); +NvU16 uvm_hal_volta_mmu_client_id_to_utlb_id(NvU16 client_id); +NvU16 uvm_hal_ampere_mmu_client_id_to_utlb_id(NvU16 client_id); + + + + +// Replayable faults +typedef void (*uvm_hal_enable_replayable_faults_t)(uvm_parent_gpu_t *parent_gpu); +typedef void (*uvm_hal_disable_replayable_faults_t)(uvm_parent_gpu_t *parent_gpu); +typedef void (*uvm_hal_clear_replayable_faults_t)(uvm_parent_gpu_t *parent_gpu, NvU32 get); +typedef NvU32 (*uvm_hal_fault_buffer_read_put_t)(uvm_parent_gpu_t *parent_gpu); +typedef NvU32 (*uvm_hal_fault_buffer_read_get_t)(uvm_parent_gpu_t *parent_gpu); +typedef void (*uvm_hal_fault_buffer_write_get_t)(uvm_parent_gpu_t *parent_gpu, NvU32 get); +typedef NvU8 (*uvm_hal_fault_buffer_get_ve_id_t)(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type); + +// Parse the entry on the given buffer index. This also clears the valid bit of +// the entry in the buffer. +typedef void (*uvm_hal_fault_buffer_parse_entry_t)(uvm_parent_gpu_t *gpu, + NvU32 index, + uvm_fault_buffer_entry_t *buffer_entry); +typedef bool (*uvm_hal_fault_buffer_entry_is_valid_t)(uvm_parent_gpu_t *parent_gpu, NvU32 index); +typedef void (*uvm_hal_fault_buffer_entry_clear_valid_t)(uvm_parent_gpu_t *parent_gpu, NvU32 index); +typedef NvU32 (*uvm_hal_fault_buffer_entry_size_t)(uvm_parent_gpu_t *parent_gpu); +typedef void (*uvm_hal_fault_buffer_replay_t)(uvm_push_t *push, uvm_fault_replay_type_t type); +typedef void (*uvm_hal_fault_cancel_global_t)(uvm_push_t *push, uvm_gpu_phys_address_t instance_ptr); +typedef void (*uvm_hal_fault_cancel_targeted_t)(uvm_push_t *push, + uvm_gpu_phys_address_t instance_ptr, + NvU32 gpc_id, + NvU32 client_id); + +void uvm_hal_maxwell_enable_replayable_faults_unsupported(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_maxwell_disable_replayable_faults_unsupported(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_maxwell_clear_replayable_faults_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 get); +NvU32 uvm_hal_maxwell_fault_buffer_read_put_unsupported(uvm_parent_gpu_t *parent_gpu); +NvU32 uvm_hal_maxwell_fault_buffer_read_get_unsupported(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_maxwell_fault_buffer_write_get_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index); +NvU8 uvm_hal_maxwell_fault_buffer_get_ve_id_unsupported(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type); +void uvm_hal_maxwell_fault_buffer_parse_entry_unsupported(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_pascal_enable_replayable_faults(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_pascal_disable_replayable_faults(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_pascal_clear_replayable_faults(uvm_parent_gpu_t *parent_gpu, NvU32 get); +NvU32 uvm_hal_pascal_fault_buffer_read_put(uvm_parent_gpu_t *parent_gpu); +NvU32 uvm_hal_pascal_fault_buffer_read_get(uvm_parent_gpu_t *parent_gpu); +void 
uvm_hal_pascal_fault_buffer_write_get(uvm_parent_gpu_t *parent_gpu, NvU32 index); +void uvm_hal_pascal_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_fault_buffer_entry_t *buffer_entry); +NvU32 uvm_hal_volta_fault_buffer_read_put(uvm_parent_gpu_t *parent_gpu); +NvU32 uvm_hal_volta_fault_buffer_read_get(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_volta_fault_buffer_write_get(uvm_parent_gpu_t *parent_gpu, NvU32 index); +NvU8 uvm_hal_volta_fault_buffer_get_ve_id(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type); +void uvm_hal_volta_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_turing_disable_replayable_faults(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_turing_clear_replayable_faults(uvm_parent_gpu_t *parent_gpu, NvU32 get); + + + + +bool uvm_hal_maxwell_fault_buffer_entry_is_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index); +void uvm_hal_maxwell_fault_buffer_entry_clear_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index); +NvU32 uvm_hal_maxwell_fault_buffer_entry_size_unsupported(uvm_parent_gpu_t *parent_gpu); +bool uvm_hal_pascal_fault_buffer_entry_is_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index); +void uvm_hal_pascal_fault_buffer_entry_clear_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index); +NvU32 uvm_hal_pascal_fault_buffer_entry_size(uvm_parent_gpu_t *parent_gpu); + +typedef void (*uvm_hal_fault_buffer_parse_non_replayable_entry_t)(uvm_parent_gpu_t *parent_gpu, + void *fault_packet, + uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_maxwell_fault_buffer_parse_non_replayable_entry_unsupported(uvm_parent_gpu_t *parent_gpu, + void *fault_packet, + uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_volta_fault_buffer_parse_non_replayable_entry(uvm_parent_gpu_t *parent_gpu, + void *fault_packet, + uvm_fault_buffer_entry_t *buffer_entry); + +void uvm_hal_maxwell_cancel_faults_global_unsupported(uvm_push_t *push, uvm_gpu_phys_address_t instance_ptr); +void uvm_hal_pascal_cancel_faults_global(uvm_push_t *push, uvm_gpu_phys_address_t instance_ptr); + +// Trigger fault replay on the GPU where the given pushbuffer is located. 
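The replay and targeted-cancel declarations follow below. As a sketch of how the replay hook might be used once a batch of replayable faults has been serviced (the helper and parameter names are hypothetical, and beginning/ending the push is out of scope here):

// Illustrative sketch only: ask the GPU to retry all pending replayable
// faults. UVM_FAULT_REPLAY_TYPE_START completes once the replays are in
// flight, as opposed to START_ACK_ALL which waits for re-translation.
static void example_replay_start(uvm_push_t *push, uvm_gpu_t *gpu)
{
    gpu->parent->host_hal->replay_faults(push, UVM_FAULT_REPLAY_TYPE_START);
}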
+void uvm_hal_maxwell_replay_faults_unsupported(uvm_push_t *push, uvm_fault_replay_type_t type); +void uvm_hal_maxwell_cancel_faults_targeted_unsupported(uvm_push_t *push, + uvm_gpu_phys_address_t instance_ptr, + NvU32 gpc_id, + NvU32 client_id); +void uvm_hal_pascal_replay_faults(uvm_push_t *push, uvm_fault_replay_type_t type); +void uvm_hal_pascal_cancel_faults_targeted(uvm_push_t *push, + uvm_gpu_phys_address_t instance_ptr, + NvU32 gpc_id, + NvU32 client_id); + +typedef void (*uvm_hal_fault_cancel_va_t)(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + const uvm_fault_buffer_entry_t *fault_entry, + uvm_fault_cancel_va_mode_t cancel_va_mode); + +void uvm_hal_maxwell_cancel_faults_va_unsupported(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + const uvm_fault_buffer_entry_t *fault_entry, + uvm_fault_cancel_va_mode_t cancel_va_mode); + +void uvm_hal_volta_replay_faults(uvm_push_t *push, uvm_fault_replay_type_t type); +void uvm_hal_volta_cancel_faults_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + const uvm_fault_buffer_entry_t *fault_entry, + uvm_fault_cancel_va_mode_t cancel_va_mode); + + + + + + + + +typedef void (*uvm_hal_host_clear_faulted_channel_method_t)(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); + +void uvm_hal_maxwell_host_clear_faulted_channel_method_unsupported(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_volta_host_clear_faulted_channel_method(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_turing_host_clear_faulted_channel_method(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); +typedef void (*uvm_hal_host_clear_faulted_channel_register_t)(uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_maxwell_host_clear_faulted_channel_register_unsupported(uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_ampere_host_clear_faulted_channel_register(uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); + +typedef void (*uvm_hal_host_clear_faulted_channel_sw_method_t)(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_maxwell_host_clear_faulted_channel_sw_method_unsupported(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); +void uvm_hal_ampere_host_clear_faulted_channel_sw_method(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry); + +void uvm_hal_print_fault_entry(const uvm_fault_buffer_entry_t *entry); +void uvm_hal_print_access_counter_buffer_entry(const uvm_access_counter_buffer_entry_t *entry); + +// Access counters +typedef void (*uvm_hal_enable_access_counter_notifications_t)(uvm_parent_gpu_t *parent_gpu); +typedef void (*uvm_hal_disable_access_counter_notifications_t)(uvm_parent_gpu_t *parent_gpu); +typedef void (*uvm_hal_clear_access_counter_notifications_t)(uvm_parent_gpu_t *parent_gpu, NvU32 get); + +// Parse the entry on the given buffer index. This also clears the valid bit of +// the entry in the buffer. 
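The access counter notification buffer parsed below follows the same cached GET/PUT ring protocol as the replayable fault buffer declared earlier: read_get/read_put report the ring indices, parse_entry consumes an entry (clearing its valid bit), and write_get publishes the new GET. A minimal sketch of such a drain loop for the fault buffer, with hypothetical helper and parameter names and an assumed fault_buffer_hal pointer on the parent GPU (the access counter typedefs follow below):

// Illustrative sketch only: drain every pending replayable fault entry.
// Locking, batching, replay and fatal-fault handling are all omitted.
static void example_drain_fault_buffer(uvm_parent_gpu_t *parent_gpu,
                                       NvU32 buffer_entry_count,
                                       uvm_fault_buffer_entry_t *entry)
{
    uvm_fault_buffer_hal_t *hal = parent_gpu->fault_buffer_hal; // assumed field name
    NvU32 get = hal->read_get(parent_gpu);
    NvU32 put = hal->read_put(parent_gpu);

    while (get != put) {
        hal->parse_entry(parent_gpu, get, entry);  // also clears the valid bit
        // ... service the fault described by *entry ...
        if (++get == buffer_entry_count)
            get = 0;
    }

    hal->write_get(parent_gpu, get);
}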
+typedef void (*uvm_hal_access_counter_buffer_parse_entry_t)(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_access_counter_buffer_entry_t *buffer_entry); +typedef bool (*uvm_hal_access_counter_buffer_entry_is_valid_t)(uvm_parent_gpu_t *parent_gpu, NvU32 index); +typedef void (*uvm_hal_access_counter_buffer_entry_clear_valid_t)(uvm_parent_gpu_t *parent_gpu, NvU32 index); +typedef NvU32 (*uvm_hal_access_counter_buffer_entry_size_t)(uvm_parent_gpu_t *parent_gpu); +typedef void (*uvm_hal_access_counter_clear_all_t)(uvm_push_t *push); +typedef void (*uvm_hal_access_counter_clear_type_t)(uvm_push_t *push, uvm_access_counter_type_t type); +typedef void (*uvm_hal_access_counter_clear_targeted_t)(uvm_push_t *push, + const uvm_access_counter_buffer_entry_t *buffer_entry); + +void uvm_hal_maxwell_enable_access_counter_notifications_unsupported(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_maxwell_disable_access_counter_notifications_unsupported(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_maxwell_clear_access_counter_notifications_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 get); +void uvm_hal_maxwell_access_counter_buffer_parse_entry_unsupported(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_access_counter_buffer_entry_t *buffer_entry); +bool uvm_hal_maxwell_access_counter_buffer_entry_is_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index); +void uvm_hal_maxwell_access_counter_buffer_entry_clear_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index); +NvU32 uvm_hal_maxwell_access_counter_buffer_entry_size_unsupported(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_maxwell_access_counter_clear_all_unsupported(uvm_push_t *push); +void uvm_hal_maxwell_access_counter_clear_type_unsupported(uvm_push_t *push, uvm_access_counter_type_t type); +void uvm_hal_maxwell_access_counter_clear_targeted_unsupported(uvm_push_t *push, + const uvm_access_counter_buffer_entry_t *buffer_entry); + +void uvm_hal_volta_enable_access_counter_notifications(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_volta_disable_access_counter_notifications(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_volta_clear_access_counter_notifications(uvm_parent_gpu_t *parent_gpu, NvU32 get); +void uvm_hal_volta_access_counter_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_access_counter_buffer_entry_t *buffer_entry); +bool uvm_hal_volta_access_counter_buffer_entry_is_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index); +void uvm_hal_volta_access_counter_buffer_entry_clear_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index); +NvU32 uvm_hal_volta_access_counter_buffer_entry_size(uvm_parent_gpu_t *parent_gpu); + +void uvm_hal_volta_access_counter_clear_all(uvm_push_t *push); +void uvm_hal_volta_access_counter_clear_type(uvm_push_t *push, uvm_access_counter_type_t type); +void uvm_hal_volta_access_counter_clear_targeted(uvm_push_t *push, + const uvm_access_counter_buffer_entry_t *buffer_entry); + +void uvm_hal_turing_disable_access_counter_notifications(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_turing_clear_access_counter_notifications(uvm_parent_gpu_t *parent_gpu, NvU32 get); + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +struct uvm_host_hal_struct +{ + uvm_hal_init_t init; + uvm_hal_host_method_validate method_validate; + uvm_hal_host_sw_method_validate sw_method_validate; + uvm_hal_wait_for_idle_t wait_for_idle; + uvm_hal_membar_sys_t membar_sys; + uvm_hal_membar_gpu_t membar_gpu; + uvm_hal_noop_t noop; + uvm_hal_interrupt_t interrupt; + uvm_hal_semaphore_release_t semaphore_release; + 
uvm_hal_semaphore_acquire_t semaphore_acquire; + uvm_hal_semaphore_timestamp_t semaphore_timestamp; + uvm_hal_host_set_gpfifo_entry_t set_gpfifo_entry; + uvm_hal_host_write_gpu_put_t write_gpu_put; + uvm_hal_host_tlb_invalidate_all_t tlb_invalidate_all; + uvm_hal_host_tlb_invalidate_va_t tlb_invalidate_va; + uvm_hal_host_tlb_invalidate_test_t tlb_invalidate_test; + uvm_hal_fault_buffer_replay_t replay_faults; + uvm_hal_fault_cancel_global_t cancel_faults_global; + uvm_hal_fault_cancel_targeted_t cancel_faults_targeted; + uvm_hal_fault_cancel_va_t cancel_faults_va; + uvm_hal_host_clear_faulted_channel_method_t clear_faulted_channel_sw_method; + uvm_hal_host_clear_faulted_channel_method_t clear_faulted_channel_method; + uvm_hal_host_clear_faulted_channel_register_t clear_faulted_channel_register; + uvm_hal_access_counter_clear_all_t access_counter_clear_all; + uvm_hal_access_counter_clear_type_t access_counter_clear_type; + uvm_hal_access_counter_clear_targeted_t access_counter_clear_targeted; + uvm_hal_get_time_t get_time; +}; + +struct uvm_ce_hal_struct +{ + uvm_hal_init_t init; + uvm_hal_ce_method_validate method_validate; + uvm_hal_semaphore_release_t semaphore_release; + uvm_hal_semaphore_timestamp_t semaphore_timestamp; + uvm_hal_ce_offset_out_t offset_out; + uvm_hal_ce_offset_in_out_t offset_in_out; + uvm_hal_ce_phys_mode_t phys_mode; + uvm_hal_ce_plc_mode_t plc_mode; + uvm_hal_ce_memcopy_validate memcopy_validate; + uvm_hal_ce_memcopy_patch_src memcopy_patch_src; + uvm_hal_memcopy_t memcopy; + uvm_hal_memcopy_v_to_v_t memcopy_v_to_v; + uvm_hal_ce_memset_validate memset_validate; + uvm_hal_memset_1_t memset_1; + uvm_hal_memset_4_t memset_4; + uvm_hal_memset_8_t memset_8; + uvm_hal_memset_v_4_t memset_v_4; + uvm_hal_semaphore_reduction_inc_t semaphore_reduction_inc; +}; + +struct uvm_arch_hal_struct +{ + uvm_hal_arch_init_properties_t init_properties; + uvm_hal_lookup_mode_hal_t mmu_mode_hal; + uvm_hal_mmu_enable_prefetch_faults_t enable_prefetch_faults; + uvm_hal_mmu_disable_prefetch_faults_t disable_prefetch_faults; + uvm_hal_mmu_engine_id_to_type_t mmu_engine_id_to_type; + uvm_hal_mmu_client_id_to_utlb_id_t mmu_client_id_to_utlb_id; +}; + +struct uvm_fault_buffer_hal_struct +{ + uvm_hal_enable_replayable_faults_t enable_replayable_faults; + uvm_hal_disable_replayable_faults_t disable_replayable_faults; + uvm_hal_clear_replayable_faults_t clear_replayable_faults; + uvm_hal_fault_buffer_read_put_t read_put; + uvm_hal_fault_buffer_read_get_t read_get; + uvm_hal_fault_buffer_write_get_t write_get; + uvm_hal_fault_buffer_get_ve_id_t get_ve_id; + uvm_hal_fault_buffer_parse_entry_t parse_entry; + uvm_hal_fault_buffer_entry_is_valid_t entry_is_valid; + uvm_hal_fault_buffer_entry_clear_valid_t entry_clear_valid; + uvm_hal_fault_buffer_entry_size_t entry_size; + uvm_hal_fault_buffer_parse_non_replayable_entry_t parse_non_replayable_entry; +}; + +struct uvm_access_counter_buffer_hal_struct +{ + uvm_hal_enable_access_counter_notifications_t enable_access_counter_notifications; + uvm_hal_disable_access_counter_notifications_t disable_access_counter_notifications; + uvm_hal_clear_access_counter_notifications_t clear_access_counter_notifications; + uvm_hal_access_counter_buffer_parse_entry_t parse_entry; + uvm_hal_access_counter_buffer_entry_is_valid_t entry_is_valid; + uvm_hal_access_counter_buffer_entry_clear_valid_t entry_clear_valid; + uvm_hal_access_counter_buffer_entry_size_t entry_size; +}; + + + + + + + + + + + + +typedef struct +{ + // id is either a hardware class or GPU architecture + 
NvU32 id; + NvU32 parent_id; + union + { + // host_ops: id is a hardware class + uvm_host_hal_t host_ops; + + // ce_ops: id is a hardware class + uvm_ce_hal_t ce_ops; + + // arch_ops: id is an architecture + uvm_arch_hal_t arch_ops; + + // fault_buffer_ops: id is an architecture + uvm_fault_buffer_hal_t fault_buffer_ops; + + // access_counter_buffer_ops: id is an architecture + uvm_access_counter_buffer_hal_t access_counter_buffer_ops; + + + + + + } u; +} uvm_hal_class_ops_t; + +NV_STATUS uvm_hal_init_table(void); +NV_STATUS uvm_hal_init_gpu(uvm_parent_gpu_t *parent_gpu); +void uvm_hal_init_properties(uvm_parent_gpu_t *parent_gpu); + +// Helper to push a SYS or GPU membar based on the membar type +// +// Notably this doesn't just get the GPU from the push object to support the +// test mode of the page tree code that doesn't do real pushes. +static void uvm_hal_membar(uvm_gpu_t *gpu, uvm_push_t *push, uvm_membar_t membar) +{ + switch (membar) { + case UVM_MEMBAR_SYS: + gpu->parent->host_hal->membar_sys(push); + break; + case UVM_MEMBAR_GPU: + gpu->parent->host_hal->membar_gpu(push); + break; + case UVM_MEMBAR_NONE: + break; + } +} + +static void uvm_hal_wfi_membar(uvm_push_t *push, uvm_membar_t membar) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + gpu->parent->host_hal->wait_for_idle(push); + uvm_hal_membar(gpu, push, membar); +} + +// Internal helper used by the TLB invalidate hal functions. This issues the +// appropriate Host membar(s) after a TLB invalidate. +void uvm_hal_tlb_invalidate_membar(uvm_push_t *push, uvm_membar_t membar); + +#endif // __UVM_HAL_H__ diff --git a/kernel-open/nvidia-uvm/uvm_hal_types.h b/kernel-open/nvidia-uvm/uvm_hal_types.h new file mode 100644 index 000000000..25497cf25 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_hal_types.h @@ -0,0 +1,533 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_HAL_TYPES_H__ +#define __UVM_HAL_TYPES_H__ + +#include "uvm_common.h" +#include "uvm_forward_decl.h" +#include "uvm_processors.h" + +#define UVM_GPU_MMU_MAX_FAULT_PACKET_SIZE 32 + +typedef enum +{ + UVM_APERTURE_PEER_0, + UVM_APERTURE_PEER_1, + UVM_APERTURE_PEER_2, + UVM_APERTURE_PEER_3, + UVM_APERTURE_PEER_4, + UVM_APERTURE_PEER_5, + UVM_APERTURE_PEER_6, + UVM_APERTURE_PEER_7, + UVM_APERTURE_PEER_MAX, + UVM_APERTURE_SYS, + UVM_APERTURE_VID, + + // DEFAULT is a special value to let MMU pick the location of page tables + UVM_APERTURE_DEFAULT, + + UVM_APERTURE_MAX +} uvm_aperture_t; + +const char *uvm_aperture_string(uvm_aperture_t aperture); + +static bool uvm_aperture_is_peer(uvm_aperture_t aperture) +{ + return (aperture >= UVM_APERTURE_PEER_0) && (aperture < UVM_APERTURE_PEER_MAX); +} + +static inline NvU32 UVM_APERTURE_PEER_ID(uvm_aperture_t aperture) +{ + UVM_ASSERT(uvm_aperture_is_peer(aperture)); + + return (NvU32)aperture; +} + +static inline uvm_aperture_t UVM_APERTURE_PEER(NvU32 id) +{ + uvm_aperture_t aperture = (uvm_aperture_t)id; + + UVM_ASSERT(UVM_APERTURE_PEER_ID(aperture) == id); + + return aperture; +} + +// A physical GPU address +typedef struct +{ + NvU64 address; + + uvm_aperture_t aperture; +} uvm_gpu_phys_address_t; + +// Create a physical GPU address +static uvm_gpu_phys_address_t uvm_gpu_phys_address(uvm_aperture_t aperture, NvU64 address) +{ + return (uvm_gpu_phys_address_t){ address, aperture }; +} + +// Compare two gpu physical addresses +static int uvm_gpu_phys_addr_cmp(uvm_gpu_phys_address_t a, uvm_gpu_phys_address_t b) +{ + int result = UVM_CMP_DEFAULT(a.aperture, b.aperture); + if (result != 0) + return result; + + return UVM_CMP_DEFAULT(a.address, b.address); +} + +// A physical or virtual address directly accessible by a GPU. +// This implies that the address already went through identity mapping and IOMMU +// translations and is only valid for a specific GPU. 
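A small, self-contained illustration of the constructors and comparator above; the example function name is hypothetical (the combined physical/virtual uvm_gpu_address_t type described just above follows below).

// Illustrative sketch only: build two physical addresses and order them.
static int example_phys_addr_cmp(void)
{
    uvm_gpu_phys_address_t vid = uvm_gpu_phys_address(UVM_APERTURE_VID, 0x1000);
    uvm_gpu_phys_address_t sys = uvm_gpu_phys_address(UVM_APERTURE_SYS, 0x1000);

    // Apertures are compared before addresses, and UVM_APERTURE_SYS precedes
    // UVM_APERTURE_VID in the enum, so this returns a positive value.
    return uvm_gpu_phys_addr_cmp(vid, sys);
}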
+typedef struct +{ + // Physical or virtual address + // In general, only valid for a specific GPU + NvU64 address; + + // Aperture for a physical address + uvm_aperture_t aperture; + + // Whether the address is virtual + bool is_virtual; +} uvm_gpu_address_t; + +// Create a virtual GPU address +static uvm_gpu_address_t uvm_gpu_address_virtual(NvU64 va) +{ + uvm_gpu_address_t address = {0}; + address.address = va; + address.aperture = UVM_APERTURE_MAX; + address.is_virtual = true; + return address; +} + +// Create a physical GPU address +static uvm_gpu_address_t uvm_gpu_address_physical(uvm_aperture_t aperture, NvU64 pa) +{ + uvm_gpu_address_t address = {0}; + address.aperture = aperture; + address.address = pa; + return address; +} + +// Create a GPU address from a physical GPU address +static uvm_gpu_address_t uvm_gpu_address_from_phys(uvm_gpu_phys_address_t phys_address) +{ + return uvm_gpu_address_physical(phys_address.aperture, phys_address.address); +} + +static const char *uvm_gpu_address_aperture_string(uvm_gpu_address_t addr) +{ + if (addr.is_virtual) + return "VIRTUAL"; + return uvm_aperture_string(addr.aperture); +} + +// Compare two gpu addresses +static int uvm_gpu_addr_cmp(uvm_gpu_address_t a, uvm_gpu_address_t b) +{ + int result = UVM_CMP_DEFAULT(a.is_virtual, b.is_virtual); + if (result != 0) + return result; + + if (a.is_virtual) { + return UVM_CMP_DEFAULT(a.address, b.address); + } + else { + uvm_gpu_phys_address_t phys_a = { a.address, a.aperture }; + uvm_gpu_phys_address_t phys_b = { b.address, b.aperture }; + + return uvm_gpu_phys_addr_cmp(phys_a, phys_b); + } +} + +// For processors with no concept of an atomic fault (the CPU and pre-Pascal +// GPUs), UVM_PROT_READ_WRITE and UVM_PROT_READ_WRITE_ATOMIC are +// interchangeable. +typedef enum +{ + UVM_PROT_NONE, + UVM_PROT_READ_ONLY, + UVM_PROT_READ_WRITE, + UVM_PROT_READ_WRITE_ATOMIC, + UVM_PROT_MAX +} uvm_prot_t; + +const char *uvm_prot_string(uvm_prot_t prot); + +typedef enum +{ + UVM_MEMBAR_NONE, + UVM_MEMBAR_GPU, + UVM_MEMBAR_SYS, +} uvm_membar_t; + +const char *uvm_membar_string(uvm_membar_t membar); + +// Types of memory accesses that can cause a replayable fault on the GPU. They +// are ordered by access "intrusiveness" to simplify fault preprocessing (e.g. 
+// to implement fault coalescing) +typedef enum +{ + UVM_FAULT_ACCESS_TYPE_PREFETCH = 0, + UVM_FAULT_ACCESS_TYPE_READ, + UVM_FAULT_ACCESS_TYPE_WRITE, + UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK, + UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG, + UVM_FAULT_ACCESS_TYPE_COUNT +} uvm_fault_access_type_t; + +const char *uvm_fault_access_type_string(uvm_fault_access_type_t fault_access_type); + +static NvU32 uvm_fault_access_type_mask_bit(uvm_fault_access_type_t fault_access_type) +{ + BUILD_BUG_ON(UVM_FAULT_ACCESS_TYPE_COUNT >= 32); + + UVM_ASSERT(fault_access_type >= 0); + UVM_ASSERT(fault_access_type < UVM_FAULT_ACCESS_TYPE_COUNT); + + return (NvU32)1 << fault_access_type; +} + +static bool uvm_fault_access_type_mask_test(NvU32 mask, uvm_fault_access_type_t fault_access_type) +{ + return uvm_fault_access_type_mask_bit(fault_access_type) & mask; +} + +static void uvm_fault_access_type_mask_set(NvU32 *mask, uvm_fault_access_type_t fault_access_type) +{ + *mask |= uvm_fault_access_type_mask_bit(fault_access_type); +} + +static uvm_fault_access_type_t uvm_fault_access_type_mask_highest(NvU32 mask) +{ + int pos; + + UVM_ASSERT((1 << UVM_FAULT_ACCESS_TYPE_COUNT) > mask); + UVM_ASSERT(mask != 0); + + pos = __fls(mask); + UVM_ASSERT(pos < UVM_FAULT_ACCESS_TYPE_COUNT); + + return pos; +} + +static uvm_fault_access_type_t uvm_fault_access_type_mask_lowest(NvU32 mask) +{ + int pos; + + UVM_ASSERT((1 << UVM_FAULT_ACCESS_TYPE_COUNT) > mask); + UVM_ASSERT(mask != 0); + + pos = __ffs(mask); + UVM_ASSERT(pos < UVM_FAULT_ACCESS_TYPE_COUNT); + + return pos; +} + +typedef enum +{ + // Cancel all accesses on the page + UVM_FAULT_CANCEL_VA_MODE_ALL = 0, + + // Cancel write and atomic accesses on the page + UVM_FAULT_CANCEL_VA_MODE_WRITE_AND_ATOMIC, + + UVM_FAULT_CANCEL_VA_MODE_COUNT, +} uvm_fault_cancel_va_mode_t; + +// Types of faults that can show up in the fault buffer. Non-UVM related faults are grouped in FATAL category +// since we don't care about the specific type +typedef enum +{ + UVM_FAULT_TYPE_INVALID_PDE = 0, + UVM_FAULT_TYPE_INVALID_PTE, + UVM_FAULT_TYPE_ATOMIC, + + // WRITE to READ-ONLY + UVM_FAULT_TYPE_WRITE, + + // READ to WRITE-ONLY (ATS) + UVM_FAULT_TYPE_READ, + + // The next values are considered fatal and are not handled by the UVM driver + UVM_FAULT_TYPE_FATAL, + + // Values required for tools + UVM_FAULT_TYPE_PDE_SIZE = UVM_FAULT_TYPE_FATAL, + UVM_FAULT_TYPE_VA_LIMIT_VIOLATION, + UVM_FAULT_TYPE_UNBOUND_INST_BLOCK, + UVM_FAULT_TYPE_PRIV_VIOLATION, + UVM_FAULT_TYPE_PITCH_MASK_VIOLATION, + UVM_FAULT_TYPE_WORK_CREATION, + UVM_FAULT_TYPE_UNSUPPORTED_APERTURE, + UVM_FAULT_TYPE_COMPRESSION_FAILURE, + UVM_FAULT_TYPE_UNSUPPORTED_KIND, + UVM_FAULT_TYPE_REGION_VIOLATION, + UVM_FAULT_TYPE_POISONED, + + UVM_FAULT_TYPE_COUNT +} uvm_fault_type_t; + +const char *uvm_fault_type_string(uvm_fault_type_t fault_type); + +// Main MMU client type that triggered the fault +typedef enum +{ + UVM_FAULT_CLIENT_TYPE_GPC = 0, + UVM_FAULT_CLIENT_TYPE_HUB, + UVM_FAULT_CLIENT_TYPE_COUNT +} uvm_fault_client_type_t; + +const char *uvm_fault_client_type_string(uvm_fault_client_type_t fault_client_type); + +typedef enum +{ + UVM_MMU_ENGINE_TYPE_GRAPHICS = 0, + UVM_MMU_ENGINE_TYPE_HOST, + UVM_MMU_ENGINE_TYPE_CE, + UVM_MMU_ENGINE_TYPE_COUNT, +} uvm_mmu_engine_type_t; + +const char *uvm_mmu_engine_type_string(uvm_mmu_engine_type_t mmu_engine_type); + +// HW unit that triggered the fault. We include the fields required for fault cancelling. 
Including more information +// might be useful for performance heuristics in the future +typedef struct +{ + uvm_fault_client_type_t client_type : order_base_2(UVM_FAULT_CLIENT_TYPE_COUNT) + 1; + + uvm_mmu_engine_type_t mmu_engine_type : order_base_2(UVM_MMU_ENGINE_TYPE_COUNT) + 1; + + NvU16 client_id; + + NvU16 mmu_engine_id; + + union + { + struct + { + NvU16 utlb_id; + + NvU8 gpc_id; + }; + + // TODO: Bug 3283289: the channel ID, which is only populated for + // non-replayable faults, is never consumed. + NvU16 channel_id; + }; + + + // Identifier of the subcontext that caused the fault. HW uses it as an + // offset in the instance block to obtain the GPU VA space PDB of the + // faulting process. + NvU8 ve_id; +} uvm_fault_source_t; + +struct uvm_fault_buffer_entry_struct +{ + // + // The next fields are filled by the fault buffer parsing code + // + + // Virtual address of the faulting request aligned to CPU page size + NvU64 fault_address; + + // GPU timestamp in (nanoseconds) when the fault was inserted in the fault + // buffer + NvU64 timestamp; + + uvm_gpu_phys_address_t instance_ptr; + + uvm_fault_source_t fault_source; + + uvm_fault_type_t fault_type : order_base_2(UVM_FAULT_TYPE_COUNT) + 1; + + uvm_fault_access_type_t fault_access_type : order_base_2(UVM_FAULT_ACCESS_TYPE_COUNT) + 1; + + // + // The next fields are managed by the fault handling code + // + + uvm_va_space_t *va_space; + + // This is set to true when some fault could not be serviced and a + // cancel command needs to be issued + bool is_fatal : 1; + + // This is set to true for all GPU faults on a page that is thrashing + bool is_throttled : 1; + + // This is set to true if the fault has prefetch access type and the + // address or the access privileges are not valid + bool is_invalid_prefetch : 1; + + bool is_replayable : 1; + + bool is_virtual : 1; + + bool in_protected_mode : 1; + + bool filtered : 1; + + // Reason for the fault to be fatal + UvmEventFatalReason fatal_reason : order_base_2(UvmEventNumFatalReasons) + 1; + + // Mode to be used to cancel faults. This must be set according to the + // fatal fault reason and the fault access types of the merged fault + // instances. + union + { + struct + { + uvm_fault_cancel_va_mode_t cancel_va_mode : order_base_2(UVM_FAULT_CANCEL_VA_MODE_COUNT) + 1; + } replayable; + + struct + { + NvU32 buffer_index; + } non_replayable; + }; + + // List of duplicate fault buffer entries that have been merged into this + // one + struct list_head merged_instances_list; + + // Access types to this page for all accesses that have been coalesced at + // fetch time. 
It must include, at least, fault_access_type + NvU32 access_type_mask; + + // Number of faults with the same properties that have been coalesced at + // fetch time + NvU16 num_instances; +}; + +typedef enum +{ + // Completes when all fault replays are in-flight + UVM_FAULT_REPLAY_TYPE_START = 0, + + // Completes when all faulting accesses have been correctly translated or faulted again + UVM_FAULT_REPLAY_TYPE_START_ACK_ALL, + + UVM_FAULT_REPLAY_TYPE_MAX +} uvm_fault_replay_type_t; + +static uvm_membar_t uvm_membar_max(uvm_membar_t membar_1, uvm_membar_t membar_2) +{ + BUILD_BUG_ON(UVM_MEMBAR_NONE >= UVM_MEMBAR_GPU); + BUILD_BUG_ON(UVM_MEMBAR_GPU >= UVM_MEMBAR_SYS); + return max(membar_1, membar_2); +} + +typedef enum +{ + UVM_ACCESS_COUNTER_TYPE_MIMC = 0, + UVM_ACCESS_COUNTER_TYPE_MOMC, + + UVM_ACCESS_COUNTER_TYPE_MAX, +} uvm_access_counter_type_t; + +const char *uvm_access_counter_type_string(uvm_access_counter_type_t access_counter_type); + +struct uvm_access_counter_buffer_entry_struct +{ + // Whether this counter refers to outbound accesses to remote GPUs or + // sysmem (MIMC), or it refers to inbound accesses from CPU or a non-peer + // GPU (whose accesses are routed through the CPU, too) to vidmem (MOMC) + uvm_access_counter_type_t counter_type; + + // Address of the region for which a notification was sent + uvm_gpu_address_t address; + + // These fields are only valid if address.is_virtual is true + union + { + struct + { + // Instance pointer of one of the channels in the TSG that triggered the + // notification + uvm_gpu_phys_address_t instance_ptr; + + uvm_mmu_engine_type_t mmu_engine_type; + + NvU32 mmu_engine_id; + + // Identifier of the subcontext that performed the memory accesses that + // triggered the notification. This value, combined with the instance_ptr, + // is needed to obtain the GPU VA space of the process that triggered the + // notification. + NvU32 ve_id; + + // VA space for the address that triggered the notification + uvm_va_space_t *va_space; + } virtual_info; + + // These fields are only valid if address.is_virtual is false + struct + { + // Processor id where data is resident + // + // Although this information is not tied to a VA space, we can use + // a regular processor id because P2P is not allowed between + // partitioned GPUs. + uvm_processor_id_t resident_id; + } physical_info; + }; + + // Number of times the tracked region was accessed since the last time it + // was cleared. Counter values saturate at the maximum value supported by + // the GPU (2^16 - 1 in Volta) + NvU32 counter_value; + + // When the granularity of the tracked regions is greater than 64KB, the + // region is split into 32 equal subregions. Each bit in this field + // represents one of those subregions. 
1 means that the subregion has been + // accessed + NvU32 sub_granularity; + + // Opaque fields provided by HW, required for targeted clear of a counter + NvU32 bank; + NvU32 tag; +}; + +static uvm_prot_t uvm_fault_access_type_to_prot(uvm_fault_access_type_t access_type) +{ + switch (access_type) { + case UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG: + return UVM_PROT_READ_WRITE_ATOMIC; + + case UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK: + case UVM_FAULT_ACCESS_TYPE_WRITE: + return UVM_PROT_READ_WRITE; + + default: + // Prefetch faults, if not ignored, are handled like read faults and require + // a mapping with, at least, READ_ONLY access permission + return UVM_PROT_READ_ONLY; + } +} + +#endif // __UVM_HAL_TYPES_H__ diff --git a/kernel-open/nvidia-uvm/uvm_hmm.c b/kernel-open/nvidia-uvm/uvm_hmm.c new file mode 100644 index 000000000..8c1937aa9 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_hmm.c @@ -0,0 +1,790 @@ +/******************************************************************************* + Copyright (c) 2016-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_hmm.h" + +static bool uvm_disable_hmm = false; +module_param(uvm_disable_hmm, bool, 0444); +MODULE_PARM_DESC(uvm_disable_hmm, + "Force-disable HMM functionality in the UVM driver. " + "Default: false (i.e, HMM is potentially enabled). Ignored if " + "HMM is not supported in the driver, or if ATS settings " + "conflict with HMM."); + + +#if UVM_IS_CONFIG_HMM() + +#include +#include + +#include "uvm_common.h" +#include "uvm_gpu.h" +#include "uvm_va_block_types.h" +#include "uvm_va_space_mm.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_range_tree.h" +#include "uvm_lock.h" +#include "uvm_api.h" +#include "uvm_va_policy.h" + +bool uvm_hmm_is_enabled_system_wide(void) +{ + return !uvm_disable_hmm && !g_uvm_global.ats.enabled && uvm_va_space_mm_enabled_system(); +} + +bool uvm_hmm_is_enabled(uvm_va_space_t *va_space) +{ + // TODO: Bug 3351822: [UVM-HMM] Remove temporary testing changes. 
+ return uvm_hmm_is_enabled_system_wide() && + uvm_va_space_mm_enabled(va_space) && + !(va_space->initialization_flags & UVM_INIT_FLAGS_DISABLE_HMM) && + !va_space->hmm.disable; +} + +static uvm_va_block_t *hmm_va_block_from_node(uvm_range_tree_node_t *node) +{ + if (!node) + return NULL; + return container_of(node, uvm_va_block_t, hmm.node); +} + +NV_STATUS uvm_hmm_va_space_initialize(uvm_va_space_t *va_space) +{ + struct mm_struct *mm = va_space->va_space_mm.mm; + + if (!uvm_hmm_is_enabled(va_space)) + return NV_OK; + + uvm_assert_mmap_lock_locked_write(mm); + uvm_assert_rwsem_locked_write(&va_space->lock); + + // TODO: Bug 3351822: [UVM-HMM] Remove temporary testing changes. + // Disable HMM by default for each va_space until enough functionality is + // implemented that this can be enabled by default. + // Note that it can be enabled for testing under controlled circumstances. + va_space->hmm.disable = true; + + return NV_OK; +} + +NV_STATUS uvm_hmm_va_space_initialize_test(uvm_va_space_t *va_space) +{ + uvm_hmm_va_space_t *hmm_va_space = &va_space->hmm; + struct mm_struct *mm = va_space->va_space_mm.mm; + int ret; + + if (!uvm_hmm_is_enabled_system_wide() || !mm) + return NV_WARN_NOTHING_TO_DO; + + uvm_assert_mmap_lock_locked_write(mm); + uvm_assert_rwsem_locked_write(&va_space->lock); + + // Temporarily enable HMM for testing. + va_space->hmm.disable = false; + + // Initialize MMU interval notifiers for this process. + // This allows mmu_interval_notifier_insert() to be called without holding + // the mmap_lock for write. + // Note: there is no __mmu_notifier_unregister(), this call just allocates + // memory which is attached to the mm_struct and freed when the mm_struct + // is freed. + ret = __mmu_notifier_register(NULL, mm); + if (ret) + return errno_to_nv_status(ret); + + uvm_range_tree_init(&hmm_va_space->blocks); + uvm_mutex_init(&hmm_va_space->blocks_lock, UVM_LOCK_ORDER_LEAF); + + return NV_OK; +} + +void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space) +{ + uvm_hmm_va_space_t *hmm_va_space = &va_space->hmm; + uvm_range_tree_node_t *node, *next; + uvm_va_block_t *va_block; + + if (!uvm_hmm_is_enabled(va_space) || uvm_va_space_initialized(va_space) != NV_OK) + return; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + // The blocks_lock is not needed when the va_space lock is held for write. + uvm_range_tree_for_each_safe(node, next, &hmm_va_space->blocks) { + va_block = hmm_va_block_from_node(node); + uvm_range_tree_remove(&hmm_va_space->blocks, node); + mmu_interval_notifier_remove(&va_block->hmm.notifier); + uvm_va_block_kill(va_block); + } + + // TODO: Bug 3351822: [UVM-HMM] Remove temporary testing changes. + va_space->hmm.disable = true; +} + +static bool hmm_invalidate(uvm_va_block_t *va_block, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct mmu_interval_notifier *mni = &va_block->hmm.notifier; + NvU64 start, end; + + // The MMU_NOTIFY_RELEASE event isn't really needed since mn_itree_release() + // doesn't remove the interval notifiers from the struct_mm so there will + // be a full range MMU_NOTIFY_UNMAP event after the release from + // unmap_vmas() during exit_mmap(). + if (range->event == MMU_NOTIFY_SOFT_DIRTY || range->event == MMU_NOTIFY_RELEASE) + return true; + + // Blockable is only set false by + // mmu_notifier_invalidate_range_start_nonblock() which is only called in + // __oom_reap_task_mm(). 
+ if (!mmu_notifier_range_blockable(range)) + return false; + + // Ignore invalidation callbacks for device private pages since the + // invalidation is handled as part of the migration process. + // Note that the va_space pointer won't be NULL if the callback is for + // MMU_NOTIFY_MIGRATE/MMU_NOTIFY_EXCLUSIVE because the va_block lock + // is already held and we have to prevent recursively getting the lock. + if ((range->event == MMU_NOTIFY_MIGRATE || range->event == MMU_NOTIFY_EXCLUSIVE) && + range->owner == va_block->hmm.va_space) + return true; + + uvm_mutex_lock(&va_block->lock); + + // Ignore this invalidation callback if the block is dead. + if (uvm_va_block_is_dead(va_block)) + goto unlock; + + mmu_interval_set_seq(mni, cur_seq); + + // Note: unmap_vmas() does MMU_NOTIFY_UNMAP [0, 0xffffffffffffffff] + start = range->start; + end = (range->end == ULONG_MAX) ? range->end : range->end - 1; + if (start < va_block->start) + start = va_block->start; + if (end > va_block->end) + end = va_block->end; + + if (range->event == MMU_NOTIFY_UNMAP) + uvm_va_policy_clear(va_block, start, end); + +unlock: + uvm_mutex_unlock(&va_block->lock); + + return true; +} + +static bool uvm_hmm_invalidate_entry(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + uvm_va_block_t *va_block = container_of(mni, uvm_va_block_t, hmm.notifier); + + UVM_ENTRY_RET(hmm_invalidate(va_block, range, cur_seq)); +} + +static const struct mmu_interval_notifier_ops uvm_hmm_notifier_ops = +{ + .invalidate = uvm_hmm_invalidate_entry, +}; + +NV_STATUS uvm_hmm_va_block_find(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_block_t **va_block_ptr) +{ + uvm_range_tree_node_t *node; + + if (!uvm_hmm_is_enabled(va_space)) + return NV_ERR_INVALID_ADDRESS; + + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + uvm_assert_rwsem_locked(&va_space->lock); + + uvm_mutex_lock(&va_space->hmm.blocks_lock); + node = uvm_range_tree_find(&va_space->hmm.blocks, addr); + uvm_mutex_unlock(&va_space->hmm.blocks_lock); + + if (!node) + return NV_ERR_OBJECT_NOT_FOUND; + + *va_block_ptr = hmm_va_block_from_node(node); + + return NV_OK; +} + +static bool uvm_hmm_vma_is_valid(struct vm_area_struct *vma, + unsigned long addr, + bool allow_unreadable_vma) +{ + // UVM doesn't support userfaultfd. hmm_range_fault() doesn't support + // VM_IO, VM_PFNMAP, or VM_MIXEDMAP VMAs. It also doesn't support + // VMAs without VM_READ but we allow those VMAs to have policy set on + // them. + return vma && + addr >= vma->vm_start && + !userfaultfd_armed(vma) && + !(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) && + (allow_unreadable_vma || (vma->vm_flags & VM_READ)); +} + +static NV_STATUS hmm_va_block_find_create(uvm_va_space_t *va_space, + NvU64 addr, + bool allow_unreadable_vma, + uvm_va_block_context_t *va_block_context, + uvm_va_block_t **va_block_ptr) +{ + struct mm_struct *mm = va_space->va_space_mm.mm; + struct vm_area_struct *vma; + uvm_va_block_t *va_block; + NvU64 start, end; + NV_STATUS status; + int ret; + + if (!uvm_hmm_is_enabled(va_space)) + return NV_ERR_INVALID_ADDRESS; + + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + UVM_ASSERT(mm); + uvm_assert_mmap_lock_locked(mm); + uvm_assert_rwsem_locked(&va_space->lock); + UVM_ASSERT(PAGE_ALIGNED(addr)); + + // Note that we have to allow PROT_NONE VMAs so that policies can be set. 
+ vma = find_vma(mm, addr); + if (!uvm_hmm_vma_is_valid(vma, addr, allow_unreadable_vma)) + return NV_ERR_INVALID_ADDRESS; + + // Since we only hold the va_space read lock, there can be multiple + // parallel va_block insertions. + uvm_mutex_lock(&va_space->hmm.blocks_lock); + + va_block = hmm_va_block_from_node(uvm_range_tree_find(&va_space->hmm.blocks, addr)); + if (va_block) + goto done; + + // The va_block is always created to cover the whole aligned + // UVM_VA_BLOCK_SIZE interval unless there are existing UVM va_ranges or + // HMM va_blocks. In that case, the new HMM va_block size is adjusted so it + // doesn't overlap. + start = UVM_VA_BLOCK_ALIGN_DOWN(addr); + end = start + UVM_VA_BLOCK_SIZE - 1; + + // Search for existing UVM va_ranges in the start/end interval and create + // a maximum interval that doesn't overlap any existing UVM va_ranges. + // We know that 'addr' is not within a va_range or + // hmm_va_block_find_create() wouldn't be called. + uvm_range_tree_adjust_interval(&va_space->va_range_tree, addr, &start, &end); + + // Search for existing HMM va_blocks in the start/end interval and create + // a maximum interval that doesn't overlap any existing HMM va_blocks. + uvm_range_tree_adjust_interval(&va_space->hmm.blocks, addr, &start, &end); + + // Create a HMM va_block with a NULL va_range pointer. + status = uvm_va_block_create(NULL, start, end, &va_block); + if (status != NV_OK) + goto err_unlock; + + va_block->hmm.node.start = start; + va_block->hmm.node.end = end; + va_block->hmm.va_space = va_space; + uvm_range_tree_init(&va_block->hmm.va_policy_tree); + + ret = mmu_interval_notifier_insert(&va_block->hmm.notifier, + mm, + start, + end - start + 1, + &uvm_hmm_notifier_ops); + if (ret) { + status = errno_to_nv_status(ret); + goto err_release; + } + + status = uvm_range_tree_add(&va_space->hmm.blocks, &va_block->hmm.node); + if (status != NV_OK) { + UVM_ASSERT(status != NV_ERR_UVM_ADDRESS_IN_USE); + goto err_unreg; + } + +done: + uvm_mutex_unlock(&va_space->hmm.blocks_lock); + if (va_block_context) + va_block_context->hmm.vma = vma; + *va_block_ptr = va_block; + return NV_OK; + +err_unreg: + mmu_interval_notifier_remove(&va_block->hmm.notifier); + +err_release: + uvm_va_block_release(va_block); + +err_unlock: + uvm_mutex_unlock(&va_space->hmm.blocks_lock); + return status; +} + +NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_block_context_t *va_block_context, + uvm_va_block_t **va_block_ptr) +{ + return hmm_va_block_find_create(va_space, addr, false, va_block_context, va_block_ptr); +} + +typedef struct { + struct mmu_interval_notifier notifier; + uvm_va_block_t *existing_block; + uvm_va_block_t *new_block; +} hmm_split_invalidate_data_t; + +static bool hmm_split_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + hmm_split_invalidate_data_t *split_data = container_of(mni, hmm_split_invalidate_data_t, notifier); + uvm_va_block_t *existing_block = split_data->existing_block; + uvm_va_block_t *new_block = split_data->new_block; + + if (uvm_ranges_overlap(existing_block->start, existing_block->end, range->start, range->end - 1)) + hmm_invalidate(existing_block, range, cur_seq); + + if (uvm_ranges_overlap(new_block->start, new_block->end, range->start, range->end - 1)) + hmm_invalidate(new_block, range, cur_seq); + + return true; +} + +static bool hmm_split_invalidate_entry(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long 
cur_seq) +{ + UVM_ENTRY_RET(hmm_split_invalidate(mni, range, cur_seq)); +} + +static const struct mmu_interval_notifier_ops hmm_notifier_split_ops = +{ + .invalidate = hmm_split_invalidate_entry, +}; + +// Splits existing va_block into two pieces, with new_va_block always after +// va_block. va_block is updated to have new_end. new_end+1 must be page- +// aligned. +// +// Before: [----------- existing ------------] +// After: [---- existing ----][---- new ----] +// ^new_end +// +// On error, va_block is still accessible and is left in its original +// functional state. +static NV_STATUS hmm_split_block(uvm_va_block_t *va_block, + NvU64 new_end, + uvm_va_block_t **new_block_ptr) +{ + uvm_va_space_t *va_space = va_block->hmm.va_space; + struct mm_struct *mm = va_space->va_space_mm.mm; + hmm_split_invalidate_data_t split_data; + uvm_va_block_t *new_va_block; + NV_STATUS status; + int ret; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + UVM_ASSERT(new_end > va_block->start); + UVM_ASSERT(new_end < va_block->end); + UVM_ASSERT(PAGE_ALIGNED(new_end + 1)); + + status = uvm_va_block_create(NULL, new_end + 1, va_block->end, &new_va_block); + if (status != NV_OK) + return status; + + // Initialize the newly created HMM va_block. + new_va_block->hmm.va_space = va_space; + uvm_range_tree_init(&new_va_block->hmm.va_policy_tree); + + // The MMU interval notifier has to be removed in order to resize it. + // That means there would be a window of time where invalidation callbacks + // could be missed. To handle this case, we register a temporary notifier + // to cover the same address range while resizing the old notifier (it is + // OK to have multiple notifiers for the same range, we may simply try to + // invalidate twice). + split_data.existing_block = va_block; + split_data.new_block = new_va_block; + ret = mmu_interval_notifier_insert(&split_data.notifier, + mm, + va_block->start, + new_va_block->end - va_block->start + 1, + &hmm_notifier_split_ops); + + uvm_mutex_lock(&va_block->lock); + + status = uvm_va_block_split_locked(va_block, new_end, new_va_block, NULL); + if (status != NV_OK) + goto err; + + uvm_mutex_unlock(&va_block->lock); + + // Since __mmu_notifier_register() was called when the va_space was + // initially created, we know that mm->notifier_subscriptions is valid + // and mmu_interval_notifier_insert() can't return ENOMEM. + // The only error return is for start + length overflowing but we already + // registered the same address range before so there should be no error. + UVM_ASSERT(!ret); + + mmu_interval_notifier_remove(&va_block->hmm.notifier); + + uvm_range_tree_shrink_node(&va_space->hmm.blocks, &va_block->hmm.node, va_block->start, va_block->end); + + // Enable notifications on the old block with the smaller size. 
+ ret = mmu_interval_notifier_insert(&va_block->hmm.notifier, + mm, + va_block->start, + va_block->end - va_block->start + 1, + &uvm_hmm_notifier_ops); + UVM_ASSERT(!ret); + + new_va_block->hmm.node.start = new_va_block->start; + new_va_block->hmm.node.end = new_va_block->end; + + ret = mmu_interval_notifier_insert(&new_va_block->hmm.notifier, + mm, + new_va_block->start, + new_va_block->end - new_va_block->start + 1, + &uvm_hmm_notifier_ops); + UVM_ASSERT(!ret); + + mmu_interval_notifier_remove(&split_data.notifier); + + status = uvm_range_tree_add(&va_space->hmm.blocks, &new_va_block->hmm.node); + UVM_ASSERT(status == NV_OK); + + if (new_block_ptr) + *new_block_ptr = new_va_block; + + return status; + +err: + uvm_mutex_unlock(&va_block->lock); + mmu_interval_notifier_remove(&split_data.notifier); + uvm_va_block_release(new_va_block); + return status; +} + +// Check to see if the HMM va_block would overlap the range start/end and +// split it so it can be removed. That breaks down to the following cases: +// start/end could cover all of the HMM va_block -> +// remove the va_block +// start/end could cover the left part of the HMM va_block -> +// remove the left part +// start/end could cover the right part of the HMM va_block -> +// remove the right part +// or start/end could "punch a hole" in the middle and leave the ends intact. +// In each case, only one HMM va_block is removed so return it in out_va_block. +static NV_STATUS split_block_if_needed(uvm_va_block_t *va_block, + NvU64 start, + NvU64 end, + uvm_va_block_t **out_va_block) +{ + uvm_va_block_t *new; + NV_STATUS status; + + if (va_block->start < start) { + status = hmm_split_block(va_block, start - 1, &new); + if (status != NV_OK) + return status; + + // Keep the left part, the right part will be deleted. + va_block = new; + } + + if (va_block->end > end) { + status = hmm_split_block(va_block, end, NULL); + if (status != NV_OK) + return status; + + // Keep the right part, the left part will be deleted. + } + + *out_va_block = va_block; + + return NV_OK; +} + +// Normally, the HMM va_block is destroyed when the va_space is destroyed +// (i.e., when the /dev/nvidia-uvm device is closed). A munmap() call triggers +// a uvm_hmm_invalidate() callback which unmaps the VMA's range from the GPU's +// page tables. However, it doesn't destroy the va_block because that would +// require calling mmu_interval_notifier_remove() which can't be called from +// the invalidate callback due to Linux locking constraints. If a process +// calls mmap()/munmap() for SAM and then creates a UVM managed allocation, +// the same VMA range can be picked and there would be a UVM/HMM va_block +// conflict. Creating a UVM managed allocation (or other va_range) calls this +// function to remove stale HMM va_blocks or split the HMM va_block so there +// is no overlap. +NV_STATUS uvm_hmm_va_block_reclaim(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 end) +{ + uvm_range_tree_node_t *node, *next; + uvm_va_block_t *va_block; + NV_STATUS status; + + if (!uvm_hmm_is_enabled(va_space)) + return NV_OK; + + if (mm) { + UVM_ASSERT(mm == va_space->va_space_mm.mm); + uvm_assert_mmap_lock_locked(mm); + } + uvm_assert_rwsem_locked_write(&va_space->lock); + + // Process each HMM va_block that overlaps the interval [start, end]. + // Note that end is inclusive. + // The blocks_lock is not needed when the va_space lock is held for write. 
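    // Illustrative sketch of the overlap cases handled below (inclusive
    // bounds; splitting only happens when mm is non-NULL):
    //
    //   reclaim range:             [start .......... end]
    //   block inside range:           [--- block ---]           -> remove whole block
    //   block extends left:      [------- block ------]         -> split at start - 1, remove right piece
    //   block extends right:             [------ block -------] -> split at end, remove left piece
    //   block extends both:      [--------- block ------------] -> split twice, remove middle piece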
+ uvm_range_tree_for_each_in_safe(node, next, &va_space->hmm.blocks, start, end) { + va_block = hmm_va_block_from_node(node); + + if (mm) { + status = split_block_if_needed(va_block, start, end, &va_block); + if (status != NV_OK) + return status; + } + + // Note that this waits for any invalidations callbacks to complete + // so uvm_hmm_invalidate() won't see a block disapear. + // The va_space write lock should prevent uvm_hmm_va_block_find_create() + // from adding it back. + mmu_interval_notifier_remove(&va_block->hmm.notifier); + uvm_range_tree_remove(&va_space->hmm.blocks, &va_block->hmm.node); + uvm_va_block_kill(va_block); + } + + UVM_ASSERT(!uvm_range_tree_iter_first(&va_space->hmm.blocks, start, end)); + + return NV_OK; +} + +NV_STATUS uvm_hmm_split_as_needed(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_policy_is_split_needed_t split_needed_cb, + void *data) +{ + uvm_va_block_t *va_block; + uvm_va_policy_node_t *node; + NV_STATUS status; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + // If there is no HMM va_block or the va_block doesn't span the policy + // addr, there is no need to split. + status = uvm_hmm_va_block_find(va_space, addr, &va_block); + if (status != NV_OK || va_block->start == addr) + return NV_OK; + + uvm_mutex_lock(&va_block->lock); + + node = uvm_va_policy_node_find(va_block, addr); + if (!node) + goto done; + + // If the policy range doesn't span addr, we're done. + if (addr == node->node.start) + goto done; + + if (split_needed_cb(&node->policy, data)) + status = uvm_va_policy_node_split(va_block, node, addr - 1, NULL); + +done: + uvm_mutex_unlock(&va_block->lock); + return status; +} + +NV_STATUS uvm_hmm_set_preferred_location(uvm_va_space_t *va_space, + uvm_processor_id_t preferred_location, + NvU64 base, + NvU64 last_address) +{ + bool is_default = UVM_ID_IS_INVALID(preferred_location); + uvm_va_block_t *va_block; + NvU64 addr; + NV_STATUS status = NV_OK; + + if (!uvm_hmm_is_enabled(va_space)) + return NV_ERR_INVALID_ADDRESS; + + uvm_assert_mmap_lock_locked(va_space->va_space_mm.mm); + uvm_assert_rwsem_locked_write(&va_space->lock); + UVM_ASSERT(PAGE_ALIGNED(base)); + UVM_ASSERT(PAGE_ALIGNED(last_address + 1)); + UVM_ASSERT(base < last_address); + + // Update HMM preferred location policy. + + for (addr = base; addr < last_address; addr = va_block->end + 1) { + NvU64 end; + + status = hmm_va_block_find_create(va_space, addr, true, NULL, &va_block); + if (status != NV_OK) + break; + + end = min(last_address, va_block->end); + + uvm_mutex_lock(&va_block->lock); + + status = uvm_va_policy_set_range(va_block, + addr, + end, + UVM_VA_POLICY_PREFERRED_LOCATION, + is_default, + preferred_location, + UVM_READ_DUPLICATION_MAX); + + // TODO: Bug 1750144: unset requires re-evaluating accessed-by mappings + // (see uvm_va_range_set_preferred_location's call of + // uvm_va_block_set_accessed_by), and set requires unmapping remote + // mappings (uvm_va_block_set_preferred_location_locked). 
+ + uvm_mutex_unlock(&va_block->lock); + + if (status != NV_OK) + break; + } + + return status; +} + +NV_STATUS uvm_hmm_set_accessed_by(uvm_va_space_t *va_space, + uvm_processor_id_t processor_id, + bool set_bit, + NvU64 base, + NvU64 last_address) +{ + uvm_va_block_t *va_block; + NvU64 addr; + NV_STATUS status = NV_OK; + + if (!uvm_hmm_is_enabled(va_space)) + return NV_ERR_INVALID_ADDRESS; + + uvm_assert_mmap_lock_locked(va_space->va_space_mm.mm); + uvm_assert_rwsem_locked_write(&va_space->lock); + UVM_ASSERT(PAGE_ALIGNED(base)); + UVM_ASSERT(PAGE_ALIGNED(last_address + 1)); + UVM_ASSERT(base < last_address); + + // Update HMM accessed by policy. + + for (addr = base; addr < last_address; addr = va_block->end + 1) { + NvU64 end; + + status = hmm_va_block_find_create(va_space, addr, true, NULL, &va_block); + if (status != NV_OK) + break; + + end = min(last_address, va_block->end); + + uvm_mutex_lock(&va_block->lock); + + status = uvm_va_policy_set_range(va_block, + addr, + end, + UVM_VA_POLICY_ACCESSED_BY, + !set_bit, + processor_id, + UVM_READ_DUPLICATION_MAX); + + // TODO: Bug 1750144: need to call va_block_set_accessed_by_locked() + // if read duplication isn't enabled. + + uvm_mutex_unlock(&va_block->lock); + + if (status != NV_OK) + break; + } + + return status; +} + +void uvm_hmm_find_policy_end(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + unsigned long addr, + NvU64 *endp) +{ + struct vm_area_struct *vma = va_block_context->hmm.vma; + uvm_va_policy_node_t *node; + NvU64 end = *endp; + + uvm_assert_mmap_lock_locked(vma->vm_mm); + uvm_assert_mutex_locked(&va_block->lock); + + if (end > vma->vm_end - 1) + end = vma->vm_end - 1; + + node = uvm_va_policy_node_find(va_block, addr); + if (node) { + va_block_context->policy = &node->policy; + if (end > node->node.end) + end = node->node.end; + } + else + va_block_context->policy = &uvm_va_policy_default; + + *endp = end; +} + +NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_page_index_t page_index, + uvm_page_index_t *outerp) +{ + struct vm_area_struct *vma; + unsigned long addr; + NvU64 end = va_block->end; + uvm_page_index_t outer; + + UVM_ASSERT(uvm_va_block_is_hmm(va_block)); + uvm_assert_mmap_lock_locked(va_block_context->mm); + uvm_assert_mutex_locked(&va_block->lock); + + addr = uvm_va_block_cpu_page_address(va_block, page_index); + + vma = vma_lookup(va_block_context->mm, addr); + if (!vma || !(vma->vm_flags & VM_READ)) + return NV_ERR_INVALID_ADDRESS; + + va_block_context->hmm.vma = vma; + + uvm_hmm_find_policy_end(va_block, va_block_context, addr, &end); + + outer = uvm_va_block_cpu_page_index(va_block, end) + 1; + if (*outerp > outer) + *outerp = outer; + + return NV_OK; +} + +#endif // UVM_IS_CONFIG_HMM() + + diff --git a/kernel-open/nvidia-uvm/uvm_hmm.h b/kernel-open/nvidia-uvm/uvm_hmm.h new file mode 100644 index 000000000..88c63e975 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_hmm.h @@ -0,0 +1,287 @@ +/******************************************************************************* + Copyright (c) 2016-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, 
subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef _UVM_HMM_H_ +#define _UVM_HMM_H_ + +#include "nvtypes.h" +#include "uvm_forward_decl.h" +#include "uvm_va_block_types.h" +#include "uvm_va_policy.h" +#include "uvm_linux.h" +#include "uvm_range_tree.h" +#include "uvm_lock.h" + +typedef struct +{ + // This stores pointers to uvm_va_block_t for HMM blocks. + uvm_range_tree_t blocks; + uvm_mutex_t blocks_lock; + + // TODO: Bug 3351822: [UVM-HMM] Remove temporary testing changes. + // This flag is set true by default for each va_space so most processes + // don't see partially implemented UVM-HMM behavior but can be enabled by + // test code for a given va_space so the test process can do some interim + // testing. It needs to be a separate flag instead of modifying + // uvm_disable_hmm or va_space->flags since those are user inputs and are + // visible/checked by test code. + // Remove this when UVM-HMM is fully integrated into chips_a. + bool disable; +} uvm_hmm_va_space_t; + +#if UVM_IS_CONFIG_HMM() + // Tells whether HMM is enabled for the given va_space. + // If it is not enabled, all of the functions below are no-ops. + bool uvm_hmm_is_enabled(uvm_va_space_t *va_space); + + // Self-explanatory name: reports if HMM is enabled system-wide. + bool uvm_hmm_is_enabled_system_wide(void); + + // Initialize HMM for the given the va_space. + // Locking: the va_space->va_space_mm.mm mmap_lock must be write locked + // and the va_space lock must be held in write mode. + NV_STATUS uvm_hmm_va_space_initialize(uvm_va_space_t *va_space); + + // Initialize HMM for the given the va_space for testing. + // Bug 1750144: UVM: Add HMM (Heterogeneous Memory Management) support to + // the UVM driver. Remove this when enough HMM functionality is implemented. + NV_STATUS uvm_hmm_va_space_initialize_test(uvm_va_space_t *va_space); + + // Destroy any HMM state for the given the va_space. + // Locking: va_space lock must be held in write mode. + void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space); + + // Find an existing HMM va_block. + // This function can be called without having retained and locked the mm, + // but in that case, the only allowed operations on the returned block are + // locking the block, reading its state, and performing eviction. GPU fault + // handling and user-initiated migrations are not allowed. + // Return values are the same as uvm_va_block_find(). + // Locking: This must be called with va_space lock held in at least read + // mode. + NV_STATUS uvm_hmm_va_block_find(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_block_t **va_block_ptr); + + // Find or create a new HMM va_block. + // + // Return NV_ERR_INVALID_ADDRESS if there is no VMA associated with the + // address 'addr' or the VMA does not have at least PROT_READ permission. 
+ // Locking: This function must be called with mm retained and locked for + // at least read and the va_space lock at least for read. + NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_block_context_t *va_block_context, + uvm_va_block_t **va_block_ptr); + + // Reclaim any HMM va_blocks that overlap the given range. + // Note that 'end' is inclusive. + // A HMM va_block can be reclaimed if it doesn't contain any "valid" VMAs. + // See uvm_hmm_vma_is_valid() for details. + // Return values: + // NV_ERR_NO_MEMORY: Reclaim required a block split, which failed. + // NV_OK: There were no HMM blocks in the range, or all HMM + // blocks in the range were successfully reclaimed. + // Locking: If mm is not NULL, it must equal va_space_mm.mm, the caller + // must hold a reference on it, and it must be locked for at least read + // mode. Also, the va_space lock must be held in write mode. + // TODO: Bug 3372166: add asynchronous va_block reclaim. + NV_STATUS uvm_hmm_va_block_reclaim(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 end); + + // Find a HMM policy range that needs to be split. The callback function + // 'split_needed_cb' returns true if the policy range needs to be split. + // If a policy range is split, the existing range is truncated to + // [existing_start, addr - 1] and a new policy node with the same policy + // values as the existing node is created covering [addr, existing_end]. + // Before: [----------- existing ------------] + // After: [---- existing ----][---- new ----] + // ^addr + // Locking: the va_space must be write locked. + NV_STATUS uvm_hmm_split_as_needed(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_policy_is_split_needed_t split_needed_cb, + void *data); + + // Set the preferred location policy for the given range. + // Note that 'last_address' is inclusive. + // Locking: the va_space->va_space_mm.mm mmap_lock must be locked + // and the va_space lock must be held in write mode. + NV_STATUS uvm_hmm_set_preferred_location(uvm_va_space_t *va_space, + uvm_processor_id_t preferred_location, + NvU64 base, + NvU64 last_address); + + // Set the accessed by policy for the given range. This also tries to + // map the range. Note that 'last_address' is inclusive. + // Locking: the va_space->va_space_mm.mm mmap_lock must be locked + // and the va_space lock must be held in write mode. + NV_STATUS uvm_hmm_set_accessed_by(uvm_va_space_t *va_space, + uvm_processor_id_t processor_id, + bool set_bit, + NvU64 base, + NvU64 last_address); + + // Set the read duplication policy for the given range. + // Note that 'last_address' is inclusive. + // Locking: the va_space->va_space_mm.mm mmap_lock must be write locked + // and the va_space lock must be held in write mode. + // TODO: Bug 2046423: need to implement read duplication support in Linux. + static NV_STATUS uvm_hmm_set_read_duplication(uvm_va_space_t *va_space, + uvm_read_duplication_policy_t new_policy, + NvU64 base, + NvU64 last_address) + { + if (!uvm_hmm_is_enabled(va_space)) + return NV_ERR_INVALID_ADDRESS; + return NV_OK; + } + + // Set va_block_context->policy to the policy covering the given address + // 'addr' and update the ending address '*endp' to the minimum of *endp, + // va_block_context->hmm.vma->vm_end - 1, and the ending address of the + // policy range. + // Locking: This function must be called with + // va_block_context->hmm.vma->vm_mm retained and locked for least read and + // the va_block lock held. 
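    // Illustrative caller pattern (hypothetical, for exposition only):
    //
    //     NvU64 end = va_block->end;
    //
    //     uvm_hmm_find_policy_end(va_block, va_block_context, addr, &end);
    //     // ... operate on [addr, end] using va_block_context->policy ...
    //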
+ void uvm_hmm_find_policy_end(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + unsigned long addr, + NvU64 *endp); + + // Find the VMA for the page index 'page_index', + // set va_block_context->policy to the policy covering the given address, + // and update the ending page range '*outerp' to the minimum of *outerp, + // va_block_context->hmm.vma->vm_end - 1, and the ending address of the + // policy range. + // Return NV_ERR_INVALID_ADDRESS if no VMA is found; otherwise, NV_OK. + // Locking: This function must be called with + // va_block_context->hmm.vma->vm_mm retained and locked for least read and + // the va_block lock held. + NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_page_index_t page_index, + uvm_page_index_t *outerp); + +#else // UVM_IS_CONFIG_HMM() + + static bool uvm_hmm_is_enabled(uvm_va_space_t *va_space) + { + return false; + } + + static bool uvm_hmm_is_enabled_system_wide(void) + { + return false; + } + + static NV_STATUS uvm_hmm_va_space_initialize(uvm_va_space_t *va_space) + { + return NV_OK; + } + + static NV_STATUS uvm_hmm_va_space_initialize_test(uvm_va_space_t *va_space) + { + return NV_WARN_NOTHING_TO_DO; + } + + static void uvm_hmm_va_space_destroy(uvm_va_space_t *va_space) + { + } + + static NV_STATUS uvm_hmm_va_block_find(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_block_t **va_block_ptr) + { + return NV_ERR_INVALID_ADDRESS; + } + + static NV_STATUS uvm_hmm_va_block_find_create(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_block_context_t *va_block_context, + uvm_va_block_t **va_block_ptr) + { + return NV_ERR_INVALID_ADDRESS; + } + + static NV_STATUS uvm_hmm_va_block_reclaim(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 end) + { + return NV_OK; + } + + static NV_STATUS uvm_hmm_split_as_needed(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_policy_is_split_needed_t split_needed_cb, + void *data) + { + return NV_OK; + } + + static NV_STATUS uvm_hmm_set_preferred_location(uvm_va_space_t *va_space, + uvm_processor_id_t preferred_location, + NvU64 base, + NvU64 last_address) + { + return NV_ERR_INVALID_ADDRESS; + } + + static NV_STATUS uvm_hmm_set_accessed_by(uvm_va_space_t *va_space, + uvm_processor_id_t processor_id, + bool set_bit, + NvU64 base, + NvU64 last_address) + { + return NV_ERR_INVALID_ADDRESS; + } + + static NV_STATUS uvm_hmm_set_read_duplication(uvm_va_space_t *va_space, + uvm_read_duplication_policy_t new_policy, + NvU64 base, + NvU64 last_address) + { + return NV_ERR_INVALID_ADDRESS; + } + + static void uvm_hmm_find_policy_end(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + unsigned long addr, + NvU64 *endp) + { + } + + static NV_STATUS uvm_hmm_find_policy_vma_and_outer(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_page_index_t page_index, + uvm_page_index_t *outerp) + { + return NV_OK; + } + +#endif // UVM_IS_CONFIG_HMM() + +#endif // _UVM_HMM_H_ diff --git a/kernel-open/nvidia-uvm/uvm_hmm_sanity_test.c b/kernel-open/nvidia-uvm/uvm_hmm_sanity_test.c new file mode 100644 index 000000000..1548e2d2b --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_hmm_sanity_test.c @@ -0,0 +1,90 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the 
Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_test.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_hmm.h" + +NV_STATUS uvm_test_hmm_sanity(UVM_TEST_HMM_SANITY_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + struct mm_struct *mm; + uvm_va_block_t *hmm_block = NULL; + NV_STATUS status; + + mm = uvm_va_space_mm_retain(va_space); + if (!mm) + return NV_WARN_NOTHING_TO_DO; + + uvm_down_write_mmap_lock(mm); + uvm_va_space_down_write(va_space); + + // TODO: Bug 3351822: [UVM-HMM] Remove temporary testing changes. + // By default, HMM is enabled system wide but disabled per va_space. + // This will initialize the va_space for HMM. + status = uvm_hmm_va_space_initialize_test(va_space); + if (status != NV_OK) + goto out; + + uvm_va_space_up_write(va_space); + uvm_up_write_mmap_lock(mm); + + uvm_down_read_mmap_lock(mm); + uvm_va_space_down_read(va_space); + + // Try to create an HMM va_block to virtual address zero (NULL). + // It should fail. There should be no VMA but a va_block for range + // [0x0 0x1fffff] is possible. + status = uvm_hmm_va_block_find_create(va_space, 0UL, NULL, &hmm_block); + TEST_CHECK_GOTO(status == NV_ERR_INVALID_ADDRESS, done); + + // Try to create an HMM va_block which overlaps a UVM managed block. + // It should fail. + status = uvm_hmm_va_block_find_create(va_space, params->uvm_address, NULL, &hmm_block); + TEST_CHECK_GOTO(status == NV_ERR_INVALID_ADDRESS, done); + + // Try to create an HMM va_block; it should succeed. + status = uvm_hmm_va_block_find_create(va_space, params->hmm_address, NULL, &hmm_block); + TEST_CHECK_GOTO(status == NV_OK, done); + + // Try to find an existing HMM va_block; it should succeed. 
+ status = uvm_hmm_va_block_find(va_space, params->hmm_address, &hmm_block); + TEST_CHECK_GOTO(status == NV_OK, done); + +done: + uvm_va_space_up_read(va_space); + uvm_up_read_mmap_lock(mm); + uvm_va_space_mm_release(va_space); + + return status; + +out: + uvm_va_space_up_write(va_space); + uvm_up_write_mmap_lock(mm); + uvm_va_space_mm_release(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_host_test.c b/kernel-open/nvidia-uvm/uvm_host_test.c new file mode 100644 index 000000000..adee750e7 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_host_test.c @@ -0,0 +1,296 @@ +/******************************************************************************* + Copyright (c) 2020-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_global.h" +#include "uvm_common.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "uvm_test.h" +#include "uvm_va_space.h" +#include "uvm_mem.h" +#include "uvm_rm_mem.h" + +typedef struct test_sem_mem_t { + void *cpu_va; + NvU64 gpu_va; + + union { + uvm_mem_t *uvm_mem; + uvm_rm_mem_t *rm_mem; + }; +} test_sem_mem; + +static NV_STATUS test_semaphore_alloc_uvm_rm_mem(uvm_gpu_t *gpu, const size_t size, test_sem_mem *mem_out) +{ + NV_STATUS status; + uvm_rm_mem_t *mem = NULL; + NvU64 gpu_va; + + status = uvm_rm_mem_alloc_and_map_cpu(gpu, UVM_RM_MEM_TYPE_SYS, size, &mem); + TEST_NV_CHECK_RET(status); + + gpu_va = uvm_rm_mem_get_gpu_uvm_va(mem, gpu); + TEST_CHECK_GOTO(gpu_va < gpu->parent->max_host_va, error); + + mem_out->cpu_va = uvm_rm_mem_get_cpu_va(mem); + mem_out->gpu_va = gpu_va; + mem_out->rm_mem = mem; + + return NV_OK; + +error: + uvm_rm_mem_free(mem); + return status; +} + +static NV_STATUS test_semaphore_alloc_sem(uvm_gpu_t *gpu, const size_t size, test_sem_mem *mem_out) +{ + NV_STATUS status = NV_OK; + uvm_mem_t *mem = NULL; + NvU64 gpu_va; + + TEST_NV_CHECK_RET(uvm_mem_alloc_sysmem(size, current->mm, &mem)); + + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(mem, gpu), error); + gpu_va = uvm_mem_get_gpu_va_kernel(mem, gpu); + + // Use an RM allocation when Host cannot address the semaphore. + if (gpu_va >= gpu->parent->max_host_va) { + uvm_mem_free(mem); + return test_semaphore_alloc_uvm_rm_mem(gpu, size, mem_out); + } + + // This semaphore resides in the uvm_mem region, i.e., it has the GPU VA + // MSbit set. 
The intent is to validate semaphore operations when the + // semaphore's VA is in the high-end of the GPU effective virtual address + // space spectrum, i.e., its VA upper-bit is set. + TEST_CHECK_GOTO(gpu_va & (1ULL << (gpu->address_space_tree.hal->num_va_bits() - 1)), error); + + TEST_NV_CHECK_GOTO(uvm_mem_map_cpu_kernel(mem), error); + + mem_out->cpu_va = uvm_mem_get_cpu_addr_kernel(mem); + mem_out->gpu_va = gpu_va; + mem_out->uvm_mem = mem; + + return NV_OK; + +error: + uvm_mem_free(mem); + return status; +} + +static void test_semaphore_free_sem(uvm_gpu_t *gpu, test_sem_mem *mem) +{ + if (mem->gpu_va >= gpu->parent->uvm_mem_va_base) + uvm_mem_free(mem->uvm_mem); + else + uvm_rm_mem_free(mem->rm_mem); +} + +// This test is similar to the test_semaphore_release() test in uvm_ce_test.c, +// except that this one uses host_hal->semaphore_release(); +static NV_STATUS test_semaphore_release(uvm_gpu_t *gpu) +{ + NV_STATUS status; + test_sem_mem mem = { 0 }; + uvm_push_t push; + NvU32 value; + NvU32 payload = 0xA5A55A5A; + NvU32 *cpu_ptr; + + // Semaphore release needs 1 word (4 bytes). + const size_t size = sizeof(NvU32); + + status = test_semaphore_alloc_sem(gpu, size, &mem); + TEST_NV_CHECK_RET(status); + + // Initialize the payload. + cpu_ptr = (NvU32 *)mem.cpu_va; + *cpu_ptr = 0; + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "semaphore_release test"); + TEST_NV_CHECK_GOTO(status, done); + + gpu->parent->host_hal->semaphore_release(&push, mem.gpu_va, payload); + + status = uvm_push_end_and_wait(&push); + TEST_NV_CHECK_GOTO(status, done); + + value = *cpu_ptr; + if (value != payload) { + UVM_TEST_PRINT("Semaphore payload = %u instead of %u, GPU %s\n", value, payload, uvm_gpu_name(gpu)); + status = NV_ERR_INVALID_STATE; + goto done; + } + +done: + test_semaphore_free_sem(gpu, &mem); + + return status; +} + +static NV_STATUS test_semaphore_acquire(uvm_gpu_t *gpu) +{ + NV_STATUS status; + test_sem_mem mem = { 0 }; + uvm_push_t push; + uvm_spin_loop_t spin; + NvU32 *cpu_ptr, *cpu_sema_A, *cpu_sema_B, *cpu_sema_C; + NvU64 gpu_sema_va_A, gpu_sema_va_B, gpu_sema_va_C; + bool check_sema_C; + + // The semaphore is one word long(4 bytes), we use three semaphores. + const size_t sema_size = 4; + const size_t size = sema_size * 3; + + status = test_semaphore_alloc_sem(gpu, size, &mem); + TEST_NV_CHECK_RET(status); + + gpu_sema_va_A = mem.gpu_va; + gpu_sema_va_B = mem.gpu_va + sema_size; + gpu_sema_va_C = mem.gpu_va + 2 * sema_size; + + cpu_ptr = (NvU32 *)mem.cpu_va; + memset(cpu_ptr, 0, size); + cpu_sema_A = cpu_ptr; + cpu_sema_B = cpu_ptr + 1; + cpu_sema_C = cpu_ptr + 2; + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "semaphore_acquire test"); + TEST_NV_CHECK_GOTO(status, done); + + gpu->parent->host_hal->semaphore_release(&push, gpu_sema_va_A, 1); + gpu->parent->host_hal->semaphore_acquire(&push, gpu_sema_va_B, 1); + gpu->parent->host_hal->semaphore_release(&push, gpu_sema_va_C, 1); + + uvm_push_end(&push); + + // Wait for sema_A release. + UVM_SPIN_WHILE(UVM_READ_ONCE(*cpu_sema_A) != 1, &spin); + + // Sleep for 10ms, the GPU waits while sema_B is held by us. + msleep(10); + + check_sema_C = UVM_READ_ONCE(*cpu_sema_C) == 0; + + // memory fence/barrier, check comment in + // uvm_gpu_semaphore.c:uvm_gpu_semaphore_set_payload() for details. + mb(); + + // Release sema_B. + UVM_WRITE_ONCE(*cpu_sema_B, 1); + + // Wait for the GPU to release sema_C, i.e., the end of the push. 
+ status = uvm_push_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, done); + + // check_sema_C is validated here to ensure the push has ended and was not + // interrupted in the middle, had the check failed. + TEST_CHECK_GOTO(check_sema_C, done); + TEST_CHECK_GOTO(UVM_READ_ONCE(*cpu_sema_C) == 1, done); + +done: + test_semaphore_free_sem(gpu, &mem); + + return status; +} + +// This test is similar to the test_semaphore_timestamp() test in +// uvm_ce_test.c, except that this one uses host_hal->semaphore_timestamp(); +static NV_STATUS test_semaphore_timestamp(uvm_gpu_t *gpu) +{ + NV_STATUS status; + test_sem_mem mem = { 0 }; + uvm_push_t push; + NvU32 i; + NvU64 *timestamp; + NvU64 last_timestamp = 0; + + // 2 iterations: + // 1: compare retrieved timestamp with 0; + // 2: compare retrieved timestamp with previous timestamp (obtained in 1). + const NvU32 iterations = 2; + + // The semaphore is 4 words long (16 bytes). + const size_t size = 16; + + status = test_semaphore_alloc_sem(gpu, size, &mem); + TEST_NV_CHECK_RET(status); + + timestamp = (NvU64 *)mem.cpu_va; + TEST_CHECK_GOTO(timestamp != NULL, done); + memset(timestamp, 0, size); + + // Shift the timestamp pointer to where the semaphore timestamp info is. + timestamp += 1; + + for (i = 0; i < iterations; i++) { + status = uvm_push_begin(gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_INTERNAL, + &push, + "semaphore_timestamp test, iter: %u", + i); + TEST_NV_CHECK_GOTO(status, done); + + gpu->parent->host_hal->semaphore_timestamp(&push, mem.gpu_va); + + status = uvm_push_end_and_wait(&push); + TEST_NV_CHECK_GOTO(status, done); + + TEST_CHECK_GOTO(*timestamp != 0, done); + TEST_CHECK_GOTO(*timestamp >= last_timestamp, done); + last_timestamp = *timestamp; + } + +done: + test_semaphore_free_sem(gpu, &mem); + + return status; +} + +static NV_STATUS test_host(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + + for_each_va_space_gpu(gpu, va_space) { + TEST_NV_CHECK_RET(test_semaphore_release(gpu)); + TEST_NV_CHECK_RET(test_semaphore_acquire(gpu)); + TEST_NV_CHECK_RET(test_semaphore_timestamp(gpu)); + } + + return NV_OK; +} + +NV_STATUS uvm_test_host_sanity(UVM_TEST_HOST_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_read_rm(va_space); + + status = test_host(va_space); + + uvm_va_space_up_read_rm(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_ioctl.h b/kernel-open/nvidia-uvm/uvm_ioctl.h new file mode 100644 index 000000000..9459759a4 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_ioctl.h @@ -0,0 +1,1073 @@ +/******************************************************************************* + Copyright (c) 2013-2019 NVidia Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef _UVM_IOCTL_H +#define _UVM_IOCTL_H + +#include "uvm_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// +// Please see the header file (uvm.h) for detailed documentation on each of the +// associated API calls. +// + +#if defined(WIN32) || defined(WIN64) +# define UVM_IOCTL_BASE(i) CTL_CODE(FILE_DEVICE_UNKNOWN, 0x800+i, METHOD_BUFFERED, FILE_READ_DATA | FILE_WRITE_DATA) +#else +# define UVM_IOCTL_BASE(i) i +#endif + +// +// UvmReserveVa +// +#define UVM_RESERVE_VA UVM_IOCTL_BASE(1) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_RESERVE_VA_PARAMS; + +// +// UvmReleaseVa +// +#define UVM_RELEASE_VA UVM_IOCTL_BASE(2) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_RELEASE_VA_PARAMS; + +// +// UvmRegionCommit +// +#define UVM_REGION_COMMIT UVM_IOCTL_BASE(3) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + UvmStream streamId NV_ALIGN_BYTES(8); // IN + NvProcessorUuid gpuUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_REGION_COMMIT_PARAMS; + +// +// UvmRegionDecommit +// +#define UVM_REGION_DECOMMIT UVM_IOCTL_BASE(4) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_REGION_DECOMMIT_PARAMS; + +// +// UvmRegionSetStream +// +#define UVM_REGION_SET_STREAM UVM_IOCTL_BASE(5) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + UvmStream newStreamId NV_ALIGN_BYTES(8); // IN + NvProcessorUuid gpuUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_REGION_SET_STREAM_PARAMS; + +// +// UvmSetStreamRunning +// +#define UVM_SET_STREAM_RUNNING UVM_IOCTL_BASE(6) + +typedef struct +{ + UvmStream streamId NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_SET_STREAM_RUNNING_PARAMS; + + +// +// Due to limitations in how much we want to send per ioctl call, the nStreams +// member must be less than or equal to about 250. That's an upper limit. +// +// However, from a typical user-space driver's point of view (for example, the +// CUDA driver), a vast majority of the time, we expect there to be only one +// stream passed in. The second most common case is something like atmost 32 +// streams being passed in. The cases where there are more than 32 streams are +// the most rare. So we might want to optimize the ioctls accordingly so that we +// don't always copy a 250 * sizeof(streamID) sized array when there's only one +// or a few streams. +// +// For that reason, UVM_MAX_STREAMS_PER_IOCTL_CALL is set to 32. +// +// If the higher-level (uvm.h) call requires more streams to be stopped than +// this value, then multiple ioctl calls should be made. 
+// +#define UVM_MAX_STREAMS_PER_IOCTL_CALL 32 + +// +// UvmSetStreamStopped +// +#define UVM_SET_STREAM_STOPPED UVM_IOCTL_BASE(7) + +typedef struct +{ + UvmStream streamIdArray[UVM_MAX_STREAMS_PER_IOCTL_CALL] NV_ALIGN_BYTES(8); // IN + NvU64 nStreams NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_SET_STREAM_STOPPED_PARAMS; + +// +// UvmCallTestFunction +// +#define UVM_RUN_TEST UVM_IOCTL_BASE(9) + +typedef struct +{ + NvProcessorUuid gpuUuid; // IN + NvU32 test; // IN + struct + { + NvProcessorUuid peerGpuUuid; // IN + NvU32 peerId; // IN + } multiGpu; + NV_STATUS rmStatus; // OUT +} UVM_RUN_TEST_PARAMS; + +// +// This is a magic offset for mmap. Any mapping of an offset above this +// threshold will be treated as a counters mapping, not as an allocation +// mapping. Since allocation offsets must be identical to the virtual address +// of the mapping, this threshold has to be an offset that cannot be +// a valid virtual address. +// +#if defined(__linux__) + #if defined(NV_64_BITS) + #define UVM_EVENTS_OFFSET_BASE (1UL << 63) + #define UVM_COUNTERS_OFFSET_BASE (1UL << 62) + #else + #define UVM_EVENTS_OFFSET_BASE (1UL << 31) + #define UVM_COUNTERS_OFFSET_BASE (1UL << 30) + #endif +#endif // defined(__linux___) + +// +// UvmAddSession +// +#define UVM_ADD_SESSION UVM_IOCTL_BASE(10) + +typedef struct +{ + NvU32 pidTarget; // IN +#ifdef __linux__ + NvP64 countersBaseAddress NV_ALIGN_BYTES(8); // IN + NvS32 sessionIndex; // OUT (session index that got added) +#endif + NV_STATUS rmStatus; // OUT +} UVM_ADD_SESSION_PARAMS; + +// +// UvmRemoveSession +// +#define UVM_REMOVE_SESSION UVM_IOCTL_BASE(11) + +typedef struct +{ +#ifdef __linux__ + NvS32 sessionIndex; // IN (session index to be removed) +#endif + NV_STATUS rmStatus; // OUT +} UVM_REMOVE_SESSION_PARAMS; + + +#define UVM_MAX_COUNTERS_PER_IOCTL_CALL 32 + +// +// UvmEnableCounters +// +#define UVM_ENABLE_COUNTERS UVM_IOCTL_BASE(12) + +typedef struct +{ +#ifdef __linux__ + NvS32 sessionIndex; // IN +#endif + UvmCounterConfig config[UVM_MAX_COUNTERS_PER_IOCTL_CALL]; // IN + NvU32 count; // IN + NV_STATUS rmStatus; // OUT +} UVM_ENABLE_COUNTERS_PARAMS; + +// +// UvmMapCounter +// +#define UVM_MAP_COUNTER UVM_IOCTL_BASE(13) + +typedef struct +{ +#ifdef __linux__ + NvS32 sessionIndex; // IN +#endif + NvU32 scope; // IN (UvmCounterScope) + NvU32 counterName; // IN (UvmCounterName) + NvProcessorUuid gpuUuid; // IN + NvP64 addr NV_ALIGN_BYTES(8); // OUT + NV_STATUS rmStatus; // OUT +} UVM_MAP_COUNTER_PARAMS; + +// +// UvmCreateEventQueue +// +#define UVM_CREATE_EVENT_QUEUE UVM_IOCTL_BASE(14) + +typedef struct +{ +#ifdef __linux__ + NvS32 sessionIndex; // IN +#endif + NvU32 eventQueueIndex; // OUT + NvU64 queueSize NV_ALIGN_BYTES(8); // IN + NvU64 notificationCount NV_ALIGN_BYTES(8); // IN +#if defined(WIN32) || defined(WIN64) + NvU64 notificationHandle NV_ALIGN_BYTES(8); // IN +#endif + NvU32 timeStampType; // IN (UvmEventTimeStampType) + NV_STATUS rmStatus; // OUT +} UVM_CREATE_EVENT_QUEUE_PARAMS; + +// +// UvmRemoveEventQueue +// +#define UVM_REMOVE_EVENT_QUEUE UVM_IOCTL_BASE(15) + +typedef struct +{ +#ifdef __linux__ + NvS32 sessionIndex; // IN +#endif + NvU32 eventQueueIndex; // IN + NV_STATUS rmStatus; // OUT +} UVM_REMOVE_EVENT_QUEUE_PARAMS; + +// +// UvmMapEventQueue +// +#define UVM_MAP_EVENT_QUEUE UVM_IOCTL_BASE(16) + +typedef struct +{ +#ifdef __linux__ + NvS32 sessionIndex; // IN +#endif + NvU32 eventQueueIndex; // IN + NvP64 userRODataAddr NV_ALIGN_BYTES(8); // IN + NvP64 userRWDataAddr NV_ALIGN_BYTES(8); // IN + NvP64 
readIndexAddr NV_ALIGN_BYTES(8); // OUT + NvP64 writeIndexAddr NV_ALIGN_BYTES(8); // OUT + NvP64 queueBufferAddr NV_ALIGN_BYTES(8); // OUT + NV_STATUS rmStatus; // OUT +} UVM_MAP_EVENT_QUEUE_PARAMS; + +// +// UvmEnableEvent +// +#define UVM_EVENT_CTRL UVM_IOCTL_BASE(17) + +typedef struct +{ +#ifdef __linux__ + NvS32 sessionIndex; // IN +#endif + NvU32 eventQueueIndex; // IN + NvS32 eventType; // IN + NvU32 enable; // IN + NV_STATUS rmStatus; // OUT +} UVM_EVENT_CTRL_PARAMS; + +// +// UvmRegisterMpsServer +// +#define UVM_REGISTER_MPS_SERVER UVM_IOCTL_BASE(18) + +typedef struct +{ + NvProcessorUuid gpuUuidArray[UVM_MAX_GPUS]; // IN + NvU32 numGpus; // IN + NvU64 serverId NV_ALIGN_BYTES(8); // OUT + NV_STATUS rmStatus; // OUT +} UVM_REGISTER_MPS_SERVER_PARAMS; + +// +// UvmRegisterMpsClient +// +#define UVM_REGISTER_MPS_CLIENT UVM_IOCTL_BASE(19) + +typedef struct +{ + NvU64 serverId NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_REGISTER_MPS_CLIENT_PARAMS; + +// +// UvmEventGetGpuUuidTable +// +#define UVM_GET_GPU_UUID_TABLE UVM_IOCTL_BASE(20) + +typedef struct +{ + NvProcessorUuid gpuUuidArray[UVM_MAX_GPUS]; // OUT + NvU32 validCount; // OUT + NV_STATUS rmStatus; // OUT +} UVM_GET_GPU_UUID_TABLE_PARAMS; + +#if defined(WIN32) || defined(WIN64) +// +// UvmRegionSetBacking +// +#define UVM_REGION_SET_BACKING UVM_IOCTL_BASE(21) + +typedef struct +{ + NvProcessorUuid gpuUuid; // IN + NvU32 hAllocation; // IN + NvP64 vaAddr NV_ALIGN_BYTES(8); // IN + NvU64 regionLength NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_REGION_SET_BACKING_PARAMS; + +// +// UvmRegionUnsetBacking +// +#define UVM_REGION_UNSET_BACKING UVM_IOCTL_BASE(22) + +typedef struct +{ + NvP64 vaAddr NV_ALIGN_BYTES(8); // IN + NvU64 regionLength NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_REGION_UNSET_BACKING_PARAMS; + +#endif + +#define UVM_CREATE_RANGE_GROUP UVM_IOCTL_BASE(23) + +typedef struct +{ + NvU64 rangeGroupId NV_ALIGN_BYTES(8); // OUT + NV_STATUS rmStatus; // OUT +} UVM_CREATE_RANGE_GROUP_PARAMS; + +#define UVM_DESTROY_RANGE_GROUP UVM_IOCTL_BASE(24) + +typedef struct +{ + NvU64 rangeGroupId NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_DESTROY_RANGE_GROUP_PARAMS; + +// +// UvmRegisterGpuVaSpace +// +#define UVM_REGISTER_GPU_VASPACE UVM_IOCTL_BASE(25) + +typedef struct +{ + NvProcessorUuid gpuUuid; // IN + NvS32 rmCtrlFd; // IN + NvHandle hClient; // IN + NvHandle hVaSpace; // IN + NV_STATUS rmStatus; // OUT +} UVM_REGISTER_GPU_VASPACE_PARAMS; + +// +// UvmUnregisterGpuVaSpace +// +#define UVM_UNREGISTER_GPU_VASPACE UVM_IOCTL_BASE(26) + +typedef struct +{ + NvProcessorUuid gpuUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_UNREGISTER_GPU_VASPACE_PARAMS; + +// +// UvmRegisterChannel +// +#define UVM_REGISTER_CHANNEL UVM_IOCTL_BASE(27) + +typedef struct +{ + NvProcessorUuid gpuUuid; // IN + NvS32 rmCtrlFd; // IN + NvHandle hClient; // IN + NvHandle hChannel; // IN + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_REGISTER_CHANNEL_PARAMS; + +// +// UvmUnregisterChannel +// +#define UVM_UNREGISTER_CHANNEL UVM_IOCTL_BASE(28) + +typedef struct +{ + NvProcessorUuid gpuUuid; // IN + NvHandle hClient; // IN + NvHandle hChannel; // IN + NV_STATUS rmStatus; // OUT +} UVM_UNREGISTER_CHANNEL_PARAMS; + +// +// UvmEnablePeerAccess +// +#define UVM_ENABLE_PEER_ACCESS UVM_IOCTL_BASE(29) + +typedef struct +{ + NvProcessorUuid gpuUuidA; // IN + NvProcessorUuid gpuUuidB; // IN + NV_STATUS rmStatus; // OUT +} 
UVM_ENABLE_PEER_ACCESS_PARAMS; + +// +// UvmDisablePeerAccess +// +#define UVM_DISABLE_PEER_ACCESS UVM_IOCTL_BASE(30) + +typedef struct +{ + NvProcessorUuid gpuUuidA; // IN + NvProcessorUuid gpuUuidB; // IN + NV_STATUS rmStatus; // OUT +} UVM_DISABLE_PEER_ACCESS_PARAMS; + +// +// UvmSetRangeGroup +// +#define UVM_SET_RANGE_GROUP UVM_IOCTL_BASE(31) + +typedef struct +{ + NvU64 rangeGroupId NV_ALIGN_BYTES(8); // IN + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_SET_RANGE_GROUP_PARAMS; + +// +// UvmMapExternalAllocation +// +#define UVM_MAP_EXTERNAL_ALLOCATION UVM_IOCTL_BASE(33) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvU64 offset NV_ALIGN_BYTES(8); // IN + UvmGpuMappingAttributes perGpuAttributes[UVM_MAX_GPUS]; // IN + NvU64 gpuAttributesCount NV_ALIGN_BYTES(8); // IN + NvS32 rmCtrlFd; // IN + NvU32 hClient; // IN + NvU32 hMemory; // IN + + NV_STATUS rmStatus; // OUT +} UVM_MAP_EXTERNAL_ALLOCATION_PARAMS; + +// +// UvmFree +// +#define UVM_FREE UVM_IOCTL_BASE(34) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_FREE_PARAMS; + +// +// UvmMemMap +// +#define UVM_MEM_MAP UVM_IOCTL_BASE(35) + +typedef struct +{ + NvP64 regionBase NV_ALIGN_BYTES(8); // IN + NvU64 regionLength NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_MEM_MAP_PARAMS; + +// +// UvmDebugAccessMemory +// +#define UVM_DEBUG_ACCESS_MEMORY UVM_IOCTL_BASE(36) + +typedef struct +{ +#ifdef __linux__ + NvS32 sessionIndex; // IN +#endif + NvU64 baseAddress NV_ALIGN_BYTES(8); // IN + NvU64 sizeInBytes NV_ALIGN_BYTES(8); // IN + NvU32 accessType; // IN (UvmDebugAccessType) + NvU64 buffer NV_ALIGN_BYTES(8); // IN/OUT + NvBool isBitmaskSet; // OUT + NvU64 bitmask NV_ALIGN_BYTES(8); // IN/OUT + NV_STATUS rmStatus; // OUT +} UVM_DEBUG_ACCESS_MEMORY_PARAMS; + +// +// UvmRegisterGpu +// +#define UVM_REGISTER_GPU UVM_IOCTL_BASE(37) + +typedef struct +{ + NvProcessorUuid gpu_uuid; // IN + NvBool numaEnabled; // OUT + NvS32 numaNodeId; // OUT + NvS32 rmCtrlFd; // IN + NvHandle hClient; // IN + NvHandle hSmcPartRef; // IN + NV_STATUS rmStatus; // OUT +} UVM_REGISTER_GPU_PARAMS; + +// +// UvmUnregisterGpu +// +#define UVM_UNREGISTER_GPU UVM_IOCTL_BASE(38) + +typedef struct +{ + NvProcessorUuid gpu_uuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_UNREGISTER_GPU_PARAMS; + +#define UVM_PAGEABLE_MEM_ACCESS UVM_IOCTL_BASE(39) + +typedef struct +{ + NvBool pageableMemAccess; // OUT + NV_STATUS rmStatus; // OUT +} UVM_PAGEABLE_MEM_ACCESS_PARAMS; + +// +// Due to limitations in how much we want to send per ioctl call, the numGroupIds +// member must be less than or equal to about 250. That's an upper limit. +// +// However, from a typical user-space driver's point of view (for example, the +// CUDA driver), a vast majority of the time, we expect there to be only one +// range group passed in. The second most common case is something like atmost 32 +// range groups being passed in. The cases where there are more than 32 range +// groups are the most rare. So we might want to optimize the ioctls accordingly +// so that we don't always copy a 250 * sizeof(NvU64) sized array when there's +// only one or a few range groups. +// +// For that reason, UVM_MAX_RANGE_GROUPS_PER_IOCTL_CALL is set to 32. +// +// If the higher-level (uvm.h) call requires more range groups than +// this value, then multiple ioctl calls should be made. 
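// Illustrative user-space sketch (not part of this header) of batching an
// arbitrary number of range group IDs across multiple
// UVM_PREVENT_MIGRATION_RANGE_GROUPS calls, as described above, using the
// macro and params struct defined just below. It assumes <string.h> and
// <sys/ioctl.h> are included, 'uvm_fd' is an already-open UVM file
// descriptor, and error handling is reduced to a bare minimum:
//
//     static int prevent_migration_all(int uvm_fd, const NvU64 *ids, NvU64 count)
//     {
//         UVM_PREVENT_MIGRATION_RANGE_GROUPS_PARAMS params;
//         NvU64 done = 0;
//
//         while (done < count) {
//             NvU64 batch = count - done;
//
//             if (batch > UVM_MAX_RANGE_GROUPS_PER_IOCTL_CALL)
//                 batch = UVM_MAX_RANGE_GROUPS_PER_IOCTL_CALL;
//
//             memset(&params, 0, sizeof(params));
//             memcpy(params.rangeGroupIds, ids + done, batch * sizeof(ids[0]));
//             params.numGroupIds = batch;
//
//             if (ioctl(uvm_fd, UVM_PREVENT_MIGRATION_RANGE_GROUPS, &params) != 0 ||
//                 params.rmStatus != NV_OK)
//                 return -1;
//
//             done += batch;
//         }
//
//         return 0;
//     }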
+// +#define UVM_MAX_RANGE_GROUPS_PER_IOCTL_CALL 32 + +// +// UvmPreventMigrationRangeGroups +// +#define UVM_PREVENT_MIGRATION_RANGE_GROUPS UVM_IOCTL_BASE(40) + +typedef struct +{ + NvU64 rangeGroupIds[UVM_MAX_RANGE_GROUPS_PER_IOCTL_CALL] NV_ALIGN_BYTES(8); // IN + NvU64 numGroupIds NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_PREVENT_MIGRATION_RANGE_GROUPS_PARAMS; + +// +// UvmAllowMigrationRangeGroups +// +#define UVM_ALLOW_MIGRATION_RANGE_GROUPS UVM_IOCTL_BASE(41) + +typedef struct +{ + NvU64 rangeGroupIds[UVM_MAX_RANGE_GROUPS_PER_IOCTL_CALL] NV_ALIGN_BYTES(8); // IN + NvU64 numGroupIds NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_ALLOW_MIGRATION_RANGE_GROUPS_PARAMS; + +// +// UvmSetPreferredLocation +// +#define UVM_SET_PREFERRED_LOCATION UVM_IOCTL_BASE(42) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvProcessorUuid preferredLocation; // IN + NV_STATUS rmStatus; // OUT +} UVM_SET_PREFERRED_LOCATION_PARAMS; + +// +// UvmUnsetPreferredLocation +// +#define UVM_UNSET_PREFERRED_LOCATION UVM_IOCTL_BASE(43) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_UNSET_PREFERRED_LOCATION_PARAMS; + +// +// UvmEnableReadDuplication +// +#define UVM_ENABLE_READ_DUPLICATION UVM_IOCTL_BASE(44) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_ENABLE_READ_DUPLICATION_PARAMS; + +// +// UvmDisableReadDuplication +// +#define UVM_DISABLE_READ_DUPLICATION UVM_IOCTL_BASE(45) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_DISABLE_READ_DUPLICATION_PARAMS; + +// +// UvmSetAccessedBy +// +#define UVM_SET_ACCESSED_BY UVM_IOCTL_BASE(46) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvProcessorUuid accessedByUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_SET_ACCESSED_BY_PARAMS; + +// +// UvmUnsetAccessedBy +// +#define UVM_UNSET_ACCESSED_BY UVM_IOCTL_BASE(47) + +typedef struct +{ + NvU64 requestedBase NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvProcessorUuid accessedByUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_UNSET_ACCESSED_BY_PARAMS; + +// For managed allocations, UVM_MIGRATE implements the behavior described in +// UvmMigrate. If the input virtual range corresponds to system-allocated +// pageable memory, and the GPUs in the system support transparent access to +// pageable memory, the scheme is a bit more elaborate, potentially with +// several transitions betwen user and kernel spaces: +// +// 1) UVM_MIGRATE with the range base address and size. This will migrate +// anonymous vmas until: +// a) It finds a file-backed vma or no GPUs are registered in the VA space +// so no GPU can drive the copy. It will try to populate the vma using +// get_user_pages and return NV_WARN_NOTHING_TO_DO. +// b) It fails to allocate memory on the destination CPU node. It will return +// NV_ERR_MORE_PROCESSING_REQUIRED. +// c) It fails to populate pages directly on the destination GPU. It will try +// to populate the vma using get_user_pages and return. +// d) The full input range is migrated (or empty), this call will release +// the semaphore before returning. 
+// 2) The user-mode needs to handle the following error codes: +// a) NV_WARN_NOTHING_TO_DO: use move_pages to migrate pages for the VA +// range corresponding to the vma that couldn't be migrated in kernel +// mode. Then, it processes the remainder of the range, starting after +// that vma. +// b) NV_ERR_MORE_PROCESSING_REQUIRED: choose a different CPU NUMA node, +// trying to enforce the NUMA policies of the thread and retry the +// ioctl. If there are no more CPU NUMA nodes to try, try to populate +// the remainder of the range anywhere using the UVM_POPULATE_PAGEABLE +// ioctl. +// c) NV_OK: success. This only guarantees that pages were populated, not +// that they moved to the requested destination. +// 3) For cases 2.a) and 2.b) Goto 1 +// +// If UVM_MIGRATE_FLAG_ASYNC is 0, the ioctl won't return until the migration is +// done and all mappings are updated, subject to the special rules for pageable +// memory described above. semaphoreAddress must be 0. semaphorePayload is +// ignored. +// +// If UVM_MIGRATE_FLAG_ASYNC is 1, the ioctl may return before the migration is +// complete. If semaphoreAddress is 0, semaphorePayload is ignored and no +// notification will be given on completion. If semaphoreAddress is non-zero +// and the returned error code is NV_OK, semaphorePayload will be written to +// semaphoreAddress once the migration is complete. +#define UVM_MIGRATE_FLAG_ASYNC 0x00000001 + +// When the migration destination is the CPU, skip the step which creates new +// virtual mappings on the CPU. Creating CPU mappings must wait for the +// migration to complete, so skipping this step allows the migration to be +// fully asynchronous. This flag is ignored for pageable migrations if the GPUs +// in the system support transparent access to pageable memory. +// +// The UVM driver must have builtin tests enabled for the API to use this flag. +#define UVM_MIGRATE_FLAG_SKIP_CPU_MAP 0x00000002 + +// By default UVM_MIGRATE returns an error if the destination UUID is a GPU +// without a registered GPU VA space. Setting this flag skips that check, so the +// destination GPU only needs to have been registered. +// +// This can be used in tests to trigger migrations of physical memory without +// the overhead of GPU PTE mappings. +// +// The UVM driver must have builtin tests enabled for the API to use this flag. +#define UVM_MIGRATE_FLAG_NO_GPU_VA_SPACE 0x00000004 + +#define UVM_MIGRATE_FLAGS_TEST_ALL (UVM_MIGRATE_FLAG_SKIP_CPU_MAP | \ + UVM_MIGRATE_FLAG_NO_GPU_VA_SPACE) + +#define UVM_MIGRATE_FLAGS_ALL (UVM_MIGRATE_FLAG_ASYNC | \ + UVM_MIGRATE_FLAGS_TEST_ALL) + +// For pageable migrations, cpuNumaNode is used as the destination NUMA node if +// destinationUuid is the CPU. +// +// If NV_WARN_NOTHING_TO_DO is returned, user-space is responsible for +// completing the migration of the VA range described by userSpaceStart and +// userSpaceLength using move_pages. +// +// If NV_ERR_MORE_PROCESSING_REQUIRED is returned, user-space is responsible +// for re-trying with a different cpuNumaNode, starting at userSpaceStart. 
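// Illustrative user-space sketch (not part of this header) of the retry
// protocol described above, using the UVM_MIGRATE_PARAMS struct defined just
// below. pick_next_cpu_numa_node() is a hypothetical helper standing in for
// the user-mode driver's NUMA policy, 'uvm_fd' is assumed to be an
// already-open UVM file descriptor, and the move_pages()/UVM_POPULATE_PAGEABLE
// fallbacks from steps 2.a and 2.b are only indicated by comments:
//
//     static NV_STATUS migrate_sync(int uvm_fd,
//                                   NvU64 base,
//                                   NvU64 length,
//                                   NvProcessorUuid dest,
//                                   NvU32 cpu_numa_node)
//     {
//         NvU64 end = base + length;
//         UVM_MIGRATE_PARAMS params;
//
//         for (;;) {
//             memset(&params, 0, sizeof(params));      // flags = 0: synchronous
//             params.base = base;
//             params.length = end - base;
//             params.destinationUuid = dest;
//             params.cpuNumaNode = cpu_numa_node;
//
//             if (ioctl(uvm_fd, UVM_MIGRATE, &params) != 0)
//                 return NV_ERR_GENERIC;
//
//             if (params.rmStatus == NV_WARN_NOTHING_TO_DO) {
//                 // 2.a: migrate [userSpaceStart, +userSpaceLength) with
//                 // move_pages(), then continue after that vma.
//                 if (params.userSpaceStart + params.userSpaceLength >= end)
//                     return NV_OK;
//                 base = params.userSpaceStart + params.userSpaceLength;
//             }
//             else if (params.rmStatus == NV_ERR_MORE_PROCESSING_REQUIRED) {
//                 // 2.b: retry from userSpaceStart with a different CPU NUMA
//                 // node (or fall back to UVM_POPULATE_PAGEABLE when no
//                 // nodes are left to try).
//                 base = params.userSpaceStart;
//                 cpu_numa_node = pick_next_cpu_numa_node(cpu_numa_node);
//             }
//             else {
//                 return params.rmStatus;              // NV_OK or a hard error
//             }
//         }
//     }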
+#define UVM_MIGRATE UVM_IOCTL_BASE(51) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvProcessorUuid destinationUuid; // IN + NvU32 flags; // IN + NvU64 semaphoreAddress NV_ALIGN_BYTES(8); // IN + NvU32 semaphorePayload; // IN + NvU32 cpuNumaNode; // IN + NvU64 userSpaceStart NV_ALIGN_BYTES(8); // OUT + NvU64 userSpaceLength NV_ALIGN_BYTES(8); // OUT + NV_STATUS rmStatus; // OUT +} UVM_MIGRATE_PARAMS; + +#define UVM_MIGRATE_RANGE_GROUP UVM_IOCTL_BASE(53) +typedef struct +{ + NvU64 rangeGroupId NV_ALIGN_BYTES(8); // IN + NvProcessorUuid destinationUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_MIGRATE_RANGE_GROUP_PARAMS; + +// +// UvmEnableSystemWideAtomics +// +#define UVM_ENABLE_SYSTEM_WIDE_ATOMICS UVM_IOCTL_BASE(54) + +typedef struct +{ + NvProcessorUuid gpu_uuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_ENABLE_SYSTEM_WIDE_ATOMICS_PARAMS; + +// +// UvmDisableSystemWideAtomics +// +#define UVM_DISABLE_SYSTEM_WIDE_ATOMICS UVM_IOCTL_BASE(55) + +typedef struct +{ + NvProcessorUuid gpu_uuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_DISABLE_SYSTEM_WIDE_ATOMICS_PARAMS; + +// +// Initialize any tracker object such as a queue or counter +// UvmToolsCreateEventQueue, UvmToolsCreateProcessAggregateCounters, UvmToolsCreateProcessorCounters +// +#define UVM_TOOLS_INIT_EVENT_TRACKER UVM_IOCTL_BASE(56) +typedef struct +{ + NvU64 queueBuffer NV_ALIGN_BYTES(8); // IN + NvU64 queueBufferSize NV_ALIGN_BYTES(8); // IN + NvU64 controlBuffer NV_ALIGN_BYTES(8); // IN + NvProcessorUuid processor; // IN + NvU32 allProcessors; // IN + NvU32 uvmFd; // IN + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_INIT_EVENT_TRACKER_PARAMS; + +// +// UvmToolsSetNotificationThreshold +// +#define UVM_TOOLS_SET_NOTIFICATION_THRESHOLD UVM_IOCTL_BASE(57) +typedef struct +{ + NvU32 notificationThreshold; // IN + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_SET_NOTIFICATION_THRESHOLD_PARAMS; + +// +// UvmToolsEventQueueEnableEvents +// +#define UVM_TOOLS_EVENT_QUEUE_ENABLE_EVENTS UVM_IOCTL_BASE(58) +typedef struct +{ + NvU64 eventTypeFlags NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_EVENT_QUEUE_ENABLE_EVENTS_PARAMS; + +// +// UvmToolsEventQueueDisableEvents +// +#define UVM_TOOLS_EVENT_QUEUE_DISABLE_EVENTS UVM_IOCTL_BASE(59) +typedef struct +{ + NvU64 eventTypeFlags NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_EVENT_QUEUE_DISABLE_EVENTS_PARAMS; + +// +// UvmToolsEnableCounters +// +#define UVM_TOOLS_ENABLE_COUNTERS UVM_IOCTL_BASE(60) +typedef struct +{ + NvU64 counterTypeFlags NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_ENABLE_COUNTERS_PARAMS; + +// +// UvmToolsDisableCounters +// +#define UVM_TOOLS_DISABLE_COUNTERS UVM_IOCTL_BASE(61) +typedef struct +{ + NvU64 counterTypeFlags NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_DISABLE_COUNTERS_PARAMS; + +// +// UvmToolsReadProcessMemory +// +#define UVM_TOOLS_READ_PROCESS_MEMORY UVM_IOCTL_BASE(62) +typedef struct +{ + NvU64 buffer NV_ALIGN_BYTES(8); // IN + NvU64 size NV_ALIGN_BYTES(8); // IN + NvU64 targetVa NV_ALIGN_BYTES(8); // IN + NvU64 bytesRead NV_ALIGN_BYTES(8); // OUT + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_READ_PROCESS_MEMORY_PARAMS; + +// +// UvmToolsWriteProcessMemory +// +#define UVM_TOOLS_WRITE_PROCESS_MEMORY UVM_IOCTL_BASE(63) +typedef struct +{ + NvU64 buffer NV_ALIGN_BYTES(8); // IN + NvU64 size NV_ALIGN_BYTES(8); // IN + NvU64 targetVa NV_ALIGN_BYTES(8); // IN + NvU64 bytesWritten NV_ALIGN_BYTES(8); // OUT + NV_STATUS rmStatus; // OUT 
+} UVM_TOOLS_WRITE_PROCESS_MEMORY_PARAMS; + +// +// UvmToolsGetProcessorUuidTable +// +#define UVM_TOOLS_GET_PROCESSOR_UUID_TABLE UVM_IOCTL_BASE(64) +typedef struct +{ + NvU64 tablePtr NV_ALIGN_BYTES(8); // IN + NvU32 count; // IN/OUT + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_GET_PROCESSOR_UUID_TABLE_PARAMS; + + +// +// UvmMapDynamicParallelismRegion +// +#define UVM_MAP_DYNAMIC_PARALLELISM_REGION UVM_IOCTL_BASE(65) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvProcessorUuid gpuUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_MAP_DYNAMIC_PARALLELISM_REGION_PARAMS; + +// +// UvmUnmapExternal +// +#define UVM_UNMAP_EXTERNAL UVM_IOCTL_BASE(66) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvProcessorUuid gpuUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_UNMAP_EXTERNAL_PARAMS; + + +// +// UvmToolsFlushEvents +// +#define UVM_TOOLS_FLUSH_EVENTS UVM_IOCTL_BASE(67) +typedef struct +{ + NV_STATUS rmStatus; // OUT +} UVM_TOOLS_FLUSH_EVENTS_PARAMS; + +// +// UvmAllocSemaphorePool +// +#define UVM_ALLOC_SEMAPHORE_POOL UVM_IOCTL_BASE(68) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + UvmGpuMappingAttributes perGpuAttributes[UVM_MAX_GPUS]; // IN + NvU64 gpuAttributesCount NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_ALLOC_SEMAPHORE_POOL_PARAMS; + +// +// UvmCleanUpZombieResources +// +#define UVM_CLEAN_UP_ZOMBIE_RESOURCES UVM_IOCTL_BASE(69) +typedef struct +{ + NV_STATUS rmStatus; // OUT +} UVM_CLEAN_UP_ZOMBIE_RESOURCES_PARAMS; + +// +// UvmIsPageableMemoryAccessSupportedOnGpu +// +#define UVM_PAGEABLE_MEM_ACCESS_ON_GPU UVM_IOCTL_BASE(70) + +typedef struct +{ + NvProcessorUuid gpu_uuid; // IN + NvBool pageableMemAccess; // OUT + NV_STATUS rmStatus; // OUT +} UVM_PAGEABLE_MEM_ACCESS_ON_GPU_PARAMS; + +// +// UvmPopulatePageable +// +#define UVM_POPULATE_PAGEABLE UVM_IOCTL_BASE(71) + +// Allow population of managed ranges. +// +// The UVM driver must have builtin tests enabled for the API to use the +// following two flags. +#define UVM_POPULATE_PAGEABLE_FLAG_ALLOW_MANAGED 0x00000001 + +// By default UVM_POPULATE_PAGEABLE returns an error if the destination vma +// does not have read permission. This flag skips that check. 
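// Illustrative usage sketch only (not part of this header, and not guaranteed
// to match the real user-mode driver): each ioctl above takes one params
// struct; the caller fills the IN fields and the kernel writes the OUT fields
// back, including rmStatus. Assuming the UVM device node is /dev/nvidia-uvm,
// that uvm_fd is an open descriptor already initialized with UVM_INITIALIZE
// (see uvm_linux_ioctl.h further below), and that addr/len are hypothetical
// caller variables, populating a pageable range looks roughly like:
//
//     UVM_POPULATE_PAGEABLE_PARAMS p = {0};
//     p.base   = (NvU64)(uintptr_t)addr;  // user VA of the range
//     p.length = (NvU64)len;
//     p.flags  = 0;                       // or the test-only flags just below
//     if (ioctl(uvm_fd, UVM_POPULATE_PAGEABLE, &p) != 0 || p.rmStatus != NV_OK)
//         /* the range was not populated */;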
+#define UVM_POPULATE_PAGEABLE_FLAG_SKIP_PROT_CHECK 0x00000002 + +#define UVM_POPULATE_PAGEABLE_FLAGS_TEST_ALL (UVM_POPULATE_PAGEABLE_FLAG_ALLOW_MANAGED | \ + UVM_POPULATE_PAGEABLE_FLAG_SKIP_PROT_CHECK) + +#define UVM_POPULATE_PAGEABLE_FLAGS_ALL UVM_POPULATE_PAGEABLE_FLAGS_TEST_ALL + +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvU32 flags; // IN + NV_STATUS rmStatus; // OUT +} UVM_POPULATE_PAGEABLE_PARAMS; + +// +// UvmValidateVaRange +// +#define UVM_VALIDATE_VA_RANGE UVM_IOCTL_BASE(72) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_VALIDATE_VA_RANGE_PARAMS; + +#define UVM_CREATE_EXTERNAL_RANGE UVM_IOCTL_BASE(73) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_CREATE_EXTERNAL_RANGE_PARAMS; + +#define UVM_MAP_EXTERNAL_SPARSE UVM_IOCTL_BASE(74) +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); // IN + NvU64 length NV_ALIGN_BYTES(8); // IN + NvProcessorUuid gpuUuid; // IN + NV_STATUS rmStatus; // OUT +} UVM_MAP_EXTERNAL_SPARSE_PARAMS; + +// +// Temporary ioctls which should be removed before UVM 8 release +// Number backwards from 2047 - highest custom ioctl function number +// windows can handle. +// + +// +// UvmIs8Supported +// +#define UVM_IS_8_SUPPORTED UVM_IOCTL_BASE(2047) + +typedef struct +{ + NvU32 is8Supported; // OUT + NV_STATUS rmStatus; // OUT +} UVM_IS_8_SUPPORTED_PARAMS; + + +#ifdef __cplusplus +} +#endif + +#endif // _UVM_IOCTL_H diff --git a/kernel-open/nvidia-uvm/uvm_kvmalloc.c b/kernel-open/nvidia-uvm/uvm_kvmalloc.c new file mode 100644 index 000000000..69e0b30b9 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_kvmalloc.c @@ -0,0 +1,412 @@ +/******************************************************************************* + Copyright (c) 2016-2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_kvmalloc.h" +#include "uvm_rb_tree.h" + +// To implement realloc for vmalloc-based allocations we need to track the size +// of the original allocation. We can do that by allocating a header along with +// the allocation itself. Since vmalloc is only used for relatively large +// allocations, this overhead is very small. +// +// We don't need this for kmalloc since we can use ksize(). 
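// As an illustration of the scheme described above (a sketch, not additional
// driver code), a vmalloc-backed request of `size` bytes is laid out as:
//
//     hdr = vmalloc(sizeof(*hdr) + size);
//     hdr->alloc_size = size;   // remembered so uvm_kvsize()/realloc work
//     return hdr->ptr;          // the pointer the uvm_kvmalloc() caller sees
//
// uvm_kvsize() and uvm_kvfree() later recover the header with
// container_of(p, uvm_vmalloc_hdr_t, ptr), while kmalloc-backed allocations
// carry no header and rely on ksize() instead.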
+typedef struct +{ + size_t alloc_size; + uint8_t ptr[0]; +} uvm_vmalloc_hdr_t; + +typedef struct +{ + const char *file; + const char *function; + int line; + uvm_rb_tree_node_t node; +} uvm_kvmalloc_info_t; + +typedef enum +{ + UVM_KVMALLOC_LEAK_CHECK_NONE = 0, + UVM_KVMALLOC_LEAK_CHECK_BYTES, + UVM_KVMALLOC_LEAK_CHECK_ORIGIN, + UVM_KVMALLOC_LEAK_CHECK_COUNT +} uvm_kvmalloc_leak_check_t; + +// This is used just to make sure that the APIs aren't used outside of +// uvm_kvmalloc_init/uvm_kvmalloc_exit. The memory allocation would still work +// fine, but the leak checker would get confused. +static bool g_malloc_initialized = false; + +static struct +{ + // Current outstanding bytes allocated + atomic_long_t bytes_allocated; + + // Number of allocations made which failed their info allocations. Used just + // for sanity checks. + atomic_long_t untracked_allocations; + + // Use a raw spinlock rather than a uvm_spinlock_t because the kvmalloc + // layer is initialized and torn down before the thread context layer. + spinlock_t lock; + + // Table of all outstanding allocations + uvm_rb_tree_t allocation_info; + + struct kmem_cache *info_cache; +} g_uvm_leak_checker; + +// Default to byte-count-only leak checking for non-release builds. This can +// always be overridden by the module parameter. +static int uvm_leak_checker = (UVM_IS_DEBUG() || UVM_IS_DEVELOP()) ? + UVM_KVMALLOC_LEAK_CHECK_BYTES : + UVM_KVMALLOC_LEAK_CHECK_NONE; + +module_param(uvm_leak_checker, int, S_IRUGO); +MODULE_PARM_DESC(uvm_leak_checker, + "Enable uvm memory leak checking. " + "0 = disabled, 1 = count total bytes allocated and freed, 2 = per-allocation origin tracking."); + +NV_STATUS uvm_kvmalloc_init(void) +{ + if (uvm_leak_checker >= UVM_KVMALLOC_LEAK_CHECK_ORIGIN) { + spin_lock_init(&g_uvm_leak_checker.lock); + uvm_rb_tree_init(&g_uvm_leak_checker.allocation_info); + + g_uvm_leak_checker.info_cache = NV_KMEM_CACHE_CREATE("uvm_kvmalloc_info_t", uvm_kvmalloc_info_t); + if (!g_uvm_leak_checker.info_cache) + return NV_ERR_NO_MEMORY; + } + + g_malloc_initialized = true; + return NV_OK; +} + +void uvm_kvmalloc_exit(void) +{ + if (!g_malloc_initialized) + return; + + if (atomic_long_read(&g_uvm_leak_checker.bytes_allocated) > 0) { + printk(KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX "!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"); + printk(KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX "Memory leak of %lu bytes detected.%s\n", + atomic_long_read(&g_uvm_leak_checker.bytes_allocated), + uvm_leak_checker < UVM_KVMALLOC_LEAK_CHECK_ORIGIN ? + " insmod with uvm_leak_checker=2 for detailed information." : + ""); + printk(KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX "!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"); + + if (g_uvm_global.unload_state.ptr) + *g_uvm_global.unload_state.ptr |= UVM_TEST_UNLOAD_STATE_MEMORY_LEAK; + } + + if (uvm_leak_checker >= UVM_KVMALLOC_LEAK_CHECK_ORIGIN) { + uvm_rb_tree_node_t *node, *next; + + uvm_rb_tree_for_each_safe(node, next, &g_uvm_leak_checker.allocation_info) { + uvm_kvmalloc_info_t *info = container_of(node, uvm_kvmalloc_info_t, node); + + printk(KERN_ERR NVIDIA_UVM_PRETTY_PRINTING_PREFIX " Leaked %zu bytes from %s:%d:%s (0x%llx)\n", + uvm_kvsize((void *)((uintptr_t)info->node.key)), + kbasename(info->file), + info->line, + info->function, + info->node.key); + + // Free so we don't keep eating up memory while debugging. Note that + // this also removes the entry from the table, frees info, and drops + // the allocated bytes count. 
+ uvm_kvfree((void *)((uintptr_t)info->node.key)); + } + + if (atomic_long_read(&g_uvm_leak_checker.untracked_allocations) == 0) + UVM_ASSERT(atomic_long_read(&g_uvm_leak_checker.bytes_allocated) == 0); + + kmem_cache_destroy_safe(&g_uvm_leak_checker.info_cache); + } + + g_malloc_initialized = false; +} + +static void insert_info(uvm_kvmalloc_info_t *info) +{ + NV_STATUS status; + unsigned long irq_flags; + + spin_lock_irqsave(&g_uvm_leak_checker.lock, irq_flags); + status = uvm_rb_tree_insert(&g_uvm_leak_checker.allocation_info, &info->node); + spin_unlock_irqrestore(&g_uvm_leak_checker.lock, irq_flags); + + // We shouldn't have duplicates + UVM_ASSERT(status == NV_OK); +} + +static uvm_kvmalloc_info_t *remove_info(void *p) +{ + uvm_rb_tree_node_t *node; + uvm_kvmalloc_info_t *info = NULL; + unsigned long irq_flags; + + spin_lock_irqsave(&g_uvm_leak_checker.lock, irq_flags); + node = uvm_rb_tree_find(&g_uvm_leak_checker.allocation_info, (NvU64)p); + if (node) + uvm_rb_tree_remove(&g_uvm_leak_checker.allocation_info, node); + spin_unlock_irqrestore(&g_uvm_leak_checker.lock, irq_flags); + + if (!node) { + UVM_ASSERT(atomic_long_read(&g_uvm_leak_checker.untracked_allocations) > 0); + atomic_long_dec(&g_uvm_leak_checker.untracked_allocations); + } + else { + info = container_of(node, uvm_kvmalloc_info_t, node); + UVM_ASSERT(info->node.key == (NvU64)((uintptr_t)p)); + } + return info; +} + +static void alloc_tracking_add(void *p, const char *file, int line, const char *function) +{ + // Add uvm_kvsize(p) instead of size because uvm_kvsize might be larger (due + // to ksize), and uvm_kvfree only knows about uvm_kvsize + size_t size = uvm_kvsize(p); + uvm_kvmalloc_info_t *info; + + UVM_ASSERT(g_malloc_initialized); + + if (ZERO_OR_NULL_PTR(p)) + return; + + atomic_long_add(size, &g_uvm_leak_checker.bytes_allocated); + + if (uvm_leak_checker >= UVM_KVMALLOC_LEAK_CHECK_ORIGIN) { + // Silently ignore OOM errors + info = nv_kmem_cache_zalloc(g_uvm_leak_checker.info_cache, NV_UVM_GFP_FLAGS); + if (!info) { + atomic_long_inc(&g_uvm_leak_checker.untracked_allocations); + return; + } + + info->node.key = (NvU64)p; + info->file = file; + info->function = function; + info->line = line; + + insert_info(info); + } +} + +static void alloc_tracking_remove(void *p) +{ + size_t size = uvm_kvsize(p); + uvm_kvmalloc_info_t *info; + + UVM_ASSERT(g_malloc_initialized); + + if (ZERO_OR_NULL_PTR(p)) + return; + + atomic_long_sub(size, &g_uvm_leak_checker.bytes_allocated); + + if (uvm_leak_checker >= UVM_KVMALLOC_LEAK_CHECK_ORIGIN) { + info = remove_info(p); + if (info) + kmem_cache_free(g_uvm_leak_checker.info_cache, info); + } +} + +static uvm_vmalloc_hdr_t *get_hdr(void *p) +{ + uvm_vmalloc_hdr_t *hdr; + UVM_ASSERT(is_vmalloc_addr(p)); + hdr = container_of(p, uvm_vmalloc_hdr_t, ptr); + UVM_ASSERT(hdr->alloc_size > UVM_KMALLOC_THRESHOLD); + return hdr; +} + +static void *alloc_internal(size_t size, bool zero_memory) +{ + uvm_vmalloc_hdr_t *hdr; + + // Make sure that the allocation pointer is suitably-aligned for a natively- + // sized allocation. 
+ BUILD_BUG_ON(offsetof(uvm_vmalloc_hdr_t, ptr) != sizeof(void *)); + + // Make sure that (sizeof(hdr) + size) is what it should be + BUILD_BUG_ON(sizeof(uvm_vmalloc_hdr_t) != offsetof(uvm_vmalloc_hdr_t, ptr)); + + if (size <= UVM_KMALLOC_THRESHOLD) { + if (zero_memory) + return kzalloc(size, NV_UVM_GFP_FLAGS); + return kmalloc(size, NV_UVM_GFP_FLAGS); + } + + if (zero_memory) + hdr = vzalloc(sizeof(*hdr) + size); + else + hdr = vmalloc(sizeof(*hdr) + size); + + if (!hdr) + return NULL; + + hdr->alloc_size = size; + return hdr->ptr; +} + +void *__uvm_kvmalloc(size_t size, const char *file, int line, const char *function) +{ + void *p = alloc_internal(size, false); + + if (uvm_leak_checker && p) + alloc_tracking_add(p, file, line, function); + + return p; +} + +void *__uvm_kvmalloc_zero(size_t size, const char *file, int line, const char *function) +{ + void *p = alloc_internal(size, true); + + if (uvm_leak_checker && p) + alloc_tracking_add(p, file, line, function); + + return p; +} + +void uvm_kvfree(void *p) +{ + if (!p) + return; + + if (uvm_leak_checker) + alloc_tracking_remove(p); + + if (is_vmalloc_addr(p)) + vfree(get_hdr(p)); + else + kfree(p); +} + +// Handle reallocs of kmalloc-based allocations +static void *realloc_from_kmalloc(void *p, size_t new_size) +{ + void *new_p; + + // Simple case: kmalloc -> kmalloc + if (new_size <= UVM_KMALLOC_THRESHOLD) + return krealloc(p, new_size, NV_UVM_GFP_FLAGS); + + // kmalloc -> vmalloc + new_p = alloc_internal(new_size, false); + if (!new_p) + return NULL; + memcpy(new_p, p, min(ksize(p), new_size)); + kfree(p); + return new_p; +} + +// Handle reallocs of vmalloc-based allocations +static void *realloc_from_vmalloc(void *p, size_t new_size) +{ + uvm_vmalloc_hdr_t *old_hdr = get_hdr(p); + void *new_p; + + if (new_size == 0) { + vfree(old_hdr); + return ZERO_SIZE_PTR; // What krealloc returns for this case + } + + if (new_size == old_hdr->alloc_size) + return p; + + // vmalloc has no realloc functionality so we need to do a separate alloc + + // copy. + new_p = alloc_internal(new_size, false); + if (!new_p) + return NULL; + + memcpy(new_p, p, min(new_size, old_hdr->alloc_size)); + vfree(old_hdr); + return new_p; +} + +void *__uvm_kvrealloc(void *p, size_t new_size, const char *file, int line, const char *function) +{ + void *new_p; + uvm_kvmalloc_info_t *info = NULL; + size_t old_size; + + if (ZERO_OR_NULL_PTR(p)) + return __uvm_kvmalloc(new_size, file, line, function); + + old_size = uvm_kvsize(p); + + if (uvm_leak_checker) { + // new_size == 0 is a free, so just remove everything + if (new_size == 0) { + alloc_tracking_remove(p); + } + else { + // Remove the old pointer. If the realloc gives us a new pointer + // with the old one still in the tracking table, that pointer could + // be reallocated by another thread before we remove it from the + // table. 
+ atomic_long_sub(old_size, &g_uvm_leak_checker.bytes_allocated); + if (uvm_leak_checker >= UVM_KVMALLOC_LEAK_CHECK_ORIGIN) + info = remove_info(p); + } + } + + if (is_vmalloc_addr(p)) + new_p = realloc_from_vmalloc(p, new_size); + else + new_p = realloc_from_kmalloc(p, new_size); + + if (uvm_leak_checker) { + if (!new_p) { + // The realloc failed, so put the old info back + atomic_long_add(old_size, &g_uvm_leak_checker.bytes_allocated); + if (uvm_leak_checker >= UVM_KVMALLOC_LEAK_CHECK_ORIGIN && info) + insert_info(info); + } + else if (new_size != 0) { + // Drop the old info and insert the new + if (info) + kmem_cache_free(g_uvm_leak_checker.info_cache, info); + alloc_tracking_add(new_p, file, line, function); + } + } + + return new_p; +} + +size_t uvm_kvsize(void *p) +{ + UVM_ASSERT(g_malloc_initialized); + UVM_ASSERT(p); + if (is_vmalloc_addr(p)) + return get_hdr(p)->alloc_size; + return ksize(p); +} diff --git a/kernel-open/nvidia-uvm/uvm_kvmalloc.h b/kernel-open/nvidia-uvm/uvm_kvmalloc.h new file mode 100644 index 000000000..1d4fb7207 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_kvmalloc.h @@ -0,0 +1,75 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_KVMALLOC_H__ +#define __UVM_KVMALLOC_H__ + +#include "uvm_linux.h" +#include "uvm_test_ioctl.h" + +// kmalloc is faster than vmalloc because it doesn't have to remap kernel +// virtual memory, but for that same reason it requires physically-contiguous +// memory. It also supports a native krealloc function which is missing in +// vmalloc. +// +// Therefore the uvm_kvmalloc APIs use kmalloc when possible, but will fall back +// to vmalloc when the allocation size exceeds this UVM_KMALLOC_THRESHOLD. +// +// This value is somewhat arbitrary. kmalloc can support allocations much larger +// than PAGE_SIZE, but the larger the size the higher the chances of allocation +// failure. +// +// This is in the header so callers can use it to inform their allocation sizes +// if they wish. 
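// As a concrete example, with 4 KiB pages the threshold below works out to
// 16 KiB: a 16 KiB uvm_kvmalloc() request is served by kmalloc() (physically
// contiguous, ksize() available), while a request one byte larger falls back
// to vmalloc() plus the size-tracking header described in uvm_kvmalloc.c.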
+#define UVM_KMALLOC_THRESHOLD (4*PAGE_SIZE) + +NV_STATUS uvm_kvmalloc_init(void); +void uvm_kvmalloc_exit(void); + +// Allocating a size of 0 with any of these APIs returns ZERO_SIZE_PTR +void *__uvm_kvmalloc(size_t size, const char *file, int line, const char *function); +void *__uvm_kvmalloc_zero(size_t size, const char *file, int line, const char *function); + +#define uvm_kvmalloc(__size) __uvm_kvmalloc(__size, __FILE__, __LINE__, __FUNCTION__) +#define uvm_kvmalloc_zero(__size) __uvm_kvmalloc_zero(__size, __FILE__, __LINE__, __FUNCTION__) + +void uvm_kvfree(void *p); + +// Follows standard realloc semantics: +// - uvm_kvrealloc(NULL, size) and uvm_kvrealloc(ZERO_SIZE_PTR, size) are each +// equivalent to uvm_kvmalloc(size) +// - uvm_kvrealloc(p, 0) is the same as uvm_kvfree(p), and returns ZERO_SIZE_PTR +void *__uvm_kvrealloc(void *p, size_t new_size, const char *file, int line, const char *function); + +#define uvm_kvrealloc(__p, __new_size) __uvm_kvrealloc(__p, __new_size, __FILE__, __LINE__, __FUNCTION__) + +// Returns the allocation size for a prior allocation from uvm_kvmalloc, +// uvm_kvmalloc_zero, or uvm_kvrealloc. This may be more than the size requested +// in those calls, in which case the extra memory is safe to use. +// +// p must not be NULL. +size_t uvm_kvsize(void *p); + +NV_STATUS uvm_test_kvmalloc(UVM_TEST_KVMALLOC_PARAMS *params, struct file *filp); + +#endif // __UVM_KVMALLOC_H__ diff --git a/kernel-open/nvidia-uvm/uvm_kvmalloc_test.c b/kernel-open/nvidia-uvm/uvm_kvmalloc_test.c new file mode 100644 index 000000000..e17f3b823 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_kvmalloc_test.c @@ -0,0 +1,184 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_kvmalloc.h" +#include "uvm_test.h" + +typedef enum +{ + ALLOC_TYPE_MALLOC, + ALLOC_TYPE_ZALLOC, + ALLOC_TYPE_REALLOC_NULL, + ALLOC_TYPE_REALLOC_ZERO, + ALLOC_TYPE_MAX +} alloc_type_t; + +static NV_STATUS check_alloc(void *p, size_t size) +{ + if (size == 0) { + TEST_CHECK_RET(p == ZERO_SIZE_PTR); + TEST_CHECK_RET(uvm_kvsize(p) == 0); + } + else if (size <= UVM_KMALLOC_THRESHOLD) { + TEST_CHECK_RET(!is_vmalloc_addr(p)); + + // In theory it's possible to use kmalloc yet have ksize(p) be larger + // than our arbitrary UVM_KMALLOC_THRESHOLD. 
In practice, as long as + // UVM_KMALLOC_THRESHOLD is a multiple of PAGE_SIZE, that's highly + // unlikely. + TEST_CHECK_RET(uvm_kvsize(p) == ksize(p)); + TEST_CHECK_RET(uvm_kvsize(p) >= size); + } + else { + TEST_CHECK_RET(is_vmalloc_addr(p)); + TEST_CHECK_RET(uvm_kvsize(p) == size); + } + + return NV_OK; +} + +static NV_STATUS test_uvm_kvmalloc(void) +{ + static const size_t sizes[] = {0, UVM_KMALLOC_THRESHOLD, UVM_KMALLOC_THRESHOLD + 1}; + uint8_t *p; + uint8_t expected; + size_t i, j, size; + alloc_type_t alloc_type; + + for (i = 0; i < ARRAY_SIZE(sizes); i++) { + size = sizes[i]; + for (alloc_type = 0; alloc_type < ALLOC_TYPE_MAX; alloc_type++) { + switch (alloc_type) { + case ALLOC_TYPE_MALLOC: + p = uvm_kvmalloc(size); + break; + case ALLOC_TYPE_ZALLOC: + p = uvm_kvmalloc_zero(size); + break; + case ALLOC_TYPE_REALLOC_NULL: + p = uvm_kvrealloc(NULL, size); + break; + case ALLOC_TYPE_REALLOC_ZERO: + p = uvm_kvrealloc(ZERO_SIZE_PTR, size); + break; + default: + UVM_ASSERT(0); + p = NULL; + } + if (!p) + return NV_ERR_NO_MEMORY; + + // On failure, this macro returns and thus leaks the allocation. But + // if the check fails, our allocation state is messed up so we can't + // reasonably free the allocation anyway. + MEM_NV_CHECK_RET(check_alloc(p, size), NV_OK); + + // Scribble on the allocation to make sure we don't crash + if (alloc_type == ALLOC_TYPE_ZALLOC) { + expected = 0; + } + else { + expected = (uint8_t)(current->pid + i); + memset(p, expected, size); + } + + for (j = 0; j < size; j++) { + if (p[j] != expected) { + UVM_TEST_PRINT("p[%zu] is 0x%x instead of expected value 0x%x\n", j, p[j], expected); + uvm_kvfree(p); + TEST_CHECK_RET(0); + } + } + + uvm_kvfree(p); + } + } + + return NV_OK; +} + +static NV_STATUS test_uvm_kvrealloc(void) +{ + size_t i, j, k, old_size, new_size; + uint8_t *old_p, *new_p; + uint8_t expected = (uint8_t)current->pid; + + static const size_t sizes[] = {0, + UVM_KMALLOC_THRESHOLD / 2, + UVM_KMALLOC_THRESHOLD, + UVM_KMALLOC_THRESHOLD + 1, + 2*UVM_KMALLOC_THRESHOLD}; + + // uvm_kvrealloc(NULL, size) and uvm_kvrealloc(ZERO_SIZE_PTR, size) are + // tested in test_uvm_alloc so we don't have to do them here. + + for (i = 0; i < ARRAY_SIZE(sizes); i++) { + old_size = sizes[i]; + for (j = 0; j < ARRAY_SIZE(sizes); j++) { + new_size = sizes[j]; + + old_p = uvm_kvmalloc(old_size); + if (!old_p) + return NV_ERR_NO_MEMORY; + MEM_NV_CHECK_RET(check_alloc(old_p, old_size), NV_OK); + + ++expected; + memset(old_p, expected, old_size); + + new_p = uvm_kvrealloc(old_p, new_size); + if (!new_p) { + uvm_kvfree(old_p); + return NV_ERR_NO_MEMORY; + } + + // At this point, either new_p == old_p or old_p should have been + // freed. In either case there's no need to free old_p. 
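            // Illustrative only -- the general caller-side pattern this
            // implies is:
            //
            //     new_p = uvm_kvrealloc(old_p, new_size);
            //     if (!new_p)
            //         uvm_kvfree(old_p);  // realloc failed, old_p still valid
            //     else
            //         old_p = new_p;      // old_p consumed, never free again
            //
            // which matches the error handling a few lines above.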
+ + MEM_NV_CHECK_RET(check_alloc(new_p, new_size), NV_OK); + + // Make sure the data is still present + for (k = 0; k < min(new_size, old_size); k++) { + if (new_p[k] != expected) { + UVM_TEST_PRINT("new_p[%zu] is 0x%x instead of expected value 0x%x\n", k, new_p[k], expected); + uvm_kvfree(new_p); + TEST_CHECK_RET(0); + } + } + + // Exercise the free-via-realloc path + TEST_CHECK_RET(uvm_kvrealloc(new_p, 0) == ZERO_SIZE_PTR); + } + } + + return NV_OK; +} + +NV_STATUS uvm_test_kvmalloc(UVM_TEST_KVMALLOC_PARAMS *params, struct file *filp) +{ + NV_STATUS status = test_uvm_kvmalloc(); + if (status != NV_OK) + return status; + return test_uvm_kvrealloc(); +} diff --git a/kernel-open/nvidia-uvm/uvm_linux.c b/kernel-open/nvidia-uvm/uvm_linux.c new file mode 100644 index 000000000..5319ba4ea --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_linux.c @@ -0,0 +1,81 @@ +/******************************************************************************* + Copyright (c) 2013 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_linux.h" +#if UVM_CGROUP_ACCOUNTING_SUPPORTED() +#include +#include +#endif + +// +// uvm_linux.c +// +// This file, along with conftest.h and umv_linux.h, helps to insulate +// the (out-of-tree) UVM driver from changes to the upstream Linux kernel. +// + +#if !defined(NV_ADDRESS_SPACE_INIT_ONCE_PRESENT) +void address_space_init_once(struct address_space *mapping) +{ + memset(mapping, 0, sizeof(*mapping)); + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); + +#if defined(NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK) + // + // The .tree_lock member variable was changed from type rwlock_t, to + // spinlock_t, on 25 July 2008, by mainline commit + // 19fd6231279be3c3bdd02ed99f9b0eb195978064. 
+ // + rwlock_init(&mapping->tree_lock); +#else + spin_lock_init(&mapping->tree_lock); +#endif + + spin_lock_init(&mapping->i_mmap_lock); + INIT_LIST_HEAD(&mapping->private_list); + spin_lock_init(&mapping->private_lock); + INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); + INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); +} +#endif + +#if UVM_CGROUP_ACCOUNTING_SUPPORTED() +void uvm_memcg_context_start(uvm_memcg_context_t *context, struct mm_struct *mm) +{ + memset(context, 0, sizeof(*context)); + if (!mm) + return; + + context->new_memcg = get_mem_cgroup_from_mm(mm); + context->old_memcg = set_active_memcg(context->new_memcg); +} + +void uvm_memcg_context_end(uvm_memcg_context_t *context) +{ + if (!context->new_memcg) + return; + + set_active_memcg(context->old_memcg); + mem_cgroup_put(context->new_memcg); +} +#endif diff --git a/kernel-open/nvidia-uvm/uvm_linux.h b/kernel-open/nvidia-uvm/uvm_linux.h new file mode 100644 index 000000000..693ed5513 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_linux.h @@ -0,0 +1,622 @@ +/******************************************************************************* + Copyright (c) 2013-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +// +// uvm_linux.h +// +// This file, along with conftest.h and umv_linux.c, helps to insulate +// the (out-of-tree) UVM driver from changes to the upstream Linux kernel. 
+// +// + +#ifndef _UVM_LINUX_H +#define _UVM_LINUX_H + +#include "nvtypes.h" + +#include "nv-time.h" + +#define NV_BUILD_MODULE_INSTANCES 0 +#include "nv-linux.h" + +#if defined(NV_LINUX_LOG2_H_PRESENT) +#include +#endif +#if defined(NV_PRIO_TREE_PRESENT) +#include +#endif + +#include +#include +#include + +#if defined(NV_ASM_BARRIER_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_ATOMIC_H_PRESENT) +#include +#endif + +#include + +#include /* get_random_bytes() */ +#include /* Linux kernel radix tree */ + +#include /* fget() */ + +#include + +#if defined(NV_LINUX_PRINTK_H_PRESENT) +#include +#endif + +#if defined(NV_LINUX_RATELIMIT_H_PRESENT) +#include +#endif + +#if defined(NV_PNV_NPU2_INIT_CONTEXT_PRESENT) +#include +#endif + +#if defined(NV_LINUX_SCHED_TASK_STACK_H_PRESENT) +#include +#endif + +#include +#include + +#include "nv-kthread-q.h" + + + #if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 && defined(NV_CPUMASK_OF_NODE_PRESENT) + #define UVM_THREAD_AFFINITY_SUPPORTED() 1 + #else + #define UVM_THREAD_AFFINITY_SUPPORTED() 0 + #endif + + + + +// The ARM arch lacks support for cpumask_of_node() until kernel 4.7. It was +// added via commit1a2db300348b ("arm64, numa: Add NUMA support for arm64 +// platforms.") Callers should either check UVM_THREAD_AFFINITY_SUPPORTED() +// prior to calling this function of be prepared to deal with a NULL CPU +// mask. +static inline const struct cpumask *uvm_cpumask_of_node(int node) +{ +#ifdef NV_CPUMASK_OF_NODE_PRESENT + return cpumask_of_node(node); +#else + return NULL; +#endif +} + + + #if defined(CONFIG_HMM_MIRROR) && defined(CONFIG_DEVICE_PRIVATE) && defined(NV_MAKE_DEVICE_EXCLUSIVE_RANGE_PRESENT) + #define UVM_IS_CONFIG_HMM() 1 + #else + #define UVM_IS_CONFIG_HMM() 0 + #endif + + + + +// Various issues prevent us from using mmu_notifiers in older kernels. These +// include: +// - ->release being called under RCU instead of SRCU: fixed by commit +// 21a92735f660eaecf69a6f2e777f18463760ec32, v3.7 (2012-10-08). +// - Race conditions between mmu_notifier_release and mmu_notifier_unregister: +// fixed by commit d34883d4e35c0a994e91dd847a82b4c9e0c31d83, v3.10 +// (2013-05-24). +// +// Unfortunately these issues aren't conftest-able, so instead we look for the +// presence of the invalidate_range callback in mmu_notifier_ops. This was added +// after all of the above issues were resolved, so we assume the fixes are +// present if we see the callback. +// +// The callback was added in commit 0f0a327fa12cd55de5e7f8c05a70ac3d047f405e, +// v3.19 (2014-11-13). + + #if defined(NV_MMU_NOTIFIER_OPS_HAS_INVALIDATE_RANGE) + #define UVM_CAN_USE_MMU_NOTIFIERS() 1 + #else + #define UVM_CAN_USE_MMU_NOTIFIERS() 0 + #endif + + + + + + + + + +// See bug 1707453 for further details about setting the minimum kernel version. +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) +# error This driver does not support kernels older than 2.6.32! 
+#endif + +#if !defined(VM_RESERVED) +#define VM_RESERVED 0x00000000 +#endif +#if !defined(VM_DONTEXPAND) +#define VM_DONTEXPAND 0x00000000 +#endif +#if !defined(VM_DONTDUMP) +#define VM_DONTDUMP 0x00000000 +#endif +#if !defined(VM_MIXEDMAP) +#define VM_MIXEDMAP 0x00000000 +#endif + +// +// printk.h already defined pr_fmt, so we have to redefine it so the pr_* +// routines pick up our version +// +#undef pr_fmt +#define NVIDIA_UVM_PRETTY_PRINTING_PREFIX "nvidia-uvm: " +#define pr_fmt(fmt) NVIDIA_UVM_PRETTY_PRINTING_PREFIX fmt + +// Dummy printing function that maintains syntax and format specifier checking +// but doesn't print anything and doesn't evaluate the print parameters. This is +// roughly equivalent to the kernel's no_printk function. We use this instead +// because: +// 1) no_printk was not available until 2.6.36 +// 2) Until 4.5 no_printk was implemented as a static function, meaning its +// parameters were always evaluated +#define UVM_NO_PRINT(fmt, ...) \ + do { \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ + } while (0) + +// printk_ratelimited was added in 2.6.33 via commit +// 8a64f336bc1d4aa203b138d29d5a9c414a9fbb47. If not available, we prefer not +// printing anything since it's supposed to be rate-limited. +#if !defined(printk_ratelimited) + #define printk_ratelimited UVM_NO_PRINT +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) + // Just too much compilation trouble with the rate-limiting printk feature + // until about k3.8. Because the non-rate-limited printing will cause + // surprises and problems, just turn it off entirely in this situation. + // + #undef pr_debug_ratelimited + #define pr_debug_ratelimited UVM_NO_PRINT +#endif + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) +#if !defined(pmd_large) +#define pmd_large(_pmd) \ + ((pmd_val(_pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) +#endif +#endif /* defined(NVCPU_X86) || defined(NVCPU_X86_64) */ + +#if !defined(GFP_DMA32) +/* + * GFP_DMA32 is similar to GFP_DMA, but instructs the Linux zone + * allocator to allocate memory from the first 4GB on platforms + * such as Linux/x86-64; the alternative is to use an IOMMU such + * as the one implemented with the K8 GART, if available. + */ +#define GFP_DMA32 0 +#endif + +#if !defined(__GFP_NOWARN) +#define __GFP_NOWARN 0 +#endif + +#if !defined(__GFP_NORETRY) +#define __GFP_NORETRY 0 +#endif + +#define NV_UVM_GFP_FLAGS (GFP_KERNEL | __GFP_NORETRY) + +#if !defined(NV_ADDRESS_SPACE_INIT_ONCE_PRESENT) + void address_space_init_once(struct address_space *mapping); +#endif + +// Develop builds define DEBUG but enable optimization +#if defined(DEBUG) && !defined(NVIDIA_UVM_DEVELOP) + // Wrappers for functions not building correctly without optimizations on, + // implemented in uvm_debug_optimized.c. Notably the file is only built for + // debug builds, not develop or release builds. + + // Unoptimized builds of atomic_xchg() hit a BUILD_BUG() on arm64 as it relies + // on __xchg being completely inlined: + // /usr/src/linux-3.12.19/arch/arm64/include/asm/cmpxchg.h:67:3: note: in expansion of macro 'BUILD_BUG' + // + // Powerppc hits a similar issue, but ends up with an undefined symbol: + // WARNING: "__xchg_called_with_bad_pointer" [...] undefined! + int nv_atomic_xchg(atomic_t *val, int new); + + // Same problem as atomic_xchg() on powerppc: + // WARNING: "__cmpxchg_called_with_bad_pointer" [...] undefined! 
+ int nv_atomic_cmpxchg(atomic_t *val, int old, int new); + + // Same problem as atomic_xchg() on powerppc: + // WARNING: "__cmpxchg_called_with_bad_pointer" [...] undefined! + long nv_atomic_long_cmpxchg(atomic_long_t *val, long old, long new); + + // This Linux kernel commit: + // 2016-08-30 0d025d271e55f3de21f0aaaf54b42d20404d2b23 + // leads to build failures on x86_64, when compiling without optimization. Avoid + // that problem, by providing our own builds of copy_from_user / copy_to_user, + // for debug (non-optimized) UVM builds. Those are accessed via these + // nv_copy_to/from_user wrapper functions. + // + // Bug 1849583 has further details. + unsigned long nv_copy_from_user(void *to, const void __user *from, unsigned long n); + unsigned long nv_copy_to_user(void __user *to, const void *from, unsigned long n); + +#else + #define nv_atomic_xchg atomic_xchg + #define nv_atomic_cmpxchg atomic_cmpxchg + #define nv_atomic_long_cmpxchg atomic_long_cmpxchg + #define nv_copy_to_user copy_to_user + #define nv_copy_from_user copy_from_user +#endif + +#ifndef NV_ALIGN_DOWN +#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1)) +#endif + +#if defined(NVCPU_X86) +/* Some old IA32 kernels don't have 64/64 division routines, + * they only support 64/32 division with do_div(). */ +static inline uint64_t NV_DIV64(uint64_t dividend, uint64_t divisor, uint64_t *remainder) +{ + /* do_div() only accepts a 32-bit divisor */ + *remainder = do_div(dividend, (uint32_t)divisor); + + /* do_div() modifies the dividend in-place */ + return dividend; +} +#else +/* All other 32/64-bit kernels we support (including non-x86 kernels) support + * 64/64 division. */ +static inline uint64_t NV_DIV64(uint64_t dividend, uint64_t divisor, uint64_t *remainder) +{ + *remainder = dividend % divisor; + + return dividend / divisor; +} +#endif + +#if defined(CLOCK_MONOTONIC_RAW) +/* Return a nanosecond-precise value */ +static inline NvU64 NV_GETTIME(void) +{ + struct timespec64 tm; + + ktime_get_raw_ts64(&tm); + return (NvU64) timespec64_to_ns(&tm); +} +#else +/* We can only return a microsecond-precise value with the + * available non-GPL symbols. */ +static inline NvU64 NV_GETTIME(void) +{ + struct timespec64 tm; + + ktime_get_real_ts64(&tm); + return (NvU64) timespec64_to_ns(&tm); +} +#endif + +#if !defined(ilog2) + static inline int NV_ILOG2_U32(u32 n) + { + return fls(n) - 1; + } + static inline int NV_ILOG2_U64(u64 n) + { + return fls64(n) - 1; + } + #define ilog2(n) (sizeof(n) <= 4 ? 
NV_ILOG2_U32(n) : NV_ILOG2_U64(n)) +#endif + +// for_each_bit added in 2.6.24 via commit 3e037454bcfa4b187e8293d2121bd8c0f5a5c31c +// later renamed in 2.6.34 via commit 984b3f5746ed2cde3d184651dabf26980f2b66e5 +#if !defined(for_each_set_bit) + #define for_each_set_bit(bit, addr, size) for_each_bit((bit), (addr), (size)) +#endif + +// for_each_set_bit_cont was added in 3.2 via 1e2ad28f80b4e155678259238f51edebc19e4014 +// It was renamed to for_each_set_bit_from in 3.3 via 307b1cd7ecd7f3dc5ce3d3860957f034f0abe4df +#if !defined(for_each_set_bit_from) + #define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif + +// for_each_clear_bit and for_each_clear_bit_from were added in 3.10 via +// 03f4a8226c2f9c14361f75848d1e93139bab90c4 +#if !defined(for_each_clear_bit) + #define for_each_clear_bit(bit, addr, size) \ + for ((bit) = find_first_zero_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) +#endif + +#if !defined(for_each_clear_bit_from) + #define for_each_clear_bit_from(bit, addr, size) \ + for ((bit) = find_next_zero_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) +#endif + +// bitmap_clear was added in 2.6.33 via commit c1a2a962a2ad103846e7950b4591471fabecece7 +#if !defined(NV_BITMAP_CLEAR_PRESENT) + static inline void bitmap_clear(unsigned long *map, unsigned int start, int len) + { + unsigned int index = start; + for_each_set_bit_from(index, map, start + len) + __clear_bit(index, map); + } + + static inline void bitmap_set(unsigned long *map, unsigned int start, int len) + { + unsigned int index = start; + for_each_clear_bit_from(index, map, start + len) + __set_bit(index, map); + } +#endif + +// Added in 2.6.24 +#ifndef ACCESS_ONCE + #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +// WRITE_ONCE/READ_ONCE have incompatible definitions across versions, which produces warnings. +// Therefore, we define our own macros +#define UVM_WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val)) +#define UVM_READ_ONCE(x) ACCESS_ONCE(x) + +// smp_mb__before_atomic was added in 3.16, provide a fallback +#ifndef smp_mb__before_atomic + #if NVCPU_IS_X86 || NVCPU_IS_X86_64 + // That's what the kernel does for x86 + #define smp_mb__before_atomic() barrier() + #else + // That's what the kernel does for at least arm32, arm64 and powerpc as of 4.3 + #define smp_mb__before_atomic() smp_mb() + #endif +#endif + +// smp_mb__after_atomic was added in 3.16, provide a fallback +#ifndef smp_mb__after_atomic + #if NVCPU_IS_X86 || NVCPU_IS_X86_64 + // That's what the kernel does for x86 + #define smp_mb__after_atomic() barrier() + #else + // That's what the kernel does for at least arm32, arm64 and powerpc as of 4.3 + #define smp_mb__after_atomic() smp_mb() + #endif +#endif + +// smp_load_acquire and smp_store_release were added in commit +// 47933ad41a86a4a9b50bed7c9b9bd2ba242aac63 ("arch: Introduce +// smp_load_acquire(), smp_store_release()") in v3.14 (2013-11-06). 
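// Illustrative pairing (sketch only; shared_data, ready and consume() are
// hypothetical): the usual producer/consumer idiom is
//
//     /* producer */                    /* consumer */
//     shared_data = ...;                if (smp_load_acquire(&ready))
//     smp_store_release(&ready, 1);         consume(shared_data);
//
// The release orders the data write before the flag write and the acquire
// orders the flag read before the data read. The fallbacks below provide the
// same guarantee, conservatively, with full smp_mb() barriers.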
+#ifndef smp_load_acquire + #define smp_load_acquire(p) \ + ({ \ + typeof(*(p)) __v = UVM_READ_ONCE(*(p)); \ + smp_mb(); \ + __v; \ + }) +#endif + +#ifndef smp_store_release + #define smp_store_release(p, v) \ + do { \ + smp_mb(); \ + UVM_WRITE_ONCE(*(p), v); \ + } while (0) +#endif + +// atomic_read_acquire and atomic_set_release were added in commit +// 654672d4ba1a6001c365833be895f9477c4d5eab ("locking/atomics: +// Add _{acquire|release|relaxed}() variants of some atomic operations") in v4.3 +// (2015-08-06). +#ifndef atomic_read_acquire + #define atomic_read_acquire(p) smp_load_acquire(&(p)->counter) +#endif + +#ifndef atomic_set_release + #define atomic_set_release(p, v) smp_store_release(&(p)->counter, v) +#endif + + +// Added in 3.11 +#ifndef PAGE_ALIGNED + #define PAGE_ALIGNED(addr) (((addr) & (PAGE_SIZE - 1)) == 0) +#endif + +// Added in 2.6.37 via commit e1ca7788dec6773b1a2bce51b7141948f2b8bccf +#if !defined(NV_VZALLOC_PRESENT) + static inline void *vzalloc(unsigned long size) + { + void *p = vmalloc(size); + if (p) + memset(p, 0, size); + return p; + } +#endif + +// Changed in 3.17 via commit 743162013d40ca612b4cb53d3a200dff2d9ab26e +#if (NV_WAIT_ON_BIT_LOCK_ARGUMENT_COUNT == 3) + #define UVM_WAIT_ON_BIT_LOCK(word, bit, mode) \ + wait_on_bit_lock(word, bit, mode) +#elif (NV_WAIT_ON_BIT_LOCK_ARGUMENT_COUNT == 4) + static __sched int uvm_bit_wait(void *word) + { + if (signal_pending_state(current->state, current)) + return 1; + schedule(); + return 0; + } + #define UVM_WAIT_ON_BIT_LOCK(word, bit, mode) \ + wait_on_bit_lock(word, bit, uvm_bit_wait, mode) +#else +#error "Unknown number of arguments" +#endif + +static void uvm_init_radix_tree_preloadable(struct radix_tree_root *tree) +{ + // GFP_NOWAIT, or some combination of flags that avoids setting + // __GFP_DIRECT_RECLAIM (__GFP_WAIT prior to commit + // d0164adc89f6bb374d304ffcc375c6d2652fe67d from Nov 2015), is required for + // using radix_tree_preload() for the tree. + INIT_RADIX_TREE(tree, GFP_NOWAIT); +} + +#if !defined(NV_RADIX_TREE_EMPTY_PRESENT) +static bool radix_tree_empty(struct radix_tree_root *tree) +{ + void *dummy; + return radix_tree_gang_lookup(tree, &dummy, 0, 1) == 0; +} +#endif + +// The radix tree root parameter was added to radix_tree_replace_slot in 4.10. +// That same change moved radix_tree_replace_slot from a header-only +// implementation to a .c file, but the symbol wasn't exported until later so +// we cannot use the function on 4.10. UVM uses this macro to ensure that +// radix_tree_replace_slot is not called when using that kernel. +#ifndef NV_RADIX_TREE_REPLACE_SLOT_PRESENT + #define NV_RADIX_TREE_REPLACE_SLOT(...) 
\ + UVM_ASSERT_MSG(false, "radix_tree_replace_slot cannot be used in 4.10\n"); +#else +#if (NV_RADIX_TREE_REPLACE_SLOT_ARGUMENT_COUNT == 2) + #define NV_RADIX_TREE_REPLACE_SLOT(root, slot, entry) \ + radix_tree_replace_slot((slot), (entry)) +#elif (NV_RADIX_TREE_REPLACE_SLOT_ARGUMENT_COUNT == 3) + #define NV_RADIX_TREE_REPLACE_SLOT(root, slot, entry) \ + radix_tree_replace_slot((root), (slot), (entry)) +#else +#error "Unknown number of arguments" +#endif +#endif + +#if !defined(NV_USLEEP_RANGE_PRESENT) +static void __sched usleep_range(unsigned long min, unsigned long max) +{ + unsigned min_msec = min / 1000; + unsigned max_msec = max / 1000; + + if (min_msec != 0) + msleep(min_msec); + else if (max_msec != 0) + msleep(max_msec); + else + msleep(1); +} +#endif + +typedef struct +{ + struct mem_cgroup *new_memcg; + struct mem_cgroup *old_memcg; +} uvm_memcg_context_t; + + + // cgroup support requires set_active_memcg(). set_active_memcg() is an + // inline function that requires int_active_memcg per-cpu symbol when called + // from interrupt context. int_active_memcg is only exported by commit + // c74d40e8b5e2a on >= 5.14 kernels. + #if NV_IS_EXPORT_SYMBOL_PRESENT_int_active_memcg + #define UVM_CGROUP_ACCOUNTING_SUPPORTED() 1 + #define NV_UVM_GFP_FLAGS_ACCOUNT (NV_UVM_GFP_FLAGS | __GFP_ACCOUNT) + + // Begin a Cgroup accounting context. + // All sysmem page allocations done with NV_UVM_ACCOUNT_GFP_FLAGS will be + // charged to the mm's memory control group. + // + // If mm is NULL, the accounting context will not be switched. Please, note + // that in this case, any allocations which include NV_UVM_ACCOUNT_GFP_FLAGS + // will be charged to the currently active context. + // + // Locking: uvm_memcg_context_t does not maintain its own locking. Callers must + // ensure that concurrent calls do not operate on the same context. + void uvm_memcg_context_start(uvm_memcg_context_t *context, struct mm_struct *mm); + + // End the Cgroup accounting context started with uvm_mem_memcg_context_start(). + // After this call, the previously active memory control group will be restored. + // + // Locking: Callers must ensure that concurrent calls do not operate on the same + // context. + void uvm_memcg_context_end(uvm_memcg_context_t *context); + #else // !NV_IS_EXPORT_SYMBOL_PRESENT_int_active_memcg + #define UVM_CGROUP_ACCOUNTING_SUPPORTED() 0 + #define NV_UVM_GFP_FLAGS_ACCOUNT (NV_UVM_GFP_FLAGS) + + static inline void uvm_memcg_context_start(uvm_memcg_context_t *context, struct mm_struct *mm) + { + return; + } + + static inline void uvm_memcg_context_end(uvm_memcg_context_t *context) + { + return; + } + #endif // NV_IS_EXPORT_SYMBOL_PRESENT_int_active_memcg + + + + + + + + + + + + + + + + + + + + + + + + + +// Commit 1dff8083a024650c75a9c961c38082473ceae8cf (v4.7). +// +// Archs with CONFIG_MMU should have their own page.h, and can't include +// asm-generic/page.h. However, x86, powerpc, arm64 don't define page_to_virt() +// macro in their version of page.h. 
+#include +#ifndef page_to_virt + #include + #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) +#endif +#endif // _UVM_LINUX_H diff --git a/kernel-open/nvidia-uvm/uvm_linux_ioctl.h b/kernel-open/nvidia-uvm/uvm_linux_ioctl.h new file mode 100644 index 000000000..d3d5f070d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_linux_ioctl.h @@ -0,0 +1,42 @@ +/******************************************************************************* + Copyright (c) 2013 NVidia Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef _UVM_LINUX_IOCTL_H +#define _UVM_LINUX_IOCTL_H + +#include "uvm_ioctl.h" + +// This ioctl must be the first operation performed on the UVM file descriptor +// after opening it. Until this ioctl is made, the UVM file descriptor is +// inoperable: all other ioctls will return NV_ERR_ILLEGAL_ACTION and mmap will +// return EBADFD. +#define UVM_INITIALIZE 0x30000001 + +typedef struct +{ + NvU64 flags NV_ALIGN_BYTES(8); // IN + NV_STATUS rmStatus; // OUT +} UVM_INITIALIZE_PARAMS; + +#define UVM_DEINITIALIZE 0x30000002 + +#endif // _UVM_LINUX_IOCTL_H diff --git a/kernel-open/nvidia-uvm/uvm_lock.c b/kernel-open/nvidia-uvm/uvm_lock.c new file mode 100644 index 000000000..3b89da612 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_lock.c @@ -0,0 +1,380 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_lock.h" +#include "uvm_thread_context.h" +#include "uvm_kvmalloc.h" + +const char *uvm_lock_order_to_string(uvm_lock_order_t lock_order) +{ + + + + BUILD_BUG_ON(UVM_LOCK_ORDER_COUNT != 26); + + + switch (lock_order) { + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_INVALID); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_GLOBAL_PM); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_GLOBAL); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_ISR); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_MMAP_LOCK); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_VA_SPACES_LIST); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_VA_SPACE_SERIALIZE_WRITERS); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_VA_SPACE_READ_ACQUIRE_WRITE_RELEASE_LOCK); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_VA_SPACE); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_EXT_RANGE_TREE); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_GPU_SEMAPHORE_POOL); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_RM_API); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_RM_GPUS); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_VA_BLOCK); + + + + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CHUNK_MAPPING); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PAGE_TREE); + + + + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PUSH); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PMM); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PMM_PMA); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_PMM_ROOT_CHUNK); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_CHANNEL); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_TOOLS_VA_SPACE_LIST); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_VA_SPACE_EVENTS); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_VA_SPACE_TOOLS); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_SEMA_POOL_TRACKER); + UVM_ENUM_STRING_CASE(UVM_LOCK_ORDER_LEAF); + UVM_ENUM_STRING_DEFAULT(); + } +} + +bool __uvm_record_lock(void *lock, uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + bool correct = true; + uvm_lock_order_t conflicting_order; + uvm_thread_context_lock_t *uvm_context = uvm_thread_context_lock_get(); + uvm_lock_flags_t mode_flags = (flags & UVM_LOCK_FLAGS_MODE_MASK); + bool trylock = (flags & UVM_LOCK_FLAGS_TRYLOCK); + + UVM_ASSERT(mode_flags == UVM_LOCK_FLAGS_MODE_EXCLUSIVE || mode_flags == UVM_LOCK_FLAGS_MODE_SHARED); + + if (!uvm_context) { + UVM_ERR_PRINT("Failed to acquire the thread context when recording lock of %s\n", + uvm_lock_order_to_string(lock_order)); + return false; + } + + if (uvm_context->skip_lock_tracking > 0) + return true; + + if (lock_order == UVM_LOCK_ORDER_INVALID) { + UVM_ERR_PRINT("Acquiring a lock (0x%llx) with an invalid lock order\n", (NvU64)lock); + return false; + } + + // TODO: Bug 1799173: Hack in special rules for the RM locks so we don't add + // any new invalid uses while we figure out a better way to handle + // these dependencies. 
+ if (lock_order == UVM_LOCK_ORDER_RM_GPUS) { + if (test_bit(UVM_LOCK_ORDER_MMAP_LOCK, uvm_context->acquired_lock_orders)) { + UVM_ERR_PRINT("Acquiring RM GPU lock with mmap_lock held\n"); + correct = false; + } + + if (test_bit(UVM_LOCK_ORDER_VA_SPACE, uvm_context->exclusive_acquired_lock_orders)) { + UVM_ERR_PRINT("Acquiring RM GPU lock with VA space lock held in write mode\n"); + correct = false; + } + else if (test_bit(UVM_LOCK_ORDER_VA_SPACE, uvm_context->acquired_lock_orders) && + !test_bit(UVM_LOCK_ORDER_VA_SPACE_SERIALIZE_WRITERS, uvm_context->acquired_lock_orders)) { + UVM_ERR_PRINT("Acquiring RM GPU lock with the VA space lock held in read mode, but without the VA space writer serialization lock held\n"); + correct = false; + } + } + + conflicting_order = find_next_bit(uvm_context->acquired_lock_orders, UVM_LOCK_ORDER_COUNT, lock_order); + if (conflicting_order != UVM_LOCK_ORDER_COUNT) { + if (trylock) { + // If the lock attempt is a trylock, i.e. non-blocking, then + // out-of-order lock acquisition is acceptable. Record it + // to enable __uvm_record_unlock() to skip enforcing in-order + // lock release for this lock order. + __set_bit(lock_order, uvm_context->out_of_order_acquired_lock_orders); + } else { + correct = false; + // Equivalent order is not necessarily incorrect. However, it is not yet supported, + // and is therefore treated as an error case. + UVM_ERR_PRINT("Already acquired equivalent or deeper lock %s when trying to acquire %s\n", + uvm_lock_order_to_string(conflicting_order), + uvm_lock_order_to_string(lock_order)); + } + } + + __set_bit(lock_order, uvm_context->acquired_lock_orders); + + if (mode_flags == UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + __set_bit(lock_order, uvm_context->exclusive_acquired_lock_orders); + + uvm_context->acquired[lock_order] = lock; + + return correct; +} + +bool __uvm_record_unlock(void *lock, uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + bool correct = true; + uvm_thread_context_lock_t *uvm_context = uvm_thread_context_lock_get(); + uvm_lock_flags_t mode_flags = (flags & UVM_LOCK_FLAGS_MODE_MASK); + bool exclusive = (mode_flags == UVM_LOCK_FLAGS_MODE_EXCLUSIVE); + bool out_of_order = (flags & UVM_LOCK_FLAGS_OUT_OF_ORDER); + + UVM_ASSERT(mode_flags == UVM_LOCK_FLAGS_MODE_EXCLUSIVE || mode_flags == UVM_LOCK_FLAGS_MODE_SHARED); + + if (!uvm_context) { + UVM_ERR_PRINT("Failed to acquire the thread context when recording unlock of %s\n", + uvm_lock_order_to_string(lock_order)); + return false; + } + + if (uvm_context->skip_lock_tracking > 0) + return true; + + if (lock_order == UVM_LOCK_ORDER_INVALID) { + UVM_ERR_PRINT("Releasing a lock (0x%llx) with an invalid lock order\n", (NvU64)lock); + return false; + } + + // Releasing a lock out of order is not incorrect, but often points to + // issues. Consider it an error by default, unless the lock was + // legally acquired out-of-order via trylock, in which case out-of-order + // lock release is expected. But also give an option to opt out of + // enforcing in-order lock release, if needed. 
+ if (!__test_and_clear_bit(lock_order, uvm_context->out_of_order_acquired_lock_orders) && !out_of_order) { + uvm_lock_order_t deeper_order = find_next_bit(uvm_context->acquired_lock_orders, + UVM_LOCK_ORDER_COUNT, lock_order + 1); + if (deeper_order != UVM_LOCK_ORDER_COUNT) { + correct = false; + UVM_ERR_PRINT("Releasing lock %s while still holding %s\n", + uvm_lock_order_to_string(lock_order), + uvm_lock_order_to_string(deeper_order)); + } + } + + if (!__test_and_clear_bit(lock_order, uvm_context->acquired_lock_orders)) { + correct = false; + UVM_ERR_PRINT("Releasing lock %s that's not held\n", uvm_lock_order_to_string(lock_order)); + } + else if (uvm_context->acquired[lock_order] != lock) { + correct = false; + UVM_ERR_PRINT("Releasing a different instance of lock %s than held, held 0x%llx releasing 0x%llx\n", + uvm_lock_order_to_string(lock_order), + (NvU64)uvm_context->acquired[lock_order], + (NvU64)lock); + } + else if (!!__test_and_clear_bit(lock_order, uvm_context->exclusive_acquired_lock_orders) != exclusive) { + correct = false; + UVM_ERR_PRINT("Releasing lock %s as %s while it was acquired as %s\n", + uvm_lock_order_to_string(lock_order), + exclusive ? "exclusive" : "shared", exclusive ? "shared" : "exclusive"); + } + uvm_context->acquired[lock_order] = NULL; + + return correct; +} + +bool __uvm_record_downgrade(void *lock, uvm_lock_order_t lock_order) +{ + uvm_thread_context_lock_t *uvm_context = uvm_thread_context_lock_get(); + + if (!uvm_context) { + UVM_ERR_PRINT("Failed to acquire the thread context when recording downgrade of %s\n", + uvm_lock_order_to_string(lock_order)); + return false; + } + + if (uvm_context->skip_lock_tracking > 0) + return true; + + if (!__uvm_check_locked(lock, lock_order, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)) { + UVM_ERR_PRINT("Lock %s is not held in exclusive mode: downgrading failed\n", + uvm_lock_order_to_string(lock_order)); + return false; + } + + clear_bit(lock_order, uvm_context->exclusive_acquired_lock_orders); + return true; +} + +bool __uvm_check_locked(void *lock, uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + uvm_thread_context_lock_t *uvm_context = uvm_thread_context_lock_get(); + uvm_lock_flags_t mode_flags = (flags & UVM_LOCK_FLAGS_MODE_MASK); + bool exclusive = (mode_flags == UVM_LOCK_FLAGS_MODE_EXCLUSIVE); + + if (!uvm_context) { + UVM_ERR_PRINT("Failed to acquire the thread context when checking that lock %s is locked\n", + uvm_lock_order_to_string(lock_order)); + return false; + } + + if (uvm_context->skip_lock_tracking > 0) + return true; + + if (!test_bit(lock_order, uvm_context->acquired_lock_orders)) { + UVM_ERR_PRINT("No lock with order %s acquired at all\n", uvm_lock_order_to_string(lock_order)); + return false; + } + if (uvm_context->acquired[lock_order] != lock) { + UVM_ERR_PRINT("Different instance of lock %s acquired, 0x%llx != 0x%llx\n", + uvm_lock_order_to_string(lock_order), + (NvU64)lock, + (NvU64)uvm_context->acquired[lock_order]); + return false; + } + + if (mode_flags != UVM_LOCK_FLAGS_MODE_ANY && + !!test_bit(lock_order, uvm_context->exclusive_acquired_lock_orders) != exclusive) { + UVM_ERR_PRINT("Lock %s acquired in %s mode instead of %s mode\n", + uvm_lock_order_to_string(lock_order), + exclusive ? "shared" : "exclusive", exclusive ? 
"exclusive" : "shared"); + return false; + } + + return true; +} + +bool __uvm_locking_initialized(void) +{ + return uvm_thread_context_global_initialized(); +} + +bool __uvm_check_lockable_order(uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + uvm_lock_order_t conflicting_order; + uvm_thread_context_lock_t *uvm_context = uvm_thread_context_lock_get(); + bool trylock = (flags & UVM_LOCK_FLAGS_TRYLOCK); + + if (!uvm_context) + return true; + + if (uvm_context->skip_lock_tracking > 0) + return true; + + if (lock_order == UVM_LOCK_ORDER_INVALID) { + UVM_ERR_PRINT("Checking for an invalid lock order\n"); + return false; + } + + if (!trylock) { + conflicting_order = find_next_bit(uvm_context->acquired_lock_orders, UVM_LOCK_ORDER_COUNT, lock_order); + if (conflicting_order != UVM_LOCK_ORDER_COUNT) { + UVM_ERR_PRINT("Acquired equivalent or deeper lock %s when checking that %s is lockable\n", + uvm_lock_order_to_string(conflicting_order), + uvm_lock_order_to_string(lock_order)); + return false; + } + } + + return true; +} + +bool __uvm_check_unlocked_order(uvm_lock_order_t lock_order) +{ + uvm_thread_context_lock_t *uvm_context = uvm_thread_context_lock_get(); + if (!uvm_context) + return true; + + if (uvm_context->skip_lock_tracking > 0) + return true; + + if (lock_order == UVM_LOCK_ORDER_INVALID) { + UVM_ERR_PRINT("Checking for an invalid lock order\n"); + return false; + } + + if (test_bit(lock_order, uvm_context->acquired_lock_orders)) { + UVM_ERR_PRINT("Lock order %s acquired\n", uvm_lock_order_to_string(lock_order)); + return false; + } + return true; +} + +bool __uvm_check_all_unlocked(uvm_thread_context_lock_t *uvm_context) +{ + uvm_lock_order_t lock_order; + NvU32 still_locked_count; + + if (!uvm_context) + return true; + + still_locked_count = bitmap_weight(uvm_context->acquired_lock_orders, UVM_LOCK_ORDER_COUNT); + if (still_locked_count == 0) + return true; + + UVM_ERR_PRINT("Still %u acquired lock(s):\n", still_locked_count); + + for_each_set_bit(lock_order, uvm_context->acquired_lock_orders, UVM_LOCK_ORDER_COUNT) { + UVM_ERR_PRINT(" Lock %s, instance 0x%llx\n", + uvm_lock_order_to_string(lock_order), + (NvU64)uvm_context->acquired[lock_order]); + } + + return false; +} + +bool __uvm_thread_check_all_unlocked() +{ + return __uvm_check_all_unlocked(uvm_thread_context_lock_get()); +} + +NV_STATUS uvm_bit_locks_init(uvm_bit_locks_t *bit_locks, size_t count, uvm_lock_order_t lock_order) +{ + // TODO: Bug 1772140: Notably bit locks currently do not work on memory + // allocated through vmalloc() (including big allocations created with + // uvm_kvmalloc()). The problem is the bit_waitqueue() helper used by the + // kernel internally that uses virt_to_page(). + // To prevent us from using kmalloc() for a huge allocation, warn if the + // allocation size gets bigger than what we are comfortable with for + // kmalloc() in uvm_kvmalloc(). 
+ size_t size = sizeof(unsigned long) * BITS_TO_LONGS(count);
+ WARN_ON_ONCE(size > UVM_KMALLOC_THRESHOLD);
+
+ bit_locks->bits = kzalloc(size, NV_UVM_GFP_FLAGS);
+ if (!bit_locks->bits)
+ return NV_ERR_NO_MEMORY;
+
+#if UVM_IS_DEBUG()
+ uvm_locking_assert_initialized();
+ bit_locks->lock_order = lock_order;
+#endif
+
+ return NV_OK;
+}
+
+void uvm_bit_locks_deinit(uvm_bit_locks_t *bit_locks)
+{
+ kfree(bit_locks->bits);
+ memset(bit_locks, 0, sizeof(*bit_locks));
+}
diff --git a/kernel-open/nvidia-uvm/uvm_lock.h b/kernel-open/nvidia-uvm/uvm_lock.h
new file mode 100644
index 000000000..b2b139230
--- /dev/null
+++ b/kernel-open/nvidia-uvm/uvm_lock.h
@@ -0,0 +1,1169 @@
+/*******************************************************************************
+ Copyright (c) 2015-2022 NVIDIA Corporation
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to
+ deal in the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef __UVM_LOCK_H__
+#define __UVM_LOCK_H__
+
+#include "uvm_forward_decl.h"
+#include "uvm_linux.h"
+#include "uvm_common.h"
+
+// --------------------------- UVM Locking Order ---------------------------- //
+//
+// Any locks described here should have their locking order added to
+// uvm_lock_order_t below.
+//
+// - Global power management lock (g_uvm_global.pm.lock)
+// Order: UVM_LOCK_ORDER_GLOBAL_PM
+// Reader/writer lock (rw_semaphore)
+//
+// Synchronizes user threads with system power management.
+//
+// Taken in read mode by most user-facing UVM driver entry points. Taken
+// in write mode only by uvm_suspend(), and held for the duration of
+// sleep cycles.
+//
+// This lock is special: while it's taken by user-facing entry points,
+// and may be taken before or after mmap_lock, this apparent violation of
+// lock ordering is permissible because pm_lock may only be taken via
+// trylock in read mode by paths which already hold any lower-level
+// locks, as well as by paths subject to the kernel's freezer. Paths
+// taking it must be prepared to back off in case of acquisition failures.
+//
+// This, in turn, is acceptable because the lock is taken in write mode
+// infrequently, and only as part of power management. Starvation is
+// not a concern.
+//
+// The mmap_lock deadlock potential aside, the trylock approach is also
+// motivated by the need to prevent user threads making UVM system calls
+// from blocking when UVM is suspended: when the kernel suspends the
+// system, the freezer employed to stop user tasks requires these tasks
+// to be interruptible.
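//
// As an illustrative sketch only (not taken from the driver sources; the
// helper do_entry_point_work() and the exact status code are assumptions),
// an entry point following the trylock-and-back-off rule described above
// would look roughly like this, using the uvm_rw_semaphore_t wrappers
// defined later in this header:
//
//     NV_STATUS status;
//
//     if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) {
//         // Suspend is in progress: back off instead of blocking, so the
//         // freezer can stop this task, and let the caller retry or fail.
//         return NV_ERR_BUSY_RETRY;
//     }
//
//     status = do_entry_point_work();
//
//     uvm_up_read(&g_uvm_global.pm.lock);
//     return status;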
+//
+// - Global driver state lock (g_uvm_global.global_lock)
+// Order: UVM_LOCK_ORDER_GLOBAL
+// Exclusive lock (mutex)
+//
+// This protects state associated with GPUs, such as the P2P table
+// and instance pointer mappings.
+//
+// This should be taken whenever global GPU state might need to be modified.
+//
+// - GPU ISR lock
+// Order: UVM_LOCK_ORDER_ISR
+// Exclusive lock (mutex) per gpu
+//
+// Protects:
+// - gpu->parent->isr.replayable_faults.service_lock:
+// Changes to the state of a GPU as it transitions from top-half to bottom-half
+// interrupt handler for replayable faults. This lock is acquired for that GPU,
+// in the ISR top-half. Then a bottom-half is scheduled (to run in a workqueue).
+// Then the bottom-half releases the lock when that GPU's processing appears to
+// be done.
+// - gpu->parent->isr.non_replayable_faults.service_lock:
+// Changes to the state of a GPU in the bottom-half for non-replayable faults.
+// Non-replayable faults are handed off from RM instead of directly from the GPU
+// hardware. This means that we do not keep receiving interrupts after RM pops
+// out the faults from the HW buffer. In order not to miss fault notifications,
+// we will always schedule a bottom-half for non-replayable faults if there are
+// faults ready to be consumed in the buffer, even if there already is some
+// bottom-half running or scheduled. This lock serializes all scheduled bottom
+// halves per GPU which service non-replayable faults.
+// - gpu->parent->isr.access_counters.service_lock:
+// Changes to the state of a GPU as it transitions from top-half to bottom-half
+// interrupt handler for access counter notifications. This lock is acquired for
+// that GPU, in the ISR top-half. Then a bottom-half is scheduled (to run in a
+// workqueue). Then the bottom-half releases the lock when that GPU's processing
+// appears to be done.
+//
+// - mmap_lock (mmap_sem in kernels < 5.8)
+// Order: UVM_LOCK_ORDER_MMAP_LOCK
+// Reader/writer lock (rw_semaphore)
+//
+// We're often called with the kernel already holding mmap_lock: mmap,
+// munmap, CPU fault, etc. These operations may have to take any number of
+// UVM locks, so mmap_lock requires special consideration in the lock
+// order, since it's sometimes out of our control.
+//
+// We need to hold mmap_lock when calling vm_insert_page, which means that
+// any time an operation (such as an ioctl) might need to install a CPU
+// mapping, it must take mmap_lock in read mode very early on.
+//
+// However, current->mm is not necessarily the owning mm of the UVM vma.
+// fork or fd passing via a UNIX domain socket can cause that. Notably, this
+// is also the case when handling GPU faults or doing other operations from
+// a kernel thread. In some cases we have an mm associated with a VA space,
+// and in those cases we lock that mm instead of current->mm. But since we
+// don't always have that luxury, each path specifies the mm to use (either
+// explicitly or via uvm_va_block_context_t::mm). That mm may be NULL.
+// Later on down the stack we look up the UVM vma and compare its mm before
+// operating on that vma.
+//
+// With HMM and ATS, the GPU fault handler takes mmap_lock. GPU faults may
+// block forward progress of threads holding the RM GPUs lock until those
+// faults are serviced, which means that mmap_lock cannot be held when the
+// UVM driver calls into RM. In other words, mmap_lock and the RM GPUs lock
+// are mutually exclusive.
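//
// As an illustrative sketch only (the surrounding operation is hypothetical),
// a path that may need to install CPU mappings takes the mm described above
// rather than assuming current->mm, using the mmap_lock wrappers defined
// later in this header:
//
//     // "mm" is the mm associated with the VA space or supplied by the
//     // caller (see uvm_va_block_context_t::mm above); it may be NULL.
//     if (mm)
//         uvm_down_read_mmap_lock(mm);
//
//     // ... look up the UVM vma, check that vma->vm_mm matches mm, then
//     // install CPU mappings (vm_insert_page) as needed ...
//
//     if (mm)
//         uvm_up_read_mmap_lock(mm);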
+// +// - Global VA spaces list lock +// Order: UVM_LOCK_ORDER_VA_SPACES_LIST +// Mutex which protects g_uvm_global.va_spaces state. +// +// - VA space writer serialization lock (va_space->serialize_writers_lock) +// Order: UVM_LOCK_ORDER_VA_SPACE_SERIALIZE_WRITERS +// Exclusive lock (mutex) per uvm_va_space (UVM struct file) +// +// This lock prevents a deadlock between RM and UVM by only allowing one +// writer to queue up on the VA space lock at a time. +// +// GPU faults are serviced by the UVM bottom half with the VA space lock +// held in read mode. Until they're serviced, these faults may block +// forward progress of RM threads. +// +// This constraint means that the UVM driver cannot call into RM while +// GPU fault servicing is blocked. We may block GPU fault servicing by: +// - Taking the VA space lock in write mode +// - Holding the VA space lock in read mode with a writer pending, since +// Linux rw_semaphores are fair. +// +// Example of the second condition: +// Thread A Thread B UVM BH Thread C +// UVM API call UVM API call GPU fault RM API call +// ------------ ------------ ------------ ------------ +// down_read +// down_write +// // Blocked on A +// down_read +// // Blocked on B +// RM GPU lock +// // Blocked on GPU fault +// RM GPU lock +// // Deadlock +// +// The writer serialization lock works around this by biasing the VA space +// lock towards readers, without causing starvation of writers. Writers and +// readers which will make RM calls take this lock, which prevents them +// from queueing up on the VA space rw_semaphore and blocking the UVM +// bottom half. +// +// TODO: Bug 1799173: A better long-term approach might be to never allow +// RM calls under the VA space lock at all, but that will take a +// larger restructuring. +// +// - VA space serialization of down_read with up_write of the VA space lock +// (va_space->read_acquire_write_release_lock) +// Order: UVM_LOCK_ORDER_VA_SPACE_READ_ACQUIRE_WRITE_RELEASE_LOCK +// Exclusive lock (mutex) per uvm_va_space (UVM struct file) +// +// This lock prevents a deadlock between RM and UVM by preventing any +// interleaving of down_reads on the VA space lock with concurrent +// up_writes/downgrade_writes. The Linux rw_semaphore implementation does +// not guarantee that two readers will always run concurrently, as shown by +// the following interleaving: +// +// Thread A Thread B +// UVM API call UVM BH +// ------------ ------------ +// down_write +// down_read +// // Fails, calls handler +// up_write +// down_read +// // Success +// // Handler sees the lock still active +// // Handler waits for lock to be released +// // Blocked on A +// RM GPU lock +// // Blocked on GPU fault +// +// Given the above interleaving, the kernel's implementation of the +// down_read failure handler running in thread B does not distinguish +// between a reader vs writer holding the lock. From the perspective of all +// other threads, even those which attempt to take the lock for read while +// thread A's reader holds it, a writer is active. Therefore no other +// readers can take the lock, and we result in the same deadlock described +// in the above comments on the VA space writer serialization lock. +// +// This lock prevents any such interleaving: +// - Writers take this lock for the duration of the write lock. +// +// - Readers which do not call into RM only take this lock across the +// down_read call. If a writer holds the lock, the reader would be +// blocked on the VA space lock anyway. 
Concurrent readers will serialize +// the taking of the VA space lock, but they will not be serialized +// across their read sections. +// +// - Readers which call into RM do not need to take this lock. Their +// down_read is already serialized with a writer's up_write by the +// serialize_writers_lock. +// +// - VA space lock (va_space->lock) +// Order: UVM_LOCK_ORDER_VA_SPACE +// Reader/writer lock (rw_semaphore) per uvm_va_space (UVM struct file) +// +// This is the UVM equivalent of mmap_lock. It protects all state under +// that va_space, such as the VA range tree. +// +// Read mode: Faults (CPU and GPU), mapping creation, prefetches. These +// will be serialized at the VA block level if necessary. RM calls are +// allowed only if the VA space serialize_writers_lock is also taken. +// +// Write mode: Modification of the range state such as mmap and changes to +// logical permissions or location preferences. RM calls are never allowed. +// +// - External Allocation Tree lock +// Order: UVM_LOCK_ORDER_EXT_RANGE_TREE +// Exclusive lock (mutex) per external VA range, per GPU. +// +// Protects the per-GPU sub-range tree mappings in each external VA range. +// +// - GPU semaphore pool lock (semaphore_pool->mutex) +// Order: UVM_LOCK_ORDER_GPU_SEMAPHORE_POOL +// Exclusive lock (mutex) per uvm_gpu_semaphore_pool +// +// Protects the state of the semaphore pool. +// +// - RM API lock +// Order: UVM_LOCK_ORDER_RM_API +// Exclusive lock +// +// This is an internal RM lock that's acquired by most if not all UVM-RM +// APIs. +// Notably this lock is also held on PMA eviction. +// +// - RM GPUs lock +// Order: UVM_LOCK_ORDER_RM_GPUS +// Exclusive lock +// +// This is an internal RM lock that's acquired by most if not all UVM-RM +// APIs and disables interrupts for the GPUs. +// Notably this lock is *not* held on PMA eviction. +// +// - VA block lock (va_block->lock) +// Order: UVM_LOCK_ORDER_VA_BLOCK +// Exclusive lock (mutex) +// +// Protects: +// - CPU and GPU page table mappings for all VAs under the block +// - Updates to the GPU work tracker for that block (migrations) +// +// Operations allowed while holding the lock: +// - CPU allocation (we don't evict CPU memory) +// - GPU memory allocation which cannot evict +// - CPU page table mapping/unmapping +// - Pushing work (GPU page table mapping/unmapping) +// +// Operations not allowed while holding the lock: +// - GPU memory allocation which can evict memory (would require nesting +// block locks) + + + + + + + + + + +// - Chunk mapping lock (gpu->root_chunk_mappings.bitlocks and +// gpu->sysmem_mappings.bitlock) +// Order: UVM_LOCK_ORDER_CHUNK_MAPPING +// Exclusive bitlock (mutex) per each root chunk, or physical sysmem +// segment. +// +// A chunk mapping lock is used to enforce serialization when updating +// kernel mappings of GPU root chunks (vidmem), or CPU chunks (sysmem). +// The VA block lock is usually held during the mapping operation. +// +// In the case of vidmem, each lock in the bitlock array serializes the +// mapping and unmapping of a single GPU root chunk. If serialization +// is required to update a root chunk, but no mappings are involved, use +// the PMM root chunk lock (order UVM_LOCK_ORDER_PMM_ROOT_CHUNK) instead. +// +// In the case of sysmem, each lock in the array serializes the mapping +// of a large segment of system address space: the locking granularity is +// significantly coarser than the CPU chunk size. 
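//
// As an illustrative sketch only (the chunk_index computation is
// hypothetical; the lock names follow the descriptions above), a vidmem
// mapping update nests a chunk mapping bit lock under the VA block lock,
// matching their relative lock orders:
//
//     uvm_mutex_lock(&va_block->lock);                                // UVM_LOCK_ORDER_VA_BLOCK
//     uvm_bit_lock(&gpu->root_chunk_mappings.bitlocks, chunk_index);  // UVM_LOCK_ORDER_CHUNK_MAPPING
//
//     // ... update the kernel mapping of the root chunk ...
//
//     uvm_bit_unlock(&gpu->root_chunk_mappings.bitlocks, chunk_index);
//     uvm_mutex_unlock(&va_block->lock);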
+// +// - Page tree lock +// Order: UVM_LOCK_ORDER_PAGE_TREE +// Exclusive lock per GPU page tree +// +// This protects a page tree. All modifications to the device's page tree +// and the host-side cache of that tree must be done under this lock. +// The host-side cache and device state must be consistent when this lock +// is released +// +// Operations allowed while holding this lock +// - Pushing work +// +// Operations not allowed while holding this lock +// - GPU memory allocation which can evict +// + + + + + + + + + + + + + + +// - Concurrent push semaphore +// Order: UVM_LOCK_ORDER_PUSH +// Semaphore (uvm_semaphore_t) +// +// This is a semaphore limiting the amount of concurrent pushes that is +// held for the duration of a push (between uvm_push_begin*() and +// uvm_push_end()). +// +// - PMM GPU lock (pmm->lock) +// Order: UVM_LOCK_ORDER_PMM +// Exclusive lock (mutex) per uvm_pmm_gpu_t +// +// Protects the state of PMM - internal to PMM. +// +// - PMM GPU PMA lock (pmm->pma_lock) +// Order: UVM_LOCK_ORDER_PMM_PMA +// Reader/writer lock (rw_semaphore) per per uvm_pmm_gpu_t +// +// Lock internal to PMM for synchronizing allocations from PMA with +// PMA eviction. +// +// - PMM root chunk lock (pmm->root_chunks.bitlocks) +// Order: UVM_LOCK_ORDER_PMM_ROOT_CHUNK +// Exclusive bitlock (mutex) per each root chunk internal to PMM. +// +// - Channel lock +// Order: UVM_LOCK_ORDER_CHANNEL +// Spinlock (uvm_spinlock_t) +// +// - Tools global VA space list lock (g_tools_va_space_list_lock) +// Order: UVM_LOCK_ORDER_TOOLS_VA_SPACE_LIST +// Reader/writer lock (rw_sempahore) +// +// This lock protects the list of VA spaces used when broadcasting +// UVM profiling events. +// +// - VA space events +// Order: UVM_LOCK_ORDER_VA_SPACE_EVENTS +// Reader/writer lock (rw_semaphore) per uvm_perf_va_space_events_t. +// serializes perf callbacks with event register/unregister. It's separate +// from the VA space lock so it can be taken on the eviction path. +// +// - VA space tools +// Order: UVM_LOCK_ORDER_VA_SPACE_TOOLS +// Reader/writer lock (rw_semaphore) per uvm_va_space_t. Serializes tools +// reporting with tools register/unregister. Since some of the tools +// events come from perf events, both VA_SPACE_EVENTS and VA_SPACE_TOOLS +// must be taken to register/report some tools events. +// +// - Leaf locks +// Order: UVM_LOCK_ORDER_LEAF +// +// All leaf locks. 
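//
// As an illustrative sketch only (stats_lock is a hypothetical example), a
// lock under which no other UVM locks are ever taken would typically be
// registered with the leaf order:
//
//     uvm_spinlock_t stats_lock;
//
//     uvm_spin_lock_init(&stats_lock, UVM_LOCK_ORDER_LEAF);
//
//     uvm_spin_lock(&stats_lock);
//     // ... touch only the state protected by this lock ...
//     uvm_spin_unlock(&stats_lock);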
+// +// -------------------------------------------------------------------------- // + +// Remember to add any new lock orders to uvm_lock_order_to_string() in +// uvm_lock.c +typedef enum +{ + UVM_LOCK_ORDER_INVALID = 0, + UVM_LOCK_ORDER_GLOBAL_PM, + UVM_LOCK_ORDER_GLOBAL, + UVM_LOCK_ORDER_ISR, + UVM_LOCK_ORDER_MMAP_LOCK, + UVM_LOCK_ORDER_VA_SPACES_LIST, + UVM_LOCK_ORDER_VA_SPACE_SERIALIZE_WRITERS, + UVM_LOCK_ORDER_VA_SPACE_READ_ACQUIRE_WRITE_RELEASE_LOCK, + UVM_LOCK_ORDER_VA_SPACE, + UVM_LOCK_ORDER_EXT_RANGE_TREE, + UVM_LOCK_ORDER_GPU_SEMAPHORE_POOL, + UVM_LOCK_ORDER_RM_API, + UVM_LOCK_ORDER_RM_GPUS, + UVM_LOCK_ORDER_VA_BLOCK, + + + + UVM_LOCK_ORDER_CHUNK_MAPPING, + UVM_LOCK_ORDER_PAGE_TREE, + + + + UVM_LOCK_ORDER_PUSH, + UVM_LOCK_ORDER_PMM, + UVM_LOCK_ORDER_PMM_PMA, + UVM_LOCK_ORDER_PMM_ROOT_CHUNK, + UVM_LOCK_ORDER_CHANNEL, + UVM_LOCK_ORDER_TOOLS_VA_SPACE_LIST, + UVM_LOCK_ORDER_VA_SPACE_EVENTS, + UVM_LOCK_ORDER_VA_SPACE_TOOLS, + UVM_LOCK_ORDER_SEMA_POOL_TRACKER, + UVM_LOCK_ORDER_LEAF, + UVM_LOCK_ORDER_COUNT, +} uvm_lock_order_t; + +const char *uvm_lock_order_to_string(uvm_lock_order_t lock_order); + +typedef enum +{ + UVM_LOCK_FLAGS_INVALID = 0, + UVM_LOCK_FLAGS_MODE_EXCLUSIVE = (1 << 0), + UVM_LOCK_FLAGS_MODE_SHARED = (1 << 1), + UVM_LOCK_FLAGS_MODE_ANY = (UVM_LOCK_FLAGS_MODE_EXCLUSIVE | UVM_LOCK_FLAGS_MODE_SHARED), + UVM_LOCK_FLAGS_MODE_MASK = (UVM_LOCK_FLAGS_MODE_EXCLUSIVE | UVM_LOCK_FLAGS_MODE_SHARED), + UVM_LOCK_FLAGS_OUT_OF_ORDER = (1 << 2), + UVM_LOCK_FLAGS_TRYLOCK = (1 << 3), + UVM_LOCK_FLAGS_MASK = (1 << 4) - 1 +} uvm_lock_flags_t; + +// Record locking a lock of given lock_order in exclusive or shared mode, +// distinguishing between trylock and normal acquisition attempts. +// Returns true if the recorded lock follows all the locking rules and false +// otherwise. +bool __uvm_record_lock(void *lock, uvm_lock_order_t lock_order, uvm_lock_flags_t flags); + +// Record unlocking a lock of given lock_order in exclusive or shared mode and +// possibly out of order. +// Returns true if the unlock follows all the locking rules and false otherwise. +bool __uvm_record_unlock(void *lock, uvm_lock_order_t lock_order, uvm_lock_flags_t flags); + +bool __uvm_record_downgrade(void *lock, uvm_lock_order_t lock_order); + +// Check whether a lock of given lock_order is held in exclusive, shared, or +// either mode by the current thread. +bool __uvm_check_locked(void *lock, uvm_lock_order_t lock_order, uvm_lock_flags_t flags); + +// Check that no locks are held with the given lock order +bool __uvm_check_unlocked_order(uvm_lock_order_t lock_order); + +// Check that a lock of the given order can be locked, i.e. that no locks are +// held with the given or deeper lock order. Allow for out-of-order locking +// when checking for a trylock. +bool __uvm_check_lockable_order(uvm_lock_order_t lock_order, uvm_lock_flags_t flags); + +// Check that all locks have been released in a thread context lock +bool __uvm_check_all_unlocked(uvm_thread_context_lock_t *context_lock); + +// Check that all locks have been released in the current thread context lock +bool __uvm_thread_check_all_unlocked(void); + +// Check that the locking infrastructure has been initialized +bool __uvm_locking_initialized(void); + +#if UVM_IS_DEBUG() + // These macros are intended to be expanded on the call site directly and will + // print the precise location of the violation while the __uvm_record* + // functions will error print the details. 
+ #define uvm_record_lock_raw(lock, lock_order, flags) \ + UVM_ASSERT_MSG(__uvm_record_lock((lock), (lock_order), (flags)), "Locking violation\n") + #define uvm_record_unlock_raw(lock, lock_order, flags) \ + UVM_ASSERT_MSG(__uvm_record_unlock((lock), (lock_order), (flags)), "Locking violation\n") + #define uvm_record_downgrade_raw(lock, lock_order) \ + UVM_ASSERT_MSG(__uvm_record_downgrade((lock), (lock_order)), "Locking violation\n") + + // Record UVM lock (a lock that has a lock_order member) operation and assert + // that it's correct + #define uvm_record_lock(lock, flags) \ + uvm_record_lock_raw((lock), (lock)->lock_order, (flags)) + #define uvm_record_unlock(lock, flags) uvm_record_unlock_raw((lock), (lock)->lock_order, (flags)) + #define uvm_record_unlock_out_of_order(lock, flags) \ + uvm_record_unlock_raw((lock), (lock)->lock_order, (flags) | UVM_LOCK_FLAGS_OUT_OF_ORDER) + #define uvm_record_downgrade(lock) uvm_record_downgrade_raw((lock), (lock)->lock_order) + + // Check whether a UVM lock (a lock that has a lock_order member) is held in + // the given mode. + #define uvm_check_locked(lock, flags) __uvm_check_locked((lock), (lock)->lock_order, (flags)) + + // Helpers for recording and asserting mmap_lock + // (mmap_sem in kernels < 5.8 ) state + #define uvm_record_lock_mmap_lock_read(mm) \ + uvm_record_lock_raw(nv_mmap_get_lock(mm), UVM_LOCK_ORDER_MMAP_LOCK, UVM_LOCK_FLAGS_MODE_SHARED) + + #define uvm_record_unlock_mmap_lock_read(mm) \ + uvm_record_unlock_raw(nv_mmap_get_lock(mm), UVM_LOCK_ORDER_MMAP_LOCK, UVM_LOCK_FLAGS_MODE_SHARED) + + #define uvm_record_unlock_mmap_lock_read_out_of_order(mm) \ + uvm_record_unlock_raw(nv_mmap_get_lock(mm), UVM_LOCK_ORDER_MMAP_LOCK, \ + UVM_LOCK_FLAGS_MODE_SHARED | UVM_LOCK_FLAGS_OUT_OF_ORDER) + + #define uvm_record_lock_mmap_lock_write(mm) \ + uvm_record_lock_raw(nv_mmap_get_lock(mm), UVM_LOCK_ORDER_MMAP_LOCK, UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + + #define uvm_record_unlock_mmap_lock_write(mm) \ + uvm_record_unlock_raw(nv_mmap_get_lock(mm), UVM_LOCK_ORDER_MMAP_LOCK, UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + + #define uvm_record_unlock_mmap_lock_write_out_of_order(mm) \ + uvm_record_unlock_raw(nv_mmap_get_lock(mm), UVM_LOCK_ORDER_MMAP_LOCK, \ + UVM_LOCK_FLAGS_MODE_EXCLUSIVE | UVM_LOCK_FLAGS_OUT_OF_ORDER) + + #define uvm_check_locked_mmap_lock(mm, flags) \ + __uvm_check_locked(nv_mmap_get_lock(mm), UVM_LOCK_ORDER_MMAP_LOCK, (flags)) + + // Helpers for recording RM API lock usage around UVM-RM interfaces + #define uvm_record_lock_rm_api() \ + uvm_record_lock_raw((void*)UVM_LOCK_ORDER_RM_API, UVM_LOCK_ORDER_RM_API, \ + UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + #define uvm_record_unlock_rm_api() \ + uvm_record_unlock_raw((void*)UVM_LOCK_ORDER_RM_API, UVM_LOCK_ORDER_RM_API, \ + UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + + // Helpers for recording RM GPUS lock usage around UVM-RM interfaces + #define uvm_record_lock_rm_gpus() \ + uvm_record_lock_raw((void*)UVM_LOCK_ORDER_RM_GPUS, UVM_LOCK_ORDER_RM_GPUS, \ + UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + #define uvm_record_unlock_rm_gpus() \ + uvm_record_unlock_raw((void*)UVM_LOCK_ORDER_RM_GPUS, UVM_LOCK_ORDER_RM_GPUS, \ + UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + + // Helpers for recording both RM locks usage around UVM-RM interfaces + #define uvm_record_lock_rm_all() ({ uvm_record_lock_rm_api(); uvm_record_lock_rm_gpus(); }) + #define uvm_record_unlock_rm_all() ({ uvm_record_unlock_rm_gpus(); uvm_record_unlock_rm_api(); }) + +#else + #define uvm_record_lock UVM_IGNORE_EXPR2 + #define uvm_record_unlock UVM_IGNORE_EXPR2 + #define 
uvm_record_unlock_out_of_order UVM_IGNORE_EXPR2 + #define uvm_record_downgrade UVM_IGNORE_EXPR + + static bool uvm_check_locked(void *lock, uvm_lock_flags_t flags) + { + return false; + } + + #define uvm_record_lock_mmap_lock_read UVM_IGNORE_EXPR + #define uvm_record_unlock_mmap_lock_read UVM_IGNORE_EXPR + #define uvm_record_unlock_mmap_lock_read_out_of_order UVM_IGNORE_EXPR + #define uvm_record_lock_mmap_lock_write UVM_IGNORE_EXPR + #define uvm_record_unlock_mmap_lock_write UVM_IGNORE_EXPR + #define uvm_record_unlock_mmap_lock_write_out_of_order UVM_IGNORE_EXPR + + #define uvm_check_locked_mmap_lock uvm_check_locked + + #define uvm_record_lock_rm_api() + #define uvm_record_unlock_rm_api() + + #define uvm_record_lock_rm_gpus() + #define uvm_record_unlock_rm_gpus() + + #define uvm_record_lock_rm_all() + #define uvm_record_unlock_rm_all() +#endif + +#define uvm_locking_assert_initialized() UVM_ASSERT(__uvm_locking_initialized()) +#define uvm_thread_assert_all_unlocked() UVM_ASSERT(__uvm_thread_check_all_unlocked()) +#define uvm_assert_lockable_order(order) UVM_ASSERT(__uvm_check_lockable_order(order, UVM_LOCK_FLAGS_MODE_ANY)) +#define uvm_assert_unlocked_order(order) UVM_ASSERT(__uvm_check_unlocked_order(order)) + +// Helpers for locking mmap_lock (mmap_sem in kernels < 5.8) +// and recording its usage +#define uvm_assert_mmap_lock_locked_mode(mm, flags) ({ \ + typeof(mm) _mm = (mm); \ + UVM_ASSERT(nv_mm_rwsem_is_locked(_mm) && uvm_check_locked_mmap_lock((_mm), (flags))); \ + }) + +#define uvm_assert_mmap_lock_locked(mm) \ + uvm_assert_mmap_lock_locked_mode((mm), UVM_LOCK_FLAGS_MODE_ANY) +#define uvm_assert_mmap_lock_locked_read(mm) \ + uvm_assert_mmap_lock_locked_mode((mm), UVM_LOCK_FLAGS_MODE_SHARED) +#define uvm_assert_mmap_lock_locked_write(mm) \ + uvm_assert_mmap_lock_locked_mode((mm), UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + +#define uvm_down_read_mmap_lock(mm) ({ \ + typeof(mm) _mm = (mm); \ + uvm_record_lock_mmap_lock_read(_mm); \ + nv_mmap_read_lock(_mm); \ + }) + +#define uvm_up_read_mmap_lock(mm) ({ \ + typeof(mm) _mm = (mm); \ + nv_mmap_read_unlock(_mm); \ + uvm_record_unlock_mmap_lock_read(_mm); \ + }) + +#define uvm_up_read_mmap_lock_out_of_order(mm) ({ \ + typeof(mm) _mm = (mm); \ + nv_mmap_read_unlock(_mm); \ + uvm_record_unlock_mmap_lock_read_out_of_order(_mm); \ + }) + +#define uvm_down_write_mmap_lock(mm) ({ \ + typeof(mm) _mm = (mm); \ + uvm_record_lock_mmap_lock_write(_mm); \ + nv_mmap_write_lock(_mm); \ + }) + +#define uvm_up_write_mmap_lock(mm) ({ \ + typeof(mm) _mm = (mm); \ + nv_mmap_write_unlock(_mm); \ + uvm_record_unlock_mmap_lock_write(_mm); \ + }) + +// Helper for calling a UVM-RM interface function with lock recording +#define uvm_rm_locked_call(call) ({ \ + typeof(call) ret; \ + uvm_record_lock_rm_all(); \ + ret = call; \ + uvm_record_unlock_rm_all(); \ + ret; \ + }) + +// Helper for calling a UVM-RM interface function that returns void with lock recording +#define uvm_rm_locked_call_void(call) ({ \ + uvm_record_lock_rm_all(); \ + call; \ + uvm_record_unlock_rm_all(); \ + }) + +typedef struct +{ + struct rw_semaphore sem; +#if UVM_IS_DEBUG() + uvm_lock_order_t lock_order; +#endif +} uvm_rw_semaphore_t; + +// +// Note that this is a macro, not an inline or static function so the +// "uvm_sem" argument is subsituted as text. If this is invoked with +// uvm_assert_rwsem_locked_mode(_sem, flags) then we get code "_sem = _sem" +// and _sem is initialized to NULL. Avoid this by using a name unlikely to +// be the same as the string passed to "uvm_sem". 
+// See uvm_down_read() and uvm_up_read() below as examples. +// +#define uvm_assert_rwsem_locked_mode(uvm_sem, flags) ({ \ + typeof(uvm_sem) _sem_ = (uvm_sem); \ + UVM_ASSERT(rwsem_is_locked(&_sem_->sem) && uvm_check_locked(_sem_, (flags))); \ + }) + +#define uvm_assert_rwsem_locked(uvm_sem) \ + uvm_assert_rwsem_locked_mode(uvm_sem, UVM_LOCK_FLAGS_MODE_ANY) +#define uvm_assert_rwsem_locked_read(uvm_sem) \ + uvm_assert_rwsem_locked_mode(uvm_sem, UVM_LOCK_FLAGS_MODE_SHARED) +#define uvm_assert_rwsem_locked_write(uvm_sem) \ + uvm_assert_rwsem_locked_mode(uvm_sem, UVM_LOCK_FLAGS_MODE_EXCLUSIVE) + +#define uvm_assert_rwsem_unlocked(uvm_sem) UVM_ASSERT(!rwsem_is_locked(&(uvm_sem)->sem)) + +static void uvm_init_rwsem(uvm_rw_semaphore_t *uvm_sem, uvm_lock_order_t lock_order) +{ + init_rwsem(&uvm_sem->sem); +#if UVM_IS_DEBUG() + uvm_locking_assert_initialized(); + uvm_sem->lock_order = lock_order; +#endif + uvm_assert_rwsem_unlocked(uvm_sem); +} + +#define uvm_down_read(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + uvm_record_lock(_sem, UVM_LOCK_FLAGS_MODE_SHARED); \ + down_read(&_sem->sem); \ + uvm_assert_rwsem_locked_read(_sem); \ + }) + +#define uvm_up_read(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + uvm_assert_rwsem_locked_read(_sem); \ + up_read(&_sem->sem); \ + uvm_record_unlock(_sem, UVM_LOCK_FLAGS_MODE_SHARED); \ + }) + +// Unlock w/o any tracking. This should be extremely rare and *_no_tracking +// helpers will be added only as needed. +// +// TODO: Bug 2594854: +// TODO: Bug 2583279: Remove macro when bugs are fixed +#define uvm_up_read_no_tracking(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + up_read(&_sem->sem); \ + }) + +#define uvm_down_write(uvm_sem) ({ \ + typeof (uvm_sem) _sem = (uvm_sem); \ + uvm_record_lock(_sem, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + down_write(&_sem->sem); \ + uvm_assert_rwsem_locked_write(_sem); \ + }) + +// trylock for reading: returns 1 if successful, 0 if not. Out-of-order lock +// acquisition via this function is legal, i.e. the lock order checker will +// allow it. However, if an out-of-order lock acquisition attempt fails, it is +// the caller's responsibility to back off at least to the point where the +// next held lower-order lock is released. +#define uvm_down_read_trylock(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + int locked; \ + uvm_record_lock(_sem, UVM_LOCK_FLAGS_MODE_SHARED | UVM_LOCK_FLAGS_TRYLOCK); \ + locked = down_read_trylock(&_sem->sem); \ + if (locked == 0) \ + uvm_record_unlock(_sem, UVM_LOCK_FLAGS_MODE_SHARED); \ + else \ + uvm_assert_rwsem_locked_read(_sem); \ + locked; \ + }) + +// Lock w/o any tracking. This should be extremely rare and *_no_tracking +// helpers will be added only as needed. +// +// TODO: Bug 2594854: +// TODO: Bug 2583279: Remove macro when bugs are fixed +#define uvm_down_read_trylock_no_tracking(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + down_read_trylock(&_sem->sem); \ + }) + +// trylock for writing: returns 1 if successful, 0 if not. Out-of-order lock +// acquisition via this function is legal, i.e. the lock order checker will +// allow it. However, if an out-of-order lock acquisition attempt fails, it is +// the caller's responsibility to back off at least to the point where the +// next held lower-order lock is released. 
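//
// As an illustrative sketch only (shallow_sem and deep_sem are hypothetical
// uvm_rw_semaphore_t instances, with shallow_sem having the lower lock
// order), a failed out-of-order trylock is typically handled by dropping the
// deeper lock and reacquiring both in order:
//
//     // deep_sem is already held; try to take shallow_sem out of order.
//     if (!uvm_down_write_trylock(&shallow_sem)) {
//         uvm_up_write(&deep_sem);
//         uvm_down_write(&shallow_sem);
//         uvm_down_write(&deep_sem);
//     }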
+#define uvm_down_write_trylock(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + int locked; \ + uvm_record_lock(_sem, UVM_LOCK_FLAGS_MODE_EXCLUSIVE | UVM_LOCK_FLAGS_TRYLOCK); \ + locked = down_write_trylock(&_sem->sem); \ + if (locked == 0) \ + uvm_record_unlock(_sem, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + else \ + uvm_assert_rwsem_locked_write(_sem); \ + locked; \ + }) + +#define uvm_up_write(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + uvm_assert_rwsem_locked_write(_sem); \ + up_write(&_sem->sem); \ + uvm_record_unlock(_sem, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + }) + +#define uvm_downgrade_write(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + uvm_assert_rwsem_locked_write(_sem); \ + downgrade_write(&_sem->sem); \ + uvm_record_downgrade(_sem); \ + }) + +typedef struct +{ + struct mutex m; +#if UVM_IS_DEBUG() + uvm_lock_order_t lock_order; +#endif +} uvm_mutex_t; + +// Note that this is a macro, not an inline or static function so the +// "uvm_macro" argument is subsituted as text. If this is invoked with +// uvm__mutex_is_locked(_mutex) then we get code "_mutex = _mutex" and _mutex is +// initialized to NULL. Avoid this by using a name unlikely to be the same as +// the string passed to "uvm_mutex". +// See uvm_mutex_lock() and uvm_mutex_unlock() below as examples. +// +#define uvm_mutex_is_locked(uvm_mutex) ({ \ + typeof(uvm_mutex) _mutex_ = (uvm_mutex); \ + (mutex_is_locked(&_mutex_->m) && uvm_check_locked(_mutex_, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); \ + }) + +#define uvm_assert_mutex_locked(uvm_mutex) UVM_ASSERT(uvm_mutex_is_locked(uvm_mutex)) +#define uvm_assert_mutex_unlocked(uvm_mutex) UVM_ASSERT(!mutex_is_locked(&(uvm_mutex)->m)) + +// +// Linux kernel mutexes cannot be used with interrupts disabled. Doing so +// can lead to deadlocks. +// To warn about mutex usages with interrupts disabled, the following +// macros and inline functions wrap around the raw kernel mutex operations +// in order to check if the interrupts have been disabled and assert if so. +// +// TODO: Bug 2690258: evaluate whether !irqs_disabled() && !in_interrupt() is +// enough. +// +#define uvm_assert_mutex_interrupts() ({ \ + UVM_ASSERT_MSG(!irqs_disabled() && !in_interrupt(), "Mutexes cannot be used with interrupts disabled"); \ + }) + +static void uvm_mutex_init(uvm_mutex_t *mutex, uvm_lock_order_t lock_order) +{ + mutex_init(&mutex->m); +#if UVM_IS_DEBUG() + uvm_locking_assert_initialized(); + mutex->lock_order = lock_order; +#endif + uvm_assert_mutex_unlocked(mutex); +} + +#define uvm_mutex_lock(mutex) ({ \ + typeof(mutex) _mutex = (mutex); \ + uvm_assert_mutex_interrupts(); \ + uvm_record_lock(_mutex, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + mutex_lock(&_mutex->m); \ + uvm_assert_mutex_locked(_mutex); \ + }) + +// Lock w/o any tracking. This should be extremely rare and *_no_tracking +// helpers will be added only as needed. 
+#define uvm_mutex_lock_no_tracking(mutex) ({ \ + uvm_assert_mutex_interrupts(); \ + mutex_lock(&(mutex)->m); \ + }) + +#define uvm_mutex_trylock(mutex) ({ \ + typeof(mutex) _mutex = (mutex); \ + int locked; \ + uvm_record_lock(_mutex, UVM_LOCK_FLAGS_MODE_EXCLUSIVE | UVM_LOCK_FLAGS_TRYLOCK); \ + locked = mutex_trylock(&_mutex->m); \ + if (locked == 0) \ + uvm_record_unlock(_mutex, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + else \ + uvm_assert_mutex_locked(_mutex); \ + locked; \ + }) + +#define uvm_mutex_unlock(mutex) ({ \ + typeof(mutex) _mutex = (mutex); \ + uvm_assert_mutex_interrupts(); \ + uvm_assert_mutex_locked(_mutex); \ + mutex_unlock(&_mutex->m); \ + uvm_record_unlock(_mutex, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + }) +#define uvm_mutex_unlock_out_of_order(mutex) ({ \ + typeof(mutex) _mutex = (mutex); \ + uvm_assert_mutex_interrupts(); \ + uvm_assert_mutex_locked(_mutex); \ + mutex_unlock(&_mutex->m); \ + uvm_record_unlock_out_of_order(_mutex, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + }) + +// Unlock w/o any tracking. This should be extremely rare and *_no_tracking +// helpers will be added only as needed. +#define uvm_mutex_unlock_no_tracking(mutex) ({ \ + uvm_assert_mutex_interrupts(); \ + mutex_unlock(&(mutex)->m); \ + }) + +typedef struct +{ + struct semaphore sem; +#if UVM_IS_DEBUG() + uvm_lock_order_t lock_order; +#endif +} uvm_semaphore_t; + +static void uvm_sema_init(uvm_semaphore_t *semaphore, int val, uvm_lock_order_t lock_order) +{ + sema_init(&semaphore->sem, val); +#if UVM_IS_DEBUG() + uvm_locking_assert_initialized(); + semaphore->lock_order = lock_order; +#endif +} + +#define uvm_sem_is_locked(uvm_sem) uvm_check_locked(uvm_sem, UVM_LOCK_FLAGS_MODE_SHARED) + +#define uvm_down(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + uvm_record_lock(_sem, UVM_LOCK_FLAGS_MODE_SHARED); \ + down(&_sem->sem); \ + }) + +#define uvm_up(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + UVM_ASSERT(uvm_sem_is_locked(_sem)); \ + up(&_sem->sem); \ + uvm_record_unlock(_sem, UVM_LOCK_FLAGS_MODE_SHARED); \ + }) +#define uvm_up_out_of_order(uvm_sem) ({ \ + typeof(uvm_sem) _sem = (uvm_sem); \ + UVM_ASSERT(uvm_sem_is_locked(_sem)); \ + up(&_sem->sem); \ + uvm_record_unlock_out_of_order(_sem, UVM_LOCK_FLAGS_MODE_SHARED); \ + }) + + +// A regular spinlock +// Locked/unlocked with uvm_spin_lock()/uvm_spin_unlock() +typedef struct +{ + spinlock_t lock; +#if UVM_IS_DEBUG() + uvm_lock_order_t lock_order; +#endif +} uvm_spinlock_t; + +// A separate spinlock type for spinlocks that need to disable interrupts. For +// guaranteed correctness and convenience embed the saved and restored irq state +// in the lock itself. +// Locked/unlocked with uvm_spin_lock_irqsave()/uvm_spin_unlock_irqrestore() +typedef struct +{ + spinlock_t lock; + unsigned long irq_flags; +#if UVM_IS_DEBUG() + uvm_lock_order_t lock_order; +#endif +} uvm_spinlock_irqsave_t; + +// Asserts that the spinlock is held. Notably the macros below support both +// types of spinlocks. +// Note that this is a macro, not an inline or static function so the +// "spinlock" argument is subsituted as text. If this is invoked with +// uvm_assert_spinlock_locked(_lock) then we get code "_lock = _lock" +// and _lock is initialized to NULL. Avoid this by using a name unlikely to +// be the same as the string passed to "spinlock". +// See uvm_spin_lock() and uvm_spin_unlock() below as examples. 
+// +#define uvm_assert_spinlock_locked(spinlock) ({ \ + typeof(spinlock) _lock_ = (spinlock); \ + UVM_ASSERT(spin_is_locked(&_lock_->lock) && uvm_check_locked(_lock_, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); \ + }) + +#define uvm_assert_spinlock_unlocked(spinlock) UVM_ASSERT(!spin_is_locked(&(spinlock)->lock)) + +static void uvm_spin_lock_init(uvm_spinlock_t *spinlock, uvm_lock_order_t lock_order) +{ + spin_lock_init(&spinlock->lock); +#if UVM_IS_DEBUG() + uvm_locking_assert_initialized(); + spinlock->lock_order = lock_order; +#endif + uvm_assert_spinlock_unlocked(spinlock); +} + +#define uvm_spin_lock(uvm_lock) ({ \ + typeof(uvm_lock) _lock = (uvm_lock); \ + uvm_record_lock(_lock, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + spin_lock(&_lock->lock); \ + uvm_assert_spinlock_locked(_lock); \ + }) + +#define uvm_spin_unlock(uvm_lock) ({ \ + typeof(uvm_lock) _lock = (uvm_lock); \ + uvm_assert_spinlock_locked(_lock); \ + spin_unlock(&_lock->lock); \ + uvm_record_unlock(_lock, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + }) + +static void uvm_spin_lock_irqsave_init(uvm_spinlock_irqsave_t *spinlock, uvm_lock_order_t lock_order) +{ + spin_lock_init(&spinlock->lock); +#if UVM_IS_DEBUG() + uvm_locking_assert_initialized(); + spinlock->lock_order = lock_order; +#endif + uvm_assert_spinlock_unlocked(spinlock); +} + +// Use a temp to not rely on flags being written after acquiring the lock. +#define uvm_spin_lock_irqsave(uvm_lock) ({ \ + typeof(uvm_lock) _lock = (uvm_lock); \ + unsigned long irq_flags; \ + uvm_record_lock(_lock, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + spin_lock_irqsave(&_lock->lock, irq_flags); \ + _lock->irq_flags = irq_flags; \ + uvm_assert_spinlock_locked(_lock); \ + }) + +// Use a temp to not rely on flags being read before releasing the lock. +#define uvm_spin_unlock_irqrestore(uvm_lock) ({ \ + typeof(uvm_lock) _lock = (uvm_lock); \ + unsigned long irq_flags = _lock->irq_flags; \ + uvm_assert_spinlock_locked(_lock); \ + spin_unlock_irqrestore(&_lock->lock, irq_flags); \ + uvm_record_unlock(_lock, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + }) + +// Wrapper for a reader-writer spinlock that disables and enables interrupts +typedef struct +{ + rwlock_t lock; + + // This flags variable is only used by writers, since concurrent readers may + // have different values. + unsigned long irq_flags; + +#if UVM_IS_DEBUG() + uvm_lock_order_t lock_order; + + // The kernel doesn't provide a function to tell if an rwlock_t is locked, + // so we create our own. 
+ atomic_t lock_count; +#endif +} uvm_rwlock_irqsave_t; + +static bool uvm_rwlock_irqsave_is_locked(uvm_rwlock_irqsave_t *rwlock) +{ +#if UVM_IS_DEBUG() + return atomic_read(&rwlock->lock_count) > 0; +#else + return false; +#endif +} + +static void uvm_rwlock_irqsave_inc(uvm_rwlock_irqsave_t *rwlock) +{ +#if UVM_IS_DEBUG() + atomic_inc(&rwlock->lock_count); +#endif +} + +static void uvm_rwlock_irqsave_dec(uvm_rwlock_irqsave_t *rwlock) +{ +#if UVM_IS_DEBUG() + atomic_dec(&rwlock->lock_count); +#endif +} + +#define uvm_assert_rwlock_locked(uvm_rwlock) \ + UVM_ASSERT(uvm_rwlock_irqsave_is_locked(uvm_rwlock) && uvm_check_locked(uvm_rwlock, UVM_LOCK_FLAGS_MODE_ANY)) +#define uvm_assert_rwlock_locked_read(uvm_rwlock) \ + UVM_ASSERT(uvm_rwlock_irqsave_is_locked(uvm_rwlock) && uvm_check_locked(uvm_rwlock, UVM_LOCK_FLAGS_MODE_SHARED)) +#define uvm_assert_rwlock_locked_write(uvm_rwlock) \ + UVM_ASSERT(uvm_rwlock_irqsave_is_locked(uvm_rwlock) && uvm_check_locked(uvm_rwlock, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)) + +#if UVM_IS_DEBUG() + #define uvm_assert_rwlock_unlocked(uvm_rwlock) UVM_ASSERT(!uvm_rwlock_irqsave_is_locked(uvm_rwlock)) +#else + #define uvm_assert_rwlock_unlocked(uvm_rwlock) +#endif + +static void uvm_rwlock_irqsave_init(uvm_rwlock_irqsave_t *rwlock, uvm_lock_order_t lock_order) +{ + rwlock_init(&rwlock->lock); +#if UVM_IS_DEBUG() + uvm_locking_assert_initialized(); + rwlock->lock_order = lock_order; + atomic_set(&rwlock->lock_count, 0); +#endif + uvm_assert_rwlock_unlocked(rwlock); +} + +// We can't store the irq_flags within the lock itself for readers, so they must +// pass in their flags. +#define uvm_read_lock_irqsave(uvm_rwlock, irq_flags) ({ \ + typeof(uvm_rwlock) _lock = (uvm_rwlock); \ + uvm_record_lock(_lock, UVM_LOCK_FLAGS_MODE_SHARED); \ + read_lock_irqsave(&_lock->lock, irq_flags); \ + uvm_rwlock_irqsave_inc(uvm_rwlock); \ + uvm_assert_rwlock_locked_read(_lock); \ + }) + +#define uvm_read_unlock_irqrestore(uvm_rwlock, irq_flags) ({ \ + typeof(uvm_rwlock) _lock = (uvm_rwlock); \ + uvm_assert_rwlock_locked_read(_lock); \ + uvm_rwlock_irqsave_dec(uvm_rwlock); \ + read_unlock_irqrestore(&_lock->lock, irq_flags); \ + uvm_record_unlock(_lock, UVM_LOCK_FLAGS_MODE_SHARED); \ + }) + +// Use a temp to not rely on flags being written after acquiring the lock. +#define uvm_write_lock_irqsave(uvm_rwlock) ({ \ + typeof(uvm_rwlock) _lock = (uvm_rwlock); \ + unsigned long irq_flags; \ + uvm_record_lock(_lock, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + write_lock_irqsave(&_lock->lock, irq_flags); \ + uvm_rwlock_irqsave_inc(uvm_rwlock); \ + _lock->irq_flags = irq_flags; \ + uvm_assert_rwlock_locked_write(_lock); \ + }) + +// Use a temp to not rely on flags being written after acquiring the lock. +#define uvm_write_unlock_irqrestore(uvm_rwlock) ({ \ + typeof(uvm_rwlock) _lock = (uvm_rwlock); \ + unsigned long irq_flags = _lock->irq_flags; \ + uvm_assert_rwlock_locked_write(_lock); \ + uvm_rwlock_irqsave_dec(uvm_rwlock); \ + write_unlock_irqrestore(&_lock->lock, irq_flags); \ + uvm_record_unlock(_lock, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + }) + +// Bit locks are 'compressed' mutexes which take only 1 bit per lock by virtue +// of using shared waitqueues. +typedef struct +{ + unsigned long *bits; + +#if UVM_IS_DEBUG() + uvm_lock_order_t lock_order; +#endif +} uvm_bit_locks_t; + +NV_STATUS uvm_bit_locks_init(uvm_bit_locks_t *bit_locks, size_t count, uvm_lock_order_t lock_order); +void uvm_bit_locks_deinit(uvm_bit_locks_t *bit_locks); + +// Asserts that the bit lock is held. 
+// +// TODO: Bug 1766601: +// - assert for the right ownership (defining the owner might be tricky in +// the kernel). +#define uvm_assert_bit_locked(bit_locks, bit) ({ \ + typeof(bit_locks) _bit_locks = (bit_locks); \ + typeof(bit) _bit = (bit); \ + UVM_ASSERT(test_bit(_bit, _bit_locks->bits)); \ + UVM_ASSERT(uvm_check_locked(_bit_locks, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); \ +}) + +#define uvm_assert_bit_unlocked(bit_locks, bit) ({ \ + typeof(bit_locks) _bit_locks = (bit_locks); \ + typeof(bit) _bit = (bit); \ + UVM_ASSERT(!test_bit(_bit, _bit_locks->bits)); \ +}) + +static void __uvm_bit_lock(uvm_bit_locks_t *bit_locks, unsigned long bit) +{ + int res; + + res = UVM_WAIT_ON_BIT_LOCK(bit_locks->bits, bit, TASK_UNINTERRUPTIBLE); + UVM_ASSERT_MSG(res == 0, "Uninterruptible task interrupted: %d\n", res); + uvm_assert_bit_locked(bit_locks, bit); +} +#define uvm_bit_lock(bit_locks, bit) ({ \ + typeof(bit_locks) _bit_locks = (bit_locks); \ + typeof(bit) _bit = (bit); \ + uvm_record_lock(_bit_locks, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ + __uvm_bit_lock(_bit_locks, _bit); \ +}) + +static void __uvm_bit_unlock(uvm_bit_locks_t *bit_locks, unsigned long bit) +{ + uvm_assert_bit_locked(bit_locks, bit); + + clear_bit_unlock(bit, bit_locks->bits); + // Make sure we don't reorder release with wakeup as it would cause + // deadlocks (other thread checking lock and adding itself to queue + // in reversed order). clear_bit_unlock has only release semantics. + smp_mb__after_atomic(); + wake_up_bit(bit_locks->bits, bit); +} +#define uvm_bit_unlock(bit_locks, bit) ({ \ + typeof(bit_locks) _bit_locks = (bit_locks); \ + typeof(bit) _bit = (bit); \ + __uvm_bit_unlock(_bit_locks, _bit); \ + uvm_record_unlock(_bit_locks, UVM_LOCK_FLAGS_MODE_EXCLUSIVE); \ +}) + +#endif // __UVM_LOCK_H__ diff --git a/kernel-open/nvidia-uvm/uvm_lock_test.c b/kernel-open/nvidia-uvm/uvm_lock_test.c new file mode 100644 index 000000000..2f7e84343 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_lock_test.c @@ -0,0 +1,460 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_test.h" +#include "uvm_lock.h" +#include "uvm_global.h" +#include "uvm_thread_context.h" + +#define UVM_LOCK_ORDER_FIRST (UVM_LOCK_ORDER_INVALID + 1) +#define UVM_LOCK_ORDER_SECOND (UVM_LOCK_ORDER_INVALID + 2) + +static bool fake_lock(uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + // Just use the lock_order as the void * handle for the lock + return __uvm_record_lock((void*)(long)lock_order, lock_order, flags); +} + +static bool fake_unlock_common(uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + // Just use the lock_order as the void * handle for the lock + return __uvm_record_unlock((void*)(long)lock_order, lock_order, flags); +} + +static bool fake_unlock(uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + return fake_unlock_common(lock_order, flags); +} + +static bool fake_unlock_out_of_order(uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + return fake_unlock_common(lock_order, flags | UVM_LOCK_FLAGS_OUT_OF_ORDER); +} + +static bool fake_downgrade(uvm_lock_order_t lock_order) +{ + // Just use the lock_order as the void * handle for the lock + return __uvm_record_downgrade((void*)(long)lock_order, lock_order); +} + +static bool fake_check_locked(uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + return __uvm_check_locked((void*)(long)lock_order, lock_order, flags); +} + +// TODO: Bug 1799173: The lock asserts verify that the RM GPU lock isn't taken +// with the VA space lock in exclusive mode, and that the RM GPU lock +// isn't taken with mmap_lock held in any mode. Hack around this in the +// test to enable the checks until we figure out something better. +static bool skip_lock(uvm_lock_order_t lock_order, uvm_lock_flags_t flags) +{ + uvm_lock_flags_t mode_flags = (flags & UVM_LOCK_FLAGS_MODE_MASK); + + if (lock_order == UVM_LOCK_ORDER_RM_GPUS) + return mode_flags == UVM_LOCK_FLAGS_MODE_EXCLUSIVE; + + return lock_order == UVM_LOCK_ORDER_MMAP_LOCK; +} + +static NV_STATUS test_all_locks_from(uvm_lock_order_t from_lock_order) +{ + NvU32 exclusive; + uvm_lock_flags_t flags; + NvU32 out_of_order; + NvU32 lock_order; + + TEST_CHECK_RET(from_lock_order != UVM_LOCK_ORDER_INVALID); + + for (out_of_order = 0; out_of_order < 2; ++out_of_order) { + for (exclusive = 0; exclusive < 2; ++exclusive) { + flags = exclusive ? 
UVM_LOCK_FLAGS_MODE_EXCLUSIVE : UVM_LOCK_FLAGS_MODE_SHARED; + + if (out_of_order) + flags |= UVM_LOCK_FLAGS_OUT_OF_ORDER; + + for (lock_order = from_lock_order; lock_order < UVM_LOCK_ORDER_COUNT; ++lock_order) { + TEST_CHECK_RET(__uvm_check_unlocked_order(lock_order)); + TEST_CHECK_RET(__uvm_check_lockable_order(lock_order, flags)); + } + + for (lock_order = from_lock_order; lock_order < UVM_LOCK_ORDER_COUNT; ++lock_order) { + if (skip_lock(lock_order, flags)) + continue; + TEST_CHECK_RET(fake_lock(lock_order, flags)); + } + + if (!skip_lock(from_lock_order, flags)) { + TEST_CHECK_RET(!__uvm_check_unlocked_order(from_lock_order)); + TEST_CHECK_RET(!__uvm_check_lockable_order(from_lock_order, flags)); + } + + for (lock_order = from_lock_order; lock_order < UVM_LOCK_ORDER_COUNT; ++lock_order) { + if (skip_lock(lock_order, flags)) + continue; + TEST_CHECK_RET(fake_check_locked(lock_order, flags)); + } + + for (lock_order = from_lock_order; lock_order < UVM_LOCK_ORDER_COUNT; ++lock_order) { + if (skip_lock(lock_order, flags)) + continue; + TEST_CHECK_RET(fake_check_locked(lock_order, UVM_LOCK_FLAGS_MODE_ANY)); + } + + if (out_of_order == 0) { + for (lock_order = UVM_LOCK_ORDER_COUNT - 1; lock_order != from_lock_order - 1; --lock_order) { + if (skip_lock(lock_order, flags)) + continue; + TEST_CHECK_RET(fake_unlock(lock_order, flags)); + } + } + else { + for (lock_order = from_lock_order; lock_order < UVM_LOCK_ORDER_COUNT; ++lock_order) { + if (skip_lock(lock_order, flags)) + continue; + TEST_CHECK_RET(fake_unlock_out_of_order(lock_order, flags)); + } + } + + for (lock_order = from_lock_order; lock_order < UVM_LOCK_ORDER_COUNT; ++lock_order) { + if (skip_lock(lock_order, flags)) + continue; + TEST_CHECK_RET(__uvm_check_unlocked_order(lock_order)); + TEST_CHECK_RET(__uvm_check_lockable_order(lock_order, flags)); + } + } + } + + return NV_OK; +} + +static NV_STATUS test_all_locks(void) +{ + TEST_CHECK_RET(test_all_locks_from(UVM_LOCK_ORDER_FIRST) == NV_OK); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_locking_first_as_shared_then_test_higher_order_locks(void) +{ + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(test_all_locks_from(UVM_LOCK_ORDER_FIRST + 1) == NV_OK); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_SHARED)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_locking_second_as_exclusive_then_test_higher_order_locks(void) +{ + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(test_all_locks_from(UVM_LOCK_ORDER_SECOND + 1) == NV_OK); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_unlocking_without_locking(void) +{ + // Unlocking a lock w/o locking any lock at all + TEST_CHECK_RET(!fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_unlocking_different_lock_order_than_locked(void) +{ + // Unlocking a different lock than locked + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(!__uvm_thread_check_all_unlocked()); + + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_FIRST, 
UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_unlocking_different_lock_instance_than_locked(void) +{ + // Unlocking a different instance of a lock than locked + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!__uvm_record_unlock(NULL, UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_unlocking_with_different_mode_than_locked(void) +{ + // Unlocking with different mode + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(!fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_unlocking_in_different_order_than_locked(void) +{ + // Unlocking in different order than locked + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock_out_of_order(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + // Unlocking in different order than locked (not necessarily incorrect, but + // commonly pointing to issues) + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_locking_out_of_order(void) +{ + // Locking in wrong order + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_locking_same_order_twice(void) +{ + // Locking the same order twice (lock tracking doesn't support this case although + // it's not necessarily incorrect) + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_checking_locked_when_no_locks_held(void) +{ + // Nothing locked + TEST_CHECK_RET(!fake_check_locked(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(!fake_check_locked(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_check_locked(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_ANY)); + + 
TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_checking_exclusive_when_locked_as_shared(void) +{ + // Expecting exclusive while locked as shared + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(!fake_check_locked(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_SHARED)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_checking_shared_when_locked_as_exclusive(void) +{ + // Expecting shared while locked as exclusive + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_check_locked(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_checking_locked_when_different_instance_held(void) +{ + // Wrong instance of a lock held + TEST_CHECK_RET(__uvm_record_lock(NULL, UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_check_locked(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(__uvm_record_unlock(NULL, UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_checking_all_unlocked_when_lock_held(void) +{ + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(!__uvm_thread_check_all_unlocked()); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_SHARED)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_downgrading(void) +{ + // Lock downgrade + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_check_locked(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_check_locked(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_ANY)); + + TEST_CHECK_RET(fake_downgrade(UVM_LOCK_ORDER_FIRST)); + TEST_CHECK_RET(fake_check_locked(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(fake_check_locked(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_ANY)); + + // Can't downgrade twice + TEST_CHECK_RET(!fake_downgrade(UVM_LOCK_ORDER_FIRST)); + TEST_CHECK_RET(fake_check_locked(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_ANY)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_SHARED)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_downgrading_without_locking(void) +{ + // Downgrading a lock w/o locking any lock at all + TEST_CHECK_RET(!fake_downgrade(UVM_LOCK_ORDER_FIRST)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_downgrading_when_different_instance_held(void) +{ + // Wrong instance of lock to downgrade + TEST_CHECK_RET(__uvm_record_lock(NULL, UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_downgrade(UVM_LOCK_ORDER_FIRST)); + TEST_CHECK_RET(__uvm_record_unlock(NULL, UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_downgrading_when_locked_as_shared(void) +{ + // Downgrading a lock that was acquired as shared + 
TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_SHARED)); + TEST_CHECK_RET(!fake_downgrade(UVM_LOCK_ORDER_FIRST)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_SHARED)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS test_try_locking_out_of_order(void) +{ + // Try-locking in wrong order + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE | UVM_LOCK_FLAGS_TRYLOCK)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE | UVM_LOCK_FLAGS_TRYLOCK)); + TEST_CHECK_RET(fake_lock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(!fake_unlock(UVM_LOCK_ORDER_FIRST, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + TEST_CHECK_RET(fake_unlock(UVM_LOCK_ORDER_SECOND, UVM_LOCK_FLAGS_MODE_EXCLUSIVE)); + + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + return NV_OK; +} + +static NV_STATUS run_all_lock_tests(void) +{ + // The test needs all locks to be released initially + TEST_CHECK_RET(__uvm_thread_check_all_unlocked()); + + TEST_CHECK_RET(test_all_locks() == NV_OK); + TEST_CHECK_RET(test_locking_first_as_shared_then_test_higher_order_locks() == NV_OK); + TEST_CHECK_RET(test_locking_second_as_exclusive_then_test_higher_order_locks() == NV_OK); + TEST_CHECK_RET(test_unlocking_without_locking() == NV_OK); + TEST_CHECK_RET(test_unlocking_different_lock_order_than_locked() == NV_OK); + TEST_CHECK_RET(test_unlocking_different_lock_instance_than_locked() == NV_OK); + TEST_CHECK_RET(test_unlocking_with_different_mode_than_locked() == NV_OK); + TEST_CHECK_RET(test_unlocking_in_different_order_than_locked() == NV_OK); + TEST_CHECK_RET(test_locking_out_of_order() == NV_OK); + TEST_CHECK_RET(test_locking_same_order_twice() == NV_OK); + TEST_CHECK_RET(test_checking_locked_when_no_locks_held() == NV_OK); + TEST_CHECK_RET(test_checking_exclusive_when_locked_as_shared() == NV_OK); + TEST_CHECK_RET(test_checking_shared_when_locked_as_exclusive() == NV_OK); + TEST_CHECK_RET(test_checking_locked_when_different_instance_held() == NV_OK); + TEST_CHECK_RET(test_checking_all_unlocked_when_lock_held() == NV_OK); + TEST_CHECK_RET(test_downgrading() == NV_OK); + TEST_CHECK_RET(test_downgrading_without_locking() == NV_OK); + TEST_CHECK_RET(test_downgrading_when_different_instance_held() == NV_OK); + TEST_CHECK_RET(test_downgrading_when_locked_as_shared() == NV_OK); + TEST_CHECK_RET(test_try_locking_out_of_order() == NV_OK); + + return NV_OK; +} + +NV_STATUS uvm_test_lock_sanity(UVM_TEST_LOCK_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_thread_context_wrapper_t thread_context_wrapper_backup; + + // The global PM lock is acquired by the top-level UVM ioctl() entry point + // and still held here, which confuses the (pre-existing) test logic that + // assumes everything is unlocked at the beginning. Clearing the thread + // context data resolves the issue, but the original state needs to be saved + // and restored before exiting the test to avoid problems in the top-level + // code. 
+ uvm_thread_context_save(&thread_context_wrapper_backup.context); + + status = run_all_lock_tests(); + + uvm_thread_context_restore(&thread_context_wrapper_backup.context); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_map_external.c b/kernel-open/nvidia-uvm/uvm_map_external.c new file mode 100644 index 000000000..3b3a09c37 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_map_external.c @@ -0,0 +1,1396 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_forward_decl.h" +#include "uvm_lock.h" +#include "uvm_mmu.h" +#include "uvm_api.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_push.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_tracker.h" +#include "uvm_hal.h" +#include "uvm_hal_types.h" +#include "uvm_map_external.h" +#include "uvm_pte_batch.h" +#include "uvm_tlb_batch.h" +#include "nv_uvm_interface.h" + +#include "uvm_pushbuffer.h" + +// Assume almost all of the push space can be used for PTEs leaving 1K of margin. +#define MAX_COPY_SIZE_PER_PUSH ((size_t)(UVM_MAX_PUSH_SIZE - 1024)) + +typedef struct +{ + // The VA range the buffer is for + uvm_va_range_t *va_range; + + // The GPU that's mapping the VA range + uvm_gpu_t *gpu; + + // Mapping info used for querying PTEs from RM + UvmGpuExternalMappingInfo mapping_info; + + // Size of the buffer + size_t buffer_size; + + // Page size in bytes + NvU32 page_size; + + // Size of a single PTE in bytes + NvU32 pte_size; + + // Max PTE offset covered by the VA range. + // + // Notably the mapping might not start at offset 0 and max PTE offset can be + // larger than number of PTEs covering the VA range. + size_t max_pte_offset; + + // Number of PTEs currently in the buffer + size_t num_ptes; + + // PTE offset at which the currently buffered PTEs start. + size_t pte_offset; +} uvm_pte_buffer_t; + +// Max PTE buffer size is the size of the buffer used for querying PTEs from RM. +// It has to be big enough to amortize the cost of calling into RM, but small +// enough to fit in CPU caches as it's written and read multiple times on the +// CPU before it ends up in the pushbuffer. +// 96K seems to be a sweet spot at least on a Xeon W5580 system. This could use +// some benchmarking on more systems though. 
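+//
+// As a rough illustration (hypothetical PTE and page sizes, not measured):
+// with 8-byte PTEs the 96K buffer below holds 96K / 8 = 12288 PTEs, so with a
+// 64K GPU page size a single nvUvmInterfaceGetExternalAllocPtes() call can
+// return PTEs covering up to 12288 * 64K = 768MB before a refill is needed.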
+#define MAX_PTE_BUFFER_SIZE ((size_t)96 * 1024) + +static NV_STATUS uvm_pte_buffer_init(uvm_va_range_t *va_range, + uvm_gpu_t *gpu, + const uvm_map_rm_params_t *map_rm_params, + NvU64 length, + NvU32 page_size, + uvm_pte_buffer_t *pte_buffer) +{ + uvm_gpu_va_space_t *gpu_va_space = uvm_gpu_va_space_get(va_range->va_space, gpu); + uvm_page_tree_t *tree = &gpu_va_space->page_tables; + size_t num_all_ptes; + + memset(pte_buffer, 0, sizeof(*pte_buffer)); + + pte_buffer->va_range = va_range; + pte_buffer->gpu = gpu; + pte_buffer->mapping_info.cachingType = map_rm_params->caching_type; + pte_buffer->mapping_info.mappingType = map_rm_params->mapping_type; + pte_buffer->mapping_info.formatType = map_rm_params->format_type; + pte_buffer->mapping_info.elementBits = map_rm_params->element_bits; + pte_buffer->mapping_info.compressionType = map_rm_params->compression_type; + pte_buffer->page_size = page_size; + pte_buffer->pte_size = uvm_mmu_pte_size(tree, page_size); + num_all_ptes = uvm_div_pow2_64(length, page_size); + pte_buffer->max_pte_offset = uvm_div_pow2_64(map_rm_params->map_offset, page_size) + num_all_ptes; + pte_buffer->buffer_size = min(MAX_PTE_BUFFER_SIZE, num_all_ptes * pte_buffer->pte_size); + + pte_buffer->mapping_info.pteBuffer = uvm_kvmalloc(pte_buffer->buffer_size); + if (!pte_buffer->mapping_info.pteBuffer) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +static void uvm_pte_buffer_deinit(uvm_pte_buffer_t *pte_buffer) +{ + uvm_kvfree(pte_buffer->mapping_info.pteBuffer); +} + +// Get the PTEs for mapping the [map_offset, map_offset + map_size) VA range. +static NV_STATUS uvm_pte_buffer_get(uvm_pte_buffer_t *pte_buffer, + NvHandle mem_handle, + NvU64 map_offset, + NvU64 map_size, + NvU64 **ptes_out) +{ + NV_STATUS status; + size_t pte_offset; + size_t num_ptes; + size_t ptes_left; + uvm_va_range_t *va_range = pte_buffer->va_range; + uvm_gpu_va_space_t *gpu_va_space = uvm_gpu_va_space_get(va_range->va_space, pte_buffer->gpu); + + UVM_ASSERT(IS_ALIGNED(map_offset, pte_buffer->page_size)); + UVM_ASSERT(IS_ALIGNED(map_size, pte_buffer->page_size)); + + pte_offset = uvm_div_pow2_64(map_offset, pte_buffer->page_size); + num_ptes = uvm_div_pow2_64(map_size, pte_buffer->page_size); + + UVM_ASSERT(num_ptes <= pte_buffer->buffer_size / pte_buffer->pte_size); + + // If the requested range is already fully cached, just calculate its + // offset within the buffer and return. + if (pte_buffer->pte_offset <= pte_offset && pte_buffer->pte_offset + pte_buffer->num_ptes >= pte_offset + num_ptes) { + pte_offset -= pte_buffer->pte_offset; + *ptes_out = (NvU64 *)((char *)pte_buffer->mapping_info.pteBuffer + pte_offset * pte_buffer->pte_size); + return NV_OK; + } + + // Otherwise get max possible PTEs from RM starting at the requested offset. + pte_buffer->pte_offset = pte_offset; + ptes_left = pte_buffer->max_pte_offset - pte_offset; + pte_buffer->num_ptes = min(pte_buffer->buffer_size / pte_buffer->pte_size, ptes_left); + + UVM_ASSERT_MSG(pte_buffer->num_ptes >= num_ptes, "buffer num ptes %zu < num ptes %zu\n", + pte_buffer->num_ptes, num_ptes); + + // TODO: Bug 1735291: RM can determine the buffer size from the map_size + // parameter. 
+ pte_buffer->mapping_info.pteBufferSize = pte_buffer->num_ptes * pte_buffer->pte_size; + + if (va_range->type == UVM_VA_RANGE_TYPE_CHANNEL) { + status = uvm_rm_locked_call(nvUvmInterfaceGetChannelResourcePtes(gpu_va_space->duped_gpu_va_space, + va_range->channel.rm_descriptor, + map_offset, + pte_buffer->num_ptes * pte_buffer->page_size, + &pte_buffer->mapping_info)); + } + else { + status = uvm_rm_locked_call(nvUvmInterfaceGetExternalAllocPtes(gpu_va_space->duped_gpu_va_space, + mem_handle, + map_offset, + pte_buffer->num_ptes * pte_buffer->page_size, + &pte_buffer->mapping_info)); + } + + if (status != NV_OK) { + if (status != NV_ERR_NOT_READY) { + UVM_ERR_PRINT("Failed to get %s mappings for VA range [0x%llx, 0x%llx], offset 0x%llx, size 0x%llx: %s\n", + va_range->type == UVM_VA_RANGE_TYPE_CHANNEL ? "channel" : "external", + va_range->node.start, + va_range->node.end, + map_offset, + map_size, + nvstatusToString(status)); + } + return status; + } + + *ptes_out = pte_buffer->mapping_info.pteBuffer; + + return NV_OK; +} + +// Copies the input ptes buffer to the given physical address, with an optional +// TLB invalidate. The copy acquires the input tracker then updates it. +static NV_STATUS copy_ptes(uvm_page_tree_t *tree, + NvU64 page_size, + uvm_gpu_phys_address_t pte_addr, + NvU64 *ptes, + NvU32 num_ptes, + bool last_mapping, + uvm_range_tree_node_t *range_node, + uvm_tracker_t *tracker) +{ + uvm_push_t push; + NV_STATUS status; + NvU32 pte_size = uvm_mmu_pte_size(tree, page_size); + + UVM_ASSERT(((NvU64)pte_size) * num_ptes == pte_size * num_ptes); + UVM_ASSERT(pte_size * num_ptes <= MAX_COPY_SIZE_PER_PUSH); + + status = uvm_push_begin_acquire(tree->gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + tracker, + &push, + "Writing %u bytes of PTEs to {%s, 0x%llx}", + pte_size * num_ptes, + uvm_aperture_string(pte_addr.aperture), + pte_addr.address); + if (status != NV_OK) + return status; + + uvm_pte_batch_single_write_ptes(&push, pte_addr, ptes, pte_size, num_ptes); + + if (last_mapping) { + // Do a TLB invalidate if this is the last mapping in the VA range + // Membar: This is a permissions upgrade, so no post-invalidate membar + // is needed. + uvm_tlb_batch_single_invalidate(tree, + &push, + range_node->start, + uvm_range_tree_node_size(range_node), + page_size, + UVM_MEMBAR_NONE); + } + else { + // For pushes prior to the last one, the PTE batch write has + // already pushed a membar that's enough to order the PTE writes + // with the TLB invalidate in the last push and that's all + // that's needed. + // If a failure happens before the push for the last mapping, it is + // still ok as what will follow is more CE writes to unmap the PTEs and + // those will get ordered by the membar from the PTE batch. + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + } + + uvm_push_end(&push); + + // The push acquired the tracker so it's ok to just overwrite it with + // the entry tracking the push. + uvm_tracker_overwrite_with_push(tracker, &push); + + return NV_OK; +} + +// Map all of pt_range, which is contained with the va_range and begins at +// virtual address map_start. The PTE values are queried from RM and the pushed +// writes are added to the input tracker. +// +// If the mapped range ends on range_node->end, a TLB invalidate for upgrade is +// also issued. 
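+//
+// In outline (illustrative pseudo-code only; the real limit also accounts for
+// PDE coverage and the PTE buffer size):
+//
+//     max_ptes = MAX_COPY_SIZE_PER_PUSH / pte_size;
+//     while (addr < end) {
+//         num_ptes = min(max_ptes, ptes_left);
+//         fetch num_ptes PTEs from RM via the pte_buffer at map_offset;
+//         copy_ptes(...);     // one push per chunk
+//         advance addr, map_offset, pte_addr and ptes_left by the chunk;
+//     }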
+static NV_STATUS map_rm_pt_range(uvm_page_tree_t *tree, + uvm_page_table_range_t *pt_range, + uvm_pte_buffer_t *pte_buffer, + uvm_range_tree_node_t *range_node, + NvHandle mem_handle, + NvU64 map_start, + NvU64 map_offset, + uvm_tracker_t *tracker) +{ + uvm_gpu_phys_address_t pte_addr; + NvU64 page_size = pt_range->page_size; + NvU32 pte_size = uvm_mmu_pte_size(tree, page_size); + NvU64 addr, end; + size_t max_ptes, ptes_left, num_ptes; + NvU64 map_size; + bool last_mapping; + NV_STATUS status = NV_OK; + + end = map_start + uvm_page_table_range_size(pt_range) - 1; + + UVM_ASSERT(map_start >= range_node->start); + UVM_ASSERT(end <= range_node->end); + UVM_ASSERT(page_size & tree->hal->page_sizes()); + UVM_ASSERT(IS_ALIGNED(map_start, page_size)); + UVM_ASSERT(IS_ALIGNED(map_offset, page_size)); + + pte_addr = uvm_page_table_range_entry_address(tree, pt_range, 0); + max_ptes = min((size_t)(uvm_mmu_pde_coverage(tree, page_size) / page_size), MAX_COPY_SIZE_PER_PUSH / pte_size); + max_ptes = min(max_ptes, pte_buffer->buffer_size / pte_size); + + addr = map_start; + ptes_left = (size_t)uvm_div_pow2_64(uvm_page_table_range_size(pt_range), page_size); + while (addr < end) { + NvU64 *pte_bits; + + num_ptes = min(max_ptes, ptes_left); + map_size = num_ptes * page_size; + UVM_ASSERT(addr + map_size <= end + 1); + + status = uvm_pte_buffer_get(pte_buffer, mem_handle, map_offset, map_size, &pte_bits); + if (status != NV_OK) + return status; + + last_mapping = (addr + map_size - 1 == range_node->end); + + // These copies are technically independent, except for the last one + // which issues the TLB invalidate and thus must wait for all others. + // However, since each copy will saturate the bus anyway we force them + // to serialize to avoid bus contention. + status = copy_ptes(tree, + page_size, + pte_addr, + pte_bits, + num_ptes, + last_mapping, + range_node, + tracker); + if (status != NV_OK) + return status; + + ptes_left -= num_ptes; + pte_addr.address += num_ptes * pte_size; + addr += map_size; + map_offset += map_size; + } + + return NV_OK; +} + +// Determine the appropriate membar for downgrades on a VA range with type +// UVM_VA_RANGE_TYPE_EXTERNAL or UVM_VA_RANGE_TYPE_CHANNEL. +static uvm_membar_t va_range_downgrade_membar(uvm_va_range_t *va_range, uvm_ext_gpu_map_t *ext_gpu_map) +{ + if (va_range->type == UVM_VA_RANGE_TYPE_CHANNEL) { + if (va_range->channel.aperture == UVM_APERTURE_VID) + return UVM_MEMBAR_GPU; + return UVM_MEMBAR_SYS; + } + + // If there is no mem_handle, this is a sparse mapping. + // UVM_MEMBAR_GPU is sufficient because the debug pages remain allocated + // until the GPU is torn down. GPU tear down implies that our context has + // been switched out. In turn, this implies a sysmembar. 
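+    //
+    // Putting the cases in this function together:
+    //   channel range in vidmem                     -> UVM_MEMBAR_GPU
+    //   channel range in sysmem                     -> UVM_MEMBAR_SYS
+    //   sparse external mapping (no mem_handle)     -> UVM_MEMBAR_GPU
+    //   external mapping in sysmem or peer vidmem   -> UVM_MEMBAR_SYS
+    //   external mapping in local vidmem            -> UVM_MEMBAR_GPU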
+ if (!ext_gpu_map->mem_handle) + return UVM_MEMBAR_GPU; + + if (ext_gpu_map->is_sysmem || ext_gpu_map->gpu != ext_gpu_map->owning_gpu) + return UVM_MEMBAR_SYS; + return UVM_MEMBAR_GPU; +} + +NV_STATUS uvm_va_range_map_rm_allocation(uvm_va_range_t *va_range, + uvm_gpu_t *mapping_gpu, + const UvmGpuMemoryInfo *mem_info, + const uvm_map_rm_params_t *map_rm_params, + uvm_ext_gpu_map_t *ext_gpu_map, + uvm_tracker_t *out_tracker) +{ + uvm_gpu_va_space_t *gpu_va_space = uvm_gpu_va_space_get(va_range->va_space, mapping_gpu); + uvm_page_tree_t *page_tree; + uvm_pte_buffer_t pte_buffer; + uvm_page_table_range_vec_t *pt_range_vec; + uvm_page_table_range_t *pt_range; + uvm_range_tree_node_t *node; + NvU64 addr, size; + NvU64 map_offset = map_rm_params->map_offset; + size_t i; + NV_STATUS status; + uvm_tracker_t *tracker; + + // Track local pushes in a separate tracker, instead of adding them + // directly to the output tracker, to avoid false dependencies + // (serialization) on unrelated work. The local tracker is added to the + // output tracker before the function returns. + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + + // Local tracker is used when this function is called to map allocations + // other than external allocations. Otherwise, the external allocations + // use their own tracker. + if (ext_gpu_map) + tracker = &ext_gpu_map->tracker; + else + tracker = &local_tracker; + + UVM_ASSERT(gpu_va_space); + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_EXTERNAL || va_range->type == UVM_VA_RANGE_TYPE_CHANNEL); + UVM_ASSERT(IS_ALIGNED(mem_info->size, mem_info->pageSize)); + UVM_ASSERT(out_tracker); + + page_tree = &gpu_va_space->page_tables; + + // Verify that the GPU VA space supports this page size + if ((mem_info->pageSize & page_tree->hal->page_sizes()) == 0) + return NV_ERR_INVALID_ADDRESS; + + if (va_range->type == UVM_VA_RANGE_TYPE_EXTERNAL) { + // We should be never called with ext_gpu_map == NULL + // and UVM_VA_RANGE_TYPE_EXTERNAL + UVM_ASSERT(ext_gpu_map != NULL); + node = &ext_gpu_map->node; + pt_range_vec = &ext_gpu_map->pt_range_vec; + } + else { + node = &va_range->node; + pt_range_vec = &va_range->channel.pt_range_vec; + } + + if (!IS_ALIGNED(map_offset, mem_info->pageSize) || + map_offset + uvm_range_tree_node_size(node) > mem_info->size) + return NV_ERR_INVALID_OFFSET; + + // Consolidate input checks for API-level callers + if (!IS_ALIGNED(node->start, mem_info->pageSize) || !IS_ALIGNED(node->end + 1, mem_info->pageSize)) + return NV_ERR_INVALID_ADDRESS; + + status = uvm_pte_buffer_init(va_range, + mapping_gpu, + map_rm_params, + uvm_range_tree_node_size(node), + mem_info->pageSize, + &pte_buffer); + if (status != NV_OK) + return status; + + // Allocate all page tables for this VA range. + // + // TODO: Bug 1766649: Benchmark to see if we get any performance improvement + // from parallelizing page range allocation with writing PTEs for + // earlier ranges. + status = uvm_page_table_range_vec_init(page_tree, + node->start, + uvm_range_tree_node_size(node), + mem_info->pageSize, + UVM_PMM_ALLOC_FLAGS_EVICT, + pt_range_vec); + if (status != NV_OK) + goto out; + + addr = node->start; + for (i = 0; i < pt_range_vec->range_count; i++) { + pt_range = &pt_range_vec->ranges[i]; + + // External allocations track pushes in their own trackers. User channel + // mappings don't have their own trackers, so for those the local tracker + // is used. + status = map_rm_pt_range(page_tree, + pt_range, + &pte_buffer, + node, + ext_gpu_map ? 
ext_gpu_map->mem_handle->rm_handle : 0, + addr, + map_offset, + tracker); + if (status != NV_OK) + goto out; + + size = uvm_page_table_range_size(pt_range); + addr += size; + map_offset += size; + } + + status = uvm_tracker_add_tracker(out_tracker, tracker); + +out: + if (status != NV_OK) { + // We could have any number of mappings in flight to these page tables, + // so wait for everything before we clear and free them. + if (uvm_tracker_wait(tracker) != NV_OK) { + // System-fatal error. Just leak. + return status; + } + + if (pt_range_vec->ranges) { + uvm_page_table_range_vec_clear_ptes(pt_range_vec, va_range_downgrade_membar(va_range, ext_gpu_map)); + uvm_page_table_range_vec_deinit(pt_range_vec); + } + } + + uvm_pte_buffer_deinit(&pte_buffer); + uvm_tracker_deinit(&local_tracker); + return status; +} + +static bool uvm_api_mapping_type_invalid(UvmGpuMappingType map_type) +{ + BUILD_BUG_ON((int)UvmGpuMappingTypeDefault != (int)UvmRmGpuMappingTypeDefault); + BUILD_BUG_ON((int)UvmGpuMappingTypeReadWriteAtomic != (int)UvmRmGpuMappingTypeReadWriteAtomic); + BUILD_BUG_ON((int)UvmGpuMappingTypeReadWrite != (int)UvmRmGpuMappingTypeReadWrite); + BUILD_BUG_ON((int)UvmGpuMappingTypeReadOnly != (int)UvmRmGpuMappingTypeReadOnly); + BUILD_BUG_ON((int)UvmGpuMappingTypeCount != (int)UvmRmGpuMappingTypeCount); + + switch (map_type) { + case UvmGpuMappingTypeDefault: + case UvmGpuMappingTypeReadWriteAtomic: + case UvmGpuMappingTypeReadWrite: + case UvmGpuMappingTypeReadOnly: + return false; + default: + return true; + } +} + +static bool uvm_api_caching_type_invalid(UvmGpuCachingType cache_type) +{ + BUILD_BUG_ON((int)UvmGpuCachingTypeDefault != (int)UvmRmGpuCachingTypeDefault); + BUILD_BUG_ON((int)UvmGpuCachingTypeForceUncached != (int)UvmRmGpuCachingTypeForceUncached); + BUILD_BUG_ON((int)UvmGpuCachingTypeForceCached != (int)UvmRmGpuCachingTypeForceCached); + BUILD_BUG_ON((int)UvmGpuCachingTypeCount != (int)UvmRmGpuCachingTypeCount); + + switch (cache_type) { + case UvmGpuCachingTypeDefault: + case UvmGpuCachingTypeForceUncached: + case UvmGpuCachingTypeForceCached: + return false; + default: + return true; + } +} + +static bool uvm_api_kind_type_invalid(UvmGpuFormatType format_type, + UvmGpuFormatElementBits element_bits, + UvmGpuCompressionType compression_type) +{ + BUILD_BUG_ON((int)UvmGpuFormatTypeDefault != (int)UvmRmGpuFormatTypeDefault); + BUILD_BUG_ON((int)UvmGpuFormatTypeBlockLinear != (int)UvmRmGpuFormatTypeBlockLinear); + BUILD_BUG_ON((int)UvmGpuFormatTypeCount != (int)UvmRmGpuFormatTypeCount); + + BUILD_BUG_ON((int)UvmGpuFormatElementBitsDefault != (int)UvmRmGpuFormatElementBitsDefault); + BUILD_BUG_ON((int)UvmGpuFormatElementBits8 != (int)UvmRmGpuFormatElementBits8); + BUILD_BUG_ON((int)UvmGpuFormatElementBits16 != (int)UvmRmGpuFormatElementBits16); + BUILD_BUG_ON((int)UvmGpuFormatElementBits32 != (int)UvmRmGpuFormatElementBits32); + BUILD_BUG_ON((int)UvmGpuFormatElementBits64 != (int)UvmRmGpuFormatElementBits64); + BUILD_BUG_ON((int)UvmGpuFormatElementBits128 != (int)UvmRmGpuFormatElementBits128); + BUILD_BUG_ON((int)UvmGpuFormatElementBitsCount != (int)UvmRmGpuFormatElementBitsCount); + + BUILD_BUG_ON((int)UvmGpuCompressionTypeDefault != (int)UvmRmGpuCompressionTypeDefault); + BUILD_BUG_ON((int)UvmGpuCompressionTypeEnabledNoPlc != (int)UvmRmGpuCompressionTypeEnabledNoPlc); + BUILD_BUG_ON((int)UvmGpuCompressionTypeCount != (int)UvmRmGpuCompressionTypeCount); + + if (compression_type >= UvmGpuCompressionTypeCount) + return true; + + switch (format_type) { + case UvmGpuFormatTypeDefault: 
+ case UvmGpuFormatTypeBlockLinear: + break; + default: + return true; + } + + switch (element_bits) { + case UvmGpuFormatElementBitsDefault: + case UvmGpuFormatElementBits8: + case UvmGpuFormatElementBits16: + // CUDA does not support 24-bit width + case UvmGpuFormatElementBits32: + case UvmGpuFormatElementBits64: + case UvmGpuFormatElementBits128: + break; + default: + return true; + } + + if (((format_type != UvmGpuFormatTypeDefault) && (element_bits == UvmGpuFormatElementBitsDefault)) || + ((element_bits != UvmGpuFormatElementBitsDefault) && (format_type == UvmGpuFormatTypeDefault))) + return true; + + return false; +} + +static void uvm_release_rm_handle(struct nv_kref *ref) +{ + uvm_ext_gpu_mem_handle *mem_handle = container_of(ref, uvm_ext_gpu_mem_handle, ref_count); + + if (mem_handle->rm_handle) { + NV_STATUS status; + + status = uvm_rm_locked_call(nvUvmInterfaceFreeDupedHandle(uvm_gpu_device_handle(mem_handle->gpu), + mem_handle->rm_handle)); + UVM_ASSERT(status == NV_OK); + } + uvm_kvfree(mem_handle); +} + +static NV_STATUS uvm_create_external_range(uvm_va_space_t *va_space, UVM_CREATE_EXTERNAL_RANGE_PARAMS *params) +{ + uvm_va_range_t *va_range = NULL; + struct mm_struct *mm; + NV_STATUS status = NV_OK; + + // Before we know the page size used by the allocation, we can only enforce + // 4K alignment as that's the minimum page size used for GPU allocations. + // Later uvm_map_external_allocation_on_gpu() will enforce alignment to the + // page size used by the allocation. + if (uvm_api_range_invalid_4k(params->base, params->length)) + return NV_ERR_INVALID_ADDRESS; + + // The mm needs to be locked in order to remove stale HMM va_blocks. + mm = uvm_va_space_mm_retain_lock(va_space); + uvm_va_space_down_write(va_space); + + // Create the new external VA range. + // uvm_va_range_create_external handles any collisions when it attempts to + // insert the new range into the va_space range tree. + status = uvm_va_range_create_external(va_space, mm, params->base, params->length, &va_range); + if (status != NV_OK) { + UVM_DBG_PRINT_RL("Failed to create external VA range [0x%llx, 0x%llx)\n", + params->base, + params->base + params->length); + } + + uvm_va_space_up_write(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + return status; +} + +NV_STATUS uvm_api_create_external_range(UVM_CREATE_EXTERNAL_RANGE_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + return uvm_create_external_range(va_space, params); +} + +static NV_STATUS set_ext_gpu_map_location(uvm_ext_gpu_map_t *ext_gpu_map, + uvm_va_space_t *va_space, + uvm_gpu_t *mapping_gpu, + const UvmGpuMemoryInfo *mem_info) +{ + uvm_gpu_t *owning_gpu; + + + + + + + + + + + + + + + // This is a local or peer allocation, so the owning GPU must have been + // registered. + owning_gpu = uvm_va_space_get_gpu_by_uuid(va_space, &mem_info->uuid); + if (!owning_gpu) + return NV_ERR_INVALID_DEVICE; + + // Even if the allocation is in sysmem then it still matters which GPU owns + // it, because our dup is not enough to keep the owning GPU around and that + // exposes a bug in RM where the memory can outlast the GPU and then cause + // crashes when it's eventually freed. + // TODO: Bug 1811006: Bug tracking the RM issue, its fix might change the + // semantics of sysmem allocations. 
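+    //
+    // In short, the checks below classify the allocation as: sysmem (owning
+    // GPU recorded, is_sysmem set), local vidmem (owned by the mapping GPU),
+    // or peer vidmem (owned by another GPU, allowed only when peer access
+    // between the two GPUs is enabled).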
+ if (mem_info->sysmem) { + ext_gpu_map->owning_gpu = owning_gpu; + ext_gpu_map->is_sysmem = true; + return NV_OK; + } + + if (owning_gpu != mapping_gpu) { + // TODO: Bug 1757136: In SLI, the returned UUID may be different but a + // local mapping must be used. We need to query SLI groups to know + // that. + if (!uvm_va_space_peer_enabled(va_space, mapping_gpu, owning_gpu)) + return NV_ERR_INVALID_DEVICE; + } + + ext_gpu_map->owning_gpu = owning_gpu; + ext_gpu_map->is_sysmem = false; + return NV_OK; +} + +static uvm_ext_gpu_map_t *uvm_va_range_ext_gpu_map(uvm_va_range_t *va_range, uvm_gpu_t *mapping_gpu, NvU64 addr) +{ + uvm_ext_gpu_map_t *ext_gpu_map = NULL; + uvm_range_tree_node_t *node; + uvm_ext_gpu_range_tree_t *range_tree; + + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_EXTERNAL); + uvm_assert_rwsem_locked(&va_range->va_space->lock); + + range_tree = uvm_ext_gpu_range_tree(va_range, mapping_gpu); + + if (uvm_processor_mask_test(&va_range->external.mapped_gpus, mapping_gpu->id)) { + UVM_ASSERT(!uvm_range_tree_empty(&range_tree->tree)); + node = uvm_range_tree_find(&range_tree->tree, addr); + if (node) { + ext_gpu_map = uvm_ext_gpu_map_container(node); + UVM_ASSERT(ext_gpu_map->gpu == mapping_gpu); + } + } + else { + UVM_ASSERT(uvm_range_tree_empty(&range_tree->tree)); + } + + return ext_gpu_map; +} + +static NV_STATUS uvm_ext_gpu_map_split(uvm_range_tree_t *tree, + uvm_ext_gpu_map_t *existing_map, + NvU64 new_end, + uvm_ext_gpu_map_t **new_map) +{ + uvm_ext_gpu_map_t *new; + NV_STATUS status; + NvU64 new_start = new_end + 1; + + if (!IS_ALIGNED(new_start, existing_map->pt_range_vec.page_size)) + return NV_ERR_INVALID_ADDRESS; + + UVM_ASSERT(new_start >= existing_map->node.start && new_start < existing_map->node.end); + + new = uvm_kvmalloc_zero(sizeof(*new)); + if (!new) + return NV_ERR_NO_MEMORY; + + RB_CLEAR_NODE(&new->node.rb_node); + new->mem_handle = existing_map->mem_handle; + new->gpu = existing_map->gpu; + new->owning_gpu = existing_map->owning_gpu; + new->is_sysmem = existing_map->is_sysmem; + + // Initialize the new ext_gpu_map tracker as a copy of the existing_map tracker. + // This way, any operations on any of the two ext_gpu_maps will be able to + // wait for any uncompleted work prior to the split. + status = uvm_tracker_init_from(&new->tracker, &existing_map->tracker); + if (status != NV_OK) { + uvm_kvfree(new); + return status; + } + + status = uvm_page_table_range_vec_split_upper(&existing_map->pt_range_vec, new_start - 1, &new->pt_range_vec); + if (status != NV_OK) { + uvm_tracker_deinit(&new->tracker); + uvm_kvfree(new); + return status; + } + + new->node.start = new_start; + + // Sparse mappings don't have actual allocations. + if (new->mem_handle) + nv_kref_get(&new->mem_handle->ref_count); + + uvm_range_tree_split(tree, &existing_map->node, &new->node); + + if (new_map) + *new_map = new; + + return NV_OK; +} + +static NV_STATUS uvm_unmap_external_in_range(uvm_va_range_t *va_range, + uvm_gpu_t *gpu, + NvU64 start, + NvU64 end, + struct list_head *deferred_list) +{ + uvm_ext_gpu_range_tree_t *range_tree = uvm_ext_gpu_range_tree(va_range, gpu); + uvm_ext_gpu_map_t *ext_map, *ext_map_next = NULL; + NV_STATUS status = NV_OK; + + uvm_assert_mutex_locked(&range_tree->lock); + + // If a previously existing sub-range is found (ext_map != NULL), the + // new sub-range can be overlapping with the existing one in one of the + // following ways: + // + // 1. 
complete overlap (exact start and end boundary matches are special
+    //    cases of this):
+    //              [---- existing ----]
+    //           [--------- new ---------]
+    // 2. partial overlap at the start (end boundary match is a special case
+    //    of this):
+    //       [---- existing ----]
+    //               [---- new ----]
+    // 3. partial overlap at the end (start boundary match is a special case
+    //    of this):
+    //              [---- existing ----]
+    //       [---- new ----]
+    // 4. completely contained (start of new != start of existing and end of
+    //    new != end of existing, otherwise see 1):
+    //       [---- existing ----]
+    //           [-- new --]
+    //
+    // The algorithm below is:
+    // 1. If the start of the new mapping is greater than the start of the
+    //    existing mapping, split the existing mapping at start. The newly
+    //    created uvm_ext_gpu_map_t will be inserted into the tree. Note that
+    //    the newly created uvm_ext_gpu_map_t is the one that we want to visit
+    //    next. When the loop visits the newly created uvm_ext_gpu_map_t and
+    //    its boundaries are completely overlapped by the new mapping, it will
+    //    cause the algorithm to destroy it.
+    // 2. If the end of the new mapping is less than the end of the existing
+    //    mapping, split the existing mapping at end. The newly created
+    //    uvm_ext_gpu_map_t will be inserted into the tree. The overlapping
+    //    portion of the existing mapping will be destroyed.
+    // 3. If the existing mapping is completely overlapped by the new mapping,
+    //    the existing mapping is destroyed.
+    //
+    // The loop cannot use any of the existing iterators because:
+    // 1. It needs to be able to destroy ext_gpu_map structures. This means it
+    //    can't use non-safe iterators.
+    // 2. It needs to visit newly created uvm_ext_gpu_map_t, as a result of
+    //    splits. This means it can't use safe iterators as they will skip the
+    //    newly created uvm_ext_gpu_map_t.
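+    //
+    // A worked example with hypothetical addresses (not taken from real use):
+    // an existing mapping covers [0x200000, 0x5fffff] and the new request
+    // covers [0x300000, 0x3fffff] (case 4 above). The first split, at
+    // 0x300000, leaves [0x200000, 0x2fffff] untouched and creates a new
+    // [0x300000, 0x5fffff] piece. When the loop visits that piece, the second
+    // split, at 0x400000, creates [0x400000, 0x5fffff], and the remaining
+    // [0x300000, 0x3fffff] piece is destroyed so the new mapping can take its
+    // place.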
+ ext_map = uvm_ext_gpu_map_iter_first(va_range, gpu, start, end); + while (ext_map) { + if (start > ext_map->node.start) { + status = uvm_ext_gpu_map_split(&range_tree->tree, ext_map, start - 1, &ext_map_next); + if (status != NV_OK) + break; + } + else { + if (end < ext_map->node.end) { + status = uvm_ext_gpu_map_split(&range_tree->tree, ext_map, end, NULL); + if (status != NV_OK) + break; + ext_map_next = NULL; + } + else { + ext_map_next = uvm_ext_gpu_map_iter_next(va_range, ext_map, end); + } + + uvm_ext_gpu_map_destroy(va_range, ext_map, deferred_list); + } + + ext_map = ext_map_next; + } + + return status; +} + +static NV_STATUS uvm_map_external_allocation_on_gpu(uvm_va_range_t *va_range, + uvm_gpu_t *mapping_gpu, + const uvm_rm_user_object_t *user_rm_mem, + const uvm_map_rm_params_t *map_rm_params, + NvU64 base, + NvU64 length, + uvm_tracker_t *out_tracker) +{ + uvm_va_space_t *va_space = va_range->va_space; + uvm_ext_gpu_map_t *ext_gpu_map = NULL; + uvm_ext_gpu_range_tree_t *range_tree = uvm_ext_gpu_range_tree(va_range, mapping_gpu); + UvmGpuMemoryInfo mem_info; + NV_STATUS status; + + uvm_assert_rwsem_locked_read(&va_space->lock); + + if ((map_rm_params->compression_type == UvmGpuCompressionTypeEnabledNoPlc) && !mapping_gpu->parent->plc_supported) + return NV_ERR_INVALID_DEVICE; + + // Check if the GPU can access the VA + if (!uvm_gpu_can_address(mapping_gpu, base, length)) + return NV_ERR_OUT_OF_RANGE; + + uvm_mutex_lock(&range_tree->lock); + + status = uvm_unmap_external_in_range(va_range, mapping_gpu, base, base + length - 1, NULL); + if (status != NV_OK) + goto error; + + ext_gpu_map = uvm_kvmalloc_zero(sizeof(*ext_gpu_map)); + if (!ext_gpu_map) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + // Insert the ext_gpu_map into the VA range immediately since some of the + // below calls require it to be there. + ext_gpu_map->node.start = base; + ext_gpu_map->node.end = base + length - 1; + RB_CLEAR_NODE(&ext_gpu_map->node.rb_node); + uvm_tracker_init(&ext_gpu_map->tracker); + ext_gpu_map->mem_handle = uvm_kvmalloc_zero(sizeof(*ext_gpu_map->mem_handle)); + if (!ext_gpu_map->mem_handle) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + // Due to the fact that any overlapping mappings were already unmapped, + // adding the new mapping to the tree cannot fail. + status = uvm_range_tree_add(&range_tree->tree, &ext_gpu_map->node); + UVM_ASSERT(status == NV_OK); + + uvm_processor_mask_set_atomic(&va_range->external.mapped_gpus, mapping_gpu->id); + ext_gpu_map->gpu = mapping_gpu; + ext_gpu_map->mem_handle->gpu = mapping_gpu; + nv_kref_init(&ext_gpu_map->mem_handle->ref_count); + + // Error paths after this point may call uvm_va_range_ext_gpu_map, so do a + // sanity check now to make sure it doesn't trigger any asserts. + UVM_ASSERT(uvm_va_range_ext_gpu_map(va_range, mapping_gpu, base) == ext_gpu_map); + + // Dup the memory. This verifies the input handles, takes a ref count on the + // physical allocation so it can't go away under us, and returns us the + // allocation info. 
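+    //
+    // Once the dup below succeeds, the duped handle is owned by
+    // ext_gpu_map->mem_handle: if any later step fails, the error path's
+    // uvm_ext_gpu_map_destroy() drops the last mem_handle reference and
+    // uvm_release_rm_handle() frees the dup via nvUvmInterfaceFreeDupedHandle().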
+ status = uvm_rm_locked_call(nvUvmInterfaceDupMemory(uvm_gpu_device_handle(mapping_gpu), + user_rm_mem->user_client, + user_rm_mem->user_object, + &ext_gpu_map->mem_handle->rm_handle, + &mem_info)); + if (status != NV_OK) { + UVM_DBG_PRINT("Failed to dup memory handle {0x%x, 0x%x}: %s, GPU: %s\n", + user_rm_mem->user_client, + user_rm_mem->user_object, + nvstatusToString(status), + uvm_gpu_name(mapping_gpu)); + goto error; + } + + status = set_ext_gpu_map_location(ext_gpu_map, va_space, mapping_gpu, &mem_info); + if (status != NV_OK) + goto error; + + status = uvm_va_range_map_rm_allocation(va_range, + mapping_gpu, + &mem_info, + map_rm_params, + ext_gpu_map, + out_tracker); + if (status != NV_OK) + goto error; + + uvm_mutex_unlock(&range_tree->lock); + return NV_OK; + +error: + uvm_ext_gpu_map_destroy(va_range, ext_gpu_map, NULL); + uvm_mutex_unlock(&range_tree->lock); + return status; +} + +// Actual implementation of UvmMapExternalAllocation +static NV_STATUS uvm_map_external_allocation(uvm_va_space_t *va_space, UVM_MAP_EXTERNAL_ALLOCATION_PARAMS *params) +{ + uvm_va_range_t *va_range = NULL; + uvm_gpu_t *mapping_gpu; + uvm_processor_mask_t mapped_gpus; + NV_STATUS status = NV_OK; + size_t i; + uvm_map_rm_params_t map_rm_params; + uvm_rm_user_object_t user_rm_mem = + { + .rm_control_fd = params->rmCtrlFd, + .user_client = params->hClient, + .user_object = params->hMemory + }; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + + if (uvm_api_range_invalid_4k(params->base, params->length)) + return NV_ERR_INVALID_ADDRESS; + + if (params->gpuAttributesCount == 0 || params->gpuAttributesCount > UVM_MAX_GPUS) + return NV_ERR_INVALID_ARGUMENT; + + uvm_va_space_down_read_rm(va_space); + va_range = uvm_va_range_find(va_space, params->base); + + if (!va_range || + va_range->type != UVM_VA_RANGE_TYPE_EXTERNAL || + va_range->node.end < params->base + params->length - 1) { + uvm_va_space_up_read_rm(va_space); + return NV_ERR_INVALID_ADDRESS; + } + + uvm_processor_mask_zero(&mapped_gpus); + for (i = 0; i < params->gpuAttributesCount; i++) { + if (uvm_api_mapping_type_invalid(params->perGpuAttributes[i].gpuMappingType) || + uvm_api_caching_type_invalid(params->perGpuAttributes[i].gpuCachingType) || + uvm_api_kind_type_invalid(params->perGpuAttributes[i].gpuFormatType, + params->perGpuAttributes[i].gpuElementBits, + params->perGpuAttributes[i].gpuCompressionType)) { + status = NV_ERR_INVALID_ARGUMENT; + goto error; + } + + mapping_gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, ¶ms->perGpuAttributes[i].gpuUuid); + if (!mapping_gpu) { + status = NV_ERR_INVALID_DEVICE; + goto error; + } + + // Use a tracker to get as much parallelization as possible among GPUs, + // so one GPU can have its PTE writes in flight while we're working on + // the next one. + map_rm_params.map_offset = params->offset; + map_rm_params.mapping_type = params->perGpuAttributes[i].gpuMappingType; + map_rm_params.caching_type = params->perGpuAttributes[i].gpuCachingType; + map_rm_params.format_type = params->perGpuAttributes[i].gpuFormatType; + map_rm_params.element_bits = params->perGpuAttributes[i].gpuElementBits; + map_rm_params.compression_type = params->perGpuAttributes[i].gpuCompressionType; + status = uvm_map_external_allocation_on_gpu(va_range, + mapping_gpu, + &user_rm_mem, + &map_rm_params, + params->base, + params->length, + &tracker); + if (status != NV_OK) + goto error; + + uvm_processor_mask_set(&mapped_gpus, mapping_gpu->id); + } + + // Wait for outstanding page table operations to finish across all GPUs. 
We + // just need to hold the VA space lock to prevent the GPUs on which we're + // waiting from getting unregistered underneath us. + status = uvm_tracker_wait_deinit(&tracker); + + uvm_va_space_up_read_rm(va_space); + return status; + +error: + // We still have to wait for page table writes to finish, since the teardown + // could free them. + (void)uvm_tracker_wait_deinit(&tracker); + + // Tear down only those mappings we created during this call + for_each_va_space_gpu_in_mask(mapping_gpu, va_space, &mapped_gpus) { + uvm_ext_gpu_range_tree_t *range_tree = uvm_ext_gpu_range_tree(va_range, mapping_gpu); + uvm_ext_gpu_map_t *ext_map, *ext_map_next; + + uvm_mutex_lock(&range_tree->lock); + uvm_ext_gpu_map_for_each_in_safe(ext_map, + ext_map_next, + va_range, + mapping_gpu, + params->base, + params->base + params->length - 1) + uvm_ext_gpu_map_destroy(va_range, ext_map, NULL); + uvm_mutex_unlock(&range_tree->lock); + } + + uvm_va_space_up_read_rm(va_space); + + return status; +} + +NV_STATUS uvm_api_map_external_allocation(UVM_MAP_EXTERNAL_ALLOCATION_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + return uvm_map_external_allocation(va_space, params); +} + +static NvU64 external_sparse_pte_maker(uvm_page_table_range_vec_t *range_vec, NvU64 offset, void *caller_data) +{ + return range_vec->tree->hal->make_sparse_pte(); +} + +static NV_STATUS uvm_map_external_sparse_on_gpu(uvm_va_range_t *va_range, + uvm_gpu_t *mapping_gpu, + NvU64 base, + NvU64 length, + struct list_head *deferred_free_list) +{ + uvm_va_space_t *va_space = va_range->va_space; + uvm_ext_gpu_map_t *ext_gpu_map = NULL; + uvm_ext_gpu_range_tree_t *range_tree = uvm_ext_gpu_range_tree(va_range, mapping_gpu); + uvm_gpu_va_space_t *gpu_va_space = uvm_gpu_va_space_get(va_space, mapping_gpu); + uvm_page_tree_t *page_tree; + NV_STATUS status; + + uvm_assert_rwsem_locked(&va_space->lock); + + if (!uvm_gpu_can_address(mapping_gpu, base, length)) + return NV_ERR_OUT_OF_RANGE; + + UVM_ASSERT(gpu_va_space); + + page_tree = &gpu_va_space->page_tables; + + uvm_mutex_lock(&range_tree->lock); + + status = uvm_unmap_external_in_range(va_range, mapping_gpu, base, base + length - 1, deferred_free_list); + if (status != NV_OK) + goto error; + + ext_gpu_map = uvm_kvmalloc_zero(sizeof(*ext_gpu_map)); + if (!ext_gpu_map) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + ext_gpu_map->node.start = base; + ext_gpu_map->node.end = base + length - 1; + RB_CLEAR_NODE(&ext_gpu_map->node.rb_node); + uvm_tracker_init(&ext_gpu_map->tracker); + + // Due to the fact that any overlapping mappings were already unmapped, + // adding the new mapping to the tree cannot fail. 
+ status = uvm_range_tree_add(&range_tree->tree, &ext_gpu_map->node); + UVM_ASSERT(status == NV_OK); + + uvm_processor_mask_set_atomic(&va_range->external.mapped_gpus, mapping_gpu->id); + ext_gpu_map->gpu = mapping_gpu; + + UVM_ASSERT(uvm_va_range_ext_gpu_map(va_range, mapping_gpu, base) == ext_gpu_map); + + status = uvm_page_table_range_vec_init(page_tree, + ext_gpu_map->node.start, + uvm_range_tree_node_size(&ext_gpu_map->node), + UVM_PAGE_SIZE_64K, + UVM_PMM_ALLOC_FLAGS_EVICT, + &ext_gpu_map->pt_range_vec); + if (status != NV_OK) + goto error; + + status = uvm_page_table_range_vec_write_ptes(&ext_gpu_map->pt_range_vec, + UVM_MEMBAR_NONE, + external_sparse_pte_maker, + NULL); + if (status != NV_OK) + goto error; + + uvm_mutex_unlock(&range_tree->lock); + return NV_OK; + +error: + uvm_ext_gpu_map_destroy(va_range, ext_gpu_map, NULL); + uvm_mutex_unlock(&range_tree->lock); + return status; +} + +static NV_STATUS uvm_map_external_sparse(uvm_va_space_t *va_space, UVM_MAP_EXTERNAL_SPARSE_PARAMS *params) +{ + uvm_va_range_t *va_range = NULL; + uvm_gpu_t *mapping_gpu = NULL; + NV_STATUS status = NV_OK; + LIST_HEAD(deferred_free_list); + + if (uvm_api_range_invalid_64k(params->base, params->length)) + return NV_ERR_INVALID_ADDRESS; + + uvm_va_space_down_read(va_space); + va_range = uvm_va_range_find(va_space, params->base); + if (!va_range || + va_range->type != UVM_VA_RANGE_TYPE_EXTERNAL || + va_range->node.end < params->base + params->length - 1) { + status = NV_ERR_INVALID_ADDRESS; + goto out; + } + + mapping_gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, ¶ms->gpuUuid); + if (!mapping_gpu) { + status = NV_ERR_INVALID_DEVICE; + goto out; + } + + // Sparse mappings are unsupported on GPUs prior to Pascal. + if (!mapping_gpu->parent->sparse_mappings_supported) { + status = NV_ERR_INVALID_DEVICE; + goto out; + } + + status = uvm_map_external_sparse_on_gpu(va_range, mapping_gpu, params->base, params->length, &deferred_free_list); + + if (!list_empty(&deferred_free_list)) + uvm_gpu_retain(mapping_gpu); + +out: + uvm_va_space_up_read(va_space); + + if (!list_empty(&deferred_free_list)) { + uvm_deferred_free_object_list(&deferred_free_list); + uvm_gpu_release(mapping_gpu); + } + + return status; +} + +NV_STATUS uvm_api_map_external_sparse(UVM_MAP_EXTERNAL_SPARSE_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + return uvm_map_external_sparse(va_space, params); +} + +// Version of free which returns but doesn't release the owning GPU +static uvm_gpu_t *uvm_ext_gpu_map_free_internal(uvm_ext_gpu_map_t *ext_gpu_map) +{ + uvm_gpu_t *owning_gpu; + + if (!ext_gpu_map) + return NULL; + + UVM_ASSERT(!ext_gpu_map->pt_range_vec.ranges); + + if (ext_gpu_map->mem_handle) + nv_kref_put(&ext_gpu_map->mem_handle->ref_count, uvm_release_rm_handle); + + owning_gpu = ext_gpu_map->owning_gpu; + uvm_kvfree(ext_gpu_map); + + return owning_gpu; +} + +void uvm_ext_gpu_map_free(uvm_ext_gpu_map_t *ext_gpu_map) +{ + uvm_gpu_t *owning_gpu = uvm_ext_gpu_map_free_internal(ext_gpu_map); + if (owning_gpu) + uvm_gpu_release(owning_gpu); +} + +void uvm_ext_gpu_map_destroy(uvm_va_range_t *va_range, + uvm_ext_gpu_map_t *ext_gpu_map, + struct list_head *deferred_free_list) +{ + uvm_membar_t membar; + uvm_ext_gpu_range_tree_t *range_tree; + uvm_gpu_t *mapped_gpu; + + if (!ext_gpu_map) + return; + + (void)uvm_tracker_wait_deinit(&ext_gpu_map->tracker); + + // The external map is inserted into the tree prior to the rest of the mapping + // steps. 
So, if it has not been inserted yet, there is nothing to clean up. Just + // free the memory. + if (RB_EMPTY_NODE(&ext_gpu_map->node.rb_node)) { + uvm_kvfree(ext_gpu_map->mem_handle); + uvm_kvfree(ext_gpu_map); + return; + } + + mapped_gpu = ext_gpu_map->gpu; + + range_tree = uvm_ext_gpu_range_tree(va_range, mapped_gpu); + + uvm_assert_mutex_locked(&range_tree->lock); + UVM_ASSERT(uvm_gpu_va_space_get(va_range->va_space, mapped_gpu)); + + uvm_range_tree_remove(&range_tree->tree, &ext_gpu_map->node); + + // Unmap the PTEs + if (ext_gpu_map->pt_range_vec.ranges) { + membar = va_range_downgrade_membar(va_range, ext_gpu_map); + uvm_page_table_range_vec_clear_ptes(&ext_gpu_map->pt_range_vec, membar); + uvm_page_table_range_vec_deinit(&ext_gpu_map->pt_range_vec); + } + + if (deferred_free_list && ext_gpu_map->mem_handle) { + // If this is a GPU allocation, we have to prevent that GPU from going + // away until we've freed the handle. + if (ext_gpu_map->owning_gpu) + uvm_gpu_retain(ext_gpu_map->owning_gpu); + + uvm_deferred_free_object_add(deferred_free_list, + &ext_gpu_map->deferred_free, + UVM_DEFERRED_FREE_OBJECT_TYPE_EXTERNAL_ALLOCATION); + } + else { + uvm_ext_gpu_map_free_internal(ext_gpu_map); + } + + // Check if the sub-range tree is empty. Only then can the GPU be removed from + // the mapped_gpus bitmap. + if (uvm_range_tree_empty(&range_tree->tree)) + uvm_processor_mask_clear_atomic(&va_range->external.mapped_gpus, mapped_gpu->id); +} + +static NV_STATUS uvm_unmap_external(uvm_va_space_t *va_space, + NvU64 base, + NvU64 length, + const NvProcessorUuid *gpu_uuid) +{ + uvm_va_range_t *va_range; + uvm_gpu_t *gpu = NULL; + NV_STATUS status = NV_OK; + uvm_ext_gpu_range_tree_t *range_tree; + LIST_HEAD(deferred_free_list); + + if (uvm_api_range_invalid_4k(base, length)) + return NV_ERR_INVALID_ADDRESS; + + uvm_va_space_down_read(va_space); + + va_range = uvm_va_range_find(va_space, base); + if (!va_range || va_range->type != UVM_VA_RANGE_TYPE_EXTERNAL || base + length - 1 > va_range->node.end) { + status = NV_ERR_INVALID_ADDRESS; + goto out; + } + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, gpu_uuid); + if (!gpu) { + status = NV_ERR_INVALID_DEVICE; + goto out; + } + + range_tree = uvm_ext_gpu_range_tree(va_range, gpu); + uvm_mutex_lock(&range_tree->lock); + status = uvm_unmap_external_in_range(va_range, gpu, base, base + length - 1, &deferred_free_list); + uvm_mutex_unlock(&range_tree->lock); + + // If the deferred_free_list is not empty, retain the GPU which maps the + // allocation because it's the parent of dup_handle. The owning GPU (if any) + // is retained internally by the deferred free layer. + if (!list_empty(&deferred_free_list)) + uvm_gpu_retain(gpu); + +out: + uvm_va_space_up_read(va_space); + + if (!list_empty(&deferred_free_list)) { + uvm_deferred_free_object_list(&deferred_free_list); + uvm_gpu_release(gpu); + } + + return status; +} + +NV_STATUS uvm_api_unmap_external(UVM_UNMAP_EXTERNAL_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + return uvm_unmap_external(va_space, params->base, params->length, ¶ms->gpuUuid); +} + +// This destroys VA ranges created by UvmMapExternalAllocation, +// UvmMapDynamicParallelismRegion, and UvmAllocSemaphorePool *only*. VA ranges +// created by UvmMemMap and UvmAlloc go through mmap/munmap. 
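+//
+// In user-level terms (simplified): regions set up with
+// UvmMapExternalAllocation(), UvmMapDynamicParallelismRegion() or
+// UvmAllocSemaphorePool() are released with UvmFree(), while UvmMemMap() and
+// UvmAlloc() regions are torn down by munmap() of the corresponding CPU
+// mapping.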
+static NV_STATUS uvm_free(uvm_va_space_t *va_space, NvU64 base, NvU64 length) +{ + uvm_va_range_t *va_range; + NV_STATUS status = NV_OK; + uvm_global_processor_mask_t retained_mask; + LIST_HEAD(deferred_free_list); + + if (uvm_api_range_invalid_4k(base, length)) + return NV_ERR_INVALID_ADDRESS; + + uvm_va_space_down_write(va_space); + + // Non-managed ranges are defined to not require splitting, so a partial + // free attempt is an error. + // + // TODO: Bug 1763676: The length parameter may be needed for MPS. If not, it + // should be removed from the ioctl. + va_range = uvm_va_range_find(va_space, base); + if (!va_range || + (va_range->type != UVM_VA_RANGE_TYPE_EXTERNAL && + va_range->type != UVM_VA_RANGE_TYPE_SKED_REFLECTED && + va_range->type != UVM_VA_RANGE_TYPE_SEMAPHORE_POOL) || + va_range->node.start != base || + va_range->node.end != base + length - 1) { + status = NV_ERR_INVALID_ADDRESS; + goto out; + } + + if ((va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL) && + uvm_mem_mapped_on_cpu_user(va_range->semaphore_pool.mem)) { + // Semaphore pools must be first unmapped from the CPU with munmap to + // invalidate the vma. + status = NV_ERR_INVALID_ARGUMENT; + goto out; + } + + if (va_range->type == UVM_VA_RANGE_TYPE_EXTERNAL) { + // External ranges may have deferred free work, so the GPUs may have to + // be retained. Construct the mask of all the GPUs that need to be + // retained. + uvm_va_space_global_gpus_in_mask(va_space, &retained_mask, &va_range->external.mapped_gpus); + } + + uvm_va_range_destroy(va_range, &deferred_free_list); + + // If there is deferred work, retain the required GPUs. + if (!list_empty(&deferred_free_list)) + uvm_global_mask_retain(&retained_mask); + +out: + uvm_va_space_up_write(va_space); + + if (!list_empty(&deferred_free_list)) { + UVM_ASSERT(status == NV_OK); + uvm_deferred_free_object_list(&deferred_free_list); + uvm_global_mask_release(&retained_mask); + } + + return status; +} + +NV_STATUS uvm_api_free(UVM_FREE_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + return uvm_free(va_space, params->base, params->length); +} diff --git a/kernel-open/nvidia-uvm/uvm_map_external.h b/kernel-open/nvidia-uvm/uvm_map_external.h new file mode 100644 index 000000000..5f2181470 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_map_external.h @@ -0,0 +1,151 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_MAP_EXTERNAL_H__ +#define __UVM_MAP_EXTERNAL_H__ + +#include "uvm_forward_decl.h" +#include "uvm_va_range.h" +#include "uvm_tracker.h" +#include "nv_uvm_types.h" +#include "uvm_types.h" + +typedef struct +{ + NvU64 map_offset; + UvmGpuMappingType mapping_type; + UvmGpuCachingType caching_type; + UvmGpuFormatType format_type; + UvmGpuFormatElementBits element_bits; + UvmGpuCompressionType compression_type; +} uvm_map_rm_params_t; + +static uvm_ext_gpu_range_tree_t *uvm_ext_gpu_range_tree(uvm_va_range_t *va_range, uvm_gpu_t *gpu) +{ + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_EXTERNAL); + + return &va_range->external.gpu_ranges[uvm_id_gpu_index(gpu->id)]; +} + +// Returns the first external map (if any) in the gpu's range tree. +// va_range should be of type UVM_VA_RANGE_TYPE_EXTERNAL. +// The caller must hold the range tree lock. +static uvm_ext_gpu_map_t *uvm_ext_gpu_map_iter_first(uvm_va_range_t *va_range, uvm_gpu_t *gpu, NvU64 start, NvU64 end) +{ + uvm_ext_gpu_range_tree_t *range_tree; + uvm_range_tree_node_t *node; + + UVM_ASSERT(start >= va_range->node.start); + UVM_ASSERT(end <= va_range->node.end); + + range_tree = uvm_ext_gpu_range_tree(va_range, gpu); + node = uvm_range_tree_iter_first(&range_tree->tree, start, end); + return uvm_ext_gpu_map_container(node); +} + +// Returns the external map following the provided map (if any) in address order from +// the gpu's range tree. va_range should be of type UVM_VA_RANGE_TYPE_EXTERNAL. +// The caller must hold the range tree lock. +static uvm_ext_gpu_map_t *uvm_ext_gpu_map_iter_next(uvm_va_range_t *va_range, uvm_ext_gpu_map_t *ext_gpu_map, NvU64 end) +{ + uvm_ext_gpu_range_tree_t *range_tree; + uvm_range_tree_node_t *node; + + if (!ext_gpu_map) + return NULL; + + UVM_ASSERT(end <= va_range->node.end); + + range_tree = uvm_ext_gpu_range_tree(va_range, ext_gpu_map->gpu); + node = uvm_range_tree_iter_next(&range_tree->tree, &ext_gpu_map->node, end); + return uvm_ext_gpu_map_container(node); +} + +// The four iterators below require that the caller hold the gpu's range tree +// lock. +#define uvm_ext_gpu_map_for_each_in(ext_gpu_map, va_range, gpu, start, end) \ + for ((ext_gpu_map) = uvm_ext_gpu_map_iter_first((va_range), (gpu), (start), (end)); \ + (ext_gpu_map); \ + (ext_gpu_map) = uvm_ext_gpu_map_iter_next((va_range), (ext_gpu_map), (end))) + +#define uvm_ext_gpu_map_for_each_in_safe(ext_gpu_map, ext_gpu_map_next, va_range, gpu, start, end) \ + for ((ext_gpu_map) = uvm_ext_gpu_map_iter_first((va_range), (gpu), (start), (end)), \ + (ext_gpu_map_next) = uvm_ext_gpu_map_iter_next((va_range), (ext_gpu_map), (end)); \ + (ext_gpu_map); \ + (ext_gpu_map) = (ext_gpu_map_next), \ + (ext_gpu_map_next) = uvm_ext_gpu_map_iter_next((va_range), (ext_gpu_map), (end))) + +#define uvm_ext_gpu_map_for_each(ext_gpu_map, va_range, gpu) \ + uvm_ext_gpu_map_for_each_in(ext_gpu_map, va_range, gpu, (va_range)->node.start, (va_range)->node.end) + +#define uvm_ext_gpu_map_for_each_safe(ext_gpu_map, ext_gpu_map_next, va_range, gpu) \ + uvm_ext_gpu_map_for_each_in_safe(ext_gpu_map, \ + ext_gpu_map_next, \ + va_range, \ + gpu, \ + (va_range)->node.start, \ + (va_range)->node.end) + +// User-facing APIs (uvm_api_map_external_allocation, uvm_api_free) are declared +// uvm_api.h. + +// Queries RM for the PTEs appropriate to the VA range and mem_info, allocates +// page tables for the VA range, and writes the PTEs. 
+// +// va_range must have type UVM_VA_RANGE_TYPE_EXTERNAL or +// UVM_VA_RANGE_TYPE_CHANNEL. The allocation descriptor given to RM is looked up +// depending on the type of the va_range. +// For va_ranges of type UVM_VA_RANGE_TYPE_CHANNEL, the descriptor is looked up +// from the va_range. In these cases, the ext_gpu_map parameter should be NULL. +// For va_ranges of type UVM_VA_RANGE_TYPE_EXTERNAL, it is looked up from the +// ext_gpu_map parameter. +// +// This does not wait for the PTE writes to complete. The work is added to +// the output tracker. +NV_STATUS uvm_va_range_map_rm_allocation(uvm_va_range_t *va_range, + uvm_gpu_t *mapping_gpu, + const UvmGpuMemoryInfo *mem_info, + const uvm_map_rm_params_t *map_rm_params, + uvm_ext_gpu_map_t *ext_gpu_map, + uvm_tracker_t *out_tracker); + +// Removes and frees the external mapping for mapping_gpu from ext_gpu_map +// mapped within va_range. If deferred_free_list is NULL, the RM handle is freed +// immediately by this function. Otherwise the GPU which owns the allocation (if +// any) is retained and the handle is added to the list for later processing by +// uvm_deferred_free_object_list. +// +// If the mapping is a Sparse mapping, the mapping is removed and freed. +// However, since sparse mappings do not have RM handles, nothing is added to +// the deferred_free_list (if not NULL) and the GPU is no retained. +// +// The caller must hold the range tree lock for the mapping gpu and is +// responsible for making sure that mapping gpu is retained across those calls. +void uvm_ext_gpu_map_destroy(uvm_va_range_t *va_range, + uvm_ext_gpu_map_t *ext_gpu_map, + struct list_head *deferred_free_list); + +// Deferred free function which frees the RM handle and the object itself. +void uvm_ext_gpu_map_free(uvm_ext_gpu_map_t *ext_gpu_map); + +#endif // __UVM_MAP_EXTERNAL_H__ diff --git a/kernel-open/nvidia-uvm/uvm_maxwell.c b/kernel-open/nvidia-uvm/uvm_maxwell.c new file mode 100644 index 000000000..6f80a6a65 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_maxwell.c @@ -0,0 +1,72 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_gpu.h" +#include "uvm_mem.h" + +void uvm_hal_maxwell_arch_init_properties(uvm_parent_gpu_t *parent_gpu) +{ + parent_gpu->tlb_batch.va_invalidate_supported = false; + + // 128 GB should be enough for all current RM allocations and leaves enough + // space for UVM internal mappings. + // A single top level PDE covers 64 or 128 MB on Maxwell so 128 GB is fine to use. + parent_gpu->rm_va_base = 0; + parent_gpu->rm_va_size = 128ull * 1024 * 1024 * 1024; + + parent_gpu->uvm_mem_va_base = 768ull * 1024 * 1024 * 1024; + parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE; + + // We don't have a compelling use case in UVM-Lite for direct peer + // migrations between GPUs, so don't bother setting them up. + parent_gpu->peer_copy_mode = UVM_GPU_PEER_COPY_MODE_UNSUPPORTED; + + parent_gpu->max_channel_va = 1ULL << 40; + + parent_gpu->max_host_va = 1ULL << 40; + + // Maxwell can only map sysmem with 4K pages + parent_gpu->can_map_sysmem_with_large_pages = false; + + // Maxwell cannot place GPFIFO in vidmem + parent_gpu->gpfifo_in_vidmem_supported = false; + + parent_gpu->replayable_faults_supported = false; + + parent_gpu->non_replayable_faults_supported = false; + + parent_gpu->access_counters_supported = false; + + parent_gpu->fault_cancel_va_supported = false; + + parent_gpu->scoped_atomics_supported = false; + + parent_gpu->sparse_mappings_supported = false; + + parent_gpu->map_remap_larger_page_promotion = false; + + parent_gpu->smc.supported = false; + + parent_gpu->plc_supported = false; +} diff --git a/kernel-open/nvidia-uvm/uvm_maxwell_access_counter_buffer.c b/kernel-open/nvidia-uvm/uvm_maxwell_access_counter_buffer.c new file mode 100644 index 000000000..519ae0daa --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_maxwell_access_counter_buffer.c @@ -0,0 +1,64 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_gpu.h" +#include "uvm_hal.h" + +void uvm_hal_maxwell_enable_access_counter_notifications_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "enable_access_counter_notifications is not supported on GPU: %s.\n", parent_gpu->name); +} + +void uvm_hal_maxwell_disable_access_counter_notifications_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "disable_access_counter_notifications is not supported on GPU: %s.\n", parent_gpu->name); +} + +void uvm_hal_maxwell_clear_access_counter_notifications_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 get) +{ + UVM_ASSERT_MSG(false, "clear_access_counter_notifications is not supported on GPU: %s.\n", parent_gpu->name); +} + +NvU32 uvm_hal_maxwell_access_counter_buffer_entry_size_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "access_counter_buffer_entry_size is not supported on GPU: %s.\n", parent_gpu->name); + return 0; +} + +bool uvm_hal_maxwell_access_counter_buffer_entry_is_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + UVM_ASSERT_MSG(false, "access_counter_buffer_entry_is_valid is not supported on GPU: %s.\n", parent_gpu->name); + return false; +} + +void uvm_hal_maxwell_access_counter_buffer_entry_clear_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + UVM_ASSERT_MSG(false, "access_counter_buffer_entry_clear_valid is not supported on GPU: %s.\n", parent_gpu->name); +} + +void uvm_hal_maxwell_access_counter_buffer_parse_entry_unsupported(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_access_counter_buffer_entry_t *buffer_entry) +{ + UVM_ASSERT_MSG(false, "access_counter_buffer_parse_entry is not supported on GPU: %s.\n", parent_gpu->name); +} diff --git a/kernel-open/nvidia-uvm/uvm_maxwell_ce.c b/kernel-open/nvidia-uvm/uvm_maxwell_ce.c new file mode 100644 index 000000000..08404b48d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_maxwell_ce.c @@ -0,0 +1,377 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_push.h" +#include "clb06f.h" +#include "clb0b5.h" + +void uvm_hal_maxwell_ce_init(uvm_push_t *push) +{ + // Notably this sends SET_OBJECT with the CE class on subchannel 0 instead + // of the recommended by HW subchannel 4 (subchannel 4 is recommended to + // match CE usage on GRCE). For the UVM driver using subchannel 0 has the + // benefit of also verifying that we ended up on the right PBDMA though as + // SET_OBJECT with CE class on subchannel 0 would fail on GRCE. + NV_PUSH_1U(B06F, SET_OBJECT, uvm_push_get_gpu(push)->parent->rm_info.ceClass); +} + +void uvm_hal_maxwell_ce_offset_out(uvm_push_t *push, NvU64 offset_out) +{ + NV_PUSH_2U(B0B5, OFFSET_OUT_UPPER, HWVALUE(B0B5, OFFSET_OUT_UPPER, UPPER, NvOffset_HI32(offset_out)), + OFFSET_OUT_LOWER, HWVALUE(B0B5, OFFSET_OUT_LOWER, VALUE, NvOffset_LO32(offset_out))); +} + +void uvm_hal_maxwell_ce_offset_in_out(uvm_push_t *push, NvU64 offset_in, NvU64 offset_out) +{ + NV_PUSH_4U(B0B5, OFFSET_IN_UPPER, HWVALUE(B0B5, OFFSET_IN_UPPER, UPPER, NvOffset_HI32(offset_in)), + OFFSET_IN_LOWER, HWVALUE(B0B5, OFFSET_IN_LOWER, VALUE, NvOffset_LO32(offset_in)), + OFFSET_OUT_UPPER, HWVALUE(B0B5, OFFSET_OUT_UPPER, UPPER, NvOffset_HI32(offset_out)), + OFFSET_OUT_LOWER, HWVALUE(B0B5, OFFSET_OUT_LOWER, VALUE, NvOffset_LO32(offset_out))); +} + +// Perform an appropriate membar before a semaphore operation. Returns whether +// the semaphore operation should include a flush. +static bool maxwell_membar_before_semaphore(uvm_push_t *push) +{ + uvm_gpu_t *gpu; + + if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE)) { + // No MEMBAR requested, don't use a flush. + return false; + } + + if (!uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU)) { + // By default do a MEMBAR SYS and for that we can just use flush on the + // semaphore operation. + return true; + } + + // MEMBAR GPU requested, do it on the HOST and skip the CE flush as CE + // doesn't have this capability. 
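    // To summarize the three cases this helper handles:
    //   UVM_PUSH_FLAG_NEXT_MEMBAR_NONE -> no membar, no flush on the semaphore op
    //   default (MEMBAR SYS)           -> FLUSH_ENABLE on the semaphore op
    //   UVM_PUSH_FLAG_NEXT_MEMBAR_GPU  -> Host WFI + MEMBAR GPU below, no flush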
+ gpu = uvm_push_get_gpu(push); + gpu->parent->host_hal->wait_for_idle(push); + gpu->parent->host_hal->membar_gpu(push); + + return false; +} + +void uvm_hal_maxwell_ce_semaphore_release(uvm_push_t *push, NvU64 gpu_va, NvU32 payload) +{ + NvU32 flush_value; + bool use_flush; + + use_flush = maxwell_membar_before_semaphore(push); + + if (use_flush) + flush_value = HWCONST(B0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE); + else + flush_value = HWCONST(B0B5, LAUNCH_DMA, FLUSH_ENABLE, FALSE); + + NV_PUSH_3U(B0B5, SET_SEMAPHORE_A, HWVALUE(B0B5, SET_SEMAPHORE_A, UPPER, NvOffset_HI32(gpu_va)), + SET_SEMAPHORE_B, HWVALUE(B0B5, SET_SEMAPHORE_B, LOWER, NvOffset_LO32(gpu_va)), + SET_SEMAPHORE_PAYLOAD, payload); + + NV_PUSH_1U(B0B5, LAUNCH_DMA, flush_value | + HWCONST(B0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NONE) | + HWCONST(B0B5, LAUNCH_DMA, SEMAPHORE_TYPE, RELEASE_ONE_WORD_SEMAPHORE)); +} + +void uvm_hal_maxwell_ce_semaphore_reduction_inc(uvm_push_t *push, NvU64 gpu_va, NvU32 payload) +{ + NvU32 flush_value; + bool use_flush; + + use_flush = maxwell_membar_before_semaphore(push); + + if (use_flush) + flush_value = HWCONST(B0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE); + else + flush_value = HWCONST(B0B5, LAUNCH_DMA, FLUSH_ENABLE, FALSE); + + NV_PUSH_3U(B0B5, SET_SEMAPHORE_A, HWVALUE(B0B5, SET_SEMAPHORE_A, UPPER, NvOffset_HI32(gpu_va)), + SET_SEMAPHORE_B, HWVALUE(B0B5, SET_SEMAPHORE_B, LOWER, NvOffset_LO32(gpu_va)), + SET_SEMAPHORE_PAYLOAD, payload); + + NV_PUSH_1U(B0B5, LAUNCH_DMA, flush_value | + HWCONST(B0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NONE) | + HWCONST(B0B5, LAUNCH_DMA, SEMAPHORE_TYPE, RELEASE_ONE_WORD_SEMAPHORE) | + HWCONST(B0B5, LAUNCH_DMA, SEMAPHORE_REDUCTION, INC) | + HWCONST(B0B5, LAUNCH_DMA, SEMAPHORE_REDUCTION_SIGN, UNSIGNED) | + HWCONST(B0B5, LAUNCH_DMA, SEMAPHORE_REDUCTION_ENABLE, TRUE)); +} + +void uvm_hal_maxwell_ce_semaphore_timestamp(uvm_push_t *push, NvU64 gpu_va) +{ + NvU32 flush_value; + bool use_flush; + + use_flush = maxwell_membar_before_semaphore(push); + + if (use_flush) + flush_value = HWCONST(B0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE); + else + flush_value = HWCONST(B0B5, LAUNCH_DMA, FLUSH_ENABLE, FALSE); + + NV_PUSH_3U(B0B5, SET_SEMAPHORE_A, HWVALUE(B0B5, SET_SEMAPHORE_A, UPPER, NvOffset_HI32(gpu_va)), + SET_SEMAPHORE_B, HWVALUE(B0B5, SET_SEMAPHORE_B, LOWER, NvOffset_LO32(gpu_va)), + SET_SEMAPHORE_PAYLOAD, 0xdeadbeef); + + NV_PUSH_1U(B0B5, LAUNCH_DMA, flush_value | + HWCONST(B0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NONE) | + HWCONST(B0B5, LAUNCH_DMA, SEMAPHORE_TYPE, RELEASE_FOUR_WORD_SEMAPHORE)); +} + +static void maxwell_membar_after_transfer(uvm_push_t *push) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE)) + return; + + // Flush on transfers only works when paired with a semaphore release. Use a + // host WFI + MEMBAR. 
+ // http://nvbugs/1709888 + gpu->parent->host_hal->wait_for_idle(push); + + if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU)) + gpu->parent->host_hal->membar_gpu(push); + else + gpu->parent->host_hal->membar_sys(push); +} + +static NvU32 ce_aperture(uvm_aperture_t aperture) +{ + BUILD_BUG_ON(HWCONST(B0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB) != + HWCONST(B0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB)); + BUILD_BUG_ON(HWCONST(B0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM) != + HWCONST(B0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM)); + + UVM_ASSERT_MSG(aperture == UVM_APERTURE_VID || aperture == UVM_APERTURE_SYS, "aperture 0x%x\n", aperture); + + if (aperture == UVM_APERTURE_SYS) + return HWCONST(B0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM); + else + return HWCONST(B0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB); +} + +// Push SET_{SRC,DST}_PHYS mode if needed and return LAUNCH_DMA_{SRC,DST}_TYPE +// flags +NvU32 uvm_hal_maxwell_ce_phys_mode(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src) +{ + NvU32 launch_dma_src_dst_type = 0; + + if (src.is_virtual) + launch_dma_src_dst_type |= HWCONST(B0B5, LAUNCH_DMA, SRC_TYPE, VIRTUAL); + else + launch_dma_src_dst_type |= HWCONST(B0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL); + + if (dst.is_virtual) + launch_dma_src_dst_type |= HWCONST(B0B5, LAUNCH_DMA, DST_TYPE, VIRTUAL); + else + launch_dma_src_dst_type |= HWCONST(B0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL); + + if (!src.is_virtual && !dst.is_virtual) { + NV_PUSH_2U(B0B5, SET_SRC_PHYS_MODE, ce_aperture(src.aperture), + SET_DST_PHYS_MODE, ce_aperture(dst.aperture)); + } + else if (!src.is_virtual) { + NV_PUSH_1U(B0B5, SET_SRC_PHYS_MODE, ce_aperture(src.aperture)); + } + else if (!dst.is_virtual) { + NV_PUSH_1U(B0B5, SET_DST_PHYS_MODE, ce_aperture(dst.aperture)); + } + + return launch_dma_src_dst_type; +} + +// Noop, since DISABLE_PLC doesn't exist in Maxwell. +NvU32 uvm_hal_maxwell_ce_plc_mode(void) +{ + return 0; +} + +void uvm_hal_maxwell_ce_memcopy(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src, size_t size) +{ + // If >4GB copies ever become an important use case, this function should + // use multi-line transfers so we don't have to iterate (bug 1766588). 
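    // LINE_LENGTH_IN takes a 32-bit byte count, so a single LAUNCH_DMA moves at
    // most 0xFFFFFFFF bytes; larger requests are split across iterations of the
    // loop below, advancing src.address and dst.address by copy_this_time on
    // each pass.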
+ static const size_t max_single_copy_size = 0xFFFFFFFF; + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + NvU32 pipelined_value; + NvU32 launch_dma_src_dst_type; + NvU32 launch_dma_plc_mode; + bool first_operation = true; + + UVM_ASSERT_MSG(gpu->parent->ce_hal->memcopy_validate(push, dst, src), + "Memcopy validation failed in channel %s, GPU %s", + push->channel->name, + uvm_gpu_name(gpu)); + + gpu->parent->ce_hal->memcopy_patch_src(push, &src); + + launch_dma_src_dst_type = gpu->parent->ce_hal->phys_mode(push, dst, src); + launch_dma_plc_mode = gpu->parent->ce_hal->plc_mode(); + + do { + NvU32 copy_this_time = (NvU32)min(size, max_single_copy_size); + + if (first_operation && uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED)) + pipelined_value = HWCONST(B0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, PIPELINED); + else + pipelined_value = HWCONST(B0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED); + + gpu->parent->ce_hal->offset_in_out(push, src.address, dst.address); + + NV_PUSH_1U(B0B5, LINE_LENGTH_IN, copy_this_time); + + NV_PUSH_1U(B0B5, LAUNCH_DMA, + HWCONST(B0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) | + HWCONST(B0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) | + HWCONST(B0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) | + HWCONST(B0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) | + HWCONST(B0B5, LAUNCH_DMA, FLUSH_ENABLE, FALSE) | + launch_dma_src_dst_type | + launch_dma_plc_mode | + pipelined_value); + + dst.address += copy_this_time; + src.address += copy_this_time; + size -= copy_this_time; + first_operation = false; + } while (size > 0); + + maxwell_membar_after_transfer(push); +} + +void uvm_hal_maxwell_ce_memcopy_v_to_v(uvm_push_t *push, NvU64 dst_va, NvU64 src_va, size_t size) +{ + uvm_hal_maxwell_ce_memcopy(push, uvm_gpu_address_virtual(dst_va), uvm_gpu_address_virtual(src_va), size); +} + +// Push SET_DST_PHYS mode if needed and return LAUNCH_DMA_DST_TYPE flags +static NvU32 memset_push_phys_mode(uvm_push_t *push, uvm_gpu_address_t dst) +{ + if (dst.is_virtual) + return HWCONST(B0B5, LAUNCH_DMA, DST_TYPE, VIRTUAL); + + NV_PUSH_1U(B0B5, SET_DST_PHYS_MODE, ce_aperture(dst.aperture)); + return HWCONST(B0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL); +} + +static void memset_common(uvm_push_t *push, uvm_gpu_address_t dst, size_t size, size_t memset_element_size) +{ + // If >4GB memsets ever become an important use case, this function should + // use multi-line transfers so we don't have to iterate (bug 1766588). 
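    // Unlike the memcopy path above, LINE_LENGTH_IN here counts remap elements
    // rather than bytes: the memset_1/4/8 entry points divide size by the
    // element size first, and dst.address advances by
    // memset_this_time * memset_element_size on each iteration.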
+ static const size_t max_single_memset_size = 0xFFFFFFFF; + + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + NvU32 pipelined_value; + NvU32 launch_dma_dst_type; + NvU32 launch_dma_plc_mode; + + UVM_ASSERT_MSG(gpu->parent->ce_hal->memset_validate(push, dst, memset_element_size), + "Memset validation failed in channel %s, GPU %s", + push->channel->name, + uvm_gpu_name(gpu)); + + launch_dma_dst_type = memset_push_phys_mode(push, dst); + launch_dma_plc_mode = gpu->parent->ce_hal->plc_mode(); + + if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED)) + pipelined_value = HWCONST(B0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, PIPELINED); + else + pipelined_value = HWCONST(B0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED); + + do { + NvU32 memset_this_time = (NvU32)min(size, max_single_memset_size); + + gpu->parent->ce_hal->offset_out(push, dst.address); + + NV_PUSH_1U(B0B5, LINE_LENGTH_IN, memset_this_time); + + NV_PUSH_1U(B0B5, LAUNCH_DMA, + HWCONST(B0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) | + HWCONST(B0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) | + HWCONST(B0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) | + HWCONST(B0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) | + HWCONST(B0B5, LAUNCH_DMA, FLUSH_ENABLE, FALSE) | + launch_dma_dst_type | + launch_dma_plc_mode | + pipelined_value); + + dst.address += memset_this_time * memset_element_size; + size -= memset_this_time; + pipelined_value = HWCONST(B0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED); + } while (size > 0); + + maxwell_membar_after_transfer(push); +} + +void uvm_hal_maxwell_ce_memset_1(uvm_push_t *push, uvm_gpu_address_t dst, NvU8 value, size_t size) +{ + NV_PUSH_2U(B0B5, SET_REMAP_CONST_B, (NvU32)value, + SET_REMAP_COMPONENTS, + HWCONST(B0B5, SET_REMAP_COMPONENTS, DST_X, CONST_B) | + HWCONST(B0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, ONE) | + HWCONST(B0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, ONE)); + + memset_common(push, dst, size, 1); +} + +void uvm_hal_maxwell_ce_memset_4(uvm_push_t *push, uvm_gpu_address_t dst, NvU32 value, size_t size) +{ + UVM_ASSERT_MSG(size % 4 == 0, "size: %zd\n", size); + + size /= 4; + + NV_PUSH_2U(B0B5, SET_REMAP_CONST_B, value, + SET_REMAP_COMPONENTS, + HWCONST(B0B5, SET_REMAP_COMPONENTS, DST_X, CONST_B) | + HWCONST(B0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) | + HWCONST(B0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, ONE)); + + memset_common(push, dst, size, 4); +} + +void uvm_hal_maxwell_ce_memset_8(uvm_push_t *push, uvm_gpu_address_t dst, NvU64 value, size_t size) +{ + UVM_ASSERT_MSG(size % 8 == 0, "size: %zd\n", size); + + size /= 8; + + NV_PUSH_3U(B0B5, SET_REMAP_CONST_A, (NvU32)value, + SET_REMAP_CONST_B, (NvU32)(value >> 32), + SET_REMAP_COMPONENTS, + HWCONST(B0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) | + HWCONST(B0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) | + HWCONST(B0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) | + HWCONST(B0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO)); + + memset_common(push, dst, size, 8); +} + +void uvm_hal_maxwell_ce_memset_v_4(uvm_push_t *push, NvU64 dst_va, NvU32 value, size_t size) +{ + uvm_hal_maxwell_ce_memset_4(push, uvm_gpu_address_virtual(dst_va), value, size); +} diff --git a/kernel-open/nvidia-uvm/uvm_maxwell_fault_buffer.c b/kernel-open/nvidia-uvm/uvm_maxwell_fault_buffer.c new file mode 100644 index 000000000..f5c9af6ce --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_maxwell_fault_buffer.c @@ -0,0 +1,95 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission 
is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_gpu.h" +#include "uvm_hal.h" + +void uvm_hal_maxwell_enable_replayable_faults_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "enable_replayable_faults is not supported on GPU: %s.\n", parent_gpu->name); +} + +void uvm_hal_maxwell_disable_replayable_faults_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "disable_replayable_faults is not supported on GPU: %s.\n", parent_gpu->name); +} + +void uvm_hal_maxwell_clear_replayable_faults_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 get) +{ + UVM_ASSERT_MSG(false, "clear_replayable_faults is not supported on GPU: %s.\n", parent_gpu->name); +} + +NvU32 uvm_hal_maxwell_fault_buffer_read_put_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "fault_buffer_read_put is not supported on GPU: %s.\n", parent_gpu->name); + return 0; +} + +NvU32 uvm_hal_maxwell_fault_buffer_read_get_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "fault_buffer_read_get is not supported on GPU: %s.\n", parent_gpu->name); + return 0; +} + +void uvm_hal_maxwell_fault_buffer_write_get_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + UVM_ASSERT_MSG(false, "fault_buffer_write_get is not supported on GPU: %s.\n", parent_gpu->name); +} + +NvU8 uvm_hal_maxwell_fault_buffer_get_ve_id_unsupported(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type) +{ + UVM_ASSERT_MSG(false, "fault_buffer_get_ve_id is not supported on Maxwell GPUs.\n"); + return 0; +} + +void uvm_hal_maxwell_fault_buffer_parse_entry_unsupported(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_fault_buffer_entry_t *buffer_entry) +{ + UVM_ASSERT_MSG(false, "fault_buffer_parse_entry is not supported on GPU: %s.\n", parent_gpu->name); +} + +bool uvm_hal_maxwell_fault_buffer_entry_is_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + UVM_ASSERT_MSG(false, "fault_buffer_entry_is_valid is not supported on GPU: %s.\n", parent_gpu->name); + return false; +} + +void uvm_hal_maxwell_fault_buffer_entry_clear_valid_unsupported(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + UVM_ASSERT_MSG(false, "fault_buffer_entry_clear_valid is not supported on GPU: %s.\n", parent_gpu->name); +} + +NvU32 uvm_hal_maxwell_fault_buffer_entry_size_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "fault_buffer_entry_size is not supported on GPU: %s.\n", parent_gpu->name); + return 0; +} + 
+void uvm_hal_maxwell_fault_buffer_parse_non_replayable_entry_unsupported(uvm_parent_gpu_t *parent_gpu, + void *fault_packet, + uvm_fault_buffer_entry_t *buffer_entry) +{ + UVM_ASSERT_MSG(false, "fault_buffer_parse_non_replayable_entry is not supported on GPU: %s.\n", parent_gpu->name); +} + diff --git a/kernel-open/nvidia-uvm/uvm_maxwell_host.c b/kernel-open/nvidia-uvm/uvm_maxwell_host.c new file mode 100644 index 000000000..74c8a6cce --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_maxwell_host.c @@ -0,0 +1,326 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_hal_types.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "cla16f.h" +#include "clb06f.h" + +void uvm_hal_maxwell_host_wait_for_idle(uvm_push_t *push) +{ + NV_PUSH_1U(A16F, WFI, 0); +} + +void uvm_hal_maxwell_host_membar_sys(uvm_push_t *push) +{ + NV_PUSH_1U(A16F, MEM_OP_B, + HWCONST(A16F, MEM_OP_B, OPERATION, SYSMEMBAR_FLUSH)); +} + +void uvm_hal_maxwell_host_tlb_invalidate_all_a16f(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + uvm_membar_t membar) +{ + NvU32 target; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + + // Only Pascal+ supports invalidating down from a specific depth. + (void)depth; + + (void)membar; + + if (pdb.aperture == UVM_APERTURE_VID) + target = HWCONST(A16F, MEM_OP_A, TLB_INVALIDATE_TARGET, VID_MEM); + else + target = HWCONST(A16F, MEM_OP_A, TLB_INVALIDATE_TARGET, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + NV_PUSH_2U(A16F, MEM_OP_A, target | + HWVALUE(A16F, MEM_OP_A, TLB_INVALIDATE_ADDR, pdb.address), + MEM_OP_B, HWCONST(A16F, MEM_OP_B, OPERATION, MMU_TLB_INVALIDATE) | + HWCONST(A16F, MEM_OP_B, MMU_TLB_INVALIDATE_PDB, ONE) | + HWCONST(A16F, MEM_OP_B, MMU_TLB_INVALIDATE_GPC, ENABLE)); +} + +void uvm_hal_maxwell_host_tlb_invalidate_all_b06f(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + uvm_membar_t membar) +{ + NvU32 target; + NvU32 pdb_lo; + NvU32 pdb_hi; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + + // Only Pascal+ supports invalidating down from a specific depth. 
+ (void)depth; + + (void)membar; + + if (pdb.aperture == UVM_APERTURE_VID) + target = HWCONST(B06F, MEM_OP_C, TLB_INVALIDATE_TARGET, VID_MEM); + else + target = HWCONST(B06F, MEM_OP_C, TLB_INVALIDATE_TARGET, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + pdb_lo = pdb.address & HWMASK(B06F, MEM_OP_C, TLB_INVALIDATE_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(B06F, MEM_OP_C, TLB_INVALIDATE_ADDR_LO); + + NV_PUSH_2U(B06F, MEM_OP_C, target | + HWCONST(B06F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWCONST(B06F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWVALUE(B06F, MEM_OP_C, TLB_INVALIDATE_ADDR_LO, pdb_lo), + MEM_OP_D, HWCONST(B06F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE) | + HWVALUE(B06F, MEM_OP_D, TLB_INVALIDATE_ADDR_HI, pdb_hi)); +} + +void uvm_hal_maxwell_host_tlb_invalidate_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + NvU64 base, + NvU64 size, + NvU32 page_size, + uvm_membar_t membar) +{ + // No per VA invalidate on Maxwell, redirect to invalidate all. + uvm_push_get_gpu(push)->parent->host_hal->tlb_invalidate_all(push, pdb, depth, membar); +} + +void uvm_hal_maxwell_host_tlb_invalidate_test(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + UVM_TEST_INVALIDATE_TLB_PARAMS *params) +{ + NvU32 target_pdb = 0; + NvU32 invalidate_gpc_value; + + // Only Pascal+ supports invalidating down from a specific depth. We + // invalidate all + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + if (pdb.aperture == UVM_APERTURE_VID) + target_pdb = HWCONST(A16F, MEM_OP_A, TLB_INVALIDATE_TARGET, VID_MEM); + else + target_pdb = HWCONST(A16F, MEM_OP_A, TLB_INVALIDATE_TARGET, SYS_MEM_COHERENT); + target_pdb |= HWVALUE(A16F, MEM_OP_A, TLB_INVALIDATE_ADDR, pdb.address); + + if (params->disable_gpc_invalidate) + invalidate_gpc_value = HWCONST(A16F, MEM_OP_B, MMU_TLB_INVALIDATE_GPC, DISABLE); + else + invalidate_gpc_value = HWCONST(A16F, MEM_OP_B, MMU_TLB_INVALIDATE_GPC, ENABLE); + + NV_PUSH_2U(A16F, MEM_OP_A, target_pdb, + MEM_OP_B, HWCONST(A16F, MEM_OP_B, OPERATION, MMU_TLB_INVALIDATE) | + HWCONST(A16F, MEM_OP_B, MMU_TLB_INVALIDATE_PDB, ONE) | + invalidate_gpc_value); +} + +void uvm_hal_maxwell_host_noop(uvm_push_t *push, NvU32 size) +{ + UVM_ASSERT_MSG(size % 4 == 0, "size %u\n", size); + + if (size == 0) + return; + + // size is in bytes so divide by the method size (4 bytes) + size /= 4; + + while (size > 0) { + // noop_this_time includes the NOP method itself and hence can be + // up to COUNT_MAX + 1. + NvU32 noop_this_time = min(UVM_METHOD_COUNT_MAX + 1, size); + + // -1 for the NOP method itself. 
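        // For example, noop_this_time == 5 emits one NOP method header plus 4
        // non-incrementing payload words, i.e. 5 method words (20 bytes) consumed.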
+ NV_PUSH_NU_NONINC(A16F, NOP, noop_this_time - 1); + + size -= noop_this_time; + } +} + +void uvm_hal_maxwell_host_interrupt(uvm_push_t *push) +{ + NV_PUSH_1U(A16F, NON_STALL_INTERRUPT, 0); +} + +void uvm_hal_maxwell_host_semaphore_release(uvm_push_t *push, NvU64 gpu_va, NvU32 payload) +{ + NvU32 sem_lo; + UVM_ASSERT(!(NvOffset_LO32(gpu_va) & ~HWSHIFTMASK(A16F, SEMAPHOREB, OFFSET_LOWER))); + sem_lo = READ_HWVALUE(NvOffset_LO32(gpu_va), A16F, SEMAPHOREB, OFFSET_LOWER); + + uvm_hal_wfi_membar(push, uvm_push_get_and_reset_membar_flag(push)); + + NV_PUSH_4U(A16F, SEMAPHOREA, HWVALUE(A16F, SEMAPHOREA, OFFSET_UPPER, NvOffset_HI32(gpu_va)), + SEMAPHOREB, HWVALUE(A16F, SEMAPHOREB, OFFSET_LOWER, sem_lo), + SEMAPHOREC, payload, + SEMAPHORED, HWCONST(A16F, SEMAPHORED, OPERATION, RELEASE) | + HWCONST(A16F, SEMAPHORED, RELEASE_SIZE, 4BYTE)| + HWCONST(A16F, SEMAPHORED, RELEASE_WFI, DIS)); +} + +void uvm_hal_maxwell_host_semaphore_acquire(uvm_push_t *push, NvU64 gpu_va, NvU32 payload) +{ + NvU32 sem_lo; + UVM_ASSERT(!(NvOffset_LO32(gpu_va) & ~HWSHIFTMASK(A16F, SEMAPHOREB, OFFSET_LOWER))); + sem_lo = READ_HWVALUE(NvOffset_LO32(gpu_va), A16F, SEMAPHOREB, OFFSET_LOWER); + NV_PUSH_4U(A16F, SEMAPHOREA, HWVALUE(A16F, SEMAPHOREA, OFFSET_UPPER, NvOffset_HI32(gpu_va)), + SEMAPHOREB, HWVALUE(A16F, SEMAPHOREB, OFFSET_LOWER, sem_lo), + SEMAPHOREC, payload, + SEMAPHORED, HWCONST(A16F, SEMAPHORED, ACQUIRE_SWITCH, ENABLED) | + HWCONST(A16F, SEMAPHORED, OPERATION, ACQ_GEQ)); + +} + +void uvm_hal_maxwell_host_semaphore_timestamp(uvm_push_t *push, NvU64 gpu_va) +{ + NvU32 sem_lo; + UVM_ASSERT(!(NvOffset_LO32(gpu_va) & ~HWSHIFTMASK(A16F, SEMAPHOREB, OFFSET_LOWER))); + sem_lo = READ_HWVALUE(NvOffset_LO32(gpu_va), A16F, SEMAPHOREB, OFFSET_LOWER); + + uvm_hal_wfi_membar(push, uvm_push_get_and_reset_membar_flag(push)); + + NV_PUSH_4U(A16F, SEMAPHOREA, HWVALUE(A16F, SEMAPHOREA, OFFSET_UPPER, NvOffset_HI32(gpu_va)), + SEMAPHOREB, HWVALUE(A16F, SEMAPHOREB, OFFSET_LOWER, sem_lo), + SEMAPHOREC, 0xdeadbeef, + SEMAPHORED, HWCONST(A16F, SEMAPHORED, OPERATION, RELEASE) | + HWCONST(A16F, SEMAPHORED, RELEASE_SIZE, 16BYTE)| + HWCONST(A16F, SEMAPHORED, RELEASE_WFI, DIS)); +} + +void uvm_hal_maxwell_host_set_gpfifo_entry(NvU64 *fifo_entry, NvU64 pushbuffer_va, NvU32 pushbuffer_length) +{ + NvU64 fifo_entry_value; + + UVM_ASSERT(!uvm_global_is_suspended()); + UVM_ASSERT_MSG(pushbuffer_va % 4 == 0, "pushbuffer va unaligned: %llu\n", pushbuffer_va); + UVM_ASSERT_MSG(pushbuffer_length % 4 == 0, "pushbuffer length unaligned: %u\n", pushbuffer_length); + + fifo_entry_value = HWVALUE(A16F, GP_ENTRY0, GET, NvU64_LO32(pushbuffer_va) >> 2); + fifo_entry_value |= (NvU64)(HWVALUE(A16F, GP_ENTRY1, GET_HI, NvU64_HI32(pushbuffer_va)) | + HWVALUE(A16F, GP_ENTRY1, LENGTH, pushbuffer_length >> 2) | + HWCONST(A16F, GP_ENTRY1, PRIV, KERNEL)) << 32; + + *fifo_entry = fifo_entry_value; +} + +void uvm_hal_maxwell_host_write_gpu_put(uvm_channel_t *channel, NvU32 gpu_put) +{ + UVM_GPU_WRITE_ONCE(*channel->channel_info.gpPut, gpu_put); +} + +void uvm_hal_maxwell_host_init_noop(uvm_push_t *push) +{ +} + +void uvm_hal_maxwell_replay_faults_unsupported(uvm_push_t *push, uvm_fault_replay_type_t type) +{ + UVM_ASSERT_MSG(false, "host replay_faults called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_cancel_faults_global_unsupported(uvm_push_t *push, uvm_gpu_phys_address_t instance_ptr) +{ + UVM_ASSERT_MSG(false, "host cancel_faults_global called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_cancel_faults_targeted_unsupported(uvm_push_t *push, + uvm_gpu_phys_address_t 
instance_ptr, + NvU32 gpc_id, + NvU32 client_id) +{ + UVM_ASSERT_MSG(false, "host cancel_faults_targeted called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_cancel_faults_va_unsupported(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + const uvm_fault_buffer_entry_t *fault_entry, + uvm_fault_cancel_va_mode_t cancel_va_mode) +{ + UVM_ASSERT_MSG(false, "host cancel_faults_va called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_host_clear_faulted_channel_sw_method_unsupported(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry) +{ + UVM_ASSERT_MSG(false, "host clear_faulted_channel_sw_method called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_host_clear_faulted_channel_method_unsupported(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry) +{ + UVM_ASSERT_MSG(false, "host clear_faulted_channel_method called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_host_clear_faulted_channel_register_unsupported(uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *buffer_entry) +{ + UVM_ASSERT_MSG(false, "host clear_faulted_channel_register called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_access_counter_clear_all_unsupported(uvm_push_t *push) +{ + UVM_ASSERT_MSG(false, "host access_counter_clear_all called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_access_counter_clear_type_unsupported(uvm_push_t *push, uvm_access_counter_type_t type) +{ + UVM_ASSERT_MSG(false, "host access_counter_clear_type called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_access_counter_clear_targeted_unsupported(uvm_push_t *push, + const uvm_access_counter_buffer_entry_t *buffer_entry) +{ + UVM_ASSERT_MSG(false, "host access_counter_clear_targeted called on Maxwell GPU\n"); +} + +NvU64 uvm_hal_maxwell_get_time(uvm_gpu_t *gpu) +{ + NvU32 time0; + NvU32 time1_first, time1_second; + + // When reading the TIME, TIME_1 should be read first, followed by TIME_0, + // then a second reading of TIME_1 should be done. If the two readings of + // do not match, this process should be repeated. + // + // Doing that will catch the 4-second wrap-around + do { + time1_first = UVM_GPU_READ_ONCE(*gpu->time.time1_register); + rmb(); + time0 = UVM_GPU_READ_ONCE(*gpu->time.time0_register); + rmb(); + time1_second = UVM_GPU_READ_ONCE(*gpu->time.time1_register); + } while (time1_second != time1_first); + + return (((NvU64)time1_first) << 32) + time0; +} diff --git a/kernel-open/nvidia-uvm/uvm_maxwell_mmu.c b/kernel-open/nvidia-uvm/uvm_maxwell_mmu.c new file mode 100644 index 000000000..4f3e05552 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_maxwell_mmu.c @@ -0,0 +1,382 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + +// For Maxwell, UVM page tree 'depth' maps to hardware as follows: +// +// 64k Big page size: +// UVM depth HW level VA bits +// 0 PDE 39:26 +// 1 PTE Big / PTE 4k 25:16 / 25:12 +// +// 128k Big page size (same levels, just different VA bits): +// UVM depth HW level VA bits +// 0 PDE 39:27 +// 1 PTE Big / PTE 4k 26:17 / 26:12 + +#include "uvm_types.h" +#include "uvm_forward_decl.h" +#include "uvm_gpu.h" +#include "uvm_mmu.h" +#include "uvm_push_macros.h" +#include "hwref/maxwell/gm107/dev_mmu.h" + +#define MMU_BIG 0 +#define MMU_SMALL 1 + +static NvU32 entries_per_index_maxwell(NvU32 depth) +{ + UVM_ASSERT(depth < 2); + if (depth == 0) + return 2; + return 1; +} + +static NvLength entry_offset_maxwell(NvU32 depth, NvU32 page_size) +{ + UVM_ASSERT(depth < 2); + if (page_size == UVM_PAGE_SIZE_4K && depth == 0) + return MMU_SMALL; + return MMU_BIG; +} + +static NvU64 big_half_pde_maxwell(uvm_mmu_page_table_alloc_t *phys_alloc) +{ + NvU64 pde_bits = 0; + if (phys_alloc != NULL) { + NvU64 address = phys_alloc->addr.address >> NV_MMU_PDE_ADDRESS_SHIFT; + pde_bits |= HWCONST64(_MMU, PDE, VOL_BIG, TRUE); + switch (phys_alloc->addr.aperture) { + case UVM_APERTURE_SYS: + pde_bits |= HWCONST64(_MMU, PDE, APERTURE_BIG, SYSTEM_COHERENT_MEMORY) | + HWVALUE64(_MMU, PDE, ADDRESS_BIG_SYS, address); + break; + case UVM_APERTURE_VID: + pde_bits |= HWCONST64(_MMU, PDE, APERTURE_BIG, VIDEO_MEMORY) | + HWVALUE64(_MMU, PDE, ADDRESS_BIG_VID, phys_alloc->addr.address >> NV_MMU_PDE_ADDRESS_SHIFT); + break; + default: + UVM_ASSERT_MSG(0, "Invalid big aperture: %d\n", phys_alloc->addr.aperture); + break; + } + } + return pde_bits; +} + +static NvU64 small_half_pde_maxwell(uvm_mmu_page_table_alloc_t *phys_alloc) +{ + NvU64 pde_bits = 0; + if (phys_alloc != NULL) { + NvU64 address = phys_alloc->addr.address >> NV_MMU_PDE_ADDRESS_SHIFT; + pde_bits |= HWCONST64(_MMU, PDE, VOL_SMALL, TRUE); + switch (phys_alloc->addr.aperture) { + case UVM_APERTURE_SYS: + pde_bits |= HWCONST64(_MMU, PDE, APERTURE_SMALL, SYSTEM_COHERENT_MEMORY) | + HWVALUE64(_MMU, PDE, ADDRESS_SMALL_SYS, address); + break; + case UVM_APERTURE_VID: + pde_bits |= HWCONST64(_MMU, PDE, APERTURE_SMALL, VIDEO_MEMORY) | + HWVALUE64(_MMU, PDE, ADDRESS_SMALL_VID, address); + break; + default: + UVM_ASSERT_MSG(0, "Invalid small aperture: %d\n", phys_alloc->addr.aperture); + break; + } + } + return pde_bits; +} + +static void make_pde_maxwell(void *entry, uvm_mmu_page_table_alloc_t **phys_allocs, NvU32 depth) +{ + NvU64 pde_bits = 0; + UVM_ASSERT(depth == 0); + pde_bits |= HWCONST64(_MMU, PDE, SIZE, FULL); + pde_bits |= big_half_pde_maxwell(phys_allocs[MMU_BIG]) | small_half_pde_maxwell(phys_allocs[MMU_SMALL]); + + *(NvU64 *)entry = pde_bits; +} + +static NvLength entry_size_maxwell(NvU32 depth) +{ + UVM_ASSERT(depth < 2); + return 8; +} + +static NvU32 index_bits_maxwell_64(NvU32 depth, NvU32 page_size) +{ + UVM_ASSERT(depth < 2); + UVM_ASSERT(page_size == UVM_PAGE_SIZE_4K || + page_size == UVM_PAGE_SIZE_64K || 
+ (depth == 0 && page_size == UVM_PAGE_SIZE_AGNOSTIC)); + + if (depth == 0) { + return 14; + } + else { + if (page_size == UVM_PAGE_SIZE_4K) + return 14; + else + return 10; + } +} + +static NvU32 index_bits_maxwell_128(NvU32 depth, NvU32 page_size) +{ + UVM_ASSERT(depth < 2); + UVM_ASSERT(page_size == UVM_PAGE_SIZE_4K || + page_size == UVM_PAGE_SIZE_128K || + (depth == 0 && page_size == UVM_PAGE_SIZE_AGNOSTIC)); + + if (depth == 0) { + return 13; + } + else { + if (page_size == UVM_PAGE_SIZE_4K) + return 15; + else + return 10; + } +} + +static NvU32 num_va_bits_maxwell(void) +{ + return 40; +} + +static NvLength allocation_size_maxwell_64(NvU32 depth, NvU32 page_size) +{ + return entry_size_maxwell(depth) << index_bits_maxwell_64(depth, page_size); +} + +static NvLength allocation_size_maxwell_128(NvU32 depth, NvU32 page_size) +{ + return entry_size_maxwell(depth) << index_bits_maxwell_128(depth, page_size); +} + +static NvU32 page_table_depth_maxwell(NvU32 page_size) +{ + return 1; +} + +static NvU32 page_sizes_maxwell_128(void) +{ + return UVM_PAGE_SIZE_128K | UVM_PAGE_SIZE_4K; +} + +static NvU32 page_sizes_maxwell_64(void) +{ + return UVM_PAGE_SIZE_64K | UVM_PAGE_SIZE_4K; +} + +static NvU64 unmapped_pte_maxwell(NvU32 page_size) +{ + // Setting the privilege bit on an otherwise-zeroed big PTE causes the + // corresponding 4k PTEs to be ignored. This allows the invalidation of a + // mixed PDE range to be much faster. + if (page_size == UVM_PAGE_SIZE_4K) + return 0; + + // When VALID == 0, MMU still reads the VOL and PRIV fields. VOL == 1 + // indicates that the PTE is sparse, so make sure we don't use it. + return HWCONST64(_MMU, PTE, VALID, FALSE) | + HWCONST64(_MMU, PTE, PRIVILEGE, TRUE) | + HWCONST64(_MMU, PTE, VOL, FALSE); +} + +static NvU64 make_pte_maxwell(uvm_aperture_t aperture, NvU64 address, uvm_prot_t prot, NvU64 flags) +{ + NvU64 pte_bits = 0; + NvU8 aperture_bits = 0; + + UVM_ASSERT(prot != UVM_PROT_NONE); + UVM_ASSERT((flags & ~UVM_MMU_PTE_FLAGS_MASK) == 0); + + // valid 0:0 + pte_bits |= HWCONST64(_MMU, PTE, VALID, TRUE); + + // privilege 1:1 + pte_bits |= HWCONST64(_MMU, PTE, PRIVILEGE, FALSE); + + // read-only 2:2 (used by everything except L1 and GCC) + if (prot == UVM_PROT_READ_ONLY) + pte_bits |= HWCONST64(_MMU, PTE, READ_ONLY, TRUE); + else + pte_bits |= HWCONST64(_MMU, PTE, READ_ONLY, FALSE); + + // encrypted 3:3 + pte_bits |= HWCONST64(_MMU, PTE, ENCRYPTED, FALSE); + + address >>= NV_MMU_PTE_ADDRESS_SHIFT; + if (aperture == UVM_APERTURE_SYS) { + // sys address 31:4 + pte_bits |= HWVALUE64(_MMU, PTE, ADDRESS_SYS, address); + } + else { + // vid address 28:4 + pte_bits |= HWVALUE64(_MMU, PTE, ADDRESS_VID, address); + + // peer 29:31 + if (aperture != UVM_APERTURE_VID) + pte_bits |= HWVALUE64(_MMU, PTE, ADDRESS_VID_PEER, UVM_APERTURE_PEER_ID(aperture)); + } + + // volatile 32:32 + if (flags & UVM_MMU_PTE_FLAGS_CACHED) + pte_bits |= HWCONST64(_MMU, PTE, VOL, FALSE); + else + pte_bits |= HWCONST64(_MMU, PTE, VOL, TRUE); + + // aperture 34:32 + if (aperture == UVM_APERTURE_SYS) + aperture_bits = NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY; + else if (aperture == UVM_APERTURE_VID) + aperture_bits = NV_MMU_PTE_APERTURE_VIDEO_MEMORY; + else if (aperture >= UVM_APERTURE_PEER_0 && aperture <= UVM_APERTURE_PEER_7) + aperture_bits = NV_MMU_PTE_APERTURE_PEER_MEMORY; + else + UVM_ASSERT_MSG(NV_FALSE, "Invalid location: %d", aperture); + + pte_bits |= HWVALUE64(_MMU, PTE, APERTURE, aperture_bits); + + // lock 35:35 + pte_bits |= HWCONST64(_MMU, PTE, LOCK, FALSE); + + // kind 
43:36 + pte_bits |= HWCONST64(_MMU, PTE, KIND, PITCH); + + // comptagline 61:44 + pte_bits |= HWVALUE64(_MMU, PTE, COMPTAGLINE, 0); + + // read disable 62:62 (used only by L1 and GCC) + pte_bits |= HWCONST64(_MMU, PTE, READ_DISABLE, FALSE); + + // write disable 63:63 (used only by L1 and GCC: everything else uses READ_ONLY) + if (prot == UVM_PROT_READ_ONLY) + pte_bits |= HWCONST64(_MMU, PTE, WRITE_DISABLE, TRUE); + else + pte_bits |= HWCONST64(_MMU, PTE, WRITE_DISABLE, FALSE); + + return pte_bits; +} + +static NvU64 make_sked_reflected_pte_maxwell(void) +{ + NvU64 pte_bits = 0; + + pte_bits |= HWCONST64(_MMU, PTE, VALID, TRUE); + pte_bits |= HWCONST64(_MMU, PTE, KIND, SMSKED_MESSAGE); + + return pte_bits; +} + +static NvU64 poisoned_pte_maxwell(void) +{ + // An invalid PTE is also fatal on Maxwell, but a PRIV violation will + // immediately identify bad PTE usage. + + // Engines with priv accesses won't fault on the priv PTE, so add a backup + // mechanism using an impossible memory address. This will trigger an + // interrupt starting with GM20x. On earlier GPUs the upper bits will + // silently be dropped. + // + // This address has to fit within 37 bits (max address width of vidmem) and + // be aligned to page_size. + NvU64 phys_addr = 0x1bad000000ULL; + NvU64 pte_bits = make_pte_maxwell(UVM_APERTURE_VID, phys_addr, UVM_PROT_READ_ONLY, UVM_MMU_PTE_FLAGS_NONE); + + return WRITE_HWCONST64(pte_bits, _MMU, PTE, PRIVILEGE, TRUE); +} + +// Sparse mappings are not supported. +static NvU64 make_sparse_pte_maxwell_unsupported(void) +{ + UVM_ASSERT_MSG(0, "Sparse mappings unsupported on pre-Pascal GPUs\n"); + return poisoned_pte_maxwell(); +} + +static uvm_mmu_mode_hal_t maxwell_64_mmu_mode_hal = +{ + .make_pte = make_pte_maxwell, + .make_sked_reflected_pte = make_sked_reflected_pte_maxwell, + .make_sparse_pte = make_sparse_pte_maxwell_unsupported, + .unmapped_pte = unmapped_pte_maxwell, + .poisoned_pte = poisoned_pte_maxwell, + .make_pde = make_pde_maxwell, + .entry_size = entry_size_maxwell, + .index_bits = index_bits_maxwell_64, + .entries_per_index = entries_per_index_maxwell, + .entry_offset = entry_offset_maxwell, + .num_va_bits = num_va_bits_maxwell, + .allocation_size = allocation_size_maxwell_64, + .page_table_depth = page_table_depth_maxwell, + .page_sizes = page_sizes_maxwell_64 +}; + +static uvm_mmu_mode_hal_t maxwell_128_mmu_mode_hal = +{ + .make_pte = make_pte_maxwell, + .make_sked_reflected_pte = make_sked_reflected_pte_maxwell, + .make_sparse_pte = make_sparse_pte_maxwell_unsupported, + .unmapped_pte = unmapped_pte_maxwell, + .poisoned_pte = poisoned_pte_maxwell, + .make_pde = make_pde_maxwell, + .entry_size = entry_size_maxwell, + .index_bits = index_bits_maxwell_128, + .entries_per_index = entries_per_index_maxwell, + .entry_offset = entry_offset_maxwell, + .num_va_bits = num_va_bits_maxwell, + .allocation_size = allocation_size_maxwell_128, + .page_table_depth = page_table_depth_maxwell, + .page_sizes = page_sizes_maxwell_128 +}; + +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_maxwell(NvU32 big_page_size) +{ + UVM_ASSERT(big_page_size == UVM_PAGE_SIZE_64K || big_page_size == UVM_PAGE_SIZE_128K); + if (big_page_size == UVM_PAGE_SIZE_64K) + return &maxwell_64_mmu_mode_hal; + + return &maxwell_128_mmu_mode_hal; +} + +void uvm_hal_maxwell_mmu_enable_prefetch_faults_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + UVM_ASSERT_MSG(false, "mmu enable_prefetch_faults called on Maxwell GPU\n"); +} + +void uvm_hal_maxwell_mmu_disable_prefetch_faults_unsupported(uvm_parent_gpu_t *parent_gpu) +{ + 
UVM_ASSERT_MSG(false, "mmu disable_prefetch_faults called on Maxwell GPU\n"); +} + +uvm_mmu_engine_type_t uvm_hal_maxwell_mmu_engine_id_to_type_unsupported(NvU16 mmu_engine_id) +{ + UVM_ASSERT(0); + return UVM_MMU_ENGINE_TYPE_COUNT; +} + +NvU16 uvm_hal_maxwell_mmu_client_id_to_utlb_id_unsupported(NvU16 client_id) +{ + UVM_ASSERT(0); + return 0; +} diff --git a/kernel-open/nvidia-uvm/uvm_mem.c b/kernel-open/nvidia-uvm/uvm_mem.c new file mode 100644 index 000000000..460f786da --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_mem.c @@ -0,0 +1,1349 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_mem.h" +#include "uvm_mmu.h" +#include "uvm_processors.h" +#include "uvm_va_space.h" +#include "uvm_gpu.h" +#include "uvm_global.h" +#include "uvm_kvmalloc.h" +#include "uvm_push.h" +#include "uvm_range_allocator.h" +#include "uvm_hal.h" +#include "uvm_linux.h" + +static uvm_range_allocator_t g_free_ranges; +static bool g_mem_initialized; + +NV_STATUS uvm_mem_global_init(void) +{ + NV_STATUS status = uvm_range_allocator_init(UVM_MEM_VA_SIZE, &g_free_ranges); + if (status != NV_OK) + return status; + + g_mem_initialized = true; + + return NV_OK; +} + +void uvm_mem_global_exit(void) +{ + if (!g_mem_initialized) + return; + + uvm_range_allocator_deinit(&g_free_ranges); +} + +static bool vidmem_can_be_mapped(uvm_mem_t *vidmem, bool is_user_space) +{ + UVM_ASSERT(uvm_mem_is_vidmem(vidmem)); + + // Mapping a vidmem allocation on a user VA space is currently unsupported, + // because there is no use case. 
+ if (is_user_space) + return false; + + return true; +} + +static bool sysmem_can_be_mapped(uvm_mem_t *sysmem) +{ + UVM_ASSERT(uvm_mem_is_sysmem(sysmem)); + + + + + + + + return true; +} + +static bool mem_can_be_mapped_on_cpu(uvm_mem_t *mem, bool is_user_space) +{ + if (uvm_mem_is_sysmem(mem)) + return sysmem_can_be_mapped(mem); + + if (!vidmem_can_be_mapped(mem, is_user_space)) + return false; + + return mem->backing_gpu->parent->numa_info.enabled && PAGE_ALIGNED(mem->chunk_size); +} + +static bool mem_can_be_mapped_on_cpu_kernel(uvm_mem_t *mem) +{ + return mem_can_be_mapped_on_cpu(mem, false); +} + +static bool mem_can_be_mapped_on_cpu_user(uvm_mem_t *mem) +{ + return mem_can_be_mapped_on_cpu(mem, true); +} + +static bool mem_can_be_mapped_on_gpu(uvm_mem_t *mem, uvm_gpu_t *gpu, bool is_user_space) +{ + if (uvm_mem_is_sysmem(mem)) + return sysmem_can_be_mapped(mem); + + if (!vidmem_can_be_mapped(mem, is_user_space)) + return false; + + return uvm_mem_is_local_vidmem(mem, gpu); +} + +static bool mem_can_be_mapped_on_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + return mem_can_be_mapped_on_gpu(mem, gpu, false); +} + +static bool mem_can_be_mapped_on_gpu_user(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + return mem_can_be_mapped_on_gpu(mem, gpu, true); +} + +bool uvm_mem_mapped_on_gpu_user(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + if (mem->user == NULL) + return false; + + return uvm_global_processor_mask_test(&mem->user->mapped_on, gpu->global_id); +} + +bool uvm_mem_mapped_on_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + return uvm_global_processor_mask_test(&mem->kernel.mapped_on, gpu->global_id); +} + +bool uvm_mem_mapped_on_cpu_user(uvm_mem_t *mem) +{ + if (mem->user == NULL) + return false; + + return uvm_global_processor_mask_test(&mem->user->mapped_on, UVM_GLOBAL_ID_CPU); +} + +bool uvm_mem_mapped_on_cpu_kernel(uvm_mem_t *mem) +{ + return uvm_global_processor_mask_test(&mem->kernel.mapped_on, UVM_GLOBAL_ID_CPU); +} + +static void mem_set_mapped_on_gpu_user(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(mem->user != NULL); + UVM_ASSERT(mem_can_be_mapped_on_gpu_user(mem, gpu)); + UVM_ASSERT(!uvm_mem_mapped_on_gpu_user(mem, gpu)); + + uvm_global_processor_mask_set(&mem->user->mapped_on, gpu->global_id); +} + +static void mem_set_mapped_on_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(mem_can_be_mapped_on_gpu_kernel(mem, gpu)); + UVM_ASSERT(!uvm_mem_mapped_on_gpu_kernel(mem, gpu)); + + uvm_global_processor_mask_set(&mem->kernel.mapped_on, gpu->global_id); +} + +static void mem_set_mapped_on_cpu_user(uvm_mem_t *mem) +{ + UVM_ASSERT(mem->user != NULL); + UVM_ASSERT(mem_can_be_mapped_on_cpu_user(mem)); + UVM_ASSERT(!uvm_mem_mapped_on_cpu_user(mem)); + + uvm_global_processor_mask_set(&mem->user->mapped_on, UVM_GLOBAL_ID_CPU); +} + +static void mem_set_mapped_on_cpu_kernel(uvm_mem_t *mem) +{ + UVM_ASSERT(mem_can_be_mapped_on_cpu_kernel(mem)); + UVM_ASSERT(!uvm_mem_mapped_on_cpu_kernel(mem)); + + uvm_global_processor_mask_set(&mem->kernel.mapped_on, UVM_GLOBAL_ID_CPU); +} + +static void mem_clear_mapped_on_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + uvm_global_processor_mask_clear(&mem->kernel.mapped_on, gpu->global_id); +} + +static void mem_clear_mapped_on_gpu_user(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(mem->user != NULL); + + uvm_global_processor_mask_clear(&mem->user->mapped_on, gpu->global_id); +} + +static void mem_clear_mapped_on_cpu_user(uvm_mem_t *mem) +{ + UVM_ASSERT(mem->user != NULL); + + uvm_global_processor_mask_clear(&mem->user->mapped_on, UVM_GLOBAL_ID_CPU); 
+} + +static void mem_clear_mapped_on_cpu_kernel(uvm_mem_t *mem) +{ + uvm_global_processor_mask_clear(&mem->kernel.mapped_on, UVM_GLOBAL_ID_CPU); +} + +static bool sysmem_mapped_on_gpu_phys(uvm_mem_t *sysmem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(uvm_mem_is_sysmem(sysmem)); + + return uvm_global_processor_mask_test(&sysmem->sysmem.mapped_on_phys, gpu->global_id); +} + +static void sysmem_set_mapped_on_gpu_phys(uvm_mem_t *sysmem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(uvm_mem_is_sysmem(sysmem)); + UVM_ASSERT(!sysmem_mapped_on_gpu_phys(sysmem, gpu)); + + uvm_global_processor_mask_set(&sysmem->sysmem.mapped_on_phys, gpu->global_id); +} + +static void sysmem_clear_mapped_on_gpu_phys(uvm_mem_t *sysmem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(uvm_mem_is_sysmem(sysmem)); + + uvm_global_processor_mask_clear(&sysmem->sysmem.mapped_on_phys, gpu->global_id); +} + +NV_STATUS uvm_mem_translate_gpu_attributes(const UvmGpuMappingAttributes *attrs, + uvm_va_space_t *va_space, + uvm_gpu_t **gpu_out, + uvm_mem_gpu_mapping_attrs_t *attrs_out) +{ + uvm_gpu_t *gpu; + + switch (attrs->gpuMappingType) { + case UvmGpuMappingTypeDefault: + break; + case UvmGpuMappingTypeReadWriteAtomic: + attrs_out->protection = UVM_PROT_READ_WRITE_ATOMIC; + break; + case UvmGpuMappingTypeReadWrite: + attrs_out->protection = UVM_PROT_READ_WRITE; + break; + case UvmGpuMappingTypeReadOnly: + attrs_out->protection = UVM_PROT_READ_ONLY; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + switch (attrs->gpuCachingType) { + case UvmGpuCachingTypeDefault: + break; + case UvmGpuCachingTypeForceUncached: + attrs_out->is_cacheable = false; + break; + case UvmGpuCachingTypeForceCached: + attrs_out->is_cacheable = true; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, &attrs->gpuUuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + if (gpu_out) + *gpu_out = gpu; + + return NV_OK; +} + +static struct page *uvm_virt_to_page(const void *addr) +{ + if (virt_addr_valid(addr)) + return virt_to_page(addr); + + if (is_vmalloc_addr(addr)) + return vmalloc_to_page(addr); + + return NULL; +} + +uvm_chunk_sizes_mask_t uvm_mem_kernel_chunk_sizes(uvm_gpu_t *gpu) +{ + // Get the mmu mode hal directly as the internal address space tree has not + // been created yet. + uvm_mmu_mode_hal_t *hal = gpu->parent->arch_hal->mmu_mode_hal(gpu->big_page.internal_size); + NvU32 page_sizes = hal->page_sizes(); + + return (uvm_chunk_sizes_mask_t)(page_sizes & UVM_CHUNK_SIZES_MASK); +} + +static NvU32 mem_pick_chunk_size(uvm_mem_t *mem) +{ + NvU32 biggest_page_size; + NvU32 chunk_size; + + if (uvm_mem_is_sysmem(mem)) + return PAGE_SIZE; + + biggest_page_size = uvm_mmu_biggest_page_size_up_to(&mem->backing_gpu->address_space_tree, UVM_CHUNK_SIZE_MAX); + + if (mem->size < mem->backing_gpu->big_page.internal_size) + chunk_size = UVM_PAGE_SIZE_4K; + else if (mem->size < biggest_page_size) + chunk_size = mem->backing_gpu->big_page.internal_size; + else + chunk_size = biggest_page_size; + + // When UVM_PAGE_SIZE_DEFAULT is used on NUMA-enabled GPUs, we force + // chunk_size to be PAGE_SIZE at least, to allow CPU mappings. + if (mem->backing_gpu->parent->numa_info.enabled) + chunk_size = max(chunk_size, (NvU32)PAGE_SIZE); + + return chunk_size; +} + +static NvU32 mem_pick_gpu_page_size(uvm_mem_t *mem, uvm_gpu_t *gpu, uvm_page_tree_t *gpu_page_tree) +{ + if (uvm_mem_is_vidmem(mem)) { + // For vidmem allocations the chunk size is picked out of the supported + // page sizes and can be used directly. 
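+        // For example (illustrative), an allocation backed by 2M vidmem
+        // chunks is mapped with 2M PTEs, and one backed by 64K chunks with
+        // 64K PTEs.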
+ return mem->chunk_size; + } + + // For sysmem, check whether the GPU supports mapping it with large pages. + if (gpu->parent->can_map_sysmem_with_large_pages) { + // If it's supported, pick the largest page size not bigger than + // the chunk size. + return uvm_mmu_biggest_page_size_up_to(gpu_page_tree, mem->chunk_size); + } + + // Otherwise just use 4K. + return UVM_PAGE_SIZE_4K; +} + +static void mem_free_vidmem_chunks(uvm_mem_t *mem) +{ + size_t i; + + UVM_ASSERT(uvm_mem_is_vidmem(mem)); + + if (!mem->vidmem.chunks) + return; + + for (i = 0; i < mem->chunks_count; ++i) { + // On allocation error PMM guarantees the chunks array to be zeroed so + // just check for NULL. + if (mem->vidmem.chunks[i] == NULL) + break; + uvm_pmm_gpu_free(&mem->backing_gpu->pmm, mem->vidmem.chunks[i], NULL); + } + + uvm_kvfree(mem->vidmem.chunks); + mem->vidmem.chunks = NULL; +} + +static void mem_free_sysmem_dma_chunks(uvm_mem_t *mem) +{ + size_t i; + NvU32 gpu_index; + + UVM_ASSERT(uvm_mem_is_sysmem_dma(mem)); + gpu_index = uvm_global_id_gpu_index(mem->dma_owner->global_id); + + if (!mem->sysmem.pages || !mem->sysmem.va) + goto end; + + for (i = 0; i < mem->chunks_count; ++i) { + if (!mem->sysmem.va[i]) + break; + + uvm_gpu_dma_free_page(mem->dma_owner->parent, + mem->sysmem.va[i], + mem->sysmem.dma_addrs[gpu_index][i]); + } + +end: + sysmem_clear_mapped_on_gpu_phys(mem, mem->dma_owner); + + uvm_kvfree(mem->sysmem.dma_addrs[gpu_index]); + mem->sysmem.dma_addrs[gpu_index] = NULL; + + uvm_kvfree(mem->sysmem.pages); + mem->sysmem.pages = NULL; + + uvm_kvfree(mem->sysmem.va); + mem->sysmem.va = NULL; +} + +static void mem_free_sysmem_chunks(uvm_mem_t *mem) +{ + size_t i; + + UVM_ASSERT(uvm_mem_is_sysmem(mem)); + + if (!mem->sysmem.pages) + return; + + for (i = 0; i < mem->chunks_count; ++i) { + if (!mem->sysmem.pages[i]) + break; + __free_pages(mem->sysmem.pages[i], get_order(mem->chunk_size)); + } + + uvm_kvfree(mem->sysmem.pages); + mem->sysmem.pages = NULL; +} + +static void mem_free_chunks(uvm_mem_t *mem) +{ + if (uvm_mem_is_vidmem(mem)) + mem_free_vidmem_chunks(mem); + else if (uvm_mem_is_sysmem_dma(mem)) + mem_free_sysmem_dma_chunks(mem); + else + mem_free_sysmem_chunks(mem); +} + +static NV_STATUS mem_alloc_dma_addrs(uvm_mem_t *mem, const uvm_gpu_t *gpu) +{ + NvU64 *dma_addrs = NULL; + NvU32 gpu_index = uvm_global_id_gpu_index(gpu->global_id); + + dma_addrs = uvm_kvmalloc_zero(sizeof(*dma_addrs) * mem->chunks_count); + if (!dma_addrs) + return NV_ERR_NO_MEMORY; + + mem->sysmem.dma_addrs[gpu_index] = dma_addrs; + + return NV_OK; +} + +static gfp_t sysmem_allocation_gfp_flags(int order, bool zero) +{ + gfp_t gfp_flags = NV_UVM_GFP_FLAGS; + + if (zero) + gfp_flags |= __GFP_ZERO; + + // High-order page allocations require the __GFP_COMP flag to work with + // vm_insert_page. + if (order > 0) + gfp_flags |= __GFP_COMP; + + return gfp_flags; +} + + + + + + +// There is a tighter coupling between allocation and mapping because of the +// allocator UVM must use. Hence, this function does the equivalent of +// uvm_mem_map_gpu_phys(). +// +// In case of failure, the caller is required to handle cleanup by calling +// uvm_mem_free +static NV_STATUS mem_alloc_sysmem_dma_chunks(uvm_mem_t *mem, struct mm_struct *mm, gfp_t gfp_flags) +{ + size_t i; + NV_STATUS status; + NvU64 *dma_addrs; + + UVM_ASSERT_MSG(mem->chunk_size == PAGE_SIZE, + "mem->chunk_size is 0x%x. 
PAGE_SIZE is only supported.", + mem->chunk_size); + UVM_ASSERT(uvm_mem_is_sysmem_dma(mem)); + + mem->sysmem.pages = uvm_kvmalloc_zero(sizeof(*mem->sysmem.pages) * mem->chunks_count); + mem->sysmem.va = uvm_kvmalloc_zero(sizeof(*mem->sysmem.va) * mem->chunks_count); + if (!mem->sysmem.pages || !mem->sysmem.va) + goto err_no_mem; + + status = mem_alloc_dma_addrs(mem, mem->dma_owner); + if (status != NV_OK) + goto error; + + dma_addrs = mem->sysmem.dma_addrs[uvm_global_id_gpu_index(mem->dma_owner->global_id)]; + + for (i = 0; i < mem->chunks_count; ++i) { + mem->sysmem.va[i] = uvm_gpu_dma_alloc_page(mem->dma_owner->parent, gfp_flags, &dma_addrs[i]); + if (!mem->sysmem.va[i]) + goto err_no_mem; + + mem->sysmem.pages[i] = uvm_virt_to_page(mem->sysmem.va[i]); + if (!mem->sysmem.pages[i]) + goto err_no_mem; + } + + sysmem_set_mapped_on_gpu_phys(mem, mem->dma_owner); + + return NV_OK; + +err_no_mem: + status = NV_ERR_NO_MEMORY; +error: + mem_free_sysmem_dma_chunks(mem); + return status; +} + +// In case of failure, the caller is required to handle cleanup by calling +// uvm_mem_free +static NV_STATUS mem_alloc_sysmem_chunks(uvm_mem_t *mem, struct mm_struct *mm, gfp_t gfp_flags) +{ + size_t i; + int order; + + UVM_ASSERT(uvm_mem_is_sysmem(mem) && !uvm_mem_is_sysmem_dma(mem)); + + mem->sysmem.pages = uvm_kvmalloc_zero(sizeof(*mem->sysmem.pages) * mem->chunks_count); + if (!mem->sysmem.pages) + return NV_ERR_NO_MEMORY; + + order = get_order(mem->chunk_size); + for (i = 0; i < mem->chunks_count; ++i) { + mem->sysmem.pages[i] = alloc_pages(gfp_flags, order); + if (!mem->sysmem.pages[i]) + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +// In case of failure, the caller is required to handle cleanup by calling +// uvm_mem_free +static NV_STATUS mem_alloc_vidmem_chunks(uvm_mem_t *mem, bool zero) +{ + NV_STATUS status; + + UVM_ASSERT(uvm_mem_is_vidmem(mem)); + + // TODO: Bug 2446832: A non-zeroing request may not be obeyed because PMM + // does not support explicit allocation of non-zeroed (or zeroed) chunks. + // + // The zeroing case can be implemented even without resolving that bug, by + // clearing the chunks after PMM allocation. But this functionality has not + // been implemented, because the only expected use case is a memory that + // gets mapped on user space, and vidmem never is. 
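+    //
+    // Until that is resolved, the assert below enforces that callers never
+    // request zeroed vidmem.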
+ UVM_ASSERT(!zero); + + mem->vidmem.chunks = uvm_kvmalloc_zero(mem->chunks_count * sizeof(*mem->vidmem.chunks)); + if (!mem->vidmem.chunks) + return NV_ERR_NO_MEMORY; + + status = uvm_pmm_gpu_alloc_kernel(&mem->backing_gpu->pmm, + mem->chunks_count, + mem->chunk_size, + UVM_PMM_ALLOC_FLAGS_NONE, + mem->vidmem.chunks, + NULL); + if (status != NV_OK) { + UVM_ERR_PRINT("pmm_gpu_alloc(count=%zd, size=0x%x) failed: %s\n", + mem->chunks_count, + mem->chunk_size, + nvstatusToString(status)); + return status; + } + + return NV_OK; +} + +static NV_STATUS mem_alloc_chunks(uvm_mem_t *mem, struct mm_struct *mm, bool zero) +{ + if (uvm_mem_is_sysmem(mem)) { + gfp_t gfp_flags; + uvm_memcg_context_t memcg_context; + NV_STATUS status; + + UVM_ASSERT(PAGE_ALIGNED(mem->chunk_size)); + gfp_flags = sysmem_allocation_gfp_flags(get_order(mem->chunk_size), zero); + if (UVM_CGROUP_ACCOUNTING_SUPPORTED() && mm) + gfp_flags |= NV_UVM_GFP_FLAGS_ACCOUNT; + + uvm_memcg_context_start(&memcg_context, mm); + if (uvm_mem_is_sysmem_dma(mem)) + status = mem_alloc_sysmem_dma_chunks(mem, mm, gfp_flags); + else + status = mem_alloc_sysmem_chunks(mem, mm, gfp_flags); + + uvm_memcg_context_end(&memcg_context); + return status; + } + + return mem_alloc_vidmem_chunks(mem, zero); +} + +static const char *mem_physical_source(uvm_mem_t *mem) +{ + if (uvm_mem_is_vidmem(mem)) + return uvm_gpu_name(mem->backing_gpu); + + return "CPU"; +} + +NV_STATUS uvm_mem_map_kernel(uvm_mem_t *mem, const uvm_global_processor_mask_t *mask) +{ + uvm_gpu_t *gpu; + NV_STATUS status; + + if (!mask) + return NV_OK; + + if (uvm_global_processor_mask_test(mask, UVM_GLOBAL_ID_CPU)) { + status = uvm_mem_map_cpu_kernel(mem); + if (status != NV_OK) + return status; + } + + for_each_global_gpu_in_mask(gpu, mask) { + status = uvm_mem_map_gpu_kernel(mem, gpu); + if (status != NV_OK) + return status; + } + return NV_OK; +} + +NV_STATUS uvm_mem_alloc(const uvm_mem_alloc_params_t *params, uvm_mem_t **mem_out) +{ + NV_STATUS status; + uvm_mem_t *mem = NULL; + + UVM_ASSERT(params->size > 0); + + mem = uvm_kvmalloc_zero(sizeof(*mem)); + if (mem == NULL) + return NV_ERR_NO_MEMORY; + + mem->backing_gpu = params->backing_gpu; + mem->dma_owner = params->dma_owner; + UVM_ASSERT(!mem->dma_owner || !mem->backing_gpu); + + mem->size = params->size; + mem->chunk_size = params->page_size; + if (mem->chunk_size == UVM_PAGE_SIZE_DEFAULT) + mem->chunk_size = mem_pick_chunk_size(mem); + + UVM_ASSERT(mem->chunk_size > 0); + + mem->physical_allocation_size = UVM_ALIGN_UP(mem->size, mem->chunk_size); + mem->chunks_count = mem->physical_allocation_size / mem->chunk_size; + + status = mem_alloc_chunks(mem, params->mm, params->zero); + if (status != NV_OK) + goto error; + + *mem_out = mem; + return NV_OK; + +error: + uvm_mem_free(mem); + return status; +} + +static NV_STATUS mem_init_user_mapping(uvm_mem_t *mem, uvm_va_space_t *user_va_space, void *user_addr) +{ + UVM_ASSERT(user_va_space); + UVM_ASSERT(user_addr); + + // If the user structure exists, the VA space and address should match + if (mem->user != NULL) { + UVM_ASSERT(mem->user->va_space == user_va_space); + UVM_ASSERT(mem->user->addr == user_addr); + return NV_OK; + } + + UVM_ASSERT(IS_ALIGNED((NvU64)user_addr, mem->chunk_size)); + UVM_ASSERT(mem->physical_allocation_size == mem->size); + + mem->user = uvm_kvmalloc_zero(sizeof(*mem->user)); + if (mem->user == NULL) + return NV_ERR_NO_MEMORY; + + mem->user->va_space = user_va_space; + mem->user->addr = user_addr; + + return NV_OK; +} + +static void 
mem_deinit_user_mapping(uvm_mem_t *mem) +{ + if (mem->user == NULL) + return; + + if (!uvm_global_processor_mask_empty(&mem->user->mapped_on)) + return; + + uvm_kvfree(mem->user); + mem->user = NULL; +} + +static NvU64 reserved_gpu_va(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(mem->kernel.range_alloc.aligned_start + mem->physical_allocation_size < gpu->parent->uvm_mem_va_size); + + return gpu->parent->uvm_mem_va_base + mem->kernel.range_alloc.aligned_start; +} + +static struct page *mem_cpu_page(uvm_mem_t *mem, NvU64 offset) +{ + struct page *base_page = mem->sysmem.pages[offset / mem->chunk_size]; + + UVM_ASSERT_MSG(PAGE_ALIGNED(offset), "offset 0x%llx\n", offset); + + offset = offset % mem->chunk_size; + return pfn_to_page(page_to_pfn(base_page) + offset / PAGE_SIZE); +} + +static NV_STATUS mem_map_cpu_to_sysmem_kernel(uvm_mem_t *mem) +{ + struct page **pages = mem->sysmem.pages; + size_t num_pages = mem->physical_allocation_size / PAGE_SIZE; + pgprot_t prot = PAGE_KERNEL; + + UVM_ASSERT(uvm_mem_is_sysmem(mem)); + + // If chunk size is different than PAGE_SIZE then create a temporary array + // of all the pages to map so that vmap() can be used. + if (mem->chunk_size != PAGE_SIZE) { + size_t page_index; + pages = uvm_kvmalloc(sizeof(*pages) * num_pages); + if (!pages) + return NV_ERR_NO_MEMORY; + for (page_index = 0; page_index < num_pages; ++page_index) + pages[page_index] = mem_cpu_page(mem, page_index * PAGE_SIZE); + } + + + + + + + mem->kernel.cpu_addr = vmap(pages, num_pages, VM_MAP, prot); + + if (mem->chunk_size != PAGE_SIZE) + uvm_kvfree(pages); + + if (!mem->kernel.cpu_addr) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +static NV_STATUS mem_map_cpu_to_vidmem_kernel(uvm_mem_t *mem) +{ + struct page **pages; + size_t num_chunk_pages = mem->chunk_size / PAGE_SIZE; + size_t num_pages = mem->physical_allocation_size / PAGE_SIZE; + size_t page_index; + size_t chunk_index; + + UVM_ASSERT(uvm_mem_is_vidmem(mem)); + + pages = uvm_kvmalloc(sizeof(*pages) * num_pages); + if (!pages) + return NV_ERR_NO_MEMORY; + + page_index = 0; + + for (chunk_index = 0; chunk_index < mem->chunks_count; ++chunk_index) { + uvm_gpu_chunk_t *chunk = mem->vidmem.chunks[chunk_index]; + struct page *page = uvm_gpu_chunk_to_page(&mem->backing_gpu->pmm, chunk); + size_t chunk_page_index; + + for (chunk_page_index = 0; chunk_page_index < num_chunk_pages; ++chunk_page_index) + pages[page_index++] = page + chunk_page_index; + } + UVM_ASSERT(page_index == num_pages); + + mem->kernel.cpu_addr = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL); + + uvm_kvfree(pages); + + if (!mem->kernel.cpu_addr) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_mem_unmap_cpu_kernel(uvm_mem_t *mem) +{ + if (!uvm_mem_mapped_on_cpu_kernel(mem)) + return; + + vunmap(mem->kernel.cpu_addr); + mem->kernel.cpu_addr = NULL; + mem_clear_mapped_on_cpu_kernel(mem); +} + +static NV_STATUS mem_map_cpu_to_sysmem_user(uvm_mem_t *mem, struct vm_area_struct *vma) +{ + NV_STATUS status; + NvU64 offset; + + UVM_ASSERT(mem->user != NULL); + UVM_ASSERT(uvm_mem_is_sysmem(mem)); + uvm_assert_mmap_lock_locked(vma->vm_mm); + + // TODO: Bug 1995015: high-order page allocations need to be allocated as + // compound pages in order to be able to use vm_insert_page on them. This + // is not currently being exercised because the only allocations using this + // are semaphore pools (which typically use a single page). 
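+    //
+    // The loop below inserts the backing memory into the vma one PAGE_SIZE
+    // page at a time; mem_cpu_page() resolves the CPU page within the
+    // (possibly larger) chunk covering each offset.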
+ for (offset = 0; offset < mem->physical_allocation_size; offset += PAGE_SIZE) { + int ret = vm_insert_page(vma, (unsigned long)mem->user->addr + offset, mem_cpu_page(mem, offset)); + if (ret) { + UVM_ASSERT_MSG(ret == -ENOMEM, "ret: %d\n", ret); + status = errno_to_nv_status(ret); + goto error; + } + } + + return NV_OK; + +error: + unmap_mapping_range(&mem->user->va_space->mapping, (size_t)mem->user->addr, mem->physical_allocation_size, 1); + return status; +} + +void uvm_mem_unmap_cpu_user(uvm_mem_t *mem) +{ + if (!uvm_mem_mapped_on_cpu_user(mem)) + return; + + unmap_mapping_range(&mem->user->va_space->mapping, (size_t)mem->user->addr, mem->physical_allocation_size, 1); + mem_clear_mapped_on_cpu_user(mem); + mem_deinit_user_mapping(mem); +} + +NV_STATUS uvm_mem_map_cpu_user(uvm_mem_t *mem, uvm_va_space_t *user_va_space, struct vm_area_struct *vma) +{ + NV_STATUS status; + void *user_addr; + + UVM_ASSERT(mem); + UVM_ASSERT(mem_can_be_mapped_on_cpu_user(mem)); + + if (uvm_mem_mapped_on_cpu_user(mem)) + return NV_OK; + + UVM_ASSERT((vma->vm_end - vma->vm_start) == mem->size); + + user_addr = (void *) (uintptr_t)vma->vm_start; + + status = mem_init_user_mapping(mem, user_va_space, user_addr); + if (status != NV_OK) + return status; + + status = mem_map_cpu_to_sysmem_user(mem, vma); + if (status != NV_OK) + goto cleanup; + + mem_set_mapped_on_cpu_user(mem); + + return NV_OK; + +cleanup: + mem_deinit_user_mapping(mem); + return status; +} + +NV_STATUS uvm_mem_map_cpu_kernel(uvm_mem_t *mem) +{ + NV_STATUS status; + + UVM_ASSERT(mem); + UVM_ASSERT(mem_can_be_mapped_on_cpu_kernel(mem)); + + if (uvm_mem_mapped_on_cpu_kernel(mem)) + return NV_OK; + + if (uvm_mem_is_sysmem(mem)) + status = mem_map_cpu_to_sysmem_kernel(mem); + else + status = mem_map_cpu_to_vidmem_kernel(mem); + + if (status != NV_OK) + return status; + + mem_set_mapped_on_cpu_kernel(mem); + + return NV_OK; +} + +static void sysmem_unmap_gpu_phys(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + NvU64 *dma_addrs = mem->sysmem.dma_addrs[uvm_global_id_gpu_index(gpu->global_id)]; + NvU32 i; + + UVM_ASSERT(uvm_mem_is_sysmem(mem)); + UVM_ASSERT(gpu != mem->dma_owner); + UVM_ASSERT(dma_addrs); + + for (i = 0; i < mem->chunks_count; ++i) { + if (dma_addrs[i] == 0) { + // The DMA address can only be 0 when cleaning up after a failed + // partial map_gpu_sysmem_iommu() operation. 
+ break; + } + uvm_gpu_unmap_cpu_pages(gpu, dma_addrs[i], mem->chunk_size); + dma_addrs[i] = 0; + } + + uvm_kvfree(dma_addrs); + mem->sysmem.dma_addrs[uvm_global_id_gpu_index(gpu->global_id)] = NULL; +} + +static NV_STATUS sysmem_map_gpu_phys(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + NV_STATUS status; + size_t i; + + UVM_ASSERT(uvm_mem_is_sysmem(mem)); + UVM_ASSERT(gpu != mem->dma_owner); + + status = mem_alloc_dma_addrs(mem, gpu); + if (status != NV_OK) + return status; + + for (i = 0; i < mem->chunks_count; ++i) { + status = uvm_gpu_map_cpu_pages(gpu, + mem->sysmem.pages[i], + mem->chunk_size, + &mem->sysmem.dma_addrs[uvm_global_id_gpu_index(gpu->global_id)][i]); + if (status != NV_OK) + goto error; + } + + return NV_OK; + +error: + sysmem_unmap_gpu_phys(mem, gpu); + return status; +} + +static uvm_gpu_chunk_t *mem_get_chunk(uvm_mem_t *mem, size_t mem_offset, size_t *offset_in_chunk) +{ + size_t chunk_index = uvm_div_pow2_64(mem_offset, mem->chunk_size); + + if (offset_in_chunk) + *offset_in_chunk = mem_offset & (mem->chunk_size - 1); + + UVM_ASSERT(uvm_mem_is_vidmem(mem)); + return mem->vidmem.chunks[chunk_index]; +} + +static uvm_gpu_phys_address_t mem_gpu_physical_vidmem(uvm_mem_t *mem, size_t offset) +{ + size_t chunk_offset; + uvm_gpu_chunk_t *chunk = mem_get_chunk(mem, offset, &chunk_offset); + return uvm_gpu_phys_address(UVM_APERTURE_VID, chunk->address + chunk_offset); +} + +static uvm_gpu_phys_address_t mem_gpu_physical_sysmem(uvm_mem_t *mem, uvm_gpu_t *gpu, size_t offset) +{ + NvU64 *dma_addrs = mem->sysmem.dma_addrs[uvm_global_id_gpu_index(gpu->global_id)]; + NvU64 dma_addr = dma_addrs[offset / mem->chunk_size]; + + UVM_ASSERT(uvm_mem_is_sysmem(mem)); + UVM_ASSERT(sysmem_mapped_on_gpu_phys(mem, gpu)); + + return uvm_gpu_phys_address(UVM_APERTURE_SYS, dma_addr + offset % mem->chunk_size); +} + +static bool mem_check_range(uvm_mem_t *mem, NvU64 offset, NvU64 size) +{ + UVM_ASSERT(size != 0); + UVM_ASSERT_MSG(UVM_ALIGN_DOWN(offset, mem->chunk_size) == UVM_ALIGN_DOWN(offset + size - 1, mem->chunk_size), + "offset %llu size %llu page_size %u\n", + offset, + size, + mem->chunk_size); + UVM_ASSERT_MSG(offset / mem->chunk_size < mem->chunks_count, "offset %llu\n", offset); + return true; +} + +uvm_gpu_phys_address_t uvm_mem_gpu_physical(uvm_mem_t *mem, uvm_gpu_t *gpu, NvU64 offset, NvU64 size) +{ + UVM_ASSERT(mem_check_range(mem, offset, size)); + + if (uvm_mem_is_vidmem(mem)) { + UVM_ASSERT(uvm_mem_is_local_vidmem(mem, gpu)); + + return mem_gpu_physical_vidmem(mem, offset); + } + + return mem_gpu_physical_sysmem(mem, gpu, offset); +} + +uvm_gpu_address_t uvm_mem_gpu_address_copy(uvm_mem_t *mem, uvm_gpu_t *accessing_gpu, NvU64 offset, NvU64 size) +{ + uvm_gpu_address_t copy_addr; + size_t chunk_offset; + uvm_gpu_chunk_t *chunk; + + UVM_ASSERT(mem_check_range(mem, offset, size)); + + if (uvm_mem_is_sysmem(mem) || uvm_mem_is_local_vidmem(mem, accessing_gpu)) + return uvm_mem_gpu_address_physical(mem, accessing_gpu, offset, size); + + // Peer GPUs may need to use some form of translation (identity mappings, + // indirect peers) to copy. 
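+    // uvm_pmm_gpu_peer_copy_address() below returns an address the accessing
+    // GPU can use for Copy Engine transfers to reach the chunk on the owning
+    // GPU.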
+ chunk = mem_get_chunk(mem, offset, &chunk_offset); + copy_addr = uvm_pmm_gpu_peer_copy_address(&mem->backing_gpu->pmm, chunk, accessing_gpu); + copy_addr.address += chunk_offset; + return copy_addr; +} + +typedef struct uvm_mem_pte_maker_data_struct +{ + uvm_mem_t *mem; + const uvm_mem_gpu_mapping_attrs_t *attrs; +} uvm_mem_pte_maker_data_t; + +static NvU64 mem_pte_maker(uvm_page_table_range_vec_t *range_vec, NvU64 offset, void *vp_data) +{ + uvm_mem_pte_maker_data_t *data = (uvm_mem_pte_maker_data_t *)vp_data; + uvm_page_tree_t *tree = range_vec->tree; + uvm_gpu_t *gpu = tree->gpu; + uvm_gpu_phys_address_t phys = uvm_mem_gpu_physical(data->mem, gpu, offset, range_vec->page_size); + + return tree->hal->make_pte(phys.aperture, + phys.address, + data->attrs->protection, + data->attrs->is_cacheable ? UVM_MMU_PTE_FLAGS_CACHED : UVM_MMU_PTE_FLAGS_NONE); +} + +static void mem_unmap_gpu(uvm_mem_t *mem, uvm_gpu_t *gpu, uvm_page_table_range_vec_t **range_vec) +{ + NV_STATUS status; + uvm_membar_t tlb_membar = UVM_MEMBAR_SYS; + + if (uvm_mem_is_local_vidmem(mem, gpu)) + tlb_membar = UVM_MEMBAR_GPU; + + status = uvm_page_table_range_vec_clear_ptes(*range_vec, tlb_membar); + if (status != NV_OK) + UVM_ERR_PRINT("Clearing PTEs failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + + uvm_page_table_range_vec_destroy(*range_vec); + *range_vec = NULL; +} + +static NV_STATUS mem_map_gpu(uvm_mem_t *mem, + uvm_gpu_t *gpu, + NvU64 gpu_va, + uvm_page_tree_t *tree, + const uvm_mem_gpu_mapping_attrs_t *attrs, + uvm_page_table_range_vec_t **range_vec) +{ + NV_STATUS status; + NvU32 page_size; + uvm_pmm_alloc_flags_t pmm_flags = UVM_PMM_ALLOC_FLAGS_EVICT; + + uvm_mem_pte_maker_data_t pte_maker_data = { + .mem = mem, + .attrs = attrs + }; + + if (!uvm_gpu_can_address(gpu, gpu_va, mem->size)) + return NV_ERR_OUT_OF_RANGE; + + page_size = mem_pick_gpu_page_size(mem, gpu, tree); + UVM_ASSERT_MSG(uvm_mmu_page_size_supported(tree, page_size), "page_size 0x%x\n", page_size); + + + + + + + + + + + + + + status = uvm_page_table_range_vec_create(tree, + gpu_va, + mem->physical_allocation_size, + page_size, + pmm_flags, + range_vec); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init page mapping at [0x%llx, 0x%llx): %s, GPU %s\n", + gpu_va, + gpu_va + mem->physical_allocation_size, + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + status = uvm_page_table_range_vec_write_ptes(*range_vec, UVM_MEMBAR_NONE, mem_pte_maker, &pte_maker_data); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to write PTEs for mapping at [0x%llx, 0x%llx): %s, GPU %s\n", + gpu_va, + gpu_va + mem->physical_allocation_size, + nvstatusToString(status), + uvm_gpu_name(gpu)); + goto error; + } + + return NV_OK; + +error: + mem_unmap_gpu(mem, gpu, range_vec); + return status; +} + +static NV_STATUS mem_init_gpu_kernel_range(uvm_mem_t *mem) +{ + if (mem->kernel.range_alloc.node != NULL) + return NV_OK; + + return uvm_range_allocator_alloc(&g_free_ranges, + mem->physical_allocation_size, + mem->chunk_size, + &mem->kernel.range_alloc); +} + +static void mem_deinit_gpu_kernel_range(uvm_mem_t *mem) +{ + if (mem->kernel.range_alloc.node == NULL) + return; + + // Do not remove the range allocation if there is any GPU where the memory + // is still mapped on kernel space. 
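+    // The same range allocation offset is used to compute the reserved VA on
+    // every GPU (see reserved_gpu_va()), so the range can only be returned to
+    // the allocator once the last kernel mapping is gone.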
+ if (UVM_GLOBAL_ID_IS_VALID(uvm_global_processor_mask_find_first_gpu_id(&mem->kernel.mapped_on))) + return; + + uvm_range_allocator_free(&g_free_ranges, &mem->kernel.range_alloc); +} + +NV_STATUS uvm_mem_map_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + NV_STATUS status; + NvU64 gpu_va; + uvm_page_table_range_vec_t **range_vec; + uvm_mem_gpu_mapping_attrs_t attrs = { + .protection = UVM_PROT_READ_WRITE_ATOMIC, + .is_cacheable = uvm_mem_is_vidmem(mem) + }; + + UVM_ASSERT(mem_can_be_mapped_on_gpu_kernel(mem, gpu)); + + if (uvm_mem_mapped_on_gpu_kernel(mem, gpu)) + return NV_OK; + + status = uvm_mem_map_gpu_phys(mem, gpu); + if (status != NV_OK) + return status; + + status = mem_init_gpu_kernel_range(mem); + if (status != NV_OK) + return status; + + gpu_va = uvm_parent_gpu_canonical_address(gpu->parent, reserved_gpu_va(mem, gpu)); + range_vec = &mem->kernel.range_vecs[uvm_global_id_gpu_index(gpu->global_id)]; + + status = mem_map_gpu(mem, gpu, gpu_va, &gpu->address_space_tree, &attrs, range_vec); + if (status != NV_OK) + goto cleanup; + + mem_set_mapped_on_gpu_kernel(mem, gpu); + + return NV_OK; + +cleanup: + mem_deinit_gpu_kernel_range(mem); + + return status; +} + +NV_STATUS uvm_mem_map_gpu_user(uvm_mem_t *mem, + uvm_gpu_t *gpu, + uvm_va_space_t *user_va_space, + void *user_addr, + const uvm_mem_gpu_mapping_attrs_t *attrs) +{ + NV_STATUS status; + uvm_gpu_va_space_t *gpu_va_space; + uvm_page_table_range_vec_t **range_vec; + + UVM_ASSERT(mem_can_be_mapped_on_gpu_user(mem, gpu)); + uvm_assert_rwsem_locked(&user_va_space->lock); + + if (uvm_mem_mapped_on_gpu_user(mem, gpu)) + return NV_OK; + + status = uvm_mem_map_gpu_phys(mem, gpu); + if (status != NV_OK) + return status; + + status = mem_init_user_mapping(mem, user_va_space, user_addr); + if (status != NV_OK) + return status; + + gpu_va_space = uvm_gpu_va_space_get(mem->user->va_space, gpu); + range_vec = &mem->user->range_vecs[uvm_global_id_gpu_index(gpu->global_id)]; + + status = mem_map_gpu(mem, gpu, (NvU64)mem->user->addr, &gpu_va_space->page_tables, attrs, range_vec); + if (status != NV_OK) + goto cleanup; + + mem_set_mapped_on_gpu_user(mem, gpu); + + return NV_OK; + +cleanup: + mem_deinit_user_mapping(mem); + return status; +} + +void uvm_mem_unmap_gpu_user(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + if (!uvm_mem_mapped_on_gpu_user(mem, gpu)) + return; + + mem_unmap_gpu(mem, gpu, &mem->user->range_vecs[uvm_global_id_gpu_index(gpu->global_id)]); + mem_clear_mapped_on_gpu_user(mem, gpu); + mem_deinit_user_mapping(mem); +} + +void uvm_mem_unmap_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + if (!uvm_mem_mapped_on_gpu_kernel(mem, gpu)) + return; + + mem_unmap_gpu(mem, gpu, &mem->kernel.range_vecs[uvm_global_id_gpu_index(gpu->global_id)]); + mem_clear_mapped_on_gpu_kernel(mem, gpu); + mem_deinit_gpu_kernel_range(mem); +} + +static bool mem_can_be_phys_mapped_on_gpu(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + if (uvm_mem_is_sysmem(mem)) + return sysmem_can_be_mapped(mem); + else + return uvm_mem_is_local_vidmem(mem, gpu); +} + +NV_STATUS uvm_mem_map_gpu_phys(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + NV_STATUS status; + + UVM_ASSERT(mem_can_be_phys_mapped_on_gpu(mem, gpu)); + + if (uvm_mem_is_vidmem(mem)) + return NV_OK; + + if (gpu == mem->dma_owner) + return NV_OK; + + if (sysmem_mapped_on_gpu_phys(mem, gpu)) + return NV_OK; + + status = sysmem_map_gpu_phys(mem, gpu); + if (status != NV_OK) + return status; + + sysmem_set_mapped_on_gpu_phys(mem, gpu); + return NV_OK; +} + +void uvm_mem_unmap_gpu_phys(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(mem); + 
UVM_ASSERT(gpu); + + if (uvm_mem_is_vidmem(mem)) + return; + + // GPU for which the mapping is managed by the dma_alloc_coherent + // API will be unmapped when the allocation is freed. + if (gpu == mem->dma_owner) + return; + + if (!sysmem_mapped_on_gpu_phys(mem, gpu)) + return; + + uvm_mem_unmap_gpu_user(mem, gpu); + uvm_mem_unmap_gpu_kernel(mem, gpu); + + sysmem_unmap_gpu_phys(mem, gpu); + sysmem_clear_mapped_on_gpu_phys(mem, gpu); +} + +void uvm_mem_free(uvm_mem_t *mem) +{ + uvm_gpu_t *gpu; + + if (mem == NULL) + return; + + uvm_mem_unmap_cpu_user(mem); + uvm_mem_unmap_cpu_kernel(mem); + + if (mem->user != NULL) { + for_each_global_gpu_in_mask(gpu, &mem->user->mapped_on) { + uvm_mem_unmap_gpu_user(mem, gpu); + + // If we unmapped the last device, the user mapping is freed, so + // exit the loop before the iterator accesses a non-existing mask. + if (mem->user == NULL) + break; + } + } + + for_each_global_gpu_in_mask(gpu, &mem->kernel.mapped_on) + uvm_mem_unmap_gpu_kernel(mem, gpu); + + if (uvm_mem_is_sysmem(mem)) { + for_each_global_gpu_in_mask(gpu, &mem->sysmem.mapped_on_phys) + uvm_mem_unmap_gpu_phys(mem, gpu); + } + + mem_free_chunks(mem); + + uvm_kvfree(mem); +} + +void *uvm_mem_get_cpu_addr_kernel(uvm_mem_t *mem) +{ + UVM_ASSERT(uvm_mem_mapped_on_cpu_kernel(mem)); + + return mem->kernel.cpu_addr; +} + +NvU64 uvm_mem_get_gpu_va_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(uvm_mem_mapped_on_gpu_kernel(mem, gpu)); + + return reserved_gpu_va(mem, gpu); +} + +uvm_gpu_address_t uvm_mem_gpu_address_virtual_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + return uvm_gpu_address_virtual(uvm_mem_get_gpu_va_kernel(mem, gpu)); +} + +uvm_gpu_address_t uvm_mem_gpu_address_physical(uvm_mem_t *mem, uvm_gpu_t *gpu, NvU64 offset, NvU64 size) +{ + return uvm_gpu_address_from_phys(uvm_mem_gpu_physical(mem, gpu, offset, size)); +} diff --git a/kernel-open/nvidia-uvm/uvm_mem.h b/kernel-open/nvidia-uvm/uvm_mem.h new file mode 100644 index 000000000..62ebfc7bb --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_mem.h @@ -0,0 +1,432 @@ +/******************************************************************************* + Copyright (c) 2016-2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_MEM_H__ +#define __UVM_MEM_H__ + +#include "uvm_forward_decl.h" +#include "uvm_processors.h" +#include "uvm_hal_types.h" +#include "uvm_pmm_gpu.h" +#include "uvm_range_allocator.h" + +// +// This module provides an abstraction for UVM-managed allocations, both sysmem +// and vidmem, which can be mapped on GPUs in internal or user VA spaces or on +// the CPU, or accessed physically. +// +// As opposed to the uvm_rm_mem_* abstraction, this module has no dependencies +// on the UVM-RM interface and implements all the functionality on top of other +// UVM abstractions. Specifically, vidmem is allocated from PMM and sysmem is +// allocated directly from the kernel (in the future PMM will support sysmem as +// well and then this module can switch over). And GPU mappings are created +// through the page table range vector (uvm_page_table_range_vec_t) and CPU +// mappings (only sysmem) use vmap directly. +// +// The module currently allows the following: +// - sysmem allocation and mapping on all GPUs and the CPU +// - vidmem allocation and mapping on the GPU backing the allocation +// +// Additionally, helpers for accessing the allocations physically are provided, +// which allows skipping virtual mappings if not necessary (e.g. allocating a +// single CPU page and accessing it from the GPU). +// +// For internal mappings, GPU VA ranges used for mapping the allocations are +// allocated from a global range allocator (uvm_range_allocator_t) and are +// currently offset by a GPU specific offset (gpu->uvm_mem_va_base). This would +// change if the first limitation below is lifted and UVM can control the VA +// starting at 0. For user mappings, a fixed VA is provided externally. +// +// Allocation lifetimes: +// - Vidmem allocations cannot exceed the lifetime of the GPU on which they are +// allocated (backing_gpu). +// - Sysmem allocations without a DMA owner have no lifetime restrictions. +// - Sysmem allocations with a DMA owner cannot exceed the lifetime of the +// dma_owner GPU. +// +// Future additions: +// - Per processor caching attributes (longer term, the envisioned use-case is +// for GPU semaphore caching, which requires the first limitation below to be +// lifted) +// +// Limitations: +// - On Pascal+ limited to VAs over 40bit due to how the internal VA is shared +// with RM. This implies it cannot be used for e.g. pushbuffer nor sempahores +// currently. At some point in the future UVM should be able +// to take full control of the VA (or at least the bottom 40bits of it) +// and this limitation would be lifted. See comments around +// gpu->rm_va_base for more details. +// - Mapping vidmem on the CPU is only allowed on GPU-coherent systems. The +// problem with lifting this limitation in other systems is that the BAR1 +// space (that's used for such mappings) is limited and controlled by RM and +// may not be easy to interop with vidmem allocations from PMM. +// + + +// The size of the VA used for mapping uvm_mem_t allocations +// 128 GBs should be plenty for internal allocations and fits easily on all +// supported architectures. +#define UVM_MEM_VA_SIZE (128ull * 1024 * 1024 * 1024) + +typedef struct +{ + // The GPU to allocate memory from, or NULL for sysmem. + uvm_gpu_t *backing_gpu; + + // For DMA allocations, the accessing GPU needs to be known at alloc + // time for sysmem allocations. 
Setting the DMA owner has consequences on + // the lifetime of the allocation, which are described in the block + // commment at the top of the file. + // The mapping is bound to the allocation, hence, one can assume that the + // mappings on dma_owner are done when uvm_mem_alloc() returns. + uvm_gpu_t *dma_owner; + + // Size of the allocation, in bytes. + // The only restriction is for it to be non-0. + NvU64 size; + + // mm owning the memory allocation. + // The mm is used to charge the mm's memory cgroup for the allocation. + // If mm is NULL, the allocation will not be charged. + struct mm_struct *mm; + + // Desired page size to use, in bytes. + // + // If this is a DMA allocation, the physical allocation chunk must be + // equal to PAGE_SIZE + // + // If this is a CPU allocation, the physical allocation chunk has to be + // aligned to PAGE_SIZE and the allocation will be mapped with the largest + // PTEs possible on the GPUs. If set to UVM_PAGE_SIZE_DEFAULT, PAGE_SIZE + // size will be used. + // + // For a GPU allocation, if set to UVM_PAGE_SIZE_DEFAULT, GPU mappings will + // use the largest page size supported by the backing GPU which is not + // larger than size. Otherwise, the desired page size will be used. + // + // CPU mappings will always use PAGE_SIZE, so the physical allocation chunk + // has to be aligned to PAGE_SIZE. + NvU32 page_size; + + // If true, the allocation is zeroed (scrubbed). + bool zero; +} uvm_mem_alloc_params_t; + +typedef struct +{ + uvm_prot_t protection; + bool is_cacheable; +} uvm_mem_gpu_mapping_attrs_t; + +// Information specific to allocations mapped in a user VA space. +typedef struct +{ + // Mask of processors the memory is virtually mapped on + uvm_global_processor_mask_t mapped_on; + + // Page table ranges for all GPUs + uvm_page_table_range_vec_t *range_vecs[UVM_GLOBAL_ID_MAX_GPUS]; + + uvm_va_space_t *va_space; + + // The VA to map the allocation at on all processors + void *addr; +} uvm_mem_user_mapping_t; + +struct uvm_mem_struct +{ + // The GPU the physical memory is allocated on. Or NULL for sysmem. + // + // For GPU allocations, the lifetime of the allocation cannot extend the + // lifetime of the GPU. For CPU allocations there is no lifetime limitation. + uvm_gpu_t *backing_gpu; + + + + + + uvm_gpu_t *dma_owner; + + // Size of the physical chunks. + NvU32 chunk_size; + + union + { + struct + { + uvm_gpu_chunk_t **chunks; + } vidmem; + + struct + { + // Mask of processors the memory is physically mapped on. + // + // There is no equivalent mask for vidmem, because only the backing + // GPU can physical access the memory + uvm_global_processor_mask_t mapped_on_phys; + + struct page **pages; + void **va; + + // Per GPU IOMMU mappings of the pages + NvU64 *dma_addrs[UVM_GLOBAL_ID_MAX_GPUS]; + } sysmem; + }; + + // Count of chunks (vidmem) or CPU pages (sysmem) above + size_t chunks_count; + + // Size of the allocation + NvU64 size; + + // Size of the physical allocation backing + NvU64 physical_allocation_size; + + uvm_mem_user_mapping_t *user; + + // Information specific to allocations mapped in UVM internal VA space. 
+ struct + { + // Mask of processors the memory is virtually mapped on + uvm_global_processor_mask_t mapped_on; + + // Page table ranges for all GPUs + uvm_page_table_range_vec_t *range_vecs[UVM_GLOBAL_ID_MAX_GPUS]; + + // Range allocation for the GPU VA + uvm_range_allocation_t range_alloc; + + // CPU address of the allocation if mapped on the CPU + void *cpu_addr; + } kernel; +}; + +NV_STATUS uvm_mem_global_init(void); +void uvm_mem_global_exit(void); + +// Fill out attrs_out from attrs. attrs_out must not be null. attrs_out may be +// prepopulated with default values, which are not overwritten if the +// corresponding field in attrs has a default value. The gpu corresponding to +// attrs->gpuUuid is optionally returned in gpu_out if it is not NULL. +// +// Returns an error if attrs is invalid. +NV_STATUS uvm_mem_translate_gpu_attributes(const UvmGpuMappingAttributes *attrs, + uvm_va_space_t *va_space, + uvm_gpu_t **gpu_out, + uvm_mem_gpu_mapping_attrs_t *attrs_out); + +uvm_chunk_sizes_mask_t uvm_mem_kernel_chunk_sizes(uvm_gpu_t *gpu); + +// Allocate memory according to the given allocation parameters. +// +// In the case of sysmem, the memory is immediately physically accessible from +// the GPU DMA owner, if any. Vidmem is accessible from the GPU backing the +// allocation. +// +// Unless a specific page size is needed, or the physical pages need to be +// zeroed, the caller can use the appropriate uvm_mem_alloc* helper instead. +NV_STATUS uvm_mem_alloc(const uvm_mem_alloc_params_t *params, uvm_mem_t **mem_out); + +// Clear all mappings and free the memory +void uvm_mem_free(uvm_mem_t *mem); + +// Map/unmap sysmem for physical access on a GPU. +// A physical unmap implies a virtual user and kernel unmap i.e. it clears all +// mappings in the given GPU. +NV_STATUS uvm_mem_map_gpu_phys(uvm_mem_t *mem, uvm_gpu_t *gpu); +void uvm_mem_unmap_gpu_phys(uvm_mem_t *mem, uvm_gpu_t *gpu); + +// Map/unmap on a user VA space. +// +// For GPU mappings, the caller passes the user VA space to map the allocation, +// the address to map at, and the mapping attributes. CPU mappings pass the user +// VA space, and the affected VM area; the memory is mapped at the VM area +// starting address. +// +// The user VA space and address values must be identical to those used in prior +// mappings (if any) on the same memory object. +NV_STATUS uvm_mem_map_gpu_user(uvm_mem_t *mem, + uvm_gpu_t *gpu, + uvm_va_space_t *user_va_space, + void *user_addr, + const uvm_mem_gpu_mapping_attrs_t *attrs); +NV_STATUS uvm_mem_map_cpu_user(uvm_mem_t *mem, + uvm_va_space_t *user_va_space, + struct vm_area_struct *vma); +void uvm_mem_unmap_gpu_user(uvm_mem_t *mem, uvm_gpu_t *gpu); +void uvm_mem_unmap_cpu_user(uvm_mem_t *mem); + +// Map/unmap on UVM's internal VA space. +// +// TODO: Bug 1812419: uvm_mem_map_gpu_kernel should accept GPU mapping +// attributes the way uvm_mem_map_gpu_user does. +NV_STATUS uvm_mem_map_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu); +NV_STATUS uvm_mem_map_cpu_kernel(uvm_mem_t *mem); +void uvm_mem_unmap_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu); +void uvm_mem_unmap_cpu_kernel(uvm_mem_t *mem); + +// Check if a user or kernel mapping exists on a given device. +bool uvm_mem_mapped_on_gpu_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu); +bool uvm_mem_mapped_on_gpu_user(uvm_mem_t *mem, uvm_gpu_t *gpu); +bool uvm_mem_mapped_on_cpu_kernel(uvm_mem_t *mem); +bool uvm_mem_mapped_on_cpu_user(uvm_mem_t *mem); + +// Get the CPU address +// +// The allocation has to be mapped on the CPU prior to calling this function. 
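+//
+// For example (an illustrative sketch; "size" and "mm" are placeholders and
+// error handling is elided):
+//
+//     uvm_mem_t *mem;
+//     NV_STATUS status = uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, mm, &mem);
+//     if (status == NV_OK)
+//         memset(uvm_mem_get_cpu_addr_kernel(mem), 0, mem->size);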
+void *uvm_mem_get_cpu_addr_kernel(uvm_mem_t *mem); + +// Get the GPU VA +// +// The allocation has to be internally mapped on the given GPU prior to calling +// this function. +NvU64 uvm_mem_get_gpu_va_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu); + +// Helper for getting a virtual uvm_gpu_address_t +uvm_gpu_address_t uvm_mem_gpu_address_virtual_kernel(uvm_mem_t *mem, uvm_gpu_t *gpu); + +// Helpers for getting both types of GPU physical addresses. +// +// Offset and size are used to return the address of the correct physical chunk +// and check that the allocation is physically contiguous for the given range. +uvm_gpu_phys_address_t uvm_mem_gpu_physical(uvm_mem_t *mem, uvm_gpu_t *gpu, NvU64 offset, NvU64 size); +uvm_gpu_address_t uvm_mem_gpu_address_physical(uvm_mem_t *mem, uvm_gpu_t *gpu, NvU64 offset, NvU64 size); + +// Helper to get an address suitable for accessing_gpu (which may be the backing +// GPU) to access with CE. Note that mappings for indirect peers are not +// created automatically. +uvm_gpu_address_t uvm_mem_gpu_address_copy(uvm_mem_t *mem, uvm_gpu_t *accessing_gpu, NvU64 offset, NvU64 size); + +static bool uvm_mem_is_sysmem(uvm_mem_t *mem) +{ + return mem->backing_gpu == NULL; +} + +static bool uvm_mem_is_vidmem(uvm_mem_t *mem) +{ + return !uvm_mem_is_sysmem(mem); +} + +static bool uvm_mem_is_local_vidmem(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + return uvm_mem_is_vidmem(mem) && (mem->backing_gpu == gpu); +} + +static bool uvm_mem_is_sysmem_dma(uvm_mem_t *mem) +{ + return uvm_mem_is_sysmem(mem) && !!mem->dma_owner; +} + +// Helper for allocating sysmem using the default page size. The backing pages +// are not zeroed. +static NV_STATUS uvm_mem_alloc_sysmem(NvU64 size, struct mm_struct *mm, uvm_mem_t **mem_out) +{ + uvm_mem_alloc_params_t params = { 0 }; + params.size = size; + params.backing_gpu = NULL; + params.page_size = UVM_PAGE_SIZE_DEFAULT; + params.mm = mm; + + return uvm_mem_alloc(¶ms, mem_out); +} + +// Helper for allocating sysmem in DMA zone using the default page size. The +// backing pages are not zeroed. +static NV_STATUS uvm_mem_alloc_sysmem_dma(NvU64 size, uvm_gpu_t *dma_owner, struct mm_struct *mm, uvm_mem_t **mem_out) +{ + uvm_mem_alloc_params_t params = { 0 }; + params.size = size; + params.backing_gpu = NULL; + params.dma_owner = dma_owner; + params.page_size = UVM_PAGE_SIZE_DEFAULT; + params.mm = mm; + + return uvm_mem_alloc(¶ms, mem_out); +} + +// Helper for allocating vidmem with the default page size. The backing pages +// are not zeroed. 
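+//
+// A typical follow-up (illustrative) is to map the result into UVM's internal
+// GPU VA space with uvm_mem_map_gpu_kernel(mem, gpu) and retrieve the virtual
+// address with uvm_mem_get_gpu_va_kernel(mem, gpu).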
+static NV_STATUS uvm_mem_alloc_vidmem(NvU64 size, uvm_gpu_t *gpu, uvm_mem_t **mem_out) +{ + uvm_mem_alloc_params_t params = { 0 }; + params.size = size; + params.backing_gpu = gpu; + params.page_size = UVM_PAGE_SIZE_DEFAULT; + + return uvm_mem_alloc(¶ms, mem_out); +} + +// Helper for allocating sysmem and mapping it on the CPU +static NV_STATUS uvm_mem_alloc_sysmem_and_map_cpu_kernel(NvU64 size, struct mm_struct *mm, uvm_mem_t **mem_out) +{ + NV_STATUS status; + uvm_mem_t *mem; + + status = uvm_mem_alloc_sysmem(size, mm, &mem); + if (status != NV_OK) + return status; + + status = uvm_mem_map_cpu_kernel(mem); + if (status != NV_OK) { + uvm_mem_free(mem); + return status; + } + + *mem_out = mem; + return NV_OK; +} + + + + + + + + + +static NV_STATUS uvm_mem_alloc_sysmem_dma_and_map_cpu_kernel(NvU64 size, + uvm_gpu_t *gpu, + struct mm_struct *mm, + uvm_mem_t **mem_out) +{ + NV_STATUS status; + uvm_mem_t *mem; + + status = uvm_mem_alloc_sysmem_dma(size, gpu, mm, &mem); + if (status != NV_OK) + return status; + + status = uvm_mem_map_cpu_kernel(mem); + if (status != NV_OK) { + uvm_mem_free(mem); + return status; + } + + *mem_out = mem; + return NV_OK; +} + +// Helper to map an allocation on the specified processors in the UVM VA space. +NV_STATUS uvm_mem_map_kernel(uvm_mem_t *mem, const uvm_global_processor_mask_t *mask); + +#endif // __UVM_MEM_H__ diff --git a/kernel-open/nvidia-uvm/uvm_mem_test.c b/kernel-open/nvidia-uvm/uvm_mem_test.c new file mode 100644 index 000000000..53e7ac623 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_mem_test.c @@ -0,0 +1,622 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ +#include "uvm_hal.h" +#include "uvm_gpu.h" +#include "uvm_kvmalloc.h" +#include "uvm_mem.h" +#include "uvm_push.h" + + + +#include "uvm_test.h" +#include "uvm_test_ioctl.h" +#include "uvm_va_space.h" + +static const size_t sysmem_alloc_sizes[] = { 1, PAGE_SIZE - 1, PAGE_SIZE, 7 * PAGE_SIZE }; + +static NvU32 first_page_size(NvU32 page_sizes) +{ + return page_sizes & ~(page_sizes - 1); +} + +#define for_each_page_size(page_size, page_sizes) \ + for (page_size = first_page_size(page_sizes); \ + page_size; \ + page_size = first_page_size((page_sizes) & ~(page_size | (page_size - 1)))) + +static inline NV_STATUS __alloc_map_sysmem(NvU64 size, uvm_gpu_t *gpu, uvm_mem_t **sys_mem) +{ + + + + + + + return uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, current->mm, sys_mem); + +} + +static NV_STATUS check_accessible_from_gpu(uvm_gpu_t *gpu, uvm_mem_t *mem) +{ + NV_STATUS status = NV_OK; + uvm_mem_t *sys_mem = NULL; + uvm_push_t push; + NvU64 *sys_verif; + size_t i; + NvU64 verif_size = mem->size; + NvU64 offset; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + + verif_size = UVM_ALIGN_UP(verif_size, sizeof(*sys_verif)); + + UVM_ASSERT(mem->physical_allocation_size >= verif_size); + UVM_ASSERT(verif_size >= sizeof(*sys_verif)); + + TEST_NV_CHECK_GOTO(__alloc_map_sysmem(verif_size, gpu, &sys_mem), done); + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(sys_mem, gpu), done); + + sys_verif = (NvU64*)uvm_mem_get_cpu_addr_kernel(sys_mem); + + for (i = 0; i < verif_size / sizeof(*sys_verif); ++i) + sys_verif[i] = mem->size + i; + + // Copy from sys_mem to mem (in mem->page_size chunks) using: + // - virtual access for sys_mem + // - physical access for mem, unless the channel only supports virtual + // addressing + for (offset = 0; offset < verif_size; offset += mem->chunk_size) { + uvm_gpu_address_t sys_mem_gpu_address, mem_gpu_address; + size_t size_this_time = min((NvU64)mem->chunk_size, verif_size - offset); + bool should_use_pa; + + TEST_NV_CHECK_GOTO(uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_CPU_TO_GPU, &push, " "), done); + + sys_mem_gpu_address = uvm_mem_gpu_address_virtual_kernel(sys_mem, gpu); + sys_mem_gpu_address.address += offset; + + + + + should_use_pa = uvm_channel_is_privileged(push.channel); + + + if (should_use_pa) { + mem_gpu_address = uvm_mem_gpu_address_physical(mem, gpu, offset, size_this_time); + } + else { + mem_gpu_address = uvm_mem_gpu_address_virtual_kernel(mem, gpu); + mem_gpu_address.address += offset; + } + + uvm_push_set_description(&push, + "Memcopy %zd bytes from virtual sys_mem 0x%llx to %s mem 0x%llx [mem loc: %s, page size: %u]", + size_this_time, + sys_mem_gpu_address.address, + mem_gpu_address.is_virtual? "virtual" : "physical", + mem_gpu_address.address, + uvm_mem_is_sysmem(mem)? 
"sys" : "vid", + mem->chunk_size); + + gpu->parent->ce_hal->memcopy(&push, mem_gpu_address, sys_mem_gpu_address, size_this_time); + + uvm_push_end(&push); + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + } + + TEST_NV_CHECK_GOTO(uvm_tracker_wait(&tracker), done); + + memset(sys_verif, 0, verif_size); + + // Copy back to sys_mem from mem (in sys_mem->page_size chunks) using: + // - physical access for sys_mem, unless the channel only supports virtual + // addressing + // - virtual access for mem + for (offset = 0; offset < verif_size; offset += sys_mem->chunk_size) { + uvm_gpu_address_t mem_gpu_address, sys_mem_gpu_address; + size_t size_this_time = min((NvU64)sys_mem->chunk_size, verif_size - offset); + + TEST_NV_CHECK_GOTO(uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_TO_CPU, &push, " "), done); + + mem_gpu_address = uvm_mem_gpu_address_virtual_kernel(mem, gpu); + mem_gpu_address.address += offset; + + if (uvm_channel_is_privileged(push.channel)) { + sys_mem_gpu_address = uvm_mem_gpu_address_physical(sys_mem, gpu, offset, size_this_time); + } + else { + sys_mem_gpu_address = uvm_mem_gpu_address_virtual_kernel(sys_mem, gpu); + sys_mem_gpu_address.address += offset; + } + + uvm_push_set_description(&push, + "Memcopy %zd bytes from virtual mem 0x%llx to %s sys_mem 0x%llx", + size_this_time, + mem_gpu_address.address, + sys_mem_gpu_address.is_virtual? "virtual" : "physical", + sys_mem_gpu_address.address); + + gpu->parent->ce_hal->memcopy(&push, sys_mem_gpu_address, mem_gpu_address, size_this_time); + + uvm_push_end(&push); + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + } + + TEST_NV_CHECK_GOTO(uvm_tracker_wait(&tracker), done); + + for (i = 0; i < verif_size / sizeof(*sys_verif); ++i) { + if (sys_verif[i] != mem->size + i) { + UVM_TEST_PRINT("Verif failed for %zd = 0x%llx instead of 0x%llx, verif_size=0x%llx mem(size=0x%llx, page_size=%u, processor=%u)\n", + i, + sys_verif[i], + (NvU64)(verif_size + i), + verif_size, + mem->size, + mem->chunk_size, + uvm_mem_is_vidmem(mem) ? 
uvm_id_value(mem->backing_gpu->id) : UVM_ID_CPU_VALUE); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + +done: + (void)uvm_tracker_wait(&tracker); + uvm_tracker_deinit(&tracker); + uvm_mem_free(sys_mem); + + return status; +} + +static NV_STATUS test_map_gpu(uvm_mem_t *mem, uvm_gpu_t *gpu) +{ + NvU64 gpu_va; + + TEST_NV_CHECK_RET(uvm_mem_map_gpu_kernel(mem, gpu)); + TEST_CHECK_RET(uvm_mem_mapped_on_gpu_kernel(mem, gpu)); + TEST_CHECK_RET(!uvm_mem_mapped_on_gpu_user(mem, gpu)); + + gpu_va = uvm_mem_get_gpu_va_kernel(mem, gpu); + TEST_CHECK_RET(gpu_va >= gpu->parent->uvm_mem_va_base); + TEST_CHECK_RET(gpu_va + mem->physical_allocation_size <= gpu->parent->uvm_mem_va_base + gpu->parent->uvm_mem_va_size); + + // Mapping if already mapped is OK + TEST_NV_CHECK_RET(uvm_mem_map_gpu_kernel(mem, gpu)); + + // Unmap + uvm_mem_unmap_gpu_kernel(mem, gpu); + TEST_CHECK_RET(!uvm_mem_mapped_on_gpu_kernel(mem, gpu)); + + // Unmapping an unmapped memory is OK + uvm_mem_unmap_gpu_kernel(mem, gpu); + uvm_mem_unmap_gpu_user(mem, gpu); + + // Map again + TEST_NV_CHECK_RET(uvm_mem_map_gpu_kernel(mem, gpu)); + + // Should get the same VA + TEST_CHECK_RET(gpu_va == uvm_mem_get_gpu_va_kernel(mem, gpu)); + + return check_accessible_from_gpu(gpu, mem); +} + +static NV_STATUS test_map_cpu(uvm_mem_t *mem) +{ + char *cpu_addr; + + if (uvm_mem_is_vidmem(mem)) + UVM_ASSERT(mem->backing_gpu->parent->numa_info.enabled); + + // Map + TEST_NV_CHECK_RET(uvm_mem_map_cpu_kernel(mem)); + TEST_CHECK_RET(uvm_mem_mapped_on_cpu_kernel(mem)); + TEST_CHECK_RET(!uvm_mem_mapped_on_cpu_user(mem)); + TEST_CHECK_RET(uvm_mem_get_cpu_addr_kernel(mem) != NULL); + + // Mapping if already mapped is OK + TEST_NV_CHECK_RET(uvm_mem_map_cpu_kernel(mem)); + + // Unmap + uvm_mem_unmap_cpu_kernel(mem); + TEST_CHECK_RET(!uvm_mem_mapped_on_cpu_kernel(mem)); + + // Unmapping an unmapped memory is OK + uvm_mem_unmap_cpu_kernel(mem); + uvm_mem_unmap_cpu_user(mem); + + // Map again + TEST_NV_CHECK_RET(uvm_mem_map_cpu_kernel(mem)); + + cpu_addr = uvm_mem_get_cpu_addr_kernel(mem); + TEST_CHECK_RET(cpu_addr != NULL); + + memset(cpu_addr, 3, mem->size); + + return NV_OK; +} + +static NV_STATUS test_alloc_sysmem(uvm_va_space_t *va_space, NvU32 page_size, size_t size, uvm_mem_t **mem_out) +{ + NV_STATUS status; + uvm_mem_t *mem; + uvm_gpu_t *gpu; + uvm_mem_alloc_params_t params = { 0 }; + + params.size = size; + params.page_size = page_size; + params.mm = current->mm; + + status = uvm_mem_alloc(¶ms, &mem); + TEST_CHECK_GOTO(status == NV_OK, error); + + TEST_CHECK_GOTO(test_map_cpu(mem) == NV_OK, error); + + for_each_va_space_gpu(gpu, va_space) + TEST_NV_CHECK_GOTO(test_map_gpu(mem, gpu), error); + + *mem_out = mem; + + return NV_OK; + +error: + uvm_mem_free(mem); + return status; +} + +static NV_STATUS test_alloc_sysmem_dma(uvm_va_space_t *va_space, uvm_gpu_t *dma_owner, size_t size, uvm_mem_t **mem_out) +{ + NV_STATUS status; + uvm_mem_t *mem; + uvm_gpu_t *gpu; + uvm_mem_alloc_params_t params = { 0 }; + + params.size = size; + params.page_size = PAGE_SIZE; + params.dma_owner = dma_owner; + params.mm = current->mm; + + status = uvm_mem_alloc(¶ms, &mem); + TEST_CHECK_GOTO(status == NV_OK, error); + + TEST_CHECK_GOTO(test_map_cpu(mem) == NV_OK, error); + + // Mapping twice on the dma_owner is OK. 
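+    // test_map_gpu() below maps on every registered GPU, including the
+    // dma_owner, which was already physically mapped when the allocation was
+    // created.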
+ for_each_va_space_gpu(gpu, va_space) + TEST_NV_CHECK_GOTO(test_map_gpu(mem, gpu), error); + + *mem_out = mem; + + return NV_OK; + +error: + uvm_mem_free(mem); + return status; +} + +static NV_STATUS test_alloc_vidmem(uvm_gpu_t *gpu, NvU32 page_size, size_t size, uvm_mem_t **mem_out) +{ + NV_STATUS status; + uvm_mem_t *mem; + uvm_mem_alloc_params_t params = { 0 }; + + params.backing_gpu = gpu; + params.page_size = page_size; + params.size = size; + params.mm = current->mm; + + status = uvm_mem_alloc(¶ms, &mem); + TEST_CHECK_GOTO(status == NV_OK, error); + + if (page_size == UVM_PAGE_SIZE_DEFAULT) { + if (gpu->parent->numa_info.enabled) + TEST_CHECK_GOTO(mem->chunk_size >= PAGE_SIZE && mem->chunk_size <= max(size, (size_t)PAGE_SIZE), error); + else + TEST_CHECK_GOTO(mem->chunk_size == UVM_PAGE_SIZE_4K || mem->chunk_size <= size, error); + } + + TEST_NV_CHECK_GOTO(test_map_gpu(mem, gpu), error); + + if (gpu->parent->numa_info.enabled && (page_size == UVM_PAGE_SIZE_DEFAULT || page_size >= PAGE_SIZE)) + TEST_CHECK_GOTO(test_map_cpu(mem) == NV_OK, error); + + *mem_out = mem; + + return NV_OK; + +error: + uvm_mem_free(mem); + return status; +} + +static bool should_test_page_size(size_t alloc_size, NvU32 page_size) +{ + + + + + + if (g_uvm_global.num_simulated_devices == 0) + return true; + + return alloc_size <= UVM_PAGE_SIZE_2M || page_size == UVM_PAGE_SIZE_2M; +} + +static NV_STATUS test_all(uvm_va_space_t *va_space) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + NvU32 gpu_count; + uvm_mem_t **all_mem = NULL; + NvU32 allocation_count; + NvU32 current_alloc = 0; + + // Create allocations of these sizes + static const size_t sizes[] = {1, 4, 16, 1024, 4096, 1024 * 1024, 7 * 1024 * 1024 + 17 }; + + // Pascal+ can map sysmem with 4K, 64K and 2M PTEs, other GPUs can only use + // 4K. Test all of the sizes supported by Pascal+ and 128K to match big page + // size on pre-Pascal GPUs with 128K big page size. + // Ampere+ also supports 512M PTEs, but since UVM's maximum chunk size is + // 2M, we don't test for this page size. + static const NvU32 cpu_chunk_sizes = PAGE_SIZE | UVM_PAGE_SIZE_64K | UVM_PAGE_SIZE_128K | UVM_PAGE_SIZE_2M; + + // All supported page sizes will be tested, CPU has the most with 4 and +1 + // for the default. 
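+    // (With a 4K PAGE_SIZE kernel, the four sysmem chunk sizes in
+    // cpu_chunk_sizes above are 4K, 64K, 128K and 2M.)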
+ static const int max_supported_page_sizes = 4 + 1; + int i; + + gpu_count = uvm_processor_mask_get_gpu_count(&va_space->registered_gpus); + + // +1 for the CPU + allocation_count = (gpu_count + 1) * max_supported_page_sizes * ARRAY_SIZE(sizes); + + // For the DMA allocations per GPU + allocation_count += gpu_count * ARRAY_SIZE(sizes); + + all_mem = uvm_kvmalloc_zero(sizeof(*all_mem) * allocation_count); + + if (all_mem == NULL) + return NV_ERR_NO_MEMORY; + + for (i = 0; i < ARRAY_SIZE(sizes); ++i) { + NvU32 page_size = 0; + uvm_mem_t *mem; + + if (should_test_page_size(sizes[i], UVM_PAGE_SIZE_DEFAULT)) { + status = test_alloc_sysmem(va_space, UVM_PAGE_SIZE_DEFAULT, sizes[i], &mem); + if (status != NV_OK) { + UVM_TEST_PRINT("Failed to alloc sysmem size %zd, page_size default\n", sizes[i], page_size); + goto cleanup; + } + all_mem[current_alloc++] = mem; + } + + for_each_page_size(page_size, cpu_chunk_sizes) { + if (!should_test_page_size(sizes[i], page_size)) + continue; + + status = test_alloc_sysmem(va_space, page_size, sizes[i], &mem); + if (status != NV_OK) { + UVM_TEST_PRINT("Failed to alloc sysmem size %zd, page_size %u\n", sizes[i], page_size); + goto cleanup; + } + all_mem[current_alloc++] = mem; + } + + for_each_va_space_gpu(gpu, va_space) { + NvU32 page_sizes = gpu->address_space_tree.hal->page_sizes(); + + UVM_ASSERT(max_supported_page_sizes >= hweight_long(page_sizes)); + + status = test_alloc_vidmem(gpu, UVM_PAGE_SIZE_DEFAULT, sizes[i], &mem); + if (status != NV_OK) { + UVM_TEST_PRINT("Test alloc vidmem failed, page_size default size %zd GPU %s\n", + sizes[i], + uvm_gpu_name(gpu)); + goto cleanup; + } + all_mem[current_alloc++] = mem; + + page_sizes &= UVM_CHUNK_SIZES_MASK; + for_each_page_size(page_size, page_sizes) { + status = test_alloc_vidmem(gpu, page_size, sizes[i], &mem); + if (status != NV_OK) { + UVM_TEST_PRINT("Test alloc vidmem failed, page_size %u size %zd GPU %s\n", + page_size, + sizes[i], + uvm_gpu_name(gpu)); + goto cleanup; + } + all_mem[current_alloc++] = mem; + + } + + status = test_alloc_sysmem_dma(va_space, gpu, sizes[i], &mem); + if (status != NV_OK) { + UVM_TEST_PRINT("Test alloc sysmem DMA failed, size %zd GPU %s\n", + sizes[i], + uvm_gpu_name(gpu)); + goto cleanup; + } + all_mem[current_alloc++] = mem; + } + } + +cleanup: + for (i = 0; i < current_alloc; ++i) + uvm_mem_free(all_mem[i]); + + uvm_kvfree(all_mem); + + return status; +} + +static NV_STATUS test_basic_vidmem(uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + NvU32 page_size; + NvU32 page_sizes = gpu->address_space_tree.hal->page_sizes(); + NvU32 biggest_page_size = uvm_mmu_biggest_page_size_up_to(&gpu->address_space_tree, UVM_CHUNK_SIZE_MAX); + NvU32 smallest_page_size = page_sizes & ~(page_sizes - 1); + uvm_mem_t *mem = NULL; + + page_sizes &= UVM_CHUNK_SIZES_MASK; + for_each_page_size(page_size, page_sizes) { + TEST_CHECK_GOTO(uvm_mem_alloc_vidmem(page_size - 1, gpu, &mem) == NV_OK, done); + if (gpu->parent->numa_info.enabled) + TEST_CHECK_GOTO(mem->chunk_size >= PAGE_SIZE && mem->chunk_size <= max(page_size, (NvU32)PAGE_SIZE), done); + else + TEST_CHECK_GOTO(mem->chunk_size < page_size || page_size == smallest_page_size, done); + uvm_mem_free(mem); + mem = NULL; + + TEST_CHECK_GOTO(uvm_mem_alloc_vidmem(page_size, gpu, &mem) == NV_OK, done); + if (gpu->parent->numa_info.enabled) + TEST_CHECK_GOTO(mem->chunk_size == max(page_size, (NvU32)PAGE_SIZE), done); + else + TEST_CHECK_GOTO(mem->chunk_size == page_size, done); + uvm_mem_free(mem); + mem = NULL; + } + + 
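+    // An allocation larger than the biggest page size is still expected to be
+    // backed by chunks of the biggest page size, as checked below for a
+    // (5 * biggest_page_size - 1)-byte request.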
TEST_CHECK_GOTO(uvm_mem_alloc_vidmem(5 * ((NvU64)biggest_page_size) - 1, gpu, &mem) == NV_OK, done); + TEST_CHECK_GOTO(mem->chunk_size == biggest_page_size, done); + +done: + uvm_mem_free(mem); + return status; +} + +static NV_STATUS test_basic_sysmem(void) +{ + NV_STATUS status = NV_OK; + uvm_mem_t *mem = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(sysmem_alloc_sizes); ++i) { + size_t size = sysmem_alloc_sizes[i]; + TEST_NV_CHECK_GOTO(uvm_mem_alloc_sysmem(size, current->mm, &mem), done); + TEST_CHECK_GOTO(mem->chunk_size == PAGE_SIZE, done); + uvm_mem_free(mem); + mem = NULL; + } + +done: + uvm_mem_free(mem); + return status; +} + +static NV_STATUS test_basic_sysmem_dma(uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + uvm_mem_t *mem = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(sysmem_alloc_sizes); ++i) { + size_t size = sysmem_alloc_sizes[i]; + TEST_NV_CHECK_GOTO(uvm_mem_alloc_sysmem_dma(size, gpu, current->mm, &mem), done); + TEST_CHECK_GOTO(mem->chunk_size == PAGE_SIZE, done); + uvm_mem_free(mem); + mem = NULL; + } + +done: + uvm_mem_free(mem); + return status; +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +static NV_STATUS test_basic(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + + TEST_CHECK_RET(test_basic_sysmem() == NV_OK); + + for_each_va_space_gpu(gpu, va_space) { + TEST_CHECK_RET(test_basic_vidmem(gpu) == NV_OK); + TEST_CHECK_RET(test_basic_sysmem_dma(gpu) == NV_OK); + + + + } + + return NV_OK; +} + +static NV_STATUS tests(uvm_va_space_t *va_space) +{ + TEST_NV_CHECK_RET(test_basic(va_space)); + TEST_NV_CHECK_RET(test_all(va_space)); + + return NV_OK; +} + +NV_STATUS uvm_test_mem_sanity(UVM_TEST_MEM_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_read(va_space); + + status = tests(va_space); + + uvm_va_space_up_read(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_migrate.c b/kernel-open/nvidia-uvm/uvm_migrate.c new file mode 100644 index 000000000..6f9d6bddf --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_migrate.c @@ -0,0 +1,1058 @@ +/******************************************************************************* + Copyright (c) 2016-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_ioctl.h" +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_lock.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_va_block.h" +#include "uvm_tracker.h" +#include "uvm_api.h" +#include "uvm_channel.h" +#include "uvm_push.h" +#include "uvm_hal.h" +#include "uvm_tools.h" +#include "uvm_migrate.h" +#include "uvm_migrate_pageable.h" +#include "uvm_va_space_mm.h" +#include "nv_speculation_barrier.h" + +typedef enum +{ + UVM_MIGRATE_PASS_FIRST, + UVM_MIGRATE_PASS_SECOND +} uvm_migrate_pass_t; + +static int uvm_perf_migrate_cpu_preunmap_enable = 1; +module_param(uvm_perf_migrate_cpu_preunmap_enable, int, S_IRUGO); + +#define UVM_PERF_MIGRATE_CPU_PREUNMAP_BLOCK_ORDER_DEFAULT 2 +#define UVM_PERF_MIGRATE_CPU_PREUNMAP_BLOCK_ORDER_MAX 10 +static unsigned uvm_perf_migrate_cpu_preunmap_block_order = UVM_PERF_MIGRATE_CPU_PREUNMAP_BLOCK_ORDER_DEFAULT; +module_param(uvm_perf_migrate_cpu_preunmap_block_order, uint, S_IRUGO); + +// Global post-processed values of the module parameters +static bool g_uvm_perf_migrate_cpu_preunmap_enable __read_mostly; +static NvU64 g_uvm_perf_migrate_cpu_preunmap_size __read_mostly; + +static bool is_migration_single_block(uvm_va_range_t *first_va_range, NvU64 base, NvU64 length) +{ + NvU64 end = base + length - 1; + + if (end > first_va_range->node.end) + return false; + + return uvm_va_range_block_index(first_va_range, base) == uvm_va_range_block_index(first_va_range, end); +} + +static NV_STATUS block_migrate_map_mapped_pages(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_va_block_region_t region, + uvm_processor_id_t dest_id) +{ + uvm_prot_t prot; + uvm_page_index_t page_index; + NV_STATUS status = NV_OK; + const uvm_page_mask_t *pages_mapped_on_destination = uvm_va_block_map_mask_get(va_block, dest_id); + + for (prot = UVM_PROT_READ_ONLY; prot <= UVM_PROT_READ_WRITE_ATOMIC; ++prot) + va_block_context->mask_by_prot[prot - 1].count = 0; + + // Only map those pages that are not already mapped on destination + for_each_va_block_unset_page_in_region_mask(page_index, pages_mapped_on_destination, region) { + prot = uvm_va_block_page_compute_highest_permission(va_block, dest_id, page_index); + UVM_ASSERT(prot != UVM_PROT_NONE); + + if (va_block_context->mask_by_prot[prot - 1].count++ == 0) + uvm_page_mask_zero(&va_block_context->mask_by_prot[prot - 1].page_mask); + + uvm_page_mask_set(&va_block_context->mask_by_prot[prot - 1].page_mask, page_index); + } + + for (prot = UVM_PROT_READ_ONLY; prot <= UVM_PROT_READ_WRITE_ATOMIC; ++prot) { + if (va_block_context->mask_by_prot[prot - 1].count == 0) + continue; + + // We pass UvmEventMapRemoteCauseInvalid since the destination processor + // of a migration will never be mapped remotely + status = uvm_va_block_map(va_block, + va_block_context, + dest_id, + region, + &va_block_context->mask_by_prot[prot - 1].page_mask, + prot, + UvmEventMapRemoteCauseInvalid, + &va_block->tracker); + if (status != NV_OK) + break; + + // Whoever added the other mapping(s) should have already added + // SetAccessedBy processors + } + + return status; +} + +static NV_STATUS block_migrate_map_unmapped_pages(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_va_block_region_t region, + uvm_processor_id_t dest_id) + +{ + uvm_tracker_t local_tracker = 
UVM_TRACKER_INIT(); + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + + // Save the mask of unmapped pages because it will change after the + // first map operation + uvm_page_mask_complement(&va_block_context->caller_page_mask, &va_block->maybe_mapped_pages); + + // Only map those pages that are not mapped anywhere else (likely due + // to a first touch or a migration). We pass + // UvmEventMapRemoteCauseInvalid since the destination processor of a + // migration will never be mapped remotely. + status = uvm_va_block_map(va_block, + va_block_context, + dest_id, + region, + &va_block_context->caller_page_mask, + UVM_PROT_READ_WRITE_ATOMIC, + UvmEventMapRemoteCauseInvalid, + &local_tracker); + if (status != NV_OK) + goto out; + + // Add mappings for AccessedBy processors + // + // No mappings within this call will operate on dest_id, so we don't + // need to acquire the map operation above. + status = uvm_va_block_add_mappings_after_migration(va_block, + va_block_context, + dest_id, + dest_id, + region, + &va_block_context->caller_page_mask, + UVM_PROT_READ_WRITE_ATOMIC, + NULL); + +out: + tracker_status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker); + uvm_tracker_deinit(&local_tracker); + return status == NV_OK ? tracker_status : status; +} + +// Pages that are not mapped anywhere can be safely mapped with RWA permission. +// The rest of pages need to individually compute the maximum permission that +// does not require a revocation. +static NV_STATUS block_migrate_add_mappings(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_va_block_region_t region, + uvm_processor_id_t dest_id) + +{ + NV_STATUS status; + + status = block_migrate_map_unmapped_pages(va_block, + va_block_retry, + va_block_context, + region, + dest_id); + if (status != NV_OK) + return status; + + return block_migrate_map_mapped_pages(va_block, + va_block_retry, + va_block_context, + region, + dest_id); +} + +NV_STATUS uvm_va_block_migrate_locked(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_va_block_region_t region, + uvm_processor_id_t dest_id, + uvm_migrate_mode_t mode, + uvm_tracker_t *out_tracker) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + NV_STATUS status, tracker_status = NV_OK; + + uvm_assert_mutex_locked(&va_block->lock); + + if (uvm_va_policy_is_read_duplicate(va_block_context->policy, va_space)) { + status = uvm_va_block_make_resident_read_duplicate(va_block, + va_block_retry, + va_block_context, + dest_id, + region, + NULL, + NULL, + UVM_MAKE_RESIDENT_CAUSE_API_MIGRATE); + } + else { + status = uvm_va_block_make_resident(va_block, + va_block_retry, + va_block_context, + dest_id, + region, + NULL, + NULL, + UVM_MAKE_RESIDENT_CAUSE_API_MIGRATE); + } + + if (status == NV_OK && mode == UVM_MIGRATE_MODE_MAKE_RESIDENT_AND_MAP) { + // block_migrate_add_mappings will acquire the work from the above + // make_resident call and update the VA block tracker. + status = block_migrate_add_mappings(va_block, va_block_retry, va_block_context, region, dest_id); + } + + if (out_tracker) + tracker_status = uvm_tracker_add_tracker_safe(out_tracker, &va_block->tracker); + + return status == NV_OK ? tracker_status : status; +} + +// Unmapping CPU pages on P9 systems is very costly, to the point that it +// becomes the bottleneck of UvmMigrate. 
We have measured up to 3x lower BW for
+// migrations that need to remove CPU mappings compared to migrations that only
+// create CPU mappings. The overhead can be fully attributed to the TLB
+// shootdown. When a CPU page is unmapped, it needs to (1) invalidate any copy
+// in the P9 cores, and (2) if ATS is enabled, issue ATSD messages over NVLINK
+// to remove the corresponding entries in the GPUs' TLBs. ATSDs are not even
+// required when migrating managed memory since UVM ensures that there are no
+// ATS entries cached in the GPU TLBs for the managed VA ranges. However, we
+// don't have a way to skip them as of today.
+//
+// In order to minimize the overhead of CPU unmaps during UvmMigrate we try to
+// call unmap_mapping_range on VA regions larger than the VA block granularity
+// before the actual migration so that TLB invalidations are batched better by
+// the OS. This also has an impact on the number of ATSD messages issued. This
+// is because the NPU code uses MMU notifiers in order to get a callback
+// (invalidate_range) when a TLB invalidation is required. Fortunately, this
+// callback is not called if there is nothing to be invalidated. Therefore, if
+// we issue a large unmap, subsequent unmaps within that region will not invoke
+// the callback.
+//
+// However, due to (1), even issuing a single invalidate for the whole migrated
+// range introduces a noticeable overhead (20-30%) on systems with 3xNVLINK2.
+// This is only expected to get worse if CPU-GPU interconnects' BW keeps
+// increasing.
+//
+// Thus, VA range migrations are split into groups of contiguous VA blocks, and
+// a single pre-unmap of each group is triggered in the Linux kernel before the
+// group's VA blocks are migrated. This way, we trigger larger (more efficient)
+// TLB invalidations than when unmapping one VA block at a time, while still
+// being able to pipeline the migration, which hides most of the costs of (1).
+//
+// However, there are some cases in which the CPU has mappings to the pages
+// being migrated but those mappings don't need to be removed (removing them
+// would just introduce unnecessary CPU faults later on). Therefore, we skip
+// the pre-unmap step under the following conditions:
+// - Pages mapped by the CPU that are *already* in the destination.
+// - Pages mapped by the CPU that are *not* in the destination but
+//   read-duplication is enabled in the VA range.
+
+// This function checks if the pre-unmap optimization is required given the
+// system capabilities and the destination of the migration. This is to skip
+// any subsequent checks required by the optimization, which can be costly.
+//
+// The current logic checks that:
+// - We are in the first pass of the migration (see the explanation of the
+//   two-pass strategy in uvm_migrate).
+// - The CPU has an NVLINK interconnect to the GPUs. Otherwise, we don't
+//   need this optimization since we are already limited by PCIe BW.
+// - The migration spans several VA blocks. Otherwise, we skip the preunmap to
+//   avoid the overhead.
+static bool migration_should_do_cpu_preunmap(uvm_va_space_t *va_space,
+                                             uvm_migrate_pass_t pass,
+                                             bool is_single_block)
+
+{
+    if (!g_uvm_perf_migrate_cpu_preunmap_enable)
+        return false;
+
+    if (pass != UVM_MIGRATE_PASS_FIRST || is_single_block)
+        return false;
+
+    if (uvm_processor_mask_get_gpu_count(&va_space->has_nvlink[UVM_ID_CPU_VALUE]) == 0)
+        return false;
+
+    return true;
+}
+
+// This function determines if the VA range properties avoid the need to remove
+// CPU mappings on UvmMigrate. Currently, it only checks whether
+// read-duplication is enabled in the VA range. This is because, when migrating
+// read-duplicated VA blocks, the source processor doesn't need to be unmapped
+// (though it may need write access revoked).
+static bool va_range_should_do_cpu_preunmap(uvm_va_policy_t *policy, uvm_va_space_t *va_space)
+{
+    return !uvm_va_policy_is_read_duplicate(policy, va_space);
+}
+
+// This function determines whether the VA block to be migrated contains pages
+// with CPU mappings that don't need to be removed (see the comment above). In
+// that case it returns false. Otherwise it returns true and stores, in the
+// variable pointed to by num_unmap_pages, the number of pages that do need
+// their CPU mappings removed.
+static bool va_block_should_do_cpu_preunmap(uvm_va_block_t *va_block,
+                                            uvm_va_block_context_t *va_block_context,
+                                            NvU64 start,
+                                            NvU64 end,
+                                            uvm_processor_id_t dest_id,
+                                            NvU32 *num_unmap_pages)
+{
+    const uvm_page_mask_t *mapped_pages_cpu;
+    NvU32 num_cpu_unchanged_pages = 0;
+    uvm_va_block_region_t region;
+
+    *num_unmap_pages = 0;
+
+    if (!va_block)
+        return true;
+
+    UVM_ASSERT(va_range_should_do_cpu_preunmap(va_block_context->policy, uvm_va_block_get_va_space(va_block)));
+
+    region = uvm_va_block_region_from_start_end(va_block, max(start, va_block->start), min(end, va_block->end));
+
+    uvm_mutex_lock(&va_block->lock);
+
+    mapped_pages_cpu = uvm_va_block_map_mask_get(va_block, UVM_ID_CPU);
+    if (uvm_processor_mask_test(&va_block->resident, dest_id)) {
+        const uvm_page_mask_t *resident_pages_dest = uvm_va_block_resident_mask_get(va_block, dest_id);
+        uvm_page_mask_t *do_not_unmap_pages = &va_block_context->scratch_page_mask;
+
+        // TODO: Bug 1877578
+        //
+        // We assume that if pages are mapped on the CPU and not resident on
+        // the destination, the pages will change residency so the CPU must be
+        // unmapped. If we implement automatic read-duplication heuristics in
+        // the future, we'll also need to check if the pages are being
+        // read-duplicated.
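+        // do_not_unmap_pages = pages mapped by the CPU that are also already
+        // resident on dest_id; their CPU mappings can stay, so they are
+        // subtracted from the unmap count computed below.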
+ uvm_page_mask_and(do_not_unmap_pages, mapped_pages_cpu, resident_pages_dest); + + num_cpu_unchanged_pages = uvm_page_mask_region_weight(do_not_unmap_pages, region); + } + + *num_unmap_pages = uvm_page_mask_region_weight(mapped_pages_cpu, region) - num_cpu_unchanged_pages; + + uvm_mutex_unlock(&va_block->lock); + + return num_cpu_unchanged_pages == 0; +} + +static void preunmap_multi_block(uvm_va_range_t *va_range, + uvm_va_block_context_t *va_block_context, + NvU64 start, + NvU64 end, + uvm_processor_id_t dest_id) +{ + size_t i; + const size_t first_block_index = uvm_va_range_block_index(va_range, start); + const size_t last_block_index = uvm_va_range_block_index(va_range, end); + NvU32 num_unmap_pages = 0; + + UVM_ASSERT(start >= va_range->node.start); + UVM_ASSERT(end <= va_range->node.end); + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + uvm_assert_rwsem_locked(&va_range->va_space->lock); + + UVM_ASSERT(uvm_range_group_all_migratable(va_range->va_space, start, end)); + + for (i = first_block_index; i <= last_block_index; i++) { + NvU32 num_block_unmap_pages; + + if (!va_block_should_do_cpu_preunmap(uvm_va_range_block(va_range, i), + va_block_context, + start, + end, + dest_id, + &num_block_unmap_pages)) { + return; + } + + num_unmap_pages += num_block_unmap_pages; + } + + if (num_unmap_pages > 0) + unmap_mapping_range(&va_range->va_space->mapping, start, end - start + 1, 1); +} + +static NV_STATUS uvm_va_range_migrate_multi_block(uvm_va_range_t *va_range, + uvm_va_block_context_t *va_block_context, + NvU64 start, + NvU64 end, + uvm_processor_id_t dest_id, + uvm_migrate_mode_t mode, + uvm_tracker_t *out_tracker) +{ + size_t i; + const size_t first_block_index = uvm_va_range_block_index(va_range, start); + const size_t last_block_index = uvm_va_range_block_index(va_range, end); + + UVM_ASSERT(start >= va_range->node.start); + UVM_ASSERT(end <= va_range->node.end); + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + uvm_assert_rwsem_locked(&va_range->va_space->lock); + + UVM_ASSERT(uvm_range_group_all_migratable(va_range->va_space, start, end)); + + // Iterate over blocks, populating them if necessary + for (i = first_block_index; i <= last_block_index; i++) { + uvm_va_block_retry_t va_block_retry; + uvm_va_block_region_t region; + uvm_va_block_t *va_block; + NV_STATUS status = uvm_va_range_block_create(va_range, i, &va_block); + + if (status != NV_OK) + return status; + + region = uvm_va_block_region_from_start_end(va_block, + max(start, va_block->start), + min(end, va_block->end)); + + status = UVM_VA_BLOCK_LOCK_RETRY(va_block, &va_block_retry, + uvm_va_block_migrate_locked(va_block, + &va_block_retry, + va_block_context, + region, + dest_id, + mode, + out_tracker)); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +static NV_STATUS uvm_va_range_migrate(uvm_va_range_t *va_range, + uvm_va_block_context_t *va_block_context, + NvU64 start, + NvU64 end, + uvm_processor_id_t dest_id, + uvm_migrate_mode_t mode, + bool should_do_cpu_preunmap, + uvm_tracker_t *out_tracker) +{ + NvU64 preunmap_range_start = start; + + should_do_cpu_preunmap = should_do_cpu_preunmap && va_range_should_do_cpu_preunmap(va_block_context->policy, + va_range->va_space); + + // Divide migrations into groups of contiguous VA blocks. This is to trigger + // CPU unmaps for that region before the migration starts. 
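+    // For illustration: assuming the usual 2MB UVM_VA_BLOCK_SIZE and the
+    // default preunmap block order of 2, g_uvm_perf_migrate_cpu_preunmap_size
+    // is 2MB << 2 = 8MB, so each iteration below pre-unmaps at most one
+    // 8MB-aligned group of VA blocks before migrating them.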
+ while (preunmap_range_start < end) { + NV_STATUS status; + NvU64 preunmap_range_end; + + if (should_do_cpu_preunmap) { + preunmap_range_end = UVM_ALIGN_UP(preunmap_range_start + 1, g_uvm_perf_migrate_cpu_preunmap_size); + preunmap_range_end = min(preunmap_range_end - 1, end); + + preunmap_multi_block(va_range, + va_block_context, + preunmap_range_start, + preunmap_range_end, + dest_id); + } + else { + preunmap_range_end = end; + } + + status = uvm_va_range_migrate_multi_block(va_range, + va_block_context, + preunmap_range_start, + preunmap_range_end, + dest_id, + mode, + out_tracker); + if (status != NV_OK) + return status; + + preunmap_range_start = preunmap_range_end + 1; + } + + return NV_OK; +} + +static NV_STATUS uvm_migrate_ranges(uvm_va_space_t *va_space, + uvm_va_block_context_t *va_block_context, + uvm_va_range_t *first_va_range, + NvU64 base, + NvU64 length, + uvm_processor_id_t dest_id, + uvm_migrate_mode_t mode, + bool should_do_cpu_preunmap, + uvm_tracker_t *out_tracker) +{ + uvm_va_range_t *va_range, *va_range_last; + NvU64 end = base + length - 1; + NV_STATUS status = NV_OK; + bool skipped_migrate = false; + + UVM_ASSERT(first_va_range == uvm_va_space_iter_first(va_space, base, base)); + + va_range_last = NULL; + uvm_for_each_va_range_in_contig_from(va_range, va_space, first_va_range, end) { + uvm_range_group_range_iter_t iter; + va_range_last = va_range; + + // Only managed ranges can be migrated + if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED) { + status = NV_ERR_INVALID_ADDRESS; + break; + } + + va_block_context->policy = uvm_va_range_get_policy(va_range); + + // For UVM-Lite GPUs, the CUDA driver may suballocate a single va_range + // into many range groups. For this reason, we iterate over each va_range first + // then through the range groups within. + uvm_range_group_for_each_migratability_in(&iter, + va_space, + max(base, va_range->node.start), + min(end, va_range->node.end)) { + // Skip non-migratable VA ranges + if (!iter.migratable) { + // Only return NV_WARN_MORE_PROCESSING_REQUIRED if the pages aren't + // already resident at dest_id. 
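+                // Residency at dest_id is approximated here by the range's
+                // preferred location: when dest_id is already the preferred
+                // location, the skipped pages are assumed to be resident there
+                // and no warning is reported.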
+ if (!uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, dest_id)) + skipped_migrate = true; + } + else if (uvm_processor_mask_test(&va_range->uvm_lite_gpus, dest_id) && + !uvm_id_equal(dest_id, uvm_va_range_get_policy(va_range)->preferred_location)) { + // Don't migrate to a non-faultable GPU that is in UVM-Lite mode, + // unless it's the preferred location + status = NV_ERR_INVALID_DEVICE; + break; + } + else { + status = uvm_va_range_migrate(va_range, + va_block_context, + iter.start, + iter.end, + dest_id, + mode, + should_do_cpu_preunmap, + out_tracker); + if (status != NV_OK) + break; + } + } + } + + if (status != NV_OK) + return status; + + // Check that we were able to iterate over the entire range without any gaps + if (!va_range_last || va_range_last->node.end < end) + return NV_ERR_INVALID_ADDRESS; + + if (skipped_migrate) + return NV_WARN_MORE_PROCESSING_REQUIRED; + + return NV_OK; +} + +static NV_STATUS uvm_migrate(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 base, + NvU64 length, + uvm_processor_id_t dest_id, + NvU32 migrate_flags, + uvm_tracker_t *out_tracker) +{ + NV_STATUS status = NV_OK; + uvm_va_range_t *first_va_range = uvm_va_space_iter_first(va_space, base, base); + uvm_va_block_context_t *va_block_context; + bool do_mappings; + bool do_two_passes; + bool is_single_block; + bool should_do_cpu_preunmap; + + uvm_assert_rwsem_locked(&va_space->lock); + + if (!first_va_range || first_va_range->type != UVM_VA_RANGE_TYPE_MANAGED) + return NV_ERR_INVALID_ADDRESS; + + // If the GPU has its memory disabled, just skip the migration and let + // faults take care of things. + if (!uvm_va_space_processor_has_memory(va_space, dest_id)) + return NV_OK; + + if (mm) + uvm_assert_mmap_lock_locked(mm); + va_block_context = uvm_va_block_context_alloc(mm); + if (!va_block_context) + return NV_ERR_NO_MEMORY; + + // We perform two passes (unless the migration only covers a single VA + // block or UVM_MIGRATE_FLAG_SKIP_CPU_MAP is passed). This helps in the + // following scenarios: + // + // - Migrations that add CPU mappings, since they are synchronous operations + // that delay the migration of the next VA blocks. + // - Concurrent migrations. This is due to our current channel selection + // logic that doesn't prevent false dependencies between independent + // operations. For example, removal of mappings for outgoing transfers are + // delayed by the mappings added by incoming transfers. + // TODO: Bug 1764953: Re-evaluate the two-pass logic when channel selection + // is overhauled. + // + // The two passes are as follows: + // + // 1- Transfer all VA blocks (do not add mappings) + // 2- Go block by block reexecuting the transfer (in case someone moved it + // since the first pass), and adding the mappings. + is_single_block = is_migration_single_block(first_va_range, base, length); + do_mappings = UVM_ID_IS_GPU(dest_id) || !(migrate_flags & UVM_MIGRATE_FLAG_SKIP_CPU_MAP); + do_two_passes = do_mappings && !is_single_block; + + if (do_two_passes) { + should_do_cpu_preunmap = migration_should_do_cpu_preunmap(va_space, UVM_MIGRATE_PASS_FIRST, is_single_block); + + status = uvm_migrate_ranges(va_space, + va_block_context, + first_va_range, + base, + length, + dest_id, + UVM_MIGRATE_MODE_MAKE_RESIDENT, + should_do_cpu_preunmap, + out_tracker); + } + + if (status == NV_OK) { + uvm_migrate_mode_t mode = do_mappings? UVM_MIGRATE_MODE_MAKE_RESIDENT_AND_MAP: + UVM_MIGRATE_MODE_MAKE_RESIDENT; + uvm_migrate_pass_t pass = do_two_passes? 
UVM_MIGRATE_PASS_SECOND: + UVM_MIGRATE_PASS_FIRST; + should_do_cpu_preunmap = migration_should_do_cpu_preunmap(va_space, pass, is_single_block); + + status = uvm_migrate_ranges(va_space, + va_block_context, + first_va_range, + base, + length, + dest_id, + mode, + should_do_cpu_preunmap, + out_tracker); + } + + uvm_va_block_context_free(va_block_context); + + return status; +} + +static NV_STATUS semaphore_release_from_gpu(uvm_gpu_t *gpu, + uvm_va_range_semaphore_pool_t *semaphore_va_range, + NvU64 semaphore_user_addr, + NvU32 semaphore_payload, + uvm_tracker_t *release_after_tracker) +{ + NV_STATUS status; + uvm_push_t push; + uvm_channel_type_t channel_type; + NvU64 semaphore_gpu_va; + NvU64 semaphore_offset; + + UVM_ASSERT(uvm_mem_mapped_on_gpu_kernel(semaphore_va_range->mem, gpu)); + + semaphore_offset = semaphore_user_addr - (NvU64)(uintptr_t)semaphore_va_range->mem->user->addr; + semaphore_gpu_va = uvm_mem_get_gpu_va_kernel(semaphore_va_range->mem, gpu) + semaphore_offset; + + // Outside of SR-IOV heavy, using UVM_CHANNEL_TYPE_MEMOPS is optimal from a + // performance standpoint because if the migration is targeting a GPU, it is + // likely that the channel used for the GPU page table update (pushed to + // UVM_CHANNEL_TYPE_MEMOPS) will also be used for the release. The + // inter-channel dependency avoided by using a single channel can add a + // significant overhead to the enclosing migration. + // + // In SR-IOV heavy, the user semaphore release is functionally forbidden + // from being pushed to a UVM_CHANNEL_TYPE_MEMOPS channel, because it is not + // a page tree operation. + if (uvm_gpu_is_virt_mode_sriov_heavy(gpu)) + channel_type = UVM_CHANNEL_TYPE_GPU_INTERNAL; + else + channel_type = UVM_CHANNEL_TYPE_MEMOPS; + + status = uvm_push_begin_acquire(gpu->channel_manager, + channel_type, + release_after_tracker, + &push, + "Pushing semaphore release (*0x%llx = %u)", + semaphore_user_addr, + semaphore_payload); + if (status != NV_OK) + return status; + + gpu->parent->ce_hal->semaphore_release(&push, semaphore_gpu_va, semaphore_payload); + uvm_push_end(&push); + + uvm_mutex_lock(&semaphore_va_range->tracker_lock); + status = uvm_tracker_add_push_safe(&semaphore_va_range->tracker, &push); + uvm_tracker_remove_completed(&semaphore_va_range->tracker); + uvm_mutex_unlock(&semaphore_va_range->tracker_lock); + + return status; +} + +static void semaphore_release_from_cpu(uvm_mem_t *semaphore_mem, NvU64 semaphore_user_addr, NvU32 semaphore_payload) +{ + char *semaphore_cpu_va; + NvU64 semaphore_offset; + + UVM_ASSERT(uvm_mem_mapped_on_cpu_kernel(semaphore_mem)); + + semaphore_offset = semaphore_user_addr - (NvU64)(uintptr_t)semaphore_mem->user->addr; + + // Prevent processor speculation prior to accessing user-mapped memory to + // avoid leaking information from side-channel attacks. Under speculation, a + // valid VA range which does not contain this semaphore could be used by the + // caller. It's unclear but likely that the user might be able to control + // the data at that address. Auditing all potential ways that could happen + // is difficult and error-prone, so to be on the safe side we'll just always + // block speculation. 
+ nv_speculation_barrier(); + + semaphore_cpu_va = (char *) uvm_mem_get_cpu_addr_kernel(semaphore_mem) + semaphore_offset; + + UVM_WRITE_ONCE(*(NvU32 *)semaphore_cpu_va, semaphore_payload); +} + +static NV_STATUS semaphore_release(NvU64 semaphore_address, + NvU32 semaphore_payload, + uvm_va_range_semaphore_pool_t *semaphore_pool, + uvm_gpu_t *dest_gpu, + uvm_tracker_t *tracker_ptr) +{ + uvm_gpu_t *gpu; + uvm_gpu_t *gpu_owner = semaphore_pool->owner; + + // If there is a GPU owner, release the semaphore from it. + if (gpu_owner != NULL) + return semaphore_release_from_gpu(gpu_owner, semaphore_pool, semaphore_address, semaphore_payload, tracker_ptr); + + // Attempt eager release from CPU if the tracker is already completed. + if (uvm_tracker_is_completed(tracker_ptr)) { + semaphore_release_from_cpu(semaphore_pool->mem, semaphore_address, semaphore_payload); + return NV_OK; + } + + if (dest_gpu == NULL) { + // The destination is the CPU, but we didn't do a CPU release above + // because the previous work is not complete. This situation arises when + // accessed_by mappings are being set up asynchronously, or the + // test-only flag UVM_MIGRATE_FLAG_SKIP_CPU_MAP is used. So there should + // be a registered GPU, since all CPU work is synchronous, and the + // semaphore must be mapped on that GPU. + // + // Note that the GPU selected for the release may not be the same device + // that prevented the tracker from being complete. + gpu = uvm_global_processor_mask_find_first_gpu(&semaphore_pool->mem->kernel.mapped_on); + + UVM_ASSERT(gpu != NULL); + } + else { + gpu = dest_gpu; + } + + return semaphore_release_from_gpu(gpu, semaphore_pool, semaphore_address, semaphore_payload, tracker_ptr); +} + +NV_STATUS uvm_migrate_init() +{ + NV_STATUS status = uvm_migrate_pageable_init(); + if (status != NV_OK) + return status; + + g_uvm_perf_migrate_cpu_preunmap_enable = uvm_perf_migrate_cpu_preunmap_enable != 0; + + BUILD_BUG_ON((UVM_VA_BLOCK_SIZE) & (UVM_VA_BLOCK_SIZE - 1)); + + if (g_uvm_perf_migrate_cpu_preunmap_enable) { + if (uvm_perf_migrate_cpu_preunmap_block_order <= UVM_PERF_MIGRATE_CPU_PREUNMAP_BLOCK_ORDER_MAX) { + g_uvm_perf_migrate_cpu_preunmap_size = UVM_VA_BLOCK_SIZE << uvm_perf_migrate_cpu_preunmap_block_order; + } + else { + g_uvm_perf_migrate_cpu_preunmap_size = UVM_VA_BLOCK_SIZE << UVM_PERF_MIGRATE_CPU_PREUNMAP_BLOCK_ORDER_DEFAULT; + + pr_info("Invalid value %u for uvm_perf_migrate_cpu_preunmap_block_order. Using %u instead\n", + uvm_perf_migrate_cpu_preunmap_block_order, + UVM_PERF_MIGRATE_CPU_PREUNMAP_BLOCK_ORDER_DEFAULT); + } + } + + return NV_OK; +} + +void uvm_migrate_exit() +{ + uvm_migrate_pageable_exit(); +} + +NV_STATUS uvm_api_migrate(UVM_MIGRATE_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + uvm_tracker_t *tracker_ptr = NULL; + uvm_gpu_t *dest_gpu = NULL; + uvm_va_range_t *sema_va_range = NULL; + struct mm_struct *mm; + NV_STATUS status = NV_OK; + bool flush_events = false; + const bool synchronous = !(params->flags & UVM_MIGRATE_FLAG_ASYNC); + + // We temporarily allow 0 length in the IOCTL parameters as a signal to + // only release the semaphore. This is because user-space is in charge of + // migrating pageable memory in some cases. + // + // TODO: Bug 2419180: do not allow 0 length migrations when we fully switch + // to migrate_vma for all types of vmas. 
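+    // In other words, base/length validation below is skipped only for the
+    // asynchronous, zero-length, semaphore-release-only case described above;
+    // any request that actually migrates data is still range-checked.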
+ if (params->length > 0 || synchronous || params->semaphoreAddress == 0) { + if (uvm_api_range_invalid(params->base, params->length)) + return NV_ERR_INVALID_ADDRESS; + } + + if (params->flags & ~UVM_MIGRATE_FLAGS_ALL) + return NV_ERR_INVALID_ARGUMENT; + + if ((params->flags & UVM_MIGRATE_FLAGS_TEST_ALL) && !uvm_enable_builtin_tests) { + UVM_INFO_PRINT("Test flag set for UVM_MIGRATE. Did you mean to insmod with uvm_enable_builtin_tests=1?\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // mmap_lock will be needed if we have to create CPU mappings + mm = uvm_va_space_mm_or_current_retain_lock(va_space); + uvm_va_space_down_read(va_space); + + if (synchronous) { + if (params->semaphoreAddress != 0) { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + else { + if (params->semaphoreAddress == 0) { + if (params->semaphorePayload != 0) { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + else { + sema_va_range = uvm_va_range_find(va_space, params->semaphoreAddress); + if (!IS_ALIGNED(params->semaphoreAddress, sizeof(params->semaphorePayload)) || + !sema_va_range || sema_va_range->type != UVM_VA_RANGE_TYPE_SEMAPHORE_POOL) { + status = NV_ERR_INVALID_ADDRESS; + goto done; + } + } + } + + if (!uvm_uuid_is_cpu(¶ms->destinationUuid)) { + if (params->flags & UVM_MIGRATE_FLAG_NO_GPU_VA_SPACE) + dest_gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->destinationUuid); + else + dest_gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, ¶ms->destinationUuid); + + if (!dest_gpu) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + if (params->length > 0 && !uvm_gpu_can_address(dest_gpu, params->base, params->length)) { + status = NV_ERR_OUT_OF_RANGE; + goto done; + } + } + + UVM_ASSERT(status == NV_OK); + + // If we're synchronous or if we need to release a semaphore, use a tracker. + if (synchronous || params->semaphoreAddress) + tracker_ptr = &tracker; + + if (params->length > 0) { + status = uvm_api_range_type_check(va_space, mm, params->base, params->length); + if (status == NV_OK) { + status = uvm_migrate(va_space, + mm, + params->base, + params->length, + (dest_gpu ? dest_gpu->id : UVM_ID_CPU), + params->flags, + tracker_ptr); + } + else if (status == NV_WARN_NOTHING_TO_DO) { + uvm_migrate_args_t uvm_migrate_args = + { + .va_space = va_space, + .mm = mm, + .start = params->base, + .length = params->length, + .dst_id = (dest_gpu ? dest_gpu->id : UVM_ID_CPU), + .dst_node_id = (int)params->cpuNumaNode, + .populate_permissions = UVM_POPULATE_PERMISSIONS_INHERIT, + .touch = false, + .skip_mapped = false, + .user_space_start = ¶ms->userSpaceStart, + .user_space_length = ¶ms->userSpaceLength, + }; + + status = uvm_migrate_pageable(&uvm_migrate_args); + } + } + +done: + // We only need to hold mmap_lock to create new CPU mappings, so drop it if + // we need to wait for the tracker to finish. + // + // TODO: Bug 1766650: For large migrations with destination CPU, try + // benchmarks to see if a two-pass approach would be faster (first + // pass pushes all GPU work asynchronously, second pass updates CPU + // mappings synchronously). + if (mm) { + uvm_up_read_mmap_lock_out_of_order(mm); + uvm_va_space_mm_or_current_release(va_space, mm); + } + + if (tracker_ptr) { + // If requested, release semaphore + if (params->semaphoreAddress && (status == NV_OK)) { + status = semaphore_release(params->semaphoreAddress, + params->semaphorePayload, + &sema_va_range->semaphore_pool, + dest_gpu, + tracker_ptr); + } + + // Wait on the tracker if we are synchronous or there was an error. 
The + // VA space lock must be held to prevent GPUs from being unregistered. + if (synchronous || (status != NV_OK)) { + NV_STATUS tracker_status = uvm_tracker_wait(tracker_ptr); + + // Only clobber status if we didn't hit an earlier error + if (status == NV_OK) + status = tracker_status; + + flush_events = true; + } + + uvm_tracker_deinit(tracker_ptr); + } + + uvm_va_space_up_read(va_space); + + // If the migration is known to be complete, eagerly dispatch the migration + // events, instead of processing them on a later event flush. Note that an + // asynchronous migration could be complete by now, but the flush would not + // be triggered. + if (flush_events) + uvm_tools_flush_events(); + + return status; +} + +NV_STATUS uvm_api_migrate_range_group(UVM_MIGRATE_RANGE_GROUP_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + NV_STATUS tracker_status = NV_OK; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + struct mm_struct *mm; + uvm_range_group_t *range_group; + uvm_range_group_range_t *rgr; + uvm_processor_id_t dest_id; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + NvU32 migrate_flags = 0; + uvm_gpu_t *gpu = NULL; + + // mmap_lock will be needed if we have to create CPU mappings + mm = uvm_va_space_mm_or_current_retain_lock(va_space); + uvm_va_space_down_read(va_space); + + if (uvm_uuid_is_cpu(¶ms->destinationUuid)) { + dest_id = UVM_ID_CPU; + } + else { + gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, ¶ms->destinationUuid); + if (!gpu) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + dest_id = gpu->id; + } + + range_group = radix_tree_lookup(&va_space->range_groups, params->rangeGroupId); + if (!range_group) { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + // Migrate all VA ranges in the range group. uvm_migrate is used because it performs all + // VA range validity checks. + list_for_each_entry(rgr, &range_group->ranges, range_group_list_node) { + NvU64 start = rgr->node.start; + NvU64 length = rgr->node.end - rgr->node.start + 1; + + if (gpu && !uvm_gpu_can_address(gpu, start, length)) + status = NV_ERR_OUT_OF_RANGE; + else + status = uvm_migrate(va_space, mm, start, length, dest_id, migrate_flags, &local_tracker); + + if (status != NV_OK) + goto done; + } + +done: + // We only need to hold mmap_lock to create new CPU mappings, so drop it if + // we need to wait for the tracker to finish. + // + // TODO: Bug 1766650: For large migrations with destination CPU, try + // benchmarks to see if a two-pass approach would be faster (first + // pass pushes all GPU work asynchronously, second pass updates CPU + // mappings synchronously). + if (mm) { + uvm_up_read_mmap_lock_out_of_order(mm); + uvm_va_space_mm_or_current_release(va_space, mm); + } + + tracker_status = uvm_tracker_wait_deinit(&local_tracker); + uvm_va_space_up_read(va_space); + + // This API is synchronous, so wait for migrations to finish + uvm_tools_flush_events(); + + return status == NV_OK? 
tracker_status : status; +} diff --git a/kernel-open/nvidia-uvm/uvm_migrate.h b/kernel-open/nvidia-uvm/uvm_migrate.h new file mode 100644 index 000000000..a1839e349 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_migrate.h @@ -0,0 +1,25 @@ +/******************************************************************************* + Copyright (c) 2018 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +NV_STATUS uvm_migrate_init(void); +void uvm_migrate_exit(void); diff --git a/kernel-open/nvidia-uvm/uvm_migrate_pageable.c b/kernel-open/nvidia-uvm/uvm_migrate_pageable.c new file mode 100644 index 000000000..d2e3e324c --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_migrate_pageable.c @@ -0,0 +1,1012 @@ +/******************************************************************************* + Copyright (c) 2018-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_gpu.h" +#include "uvm_lock.h" +#include "uvm_va_space.h" +#include "uvm_tracker.h" +#include "uvm_api.h" +#include "uvm_push.h" +#include "uvm_hal.h" +#include "uvm_migrate_pageable.h" +#include "uvm_populate_pageable.h" + +#ifdef UVM_MIGRATE_VMA_SUPPORTED + +static struct kmem_cache *g_uvm_migrate_vma_state_cache __read_mostly; + +static const gfp_t g_migrate_vma_gfp_flags = NV_UVM_GFP_FLAGS | GFP_HIGHUSER_MOVABLE | __GFP_THISNODE; + +// Compute the address needed for copying_gpu to access the given page, +// resident on resident_id. +static NV_STATUS migrate_vma_page_copy_address(struct page *page, + unsigned long page_index, + uvm_processor_id_t resident_id, + uvm_gpu_t *copying_gpu, + migrate_vma_state_t *state, + uvm_gpu_address_t *gpu_addr) +{ + uvm_va_space_t *va_space = state->uvm_migrate_args->va_space; + uvm_gpu_t *owning_gpu = UVM_ID_IS_CPU(resident_id)? NULL: uvm_va_space_get_gpu(va_space, resident_id); + const bool can_copy_from = uvm_processor_mask_test(&va_space->can_copy_from[uvm_id_value(copying_gpu->id)], + resident_id); + const bool direct_peer = owning_gpu && + (owning_gpu != copying_gpu) && + can_copy_from && + !uvm_gpu_peer_caps(owning_gpu, copying_gpu)->is_indirect_peer; + + UVM_ASSERT(page_index < state->num_pages); + + memset(gpu_addr, 0, sizeof(*gpu_addr)); + + if (owning_gpu == copying_gpu) { + // Local vidmem address + *gpu_addr = uvm_gpu_address_from_phys(uvm_gpu_page_to_phys_address(owning_gpu, page)); + } + else if (direct_peer) { + // Direct GPU peer + uvm_gpu_identity_mapping_t *gpu_peer_mappings = uvm_gpu_get_peer_mapping(copying_gpu, owning_gpu->id); + uvm_gpu_phys_address_t phys_addr = uvm_gpu_page_to_phys_address(owning_gpu, page); + + *gpu_addr = uvm_gpu_address_virtual(gpu_peer_mappings->base + phys_addr.address); + } + else { + // Sysmem/Indirect Peer + NV_STATUS status = uvm_gpu_map_cpu_page(copying_gpu, page, &state->dma.addrs[page_index]); + + if (status != NV_OK) + return status; + + state->dma.addrs_gpus[page_index] = copying_gpu; + + if (state->dma.num_pages++ == 0) + bitmap_zero(state->dma.page_mask, state->num_pages); + + UVM_ASSERT(!test_bit(page_index, state->dma.page_mask)); + + __set_bit(page_index, state->dma.page_mask); + + *gpu_addr = uvm_gpu_address_physical(UVM_APERTURE_SYS, state->dma.addrs[page_index]); + } + + return NV_OK; +} + +// Return the GPU identified with the given NUMA node id +static uvm_gpu_t *get_gpu_from_node_id(uvm_va_space_t *va_space, int node_id) +{ + uvm_gpu_t *gpu; + + for_each_va_space_gpu(gpu, va_space) { + if (uvm_gpu_numa_info(gpu)->node_id == node_id) + return gpu; + } + + return NULL; +} + +// Create a new push to zero pages on dst_id +static NV_STATUS migrate_vma_zero_begin_push(uvm_va_space_t *va_space, + uvm_processor_id_t dst_id, + uvm_gpu_t *gpu, + unsigned long start, + unsigned long outer, + uvm_push_t *push) +{ + uvm_channel_type_t channel_type; + + if (UVM_ID_IS_CPU(dst_id)) { + channel_type = UVM_CHANNEL_TYPE_GPU_TO_CPU; + } + else { + UVM_ASSERT(uvm_id_equal(dst_id, gpu->id)); + channel_type = UVM_CHANNEL_TYPE_GPU_INTERNAL; + } + + return uvm_push_begin(gpu->channel_manager, + channel_type, + push, + "Zero %s from %s VMA region [0x%lx, 0x%lx]", + uvm_va_space_processor_name(va_space, dst_id), + uvm_va_space_processor_name(va_space, gpu->id), + start, + outer); +} + +// Create a new push to copy pages between src_id and dst_id +static NV_STATUS 
migrate_vma_copy_begin_push(uvm_va_space_t *va_space, + uvm_processor_id_t dst_id, + uvm_processor_id_t src_id, + unsigned long start, + unsigned long outer, + uvm_push_t *push) +{ + uvm_channel_type_t channel_type; + uvm_gpu_t *gpu; + + UVM_ASSERT_MSG(!uvm_id_equal(src_id, dst_id), + "Unexpected copy to self, processor %s\n", + uvm_va_space_processor_name(va_space, src_id)); + + if (UVM_ID_IS_CPU(src_id)) { + gpu = uvm_va_space_get_gpu(va_space, dst_id); + channel_type = UVM_CHANNEL_TYPE_CPU_TO_GPU; + } + else if (UVM_ID_IS_CPU(dst_id)) { + gpu = uvm_va_space_get_gpu(va_space, src_id); + channel_type = UVM_CHANNEL_TYPE_GPU_TO_CPU; + } + else { + // For GPU to GPU copies, prefer to "push" the data from the source as + // that works better + gpu = uvm_va_space_get_gpu(va_space, src_id); + + channel_type = UVM_CHANNEL_TYPE_GPU_TO_GPU; + } + + // NUMA-enabled GPUs can copy to any other NUMA node in the system even if + // P2P access has not been explicitly enabled (ie va_space->can_copy_from + // is not set). + if (!gpu->parent->numa_info.enabled) { + UVM_ASSERT_MSG(uvm_processor_mask_test(&va_space->can_copy_from[uvm_id_value(gpu->id)], dst_id), + "GPU %s dst %s src %s\n", + uvm_va_space_processor_name(va_space, gpu->id), + uvm_va_space_processor_name(va_space, dst_id), + uvm_va_space_processor_name(va_space, src_id)); + UVM_ASSERT_MSG(uvm_processor_mask_test(&va_space->can_copy_from[uvm_id_value(gpu->id)], src_id), + "GPU %s dst %s src %s\n", + uvm_va_space_processor_name(va_space, gpu->id), + uvm_va_space_processor_name(va_space, dst_id), + uvm_va_space_processor_name(va_space, src_id)); + } + + if (channel_type == UVM_CHANNEL_TYPE_GPU_TO_GPU) { + uvm_gpu_t *dst_gpu = uvm_va_space_get_gpu(va_space, dst_id); + return uvm_push_begin_gpu_to_gpu(gpu->channel_manager, + dst_gpu, + push, + "Copy from %s to %s for VMA region [0x%lx, 0x%lx]", + uvm_va_space_processor_name(va_space, src_id), + uvm_va_space_processor_name(va_space, dst_id), + start, + outer); + } + + return uvm_push_begin(gpu->channel_manager, + channel_type, + push, + "Copy from %s to %s for VMA region [0x%lx, 0x%lx]", + uvm_va_space_processor_name(va_space, src_id), + uvm_va_space_processor_name(va_space, dst_id), + start, + outer); +} + +static void migrate_vma_compute_masks(struct vm_area_struct *vma, const unsigned long *src, migrate_vma_state_t *state) +{ + unsigned long i; + const bool is_rw = vma->vm_flags & VM_WRITE; + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + uvm_processor_id_t dst_id = uvm_migrate_args->dst_id; + + UVM_ASSERT(vma_is_anonymous(vma)); + + bitmap_zero(state->populate_pages_mask, state->num_pages); + bitmap_zero(state->allocation_failed_mask, state->num_pages); + bitmap_zero(state->dst_resident_pages_mask, state->num_pages); + + uvm_processor_mask_zero(&state->src_processors); + state->num_populate_anon_pages = 0; + state->dma.num_pages = 0; + + for (i = 0; i < state->num_pages; ++i) { + uvm_processor_id_t src_id; + struct page *src_page = NULL; + int src_nid; + uvm_gpu_t *src_gpu = NULL; + + // Skip pages that cannot be migrated + if (!(src[i] & MIGRATE_PFN_MIGRATE)) { + // This can happen in two cases : + // - Page is populated but can't be migrated. + // - Page isn't populated + // In both the above cases, treat the page as failing migration and + // populate with get_user_pages. 
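+            // Note that only the not-populated case is flagged for population
+            // here; a page that is populated but failed to be isolated for
+            // migration is classified later in
+            // uvm_migrate_vma_finalize_and_map().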
+ if (!(src[i] & MIGRATE_PFN_VALID)) + __set_bit(i, state->populate_pages_mask); + + continue; + } + + src_page = migrate_pfn_to_page(src[i]); + if (!src_page) { + if (is_rw) { + // Populate PROT_WRITE vmas in migrate_vma so we can use the + // GPU's copy engines + if (state->num_populate_anon_pages++ == 0) + bitmap_zero(state->processors[uvm_id_value(dst_id)].page_mask, state->num_pages); + + __set_bit(i, state->processors[uvm_id_value(dst_id)].page_mask); + } + else { + // PROT_NONE vmas cannot be populated. PROT_READ anonymous vmas + // are populated using the zero page. In order to match this + // behavior, we tell the caller to populate using + // get_user_pages. + __set_bit(i, state->populate_pages_mask); + } + + continue; + } + + // Page is already mapped. Skip migration of this page if requested. + if (uvm_migrate_args->skip_mapped) { + __set_bit(i, state->populate_pages_mask); + continue; + } + + src_nid = page_to_nid(src_page); + + // Already at destination + if (src_nid == uvm_migrate_args->dst_node_id) { + __set_bit(i, state->dst_resident_pages_mask); + continue; + } + + // Already resident on a CPU node, don't move + if (UVM_ID_IS_CPU(dst_id) && node_state(src_nid, N_CPU)) { + __set_bit(i, state->dst_resident_pages_mask); + continue; + } + + src_gpu = get_gpu_from_node_id(uvm_migrate_args->va_space, src_nid); + + // Already resident on a node with no CPUs that doesn't belong to a + // GPU, don't move + if (UVM_ID_IS_CPU(dst_id) && !src_gpu) { + __set_bit(i, state->dst_resident_pages_mask); + continue; + } + + // TODO: Bug 2449272: Implement non-P2P copies. All systems that hit + // this path have P2P copy support between all GPUs in the system, but + // it could change in the future. + + if (src_gpu) + src_id = src_gpu->id; + else + src_id = UVM_ID_CPU; + + if (!uvm_processor_mask_test_and_set(&state->src_processors, src_id)) + bitmap_zero(state->processors[uvm_id_value(src_id)].page_mask, state->num_pages); + + __set_bit(i, state->processors[uvm_id_value(src_id)].page_mask); + } +} + +static struct page *migrate_vma_alloc_page(migrate_vma_state_t *state) +{ + struct page *dst_page; + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + uvm_va_space_t *va_space = uvm_migrate_args->va_space; + + if (uvm_enable_builtin_tests && atomic_dec_if_positive(&va_space->test.migrate_vma_allocation_fail_nth) == 0) { + dst_page = NULL; + } + else { + dst_page = alloc_pages_node(uvm_migrate_args->dst_node_id, g_migrate_vma_gfp_flags, 0); + + // TODO: Bug 2399573: Linux commit + // 183f6371aac2a5496a8ef2b0b0a68562652c3cdb introduced a bug that makes + // __GFP_THISNODE not always be honored (this was later fixed in commit + // 7810e6781e0fcbca78b91cf65053f895bf59e85f). Therefore, we verify + // whether the flag has been honored and abort the allocation, + // otherwise. Remove this check when the fix is deployed on all + // production systems. 
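+        // Returning NULL here makes the callers record the page in
+        // allocation_failed_mask and skip it, so it is populated later instead
+        // of being migrated.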
+ if (dst_page && page_to_nid(dst_page) != uvm_migrate_args->dst_node_id) { + __free_page(dst_page); + dst_page = NULL; + } + } + + return dst_page; +} + +static NV_STATUS migrate_vma_populate_anon_pages(struct vm_area_struct *vma, + unsigned long *dst, + unsigned long start, + unsigned long outer, + migrate_vma_state_t *state) +{ + NV_STATUS status = NV_OK; + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + uvm_processor_id_t dst_id = uvm_migrate_args->dst_id; + unsigned long *page_mask = state->processors[uvm_id_value(dst_id)].page_mask; + uvm_gpu_t *copying_gpu = NULL; + uvm_va_space_t *va_space = uvm_migrate_args->va_space; + uvm_push_t push; + unsigned long i; + + // Nothing to do + if (state->num_populate_anon_pages == 0) + return NV_OK; + + UVM_ASSERT(state->num_populate_anon_pages == bitmap_weight(page_mask, state->num_pages)); + + for_each_set_bit(i, page_mask, state->num_pages) { + uvm_gpu_address_t dst_address; + struct page *dst_page; + + dst_page = migrate_vma_alloc_page(state); + if (!dst_page) { + __set_bit(i, state->allocation_failed_mask); + continue; + } + + if (!copying_gpu) { + // Try to get a GPU attached to the node being populated. If there + // is none, use any of the GPUs registered in the VA space. + if (UVM_ID_IS_CPU(dst_id)) { + copying_gpu = uvm_va_space_find_first_gpu_attached_to_cpu_node(va_space, uvm_migrate_args->dst_node_id); + if (!copying_gpu) + copying_gpu = uvm_va_space_find_first_gpu(va_space); + } + else { + copying_gpu = uvm_va_space_get_gpu(va_space, dst_id); + } + + UVM_ASSERT(copying_gpu); + + status = migrate_vma_zero_begin_push(va_space, dst_id, copying_gpu, start, outer - 1, &push); + if (status != NV_OK) { + __free_page(dst_page); + return status; + } + } + else { + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + } + + status = migrate_vma_page_copy_address(dst_page, i, dst_id, copying_gpu, state, &dst_address); + if (status != NV_OK) { + __free_page(dst_page); + break; + } + + lock_page(dst_page); + + // We'll push one membar later for all memsets in this loop + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + copying_gpu->parent->ce_hal->memset_8(&push, dst_address, 0, PAGE_SIZE); + + dst[i] = migrate_pfn(page_to_pfn(dst_page)); + } + + if (copying_gpu) { + NV_STATUS tracker_status; + + uvm_push_end(&push); + + tracker_status = uvm_tracker_add_push_safe(&state->tracker, &push); + if (status == NV_OK) + status = tracker_status; + } + + return status; +} + +static NV_STATUS migrate_vma_copy_pages_from(struct vm_area_struct *vma, + const unsigned long *src, + unsigned long *dst, + unsigned long start, + unsigned long outer, + uvm_processor_id_t src_id, + migrate_vma_state_t *state) +{ + NV_STATUS status = NV_OK; + uvm_push_t push; + unsigned long i; + uvm_gpu_t *copying_gpu = NULL; + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + uvm_processor_id_t dst_id = uvm_migrate_args->dst_id; + unsigned long *page_mask = state->processors[uvm_id_value(src_id)].page_mask; + uvm_va_space_t *va_space = uvm_migrate_args->va_space; + + UVM_ASSERT(!bitmap_empty(page_mask, state->num_pages)); + + for_each_set_bit(i, page_mask, state->num_pages) { + uvm_gpu_address_t src_address; + uvm_gpu_address_t dst_address; + struct page *src_page = migrate_pfn_to_page(src[i]); + struct page *dst_page; + + UVM_ASSERT(src[i] & MIGRATE_PFN_VALID); + UVM_ASSERT(src_page); + + dst_page = migrate_vma_alloc_page(state); + if (!dst_page) { + __set_bit(i, state->allocation_failed_mask); + continue; + } + + if 
(!copying_gpu) { + status = migrate_vma_copy_begin_push(va_space, dst_id, src_id, start, outer - 1, &push); + if (status != NV_OK) { + __free_page(dst_page); + return status; + } + + copying_gpu = uvm_push_get_gpu(&push); + } + else { + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + } + + // We don't have a case where both src and dst use the SYS aperture, so + // the second call can't overwrite a dma addr set up by the first call. + status = migrate_vma_page_copy_address(src_page, i, src_id, copying_gpu, state, &src_address); + if (status == NV_OK) + status = migrate_vma_page_copy_address(dst_page, i, dst_id, copying_gpu, state, &dst_address); + + if (status != NV_OK) { + __free_page(dst_page); + break; + } + + lock_page(dst_page); + + // We'll push one membar later for all copies in this loop + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + copying_gpu->parent->ce_hal->memcopy(&push, dst_address, src_address, PAGE_SIZE); + + dst[i] = migrate_pfn(page_to_pfn(dst_page)); + } + + // TODO: Bug 1766424: If the destination is a GPU and the copy was done by + // that GPU, use a GPU-local membar if no peer nor the CPU can + // currently map this page. When peer access gets enabled, do a + // MEMBAR_SYS at that point. + if (copying_gpu) { + NV_STATUS tracker_status; + + uvm_push_end(&push); + + tracker_status = uvm_tracker_add_push_safe(&state->tracker, &push); + if (status == NV_OK) + status = tracker_status; + } + + return status; +} + +static NV_STATUS migrate_vma_copy_pages(struct vm_area_struct *vma, + const unsigned long *src, + unsigned long *dst, + unsigned long start, + unsigned long outer, + migrate_vma_state_t *state) +{ + uvm_processor_id_t src_id; + + for_each_id_in_mask(src_id, &state->src_processors) { + NV_STATUS status = migrate_vma_copy_pages_from(vma, src, dst, start, outer, src_id, state); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +void uvm_migrate_vma_alloc_and_copy(struct migrate_vma *args, migrate_vma_state_t *state) +{ + struct vm_area_struct *vma = args->vma; + unsigned long start = args->start; + unsigned long outer = args->end; + NV_STATUS tracker_status; + + uvm_tracker_init(&state->tracker); + + state->num_pages = (outer - start) / PAGE_SIZE; + state->status = NV_OK; + + migrate_vma_compute_masks(vma, args->src, state); + + state->status = migrate_vma_populate_anon_pages(vma, args->dst, start, outer, state); + + if (state->status == NV_OK) + state->status = migrate_vma_copy_pages(vma, args->src, args->dst, start, outer, state); + + // Wait for tracker since all copies must have completed before returning + tracker_status = uvm_tracker_wait_deinit(&state->tracker); + + if (state->status == NV_OK) + state->status = tracker_status; +} + +void uvm_migrate_vma_alloc_and_copy_helper(struct vm_area_struct *vma, + const unsigned long *src, + unsigned long *dst, + unsigned long start, + unsigned long end, + void *private) +{ + struct migrate_vma args = + { + .vma = vma, + .dst = dst, + .src = (unsigned long *) src, + .start = start, + .end = end, + }; + + uvm_migrate_vma_alloc_and_copy(&args, (migrate_vma_state_t *) private); +} + +void uvm_migrate_vma_finalize_and_map(struct migrate_vma *args, migrate_vma_state_t *state) +{ + unsigned long i; + + for (i = 0; i < state->num_pages; i++) { + bool needs_touch = false; + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + + // The page was successfully migrated. 
+ if (args->src[i] & MIGRATE_PFN_MIGRATE) { + // Touch if requested since population of these pages won't be tried + // later. + needs_touch = true; + } + else { + // The page was not migrated. This can happen for two reasons. + // + // 1. Page is already resident at the destination. + // 2. Page failed migration because the page state could not be + // migrated by the kernel. + // + // So, only set the corresponding populate_pages bit if both the + // following conditions are true. + // + // 1.Trying to populate pages (with gup) which are already resident + // at the destination is wasteful but usually harmless except in the + // PROT_NONE case. gup returns NV_ERR_INVALID_ADDRESS for such pages + // and will incorrectly lead to API migration failures even though + // migration worked as expected. + // + // 2. Migration failure was not because of allocation failure in + // uvm_migrate_vma_finalize_and_map() since such failures would be + // indicated in allocation_failed_mask. Failures other than + // allocation failures likely means that the page is populated + // somewhere. So, set the corresponding bit in populate_pages_mask. + if (test_bit(i, state->dst_resident_pages_mask)) { + + // If touch was requested, pages in allocation_failed and + // populate_pages masks will be touched during population. But pages + // which are already resident at the destination need to be touched + // here since population isn't tried later for such pages. + needs_touch = true; + } + else if (!test_bit(i, state->allocation_failed_mask)) { + __set_bit(i, state->populate_pages_mask); + } + } + + // Touch if requested and needed. + if (uvm_migrate_args->touch && needs_touch) { + struct page *dst_page; + + UVM_ASSERT(args->dst[i] & MIGRATE_PFN_VALID); + + dst_page = migrate_pfn_to_page(args->dst[i]); + UVM_ASSERT(dst_page); + uvm_touch_page(dst_page); + } + } + + // Remove the IOMMU mappings created during the copy + if (state->dma.num_pages > 0) { + + for_each_set_bit(i, state->dma.page_mask, state->num_pages) + uvm_gpu_unmap_cpu_page(state->dma.addrs_gpus[i], state->dma.addrs[i]); + } + + UVM_ASSERT(!bitmap_intersects(state->populate_pages_mask, state->allocation_failed_mask, state->num_pages)); +} + +void uvm_migrate_vma_finalize_and_map_helper(struct vm_area_struct *vma, + const unsigned long *src, + const unsigned long *dst, + unsigned long start, + unsigned long end, + void *private) +{ + struct migrate_vma args = + { + .vma = vma, + .dst = (unsigned long *) dst, + .src = (unsigned long *) src, + .start = start, + .end = end, + }; + + uvm_migrate_vma_finalize_and_map(&args, (migrate_vma_state_t *) private); +} + +static NV_STATUS nv_migrate_vma(struct migrate_vma *args, migrate_vma_state_t *state) +{ + int ret; + +#if defined(CONFIG_MIGRATE_VMA_HELPER) + static const struct migrate_vma_ops uvm_migrate_vma_ops = + { + .alloc_and_copy = uvm_migrate_vma_alloc_and_copy_helper, + .finalize_and_map = uvm_migrate_vma_finalize_and_map_helper, + }; + + ret = migrate_vma(&uvm_migrate_vma_ops, args->vma, args->start, args->end, args->src, args->dst, state); + if (ret < 0) + return errno_to_nv_status(ret); +#else // CONFIG_MIGRATE_VMA_HELPER + +#if defined(NV_MIGRATE_VMA_FLAGS_PRESENT) + args->flags = MIGRATE_VMA_SELECT_SYSTEM; +#endif // NV_MIGRATE_VMA_FLAGS_PRESENT + + ret = migrate_vma_setup(args); + if (ret < 0) + return errno_to_nv_status(ret); + + uvm_migrate_vma_alloc_and_copy(args, state); + if (state->status == NV_OK) { + migrate_vma_pages(args); + uvm_migrate_vma_finalize_and_map(args, state); + } + + 
migrate_vma_finalize(args); +#endif // CONFIG_MIGRATE_VMA_HELPER + + return state->status; +} + +static NV_STATUS migrate_pageable_vma_populate_mask(struct vm_area_struct *vma, + unsigned long start, + unsigned long outer, + const unsigned long *mask, + migrate_vma_state_t *state) +{ + const unsigned long num_pages = (outer - start) / PAGE_SIZE; + unsigned long subregion_first = find_first_bit(mask, num_pages); + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + + while (subregion_first < num_pages) { + NV_STATUS status; + unsigned long subregion_outer = find_next_zero_bit(mask, num_pages, subregion_first + 1); + + status = uvm_populate_pageable_vma(vma, + start + subregion_first * PAGE_SIZE, + (subregion_outer - subregion_first) * PAGE_SIZE, + 0, + uvm_migrate_args->touch, + uvm_migrate_args->populate_permissions); + if (status != NV_OK) + return status; + + subregion_first = find_next_bit(mask, num_pages, subregion_outer + 1); + } + + return NV_OK; +} + +static NV_STATUS migrate_pageable_vma_migrate_mask(struct vm_area_struct *vma, + unsigned long start, + unsigned long outer, + const unsigned long *mask, + migrate_vma_state_t *state) +{ + NV_STATUS status; + const unsigned long num_pages = (outer - start) / PAGE_SIZE; + unsigned long subregion_first = find_first_bit(mask, num_pages); + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + struct migrate_vma args = + { + .vma = vma, + .src = state->src_pfn_array, + .dst = state->dst_pfn_array, + }; + + UVM_ASSERT(!uvm_migrate_args->skip_mapped); + + while (subregion_first < num_pages) { + unsigned long subregion_outer = find_next_zero_bit(mask, num_pages, subregion_first + 1); + + args.start = start + subregion_first * PAGE_SIZE; + args.end = start + subregion_outer * PAGE_SIZE; + + status = nv_migrate_vma(&args, state); + if (status != NV_OK) + return status; + + // We ignore allocation failure here as we are just retrying migration, + // but pages must have already been populated by the caller + + subregion_first = find_next_bit(mask, num_pages, subregion_outer + 1); + } + + return NV_OK; +} + +static NV_STATUS migrate_pageable_vma_region(struct vm_area_struct *vma, + unsigned long start, + unsigned long outer, + migrate_vma_state_t *state, + unsigned long *next_addr) +{ + NV_STATUS status; + const unsigned long num_pages = (outer - start) / PAGE_SIZE; + struct mm_struct *mm = vma->vm_mm; + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + struct migrate_vma args = + { + .vma = vma, + .src = state->src_pfn_array, + .dst = state->dst_pfn_array, + .start = start, + .end = outer, + }; + + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(outer)); + UVM_ASSERT(start < outer); + UVM_ASSERT(start >= vma->vm_start); + UVM_ASSERT(outer <= vma->vm_end); + UVM_ASSERT(outer - start <= UVM_MIGRATE_VMA_MAX_SIZE); + uvm_assert_mmap_lock_locked(mm); + uvm_assert_rwsem_locked(&uvm_migrate_args->va_space->lock); + + status = nv_migrate_vma(&args, state); + if (status != NV_OK) + return status; + + // Save the returned page masks because they can be overwritten by + // migrate_pageable_vma_migrate_mask(). 
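// The retries below hand those saved masks to
// migrate_pageable_vma_populate_mask() / migrate_pageable_vma_migrate_mask(),
// which walk a mask one contiguous run of set bits at a time, roughly as
// follows (illustrative sketch only; mask and num_pages are placeholders):
//
//     unsigned long first = find_first_bit(mask, num_pages);
//
//     while (first < num_pages) {
//         unsigned long outer = find_next_zero_bit(mask, num_pages, first + 1);
//
//         // Handle pages [first, outer) as one subregion, i.e. the byte range
//         // [start + first * PAGE_SIZE, start + outer * PAGE_SIZE). For
//         // mask 0b111001100, the runs are bits 2-3 and bits 6-8.
//         first = find_next_bit(mask, num_pages, outer + 1);
//     }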
+ bitmap_copy(state->scratch1_mask, state->populate_pages_mask, num_pages); + bitmap_copy(state->scratch2_mask, state->allocation_failed_mask, num_pages); + + if (!bitmap_empty(state->scratch1_mask, state->num_pages)) { + // Populate pages using get_user_pages + status = migrate_pageable_vma_populate_mask(vma, start, outer, state->scratch1_mask, state); + if (status != NV_OK) + return status; + + if (!uvm_migrate_args->skip_mapped) { + status = migrate_pageable_vma_migrate_mask(vma, start, outer, state->scratch1_mask, state); + if (status != NV_OK) + return status; + } + } + + // There is no need to copy the masks again after the migration is retried. + // We ignore the allocation_failed, populate_pages and dst_resident_pages + // masks set by the retried migration. + + if (!bitmap_empty(state->scratch2_mask, state->num_pages)) { + // If the destination is the CPU, signal user-space to retry with a + // different node. Otherwise, just try to populate anywhere in the + // system + if (UVM_ID_IS_CPU(uvm_migrate_args->dst_id)) { + *next_addr = start + find_first_bit(state->scratch2_mask, num_pages) * PAGE_SIZE; + return NV_ERR_MORE_PROCESSING_REQUIRED; + } + else { + status = migrate_pageable_vma_populate_mask(vma, start, outer, state->scratch2_mask, state); + if (status != NV_OK) + return status; + } + } + + return NV_OK; +} + +static NV_STATUS migrate_pageable_vma(struct vm_area_struct *vma, + unsigned long start, + unsigned long outer, + migrate_vma_state_t *state, + unsigned long *next_addr) +{ + NV_STATUS status = NV_OK; + struct mm_struct *mm = vma->vm_mm; + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + uvm_va_space_t *va_space = uvm_migrate_args->va_space; + + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(outer)); + UVM_ASSERT(vma->vm_end > start); + UVM_ASSERT(vma->vm_start < outer); + uvm_assert_mmap_lock_locked(mm); + uvm_assert_rwsem_locked(&va_space->lock); + + // Adjust to input range boundaries + start = max(start, vma->vm_start); + outer = min(outer, vma->vm_end); + + // TODO: Bug 2419180: support file-backed pages in migrate_vma, when + // support for it is added to the Linux kernel + if (!vma_is_anonymous(vma)) + return NV_WARN_NOTHING_TO_DO; + + if (uvm_processor_mask_empty(&va_space->registered_gpus)) + return NV_WARN_NOTHING_TO_DO; + + while (start < outer) { + const size_t region_size = min(outer - start, UVM_MIGRATE_VMA_MAX_SIZE); + + status = migrate_pageable_vma_region(vma, start, start + region_size, state, next_addr); + if (status == NV_ERR_MORE_PROCESSING_REQUIRED) { + UVM_ASSERT(*next_addr >= start); + UVM_ASSERT(*next_addr < outer); + } + + if (status != NV_OK) + break; + + start += region_size; + }; + + return status; +} + +static NV_STATUS migrate_pageable(migrate_vma_state_t *state) +{ + uvm_migrate_args_t *uvm_migrate_args = state->uvm_migrate_args; + uvm_va_space_t *va_space = uvm_migrate_args->va_space; + const unsigned long length = uvm_migrate_args->length; + NvU64 *user_space_start = uvm_migrate_args->user_space_start; + NvU64 *user_space_length = uvm_migrate_args->user_space_length; + struct mm_struct *mm = uvm_migrate_args->mm; + unsigned long start = uvm_migrate_args->start; + unsigned long outer = start + length; + unsigned long prev_outer = outer; + struct vm_area_struct *vma; + + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(length)); + uvm_assert_mmap_lock_locked(mm); + + vma = find_vma_intersection(mm, start, outer); + if (!vma || (start < vma->vm_start)) + return NV_ERR_INVALID_ADDRESS; + + // VMAs 
are validated and migrated one at a time, since migrate_vma works + // on one vma at a time + for (; vma->vm_start <= prev_outer; vma = vma->vm_next) { + unsigned long next_addr = 0; + NV_STATUS status = migrate_pageable_vma(vma, start, outer, state, &next_addr); + if (status == NV_WARN_NOTHING_TO_DO) { + NV_STATUS populate_status = NV_OK; + bool touch = uvm_migrate_args->touch; + uvm_populate_permissions_t populate_permissions = uvm_migrate_args->populate_permissions; + + UVM_ASSERT(!vma_is_anonymous(vma) || uvm_processor_mask_empty(&va_space->registered_gpus)); + + // We can't use migrate_vma to move the pages as desired. Normally + // this fallback path is supposed to populate the memory then inform + // user mode that it should call move_pages, but that move_pages + // call won't work as expected if the caller is in the wrong + // process. Make that failure explicit so the caller is aware that + // move_pages won't behave as expected. + // + // If the caller is a kernel thread, such as the GPU BH, continue + // with population since there's no move_pages fallback. + if (current->mm != mm && !(current->flags & PF_KTHREAD)) + return NV_ERR_NOT_SUPPORTED; + + // Populate pages with uvm_populate_pageable + populate_status = uvm_populate_pageable_vma(vma, start, length, 0, touch, populate_permissions); + if (populate_status == NV_OK) { + *user_space_start = max(vma->vm_start, start); + *user_space_length = min(vma->vm_end, outer) - *user_space_start; + } + else { + status = populate_status; + } + } + else if (status == NV_ERR_MORE_PROCESSING_REQUIRED) { + UVM_ASSERT(next_addr >= start); + UVM_ASSERT(next_addr < outer); + UVM_ASSERT(UVM_ID_IS_CPU(uvm_migrate_args->dst_id)); + + *user_space_start = next_addr; + } + + if (status != NV_OK) + return status; + + if (vma->vm_end >= outer) + return NV_OK; + + prev_outer = vma->vm_end; + } + + // Input range not fully covered by VMAs. + return NV_ERR_INVALID_ADDRESS; +} + +NV_STATUS uvm_migrate_pageable(uvm_migrate_args_t *uvm_migrate_args) +{ + migrate_vma_state_t *state = NULL; + NV_STATUS status; + uvm_va_space_t *va_space = uvm_migrate_args->va_space; + uvm_processor_id_t dst_id = uvm_migrate_args->dst_id; + int dst_node_id = uvm_migrate_args->dst_node_id; + + UVM_ASSERT(PAGE_ALIGNED(uvm_migrate_args->start)); + UVM_ASSERT(PAGE_ALIGNED(uvm_migrate_args->length)); + uvm_assert_mmap_lock_locked(uvm_migrate_args->mm); + + if (UVM_ID_IS_CPU(dst_id)) { + // We only check that dst_node_id is a valid node in the system and it + // doesn't correspond to a GPU node. This is fine because + // alloc_pages_node will clamp the allocation to + // cpuset_current_mems_allowed, and uvm_migrate_pageable is only called + // from process context (uvm_migrate) when dst_id is CPU. UVM bottom + // half never calls uvm_migrate_pageable when dst_id is CPU. So, assert + // that we're in a user thread. However, this would need to change if we + // wanted to call this function from a bottom half with CPU dst_id. + UVM_ASSERT(!(current->flags & PF_KTHREAD)); + + if (!nv_numa_node_has_memory(dst_node_id) || get_gpu_from_node_id(va_space, dst_node_id) != NULL) + return NV_ERR_INVALID_ARGUMENT; + } + else { + // Incoming dst_node_id is only valid if dst_id belongs to the CPU. Use + // dst_node_id as the GPU node id if dst_id doesn't belong to the CPU. 
+ uvm_migrate_args->dst_node_id = uvm_gpu_numa_info(uvm_va_space_get_gpu(va_space, dst_id))->node_id; + } + + state = kmem_cache_alloc(g_uvm_migrate_vma_state_cache, NV_UVM_GFP_FLAGS); + if (!state) + return NV_ERR_NO_MEMORY; + + state->uvm_migrate_args = uvm_migrate_args; + status = migrate_pageable(state); + + kmem_cache_free(g_uvm_migrate_vma_state_cache, state); + + return status; +} + +NV_STATUS uvm_migrate_pageable_init() +{ + g_uvm_migrate_vma_state_cache = NV_KMEM_CACHE_CREATE("migrate_vma_state_t", migrate_vma_state_t); + if (!g_uvm_migrate_vma_state_cache) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_migrate_pageable_exit() +{ + kmem_cache_destroy_safe(&g_uvm_migrate_vma_state_cache); +} +#endif diff --git a/kernel-open/nvidia-uvm/uvm_migrate_pageable.h b/kernel-open/nvidia-uvm/uvm_migrate_pageable.h new file mode 100644 index 000000000..3efa59fc3 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_migrate_pageable.h @@ -0,0 +1,253 @@ +/******************************************************************************* + Copyright (c) 2018 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_MIGRATE_PAGEABLE_H__ +#define __UVM_MIGRATE_PAGEABLE_H__ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_populate_pageable.h" +#include "uvm_forward_decl.h" +#include "uvm_processors.h" + +typedef struct +{ + uvm_va_space_t *va_space; + struct mm_struct *mm; + const unsigned long start; + const unsigned long length; + uvm_processor_id_t dst_id; + + // dst_node_id may be clobbered by uvm_migrate_pageable(). + int dst_node_id; + uvm_populate_permissions_t populate_permissions; + bool touch : 1; + bool skip_mapped : 1; + NvU64 *user_space_start; + NvU64 *user_space_length; +} uvm_migrate_args_t; + +#if defined(CONFIG_MIGRATE_VMA_HELPER) +#define UVM_MIGRATE_VMA_SUPPORTED 1 +#else +#if defined(CONFIG_DEVICE_PRIVATE) && defined(NV_MIGRATE_VMA_SETUP_PRESENT) +#define UVM_MIGRATE_VMA_SUPPORTED 1 +#endif +#endif + +#ifdef UVM_MIGRATE_VMA_SUPPORTED +#include + +// The calls to migrate_vma are capped at 32MB to set an upper bound on the +// amount of metadata that needs to be allocated for the operation. This number +// was chosen because performance seems to plateau at this size. 
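// As a rough illustration of that bound (assuming a 4KiB PAGE_SIZE and an
// 8-byte unsigned long, neither of which is guaranteed here):
//
//     UVM_MIGRATE_VMA_MAX_PAGES       = 32MB / 4KiB     = 8192 pages
//     each DECLARE_BITMAP(..., 8192)  = 8192 bits       = 1KiB
//     src_pfn_array / dst_pfn_array   = 8192 * 8 bytes  = 64KiB each
//
// so migrate_vma_state_t below stays at a fixed size of a few hundred KiB no
// matter how large the requested range is; larger ranges are simply split into
// multiple migrate_vma calls.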
+#define UVM_MIGRATE_VMA_MAX_SIZE (32UL * 1024 * 1024) +#define UVM_MIGRATE_VMA_MAX_PAGES (UVM_MIGRATE_VMA_MAX_SIZE >> PAGE_SHIFT) + +typedef struct +{ + // Input parameters + uvm_migrate_args_t *uvm_migrate_args; + + // Output parameters + // + // Error code. This only signals errors in internal UVM operations. + // Pages that failed allocation or could not be populated are communicated + // using the fields below. + NV_STATUS status; + + // Mask of pages that couldn't be made resident on the destination because + // (a) they are backed with data but pages are not populated (e.g. in swap), + // (b) pages are not backed with any data yet but were not populated + // due to the vma not being READ_WRITE, as it would not charge the pages to + // the process properly. + // (c) pages are already mapped and such pages were requested to not be + // migrated via skip_mapped. + // (d) pages which couldn't be migrated by the kernel. + DECLARE_BITMAP(populate_pages_mask, UVM_MIGRATE_VMA_MAX_PAGES); + + // Mask of pages that failed allocation on the destination + DECLARE_BITMAP(allocation_failed_mask, UVM_MIGRATE_VMA_MAX_PAGES); + + // Mask of pages which are already resident at the destination. + DECLARE_BITMAP(dst_resident_pages_mask, UVM_MIGRATE_VMA_MAX_PAGES); + + // Global state managed by the caller + // + // These are scratch masks that can be used by the migrate_vma caller to + // save output page masks and orchestrate the migrate_vma + // retries/population calls if needed. + DECLARE_BITMAP(scratch1_mask, UVM_MIGRATE_VMA_MAX_PAGES); + DECLARE_BITMAP(scratch2_mask, UVM_MIGRATE_VMA_MAX_PAGES); + + // Arrays used by migrate_vma to store the src/dst pfns + unsigned long dst_pfn_array[UVM_MIGRATE_VMA_MAX_PAGES]; + unsigned long src_pfn_array[UVM_MIGRATE_VMA_MAX_PAGES]; + + // Internal state + // + uvm_tracker_t tracker; + + struct { + // Array of page IOMMU mappings created during allocate_and_copy. + // Required when using SYS aperture. They are freed in + // finalize_and_map. Also keep an array with the GPUs for which the + // mapping was created. + NvU64 addrs[UVM_MIGRATE_VMA_MAX_PAGES]; + uvm_gpu_t *addrs_gpus[UVM_MIGRATE_VMA_MAX_PAGES]; + + // Mask of pages with entries in the dma address arrays above + DECLARE_BITMAP(page_mask, UVM_MIGRATE_VMA_MAX_PAGES); + + // Number of pages for which IOMMU mapping were created + unsigned long num_pages; + } dma; + + // Processors where pages are resident before calling migrate_vma + uvm_processor_mask_t src_processors; + + // Array of per-processor page masks with the pages that are resident + // before calling migrate_vma. 
+ struct { + DECLARE_BITMAP(page_mask, UVM_MIGRATE_VMA_MAX_PAGES); + } processors[UVM_ID_MAX_PROCESSORS]; + + // Number of pages in the migrate_vma call + unsigned long num_pages; + + // Number of pages that are directly populated on the destination + unsigned long num_populate_anon_pages; +} migrate_vma_state_t; + +#if defined(CONFIG_MIGRATE_VMA_HELPER) +struct migrate_vma { + struct vm_area_struct *vma; + unsigned long *dst; + unsigned long *src; + unsigned long start; + unsigned long end; +}; + +void uvm_migrate_vma_alloc_and_copy_helper(struct vm_area_struct *vma, + const unsigned long *src, + unsigned long *dst, + unsigned long start, + unsigned long end, + void *private); + +void uvm_migrate_vma_finalize_and_map_helper(struct vm_area_struct *vma, + const unsigned long *src, + const unsigned long *dst, + unsigned long start, + unsigned long end, + void *private); +#else +void uvm_migrate_vma_alloc_and_copy(struct migrate_vma *args, migrate_vma_state_t *state); +void uvm_migrate_vma_finalize_and_map(struct migrate_vma *args, migrate_vma_state_t *state); +#endif // CONFIG_MIGRATE_VMA_HELPER + +// Populates the given VA range and tries to migrate all the pages to dst_id. If +// the destination processor is the CPU, the NUMA node in dst_node_id is used. +// The input VA range must be fully backed by VMAs. This function relies on +// migrate_vma, which was added in Linux 4.14. If skip_mapped is set to true, +// then already mapped pages will not be migrated. For kernels that do not +// provide migrate_vma, this function populates the memory using get_user_pages +// and returns NV_WARN_NOTHING_TO_DO to complete the migration in user space for +// API calls made from userspace. Kernel callers are expected to handle this +// error according to their respective usecases. user_space_start and +// user_space_length will contain the full input range. skip_mapped is ignored +// for such kernels. If the destination is the CPU and dst_node_id is full, +// NV_ERR_MORE_PROCESSING_REQUIRED is returned and user-space will call +// UVM_MIGRATE with the next preferred CPU node (if more are available), +// starting at the address specified by user_space_start. If the destination is +// a GPU and a page could not be populated, return NV_ERR_NO_MEMORY. Otherwise, +// return NV_OK. This is fine because UvmMigrate/UvmMigrateAsync only guarantee +// that the memory is populated somewhere in the system, not that pages moved to +// the requested processor. +// +// migrate_vma does not support file-backed vmas yet. If a file-backed vma is +// found, populates the pages corresponding to the vma with get_user_pages() and +// returns NV_WARN_NOTHING_TO_DO. The caller is expected to handle this error. +// API calls will fall back to user-mode to complete the migration. Kernel +// callers are expected to handle this error according to the respective +// usecases. If NV_WARN_NOTHING_TO_DO is returned, user_space_start and +// user_space_length will contain the intersection of the vma address range and +// [start:start + length]. +// +// If a user-mode fallback is required but current->mm != uvm_migrate_args->mm, +// NV_ERR_NOT_SUPPORTED is returned since user mode can't perform such a +// migration. If the caller is a kernel thread, such as the GPU BH, this check +// is ignored since the caller is not expected to take such a fallback. 
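//
// As a rough usage sketch (illustrative only, not a verbatim caller from this
// driver; va_space, mm, base and length are placeholders, and the
// populate_permissions enumerator and the -1 node id are assumptions), a
// kernel-mode caller holding the mmap_lock and the VA space lock might do:
//
//     NvU64 out_start, out_length;
//     uvm_migrate_args_t args =
//     {
//         .va_space             = va_space,
//         .mm                   = mm,
//         .start                = base,          // page-aligned
//         .length               = length,        // page-aligned
//         .dst_id               = gpu->id,       // or UVM_ID_CPU
//         .dst_node_id          = -1,            // only honored when dst_id is the CPU
//         .populate_permissions = UVM_POPULATE_PERMISSIONS_INHERIT,
//         .touch                = false,
//         .skip_mapped          = false,
//         .user_space_start     = &out_start,
//         .user_space_length    = &out_length,
//     };
//     NV_STATUS status = uvm_migrate_pageable(&args);
//
//     if (status == NV_WARN_NOTHING_TO_DO) {
//         // Pages are populated; [out_start, out_start + out_length) still
//         // needs to be moved (API callers finish this from user space).
//     }
//     else if (status == NV_ERR_MORE_PROCESSING_REQUIRED) {
//         // The CPU destination node was full; retry from out_start with the
//         // next preferred CPU node.
//     }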
+// +// Also, if no GPUs have been registered in the VA space, populates the pages +// corresponding to the first vma in the requested region using get_user_pages() +// and returns NV_WARN_NOTHING_TO_DO to fall back to user space for the +// userspace API callers to complete the whole migration. Kernel callers are +// expected to handle this error according to their respective usecases. +// +// If touch is true, a touch will be attempted on all pages in the requested +// range. All pages are only guaranteed to have been touched if +// NV_WARN_NOTHING_TO_DO or NV_OK is returned. +// +// Locking: mmap_lock must be held in read or write mode +NV_STATUS uvm_migrate_pageable(uvm_migrate_args_t *uvm_migrate_args); + +NV_STATUS uvm_migrate_pageable_init(void); + +void uvm_migrate_pageable_exit(void); +#else // UVM_MIGRATE_VMA_SUPPORTED + +static NV_STATUS uvm_migrate_pageable(uvm_migrate_args_t *uvm_migrate_args) +{ + NV_STATUS status; + + if (current->mm != uvm_migrate_args->mm && !(current->flags & PF_KTHREAD)) + return NV_ERR_NOT_SUPPORTED; + + status = uvm_populate_pageable(uvm_migrate_args->mm, + uvm_migrate_args->start, + uvm_migrate_args->length, + 0, + uvm_migrate_args->touch, + uvm_migrate_args->populate_permissions); + if (status != NV_OK) + return status; + + *(uvm_migrate_args->user_space_start) = uvm_migrate_args->start; + *(uvm_migrate_args->user_space_length) = uvm_migrate_args->length; + + return NV_WARN_NOTHING_TO_DO; +} + +static NV_STATUS uvm_migrate_pageable_init(void) +{ + return NV_OK; +} + +static void uvm_migrate_pageable_exit(void) +{ +} + +#endif // UVM_MIGRATE_VMA_SUPPORTED + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_mmu.c b/kernel-open/nvidia-uvm/uvm_mmu.c new file mode 100644 index 000000000..bc5e949d8 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_mmu.c @@ -0,0 +1,2479 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_types.h" +#include "uvm_forward_decl.h" +#include "uvm_gpu.h" +#include "uvm_mmu.h" +#include "uvm_hal.h" +#include "uvm_kvmalloc.h" +#include "uvm_pte_batch.h" +#include "uvm_tlb_batch.h" +#include "uvm_push.h" +#include "uvm_mem.h" +#include "uvm_va_space.h" + + + + + + + +// The page tree has 5 levels on Pascal, and the root is never freed by a normal +// 'put' operation which leaves a maximum of 4 levels +#define MAX_OPERATION_DEPTH 4 + + +// Wrappers for push begin handling channel_manager not being there when running +// the page tree unit test +// +// When in SR-IOV heavy, the push needs to be associated with the proxy channel +// (UVM_CHANNEL_TYPE_MEMOPS), because it is used to manipulate the page tree. +// In any other scenario using UVM_CHANNEL_TYPE_GPU_INTERNAL is preferred, +// because that type is normally associated with the LCE mapped to the most +// PCEs. The higher bandwidth is beneficial when doing bulk operations such as +// clearing PTEs, or initializing a page directory/table. +#define page_tree_begin_acquire(tree, tracker, push, format, ...) ({ \ + NV_STATUS status; \ + uvm_channel_manager_t *manager = (tree)->gpu->channel_manager; \ + \ + if (manager == NULL) \ + status = uvm_push_begin_fake((tree)->gpu, (push)); \ + else if (uvm_gpu_is_virt_mode_sriov_heavy((tree)->gpu)) \ + status = uvm_push_begin_acquire(manager, UVM_CHANNEL_TYPE_MEMOPS, (tracker), (push), (format), ##__VA_ARGS__); \ + else \ + status = uvm_push_begin_acquire(manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, (tracker), (push), (format), ##__VA_ARGS__);\ + \ + status; \ +}) + +// Default location of page table allocations +static uvm_aperture_t page_table_aperture = UVM_APERTURE_DEFAULT; + +static char *uvm_page_table_location; +module_param(uvm_page_table_location, charp, S_IRUGO); +MODULE_PARM_DESC(uvm_page_table_location, + "Set the location for UVM-allocated page tables. Choices are: vid, sys."); + +NV_STATUS uvm_mmu_init(void) +{ + UVM_ASSERT((page_table_aperture == UVM_APERTURE_VID) || + (page_table_aperture == UVM_APERTURE_SYS) || + (page_table_aperture == UVM_APERTURE_DEFAULT)); + + if (!uvm_page_table_location) + return NV_OK; + + // TODO: Bug 1766651: Add modes for testing, e.g. alternating vidmem and + // sysmem etc. + if (strcmp(uvm_page_table_location, "vid") == 0) { + page_table_aperture = UVM_APERTURE_VID; + } + else if (strcmp(uvm_page_table_location, "sys") == 0) { + page_table_aperture = UVM_APERTURE_SYS; + } + else { + pr_info("Invalid uvm_page_table_location %s. Using %s instead.\n", + uvm_page_table_location, + uvm_aperture_string(page_table_aperture)); + } + + return NV_OK; +} + +static NV_STATUS phys_mem_allocate_sysmem(uvm_page_tree_t *tree, NvLength size, uvm_mmu_page_table_alloc_t *out) +{ + NV_STATUS status = NV_OK; + NvU64 dma_addr; + unsigned long flags = __GFP_ZERO; + uvm_memcg_context_t memcg_context; + uvm_va_space_t *va_space; + struct mm_struct *mm = NULL; + + if (tree->type == UVM_PAGE_TREE_TYPE_USER && tree->gpu_va_space && UVM_CGROUP_ACCOUNTING_SUPPORTED()) { + va_space = tree->gpu_va_space->va_space; + mm = uvm_va_space_mm_retain(va_space); + if (mm) + uvm_memcg_context_start(&memcg_context, mm); + } + + // If mm is not NULL, memcg context has been started and we can use + // the account flags. 
+ if (mm) + flags |= NV_UVM_GFP_FLAGS_ACCOUNT; + else + flags |= NV_UVM_GFP_FLAGS; + + out->handle.page = alloc_pages(flags, get_order(size)); + + // va_space and mm will be set only if the memcg context has been started. + if (mm) { + uvm_memcg_context_end(&memcg_context); + uvm_va_space_mm_release(va_space); + } + + if (out->handle.page == NULL) + return NV_ERR_NO_MEMORY; + + // Check for fake GPUs from the unit test + if (tree->gpu->parent->pci_dev) + status = uvm_gpu_map_cpu_pages(tree->gpu, out->handle.page, UVM_PAGE_ALIGN_UP(size), &dma_addr); + else + dma_addr = page_to_phys(out->handle.page); + + if (status != NV_OK) { + __free_pages(out->handle.page, get_order(size)); + return status; + } + + out->addr = uvm_gpu_phys_address(UVM_APERTURE_SYS, dma_addr); + out->size = size; + + return NV_OK; +} + +static NV_STATUS phys_mem_allocate_vidmem(uvm_page_tree_t *tree, + NvLength size, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_mmu_page_table_alloc_t *out) +{ + NV_STATUS status; + uvm_gpu_t *gpu = tree->gpu; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + + status = uvm_pmm_gpu_alloc_kernel(&gpu->pmm, 1, size, pmm_flags, &out->handle.chunk, &local_tracker); + if (status != NV_OK) + return status; + + if (!uvm_tracker_is_empty(&local_tracker)) { + uvm_mutex_lock(&tree->lock); + status = uvm_tracker_add_tracker_safe(&tree->tracker, &local_tracker); + uvm_mutex_unlock(&tree->lock); + } + + uvm_tracker_deinit(&local_tracker); + + if (status != NV_OK) { + uvm_pmm_gpu_free(&tree->gpu->pmm, out->handle.chunk, NULL); + return status; + } + + out->addr = uvm_gpu_phys_address(UVM_APERTURE_VID, out->handle.chunk->address); + out->size = size; + + return status; +} + +static NV_STATUS phys_mem_allocate(uvm_page_tree_t *tree, + NvLength size, + uvm_aperture_t location, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_mmu_page_table_alloc_t *out) +{ + UVM_ASSERT((location == UVM_APERTURE_VID) || (location == UVM_APERTURE_SYS)); + + memset(out, 0, sizeof(*out)); + + if (location == UVM_APERTURE_SYS) + return phys_mem_allocate_sysmem(tree, size, out); + else + return phys_mem_allocate_vidmem(tree, size, pmm_flags, out); +} + +static void phys_mem_deallocate_vidmem(uvm_page_tree_t *tree, uvm_mmu_page_table_alloc_t *ptr) +{ + uvm_assert_mutex_locked(&tree->lock); + UVM_ASSERT(ptr->addr.aperture == UVM_APERTURE_VID); + + uvm_pmm_gpu_free(&tree->gpu->pmm, ptr->handle.chunk, &tree->tracker); +} + +static void phys_mem_deallocate_sysmem(uvm_page_tree_t *tree, uvm_mmu_page_table_alloc_t *ptr) +{ + NV_STATUS status; + + uvm_assert_mutex_locked(&tree->lock); + + // Synchronize any pending operations before freeing the memory that might + // be used by them. 
+ status = uvm_tracker_wait(&tree->tracker); + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); + + UVM_ASSERT(ptr->addr.aperture == UVM_APERTURE_SYS); + if (tree->gpu->parent->pci_dev) + uvm_gpu_unmap_cpu_pages(tree->gpu, ptr->addr.address, UVM_PAGE_ALIGN_UP(ptr->size)); + __free_pages(ptr->handle.page, get_order(ptr->size)); +} + +static void phys_mem_deallocate(uvm_page_tree_t *tree, uvm_mmu_page_table_alloc_t *ptr) +{ + if (ptr->addr.aperture == UVM_APERTURE_SYS) + phys_mem_deallocate_sysmem(tree, ptr); + else + phys_mem_deallocate_vidmem(tree, ptr); + + memset(ptr, 0, sizeof(*ptr)); +} + +static void page_table_range_init(uvm_page_table_range_t *range, + NvU32 page_size, + uvm_page_directory_t *dir, + NvU32 start_index, + NvU32 end_index) +{ + range->table = dir; + range->start_index = start_index; + range->entry_count = 1 + end_index - start_index; + range->page_size = page_size; + dir->ref_count += range->entry_count; +} + +static void phys_mem_init(uvm_page_tree_t *tree, NvU32 page_size, uvm_page_directory_t *dir, uvm_push_t *push) +{ + NvU64 clear_bits[2]; + uvm_mmu_mode_hal_t *hal = tree->hal; + + if (dir->depth == tree->hal->page_table_depth(page_size)) { + *clear_bits = 0; // Invalid PTE + } + else { + // passing in NULL for the phys_allocs will mark the child entries as invalid + uvm_mmu_page_table_alloc_t *phys_allocs[2] = {NULL, NULL}; + hal->make_pde(clear_bits, phys_allocs, dir->depth); + + // Make sure that using only clear_bits[0] will work + UVM_ASSERT(hal->entry_size(dir->depth) == sizeof(clear_bits[0]) || clear_bits[0] == clear_bits[1]); + } + + // initialize the memory to a reasonable value + tree->gpu->parent->ce_hal->memset_8(push, + uvm_gpu_address_from_phys(dir->phys_alloc.addr), + *clear_bits, + dir->phys_alloc.size); +} + +static uvm_page_directory_t *allocate_directory(uvm_page_tree_t *tree, + NvU32 page_size, + NvU32 depth, + uvm_pmm_alloc_flags_t pmm_flags) +{ + NV_STATUS status; + uvm_mmu_mode_hal_t *hal = tree->hal; + NvU32 entry_count; + NvLength phys_alloc_size = hal->allocation_size(depth, page_size); + uvm_page_directory_t *dir; + + // The page tree doesn't cache PTEs so space is not allocated for entries that are always PTEs. + // 2M PTEs may later become PDEs so pass UVM_PAGE_SIZE_AGNOSTIC, not page_size. + if (depth == hal->page_table_depth(UVM_PAGE_SIZE_AGNOSTIC)) + entry_count = 0; + else + entry_count = hal->entries_per_index(depth) << hal->index_bits(depth, page_size); + + dir = uvm_kvmalloc_zero(sizeof(uvm_page_directory_t) + sizeof(dir->entries[0]) * entry_count); + if (dir == NULL) + return NULL; + + status = phys_mem_allocate(tree, phys_alloc_size, tree->location, pmm_flags, &dir->phys_alloc); + + // Fall back to sysmem if allocating page tables in vidmem with eviction + // fails, and the fallback is allowed. 
+ if ((status == NV_ERR_NO_MEMORY) && + (tree->location == UVM_APERTURE_VID) && + (tree->location_sys_fallback) && + ((pmm_flags & UVM_PMM_ALLOC_FLAGS_EVICT) != 0)) { + status = phys_mem_allocate(tree, phys_alloc_size, UVM_APERTURE_SYS, pmm_flags, &dir->phys_alloc); + } + + if (status != NV_OK) { + uvm_kvfree(dir); + return NULL; + } + dir->depth = depth; + + return dir; +} + +static inline NvU32 entry_index_from_vaddr(NvU64 vaddr, NvU32 addr_bit_shift, NvU32 bits) +{ + NvU64 mask = ((NvU64)1 << bits) - 1; + return (NvU32)((vaddr >> addr_bit_shift) & mask); +} + +static inline NvU32 index_to_entry(uvm_mmu_mode_hal_t *hal, NvU32 entry_index, NvU32 depth, NvU32 page_size) +{ + return hal->entries_per_index(depth) * entry_index + hal->entry_offset(depth, page_size); +} + +// pde_fill() populates pde_count PDE entries (starting at start_index) with +// the same mapping, i.e., with the same physical address (phys_addr). +static void pde_fill(uvm_page_tree_t *tree, + NvU32 depth, + uvm_mmu_page_table_alloc_t *directory, + NvU32 start_index, + NvU32 pde_count, + uvm_mmu_page_table_alloc_t **phys_addr, + uvm_push_t *push) +{ + NvU64 pde_data[2], entry_size; + uvm_gpu_address_t pde_entry_addr; + + UVM_ASSERT(start_index + pde_count <= uvm_mmu_page_tree_entries(tree, depth, UVM_PAGE_SIZE_AGNOSTIC)); + entry_size = tree->hal->entry_size(depth); + UVM_ASSERT(sizeof(pde_data) >= entry_size); + + tree->hal->make_pde(pde_data, phys_addr, depth); + pde_entry_addr = uvm_gpu_address_from_phys(directory->addr); + pde_entry_addr.address += start_index * entry_size; + + if (entry_size == sizeof(pde_data[0])) { + tree->gpu->parent->ce_hal->memset_8(push, pde_entry_addr, pde_data[0], sizeof(pde_data[0]) * pde_count); + } + else { + NvU32 max_inline_entries = UVM_PUSH_INLINE_DATA_MAX_SIZE / sizeof(pde_data); + uvm_gpu_address_t inline_data_addr; + uvm_push_inline_data_t inline_data; + NvU32 membar_flag = 0; + NvU32 i; + + if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE)) + membar_flag = UVM_PUSH_FLAG_NEXT_MEMBAR_NONE; + else if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU)) + membar_flag = UVM_PUSH_FLAG_NEXT_MEMBAR_GPU; + + for (i = 0; i < pde_count;) { + NvU32 j; + NvU32 entry_count = min(pde_count - i, max_inline_entries); + + uvm_push_inline_data_begin(push, &inline_data); + for (j = 0; j < entry_count; j++) + uvm_push_inline_data_add(&inline_data, pde_data, sizeof(pde_data)); + inline_data_addr = uvm_push_inline_data_end(&inline_data); + + // All but the first memcopy can be pipelined. We respect the + // caller's pipelining settings for the first push. + if (i != 0) + uvm_push_set_flag(push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + + // No membar is needed until the last copy. Otherwise, use + // caller's membar flag. 
+ if (i + entry_count < pde_count) + uvm_push_set_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + else if (membar_flag) + uvm_push_set_flag(push, membar_flag); + + tree->gpu->parent->ce_hal->memcopy(push, pde_entry_addr, inline_data_addr, entry_count * sizeof(pde_data)); + + i += entry_count; + pde_entry_addr.address += sizeof(pde_data) * entry_count; + } + } +} + +static uvm_page_directory_t *host_pde_write(uvm_page_directory_t *dir, + uvm_page_directory_t *parent, + NvU32 index_in_parent) +{ + dir->host_parent = parent; + dir->index_in_parent = index_in_parent; + parent->ref_count++; + return dir; +} + +static void pde_write(uvm_page_tree_t *tree, + uvm_page_directory_t *dir, + NvU32 entry_index, + bool force_clear, + uvm_push_t *push) +{ + NvU32 i; + uvm_mmu_page_table_alloc_t *phys_allocs[2]; + NvU32 entries_per_index = tree->hal->entries_per_index(dir->depth); + + // extract physical allocs from non-null entries. + for (i = 0; i < entries_per_index; i++) { + uvm_page_directory_t *entry = dir->entries[entries_per_index * entry_index + i]; + if (entry == NULL || force_clear) + phys_allocs[i] = NULL; + else + phys_allocs[i] = &entry->phys_alloc; + } + + pde_fill(tree, dir->depth, &dir->phys_alloc, entry_index, 1, phys_allocs, push); +} + +static void host_pde_clear(uvm_page_tree_t *tree, uvm_page_directory_t *dir, NvU32 entry_index, NvU32 page_size) +{ + UVM_ASSERT(dir->ref_count > 0); + + dir->entries[index_to_entry(tree->hal, entry_index, dir->depth, page_size)] = NULL; + dir->ref_count--; +} + +static void pde_clear(uvm_page_tree_t *tree, uvm_page_directory_t *dir, NvU32 entry_index, NvU32 page_size, void *push) +{ + host_pde_clear(tree, dir, entry_index, page_size); + pde_write(tree, dir, entry_index, false, push); +} + +static uvm_chunk_sizes_mask_t allocation_sizes_for_big_page_size(uvm_parent_gpu_t *parent_gpu, NvU32 big_page_size) +{ + uvm_chunk_sizes_mask_t alloc_sizes = 0; + uvm_mmu_mode_hal_t *hal = parent_gpu->arch_hal->mmu_mode_hal(big_page_size); + + if (hal != NULL) { + unsigned long page_size_log2; + unsigned long page_sizes = hal->page_sizes(); + BUILD_BUG_ON(sizeof(hal->page_sizes()) > sizeof(page_sizes)); + + for_each_set_bit(page_size_log2, &page_sizes, BITS_PER_LONG) { + NvU32 i; + NvU32 page_size = (NvU32)(1ULL << page_size_log2); + for (i = 0; i <= hal->page_table_depth(page_size); i++) + alloc_sizes |= hal->allocation_size(i, page_size); + } + } + + return alloc_sizes; +} + +static NvU32 page_sizes_for_big_page_size(uvm_parent_gpu_t *parent_gpu, NvU32 big_page_size) +{ + uvm_mmu_mode_hal_t *hal = parent_gpu->arch_hal->mmu_mode_hal(big_page_size); + + if (hal != NULL) + return hal->page_sizes(); + + return 0; +} + +static void page_tree_end(uvm_page_tree_t *tree, uvm_push_t *push) +{ + if (tree->gpu->channel_manager != NULL) + uvm_push_end(push); + else + uvm_push_end_fake(push); +} + +static void page_tree_tracker_overwrite_with_push(uvm_page_tree_t *tree, uvm_push_t *push) +{ + uvm_assert_mutex_locked(&tree->lock); + + // No GPU work to track for fake GPU testing + if (tree->gpu->channel_manager == NULL) + return; + + uvm_tracker_overwrite_with_push(&tree->tracker, push); +} + +static NV_STATUS page_tree_end_and_wait(uvm_page_tree_t *tree, uvm_push_t *push) +{ + if (tree->gpu->channel_manager != NULL) + return uvm_push_end_and_wait(push); + else + uvm_push_end_fake(push); + + return NV_OK; +} + +// initialize new page tables and insert them into the tree +static NV_STATUS write_gpu_state(uvm_page_tree_t *tree, + NvU32 page_size, + NvS32 invalidate_depth, + NvU32 
used_count, + uvm_page_directory_t **dirs_used) +{ + NvS32 i; + uvm_push_t push; + NV_STATUS status; + + // The logic of what membar is needed when is pretty subtle, please refer to + // the UVM Functional Spec (section 5.1) for all the details. + uvm_membar_t membar_after_writes = UVM_MEMBAR_GPU; + + uvm_assert_mutex_locked(&tree->lock); + + if (used_count == 0) + return NV_OK; + + status = page_tree_begin_acquire(tree, &tree->tracker, &push, "write_gpu_state: %u dirs", used_count); + if (status != NV_OK) + return status; + + // only do GPU work once all the allocations have succeeded + // first, zero-out the new allocations + for (i = 0; i < used_count; i++) { + // Appropriate membar will be done after all the writes. Pipelining can + // be enabled as they are all initializing newly allocated memory that + // cannot have any writes pending. + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + + phys_mem_init(tree, page_size, dirs_used[i], &push); + + if (dirs_used[i]->phys_alloc.addr.aperture == UVM_APERTURE_SYS) + membar_after_writes = UVM_MEMBAR_SYS; + } + + // Only a single membar is needed between the memsets of the page tables + // and the writes of the PDEs pointing to those page tables. + // The membar can be local if all of the page tables and PDEs are in GPU memory, + // but must be a sysmembar if any of them are in sysmem. + tree->gpu->parent->host_hal->wait_for_idle(&push); + uvm_hal_membar(tree->gpu, &push, membar_after_writes); + + // Reset back to a local membar by default + membar_after_writes = UVM_MEMBAR_GPU; + + // write entries bottom up, so that they are valid once they're inserted into the tree + for (i = used_count - 1; i >= 0; i--) { + uvm_page_directory_t *dir = dirs_used[i]; + + // Appropriate membar will be done after all the writes. Pipelining can + // be enabled as they are all independent and we just did a WFI above. + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + pde_write(tree, dir->host_parent, dir->index_in_parent, false, &push); + + // If any of the written PDEs is in sysmem, a sysmembar is needed before + // the TLB invalidate. + // Notably sysmembar is needed even though the writer (CE) and reader (MMU) are + // on the same GPU, because CE physical writes take the L2 bypass path. + if (dir->host_parent->phys_alloc.addr.aperture == UVM_APERTURE_SYS) + membar_after_writes = UVM_MEMBAR_SYS; + } + + tree->gpu->parent->host_hal->wait_for_idle(&push); + uvm_hal_membar(tree->gpu, &push, membar_after_writes); + + UVM_ASSERT(invalidate_depth >= 0); + + // Upgrades don't have to flush out accesses, so no membar is needed on the TLB invalidate. + tree->gpu->parent->host_hal->tlb_invalidate_all(&push, + uvm_page_tree_pdb(tree)->addr, + invalidate_depth, + UVM_MEMBAR_NONE); + + // We just did the appropriate membar after the WFI, so no need for another + // one in push_end(). + // At least currently as if the L2 bypass path changes to only require a GPU + // membar between PDE write and TLB invalidate, we'll need to push a + // sysmembar so the end-of-push semaphore is ordered behind the PDE writes. 
+ uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + page_tree_end(tree, &push); + page_tree_tracker_overwrite_with_push(tree, &push); + + return NV_OK; +} + +static void free_unused_directories(uvm_page_tree_t *tree, + NvU32 used_count, + uvm_page_directory_t **dirs_used, + uvm_page_directory_t **dir_cache) +{ + NvU32 i; + + // free unused entries + for (i = 0; i < MAX_OPERATION_DEPTH; i++) { + uvm_page_directory_t *dir = dir_cache[i]; + if (dir != NULL) { + NvU32 j; + + for (j = 0; j < used_count; j++) { + if (dir == dirs_used[j]) + break; + } + + if (j == used_count) { + phys_mem_deallocate(tree, &dir->phys_alloc); + uvm_kvfree(dir); + } + } + } + +} + +static NV_STATUS allocate_page_table(uvm_page_tree_t *tree, NvU32 page_size, uvm_mmu_page_table_alloc_t *out) +{ + NvU32 depth = tree->hal->page_table_depth(page_size); + NvLength alloc_size = tree->hal->allocation_size(depth, page_size); + + return phys_mem_allocate(tree, alloc_size, tree->location, UVM_PMM_ALLOC_FLAGS_EVICT, out); +} + +static void map_remap_deinit(uvm_page_tree_t *tree) +{ + if (tree->map_remap.pde0.size) + phys_mem_deallocate(tree, &tree->map_remap.pde0); + + if (tree->map_remap.ptes_invalid_4k.size) + phys_mem_deallocate(tree, &tree->map_remap.ptes_invalid_4k); +} + +static NV_STATUS map_remap_init(uvm_page_tree_t *tree) +{ + NV_STATUS status; + uvm_push_t push; + uvm_pte_batch_t batch; + NvU32 entry_size; + + // Allocate the ptes_invalid_4k. + status = allocate_page_table(tree, UVM_PAGE_SIZE_4K, &tree->map_remap.ptes_invalid_4k); + if (status != NV_OK) + goto error; + + // Allocate the pde0 struct (pde0 depth = depth(UVM_PAGE_SIZE_2M)) + // This is only needed for the 512M page size, since it requires a + // PDE1-depth(512M) PTE. We first map it to the pde0 directory, then we + // return the PTE for the get_ptes()'s caller. + if (tree->hal->page_sizes() & UVM_PAGE_SIZE_512M) { + status = allocate_page_table(tree, UVM_PAGE_SIZE_2M, &tree->map_remap.pde0); + if (status != NV_OK) + goto error; + } + status = page_tree_begin_acquire(tree, &tree->tracker, &push, "map remap init"); + if (status != NV_OK) + goto error; + + uvm_pte_batch_begin(&push, &batch); + entry_size = uvm_mmu_pte_size(tree, UVM_PAGE_SIZE_4K); + + // Invalidate all entries in the ptes_invalid_4k page table. + uvm_pte_batch_clear_ptes(&batch, + tree->map_remap.ptes_invalid_4k.addr, + 0, + entry_size, + tree->map_remap.ptes_invalid_4k.size / entry_size); + + uvm_pte_batch_end(&batch); + + // Set pde0 entries to ptes_invalid_4k. + if (tree->hal->page_sizes() & UVM_PAGE_SIZE_512M) { + uvm_mmu_page_table_alloc_t *phys_allocs[2] = {NULL, NULL}; + NvU32 depth = tree->hal->page_table_depth(UVM_PAGE_SIZE_4K) - 1; + size_t index_4k = tree->hal->entry_offset(depth, UVM_PAGE_SIZE_4K); + + // pde0 depth equals UVM_PAGE_SIZE_2M. + NvU32 pde0_depth = tree->hal->page_table_depth(UVM_PAGE_SIZE_2M); + NvU32 pde0_entries = tree->map_remap.pde0.size / tree->hal->entry_size(pde0_depth); + + // The big-page entry is NULL which makes it an invalid entry. + phys_allocs[index_4k] = &tree->map_remap.ptes_invalid_4k; + + // By default CE operations include a MEMBAR_SYS. MEMBAR_GPU is + // sufficient when pde0 is allocated in VIDMEM. 
+ if (tree->map_remap.pde0.addr.aperture == UVM_APERTURE_VID) + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU); + + pde_fill(tree, + pde0_depth, + &tree->map_remap.pde0, + 0, + pde0_entries, + (uvm_mmu_page_table_alloc_t **)&phys_allocs, + &push); + } + + return page_tree_end_and_wait(tree, &push); +error: + map_remap_deinit(tree); + + return status; +} + +// The location of the page tree backing storage depends on the aperture passed +// to the page tree initialization ("init location" in the table below), and the +// uvm_page_table_location module parameter. Only when the initialization +// aperture is UVM_APERTURE_DEFAULT, and the module parameter is not set, +// the pages can be in more than one location: vidmem is given priority, but if +// the allocation fails it will fall back to sysmem. +// +// Behavior outside of SR-IOV heavy (bare metal, SR-IOV standard, etc): +// Inputs Outputs +// init location | uvm_page_table_location || tree->location | tree->location_sys_fallback +// --------------|-------------------------||----------------|---------------- +// vidmem | - || vidmem | false +// sysmem | - || sysmem | false +// default | || vidmem | true (1) +// default | vidmem || vidmem | false +// default | sysmem || sysmem | false +// + + + +// (1) The fallback to sysmem is always enabled. + +// +// In SR-IOV heavy the page tree must be in vidmem, to prevent guest drivers +// from updating GPU page tables without hypervisor knowledge. +// Inputs Outputs +// init location | uvm_page_table_location || tree->location | tree->location_sys_fallback +// -------------|-------------------------||----------------|---------------- +// vidmem | - || vidmem | false +// sysmem | - || +// default | - || +// +static void page_tree_set_location(uvm_page_tree_t *tree, uvm_aperture_t location) +{ + UVM_ASSERT(tree->gpu != NULL); + UVM_ASSERT_MSG((location == UVM_APERTURE_VID) || + (location == UVM_APERTURE_SYS) || + (location == UVM_APERTURE_DEFAULT), + "Invalid location %s (%d)\n", uvm_aperture_string(location), (int)location); + + // The tree must be explicitly initialized in vidmem when in SR-IOV heavy. + // The only exceptions are "fake" GPUs used during page tree testing, which + // can be identified by having no channel manager.
+ if ((tree->gpu->channel_manager != NULL) && uvm_gpu_is_virt_mode_sriov_heavy(tree->gpu)) + UVM_ASSERT(location == UVM_APERTURE_VID); + + if (location == UVM_APERTURE_DEFAULT) { + if (page_table_aperture == UVM_APERTURE_DEFAULT) { + tree->location = UVM_APERTURE_VID; + + + + + + tree->location_sys_fallback = true; + + } + else { + tree->location = page_table_aperture; + tree->location_sys_fallback = false; + } + } + else { + tree->location = location; + tree->location_sys_fallback = false; + } +} + +NV_STATUS uvm_page_tree_init(uvm_gpu_t *gpu, + uvm_gpu_va_space_t *gpu_va_space, + uvm_page_tree_type_t type, + NvU32 big_page_size, + uvm_aperture_t location, + uvm_page_tree_t *tree) +{ + uvm_push_t push; + NV_STATUS status; + BUILD_BUG_ON(sizeof(uvm_page_directory_t) != offsetof(uvm_page_directory_t, entries)); + + UVM_ASSERT(type < UVM_PAGE_TREE_TYPE_COUNT); + + memset(tree, 0, sizeof(*tree)); + uvm_mutex_init(&tree->lock, UVM_LOCK_ORDER_PAGE_TREE); + tree->hal = gpu->parent->arch_hal->mmu_mode_hal(big_page_size); + UVM_ASSERT(tree->hal != NULL); + UVM_ASSERT(MAX_OPERATION_DEPTH >= tree->hal->page_table_depth(UVM_PAGE_SIZE_AGNOSTIC)); + tree->gpu = gpu; + tree->type = type; + tree->gpu_va_space = gpu_va_space; + tree->big_page_size = big_page_size; + + page_tree_set_location(tree, location); + + uvm_tracker_init(&tree->tracker); + + tree->root = allocate_directory(tree, UVM_PAGE_SIZE_AGNOSTIC, 0, UVM_PMM_ALLOC_FLAGS_EVICT); + + if (tree->root == NULL) + return NV_ERR_NO_MEMORY; + + if (gpu->parent->map_remap_larger_page_promotion) { + status = map_remap_init(tree); + if (status != NV_OK) + return status; + } + + status = page_tree_begin_acquire(tree, &tree->tracker, &push, "init page tree"); + if (status != NV_OK) + return status; + + phys_mem_init(tree, UVM_PAGE_SIZE_AGNOSTIC, tree->root, &push); + return page_tree_end_and_wait(tree, &push); +} + +void uvm_page_tree_deinit(uvm_page_tree_t *tree) +{ + UVM_ASSERT(tree->root->ref_count == 0); + + // Take the tree lock only to avoid assertions. It is not required for + // thread safety during deinit. + uvm_mutex_lock(&tree->lock); + + // Invalidate the entire PDB before destroying it. This is only required for + // ATS-enabled PDBs, because we have already invalidated all GMMU entries + // under this PDB. Stale ATS entries however might be present, and we need + // to invalidate them to handle the unlikely event that this PASID gets + // reused before those entries are evicted from the TLBs. + // + // While this is only required for ATS, we don't know at this point whether + // the PDB had ATS enabled, so just do it for all user PDBs. + if (g_uvm_global.ats.enabled && tree->type == UVM_PAGE_TREE_TYPE_USER) { + uvm_push_t push; + NV_STATUS status = page_tree_begin_acquire(tree, &tree->tracker, &push, "deinit inval all"); + + // Failure to get a push can only happen if we've hit a fatal UVM channel + // error. We can't perform the unmap, so just leave things in place for + // debug. 
+ if (status == NV_OK) { + tree->gpu->parent->host_hal->tlb_invalidate_all(&push, uvm_page_tree_pdb(tree)->addr, 0, UVM_MEMBAR_NONE); + page_tree_end(tree, &push); + page_tree_tracker_overwrite_with_push(tree, &push); + } + else { + UVM_ASSERT(status == uvm_global_get_status()); + } + } + + (void)uvm_tracker_wait(&tree->tracker); + phys_mem_deallocate(tree, &tree->root->phys_alloc); + + if (tree->gpu->parent->map_remap_larger_page_promotion) + map_remap_deinit(tree); + + uvm_mutex_unlock(&tree->lock); + + uvm_tracker_deinit(&tree->tracker); + uvm_kvfree(tree->root); +} + +void uvm_page_tree_put_ptes_async(uvm_page_tree_t *tree, uvm_page_table_range_t *range) +{ + NvU32 free_count = 0; + NvU32 i; + uvm_page_directory_t *free_queue[MAX_OPERATION_DEPTH]; + uvm_page_directory_t *dir = range->table; + uvm_push_t push; + NV_STATUS status; + NvU32 invalidate_depth = 0; + + // The logic of what membar is needed when is pretty subtle, please refer to + // the UVM Functional Spec (section 5.1) for all the details. + uvm_membar_t membar_after_pde_clears = UVM_MEMBAR_GPU; + uvm_membar_t membar_after_invalidate = UVM_MEMBAR_GPU; + + UVM_ASSERT(tree->hal->page_table_depth(range->page_size) <= MAX_OPERATION_DEPTH); + + uvm_mutex_lock(&tree->lock); + + // release the range + UVM_ASSERT(dir->ref_count >= range->entry_count); + dir->ref_count -= range->entry_count; + + // traverse until we hit an in-use page, or the root + while (dir->host_parent != NULL && dir->ref_count == 0) { + uvm_page_directory_t *parent = dir->host_parent; + + if (free_count == 0) { + + // begin a push which will be submitted before the memory gets freed + status = page_tree_begin_acquire(tree, &tree->tracker, &push, "put ptes: start: %u, count: %u", + range->start_index, range->entry_count); + // Failure to get a push can only happen if we've hit a fatal UVM + // channel error. We can't perform the unmap, so just leave things + // in place for debug. + if (status != NV_OK) { + UVM_ASSERT(status == uvm_global_get_status()); + dir->ref_count += range->entry_count; + uvm_mutex_unlock(&tree->lock); + return; + } + } + + // All writes can be pipelined as put_ptes() cannot be called with any + // operations pending on the affected PTEs and PDEs. + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + + // Don't issue any membars as part of the clear, a single membar will be + // done below before the invalidate. + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + pde_clear(tree, dir->host_parent, dir->index_in_parent, range->page_size, &push); + + invalidate_depth = dir->host_parent->depth; + + // If any of the pointed to PDEs were in sysmem then a SYS membar is + // required after the TLB invalidate. + if (dir->phys_alloc.addr.aperture == UVM_APERTURE_SYS) + membar_after_invalidate = UVM_MEMBAR_SYS; + + // If any of the cleared PDEs were in sysmem then a SYS membar is + // required after the clears and before the TLB invalidate. + if (dir->host_parent->phys_alloc.addr.aperture == UVM_APERTURE_SYS) + membar_after_pde_clears = UVM_MEMBAR_SYS; + + // Add this dir to the queue of directories that should be freed once + // the tracker value of the associated PDE writes is known. 
+ UVM_ASSERT(free_count < tree->hal->page_table_depth(range->page_size)); + free_queue[free_count++] = dir; + + dir = parent; + } + + if (free_count == 0) { + uvm_mutex_unlock(&tree->lock); + return; + } + + tree->gpu->parent->host_hal->wait_for_idle(&push); + uvm_hal_membar(tree->gpu, &push, membar_after_pde_clears); + tree->gpu->parent->host_hal->tlb_invalidate_all(&push, + uvm_page_tree_pdb(tree)->addr, + invalidate_depth, + membar_after_invalidate); + + // We just did the appropriate membar above, no need for another one in push_end(). + // At least currently as if the L2 bypass path changes to only require a GPU + // membar between PDE write and TLB invalidate, we'll need to push a + // sysmembar so the end-of-push semaphore is ordered behind the PDE writes. + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + page_tree_end(tree, &push); + page_tree_tracker_overwrite_with_push(tree, &push); + + // now that we've traversed all the way up the tree, free everything + for (i = 0; i < free_count; i++) { + phys_mem_deallocate(tree, &free_queue[i]->phys_alloc); + uvm_kvfree(free_queue[i]); + } + + uvm_mutex_unlock(&tree->lock); +} + +void uvm_page_tree_put_ptes(uvm_page_tree_t *tree, uvm_page_table_range_t *range) +{ + uvm_page_tree_put_ptes_async(tree, range); + (void)uvm_page_tree_wait(tree); +} + +NV_STATUS uvm_page_tree_wait(uvm_page_tree_t *tree) +{ + NV_STATUS status; + + uvm_mutex_lock(&tree->lock); + + status = uvm_tracker_wait(&tree->tracker); + + uvm_mutex_unlock(&tree->lock); + + return status; +} + +static NV_STATUS try_get_ptes(uvm_page_tree_t *tree, + NvU32 page_size, + NvU64 start, + NvLength size, + uvm_page_table_range_t *range, + NvU32 *cur_depth, + uvm_page_directory_t **dir_cache) +{ + uvm_mmu_mode_hal_t *hal = tree->hal; + + // bit index just beyond the most significant bit used to index the current entry + NvU32 addr_bit_shift = hal->num_va_bits(); + + // track depth upon which the invalidate occured + NvS32 invalidate_depth = -1; + uvm_page_directory_t *dir = tree->root; + + // directories used in attempt + NvU32 used_count = 0; + NvU32 i; + uvm_page_directory_t *dirs_used[MAX_OPERATION_DEPTH]; + + uvm_assert_mutex_locked(&tree->lock); + + UVM_ASSERT(is_power_of_2(page_size)); + + // ensure that the caller has specified a valid page size + UVM_ASSERT((page_size & hal->page_sizes()) != 0); + + // This algorithm will work with unaligned ranges, but the caller's intent is unclear + UVM_ASSERT_MSG(start % page_size == 0 && size % page_size == 0, "start 0x%llx size 0x%zx page_size 0x%x", + start, (size_t)size, page_size); + + // The GPU should be capable of addressing the passed range + UVM_ASSERT(uvm_gpu_can_address(tree->gpu, start, size)); + + while (true) { + + // index of the entry, for the first byte of the range, within its containing directory + NvU32 start_index; + + // index of the entry, for the last byte of the range, within its containing directory + NvU32 end_index; + + // pointer to PDE/PTE + uvm_page_directory_t **entry; + NvU32 index_bits = hal->index_bits(dir->depth, page_size); + + addr_bit_shift -= index_bits; + start_index = entry_index_from_vaddr(start, addr_bit_shift, index_bits); + end_index = entry_index_from_vaddr(start + size - 1, addr_bit_shift, index_bits); + + UVM_ASSERT(start_index <= end_index && end_index < (1 << index_bits)); + + entry = dir->entries + index_to_entry(hal, start_index, dir->depth, page_size); + + if (dir->depth == hal->page_table_depth(page_size)) { + page_table_range_init(range, page_size, dir, start_index, 
end_index); + break; + } + else { + UVM_ASSERT(start_index == end_index); + + if (*entry == NULL) { + if (dir_cache[dir->depth] == NULL) { + *cur_depth = dir->depth; + + // Undo the changes to the tree so that the dir cache remains private to the thread + for (i = 0; i < used_count; i++) + host_pde_clear(tree, dirs_used[i]->host_parent, dirs_used[i]->index_in_parent, page_size); + + return NV_ERR_MORE_PROCESSING_REQUIRED; + } + + *entry = host_pde_write(dir_cache[dir->depth], dir, start_index); + dirs_used[used_count++] = *entry; + + if (invalidate_depth == -1) + invalidate_depth = dir->depth; + } + } + dir = *entry; + } + + free_unused_directories(tree, used_count, dirs_used, dir_cache); + return write_gpu_state(tree, page_size, invalidate_depth, used_count, dirs_used); +} + +static NV_STATUS map_remap(uvm_page_tree_t *tree, NvU64 start, NvLength size, uvm_page_table_range_t *range) +{ + NV_STATUS status; + uvm_push_t push; + NvU32 page_sizes; + uvm_mmu_page_table_alloc_t *phys_alloc[1]; + + // TODO: Bug 2734399 + if (range->page_size != UVM_PAGE_SIZE_512M) + return NV_OK; + + UVM_ASSERT(tree->hal->entries_per_index(range->table->depth) == 1); + + status = page_tree_begin_acquire(tree, + &tree->tracker, + &push, + "map remap: [0x%llx, 0x%llx), page_size: %d", + start, + start + size, + range->page_size); + if (status != NV_OK) + return status; + + // By default CE operations include a MEMBAR_SYS. MEMBAR_GPU is + // sufficient when the range is allocated in VIDMEM. We must enforce the + // following ordering between operations: + // PDE write -> TLB invalidate -> MMU fills. + if (uvm_page_table_range_aperture(range) == UVM_APERTURE_VID) + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU); + + phys_alloc[0] = &tree->map_remap.pde0; + pde_fill(tree, + range->table->depth, + &range->table->phys_alloc, + range->start_index, + range->entry_count, + (uvm_mmu_page_table_alloc_t **)&phys_alloc, + &push); + + tree->gpu->parent->host_hal->wait_for_idle(&push); + + // Invalidate all the supported page sizes smaller than or equal to + // range->page_size, because the GPU TLBs may cache invalid entries using + // any page size they decide, including the smallest one. + page_sizes = (range->page_size | (range->page_size - 1)) & tree->hal->page_sizes(); + + // No membar is needed, any in-flight access to this range may fault and a + // lazy or delayed invalidate will evict the potential stale/invalid TLB + // entry. + uvm_tlb_batch_single_invalidate(tree, &push, start, size, page_sizes, UVM_MEMBAR_NONE); + + page_tree_end(tree, &push); + page_tree_tracker_overwrite_with_push(tree, &push); + return NV_OK; +} + +NV_STATUS uvm_page_tree_get_ptes_async(uvm_page_tree_t *tree, + NvU32 page_size, + NvU64 start, + NvLength size, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_page_table_range_t *range) +{ + NV_STATUS status; + NvU32 cur_depth = 0; + uvm_page_directory_t *dir_cache[MAX_OPERATION_DEPTH]; + memset(dir_cache, 0, sizeof(dir_cache)); + + uvm_mutex_lock(&tree->lock); + while ((status = try_get_ptes(tree, + page_size, + start, + size, + range, + &cur_depth, + dir_cache)) == NV_ERR_MORE_PROCESSING_REQUIRED) { + uvm_mutex_unlock(&tree->lock); + + // try_get_ptes never needs depth 0, so store a directory at its parent's depth + // TODO: Bug 1766655: Allocate everything below cur_depth instead of + // retrying for every level. 
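+        // Rough shape of one retry iteration (sketch of the surrounding code):
+        //
+        //     uvm_mutex_unlock(&tree->lock);
+        //     dir_cache[cur_depth] = allocate_directory(tree, page_size, cur_depth + 1, pmm_flags);
+        //     uvm_mutex_lock(&tree->lock);
+        //
+        // try_get_ptes() then either consumes the cached directory or fails
+        // again with NV_ERR_MORE_PROCESSING_REQUIRED for another level.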
+ dir_cache[cur_depth] = allocate_directory(tree, page_size, cur_depth + 1, pmm_flags); + if (dir_cache[cur_depth] == NULL) { + uvm_mutex_lock(&tree->lock); + free_unused_directories(tree, 0, NULL, dir_cache); + uvm_mutex_unlock(&tree->lock); + return NV_ERR_NO_MEMORY; + } + + uvm_mutex_lock(&tree->lock); + } + + if ((status == NV_OK) && tree->gpu->parent->map_remap_larger_page_promotion) + status = map_remap(tree, start, size, range); + + uvm_mutex_unlock(&tree->lock); + + return status; +} + +NV_STATUS uvm_page_tree_get_ptes(uvm_page_tree_t *tree, + NvU32 page_size, + NvU64 start, + NvLength size, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_page_table_range_t *range) +{ + NV_STATUS status = uvm_page_tree_get_ptes_async(tree, page_size, start, size, pmm_flags, range); + if (status != NV_OK) + return status; + + return uvm_page_tree_wait(tree); +} + +void uvm_page_table_range_get_upper(uvm_page_tree_t *tree, + uvm_page_table_range_t *existing, + uvm_page_table_range_t *upper, + NvU32 num_upper_pages) +{ + NvU32 upper_start_index = existing->start_index + (existing->entry_count - num_upper_pages); + NvU32 upper_end_index = upper_start_index + num_upper_pages - 1; + + UVM_ASSERT(num_upper_pages); + UVM_ASSERT(num_upper_pages <= existing->entry_count); + + uvm_mutex_lock(&tree->lock); + page_table_range_init(upper, existing->page_size, existing->table, upper_start_index, upper_end_index); + uvm_mutex_unlock(&tree->lock); +} + +void uvm_page_table_range_shrink(uvm_page_tree_t *tree, uvm_page_table_range_t *range, NvU32 new_page_count) +{ + UVM_ASSERT(range->entry_count >= new_page_count); + + if (new_page_count > 0) { + // Take a ref count on the smaller portion of the PTEs, then drop the + // entire old range. + uvm_mutex_lock(&tree->lock); + + UVM_ASSERT(range->table->ref_count >= range->entry_count); + range->table->ref_count -= (range->entry_count - new_page_count); + + uvm_mutex_unlock(&tree->lock); + + range->entry_count = new_page_count; + } + else { + uvm_page_tree_put_ptes(tree, range); + } +} + +NV_STATUS uvm_page_tree_get_entry(uvm_page_tree_t *tree, + NvU32 page_size, + NvU64 start, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_page_table_range_t *single) +{ + NV_STATUS status = uvm_page_tree_get_ptes(tree, page_size, start, page_size, pmm_flags, single); + UVM_ASSERT(single->entry_count == 1); + return status; +} + +void uvm_page_tree_write_pde(uvm_page_tree_t *tree, uvm_page_table_range_t *single, uvm_push_t *push) +{ + UVM_ASSERT(single->entry_count == 1); + pde_write(tree, single->table, single->start_index, false, push); +} + +void uvm_page_tree_clear_pde(uvm_page_tree_t *tree, uvm_page_table_range_t *single, uvm_push_t *push) +{ + UVM_ASSERT(single->entry_count == 1); + pde_write(tree, single->table, single->start_index, true, push); +} + +static NV_STATUS poison_ptes(uvm_page_tree_t *tree, + uvm_page_directory_t *pte_dir, + uvm_page_directory_t *parent, + NvU32 page_size) +{ + NV_STATUS status; + uvm_push_t push; + + uvm_assert_mutex_locked(&tree->lock); + + UVM_ASSERT(pte_dir->depth == tree->hal->page_table_depth(page_size)); + + status = page_tree_begin_acquire(tree, &tree->tracker, &push, "Poisoning child table of page size %u", page_size); + if (status != NV_OK) + return status; + + tree->gpu->parent->ce_hal->memset_8(&push, + uvm_gpu_address_from_phys(pte_dir->phys_alloc.addr), + tree->hal->poisoned_pte(), + pte_dir->phys_alloc.size); + + // If both the new PTEs and the parent PDE are in vidmem, then a GPU- + // local membar is enough to keep the memset of the PTEs ordered with 
+ // any later write of the PDE. Otherwise we need a sysmembar. See the + // comments in write_gpu_state. + if (pte_dir->phys_alloc.addr.aperture == UVM_APERTURE_VID && + parent->phys_alloc.addr.aperture == UVM_APERTURE_VID) + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU); + + page_tree_end(tree, &push); + + // The push acquired the tracker so it's ok to just overwrite it with + // the entry tracking the push. + page_tree_tracker_overwrite_with_push(tree, &push); + + return NV_OK; +} + +NV_STATUS uvm_page_tree_alloc_table(uvm_page_tree_t *tree, + NvU32 page_size, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_page_table_range_t *single, + uvm_page_table_range_t *children) +{ + bool should_free = false; + uvm_page_directory_t **entry; + uvm_page_directory_t *dir; + NV_STATUS status = NV_OK; + + UVM_ASSERT(single->entry_count == 1); + + entry = single->table->entries + index_to_entry(tree->hal, + single->start_index, + single->table->depth, + page_size); + + dir = allocate_directory(tree, page_size, single->table->depth + 1, pmm_flags); + if (dir == NULL) + return NV_ERR_NO_MEMORY; + + uvm_mutex_lock(&tree->lock); + + // The caller is responsible for initializing this table, so enforce that on + // debug builds. + if (UVM_IS_DEBUG()) { + status = poison_ptes(tree, dir, single->table, page_size); + if (status != NV_OK) + goto out; + } + + status = uvm_tracker_wait(&tree->tracker); + if (status != NV_OK) + goto out; + + // the range always refers to the entire page table + children->start_index = 0; + children->entry_count = 1 << tree->hal->index_bits(dir->depth, page_size); + children->page_size = page_size; + + // is this entry currently unassigned? + if (*entry == NULL) { + children->table = dir; + *entry = dir; + host_pde_write(dir, single->table, single->start_index); + } + else { + should_free = true; + children->table = *entry; + } + children->table->ref_count += children->entry_count; + +out: + if (should_free || status != NV_OK) { + phys_mem_deallocate(tree, &dir->phys_alloc); + uvm_kvfree(dir); + } + uvm_mutex_unlock(&tree->lock); + + return status; +} + +static size_t range_vec_calc_range_count(uvm_page_table_range_vec_t *range_vec) +{ + NvU64 pde_coverage = uvm_mmu_pde_coverage(range_vec->tree, range_vec->page_size); + NvU64 aligned_start = UVM_ALIGN_DOWN(range_vec->start, pde_coverage); + NvU64 aligned_end = UVM_ALIGN_UP(range_vec->start + range_vec->size, pde_coverage); + size_t count = uvm_div_pow2_64(aligned_end - aligned_start, pde_coverage); + + UVM_ASSERT(count != 0); + + return count; +} + +static NvU64 range_vec_calc_range_start(uvm_page_table_range_vec_t *range_vec, size_t i) +{ + NvU64 pde_coverage = uvm_mmu_pde_coverage(range_vec->tree, range_vec->page_size); + NvU64 aligned_start = UVM_ALIGN_DOWN(range_vec->start, pde_coverage); + NvU64 range_start = aligned_start + i * pde_coverage; + return max(range_vec->start, range_start); +} + +static NvU64 range_vec_calc_range_end(uvm_page_table_range_vec_t *range_vec, size_t i) +{ + NvU64 pde_coverage = uvm_mmu_pde_coverage(range_vec->tree, range_vec->page_size); + NvU64 range_start = range_vec_calc_range_start(range_vec, i); + NvU64 max_range_end = UVM_ALIGN_UP(range_start + 1, pde_coverage); + return min(range_vec->start + range_vec->size, max_range_end); +} + +static NvU64 range_vec_calc_range_size(uvm_page_table_range_vec_t *range_vec, size_t i) +{ + return range_vec_calc_range_end(range_vec, i) - range_vec_calc_range_start(range_vec, i); +} + +static size_t range_vec_calc_range_index(uvm_page_table_range_vec_t 
*range_vec, NvU64 addr) +{ + NvU64 pde_coverage = uvm_mmu_pde_coverage(range_vec->tree, range_vec->page_size); + NvU64 aligned_start = UVM_ALIGN_DOWN(range_vec->start, pde_coverage); + NvU64 aligned_addr = UVM_ALIGN_DOWN(addr, pde_coverage); + UVM_ASSERT(addr >= range_vec->start); + UVM_ASSERT(addr < range_vec->start + range_vec->size); + return (size_t)uvm_div_pow2_64(aligned_addr - aligned_start, pde_coverage); +} + +NV_STATUS uvm_page_table_range_vec_init(uvm_page_tree_t *tree, + NvU64 start, + NvU64 size, + NvU32 page_size, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_page_table_range_vec_t *range_vec) +{ + NV_STATUS status; + size_t i; + + UVM_ASSERT(size != 0); + UVM_ASSERT_MSG(IS_ALIGNED(start, page_size), "start 0x%llx page_size 0x%x\n", start, page_size); + UVM_ASSERT_MSG(IS_ALIGNED(size, page_size), "size 0x%llx page_size 0x%x\n", size, page_size); + + range_vec->tree = tree; + range_vec->page_size = page_size; + range_vec->start = start; + range_vec->size = size; + range_vec->range_count = range_vec_calc_range_count(range_vec); + + range_vec->ranges = uvm_kvmalloc_zero(sizeof(*range_vec->ranges) * range_vec->range_count); + if (!range_vec->ranges) { + status = NV_ERR_NO_MEMORY; + goto out; + } + + for (i = 0; i < range_vec->range_count; ++i) { + uvm_page_table_range_t *range = &range_vec->ranges[i]; + + NvU64 range_start = range_vec_calc_range_start(range_vec, i); + NvU64 range_size = range_vec_calc_range_size(range_vec, i); + + status = uvm_page_tree_get_ptes_async(tree, + page_size, + range_start, + range_size, + pmm_flags, + range); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to get PTEs for subrange %zd [0x%llx, 0x%llx) size 0x%llx, part of [0x%llx, 0x%llx)\n", + i, range_start, range_start + range_size, range_size, + start, size); + goto out; + } + } + + status = uvm_page_tree_wait(tree); + +out: + if (status != NV_OK) + uvm_page_table_range_vec_deinit(range_vec); + return status; +} + +NV_STATUS uvm_page_table_range_vec_create(uvm_page_tree_t *tree, + NvU64 start, + NvU64 size, + NvU32 page_size, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_page_table_range_vec_t **range_vec_out) +{ + NV_STATUS status; + uvm_page_table_range_vec_t *range_vec; + + range_vec = uvm_kvmalloc(sizeof(*range_vec)); + if (!range_vec) + return NV_ERR_NO_MEMORY; + + status = uvm_page_table_range_vec_init(tree, start, size, page_size, pmm_flags, range_vec); + if (status != NV_OK) + goto error; + + *range_vec_out = range_vec; + + return NV_OK; + +error: + uvm_kvfree(range_vec); + return status; +} + +NV_STATUS uvm_page_table_range_vec_split_upper(uvm_page_table_range_vec_t *range_vec, + NvU64 new_end, + uvm_page_table_range_vec_t *new_range_vec) +{ + size_t split_index; + size_t num_remaining_pages = 0; + uvm_page_table_range_t *range = NULL; + NvU64 pde_coverage = uvm_mmu_pde_coverage(range_vec->tree, range_vec->page_size); + NvU64 new_start = new_end + 1; + NvU64 range_base_va = UVM_ALIGN_DOWN(new_start, pde_coverage); + + UVM_ASSERT(new_range_vec); + UVM_ASSERT(new_end != 0); + UVM_ASSERT(new_end > range_vec->start && new_end < range_vec->start + range_vec->size); + UVM_ASSERT(IS_ALIGNED(new_start, range_vec->page_size)); + UVM_ASSERT(range_vec->range_count > 0); + + split_index = range_vec_calc_range_index(range_vec, new_start); + range = &range_vec->ranges[split_index]; + if (range_vec->start > range_base_va) + num_remaining_pages = (new_start - range_vec->start) / range_vec->page_size; + else + num_remaining_pages = (new_start - range_base_va) / range_vec->page_size; + + new_range_vec->tree = 
range_vec->tree; + new_range_vec->page_size = range_vec->page_size; + new_range_vec->start = new_start; + new_range_vec->size = range_vec->size - (new_range_vec->start - range_vec->start); + new_range_vec->range_count = range_vec->range_count - split_index; + + new_range_vec->ranges = uvm_kvmalloc_zero(sizeof(*new_range_vec->ranges) * new_range_vec->range_count); + if (!new_range_vec->ranges) + return NV_ERR_NO_MEMORY; + + // Handle splitting a range + if (num_remaining_pages) { + uvm_page_table_range_get_upper(range_vec->tree, + range, + &new_range_vec->ranges[0], + range->entry_count - num_remaining_pages); + uvm_page_table_range_shrink(range_vec->tree, range, num_remaining_pages); + } + + // Copy the remainder of the ranges from the existing vector to the new one. + memcpy(new_range_vec->ranges + !!num_remaining_pages, + range_vec->ranges + split_index + !!num_remaining_pages, + (new_range_vec->range_count - !!num_remaining_pages) * sizeof(*range)); + + // Adjust the coverage of range_vec. If the re-allocation of the ranges array + // fails, the old array is left untouched. + range_vec->size -= new_range_vec->size; + range_vec->range_count -= new_range_vec->range_count - !!num_remaining_pages; + range = uvm_kvrealloc(range_vec->ranges, range_vec->range_count * sizeof(*range)); + if (range) + range_vec->ranges = range; + + return NV_OK; +} + +NV_STATUS uvm_page_table_range_vec_clear_ptes(uvm_page_table_range_vec_t *range_vec, uvm_membar_t tlb_membar) +{ + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + size_t i; + uvm_page_tree_t *tree = range_vec->tree; + uvm_gpu_t *gpu = tree->gpu; + NvU32 page_size = range_vec->page_size; + NvU32 entry_size = uvm_mmu_pte_size(tree, page_size); + NvU64 invalid_pte = 0; + uvm_push_t push; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + + uvm_pte_batch_t pte_batch; + + UVM_ASSERT(range_vec); + UVM_ASSERT(tree); + UVM_ASSERT(gpu); + + i = 0; + while (i < range_vec->range_count) { + // Acquiring the previous push is not necessary for correctness as all + // the memsets can be done independently, but scheduling a lot of + // independent work for a big range could end up hogging the GPU + // for a long time while not providing much improvement. + status = page_tree_begin_acquire(tree, &tracker, &push, "Clearing PTEs for [0x%llx, 0x%llx)", + range_vec->start, range_vec->start + range_vec->size); + + if (status != NV_OK) + goto done; + + uvm_pte_batch_begin(&push, &pte_batch); + + for (; i < range_vec->range_count; ++i) { + uvm_page_table_range_t *range = &range_vec->ranges[i]; + uvm_gpu_phys_address_t first_entry_pa = uvm_page_table_range_entry_address(tree, range, 0); + uvm_pte_batch_clear_ptes(&pte_batch, first_entry_pa, invalid_pte, entry_size, range->entry_count); + + if (!uvm_push_has_space(&push, 512)) { + // Stop pushing the clears once we get close to a full push + break; + } + } + + uvm_pte_batch_end(&pte_batch); + + if (i == range_vec->range_count) + uvm_tlb_batch_single_invalidate(tree, &push, range_vec->start, range_vec->size, page_size, tlb_membar); + + page_tree_end(tree, &push); + + // Skip the tracking if in unit test mode + if (!tree->gpu->channel_manager) + continue; + + // The push acquired the tracker so it's ok to just overwrite it with + // the entry tracking the push. 
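+        // Condensed shape of one outer-loop iteration (sketch):
+        //
+        //     page_tree_begin_acquire(tree, &tracker, &push, ...);
+        //     uvm_pte_batch_begin(&push, &pte_batch);
+        //     ... uvm_pte_batch_clear_ptes() per range until the push is nearly full ...
+        //     uvm_pte_batch_end(&pte_batch);
+        //     uvm_tlb_batch_single_invalidate(...);    // only in the final push
+        //     page_tree_end(tree, &push);
+        //     uvm_tracker_overwrite_with_push(&tracker, &push);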
+ uvm_tracker_overwrite_with_push(&tracker, &push); + } + +done: + tracker_status = uvm_tracker_wait_deinit(&tracker); + if (status == NV_OK) + status = tracker_status; + + return status; +} + +void uvm_page_table_range_vec_deinit(uvm_page_table_range_vec_t *range_vec) +{ + size_t i; + if (!range_vec) + return; + + if (range_vec->ranges) { + for (i = 0; i < range_vec->range_count; ++i) { + uvm_page_table_range_t *range = &range_vec->ranges[i]; + if (!range->entry_count) + break; + uvm_page_tree_put_ptes_async(range_vec->tree, range); + } + (void)uvm_page_tree_wait(range_vec->tree); + + uvm_kvfree(range_vec->ranges); + } + + memset(range_vec, 0, sizeof(*range_vec)); +} + +void uvm_page_table_range_vec_destroy(uvm_page_table_range_vec_t *range_vec) +{ + if (!range_vec) + return; + + uvm_page_table_range_vec_deinit(range_vec); + + uvm_kvfree(range_vec); +} + +NV_STATUS uvm_page_table_range_vec_write_ptes(uvm_page_table_range_vec_t *range_vec, + uvm_membar_t tlb_membar, + uvm_page_table_range_pte_maker_t pte_maker, + void *caller_data) +{ + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + NvU32 entry; + size_t i; + uvm_page_tree_t *tree = range_vec->tree; + uvm_gpu_t *gpu = tree->gpu; + NvU32 entry_size = uvm_mmu_pte_size(tree, range_vec->page_size); + + uvm_push_t push; + uvm_pte_batch_t pte_batch; + NvU64 offset = 0; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + bool last_push = false; + + // Use as much push space as possible leaving 1K of margin + static const NvU32 max_total_entry_size_per_push = UVM_MAX_PUSH_SIZE - 1024; + + NvU32 max_entries_per_push = max_total_entry_size_per_push / entry_size; + + for (i = 0; i < range_vec->range_count; ++i) { + uvm_page_table_range_t *range = &range_vec->ranges[i]; + NvU64 range_start = range_vec_calc_range_start(range_vec, i); + NvU64 range_size = range_vec_calc_range_size(range_vec, i); + uvm_gpu_phys_address_t entry_addr = uvm_page_table_range_entry_address(tree, range, 0); + entry = 0; + + while (entry < range->entry_count) { + NvU32 entry_limit_this_push = min(range->entry_count, entry + max_entries_per_push); + + // Acquiring the previous push is not necessary for correctness as all + // the PTE writes can be done independently, but scheduling a lot of + // independent work for a big range could end up hogging the GPU + // for a long time while not providing much improvement. 
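+            // The per-push split above is driven by entry_size: for example
+            // (illustrative numbers only), with 8-byte PTEs a push holds at
+            // most (UVM_MAX_PUSH_SIZE - 1024) / 8 PTE writes, so
+            // entry_limit_this_push caps each iteration at that many entries.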
+ status = page_tree_begin_acquire(tree, + &tracker, + &push, + "Writing PTEs for range at [0x%llx, 0x%llx), subrange of range vec at [0x%llx, 0x%llx)", + range_start, + range_start + range_size, + range_vec->start, + range_vec->start + range_vec->size); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to begin push for writing PTEs: %s GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + goto done; + } + + uvm_pte_batch_begin(&push, &pte_batch); + + for (; entry < entry_limit_this_push; ++entry) { + NvU64 pte_bits = pte_maker(range_vec, offset, caller_data); + uvm_pte_batch_write_pte(&pte_batch, entry_addr, pte_bits, entry_size); + offset += range_vec->page_size; + entry_addr.address += entry_size; + } + + last_push = (i == range_vec->range_count - 1) && entry == range->entry_count; + + uvm_pte_batch_end(&pte_batch); + + if (last_push) { + // Invalidate TLBs as part of the last push + uvm_tlb_batch_single_invalidate(tree, + &push, + range_vec->start, + range_vec->size, + range_vec->page_size, + tlb_membar); + } + else { + // For pushes prior to the last one, uvm_pte_batch_end() has + // already pushed a membar that's enough to order the PTE writes + // with the TLB invalidate in the last push and that's all + // that's needed. + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + } + + page_tree_end(tree, &push); + + // Skip the tracking if in unit test mode + if (!tree->gpu->channel_manager) + continue; + + // The push acquired the tracker so it's ok to just overwrite it with + // the entry tracking the push. + uvm_tracker_overwrite_with_push(&tracker, &push); + } + } + +done: + tracker_status = uvm_tracker_wait_deinit(&tracker); + if (status == NV_OK) + status = tracker_status; + return status; +} + +typedef struct identity_mapping_pte_maker_data_struct +{ + NvU64 phys_offset; + uvm_aperture_t aperture; +} identity_mapping_pte_maker_data_t; + +static NvU64 identity_mapping_pte_maker(uvm_page_table_range_vec_t *range_vec, NvU64 offset, void *data) +{ + identity_mapping_pte_maker_data_t *vpdata = (identity_mapping_pte_maker_data_t *)data; + NvU64 pte_flags = vpdata->aperture == UVM_APERTURE_VID ? 
UVM_MMU_PTE_FLAGS_CACHED : UVM_MMU_PTE_FLAGS_NONE; + return range_vec->tree->hal->make_pte(vpdata->aperture, + offset + vpdata->phys_offset, + UVM_PROT_READ_WRITE_ATOMIC, + pte_flags); +} + +static NV_STATUS create_identity_mapping(uvm_gpu_t *gpu, + NvU64 base, + NvU64 size, + uvm_aperture_t aperture, + NvU64 phys_offset, + NvU32 page_size, + uvm_pmm_alloc_flags_t pmm_flags, + uvm_page_table_range_vec_t **range_vec) +{ + NV_STATUS status; + identity_mapping_pte_maker_data_t data = + { + .phys_offset = phys_offset, + .aperture = aperture + }; + + status = uvm_page_table_range_vec_create(&gpu->address_space_tree, + base, + size, + page_size, + pmm_flags, + range_vec); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to init range vec for aperture %d identity mapping at [0x%llx, 0x%llx): %s, GPU %s\n", + aperture, + base, + base + size, + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + status = uvm_page_table_range_vec_write_ptes(*range_vec, UVM_MEMBAR_NONE, identity_mapping_pte_maker, &data); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to write PTEs for aperture %d identity mapping at [0x%llx, 0x%llx): %s, GPU %s\n", + aperture, + base, + base + size, + nvstatusToString(status), + uvm_gpu_name(gpu)); + return status; + } + + return NV_OK; +} + +static void destroy_identity_mapping(uvm_gpu_identity_mapping_t *mapping) +{ + if (mapping->range_vec == NULL) + return; + + (void)uvm_page_table_range_vec_clear_ptes(mapping->range_vec, UVM_MEMBAR_SYS); + uvm_page_table_range_vec_destroy(mapping->range_vec); + mapping->range_vec = NULL; +} + +bool uvm_mmu_gpu_needs_static_vidmem_mapping(uvm_gpu_t *gpu) +{ + + + + return false; + +} + +bool uvm_mmu_gpu_needs_dynamic_vidmem_mapping(uvm_gpu_t *gpu) +{ + return uvm_gpu_is_virt_mode_sriov_heavy(gpu); +} + +bool uvm_mmu_gpu_needs_dynamic_sysmem_mapping(uvm_gpu_t *gpu) +{ + return uvm_gpu_is_virt_mode_sriov_heavy(gpu); +} + +NV_STATUS create_static_vidmem_mapping(uvm_gpu_t *gpu) +{ + NvU32 page_size; + NvU64 size; + uvm_aperture_t aperture = UVM_APERTURE_VID; + NvU64 phys_offset = 0; + uvm_gpu_identity_mapping_t *flat_mapping = &gpu->static_flat_mapping; + + if (!uvm_mmu_gpu_needs_static_vidmem_mapping(gpu)) + return NV_OK; + + UVM_ASSERT(!uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)); + + page_size = uvm_mmu_biggest_page_size(&gpu->address_space_tree); + size = UVM_ALIGN_UP(gpu->mem_info.max_allocatable_address + 1, page_size); + + UVM_ASSERT(page_size); + UVM_ASSERT(size); + UVM_ASSERT(size <= UVM_GPU_MAX_PHYS_MEM); + + flat_mapping->base = gpu->parent->flat_vidmem_va_base; + + return create_identity_mapping(gpu, + flat_mapping->base, + size, + aperture, + phys_offset, + page_size, + UVM_PMM_ALLOC_FLAGS_EVICT, + &flat_mapping->range_vec); +} + +static void destroy_static_vidmem_mapping(uvm_gpu_t *gpu) +{ + if (!uvm_mmu_gpu_needs_static_vidmem_mapping(gpu)) + return; + + destroy_identity_mapping(&gpu->static_flat_mapping); +} + +NV_STATUS uvm_mmu_create_peer_identity_mappings(uvm_gpu_t *gpu, uvm_gpu_t *peer) +{ + NvU32 page_size; + NvU64 size; + uvm_aperture_t aperture; + NvU64 phys_offset; + uvm_gpu_identity_mapping_t *peer_mapping; + + if (gpu->parent->peer_copy_mode != UVM_GPU_PEER_COPY_MODE_VIRTUAL || peer->mem_info.size == 0) + return NV_OK; + + page_size = uvm_mmu_biggest_page_size(&gpu->address_space_tree); + size = UVM_ALIGN_UP(peer->mem_info.max_allocatable_address + 1, page_size); + aperture = uvm_gpu_peer_aperture(gpu, peer); + peer_mapping = uvm_gpu_get_peer_mapping(gpu, peer->id); + phys_offset = 0ULL; + + if 
(uvm_gpus_are_nvswitch_connected(gpu, peer)) { + // Add the 47-bit physical address routing bits for this peer to the + // generated PTEs + phys_offset = peer->parent->nvswitch_info.fabric_memory_window_start; + } + + UVM_ASSERT(page_size); + UVM_ASSERT(size); + UVM_ASSERT(size <= UVM_PEER_IDENTITY_VA_SIZE); + UVM_ASSERT(peer_mapping->base); + + return create_identity_mapping(gpu, + peer_mapping->base, + size, + aperture, + phys_offset, + page_size, + UVM_PMM_ALLOC_FLAGS_EVICT, + &peer_mapping->range_vec); +} + +void uvm_mmu_destroy_peer_identity_mappings(uvm_gpu_t *gpu, uvm_gpu_t *peer) +{ + if (gpu->parent->peer_copy_mode == UVM_GPU_PEER_COPY_MODE_VIRTUAL) + destroy_identity_mapping(uvm_gpu_get_peer_mapping(gpu, peer->id)); +} + +void uvm_mmu_init_gpu_chunk_sizes(uvm_parent_gpu_t *parent_gpu) +{ + uvm_chunk_sizes_mask_t sizes = page_sizes_for_big_page_size(parent_gpu, UVM_PAGE_SIZE_64K) | + page_sizes_for_big_page_size(parent_gpu, UVM_PAGE_SIZE_128K) | + PAGE_SIZE; + + // Although we may have to map PTEs smaller than PAGE_SIZE, user (managed) + // memory is never allocated with granularity smaller than PAGE_SIZE. Force + // PAGE_SIZE to be supported and the smallest allowed size so we don't have + // to handle allocating multiple chunks per page. + parent_gpu->mmu_user_chunk_sizes = sizes & PAGE_MASK; + + // Ampere+ GPUs support 512MB page size, however, the maximum chunk size is + // 2MB(i.e., UVM_CHUNK_SIZE_MAX), therefore we mask out any supported page + // size greater than UVM_CHUNK_SIZE_MAX from the chunk size list. + parent_gpu->mmu_user_chunk_sizes &= UVM_CHUNK_SIZES_MASK; + + parent_gpu->mmu_kernel_chunk_sizes = allocation_sizes_for_big_page_size(parent_gpu, UVM_PAGE_SIZE_64K) | + allocation_sizes_for_big_page_size(parent_gpu, UVM_PAGE_SIZE_128K); +} + +void uvm_mmu_init_gpu_peer_addresses(uvm_gpu_t *gpu) +{ + if (gpu->parent->peer_copy_mode == UVM_GPU_PEER_COPY_MODE_VIRTUAL) { + uvm_gpu_id_t gpu_id; + + for_each_gpu_id(gpu_id) { + uvm_gpu_get_peer_mapping(gpu, gpu_id)->base = gpu->parent->rm_va_base + + gpu->parent->rm_va_size + + UVM_PEER_IDENTITY_VA_SIZE * uvm_id_gpu_index(gpu_id); + } + + UVM_ASSERT(gpu->parent->uvm_mem_va_base >= + uvm_gpu_get_peer_mapping(gpu, uvm_gpu_id_from_value(UVM_ID_MAX_GPUS - 1))->base + + UVM_PEER_IDENTITY_VA_SIZE); + } +} + +static size_t root_chunk_mapping_index(uvm_gpu_t *gpu, uvm_gpu_root_chunk_mapping_t *root_chunk_mapping) +{ + return root_chunk_mapping - gpu->root_chunk_mappings.array; +} + +static NvU64 root_chunk_mapping_physical_address(uvm_gpu_t *gpu, uvm_gpu_root_chunk_mapping_t *root_chunk_mapping) +{ + return UVM_CHUNK_SIZE_MAX * root_chunk_mapping_index(gpu, root_chunk_mapping); +} + +static void root_chunk_mapping_lock(uvm_gpu_t *gpu, uvm_gpu_root_chunk_mapping_t *root_chunk_mapping) +{ + uvm_bit_lock(&gpu->root_chunk_mappings.bitlocks, root_chunk_mapping_index(gpu, root_chunk_mapping)); +} + +static void root_chunk_mapping_unlock(uvm_gpu_t *gpu, uvm_gpu_root_chunk_mapping_t *root_chunk_mapping) +{ + uvm_bit_unlock(&gpu->root_chunk_mappings.bitlocks, root_chunk_mapping_index(gpu, root_chunk_mapping)); +} + +static uvm_gpu_root_chunk_mapping_t *root_chunk_mapping_from_address(uvm_gpu_t *gpu, NvU64 addr) +{ + size_t index = addr / UVM_CHUNK_SIZE_MAX; + + UVM_ASSERT(addr <= gpu->mem_info.max_allocatable_address); + + return gpu->root_chunk_mappings.array + index; +} + +static uvm_gpu_root_chunk_mapping_t *root_chunk_mapping_from_chunk(uvm_gpu_t *gpu, uvm_gpu_chunk_t *chunk) +{ + return root_chunk_mapping_from_address(gpu, 
chunk->address); +} + +static void destroy_dynamic_vidmem_mapping(uvm_gpu_t *gpu) +{ + size_t i; + + if (!uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)) + return; + + if (gpu->root_chunk_mappings.array == NULL) + return; + + uvm_bit_locks_deinit(&gpu->root_chunk_mappings.bitlocks); + + for (i = 0; i < gpu->root_chunk_mappings.count; ++i) { + uvm_gpu_root_chunk_mapping_t *root_chunk_mapping = gpu->root_chunk_mappings.array + i; + + UVM_ASSERT(root_chunk_mapping->range == NULL); + UVM_ASSERT(root_chunk_mapping->num_mapped_pages == 0); + } + + uvm_kvfree(gpu->root_chunk_mappings.array); + gpu->root_chunk_mappings.array = NULL; +} + +static NV_STATUS create_dynamic_vidmem_mapping(uvm_gpu_t *gpu) +{ + NV_STATUS status; + size_t count; + + if (!uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)) + return NV_OK; + + UVM_ASSERT(!uvm_mmu_gpu_needs_static_vidmem_mapping(gpu)); + BUILD_BUG_ON(UVM_PAGE_SIZE_2M != UVM_CHUNK_SIZE_MAX); + UVM_ASSERT(uvm_mmu_page_size_supported(&gpu->address_space_tree, UVM_PAGE_SIZE_2M)); + UVM_ASSERT(gpu->pmm.initialized); + + count = gpu->pmm.root_chunks.count; + + gpu->root_chunk_mappings.array = uvm_kvmalloc_zero(sizeof(*gpu->root_chunk_mappings.array) * count); + if (gpu->root_chunk_mappings.array == NULL) + return NV_ERR_NO_MEMORY; + + gpu->root_chunk_mappings.count = count; + + status = uvm_bit_locks_init(&gpu->root_chunk_mappings.bitlocks, count, UVM_LOCK_ORDER_CHUNK_MAPPING); + if (status != NV_OK) + goto error; + + return NV_OK; + +error: + destroy_dynamic_vidmem_mapping(gpu); + return status; +} + +// Remove a root chunk mapping from the flat mapping address segment. +// +// This function is a specialized version of destroy_identity_mapping +// because the mapping fits into a single 2MB PTE, so there is no need to use +// page table range vectors. +static void root_chunk_mapping_destroy(uvm_gpu_t *gpu, uvm_gpu_root_chunk_mapping_t *root_chunk_mapping) +{ + NvU64 pa, va; + uvm_gpu_address_t gpu_virtual_address; + uvm_page_tree_t *tree; + uvm_gpu_phys_address_t entry_addr; + uvm_push_t push; + NvU32 entry_size; + uvm_pte_batch_t pte_batch; + NvU32 page_size; + NvU64 size; + NvU64 invalid_pte; + uvm_page_table_range_t *range = root_chunk_mapping->range; + + UVM_ASSERT(range != NULL); + + tree = &gpu->address_space_tree; + pa = root_chunk_mapping_physical_address(gpu, root_chunk_mapping); + gpu_virtual_address = uvm_gpu_address_virtual_from_vidmem_phys(gpu, pa); + va = gpu_virtual_address.address; + page_size = range->page_size; + size = page_size; + + (void) page_tree_begin_acquire(tree, NULL, &push, "Removing linear mapping at [0x%llx, 0x%llx)", va, va + size); + + uvm_pte_batch_begin(&push, &pte_batch); + entry_addr = uvm_page_table_range_entry_address(tree, range, 0); + invalid_pte = 0; + entry_size = uvm_mmu_pte_size(tree, page_size); + uvm_pte_batch_clear_ptes(&pte_batch, entry_addr, invalid_pte, entry_size, range->entry_count); + uvm_pte_batch_end(&pte_batch); + + uvm_tlb_batch_single_invalidate(tree, &push, va, size, page_size, UVM_MEMBAR_SYS); + + (void) page_tree_end_and_wait(tree, &push); + + uvm_page_tree_put_ptes(tree, range); + + uvm_kvfree(range); + root_chunk_mapping->range = NULL; +} + +// Add a root chunk mapping to the flat mapping address segment located in the +// UVM-internal GPU kernel address space. +// +// This function is a specialized version of create_identity_mapping because the +// mapping fits into a single 2MB PTE, so there is no need to use page table +// range vectors. 
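+// In outline (sketch of the body below): reserve a single 2MB entry with
+// uvm_page_tree_get_ptes(), build the PTE with
+// hal->make_pte(UVM_APERTURE_VID, pa, UVM_PROT_READ_WRITE_ATOMIC, UVM_MMU_PTE_FLAGS_CACHED),
+// write it with uvm_pte_batch_single_write_ptes(), then TLB-invalidate the
+// 2MB VA and wait for the push to complete.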
+static NV_STATUS root_chunk_mapping_create(uvm_gpu_t *gpu, uvm_gpu_root_chunk_mapping_t *root_chunk_mapping) +{ + NV_STATUS status; + NvU64 pa, va; + uvm_gpu_address_t gpu_virtual_address; + uvm_page_table_range_t *range; + uvm_page_tree_t *tree; + uvm_gpu_phys_address_t entry_addr; + uvm_push_t push; + NvU64 pte_bits; + NvU32 entry_size; + NvU32 page_size = UVM_CHUNK_SIZE_MAX; + NvU64 size = UVM_CHUNK_SIZE_MAX; + + range = uvm_kvmalloc_zero(sizeof(*range)); + if (range == NULL) + return NV_ERR_NO_MEMORY; + + tree = &gpu->address_space_tree; + pa = root_chunk_mapping_physical_address(gpu, root_chunk_mapping); + gpu_virtual_address = uvm_gpu_address_virtual_from_vidmem_phys(gpu, pa); + va = gpu_virtual_address.address; + + UVM_ASSERT(IS_ALIGNED(va, page_size)); + + // A VA block lock is likely to be held, so eviction is not requested for + // allocations of page tree backing storage. + status = uvm_page_tree_get_ptes(tree, page_size, va, size, UVM_PMM_ALLOC_FLAGS_NONE, range); + if (status != NV_OK) { + uvm_kvfree(range); + return status; + } + + UVM_ASSERT(range->entry_count == 1); + + status = page_tree_begin_acquire(tree, NULL, &push, "Adding linear mapping at [0x%llx, 0x%llx)", va, va + size); + if (status != NV_OK) + goto error; + + entry_addr = uvm_page_table_range_entry_address(tree, range, 0); + pte_bits = tree->hal->make_pte(UVM_APERTURE_VID, pa, UVM_PROT_READ_WRITE_ATOMIC, UVM_MMU_PTE_FLAGS_CACHED); + entry_size = uvm_mmu_pte_size(tree, page_size); + uvm_pte_batch_single_write_ptes(&push, entry_addr, &pte_bits, entry_size, range->entry_count); + + uvm_tlb_batch_single_invalidate(tree, &push, va, page_size, page_size, UVM_MEMBAR_NONE); + + status = page_tree_end_and_wait(tree, &push); + if (status != NV_OK) + goto error; + + root_chunk_mapping->range = range; + + return NV_OK; + +error: + uvm_page_tree_put_ptes(tree, range); + uvm_kvfree(range); + + return status; +} + +NV_STATUS uvm_mmu_chunk_map(uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_root_chunk_mapping_t *root_chunk_mapping; + uvm_chunk_size_t chunk_size; + NvU16 num_mapped_pages; + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu = uvm_gpu_chunk_get_gpu(chunk); + + if (!uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)) + return NV_OK; + + chunk_size = uvm_gpu_chunk_get_size(chunk); + num_mapped_pages = chunk_size / PAGE_SIZE; + + // While there could be legitimate use cases for mapping a kernel chunk, + // currently it is disallowed. There are a couple of reasons for this. + // First, in SR-IOV heavy, disallowing kernel chunks prevents unintended + // mappings of page tables/directories when they share the backing + // allocation i.e. the memory backing the kernel chunk is in the same root + // chunk as the memory backing a GPU page tree node. Second, root chunk + // mappings are reference counted as multiples of PAGE_SIZE. User chunk + // sizes are guaranteed to be a multiple of that page size, but kernel chunk + // sizes can be smaller. + UVM_ASSERT(uvm_pmm_gpu_memory_type_is_user(chunk->type)); + + UVM_ASSERT(PAGE_ALIGNED(chunk_size)); + + root_chunk_mapping = root_chunk_mapping_from_chunk(gpu, chunk); + root_chunk_mapping_lock(gpu, root_chunk_mapping); + + if (root_chunk_mapping->num_mapped_pages == 0) { + UVM_ASSERT(root_chunk_mapping->range == NULL); + + status = root_chunk_mapping_create(gpu, root_chunk_mapping); + if (status != NV_OK) + goto out; + } + + // Check for reference counting overflow. The number of mapped pages can be + // above UVM_CHUNK_SIZE_MAX / PAGE_SIZE because the same chunk can be mapped + // multiple times. 
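+    // Reference counting sketch: each uvm_mmu_chunk_map() call adds
+    // chunk_size / PAGE_SIZE to num_mapped_pages (creating the root chunk
+    // mapping on the first map), and uvm_mmu_chunk_unmap() subtracts the same
+    // amount, destroying the mapping when the count drops back to zero.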
+ UVM_ASSERT((NV_U32_MAX - root_chunk_mapping->num_mapped_pages) >= num_mapped_pages); + + root_chunk_mapping->num_mapped_pages += num_mapped_pages; + + out: + root_chunk_mapping_unlock(gpu, root_chunk_mapping); + return status; +} + +void uvm_mmu_chunk_unmap(uvm_gpu_chunk_t *chunk, uvm_tracker_t *tracker) +{ + uvm_gpu_root_chunk_mapping_t *root_chunk_mapping; + uvm_chunk_size_t chunk_size; + NvU16 num_unmapped_pages; + uvm_gpu_t *gpu; + + if (chunk == NULL) + return; + + gpu = uvm_gpu_chunk_get_gpu(chunk); + if (!uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)) + return; + + if (tracker != NULL) + uvm_tracker_wait(tracker); + + chunk_size = uvm_gpu_chunk_get_size(chunk); + num_unmapped_pages = chunk_size / PAGE_SIZE; + + UVM_ASSERT(uvm_pmm_gpu_memory_type_is_user(chunk->type)); + UVM_ASSERT(PAGE_ALIGNED(chunk_size)); + + root_chunk_mapping = root_chunk_mapping_from_chunk(gpu, chunk); + root_chunk_mapping_lock(gpu, root_chunk_mapping); + + if (root_chunk_mapping->range != NULL) { + UVM_ASSERT(root_chunk_mapping->num_mapped_pages >= num_unmapped_pages); + + root_chunk_mapping->num_mapped_pages -= num_unmapped_pages; + + if (root_chunk_mapping->num_mapped_pages == 0) + root_chunk_mapping_destroy(gpu, root_chunk_mapping); + } + else { + UVM_ASSERT(root_chunk_mapping->num_mapped_pages == 0); + } + + root_chunk_mapping_unlock(gpu, root_chunk_mapping); +} + + +static size_t sysmem_mapping_index(uvm_gpu_t *gpu, uvm_gpu_identity_mapping_t *sysmem_mapping) +{ + return sysmem_mapping - gpu->sysmem_mappings.array; +} + +static NvU64 sysmem_mapping_physical_address(uvm_gpu_t *gpu, uvm_gpu_identity_mapping_t *sysmem_mapping) +{ + return gpu->sysmem_mappings.mapping_size * sysmem_mapping_index(gpu, sysmem_mapping); +} + +static uvm_gpu_identity_mapping_t *sysmem_mapping_from_address(uvm_gpu_t *gpu, NvU64 pa) +{ + size_t index = pa / gpu->sysmem_mappings.mapping_size; + + UVM_ASSERT(index < gpu->sysmem_mappings.count); + + return gpu->sysmem_mappings.array + index; +} + +static void sysmem_mapping_lock(uvm_gpu_t *gpu, uvm_gpu_identity_mapping_t *sysmem_mapping) +{ + uvm_bit_lock(&gpu->sysmem_mappings.bitlocks, sysmem_mapping_index(gpu, sysmem_mapping)); +} + +static void sysmem_mapping_unlock(uvm_gpu_t *gpu, uvm_gpu_identity_mapping_t *sysmem_mapping) +{ + uvm_bit_unlock(&gpu->sysmem_mappings.bitlocks, sysmem_mapping_index(gpu, sysmem_mapping)); +} + +static void destroy_dynamic_sysmem_mapping(uvm_gpu_t *gpu) +{ + size_t i; + + if (!uvm_mmu_gpu_needs_dynamic_sysmem_mapping(gpu)) + return; + + if (gpu->sysmem_mappings.array == NULL) + return; + + uvm_bit_locks_deinit(&gpu->sysmem_mappings.bitlocks); + + for (i = 0; i < gpu->sysmem_mappings.count; ++i) + destroy_identity_mapping(gpu->sysmem_mappings.array + i); + + uvm_kvfree(gpu->sysmem_mappings.array); + gpu->sysmem_mappings.array = NULL; +} + +static NV_STATUS create_dynamic_sysmem_mapping(uvm_gpu_t *gpu) +{ + NV_STATUS status; + size_t count; + NvU64 mapping_size; + NvU64 flat_sysmem_va_size; + + if (!uvm_mmu_gpu_needs_dynamic_sysmem_mapping(gpu)) + return NV_OK; + + UVM_ASSERT(gpu->parent->flat_sysmem_va_base != 0); + + // The DMA addressable window is the maximum system physical memory + // addressable by the GPU (this limit is 128TB in Pascal-Ampere). The + // virtual mapping to sysmem is linear, so its size matches that of the + // physical address space. 
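+    // i.e. (sketch of the line below):
+    //
+    //     flat_sysmem_va_size = dma_addressable_limit + 1 - dma_addressable_start;
+    //
+    // For a window of 128TB starting at physical address 0, this is 128TB.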
+ flat_sysmem_va_size = gpu->parent->dma_addressable_limit + 1 - gpu->parent->dma_addressable_start; + + // The optimal mapping granularity is dependent on multiple factors: + // application access patterns, distribution of system memory pages across + // the physical address space, mapping costs (themselves highly variable: in + // SR-IOV each mapping addition adds a lot of overhead due to vGPU plugin + // involvement), metadata memory footprint (inversely proportional to the + // mapping size), etc. + mapping_size = 4ULL * 1024 * 1024 * 1024; + + // The mapping size should be at least 1GB, due to bitlock limitations. This + // shouldn't be a problem because the expectation is to use 512MB PTEs, and + // using a granularity of 1GB already results in allocating a large array of + // sysmem mappings with 128K entries. + UVM_ASSERT(is_power_of_2(mapping_size)); + UVM_ASSERT(mapping_size >= 1ULL * 1024 * 1024 * 1024); + UVM_ASSERT(mapping_size >= uvm_mmu_biggest_page_size(&gpu->address_space_tree)); + UVM_ASSERT(mapping_size <= flat_sysmem_va_size); + + flat_sysmem_va_size = UVM_ALIGN_UP(flat_sysmem_va_size, mapping_size); + + UVM_ASSERT((gpu->parent->flat_sysmem_va_base + flat_sysmem_va_size) <= gpu->parent->uvm_mem_va_base); + + count = flat_sysmem_va_size / mapping_size; + + gpu->sysmem_mappings.array = uvm_kvmalloc_zero(sizeof(*gpu->sysmem_mappings.array) * count); + if (gpu->sysmem_mappings.array == NULL) + return NV_ERR_NO_MEMORY; + + gpu->sysmem_mappings.mapping_size = mapping_size; + gpu->sysmem_mappings.count = count; + + status = uvm_bit_locks_init(&gpu->sysmem_mappings.bitlocks, count, UVM_LOCK_ORDER_CHUNK_MAPPING); + if (status != NV_OK) + goto error; + + return NV_OK; + +error: + destroy_dynamic_sysmem_mapping(gpu); + return status; +} + +NV_STATUS uvm_mmu_sysmem_map(uvm_gpu_t *gpu, NvU64 pa, NvU64 size) +{ + NvU64 curr_pa; + + if (!uvm_mmu_gpu_needs_dynamic_sysmem_mapping(gpu)) + return NV_OK; + + curr_pa = UVM_ALIGN_DOWN(pa, gpu->sysmem_mappings.mapping_size); + + while (curr_pa < (pa + size)) { + NV_STATUS status = NV_OK; + uvm_gpu_identity_mapping_t *sysmem_mapping = sysmem_mapping_from_address(gpu, curr_pa); + + sysmem_mapping_lock(gpu, sysmem_mapping); + + if (sysmem_mapping->range_vec == NULL) { + uvm_gpu_address_t virtual_address = uvm_gpu_address_virtual_from_sysmem_phys(gpu, curr_pa); + NvU64 phys_offset = curr_pa; + NvU32 page_size = uvm_mmu_biggest_page_size(&gpu->address_space_tree); + uvm_pmm_alloc_flags_t pmm_flags; + + // No eviction is requested when allocating the page tree storage, + // because in the common case the VA block lock is held. + pmm_flags = UVM_PMM_ALLOC_FLAGS_NONE; + + sysmem_mapping->base = uvm_parent_gpu_canonical_address(gpu->parent, virtual_address.address); + + status = create_identity_mapping(gpu, + sysmem_mapping->base, + gpu->sysmem_mappings.mapping_size, + UVM_APERTURE_SYS, + phys_offset, + page_size, + pmm_flags, + &sysmem_mapping->range_vec); + } + + sysmem_mapping_unlock(gpu, sysmem_mapping); + + // In case of error, don't undo previous mapping additions, since a + // concurrent thread may already have pushed work to the GPU that is + // dependent on the new mappings. 
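+        // Loop shape (sketch): curr_pa starts at UVM_ALIGN_DOWN(pa, mapping_size)
+        // and advances by gpu->sysmem_mappings.mapping_size per iteration, so
+        // at most one identity mapping is created per mapping_size-aligned
+        // slice that [pa, pa + size) touches.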
+ if (status != NV_OK) + return status; + + curr_pa += gpu->sysmem_mappings.mapping_size; + } + + return NV_OK; +} + +NV_STATUS uvm_mmu_create_flat_mappings(uvm_gpu_t *gpu) +{ + NV_STATUS status; + + status = create_dynamic_sysmem_mapping(gpu); + if (status != NV_OK) + return status; + + status = create_static_vidmem_mapping(gpu); + if (status != NV_OK) + goto error; + + status = create_dynamic_vidmem_mapping(gpu); + if (status != NV_OK) + goto error; + + return NV_OK; + +error: + uvm_mmu_destroy_flat_mappings(gpu); + return status; +} + +void uvm_mmu_destroy_flat_mappings(uvm_gpu_t *gpu) +{ + destroy_dynamic_vidmem_mapping(gpu); + destroy_static_vidmem_mapping(gpu); + destroy_dynamic_sysmem_mapping(gpu); +} + +NV_STATUS uvm_test_invalidate_tlb(UVM_TEST_INVALIDATE_TLB_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_gpu_t *gpu = NULL; + uvm_push_t push; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_va_space_t *gpu_va_space; + + // Check parameter values + if (params->membar < UvmInvalidateTlbMemBarNone || + params->membar > UvmInvalidateTlbMemBarLocal) { + return NV_ERR_INVALID_PARAMETER; + } + + if (params->target_va_mode < UvmTargetVaModeAll || + params->target_va_mode > UvmTargetVaModeTargeted) { + return NV_ERR_INVALID_PARAMETER; + } + + if (params->page_table_level < UvmInvalidatePageTableLevelAll || + params->page_table_level > UvmInvalidatePageTableLevelPde4) { + return NV_ERR_INVALID_PARAMETER; + } + + uvm_va_space_down_read(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, ¶ms->gpu_uuid); + if (!gpu) { + status = NV_ERR_INVALID_DEVICE; + goto unlock_exit; + } + + gpu_va_space = uvm_gpu_va_space_get(va_space, gpu); + UVM_ASSERT(gpu_va_space); + + status = uvm_push_begin(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &push, + "Pushing test invalidate, GPU %s", + uvm_gpu_name(gpu)); + if (status == NV_OK) + gpu->parent->host_hal->tlb_invalidate_test(&push, uvm_page_tree_pdb(&gpu_va_space->page_tables)->addr, params); + +unlock_exit: + // Wait for the invalidation to be performed + if (status == NV_OK) + status = uvm_push_end_and_wait(&push); + + uvm_va_space_up_read(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_mmu.h b/kernel-open/nvidia-uvm/uvm_mmu.h new file mode 100644 index 000000000..d76a0dbd7 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_mmu.h @@ -0,0 +1,649 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_MMU_H__ +#define __UVM_MMU_H__ + +#include "uvm_forward_decl.h" +#include "uvm_hal_types.h" +#include "uvm_pmm_gpu.h" +#include "uvm_types.h" +#include "uvm_common.h" +#include "uvm_tracker.h" +#include "uvm_test_ioctl.h" + +// Used when the page size isn't known and should not matter. +#define UVM_PAGE_SIZE_AGNOSTIC 0 + +// Memory layout of UVM's kernel VA space. +// The following memory regions are not to scale. + + + + + + + + + + + + + + + + + + + + + +// +// Pascal-Ampere: +// +----------------+ 512TB +// | | +// | (not used) | +// | | +// ------------------ +// |uvm_mem_t(128GB)| (uvm_mem_va_size) +// ------------------ 384TB (uvm_mem_va_base) +// | sysmem | +// | flat mapping | ==> 1ULL << NV_CHIP_EXTENDED_SYSTEM_PHYSICAL_ADDRESS_BITS +// | (up to 128TB) | +// ------------------ 256TB (flat_sysmem_va_base) +// | | +// | (not used) | +// | | +// ------------------ 132TB + 128GB (UVM_GPU_MAX_PHYS_MEM) +// | vidmem | +// | flat mapping | ==> UVM_GPU_MAX_PHYS_MEM +// | (128GB) | +// ------------------ 132TB (flat_vidmem_va_base) +// |peer ident. maps| +// |32 * 128GB = 4TB| ==> NV_MAX_DEVICES * UVM_PEER_IDENTITY_VA_SIZE +// ------------------ 128TB +// | | +// | rm_mem(128TB) | (rm_va_size) +// | | +// +----------------+ 0 (rm_va_base) +// +// Maxwell: +// +----------------+ 1TB +// | | +// | (not used) | +// | | +// ------------------ 896GB +// |uvm_mem_t(128GB)| (uvm_mem_va_size) +// ------------------ 768GB (uvm_mem_va_base) +// | | +// | (not used) | +// | | +// ------------------ 128GB +// | | +// | rm_mem(128GB) | (rm_va_size) +// | | +// +----------------+ 0 (rm_va_base) + +// Maximum memory of any GPU. +#define UVM_GPU_MAX_PHYS_MEM (128ull * 1024 * 1024 * 1024) + +// The size of VA that should be reserved per peer identity mapping. +// This should be at least the maximum amount of memory of any GPU. +#define UVM_PEER_IDENTITY_VA_SIZE UVM_GPU_MAX_PHYS_MEM + +// GPUs which support ATS perform a parallel lookup on both ATS and GMMU page +// tables. The ATS lookup can be disabled by setting a bit in the GMMU page +// tables. All GPUs which support ATS use the same mechanism (a bit in PDE1), +// and have the same PDE1 coverage (512MB). +// +// If the PTE format changes, this will need to move to the HAL. +#define UVM_GMMU_ATS_GRANULARITY (512ull * 1024 * 1024) + +// This represents an allocation containing either a page table or page +// directory. +typedef struct +{ + uvm_gpu_phys_address_t addr; + + NvU64 size; + union + { + struct page *page; + uvm_gpu_chunk_t *chunk; + } handle; +} uvm_mmu_page_table_alloc_t; + +// This structure in general refers to a page directory +// although it is also used to represent a page table, in which case entries is +// not allocated. 
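+// A directory and its trailing entries[] array are allocated together; a
+// rough sketch of the sizing, assuming n host-visible child slots:
+//
+//     dir = uvm_kvmalloc_zero(sizeof(uvm_page_directory_t) + n * sizeof(dir->entries[0]));
+//
+// sizeof(uvm_page_directory_t) equals offsetof(uvm_page_directory_t, entries)
+// because the array is declared with length 0, as asserted by the
+// BUILD_BUG_ON in uvm_page_tree_init().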
+typedef struct uvm_page_directory_struct uvm_page_directory_t; + +struct uvm_page_directory_struct +{ + // parent directory + uvm_page_directory_t *host_parent; + + // index of this entry in the parent directory + NvU32 index_in_parent; + + // allocation that holds actual page table used by device + uvm_mmu_page_table_alloc_t phys_alloc; + + // count of references to all entries + NvU32 ref_count; + + // depth from the root + NvU32 depth; + + // pointers to child directories on the host. + // this array is variable length, so it needs to be last to allow it to + // take up extra space + uvm_page_directory_t *entries[0]; +}; + +enum +{ + UVM_MMU_PTE_FLAGS_NONE = 0, + + // data from the page pointed by the PTE may be cached incoherently by the + // GPU L2. This option may result in accessing stale data. For vidmem + // aperture, however, it is safe to use _CACHED, i.e., no memory request + // accesses stale data because L2 is in the datapath of the GPU memory. + UVM_MMU_PTE_FLAGS_CACHED = (1 << 0), + + // Disable access counters to the page pointed by the PTE. + UVM_MMU_PTE_FLAGS_ACCESS_COUNTERS_DISABLED = (1 << 1), + UVM_MMU_PTE_FLAGS_MASK = (1 << 2) - 1 +}; + +struct uvm_mmu_mode_hal_struct +{ + // bit pattern of a valid PTE. flags is a bitwise-or of UVM_MMU_PTE_FLAGS_*. + NvU64 (*make_pte)(uvm_aperture_t aperture, NvU64 address, uvm_prot_t prot, NvU64 flags); + + // bit pattern of a sked reflected PTE + NvU64 (*make_sked_reflected_pte)(void); + + // bit pattern of sparse PTE + // Sparse PTEs will indicate to MMU to route all reads and writes to the + // debug pages. Therefore, accesses to sparse mappings do not generate + // faults. + NvU64 (*make_sparse_pte)(void); + + // Bit pattern of an "unmapped" PTE. The GPU MMU recognizes two flavors of + // empty PTEs: + // 1) Invalid: Bit pattern of all 0s. There is no HAL function for this. + // 2) Unmapped: This pattern. + // + // The subtle difference is for big PTEs. Invalid big PTEs indicate to the + // GPU MMU that there might be 4k PTEs present instead, and that those 4k + // entries should be read and cached. Unmapped big PTEs indicate that there + // are no 4k PTEs below the unmapped big entry, so MMU should stop its walk + // and not cache any 4k entries which may be in memory. + // + // This is an optimization which reduces TLB pressure, reduces the number of + // TLB invalidates we must issue, and means we don't have to initialize the + // 4k PTEs which are covered by big PTEs since the MMU will never read them. + NvU64 (*unmapped_pte)(NvU32 page_size); + + // Bit pattern used for debug purposes to clobber PTEs which ought to be + // unused. In practice this will generate a PRIV violation or a physical + // memory out-of-range error so we can immediately identify bad PTE usage. + NvU64 (*poisoned_pte)(void); + + // write a PDE bit-pattern to entry based on the data in entries (which may + // point to two items for dual PDEs). + // any of allocs are allowed to be NULL, in which case they are to be + // treated as empty. + void (*make_pde)(void *entry, uvm_mmu_page_table_alloc_t **allocs, NvU32 depth); + + // size of an entry in a directory/table. Generally either 8 or 16 bytes. + // (in the case of Pascal dual PDEs) + NvLength (*entry_size)(NvU32 depth); + + // Two for dual PDEs, one otherwise. + NvU32 (*entries_per_index)(NvU32 depth); + + // For dual PDEs, this is ether 1 or 0, depending on the page size. + // This is used to index the host copy only. GPU PDEs are always entirely + // re-written using make_pde. 
+ NvLength (*entry_offset)(NvU32 depth, NvU32 page_size); + + // number of virtual address bits used to index the directory/table at a + // given depth + NvU32 (*index_bits)(NvU32 depth, NvU32 page_size); + + // total number of bits that represent the virtual address space + NvU32 (*num_va_bits)(void); + + // the size, in bytes, of a directory/table at a given depth. + NvLength (*allocation_size)(NvU32 depth, NvU32 page_size); + + // the depth which corresponds to the page tables + NvU32 (*page_table_depth)(NvU32 page_size); + + // bitwise-or of supported page sizes + NvU32 (*page_sizes)(void); +}; + +struct uvm_page_table_range_struct +{ + uvm_page_directory_t *table; + NvU32 start_index; + NvU32 entry_count; + NvU32 page_size; +}; + +typedef enum +{ + UVM_PAGE_TREE_TYPE_USER, + UVM_PAGE_TREE_TYPE_KERNEL, + UVM_PAGE_TREE_TYPE_COUNT +} uvm_page_tree_type_t; + +struct uvm_page_tree_struct +{ + uvm_mutex_t lock; + uvm_gpu_t *gpu; + uvm_page_directory_t *root; + uvm_mmu_mode_hal_t *hal; + uvm_page_tree_type_t type; + NvU32 big_page_size; + + // Pointer to the GPU VA space containing the page tree. + // This pointer is set only for page trees of type + // UVM_PAGE_TREE_TYPE_USER and is used to get to the + // VA space mm_struct when CGroup accounting is enabled. + uvm_gpu_va_space_t *gpu_va_space; + + // Location of the physical pages backing the page directories and tables in + // the tree. If the location is UVM_APERTURE_SYS, all the pages are in + // sysmem. If it is UVM_APERTURE_VID, all the pages are in vidmem unless + // location_sys_fallback is true, in which case a sysmem fallback is allowed + // so the pages can be placed in either aperture. + uvm_aperture_t location; + bool location_sys_fallback; + + struct + { + // Page table where all entries are invalid small-page entries. + uvm_mmu_page_table_alloc_t ptes_invalid_4k; + + // PDE0 where all big-page entries are invalid, and small-page entries + // point to ptes_invalid_4k. + // pde0 is only used on Pascal-Ampere, i.e., they have the same PDE + // format. + uvm_mmu_page_table_alloc_t pde0; + } map_remap; + + // Tracker for all GPU operations on the tree + uvm_tracker_t tracker; +}; + +// A vector of page table ranges +struct uvm_page_table_range_vec_struct +{ + // The tree the range vector is from + uvm_page_tree_t *tree; + + // Start of the covered VA in bytes, always page_size aligned + NvU64 start; + + // Size of the covered VA in bytes, always page_size aligned + NvU64 size; + + // Page size used for all the page table ranges + NvU32 page_size; + + // Page table ranges covering the VA + uvm_page_table_range_t *ranges; + + // Number of allocated ranges + size_t range_count; +}; + +// Called at module init +NV_STATUS uvm_mmu_init(void); + +// Initialize MMU-specific information for the GPU/sub-processor +void uvm_mmu_init_gpu_chunk_sizes(uvm_parent_gpu_t *parent_gpu); +void uvm_mmu_init_gpu_peer_addresses(uvm_gpu_t *gpu); + +// Create a page tree structure and allocate the root directory. location +// behavior: +// - UVM_APERTURE_VID Force all page table allocations into vidmem +// - UVM_APERTURE_SYS Force all page table allocations into sysmem +// - UVM_APERTURE_DEFAULT Let the implementation decide +// +// On failure the caller must call uvm_page_tree_deinit() iff tree_out->root is +// valid. 
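+//
+// Typical usage (sketch; error handling abbreviated):
+//
+//     uvm_page_tree_t tree;
+//     status = uvm_page_tree_init(gpu, gpu_va_space, UVM_PAGE_TREE_TYPE_USER,
+//                                 big_page_size, UVM_APERTURE_DEFAULT, &tree);
+//     ...
+//     uvm_page_tree_deinit(&tree);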
+NV_STATUS uvm_page_tree_init(uvm_gpu_t *gpu,
+                             uvm_gpu_va_space_t *gpu_va_space,
+                             uvm_page_tree_type_t type,
+                             NvU32 big_page_size,
+                             uvm_aperture_t location,
+                             uvm_page_tree_t *tree_out);
+
+// Destroy the root directory of the page tree.
+// This function asserts that there are no other elements left in the tree.
+// All PTEs should have been put with uvm_page_tree_put_ptes before this
+// function is called.
+// uvm_page_tree_deinit() must be called when tree->root is valid.
+void uvm_page_tree_deinit(uvm_page_tree_t *tree);
+
+// Returns the range of PTEs that correspond to [start, start + size).
+// It is the caller's responsibility to ensure that this range falls within the
+// same table. This function asserts that start and size are multiples of
+// page_size because sub-page alignment information is not represented in
+// *range.
+//
+// PTE ranges may overlap. Each get_ptes (and friends) call must be
+// paired with a put_ptes call on the same range. get_ptes may overwrite
+// existing PTE values, so do not call get_ptes on overlapping ranges with
+// the same page size without an intervening put_ptes. To duplicate a subset of
+// an existing range or change the size of an existing range, use
+// uvm_page_table_range_get_upper() and/or uvm_page_table_range_shrink().
+NV_STATUS uvm_page_tree_get_ptes(uvm_page_tree_t *tree, NvU32 page_size, NvU64 start, NvLength size,
+                                 uvm_pmm_alloc_flags_t pmm_flags, uvm_page_table_range_t *range);
+
+// Same as uvm_page_tree_get_ptes(), but doesn't synchronize the GPU work.
+//
+// All pending operations can be waited on with uvm_page_tree_wait().
+NV_STATUS uvm_page_tree_get_ptes_async(uvm_page_tree_t *tree, NvU32 page_size, NvU64 start, NvLength size,
+                                       uvm_pmm_alloc_flags_t pmm_flags, uvm_page_table_range_t *range);
+
+// Returns a single-entry page table range for the addresses passed.
+// The size parameter must be a page size supported by this tree.
+// This is equivalent to calling uvm_page_tree_get_ptes() with size equal to
+// page_size.
+NV_STATUS uvm_page_tree_get_entry(uvm_page_tree_t *tree, NvU32 page_size, NvU64 start,
+                                  uvm_pmm_alloc_flags_t pmm_flags, uvm_page_table_range_t *single);
+
+// For a single-entry page table range, write the PDE (which could be a dual
+// PDE) to the GPU.
+// This is useful when the GPU currently has a PTE but that entry can also
+// contain a PDE.
+// It is an error to call this on a PTE-only range.
+// The parameter single can otherwise be any single-entry range such as those
+// allocated from get_entry() or get_ptes().
+// This function performs no TLB invalidations.
+void uvm_page_tree_write_pde(uvm_page_tree_t *tree, uvm_page_table_range_t *single, uvm_push_t *push);
+
+// For a single-entry page table range, clear the PDE on the GPU.
+// It is an error to call this on a PTE-only range.
+// The parameter single can otherwise be any single-entry range such as those
+// allocated from get_entry() or get_ptes().
+// This function performs no TLB invalidations.
+void uvm_page_tree_clear_pde(uvm_page_tree_t *tree, uvm_page_table_range_t *single, uvm_push_t *push);
+
+// For a single-entry page table range, allocate a sibling table for a given
+// page size. This sibling entry will be inserted into the host cache, but will
+// not be written to the GPU page tree. uvm_page_tree_write_pde() can be used to
+// submit entries to the GPU page tree, using single. The range returned refers
+// to all the PTEs in the sibling directory directly.
+//
+// It is the caller's responsibility to initialize the returned table before
+// calling uvm_page_tree_write_pde.
+NV_STATUS uvm_page_tree_alloc_table(uvm_page_tree_t *tree,
+                                    NvU32 page_size,
+                                    uvm_pmm_alloc_flags_t pmm_flags,
+                                    uvm_page_table_range_t *single,
+                                    uvm_page_table_range_t *children);
+
+// Gets PTEs from the upper portion of an existing range and returns them in a
+// new range. num_upper_pages is the number of pages that should be in the new
+// range. It must be in the range [1, existing->entry_count].
+//
+// The existing range is unmodified.
+void uvm_page_table_range_get_upper(uvm_page_tree_t *tree,
+                                    uvm_page_table_range_t *existing,
+                                    uvm_page_table_range_t *upper,
+                                    NvU32 num_upper_pages);
+
+// Releases range's references on its upper pages to shrink the range down to
+// new_page_count, which must be <= range->entry_count. The range start
+// remains the same.
+//
+// new_page_count is allowed to be 0, in which case this is equivalent to
+// calling uvm_page_tree_put_ptes.
+void uvm_page_table_range_shrink(uvm_page_tree_t *tree, uvm_page_table_range_t *range, NvU32 new_page_count);
+
+// Releases the range of PTEs.
+// It is the caller's responsibility to ensure that the empty PTE patterns have
+// already been written in the range passed to the function.
+void uvm_page_tree_put_ptes(uvm_page_tree_t *tree, uvm_page_table_range_t *range);
+
+// Same as uvm_page_tree_put_ptes(), but doesn't synchronize the GPU work.
+//
+// All pending operations can be waited on with uvm_page_tree_wait().
+void uvm_page_tree_put_ptes_async(uvm_page_tree_t *tree, uvm_page_table_range_t *range);
+
+// Synchronize any pending operations
+NV_STATUS uvm_page_tree_wait(uvm_page_tree_t *tree);
+
+// Returns the physical allocation that contains the root directory.
+static uvm_mmu_page_table_alloc_t *uvm_page_tree_pdb(uvm_page_tree_t *tree)
+{
+    return &tree->root->phys_alloc;
+}
+
+// Initialize a page table range vector covering the specified VA range
+// [start, start + size)
+//
+// This splits the VA into the minimum number of page table ranges required to
+// cover it and calls uvm_page_tree_get_ptes() for each of them.
+//
+// The pmm_flags are only used if PTEs are allocated from vidmem
+//
+// Start and size are in bytes and need to be page_size aligned.
+NV_STATUS uvm_page_table_range_vec_init(uvm_page_tree_t *tree,
+                                        NvU64 start,
+                                        NvU64 size,
+                                        NvU32 page_size,
+                                        uvm_pmm_alloc_flags_t pmm_flags,
+                                        uvm_page_table_range_vec_t *range_vec);
+
+// Allocate and initialize a page table range vector.
+// The pmm_flags are only used if PTEs are allocated from vidmem
+NV_STATUS uvm_page_table_range_vec_create(uvm_page_tree_t *tree,
+                                          NvU64 start,
+                                          NvU64 size,
+                                          NvU32 page_size,
+                                          uvm_pmm_alloc_flags_t pmm_flags,
+                                          uvm_page_table_range_vec_t **range_vec_out);
+
+// Split a page table range vector in two using new_end as the split point.
+// new_range_vec will contain the upper portion of range_vec, starting at
+// new_end + 1.
+//
+// new_end + 1 is required to be within the address range of range_vec and be aligned to
+// range_vec's page_size.
+//
+// On failure, the original range vector is left unmodified.
+NV_STATUS uvm_page_table_range_vec_split_upper(uvm_page_table_range_vec_t *range_vec,
+                                               NvU64 new_end,
+                                               uvm_page_table_range_vec_t *new_range_vec);
+
+// Deinitialize a page table range vector and set all fields to 0.
+//
+// Put all the PTEs that the range vector covered.
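+//
+// Illustrative lifecycle sketch for range vectors (exposition only, not driver
+// code; it is similar to the sequence exercised by test_range_vec_create() in
+// uvm_page_tree_test.c, where pte_maker is a caller-provided
+// uvm_page_table_range_pte_maker_t callback declared below):
+//
+//     uvm_page_table_range_vec_t range_vec;
+//
+//     status = uvm_page_table_range_vec_init(tree, start, size, UVM_PAGE_SIZE_4K,
+//                                            UVM_PMM_ALLOC_FLAGS_NONE, &range_vec);
+//     status = uvm_page_table_range_vec_write_ptes(&range_vec, UVM_MEMBAR_NONE,
+//                                                  pte_maker, NULL);
+//     status = uvm_page_table_range_vec_clear_ptes(&range_vec, UVM_MEMBAR_NONE);
+//     uvm_page_table_range_vec_deinit(&range_vec);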
+void uvm_page_table_range_vec_deinit(uvm_page_table_range_vec_t *range_vec);
+
+// Deinitialize and free a page table range vector.
+void uvm_page_table_range_vec_destroy(uvm_page_table_range_vec_t *range_vec);
+
+// A PTE making function used by uvm_page_table_range_vec_write_ptes()
+//
+// The function gets called for each page_size aligned offset within the VA
+// covered by the range vector and is supposed to return the desired PTE bits
+// for each offset.
+// The caller_data pointer is what the caller passed in as caller_data to
+// uvm_page_table_range_vec_write_ptes().
+typedef NvU64 (*uvm_page_table_range_pte_maker_t)(uvm_page_table_range_vec_t *range_vec, NvU64 offset,
+                                                  void *caller_data);
+
+// Write all PTEs covered by the range vector using the given PTE making function.
+//
+// After writing all the PTEs a TLB invalidate operation is performed including
+// the passed in tlb_membar.
+//
+// See comments about uvm_page_table_range_pte_maker_t for details about the
+// PTE making callback.
+NV_STATUS uvm_page_table_range_vec_write_ptes(uvm_page_table_range_vec_t *range_vec, uvm_membar_t tlb_membar,
+                                              uvm_page_table_range_pte_maker_t pte_maker, void *caller_data);
+
+// Set all PTEs covered by the range vector to an empty PTE
+//
+// After clearing all PTEs a TLB invalidate is performed including the given
+// membar.
+NV_STATUS uvm_page_table_range_vec_clear_ptes(uvm_page_table_range_vec_t *range_vec, uvm_membar_t tlb_membar);
+
+// Create peer identity mappings
+NV_STATUS uvm_mmu_create_peer_identity_mappings(uvm_gpu_t *gpu, uvm_gpu_t *peer);
+
+// Destroy peer identity mappings
+void uvm_mmu_destroy_peer_identity_mappings(uvm_gpu_t *gpu, uvm_gpu_t *peer);
+
+// Create or initialize flat mappings to vidmem or system memories. The
+// mappings may cover the entire physical address space, or just parts of it.
+// The mappings are used for engines that do not support physical addressing.
+NV_STATUS uvm_mmu_create_flat_mappings(uvm_gpu_t *gpu);
+
+// Destroy the flat mappings created by uvm_mmu_create_flat_mappings().
+void uvm_mmu_destroy_flat_mappings(uvm_gpu_t *gpu);
+
+// Returns true if a static flat mapping covering the entire vidmem is required
+// for the given GPU.
+bool uvm_mmu_gpu_needs_static_vidmem_mapping(uvm_gpu_t *gpu);
+
+// Returns true if an on-demand flat mapping partially covering the GPU memory
+// is required for the given device.
+bool uvm_mmu_gpu_needs_dynamic_vidmem_mapping(uvm_gpu_t *gpu);
+
+// Returns true if an on-demand flat mapping partially covering the sysmem
+// address space is required for the given device.
+bool uvm_mmu_gpu_needs_dynamic_sysmem_mapping(uvm_gpu_t *gpu);
+
+// Add or remove a (linear) mapping to the root chunk containing the given
+// chunk. The mapping is added to UVM's internal address space in the GPU
+// associated with the chunk. The virtual address associated with a chunk
+// physical address can be queried via uvm_gpu_address_virtual_from_vidmem_phys.
+//
+// Because the root chunk mapping is reference counted, a map call may not add
+// any actual GPU mappings. Similarly, an unmap call may not remove any GPU
+// mappings. The reference counting scheme is based on the size of the input
+// chunk requiring that, for a given root chunk, the combined unmap size matches
+// the combined map size. For example, the same chunk can be mapped multiple
+// times, but then it must be unmapped the same number of times.
And if an +// already mapped chunk is split into multiple subchunks, each subchunk is +// expected to be unmapped once. +// +// The map and unmap functions are synchronous. A tracker can be passed to +// the unmap routine to express any input dependencies. +NV_STATUS uvm_mmu_chunk_map(uvm_gpu_chunk_t *chunk); +void uvm_mmu_chunk_unmap(uvm_gpu_chunk_t *chunk, uvm_tracker_t *tracker); + +// Map a system physical address interval. The mapping is added to UVM's +// internal address space in the given GPU. The resulting virtual address can be +// queried via uvm_gpu_address_virtual_from_sysmem_phys. +// +// The mapping persists until GPU deinitialization, such that no unmap +// functionality is exposed. The map operation is synchronous, and internally +// uses a large mapping granularity that in the common case exceeds the input +// size. +// +// The input address must be a GPU address as returned by uvm_gpu_map_cpu_pages +// for the given GPU. +NV_STATUS uvm_mmu_sysmem_map(uvm_gpu_t *gpu, NvU64 pa, NvU64 size); + +static NvU64 uvm_mmu_page_tree_entries(uvm_page_tree_t *tree, NvU32 depth, NvU32 page_size) +{ + return 1ull << tree->hal->index_bits(depth, page_size); +} + +static NvU64 uvm_mmu_pde_coverage(uvm_page_tree_t *tree, NvU32 page_size) +{ + NvU32 depth = tree->hal->page_table_depth(page_size); + return uvm_mmu_page_tree_entries(tree, depth, page_size) * page_size; +} + +static bool uvm_mmu_page_size_supported(uvm_page_tree_t *tree, NvU32 page_size) +{ + UVM_ASSERT_MSG(is_power_of_2(page_size), "0x%x\n", page_size); + + return (tree->hal->page_sizes() & page_size) != 0; +} + +static NvU32 uvm_mmu_biggest_page_size_up_to(uvm_page_tree_t *tree, NvU32 max_page_size) +{ + NvU32 page_sizes; + NvU32 page_size; + + UVM_ASSERT_MSG(is_power_of_2(max_page_size), "0x%x\n", max_page_size); + + // Calculate the supported page sizes that are not larger than the max + page_sizes = tree->hal->page_sizes() & (max_page_size | (max_page_size - 1)); + + // And pick the biggest one of them + page_size = 1 << __fls(page_sizes); + + UVM_ASSERT_MSG(uvm_mmu_page_size_supported(tree, page_size), "page_size 0x%x", page_size); + + return page_size; +} + +static NvU32 uvm_mmu_biggest_page_size(uvm_page_tree_t *tree) +{ + return 1 << __fls(tree->hal->page_sizes()); +} + +static NvU32 uvm_mmu_pte_size(uvm_page_tree_t *tree, NvU32 page_size) +{ + return tree->hal->entry_size(tree->hal->page_table_depth(page_size)); +} + +static NvU64 uvm_page_table_range_size(uvm_page_table_range_t *range) +{ + return ((NvU64)range->entry_count) * range->page_size; +} + +// Get the physical address of the entry at entry_index within the range +// (counted from range->start_index). 
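+//
+// For example (illustrative numbers only): with 8-byte entries, start_index 16
+// and entry_index 3, the returned address is the table's phys_alloc address
+// plus (16 + 3) * 8 bytes.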
+static uvm_gpu_phys_address_t uvm_page_table_range_entry_address(uvm_page_tree_t *tree, uvm_page_table_range_t *range, + size_t entry_index) +{ + NvU32 entry_size = uvm_mmu_pte_size(tree, range->page_size); + uvm_gpu_phys_address_t entry = range->table->phys_alloc.addr; + + UVM_ASSERT(entry_index < range->entry_count); + + entry.address += (range->start_index + entry_index) * entry_size; + return entry; +} + +static uvm_aperture_t uvm_page_table_range_aperture(uvm_page_table_range_t *range) +{ + return range->table->phys_alloc.addr.aperture; +} + +NV_STATUS uvm_test_invalidate_tlb(UVM_TEST_INVALIDATE_TLB_PARAMS *params, struct file *filp); + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_page_tree_test.c b/kernel-open/nvidia-uvm/uvm_page_tree_test.c new file mode 100644 index 000000000..67ded36ee --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_page_tree_test.c @@ -0,0 +1,2331 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_test.h" +#include "uvm_test_ioctl.h" +#include "uvm_gpu.h" +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_tlb_batch.h" +#include "uvm_mmu.h" +#include "uvm_kvmalloc.h" +// MAXWELL_* +#include "cla16f.h" +#include "clb0b5.h" +// PASCAL_* +#include "clb069.h" // MAXWELL_FAULT_BUFFER_A +#include "clc0b5.h" +#include "clc06f.h" +// VOLTA_* +#include "clc369.h" // MMU_FAULT_BUFFER +#include "clc3b5.h" +#include "clc36f.h" +// AMPERE_* +#include "clc56f.h" +#include "clc6b5.h" + + + + + +// ARCHITECTURE_* +#include "ctrl2080mc.h" + +#define BIG_PAGE_SIZE_PASCAL (1 << 16) +#define MAX_NUM_PAGE_SIZES (8) + +static void fake_ce_memset_8(uvm_push_t *push, uvm_gpu_address_t dst, NvU64 value, size_t size) +{ + size_t i; + + UVM_ASSERT(dst.aperture == UVM_APERTURE_SYS); + + for (i = 0; i < size; i += 8) + *(NvU64 *)phys_to_virt(dst.address + i) = value; +} + +static void *cpu_addr_from_fake(uvm_gpu_address_t fake_gpu_addr) +{ + if (fake_gpu_addr.is_virtual) + return (void*)fake_gpu_addr.address; + + UVM_ASSERT(fake_gpu_addr.aperture == UVM_APERTURE_SYS); + + return phys_to_virt(fake_gpu_addr.address); +} + +static void fake_ce_memcopy(uvm_push_t *push, uvm_gpu_address_t dst, uvm_gpu_address_t src, size_t size) +{ + memcpy(cpu_addr_from_fake(dst), cpu_addr_from_fake(src), size); +} + +static void fake_wait_for_idle(uvm_push_t *push) +{ +} + +static void fake_noop(uvm_push_t *push, NvU32 size) +{ + push->next += size / 4; +} + +static void fake_membar(uvm_push_t *push) +{ +} + +#define FAKE_TLB_INVALS_COUNT_MAX UVM_TLB_BATCH_MAX_ENTRIES + +typedef struct +{ + NvU64 base; + NvU64 size; + NvU32 page_size; + NvU32 depth; + uvm_membar_t membar; +} fake_tlb_invalidate_t; + +static NvU32 g_fake_invals_count = 0; +static fake_tlb_invalidate_t *g_fake_invals = NULL; +static fake_tlb_invalidate_t *g_last_fake_inval; +static bool g_fake_tlb_invals_tracking_enabled = false; + +// Allocate the tracking for TLB invalidates +static NV_STATUS fake_tlb_invals_alloc(void) +{ + UVM_ASSERT(!g_fake_invals); + g_fake_invals = (fake_tlb_invalidate_t *)uvm_kvmalloc(sizeof(*g_fake_invals) * FAKE_TLB_INVALS_COUNT_MAX); + if (!g_fake_invals) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +// Free the tracking for TLB invalidates +static void fake_tlb_invals_free(void) +{ + uvm_kvfree(g_fake_invals); + g_fake_invals = NULL; +} + +static void fake_tlb_invals_reset(void) +{ + UVM_ASSERT(g_fake_tlb_invals_tracking_enabled); + + g_fake_invals_count = 0; +} + +static void fake_tlb_invals_enable(void) +{ + UVM_ASSERT(g_fake_invals); + + g_fake_tlb_invals_tracking_enabled = true; +} + +static void fake_tlb_invals_disable(void) +{ + UVM_ASSERT(g_fake_invals); + + fake_tlb_invals_reset(); + g_fake_tlb_invals_tracking_enabled = false; +} + +// Fake TLB invalidate VA that just saves off the parameters so that they can be verified later +static void fake_tlb_invalidate_va(uvm_push_t *push, uvm_gpu_phys_address_t pdb, + NvU32 depth, NvU64 base, NvU64 size, NvU32 page_size, uvm_membar_t membar) +{ + if (!g_fake_tlb_invals_tracking_enabled) + return; + + ++g_fake_invals_count; + + if (g_fake_invals_count == FAKE_TLB_INVALS_COUNT_MAX + 1) { + // Assert on the first overflow + UVM_ASSERT(0); + } + + if (g_fake_invals_count > FAKE_TLB_INVALS_COUNT_MAX) + return; + + g_last_fake_inval = &g_fake_invals[g_fake_invals_count - 1]; + + g_last_fake_inval->base = base; + g_last_fake_inval->size = size; + g_last_fake_inval->page_size = 
page_size; + g_last_fake_inval->depth = depth; + g_last_fake_inval->membar = membar; +} + +static void fake_tlb_invalidate_all(uvm_push_t *push, uvm_gpu_phys_address_t pdb, NvU32 depth, uvm_membar_t membar) +{ + fake_tlb_invalidate_va(push, pdb, depth, 0, -1, 0, membar); +} + +static bool assert_no_invalidate(void) +{ + UVM_ASSERT(g_fake_tlb_invals_tracking_enabled); + + if (g_fake_invals_count != 0) { + UVM_TEST_PRINT("Expected no invalidates, but got %u instead\n", g_fake_invals_count); + return false; + } + + return true; +} + +static bool assert_and_reset_last_invalidate(NvU32 expected_depth, bool expected_membar) +{ + bool result = true; + + UVM_ASSERT(g_fake_tlb_invals_tracking_enabled); + + if (g_fake_invals_count == 0) { + UVM_TEST_PRINT("Expected an invalidate, but got none\n"); + return false; + } + if (g_fake_invals_count > FAKE_TLB_INVALS_COUNT_MAX) { + UVM_TEST_PRINT("Too many invalidates %u\n", g_fake_invals_count); + return false; + } + + if (g_last_fake_inval->depth != expected_depth) { + UVM_TEST_PRINT("Expected depth %u, got %u instead\n", expected_depth, g_last_fake_inval->depth); + result = false; + } + if ((g_last_fake_inval->membar == UVM_MEMBAR_NONE) == expected_membar) { + UVM_TEST_PRINT("Expected %s membar, got %s instead\n", + expected_membar ? "a" : "no", + uvm_membar_string(g_last_fake_inval->membar)); + result = false; + } + + fake_tlb_invals_reset(); + + return result; +} + +static bool assert_last_invalidate_all(NvU32 expected_depth, bool expected_membar) +{ + UVM_ASSERT(g_fake_tlb_invals_tracking_enabled); + + if (g_fake_invals_count != 1) { + UVM_TEST_PRINT("Expected a single invalidate, but got %u instead\n", g_fake_invals_count); + return false; + } + if (g_last_fake_inval->base != 0 || g_last_fake_inval->size != -1) { + UVM_TEST_PRINT("Expected invalidate all but got range [0x%llx, 0x%llx) instead\n", + g_last_fake_inval->base, g_last_fake_inval->base + g_last_fake_inval->size); + return false; + } + if (g_last_fake_inval->depth != expected_depth) { + UVM_TEST_PRINT("Expected depth %u, got %u instead\n", expected_depth, g_last_fake_inval->depth); + return false; + } + + return true; +} + +static bool assert_invalidate_range_specific(fake_tlb_invalidate_t *inval, + NvU64 base, NvU64 size, NvU32 page_size, NvU32 expected_depth, bool expected_membar) +{ + UVM_ASSERT(g_fake_tlb_invals_tracking_enabled); + + if (g_fake_invals_count == 0) { + UVM_TEST_PRINT("Expected an invalidate for range [0x%llx, 0x%llx), but got none\n", + base, base + size); + return false; + } + + if ((inval->base != base || inval->size != size) && inval->base != 0 && inval->size != -1) { + UVM_TEST_PRINT("Expected invalidate range [0x%llx, 0x%llx), but got range [0x%llx, 0x%llx) instead\n", + base, base + size, + inval->base, inval->base + inval->size); + return false; + } + if (inval->depth != expected_depth) { + UVM_TEST_PRINT("Expected depth %u, got %u instead\n", expected_depth, inval->depth); + return false; + } + if (inval->page_size != page_size && inval->base != 0 && inval->size != -1) { + UVM_TEST_PRINT("Expected page size %u, got %u instead\n", page_size, inval->page_size); + return false; + } + + return true; +} + +static bool assert_invalidate_range(NvU64 base, NvU64 size, NvU32 page_size, bool allow_inval_all, NvU32 range_depth, NvU32 all_depth, bool expected_membar) +{ + NvU32 i; + + UVM_ASSERT(g_fake_tlb_invals_tracking_enabled); + + if (g_fake_invals_count == 0) { + UVM_TEST_PRINT("Expected an invalidate for range [0x%llx, 0x%llx), but got none\n", + base, base + size); 
+ return false; + } + + for (i = 0; i < g_fake_invals_count; ++i) { + fake_tlb_invalidate_t *inval = &g_fake_invals[i]; + if (inval->base == base && inval->size == size) + return assert_invalidate_range_specific(inval, base, size, page_size, range_depth, expected_membar); + } + + if (g_fake_invals_count == 1 && allow_inval_all) + return assert_last_invalidate_all(all_depth, expected_membar); + + UVM_TEST_PRINT("Couldn't find an invalidate for range [0x%llx, 0x%llx) in:\n", base, base + size); + for (i = 0; i < g_fake_invals_count; ++i) { + fake_tlb_invalidate_t *inval = &g_fake_invals[i]; + UVM_TEST_PRINT(" range %d [0x%llx, 0x%llx)\n", i, inval->base, inval->base + inval->size); + } + + return false; +} + +static NV_STATUS test_page_tree_init(uvm_gpu_t *gpu, NvU32 big_page_size, uvm_page_tree_t *tree) +{ + return uvm_page_tree_init(gpu, NULL, UVM_PAGE_TREE_TYPE_USER, big_page_size, UVM_APERTURE_SYS, tree); +} + +static NV_STATUS test_page_tree_get_ptes(uvm_page_tree_t *tree, + NvU32 page_size, + NvU64 start, + NvLength size, + uvm_page_table_range_t *range) +{ + return uvm_page_tree_get_ptes(tree, + page_size, + uvm_parent_gpu_canonical_address(tree->gpu->parent, start), + size, + UVM_PMM_ALLOC_FLAGS_NONE, + range); +} + +static NV_STATUS test_page_tree_get_entry(uvm_page_tree_t *tree, + NvU32 page_size, + NvU64 start, + uvm_page_table_range_t *single) +{ + return uvm_page_tree_get_entry(tree, + page_size, + uvm_parent_gpu_canonical_address(tree->gpu->parent, start), + UVM_PMM_ALLOC_FLAGS_NONE, + single); +} + +static NV_STATUS test_page_tree_alloc_table(uvm_page_tree_t *tree, + NvU32 page_size, + uvm_page_table_range_t *single, + uvm_page_table_range_t *children) +{ + return uvm_page_tree_alloc_table(tree, page_size, UVM_PMM_ALLOC_FLAGS_NONE, single, children); +} + +static bool assert_entry_no_invalidate(uvm_page_tree_t *tree, NvU32 page_size, NvU64 start) +{ + uvm_page_table_range_t entry; + bool result = true; + + if (test_page_tree_get_entry(tree, page_size, start, &entry) != NV_OK) + return false; + + if (!assert_no_invalidate()) + result = false; + + uvm_page_tree_put_ptes(tree, &entry); + + return assert_no_invalidate() && result; +} + +static bool assert_entry_invalidate(uvm_page_tree_t *tree, NvU32 page_size, NvU64 start, NvU32 depth, bool membar) +{ + uvm_page_table_range_t entry; + bool result = true; + + if (test_page_tree_get_entry(tree, page_size, start, &entry) != NV_OK) + return false; + + if (!assert_and_reset_last_invalidate(depth, false)) + result = false; + + uvm_page_tree_put_ptes(tree, &entry); + + return assert_and_reset_last_invalidate(depth, membar) && result; +} + +static NV_STATUS allocate_root(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS alloc_64k_memory(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + NvLength size = 64 * 1024; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, 0, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 1); + TEST_CHECK_RET(range.table->depth == 4); + TEST_CHECK_RET(range.start_index == 0); + TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_64K); + TEST_CHECK_RET(tree.root->ref_count == 1); + TEST_CHECK_RET(tree.root->entries[0]->ref_count == 1); + TEST_CHECK_RET(tree.root->entries[0]->entries[0]->ref_count == 1); + 
TEST_CHECK_RET(tree.root->entries[0]->entries[0]->entries[0]->ref_count == 1); + TEST_CHECK_RET(tree.root->entries[0]->entries[0]->entries[0]->entries[0]->ref_count == 1); + TEST_CHECK_RET(range.table == tree.root->entries[0]->entries[0]->entries[0]->entries[0]); + uvm_page_tree_put_ptes(&tree, &range); + UVM_ASSERT(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +static NV_STATUS alloc_adjacent_64k_memory(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range1; + uvm_page_table_range_t range2; + + NvLength size = 64 * 1024; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, size, size, &range1), NV_OK); + TEST_CHECK_RET(range1.entry_count == 1); + + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, 0, size, &range2), NV_OK); + TEST_CHECK_RET(range2.entry_count == 1); + TEST_CHECK_RET(range1.table == range2.table); + TEST_CHECK_RET(range1.table == tree.root->entries[0]->entries[0]->entries[0]->entries[0]); + TEST_CHECK_RET(range1.start_index == 1); + TEST_CHECK_RET(range2.start_index == 0); + + uvm_page_tree_put_ptes(&tree, &range1); + uvm_page_tree_put_ptes(&tree, &range2); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS alloc_adjacent_pde_64k_memory(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + uvm_page_table_range_t next_range; + NvLength size = 64 * 1024; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, 0, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 1); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, 2 * 1024 * 1024, size, &next_range), NV_OK); + TEST_CHECK_RET(range.table == tree.root->entries[0]->entries[0]->entries[0]->entries[0]); + TEST_CHECK_RET(next_range.table == tree.root->entries[0]->entries[0]->entries[0]->entries[2]); + uvm_page_tree_put_ptes(&tree, &range); + uvm_page_tree_put_ptes(&tree, &next_range); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + + +static NV_STATUS alloc_nearby_pde_64k_memory(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + uvm_page_table_range_t next_range; + NvLength size = 64 * 1024; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, 6 * 1024 * 1024, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 1); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, 2 * 1024 * 1024, size, &next_range), NV_OK); + TEST_CHECK_RET(range.table == tree.root->entries[0]->entries[0]->entries[0]->entries[6]); + TEST_CHECK_RET(next_range.table == tree.root->entries[0]->entries[0]->entries[0]->entries[2]); + uvm_page_tree_put_ptes(&tree, &range); + uvm_page_tree_put_ptes(&tree, &next_range); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS allocate_then_free_all_16_64k(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range[16]; + + NvLength size = 64 * 1024; + NvLength stride = 32 * size; + NvLength start = stride * 256; + int i; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + + for (i = 0; i < 16; i++) + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, start + i * stride, size, range + i), NV_OK); + + 
TEST_CHECK_RET(tree.root->entries[0]->entries[0]->entries[1]->ref_count == 16); + + for (i = 0; i < 16; i++) + uvm_page_tree_put_ptes(&tree, range + i); + + UVM_ASSERT(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS allocate_then_free_8_8_64k(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range[16]; + + NvLength size = 64 * 1024; + NvLength stride = 32 * size; + NvLength start = stride * 248 + 256LL * 1024 * 1024 * 1024 + (1LL << 47); + int i; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + + for (i = 0; i < 16; i++) + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, start + i * stride, size, range + i), NV_OK); + + TEST_CHECK_RET(tree.root->entries[1]->entries[1]->entries[0]->ref_count == 8); + TEST_CHECK_RET(tree.root->entries[1]->entries[1]->entries[1]->ref_count == 8); + + for (i = 0; i < 16; i++) + uvm_page_tree_put_ptes(&tree, range + i); + + UVM_ASSERT(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS get_single_page_2m(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + // use a start address not at the beginning of a PDE3 entry's range + NvU64 start = 34983UL * (1 << 21); + NvLength size = 1 << 21; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_2M, start, size, &range), NV_OK); + + TEST_CHECK_RET(range.entry_count == 1); + TEST_CHECK_RET(range.table->depth == 3); + TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_2M); + + uvm_page_tree_put_ptes(&tree, &range); + TEST_CHECK_RET(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS alloc_512m_memory(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + NvLength size = 512UL * 1024 * 1024; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_512M, 0, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 1); + TEST_CHECK_RET(range.table->depth == 2); + TEST_CHECK_RET(range.start_index == 0); + TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_512M); + TEST_CHECK_RET(tree.root->ref_count == 1); + TEST_CHECK_RET(tree.root->entries[0]->ref_count == 1); + TEST_CHECK_RET(tree.root->entries[0]->entries[0]->ref_count == 1); + TEST_CHECK_RET(range.table == tree.root->entries[0]->entries[0]); + uvm_page_tree_put_ptes(&tree, &range); + UVM_ASSERT(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS alloc_adjacent_512m_memory(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range1; + uvm_page_table_range_t range2; + + NvLength size = 512UL * 1024 * 1024; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_512M, size, size, &range1), NV_OK); + TEST_CHECK_RET(range1.entry_count == 1); + + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_512M, 0, size, &range2), NV_OK); + TEST_CHECK_RET(range2.entry_count == 1); + TEST_CHECK_RET(range1.table == range2.table); + TEST_CHECK_RET(range1.table == tree.root->entries[0]->entries[0]); + TEST_CHECK_RET(range1.start_index == 1); + TEST_CHECK_RET(range2.start_index == 0); + + uvm_page_tree_put_ptes(&tree, &range1); + uvm_page_tree_put_ptes(&tree, &range2); + 
uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS get_single_page_512m(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + // use a start address not at the beginning of a PDE2 entry's range + NvU64 start = 3UL * 512 * 1024 * 1024; + NvLength size = 512UL * 1024 * 1024; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_512M, start, size, &range), NV_OK); + + TEST_CHECK_RET(range.entry_count == 1); + TEST_CHECK_RET(range.table->depth == 2); + TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_512M); + + uvm_page_tree_put_ptes(&tree, &range); + TEST_CHECK_RET(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS get_entire_table_4k(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + NvU64 start = 1UL << 47; + + NvLength size = 1 << 21; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_4K, start, size, &range), NV_OK); + + TEST_CHECK_RET(range.table == tree.root->entries[1]->entries[0]->entries[0]->entries[1]); + TEST_CHECK_RET(range.entry_count == 512); + TEST_CHECK_RET(range.table->depth == 4); + TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_4K); + TEST_CHECK_RET(tree.root->ref_count == 1); + + uvm_page_tree_put_ptes(&tree, &range); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS get_entire_table_512m(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + NvU64 start = 1UL << 47; + NvLength size = 512UL * 512 * 1024 * 1024; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_512M, start, size, &range), NV_OK); + + TEST_CHECK_RET(range.table == tree.root->entries[1]->entries[0]); + TEST_CHECK_RET(range.entry_count == 512); + TEST_CHECK_RET(range.table->depth == 2); + TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_512M); + TEST_CHECK_RET(tree.root->ref_count == 1); + + uvm_page_tree_put_ptes(&tree, &range); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS split_4k_from_2m(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range_2m; + uvm_page_table_range_t range_adj; + uvm_page_table_range_t range_4k; + uvm_page_table_range_t range_64k; + + NvU64 start = 1UL << 48; + NvLength size = 1 << 21; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_2M, start, size, &range_2m), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_2M, start + size, size, &range_adj), NV_OK); + + TEST_CHECK_RET(range_2m.entry_count == 1); + TEST_CHECK_RET(range_2m.table->depth == 3); + TEST_CHECK_RET(range_adj.entry_count == 1); + TEST_CHECK_RET(range_adj.table->depth == 3); + + // Need to release the 2 MB page so that the reference count is right. 
+ uvm_page_tree_put_ptes(&tree, &range_2m); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_4K, start, 64 * 1024, &range_4k), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, + UVM_PAGE_SIZE_64K, + start + 64 * 1024, + size - 64 * 1024, + &range_64k), + NV_OK); + + TEST_CHECK_RET(range_4k.entry_count == 16); + TEST_CHECK_RET(range_4k.table->depth == 4); + TEST_CHECK_RET(range_4k.table == tree.root->entries[2]->entries[0]->entries[0]->entries[1]); + TEST_CHECK_RET(range_4k.start_index == 0); + + TEST_CHECK_RET(range_64k.entry_count == 31); + TEST_CHECK_RET(range_64k.table == tree.root->entries[2]->entries[0]->entries[0]->entries[0]); + TEST_CHECK_RET(range_64k.start_index == 1); + + // Free everything + uvm_page_tree_put_ptes(&tree, &range_adj); + uvm_page_tree_put_ptes(&tree, &range_4k); + uvm_page_tree_put_ptes(&tree, &range_64k); + + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS split_2m_from_512m(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range_512m; + uvm_page_table_range_t range_adj; + uvm_page_table_range_t range_2m; + + NvU64 start = 1UL << 48; + NvLength size = 512UL * 1024 * 1024; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_512M, start, size, &range_512m), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_512M, start + size, size, &range_adj), NV_OK); + + TEST_CHECK_RET(range_512m.entry_count == 1); + TEST_CHECK_RET(range_512m.table->depth == 2); + TEST_CHECK_RET(range_adj.entry_count == 1); + TEST_CHECK_RET(range_adj.table->depth == 2); + + // Need to release the 512M page so that the reference count is right. + uvm_page_tree_put_ptes(&tree, &range_512m); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_2M, start, size, &range_2m), NV_OK); + + TEST_CHECK_RET(range_2m.entry_count == 256); + TEST_CHECK_RET(range_2m.table->depth == 3); + TEST_CHECK_RET(range_2m.table == tree.root->entries[2]->entries[0]->entries[0]); + TEST_CHECK_RET(range_2m.start_index == 0); + + // Free everything + uvm_page_tree_put_ptes(&tree, &range_adj); + uvm_page_tree_put_ptes(&tree, &range_2m); + + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS get_512mb_range(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + NvU64 start = 512 * (1 << 20); + NvU64 size = start; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_2M, start, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 256); + TEST_CHECK_RET(range.table->depth == 3); + TEST_CHECK_RET(range.start_index == 0); + uvm_page_tree_put_ptes(&tree, &range); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS get_2gb_range(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + NvU64 start = 2UL * (1 << 30); + NvU64 size = start; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_512M, start, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 4); + TEST_CHECK_RET(range.table->depth == 2); + TEST_CHECK_RET(range.start_index == 4); + uvm_page_tree_put_ptes(&tree, &range); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS get_two_free_apart(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range1; + 
uvm_page_table_range_t range2; + + NvLength size = 1024 * 1024; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_4K, size, size, &range1), NV_OK); + TEST_CHECK_RET(range1.entry_count == 256); + TEST_CHECK_RET(range1.table->ref_count == 256); + + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_4K, 0, size, &range2), NV_OK); + TEST_CHECK_RET(range2.entry_count == 256); + TEST_CHECK_RET(range2.table->ref_count == 512); + TEST_CHECK_RET(range1.table == range2.table); + // 4k page is second entry in a dual PDE + TEST_CHECK_RET(range1.table == tree.root->entries[0]->entries[0]->entries[0]->entries[1]); + TEST_CHECK_RET(range1.start_index == 256); + TEST_CHECK_RET(range2.start_index == 0); + + uvm_page_tree_put_ptes(&tree, &range1); + TEST_CHECK_RET(range2.table->ref_count == 256); + TEST_CHECK_RET(range2.table == tree.root->entries[0]->entries[0]->entries[0]->entries[1]); + uvm_page_tree_put_ptes(&tree, &range2); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS get_overlapping_dual_pdes(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range4k; + uvm_page_table_range_t range64k; + + NvLength size = 1024 * 1024; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_4K, size, size, &range4k), NV_OK); + TEST_CHECK_RET(range4k.entry_count == 256); + TEST_CHECK_RET(range4k.table->ref_count == 256); + + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, size, size, &range64k), NV_OK); + TEST_CHECK_RET(range64k.entry_count == 16); + TEST_CHECK_RET(range64k.table->ref_count == 16); + // 4k page is second entry in a dual PDE + TEST_CHECK_RET(range64k.table == tree.root->entries[0]->entries[0]->entries[0]->entries[0]); + TEST_CHECK_RET(range64k.start_index == 16); + TEST_CHECK_RET(range4k.start_index == 256); + + uvm_page_tree_put_ptes(&tree, &range64k); + TEST_CHECK_RET(range4k.table->ref_count == 256); + TEST_CHECK_RET(range4k.table == tree.root->entries[0]->entries[0]->entries[0]->entries[1]); + uvm_page_tree_put_ptes(&tree, &range4k); + + UVM_ASSERT(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS split_and_free(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + // 45 = 1 + 2 + 3 + ... 
+ 9 + NvU64 size = 45 * (2 << 20); + NvU32 i; + NvU32 sum = 0; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_2M, 0, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 45); + TEST_CHECK_RET(range.table->depth == 3); + TEST_CHECK_RET(range.start_index == 0); + + for (i = 1; i <= 9; i++) { + range.entry_count = i; + range.start_index = sum; + uvm_page_tree_put_ptes(&tree, &range); + sum += i; + } + UVM_ASSERT(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS check_sizes(uvm_gpu_t *gpu) +{ + NvU32 user_sizes = UVM_PAGE_SIZE_2M; + NvU32 kernel_sizes = UVM_PAGE_SIZE_4K | 256; + + if (UVM_PAGE_SIZE_64K >= PAGE_SIZE) + user_sizes |= UVM_PAGE_SIZE_64K; + if (UVM_PAGE_SIZE_4K >= PAGE_SIZE) + user_sizes |= UVM_PAGE_SIZE_4K; + + TEST_CHECK_RET(gpu->parent->mmu_user_chunk_sizes == user_sizes); + TEST_CHECK_RET(gpu->parent->mmu_kernel_chunk_sizes == kernel_sizes); + + return NV_OK; +} + +static NV_STATUS fast_split_normal(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t parent; + uvm_page_table_range_t child_4k; + uvm_page_table_range_t child_64k; + + NvU64 start = 0; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_entry(&tree, UVM_PAGE_SIZE_2M, start, &parent), NV_OK); + TEST_CHECK_RET(parent.entry_count == 1); + TEST_CHECK_RET(parent.table->depth == 3); + TEST_CHECK_RET(parent.page_size == UVM_PAGE_SIZE_2M); + + MEM_NV_CHECK_RET(test_page_tree_alloc_table(&tree, UVM_PAGE_SIZE_4K, &parent, &child_4k), NV_OK); + TEST_CHECK_RET(child_4k.table->host_parent == parent.table); + TEST_CHECK_RET(child_4k.entry_count == 512); + TEST_CHECK_RET(child_4k.page_size == UVM_PAGE_SIZE_4K); + TEST_CHECK_RET(parent.table->ref_count == 2); + TEST_CHECK_RET(parent.table->entries[1] == child_4k.table); + + MEM_NV_CHECK_RET(test_page_tree_alloc_table(&tree, UVM_PAGE_SIZE_64K, &parent, &child_64k), NV_OK); + TEST_CHECK_RET(child_64k.table->host_parent == parent.table); + TEST_CHECK_RET(child_64k.entry_count == 32); + TEST_CHECK_RET(child_64k.page_size == UVM_PAGE_SIZE_64K); + TEST_CHECK_RET(parent.table->ref_count == 3); + TEST_CHECK_RET(parent.table->entries[0] == child_64k.table); + + uvm_page_tree_put_ptes(&tree, &parent); + TEST_CHECK_RET(parent.table->ref_count == 2); + uvm_page_tree_put_ptes(&tree, &child_4k); + TEST_CHECK_RET(parent.table->entries[1] == NULL); + uvm_page_tree_put_ptes(&tree, &child_64k); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS fast_split_double_backoff(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t parent; + uvm_page_table_range_t child_4k; + uvm_page_table_range_t child_64k; + uvm_page_table_range_t child_64k2; + + NvU64 start = 0; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_entry(&tree, UVM_PAGE_SIZE_2M, start, &parent), NV_OK); + TEST_CHECK_RET(parent.entry_count == 1); + TEST_CHECK_RET(parent.table->depth == 3); + TEST_CHECK_RET(parent.page_size == UVM_PAGE_SIZE_2M); + + MEM_NV_CHECK_RET(test_page_tree_alloc_table(&tree, UVM_PAGE_SIZE_4K, &parent, &child_4k), NV_OK); + TEST_CHECK_RET(child_4k.table->host_parent == parent.table); + TEST_CHECK_RET(child_4k.entry_count == 512); + TEST_CHECK_RET(child_4k.page_size == UVM_PAGE_SIZE_4K); + TEST_CHECK_RET(parent.table->ref_count == 2); + 
TEST_CHECK_RET(parent.table->entries[1] == child_4k.table); + + MEM_NV_CHECK_RET(test_page_tree_alloc_table(&tree, UVM_PAGE_SIZE_64K, &parent, &child_64k), NV_OK); + TEST_CHECK_RET(child_64k.table->host_parent == parent.table); + TEST_CHECK_RET(child_64k.entry_count == 32); + TEST_CHECK_RET(child_64k.page_size == UVM_PAGE_SIZE_64K); + TEST_CHECK_RET(parent.table->ref_count == 3); + TEST_CHECK_RET(parent.table->entries[0] == child_64k.table); + + MEM_NV_CHECK_RET(test_page_tree_alloc_table(&tree, UVM_PAGE_SIZE_64K, &parent, &child_64k2), NV_OK); + TEST_CHECK_RET(child_64k2.table->host_parent == parent.table); + TEST_CHECK_RET(child_64k2.entry_count == 32); + TEST_CHECK_RET(child_64k2.table->ref_count == 64); + TEST_CHECK_RET(child_64k2.page_size == UVM_PAGE_SIZE_64K); + TEST_CHECK_RET(child_64k2.table == child_64k.table); + TEST_CHECK_RET(parent.table->ref_count == 3); + TEST_CHECK_RET(parent.table->entries[0] == child_64k2.table); + + uvm_page_tree_put_ptes(&tree, &child_64k2); + + uvm_page_tree_put_ptes(&tree, &parent); + TEST_CHECK_RET(parent.table->ref_count == 2); + uvm_page_tree_put_ptes(&tree, &child_4k); + TEST_CHECK_RET(parent.table->entries[1] == NULL); + uvm_page_tree_put_ptes(&tree, &child_64k); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS test_tlb_invalidates(uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + uvm_page_tree_t tree; + uvm_page_table_range_t entries[5]; + int i; + + // Depth 4 + NvU64 extent_pte = UVM_PAGE_SIZE_2M; + // Depth 3 + NvU64 extent_pde0 = extent_pte * (1ull << 8); + // Depth 2 + NvU64 extent_pde1 = extent_pde0 * (1ull << 9); + // Depth 1 + NvU64 extent_pde2 = extent_pde1 * (1ull << 9); + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + + fake_tlb_invals_enable(); + + TEST_CHECK_RET(assert_entry_invalidate(&tree, UVM_PAGE_SIZE_4K, 0, 0, true)); + TEST_CHECK_RET(assert_entry_invalidate(&tree, UVM_PAGE_SIZE_4K, 0, 0, true)); + + TEST_CHECK_RET(test_page_tree_get_entry(&tree, UVM_PAGE_SIZE_4K, 0, &entries[0]) == NV_OK); + TEST_CHECK_RET(assert_and_reset_last_invalidate(0, false)); + + TEST_CHECK_RET(assert_entry_no_invalidate(&tree, UVM_PAGE_SIZE_4K, extent_pte - UVM_PAGE_SIZE_4K)); + + TEST_CHECK_RET(assert_entry_invalidate(&tree, UVM_PAGE_SIZE_64K, 0, 3, true)); + + TEST_CHECK_RET(test_page_tree_get_entry(&tree, UVM_PAGE_SIZE_64K, 0, &entries[1]) == NV_OK); + TEST_CHECK_RET(assert_and_reset_last_invalidate(3, false)); + + TEST_CHECK_RET(test_page_tree_get_entry(&tree, UVM_PAGE_SIZE_4K, extent_pde0, &entries[2]) == NV_OK); + TEST_CHECK_RET(assert_and_reset_last_invalidate(2, false)); + + TEST_CHECK_RET(test_page_tree_get_entry(&tree, UVM_PAGE_SIZE_4K, extent_pde1, &entries[3]) == NV_OK); + TEST_CHECK_RET(assert_and_reset_last_invalidate(1, false)); + + TEST_CHECK_RET(test_page_tree_get_entry(&tree, UVM_PAGE_SIZE_4K, extent_pde2, &entries[4]) == NV_OK); + TEST_CHECK_RET(assert_and_reset_last_invalidate(0, false)); + + for (i = 4; i > 1; --i) { + uvm_page_tree_put_ptes(&tree, &entries[i]); + TEST_CHECK_RET(assert_and_reset_last_invalidate(4 - i, true)); + } + + uvm_page_tree_put_ptes(&tree, &entries[0]); + TEST_CHECK_RET(assert_and_reset_last_invalidate(3, true)); + + uvm_page_tree_put_ptes(&tree, &entries[1]); + TEST_CHECK_RET(assert_and_reset_last_invalidate(0, true)); + + fake_tlb_invals_disable(); + + uvm_page_tree_deinit(&tree); + + return status; +} + +static NV_STATUS test_tlb_batch_invalidates_case(uvm_page_tree_t *tree, NvU64 base, NvU64 size, NvU32 min_page_size, NvU32 max_page_size) +{ + 
NV_STATUS status = NV_OK; + uvm_push_t push; + uvm_tlb_batch_t batch; + uvm_gpu_t *gpu = tree->gpu; + int i, j; + + MEM_NV_CHECK_RET(uvm_push_begin_fake(gpu, &push), NV_OK); + + for (i = 1; i < 10; ++i) { + // If invalidate all ends up being used, the expected depth is the + // minimum depth across all the ranges. Start off with the min page size + // as that's the deepest. + NvU32 expected_inval_all_depth = tree->hal->page_table_depth(min_page_size); + NvU64 total_pages = 0; + + fake_tlb_invals_enable(); + + uvm_tlb_batch_begin(tree, &batch); + + for (j = 0; j < i; ++j) { + NvU32 used_max_page_size = (j & 1) ? max_page_size : min_page_size; + NvU32 expected_range_depth = tree->hal->page_table_depth(used_max_page_size); + expected_inval_all_depth = min(expected_inval_all_depth, expected_range_depth); + uvm_tlb_batch_invalidate(&batch, + base + (NvU64)j * 2 * size, + size, + min_page_size | used_max_page_size, + UVM_MEMBAR_NONE); + total_pages += size / min_page_size; + } + + uvm_tlb_batch_end(&batch, &push, UVM_MEMBAR_NONE); + + for (j = 0; j < i; ++j) { + NvU32 used_max_page_size = (j & 1) ? max_page_size : min_page_size; + NvU32 expected_range_depth = tree->hal->page_table_depth(used_max_page_size); + bool allow_inval_all = (total_pages > gpu->parent->tlb_batch.max_pages) || + !gpu->parent->tlb_batch.va_invalidate_supported || + (i > UVM_TLB_BATCH_MAX_ENTRIES); + TEST_CHECK_RET(assert_invalidate_range(base + (NvU64)j * 2 * size, + size, + min_page_size, + allow_inval_all, + expected_range_depth, + expected_inval_all_depth, + false)); + } + + fake_tlb_invals_disable(); + } + + uvm_push_end_fake(&push); + + return status; +} + +static NV_STATUS test_tlb_batch_invalidates(uvm_gpu_t *gpu, const NvU32 *page_sizes, const NvU32 page_sizes_count) +{ + NV_STATUS status = NV_OK; + uvm_page_tree_t tree; + + NvU32 min_index; + NvU32 max_index; + NvU32 size_index; + + static const NvU32 sizes_in_max_pages[] = { 1, 2, 3, 5, 7, 32 }; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, BIG_PAGE_SIZE_PASCAL, &tree), NV_OK); + + for (min_index = 0; min_index < page_sizes_count; ++min_index) { + for (max_index = min_index; max_index < page_sizes_count; ++max_index) { + for (size_index = 0; size_index < ARRAY_SIZE(sizes_in_max_pages); ++size_index) { + NvU32 min_page_size = page_sizes[min_index]; + NvU32 max_page_size = page_sizes[max_index]; + NvU64 size = (NvU64)sizes_in_max_pages[size_index] * max_page_size; + + TEST_CHECK_GOTO(test_tlb_batch_invalidates_case(&tree, + (NvU64)min_index * max_page_size, + size, + min_page_size, + max_page_size) == NV_OK, done); + } + } + } + +done: + uvm_page_tree_deinit(&tree); + + return status; +} + +typedef struct +{ + NvU64 count; + NV_STATUS status; +} test_pte_maker_data_t; + +static NvU64 test_range_vec_pte_maker(uvm_page_table_range_vec_t *range_vec, NvU64 offset, void *void_data) +{ + test_pte_maker_data_t *data = (test_pte_maker_data_t *)void_data; + if (range_vec->page_size * data->count != offset) { + data->status = NV_ERR_INVALID_STATE; + } + ++data->count; + return range_vec->size + offset; +} + +static bool assert_range_vec_ptes(uvm_page_table_range_vec_t *range_vec, bool expecting_cleared) +{ + NvU32 i; + NvU32 entry; + NvU64 offset = 0; + + for (i = 0; i < range_vec->range_count; ++i) { + uvm_page_table_range_t *range = &range_vec->ranges[i]; + + for (entry = 0; entry < range->entry_count; ++entry) { + uvm_gpu_phys_address_t pte_addr = uvm_page_table_range_entry_address(range_vec->tree, range, entry); + NvU64 *pte = (NvU64*)phys_to_virt(pte_addr.address); + 
NvU64 expected_pte = expecting_cleared ? 0 : range_vec->size + offset; + if (*pte != expected_pte) { + UVM_TEST_PRINT("PTE is 0x%llx instead of 0x%llx for offset 0x%llx within range [0x%llx, 0x%llx)\n", + *pte, expected_pte, offset, range_vec->start, range_vec->size); + return false; + } + offset += range_vec->page_size; + } + } + + return true; +} + +static NV_STATUS test_range_vec_write_ptes(uvm_page_table_range_vec_t *range_vec, uvm_membar_t membar) +{ + test_pte_maker_data_t data = { 0 }; + NvU32 page_table_depth = range_vec->tree->hal->page_table_depth(range_vec->page_size); + + fake_tlb_invals_enable(); + + TEST_CHECK_RET(uvm_page_table_range_vec_write_ptes(range_vec, membar, test_range_vec_pte_maker, &data) == NV_OK); + TEST_CHECK_RET(data.status == NV_OK); + TEST_CHECK_RET(data.count == range_vec->size / range_vec->page_size); + TEST_CHECK_RET(assert_invalidate_range_specific(g_last_fake_inval, + range_vec->start, range_vec->size, range_vec->page_size, page_table_depth, membar != UVM_MEMBAR_NONE)); + TEST_CHECK_RET(assert_range_vec_ptes(range_vec, false)); + + fake_tlb_invals_disable(); + + return NV_OK; +} + +static NV_STATUS test_range_vec_clear_ptes(uvm_page_table_range_vec_t *range_vec, uvm_membar_t membar) +{ + NvU32 page_table_depth = range_vec->tree->hal->page_table_depth(range_vec->page_size); + + fake_tlb_invals_enable(); + + TEST_CHECK_RET(uvm_page_table_range_vec_clear_ptes(range_vec, membar) == NV_OK); + TEST_CHECK_RET(assert_and_reset_last_invalidate(page_table_depth, membar != UVM_MEMBAR_NONE)); + TEST_CHECK_RET(assert_range_vec_ptes(range_vec, true)); + + fake_tlb_invals_disable(); + + return NV_OK; +} + +static NV_STATUS test_range_vec_create(uvm_page_tree_t *tree, NvU64 start, NvU64 size, NvU32 page_size, uvm_page_table_range_vec_t **range_vec_out) +{ + uvm_page_table_range_vec_t *range_vec; + uvm_pmm_alloc_flags_t pmm_flags = UVM_PMM_ALLOC_FLAGS_EVICT; + + TEST_CHECK_RET(uvm_page_table_range_vec_create(tree, start, size, page_size, pmm_flags, &range_vec) == NV_OK); + TEST_CHECK_RET(test_range_vec_write_ptes(range_vec, UVM_MEMBAR_NONE) == NV_OK); + TEST_CHECK_RET(test_range_vec_clear_ptes(range_vec, UVM_MEMBAR_GPU) == NV_OK); + TEST_CHECK_RET(test_range_vec_write_ptes(range_vec, UVM_MEMBAR_NONE) == NV_OK); + TEST_CHECK_RET(test_range_vec_write_ptes(range_vec, UVM_MEMBAR_SYS) == NV_OK); + TEST_CHECK_RET(test_range_vec_clear_ptes(range_vec, UVM_MEMBAR_SYS) == NV_OK); + + *range_vec_out = range_vec; + + return NV_OK; +} + +// Test page table range vector APIs. +// Notably the test leaks the page_tree and range_vec on error as it's hard to +// clean up on failure and the destructors would likely assert. 
+static NV_STATUS test_range_vec(uvm_gpu_t *gpu, NvU32 big_page_size, NvU32 page_size) +{ + NV_STATUS status = NV_OK; + uvm_page_tree_t tree; + uvm_page_table_range_vec_t *range_vec; + uvm_page_table_range_vec_t upper_range_vec; + NvU64 pde_coverage; + NvU64 page_table_entries; + NvU64 start; + NvU64 size; + NvU32 i; + NvU64 offsets[4]; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, big_page_size, &tree), NV_OK); + + pde_coverage = uvm_mmu_pde_coverage(&tree, page_size); + page_table_entries = pde_coverage / page_size; + + // Interesting page offsets + offsets[0] = 0; + offsets[1] = 1; + offsets[2] = page_table_entries / 2; + offsets[3] = page_table_entries - 1; + + // A single page + size = page_size; + for (i = 0; i < ARRAY_SIZE(offsets); ++i) { + NvU64 offset = offsets[i]; + start = offset * page_size; + TEST_CHECK_RET(test_range_vec_create(&tree, start, size, page_size, &range_vec) == NV_OK); + TEST_CHECK_RET(range_vec->range_count == 1); + TEST_CHECK_RET(range_vec->ranges[0].start_index == offset); + TEST_CHECK_RET(range_vec->ranges[0].entry_count == 1); + uvm_page_table_range_vec_destroy(range_vec); + } + + // A full page table extent offset by a non-zero multiple of page_size + size = pde_coverage; + for (i = 1; i < ARRAY_SIZE(offsets); ++i) { + NvU64 offset = offsets[i]; + start = pde_coverage + offset * page_size; + TEST_CHECK_RET(test_range_vec_create(&tree, start, size, page_size, &range_vec) == NV_OK); + TEST_CHECK_RET(range_vec->range_count == 2); + TEST_CHECK_RET(range_vec->ranges[0].start_index == offset); + TEST_CHECK_RET(range_vec->ranges[0].entry_count == page_table_entries - offset); + TEST_CHECK_RET(range_vec->ranges[1].start_index == 0); + TEST_CHECK_RET(range_vec->ranges[1].entry_count == offset); + uvm_page_table_range_vec_destroy(range_vec); + } + + // One page on each side of the page table extent boundary + start = pde_coverage - page_size; + size = 2 * page_size; + TEST_CHECK_RET(test_range_vec_create(&tree, start, size, page_size, &range_vec) == NV_OK); + TEST_CHECK_RET(range_vec->range_count == 2); + TEST_CHECK_RET(range_vec->ranges[0].entry_count == 1); + TEST_CHECK_RET(range_vec->ranges[1].entry_count == 1); + uvm_page_table_range_vec_destroy(range_vec); + + // Two pages on each side of the page table extent boundary and a full page + // table extent in between + start = pde_coverage - 2 * page_size; + size = pde_coverage + 4 * page_size; + TEST_CHECK_RET(test_range_vec_create(&tree, start, size, page_size, &range_vec) == NV_OK); + TEST_CHECK_RET(range_vec->range_count == 3); + TEST_CHECK_RET(range_vec->ranges[0].entry_count == 2); + TEST_CHECK_RET(range_vec->ranges[1].start_index == 0); + TEST_CHECK_RET(range_vec->ranges[1].entry_count == page_table_entries); + TEST_CHECK_RET(range_vec->ranges[2].entry_count == 2); + uvm_page_table_range_vec_destroy(range_vec); + + // Test splitting of a single page table extent in half + start = 0; + size = pde_coverage; + TEST_CHECK_RET(test_range_vec_create(&tree, start, size, page_size, &range_vec) == NV_OK); + TEST_CHECK_RET(uvm_page_table_range_vec_split_upper(range_vec, (pde_coverage / 2) - 1, &upper_range_vec) == NV_OK); + TEST_CHECK_RET(range_vec->range_count == 1); + TEST_CHECK_RET(range_vec->start == 0); + TEST_CHECK_RET(range_vec->size == pde_coverage / 2); + TEST_CHECK_RET(range_vec->ranges[0].entry_count == page_table_entries / 2); + TEST_CHECK_RET(upper_range_vec.range_count == 1); + TEST_CHECK_RET(upper_range_vec.start == pde_coverage / 2); + TEST_CHECK_RET(upper_range_vec.size == pde_coverage / 2); + 
TEST_CHECK_RET(upper_range_vec.ranges[0].entry_count == page_table_entries / 2); + uvm_page_table_range_vec_destroy(range_vec); + uvm_page_table_range_vec_deinit(&upper_range_vec); + + // Test splitting of two page table extents into two vectors + size = pde_coverage * 2; + TEST_CHECK_RET(test_range_vec_create(&tree, start, size, page_size, &range_vec) == NV_OK); + TEST_CHECK_RET(uvm_page_table_range_vec_split_upper(range_vec, pde_coverage - 1, &upper_range_vec) == NV_OK); + TEST_CHECK_RET(range_vec->range_count == 1); + TEST_CHECK_RET(range_vec->start == 0); + TEST_CHECK_RET(range_vec->size == pde_coverage); + TEST_CHECK_RET(range_vec->ranges[0].entry_count == page_table_entries); + TEST_CHECK_RET(upper_range_vec.range_count == 1); + TEST_CHECK_RET(upper_range_vec.start == pde_coverage); + TEST_CHECK_RET(upper_range_vec.size == pde_coverage); + TEST_CHECK_RET(upper_range_vec.ranges[0].entry_count == page_table_entries); + uvm_page_table_range_vec_destroy(range_vec); + uvm_page_table_range_vec_deinit(&upper_range_vec); + + // Test uneven split + TEST_CHECK_RET(test_range_vec_create(&tree, start, size, page_size, &range_vec) == NV_OK); + TEST_CHECK_RET(uvm_page_table_range_vec_split_upper(range_vec, + pde_coverage + page_size - 1, + &upper_range_vec) == NV_OK); + TEST_CHECK_RET(range_vec->range_count == 2); + TEST_CHECK_RET(range_vec->start == 0); + TEST_CHECK_RET(range_vec->size == pde_coverage + page_size); + TEST_CHECK_RET(range_vec->ranges[0].entry_count == page_table_entries); + TEST_CHECK_RET(range_vec->ranges[1].entry_count == 1); + TEST_CHECK_RET(upper_range_vec.range_count == 1); + TEST_CHECK_RET(upper_range_vec.start == pde_coverage + page_size); + TEST_CHECK_RET(upper_range_vec.size == pde_coverage - page_size); + TEST_CHECK_RET(upper_range_vec.ranges[0].entry_count == page_table_entries - 1); + uvm_page_table_range_vec_destroy(range_vec); + uvm_page_table_range_vec_deinit(&upper_range_vec); + + // Test splitting a partial page table extent + start = 2 * page_size; + size = pde_coverage - (2 * page_size); + TEST_CHECK_RET(test_range_vec_create(&tree, start, size, page_size, &range_vec) == NV_OK); + TEST_CHECK_RET(uvm_page_table_range_vec_split_upper(range_vec, + start + (size / 2) - 1, + &upper_range_vec) == NV_OK); + TEST_CHECK_RET(range_vec->range_count == 1); + TEST_CHECK_RET(range_vec->start == start); + TEST_CHECK_RET(range_vec->size == size / 2); + TEST_CHECK_RET(range_vec->ranges[0].entry_count == (size / 2) / page_size); + TEST_CHECK_RET(upper_range_vec.range_count == 1); + TEST_CHECK_RET(upper_range_vec.start == start + (size / 2)); + TEST_CHECK_RET(upper_range_vec.size == size / 2); + TEST_CHECK_RET(upper_range_vec.ranges[0].entry_count == (size / 2) / page_size); + uvm_page_table_range_vec_destroy(range_vec); + uvm_page_table_range_vec_deinit(&upper_range_vec); + + uvm_page_tree_deinit(&tree); + + return status; +} + +static NV_STATUS alloc_64k_memory_maxwell(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + + NvLength size = 64 * 1024; + MEM_NV_CHECK_RET(test_page_tree_init(gpu, UVM_PAGE_SIZE_64K, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, 0, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 1); + TEST_CHECK_RET(range.table->depth == 1); + TEST_CHECK_RET(range.start_index == 0); + TEST_CHECK_RET(tree.root->ref_count == 1); + TEST_CHECK_RET(tree.root->entries[0]->ref_count == 1); + TEST_CHECK_RET(range.table == tree.root->entries[0]); + uvm_page_tree_put_ptes(&tree, &range); + 
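+    // Releasing the only outstanding range should drop the root's reference
+    // count back to zero before the tree is torn down.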
UVM_ASSERT(tree.root->ref_count == 0);
+    uvm_page_tree_deinit(&tree);
+
+    return NV_OK;
+}
+
+static NV_STATUS alloc_128k_memory_maxwell(uvm_gpu_t *gpu)
+{
+    uvm_page_tree_t tree;
+    uvm_page_table_range_t range;
+    NvLength size = 128 * 1024;
+
+    // 64k big page mode
+    MEM_NV_CHECK_RET(test_page_tree_init(gpu, UVM_PAGE_SIZE_64K, &tree), NV_OK);
+    MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_64K, 0, size, &range), NV_OK);
+    TEST_CHECK_RET(range.entry_count == 2);
+    TEST_CHECK_RET(range.table->depth == 1);
+    TEST_CHECK_RET(range.start_index == 0);
+    TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_64K);
+    TEST_CHECK_RET(tree.root->ref_count == 1);
+    TEST_CHECK_RET(tree.root->entries[0]->ref_count == 2);
+    TEST_CHECK_RET(range.table == tree.root->entries[0]);
+    uvm_page_tree_put_ptes(&tree, &range);
+    UVM_ASSERT(tree.root->ref_count == 0);
+    uvm_page_tree_deinit(&tree);
+
+    // 128k big page mode
+    MEM_NV_CHECK_RET(test_page_tree_init(gpu, UVM_PAGE_SIZE_128K, &tree), NV_OK);
+    MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_128K, 0, size, &range), NV_OK);
+    TEST_CHECK_RET(range.entry_count == 1);
+    TEST_CHECK_RET(range.table->depth == 1);
+    TEST_CHECK_RET(range.start_index == 0);
+    TEST_CHECK_RET(tree.root->ref_count == 1);
+    TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_128K);
+    TEST_CHECK_RET(tree.root->entries[0]->ref_count == 1);
+    TEST_CHECK_RET(range.table == tree.root->entries[0]);
+    uvm_page_tree_put_ptes(&tree, &range);
+    UVM_ASSERT(tree.root->ref_count == 0);
+    uvm_page_tree_deinit(&tree);
+
+    return NV_OK;
+}
+
+static uvm_mmu_page_table_alloc_t fake_table_alloc(uvm_aperture_t aperture, NvU64 address)
+{
+    return (uvm_mmu_page_table_alloc_t){.addr = uvm_gpu_phys_address(aperture, address) };
+}
+
+// Queries the supported page sizes of the GPU (uvm_gpu_t) and fills the
+// page_sizes array up to MAX_NUM_PAGE_SIZES.
Returns the number of elements in +// page_sizes; +size_t get_page_sizes(uvm_gpu_t *gpu, NvU32 *page_sizes) +{ + unsigned long page_size_log2; + unsigned long page_sizes_bitvec; + size_t count = 0; + uvm_mmu_mode_hal_t *hal = gpu->parent->arch_hal->mmu_mode_hal(BIG_PAGE_SIZE_PASCAL); + + UVM_ASSERT(hal != NULL); + UVM_ASSERT(page_sizes != NULL); + + page_sizes_bitvec = hal->page_sizes(); + + for_each_set_bit(page_size_log2, &page_sizes_bitvec, BITS_PER_LONG) { + NvU32 page_size = (NvU32)(1ULL << page_size_log2); + UVM_ASSERT(count < MAX_NUM_PAGE_SIZES); + page_sizes[count++] = page_size; + } + + return count; +} + +static NV_STATUS entry_test_page_size_pascal(uvm_gpu_t *gpu, size_t page_size) +{ + uvm_mmu_mode_hal_t *hal = gpu->parent->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_64K); + + // Page table entries + if (page_size == UVM_PAGE_SIZE_64K) + TEST_CHECK_RET(hal->unmapped_pte(page_size) == 0x20); + else + TEST_CHECK_RET(hal->unmapped_pte(page_size) == 0); + + return NV_OK; +} + +static NV_STATUS entry_test_page_size_volta(uvm_gpu_t *gpu, size_t page_size) +{ + return entry_test_page_size_pascal(gpu, page_size); +} + +static NV_STATUS entry_test_page_size_ampere(uvm_gpu_t *gpu, size_t page_size) +{ + return entry_test_page_size_volta(gpu, page_size); +} + + + + + + + + + + + + + + + + +typedef NV_STATUS (*entry_test_page_size_func)(uvm_gpu_t *gpu, size_t page_size); + +static NV_STATUS entry_test_maxwell(uvm_gpu_t *gpu) +{ + static const NvU32 big_page_sizes[] = {UVM_PAGE_SIZE_64K, UVM_PAGE_SIZE_128K}; + NvU64 pde_bits; + uvm_mmu_page_table_alloc_t *phys_allocs[2]; + uvm_mmu_page_table_alloc_t alloc_sys = fake_table_alloc(UVM_APERTURE_SYS, 0x9999999000LL); + uvm_mmu_page_table_alloc_t alloc_vid = fake_table_alloc(UVM_APERTURE_VID, 0x1BBBBBB000LL); + uvm_mmu_mode_hal_t *hal; + NvU32 i, j, big_page_size, page_size; + + for (i = 0; i < ARRAY_SIZE(big_page_sizes); i++) { + big_page_size = big_page_sizes[i]; + hal = gpu->parent->arch_hal->mmu_mode_hal(big_page_size); + + memset(phys_allocs, 0, sizeof(phys_allocs)); + + hal->make_pde(&pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits == 0x0L); + + phys_allocs[0] = &alloc_sys; + phys_allocs[1] = &alloc_vid; + hal->make_pde(&pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits == 0x1BBBBBBD99999992LL); + + phys_allocs[0] = &alloc_vid; + phys_allocs[1] = &alloc_sys; + hal->make_pde(&pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits == 0x9999999E1BBBBBB1LL); + + for (j = 0; j <= 2; j++) { + if (j == 0) + page_size = UVM_PAGE_SIZE_4K; + else + page_size = big_page_size; + + if (page_size == UVM_PAGE_SIZE_4K) + TEST_CHECK_RET(hal->unmapped_pte(page_size) == 0); + else + TEST_CHECK_RET(hal->unmapped_pte(page_size) == 0x2); + } + + // uncached, i.e., the sysmem data is not cached in GPU's L2 + // cache. Clear the volatile bit. + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_SYS, + 0x9999999000LL, + UVM_PROT_READ_WRITE_ATOMIC, + UVM_MMU_PTE_FLAGS_NONE) == 0x599999991LL); + + // change to cached, set the volatile bit. 
+ TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_SYS, + 0x9999999000LL, + UVM_PROT_READ_WRITE_ATOMIC, + UVM_MMU_PTE_FLAGS_CACHED) == 0x499999991LL); + + // remove atomic + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_SYS, + 0x9999999000LL, + UVM_PROT_READ_WRITE, + UVM_MMU_PTE_FLAGS_CACHED) == 0x499999991LL); + + // read only + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_SYS, + 0x9999999000LL, + UVM_PROT_READ_ONLY, + UVM_MMU_PTE_FLAGS_CACHED) == 0x8000000499999995LL); + + // local video + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_VID, + 0x1BBBBBB000LL, + UVM_PROT_READ_ONLY, + UVM_MMU_PTE_FLAGS_CACHED) == 0x800000001BBBBBB5LL); + + // peer 0 + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_PEER_0, + 0x1BBBBBB000LL, + UVM_PROT_READ_ONLY, + UVM_MMU_PTE_FLAGS_CACHED) == 0x800000021BBBBBB5LL); + + // peer 7 + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_PEER_7, + 0x1BBBBBB000LL, + UVM_PROT_READ_ONLY, + UVM_MMU_PTE_FLAGS_CACHED) == 0x80000002FBBBBBB5LL); + } + + return NV_OK; +} + +static NV_STATUS entry_test_pascal(uvm_gpu_t *gpu, entry_test_page_size_func entry_test_page_size) +{ + NvU32 page_sizes[MAX_NUM_PAGE_SIZES]; + NvU64 pde_bits[2]; + size_t i, num_page_sizes; + uvm_mmu_page_table_alloc_t *phys_allocs[2] = {NULL, NULL}; + uvm_mmu_page_table_alloc_t alloc_sys = fake_table_alloc(UVM_APERTURE_SYS, 0x399999999999000LL); + uvm_mmu_page_table_alloc_t alloc_vid = fake_table_alloc(UVM_APERTURE_VID, 0x1BBBBBB000LL); + // big versions have [11:8] set as well to test the page table merging + uvm_mmu_page_table_alloc_t alloc_big_sys = fake_table_alloc(UVM_APERTURE_SYS, 0x399999999999900LL); + uvm_mmu_page_table_alloc_t alloc_big_vid = fake_table_alloc(UVM_APERTURE_VID, 0x1BBBBBBB00LL); + + uvm_mmu_mode_hal_t *hal = gpu->parent->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_64K); + + // Make sure cleared PDEs work as expected + hal->make_pde(pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits[0] == 0); + + memset(pde_bits, 0xFF, sizeof(pde_bits)); + hal->make_pde(pde_bits, phys_allocs, 3); + TEST_CHECK_RET(pde_bits[0] == 0 && pde_bits[1] == 0); + + // Sys and vidmem PDEs + phys_allocs[0] = &alloc_sys; + hal->make_pde(pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits[0] == 0x3999999999990C); + + phys_allocs[0] = &alloc_vid; + hal->make_pde(pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits[0] == 0x1BBBBBB0A); + + // Dual PDEs + phys_allocs[0] = &alloc_big_sys; + phys_allocs[1] = &alloc_vid; + hal->make_pde(pde_bits, phys_allocs, 3); + TEST_CHECK_RET(pde_bits[0] == 0x3999999999999C && pde_bits[1] == 0x1BBBBBB0A); + + phys_allocs[0] = &alloc_big_vid; + phys_allocs[1] = &alloc_sys; + hal->make_pde(pde_bits, phys_allocs, 3); + TEST_CHECK_RET(pde_bits[0] == 0x1BBBBBBBA && pde_bits[1] == 0x3999999999990C); + + // uncached, i.e., the sysmem data is not cached in GPU's L2 cache. Clear + // the volatile bit. + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_SYS, + 0x399999999999000LL, + UVM_PROT_READ_WRITE_ATOMIC, + UVM_MMU_PTE_FLAGS_NONE) == 0x3999999999990D); + + // change to cached, set the volatile bit. 
+ TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_SYS, + 0x399999999999000LL, + UVM_PROT_READ_WRITE_ATOMIC, + UVM_MMU_PTE_FLAGS_CACHED) == 0x39999999999905); + + // remove atomic + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_SYS, + 0x399999999999000LL, + UVM_PROT_READ_WRITE, + UVM_MMU_PTE_FLAGS_CACHED) == 0x39999999999985); + + // read only + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_SYS, + 0x399999999999000LL, + UVM_PROT_READ_ONLY, + UVM_MMU_PTE_FLAGS_CACHED) == 0x399999999999C5); + + // local video + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_VID, + 0x1BBBBBB000LL, + UVM_PROT_READ_ONLY, + UVM_MMU_PTE_FLAGS_CACHED) == 0x1BBBBBBC1); + + // peer 0 + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_PEER_0, + 0x1BBBBBB000LL, + UVM_PROT_READ_ONLY, + UVM_MMU_PTE_FLAGS_CACHED) == 0x1BBBBBBC3); + + num_page_sizes = get_page_sizes(gpu, page_sizes); + + for (i = 0; i < num_page_sizes; i++) + TEST_NV_CHECK_RET(entry_test_page_size(gpu, page_sizes[i])); + + return NV_OK; +} + +static NV_STATUS entry_test_volta(uvm_gpu_t *gpu, entry_test_page_size_func entry_test_page_size) +{ + NvU32 page_sizes[MAX_NUM_PAGE_SIZES]; + NvU64 pde_bits[2]; + size_t i, num_page_sizes; + uvm_mmu_page_table_alloc_t *phys_allocs[2] = {NULL, NULL}; + uvm_mmu_page_table_alloc_t alloc_sys = fake_table_alloc(UVM_APERTURE_SYS, 0x399999999999000LL); + uvm_mmu_page_table_alloc_t alloc_vid = fake_table_alloc(UVM_APERTURE_VID, 0x1BBBBBB000LL); + + // big versions have [11:8] set as well to test the page table merging + uvm_mmu_page_table_alloc_t alloc_big_sys = fake_table_alloc(UVM_APERTURE_SYS, 0x399999999999900LL); + uvm_mmu_page_table_alloc_t alloc_big_vid = fake_table_alloc(UVM_APERTURE_VID, 0x1BBBBBBB00LL); + + uvm_mmu_mode_hal_t *hal = gpu->parent->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_64K); + + // Make sure cleared PDEs work as expected + hal->make_pde(pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits[0] == 0); + + memset(pde_bits, 0xFF, sizeof(pde_bits)); + hal->make_pde(pde_bits, phys_allocs, 3); + TEST_CHECK_RET(pde_bits[0] == 0 && pde_bits[1] == 0); + + // Sys and vidmem PDEs + phys_allocs[0] = &alloc_sys; + hal->make_pde(pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits[0] == 0x3999999999990C); + + phys_allocs[0] = &alloc_vid; + hal->make_pde(pde_bits, phys_allocs, 0); + TEST_CHECK_RET(pde_bits[0] == 0x1BBBBBB0A); + + // Dual PDEs + phys_allocs[0] = &alloc_big_sys; + phys_allocs[1] = &alloc_vid; + hal->make_pde(pde_bits, phys_allocs, 3); + TEST_CHECK_RET(pde_bits[0] == 0x3999999999999C && pde_bits[1] == 0x1BBBBBB0A); + + phys_allocs[0] = &alloc_big_vid; + phys_allocs[1] = &alloc_sys; + hal->make_pde(pde_bits, phys_allocs, 3); + TEST_CHECK_RET(pde_bits[0] == 0x1BBBBBBBA && pde_bits[1] == 0x3999999999990C); + + // NO_ATS PDE1 (depth 2) + phys_allocs[0] = &alloc_vid; + hal->make_pde(pde_bits, phys_allocs, 2); + if (g_uvm_global.ats.enabled) + TEST_CHECK_RET(pde_bits[0] == 0x1BBBBBB2A); + else + TEST_CHECK_RET(pde_bits[0] == 0x1BBBBBB0A); + + // peer 0 47-bit physical addressing + TEST_CHECK_RET(hal->make_pte(UVM_APERTURE_PEER_0, + 0x5BBBBBBBB000LL, + UVM_PROT_READ_ONLY, + UVM_MMU_PTE_FLAGS_CACHED) == 0x2DD1BBBBBBC3); + + num_page_sizes = get_page_sizes(gpu, page_sizes); + + for (i = 0; i < num_page_sizes; i++) + TEST_NV_CHECK_RET(entry_test_page_size(gpu, page_sizes[i])); + + return NV_OK; +} + +static NV_STATUS entry_test_ampere(uvm_gpu_t *gpu, entry_test_page_size_func entry_test_page_size) +{ + NvU32 page_sizes[MAX_NUM_PAGE_SIZES]; + NvU32 i, num_page_sizes; + + num_page_sizes = get_page_sizes(gpu, page_sizes); + + for (i = 0; i < 
num_page_sizes; i++) + TEST_NV_CHECK_RET(entry_test_page_size(gpu, page_sizes[i])); + + return NV_OK; +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +static NV_STATUS alloc_4k_maxwell(uvm_gpu_t *gpu) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + NvLength size = 4096; + + // 64k big page mode + MEM_NV_CHECK_RET(test_page_tree_init(gpu, UVM_PAGE_SIZE_64K, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_4K, 0, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 1); + TEST_CHECK_RET(range.table->depth == 1); + TEST_CHECK_RET(range.start_index == 0); + TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_4K); + TEST_CHECK_RET(tree.root->ref_count == 1); + TEST_CHECK_RET(range.table == tree.root->entries[1]); + TEST_CHECK_RET(tree.root->entries[1]->ref_count == 1); + uvm_page_tree_put_ptes(&tree, &range); + UVM_ASSERT(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + // 128k big page mode + MEM_NV_CHECK_RET(test_page_tree_init(gpu, UVM_PAGE_SIZE_128K, &tree), NV_OK); + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, UVM_PAGE_SIZE_4K, 0, size, &range), NV_OK); + TEST_CHECK_RET(range.entry_count == 1); + TEST_CHECK_RET(range.table->depth == 1); + TEST_CHECK_RET(range.start_index == 0); + TEST_CHECK_RET(range.page_size == UVM_PAGE_SIZE_4K); + TEST_CHECK_RET(tree.root->ref_count == 1); + TEST_CHECK_RET(range.table == tree.root->entries[1]); + TEST_CHECK_RET(tree.root->entries[1]->ref_count == 1); + uvm_page_tree_put_ptes(&tree, &range); + UVM_ASSERT(tree.root->ref_count == 0); + uvm_page_tree_deinit(&tree); + + return NV_OK; +} + +static NV_STATUS shrink_test(uvm_gpu_t *gpu, NvU32 big_page_size, NvU32 page_size) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range; + NvU64 addr = 0; + NvLength size; + NvU32 num_pages, new_page_count; + int alignment; + + MEM_NV_CHECK_RET(test_page_tree_init(gpu, big_page_size, &tree), NV_OK); + + for (num_pages = 1; num_pages <= 3; num_pages++) { + for (alignment = 0; alignment <= 2; alignment++) { + size = num_pages * page_size; + + // Get the alignment of the range within a PDE + switch (alignment) { + case 0: // Start of the PDE + addr = 0; + break; + case 1: // In the middle of the PDE + addr = page_size; + break; + case 2: // At the end of the PDE + addr = uvm_mmu_pde_coverage(&tree, page_size) - size; + break; + } + + for (new_page_count = 0; new_page_count <= num_pages; new_page_count++) { + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, page_size, addr, size, &range), NV_OK); + TEST_CHECK_RET(range.table->ref_count == num_pages); + TEST_CHECK_RET(range.entry_count == num_pages); + TEST_CHECK_RET(range.start_index == addr / page_size); + + uvm_page_table_range_shrink(&tree, &range, new_page_count); + + if (new_page_count) { + TEST_CHECK_RET(range.table->ref_count == new_page_count); + TEST_CHECK_RET(range.entry_count == new_page_count); + TEST_CHECK_RET(range.start_index == addr / page_size); + uvm_page_tree_put_ptes(&tree, &range); + } + + TEST_CHECK_RET(tree.root->ref_count == 0); + } + } + } + + uvm_page_tree_deinit(&tree); + return NV_OK; +} + +static NV_STATUS get_upper_test(uvm_gpu_t *gpu, NvU32 big_page_size, NvU32 page_size) +{ + uvm_page_tree_t tree; + uvm_page_table_range_t range, upper_range; + NvU64 addr = 0; + NvLength size; + NvU32 num_pages, num_upper_pages; + int alignment, put_upper_first; + + 
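+    // Sweep a few range sizes and alignments within a single PDE; for each,
+    // carve off an upper sub-range and release the two ranges in both orders,
+    // checking the shared table's reference counts at every step.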
MEM_NV_CHECK_RET(test_page_tree_init(gpu, big_page_size, &tree), NV_OK); + + for (num_pages = 1; num_pages <= 3; num_pages++) { + for (alignment = 0; alignment <= 2; alignment++) { + size = num_pages * page_size; + + // Get the alignment of the range within a PDE + switch (alignment) { + case 0: // Start of the PDE + addr = 0; + break; + case 1: // In the middle of the PDE + addr = page_size; + break; + case 2: // At the end of the PDE + addr = uvm_mmu_pde_coverage(&tree, page_size) - size; + break; + } + + for (num_upper_pages = 1; num_upper_pages <= num_pages; num_upper_pages++) { + for (put_upper_first = 0; put_upper_first <= 1; put_upper_first++) { + MEM_NV_CHECK_RET(test_page_tree_get_ptes(&tree, page_size, addr, size, &range), NV_OK); + TEST_CHECK_RET(range.table->ref_count == num_pages); + TEST_CHECK_RET(range.entry_count == num_pages); + TEST_CHECK_RET(range.start_index == addr / page_size); + + uvm_page_table_range_get_upper(&tree, &range, &upper_range, num_upper_pages); + + TEST_CHECK_RET(range.entry_count == num_pages); + TEST_CHECK_RET(range.start_index == addr / page_size); + + TEST_CHECK_RET(upper_range.entry_count == num_upper_pages); + TEST_CHECK_RET(upper_range.start_index == range.start_index + num_pages - num_upper_pages); + + TEST_CHECK_RET(range.table->ref_count == num_pages + num_upper_pages); + + if (put_upper_first) { + uvm_page_tree_put_ptes(&tree, &upper_range); + TEST_CHECK_RET(range.entry_count == num_pages); + TEST_CHECK_RET(range.start_index == addr / page_size); + TEST_CHECK_RET(range.table->ref_count == num_pages); + uvm_page_tree_put_ptes(&tree, &range); + } + else { + uvm_page_tree_put_ptes(&tree, &range); + TEST_CHECK_RET(upper_range.entry_count == num_upper_pages); + TEST_CHECK_RET(upper_range.start_index == (addr / page_size) + num_pages - num_upper_pages); + TEST_CHECK_RET(range.table->ref_count == num_upper_pages); + uvm_page_tree_put_ptes(&tree, &upper_range); + } + + TEST_CHECK_RET(tree.root->ref_count == 0); + } + } + } + } + + uvm_page_tree_deinit(&tree); + return NV_OK; +} + +static uvm_host_hal_t fake_host_hal = { + .noop = fake_noop, + .wait_for_idle = fake_wait_for_idle, + .membar_sys = fake_membar, + .membar_gpu = fake_membar, + .tlb_invalidate_all = fake_tlb_invalidate_all, + .tlb_invalidate_va = fake_tlb_invalidate_va, +}; +static uvm_ce_hal_t fake_ce_hal = { + .memset_8 = fake_ce_memset_8, + .memcopy = fake_ce_memcopy, +}; + +static NV_STATUS fake_gpu_init(NvU32 host_class, NvU32 ce_class, NvU32 architecture, uvm_gpu_t *fake_gpu) +{ + uvm_parent_gpu_t *fake_parent_gpu = fake_gpu->parent; + + fake_parent_gpu->num_retained_gpus = 1; + + fake_parent_gpu->rm_info.ceClass = ce_class; + fake_parent_gpu->rm_info.hostClass = host_class; + fake_parent_gpu->rm_info.gpuArch = architecture; + + TEST_CHECK_RET(uvm_hal_init_gpu(fake_parent_gpu) == NV_OK); + + uvm_hal_init_properties(fake_parent_gpu); + + // The PTE allocation code expects the address space tree HAL to be present + // (for example, when checking the addressing capabilities of a GPU). + // The selected page size (64K) should work across all supported GPU + // architectures. 
+ fake_gpu->address_space_tree.hal = fake_parent_gpu->arch_hal->mmu_mode_hal(UVM_PAGE_SIZE_64K); + + fake_parent_gpu->host_hal = &fake_host_hal; + fake_parent_gpu->ce_hal = &fake_ce_hal; + + uvm_mmu_init_gpu_chunk_sizes(fake_parent_gpu); + uvm_mmu_init_gpu_peer_addresses(fake_gpu); + + return NV_OK; +} + +static NV_STATUS fake_gpu_init_maxwell(uvm_gpu_t *fake_gpu) +{ + // KEPLER_CHANNEL_GPFIFO_B host class is used for GM10x. + return fake_gpu_init(KEPLER_CHANNEL_GPFIFO_B, + MAXWELL_DMA_COPY_A, + NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GM000, + fake_gpu); +} + +static NV_STATUS fake_gpu_init_pascal(uvm_gpu_t *fake_gpu) +{ + return fake_gpu_init(PASCAL_CHANNEL_GPFIFO_A, + PASCAL_DMA_COPY_A, + NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100, + fake_gpu); +} + +static NV_STATUS fake_gpu_init_volta(uvm_gpu_t *fake_gpu) +{ + return fake_gpu_init(VOLTA_CHANNEL_GPFIFO_A, + VOLTA_DMA_COPY_A, + NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100, + fake_gpu); +} + +static NV_STATUS fake_gpu_init_ampere(uvm_gpu_t *fake_gpu) +{ + return fake_gpu_init(AMPERE_CHANNEL_GPFIFO_A, + AMPERE_DMA_COPY_A, + NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100, + fake_gpu); +} + + + + + + + + + + + +static NV_STATUS maxwell_test_page_tree(uvm_gpu_t *maxwell) +{ + // create a fake Maxwell GPU for this test. + static const NvU32 big_page_sizes[] = {UVM_PAGE_SIZE_64K, UVM_PAGE_SIZE_128K}; + NvU32 i, j, big_page_size, page_size; + + TEST_CHECK_RET(fake_gpu_init_maxwell(maxwell) == NV_OK); + + MEM_NV_CHECK_RET(allocate_root(maxwell), NV_OK); + MEM_NV_CHECK_RET(alloc_64k_memory_maxwell(maxwell), NV_OK); + MEM_NV_CHECK_RET(alloc_128k_memory_maxwell(maxwell), NV_OK); + MEM_NV_CHECK_RET(alloc_4k_maxwell(maxwell), NV_OK); + TEST_CHECK_RET(entry_test_maxwell(maxwell) == NV_OK); + + for (i = 0; i < ARRAY_SIZE(big_page_sizes); i++) { + big_page_size = big_page_sizes[i]; + for (j = 0; j < 2; j++) { + page_size = (j == 0) ? UVM_PAGE_SIZE_4K : big_page_size; + + MEM_NV_CHECK_RET(shrink_test(maxwell, big_page_size, page_size), NV_OK); + MEM_NV_CHECK_RET(get_upper_test(maxwell, big_page_size, page_size), NV_OK); + MEM_NV_CHECK_RET(test_range_vec(maxwell, big_page_size, page_size), NV_OK); + } + } + + return NV_OK; +} + +static NV_STATUS pascal_test_page_tree(uvm_gpu_t *pascal) +{ + // create a fake Pascal GPU for this test. 
+ NvU32 tlb_batch_saved_max_pages; + NvU32 i; + NvU32 page_sizes[MAX_NUM_PAGE_SIZES]; + size_t num_page_sizes; + + TEST_CHECK_RET(fake_gpu_init_pascal(pascal) == NV_OK); + + num_page_sizes = get_page_sizes(pascal, page_sizes); + UVM_ASSERT(num_page_sizes > 0); + + MEM_NV_CHECK_RET(allocate_root(pascal), NV_OK); + MEM_NV_CHECK_RET(alloc_64k_memory(pascal), NV_OK); + MEM_NV_CHECK_RET(alloc_adjacent_64k_memory(pascal), NV_OK); + MEM_NV_CHECK_RET(alloc_adjacent_pde_64k_memory(pascal), NV_OK); + MEM_NV_CHECK_RET(alloc_nearby_pde_64k_memory(pascal), NV_OK); + MEM_NV_CHECK_RET(allocate_then_free_all_16_64k(pascal), NV_OK); + MEM_NV_CHECK_RET(allocate_then_free_8_8_64k(pascal), NV_OK); + MEM_NV_CHECK_RET(get_single_page_2m(pascal), NV_OK); + MEM_NV_CHECK_RET(get_entire_table_4k(pascal), NV_OK); + MEM_NV_CHECK_RET(split_4k_from_2m(pascal), NV_OK); + MEM_NV_CHECK_RET(get_512mb_range(pascal), NV_OK); + MEM_NV_CHECK_RET(get_two_free_apart(pascal), NV_OK); + MEM_NV_CHECK_RET(get_overlapping_dual_pdes(pascal), NV_OK); + MEM_NV_CHECK_RET(split_and_free(pascal), NV_OK); + MEM_NV_CHECK_RET(entry_test_pascal(pascal, entry_test_page_size_pascal), NV_OK); + MEM_NV_CHECK_RET(check_sizes(pascal), NV_OK); + MEM_NV_CHECK_RET(fast_split_normal(pascal), NV_OK); + MEM_NV_CHECK_RET(fast_split_double_backoff(pascal), NV_OK); + MEM_NV_CHECK_RET(test_tlb_invalidates(pascal), NV_OK); + MEM_NV_CHECK_RET(test_tlb_batch_invalidates(pascal, page_sizes, num_page_sizes), NV_OK); + + // Run the test again with a bigger limit on max pages + tlb_batch_saved_max_pages = pascal->parent->tlb_batch.max_pages; + pascal->parent->tlb_batch.max_pages = 1024 * 1024; + MEM_NV_CHECK_RET(test_tlb_batch_invalidates(pascal, page_sizes, num_page_sizes), NV_OK); + pascal->parent->tlb_batch.max_pages = tlb_batch_saved_max_pages; + + // And with per VA invalidates disabled + pascal->parent->tlb_batch.va_invalidate_supported = false; + MEM_NV_CHECK_RET(test_tlb_batch_invalidates(pascal, page_sizes, num_page_sizes), NV_OK); + pascal->parent->tlb_batch.va_invalidate_supported = true; + + for (i = 0; i < num_page_sizes; i++) { + MEM_NV_CHECK_RET(shrink_test(pascal, BIG_PAGE_SIZE_PASCAL, page_sizes[i]), NV_OK); + MEM_NV_CHECK_RET(get_upper_test(pascal, BIG_PAGE_SIZE_PASCAL, page_sizes[i]), NV_OK); + MEM_NV_CHECK_RET(test_range_vec(pascal, BIG_PAGE_SIZE_PASCAL, page_sizes[i]), NV_OK); + } + + return NV_OK; +} + +static NV_STATUS volta_test_page_tree(uvm_gpu_t *volta) +{ + TEST_CHECK_RET(fake_gpu_init_volta(volta) == NV_OK); + + MEM_NV_CHECK_RET(entry_test_volta(volta, entry_test_page_size_volta), NV_OK); + + return NV_OK; +} + +static NV_STATUS ampere_test_page_tree(uvm_gpu_t *ampere) +{ + NvU32 i, tlb_batch_saved_max_pages; + NvU32 page_sizes[MAX_NUM_PAGE_SIZES]; + size_t num_page_sizes; + + TEST_CHECK_RET(fake_gpu_init_ampere(ampere) == NV_OK); + + num_page_sizes = get_page_sizes(ampere, page_sizes); + UVM_ASSERT(num_page_sizes > 0); + + MEM_NV_CHECK_RET(alloc_512m_memory(ampere), NV_OK); + MEM_NV_CHECK_RET(alloc_adjacent_512m_memory(ampere), NV_OK); + MEM_NV_CHECK_RET(get_single_page_512m(ampere), NV_OK); + MEM_NV_CHECK_RET(get_entire_table_512m(ampere), NV_OK); + + // Although there is no support for the 512M page size for managed memory, + // we run tests that split 512M pages into 256x2M pages because UVM handles + // the PTEs for all supported page sizes. 
+ MEM_NV_CHECK_RET(split_2m_from_512m(ampere), NV_OK); + MEM_NV_CHECK_RET(get_2gb_range(ampere), NV_OK); + MEM_NV_CHECK_RET(entry_test_ampere(ampere, entry_test_page_size_ampere), NV_OK); + + // TLB invalidate + MEM_NV_CHECK_RET(test_tlb_invalidates(ampere), NV_OK); + + // TLB batch invalidate + MEM_NV_CHECK_RET(test_tlb_batch_invalidates(ampere, page_sizes, num_page_sizes), NV_OK); + + // Run the test again with a bigger limit on max pages + tlb_batch_saved_max_pages = ampere->parent->tlb_batch.max_pages; + ampere->parent->tlb_batch.max_pages = 1024 * 1024; + MEM_NV_CHECK_RET(test_tlb_batch_invalidates(ampere, page_sizes, num_page_sizes), NV_OK); + ampere->parent->tlb_batch.max_pages = tlb_batch_saved_max_pages; + + // And with per VA invalidates disabled + ampere->parent->tlb_batch.va_invalidate_supported = false; + MEM_NV_CHECK_RET(test_tlb_batch_invalidates(ampere, page_sizes, num_page_sizes), NV_OK); + ampere->parent->tlb_batch.va_invalidate_supported = true; + + for (i = 0; i < num_page_sizes; i++) { + MEM_NV_CHECK_RET(shrink_test(ampere, BIG_PAGE_SIZE_PASCAL, page_sizes[i]), NV_OK); + MEM_NV_CHECK_RET(get_upper_test(ampere, BIG_PAGE_SIZE_PASCAL, page_sizes[i]), NV_OK); + MEM_NV_CHECK_RET(test_range_vec(ampere, BIG_PAGE_SIZE_PASCAL, page_sizes[i]), NV_OK); + } + + return NV_OK; +} + + + + + + + + + + + + + +NV_STATUS uvm_test_page_tree(UVM_TEST_PAGE_TREE_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_parent_gpu_t *parent_gpu; + uvm_gpu_t *gpu; + + parent_gpu = uvm_kvmalloc_zero(sizeof(*parent_gpu)); + if (!parent_gpu) + return NV_ERR_NO_MEMORY; + + gpu = uvm_kvmalloc_zero(sizeof(*gpu)); + if (!gpu) { + uvm_kvfree(parent_gpu); + return NV_ERR_NO_MEMORY; + } + + parent_gpu->gpus[0] = gpu; + gpu->parent = parent_gpu; + + // At least test_tlb_invalidates() relies on global state + // (g_tlb_invalidate_*) so make sure only one test instance can run at a time. + uvm_mutex_lock(&g_uvm_global.global_lock); + + // Allocate the fake TLB tracking state. Notably tests still need to enable + // and disable the tracking with explicit fake_tlb_invals_enable/disable() + // calls. + TEST_NV_CHECK_GOTO(fake_tlb_invals_alloc(), done); + + TEST_NV_CHECK_GOTO(maxwell_test_page_tree(gpu), done); + TEST_NV_CHECK_GOTO(pascal_test_page_tree(gpu), done); + TEST_NV_CHECK_GOTO(volta_test_page_tree(gpu), done); + TEST_NV_CHECK_GOTO(ampere_test_page_tree(gpu), done); + + + + +done: + fake_tlb_invals_free(); + + uvm_mutex_unlock(&g_uvm_global.global_lock); + + uvm_kvfree(gpu); + uvm_kvfree(parent_gpu); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_pascal.c b/kernel-open/nvidia-uvm/uvm_pascal.c new file mode 100644 index 000000000..a2dd460b5 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pascal.c @@ -0,0 +1,101 @@ +/******************************************************************************* + Copyright (c) 2016-2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_gpu.h" +#include "uvm_mem.h" +#include "uvm_pascal_fault_buffer.h" + +static unsigned uvm_force_prefetch_fault_support = 0; +module_param(uvm_force_prefetch_fault_support, uint, S_IRUGO); + +void uvm_hal_pascal_arch_init_properties(uvm_parent_gpu_t *parent_gpu) +{ + parent_gpu->tlb_batch.va_invalidate_supported = true; + + parent_gpu->tlb_batch.va_range_invalidate_supported = false; + + // TODO: Bug 1767241: Run benchmarks to figure out a good number + parent_gpu->tlb_batch.max_pages = 32; + + parent_gpu->utlb_per_gpc_count = uvm_pascal_get_utlbs_per_gpc(parent_gpu); + + parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.gpcCount * parent_gpu->utlb_per_gpc_count; + { + uvm_fault_buffer_entry_t *dummy; + UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8))); + } + + // A single top level PDE on Pascal covers 128 TB and that's the minimum + // size that can be used. + parent_gpu->rm_va_base = 0; + parent_gpu->rm_va_size = 128ull * 1024 * 1024 * 1024 * 1024; + + parent_gpu->uvm_mem_va_base = 384ull * 1024 * 1024 * 1024 * 1024; + parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE; + + parent_gpu->peer_copy_mode = UVM_GPU_PEER_COPY_MODE_VIRTUAL; + + // Not all units on Pascal support 49-bit addressing, including those which + // access channel buffers. + parent_gpu->max_channel_va = 1ULL << 40; + + parent_gpu->max_host_va = 1ULL << 40; + + // Pascal can map sysmem with any page size + parent_gpu->can_map_sysmem_with_large_pages = true; + + // Prefetch faults are disabled by default in Pascal + parent_gpu->prefetch_fault_supported = uvm_force_prefetch_fault_support != 0; + + // Pascal and Volta require post-invalidate membars to flush out HSHUB. See + // bug 1975028. GP10x chips do not have HSHUB, so they don't need any. 
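+    // In the check below, only the GP100 and GP000 implementations are treated
+    // as having HSHUB; every other GP10x implementation gets zero membars.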
+ UVM_ASSERT(parent_gpu->rm_info.gpuArch == NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GP100); + if (parent_gpu->rm_info.gpuImplementation == NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GP100 || + parent_gpu->rm_info.gpuImplementation == NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GP000) { + parent_gpu->num_hshub_tlb_invalidate_membars = 2; + } + else + parent_gpu->num_hshub_tlb_invalidate_membars = 0; + + // Pascal cannot place GPFIFO in vidmem + parent_gpu->gpfifo_in_vidmem_supported = false; + + parent_gpu->replayable_faults_supported = true; + + parent_gpu->non_replayable_faults_supported = false; + + parent_gpu->access_counters_supported = false; + + parent_gpu->fault_cancel_va_supported = false; + + parent_gpu->scoped_atomics_supported = false; + + parent_gpu->sparse_mappings_supported = true; + + parent_gpu->map_remap_larger_page_promotion = false; + + parent_gpu->smc.supported = false; + + parent_gpu->plc_supported = false; +} diff --git a/kernel-open/nvidia-uvm/uvm_pascal_ce.c b/kernel-open/nvidia-uvm/uvm_pascal_ce.c new file mode 100644 index 000000000..369afbd5e --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pascal_ce.c @@ -0,0 +1,149 @@ +/******************************************************************************* + Copyright (c) 2016-2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_push.h" +#include "clc0b5.h" + +void uvm_hal_pascal_ce_offset_out(uvm_push_t *push, NvU64 offset_out) +{ + NV_PUSH_2U(C0B5, OFFSET_OUT_UPPER, HWVALUE(C0B5, OFFSET_OUT_UPPER, UPPER, NvOffset_HI32(offset_out)), + OFFSET_OUT_LOWER, HWVALUE(C0B5, OFFSET_OUT_LOWER, VALUE, NvOffset_LO32(offset_out))); +} + +void uvm_hal_pascal_ce_offset_in_out(uvm_push_t *push, NvU64 offset_in, NvU64 offset_out) +{ + NV_PUSH_4U(C0B5, OFFSET_IN_UPPER, HWVALUE(C0B5, OFFSET_IN_UPPER, UPPER, NvOffset_HI32(offset_in)), + OFFSET_IN_LOWER, HWVALUE(C0B5, OFFSET_IN_LOWER, VALUE, NvOffset_LO32(offset_in)), + OFFSET_OUT_UPPER, HWVALUE(C0B5, OFFSET_OUT_UPPER, UPPER, NvOffset_HI32(offset_out)), + OFFSET_OUT_LOWER, HWVALUE(C0B5, OFFSET_OUT_LOWER, VALUE, NvOffset_LO32(offset_out))); +} + +// Perform an appropriate membar before a semaphore operation. Returns whether +// the semaphore operation should include a flush. 
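+//
+// Summarizing the three cases handled below:
+//  - UVM_PUSH_FLAG_NEXT_MEMBAR_NONE: no membar is needed, so no flush either.
+//  - default (SYS membar): rely on the flush in the semaphore operation itself.
+//  - UVM_PUSH_FLAG_NEXT_MEMBAR_GPU: issue a host wait-for-idle plus membar_gpu
+//    here and skip the CE flush, since CE cannot issue a GPU-only membar.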
+static bool pascal_membar_before_semaphore(uvm_push_t *push) +{ + uvm_gpu_t *gpu; + + if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE)) { + // No MEMBAR requested, don't use a flush. + return false; + } + + if (!uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU)) { + // By default do a MEMBAR SYS and for that we can just use flush on the + // semaphore operation. + return true; + } + + // MEMBAR GPU requested, do it on the HOST and skip the CE flush as CE + // doesn't have this capability. + gpu = uvm_push_get_gpu(push); + gpu->parent->host_hal->wait_for_idle(push); + gpu->parent->host_hal->membar_gpu(push); + + return false; +} + +void uvm_hal_pascal_ce_semaphore_release(uvm_push_t *push, NvU64 gpu_va, NvU32 payload) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + NvU32 flush_value; + NvU32 launch_dma_plc_mode; + bool use_flush; + + use_flush = pascal_membar_before_semaphore(push); + + if (use_flush) + flush_value = HWCONST(C0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE); + else + flush_value = HWCONST(C0B5, LAUNCH_DMA, FLUSH_ENABLE, FALSE); + + NV_PUSH_3U(C0B5, SET_SEMAPHORE_A, HWVALUE(C0B5, SET_SEMAPHORE_A, UPPER, NvOffset_HI32(gpu_va)), + SET_SEMAPHORE_B, HWVALUE(C0B5, SET_SEMAPHORE_B, LOWER, NvOffset_LO32(gpu_va)), + SET_SEMAPHORE_PAYLOAD, payload); + + launch_dma_plc_mode = gpu->parent->ce_hal->plc_mode(); + + NV_PUSH_1U(C0B5, LAUNCH_DMA, flush_value | + HWCONST(C0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NONE) | + HWCONST(C0B5, LAUNCH_DMA, SEMAPHORE_TYPE, RELEASE_ONE_WORD_SEMAPHORE) | + launch_dma_plc_mode); +} + +void uvm_hal_pascal_ce_semaphore_reduction_inc(uvm_push_t *push, NvU64 gpu_va, NvU32 payload) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + NvU32 flush_value; + NvU32 launch_dma_plc_mode; + bool use_flush; + + use_flush = pascal_membar_before_semaphore(push); + + if (use_flush) + flush_value = HWCONST(C0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE); + else + flush_value = HWCONST(C0B5, LAUNCH_DMA, FLUSH_ENABLE, FALSE); + + NV_PUSH_3U(C0B5, SET_SEMAPHORE_A, HWVALUE(C0B5, SET_SEMAPHORE_A, UPPER, NvOffset_HI32(gpu_va)), + SET_SEMAPHORE_B, HWVALUE(C0B5, SET_SEMAPHORE_B, LOWER, NvOffset_LO32(gpu_va)), + SET_SEMAPHORE_PAYLOAD, payload); + + launch_dma_plc_mode = gpu->parent->ce_hal->plc_mode(); + + NV_PUSH_1U(C0B5, LAUNCH_DMA, flush_value | + HWCONST(C0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NONE) | + HWCONST(C0B5, LAUNCH_DMA, SEMAPHORE_TYPE, RELEASE_ONE_WORD_SEMAPHORE) | + HWCONST(C0B5, LAUNCH_DMA, SEMAPHORE_REDUCTION, INC) | + HWCONST(C0B5, LAUNCH_DMA, SEMAPHORE_REDUCTION_SIGN, UNSIGNED) | + HWCONST(C0B5, LAUNCH_DMA, SEMAPHORE_REDUCTION_ENABLE, TRUE) | + launch_dma_plc_mode); +} + +void uvm_hal_pascal_ce_semaphore_timestamp(uvm_push_t *push, NvU64 gpu_va) +{ + uvm_gpu_t *gpu; + NvU32 flush_value; + NvU32 launch_dma_plc_mode; + bool use_flush; + + use_flush = pascal_membar_before_semaphore(push); + + if (use_flush) + flush_value = HWCONST(C0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE); + else + flush_value = HWCONST(C0B5, LAUNCH_DMA, FLUSH_ENABLE, FALSE); + + NV_PUSH_3U(C0B5, SET_SEMAPHORE_A, HWVALUE(C0B5, SET_SEMAPHORE_A, UPPER, NvOffset_HI32(gpu_va)), + SET_SEMAPHORE_B, HWVALUE(C0B5, SET_SEMAPHORE_B, LOWER, NvOffset_LO32(gpu_va)), + SET_SEMAPHORE_PAYLOAD, 0xdeadbeef); + + gpu = uvm_push_get_gpu(push); + launch_dma_plc_mode = gpu->parent->ce_hal->plc_mode(); + + NV_PUSH_1U(C0B5, LAUNCH_DMA, flush_value | + HWCONST(C0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NONE) | + HWCONST(C0B5, LAUNCH_DMA, SEMAPHORE_TYPE, RELEASE_FOUR_WORD_SEMAPHORE) | + launch_dma_plc_mode); +} + diff --git 
a/kernel-open/nvidia-uvm/uvm_pascal_fault_buffer.c b/kernel-open/nvidia-uvm/uvm_pascal_fault_buffer.c new file mode 100644 index 000000000..020f5ce18 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pascal_fault_buffer.c @@ -0,0 +1,301 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "hwref/pascal/gp100/dev_fault.h" +#include "clb069.h" +#include "uvm_pascal_fault_buffer.h" + +typedef struct { + NvU8 bufferEntry[NVB069_FAULT_BUF_SIZE]; +} fault_buffer_entry_b069_t; + +void uvm_hal_pascal_clear_replayable_faults(uvm_parent_gpu_t *parent_gpu, NvU32 get) +{ + // No-op, this function is only used by pulse-based interrupt GPUs. 
+} + +void uvm_hal_pascal_enable_replayable_faults(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *reg; + NvU32 mask; + + reg = parent_gpu->fault_buffer_info.rm_info.replayable.pPmcIntrEnSet; + mask = parent_gpu->fault_buffer_info.rm_info.replayable.replayableFaultMask; + + UVM_GPU_WRITE_ONCE(*reg, mask); +} + +void uvm_hal_pascal_disable_replayable_faults(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *reg; + NvU32 mask; + + reg = parent_gpu->fault_buffer_info.rm_info.replayable.pPmcIntrEnClear; + mask = parent_gpu->fault_buffer_info.rm_info.replayable.replayableFaultMask; + + UVM_GPU_WRITE_ONCE(*reg, mask); +} + +NvU32 uvm_hal_pascal_fault_buffer_read_put(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 put = UVM_GPU_READ_ONCE(*parent_gpu->fault_buffer_info.rm_info.replayable.pFaultBufferPut); + UVM_ASSERT(put < parent_gpu->fault_buffer_info.replayable.max_faults); + + return put; +} + +NvU32 uvm_hal_pascal_fault_buffer_read_get(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 get = UVM_GPU_READ_ONCE(*parent_gpu->fault_buffer_info.rm_info.replayable.pFaultBufferGet); + UVM_ASSERT(get < parent_gpu->fault_buffer_info.replayable.max_faults); + + return get; +} + +void uvm_hal_pascal_fault_buffer_write_get(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + UVM_ASSERT(index < parent_gpu->fault_buffer_info.replayable.max_faults); + + UVM_GPU_WRITE_ONCE(*parent_gpu->fault_buffer_info.rm_info.replayable.pFaultBufferGet, index); +} + +static uvm_fault_access_type_t get_fault_access_type(const NvU32 *fault_entry) +{ + NvU32 hw_access_type_value = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, ACCESS_TYPE); + + switch (hw_access_type_value) + { + case NV_PFAULT_ACCESS_TYPE_READ: + return UVM_FAULT_ACCESS_TYPE_READ; + case NV_PFAULT_ACCESS_TYPE_WRITE: + return UVM_FAULT_ACCESS_TYPE_WRITE; + case NV_PFAULT_ACCESS_TYPE_ATOMIC: + return UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG; + case NV_PFAULT_ACCESS_TYPE_PREFETCH: + return UVM_FAULT_ACCESS_TYPE_PREFETCH; + } + + UVM_ASSERT_MSG(false, "Invalid fault access type value: %d\n", hw_access_type_value); + + return UVM_FAULT_ACCESS_TYPE_COUNT; +} + +static uvm_fault_type_t get_fault_type(const NvU32 *fault_entry) +{ + NvU32 hw_fault_type_value = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, FAULT_TYPE); + + switch (hw_fault_type_value) + { + case NV_PFAULT_FAULT_TYPE_PDE: + return UVM_FAULT_TYPE_INVALID_PDE; + case NV_PFAULT_FAULT_TYPE_PTE: + return UVM_FAULT_TYPE_INVALID_PTE; + case NV_PFAULT_FAULT_TYPE_RO_VIOLATION: + return UVM_FAULT_TYPE_WRITE; + case NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION: + return UVM_FAULT_TYPE_ATOMIC; + + case NV_PFAULT_FAULT_TYPE_PDE_SIZE: + return UVM_FAULT_TYPE_PDE_SIZE; + case NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION: + return UVM_FAULT_TYPE_VA_LIMIT_VIOLATION; + case NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK: + return UVM_FAULT_TYPE_UNBOUND_INST_BLOCK; + case NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION: + return UVM_FAULT_TYPE_PRIV_VIOLATION; + case NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION: + return UVM_FAULT_TYPE_PITCH_MASK_VIOLATION; + case NV_PFAULT_FAULT_TYPE_WORK_CREATION: + return UVM_FAULT_TYPE_WORK_CREATION; + case NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE: + return UVM_FAULT_TYPE_UNSUPPORTED_APERTURE; + case NV_PFAULT_FAULT_TYPE_COMPRESSION_FAILURE: + return UVM_FAULT_TYPE_COMPRESSION_FAILURE; + case NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND: + return UVM_FAULT_TYPE_UNSUPPORTED_KIND; + case NV_PFAULT_FAULT_TYPE_REGION_VIOLATION: + return UVM_FAULT_TYPE_REGION_VIOLATION; + case NV_PFAULT_FAULT_TYPE_POISONED: + return UVM_FAULT_TYPE_POISONED; 
+ } + + UVM_ASSERT_MSG(false, "Invalid fault type value: %d\n", hw_fault_type_value); + + return UVM_FAULT_TYPE_COUNT; +} + +static uvm_fault_client_type_t get_fault_client_type(const NvU32 *fault_entry) +{ + NvU32 hw_client_type_value = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, MMU_CLIENT_TYPE); + + switch (hw_client_type_value) + { + case NV_PFAULT_MMU_CLIENT_TYPE_GPC: + return UVM_FAULT_CLIENT_TYPE_GPC; + case NV_PFAULT_MMU_CLIENT_TYPE_HUB: + return UVM_FAULT_CLIENT_TYPE_HUB; + } + + UVM_ASSERT_MSG(false, "Invalid mmu client type value: %d\n", hw_client_type_value); + + return UVM_FAULT_CLIENT_TYPE_COUNT; +} + +static uvm_aperture_t get_fault_inst_aperture(NvU32 *fault_entry) +{ + NvU32 hw_aperture_value = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, INST_APERTURE); + + switch (hw_aperture_value) + { + case NVB069_FAULT_BUF_ENTRY_INST_APERTURE_VID_MEM: + return UVM_APERTURE_VID; + case NVB069_FAULT_BUF_ENTRY_INST_APERTURE_SYS_MEM_COHERENT: + case NVB069_FAULT_BUF_ENTRY_INST_APERTURE_SYS_MEM_NONCOHERENT: + return UVM_APERTURE_SYS; + } + + UVM_ASSERT_MSG(false, "Invalid inst aperture value: %d\n", hw_aperture_value); + + return UVM_APERTURE_MAX; +} + +static NvU32 *get_fault_buffer_entry(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + fault_buffer_entry_b069_t *buffer_start; + NvU32 *fault_entry; + + UVM_ASSERT(index < parent_gpu->fault_buffer_info.replayable.max_faults); + + buffer_start = (fault_buffer_entry_b069_t *)parent_gpu->fault_buffer_info.rm_info.replayable.bufferAddress; + fault_entry = (NvU32 *)&buffer_start[index]; + + return fault_entry; +} + +void uvm_hal_pascal_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_fault_buffer_entry_t *buffer_entry) +{ + NV_STATUS status; + NvU32 *fault_entry; + NvU64 addr_hi, addr_lo; + NvU64 timestamp_hi, timestamp_lo; + NvU16 gpc_utlb_id; + NvU32 utlb_id; + + BUILD_BUG_ON(NVB069_FAULT_BUF_SIZE > UVM_GPU_MMU_MAX_FAULT_PACKET_SIZE); + status = NV_OK; + + fault_entry = get_fault_buffer_entry(parent_gpu, index); + + // Valid bit must be set before this function is called + UVM_ASSERT(parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, index)); + + addr_hi = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, INST_HI); + addr_lo = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, INST_LO); + buffer_entry->instance_ptr.address = addr_lo + (addr_hi << HWSIZE_MW(B069, FAULT_BUF_ENTRY, INST_LO)); + // HW value contains the 4K page number. 
Shift to build the full address + buffer_entry->instance_ptr.address <<= 12; + + buffer_entry->instance_ptr.aperture = get_fault_inst_aperture(fault_entry); + + addr_hi = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, ADDR_HI); + addr_lo = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, ADDR_LO); + buffer_entry->fault_address = addr_lo + (addr_hi << HWSIZE_MW(B069, FAULT_BUF_ENTRY, ADDR_LO)); + buffer_entry->fault_address = uvm_parent_gpu_canonical_address(parent_gpu, buffer_entry->fault_address); + + timestamp_hi = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, TIMESTAMP_HI); + timestamp_lo = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, TIMESTAMP_LO); + buffer_entry->timestamp = timestamp_lo + (timestamp_hi << HWSIZE_MW(B069, FAULT_BUF_ENTRY, TIMESTAMP_LO)); + + buffer_entry->fault_type = get_fault_type(fault_entry); + + buffer_entry->fault_access_type = get_fault_access_type(fault_entry); + + buffer_entry->fault_source.client_type = get_fault_client_type(fault_entry); + if (buffer_entry->fault_source.client_type == UVM_FAULT_CLIENT_TYPE_HUB) + UVM_ASSERT_MSG(false, "Invalid client type: HUB\n"); + + buffer_entry->fault_source.client_id = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, CLIENT); + BUILD_BUG_ON(sizeof(buffer_entry->fault_source.client_id) * 8 < DRF_SIZE_MW(NVB069_FAULT_BUF_ENTRY_CLIENT)); + + buffer_entry->fault_source.gpc_id = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, GPC_ID); + BUILD_BUG_ON(sizeof(buffer_entry->fault_source.gpc_id) * 8 < DRF_SIZE_MW(NVB069_FAULT_BUF_ENTRY_GPC_ID)); + + gpc_utlb_id = parent_gpu->arch_hal->mmu_client_id_to_utlb_id(buffer_entry->fault_source.client_id); + UVM_ASSERT(gpc_utlb_id < parent_gpu->utlb_per_gpc_count); + + // Compute global uTLB id + utlb_id = buffer_entry->fault_source.gpc_id * parent_gpu->utlb_per_gpc_count + gpc_utlb_id; + UVM_ASSERT(utlb_id < parent_gpu->fault_buffer_info.replayable.utlb_count); + + buffer_entry->fault_source.utlb_id = utlb_id; + + buffer_entry->is_replayable = true; + buffer_entry->is_virtual = true; + buffer_entry->in_protected_mode = false; + buffer_entry->fault_source.mmu_engine_type = UVM_MMU_ENGINE_TYPE_GRAPHICS; + buffer_entry->fault_source.mmu_engine_id = NV_PFAULT_MMU_ENG_ID_GRAPHICS; + buffer_entry->fault_source.ve_id = 0; + + // Automatically clear valid bit for the entry in the fault buffer + uvm_hal_pascal_fault_buffer_entry_clear_valid(parent_gpu, index); +} + +bool uvm_hal_pascal_fault_buffer_entry_is_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + NvU32 *fault_entry; + bool is_valid; + + fault_entry = get_fault_buffer_entry(parent_gpu, index); + + is_valid = READ_HWVALUE_MW(fault_entry, B069, FAULT_BUF_ENTRY, VALID); + + return is_valid; +} + +void uvm_hal_pascal_fault_buffer_entry_clear_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + NvU32 *fault_entry; + + fault_entry = get_fault_buffer_entry(parent_gpu, index); + + WRITE_HWCONST_MW(fault_entry, B069, FAULT_BUF_ENTRY, VALID, FALSE); +} + +NvU32 uvm_hal_pascal_fault_buffer_entry_size(uvm_parent_gpu_t *parent_gpu) +{ + return NVB069_FAULT_BUF_SIZE; +} + +void uvm_hal_pascal_fault_buffer_parse_non_replayable_entry_unsupported(uvm_parent_gpu_t *parent_gpu, + void *fault_packet, + uvm_fault_buffer_entry_t *buffer_entry) +{ + UVM_ASSERT_MSG(false, "fault_buffer_parse_non_replayable_entry called on Pascal GPU\n"); +} diff --git a/kernel-open/nvidia-uvm/uvm_pascal_fault_buffer.h b/kernel-open/nvidia-uvm/uvm_pascal_fault_buffer.h new file mode 100644 index 000000000..23ed67044 --- /dev/null +++ 
b/kernel-open/nvidia-uvm/uvm_pascal_fault_buffer.h @@ -0,0 +1,56 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_HAL_PASCAL_FAULT_BUFFER_H__ +#define __UVM_HAL_PASCAL_FAULT_BUFFER_H__ + +#include "nvtypes.h" +#include "uvm_common.h" +#include "uvm_gpu.h" + +// There are up to 5 TPCs per GPC in Pascal, and there is 1 LTP uTLB per TPC. Besides, there is one RGG uTLB per GPC. +// Each TPC has a number of clients that can make requests to its uTLB: 1xTPCCS, 1xPE, 2xT1. The client ids are local +// to their GPC and the id mapping is linear across TPCs: +// TPC_n has TPCCS_n, PE_n, T1_p, and T1_q, where p=2*n and q=p+1. +// +// NV_PFAULT_CLIENT_GPC_LTP_UTLB_n and NV_PFAULT_CLIENT_GPC_RGG_UTLB enums can be ignored. These will never be reported +// in a fault message, and should never be used in an invalidate. Therefore, we define our own values. +typedef enum { + UVM_PASCAL_GPC_UTLB_ID_RGG = 0, + UVM_PASCAL_GPC_UTLB_ID_LTP0 = 1, + UVM_PASCAL_GPC_UTLB_ID_LTP1 = 2, + UVM_PASCAL_GPC_UTLB_ID_LTP2 = 3, + UVM_PASCAL_GPC_UTLB_ID_LTP3 = 4, + UVM_PASCAL_GPC_UTLB_ID_LTP4 = 5, + + UVM_PASCAL_GPC_UTLB_COUNT, +} uvm_pascal_gpc_utlb_id_t; + +static NvU32 uvm_pascal_get_utlbs_per_gpc(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 utlbs = parent_gpu->rm_info.maxTpcPerGpcCount + 1; + UVM_ASSERT(utlbs <= UVM_PASCAL_GPC_UTLB_COUNT); + return utlbs; +} + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_pascal_host.c b/kernel-open/nvidia-uvm/uvm_pascal_host.c new file mode 100644 index 000000000..7c57aa0c3 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pascal_host.c @@ -0,0 +1,358 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_hal_types.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "uvm_channel.h" +#include "clc06f.h" +#include "clc076.h" + +void uvm_hal_pascal_host_membar_sys(uvm_push_t *push) +{ + NV_PUSH_4U(C06F, MEM_OP_A, 0, + MEM_OP_B, 0, + MEM_OP_C, HWCONST(C06F, MEM_OP_C, MEMBAR_TYPE, SYS_MEMBAR), + MEM_OP_D, HWCONST(C06F, MEM_OP_D, OPERATION, MEMBAR)); +} + +void uvm_hal_pascal_host_membar_gpu(uvm_push_t *push) +{ + NV_PUSH_4U(C06F, MEM_OP_A, 0, + MEM_OP_B, 0, + MEM_OP_C, HWCONST(C06F, MEM_OP_C, MEMBAR_TYPE, MEMBAR), + MEM_OP_D, HWCONST(C06F, MEM_OP_D, OPERATION, MEMBAR)); +} + +void uvm_hal_pascal_host_tlb_invalidate_all(uvm_push_t *push, uvm_gpu_phys_address_t pdb, NvU32 depth, uvm_membar_t membar) +{ + NvU32 aperture_value; + NvU32 page_table_level; + NvU32 pdb_lo; + NvU32 pdb_hi; + NvU32 ack_value = 0; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + // PDE3 is the highest level on Pascal, see the comment in uvm_pascal_mmu.c for details. + UVM_ASSERT_MSG(depth < NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3, "depth %u", depth); + page_table_level = NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 - depth; + + if (membar != UVM_MEMBAR_NONE) { + // If a GPU or SYS membar is needed, ACK_TYPE needs to be set to + // GLOBALLY to make sure all the pending accesses can be picked up by + // the membar. 
+ ack_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_ACK_TYPE, GLOBALLY); + } + + NV_PUSH_4U(C06F, MEM_OP_A, HWCONST(C06F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS), + MEM_OP_B, 0, + MEM_OP_C, HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C06F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE) | + HWVALUE(C06F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + + uvm_hal_tlb_invalidate_membar(push, membar); +} + +void uvm_hal_pascal_host_tlb_invalidate_va(uvm_push_t *push, uvm_gpu_phys_address_t pdb, NvU32 depth, NvU64 base, NvU64 size, NvU32 page_size, uvm_membar_t membar) +{ + NvU32 aperture_value; + NvU32 page_table_level; + NvU32 pdb_lo; + NvU32 pdb_hi; + NvU32 ack_value = 0; + NvU64 va; + NvU32 va_lo; + NvU32 va_hi; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + // PDE3 is the highest level on Pascal, see the comment in uvm_pascal_mmu.c for details. + UVM_ASSERT_MSG(depth < NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3, "depth %u", depth); + page_table_level = NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 - depth; + + if (membar != UVM_MEMBAR_NONE) { + // If a GPU or SYS membar is needed, ACK_TYPE needs to be set to + // GLOBALLY to make sure all the pending accesses can be picked up by + // the membar. 
+ ack_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_ACK_TYPE, GLOBALLY); + } + + UVM_ASSERT_MSG(IS_ALIGNED(page_size, 1 << 12), "page_size 0x%x\n", page_size); + UVM_ASSERT_MSG(IS_ALIGNED(base, page_size), "base 0x%llx page_size 0x%x\n", base, page_size); + UVM_ASSERT_MSG(IS_ALIGNED(size, page_size), "size 0x%llx page_size 0x%x\n", size, page_size); + UVM_ASSERT_MSG(size > 0, "size 0x%llx\n", size); + + base >>= 12; + size >>= 12; + page_size >>= 12; + + for (va = base; va < base + size; va += page_size) { + va_lo = va & HWMASK(C06F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + va_hi = va >> HWSIZE(C06F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + NV_PUSH_4U(C06F, MEM_OP_A, HWCONST(C06F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWVALUE(C06F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO, va_lo), + MEM_OP_B, HWVALUE(C06F, MEM_OP_B, TLB_INVALIDATE_TARGET_ADDR_HI, va_hi), + MEM_OP_C, HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C06F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE_TARGETED) | + HWVALUE(C06F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + } + + uvm_hal_tlb_invalidate_membar(push, membar); +} + +void uvm_hal_pascal_host_tlb_invalidate_test(uvm_push_t *push, uvm_gpu_phys_address_t pdb, + UVM_TEST_INVALIDATE_TLB_PARAMS *params) +{ + NvU32 ack_value = 0; + NvU32 invalidate_gpc_value = 0; + NvU32 aperture_value = 0; + NvU32 pdb_lo = 0; + NvU32 pdb_hi = 0; + NvU32 page_table_level = 0; + uvm_membar_t membar; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + if (params->page_table_level != UvmInvalidatePageTableLevelAll) { + // PDE3 is the highest level on Pascal, see the comment in + // uvm_pascal_mmu.c for details. + page_table_level = min((NvU32)UvmInvalidatePageTableLevelPde3, params->page_table_level) - 1; + } + + if (params->membar != UvmInvalidateTlbMemBarNone) { + // If a GPU or SYS membar is needed, ack_value needs to be set to + // GLOBALLY to make sure all the pending accesses can be picked up by + // the membar. 
+ ack_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_ACK_TYPE, GLOBALLY); + } + + if (params->disable_gpc_invalidate) + invalidate_gpc_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_GPC, DISABLE); + else + invalidate_gpc_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE); + + if (params->target_va_mode == UvmTargetVaModeTargeted) { + NvU64 va = params->va >> 12; + + NvU32 va_lo = va & HWMASK(C06F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + NvU32 va_hi = va >> HWSIZE(C06F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + NV_PUSH_4U(C06F, MEM_OP_A, HWCONST(C06F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWVALUE(C06F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO, va_lo), + MEM_OP_B, HWVALUE(C06F, MEM_OP_B, TLB_INVALIDATE_TARGET_ADDR_HI, va_hi), + MEM_OP_C, HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + invalidate_gpc_value | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C06F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE_TARGETED) | + HWVALUE(C06F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + } + else { + NV_PUSH_4U(C06F, MEM_OP_A, HWCONST(C06F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS), + MEM_OP_B, 0, + MEM_OP_C, HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + invalidate_gpc_value | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C06F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE) | + HWVALUE(C06F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + } + + if (params->membar == UvmInvalidateTlbMemBarSys) + membar = UVM_MEMBAR_SYS; + else if (params->membar == UvmInvalidateTlbMemBarLocal) + membar = UVM_MEMBAR_GPU; + else + membar = UVM_MEMBAR_NONE; + + uvm_hal_tlb_invalidate_membar(push, membar); +} + +void uvm_hal_pascal_replay_faults(uvm_push_t *push, uvm_fault_replay_type_t type) +{ + NvU32 aperture_value; + NvU32 replay_value = 0; + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + uvm_gpu_phys_address_t pdb; + NvU32 va_lo = 0; + NvU32 va_hi = 0; + NvU32 pdb_lo; + NvU32 pdb_hi; + + // MMU will not forward the replay to the uTLBs if the PDB is not in the MMU PDB_ID cache. If + // we have stale entries filling the fault buffer and a context switch has happened, the new + // context may have faulted without writing its entries into the buffer. To force a replay + // regardless of which faults happen to be in the uTLB replay lists, we use the PDB of the + // channel used to push the replay, which is guaranteed to be in the cache as it is required + // to be resident for the channel to push the method. In order to minimize the performance hit + // of the invalidation, we just invalidate PTEs for address 0x0. 
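+    // As a result, the targeted invalidate pushed below leaves va_lo/va_hi at
+    // 0 and restricts the invalidate to PTE_ONLY, so the only side effect of
+    // forcing the replay is dropping the PTE for VA 0x0 from the TLBs.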
+ UVM_ASSERT_MSG(type == UVM_FAULT_REPLAY_TYPE_START || type == UVM_FAULT_REPLAY_TYPE_START_ACK_ALL, + "replay_type: %u\n", type); + pdb = uvm_page_tree_pdb(&gpu->address_space_tree)->addr; + + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + if (type == UVM_FAULT_REPLAY_TYPE_START) + replay_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_REPLAY, START); + else if (type == UVM_FAULT_REPLAY_TYPE_START_ACK_ALL) + replay_value = HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_REPLAY, START_ACK_ALL); + + NV_PUSH_4U(C06F, MEM_OP_A, HWCONST(C06F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWVALUE(C06F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO, va_lo), + MEM_OP_B, HWVALUE(C06F, MEM_OP_B, TLB_INVALIDATE_TARGET_ADDR_HI, va_hi), + MEM_OP_C, HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C06F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWCONST(C06F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, PTE_ONLY) | + aperture_value | + replay_value, + MEM_OP_D, HWCONST(C06F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE_TARGETED) | + HWVALUE(C06F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); +} + +static NvU32 instance_ptr_aperture_type_to_hw_value(uvm_aperture_t aperture) +{ + switch (aperture) + { + case UVM_APERTURE_SYS: + return HWCONST(C076, FAULT_CANCEL_A, INST_APERTURE, SYS_MEM_COHERENT); + case UVM_APERTURE_VID: + return HWCONST(C076, FAULT_CANCEL_A, INST_APERTURE, VID_MEM); + default: + UVM_ASSERT_MSG(false, "Invalid aperture_type %d\n", aperture); + } + + return 0; +} + +void uvm_hal_pascal_host_init(uvm_push_t *push) +{ + if (uvm_channel_is_ce(push->channel)) + NV_PUSH_1U(C076, SET_OBJECT, GP100_UVM_SW); +} + +static void instance_ptr_address_to_hw_values(NvU64 instance_ptr_address, + NvU32 *instance_ptr_lo, + NvU32 *instance_ptr_hi) +{ + // instance_ptr must be 4K aligned + UVM_ASSERT_MSG(IS_ALIGNED(instance_ptr_address, 1 << 12), "instance_ptr 0x%llx\n", instance_ptr_address); + instance_ptr_address >>= 12; + + *instance_ptr_lo = instance_ptr_address & HWMASK(C076, FAULT_CANCEL_A, INST_LOW); + *instance_ptr_hi = instance_ptr_address >> HWSIZE(C076, FAULT_CANCEL_A, INST_LOW); +} + +void uvm_hal_pascal_cancel_faults_global(uvm_push_t *push, uvm_gpu_phys_address_t instance_ptr) +{ + NvU32 instance_ptr_lo, instance_ptr_hi; + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + // Global cancellations affect all MIG instances, not only the one that + // originated the fault to be cancelled + UVM_ASSERT(!gpu->parent->smc.enabled); + + instance_ptr_address_to_hw_values(instance_ptr.address, &instance_ptr_lo, &instance_ptr_hi); + + NV_PUSH_3U(C076, FAULT_CANCEL_A, HWVALUE(C076, FAULT_CANCEL_A, INST_LOW, instance_ptr_lo) | + instance_ptr_aperture_type_to_hw_value(instance_ptr.aperture), + FAULT_CANCEL_B, HWVALUE(C076, FAULT_CANCEL_B, INST_HI, instance_ptr_hi), + FAULT_CANCEL_C, HWCONST(C076, FAULT_CANCEL_C, MODE, GLOBAL)); +} + +void uvm_hal_pascal_cancel_faults_targeted(uvm_push_t *push, + uvm_gpu_phys_address_t instance_ptr, + NvU32 gpc_id, + NvU32 client_id) +{ + NvU32 instance_ptr_lo, instance_ptr_hi; + + 
instance_ptr_address_to_hw_values(instance_ptr.address, &instance_ptr_lo, &instance_ptr_hi); + + NV_PUSH_3U(C076, FAULT_CANCEL_A, HWVALUE(C076, FAULT_CANCEL_A, INST_LOW, instance_ptr_lo) | + instance_ptr_aperture_type_to_hw_value(instance_ptr.aperture), + FAULT_CANCEL_B, HWVALUE(C076, FAULT_CANCEL_B, INST_HI, instance_ptr_hi), + FAULT_CANCEL_C, HWVALUE(C076, FAULT_CANCEL_C, CLIENT_ID, client_id) | + HWVALUE(C076, FAULT_CANCEL_C, GPC_ID, gpc_id) | + HWCONST(C076, FAULT_CANCEL_C, MODE, TARGETED)); +} diff --git a/kernel-open/nvidia-uvm/uvm_pascal_mmu.c b/kernel-open/nvidia-uvm/uvm_pascal_mmu.c new file mode 100644 index 000000000..8b17319e9 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pascal_mmu.c @@ -0,0 +1,439 @@ +/******************************************************************************* + Copyright (c) 2015-2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + +// For Pascal, UVM page tree 'depth' maps to hardware as follows: +// +// UVM depth HW level VA bits +// 0 PDE3 48:47 +// 1 PDE2 46:38 +// 2 PDE1 37:29 +// 3 PDE0 (dual 64k/4k PDE, or 2M PTE) 28:21 +// 4 PTE_64K / PTE_4K 20:16 / 20:12 + +#include "uvm_types.h" +#include "uvm_forward_decl.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_mmu.h" +#include "uvm_push_macros.h" +#include "uvm_pascal_fault_buffer.h" +#include "hwref/pascal/gp100/dev_fault.h" +#include "hwref/pascal/gp100/dev_fb.h" +#include "hwref/pascal/gp100/dev_mmu.h" + +#define MMU_BIG 0 +#define MMU_SMALL 1 + +static NvU32 entries_per_index_pascal(NvU32 depth) +{ + UVM_ASSERT(depth < 5); + if (depth == 3) + return 2; + return 1; +} + +static NvLength entry_offset_pascal(NvU32 depth, NvU32 page_size) +{ + UVM_ASSERT(depth < 5); + if (page_size == UVM_PAGE_SIZE_4K && depth == 3) + return MMU_SMALL; + return MMU_BIG; +} + +static NvU64 single_pde_pascal(uvm_mmu_page_table_alloc_t *phys_alloc) +{ + NvU64 pde_bits = 0; + + if (phys_alloc != NULL) { + NvU64 address = phys_alloc->addr.address >> NV_MMU_VER2_PDE_ADDRESS_SHIFT; + pde_bits |= HWCONST64(_MMU_VER2, PDE, IS_PDE, TRUE) | + HWCONST64(_MMU_VER2, PDE, VOL, TRUE); + + switch (phys_alloc->addr.aperture) { + case UVM_APERTURE_SYS: + pde_bits |= HWCONST64(_MMU_VER2, PDE, APERTURE, SYSTEM_COHERENT_MEMORY) | + HWVALUE64(_MMU_VER2, PDE, ADDRESS_SYS, address); + break; + case UVM_APERTURE_VID: + pde_bits |= HWCONST64(_MMU_VER2, PDE, APERTURE, VIDEO_MEMORY) | + HWVALUE64(_MMU_VER2, PDE, ADDRESS_VID, address); + break; + default: + UVM_ASSERT_MSG(0, "Invalid aperture: %d\n", phys_alloc->addr.aperture); + break; + } + } + + return pde_bits; +} + +static NvU64 big_half_pde_pascal(uvm_mmu_page_table_alloc_t *phys_alloc) +{ + NvU64 pde_bits = 0; + + if (phys_alloc != NULL) { + NvU64 address = phys_alloc->addr.address >> NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT; + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, VOL_BIG, TRUE); + + switch (phys_alloc->addr.aperture) { + case UVM_APERTURE_SYS: + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, APERTURE_BIG, SYSTEM_COHERENT_MEMORY) | + HWVALUE64(_MMU_VER2, DUAL_PDE, ADDRESS_BIG_SYS, address); + break; + case UVM_APERTURE_VID: + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, APERTURE_BIG, VIDEO_MEMORY) | + HWVALUE64(_MMU_VER2, DUAL_PDE, ADDRESS_BIG_VID, address); + break; + default: + UVM_ASSERT_MSG(0, "Invalid big aperture %d\n", phys_alloc->addr.aperture); + break; + } + } + + return pde_bits; +} + +static NvU64 small_half_pde_pascal(uvm_mmu_page_table_alloc_t *phys_alloc) +{ + NvU64 pde_bits = 0; + + if (phys_alloc != NULL) { + NvU64 address = phys_alloc->addr.address >> NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT; + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, VOL_SMALL, TRUE); + + switch (phys_alloc->addr.aperture) { + case UVM_APERTURE_SYS: + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, APERTURE_SMALL, SYSTEM_COHERENT_MEMORY); + pde_bits |= HWVALUE64(_MMU_VER2, DUAL_PDE, ADDRESS_SMALL_SYS, address); + break; + case UVM_APERTURE_VID: + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, APERTURE_SMALL, VIDEO_MEMORY); + pde_bits |= HWVALUE64(_MMU_VER2, DUAL_PDE, ADDRESS_SMALL_VID, address); + break; + default: + UVM_ASSERT_MSG(0, "Invalid small aperture %d\n", phys_alloc->addr.aperture); + break; + } + } + + return pde_bits; +} + +static void make_pde_pascal(void *entry, uvm_mmu_page_table_alloc_t **phys_allocs, NvU32 depth) +{ + NvU32 entry_count = 
entries_per_index_pascal(depth); + NvU64 *entry_bits = (NvU64 *)entry; + + if (entry_count == 1) { + *entry_bits = single_pde_pascal(*phys_allocs); + } + else if (entry_count == 2) { + entry_bits[MMU_BIG] = big_half_pde_pascal(phys_allocs[MMU_BIG]); + entry_bits[MMU_SMALL] = small_half_pde_pascal(phys_allocs[MMU_SMALL]); + + // This entry applies to the whole dual PDE but is stored in the lower bits + entry_bits[MMU_BIG] |= HWCONST64(_MMU_VER2, DUAL_PDE, IS_PDE, TRUE); + } + else { + UVM_ASSERT_MSG(0, "Invalid number of entries per index: %d\n", entry_count); + } +} + +static NvLength entry_size_pascal(NvU32 depth) +{ + UVM_ASSERT(depth < 5); + if (depth == 3) + return 16; + else + return 8; +} + +static NvU32 index_bits_pascal(NvU32 depth, NvU32 page_size) +{ + static const NvU32 bit_widths[] = {2, 9, 9, 8}; + // some code paths keep on querying this until they get a 0, meaning only the page offset remains. + UVM_ASSERT(depth < 5); + if (depth < 4) { + return bit_widths[depth]; + } + else if (depth == 4) { + switch (page_size) { + case UVM_PAGE_SIZE_4K: + return 9; + case UVM_PAGE_SIZE_64K: + return 5; + default: + break; + } + } + return 0; +} + +static NvU32 num_va_bits_pascal(void) +{ + return 49; +} + +static NvLength allocation_size_pascal(NvU32 depth, NvU32 page_size) +{ + UVM_ASSERT(depth < 5); + if (depth == 4 && page_size == UVM_PAGE_SIZE_64K) + return 256; + // depth 0 requires only a 32 byte allocation, but it must be 4k aligned + return 4096; +} + +static NvU32 page_table_depth_pascal(NvU32 page_size) +{ + if (page_size == UVM_PAGE_SIZE_2M) + return 3; + else + return 4; +} + +static NvU32 page_sizes_pascal(void) +{ + return UVM_PAGE_SIZE_2M | UVM_PAGE_SIZE_64K | UVM_PAGE_SIZE_4K; +} + +static NvU64 unmapped_pte_pascal(NvU32 page_size) +{ + // Setting the privilege bit on an otherwise-zeroed big PTE causes the + // corresponding 4k PTEs to be ignored. This allows the invalidation of a + // mixed PDE range to be much faster. + if (page_size != UVM_PAGE_SIZE_64K) + return 0; + + // When VALID == 0, MMU still reads the VOL and PRIV fields. VOL == 1 + // indicates that the PTE is sparse, so make sure we don't use it. 
+ return HWCONST64(_MMU_VER2, PTE, VALID, FALSE) | + HWCONST64(_MMU_VER2, PTE, VOL, FALSE) | + HWCONST64(_MMU_VER2, PTE, PRIVILEGE, TRUE); +} + +static NvU64 make_pte_pascal(uvm_aperture_t aperture, NvU64 address, uvm_prot_t prot, NvU64 flags) +{ + NvU8 aperture_bits = 0; + NvU64 pte_bits = 0; + + UVM_ASSERT(prot != UVM_PROT_NONE); + UVM_ASSERT((flags & ~UVM_MMU_PTE_FLAGS_MASK) == 0); + + // valid 0:0 + pte_bits |= HWCONST64(_MMU_VER2, PTE, VALID, TRUE); + + // aperture 2:1 + if (aperture == UVM_APERTURE_SYS) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_SYSTEM_COHERENT_MEMORY; + else if (aperture == UVM_APERTURE_VID) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_VIDEO_MEMORY; + else if (aperture >= UVM_APERTURE_PEER_0 && aperture <= UVM_APERTURE_PEER_7) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_PEER_MEMORY; + else + UVM_ASSERT_MSG(0, "Invalid aperture: %d\n", aperture); + + pte_bits |= HWVALUE64(_MMU_VER2, PTE, APERTURE, aperture_bits); + + // volatile 3:3 + if (flags & UVM_MMU_PTE_FLAGS_CACHED) + pte_bits |= HWCONST64(_MMU_VER2, PTE, VOL, FALSE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, VOL, TRUE); + + // encrypted 4:4 + pte_bits |= HWCONST64(_MMU_VER2, PTE, ENCRYPTED, FALSE); + + // privilege 5:5 + pte_bits |= HWCONST64(_MMU_VER2, PTE, PRIVILEGE, FALSE); + + // read only 6:6 + if (prot == UVM_PROT_READ_ONLY) + pte_bits |= HWCONST64(_MMU_VER2, PTE, READ_ONLY, TRUE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, READ_ONLY, FALSE); + + // atomic disable 7:7 + if (prot == UVM_PROT_READ_WRITE_ATOMIC) + pte_bits |= HWCONST64(_MMU_VER2, PTE, ATOMIC_DISABLE, FALSE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, ATOMIC_DISABLE, TRUE); + + address >>= NV_MMU_VER2_PTE_ADDRESS_SHIFT; + if (aperture == UVM_APERTURE_SYS) { + // sys address 53:8 + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_SYS, address); + } + else { + // vid address 32:8 + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_VID, address); + + + // peer id 35:33 + if (aperture != UVM_APERTURE_VID) + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_VID_PEER, UVM_APERTURE_PEER_ID(aperture)); + + // comptagline 53:36 + pte_bits |= HWVALUE64(_MMU_VER2, PTE, COMPTAGLINE, 0); + } + + pte_bits |= HWVALUE64(_MMU_VER2, PTE, KIND, NV_MMU_PTE_KIND_PITCH); + + return pte_bits; +} + +static NvU64 make_sked_reflected_pte_pascal(void) +{ + NvU64 pte_bits = 0; + + pte_bits |= HWCONST64(_MMU_VER2, PTE, VALID, TRUE); + pte_bits |= HWVALUE64(_MMU_VER2, PTE, KIND, NV_MMU_PTE_KIND_SMSKED_MESSAGE); + + return pte_bits; +} + +static NvU64 make_sparse_pte_pascal(void) +{ + return HWCONST64(_MMU_VER2, PTE, VALID, FALSE) | + HWCONST64(_MMU_VER2, PTE, VOL, TRUE); +} + +static NvU64 poisoned_pte_pascal(void) +{ + // An invalid PTE won't be fatal from faultable units like SM, which is the + // most likely source of bad PTE accesses. + + // Engines with priv accesses won't fault on the priv PTE, so add a backup + // mechanism using an impossible memory address. MMU will trigger an + // interrupt when it detects a bad physical address. + // + // This address has to fit within 37 bits (max address width of vidmem) and + // be aligned to page_size. 
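+    // The constant below meets both constraints: 0x1bad000000 is below
+    // 1ULL << 37 (0x2000000000), and its low 24 bits are zero, so it is
+    // aligned to every supported page size (4K, 64K and 2M).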
+ NvU64 phys_addr = 0x1bad000000ULL; + + NvU64 pte_bits = make_pte_pascal(UVM_APERTURE_VID, phys_addr, UVM_PROT_READ_ONLY, UVM_MMU_PTE_FLAGS_NONE); + return WRITE_HWCONST64(pte_bits, _MMU_VER2, PTE, PRIVILEGE, TRUE); +} + +static uvm_mmu_mode_hal_t pascal_mmu_mode_hal = +{ + .make_pte = make_pte_pascal, + .make_sked_reflected_pte = make_sked_reflected_pte_pascal, + .make_sparse_pte = make_sparse_pte_pascal, + .unmapped_pte = unmapped_pte_pascal, + .poisoned_pte = poisoned_pte_pascal, + .make_pde = make_pde_pascal, + .entry_size = entry_size_pascal, + .index_bits = index_bits_pascal, + .entries_per_index = entries_per_index_pascal, + .entry_offset = entry_offset_pascal, + .num_va_bits = num_va_bits_pascal, + .allocation_size = allocation_size_pascal, + .page_table_depth = page_table_depth_pascal, + .page_sizes = page_sizes_pascal +}; + +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_pascal(NvU32 big_page_size) +{ + UVM_ASSERT(big_page_size == UVM_PAGE_SIZE_64K || big_page_size == UVM_PAGE_SIZE_128K); + + // TODO: Bug 1789555: RM should reject the creation of GPU VA spaces with + // 128K big page size for Pascal+ GPUs + if (big_page_size == UVM_PAGE_SIZE_128K) + return NULL; + + return &pascal_mmu_mode_hal; +} + +void uvm_hal_pascal_mmu_enable_prefetch_faults(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *prefetch_control; + NvU32 prefetch_control_value; + + prefetch_control = parent_gpu->fault_buffer_info.rm_info.replayable.pPrefetchCtrl; + + prefetch_control_value = UVM_GPU_READ_ONCE(*prefetch_control); + prefetch_control_value = WRITE_HWCONST(prefetch_control_value, _PFB_PRI_MMU_PAGE, FAULT_CTRL, PRF_FILTER, SEND_ALL); + UVM_GPU_WRITE_ONCE(*prefetch_control, prefetch_control_value); +} + +void uvm_hal_pascal_mmu_disable_prefetch_faults(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *prefetch_control; + NvU32 prefetch_control_value; + + prefetch_control = parent_gpu->fault_buffer_info.rm_info.replayable.pPrefetchCtrl; + + prefetch_control_value = UVM_GPU_READ_ONCE(*prefetch_control); + prefetch_control_value = WRITE_HWCONST(prefetch_control_value, _PFB_PRI_MMU_PAGE, FAULT_CTRL, PRF_FILTER, SEND_NONE); + UVM_GPU_WRITE_ONCE(*prefetch_control, prefetch_control_value); +} + +NvU16 uvm_hal_pascal_mmu_client_id_to_utlb_id(NvU16 client_id) +{ + switch (client_id) { + case NV_PFAULT_CLIENT_GPC_RAST: + case NV_PFAULT_CLIENT_GPC_GCC: + case NV_PFAULT_CLIENT_GPC_GPCCS: + return UVM_PASCAL_GPC_UTLB_ID_RGG; + case NV_PFAULT_CLIENT_GPC_PE_0: + case NV_PFAULT_CLIENT_GPC_TPCCS_0: + case NV_PFAULT_CLIENT_GPC_L1_0: + case NV_PFAULT_CLIENT_GPC_T1_0: + case NV_PFAULT_CLIENT_GPC_L1_1: + case NV_PFAULT_CLIENT_GPC_T1_1: + return UVM_PASCAL_GPC_UTLB_ID_LTP0; + case NV_PFAULT_CLIENT_GPC_PE_1: + case NV_PFAULT_CLIENT_GPC_TPCCS_1: + case NV_PFAULT_CLIENT_GPC_L1_2: + case NV_PFAULT_CLIENT_GPC_T1_2: + case NV_PFAULT_CLIENT_GPC_L1_3: + case NV_PFAULT_CLIENT_GPC_T1_3: + return UVM_PASCAL_GPC_UTLB_ID_LTP1; + case NV_PFAULT_CLIENT_GPC_PE_2: + case NV_PFAULT_CLIENT_GPC_TPCCS_2: + case NV_PFAULT_CLIENT_GPC_L1_4: + case NV_PFAULT_CLIENT_GPC_T1_4: + case NV_PFAULT_CLIENT_GPC_L1_5: + case NV_PFAULT_CLIENT_GPC_T1_5: + return UVM_PASCAL_GPC_UTLB_ID_LTP2; + case NV_PFAULT_CLIENT_GPC_PE_3: + case NV_PFAULT_CLIENT_GPC_TPCCS_3: + case NV_PFAULT_CLIENT_GPC_L1_6: + case NV_PFAULT_CLIENT_GPC_T1_6: + case NV_PFAULT_CLIENT_GPC_L1_7: + case NV_PFAULT_CLIENT_GPC_T1_7: + return UVM_PASCAL_GPC_UTLB_ID_LTP3; + case NV_PFAULT_CLIENT_GPC_PE_4: + case NV_PFAULT_CLIENT_GPC_TPCCS_4: + case NV_PFAULT_CLIENT_GPC_L1_8: + case NV_PFAULT_CLIENT_GPC_T1_8: 
+ case NV_PFAULT_CLIENT_GPC_L1_9: + case NV_PFAULT_CLIENT_GPC_T1_9: + return UVM_PASCAL_GPC_UTLB_ID_LTP4; + default: + UVM_ASSERT_MSG(false, "Invalid client value: 0x%x\n", client_id); + } + + return 0; +} diff --git a/kernel-open/nvidia-uvm/uvm_peer_identity_mappings_test.c b/kernel-open/nvidia-uvm/uvm_peer_identity_mappings_test.c new file mode 100644 index 000000000..ed3c8e2c7 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_peer_identity_mappings_test.c @@ -0,0 +1,172 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_test.h" +#include "uvm_va_space.h" +#include "uvm_mem.h" +#include "uvm_push.h" +#include "uvm_hal.h" + +#define MEM_ALLOCATION_SIZE (4 * 1024 * 1024) + +static NV_STATUS try_peer_access_remote_gpu_memory(uvm_gpu_t *local_gpu, uvm_gpu_t *peer_gpu) +{ + NV_STATUS status = NV_OK; + uvm_mem_t *vidmem = NULL; + uvm_mem_t *sysmem = NULL; + uvm_push_t push; + uvm_gpu_address_t local_gpu_sysmem = {0}; + uvm_gpu_address_t peer_gpu_sysmem = {0}; + uvm_gpu_address_t peer_gpu_vidmem = {0}; + void *cpu_va = NULL; + volatile NvU32 *cpu_array; + NvU32 i; + + // allocate CPU memory + status = uvm_mem_alloc_sysmem_and_map_cpu_kernel(MEM_ALLOCATION_SIZE, current->mm, &sysmem); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + + // get CPU address + cpu_va = uvm_mem_get_cpu_addr_kernel(sysmem); + TEST_CHECK_GOTO(cpu_va != 0, cleanup); + cpu_array = (volatile NvU32 *)cpu_va; + + // map sysmem to both GPUs + status = uvm_mem_map_gpu_kernel(sysmem, local_gpu); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + + status = uvm_mem_map_gpu_kernel(sysmem, peer_gpu); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + + // get local GPU address for the sysmem + local_gpu_sysmem = uvm_mem_gpu_address_virtual_kernel(sysmem, local_gpu); + TEST_CHECK_GOTO(local_gpu_sysmem.address != 0, cleanup); + + peer_gpu_sysmem = uvm_mem_gpu_address_virtual_kernel(sysmem, peer_gpu); + TEST_CHECK_GOTO(peer_gpu_sysmem.address != 0, cleanup); + + // allocate vidmem on remote GPU + status = uvm_mem_alloc_vidmem(MEM_ALLOCATION_SIZE, peer_gpu, &vidmem); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + TEST_CHECK_GOTO(IS_ALIGNED(MEM_ALLOCATION_SIZE, vidmem->chunk_size), cleanup); + + // map onto GPU + status = uvm_mem_map_gpu_kernel(vidmem, peer_gpu); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + + // get remote GPU 
virtual address for its vidmem + peer_gpu_vidmem = uvm_mem_gpu_address_virtual_kernel(vidmem, peer_gpu); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + + // initialize memory using CPU + for (i = 0; i < MEM_ALLOCATION_SIZE / sizeof(NvU32); i++) + cpu_array[i] = i; + + // copy sysmem to remote GPUs memory + status = uvm_push_begin(peer_gpu->channel_manager, + UVM_CHANNEL_TYPE_CPU_TO_GPU, + &push, + "peer identity mapping test initialization"); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + peer_gpu->parent->ce_hal->memcopy(&push, peer_gpu_vidmem, peer_gpu_sysmem, MEM_ALLOCATION_SIZE); + status = uvm_push_end_and_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + + // set the sysmem back to zero + memset((void *)cpu_array, '\0', MEM_ALLOCATION_SIZE); + + // use the peer mapping to copy back to sysmem + status = uvm_push_begin(local_gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_TO_GPU, + &push, + "peer identity mapping test"); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + for (i = 0; i < MEM_ALLOCATION_SIZE / vidmem->chunk_size; i++) { + uvm_gpu_address_t local_gpu_peer = uvm_mem_gpu_address_copy(vidmem, + local_gpu, + vidmem->chunk_size * i, + vidmem->chunk_size); + uvm_gpu_address_t local_gpu_sysmem_offset = local_gpu_sysmem; + local_gpu_sysmem_offset.address += vidmem->chunk_size * i; + local_gpu->parent->ce_hal->memcopy(&push, local_gpu_sysmem_offset, local_gpu_peer, vidmem->chunk_size); + } + status = uvm_push_end_and_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, cleanup); + + for (i = 0; i < MEM_ALLOCATION_SIZE / sizeof(NvU32); i++) { + if (cpu_array[i] != i) { + UVM_TEST_PRINT("Expected %u at offset %u but got %u\n", i, i, cpu_array[i]); + status = NV_ERR_INVALID_STATE; + } + } + +cleanup: + uvm_mem_free(vidmem); + uvm_mem_free(sysmem); + return status; +} + +NV_STATUS uvm_test_peer_identity_mappings(UVM_TEST_PEER_IDENTITY_MAPPINGS_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_gpu_t *gpu_a; + uvm_gpu_t *gpu_b; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_read(va_space); + gpu_a = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpuA); + gpu_b = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpuB); + + if (gpu_a == NULL || gpu_b == NULL) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + if (gpu_a->parent->peer_copy_mode != gpu_b->parent->peer_copy_mode) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + if (gpu_a->parent->peer_copy_mode != UVM_GPU_PEER_COPY_MODE_VIRTUAL) { + status = NV_WARN_NOTHING_TO_DO; + goto done; + } + + // Indirect peers don't use identity mappings + if (!uvm_processor_mask_test(&va_space->can_access[uvm_id_value(gpu_a->id)], gpu_b->id) || + uvm_processor_mask_test(&va_space->indirect_peers[uvm_id_value(gpu_a->id)], gpu_b->id)) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + status = try_peer_access_remote_gpu_memory(gpu_a, gpu_b); + if (status != NV_OK) + goto done; + + status = try_peer_access_remote_gpu_memory(gpu_b, gpu_a); + if (status != NV_OK) + goto done; +done: + uvm_va_space_up_read(va_space); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_perf_events.c b/kernel-open/nvidia-uvm/uvm_perf_events.c new file mode 100644 index 000000000..9aa01cc0d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_events.c @@ -0,0 +1,212 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation 
files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_test.h" +#include "uvm_perf_events.h" +#include "uvm_va_space.h" + +// Entry of the event callback list +typedef struct +{ + uvm_perf_event_callback_t callback; + + struct list_head callback_list_node; +} callback_desc_t; + +// Cache for callback descriptor list entries +static struct kmem_cache *g_callback_desc_cache; + +// Check if the callback list already contains an entry for the given callback. Caller needs to hold (at least) read +// va_space_events lock +static callback_desc_t *event_list_find_callback(uvm_perf_va_space_events_t *va_space_events, + struct list_head *callback_list, uvm_perf_event_callback_t callback) +{ + callback_desc_t *callback_desc; + + uvm_assert_rwsem_locked(&va_space_events->lock); + + list_for_each_entry(callback_desc, callback_list, callback_list_node) { + if (callback_desc->callback == callback) + return callback_desc; + } + + return NULL; +} + +NV_STATUS uvm_perf_register_event_callback_locked(uvm_perf_va_space_events_t *va_space_events, + uvm_perf_event_t event_id, + uvm_perf_event_callback_t callback) +{ + callback_desc_t *callback_desc; + struct list_head *callback_list; + + UVM_ASSERT(event_id >= 0 && event_id < UVM_PERF_EVENT_COUNT); + UVM_ASSERT(callback); + + uvm_assert_rwsem_locked_write(&va_space_events->lock); + + callback_list = &va_space_events->event_callbacks[event_id]; + + UVM_ASSERT(!event_list_find_callback(va_space_events, callback_list, callback)); + + callback_desc = kmem_cache_alloc(g_callback_desc_cache, NV_UVM_GFP_FLAGS); + if (!callback_desc) + return NV_ERR_NO_MEMORY; + + callback_desc->callback = callback; + list_add_tail(&callback_desc->callback_list_node, callback_list); + + return NV_OK; +} + +NV_STATUS uvm_perf_register_event_callback(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id, + uvm_perf_event_callback_t callback) +{ + NV_STATUS status; + + uvm_down_write(&va_space_events->lock); + status = uvm_perf_register_event_callback_locked(va_space_events, event_id, callback); + uvm_up_write(&va_space_events->lock); + + return status; +} + +void uvm_perf_unregister_event_callback_locked(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id, + uvm_perf_event_callback_t callback) +{ + callback_desc_t *callback_desc; + struct list_head *callback_list; + + UVM_ASSERT(event_id >= 0 && event_id < UVM_PERF_EVENT_COUNT); + UVM_ASSERT(callback); + + uvm_assert_rwsem_locked_write(&va_space_events->lock); + + callback_list = &va_space_events->event_callbacks[event_id]; + callback_desc = 
event_list_find_callback(va_space_events, callback_list, callback); + + if (!callback_desc) + return; + + list_del(&callback_desc->callback_list_node); + + kmem_cache_free(g_callback_desc_cache, callback_desc); +} + +void uvm_perf_unregister_event_callback(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id, + uvm_perf_event_callback_t callback) +{ + uvm_down_write(&va_space_events->lock); + uvm_perf_unregister_event_callback_locked(va_space_events, event_id, callback); + uvm_up_write(&va_space_events->lock); +} + +void uvm_perf_event_notify(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id, + uvm_perf_event_data_t *event_data) +{ + callback_desc_t *callback_desc; + struct list_head *callback_list; + + UVM_ASSERT(event_id >= 0 && event_id < UVM_PERF_EVENT_COUNT); + UVM_ASSERT(event_data); + + callback_list = &va_space_events->event_callbacks[event_id]; + + uvm_down_read(&va_space_events->lock); + + // Invoke all registered callbacks for the events + list_for_each_entry(callback_desc, callback_list, callback_list_node) { + callback_desc->callback(event_id, event_data); + } + + uvm_up_read(&va_space_events->lock); +} + +bool uvm_perf_is_event_callback_registered(uvm_perf_va_space_events_t *va_space_events, + uvm_perf_event_t event_id, + uvm_perf_event_callback_t callback) +{ + callback_desc_t *callback_desc; + struct list_head *callback_list; + + uvm_assert_rwsem_locked(&va_space_events->lock); + + callback_list = &va_space_events->event_callbacks[event_id]; + callback_desc = event_list_find_callback(va_space_events, callback_list, callback); + + return callback_desc != NULL; +} + +NV_STATUS uvm_perf_init_va_space_events(uvm_va_space_t *va_space, uvm_perf_va_space_events_t *va_space_events) +{ + unsigned event_id; + + uvm_init_rwsem(&va_space_events->lock, UVM_LOCK_ORDER_VA_SPACE_EVENTS); + + // Initialize event callback lists + for (event_id = 0; event_id < UVM_PERF_EVENT_COUNT; ++event_id) + INIT_LIST_HEAD(&va_space_events->event_callbacks[event_id]); + + va_space_events->va_space = va_space; + + return NV_OK; +} + +void uvm_perf_destroy_va_space_events(uvm_perf_va_space_events_t *va_space_events) +{ + unsigned event_id; + + // If the va_space member was not set, va_space creation failed before initializing its va_space_events member. We + // are done. 
+ if (!va_space_events->va_space) + return; + + // Destroy all event callback lists' entries + for (event_id = 0; event_id < UVM_PERF_EVENT_COUNT; ++event_id) { + callback_desc_t *callback_desc, *callback_desc_tmp; + struct list_head *callback_list; + + callback_list = &va_space_events->event_callbacks[event_id]; + + list_for_each_entry_safe(callback_desc, callback_desc_tmp, callback_list, callback_list_node) { + list_del(&callback_desc->callback_list_node); + kmem_cache_free(g_callback_desc_cache, callback_desc); + } + } + + va_space_events->va_space = NULL; +} + +NV_STATUS uvm_perf_events_init(void) +{ + g_callback_desc_cache = NV_KMEM_CACHE_CREATE("uvm_perf_callback_list", callback_desc_t); + if (!g_callback_desc_cache) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_perf_events_exit(void) +{ + kmem_cache_destroy_safe(&g_callback_desc_cache); +} diff --git a/kernel-open/nvidia-uvm/uvm_perf_events.h b/kernel-open/nvidia-uvm/uvm_perf_events.h new file mode 100644 index 000000000..9179d6cb8 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_events.h @@ -0,0 +1,376 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PERF_EVENTS_H__ +#define __UVM_PERF_EVENTS_H__ + +#include "uvm_linux.h" +#include "uvm_forward_decl.h" +#include "uvm_processors.h" +#include "uvm_hal_types.h" +#include "uvm_lock.h" +#include "uvm_va_block_types.h" + +// uvm_perf_events is an event notification dispatcher that broadcasts events +// to clients. Clients register functions to be called under specific events. +// The callback lists are stored per va_space and, therefore, different +// callbacks can be registered per client. This will be useful to use +// different performance heuristic implementations depending on the GPU +// resources used by each process. For example, on a system with Pascal + +// Maxwell GPUs, VA spaces which have Maxwell GPU VA spaces will be restricted +// to the UVM-Lite feature set, while a VA space which only uses the Pascal +// GPU will not be downgraded. Registering/unregistering callbacks requires +// holding the VA space events lock in write mode. The exact locking +// guarantees under which callbacks are executed depend on the specific event, +// but the VA space events lock is held in read mode for all of them.
The +// additional locking guarantees are defined in each event definition. + +// Performance-related events that can be notified +typedef enum +{ + // Locking: uvm_va_space: at least in read mode, uvm_va_block: exclusive / nobody is referencing the block anymore + UVM_PERF_EVENT_BLOCK_DESTROY = 0, + + // Locking: uvm_va_space: write + UVM_PERF_EVENT_BLOCK_SHRINK, + + // Locking: uvm_va_space: write + UVM_PERF_EVENT_RANGE_DESTROY, + + // Locking: uvm_va_space: write + UVM_PERF_EVENT_RANGE_SHRINK, + + // Locking: uvm_va_space: write + UVM_PERF_EVENT_MODULE_UNLOAD, + + // Locking: uvm_va_space: at least in read mode, uvm_va_block: exclusive (if uvm_va_block is not NULL) + UVM_PERF_EVENT_FAULT, + + // Locking: uvm_va_block: exclusive. Notably the uvm_va_space lock may not be held on eviction. + UVM_PERF_EVENT_MIGRATION, + + // Locking: uvm_va_space: at least in read mode, uvm_va_block: exclusive + UVM_PERF_EVENT_REVOCATION, + + UVM_PERF_EVENT_COUNT, +} uvm_perf_event_t; + +// Format of the data passed to callbacks. Scope must be filled with the appropriate values by the code which notifies +// the event +typedef union +{ + struct + { + uvm_va_block_t *block; + } block_destroy; + + struct + { + uvm_va_block_t *block; + } block_shrink; + + struct + { + uvm_va_range_t *range; + } range_destroy; + + struct + { + uvm_va_range_t *range; + } range_shrink; + + struct + { + uvm_perf_module_t *module; + + // Only one of these two can be set. The other one must be NULL + uvm_va_block_t *block; + uvm_va_range_t *range; + } module_unload; + + struct + { + // This field contains the VA space where this fault was reported. + // If block is not NULL, this field must match + // uvm_va_block_get_va_space(block). + uvm_va_space_t *space; + + // VA block for the page where the fault was triggered if it exists, + // NULL otherwise (this can happen if the fault is fatal or the + // VA block could not be created). + uvm_va_block_t *block; + + // ID of the faulting processor + uvm_processor_id_t proc_id; + + // ID of the preferred location processor + uvm_processor_id_t preferred_location; + + // Fault descriptor + union + { + struct + { + uvm_fault_buffer_entry_t *buffer_entry; + + NvU32 batch_id; + + bool is_duplicate; + } gpu; + + struct + { + NvU64 fault_va; + + bool is_write; + + NvU64 pc; + } cpu; + }; + } fault; + + // This event is emitted during migration and the residency bits may be + // stale. Do not rely on them in the callbacks. + struct + { + uvm_push_t *push; + uvm_va_block_t *block; + + // ID of the destination processor of the migration + uvm_processor_id_t dst; + + // ID of the source processor of the migration + uvm_processor_id_t src; + + // Start address of the memory range being migrated + NvU64 address; + + // Number of bytes being migrated + NvU64 bytes; + + // Whether the page has been copied or moved + uvm_va_block_transfer_mode_t transfer_mode; + + // Event that performed the call to make_resident + uvm_make_resident_cause_t cause; + + // Pointer to the make_resident context from the va_block_context + // struct used by the operation that triggered the make_resident call. 
+ uvm_make_resident_context_t *make_resident_context; + } migration; + + struct + { + uvm_va_block_t *block; + + // ID of the processor whose access permissions have been revoked + uvm_processor_id_t proc_id; + + // Start address of the memory range being revoked + NvU64 address; + + // Number of bytes of the memory range being revoked + NvU64 bytes; + + // Old access permission + uvm_prot_t old_prot; + + // New access permission + uvm_prot_t new_prot; + } revocation; +} uvm_perf_event_data_t; + +// Type of the function that can be registered as a callback +// +// event_id: the event being notified. Passing it to the callback enables using the same function to handle +// different events. +// event_data: extra event data that is passed to the callback function. The format of data passed for each event type +// is declared in the uvm_perf_event_data_t union +typedef void (*uvm_perf_event_callback_t)(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data); + +typedef struct +{ + // Lock protecting the events + // + // Held for write during registration/unregistration of callbacks and for + // read during notification of events. + // + // Also used by tools to protect their state and registration of perf event callbacks. + uvm_rw_semaphore_t lock; + + // Array of callback lists, one per event, used for event notification + struct list_head event_callbacks[UVM_PERF_EVENT_COUNT]; + + uvm_va_space_t *va_space; +} uvm_perf_va_space_events_t; + +// Initialize event notification for a va_space. This must be called from va_space construction. No locking required +NV_STATUS uvm_perf_init_va_space_events(uvm_va_space_t *va_space, uvm_perf_va_space_events_t *va_space_events); + +// Finalize event notification for a va_space. Caller must hold va_space lock in write mode +void uvm_perf_destroy_va_space_events(uvm_perf_va_space_events_t *va_space_events); + +// Register a callback to be executed under the given event. The given callback cannot have been already registered for +// the same event, although the same callback can be registered for different events. +NV_STATUS uvm_perf_register_event_callback(uvm_perf_va_space_events_t *va_space_events, + uvm_perf_event_t event_id, uvm_perf_event_callback_t callback); + +// Same as uvm_perf_register_event_callback(), but the caller must hold +// va_space_events lock in write mode. +NV_STATUS uvm_perf_register_event_callback_locked(uvm_perf_va_space_events_t *va_space_events, + uvm_perf_event_t event_id, uvm_perf_event_callback_t callback); + +// Removes a callback for the given event. It's safe to call with a callback that hasn't been registered. +void uvm_perf_unregister_event_callback(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id, + uvm_perf_event_callback_t callback); + +// Same as uvm_perf_unregister_event_callback(), but the caller must hold +// va_space_events lock in write mode. +void uvm_perf_unregister_event_callback_locked(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id, + uvm_perf_event_callback_t callback); + +// Invoke the callbacks registered for the given event. Callbacks cannot fail. +// Acquires the va_space_events lock internally +void uvm_perf_event_notify(uvm_perf_va_space_events_t *va_space_events, uvm_perf_event_t event_id, + uvm_perf_event_data_t *event_data); + +// Checks if the given callback is already registered for the event. +// va_space_events.lock must be held in either mode by the caller.
+bool uvm_perf_is_event_callback_registered(uvm_perf_va_space_events_t *va_space_events, + uvm_perf_event_t event_id, + uvm_perf_event_callback_t callback); + +// Initialization/cleanup functions +NV_STATUS uvm_perf_events_init(void); +void uvm_perf_events_exit(void); + +// Helper to notify migration events +static inline void uvm_perf_event_notify_migration(uvm_perf_va_space_events_t *va_space_events, + uvm_push_t *push, + uvm_va_block_t *va_block, + uvm_processor_id_t dst, + uvm_processor_id_t src, + NvU64 address, + NvU64 bytes, + uvm_va_block_transfer_mode_t transfer_mode, + uvm_make_resident_cause_t cause, + uvm_make_resident_context_t *make_resident_context) +{ + uvm_perf_event_data_t event_data = + { + .migration = + { + .push = push, + .block = va_block, + .dst = dst, + .src = src, + .address = address, + .bytes = bytes, + .transfer_mode = transfer_mode, + .cause = cause, + .make_resident_context = make_resident_context, + } + }; + + uvm_perf_event_notify(va_space_events, UVM_PERF_EVENT_MIGRATION, &event_data); +} + +// Helper to notify gpu fault events +static inline void uvm_perf_event_notify_gpu_fault(uvm_perf_va_space_events_t *va_space_events, + uvm_va_block_t *va_block, + uvm_gpu_id_t gpu_id, + uvm_processor_id_t preferred_location, + uvm_fault_buffer_entry_t *buffer_entry, + NvU32 batch_id, + bool is_duplicate) +{ + uvm_perf_event_data_t event_data = + { + .fault = + { + .space = va_space_events->va_space, + .block = va_block, + .proc_id = gpu_id, + .preferred_location = preferred_location, + }, + }; + + event_data.fault.gpu.buffer_entry = buffer_entry; + event_data.fault.gpu.batch_id = batch_id; + event_data.fault.gpu.is_duplicate = is_duplicate; + + uvm_perf_event_notify(va_space_events, UVM_PERF_EVENT_FAULT, &event_data); +} + +// Helper to notify cpu fault events +static inline void uvm_perf_event_notify_cpu_fault(uvm_perf_va_space_events_t *va_space_events, + uvm_va_block_t *va_block, + uvm_processor_id_t preferred_location, + NvU64 fault_va, + bool is_write, + NvU64 pc) +{ + uvm_perf_event_data_t event_data = + { + .fault = + { + .space = va_space_events->va_space, + .block = va_block, + .proc_id = UVM_ID_CPU, + .preferred_location = preferred_location, + } + }; + + event_data.fault.cpu.fault_va = fault_va, + event_data.fault.cpu.is_write = is_write, + event_data.fault.cpu.pc = pc, + + uvm_perf_event_notify(va_space_events, UVM_PERF_EVENT_FAULT, &event_data); +} + +// Helper to notify permission revocation +static inline void uvm_perf_event_notify_revocation(uvm_perf_va_space_events_t *va_space_events, + uvm_va_block_t *va_block, + uvm_processor_id_t id, + NvU64 addr, + NvU64 bytes, + uvm_prot_t old_prot, + uvm_prot_t new_prot) +{ + uvm_perf_event_data_t event_data = + { + .revocation = + { + .block = va_block, + .proc_id = id, + .address = addr, + .bytes = bytes, + .old_prot = old_prot, + .new_prot = new_prot, + } + }; + + uvm_perf_event_notify(va_space_events, UVM_PERF_EVENT_REVOCATION, &event_data); +} + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_perf_events_test.c b/kernel-open/nvidia-uvm/uvm_perf_events_test.c new file mode 100644 index 000000000..c91adee3a --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_events_test.c @@ -0,0 +1,99 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, 
including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_perf_events.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_va_space.h" +#include "uvm_kvmalloc.h" +#include "uvm_test.h" + +// Global variable used to check that callbacks are correctly executed +static int test_data; + +static void callback_inc_1(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + ++test_data; +} + +static void callback_inc_2(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + test_data += 2; +} + +static NV_STATUS test_events(uvm_va_space_t *va_space) +{ + NV_STATUS status; + uvm_perf_event_data_t event_data; + + uvm_va_block_t block; + + test_data = 0; + + memset(&event_data, 0, sizeof(event_data)); + + // Use CPU id to avoid triggering the GPU stats update code + event_data.fault.proc_id = UVM_ID_CPU; + + // Register a callback for page fault + status = uvm_perf_register_event_callback(&va_space->perf_events, UVM_PERF_EVENT_FAULT, callback_inc_1); + TEST_CHECK_GOTO(status == NV_OK, done); + // Register a callback for page fault + status = uvm_perf_register_event_callback(&va_space->perf_events, UVM_PERF_EVENT_FAULT, callback_inc_2); + TEST_CHECK_GOTO(status == NV_OK, done); + + // va_space read lock is required for page fault event notification + uvm_va_space_down_read(va_space); + + // Notify (fake) page fault. The two registered callbacks for this event increment the value of test_value + event_data.fault.block = █ + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_FAULT, &event_data); + + uvm_va_space_up_read(va_space); + + // test_data was initialized to zero. 
It should have been incremented by 1 and 2, respectively in the callbacks + TEST_CHECK_GOTO(test_data == 3, done); + +done: + // Unregister all callbacks + uvm_perf_unregister_event_callback(&va_space->perf_events, UVM_PERF_EVENT_FAULT, callback_inc_1); + uvm_perf_unregister_event_callback(&va_space->perf_events, UVM_PERF_EVENT_FAULT, callback_inc_2); + + return status; +} + +NV_STATUS uvm_test_perf_events_sanity(UVM_TEST_PERF_EVENTS_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space; + + va_space = uvm_va_space_get(filp); + + status = test_events(va_space); + if (status != NV_OK) + goto done; + +done: + return status; +} + diff --git a/kernel-open/nvidia-uvm/uvm_perf_heuristics.c b/kernel-open/nvidia-uvm/uvm_perf_heuristics.c new file mode 100644 index 000000000..392f914ba --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_heuristics.c @@ -0,0 +1,110 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_perf_heuristics.h" +#include "uvm_perf_thrashing.h" +#include "uvm_perf_prefetch.h" +#include "uvm_gpu_access_counters.h" +#include "uvm_va_space.h" + +NV_STATUS uvm_perf_heuristics_init() +{ + NV_STATUS status; + + status = uvm_perf_thrashing_init(); + if (status != NV_OK) + return status; + + status = uvm_perf_prefetch_init(); + if (status != NV_OK) + return status; + + status = uvm_perf_access_counters_init(); + if (status != NV_OK) + return status; + + return NV_OK; +} + +void uvm_perf_heuristics_exit() +{ + uvm_perf_access_counters_exit(); + uvm_perf_prefetch_exit(); + uvm_perf_thrashing_exit(); +} + +NV_STATUS uvm_perf_heuristics_add_gpu(uvm_gpu_t *gpu) +{ + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + return uvm_perf_thrashing_add_gpu(gpu); +} + +void uvm_perf_heuristics_remove_gpu(uvm_gpu_t *gpu) +{ + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + uvm_perf_thrashing_remove_gpu(gpu); +} + +NV_STATUS uvm_perf_heuristics_load(uvm_va_space_t *va_space) +{ + NV_STATUS status; + + status = uvm_perf_thrashing_load(va_space); + if (status != NV_OK) + return status; + status = uvm_perf_prefetch_load(va_space); + if (status != NV_OK) + return status; + status = uvm_perf_access_counters_load(va_space); + if (status != NV_OK) + return status; + + return NV_OK; +} + +NV_STATUS uvm_perf_heuristics_register_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu) +{ + uvm_assert_rwsem_locked_write(&va_space->lock); + + return uvm_perf_thrashing_register_gpu(va_space, gpu); +} + +void uvm_perf_heuristics_stop(uvm_va_space_t *va_space) +{ + uvm_assert_lockable_order(UVM_LOCK_ORDER_VA_SPACE); + + // Prefetch heuristics don't need a stop operation for now + uvm_perf_thrashing_stop(va_space); +} + +void uvm_perf_heuristics_unload(uvm_va_space_t *va_space) +{ + uvm_assert_rwsem_locked_write(&va_space->lock); + + uvm_perf_access_counters_unload(va_space); + uvm_perf_prefetch_unload(va_space); + uvm_perf_thrashing_unload(va_space); +} diff --git a/kernel-open/nvidia-uvm/uvm_perf_heuristics.h b/kernel-open/nvidia-uvm/uvm_perf_heuristics.h new file mode 100644 index 000000000..b6e63883d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_heuristics.h @@ -0,0 +1,57 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_PERF_HEURISTICS_H__ +#define __UVM_PERF_HEURISTICS_H__ + +#include "nvtypes.h" +#include "uvm_forward_decl.h" + +// Global initialization/cleanup functions +NV_STATUS uvm_perf_heuristics_init(void); +void uvm_perf_heuristics_exit(void); + +// Per-GPU initialization/cleanup functions. They are called from add_gpu, +// remove_gpu. +// +// Locking: the global lock must be held when calling these functions +NV_STATUS uvm_perf_heuristics_add_gpu(uvm_gpu_t *gpu); +void uvm_perf_heuristics_remove_gpu(uvm_gpu_t *gpu); + +// Per-VA space initialization/cleanup functions + +// This function is called during VA space creation +NV_STATUS uvm_perf_heuristics_load(uvm_va_space_t *va_space); + +// This function is called when a GPU is registered on a VA space +NV_STATUS uvm_perf_heuristics_register_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu); + +// The following two functions are called during VA space teardown.
// uvm_perf_heuristics_stop is called first with no lock taken on the VA space.
// This is required because the performance heuristics modules may have scheduled
// delayed work that needs to take the VA space lock. Therefore,
// uvm_perf_heuristics_stop needs to cancel/flush any pending work. +void uvm_perf_heuristics_stop(uvm_va_space_t *va_space); +void uvm_perf_heuristics_unload(uvm_va_space_t *va_space); + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_perf_module.c b/kernel-open/nvidia-uvm/uvm_perf_module.c new file mode 100644 index 000000000..7f1e06111 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_module.c @@ -0,0 +1,151 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE.
+ +*******************************************************************************/ + +#include "uvm_perf_events.h" +#include "uvm_perf_module.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_va_space.h" + +void uvm_perf_module_type_set_data(uvm_perf_module_data_desc_t *modules_data, void *data, uvm_perf_module_type_t type) +{ + UVM_ASSERT(type >= 0 && type < UVM_PERF_MODULE_TYPE_COUNT); + UVM_ASSERT(data); + // Data can only be created once per module + UVM_ASSERT(!modules_data[type].data); + + modules_data[type].data = data; +} + +void uvm_perf_module_type_unset_data(uvm_perf_module_data_desc_t *modules_data, uvm_perf_module_type_t type) +{ + UVM_ASSERT(type >= 0 && type < UVM_PERF_MODULE_TYPE_COUNT); + // Data should have been previously set + UVM_ASSERT(modules_data[type].data); + + memset(&modules_data[type], 0, sizeof(modules_data[type])); +} + +NV_STATUS uvm_perf_module_load(uvm_perf_module_t *module, uvm_va_space_t *va_space) +{ + NV_STATUS status; + size_t i, j; + + uvm_assert_rwsem_locked_write(&va_space->lock); + UVM_ASSERT(va_space->perf_modules[module->type] == NULL); + + for (i = 0; i < UVM_PERF_EVENT_COUNT; ++i) { + if (module->callbacks[i] != NULL) { + status = uvm_perf_register_event_callback(&va_space->perf_events, i, module->callbacks[i]); + if (status != NV_OK) + goto error; + } + } + + va_space->perf_modules[module->type] = module; + + return NV_OK; + +error: + for (j = 0; j < i; ++j) { + if (module->callbacks[j] != NULL) + uvm_perf_unregister_event_callback(&va_space->perf_events, j, module->callbacks[j]); + } + + return status; +} + +void uvm_perf_module_unload(uvm_perf_module_t *module, uvm_va_space_t *va_space) +{ + uvm_perf_event_data_t event_data; + uvm_va_range_t *va_range; + uvm_va_block_t *block; + size_t i; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + if (!va_space->perf_modules[module->type]) + return; + + event_data.module_unload.module = module; + + // Iterate over all va_range/va_blocks in the va_space + uvm_for_each_va_range(va_range, va_space) { + if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED) + continue; + + for_each_va_block_in_va_range(va_range, block) { + uvm_mutex_lock(&block->lock); + + // Notify a fake va_block destruction to destroy the module-allocated data + event_data.module_unload.block = block; + event_data.module_unload.range = NULL; + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_MODULE_UNLOAD, &event_data); + + uvm_mutex_unlock(&block->lock); + } + // Notify a fake va_range destruction to destroy the module-allocated data + event_data.module_unload.block = NULL; + event_data.module_unload.range = va_range; + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_MODULE_UNLOAD, &event_data); + } + + for (i = 0; i < UVM_PERF_EVENT_COUNT; ++i) { + if (module->callbacks[i] != NULL) + uvm_perf_unregister_event_callback(&va_space->perf_events, i, module->callbacks[i]); + } + + va_space->perf_modules[module->type] = NULL; +} + +uvm_perf_module_t *uvm_perf_module_for_type(uvm_va_space_t *va_space, uvm_perf_module_type_t type) +{ + uvm_assert_rwsem_locked(&va_space->lock); + + return va_space->perf_modules[type]; +} + + +void uvm_perf_module_init(const char *name, uvm_perf_module_type_t type, + uvm_perf_module_event_callback_desc_t *callbacks, size_t callback_count, + uvm_perf_module_t *module) +{ + size_t i; + + UVM_ASSERT(callbacks); + UVM_ASSERT(module); + UVM_ASSERT(callback_count <= UVM_PERF_EVENT_COUNT); + + memset(module->callbacks, 0, sizeof(module->callbacks)); + + // Register all 
the given callbacks + for (i = 0; i < callback_count; ++i) { + UVM_ASSERT(callbacks[i].event_id >= 0 && callbacks[i].event_id < UVM_PERF_EVENT_COUNT); + UVM_ASSERT(module->callbacks[callbacks[i].event_id] == NULL); + + module->callbacks[callbacks[i].event_id] = callbacks[i].callback; + } + + module->name = name; + module->type = type; +} diff --git a/kernel-open/nvidia-uvm/uvm_perf_module.h b/kernel-open/nvidia-uvm/uvm_perf_module.h new file mode 100644 index 000000000..b716ce208 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_module.h @@ -0,0 +1,119 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PERF_MODULE_H__ +#define __UVM_PERF_MODULE_H__ + +#include "uvm_common.h" +#include "uvm_forward_decl.h" +#include "uvm_perf_events.h" + +// uvm_perf_module is an abstraction that provides the basic infrastructure to
// develop a performance heuristics module for UVM. A perf module defines
// callbacks for a number of perf events, and provides a placeholder to store
// data that the module can use to track event information and produce
// performance hints. Data is not created at a specific point in time; modules
// are in charge of allocating data the first time they need it. If data
// allocation fails, the module must remain functional (it will simply not
// give performance hints). Perf modules have a global description but
// they are loaded/unloaded within each va_space. When a module is unloaded it
// sends the destroy events to each block/range in the va space (and for the
// va_space itself), so that the data allocated by the module is freed. + +// enum that contains the main sub-modules that handle performance +// optimizations in UVM.
+// +// - UVM_PERF_MODULE_TYPE_THRASHING: detects memory thrashing scenarios and +// provides thrashing prevention mechanisms +// - UVM_PERF_MODULE_TYPE_PREFETCH: detects memory prefetching opportunities +// - UVM_PERF_MODULE_TYPE_ACCESS_COUNTERS: migrates memory using access counter +// notifications +typedef enum +{ + UVM_PERF_MODULE_FIRST_TYPE = 0, + + UVM_PERF_MODULE_TYPE_TEST = UVM_PERF_MODULE_FIRST_TYPE, + UVM_PERF_MODULE_TYPE_THRASHING, + UVM_PERF_MODULE_TYPE_PREFETCH, + UVM_PERF_MODULE_TYPE_ACCESS_COUNTERS, + + UVM_PERF_MODULE_TYPE_COUNT, +} uvm_perf_module_type_t; + +struct uvm_perf_module_struct +{ + const char *name; + + uvm_perf_module_type_t type; + + // Array of event callbacks + uvm_perf_event_callback_t callbacks[UVM_PERF_EVENT_COUNT]; +}; + +// Placeholder to store module-allocated data +typedef struct +{ + // Pointer to module-allocated data + void *data; +} uvm_perf_module_data_desc_t; + +// Event callback descriptor +typedef struct +{ + uvm_perf_event_t event_id; + + uvm_perf_event_callback_t callback; +} uvm_perf_module_event_callback_desc_t; + +// Obtain the descriptor of module-allocated data for the given module type +static inline void *uvm_perf_module_type_data(uvm_perf_module_data_desc_t *modules_data, uvm_perf_module_type_t type) +{ + UVM_ASSERT(type >= UVM_PERF_MODULE_FIRST_TYPE && type < UVM_PERF_MODULE_TYPE_COUNT); + + return modules_data[type].data; +} + +void uvm_perf_module_type_set_data(uvm_perf_module_data_desc_t *modules_data, void *data, uvm_perf_module_type_t type); + +// Clear data after the module has freed it +void uvm_perf_module_type_unset_data(uvm_perf_module_data_desc_t *modules_data, uvm_perf_module_type_t type); + +// Register all the callbacks defined by the module in uvm_perf_module_init. Caller must hold va_space lock in write +// mode +NV_STATUS uvm_perf_module_load(uvm_perf_module_t *module, uvm_va_space_t *va_space); + +// Remove data allocated by the module for all va_block/va_range in the va_space, and in the va_space struct itself +// It also unregisters all the callbacks defined by the module. Caller must hold va_space lock in write mode +void uvm_perf_module_unload(uvm_perf_module_t *module, uvm_va_space_t *va_space); + +// Obtain the module loaded in the va_space for the given perf module type. Caller must hold va_space lock at least in +// read mode +uvm_perf_module_t *uvm_perf_module_for_type(uvm_va_space_t *va_space, uvm_perf_module_type_t type); + +// Initialize a performance heuristics module. 
This must be called once, before any call to uvm_perf_module_load using +// this module +void uvm_perf_module_init(const char *name, uvm_perf_module_type_t type, + uvm_perf_module_event_callback_desc_t *callbacks, size_t callback_count, + uvm_perf_module_t *module); + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_perf_module_test.c b/kernel-open/nvidia-uvm/uvm_perf_module_test.c new file mode 100644 index 000000000..bd1d516e5 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_module_test.c @@ -0,0 +1,289 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_kvmalloc.h" +#include "uvm_perf_module.h" +#include "uvm_va_block.h" +#include "uvm_va_space.h" +#include "uvm_test.h" + +static const uvm_perf_module_type_t module1_type = UVM_PERF_MODULE_TYPE_TEST; +typedef int module1_data_type_t; + +static const uvm_perf_module_type_t module2_type = UVM_PERF_MODULE_TYPE_TEST; +typedef struct { + int value[4]; +} module2_data_type_t; + +static uvm_perf_module_t module1; +static uvm_perf_module_t module2; + +// Convenience function that obtains the data for the given module, and allocates it if it does not exist yet +static void *get_or_alloc_data(uvm_perf_module_data_desc_t *modules_data, uvm_perf_module_t *module, size_t type_size) +{ + void *data_ret; + + data_ret = uvm_perf_module_type_data(modules_data, module->type); + if (!data_ret) { + data_ret = uvm_kvmalloc_zero(type_size); + if (data_ret) + uvm_perf_module_type_set_data(modules_data, data_ret, module->type); + } + return data_ret; +} + +// Block destruction callback for module1 +static void module1_destroy(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + uvm_va_block_t *va_block; + void *data; + + if (event_id == UVM_PERF_EVENT_BLOCK_DESTROY) { + va_block = event_data->block_destroy.block; + } + else { + if (event_data->module_unload.module != &module1 || !event_data->module_unload.block) + return; + + va_block = event_data->module_unload.block; + } + + data = uvm_perf_module_type_data(va_block->perf_modules_data, module1.type); + + if (data) { + uvm_kvfree(data); + uvm_perf_module_type_unset_data(va_block->perf_modules_data, module1.type); + } +} + +// Page fault callback for module1 +static void module1_fault(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + module1_data_type_t *data; + 
uvm_va_block_t *va_block; + + va_block = event_data->fault.block; + + data = get_or_alloc_data(va_block->perf_modules_data, &module1, sizeof(*data)); + if (!data) + return; + + ++(*data); +} + +// Block destruction callback for module2 +static void module2_destroy(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + void *data; + uvm_va_block_t *va_block; + + if (event_id == UVM_PERF_EVENT_BLOCK_DESTROY) { + va_block = event_data->block_destroy.block; + } + else { + if (event_data->module_unload.module != &module2 || !event_data->module_unload.block) + return; + + va_block = event_data->module_unload.block; + } + + data = uvm_perf_module_type_data(va_block->perf_modules_data, module2.type); + if (data) { + uvm_kvfree(data); + uvm_perf_module_type_unset_data(va_block->perf_modules_data, module2.type); + } +} + +// Page fault callback for module2 +static void module2_fault(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + module2_data_type_t *data; + uvm_va_block_t *va_block; + + va_block = event_data->fault.block; + + data = get_or_alloc_data(va_block->perf_modules_data, &module2, sizeof(*data)); + if (!data) + return; + + data->value[0] += 1; + data->value[1] += 2; + data->value[2] += 3; + data->value[3] += 4; +} + +// This test: +// 1) Initializes modules named module1 and module2 +// 2) Loads module1 +// 3) Gets two va_blocks that must already exist in the given address (created using user-level calls) +// 4) Notifies some page faults on the blocks +// 5) Checks if the callbacks have executed correctly +// 6) Unloads module1 +// 7) Checks that data allocated by module1 has been freed +// 8) Loads module2 +// 9) Notifies some page faults on the blocks +// 10) Checks if the callbacks have executed correctly +// 11) Unloads module2 +// 12) Checks that data allocated by module2 has been freed +static NV_STATUS test_module_replace(uvm_va_space_t *va_space, NvU64 addr) +{ + NV_STATUS status; + uvm_perf_event_data_t event_data; + void *module1_data; + void *module2_data; + + uvm_va_block_t *block1, *block2; + + uvm_perf_module_event_callback_desc_t module1_callbacks[] = { + { UVM_PERF_EVENT_BLOCK_DESTROY, module1_destroy }, + { UVM_PERF_EVENT_MODULE_UNLOAD, module1_destroy }, + { UVM_PERF_EVENT_FAULT, module1_fault }, + }; + + uvm_perf_module_event_callback_desc_t module2_callbacks[] = { + { UVM_PERF_EVENT_BLOCK_DESTROY, module2_destroy}, + { UVM_PERF_EVENT_MODULE_UNLOAD, module2_destroy }, + { UVM_PERF_EVENT_FAULT, module2_fault}, + }; + + memset(&event_data, 0, sizeof(event_data)); + + // Use CPU id to avoid triggering the GPU stats update code + event_data.fault.proc_id = UVM_ID_CPU; + + uvm_perf_module_init("module1", module1_type, module1_callbacks, ARRAY_SIZE(module1_callbacks), &module1); + uvm_perf_module_init("module2", module2_type, module2_callbacks, ARRAY_SIZE(module2_callbacks), &module2); + + // We take va_space write lock during all the test to prevent blocks from disappearing + uvm_va_space_down_write(va_space); + + status = uvm_perf_module_load(&module1, va_space); + TEST_CHECK_GOTO(status == NV_OK, fail_space_write_status); + + status = uvm_va_block_find(va_space, addr, &block1); + TEST_CHECK_GOTO(status == NV_OK, fail_space_write_status); + + status = uvm_va_block_find(va_space, (addr + UVM_VA_BLOCK_SIZE) & ~(UVM_VA_BLOCK_SIZE - 1), &block2); + TEST_CHECK_GOTO(status == NV_OK, fail_space_write_status); + + // Notify (fake) page fault on block1 + event_data.fault.block = block1; + uvm_mutex_lock(&block1->lock); + 
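// The fault callbacks registered by the module allocate and update per-block
+ // data (block->perf_modules_data), so the notification is issued while the
+ // block lock is held.
+ 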
uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_FAULT, &event_data); + uvm_mutex_unlock(&block1->lock); + + // Notify two (fake) page faults on block2 + event_data.fault.block = block2; + uvm_mutex_lock(&block2->lock); + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_FAULT, &event_data); + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_FAULT, &event_data); + uvm_mutex_unlock(&block2->lock); + + module1_data = uvm_perf_module_type_data(block1->perf_modules_data, module1.type); + if (module1_data) + TEST_CHECK_GOTO((*(module1_data_type_t *) module1_data) == 1, fail_space_write_invalid_state); + module1_data = uvm_perf_module_type_data(block2->perf_modules_data, module1.type); + if (module1_data) + TEST_CHECK_GOTO((*(module1_data_type_t *) module1_data) == 2, fail_space_write_invalid_state); + + uvm_perf_module_unload(&module1, va_space); + + // Module-allocated data must be freed on module unload + module1_data = uvm_perf_module_type_data(block1->perf_modules_data, module1.type); + TEST_CHECK_GOTO(module1_data == NULL, fail_space_write_invalid_state); + module1_data = uvm_perf_module_type_data(block2->perf_modules_data, module1.type); + TEST_CHECK_GOTO(module1_data == NULL, fail_space_write_invalid_state); + + status = uvm_perf_module_load(&module2, va_space); + TEST_CHECK_GOTO(status == NV_OK, fail_space_write_status); + + // Notify two (fake) page faults on block1 + event_data.fault.block = block1; + uvm_mutex_lock(&block1->lock); + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_FAULT, &event_data); + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_FAULT, &event_data); + uvm_mutex_unlock(&block1->lock); + + // Notify (fake) page fault on block2 + event_data.fault.block = block2; + uvm_mutex_lock(&block2->lock); + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_FAULT, &event_data); + uvm_mutex_unlock(&block2->lock); + + module2_data = uvm_perf_module_type_data(block1->perf_modules_data, module2.type); + if (module2_data) { + TEST_CHECK_GOTO(((module2_data_type_t *) module2_data)->value[0] == 2, fail_space_write_invalid_state); + TEST_CHECK_GOTO(((module2_data_type_t *) module2_data)->value[1] == 4, fail_space_write_invalid_state); + TEST_CHECK_GOTO(((module2_data_type_t *) module2_data)->value[2] == 6, fail_space_write_invalid_state); + TEST_CHECK_GOTO(((module2_data_type_t *) module2_data)->value[3] == 8, fail_space_write_invalid_state); + } + + module2_data = uvm_perf_module_type_data(block2->perf_modules_data, module2.type); + if (module2_data) { + TEST_CHECK_GOTO(((module2_data_type_t *) module2_data)->value[0] == 1, fail_space_write_invalid_state); + TEST_CHECK_GOTO(((module2_data_type_t *) module2_data)->value[1] == 2, fail_space_write_invalid_state); + TEST_CHECK_GOTO(((module2_data_type_t *) module2_data)->value[2] == 3, fail_space_write_invalid_state); + TEST_CHECK_GOTO(((module2_data_type_t *) module2_data)->value[3] == 4, fail_space_write_invalid_state); + } + + uvm_perf_module_unload(&module2, va_space); + + // Module-allocated data must be freed on module unload + module2_data = uvm_perf_module_type_data(block2->perf_modules_data, module2.type); + TEST_CHECK_GOTO(module2_data == NULL, fail_space_write_invalid_state); + module2_data = uvm_perf_module_type_data(block2->perf_modules_data, module2.type); + TEST_CHECK_GOTO(module2_data == NULL, fail_space_write_invalid_state); + + uvm_va_space_up_write(va_space); + + return NV_OK; + +fail_space_write_status: + uvm_va_space_up_write(va_space); + + return 
status; + +fail_space_write_invalid_state: + uvm_va_space_up_write(va_space); + + return NV_ERR_INVALID_STATE; +} + +NV_STATUS uvm_test_perf_module_sanity(UVM_TEST_PERF_MODULE_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space; + + // Two blocks are needed + if (params->range_size <= UVM_VA_BLOCK_SIZE) { + status = NV_ERR_INVALID_ARGUMENT; + goto fail; + } + + va_space = uvm_va_space_get(filp); + + status = test_module_replace(va_space, params->range_address); + +fail: + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_perf_prefetch.c b/kernel-open/nvidia-uvm/uvm_perf_prefetch.c new file mode 100644 index 000000000..9effafa54 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_prefetch.c @@ -0,0 +1,526 @@ +/******************************************************************************* + Copyright (c) 2016-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE.
+ +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_perf_events.h" +#include "uvm_perf_module.h" +#include "uvm_perf_prefetch.h" +#include "uvm_kvmalloc.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_test.h" + +// Global cache to allocate the per-VA block prefetch detection structures +static struct kmem_cache *g_prefetch_info_cache __read_mostly; + +// Per-VA block prefetch detection structure +typedef struct +{ + uvm_page_mask_t prefetch_pages; + + uvm_page_mask_t migrate_pages; + + uvm_va_block_bitmap_tree_t bitmap_tree; + + uvm_processor_id_t last_migration_proc_id; + + uvm_va_block_region_t region; + + size_t big_page_size; + + uvm_va_block_region_t big_pages_region; + + NvU16 pending_prefetch_pages; + + NvU16 fault_migrations_to_last_proc; +} block_prefetch_info_t; + +// +// Tunables for prefetch detection/prevention (configurable via module parameters) +// + +// Enable/disable prefetch performance heuristics +static unsigned uvm_perf_prefetch_enable = 1; + +// TODO: Bug 1778037: [uvm] Use adaptive threshold for page prefetching +#define UVM_PREFETCH_THRESHOLD_DEFAULT 51 + +// Percentage of children subregions that need to be resident in order to +// trigger prefetching of the remaining subregions +// +// Valid values 1-100 +static unsigned uvm_perf_prefetch_threshold = UVM_PREFETCH_THRESHOLD_DEFAULT; + +#define UVM_PREFETCH_MIN_FAULTS_MIN 1 +#define UVM_PREFETCH_MIN_FAULTS_DEFAULT 1 +#define UVM_PREFETCH_MIN_FAULTS_MAX 20 + +// Minimum number of faults on a block in order to enable the prefetching +// logic +static unsigned uvm_perf_prefetch_min_faults = UVM_PREFETCH_MIN_FAULTS_DEFAULT; + +// Module parameters for the tunables +module_param(uvm_perf_prefetch_enable, uint, S_IRUGO); +module_param(uvm_perf_prefetch_threshold, uint, S_IRUGO); +module_param(uvm_perf_prefetch_min_faults, uint, S_IRUGO); + +static bool g_uvm_perf_prefetch_enable; +static unsigned g_uvm_perf_prefetch_threshold; +static unsigned g_uvm_perf_prefetch_min_faults; + +// Callback declaration for the performance heuristics events +static void prefetch_block_destroy_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data); + +static uvm_va_block_region_t compute_prefetch_region(uvm_page_index_t page_index, block_prefetch_info_t *prefetch_info) +{ + NvU16 counter; + uvm_va_block_bitmap_tree_iter_t iter; + uvm_va_block_bitmap_tree_t *bitmap_tree = &prefetch_info->bitmap_tree; + uvm_va_block_region_t prefetch_region = uvm_va_block_region(bitmap_tree->leaf_count, + bitmap_tree->leaf_count + 1); + + uvm_va_block_bitmap_tree_traverse_counters(counter, bitmap_tree, page_index, &iter) { + uvm_va_block_region_t subregion = uvm_va_block_bitmap_tree_iter_get_range(bitmap_tree, &iter); + NvU16 subregion_pages = uvm_va_block_region_num_pages(subregion); + + UVM_ASSERT(counter <= subregion_pages); + if (counter * 100 > subregion_pages * g_uvm_perf_prefetch_threshold) + prefetch_region = subregion; + } + + // Clamp prefetch region to actual pages + if (prefetch_region.first < bitmap_tree->leaf_count) { + if (prefetch_region.first < prefetch_info->region.first) + prefetch_region.first = prefetch_info->region.first; + + if (prefetch_region.outer > prefetch_info->region.outer) + prefetch_region.outer = prefetch_info->region.outer; + } + + return prefetch_region; +} + +// Performance heuristics module for prefetch +static uvm_perf_module_t g_module_prefetch; + +static uvm_perf_module_event_callback_desc_t g_callbacks_prefetch[] 
= { + { UVM_PERF_EVENT_BLOCK_DESTROY, prefetch_block_destroy_cb }, + { UVM_PERF_EVENT_MODULE_UNLOAD, prefetch_block_destroy_cb }, + { UVM_PERF_EVENT_BLOCK_SHRINK, prefetch_block_destroy_cb } +}; + +// Get the prefetch detection struct for the given block +static block_prefetch_info_t *prefetch_info_get(uvm_va_block_t *va_block) +{ + return uvm_perf_module_type_data(va_block->perf_modules_data, UVM_PERF_MODULE_TYPE_PREFETCH); +} + +static void prefetch_info_destroy(uvm_va_block_t *va_block) +{ + block_prefetch_info_t *prefetch_info = prefetch_info_get(va_block); + if (prefetch_info) { + kmem_cache_free(g_prefetch_info_cache, prefetch_info); + uvm_perf_module_type_unset_data(va_block->perf_modules_data, UVM_PERF_MODULE_TYPE_PREFETCH); + } +} + +// Get the prefetch detection struct for the given block or create it if it +// does not exist +static block_prefetch_info_t *prefetch_info_get_create(uvm_va_block_t *va_block) +{ + block_prefetch_info_t *prefetch_info = prefetch_info_get(va_block); + if (!prefetch_info) { + // Create some ghost leaves so we can align the tree to big page boundary. We use the + // largest page size to handle the worst-case scenario + size_t big_page_size = UVM_PAGE_SIZE_128K; + uvm_va_block_region_t big_pages_region = uvm_va_block_big_page_region_all(va_block, big_page_size); + size_t num_leaves = uvm_va_block_num_cpu_pages(va_block); + + // If the va block is not big enough to fit 128KB pages, maybe it still can fit 64KB pages + if (big_pages_region.outer == 0) { + big_page_size = UVM_PAGE_SIZE_64K; + big_pages_region = uvm_va_block_big_page_region_all(va_block, big_page_size); + } + + if (big_pages_region.first > 0) + num_leaves += (big_page_size / PAGE_SIZE - big_pages_region.first); + + UVM_ASSERT(num_leaves <= PAGES_PER_UVM_VA_BLOCK); + + prefetch_info = nv_kmem_cache_zalloc(g_prefetch_info_cache, NV_UVM_GFP_FLAGS); + if (!prefetch_info) + goto fail; + + prefetch_info->last_migration_proc_id = UVM_ID_INVALID; + + uvm_va_block_bitmap_tree_init_from_page_count(&prefetch_info->bitmap_tree, num_leaves); + + uvm_perf_module_type_set_data(va_block->perf_modules_data, prefetch_info, UVM_PERF_MODULE_TYPE_PREFETCH); + } + + return prefetch_info; + +fail: + prefetch_info_destroy(va_block); + + return NULL; +} + +static void grow_fault_granularity_if_no_thrashing(block_prefetch_info_t *prefetch_info, + uvm_va_block_region_t region, + const uvm_page_mask_t *faulted_pages, + const uvm_page_mask_t *thrashing_pages) +{ + if (!uvm_page_mask_region_empty(faulted_pages, region) && + (!thrashing_pages || uvm_page_mask_region_empty(thrashing_pages, region))) { + region.first += prefetch_info->region.first; + region.outer += prefetch_info->region.first; + uvm_page_mask_region_fill(&prefetch_info->bitmap_tree.pages, region); + } +} + +static void grow_fault_granularity(uvm_va_block_t *va_block, + block_prefetch_info_t *prefetch_info, + const uvm_page_mask_t *faulted_pages, + const uvm_page_mask_t *thrashing_pages) +{ + size_t num_big_pages; + size_t big_page_index; + uvm_va_block_region_t block_region = uvm_va_block_region_from_block(va_block); + + // Migrate whole "prefix" if no page in it is thrashing + if (prefetch_info->big_pages_region.first > 0) { + uvm_va_block_region_t prefix_region = uvm_va_block_region(0, prefetch_info->big_pages_region.first); + + grow_fault_granularity_if_no_thrashing(prefetch_info, prefix_region, faulted_pages, thrashing_pages); + } + + // Migrate whole big pages if they are not thrashing + num_big_pages = uvm_va_block_num_big_pages(va_block, 
prefetch_info->big_page_size); + for (big_page_index = 0; big_page_index < num_big_pages; ++big_page_index) { + uvm_va_block_region_t big_region = uvm_va_block_big_page_region(va_block, + big_page_index, + prefetch_info->big_page_size); + + grow_fault_granularity_if_no_thrashing(prefetch_info, big_region, faulted_pages, thrashing_pages); + } + + // Migrate whole "suffix" if no page in it is thrashing + if (prefetch_info->big_pages_region.outer < block_region.outer) { + uvm_va_block_region_t suffix_region = uvm_va_block_region(prefetch_info->big_pages_region.outer, + block_region.outer); + + grow_fault_granularity_if_no_thrashing(prefetch_info, suffix_region, faulted_pages, thrashing_pages); + } +} + +// Within a block we only allow prefetching to a single processor. Therefore, if two processors +// are accessing non-overlapping regions within the same block they won't benefit from +// prefetching. +// +// TODO: Bug 1778034: [uvm] Explore prefetching to different processors within a VA block +void uvm_perf_prefetch_prenotify_fault_migrations(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t new_residency, + const uvm_page_mask_t *faulted_pages, + uvm_va_block_region_t region) +{ + uvm_page_index_t page_index; + block_prefetch_info_t *prefetch_info; + const uvm_page_mask_t *resident_mask = NULL; + const uvm_page_mask_t *thrashing_pages = NULL; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_va_policy_t *policy = va_block_context->policy; + + uvm_assert_rwsem_locked(&va_space->lock); + + if (!g_uvm_perf_prefetch_enable) + return; + + prefetch_info = prefetch_info_get_create(va_block); + if (!prefetch_info) + return; + + if (!uvm_id_equal(prefetch_info->last_migration_proc_id, new_residency)) { + prefetch_info->last_migration_proc_id = new_residency; + prefetch_info->fault_migrations_to_last_proc = 0; + } + + prefetch_info->pending_prefetch_pages = 0; + + if (UVM_ID_IS_CPU(new_residency) || va_block->gpus[uvm_id_gpu_index(new_residency)] != NULL) + resident_mask = uvm_va_block_resident_mask_get(va_block, new_residency); + + // If this is a first-touch fault and the destination processor is the + // preferred location, populate the whole VA block + if (uvm_processor_mask_empty(&va_block->resident) && + uvm_id_equal(new_residency, policy->preferred_location)) { + uvm_page_mask_region_fill(&prefetch_info->prefetch_pages, uvm_va_block_region_from_block(va_block)); + goto done; + } + + if (resident_mask) + uvm_page_mask_or(&prefetch_info->bitmap_tree.pages, resident_mask, faulted_pages); + else + uvm_page_mask_copy(&prefetch_info->bitmap_tree.pages, faulted_pages); + + // Get the big page size for the new residency + // Assume 64K size if the new residency is the CPU or no GPU va space is + // registered in the current process for this GPU. 
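+ // (64K is the smaller of the two big page sizes this module works with, so it
+ // is the conservative fallback when the GPU big page size cannot be queried.)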
+ if (UVM_ID_IS_GPU(new_residency) && + uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, new_residency)) { + uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, new_residency); + prefetch_info->big_page_size = uvm_va_block_gpu_big_page_size(va_block, gpu); + } + else { + prefetch_info->big_page_size = UVM_PAGE_SIZE_64K; + } + + // Adjust the prefetch tree to big page granularity to make sure that we + // get big page-friendly prefetching hints + prefetch_info->big_pages_region = uvm_va_block_big_page_region_all(va_block, prefetch_info->big_page_size); + if (prefetch_info->big_pages_region.first > 0) { + prefetch_info->region.first = prefetch_info->big_page_size / PAGE_SIZE - prefetch_info->big_pages_region.first; + + uvm_page_mask_shift_left(&prefetch_info->bitmap_tree.pages, + &prefetch_info->bitmap_tree.pages, + prefetch_info->region.first); + } + else { + prefetch_info->region.first = 0; + } + + prefetch_info->region.outer = prefetch_info->region.first + uvm_va_block_num_cpu_pages(va_block); + + thrashing_pages = uvm_perf_thrashing_get_thrashing_pages(va_block); + + // Assume big pages by default. Prefetch the rest of 4KB subregions within the big page + // region unless there is thrashing. + grow_fault_granularity(va_block, prefetch_info, faulted_pages, thrashing_pages); + + // Do not compute prefetch regions with faults on pages that are thrashing + if (thrashing_pages) + uvm_page_mask_andnot(&prefetch_info->migrate_pages, faulted_pages, thrashing_pages); + else + uvm_page_mask_copy(&prefetch_info->migrate_pages, faulted_pages); + + // Update the tree using the migration mask to compute the pages to prefetch + uvm_page_mask_zero(&prefetch_info->prefetch_pages); + for_each_va_block_page_in_region_mask(page_index, &prefetch_info->migrate_pages, region) { + uvm_va_block_region_t prefetch_region = compute_prefetch_region(page_index + prefetch_info->region.first, + prefetch_info); + uvm_page_mask_region_fill(&prefetch_info->prefetch_pages, prefetch_region); + + // Early out if we have already prefetched until the end of the VA block + if (prefetch_region.outer == prefetch_info->region.outer) + break; + } + + // Adjust prefetching page mask + if (prefetch_info->region.first > 0) { + uvm_page_mask_shift_right(&prefetch_info->prefetch_pages, + &prefetch_info->prefetch_pages, + prefetch_info->region.first); + } + +done: + // Do not prefetch pages that are going to be migrated/populated due to a + // fault + uvm_page_mask_andnot(&prefetch_info->prefetch_pages, + &prefetch_info->prefetch_pages, + faulted_pages); + + // TODO: Bug 1765432: prefetching pages that are already mapped on the CPU + // would trigger a remap, which may cause a large overhead. Therefore, + // exclude them from the mask. 
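+ // (In this CPU-residency case, resident_mask is the CPU resident mask computed
+ // above; pages that are already resident and mapped readable on the CPU are
+ // dropped from the prefetch mask.)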
+ if (UVM_ID_IS_CPU(new_residency)) { + uvm_page_mask_and(&va_block_context->scratch_page_mask, + resident_mask, + &va_block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ]); + uvm_page_mask_andnot(&prefetch_info->prefetch_pages, + &prefetch_info->prefetch_pages, + &va_block_context->scratch_page_mask); + } + + // Avoid prefetching pages that are thrashing + if (thrashing_pages) { + uvm_page_mask_andnot(&prefetch_info->prefetch_pages, + &prefetch_info->prefetch_pages, + thrashing_pages); + } + + prefetch_info->fault_migrations_to_last_proc += uvm_page_mask_region_weight(faulted_pages, region); + prefetch_info->pending_prefetch_pages = uvm_page_mask_weight(&prefetch_info->prefetch_pages); +} + +uvm_perf_prefetch_hint_t uvm_perf_prefetch_get_hint(uvm_va_block_t *va_block, + const uvm_page_mask_t *new_residency_mask) +{ + uvm_perf_prefetch_hint_t ret = UVM_PERF_PREFETCH_HINT_NONE(); + block_prefetch_info_t *prefetch_info; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + + if (!g_uvm_perf_prefetch_enable) + return ret; + + if (!va_space->test.page_prefetch_enabled) + return ret; + + prefetch_info = prefetch_info_get(va_block); + if (!prefetch_info) + return ret; + + if (prefetch_info->fault_migrations_to_last_proc >= g_uvm_perf_prefetch_min_faults && + prefetch_info->pending_prefetch_pages > 0) { + bool changed = false; + uvm_range_group_range_t *rgr; + + // Only prefetch in range group ranges which have pages that need to + // move. + uvm_range_group_for_each_range_in(rgr, va_space, va_block->start, va_block->end) { + uvm_va_block_region_t region = uvm_va_block_region_from_start_end(va_block, + max(rgr->node.start, va_block->start), + min(rgr->node.end, va_block->end)); + + if (uvm_page_mask_region_empty(new_residency_mask, region) && + !uvm_page_mask_region_empty(&prefetch_info->prefetch_pages, region)) { + uvm_page_mask_region_clear(&prefetch_info->prefetch_pages, region); + changed = true; + } + } + + if (changed) + prefetch_info->pending_prefetch_pages = uvm_page_mask_weight(&prefetch_info->prefetch_pages); + + if (prefetch_info->pending_prefetch_pages > 0) { + ret.residency = prefetch_info->last_migration_proc_id; + ret.prefetch_pages_mask = &prefetch_info->prefetch_pages; + } + } + + return ret; +} + +void prefetch_block_destroy_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + uvm_va_block_t *va_block; + + UVM_ASSERT(g_uvm_perf_prefetch_enable); + + UVM_ASSERT(event_id == UVM_PERF_EVENT_BLOCK_DESTROY || + event_id == UVM_PERF_EVENT_MODULE_UNLOAD || + event_id == UVM_PERF_EVENT_BLOCK_SHRINK); + + if (event_id == UVM_PERF_EVENT_BLOCK_DESTROY) + va_block = event_data->block_destroy.block; + else if (event_id == UVM_PERF_EVENT_BLOCK_SHRINK) + va_block = event_data->block_shrink.block; + else + va_block = event_data->module_unload.block; + + if (!va_block) + return; + + prefetch_info_destroy(va_block); +} + +NV_STATUS uvm_perf_prefetch_load(uvm_va_space_t *va_space) +{ + if (!g_uvm_perf_prefetch_enable) + return NV_OK; + + return uvm_perf_module_load(&g_module_prefetch, va_space); +} + +void uvm_perf_prefetch_unload(uvm_va_space_t *va_space) +{ + if (!g_uvm_perf_prefetch_enable) + return; + + uvm_perf_module_unload(&g_module_prefetch, va_space); +} + +NV_STATUS uvm_perf_prefetch_init() +{ + g_uvm_perf_prefetch_enable = uvm_perf_prefetch_enable != 0; + + if (!g_uvm_perf_prefetch_enable) + return NV_OK; + + uvm_perf_module_init("perf_prefetch", UVM_PERF_MODULE_TYPE_PREFETCH, g_callbacks_prefetch, + ARRAY_SIZE(g_callbacks_prefetch), &g_module_prefetch); + + 
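// Cache for the per-VA block prefetch state (block_prefetch_info_t); entries
+ // are allocated on demand in prefetch_info_get_create().
+ 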
g_prefetch_info_cache = NV_KMEM_CACHE_CREATE("block_prefetch_info_t", block_prefetch_info_t); + if (!g_prefetch_info_cache) + return NV_ERR_NO_MEMORY; + + if (uvm_perf_prefetch_threshold <= 100) { + g_uvm_perf_prefetch_threshold = uvm_perf_prefetch_threshold; + } + else { + pr_info("Invalid value %u for uvm_perf_prefetch_threshold. Using %u instead\n", + uvm_perf_prefetch_threshold, UVM_PREFETCH_THRESHOLD_DEFAULT); + + g_uvm_perf_prefetch_threshold = UVM_PREFETCH_THRESHOLD_DEFAULT; + } + + if (uvm_perf_prefetch_min_faults >= UVM_PREFETCH_MIN_FAULTS_MIN && + uvm_perf_prefetch_min_faults <= UVM_PREFETCH_MIN_FAULTS_MAX) { + g_uvm_perf_prefetch_min_faults = uvm_perf_prefetch_min_faults; + } + else { + pr_info("Invalid value %u for uvm_perf_prefetch_min_faults. Using %u instead\n", + uvm_perf_prefetch_min_faults, UVM_PREFETCH_MIN_FAULTS_DEFAULT); + + g_uvm_perf_prefetch_min_faults = UVM_PREFETCH_MIN_FAULTS_DEFAULT; + } + + return NV_OK; +} + +void uvm_perf_prefetch_exit() +{ + if (!g_uvm_perf_prefetch_enable) + return; + + kmem_cache_destroy_safe(&g_prefetch_info_cache); +} + +NV_STATUS uvm_test_set_page_prefetch_policy(UVM_TEST_SET_PAGE_PREFETCH_POLICY_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + if (params->policy >= UVM_TEST_PAGE_PREFETCH_POLICY_MAX) + return NV_ERR_INVALID_ARGUMENT; + + uvm_va_space_down_write(va_space); + + if (params->policy == UVM_TEST_PAGE_PREFETCH_POLICY_ENABLE) + va_space->test.page_prefetch_enabled = true; + else + va_space->test.page_prefetch_enabled = false; + + uvm_va_space_up_write(va_space); + + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_perf_prefetch.h b/kernel-open/nvidia-uvm/uvm_perf_prefetch.h new file mode 100644 index 000000000..3e052c451 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_prefetch.h @@ -0,0 +1,61 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE.
+ +*******************************************************************************/ + +#ifndef __UVM_PERF_PREFETCH_H__ +#define __UVM_PERF_PREFETCH_H__ + +#include "uvm_linux.h" +#include "uvm_processors.h" +#include "uvm_va_block_types.h" + +typedef struct +{ + const uvm_page_mask_t *prefetch_pages_mask; + + uvm_processor_id_t residency; +} uvm_perf_prefetch_hint_t; + +// Global initialization/cleanup functions +NV_STATUS uvm_perf_prefetch_init(void); +void uvm_perf_prefetch_exit(void); + +// VA space initialization/cleanup functions +NV_STATUS uvm_perf_prefetch_load(uvm_va_space_t *va_space); +void uvm_perf_prefetch_unload(uvm_va_space_t *va_space); + +// Obtain a hint with the pages that may be prefetched in the block +uvm_perf_prefetch_hint_t uvm_perf_prefetch_get_hint(uvm_va_block_t *va_block, + const uvm_page_mask_t *new_residency_mask); + +// Notify that the given mask of pages within region is going to migrate to +// the given residency. The caller must hold the va_space lock. +void uvm_perf_prefetch_prenotify_fault_migrations(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t new_residency, + const uvm_page_mask_t *migrate_pages, + uvm_va_block_region_t region); + +#define UVM_PERF_PREFETCH_HINT_NONE() \ + (uvm_perf_prefetch_hint_t){ NULL, UVM_ID_INVALID } + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_perf_thrashing.c b/kernel-open/nvidia-uvm/uvm_perf_thrashing.c new file mode 100644 index 000000000..79a060b4e --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_thrashing.c @@ -0,0 +1,2132 @@ +/******************************************************************************* + Copyright (c) 2016-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_api.h" +#include "uvm_perf_events.h" +#include "uvm_perf_module.h" +#include "uvm_perf_thrashing.h" +#include "uvm_perf_utils.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_kvmalloc.h" +#include "uvm_tools.h" +#include "uvm_procfs.h" +#include "uvm_test.h" + +// Number of bits for page-granularity time stamps. Currently we ignore the first 6 bits +// of the timestamp (i.e. we have 64ns resolution, which is good enough) +#define PAGE_THRASHING_LAST_TIME_STAMP_BITS 58 +#define PAGE_THRASHING_NUM_EVENTS_BITS 3 + +#define PAGE_THRASHING_THROTTLING_END_TIME_STAMP_BITS 58 +#define PAGE_THRASHING_THROTTLING_COUNT_BITS 8 + +// Per-page thrashing detection structure.
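+// Time stamps are kept in truncated bitfields to keep the per-page footprint
+// small; the page_thrashing_get/set_time_stamp helpers below convert them to
+// and from full NvU64 values.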
+typedef struct +{ + struct + { + // Last time stamp when a thrashing-related event was recorded + NvU64 last_time_stamp : PAGE_THRASHING_LAST_TIME_STAMP_BITS; + + bool has_migration_events : 1; + + bool has_revocation_events : 1; + + // Number of consecutive "thrashing" events (within the configured + // thrashing lapse) + NvU8 num_thrashing_events : PAGE_THRASHING_NUM_EVENTS_BITS; + + bool pinned : 1; + }; + + struct + { + // Deadline for throttled processors to wake up + NvU64 throttling_end_time_stamp : PAGE_THRASHING_THROTTLING_END_TIME_STAMP_BITS; + + // Number of times a processor has been throttled. This is used to + // determine when the page needs to get pinned. After getting pinned + // this field is always 0. + NvU8 throttling_count : PAGE_THRASHING_THROTTLING_COUNT_BITS; + }; + + // Processors accessing this page + uvm_processor_mask_t processors; + + // Processors that have been throttled. This must be a subset of processors + uvm_processor_mask_t throttled_processors; + + // Memory residency for the page when in pinning phase + uvm_processor_id_t pinned_residency_id; + + // Processor not to be throttled in the current throttling period + uvm_processor_id_t do_not_throttle_processor_id; +} page_thrashing_info_t; + +// Per-VA block thrashing detection structure. This state is protected by the +// VA block lock. +typedef struct +{ + page_thrashing_info_t *pages; + + NvU16 num_thrashing_pages; + + NvU8 thrashing_reset_count; + + uvm_processor_id_t last_processor; + + NvU64 last_time_stamp; + + NvU64 last_thrashing_time_stamp; + + // Stats + NvU32 throttling_count; + + uvm_page_mask_t thrashing_pages; + + struct + { + NvU32 count; + + uvm_page_mask_t mask; + + // List of pinned pages. This list is only used if the pinning timeout + // is not 0. + struct list_head list; + } pinned_pages; +} block_thrashing_info_t; + +// Descriptor for a page that has been pinned due to thrashing. This structure +// is only used if the pinning timeout is not 0. +typedef struct +{ + uvm_va_block_t *va_block; + + // Page index within va_block + uvm_page_index_t page_index; + + // Absolute timestamp after which the page will be unpinned + NvU64 deadline; + + // Entry in the per-VA Space list of pinned pages. See + // va_space_thrashing_info_t::pinned_pages::list. + struct list_head va_space_list_entry; + + // Entry in the per-VA Block list of pinned pages. See + // block_thrashing_info_t::pinned_pages::list. + struct list_head va_block_list_entry; +} pinned_page_t; + +// Per-VA space data structures and policy configuration +typedef struct +{ + // Per-VA space accounting of pinned pages that is used to speculatively + // unpin pages after the configured timeout. This struct is only used if + // the pinning timeout is not 0. + struct + { + // Work descriptor that is executed asynchronously by a helper thread + struct delayed_work dwork; + + // List of pinned pages. They are (mostly) ordered by unpin deadline. + // New entries are inserted blindly at the tail since the expectation + // is that they will have the largest deadline value. However, given + // the drift between when multiple threads query their timestamps and + // add those pages to the list under the lock, it might not be + // strictly ordered. But this is OK since the difference will be very + // small and they will be eventually removed from the list. + // + // Entries are removed when they reach the deadline by the function + // configured in dwork. This list is protected by lock. 
+ struct list_head list; + + uvm_spinlock_t lock; + + uvm_va_block_context_t va_block_context; + + // Flag used to avoid scheduling delayed unpinning operations after + // uvm_perf_thrashing_stop has been called. + bool in_va_space_teardown; + } pinned_pages; + + struct + { + // Whether thrashing mitigation is enabled on this VA space + bool enable; + + // true if the thrashing mitigation parameters have been modified using + // test ioctls + bool test_overrides; + + // + // Fields below are the thrashing mitigation parameters on the VA space + // + unsigned threshold; + + unsigned pin_threshold; + + NvU64 lapse_ns; + + NvU64 nap_ns; + + NvU64 epoch_ns; + + unsigned max_resets; + + NvU64 pin_ns; + } params; + + uvm_va_space_t *va_space; +} va_space_thrashing_info_t; + +typedef struct +{ + // Entry for the per-processor thrashing_stats file in procfs + struct proc_dir_entry *procfs_file; + + // Number of times thrashing is detected + atomic64_t num_thrashing; + + // Number of times the processor was throttled while thrashing + atomic64_t num_throttle; + + // Number of times a page was pinned on this processor while thrashing + atomic64_t num_pin_local; + + // Number of times a page was pinned on a different processor while thrashing + atomic64_t num_pin_remote; +} processor_thrashing_stats_t; + +// Pre-allocated thrashing stats structure for the CPU. This is only valid if +// uvm_procfs_is_debug_enabled() returns true. +static processor_thrashing_stats_t g_cpu_thrashing_stats; + +#define PROCESSOR_THRASHING_STATS_INC(va_space, proc, field) \ + do { \ + processor_thrashing_stats_t *_processor_stats = thrashing_stats_get_or_null(va_space, proc); \ + if (_processor_stats) \ + atomic64_inc(&_processor_stats->field); \ + } while (0) + +// Global caches for the per-VA block thrashing detection structures +static struct kmem_cache *g_va_block_thrashing_info_cache __read_mostly; +static struct kmem_cache *g_pinned_page_cache __read_mostly; + +// +// Tunables for thrashing detection/prevention (configurable via module parameters) +// + +#define UVM_PERF_THRASHING_ENABLE_DEFAULT 1 + +// Enable/disable thrashing performance heuristics +static unsigned uvm_perf_thrashing_enable = UVM_PERF_THRASHING_ENABLE_DEFAULT; + +#define UVM_PERF_THRASHING_THRESHOLD_DEFAULT 3 +#define UVM_PERF_THRASHING_THRESHOLD_MAX ((1 << PAGE_THRASHING_NUM_EVENTS_BITS) - 1) + +// Number of consecutive thrashing events to initiate thrashing prevention +// +// Maximum value is UVM_PERF_THRASHING_THRESHOLD_MAX +static unsigned uvm_perf_thrashing_threshold = UVM_PERF_THRASHING_THRESHOLD_DEFAULT; + +#define UVM_PERF_THRASHING_PIN_THRESHOLD_DEFAULT 10 +#define UVM_PERF_THRASHING_PIN_THRESHOLD_MAX ((1 << PAGE_THRASHING_THROTTLING_COUNT_BITS) - 1) + +// Number of consecutive throttling operations before trying to map remotely +// +// Maximum value is UVM_PERF_THRASHING_PIN_THRESHOLD_MAX +static unsigned uvm_perf_thrashing_pin_threshold = UVM_PERF_THRASHING_PIN_THRESHOLD_DEFAULT; + +// TODO: Bug 1768615: [uvm] Automatically tune default values for thrashing +// detection/prevention parameters +#define UVM_PERF_THRASHING_LAPSE_USEC_DEFAULT 500 +#define UVM_PERF_THRASHING_LAPSE_USEC_DEFAULT_EMULATION (UVM_PERF_THRASHING_LAPSE_USEC_DEFAULT * 800) + +// Lapse of time in microseconds that determines if two consecutive events on +// the same page can be considered thrashing +static unsigned uvm_perf_thrashing_lapse_usec = UVM_PERF_THRASHING_LAPSE_USEC_DEFAULT; + +#define UVM_PERF_THRASHING_NAP_DEFAULT 1 +#define UVM_PERF_THRASHING_NAP_MAX 100 
+
+// Time for which the processor being throttled is forbidden to work on the
+// thrashing page. This value is a multiplier of uvm_perf_thrashing_lapse_usec.
+static unsigned uvm_perf_thrashing_nap = UVM_PERF_THRASHING_NAP_DEFAULT;
+
+#define UVM_PERF_THRASHING_EPOCH_DEFAULT 2000
+
+// Time lapse after which we consider thrashing is no longer happening. This
+// value is a multiplier of uvm_perf_thrashing_lapse_usec.
+static unsigned uvm_perf_thrashing_epoch = UVM_PERF_THRASHING_EPOCH_DEFAULT;
+
+// When pages are pinned and the rest of the thrashing processors are mapped
+// remotely, we lose track of who is accessing the page for the rest of
+// program execution. This can lead to tremendous performance loss if the page
+// is not thrashing anymore and it is always being accessed remotely.
+// In order to avoid that scenario, we use a timer that unpins memory after
+// some time. We use a per-VA space list of pinned pages, sorted by the
+// deadline at which they will be unmapped from remote processors. Therefore,
+// the next remote access will trigger a fault that will migrate the page.
+#define UVM_PERF_THRASHING_PIN_DEFAULT 300
+#define UVM_PERF_THRASHING_PIN_DEFAULT_EMULATION 10
+
+// Time for which a page remains pinned. This value is a multiplier of
+// uvm_perf_thrashing_lapse_usec. 0 means that it is pinned forever.
+static unsigned uvm_perf_thrashing_pin = UVM_PERF_THRASHING_PIN_DEFAULT;
+
+// Number of times a VA block can be reset back to non-thrashing. This
+// mechanism tries to avoid performing optimizations on a block that
+// periodically causes thrashing.
+#define UVM_PERF_THRASHING_MAX_RESETS_DEFAULT 4
+
+static unsigned uvm_perf_thrashing_max_resets = UVM_PERF_THRASHING_MAX_RESETS_DEFAULT;
+
+// Module parameters for the tunables
+module_param(uvm_perf_thrashing_enable, uint, S_IRUGO);
+module_param(uvm_perf_thrashing_threshold, uint, S_IRUGO);
+module_param(uvm_perf_thrashing_pin_threshold, uint, S_IRUGO);
+module_param(uvm_perf_thrashing_lapse_usec, uint, S_IRUGO);
+module_param(uvm_perf_thrashing_nap, uint, S_IRUGO);
+module_param(uvm_perf_thrashing_epoch, uint, S_IRUGO);
+module_param(uvm_perf_thrashing_pin, uint, S_IRUGO);
+module_param(uvm_perf_thrashing_max_resets, uint, S_IRUGO);
+
+// See map_remote_on_atomic_fault in uvm_va_block.c
+unsigned uvm_perf_map_remote_on_native_atomics_fault = 0;
+module_param(uvm_perf_map_remote_on_native_atomics_fault, uint, S_IRUGO);
+
+// Global post-processed values of the module parameters. They can be
+// overridden per VA space.
+static bool g_uvm_perf_thrashing_enable;
+static unsigned g_uvm_perf_thrashing_threshold;
+static unsigned g_uvm_perf_thrashing_pin_threshold;
+static NvU64 g_uvm_perf_thrashing_lapse_usec;
+static NvU64 g_uvm_perf_thrashing_nap;
+static NvU64 g_uvm_perf_thrashing_epoch;
+static NvU64 g_uvm_perf_thrashing_pin;
+static unsigned g_uvm_perf_thrashing_max_resets;
+
+// Helper macros to initialize thrashing parameters from module parameters
+//
+// This helper returns whether the type of the parameter is signed
+#define THRASHING_PARAMETER_IS_SIGNED(v) (((typeof(v)) -1) < 0)
+
+// Macro that initializes the given thrashing parameter and checks that the
+// user value read from _v lies within [_mi:_ma]. Otherwise it is initialized
+// with the given default _d. The final value is stored in a variable named
+// g_##_v, so that variable must be declared, too. Only unsigned parameters
+// are supported.
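+//
+// For illustration, uvm_perf_thrashing_init() below validates the nap
+// parameter with:
+//
+//     INIT_THRASHING_PARAMETER_NONZERO_MAX(uvm_perf_thrashing_nap,
+//                                          UVM_PERF_THRASHING_NAP_DEFAULT,
+//                                          UVM_PERF_THRASHING_NAP_MAX);
+//
+// which keeps the module parameter value if it lies within [1:100] and falls
+// back to the default otherwise, storing the result in
+// g_uvm_perf_thrashing_nap.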
+#define INIT_THRASHING_PARAMETER_MIN_MAX(_v, _d, _mi, _ma) \ + do { \ + unsigned v = (_v); \ + unsigned d = (_d); \ + unsigned mi = (_mi); \ + unsigned ma = (_ma); \ + \ + BUILD_BUG_ON(sizeof(_v) > sizeof(unsigned)); \ + BUILD_BUG_ON(THRASHING_PARAMETER_IS_SIGNED(_v)); \ + \ + UVM_ASSERT(mi <= ma); \ + UVM_ASSERT(d >= mi); \ + UVM_ASSERT(d <= ma); \ + \ + if (v >= mi && v <= ma) { \ + g_##_v = v; \ + } \ + else { \ + pr_info("Invalid value %u for " #_v ". Using %u instead\n", v, d); \ + \ + g_##_v = d; \ + } \ + } while (0) + +#define INIT_THRASHING_PARAMETER(v, d) INIT_THRASHING_PARAMETER_MIN_MAX(v, d, 0u, UINT_MAX) + +#define INIT_THRASHING_PARAMETER_MIN(v, d, mi) INIT_THRASHING_PARAMETER_MIN_MAX(v, d, mi, UINT_MAX) +#define INIT_THRASHING_PARAMETER_MAX(v, d, ma) INIT_THRASHING_PARAMETER_MIN_MAX(v, d, 0u, ma) + +#define INIT_THRASHING_PARAMETER_NONZERO(v, d) INIT_THRASHING_PARAMETER_MIN_MAX(v, d, 1u, UINT_MAX) +#define INIT_THRASHING_PARAMETER_NONZERO_MAX(v, d, ma) INIT_THRASHING_PARAMETER_MIN_MAX(v, d, 1u, ma) + +#define INIT_THRASHING_PARAMETER_TOGGLE(v, d) INIT_THRASHING_PARAMETER_MIN_MAX(v, d, 0u, 1u) + +// Helpers to get/set the time stamp +static NvU64 page_thrashing_get_time_stamp(page_thrashing_info_t *entry) +{ + return entry->last_time_stamp << (64 - PAGE_THRASHING_LAST_TIME_STAMP_BITS); +} + +static void page_thrashing_set_time_stamp(page_thrashing_info_t *entry, NvU64 time_stamp) +{ + entry->last_time_stamp = time_stamp >> (64 - PAGE_THRASHING_LAST_TIME_STAMP_BITS); +} + +static NvU64 page_thrashing_get_throttling_end_time_stamp(page_thrashing_info_t *entry) +{ + return entry->throttling_end_time_stamp << (64 - PAGE_THRASHING_THROTTLING_END_TIME_STAMP_BITS); +} + +static void page_thrashing_set_throttling_end_time_stamp(page_thrashing_info_t *entry, NvU64 time_stamp) +{ + entry->throttling_end_time_stamp = time_stamp >> (64 - PAGE_THRASHING_THROTTLING_END_TIME_STAMP_BITS); +} + +// Performance heuristics module for thrashing +static uvm_perf_module_t g_module_thrashing; + +// Callback declaration for the performance heuristics events +static void thrashing_event_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data); +static void thrashing_block_destroy_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data); + +static uvm_perf_module_event_callback_desc_t g_callbacks_thrashing[] = { + { UVM_PERF_EVENT_BLOCK_DESTROY, thrashing_block_destroy_cb }, + { UVM_PERF_EVENT_MODULE_UNLOAD, thrashing_block_destroy_cb }, + { UVM_PERF_EVENT_BLOCK_SHRINK , thrashing_block_destroy_cb }, + { UVM_PERF_EVENT_MIGRATION, thrashing_event_cb }, + { UVM_PERF_EVENT_REVOCATION, thrashing_event_cb } +}; + +static int nv_procfs_read_thrashing_stats(struct seq_file *s, void *v) +{ + processor_thrashing_stats_t *processor_stats = (processor_thrashing_stats_t *)s->private; + + UVM_ASSERT(processor_stats); + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + UVM_SEQ_OR_DBG_PRINT(s, "thrashing %llu\n", (NvU64)atomic64_read(&processor_stats->num_thrashing)); + UVM_SEQ_OR_DBG_PRINT(s, "throttle %llu\n", (NvU64)atomic64_read(&processor_stats->num_throttle)); + UVM_SEQ_OR_DBG_PRINT(s, "pin_local %llu\n", (NvU64)atomic64_read(&processor_stats->num_pin_local)); + UVM_SEQ_OR_DBG_PRINT(s, "pin_remote %llu\n", (NvU64)atomic64_read(&processor_stats->num_pin_remote)); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_thrashing_stats_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_thrashing_stats(s, v)); +} + 
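+// For illustration, reading one of the thrashing_stats procfs files defined
+// below prints the four counters above, one per line, e.g. (sample values
+// only):
+//
+//     thrashing 1024
+//     throttle 896
+//     pin_local 32
+//     pin_remote 16
+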
+UVM_DEFINE_SINGLE_PROCFS_FILE(thrashing_stats_entry);
+
+#define THRASHING_STATS_FILE_NAME "thrashing_stats"
+
+// Initialization/deinitialization of CPU thrashing stats
+//
+static NV_STATUS cpu_thrashing_stats_init(void)
+{
+    struct proc_dir_entry *cpu_base_dir_entry = uvm_procfs_get_cpu_base_dir();
+
+    if (uvm_procfs_is_debug_enabled()) {
+        UVM_ASSERT(!g_cpu_thrashing_stats.procfs_file);
+        g_cpu_thrashing_stats.procfs_file = NV_CREATE_PROC_FILE(THRASHING_STATS_FILE_NAME,
+                                                                cpu_base_dir_entry,
+                                                                thrashing_stats_entry,
+                                                                &g_cpu_thrashing_stats);
+        if (!g_cpu_thrashing_stats.procfs_file)
+            return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    return NV_OK;
+}
+
+static void cpu_thrashing_stats_exit(void)
+{
+    if (g_cpu_thrashing_stats.procfs_file) {
+        UVM_ASSERT(uvm_procfs_is_debug_enabled());
+        uvm_procfs_destroy_entry(g_cpu_thrashing_stats.procfs_file);
+        g_cpu_thrashing_stats.procfs_file = NULL;
+    }
+}
+
+// Get the thrashing stats struct for the given GPU if it exists
+//
+// No lock may be held. Therefore, the stats must be updated using atomics
+static processor_thrashing_stats_t *gpu_thrashing_stats_get_or_null(uvm_gpu_t *gpu)
+{
+    return uvm_perf_module_type_data(gpu->perf_modules_data, UVM_PERF_MODULE_TYPE_THRASHING);
+}
+
+static processor_thrashing_stats_t *thrashing_stats_get_or_null(uvm_va_space_t *va_space, uvm_processor_id_t id)
+{
+    if (UVM_ID_IS_CPU(id)) {
+        if (g_cpu_thrashing_stats.procfs_file)
+            return &g_cpu_thrashing_stats;
+
+        return NULL;
+    }
+
+    return gpu_thrashing_stats_get_or_null(uvm_va_space_get_gpu(va_space, id));
+}
+
+// Create the thrashing stats struct for the given GPU
+//
+// Global lock needs to be held
+static NV_STATUS gpu_thrashing_stats_create(uvm_gpu_t *gpu)
+{
+    processor_thrashing_stats_t *gpu_thrashing;
+
+    uvm_assert_mutex_locked(&g_uvm_global.global_lock);
+    UVM_ASSERT(gpu_thrashing_stats_get_or_null(gpu) == NULL);
+    UVM_ASSERT(uvm_procfs_is_debug_enabled());
+
+    gpu_thrashing = uvm_kvmalloc_zero(sizeof(*gpu_thrashing));
+    if (!gpu_thrashing)
+        return NV_ERR_NO_MEMORY;
+
+    gpu_thrashing->procfs_file = NV_CREATE_PROC_FILE(THRASHING_STATS_FILE_NAME,
+                                                     gpu->procfs.dir,
+                                                     thrashing_stats_entry,
+                                                     gpu_thrashing);
+    if (!gpu_thrashing->procfs_file) {
+        uvm_kvfree(gpu_thrashing);
+        return NV_ERR_OPERATING_SYSTEM;
+    }
+
+    uvm_perf_module_type_set_data(gpu->perf_modules_data, gpu_thrashing, UVM_PERF_MODULE_TYPE_THRASHING);
+
+    return NV_OK;
+}
+
+static void gpu_thrashing_stats_destroy(uvm_gpu_t *gpu)
+{
+    processor_thrashing_stats_t *gpu_thrashing = gpu_thrashing_stats_get_or_null(gpu);
+
+    uvm_assert_mutex_locked(&g_uvm_global.global_lock);
+
+    if (gpu_thrashing) {
+        uvm_perf_module_type_unset_data(gpu->perf_modules_data, UVM_PERF_MODULE_TYPE_THRASHING);
+
+        if (gpu_thrashing->procfs_file)
+            uvm_procfs_destroy_entry(gpu_thrashing->procfs_file);
+
+        uvm_kvfree(gpu_thrashing);
+    }
+}
+
+// Get the thrashing detection struct for the given VA space if it exists
+//
+// VA space lock needs to be held
+static va_space_thrashing_info_t *va_space_thrashing_info_get_or_null(uvm_va_space_t *va_space)
+{
+    uvm_assert_rwsem_locked(&va_space->lock);
+
+    return uvm_perf_module_type_data(va_space->perf_modules_data, UVM_PERF_MODULE_TYPE_THRASHING);
+}
+
+// Get the thrashing detection struct for the given VA space. It asserts that
+// the information has been previously created.
+// +// VA space lock needs to be held +static va_space_thrashing_info_t *va_space_thrashing_info_get(uvm_va_space_t *va_space) +{ + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get_or_null(va_space); + UVM_ASSERT(va_space_thrashing); + + return va_space_thrashing; +} + +static void va_space_thrashing_info_init_params(va_space_thrashing_info_t *va_space_thrashing) +{ + UVM_ASSERT(!va_space_thrashing->params.test_overrides); + + va_space_thrashing->params.enable = g_uvm_perf_thrashing_enable; + + // Snap the thrashing parameters so that they can be tuned per VA space + va_space_thrashing->params.threshold = g_uvm_perf_thrashing_threshold; + va_space_thrashing->params.pin_threshold = g_uvm_perf_thrashing_pin_threshold; + + // Default thrashing parameters are overriden for simulated/emulated GPUs + if (g_uvm_global.num_simulated_devices > 0 && + (g_uvm_perf_thrashing_lapse_usec == UVM_PERF_THRASHING_LAPSE_USEC_DEFAULT)) { + va_space_thrashing->params.lapse_ns = UVM_PERF_THRASHING_LAPSE_USEC_DEFAULT_EMULATION * 1000; + } + else { + va_space_thrashing->params.lapse_ns = g_uvm_perf_thrashing_lapse_usec * 1000; + } + + va_space_thrashing->params.nap_ns = va_space_thrashing->params.lapse_ns * g_uvm_perf_thrashing_nap; + va_space_thrashing->params.epoch_ns = va_space_thrashing->params.lapse_ns * g_uvm_perf_thrashing_epoch; + + if (g_uvm_global.num_simulated_devices > 0 && (g_uvm_perf_thrashing_pin == UVM_PERF_THRASHING_PIN_DEFAULT)) { + va_space_thrashing->params.pin_ns = va_space_thrashing->params.lapse_ns + * UVM_PERF_THRASHING_PIN_DEFAULT_EMULATION; + } + else { + va_space_thrashing->params.pin_ns = va_space_thrashing->params.lapse_ns * g_uvm_perf_thrashing_pin; + } + + va_space_thrashing->params.max_resets = g_uvm_perf_thrashing_max_resets; +} + +// Create the thrashing detection struct for the given VA space +// +// VA space lock needs to be held in write mode +static va_space_thrashing_info_t *va_space_thrashing_info_create(uvm_va_space_t *va_space) +{ + va_space_thrashing_info_t *va_space_thrashing; + uvm_assert_rwsem_locked_write(&va_space->lock); + + UVM_ASSERT(va_space_thrashing_info_get_or_null(va_space) == NULL); + + va_space_thrashing = uvm_kvmalloc_zero(sizeof(*va_space_thrashing)); + if (va_space_thrashing) { + va_space_thrashing->va_space = va_space; + + va_space_thrashing_info_init_params(va_space_thrashing); + + uvm_perf_module_type_set_data(va_space->perf_modules_data, va_space_thrashing, UVM_PERF_MODULE_TYPE_THRASHING); + } + + return va_space_thrashing; +} + +// Destroy the thrashing detection struct for the given VA space +// +// VA space lock needs to be in write mode +static void va_space_thrashing_info_destroy(uvm_va_space_t *va_space) +{ + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get_or_null(va_space); + uvm_assert_rwsem_locked_write(&va_space->lock); + + if (va_space_thrashing) { + uvm_perf_module_type_unset_data(va_space->perf_modules_data, UVM_PERF_MODULE_TYPE_THRASHING); + uvm_kvfree(va_space_thrashing); + } +} + +// Get the thrashing detection struct for the given block +static block_thrashing_info_t *thrashing_info_get(uvm_va_block_t *va_block) +{ + uvm_assert_mutex_locked(&va_block->lock); + return uvm_perf_module_type_data(va_block->perf_modules_data, UVM_PERF_MODULE_TYPE_THRASHING); +} + +// Get the thrashing detection struct for the given block or create it if it +// does not exist +static block_thrashing_info_t *thrashing_info_get_create(uvm_va_block_t *va_block) +{ + block_thrashing_info_t 
*block_thrashing = thrashing_info_get(va_block); + + BUILD_BUG_ON((1 << 8 * sizeof(block_thrashing->num_thrashing_pages)) < PAGES_PER_UVM_VA_BLOCK); + BUILD_BUG_ON((1 << 16) < UVM_ID_MAX_PROCESSORS); + + if (!block_thrashing) { + block_thrashing = nv_kmem_cache_zalloc(g_va_block_thrashing_info_cache, NV_UVM_GFP_FLAGS); + if (!block_thrashing) + goto done; + + block_thrashing->last_processor = UVM_ID_INVALID; + INIT_LIST_HEAD(&block_thrashing->pinned_pages.list); + + uvm_perf_module_type_set_data(va_block->perf_modules_data, block_thrashing, UVM_PERF_MODULE_TYPE_THRASHING); + } + +done: + return block_thrashing; +} + +static void thrashing_reset_pages_in_region(uvm_va_block_t *va_block, NvU64 address, NvU64 bytes); + +// Destroy the thrashing detection struct for the given block +static void thrashing_info_destroy(uvm_va_block_t *va_block) +{ + block_thrashing_info_t *block_thrashing = thrashing_info_get(va_block); + + if (block_thrashing) { + thrashing_reset_pages_in_region(va_block, va_block->start, uvm_va_block_size(va_block)); + + uvm_perf_module_type_unset_data(va_block->perf_modules_data, UVM_PERF_MODULE_TYPE_THRASHING); + + uvm_kvfree(block_thrashing->pages); + kmem_cache_free(g_va_block_thrashing_info_cache, block_thrashing); + } +} + +void thrashing_block_destroy_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + uvm_va_block_t *va_block; + + UVM_ASSERT(g_uvm_perf_thrashing_enable); + + UVM_ASSERT(event_id == UVM_PERF_EVENT_BLOCK_DESTROY || + event_id == UVM_PERF_EVENT_BLOCK_SHRINK || + event_id == UVM_PERF_EVENT_MODULE_UNLOAD); + + if (event_id == UVM_PERF_EVENT_BLOCK_DESTROY) + va_block = event_data->block_destroy.block; + else if (event_id == UVM_PERF_EVENT_BLOCK_SHRINK) + va_block = event_data->block_shrink.block; + else + va_block = event_data->module_unload.block; + + if (!va_block) + return; + + thrashing_info_destroy(va_block); +} + +// Sanity checks of the thrashing tracking state +static bool thrashing_state_checks(uvm_va_block_t *va_block, + block_thrashing_info_t *block_thrashing, + page_thrashing_info_t *page_thrashing, + uvm_page_index_t page_index) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get(va_space); + + if (!block_thrashing) { + UVM_ASSERT(!page_thrashing); + return true; + } + + UVM_ASSERT(uvm_page_mask_subset(&block_thrashing->pinned_pages.mask, &block_thrashing->thrashing_pages)); + + if (page_thrashing) { + UVM_ASSERT(block_thrashing->pages); + UVM_ASSERT(page_thrashing == &block_thrashing->pages[page_index]); + } + else { + UVM_ASSERT(!uvm_page_mask_test(&block_thrashing->thrashing_pages, page_index)); + return true; + } + + UVM_ASSERT(uvm_processor_mask_subset(&page_thrashing->throttled_processors, + &page_thrashing->processors)); + + if (uvm_page_mask_test(&block_thrashing->thrashing_pages, page_index)) + UVM_ASSERT(page_thrashing->num_thrashing_events >= va_space_thrashing->params.threshold); + + if (page_thrashing->pinned) { + UVM_ASSERT(uvm_page_mask_test(&block_thrashing->pinned_pages.mask, page_index)); + UVM_ASSERT(UVM_ID_IS_VALID(page_thrashing->pinned_residency_id)); + UVM_ASSERT(page_thrashing->throttling_count == 0); + } + else { + UVM_ASSERT(!uvm_page_mask_test(&block_thrashing->pinned_pages.mask, page_index)); + UVM_ASSERT(UVM_ID_IS_INVALID(page_thrashing->pinned_residency_id)); + + if (!uvm_processor_mask_empty(&page_thrashing->throttled_processors)) { + UVM_ASSERT(page_thrashing->throttling_count > 0); + 
UVM_ASSERT(uvm_page_mask_test(&block_thrashing->thrashing_pages, page_index)); + } + } + + return true; +} + +// Update throttling heuristics. Mainly check if a new throttling period has +// started and choose the next processor not to be throttled. This function +// is executed before the thrashing mitigation logic kicks in. +static void thrashing_throttle_update(va_space_thrashing_info_t *va_space_thrashing, + uvm_va_block_t *va_block, + page_thrashing_info_t *page_thrashing, + uvm_processor_id_t processor, + NvU64 time_stamp) +{ + NvU64 current_end_time_stamp = page_thrashing_get_throttling_end_time_stamp(page_thrashing); + + uvm_assert_mutex_locked(&va_block->lock); + + if (time_stamp > current_end_time_stamp) { + NvU64 throttling_end_time_stamp = time_stamp + va_space_thrashing->params.nap_ns; + page_thrashing_set_throttling_end_time_stamp(page_thrashing, throttling_end_time_stamp); + + // Avoid choosing the same processor in consecutive thrashing periods + if (uvm_id_equal(page_thrashing->do_not_throttle_processor_id, processor)) + page_thrashing->do_not_throttle_processor_id = UVM_ID_INVALID; + else + page_thrashing->do_not_throttle_processor_id = processor; + } + else if (UVM_ID_IS_INVALID(page_thrashing->do_not_throttle_processor_id)) { + page_thrashing->do_not_throttle_processor_id = processor; + } +} + +// Throttle the execution of a processor. If this is the first processor being +// throttled for a throttling period, compute the time stamp until which the +// rest of processors will be throttled on fault. +// +// - Page may be pinned (possible in thrashing due to revocation, such as +// in system-wide atomics) +// - Requesting processor must not be throttled at this point. +// +static void thrashing_throttle_processor(uvm_va_block_t *va_block, + block_thrashing_info_t *block_thrashing, + page_thrashing_info_t *page_thrashing, + uvm_page_index_t page_index, + uvm_processor_id_t processor) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + NvU64 address = uvm_va_block_cpu_page_address(va_block, page_index); + + uvm_assert_mutex_locked(&va_block->lock); + + UVM_ASSERT(!uvm_id_equal(processor, page_thrashing->do_not_throttle_processor_id)); + + if (!uvm_processor_mask_test_and_set(&page_thrashing->throttled_processors, processor)) { + // CPU is throttled by sleeping. This is done in uvm_vm_fault so it + // drops the VA block and VA space locks. Throttling start/end events + // are recorded around the sleep calls. + if (UVM_ID_IS_GPU(processor)) + uvm_tools_record_throttling_start(va_space, address, processor); + + if (!page_thrashing->pinned) + UVM_PERF_SATURATING_INC(page_thrashing->throttling_count); + + UVM_PERF_SATURATING_INC(block_thrashing->throttling_count); + } + + UVM_ASSERT(thrashing_state_checks(va_block, block_thrashing, page_thrashing, page_index)); +} + +// Stop throttling on the given processor. If this is the last processor being +// throttled for a throttling period, it will clear the throttling period. +// +// - Page may be pinned (possible in thrashing due to revocation, such as +// in system-wide atomics) +// - Requesting processor must be throttled at this point. 
+//
+static void thrashing_throttle_end_processor(uvm_va_block_t *va_block,
+                                             block_thrashing_info_t *block_thrashing,
+                                             page_thrashing_info_t *page_thrashing,
+                                             uvm_page_index_t page_index,
+                                             uvm_processor_id_t processor)
+{
+    uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
+    NvU64 address = uvm_va_block_cpu_page_address(va_block, page_index);
+
+    UVM_ASSERT(uvm_processor_mask_test(&page_thrashing->throttled_processors, processor));
+    uvm_processor_mask_clear(&page_thrashing->throttled_processors, processor);
+    if (uvm_processor_mask_empty(&page_thrashing->throttled_processors))
+        page_thrashing_set_throttling_end_time_stamp(page_thrashing, 0);
+
+    // See comment regarding throttling start/end events for CPU in
+    // thrashing_throttle_processor
+    if (UVM_ID_IS_GPU(processor))
+        uvm_tools_record_throttling_end(va_space, address, processor);
+
+    UVM_ASSERT(thrashing_state_checks(va_block, block_thrashing, page_thrashing, page_index));
+}
+
+// Clear the throttling state for all processors. This is used while
+// transitioning to pinned state and during thrashing information reset.
+static void thrashing_throttling_reset_page(uvm_va_block_t *va_block,
+                                            block_thrashing_info_t *block_thrashing,
+                                            page_thrashing_info_t *page_thrashing,
+                                            uvm_page_index_t page_index)
+{
+    uvm_processor_id_t processor_id;
+
+    for_each_id_in_mask(processor_id, &page_thrashing->throttled_processors) {
+        thrashing_throttle_end_processor(va_block,
+                                         block_thrashing,
+                                         page_thrashing,
+                                         page_index,
+                                         processor_id);
+    }
+
+    UVM_ASSERT(uvm_processor_mask_empty(&page_thrashing->throttled_processors));
+}
+
+// Find the pinned page descriptor for the given page index. Return NULL if the
+// page is not pinned.
+static pinned_page_t *find_pinned_page(block_thrashing_info_t *block_thrashing, uvm_page_index_t page_index)
+{
+    pinned_page_t *pinned_page;
+
+    list_for_each_entry(pinned_page, &block_thrashing->pinned_pages.list, va_block_list_entry) {
+        if (pinned_page->page_index == page_index)
+            return pinned_page;
+    }
+
+    return NULL;
+}
+
+// Pin a page on the specified processor. All thrashing processors will be
+// mapped remotely to this location, when possible
+//
+// - Requesting processor cannot be throttled
+//
+static NV_STATUS thrashing_pin_page(va_space_thrashing_info_t *va_space_thrashing,
+                                    uvm_va_block_t *va_block,
+                                    block_thrashing_info_t *block_thrashing,
+                                    page_thrashing_info_t *page_thrashing,
+                                    uvm_page_index_t page_index,
+                                    NvU64 time_stamp,
+                                    uvm_processor_id_t residency,
+                                    uvm_processor_id_t requester)
+{
+    uvm_processor_mask_t current_residency;
+
+    uvm_assert_mutex_locked(&va_block->lock);
+    UVM_ASSERT(!uvm_processor_mask_test(&page_thrashing->throttled_processors, requester));
+
+    uvm_va_block_page_resident_processors(va_block, page_index, &current_residency);
+
+    // If we are pinning the page for the first time or we are pinning it on a
+    // different location than the current location, reset the throttling state
+    // to make sure that we flush any pending ThrottlingEnd events.
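+    //
+    // On a first-time pin with a non-zero pin_ns, the code below also
+    // allocates a pinned_page_t with deadline = time_stamp + pin_ns (150 ms
+    // with the default parameters) and, if needed, schedules the delayed
+    // work that will eventually unpin the page in thrashing_unpin_pages().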
+    if (!page_thrashing->pinned || !uvm_processor_mask_test(&current_residency, residency))
+        thrashing_throttling_reset_page(va_block, block_thrashing, page_thrashing, page_index);
+
+    if (!page_thrashing->pinned) {
+        if (va_space_thrashing->params.pin_ns > 0) {
+            pinned_page_t *pinned_page = nv_kmem_cache_zalloc(g_pinned_page_cache, NV_UVM_GFP_FLAGS);
+            if (!pinned_page)
+                return NV_ERR_NO_MEMORY;
+
+            pinned_page->va_block = va_block;
+            pinned_page->page_index = page_index;
+            pinned_page->deadline = time_stamp + va_space_thrashing->params.pin_ns;
+
+            uvm_spin_lock(&va_space_thrashing->pinned_pages.lock);
+
+            list_add_tail(&pinned_page->va_space_list_entry, &va_space_thrashing->pinned_pages.list);
+            list_add_tail(&pinned_page->va_block_list_entry, &block_thrashing->pinned_pages.list);
+
+            // We only schedule the delayed work if the list was empty before
+            // adding this page. Otherwise, we just add it to the list. The
+            // unpinning helper will remove from the list those pages with
+            // deadline prior to its wakeup timestamp and will reschedule
+            // itself if there are remaining pages in the list.
+            if (list_is_singular(&va_space_thrashing->pinned_pages.list) &&
+                !va_space_thrashing->pinned_pages.in_va_space_teardown) {
+                int scheduled;
+                scheduled = schedule_delayed_work(&va_space_thrashing->pinned_pages.dwork,
+                                                  usecs_to_jiffies(va_space_thrashing->params.pin_ns / 1000));
+                UVM_ASSERT(scheduled != 0);
+            }
+
+            uvm_spin_unlock(&va_space_thrashing->pinned_pages.lock);
+        }
+
+        page_thrashing->throttling_count = 0;
+        page_thrashing->pinned = true;
+        UVM_PERF_SATURATING_INC(block_thrashing->pinned_pages.count);
+        uvm_page_mask_set(&block_thrashing->pinned_pages.mask, page_index);
+    }
+
+    page_thrashing->pinned_residency_id = residency;
+
+    UVM_ASSERT(thrashing_state_checks(va_block, block_thrashing, page_thrashing, page_index));
+
+    return NV_OK;
+}
+
+// Unpin a page. This function just clears the pinning tracking state, and does
+// not remove remote mappings on the page. Callers will need to do it manually
+// BEFORE calling this function, if so desired.
+// - Page must be pinned
+//
+static void thrashing_unpin_page(va_space_thrashing_info_t *va_space_thrashing,
+                                 uvm_va_block_t *va_block,
+                                 block_thrashing_info_t *block_thrashing,
+                                 page_thrashing_info_t *page_thrashing,
+                                 uvm_page_index_t page_index)
+{
+    uvm_assert_mutex_locked(&va_block->lock);
+    UVM_ASSERT(page_thrashing->pinned);
+
+    if (va_space_thrashing->params.pin_ns > 0) {
+        bool do_free = false;
+        pinned_page_t *pinned_page = find_pinned_page(block_thrashing, page_index);
+
+        UVM_ASSERT(pinned_page);
+        UVM_ASSERT(pinned_page->page_index == page_index);
+        UVM_ASSERT(pinned_page->va_block == va_block);
+
+        // The va_space_list_entry and va_block_list_entry have special
+        // meanings here:
+        // - va_space_list_entry: when the delayed unpin worker removes the
+        //   pinned_page from this list, it takes the ownership of the page and
+        //   is in charge of freeing it.
+        // - va_block_list_entry: by removing the page from this list,
+        //   thrashing_unpin_page tells the unpin delayed worker to skip
+        //   unpinning that page.
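+        //
+        // In other words: if va_space_list_entry is still linked below, this
+        // path still owns the pinned_page and frees it. If the worker already
+        // removed it from the VA space list, the worker owns and frees it,
+        // and the emptied va_block_list_entry just tells it to skip the
+        // actual unpinning.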
+ uvm_spin_lock(&va_space_thrashing->pinned_pages.lock); + list_del_init(&pinned_page->va_block_list_entry); + + if (!list_empty(&pinned_page->va_space_list_entry)) { + do_free = true; + list_del_init(&pinned_page->va_space_list_entry); + + if (list_empty(&va_space_thrashing->pinned_pages.list)) + cancel_delayed_work(&va_space_thrashing->pinned_pages.dwork); + } + + uvm_spin_unlock(&va_space_thrashing->pinned_pages.lock); + + if (do_free) + kmem_cache_free(g_pinned_page_cache, pinned_page); + } + + page_thrashing->pinned_residency_id = UVM_ID_INVALID; + page_thrashing->pinned = false; + uvm_page_mask_clear(&block_thrashing->pinned_pages.mask, page_index); + + UVM_ASSERT(thrashing_state_checks(va_block, block_thrashing, page_thrashing, page_index)); +} + +static void thrashing_detected(uvm_va_block_t *va_block, + block_thrashing_info_t *block_thrashing, + page_thrashing_info_t *page_thrashing, + uvm_page_index_t page_index, + uvm_processor_id_t processor_id) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + NvU64 address = uvm_va_block_cpu_page_address(va_block, page_index); + + // Thrashing detected, record the event + uvm_tools_record_thrashing(va_space, address, PAGE_SIZE, &page_thrashing->processors); + if (!uvm_page_mask_test_and_set(&block_thrashing->thrashing_pages, page_index)) + ++block_thrashing->num_thrashing_pages; + + PROCESSOR_THRASHING_STATS_INC(va_space, processor_id, num_thrashing); + + UVM_ASSERT(thrashing_state_checks(va_block, block_thrashing, page_thrashing, page_index)); +} + +// Clear the thrashing information for the given page. This function does not +// unmap remote mappings on the page. Callers will need to do it BEFORE calling +// this function, if so desired +static void thrashing_reset_page(va_space_thrashing_info_t *va_space_thrashing, + uvm_va_block_t *va_block, + block_thrashing_info_t *block_thrashing, + uvm_page_index_t page_index) +{ + page_thrashing_info_t *page_thrashing = &block_thrashing->pages[page_index]; + uvm_assert_mutex_locked(&va_block->lock); + + UVM_ASSERT(block_thrashing->num_thrashing_pages > 0); + UVM_ASSERT(uvm_page_mask_test(&block_thrashing->thrashing_pages, page_index)); + UVM_ASSERT(page_thrashing->num_thrashing_events > 0); + + thrashing_throttling_reset_page(va_block, block_thrashing, page_thrashing, page_index); + UVM_ASSERT(uvm_processor_mask_empty(&page_thrashing->throttled_processors)); + + if (page_thrashing->pinned) + thrashing_unpin_page(va_space_thrashing, va_block, block_thrashing, page_thrashing, page_index); + + page_thrashing->last_time_stamp = 0; + page_thrashing->has_migration_events = 0; + page_thrashing->has_revocation_events = 0; + page_thrashing->num_thrashing_events = 0; + uvm_processor_mask_zero(&page_thrashing->processors); + + if (uvm_page_mask_test_and_clear(&block_thrashing->thrashing_pages, page_index)) + --block_thrashing->num_thrashing_pages; + + UVM_ASSERT(thrashing_state_checks(va_block, block_thrashing, page_thrashing, page_index)); +} + +// Call thrashing_reset_page for all the thrashing pages in the region +// described by address and bytes +static void thrashing_reset_pages_in_region(uvm_va_block_t *va_block, NvU64 address, NvU64 bytes) +{ + uvm_page_index_t page_index; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get(va_space); + block_thrashing_info_t *block_thrashing = NULL; + uvm_va_block_region_t region = uvm_va_block_region_from_start_size(va_block, address, bytes); + + 
block_thrashing = thrashing_info_get(va_block); + if (!block_thrashing || !block_thrashing->pages) + return; + + // Update all pages in the region + for_each_va_block_page_in_region_mask(page_index, &block_thrashing->thrashing_pages, region) + thrashing_reset_page(va_space_thrashing, va_block, block_thrashing, page_index); +} + + +// Unmap remote mappings from the given processors on the pinned pages +// described by region and block_thrashing->pinned pages. +static NV_STATUS unmap_remote_pinned_pages_from_processors(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + block_thrashing_info_t *block_thrashing, + uvm_va_block_region_t region, + const uvm_processor_mask_t *unmap_processors) +{ + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + uvm_processor_id_t processor_id; + uvm_va_policy_t *policy = va_block_context->policy; + + uvm_assert_mutex_locked(&va_block->lock); + + for_each_id_in_mask(processor_id, unmap_processors) { + UVM_ASSERT(uvm_id_equal(processor_id, policy->preferred_location) || + !uvm_processor_mask_test(&policy->accessed_by, processor_id)); + + if (uvm_processor_mask_test(&va_block->resident, processor_id)) { + const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, processor_id); + + if (!uvm_page_mask_andnot(&va_block_context->caller_page_mask, + &block_thrashing->pinned_pages.mask, + resident_mask)) + continue; + } + else { + uvm_page_mask_copy(&va_block_context->caller_page_mask, + &block_thrashing->pinned_pages.mask); + } + + status = uvm_va_block_unmap(va_block, + va_block_context, + processor_id, + region, + &va_block_context->caller_page_mask, + &local_tracker); + if (status != NV_OK) + break; + } + + tracker_status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker); + if (status == NV_OK) + status = tracker_status; + + uvm_tracker_deinit(&local_tracker); + + return status; +} + +// Unmap remote mappings from all processors on the pinned pages +// described by region and block_thrashing->pinned pages. +static NV_STATUS unmap_remote_pinned_pages_from_all_processors(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_va_block_region_t region) +{ + block_thrashing_info_t *block_thrashing; + uvm_processor_mask_t unmap_processors; + uvm_va_policy_t *policy; + + uvm_assert_mutex_locked(&va_block->lock); + + block_thrashing = thrashing_info_get(va_block); + if (!block_thrashing || !block_thrashing->pages) + return NV_OK; + + if (uvm_page_mask_empty(&block_thrashing->pinned_pages.mask)) + return NV_OK; + + // Unmap all mapped processors (that are not SetAccessedBy) with + // no copy of the page + policy = uvm_va_policy_get(va_block, uvm_va_block_region_start(va_block, region)); + + uvm_processor_mask_andnot(&unmap_processors, &va_block->mapped, &policy->accessed_by); + + return unmap_remote_pinned_pages_from_processors(va_block, + va_block_context, + block_thrashing, + region, + &unmap_processors); +} + +// Check that we are not migrating pages away from its pinned location and +// that we are not prefetching thrashing pages. 
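+//
+// Note that this helper only performs assertions and always returns false,
+// so it is intended to be called under UVM_ASSERT (see thrashing_event_cb
+// below).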
+static bool migrating_wrong_pages(uvm_va_block_t *va_block, + NvU64 address, + NvU64 bytes, + uvm_processor_id_t proc_id, + uvm_make_resident_cause_t cause) +{ + uvm_page_index_t page_index; + block_thrashing_info_t *block_thrashing = NULL; + uvm_va_block_region_t region = uvm_va_block_region_from_start_size(va_block, address, bytes); + + block_thrashing = thrashing_info_get(va_block); + if (!block_thrashing || !block_thrashing->pages) + return false; + + for_each_va_block_page_in_region(page_index, region) { + page_thrashing_info_t *page_thrashing = &block_thrashing->pages[page_index]; + UVM_ASSERT_MSG(!page_thrashing->pinned || uvm_id_equal(proc_id, page_thrashing->pinned_residency_id), + "Migrating to %u instead of %u\n", + uvm_id_value(proc_id), uvm_id_value(page_thrashing->pinned_residency_id)); + if (cause == UVM_MAKE_RESIDENT_CAUSE_PREFETCH) + UVM_ASSERT(!uvm_page_mask_test(&block_thrashing->thrashing_pages, page_index)); + } + + return false; +} + +static bool is_migration_pinned_pages_update(uvm_va_block_t *va_block, + const uvm_perf_event_data_t *event_data, + NvU64 address, + NvU64 bytes) +{ + const block_thrashing_info_t *block_thrashing = NULL; + uvm_va_block_region_t region = uvm_va_block_region_from_start_size(va_block, address, bytes); + bool ret; + + if (event_data->migration.cause != UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT && + event_data->migration.cause != UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER) { + return false; + } + + block_thrashing = thrashing_info_get(va_block); + if (!block_thrashing || !block_thrashing->pages) + return false; + + ret = uvm_page_mask_region_full(&block_thrashing->pinned_pages.mask, region); + if (ret) { + uvm_page_index_t page_index; + for_each_va_block_page_in_region(page_index, region) { + page_thrashing_info_t *page_thrashing = &block_thrashing->pages[page_index]; + UVM_ASSERT(uvm_id_equal(page_thrashing->pinned_residency_id, event_data->migration.dst)); + } + } + + return ret; +} + +// This function processes migration/revocation events and determines if the +// affected pages are thrashing or not. +void thrashing_event_cb(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + va_space_thrashing_info_t *va_space_thrashing; + block_thrashing_info_t *block_thrashing = NULL; + uvm_va_block_t *va_block; + uvm_va_space_t *va_space; + NvU64 address; + NvU64 bytes; + uvm_processor_id_t processor_id; + uvm_page_index_t page_index; + NvU64 time_stamp; + uvm_va_block_region_t region; + uvm_read_duplication_policy_t read_duplication; + + UVM_ASSERT(g_uvm_perf_thrashing_enable); + + UVM_ASSERT(event_id == UVM_PERF_EVENT_MIGRATION || event_id == UVM_PERF_EVENT_REVOCATION); + + if (event_id == UVM_PERF_EVENT_MIGRATION) { + va_block = event_data->migration.block; + address = event_data->migration.address; + bytes = event_data->migration.bytes; + processor_id = event_data->migration.dst; + + // Skip the thrashing detection logic on eviction as we cannot take + // the VA space lock + if (event_data->migration.cause == UVM_MAKE_RESIDENT_CAUSE_EVICTION) + return; + + // Do not perform checks during the first part of staging copies + if (!uvm_id_equal(event_data->migration.dst, event_data->migration.make_resident_context->dest_id)) + return; + + va_space = uvm_va_block_get_va_space(va_block); + va_space_thrashing = va_space_thrashing_info_get(va_space); + if (!va_space_thrashing->params.enable) + return; + + // TODO: Bug 2046423: HMM will need to look up the policy when + // read duplication is supported. 
+ read_duplication = uvm_va_block_is_hmm(va_block) ? + UVM_READ_DUPLICATION_UNSET : + uvm_va_range_get_policy(va_block->va_range)->read_duplication; + + // We only care about migrations due to replayable faults, access + // counters and page prefetching. For non-replayable faults, UVM will + // try not to migrate memory since CE is transferring data anyway. + // However, we can still see migration events due to initial + // population. The rest of migrations are triggered due to user + // commands or advice (such as read duplication) which takes precedence + // over our heuristics. Therefore, we clear our internal tracking + // state. + if ((event_data->migration.cause != UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT && + event_data->migration.cause != UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER && + event_data->migration.cause != UVM_MAKE_RESIDENT_CAUSE_PREFETCH) || + (event_data->migration.transfer_mode != UVM_VA_BLOCK_TRANSFER_MODE_MOVE) || + (read_duplication == UVM_READ_DUPLICATION_ENABLED)) { + thrashing_reset_pages_in_region(va_block, address, bytes); + return; + } + + // Assert that we are not migrating pages that are pinned away from + // their pinning residency, or prefetching pages that are thrashing + UVM_ASSERT(!migrating_wrong_pages(va_block, address, bytes, processor_id, event_data->migration.cause)); + + // If we are being migrated due to pinning just return + if (is_migration_pinned_pages_update(va_block, event_data, address, bytes)) + return; + } + else { + va_block = event_data->revocation.block; + address = event_data->revocation.address; + bytes = event_data->revocation.bytes; + processor_id = event_data->revocation.proc_id; + + va_space = uvm_va_block_get_va_space(va_block); + va_space_thrashing = va_space_thrashing_info_get(va_space); + if (!va_space_thrashing->params.enable) + return; + } + + block_thrashing = thrashing_info_get_create(va_block); + if (!block_thrashing) + return; + + time_stamp = NV_GETTIME(); + + if (!block_thrashing->pages) { + // Don't create the per-page tracking structure unless there is some potential thrashing within the block + NvU16 num_block_pages; + + if (block_thrashing->last_time_stamp == 0 || + uvm_id_equal(block_thrashing->last_processor, processor_id) || + time_stamp - block_thrashing->last_time_stamp > va_space_thrashing->params.lapse_ns) { + goto done; + } + + num_block_pages = uvm_va_block_size(va_block) / PAGE_SIZE; + + block_thrashing->pages = uvm_kvmalloc_zero(sizeof(*block_thrashing->pages) * num_block_pages); + if (!block_thrashing->pages) + goto done; + + for (page_index = 0; page_index < num_block_pages; ++page_index) { + block_thrashing->pages[page_index].pinned_residency_id = UVM_ID_INVALID; + block_thrashing->pages[page_index].do_not_throttle_processor_id = UVM_ID_INVALID; + } + } + + region = uvm_va_block_region_from_start_size(va_block, address, bytes); + + // Update all pages in the region + for_each_va_block_page_in_region(page_index, region) { + page_thrashing_info_t *page_thrashing = &block_thrashing->pages[page_index]; + NvU64 last_time_stamp = page_thrashing_get_time_stamp(page_thrashing); + + // It is not possible that a pinned page is migrated here, since the + // fault that triggered the migration should have unpinned it in its + // call to uvm_perf_thrashing_get_hint. 
Moreover page prefetching never + // includes pages that are thrashing (including pinning) + if (event_id == UVM_PERF_EVENT_MIGRATION) + UVM_ASSERT(page_thrashing->pinned == 0); + + uvm_processor_mask_set(&page_thrashing->processors, processor_id); + page_thrashing_set_time_stamp(page_thrashing, time_stamp); + + if (last_time_stamp == 0) + continue; + + if (time_stamp - last_time_stamp <= va_space_thrashing->params.lapse_ns) { + UVM_PERF_SATURATING_INC(page_thrashing->num_thrashing_events); + if (page_thrashing->num_thrashing_events == va_space_thrashing->params.threshold) + thrashing_detected(va_block, block_thrashing, page_thrashing, page_index, processor_id); + + if (page_thrashing->num_thrashing_events >= va_space_thrashing->params.threshold) + block_thrashing->last_thrashing_time_stamp = time_stamp; + + if (event_id == UVM_PERF_EVENT_MIGRATION) + page_thrashing->has_migration_events = true; + else + page_thrashing->has_revocation_events = true; + } + else if (page_thrashing->num_thrashing_events >= va_space_thrashing->params.threshold && + !page_thrashing->pinned) { + thrashing_reset_page(va_space_thrashing, va_block, block_thrashing, page_index); + } + } + +done: + block_thrashing->last_time_stamp = time_stamp; + block_thrashing->last_processor = processor_id; +} + +static bool thrashing_processors_can_access(uvm_va_space_t *va_space, + page_thrashing_info_t *page_thrashing, + uvm_processor_id_t to) +{ + if (UVM_ID_IS_INVALID(to)) + return false; + + return uvm_processor_mask_subset(&page_thrashing->processors, + &va_space->accessible_from[uvm_id_value(to)]); +} + +static bool thrashing_processors_have_fast_access_to(uvm_va_space_t *va_space, + page_thrashing_info_t *page_thrashing, + uvm_processor_id_t to) +{ + uvm_processor_mask_t fast_to; + + if (UVM_ID_IS_INVALID(to)) + return false; + + // Combine NVLINK and native atomics mask since we could have PCIe + // atomics in the future + uvm_processor_mask_and(&fast_to, + &va_space->has_nvlink[uvm_id_value(to)], + &va_space->has_native_atomics[uvm_id_value(to)]); + uvm_processor_mask_set(&fast_to, to); + + return uvm_processor_mask_subset(&page_thrashing->processors, &fast_to); +} + +static void thrashing_processors_common_locations(uvm_va_space_t *va_space, + page_thrashing_info_t *page_thrashing, + uvm_processor_mask_t *common_locations) +{ + bool is_first = true; + uvm_processor_id_t id; + + // Find processors that can be accessed from all thrashing processors. For + // example: if A, B and C are thrashing, and A can access B and C can access + // B, too, B would be the common location. 
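+    //
+    // The loop below computes this as the intersection of
+    // va_space->can_access[] over every processor in the thrashing mask.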
+ uvm_processor_mask_zero(common_locations); + + for_each_id_in_mask(id, &page_thrashing->processors) { + if (is_first) + uvm_processor_mask_copy(common_locations, &va_space->can_access[uvm_id_value(id)]); + else + uvm_processor_mask_and(common_locations, common_locations, &va_space->can_access[uvm_id_value(id)]); + + is_first = false; + } +} + +static bool preferred_location_is_thrashing(uvm_processor_id_t preferred_location, + page_thrashing_info_t *page_thrashing) +{ + if (UVM_ID_IS_INVALID(preferred_location)) + return false; + + return uvm_processor_mask_test(&page_thrashing->processors, preferred_location); +} + +static uvm_perf_thrashing_hint_t get_hint_for_migration_thrashing(va_space_thrashing_info_t *va_space_thrashing, + uvm_va_block_t *va_block, + uvm_page_index_t page_index, + page_thrashing_info_t *page_thrashing, + uvm_processor_id_t requester) +{ + uvm_perf_thrashing_hint_t hint; + uvm_processor_id_t closest_resident_id; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_processor_id_t do_not_throttle_processor = page_thrashing->do_not_throttle_processor_id; + uvm_processor_id_t pinned_residency = page_thrashing->pinned_residency_id; + uvm_va_policy_t *policy; + uvm_processor_id_t preferred_location; + + policy = uvm_va_policy_get(va_block, uvm_va_block_cpu_page_address(va_block, page_index)); + + preferred_location = policy->preferred_location; + + hint.type = UVM_PERF_THRASHING_HINT_TYPE_NONE; + + closest_resident_id = uvm_va_block_page_get_closest_resident(va_block, page_index, requester); + UVM_ASSERT(UVM_ID_IS_VALID(closest_resident_id)); + + if (thrashing_processors_can_access(va_space, page_thrashing, preferred_location)) { + // The logic in uvm_va_block_select_residency chooses the preferred + // location if the requester can access it, so all processors should + // naturally get mapped to the preferred without thrashing. However, + // we can get here if preferred location was set after processors + // started thrashing. + // + // TODO: Bug 2527408. Reset thrashing history when a user policy + // changes in a VA block. + hint.type = UVM_PERF_THRASHING_HINT_TYPE_PIN; + hint.pin.residency = preferred_location; + } + else if (!preferred_location_is_thrashing(preferred_location, page_thrashing) && + thrashing_processors_have_fast_access_to(va_space, page_thrashing, closest_resident_id)) { + // This is a fast path for those scenarios in which all thrashing + // processors have fast (NVLINK + native atomics) access to the current + // residency. This is skipped if the preferred location is thrashing and + // not accessible by the rest of thrashing processors. Otherwise, we + // would be in the condition above. + if (UVM_ID_IS_CPU(closest_resident_id)) { + // On P9 systems, we prefer the CPU to map vidmem (since it can + // cache it), so don't map the GPU to sysmem. 
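+            // Hence, for a GPU requester the hint below pins the page to the
+            // requester itself, while a CPU requester leaves the hint unset.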
+ if (UVM_ID_IS_GPU(requester)) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_PIN; + hint.pin.residency = requester; + } + } + else { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_PIN; + hint.pin.residency = closest_resident_id; + } + } + else if (uvm_id_equal(requester, preferred_location)) { + if (page_thrashing->pinned) { + // If the faulting processor is the preferred location, we can + // only: + // 1) Pin to the preferred location + // 2) Throttle if it's pinned elsewhere and we are not the + // do_not_throttle_processor + if (uvm_id_equal(preferred_location, pinned_residency) || + uvm_id_equal(preferred_location, do_not_throttle_processor)) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_PIN; + hint.pin.residency = preferred_location; + } + else { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_THROTTLE; + } + } + else if (!uvm_id_equal(preferred_location, do_not_throttle_processor)) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_THROTTLE; + } + else if (page_thrashing->throttling_count >= va_space_thrashing->params.pin_threshold) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_PIN; + hint.pin.residency = preferred_location; + } + } + else if (page_thrashing->pinned) { + // 1) If the requester is the do_not_throttle_processor pin it to the + // requester if all thrashing processors can access the requester, + // or to a common location, or to the requester anyway if no common + // location found. + // 2) Try to map the current pinned residency. + // 3) Throttle. + if (uvm_id_equal(requester, do_not_throttle_processor)) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_PIN; + + if (thrashing_processors_can_access(va_space, page_thrashing, requester)) { + hint.pin.residency = requester; + } + else { + uvm_processor_mask_t common_locations; + + thrashing_processors_common_locations(va_space, page_thrashing, &common_locations); + if (uvm_processor_mask_empty(&common_locations)) { + hint.pin.residency = requester; + } + else { + // Find the common location that is closest to the requester + hint.pin.residency = uvm_processor_mask_find_closest_id(va_space, &common_locations, requester); + } + } + } + else if (uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(page_thrashing->pinned_residency_id)], requester)) { + UVM_ASSERT(uvm_id_equal(closest_resident_id, pinned_residency)); + + hint.type = UVM_PERF_THRASHING_HINT_TYPE_PIN; + hint.pin.residency = pinned_residency; + } + else { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_THROTTLE; + } + } + else if (!uvm_id_equal(requester, do_not_throttle_processor)) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_THROTTLE; + } + else if (page_thrashing->throttling_count >= va_space_thrashing->params.pin_threshold) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_PIN; + hint.pin.residency = requester; + } + + if (hint.type == UVM_PERF_THRASHING_HINT_TYPE_PIN && + !uvm_va_space_processor_has_memory(va_space, hint.pin.residency)) + hint.pin.residency = UVM_ID_CPU; + + return hint; +} + +// Function called on fault that tells the fault handler if any operation +// should be performed to minimize thrashing. The logic is as follows: +// +// - Phase0: Block thrashing. If a number of consecutive thrashing events have +// been detected on the VA block, per-page thrashing tracking information is +// created. +// - Phase1: Throttling. When several processors fight over a page, we start a +// "throttling period". During that period, only one processor will be able +// to service faults on the page, and the rest will be throttled. 
All CPU +// faults are considered to belong to the same device, even if they come from +// different CPU threads. +// - Phase2: Pinning. After a number of consecutive throttling periods, the page +// is pinned on a specific processor which all of the thrashing processors can +// access. +// - Phase3: Revocation throttling. Even if the page is pinned, it can be still +// thrashing due to revocation events (mainly due to system-wide atomics). In +// that case we keep the page pinned while applying the same algorithm as in +// Phase1. +uvm_perf_thrashing_hint_t uvm_perf_thrashing_get_hint(uvm_va_block_t *va_block, + NvU64 address, + uvm_processor_id_t requester) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get(va_space); + block_thrashing_info_t *block_thrashing = NULL; + page_thrashing_info_t *page_thrashing = NULL; + uvm_perf_thrashing_hint_t hint; + uvm_page_index_t page_index = uvm_va_block_cpu_page_index(va_block, address); + NvU64 time_stamp; + NvU64 last_time_stamp; + + hint.type = UVM_PERF_THRASHING_HINT_TYPE_NONE; + + if (!va_space_thrashing->params.enable) + return hint; + + // If we don't have enough memory to store thrashing information, we assume + // no thrashing + block_thrashing = thrashing_info_get(va_block); + if (!block_thrashing) + return hint; + + // If the per-page tracking structure has not been created yet, we assume + // no thrashing + if (!block_thrashing->pages) + return hint; + + time_stamp = NV_GETTIME(); + + if (block_thrashing->last_thrashing_time_stamp != 0 && + (time_stamp - block_thrashing->last_thrashing_time_stamp > va_space_thrashing->params.epoch_ns) && + block_thrashing->pinned_pages.count == 0 && + block_thrashing->thrashing_reset_count < va_space_thrashing->params.max_resets) { + uvm_page_index_t reset_page_index; + + ++block_thrashing->thrashing_reset_count; + + // Clear the state of throttled processors to make sure that we flush + // any pending ThrottlingEnd events + for_each_va_block_page_in_mask(reset_page_index, &block_thrashing->thrashing_pages, va_block) { + thrashing_throttling_reset_page(va_block, + block_thrashing, + &block_thrashing->pages[reset_page_index], + reset_page_index); + } + + // Reset per-page tracking structure + // TODO: Bug 1769904 [uvm] Speculatively unpin pages that were pinned on a specific memory due to thrashing + UVM_ASSERT(uvm_page_mask_empty(&block_thrashing->pinned_pages.mask)); + uvm_kvfree(block_thrashing->pages); + block_thrashing->pages = NULL; + block_thrashing->num_thrashing_pages = 0; + block_thrashing->last_processor = UVM_ID_INVALID; + block_thrashing->last_time_stamp = 0; + block_thrashing->last_thrashing_time_stamp = 0; + uvm_page_mask_zero(&block_thrashing->thrashing_pages); + goto done; + } + + page_thrashing = &block_thrashing->pages[page_index]; + + // Not enough thrashing events yet + if (page_thrashing->num_thrashing_events < va_space_thrashing->params.threshold) + goto done; + + // If the requesting processor is throttled, check the throttling end time + // stamp + if (uvm_processor_mask_test(&page_thrashing->throttled_processors, requester)) { + NvU64 throttling_end_time_stamp = page_thrashing_get_throttling_end_time_stamp(page_thrashing); + if (time_stamp < throttling_end_time_stamp && + !uvm_id_equal(requester, page_thrashing->do_not_throttle_processor_id)) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_THROTTLE; + goto done; + } + + thrashing_throttle_end_processor(va_block, block_thrashing, 
page_thrashing, page_index, requester); + } + + UVM_ASSERT(!uvm_processor_mask_test(&page_thrashing->throttled_processors, requester)); + + last_time_stamp = page_thrashing_get_time_stamp(page_thrashing); + + // If the lapse since the last thrashing event is longer than a thrashing + // lapse we are no longer thrashing + if (time_stamp - last_time_stamp > va_space_thrashing->params.lapse_ns && + !page_thrashing->pinned) { + goto done; + } + + // Set the requesting processor in the thrashing processors mask + uvm_processor_mask_set(&page_thrashing->processors, requester); + + UVM_ASSERT(page_thrashing->has_migration_events || page_thrashing->has_revocation_events); + + // Update throttling heuristics + thrashing_throttle_update(va_space_thrashing, va_block, page_thrashing, requester, time_stamp); + + if (page_thrashing->pinned && + page_thrashing->has_revocation_events && + !uvm_id_equal(requester, page_thrashing->do_not_throttle_processor_id)) { + + // When we get revocation thrashing, this is due to system-wide atomics + // downgrading the permissions of other processors. Revocations only + // happen when several processors are mapping the same page and there + // are no migrations. In this case, the only thing we can do is to + // throttle the execution of the processors. + hint.type = UVM_PERF_THRASHING_HINT_TYPE_THROTTLE; + } + else { + hint = get_hint_for_migration_thrashing(va_space_thrashing, + va_block, + page_index, + page_thrashing, + requester); + } + +done: + if (hint.type == UVM_PERF_THRASHING_HINT_TYPE_PIN) { + NV_STATUS status = thrashing_pin_page(va_space_thrashing, + va_block, + block_thrashing, + page_thrashing, + page_index, + time_stamp, + hint.pin.residency, + requester); + + // If there was some problem pinning the page (i.e. OOM), demote to + // throttling) + if (status != NV_OK) { + hint.type = UVM_PERF_THRASHING_HINT_TYPE_THROTTLE; + } + else { + if (uvm_id_equal(hint.pin.residency, requester)) + PROCESSOR_THRASHING_STATS_INC(va_space, requester, num_pin_local); + else + PROCESSOR_THRASHING_STATS_INC(va_space, requester, num_pin_remote); + + uvm_processor_mask_copy(&hint.pin.processors, &page_thrashing->processors); + } + } + + if (hint.type == UVM_PERF_THRASHING_HINT_TYPE_THROTTLE) { + thrashing_throttle_processor(va_block, + block_thrashing, + page_thrashing, + page_index, + requester); + + PROCESSOR_THRASHING_STATS_INC(va_space, requester, num_throttle); + + hint.throttle.end_time_stamp = page_thrashing_get_throttling_end_time_stamp(page_thrashing); + } + else if (hint.type == UVM_PERF_THRASHING_HINT_TYPE_NONE && page_thrashing) { + UVM_ASSERT(!uvm_processor_mask_test(&page_thrashing->throttled_processors, requester)); + UVM_ASSERT(!page_thrashing->pinned); + UVM_ASSERT(UVM_ID_IS_INVALID(page_thrashing->pinned_residency_id)); + } + + return hint; +} + +uvm_processor_mask_t *uvm_perf_thrashing_get_thrashing_processors(uvm_va_block_t *va_block, NvU64 address) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get(va_space); + block_thrashing_info_t *block_thrashing = NULL; + page_thrashing_info_t *page_thrashing = NULL; + uvm_page_index_t page_index = uvm_va_block_cpu_page_index(va_block, address); + + UVM_ASSERT(g_uvm_perf_thrashing_enable); + UVM_ASSERT(va_space_thrashing->params.enable); + + block_thrashing = thrashing_info_get(va_block); + UVM_ASSERT(block_thrashing); + + UVM_ASSERT(block_thrashing->pages); + + page_thrashing = &block_thrashing->pages[page_index]; + + 
return &page_thrashing->processors; +} + +const uvm_page_mask_t *uvm_perf_thrashing_get_thrashing_pages(uvm_va_block_t *va_block) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get(va_space); + block_thrashing_info_t *block_thrashing = NULL; + + if (!va_space_thrashing->params.enable) + return NULL; + + block_thrashing = thrashing_info_get(va_block); + if (!block_thrashing) + return NULL; + + if (block_thrashing->num_thrashing_pages == 0) + return NULL; + + return &block_thrashing->thrashing_pages; +} + +bool uvm_perf_thrashing_is_block_thrashing(uvm_va_block_t *va_block) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get(va_space); + block_thrashing_info_t *block_thrashing = NULL; + + if (!va_space_thrashing->params.enable) + return false; + + block_thrashing = thrashing_info_get(va_block); + if (!block_thrashing) + return false; + + return block_thrashing->num_thrashing_pages > 0; +} + +#define TIMER_GRANULARITY_NS 20000ULL +static void thrashing_unpin_pages(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + va_space_thrashing_info_t *va_space_thrashing = container_of(dwork, va_space_thrashing_info_t, pinned_pages.dwork); + uvm_va_space_t *va_space = va_space_thrashing->va_space; + + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + + // Take the VA space lock so that VA blocks don't go away during this + // operation. + uvm_va_space_down_read(va_space); + + if (va_space_thrashing->pinned_pages.in_va_space_teardown) + goto exit_no_list_lock; + + while (1) { + pinned_page_t *pinned_page; + uvm_va_block_t *va_block; + + uvm_spin_lock(&va_space_thrashing->pinned_pages.lock); + pinned_page = list_first_entry_or_null(&va_space_thrashing->pinned_pages.list, + pinned_page_t, + va_space_list_entry); + + if (pinned_page) { + NvU64 now = NV_GETTIME(); + + if (pinned_page->deadline <= (now + TIMER_GRANULARITY_NS)) { + list_del_init(&pinned_page->va_space_list_entry); + + // Work cancellation is left to thrashing_unpin_page() as this + // would only catch the following pattern: + // - Worker thread A is in thrashing_unpin_pages but hasn't + // looked at the list yet + // - Thread B then removes the last entry + // - Thread C then adds a new entry and re-schedules work + // - Worker thread A removes the entry added by C because the + // deadline has passed (unlikely), then cancels the work + // scheduled by C. 
+ } + else { + NvU64 elapsed_us = (pinned_page->deadline - now) / 1000; + + schedule_delayed_work(&va_space_thrashing->pinned_pages.dwork, usecs_to_jiffies(elapsed_us)); + uvm_spin_unlock(&va_space_thrashing->pinned_pages.lock); + break; + } + } + + uvm_spin_unlock(&va_space_thrashing->pinned_pages.lock); + + if (!pinned_page) + break; + + va_block = pinned_page->va_block; + uvm_mutex_lock(&va_block->lock); + + // Only operate if the pinned page's tracking state isn't already + // cleared by thrashing_unpin_page() + if (!list_empty(&pinned_page->va_block_list_entry)) { + uvm_page_index_t page_index = pinned_page->page_index; + block_thrashing_info_t *block_thrashing = thrashing_info_get(va_block); + + UVM_ASSERT(block_thrashing); + UVM_ASSERT(uvm_page_mask_test(&block_thrashing->pinned_pages.mask, page_index)); + + va_space_thrashing->pinned_pages.va_block_context.policy = + uvm_va_policy_get(va_block, uvm_va_block_cpu_page_address(va_block, page_index)); + + unmap_remote_pinned_pages_from_all_processors(va_block, + &va_space_thrashing->pinned_pages.va_block_context, + uvm_va_block_region_for_page(page_index)); + thrashing_reset_page(va_space_thrashing, va_block, block_thrashing, page_index); + } + + uvm_mutex_unlock(&va_block->lock); + kmem_cache_free(g_pinned_page_cache, pinned_page); + } + +exit_no_list_lock: + uvm_va_space_up_read(va_space); +} + +static void thrashing_unpin_pages_entry(struct work_struct *work) +{ + UVM_ENTRY_VOID(thrashing_unpin_pages(work)); +} + +NV_STATUS uvm_perf_thrashing_load(uvm_va_space_t *va_space) +{ + va_space_thrashing_info_t *va_space_thrashing; + NV_STATUS status; + + status = uvm_perf_module_load(&g_module_thrashing, va_space); + if (status != NV_OK) + return status; + + va_space_thrashing = va_space_thrashing_info_create(va_space); + if (!va_space_thrashing) + return NV_ERR_NO_MEMORY; + + uvm_spin_lock_init(&va_space_thrashing->pinned_pages.lock, UVM_LOCK_ORDER_LEAF); + INIT_LIST_HEAD(&va_space_thrashing->pinned_pages.list); + INIT_DELAYED_WORK(&va_space_thrashing->pinned_pages.dwork, thrashing_unpin_pages_entry); + + return NV_OK; +} + +void uvm_perf_thrashing_stop(uvm_va_space_t *va_space) +{ + va_space_thrashing_info_t *va_space_thrashing; + + uvm_va_space_down_write(va_space); + va_space_thrashing = va_space_thrashing_info_get_or_null(va_space); + + // Prevent further unpinning operations from being scheduled + if (va_space_thrashing) + va_space_thrashing->pinned_pages.in_va_space_teardown = true; + + uvm_va_space_up_write(va_space); + + // Cancel any pending work. We can safely access va_space_thrashing + // because this function is called once from the VA space teardown path, + // and the only function that frees it is uvm_perf_thrashing_unload, + // which is called later in the teardown path. 
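+    //
+    // Roughly, the intended teardown ordering (per the comments above) is:
+    //   1) uvm_perf_thrashing_stop: set in_va_space_teardown and cancel the
+    //      delayed work, so no further unpin work can be scheduled or run.
+    //   2) uvm_perf_thrashing_unload: assert that the pinned pages list is
+    //      empty and destroy va_space_thrashing.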
+ if (va_space_thrashing) + (void)cancel_delayed_work_sync(&va_space_thrashing->pinned_pages.dwork); +} + +void uvm_perf_thrashing_unload(uvm_va_space_t *va_space) +{ + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get_or_null(va_space); + + uvm_perf_module_unload(&g_module_thrashing, va_space); + + // Make sure that there are not pending work items + if (va_space_thrashing) { + UVM_ASSERT(va_space_thrashing->pinned_pages.in_va_space_teardown); + UVM_ASSERT(list_empty(&va_space_thrashing->pinned_pages.list)); + + va_space_thrashing_info_destroy(va_space); + } +} + +NV_STATUS uvm_perf_thrashing_register_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu) +{ + // If a simulated GPU is registered, re-initialize thrashing parameters in + // case they need to be adjusted + if (g_uvm_global.num_simulated_devices > 0) { + va_space_thrashing_info_t *va_space_thrashing = va_space_thrashing_info_get(va_space); + + if (!va_space_thrashing->params.test_overrides) + va_space_thrashing_info_init_params(va_space_thrashing); + } + + return NV_OK; +} + +NV_STATUS uvm_perf_thrashing_init() +{ + NV_STATUS status; + + INIT_THRASHING_PARAMETER_TOGGLE(uvm_perf_thrashing_enable, UVM_PERF_THRASHING_ENABLE_DEFAULT); + if (!g_uvm_perf_thrashing_enable) + return NV_OK; + + uvm_perf_module_init("perf_thrashing", + UVM_PERF_MODULE_TYPE_THRASHING, + g_callbacks_thrashing, + ARRAY_SIZE(g_callbacks_thrashing), + &g_module_thrashing); + + INIT_THRASHING_PARAMETER_NONZERO_MAX(uvm_perf_thrashing_threshold, + UVM_PERF_THRASHING_THRESHOLD_DEFAULT, + UVM_PERF_THRASHING_THRESHOLD_MAX); + + INIT_THRASHING_PARAMETER_NONZERO_MAX(uvm_perf_thrashing_pin_threshold, + UVM_PERF_THRASHING_PIN_THRESHOLD_DEFAULT, + UVM_PERF_THRASHING_PIN_THRESHOLD_MAX); + + INIT_THRASHING_PARAMETER_NONZERO(uvm_perf_thrashing_lapse_usec, UVM_PERF_THRASHING_LAPSE_USEC_DEFAULT); + + INIT_THRASHING_PARAMETER_NONZERO_MAX(uvm_perf_thrashing_nap, + UVM_PERF_THRASHING_NAP_DEFAULT, + UVM_PERF_THRASHING_NAP_MAX); + + + INIT_THRASHING_PARAMETER_NONZERO(uvm_perf_thrashing_epoch, UVM_PERF_THRASHING_EPOCH_DEFAULT); + + INIT_THRASHING_PARAMETER(uvm_perf_thrashing_pin, UVM_PERF_THRASHING_PIN_DEFAULT); + + INIT_THRASHING_PARAMETER(uvm_perf_thrashing_max_resets, UVM_PERF_THRASHING_MAX_RESETS_DEFAULT); + + g_va_block_thrashing_info_cache = NV_KMEM_CACHE_CREATE("uvm_block_thrashing_info_t", block_thrashing_info_t); + if (!g_va_block_thrashing_info_cache) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + g_pinned_page_cache = NV_KMEM_CACHE_CREATE("uvm_pinned_page_t", pinned_page_t); + if (!g_pinned_page_cache) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + status = cpu_thrashing_stats_init(); + if (status != NV_OK) + goto error; + + return NV_OK; + +error: + uvm_perf_thrashing_exit(); + + return status; +} + +void uvm_perf_thrashing_exit() +{ + cpu_thrashing_stats_exit(); + + kmem_cache_destroy_safe(&g_va_block_thrashing_info_cache); + kmem_cache_destroy_safe(&g_pinned_page_cache); +} + +NV_STATUS uvm_perf_thrashing_add_gpu(uvm_gpu_t *gpu) +{ + if (!uvm_procfs_is_debug_enabled()) + return NV_OK; + + return gpu_thrashing_stats_create(gpu); +} + +void uvm_perf_thrashing_remove_gpu(uvm_gpu_t *gpu) +{ + gpu_thrashing_stats_destroy(gpu); +} + +NV_STATUS uvm_test_get_page_thrashing_policy(UVM_TEST_GET_PAGE_THRASHING_POLICY_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + va_space_thrashing_info_t *va_space_thrashing; + + uvm_va_space_down_read(va_space); + + va_space_thrashing = 
va_space_thrashing_info_get(va_space); + + if (va_space_thrashing->params.enable) { + params->policy = UVM_TEST_PAGE_THRASHING_POLICY_ENABLE; + params->nap_ns = va_space_thrashing->params.nap_ns; + params->pin_ns = va_space_thrashing->params.pin_ns; + params->map_remote_on_native_atomics_fault = uvm_perf_map_remote_on_native_atomics_fault != 0; + } + else { + params->policy = UVM_TEST_PAGE_THRASHING_POLICY_DISABLE; + } + + uvm_va_space_up_read(va_space); + + return NV_OK; +} + +NV_STATUS uvm_test_set_page_thrashing_policy(UVM_TEST_SET_PAGE_THRASHING_POLICY_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + va_space_thrashing_info_t *va_space_thrashing; + + if (params->policy >= UVM_TEST_PAGE_THRASHING_POLICY_MAX) + return NV_ERR_INVALID_ARGUMENT; + + if (!g_uvm_perf_thrashing_enable) + return NV_ERR_INVALID_STATE; + + uvm_va_space_down_write(va_space); + + va_space_thrashing = va_space_thrashing_info_get(va_space); + va_space_thrashing->params.test_overrides = true; + + if (params->policy == UVM_TEST_PAGE_THRASHING_POLICY_ENABLE) { + if (va_space_thrashing->params.enable) + goto done_unlock_va_space; + + va_space_thrashing->params.pin_ns = params->pin_ns; + va_space_thrashing->params.enable = true; + } + else { + if (!va_space_thrashing->params.enable) + goto done_unlock_va_space; + + va_space_thrashing->params.enable = false; + } + + // When disabling thrashing detection, destroy the thrashing tracking + // information for all VA blocks and unpin pages + if (!va_space_thrashing->params.enable) { + uvm_va_range_t *va_range; + + uvm_for_each_va_range(va_range, va_space) { + uvm_va_block_t *va_block; + + if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED) + continue; + + for_each_va_block_in_va_range(va_range, va_block) { + uvm_va_block_region_t va_block_region = uvm_va_block_region_from_block(va_block); + uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, NULL); + + block_context->policy = uvm_va_range_get_policy(va_range); + + uvm_mutex_lock(&va_block->lock); + + // Unmap may split PTEs and require a retry. Needs to be called + // before the pinned pages information is destroyed. 
+ status = UVM_VA_BLOCK_RETRY_LOCKED(va_block, + NULL, + unmap_remote_pinned_pages_from_all_processors(va_block, + block_context, + va_block_region)); + + thrashing_info_destroy(va_block); + + uvm_mutex_unlock(&va_block->lock); + + // Re-enable thrashing on failure to avoid getting asserts + // about having state while thrashing is disabled + if (status != NV_OK) { + va_space_thrashing->params.enable = true; + goto done_unlock_va_space; + } + } + } + } + +done_unlock_va_space: + uvm_va_space_up_write(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_perf_thrashing.h b/kernel-open/nvidia-uvm/uvm_perf_thrashing.h new file mode 100644 index 000000000..596c97e54 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_thrashing.h @@ -0,0 +1,106 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PERF_THRASHING_H__ +#define __UVM_PERF_THRASHING_H__ + +#include "uvm_linux.h" +#include "uvm_extern_decl.h" +#include "uvm_forward_decl.h" +#include "uvm_processors.h" +#include "uvm_va_block_types.h" + +typedef enum +{ + // No thrashing detected + UVM_PERF_THRASHING_HINT_TYPE_NONE = 0, + + // Map remotely to avoid future faults (does not help with revocations due + // to system-wide atomics) + UVM_PERF_THRASHING_HINT_TYPE_PIN = 1, + + // Throttle execution of the calling processor (this can be implemented by + // sleeping or handing other faults) + UVM_PERF_THRASHING_HINT_TYPE_THROTTLE = 2, + + // TODO: Bug 1877578: Implement heuristics-driven read-duplication + // Add a thrashing hint type to read-duplicate a page when it is being + // accessed read-only from different processors +} uvm_perf_thrashing_hint_type_t; + +typedef struct +{ + uvm_perf_thrashing_hint_type_t type; + + union + { + struct + { + // Map to this processor, which must be accessible, at least, from + // the calling processor + uvm_processor_id_t residency; + + // Processors to be mapped, when possible, to the new residency + uvm_processor_mask_t processors; + } pin; + + struct + { + // Absolute timestamp in ns after which the throttled processor is + // allowed to start servicing faults on the thrashing page. 
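+            //
+            // As a rough illustration of how a caller might honor the hint
+            // (the actual fault-servicing code lives elsewhere):
+            //
+            //     if (hint.type == UVM_PERF_THRASHING_HINT_TYPE_THROTTLE) {
+            //         // Defer servicing until the deadline below, e.g. by
+            //         // sleeping or by servicing other pending faults first.
+            //         wait_until_ns(hint.throttle.end_time_stamp); // hypothetical helper
+            //     }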
+ NvU64 end_time_stamp; + } throttle; + }; +} uvm_perf_thrashing_hint_t; + +// Obtain a hint to prevent thrashing on the page with given address +uvm_perf_thrashing_hint_t uvm_perf_thrashing_get_hint(uvm_va_block_t *va_block, NvU64 address, + uvm_processor_id_t requester); + +// Obtain a pointer to a mask with the processors that are thrashing on the +// given page. This function assumes that thrashing has been just reported on +// the page. It will fail otherwise. +uvm_processor_mask_t *uvm_perf_thrashing_get_thrashing_processors(uvm_va_block_t *va_block, NvU64 address); + +const uvm_page_mask_t *uvm_perf_thrashing_get_thrashing_pages(uvm_va_block_t *va_block); + +// Returns true if any page in the block is thrashing, or false otherwise +bool uvm_perf_thrashing_is_block_thrashing(uvm_va_block_t *va_block); + +// Global initialization/cleanup functions +NV_STATUS uvm_perf_thrashing_init(void); +void uvm_perf_thrashing_exit(void); + +// Per-GPU initialization/cleanup functions. See comments in +// uvm_perf_heuristics.h +NV_STATUS uvm_perf_thrashing_add_gpu(uvm_gpu_t *gpu); +void uvm_perf_thrashing_remove_gpu(uvm_gpu_t *gpu); + +// VA space Initialization/cleanup functions. See comments in +// uvm_perf_heuristics.h +NV_STATUS uvm_perf_thrashing_load(uvm_va_space_t *va_space); +NV_STATUS uvm_perf_thrashing_register_gpu(uvm_va_space_t *va_space, uvm_gpu_t *gpu); +void uvm_perf_thrashing_stop(uvm_va_space_t *va_space); +void uvm_perf_thrashing_unload(uvm_va_space_t *va_space); + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_perf_utils.c b/kernel-open/nvidia-uvm/uvm_perf_utils.c new file mode 100644 index 000000000..ac524390e --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_utils.c @@ -0,0 +1,84 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_perf_utils.h" +#include "uvm_kvmalloc.h" + +static inline size_t leaves_to_levels(size_t leaf_count) +{ + return ilog2(roundup_pow_of_two(leaf_count)) + 1; +} + +// Helper function to compute all the nodes required to store a complete binary tree for the given number of leaves +static inline size_t leaves_to_nodes(size_t leaf_count) +{ + size_t ret = 0; + do { + ret += leaf_count; + leaf_count = (leaf_count == 1)? 
0 : (leaf_count + 1) / 2; + } while (leaf_count > 0); + + return ret; +} + +NV_STATUS uvm_perf_tree_init(uvm_perf_tree_t *tree, size_t node_size, size_t leaf_count) +{ + NV_STATUS status; + size_t bytes; + + status = NV_OK; + + tree->leaf_count = leaf_count; + tree->level_count = leaves_to_levels(leaf_count); + tree->node_count = leaves_to_nodes(leaf_count); + tree->pow2_leaf_count = roundup_pow_of_two(tree->leaf_count); + + bytes = tree->node_count * node_size; + + // With this check we make sure that our shift operations will not overflow + UVM_ASSERT(tree->level_count <= (sizeof(size_t) * 8 - 1)); + tree->nodes = uvm_kvmalloc_zero(bytes); + if (!tree->nodes) + status = NV_ERR_NO_MEMORY; + return status; +} + +void uvm_perf_tree_destroy(uvm_perf_tree_t *tree) +{ + UVM_ASSERT(tree); + UVM_ASSERT(tree->nodes); + + uvm_kvfree(tree->nodes); + tree->leaf_count = 0; + tree->nodes = NULL; +} + +void uvm_perf_tree_clear(uvm_perf_tree_t *tree, size_t node_size) +{ + UVM_ASSERT(tree); + UVM_ASSERT(tree->nodes); + + memset(tree->nodes, 0, tree->node_count * node_size); +} diff --git a/kernel-open/nvidia-uvm/uvm_perf_utils.h b/kernel-open/nvidia-uvm/uvm_perf_utils.h new file mode 100644 index 000000000..01f011901 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_utils.h @@ -0,0 +1,265 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PERF_UTILS_H__ +#define __UVM_PERF_UTILS_H__ + +#include "uvm_common.h" + +// Macros to perform increments that saturate at the maximum values allowed by the variable underlying storage +#define UVM_PERF_SATURATING_ADD(counter,value) \ +({ \ + NvU64 expected; \ + NvU64 old; \ + \ + old = (counter); \ + expected = (NvU64)(counter) + (NvU64)(value); \ + (counter) += (value); \ + if ((counter) != expected || expected < old) \ + (counter) = -1; \ + (counter); \ +}) + +#define UVM_PERF_SATURATING_INC(counter) UVM_PERF_SATURATING_ADD((counter), 1) + +// Array-based tree type for fix-sized binary trees. Nodes are stored in a contiguous array, ordered per level (from +// the leaf to the root). These trees are mainly used to keep statistics for memory regions. Stats are updated from a +// leaf node (which typically represents a page) up to the root of the tree (which represents the whole memory region +// tracked by the tree). 
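+// For instance (illustrative numbers), in a tree with 8 leaves where each leaf counts the faults on one page,
+// bumping the counter of leaf 5 during a leaf-to-root walk also bumps the nodes covering pages 4-5, 4-7 and 0-7.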
Thus, statistics are transparently aggregated for different memory region size granularities. +// +// Restrictions: trees of up to 63 levels are supported for 64-bit architectures, 31 levels for 32-bit architectures. This +// is because tree iterators use signed identifiers. + +typedef struct +{ + // Number of leaves + size_t leaf_count; + + // Number of node levels in the tree (32/64 maximum, so we can use a single byte) + u8 level_count; + + // Total number of nodes (leaf + internal) + size_t node_count; + + // Number of leaves that would make this tree a complete binary tree + size_t pow2_leaf_count; + + void *nodes; +} uvm_perf_tree_t; + +// Tree traversal +// +// Full- and complete-binary trees have properties that enable easy traversal of the tree using simple division and +// multiplication operations. However, forcing trees to be complete could lead to a huge waste of memory (up to 25% +// per tree). Therefore, tree traversals here need to compute the number of elements in the current/lower level before +// moving to the upper/lower level. Level 0 is the root level, (level_count - 1) is the leaves' level. This can be +// easily done with the following computation (see uvm_perf_tree_level_node_count): +// +// (1 << level) - (missing_leaves_to_pow2 >> ((levels - 1) - level)) +// +// For example, with 9 leaves we have levels = 5 and missing_leaves_to_pow2 = 16 - 9 = 7, so the level sizes from the +// root down to the leaves are 1, 2, 3, 5 and 9 nodes (20 nodes in total). +// +// Once we have the offset to the beginning of the current level, we only need to add the index of the node visited +// within the current level. This is done as follows (see uvm_perf_tree_iter_leaf_to_index_in_level): +// +// node_idx >> ((levels - 1) - level) +// +// We provide a type for tree traversals to allow macros to store the necessary information to transparently perform +// these computations. Thus, an uvm_perf_tree_iter_t object needs to be passed to the tree traversal macros. +typedef struct +{ + // For branch traversals: the index of the origin/destination leaf + // For complete traversals: the index of the node in the current level + ssize_t node_idx; + + // Current level in the traversal. Needs to be negative to allow detecting when we are out of bounds + s8 level_idx; + + // Offset of the current level within the node array + size_t level_offset; +} uvm_perf_tree_iter_t; + +// Tree initialization. Computes the total number of levels and nodes required for the given number of leaf nodes and +// allocates its memory. Nodes' memory is zero-initialized. +// +// Returns NV_OK if initialization succeeded, or +// NV_ERR_NO_MEMORY if node allocation failed +NV_STATUS uvm_perf_tree_init(uvm_perf_tree_t *tree, size_t node_size, size_t leaf_count); + +// Tree destruction. 
It frees the memory used by the nodes +void uvm_perf_tree_destroy(uvm_perf_tree_t *tree); + +// Resets the contents of the nodes +void uvm_perf_tree_clear(uvm_perf_tree_t *tree, size_t node_size); + +// Initializes the context for a tree traversal from the leaves to the root +static void uvm_perf_tree_init_up_traversal(const uvm_perf_tree_t *tree, uvm_perf_tree_iter_t *iter) +{ + iter->level_idx = tree->level_count - 1; + iter->level_offset = 0; +} + +static void uvm_perf_tree_init_up_branch_traversal(const uvm_perf_tree_t *tree, size_t leaf, uvm_perf_tree_iter_t *iter) +{ + uvm_perf_tree_init_up_traversal(tree, iter); + + iter->node_idx = leaf; +} + +// Initializes the context for a tree traversal from the root to the leaves +static void uvm_perf_tree_init_down_traversal(const uvm_perf_tree_t *tree, uvm_perf_tree_iter_t *iter) +{ + iter->level_idx = 0; + iter->level_offset = tree->node_count - 1; +} + +static void uvm_perf_tree_init_down_branch_traversal(const uvm_perf_tree_t *tree, size_t leaf, uvm_perf_tree_iter_t *iter) +{ + uvm_perf_tree_init_down_traversal(tree, iter); + + iter->node_idx = leaf; +} + +// Computes the index of the node visited for the traversal in the current level +static size_t uvm_perf_tree_iter_leaf_to_index_in_level(const uvm_perf_tree_t *tree, const uvm_perf_tree_iter_t *iter) +{ + return iter->node_idx >> ((tree->level_count - 1) - iter->level_idx); +} + +// Computes the number of nodes in the given level +static size_t uvm_perf_tree_level_node_count(const uvm_perf_tree_t *tree, size_t level_idx) +{ + size_t level_pow2_node_count; + size_t level_missing_nodes_to_pow2; + + level_pow2_node_count = (size_t)1 << level_idx; + level_missing_nodes_to_pow2 = (tree->pow2_leaf_count - tree->leaf_count) >> ((tree->level_count - 1) - level_idx); + + return level_pow2_node_count - level_missing_nodes_to_pow2; +} + +// Function to compute the range of leaves that lie beneath any of the nodes in the tree. +// +// IMPORTANT: This functions may only be used in branch traversals +#define uvm_perf_tree_iter_max_leaves(tree, iter) \ + ((typeof((tree)->leaf_count))1 << (((tree)->level_count - 1) - (iter)->level_idx)) + +#define uvm_perf_tree_iter_leaf_range_start(tree, iter) \ + UVM_ALIGN_DOWN((iter)->node_idx, uvm_perf_tree_iter_max_leaves((tree), (iter))) + +#define uvm_perf_tree_iter_leaf_range(tree, iter) \ +({ \ + typeof((tree)->leaf_count) __range_leaves = uvm_perf_tree_iter_max_leaves((tree), (iter)); \ + typeof((tree)->leaf_count) __range_start = uvm_perf_tree_iter_leaf_range_start((tree), (iter)); \ + typeof((tree)->leaf_count) __range_end_max = __range_start + __range_leaves; \ + typeof((tree)->leaf_count) __range_end = min(__range_end_max, (tree)->leaf_count); \ + __range_end - __range_start; \ +}) + +// Obtains the current node pointed by the traversal context when doing a branch traversal +#define UVM_PERF_TREE_ITER_BRANCH_CURRENT(tree,node_type,iter) \ +({ \ + (iter)->level_idx < 0 || (iter)->level_idx >= ((tree)->level_count) ? \ + NULL: \ + ((node_type *)(tree)->nodes) + (iter)->level_offset + uvm_perf_tree_iter_leaf_to_index_in_level((tree), (iter)); \ +}) + +// Obtains the current node pointed by the traversal context when doing a full traversal +#define UVM_PERF_TREE_ITER_CURRENT(tree,node_type,iter) \ +({ \ + (iter)->level_idx < 0 || (iter)->level_idx >= ((tree)->level_count) ? 
\ + NULL: \ + ((node_type *)(tree)->nodes) + (iter)->level_offset + (iter)->node_idx; \ +}) + +// Obtains the leaf node corresponding to the given leaf node index +#define UVM_PERF_TREE_LEAF(tree,node_type,leaf_idx) \ +({ \ + ((node_type *)(tree)->nodes) + leaf_idx; \ +}) + +// Obtains the root node of the tree +#define UVM_PERF_TREE_ROOT(tree,node_type) \ +({ \ + ((node_type *)(tree)->nodes) + ((tree)->node_count - 1); \ +}) + +// Functions to update the tree traversal context with the information of the next level (up/down) +static void uvm_perf_tree_traverse_up(const uvm_perf_tree_t *tree, uvm_perf_tree_iter_t *iter) +{ + // Nodes of the next level (up) are stored AFTER the current level + iter->level_offset += uvm_perf_tree_level_node_count(tree, iter->level_idx--); +} + +static void uvm_perf_tree_traverse_down(const uvm_perf_tree_t *tree, uvm_perf_tree_iter_t *iter) +{ + // Nodes of the next level (down) are stored BEFORE the current level. Since we are at the beginning of the current + // level, we must skip all the nodes of the NEXT level. + iter->level_offset -= uvm_perf_tree_level_node_count(tree, ++iter->level_idx); +} + +// Complete branch traversal from the given leaf up to the root of the tree. A pointer to the node in each level of the +// traversal is stored in node +#define uvm_perf_tree_traverse_leaf_to_root(tree,leaf,node,iter) \ + for (uvm_perf_tree_init_up_branch_traversal((tree), (leaf), (iter)), \ + (node) = UVM_PERF_TREE_ITER_BRANCH_CURRENT((tree), typeof(*(node)), (iter)); \ + (node) != NULL; \ + uvm_perf_tree_traverse_up((tree), (iter)), \ + (node) = UVM_PERF_TREE_ITER_BRANCH_CURRENT((tree), typeof(*(node)), (iter))) + +// Complete branch traversal from the root of the tree down to the given leaf index. A pointer to the node in each level +// of the traversal is stored in node +#define uvm_perf_tree_traverse_root_to_leaf(tree,leaf,node,iter) \ + for (uvm_perf_tree_init_down_branch_traversal((tree), (leaf), (iter)), \ + (node) = UVM_PERF_TREE_ITER_BRANCH_CURRENT((tree), typeof(*(node)), (iter)); \ + (node) != NULL; \ + uvm_perf_tree_traverse_down((tree), (iter)), \ + (node) = UVM_PERF_TREE_ITER_BRANCH_CURRENT((tree), typeof(*(node)), (iter))) + +// Iterate over all tree levels from root to leaves +#define uvm_perf_tree_for_each_level_down(tree,iter) \ + for (uvm_perf_tree_init_down_traversal((tree), (iter)); \ + (iter)->level_idx < (tree)->level_count; \ + uvm_perf_tree_traverse_down((tree), (iter))) + +// Iterate over all tree levels from leaves to root +#define uvm_perf_tree_for_each_level_up(tree,iter) \ + for (uvm_perf_tree_init_up_traversal((tree), (iter)); \ + (iter)->level_idx >= 0; \ + uvm_perf_tree_traverse_up((tree), (iter))) + +// Iterate over all nodes within a level of the tree (left to right) +#define uvm_perf_tree_level_for_each_node(tree,node,iter) \ + for ((iter)->node_idx = 0, \ + (node) = UVM_PERF_TREE_ITER_CURRENT((tree), typeof(*(node)), (iter)); \ + (iter)->node_idx < uvm_perf_tree_level_node_count((tree), (iter)->level_idx); \ + ++(iter)->node_idx, \ + ++(node)) + +// Iterate over all nodes within a level of the tree right to left +#define uvm_perf_tree_level_for_each_node_reverse(tree,node,iter) \ + for ((iter)->node_idx = uvm_perf_tree_level_node_count((tree), (iter)->level_idx) - 1, \ + (node) = UVM_PERF_TREE_ITER_CURRENT((tree), typeof(*(node)), (iter)); \ + (iter)->node_idx >= 0; \ + --(iter)->node_idx, \ + --(node)) + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_perf_utils_test.c b/kernel-open/nvidia-uvm/uvm_perf_utils_test.c new file 
mode 100644 index 000000000..41ac15e4e --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_perf_utils_test.c @@ -0,0 +1,747 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_perf_utils.h" +#include "uvm_va_block.h" +#include "uvm_test.h" + +static NV_STATUS test_saturating_counter_basic(void) +{ + NvU8 counter8; + NvU16 counter16; + NvU32 counter32; + NvU64 counter64; + + NvU8 max8 = -1; + NvU16 max16 = -1; + NvU32 max32 = -1; + NvU64 max64 = -1; + + counter8 = 0; + + UVM_PERF_SATURATING_INC(counter8); + TEST_CHECK_RET(counter8 == 1); + UVM_PERF_SATURATING_INC(counter8); + TEST_CHECK_RET(counter8 == 2); + + UVM_PERF_SATURATING_ADD(counter8, 5); + TEST_CHECK_RET(counter8 == 7); + + // Counter saturating at maximum unsigned char value + UVM_PERF_SATURATING_ADD(counter8, max8); + TEST_CHECK_RET(counter8 == max8); + + counter16 = 0; + + UVM_PERF_SATURATING_INC(counter16); + TEST_CHECK_RET(counter16 == 1); + UVM_PERF_SATURATING_INC(counter16); + TEST_CHECK_RET(counter16 == 2); + + UVM_PERF_SATURATING_ADD(counter16, 5); + TEST_CHECK_RET(counter16 == 7); + + // Counter saturating at maximum unsigned short value + UVM_PERF_SATURATING_ADD(counter16, max16); + TEST_CHECK_RET(counter16 == max16); + + counter32 = 0; + + UVM_PERF_SATURATING_INC(counter32); + TEST_CHECK_RET(counter32 == 1); + UVM_PERF_SATURATING_INC(counter32); + TEST_CHECK_RET(counter32 == 2); + + UVM_PERF_SATURATING_ADD(counter32, 5); + TEST_CHECK_RET(counter32 == 7); + + // Counter saturating at maximum unsigned long int value + UVM_PERF_SATURATING_ADD(counter32, max32); + TEST_CHECK_RET(counter32 == max32); + + counter64 = 0; + + UVM_PERF_SATURATING_INC(counter64); + TEST_CHECK_RET(counter64 == 1); + UVM_PERF_SATURATING_INC(counter64); + TEST_CHECK_RET(counter64 == 2); + + UVM_PERF_SATURATING_ADD(counter64, 5); + TEST_CHECK_RET(counter64 == 7); + + // Counter saturating at maximum unsigned long long int value + UVM_PERF_SATURATING_ADD(counter64, max64); + TEST_CHECK_RET(counter64 == max64); + + return NV_OK; +} + +struct region +{ + unsigned char read_faults : 4; + unsigned char write_faults : 4; + unsigned char atomic_faults : 4; + unsigned char upgrades : 4; +}; + +static NV_STATUS test_saturating_counter_bitfields(void) +{ + struct region r; + + memset(&r, 0, sizeof(r)); + + UVM_PERF_SATURATING_INC(r.read_faults); + 
TEST_CHECK_RET(r.read_faults == 1); + UVM_PERF_SATURATING_INC(r.write_faults); + TEST_CHECK_RET(r.write_faults == 1); + UVM_PERF_SATURATING_ADD(r.atomic_faults, 18); + TEST_CHECK_RET(r.atomic_faults == 15); + + return NV_OK; +} + +static NV_STATUS test_saturating_counter(void) +{ + NV_STATUS status; + + status = test_saturating_counter_basic(); + if (status != NV_OK) + goto fail; + status = test_saturating_counter_bitfields(); + +fail: + return status; +} + +static NV_STATUS test_tree_pow2(void) +{ + NV_STATUS status; + uvm_perf_tree_t my_int_tree; + uvm_perf_tree_iter_t iter; + int *node; + + status = uvm_perf_tree_init(&my_int_tree, sizeof(int), 8); + MEM_NV_CHECK_RET(status, NV_OK); + + TEST_CHECK_GOTO(my_int_tree.node_count == 8 * 2 - 1, fail); + TEST_CHECK_GOTO(my_int_tree.level_count == 4, fail); + + uvm_perf_tree_traverse_leaf_to_root(&my_int_tree, 3, node, &iter) { + ++*node; + } + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 0, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 1] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 3] == 0, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[12 + 0] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[12 + 1] == 0, fail); + // Level 3 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 1, fail); + + TEST_CHECK_GOTO(*UVM_PERF_TREE_ROOT(&my_int_tree, int) == 1, fail); + + uvm_perf_tree_traverse_leaf_to_root(&my_int_tree, 6, node, &iter) { + ++*node; + } + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 0, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 1] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 3] == 1, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[12 + 0] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[12 + 1] == 1, fail); + // Level 3 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 2, fail); + + TEST_CHECK_GOTO(*UVM_PERF_TREE_ROOT(&my_int_tree, int) == 2, fail); + + uvm_perf_tree_traverse_root_to_leaf(&my_int_tree, 7, node, &iter) { + ++*node; + } + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 1, fail); + TEST_CHECK_GOTO(((int 
*)my_int_tree.nodes)[4] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 1, fail); + + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 0) == &((int *)my_int_tree.nodes)[0], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 1) == &((int *)my_int_tree.nodes)[1], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 2) == &((int *)my_int_tree.nodes)[2], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 3) == &((int *)my_int_tree.nodes)[3], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 4) == &((int *)my_int_tree.nodes)[4], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 5) == &((int *)my_int_tree.nodes)[5], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 6) == &((int *)my_int_tree.nodes)[6], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 7) == &((int *)my_int_tree.nodes)[7], fail); + + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 1] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 3] == 2, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[12 + 0] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[12 + 1] == 2, fail); + // Level 3 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 3, fail); + + TEST_CHECK_GOTO(UVM_PERF_TREE_ROOT(&my_int_tree, int) == &((int *)my_int_tree.nodes)[14 + 0], fail); + + uvm_perf_tree_clear(&my_int_tree, sizeof(int)); + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 0, fail); + + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8 + 3] == 0, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[12 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[12 + 1] == 0, fail); + // Level 3 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 0, fail); + + uvm_perf_tree_destroy(&my_int_tree); + + return NV_OK; + +fail: + uvm_perf_tree_destroy(&my_int_tree); + + return NV_ERR_INVALID_STATE; +} + +static NV_STATUS test_tree_non_pow2(void) +{ + NV_STATUS status; + uvm_perf_tree_t my_int_tree; + uvm_perf_tree_iter_t iter; + int *node; + + status = uvm_perf_tree_init(&my_int_tree, sizeof(int), 7); + MEM_NV_CHECK_RET(status, NV_OK); + + TEST_CHECK_GOTO(my_int_tree.node_count == 7 + 4 + 2 + 1, fail); + TEST_CHECK_GOTO(my_int_tree.level_count == 4, fail); + + uvm_perf_tree_destroy(&my_int_tree); + + status = uvm_perf_tree_init(&my_int_tree, sizeof(int), 9); + MEM_NV_CHECK_RET(status, NV_OK); + + TEST_CHECK_GOTO(my_int_tree.node_count == 9 + 5 + 3 + 2 + 1, fail); + TEST_CHECK_GOTO(my_int_tree.level_count == 5, fail); + + uvm_perf_tree_destroy(&my_int_tree); + + status = 
uvm_perf_tree_init(&my_int_tree, sizeof(int), 13); + MEM_NV_CHECK_RET(status, NV_OK); + + TEST_CHECK_GOTO(my_int_tree.node_count == 13 + 7 + 4 + 2 + 1, fail); + TEST_CHECK_GOTO(my_int_tree.level_count == 5, fail); + + uvm_perf_tree_destroy(&my_int_tree); + + status = uvm_perf_tree_init(&my_int_tree, sizeof(int), 15); + MEM_NV_CHECK_RET(status, NV_OK); + + TEST_CHECK_GOTO(my_int_tree.node_count == 15 + 8 + 4 + 2 + 1, fail); + TEST_CHECK_GOTO(my_int_tree.level_count == 5, fail); + + uvm_perf_tree_destroy(&my_int_tree); + + status = uvm_perf_tree_init(&my_int_tree, sizeof(int), 9); + MEM_NV_CHECK_RET(status, NV_OK); + + uvm_perf_tree_traverse_leaf_to_root(&my_int_tree, 6, node, &iter) { + ++*node; + } + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8] == 0, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 3] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 4] == 0, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 1] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 2] == 0, fail); + // Level 3 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 0] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 1] == 0, fail); + // Level 4 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[19 + 0] == 1, fail); + + TEST_CHECK_GOTO(*UVM_PERF_TREE_ROOT(&my_int_tree, int) == 1, fail); + + uvm_perf_tree_traverse_root_to_leaf(&my_int_tree, 2, node, &iter) { + ++*node; + } + + uvm_perf_tree_traverse_root_to_leaf(&my_int_tree, 8, node, &iter) { + ++*node; + } + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8] == 1, fail); + + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 0) == &((int *)my_int_tree.nodes)[0], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 1) == &((int *)my_int_tree.nodes)[1], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 2) == &((int *)my_int_tree.nodes)[2], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 3) == &((int *)my_int_tree.nodes)[3], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 4) == &((int *)my_int_tree.nodes)[4], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 5) == &((int *)my_int_tree.nodes)[5], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 6) == &((int 
*)my_int_tree.nodes)[6], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 7) == &((int *)my_int_tree.nodes)[7], fail); + TEST_CHECK_GOTO(UVM_PERF_TREE_LEAF(&my_int_tree, int, 8) == &((int *)my_int_tree.nodes)[8], fail); + + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 1] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 3] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 4] == 1, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 1] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 2] == 1, fail); + // Level 3 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 0] == 2, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 1] == 1, fail); + // Level 4 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[19 + 0] == 3, fail); + + TEST_CHECK_GOTO(UVM_PERF_TREE_ROOT(&my_int_tree, int) == &((int *)my_int_tree.nodes)[19 + 0], fail); + + uvm_perf_tree_destroy(&my_int_tree); + + return NV_OK; + +fail: + uvm_perf_tree_destroy(&my_int_tree); + + return NV_ERR_INVALID_STATE; +} + +static NV_STATUS test_branch_traversal(void) +{ + NV_STATUS status; + uvm_perf_tree_t my_int_tree; + uvm_perf_tree_iter_t iter; + int *node; + int value; + + status = uvm_perf_tree_init(&my_int_tree, sizeof(int), 9); + MEM_NV_CHECK_RET(status, NV_OK); + + value = 1; + + /* + * Level idx + * ========= + * 0 0 + * _______/ \______ + * 1 0 1 + * __/ \__ __/ + * 2 0 1 2 + * / \ / \ / + * 3 0 1 2 3 4 + * / \ / \ / \ / \ / \ + * 4 0 1 2 3 4 5 6 7 8 9 + */ + uvm_perf_tree_traverse_leaf_to_root(&my_int_tree, 6, node, &iter) { + if (iter.level_idx == 4) { + TEST_CHECK_GOTO(uvm_perf_tree_iter_max_leaves(&my_int_tree, &iter) == 1, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range(&my_int_tree, &iter) == 1, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range_start(&my_int_tree, &iter) == 6, fail); + } + else if (iter.level_idx == 3) { + TEST_CHECK_GOTO(uvm_perf_tree_iter_max_leaves(&my_int_tree, &iter) == 2, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range(&my_int_tree, &iter) == 2, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range_start(&my_int_tree, &iter) == 6, fail); + } + else if (iter.level_idx == 2) { + TEST_CHECK_GOTO(uvm_perf_tree_iter_max_leaves(&my_int_tree, &iter) == 4, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range(&my_int_tree, &iter) == 4, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range_start(&my_int_tree, &iter) == 4, fail); + } + else if (iter.level_idx == 1) { + TEST_CHECK_GOTO(uvm_perf_tree_iter_max_leaves(&my_int_tree, &iter) == 8, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range(&my_int_tree, &iter) == 8, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range_start(&my_int_tree, &iter) == 0, fail); + } + else if (iter.level_idx == 0) { + TEST_CHECK_GOTO(uvm_perf_tree_iter_max_leaves(&my_int_tree, &iter) == 16, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range(&my_int_tree, &iter) == 9, fail); + TEST_CHECK_GOTO(uvm_perf_tree_iter_leaf_range_start(&my_int_tree, &iter) == 0, fail); + } + + *node += value++; + } + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 0, fail); + TEST_CHECK_GOTO(((int 
*)my_int_tree.nodes)[4] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8] == 0, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 3] == 2, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 4] == 0, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 1] == 3, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 2] == 0, fail); + // Level 3 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 0] == 4, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 1] == 0, fail); + // Level 4 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[19 + 0] == 5, fail); + + TEST_CHECK_GOTO(*UVM_PERF_TREE_ROOT(&my_int_tree, int) == 5, fail); + + uvm_perf_tree_traverse_root_to_leaf(&my_int_tree, 6, node, &iter) { + *node -= --value; + } + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8] == 0, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 2] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 3] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 4] == 0, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 1] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 2] == 0, fail); + // Level 3 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 0] == 0, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 1] == 0, fail); + // Level 4 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[19 + 0] == 0, fail); + + uvm_perf_tree_destroy(&my_int_tree); + + return NV_OK; + +fail: + uvm_perf_tree_destroy(&my_int_tree); + + return NV_ERR_INVALID_STATE; +} + +static NV_STATUS test_tree_traversal(void) +{ + NV_STATUS status; + uvm_perf_tree_t my_int_tree; + uvm_perf_tree_iter_t iter; + int *node; + int value; + + status = uvm_perf_tree_init(&my_int_tree, sizeof(int), 9); + MEM_NV_CHECK_RET(status, NV_OK); + + value = 1; + + uvm_perf_tree_for_each_level_down(&my_int_tree, &iter) { + uvm_perf_tree_level_for_each_node(&my_int_tree, node, &iter) { + *node = value++; + } + } + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 12, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 13, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 14, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 15, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 16, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 17, fail); + 
TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 18, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 19, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8] == 20, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 0] == 7, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 1] == 8, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 2] == 9, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 3] == 10, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 4] == 11, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 4, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 1] == 5, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 2] == 6, fail); + // Level 3 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 0] == 2, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 1] == 3, fail); + // Level 4 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[19 + 0] == 1, fail); + + value = 1; + + uvm_perf_tree_for_each_level_up(&my_int_tree, &iter) { + // Traverse nodes left to right in each level + uvm_perf_tree_level_for_each_node(&my_int_tree, node, &iter) { + *node = value++; + } + } + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 1, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 2, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 3, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 4, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 5, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 6, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 7, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 8, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8] == 9, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 0] == 10, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 1] == 11, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 2] == 12, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 3] == 13, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 4] == 14, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 15, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 1] == 16, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 2] == 17, fail); + // Level 3 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 0] == 18, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 1] == 19, fail); + // Level 4 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[19 + 0] == 20, fail); + + value = 1; + + uvm_perf_tree_for_each_level_down(&my_int_tree, &iter) { + uvm_perf_tree_level_for_each_node_reverse(&my_int_tree, node, &iter) { + *node = value++; + } + } + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 20, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 19, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 18, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 17, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 16, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 15, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 14, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 13, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8] == 12, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 0] == 11, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 1] == 10, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 2] == 9, fail); + TEST_CHECK_GOTO(((int 
*)my_int_tree.nodes)[9 + 3] == 8, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 4] == 7, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 6, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 1] == 5, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 2] == 4, fail); + // Level 3 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 0] == 3, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 1] == 2, fail); + // Level 4 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[19 + 0] == 1, fail); + + value = 1; + + uvm_perf_tree_for_each_level_up(&my_int_tree, &iter) { + // Traverse nodes right to left in each level + uvm_perf_tree_level_for_each_node_reverse(&my_int_tree, node, &iter) { + *node = value++; + } + } + + // Level 0 (leafs) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[0] == 9, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[1] == 8, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[2] == 7, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[3] == 6, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[4] == 5, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[5] == 4, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[6] == 3, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[7] == 2, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[8] == 1, fail); + // Level 1 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 0] == 14, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 1] == 13, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 2] == 12, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 3] == 11, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[9 + 4] == 10, fail); + // Level 2 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 0] == 17, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 1] == 16, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[14 + 2] == 15, fail); + // Level 3 + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 0] == 19, fail); + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[17 + 1] == 18, fail); + // Level 4 (root) + TEST_CHECK_GOTO(((int *)my_int_tree.nodes)[19 + 0] == 20, fail); + + uvm_perf_tree_destroy(&my_int_tree); + + return NV_OK; + +fail: + uvm_perf_tree_destroy(&my_int_tree); + + return NV_ERR_INVALID_STATE; +} + +static NV_STATUS test_bitmap_tree_traversal(void) +{ + int value; + uvm_va_block_bitmap_tree_t tree; + uvm_va_block_bitmap_tree_iter_t iter; + + uvm_va_block_bitmap_tree_init_from_page_count(&tree, 9); + + TEST_CHECK_RET(tree.level_count == 5); + TEST_CHECK_RET(tree.leaf_count == 9); + + uvm_page_mask_set(&tree.pages, 1); + uvm_page_mask_set(&tree.pages, 2); + uvm_page_mask_set(&tree.pages, 4); + uvm_page_mask_set(&tree.pages, 7); + uvm_page_mask_set(&tree.pages, 8); + + uvm_va_block_bitmap_tree_traverse_counters(value, &tree, 6, &iter) { + if (iter.level_idx == 4) + TEST_CHECK_RET(value == 0); + else if (iter.level_idx == 3) + TEST_CHECK_RET(value == 1); + else if (iter.level_idx == 2) + TEST_CHECK_RET(value == 2); + else if (iter.level_idx == 1) + TEST_CHECK_RET(value == 4); + else if (iter.level_idx == 0) + TEST_CHECK_RET(value == 5); + } + + return NV_OK; +} + +static NV_STATUS test_trees(void) +{ + NV_STATUS status; + + status = test_tree_pow2(); + if (status != NV_OK) + goto fail; + status = test_tree_non_pow2(); + if (status != NV_OK) + goto fail; + status = test_branch_traversal(); + if (status != NV_OK) + goto fail; + status = test_tree_traversal(); + if (status != NV_OK) + goto fail; + status = test_bitmap_tree_traversal(); + +fail: + return 
status; +} + +NV_STATUS uvm_test_perf_utils_sanity(UVM_TEST_PERF_UTILS_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + + status = test_saturating_counter(); + if (status != NV_OK) + goto fail; + status = test_trees(); + +fail: + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_pmm_gpu.c b/kernel-open/nvidia-uvm/uvm_pmm_gpu.c new file mode 100644 index 000000000..34ac47ccf --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pmm_gpu.c @@ -0,0 +1,3836 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +// +// High level description of PMM is in the header file, here some implementation +// details are discussed. +// +// There is one PMM object per GPU and the PMM state among GPUs is completely +// separate with the exception of a few shared kmem caches. +// +// PMM allocates all of the memory it manages from PMA which is the common GPU +// Physical Memory Allocator shared by UVM and RM (it's included as part of RM, +// but logically separate from it). +// +// The state of each GPU memory chunk is tracked in uvm_gpu_chunk_t objects. +// Each chunk has a type, size and state. Type and size are persistent +// throughout chunk's lifetime while its state changes as it's allocated, split, +// merged and freed. +// +// PMM maintains a pre-allocated flat array of root chunks covering all possible +// physical allocations that can be returned from PMA. For simplicity, PMM +// always allocates 2M (UVM_CHUNK_SIZE_MAX) chunks from PMA and each naturally +// aligned 2M chunk represents a single root chunk. The root chunks array is +// indexed by the physical address of each chunk divided by UVM_CHUNK_SIZE_MAX +// allowing for a simple and fast lookup of root chunks. +// +// Each root chunk has a tracker for any pending operations on the root chunk +// (including all of its subchunks in case it's split) to support asynchronous +// alloc and free. Each tracker is protected by a separate bitlock (see +// root_chunk_lock()) as synchronizing any pending operations might take a long +// time and it would be undesirable for that to block other operations of PMM. +// Notably some synchronization is required as part of allocation to handle GPU +// lifetime issues across VA spaces (see comments in uvm_pmm_gpu_alloc()). 
Bit +// locks (instead of a mutex in each root chunk) are used to save space. +// +// All free chunks (UVM_PMM_GPU_CHUNK_STATE_FREE) are kept on free lists, with +// one list per each combination of memory type and chunk size (see usage of +// uvm_pmm_gpu_t::free_list for reference). This allows for a very quick +// allocation and freeing of chunks in case the right size is already available +// on alloc or no merges are required on free. See claim_free_chunk() for +// allocation and chunk_free_locked() for freeing. +// +// When a chunk is allocated it transitions into the temporarily pinned state +// (UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED) until it's unpinned when it becomes +// allocated (UVM_PMM_GPU_CHUNK_STATE_ALLOCATED). This transition is only +// meaningful for user memory chunks where temporarily pinned chunks cannot be +// evicted. Kernel memory type chunks do not support eviction at all and they +// are transitioned into the allocated state as part of the allocation itself +// (see uvm_pmm_gpu_alloc_kernel). When the chunk is freed it transitions back +// to the free state and is placed on an appropriate free list. +// +// To support smaller allocations, PMM internally splits and merges root chunks +// as needed. Splitting and merging is protected by an exclusive lock +// (uvm_pmm_gpu_t::lock) to prevent PMM from over-allocating root chunks in case +// multiple threads race for a small allocation and there are no free chunks +// immediately available. +// +// Splitting is performed lazily, i.e. chunks are only split when a chunk of the +// requested type and size is not available. Splits are only done to the next +// smaller size and hence may need to be performed multiple times recursively to +// get to the desired chunk size. See alloc_chunk_with_splits(). All split +// chunks under the root chunk form a tree with all internal nodes being in +// split state and leaf nodes being in any of the free, allocated or pinned +// states. +// +// Merging is performed eagerly, i.e. whenever all chunks under a parent (split) +// chunk become free, they are merged into one bigger chunk. See +// free_chunk_with_merges(). +// +// Splitting and merging already allocated chunks is also exposed to the users of +// allocated chunks. See uvm_pmm_gpu_split_chunk() and uvm_pmm_gpu_merge_chunk(). +// +// As splits and merges are protected by a single PMM mutex, they are only +// performed when really necessary. See alloc_chunk() that falls back to split +// only as the last step and free_chunk() that similarly first tries performing +// a quick free. +// +// When a memory allocation from PMA fails and eviction is requested, PMM will +// check whether it can evict any user memory chunks to satisfy the request. +// All allocated user memory root chunks are tracked in an LRU list +// (root_chunks.va_block_used). A root chunk is moved to the tail of that list +// whenever any of its subchunks is allocated (unpinned) by a VA block (see +// uvm_pmm_gpu_unpin_temp()). When a root chunk is selected for eviction, it has +// the eviction flag set (see pick_root_chunk_to_evict()). This flag affects +// many of the PMM operations on all of the subchunks of the root chunk being +// evicted. See usage of (root_)chunk_is_in_eviction(), in particular in +// chunk_free_locked() and claim_free_chunk(). +// +// To evict a root chunk, all of its free subchunks are pinned, then all +// resident pages backed by it are moved to the CPU one VA block at a time. 
+// After all of them are moved, the root chunk is merged and returned to the +// caller. See evict_root_chunk() for details. +// +// Eviction is also possible to be triggered by PMA. This makes it possible for +// other PMA clients (most importantly RM which CUDA uses for non-UVM +// allocations) to successfully allocate memory from the user memory pool +// allocated by UVM. UVM registers two eviction callbacks with PMA that PMA +// calls as needed to perform the eviction: +// - uvm_pmm_gpu_pma_evict_range - for evicting a physical range +// - uvm_pmm_gpu_pma_evict_pages - for evicting a number of pages +// +// Both of them perform the eviction using the same building blocks as internal +// eviction, but see their implementation and references to pma.h for more +// details. +// +// PMM locking +// - PMM mutex +// Exclusive lock protecting both internal and external splits and merges, and +// eviction. +// +// - PMM list lock +// Protects state transitions of chunks and their movement among lists. +// +// - PMM root chunk bit locks +// Each bit lock protects the corresponding root chunk's allocation, freeing +// from/to PMA, root chunk trackers, and root chunk indirect_peer mappings. +// +// - PMA allocation/eviction lock +// A read-write semaphore used by the eviction path to flush any pending +// allocations. See usage of pma_lock in alloc_root_chunk() and +// uvm_pmm_gpu_pma_evict_range(). +// +// == Trade-offs === +// +// In general, PMM is optimized towards Pascal+ and 2M VA blocks (that's also +// the UVM_CHUNK_SIZE_MAX) as Pascal+ makes much heavier use of PMM: +// - Oversubscription is Pascal+ only +// - On pre-Pascal (UVM-Lite) CUDA currently pre-populates all managed memory +// and hence performance matters mostly only during CUDA memory allocation. +// - On Pascal+ CUDA doesn't pre-populate and memory is allocated on first +// touch. +// +// The root chunk size matching the VA block chunk size allows PMM to avoid +// having to split and merge for the hopefully (HMM might make this hard) common +// allocation size of 2M on Pascal+. +// +// Careful benchmarks and tweaking of PMM are yet to be performed, but there is +// some evidence for PMA to potentially cause issues for oversubscription (see +// bug 1775408). +// + +#include "uvm_common.h" +#include "nv_uvm_interface.h" +#include "uvm_api.h" +#include "uvm_gpu.h" +#include "uvm_pmm_gpu.h" +#include "uvm_mem.h" +#include "uvm_mmu.h" +#include "uvm_global.h" +#include "uvm_kvmalloc.h" +#include "uvm_va_space.h" +#include "uvm_va_block.h" +#include "uvm_test.h" +#include "uvm_linux.h" + + + + +static int uvm_global_oversubscription = 1; +module_param(uvm_global_oversubscription, int, S_IRUGO); +MODULE_PARM_DESC(uvm_global_oversubscription, "Enable (1) or disable (0) global oversubscription support."); + +#define UVM_PERF_PMA_BATCH_NONPINNED_ORDER_DEFAULT 6 + +// Non-pinned root chunks are allocated in batches, in order to minimize the +// number of calls into PMA. 
The number of root chunks in the batch is: +// (1 << uvm_perf_pma_batch_nonpinned_order) +static unsigned uvm_perf_pma_batch_nonpinned_order = UVM_PERF_PMA_BATCH_NONPINNED_ORDER_DEFAULT; +module_param(uvm_perf_pma_batch_nonpinned_order, uint, S_IRUGO); + +// Helper type for refcounting cache +typedef struct +{ + // Cache for given split size + struct kmem_cache *cache; + + // Number of GPUs using given split size + NvU32 refcount; + + // Name of cache + char name[32]; +} kmem_cache_ref_t; + +static kmem_cache_ref_t g_pma_address_batch_cache_ref; + +struct uvm_pmm_gpu_chunk_suballoc_struct +{ + // Number of allocated chunks (including pinned ones) + NvU32 allocated; + + // Number of pinned leaf chunks under this chunk + // + // Tracked only for suballocs of root chunks to know whether a root chunk + // can be evicted. This is not in the uvm_gpu_root_chunk_t itself to stop + // the root chunks array from growing too much. + // TODO: Bug 1765193: Consider moving this to a union with the parent + // pointer in uvm_gpu_chunk_t as root chunks never have a parent or just put + // in the root chunk directly. + // TODO: Bug 1765193: This could be NvU16 if we enforce the smallest chunk + // size to be at least 2^21 / 2^16 = 32 bytes. + NvU32 pinned_leaf_chunks; + + // Array of all child subchunks + // TODO: Bug 1765461: Can the array be inlined? It could save the parent + // pointer. + uvm_gpu_chunk_t *subchunks[0]; +}; + +typedef enum +{ + CHUNK_WALK_PRE_ORDER, + CHUNK_WALK_POST_ORDER +} chunk_walk_order_t; + +typedef NV_STATUS (*chunk_walk_func_t)(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, void *data); + +// Cache for allocation of uvm_pmm_gpu_chunk_suballoc_t. At index n it stores +// a suballoc structure for size 2**n. +// +// For convenience of init/deinit code level 0 is for allocation of chunks +static kmem_cache_ref_t chunk_split_cache[UVM_PMM_CHUNK_SPLIT_CACHE_SIZES]; +#define CHUNK_CACHE chunk_split_cache[0].cache + +const char *uvm_pmm_gpu_memory_type_string(uvm_pmm_gpu_memory_type_t type) +{ + switch (type) { + UVM_ENUM_STRING_CASE(UVM_PMM_GPU_MEMORY_TYPE_USER); + + + + UVM_ENUM_STRING_CASE(UVM_PMM_GPU_MEMORY_TYPE_KERNEL); + + + + UVM_ENUM_STRING_DEFAULT(); + } + + + + + BUILD_BUG_ON(UVM_PMM_GPU_MEMORY_TYPE_COUNT != 2); + +} + +const char *uvm_pmm_gpu_chunk_state_string(uvm_pmm_gpu_chunk_state_t state) +{ + switch (state) { + UVM_ENUM_STRING_CASE(UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED); + UVM_ENUM_STRING_CASE(UVM_PMM_GPU_CHUNK_STATE_FREE); + UVM_ENUM_STRING_CASE(UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + UVM_ENUM_STRING_CASE(UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + UVM_ENUM_STRING_CASE(UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + UVM_ENUM_STRING_DEFAULT(); + } +} + +// The PMA APIs that can be called from PMA eviction callbacks (pmaPinPages and +// pmaFreePages*) need to be called differently depending whether it's as part +// of PMA eviction or not. The PMM context is used to plumb that information +// through the stack in a couple of places. +typedef enum +{ + PMM_CONTEXT_DEFAULT, + PMM_CONTEXT_PMA_EVICTION, +} uvm_pmm_context_t; + +// Freeing the root chunk not only needs to differentiate between two different +// contexts for calling pmaFreePages(), but also in some cases the free back to +// PMA needs to be skipped altogether. 
+typedef enum +{ + FREE_ROOT_CHUNK_MODE_DEFAULT, + FREE_ROOT_CHUNK_MODE_PMA_EVICTION, + FREE_ROOT_CHUNK_MODE_SKIP_PMA_FREE +} free_root_chunk_mode_t; + +static free_root_chunk_mode_t free_root_chunk_mode_from_pmm_context(uvm_pmm_context_t pmm_context) +{ + switch (pmm_context) { + case PMM_CONTEXT_DEFAULT: + return FREE_ROOT_CHUNK_MODE_DEFAULT; + case PMM_CONTEXT_PMA_EVICTION: + return FREE_ROOT_CHUNK_MODE_PMA_EVICTION; + default: + UVM_ASSERT_MSG(false, "Invalid PMM context: 0x%x\n", pmm_context); + return FREE_ROOT_CHUNK_MODE_DEFAULT; + } +} + +static NV_STATUS alloc_chunk(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_chunk_size_t chunk_size, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunk); +static NV_STATUS alloc_root_chunk(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunk); +static void free_root_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk, free_root_chunk_mode_t free_mode); +static NV_STATUS split_gpu_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); +static void free_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); +static void free_chunk_with_merges(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); +static bool free_next_available_root_chunk(uvm_pmm_gpu_t *pmm, uvm_pmm_gpu_memory_type_t type); +static struct list_head *find_free_list(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_chunk_size_t chunk_size, + uvm_pmm_list_zero_t zero_type); +static bool check_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); +static struct list_head *find_free_list_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); +static void chunk_free_locked(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); + +static size_t root_chunk_index(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk) +{ + size_t index = root_chunk->chunk.address / UVM_CHUNK_SIZE_MAX; + UVM_ASSERT(index < pmm->root_chunks.count); + return index; +} + +static void root_chunk_lock(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk) +{ + uvm_bit_lock(&pmm->root_chunks.bitlocks, root_chunk_index(pmm, root_chunk)); +} + +static void uvm_assert_root_chunk_locked(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk) +{ + uvm_assert_bit_locked(&pmm->root_chunks.bitlocks, root_chunk_index(pmm, root_chunk)); +} + +static void root_chunk_unlock(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk) +{ + uvm_bit_unlock(&pmm->root_chunks.bitlocks, root_chunk_index(pmm, root_chunk)); +} + +// TODO: Bug 1795559: Remove once PMA eviction is considered safe enough not to +// have an opt-out. 
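// Editorial note, not part of the original source: uvm_global_oversubscription
// is declared above as a read-only (S_IRUGO) module parameter, so this opt-out
// is normally exercised at module load time, for example with a modprobe.d
// entry along the lines of "options nvidia-uvm uvm_global_oversubscription=0".
// With the parameter cleared, gpu_supports_pma_eviction() below returns false
// for every GPU.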
+static bool gpu_supports_pma_eviction(uvm_gpu_t *gpu) +{ + return uvm_global_oversubscription && uvm_gpu_supports_eviction(gpu); +} + +uvm_gpu_t *uvm_pmm_to_gpu(uvm_pmm_gpu_t *pmm) +{ + return container_of(pmm, uvm_gpu_t, pmm); +} + +static uvm_gpu_root_chunk_t *root_chunk_from_address(uvm_pmm_gpu_t *pmm, NvU64 addr) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + size_t index = addr / UVM_CHUNK_SIZE_MAX; + uvm_gpu_root_chunk_t *root_chunk = &pmm->root_chunks.array[index]; + + UVM_ASSERT_MSG(addr <= gpu->mem_info.max_allocatable_address, + "Address 0x%llx vidmem max phys 0x%llx GPU %s\n", + addr, + gpu->mem_info.max_allocatable_address, + uvm_gpu_name(gpu)); + UVM_ASSERT(root_chunk->chunk.address == UVM_ALIGN_DOWN(addr, UVM_CHUNK_SIZE_MAX)); + + return root_chunk; +} + +static uvm_gpu_root_chunk_t *root_chunk_from_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + return root_chunk_from_address(pmm, chunk->address); +} + +static bool chunk_is_root_chunk(uvm_gpu_chunk_t *chunk) +{ + return uvm_gpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_MAX; +} + +static bool chunk_is_root_chunk_pinned(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + + uvm_assert_spinlock_locked(&pmm->list_lock); + + chunk = &root_chunk->chunk; + + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED) + return true; + else if (chunk->state != UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT) + return false; + + UVM_ASSERT(chunk->suballoc); + + return chunk->suballoc->pinned_leaf_chunks > 0; +} + +// Pin a chunk and update its root chunk's pinned leaf chunks count if the chunk is not a root chunk +static void chunk_pin(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + + uvm_assert_spinlock_locked(&pmm->list_lock); + UVM_ASSERT(chunk->state != UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + chunk->state = UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED; + + if (chunk_is_root_chunk(chunk)) + return; + + // For subchunks, update the pinned leaf chunks count tracked in the suballoc of the root chunk. + chunk = &root_chunk->chunk; + + // The passed-in subchunk is not the root chunk so the root chunk has to be split + UVM_ASSERT_MSG(chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT, "chunk state %s\n", + uvm_pmm_gpu_chunk_state_string(chunk->state)); + + chunk->suballoc->pinned_leaf_chunks++; +} + +// Unpin a chunk and update its root chunk's pinned leaf chunks count if the chunk is not a root chunk +static void chunk_unpin(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_pmm_gpu_chunk_state_t new_state) +{ + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + + uvm_assert_spinlock_locked(&pmm->list_lock); + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + UVM_ASSERT(chunk->va_block == NULL); + UVM_ASSERT(chunk_is_root_chunk_pinned(pmm, chunk)); + UVM_ASSERT(new_state != UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + chunk->state = new_state; + + if (chunk_is_root_chunk(chunk)) + return; + + // For subchunks, update the pinned leaf chunks count tracked in the suballoc of the root chunk. 
+ chunk = &root_chunk->chunk; + + // The passed-in subchunk is not the root chunk so the root chunk has to be split + UVM_ASSERT_MSG(chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT, "chunk state %s\n", + uvm_pmm_gpu_chunk_state_string(chunk->state)); + + UVM_ASSERT(chunk->suballoc->pinned_leaf_chunks != 0); + chunk->suballoc->pinned_leaf_chunks--; +} + +bool uvm_pmm_gpu_memory_type_is_user(uvm_pmm_gpu_memory_type_t type) +{ + UVM_ASSERT(type < UVM_PMM_GPU_MEMORY_TYPE_COUNT); + + switch (type) { + + + + + case UVM_PMM_GPU_MEMORY_TYPE_USER: + + return true; + default: + return false; + } +} + + + + + + + + + + + + + + +static void uvm_gpu_chunk_set_in_eviction(uvm_gpu_chunk_t *chunk, bool in_eviction) +{ + UVM_ASSERT(uvm_pmm_gpu_memory_type_is_user(chunk->type)); + UVM_ASSERT(uvm_gpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_MAX); + chunk->in_eviction = in_eviction; +} + +// A helper that queries the eviction flag of root chunk of the given chunk. +// Eviction is only tracked for root chunks. +static bool chunk_is_in_eviction(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + return root_chunk_from_chunk(pmm, chunk)->chunk.in_eviction; +} + +uvm_gpu_t *uvm_gpu_chunk_get_gpu(const uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_t *gpu = uvm_gpu_get(uvm_global_gpu_id_from_index(chunk->gpu_global_index)); + UVM_ASSERT(gpu); + + return gpu; +} + +struct page *uvm_gpu_chunk_to_page(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NvU64 sys_addr = chunk->address + uvm_gpu_numa_info(gpu)->system_memory_window_start; + unsigned long pfn = sys_addr >> PAGE_SHIFT; + + UVM_ASSERT(sys_addr + uvm_gpu_chunk_get_size(chunk) <= uvm_gpu_numa_info(gpu)->system_memory_window_end + 1); + UVM_ASSERT(gpu->parent->numa_info.enabled); + + return pfn_to_page(pfn); +} + +void uvm_pmm_gpu_sync(uvm_pmm_gpu_t *pmm) +{ + size_t i; + + if (!pmm->initialized) + return; + + // Just go over all root chunks and sync the ones that are not PMA OWNED. + // This is slow, but uvm_pmm_gpu_sync() is a rarely used operation not + // critical for performance. 
+ for (i = 0; i < pmm->root_chunks.count; ++i) { + uvm_gpu_root_chunk_t *root_chunk = &pmm->root_chunks.array[i]; + + root_chunk_lock(pmm, root_chunk); + if (root_chunk->chunk.state != UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED) { + NV_STATUS status = uvm_tracker_wait(&root_chunk->tracker); + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); + } + root_chunk_unlock(pmm, root_chunk); + } +} + +static uvm_pmm_gpu_memory_type_t pmm_squash_memory_type(uvm_parent_gpu_t *parent_gpu, uvm_pmm_gpu_memory_type_t type) +{ + + + + + + + + + + + + + return type; + +} + +NV_STATUS uvm_pmm_gpu_alloc(uvm_pmm_gpu_t *pmm, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_gpu_memory_type_t mem_type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunks, + uvm_tracker_t *out_tracker) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + size_t i; + + UVM_ASSERT((unsigned)mem_type < UVM_PMM_GPU_MEMORY_TYPE_COUNT); + UVM_ASSERT_MSG(is_power_of_2(chunk_size), "chunk size %u\n", chunk_size); + UVM_ASSERT_MSG(chunk_size & pmm->chunk_sizes[mem_type], "chunk size %u\n", chunk_size); + UVM_ASSERT(num_chunks == 0 || chunks); + UVM_ASSERT((flags & UVM_PMM_ALLOC_FLAGS_MASK) == flags); + + if (flags & UVM_PMM_ALLOC_FLAGS_EVICT) { + // If eviction is requested then VA block locks need to be lockable + uvm_assert_lockable_order(UVM_LOCK_ORDER_VA_BLOCK); + } + + mem_type = pmm_squash_memory_type(gpu->parent, mem_type); + for (i = 0; i < num_chunks; i++) { + uvm_gpu_root_chunk_t *root_chunk; + + status = alloc_chunk(pmm, mem_type, chunk_size, flags, &chunks[i]); + if (status != NV_OK) + goto error; + + root_chunk = root_chunk_from_chunk(pmm, chunks[i]); + + root_chunk_lock(pmm, root_chunk); + uvm_tracker_remove_completed(&root_chunk->tracker); + status = uvm_tracker_add_tracker_safe(&local_tracker, &root_chunk->tracker); + root_chunk_unlock(pmm, root_chunk); + + if (status != NV_OK) { + i++; + goto error; + } + } + + // Before we return to the caller, we need to ensure that the tracker only + // contains tracker entries belonging to the PMM's GPU. Otherwise we + // could leak trackers for other GPUs into VA spaces which never + // registered those GPUs, causing lifetime problems when those GPUs go + // away. + status = uvm_tracker_wait_for_other_gpus(&local_tracker, gpu); + if (status != NV_OK) + goto error; + + if (out_tracker) { + status = uvm_tracker_add_tracker_safe(out_tracker, &local_tracker); + uvm_tracker_clear(&local_tracker); + if (status != NV_OK) + goto error; + } + + return uvm_tracker_wait_deinit(&local_tracker); + +error: + uvm_tracker_deinit(&local_tracker); + while (i-- > 0) + free_chunk(pmm, chunks[i]); + + // Reset the array to make error handling easier for callers. 
+ memset(chunks, 0, sizeof(chunks[0]) * num_chunks); + + return status; +} + +NV_STATUS uvm_pmm_gpu_alloc_kernel(uvm_pmm_gpu_t *pmm, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunks, + uvm_tracker_t *out_tracker) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + size_t i; + uvm_pmm_gpu_memory_type_t memory_type = UVM_PMM_GPU_MEMORY_TYPE_KERNEL; + + + + + + status = uvm_pmm_gpu_alloc(pmm, num_chunks, chunk_size, memory_type, flags, chunks, out_tracker); + if (status != NV_OK) + return status; + + for (i = 0; i < num_chunks; ++i) { + UVM_ASSERT(chunks[i]->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + uvm_spin_lock(&pmm->list_lock); + chunk_unpin(pmm, chunks[i], UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + uvm_spin_unlock(&pmm->list_lock); + } + + return NV_OK; +} + +static void chunk_update_lists_locked(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + + uvm_assert_spinlock_locked(&pmm->list_lock); + + if (uvm_pmm_gpu_memory_type_is_user(chunk->type)) { + if (chunk_is_root_chunk_pinned(pmm, chunk)) { + UVM_ASSERT(root_chunk->chunk.state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT || + root_chunk->chunk.state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + list_del_init(&root_chunk->chunk.list); + } + else if (root_chunk->chunk.state != UVM_PMM_GPU_CHUNK_STATE_FREE) { + UVM_ASSERT(root_chunk->chunk.state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT || + root_chunk->chunk.state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + list_move_tail(&root_chunk->chunk.list, &pmm->root_chunks.va_block_used); + } + } + + // TODO: Bug 1757148: Improve fragmentation of split chunks + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_FREE) + list_move_tail(&chunk->list, find_free_list_chunk(pmm, chunk)); + else if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED) + list_del_init(&chunk->list); +} + +void uvm_pmm_gpu_unpin_temp(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_va_block_t *va_block) +{ + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + UVM_ASSERT(uvm_pmm_gpu_memory_type_is_user(chunk->type)); + + INIT_LIST_HEAD(&chunk->list); + + uvm_spin_lock(&pmm->list_lock); + + UVM_ASSERT(!chunk->va_block); + UVM_ASSERT(va_block); + UVM_ASSERT(chunk->va_block_page_index < uvm_va_block_num_cpu_pages(va_block)); + + chunk_unpin(pmm, chunk, UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + chunk->va_block = va_block; + chunk_update_lists_locked(pmm, chunk); + + uvm_spin_unlock(&pmm->list_lock); +} + +void uvm_pmm_gpu_free(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_tracker_t *tracker) +{ + NV_STATUS status; + uvm_gpu_root_chunk_t *root_chunk; + + if (!chunk) + return; + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + root_chunk = root_chunk_from_chunk(pmm, chunk); + + if (tracker) { + uvm_tracker_remove_completed(tracker); + + root_chunk_lock(pmm, root_chunk); + + // Remove any completed entries from the root tracker to prevent it from + // growing too much over time. 
+ uvm_tracker_remove_completed(&root_chunk->tracker); + + status = uvm_tracker_add_tracker_safe(&root_chunk->tracker, tracker); + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); + + root_chunk_unlock(pmm, root_chunk); + } + + free_chunk(pmm, chunk); +} + +static NvU32 num_subchunks(uvm_gpu_chunk_t *parent) +{ + uvm_chunk_size_t parent_size, child_size; + UVM_ASSERT(parent->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + parent_size = uvm_gpu_chunk_get_size(parent); + child_size = uvm_gpu_chunk_get_size(parent->suballoc->subchunks[0]); + return (NvU32)uvm_div_pow2_64(parent_size, child_size); +} + +static uvm_gpu_chunk_t *next_sibling(uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_chunk_t *parent = chunk->parent; + size_t index; + + UVM_ASSERT(parent); + UVM_ASSERT(parent->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + + index = (size_t)uvm_div_pow2_64(chunk->address - parent->address, uvm_gpu_chunk_get_size(chunk)); + UVM_ASSERT(index < num_subchunks(parent)); + + ++index; + if (index == num_subchunks(parent)) + return NULL; + + return parent->suballoc->subchunks[index]; +} + +// Check that the chunk is in a mergeable state: all children must be pinned or +// or all children must be allocated with the same reverse mapping. +// +// Always returns true so it can be called from an assert macro. +static bool assert_chunk_mergeable(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_chunk_t *first_child = chunk->suballoc->subchunks[0]; + uvm_va_block_t *child_va_block = first_child->va_block; + size_t i; + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + UVM_ASSERT(first_child->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED || + first_child->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + + for (i = 1; i < num_subchunks(chunk); i++) { + uvm_gpu_chunk_t *child = chunk->suballoc->subchunks[i]; + + UVM_ASSERT(child->state == first_child->state); + if (first_child->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED) { + uvm_gpu_chunk_t *prev_child = chunk->suballoc->subchunks[i-1]; + + UVM_ASSERT(child->va_block == child_va_block); + UVM_ASSERT(child->va_block_page_index == + prev_child->va_block_page_index + uvm_gpu_chunk_get_size(prev_child) / PAGE_SIZE); + } + } + + if (first_child->state == UVM_PMM_GPU_CHUNK_STATE_FREE) { + UVM_ASSERT(chunk->suballoc->allocated == 0); + } + else { + UVM_ASSERT_MSG(chunk->suballoc->allocated == num_subchunks(chunk), "%u != %u\n", + chunk->suballoc->allocated, num_subchunks(chunk)); + } + + return true; +} + +// Merges a previously-split chunk. Assumes that all of its children have +// uniform state. This only merges leaves, so none of the children can be in the +// split state themselves. +// +// The children need to be removed from any lists before the merge. +// +// The merged chunk inherits the former state of its children. +static void merge_gpu_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_pmm_gpu_chunk_suballoc_t *suballoc; + uvm_gpu_chunk_t *subchunk; + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + uvm_pmm_gpu_chunk_state_t child_state; + size_t i, num_sub = num_subchunks(chunk); + + uvm_assert_mutex_locked(&pmm->lock); + UVM_ASSERT(assert_chunk_mergeable(pmm, chunk)); + + // Transition the chunk state under the list lock first and then clean up + // the subchunk state. 
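// Editorial note, not in the original source: when all subchunks were
// temporarily pinned, the merge replaces num_sub pinned leaf chunks with a
// single pinned leaf, which is why the root chunk's pinned_leaf_chunks counter
// below is adjusted by (1 - num_sub) rather than simply decremented.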
+ uvm_spin_lock(&pmm->list_lock); + + child_state = chunk->suballoc->subchunks[0]->state; + + if (child_state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED) { + subchunk = chunk->suballoc->subchunks[0]; + UVM_ASSERT(subchunk->va_block); + chunk->va_block = subchunk->va_block; + chunk->va_block_page_index = subchunk->va_block_page_index; + } + else if (child_state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED) { + UVM_ASSERT(root_chunk->chunk.suballoc->pinned_leaf_chunks >= num_sub); + root_chunk->chunk.suballoc->pinned_leaf_chunks += 1 - num_sub; + } + + chunk->state = child_state; + suballoc = chunk->suballoc; + chunk->suballoc = NULL; + + // The resulting chunk is assumed to be non-zero as a simplification, + // instead of checking that all the subchunks are zero, since callers of + // uvm_pmm_gpu_alloc are not required to clear it. However, we think that + // this covers all relevant cases since it is uncommon to split a chunk and + // not to use any of the subchunks later on. + chunk->is_zero = false; + + uvm_spin_unlock(&pmm->list_lock); + + for (i = 0; i < num_sub; i++) { + subchunk = suballoc->subchunks[i]; + + // The subchunks should have been removed from their lists prior to the + // merge. + UVM_ASSERT(list_empty(&subchunk->list)); + + if (child_state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED) + UVM_ASSERT(subchunk->va_block != NULL); + + kmem_cache_free(CHUNK_CACHE, subchunk); + } + + kmem_cache_free(chunk_split_cache[ilog2(num_sub)].cache, suballoc); +} + +// Checks that chunk is below ancestor in the tree. Always returns true so it +// can be called from an assert macro. +static bool assert_chunk_under(uvm_gpu_chunk_t *chunk, uvm_gpu_chunk_t *ancestor) +{ + UVM_ASSERT(ancestor->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + UVM_ASSERT(ancestor->suballoc); + UVM_ASSERT(ancestor->address <= chunk->address); + UVM_ASSERT(chunk->address < ancestor->address + uvm_gpu_chunk_get_size(ancestor)); + UVM_ASSERT(uvm_gpu_chunk_get_size(chunk) <= uvm_gpu_chunk_get_size(ancestor)); + return true; +} + +// Traverses the chunk tree from start in the given traversal order. +// +// If the callback returns a status value of NV_WARN_NOTHING_TO_DO when doing +// pre-order traversal, the traversal skips walking below that chunk. In all +// other cases, returning any non-NV_OK value stops the walk immediately and +// returns that status to the caller. +// +// Be careful modifying the tree from the callback. Changing the tree below the +// input chunk is fine and modifying the input chunk itself is fine, but the +// callback must not modify the tree above the input chunk. If that is needed, +// return a non-NV_OK status from the walk and re-start the walk. +static NV_STATUS chunk_walk(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *start, + chunk_walk_func_t func, + void *data, + chunk_walk_order_t order) +{ + NV_STATUS status = NV_OK; + uvm_gpu_chunk_t *curr, *sibling; + + curr = start; + + do { + if (curr != start) + UVM_ASSERT(assert_chunk_under(curr, start)); + + if (order == CHUNK_WALK_PRE_ORDER) { + status = func(pmm, curr, data); + if (status != NV_OK && status != NV_WARN_NOTHING_TO_DO) + return status; + } + + // Skip downward traversal on pre-order if requested + if (status != NV_WARN_NOTHING_TO_DO && curr->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT) { + // If the chunk is split, walk down + curr = curr->suballoc->subchunks[0]; + } + else { + // This is a leaf chunk. If not start itself, check siblings. 
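// Editorial example, not in the original source: for a root chunk R split into
// children A, B, C and D, with A further split into A0..A3, the pre-order walk
// visits R, A, A0, A1, A2, A3, B, C, D, while the post-order walk visits
// A0, A1, A2, A3, A, B, C, D, R. The loop below implements the "advance to the
// next sibling, or walk back up" step that both orders share.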
+ while (curr != start) { + if (order == CHUNK_WALK_POST_ORDER) { + status = func(pmm, curr, data); + if (status != NV_OK) + return status; + } + + sibling = next_sibling(curr); + if (sibling) { + curr = sibling; + break; + } + + // curr is the last chunk in its parent. Walk up and try again. + curr = curr->parent; + UVM_ASSERT(curr); + UVM_ASSERT(curr->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + } + } + } while (curr != start); + + // Invoke the final callback for start + if (order == CHUNK_WALK_POST_ORDER) + return func(pmm, curr, data); + + return NV_OK; +} + +static NV_STATUS chunk_walk_pre_order(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *start, chunk_walk_func_t func, void *data) +{ + return chunk_walk(pmm, start, func, data, CHUNK_WALK_PRE_ORDER); +} + +static NV_STATUS chunk_walk_post_order(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *start, chunk_walk_func_t func, void *data) +{ + return chunk_walk(pmm, start, func, data, CHUNK_WALK_POST_ORDER); +} + +typedef struct +{ + // Target size for the leaf subchunks + uvm_chunk_size_t min_size; + + // Number of subchunks split to this point. If the subchunks array is non- + // NULL, this is the number of elements currently in the array. + size_t num_subchunks_curr; + + // Number of subchunks needed for the whole split + size_t num_subchunks_total; + + // Storage for the final split chunks. May be NULL. + uvm_gpu_chunk_t **subchunks; + + // For testing + bool inject_error; +} split_walk_t; + +static NV_STATUS split_walk_func(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, void *data) +{ + uvm_chunk_size_t chunk_size, child_size; + uvm_chunk_sizes_mask_t chunk_sizes = pmm->chunk_sizes[chunk->type]; + size_t i, num_children; + split_walk_t *args = data; + NV_STATUS status; + + chunk_size = uvm_gpu_chunk_get_size(chunk); + UVM_ASSERT(chunk_size > args->min_size); + + child_size = uvm_chunk_find_prev_size(chunk_sizes, chunk_size); + UVM_ASSERT(child_size != UVM_CHUNK_SIZE_INVALID); + num_children = chunk_size / child_size; + + if (unlikely(args->inject_error)) { + // Inject errors on the last split. inject_split_error is a bitfield, + // so we must take the lock to modify it. This path is only used in + // testing. + if (child_size == args->min_size && + args->num_subchunks_curr + num_children == args->num_subchunks_total) { + uvm_spin_lock(&pmm->list_lock); + chunk->inject_split_error = true; + uvm_spin_unlock(&pmm->list_lock); + } + } + + status = split_gpu_chunk(pmm, chunk); + if (status != NV_OK) + return status; + + // If we've hit our target, add all child subchunks to the array + if (child_size == args->min_size) { + for (i = 0; i < num_children; i++) { + UVM_ASSERT(args->num_subchunks_curr < args->num_subchunks_total); + if (args->subchunks) + args->subchunks[args->num_subchunks_curr] = chunk->suballoc->subchunks[i]; + ++args->num_subchunks_curr; + } + + // No need to walk below this chunk + return NV_WARN_NOTHING_TO_DO; + } + + return NV_OK; +} + +static NV_STATUS merge_walk_func(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, void *data) +{ + // The merge walk uses post-order traversal, so all subchunks are guaranteed + // to have already been merged. 
+ if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT) + merge_gpu_chunk(pmm, chunk); + return NV_OK; +} + +static void uvm_pmm_gpu_merge_chunk_locked(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + NV_STATUS status; + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + + uvm_assert_mutex_locked(&pmm->lock); + + status = chunk_walk_post_order(pmm, chunk, merge_walk_func, NULL); + + // merge_walk_func can't fail + UVM_ASSERT(status == NV_OK); + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); +} + +NV_STATUS uvm_pmm_gpu_split_chunk(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *chunk, + uvm_chunk_size_t subchunk_size, + uvm_gpu_chunk_t **subchunks) +{ + NV_STATUS status; + split_walk_t walk_args = + { + .min_size = subchunk_size, + .num_subchunks_curr = 0, + .num_subchunks_total = uvm_gpu_chunk_get_size(chunk) / subchunk_size, + .subchunks = subchunks, + .inject_error = chunk->inject_split_error, + }; + + UVM_ASSERT(is_power_of_2(subchunk_size)); + UVM_ASSERT(subchunk_size & pmm->chunk_sizes[chunk->type]); + UVM_ASSERT(subchunk_size < uvm_gpu_chunk_get_size(chunk)); + + uvm_mutex_lock(&pmm->lock); + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + // If we're supposed to inject an error, clear out the root chunk's flag so + // we can inject after nearly all chunks have been split. Otherwise + // split_gpu_chunk will fail on the first try, without creating the tree. + if (unlikely(walk_args.inject_error)) { + // inject_split_error is a bitfield, so we must take the lock to modify + // it. This path is only used in testing. + uvm_spin_lock(&pmm->list_lock); + chunk->inject_split_error = false; + uvm_spin_unlock(&pmm->list_lock); + } + + status = chunk_walk_pre_order(pmm, chunk, split_walk_func, &walk_args); + if (status != NV_OK) { + // Put the chunk back in its original state + uvm_pmm_gpu_merge_chunk_locked(pmm, chunk); + } + else { + UVM_ASSERT(walk_args.num_subchunks_curr == walk_args.num_subchunks_total); + } + + uvm_mutex_unlock(&pmm->lock); + return status; +} + +typedef struct +{ + size_t num_written; + size_t num_to_write; + size_t num_to_skip; + uvm_gpu_chunk_t **subchunks; +} get_subchunks_walk_t; + +static NV_STATUS get_subchunks_walk_func(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, void *data) +{ + get_subchunks_walk_t *args = data; + + // We're only collecting leaf chunks + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT) + return NV_OK; + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + if (args->num_to_skip) { + --args->num_to_skip; + return NV_OK; + } + + UVM_ASSERT(args->num_written < args->num_to_write); + args->subchunks[args->num_written++] = chunk; + + // Bail immediately once we hit our limit. Note that this is not an error: + // we just need to exit the walk. 
+ if (args->num_written == args->num_to_write) + return NV_ERR_OUT_OF_RANGE; + + return NV_OK; +} + +size_t uvm_pmm_gpu_get_subchunks(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *parent, + size_t start_index, + size_t num_subchunks, + uvm_gpu_chunk_t **subchunks) +{ + NV_STATUS status; + + get_subchunks_walk_t walk_args = + { + .num_written = 0, + .num_to_write = num_subchunks, + .num_to_skip = start_index, + .subchunks = subchunks, + }; + + if (num_subchunks == 0) + return 0; + + UVM_ASSERT(parent->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + parent->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED || + parent->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + + uvm_mutex_lock(&pmm->lock); + + // Either pre- or post-order would work. Pick post-order just because we + // only care about leaf chunks and we may exit early, so we'd get slightly + // fewer callbacks. + status = chunk_walk_post_order(pmm, parent, get_subchunks_walk_func, &walk_args); + if (status != NV_OK) { + UVM_ASSERT(status == NV_ERR_OUT_OF_RANGE); + UVM_ASSERT(walk_args.num_written == walk_args.num_to_write); + } + + uvm_mutex_unlock(&pmm->lock); + return walk_args.num_written; +} + +static uvm_gpu_chunk_t *list_first_chunk(struct list_head *list) +{ + return list_first_entry_or_null(list, uvm_gpu_chunk_t, list); +} + +void uvm_pmm_gpu_merge_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_mutex_lock(&pmm->lock); + uvm_pmm_gpu_merge_chunk_locked(pmm, chunk); + uvm_mutex_unlock(&pmm->lock); +} + +static void root_chunk_unmap_indirect_peer(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk, uvm_gpu_t *other_gpu) +{ + uvm_gpu_root_chunk_indirect_peer_t *indirect_peer; + size_t index = root_chunk_index(pmm, root_chunk); + long long new_count; + NV_STATUS status; + + indirect_peer = &pmm->root_chunks.indirect_peer[uvm_id_gpu_index(other_gpu->id)]; + + uvm_assert_root_chunk_locked(pmm, root_chunk); + UVM_ASSERT(indirect_peer->dma_addrs); + UVM_ASSERT(root_chunk->chunk.state != UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED); + UVM_ASSERT(uvm_processor_mask_test(&root_chunk->indirect_peers_mapped, other_gpu->id)); + + // The tracker could have work which requires the indirect peer mappings to + // remain until finished, such as PTE unmaps of this chunk from indirect + // peers, so we need to wait. We also need to wait on the entire tracker, + // not just other_gpu's entries, because there might be implicit chained + // dependencies in the tracker. + // + // We know there can't be any other work which requires these mappings: + // - If we're freeing the root chunk back to PMA or switching types of the + // root chunk, nothing else can reference the chunk. + // + // - If the chunk is still allocated then global peer access must be in the + // process of being disabled, say because one of the GPUs is being + // unregistered. We know that all VA spaces must have already called + // disable_peers and have waited on those PTE unmaps. The chunk could be + // freed concurrently with this indirect peer unmap, but that will be + // serialized by the root chunk lock. 
+ status = uvm_tracker_wait(&root_chunk->tracker); + if (status != NV_OK) + UVM_ASSERT(uvm_global_get_status() != NV_OK); + + uvm_gpu_unmap_cpu_pages(other_gpu, indirect_peer->dma_addrs[index], UVM_CHUNK_SIZE_MAX); + uvm_processor_mask_clear(&root_chunk->indirect_peers_mapped, other_gpu->id); + new_count = atomic64_dec_return(&indirect_peer->map_count); + UVM_ASSERT(new_count >= 0); +} + +static void root_chunk_unmap_indirect_peers(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk) +{ + uvm_gpu_id_t other_gpu_id; + + // Root chunks should use a global processor mask as they are not bound to + // a specific VA space. However, indirect peers are not supported when SMC + // partitioning is enabled and, therefore, we can obtain the uvm_gpu_t + // object directly from the uvm_parent_gpu_t object's id. + for_each_gpu_id_in_mask(other_gpu_id, &root_chunk->indirect_peers_mapped) { + uvm_gpu_t *other_gpu = uvm_gpu_get_by_processor_id(other_gpu_id); + root_chunk_unmap_indirect_peer(pmm, root_chunk, other_gpu); + } +} + +NV_STATUS uvm_pmm_gpu_indirect_peer_init(uvm_pmm_gpu_t *pmm, uvm_gpu_t *accessing_gpu) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NvU64 *dma_addrs; + uvm_gpu_root_chunk_indirect_peer_t *indirect_peer; + NV_STATUS status = NV_OK; + + indirect_peer = &pmm->root_chunks.indirect_peer[uvm_id_gpu_index(accessing_gpu->id)]; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + UVM_ASSERT(uvm_gpus_are_indirect_peers(gpu, accessing_gpu)); + UVM_ASSERT(!indirect_peer->dma_addrs); + UVM_ASSERT(atomic64_read(&indirect_peer->map_count) == 0); + + // Each root chunk tracks whether it has a mapping to a given indirect peer, + // so we don't need to initialize this array. + dma_addrs = uvm_kvmalloc(pmm->root_chunks.count * sizeof(dma_addrs[0])); + if (!dma_addrs) + status = NV_ERR_NO_MEMORY; + else + indirect_peer->dma_addrs = dma_addrs; + + return status; +} + +static bool check_indirect_peer_empty(uvm_pmm_gpu_t *pmm, uvm_gpu_t *other_gpu) +{ + uvm_gpu_root_chunk_indirect_peer_t *indirect_peer; + size_t i; + + indirect_peer = &pmm->root_chunks.indirect_peer[uvm_id_gpu_index(other_gpu->id)]; + + for (i = 0; i < pmm->root_chunks.count; i++) { + uvm_gpu_root_chunk_t *root_chunk = &pmm->root_chunks.array[i]; + + // This doesn't take the root chunk lock because checking the mask is an + // atomic operation. + if (uvm_processor_mask_test(&root_chunk->indirect_peers_mapped, other_gpu->id)) { + UVM_ASSERT(atomic64_read(&indirect_peer->map_count) > 0); + return false; + } + } + + UVM_ASSERT(atomic64_read(&indirect_peer->map_count) == 0); + return true; +} + +void uvm_pmm_gpu_indirect_peer_destroy(uvm_pmm_gpu_t *pmm, uvm_gpu_t *other_gpu) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_gpu_root_chunk_indirect_peer_t *indirect_peer; + size_t i; + + indirect_peer = &pmm->root_chunks.indirect_peer[uvm_id_gpu_index(other_gpu->id)]; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + UVM_ASSERT(uvm_gpus_are_indirect_peers(gpu, other_gpu)); + + if (!indirect_peer->dma_addrs) { + UVM_ASSERT(check_indirect_peer_empty(pmm, other_gpu)); + return; + } + + // Just go over all root chunks and unmap them. This is slow, but it is not + // a frequent operation. + for (i = 0; i < pmm->root_chunks.count && atomic64_read(&indirect_peer->map_count); i++) { + uvm_gpu_root_chunk_t *root_chunk = &pmm->root_chunks.array[i]; + + // Take the root chunk lock to prevent chunks from transitioning in or + // out of the PMA_OWNED state, and to serialize updates to the tracker + // and indirect_peers_mapped mask. 
Note that indirect peers besides + // other_gpu could be trying to create mappings concurrently. + root_chunk_lock(pmm, root_chunk); + + if (root_chunk->chunk.state == UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED) + UVM_ASSERT(uvm_processor_mask_empty(&root_chunk->indirect_peers_mapped)); + else if (uvm_processor_mask_test(&root_chunk->indirect_peers_mapped, other_gpu->id)) + root_chunk_unmap_indirect_peer(pmm, root_chunk, other_gpu); + + root_chunk_unlock(pmm, root_chunk); + } + + UVM_ASSERT(check_indirect_peer_empty(pmm, other_gpu)); + + uvm_kvfree(indirect_peer->dma_addrs); + indirect_peer->dma_addrs = NULL; +} + +NV_STATUS uvm_pmm_gpu_indirect_peer_map(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_gpu_t *accessing_gpu) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_gpu_root_chunk_indirect_peer_t *indirect_peer; + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + size_t index = root_chunk_index(pmm, root_chunk); + NV_STATUS status = NV_OK; + + indirect_peer = &pmm->root_chunks.indirect_peer[uvm_id_gpu_index(accessing_gpu->id)]; + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + + UVM_ASSERT(uvm_gpus_are_indirect_peers(gpu, accessing_gpu)); + UVM_ASSERT(indirect_peer->dma_addrs); + + // Serialize: + // - Concurrent mappings to this root chunk (same or different GPUs) + // - Concurrent unmappings of this root chunk (must be a different GPU) + root_chunk_lock(pmm, root_chunk); + + if (!uvm_processor_mask_test(&root_chunk->indirect_peers_mapped, accessing_gpu->id)) { + status = uvm_gpu_map_cpu_pages(accessing_gpu, + uvm_gpu_chunk_to_page(pmm, &root_chunk->chunk), + UVM_CHUNK_SIZE_MAX, + &indirect_peer->dma_addrs[index]); + if (status == NV_OK) { + uvm_processor_mask_set(&root_chunk->indirect_peers_mapped, accessing_gpu->id); + atomic64_inc(&indirect_peer->map_count); + } + } + + root_chunk_unlock(pmm, root_chunk); + return status; +} + +NvU64 uvm_pmm_gpu_indirect_peer_addr(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_gpu_t *accessing_gpu) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_gpu_root_chunk_indirect_peer_t *indirect_peer; + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + size_t index = root_chunk_index(pmm, root_chunk); + NvU64 chunk_offset = chunk->address - root_chunk->chunk.address; + + indirect_peer = &pmm->root_chunks.indirect_peer[uvm_id_gpu_index(accessing_gpu->id)]; + + UVM_ASSERT(uvm_gpus_are_indirect_peers(gpu, accessing_gpu)); + UVM_ASSERT(indirect_peer->dma_addrs); + UVM_ASSERT(uvm_processor_mask_test(&root_chunk->indirect_peers_mapped, accessing_gpu->id)); + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + + return indirect_peer->dma_addrs[index] + chunk_offset; +} + +uvm_gpu_phys_address_t uvm_pmm_gpu_peer_phys_address(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *chunk, + uvm_gpu_t *accessing_gpu) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_gpu_peer_t *peer_caps = uvm_gpu_peer_caps(accessing_gpu, gpu); + uvm_aperture_t aperture = uvm_gpu_peer_aperture(accessing_gpu, gpu); + NvU64 addr; + + if (peer_caps->is_indirect_peer) + addr = uvm_pmm_gpu_indirect_peer_addr(pmm, chunk, accessing_gpu); + else if (uvm_gpus_are_nvswitch_connected(accessing_gpu, gpu)) + addr = chunk->address + gpu->parent->nvswitch_info.fabric_memory_window_start; + else + addr = chunk->address; + + return uvm_gpu_phys_address(aperture, addr); +} + 
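As an editorial aside (not part of the driver source), the address arithmetic used by the indirect-peer path above can be illustrated with a small standalone sketch. The constants and helper below are hypothetical stand-ins: they only mirror how root_chunk_index() derives the index into the per-peer dma_addrs array and how uvm_pmm_gpu_indirect_peer_addr() adds the chunk's offset within its 2M root chunk.

#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE_MAX (2ull * 1024 * 1024)   /* stand-in for UVM_CHUNK_SIZE_MAX (2M) */

/* Mirrors uvm_pmm_gpu_indirect_peer_addr(): the accessing GPU reaches a chunk
 * through the sysmem DMA mapping of its 2M root chunk plus the chunk's offset
 * inside that root chunk. dma_addrs[] stands in for
 * uvm_gpu_root_chunk_indirect_peer_t::dma_addrs. */
static uint64_t indirect_peer_addr(const uint64_t *dma_addrs, uint64_t chunk_address)
{
    uint64_t root_index   = chunk_address / CHUNK_SIZE_MAX;  /* as in root_chunk_index() */
    uint64_t chunk_offset = chunk_address % CHUNK_SIZE_MAX;  /* chunk->address - root chunk address */

    return dma_addrs[root_index] + chunk_offset;
}

int main(void)
{
    /* Example numbers only: two root chunks mapped at arbitrary sysmem DMA addresses. */
    uint64_t dma_addrs[2] = { 0x80000000ull, 0x90000000ull };

    /* A chunk at vidmem offset 2M + 192K lives in root chunk 1. */
    printf("0x%llx\n", (unsigned long long)indirect_peer_addr(dma_addrs, 0x230000ull));
    /* Prints 0x90030000: dma_addrs[1] + 0x30000. */
    return 0;
}
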
+uvm_gpu_address_t uvm_pmm_gpu_peer_copy_address(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *chunk, + uvm_gpu_t *accessing_gpu) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_gpu_peer_t *peer_caps = uvm_gpu_peer_caps(accessing_gpu, gpu); + uvm_gpu_identity_mapping_t *gpu_peer_mapping; + + UVM_ASSERT(peer_caps->link_type != UVM_GPU_LINK_INVALID); + + if (peer_caps->is_indirect_peer || + (accessing_gpu->parent->peer_copy_mode == UVM_GPU_PEER_COPY_MODE_PHYSICAL)) { + // Indirect peers are accessed as sysmem addresses, so they don't need + // to use identity mappings. + return uvm_gpu_address_from_phys(uvm_pmm_gpu_peer_phys_address(pmm, chunk, accessing_gpu)); + } + + UVM_ASSERT(accessing_gpu->parent->peer_copy_mode == UVM_GPU_PEER_COPY_MODE_VIRTUAL); + gpu_peer_mapping = uvm_gpu_get_peer_mapping(accessing_gpu, gpu->id); + + return uvm_gpu_address_virtual(gpu_peer_mapping->base + chunk->address); +} + +static NV_STATUS evict_root_chunk_from_va_block(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk, uvm_va_block_t *va_block) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + + UVM_ASSERT(va_block); + + // To evict the chunks from the VA block we need to lock it, but we already + // have the PMM lock held. Unlock it first and re-lock it after. + uvm_mutex_unlock(&pmm->lock); + + uvm_mutex_lock(&va_block->lock); + + status = uvm_va_block_evict_chunks(va_block, gpu, &root_chunk->chunk, &tracker); + + uvm_mutex_unlock(&va_block->lock); + + // The block has been retained by find_and_retain_va_block_to_evict(), + // release it here as it's not needed any more. Notably do that even if + // uvm_va_block_evict_chunks() fails. + uvm_va_block_release(va_block); + + if (status == NV_OK) { + root_chunk_lock(pmm, root_chunk); + status = uvm_tracker_add_tracker_safe(&root_chunk->tracker, &tracker); + root_chunk_unlock(pmm, root_chunk); + } + + uvm_tracker_deinit(&tracker); + + uvm_mutex_lock(&pmm->lock); + + return status; +} + +void uvm_pmm_gpu_mark_chunk_evicted(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_spin_lock(&pmm->list_lock); + + UVM_ASSERT(chunk_is_in_eviction(pmm, chunk)); + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + UVM_ASSERT(chunk->va_block != NULL); + + chunk->va_block = NULL; + chunk->va_block_page_index = PAGES_PER_UVM_VA_BLOCK; + chunk_pin(pmm, chunk); + + uvm_spin_unlock(&pmm->list_lock); +} + +static NV_STATUS pin_free_chunks_func(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, void *data) +{ + uvm_assert_mutex_locked(&pmm->lock); + + uvm_spin_lock(&pmm->list_lock); + + UVM_ASSERT(chunk_is_in_eviction(pmm, chunk)); + + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_FREE) { + list_del_init(&chunk->list); + chunk_pin(pmm, chunk); + if (chunk->parent) + chunk->parent->suballoc->allocated++; + } + + uvm_spin_unlock(&pmm->list_lock); + + return NV_OK; +} + +static NV_STATUS free_first_pinned_chunk_func(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, void *data) +{ + uvm_assert_mutex_locked(&pmm->lock); + + UVM_ASSERT(!chunk_is_in_eviction(pmm, chunk)); + + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED) { + free_chunk_with_merges(pmm, chunk); + return NV_ERR_MORE_DATA_AVAILABLE; + } + + return NV_OK; +} + +typedef struct +{ + uvm_va_block_t *va_block_to_evict_from; +} evict_data_t; + +static NV_STATUS find_and_retain_va_block_to_evict(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, void *data) +{ + NV_STATUS status = NV_OK; + evict_data_t *evict_data = (evict_data_t *)data; + + 
UVM_ASSERT(evict_data->va_block_to_evict_from == NULL); + + uvm_spin_lock(&pmm->list_lock); + + // All free chunks should have been pinned already by pin_free_chunks_func(). + UVM_ASSERT_MSG(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT, + "state %s\n", uvm_pmm_gpu_chunk_state_string(chunk->state)); + + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED) { + UVM_ASSERT(chunk->va_block); + evict_data->va_block_to_evict_from = chunk->va_block; + uvm_va_block_retain(chunk->va_block); + status = NV_ERR_MORE_DATA_AVAILABLE; + } + + uvm_spin_unlock(&pmm->list_lock); + + return status; +} + +static bool root_chunk_has_elevated_page(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_gpu_chunk_t *chunk = &root_chunk->chunk; + struct page *page; + + if (!gpu->parent->numa_info.enabled) + return false; + + page = uvm_gpu_chunk_to_page(pmm, chunk); + + return page_count(page) > UVM_CHUNK_SIZE_MAX / PAGE_SIZE; +} + +static NV_STATUS evict_root_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk, uvm_pmm_context_t pmm_context) +{ + NV_STATUS status; + NV_STATUS free_status; + uvm_gpu_chunk_t *chunk = &root_chunk->chunk; + const uvm_pmm_gpu_memory_type_t type = chunk->type; + + uvm_assert_mutex_locked(&pmm->lock); + + // First pin all the free subchunks + status = chunk_walk_pre_order(pmm, chunk, pin_free_chunks_func, NULL); + UVM_ASSERT(status == NV_OK); + while (1) { + evict_data_t evict = {0}; + status = chunk_walk_pre_order(pmm, chunk, find_and_retain_va_block_to_evict, &evict); + + // find_and_retain_va_block_to_evict() returns NV_ERR_MORE_DATA_AVAILABLE + // immediately after finding the first VA block to evict from and NV_OK + // if no more blocks are left. + if (status != NV_ERR_MORE_DATA_AVAILABLE) { + UVM_ASSERT(status == NV_OK); + break; + } + + // Evict the chunks from the VA block. Notably this will unlock and + // re-lock the PMM mutex. This is ok as we don't rely on any PMM state + // that can change across the calls. In particular, the walk to pick the + // next VA block to evict above is always started from the root chunk. + status = evict_root_chunk_from_va_block(pmm, root_chunk, evict.va_block_to_evict_from); + if (status != NV_OK) + goto error; + } + + // All of the leaf chunks should be pinned now, merge them all back into a + // pinned root chunk. + uvm_pmm_gpu_merge_chunk_locked(pmm, chunk); + + uvm_spin_lock(&pmm->list_lock); + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + uvm_gpu_chunk_set_in_eviction(chunk, false); + + chunk->is_zero = false; + + uvm_spin_unlock(&pmm->list_lock); + + // Bug 2085760: Check if there is any page within the evicted chunk with an + // elevated refcount. In such case there is another holder of the page, + // which prevents us from reusing it. This can happen on systems where + // struct pages backed by GPU memory are directly available to third-party + // device drivers. Note that at this point, the chunk ends up not being in + // a chunk free list. We can just free it, so PMA will handle the page with + // elevated refcount. + if (root_chunk_has_elevated_page(pmm, root_chunk)) { + free_root_chunk(pmm, root_chunk, free_root_chunk_mode_from_pmm_context(pmm_context)); + return NV_ERR_IN_USE; + } + + UVM_ASSERT(check_chunk(pmm, chunk)); + + return NV_OK; + +error: + // On error we need to free all the chunks that we were able to evict so + // far. 
They should all be pinned. + + // Clear the eviction state so any new chunks freed by other threads are + // actually freed instead of pinned. We need the list lock to make the + // eviction check and conditional pin in chunk_free_locked atomic with our + // free-if-pinned loop below. + uvm_spin_lock(&pmm->list_lock); + + uvm_gpu_chunk_set_in_eviction(chunk, false); + + // In case we didn't manage to evict any chunks and hence the root is still + // unpinned, we need to put it back on an eviction list. + // chunk_update_lists_locked() will do that. + chunk_update_lists_locked(pmm, chunk); + + uvm_spin_unlock(&pmm->list_lock); + + do { + free_status = chunk_walk_pre_order(pmm, chunk, free_first_pinned_chunk_func, NULL); + } while (free_status == NV_ERR_MORE_DATA_AVAILABLE); + UVM_ASSERT(free_status == NV_OK); + + (void)free_next_available_root_chunk(pmm, type); + + return status; +} + +static bool chunk_is_evictable(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + + uvm_assert_spinlock_locked(&pmm->list_lock); + + if (root_chunk->chunk.state == UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED) + return false; + + if (chunk_is_root_chunk_pinned(pmm, chunk)) + return false; + + if (chunk_is_in_eviction(pmm, chunk)) + return false; + + // An evictable chunk's root should be on one of the eviction lists. + UVM_ASSERT(!list_empty(&root_chunk->chunk.list)); + + return true; +} + +static void chunk_start_eviction(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + chunk = &root_chunk->chunk; + + uvm_assert_spinlock_locked(&pmm->list_lock); + + UVM_ASSERT(chunk_is_evictable(pmm, chunk)); + UVM_ASSERT(!list_empty(&chunk->list)); + + list_del_init(&chunk->list); + uvm_gpu_chunk_set_in_eviction(chunk, true); +} + +static void root_chunk_update_eviction_list(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, struct list_head *list) +{ + uvm_spin_lock(&pmm->list_lock); + + UVM_ASSERT(uvm_gpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_MAX); + UVM_ASSERT(uvm_pmm_gpu_memory_type_is_user(chunk->type)); + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + if (!chunk_is_root_chunk_pinned(pmm, chunk) && !chunk_is_in_eviction(pmm, chunk)) { + // An unpinned chunk not selected for eviction should be on one of the + // eviction lists. + UVM_ASSERT(!list_empty(&chunk->list)); + + list_move_tail(&chunk->list, list); + } + + uvm_spin_unlock(&pmm->list_lock); +} + +void uvm_pmm_gpu_mark_root_chunk_used(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + root_chunk_update_eviction_list(pmm, chunk, &pmm->root_chunks.va_block_used); +} + +void uvm_pmm_gpu_mark_root_chunk_unused(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + root_chunk_update_eviction_list(pmm, chunk, &pmm->root_chunks.va_block_unused); +} + + + + + + +static uvm_gpu_root_chunk_t *pick_root_chunk_to_evict(uvm_pmm_gpu_t *pmm) +{ + uvm_gpu_chunk_t *chunk; + + uvm_spin_lock(&pmm->list_lock); + + // Check if there are root chunks sitting in the free lists. Non-zero + // chunks are preferred. 
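    // Editorial note, not in the original source: overall, the candidate order
    // implemented below is (1) free non-zero root chunks, (2) free zero root
    // chunks, (3) allocated root chunks on the va_block_unused list, and
    // finally (4) the head (least recently used entry) of the va_block_used
    // list.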
+ chunk = list_first_chunk(find_free_list(pmm, + UVM_PMM_GPU_MEMORY_TYPE_USER, + UVM_CHUNK_SIZE_MAX, + UVM_PMM_LIST_NO_ZERO)); + if (chunk) + UVM_ASSERT(!chunk->is_zero); + + if (!chunk) { + chunk = list_first_chunk(find_free_list(pmm, + UVM_PMM_GPU_MEMORY_TYPE_USER, + UVM_CHUNK_SIZE_MAX, + UVM_PMM_LIST_ZERO)); + if (chunk) + UVM_ASSERT(chunk->is_zero); + } + + if (!chunk) + chunk = list_first_chunk(&pmm->root_chunks.va_block_unused); + + // TODO: Bug 1765193: Move the chunks to the tail of the used list whenever + // they get mapped. + if (!chunk) + chunk = list_first_chunk(&pmm->root_chunks.va_block_used); + + if (chunk) + chunk_start_eviction(pmm, chunk); + + uvm_spin_unlock(&pmm->list_lock); + + if (chunk) + return root_chunk_from_chunk(pmm, chunk); + return NULL; +} + +static NV_STATUS pick_and_evict_root_chunk(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_pmm_context_t pmm_context, + uvm_gpu_chunk_t **out_chunk) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + uvm_gpu_chunk_t *chunk; + uvm_gpu_root_chunk_t *root_chunk; + + UVM_ASSERT(uvm_gpu_supports_eviction(gpu)); + + uvm_assert_mutex_locked(&pmm->lock); + + root_chunk = pick_root_chunk_to_evict(pmm); + if (!root_chunk) + return NV_ERR_NO_MEMORY; + + status = evict_root_chunk(pmm, root_chunk, pmm_context); + if (status != NV_OK) + return status; + + chunk = &root_chunk->chunk; + + if (uvm_pmm_gpu_memory_type_is_kernel(type)) { + NvU32 flags = 0; + if (pmm_context == PMM_CONTEXT_PMA_EVICTION) + flags |= UVM_PMA_CALLED_FROM_PMA_EVICTION; + + // Transitioning user memory type to kernel memory type requires pinning + // it so that PMA doesn't pick it for eviction. + status = nvUvmInterfacePmaPinPages(pmm->pma, + &chunk->address, + 1, + UVM_CHUNK_SIZE_MAX, + flags); + if (status == NV_ERR_IN_USE) { + // Pinning can fail if some of the pages have been chosen for + // eviction already. In that case free the root chunk back to PMA + // and let the caller retry. + free_root_chunk(pmm, root_chunk, free_root_chunk_mode_from_pmm_context(pmm_context)); + + return status; + } + + UVM_ASSERT_MSG(status == NV_OK, + "pmaPinPages(root_chunk=0x%llx) failed unexpectedly: %s\n", + chunk->address, + nvstatusToString(status)); + + // Unmap any indirect peer physical mappings for this chunk, since + // kernel chunks generally don't need them. + root_chunk_lock(pmm, root_chunk); + root_chunk_unmap_indirect_peers(pmm, root_chunk); + root_chunk_unlock(pmm, root_chunk); + + uvm_spin_lock(&pmm->list_lock); + chunk->type = type; + uvm_spin_unlock(&pmm->list_lock); + } + + *out_chunk = chunk; + return NV_OK; +} + +static NV_STATUS pick_and_evict_root_chunk_retry(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_pmm_context_t pmm_context, + uvm_gpu_chunk_t **out_chunk) +{ + NV_STATUS status; + + // Eviction can fail if the chunk gets selected for PMA eviction at + // the same time. Keep retrying. 
+ do { + status = pick_and_evict_root_chunk(pmm, type, pmm_context, out_chunk); + } while (status == NV_ERR_IN_USE); + + return status; +} + +static uvm_gpu_chunk_t *find_free_chunk_locked(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_chunk_size_t chunk_size, + uvm_pmm_list_zero_t zero_type) +{ + struct list_head *free_list = find_free_list(pmm, type, chunk_size, zero_type); + uvm_gpu_chunk_t *tmp, *chunk; + + uvm_assert_spinlock_locked(&pmm->list_lock); + + list_for_each_entry_safe(chunk, tmp, free_list, list) { + if (zero_type == UVM_PMM_LIST_ZERO) + UVM_ASSERT(chunk->is_zero); + else + UVM_ASSERT(!chunk->is_zero); + + if (chunk_is_in_eviction(pmm, chunk)) { + // Remove chunks that have been picked for eviction from the free + // lists. The eviction path does it with pin_free_chunks_func(), + // but there is a window between when a root chunk is chosen for + // eviction and all of its subchunks are removed from free lists. + list_del_init(&chunk->list); + } + else { + // Bug 2085760: When NUMA GPU is enabled, also check that the root + // chunk containing the candidate free chunk doesn't have any page + // escaped to another driver. If that is the case, just skip such + // chunk hoping that the page will eventually lose the extra + // reference. + // References can only be added when a virtual mapping to the page + // exists, so once a chunk in the free list has no elevated pages + // the chunk is safe to reuse. + if (!root_chunk_has_elevated_page(pmm, root_chunk_from_chunk(pmm, chunk))) + return chunk; + } + } + + return NULL; +} + +static uvm_gpu_chunk_t *claim_free_chunk(uvm_pmm_gpu_t *pmm, uvm_pmm_gpu_memory_type_t type, uvm_chunk_size_t chunk_size) +{ + uvm_gpu_chunk_t *chunk; + + uvm_spin_lock(&pmm->list_lock); + + // Prefer zero free chunks as they are likely going to be used for a new + // allocation. + // + // TODO: Bug 2446832: Allow callers to request non-zero chunks in PMM + // allocation functions, so we don't waste zero chunks. + chunk = find_free_chunk_locked(pmm, type, chunk_size, UVM_PMM_LIST_ZERO); + + if (!chunk) + chunk = find_free_chunk_locked(pmm, type, chunk_size, UVM_PMM_LIST_NO_ZERO); + + if (!chunk) + goto out; + + UVM_ASSERT_MSG(uvm_gpu_chunk_get_size(chunk) == chunk_size, "chunk size %u expected %u\n", + uvm_gpu_chunk_get_size(chunk), chunk_size); + UVM_ASSERT(chunk->type == type); + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_FREE); + UVM_ASSERT(!chunk_is_in_eviction(pmm, chunk)); + + if (chunk->parent) { + UVM_ASSERT(chunk->parent->suballoc); + UVM_ASSERT(chunk->parent->type == type); + UVM_ASSERT(chunk->parent->suballoc->allocated < num_subchunks(chunk->parent)); + chunk->parent->suballoc->allocated++; + } + + chunk_pin(pmm, chunk); + chunk_update_lists_locked(pmm, chunk); + +out: + uvm_spin_unlock(&pmm->list_lock); + + return chunk; +} + +static NV_STATUS alloc_or_evict_root_chunk(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunk_out) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + uvm_gpu_chunk_t *chunk; + + status = alloc_root_chunk(pmm, type, flags, &chunk); + if (status != NV_OK) { + if ((flags & UVM_PMM_ALLOC_FLAGS_EVICT) && uvm_gpu_supports_eviction(gpu)) + status = pick_and_evict_root_chunk_retry(pmm, type, PMM_CONTEXT_DEFAULT, chunk_out); + + return status; + } + + *chunk_out = chunk; + return status; +} + +// Same as alloc_or_evit_root_chunk(), but without the PMM lock held. 
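+// The PMM mutex is taken internally, and only if eviction turns out to be
+// necessary.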
+static NV_STATUS alloc_or_evict_root_chunk_unlocked(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunk_out) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + uvm_gpu_chunk_t *chunk; + + status = alloc_root_chunk(pmm, type, flags, &chunk); + if (status != NV_OK) { + if ((flags & UVM_PMM_ALLOC_FLAGS_EVICT) && uvm_gpu_supports_eviction(gpu)) { + uvm_mutex_lock(&pmm->lock); + status = pick_and_evict_root_chunk_retry(pmm, type, PMM_CONTEXT_DEFAULT, chunk_out); + uvm_mutex_unlock(&pmm->lock); + } + + return status; + } + + *chunk_out = chunk; + return status; +} + +static NV_STATUS alloc_chunk_with_splits(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_chunk_size_t chunk_size, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **out_chunk) +{ + NV_STATUS status; + uvm_chunk_size_t cur_size; + uvm_gpu_chunk_t *chunk; + uvm_chunk_sizes_mask_t chunk_sizes = pmm->chunk_sizes[type]; + + uvm_assert_mutex_locked(&pmm->lock); + UVM_ASSERT(chunk_size != UVM_CHUNK_SIZE_MAX); + + // Check for a free chunk again in case a different thread freed something + // up while this thread was waiting for the PMM lock. + chunk = claim_free_chunk(pmm, type, chunk_size); + if (chunk) { + // A free chunk was claimed, return immediately. + UVM_ASSERT(check_chunk(pmm, chunk)); + + *out_chunk = chunk; + return NV_OK; + } + + cur_size = chunk_size; + + // Look for a bigger free chunk that can be split + for_each_chunk_size_from(cur_size, chunk_sizes) { + chunk = claim_free_chunk(pmm, type, cur_size); + if (chunk) + break; + } + + if (unlikely(!chunk)) { + status = alloc_or_evict_root_chunk(pmm, type, flags, &chunk); + if (status != NV_OK) + return status; + cur_size = UVM_CHUNK_SIZE_MAX; + UVM_ASSERT(uvm_gpu_chunk_get_size(chunk) == cur_size); + } + + UVM_ASSERT(chunk); + + for_each_chunk_size_rev_from(cur_size, chunk_sizes) { + NvU32 i; + uvm_gpu_chunk_t *parent; + + UVM_ASSERT(uvm_gpu_chunk_get_size(chunk) == cur_size); + UVM_ASSERT(chunk->type == type); + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + if (chunk->parent) { + UVM_ASSERT(chunk->parent->suballoc); + UVM_ASSERT(uvm_gpu_chunk_get_size(chunk->parent) == uvm_chunk_find_next_size(chunk_sizes, cur_size)); + UVM_ASSERT(chunk->parent->type == type); + UVM_ASSERT_MSG(chunk->parent->suballoc->allocated <= num_subchunks(chunk->parent), "allocated %u num %u\n", + chunk->parent->suballoc->allocated, num_subchunks(chunk->parent)); + } + + if (cur_size == chunk_size) { + *out_chunk = chunk; + return NV_OK; + } + + status = split_gpu_chunk(pmm, chunk); + if (status != NV_OK) { + free_chunk_with_merges(pmm, chunk); + return status; + } + + parent = chunk; + + // Use the first subchunk for further splitting, if needed. + chunk = parent->suballoc->subchunks[0]; + + // And add the rest to the free list + uvm_spin_lock(&pmm->list_lock); + + for (i = 1; i < num_subchunks(parent); ++i) + chunk_free_locked(pmm, parent->suballoc->subchunks[i]); + + uvm_spin_unlock(&pmm->list_lock); + } + UVM_PANIC(); +} + +// Allocates a single chunk of a given size. If needed splits a chunk of bigger size +// or, if that is not possible, allocates from PMA or evicts. 
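+//
+// The fast path claims an already-free chunk of the requested size without
+// taking the PMM mutex. Root-chunk-sized requests go straight to PMA (or to
+// eviction); only requests that need a split acquire the PMM mutex.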
+NV_STATUS alloc_chunk(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_chunk_size_t chunk_size, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **out_chunk) +{ + NV_STATUS status; + uvm_gpu_chunk_t *chunk; + + chunk = claim_free_chunk(pmm, type, chunk_size); + if (chunk) { + // A free chunk could be claimed, we are done. + *out_chunk = chunk; + return NV_OK; + } + + if (chunk_size == UVM_CHUNK_SIZE_MAX) { + // For chunks of root chunk size we won't be doing any splitting so we + // can just directly try allocating without holding the PMM lock. If + // eviction is necessary, the lock will be acquired internally. + status = alloc_or_evict_root_chunk_unlocked(pmm, type, flags, &chunk); + if (status != NV_OK) + return status; + + *out_chunk = chunk; + return NV_OK; + } + + // We didn't find a free chunk and we will require splits so acquire the PMM lock. + uvm_mutex_lock(&pmm->lock); + + status = alloc_chunk_with_splits(pmm, type, chunk_size, flags, &chunk); + + uvm_mutex_unlock(&pmm->lock); + + if (status != NV_OK) { + (void)free_next_available_root_chunk(pmm, type); + return status; + } + + *out_chunk = chunk; + + return NV_OK; +} + +// Initialize the given root chunk. If the initial state is +// UVM_PMM_GPU_CHUNK_STATE_FREE, the chunk is added to the corresponding free +// list. +// +// PMA lock must be held by the caller +static void init_root_chunk(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_gpu_root_chunk_t *root_chunk, + uvm_pmm_gpu_chunk_state_t initial_state, + bool is_zero) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_gpu_chunk_t *chunk = &root_chunk->chunk; + + uvm_assert_rwsem_locked(&pmm->pma_lock); + + root_chunk_lock(pmm, root_chunk); + + uvm_tracker_init(&root_chunk->tracker); + + uvm_spin_lock(&pmm->list_lock); + + UVM_ASSERT_MSG(chunk->state == UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED, + "Address 0x%llx state %s GPU %s\n", + chunk->address, + uvm_pmm_gpu_chunk_state_string(chunk->state), + uvm_gpu_name(gpu)); + + UVM_ASSERT(chunk->parent == NULL); + UVM_ASSERT(chunk->suballoc == NULL); + UVM_ASSERT(chunk->va_block == NULL); + UVM_ASSERT(chunk->va_block_page_index == PAGES_PER_UVM_VA_BLOCK); + UVM_ASSERT(list_empty(&chunk->list)); + UVM_ASSERT(uvm_gpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_MAX); + UVM_ASSERT(!root_chunk_has_elevated_page(pmm, root_chunk)); + + UVM_ASSERT(initial_state == UVM_PMM_GPU_CHUNK_STATE_FREE || + initial_state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + chunk->type = type; + chunk->state = initial_state; + chunk->is_zero = is_zero; + + chunk_update_lists_locked(pmm, chunk); + + uvm_spin_unlock(&pmm->list_lock); + + root_chunk_unlock(pmm, root_chunk); +} + +NV_STATUS alloc_root_chunk(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **out_chunk) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + UvmPmaAllocationOptions options = {0}; + NvU32 num_chunks; + NvU32 i; + bool used_kmem_cache = false; + UvmGpuPointer pa; + UvmGpuPointer *pas; + + // TODO: Bug 2444368: On P9 systems, PMA scrubbing is very slow. For now, + // zero the chunk within UVM. Re-evaluate this condition once PMA scrubbing + // is improved. + // + // TODO: Bug 2446832: Most (all?) kernel chunks don't require scrubbing. + // Also, user pages that are about to be overwritten, don't need to be + // zeroed, either. Add an interface to uvm_pmm_gpu_alloc for callers to + // specify when they don't need zeroed pages. 
+ const bool skip_pma_scrubbing = gpu->parent->numa_info.enabled; + UVM_ASSERT(uvm_pmm_gpu_memory_type_is_user(type) || uvm_pmm_gpu_memory_type_is_kernel(type)); + + options.flags = UVM_PMA_ALLOCATE_DONT_EVICT; + + if (uvm_pmm_gpu_memory_type_is_kernel(type) || !gpu_supports_pma_eviction(gpu)) + options.flags |= UVM_PMA_ALLOCATE_PINNED; + + if (skip_pma_scrubbing) + options.flags |= UVM_PMA_ALLOCATE_NO_ZERO; + + // TODO: Bug 200480500: Batching is currently disabled on P9. Re-enable + // when the performance of best-effort allocations is verified. + if (gpu->parent->numa_info.enabled) + flags |= UVM_PMM_ALLOC_FLAGS_DONT_BATCH; + + + + + + + + + if (!gpu->parent->rm_info.isSimulated && + !(options.flags & UVM_PMA_ALLOCATE_PINNED) && + !(flags & UVM_PMM_ALLOC_FLAGS_DONT_BATCH)) { + num_chunks = 1 << uvm_perf_pma_batch_nonpinned_order; + + // Allocate a batch of root chunks in order to reduce the number of + // calls to PMA. The first one is returned as allocated, the rest are + // added to the corresponding free list. + pas = kmem_cache_alloc(g_pma_address_batch_cache_ref.cache, NV_UVM_GFP_FLAGS); + if (!pas) + return NV_ERR_NO_MEMORY; + + // Make the allocation best-effort to avoid retries if the whole batch + // cannot be allocated. + options.flags |= UVM_PMA_ALLOCATE_ALLOW_PARTIAL; + + used_kmem_cache = true; + } + else { + num_chunks = 1; + + pas = &pa; + } + + // Acquire the PMA lock for read so that uvm_pmm_gpu_pma_evict_range() can + // flush out any pending allocs. + uvm_down_read(&pmm->pma_lock); + + status = nvUvmInterfacePmaAllocPages(pmm->pma, num_chunks, UVM_CHUNK_SIZE_MAX, &options, pas); + if (status != NV_OK) + goto exit_unlock; + + // Batched allocations are best-effort. Therefore, we need to adjust the + // number of allocated chunks. + if (used_kmem_cache) { + UVM_ASSERT(options.numPagesAllocated <= num_chunks); + UVM_ASSERT(options.numPagesAllocated > 0); + num_chunks = options.numPagesAllocated; + } + + for (i = 0; i < num_chunks; ++i) { + uvm_pmm_gpu_chunk_state_t initial_state; + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_address(pmm, pas[i]); + uvm_gpu_chunk_t *chunk = &root_chunk->chunk; + + if (i == 0) { + initial_state = UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED; + *out_chunk = chunk; + } + else { + initial_state = UVM_PMM_GPU_CHUNK_STATE_FREE; + } + + UVM_ASSERT_MSG(IS_ALIGNED(pas[i], UVM_CHUNK_SIZE_MAX), "Address 0x%llx\n", pas[i]); + UVM_ASSERT(chunk->address == pas[i]); + + init_root_chunk(pmm, + type, + root_chunk, + initial_state, + !!(options.resultFlags & UVM_PMA_ALLOCATE_RESULT_IS_ZERO)); + } + +exit_unlock: + uvm_up_read(&pmm->pma_lock); + + if (used_kmem_cache) + kmem_cache_free(g_pma_address_batch_cache_ref.cache, pas); + + return status; +} + +void free_root_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_root_chunk_t *root_chunk, free_root_chunk_mode_t free_mode) +{ + NV_STATUS status; + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_gpu_chunk_t *chunk = &root_chunk->chunk; + NvU32 flags = 0; + + // Acquire the PMA lock for read so that uvm_pmm_gpu_pma_evict_range() can + // flush out any pending frees. + uvm_down_read(&pmm->pma_lock); + + root_chunk_lock(pmm, root_chunk); + + root_chunk_unmap_indirect_peers(pmm, root_chunk); + + status = uvm_tracker_wait_deinit(&root_chunk->tracker); + if (status != NV_OK) { + // TODO: Bug 1766184: Handle RC/ECC. For now just go ahead and free the chunk anyway. 
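+        // A tracker wait can only fail after a global fatal error has been
+        // recorded, which is what the assert below checks.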
+ UVM_ASSERT(uvm_global_get_status() != NV_OK); + } + + uvm_spin_lock(&pmm->list_lock); + + UVM_ASSERT_MSG(chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED, + "Address 0x%llx state %s GPU %s\n", + chunk->address, + uvm_pmm_gpu_chunk_state_string(chunk->state), + uvm_gpu_name(gpu)); + UVM_ASSERT(list_empty(&chunk->list)); + + chunk_unpin(pmm, chunk, UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED); + + uvm_spin_unlock(&pmm->list_lock); + + root_chunk_unlock(pmm, root_chunk); + + if (free_mode == FREE_ROOT_CHUNK_MODE_SKIP_PMA_FREE) { + uvm_up_read(&pmm->pma_lock); + return; + } + + if (free_mode == FREE_ROOT_CHUNK_MODE_PMA_EVICTION) + flags |= UVM_PMA_CALLED_FROM_PMA_EVICTION; + + if (chunk->is_zero) + flags |= UVM_PMA_FREE_IS_ZERO; + + nvUvmInterfacePmaFreePages(pmm->pma, &chunk->address, 1, UVM_CHUNK_SIZE_MAX, flags); + + uvm_up_read(&pmm->pma_lock); +} + +// Splits the input chunk into subchunks of the next size down. The chunk state +// can be UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED or UVM_PMM_GPU_CHUNK_STATE_ALLOCATED. +// +// UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED: This is a split for allocation. +// +// UVM_PMM_GPU_CHUNK_STATE_ALLOCATED: This is an in-place split. The new chunks +// are also marked allocated and they inherit the reverse map from the original. +// +// The PMM lock must be held when calling this function. +NV_STATUS split_gpu_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_chunk_size_t chunk_size = uvm_gpu_chunk_get_size(chunk); + uvm_chunk_sizes_mask_t chunk_sizes = pmm->chunk_sizes[chunk->type]; + uvm_chunk_size_t subchunk_size; + size_t cache_idx, num_sub; + int i; + NV_STATUS status; + uvm_pmm_gpu_chunk_suballoc_t *suballoc; + uvm_gpu_chunk_t *subchunk; + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + + uvm_assert_mutex_locked(&pmm->lock); + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + subchunk_size = uvm_chunk_find_prev_size(chunk_sizes, chunk_size); + UVM_ASSERT(subchunk_size != UVM_CHUNK_SIZE_INVALID); + + num_sub = chunk_size / subchunk_size; + cache_idx = ilog2(num_sub); + UVM_ASSERT(chunk_split_cache[cache_idx].cache != NULL); + + suballoc = nv_kmem_cache_zalloc(chunk_split_cache[cache_idx].cache, NV_UVM_GFP_FLAGS); + if (suballoc == NULL) + return NV_ERR_NO_MEMORY; + + for (i = 0; i < num_sub; i++) { + // If requested, inject a failure on the last subchunk + if (unlikely(chunk->inject_split_error) && i == num_sub - 1) { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + subchunk = nv_kmem_cache_zalloc(CHUNK_CACHE, NV_UVM_GFP_FLAGS); + if (!subchunk) { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + suballoc->subchunks[i] = subchunk; + + subchunk->gpu_global_index = chunk->gpu_global_index; + subchunk->address = chunk->address + i * subchunk_size; + subchunk->type = chunk->type; + uvm_gpu_chunk_set_size(subchunk, subchunk_size); + subchunk->parent = chunk; + subchunk->va_block_page_index = PAGES_PER_UVM_VA_BLOCK; + subchunk->is_zero = chunk->is_zero; + INIT_LIST_HEAD(&subchunk->list); + + // The child inherits the parent's state. + subchunk->state = chunk->state; + + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED) { + UVM_ASSERT(chunk->va_block); + uvm_assert_mutex_locked(&chunk->va_block->lock); + subchunk->va_block = chunk->va_block; + subchunk->va_block_page_index = chunk->va_block_page_index + (i * subchunk_size) / PAGE_SIZE; + } + } + + // We're splitting an allocated or pinned chunk in-place. 
+ suballoc->allocated = num_sub; + + // Now that all of the subchunk state has been initialized, transition the + // parent into the split state under the list lock. + uvm_spin_lock(&pmm->list_lock); + + chunk->suballoc = suballoc; + + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED) { + chunk->va_block = NULL; + chunk->va_block_page_index = PAGES_PER_UVM_VA_BLOCK; + } + else if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED) { + // -1 for the parent chunk that is going to transition into the split state. + root_chunk->chunk.suballoc->pinned_leaf_chunks += num_sub - 1; + + // When a pinned root chunk gets split, the count starts at 0 not + // accounting for the root chunk itself so add the 1 back. + if (chunk_is_root_chunk(chunk)) + root_chunk->chunk.suballoc->pinned_leaf_chunks += 1; + } + + chunk->state = UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT; + + uvm_spin_unlock(&pmm->list_lock); + + return NV_OK; +cleanup: + for (i = 0; i < num_sub; i++) { + if (suballoc->subchunks[i] == NULL) + break; + kmem_cache_free(CHUNK_CACHE, suballoc->subchunks[i]); + } + kmem_cache_free(chunk_split_cache[cache_idx].cache, suballoc); + return status; +} + +// Sanity check the chunk, the chunk's tree, and any mappings to the chunk. The +// chunk must be newly-freed or newly-allocated, but its state may not reflect +// that yet. +// +// This function always returns true so it can be called from an assert macro. +static bool check_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_chunk_sizes_mask_t chunk_sizes = pmm->chunk_sizes[chunk->type]; + uvm_gpu_chunk_t *parent = chunk->parent; + uvm_chunk_size_t chunk_size = uvm_gpu_chunk_get_size(chunk); + uvm_chunk_size_t parent_size; + + UVM_ASSERT(chunk_size & chunk_sizes); + UVM_ASSERT(IS_ALIGNED(chunk->address, chunk_size)); + UVM_ASSERT(uvm_global_id_equal(uvm_global_gpu_id_from_index(chunk->gpu_global_index), gpu->global_id)); + + + + + + + + + + + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT) + UVM_ASSERT(chunk_size > uvm_chunk_find_first_size(chunk_sizes)); + + if (parent) { + UVM_ASSERT(parent->type == chunk->type); + + parent_size = uvm_gpu_chunk_get_size(parent); + UVM_ASSERT(uvm_chunk_find_next_size(chunk_sizes, chunk_size) == parent_size); + UVM_ASSERT(parent_size <= uvm_chunk_find_last_size(chunk_sizes)); + + UVM_ASSERT(parent->state == UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT); + UVM_ASSERT(parent->suballoc); + UVM_ASSERT(parent->suballoc->allocated > 0); + UVM_ASSERT(parent->suballoc->allocated <= num_subchunks(parent)); + + UVM_ASSERT(parent->address <= chunk->address); + UVM_ASSERT(chunk->address < parent->address + parent_size); + } + else { + UVM_ASSERT(chunk_size == uvm_chunk_find_last_size(chunk_sizes)); + } + + if (uvm_pmm_sysmem_mappings_indirect_supported()) { + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + uvm_gpu_id_t other_gpu_id; + + root_chunk_lock(pmm, root_chunk); + + // See root_chunk_unmap_indirect_peers for the usage of uvm_gpu_get + for_each_gpu_id_in_mask(other_gpu_id, &root_chunk->indirect_peers_mapped) { + uvm_gpu_t *other_gpu = uvm_gpu_get_by_processor_id(other_gpu_id); + NvU64 peer_addr = uvm_pmm_gpu_indirect_peer_addr(pmm, chunk, other_gpu); + uvm_reverse_map_t reverse_map; + size_t num_mappings; + + num_mappings = uvm_pmm_sysmem_mappings_dma_to_virt(&other_gpu->pmm_reverse_sysmem_mappings, + peer_addr, + uvm_gpu_chunk_get_size(chunk), + &reverse_map, + 1); + UVM_ASSERT(num_mappings == 0); + } + + root_chunk_unlock(pmm, root_chunk); + } + + return 
true; +} + +static bool chunk_is_last_allocated_child(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_assert_spinlock_locked(&pmm->list_lock); + + if (!chunk->parent) + return false; + + return chunk->parent->suballoc->allocated == 1; +} + +static void chunk_free_locked(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_chunk(pmm, chunk); + + uvm_assert_spinlock_locked(&pmm->list_lock); + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + if (root_chunk->chunk.in_eviction) { + // A root chunk with pinned subchunks would never be picked for eviction + // so this one has to be in the allocated state. Pin it and let the + // evicting thread pick it up. + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED); + UVM_ASSERT(chunk->va_block != NULL); + UVM_ASSERT(chunk->va_block_page_index != PAGES_PER_UVM_VA_BLOCK); + UVM_ASSERT(list_empty(&chunk->list)); + chunk->va_block = NULL; + chunk->va_block_page_index = PAGES_PER_UVM_VA_BLOCK; + chunk->is_zero = false; + chunk_pin(pmm, chunk); + return; + } + + if (chunk->parent) { + UVM_ASSERT(chunk->parent->suballoc->allocated > 0); + --chunk->parent->suballoc->allocated; + if (chunk->parent->suballoc->allocated == 0) { + // Freeing the last subchunk should trigger a merge and the PMM + // mutex is required to perform it. + uvm_assert_mutex_locked(&pmm->lock); + } + } + + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED) { + chunk_unpin(pmm, chunk, UVM_PMM_GPU_CHUNK_STATE_FREE); + } + else { + chunk->state = UVM_PMM_GPU_CHUNK_STATE_FREE; + chunk->va_block = NULL; + } + + chunk->va_block_page_index = PAGES_PER_UVM_VA_BLOCK; + chunk->is_zero = false; + + chunk_update_lists_locked(pmm, chunk); +} + +static bool try_chunk_free(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + bool freed = false; + + uvm_spin_lock(&pmm->list_lock); + + chunk->inject_split_error = false; + + // Chunks that are the last allocated child need to trigger a merge and are + // handled by free_or_prepare_for_merge(). + if (!chunk_is_last_allocated_child(pmm, chunk)) { + chunk_free_locked(pmm, chunk); + freed = true; + } + + uvm_spin_unlock(&pmm->list_lock); + + return freed; +} + +// Return NULL if the chunk could be freed immediately. Otherwise, if the chunk +// was the last allocated child, return the parent chunk to be merged with all +// of its children taken off the free list in TEMP_PINNED state. +static uvm_gpu_chunk_t *free_or_prepare_for_merge(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_chunk_t *parent = NULL; + NvU32 i; + + uvm_assert_mutex_locked(&pmm->lock); + + if (!chunk->parent) { + bool freed = try_chunk_free(pmm, chunk); + + // Freeing a root chunk should never fail + UVM_ASSERT(freed); + + return NULL; + } + + uvm_spin_lock(&pmm->list_lock); + + if (chunk_is_last_allocated_child(pmm, chunk)) + parent = chunk->parent; + + chunk_free_locked(pmm, chunk); + + if (parent == NULL) { + UVM_ASSERT(chunk->parent->suballoc->allocated != 0); + goto done; + } + + UVM_ASSERT(chunk->parent->suballoc->allocated == 0); + + // Pin all the subchunks to prepare them for being merged. 
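+    // Taking them off the free lists and marking them TEMP_PINNED ensures no
+    // other thread can claim them between dropping the list lock here and
+    // performing the merge under the PMM mutex.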
+ for (i = 0; i < num_subchunks(chunk->parent); ++i) { + uvm_gpu_chunk_t *subchunk = chunk->parent->suballoc->subchunks[i]; + + UVM_ASSERT(subchunk->state == UVM_PMM_GPU_CHUNK_STATE_FREE); + + list_del_init(&subchunk->list); + subchunk->state = UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED; + } + root_chunk_from_chunk(pmm, chunk)->chunk.suballoc->pinned_leaf_chunks += num_subchunks(chunk->parent); + + chunk->parent->suballoc->allocated = num_subchunks(chunk->parent); + parent = chunk->parent; + +done: + uvm_spin_unlock(&pmm->list_lock); + + return parent; +} + +static void free_chunk_with_merges(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + uvm_assert_mutex_locked(&pmm->lock); + + while (1) { + // When called from the free_chunk path this check_chunk is redundant, + // but we have some PMM-internal direct calls of this function. + UVM_ASSERT(check_chunk(pmm, chunk)); + + chunk = free_or_prepare_for_merge(pmm, chunk); + if (!chunk) + break; + + merge_gpu_chunk(pmm, chunk); + } +} + +// Mark the chunk as free and put it on the free list. If this is a suballocated +// chunk and the parent has no more allocated chunks, the parent is freed and so +// on up the tree. +static void free_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + bool try_free = true; + const bool is_root = chunk_is_root_chunk(chunk); + const uvm_pmm_gpu_memory_type_t type = chunk->type; + + UVM_ASSERT(chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED || + chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + + UVM_ASSERT(check_chunk(pmm, chunk)); + + if (try_chunk_free(pmm, chunk)) { + try_free = is_root; + } + else { + // Freeing a chunk can only fail if it requires merging. Take the PMM lock + // and free it with merges supported. + uvm_mutex_lock(&pmm->lock); + free_chunk_with_merges(pmm, chunk); + uvm_mutex_unlock(&pmm->lock); + } + + // Once try_chunk_free succeeds or free_chunk_with_merges returns, it's no + // longer safe to access chunk in general. All you know is that the + // chunk you freed was put on the free list by the call. Since the spin lock + // has been dropped, any other thread could have come in and allocated the + // chunk in the meantime. Therefore, this next step just looks for a + // root chunk to free, without assuming that one is actually there. + + if (try_free) + (void)free_next_available_root_chunk(pmm, type); +} + +// Finds and frees the next root chunk of the given type (if any) that can be +// freed. Returns true if a root chunk was freed, or false otherwise. +bool free_next_available_root_chunk(uvm_pmm_gpu_t *pmm, uvm_pmm_gpu_memory_type_t type) +{ + uvm_gpu_chunk_t *result; + + UVM_ASSERT(uvm_chunk_find_last_size(pmm->chunk_sizes[type]) == UVM_CHUNK_SIZE_MAX); + + uvm_spin_lock(&pmm->list_lock); + + // Prefer non-zero free chunk as memory is about to be released to PMA + result = list_first_chunk(find_free_list(pmm, type, UVM_CHUNK_SIZE_MAX, UVM_PMM_LIST_NO_ZERO)); + if (result) + UVM_ASSERT(!result->is_zero); + + if (!result) { + result = list_first_chunk(find_free_list(pmm, type, UVM_CHUNK_SIZE_MAX, UVM_PMM_LIST_ZERO)); + if (result) + UVM_ASSERT(result->is_zero); + } + + if (result != NULL) { + list_del_init(&result->list); + UVM_ASSERT(result->state == UVM_PMM_GPU_CHUNK_STATE_FREE); + UVM_ASSERT(uvm_gpu_chunk_get_size(result) == UVM_CHUNK_SIZE_MAX); + UVM_ASSERT(result->type == type); + + // The chunk has been freed and removed from the free list so it + // can't get allocated again, but it could be targeted for eviction + // by physical address. 
Pin it temporarily to protect the chunk from + // eviction between dropping the list lock and taking the root chunk + // lock. + chunk_pin(pmm, result); + } + + uvm_spin_unlock(&pmm->list_lock); + + if (result != NULL) { + free_root_chunk(pmm, root_chunk_from_chunk(pmm, result), FREE_ROOT_CHUNK_MODE_DEFAULT); + return true; + } + + return false; +} + +// Get free list for the given chunk size and type +struct list_head *find_free_list(uvm_pmm_gpu_t *pmm, + uvm_pmm_gpu_memory_type_t type, + uvm_chunk_size_t chunk_size, + uvm_pmm_list_zero_t zero_type) +{ + uvm_chunk_sizes_mask_t chunk_sizes = pmm->chunk_sizes[type]; + size_t idx = hweight_long(chunk_sizes & (chunk_size - 1)); + UVM_ASSERT(is_power_of_2(chunk_size)); + UVM_ASSERT_MSG(chunk_size & chunk_sizes, "chunk size 0x%x chunk sizes 0x%x\n", chunk_size, chunk_sizes); + return &pmm->free_list[type][idx][zero_type]; +} + +struct list_head *find_free_list_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk) +{ + return find_free_list(pmm, + chunk->type, + uvm_gpu_chunk_get_size(chunk), + chunk->is_zero? UVM_PMM_LIST_ZERO : UVM_PMM_LIST_NO_ZERO); +} + +static bool uvm_pmm_should_inject_pma_eviction_error(uvm_pmm_gpu_t *pmm) +{ + uvm_assert_mutex_locked(&pmm->lock); + + if (unlikely(pmm->inject_pma_evict_error_after_num_chunks > 0)) + return --pmm->inject_pma_evict_error_after_num_chunks == 0; + + return false; +} + + + + + + + + +// See the documentation of pmaEvictPagesCb_t in pma.h for details of the +// expected semantics. +static NV_STATUS uvm_pmm_gpu_pma_evict_pages(void *void_pmm, + NvU32 page_size, + NvU64 *pages, + NvU32 num_pages_to_evict, + NvU64 phys_start, + NvU64 phys_end) +{ + NV_STATUS status; + uvm_pmm_gpu_t *pmm = (uvm_pmm_gpu_t *)void_pmm; + uvm_gpu_chunk_t *chunk; + NvU64 num_pages_evicted_so_far = 0; + NvU64 num_pages_left_to_evict = num_pages_to_evict; + const NvU64 pages_per_chunk = UVM_CHUNK_SIZE_MAX / page_size; + bool all_pages_are_zero = true; + + UVM_ASSERT(IS_ALIGNED(UVM_CHUNK_SIZE_MAX, page_size)); + UVM_ASSERT(UVM_CHUNK_SIZE_MAX >= page_size); + + while (num_pages_left_to_evict > 0) { + uvm_gpu_root_chunk_t *root_chunk; + uvm_page_index_t page_index; + NvU64 pages_this_time = min(pages_per_chunk, num_pages_left_to_evict); + + uvm_mutex_lock(&pmm->lock); + + if (uvm_pmm_should_inject_pma_eviction_error(pmm)) { + status = NV_ERR_NO_MEMORY; + } + else { + status = pick_and_evict_root_chunk_retry(pmm, + UVM_PMM_GPU_MEMORY_TYPE_KERNEL, + PMM_CONTEXT_PMA_EVICTION, + &chunk); + } + uvm_mutex_unlock(&pmm->lock); + + // TODO: Bug 1795559: Consider waiting for any pinned user allocations + // to be unpinned. + if (status != NV_OK) + goto error; + + root_chunk = root_chunk_from_chunk(pmm, chunk); + + if (chunk->address < phys_start || chunk->address + UVM_CHUNK_SIZE_MAX > phys_end) { + // If the chunk we get is outside of the physical range requested, + // just give up and return an error. + // + // TODO: Bug 1795559: PMA pre-populates the array of pages with a + // list of candidates that were unpinned before triggering eviction. + // If they were marked for eviction, we could fall back to evicting + // those instead and be sure that it succeeds. + free_root_chunk(pmm, root_chunk, FREE_ROOT_CHUNK_MODE_PMA_EVICTION); + status = NV_ERR_NO_MEMORY; + goto error; + } + + all_pages_are_zero = all_pages_are_zero && chunk->is_zero; + + // Free the root chunk as far as PMM's state is concerned, but skip the + // free back to PMA as that would make it available for other PMA + // allocations. 
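+        // The evicted physical pages are instead returned through the pages
+        // array below, handing their ownership back to the PMA caller.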
+ free_root_chunk(pmm, root_chunk, FREE_ROOT_CHUNK_MODE_SKIP_PMA_FREE); + + for (page_index = 0; page_index < pages_this_time; page_index++) + pages[num_pages_evicted_so_far++] = chunk->address + page_index * page_size; + + num_pages_left_to_evict -= pages_this_time; + + // If we didn't use a whole root chunk, free its tail back to PMA + // directly. + if (pages_this_time != pages_per_chunk) { + NvU64 address = chunk->address + pages_this_time * page_size; + NvU64 num_pages = pages_per_chunk - pages_this_time; + NvU32 free_flags = UVM_PMA_CALLED_FROM_PMA_EVICTION | UVM_PMA_ALLOCATE_CONTIGUOUS; + + if (chunk->is_zero) + free_flags |= UVM_PMA_FREE_IS_ZERO; + + // Free the whole tail as a contiguous allocation + nvUvmInterfacePmaFreePages(pmm->pma, &address, num_pages, page_size, free_flags); + } + } + + return NV_OK; + +error: + // On error, free all of the evicted pages back to PMA directly. + if (num_pages_evicted_so_far > 0) { + NvU32 free_flags = UVM_PMA_CALLED_FROM_PMA_EVICTION; + + if (all_pages_are_zero) + free_flags |= UVM_PMA_FREE_IS_ZERO; + + nvUvmInterfacePmaFreePages(pmm->pma, pages, num_pages_evicted_so_far, page_size, free_flags); + } + + return status; +} + +static NV_STATUS uvm_pmm_gpu_pma_evict_pages_wrapper(void *void_pmm, + NvU32 page_size, + NvU64 *pages, + NvU32 num_pages_to_evict, + NvU64 phys_start, + NvU64 phys_end) +{ + NV_STATUS status; + + // RM invokes the eviction callbacks with its API lock held, but not its GPU + // lock. + uvm_record_lock_rm_api(); + status = uvm_pmm_gpu_pma_evict_pages(void_pmm, page_size, pages, num_pages_to_evict, phys_start, phys_end); + uvm_record_unlock_rm_api(); + return status; +} + +static NV_STATUS uvm_pmm_gpu_pma_evict_pages_wrapper_entry(void *void_pmm, + NvU32 page_size, + NvU64 *pages, + NvU32 num_pages_to_evict, + NvU64 phys_start, + NvU64 phys_end) +{ + UVM_ENTRY_RET(uvm_pmm_gpu_pma_evict_pages_wrapper(void_pmm, + page_size, + pages, + num_pages_to_evict, + phys_start, + phys_end)); +} + +// See the documentation of pmaEvictRangeCb_t in pma.h for details of the +// expected semantics. +static NV_STATUS uvm_pmm_gpu_pma_evict_range(void *void_pmm, NvU64 phys_begin, NvU64 phys_end) +{ + NV_STATUS status; + uvm_pmm_gpu_t *pmm = (uvm_pmm_gpu_t *)void_pmm; + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NvU64 address = UVM_ALIGN_DOWN(phys_begin, UVM_CHUNK_SIZE_MAX); + + UVM_ASSERT_MSG(phys_begin <= phys_end, "range [0x%llx, 0x%llx]\n", phys_begin, phys_end); + UVM_ASSERT_MSG(phys_end <= gpu->mem_info.max_allocatable_address, + "range [0x%llx, 0x%llx]\n", + phys_begin, + phys_end); + + // Make sure that all pending allocations, that could have started before + // the eviction callback was called, are done. This is required to guarantee + // that any address that, PMA thinks, is owned by UVM has been indeed recorded + // in PMM's state. Taking the pma_lock in write mode will make sure all + // readers (pending allocations and frees) are done, but will also + // unnecessarily stop new allocations from starting until it's released. + // TODO: Bug 1795559: SRCU would likely be better for this type of + // synchronization, but that's GPL. Figure out whether we can do anything + // better easily. 
+ uvm_down_write(&pmm->pma_lock); + uvm_up_write(&pmm->pma_lock); + + for (; address <= phys_end; address += UVM_CHUNK_SIZE_MAX) { + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_address(pmm, address); + uvm_gpu_chunk_t *chunk = &root_chunk->chunk; + bool eviction_started = false; + uvm_spin_loop_t spin; + bool should_inject_error; + + uvm_spin_loop_init(&spin); + + // Wait until we can start eviction or the chunk is returned to PMA + do { + uvm_spin_lock(&pmm->list_lock); + + if (chunk->state != UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED) { + UVM_ASSERT(uvm_pmm_gpu_memory_type_is_user(chunk->type)); + + if (chunk_is_evictable(pmm, chunk)) { + chunk_start_eviction(pmm, chunk); + eviction_started = true; + } + } + + uvm_spin_unlock(&pmm->list_lock); + + // TODO: Bug 1795559: Replace this with a wait queue. + if (UVM_SPIN_LOOP(&spin) == NV_ERR_TIMEOUT_RETRY) { + UVM_ERR_PRINT("Stuck waiting for root chunk 0x%llx to be unpinned, giving up\n", chunk->address); + return NV_ERR_NO_MEMORY; + } + } while (!eviction_started && chunk->state != UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED); + + // The eviction callback gets called with a physical range that might be + // only partially allocated by UVM. Skip the chunks that UVM doesn't own. + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED) + continue; + + uvm_mutex_lock(&pmm->lock); + + status = evict_root_chunk(pmm, root_chunk, PMM_CONTEXT_PMA_EVICTION); + should_inject_error = uvm_pmm_should_inject_pma_eviction_error(pmm); + + uvm_mutex_unlock(&pmm->lock); + + if (status != NV_OK) + return status; + + free_root_chunk(pmm, root_chunk, FREE_ROOT_CHUNK_MODE_PMA_EVICTION); + + if (should_inject_error) + return NV_ERR_NO_MEMORY; + } + + // Make sure that all pending frees for chunks that the eviction above could + // have observed as PMA owned are done. This is required to guarantee that + // any address that, PMM thinks, is owned by PMA, has been actually freed + // back to PMA. Taking the pma_lock in write mode will make sure all + // readers (pending frees) are done, but will also unnecessarily stop new + // allocations and frees from starting until it's released. + uvm_down_write(&pmm->pma_lock); + uvm_up_write(&pmm->pma_lock); + + return NV_OK; +} + +static NV_STATUS uvm_pmm_gpu_pma_evict_range_wrapper(void *void_pmm, NvU64 phys_begin, NvU64 phys_end) +{ + NV_STATUS status; + + // RM invokes the eviction callbacks with its API lock held, but not its GPU + // lock. 
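+    // Record that here so UVM's lock order tracking sees the RM API lock as
+    // held for the duration of the callback.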
+ uvm_record_lock_rm_api(); + status = uvm_pmm_gpu_pma_evict_range(void_pmm, phys_begin, phys_end); + uvm_record_unlock_rm_api(); + return status; +} + +static NV_STATUS uvm_pmm_gpu_pma_evict_range_wrapper_entry(void *void_pmm, NvU64 phys_begin, NvU64 phys_end) +{ + UVM_ENTRY_RET(uvm_pmm_gpu_pma_evict_range_wrapper(void_pmm, phys_begin, phys_end)); +} + +static void deinit_chunk_split_cache(uvm_pmm_gpu_t *pmm) +{ + unsigned long subchunk_count_log2; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + for_each_set_bit(subchunk_count_log2, pmm->chunk_split_cache_initialized, UVM_PMM_CHUNK_SPLIT_CACHE_SIZES) { + UVM_ASSERT(chunk_split_cache[subchunk_count_log2].refcount > 0); + UVM_ASSERT(chunk_split_cache[subchunk_count_log2].cache); + + if (--chunk_split_cache[subchunk_count_log2].refcount == 0) + kmem_cache_destroy_safe(&chunk_split_cache[subchunk_count_log2].cache); + + __clear_bit(subchunk_count_log2, pmm->chunk_split_cache_initialized); + } +} + +static NV_STATUS init_chunk_split_cache_level(uvm_pmm_gpu_t *pmm, size_t level) +{ + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (!test_bit(level, pmm->chunk_split_cache_initialized)) { + if (!chunk_split_cache[level].cache) { + size_t size; + size_t align; + if (level == 0) { + strncpy(chunk_split_cache[level].name, "uvm_gpu_chunk_t", sizeof(chunk_split_cache[level].name) - 1); + size = sizeof(uvm_gpu_chunk_t); + align = __alignof__(uvm_gpu_chunk_t); + } else { + snprintf(chunk_split_cache[level].name, + sizeof(chunk_split_cache[level].name), + "uvm_gpu_chunk_%u", (unsigned)level); + size = sizeof(uvm_pmm_gpu_chunk_suballoc_t) + (sizeof(uvm_gpu_chunk_t *) << level); + align = __alignof__(uvm_pmm_gpu_chunk_suballoc_t); + } + chunk_split_cache[level].cache = + nv_kmem_cache_create(chunk_split_cache[level].name, size, align); + + + if (!chunk_split_cache[level].cache) + return NV_ERR_NO_MEMORY; + + UVM_ASSERT(chunk_split_cache[level].refcount == 0); + } else { + UVM_ASSERT(chunk_split_cache[level].refcount > 0); + } + + ++chunk_split_cache[level].refcount; + UVM_ASSERT_MSG(chunk_split_cache[level].refcount != 0, "Overflow of refcount\n"); + + __set_bit(level, pmm->chunk_split_cache_initialized); + } + + return NV_OK; +} + +// Initializes the split cache for given GPU. +// +// It walks through all memory splits - in other words all ratios of neighboring +// pairs of sizes - and allocates kmem cache for them, unless they are already +// allocated. +// +// It also bumps the refcount if this GPU did not use such split yet. +static NV_STATUS init_chunk_split_cache(uvm_pmm_gpu_t *pmm) +{ + NV_STATUS status; + uvm_pmm_gpu_memory_type_t type; + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + for (type = 0; type < UVM_PMM_GPU_MEMORY_TYPE_COUNT; type++) { + uvm_chunk_size_t prev_size, cur_size; + uvm_chunk_sizes_mask_t chunk_sizes = pmm->chunk_sizes[type]; + // Iterate over each pair of neighboring sizes. 
Note that same level + // may be visited multiple times and it is handled internally by + // init_chunk_split_cache_level + prev_size = uvm_chunk_find_first_size(chunk_sizes); + cur_size = uvm_chunk_find_next_size(chunk_sizes, prev_size); + for_each_chunk_size_from(cur_size, chunk_sizes) { + size_t subchunk_count = cur_size / prev_size; + size_t level = ilog2(subchunk_count); + status = init_chunk_split_cache_level(pmm, level); + if (status != NV_OK) + return status; + + prev_size = cur_size; + } + } + + return init_chunk_split_cache_level(pmm, 0); +} + +static NV_STATUS init_pma_address_batch_cache(uvm_pmm_gpu_t *pmm) +{ + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + if (!g_pma_address_batch_cache_ref.cache) { + const size_t address_batch_size = sizeof(UvmGpuPointer) << uvm_perf_pma_batch_nonpinned_order; + + snprintf(g_pma_address_batch_cache_ref.name, + sizeof(g_pma_address_batch_cache_ref.name), + "pma_address_batch"); + g_pma_address_batch_cache_ref.cache = + nv_kmem_cache_create(g_pma_address_batch_cache_ref.name, + address_batch_size, __alignof__(UvmGpuPointer)); + + if (!g_pma_address_batch_cache_ref.cache) + return NV_ERR_NO_MEMORY; + + UVM_ASSERT(g_pma_address_batch_cache_ref.refcount == 0); + } + else { + UVM_ASSERT(g_pma_address_batch_cache_ref.refcount > 0); + } + + pmm->pma_address_cache_initialized = true; + + ++g_pma_address_batch_cache_ref.refcount; + UVM_ASSERT_MSG(g_pma_address_batch_cache_ref.refcount != 0, "Overflow of refcount\n"); + + return NV_OK; +} + +static void deinit_pma_address_batch_cache(uvm_pmm_gpu_t *pmm) +{ + if (pmm->pma_address_cache_initialized) { + UVM_ASSERT(g_pma_address_batch_cache_ref.refcount > 0); + UVM_ASSERT(g_pma_address_batch_cache_ref.cache); + + if (--g_pma_address_batch_cache_ref.refcount == 0) + kmem_cache_destroy_safe(&g_pma_address_batch_cache_ref.cache); + + pmm->pma_address_cache_initialized = false; + } +} + +static void deinit_caches(uvm_pmm_gpu_t *pmm) +{ + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + deinit_pma_address_batch_cache(pmm); + deinit_chunk_split_cache(pmm); +} + +static NV_STATUS init_caches(uvm_pmm_gpu_t *pmm) +{ + NV_STATUS status; + + status = init_pma_address_batch_cache(pmm); + if (status != NV_OK) + goto cleanup; + + status = init_chunk_split_cache(pmm); + if (status != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + deinit_caches(pmm); + + return status; +} + +typedef struct +{ + // Start/end of the physical region to be traversed (IN) + NvU64 phys_start; + NvU64 phys_end; + + // Pointer to the array of mappins where to store results (OUT) + uvm_reverse_map_t *mappings; + + // Number of entries written to mappings (OUT) + NvU32 num_mappings; +} get_chunk_mappings_data_t; + +// Chunk traversal function used for phys-to-virt translation. These are the +// possible return values. +// +// - NV_ERR_OUT_OF_RANGE: no allocated physical chunks were found +// - NV_ERR_MORE_DATA_AVAILABLE: allocated physical chunks were found +// - NV_OK: allocated physical chunks may have been found. 
Check num_mappings +static NV_STATUS get_chunk_mappings_in_range(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, void *data) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + get_chunk_mappings_data_t *get_chunk_mappings_data = (get_chunk_mappings_data_t *)data; + NvU64 chunk_end = chunk->address + uvm_gpu_chunk_get_size(chunk) - 1; + + uvm_assert_mutex_locked(&pmm->lock); + + // Kernel chunks do not have assigned VA blocks so we can just skip them + if (uvm_pmm_gpu_memory_type_is_kernel(chunk->type)) + return NV_WARN_NOTHING_TO_DO; + + // This chunk is located before the requested physical range. Skip its + // children and keep going + if (chunk_end < get_chunk_mappings_data->phys_start) + return NV_WARN_NOTHING_TO_DO; + + // We are beyond the search phys range. Stop traversing. + if (chunk->address > get_chunk_mappings_data->phys_end) { + if (get_chunk_mappings_data->num_mappings > 0) + return NV_ERR_MORE_DATA_AVAILABLE; + else + return NV_ERR_OUT_OF_RANGE; + } + + uvm_spin_lock(&pmm->list_lock); + + // Return results for allocated leaf chunks, only + if (chunk->state == UVM_PMM_GPU_CHUNK_STATE_ALLOCATED) { + uvm_reverse_map_t *reverse_map; + + UVM_ASSERT(chunk->va_block); + uvm_va_block_retain(chunk->va_block); + + reverse_map = &get_chunk_mappings_data->mappings[get_chunk_mappings_data->num_mappings]; + + reverse_map->va_block = chunk->va_block; + reverse_map->region = uvm_va_block_region(chunk->va_block_page_index, + chunk->va_block_page_index + uvm_gpu_chunk_get_size(chunk) / PAGE_SIZE); + reverse_map->owner = gpu->id; + + // If we land in the middle of a chunk, adjust the offset + if (get_chunk_mappings_data->phys_start > chunk->address) { + NvU64 offset = get_chunk_mappings_data->phys_start - chunk->address; + + reverse_map->region.first += offset / PAGE_SIZE; + } + + // If the physical range doesn't cover the whole chunk, adjust num_pages + if (get_chunk_mappings_data->phys_end < chunk_end) + reverse_map->region.outer -= (chunk_end - get_chunk_mappings_data->phys_end) / PAGE_SIZE; + + ++get_chunk_mappings_data->num_mappings; + } + + uvm_spin_unlock(&pmm->list_lock); + + return NV_OK; +} + +NvU32 uvm_pmm_gpu_phys_to_virt(uvm_pmm_gpu_t *pmm, NvU64 phys_addr, NvU64 region_size, uvm_reverse_map_t *out_mappings) +{ + NvU64 chunk_base_addr = UVM_ALIGN_DOWN(phys_addr, UVM_CHUNK_SIZE_MAX); + NvU64 size_in_chunk = min(UVM_CHUNK_SIZE_MAX - (phys_addr - chunk_base_addr), region_size); + NvU32 num_mappings = 0; + + UVM_ASSERT(PAGE_ALIGNED(phys_addr)); + UVM_ASSERT(PAGE_ALIGNED(region_size)); + + uvm_mutex_lock(&pmm->lock); + + // Traverse the whole requested region + do { + NV_STATUS status = NV_OK; + uvm_gpu_root_chunk_t *root_chunk = root_chunk_from_address(pmm, phys_addr); + uvm_gpu_chunk_t *chunk = &root_chunk->chunk; + get_chunk_mappings_data_t get_chunk_mappings_data; + + get_chunk_mappings_data.phys_start = phys_addr; + get_chunk_mappings_data.phys_end = phys_addr + size_in_chunk - 1; + get_chunk_mappings_data.mappings = out_mappings + num_mappings; + get_chunk_mappings_data.num_mappings = 0; + + // Walk the chunks for the current root chunk + status = chunk_walk_pre_order(pmm, + chunk, + get_chunk_mappings_in_range, + &get_chunk_mappings_data); + if (status == NV_ERR_OUT_OF_RANGE) + break; + + if (get_chunk_mappings_data.num_mappings > 0) { + UVM_ASSERT(status == NV_OK || status == NV_ERR_MORE_DATA_AVAILABLE); + num_mappings += get_chunk_mappings_data.num_mappings; + } + else { + UVM_ASSERT(status == NV_OK); + } + + region_size -= size_in_chunk; + phys_addr += size_in_chunk; + size_in_chunk = 
min((NvU64)UVM_CHUNK_SIZE_MAX, region_size); + } while (region_size > 0); + + uvm_mutex_unlock(&pmm->lock); + + return num_mappings; +} + +NV_STATUS uvm_pmm_gpu_init(uvm_pmm_gpu_t *pmm) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + const uvm_chunk_sizes_mask_t chunk_size_init[][UVM_PMM_GPU_MEMORY_TYPE_COUNT] = + { + + + + + + + + { gpu->parent->mmu_user_chunk_sizes, gpu->parent->mmu_kernel_chunk_sizes }, + { 0, uvm_mem_kernel_chunk_sizes(gpu)}, + + }; + NV_STATUS status = NV_OK; + size_t i, j, k; + + // UVM_CHUNK_SIZE_INVALID is UVM_CHUNK_SIZE_MAX shifted left by 1. This protects + // UVM_CHUNK_SIZE_INVALID from being negative + BUILD_BUG_ON(UVM_CHUNK_SIZE_MAX >= UVM_CHUNK_SIZE_INVALID); + + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + for (i = 0; i < ARRAY_SIZE(pmm->free_list); i++) { + for (j = 0; j < ARRAY_SIZE(pmm->free_list[i]); j++) { + for (k = 0; k < ARRAY_SIZE(pmm->free_list[i][j]); k++) + INIT_LIST_HEAD(&pmm->free_list[i][j][k]); + } + } + INIT_LIST_HEAD(&pmm->root_chunks.va_block_used); + INIT_LIST_HEAD(&pmm->root_chunks.va_block_unused); + + uvm_mutex_init(&pmm->lock, UVM_LOCK_ORDER_PMM); + uvm_init_rwsem(&pmm->pma_lock, UVM_LOCK_ORDER_PMM_PMA); + uvm_spin_lock_init(&pmm->list_lock, UVM_LOCK_ORDER_LEAF); + + pmm->initialized = true; + + for (i = 0; i < UVM_PMM_GPU_MEMORY_TYPE_COUNT; i++) { + pmm->chunk_sizes[i] = 0; + // Add the common root chunk size to all memory types + pmm->chunk_sizes[i] |= UVM_CHUNK_SIZE_MAX; + for (j = 0; j < ARRAY_SIZE(chunk_size_init); j++) + pmm->chunk_sizes[i] |= chunk_size_init[j][i]; + + UVM_ASSERT(pmm->chunk_sizes[i] < UVM_CHUNK_SIZE_INVALID); + UVM_ASSERT_MSG(hweight_long(pmm->chunk_sizes[i]) <= UVM_MAX_CHUNK_SIZES, + "chunk sizes %lu, max chunk sizes %u\n", hweight_long(pmm->chunk_sizes[i]), UVM_MAX_CHUNK_SIZES); + } + + status = init_caches(pmm); + if (status != NV_OK) + goto cleanup; + + // Assert that max physical address of the GPU is not unreasonably big for + // creating the flat array of root chunks. Currently the worst case is a + // Maxwell GPU that has 0.5 GB of its physical memory mapped at the 64GB + // physical address. 256GB should provide reasonable amount of + // future-proofing and results in 128K chunks which is still manageable. + UVM_ASSERT_MSG(gpu->mem_info.max_allocatable_address < 256ull * 1024 * 1024 * 1024, + "Max physical address over 256GB: %llu\n", + gpu->mem_info.max_allocatable_address); + + // Align up the size to have a root chunk for the last part of the FB. PMM + // won't be able to allocate it, if it doesn't fit a whole root chunk, but + // it's convenient to have it for uvm_test_pma_alloc_free(). 
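+    //
+    // For example, with the 2MB root chunk size implied by the 128K figure
+    // above, a GPU whose max allocatable address is just under 8GB ends up
+    // with ALIGN_UP(8GB, 2MB) / 2MB = 4096 root chunk entries.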
+ pmm->root_chunks.count = UVM_ALIGN_UP(gpu->mem_info.max_allocatable_address, UVM_CHUNK_SIZE_MAX) / + UVM_CHUNK_SIZE_MAX; + pmm->root_chunks.array = uvm_kvmalloc_zero(sizeof(*pmm->root_chunks.array) * pmm->root_chunks.count); + if (!pmm->root_chunks.array) { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + // Initialize all root chunks to be PMA owned and set their addresses + for (i = 0; i < pmm->root_chunks.count; ++i) { + uvm_gpu_chunk_t *chunk = &pmm->root_chunks.array[i].chunk; + + INIT_LIST_HEAD(&chunk->list); + chunk->gpu_global_index = uvm_global_id_gpu_index(gpu->global_id); + chunk->state = UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED; + uvm_gpu_chunk_set_size(chunk, UVM_CHUNK_SIZE_MAX); + chunk->address = i * UVM_CHUNK_SIZE_MAX; + chunk->va_block_page_index = PAGES_PER_UVM_VA_BLOCK; + } + + status = uvm_bit_locks_init(&pmm->root_chunks.bitlocks, pmm->root_chunks.count, UVM_LOCK_ORDER_PMM_ROOT_CHUNK); + if (status != NV_OK) + goto cleanup; + + if (gpu->mem_info.size != 0) { + status = uvm_rm_locked_call(nvUvmInterfaceGetPmaObject(uvm_gpu_device_handle(gpu), &pmm->pma, &pmm->pma_stats)); + + if (status != NV_OK) + goto cleanup; + + if (gpu_supports_pma_eviction(gpu)) { + status = nvUvmInterfacePmaRegisterEvictionCallbacks(pmm->pma, + uvm_pmm_gpu_pma_evict_pages_wrapper_entry, + uvm_pmm_gpu_pma_evict_range_wrapper_entry, + pmm); + if (status != NV_OK) + goto cleanup; + } + } + + return NV_OK; +cleanup: + uvm_pmm_gpu_deinit(pmm); + return status; +} + +// Return to PMA any remaining free root chunks. Currently only USER +// (non-pinned) chunks are pre-allocated, so the KERNEL free list should be +// empty at this point. However, we may want to batch the allocation of pinned +// pages in the future, too. +static void release_free_root_chunks(uvm_pmm_gpu_t *pmm) +{ + uvm_pmm_gpu_memory_type_t type; + + for (type = 0; type < UVM_PMM_GPU_MEMORY_TYPE_COUNT; ++type) { + uvm_pmm_list_zero_t zero_type; + + while (free_next_available_root_chunk(pmm, type)) + ; + + for (zero_type = 0; zero_type < UVM_PMM_LIST_ZERO_COUNT; ++zero_type) + UVM_ASSERT(list_empty(find_free_list(pmm, type, UVM_CHUNK_SIZE_MAX, zero_type))); + } +} + +void uvm_pmm_gpu_deinit(uvm_pmm_gpu_t *pmm) +{ + uvm_gpu_t *gpu; + size_t i, j, k; + + if (!pmm->initialized) + return; + + release_free_root_chunks(pmm); + + gpu = uvm_pmm_to_gpu(pmm); + if (gpu->mem_info.size != 0 && gpu_supports_pma_eviction(gpu)) + nvUvmInterfacePmaUnregisterEvictionCallbacks(pmm->pma); + + // TODO: Bug 1766184: Handle ECC/RC + for (i = 0; i < ARRAY_SIZE(pmm->free_list); i++) { + for (j = 0; j < ARRAY_SIZE(pmm->free_list[i]); j++) { + for (k = 0; k < ARRAY_SIZE(pmm->free_list[i][j]); ++k) { + UVM_ASSERT_MSG(list_empty(&pmm->free_list[i][j][k]), "i: %s, j: %zu, k: %zu\n", + uvm_pmm_gpu_memory_type_string(i), j, k); + } + } + } + + uvm_bit_locks_deinit(&pmm->root_chunks.bitlocks); + + for (i = 0; i < ARRAY_SIZE(pmm->root_chunks.indirect_peer); i++) { + UVM_ASSERT(pmm->root_chunks.indirect_peer[i].dma_addrs == NULL); + UVM_ASSERT(atomic64_read(&pmm->root_chunks.indirect_peer[i].map_count) == 0); + } + + if (pmm->root_chunks.array) { + // Make sure that all chunks have been returned to PMA + for (i = 0; i < pmm->root_chunks.count; ++i) { + uvm_gpu_chunk_t *chunk = &pmm->root_chunks.array[i].chunk; + UVM_ASSERT_MSG(chunk->state == UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED, + "index %zu state %s GPU %s\n", + i, + uvm_pmm_gpu_chunk_state_string(chunk->state), + uvm_gpu_name(gpu)); + } + } + uvm_kvfree(pmm->root_chunks.array); + + deinit_caches(pmm); + + pmm->initialized 
= false; +} + +NV_STATUS uvm_test_evict_chunk(UVM_TEST_EVICT_CHUNK_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_block_t *block = NULL; + uvm_gpu_root_chunk_t *root_chunk = NULL; + uvm_pmm_gpu_t *pmm; + struct mm_struct *mm; + + params->chunk_was_evicted = NV_FALSE; + params->evicted_physical_address = 0; + params->chunk_size_backing_virtual = 0; + + mm = uvm_va_space_mm_retain_lock(va_space); + uvm_va_space_down_read(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu || !uvm_gpu_supports_eviction(gpu)) { + uvm_va_space_up_read(va_space); + return NV_ERR_INVALID_DEVICE; + } + pmm = &gpu->pmm; + + // Retain the GPU before unlocking the VA space so that it sticks around. + uvm_gpu_retain(gpu); + + // For virtual mode, look up and retain the block first so that eviction can + // be started without the VA space lock held. + if (params->eviction_mode == UvmTestEvictModeVirtual) { + status = uvm_va_block_find_create(va_space, mm, params->address, NULL, &block); + if (status != NV_OK) { + uvm_va_space_up_read(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + goto out; + } + + // Retain the block before unlocking the VA space lock so that we can + // safely access it later. + uvm_va_block_retain(block); + } + + // Unlock the VA space to emulate real eviction better where a VA space lock + // may not be held or may be held for a different VA space. + uvm_va_space_up_read(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + + if (params->eviction_mode == UvmTestEvictModeVirtual) { + UVM_ASSERT(block); + + uvm_mutex_lock(&block->lock); + + // As the VA space lock is not held we need to make sure the block + // is still alive. + if (!uvm_va_block_is_dead(block)) { + // The block might have been split in the meantime and may no longer + // cover the address as a result. + if (params->address >= block->start && params->address <= block->end) { + uvm_gpu_chunk_t *chunk = uvm_va_block_lookup_gpu_chunk(block, gpu, params->address); + + uvm_spin_lock(&pmm->list_lock); + if (chunk && chunk_is_evictable(pmm, chunk)) { + chunk_start_eviction(pmm, chunk); + root_chunk = root_chunk_from_chunk(pmm, chunk); + params->chunk_size_backing_virtual = uvm_gpu_chunk_get_size(chunk); + } + uvm_spin_unlock(&pmm->list_lock); + } + } + else { + // Consider it an error to free the block before the eviction ioctl + // is done. 
+ status = NV_ERR_INVALID_ADDRESS; + } + + uvm_mutex_unlock(&block->lock); + uvm_va_block_release(block); + + if (status != NV_OK) + goto out; + } + else if (params->eviction_mode == UvmTestEvictModePhysical) { + uvm_gpu_chunk_t *chunk; + size_t index = params->address / UVM_CHUNK_SIZE_MAX; + + if (index >= pmm->root_chunks.count) { + status = NV_ERR_INVALID_ADDRESS; + goto out; + } + + root_chunk = &pmm->root_chunks.array[index]; + chunk = &root_chunk->chunk; + + uvm_spin_lock(&pmm->list_lock); + + if (chunk_is_evictable(pmm, chunk)) + chunk_start_eviction(pmm, chunk); + else + chunk = NULL; + + uvm_spin_unlock(&pmm->list_lock); + + if (!chunk) + root_chunk = NULL; + } + else if (params->eviction_mode == UvmTestEvictModeDefault) { + root_chunk = pick_root_chunk_to_evict(pmm); + } + else { + UVM_DBG_PRINT("Invalid eviction mode: 0x%x\n", params->eviction_mode); + status = NV_ERR_INVALID_ARGUMENT; + goto out; + } + + if (!root_chunk) { + // Not finding a chunk to evict is not considered an error, the caller + // can inspect the targeted_chunk_size to see whether anything was evicted. + goto out; + } + + uvm_mutex_lock(&pmm->lock); + status = evict_root_chunk(pmm, root_chunk, PMM_CONTEXT_DEFAULT); + uvm_mutex_unlock(&pmm->lock); + + if (status != NV_OK) + goto out; + + params->chunk_was_evicted = NV_TRUE; + params->evicted_physical_address = root_chunk->chunk.address; + free_chunk(pmm, &root_chunk->chunk); + +out: + uvm_gpu_release(gpu); + return status; +} + +static NV_STATUS test_check_pma_allocated_chunks(uvm_pmm_gpu_t *pmm, + UVM_TEST_PMA_ALLOC_FREE_PARAMS *params, + NvU64 *pages) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + for (i = 0; i < params->num_pages; ++i) { + uvm_gpu_root_chunk_t *root_chunk; + NvU64 address; + if (params->contiguous) + address = pages[0] + ((NvU64)params->page_size) * i; + else + address = pages[i]; + + root_chunk = root_chunk_from_address(pmm, address); + + if (!IS_ALIGNED(address, params->page_size)) { + UVM_TEST_PRINT("Returned unaligned address 0x%llx page size %u\n", address, params->page_size); + status = NV_ERR_INVALID_STATE; + } + + // The chunk should still be in the PMA owned state + uvm_spin_lock(&pmm->list_lock); + if (root_chunk->chunk.state != UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED) { + UVM_TEST_PRINT("Root chunk 0x%llx invalid state: %s, allocated [0x%llx, 0x%llx)\n", + root_chunk->chunk.address, + uvm_pmm_gpu_chunk_state_string(root_chunk->chunk.state), + address, address + params->page_size); + status = NV_ERR_INVALID_STATE; + } + uvm_spin_unlock(&pmm->list_lock); + } + return status; +} + +NV_STATUS uvm_test_pma_alloc_free(UVM_TEST_PMA_ALLOC_FREE_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + uvm_pmm_gpu_t *pmm; + NvU64 page; + NvU64 *pages = NULL; + NvU32 free_flags; + UvmPmaAllocationOptions options = {0}; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + pmm = &gpu->pmm; + + options.flags = UVM_PMA_ALLOCATE_PINNED; + if (params->contiguous) { + options.flags |= UVM_PMA_ALLOCATE_CONTIGUOUS; + pages = &page; + } + else { + pages = uvm_kvmalloc(sizeof(*pages) * params->num_pages); + if (!pages) { + status = NV_ERR_NO_MEMORY; + goto out; + } + } + if (params->phys_begin != 0 || params->phys_end != 0) { + options.physBegin = params->phys_begin; + options.physEnd = params->phys_end; + options.flags |= UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE; + } + + status = nvUvmInterfacePmaAllocPages(pmm->pma, 
params->num_pages, params->page_size, &options, pages); + if (status != NV_OK) + goto out; + + status = test_check_pma_allocated_chunks(pmm, params, pages); + if (status != NV_OK) { + UVM_TEST_PRINT("Failed before the nap\n"); + goto free; + } + + if (params->nap_us_before_free) + usleep_range(params->nap_us_before_free, params->nap_us_before_free + 10); + + status = test_check_pma_allocated_chunks(pmm, params, pages); + if (status != NV_OK) + UVM_TEST_PRINT("Failed after the nap\n"); + +free: + free_flags = options.flags; + + if (!!(options.resultFlags & UVM_PMA_ALLOCATE_RESULT_IS_ZERO)) + free_flags |= UVM_PMA_FREE_IS_ZERO; + + nvUvmInterfacePmaFreePages(gpu->pmm.pma, pages, params->num_pages, params->page_size, free_flags); + +out: + if (!params->contiguous) + uvm_kvfree(pages); + + uvm_gpu_release(gpu); + return status; +} + +NV_STATUS uvm_test_pmm_alloc_free_root(UVM_TEST_PMM_ALLOC_FREE_ROOT_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + uvm_pmm_gpu_t *pmm; + uvm_gpu_chunk_t *chunk; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + pmm = &gpu->pmm; + + status = uvm_pmm_gpu_alloc_user(pmm, + 1, + UVM_CHUNK_SIZE_MAX, + UVM_PMM_ALLOC_FLAGS_EVICT | UVM_PMM_ALLOC_FLAGS_DONT_BATCH, + &chunk, + &tracker); + + if (status != NV_OK) + goto out; + + if (params->nap_us_before_free) + usleep_range(params->nap_us_before_free, params->nap_us_before_free + 10); + + uvm_pmm_gpu_free(pmm, chunk, NULL); + uvm_tracker_deinit(&tracker); + +out: + uvm_gpu_release(gpu); + return status; +} + +NV_STATUS uvm_test_pmm_inject_pma_evict_error(UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR_PARAMS *params, struct file *filp) +{ + uvm_gpu_t *gpu; + uvm_pmm_gpu_t *pmm; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + pmm = &gpu->pmm; + + uvm_mutex_lock(&pmm->lock); + pmm->inject_pma_evict_error_after_num_chunks = params->error_after_num_chunks; + uvm_mutex_unlock(&pmm->lock); + + uvm_gpu_release(gpu); + return NV_OK; +} + +NV_STATUS uvm_test_pmm_release_free_root_chunks(UVM_TEST_PMM_RELEASE_FREE_ROOT_CHUNKS_PARAMS *params, + struct file *filp) +{ + uvm_gpu_t *gpu; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + release_free_root_chunks(&gpu->pmm); + + uvm_gpu_release(gpu); + return NV_OK; +} + +NV_STATUS uvm_test_pma_get_batch_size(UVM_TEST_PMA_GET_BATCH_SIZE_PARAMS *params, struct file *filp) +{ + uvm_gpu_t *gpu; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + if (gpu->parent->rm_info.isSimulated) + params->pma_batch_size = UVM_CHUNK_SIZE_MAX; + else + params->pma_batch_size = (1 << uvm_perf_pma_batch_nonpinned_order) * UVM_CHUNK_SIZE_MAX; + + uvm_gpu_release(gpu); + return NV_OK; +} + +NV_STATUS uvm_test_pmm_query_pma_stats(UVM_TEST_PMM_QUERY_PMA_STATS_PARAMS *params, struct file *filp) +{ + uvm_gpu_t *gpu; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + params->pma_stats.numFreePages64k = UVM_READ_ONCE(gpu->pmm.pma_stats->numFreePages64k); + 
params->pma_stats.numFreePages2m = UVM_READ_ONCE(gpu->pmm.pma_stats->numFreePages2m); + + uvm_gpu_release(gpu); + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_pmm_gpu.h b/kernel-open/nvidia-uvm/uvm_pmm_gpu.h new file mode 100644 index 000000000..ee254e292 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pmm_gpu.h @@ -0,0 +1,688 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PMM_GPU_H__ +#define __UVM_PMM_GPU_H__ + +// +// The Physical Memory Manager (PMM) manages the life cycle of GPU physical +// memory. +// +// The memory is managed in GPU chunks of different sizes (uvm_chunk_size_t) and +// users of PMM need to explicitly register the chunk sizes they need to be +// supported (see chunk_size_init_func in uvm_pmm_gpu_init()). +// +// Two memory types (uvm_pmm_gpu_memory_type_t) are supported, one for user and +// one for kernel allocations. The user memory type is used only for backing +// user data managed by VA blocks and kernel memory type is used for everything +// else. The distinction exists to support oversubscription, which requires the +// ability to evict already allocated memory from its users on-demand to satisfy +// new memory allocations when no more unused memory is available. Eviction is +// limited to the user memory type as it's a very complex operation requiring +// integration between PMM and other UVM driver modules. The assumption is that +// the vast majority of memory should be used for user data as everything else +// can be considered overhead and should be minimized. Two flavors of +// oversubscription exist: internal oversubscription allowing PMM allocations to +// evict other PMM allocations and external oversubscription allowing other PMA +// clients to evict memory used by PMM. +// +// Both allocation and freeing of memory support asynchronous operations where +// the allocated/freed GPU memory chunks can have pending GPU operations +// returned when allocating memory and passed in when freeing it via trackers. 
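// Editorial sketch (illustrative only, not part of this header): once a PMM is
// initialized, the chunk sizes registered for a memory type can be walked with
// the mask helpers declared further down in this file. 'pmm' is assumed to be
// an initialized uvm_pmm_gpu_t and use_size() is a hypothetical callback:
//
//     uvm_chunk_size_t size;
//     uvm_chunk_sizes_mask_t sizes = pmm->chunk_sizes[UVM_PMM_GPU_MEMORY_TYPE_KERNEL];
//
//     // Visits every supported size, from smallest to largest.
//     for_each_chunk_size(size, sizes)
//         use_size(size);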
+// + +#include "uvm_forward_decl.h" +#include "uvm_lock.h" +#include "uvm_processors.h" +#include "uvm_tracker.h" +#include "uvm_va_block_types.h" +#include "uvm_linux.h" +#include "uvm_types.h" +#include "nv_uvm_types.h" + +typedef enum +{ + UVM_CHUNK_SIZE_1 = 1ULL, + UVM_CHUNK_SIZE_2 = 2ULL, + UVM_CHUNK_SIZE_4 = 4ULL, + UVM_CHUNK_SIZE_8 = 8ULL, + UVM_CHUNK_SIZE_16 = 16ULL, + UVM_CHUNK_SIZE_32 = 32ULL, + UVM_CHUNK_SIZE_64 = 64ULL, + UVM_CHUNK_SIZE_128 = 128ULL, + UVM_CHUNK_SIZE_256 = 256ULL, + UVM_CHUNK_SIZE_512 = 512ULL, + UVM_CHUNK_SIZE_1K = 1024ULL, + UVM_CHUNK_SIZE_2K = 2*1024ULL, + UVM_CHUNK_SIZE_4K = 4*1024ULL, + UVM_CHUNK_SIZE_8K = 8*1024ULL, + UVM_CHUNK_SIZE_16K = 16*1024ULL, + UVM_CHUNK_SIZE_32K = 32*1024ULL, + UVM_CHUNK_SIZE_64K = 64*1024ULL, + UVM_CHUNK_SIZE_128K = 128*1024ULL, + UVM_CHUNK_SIZE_256K = 256*1024ULL, + UVM_CHUNK_SIZE_512K = 512*1024ULL, + UVM_CHUNK_SIZE_1M = 1024*1024ULL, + UVM_CHUNK_SIZE_2M = 2*1024*1024ULL, + UVM_CHUNK_SIZE_MAX = UVM_CHUNK_SIZE_2M, + UVM_CHUNK_SIZE_INVALID = UVM_CHUNK_SIZE_MAX * 2ULL +} uvm_chunk_size_t; + +#define UVM_CHUNK_SIZES_MASK (uvm_chunk_sizes_mask_t)(UVM_CHUNK_SIZE_MAX | (UVM_CHUNK_SIZE_MAX-1)) + +typedef enum +{ + // Memory type for backing user pages. On Pascal+ it can be evicted. + UVM_PMM_GPU_MEMORY_TYPE_USER, + + + + + + + + + // Memory type for internal UVM allocations. It cannot be evicted. + UVM_PMM_GPU_MEMORY_TYPE_KERNEL, + + + + + + + + // Number of types - MUST BE LAST. + UVM_PMM_GPU_MEMORY_TYPE_COUNT +} uvm_pmm_gpu_memory_type_t; + +const char *uvm_pmm_gpu_memory_type_string(uvm_pmm_gpu_memory_type_t type); + +// Returns true if the given memory type is used to back user pages. +bool uvm_pmm_gpu_memory_type_is_user(uvm_pmm_gpu_memory_type_t type); + +// Returns true if the given memory type is used to back internal UVM +// allocations. +static bool uvm_pmm_gpu_memory_type_is_kernel(uvm_pmm_gpu_memory_type_t type) +{ + return !uvm_pmm_gpu_memory_type_is_user(type); +} + +typedef enum +{ + // Chunk belongs to PMA. Code outside PMM should not have access to + // it and it is likely a bug in UVM code (either in PMM or outside) + // if that happens. + UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED, + + // Chunk is on free list. That is it can be reused or returned to PMA + // as soon as its tracker is done. Code outside PMM should not have + // access to this chunk and it is likely a bug in UVM code (either in + // PMM or outside) if that happens. + UVM_PMM_GPU_CHUNK_STATE_FREE, + + // Chunk is split into subchunks. + UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT, + + // Chunk is temporarily pinned. + // + // This state is used for user memory chunks that have been allocated, but haven't + // been unpinned yet and also internally when a chunk is about to be split. + UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED, + + // Chunk is allocated. That is it is backing some VA block + UVM_PMM_GPU_CHUNK_STATE_ALLOCATED, + + // Number of states - MUST BE LAST + UVM_PMM_GPU_CHUNK_STATE_COUNT +} uvm_pmm_gpu_chunk_state_t; + +const char *uvm_pmm_gpu_chunk_state_string(uvm_pmm_gpu_chunk_state_t state); + +typedef enum +{ + // No flags passed + UVM_PMM_ALLOC_FLAGS_NONE, + + // If there is no free memory, allocation may evict chunks instead of + // returning error immediately. Therefore it must not be called under the + // VA block lock. 
+ UVM_PMM_ALLOC_FLAGS_EVICT = (1 << 0), + + // Do not use batching in this call if PMA page allocaion is required + UVM_PMM_ALLOC_FLAGS_DONT_BATCH = (1 << 1), + + UVM_PMM_ALLOC_FLAGS_MASK = (1 << 2) - 1 +} uvm_pmm_alloc_flags_t; + + +typedef enum +{ + // Identifier for lists with zeroed chunks + UVM_PMM_LIST_ZERO, + + // Identifier for lists with non-zeroed chunks + UVM_PMM_LIST_NO_ZERO, + + // Number of states for zeroed/non-zeroed chunk lists - MUST BE LAST + UVM_PMM_LIST_ZERO_COUNT +} uvm_pmm_list_zero_t; + +static void uvm_pmm_list_zero_checks(void) +{ + BUILD_BUG_ON(UVM_PMM_LIST_ZERO_COUNT > 2); +} + +// Maximum chunk sizes per type of allocation in single GPU. +// The worst case today is Maxwell with 4 allocations sizes for page tables and +// 2 page sizes used by uvm_mem_t. Notably one of the allocations for page +// tables is 2M which is our common root chunk size. +#define UVM_MAX_CHUNK_SIZES 6 + +// This specifies a maximum GAP between 2 allocation levels. +#define UVM_PMM_MAX_SUBCHUNKS UVM_CHUNK_SIZE_MAX + +#define UVM_PMM_CHUNK_SPLIT_CACHE_SIZES (ilog2(UVM_PMM_MAX_SUBCHUNKS) + 1) +#define UVM_CHUNK_SIZE_MASK_SIZE (ilog2(UVM_CHUNK_SIZE_MAX) + 1) + +typedef uvm_chunk_size_t uvm_chunk_sizes_mask_t; + +typedef struct uvm_pmm_gpu_chunk_suballoc_struct uvm_pmm_gpu_chunk_suballoc_t; + +typedef struct uvm_gpu_chunk_struct uvm_gpu_chunk_t; +struct uvm_gpu_chunk_struct +{ + // Physical address of GPU chunk. This may be removed to save memory + // if we will be able to get it from reverse map and changed + // into smaller index for subchunks. + NvU64 address; + + struct + { + // We use +1 in the order_base_2 calls appropriately to avoid compiler + // warnings due to the bitfields being too narrow for the values of + // their types. + uvm_pmm_gpu_memory_type_t type : order_base_2(UVM_PMM_GPU_MEMORY_TYPE_COUNT + 1); + + // The eviction flag is internal and used only for root chunks. It's + // set by the eviction path once a chunk is chosen for eviction in + // chunk_start_eviction(). Also see the (root_)chunk_is_in_eviction() + // helpers. + bool in_eviction : 1; + + bool inject_split_error : 1; + + // This flag is initalized when allocating a new root chunk from PMA. + // It is set to true, if PMA already scrubbed the chunk. The flag is + // only valid at allocation time (after uvm_pmm_gpu_alloc call), and + // the caller is not required to clear it before freeing the chunk. The + // VA block chunk population code can query it to skip zeroing the + // chunk. + bool is_zero : 1; + + uvm_pmm_gpu_chunk_state_t state : order_base_2(UVM_PMM_GPU_CHUNK_STATE_COUNT + 1); + + size_t log2_size : order_base_2(UVM_CHUNK_SIZE_MASK_SIZE); + + // Start page index within va_block + uvm_page_index_t va_block_page_index : order_base_2(PAGES_PER_UVM_VA_BLOCK + 1); + + // This allows determining what PMM owns the chunk. Users of this field + // must only use it if the owning GPU is retained. + // TODO: Bug 2008200: Enforce single PMM instance per GPU + NvU32 gpu_global_index : order_base_2(UVM_GLOBAL_ID_MAX_PROCESSORS); + }; + + // List entry. + // + // Guaranteed to be a valid list node at all times for simplicity. + // + // Protected by PMM's list_lock when managed by PMM. Notably the list node + // can be used by the allocator of the chunk after alloc and before the + // chunk is unpinned or freed. + struct list_head list; + + // The VA block using the chunk, if any. + // User chunks that are not backed by a VA block are considered to be + // temporarily pinned and cannot be evicted. 
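// Editorial sketch (illustrative only, not part of this header): the usual
// transition out of the temporarily-pinned state is made by the VA block code
// via uvm_pmm_gpu_unpin_temp(), declared later in this file. 'pmm', 'chunk'
// and 'va_block' are assumed to come from a prior uvm_pmm_gpu_alloc_user()
// call and the block being populated:
//
//     // chunk was returned in the TEMP_PINNED state by uvm_pmm_gpu_alloc_user()
//     uvm_pmm_gpu_unpin_temp(pmm, chunk, va_block);
//     // chunk now backs va_block (the ALLOCATED state) and the field below is set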
+ uvm_va_block_t *va_block; + + // If this is subchunk it points to the parent - in other words + // chunk of bigger size which contains this chunk. + uvm_gpu_chunk_t *parent; + + // Array describing suballocations + uvm_pmm_gpu_chunk_suballoc_t *suballoc; +}; + +typedef struct uvm_gpu_root_chunk_struct +{ + uvm_gpu_chunk_t chunk; + + // Pending operations for all GPU chunks under the root chunk. + // + // Protected by the corresponding root chunk bit lock. + uvm_tracker_t tracker; + + // Indirect peers which have IOMMU mappings to this root chunk. The mapped + // addresses are stored in this root chunk's index in + // uvm_pmm_gpu_t::root_chunks.indirect_peer[id].dma_addrs. + // + // Protected by the corresponding root chunk bit lock. + // + // We can use a regular processor id because indirect peers are not allowed + // between partitioned GPUs when SMC is enabled. + uvm_processor_mask_t indirect_peers_mapped; +} uvm_gpu_root_chunk_t; + +typedef struct +{ + // Indirect peers are GPUs which can coherently access this GPU's memory, + // but are routed through an intermediate processor. Indirect peers access + // each others' memory with the SYS aperture rather then a PEER aperture, + // meaning they need IOMMU mappings: + // + // accessing_gpu ==> IOMMU ==> CPU ==> owning_gpu (this GPU) + // + // This array has one entry per root chunk on this GPU. Each entry + // contains the IOMMU address accessing_gpu needs to use in order to + // access this GPU's root chunk. The root chunks are mapped as whole + // regions both for tracking simplicity and to allow GPUs to map with + // large PTEs. + // + // An array entry is valid iff accessing_gpu's ID is set in the + // corresponding root chunk's indirect_peers_mapped mask. + // + // Management of these addresses would be simpler if they were stored + // in the root chunks themselves, but in the common case there are only + // a small number of indirect peers in a system. Dynamic array + // allocation per indirect peer wastes less memory. + NvU64 *dma_addrs; + + // Number of this GPU's root chunks mapped for each indirect peer. + atomic64_t map_count; +} uvm_gpu_root_chunk_indirect_peer_t; + +typedef struct +{ + // Sizes of the MMU + uvm_chunk_sizes_mask_t chunk_sizes[UVM_PMM_GPU_MEMORY_TYPE_COUNT]; + + // PMA (Physical Memory Allocator) opaque handle + void *pma; + + // PMA statistics used for eviction heuristics + const UvmPmaStatistics *pma_stats; + + struct + { + // Array of all root chunks indexed by their physical address divided by + // UVM_CHUNK_SIZE_MAX. + // + // This array is pre-allocated during uvm_pmm_gpu_init() for all + // possible physical addresses (based on + // gpu::vidmem_max_physical_address). + size_t count; + uvm_gpu_root_chunk_t *array; + + // Bit locks for the root chunks with 1 bit per each root chunk + uvm_bit_locks_t bitlocks; + + // List of root chunks unused by VA blocks, i.e. allocated, but not + // holding any resident pages. These take priority when evicting as no + // data needs to be migrated for them to be evicted. + // + // For simplicity, the list is approximate, tracking unused chunks only + // from root chunk sized (2M) VA blocks. + // + // Updated by the VA block code with + // uvm_pmm_gpu_mark_root_chunk_(un)used(). 
+ struct list_head va_block_unused; + + // List of root chunks used by VA blocks + struct list_head va_block_used; + + uvm_gpu_root_chunk_indirect_peer_t indirect_peer[UVM_ID_MAX_GPUS]; + } root_chunks; + + // Lock protecting PMA allocation, freeing and eviction + uvm_rw_semaphore_t pma_lock; + + // Lock protecting splits, merges and walks of chunks. + uvm_mutex_t lock; + + // Lock protecting lists and chunk's state transitions. + uvm_spinlock_t list_lock; + + // Free chunk lists. There are separate lists for non-zero and zero chunks. + struct list_head free_list[UVM_PMM_GPU_MEMORY_TYPE_COUNT][UVM_MAX_CHUNK_SIZES][UVM_PMM_LIST_ZERO_COUNT]; + + // Inject an error after evicting a number of chunks. 0 means no error left + // to be injected. + NvU32 inject_pma_evict_error_after_num_chunks; + + // The mask of the initialized chunk sizes + DECLARE_BITMAP(chunk_split_cache_initialized, UVM_PMM_CHUNK_SPLIT_CACHE_SIZES); + + bool initialized; + + bool pma_address_cache_initialized; +} uvm_pmm_gpu_t; + +// Return containing GPU +uvm_gpu_t *uvm_pmm_to_gpu(uvm_pmm_gpu_t *pmm); + +// Initialize PMM on GPU +NV_STATUS uvm_pmm_gpu_init(uvm_pmm_gpu_t *pmm); + +// Deinitialize the PMM on GPU +void uvm_pmm_gpu_deinit(uvm_pmm_gpu_t *pmm); + +static uvm_chunk_size_t uvm_gpu_chunk_get_size(uvm_gpu_chunk_t *chunk) +{ + return ((uvm_chunk_size_t)1) << chunk->log2_size; +} + +static void uvm_gpu_chunk_set_size(uvm_gpu_chunk_t *chunk, uvm_chunk_size_t size) +{ + chunk->log2_size = ilog2(size); +} + +// Retrieve the GPU associated with the chunk. Users of this helper must only +// use it if the owning GPU is retained. +uvm_gpu_t *uvm_gpu_chunk_get_gpu(const uvm_gpu_chunk_t *chunk); + +// Return the first struct page corresponding to the physical address range +// of the given chunk. +// +// Notes: +// - The GPU must have NUMA support enabled. +// - For chunks smaller than a system page, this function returns the struct +// page containing the chunk's starting address. +struct page *uvm_gpu_chunk_to_page(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); + +// Allocates num_chunks chunks of size chunk_size in caller-supplied array (chunks). +// +// Returned chunks are in the TEMP_PINNED state, requiring a call to either +// uvm_pmm_gpu_unpin_temp or uvm_pmm_gpu_free. If a tracker is passed in, all +// the pending operations on the allocated chunks will be added to it +// guaranteeing that all the entries come from the same GPU as the PMM. +// Otherwise, when tracker is NULL, all the pending operations will be +// synchronized before returning to the caller. +// +// Each of the allocated chunks list nodes (uvm_gpu_chunk_t::list) can be used +// by the caller until the chunk is unpinned (uvm_pmm_gpu_unpin_temp) or freed +// (uvm_pmm_gpu_free). If used, the list node has to be returned to a valid +// state before calling either of the APIs. +// +// In case of an error, the chunks array is guaranteed to be cleared. +NV_STATUS uvm_pmm_gpu_alloc(uvm_pmm_gpu_t *pmm, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_gpu_memory_type_t mem_type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunks, + uvm_tracker_t *out_tracker); + +// Helper for allocating kernel memory +// +// Internally calls uvm_pmm_gpu_alloc() and sets the state of all chunks to +// allocated on success. 
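// Editorial usage sketch (illustrative only, not part of this header). It
// assumes a retained uvm_gpu_t *gpu; the chunk count and size are arbitrary:
//
//     uvm_gpu_chunk_t *chunks[4];
//     uvm_tracker_t tracker = UVM_TRACKER_INIT();
//     NV_STATUS status;
//     size_t i;
//
//     status = uvm_pmm_gpu_alloc_kernel(&gpu->pmm,
//                                       4,
//                                       UVM_CHUNK_SIZE_64K,
//                                       UVM_PMM_ALLOC_FLAGS_NONE,
//                                       chunks,
//                                       &tracker);
//     if (status == NV_OK) {
//         // Pending GPU work on the chunks, if any, was added to 'tracker'.
//         // A NULL tracker on free is fine: PMM synchronizes internally.
//         for (i = 0; i < 4; i++)
//             uvm_pmm_gpu_free(&gpu->pmm, chunks[i], NULL);
//         uvm_tracker_deinit(&tracker);
//     }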
+ + + + +NV_STATUS uvm_pmm_gpu_alloc_kernel(uvm_pmm_gpu_t *pmm, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunks, + uvm_tracker_t *out_tracker); + +// Helper for allocating user memory +// +// Simple wrapper that just uses UVM_PMM_GPU_MEMORY_TYPE_USER for the memory +// type. +// +// If the memory returned by the PMM allocator cannot be physically addressed, +// the MMU interface provides user chunk mapping and unmapping functions +// (uvm_mmu_chunk_map/unmap) that enable virtual addressing. + + + + + +static NV_STATUS uvm_pmm_gpu_alloc_user(uvm_pmm_gpu_t *pmm, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunks, + uvm_tracker_t *out_tracker) +{ + return uvm_pmm_gpu_alloc(pmm, num_chunks, chunk_size, UVM_PMM_GPU_MEMORY_TYPE_USER, flags, chunks, out_tracker); +} + +// Unpin a temporarily pinned chunk and set its reverse map to a VA block +// +// Can only be used on user memory. +void uvm_pmm_gpu_unpin_temp(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_va_block_t *va_block); + +// Frees the chunk. This also unpins the chunk if it is temporarily pinned. +// +// The tracker is optional and a NULL tracker indicates that no new operation +// has been pushed for the chunk, but the tracker returned as part of +// its allocation doesn't have to be completed as PMM will synchronize it +// internally if needed. A non-NULL tracker indicates any additional pending +// operations on the chunk pushed by the caller that need to be synchronized +// before freeing or re-using the chunk. +void uvm_pmm_gpu_free(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_tracker_t *tracker); + +// Splits the input chunk in-place into smaller chunks of subchunk_size. No data +// is moved, and the smaller chunks remain allocated. +// +// If the subchunks array is non-NULL, it will be filled with +// (uvm_gpu_chunk_get_size(chunk) / subchunk_size) chunks in address order. The +// new chunks must all be freed individually. +// +// If the subchunks array is NULL, the split chunks can be retrieved later by +// passing the original parent chunk to uvm_pmm_gpu_get_subchunks. +// +// On error, the original chunk remains unmodified. +// +// The chunk must be in the ALLOCATED state with the owning VA block lock held, +// or the TEMP_PINNED state. +// +// subchunk_size must be a valid chunk size for the given type. +// +// The chunk can be re-merged if desired using uvm_pmm_gpu_merge_chunk. +NV_STATUS uvm_pmm_gpu_split_chunk(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *chunk, + uvm_chunk_size_t subchunk_size, + uvm_gpu_chunk_t **subchunks); + +// Retrieve leaf subchunks under parent. Up to num_subchunks chunks are copied +// into the subchunks array in address order, starting with the subchunk at +// start_index. start_index can be thought of as the number of leaf subchunks to +// skip before beginning the copy. +// +// parent can be in the ALLOCATED state, in which case parent is the only chunk +// which may be copied into the subchunks array. +// +// num_subchunks may be 0. +// +// Returns the number of subchunks written to the array. This may be less than +// num_subchunks depending on the value of start_index and how many subchunks +// are present under parent. +size_t uvm_pmm_gpu_get_subchunks(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *parent, + size_t start_index, + size_t num_subchunks, + uvm_gpu_chunk_t **subchunks); + +// Merges a chunk previously split with uvm_pmm_gpu_split_chunk. 
All of chunk's +// leaf children must be allocated. +void uvm_pmm_gpu_merge_chunk(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); + +// Waits for all free chunk trackers (removing their completed entries) to complete. +// +// This inherently races with any chunks being freed to this PMM. The assumption +// is that the caller doesn't care about preventing new chunks from being freed, +// just that any already-freed chunks will be synced. +void uvm_pmm_gpu_sync(uvm_pmm_gpu_t *pmm); + +// Mark an allocated chunk as evicted +void uvm_pmm_gpu_mark_chunk_evicted(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); + +// Initialize indirect peer state so accessing_gpu is ready to create mappings +// to pmm's root chunks. +// +// Locking: The global lock must be held. +NV_STATUS uvm_pmm_gpu_indirect_peer_init(uvm_pmm_gpu_t *pmm, uvm_gpu_t *accessing_gpu); + +// Tear down indirect peer state from other_gpu to pmm's GPU. Any existing IOMMU +// mappings from other_gpu to this GPU are torn down. +// +// Locking: The global lock must be held. +void uvm_pmm_gpu_indirect_peer_destroy(uvm_pmm_gpu_t *pmm, uvm_gpu_t *other_gpu); + +// Create an IOMMU mapping to allow accessing_gpu to access chunk on pmm's GPU. +// chunk can be any size, and can be mapped more than once (the address will not +// change). The address can be retrieved using uvm_pmm_gpu_indirect_peer_addr. +// +// Note that there is no corresponding unmap call. The mappings will be removed +// automatically as necessary when the chunk is freed. This allows mappings to +// be reused as much as possible. +NV_STATUS uvm_pmm_gpu_indirect_peer_map(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_gpu_t *accessing_gpu); + +// Retrieve the system address accessing_gpu must use to access this chunk. +// uvm_pmm_gpu_indirect_peer_map must have been called first. +NvU64 uvm_pmm_gpu_indirect_peer_addr(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk, uvm_gpu_t *accessing_gpu); + +// Returns the physical address for use by accessing_gpu of a vidmem allocation +// on the peer pmm->gpu. This address can be used for making PTEs on +// accessing_gpu, but not for copying between the two GPUs. For that, use +// uvm_gpu_peer_copy_address. +uvm_gpu_phys_address_t uvm_pmm_gpu_peer_phys_address(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *chunk, + uvm_gpu_t *accessing_gpu); + +// Returns the physical or virtual address for use by accessing_gpu to copy to/ +// from a vidmem allocation on the peer pmm->gpu. This may be different from +// uvm_gpu_peer_phys_address to handle CE limitations in addressing peer +// physical memory directly. +uvm_gpu_address_t uvm_pmm_gpu_peer_copy_address(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *chunk, + uvm_gpu_t *accessing_gpu); + +// Mark a user chunk as used +// +// If the chunk is pinned or selected for eviction, this won't do anything. The +// chunk can be pinned when it's being initially populated by the VA block. +// Allow that state to make this API easy to use for the caller. 
+void uvm_pmm_gpu_mark_root_chunk_used(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); + +// Mark an allocated user chunk as unused +void uvm_pmm_gpu_mark_root_chunk_unused(uvm_pmm_gpu_t *pmm, uvm_gpu_chunk_t *chunk); + +static bool uvm_gpu_chunk_same_root(uvm_gpu_chunk_t *chunk1, uvm_gpu_chunk_t *chunk2) +{ + return UVM_ALIGN_DOWN(chunk1->address, UVM_CHUNK_SIZE_MAX) == UVM_ALIGN_DOWN(chunk2->address, UVM_CHUNK_SIZE_MAX); +} + +// Finds the first (smallest) size in the chunk_sizes mask +static uvm_chunk_size_t uvm_chunk_find_first_size(uvm_chunk_sizes_mask_t chunk_sizes) +{ + UVM_ASSERT(chunk_sizes); + return (uvm_chunk_size_t)1 << __ffs(chunk_sizes); +} + +// Finds the last (biggest) size in the chunk_sizes mask +static uvm_chunk_size_t uvm_chunk_find_last_size(uvm_chunk_sizes_mask_t chunk_sizes) +{ + UVM_ASSERT(chunk_sizes); + return (uvm_chunk_size_t)1 << __fls(chunk_sizes); +} + +// Finds the smallest size in the chunk_sizes mask which is larger than +// chunk_size. If there is no such value returns UVM_CHUNK_SIZE_INVALID. +static uvm_chunk_size_t uvm_chunk_find_next_size(uvm_chunk_sizes_mask_t chunk_sizes, uvm_chunk_size_t chunk_size) +{ + UVM_ASSERT(is_power_of_2(chunk_size)); + UVM_ASSERT(chunk_sizes & chunk_size); + BUILD_BUG_ON(sizeof(chunk_sizes) > sizeof(unsigned long)); + return (uvm_chunk_size_t)1 << __ffs((chunk_sizes & ~((chunk_size << 1) - 1)) | UVM_CHUNK_SIZE_INVALID); +} + +// Finds the largest size in the chunk_sizes mask which is smaller than +// chunk_size. If there is no such value returns UVM_CHUNK_SIZE_INVALID. +static uvm_chunk_size_t uvm_chunk_find_prev_size(uvm_chunk_sizes_mask_t chunk_sizes, uvm_chunk_size_t chunk_size) +{ + UVM_ASSERT(is_power_of_2(chunk_size)); + UVM_ASSERT(chunk_sizes & chunk_size); + chunk_sizes = chunk_sizes & (chunk_size - 1); + if (!chunk_sizes) + return UVM_CHUNK_SIZE_INVALID; + return (uvm_chunk_size_t)1 << __fls(chunk_sizes); +} + +// Obtain the {va_block, virt_addr} information for the chunks in the given +// [phys_addr:phys_addr + region_size) range. One entry per chunk is returned. +// phys_addr and region_size must be page-aligned. +// +// Valid translations are written to out_mappings sequentially (there are no +// gaps). The caller is required to provide enough entries in out_pages for the +// whole region. The function returns the number of entries written to +// out_mappings. +// +// The returned reverse map is a snapshot: it is stale as soon as it is +// returned, and the caller is responsible for locking the VA block(s) and +// checking that the chunks are still there. Also, the VA block(s) are +// retained, and it's up to the caller to release them. +NvU32 uvm_pmm_gpu_phys_to_virt(uvm_pmm_gpu_t *pmm, NvU64 phys_addr, NvU64 region_size, uvm_reverse_map_t *out_mappings); + +// Iterates over every size in the input mask from smallest to largest +#define for_each_chunk_size(__size, __chunk_sizes) \ + for ((__size) = (__chunk_sizes) ? uvm_chunk_find_first_size(__chunk_sizes) : \ + UVM_CHUNK_SIZE_INVALID; \ + (__size) != UVM_CHUNK_SIZE_INVALID; \ + (__size) = uvm_chunk_find_next_size((__chunk_sizes), (__size))) + +// Iterates over every size in the input mask from largest to smallest +#define for_each_chunk_size_rev(__size, __chunk_sizes) \ + for ((__size) = (__chunk_sizes) ? 
uvm_chunk_find_last_size(__chunk_sizes) : \ + UVM_CHUNK_SIZE_INVALID; \ + (__size) != UVM_CHUNK_SIZE_INVALID; \ + (__size) = uvm_chunk_find_prev_size((__chunk_sizes), (__size))) + +// Iterates over every size in the input mask from smallest to largest, starting +// from and including __size. __size must be present in the mask. +#define for_each_chunk_size_from(__size, __chunk_sizes) \ + for (; (__size) != UVM_CHUNK_SIZE_INVALID; \ + (__size) = uvm_chunk_find_next_size((__chunk_sizes), (__size))) + +// Iterates over every size in the input mask from largest to smallest, starting +// from and including __size. __size must be present in the mask. +#define for_each_chunk_size_rev_from(__size, __chunk_sizes) \ + for (; (__size) != UVM_CHUNK_SIZE_INVALID; \ + (__size) = uvm_chunk_find_prev_size((__chunk_sizes), (__size))) + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_pmm_sysmem.c b/kernel-open/nvidia-uvm/uvm_pmm_sysmem.c new file mode 100644 index 000000000..97f367146 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pmm_sysmem.c @@ -0,0 +1,1518 @@ +/******************************************************************************* + Copyright (c) 2017-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_gpu.h" +#include "uvm_pmm_sysmem.h" +#include "uvm_kvmalloc.h" +#include "uvm_va_block.h" +#include "uvm_va_space.h" + +static int uvm_cpu_chunk_allocation_sizes = UVM_CPU_CHUNK_SIZES; +module_param(uvm_cpu_chunk_allocation_sizes, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(uvm_cpu_chunk_allocation_sizes, "OR'ed value of all CPU chunk allocation sizes."); + +static struct kmem_cache *g_reverse_page_map_cache __read_mostly; + +NV_STATUS uvm_pmm_sysmem_init(void) +{ + g_reverse_page_map_cache = NV_KMEM_CACHE_CREATE("uvm_pmm_sysmem_page_reverse_map_t", + uvm_reverse_map_t); + if (!g_reverse_page_map_cache) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_pmm_sysmem_exit(void) +{ + kmem_cache_destroy_safe(&g_reverse_page_map_cache); +} + +NV_STATUS uvm_pmm_sysmem_mappings_init(uvm_gpu_t *gpu, uvm_pmm_sysmem_mappings_t *sysmem_mappings) +{ + memset(sysmem_mappings, 0, sizeof(*sysmem_mappings)); + + sysmem_mappings->gpu = gpu; + + uvm_mutex_init(&sysmem_mappings->reverse_map_lock, UVM_LOCK_ORDER_LEAF); + uvm_init_radix_tree_preloadable(&sysmem_mappings->reverse_map_tree); + + return NV_OK; +} + +void uvm_pmm_sysmem_mappings_deinit(uvm_pmm_sysmem_mappings_t *sysmem_mappings) +{ + if (sysmem_mappings->gpu) { + UVM_ASSERT_MSG(radix_tree_empty(&sysmem_mappings->reverse_map_tree), + "radix_tree not empty for GPU %s\n", + uvm_gpu_name(sysmem_mappings->gpu)); + } + + sysmem_mappings->gpu = NULL; +} + +// TODO: Bug 1995015: use a more efficient data structure for +// physically-contiguous allocations. +NV_STATUS uvm_pmm_sysmem_mappings_add_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 virt_addr, + NvU64 region_size, + uvm_va_block_t *va_block, + uvm_processor_id_t owner) +{ + NV_STATUS status = NV_OK; + uvm_reverse_map_t *new_reverse_map; + NvU64 key; + const NvU64 base_key = dma_addr / PAGE_SIZE; + const NvU32 num_pages = region_size / PAGE_SIZE; + uvm_page_index_t page_index; + + UVM_ASSERT(va_block); + UVM_ASSERT(!uvm_va_block_is_dead(va_block)); + UVM_ASSERT(IS_ALIGNED(dma_addr, region_size)); + UVM_ASSERT(IS_ALIGNED(virt_addr, region_size)); + UVM_ASSERT(region_size <= UVM_VA_BLOCK_SIZE); + UVM_ASSERT(is_power_of_2(region_size)); + UVM_ASSERT(uvm_va_block_contains_address(va_block, virt_addr)); + UVM_ASSERT(uvm_va_block_contains_address(va_block, virt_addr + region_size - 1)); + uvm_assert_mutex_locked(&va_block->lock); + + if (!sysmem_mappings->gpu->parent->access_counters_supported) + return NV_OK; + + new_reverse_map = nv_kmem_cache_zalloc(g_reverse_page_map_cache, NV_UVM_GFP_FLAGS); + if (!new_reverse_map) + return NV_ERR_NO_MEMORY; + + page_index = uvm_va_block_cpu_page_index(va_block, virt_addr); + + new_reverse_map->va_block = va_block; + new_reverse_map->region = uvm_va_block_region(page_index, page_index + num_pages); + new_reverse_map->owner = owner; + + uvm_mutex_lock(&sysmem_mappings->reverse_map_lock); + for (key = base_key; key < base_key + num_pages; ++key) { + int ret = radix_tree_insert(&sysmem_mappings->reverse_map_tree, key, new_reverse_map); + if (ret != 0) { + NvU64 remove_key; + + for (remove_key = base_key; remove_key < key; ++remove_key) + (void *)radix_tree_delete(&sysmem_mappings->reverse_map_tree, remove_key); + + kmem_cache_free(g_reverse_page_map_cache, new_reverse_map); + status = errno_to_nv_status(ret); + break; + } + } + uvm_mutex_unlock(&sysmem_mappings->reverse_map_lock); + + // The assert is added for Coverity's sake. 
It is equivalent to adding + // assert(num_pages > 0) before the loop. However, Coverity is not able to + // deduce that the loop has to execute at least once from num_pages > 0. + UVM_ASSERT(key != base_key || status != NV_OK); + + return status; +} + +static void pmm_sysmem_mappings_remove_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + bool check_mapping) +{ + uvm_reverse_map_t *reverse_map; + NvU64 key; + const NvU64 base_key = dma_addr / PAGE_SIZE; + + if (!sysmem_mappings->gpu->parent->access_counters_supported) + return; + + uvm_mutex_lock(&sysmem_mappings->reverse_map_lock); + + reverse_map = radix_tree_delete(&sysmem_mappings->reverse_map_tree, base_key); + if (check_mapping) + UVM_ASSERT(reverse_map); + + if (!reverse_map) { + uvm_mutex_unlock(&sysmem_mappings->reverse_map_lock); + return; + } + + uvm_assert_mutex_locked(&reverse_map->va_block->lock); + + for (key = base_key + 1; key < base_key + uvm_va_block_region_num_pages(reverse_map->region); ++key) { + uvm_reverse_map_t *curr_reverse_map = radix_tree_delete(&sysmem_mappings->reverse_map_tree, key); + UVM_ASSERT(curr_reverse_map == reverse_map); + } + + uvm_mutex_unlock(&sysmem_mappings->reverse_map_lock); + + kmem_cache_free(g_reverse_page_map_cache, reverse_map); +} + +void uvm_pmm_sysmem_mappings_remove_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, NvU64 dma_addr) +{ + pmm_sysmem_mappings_remove_gpu_mapping(sysmem_mappings, dma_addr, true); +} + +void uvm_pmm_sysmem_mappings_remove_gpu_mapping_on_eviction(uvm_pmm_sysmem_mappings_t *sysmem_mappings, NvU64 dma_addr) +{ + pmm_sysmem_mappings_remove_gpu_mapping(sysmem_mappings, dma_addr, false); +} + +void uvm_pmm_sysmem_mappings_reparent_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + uvm_va_block_t *va_block) +{ + NvU64 virt_addr; + uvm_reverse_map_t *reverse_map; + const NvU64 base_key = dma_addr / PAGE_SIZE; + uvm_page_index_t new_start_page; + + UVM_ASSERT(PAGE_ALIGNED(dma_addr)); + UVM_ASSERT(va_block); + UVM_ASSERT(!uvm_va_block_is_dead(va_block)); + + if (!sysmem_mappings->gpu->parent->access_counters_supported) + return; + + uvm_mutex_lock(&sysmem_mappings->reverse_map_lock); + + reverse_map = radix_tree_lookup(&sysmem_mappings->reverse_map_tree, base_key); + UVM_ASSERT(reverse_map); + + // Compute virt address by hand since the old VA block may be messed up + // during split + virt_addr = reverse_map->va_block->start + reverse_map->region.first * PAGE_SIZE; + new_start_page = uvm_va_block_cpu_page_index(va_block, virt_addr); + + reverse_map->region = uvm_va_block_region(new_start_page, + new_start_page + uvm_va_block_region_num_pages(reverse_map->region)); + reverse_map->va_block = va_block; + + UVM_ASSERT(uvm_va_block_contains_address(va_block, uvm_reverse_map_start(reverse_map))); + UVM_ASSERT(uvm_va_block_contains_address(va_block, uvm_reverse_map_end(reverse_map))); + + uvm_mutex_unlock(&sysmem_mappings->reverse_map_lock); +} + +NV_STATUS uvm_pmm_sysmem_mappings_split_gpu_mappings(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 new_region_size) +{ + uvm_reverse_map_t *orig_reverse_map; + const NvU64 base_key = dma_addr / PAGE_SIZE; + const size_t num_pages = new_region_size / PAGE_SIZE; + size_t old_num_pages; + size_t subregion, num_subregions; + uvm_reverse_map_t **new_reverse_maps; + + UVM_ASSERT(IS_ALIGNED(dma_addr, new_region_size)); + UVM_ASSERT(new_region_size <= UVM_VA_BLOCK_SIZE); + UVM_ASSERT(is_power_of_2(new_region_size)); + + if 
(!sysmem_mappings->gpu->parent->access_counters_supported) + return NV_OK; + + uvm_mutex_lock(&sysmem_mappings->reverse_map_lock); + orig_reverse_map = radix_tree_lookup(&sysmem_mappings->reverse_map_tree, base_key); + uvm_mutex_unlock(&sysmem_mappings->reverse_map_lock); + + // We can access orig_reverse_map outside the tree lock because we hold the + // VA block lock so we cannot have concurrent modifications in the tree for + // the mappings of the chunks that belong to that VA block. + UVM_ASSERT(orig_reverse_map); + UVM_ASSERT(orig_reverse_map->va_block); + uvm_assert_mutex_locked(&orig_reverse_map->va_block->lock); + old_num_pages = uvm_va_block_region_num_pages(orig_reverse_map->region); + UVM_ASSERT(num_pages < old_num_pages); + + num_subregions = old_num_pages / num_pages; + + new_reverse_maps = uvm_kvmalloc_zero(sizeof(*new_reverse_maps) * (num_subregions - 1)); + if (!new_reverse_maps) + return NV_ERR_NO_MEMORY; + + // Allocate the descriptors for the new subregions + for (subregion = 1; subregion < num_subregions; ++subregion) { + uvm_reverse_map_t *new_reverse_map = nv_kmem_cache_zalloc(g_reverse_page_map_cache, NV_UVM_GFP_FLAGS); + uvm_page_index_t page_index = orig_reverse_map->region.first + num_pages * subregion; + + if (new_reverse_map == NULL) { + // On error, free the previously-created descriptors + while (--subregion != 0) + kmem_cache_free(g_reverse_page_map_cache, new_reverse_maps[subregion - 1]); + + uvm_kvfree(new_reverse_maps); + return NV_ERR_NO_MEMORY; + } + + new_reverse_map->va_block = orig_reverse_map->va_block; + new_reverse_map->region = uvm_va_block_region(page_index, page_index + num_pages); + new_reverse_map->owner = orig_reverse_map->owner; + + new_reverse_maps[subregion - 1] = new_reverse_map; + } + + uvm_mutex_lock(&sysmem_mappings->reverse_map_lock); + + for (subregion = 1; subregion < num_subregions; ++subregion) { + NvU64 key; + + for (key = base_key + num_pages * subregion; key < base_key + num_pages * (subregion + 1); ++key) { + void **slot = radix_tree_lookup_slot(&sysmem_mappings->reverse_map_tree, key); + UVM_ASSERT(slot); + UVM_ASSERT(radix_tree_deref_slot(slot) == orig_reverse_map); + + NV_RADIX_TREE_REPLACE_SLOT(&sysmem_mappings->reverse_map_tree, slot, new_reverse_maps[subregion - 1]); + } + } + + orig_reverse_map->region = uvm_va_block_region(orig_reverse_map->region.first, + orig_reverse_map->region.first + num_pages); + + uvm_mutex_unlock(&sysmem_mappings->reverse_map_lock); + + uvm_kvfree(new_reverse_maps); + return NV_OK; +} + +void uvm_pmm_sysmem_mappings_merge_gpu_mappings(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 new_region_size) +{ + uvm_reverse_map_t *first_reverse_map; + uvm_page_index_t running_page_index; + NvU64 key; + const NvU64 base_key = dma_addr / PAGE_SIZE; + const size_t num_pages = new_region_size / PAGE_SIZE; + size_t num_mapping_pages; + + UVM_ASSERT(IS_ALIGNED(dma_addr, new_region_size)); + UVM_ASSERT(new_region_size <= UVM_VA_BLOCK_SIZE); + UVM_ASSERT(is_power_of_2(new_region_size)); + + if (!sysmem_mappings->gpu->parent->access_counters_supported) + return; + + uvm_mutex_lock(&sysmem_mappings->reverse_map_lock); + + // Find the first mapping in the region + first_reverse_map = radix_tree_lookup(&sysmem_mappings->reverse_map_tree, base_key); + UVM_ASSERT(first_reverse_map); + num_mapping_pages = uvm_va_block_region_num_pages(first_reverse_map->region); + UVM_ASSERT(num_pages >= num_mapping_pages); + UVM_ASSERT(IS_ALIGNED(base_key, num_mapping_pages)); + + // The region in the tree 
matches the size of the merged region, just return + if (num_pages == num_mapping_pages) + goto unlock_no_update; + + // Otherwise update the rest of slots to point at the same reverse map + // descriptor + key = base_key + uvm_va_block_region_num_pages(first_reverse_map->region); + running_page_index = first_reverse_map->region.outer; + while (key < base_key + num_pages) { + uvm_reverse_map_t *reverse_map = NULL; + void **slot = radix_tree_lookup_slot(&sysmem_mappings->reverse_map_tree, key); + size_t slot_index; + UVM_ASSERT(slot); + + reverse_map = radix_tree_deref_slot(slot); + UVM_ASSERT(reverse_map); + UVM_ASSERT(reverse_map != first_reverse_map); + UVM_ASSERT(reverse_map->va_block == first_reverse_map->va_block); + UVM_ASSERT(uvm_id_equal(reverse_map->owner, first_reverse_map->owner)); + UVM_ASSERT(reverse_map->region.first == running_page_index); + + NV_RADIX_TREE_REPLACE_SLOT(&sysmem_mappings->reverse_map_tree, slot, first_reverse_map); + + num_mapping_pages = uvm_va_block_region_num_pages(reverse_map->region); + UVM_ASSERT(IS_ALIGNED(key, num_mapping_pages)); + UVM_ASSERT(key + num_mapping_pages <= base_key + num_pages); + + for (slot_index = 1; slot_index < num_mapping_pages; ++slot_index) { + slot = radix_tree_lookup_slot(&sysmem_mappings->reverse_map_tree, key + slot_index); + UVM_ASSERT(slot); + UVM_ASSERT(reverse_map == radix_tree_deref_slot(slot)); + + NV_RADIX_TREE_REPLACE_SLOT(&sysmem_mappings->reverse_map_tree, slot, first_reverse_map); + } + + key += num_mapping_pages; + running_page_index = reverse_map->region.outer; + + kmem_cache_free(g_reverse_page_map_cache, reverse_map); + } + + // Grow the first mapping to cover the whole region + first_reverse_map->region.outer = first_reverse_map->region.first + num_pages; + +unlock_no_update: + uvm_mutex_unlock(&sysmem_mappings->reverse_map_lock); +} + +size_t uvm_pmm_sysmem_mappings_dma_to_virt(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 region_size, + uvm_reverse_map_t *out_mappings, + size_t max_out_mappings) +{ + NvU64 key; + size_t num_mappings = 0; + const NvU64 base_key = dma_addr / PAGE_SIZE; + NvU32 num_pages = region_size / PAGE_SIZE; + + UVM_ASSERT(region_size >= PAGE_SIZE); + UVM_ASSERT(PAGE_ALIGNED(region_size)); + UVM_ASSERT(sysmem_mappings->gpu->parent->access_counters_supported); + UVM_ASSERT(max_out_mappings > 0); + + uvm_mutex_lock(&sysmem_mappings->reverse_map_lock); + + key = base_key; + do { + uvm_reverse_map_t *reverse_map = radix_tree_lookup(&sysmem_mappings->reverse_map_tree, key); + + if (reverse_map) { + size_t num_chunk_pages = uvm_va_block_region_num_pages(reverse_map->region); + NvU32 page_offset = key & (num_chunk_pages - 1); + NvU32 num_mapping_pages = min(num_pages, (NvU32)num_chunk_pages - page_offset); + + // Sysmem mappings are removed during VA block destruction. + // Therefore, we can safely retain the VA blocks as long as they + // are in the reverse map and we hold the reverse map lock. 
+ uvm_va_block_retain(reverse_map->va_block); + out_mappings[num_mappings] = *reverse_map; + out_mappings[num_mappings].region.first += page_offset; + out_mappings[num_mappings].region.outer = out_mappings[num_mappings].region.first + num_mapping_pages; + + if (++num_mappings == max_out_mappings) + break; + + num_pages -= num_mapping_pages; + key += num_mapping_pages; + } + else { + --num_pages; + ++key; + } + } + while (num_pages > 0); + + uvm_mutex_unlock(&sysmem_mappings->reverse_map_lock); + + return num_mappings; +} + +uvm_chunk_sizes_mask_t uvm_cpu_chunk_get_allocation_sizes(void) +{ + return uvm_cpu_chunk_allocation_sizes & UVM_CPU_CHUNK_SIZES; +} + +static void uvm_cpu_chunk_set_phys_size(uvm_cpu_chunk_t *chunk, uvm_chunk_size_t size) +{ +#if !UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() + chunk->log2_phys_size = ilog2(size); +#endif +} + +uvm_chunk_size_t uvm_cpu_chunk_get_size(uvm_cpu_chunk_t *chunk) +{ +#if UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() + return PAGE_SIZE; +#else + uvm_chunk_size_t chunk_size; + + UVM_ASSERT(chunk); + UVM_ASSERT(uvm_cpu_chunk_get_phys_size(chunk)); + chunk_size = uvm_va_block_region_size(chunk->region); + UVM_ASSERT(uvm_cpu_chunk_get_phys_size(chunk) >= chunk_size); + return chunk_size; +#endif +} + +#if UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() +struct page *uvm_cpu_chunk_get_cpu_page(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + UVM_ASSERT(chunk); + return chunk; +} + +void uvm_cpu_chunk_put(uvm_cpu_chunk_t *chunk) +{ + UVM_ASSERT(chunk); + put_page(chunk); +} + +NV_STATUS uvm_cpu_chunk_gpu_mapping_alloc(uvm_va_block_t *va_block, uvm_gpu_id_t id) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, id); + size_t num_pages = uvm_va_block_num_cpu_pages(va_block); + + UVM_ASSERT(gpu_state); + gpu_state->cpu_chunks_dma_addrs = uvm_kvmalloc_zero(num_pages * sizeof(gpu_state->cpu_chunks_dma_addrs[0])); + if (!gpu_state->cpu_chunks_dma_addrs) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_cpu_chunk_gpu_mapping_split(uvm_va_block_t *existing, uvm_va_block_t *new, uvm_gpu_id_t id) +{ + uvm_va_block_gpu_state_t *existing_state = uvm_va_block_gpu_state_get(existing, id); + uvm_va_block_gpu_state_t *new_state = uvm_va_block_gpu_state_get(new, id); + size_t new_pages = uvm_va_block_num_cpu_pages(new); + + memcpy(&new_state->cpu_chunks_dma_addrs[0], + &existing_state->cpu_chunks_dma_addrs[uvm_va_block_num_cpu_pages(existing) - new_pages], + new_pages * sizeof(new_state->cpu_chunks_dma_addrs[0])); +} + +void uvm_cpu_chunk_gpu_mapping_free(uvm_va_block_t *va_block, uvm_gpu_id_t id) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, id); + + if (gpu_state) + uvm_kvfree(gpu_state->cpu_chunks_dma_addrs); +} + +NV_STATUS uvm_cpu_chunk_set_gpu_mapping_addr(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_cpu_chunk_t *chunk, + uvm_gpu_id_t id, + NvU64 dma_addr) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, id); + + gpu_state->cpu_chunks_dma_addrs[page_index] = dma_addr; + return NV_OK; +} + +NvU64 uvm_cpu_chunk_get_gpu_mapping_addr(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_cpu_chunk_t *chunk, + uvm_gpu_id_t id) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, id); + + return gpu_state->cpu_chunks_dma_addrs[page_index]; +} + +NV_STATUS uvm_cpu_chunk_insert_in_block(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + if (!va_block->cpu.chunks) { + 
va_block->cpu.chunks = (unsigned long)uvm_kvmalloc_zero(uvm_va_block_num_cpu_pages(va_block) * + sizeof(uvm_cpu_chunk_t *)); + if (!va_block->cpu.chunks) + return NV_ERR_NO_MEMORY; + } + + UVM_ASSERT(!uvm_page_mask_test(&va_block->cpu.allocated, page_index)); + UVM_ASSERT(((uvm_cpu_chunk_t **)va_block->cpu.chunks)[page_index] == NULL); + ((uvm_cpu_chunk_t **)va_block->cpu.chunks)[page_index] = chunk; + uvm_page_mask_set(&va_block->cpu.allocated, page_index); + return NV_OK; +} + +void uvm_cpu_chunk_remove_from_block(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + UVM_ASSERT(uvm_page_mask_test(&va_block->cpu.allocated, page_index)); + UVM_ASSERT(((uvm_cpu_chunk_t **)va_block->cpu.chunks)[page_index] != NULL); + ((uvm_cpu_chunk_t **)va_block->cpu.chunks)[page_index] = NULL; + uvm_page_mask_clear(&va_block->cpu.allocated, page_index); +} + +uvm_cpu_chunk_t *uvm_cpu_chunk_get_chunk_for_page(uvm_va_block_t *va_block, uvm_page_index_t page_index) +{ + UVM_ASSERT(page_index < uvm_va_block_num_cpu_pages(va_block)); + if (!uvm_page_mask_test(&va_block->cpu.allocated, page_index)) + return NULL; + + return ((uvm_cpu_chunk_t **)va_block->cpu.chunks)[page_index]; +} + +NV_STATUS uvm_cpu_chunk_alloc(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + struct mm_struct *mm, + uvm_cpu_chunk_t **new_chunk) +{ + uvm_cpu_chunk_t *chunk = NULL; + gfp_t alloc_flags; + NV_STATUS status; + + UVM_ASSERT(!uvm_page_mask_test(&va_block->cpu.allocated, page_index)); + UVM_ASSERT(new_chunk); + + alloc_flags = (mm ? NV_UVM_GFP_FLAGS_ACCOUNT : NV_UVM_GFP_FLAGS) | GFP_HIGHUSER; + + if (!uvm_va_block_page_resident_processors_count(va_block, page_index)) + alloc_flags |= __GFP_ZERO; + + chunk = alloc_pages(alloc_flags, 0); + if (!chunk) + return NV_ERR_NO_MEMORY; + + if (alloc_flags & __GFP_ZERO) + SetPageDirty(chunk); + + status = uvm_cpu_chunk_insert_in_block(va_block, chunk, page_index); + if (status != NV_OK) { + uvm_cpu_chunk_put(chunk); + return status; + } + + *new_chunk = chunk; + return NV_OK; +} + +#else + +struct page *uvm_cpu_chunk_get_cpu_page(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + uvm_va_block_region_t chunk_region; + + UVM_ASSERT(chunk); + UVM_ASSERT(chunk->page); + chunk_region = uvm_va_block_chunk_region(va_block, uvm_cpu_chunk_get_size(chunk), page_index); + return chunk->page + (page_index - chunk_region.first); +} + +static NvU64 uvm_cpu_chunk_get_virt_addr(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk) +{ + UVM_ASSERT(chunk); + UVM_ASSERT(chunk->region.first < chunk->region.outer); + return uvm_va_block_cpu_page_address(va_block, chunk->region.first); +} + +static void cpu_chunk_release(nv_kref_t *kref) +{ + uvm_cpu_chunk_t *chunk = container_of(kref, uvm_cpu_chunk_t, refcount); + uvm_cpu_chunk_t *parent = chunk->parent; + + if (uvm_processor_mask_get_gpu_count(&chunk->gpu_mappings.dma_addrs_mask) > 1) + uvm_kvfree(chunk->gpu_mappings.dynamic_entries); + + if (!parent) { + uvm_assert_spinlock_unlocked(&chunk->lock); + uvm_kvfree(chunk->dirty_bitmap); + put_page(chunk->page); + } + else { + uvm_cpu_chunk_put(parent); + } + + uvm_kvfree(chunk); +} + +void uvm_cpu_chunk_get(uvm_cpu_chunk_t *chunk) +{ + UVM_ASSERT(chunk); + nv_kref_get(&chunk->refcount); +} + +void uvm_cpu_chunk_put(uvm_cpu_chunk_t *chunk) +{ + UVM_ASSERT(chunk); + + nv_kref_put(&chunk->refcount, cpu_chunk_release); +} + +NV_STATUS uvm_cpu_chunk_gpu_mapping_alloc(uvm_va_block_t *va_block, uvm_gpu_id_t id) +{ + return NV_OK; +} + +void 
uvm_cpu_chunk_gpu_mapping_split(uvm_va_block_t *existing, uvm_va_block_t *va_block, uvm_gpu_id_t id) +{ + return; +} + +void uvm_cpu_chunk_gpu_mapping_free(uvm_va_block_t *va_block, uvm_gpu_id_t id) +{ + return; +} + +static NvU32 compute_gpu_mappings_entry_index(uvm_processor_mask_t dma_addrs_mask, uvm_gpu_id_t id) +{ + uvm_processor_mask_t subset_mask; + + // Compute the array index for the given GPU ID by masking off all bits + // above the id and then counting the number of bits remaining. + uvm_processor_mask_zero(&subset_mask); + bitmap_set(subset_mask.bitmap, 0, uvm_id_value(id) + 1); + uvm_processor_mask_and(&subset_mask, &dma_addrs_mask, &subset_mask); + + if (uvm_processor_mask_empty(&subset_mask)) + return 0; + + return uvm_processor_mask_get_gpu_count(&subset_mask) - 1; +} + +NV_STATUS uvm_cpu_chunk_set_gpu_mapping_addr(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_cpu_chunk_t *chunk, + uvm_gpu_id_t id, + NvU64 dma_addr) +{ + NvU32 num_existing_entries = uvm_processor_mask_get_gpu_count(&chunk->gpu_mappings.dma_addrs_mask); + NvU32 num_new_entries; + NvU32 array_index; + NvU64 *new_entries; + + if (uvm_processor_mask_empty(&chunk->gpu_mappings.dma_addrs_mask)) { + uvm_processor_mask_set(&chunk->gpu_mappings.dma_addrs_mask, id); + chunk->gpu_mappings.static_entry = dma_addr; + return NV_OK; + } + + if (uvm_processor_mask_test(&chunk->gpu_mappings.dma_addrs_mask, id)) { + if (num_existing_entries == 1) { + chunk->gpu_mappings.static_entry = dma_addr; + } + else { + array_index = compute_gpu_mappings_entry_index(chunk->gpu_mappings.dma_addrs_mask, id); + chunk->gpu_mappings.dynamic_entries[array_index] = dma_addr; + } + return NV_OK; + } + + num_new_entries = num_existing_entries + 1; + if (num_existing_entries == 1) { + new_entries = uvm_kvmalloc(sizeof(*new_entries) * num_new_entries); + + if (new_entries) { + uvm_processor_id_t first = uvm_processor_mask_find_first_id(&chunk->gpu_mappings.dma_addrs_mask); + + if (uvm_id_value(first) < uvm_id_value(id)) + new_entries[0] = chunk->gpu_mappings.static_entry; + else + new_entries[1] = chunk->gpu_mappings.static_entry; + } + } + else { + new_entries = uvm_kvrealloc(chunk->gpu_mappings.dynamic_entries, + sizeof(*new_entries) * num_new_entries); + if (new_entries) { + // Get the number of bits set below the input id. 
+ num_existing_entries = compute_gpu_mappings_entry_index(chunk->gpu_mappings.dma_addrs_mask, id); + for (; num_existing_entries < num_new_entries - 1; num_existing_entries++) + new_entries[num_existing_entries + 1] = new_entries[num_existing_entries]; + } + } + + if (!new_entries) + return NV_ERR_NO_MEMORY; + + chunk->gpu_mappings.dynamic_entries = new_entries; + uvm_processor_mask_set(&chunk->gpu_mappings.dma_addrs_mask, id); + array_index = compute_gpu_mappings_entry_index(chunk->gpu_mappings.dma_addrs_mask, id); + chunk->gpu_mappings.dynamic_entries[array_index] = dma_addr; + + return NV_OK; +} + +NvU64 uvm_cpu_chunk_get_gpu_mapping_addr(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_cpu_chunk_t *chunk, + uvm_gpu_id_t id) +{ + NvU64 dma_addr; + + if (!uvm_processor_mask_test(&chunk->gpu_mappings.dma_addrs_mask, id)) + return 0; + + if (uvm_processor_mask_get_gpu_count(&chunk->gpu_mappings.dma_addrs_mask) == 1) { + dma_addr = chunk->gpu_mappings.static_entry; + } + else { + NvU32 array_index = compute_gpu_mappings_entry_index(chunk->gpu_mappings.dma_addrs_mask, id); + + dma_addr = chunk->gpu_mappings.dynamic_entries[array_index]; + } + + return dma_addr; +} + +// The bottom two bits of uvm_va_block_t::chunks is used to indicate how +// CPU chunks are stored. +// +// CPU chunk storage is handled in three different ways depending on the +// type of chunks the VA block owns. This is done to minimize the memory +// required to hold metadata. +typedef enum +{ + // The uvm_va_block_t::chunk pointer points to a single 2MB + // CPU chunk. + UVM_CPU_CHUNK_STORAGE_CHUNK = 0, + + // The uvm_va_block_t::chunks pointer points to an array of + // pointers to CPU chunks. + UVM_CPU_CHUNK_STORAGE_ARRAY, + + // The uvm_va_block_t::chunks pointer points to a + // structure of mixed (64K and 4K) chunks. + UVM_CPU_CHUNK_STORAGE_MIXED, + UVM_CPU_CHUNK_STORAGE_COUNT, +} uvm_cpu_chunk_storage_type_t; + +#define UVM_CPU_CHUNK_STORAGE_MASK 0x3 + +#define UVM_CPU_STORAGE_GET_PTR(block) ((void *)((block)->cpu.chunks & ~UVM_CPU_CHUNK_STORAGE_MASK)) +#define UVM_CPU_STORAGE_GET_TYPE(block) \ + ((uvm_cpu_chunk_storage_type_t)((block)->cpu.chunks & UVM_CPU_CHUNK_STORAGE_MASK)) + +// The maximum number of slots in the mixed chunk mode (64K + 4K chunks) is one +// more than MAX_BIG_PAGES_PER_UVM_VA_BLOCK to account for misaligned VA blocks. +#define MAX_BIG_CPU_CHUNK_SLOTS_PER_UVM_VA_BLOCK (MAX_BIG_PAGES_PER_UVM_VA_BLOCK + 1) + +#define MAX_SMALL_CHUNK_PER_BIG_SLOT (UVM_CHUNK_SIZE_64K / PAGE_SIZE) + +// This structure is used when a VA block contains 64K or a mix of 64K and 4K +// CPU chunks. +// For every 64K CPU chunks, big_chunks will have its corresponding bit set +// and the corresponding index in slots will point directly to the +// uvm_cpu_chunk_t structure. +// +// For 4K CPU chunks, the corresponding bit in big_chunks will be clear and +// the element in slots will point to an array of 16 uvm_cpu_chunk_t pointers. 
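// Editorial illustration (not part of this file): the storage kind and the
// untagged pointer are recovered with the macros above. For the mixed layout
// described above, a set bit in big_chunks means the slot holds a 64K chunk
// directly; a clear bit means it holds an array of 16 4K chunk pointers.
// 'va_block' and the index 'slot' are assumed:
//
//     uvm_cpu_chunk_storage_type_t type = UVM_CPU_STORAGE_GET_TYPE(va_block);
//
//     if (type == UVM_CPU_CHUNK_STORAGE_MIXED) {
//         uvm_cpu_chunk_storage_mixed_t *mixed = UVM_CPU_STORAGE_GET_PTR(va_block);
//
//         if (test_bit(slot, mixed->big_chunks))
//             ;   // mixed->slots[slot] is a single 64K uvm_cpu_chunk_t *
//         else
//             ;   // mixed->slots[slot] is an array of 16 4K chunk pointers, or NULL
//     }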
+typedef struct { + DECLARE_BITMAP(big_chunks, MAX_BIG_CPU_CHUNK_SLOTS_PER_UVM_VA_BLOCK); + void *slots[MAX_BIG_CPU_CHUNK_SLOTS_PER_UVM_VA_BLOCK]; +} uvm_cpu_chunk_storage_mixed_t; + +static uvm_page_index_t compute_slot_index(uvm_va_block_t *va_block, uvm_page_index_t page_index) +{ + uvm_va_block_region_t block_region = uvm_va_block_region_from_block(va_block); + size_t prefix; + uvm_page_index_t big_page_index; + + if (page_index < block_region.first || page_index >= block_region.outer) + return MAX_BIG_PAGES_PER_UVM_VA_BLOCK; + + prefix = (UVM_ALIGN_UP(va_block->start, UVM_CHUNK_SIZE_64K) - va_block->start) / PAGE_SIZE; + + if (page_index < prefix) + return 0; + + big_page_index = ((page_index - prefix) / MAX_SMALL_CHUNK_PER_BIG_SLOT) + !!prefix; + UVM_ASSERT(big_page_index < MAX_BIG_CPU_CHUNK_SLOTS_PER_UVM_VA_BLOCK); + + return big_page_index; +} + +static size_t compute_small_index(uvm_va_block_t *va_block, uvm_page_index_t page_index) +{ + size_t prefix = (UVM_ALIGN_UP(va_block->start, UVM_CHUNK_SIZE_64K) - va_block->start) / PAGE_SIZE; + + if (page_index < prefix) + return page_index; + + return (page_index - prefix) % MAX_SMALL_CHUNK_PER_BIG_SLOT; +} + +NV_STATUS uvm_cpu_chunk_insert_in_block(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + uvm_chunk_size_t chunk_size = uvm_cpu_chunk_get_size(chunk); + uvm_page_index_t big_page_index; + uvm_cpu_chunk_storage_mixed_t *mixed; + uvm_cpu_chunk_t **chunks = NULL; + + // We only want to use the bottom two bits of a pointer. + BUILD_BUG_ON(UVM_CPU_CHUNK_STORAGE_COUNT > 4); + + chunk->region = uvm_va_block_region(page_index, page_index + uvm_cpu_chunk_num_pages(chunk)); + UVM_ASSERT(chunk->region.outer <= PAGES_PER_UVM_VA_BLOCK); + + // We want to protect against two threads manipulating the VA block's CPU + // chunks at the same time. However, when a block is split, the new block's + // lock is locked without tracking. So, we can't use + // uvm_assert_mutex_locked(). 
+ UVM_ASSERT(mutex_is_locked(&va_block->lock.m)); + + if (!va_block->cpu.chunks) { + switch (chunk_size) { + case UVM_CHUNK_SIZE_2M: + break; + case UVM_CHUNK_SIZE_64K: + mixed = uvm_kvmalloc_zero(sizeof(*mixed)); + if (!mixed) + return NV_ERR_NO_MEMORY; + + va_block->cpu.chunks = (unsigned long)mixed | UVM_CPU_CHUNK_STORAGE_MIXED; + break; + case UVM_CHUNK_SIZE_4K: + chunks = uvm_kvmalloc_zero(sizeof(*chunks) * uvm_va_block_num_cpu_pages(va_block)); + if (!chunks) + return NV_ERR_NO_MEMORY; + + va_block->cpu.chunks = (unsigned long)chunks | UVM_CPU_CHUNK_STORAGE_ARRAY; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + } + + switch (UVM_CPU_STORAGE_GET_TYPE(va_block)) { + case UVM_CPU_CHUNK_STORAGE_CHUNK: + if (va_block->cpu.chunks) + return NV_ERR_INVALID_STATE; + UVM_ASSERT(chunk_size == UVM_CHUNK_SIZE_2M); + va_block->cpu.chunks = (unsigned long)chunk | UVM_CPU_CHUNK_STORAGE_CHUNK; + break; + case UVM_CPU_CHUNK_STORAGE_MIXED: + mixed = UVM_CPU_STORAGE_GET_PTR(va_block); + big_page_index = compute_slot_index(va_block, page_index); + UVM_ASSERT(big_page_index != MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + UVM_ASSERT(compute_slot_index(va_block, page_index + uvm_cpu_chunk_num_pages(chunk) - 1) == big_page_index); + + if (test_bit(big_page_index, mixed->big_chunks)) + return NV_ERR_INVALID_STATE; + + if (chunk_size == UVM_CHUNK_SIZE_64K) { + mixed->slots[big_page_index] = chunk; + set_bit(big_page_index, mixed->big_chunks); + } + else { + size_t slot_index; + + UVM_ASSERT(chunk_size == UVM_CHUNK_SIZE_4K); + chunks = mixed->slots[big_page_index]; + + if (!chunks) { + chunks = uvm_kvmalloc_zero(sizeof(*chunks) * MAX_SMALL_CHUNK_PER_BIG_SLOT); + if (!chunks) + return NV_ERR_NO_MEMORY; + mixed->slots[big_page_index] = chunks; + } + + slot_index = compute_small_index(va_block, page_index); + chunks[slot_index] = chunk; + } + break; + case UVM_CPU_CHUNK_STORAGE_ARRAY: + chunks = UVM_CPU_STORAGE_GET_PTR(va_block); + if (chunk_size == UVM_CHUNK_SIZE_64K) { + uvm_cpu_chunk_t **subchunks = NULL; + uvm_page_index_t sub_page_index; + + mixed = uvm_kvmalloc_zero(sizeof(*mixed)); + if (!mixed) + return NV_ERR_NO_MEMORY; + + big_page_index = compute_slot_index(va_block, page_index); + UVM_ASSERT(big_page_index != MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + UVM_ASSERT(compute_slot_index(va_block, page_index + uvm_cpu_chunk_num_pages(chunk) - 1) == + big_page_index); + mixed->slots[big_page_index] = chunk; + set_bit(big_page_index, mixed->big_chunks); + + for (sub_page_index = 0; sub_page_index < uvm_va_block_num_cpu_pages(va_block); sub_page_index++) { + uvm_cpu_chunk_t *subchunk = chunks[sub_page_index]; + size_t subchunk_index = compute_small_index(va_block, sub_page_index); + + if (!subchunk) + continue; + + if (!subchunks || compute_slot_index(va_block, sub_page_index) != big_page_index) { + subchunks = uvm_kvmalloc_zero(sizeof(*subchunks) * MAX_SMALL_CHUNK_PER_BIG_SLOT); + if (!subchunks) { + size_t i; + + for (i = 0; i < MAX_BIG_CPU_CHUNK_SLOTS_PER_UVM_VA_BLOCK; i++) { + if (!test_bit(i, mixed->big_chunks) && mixed->slots[i]) + uvm_kvfree(mixed->slots[i]); + } + + uvm_kvfree(mixed); + return NV_ERR_NO_MEMORY; + } + + big_page_index = compute_slot_index(va_block, sub_page_index); + UVM_ASSERT(mixed->slots[big_page_index] == NULL); + mixed->slots[big_page_index] = subchunks; + } + + subchunks[subchunk_index] = subchunk; + if (subchunk_index == MAX_SMALL_CHUNK_PER_BIG_SLOT - 1) + subchunks = NULL; + } + + va_block->cpu.chunks = (unsigned long)mixed | UVM_CPU_CHUNK_STORAGE_MIXED; + uvm_kvfree(chunks); + } + else 
{ + chunks[page_index] = chunk; + } + + default: + break; + } + + uvm_page_mask_region_fill(&va_block->cpu.allocated, + uvm_va_block_region(page_index, page_index + uvm_cpu_chunk_num_pages(chunk))); + + return NV_OK; +} + +void uvm_cpu_chunk_remove_from_block(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + uvm_cpu_chunk_storage_mixed_t *mixed; + uvm_page_index_t big_page_index; + uvm_cpu_chunk_t **chunks; + + // We want to protect against two threads manipulating the VA block's CPU + // chunks at the same time. However, when a block is split, the new block's + // lock is locked without tracking. So, we can't use + // uvm_assert_mutex_locked(). + UVM_ASSERT(mutex_is_locked(&va_block->lock.m)); + UVM_ASSERT(va_block->cpu.chunks); + + switch (UVM_CPU_STORAGE_GET_TYPE(va_block)) { + case UVM_CPU_CHUNK_STORAGE_CHUNK: + UVM_ASSERT(uvm_cpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_2M); + UVM_ASSERT(UVM_CPU_STORAGE_GET_PTR(va_block) == chunk); + va_block->cpu.chunks = 0; + break; + case UVM_CPU_CHUNK_STORAGE_MIXED: + UVM_ASSERT(uvm_cpu_chunk_get_size(chunk) != UVM_CHUNK_SIZE_2M); + mixed = UVM_CPU_STORAGE_GET_PTR(va_block); + big_page_index = compute_slot_index(va_block, page_index); + UVM_ASSERT(big_page_index != MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + UVM_ASSERT(mixed->slots[big_page_index] != NULL); + + if (test_bit(big_page_index, mixed->big_chunks)) { + UVM_ASSERT(uvm_cpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_64K); + UVM_ASSERT(mixed->slots[big_page_index] == chunk); + mixed->slots[big_page_index] = NULL; + clear_bit(big_page_index, mixed->big_chunks); + } + else { + size_t slot_index; + + UVM_ASSERT(uvm_cpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_4K); + chunks = mixed->slots[big_page_index]; + slot_index = compute_small_index(va_block, page_index); + UVM_ASSERT(chunks[slot_index] == chunk); + chunks[slot_index] = NULL; + + for (slot_index = 0; slot_index < MAX_SMALL_CHUNK_PER_BIG_SLOT; slot_index++) { + if (chunks[slot_index]) + break; + } + + if (slot_index == MAX_SMALL_CHUNK_PER_BIG_SLOT) { + uvm_kvfree(chunks); + mixed->slots[big_page_index] = NULL; + } + } + + break; + case UVM_CPU_CHUNK_STORAGE_ARRAY: + UVM_ASSERT(uvm_cpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_4K); + chunks = UVM_CPU_STORAGE_GET_PTR(va_block); + UVM_ASSERT(chunks[page_index] == chunk); + chunks[page_index] = NULL; + break; + default: + return; + }; + + uvm_page_mask_region_clear(&va_block->cpu.allocated, + uvm_va_block_region(page_index, page_index + uvm_cpu_chunk_num_pages(chunk))); + + if (uvm_page_mask_empty(&va_block->cpu.allocated)) { + if (UVM_CPU_STORAGE_GET_TYPE(va_block) != UVM_CPU_CHUNK_STORAGE_CHUNK) + uvm_kvfree(UVM_CPU_STORAGE_GET_PTR(va_block)); + va_block->cpu.chunks = 0; + } +} + +uvm_cpu_chunk_t *uvm_cpu_chunk_get_chunk_for_page(uvm_va_block_t *va_block, uvm_page_index_t page_index) +{ + uvm_cpu_chunk_storage_mixed_t *mixed; + uvm_cpu_chunk_t *chunk; + uvm_cpu_chunk_t **chunks; + uvm_page_index_t big_page_index; + size_t slot_index; + + if (page_index >= uvm_va_block_num_cpu_pages(va_block) || !uvm_page_mask_test(&va_block->cpu.allocated, page_index)) + return NULL; + + UVM_ASSERT(va_block->cpu.chunks); + + switch (UVM_CPU_STORAGE_GET_TYPE(va_block)) { + case UVM_CPU_CHUNK_STORAGE_CHUNK: + return UVM_CPU_STORAGE_GET_PTR(va_block); + case UVM_CPU_CHUNK_STORAGE_MIXED: + mixed = UVM_CPU_STORAGE_GET_PTR(va_block); + big_page_index = compute_slot_index(va_block, page_index); + UVM_ASSERT(big_page_index != MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + 
UVM_ASSERT(mixed->slots[big_page_index] != NULL); + if (test_bit(big_page_index, mixed->big_chunks)) + return mixed->slots[big_page_index]; + + chunks = mixed->slots[big_page_index]; + slot_index = compute_small_index(va_block, page_index); + chunk = chunks[slot_index]; + break; + case UVM_CPU_CHUNK_STORAGE_ARRAY: + chunks = UVM_CPU_STORAGE_GET_PTR(va_block); + chunk = chunks[page_index]; + break; + default: + return NULL; + } + + UVM_ASSERT(chunk); + return chunk; +} + +NV_STATUS uvm_cpu_chunk_alloc(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + struct mm_struct *mm, + uvm_cpu_chunk_t **new_chunk) +{ + uvm_va_block_test_t *block_test = uvm_va_block_get_test(va_block); + uvm_cpu_chunk_t *chunk = NULL; + NvU32 cpu_allocation_sizes; + uvm_page_mask_t zero_page_mask; + uvm_gpu_id_t id; + struct page *page = NULL; + uvm_chunk_size_t alloc_size; + uvm_va_block_region_t region; + uvm_va_space_t *va_space; + uvm_processor_mask_t uvm_lite_gpus; + gfp_t base_alloc_flags; + NV_STATUS status; + + UVM_ASSERT(new_chunk); + + // Limit the allocation sizes only to the ones supported. + cpu_allocation_sizes = uvm_cpu_chunk_get_allocation_sizes(); + + if (block_test && block_test->cpu_chunk_allocation_size_mask) + cpu_allocation_sizes &= block_test->cpu_chunk_allocation_size_mask; + + // Get a mask of all the block pages that are resident somewhere. + uvm_page_mask_zero(&zero_page_mask); + for_each_id_in_mask(id, &va_block->resident) + uvm_page_mask_or(&zero_page_mask, &zero_page_mask, uvm_va_block_resident_mask_get(va_block, id)); + + // If the VA space has a UVM-Lite GPU registered, only PAGE_SIZE allocations + // should be used in order to avoid extra copies due to dirty compound + // pages. + va_space = uvm_va_block_get_va_space(va_block); + uvm_processor_mask_andnot(&uvm_lite_gpus, &va_space->registered_gpus, &va_space->faultable_processors); + if (!uvm_processor_mask_empty(&uvm_lite_gpus)) + cpu_allocation_sizes = PAGE_SIZE; + + base_alloc_flags = (mm ? NV_UVM_GFP_FLAGS_ACCOUNT : NV_UVM_GFP_FLAGS) | GFP_HIGHUSER; + + // Attempt to allocate CPU pages with the largest physically contiguous + // size from the set of CPU chunk sizes that we can. + // This is accomplished by: + // 1. Aligning the CPU page address down to the allocation size. + // 2. Ensuring that the entire allocation region fits withing the VA + // block. + // 3. Ensuring that the region covered by the allocation is empty. + for_each_chunk_size_rev(alloc_size, cpu_allocation_sizes) { + NvU64 alloc_virt_addr; + uvm_page_mask_t scratch_page_mask; + uvm_page_index_t alloc_page_index; + gfp_t alloc_flags = base_alloc_flags; + + if (alloc_size < PAGE_SIZE) + break; + + alloc_virt_addr = UVM_ALIGN_DOWN(uvm_va_block_cpu_page_address(va_block, page_index), alloc_size); + + if (!uvm_va_block_contains_address(va_block, alloc_virt_addr) || + !uvm_va_block_contains_address(va_block, alloc_virt_addr + alloc_size - 1)) + continue; + + alloc_page_index = uvm_va_block_cpu_page_index(va_block, alloc_virt_addr); + region = uvm_va_block_region(alloc_page_index, alloc_page_index + (alloc_size / PAGE_SIZE)); + uvm_page_mask_init_from_region(&scratch_page_mask, region, NULL); + uvm_page_mask_and(&scratch_page_mask, &va_block->cpu.allocated, &scratch_page_mask); + + if (!uvm_page_mask_empty(&scratch_page_mask)) + continue; + + // For allocation sizes higher than PAGE_SIZE, use __GFP_NORETRY in + // order to avoid higher allocation latency from the kernel compacting + // memory to satisfy the request. 
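+        // Note that a failed opportunistic high-order allocation is not
+        // fatal: the surrounding for_each_chunk_size_rev() loop simply
+        // retries with the next smaller supported chunk size, down to a
+        // single PAGE_SIZE page.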
+ if (alloc_size > PAGE_SIZE) + alloc_flags |= __GFP_COMP | __GFP_NORETRY; + + // If not all pages in the allocation region are resident somewhere, + // zero out the allocated page. + // This could be wasteful if only a few pages in high-order allocation + // need to be zero'ed out but the alternative is to map single sub- + // pages one-by-one. + if (!uvm_page_mask_region_full(&zero_page_mask, region)) + alloc_flags |= __GFP_ZERO; + + page = alloc_pages(alloc_flags, get_order(alloc_size)); + if (page) { + if (alloc_flags & __GFP_ZERO) + SetPageDirty(page); + break; + } + } + + if (!page) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + chunk = uvm_kvmalloc_zero(sizeof(*chunk)); + if (!chunk) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + chunk->page = page; + uvm_cpu_chunk_set_phys_size(chunk, alloc_size); + chunk->region = region; + nv_kref_init(&chunk->refcount); + uvm_spin_lock_init(&chunk->lock, UVM_LOCK_ORDER_LEAF); + if (alloc_size > PAGE_SIZE) { + chunk->dirty_bitmap = uvm_kvmalloc_zero(BITS_TO_LONGS(alloc_size / PAGE_SIZE) * sizeof(*chunk->dirty_bitmap)); + if (!chunk->dirty_bitmap) { + status = NV_ERR_NO_MEMORY; + goto error; + } + } + + status = uvm_cpu_chunk_insert_in_block(va_block, chunk, chunk->region.first); + if (status != NV_OK) + goto error; + + if (new_chunk) + *new_chunk = chunk; + + return NV_OK; + +error: + + // If chunk has been allocated, uvm_cpu_chunk_put() will release the chunk + // and the page. Otherwise, only release the page. + if (chunk) + uvm_cpu_chunk_put(chunk); + else if (page) + __free_pages(page, get_order(alloc_size)); + + return status; +} + +NV_STATUS uvm_cpu_chunk_split(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_chunk_size_t new_size) +{ + NV_STATUS status = NV_OK; + NV_STATUS insert_status; + uvm_cpu_chunk_t *new_chunk; + uvm_page_index_t running_page_index = chunk->region.first; + uvm_page_index_t next_page_index; + size_t num_new_chunks; + size_t num_subchunk_pages; + size_t i; + + UVM_ASSERT(chunk); + UVM_ASSERT(is_power_of_2(new_size)); + UVM_ASSERT(new_size < uvm_cpu_chunk_get_size(chunk)); + + // We subtract 1 from the computed number of subchunks because we always + // keep the original chunk as the first in the block's list. This is so we + // don't lose the physical chunk. + // All new subchunks will point to the original chunk as their parent. + num_new_chunks = (uvm_cpu_chunk_get_size(chunk) / new_size) - 1; + num_subchunk_pages = new_size / PAGE_SIZE; + running_page_index += num_subchunk_pages; + + // Remove the existing chunk from the block first. We re-insert it after + // the split. + uvm_cpu_chunk_remove_from_block(va_block, chunk, chunk->region.first); + + for (i = 0; i < num_new_chunks; i++) { + uvm_page_index_t relative_page_index = running_page_index - chunk->region.first; + uvm_gpu_id_t id; + + new_chunk = uvm_kvmalloc_zero(sizeof(*new_chunk)); + if (!new_chunk) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + new_chunk->page = chunk->page + relative_page_index; + new_chunk->offset = chunk->offset + relative_page_index; + new_chunk->region = uvm_va_block_region(running_page_index, running_page_index + num_subchunk_pages); + uvm_cpu_chunk_set_phys_size(new_chunk, new_size); + nv_kref_init(&new_chunk->refcount); + + // This lock is unused for logical blocks but initialize it for + // consistency. 
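+        // Each new subchunk inherits its DMA addresses from the chunk being
+        // split: that chunk's mapping address for a given GPU plus the
+        // subchunk's byte offset within it (see the for_each_gpu_id() loop
+        // below).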
+ uvm_spin_lock_init(&new_chunk->lock, UVM_LOCK_ORDER_LEAF); + new_chunk->parent = chunk; + uvm_cpu_chunk_get(new_chunk->parent); + + for_each_gpu_id(id) { + NvU64 parent_dma_addr = uvm_cpu_chunk_get_gpu_mapping_addr(va_block, running_page_index, chunk, id); + + if (!parent_dma_addr) + continue; + + uvm_cpu_chunk_set_gpu_mapping_addr(va_block, + relative_page_index, + new_chunk, + id, + parent_dma_addr + (relative_page_index * PAGE_SIZE)); + } + + status = uvm_cpu_chunk_insert_in_block(va_block, new_chunk, new_chunk->region.first); + if (status != NV_OK) { + uvm_cpu_chunk_put(new_chunk); + goto error; + } + + running_page_index += num_subchunk_pages; + } + + chunk->region = uvm_va_block_region(chunk->region.first, chunk->region.first + num_subchunk_pages); + +error: + // Re-insert the split chunk. This is done unconditionally in both the + // success and error paths. The difference is that on the success path, + // the chunk's region has been updated. + // This operation should never fail with NV_ERR_NO_MEMORY since all + // state memory should already be allocated. Failing with other errors + // is a programmer error. + insert_status = uvm_cpu_chunk_insert_in_block(va_block, chunk, chunk->region.first); + UVM_ASSERT(insert_status != NV_ERR_INVALID_ARGUMENT && insert_status != NV_ERR_INVALID_STATE); + + if (status != NV_OK) { + for_each_cpu_chunk_in_block_region_safe(new_chunk, + running_page_index, + next_page_index, + va_block, + chunk->region) { + uvm_cpu_chunk_remove_from_block(va_block, new_chunk, new_chunk->region.first); + uvm_cpu_chunk_put(new_chunk); + } + } + + return status; +} + +uvm_cpu_chunk_t *uvm_cpu_chunk_merge(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk) +{ + uvm_cpu_chunk_t *parent; + uvm_cpu_chunk_t *subchunk; + uvm_chunk_sizes_mask_t merge_sizes = uvm_cpu_chunk_get_allocation_sizes(); + uvm_chunk_size_t merge_chunk_size; + uvm_chunk_size_t parent_phys_size; + uvm_chunk_size_t chunk_size; + uvm_va_block_region_t subchunk_region; + uvm_page_index_t page_index; + uvm_page_index_t next_page_index; + NV_STATUS insert_status; + + UVM_ASSERT(chunk); + parent = chunk->parent; + + // If the chunk does not have a parent, a merge cannot be done. + if (!parent) + return NULL; + + chunk_size = uvm_cpu_chunk_get_size(chunk); + parent_phys_size = uvm_cpu_chunk_get_phys_size(parent); + + // Remove all sizes above the parent's physical size. + merge_sizes &= parent_phys_size | (parent_phys_size - 1); + + // Remove all sizes including and below the chunk's current size. + merge_sizes &= ~(chunk_size | (chunk_size - 1)); + + // Find the largest size that is fully contained within the VA block. + for_each_chunk_size_rev(merge_chunk_size, merge_sizes) { + NvU64 parent_start = uvm_cpu_chunk_get_virt_addr(va_block, parent); + NvU64 parent_end = parent_start + parent_phys_size - 1; + + if (uvm_va_block_contains_address(va_block, parent_start) && + uvm_va_block_contains_address(va_block, parent_start + merge_chunk_size - 1) && + IS_ALIGNED(parent_start, merge_chunk_size) && + IS_ALIGNED(parent_end + 1, merge_chunk_size)) + break; + } + + if (merge_chunk_size == UVM_CHUNK_SIZE_INVALID) + return NULL; + + if (uvm_cpu_chunk_get_size(parent) == merge_chunk_size) + return NULL; + + UVM_ASSERT(chunk_size == uvm_cpu_chunk_get_size(parent)); + UVM_ASSERT(IS_ALIGNED(merge_chunk_size, chunk_size)); + + subchunk_region = uvm_va_block_region(parent->region.first + uvm_cpu_chunk_num_pages(parent), + parent->region.first + (merge_chunk_size / PAGE_SIZE)); + + // Remove the first (parent) subchunk. 
It will be re-inserted later with an + // updated region. + uvm_cpu_chunk_remove_from_block(va_block, parent, parent->region.first); + + for_each_cpu_chunk_in_block_region_safe(subchunk, page_index, next_page_index, va_block, subchunk_region) { + UVM_ASSERT(subchunk); + uvm_cpu_chunk_remove_from_block(va_block, subchunk, subchunk->region.first); + uvm_cpu_chunk_put(subchunk); + } + + parent->region = uvm_va_block_region(parent->region.first, parent->region.first + (merge_chunk_size / PAGE_SIZE)); + insert_status = uvm_cpu_chunk_insert_in_block(va_block, parent, parent->region.first); + UVM_ASSERT(insert_status != NV_ERR_INVALID_ARGUMENT && insert_status != NV_ERR_INVALID_STATE); + + return parent; +} + +static uvm_cpu_chunk_t *get_parent_cpu_chunk(uvm_cpu_chunk_t *chunk) +{ + UVM_ASSERT(chunk); + + while (chunk->parent) + chunk = chunk->parent; + + return chunk; +} + +// Check the CPU PTE dirty bit and if set, clear it and fill the +// physical chunk's dirty bitmap. +static void check_cpu_dirty_flag(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + struct page *page; + + UVM_ASSERT(!chunk->parent); + uvm_assert_spinlock_locked(&chunk->lock); + + // Kernels prior to v4.5 used the flags within the individual pages even for + // compound pages. + page = chunk->page + page_index; + if (PageDirty(page)) { + bitmap_fill(chunk->dirty_bitmap, uvm_cpu_chunk_get_phys_size(chunk) / PAGE_SIZE); + ClearPageDirty(page); + } +} + +static uvm_cpu_chunk_t *get_parent_and_page_index(uvm_cpu_chunk_t *chunk, uvm_page_index_t *out_page_index) +{ + uvm_cpu_chunk_t *parent; + uvm_page_index_t page_index; + + UVM_ASSERT(chunk); + UVM_ASSERT(chunk->page); + UVM_ASSERT(out_page_index); + page_index = *out_page_index; + UVM_ASSERT(chunk->region.first <= page_index && page_index < chunk->region.outer); + + page_index = chunk->offset + (page_index - chunk->region.first); + parent = get_parent_cpu_chunk(chunk); + UVM_ASSERT(page_index < uvm_cpu_chunk_get_phys_size(parent) / PAGE_SIZE); + *out_page_index = page_index; + return parent; +} + +void uvm_cpu_chunk_mark_dirty(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + uvm_cpu_chunk_t *parent; + + parent = get_parent_and_page_index(chunk, &page_index); + if (uvm_cpu_chunk_get_phys_size(parent) == PAGE_SIZE) { + SetPageDirty(parent->page); + return; + } + + uvm_spin_lock(&parent->lock); + set_bit(page_index, parent->dirty_bitmap); + uvm_spin_unlock(&parent->lock); +} + +void uvm_cpu_chunk_mark_clean(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + uvm_cpu_chunk_t *parent; + + parent = get_parent_and_page_index(chunk, &page_index); + if (uvm_cpu_chunk_get_phys_size(parent) == PAGE_SIZE) { + ClearPageDirty(parent->page); + return; + } + + uvm_spin_lock(&parent->lock); + check_cpu_dirty_flag(parent, page_index); + clear_bit(page_index, parent->dirty_bitmap); + uvm_spin_unlock(&parent->lock); +} + +bool uvm_cpu_chunk_is_dirty(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + uvm_cpu_chunk_t *parent; + bool dirty; + + parent = get_parent_and_page_index(chunk, &page_index); + if (uvm_cpu_chunk_get_phys_size(parent) == PAGE_SIZE) + return PageDirty(parent->page); + + uvm_spin_lock(&parent->lock); + check_cpu_dirty_flag(parent, page_index); + dirty = test_bit(page_index, parent->dirty_bitmap); + uvm_spin_unlock(&parent->lock); + + return dirty; +} +#endif // !UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() + +uvm_cpu_chunk_t *uvm_cpu_chunk_first_in_block(uvm_va_block_t *va_block, uvm_page_index_t *out_page_index) +{ + uvm_cpu_chunk_t *chunk = NULL; + 
uvm_page_index_t page_index; + uvm_va_block_region_t block_region = uvm_va_block_region_from_block(va_block); + + page_index = uvm_va_block_first_page_in_mask(block_region, &va_block->cpu.allocated); + if (page_index < block_region.outer) + chunk = uvm_cpu_chunk_get_chunk_for_page(va_block, page_index); + + if (out_page_index) + *out_page_index = page_index; + + return chunk; +} + +uvm_cpu_chunk_t *uvm_cpu_chunk_next(uvm_va_block_t *va_block, uvm_page_index_t *previous_page_index) +{ + uvm_va_block_region_t block_region; + + UVM_ASSERT(va_block); + UVM_ASSERT(previous_page_index); + + block_region = uvm_va_block_region_from_block(va_block); + *previous_page_index = uvm_va_block_next_page_in_mask(block_region, &va_block->cpu.allocated, *previous_page_index); + if (*previous_page_index == block_region.outer) + return NULL; + + return uvm_cpu_chunk_get_chunk_for_page(va_block, *previous_page_index); +} diff --git a/kernel-open/nvidia-uvm/uvm_pmm_sysmem.h b/kernel-open/nvidia-uvm/uvm_pmm_sysmem.h new file mode 100644 index 000000000..053ac96f2 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pmm_sysmem.h @@ -0,0 +1,488 @@ +/******************************************************************************* + Copyright (c) 2017-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PMM_SYSMEM_H__ +#define __UVM_PMM_SYSMEM_H__ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_forward_decl.h" +#include "uvm_lock.h" + +// Module to handle per-GPU user mappings to sysmem physical memory. Notably, +// this implements a reverse map of the DMA address to {va_block, virt_addr}. +// This is required by the GPU access counters feature since they may provide a +// physical address in the notification packet (GPA notifications). We use the +// table to obtain the VAs of the memory regions being accessed remotely. The +// reverse map is implemented by a radix tree, which is indexed using the +// DMA address. For now, only PAGE_SIZE translations are supported (i.e. no +// big/huge pages). +// +// TODO: Bug 1995015: add support for physically-contiguous mappings. 
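+//
+// As a rough usage sketch (variable names here are hypothetical), a GPA
+// notification carrying a system-memory DMA address could be translated back
+// to the virtual pages it maps with something like:
+//
+//     uvm_reverse_map_t translation;
+//     size_t num = uvm_pmm_sysmem_mappings_dma_to_virt(sysmem_mappings,
+//                                                      dma_addr,  // page-aligned
+//                                                      PAGE_SIZE,
+//                                                      &translation,
+//                                                      1);
+//     if (num == 1) {
+//         // translation.va_block and translation.region identify the
+//         // virtual pages backed by this physical page. The block is
+//         // returned retained and must be released by the caller.
+//         uvm_va_block_release(translation.va_block);
+//     }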
+struct uvm_pmm_sysmem_mappings_struct +{ + uvm_gpu_t *gpu; + + struct radix_tree_root reverse_map_tree; + + uvm_mutex_t reverse_map_lock; +}; + +// See comments in uvm_linux.h +#ifdef NV_RADIX_TREE_REPLACE_SLOT_PRESENT +#define uvm_pmm_sysmem_mappings_indirect_supported() true +#else +#define uvm_pmm_sysmem_mappings_indirect_supported() false +#endif + +// Global initialization/exit functions, that need to be called during driver +// initialization/tear-down. These are needed to allocate/free global internal +// data structures. +NV_STATUS uvm_pmm_sysmem_init(void); +void uvm_pmm_sysmem_exit(void); + +// Initialize per-GPU sysmem mapping tracking +NV_STATUS uvm_pmm_sysmem_mappings_init(uvm_gpu_t *gpu, uvm_pmm_sysmem_mappings_t *sysmem_mappings); + +// Destroy per-GPU sysmem mapping tracking. The caller must ensure that all the +// mappings have been removed before calling this function. +void uvm_pmm_sysmem_mappings_deinit(uvm_pmm_sysmem_mappings_t *sysmem_mappings); + +// If the GPU used to initialize sysmem_mappings supports access counters, the +// dma_addr -> {va_block, virt_addr} mapping is inserted in the reverse map. +NV_STATUS uvm_pmm_sysmem_mappings_add_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 virt_addr, + NvU64 region_size, + uvm_va_block_t *va_block, + uvm_processor_id_t owner); + +static NV_STATUS uvm_pmm_sysmem_mappings_add_gpu_chunk_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 virt_addr, + NvU64 region_size, + uvm_va_block_t *va_block, + uvm_gpu_id_t owner) +{ + if (!uvm_pmm_sysmem_mappings_indirect_supported()) + return NV_OK; + + return uvm_pmm_sysmem_mappings_add_gpu_mapping(sysmem_mappings, + dma_addr, + virt_addr, + region_size, + va_block, + owner); +} + +// If the GPU used to initialize sysmem_mappings supports access counters, the +// entries for the physical region starting at dma_addr are removed from the +// reverse map. +void uvm_pmm_sysmem_mappings_remove_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, NvU64 dma_addr); + +static void uvm_pmm_sysmem_mappings_remove_gpu_chunk_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, NvU64 dma_addr) +{ + if (uvm_pmm_sysmem_mappings_indirect_supported()) + uvm_pmm_sysmem_mappings_remove_gpu_mapping(sysmem_mappings, dma_addr); +} + +// Like uvm_pmm_sysmem_mappings_remove_gpu_mapping but it doesn't assert if the +// mapping doesn't exist. See uvm_va_block_evict_chunks for more information. +void uvm_pmm_sysmem_mappings_remove_gpu_mapping_on_eviction(uvm_pmm_sysmem_mappings_t *sysmem_mappings, NvU64 dma_addr); + +// If the GPU used to initialize sysmem_mappings supports access counters, the +// mapping for the region starting at dma_addr is updated with va_block. +// This is required on VA block split. +void uvm_pmm_sysmem_mappings_reparent_gpu_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + uvm_va_block_t *va_block); + +static void uvm_pmm_sysmem_mappings_reparent_gpu_chunk_mapping(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + uvm_va_block_t *va_block) +{ + if (uvm_pmm_sysmem_mappings_indirect_supported()) + uvm_pmm_sysmem_mappings_reparent_gpu_mapping(sysmem_mappings, dma_addr, va_block); +} + +// If the GPU used to initialize sysmem_mappings supports access counters, the +// mapping for the region starting at dma_addr is split into regions of +// new_region_size. new_region_size must be a power of two and smaller than the +// previously-registered size. 
+NV_STATUS uvm_pmm_sysmem_mappings_split_gpu_mappings(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 new_region_size); + +static NV_STATUS uvm_pmm_sysmem_mappings_split_gpu_chunk_mappings(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 new_region_size) +{ + if (!uvm_pmm_sysmem_mappings_indirect_supported()) + return NV_OK; + + return uvm_pmm_sysmem_mappings_split_gpu_mappings(sysmem_mappings, dma_addr, new_region_size); +} + +// If the GPU used to initialize sysmem_mappings supports access counters, all +// the mappings within the region [dma_addr, dma_addr + new_region_size) are +// merged into a single mapping. new_region_size must be a power of two. The +// whole region must be previously populated with mappings and all of them must +// have the same VA block and processor owner. +void uvm_pmm_sysmem_mappings_merge_gpu_mappings(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 new_region_size); + +static void uvm_pmm_sysmem_mappings_merge_gpu_chunk_mappings(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 new_region_size) +{ + if (uvm_pmm_sysmem_mappings_indirect_supported()) + uvm_pmm_sysmem_mappings_merge_gpu_mappings(sysmem_mappings, dma_addr, new_region_size); +} + +// Obtain the {va_block, virt_addr} information for the mappings in the given +// [dma_addr:dma_addr + region_size) range. dma_addr and region_size must be +// page-aligned. +// +// Valid translations are written to out_mappings sequentially (there are no +// gaps). max_out_mappings are written, at most. The caller is required to +// provide enough entries in out_mappings. +// +// The VA Block in each returned translation entry is retained, and it's up to +// the caller to release them +size_t uvm_pmm_sysmem_mappings_dma_to_virt(uvm_pmm_sysmem_mappings_t *sysmem_mappings, + NvU64 dma_addr, + NvU64 region_size, + uvm_reverse_map_t *out_mappings, + size_t max_out_mappings); + +#define UVM_CPU_CHUNK_SIZES PAGE_SIZE + +#if UVM_CPU_CHUNK_SIZES == PAGE_SIZE +#define UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() 1 +typedef struct page uvm_cpu_chunk_t; +#else +#define UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() 0 +typedef struct uvm_cpu_chunk_struct uvm_cpu_chunk_t; + +// CPU memory chunk descriptor. +// CPU memory chunks represent a physically contiguous CPU memory +// allocation. +// CPU memory chunks can be created due to CPU page allocation or +// CPU chunk splitting. Chunks created due to page allocations are +// referred to as "physical chunks", while chunks resulting from +// splitting are referred to as "logical chunks". +struct uvm_cpu_chunk_struct +{ + // Pointer to the CPU page backing this CPU chunk. + // For physical chunks, this will point to the head page. Physical + // chunk allocation will set the reference count for the struct + // page (compound or not) to 1. + // + // For logical chunks, this will point to the struct page from + // the compound page array corresponding to the correct page index. + // Because freeing a logical chunk does not result in freeing of + // any struct page(s) and both physical and logical chunks are + // reference counted, there is no need to take separate references + // to the struct page for logical chunks. + struct page *page; + + // For logical chunks, this points to the parent chunk (which + // could also be a logical chunk). For physical chunks, this + // is NULL. + uvm_cpu_chunk_t *parent; + + // Page offset of this chunk within the physical size of + // the parent. 
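+    // For example (hypothetical split, assuming a 4K PAGE_SIZE): splitting a
+    // 64K physical chunk into 4K logical chunks leaves the parent covering
+    // the first page and creates children with offsets 1..15 into the
+    // parent's physical allocation.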
+    uvm_page_index_t offset;
+
+    // Region within the VA block covered by this CPU chunk.
+    uvm_va_block_region_t region;
+
+    // Chunk reference count used when a CPU chunk is split. Each
+    // child sub-chunk will increment the reference count of its
+    // parent.
+    nv_kref_t refcount;
+
+    // Size of the chunk at the time of its creation.
+    // For chunks that are the result of a split, this
+    // value will be the size of the chunk prior to the
+    // split.
+    // For chunks resulting from page allocations (physical),
+    // this value is the size of the physical allocation.
+    size_t log2_phys_size : order_base_2(UVM_CHUNK_SIZE_MASK_SIZE);
+
+    struct {
+        // Per-GPU array of DMA mapping addresses for the chunk.
+        // The DMA mapping addresses for logical chunks are adjusted
+        // to the correct offset within the parent chunk.
+        union {
+            NvU64 static_entry;
+            NvU64 *dynamic_entries;
+        };
+        uvm_processor_mask_t dma_addrs_mask;
+    } gpu_mappings;
+
+    // Lock protecting dirty_bitmap
+    uvm_spinlock_t lock;
+
+    // A dynamically allocated bitmap (one per PAGE_SIZE page) used
+    // to track dirty state of each PAGE_SIZE page.
+    // Dirty state is tracked only by physical chunks. Therefore,
+    // for logical chunks this will be NULL.
+    unsigned long *dirty_bitmap;
+};
+#endif // UVM_CPU_CHUNK_SIZES == PAGE_SIZE
+
+// Return the set of allowed CPU chunk allocation sizes.
+uvm_chunk_sizes_mask_t uvm_cpu_chunk_get_allocation_sizes(void);
+
+// Allocate a physical CPU chunk for the specified page index and owned by
+// va_block.
+//
+// The size of the allocated CPU chunk may be any of the allowed sizes and
+// depends on several factors:
+//     * Allocation will be attempted in reverse order - highest to lowest - in
+//       order to ensure that the highest possible size is used.
+//     * An allocation size will be used if:
+//       - the VA region within the block covered by the allocation size is
+//         aligned to that allocation size,
+//       - the VA block region corresponding to the allocation size is empty
+//         (has no previously populated pages), and
+//       - the system allows a page allocation of that size.
+//
+// If mm is not NULL, the chunk's memory will be added to the mm's memory cgroup.
+//
+// If a CPU chunk allocation succeeds, NV_OK is returned. If new_chunk is not
+// NULL it will be set to point to the newly allocated chunk. On failure,
+// NV_ERR_NO_MEMORY is returned.
+NV_STATUS uvm_cpu_chunk_alloc(uvm_va_block_t *va_block,
+                              uvm_page_index_t page_index,
+                              struct mm_struct *mm,
+                              uvm_cpu_chunk_t **new_chunk);
+
+// Insert a CPU chunk in the va_block's storage structures.
+//
+// On success, NV_OK is returned. On error,
+//   - NV_ERR_NO_MEMORY is returned if memory allocation for any of the internal
+//     structures did not succeed.
+//   - NV_ERR_INVALID_ARGUMENT is returned if the size of the chunk to be inserted
+//     is invalid.
+//   - NV_ERR_INVALID_STATE is returned if a matching chunk already exists in the
+//     block.
+NV_STATUS uvm_cpu_chunk_insert_in_block(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index);
+
+// Remove a CPU chunk from the va_block's storage structures.
+// The chunk is not freed, only removed from the block's storage structures.
+void uvm_cpu_chunk_remove_from_block(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index);
+
+// Return the CPU chunk backing page_index within the VA block.
+// If page_index is beyond the boundary of the VA block or a CPU chunk for +// the specified page has not been allocated and/or inserted into the block, +// NULL is returned. +uvm_cpu_chunk_t *uvm_cpu_chunk_get_chunk_for_page(uvm_va_block_t *block, uvm_page_index_t page_index); + +// Return the physical size of the CPU chunk. +// The physical size of the CPU chunk is the size of the physical CPU +// memory backing the CPU chunk. It is set at CPU chunk allocation time +static uvm_chunk_size_t uvm_cpu_chunk_get_phys_size(uvm_cpu_chunk_t *chunk) +{ +#if UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() + return (uvm_chunk_size_t)PAGE_SIZE; +#else + return ((uvm_chunk_size_t)1) << chunk->log2_phys_size; +#endif +} + +// Return the size of the CPU chunk. While the physical size of the CPU +// chunk reflects the size of the physical memory backing the chunk, this +// size is the effective size of the chunk and changes as result of CPU +// chunk splits. +uvm_chunk_size_t uvm_cpu_chunk_get_size(uvm_cpu_chunk_t *chunk); + +// Return the number of base system pages covered by the CPU chunk. +static size_t uvm_cpu_chunk_num_pages(uvm_cpu_chunk_t *chunk) +{ + UVM_ASSERT(chunk); + return uvm_cpu_chunk_get_size(chunk) / PAGE_SIZE; +} + +static bool uvm_cpu_chunk_is_physical(uvm_cpu_chunk_t *chunk) +{ +#if UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() + return true; +#else + return chunk->parent == NULL; +#endif +} + +// Return a pointer to the struct page backing page_index within the owning +// VA block. +struct page *uvm_cpu_chunk_get_cpu_page(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index); + +// Take a reference to the CPU chunk. +void uvm_cpu_chunk_get(uvm_cpu_chunk_t *chunk); + +// Release a reference to the CPU chunk. When the reference count +// drops to zero, the CPU chunk will be freed. Physical CPU chunks +// will also free the CPU pages backing the chunk. +void uvm_cpu_chunk_put(uvm_cpu_chunk_t *chunk); + +NV_STATUS uvm_cpu_chunk_gpu_mapping_alloc(uvm_va_block_t *va_block, uvm_gpu_id_t id); +void uvm_cpu_chunk_gpu_mapping_split(uvm_va_block_t *existing, uvm_va_block_t *new, uvm_gpu_id_t id); +void uvm_cpu_chunk_gpu_mapping_free(uvm_va_block_t *va_block, uvm_gpu_id_t id); + +// Set the CPU chunk's DMA mapping address for the specified GPU ID. +NV_STATUS uvm_cpu_chunk_set_gpu_mapping_addr(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_cpu_chunk_t *chunk, + uvm_gpu_id_t id, + NvU64 dma_addr); + +// Get the CPU chunk's DMA mapping address for the specified GPU ID. +NvU64 uvm_cpu_chunk_get_gpu_mapping_addr(uvm_va_block_t *block, + uvm_page_index_t page_index, + uvm_cpu_chunk_t *chunk, + uvm_gpu_id_t id); + +#if !UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE() +// Split a CPU chunk into a set of CPU chunks of size new_size. +// new_size has to be one of the supported CPU chunk allocation sizes and has to +// be smaller than the current size of chunk. +// +// On success, NV_OK is returned. All new chunks will have chunk as parent and +// chunk's size will have been updated to new_size. +// +// Note that due to the way CPU chunks are managed and split, the number of +// newly created chunks will be (size_of(chunk) / new_size) - 1. +// +// On failure NV_ERR_NO_MEMORY will be returned. chunk's size will not be +// modified. +NV_STATUS uvm_cpu_chunk_split(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_chunk_size_t new_size); + +// Merge chunk's parent to the highest possible CPU chunk size fully contained +// within the parent's owning VA block. 
+//
+// The size to which chunks are merged is determined by finding the largest
+// size from the set of allowed CPU chunk sizes that satisfies both criteria
+// below:
+//     * The VA range of the parent chunk resulting from the merge has to be
+//       fully contained within the VA block.
+//     * The start and end VA addresses of the parent based on its physical
+//       size have to be aligned to the merge size.
+//
+// It is possible that a merge cannot be done if chunk does not have a parent
+// (it is a physical chunk), chunk's owning VA block is not the same as
+// its parent's owning VA block, or there is no chunk size that satisfies both
+// the above criteria.
+//
+// Return a pointer to the merged chunk. If a merge could not be done, return
+// NULL.
+uvm_cpu_chunk_t *uvm_cpu_chunk_merge(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk);
+
+// Mark the CPU sub-page page_index in the CPU chunk as dirty.
+// page_index has to be a page within the chunk's region.
+void uvm_cpu_chunk_mark_dirty(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index);
+
+// Mark the CPU sub-page page_index in the CPU chunk as clean.
+// page_index has to be a page within the chunk's region.
+void uvm_cpu_chunk_mark_clean(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index);
+
+// Return true if the CPU sub-page page_index in the CPU chunk is dirty.
+// page_index has to be a page within the chunk's region.
+bool uvm_cpu_chunk_is_dirty(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index);
+
+#else // UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE()
+
+static NV_STATUS uvm_cpu_chunk_split(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk, uvm_chunk_size_t new_size)
+{
+    return NV_OK;
+}
+
+static uvm_cpu_chunk_t *uvm_cpu_chunk_merge(uvm_va_block_t *va_block, uvm_cpu_chunk_t *chunk)
+{
+    return NULL;
+}
+
+static void uvm_cpu_chunk_mark_dirty(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index)
+{
+    SetPageDirty(chunk);
+}
+
+static void uvm_cpu_chunk_mark_clean(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index)
+{
+    ClearPageDirty(chunk);
+}
+
+static bool uvm_cpu_chunk_is_dirty(uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index)
+{
+    return PageDirty(chunk);
+}
+#endif // !UVM_CPU_CHUNK_SIZE_IS_PAGE_SIZE()
+
+// Return the first CPU chunk in the block. If no CPU chunks have been
+// allocated and/or inserted into the block, NULL is returned.
+// If not NULL, page_index will be set to the first page of the block covered by
+// the returned chunk.
+uvm_cpu_chunk_t *uvm_cpu_chunk_first_in_block(uvm_va_block_t *va_block, uvm_page_index_t *out_page_index);
+
+// Return the next CPU chunk in the block owning chunk.
+// previous_page_index is the index after which to start searching. Its value
+// will be updated with the starting page index of the next chunk in the block.
+uvm_cpu_chunk_t *uvm_cpu_chunk_next(uvm_va_block_t *va_block, uvm_page_index_t *previous_page_index);
+
+#define for_each_cpu_chunk_in_block(chunk, page_index, va_block)                                                      \
+    for ((chunk) = uvm_cpu_chunk_first_in_block((va_block), &(page_index));                                           \
+         (chunk) != NULL;                                                                                             \
+         (page_index) += uvm_cpu_chunk_num_pages(chunk) - 1, (chunk) = uvm_cpu_chunk_next((va_block), &(page_index)))
+
+#define for_each_cpu_chunk_in_block_safe(chunk, page_index, next_page_index, va_block)                                \
+    for ((chunk) = uvm_cpu_chunk_first_in_block((va_block), &(page_index)),                                           \
+         (next_page_index) = (page_index) + ((chunk) ?
uvm_cpu_chunk_num_pages(chunk) : 0); \ + (chunk) != NULL; \ + (page_index) = (next_page_index) - 1, (chunk) = uvm_cpu_chunk_next((va_block), &(page_index)), \ + (next_page_index) = (page_index) + ((chunk) ? uvm_cpu_chunk_num_pages(chunk) : 0)) + +// Use a special symbol for the region so it does not replace the chunk's region +// structure member. +#define for_each_cpu_chunk_in_block_region(chunk, page_index, va_block, __region) \ + for ((page_index) = uvm_va_block_first_page_in_mask((__region), &(va_block)->cpu.allocated), \ + (chunk) = uvm_cpu_chunk_get_chunk_for_page((va_block), (page_index)); \ + (chunk) != NULL && page_index < (__region).outer; \ + (page_index) += uvm_cpu_chunk_num_pages(chunk) - 1, (chunk) = uvm_cpu_chunk_next((va_block), &(page_index)) + +#define for_each_cpu_chunk_in_block_region_safe(chunk, page_index, next_page_index, va_block, __region) \ + for ((page_index) = uvm_va_block_first_page_in_mask((__region), &(va_block)->cpu.allocated), \ + (chunk) = uvm_cpu_chunk_get_chunk_for_page((va_block), (page_index)), \ + (next_page_index) = (page_index) + (chunk ? uvm_cpu_chunk_num_pages(chunk) : 0); \ + (chunk) != NULL && page_index < (__region).outer; \ + (page_index) = (next_page_index) - 1, (chunk) = uvm_cpu_chunk_next((va_block), &(page_index)), \ + (next_page_index) = (page_index) + (chunk ? uvm_cpu_chunk_num_pages(chunk) : 0)) + +static NV_STATUS uvm_test_get_cpu_chunk_allocation_sizes(UVM_TEST_GET_CPU_CHUNK_ALLOC_SIZES_PARAMS *params, + struct file *filp) +{ + params->alloc_size_mask = (NvU32)uvm_cpu_chunk_get_allocation_sizes(); + return NV_OK; +} +#endif diff --git a/kernel-open/nvidia-uvm/uvm_pmm_sysmem_test.c b/kernel-open/nvidia-uvm/uvm_pmm_sysmem_test.c new file mode 100644 index 000000000..5e8646951 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pmm_sysmem_test.c @@ -0,0 +1,578 @@ +/******************************************************************************* + Copyright (c) 2017-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_test.h" +#include "uvm_test_ioctl.h" + +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_pmm_sysmem.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_va_space.h" + +// Pre-allocated array used for dma-to-virt translations +static uvm_reverse_map_t g_sysmem_translations[PAGES_PER_UVM_VA_BLOCK]; + +// We use our own separate reverse map to easily specify contiguous DMA +// address ranges +static uvm_pmm_sysmem_mappings_t g_reverse_map; + +static uvm_gpu_t *g_volta_plus_gpu; + +// Check that the DMA addresses in the range defined by +// [base_dma_addr:base_dma_addr + uvm_va_block_size(va_block)] and page_mask +// are registered in the reverse map, using one call per entry. The returned +// virtual addresses must belong to va_block. The function assumes a 1:1 +// dma-to-virt mapping for the whole VA block +static NV_STATUS check_reverse_map_block_page(uvm_va_block_t *va_block, + NvU64 base_dma_addr, + const uvm_page_mask_t *page_mask) +{ + uvm_page_index_t page_index; + + for_each_va_block_page(page_index, va_block) { + size_t num_pages; + + memset(g_sysmem_translations, 0, sizeof(g_sysmem_translations)); + num_pages = uvm_pmm_sysmem_mappings_dma_to_virt(&g_reverse_map, + base_dma_addr + page_index * PAGE_SIZE, + PAGE_SIZE, + g_sysmem_translations, + PAGES_PER_UVM_VA_BLOCK); + if (!page_mask || uvm_page_mask_test(page_mask, page_index)) { + TEST_CHECK_RET(num_pages == 1); + TEST_CHECK_RET(g_sysmem_translations[0].va_block == va_block); + TEST_CHECK_RET(nv_kref_read(&va_block->kref) >= 2); + TEST_CHECK_RET(uvm_reverse_map_start(&g_sysmem_translations[0]) == uvm_va_block_cpu_page_address(va_block, page_index)); + TEST_CHECK_RET(uvm_va_block_region_num_pages(g_sysmem_translations[0].region) == 1); + TEST_CHECK_RET(UVM_ID_IS_CPU(g_sysmem_translations[0].owner)); + uvm_va_block_release(g_sysmem_translations[0].va_block); + } + else { + TEST_CHECK_RET(num_pages == 0); + } + } + + return NV_OK; +} + +// Check that the DMA addresses in the range defined by +// [base_dma_addr:base_dma_addr + uvm_va_block_size(va_block)] and page_mask +// are registered in the reverse map, using a single translation call. The +// returned virtual addresses must belong to va_block. 
The function assumes a +// 1:1 dma-to-virt mapping for the whole VA block +static NV_STATUS check_reverse_map_block_batch(uvm_va_block_t *va_block, + NvU64 base_dma_addr, + const uvm_page_mask_t *page_mask) +{ + size_t num_translations; + size_t num_pages; + size_t reverse_map_index; + + memset(g_sysmem_translations, 0, sizeof(g_sysmem_translations)); + num_translations = uvm_pmm_sysmem_mappings_dma_to_virt(&g_reverse_map, + base_dma_addr, + uvm_va_block_size(va_block), + g_sysmem_translations, + PAGES_PER_UVM_VA_BLOCK); + if (num_translations == 0 && page_mask) + TEST_CHECK_RET(uvm_page_mask_empty(page_mask)); + + num_pages = 0; + for (reverse_map_index = 0; reverse_map_index < num_translations; ++reverse_map_index) { + uvm_reverse_map_t *reverse_map = &g_sysmem_translations[reverse_map_index]; + size_t num_reverse_map_pages = uvm_va_block_region_num_pages(reverse_map->region); + + num_pages += num_reverse_map_pages; + + TEST_CHECK_RET(reverse_map->va_block == va_block); + TEST_CHECK_RET(nv_kref_read(&va_block->kref) >= 2); + uvm_va_block_release(reverse_map->va_block); + TEST_CHECK_RET(UVM_ID_IS_CPU(reverse_map->owner)); + } + + if (page_mask) + TEST_CHECK_RET(num_pages == uvm_page_mask_weight(page_mask)); + else + TEST_CHECK_RET(num_pages == uvm_va_block_num_cpu_pages(va_block)); + + return NV_OK; +} + +// Check that the DMA addresses for all the CPU pages of the two given VA blocks +// are registered in the reverse map, using a single translation call. The +// returned virtual addresses must belong to one of the blocks. The function +// assumes a 1:1 dma-to-virt mapping for each VA block and that va_block1 is +// mapped behind va_block0. +static NV_STATUS check_reverse_map_two_blocks_batch(NvU64 base_dma_addr, + uvm_va_block_t *va_block0, + uvm_va_block_t *va_block1) +{ + size_t num_pages; + size_t num_translations; + size_t reverse_map_index; + + memset(g_sysmem_translations, 0, sizeof(g_sysmem_translations)); + num_translations = uvm_pmm_sysmem_mappings_dma_to_virt(&g_reverse_map, + base_dma_addr, + UVM_VA_BLOCK_SIZE, + g_sysmem_translations, + PAGES_PER_UVM_VA_BLOCK); + TEST_CHECK_RET(num_translations == 2); + + num_pages = 0; + for (reverse_map_index = 0; reverse_map_index < num_translations; ++reverse_map_index) { + uvm_va_block_t *block; + uvm_reverse_map_t *reverse_map = &g_sysmem_translations[reverse_map_index]; + NvU64 virt_addr = uvm_reverse_map_start(reverse_map); + size_t num_reverse_map_pages = uvm_va_block_region_num_pages(reverse_map->region); + + if (reverse_map_index == 0) + block = va_block0; + else + block = va_block1; + + TEST_CHECK_RET(reverse_map->va_block == block); + TEST_CHECK_RET(nv_kref_read(&block->kref) >= 2); + uvm_va_block_release(reverse_map->va_block); + TEST_CHECK_RET(num_reverse_map_pages == uvm_va_block_num_cpu_pages(block)); + TEST_CHECK_RET(virt_addr == block->start); + TEST_CHECK_RET(UVM_ID_IS_CPU(reverse_map->owner)); + + num_pages += num_reverse_map_pages; + } + + TEST_CHECK_RET(num_pages == uvm_va_block_num_cpu_pages(va_block0) + uvm_va_block_num_cpu_pages(va_block1)); + + return NV_OK; +} + +static const NvU64 g_base_dma_addr = UVM_VA_BLOCK_SIZE; + +// This function adds the mappings for all the subregions in va_block defined +// by page_mask. g_base_dma_addr is used as the base DMA address for the whole +// VA block. 
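+// The rough sequence exercised for each subregion is: add the mapping, verify
+// it with both per-page and batched lookups, optionally split it into
+// split_size pieces (and optionally merge it back), then remove it and verify
+// that no stale translations remain.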
+static NV_STATUS test_pmm_sysmem_reverse_map_single(uvm_va_block_t *va_block, + uvm_page_mask_t *page_mask, + uvm_chunk_size_t split_size, + bool merge) +{ + NV_STATUS status = NV_OK; + uvm_va_block_region_t subregion; + + TEST_CHECK_RET(is_power_of_2(split_size)); + TEST_CHECK_RET(split_size >= PAGE_SIZE); + + for_each_va_block_subregion_in_mask(subregion, page_mask, uvm_va_block_region_from_block(va_block)) { + TEST_CHECK_RET(is_power_of_2(uvm_va_block_region_size(subregion))); + uvm_mutex_lock(&va_block->lock); + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&g_reverse_map, + g_base_dma_addr + subregion.first * PAGE_SIZE, + va_block->start + subregion.first * PAGE_SIZE, + uvm_va_block_region_size(subregion), + va_block, + UVM_ID_CPU); + uvm_mutex_unlock(&va_block->lock); + if (status != NV_OK) + return status; + } + + TEST_CHECK_RET(check_reverse_map_block_page(va_block, g_base_dma_addr, page_mask) == NV_OK); + TEST_CHECK_RET(check_reverse_map_block_batch(va_block, g_base_dma_addr, page_mask) == NV_OK); + + if (split_size != UVM_CHUNK_SIZE_MAX) { + for_each_va_block_subregion_in_mask(subregion, page_mask, uvm_va_block_region_from_block(va_block)) { + TEST_CHECK_RET(uvm_va_block_region_size(subregion) > split_size); + + uvm_mutex_lock(&va_block->lock); + status = uvm_pmm_sysmem_mappings_split_gpu_mappings(&g_reverse_map, + g_base_dma_addr + subregion.first * PAGE_SIZE, + split_size); + uvm_mutex_unlock(&va_block->lock); + TEST_CHECK_RET(status == NV_OK); + } + + TEST_CHECK_RET(check_reverse_map_block_page(va_block, g_base_dma_addr, page_mask) == NV_OK); + TEST_CHECK_RET(check_reverse_map_block_batch(va_block, g_base_dma_addr, page_mask) == NV_OK); + } + + if (split_size != UVM_CHUNK_SIZE_MAX && merge) { + for_each_va_block_subregion_in_mask(subregion, page_mask, uvm_va_block_region_from_block(va_block)) { + uvm_pmm_sysmem_mappings_merge_gpu_mappings(&g_reverse_map, + g_base_dma_addr + subregion.first * PAGE_SIZE, + uvm_va_block_region_size(subregion)); + } + + TEST_CHECK_RET(check_reverse_map_block_page(va_block, g_base_dma_addr, page_mask) == NV_OK); + TEST_CHECK_RET(check_reverse_map_block_batch(va_block, g_base_dma_addr, page_mask) == NV_OK); + } + + for_each_va_block_subregion_in_mask(subregion, page_mask, uvm_va_block_region_from_block(va_block)) { + NvU64 subregion_dma_addr = g_base_dma_addr + subregion.first * PAGE_SIZE; + + if (split_size == UVM_CHUNK_SIZE_MAX || merge) { + uvm_mutex_lock(&va_block->lock); + uvm_pmm_sysmem_mappings_remove_gpu_mapping(&g_reverse_map, subregion_dma_addr); + uvm_mutex_unlock(&va_block->lock); + } + else { + size_t chunk; + size_t num_chunks = uvm_va_block_region_size(subregion) / split_size; + TEST_CHECK_RET(num_chunks > 1); + + uvm_mutex_lock(&va_block->lock); + + for (chunk = 0; chunk < num_chunks; ++chunk) + uvm_pmm_sysmem_mappings_remove_gpu_mapping(&g_reverse_map, subregion_dma_addr + chunk * split_size); + + uvm_mutex_unlock(&va_block->lock); + } + } + + uvm_page_mask_zero(page_mask); + + TEST_CHECK_RET(check_reverse_map_block_page(va_block, g_base_dma_addr, page_mask) == NV_OK); + TEST_CHECK_RET(check_reverse_map_block_batch(va_block, g_base_dma_addr, page_mask) == NV_OK); + + return status; +} + +static uvm_page_mask_t g_page_mask; + +static NV_STATUS test_pmm_sysmem_reverse_map_single_whole(uvm_va_space_t *va_space, NvU64 addr) +{ + NV_STATUS status; + uvm_va_block_t *va_block; + const bool merge_array[] = {false, true}; + const uvm_chunk_size_t chunk_split_array[] = { UVM_CHUNK_SIZE_4K, UVM_CHUNK_SIZE_64K, UVM_CHUNK_SIZE_MAX }; + 
unsigned merge_index; + unsigned chunk_split_index; + + status = uvm_va_block_find(va_space, addr, &va_block); + if (status != NV_OK) + return status; + + TEST_CHECK_RET(is_power_of_2(uvm_va_block_size(va_block))); + + for (merge_index = 0; merge_index < ARRAY_SIZE(merge_array); ++merge_index) { + for (chunk_split_index = 0; chunk_split_index < ARRAY_SIZE(chunk_split_array); ++chunk_split_index) { + // The reverse map has PAGE_SIZE granularity + if (chunk_split_array[chunk_split_index] < PAGE_SIZE) + continue; + + uvm_page_mask_region_fill(&g_page_mask, uvm_va_block_region_from_block(va_block)); + + TEST_CHECK_RET(test_pmm_sysmem_reverse_map_single(va_block, + &g_page_mask, + chunk_split_array[chunk_split_index], + merge_array[merge_index]) == NV_OK); + } + } + + return status; +} + +static NV_STATUS test_pmm_sysmem_reverse_map_single_pattern(uvm_va_space_t *va_space, NvU64 addr) +{ + NV_STATUS status; + uvm_va_block_t *va_block; + uvm_page_index_t page_index; + + status = uvm_va_block_find(va_space, addr, &va_block); + if (status != NV_OK) + return status; + + uvm_page_mask_zero(&g_page_mask); + + for_each_va_block_page(page_index, va_block) { + if (page_index % 2 == 0) + uvm_page_mask_set(&g_page_mask, page_index); + } + + return test_pmm_sysmem_reverse_map_single(va_block, &g_page_mask, UVM_CHUNK_SIZE_MAX, false); +} + +// This function assumes that addr points at a VA range with 4 sized VA blocks +// with size UVM_VA_BLOCK_SIZE / 4. +static NV_STATUS test_pmm_sysmem_reverse_map_many_blocks(uvm_va_space_t *va_space, NvU64 addr) +{ + NV_STATUS status; + uvm_va_block_t *va_block0; + uvm_va_block_t *va_block1; + NvU64 base_dma_addr0; + NvU64 base_dma_addr1; + + status = uvm_va_block_find(va_space, addr + UVM_VA_BLOCK_SIZE / 4, &va_block0); + if (status != NV_OK) + return status; + + status = uvm_va_block_find(va_space, addr + 3 * UVM_VA_BLOCK_SIZE / 4, &va_block1); + if (status != NV_OK) + return status; + + TEST_CHECK_RET(va_block0 != va_block1); + + base_dma_addr0 = g_base_dma_addr + uvm_va_block_size(va_block0); + base_dma_addr1 = base_dma_addr0 + uvm_va_block_size(va_block0); + + TEST_CHECK_RET(is_power_of_2(uvm_va_block_size(va_block0))); + TEST_CHECK_RET(is_power_of_2(uvm_va_block_size(va_block1))); + + uvm_mutex_lock(&va_block0->lock); + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&g_reverse_map, + base_dma_addr0, + va_block0->start, + uvm_va_block_size(va_block0), + va_block0, + UVM_ID_CPU); + uvm_mutex_unlock(&va_block0->lock); + TEST_CHECK_RET(status == NV_OK); + + uvm_mutex_lock(&va_block1->lock); + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&g_reverse_map, + base_dma_addr1, + va_block1->start, + uvm_va_block_size(va_block1), + va_block1, + UVM_ID_CPU); + uvm_mutex_unlock(&va_block1->lock); + + // Check each VA block individually + if (status == NV_OK) { + TEST_CHECK_GOTO(check_reverse_map_block_page(va_block0, base_dma_addr0, NULL) == NV_OK, error); + TEST_CHECK_GOTO(check_reverse_map_block_batch(va_block0, base_dma_addr0, NULL) == NV_OK, error); + TEST_CHECK_GOTO(check_reverse_map_block_page(va_block1, base_dma_addr1, NULL) == NV_OK, error); + TEST_CHECK_GOTO(check_reverse_map_block_batch(va_block1, base_dma_addr1, NULL) == NV_OK, error); + + // Check both VA blocks at the same time + TEST_CHECK_GOTO(check_reverse_map_two_blocks_batch(g_base_dma_addr, va_block0, va_block1) == NV_OK, error); + +error: + uvm_mutex_lock(&va_block1->lock); + uvm_pmm_sysmem_mappings_remove_gpu_mapping(&g_reverse_map, base_dma_addr1); + uvm_mutex_unlock(&va_block1->lock); + } + + 
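+    // The va_block0 mapping was added before the checks above, so it is torn
+    // down here on both the success and the error paths.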
uvm_mutex_lock(&va_block0->lock); + uvm_pmm_sysmem_mappings_remove_gpu_mapping(&g_reverse_map, base_dma_addr0); + uvm_mutex_unlock(&va_block0->lock); + + return status; +} + +// This function registers a non-uniform distribution of chunks (mixing 4K and 64K chunks) +// and merges them back to verify that the logic is working. +static NV_STATUS test_pmm_sysmem_reverse_map_merge(uvm_va_space_t *va_space, NvU64 addr) +{ + NV_STATUS status = NV_OK; + uvm_va_block_t *va_block; + const unsigned chunks_64k_pos[] = + { + 16, + 64, + 96, + 192, + 208, + 224, + 288, + 320, + 384, + 480 + }; + uvm_page_index_t page_index; + unsigned i; + + if (PAGE_SIZE != UVM_PAGE_SIZE_4K) + return NV_OK; + + status = uvm_va_block_find(va_space, addr, &va_block); + if (status != NV_OK) + return status; + + TEST_CHECK_RET(uvm_va_block_size(va_block) == UVM_VA_BLOCK_SIZE); + + page_index = 0; + for (i = 0; i < ARRAY_SIZE(chunks_64k_pos); ++i) { + // Fill with 4K mappings until the next 64K mapping + while (page_index < chunks_64k_pos[i]) { + uvm_mutex_lock(&va_block->lock); + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&g_reverse_map, + g_base_dma_addr + page_index * PAGE_SIZE, + uvm_va_block_cpu_page_address(va_block, page_index), + PAGE_SIZE, + va_block, + UVM_ID_CPU); + uvm_mutex_unlock(&va_block->lock); + TEST_CHECK_RET(status == NV_OK); + + ++page_index; + } + + // Register the 64K mapping + uvm_mutex_lock(&va_block->lock); + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&g_reverse_map, + g_base_dma_addr + page_index * PAGE_SIZE, + uvm_va_block_cpu_page_address(va_block, page_index), + UVM_CHUNK_SIZE_64K, + va_block, + UVM_ID_CPU); + uvm_mutex_unlock(&va_block->lock); + TEST_CHECK_RET(status == NV_OK); + + page_index += UVM_PAGE_SIZE_64K / PAGE_SIZE; + } + + // Fill the tail with 4K mappings, too + while (page_index < PAGES_PER_UVM_VA_BLOCK) { + uvm_mutex_lock(&va_block->lock); + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&g_reverse_map, + g_base_dma_addr + page_index * PAGE_SIZE, + uvm_va_block_cpu_page_address(va_block, page_index), + PAGE_SIZE, + va_block, + UVM_ID_CPU); + uvm_mutex_unlock(&va_block->lock); + TEST_CHECK_RET(status == NV_OK); + + ++page_index; + } + + TEST_CHECK_RET(check_reverse_map_block_page(va_block, g_base_dma_addr, NULL) == NV_OK); + TEST_CHECK_RET(check_reverse_map_block_batch(va_block, g_base_dma_addr, NULL) == NV_OK); + + uvm_mutex_lock(&va_block->lock); + uvm_pmm_sysmem_mappings_merge_gpu_mappings(&g_reverse_map, + g_base_dma_addr, + uvm_va_block_size(va_block)); + uvm_mutex_unlock(&va_block->lock); + + TEST_CHECK_RET(check_reverse_map_block_page(va_block, g_base_dma_addr, NULL) == NV_OK); + TEST_CHECK_RET(check_reverse_map_block_batch(va_block, g_base_dma_addr, NULL) == NV_OK); + + uvm_mutex_lock(&va_block->lock); + uvm_pmm_sysmem_mappings_remove_gpu_mapping(&g_reverse_map, g_base_dma_addr); + uvm_mutex_unlock(&va_block->lock); + + return status; +} + +static NV_STATUS test_pmm_sysmem_reverse_map_remove_on_eviction(uvm_va_space_t *va_space, NvU64 addr) +{ + uvm_va_block_t *va_block; + NV_STATUS status = uvm_va_block_find(va_space, addr, &va_block); + + if (status != NV_OK) + return status; + + TEST_CHECK_RET(is_power_of_2(uvm_va_block_size(va_block))); + + uvm_mutex_lock(&va_block->lock); + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&g_reverse_map, + g_base_dma_addr, + addr, + uvm_va_block_size(va_block), + va_block, + UVM_ID_CPU); + uvm_mutex_unlock(&va_block->lock); + + uvm_mutex_lock(&va_block->lock); + uvm_pmm_sysmem_mappings_remove_gpu_mapping(&g_reverse_map, 
g_base_dma_addr); + uvm_mutex_unlock(&va_block->lock); + + TEST_CHECK_RET(status == NV_OK); + + uvm_pmm_sysmem_mappings_remove_gpu_mapping_on_eviction(&g_reverse_map, g_base_dma_addr); + uvm_pmm_sysmem_mappings_remove_gpu_mapping_on_eviction(&g_reverse_map, g_base_dma_addr); + + return NV_OK; +} + +static NV_STATUS test_pmm_sysmem_reverse_map(uvm_va_space_t *va_space, NvU64 addr1, NvU64 addr2) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + + g_volta_plus_gpu = NULL; + + // Find a GPU with support for access counters, since it is required to add + // or remove entries to the reverse map. + for_each_va_space_gpu(gpu, va_space) { + if (gpu->parent->access_counters_supported) { + // Initialize the reverse map. + status = uvm_pmm_sysmem_mappings_init(gpu, &g_reverse_map); + if (status != NV_OK) + return status; + + g_volta_plus_gpu = gpu; + break; + } + } + + if (!g_volta_plus_gpu) + return NV_ERR_INVALID_DEVICE; + + status = test_pmm_sysmem_reverse_map_single_whole(va_space, addr1); + + if (status == NV_OK) + status = test_pmm_sysmem_reverse_map_single_pattern(va_space, addr1); + + if (status == NV_OK) + status = test_pmm_sysmem_reverse_map_many_blocks(va_space, addr2); + + if (status == NV_OK) + status = test_pmm_sysmem_reverse_map_merge(va_space, addr1); + + if (status == NV_OK) + status = test_pmm_sysmem_reverse_map_remove_on_eviction(va_space, addr1); + + uvm_pmm_sysmem_mappings_deinit(&g_reverse_map); + + return status; +} + +NV_STATUS uvm_test_pmm_sysmem(UVM_TEST_PMM_SYSMEM_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space; + + va_space = uvm_va_space_get(filp); + + // Take the global lock to void interferences from different instances of + // the test, since we use a bunch of global variables + uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_va_space_down_write(va_space); + + if (uvm_pmm_sysmem_mappings_indirect_supported()) { + status = test_pmm_sysmem_reverse_map(va_space, params->range_address1, params->range_address2); + } + else { + UVM_TEST_PRINT("Skipping kernel_driver_pmm_sysmem test due to lack of support for radix_tree_replace_slot in Linux 4.10"); + status = NV_OK; + } + + uvm_va_space_up_write(va_space); + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_pmm_test.c b/kernel-open/nvidia-uvm/uvm_pmm_test.c new file mode 100644 index 000000000..0fc76e1b1 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pmm_test.c @@ -0,0 +1,1449 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_pmm_gpu.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_va_space.h" +#include "uvm_tracker.h" +#include "uvm_push.h" +#include "uvm_mem.h" +#include "uvm_kvmalloc.h" + +#include "uvm_test.h" +#include "uvm_test_ioctl.h" +#include "uvm_test_rng.h" + +#define CHUNKS_PER_BUCKET 128 + +typedef struct +{ + struct list_head entry; + uvm_gpu_chunk_t *chunks[CHUNKS_PER_BUCKET]; +} pmm_leak_bucket_t; + +typedef struct +{ + uvm_gpu_chunk_t *chunk; + uvm_tracker_t tracker; + NvU32 pattern; + struct list_head node; +} test_chunk_t; + +// When the basic_test free_pattern is BASIC_TEST_FREE_PATTERN_EVERY_N, this +// controls how many allocs to do before a free. +#define BASIC_TEST_FREE_EVERY_N 3 + +// Number of allocations to make in part of basic_test. This is 33 because +// that's a decent balance between the widest gap between chunk levels (causing +// us to fill up at least one root chunk), and 33*UVM_CHUNK_SIZE_MAX isn't too +// big. +#define BASIC_TEST_STATIC_ALLOCATIONS 33 + +typedef enum +{ + BASIC_TEST_FREE_PATTERN_IMMEDIATE, + BASIC_TEST_FREE_PATTERN_ALL_FORWARD, + BASIC_TEST_FREE_PATTERN_ALL_REVERSE, + BASIC_TEST_FREE_PATTERN_EVERY_N, + BASIC_TEST_FREE_PATTERN_COUNT +} basic_test_free_pattern_t; + +typedef struct +{ + // List of all allocated test_chunk_t's + struct list_head list; + + // Total number of chunks allocated in this test + size_t num_chunks_total; + + uvm_va_space_t *va_space; + uvm_pmm_gpu_t *pmm; + uvm_mem_t *verif_mem; + uvm_pmm_gpu_memory_type_t type; + basic_test_free_pattern_t free_pattern; +} basic_test_state_t; + +typedef enum +{ + SPLIT_TEST_MODE_NORMAL, + SPLIT_TEST_MODE_MERGE, + SPLIT_TEST_MODE_INJECT_ERROR, + SPLIT_TEST_MODE_COUNT +} split_test_mode_t; + +// This helper needs to stay in sync with the one in uvm_pmm_gpu.c +// It is duplicated because we do not want to expose it as an API. +static uvm_pmm_gpu_memory_type_t pmm_squash_memory_type(uvm_parent_gpu_t *parent_gpu, uvm_pmm_gpu_memory_type_t type) +{ + + + + + + + + + + + + + + + + + + + + return type; +} + +// Verify that the input chunks are in the correct state following alloc +static NV_STATUS check_chunks(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t **chunks, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_gpu_memory_type_t mem_type) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + size_t i; + + mem_type = pmm_squash_memory_type(gpu->parent, mem_type); + for (i = 0; i < num_chunks; i++) { + TEST_CHECK_RET(chunks[i]); + TEST_CHECK_RET(chunks[i]->suballoc == NULL); + TEST_CHECK_RET(chunks[i]->type == mem_type); + TEST_CHECK_RET(chunks[i]->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED); + TEST_CHECK_RET(uvm_gpu_chunk_get_size(chunks[i]) == chunk_size); + TEST_CHECK_RET(IS_ALIGNED(chunks[i]->address, chunk_size)); + } + + return NV_OK; +} + +static NV_STATUS check_alloc_tracker(uvm_pmm_gpu_t *pmm, uvm_tracker_t *tracker) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + uvm_tracker_entry_t *tracker_entry; + + // The tracker entries returned from an alloc are not allowed to contain + // entries for any GPU other than the owner. 
This is to prevent leaking + // tracker entries from other GPUs into VA spaces which never registered + // those GPUs. + for_each_tracker_entry(tracker_entry, tracker) + TEST_CHECK_RET(uvm_tracker_entry_gpu(tracker_entry) == gpu); + + return NV_OK; +} + +static NV_STATUS chunk_alloc_check_common(uvm_pmm_gpu_t *pmm, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_gpu_memory_type_t mem_type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunks, + uvm_tracker_t *local_tracker, + uvm_tracker_t *tracker) +{ + NV_STATUS status; + NV_STATUS check_status; + + check_status = check_alloc_tracker(pmm, local_tracker); + + if (tracker) { + status = uvm_tracker_add_tracker_safe(tracker, local_tracker); + uvm_tracker_clear(local_tracker); + } + else { + status = uvm_tracker_wait(local_tracker); + } + uvm_tracker_deinit(local_tracker); + + if (check_status == NV_OK) + check_status = status; + + if (check_status != NV_OK) + return check_status; + + return check_chunks(pmm, chunks, num_chunks, chunk_size, mem_type); +} + +static NV_STATUS chunk_alloc_check(uvm_pmm_gpu_t *pmm, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_gpu_memory_type_t mem_type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunks, + uvm_tracker_t *tracker) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + + // If the GPU has no memory then PMA will fail with NV_ERR_INVALID_ARGUMENT. + // Callers assert that only NV_OK or NV_ERR_NO_MEMORY are returned here, so + // replace the error to avoid noise. + if (gpu->mem_info.size == 0) + return NV_ERR_NO_MEMORY; + + status = uvm_pmm_gpu_alloc(pmm, num_chunks, chunk_size, mem_type, flags, chunks, &local_tracker); + if (status != NV_OK) + return status; + + return chunk_alloc_check_common(pmm, num_chunks, chunk_size, mem_type, flags, chunks, &local_tracker, tracker); +} + +static NV_STATUS chunk_alloc_user_check(uvm_pmm_gpu_t *pmm, + size_t num_chunks, + uvm_chunk_size_t chunk_size, + uvm_pmm_gpu_memory_type_t mem_type, + uvm_pmm_alloc_flags_t flags, + uvm_gpu_chunk_t **chunks, + uvm_tracker_t *tracker) +{ + NV_STATUS status; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + + status = uvm_pmm_gpu_alloc(pmm, num_chunks, chunk_size, mem_type, flags, chunks, &local_tracker); + if (status != NV_OK) + return status; + + return chunk_alloc_check_common(pmm, num_chunks, chunk_size, mem_type, flags, chunks, &local_tracker, tracker); +} + +static NV_STATUS check_leak(uvm_gpu_t *gpu, uvm_chunk_size_t chunk_size, uvm_pmm_gpu_memory_type_t type, NvS64 limit, NvU64 *chunks) +{ + NV_STATUS status = NV_OK; + pmm_leak_bucket_t *bucket, *next; + LIST_HEAD(allocations); + *chunks = 0; + while (limit != *chunks) { + int k; + pmm_leak_bucket_t *allocated; + allocated = kzalloc(sizeof(pmm_leak_bucket_t), GFP_KERNEL); + if (allocated == NULL) { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + list_add(&allocated->entry, &allocations); + for (k = 0; k < CHUNKS_PER_BUCKET && limit != *chunks; k++) { + status = chunk_alloc_check(&gpu->pmm, + 1, + chunk_size, + type, + UVM_PMM_ALLOC_FLAGS_NONE, + &allocated->chunks[k], + NULL); + UVM_ASSERT(status == NV_OK || status == NV_ERR_NO_MEMORY); + if (status != NV_OK) { + if (limit == -1 && status == NV_ERR_NO_MEMORY) + status = NV_OK; + goto cleanup; + } + (*chunks)++; + if (fatal_signal_pending(current)) { + status = NV_ERR_SIGNAL_PENDING; + goto cleanup; + } + } + } +cleanup: + list_for_each_entry_safe(bucket, next, &allocations, entry) { + int k; + for (k = 0; k < 
CHUNKS_PER_BUCKET; k++) { + if (!bucket->chunks[k]) + break; + uvm_pmm_gpu_free(&gpu->pmm, bucket->chunks[k], NULL); + } + list_del(&bucket->entry); + kfree(bucket); + } + return status; +} + +// Tracker is an in/out dependency +static NV_STATUS do_memset_4(uvm_gpu_t *gpu, uvm_gpu_address_t dst, NvU32 val, size_t size, uvm_tracker_t *tracker) +{ + NV_STATUS status; + uvm_push_t push; + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_INTERNAL, + tracker, + &push, + "memset {%s, 0x%llx} %zu bytes to 0x%08x", + uvm_gpu_address_aperture_string(dst), + dst.address, + size, + val); + if (status != NV_OK) + return status; + + gpu->parent->ce_hal->memset_4(&push, dst, val, size); + uvm_push_end(&push); + uvm_tracker_overwrite_with_push(tracker, &push); + + return NV_OK; +} + +// Tracker is an in/out dependency +static NV_STATUS gpu_mem_check(uvm_gpu_t *gpu, + uvm_mem_t *verif_mem, + uvm_gpu_address_t src, + size_t size, + NvU32 expected, + uvm_tracker_t *tracker) +{ + NV_STATUS status; + uvm_push_t push; + uvm_gpu_address_t verif_gpu_addr; + NvU32 *verif_cpu_addr = uvm_mem_get_cpu_addr_kernel(verif_mem); + size_t i; + + + + + + + + + + UVM_ASSERT(verif_mem->size >= size); + memset(verif_cpu_addr, 0, size); + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_TO_CPU, + tracker, + &push, + "GPU -> CPU {%s, 0x%llx} %zu bytes expecting 0x%08x", + uvm_gpu_address_aperture_string(src), + src.address, + size, + expected); + if (status != NV_OK) + return status; + + verif_gpu_addr = uvm_mem_gpu_address_virtual_kernel(verif_mem, gpu); + gpu->parent->ce_hal->memcopy(&push, verif_gpu_addr, src, size); + TEST_NV_CHECK_RET(uvm_push_end_and_wait(&push)); + + for (i = 0; i < size / sizeof(verif_cpu_addr[0]); i++) { + if (verif_cpu_addr[i] != expected) { + UVM_TEST_PRINT("GPU read of {%s, 0x%llx} %zu bytes expected pattern 0x%08x, but offset %zu is 0x%08x\n", + uvm_gpu_address_aperture_string(src), + src.address, + size, + expected, + i * sizeof(verif_cpu_addr[0]), + verif_cpu_addr[i]); + return NV_ERR_INVALID_STATE; + } + } + + return NV_OK; +} + +static NV_STATUS init_test_chunk(uvm_va_space_t *va_space, + uvm_pmm_gpu_t *pmm, + test_chunk_t *test_chunk, + uvm_pmm_gpu_memory_type_t type, + uvm_chunk_size_t size, + NvU32 pattern) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status = NV_OK; + uvm_push_t push; + uvm_gpu_address_t chunk_addr; + uvm_gpu_t *other_gpu; + + INIT_LIST_HEAD(&test_chunk->node); + uvm_tracker_init(&test_chunk->tracker); + test_chunk->pattern = pattern; + + MEM_NV_CHECK_RET(chunk_alloc_check(pmm, 1, size, type, UVM_PMM_ALLOC_FLAGS_EVICT, &test_chunk->chunk, &test_chunk->tracker), NV_OK); + + TEST_NV_CHECK_GOTO(uvm_mmu_chunk_map(test_chunk->chunk), chunk_free); + + if (uvm_mmu_gpu_needs_static_vidmem_mapping(gpu) || uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)) + chunk_addr = uvm_gpu_address_virtual_from_vidmem_phys(gpu, test_chunk->chunk->address); + else + chunk_addr = uvm_gpu_address_physical(UVM_APERTURE_VID, test_chunk->chunk->address); + + // Fill the chunk + TEST_NV_CHECK_GOTO(do_memset_4(gpu, chunk_addr, pattern, size, &test_chunk->tracker), chunk_unmap); + + // Launch dummy pushes on all other GPUs. This will increase the chances of + // a subsequent re-alloc of this chunk needing to synchronize the tracker. + // See the tracker comment in check_alloc_tracker. 
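+    // If this chunk is freed while those pushes are still pending, a later
+    // allocation of the same chunk must not see the foreign-GPU entries in the
+    // tracker it gets back; check_alloc_tracker() verifies that on every
+    // allocation.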
+ for_each_va_space_gpu(other_gpu, va_space) { + if (other_gpu == gpu) + continue; + + status = uvm_push_begin(other_gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &push, + "dummy push for chunk {%s, %u} with 0x%08x", + uvm_pmm_gpu_memory_type_string(type), + size, + pattern); + TEST_NV_CHECK_GOTO(status, chunk_unmap); + + other_gpu->parent->host_hal->noop(&push, 4); + + uvm_push_end(&push); + TEST_NV_CHECK_GOTO(uvm_tracker_add_push_safe(&test_chunk->tracker, &push), chunk_unmap); + } + + return NV_OK; + +chunk_unmap: + uvm_mmu_chunk_unmap(test_chunk->chunk, &test_chunk->tracker); + +chunk_free: + uvm_pmm_gpu_free(pmm, test_chunk->chunk, &test_chunk->tracker); + uvm_tracker_deinit(&test_chunk->tracker); + return status; +} + +static NV_STATUS destroy_test_chunk(uvm_pmm_gpu_t *pmm, test_chunk_t *test_chunk, uvm_mem_t *verif_mem) +{ + uvm_gpu_t *gpu = uvm_pmm_to_gpu(pmm); + NV_STATUS status; + uvm_gpu_address_t chunk_addr; + uvm_gpu_chunk_t *chunk = test_chunk->chunk; + uvm_chunk_size_t size = uvm_gpu_chunk_get_size(chunk); + + if (uvm_mmu_gpu_needs_static_vidmem_mapping(gpu) || uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)) + chunk_addr = uvm_gpu_address_virtual_from_vidmem_phys(gpu, chunk->address); + else + chunk_addr = uvm_gpu_address_physical(UVM_APERTURE_VID, chunk->address); + + status = gpu_mem_check(gpu, verif_mem, chunk_addr, size, test_chunk->pattern, &test_chunk->tracker); + + list_del(&test_chunk->node); + uvm_mmu_chunk_unmap(chunk, &test_chunk->tracker); + uvm_pmm_gpu_free(pmm, chunk, &test_chunk->tracker); + uvm_tracker_deinit(&test_chunk->tracker); + return status; +} + +static bool basic_test_should_free(basic_test_state_t *test_state) +{ + if (test_state->free_pattern == BASIC_TEST_FREE_PATTERN_IMMEDIATE) + return true; + + return test_state->free_pattern == BASIC_TEST_FREE_PATTERN_EVERY_N && + (test_state->num_chunks_total % BASIC_TEST_FREE_EVERY_N) == 0; +} + +static NV_STATUS basic_test_alloc(basic_test_state_t *test_state, uvm_chunk_size_t size) +{ + test_chunk_t *test_chunk; + NvU32 pattern; + NV_STATUS status = NV_OK; + + test_chunk = uvm_kvmalloc_zero(sizeof(*test_chunk)); + if (!test_chunk) { + UVM_TEST_PRINT("Failed to allocate test_chunk\n"); + return NV_ERR_NO_MEMORY; + } + + pattern = current->pid | (test_state->num_chunks_total << 16); + + status = init_test_chunk(test_state->va_space, test_state->pmm, test_chunk, test_state->type, size, pattern); + if (status != NV_OK) { + uvm_kvfree(test_chunk); + return status; + } + + list_add_tail(&test_chunk->node, &test_state->list); + ++test_state->num_chunks_total; + + if (basic_test_should_free(test_state)) { + test_chunk = list_first_entry(&test_state->list, test_chunk_t, node); + status = destroy_test_chunk(test_state->pmm, test_chunk, test_state->verif_mem); + uvm_kvfree(test_chunk); + } + + return status; +} + +static NV_STATUS basic_test_free_all(basic_test_state_t *test_state) +{ + test_chunk_t *test_chunk; + NV_STATUS temp_status, status = NV_OK; + + while (!list_empty(&test_state->list)) { + if (test_state->free_pattern == BASIC_TEST_FREE_PATTERN_ALL_REVERSE) + test_chunk = list_last_entry(&test_state->list, test_chunk_t, node); + else // Handles cleanup and BASIC_TEST_FREE_PATTERN_ALL_FORWARD + test_chunk = list_first_entry(&test_state->list, test_chunk_t, node); + + temp_status = destroy_test_chunk(test_state->pmm, test_chunk, test_state->verif_mem); + if (status == NV_OK) + status = temp_status; + + uvm_kvfree(test_chunk); + } + + return status; +} + +// Try to allocate enough smaller chunks to fully 
fill the largest chunk, plus +// a little extra. +static size_t basic_test_num_allocations(uvm_chunk_size_t size) +{ + return (UVM_CHUNK_SIZE_MAX / size) + 1; +} + +// - Allocate multiple chunks of all possible sizes and types using various +// patterns +// - Write a unique value to each chunk +// - Free those chunks in various patterns, verifying the unique value +static NV_STATUS basic_test(uvm_va_space_t *va_space, uvm_gpu_t *gpu, + UvmTestPmmSanityMode mode) +{ + uvm_chunk_size_t size; + uvm_chunk_sizes_mask_t chunk_sizes; + basic_test_state_t test_state; + NV_STATUS status = NV_OK; // Implicitly modified by TEST_NV_CHECK_GOTO + size_t i; + int first_memory_type, last_memory_type; + int first_free_pattern, last_free_pattern; + + if (mode == UvmTestPmmSanityModeBasic) { + first_memory_type = UVM_PMM_GPU_MEMORY_TYPE_USER; + + + + last_memory_type = UVM_PMM_GPU_MEMORY_TYPE_USER; + + first_free_pattern = BASIC_TEST_FREE_PATTERN_EVERY_N; + last_free_pattern = BASIC_TEST_FREE_PATTERN_EVERY_N; + } + else { + first_memory_type = 0; + last_memory_type = UVM_PMM_GPU_MEMORY_TYPE_COUNT - 1; + + first_free_pattern = 0; + last_free_pattern = BASIC_TEST_FREE_PATTERN_COUNT - 1; + } + + // Note that we can't really test PMM in isolation, since even pushing work + // to the GPU requires using PMM to create GPU page tables for the + // pushbuffers. We could handle that in theory by forcing sysmem page + // tables, but that would require re-allocating the entire GPU address + // space. + + memset(&test_state, 0, sizeof(test_state)); + INIT_LIST_HEAD(&test_state.list); + test_state.va_space = va_space; + test_state.pmm = &gpu->pmm; + MEM_NV_CHECK_RET(uvm_mem_alloc_sysmem_and_map_cpu_kernel(UVM_CHUNK_SIZE_MAX, current->mm, &test_state.verif_mem), + NV_OK); + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(test_state.verif_mem, gpu), out); + + for (test_state.type = first_memory_type; test_state.type <= last_memory_type; test_state.type++) { + // In SR-IOV heavy, virtual mappings will be created for each chunk + // this test allocates before accessing it on the GPU. But currently + // it is not allowed to map a kernel chunk, so skip those. 
+ if (uvm_gpu_is_virt_mode_sriov_heavy(gpu) && uvm_pmm_gpu_memory_type_is_kernel(test_state.type)) + continue; + + chunk_sizes = gpu->pmm.chunk_sizes[test_state.type]; + + for (test_state.free_pattern = first_free_pattern; + test_state.free_pattern <= last_free_pattern; + test_state.free_pattern++) { + + // Outer loop over size, increasing + size = uvm_chunk_find_first_size(chunk_sizes); + for_each_chunk_size_from(size, chunk_sizes) { + for (i = 0; i < basic_test_num_allocations(size); i++) + TEST_NV_CHECK_GOTO(basic_test_alloc(&test_state, size), out); + } + TEST_NV_CHECK_GOTO(basic_test_free_all(&test_state), out); + + // Outer loop over size, decreasing + size = uvm_chunk_find_last_size(chunk_sizes); + for_each_chunk_size_rev_from(size, chunk_sizes) { + for (i = 0; i < basic_test_num_allocations(size); i++) + TEST_NV_CHECK_GOTO(basic_test_alloc(&test_state, size), out); + } + TEST_NV_CHECK_GOTO(basic_test_free_all(&test_state), out); + + // Inner loop over size, increasing + for (i = 0; i < BASIC_TEST_STATIC_ALLOCATIONS; i++) { + size = uvm_chunk_find_first_size(chunk_sizes); + for_each_chunk_size_from(size, chunk_sizes) + TEST_NV_CHECK_GOTO(basic_test_alloc(&test_state, size), out); + } + TEST_NV_CHECK_GOTO(basic_test_free_all(&test_state), out); + + // Inner loop over size, decreasing + for (i = 0; i < BASIC_TEST_STATIC_ALLOCATIONS; i++) { + size = uvm_chunk_find_last_size(chunk_sizes); + for_each_chunk_size_rev_from(size, chunk_sizes) + TEST_NV_CHECK_GOTO(basic_test_alloc(&test_state, size), out); + } + TEST_NV_CHECK_GOTO(basic_test_free_all(&test_state), out); + } + } + +out: + if (status != NV_OK) + basic_test_free_all(&test_state); + UVM_ASSERT(list_empty(&test_state.list)); + uvm_mem_free(test_state.verif_mem); + return status; +} + +static NV_STATUS get_subchunks_test(uvm_pmm_gpu_t *pmm, + uvm_gpu_chunk_t *parent, + uvm_gpu_chunk_t **expected_children, + size_t num_children) +{ + uvm_gpu_chunk_t **subchunks = NULL; + NV_STATUS status = NV_OK; + size_t count, start_index, size = num_children * sizeof(subchunks[0]); + + subchunks = uvm_kvmalloc(size); + if (!subchunks) { + UVM_TEST_PRINT("Failed to allocate subchunks\n"); + return NV_ERR_NO_MEMORY; + } + + // Verify all + memset(subchunks, 0, size); + TEST_CHECK_GOTO(uvm_pmm_gpu_get_subchunks(pmm, parent, 0, num_children, subchunks) == num_children, out); + TEST_CHECK_GOTO(memcmp(expected_children, subchunks, num_children * sizeof(subchunks[0])) == 0, out); + + // Get first half + count = num_children / 2; + memset(subchunks, 0, size); + TEST_CHECK_GOTO(uvm_pmm_gpu_get_subchunks(pmm, parent, 0, count, subchunks) == count, out); + TEST_CHECK_GOTO(memcmp(expected_children, subchunks, count * sizeof(subchunks[0])) == 0, out); + + // Get second half, intentionally requesting more subchunks than available + start_index = num_children / 2; + count = num_children - start_index; + memset(subchunks, 0, size); + TEST_CHECK_GOTO(uvm_pmm_gpu_get_subchunks(pmm, parent, start_index, num_children, subchunks) == count, out); + TEST_CHECK_GOTO(memcmp(&expected_children[start_index], subchunks, count * sizeof(subchunks[0])) == 0, out); + + // Larger-than-possible start_index + TEST_CHECK_GOTO(uvm_pmm_gpu_get_subchunks(pmm, parent, num_children, 1, subchunks) == 0, out); + +out: + uvm_kvfree(subchunks); + return status; +} + +// Always frees parent chunk, even on error return +static NV_STATUS split_test_single(uvm_pmm_gpu_t *pmm, + test_chunk_t *parent, + uvm_chunk_size_t child_size, + split_test_mode_t mode, + uvm_mem_t *verif_mem) +{ + 
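+    // Split the parent into child_size chunks, then depending on the mode
+    // either verify that an injected split error leaves the parent intact,
+    // merge the children back and free the parent, or free each child
+    // individually while checking that the parent's fill pattern survived the
+    // split.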
uvm_pmm_gpu_memory_type_t parent_type = parent->chunk->type; + uvm_chunk_size_t parent_size = uvm_gpu_chunk_get_size(parent->chunk); + NvU64 parent_addr = parent->chunk->address; + size_t i, num_children = (size_t)(parent_size / child_size); + uvm_gpu_chunk_t **split_chunks = NULL; + uvm_gpu_chunk_t *temp_chunk; + test_chunk_t child_wrapper; + NV_STATUS temp_status, status = NV_OK; + + // Verify that we can get "subchunks" of a non-split chunk + TEST_CHECK_RET(uvm_pmm_gpu_get_subchunks(pmm, parent->chunk, 0, 2, &temp_chunk) == 1); + TEST_CHECK_RET(temp_chunk == parent->chunk); + + split_chunks = uvm_kvmalloc(num_children * sizeof(split_chunks[0])); + if (!split_chunks) { + UVM_TEST_PRINT("Failed to allocate split_chunks\n"); + status = NV_ERR_NO_MEMORY; + goto error; + } + + if (mode == SPLIT_TEST_MODE_INJECT_ERROR) + parent->chunk->inject_split_error = true; + + status = uvm_pmm_gpu_split_chunk(pmm, parent->chunk, child_size, split_chunks); + + if (mode == SPLIT_TEST_MODE_INJECT_ERROR) { + // This case verifies that a split failure will leave the chunk in its + // original state. + + if (status != NV_ERR_NO_MEMORY) { + UVM_TEST_PRINT("Injecting split error failed, returned %s\n", nvstatusToString(status)); + status = NV_ERR_INVALID_STATE; + + // Let the error label clean up the split children + parent->chunk = NULL; + goto error; + } + + status = destroy_test_chunk(pmm, parent, verif_mem); + } + else { + TEST_NV_CHECK_GOTO(status, error); + + temp_chunk = parent->chunk; + parent->chunk = NULL; + + // Sanity check split + for (i = 0; i < num_children; i++) { + TEST_CHECK_GOTO(split_chunks[i], error); + TEST_CHECK_GOTO(split_chunks[i]->address == parent_addr + i * child_size, error); + TEST_CHECK_GOTO(split_chunks[i]->suballoc == NULL, error); + TEST_CHECK_GOTO(split_chunks[i]->type == parent_type, error); + TEST_CHECK_GOTO(split_chunks[i]->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED, error); + TEST_CHECK_GOTO(uvm_gpu_chunk_get_size(split_chunks[i]) == child_size, error); + } + + status = get_subchunks_test(pmm, temp_chunk, split_chunks, num_children); + if (status != NV_OK) + goto error; + + if (mode == SPLIT_TEST_MODE_MERGE) { + parent->chunk = temp_chunk; + uvm_pmm_gpu_merge_chunk(pmm, parent->chunk); + TEST_CHECK_GOTO(parent->chunk->address == parent_addr, error); + TEST_CHECK_GOTO(parent->chunk->suballoc == NULL, error); + TEST_CHECK_GOTO(parent->chunk->state == UVM_PMM_GPU_CHUNK_STATE_TEMP_PINNED, error); + status = destroy_test_chunk(pmm, parent, verif_mem); + } + else { + // Destroy split chunks, verifying the original pattern + for (i = 0; i < num_children; i++) { + child_wrapper.chunk = split_chunks[i]; + child_wrapper.pattern = parent->pattern; + temp_status = uvm_tracker_init_from(&child_wrapper.tracker, &parent->tracker); + if (status == NV_OK) + status = temp_status; + + // destroy_test_chunk does list_del + INIT_LIST_HEAD(&child_wrapper.node); + + temp_status = destroy_test_chunk(pmm, &child_wrapper, verif_mem); + if (status == NV_OK) + status = temp_status; + } + + uvm_tracker_deinit(&parent->tracker); + } + } + + uvm_kvfree(split_chunks); + return status; + +error: + if (parent->chunk) { + uvm_mmu_chunk_unmap(parent->chunk, &parent->tracker); + uvm_pmm_gpu_free(pmm, parent->chunk, &parent->tracker); + } + else { + for (i = 0; i < num_children; i++) { + uvm_mmu_chunk_unmap(split_chunks[i], &parent->tracker); + uvm_pmm_gpu_free(pmm, split_chunks[i], &parent->tracker); + } + } + + uvm_kvfree(split_chunks); + return status; +} + +// Splits each possible non-leaf chunk size 
into all possible sizes below that +// size, and verifies that the data in the chunk remains intact. +static NV_STATUS split_test(uvm_va_space_t *va_space, uvm_gpu_t *gpu) +{ + uvm_pmm_gpu_memory_type_t type; + uvm_chunk_size_t parent_size, child_size; + NvU32 pattern; + NvU32 count = 0; + test_chunk_t parent_test_chunk; + NV_STATUS status = NV_OK; + uvm_mem_t *verif_mem = NULL; + split_test_mode_t mode; + + // Check the num_subchunks == 0 case + TEST_CHECK_RET(uvm_pmm_gpu_get_subchunks(&gpu->pmm, NULL, 0, 0, NULL) == 0); + + MEM_NV_CHECK_RET(uvm_mem_alloc_sysmem_and_map_cpu_kernel(UVM_CHUNK_SIZE_MAX, current->mm, &verif_mem), NV_OK); + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(verif_mem, gpu), out); + + for (type = 0; type < UVM_PMM_GPU_MEMORY_TYPE_COUNT; type++) { + // In SR-IOV heavy, virtual mappings will be created for each chunk + // this test allocates before accessing it on the GPU. But currently + // it is not allowed to map a kernel chunk, so skip those. + if (uvm_gpu_is_virt_mode_sriov_heavy(gpu) && uvm_pmm_gpu_memory_type_is_kernel(type)) + continue; + + // Test every available parent size except the smallest, which obviously + // can't be split. + parent_size = uvm_chunk_find_next_size(gpu->pmm.chunk_sizes[type], + uvm_chunk_find_first_size(gpu->pmm.chunk_sizes[type])); + + for_each_chunk_size_from(parent_size, gpu->pmm.chunk_sizes[type]) { + // Split from parent_size to every smaller supported size + child_size = uvm_chunk_find_prev_size(gpu->pmm.chunk_sizes[type], parent_size); + + for_each_chunk_size_rev_from(child_size, gpu->pmm.chunk_sizes[type]) { + + for (mode = 0; mode < SPLIT_TEST_MODE_COUNT; mode++) { + pattern = current->pid | (count << 16); + ++count; + + status = init_test_chunk(va_space, &gpu->pmm, &parent_test_chunk, type, parent_size, pattern); + if (status != NV_OK) + goto out; + + status = split_test_single(&gpu->pmm, &parent_test_chunk, child_size, mode, verif_mem); + if (status != NV_OK) + goto out; + } + } + } + } + +out: + uvm_mem_free(verif_mem); + return status; +} + +NV_STATUS uvm_test_pmm_query(UVM_TEST_PMM_QUERY_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + switch (params->key) { + case UVM_TEST_CHUNK_SIZE_GET_USER_SIZE: + params->value = gpu->pmm.chunk_sizes[UVM_PMM_GPU_MEMORY_TYPE_USER]; + status = NV_OK; + break; + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + uvm_gpu_release(gpu); + return status; +} + +NV_STATUS uvm_test_pmm_sanity(UVM_TEST_PMM_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu; + + if (params->mode != UvmTestPmmSanityModeBasic && + params->mode != UvmTestPmmSanityModeFull) { + return NV_ERR_INVALID_ARGUMENT; + } + + uvm_va_space_down_read(va_space); + + for_each_va_space_gpu(gpu, va_space) { + status = basic_test(va_space, gpu, params->mode); + if (status != NV_OK) + goto out; + + status = split_test(va_space, gpu); + if (status != NV_OK) + goto out; + } + +out: + uvm_va_space_up_read(va_space); + return status; +} + +NV_STATUS uvm_test_pmm_check_leak(UVM_TEST_PMM_CHECK_LEAK_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + uvm_pmm_gpu_memory_type_t first_user_mode = UVM_PMM_GPU_MEMORY_TYPE_USER; + uvm_pmm_gpu_memory_type_t 
last_user_mode = UVM_PMM_GPU_MEMORY_TYPE_USER; + uvm_pmm_gpu_memory_type_t current_user_mode = first_user_mode; + + + + + + if (params->alloc_limit < -1) + return NV_ERR_INVALID_ARGUMENT; + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + for (; current_user_mode <= last_user_mode; current_user_mode++) { + status = check_leak(gpu, params->chunk_size, current_user_mode, params->alloc_limit, ¶ms->allocated); + if (status != NV_OK) + break; + } + + uvm_gpu_release(gpu); + return status; +} + +NV_STATUS __test_pmm_async_alloc_type(uvm_va_space_t *va_space, + uvm_gpu_t *gpu, + size_t num_chunks, + uvm_pmm_gpu_memory_type_t mem_type, + size_t work_iterations) +{ + NV_STATUS status; + NV_STATUS tracker_status = NV_OK; + uvm_gpu_chunk_t **chunks; + uvm_chunk_size_t chunk_size = PAGE_SIZE; + uvm_gpu_t *work_gpu; + uvm_mem_t *dummy_buffer = NULL; + uvm_mem_alloc_params_t mem_params; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + uvm_push_t push; + NvU32 i; + uvm_global_processor_mask_t global_mask; + + chunks = uvm_kvmalloc_zero(num_chunks * sizeof(chunks[0])); + if (!chunks) + return NV_ERR_NO_MEMORY; + + memset(&mem_params, 0, sizeof(mem_params)); + mem_params.backing_gpu = NULL; + mem_params.size = 1024*1024; + mem_params.mm = current->mm; + status = uvm_mem_alloc(&mem_params, &dummy_buffer); + if (status != NV_OK) + goto out; + + uvm_va_space_global_gpus(va_space, &global_mask); + + status = uvm_mem_map_kernel(dummy_buffer, &global_mask); + if (status != NV_OK) + goto out; + + // Alloc lots of small chunks to trigger suballocation + status = chunk_alloc_user_check(&gpu->pmm, + num_chunks, + chunk_size, + mem_type, + UVM_PMM_ALLOC_FLAGS_NONE, + chunks, + &tracker); + if (status != NV_OK) + goto out; + + // Push long-running work on all GPUs and collect it all in the tracker + for (i = 0; i < work_iterations; i++) { + if (fatal_signal_pending(current)) { + status = NV_ERR_SIGNAL_PENDING; + goto out; + } + + for_each_va_space_gpu(work_gpu, va_space) { + // Acquire the prior iteration just to make things even slower + status = uvm_push_begin_acquire(work_gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_INTERNAL, + &tracker, + &push, + "memset"); + if (status != NV_OK) + goto out; + + work_gpu->parent->ce_hal->memset_1(&push, + uvm_mem_gpu_address_virtual_kernel(dummy_buffer, work_gpu), + 0, + mem_params.size); + uvm_push_end(&push); + + TEST_NV_CHECK_GOTO(uvm_tracker_add_push_safe(&tracker, &push), out); + } + } + + // Free every other chunk to keep the suballocation around + for (i = 0; i < num_chunks; i += 2) { + uvm_pmm_gpu_free(&gpu->pmm, chunks[i], &tracker); + chunks[i] = NULL; + } + + // Re-alloc chunks to verify that the returned trackers don't have work for + // other GPUs (chunk_alloc_user_check() checks that). + for (i = 0; i < num_chunks; i += 2) { + status = chunk_alloc_user_check(&gpu->pmm, + 1, + chunk_size, + mem_type, + UVM_PMM_ALLOC_FLAGS_NONE, + &chunks[i], + NULL); + if (status != NV_OK) + goto out; + } + +out: + if (chunks) { + for (i = 0; i < num_chunks; i++) { + if (chunks[i]) + uvm_pmm_gpu_free(&gpu->pmm, chunks[i], &tracker); + } + } + + tracker_status = uvm_tracker_wait_deinit(&tracker); + uvm_mem_free(dummy_buffer); + uvm_kvfree(chunks); + + return status == NV_OK ? 
tracker_status : status; +} + +NV_STATUS uvm_test_pmm_async_alloc(UVM_TEST_PMM_ASYNC_ALLOC_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu; + uvm_pmm_gpu_memory_type_t first_user_mode = UVM_PMM_GPU_MEMORY_TYPE_USER; + uvm_pmm_gpu_memory_type_t last_user_mode = UVM_PMM_GPU_MEMORY_TYPE_USER; + uvm_pmm_gpu_memory_type_t current_user_mode = first_user_mode; + + + + + + uvm_va_space_down_read(va_space); + gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu) { + uvm_va_space_up_read(va_space); + return NV_ERR_INVALID_DEVICE; + } + + for (; current_user_mode <= last_user_mode; current_user_mode++) { + status = __test_pmm_async_alloc_type(va_space, + gpu, + params->num_chunks, + current_user_mode, + params->num_work_iterations); + if (status != NV_OK) + break; + } + + uvm_va_space_up_read(va_space); + + return status; +} + +static uvm_reverse_map_t g_reverse_map_entries[PAGES_PER_UVM_VA_BLOCK * 4]; + +static NV_STATUS test_pmm_reverse_map_single(uvm_gpu_t *gpu, uvm_va_space_t *va_space, NvU64 addr) +{ + NV_STATUS status = NV_OK; + NvU32 num_translations; + uvm_va_block_t *va_block; + uvm_gpu_phys_address_t phys_addr; + bool is_resident; + + status = uvm_va_block_find(va_space, addr, &va_block); + if (status != NV_OK) + return status; + + TEST_CHECK_RET(uvm_va_block_size(va_block) == UVM_VA_BLOCK_SIZE); + + // Verify that all pages are populated on the GPU + uvm_mutex_lock(&va_block->lock); + + is_resident = uvm_processor_mask_test(&va_block->resident, gpu->id) && + uvm_page_mask_full(uvm_va_block_resident_mask_get(va_block, gpu->id)); + if (is_resident) + phys_addr = uvm_va_block_gpu_phys_page_address(va_block, 0, gpu); + + uvm_mutex_unlock(&va_block->lock); + + TEST_CHECK_RET(is_resident); + + // In this test a single VA range covers the whole 2MB physical region. We + // expect a single translation to be returned for a 2MB chunk. + num_translations = uvm_pmm_gpu_phys_to_virt(&gpu->pmm, phys_addr.address, UVM_VA_BLOCK_SIZE, g_reverse_map_entries); + TEST_CHECK_RET(num_translations == 1); + TEST_CHECK_RET(g_reverse_map_entries[0].va_block == va_block); + TEST_CHECK_RET(g_reverse_map_entries[0].region.first == 0); + TEST_CHECK_RET(uvm_va_block_region_num_pages(g_reverse_map_entries[0].region) == uvm_va_block_num_cpu_pages(va_block)); + + uvm_va_block_release(va_block); + + return NV_OK; +} + +static NV_STATUS test_pmm_reverse_map_many_blocks(uvm_gpu_t *gpu, uvm_va_space_t *va_space, NvU64 addr, NvU64 size) +{ + uvm_va_range_t *va_range; + uvm_va_block_t *va_block = NULL; + NvU32 num_blocks; + NvU32 index = 0; + uvm_gpu_phys_address_t phys_addr = {0}; + bool is_resident; + + // In this test, the [addr:addr + size) VA region contains + // several VA ranges with different sizes. 
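+    // Approach: locate one populated block, align its first page's physical
+    // address down to the root chunk boundary, translate the whole root chunk
+    // back to virtual addresses with uvm_pmm_gpu_phys_to_virt(), and then walk
+    // every VA block in the region checking that each returned mapping is
+    // resident and that the translated pages add up to each block's size.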
+ + // Find the first block to compute the base physical address of the root + // chunk + uvm_for_each_va_range_in(va_range, va_space, addr, addr + size - 1) { + va_block = uvm_va_range_block(va_range, 0); + if (va_block) + break; + } + TEST_CHECK_RET(va_block); + + uvm_mutex_lock(&va_block->lock); + + is_resident = uvm_id_equal(uvm_va_block_page_get_closest_resident(va_block, 0, gpu->id), gpu->id); + if (is_resident) { + phys_addr = uvm_va_block_gpu_phys_page_address(va_block, 0, gpu); + phys_addr.address = UVM_ALIGN_DOWN(phys_addr.address, UVM_VA_BLOCK_SIZE); + } + + uvm_mutex_unlock(&va_block->lock); + + TEST_CHECK_RET(is_resident); + + // Perform the lookup for the whole root chunk + num_blocks = uvm_pmm_gpu_phys_to_virt(&gpu->pmm, phys_addr.address, size, g_reverse_map_entries); + TEST_CHECK_RET(num_blocks != 0); + + // Iterate over all VA ranges and their VA blocks within the 2MB VA region. + // Some blocks are not populated. However, we assume that blocks have been + // populated in order so they have been assigned physical addresses + // incrementally. Therefore, the reverse translations will show them in + // order. + uvm_for_each_va_range_in(va_range, va_space, addr, addr + size - 1) { + uvm_va_block_t *va_block; + + for_each_va_block_in_va_range(va_range, va_block) { + NvU32 num_va_block_pages = 0; + + // Iterate over all the translations for the current VA block. One + // translation per chunk is returned. We compute the total number of + // pages covered in the translations to check that they match with + // the number of pages in the VA block. + while (g_reverse_map_entries[index].va_block == va_block) { + uvm_reverse_map_t *reverse_mapping; + + reverse_mapping = &g_reverse_map_entries[index]; + + uvm_va_block_release(va_block); + num_va_block_pages += uvm_va_block_region_num_pages(reverse_mapping->region); + UVM_ASSERT(uvm_va_block_contains_address(va_block, uvm_reverse_map_start(reverse_mapping))); + UVM_ASSERT(uvm_va_block_contains_address(va_block, uvm_reverse_map_end(reverse_mapping))); + + uvm_mutex_lock(&va_block->lock); + + // Verify that all pages are populated on the GPU + is_resident = uvm_page_mask_region_full(uvm_va_block_resident_mask_get(va_block, gpu->id), + reverse_mapping->region); + + uvm_mutex_unlock(&va_block->lock); + + TEST_CHECK_RET(is_resident); + + ++index; + } + + if (num_va_block_pages) + TEST_CHECK_RET(num_va_block_pages == uvm_va_block_num_cpu_pages(va_block)); + } + } + TEST_CHECK_RET(index == num_blocks); + + return NV_OK; +} + +NV_STATUS uvm_test_pmm_reverse_map(UVM_TEST_PMM_REVERSE_MAP_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_gpu_t *gpu; + uvm_va_space_t *va_space; + + va_space = uvm_va_space_get(filp); + + // Take the global lock to void interferences from different instances of + // the test, since we use global variables + uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_va_space_down_write(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu || !uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) { + status = NV_ERR_INVALID_DEVICE; + goto exit_unlock; + } + + status = test_pmm_reverse_map_single(gpu, va_space, params->range_address1); + + if (status == NV_OK) + status = test_pmm_reverse_map_many_blocks(gpu, va_space, params->range_address2, params->range_size2); + +exit_unlock: + uvm_va_space_up_write(va_space); + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return status; +} + +static NV_STATUS test_indirect_peers(uvm_gpu_t *owning_gpu, uvm_gpu_t *accessing_gpu) +{ + 
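+    // Split one root chunk from owning_gpu into its smallest user chunk size,
+    // map every child for accessing_gpu and check that the returned peer
+    // addresses are contiguous, then copy a pattern in both directions:
+    // owning_gpu writes and accessing_gpu verifies, accessing_gpu writes and
+    // owning_gpu verifies.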
uvm_pmm_gpu_t *pmm = &owning_gpu->pmm; + size_t chunk_size = uvm_chunk_find_first_size(pmm->chunk_sizes[UVM_PMM_GPU_MEMORY_TYPE_USER]); + uvm_gpu_chunk_t *parent_chunk = NULL; + uvm_gpu_chunk_t **chunks = NULL; + size_t i, num_chunks = UVM_CHUNK_SIZE_MAX / chunk_size; + NV_STATUS tracker_status, status = NV_OK; + uvm_mem_t *verif_mem = NULL; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + uvm_gpu_address_t local_addr; + uvm_gpu_address_t peer_addr; + NvU32 init_val = 0x12345678; + NvU32 new_val = 0xabcdc0de; + + chunks = uvm_kvmalloc_zero(num_chunks * sizeof(chunks[0])); + if (!chunks) + return NV_ERR_NO_MEMORY; + + + + + + TEST_NV_CHECK_GOTO(uvm_mem_alloc_sysmem_and_map_cpu_kernel(UVM_CHUNK_SIZE_MAX, current->mm, &verif_mem), out); + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(verif_mem, owning_gpu), out); + TEST_NV_CHECK_GOTO(uvm_mem_map_gpu_kernel(verif_mem, accessing_gpu), out); + + // Allocate a root chunk then split it to test multiple mappings across + // contiguous chunks under the same root. + TEST_NV_CHECK_GOTO(uvm_pmm_gpu_alloc_user(pmm, + 1, + UVM_CHUNK_SIZE_MAX, + UVM_PMM_ALLOC_FLAGS_EVICT, + &parent_chunk, + NULL), out); + + TEST_NV_CHECK_GOTO(uvm_pmm_gpu_split_chunk(pmm, parent_chunk, chunk_size, chunks), out); + parent_chunk = NULL; + + // Verify contiguity and multiple mappings under a root chunk + for (i = 0; i < num_chunks; i++) { + TEST_NV_CHECK_GOTO(uvm_pmm_gpu_indirect_peer_map(pmm, chunks[i], accessing_gpu), out); + TEST_CHECK_GOTO(uvm_pmm_gpu_indirect_peer_addr(pmm, chunks[i], accessing_gpu) == + uvm_pmm_gpu_indirect_peer_addr(pmm, chunks[0], accessing_gpu) + i * chunk_size, + out); + } + + // Check that accessing_gpu can read and write + local_addr = uvm_gpu_address_physical(UVM_APERTURE_VID, chunks[0]->address); + peer_addr = uvm_pmm_gpu_peer_copy_address(&owning_gpu->pmm, chunks[0], accessing_gpu); + + // Init on local GPU + TEST_NV_CHECK_GOTO(do_memset_4(owning_gpu, local_addr, init_val, UVM_CHUNK_SIZE_MAX, &tracker), out); + + // Read using indirect peer and verify + TEST_NV_CHECK_GOTO(gpu_mem_check(accessing_gpu, + verif_mem, + peer_addr, + UVM_CHUNK_SIZE_MAX, + init_val, + &tracker), out); + + // Write from indirect peer + TEST_NV_CHECK_GOTO(do_memset_4(accessing_gpu, peer_addr, new_val, UVM_CHUNK_SIZE_MAX, &tracker), out); + + // Read using local gpu and verify + TEST_NV_CHECK_GOTO(gpu_mem_check(owning_gpu, verif_mem, local_addr, UVM_CHUNK_SIZE_MAX, new_val, &tracker), out); + +out: + tracker_status = uvm_tracker_wait_deinit(&tracker); + if (status == NV_OK && tracker_status != NV_OK) { + UVM_TEST_PRINT("Tracker wait failed\n"); + status = tracker_status; + } + + if (parent_chunk) { + uvm_pmm_gpu_free(pmm, parent_chunk, NULL); + } + else { + for (i = 0; i < num_chunks; i++) { + if (chunks[i]) + uvm_pmm_gpu_free(pmm, chunks[i], NULL); + } + } + + if (verif_mem) + uvm_mem_free(verif_mem); + + uvm_kvfree(chunks); + return status; +} + +NV_STATUS uvm_test_pmm_indirect_peers(UVM_TEST_PMM_INDIRECT_PEERS_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *owning_gpu, *accessing_gpu; + bool ran_test = false; + + uvm_va_space_down_read(va_space); + + for_each_va_space_gpu(owning_gpu, va_space) { + for_each_va_space_gpu_in_mask(accessing_gpu, va_space, &va_space->indirect_peers[uvm_id_value(owning_gpu->id)]) { + ran_test = true; + status = test_indirect_peers(owning_gpu, accessing_gpu); + if (status != NV_OK) + goto out; + } + } + + if (!ran_test) + status = NV_WARN_NOTHING_TO_DO; + +out: + 
uvm_va_space_up_read(va_space); + return status; +} + +static NV_STATUS test_chunk_with_elevated_page(uvm_gpu_t *gpu) +{ + uvm_pmm_gpu_t *pmm = &gpu->pmm; + size_t chunk_size = uvm_chunk_find_first_size(pmm->chunk_sizes[UVM_PMM_GPU_MEMORY_TYPE_USER]); + uvm_gpu_chunk_t *parent_chunk = NULL, *parent_root = NULL; + uvm_gpu_chunk_t **chunks = NULL; + uvm_gpu_chunk_t *new_chunk = NULL; + size_t i, num_chunks = UVM_CHUNK_SIZE_MAX / chunk_size; + NV_STATUS status = NV_OK; + struct page *page = NULL; + + chunks = uvm_kvmalloc_zero(num_chunks * sizeof(chunks[0])); + if (!chunks) + return NV_ERR_NO_MEMORY; + + // Allocate a root chunk then split it to test multiple mappings across + // contiguous chunks under the same root. + TEST_NV_CHECK_GOTO(uvm_pmm_gpu_alloc_user(pmm, + 1, + UVM_CHUNK_SIZE_MAX, + UVM_PMM_ALLOC_FLAGS_EVICT, + &parent_chunk, + NULL), out); + + // Keep an extra reference to just one page within the parent chunk. + // This will make the whole root chunk non-allocatable. + page = uvm_gpu_chunk_to_page(pmm, parent_chunk); + get_page(page); + + TEST_NV_CHECK_GOTO(uvm_pmm_gpu_split_chunk(pmm, parent_chunk, chunk_size, chunks), out); + + parent_root = parent_chunk; + parent_chunk = NULL; + + // Free some of the chunks + for (i = 0; i < num_chunks/2; i++) { + UVM_ASSERT(chunks[i]); + uvm_pmm_gpu_free(pmm, chunks[i], NULL); + chunks[i] = NULL; + } + + // Now try alloc a chunk of that size. + // Expect that the allocation will fail or return a chunk with a + // different parent. + status = chunk_alloc_check(pmm, + 1, + chunk_size, + UVM_PMM_GPU_MEMORY_TYPE_USER, + UVM_PMM_ALLOC_FLAGS_NONE, + &new_chunk, + NULL); + UVM_ASSERT(status == NV_OK || status == NV_ERR_NO_MEMORY); + + if (status == NV_OK) + TEST_CHECK_GOTO(!uvm_gpu_chunk_same_root(new_chunk, parent_root), out); + else if (status == NV_ERR_NO_MEMORY) + status = NV_OK; + + for (i = num_chunks/2; i < num_chunks; i++) { + UVM_ASSERT(chunks[i]); + uvm_pmm_gpu_free(pmm, chunks[i], NULL); + chunks[i] = NULL; + } + +out: + if (parent_chunk) { + uvm_pmm_gpu_free(pmm, parent_chunk, NULL); + } + else { + for (i = 0; i < num_chunks; i++) { + if (chunks[i]) + uvm_pmm_gpu_free(pmm, chunks[i], NULL); + } + } + + if (new_chunk) + uvm_pmm_gpu_free(pmm, new_chunk, NULL); + + if (page) + put_page(page); + + uvm_kvfree(chunks); + return status; +} + +NV_STATUS uvm_test_pmm_chunk_with_elevated_page(UVM_TEST_PMM_CHUNK_WITH_ELEVATED_PAGE_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu; + bool ran_test = false; + + uvm_va_space_down_read(va_space); + + for_each_va_space_gpu(gpu, va_space) { + if (!gpu->parent->numa_info.enabled) + continue; + + ran_test = true; + status = test_chunk_with_elevated_page(gpu); + if (status != NV_OK) + goto out; + } + + if (!ran_test) + status = NV_WARN_NOTHING_TO_DO; + +out: + uvm_va_space_up_read(va_space); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_policy.c b/kernel-open/nvidia-uvm/uvm_policy.c new file mode 100644 index 000000000..4e3da152e --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_policy.c @@ -0,0 +1,930 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, 
distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_ioctl.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_va_block.h" +#include "uvm_api.h" +#include "uvm_tracker.h" +#include "uvm_gpu.h" +#include "uvm_va_space_mm.h" + +bool uvm_is_valid_vma_range(struct mm_struct *mm, NvU64 start, NvU64 length) +{ + const NvU64 end = start + length; + struct vm_area_struct *vma; + + UVM_ASSERT(mm); + uvm_assert_mmap_lock_locked(mm); + + vma = find_vma_intersection(mm, start, end); + + while (vma && (vma->vm_start <= start)) { + if (vma->vm_end >= end) + return true; + start = vma->vm_end; + vma = vma->vm_next; + } + + return false; +} + +NV_STATUS uvm_api_range_type_check(uvm_va_space_t *va_space, struct mm_struct *mm, NvU64 base, NvU64 length) +{ + uvm_va_range_t *va_range, *va_range_last; + const NvU64 last_address = base + length - 1; + + if (mm) + uvm_assert_mmap_lock_locked(mm); + + uvm_assert_rwsem_locked(&va_space->lock); + + if (uvm_api_range_invalid(base, length)) + return NV_ERR_INVALID_ADDRESS; + + // Check if passed interval overlaps with any VA range. + if (uvm_va_space_range_empty(va_space, base, last_address)) { + if (g_uvm_global.ats.enabled && + uvm_va_space_pageable_mem_access_supported(va_space) && + mm && + uvm_is_valid_vma_range(mm, base, length)) + return NV_WARN_NOTHING_TO_DO; + else if (uvm_hmm_is_enabled(va_space) && + mm && + uvm_is_valid_vma_range(mm, base, length)) + return NV_OK; + else + return NV_ERR_INVALID_ADDRESS; + } + + va_range_last = NULL; + + uvm_for_each_managed_va_range_in_contig(va_range, va_space, base, last_address) + va_range_last = va_range; + + // Check if passed interval overlaps with an unmanaged VA range, or a + // sub-interval not tracked by a VA range + if (!va_range_last || va_range_last->node.end < last_address) + return NV_ERR_INVALID_ADDRESS; + + // Passed interval is fully covered by managed VA ranges + return NV_OK; +} + +static NV_STATUS split_as_needed(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_policy_is_split_needed_t split_needed_cb, + void *data) +{ + uvm_va_range_t *va_range; + + UVM_ASSERT(PAGE_ALIGNED(addr)); + + // Look for UVM managed allocations first, then look for HMM policies. + va_range = uvm_va_range_find(va_space, addr); + if (!va_range) + return uvm_hmm_split_as_needed(va_space, addr, split_needed_cb, data); + + // If the policy range doesn't span addr, we're done. + if (addr == va_range->node.start) + return NV_OK; + + // Only managed ranges can be split. 
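+    // Requesting a policy boundary in the interior of any other range type
+    // cannot be honored, so report it as an invalid address.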
+ if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED) + return NV_ERR_INVALID_ADDRESS; + + if (split_needed_cb(uvm_va_range_get_policy(va_range), data)) + return uvm_va_range_split(va_range, addr - 1, NULL); + + return NV_OK; +} + +// Split policy ranges if split_needed_cb() returns true, where start_addr and +// end_addr (exclusive) define the process virtual address range. +// If splits are performed, start_addr and end_addr will be the starting +// and ending addresses of the newly-split policy range. +static NV_STATUS split_span_as_needed(uvm_va_space_t *va_space, + NvU64 start_addr, + NvU64 end_addr, + uvm_va_policy_is_split_needed_t split_needed_cb, + void *data) +{ + NV_STATUS status; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + status = split_as_needed(va_space, start_addr, split_needed_cb, data); + if (status != NV_OK) + return status; + + return split_as_needed(va_space, end_addr, split_needed_cb, data); +} + +static bool preferred_location_is_split_needed(uvm_va_policy_t *policy, void *data) +{ + uvm_processor_id_t processor_id; + + UVM_ASSERT(data); + + processor_id = *(uvm_processor_id_t*)data; + return !uvm_id_equal(processor_id, policy->preferred_location); +} + +static NV_STATUS preferred_location_unmap_remote_pages(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context) +{ + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + uvm_va_policy_t *policy = va_block_context->policy; + uvm_processor_id_t preferred_location = policy->preferred_location; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + const uvm_page_mask_t *mapped_mask; + + if (UVM_ID_IS_INVALID(preferred_location) || !uvm_processor_mask_test(&va_block->mapped, preferred_location)) + goto done; + + // Read duplication takes precedence over PreferredLocation. No mappings + // need to be removed. 
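+    // Read-duplicated pages are expected to remain mapped on processors other
+    // than the preferred location, so leave the existing mappings in place.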
+ if (uvm_va_policy_is_read_duplicate(policy, va_space)) + goto done; + + mapped_mask = uvm_va_block_map_mask_get(va_block, preferred_location); + + if (uvm_processor_mask_test(&va_block->resident, preferred_location)) { + const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, preferred_location); + + if (!uvm_page_mask_andnot(&va_block_context->caller_page_mask, mapped_mask, resident_mask)) + goto done; + } + else { + uvm_page_mask_copy(&va_block_context->caller_page_mask, mapped_mask); + } + + status = uvm_va_block_unmap(va_block, + va_block_context, + preferred_location, + uvm_va_block_region_from_block(va_block), + &va_block_context->caller_page_mask, + &local_tracker); + + tracker_status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker); + if (status == NV_OK) + status = tracker_status; + +done: + uvm_tracker_deinit(&local_tracker); + + return status; +} + +NV_STATUS uvm_va_block_set_preferred_location_locked(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context) +{ + uvm_assert_mutex_locked(&va_block->lock); + + uvm_va_block_mark_cpu_dirty(va_block); + + return preferred_location_unmap_remote_pages(va_block, va_block_context); +} + +static NV_STATUS preferred_location_set(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 base, + NvU64 length, + uvm_processor_id_t preferred_location, + uvm_va_range_t **first_va_range_to_migrate, + uvm_tracker_t *out_tracker) +{ + uvm_va_range_t *va_range, *va_range_last; + const NvU64 last_address = base + length - 1; + bool preferred_location_is_faultable_gpu = false; + NV_STATUS status; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + if (UVM_ID_IS_VALID(preferred_location)) { + *first_va_range_to_migrate = NULL; + preferred_location_is_faultable_gpu = UVM_ID_IS_GPU(preferred_location) && + uvm_processor_mask_test(&va_space->faultable_processors, + preferred_location); + } + + status = split_span_as_needed(va_space, + base, + last_address + 1, + preferred_location_is_split_needed, + &preferred_location); + if (status != NV_OK) + return status; + + va_range_last = NULL; + uvm_for_each_managed_va_range_in_contig(va_range, va_space, base, last_address) { + bool found_non_migratable_interval = false; + + va_range_last = va_range; + + // If we didn't split the ends, check that they match + if (va_range->node.start < base || va_range->node.end > last_address) + UVM_ASSERT(uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, preferred_location)); + + if (UVM_ID_IS_VALID(preferred_location)) { + const NvU64 start = max(base, va_range->node.start); + const NvU64 end = min(last_address, va_range->node.end); + + found_non_migratable_interval = !uvm_range_group_all_migratable(va_space, start, end); + + if (found_non_migratable_interval && preferred_location_is_faultable_gpu) + return NV_ERR_INVALID_DEVICE; + } + + status = uvm_va_range_set_preferred_location(va_range, preferred_location, mm, out_tracker); + if (status != NV_OK) + return status; + + // Return the first VA range that needs to be migrated so the caller + // function doesn't need to traverse the tree again + if (found_non_migratable_interval && (*first_va_range_to_migrate == NULL)) + *first_va_range_to_migrate = va_range; + } + + if (va_range_last) { + UVM_ASSERT(va_range_last->node.end >= last_address); + return NV_OK; + } + + return uvm_hmm_set_preferred_location(va_space, preferred_location, base, last_address); +} + +NV_STATUS uvm_api_set_preferred_location(const UVM_SET_PREFERRED_LOCATION_PARAMS *params, 
                                         struct file *filp)
+{
+    NV_STATUS status;
+    NV_STATUS tracker_status;
+    uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+    uvm_va_range_t *va_range = NULL;
+    uvm_va_range_t *first_va_range_to_migrate = NULL;
+    struct mm_struct *mm;
+    uvm_processor_id_t preferred_location_id;
+    bool has_va_space_write_lock;
+    const NvU64 start = params->requestedBase;
+    const NvU64 length = params->length;
+    const NvU64 end = start + length - 1;
+    bool range_is_ats = false;
+
+    UVM_ASSERT(va_space);
+
+    mm = uvm_va_space_mm_or_current_retain_lock(va_space);
+    uvm_va_space_down_write(va_space);
+    has_va_space_write_lock = true;
+
+    status = uvm_api_range_type_check(va_space, mm, start, length);
+    if (status != NV_OK) {
+        if (status != NV_WARN_NOTHING_TO_DO)
+            goto done;
+
+        status = NV_OK;
+        range_is_ats = true;
+    }
+
+    // If the CPU is the preferred location, we don't have to find the associated uvm_gpu_t
+    if (uvm_uuid_is_cpu(&params->preferredLocation)) {
+        preferred_location_id = UVM_ID_CPU;
+    }
+    else {
+        // Translate preferredLocation into a live GPU ID, and check that this
+        // GPU can address the virtual address range
+        uvm_gpu_t *gpu = uvm_va_space_get_gpu_by_uuid(va_space, &params->preferredLocation);
+
+        if (!gpu)
+            status = NV_ERR_INVALID_DEVICE;
+        else if (!uvm_gpu_can_address(gpu, start, length))
+            status = NV_ERR_OUT_OF_RANGE;
+
+        if (status != NV_OK)
+            goto done;
+
+        preferred_location_id = gpu->id;
+    }
+
+    UVM_ASSERT(status == NV_OK);
+
+    // TODO: Bug 2098544: On ATS systems, honor the preferred location policy
+    // for system memory ranges instead of ignoring it.
+    if (range_is_ats)
+        goto done;
+
+    status = preferred_location_set(va_space, mm, start, length, preferred_location_id, &first_va_range_to_migrate, &local_tracker);
+    if (status != NV_OK)
+        goto done;
+
+    // No VA range to migrate, early exit
+    if (!first_va_range_to_migrate)
+        goto done;
+
+    uvm_va_space_downgrade_write(va_space);
+    has_va_space_write_lock = false;
+
+    // No need to check for holes in the VA ranges span here, this was checked by preferred_location_set
+    for (va_range = first_va_range_to_migrate; va_range; va_range = uvm_va_space_iter_next(va_range, end)) {
+        uvm_range_group_range_iter_t iter;
+        NvU64 cur_start = max(start, va_range->node.start);
+        NvU64 cur_end = min(end, va_range->node.end);
+
+        uvm_range_group_for_each_migratability_in(&iter, va_space, cur_start, cur_end) {
+            if (!iter.migratable) {
+                status = uvm_range_group_va_range_migrate(va_range, iter.start, iter.end, &local_tracker);
+                if (status != NV_OK)
+                    goto done;
+            }
+        }
+    }
+
+done:
+    tracker_status = uvm_tracker_wait_deinit(&local_tracker);
+
+    if (has_va_space_write_lock)
+        uvm_va_space_up_write(va_space);
+    else
+        uvm_va_space_up_read(va_space);
+
+    uvm_va_space_mm_or_current_release_unlock(va_space, mm);
+
+    return status == NV_OK ?
tracker_status : status; +} + +NV_STATUS uvm_api_unset_preferred_location(const UVM_UNSET_PREFERRED_LOCATION_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + NV_STATUS tracker_status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + struct mm_struct *mm; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + + UVM_ASSERT(va_space); + + mm = uvm_va_space_mm_or_current_retain_lock(va_space); + uvm_va_space_down_write(va_space); + + status = uvm_api_range_type_check(va_space, mm, params->requestedBase, params->length); + + if (status == NV_OK) + status = preferred_location_set(va_space, mm, params->requestedBase, params->length, UVM_ID_INVALID, NULL, &local_tracker); + else if (status == NV_WARN_NOTHING_TO_DO) + status = NV_OK; + + tracker_status = uvm_tracker_wait_deinit(&local_tracker); + + uvm_va_space_up_write(va_space); + uvm_va_space_mm_or_current_release_unlock(va_space, mm); + return status == NV_OK ? tracker_status : status; +} + +static NV_STATUS va_block_set_accessed_by_locked(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t processor_id, + uvm_tracker_t *out_tracker) +{ + NV_STATUS status; + NV_STATUS tracker_status; + + uvm_assert_mutex_locked(&va_block->lock); + + status = uvm_va_block_add_mappings(va_block, + va_block_context, + processor_id, + uvm_va_block_region_from_block(va_block), + NULL, + UvmEventMapRemoteCausePolicy); + + tracker_status = uvm_tracker_add_tracker_safe(out_tracker, &va_block->tracker); + + return status == NV_OK ? tracker_status : status; +} + +NV_STATUS uvm_va_block_set_accessed_by(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t processor_id) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + NV_STATUS status; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + + UVM_ASSERT(!uvm_va_block_is_hmm(va_block)); + + va_block_context->policy = uvm_va_range_get_policy(va_block->va_range); + + // Read duplication takes precedence over SetAccesedBy. Do not add mappings + // if read duplication is enabled. 
+ if (uvm_va_policy_is_read_duplicate(va_block_context->policy, va_space)) + return NV_OK; + + status = UVM_VA_BLOCK_LOCK_RETRY(va_block, NULL, + va_block_set_accessed_by_locked(va_block, + va_block_context, + processor_id, + &local_tracker)); + + // TODO: Bug 1767224: Combine all accessed_by operations into single tracker + if (status == NV_OK) + status = uvm_tracker_wait(&local_tracker); + + uvm_tracker_deinit(&local_tracker); + return status; +} + +typedef struct +{ + uvm_processor_id_t processor_id; + bool set_bit; +} accessed_by_split_params_t; + +static bool accessed_by_is_split_needed(uvm_va_policy_t *policy, void *data) +{ + accessed_by_split_params_t *params = (accessed_by_split_params_t*)data; + + UVM_ASSERT(params); + + return (uvm_processor_mask_test(&policy->accessed_by, params->processor_id) != params->set_bit); +} + +static NV_STATUS accessed_by_set(uvm_va_space_t *va_space, + NvU64 base, + NvU64 length, + const NvProcessorUuid *processor_uuid, + bool set_bit) +{ + uvm_processor_id_t processor_id = UVM_ID_INVALID; + uvm_va_range_t *va_range, *va_range_last; + struct mm_struct *mm; + const NvU64 last_address = base + length - 1; + bool range_is_sysmem = false; + accessed_by_split_params_t split_params; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + NV_STATUS status; + NV_STATUS tracker_status; + + UVM_ASSERT(va_space); + + mm = uvm_va_space_mm_or_current_retain_lock(va_space); + uvm_va_space_down_write(va_space); + + status = uvm_api_range_type_check(va_space, mm, base, length); + if (status != NV_OK) { + if (status != NV_WARN_NOTHING_TO_DO) + goto done; + status = NV_OK; + range_is_sysmem = true; + } + + if (uvm_uuid_is_cpu(processor_uuid)) { + processor_id = UVM_ID_CPU; + } + else { + // Translate processor_uuid into a live GPU ID, and check that this GPU + // can address the virtual address range + uvm_gpu_t *gpu = uvm_va_space_get_gpu_by_uuid(va_space, processor_uuid); + if (!gpu) + status = NV_ERR_INVALID_DEVICE; + else if (!uvm_gpu_can_address(gpu, base, length)) + status = NV_ERR_OUT_OF_RANGE; + + if (status != NV_OK) + goto done; + + processor_id = gpu->id; + } + + if (range_is_sysmem) + goto done; + + split_params.processor_id = processor_id; + split_params.set_bit = set_bit; + status = split_span_as_needed(va_space, + base, + last_address + 1, + accessed_by_is_split_needed, + &split_params); + if (status != NV_OK) + goto done; + + va_range_last = NULL; + uvm_for_each_managed_va_range_in_contig(va_range, va_space, base, last_address) { + va_range_last = va_range; + + // If we didn't split the ends, check that they match + if (va_range->node.start < base || va_range->node.end > last_address) + UVM_ASSERT(uvm_processor_mask_test(&uvm_va_range_get_policy(va_range)->accessed_by, + processor_id) == set_bit); + + if (set_bit) { + status = uvm_va_range_set_accessed_by(va_range, processor_id, mm, &local_tracker); + if (status != NV_OK) + goto done; + } + else { + uvm_va_range_unset_accessed_by(va_range, processor_id, &local_tracker); + } + } + + if (va_range_last) { + UVM_ASSERT(va_range_last->node.end >= last_address); + goto done; + } + + status = uvm_hmm_set_accessed_by(va_space, + processor_id, + set_bit, + base, + last_address); + +done: + tracker_status = uvm_tracker_wait_deinit(&local_tracker); + + uvm_va_space_up_write(va_space); + uvm_va_space_mm_or_current_release_unlock(va_space, mm); + + return status == NV_OK ? 
        tracker_status : status;
+}
+
+NV_STATUS uvm_api_set_accessed_by(const UVM_SET_ACCESSED_BY_PARAMS *params, struct file *filp)
+{
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+
+    return accessed_by_set(va_space, params->requestedBase, params->length, &params->accessedByUuid, true);
+}
+
+NV_STATUS uvm_api_unset_accessed_by(const UVM_UNSET_ACCESSED_BY_PARAMS *params, struct file *filp)
+{
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+
+    return accessed_by_set(va_space, params->requestedBase, params->length, &params->accessedByUuid, false);
+}
+
+static NV_STATUS va_block_set_read_duplication_locked(uvm_va_block_t *va_block,
+                                                      uvm_va_block_retry_t *va_block_retry,
+                                                      uvm_va_block_context_t *va_block_context)
+{
+    uvm_processor_id_t src_id;
+
+    uvm_assert_mutex_locked(&va_block->lock);
+
+    for_each_id_in_mask(src_id, &va_block->resident) {
+        NV_STATUS status;
+        uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, src_id);
+
+        // Calling uvm_va_block_make_resident_read_duplicate will break all
+        // SetAccessedBy and remote mappings
+        status = uvm_va_block_make_resident_read_duplicate(va_block,
+                                                           va_block_retry,
+                                                           va_block_context,
+                                                           src_id,
+                                                           uvm_va_block_region_from_block(va_block),
+                                                           resident_mask,
+                                                           NULL,
+                                                           UVM_MAKE_RESIDENT_CAUSE_API_HINT);
+        if (status != NV_OK)
+            return status;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS uvm_va_block_set_read_duplication(uvm_va_block_t *va_block,
+                                            uvm_va_block_context_t *va_block_context)
+{
+    NV_STATUS status;
+    uvm_va_block_retry_t va_block_retry;
+
+    status = UVM_VA_BLOCK_LOCK_RETRY(va_block, &va_block_retry,
+                                     va_block_set_read_duplication_locked(va_block,
+                                                                          &va_block_retry,
+                                                                          va_block_context));
+
+    return status;
+}
+
+static NV_STATUS va_block_unset_read_duplication_locked(uvm_va_block_t *va_block,
+                                                        uvm_va_block_retry_t *va_block_retry,
+                                                        uvm_va_block_context_t *va_block_context,
+                                                        uvm_tracker_t *out_tracker)
+{
+    NV_STATUS status;
+    uvm_processor_id_t processor_id;
+    uvm_va_block_region_t block_region = uvm_va_block_region_from_block(va_block);
+    uvm_page_mask_t *break_read_duplication_pages = &va_block_context->caller_page_mask;
+    uvm_va_policy_t *policy = va_block_context->policy;
+    uvm_processor_id_t preferred_location = policy->preferred_location;
+    uvm_processor_mask_t accessed_by = policy->accessed_by;
+
+    uvm_assert_mutex_locked(&va_block->lock);
+
+    // 1- Iterate over all processors with resident copies to avoid migrations
+    // and invalidate the rest of copies
+
+    // If preferred_location is set and has resident copies, give it preference
+    if (UVM_ID_IS_VALID(preferred_location) &&
+        uvm_processor_mask_test(&va_block->resident, preferred_location)) {
+        uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, preferred_location);
+        bool is_mask_empty = !uvm_page_mask_and(break_read_duplication_pages,
+                                                &va_block->read_duplicated_pages,
+                                                resident_mask);
+
+        if (!is_mask_empty) {
+            // make_resident breaks read duplication
+            status = uvm_va_block_make_resident(va_block,
+                                                va_block_retry,
+                                                va_block_context,
+                                                preferred_location,
+                                                block_region,
+                                                break_read_duplication_pages,
+                                                NULL,
+                                                UVM_MAKE_RESIDENT_CAUSE_API_HINT);
+            if (status != NV_OK)
+                return status;
+        }
+    }
+
+    // Then iterate over the rest of processors
+    for_each_id_in_mask(processor_id, &va_block->resident) {
+        uvm_page_mask_t *resident_mask;
+        bool is_mask_empty;
+
+        if (uvm_id_equal(processor_id, preferred_location))
+            continue;
+
+        resident_mask = uvm_va_block_resident_mask_get(va_block, processor_id);
+        is_mask_empty =
!uvm_page_mask_and(break_read_duplication_pages, + &va_block->read_duplicated_pages, + resident_mask); + if (is_mask_empty) + continue; + + // make_resident breaks read duplication + status = uvm_va_block_make_resident(va_block, + va_block_retry, + va_block_context, + processor_id, + block_region, + break_read_duplication_pages, + NULL, + UVM_MAKE_RESIDENT_CAUSE_API_HINT); + if (status != NV_OK) + return status; + } + + // 2- Re-establish SetAccessedBy mappings + for_each_id_in_mask(processor_id, &accessed_by) { + status = va_block_set_accessed_by_locked(va_block, + va_block_context, + processor_id, + out_tracker); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +NV_STATUS uvm_va_block_unset_read_duplication(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context) +{ + uvm_va_block_retry_t va_block_retry; + NV_STATUS status = NV_OK; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + + // Restore all SetAccessedBy mappings + status = UVM_VA_BLOCK_LOCK_RETRY(va_block, &va_block_retry, + va_block_unset_read_duplication_locked(va_block, + &va_block_retry, + va_block_context, + &local_tracker)); + if (status == NV_OK) + status = uvm_tracker_wait(&local_tracker); + + uvm_tracker_deinit(&local_tracker); + + return status; +} + +static bool read_duplication_is_split_needed(uvm_va_policy_t *policy, void *data) +{ + uvm_read_duplication_policy_t new_policy; + + UVM_ASSERT(data); + + new_policy = *(uvm_read_duplication_policy_t *)data; + return policy->read_duplication != new_policy; +} + +static NV_STATUS read_duplication_set(uvm_va_space_t *va_space, NvU64 base, NvU64 length, bool enable) +{ + uvm_va_range_t *va_range, *va_range_last; + struct mm_struct *mm; + const NvU64 last_address = base + length - 1; + NV_STATUS status; + uvm_read_duplication_policy_t new_policy; + + UVM_ASSERT(va_space); + + // We need mmap_lock as we may create CPU mappings + mm = uvm_va_space_mm_or_current_retain_lock(va_space); + uvm_va_space_down_write(va_space); + + status = uvm_api_range_type_check(va_space, mm, base, length); + if (status != NV_OK) { + if (status == NV_WARN_NOTHING_TO_DO) + status = NV_OK; + + goto done; + } + + // Note that we never set the policy back to UNSET + new_policy = enable ? UVM_READ_DUPLICATION_ENABLED : UVM_READ_DUPLICATION_DISABLED; + + status = split_span_as_needed(va_space, + base, + last_address + 1, + read_duplication_is_split_needed, + &new_policy); + if (status != NV_OK) + goto done; + + va_range_last = NULL; + uvm_for_each_managed_va_range_in_contig(va_range, va_space, base, last_address) { + va_range_last = va_range; + + // If we didn't split the ends, check that they match + if (va_range->node.start < base || va_range->node.end > last_address) + UVM_ASSERT(uvm_va_range_get_policy(va_range)->read_duplication == new_policy); + + // If the va_space cannot currently read duplicate, only change the user + // state. All memory should already have read duplication unset. 
+ if (uvm_va_space_can_read_duplicate(va_space, NULL)) { + + // Handle SetAccessedBy mappings + if (new_policy == UVM_READ_DUPLICATION_ENABLED) { + status = uvm_va_range_set_read_duplication(va_range, mm); + if (status != NV_OK) + goto done; + } + else { + // If unsetting read duplication fails, the return status is + // not propagated back to the caller + (void)uvm_va_range_unset_read_duplication(va_range, mm); + } + } + + uvm_va_range_get_policy(va_range)->read_duplication = new_policy; + } + + if (va_range_last) { + UVM_ASSERT(va_range_last->node.end >= last_address); + goto done; + } + + status = uvm_hmm_set_read_duplication(va_space, + new_policy, + base, + last_address); + +done: + uvm_va_space_up_write(va_space); + uvm_va_space_mm_or_current_release_unlock(va_space, mm); + return status; +} + +NV_STATUS uvm_api_enable_read_duplication(const UVM_ENABLE_READ_DUPLICATION_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + return read_duplication_set(va_space, params->requestedBase, params->length, true); +} + +NV_STATUS uvm_api_disable_read_duplication(const UVM_DISABLE_READ_DUPLICATION_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + return read_duplication_set(va_space, params->requestedBase, params->length, false); +} + +static NV_STATUS system_wide_atomics_set(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid, bool enable) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + bool already_enabled; + + uvm_va_space_down_write(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, gpu_uuid); + if (!gpu) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + if (gpu->parent->scoped_atomics_supported) { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + if (!uvm_processor_mask_test(&va_space->faultable_processors, gpu->id)) { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + already_enabled = uvm_processor_mask_test(&va_space->system_wide_atomics_enabled_processors, gpu->id); + if (enable && !already_enabled) { + uvm_va_range_t *va_range; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + uvm_va_block_context_t *va_block_context = uvm_va_space_block_context(va_space, NULL); + NV_STATUS tracker_status; + + // Revoke atomic mappings from the calling GPU + uvm_for_each_va_range(va_range, va_space) { + uvm_va_block_t *va_block; + + if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED) + continue; + + va_block_context->policy = uvm_va_range_get_policy(va_range); + for_each_va_block_in_va_range(va_range, va_block) { + uvm_page_mask_t *non_resident_pages = &va_block_context->caller_page_mask; + + uvm_mutex_lock(&va_block->lock); + + if (!uvm_processor_mask_test(&va_block->mapped, gpu->id)) { + uvm_mutex_unlock(&va_block->lock); + continue; + } + + uvm_page_mask_complement(non_resident_pages, &va_block->gpus[uvm_id_gpu_index(gpu->id)]->resident); + + status = uvm_va_block_revoke_prot(va_block, + va_block_context, + gpu->id, + uvm_va_block_region_from_block(va_block), + non_resident_pages, + UVM_PROT_READ_WRITE_ATOMIC, + &va_block->tracker); + + tracker_status = uvm_tracker_add_tracker_safe(&local_tracker, &va_block->tracker); + + uvm_mutex_unlock(&va_block->lock); + + if (status == NV_OK) + status = tracker_status; + + if (status != NV_OK) { + uvm_tracker_deinit(&local_tracker); + goto done; + } + } + } + status = uvm_tracker_wait_deinit(&local_tracker); + + uvm_processor_mask_set(&va_space->system_wide_atomics_enabled_processors, gpu->id); + } + else if (!enable && already_enabled) { + // 
        // TODO: Bug 1767229: Promote write mappings to atomic
+        uvm_processor_mask_clear(&va_space->system_wide_atomics_enabled_processors, gpu->id);
+    }
+
+done:
+    uvm_va_space_up_write(va_space);
+    return status;
+}
+
+NV_STATUS uvm_api_enable_system_wide_atomics(UVM_ENABLE_SYSTEM_WIDE_ATOMICS_PARAMS *params, struct file *filp)
+{
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+
+    return system_wide_atomics_set(va_space, &params->gpu_uuid, true);
+}
+
+NV_STATUS uvm_api_disable_system_wide_atomics(UVM_DISABLE_SYSTEM_WIDE_ATOMICS_PARAMS *params, struct file *filp)
+{
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+
+    return system_wide_atomics_set(va_space, &params->gpu_uuid, false);
+}
diff --git a/kernel-open/nvidia-uvm/uvm_populate_pageable.c b/kernel-open/nvidia-uvm/uvm_populate_pageable.c
new file mode 100644
index 000000000..bd31e544d
--- /dev/null
+++ b/kernel-open/nvidia-uvm/uvm_populate_pageable.c
@@ -0,0 +1,233 @@
+/*******************************************************************************
+    Copyright (c) 2018-2021 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+        The above copyright notice and this permission notice shall be
+        included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+ +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_ioctl.h" +#include "uvm_linux.h" +#include "uvm_lock.h" +#include "uvm_api.h" +#include "uvm_va_range.h" +#include "uvm_va_space.h" +#include "uvm_populate_pageable.h" + +static bool is_write_populate(struct vm_area_struct *vma, uvm_populate_permissions_t populate_permissions) +{ + switch (populate_permissions) { + case UVM_POPULATE_PERMISSIONS_INHERIT: + return vma->vm_flags & VM_WRITE; + case UVM_POPULATE_PERMISSIONS_ANY: + return false; + case UVM_POPULATE_PERMISSIONS_WRITE: + return true; + default: + UVM_ASSERT(0); + return false; + } +} + +NV_STATUS uvm_populate_pageable_vma(struct vm_area_struct *vma, + unsigned long start, + unsigned long length, + int min_prot, + bool touch, + uvm_populate_permissions_t populate_permissions) +{ + unsigned long vma_num_pages; + unsigned long outer = start + length; + const bool is_writable = is_write_populate(vma, populate_permissions); + struct mm_struct *mm = vma->vm_mm; + unsigned long vm_flags = vma->vm_flags; + bool uvm_managed_vma; + long ret; + struct page **pages = NULL; + NV_STATUS status = NV_OK; + + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(outer)); + UVM_ASSERT(vma->vm_end > start); + UVM_ASSERT(vma->vm_start < outer); + uvm_assert_mmap_lock_locked(mm); + + // On most CPU architectures, write permission implies RW permission. + if (vm_flags & VM_WRITE) + vm_flags = vm_flags | VM_READ; + + if ((vm_flags & min_prot) != min_prot) + return NV_ERR_INVALID_ADDRESS; + + // Adjust to input range boundaries + start = max(start, vma->vm_start); + outer = min(outer, vma->vm_end); + + vma_num_pages = (outer - start) / PAGE_SIZE; + + // Please see the comment in uvm_ats_service_fault() regarding the usage of + // the touch parameter for more details. + if (touch) { + pages = uvm_kvmalloc(vma_num_pages * sizeof(pages[0])); + if (!pages) + return NV_ERR_NO_MEMORY; + } + + // If the input vma is managed by UVM, temporarily remove the record + // associated with the locking of mmap_lock, in order to avoid a "locked + // twice" validation error triggered when also acquiring mmap_lock in the + // page fault handler. The page fault is caused by get_user_pages. 
+ uvm_managed_vma = uvm_file_is_nvidia_uvm(vma->vm_file); + if (uvm_managed_vma) + uvm_record_unlock_mmap_lock_read(mm); + + ret = NV_GET_USER_PAGES_REMOTE(NULL, mm, start, vma_num_pages, is_writable, 0, pages, NULL); + + if (uvm_managed_vma) + uvm_record_lock_mmap_lock_read(mm); + + if (ret < 0) { + status = errno_to_nv_status(ret); + goto out; + } + + // We couldn't populate all pages, return error + if (ret < vma_num_pages) { + if (touch) { + unsigned long i; + + for (i = 0; i < ret; i++) { + UVM_ASSERT(pages[i]); + put_page(pages[i]); + } + } + + status = NV_ERR_NO_MEMORY; + goto out; + } + + if (touch) { + unsigned long i; + + for (i = 0; i < vma_num_pages; i++) { + uvm_touch_page(pages[i]); + put_page(pages[i]); + } + } + +out: + uvm_kvfree(pages); + return status; +} + +NV_STATUS uvm_populate_pageable(struct mm_struct *mm, + const unsigned long start, + const unsigned long length, + int min_prot, + bool touch, + uvm_populate_permissions_t populate_permissions) +{ + struct vm_area_struct *vma; + const unsigned long end = start + length; + unsigned long prev_end = end; + + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(length)); + uvm_assert_mmap_lock_locked(mm); + + vma = find_vma_intersection(mm, start, end); + if (!vma || (start < vma->vm_start)) + return NV_ERR_INVALID_ADDRESS; + + // VMAs are validated and populated one at a time, since they may have + // different protection flags + // Validation of VM_SPECIAL flags is delegated to get_user_pages + for (; vma->vm_start <= prev_end; vma = vma->vm_next) { + NV_STATUS status = uvm_populate_pageable_vma(vma, start, end - start, min_prot, touch, populate_permissions); + + if (status != NV_OK) + return status; + + if (vma->vm_end >= end) + return NV_OK; + + prev_end = vma->vm_end; + } + + // Input range not fully covered by VMAs + return NV_ERR_INVALID_ADDRESS; +} + +NV_STATUS uvm_api_populate_pageable(const UVM_POPULATE_PAGEABLE_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + bool allow_managed; + bool skip_prot_check; + int min_prot; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + if (params->flags & ~UVM_POPULATE_PAGEABLE_FLAGS_ALL) + return NV_ERR_INVALID_ARGUMENT; + + if ((params->flags & UVM_POPULATE_PAGEABLE_FLAGS_TEST_ALL) && !uvm_enable_builtin_tests) { + UVM_INFO_PRINT("Test flag set for UVM_POPULATE_PAGEABLE. Did you mean to insmod with uvm_enable_builtin_tests=1?\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // Population of managed ranges is only allowed for test purposes. The goal + // is to validate that it is possible to populate pageable ranges backed by + // VMAs with the VM_MIXEDMAP or VM_DONTEXPAND special flags set. But since + // there is no portable way to force allocation of such memory from user + // space, and it is not safe to change the flags of an already created + // VMA from kernel space, we take advantage of the fact that managed ranges + // have both special flags set at creation time (see uvm_mmap) + allow_managed = params->flags & UVM_POPULATE_PAGEABLE_FLAG_ALLOW_MANAGED; + + skip_prot_check = params->flags & UVM_POPULATE_PAGEABLE_FLAG_SKIP_PROT_CHECK; + if (skip_prot_check) + min_prot = 0; + else + min_prot = VM_READ; + + // Check size, alignment and overflow. VMA validations are performed by + // populate_pageable + if (uvm_api_range_invalid(params->base, params->length)) + return NV_ERR_INVALID_ADDRESS; + + // mmap_lock is needed to traverse the vmas in the input range and call + // into get_user_pages. 
Unlike most UVM APIs, this one is defined to only + // work on current->mm, not the mm associated with the VA space (if any). + uvm_down_read_mmap_lock(current->mm); + + if (allow_managed || uvm_va_space_range_empty(va_space, params->base, params->base + params->length - 1)) { + status = uvm_populate_pageable(current->mm, + params->base, + params->length, + min_prot, + false, + UVM_POPULATE_PERMISSIONS_INHERIT); + } + else { + status = NV_ERR_INVALID_ADDRESS; + } + + uvm_up_read_mmap_lock(current->mm); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_populate_pageable.h b/kernel-open/nvidia-uvm/uvm_populate_pageable.h new file mode 100644 index 000000000..5c19fa559 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_populate_pageable.h @@ -0,0 +1,66 @@ +/******************************************************************************* + Copyright (c) 2018 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_POPULATE_PAGEABLE_H__ +#define __UVM_POPULATE_PAGEABLE_H__ + +// Types of permissions to influence the address range population. +// PERMISSIONS_INHERIT will use the permissions of the vma. +// PERMISSIONS_ANY will populate the pages with any permissions allowed by the +// vma. This behaves the same as PERMISSIONS_INHERIT except that write mappings +// are not guaranteed if the vma has VM_WRITE. +// PERMISSIONS_WRITE will populate the pages only if the vma has write access. +// This guarantees write mappings are populated. +typedef enum +{ + UVM_POPULATE_PERMISSIONS_INHERIT, + UVM_POPULATE_PERMISSIONS_ANY, + UVM_POPULATE_PERMISSIONS_WRITE, +} uvm_populate_permissions_t; + +// Populate the pages of the given vma that overlap with the +// [start:start+length) range. If any of the pages was not populated, we return +// NV_ERR_NO_MEMORY. See the comment below for details on the touch argument. +// +// Locking: vma->vm_mm->mmap_lock must be held in read or write mode +NV_STATUS uvm_populate_pageable_vma(struct vm_area_struct *vma, + unsigned long start, + unsigned long length, + int min_prot, + bool touch, + uvm_populate_permissions_t populate_permissions); + +// Populate all the pages in the given range by calling get_user_pages. The +// range must be fully backed by vmas. If any of the pages was not populated, we +// return NV_ERR_NO_MEMORY. The caller can request a touch of the populated +// pages. This can be useful in virtualization environment. 
See +// uvm_ats_service_fault() for example usage. +// +// Locking: mm->mmap_lock must be held in read or write mode +NV_STATUS uvm_populate_pageable(struct mm_struct *mm, + unsigned long start, + unsigned long length, + int min_prot, + bool touch, + uvm_populate_permissions_t populate_permissions); +#endif diff --git a/kernel-open/nvidia-uvm/uvm_processors.h b/kernel-open/nvidia-uvm/uvm_processors.h new file mode 100644 index 000000000..39b5a0be7 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_processors.h @@ -0,0 +1,580 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PROCESSORS_H__ +#define __UVM_PROCESSORS_H__ + +#include "uvm_linux.h" +#include "uvm_common.h" + +#define UVM_MAX_UNIQUE_GPU_PAIRS SUM_FROM_0_TO_N(UVM_MAX_GPUS - 1) + +// Processor identifiers +// ===================== +// +// UVM uses its own identifiers to refer to the processors in the system. For +// simplicity (and performance), integers are used. However, in order to +// provide type safety, they are wrapped within the uvm_processor_id_t struct. +// The range of valid identifiers needs to cover the maximum number of +// supported GPUs on a system plus the CPU. CPU is assigned value 0, and GPUs +// range: [1, UVM_ID_MAX_GPUS]. +// +// There are some functions that only expect GPU identifiers and, in order to +// make it clearer, the uvm_gpu_id_t alias type is provided. However, as this +// type is just a typedef of uvm_processor_id_t, there is no type checking +// performed by the compiler. +// +// Identifier value vs index +// ------------------------- +// +// Although we can rely on helpers for most of the operations related to +// processor ids, there are some scenarios in which we need to obtain their +// numerical value. Notably: +// - Logging +// - Array indexing +// +// Therefore, a helper is provided to obtain this value. However, there is a +// special case for array indexing, as there are some arrays that only contain +// entries for GPUs. In that case, the array cannot be directly indexed with +// the identifier's value. Instead, we use a helper that provides the index of +// the GPU within the GPU id space (basically id - 1). +// +// In the diagram below, MAX_SUB is used to abbreviate +// UVM_ID_MAX_SUB_PROCESSORS. 
+// +// |-------------------------- uvm_processor_id_t ----------------------| +// | | +// | |----------------------- uvm_gpu_id_t ------------------------|| +// | | || +// Proc type | CPU | GPU ... GPU ... GPU || +// | | || +// ID values | 0 | 1 ... i+1 ... UVM_ID_MAX_PROCESSORS-1 || +// +// GPU index 0 ... i ... UVM_ID_MAX_GPUS-1 +// | | | | +// | | | | +// | |-------------| | |-----------------------------| +// | | | | +// | | | | +// GPU index 0 ... MAX_SUB-1 ... i*MAX_SUB ... (i+1)*MAX_SUB-1 ... UVM_GLOBAL_ID_MAX_GPUS-1 +// +// ID values | 0 | 1 ... MAX_SUB ... (i*MAX_SUB)+1 ... (i+1)*MAX_SUB ... UVM_GLOBAL_ID_MAX_PROCESSORS-1 || +// | | || +// Proc type | CPU | GPU ... GPU ... GPU ... GPU ... GPU || +// | | || +// | |-------------------------------------- uvm_global_gpu_id_t ---------------------------------------|| +// | | +// |----------------------------------------- uvm_global_processor_id_t -------------------------------------| +// +// When SMC is enabled, each GPU partition gets its own uvm_gpu_t object. +// However, there can only be a single partition per GPU in a VA space, so +// uvm_processor_id_t/uvm_processor_mask_t can still be used when operating +// in the context of a VA space. In the global context, types that can refer +// to all individual partitions need to be used, though. Therefore, we +// provide the uvm_global_gpu_id_t/uvm_global_processor_mask_t types and the +// corresponding uvm_global_gpu_id*/uvm_global_processor_mask* helpers. + +#define UVM_PROCESSOR_MASK(mask_t, \ + prefix_fn_mask, \ + maxval, \ + proc_id_t, \ + proc_id_ctor) \ + \ +typedef struct \ +{ \ + DECLARE_BITMAP(bitmap, maxval); \ +} mask_t; \ + \ +static bool prefix_fn_mask##_test(const mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + return test_bit(id.val, mask->bitmap); \ +} \ + \ +static void prefix_fn_mask##_set_atomic(mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + set_bit(id.val, mask->bitmap); \ +} \ + \ +static void prefix_fn_mask##_set(mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + __set_bit(id.val, mask->bitmap); \ +} \ + \ +static void prefix_fn_mask##_clear_atomic(mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + clear_bit(id.val, mask->bitmap); \ +} \ + \ +static void prefix_fn_mask##_clear(mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + __clear_bit(id.val, mask->bitmap); \ +} \ + \ +static bool prefix_fn_mask##_test_and_set_atomic(mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + return test_and_set_bit(id.val, mask->bitmap); \ +} \ + \ +static bool prefix_fn_mask##_test_and_set(mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + return __test_and_set_bit(id.val, mask->bitmap); \ +} \ + \ +static bool prefix_fn_mask##_test_and_clear_atomic(mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + return test_and_clear_bit(id.val, mask->bitmap); \ +} \ + \ +static bool prefix_fn_mask##_test_and_clear(mask_t *mask, proc_id_t id) \ +{ \ + UVM_ASSERT_MSG(id.val < (maxval), "id %u\n", id.val); \ + \ + return __test_and_clear_bit(id.val, mask->bitmap); \ +} \ + \ +static void prefix_fn_mask##_zero(mask_t *mask) \ +{ \ + bitmap_zero(mask->bitmap, (maxval)); \ +} \ + \ +static bool 
prefix_fn_mask##_empty(const mask_t *mask) \ +{ \ + return bitmap_empty(mask->bitmap, (maxval)); \ +} \ + \ +static void prefix_fn_mask##_copy(mask_t *dst, const mask_t *src) \ +{ \ + bitmap_copy(dst->bitmap, src->bitmap, (maxval)); \ +} \ + \ +static bool prefix_fn_mask##_and(mask_t *dst, const mask_t *src1, const mask_t *src2) \ +{ \ + return bitmap_and(dst->bitmap, src1->bitmap, src2->bitmap, (maxval)) != 0; \ +} \ + \ +static void prefix_fn_mask##_or(mask_t *dst, const mask_t *src1, const mask_t *src2) \ +{ \ + bitmap_or(dst->bitmap, src1->bitmap, src2->bitmap, (maxval)); \ +} \ + \ +static bool prefix_fn_mask##_andnot(mask_t *dst, const mask_t *src1, const mask_t *src2) \ +{ \ + return bitmap_andnot(dst->bitmap, src1->bitmap, src2->bitmap, (maxval)); \ +} \ + \ +static void prefix_fn_mask##_xor(mask_t *dst, const mask_t *src1, const mask_t *src2) \ +{ \ + bitmap_xor(dst->bitmap, src1->bitmap, src2->bitmap, (maxval)); \ +} \ + \ +static proc_id_t prefix_fn_mask##_find_first_id(const mask_t *mask) \ +{ \ + return proc_id_ctor(find_first_bit(mask->bitmap, (maxval))); \ +} \ + \ +static proc_id_t prefix_fn_mask##_find_first_gpu_id(const mask_t *mask) \ +{ \ + return proc_id_ctor(find_next_bit(mask->bitmap, (maxval), UVM_ID_GPU0_VALUE)); \ +} \ + \ +static proc_id_t prefix_fn_mask##_find_next_id(const mask_t *mask, proc_id_t min_id) \ +{ \ + return proc_id_ctor(find_next_bit(mask->bitmap, (maxval), min_id.val)); \ +} \ + \ +static proc_id_t prefix_fn_mask##_find_next_gpu_id(const mask_t *mask, proc_id_t min_gpu_id) \ +{ \ + return proc_id_ctor(find_next_bit(mask->bitmap, (maxval), min_gpu_id.val)); \ +} \ + \ +static proc_id_t prefix_fn_mask##_find_first_unset_id(const mask_t *mask) \ +{ \ + return proc_id_ctor(find_first_zero_bit(mask->bitmap, (maxval))); \ +} \ + \ +static proc_id_t prefix_fn_mask##_find_next_unset_id(const mask_t *mask, proc_id_t min_id) \ +{ \ + return proc_id_ctor(find_next_zero_bit(mask->bitmap, (maxval), min_id.val)); \ +} \ + \ +static bool prefix_fn_mask##_equal(const mask_t *mask_in1, const mask_t *mask_in2) \ +{ \ + return bitmap_equal(mask_in1->bitmap, mask_in2->bitmap, (maxval)) != 0; \ +} \ + \ +static bool prefix_fn_mask##_subset(const mask_t *subset, const mask_t *mask) \ +{ \ + return bitmap_subset(subset->bitmap, mask->bitmap, (maxval)) != 0; \ +} \ + \ +static NvU32 prefix_fn_mask##_get_count(const mask_t *mask) \ +{ \ + return bitmap_weight(mask->bitmap, (maxval)); \ +} \ + \ +static NvU32 prefix_fn_mask##_get_gpu_count(const mask_t *mask) \ +{ \ + NvU32 gpu_count = prefix_fn_mask##_get_count(mask); \ + \ + if (prefix_fn_mask##_test(mask, proc_id_ctor(UVM_ID_CPU_VALUE))) \ + --gpu_count; \ + \ + return gpu_count; \ +} + +typedef struct +{ + NvU32 val; +} uvm_processor_id_t; + +typedef struct +{ + NvU32 val; +} uvm_global_processor_id_t; + +typedef uvm_processor_id_t uvm_gpu_id_t; +typedef uvm_global_processor_id_t uvm_global_gpu_id_t; + +// Static value assigned to the CPU +#define UVM_ID_CPU_VALUE 0 +#define UVM_ID_GPU0_VALUE (UVM_ID_CPU_VALUE + 1) + +// ID values for the CPU and first GPU, respectively; the values for both types +// of IDs must match to enable sharing of UVM_PROCESSOR_MASK(). 
+#define UVM_GLOBAL_ID_CPU_VALUE UVM_ID_CPU_VALUE +#define UVM_GLOBAL_ID_GPU0_VALUE UVM_ID_GPU0_VALUE + +// Maximum number of GPUs/processors that can be represented with the id types +#define UVM_ID_MAX_GPUS UVM_MAX_GPUS +#define UVM_ID_MAX_PROCESSORS UVM_MAX_PROCESSORS + +#define UVM_ID_MAX_SUB_PROCESSORS 8 + +#define UVM_GLOBAL_ID_MAX_GPUS (UVM_MAX_GPUS * UVM_ID_MAX_SUB_PROCESSORS) +#define UVM_GLOBAL_ID_MAX_PROCESSORS (UVM_GLOBAL_ID_MAX_GPUS + 1) + +#define UVM_ID_CPU ((uvm_processor_id_t) { .val = UVM_ID_CPU_VALUE }) +#define UVM_ID_INVALID ((uvm_processor_id_t) { .val = UVM_ID_MAX_PROCESSORS }) +#define UVM_GLOBAL_ID_CPU ((uvm_global_processor_id_t) { .val = UVM_GLOBAL_ID_CPU_VALUE }) +#define UVM_GLOBAL_ID_INVALID ((uvm_global_processor_id_t) { .val = UVM_GLOBAL_ID_MAX_PROCESSORS }) + +#define UVM_ID_CHECK_BOUNDS(id) UVM_ASSERT_MSG(id.val <= UVM_ID_MAX_PROCESSORS, "id %u\n", id.val) + +#define UVM_GLOBAL_ID_CHECK_BOUNDS(id) UVM_ASSERT_MSG(id.val <= UVM_GLOBAL_ID_MAX_PROCESSORS, "id %u\n", id.val) + +static int uvm_id_cmp(uvm_processor_id_t id1, uvm_processor_id_t id2) +{ + UVM_ID_CHECK_BOUNDS(id1); + UVM_ID_CHECK_BOUNDS(id2); + + return UVM_CMP_DEFAULT(id1.val, id2.val); +} + +static bool uvm_id_equal(uvm_processor_id_t id1, uvm_processor_id_t id2) +{ + UVM_ID_CHECK_BOUNDS(id1); + UVM_ID_CHECK_BOUNDS(id2); + + return id1.val == id2.val; +} + +static bool uvm_global_id_equal(uvm_global_processor_id_t id1, uvm_global_processor_id_t id2) +{ + UVM_GLOBAL_ID_CHECK_BOUNDS(id1); + UVM_GLOBAL_ID_CHECK_BOUNDS(id2); + + return id1.val == id2.val; +} + +#define UVM_ID_IS_CPU(id) uvm_id_equal(id, UVM_ID_CPU) +#define UVM_ID_IS_INVALID(id) uvm_id_equal(id, UVM_ID_INVALID) +#define UVM_ID_IS_VALID(id) (!UVM_ID_IS_INVALID(id)) +#define UVM_ID_IS_GPU(id) (!UVM_ID_IS_CPU(id) && !UVM_ID_IS_INVALID(id)) + +#define UVM_GLOBAL_ID_IS_CPU(id) uvm_global_id_equal(id, UVM_GLOBAL_ID_CPU) +#define UVM_GLOBAL_ID_IS_INVALID(id) uvm_global_id_equal(id, UVM_GLOBAL_ID_INVALID) +#define UVM_GLOBAL_ID_IS_VALID(id) (!UVM_GLOBAL_ID_IS_INVALID(id)) +#define UVM_GLOBAL_ID_IS_GPU(id) (!UVM_GLOBAL_ID_IS_CPU(id) && !UVM_GLOBAL_ID_IS_INVALID(id)) + +static uvm_processor_id_t uvm_id_from_value(NvU32 val) +{ + uvm_processor_id_t ret = { .val = val }; + + UVM_ID_CHECK_BOUNDS(ret); + + return ret; +} + +static uvm_gpu_id_t uvm_gpu_id_from_value(NvU32 val) +{ + uvm_gpu_id_t ret = uvm_id_from_value(val); + + UVM_ASSERT(!UVM_ID_IS_CPU(ret)); + + return ret; +} + +static uvm_global_processor_id_t uvm_global_id_from_value(NvU32 val) +{ + uvm_global_processor_id_t ret = { .val = val }; + + UVM_GLOBAL_ID_CHECK_BOUNDS(ret); + + return ret; +} + +static uvm_global_gpu_id_t uvm_global_gpu_id_from_value(NvU32 val) +{ + uvm_global_gpu_id_t ret = uvm_global_id_from_value(val); + + UVM_ASSERT(!UVM_GLOBAL_ID_IS_CPU(ret)); + + return ret; +} + +// Create a GPU id from the given GPU id index (previously obtained via +// uvm_id_gpu_index) +static uvm_gpu_id_t uvm_gpu_id_from_index(NvU32 index) +{ + return uvm_gpu_id_from_value(index + UVM_ID_GPU0_VALUE); +} + +static uvm_processor_id_t uvm_id_next(uvm_processor_id_t id) +{ + ++id.val; + + UVM_ID_CHECK_BOUNDS(id); + + return id; +} + +static uvm_gpu_id_t uvm_gpu_id_next(uvm_gpu_id_t id) +{ + UVM_ASSERT(UVM_ID_IS_GPU(id)); + + ++id.val; + + UVM_ID_CHECK_BOUNDS(id); + + return id; +} + +// Same as uvm_gpu_id_from_index but for uvm_global_processor_id_t +static uvm_global_gpu_id_t uvm_global_gpu_id_from_index(NvU32 index) +{ + return uvm_global_gpu_id_from_value(index + UVM_GLOBAL_ID_GPU0_VALUE); +} 
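+
+// Illustrative sketch (added commentary, not part of the original change; the
+// function name below is hypothetical): a worked example of the "value vs.
+// index" mapping described at the top of this file. With UVM_ID_GPU0_VALUE == 1,
+// the GPU with index i has processor id value i + 1, and the global GPU id of
+// its first sub-processor has value i * UVM_ID_MAX_SUB_PROCESSORS + 1.
+static inline void uvm_processor_id_index_example(void)
+{
+    // First GPU: index 0 corresponds to id value UVM_ID_GPU0_VALUE (1)
+    uvm_gpu_id_t first_gpu = uvm_gpu_id_from_index(0);
+
+    // Global GPU id of that GPU's first sub-processor: also value 1
+    uvm_global_gpu_id_t first_global_gpu = uvm_global_gpu_id_from_index(0);
+
+    UVM_ASSERT(UVM_ID_IS_GPU(first_gpu));
+    UVM_ASSERT(first_gpu.val == UVM_ID_GPU0_VALUE);
+    UVM_ASSERT(first_global_gpu.val == UVM_GLOBAL_ID_GPU0_VALUE);
+}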
+ +static uvm_global_processor_id_t uvm_global_id_next(uvm_global_processor_id_t id) +{ + ++id.val; + + UVM_GLOBAL_ID_CHECK_BOUNDS(id); + + return id; +} + +static uvm_global_gpu_id_t uvm_global_gpu_id_next(uvm_global_gpu_id_t id) +{ + UVM_ASSERT(UVM_GLOBAL_ID_IS_GPU(id)); + + ++id.val; + + UVM_GLOBAL_ID_CHECK_BOUNDS(id); + + return id; +} + +// This function returns the numerical value within [0, UVM_ID_MAX_PROCESSORS) +// of the given processor id +static NvU32 uvm_id_value(uvm_processor_id_t id) +{ + UVM_ASSERT(UVM_ID_IS_VALID(id)); + + return id.val; +} + +// This function returns the numerical value within +// [0, UVM_GLOBAL_ID_MAX_PROCESSORS) of the given processor id +static NvU32 uvm_global_id_value(uvm_global_processor_id_t id) +{ + UVM_ASSERT(UVM_GLOBAL_ID_IS_VALID(id)); + + return id.val; +} + +// This function returns the index of the given GPU id within the GPU id space +// [0, UVM_ID_MAX_GPUS) +static NvU32 uvm_id_gpu_index(uvm_gpu_id_t id) +{ + UVM_ASSERT(UVM_ID_IS_GPU(id)); + + return id.val - UVM_ID_GPU0_VALUE; +} + +// This function returns the index of the given GPU id within the GPU id space +// [0, UVM_GLOBAL_ID_MAX_GPUS) +static NvU32 uvm_global_id_gpu_index(const uvm_global_gpu_id_t id) +{ + UVM_ASSERT(UVM_GLOBAL_ID_IS_GPU(id)); + + return id.val - UVM_GLOBAL_ID_GPU0_VALUE; +} + +static NvU32 uvm_global_id_gpu_index_from_gpu_id(const uvm_gpu_id_t id) +{ + UVM_ASSERT(UVM_ID_IS_GPU(id)); + + return uvm_id_gpu_index(id) * UVM_ID_MAX_SUB_PROCESSORS; +} + +static NvU32 uvm_id_gpu_index_from_global_gpu_id(const uvm_global_gpu_id_t id) +{ + UVM_ASSERT(UVM_GLOBAL_ID_IS_GPU(id)); + + return uvm_global_id_gpu_index(id) / UVM_ID_MAX_SUB_PROCESSORS; +} + +static uvm_global_gpu_id_t uvm_global_gpu_id_from_gpu_id(const uvm_gpu_id_t id) +{ + UVM_ASSERT(UVM_ID_IS_GPU(id)); + + return uvm_global_gpu_id_from_index(uvm_global_id_gpu_index_from_gpu_id(id)); +} + +static uvm_global_gpu_id_t uvm_global_gpu_id_from_parent_index(NvU32 index) +{ + UVM_ASSERT(index < UVM_MAX_GPUS); + + return uvm_global_gpu_id_from_gpu_id(uvm_gpu_id_from_value(index + UVM_GLOBAL_ID_GPU0_VALUE)); +} + +static uvm_global_gpu_id_t uvm_global_gpu_id_from_sub_processor_index(const uvm_gpu_id_t id, NvU32 sub_index) +{ + NvU32 index; + + UVM_ASSERT(sub_index < UVM_ID_MAX_SUB_PROCESSORS); + + index = uvm_global_id_gpu_index_from_gpu_id(id) + sub_index; + return uvm_global_gpu_id_from_index(index); +} + +static uvm_gpu_id_t uvm_gpu_id_from_global_gpu_id(const uvm_global_gpu_id_t id) +{ + UVM_ASSERT(UVM_GLOBAL_ID_IS_GPU(id)); + + return uvm_gpu_id_from_index(uvm_id_gpu_index_from_global_gpu_id(id)); +} + +static NvU32 uvm_global_id_sub_processor_index(const uvm_global_gpu_id_t id) +{ + return uvm_global_id_gpu_index(id) % UVM_ID_MAX_SUB_PROCESSORS; +} + +UVM_PROCESSOR_MASK(uvm_processor_mask_t, \ + uvm_processor_mask, \ + UVM_ID_MAX_PROCESSORS, \ + uvm_processor_id_t, \ + uvm_id_from_value) + +UVM_PROCESSOR_MASK(uvm_global_processor_mask_t, \ + uvm_global_processor_mask, \ + UVM_GLOBAL_ID_MAX_PROCESSORS, \ + uvm_global_processor_id_t, \ + uvm_global_id_from_value) + +// Like uvm_processor_mask_subset but ignores the CPU in both masks. Returns +// whether the GPUs in subset are a subset of the GPUs in mask. 
+static bool uvm_processor_mask_gpu_subset(const uvm_processor_mask_t *subset, const uvm_processor_mask_t *mask) +{ + uvm_processor_mask_t subset_gpus; + uvm_processor_mask_copy(&subset_gpus, subset); + uvm_processor_mask_clear(&subset_gpus, UVM_ID_CPU); + return uvm_processor_mask_subset(&subset_gpus, mask); +} + +#define for_each_id_in_mask(id, mask) \ + for ((id) = uvm_processor_mask_find_first_id(mask); \ + UVM_ID_IS_VALID(id); \ + (id) = uvm_processor_mask_find_next_id((mask), uvm_id_next(id))) + +#define for_each_gpu_id_in_mask(gpu_id, mask) \ + for ((gpu_id) = uvm_processor_mask_find_first_gpu_id((mask)); \ + UVM_ID_IS_VALID(gpu_id); \ + (gpu_id) = uvm_processor_mask_find_next_id((mask), uvm_gpu_id_next(gpu_id))) + +#define for_each_global_id_in_mask(id, mask) \ + for ((id) = uvm_global_processor_mask_find_first_id(mask); \ + UVM_GLOBAL_ID_IS_VALID(id); \ + (id) = uvm_global_processor_mask_find_next_id((mask), uvm_global_id_next(id))) + +#define for_each_global_gpu_id_in_mask(gpu_id, mask) \ + for ((gpu_id) = uvm_global_processor_mask_find_first_gpu_id((mask)); \ + UVM_GLOBAL_ID_IS_VALID(gpu_id); \ + (gpu_id) = uvm_global_processor_mask_find_next_id((mask), uvm_global_gpu_id_next(gpu_id))) + +// Helper to iterate over all valid gpu ids +#define for_each_gpu_id(i) \ + for (i = uvm_gpu_id_from_value(UVM_ID_GPU0_VALUE); UVM_ID_IS_VALID(i); i = uvm_gpu_id_next(i)) +#define for_each_global_gpu_id(i) \ + for (i = uvm_global_gpu_id_from_value(UVM_GLOBAL_ID_GPU0_VALUE); UVM_GLOBAL_ID_IS_VALID(i); i = uvm_global_gpu_id_next(i)) + +#define for_each_global_sub_processor_id_in_gpu(id, i) \ + for (i = uvm_global_gpu_id_from_gpu_id(id); \ + UVM_GLOBAL_ID_IS_VALID(i) && \ + (uvm_global_id_value(i) < uvm_global_id_value(uvm_global_gpu_id_from_gpu_id(id)) + UVM_ID_MAX_SUB_PROCESSORS); \ + i = uvm_global_gpu_id_next(i)) + +// Helper to iterate over all valid gpu ids +#define for_each_processor_id(i) for (i = UVM_ID_CPU; UVM_ID_IS_VALID(i); i = uvm_id_next(i)) + +#define for_each_global_id(i) for (i = UVM_GLOBAL_ID_CPU; UVM_GLOBAL_ID_IS_VALID(i); i = uvm_global_id_next(i)) + +static bool uvm_processor_uuid_eq(const NvProcessorUuid *uuid1, const NvProcessorUuid *uuid2) +{ + return memcmp(uuid1, uuid2, sizeof(*uuid1)) == 0; +} + +// Copies a UUID from source (src) to destination (dst). +static void uvm_processor_uuid_copy(NvProcessorUuid *dst, const NvProcessorUuid *src) +{ + memcpy(dst, src, sizeof(*dst)); +} + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_procfs.c b/kernel-open/nvidia-uvm/uvm_procfs.c new file mode 100644 index 000000000..b95866fd6 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_procfs.c @@ -0,0 +1,107 @@ +/******************************************************************************* + Copyright (c) 2015-2018 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_global.h" +#include "uvm_procfs.h" +#include "uvm_gpu.h" + +#include "nv-procfs.h" +#include "uvm_linux.h" + +#define UVM_PROC_DIR_NAME "driver/nvidia-uvm" +#define UVM_PROC_GPUS_DIR_NAME "gpus" +#define UVM_PROC_CPU_DIR_NAME "cpu" + +#if defined(CONFIG_PROC_FS) + // This parameter enables additional debug procfs entries. It's enabled by + // default for debug and develop builds and disabled for release builds. + int uvm_enable_debug_procfs = UVM_IS_DEBUG() || UVM_IS_DEVELOP(); + module_param(uvm_enable_debug_procfs, int, S_IRUGO); + MODULE_PARM_DESC(uvm_enable_debug_procfs, "Enable debug procfs entries in /proc/" UVM_PROC_DIR_NAME); +#else + int uvm_enable_debug_procfs = 0; +#endif + +static struct proc_dir_entry *uvm_proc_dir; +static struct proc_dir_entry *uvm_proc_gpus; +static struct proc_dir_entry *uvm_proc_cpu; + +NV_STATUS uvm_procfs_init() +{ + if (!uvm_procfs_is_enabled()) + return NV_OK; + + uvm_proc_dir = NV_CREATE_PROC_DIR(UVM_PROC_DIR_NAME, NULL); + if (uvm_proc_dir == NULL) + return NV_ERR_OPERATING_SYSTEM; + + uvm_proc_gpus = NV_CREATE_PROC_DIR(UVM_PROC_GPUS_DIR_NAME, uvm_proc_dir); + if (uvm_proc_gpus == NULL) + return NV_ERR_OPERATING_SYSTEM; + + uvm_proc_cpu = NV_CREATE_PROC_DIR(UVM_PROC_CPU_DIR_NAME, uvm_proc_dir); + if (uvm_proc_cpu == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +void uvm_procfs_exit() +{ + uvm_procfs_destroy_entry(uvm_proc_dir); +} + +// TODO: Bug 1767237: Copied from nv-procfs.c. Refactor it out to +// nv-procfs-common.c. 
+static void procfs_destroy_entry_with_root(struct proc_dir_entry *entry, struct proc_dir_entry *delimiter) +{ +#if defined(NV_PROC_REMOVE_PRESENT) + proc_remove(entry); +#else + while (entry) { + struct proc_dir_entry *next = entry->next; + if (entry->subdir) + procfs_destroy_entry_with_root(entry->subdir, delimiter); + remove_proc_entry(entry->name, entry->parent); + if (entry == delimiter) + break; + entry = next; + } +#endif +} + +void uvm_procfs_destroy_entry(struct proc_dir_entry *entry) +{ + procfs_destroy_entry_with_root(entry, entry); +} + +struct proc_dir_entry *uvm_procfs_get_gpu_base_dir() +{ + return uvm_proc_gpus; +} + +struct proc_dir_entry *uvm_procfs_get_cpu_base_dir() +{ + return uvm_proc_cpu; +} + diff --git a/kernel-open/nvidia-uvm/uvm_procfs.h b/kernel-open/nvidia-uvm/uvm_procfs.h new file mode 100644 index 000000000..0521b62c1 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_procfs.h @@ -0,0 +1,84 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PROCFS_H__ +#define __UVM_PROCFS_H__ + +#include "uvm_extern_decl.h" +#include "uvm_forward_decl.h" +#include "uvm_linux.h" +#include "nv-procfs.h" +#include "conftest.h" + +NV_STATUS uvm_procfs_init(void); +void uvm_procfs_exit(void); + +// Is procfs enabled at all? +static bool uvm_procfs_is_enabled(void) +{ +#if defined(CONFIG_PROC_FS) + return true; +#else + return false; +#endif +} + +// Is debug procfs enabled? This indicates that debug procfs files should be +// created. +static bool uvm_procfs_is_debug_enabled(void) +{ + return uvm_enable_debug_procfs != 0; +} + +struct proc_dir_entry *uvm_procfs_get_gpu_base_dir(void); +struct proc_dir_entry *uvm_procfs_get_cpu_base_dir(void); + +void uvm_procfs_destroy_entry(struct proc_dir_entry *entry); + +int uvm_procfs_open_callback(void); +void uvm_procfs_close_callback(void); + +// Helper for printing into a seq_file if it's not NULL and UVM_DBG_PRINT +// otherwise. Useful when sharing a print function for both debug output and +// procfs output. +#define UVM_SEQ_OR_DBG_PRINT(seq_file, format, ...) 
\ + do { \ + if (seq_file != NULL) \ + seq_printf(seq_file, format, ##__VA_ARGS__); \ + else \ + UVM_DBG_PRINT(format, ##__VA_ARGS__); \ + } while (0) + +#if defined(CONFIG_PROC_FS) + +// Defer PM lock acquisition until the respective read() callback +// is invoked, to ensure the lock is acquired and released by +// the same thread. Else the lock tracking validation code must +// be disabled for this lock, which is undesirable. As a result, +// lockless macro is used below. See bug 2594854 for additional +// information. +#define UVM_DEFINE_SINGLE_PROCFS_FILE(name) \ + NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY_WITHOUT_LOCK(name) +#endif + +#endif // __UVM_PROCFS_H__ diff --git a/kernel-open/nvidia-uvm/uvm_pte_batch.c b/kernel-open/nvidia-uvm/uvm_pte_batch.c new file mode 100644 index 000000000..64fdb5496 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pte_batch.c @@ -0,0 +1,216 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_pte_batch.h" +#include "uvm_hal.h" + +static bool uvm_gpu_phys_address_eq(uvm_gpu_phys_address_t pa1, uvm_gpu_phys_address_t pa2) +{ + return pa1.address == pa2.address && pa1.aperture == pa2.aperture; +} + +void uvm_pte_batch_begin(uvm_push_t *push, uvm_pte_batch_t *batch) +{ + memset(batch, 0, sizeof(*batch)); + + batch->membar = UVM_MEMBAR_GPU; + batch->push = push; +} + +static void uvm_pte_batch_flush_ptes_inline(uvm_pte_batch_t *batch) +{ + uvm_gpu_address_t inline_data_addr; + uvm_gpu_t *gpu = uvm_push_get_gpu(batch->push); + size_t ptes_size = batch->pte_count * batch->pte_entry_size; + + UVM_ASSERT(batch->pte_count != 0); + UVM_ASSERT(batch->inlining); + UVM_ASSERT_MSG(ptes_size == uvm_push_inline_data_size(&batch->inline_data), "ptes size %zd inline data %zd\n", + ptes_size, uvm_push_inline_data_size(&batch->inline_data)); + + batch->inlining = false; + inline_data_addr = uvm_push_inline_data_end(&batch->inline_data); + + uvm_push_set_flag(batch->push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + uvm_push_set_flag(batch->push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + gpu->parent->ce_hal->memcopy(batch->push, + uvm_gpu_address_from_phys(batch->pte_first_address), + inline_data_addr, + ptes_size); +} + +static void uvm_pte_batch_flush_ptes_memset(uvm_pte_batch_t *batch) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(batch->push); + uvm_gpu_address_t addr = uvm_gpu_address_from_phys(batch->pte_first_address); + NvU32 i; + + UVM_ASSERT(batch->pte_count != 0); + UVM_ASSERT(!batch->inlining); + + for (i = 0; i < batch->pte_count; ++i) { + uvm_push_set_flag(batch->push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + uvm_push_set_flag(batch->push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + gpu->parent->ce_hal->memset_8(batch->push, addr, batch->pte_bits_queue[i], sizeof(NvU64)); + addr.address += batch->pte_entry_size; + } +} + +static void uvm_pte_batch_flush_ptes(uvm_pte_batch_t *batch) +{ + if (batch->pte_count == 0) + return; + + if (batch->inlining) + uvm_pte_batch_flush_ptes_inline(batch); + else + uvm_pte_batch_flush_ptes_memset(batch); + + batch->pte_count = 0; +} + +static void uvm_pte_batch_write_consecutive_inline(uvm_pte_batch_t *batch, NvU64 pte_bits) +{ + size_t extra_size = batch->pte_entry_size - sizeof(pte_bits); + + UVM_ASSERT(extra_size < batch->pte_entry_size); + UVM_ASSERT(batch->inlining); + + // Add the PTE bits + uvm_push_inline_data_add(&batch->inline_data, &pte_bits, sizeof(pte_bits)); + + // And zero out the rest of the entry if anything remaining + if (extra_size != 0) + memset(uvm_push_inline_data_get(&batch->inline_data, extra_size), 0, extra_size); +} + +static void uvm_pte_batch_write_consecutive(uvm_pte_batch_t *batch, NvU64 pte_bits) +{ + if (batch->inlining) { + uvm_pte_batch_write_consecutive_inline(batch, pte_bits); + } + else { + UVM_ASSERT_MSG(batch->pte_count < UVM_PTE_BATCH_MAX_PTES, "pte_count %u\n", batch->pte_count); + batch->pte_bits_queue[batch->pte_count] = pte_bits; + } + ++batch->pte_count; +} + +static void pte_batch_begin_inline(uvm_pte_batch_t *batch) +{ + NvU32 i; + + UVM_ASSERT(!batch->inlining); + + batch->inlining = true; + uvm_push_inline_data_begin(batch->push, &batch->inline_data); + + for (i = 0; i < batch->pte_count; ++i) + uvm_pte_batch_write_consecutive_inline(batch, batch->pte_bits_queue[i]); +} + +void uvm_pte_batch_write_ptes(uvm_pte_batch_t *batch, uvm_gpu_phys_address_t first_pte, NvU64 *pte_bits, NvU32 entry_size, NvU32 entry_count) +{ + NvU32 max_entries = 
UVM_PUSH_INLINE_DATA_MAX_SIZE / entry_size; + + // Updating PTEs in sysmem requires a sysmembar after writing them and + // before any TLB invalidates. + if (first_pte.aperture == UVM_APERTURE_SYS) + batch->membar = UVM_MEMBAR_SYS; + + while (entry_count > 0) { + NvU32 entries_this_time; + + uvm_pte_batch_flush_ptes(batch); + pte_batch_begin_inline(batch); + + entries_this_time = min(max_entries, entry_count); + uvm_push_inline_data_add(&batch->inline_data, pte_bits, entries_this_time * entry_size); + + batch->pte_entry_size = entry_size; + batch->pte_first_address = first_pte; + batch->pte_count = entries_this_time; + + pte_bits += entries_this_time * (entry_size / sizeof(*pte_bits)); + first_pte.address += entries_this_time * entry_size; + entry_count -= entries_this_time; + } +} + +void uvm_pte_batch_write_pte(uvm_pte_batch_t *batch, uvm_gpu_phys_address_t pte, NvU64 pte_bits, NvU32 pte_size) +{ + uvm_gpu_phys_address_t consecutive_pte_address = batch->pte_first_address; + bool needs_flush = false; + consecutive_pte_address.address += batch->pte_count * pte_size; + + // Updating PTEs in sysmem requires a sysmembar after writing them and + // before any TLB invalidates. + if (pte.aperture == UVM_APERTURE_SYS) + batch->membar = UVM_MEMBAR_SYS; + + // Note that pte_count and pte_entry_size can be zero for the first PTE. + // That's ok as the first PTE will never need a flush. + if ((batch->pte_count + 1) * batch->pte_entry_size > UVM_PUSH_INLINE_DATA_MAX_SIZE) + needs_flush = true; + + if (!uvm_gpu_phys_address_eq(pte, consecutive_pte_address)) + needs_flush = true; + + if (batch->pte_entry_size != pte_size) + needs_flush = true; + + if (needs_flush) { + uvm_pte_batch_flush_ptes(batch); + batch->pte_first_address = pte; + batch->pte_entry_size = pte_size; + } + + if (!batch->inlining && batch->pte_count == UVM_PTE_BATCH_MAX_PTES) + pte_batch_begin_inline(batch); + + uvm_pte_batch_write_consecutive(batch, pte_bits); +} + +void uvm_pte_batch_clear_ptes(uvm_pte_batch_t *batch, uvm_gpu_phys_address_t first_pte, NvU64 empty_pte_bits, NvU32 entry_size, NvU32 entry_count) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(batch->push); + + // TODO: Bug 1767241: Allow small clears to batch + uvm_pte_batch_flush_ptes(batch); + + uvm_push_set_flag(batch->push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + uvm_push_set_flag(batch->push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + gpu->parent->ce_hal->memset_8(batch->push, + uvm_gpu_address_from_phys(first_pte), + empty_pte_bits, + entry_size * entry_count); + + if (first_pte.aperture == UVM_APERTURE_SYS) + batch->membar = UVM_MEMBAR_SYS; +} + +void uvm_pte_batch_end(uvm_pte_batch_t *batch) +{ + uvm_pte_batch_flush_ptes(batch); + uvm_hal_wfi_membar(batch->push, batch->membar); +} diff --git a/kernel-open/nvidia-uvm/uvm_pte_batch.h b/kernel-open/nvidia-uvm/uvm_pte_batch.h new file mode 100644 index 000000000..ccacfacd3 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pte_batch.h @@ -0,0 +1,97 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice 
and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PTE_BATCH_H__ +#define __UVM_PTE_BATCH_H__ + +#include "uvm_forward_decl.h" +#include "uvm_hal_types.h" +#include "uvm_push.h" + +// Max PTEs to queue up for memsets before switching to inline memcopy. +// Currently inline memcopy has to read the data from sysmem (that's where the +// pushbuffer is) adding some latency so it's not obvious that it's better than +// a memset that can just write the PTE bits to local vidmem. On the other hand, +// launching a CE memset operation per each PTE also adds latency and takes a +// lot of pushbuffer space. +// +// TODO: Bug 1767241: Benchmark the two approaches to see whether the current +// number makes sense. +// +// TODO: Bug 1767241: If pushbuffer is ever moved to vidmem the tradeoffs can +// change as inline memcopy would have lower latency. +#define UVM_PTE_BATCH_MAX_PTES 4 + +struct uvm_pte_batch_struct +{ + uvm_push_t *push; + + uvm_push_inline_data_t inline_data; + bool inlining; + + uvm_gpu_phys_address_t pte_first_address; + NvU32 pte_entry_size; + NvU64 pte_bits_queue[UVM_PTE_BATCH_MAX_PTES]; + NvU32 pte_count; + + // A membar to be applied after all the PTE writes. + // Starts out as UVM_MEMBAR_GPU and is promoted to UVM_MEMBAR_SYS if any of + // the written PTEs are in sysmem. + uvm_membar_t membar; +}; + +// Begin a PTE batch +void uvm_pte_batch_begin(uvm_push_t *push, uvm_pte_batch_t *batch); + +// End a PTE batch +// +// This pushes all the queued up PTE operations, followed by a membar. +// The membar is chosen based on the aperture of all the written PTEs. +// +// This is guaranteed to push a Host WFI, so it can be followed immediately by a +// TLB invalidate. 
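+//
+// A minimal sketch of the intended call sequence (illustrative only; real
+// callers may interleave other work between these steps):
+//
+//   uvm_pte_batch_t batch;
+//   uvm_pte_batch_begin(push, &batch);
+//   uvm_pte_batch_write_ptes(&batch, first_pte, pte_bits, entry_size, entry_count);
+//   uvm_pte_batch_end(&batch);
+//   // ...a TLB invalidate can be pushed immediately after the batch ends.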
+void uvm_pte_batch_end(uvm_pte_batch_t *batch); + +// Queue up a write of PTEs from a buffer +void uvm_pte_batch_write_ptes(uvm_pte_batch_t *batch, + uvm_gpu_phys_address_t first_pte, NvU64 *pte_bits, NvU32 entry_size, NvU32 entry_count); + +// Queue up a single PTE write +void uvm_pte_batch_write_pte(uvm_pte_batch_t *batch, + uvm_gpu_phys_address_t pte, NvU64 pte_bits, NvU32 entry_size); + +// Queue up a clear of PTEs +void uvm_pte_batch_clear_ptes(uvm_pte_batch_t *batch, + uvm_gpu_phys_address_t first_pte, NvU64 pte_bits, NvU32 entry_size, NvU32 entry_count); + +// A helper for a single call to uvm_pte_batch_write_ptes() that begin and ends the PTE batch internally +static void uvm_pte_batch_single_write_ptes(uvm_push_t *push, + uvm_gpu_phys_address_t first_pte, NvU64 *pte_bits, NvU32 entry_size, NvU32 entry_count) +{ + uvm_pte_batch_t batch; + uvm_pte_batch_begin(push, &batch); + uvm_pte_batch_write_ptes(&batch, first_pte, pte_bits, entry_size, entry_count); + uvm_pte_batch_end(&batch); +} + +#endif // __UVM_PTE_BATCH_H__ diff --git a/kernel-open/nvidia-uvm/uvm_push.c b/kernel-open/nvidia-uvm/uvm_push.c new file mode 100644 index 000000000..423b6175d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_push.c @@ -0,0 +1,448 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_extern_decl.h" +#include "uvm_forward_decl.h" +#include "uvm_push.h" +#include "uvm_channel.h" +#include "uvm_hal.h" +#include "uvm_kvmalloc.h" +#include "uvm_linux.h" +#include "nv_stdarg.h" + +// This parameter enables push description tracking in push info. It's enabled +// by default for debug and develop builds and disabled for release builds. 
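+//
+// The parameter is registered read/write (S_IRUGO|S_IWUSR) below, so it can
+// presumably also be toggled at runtime through sysfs, e.g. (path assumes the
+// module is loaded as nvidia-uvm):
+//   echo 1 > /sys/module/nvidia_uvm/parameters/uvm_debug_enable_push_desc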
+static unsigned uvm_debug_enable_push_desc = UVM_IS_DEBUG() || UVM_IS_DEVELOP(); +module_param(uvm_debug_enable_push_desc, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(uvm_debug_enable_push_desc, "Enable push description tracking"); + +static unsigned uvm_debug_enable_push_acquire_info = 0; +module_param(uvm_debug_enable_push_acquire_info, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(uvm_debug_enable_push_acquire_info, "Enable push acquire information tracking"); + +static uvm_push_acquire_info_t *push_acquire_info_from_push(uvm_push_t *push) +{ + uvm_channel_t *channel = push->channel; + + UVM_ASSERT(channel != NULL); + UVM_ASSERT(push->channel_tracking_value == 0); + + UVM_ASSERT_MSG(push->push_info_index < channel->num_gpfifo_entries, "index %u\n", push->push_info_index); + + if (!uvm_debug_enable_push_acquire_info) + return NULL; + + return &channel->push_acquire_infos[push->push_info_index]; +} + +// Acquire a single tracker entry. Subsequently pushed GPU work will not start +// before the work tracked by tracker entry is complete. +static void push_acquire_tracker_entry(uvm_push_t *push, + uvm_tracker_entry_t *tracker_entry, + uvm_push_acquire_info_t *push_acquire_info) +{ + uvm_channel_t *entry_channel; + uvm_channel_t *channel; + NvU64 semaphore_va; + uvm_gpu_t *gpu; + + UVM_ASSERT(push != NULL); + UVM_ASSERT(tracker_entry != NULL); + + entry_channel = tracker_entry->channel; + if (entry_channel == NULL) + return; + + channel = push->channel; + if (channel == entry_channel) + return; + + semaphore_va = uvm_channel_tracking_semaphore_get_gpu_va_in_channel(entry_channel, channel); + gpu = uvm_channel_get_gpu(channel); + + gpu->parent->host_hal->semaphore_acquire(push, semaphore_va, (NvU32)tracker_entry->value); + + if (push_acquire_info) { + const NvU32 num_values = push_acquire_info->num_values; + + UVM_ASSERT(uvm_debug_enable_push_acquire_info); + + if (num_values < UVM_PUSH_ACQUIRE_INFO_MAX_ENTRIES) { + push_acquire_info->values[num_values].value = tracker_entry->value; + push_acquire_info->values[num_values].gpu_id = uvm_channel_get_gpu(entry_channel)->id; + push_acquire_info->values[num_values].is_proxy = uvm_channel_is_proxy(channel); + + if (uvm_channel_is_proxy(channel)) { + push_acquire_info->values[num_values].proxy.pool_index = uvm_channel_index_in_pool(channel); + } + else { + push_acquire_info->values[num_values].runlist_id = entry_channel->channel_info.hwRunlistId; + push_acquire_info->values[num_values].channel_id = entry_channel->channel_info.hwChannelId; + } + } + ++push_acquire_info->num_values; + } +} + +void uvm_push_acquire_tracker(uvm_push_t *push, uvm_tracker_t *tracker) +{ + uvm_tracker_entry_t *entry; + uvm_push_acquire_info_t *push_acquire_info; + + UVM_ASSERT(push != NULL); + + if (tracker == NULL) + return; + + uvm_tracker_remove_completed(tracker); + + push_acquire_info = push_acquire_info_from_push(push); + + for_each_tracker_entry(entry, tracker) + push_acquire_tracker_entry(push, entry, push_acquire_info); +} + +static NV_STATUS push_reserve_channel(uvm_channel_manager_t *manager, + uvm_channel_type_t channel_type, + uvm_gpu_t *dst_gpu, + uvm_channel_t **channel) +{ + NV_STATUS status; + + // Pick a channel and reserve a GPFIFO entry + // TODO: Bug 1764953: use the dependencies in the tracker to pick a channel + // in a smarter way. 
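+    //
+    // For now the choice depends only on the requested channel type (or on the
+    // destination GPU for GPU-to-GPU transfers); the tracker contents are not
+    // consulted.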
+ if (dst_gpu == NULL) + status = uvm_channel_reserve_type(manager, channel_type, channel); + else + status = uvm_channel_reserve_gpu_to_gpu(manager, dst_gpu, channel); + + if (status == NV_OK) + UVM_ASSERT(*channel); + + return status; +} + +static void push_set_description(uvm_push_t *push, const char *format, va_list args) +{ + uvm_push_info_t *push_info; + + UVM_ASSERT(uvm_push_info_is_tracking_descriptions()); + + push_info = uvm_push_info_from_push(push); + vsnprintf(push_info->description, sizeof(push_info->description), format, args); +} + +void uvm_push_set_description(uvm_push_t *push, const char *format, ...) +{ + va_list va; + + if (!uvm_push_info_is_tracking_descriptions()) + return; + + va_start(va, format); + push_set_description(push, format, va); + va_end(va); +} + +// Internal helper to fill info push info as part of beginning a push. +static void push_fill_info(uvm_push_t *push, + const char *filename, + const char *function, + int line, + const char *format, + va_list args) +{ + uvm_push_acquire_info_t *push_acquire_info; + uvm_push_info_t *push_info = uvm_push_info_from_push(push); + + push_info->filename = kbasename(filename); + push_info->function = function; + push_info->line = line; + + push_acquire_info = push_acquire_info_from_push(push); + if (push_acquire_info) + push_acquire_info->num_values = 0; + + if (uvm_push_info_is_tracking_descriptions()) + push_set_description(push, format, args); +} + +static NV_STATUS push_begin_acquire_with_info(uvm_channel_t *channel, + uvm_tracker_t *tracker, + uvm_push_t *push, + const char *filename, + const char *function, + int line, + const char *format, + va_list args) +{ + NV_STATUS status; + + memset(push, 0, sizeof(*push)); + + push->gpu = uvm_channel_get_gpu(channel); + + status = uvm_channel_begin_push(channel, push); + if (status != NV_OK) + return status; + + push_fill_info(push, filename, function, line, format, args); + + uvm_push_acquire_tracker(push, tracker); + + return NV_OK; +} + +__attribute__ ((format(printf, 9, 10))) +NV_STATUS __uvm_push_begin_acquire_with_info(uvm_channel_manager_t *manager, + uvm_channel_type_t type, + uvm_gpu_t *dst_gpu, + uvm_tracker_t *tracker, + uvm_push_t *push, + const char *filename, + const char *function, + int line, + const char *format, ...) +{ + va_list args; + NV_STATUS status; + uvm_channel_t *channel; + + if (dst_gpu != NULL) { + UVM_ASSERT(type == UVM_CHANNEL_TYPE_GPU_TO_GPU); + UVM_ASSERT(dst_gpu != manager->gpu); + } + + status = push_reserve_channel(manager, type, dst_gpu, &channel); + if (status != NV_OK) + return status; + + UVM_ASSERT(channel); + + va_start(args, format); + status = push_begin_acquire_with_info(channel, tracker, push, filename, function, line, format, args); + va_end(args); + + return status; +} + +__attribute__ ((format(printf, 7, 8))) +NV_STATUS __uvm_push_begin_acquire_on_channel_with_info(uvm_channel_t *channel, + uvm_tracker_t *tracker, + uvm_push_t *push, + const char *filename, + const char *function, + int line, + const char *format, ...) 
+{ + va_list args; + NV_STATUS status; + + status = uvm_channel_reserve(channel); + if (status != NV_OK) + return status; + + va_start(args, format); + status = push_begin_acquire_with_info(channel, tracker, push, filename, function, line, format, args); + va_end(args); + + return status; +} + +bool uvm_push_info_is_tracking_descriptions() +{ + return uvm_debug_enable_push_desc != 0; +} + +bool uvm_push_info_is_tracking_acquires() +{ + return uvm_debug_enable_push_acquire_info != 0; +} + +void uvm_push_end(uvm_push_t *push) +{ + uvm_push_flag_t flag; + uvm_channel_end_push(push); + + flag = find_first_bit(push->flags, UVM_PUSH_FLAG_COUNT); + + // All flags should be reset by the end of the push + UVM_ASSERT_MSG(flag == UVM_PUSH_FLAG_COUNT, "first flag set %d\n", flag); +} + +NV_STATUS uvm_push_wait(uvm_push_t *push) +{ + uvm_tracker_entry_t entry; + uvm_push_get_tracker_entry(push, &entry); + + return uvm_tracker_wait_for_entry(&entry); +} + +NV_STATUS uvm_push_end_and_wait(uvm_push_t *push) +{ + uvm_push_end(push); + + return uvm_push_wait(push); +} + +NV_STATUS uvm_push_begin_fake(uvm_gpu_t *gpu, uvm_push_t *push) +{ + memset(push, 0, sizeof(*push)); + push->begin = (NvU32 *)uvm_kvmalloc(UVM_MAX_PUSH_SIZE); + if (!push->begin) + return NV_ERR_NO_MEMORY; + + push->next = push->begin; + push->gpu = gpu; + + return NV_OK; +} + +void uvm_push_end_fake(uvm_push_t *push) +{ + uvm_kvfree(push->begin); + push->begin = NULL; +} + +void *uvm_push_inline_data_get(uvm_push_inline_data_t *data, size_t size) +{ + void *buffer = data->next_data; + + UVM_ASSERT(!uvm_global_is_suspended()); + + UVM_ASSERT_MSG(uvm_push_get_size(data->push) + uvm_push_inline_data_size(data) + UVM_METHOD_SIZE + size <= UVM_MAX_PUSH_SIZE, + "push size %u inline data size %zu new data size %zu max push %u\n", + uvm_push_get_size(data->push), uvm_push_inline_data_size(data), size, UVM_MAX_PUSH_SIZE); + UVM_ASSERT_MSG(uvm_push_inline_data_size(data) + size <= UVM_PUSH_INLINE_DATA_MAX_SIZE, + "inline data size %zu new data size %zu max %u\n", + uvm_push_inline_data_size(data), size, UVM_PUSH_INLINE_DATA_MAX_SIZE); + + data->next_data += size; + + return buffer; +} + +void *uvm_push_inline_data_get_aligned(uvm_push_inline_data_t *data, size_t size, size_t alignment) +{ + NvU64 next_ptr = (NvU64)(uintptr_t)data->next_data; + size_t offset = 0; + char *buffer; + + UVM_ASSERT_MSG(IS_ALIGNED(alignment, UVM_METHOD_SIZE), "alignment %zu\n", alignment); + + offset = UVM_ALIGN_UP(next_ptr, alignment) - next_ptr; + + buffer = (char *)uvm_push_inline_data_get(data, size + offset); + return buffer + offset; +} + +uvm_gpu_address_t uvm_push_inline_data_end(uvm_push_inline_data_t *data) +{ + NvU64 inline_data_address; + uvm_push_t *push = data->push; + uvm_channel_t *channel = push->channel; + + // Round up the inline data size to the method size + size_t noop_size = roundup(uvm_push_inline_data_size(data), UVM_METHOD_SIZE); + + if (channel == NULL) { + // Fake push, just return the CPU address. + inline_data_address = (NvU64) (uintptr_t)(push->next + 1); + } + else { + // Offset of the inlined data within the push. + inline_data_address = (push->next - push->begin + 1) * UVM_METHOD_SIZE; + + // Add GPU VA of the push begin + inline_data_address += uvm_pushbuffer_get_gpu_va_for_push(channel->pool->manager->pushbuffer, push); + } + + // This will place a noop right before the inline data that was written. + // Plus UVM_METHOD_SIZE for the noop method itself. 
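+    // The noop causes the GPU to skip over the inline payload when the push is
+    // executed, so the data bytes are never interpreted as methods.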
+ uvm_push_get_gpu(push)->parent->host_hal->noop(push, noop_size + UVM_METHOD_SIZE); + + return uvm_gpu_address_virtual(inline_data_address); +} + +// Same as uvm_push_get_single_inline_buffer() but provides the specified +// alignment. +static void *push_get_single_inline_buffer_aligned(uvm_push_t *push, + size_t size, + size_t alignment, + uvm_gpu_address_t *gpu_address) +{ + uvm_push_inline_data_t data; + void *buffer; + + uvm_push_inline_data_begin(push, &data); + buffer = uvm_push_inline_data_get_aligned(&data, size, alignment); + *gpu_address = uvm_push_inline_data_end(&data); + + gpu_address->address = UVM_ALIGN_UP(gpu_address->address, alignment); + + return buffer; +} + +void *uvm_push_get_single_inline_buffer(uvm_push_t *push, size_t size, uvm_gpu_address_t *gpu_address) +{ + return push_get_single_inline_buffer_aligned(push, size, UVM_METHOD_SIZE, gpu_address); +} + +NvU64 *uvm_push_timestamp(uvm_push_t *push) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + const size_t timestamp_size = 16; + NvU64 *timestamp; + uvm_gpu_address_t address; + + timestamp = (NvU64 *)push_get_single_inline_buffer_aligned(push, timestamp_size, timestamp_size, &address); + // Timestamp is in the second half of the 16 byte semaphore release + timestamp += 1; + + if (uvm_channel_is_ce(push->channel)) + gpu->parent->ce_hal->semaphore_timestamp(push, address.address); + + + + + else + UVM_ASSERT_MSG(0, "Semaphore release timestamp on an unsupported channel.\n"); + + return timestamp; +} + +bool uvm_push_method_validate(uvm_push_t *push, NvU8 subch, NvU32 method_address, NvU32 method_data) +{ + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + if (subch == UVM_SUBCHANNEL_CE) + return gpu->parent->ce_hal->method_validate(push, method_address, method_data); + else if (subch == UVM_SUBCHANNEL_HOST) + return gpu->parent->host_hal->method_validate(push, method_address, method_data); + else if (subch == UVM_SW_OBJ_SUBCHANNEL) + return gpu->parent->host_hal->sw_method_validate(push, method_address, method_data); + + UVM_ERR_PRINT("Unsupported subchannel 0x%x\n", subch); + return false; +} diff --git a/kernel-open/nvidia-uvm/uvm_push.h b/kernel-open/nvidia-uvm/uvm_push.h new file mode 100644 index 000000000..6d8d15021 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_push.h @@ -0,0 +1,430 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_PUSH_H__ +#define __UVM_PUSH_H__ + +#include "uvm_forward_decl.h" +#include "uvm_hal_types.h" +#include "uvm_channel.h" +#include "uvm_push_macros.h" +#include "uvm_tracker.h" +#include "nvtypes.h" + +// Space (in bytes) used by uvm_push_end() on a CE channel. +// This is the storage required by a semaphore release. +#define UVM_PUSH_CE_END_SIZE 24 + + + + + +// The max amount of inline push data is limited by how much space can be jumped +// over with a single NOOP method. +#define UVM_PUSH_INLINE_DATA_MAX_SIZE (UVM_METHOD_COUNT_MAX * UVM_METHOD_SIZE) + +typedef enum +{ + // By default all CE transfers are not pipelined. + // This flag indicates that next CE transfer should be pipelined. + UVM_PUSH_FLAG_CE_NEXT_PIPELINED, + + // By default all operations include a membar sys after any transfer and + // before a semaphore operation. + // This flag indicates that next operation should use no membar at all. + UVM_PUSH_FLAG_NEXT_MEMBAR_NONE, + + // By default all operations include a membar sys after any transfer and + // before a semaphore operation. + // This flag indicates that next operation should use a membar gpu instead. + UVM_PUSH_FLAG_NEXT_MEMBAR_GPU, + + UVM_PUSH_FLAG_COUNT, +} uvm_push_flag_t; + +struct uvm_push_struct +{ + // Location of the first method of the push + NvU32 *begin; + + // Location of the next method to be written + NvU32 *next; + + // The GPU the push is being done on + uvm_gpu_t *gpu; + + // The channel the push is being done on or has been finished on + uvm_channel_t *channel; + + // The tracking value when the push completes on the GPU on the channel + // above. It will be 0 for an on-going push. + NvU64 channel_tracking_value; + + // Index for the push info stored within the channel. + // Only valid for an on-going push (after uvm_push_begin*(), but before + // uvm_push_end()). + NvU32 push_info_index; + + // A bitmap of flags from uvm_push_flag_t + DECLARE_BITMAP(flags, UVM_PUSH_FLAG_COUNT); +}; + +#define UVM_PUSH_ACQUIRE_INFO_MAX_ENTRIES 16 + +// Use a custom type to keep track of acquired values, instead of +// using uvm_tracker_entry_t, to avoid having to clear the entries on GPU +// removal +struct uvm_push_acquire_info_struct +{ + struct + { + NvU64 value; + uvm_gpu_id_t gpu_id; + bool is_proxy; + + // Runlist and channel ID not exposed by proxy channels, so those are + // identified by their pool index + union + { + // UVM internal channels + struct + { + NvU32 runlist_id; + NvU32 channel_id; + }; + + // Proxy channels (SR-IOV heavy only) + struct + { + NvU32 pool_index; + } proxy; + }; + } values[UVM_PUSH_ACQUIRE_INFO_MAX_ENTRIES]; + + NvU32 num_values; +}; + +struct uvm_push_info_struct +{ + // List node used to track available push info entries + struct list_head available_list_node; + + // Filename where the push was started + const char *filename; + + // Line number where the push was started + int line; + + // Function where the push was started + const char *function; + + // Description of the push created from the uvm_push_begin*() format and + // arguments. + char description[128]; + + // Procedure to be called when the corresponding push is complete. + // This procedure is called with the UVM_LOCK_ORDER_CHANNEL spin lock held. 
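+    // Because a spin lock is held, the callback must not sleep; the atomic
+    // counter callbacks in uvm_push_test.c are simple examples.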
+ void (*on_complete)(void *); + void *on_complete_data; +}; + +typedef struct +{ + // The push the inline data is part of + uvm_push_t *push; + + // Location of the next data to be written + char *next_data; +} uvm_push_inline_data_t; + +// Set the push description after the push already begun. This is useful if +// the description includes data generated after the push started. +void uvm_push_set_description(uvm_push_t *push, const char *format, ...); + +// Is tracking push descriptions enabled? +bool uvm_push_info_is_tracking_descriptions(void); + +// Is tracking of values acquired by the push enabled? +bool uvm_push_info_is_tracking_acquires(void); + +// Internal helper for the uvm_push_begin* family of macros +__attribute__ ((format(printf, 9, 10))) +NV_STATUS __uvm_push_begin_acquire_with_info(uvm_channel_manager_t *manager, + uvm_channel_type_t type, + uvm_gpu_t *dst_gpu, + uvm_tracker_t *tracker, + uvm_push_t *push, + const char *filename, + const char *function, + int line, + const char *format, ...); + +// Internal helper for uvm_push_begin_on_channel and +// uvm_push_begin_acquire_on_channel +__attribute__ ((format(printf, 7, 8))) +NV_STATUS __uvm_push_begin_acquire_on_channel_with_info(uvm_channel_t *channel, + uvm_tracker_t *tracker, + uvm_push_t *push, + const char *filename, + const char *function, + int line, + const char *format, ...); + +// Begin a push on a channel of channel_type type +// Picks the first available channel. If all channels of the given type are +// busy, spin waits for one to become available. +// +// Notably requires a description of the push to be provided. This is currently +// unused, but will be in the future for tracking push history. +// +// Locking: on success acquires the concurrent push semaphore until uvm_push_end() +#define uvm_push_begin(manager, type, push, format, ...) \ + __uvm_push_begin_acquire_with_info((manager), (type), NULL, NULL, (push), \ + __FILE__, __FUNCTION__, __LINE__, (format), ##__VA_ARGS__) + +// Begin a push on a channel of channel_type type with dependencies in the tracker +// This is equivalent to starting a push and acquiring the tracker, but in the +// future it will have the ability to pick the channel to do a push on in a +// smarter way based on its dependencies. +// +// Same as for uvm_push_acquire_tracker(), the tracker can be NULL. In this case +// this will be equivalent to just uvm_push_begin(). +// +// Locking: on success acquires the concurrent push semaphore until uvm_push_end() +#define uvm_push_begin_acquire(manager, type, tracker, push, format, ...) \ + __uvm_push_begin_acquire_with_info((manager), (type), NULL, (tracker), (push), \ + __FILE__, __FUNCTION__, __LINE__, (format), ##__VA_ARGS__) + +// Specialization of uvm_push_begin that is optimized for pushes that +// transfer data from manager->gpu to dst_gpu. +// dst_gpu must be NULL or a GPU other than manager->gpu +#define uvm_push_begin_gpu_to_gpu(manager, dst_gpu, push, format, ...) \ + __uvm_push_begin_acquire_with_info((manager), UVM_CHANNEL_TYPE_GPU_TO_GPU, (dst_gpu), NULL, (push), \ + __FILE__, __FUNCTION__, __LINE__, (format), ##__VA_ARGS__) + +// Same as uvm_push_begin_gpu_to_gpu except it also acquires the input tracker +// for the caller +#define uvm_push_begin_acquire_gpu_to_gpu(manager, dst_gpu, tracker, push, format, ...) 
\ + __uvm_push_begin_acquire_with_info((manager), UVM_CHANNEL_TYPE_GPU_TO_GPU, (dst_gpu), (tracker), (push), \ + __FILE__, __FUNCTION__, __LINE__, (format), ##__VA_ARGS__) + +// Begin a push on a specific channel +// If the channel is busy, spin wait for it to become available. +// +// Locking: on success acquires the concurrent push semaphore until uvm_push_end() +#define uvm_push_begin_on_channel(channel, push, format, ...) \ + __uvm_push_begin_acquire_on_channel_with_info((channel), NULL, (push), \ + __FILE__, __FUNCTION__, __LINE__, (format), ##__VA_ARGS__) + +// Same as uvm_push_begin_on_channel except it also acquires the input tracker +// for the caller +#define uvm_push_begin_acquire_on_channel(channel, tracker, push, format, ...) \ + __uvm_push_begin_acquire_on_channel_with_info((channel), (tracker), (push), \ + __FILE__, __FUNCTION__, __LINE__, (format), ##__VA_ARGS__) + +// End a push +// Finishes the push and submits the methods to the GPU. +// +// This will always release the channel tracking semaphore with CE and that +// release can be affected by setting the push flags (commonly +// UVM_PUSH_FLAGS_CE_NEXT_FLUSH) prior to calling uvm_push_end(). +// +// Notably doesn't wait for the push to complete on the GPU and is also +// guaranteed not to block waiting on any other GPU work to complete. The only +// contention that can happen is with other CPU threads updating channel and/or +// pushbuffer state, but all of these updates are expected to be fast. +// +// Completion of the push on the GPU can be tracked with a tracker by using +// uvm_tracker_add_push() or can be waited on directly with uvm_push_wait(). +// Also see uvm_push_end_and_wait() that combines ending and waiting for a push. +// +// Locking: releases the concurrent push semaphore acquired in uvm_push_begin*() +void uvm_push_end(uvm_push_t *push); + +// Wait for a push to complete its execution on the GPU. +// +// The push has to be finished prior to calling this function. +// Notably currently this will only check for errors on the channel the push has +// been made on while waiting for it to complete. +NV_STATUS uvm_push_wait(uvm_push_t *push); + +// End a push and wait for it to complete execution on the GPU +// Shortcut for uvm_push_end() and uvm_push_wait(). +NV_STATUS uvm_push_end_and_wait(uvm_push_t *push); + +// Get the tracker entry tracking the push +// The push has to be finished before calling this function. +static void uvm_push_get_tracker_entry(uvm_push_t *push, uvm_tracker_entry_t *entry) +{ + UVM_ASSERT(push->channel_tracking_value != 0); + UVM_ASSERT(push->channel != NULL); + + entry->channel = push->channel; + entry->value = push->channel_tracking_value; +} + +// Acquire all the entries in the tracker. +// Subsequently pushed GPU work will not start before all the work tracked by +// tracker is complete. +// Notably a NULL tracker is handled the same way as an empty tracker. 
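+//
+// Note that the uvm_push_begin_acquire*() macros above already acquire the
+// tracker as part of beginning a push, so calling this directly is typically
+// only needed to add dependencies after a push has started.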
+void uvm_push_acquire_tracker(uvm_push_t *push, uvm_tracker_t *tracker); + +// Set a push flag +static void uvm_push_set_flag(uvm_push_t *push, uvm_push_flag_t flag) +{ + UVM_ASSERT_MSG(flag < UVM_PUSH_FLAG_COUNT, "flag %u\n", (unsigned)flag); + + __set_bit(flag, push->flags); +} + +// Get and reset (if set) a push flag +static bool uvm_push_get_and_reset_flag(uvm_push_t *push, uvm_push_flag_t flag) +{ + UVM_ASSERT_MSG(flag < UVM_PUSH_FLAG_COUNT, "flag %u\n", (unsigned)flag); + + return __test_and_clear_bit(flag, push->flags); +} + +// Get and reset (if set) a membar push flag +static uvm_membar_t uvm_push_get_and_reset_membar_flag(uvm_push_t *push) +{ + if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE)) + return UVM_MEMBAR_NONE; + + if (uvm_push_get_and_reset_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU)) + return UVM_MEMBAR_GPU; + + return UVM_MEMBAR_SYS; +} + +// Get the size of the push so far +static NvU32 uvm_push_get_size(uvm_push_t *push) +{ + return (push->next - push->begin) * sizeof(*push->next); +} + +// Check whether the push still has free_space bytes available to be pushed +static bool uvm_push_has_space(uvm_push_t *push, NvU32 free_space) +{ + return (UVM_MAX_PUSH_SIZE - uvm_push_get_size(push)) >= free_space; +} + +// Fake push begin and end +// +// These do just enough for inline push data and uvm_push_get_gpu() to work. +// Used by tests that run on fake GPUs without a channel manager (see +// uvm_page_tree_test.c for an example). +NV_STATUS uvm_push_begin_fake(uvm_gpu_t *gpu, uvm_push_t *push); +void uvm_push_end_fake(uvm_push_t *push); + +// Begin an inline data fragment in the push +// +// The inline data will be ignored by the GPU, but can be referenced from +// subsequent commands via its GPU virtual address that's returned by +// uvm_push_inline_data_end(). +// Up to UVM_PUSH_INLINE_DATA_MAX_SIZE bytes can be added inline in the push +// with various helpers below. The start of the data is guaranteed to be +// initially aligned to UVM_METHOD_SIZE (4). +// While an inline data fragment is on-going (after inline_data_begin() but +// before inline_data_end()) no other commands should be issued in the push. +// +// Also see uvm_push_get_single_inline_buffer() for a simple way of adding a +// specified amount of data in one step. +static void uvm_push_inline_data_begin(uvm_push_t *push, uvm_push_inline_data_t *data) +{ + data->push = push; + // +1 for the NOOP method inserted at inline_data_end() + data->next_data = (char*)(push->next + 1); +} + +// End an line data fragment in the push +// +// Returns back the GPU address of the beginning of the inline data fragment. +uvm_gpu_address_t uvm_push_inline_data_end(uvm_push_inline_data_t *data); + +// Get the current size of the on-going inline data fragment. +// +// Can only be used while an inline data fragment is on-going. +static size_t uvm_push_inline_data_size(uvm_push_inline_data_t *data) +{ + return data->next_data - (char*)(data->push->next + 1); +} + +// Get a buffer of size bytes of inline data in the push +// +// Returns the CPU pointer to the beginning of the new size bytes of data that +// the caller is supposed to write. The buffer can be accessed as long as the +// push is on-going. +void *uvm_push_inline_data_get(uvm_push_inline_data_t *data, size_t size); + +// Same as uvm_push_inline_data_get() but provides the specified alignment. 
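+// The alignment is expected to be a multiple of UVM_METHOD_SIZE; the
+// implementation asserts this.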
+void *uvm_push_inline_data_get_aligned(uvm_push_inline_data_t *data, size_t size, size_t alignment); + +// Get a single buffer of size bytes of inline data in the push +// +// Returns the CPU pointer to the beginning of the buffer. The buffer can be +// accessed as long as the push is on-going. Also returns the GPU address of the +// buffer that can be accessed by commands in the same push. +// +// This is a wrapper around uvm_push_inline_data_begin() and +// uvm_push_inline_data_end() so see their comments for more details. +void *uvm_push_get_single_inline_buffer(uvm_push_t *push, size_t size, uvm_gpu_address_t *gpu_address); + +// Helper that copies size bytes of data from src into the inline data fragment +static void uvm_push_inline_data_add(uvm_push_inline_data_t *data, const void *src, size_t size) +{ + memcpy(uvm_push_inline_data_get(data, size), src, size); +} + +// Push an operation releasing a timestamp into the pushbuffer. +// +// Returns the CPU pointer into the pushbuffer where the timestamp is going to +// be written. The timestamp can be accessed from the on_complete callback of +// the push. +NvU64 *uvm_push_timestamp(uvm_push_t *push); + +static uvm_gpu_t *uvm_push_get_gpu(uvm_push_t *push) +{ + UVM_ASSERT(push->gpu); + + return push->gpu; +} + +// Validate that the given method can be pushed to the underlying channel. The +// method contents can be used to further validate individual fields. +bool uvm_push_method_validate(uvm_push_t *push, NvU8 subch, NvU32 method_address, NvU32 method_data); + +// Retrieve the push info object for a push that has already started +static uvm_push_info_t *uvm_push_info_from_push(uvm_push_t *push) +{ + uvm_channel_t *channel = push->channel; + + UVM_ASSERT(channel != NULL); + UVM_ASSERT(push->channel_tracking_value == 0); + + UVM_ASSERT_MSG(push->push_info_index < channel->num_gpfifo_entries, "index %u\n", push->push_info_index); + + return &channel->push_infos[push->push_info_index]; +} + +#endif // __UVM_PUSH_H__ diff --git a/kernel-open/nvidia-uvm/uvm_push_macros.h b/kernel-open/nvidia-uvm/uvm_push_macros.h new file mode 100644 index 000000000..5f07a4c8a --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_push_macros.h @@ -0,0 +1,268 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_PUSH_MACROS_H__ +#define __UVM_PUSH_MACROS_H__ + +#include "uvm_extern_decl.h" +#include "uvm_forward_decl.h" +#include "uvm_channel.h" +#include "nvtypes.h" +#include "nvmisc.h" +#include "cla06fsubch.h" +#include "cla16f.h" +#include "clb06f.h" + +#define HWMASK(d, r, f) DRF_MASK(NV ## d ## _ ## r ## _ ## f) +#define HWSHIFTMASK(d, r, f) DRF_SHIFTMASK(NV ## d ## _ ## r ## _ ## f) +#define HWSIZE(d, r, f) DRF_SIZE(NV ## d ## _ ## r ## _ ## f) +#define HWCONST(d, r, f, c) DRF_DEF(d, _ ## r, _ ## f, _ ## c) +#define HWVALUE(d, r, f, v) ({ \ + NvU32 _v = (v); \ + NvU32 val = DRF_NUM(d, _ ## r, _ ## f, _v); \ + UVM_ASSERT_MSG(_v == DRF_VAL(d, _ ## r, _ ## f, val), "v 0x%x mask 0x%x\n", _v, HWMASK(d, r, f)); \ + val; \ + }) + +#define HWMASK64(d, r, f) DRF_MASK64(NV ## d ## _ ## r ## _ ## f) +#define HWCONST64(d, r, f, c) DRF_DEF64(d, _ ## r, _ ## f, _ ## c) +#define HWVALUE64(d, r, f, v) ({ \ + NvU64 _v = (v); \ + NvU64 val = DRF_NUM64(d, _ ## r, _ ## f, _v); \ + UVM_ASSERT_MSG(_v == DRF_VAL64(d, _ ## r, _ ## f, val), "v 0x%llx mask 0x%llx\n", _v, HWMASK64(d, r, f)); \ + val; \ + }) + +#define HWMASK_MW(d, r, f) DRF_MASK_MW(NV ## d ## _ ## r ## _ ## f) +#define HWSIZE_MW(d, r, f) DRF_SIZE_MW(NV ## d ## _ ## r ## _ ## f) +#define HWCONST_MW(d, r, f, c) DRF_DEF_MW(d, _ ## r, _ ## f, _ ## c) +#define HWVALUE_MW(d, r, f, v) ({ \ + NvU32 _v = (v); \ + NvU32 val = DRF_NUM_MW(d, _ ## r, _ ## f, _v); \ + UVM_ASSERT_MSG(_v == DRF_VAL_MW(d, _ ## r, _ ## f, val), "v 0x%x mask 0x%x\n", _v, HWMASK_MW(d, r, f)); \ + val; \ + }) + +#define WRITE_HWCONST(v, d, r, f, c) FLD_SET_DRF(d,_##r,_##f,_##c, v) +#define WRITE_HWVALUE(v, d, r, f, n) FLD_SET_DRF_NUM(d,_##r,_##f, n, v) + +// nvmisc.h has FLD_SET_DRF_NUM64, but not FLD_SET_DRF64 +#define UVM_FLD_SET_DRF64(d,r,f,c,v) ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c)) + +#define WRITE_HWCONST64(v, d, r, f, c) UVM_FLD_SET_DRF64(d,_##r,_##f,_##c, v) +#define WRITE_HWVALUE64(v, d, r, f, n) FLD_SET_DRF_NUM64(d,_##r,_##f, n, v) + +#define READ_HWVALUE(v, d, r, f) DRF_VAL(d,_##r,_##f, v) +#define READ_HWVALUE_MW(v, d, r, f) DRF_VAL_MW(d,_##r,_##f, v) +#define WRITE_HWCONST_MW(v, d, r, f, c) FLD_ASSIGN_MW(NV##d##_##r##_##f, DRF_DEF_MW(d,_##r,_##f,_##c), v) +#define WRITE_HWVALUE_MW(v, d, r, f, n) FLD_ASSIGN_MW(NV##d##_##r##_##f, DRF_NUM_MW(d,_##r,_##f,n), v) + +// Host methods ignore the subchannel, just use 0 +#define UVM_SUBCHANNEL_HOST 0 + +// Starting with Kepler HW settled on using a fixed subchannel for CE. +#define UVM_SUBCHANNEL_CE NVA06F_SUBCHANNEL_COPY_ENGINE + +#define UVM_SUBCHANNEL_A16F UVM_SUBCHANNEL_HOST + +#define UVM_SUBCHANNEL_B06F UVM_SUBCHANNEL_HOST +#define UVM_SUBCHANNEL_B0B5 UVM_SUBCHANNEL_CE + +#define UVM_SUBCHANNEL_C06F UVM_SUBCHANNEL_HOST +#define UVM_SUBCHANNEL_C0B5 UVM_SUBCHANNEL_CE + +#define UVM_SUBCHANNEL_C36F UVM_SUBCHANNEL_HOST +#define UVM_SUBCHANNEL_C46F UVM_SUBCHANNEL_HOST + +#define UVM_SUBCHANNEL_C56F UVM_SUBCHANNEL_HOST +#define UVM_SUBCHANNEL_C6B5 UVM_SUBCHANNEL_CE + + + + + + +// Channel for UVM SW methods. This is defined in nv_uvm_types.h. RM does not +// care about the specific number as long as it's bigger than the largest HW +// value. For example, Kepler reserves subchannels 5-7 for software objects. 
+#define UVM_SUBCHANNEL_C076 UVM_SW_OBJ_SUBCHANNEL + + + + + + +#define UVM_METHOD_SIZE 4 +#define UVM_METHOD_COUNT_MAX HWMASK(B06F, DMA, INCR_COUNT) +#if HWMASK(B06F, DMA, INCR_COUNT) != HWMASK(B06F, DMA, NONINCR_COUNT) +#error "Unable to define UVM_METHOD_COUNT_MAX" +#endif + +#define UVM_METHOD_INC(subch, address, count) \ + (HWCONST(B06F, DMA, SEC_OP, INC_METHOD) | \ + HWVALUE(B06F, DMA, INCR_ADDRESS, (address) >> 2) | \ + HWVALUE(B06F, DMA, INCR_SUBCHANNEL, (subch)) | \ + HWVALUE(B06F, DMA, INCR_COUNT, (count))) + +#define UVM_METHOD_NONINC(subch, address, count) \ + (HWCONST(B06F, DMA, SEC_OP, NON_INC_METHOD) | \ + HWVALUE(B06F, DMA, NONINCR_ADDRESS, (address) >> 2) | \ + HWVALUE(B06F, DMA, NONINCR_SUBCHANNEL, (subch)) | \ + HWVALUE(B06F, DMA, NONINCR_COUNT, (count))) + +#define __UVM_ASSERT_CONTIGUOUS_METHODS(a1, a2) BUILD_BUG_ON((a2) - (a1) != 4) + +// __NV_PUSH_*U support being called recursively from the N+1 sized method with +// the _0U doing all the common things. +// Notably all the push macros assume that symbol "push" of type uvm_push_t * is +// in scope. +#define __NV_PUSH_0U(subch, count, a1) \ + do { \ + UVM_ASSERT(!uvm_global_is_suspended()); \ + UVM_ASSERT(uvm_push_get_size(push) + (count + 1) * 4 <= UVM_MAX_PUSH_SIZE); \ + UVM_ASSERT_MSG(a1 % 4 == 0, "Address %u\n", a1); \ + \ + push->next[0] = UVM_METHOD_INC(subch, a1, count); \ + ++push->next; \ + } while (0) + +#define __NV_PUSH_1U(subch, count, a1,d1) \ + do { \ + __NV_PUSH_0U(subch, count, a1); \ + push->next[0] = d1; \ + UVM_ASSERT_MSG(uvm_push_method_validate(push, subch, a1, d1), \ + "Method validation failed in channel %s\n", \ + push->channel->name); \ + ++push->next; \ + } while (0) + +#define __NV_PUSH_2U(subch, count, a1,d1, a2,d2) \ + do { \ + __UVM_ASSERT_CONTIGUOUS_METHODS(a1, a2); \ + __NV_PUSH_1U(subch, count, a1,d1); \ + UVM_ASSERT_MSG(uvm_push_method_validate(push, subch, a2, d2), \ + "Method validation failed in channel %s\n", \ + push->channel->name); \ + push->next[0] = d2; \ + ++push->next; \ + } while (0) + +#define __NV_PUSH_3U(subch, count, a1,d1, a2,d2, a3,d3) \ + do { \ + __UVM_ASSERT_CONTIGUOUS_METHODS(a2, a3); \ + __NV_PUSH_2U(subch, count, a1,d1, a2,d2); \ + UVM_ASSERT_MSG(uvm_push_method_validate(push, subch, a3, d3), \ + "Method validation failed in channel %s\n", \ + push->channel->name); \ + push->next[0] = d3; \ + ++push->next; \ + } while (0) + +#define __NV_PUSH_4U(subch, count, a1,d1, a2,d2, a3,d3, a4,d4) \ + do { \ + __UVM_ASSERT_CONTIGUOUS_METHODS(a3, a4); \ + __NV_PUSH_3U(subch, count, a1,d1, a2,d2, a3,d3); \ + UVM_ASSERT_MSG(uvm_push_method_validate(push, subch, a4, d4), \ + "Method validation failed in channel %s\n", \ + push->channel->name); \ + push->next[0] = d4; \ + ++push->next; \ + } while (0) + +#define __NV_PUSH_5U(subch, count, a1,d1, a2,d2, a3,d3, a4,d4, a5,d5) \ + do { \ + __UVM_ASSERT_CONTIGUOUS_METHODS(a4, a5); \ + __NV_PUSH_4U(subch, count, a1,d1, a2,d2, a3,d3, a4,d4); \ + UVM_ASSERT_MSG(uvm_push_method_validate(push, subch, a5, d5), \ + "Method validation failed in channel %s\n", \ + push->channel->name); \ + push->next[0] = d5; \ + ++push->next; \ + } while (0) + +#define __NV_PUSH_6U(subch, count, a1,d1, a2,d2, a3,d3, a4,d4, a5,d5, a6,d6) \ + do { \ + __UVM_ASSERT_CONTIGUOUS_METHODS(a5, a6); \ + __NV_PUSH_5U(subch, count, a1,d1, a2,d2, a3,d3, a4,d4, a5,d5); \ + UVM_ASSERT_MSG(uvm_push_method_validate(push, subch, a6, d6), \ + "Method validation failed in channel %s\n", \ + push->channel->name); \ + push->next[0] = d6; \ + ++push->next; \ + } while (0) + 
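+// NV_PUSH_1U()..NV_PUSH_6U() push 1..6 incrementing methods of the given class
+// on the subchannel selected by UVM_SUBCHANNEL_<class>. The method addresses
+// must be contiguous; the helpers above check that at compile time.
+//
+// Illustrative (hypothetical) use from a CE HAL routine, assuming "push" is in
+// scope and OFFSET_OUT_UPPER/OFFSET_OUT_LOWER are adjacent C0B5 methods:
+//   NV_PUSH_2U(C0B5, OFFSET_OUT_UPPER, HWVALUE(C0B5, OFFSET_OUT_UPPER, UPPER, (NvU32)(dst >> 32)),
+//                    OFFSET_OUT_LOWER, (NvU32)dst);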
+#define NV_PUSH_1U(class, a1,d1) \ + __NV_PUSH_1U(UVM_SUBCHANNEL_ ## class, 1, \ + NV ## class ## _ ## a1, d1) + +#define NV_PUSH_2U(class, a1,d1, a2,d2) \ + __NV_PUSH_2U(UVM_SUBCHANNEL_ ## class, 2, \ + NV ## class ## _ ## a1, d1, \ + NV ## class ## _ ## a2, d2) + +#define NV_PUSH_3U(class, a1,d1, a2,d2, a3,d3) \ + __NV_PUSH_3U(UVM_SUBCHANNEL_ ## class, 3, \ + NV ## class ## _ ## a1, d1, \ + NV ## class ## _ ## a2, d2, \ + NV ## class ## _ ## a3, d3) + +#define NV_PUSH_4U(class, a1,d1, a2,d2, a3,d3, a4,d4) \ + __NV_PUSH_4U(UVM_SUBCHANNEL_ ## class, 4, \ + NV ## class ## _ ## a1, d1, \ + NV ## class ## _ ## a2, d2, \ + NV ## class ## _ ## a3, d3, \ + NV ## class ## _ ## a4, d4) + +#define NV_PUSH_5U(class, a1,d1, a2,d2, a3,d3, a4,d4, a5,d5) \ + __NV_PUSH_5U(UVM_SUBCHANNEL_ ## class, 5, \ + NV ## class ## _ ## a1, d1, \ + NV ## class ## _ ## a2, d2, \ + NV ## class ## _ ## a3, d3, \ + NV ## class ## _ ## a4, d4, \ + NV ## class ## _ ## a5, d5) + +#define NV_PUSH_6U(class, a1,d1, a2,d2, a3,d3, a4,d4, a5,d5, a6,d6) \ + __NV_PUSH_6U(UVM_SUBCHANNEL_ ## class, 6, \ + NV ## class ## _ ## a1, d1, \ + NV ## class ## _ ## a2, d2, \ + NV ## class ## _ ## a3, d3, \ + NV ## class ## _ ## a4, d4, \ + NV ## class ## _ ## a5, d5, \ + NV ## class ## _ ## a6, d6) + +// Non-incrementing method with count data fields following it. The data is left +// untouched and hence it's primarily useful for a NOP method. +#define __NV_PUSH_NU_NONINC(subch, count, address) \ + do { \ + UVM_ASSERT(!uvm_global_is_suspended()); \ + UVM_ASSERT(uvm_push_get_size(push) + (count + 1) * 4 <= UVM_MAX_PUSH_SIZE); \ + UVM_ASSERT_MSG(address % 4 == 0, "Address %u\n", address); \ + push->next[0] = UVM_METHOD_NONINC(subch, address, count); \ + push->next += count + 1; \ + } while (0) + +#define NV_PUSH_NU_NONINC(class, a1, count) \ + __NV_PUSH_NU_NONINC(UVM_SUBCHANNEL_ ## class, count, \ + NV ## class ## _ ## a1) + +#endif // __UVM_PUSH_MACROS_H__ diff --git a/kernel-open/nvidia-uvm/uvm_push_test.c b/kernel-open/nvidia-uvm/uvm_push_test.c new file mode 100644 index 000000000..ed5a5d823 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_push_test.c @@ -0,0 +1,905 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include + +#include "uvm_global.h" +#include "uvm_channel.h" +#include "uvm_hal.h" +#include "uvm_mem.h" +#include "uvm_push.h" +#include "uvm_test.h" +#include "uvm_test_rng.h" +#include "uvm_thread_context.h" +#include "uvm_va_space.h" +#include "uvm_tracker.h" +#include "uvm_gpu_semaphore.h" +#include "uvm_kvmalloc.h" + +#define TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES 2 + +static NvU32 get_push_end_size(uvm_channel_t *channel) +{ + if (uvm_channel_is_ce(channel)) + return UVM_PUSH_CE_END_SIZE; + + + + + + return 0; +} + + + + +static NV_STATUS test_push_end_size(uvm_va_space_t *va_space) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + NvU32 push_size; + NvU32 i; + + for_each_va_space_gpu(gpu, va_space) { + for (i = 0; i < UVM_CHANNEL_TYPE_COUNT; ++i) { + uvm_push_t push; + NvU32 push_end_size; + uvm_channel_type_t type = i; + + + + + + status = uvm_push_begin(gpu->channel_manager, type, &push, "type %u\n", (unsigned)type); + TEST_CHECK_GOTO(status == NV_OK, done); + + push_end_size = get_push_end_size(push.channel); + push_size = uvm_push_get_size(&push); + uvm_push_end(&push); + if (uvm_push_get_size(&push) - push_size != push_end_size) { + UVM_TEST_PRINT("push_end_size incorrect, %u instead of %u for GPU %s\n", + uvm_push_get_size(&push) - push_size, + push_end_size, + uvm_gpu_name(gpu)); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + } + +done: + for_each_va_space_gpu(gpu, va_space) { + uvm_channel_manager_wait(gpu->channel_manager); + } + + return status; +} + +typedef enum { + TEST_INLINE_ADD, + TEST_INLINE_GET, + TEST_INLINE_SINGLE_BUFFER, + TEST_INLINE_MAX, +} test_inline_type_t; + +static NV_STATUS test_push_inline_data_gpu(uvm_gpu_t *gpu) +{ + static const size_t test_sizes[] = { 1, 2, 3, 4, 8, 31, 32, 1023, 1024, 1025, UVM_PUSH_INLINE_DATA_MAX_SIZE }; + NV_STATUS status; + int i, j; + int test_inline_type; + uvm_push_t push; + uvm_mem_t *mem = NULL; + char *verif; + + status = uvm_mem_alloc_sysmem_and_map_cpu_kernel(UVM_PUSH_INLINE_DATA_MAX_SIZE, current->mm, &mem); + TEST_CHECK_GOTO(status == NV_OK, done); + + status = uvm_mem_map_gpu_kernel(mem, gpu); + TEST_CHECK_GOTO(status == NV_OK, done); + + verif = (char *)uvm_mem_get_cpu_addr_kernel(mem); + + for (test_inline_type = 0; test_inline_type < TEST_INLINE_MAX; ++test_inline_type) { + for (i = 0; i < ARRAY_SIZE(test_sizes); ++i) { + size_t test_size = test_sizes[i]; + uvm_push_inline_data_t data; + size_t inline_data_size = 0; + uvm_gpu_address_t data_gpu_address; + char *inline_buf; + + status = uvm_push_begin(gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_INTERNAL, + &push, + "Inline data size %zu", + test_size); + TEST_CHECK_GOTO(status == NV_OK, done); + + // Do a noop first to test inline data starting at different offsets + gpu->parent->host_hal->noop(&push, roundup(min(test_size, (size_t)4096), UVM_METHOD_SIZE)); + + switch (test_inline_type) { + case TEST_INLINE_ADD: + uvm_push_inline_data_begin(&push, &data); + for (j = 0; j < test_size; ++j) { + char value = 1 + i + j; + uvm_push_inline_data_add(&data, &value, 1); + } + inline_data_size = uvm_push_inline_data_size(&data); + data_gpu_address = uvm_push_inline_data_end(&data); + break; + case TEST_INLINE_GET: + uvm_push_inline_data_begin(&push, &data); + inline_buf = (char*)uvm_push_inline_data_get(&data, test_size); + inline_data_size = uvm_push_inline_data_size(&data); + data_gpu_address = uvm_push_inline_data_end(&data); + for (j = 0; j < test_size; ++j) + 
inline_buf[j] = 1 + i + j; + break; + case TEST_INLINE_SINGLE_BUFFER: + inline_buf = (char*)uvm_push_get_single_inline_buffer(&push, test_size, &data_gpu_address); + inline_data_size = test_size; + for (j = 0; j < test_size; ++j) + inline_buf[j] = 1 + i + j; + break; + } + + + gpu->parent->ce_hal->memcopy(&push, + uvm_mem_gpu_address_virtual_kernel(mem, gpu), + data_gpu_address, + test_size); + status = uvm_push_end_and_wait(&push); + TEST_CHECK_GOTO(status == NV_OK, done); + + TEST_CHECK_GOTO(inline_data_size == test_size, done); + + for (j = 0; j < test_size; ++j) { + char expected = 1 + i + j; + if (verif[j] != expected) { + UVM_TEST_PRINT("size %zu verif[%d] = %d instead of %d\n", test_size, j, verif[j], expected); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + } + } +done: + uvm_mem_free(mem); + + return status; +} + +static NV_STATUS test_push_inline_data(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + + for_each_va_space_gpu(gpu, va_space) { + TEST_CHECK_RET(test_push_inline_data_gpu(gpu) == NV_OK); + } + + return NV_OK; +} + +// Test that begins UVM_PUSH_MAX_CONCURRENT_PUSHES number of pushes before +// ending any of them on each GPU. +// Notably starting more than a single push is not safe to do outside of a test +// as if multiple threads tried doing so, it could easily deadlock. +static NV_STATUS test_concurrent_pushes(uvm_va_space_t *va_space) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + NvU32 i; + uvm_push_t *pushes; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + + // As noted above, this test does unsafe things that would be detected by + // lock tracking, opt-out. + uvm_thread_context_lock_disable_tracking(); + + pushes = uvm_kvmalloc_zero(sizeof(*pushes) * UVM_PUSH_MAX_CONCURRENT_PUSHES); + if (pushes == NULL) { + status = NV_ERR_NO_MEMORY; + goto done; + } + + for_each_va_space_gpu(gpu, va_space) { + for (i = 0; i < UVM_PUSH_MAX_CONCURRENT_PUSHES; ++i) { + uvm_push_t *push = &pushes[i]; + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_CPU_TO_GPU, push, "concurrent push %u", i); + TEST_CHECK_GOTO(status == NV_OK, done); + } + for (i = 0; i < UVM_PUSH_MAX_CONCURRENT_PUSHES; ++i) { + uvm_push_t *push = &pushes[i]; + uvm_push_end(push); + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, push), done); + } + TEST_CHECK_GOTO(tracker.size != 0, done); + + status = uvm_tracker_wait(&tracker); + TEST_CHECK_GOTO(status == NV_OK, done); + } + +done: + uvm_thread_context_lock_enable_tracking(); + + uvm_tracker_deinit(&tracker); + + uvm_kvfree(pushes); + + return status; +} + +static void add_to_counter(void* ptr, int value) +{ + atomic_t *atomic = (atomic_t*) ptr; + atomic_add(value, atomic); +} + +static void add_one_to_counter(void* ptr) +{ + add_to_counter(ptr, 1); +} + +static void add_two_to_counter(void* ptr) +{ + add_to_counter(ptr, 2); +} + +static NV_STATUS test_push_interleaving_on_gpu(uvm_gpu_t* gpu) +{ + NV_STATUS status; + uvm_channel_t *channel; + uvm_push_t push; + NvU32 i; + NvU32 *host_va; + NvU64 gpu_va; + NvU32 observed, expected; + unsigned int num_non_paused_pushes; + uvm_push_t pushes_not_ended[TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES]; + const NvLength size = sizeof(NvU32) * (1 + TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES); + uvm_rm_mem_t *mem = NULL; + atomic_t on_complete_counter = ATOMIC_INIT(0); + + // This test issues virtual memcopies/memsets, which in SR-IOV heavy cannot + // be pushed to a proxy channel. Pushing to a UVM internal CE channel works + // in all scenarios. 
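+    // (Proxy channels exist only in the SR-IOV heavy configuration; see the
+    // is_proxy / proxy pool handling in uvm_push.h.)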
+ channel = uvm_channel_any_of_type(gpu->channel_manager, UVM_CHANNEL_POOL_TYPE_CE); + TEST_CHECK_RET(channel != NULL); + + if (channel->num_gpfifo_entries <= TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES) { + UVM_TEST_PRINT("Insufficient number of gpfifo entries per channel to run this test. Expected at least %u " + "entries, but found %u\n", + TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES + 1, + channel->num_gpfifo_entries); + return NV_ERR_INVALID_STATE; + } + num_non_paused_pushes = channel->num_gpfifo_entries; + + // The UVM driver only allows push interleaving across separate threads, but + // it is hard to consistenly replicate the interleaving. Instead, we + // temporarily disable lock tracking, so we can interleave pushes from a + // single thread. + uvm_thread_context_lock_disable_tracking(); + + status = uvm_rm_mem_alloc_and_map_cpu(gpu, UVM_RM_MEM_TYPE_SYS, size, &mem); + TEST_CHECK_GOTO(status == NV_OK, done); + host_va = (NvU32*)uvm_rm_mem_get_cpu_va(mem); + gpu_va = uvm_rm_mem_get_gpu_va(mem, gpu, uvm_channel_is_proxy(channel)); + memset(host_va, 0, size); + + // Begin a few pushes on the channel, but do not end them yet. + // Each pushed method sets a magic number on an independent memory location. + for (i = 0; i < TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES; ++i) { + uvm_push_info_t *push_info; + + status = uvm_push_begin_on_channel(channel, pushes_not_ended + i, "Set to 0x%x", 0xDEADBEEF + i); + TEST_CHECK_GOTO(status == NV_OK, done); + gpu->parent->ce_hal->memset_v_4(pushes_not_ended + i, + gpu_va + sizeof(NvU32) * (i + 1), + 0xDEADBEEF + i, + sizeof(NvU32)); + + push_info = uvm_push_info_from_push(pushes_not_ended + i); + push_info->on_complete = add_two_to_counter; + push_info->on_complete_data = &on_complete_counter; + } + + // Push N (N = #channel entries) value increments to the same channel. + for (i = 0; i < num_non_paused_pushes; ++i) { + uvm_push_info_t *push_info; + + status = uvm_push_begin_on_channel(channel, &push, "inc to %u", i + 1); + TEST_CHECK_GOTO(status == NV_OK, done); + gpu->parent->ce_hal->semaphore_reduction_inc(&push, gpu_va, num_non_paused_pushes); + + push_info = uvm_push_info_from_push(&push); + push_info->on_complete = add_one_to_counter; + push_info->on_complete_data = &on_complete_counter; + + uvm_push_end(&push); + } + + // End the pending pushes + for (i = 0; i < TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES; ++i) + uvm_push_end(pushes_not_ended + i); + + // When the channel manager becomes idle, the GPU methods have been + // completed, and the CPU completion callbacks associated with the push + // have been invoked. + status = uvm_channel_manager_wait(channel->pool->manager); + TEST_CHECK_GOTO(status == NV_OK, done); + + observed = host_va[0]; + expected = num_non_paused_pushes; + if (observed != expected) { + UVM_TEST_PRINT("Observed counter %u but expected %u\n", observed, expected); + status = NV_ERR_INVALID_STATE; + goto done; + } + + for (i = 0; i < TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES; ++i) { + observed = host_va[i + 1]; + expected = 0xDEADBEEF + i; + if (observed != expected) { + UVM_TEST_PRINT("Observed magic number 0x%x but expected 0x%x\n", observed, expected); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + + observed = atomic_read(&on_complete_counter); + expected = TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES * 2 + num_non_paused_pushes; + if (observed != expected) { + UVM_TEST_PRINT("Wrong value of counter incremented by push info callback. 
Observed %u but expected %u\n", + observed, + expected); + status = NV_ERR_INVALID_STATE; + goto done; + } + +done: + uvm_rm_mem_free(mem); + uvm_thread_context_lock_enable_tracking(); + + return status; +} + +// Using a single thread, interleave pushes and check that the result is +// consistent with a non-interleaved sequence. +// 1) Begin a few pushes in channel X but do not end them. Each pushed (GPU) +// method sets a individual value in an independent system memory location. +// Each push is associated with a push info (CPU) callback that atomically +// adds 2 to a memory location M +// 2) Begin and end many pushes in the same channel X such that all the gpfifo +// entries are filled. All the pushed methods do the same thing: atomically +// increment a given system memory location. +// Each push is associated with a push info callback that atomically +// increments the memory location M +// 3) End the pending pushes +// +// The final state should be the same as in the non-interleaved sequence +// (1)-(3)-(2) +// +// Starting more than a single push is not safe to do outside of a test as if +// multiple threads tried doing so, it could easily deadlock. +static NV_STATUS test_push_interleaving(uvm_va_space_t *va_space) +{ + NV_STATUS status; + uvm_gpu_t *gpu; + + BUILD_BUG_ON(TEST_PUSH_INTERLEAVING_NUM_PAUSED_PUSHES >= UVM_PUSH_MAX_CONCURRENT_PUSHES); + + for_each_va_space_gpu(gpu, va_space) { + status = test_push_interleaving_on_gpu(gpu); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +// Push exactly UVM_MAX_PUSH_SIZE methods while acquiring a semaphore +// This is very tightly coupled with the pushbuffer implementation and method +// sizes, which is not ideal, but allows to test corner cases in the pushbuffer +// management code. +static NV_STATUS test_push_exactly_max_push(uvm_gpu_t *gpu, + uvm_push_t *push, + uvm_channel_type_t channel_type, + uvm_gpu_semaphore_t *sema_to_acquire, + NvU32 value) +{ + NV_STATUS status; + NvU64 semaphore_gpu_va; + NvU32 push_end_size; + + status = uvm_push_begin(gpu->channel_manager, channel_type, push, "Test push"); + if (status != NV_OK) + return status; + + TEST_CHECK_RET(uvm_push_has_space(push, UVM_MAX_PUSH_SIZE)); + TEST_CHECK_RET(!uvm_push_has_space(push, UVM_MAX_PUSH_SIZE + 1)); + + semaphore_gpu_va = uvm_gpu_semaphore_get_gpu_va(sema_to_acquire, gpu, uvm_channel_is_proxy(push->channel)); + gpu->parent->host_hal->semaphore_acquire(push, semaphore_gpu_va, value); + + // Push a noop leaving just push_end_size in the pushbuffer. + push_end_size = get_push_end_size(push->channel); + gpu->parent->host_hal->noop(push, UVM_MAX_PUSH_SIZE - uvm_push_get_size(push) - push_end_size); + + TEST_CHECK_RET(uvm_push_has_space(push, push_end_size)); + TEST_CHECK_RET(!uvm_push_has_space(push, push_end_size + 1)); + uvm_push_end(push); + + UVM_ASSERT_MSG(uvm_push_get_size(push) == UVM_MAX_PUSH_SIZE, "push_size %u\n", uvm_push_get_size(push)); + + return NV_OK; +} + +static NvU32 test_count_idle_chunks(uvm_pushbuffer_t *pushbuffer) +{ + NvU32 i; + NvU32 count = 0; + for (i = 0; i < UVM_PUSHBUFFER_CHUNKS; ++i) + count += test_bit(i, pushbuffer->idle_chunks) ? 1 : 0; + return count; +} + +static NvU32 test_count_available_chunks(uvm_pushbuffer_t *pushbuffer) +{ + NvU32 i; + NvU32 count = 0; + for (i = 0; i < UVM_PUSHBUFFER_CHUNKS; ++i) + count += test_bit(i, pushbuffer->available_chunks) ? 
1 : 0; + return count; +} + +// Reuse the whole pushbuffer 4 times, one UVM_MAX_PUSH_SIZE at a time +#define EXTRA_MAX_PUSHES_WHILE_FULL (4 * UVM_PUSHBUFFER_SIZE / UVM_MAX_PUSH_SIZE) + +// Test doing pushes of exactly UVM_MAX_PUSH_SIZE size and only allowing them to +// complete one by one. +static NV_STATUS test_max_pushes_on_gpu_and_channel_type(uvm_gpu_t *gpu, uvm_channel_type_t channel_type) +{ + NV_STATUS status; + + uvm_tracker_t tracker; + uvm_gpu_semaphore_t sema; + NvU32 total_push_size = 0; + NvU32 push_count = 0; + NvU32 i; + + uvm_tracker_init(&tracker); + + status = uvm_gpu_semaphore_alloc(gpu->semaphore_pool, &sema); + TEST_CHECK_GOTO(status == NV_OK, done); + + uvm_gpu_semaphore_set_payload(&sema, 0); + + // Need to wait for all channels to completely idle so that the pushbuffer + // is in completely idle state when we begin. + status = uvm_channel_manager_wait(gpu->channel_manager); + TEST_CHECK_GOTO(status == NV_OK, done); + + while (uvm_pushbuffer_has_space(gpu->channel_manager->pushbuffer)) { + uvm_push_t push; + + ++push_count; + + status = test_push_exactly_max_push(gpu, &push, channel_type, &sema, push_count); + TEST_CHECK_GOTO(status == NV_OK, done); + + total_push_size += uvm_push_get_size(&push); + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + } + + if (total_push_size != UVM_PUSHBUFFER_SIZE) { + UVM_TEST_PRINT("Unexpected space in the pushbuffer, total push %u\n", total_push_size); + uvm_pushbuffer_print(gpu->channel_manager->pushbuffer); + status = NV_ERR_INVALID_STATE; + goto done; + } + + TEST_CHECK_GOTO(test_count_available_chunks(gpu->channel_manager->pushbuffer) == 0, done); + TEST_CHECK_GOTO(test_count_idle_chunks(gpu->channel_manager->pushbuffer) == 0, done); + + for (i = 0; i < EXTRA_MAX_PUSHES_WHILE_FULL; ++i) { + uvm_push_t push; + + // There should be no space for another push until the sema is + // incremented. Incrementing the same allows a single push to complete + // freeing exactly UVM_MAX_PUSH_SIZE space. + if (uvm_pushbuffer_has_space(gpu->channel_manager->pushbuffer)) { + UVM_TEST_PRINT("Unexpected space in the pushbuffer for iter %d\n", i); + uvm_pushbuffer_print(gpu->channel_manager->pushbuffer); + status = NV_ERR_INVALID_STATE; + goto done; + } + + uvm_gpu_semaphore_set_payload(&sema, i + 1); + + ++push_count; + + // Take UVM_MAX_PUSH_SIZE space. This should leave no space left again. + status = test_push_exactly_max_push(gpu, &push, channel_type, &sema, push_count); + TEST_CHECK_GOTO(status == NV_OK, done); + + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + } + +done: + uvm_gpu_semaphore_set_payload(&sema, push_count); + uvm_tracker_wait_deinit(&tracker); + + uvm_gpu_semaphore_free(&sema); + + return status; +} + +static NV_STATUS test_max_pushes_on_gpu(uvm_gpu_t *gpu) +{ + + + + + + TEST_NV_CHECK_RET(test_max_pushes_on_gpu_and_channel_type(gpu, UVM_CHANNEL_TYPE_GPU_INTERNAL)); + + return NV_OK; +} + +// Test doing UVM_PUSHBUFFER_CHUNKS independent pushes expecting each one to use +// a different chunk in the pushbuffer. +static NV_STATUS test_idle_chunks_on_gpu(uvm_gpu_t *gpu) +{ + NV_STATUS status; + + uvm_gpu_semaphore_t sema; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + NvU32 i; + + uvm_tracker_init(&tracker); + + status = uvm_gpu_semaphore_alloc(gpu->semaphore_pool, &sema); + TEST_CHECK_GOTO(status == NV_OK, done); + + uvm_gpu_semaphore_set_payload(&sema, 0); + + // Need to wait for all channels to completely idle so that the pushbuffer + // is in completely idle state when we begin. 
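+    // uvm_channel_manager_wait() waits for all pending GPFIFO entries on all
+    // of the manager's channels to complete.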
+ status = uvm_channel_manager_wait(gpu->channel_manager); + TEST_CHECK_GOTO(status == NV_OK, done); + + for (i = 0; i < UVM_PUSHBUFFER_CHUNKS; ++i) { + NvU64 semaphore_gpu_va; + uvm_push_t push; + + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "Push using chunk %u", i); + TEST_CHECK_GOTO(status == NV_OK, done); + + semaphore_gpu_va = uvm_gpu_semaphore_get_gpu_va(&sema, gpu, uvm_channel_is_proxy(push.channel)); + gpu->parent->host_hal->semaphore_acquire(&push, semaphore_gpu_va, i + 1); + uvm_push_end(&push); + + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + + if (test_count_idle_chunks(gpu->channel_manager->pushbuffer) != UVM_PUSHBUFFER_CHUNKS - i - 1) { + UVM_TEST_PRINT("Unexpected count of idle chunks in the pushbuffer %u instead of %u\n", + test_count_idle_chunks(gpu->channel_manager->pushbuffer), UVM_PUSHBUFFER_CHUNKS - i - 1); + uvm_pushbuffer_print(gpu->channel_manager->pushbuffer); + status = NV_ERR_INVALID_STATE; + goto done; + } + } + uvm_gpu_semaphore_set_payload(&sema, UVM_PUSHBUFFER_CHUNKS + 1); + + status = uvm_channel_manager_wait(gpu->channel_manager); + TEST_CHECK_GOTO(status == NV_OK, done); + + if (test_count_idle_chunks(gpu->channel_manager->pushbuffer) != UVM_PUSHBUFFER_CHUNKS) { + UVM_TEST_PRINT("Unexpected count of idle chunks in the pushbuffer %u\n", + test_count_idle_chunks(gpu->channel_manager->pushbuffer)); + uvm_pushbuffer_print(gpu->channel_manager->pushbuffer); + status = NV_ERR_INVALID_STATE; + goto done; + } + +done: + uvm_gpu_semaphore_set_payload(&sema, UVM_PUSHBUFFER_CHUNKS + 1); + uvm_tracker_wait(&tracker); + + uvm_gpu_semaphore_free(&sema); + uvm_tracker_deinit(&tracker); + + return status; +} + +static NV_STATUS test_pushbuffer(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + + for_each_va_space_gpu(gpu, va_space) { + TEST_NV_CHECK_RET(test_max_pushes_on_gpu(gpu)); + TEST_NV_CHECK_RET(test_idle_chunks_on_gpu(gpu)); + } + return NV_OK; +} + +typedef struct +{ + NvU64 *timestmap_in_pushbuffer; + NvU64 timestamp; +} timestamp_test_t; + +static void timestamp_on_complete(void *void_data) +{ + timestamp_test_t *data = (timestamp_test_t *)void_data; + + if (uvm_global_get_status() != NV_OK) { + // Do nothing if a global error has been set as the callback might be + // called from teardown where the reference to test data is no longer + // valid. + return; + } + + data->timestamp = *data->timestmap_in_pushbuffer; +} + +static NV_STATUS test_timestamp_on_gpu(uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_push_t push; + timestamp_test_t test_data = {0}; + NvU32 i; + NvU64 last_stamp = 0; + + for (i = 0; i < 10; ++i) { + status = uvm_push_begin(gpu->channel_manager, UVM_CHANNEL_TYPE_GPU_INTERNAL, &push, "Releasing a timestamp"); + if (status != NV_OK) + return status; + + test_data.timestmap_in_pushbuffer = uvm_push_timestamp(&push); + uvm_push_info_from_push(&push)->on_complete = timestamp_on_complete; + uvm_push_info_from_push(&push)->on_complete_data = &test_data; + uvm_push_end(&push); + + // Synchronize the channel manager to make sure the on_complete + // callbacks have a chance to run. 
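+        // Once the manager is idle, the callback has copied the GPU-written
+        // timestamp out of the pushbuffer, so it is safe to inspect test_data
+        // below.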
+ status = uvm_channel_manager_wait(gpu->channel_manager); + TEST_CHECK_RET(status == NV_OK); + + TEST_CHECK_RET(test_data.timestamp != 0); + TEST_CHECK_RET(test_data.timestamp > last_stamp); + last_stamp = test_data.timestamp; + } + + return NV_OK; +} + +static NV_STATUS test_timestamp(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + + for_each_va_space_gpu(gpu, va_space) + TEST_CHECK_RET(test_timestamp_on_gpu(gpu) == NV_OK); + + return NV_OK; +} + +static NV_STATUS sync_memcopy(uvm_channel_type_t type, uvm_mem_t *dst, uvm_mem_t *src) +{ + uvm_push_t push; + uvm_gpu_address_t dst_va; + uvm_gpu_address_t src_va; + uvm_gpu_t *gpu; + NV_STATUS status; + + UVM_ASSERT(uvm_mem_is_vidmem(src) || uvm_mem_is_vidmem(dst)); + + if (type == UVM_CHANNEL_TYPE_CPU_TO_GPU || type == UVM_CHANNEL_TYPE_GPU_TO_CPU) { + gpu = (type == UVM_CHANNEL_TYPE_CPU_TO_GPU) ? dst->backing_gpu : src->backing_gpu; + status = uvm_push_begin(gpu->channel_manager, type, &push, uvm_channel_type_to_string(type)); + if (status != NV_OK) + return status; + + dst_va = uvm_mem_gpu_address_virtual_kernel(dst, gpu); + src_va = uvm_mem_gpu_address_virtual_kernel(src, gpu); + gpu->parent->ce_hal->memcopy(&push, dst_va, src_va, src->size); + } + else { + unsigned i; + const NvU32 chunk_size = src->chunk_size; + + UVM_ASSERT((src->size % chunk_size) == 0); + + gpu = src->backing_gpu; + status = uvm_push_begin_gpu_to_gpu(gpu->channel_manager, + dst->backing_gpu, + &push, + uvm_channel_type_to_string(type)); + + for (i = 0; i < src->size / chunk_size; i++) { + dst_va = uvm_mem_gpu_address_copy(dst, gpu, i * chunk_size, chunk_size); + src_va = uvm_mem_gpu_address_copy(src, gpu, i * chunk_size, chunk_size); + gpu->parent->ce_hal->memcopy(&push, dst_va, src_va, chunk_size); + } + } + + return uvm_push_end_and_wait(&push); +} + +static bool can_do_peer_copies(uvm_va_space_t *va_space, uvm_gpu_t *gpu_a, uvm_gpu_t *gpu_b) +{ + if (gpu_a == gpu_b || !uvm_processor_mask_test(&va_space->can_copy_from[uvm_id_value(gpu_a->id)], gpu_b->id)) + return false; + + UVM_ASSERT(uvm_processor_mask_test(&va_space->can_copy_from[uvm_id_value(gpu_b->id)], gpu_a->id)); + + // TODO: Bug 2028875. Indirect peers are not supported for now. + if (uvm_gpus_are_indirect_peers(gpu_a, gpu_b)) + return false; + + return true; +} + +// Test the GPU to GPU push interface by transferring data between each +// permutation of GPU peers. 
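+// For each eligible pair (A, B), the buffer is copied CPU->A, the host copy is
+// zeroed, then the data is copied A->B and B->CPU, and the original pattern is
+// verified.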
+static NV_STATUS test_push_gpu_to_gpu(uvm_va_space_t *va_space) +{ + NvU32 i; + NV_STATUS status; + uvm_gpu_t *gpu, *gpu_a, *gpu_b; + uvm_mem_t *mem[UVM_ID_MAX_PROCESSORS] = {NULL}; + NvU32 *host_ptr; + const size_t size = 1024 * 1024; + bool waive = true; + + for_each_va_space_gpu(gpu_a, va_space) { + for_each_va_space_gpu(gpu_b, va_space) { + if (can_do_peer_copies(va_space, gpu_a, gpu_b)) { + waive = false; + break; + } + } + } + + if (waive) + return NV_OK; + + // Alloc and initialize host buffer + status = uvm_mem_alloc_sysmem_and_map_cpu_kernel(size, current->mm, &mem[UVM_ID_CPU_VALUE]); + TEST_CHECK_GOTO(status == NV_OK, done); + + host_ptr = (NvU32 *)uvm_mem_get_cpu_addr_kernel(mem[UVM_ID_CPU_VALUE]); + + for (i = 0; i < size / sizeof(NvU32); ++i) + host_ptr[i] = i + 1; + + // Allocate vidmem on each GPU, and map the host buffer + for_each_va_space_gpu(gpu, va_space) { + status = uvm_mem_alloc_vidmem(size, gpu, &mem[uvm_id_value(gpu->id)]); + TEST_CHECK_GOTO(status == NV_OK, done); + + status = uvm_mem_map_gpu_kernel(mem[uvm_id_value(gpu->id)], gpu); + TEST_CHECK_GOTO(status == NV_OK, done); + + status = uvm_mem_map_gpu_kernel(mem[UVM_ID_CPU_VALUE], gpu); + TEST_CHECK_GOTO(status == NV_OK, done); + } + + // Copy buffer between each pair of GPU peers, in both directions + for_each_va_space_gpu(gpu_a, va_space) { + for_each_va_space_gpu(gpu_b, va_space) { + if (!can_do_peer_copies(va_space, gpu_a, gpu_b)) + continue; + + // Copy from CPU to the first GPU, and then zero out the host copy + status = sync_memcopy(UVM_CHANNEL_TYPE_CPU_TO_GPU, + mem[uvm_id_value(gpu_a->id)], + mem[UVM_ID_CPU_VALUE]); + TEST_CHECK_GOTO(status == NV_OK, done); + + memset(host_ptr, 0, size / sizeof(NvU32)); + + // Copy from the first GPU to the second GPU + status = sync_memcopy(UVM_CHANNEL_TYPE_GPU_TO_GPU, + mem[uvm_id_value(gpu_b->id)], + mem[uvm_id_value(gpu_a->id)]); + TEST_CHECK_GOTO(status == NV_OK, done); + + // Copy from the second GPU back to the host, and check result + status = sync_memcopy(UVM_CHANNEL_TYPE_GPU_TO_CPU, + mem[UVM_ID_CPU_VALUE], + mem[uvm_id_value(gpu_b->id)]); + TEST_CHECK_GOTO(status == NV_OK, done); + + for (i = 0; i < size / sizeof(NvU32); ++i) { + if (host_ptr[i] != i + 1) { + UVM_TEST_PRINT("host_ptr[%u] = %u instead of %u when copying between %s and %s\n", + i, + host_ptr[i], + i + 1, + uvm_gpu_name(gpu_a), + uvm_gpu_name(gpu_b)); + status = NV_ERR_INVALID_STATE; + TEST_CHECK_GOTO(status == NV_OK, done); + } + } + } + } + + done: + for_each_va_space_gpu(gpu, va_space) + uvm_mem_free(mem[uvm_id_value(gpu->id)]); + + uvm_mem_free(mem[UVM_ID_CPU_VALUE]); + + return status; +} + +NV_STATUS uvm_test_push_sanity(UVM_TEST_PUSH_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + // Take the global lock as some of the tests rely on being the + // only thread doing pushes and could deadlock otherwise. 
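+    // The VA space lock is also taken in read mode so that the set of
+    // registered GPUs cannot change while the tests run.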
+ uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_va_space_down_read_rm(va_space); + + status = test_push_end_size(va_space); + if (status != NV_OK) + goto done; + + status = test_push_inline_data(va_space); + if (status != NV_OK) + goto done; + + status = test_concurrent_pushes(va_space); + if (status != NV_OK) + goto done; + + status = test_push_interleaving(va_space); + if (status != NV_OK) + goto done; + + status = test_push_gpu_to_gpu(va_space); + if (status != NV_OK) + goto done; + + status = test_pushbuffer(va_space); + if (status != NV_OK) + goto done; + + if (!params->skipTimestampTest) { + status = test_timestamp(va_space); + if (status != NV_OK) + goto done; + } + +done: + uvm_va_space_up_read_rm(va_space); + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_pushbuffer.c b/kernel-open/nvidia-uvm/uvm_pushbuffer.c new file mode 100644 index 000000000..324eef737 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pushbuffer.c @@ -0,0 +1,488 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_api.h" +#include "uvm_pushbuffer.h" +#include "uvm_channel.h" +#include "uvm_global.h" +#include "uvm_lock.h" +#include "uvm_procfs.h" +#include "uvm_push.h" +#include "uvm_kvmalloc.h" +#include "uvm_gpu.h" +#include "uvm_common.h" +#include "uvm_linux.h" + +// Print pushbuffer state into a seq_file if provided or with UVM_DBG_PRINT() if not. 
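+// The procfs read handler below passes its seq_file; uvm_pushbuffer_print() at
+// the end of this file passes NULL to get UVM_DBG_PRINT() output instead.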
+static void uvm_pushbuffer_print_common(uvm_pushbuffer_t *pushbuffer, struct seq_file *s); + +static int nv_procfs_read_pushbuffer_info(struct seq_file *s, void *v) +{ + uvm_pushbuffer_t *pushbuffer = (uvm_pushbuffer_t *)s->private; + + if (!uvm_down_read_trylock(&g_uvm_global.pm.lock)) + return -EAGAIN; + + uvm_pushbuffer_print_common(pushbuffer, s); + + uvm_up_read(&g_uvm_global.pm.lock); + + return 0; +} + +static int nv_procfs_read_pushbuffer_info_entry(struct seq_file *s, void *v) +{ + UVM_ENTRY_RET(nv_procfs_read_pushbuffer_info(s, v)); +} + +UVM_DEFINE_SINGLE_PROCFS_FILE(pushbuffer_info_entry); + +static NV_STATUS create_procfs(uvm_pushbuffer_t *pushbuffer) +{ + uvm_gpu_t *gpu = pushbuffer->channel_manager->gpu; + + // The pushbuffer info file is for debug only + if (!uvm_procfs_is_debug_enabled()) + return NV_OK; + + pushbuffer->procfs.info_file = NV_CREATE_PROC_FILE("pushbuffer", + gpu->procfs.dir, + pushbuffer_info_entry, + pushbuffer); + if (pushbuffer->procfs.info_file == NULL) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +NV_STATUS uvm_pushbuffer_create(uvm_channel_manager_t *channel_manager, uvm_pushbuffer_t **pushbuffer_out) +{ + NV_STATUS status; + int i; + uvm_gpu_t *gpu = channel_manager->gpu; + + uvm_pushbuffer_t *pushbuffer = uvm_kvmalloc_zero(sizeof(*pushbuffer)); + if (pushbuffer == NULL) + return NV_ERR_NO_MEMORY; + + pushbuffer->channel_manager = channel_manager; + + uvm_spin_lock_init(&pushbuffer->lock, UVM_LOCK_ORDER_LEAF); + + // Currently the pushbuffer supports UVM_PUSHBUFFER_CHUNKS of concurrent + // pushes. + uvm_sema_init(&pushbuffer->concurrent_pushes_sema, UVM_PUSHBUFFER_CHUNKS, UVM_LOCK_ORDER_PUSH); + + UVM_ASSERT(channel_manager->conf.pushbuffer_loc == UVM_BUFFER_LOCATION_SYS || + channel_manager->conf.pushbuffer_loc == UVM_BUFFER_LOCATION_VID); + + status = uvm_rm_mem_alloc_and_map_cpu(gpu, + (channel_manager->conf.pushbuffer_loc == UVM_BUFFER_LOCATION_SYS)? 
+ UVM_RM_MEM_TYPE_SYS: + UVM_RM_MEM_TYPE_GPU, + UVM_PUSHBUFFER_SIZE, + &pushbuffer->memory); + if (status != NV_OK) + goto error; + + bitmap_fill(pushbuffer->idle_chunks, UVM_PUSHBUFFER_CHUNKS); + bitmap_fill(pushbuffer->available_chunks, UVM_PUSHBUFFER_CHUNKS); + + for (i = 0; i < UVM_PUSHBUFFER_CHUNKS; ++i) + INIT_LIST_HEAD(&pushbuffer->chunks[i].pending_gpfifos); + + status = create_procfs(pushbuffer); + if (status != NV_OK) + goto error; + + *pushbuffer_out = pushbuffer; + + return status; + +error: + uvm_pushbuffer_destroy(pushbuffer); + return status; +} + +static uvm_pushbuffer_chunk_t *get_chunk_in_mask(uvm_pushbuffer_t *pushbuffer, unsigned long *mask) +{ + NvU32 index = find_first_bit(mask, UVM_PUSHBUFFER_CHUNKS); + + uvm_assert_spinlock_locked(&pushbuffer->lock); + + if (index == UVM_PUSHBUFFER_CHUNKS) + return NULL; + + return &pushbuffer->chunks[index]; +} + +static uvm_pushbuffer_chunk_t *get_available_chunk(uvm_pushbuffer_t *pushbuffer) +{ + return get_chunk_in_mask(pushbuffer, pushbuffer->available_chunks); +} + +static uvm_pushbuffer_chunk_t *get_idle_chunk(uvm_pushbuffer_t *pushbuffer) +{ + return get_chunk_in_mask(pushbuffer, pushbuffer->idle_chunks); +} + +static NvU32 chunk_get_index(uvm_pushbuffer_t *pushbuffer, uvm_pushbuffer_chunk_t *chunk) +{ + NvU32 index = chunk - pushbuffer->chunks; + UVM_ASSERT(index < UVM_PUSHBUFFER_CHUNKS); + return index; +} + +static NvU32 chunk_get_offset(uvm_pushbuffer_t *pushbuffer, uvm_pushbuffer_chunk_t *chunk) +{ + return chunk_get_index(pushbuffer, chunk) * UVM_PUSHBUFFER_CHUNK_SIZE; +} + +static void set_chunk(uvm_pushbuffer_t *pushbuffer, uvm_pushbuffer_chunk_t *chunk, unsigned long *mask) +{ + NvU32 index = chunk_get_index(pushbuffer, chunk); + + uvm_assert_spinlock_locked(&pushbuffer->lock); + + __set_bit(index, mask); +} + +static void clear_chunk(uvm_pushbuffer_t *pushbuffer, uvm_pushbuffer_chunk_t *chunk, unsigned long *mask) +{ + NvU32 index = chunk_get_index(pushbuffer, chunk); + + uvm_assert_spinlock_locked(&pushbuffer->lock); + + __clear_bit(index, mask); +} + +static uvm_pushbuffer_chunk_t *pick_chunk(uvm_pushbuffer_t *pushbuffer) +{ + uvm_pushbuffer_chunk_t *chunk = get_idle_chunk(pushbuffer); + + uvm_assert_spinlock_locked(&pushbuffer->lock); + + if (chunk == NULL) + chunk = get_available_chunk(pushbuffer); + + return chunk; +} + +static bool try_claim_chunk(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push, uvm_pushbuffer_chunk_t **chunk_out) +{ + uvm_pushbuffer_chunk_t *chunk; + + uvm_spin_lock(&pushbuffer->lock); + + chunk = pick_chunk(pushbuffer); + if (!chunk) + goto done; + + chunk->current_push = push; + clear_chunk(pushbuffer, chunk, pushbuffer->idle_chunks); + clear_chunk(pushbuffer, chunk, pushbuffer->available_chunks); + +done: + uvm_spin_unlock(&pushbuffer->lock); + *chunk_out = chunk; + + return chunk != NULL; +} + +static NvU32 *chunk_get_next_push_start_addr(uvm_pushbuffer_t *pushbuffer, uvm_pushbuffer_chunk_t *chunk) +{ + char *push_start = (char *)uvm_rm_mem_get_cpu_va(pushbuffer->memory); + push_start += chunk_get_offset(pushbuffer, chunk); + push_start += chunk->next_push_start; + + UVM_ASSERT(((NvU64)push_start) % sizeof(NvU32) == 0); + + return (NvU32*)push_start; +} + +static NV_STATUS claim_chunk(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push, uvm_pushbuffer_chunk_t **chunk_out) +{ + NV_STATUS status = NV_OK; + uvm_channel_manager_t *channel_manager = pushbuffer->channel_manager; + uvm_spin_loop_t spin; + + if (try_claim_chunk(pushbuffer, push, chunk_out)) + return NV_OK; + + 
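+    // No chunk is free: nudge the channel manager to process any completed
+    // GPFIFO entries, then spin until a chunk opens up or a channel error is
+    // detected.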
uvm_channel_manager_update_progress(channel_manager); + + uvm_spin_loop_init(&spin); + while (!try_claim_chunk(pushbuffer, push, chunk_out) && status == NV_OK) { + UVM_SPIN_LOOP(&spin); + status = uvm_channel_manager_check_errors(channel_manager); + uvm_channel_manager_update_progress(channel_manager); + } + + return status; +} + +NV_STATUS uvm_pushbuffer_begin_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push) +{ + uvm_pushbuffer_chunk_t *chunk; + NV_STATUS status; + + UVM_ASSERT(pushbuffer); + UVM_ASSERT(push); + + // Note that this semaphore is uvm_up()ed in end_push(). + uvm_down(&pushbuffer->concurrent_pushes_sema); + + status = claim_chunk(pushbuffer, push, &chunk); + if (status != NV_OK) { + uvm_up(&pushbuffer->concurrent_pushes_sema); + return status; + } + + UVM_ASSERT(chunk); + + push->begin = chunk_get_next_push_start_addr(pushbuffer, chunk); + push->next = push->begin; + + return NV_OK; +} + +static uvm_gpfifo_entry_t *chunk_get_first_gpfifo(uvm_pushbuffer_chunk_t *chunk) +{ + return list_first_entry_or_null(&chunk->pending_gpfifos, uvm_gpfifo_entry_t, pending_list_node); +} + +static uvm_gpfifo_entry_t *chunk_get_last_gpfifo(uvm_pushbuffer_chunk_t *chunk) +{ + return list_last_entry_or_null(&chunk->pending_gpfifos, uvm_gpfifo_entry_t, pending_list_node); +} + +// Get the cpu put within the chunk (in range [0, UVM_PUSHBUFFER_CHUNK_SIZE]) +static NvU32 chunk_get_cpu_put(uvm_pushbuffer_t *pushbuffer, uvm_pushbuffer_chunk_t *chunk) +{ + uvm_gpfifo_entry_t *gpfifo = chunk_get_last_gpfifo(chunk); + + uvm_assert_spinlock_locked(&pushbuffer->lock); + + if (gpfifo != NULL) + return gpfifo->pushbuffer_offset + gpfifo->pushbuffer_size - chunk_get_offset(pushbuffer, chunk); + else + return 0; +} + +// Get the gpu get within the chunk (in range [0, UVM_PUSHBUFFER_CHUNK_SIZE)) +static NvU32 chunk_get_gpu_get(uvm_pushbuffer_t *pushbuffer, uvm_pushbuffer_chunk_t *chunk) +{ + uvm_gpfifo_entry_t *gpfifo = chunk_get_first_gpfifo(chunk); + + uvm_assert_spinlock_locked(&pushbuffer->lock); + + if (gpfifo != NULL) + return gpfifo->pushbuffer_offset - chunk_get_offset(pushbuffer, chunk); + else + return 0; +} + +static void update_chunk(uvm_pushbuffer_t *pushbuffer, uvm_pushbuffer_chunk_t *chunk) +{ + NvU32 gpu_get = chunk_get_gpu_get(pushbuffer, chunk); + NvU32 cpu_put = chunk_get_cpu_put(pushbuffer, chunk); + + uvm_assert_spinlock_locked(&pushbuffer->lock); + + if (gpu_get == cpu_put) { + // cpu_put can be equal to gpu_get both when the chunk is full and empty. We + // can tell apart the cases by checking whether the pending GPFIFOs list is + // empty. + if (!list_empty(&chunk->pending_gpfifos)) + return; + + // Chunk completely idle + set_chunk(pushbuffer, chunk, pushbuffer->idle_chunks); + set_chunk(pushbuffer, chunk, pushbuffer->available_chunks); + UVM_ASSERT_MSG(cpu_put == 0, "cpu put %u\n", cpu_put); + + // For a completely idle chunk, always start at the very beginning. This + // helps avoid the waste that can happen at the very end of the chunk + // described at the top of uvm_pushbuffer.h. 
+ chunk->next_push_start = 0; + } + else if (gpu_get > cpu_put) { + if (gpu_get - cpu_put >= UVM_MAX_PUSH_SIZE) { + // Enough space between put and get + set_chunk(pushbuffer, chunk, pushbuffer->available_chunks); + chunk->next_push_start = cpu_put; + } + } + else if (UVM_PUSHBUFFER_CHUNK_SIZE >= cpu_put + UVM_MAX_PUSH_SIZE) { + UVM_ASSERT_MSG(gpu_get < cpu_put, "gpu_get %u cpu_put %u\n", gpu_get, cpu_put); + + // Enough space at the end + set_chunk(pushbuffer, chunk, pushbuffer->available_chunks); + chunk->next_push_start = cpu_put; + } + else if (gpu_get >= UVM_MAX_PUSH_SIZE) { + UVM_ASSERT_MSG(gpu_get < cpu_put, "gpu_get %u cpu_put %u\n", gpu_get, cpu_put); + + // Enough space at the beginning + set_chunk(pushbuffer, chunk, pushbuffer->available_chunks); + chunk->next_push_start = 0; + } +} + +void uvm_pushbuffer_destroy(uvm_pushbuffer_t *pushbuffer) +{ + if (pushbuffer == NULL) + return; + + uvm_procfs_destroy_entry(pushbuffer->procfs.info_file); + + uvm_rm_mem_free(pushbuffer->memory); + uvm_kvfree(pushbuffer); +} + +static uvm_pushbuffer_chunk_t *offset_to_chunk(uvm_pushbuffer_t *pushbuffer, NvU32 offset) +{ + UVM_ASSERT(offset < UVM_PUSHBUFFER_SIZE); + return &pushbuffer->chunks[offset / UVM_PUSHBUFFER_CHUNK_SIZE]; +} + +static uvm_pushbuffer_chunk_t *gpfifo_to_chunk(uvm_pushbuffer_t *pushbuffer, uvm_gpfifo_entry_t *gpfifo) +{ + uvm_pushbuffer_chunk_t *chunk = offset_to_chunk(pushbuffer, gpfifo->pushbuffer_offset); + UVM_ASSERT(offset_to_chunk(pushbuffer, gpfifo->pushbuffer_offset + gpfifo->pushbuffer_size - 1) == chunk); + return chunk; +} + +void uvm_pushbuffer_mark_completed(uvm_pushbuffer_t *pushbuffer, uvm_gpfifo_entry_t *gpfifo) +{ + uvm_pushbuffer_chunk_t *chunk = gpfifo_to_chunk(pushbuffer, gpfifo); + uvm_push_info_t *push_info = gpfifo->push_info; + bool need_to_update_chunk = false; + + if (push_info->on_complete != NULL) + push_info->on_complete(push_info->on_complete_data); + + push_info->on_complete = NULL; + push_info->on_complete_data = NULL; + + uvm_spin_lock(&pushbuffer->lock); + + if (gpfifo == chunk_get_first_gpfifo(chunk)) + need_to_update_chunk = true; + else if (gpfifo == chunk_get_last_gpfifo(chunk)) + need_to_update_chunk = true; + + list_del(&gpfifo->pending_list_node); + + // If current_push is not NULL, updating the chunk is delayed till + // uvm_pushbuffer_end_push() is called for that push. 
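+    // uvm_pushbuffer_end_push() performs that update itself once the on-going
+    // push releases the chunk.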
+ if (need_to_update_chunk && chunk->current_push == NULL) + update_chunk(pushbuffer, chunk); + + uvm_spin_unlock(&pushbuffer->lock); +} + +NvU32 uvm_pushbuffer_get_offset_for_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push) +{ + NvU32 offset = (char*)push->begin - (char *)uvm_rm_mem_get_cpu_va(pushbuffer->memory); + + UVM_ASSERT(((NvU64)offset) % sizeof(NvU32) == 0); + + return offset; +} + +NvU64 uvm_pushbuffer_get_gpu_va_for_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push) +{ + NvU64 pushbuffer_base; + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + bool is_proxy_channel = uvm_channel_is_proxy(push->channel); + + pushbuffer_base = uvm_rm_mem_get_gpu_va(pushbuffer->memory, gpu, is_proxy_channel); + + return pushbuffer_base + uvm_pushbuffer_get_offset_for_push(pushbuffer, push); +} + +void uvm_pushbuffer_end_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push, uvm_gpfifo_entry_t *gpfifo) +{ + uvm_pushbuffer_chunk_t *chunk = gpfifo_to_chunk(pushbuffer, gpfifo); + + uvm_assert_spinlock_locked(&push->channel->pool->lock); + + uvm_spin_lock(&pushbuffer->lock); + + list_add_tail(&gpfifo->pending_list_node, &chunk->pending_gpfifos); + + update_chunk(pushbuffer, chunk); + + UVM_ASSERT(chunk->current_push == push); + chunk->current_push = NULL; + + uvm_spin_unlock(&pushbuffer->lock); + + // uvm_pushbuffer_end_push() needs to be called with the channel lock held + // while the concurrent pushes sema has a higher lock order. To keep the + // code structure simple, just up out of order here. + uvm_up_out_of_order(&pushbuffer->concurrent_pushes_sema); +} + +bool uvm_pushbuffer_has_space(uvm_pushbuffer_t *pushbuffer) +{ + bool has_space; + + uvm_spin_lock(&pushbuffer->lock); + + has_space = pick_chunk(pushbuffer) != NULL; + + uvm_spin_unlock(&pushbuffer->lock); + + return has_space; +} + +void uvm_pushbuffer_print_common(uvm_pushbuffer_t *pushbuffer, struct seq_file *s) +{ + NvU32 i; + + UVM_SEQ_OR_DBG_PRINT(s, "Pushbuffer for GPU %s\n", uvm_gpu_name(pushbuffer->channel_manager->gpu)); + UVM_SEQ_OR_DBG_PRINT(s, " has space: %d\n", uvm_pushbuffer_has_space(pushbuffer)); + + uvm_spin_lock(&pushbuffer->lock); + + for (i = 0; i < UVM_PUSHBUFFER_CHUNKS; ++i) { + uvm_pushbuffer_chunk_t *chunk = &pushbuffer->chunks[i]; + NvU32 cpu_put = chunk_get_cpu_put(pushbuffer, chunk); + NvU32 gpu_get = chunk_get_gpu_get(pushbuffer, chunk); + UVM_SEQ_OR_DBG_PRINT(s, " chunk %u put %u get %u next %u available %d idle %d\n", + i, + cpu_put, gpu_get, chunk->next_push_start, + test_bit(i, pushbuffer->available_chunks) ? 1 : 0, + test_bit(i, pushbuffer->idle_chunks) ? 
1 : 0); + + } + + uvm_spin_unlock(&pushbuffer->lock); +} + +void uvm_pushbuffer_print(uvm_pushbuffer_t *pushbuffer) +{ + return uvm_pushbuffer_print_common(pushbuffer, NULL); +} diff --git a/kernel-open/nvidia-uvm/uvm_pushbuffer.h b/kernel-open/nvidia-uvm/uvm_pushbuffer.h new file mode 100644 index 000000000..f9b80aa3b --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_pushbuffer.h @@ -0,0 +1,239 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_PUSHBUFFER_H__ +#define __UVM_PUSHBUFFER_H__ + +#include "uvm_forward_decl.h" +#include "uvm_lock.h" +#include "uvm_linux.h" +#include "nvtypes.h" + +// +// UVM pushbuffer +// +// The UVM pushbuffer is a memory allocator specialized for managing allocations +// used as the backing store for methods sent to the GPU (pushes, abstracted by +// uvm_push_t). Each pushbuffer is usable only with a specific channel manager +// and hence a specific GPU as that allows for greater flexibility down the road +// (e.g. moving the pushbuffer allocation to GPU memory in some cases). +// +// The usage of the pushbuffer always follows the same pattern: +// 1) The CPU requests a new allocation to do a push in. The allocation is +// always initially of UVM_MAX_PUSH_SIZE and its usage is tracked by the +// UVM push abstraction (uvm_push_t). +// 2) The CPU writes some GPU methods +// 3) The CPU finishes and reports how much of the UVM_MAX_PUSH_SIZE space was used +// 4) The methods are queued to be read by the GPU (by referencing them in a GPFIFO entry) +// 5) At some later time the CPU notices the methods have been completed and +// reports that the allocation can now be reused. +// +// Notably 1) could happen concurrently from multiple CPU threads and in 4) the +// GPU has multiple independent queues of execution (UVM channels). +// +// With the above in mind, we can go through the implementation details of the +// current solution. +// The pushbuffer backing store is a single big allocation logically divided +// into largely independent parts called chunks. +// Each chunk is roughly a ringbuffer tracking multiple pending pushes being +// processed by the GPU. 
The pushbuffer maintains two bitmaps, one tracking +// completely idle (with no pending pushes) chunks and a second one tracking +// available (with pending pushes, but still enough space for a new push) +// chunks. When a new allocation is requested, idle chunks are always used +// first and after that available chunks are consulted. If none are available, +// the CPU spin waits on the GPU to complete some of the pending pushes making +// space for a new one. +// +// To explain how chunks track pending pushes we will go through an example +// modifying a chunk's state. Let's start with a few pending pushes in the +// chunk: +// +// [ [push P1][push P2][free unusable space][push P3][push P4] ] +// ^ gpu_get ^ cpu_put +// +// The beginning of the first pending push is called the GPU get and the end of the +// last push is called the CPU put. This follows the HW GPFIFO naming that's a true +// ringbuffer (always completing in order and supporting transparent +// wrap-around). All the memory between gpu_get and cpu_put is considered +// unusable. Pushes within a chunk can finish out of order as each chunk can +// service pushes from multiple channels. And hence there can be some space +// between the first and last push that's already free, but unusable. The space +// after cpu_put and before gpu_get is available to be allocated to a new push, +// and if that happens the chunk above could change to: +// +// [ [push P1][push P2][free unusable space][push P3][push P4][push P5] ] +// ^ gpu_get ^ cpu_put +// +// Then, say push P2 completes: +// +// [ [push P1][free unusable space ][push P3][push P4][push P5] ] +// ^ gpu_get ^ cpu_put +// +// We can see that P2 completing only expands the unusable free space, but if P1 +// finishes we get: +// +// [ [push P3][push P4][push P5] ] +// ^ gpu_get ^ cpu_put +// +// This shows that some cases cause waste, but on the other hand allow for +// tracking of pending pushes and free space to be trivial. Each pending push of +// a chunk is in a doubly linked list with its head in the chunk. Each new push +// is added at the tail and when a push completes it's removed from the list. +// The gpu_get and/or cpu_put only change when the last/first push in the +// list finishes or a new push is added. The pending pushes are represented by +// the software state tracking GPFIFO entries (uvm_gpfifo_entry_t in +// uvm_channel.h) that are all allocated at channel creation (a HW channel has +// a fixed limit of GPFIFO entries it supports that's chosen at channel creation +// and we allocate all the SW state for them at channel creation as well). This +// allows all the operations of the pushbuffer to be free of any memory +// allocation. +// +// To illustrate that a chunk is only roughly a ringbuffer, let's see what +// happens when another push is added to the chunk above, but there is not +// enough space between cpu_put and the end of the chunk to fit UVM_MAX_PUSH_SIZE: +// [[push P6][free space ][push P3][push P4][push P5] ] +// ^ cpu_put ^ gpu_get +// +// The GPU reading the pushbuffer expects it to be in a consecutive VA and hence +// the pending pushes cannot wrap around in the chunk leading to some potential +// waste at the end. +// +// The pushbuffer implementation is configurable through a few defines below, +// but careful tweaking of them is yet to be done. +// + +// TODO: Bug 1764958: Calculate/measure the maximum push size and tweak the +// number of chunks and size of each after benchmarks. 
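+//
+// With the defines below, each chunk holds up to 8 maximum-sized pushes
+// (8 * 128KB = 1MB) and the pushbuffer spans 16 such chunks, i.e. 16MB total.
+//
+// As a rough sketch of how the rest of the driver drives this allocator, a
+// single push flows through it as follows: uvm_push_begin() ends up calling
+// uvm_pushbuffer_begin_push() to claim space in a chunk, the HAL writes its
+// methods into that space, uvm_push_end() hands the range over to a GPFIFO
+// entry via uvm_pushbuffer_end_push(), and channel completion processing
+// eventually calls uvm_pushbuffer_mark_completed() to return the space.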
+// +// Below are the measurements borrowed from uvm_channel_mgmt.h. They will need +// to be adjusted and verified once all the operations are implemented in this +// driver, but for now we will set the MAX_PUSH to 128K as that seems pretty +// safe. +// +// A pushbuffer needs to accomodate all possible operations on a 2 Mb Va region +// per gpu. The longest sequence of operations would be: +// Acquire 3 + 32 trackers: +// replay tracker, instancePtr tracker, 2Mb descriptor tracker and 32 trackers +// one each for 64Kb of phys mem. +// Each tracker can have ~64 tracker items (35 x 64 x 20 bytes acquire = 45k) +// Unmap 4k ptes for 2Mb va (Inline pte data + header = ~4k) +// Invalidate for every 4k (512 * 20 bytes = 10k) +// Migrate data worth 2 Mb (512 * 48 bytes to do copy = 24k) +// Map 4k ptes for 2Mb va (4k inline pte data + header = ~4k) +// Invalidate for every 4k (512 * 20 bytes = 10k) +// Total Total= ~100k +// +#define UVM_MAX_PUSH_SIZE (128 * 1024) +#define UVM_PUSHBUFFER_CHUNK_SIZE (8 * UVM_MAX_PUSH_SIZE) +#define UVM_PUSHBUFFER_CHUNKS 16 + +// Total size of the pushbuffer +#define UVM_PUSHBUFFER_SIZE (UVM_PUSHBUFFER_CHUNK_SIZE * UVM_PUSHBUFFER_CHUNKS) + +// The max number of concurrent pushes that can be happening at the same time. +// Concurrent pushes are ones that are after uvm_push_begin*(), but before +// uvm_push_end(). +#define UVM_PUSH_MAX_CONCURRENT_PUSHES UVM_PUSHBUFFER_CHUNKS + +typedef struct +{ + // Offset within the chunk of where a next push should begin if there is + // space for one. Updated in update_chunk(). + NvU32 next_push_start; + + // List of uvm_gpfifo_entry_t that are pending and used this chunk. New + // entries are always added at the tail of the list. + struct list_head pending_gpfifos; + + // Currently on-going push in the chunk. There can be only one at a time. + uvm_push_t *current_push; +} uvm_pushbuffer_chunk_t; + +struct uvm_pushbuffer_struct +{ + uvm_channel_manager_t *channel_manager; + + // Memory allocation backing the pushbuffer + uvm_rm_mem_t *memory; + + // Array of the pushbuffer chunks + uvm_pushbuffer_chunk_t chunks[UVM_PUSHBUFFER_CHUNKS]; + + // Chunks that do not have an on-going push and have at least + // UVM_MAX_PUSH_SIZE space free. + DECLARE_BITMAP(available_chunks, UVM_PUSHBUFFER_CHUNKS); + + // Chunks that do not have an on-going push nor any pending pushes. + DECLARE_BITMAP(idle_chunks, UVM_PUSHBUFFER_CHUNKS); + + // Lock protecting chunk state and the bitmaps. + uvm_spinlock_t lock; + + // Semaphore enforcing a limited number of concurrent pushes. + // Decremented in uvm_pushbuffer_begin_push(), incremented in + // uvm_pushbuffer_end_push(). + // Initialized to the number of chunks as that's how many concurrent pushes + // are supported. + uvm_semaphore_t concurrent_pushes_sema; + + struct + { + struct proc_dir_entry *info_file; + } procfs; +}; + +// Create a pushbuffer +NV_STATUS uvm_pushbuffer_create(uvm_channel_manager_t *channel_manager, uvm_pushbuffer_t **pushbuffer_out); + +// Destroy the pushbuffer +void uvm_pushbuffer_destroy(uvm_pushbuffer_t *pushbuffer); + +// Get an allocation for a push from the pushbuffer +// Waits until a chunk is available and claims it for the push. The chunk used +// for the push will be unavailable for any new pushes until +// uvm_pushbuffer_end_push() for the push is called. 
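+// The claimed space stays reserved until uvm_pushbuffer_end_push() is called
+// and, once the GPU consumes the corresponding GPFIFO entry,
+// uvm_pushbuffer_mark_completed() returns it to the chunk.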
+NV_STATUS uvm_pushbuffer_begin_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push); + +// Complete a pending push +// Updates the chunk state the pending push used +void uvm_pushbuffer_mark_completed(uvm_pushbuffer_t *pushbuffer, uvm_gpfifo_entry_t *gpfifo); + +// Get the GPU VA for an ongoing push +NvU64 uvm_pushbuffer_get_gpu_va_for_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push); + +// Get the offset of the beginning of the push from the base of the pushbuffer allocation +NvU32 uvm_pushbuffer_get_offset_for_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push); + +// End an on-going push +// Updates the state of the chunk making it available for new pushes if it has +// enough space left. +void uvm_pushbuffer_end_push(uvm_pushbuffer_t *pushbuffer, uvm_push_t *push, uvm_gpfifo_entry_t *gpfifo); + +// Query whether the pushbuffer has space for another push +// Mostly useful in pushbuffer tests +bool uvm_pushbuffer_has_space(uvm_pushbuffer_t *pushbuffer); + +// Helper to print pushbuffer state for debugging +void uvm_pushbuffer_print(uvm_pushbuffer_t *pushbuffer); + +#endif // __UVM_PUSHBUFFER_H__ diff --git a/kernel-open/nvidia-uvm/uvm_range_allocator.c b/kernel-open/nvidia-uvm/uvm_range_allocator.c new file mode 100644 index 000000000..114156f2a --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_allocator.c @@ -0,0 +1,168 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_range_allocator.h" +#include "uvm_kvmalloc.h" + +NV_STATUS uvm_range_allocator_init(NvU64 size, uvm_range_allocator_t *range_allocator) +{ + NV_STATUS status; + uvm_range_tree_node_t *node; + + uvm_spin_lock_init(&range_allocator->lock, UVM_LOCK_ORDER_LEAF); + uvm_range_tree_init(&range_allocator->range_tree); + + UVM_ASSERT(size > 0); + + node = uvm_kvmalloc(sizeof(*node)); + if (!node) + return NV_ERR_NO_MEMORY; + + node->start = 0; + node->end = size - 1; + + status = uvm_range_tree_add(&range_allocator->range_tree, node); + UVM_ASSERT(status == NV_OK); + + range_allocator->size = size; + + return NV_OK; +} + +void uvm_range_allocator_deinit(uvm_range_allocator_t *range_allocator) +{ + uvm_range_tree_node_t *node; + + node = uvm_range_tree_iter_first(&range_allocator->range_tree, 0, range_allocator->size - 1); + UVM_ASSERT(node); + UVM_ASSERT_MSG(node->start == 0 && node->end == range_allocator->size - 1, "start 0x%llx end 0x%llx\n", node->start, node->end); + + // Remove the node for completeness even though after deinit the state of + // tree doesn't matter anyway. + uvm_range_tree_remove(&range_allocator->range_tree, node); + + uvm_kvfree(node); +} + +NV_STATUS uvm_range_allocator_alloc(uvm_range_allocator_t *range_allocator, NvU64 size, NvU64 alignment, uvm_range_allocation_t *range_alloc) +{ + uvm_range_tree_node_t *node; + bool found = false; + + UVM_ASSERT(size > 0); + + if (alignment == 0) + alignment = 1; + + UVM_ASSERT(is_power_of_2(alignment)); + + if (size > range_allocator->size) + return NV_ERR_UVM_ADDRESS_IN_USE; + + // Pre-allocate a tree node as part of the allocation so that freeing the + // range won't require allocating memory and will always succeed. + range_alloc->node = uvm_kvmalloc(sizeof(*range_alloc->node)); + if (!range_alloc->node) + return NV_ERR_NO_MEMORY; + + uvm_spin_lock(&range_allocator->lock); + + // This is a very simple brute force going over all the free ranges in + // address order and returning the first one that's big enough. + // This could be improved by e.g. also maintaining a binary tree of free + // ranges ordered by their size. + uvm_range_tree_for_each_in(node, &range_allocator->range_tree, 0, range_allocator->size - size) { + NvU64 aligned_start = UVM_ALIGN_UP(node->start, alignment); + NvU64 aligned_end = aligned_start + size - 1; + + // Check for overflow of aligned_start and aligned_end + if (aligned_start < node->start || aligned_end < aligned_start) + continue; + + // Check whether it fits + if (aligned_end > node->end) + continue; + + // The allocation always wastes the [node->start, aligned_start) space, + // but it's expected that there will always be plenty of free space to + // allocate from and wasting that space should help avoid fragmentation. + + range_alloc->aligned_start = aligned_start; + range_alloc->node->start = node->start; + range_alloc->node->end = aligned_end; + + if (aligned_end < node->end) { + // Shrink the node if the claimed size is smaller than the node. + uvm_range_tree_shrink_node(&range_allocator->range_tree, node, aligned_end + 1, node->end); + } + else { + // Otherwise just remove it. The removal is safe to do in the loop + // over nodes as a break is following immediately. 
+ UVM_ASSERT(node->end == aligned_end); + uvm_range_tree_remove(&range_allocator->range_tree, node); + uvm_kvfree(node); + } + found = true; + break; + } + + uvm_spin_unlock(&range_allocator->lock); + + if (!found) { + uvm_kvfree(range_alloc->node); + range_alloc->node = NULL; + return NV_ERR_UVM_ADDRESS_IN_USE; + } + + return NV_OK; +} + +void uvm_range_allocator_free(uvm_range_allocator_t *range_allocator, uvm_range_allocation_t *range_alloc) +{ + NV_STATUS status; + uvm_range_tree_node_t *adjacent_node; + + if (!range_alloc) + return; + + UVM_ASSERT(range_alloc->node); + + uvm_spin_lock(&range_allocator->lock); + + // Add the pre-allocated free range to the tree + status = uvm_range_tree_add(&range_allocator->range_tree, range_alloc->node); + UVM_ASSERT(status == NV_OK); + + // And try merging it with adjacent nodes + adjacent_node = uvm_range_tree_merge_prev(&range_allocator->range_tree, range_alloc->node); + if (adjacent_node) + uvm_kvfree(adjacent_node); + + adjacent_node = uvm_range_tree_merge_next(&range_allocator->range_tree, range_alloc->node); + if (adjacent_node) + uvm_kvfree(adjacent_node); + + uvm_spin_unlock(&range_allocator->lock); + + range_alloc->node = NULL; +} diff --git a/kernel-open/nvidia-uvm/uvm_range_allocator.h b/kernel-open/nvidia-uvm/uvm_range_allocator.h new file mode 100644 index 000000000..08045c229 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_allocator.h @@ -0,0 +1,73 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_RANGE_ALLOCATOR_H__ +#define __UVM_RANGE_ALLOCATOR_H__ + +#include "uvm_range_tree.h" +#include "uvm_lock.h" + +typedef struct { + // Lock protecting the state of the range allocator + uvm_spinlock_t lock; + + // Size of the range to allocate from + NvU64 size; + + // Range tree tracking all the free ranges + uvm_range_tree_t range_tree; +} uvm_range_allocator_t; + +// A free range allocation +typedef struct { + // The allocated start of the range + NvU64 aligned_start; + + // A tree node allocated at the time of range allocation and used by the + // range allocator when the range allocation is freed. This allows to + // guarantee that uvm_range_allocator_free() always succeeds. 
+ uvm_range_tree_node_t *node; +} uvm_range_allocation_t; + +// Initialize the range allocator with the given size +NV_STATUS uvm_range_allocator_init(NvU64 size, uvm_range_allocator_t *range_allocator); + +// Deinitialize the range allocator +// +// All allocated ranges need to have been freed before the range allocator is deinitialized +void uvm_range_allocator_deinit(uvm_range_allocator_t *range_allocator); + +// Alloc a free range of the given size and alignment +// +// Size needs to be greater or equal to 1. +// Alignment needs to be a power of 2 or 0. Alignment of 0 is converted into +// alignment of 1. +// +// On success, the start of the allocated range is returned in +// free_range_alloc->aligned_start. +NV_STATUS uvm_range_allocator_alloc(uvm_range_allocator_t *range_allocator, NvU64 size, NvU64 alignment, uvm_range_allocation_t *free_range_alloc); + +// Free a previously allocated range +void uvm_range_allocator_free(uvm_range_allocator_t *range_allocator, uvm_range_allocation_t *range_alloc); + +#endif // __UVM_RANGE_ALLOCATOR_H__ diff --git a/kernel-open/nvidia-uvm/uvm_range_allocator_test.c b/kernel-open/nvidia-uvm/uvm_range_allocator_test.c new file mode 100644 index 000000000..fe1366e58 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_allocator_test.c @@ -0,0 +1,351 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + +#include "uvm_common.h" +#include "uvm_range_allocator.h" +#include "uvm_test_rng.h" +#include "uvm_kvmalloc.h" + +#include "uvm_test.h" +#include "uvm_test_ioctl.h" + +// Verify that no range is currently allocated from the allocator +static NV_STATUS test_check_range_allocator_empty(uvm_range_allocator_t *range_allocator) +{ + uvm_range_tree_node_t *node; + node = uvm_range_tree_find(&range_allocator->range_tree, 0); + TEST_CHECK_RET(node != NULL); + + TEST_CHECK_RET(node->start == 0); + TEST_CHECK_RET(node->end == range_allocator->size - 1); + + return NV_OK; +} + +static NvU64 range_alloc_size(uvm_range_allocation_t *alloc) +{ + return uvm_range_tree_node_size(alloc->node); +} + +static NV_STATUS test_alloc_range(uvm_range_allocator_t *range_allocator, NvU64 size, NvU64 alignment, uvm_range_allocation_t *alloc) +{ + NV_STATUS status; + NvU64 node_start; + NvU64 node_end; + + status = uvm_range_allocator_alloc(range_allocator, size, alignment, alloc); + if (status != NV_OK) + return status; + + node_start = alloc->node->start; + node_end = alloc->node->end; + TEST_CHECK_RET(node_start <= alloc->aligned_start); + TEST_CHECK_RET(alloc->aligned_start + size > alloc->aligned_start); + TEST_CHECK_RET(alloc->aligned_start + size - 1 == node_end); + TEST_CHECK_RET(IS_ALIGNED(alloc->aligned_start, alignment)); + TEST_CHECK_RET(uvm_range_tree_iter_first(&range_allocator->range_tree, node_start, node_end) == NULL); + + return NV_OK; +} + +static NvU64 test_free_range(uvm_range_allocator_t *range_allocator, uvm_range_allocation_t *alloc) +{ + NvU64 size = range_alloc_size(alloc); + + uvm_range_allocator_free(range_allocator, alloc); + + return size; +} + +#define BASIC_TEST_SIZE (1024ull * 1024 * 1024) +#define BASIC_TEST_MAX_ALLOCS (128) + +// Check that a specific range is free in the allocator +static NV_STATUS test_check_free_range(uvm_range_allocator_t *range_allocator, NvU64 start, NvU64 size) +{ + uvm_range_tree_node_t *node = uvm_range_tree_find(&range_allocator->range_tree, start); + TEST_CHECK_RET(node != NULL); + TEST_CHECK_RET(node->start == start); + TEST_CHECK_RET(uvm_range_tree_node_size(node) == size); + return NV_OK; +} + +// Notably this test leaks memory on failure as it's hard to clean up correctly +// if something goes wrong and uvm_range_allocator_deinit would likely hit +// asserts. 
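+//
+// For reference, the core alloc/free cycle these tests drive through the
+// uvm_range_allocator.h API boils down to the following illustrative sketch
+// (error handling trimmed; size, alloc_size and alignment stand in for
+// arbitrary values):
+//
+//     uvm_range_allocator_t allocator;
+//     uvm_range_allocation_t alloc;
+//
+//     status = uvm_range_allocator_init(size, &allocator);
+//     status = uvm_range_allocator_alloc(&allocator, alloc_size, alignment, &alloc);
+//     // On success, alloc.aligned_start holds the start of the allocated range
+//     uvm_range_allocator_free(&allocator, &alloc);
+//     uvm_range_allocator_deinit(&allocator);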
+static NV_STATUS basic_test(void) +{ + NV_STATUS status; + uvm_range_allocator_t range_allocator; + uvm_range_allocation_t *range_allocs; + NvU32 i; + const NvU64 max_alignment = 1ull << 63; + + range_allocs = uvm_kvmalloc(sizeof(*range_allocs) * BASIC_TEST_MAX_ALLOCS); + if (!range_allocs) + return NV_ERR_NO_MEMORY; + + status = uvm_range_allocator_init(BASIC_TEST_SIZE, &range_allocator); + TEST_CHECK_RET(status == NV_OK); + uvm_range_allocator_deinit(&range_allocator); + + status = uvm_range_allocator_init(BASIC_TEST_SIZE, &range_allocator); + TEST_CHECK_RET(status == NV_OK); + + TEST_CHECK_RET(test_alloc_range(&range_allocator, BASIC_TEST_SIZE, 1, &range_allocs[0]) == NV_OK); + test_free_range(&range_allocator, &range_allocs[0]); + + TEST_CHECK_RET(test_alloc_range(&range_allocator, BASIC_TEST_SIZE, 1, &range_allocs[0]) == NV_OK); + test_free_range(&range_allocator, &range_allocs[0]); + + TEST_CHECK_RET(test_alloc_range(&range_allocator, BASIC_TEST_SIZE + 1, 1, &range_allocs[0]) == NV_ERR_UVM_ADDRESS_IN_USE); + TEST_CHECK_RET(test_alloc_range(&range_allocator, ULLONG_MAX, 1, &range_allocs[0]) == NV_ERR_UVM_ADDRESS_IN_USE); + + for (i = 0; i < 4; ++i) + TEST_CHECK_RET(test_alloc_range(&range_allocator, BASIC_TEST_SIZE / 4, 1, &range_allocs[i]) == NV_OK); + + test_free_range(&range_allocator, &range_allocs[0]); + test_free_range(&range_allocator, &range_allocs[2]); + test_free_range(&range_allocator, &range_allocs[1]); + test_free_range(&range_allocator, &range_allocs[3]); + + for (i = 0; i < 4; ++i) + TEST_CHECK_RET(uvm_range_allocator_alloc(&range_allocator, BASIC_TEST_SIZE / 4, 1, &range_allocs[i]) == NV_OK); + + for (i = 0; i < 4; ++i) + test_free_range(&range_allocator, &range_allocs[i]); + + TEST_CHECK_RET(test_alloc_range(&range_allocator, 1, BASIC_TEST_SIZE / 2, &range_allocs[0]) == NV_OK); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 1, BASIC_TEST_SIZE / 2, &range_allocs[1]) == NV_OK); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 1, BASIC_TEST_SIZE / 2, &range_allocs[2]) == NV_ERR_UVM_ADDRESS_IN_USE); + + for (i = 0; i < 2; ++i) + test_free_range(&range_allocator, &range_allocs[i]); + + uvm_range_allocator_deinit(&range_allocator); + + status = uvm_range_allocator_init(ULLONG_MAX, &range_allocator); + TEST_CHECK_RET(status == NV_OK); + + TEST_CHECK_RET(test_alloc_range(&range_allocator, ULLONG_MAX, 1, &range_allocs[0]) == NV_OK); + test_free_range(&range_allocator, &range_allocs[0]); + TEST_CHECK_RET(test_alloc_range(&range_allocator, ULLONG_MAX, max_alignment, &range_allocs[0]) == NV_OK); + test_free_range(&range_allocator, &range_allocs[0]); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 1, 1, &range_allocs[0]) == NV_OK); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 1, max_alignment, &range_allocs[1]) == NV_OK); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 1, max_alignment, &range_allocs[2]) == NV_ERR_UVM_ADDRESS_IN_USE); + test_free_range(&range_allocator, &range_allocs[1]); + + TEST_CHECK_RET(test_alloc_range(&range_allocator, -2, 4, &range_allocs[1]) == NV_ERR_UVM_ADDRESS_IN_USE); + + test_free_range(&range_allocator, &range_allocs[0]); + + TEST_CHECK_RET(test_alloc_range(&range_allocator, ULLONG_MAX - 3 * 128, max_alignment, &range_allocs[0]) == NV_OK); + TEST_CHECK_RET(test_check_free_range(&range_allocator, ULLONG_MAX - 3 * 128, 3 * 128) == NV_OK); + + TEST_CHECK_RET(test_alloc_range(&range_allocator, 128, 1, &range_allocs[1]) == NV_OK); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 128, 1, &range_allocs[2]) == NV_OK); + 
TEST_CHECK_RET(test_alloc_range(&range_allocator, 128, 1, &range_allocs[3]) == NV_OK); + + // Free the first 128 byte alloc leaving 256 bytes at the end. + // This assumes the allocator will give out the lowest address first and + // will need to be adjusted if the implementation changes. + TEST_CHECK_RET(range_allocs[1].aligned_start == ULLONG_MAX - 3 * 128); + test_free_range(&range_allocator, &range_allocs[1]); + + // Check for cases that could likely cause overflow + TEST_CHECK_RET(test_alloc_range(&range_allocator, 3 * 128, 256, &range_allocs[1]) == NV_ERR_UVM_ADDRESS_IN_USE); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 3 * 128, max_alignment, &range_allocs[1]) == NV_ERR_UVM_ADDRESS_IN_USE); + TEST_CHECK_RET(test_alloc_range(&range_allocator, ULLONG_MAX, max_alignment, &range_allocs[1]) == NV_ERR_UVM_ADDRESS_IN_USE); + TEST_CHECK_RET(test_alloc_range(&range_allocator, ULLONG_MAX, 1, &range_allocs[1]) == NV_ERR_UVM_ADDRESS_IN_USE); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 128, max_alignment, &range_allocs[1]) == NV_ERR_UVM_ADDRESS_IN_USE); + TEST_CHECK_RET(test_alloc_range(&range_allocator, 128, 1024, &range_allocs[1]) == NV_ERR_UVM_ADDRESS_IN_USE); + + test_free_range(&range_allocator, &range_allocs[2]); + test_free_range(&range_allocator, &range_allocs[3]); + test_free_range(&range_allocator, &range_allocs[0]); + + uvm_range_allocator_deinit(&range_allocator); + + uvm_kvfree(range_allocs); + + return NV_OK; +} + +#define RANDOM_TEST_SIZE 1024 + +typedef struct +{ + uvm_range_allocator_t range_allocator; + + uvm_test_rng_t rng; + + // Currently allocated ranges can be indexed by [0, allocated_ranges) + uvm_range_allocation_t *range_allocs; + size_t allocated_ranges; + + // Total size of free ranges in the allocator + size_t free_size; + + // Total count of successful allocs, for verbose reporting + NvU64 total_allocs; +} random_test_state_t; + +static NV_STATUS random_test_alloc_range(random_test_state_t *state, NvU64 size, NvU64 alignment) +{ + NV_STATUS status; + uvm_range_allocation_t *range_alloc; + + if (state->free_size == 0) + return NV_OK; + + UVM_ASSERT(state->allocated_ranges < state->range_allocator.size); + + range_alloc = &state->range_allocs[state->allocated_ranges]; + status = test_alloc_range(&state->range_allocator, size, alignment, range_alloc); + if (status != NV_OK) + return status; + + ++state->allocated_ranges; + + UVM_ASSERT(state->free_size >= range_alloc_size(range_alloc)); + state->free_size -= range_alloc_size(range_alloc); + + ++state->total_allocs; + + return NV_OK; +} + +static NvU64 random_test_free_range(random_test_state_t *state, NvU32 index) +{ + uvm_range_allocation_t *alloc = &state->range_allocs[index]; + NvU32 size = range_alloc_size(alloc); + + state->free_size += size; + UVM_ASSERT(state->free_size <= state->range_allocator.size); + + test_free_range(&state->range_allocator, alloc); + + UVM_ASSERT(state->allocated_ranges > 0); + --state->allocated_ranges; + if (index != state->allocated_ranges) + state->range_allocs[index] = state->range_allocs[state->allocated_ranges]; + + return size; + +} + +static NV_STATUS random_test_free_random_range(random_test_state_t *state) +{ + NvU32 index; + NvU64 freed_size; + NV_STATUS status; + + if (state->allocated_ranges == 0) + return NV_OK; + + index = uvm_test_rng_range_ptr(&state->rng, 0, state->allocated_ranges - 1); + freed_size = random_test_free_range(state, index); + + // Reallocating the same size as just freed with 1-byte alignment should always work + status = 
random_test_alloc_range(state, freed_size, 1);
+    TEST_CHECK_RET(status == NV_OK);
+
+    // Free the just reallocated range
+    random_test_free_range(state, state->allocated_ranges - 1);
+
+    return NV_OK;
+}
+
+// Randomized test performing one of 3 actions on a free range allocator in each iteration:
+//  - Allocate all expected free space with 1-byte allocations and then free
+//    random allocations until at least half of the space is empty.
+//  - Free a random allocation
+//  - Allocate a random range (this can fail)
+//
+// Notably this test leaks memory on failure as it's hard to clean up correctly
+// if something goes wrong and uvm_range_allocator_deinit would likely hit
+// asserts.
+static NV_STATUS random_test(NvU32 iters, NvU32 seed, bool verbose)
+{
+    NV_STATUS status;
+    random_test_state_t state;
+    int i;
+
+    memset(&state, 0, sizeof(state));
+
+    state.free_size = RANDOM_TEST_SIZE;
+    uvm_test_rng_init(&state.rng, seed);
+
+    state.range_allocs = uvm_kvmalloc(sizeof(*state.range_allocs) * RANDOM_TEST_SIZE);
+    if (!state.range_allocs)
+        return NV_ERR_NO_MEMORY;
+
+    status = uvm_range_allocator_init(state.free_size, &state.range_allocator);
+    TEST_CHECK_RET(status == NV_OK);
+
+    for (i = 0; i < iters; ++i) {
+        NvU32 action = uvm_test_rng_range_32(&state.rng, 0, 20);
+        if (action == 0) {
+            // Make sure we can allocate all of the expected free area with 1-byte ranges
+            while (state.free_size > 0)
+                TEST_CHECK_RET(random_test_alloc_range(&state, 1, 1) == NV_OK);
+
+            // And then free up enough random ranges to make it at least half empty
+            while (state.free_size < RANDOM_TEST_SIZE / 2)
+                TEST_CHECK_RET(random_test_free_random_range(&state) == NV_OK);
+        }
+        else if (action < 5) {
+            TEST_CHECK_RET(random_test_free_random_range(&state) == NV_OK);
+        }
+        else {
+            NvU32 size = uvm_test_rng_range_32(&state.rng, 1, max(state.free_size / 4, (size_t)1));
+            NvU32 alignment = 1ull << uvm_test_rng_range_32(&state.rng, 0, 5);
+
+            status = random_test_alloc_range(&state, size, alignment);
+            // Random alloc is expected to fail sometimes.
+ TEST_CHECK_RET(status == NV_OK || status == NV_ERR_UVM_ADDRESS_IN_USE); + } + } + + while (state.allocated_ranges > 0) + TEST_CHECK_RET(random_test_free_random_range(&state) == NV_OK); + + if (verbose) + UVM_TEST_PRINT("Iters %u, total allocs made %llu\n", iters, state.total_allocs); + + TEST_CHECK_RET(test_check_range_allocator_empty(&state.range_allocator) == NV_OK); + + uvm_range_allocator_deinit(&state.range_allocator); + uvm_kvfree(state.range_allocs); + return NV_OK; +} + +NV_STATUS uvm_test_range_allocator_sanity(UVM_TEST_RANGE_ALLOCATOR_SANITY_PARAMS *params, struct file *filp) +{ + TEST_CHECK_RET(basic_test() == NV_OK); + TEST_CHECK_RET(random_test(params->iters, params->seed, params->verbose > 0) == NV_OK); + + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_range_group.c b/kernel-open/nvidia-uvm/uvm_range_group.c new file mode 100644 index 000000000..0ffa6fa8b --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_group.c @@ -0,0 +1,875 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_range_group.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_va_block.h" +#include "uvm_tools.h" +#include "uvm_ioctl.h" +#include "uvm_types.h" +#include "uvm_api.h" +#include "uvm_test.h" + +static struct kmem_cache *g_uvm_range_group_cache __read_mostly; +static struct kmem_cache *g_uvm_range_group_range_cache __read_mostly; + +NV_STATUS uvm_range_group_init(void) +{ + g_uvm_range_group_cache = NV_KMEM_CACHE_CREATE("uvm_range_group_t", uvm_range_group_t); + if (!g_uvm_range_group_cache) + return NV_ERR_NO_MEMORY; + + g_uvm_range_group_range_cache = NV_KMEM_CACHE_CREATE("uvm_range_group_range_t", uvm_range_group_range_t); + if (!g_uvm_range_group_range_cache) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_range_group_exit(void) +{ + kmem_cache_destroy_safe(&g_uvm_range_group_cache); + kmem_cache_destroy_safe(&g_uvm_range_group_range_cache); +} + +static uvm_range_group_range_t *range_group_range_create(uvm_range_group_t *range_group, + NvU64 start, + NvU64 end) +{ + uvm_range_group_range_t *rgr = nv_kmem_cache_zalloc(g_uvm_range_group_range_cache, NV_UVM_GFP_FLAGS); + if (rgr == NULL) + return NULL; + + UVM_ASSERT(range_group != NULL); + INIT_LIST_HEAD(&rgr->range_group_migrated_list_node); + list_add(&rgr->range_group_list_node, &range_group->ranges); + rgr->range_group = range_group; + + rgr->node.start = start; + rgr->node.end = end; + + return rgr; +} + +static void uvm_range_group_range_destroy(uvm_range_group_range_t *rgr) +{ + if (rgr == NULL) + return; + + list_del(&rgr->range_group_list_node); + + // The VA space should be locked in write mode when this function is called, + // so we don't need to acquire the migrated_list lock. 
+ list_del(&rgr->range_group_migrated_list_node); + kmem_cache_free(g_uvm_range_group_range_cache, rgr); +} + +NV_STATUS uvm_api_create_range_group(UVM_CREATE_RANGE_GROUP_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_range_group_t *range_group = NULL; + NV_STATUS status = NV_OK; + int ret; + + range_group = nv_kmem_cache_zalloc(g_uvm_range_group_cache, NV_UVM_GFP_FLAGS); + if (!range_group) + return NV_ERR_NO_MEMORY; + + range_group->id = atomic64_inc_return(&va_space->range_group_id_counter); + UVM_ASSERT(range_group->id != UVM_RANGE_GROUP_ID_NONE); + + atomic_set(&range_group->allow_migration, 1); + INIT_LIST_HEAD(&range_group->ranges); + INIT_LIST_HEAD(&range_group->migrated_ranges); + uvm_spin_lock_init(&range_group->migrated_ranges_lock, UVM_LOCK_ORDER_LEAF); + + uvm_va_space_down_write(va_space); + + ret = radix_tree_insert(&va_space->range_groups, range_group->id, range_group); + status = errno_to_nv_status(ret); + if (status != NV_OK) { + kmem_cache_free(g_uvm_range_group_cache, range_group); + goto done; + } + + params->rangeGroupId = range_group->id; + +done: + uvm_va_space_up_write(va_space); + + return status; +} + +static void uvm_range_group_destroy(uvm_va_space_t *va_space, uvm_range_group_t *range_group) +{ + uvm_range_group_range_t *rgr, *tmp; + + list_for_each_entry_safe(rgr, tmp, &range_group->ranges, range_group_list_node) { + UVM_ASSERT(rgr->range_group == range_group); + + uvm_range_tree_remove(&va_space->range_group_ranges, &rgr->node); + uvm_range_group_range_destroy(rgr); + } + + UVM_ASSERT(list_empty(&range_group->migrated_ranges)); + + kmem_cache_free(g_uvm_range_group_cache, range_group); +} + +NV_STATUS uvm_api_destroy_range_group(UVM_DESTROY_RANGE_GROUP_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_range_group_t *range_group = NULL; + NV_STATUS status = NV_OK; + + uvm_va_space_down_write(va_space); + + range_group = radix_tree_delete(&va_space->range_groups, params->rangeGroupId); + if (!range_group) { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + uvm_range_group_destroy(va_space, range_group); + +done: + uvm_va_space_up_write(va_space); + return status; +} + +void uvm_range_group_radix_tree_destroy(uvm_va_space_t *va_space) +{ + uvm_range_group_t *range_group = NULL; + struct radix_tree_root *root = &va_space->range_groups; + NvU64 index = 1; + + while (radix_tree_gang_lookup(root, (void**)&range_group, index, 1)) { + UVM_ASSERT(range_group); + radix_tree_delete(root, range_group->id); + index = range_group->id + 1; + uvm_range_group_destroy(va_space, range_group); + } +} + +static NV_STATUS uvm_range_group_va_range_migrate_block_locked(uvm_va_range_t *va_range, + uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_va_block_region_t region, + uvm_tracker_t *tracker) +{ + NV_STATUS status; + NV_STATUS tracker_status; + uvm_gpu_id_t gpu_id; + uvm_processor_mask_t map_mask; + + // Unmapping UVM_ID_CPU is guaranteed to never fail + status = uvm_va_block_unmap(va_block, va_block_context, UVM_ID_CPU, region, NULL, NULL); + UVM_ASSERT(status == NV_OK); + + if (uvm_va_policy_is_read_duplicate(uvm_va_range_get_policy(va_range), va_range->va_space)) { + status = uvm_va_block_make_resident_read_duplicate(va_block, + va_block_retry, + va_block_context, + uvm_va_range_get_policy(va_range)->preferred_location, + region, + NULL, + NULL, + UVM_MAKE_RESIDENT_CAUSE_API_SET_RANGE_GROUP); + } + else { + 
status = uvm_va_block_make_resident(va_block, + va_block_retry, + va_block_context, + uvm_va_range_get_policy(va_range)->preferred_location, + region, + NULL, + NULL, + UVM_MAKE_RESIDENT_CAUSE_API_SET_RANGE_GROUP); + } + if (status != NV_OK) + return status; + + // 1- Map all UVM-Lite SetAccessedBy GPUs and the preferred location with + // RWA permission + status = uvm_va_block_map_mask(va_block, + va_block_context, + &va_range->uvm_lite_gpus, + region, + NULL, + UVM_PROT_READ_WRITE_ATOMIC, + UvmEventMapRemoteCauseCoherence); + if (status != NV_OK) + goto out; + + // 2- Map faultable SetAccessedBy GPUs. + uvm_processor_mask_and(&map_mask, + &uvm_va_range_get_policy(va_range)->accessed_by, + &va_range->va_space->can_access[uvm_id_value(uvm_va_range_get_policy(va_range)->preferred_location)]); + uvm_processor_mask_andnot(&map_mask, &map_mask, &va_range->uvm_lite_gpus); + + for_each_gpu_id_in_mask(gpu_id, &map_mask) { + status = uvm_va_block_add_mappings(va_block, + va_block_context, + gpu_id, + region, + NULL, + UvmEventMapRemoteCausePolicy); + if (status != NV_OK) + goto out; + } + +out: + tracker_status = uvm_tracker_add_tracker_safe(tracker, &va_block->tracker); + + return status == NV_OK ? tracker_status : status; +} + +NV_STATUS uvm_range_group_va_range_migrate(uvm_va_range_t *va_range, + NvU64 start, + NvU64 end, + uvm_tracker_t *out_tracker) +{ + uvm_va_block_t *va_block = NULL; + size_t i = 0; + NV_STATUS status = NV_OK; + uvm_va_block_retry_t va_block_retry; + uvm_va_block_context_t *va_block_context; + + // This path is only called for non-migratable range groups so it never + // creates CPU mappings, meaning no mm is needed. + va_block_context = uvm_va_block_context_alloc(NULL); + if (!va_block_context) + return NV_ERR_NO_MEMORY; + + uvm_assert_rwsem_locked(&va_range->va_space->lock); + va_block_context->policy = uvm_va_range_get_policy(va_range); + + // Iterate over blocks, populating them if necessary + for (i = uvm_va_range_block_index(va_range, start); i <= uvm_va_range_block_index(va_range, end); ++i) { + uvm_va_block_region_t region; + status = uvm_va_range_block_create(va_range, i, &va_block); + if (status != NV_OK) + break; + + region = uvm_va_block_region_from_start_end(va_block, + max(start, va_block->start), + min(end, va_block->end)); + + status = UVM_VA_BLOCK_LOCK_RETRY(va_block, &va_block_retry, + uvm_range_group_va_range_migrate_block_locked(va_range, + va_block, + &va_block_retry, + va_block_context, + region, + out_tracker)); + if (status != NV_OK) + break; + } + + uvm_va_block_context_free(va_block_context); + + return status; +} + +NV_STATUS uvm_api_set_range_group(UVM_SET_RANGE_GROUP_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_range_group_t *range_group = NULL; + uvm_va_range_t *va_range, *va_range_last; + unsigned long long last_address = params->requestedBase + params->length - 1; + uvm_tracker_t local_tracker; + NV_STATUS tracker_status; + NV_STATUS status = NV_OK; + bool has_va_space_write_lock; + bool migratable; + + UVM_ASSERT(va_space); + + // Check address and length alignment + if (uvm_api_range_invalid(params->requestedBase, params->length)) + return NV_ERR_INVALID_ADDRESS; + + uvm_tracker_init(&local_tracker); + + uvm_va_space_down_write(va_space); + has_va_space_write_lock = true; + + // Check that range group exists + range_group = radix_tree_lookup(&va_space->range_groups, params->rangeGroupId); + if (!range_group && (params->rangeGroupId != UVM_RANGE_GROUP_ID_NONE)) { + status = 
NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + // If the desired range group is not migratable, any overlapping va_ranges + // must have a preferred location + migratable = uvm_range_group_migratable(range_group); + va_range_last = NULL; + uvm_for_each_managed_va_range_in_contig(va_range, va_space, params->requestedBase, last_address) { + va_range_last = va_range; + if (!migratable && UVM_ID_IS_INVALID(uvm_va_range_get_policy(va_range)->preferred_location)) { + status = NV_ERR_INVALID_ADDRESS; + goto done; + } + } + + // Check that we were able to iterate over the entire range without any gaps + if (!va_range_last || va_range_last->node.end < last_address) { + status = NV_ERR_INVALID_ADDRESS; + goto done; + } + + status = uvm_range_group_assign_range(va_space, range_group, params->requestedBase, last_address); + if (status != NV_OK) + goto done; + + // Early exit if no need for migration + if (uvm_range_group_migratable(range_group)) + goto done; + + uvm_va_space_downgrade_write(va_space); + has_va_space_write_lock = false; + + // Already checked for gaps above + uvm_for_each_va_range_in(va_range, va_space, params->requestedBase, last_address) { + status = uvm_range_group_va_range_migrate(va_range, + max(va_range->node.start, params->requestedBase), + min(va_range->node.end, last_address), + &local_tracker); + if (status != NV_OK) + goto done; + } + +done: + tracker_status = uvm_tracker_wait_deinit(&local_tracker); + + if (has_va_space_write_lock) + uvm_va_space_up_write(va_space); + else + uvm_va_space_up_read(va_space); + + return status == NV_OK ? tracker_status : status; +} + +static NV_STATUS uvm_range_group_prevent_migration(uvm_range_group_t *range_group, + uvm_va_space_t *va_space) +{ + uvm_range_group_range_t *rgr = NULL; + uvm_va_range_t *va_range; + uvm_processor_id_t preferred_location; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + NV_STATUS tracker_status; + NV_STATUS status = NV_OK; + + LIST_HEAD(local_migrated_ranges); + + UVM_ASSERT(range_group); + UVM_ASSERT(va_space); + + uvm_assert_rwsem_locked(&va_space->lock); + + // Move the range group's migrated_ranges list to the local_migrated_ranges + // list and process it from there. + uvm_spin_lock(&range_group->migrated_ranges_lock); + list_replace_init(&range_group->migrated_ranges, &local_migrated_ranges); + uvm_spin_unlock(&range_group->migrated_ranges_lock); + + while (true) { + // Delete each item from the beginning of the list. + uvm_spin_lock(&range_group->migrated_ranges_lock); + rgr = list_first_entry_or_null(&local_migrated_ranges, + uvm_range_group_range_t, + range_group_migrated_list_node); + if (rgr) + list_del_init(&rgr->range_group_migrated_list_node); + uvm_spin_unlock(&range_group->migrated_ranges_lock); + + if (!rgr) + break; + + uvm_for_each_va_range_in(va_range, va_space, rgr->node.start, rgr->node.end) { + // VA ranges need to have a preferred location set in order for their + // range group to be set to non-migratable. 
+ preferred_location = uvm_va_range_get_policy(va_range)->preferred_location; + if (UVM_ID_IS_INVALID(preferred_location)) { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // If the preferred location is a GPU, check that it's not + // fault-capable + if (UVM_ID_IS_GPU(preferred_location) && + uvm_processor_mask_test(&va_space->faultable_processors, preferred_location)) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + // Check that all UVM-Lite GPUs are able to access the + // preferred location + if (!uvm_processor_mask_subset(&va_range->uvm_lite_gpus, + &va_space->accessible_from[uvm_id_value(preferred_location)])) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + // Perform the migration of the VA range. + status = uvm_range_group_va_range_migrate(va_range, + max(va_range->node.start, rgr->node.start), + min(va_range->node.end, rgr->node.end), + &local_tracker); + if (status != NV_OK) + goto done; + } + } + +done: + tracker_status = uvm_tracker_wait_deinit(&local_tracker); + if (status == NV_OK) + status = tracker_status; + + // We may have exited early, in which case rgr may be unprocessed and + // local_migrated_ranges may not be empty. These entries should be put back + // on range_group's migrated_ranges list. + if (status != NV_OK) { + uvm_spin_lock(&range_group->migrated_ranges_lock); + if (rgr) + list_move_tail(&rgr->range_group_migrated_list_node, &range_group->migrated_ranges); + list_splice_tail(&local_migrated_ranges, &range_group->migrated_ranges); + uvm_spin_unlock(&range_group->migrated_ranges_lock); + } + + return status; +} + +static NV_STATUS uvm_range_groups_set_migration_policy(uvm_va_space_t *va_space, + NvU64 *range_group_ids, + NvU64 num_group_ids, + bool allow_migration) +{ + NV_STATUS status = NV_OK; + NvU64 i; + uvm_range_group_t *range_groups[UVM_MAX_RANGE_GROUPS_PER_IOCTL_CALL]; + + UVM_ASSERT(va_space); + + if (!range_group_ids || num_group_ids == 0 || num_group_ids > UVM_MAX_RANGE_GROUPS_PER_IOCTL_CALL) + return NV_ERR_INVALID_ARGUMENT; + + if (!allow_migration) { + // We take the VA space in write mode to perform range group lookup and + // change the migratability to false. This will wait for any concurrent + // fault/migration to finish. + // + // TODO: Bug 1878225: Use a finer grain synchronization mechanism to + // prevent races with concurrent fault handling/migration operations. + uvm_va_space_down_write(va_space); + } + else { + // It is safe to allow migration with the VA space in read mode since + // it just flips the value of allow_migration. + uvm_va_space_down_read(va_space); + } + + for (i = 0; i < num_group_ids; ++i) { + range_groups[i] = radix_tree_lookup(&va_space->range_groups, range_group_ids[i]); + if (!range_groups[i]) { + if (!allow_migration) + uvm_va_space_up_write(va_space); + else + uvm_va_space_up_read(va_space); + + return NV_ERR_OBJECT_NOT_FOUND; + } + + atomic_set(&range_groups[i]->allow_migration, !!allow_migration); + } + + // If we are allowing migration, we are done. + if (!allow_migration) { + // Any fault handler/migration that executes after downgrading the lock + // mode will see migratability disabled. 
+        uvm_va_space_downgrade_write(va_space);
+
+        for (i = 0; i < num_group_ids; ++i) {
+            status = uvm_range_group_prevent_migration(range_groups[i], va_space);
+            if (status != NV_OK)
+                break;
+        }
+    }
+
+    uvm_va_space_up_read(va_space);
+    return status;
+}
+
+static uvm_range_group_range_t *range_group_range_container(uvm_range_tree_node_t *node)
+{
+    if (node == NULL)
+        return NULL;
+    return container_of(node, uvm_range_group_range_t, node);
+}
+
+static uvm_range_group_range_t *range_group_range_prev(uvm_va_space_t *va_space, uvm_range_group_range_t *range)
+{
+    uvm_range_tree_node_t *node = uvm_range_tree_prev(&va_space->range_group_ranges, &range->node);
+    return range_group_range_container(node);
+}
+
+static uvm_range_group_range_t *range_group_range_next(uvm_va_space_t *va_space, uvm_range_group_range_t *range)
+{
+    uvm_range_tree_node_t *node = uvm_range_tree_next(&va_space->range_group_ranges, &range->node);
+    return range_group_range_container(node);
+}
+
+NV_STATUS uvm_range_group_assign_range(uvm_va_space_t *va_space, uvm_range_group_t *range_group, NvU64 start, NvU64 end)
+{
+    NV_STATUS status;
+    uvm_range_group_range_t *rgr;
+    uvm_range_group_range_t *temp;
+    uvm_range_group_range_t *next;
+    uvm_range_group_range_t *prev;
+    uvm_range_group_range_t *new_rgr = NULL;
+    LIST_HEAD(internal_nodes);
+
+    uvm_assert_rwsem_locked_write(&va_space->lock);
+
+    if (range_group != NULL) {
+        new_rgr = range_group_range_create(range_group, start, end);
+        if (new_rgr == NULL)
+            return NV_ERR_NO_MEMORY;
+    }
+
+    uvm_range_group_for_each_range_in(rgr, va_space, start, end) {
+        if (rgr->node.start < start && rgr->node.end > end) {
+            // The region described by [start, end] lies entirely within rgr
+            // and does not sit on rgr's boundary.
+            NvU64 orig_end = rgr->node.end;
+
+            // Check if the rgr is already part of the specified range group and,
+            // if so, do nothing.
+            if (rgr->range_group == range_group) {
+                uvm_range_group_range_destroy(new_rgr);
+                return NV_OK;
+            }
+
+            // rgr needs to be split to make room for the new range group range.
+            // Do this by first creating a new range group range called "next"
+            // for the region of memory just above new_rgr, then shrink rgr
+            // down so that it fits just below new_rgr.
+            next = range_group_range_create(rgr->range_group, end + 1, orig_end);
+            if (next == NULL) {
+                uvm_range_group_range_destroy(new_rgr);
+                return NV_ERR_NO_MEMORY;
+            }
+
+            uvm_range_tree_shrink_node(&va_space->range_group_ranges, &rgr->node, rgr->node.start, start - 1);
+            status = uvm_range_tree_add(&va_space->range_group_ranges, &next->node);
+            UVM_ASSERT(status == NV_OK);
+
+            // Check if we need to add "next" to the range group's migrated list.
+            uvm_spin_lock(&rgr->range_group->migrated_ranges_lock);
+            if (!list_empty(&rgr->range_group_migrated_list_node))
+                list_move_tail(&next->range_group_migrated_list_node, &next->range_group->migrated_ranges);
+            uvm_spin_unlock(&rgr->range_group->migrated_ranges_lock);
+        }
+        else if (rgr->node.start < start) {
+            // The region [start, end] overlaps with the end of rgr.
+            // Shrink rgr by moving its end downward.
+            uvm_range_tree_shrink_node(&va_space->range_group_ranges, &rgr->node, rgr->node.start, start - 1);
+        }
+        else if (rgr->node.end > end) {
+            // The region [start, end] overlaps with the beginning of rgr.
+            // Shrink rgr by moving its beginning upward.
+            uvm_range_tree_shrink_node(&va_space->range_group_ranges, &rgr->node, end + 1, rgr->node.end);
+        }
+        else {
+            // rgr lies entirely within [start, end]. rgr should be destroyed.
+ list_move(&rgr->range_group_list_node, &internal_nodes); + } + } + + // Always add new_rgr to range_group's migrated list. + if (new_rgr) { + uvm_spin_lock(&new_rgr->range_group->migrated_ranges_lock); + list_move_tail(&new_rgr->range_group_migrated_list_node, &new_rgr->range_group->migrated_ranges); + uvm_spin_unlock(&new_rgr->range_group->migrated_ranges_lock); + } + + list_for_each_entry_safe(rgr, temp, &internal_nodes, range_group_list_node) { + uvm_range_tree_remove(&va_space->range_group_ranges, &rgr->node); + uvm_range_group_range_destroy(rgr); + } + + if (range_group == NULL) + return NV_OK; + + status = uvm_range_tree_add(&va_space->range_group_ranges, &new_rgr->node); + UVM_ASSERT(status == NV_OK); + + prev = range_group_range_prev(va_space, new_rgr); + if (prev != NULL && prev->node.end + 1 == new_rgr->node.start && prev->range_group == new_rgr->range_group) { + uvm_range_tree_merge_prev(&va_space->range_group_ranges, &new_rgr->node); + uvm_range_group_range_destroy(prev); + } + + next = range_group_range_next(va_space, new_rgr); + if (next != NULL && next->node.start - 1 == new_rgr->node.end && next->range_group == new_rgr->range_group) { + uvm_range_tree_merge_next(&va_space->range_group_ranges, &new_rgr->node); + uvm_range_group_range_destroy(next); + } + + return NV_OK; +} + +bool uvm_range_group_address_migratable(uvm_va_space_t *va_space, NvU64 address) +{ + uvm_range_group_range_t *rgr = uvm_range_group_range_find(va_space, address); + return rgr == NULL || uvm_range_group_migratable(rgr->range_group); +} + +bool uvm_range_group_any_migratable(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ + uvm_range_group_range_iter_t iter; + uvm_range_group_for_all_ranges_in(&iter, va_space, start, end) { + if (iter.migratable) + return true; + } + + return false; +} + +bool uvm_range_group_all_migratable(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ + uvm_range_group_range_t *rgr; + uvm_range_group_for_each_range_in(rgr, va_space, start, end) { + if (!uvm_range_group_migratable(rgr->range_group)) + return false; + } + + return true; +} + +uvm_range_group_range_t *uvm_range_group_range_find(uvm_va_space_t *va_space, NvU64 addr) +{ + uvm_range_tree_node_t *node; + uvm_assert_rwsem_locked(&va_space->lock); + + node = uvm_range_tree_find(&va_space->range_group_ranges, addr); + return range_group_range_container(node); +} + +uvm_range_group_range_t *uvm_range_group_range_iter_first(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ + uvm_range_tree_node_t *node = uvm_range_tree_iter_first(&va_space->range_group_ranges, start, end); + return range_group_range_container(node); +} + +uvm_range_group_range_t *uvm_range_group_range_iter_next(uvm_va_space_t *va_space, + uvm_range_group_range_t *range, + NvU64 end) +{ + uvm_range_tree_node_t *node = uvm_range_tree_iter_next(&va_space->range_group_ranges, &range->node, end); + return range_group_range_container(node); +} + +static void range_group_range_iter_advance(uvm_range_group_range_iter_t *iter, NvU64 end) +{ + if (iter->node == NULL) { + iter->end = end; + } + else { + iter->is_current = iter->start >= iter->node->node.start; + if (iter->is_current) + iter->end = min(iter->node->node.end, end); + else + iter->end = min(iter->node->node.start - 1, end); + } + iter->migratable = iter->node == NULL || !iter->is_current || uvm_range_group_migratable(iter->node->range_group); +} + + +void uvm_range_group_range_iter_all_first(uvm_va_space_t *va_space, + NvU64 start, + NvU64 end, + uvm_range_group_range_iter_t *iter) +{ + 
iter->valid = true; + iter->start = start; + iter->node = uvm_range_group_range_iter_first(va_space, start, end); + + range_group_range_iter_advance(iter, end); +} + +bool uvm_range_group_range_iter_all_next(uvm_va_space_t *va_space, + uvm_range_group_range_iter_t *iter, + NvU64 end) +{ + iter->valid = iter->end < end; + if (!iter->valid) + return false; + + iter->start = iter->end + 1; + if (iter->is_current) + iter->node = uvm_range_group_range_iter_next(va_space, iter->node, end); + + range_group_range_iter_advance(iter, end); + return true; +} + +void uvm_range_group_range_migratability_iter_first(uvm_va_space_t *va_space, + NvU64 start, + NvU64 end, + uvm_range_group_range_iter_t *iter) +{ + uvm_range_group_range_iter_t next; + + uvm_range_group_range_iter_all_first(va_space, start, end, iter); + next = *iter; + + while (uvm_range_group_range_iter_all_next(va_space, &next, end) && next.migratable == iter->migratable) + *iter = next; + + iter->start = start; +} + +void uvm_range_group_range_migratability_iter_next(uvm_va_space_t *va_space, + uvm_range_group_range_iter_t *iter, + NvU64 end) +{ + uvm_range_group_range_iter_t next; + NvU64 start; + + if (!uvm_range_group_range_iter_all_next(va_space, iter, end)) + return; + + start = iter->start; + next = *iter; + while (uvm_range_group_range_iter_all_next(va_space, &next, end) && next.migratable == iter->migratable) + *iter = next; + + iter->start = start; +} + +void uvm_range_group_migratable_page_mask(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_page_mask_t *mask_out) +{ + uvm_range_group_range_iter_t iter; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + + uvm_page_mask_zero(mask_out); + + uvm_range_group_for_each_migratability_in(&iter, + va_space, + uvm_va_block_region_start(va_block, region), + uvm_va_block_region_end(va_block, region)) { + if (iter.migratable) + uvm_page_mask_region_fill(mask_out, uvm_va_block_region_from_start_end(va_block, iter.start, iter.end)); + } +} + +NV_STATUS uvm_api_prevent_migration_range_groups(UVM_PREVENT_MIGRATION_RANGE_GROUPS_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + status = uvm_range_groups_set_migration_policy(va_space, params->rangeGroupIds, params->numGroupIds, false); + if (status == NV_OK) + uvm_tools_flush_events(); + + return status; +} + +NV_STATUS uvm_api_allow_migration_range_groups(UVM_ALLOW_MIGRATION_RANGE_GROUPS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + return uvm_range_groups_set_migration_policy(va_space, params->rangeGroupIds, params->numGroupIds, true); +} + +NV_STATUS uvm_test_range_group_range_info(UVM_TEST_RANGE_GROUP_RANGE_INFO_PARAMS *params, struct file *filp) +{ + uvm_range_group_range_t *rgr; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_read(va_space); + + rgr = uvm_range_group_range_iter_first(va_space, params->lookup_address, ULLONG_MAX); + + params->range_group_present = rgr != NULL && rgr->node.start <= params->lookup_address; + if (params->range_group_present) { + params->range_group_range_start = rgr->node.start; + params->range_group_range_end = rgr->node.end; + params->range_group_id = rgr->range_group->id; + } + else { + uvm_range_group_range_t *prev; + uvm_range_group_range_t *next = rgr; + + if (next) { + params->range_group_range_end = next->node.start - 1; + prev = range_group_range_prev(va_space, next); + } + else { + params->range_group_range_end = ULLONG_MAX; + prev 
= list_last_entry_or_null(&va_space->range_group_ranges.head, uvm_range_group_range_t, node.list); + } + + if (prev) + params->range_group_range_start = prev->node.end + 1; + else + params->range_group_range_start = 0; + + params->range_group_id = UVM_RANGE_GROUP_ID_NONE; + } + + uvm_va_space_up_read(va_space); + + return NV_OK; +} + +NV_STATUS uvm_test_range_group_range_count(UVM_TEST_RANGE_GROUP_RANGE_COUNT_PARAMS *params, struct file *filp) +{ + uvm_range_group_range_t *rgr; + uvm_range_group_t *range_group; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + params->count = 0; + uvm_va_space_down_read(va_space); + + range_group = radix_tree_lookup(&va_space->range_groups, params->rangeGroupId); + if (range_group == NULL) { + uvm_va_space_up_read(va_space); + return NV_ERR_OBJECT_NOT_FOUND; + } + + list_for_each_entry(rgr, &range_group->ranges, range_group_list_node) { + UVM_ASSERT(rgr->range_group == range_group); + params->count++; + } + + uvm_va_space_up_read(va_space); + + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_range_group.h b/kernel-open/nvidia-uvm/uvm_range_group.h new file mode 100644 index 000000000..4a053ec37 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_group.h @@ -0,0 +1,202 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_RANGE_GROUP_H__ +#define __UVM_RANGE_GROUP_H__ + +#include "nvtypes.h" +#include "uvm_range_tree.h" +#include "uvm_forward_decl.h" +#include "uvm_lock.h" +#include "uvm_va_block_types.h" + +typedef struct uvm_range_group_struct +{ + // Group ID + NvU64 id; + + // Does this group allow migration? + // This is not protected by a lock, which is okay since it is valid + // for a range group to be set as migratable while a migration of its + // VA ranges to their preferred location has not been completed yet. + // Using atomic_t rather than bool here to emphasize the fact that this + // field is special + atomic_t allow_migration; + + // range group ranges in this group + struct list_head ranges; + + // Range group ranges in this group that have been migrated from their preferred location. This should always be + // a subset of the ranges list. Any time this list is modified the migrated_ranges_lock should be acquired. 
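+    //
+    // For example, a range is moved onto this list with the usual pattern
+    // (sketch, mirroring uvm_range_group_assign_range()):
+    //
+    //     uvm_spin_lock(&range_group->migrated_ranges_lock);
+    //     list_move_tail(&rgr->range_group_migrated_list_node, &range_group->migrated_ranges);
+    //     uvm_spin_unlock(&range_group->migrated_ranges_lock);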
+ struct list_head migrated_ranges; + uvm_spinlock_t migrated_ranges_lock; +} uvm_range_group_t; + +typedef struct uvm_range_group_range_struct +{ + uvm_range_tree_node_t node; + struct list_head range_group_list_node; + struct list_head range_group_migrated_list_node; + uvm_range_group_t *range_group; +} uvm_range_group_range_t; + +// This structure can represent gaps in the tree too +typedef struct uvm_range_group_range_iter_struct +{ + NvU64 start; + NvU64 end; + + // Whether this range is migratable. + bool migratable; + + // Whether the iterator is pointing to a valid element. + // Set to false when iteration moves past the last element. + bool valid; + + // Whether start and end lie within node. + bool is_current; + + // When valid and is_current are true, the node that corresponds to the position of the iterator + // Also see uvm_range_group_range_iter_all_first() + uvm_range_group_range_t *node; +} uvm_range_group_range_iter_t; + +static inline bool uvm_range_group_migratable(uvm_range_group_t *range_group) +{ + // A NULL range group is always migratable + return !range_group || atomic_read(&range_group->allow_migration); +} + +// Causes [start, end] to have a range group of range_group. +// This function may split or overwrite existing range group ranges to accomplish this; merging when possible. +// If range_group is NULL, this function will clear all range group ranges for the given range. +// It is not necessary to clear a range before overwriting it with a new range_group association. +NV_STATUS uvm_range_group_assign_range(uvm_va_space_t *va_space, uvm_range_group_t *range_group, NvU64 start, NvU64 end); + +// False when the address belongs to a range group range of a non-migratable range group. +// True otherwise, even if the address is invalid. +bool uvm_range_group_address_migratable(uvm_va_space_t *va_space, NvU64 address); + +// False if all addresses in the range belong to range group ranges of non-migratable range groups. +// True otherwise, even if the range is invalid. +bool uvm_range_group_any_migratable(uvm_va_space_t *va_space, NvU64 start, NvU64 end); + +// False if any address in the range belongs to a range group range of a non-migratable range group. +// True otherwise, even if the range is invalid. +bool uvm_range_group_all_migratable(uvm_va_space_t *va_space, NvU64 start, NvU64 end); + +// These iterators return range group ranges, skipping over addresses that have no associated range group. +uvm_range_group_range_t *uvm_range_group_range_find(uvm_va_space_t *va_space, NvU64 addr); +uvm_range_group_range_t *uvm_range_group_range_iter_first(uvm_va_space_t *va_space, NvU64 start, NvU64 end); +uvm_range_group_range_t *uvm_range_group_range_iter_next(uvm_va_space_t *va_space, + uvm_range_group_range_t *range, + NvU64 end); + +// These iterators return range group ranges one at a time, but also yield gaps in between range group ranges. +// When iter refers to a range_group_range, iter->is_current will be true and iter->node will be the range group range. +// When iter refers to a gap, iter->is_current will be false. +// This function will always return a valid iterator, as long as start <= end, which is required. +// iter->valid is set when iter refers to a range within the caller-specified range. It is cleared to indicate the end of iteration. +// It does not reflect the validity of those virtual addresses in the va_space. +// iter_all_first and iter_all_next clamp iter->start and iter->end to the start and end values passed by the caller. 
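+//
+// For example, a caller that wants to visit both the populated ranges and the
+// gaps between them in [start, end] could use the
+// uvm_range_group_for_all_ranges_in() helper defined at the bottom of this
+// header, roughly like this (illustrative sketch; visit_range() and
+// visit_gap() are hypothetical callbacks):
+//
+//     uvm_range_group_range_iter_t iter;
+//     uvm_range_group_for_all_ranges_in(&iter, va_space, start, end) {
+//         if (iter.is_current)
+//             visit_range(iter.node, iter.start, iter.end);
+//         else
+//             visit_gap(iter.start, iter.end);
+//     }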
+void uvm_range_group_range_iter_all_first(uvm_va_space_t *va_space, + NvU64 start, + NvU64 end, + uvm_range_group_range_iter_t *iter); + +// Advance an iterator that yields range group ranges as well as any gaps in between them. +// Returns iter->valid. +bool uvm_range_group_range_iter_all_next(uvm_va_space_t *va_space, + uvm_range_group_range_iter_t *iter, + NvU64 end); + +// These iterators return the largest possible range of addresses that are either all migratable or not. +// To determine which is the case, check iter->migratable. +// This function will always return a valid iterator, as long as start <= end, which is required. +// migratability_iter_first and migratability_iter_next clamp iter->start and iter->end +// to the start and end values passed by the caller. +void uvm_range_group_range_migratability_iter_first(uvm_va_space_t *va_space, + NvU64 start, + NvU64 end, + uvm_range_group_range_iter_t *iter); + +// Advance a migratability iterator. Check iter->valid to determine whether iter refers to a valid region. +void uvm_range_group_range_migratability_iter_next(uvm_va_space_t *va_space, + uvm_range_group_range_iter_t *iter, + NvU64 end); + +// This iterator is like uvm_range_group_range_migratability_iter_next except that it allows the caller to +// modify the range group range tree during iteration, and is slightly less efficient. +static void uvm_range_group_range_migratability_iter_next_safe(uvm_va_space_t *va_space, + uvm_range_group_range_iter_t *iter, + NvU64 end) +{ + iter->valid = iter->end < end; + if (!iter->valid) + return; + uvm_range_group_range_migratability_iter_first(va_space, iter->end + 1, end, iter); +} + +// Fill mask_out with all migratable pages in region. The mask is guaranteed to +// be zero outside the region. +void uvm_range_group_migratable_page_mask(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_page_mask_t *mask_out); + +NV_STATUS uvm_range_group_init(void); +void uvm_range_group_exit(void); +void uvm_range_group_radix_tree_destroy(uvm_va_space_t *va_space); + +// Move a non-migratable VA range to its preferred location and add +// mappings for processors in the accessed by mask and for the preferred +// location (with the exception of CPU which never gets any mapping). +// +// This does not wait for the migration to complete. The work is added to the +// output tracker. 
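+//
+// Callers are therefore expected to wait on the tracker themselves, roughly
+// along these lines (illustrative sketch of the pattern used by
+// uvm_api_set_range_group()):
+//
+//     uvm_tracker_t tracker = UVM_TRACKER_INIT();
+//     status = uvm_range_group_va_range_migrate(va_range, start, end, &tracker);
+//     tracker_status = uvm_tracker_wait_deinit(&tracker);
+//     if (status == NV_OK)
+//         status = tracker_status;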
+NV_STATUS uvm_range_group_va_range_migrate(uvm_va_range_t *va_range, + NvU64 start, + NvU64 end, + uvm_tracker_t *out_tracker); + +#define uvm_range_group_for_each_range_in(node, va_space, start, end) \ + for ((node) = uvm_range_group_range_iter_first((va_space), (start), (end)); \ + (node); \ + (node) = uvm_range_group_range_iter_next((va_space), (node), (end))) + +#define uvm_range_group_for_all_ranges_in(iter, va_space, start, end) \ + for (uvm_range_group_range_iter_all_first((va_space), (start), (end), (iter)); \ + (iter)->valid; \ + uvm_range_group_range_iter_all_next((va_space), (iter), (end))) + +#define uvm_range_group_for_each_migratability_in(iter, va_space, start, end) \ + for (uvm_range_group_range_migratability_iter_first((va_space), (start), (end), (iter)); \ + (iter)->valid; \ + uvm_range_group_range_migratability_iter_next((va_space), (iter), (end))) + +#define uvm_range_group_for_each_migratability_in_safe(iter, va_space, start, end) \ + for (uvm_range_group_range_migratability_iter_first((va_space), (start), (end), (iter)); \ + (iter)->valid; \ + uvm_range_group_range_migratability_iter_next_safe((va_space), (iter), (end))) + + +#endif // __UVM_RANGE_GROUP_H__ diff --git a/kernel-open/nvidia-uvm/uvm_range_group_tree_test.c b/kernel-open/nvidia-uvm/uvm_range_group_tree_test.c new file mode 100644 index 000000000..78b5b5131 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_group_tree_test.c @@ -0,0 +1,385 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+
+*******************************************************************************/
+
+#include "uvm_test.h"
+#include "uvm_test_ioctl.h"
+#include "uvm_va_space.h"
+#include "uvm_range_group.h"
+#include "uvm_api.h"
+#include "uvm_forward_decl.h"
+
+#define RANGE_GROUP_COUNT ARRAY_SIZE(((UVM_TEST_RANGE_GROUP_TREE_PARAMS *)0)->rangeGroupIds)
+
+#define uvm_range_group_for_each_range(node, va_space) uvm_range_group_for_each_range_in(node, va_space, 0, ULLONG_MAX)
+
+static NV_STATUS range_group_owns_ranges(uvm_range_group_t *range_group, NvU64 count)
+{
+    NvU64 i = 0;
+    uvm_range_group_range_t *rgr;
+    list_for_each_entry(rgr, &range_group->ranges, range_group_list_node) {
+        TEST_CHECK_RET(rgr->range_group == range_group);
+        i++;
+    }
+    TEST_CHECK_RET(i == count);
+    return NV_OK;
+}
+
+static NV_STATUS range_group_tree_empty(uvm_va_space_t *va_space, uvm_range_group_t **range_groups)
+{
+    NvU64 i;
+    uvm_range_group_range_t *rgr;
+
+    for (i = 0; i < RANGE_GROUP_COUNT; i++)
+        TEST_CHECK_RET(range_group_owns_ranges(range_groups[i], 0) == NV_OK);
+
+    i = 0;
+    uvm_range_group_for_each_range(rgr, va_space)
+        i++;
+
+    TEST_CHECK_RET(i == 0);
+    return NV_OK;
+}
+
+static NV_STATUS iterator_test(uvm_va_space_t *va_space, uvm_range_group_t **range_groups)
+{
+    uvm_range_group_range_iter_t iter;
+    NvU64 i;
+
+    // no elements
+    i = 0;
+
+    uvm_range_group_for_all_ranges_in(&iter, va_space, 0, ULLONG_MAX) {
+        TEST_CHECK_RET(iter.start == 0);
+        TEST_CHECK_RET(iter.end == ULLONG_MAX);
+        TEST_CHECK_RET(iter.node == NULL);
+        i++;
+    }
+    TEST_CHECK_RET(i == 1);
+
+    i = 0;
+    uvm_range_group_for_each_migratability_in(&iter, va_space, 0, ULLONG_MAX) {
+        TEST_CHECK_RET(iter.start == 0);
+        TEST_CHECK_RET(iter.end == ULLONG_MAX);
+        TEST_CHECK_RET(iter.migratable);
+        i++;
+    }
+    TEST_CHECK_RET(i == 1);
+
+    // single element at the beginning
+    i = 0;
+    MEM_NV_CHECK_RET(uvm_range_group_assign_range(va_space, range_groups[0], 0, 3 * PAGE_SIZE - 1), NV_OK);
+    uvm_range_group_for_all_ranges_in(&iter, va_space, 0, ULLONG_MAX) {
+        if (i == 0) {
+            TEST_CHECK_RET(iter.start == 0);
+            TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.node->range_group == range_groups[0]);
+            TEST_CHECK_RET(iter.is_current);
+        }
+        else if (i == 1) {
+            TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == ULLONG_MAX);
+            TEST_CHECK_RET(iter.node == NULL);
+        }
+        i++;
+    }
+    TEST_CHECK_RET(i == 2);
+
+    // the whole range should be migratable
+    i = 0;
+    uvm_range_group_for_each_migratability_in(&iter, va_space, 0, ULLONG_MAX) {
+        TEST_CHECK_RET(iter.start == 0);
+        TEST_CHECK_RET(iter.end == ULLONG_MAX);
+        TEST_CHECK_RET(iter.migratable);
+        i++;
+    }
+    TEST_CHECK_RET(i == 1);
+
+    // disallow migration in range, now iterate over it
+    atomic_set(&range_groups[0]->allow_migration, 0);
+    i = 0;
+    uvm_range_group_for_all_ranges_in(&iter, va_space, 0, ULLONG_MAX) {
+        if (i == 0) {
+            TEST_CHECK_RET(iter.start == 0);
+            TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.node->range_group == range_groups[0]);
+            TEST_CHECK_RET(!iter.migratable);
+            TEST_CHECK_RET(iter.is_current);
+        }
+        else if (i == 1) {
+            TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == ULLONG_MAX);
+            TEST_CHECK_RET(iter.migratable);
+            TEST_CHECK_RET(iter.node == NULL);
+        }
+        i++;
+    }
+    TEST_CHECK_RET(i == 2);
+
+    // ensure that boundaries are set correctly when overshooting an element
+    i = 0;
+    uvm_range_group_for_all_ranges_in(&iter, va_space, PAGE_SIZE, 4 * PAGE_SIZE - 1) {
+        if (i == 0) {
+            TEST_CHECK_RET(iter.start == PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.node->range_group == range_groups[0]);
+            TEST_CHECK_RET(iter.is_current);
+        }
+        else if (i == 1) {
+            TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == 4 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.node == NULL);
+        }
+        i++;
+    }
+    TEST_CHECK_RET(i == 2);
+
+
+    // ensure that boundaries are set correctly when internal to an element
+    i = 0;
+    uvm_range_group_for_all_ranges_in(&iter, va_space, PAGE_SIZE, 2 * PAGE_SIZE - 1) {
+        TEST_CHECK_RET(iter.start == PAGE_SIZE);
+        TEST_CHECK_RET(iter.end == 2 * PAGE_SIZE - 1);
+        TEST_CHECK_RET(iter.node->range_group == range_groups[0]);
+        TEST_CHECK_RET(iter.is_current);
+        i++;
+    }
+    TEST_CHECK_RET(i == 1);
+
+
+    // delete the node and restore to migratable
+    MEM_NV_CHECK_RET(uvm_range_group_assign_range(va_space, NULL, 0, ULLONG_MAX), NV_OK);
+    TEST_CHECK_RET(range_group_tree_empty(va_space, range_groups) == NV_OK);
+    atomic_set(&range_groups[0]->allow_migration, 1);
+
+
+    // insert two adjacent nodes
+    MEM_NV_CHECK_RET(uvm_range_group_assign_range(va_space, range_groups[0], 2 * PAGE_SIZE, 3 * PAGE_SIZE - 1), NV_OK);
+    MEM_NV_CHECK_RET(uvm_range_group_assign_range(va_space, range_groups[1], 3 * PAGE_SIZE, 4 * PAGE_SIZE - 1), NV_OK);
+
+    i = 0;
+    uvm_range_group_for_all_ranges_in(&iter, va_space, 0, ULLONG_MAX) {
+        if (i == 0) {
+            TEST_CHECK_RET(iter.start == 0);
+            TEST_CHECK_RET(iter.end == 2 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.node->range_group == range_groups[0]);
+            TEST_CHECK_RET(!iter.is_current);
+        }
+        else if (i == 1) {
+            TEST_CHECK_RET(iter.start == 2 * PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.node->range_group == range_groups[0]);
+            TEST_CHECK_RET(iter.is_current);
+        }
+        else if (i == 2) {
+            TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == 4 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.node->range_group == range_groups[1]);
+            TEST_CHECK_RET(iter.is_current);
+        }
+        else if (i == 3) {
+            TEST_CHECK_RET(iter.start == 4 * PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == ULLONG_MAX);
+            TEST_CHECK_RET(iter.node == NULL);
+        }
+        i++;
+    }
+    TEST_CHECK_RET(i == 4);
+
+    // the whole range should be migratable
+    i = 0;
+    uvm_range_group_for_each_migratability_in(&iter, va_space, 0, ULLONG_MAX) {
+        TEST_CHECK_RET(iter.start == 0);
+        TEST_CHECK_RET(iter.end == ULLONG_MAX);
+        TEST_CHECK_RET(iter.migratable);
+        i++;
+    }
+    TEST_CHECK_RET(i == 1);
+
+    // now there should be one block that is not migratable
+    atomic_set(&range_groups[0]->allow_migration, 0);
+    i = 0;
+    uvm_range_group_for_each_migratability_in(&iter, va_space, 0, ULLONG_MAX) {
+        if (i == 0) {
+            TEST_CHECK_RET(iter.start == 0);
+            TEST_CHECK_RET(iter.end == 2 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.migratable);
+        }
+        else if (i == 1) {
+            TEST_CHECK_RET(iter.start == 2 * PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(!iter.migratable);
+        }
+        else if (i == 2) {
+            TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE);
+            TEST_CHECK_RET(iter.end == ULLONG_MAX);
+            TEST_CHECK_RET(iter.migratable);
+        }
+        i++;
+    }
+    TEST_CHECK_RET(i == 3);
+
+    // make both not migratable
+    atomic_set(&range_groups[1]->allow_migration, 0);
+    i = 0;
+    uvm_range_group_for_each_migratability_in(&iter, va_space, 0, ULLONG_MAX) {
+        if (i == 0) {
+            TEST_CHECK_RET(iter.start == 0);
+            TEST_CHECK_RET(iter.end == 2 * PAGE_SIZE - 1);
+            TEST_CHECK_RET(iter.migratable);
+        }
+        else if (i == 1) {
+            TEST_CHECK_RET(iter.start == 2 * PAGE_SIZE);
+            
TEST_CHECK_RET(iter.end == 4 * PAGE_SIZE - 1); + TEST_CHECK_RET(!iter.migratable); + } + else if (i == 2) { + TEST_CHECK_RET(iter.start == 4 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == ULLONG_MAX); + TEST_CHECK_RET(iter.migratable); + } + i++; + } + TEST_CHECK_RET(i == 3); + + // make the first one migratable again + atomic_set(&range_groups[0]->allow_migration, 1); + i = 0; + uvm_range_group_for_each_migratability_in(&iter, va_space, 0, ULLONG_MAX) { + if (i == 0) { + TEST_CHECK_RET(iter.start == 0); + TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1); + TEST_CHECK_RET(iter.migratable); + } + else if (i == 1) { + TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == 4 * PAGE_SIZE - 1); + TEST_CHECK_RET(!iter.migratable); + } + else if (i == 2) { + TEST_CHECK_RET(iter.start == 4 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == ULLONG_MAX); + TEST_CHECK_RET(iter.migratable); + } + i++; + } + TEST_CHECK_RET(i == 3); + + // test the 'safe' iterator + i = 0; + uvm_range_group_for_each_migratability_in_safe(&iter, va_space, 0, ULLONG_MAX) { + if (i == 0) { + TEST_CHECK_RET(iter.start == 0); + TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1); + TEST_CHECK_RET(iter.migratable); + } + else if (i == 1) { + TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == 4 * PAGE_SIZE - 1); + TEST_CHECK_RET(!iter.migratable); + } + else if (i == 2) { + TEST_CHECK_RET(iter.start == 4 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == ULLONG_MAX); + TEST_CHECK_RET(iter.migratable); + } + i++; + } + TEST_CHECK_RET(i == 3); + + i = 0; + uvm_range_group_for_each_migratability_in_safe(&iter, va_space, PAGE_SIZE, 5 * PAGE_SIZE - 1) { + if (i == 0) { + TEST_CHECK_RET(iter.start == PAGE_SIZE); + TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1); + TEST_CHECK_RET(iter.migratable); + } + else if (i == 1) { + TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == 4 * PAGE_SIZE - 1); + TEST_CHECK_RET(!iter.migratable); + } + else if (i == 2) { + TEST_CHECK_RET(iter.start == 4 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == 5 * PAGE_SIZE - 1); + TEST_CHECK_RET(iter.migratable); + } + i++; + } + TEST_CHECK_RET(i == 3); + + i = 0; + uvm_range_group_for_each_migratability_in_safe(&iter, va_space, PAGE_SIZE, 5 * PAGE_SIZE - 1) { + if (i == 0) { + TEST_CHECK_RET(iter.start == PAGE_SIZE); + TEST_CHECK_RET(iter.end == 3 * PAGE_SIZE - 1); + TEST_CHECK_RET(iter.migratable); + MEM_NV_CHECK_RET(uvm_range_group_assign_range(va_space, NULL, iter.start, iter.end), NV_OK); + } + else if (i == 1) { + TEST_CHECK_RET(iter.start == 3 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == 4 * PAGE_SIZE - 1); + TEST_CHECK_RET(!iter.migratable); + MEM_NV_CHECK_RET(uvm_range_group_assign_range(va_space, NULL, iter.start, iter.end), NV_OK); + + } + else if (i == 2) { + TEST_CHECK_RET(iter.start == 4 * PAGE_SIZE); + TEST_CHECK_RET(iter.end == 5 * PAGE_SIZE - 1); + TEST_CHECK_RET(iter.migratable); + MEM_NV_CHECK_RET(uvm_range_group_assign_range(va_space, NULL, iter.start, iter.end), NV_OK); + } + i++; + } + TEST_CHECK_RET(i == 3); + + i = 0; + uvm_range_group_for_each_migratability_in_safe(&iter, va_space, 0, ULLONG_MAX) { + TEST_CHECK_RET(iter.start == 0); + TEST_CHECK_RET(iter.end == ULLONG_MAX); + TEST_CHECK_RET(iter.migratable); + i++; + } + TEST_CHECK_RET(i == 1); + + return NV_OK; +} + +NV_STATUS uvm_test_range_group_tree(UVM_TEST_RANGE_GROUP_TREE_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + size_t i; + uvm_range_group_t *range_groups[RANGE_GROUP_COUNT]; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + 
uvm_va_space_down_write(va_space); + for (i = 0; i < RANGE_GROUP_COUNT; i++) { + range_groups[i] = radix_tree_lookup(&va_space->range_groups, params->rangeGroupIds[i]); + if (range_groups[i] == NULL) { + uvm_va_space_up_write(va_space); + return NV_ERR_INVALID_PARAMETER; + } + } + status = iterator_test(va_space, range_groups); + uvm_va_space_up_write(va_space); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_range_tree.c b/kernel-open/nvidia-uvm/uvm_range_tree.c new file mode 100644 index 000000000..12f9a90f1 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_tree.c @@ -0,0 +1,263 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_range_tree.h" + +static uvm_range_tree_node_t *get_range_node(struct rb_node *rb_node) +{ + return rb_entry(rb_node, uvm_range_tree_node_t, rb_node); +} + +static bool range_nodes_overlap(uvm_range_tree_node_t *a, uvm_range_tree_node_t *b) +{ + return uvm_ranges_overlap(a->start, a->end, b->start, b->end); +} + +// Workhorse tree walking function. +// +// The parent and next pointers may be NULL if the caller doesn't need them. +// They facilitate node addition and range-based searches. +// +// If a node contains addr: +// - That node is returned +// - The parent pointer is set to node's parent, or to NULL if the node is the +// root. +// - The next pointer is set to the next node in address order in the tree, or +// to NULL if node is the last node in the tree. +// +// If no node contains addr: +// - NULL is returned +// - The parent pointer is set to the node under which a new node containing +// addr should be inserted. This will be NULL if the tree is empty. +// - The next pointer is set to the first node containing an address > addr, or +// NULL if there are no such nodes in the tree. 
+static uvm_range_tree_node_t *range_node_find(uvm_range_tree_t *tree, + NvU64 addr, + uvm_range_tree_node_t **parent, + uvm_range_tree_node_t **next) +{ + struct rb_node *rb_node = tree->rb_root.rb_node; + uvm_range_tree_node_t *node = NULL; + uvm_range_tree_node_t *_parent = NULL; + + while (rb_node) { + node = get_range_node(rb_node); + + if (addr < node->start) + rb_node = rb_node->rb_left; + else if (addr > node->end) + rb_node = rb_node->rb_right; + else // node contains addr + break; + + _parent = node; + } + + if (!rb_node) + node = NULL; + + if (parent) + *parent = _parent; + if (next) { + *next = NULL; // Handles the empty tree case + if (node) { + *next = uvm_range_tree_next(tree, node); + } + else if (_parent) { + if (_parent->start > addr) + *next = _parent; + else + *next = uvm_range_tree_next(tree, _parent); + } + } + + return node; +} + +void uvm_range_tree_init(uvm_range_tree_t *tree) +{ + memset(tree, 0, sizeof(*tree)); + tree->rb_root = RB_ROOT; + INIT_LIST_HEAD(&tree->head); +} + +NV_STATUS uvm_range_tree_add(uvm_range_tree_t *tree, uvm_range_tree_node_t *node) +{ + uvm_range_tree_node_t *match, *parent, *prev, *next; + + UVM_ASSERT(node->start <= node->end); + + match = range_node_find(tree, node->start, &parent, NULL); + if (match) + return NV_ERR_UVM_ADDRESS_IN_USE; + + // If no match we know that the new start isn't contained in any existing + // node, but we still have to check for overlap on the rest of the new range. + + // If there's no parent and we didn't match on the root node, the tree is + // empty. + if (!parent) { + rb_link_node(&node->rb_node, NULL, &tree->rb_root.rb_node); + rb_insert_color(&node->rb_node, &tree->rb_root); + list_add(&node->list, &tree->head); + return NV_OK; + } + + // We know that start isn't contained in parent, but the rest of the new + // range might be. + if (range_nodes_overlap(node, parent)) + return NV_ERR_UVM_ADDRESS_IN_USE; + + // Verify that the new node doesn't overlap with its neighbor and insert + if (node->start < parent->start) { + // parent's prev can't overlap with node, otherwise it must overlap with + // start and would've been found by range_node_find above. + prev = uvm_range_tree_prev(tree, parent); + if (prev) + UVM_ASSERT(!range_nodes_overlap(node, prev)); + + rb_link_node(&node->rb_node, &parent->rb_node, &parent->rb_node.rb_left); + list_add_tail(&node->list, &parent->list); + } + else { + next = uvm_range_tree_next(tree, parent); + if (next && range_nodes_overlap(node, next)) + return NV_ERR_UVM_ADDRESS_IN_USE; + + rb_link_node(&node->rb_node, &parent->rb_node, &parent->rb_node.rb_right); + list_add(&node->list, &parent->list); + } + + rb_insert_color(&node->rb_node, &tree->rb_root); + return NV_OK; +} + +void uvm_range_tree_shrink_node(uvm_range_tree_t *tree, uvm_range_tree_node_t *node, NvU64 new_start, NvU64 new_end) +{ + UVM_ASSERT_MSG(new_start <= new_end, "new_start 0x%llx new_end 0x%llx\n", new_start, new_end); + UVM_ASSERT_MSG(node->start <= new_start, "start 0x%llx new_start 0x%llx\n", node->start, new_start); + UVM_ASSERT_MSG(node->end >= new_end, "end 0x%llx new_end 0x%llx\n", node->end, new_end); + + // The tree is not needed currently, but might be in the future. 
+ (void)tree; + + node->start = new_start; + node->end = new_end; +} + +void uvm_range_tree_adjust_interval(uvm_range_tree_t *tree, + NvU64 addr, + NvU64 *startp, + NvU64 *endp) +{ + uvm_range_tree_node_t *node; + NvU64 start = *startp; + NvU64 end = *endp; + + uvm_range_tree_for_each_in(node, tree, start, end) { + if (node->start > addr) { + end = node->start - 1; + break; + } + else if (node->end < addr) + start = node->end + 1; + else + UVM_ASSERT_MSG(0, "Found node at address 0x%llx\n", addr); + } + + *startp = start; + *endp = end; +} + +void uvm_range_tree_split(uvm_range_tree_t *tree, + uvm_range_tree_node_t *existing, + uvm_range_tree_node_t *new) +{ + NV_STATUS status; + + UVM_ASSERT(new->start > existing->start); + UVM_ASSERT(new->start <= existing->end); + + // existing doesn't have to move anywhere, we just need to adjust its + // ranges. new will need to be inserted into the tree. + // + // Future optimization: insertion could walk down the tree starting from + // existing rather than from the root. + new->end = existing->end; + existing->end = new->start - 1; + status = uvm_range_tree_add(tree, new); + UVM_ASSERT(status == NV_OK); // There shouldn't be any collisions +} + +uvm_range_tree_node_t *uvm_range_tree_merge_prev(uvm_range_tree_t *tree, uvm_range_tree_node_t *node) +{ + uvm_range_tree_node_t *prev = uvm_range_tree_prev(tree, node); + if (!prev || prev->end != node->start - 1) + return NULL; + + uvm_range_tree_remove(tree, prev); + node->start = prev->start; + return prev; +} + +uvm_range_tree_node_t *uvm_range_tree_merge_next(uvm_range_tree_t *tree, uvm_range_tree_node_t *node) +{ + uvm_range_tree_node_t *next = uvm_range_tree_next(tree, node); + if (!next || next->start != node->end + 1) + return NULL; + + uvm_range_tree_remove(tree, next); + node->end = next->end; + return next; +} + +uvm_range_tree_node_t *uvm_range_tree_find(uvm_range_tree_t *tree, NvU64 addr) +{ + return range_node_find(tree, addr, NULL, NULL); +} + +uvm_range_tree_node_t *uvm_range_tree_iter_first(uvm_range_tree_t *tree, NvU64 start, NvU64 end) +{ + uvm_range_tree_node_t *node, *next; + + UVM_ASSERT(start <= end); + + node = range_node_find(tree, start, NULL, &next); + if (node) + return node; + + // We didn't find a node containing start itself. Check if the target range + // overlaps with the next node after start. + if (next) { + // Sanity checks + UVM_ASSERT(start < next->start); + if (uvm_range_tree_prev(tree, next)) + UVM_ASSERT(uvm_range_tree_prev(tree, next)->end < start); + + if (next->start <= end) + return next; + } + + return NULL; +} diff --git a/kernel-open/nvidia-uvm/uvm_range_tree.h b/kernel-open/nvidia-uvm/uvm_range_tree.h new file mode 100644 index 000000000..fca7bc581 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_tree.h @@ -0,0 +1,157 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_RANGE_TREE_H__ +#define __UVM_RANGE_TREE_H__ + +#include "uvm_linux.h" +#include "nvstatus.h" + +// Tree-based data structure for looking up and iterating over objects with +// provided [start, end] ranges. The ranges are not allowed to overlap. +// +// All locking is up to the caller. + +typedef struct uvm_range_tree_struct +{ + // Tree of uvm_range_tree_node_t's sorted by start. + struct rb_root rb_root; + + // List of uvm_range_tree_node_t's sorted by start. This is an optimization + // to avoid calling rb_next and rb_prev frequently, particularly while + // iterating. + struct list_head head; +} uvm_range_tree_t; + +typedef struct uvm_range_tree_node_struct +{ + NvU64 start; + // end is inclusive + NvU64 end; + + struct rb_node rb_node; + struct list_head list; +} uvm_range_tree_node_t; + + +void uvm_range_tree_init(uvm_range_tree_t *tree); + +// Set node->start and node->end before calling this function. Overlapping +// ranges are not allowed. If the new node overlaps with an existing range node, +// NV_ERR_UVM_ADDRESS_IN_USE is returned. +NV_STATUS uvm_range_tree_add(uvm_range_tree_t *tree, uvm_range_tree_node_t *node); + +static void uvm_range_tree_remove(uvm_range_tree_t *tree, uvm_range_tree_node_t *node) +{ + rb_erase(&node->rb_node, &tree->rb_root); + list_del(&node->list); +} + +// Shrink an existing node to [new_start, new_end]. +// The new range needs to be a subrange of the range being updated, that is +// new_start needs to be greater or equal to node->start and new_end needs to be +// lesser or equal to node->end. +void uvm_range_tree_shrink_node(uvm_range_tree_t *tree, uvm_range_tree_node_t *node, NvU64 new_start, NvU64 new_end); + +// Adjust start and end to be the largest contiguous interval surrounding addr +// between *startp and *endp and without overlapping an existing tree node. +// This function assumes there is no node that includes addr. +void uvm_range_tree_adjust_interval(uvm_range_tree_t *tree, NvU64 addr, NvU64 *startp, NvU64 *endp); + +// Splits an existing node into two pieces, with the new node always after the +// existing node. The caller must set new->start before calling this function. +// existing should not be modified by the caller. On return, existing will +// contain its updated smaller bounds. +// +// Before: [----------- existing ------------] +// After: [---- existing ----][---- new ----] +// ^new->start +void uvm_range_tree_split(uvm_range_tree_t *tree, + uvm_range_tree_node_t *existing, + uvm_range_tree_node_t *new); + +// Attempts to merge the given node with the prev/next node in address order. +// If the prev/next node is not adjacent to the given node, NULL is returned. +// Otherwise the provided node is kept in the tree and extended to cover the +// adjacent node. The adjacent node is removed and returned. 
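+//
+// For example, if the tree holds [0x0, 0xFFF] and [0x1000, 0x1FFF], calling
+// uvm_range_tree_merge_prev() on the second node extends it to cover
+// [0x0, 0x1FFF] and returns the removed [0x0, 0xFFF] node, which the caller
+// typically frees. If the two nodes were not adjacent, NULL would be returned
+// and the tree would be left unchanged.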
+uvm_range_tree_node_t *uvm_range_tree_merge_prev(uvm_range_tree_t *tree, uvm_range_tree_node_t *node); +uvm_range_tree_node_t *uvm_range_tree_merge_next(uvm_range_tree_t *tree, uvm_range_tree_node_t *node); + +// Returns the node containing addr, if any +uvm_range_tree_node_t *uvm_range_tree_find(uvm_range_tree_t *tree, NvU64 addr); + +// Returns the prev/next node in address order, or NULL if none exists +static uvm_range_tree_node_t *uvm_range_tree_prev(uvm_range_tree_t *tree, uvm_range_tree_node_t *node) +{ + if (list_is_first(&node->list, &tree->head)) + return NULL; + return list_prev_entry(node, list); +} + +static uvm_range_tree_node_t *uvm_range_tree_next(uvm_range_tree_t *tree, uvm_range_tree_node_t *node) +{ + if (list_is_last(&node->list, &tree->head)) + return NULL; + return list_next_entry(node, list); +} + +// Returns the first node in the range [start, end], if any +uvm_range_tree_node_t *uvm_range_tree_iter_first(uvm_range_tree_t *tree, NvU64 start, NvU64 end); + +// Return true if the range tree is empty. +static bool uvm_range_tree_empty(uvm_range_tree_t *tree) +{ + return list_empty(&tree->head); +} + +static NvU64 uvm_range_tree_node_size(uvm_range_tree_node_t *node) +{ + return node->end - node->start + 1; +} + +// Returns the node following the provided node in address order, if that node's +// start <= the provided end. +static uvm_range_tree_node_t *uvm_range_tree_iter_next(uvm_range_tree_t *tree, uvm_range_tree_node_t *node, NvU64 end) +{ + uvm_range_tree_node_t *next = uvm_range_tree_next(tree, node); + if (next && next->start <= end) + return next; + return NULL; +} + +#define uvm_range_tree_for_each(node, tree) list_for_each_entry((node), &(tree)->head, list) + +#define uvm_range_tree_for_each_safe(node, next, tree) \ + list_for_each_entry_safe((node), (next), &(tree)->head, list) + +#define uvm_range_tree_for_each_in(node, tree, start, end) \ + for ((node) = uvm_range_tree_iter_first((tree), (start), (end)); \ + (node); \ + (node) = uvm_range_tree_iter_next((tree), (node), (end))) + +#define uvm_range_tree_for_each_in_safe(node, next, tree, start, end) \ + for ((node) = uvm_range_tree_iter_first((tree), (start), (end)); \ + (node) ? ((next) = uvm_range_tree_iter_next((tree), (node), (end)), true) : false; \ + (node) = (next)) + +#endif // __UVM_RANGE_TREE_H__ diff --git a/kernel-open/nvidia-uvm/uvm_range_tree_test.c b/kernel-open/nvidia-uvm/uvm_range_tree_test.c new file mode 100644 index 000000000..e4fa28146 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_range_tree_test.c @@ -0,0 +1,1465 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_range_tree.h" +#include "uvm_kvmalloc.h" + +#include "uvm_test.h" +#include "uvm_test_ioctl.h" +#include "uvm_test_rng.h" + +// ------------------- Range Tree Test (RTT) ------------------- // + +// Arbitrary value, must be >= 1 +#define MAX_NODES_INIT 32 + +typedef enum +{ + RTT_OP_ADD, + RTT_OP_REMOVE, + RTT_OP_SPLIT, + RTT_OP_MERGE, + RTT_OP_SHRINK, + RTT_OP_MAX +} rtt_op_t; + +// Range Tree Test state +typedef struct rtt_state_struct +{ + uvm_range_tree_t tree; + uvm_test_rng_t rng; + + // Array of allocated nodes, unsorted + uvm_range_tree_node_t **nodes; + + // Number of nodes in the array + size_t count; + + // Number of nodes which can fit in the nodes array + size_t max; + + // The probability of shrinking a node instead of doing an add or remove + NvU32 shrink_probability; + + // The current probability of selecting an add operation over a remove + NvU32 add_chance; + + // The current probability of selecting a split operation over a merge + NvU32 split_chance; + + // For debug + struct + { + // The sum of all ranges currently in the tree + NvU64 size_sum; + + NvU64 total_adds; + NvU64 failed_adds; + NvU64 max_attempts_add; + NvU64 total_removes; + NvU64 total_shrinks; + NvU64 failed_shrinks; + NvU64 total_splits; + NvU64 failed_splits; + NvU64 max_attempts_split; + NvU64 total_merges; + NvU64 failed_merges; + NvU64 max_attempts_merge; + } stats; + +} rtt_state_t; + +typedef struct +{ + // end is inclusive + NvU64 start; + NvU64 end; +} rtt_range_t; + +static rtt_range_t rtt_node_get_range(uvm_range_tree_node_t *node) +{ + rtt_range_t range = {node->start, node->end}; + return range; +} + +// Since end is inclusive a range can't have a size of 0. A return value of 0 +// means that the range is 2^64. +static NvU64 rtt_get_range_size(rtt_range_t *range) +{ + return range->end - range->start + 1; +} + +static bool rtt_ranges_overlap(rtt_range_t *a, rtt_range_t *b) +{ + return uvm_ranges_overlap(a->start, a->end, b->start, b->end); +} + +static bool rtt_range_overlaps_node(uvm_range_tree_node_t *node, rtt_range_t *range) +{ + rtt_range_t temp = rtt_node_get_range(node); + return rtt_ranges_overlap(&temp, range); +} + +static void rtt_state_destroy(rtt_state_t *state) +{ + size_t i; + + if (!state) + return; + + for (i = 0; i < state->count; i++) + uvm_kvfree(state->nodes[i]); + + uvm_kvfree(state->nodes); + uvm_kvfree(state); +} + +static rtt_state_t *rtt_state_create(void) +{ + rtt_state_t *state = uvm_kvmalloc_zero(sizeof(*state)); + if (!state) + return NULL; + + state->max = MAX_NODES_INIT; + state->nodes = uvm_kvmalloc(state->max * sizeof(state->nodes[0])); + if (!state->nodes) { + uvm_kvfree(state); + return NULL; + } + + uvm_range_tree_init(&state->tree); + return state; +} + +static uvm_range_tree_node_t *rtt_alloc_node(rtt_state_t *state) +{ + uvm_range_tree_node_t *node; + uvm_range_tree_node_t **new_nodes; + size_t new_max; + + node = uvm_kvmalloc_zero(sizeof(*node)); + if (!node) + goto error; + + // Grow the nodes array if we're full. Do this here rather than when adding + // to the nodes array because this happens before the tree is modified. 
+ // Recovering from a failure on adding the node to the array requires the + // caller to undo tree operations, possibly before we've tested that they + // work. + // + // Doing this frequently won't get into a thrashing state since max never + // shrinks. + if (state->count == state->max) { + new_max = max((size_t)1, 2*state->max); + + new_nodes = uvm_kvrealloc(state->nodes, new_max * sizeof(state->nodes[0])); + if (!new_nodes) + goto error; + state->nodes = new_nodes; + state->max = new_max; + } + + return node; + +error: + uvm_kvfree(node); + return NULL; +} + +static NV_STATUS rtt_range_add(rtt_state_t *state, rtt_range_t *range, uvm_range_tree_node_t **new_node) +{ + NV_STATUS status; + uvm_range_tree_node_t *node; + + node = rtt_alloc_node(state); + if (!node) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + // Attempt insertion into the tree itself + node->start = range->start; + node->end = range->end; + status = uvm_range_tree_add(&state->tree, node); + if (status != NV_OK) + goto error; + + if (uvm_range_tree_node_size(node) != rtt_get_range_size(range)) { + uvm_range_tree_remove(&state->tree, node); + status = NV_ERR_INVALID_STATE; + goto error; + } + + UVM_ASSERT(state->count < state->max); // Forced by rtt_alloc_node + state->nodes[state->count] = node; + ++state->count; + state->stats.size_sum += rtt_get_range_size(range); + ++state->stats.total_adds; + + if (new_node) + *new_node = node; + + return NV_OK; + +error: + uvm_kvfree(node); + return status; +} + +static NV_STATUS rtt_index_remove(rtt_state_t *state, size_t index) +{ + uvm_range_tree_node_t *node; + NvU64 size; + + TEST_CHECK_RET(state->count > 0); + + node = state->nodes[index]; + size = uvm_range_tree_node_size(node); + uvm_range_tree_remove(&state->tree, node); + uvm_kvfree(node); + + // We don't care about ordering so move the last node into the free slot + --state->count; + state->nodes[index] = state->nodes[state->count]; + state->stats.size_sum -= size; + ++state->stats.total_removes; + + return NV_OK; +} + +static NV_STATUS rtt_node_shrink(rtt_state_t *state, uvm_range_tree_node_t *node, NvU64 new_start, NvU64 new_end) +{ + NvU64 old_size; + NvU64 new_size; + + TEST_CHECK_RET(new_start >= node->start); + TEST_CHECK_RET(new_end <= node->end); + + old_size = uvm_range_tree_node_size(node); + new_size = new_end - new_start + 1; + + uvm_range_tree_shrink_node(&state->tree, node, new_start, new_end); + + ++state->stats.total_shrinks; + state->stats.size_sum -= (old_size - new_size); + + return NV_OK; +} + +static NV_STATUS rtt_node_split(rtt_state_t *state, + uvm_range_tree_node_t *node, + NvU64 new_end, + uvm_range_tree_node_t **new_node) +{ + NV_STATUS status; + uvm_range_tree_node_t *new; + + TEST_CHECK_RET(new_end >= node->start); + TEST_CHECK_RET(new_end < node->end); + + new = rtt_alloc_node(state); + if (!new ) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + new->start = new_end + 1; + uvm_range_tree_split(&state->tree, node, new); + + UVM_ASSERT(state->count < state->max); // Forced by rtt_alloc_node + state->nodes[state->count] = new; + ++state->count; + // No changes needed to size_sum + ++state->stats.total_splits; + + if (new_node) + *new_node = new; + + return NV_OK; + +error: + uvm_kvfree(new); + return status; +} + +static NV_STATUS rtt_check_node(rtt_state_t *state, uvm_range_tree_node_t *node) +{ + uvm_range_tree_node_t *temp, *prev, *next; + NvU64 start, mid, end; + + start = node->start; + end = node->end; + mid = start + ((end - start) / 2); + + 
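// Look the node up at its start, midpoint and end: all three lookups must
+    // resolve to this node, while lookups just outside either boundary must not.
+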
TEST_CHECK_RET(!uvm_range_tree_empty(&state->tree)); + + if (start > 0) + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, start - 1) != node); + + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, start) == node); + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, mid) == node); + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, end) == node); + TEST_CHECK_RET(uvm_range_tree_node_size(node) == end - start + 1); + + if (end < ULLONG_MAX) + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, end + 1) != node); + + uvm_range_tree_for_each_in(temp, &state->tree, start, end) + TEST_CHECK_RET(temp == node); + + prev = uvm_range_tree_prev(&state->tree, node); + if (prev) { + TEST_CHECK_RET(prev->end < node->start); + TEST_CHECK_RET(uvm_range_tree_next(&state->tree, prev) == node); + } + else { + TEST_CHECK_RET(uvm_range_tree_iter_first(&state->tree, 0, ULLONG_MAX) == node); + } + + next = uvm_range_tree_next(&state->tree, node); + if (next) { + TEST_CHECK_RET(node->end < next->start); + TEST_CHECK_RET(uvm_range_tree_prev(&state->tree, next) == node); + } + else { + TEST_CHECK_RET(uvm_range_tree_iter_next(&state->tree, node, ULLONG_MAX) == NULL); + } + + return NV_OK; +} + +static NV_STATUS rtt_check_iterator_all(rtt_state_t *state) +{ + uvm_range_tree_node_t *node, *next, *prev = NULL, *expected = NULL; + size_t iter_count = 0; + + uvm_range_tree_for_each(node, &state->tree) { + if (expected) + TEST_CHECK_RET(node == expected); + + if (prev) + TEST_CHECK_RET(prev->end < node->start); + TEST_CHECK_RET(uvm_range_tree_prev(&state->tree, node) == prev); + + ++iter_count; + prev = node; + expected = uvm_range_tree_next(&state->tree, node); + } + TEST_CHECK_RET(expected == NULL); + + TEST_CHECK_RET(iter_count == state->count); + + iter_count = 0; + expected = NULL; + prev = NULL; + uvm_range_tree_for_each_safe(node, next, &state->tree) { + if (expected) + TEST_CHECK_RET(node == expected); + + if (prev) + TEST_CHECK_RET(prev->end < node->start); + TEST_CHECK_RET(uvm_range_tree_prev(&state->tree, node) == prev); + + ++iter_count; + prev = node; + expected = uvm_range_tree_next(&state->tree, node); + } + TEST_CHECK_RET(expected == NULL); + + TEST_CHECK_RET(iter_count == state->count); + return NV_OK; +} + + +// Attempts to add the given range to the tree and performs some sanity checks +// on the outcome. This is O(N) in the number of nodes currently in the tree. +// Return value meanings: +// +// NV_OK The range was added successfully and the sanity +// checks passed. +// +// NV_ERR_UVM_ADDRESS_IN_USE The range addition failed because the tree +// detected a collision in [range->start, +// range->end]. The collision sanity checks passed. +// +// NV_ERR_INVALID_STATE The sanity checks failed for any reason. +// +// NV_ERR_NO_MEMORY The obvious. +// +static NV_STATUS rtt_range_add_check(rtt_state_t *state, rtt_range_t *range) +{ + NV_STATUS status; + uvm_range_tree_node_t *node = NULL; + size_t i; + int overlap = 0; + + UVM_ASSERT(range->start <= range->end); + + // Determine whether this should succeed or fail + for (i = 0; i < state->count; i++) { + if (rtt_range_overlaps_node(state->nodes[i], range)) { + overlap = 1; + break; + } + } + + status = rtt_range_add(state, range, &node); + + if (overlap) { + // Verify failure + MEM_NV_CHECK_RET(status, NV_ERR_UVM_ADDRESS_IN_USE); + + // The tree said there's already a range there. Check whether its + // internal state is consistent. 
+ node = uvm_range_tree_iter_first(&state->tree, range->start, range->end); + TEST_CHECK_RET(node); + TEST_CHECK_RET(rtt_range_overlaps_node(node, range)); + } + else { + // Verify success + MEM_NV_CHECK_RET(status, NV_OK); + status = rtt_check_node(state, node); + } + + return status; +} + +// Returns NV_ERR_INVALID_STATE on sanity check failure, NV_OK otherwise. +static NV_STATUS rtt_index_remove_check(rtt_state_t *state, size_t index) +{ + uvm_range_tree_node_t *node, *prev, *next; + NvU64 start, end; + NV_STATUS status; + + TEST_CHECK_RET(index < state->count); + node = state->nodes[index]; + start = node->start; + end = node->end; + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + + prev = uvm_range_tree_prev(&state->tree, node); + next = uvm_range_tree_next(&state->tree, node); + + status = rtt_index_remove(state, index); + if (status != NV_OK) + return status; + + // Verify removal + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, start) == NULL); + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, end) == NULL); + TEST_CHECK_RET(uvm_range_tree_iter_first(&state->tree, start, end) == NULL); + if (prev) + TEST_CHECK_RET(uvm_range_tree_next(&state->tree, prev) == next); + if (next) + TEST_CHECK_RET(uvm_range_tree_prev(&state->tree, next) == prev); + if (!prev && !next) { + TEST_CHECK_RET(uvm_range_tree_empty(&state->tree)); + TEST_CHECK_RET(state->count == 0); + } + else { + TEST_CHECK_RET(!uvm_range_tree_empty(&state->tree)); + } + + return NV_OK; +} + +// Returns NV_ERR_INVALID_STATE on sanity check failure, NV_OK otherwise. +static NV_STATUS rtt_node_shrink_check(rtt_state_t *state, uvm_range_tree_node_t *node, NvU64 new_start, NvU64 new_end) +{ + uvm_range_tree_node_t *prev, *next; + NV_STATUS status; + NvU64 old_start = node->start; + NvU64 old_end = node->end; + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + + prev = uvm_range_tree_prev(&state->tree, node); + next = uvm_range_tree_next(&state->tree, node); + + status = rtt_node_shrink(state, node, new_start, new_end); + if (status != NV_OK) + return status; + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + + TEST_CHECK_RET(uvm_range_tree_prev(&state->tree, node) == prev); + TEST_CHECK_RET(uvm_range_tree_next(&state->tree, node) == next); + if (old_start != new_start) + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, old_start) == NULL); + if (old_end != new_end) + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, old_end) == NULL); + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, new_start) == node); + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, new_end) == node); + + return NV_OK; +} + +static NV_STATUS rtt_remove_all_check(rtt_state_t *state) +{ + NV_STATUS status; + + status = rtt_check_iterator_all(state); + if (status != NV_OK) + return status; + + while (state->count) { + status = rtt_index_remove_check(state, 0); + if (status != NV_OK) + return status; + } + return NV_OK; +} + +static NV_STATUS rtt_node_split_check(rtt_state_t *state, uvm_range_tree_node_t *node, NvU64 new_end) +{ + uvm_range_tree_node_t *prev, *next, *new = NULL; + NV_STATUS status; + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + + prev = uvm_range_tree_prev(&state->tree, node); + next = uvm_range_tree_next(&state->tree, node); + + status = rtt_node_split(state, node, new_end, &new); + if (status != NV_OK) + return status; + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + status 
= rtt_check_node(state, new); + if (status != NV_OK) + return status; + + TEST_CHECK_RET(uvm_range_tree_prev(&state->tree, node) == prev); + TEST_CHECK_RET(uvm_range_tree_next(&state->tree, node) == new); + TEST_CHECK_RET(uvm_range_tree_prev(&state->tree, new) == node); + TEST_CHECK_RET(uvm_range_tree_next(&state->tree, new) == next); + return NV_OK; +} + +// The rtt_index_merge_check_* functions don't have a non-check helper because +// both the helper and the caller need to walk the whole array to properly free +// the removed node. It's simpler to just handle all that in the same function. +static NV_STATUS rtt_index_merge_check_prev(rtt_state_t *state, size_t index) +{ + uvm_range_tree_node_t *node, *prev, *returned, *expected = NULL; + size_t i = 0; // Shut up compiler + NV_STATUS status; + + TEST_CHECK_RET(index < state->count); + node = state->nodes[index]; + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + + // Figure out if this should succeed or fail + if (node->start != 0) { + for (i = 0; i < state->count; i++) { + if (state->nodes[i]->end == node->start - 1) { + expected = state->nodes[i]; + break; + } + } + } + + prev = uvm_range_tree_prev(&state->tree, node); + if (expected) { + TEST_CHECK_RET(prev == expected); + status = rtt_check_node(state, expected); + if (status != NV_OK) + return status; + } + else if (prev) { + TEST_CHECK_RET(prev->end < node->start - 1); + } + + returned = uvm_range_tree_merge_prev(&state->tree, node); + TEST_CHECK_RET(returned == expected); + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + + if (expected) { + TEST_CHECK_RET(node->start == expected->start); + + // We don't care about ordering so move the last node into the free slot + uvm_kvfree(expected); + --state->count; + state->nodes[i] = state->nodes[state->count]; + // No change to size + ++state->stats.total_merges; + + return NV_OK; + } + + // Failed merge + return NV_ERR_INVALID_ADDRESS; +} + +static NV_STATUS rtt_index_merge_check_next(rtt_state_t *state, size_t index) +{ + uvm_range_tree_node_t *node, *next, *returned, *expected = NULL; + size_t i = 0; // Shut up compiler + NV_STATUS status; + + TEST_CHECK_RET(index < state->count); + node = state->nodes[index]; + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + + // Figure out if this should succeed or fail + if (node->end != ULLONG_MAX) { + for (i = 0; i < state->count; i++) { + if (state->nodes[i]->start == node->end + 1) { + expected = state->nodes[i]; + break; + } + } + } + + next = uvm_range_tree_next(&state->tree, node); + if (expected) { + TEST_CHECK_RET(next == expected); + status = rtt_check_node(state, expected); + if (status != NV_OK) + return status; + } + else if (next) { + TEST_CHECK_RET(next->start > node->end + 1); + } + + returned = uvm_range_tree_merge_next(&state->tree, node); + TEST_CHECK_RET(returned == expected); + + status = rtt_check_node(state, node); + if (status != NV_OK) + return status; + + if (expected) { + TEST_CHECK_RET(node->end == expected->end); + + // We don't care about ordering so move the last node into the free slot + uvm_kvfree(expected); + --state->count; + state->nodes[i] = state->nodes[state->count]; + // No change to size + ++state->stats.total_merges; + + return NV_OK; + } + + // Failed merge + return NV_ERR_INVALID_ADDRESS; +} + + +// Directed test helpers for using hard-coded values + +// Returns the index of the node containing addr, or state->count if none. 
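+// The nodes array is unsorted, so this is a simple linear scan; that is
+// acceptable for the node counts exercised by these tests.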
+static size_t rtt_node_find(rtt_state_t *state, NvU64 addr) +{ + size_t i; + for (i = 0; i < state->count; i++) { + if (state->nodes[i]->start <= addr && addr <= state->nodes[i]->end) + break; + } + return i; +} + +static NV_STATUS rtt_range_add_check_val(rtt_state_t *state, NvU64 start, NvU64 end) +{ + rtt_range_t range = {start, end}; + return rtt_range_add_check(state, &range); +} + +static NV_STATUS rtt_index_remove_check_val(rtt_state_t *state, NvU64 addr) +{ + size_t index = rtt_node_find(state, addr); + if (index == state->count) + return NV_ERR_INVALID_STATE; + return rtt_index_remove_check(state, index); +} + +static NV_STATUS rtt_node_shrink_check_val(rtt_state_t *state, NvU64 new_start, NvU64 new_end) +{ + size_t index = rtt_node_find(state, new_start); + if (index == state->count) + return NV_ERR_INVALID_STATE; + return rtt_node_shrink_check(state, state->nodes[index], new_start, new_end); +} + +static NV_STATUS rtt_node_split_check_val(rtt_state_t *state, NvU64 new_end) +{ + size_t index = rtt_node_find(state, new_end); + if (index == state->count || new_end == state->nodes[index]->end) + return NV_ERR_INVALID_STATE; + return rtt_node_split_check(state, state->nodes[index], new_end); +} + +static NV_STATUS rtt_index_merge_check_prev_val(rtt_state_t *state, NvU64 addr) +{ + size_t index = rtt_node_find(state, addr); + if (index == state->count) + return NV_ERR_INVALID_STATE; + return rtt_index_merge_check_prev(state, index); +} + +static NV_STATUS rtt_index_merge_check_next_val(rtt_state_t *state, NvU64 addr) +{ + size_t index = rtt_node_find(state, addr); + if (index == state->count) + return NV_ERR_INVALID_STATE; + return rtt_index_merge_check_next(state, index); +} + +static NV_STATUS rtt_directed(rtt_state_t *state) +{ + uvm_range_tree_node_t *node; + + // Empty tree + TEST_CHECK_RET(uvm_range_tree_empty(&state->tree)); + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, 0) == NULL); + TEST_CHECK_RET(uvm_range_tree_find(&state->tree, ULLONG_MAX) == NULL); + uvm_range_tree_for_each(node, &state->tree) + TEST_CHECK_RET(0); + uvm_range_tree_for_each_in(node, &state->tree, 0, 0) + TEST_CHECK_RET(0); + uvm_range_tree_for_each_in(node, &state->tree, 0, ULLONG_MAX) + TEST_CHECK_RET(0); + uvm_range_tree_for_each_in(node, &state->tree, ULLONG_MAX, ULLONG_MAX) + TEST_CHECK_RET(0); + + // Consume entire range + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, ULLONG_MAX), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 0), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, ULLONG_MAX), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, ULLONG_MAX, ULLONG_MAX), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 1), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 5, 7), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 7, ULLONG_MAX), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_remove_all_check(state), NV_OK); + + // Two non-overlapping ranges + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 10, 20), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 5), NV_OK); // Non-adjacent left + MEM_NV_CHECK_RET(rtt_index_remove_check_val(state, 0), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 9), NV_OK); // Adjacent left + MEM_NV_CHECK_RET(rtt_index_remove_check_val(state, 0), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 21, 30), NV_OK); // Adjacent right + 
MEM_NV_CHECK_RET(rtt_index_remove_check_val(state, 21), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 25, 30), NV_OK); // Non-adjacent right + MEM_NV_CHECK_RET(rtt_remove_all_check(state), NV_OK); + + // Two overlapping ranges + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 10, 20), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 10), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 9, 11), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 10, 20), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 11, 19), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 19, 21), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 20, 30), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 30), NV_ERR_UVM_ADDRESS_IN_USE); + MEM_NV_CHECK_RET(rtt_remove_all_check(state), NV_OK); + + // Fill gaps + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 10), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 20, 30), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 12, 18), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 11, 11), NV_OK); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 19, 19), NV_OK); + MEM_NV_CHECK_RET(rtt_remove_all_check(state), NV_OK); + + // Split ranges (new ranges of size 1) + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 2), NV_OK); // [0-----2] + MEM_NV_CHECK_RET(rtt_node_split_check_val(state, 0), NV_OK); // [0][1--2] + MEM_NV_CHECK_RET(rtt_node_split_check_val(state, 1), NV_OK); // [0][1][2] + MEM_NV_CHECK_RET(rtt_index_remove_check_val(state, 1), NV_OK); // [0] [2] + MEM_NV_CHECK_RET(rtt_remove_all_check(state), NV_OK); + + // Split ranges (new ranges of size >1) + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 11), NV_OK); // [0-----------11] + MEM_NV_CHECK_RET(rtt_node_split_check_val(state, 3), NV_OK); // [0-3][4------11] + MEM_NV_CHECK_RET(rtt_node_split_check_val(state, 7), NV_OK); // [0-3][4-7][8-11] + MEM_NV_CHECK_RET(rtt_index_remove_check_val(state, 4), NV_OK); // [0-3] [8-11] + MEM_NV_CHECK_RET(rtt_remove_all_check(state), NV_OK); + + // Merges + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 0), NV_OK); // [0] + MEM_NV_CHECK_RET(rtt_index_merge_check_prev_val(state, 0), NV_ERR_INVALID_ADDRESS); + MEM_NV_CHECK_RET(rtt_index_merge_check_next_val(state, 0), NV_ERR_INVALID_ADDRESS); + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 1, 1), NV_OK); // [0][1] + MEM_NV_CHECK_RET(rtt_index_merge_check_next_val(state, 0), NV_OK); // [0--1] + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 2, 2), NV_OK); // [0--1][2] + MEM_NV_CHECK_RET(rtt_index_merge_check_prev_val(state, 2), NV_OK); // [0-----2] + MEM_NV_CHECK_RET(rtt_remove_all_check(state), NV_OK); + + // Shrinks + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 0, 20), NV_OK); // [0---------------------20] + MEM_NV_CHECK_RET(rtt_node_shrink_check_val(state, 5, 15), NV_OK); // [5------------15] + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 5, 5), NV_ERR_UVM_ADDRESS_IN_USE); // [5------------15] + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 15, 15), NV_ERR_UVM_ADDRESS_IN_USE); // [5------------15] + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 16, 16), NV_OK); // [5------------15][16] + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 4, 4), NV_OK); // [4][5------------15][16] + MEM_NV_CHECK_RET(rtt_node_shrink_check_val(state, 10, 10), NV_OK); // [4] [10] [16] + 
MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 5, 9), NV_OK); // [4][5--9][10] [16] + MEM_NV_CHECK_RET(rtt_range_add_check_val(state, 11, 15), NV_OK); // [4][5--9][10][11-15][16] + MEM_NV_CHECK_RET(rtt_remove_all_check(state), NV_OK); + + return NV_OK; +} + +NV_STATUS uvm_test_range_tree_directed(UVM_TEST_RANGE_TREE_DIRECTED_PARAMS *params, struct file *filp) +{ + rtt_state_t *state; + NV_STATUS status; + + state = rtt_state_create(); + if (!state) + return NV_ERR_NO_MEMORY; + status = rtt_directed(state); + rtt_state_destroy(state); + return status; +} + +// ------------------------------ Random Test ------------------------------ // + +// Randomly place a block of the given size in the range described by bounds. +// size == 0 means size == 2^64. +static void rtt_rand_place(uvm_test_rng_t *rng, NvU64 size, rtt_range_t *bounds, rtt_range_t *out) +{ + UVM_ASSERT(bounds->start <= bounds->end); + + if (size == 0) { + // No placement choice + UVM_ASSERT(bounds->start == 0 && bounds->end == ULLONG_MAX); + out->start = 0; + out->end = ULLONG_MAX; + } + else { + UVM_ASSERT(rtt_get_range_size(bounds) == 0 || size <= rtt_get_range_size(bounds)); + + // Select a placement with uniform distribution. Note that bounds->end + + // 1 might overflow, but we know that size >= 1 so the range will be + // sane. + out->start = uvm_test_rng_range_64(rng, bounds->start, bounds->end + 1 - size); + out->end = out->start + size - 1; + } +} + +// Compute a range in [0, max_end] of random size. The size is selected with +// logarithmic distribution for a good mix of large and small ranges. +static void rtt_get_rand_range(uvm_test_rng_t *rng, NvU64 max_end, rtt_range_t *out) +{ + rtt_range_t bounds = {0, max_end}; + NvU64 size; + + // Offset size by 1 to handle overflow when max_end is ULLONG_MAX. + size = uvm_test_rng_range_log64(rng, 0, max_end) + 1; + rtt_rand_place(rng, size, &bounds, out); +} + +// Like rtt_get_rand_range but guarantees that the generated range will overlap +// with the input cover range. This is used to generate overlapping ranges to +// verify collision detection. +static void rtt_get_rand_range_covering(uvm_test_rng_t *rng, + NvU64 max_end, + rtt_range_t *cover, + rtt_range_t *out) +{ + NvU64 size; + rtt_range_t bounds; + + UVM_ASSERT(cover->end <= max_end); + + // Pick a logarithmic size. Offset by 1 to handle overflow when max_end is + // ULLONG_MAX. + size = uvm_test_rng_range_log64(rng, 0, max_end) + 1; + if (size == ULLONG_MAX) { + // No choice + UVM_ASSERT(max_end == ULLONG_MAX); + out->start = 0; + out->end = ULLONG_MAX; + return; + } + + // Compute the range where a block of size can be placed to still overlap + // with the input range. + if (cover->start < size) + bounds.start = 0; + else + bounds.start = cover->start - size + 1; + + // Make sure we don't exceed max_end while still covering the range. Also + // watch out for overflowing max_end in these calculations. + if (size > max_end - cover->end) + bounds.end = max_end; + else + bounds.end = cover->end + size - 1; + + rtt_rand_place(rng, size, &bounds, out); + UVM_ASSERT(rtt_ranges_overlap(cover, out)); +} + +// Attempt to add N ranges to the tree, where N is randomly selected from the +// range [1, params->max_batch_count]. Each range is randomly chosen. +// +// Repeats eachs individual addition on collision up to params->max_attempts +// times. If the attempt threshold is reached this stops trying to add more +// ranges, adjusts the RNG probabilities to prefer remove operations, and +// returns NV_ERR_BUSY_RETRY. 
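+//
+// For example, if a randomly placed 16MB range collides with an existing node,
+// the next attempt uses a freshly randomized placement of an 8MB range, then
+// 4MB, and so on, until an addition succeeds or max_attempts consecutive
+// collisions accumulate.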
+static NV_STATUS rtt_batch_add(rtt_state_t *state, UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params) +{ + size_t size = 0, ranges_to_add, max_ranges; + NvU32 collisions = 0; + NV_STATUS status = NV_OK; + rtt_range_t range, bounds = {0, params->max_end}; + + max_ranges = params->max_ranges - state->count; + if (max_ranges == 0) + return NV_OK; + + max_ranges = min(max_ranges, (size_t)params->max_batch_count); + ranges_to_add = uvm_test_rng_range_ptr(&state->rng, 1, max_ranges); + + if (params->verbose) + UVM_TEST_PRINT("Adding %zu ranges\n", ranges_to_add); + + while (ranges_to_add) { + if (fatal_signal_pending(current)) + return NV_ERR_SIGNAL_PENDING; + + // If we succeeded the last range add, pick a new range + if (status != NV_ERR_UVM_ADDRESS_IN_USE) { + rtt_get_rand_range(&state->rng, params->max_end, &range); + size = rtt_get_range_size(&range); + } + else { + // We collided last time. Try again in a new spot with a reduced + // size. + if (size == 0) // means 2^64 + size = ((size_t)-1) / 2; + else + size = max((size_t)1, size/2); + rtt_rand_place(&state->rng, size, &bounds, &range); + } + + // Try to add the new range + status = rtt_range_add_check(state, &range); + if (status == NV_ERR_UVM_ADDRESS_IN_USE) { + ++collisions; + ++state->stats.failed_adds; + if (collisions >= params->max_attempts) { + ++state->stats.max_attempts_add; + if (params->verbose) { + UVM_TEST_PRINT("Collision threshold reached with %zu ranges covering %llu (max_end %llu)\n", + state->count, state->stats.size_sum, params->max_end); + } + + // Tell RNG to prefer removes + state->add_chance = 100 - params->high_probability; + return NV_ERR_BUSY_RETRY; + } + if (params->verbose) + UVM_TEST_PRINT("Failed to add [%llu, %llu], trying again\n", range.start, range.end); + } + else { + MEM_NV_CHECK_RET(status, NV_OK); + if (params->verbose) + UVM_TEST_PRINT("Added [%llu, %llu]\n", range.start, range.end); + --ranges_to_add; + collisions = 0; + } + } + + return NV_OK; +} + +// Removes N ranges from the tree, where N is randomly selected from the range +// [1, params->max_batch_count]. +static NV_STATUS rtt_batch_remove(rtt_state_t *state, UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params) +{ + size_t index, max_ranges, ranges_to_remove; + NV_STATUS status; + + if (state->count == 0) + return NV_OK; + + max_ranges = min(state->count, (size_t)params->max_batch_count); + ranges_to_remove = uvm_test_rng_range_ptr(&state->rng, 1, max_ranges); + + if (params->verbose) + UVM_TEST_PRINT("Removing %zu ranges\n", ranges_to_remove); + + while (ranges_to_remove) { + index = uvm_test_rng_range_ptr(&state->rng, 0, state->count - 1); + if (params->verbose) + UVM_TEST_PRINT("Removing [%llu, %llu]\n", state->nodes[index]->start, state->nodes[index]->end); + status = rtt_index_remove_check(state, index); + if (status != NV_OK) + return status; + --ranges_to_remove; + } + + return NV_OK; +} + +// Attempts to shrink a randomly-selected range in the tree. On selecting a range +// of size 1, the attempt is repeated with another range up to the +// params->max_attempts threshold. 
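+// Unlike the split and merge helpers below, hitting the attempt threshold here
+// only returns NV_ERR_BUSY_RETRY; it does not adjust the RNG probabilities.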
+static NV_STATUS rtt_rand_shrink(rtt_state_t *state, UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params) +{ + uvm_range_tree_node_t *node = NULL; + NvU64 old_start; + NvU64 old_end; + NvU64 new_start; + NvU64 new_end; + NvU32 i; + NV_STATUS status; + + if (state->count == 0) + return NV_OK; + + // Randomly try to find a shrinkable range (size > 1) + for (i = 0; i < params->max_attempts; i++) { + size_t index; + if (fatal_signal_pending(current)) + return NV_ERR_SIGNAL_PENDING; + + index = uvm_test_rng_range_ptr(&state->rng, 0, state->count - 1); + if (state->nodes[index]->start != state->nodes[index]->end) { + node = state->nodes[index]; + break; + } + ++state->stats.failed_shrinks; + } + + if (!node) + return NV_ERR_BUSY_RETRY; + + // Pick a random new start and new end + old_start = node->start; + old_end = node->end; + new_start = uvm_test_rng_range_64(&state->rng, node->start, node->end); + new_end = uvm_test_rng_range_64(&state->rng, node->start, node->end); + if (new_end < new_start) { + // Swap start and end to get a valid range + swap(new_start, new_end); + } + status = rtt_node_shrink_check(state, node, new_start, new_end); + if (status != NV_OK) + return status; + + if (params->verbose) { + UVM_TEST_PRINT("Shrink [%llu, %llu] to [%llu, %llu]\n", + old_start, old_end, + new_start, new_end); + } + + return NV_OK; +} + +// Attempts to split a randomly-selected range in the tree. On selecting a range +// of size 1, the attempt is repeated with another range up to the +// params->max_attempts threshold. On reaching the attempt threshold the RNG +// probabilities are adjusted to prefer merge operations and NV_ERR_BUSY_RETRY +// is returned. +static NV_STATUS rtt_rand_split(rtt_state_t *state, UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params) +{ + uvm_range_tree_node_t *node = NULL; + rtt_range_t old_range; + size_t index; + NvU64 new_end; + NvU32 i; + NV_STATUS status; + + if (state->count == 0 || state->count == params->max_ranges) + return NV_OK; + + // Randomly try to find a splittable range (size > 1) + for (i = 0; i < params->max_attempts; i++) { + if (fatal_signal_pending(current)) + return NV_ERR_SIGNAL_PENDING; + + index = uvm_test_rng_range_ptr(&state->rng, 0, state->count - 1); + if (state->nodes[index]->start != state->nodes[index]->end) { + node = state->nodes[index]; + break; + } + ++state->stats.failed_splits; + } + + if (!node) { + ++state->stats.max_attempts_split; + if (params->verbose) { + UVM_TEST_PRINT("Split attempt threshold reached with %zu ranges covering %llu (max_end %llu)\n", + state->count, state->stats.size_sum, params->max_end); + } + + // Tell the RNG to prefer merges + state->split_chance = 100 - params->high_probability; + return NV_ERR_BUSY_RETRY; + } + + // Pick a random split point and do the split + old_range = rtt_node_get_range(node); + new_end = uvm_test_rng_range_64(&state->rng, node->start, node->end - 1); + status = rtt_node_split_check(state, node, new_end); + if (status != NV_OK) + return status; + + if (params->verbose) { + UVM_TEST_PRINT("Split [%llu, %llu] into [%llu, %llu][%llu, %llu]\n", + old_range.start, old_range.end, + old_range.start, new_end, new_end + 1, old_range.end); + } + + return NV_OK; +} + +// Attempts to merge a randomly-selected range in the tree in a randomly-selected +// direction (next or prev). On selecting a range with a non-adjacent neighbor, +// the attempt is repeated with another range up to the params->max_attempts +// threshold. 
On reaching the attempt threshold the RNG probabilities are +// adjusted to prefer split operations and NV_ERR_BUSY_RETRY is returned. +static NV_STATUS rtt_rand_merge(rtt_state_t *state, UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params) +{ + uvm_range_tree_node_t *node; + size_t index; + NvU32 i; + NV_STATUS status; + rtt_range_t old_range; + int try_prev; + + if (state->count < 2) + return NV_OK; + + // Randomly try to find a mergeable range + for (i = 0; i < params->max_attempts; i++) { + if (fatal_signal_pending(current)) + return NV_ERR_SIGNAL_PENDING; + + // Pick a new direction each time + try_prev = uvm_test_rng_range_32(&state->rng, 0, 1); + + index = uvm_test_rng_range_ptr(&state->rng, 0, state->count - 1); + node = state->nodes[index]; + old_range = rtt_node_get_range(node); + + if (try_prev) + status = rtt_index_merge_check_prev(state, index); + else + status = rtt_index_merge_check_next(state, index); + + if (status == NV_OK) { + if (params->verbose) { + UVM_TEST_PRINT("Merged [%llu, %llu] to [%llu, %llu]\n", + old_range.start, old_range.end, + node->start, node->end); + } + return NV_OK; + } + else if (status != NV_ERR_INVALID_ADDRESS) { + return status; + } + + ++state->stats.failed_merges; + } + + // We exceeded max_attempts. Tell the RNG to prefer splits. + if (params->verbose) { + UVM_TEST_PRINT("Merge attempt threshold reached with %zu ranges covering %llu (max_end %llu)\n", + state->count, state->stats.size_sum, params->max_end); + } + + ++state->stats.max_attempts_merge; + state->split_chance = params->high_probability; + return NV_ERR_BUSY_RETRY; +} + +// Randomly generate a range that collides with an allocated range and verify +// that adding the range fails. +static NV_STATUS rtt_rand_collision_check(rtt_state_t *state, NvU64 max_end) +{ + size_t index; + rtt_range_t cover, check; + + if (state->count == 0) + return NV_OK; + + // Pick an existing node at random and generate a range which overlaps that + // node. + index = uvm_test_rng_range_ptr(&state->rng, 0, state->count - 1); + cover = rtt_node_get_range(state->nodes[index]); + rtt_get_rand_range_covering(&state->rng, max_end, &cover, &check); + + MEM_NV_CHECK_RET(rtt_range_add(state, &check, NULL), NV_ERR_UVM_ADDRESS_IN_USE); + + return NV_OK; +} + +// Generate a random range and verify that the tree iterator walks all nodes +// in that range in order. +static NV_STATUS rtt_rand_iterator_check(rtt_state_t *state, NvU64 max_end) +{ + uvm_range_tree_node_t *node, *prev = NULL; + size_t i, target_count = 0, iter_count = 0; + rtt_range_t range; + + // Generate the range to check + rtt_get_rand_range(&state->rng, max_end, &range); + + // Phase 1: Iterate through the unordered list, counting how many nodes we + // ought to see from the tree iterator. 
+    for (i = 0; i < state->count; i++)
+        target_count += rtt_range_overlaps_node(state->nodes[i], &range);
+
+    // Phase 2: Use the tree iterator
+    uvm_range_tree_for_each_in(node, &state->tree, range.start, range.end) {
+        TEST_CHECK_RET(rtt_range_overlaps_node(node, &range));
+        if (prev)
+            TEST_CHECK_RET(prev->end < node->start);
+        ++iter_count;
+        prev = node;
+    }
+
+    TEST_CHECK_RET(iter_count == target_count);
+    return NV_OK;
+}
+
+static rtt_op_t rtt_get_rand_op(rtt_state_t *state, UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params)
+{
+    NvU32 r_group, r_sub;
+
+    // The possible options depend on the current number of nodes in the tree:
+    //     0                   add
+    //     1 (max == 1)        remove
+    //     1 (max != 1)        add, remove, shrink, split
+    //    >1, < max            add, remove, shrink, split, merge
+    //    max (max > 1)        remove, merge
+    if (state->count == 0)
+        return RTT_OP_ADD;
+    if (state->count == 1 && state->count == params->max_ranges)
+        return RTT_OP_REMOVE;
+
+    // r_group selects between the two groups of operations, either {add/remove/shrink}
+    // or {merge/split}. r_sub selects the sub operation within that group based
+    // on the current probability settings.
+    r_group = uvm_test_rng_range_32(&state->rng, 1, 100);
+    r_sub = uvm_test_rng_range_32(&state->rng, 1, 100);
+
+    if (state->count < params->max_ranges) {
+        if (r_group <= params->add_remove_shrink_group_probability) {
+            if (r_sub <= state->shrink_probability)
+                return RTT_OP_SHRINK;
+            // After giving shrink a chance, redo the randomization for add/remove.
+            r_sub = uvm_test_rng_range_32(&state->rng, 1, 100);
+
+            if (r_sub <= state->add_chance)
+                return RTT_OP_ADD;
+            return RTT_OP_REMOVE;
+        }
+        else {
+            if (state->count == 1 || r_sub <= state->split_chance)
+                return RTT_OP_SPLIT;
+            return RTT_OP_MERGE;
+        }
+    }
+
+    // We're at max
+    if (r_group <= params->add_remove_shrink_group_probability)
+        return RTT_OP_REMOVE;
+    return RTT_OP_MERGE;
+}
+
+// This random stress test performs the following every iteration of the main
+// loop:
+// - Perform a random operation on the tree, one of:
+//      - Add a randomized number of elements to the tree
+//      - Remove a randomized number of elements from the tree
+//      - Shrink a random element in the tree
+//      - Split a random element in the tree
+//      - Merge a random element in the tree with its neighbor
+// - Randomly generate ranges that overlap with at least one node, attempt to
+//   add those ranges to the tree, and verify that they fail.
+// - Randomly generate ranges and verify that the tree iterator reports all
+//   nodes in the range in the proper order.
+//
+// Operations are split into two groups:
+//
+// Group 1: add/remove/shrink
+// Group 2: split/merge
+//
+// params->add_remove_shrink_group_probability is used to select which operation
+// group to use each iteration. The selection of operation within that group
+// depends on the current "mode." Initially, add and split operations are
+// weighted heavily (with params->high_probability). If we reach the
+// params->max_attempts threshold while trying to perform one of those
+// operations, the probability of that operation is reversed to prefer removes
+// or merges respectively.
+//
+// In the case of add/remove, the probability will also change if the tree is
+// empty or full.
+//
+// A better (less random) test would be to track the available free ranges and
+// randomly perform an allocation somewhere there. Then the collisions would be
+// completely deterministic, and we could be guaranteed to eventually fill all
+// space. The trouble is that tracking free ranges essentially requires building
+// a simple allocator, with merge/split logic.
That would increase the +// complexity of this test immensely, so instead we're doing best-effort. +static NV_STATUS rtt_random(rtt_state_t *state, UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params) +{ + rtt_op_t op; + NvU64 i; + NvU32 j; + NV_STATUS status; + + state->shrink_probability = params->shrink_probability; + + // Prefer adds and splits initially to build the tree + state->add_chance = params->high_probability; + state->split_chance = params->high_probability; + + for (i = 0; i < params->main_iterations; i++) { + + // Since we could spend a long time here, catch ctrl-c + if (fatal_signal_pending(current)) + return NV_ERR_SIGNAL_PENDING; + + if (params->verbose) + UVM_TEST_PRINT("Iteration %llu: count %zu\n", i, state->count); + + // Modify the tree randomly. First adjust the add/remove probability if + // we're at the limits + if (state->count == 0) + state->add_chance = params->high_probability; + else if (state->count == params->max_ranges) + state->add_chance = 100 - params->high_probability; + + status = NV_OK; + op = rtt_get_rand_op(state, params); + switch (op) { + case RTT_OP_ADD: + status = rtt_batch_add(state, params); + break; + case RTT_OP_REMOVE: + status = rtt_batch_remove(state, params); + break; + case RTT_OP_SHRINK: + status = rtt_rand_shrink(state, params); + break; + case RTT_OP_SPLIT: + status = rtt_rand_split(state, params); + break; + case RTT_OP_MERGE: + status = rtt_rand_merge(state, params); + break; + default: + UVM_ASSERT(0); + } + + if (status != NV_OK && status != NV_ERR_BUSY_RETRY) { + // Don't print on ctrl-c + if (status != NV_ERR_SIGNAL_PENDING) + UVM_ERR_PRINT("rtt_op %d failed with status 0x%08x on iteration %llu\n", op, status, i); + return status; + } + + // Do collision detection + if (state->count) { + rtt_range_t whole = {0, ULLONG_MAX}; + MEM_NV_CHECK_RET(rtt_range_add(state, &whole, NULL), NV_ERR_UVM_ADDRESS_IN_USE); + for (j = 0; j < params->collision_checks; j++) { + status = rtt_rand_collision_check(state, params->max_end); + if (status != NV_OK) { + UVM_ERR_PRINT("rtt_rand_collision_check failed with status 0x%08x on iteration %llu, %u\n", + status, i, j); + return status; + } + } + } + + // Iterator checking + status = rtt_check_iterator_all(state); + if (status != NV_OK) + return status; + for (j = 0; j < params->iterator_checks; j++) { + status = rtt_rand_iterator_check(state, params->max_end); + if (status != NV_OK) { + UVM_ERR_PRINT("rtt_rand_iterator_check failed with status 0x%08x on iteration %llu, %u\n", + status, i, j); + return status; + } + } + } + + params->stats.total_adds = state->stats.total_adds; + params->stats.failed_adds = state->stats.failed_adds; + params->stats.max_attempts_add = state->stats.max_attempts_add; + params->stats.total_removes = state->stats.total_removes; + params->stats.total_splits = state->stats.total_splits; + params->stats.failed_splits = state->stats.failed_splits; + params->stats.max_attempts_split = state->stats.max_attempts_split; + params->stats.total_merges = state->stats.total_merges; + params->stats.failed_merges = state->stats.failed_merges; + params->stats.max_attempts_merge = state->stats.max_attempts_merge; + params->stats.total_shrinks = state->stats.total_shrinks; + params->stats.failed_shrinks = state->stats.failed_shrinks; + + return NV_OK; +} + +NV_STATUS uvm_test_range_tree_random(UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params, struct file *filp) +{ + rtt_state_t *state; + NV_STATUS status; + + if (params->high_probability > 100 || + params->add_remove_shrink_group_probability > 100 || + 
params->max_batch_count == 0) + return NV_ERR_INVALID_PARAMETER; + + state = rtt_state_create(); + if (!state) + return NV_ERR_NO_MEMORY; + + uvm_test_rng_init(&state->rng, params->seed); + status = rtt_random(state, params); + rtt_state_destroy(state); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_rb_tree.c b/kernel-open/nvidia-uvm/uvm_rb_tree.c new file mode 100644 index 000000000..9a100ae47 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_rb_tree.c @@ -0,0 +1,114 @@ +/******************************************************************************* + Copyright (c) 2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_rb_tree.h" + +static uvm_rb_tree_node_t *get_uvm_rb_tree_node(struct rb_node *rb_node) +{ + return rb_entry(rb_node, uvm_rb_tree_node_t, rb_node); +} + +static uvm_rb_tree_node_t *uvm_rb_tree_find_node(uvm_rb_tree_t *tree, + NvU64 key, + uvm_rb_tree_node_t **parent, + uvm_rb_tree_node_t **next) +{ + struct rb_node *rb_node = tree->rb_root.rb_node; + uvm_rb_tree_node_t *node = NULL; + uvm_rb_tree_node_t *_parent = NULL; + + while (rb_node) { + node = get_uvm_rb_tree_node(rb_node); + + if (key < node->key) + rb_node = rb_node->rb_left; + else if (key > node->key) + rb_node = rb_node->rb_right; + else + break; + + _parent = node; + } + + if (!rb_node) + node = NULL; + + if (parent) + *parent = _parent; + if (next) { + *next = NULL; // Handles the empty tree case + if (node) { + *next = uvm_rb_tree_next(tree, node); + } + else if (_parent) { + if (_parent->key > key) + *next = _parent; + else + *next = uvm_rb_tree_next(tree, _parent); + } + } + + return node; +} + +void uvm_rb_tree_init(uvm_rb_tree_t *tree) +{ + memset(tree, 0, sizeof(*tree)); + tree->rb_root = RB_ROOT; + INIT_LIST_HEAD(&tree->head); +} + +NV_STATUS uvm_rb_tree_insert(uvm_rb_tree_t *tree, uvm_rb_tree_node_t *node) +{ + uvm_rb_tree_node_t *match, *parent; + + match = uvm_rb_tree_find_node(tree, node->key, &parent, NULL); + if (match) + return NV_ERR_IN_USE; + + // If there's no parent and we didn't match on the root node, the tree is + // empty. 
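+    // (uvm_rb_tree_find_node() leaves *parent as NULL in exactly two cases:
+    // the tree is empty, or the key matched the root node. The latter case
+    // already returned NV_ERR_IN_USE above, so only the empty case remains.)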
+ if (!parent) { + rb_link_node(&node->rb_node, NULL, &tree->rb_root.rb_node); + rb_insert_color(&node->rb_node, &tree->rb_root); + list_add(&node->list, &tree->head); + return NV_OK; + } + + if (node->key < parent->key) { + rb_link_node(&node->rb_node, &parent->rb_node, &parent->rb_node.rb_left); + list_add_tail(&node->list, &parent->list); + } + else { + rb_link_node(&node->rb_node, &parent->rb_node, &parent->rb_node.rb_right); + list_add(&node->list, &parent->list); + } + + rb_insert_color(&node->rb_node, &tree->rb_root); + return NV_OK; +} + +uvm_rb_tree_node_t *uvm_rb_tree_find(uvm_rb_tree_t *tree, NvU64 key) +{ + return uvm_rb_tree_find_node(tree, key, NULL, NULL); +} diff --git a/kernel-open/nvidia-uvm/uvm_rb_tree.h b/kernel-open/nvidia-uvm/uvm_rb_tree.h new file mode 100644 index 000000000..3ffbef7e5 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_rb_tree.h @@ -0,0 +1,111 @@ +/******************************************************************************* + Copyright (c) 2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_RB_TREE_H__ +#define __UVM_RB_TREE_H__ + +#include "nvtypes.h" +#include "nvstatus.h" +#include +#include +#include +#include "nv-list-helpers.h" + +// UVM RB trees are an implementation of Red-Black trees, which include some +// optimizations for fast iteration over the elements in the tree. +// +// This implementation requires unique 64-bit keys. +// +// All locking is up to the caller. + +typedef struct +{ + NvU64 key; + + struct rb_node rb_node; + struct list_head list; +} uvm_rb_tree_node_t; + +typedef struct +{ + // Tree of uvm_rb_tree_node_t's sorted by key. + struct rb_root rb_root; + + // List of uvm_rb_tree_node_t's sorted by key. This is an optimization + // to avoid calling rb_next and rb_prev frequently, particularly while + // iterating. + struct list_head head; + +} uvm_rb_tree_t; + +#define UVM_RB_TREE_CLEAR_NODE(node) RB_CLEAR_NODE(&(node)->rb_node) +#define UVM_RB_TREE_EMPTY_NODE(node) RB_EMPTY_NODE(&(node)->rb_node) + +// Initialize a UVM RB Tree. +void uvm_rb_tree_init(uvm_rb_tree_t *tree); + +// Insert a node into the tree. node->key should be set prior to calling this +// function. +// If a node with a matching key exists, NV_ERR_IN_USE is returned. 
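+//
+// Illustrative usage sketch (not part of the driver sources; assumes the node
+// is embedded in a caller-owned structure):
+//
+//     typedef struct
+//     {
+//         const char *name;
+//         uvm_rb_tree_node_t rb_node;
+//     } my_item_t;
+//
+//     uvm_rb_tree_t tree;
+//     my_item_t item;
+//     NV_STATUS status;
+//
+//     uvm_rb_tree_init(&tree);
+//     item.rb_node.key = 42;
+//     status = uvm_rb_tree_insert(&tree, &item.rb_node);   // NV_OK
+//     status = uvm_rb_tree_insert(&tree, &item.rb_node);   // NV_ERR_IN_USE
+//
+//     if (uvm_rb_tree_find(&tree, 42))
+//         uvm_rb_tree_remove(&tree, &item.rb_node);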
+NV_STATUS uvm_rb_tree_insert(uvm_rb_tree_t *tree, uvm_rb_tree_node_t *node); + +static void uvm_rb_tree_remove(uvm_rb_tree_t *tree, uvm_rb_tree_node_t *node) +{ + rb_erase(&node->rb_node, &tree->rb_root); + list_del(&node->list); +} + +// Return node matching key, if any. +uvm_rb_tree_node_t *uvm_rb_tree_find(uvm_rb_tree_t *tree, NvU64 key); + +static uvm_rb_tree_node_t *uvm_rb_tree_first(uvm_rb_tree_t *tree) +{ + return list_first_entry_or_null(&tree->head, uvm_rb_tree_node_t, list); +} + +// Returns the prev/next node in key order, or NULL if none exists +static uvm_rb_tree_node_t *uvm_rb_tree_prev(uvm_rb_tree_t *tree, uvm_rb_tree_node_t *node) +{ + if (list_is_first(&node->list, &tree->head)) + return NULL; + return list_prev_entry(node, list); +} + +static uvm_rb_tree_node_t *uvm_rb_tree_next(uvm_rb_tree_t *tree, uvm_rb_tree_node_t *node) +{ + if (list_is_last(&node->list, &tree->head)) + return NULL; + return list_next_entry(node, list); +} + +// Return true if the range tree is empty. +static bool uvm_rb_tree_empty(uvm_rb_tree_t *tree) +{ + return list_empty(&tree->head); +} + +#define uvm_rb_tree_for_each(node, tree) list_for_each_entry((node), &(tree)->head, list) + +#define uvm_rb_tree_for_each_safe(node, next, tree) list_for_each_entry_safe((node), (next), &(tree)->head, list) + +#endif // __UVM_RB_TREE_H__ diff --git a/kernel-open/nvidia-uvm/uvm_rb_tree_test.c b/kernel-open/nvidia-uvm/uvm_rb_tree_test.c new file mode 100644 index 000000000..c009e8df7 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_rb_tree_test.c @@ -0,0 +1,383 @@ +/******************************************************************************* + Copyright (c) 2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_kvmalloc.h" +#include "uvm_rb_tree.h" +#include "uvm_test.h" +#include "uvm_test_rng.h" + +typedef struct +{ + NvU64 key; + uvm_rb_tree_node_t node; + struct list_head list; +} rbtt_tree_node_t; + +typedef enum +{ + RBTT_OP_ADD, + RBTT_OP_REMOVE, + RBTT_OP_COUNT +} rbtt_test_op_t; + +typedef struct +{ + uvm_rb_tree_t tree; + uvm_test_rng_t rng; + + // List of all nodes used for tracking and verification. + // Nodes in the list are in insertion order. 
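+    // Keeping this list separate from the uvm_rb_tree_t internals lets the
+    // test cross-check every find/iteration result against an independently
+    // maintained structure.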
+ struct list_head nodes; + rbtt_test_op_t preferred_op; + size_t count; +} rbtt_state_t; + +static rbtt_state_t *rbtt_state_create(void) +{ + rbtt_state_t *state = uvm_kvmalloc_zero(sizeof(*state)); + + if (!state) + return NULL; + + INIT_LIST_HEAD(&state->nodes); + uvm_rb_tree_init(&state->tree); + return state; +} + +static void rbtt_state_destroy(rbtt_state_t *state) +{ + rbtt_tree_node_t *node, *next; + + list_for_each_entry_safe(node, next, &state->nodes, list) { + list_del(&node->list); + uvm_kvfree(node); + } + + uvm_kvfree(state); +} + +static NV_STATUS rbtt_check_tree(rbtt_state_t *state) +{ + uvm_rb_tree_node_t *tree_node = NULL; + uvm_rb_tree_node_t *next; + rbtt_tree_node_t *node; + + list_for_each_entry(node, &state->nodes, list) { + tree_node = uvm_rb_tree_find(&state->tree, node->key); + TEST_CHECK_RET(tree_node); + TEST_CHECK_RET(tree_node == &node->node); + } + + // Check tree iterators. + if (state->count == 0) { + TEST_CHECK_RET(uvm_rb_tree_empty(&state->tree)); + TEST_CHECK_RET(uvm_rb_tree_first(&state->tree) == NULL); + uvm_rb_tree_for_each(tree_node, &state->tree) + TEST_CHECK_RET(0); + uvm_rb_tree_for_each_safe(tree_node, next, &state->tree) + TEST_CHECK_RET(0); + } + else { + uvm_rb_tree_node_t *prev = NULL; + uvm_rb_tree_node_t *curr; + size_t tree_node_count = 0; + + TEST_CHECK_RET(!uvm_rb_tree_empty(&state->tree)); + curr = uvm_rb_tree_first(&state->tree); + TEST_CHECK_RET(curr != NULL); + + uvm_rb_tree_for_each(tree_node, &state->tree) { + TEST_CHECK_RET(curr == tree_node); + TEST_CHECK_RET(uvm_rb_tree_prev(&state->tree, tree_node) == prev); + if (prev) + TEST_CHECK_RET(prev->key < tree_node->key); + prev = tree_node; + curr = uvm_rb_tree_next(&state->tree, tree_node); + tree_node_count++; + } + + TEST_CHECK_RET(curr == NULL); + TEST_CHECK_RET(tree_node_count == state->count); + + tree_node_count = 0; + prev = NULL; + curr = uvm_rb_tree_first(&state->tree); + uvm_rb_tree_for_each_safe(tree_node, next, &state->tree) { + TEST_CHECK_RET(curr == tree_node); + TEST_CHECK_RET(uvm_rb_tree_prev(&state->tree, tree_node) == prev); + if (prev) + TEST_CHECK_RET(prev->key < tree_node->key); + prev = tree_node; + curr = uvm_rb_tree_next(&state->tree, tree_node); + tree_node_count++; + } + + TEST_CHECK_RET(curr == NULL); + TEST_CHECK_RET(tree_node_count == state->count); + } + + + return NV_OK; +} + +static rbtt_tree_node_t *rbtt_node_alloc(void) +{ + rbtt_tree_node_t *node = uvm_kvmalloc_zero(sizeof(*node)); + + if (!node) + return NULL; + + INIT_LIST_HEAD(&node->list); + return node; +} + +static NV_STATUS rbtt_add_node(rbtt_state_t *state, NvU64 key) +{ + rbtt_tree_node_t *node = rbtt_node_alloc(); + NV_STATUS status; + + if (!node) + return NV_ERR_NO_MEMORY; + + node->key = key; + node->node.key = key; + + status = uvm_rb_tree_insert(&state->tree, &node->node); + if (status == NV_OK) { + list_add_tail(&node->list, &state->nodes); + state->count++; + } else { + uvm_kvfree(node); + } + + return status; +} + +// This function assumes that node is a valid tree node. +// All validation checks should be done by the caller. 
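+// In this test that means the caller located 'node' either in the tracking
+// list (rbtt_tree_remove_by_key) or via rbtt_test_get_random_node(), so the
+// node is known to be present in both the tree and the list.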
+static void rbtt_tree_remove_node(rbtt_state_t *state, rbtt_tree_node_t *node) +{ + uvm_rb_tree_remove(&state->tree, &node->node); + list_del(&node->list); + uvm_kvfree(node); + UVM_ASSERT(state->count > 0); + state->count--; +} + +static NV_STATUS rbtt_tree_remove_by_key(rbtt_state_t *state, NvU64 key) +{ + uvm_rb_tree_node_t *tree_node; + rbtt_tree_node_t *node; + bool exists; + + list_for_each_entry(node, &state->nodes, list) { + if (node->key == key) + break; + } + + // If node is equal to the head of the list, there is no node + // matching key in our the list. + exists = &node->list != &state->nodes; + + tree_node = uvm_rb_tree_find(&state->tree, key); + if (exists) { + TEST_CHECK_RET(tree_node); + TEST_CHECK_RET(node->key == tree_node->key); + rbtt_tree_remove_node(state, node); + } + else { + TEST_CHECK_RET(tree_node == NULL); + } + + return rbtt_check_tree(state); +} + +static NV_STATUS rbtt_tree_remove_all(rbtt_state_t *state) +{ + rbtt_tree_node_t *node, *next; + + list_for_each_entry_safe(node, next, &state->nodes, list) + TEST_NV_CHECK_RET(rbtt_tree_remove_by_key(state, node->key)); + + return NV_OK; +} + +static NV_STATUS rbtt_test_directed(rbtt_state_t *state) +{ + TEST_CHECK_RET(uvm_rb_tree_empty(&state->tree)); + TEST_CHECK_RET(uvm_rb_tree_find(&state->tree, 0) == NULL); + TEST_CHECK_RET(uvm_rb_tree_find(&state->tree, ULLONG_MAX) == NULL); + TEST_CHECK_RET(uvm_rb_tree_first(&state->tree) == NULL); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + + MEM_NV_CHECK_RET(rbtt_add_node(state, 0), NV_OK); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + MEM_NV_CHECK_RET(rbtt_add_node(state, ULLONG_MAX), NV_OK); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + MEM_NV_CHECK_RET(rbtt_add_node(state, ULLONG_MAX / 2), NV_OK); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + MEM_NV_CHECK_RET(rbtt_add_node(state, 0), NV_ERR_IN_USE); + MEM_NV_CHECK_RET(rbtt_add_node(state, ULLONG_MAX), NV_ERR_IN_USE); + MEM_NV_CHECK_RET(rbtt_add_node(state, ULLONG_MAX / 2), NV_ERR_IN_USE); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + + // Create gaps and exactly fill them. 
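+    // Keys 0, ULLONG_MAX / 2 and ULLONG_MAX are already present, so adding 2
+    // and 4 leaves single-key holes at 1 and 3; the next two adds fill those
+    // holes exactly.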
+ MEM_NV_CHECK_RET(rbtt_add_node(state, 2), NV_OK); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + MEM_NV_CHECK_RET(rbtt_add_node(state, 4), NV_OK); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + MEM_NV_CHECK_RET(rbtt_add_node(state, 1), NV_OK); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + MEM_NV_CHECK_RET(rbtt_add_node(state, 3), NV_OK); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + + TEST_NV_CHECK_RET(rbtt_tree_remove_by_key(state, ULLONG_MAX / 2)); + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + TEST_NV_CHECK_RET(rbtt_tree_remove_all(state)); + TEST_CHECK_RET(uvm_rb_tree_empty(&state->tree)); + + return NV_OK; +} + +NV_STATUS uvm_test_rb_tree_directed(UVM_TEST_RB_TREE_DIRECTED_PARAMS *params, struct file *filp) +{ + rbtt_state_t *state = rbtt_state_create(); + NV_STATUS status; + + if (!state) + return NV_ERR_NO_MEMORY; + + status = rbtt_test_directed(state); + rbtt_state_destroy(state); + return status; +} + +static bool rbtt_test_random_should_fail(rbtt_state_t *state, NvU64 key) +{ + rbtt_tree_node_t *node; + bool should_fail = NV_FALSE; + + list_for_each_entry(node, &state->nodes, list) { + if (node->key == key) { + should_fail = NV_TRUE; + break; + } + } + + return should_fail; +} + +static rbtt_tree_node_t *rbtt_test_get_random_node(rbtt_state_t *state) +{ + rbtt_tree_node_t *node; + size_t index; + + if (!state->count) + return NULL; + + index = uvm_test_rng_range_ptr(&state->rng, 0, state->count - 1); + node = list_first_entry(&state->nodes, rbtt_tree_node_t, list); + while (index--) + node = list_next_entry(node, list); + + UVM_ASSERT(node); + return node; +} + +static rbtt_test_op_t rbtt_test_get_random_op(rbtt_state_t *state, size_t limit) +{ + // The algorithm is designed to grow the tree until it reaches the + // limit, then shrink it until it is empty, while still randomizing + // the operations. + + if (state->count == 0) { + state->preferred_op = RBTT_OP_ADD; + return RBTT_OP_ADD; + } + else if (state->count == limit) { + state->preferred_op = RBTT_OP_REMOVE; + return RBTT_OP_REMOVE; + } + + if (uvm_test_rng_range_32(&state->rng, 0, 3) == 0) { + BUILD_BUG_ON((int)RBTT_OP_COUNT != 2); + return !state->preferred_op; + } + + return state->preferred_op; +} + +static NV_STATUS rbtt_test_random(rbtt_state_t *state, UVM_TEST_RB_TREE_RANDOM_PARAMS *params) +{ + rbtt_tree_node_t *node; + rbtt_test_op_t op; + NvU64 i; + NvU64 key; + NvU64 key_range_max = params->range_max ? params->range_max : ULLONG_MAX; + + for (i = 0; i < params->iterations; i++) { + bool should_fail; + + if (fatal_signal_pending(current)) + return NV_ERR_SIGNAL_PENDING; + + op = rbtt_test_get_random_op(state, params->node_limit); + switch (op) { + case RBTT_OP_ADD: + // By using a logarithmic key distribution, we are going to get + // grouping in the lower ranges of the key space, which increases the + // chance for collisions. + key = uvm_test_rng_range_log64(&state->rng, 0, key_range_max); + should_fail = rbtt_test_random_should_fail(state, key); + MEM_NV_CHECK_RET(rbtt_add_node(state, key), should_fail ? 
NV_ERR_IN_USE : NV_OK); + break; + case RBTT_OP_REMOVE: + node = rbtt_test_get_random_node(state); + if (node) + rbtt_tree_remove_node(state, node); + else + TEST_CHECK_RET(state->count == 0); + default: + break; + } + + TEST_NV_CHECK_RET(rbtt_check_tree(state)); + } + + return NV_OK; +} + +NV_STATUS uvm_test_rb_tree_random(UVM_TEST_RB_TREE_RANDOM_PARAMS *params, struct file *filp) +{ + rbtt_state_t *state = rbtt_state_create(); + NV_STATUS status; + + if (!state) + return NV_ERR_NO_MEMORY; + + uvm_test_rng_init(&state->rng, params->seed); + status = rbtt_test_random(state, params); + rbtt_state_destroy(state); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_rm_mem.c b/kernel-open/nvidia-uvm/uvm_rm_mem.c new file mode 100644 index 000000000..25ab08fe5 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_rm_mem.c @@ -0,0 +1,435 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_rm_mem.h" +#include "uvm_gpu.h" +#include "uvm_global.h" +#include "uvm_kvmalloc.h" +#include "uvm_linux.h" +#include "nv_uvm_interface.h" + +bool uvm_rm_mem_mapped_on_gpu(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + return uvm_global_processor_mask_test(&rm_mem->mapped_on, gpu->global_id); +} + +bool uvm_rm_mem_mapped_on_gpu_proxy(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + if (rm_mem->proxy_vas == NULL) + return false; + + if (rm_mem->proxy_vas[uvm_global_id_value(gpu->global_id)] == 0) + return false; + + UVM_ASSERT(uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)); + UVM_ASSERT(uvm_gpu_uses_proxy_channel_pool(gpu)); + + return true; +} + +bool uvm_rm_mem_mapped_on_cpu(uvm_rm_mem_t *rm_mem) +{ + return uvm_global_processor_mask_test(&rm_mem->mapped_on, UVM_GLOBAL_ID_CPU); +} + +static void rm_mem_set_gpu_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu, NvU64 va) +{ + rm_mem->vas[uvm_global_id_value(gpu->global_id)] = va; + uvm_global_processor_mask_set(&rm_mem->mapped_on, gpu->global_id); +} + +static void rm_mem_set_gpu_proxy_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu, NvU64 va) +{ + rm_mem->proxy_vas[uvm_global_id_value(gpu->global_id)] = va; +} + +static void rm_mem_set_cpu_va(uvm_rm_mem_t *rm_mem, void *va) +{ + rm_mem->vas[UVM_GLOBAL_ID_CPU_VALUE] = (uintptr_t) va; + uvm_global_processor_mask_set(&rm_mem->mapped_on, UVM_GLOBAL_ID_CPU); +} + +static void rm_mem_clear_gpu_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(!uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu)); + + uvm_global_processor_mask_clear(&rm_mem->mapped_on, gpu->global_id); + rm_mem->vas[uvm_global_id_value(gpu->global_id)] = 0; +} + +static void rm_mem_clear_gpu_proxy_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + rm_mem->proxy_vas[uvm_global_id_value(gpu->global_id)] = 0; +} + +static void rm_mem_clear_cpu_va(uvm_rm_mem_t *rm_mem) +{ + uvm_global_processor_mask_clear(&rm_mem->mapped_on, UVM_GLOBAL_ID_CPU); + rm_mem->vas[UVM_GLOBAL_ID_CPU_VALUE] = 0; +} + +NvU64 uvm_rm_mem_get_gpu_uvm_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT_MSG(uvm_rm_mem_mapped_on_gpu(rm_mem, gpu), "GPU %s\n", uvm_gpu_name(gpu)); + + return rm_mem->vas[uvm_global_id_value(gpu->global_id)]; +} + +NvU64 uvm_rm_mem_get_gpu_proxy_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu)); + + return rm_mem->proxy_vas[uvm_global_id_value(gpu->global_id)]; +} + +NvU64 uvm_rm_mem_get_gpu_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu, bool is_proxy_va_space) +{ + if (is_proxy_va_space) + return uvm_rm_mem_get_gpu_proxy_va(rm_mem, gpu); + else + return uvm_rm_mem_get_gpu_uvm_va(rm_mem, gpu); +} + +void *uvm_rm_mem_get_cpu_va(uvm_rm_mem_t *rm_mem) +{ + UVM_ASSERT(uvm_rm_mem_mapped_on_cpu(rm_mem)); + + return (void *)(uintptr_t)rm_mem->vas[UVM_GLOBAL_ID_CPU_VALUE]; +} + +static NV_STATUS rm_mem_map_gpu_proxy(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_gpu_t *gpu_owner; + NvU64 gpu_owner_va; + NvU64 proxy_va; + + UVM_ASSERT(uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)); + + if (!uvm_gpu_uses_proxy_channel_pool(gpu)) + return NV_OK; + + if (uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu)) + return NV_OK; + + if (rm_mem->proxy_vas == NULL) { + NvU64 *proxy_vas = uvm_kvmalloc_zero(sizeof(rm_mem->vas)); + if (proxy_vas == NULL) + return NV_ERR_NO_MEMORY; + + rm_mem->proxy_vas = proxy_vas; + } + + gpu_owner = rm_mem->gpu_owner; + gpu_owner_va = uvm_rm_mem_get_gpu_uvm_va(rm_mem, gpu_owner); + + status = 
uvm_rm_locked_call(nvUvmInterfacePagingChannelsMap(gpu_owner->rm_address_space, + gpu_owner_va, + uvm_gpu_device_handle(gpu), + &proxy_va)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfacePagingChannelsMap() failed: %s, src GPU %s, dst GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu_owner), + uvm_gpu_name(gpu)); + return status; + } + + rm_mem_set_gpu_proxy_va(rm_mem, gpu, proxy_va); + + return NV_OK; +} + +static void rm_mem_unmap_gpu_proxy(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + uvm_gpu_t *gpu_owner; + NvU64 gpu_owner_va; + + if (!uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu)) + return; + + gpu_owner = rm_mem->gpu_owner; + gpu_owner_va = uvm_rm_mem_get_gpu_uvm_va(rm_mem, gpu_owner); + + + uvm_rm_locked_call_void(nvUvmInterfacePagingChannelsUnmap(gpu_owner->rm_address_space, + gpu_owner_va, + uvm_gpu_device_handle(gpu))); + + rm_mem_clear_gpu_proxy_va(rm_mem, gpu); +} + +NV_STATUS uvm_rm_mem_alloc(uvm_gpu_t *gpu, uvm_rm_mem_type_t type, NvLength size, uvm_rm_mem_t **rm_mem_out) +{ + NV_STATUS status = NV_OK; + uvm_rm_mem_t *rm_mem; + UvmGpuAllocInfo alloc_info = {0}; + NvU64 gpu_va; + + UVM_ASSERT(gpu); + UVM_ASSERT((type == UVM_RM_MEM_TYPE_SYS) || (type == UVM_RM_MEM_TYPE_GPU)); + UVM_ASSERT(size != 0); + + rm_mem = uvm_kvmalloc_zero(sizeof(*rm_mem)); + if (rm_mem == NULL) + return NV_ERR_NO_MEMORY; + + + + + + + + + if (type == UVM_RM_MEM_TYPE_SYS) + status = uvm_rm_locked_call(nvUvmInterfaceMemoryAllocSys(gpu->rm_address_space, size, &gpu_va, &alloc_info)); + else + status = uvm_rm_locked_call(nvUvmInterfaceMemoryAllocFB(gpu->rm_address_space, size, &gpu_va, &alloc_info)); + + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfaceMemoryAlloc%s() failed: %s, GPU %s\n", + type == UVM_RM_MEM_TYPE_SYS ? "Sys" : "FB", + nvstatusToString(status), + uvm_gpu_name(gpu)); + goto error; + } + + rm_mem->gpu_owner = gpu; + rm_mem->type = type; + rm_mem->size = size; + rm_mem_set_gpu_va(rm_mem, gpu, gpu_va); + + status = rm_mem_map_gpu_proxy(rm_mem, gpu); + if (status != NV_OK) + goto error; + + *rm_mem_out = rm_mem; + return NV_OK; + +error: + uvm_rm_mem_free(rm_mem); + return status; +} + +NV_STATUS uvm_rm_mem_map_cpu(uvm_rm_mem_t *rm_mem) +{ + NV_STATUS status; + uvm_gpu_t *gpu; + NvU64 gpu_va; + void *cpu_va; + + UVM_ASSERT(rm_mem); + + if (uvm_rm_mem_mapped_on_cpu(rm_mem)) + return NV_OK; + + gpu = rm_mem->gpu_owner; + gpu_va = uvm_rm_mem_get_gpu_uvm_va(rm_mem, gpu); + + status = uvm_rm_locked_call(nvUvmInterfaceMemoryCpuMap(gpu->rm_address_space, + gpu_va, + rm_mem->size, + &cpu_va, + UVM_PAGE_SIZE_DEFAULT)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfaceMemoryCpuMap() failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + return status; + } + + rm_mem_set_cpu_va(rm_mem, cpu_va); + + return NV_OK; +} + +void uvm_rm_mem_unmap_cpu(uvm_rm_mem_t *rm_mem) +{ + UVM_ASSERT(rm_mem); + + if (!uvm_rm_mem_mapped_on_cpu(rm_mem)) + return; + + uvm_rm_locked_call_void(nvUvmInterfaceMemoryCpuUnMap(rm_mem->gpu_owner->rm_address_space, + uvm_rm_mem_get_cpu_va(rm_mem))); + + rm_mem_clear_cpu_va(rm_mem); +} + +NV_STATUS uvm_rm_mem_map_gpu(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_gpu_t *gpu_owner; + NvU64 gpu_owner_va; + NvU64 gpu_va; + + UVM_ASSERT(rm_mem); + UVM_ASSERT(gpu); + + // Peer mappings not supported yet + UVM_ASSERT(rm_mem->type == UVM_RM_MEM_TYPE_SYS); + + if (uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)) + return NV_OK; + + gpu_owner = rm_mem->gpu_owner; + gpu_owner_va = uvm_rm_mem_get_gpu_uvm_va(rm_mem, gpu_owner); + + status = 
uvm_rm_locked_call(nvUvmInterfaceDupAllocation(gpu_owner->rm_address_space, + gpu_owner_va, + gpu->rm_address_space, + &gpu_va)); + if (status != NV_OK) { + UVM_ERR_PRINT("nvUvmInterfaceDupAllocation() failed: %s, src GPU %s, dest GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu_owner), + uvm_gpu_name(gpu)); + return status; + } + + rm_mem_set_gpu_va(rm_mem, gpu, gpu_va); + + // Map to proxy VA space, if applicable + return rm_mem_map_gpu_proxy(rm_mem, gpu); +} + +// This internal unmap variant allows the GPU owner to be unmapped, unlike +// uvm_rm_mem_unmap_gpu +static void rm_mem_unmap_gpu(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + NvU64 va; + + if (!uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)) + return; + + // Remove mappings in proxy address space, if any + rm_mem_unmap_gpu_proxy(rm_mem, gpu); + + va = uvm_rm_mem_get_gpu_uvm_va(rm_mem, gpu); + uvm_rm_locked_call_void(nvUvmInterfaceMemoryFree(gpu->rm_address_space, va)); + rm_mem_clear_gpu_va(rm_mem, gpu); +} + +void uvm_rm_mem_unmap_gpu(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu) +{ + UVM_ASSERT(rm_mem); + UVM_ASSERT(gpu); + + // Cannot unmap from the gpu that owns the allocation. + UVM_ASSERT_MSG(rm_mem->gpu_owner != gpu, "GPU %s\n", uvm_gpu_name(gpu)); + + rm_mem_unmap_gpu(rm_mem, gpu); +} + +void uvm_rm_mem_free(uvm_rm_mem_t *rm_mem) +{ + uvm_global_gpu_id_t gpu_id; + uvm_gpu_t *gpu_owner; + + if (rm_mem == NULL) + return; + + // If the GPU owner is not set, allocation of backing storage by RM failed + gpu_owner = rm_mem->gpu_owner; + if (gpu_owner == NULL) { + uvm_kvfree(rm_mem); + return; + } + + uvm_rm_mem_unmap_cpu(rm_mem); + + // Don't use for_each_global_gpu_in_mask() as the owning GPU might be being + // destroyed and already removed from the global GPU array causing the iteration + // to stop prematurely. 
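+    // Iterating over the IDs in rm_mem's own mapped_on mask avoids that
+    // problem; the owning GPU is skipped here and torn down last, below.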
+ for_each_global_gpu_id_in_mask(gpu_id, &rm_mem->mapped_on) { + if (!uvm_global_id_equal(gpu_id, gpu_owner->global_id)) + uvm_rm_mem_unmap_gpu(rm_mem, uvm_gpu_get(gpu_id)); + } + + rm_mem_unmap_gpu(rm_mem, gpu_owner); + + UVM_ASSERT_MSG(uvm_global_processor_mask_empty(&rm_mem->mapped_on), + "Left-over %u mappings in rm_mem\n", + uvm_global_processor_mask_get_count(&rm_mem->mapped_on)); + + uvm_kvfree(rm_mem->proxy_vas); + uvm_kvfree(rm_mem); +} + +NV_STATUS uvm_rm_mem_alloc_and_map_cpu(uvm_gpu_t *gpu, uvm_rm_mem_type_t type, NvLength size, uvm_rm_mem_t **rm_mem_out) +{ + uvm_rm_mem_t *rm_mem; + NV_STATUS status; + + status = uvm_rm_mem_alloc(gpu, type, size, &rm_mem); + if (status != NV_OK) + return status; + + status = uvm_rm_mem_map_cpu(rm_mem); + if (status != NV_OK) + goto error; + + *rm_mem_out = rm_mem; + + return NV_OK; + +error: + uvm_rm_mem_free(rm_mem); + return status; +} + +NV_STATUS uvm_rm_mem_map_all_gpus(uvm_rm_mem_t *rm_mem) +{ + uvm_gpu_t *gpu; + + UVM_ASSERT(rm_mem); + + for_each_global_gpu(gpu) { + NV_STATUS status = uvm_rm_mem_map_gpu(rm_mem, gpu); + if (status != NV_OK) + return status; + } + return NV_OK; +} + +NV_STATUS uvm_rm_mem_alloc_and_map_all(uvm_gpu_t *gpu, uvm_rm_mem_type_t type, NvLength size, uvm_rm_mem_t **rm_mem_out) +{ + uvm_rm_mem_t *rm_mem; + NV_STATUS status; + + UVM_ASSERT(gpu); + + status = uvm_rm_mem_alloc_and_map_cpu(gpu, type, size, &rm_mem); + if (status != NV_OK) + return status; + + status = uvm_rm_mem_map_all_gpus(rm_mem); + if (status != NV_OK) + goto error; + + *rm_mem_out = rm_mem; + + return NV_OK; + +error: + uvm_rm_mem_free(rm_mem); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_rm_mem.h b/kernel-open/nvidia-uvm/uvm_rm_mem.h new file mode 100644 index 000000000..a83801453 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_rm_mem.h @@ -0,0 +1,136 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_RM_MEM_H__ +#define __UVM_RM_MEM_H__ + +#include "uvm_forward_decl.h" +#include "uvm_processors.h" +#include "uvm_test_ioctl.h" + +typedef enum +{ + UVM_RM_MEM_TYPE_GPU, + UVM_RM_MEM_TYPE_SYS, +} uvm_rm_mem_type_t; + +// Abstraction for memory allocations done through the UVM-RM interface +struct uvm_rm_mem_struct +{ + // Type of the memory + uvm_rm_mem_type_t type; + + // Mask of processors the memory is mapped on + uvm_global_processor_mask_t mapped_on; + + // VA of the memory on the UVM internal address space of each processor. If + // the memory has not been mapped on a given processor, the VA is zero. + NvU64 vas[UVM_GLOBAL_ID_MAX_PROCESSORS]; + + // VA of the memory in the proxy address space of each processor. If + // the memory has not been mapped on a given processor, the VA is zero. + // If the memory is mapped on the proxy address space of a processor, then + // it must be mapped on UVM's internal address space. + // + // This array is only allocated in SR-IOV heavy. It is sized, and indexed, + // as the 'vas' array. + NvU64 *proxy_vas; + + // The GPU the allocation originated from + uvm_gpu_t *gpu_owner; + + // Size of the allocation + NvLength size; +}; + +// Allocate memory of the given type and size in the GPU's UVM internal address +// space, and (in SR-IOV heavy) map it on the proxy address space as well. +// +// The GPU cannot be NULL and the memory is going to mapped on the GPU for the +// lifetime of the allocation. For sysmem allocations other GPUs can have a +// mapping created and removed dynamically with the uvm_rm_mem_(un)map_gpu() +// functions. +// +// Locking: +// - Internally acquires: +// - RM API lock +// - RM GPUs lock +NV_STATUS uvm_rm_mem_alloc(uvm_gpu_t *gpu, uvm_rm_mem_type_t type, NvLength size, uvm_rm_mem_t **rm_mem_out); + +// Free the memory. +// Clear all mappings and free the memory +// +// Locking same as uvm_rm_mem_alloc() +void uvm_rm_mem_free(uvm_rm_mem_t *rm_mem); + +// Map/Unmap on the CPU +// Locking same as uvm_rm_mem_alloc() +NV_STATUS uvm_rm_mem_map_cpu(uvm_rm_mem_t *rm_mem); +void uvm_rm_mem_unmap_cpu(uvm_rm_mem_t *rm_mem); + +// Shortcut for uvm_rm_mem_alloc() + uvm_rm_mem_map_cpu(). +// The function fails and nothing is allocated if any of the intermediate steps fail. +// +// Locking same as uvm_rm_mem_alloc() +NV_STATUS uvm_rm_mem_alloc_and_map_cpu(uvm_gpu_t *gpu, uvm_rm_mem_type_t type, NvLength size, uvm_rm_mem_t **rm_mem_out); + +// Shortcut for uvm_rm_mem_alloc_and_map_cpu() + uvm_rm_mem_map_all_gpus() +// The function fails and nothing is allocated if any of the intermediate steps fail. +// +// Locking same as uvm_rm_mem_alloc() +NV_STATUS uvm_rm_mem_alloc_and_map_all(uvm_gpu_t *gpu, uvm_rm_mem_type_t type, NvLength size, uvm_rm_mem_t **rm_mem_out); + +// Map/Unmap on UVM's internal address space of a GPU. In SR-IOV heavy the +// operation is also applied on the GPU's proxy address space. +// +// Supported only for sysmem (UVM_RM_MEM_TYPE_SYS). The GPU has to be different +// from the one the memory was originally allocated for. +// +// Locking same as uvm_rm_mem_alloc() +NV_STATUS uvm_rm_mem_map_gpu(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu); +void uvm_rm_mem_unmap_gpu(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu); + +// Map on UVM's internal address space of all GPUs retained by the UVM driver +// that do not yet have this allocation mapped. In SR-IOV heavy the memory is +// also mapped on the proxy address space of all GPUs. 
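+//
+// Illustrative call sequence (sketch only; error handling and the surrounding
+// locking are omitted):
+//
+//     uvm_rm_mem_t *rm_mem;
+//     NV_STATUS status;
+//
+//     status = uvm_rm_mem_alloc_and_map_cpu(gpu, UVM_RM_MEM_TYPE_SYS, size, &rm_mem);
+//     if (status == NV_OK)
+//         status = uvm_rm_mem_map_all_gpus(rm_mem);
+//
+//     // ... use uvm_rm_mem_get_cpu_va(rm_mem) / uvm_rm_mem_get_gpu_va(...) ...
+//
+//     uvm_rm_mem_free(rm_mem);    // tears down any remaining mappings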
+// +// Locking same as uvm_rm_mem_alloc() +NV_STATUS uvm_rm_mem_map_all_gpus(uvm_rm_mem_t *rm_mem); + +// Get the CPU VA, GPU VA (UVM internal/kernel address space), or GPU (proxy +// address space) +void *uvm_rm_mem_get_cpu_va(uvm_rm_mem_t *rm_mem); +NvU64 uvm_rm_mem_get_gpu_uvm_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu); +NvU64 uvm_rm_mem_get_gpu_proxy_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu); + +// Get the GPU VA of the given memory in UVM's internal address space (if the +// flag is false), or proxy address space (if flag is true). +NvU64 uvm_rm_mem_get_gpu_va(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu, bool is_proxy_va_space); + +// Query if the memory is mapped on the CPU, GPU (UVM internal/kernel address +// space), or GPU (proxy address space) +bool uvm_rm_mem_mapped_on_cpu(uvm_rm_mem_t *rm_mem); +bool uvm_rm_mem_mapped_on_gpu(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu); +bool uvm_rm_mem_mapped_on_gpu_proxy(uvm_rm_mem_t *rm_mem, uvm_gpu_t *gpu); + +#endif // __UVM_RM_MEM_H__ diff --git a/kernel-open/nvidia-uvm/uvm_rm_mem_test.c b/kernel-open/nvidia-uvm/uvm_rm_mem_test.c new file mode 100644 index 000000000..252a75474 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_rm_mem_test.c @@ -0,0 +1,178 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_rm_mem.h" +#include "uvm_test.h" +#include "uvm_test_ioctl.h" +#include "uvm_va_space.h" +#include "uvm_kvmalloc.h" + +static NV_STATUS map_cpu(uvm_rm_mem_t *rm_mem) +{ + void *cpu_va; + + // Map + TEST_NV_CHECK_RET(uvm_rm_mem_map_cpu(rm_mem)); + TEST_CHECK_RET(uvm_rm_mem_mapped_on_cpu(rm_mem)); + + // Mapping if already mapped is OK + TEST_NV_CHECK_RET(uvm_rm_mem_map_cpu(rm_mem)); + + // Unmap + uvm_rm_mem_unmap_cpu(rm_mem); + // Unmapping already unmapped also OK + uvm_rm_mem_unmap_cpu(rm_mem); + + // Map again + TEST_NV_CHECK_RET(uvm_rm_mem_map_cpu(rm_mem)); + + cpu_va = uvm_rm_mem_get_cpu_va(rm_mem); + TEST_CHECK_RET(cpu_va != NULL); + + // Check that the CPU VA is writable. + // memset and memcpy might cause alignment faults on aarch64. + // See Bug 2668765 for more details. Since this is a test ioctl and + // therefore not a critical performance path, it's safe to use memset_io on + // all platforms. 
+ memset_io(cpu_va, 0, rm_mem->size); + + return NV_OK; +} + +static NV_STATUS map_gpu_owner(uvm_rm_mem_t *rm_mem) +{ + uvm_gpu_t *gpu = rm_mem->gpu_owner; + + // The memory should have been automatically mapped in the GPU owner + TEST_CHECK_RET(uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)); + + // In SR-IOV heavy, there are two VA spaces per GPU, so there are two + // mappings for a single rm_mem object on a GPU, even if the memory is + // located in vidmem. + TEST_CHECK_RET(uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu) == uvm_gpu_uses_proxy_channel_pool(gpu)); + + // Explicitly mapping or unmapping to the GPU that owns the allocation is + // not allowed, so the testing related to GPU owners is simpler than that of + // other GPUs. + return NV_OK; +} + +static NV_STATUS map_other_gpus(uvm_rm_mem_t *rm_mem, uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu_owner = rm_mem->gpu_owner; + uvm_gpu_t *gpu; + + for_each_va_space_gpu(gpu, va_space) { + if (gpu == gpu_owner) + continue; + + TEST_NV_CHECK_RET(uvm_rm_mem_map_gpu(rm_mem, gpu)); + TEST_CHECK_RET(uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)); + + // Mappings are not ref counted, so additional map calls are no-ops + TEST_NV_CHECK_RET(uvm_rm_mem_map_gpu(rm_mem, gpu)); + + // The previous GPU map calls added mappings to the proxy VA space + // when in SR-IOV heavy mode + TEST_CHECK_RET(uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu) == uvm_gpu_uses_proxy_channel_pool(gpu)); + + // Unmapping removes all mappings + uvm_rm_mem_unmap_gpu(rm_mem, gpu); + TEST_CHECK_RET(!uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)); + TEST_CHECK_RET(!uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu)); + + // Additional unmappings are no-ops + uvm_rm_mem_unmap_gpu(rm_mem, gpu); + TEST_CHECK_RET(!uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)); + TEST_CHECK_RET(!uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu)); + + // Subsequent mappings should behave as they did in the beginning. 
+ TEST_NV_CHECK_RET(uvm_rm_mem_map_gpu(rm_mem, gpu)); + TEST_CHECK_RET(uvm_rm_mem_mapped_on_gpu(rm_mem, gpu)); + + TEST_CHECK_RET(uvm_rm_mem_mapped_on_gpu_proxy(rm_mem, gpu) == uvm_gpu_uses_proxy_channel_pool(gpu)); + } + + return NV_OK; +} + +static NV_STATUS test_all_gpus_in_va(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + uvm_rm_mem_t *rm_mem = NULL; + NV_STATUS status = NV_OK; + + // Create allocations of these types + static const uvm_rm_mem_type_t mem_types[] = { UVM_RM_MEM_TYPE_SYS, UVM_RM_MEM_TYPE_GPU }; + // Create allocations of these sizes + static const size_t sizes[] = {1, 4, 16, 128, 1024, 4096, 1024 * 1024, 4 * 1024 * 1024}; + + uvm_assert_rwsem_locked(&va_space->lock); + + TEST_CHECK_RET(!uvm_processor_mask_empty(&va_space->registered_gpus)); + + for_each_va_space_gpu(gpu, va_space) { + int i, j; + + for (i = 0; i < ARRAY_SIZE(sizes); ++i) { + for (j = 0; j < ARRAY_SIZE(mem_types); ++j) { + + // Create an allocation in the GPU's address space + TEST_NV_CHECK_RET(uvm_rm_mem_alloc(gpu, mem_types[j], sizes[i], &rm_mem)); + + // Test CPU mappings + TEST_NV_CHECK_GOTO(map_cpu(rm_mem), error); + + // Test mappings in the GPU owning the allocation + TEST_NV_CHECK_GOTO(map_gpu_owner(rm_mem), error); + + // For sysmem allocations, test mappings on all other GPUs + if (rm_mem->type == UVM_RM_MEM_TYPE_SYS) + TEST_NV_CHECK_GOTO(map_other_gpus(rm_mem, va_space), error); + + uvm_rm_mem_free(rm_mem); + } + } + } + + return NV_OK; + +error: + uvm_rm_mem_free(rm_mem); + + return status; +} + +NV_STATUS uvm_test_rm_mem_sanity(UVM_TEST_RM_MEM_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_read_rm(va_space); + + status = test_all_gpus_in_va(va_space); + + uvm_va_space_up_read_rm(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_test.c b/kernel-open/nvidia-uvm/uvm_test.c new file mode 100644 index 000000000..a738a1e34 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_test.c @@ -0,0 +1,337 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_api.h" +#include "uvm_test.h" +#include "uvm_test_ioctl.h" +#include "uvm_global.h" +#include "uvm_va_space.h" +#include "uvm_va_space_mm.h" +#include "uvm_va_range.h" +#include "uvm_va_block.h" +#include "uvm_test_rng.h" +#include "uvm_kvmalloc.h" +#include "uvm_perf_events.h" +#include "uvm_tools.h" +#include "uvm_mmu.h" +#include "uvm_gpu_access_counters.h" +#include "uvm_pmm_sysmem.h" + +static NV_STATUS uvm_test_get_gpu_ref_count(UVM_TEST_GET_GPU_REF_COUNT_PARAMS *params, struct file *filp) +{ + NvU64 retained_count = 0; + uvm_parent_gpu_t *parent_gpu; + uvm_gpu_t *gpu = NULL; + + uvm_mutex_lock(&g_uvm_global.global_lock); + + parent_gpu = uvm_parent_gpu_get_by_uuid(¶ms->gpu_uuid); + if (parent_gpu) + gpu = uvm_gpu_get_by_parent_and_swizz_id(parent_gpu, params->swizz_id); + + if (gpu != NULL) + retained_count = uvm_gpu_retained_count(gpu); + + uvm_mutex_unlock(&g_uvm_global.global_lock); + + params->ref_count = retained_count; + return NV_OK; +} + +static NV_STATUS uvm_test_peer_ref_count(UVM_TEST_PEER_REF_COUNT_PARAMS *params, struct file *filp) +{ + NvU64 registered_ref_count = 0; + uvm_gpu_t *gpu0 = NULL; + uvm_gpu_t *gpu1 = NULL; + NV_STATUS status = NV_OK; + + uvm_mutex_lock(&g_uvm_global.global_lock); + + gpu0 = uvm_gpu_get_by_uuid(¶ms->gpu_uuid_1); + gpu1 = uvm_gpu_get_by_uuid(¶ms->gpu_uuid_2); + + if (gpu0 != NULL && gpu1 != NULL) { + uvm_gpu_peer_t *peer_caps = uvm_gpu_peer_caps(gpu0, gpu1); + registered_ref_count = peer_caps->ref_count; + } + else { + status = NV_ERR_INVALID_DEVICE; + } + + uvm_mutex_unlock(&g_uvm_global.global_lock); + + params->ref_count = registered_ref_count; + + return status; +} + +static NV_STATUS uvm_test_make_channel_stops_immediate(UVM_TEST_MAKE_CHANNEL_STOPS_IMMEDIATE_PARAMS *params, + struct file *filp) +{ + uvm_va_space_get(filp)->user_channel_stops_are_immediate = NV_TRUE; + + return NV_OK; +} + +static NV_STATUS uvm_test_nv_kthread_q(UVM_TEST_NV_KTHREAD_Q_PARAMS *params, struct file *filp) +{ + // The nv-kthread-q system returns 0 or -1, because it is not actually + // part of UVM. UVM needs to run this test, because otherwise, the + // nv-kthread-q code would not get adequate test coverage. That's because + // UVM is the first user of nv-kthread-q. + int result = nv_kthread_q_run_self_test(); + if (result == 0) + return NV_OK; + + return NV_ERR_INVALID_STATE; +} + +static NV_STATUS uvm_test_numa_get_closest_cpu_node_to_gpu(UVM_TEST_NUMA_GET_CLOSEST_CPU_NODE_TO_GPU_PARAMS *params, + struct file *filp) +{ + uvm_gpu_t *gpu; + NV_STATUS status; + uvm_rm_user_object_t user_rm_va_space = { + .rm_control_fd = -1, + .user_client = params->client, + .user_object = params->smc_part_ref + }; + + status = uvm_gpu_retain_by_uuid(¶ms->gpu_uuid, &user_rm_va_space, &gpu); + if (status != NV_OK) + return status; + + params->node_id = gpu->parent->closest_cpu_numa_node; + uvm_gpu_release(gpu); + return NV_OK; +} + +// Callers of this function should ensure that node is not NUMA_NO_NODE in order +// to avoid overrunning the kernel's node to cpumask map. +static NV_STATUS uvm_test_verify_bh_affinity(uvm_intr_handler_t *isr, int node) +{ + UVM_ASSERT(node != NUMA_NO_NODE); + + // If the bottom half has been executed but its cpus_used_mask is empty + // something obviously went wrong. Otherwise, check that the CPUs on which + // the bottom half was executed is a subset of the NUMA node's cpumask. 
+ if ((isr->stats.bottom_half_count && cpumask_empty(&isr->stats.cpus_used_mask)) || + !cpumask_subset(&isr->stats.cpus_used_mask, uvm_cpumask_of_node(node))) { + UVM_TEST_PRINT("ISR BH cpu mask check failed! BH ran on CPU cores outside NUMA %u\n", + node); + return NV_ERR_INVALID_STATE; + } + return NV_OK; +} + +static NV_STATUS uvm_test_numa_check_affinity(UVM_TEST_NUMA_CHECK_AFFINITY_PARAMS *params, struct file *filp) +{ + uvm_gpu_t *gpu; + NV_STATUS status; + uvm_rm_user_object_t user_rm_va_space = { + .rm_control_fd = -1, + .user_client = params->client, + .user_object = params->smc_part_ref + }; + + if (!UVM_THREAD_AFFINITY_SUPPORTED()) + return NV_ERR_NOT_SUPPORTED; + + status = uvm_gpu_retain_by_uuid(¶ms->gpu_uuid, &user_rm_va_space, &gpu); + if (status != NV_OK) + return status; + + // If the GPU is not attached to a NUMA node, there is nothing to do. + if (gpu->parent->closest_cpu_numa_node == NUMA_NO_NODE) { + status = NV_ERR_NOT_SUPPORTED; + goto release; + } + + if (gpu->parent->replayable_faults_supported) { + uvm_gpu_replayable_faults_isr_lock(gpu->parent); + status = uvm_test_verify_bh_affinity(&gpu->parent->isr.replayable_faults, + gpu->parent->closest_cpu_numa_node); + uvm_gpu_replayable_faults_isr_unlock(gpu->parent); + if (status != NV_OK) + goto release; + + if (gpu->parent->non_replayable_faults_supported) { + uvm_gpu_non_replayable_faults_isr_lock(gpu->parent); + status = uvm_test_verify_bh_affinity(&gpu->parent->isr.non_replayable_faults, + gpu->parent->closest_cpu_numa_node); + uvm_gpu_non_replayable_faults_isr_unlock(gpu->parent); + if (status != NV_OK) + goto release; + } + + if (gpu->parent->access_counters_supported) { + uvm_gpu_access_counters_isr_lock(gpu->parent); + status = uvm_test_verify_bh_affinity(&gpu->parent->isr.access_counters, + gpu->parent->closest_cpu_numa_node); + uvm_gpu_access_counters_isr_unlock(gpu->parent); + } + } +release: + uvm_gpu_release(gpu); + return status; +} + +static NV_STATUS uvm_test_get_kernel_virtual_address(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS_PARAMS *params, + struct file *filp) +{ + params->addr = (NvU64)uvm_va_space_get(filp); + + return NV_OK; +} + +static NV_STATUS uvm_test_get_user_space_end_address(UVM_TEST_GET_USER_SPACE_END_ADDRESS_PARAMS *params, + struct file *flip) +{ + params->user_space_end_address = TASK_SIZE; + + return NV_OK; +} + +static NV_STATUS uvm_test_cgroup_accounting_supported(UVM_TEST_CGROUP_ACCOUNTING_SUPPORTED_PARAMS *params, + struct file *flip) +{ + return UVM_CGROUP_ACCOUNTING_SUPPORTED() ? NV_OK : NV_ERR_NOT_SUPPORTED; +} + +long uvm_test_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + // Disable all test entry points if the module parameter wasn't provided. + // These should not be enabled in a production environment. + if (!uvm_enable_builtin_tests) { + UVM_INFO_PRINT("ioctl %d not found. 
Did you mean to insmod with uvm_enable_builtin_tests=1?\n", cmd); + return -EINVAL; + } + + switch (cmd) + { + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_GET_GPU_REF_COUNT, uvm_test_get_gpu_ref_count); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RNG_SANITY, uvm_test_rng_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RANGE_TREE_DIRECTED, uvm_test_range_tree_directed); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RANGE_TREE_RANDOM, uvm_test_range_tree_random); + UVM_ROUTE_CMD_ALLOC_INIT_CHECK(UVM_TEST_VA_RANGE_INFO, uvm_test_va_range_info); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RM_MEM_SANITY, uvm_test_rm_mem_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_GPU_SEMAPHORE_SANITY, uvm_test_gpu_semaphore_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PEER_REF_COUNT, uvm_test_peer_ref_count); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_RANGE_SPLIT, uvm_test_va_range_split); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_RANGE_INJECT_SPLIT_ERROR, uvm_test_va_range_inject_split_error); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PAGE_TREE, uvm_test_page_tree); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_CHANGE_PTE_MAPPING, uvm_test_change_pte_mapping); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_TRACKER_SANITY, uvm_test_tracker_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PUSH_SANITY, uvm_test_push_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_CHANNEL_SANITY, uvm_test_channel_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_CHANNEL_STRESS, uvm_test_channel_stress); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_CE_SANITY, uvm_test_ce_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_HOST_SANITY, uvm_test_host_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_BLOCK_INFO, uvm_test_va_block_info); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_LOCK_SANITY, uvm_test_lock_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PERF_UTILS_SANITY, uvm_test_perf_utils_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_KVMALLOC, uvm_test_kvmalloc); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_QUERY, uvm_test_pmm_query); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_CHECK_LEAK, uvm_test_pmm_check_leak); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PERF_EVENTS_SANITY, uvm_test_perf_events_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PERF_MODULE_SANITY, uvm_test_perf_module_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RANGE_ALLOCATOR_SANITY, uvm_test_range_allocator_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_GET_RM_PTES, uvm_test_get_rm_ptes); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_FAULT_BUFFER_FLUSH, uvm_test_fault_buffer_flush); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_INJECT_TOOLS_EVENT, uvm_test_inject_tools_event); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_INCREMENT_TOOLS_COUNTER, uvm_test_increment_tools_counter); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_MEM_SANITY, uvm_test_mem_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_MAKE_CHANNEL_STOPS_IMMEDIATE, uvm_test_make_channel_stops_immediate); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_BLOCK_INJECT_ERROR, uvm_test_va_block_inject_error); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PEER_IDENTITY_MAPPINGS, uvm_test_peer_identity_mappings); + UVM_ROUTE_CMD_ALLOC_INIT_CHECK(UVM_TEST_VA_RESIDENCY_INFO, uvm_test_va_residency_info); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_ASYNC_ALLOC, uvm_test_pmm_async_alloc); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_SET_PREFETCH_FILTERING, uvm_test_set_prefetch_filtering); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_SANITY, uvm_test_pmm_sanity); + 
UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_INVALIDATE_TLB, uvm_test_invalidate_tlb); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_BLOCK, uvm_test_va_block); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_EVICT_CHUNK, uvm_test_evict_chunk); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_FLUSH_DEFERRED_WORK, uvm_test_flush_deferred_work); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_NV_KTHREAD_Q, uvm_test_nv_kthread_q); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_SET_PAGE_PREFETCH_POLICY, uvm_test_set_page_prefetch_policy); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RANGE_GROUP_TREE, uvm_test_range_group_tree); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RANGE_GROUP_RANGE_INFO, uvm_test_range_group_range_info); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RANGE_GROUP_RANGE_COUNT, uvm_test_range_group_range_count); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE, + uvm_test_get_prefetch_faults_reenable_lapse); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE, + uvm_test_set_prefetch_faults_reenable_lapse); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS, uvm_test_get_kernel_virtual_address); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMA_ALLOC_FREE, uvm_test_pma_alloc_free); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_ALLOC_FREE_ROOT, uvm_test_pmm_alloc_free_root); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR, uvm_test_pmm_inject_pma_evict_error); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_ACCESS_COUNTERS_ENABLED_BY_DEFAULT, + uvm_test_access_counters_enabled_by_default); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RECONFIGURE_ACCESS_COUNTERS, uvm_test_reconfigure_access_counters); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RESET_ACCESS_COUNTERS, uvm_test_reset_access_counters); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_SET_IGNORE_ACCESS_COUNTERS, uvm_test_set_ignore_access_counters); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_CHECK_CHANNEL_VA_SPACE, uvm_test_check_channel_va_space); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_ENABLE_NVLINK_PEER_ACCESS, uvm_test_enable_nvlink_peer_access); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_DISABLE_NVLINK_PEER_ACCESS, uvm_test_disable_nvlink_peer_access); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_GET_PAGE_THRASHING_POLICY, uvm_test_get_page_thrashing_policy); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_SET_PAGE_THRASHING_POLICY, uvm_test_set_page_thrashing_policy); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_SYSMEM, uvm_test_pmm_sysmem); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_REVERSE_MAP, uvm_test_pmm_reverse_map); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_INDIRECT_PEERS, uvm_test_pmm_indirect_peers); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_SPACE_MM_RETAIN, uvm_test_va_space_mm_retain); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_SPACE_MM_DELAY_SHUTDOWN, uvm_test_va_space_mm_delay_shutdown); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_CHUNK_WITH_ELEVATED_PAGE, uvm_test_pmm_chunk_with_elevated_page); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_SPACE_INJECT_ERROR, uvm_test_va_space_inject_error); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_GET_GPU_TIME, uvm_test_get_gpu_time); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_RELEASE_FREE_ROOT_CHUNKS, uvm_test_pmm_release_free_root_chunks); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_DRAIN_REPLAYABLE_FAULTS, uvm_test_drain_replayable_faults); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMA_GET_BATCH_SIZE, uvm_test_pma_get_batch_size); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_PMM_QUERY_PMA_STATS, 
uvm_test_pmm_query_pma_stats); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_NUMA_GET_CLOSEST_CPU_NODE_TO_GPU, + uvm_test_numa_get_closest_cpu_node_to_gpu); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_NUMA_CHECK_AFFINITY, uvm_test_numa_check_affinity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_SPACE_ADD_DUMMY_THREAD_CONTEXTS, + uvm_test_va_space_add_dummy_thread_contexts); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_SPACE_REMOVE_DUMMY_THREAD_CONTEXTS, + uvm_test_va_space_remove_dummy_thread_contexts); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_THREAD_CONTEXT_SANITY, uvm_test_thread_context_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_THREAD_CONTEXT_PERF, uvm_test_thread_context_perf); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_GET_PAGEABLE_MEM_ACCESS_TYPE, uvm_test_get_pageable_mem_access_type); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_TOOLS_FLUSH_REPLAY_EVENTS, uvm_test_tools_flush_replay_events); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER, uvm_test_register_unload_state_buffer); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RB_TREE_DIRECTED, uvm_test_rb_tree_directed); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_RB_TREE_RANDOM, uvm_test_rb_tree_random); + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TEST_GET_USER_SPACE_END_ADDRESS, uvm_test_get_user_space_end_address); + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TEST_GET_CPU_CHUNK_ALLOC_SIZES, uvm_test_get_cpu_chunk_allocation_sizes); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_HMM_SANITY, uvm_test_hmm_sanity); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_VA_RANGE_INJECT_ADD_GPU_VA_SPACE_ERROR, + uvm_test_va_range_inject_add_gpu_va_space_error); + UVM_ROUTE_CMD_STACK_INIT_CHECK(UVM_TEST_DESTROY_GPU_VA_SPACE_DELAY, uvm_test_destroy_gpu_va_space_delay); + + + + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TEST_CGROUP_ACCOUNTING_SUPPORTED, uvm_test_cgroup_accounting_supported); + } + + return -EINVAL; +} diff --git a/kernel-open/nvidia-uvm/uvm_test.h b/kernel-open/nvidia-uvm/uvm_test.h new file mode 100644 index 000000000..8b7b56a1b --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_test.h @@ -0,0 +1,194 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_TEST_H__ +#define __UVM_TEST_H__ + +#include "uvm_linux.h" +#include "uvm_common.h" +#include "uvm_test_ioctl.h" + +// Unlike UVM_INFO_PRINT, this prints on release builds +#define UVM_TEST_PRINT(fmt, ...) UVM_PRINT_FUNC(pr_info, " " fmt, ##__VA_ARGS__) + +// WARNING: This macro will return out of the current scope +#define TEST_CHECK_RET(cond) \ + do { \ + if (unlikely(!(cond))) { \ + UVM_TEST_PRINT("Test check failed, condition '%s' not true\n", #cond); \ + on_uvm_test_fail(); \ + return NV_ERR_INVALID_STATE; \ + } \ + } while (0) + +// WARNING: This macro will return out of the current scope +#define TEST_NV_CHECK_RET(call) \ + do { \ + NV_STATUS _status = (call); \ + if (unlikely(_status != NV_OK)) { \ + UVM_TEST_PRINT("Test check failed, call '%s' returned '%s', expected '%s'\n", \ + #call, \ + nvstatusToString(_status), \ + nvstatusToString(NV_OK)); \ + on_uvm_test_fail(); \ + return _status; \ + } \ + } while (0) + +// Checking macro which doesn't mask NV_ERR_NO_MEMORY +#define MEM_NV_CHECK_RET(call, expected) \ + do { \ + NV_STATUS _status = (call); \ + if (unlikely(_status != (expected))) { \ + UVM_TEST_PRINT("Test check failed, call '%s' returned '%s', expected '%s'\n", \ + #call, \ + nvstatusToString(_status), \ + nvstatusToString(expected)); \ + on_uvm_test_fail(); \ + if (_status == NV_ERR_NO_MEMORY) \ + return _status; \ + return NV_ERR_INVALID_STATE; \ + } \ + } while (0) + +// Checking macro which sets a local variable 'status' (assumed to be in scope) +// on failure and jumps to the label. +#define TEST_NV_CHECK_GOTO(call, label) \ + do { \ + NV_STATUS _status = (call); \ + if (unlikely(_status != NV_OK)) { \ + UVM_TEST_PRINT("Test check failed, call '%s' returned '%s', expected '%s'\n", \ + #call, \ + nvstatusToString(_status), \ + nvstatusToString(NV_OK)); \ + on_uvm_test_fail(); \ + status = _status; \ + goto label; \ + } \ + } while (0) + +// WARNING: This macro sets status and jumps to (goto) a label on failure +#define TEST_CHECK_GOTO(cond, label) \ + do { \ + if (unlikely(!(cond))) { \ + UVM_TEST_PRINT("Test check failed, condition '%s' not true\n", #cond); \ + status = NV_ERR_INVALID_STATE; \ + on_uvm_test_fail(); \ + goto label; \ + } \ + } while (0) + + +long uvm_test_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +NV_STATUS uvm_test_range_tree_directed(UVM_TEST_RANGE_TREE_DIRECTED_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_range_tree_random(UVM_TEST_RANGE_TREE_RANDOM_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_range_allocator_sanity(UVM_TEST_RANGE_ALLOCATOR_SANITY_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_page_tree(UVM_TEST_PAGE_TREE_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_rm_mem_sanity(UVM_TEST_RM_MEM_SANITY_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_mem_sanity(UVM_TEST_MEM_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_gpu_semaphore_sanity(UVM_TEST_GPU_SEMAPHORE_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_tracker_sanity(UVM_TEST_TRACKER_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_push_sanity(UVM_TEST_PUSH_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_channel_sanity(UVM_TEST_CHANNEL_SANITY_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_channel_stress(UVM_TEST_CHANNEL_STRESS_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_ce_sanity(UVM_TEST_CE_SANITY_PARAMS *params, struct file 
*filp); +NV_STATUS uvm_test_host_sanity(UVM_TEST_HOST_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_lock_sanity(UVM_TEST_LOCK_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_perf_utils_sanity(UVM_TEST_PERF_UTILS_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_pmm_query(UVM_TEST_PMM_QUERY_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pmm_sanity(UVM_TEST_PMM_SANITY_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pmm_check_leak(UVM_TEST_PMM_CHECK_LEAK_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pmm_async_alloc(UVM_TEST_PMM_ASYNC_ALLOC_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pma_alloc_free(UVM_TEST_PMA_ALLOC_FREE_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pma_get_batch_size(UVM_TEST_PMA_GET_BATCH_SIZE_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pmm_alloc_free_root(UVM_TEST_PMM_ALLOC_FREE_ROOT_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pmm_inject_pma_evict_error(UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pmm_indirect_peers(UVM_TEST_PMM_INDIRECT_PEERS_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_pmm_query_pma_stats(UVM_TEST_PMM_QUERY_PMA_STATS_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_perf_events_sanity(UVM_TEST_PERF_EVENTS_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_perf_module_sanity(UVM_TEST_PERF_MODULE_SANITY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_get_rm_ptes(UVM_TEST_GET_RM_PTES_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_fault_buffer_flush(UVM_TEST_FAULT_BUFFER_FLUSH_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_peer_identity_mappings(UVM_TEST_PEER_IDENTITY_MAPPINGS_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_set_prefetch_filtering(UVM_TEST_SET_PREFETCH_FILTERING_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_va_block(UVM_TEST_VA_BLOCK_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_evict_chunk(UVM_TEST_EVICT_CHUNK_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_flush_deferred_work(UVM_TEST_FLUSH_DEFERRED_WORK_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_set_page_prefetch_policy(UVM_TEST_SET_PAGE_PREFETCH_POLICY_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_get_page_thrashing_policy(UVM_TEST_GET_PAGE_THRASHING_POLICY_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_set_page_thrashing_policy(UVM_TEST_SET_PAGE_THRASHING_POLICY_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_range_group_tree(UVM_TEST_RANGE_GROUP_TREE_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_range_group_range_info(UVM_TEST_RANGE_GROUP_RANGE_INFO_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_range_group_range_count(UVM_TEST_RANGE_GROUP_RANGE_COUNT_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_get_prefetch_faults_reenable_lapse(UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_set_prefetch_faults_reenable_lapse(UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_check_channel_va_space(UVM_TEST_CHECK_CHANNEL_VA_SPACE_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_pmm_sysmem(UVM_TEST_PMM_SYSMEM_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_pmm_reverse_map(UVM_TEST_PMM_REVERSE_MAP_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_pmm_chunk_with_elevated_page(UVM_TEST_PMM_CHUNK_WITH_ELEVATED_PAGE_PARAMS 
*params, struct file *filp); +NV_STATUS uvm_test_va_space_inject_error(UVM_TEST_VA_SPACE_INJECT_ERROR_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_get_gpu_time(UVM_TEST_GET_GPU_TIME_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_pmm_release_free_root_chunks(UVM_TEST_PMM_RELEASE_FREE_ROOT_CHUNKS_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_drain_replayable_faults(UVM_TEST_DRAIN_REPLAYABLE_FAULTS_PARAMS *params, struct file *filp); + +NV_STATUS uvm_test_va_space_add_dummy_thread_contexts(UVM_TEST_VA_SPACE_ADD_DUMMY_THREAD_CONTEXTS_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_va_space_remove_dummy_thread_contexts(UVM_TEST_VA_SPACE_REMOVE_DUMMY_THREAD_CONTEXTS_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_thread_context_sanity(UVM_TEST_THREAD_CONTEXT_SANITY_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_thread_context_perf(UVM_TEST_THREAD_CONTEXT_PERF_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_tools_flush_replay_events(UVM_TEST_TOOLS_FLUSH_REPLAY_EVENTS_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_register_unload_state_buffer(UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_rb_tree_directed(UVM_TEST_RB_TREE_DIRECTED_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_rb_tree_random(UVM_TEST_RB_TREE_RANDOM_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_hmm_sanity(UVM_TEST_HMM_SANITY_PARAMS *params, struct file *filp); + + + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_test_ioctl.h b/kernel-open/nvidia-uvm/uvm_test_ioctl.h new file mode 100644 index 000000000..be3925a25 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_test_ioctl.h @@ -0,0 +1,1411 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVidia Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+*******************************************************************************/ + +#ifndef __UVM_TEST_IOCTL_H__ +#define __UVM_TEST_IOCTL_H__ + +#ifndef __KERNEL__ + +#endif +#include "uvm_types.h" +#include "uvm_ioctl.h" +#include "nv_uvm_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// Offset the test ioctl to leave space for the api ones +#define UVM_TEST_IOCTL_BASE(i) UVM_IOCTL_BASE(200 + i) + +#define UVM_TEST_GET_GPU_REF_COUNT UVM_TEST_IOCTL_BASE(0) +typedef struct +{ + // In params + NvProcessorUuid gpu_uuid; + NvU32 swizz_id; + // Out params + NvU64 ref_count NV_ALIGN_BYTES(8); + NV_STATUS rmStatus; +} UVM_TEST_GET_GPU_REF_COUNT_PARAMS; + +#define UVM_TEST_RNG_SANITY UVM_TEST_IOCTL_BASE(1) +typedef struct +{ + NV_STATUS rmStatus; +} UVM_TEST_RNG_SANITY_PARAMS; + +#define UVM_TEST_RANGE_TREE_DIRECTED UVM_TEST_IOCTL_BASE(2) +typedef struct +{ + NV_STATUS rmStatus; +} UVM_TEST_RANGE_TREE_DIRECTED_PARAMS; + +#define UVM_TEST_RANGE_TREE_RANDOM UVM_TEST_IOCTL_BASE(3) +typedef struct +{ + NvU32 seed; // In + NvU64 main_iterations NV_ALIGN_BYTES(8); // In + NvU32 verbose; // In + + // Probability (0-100) + // + // When the test starts up, it adds and splits ranges with high_probability. + // Eventually when adds and splits fail too often, they'll invert their + // probability to 100 - high_probability. They'll switch back when the tree + // becomes too empty. + // + // This can be < 50, but the test will not be very interesting. + NvU32 high_probability; // In + + // Probability (0-100) + // + // Every main iteration a group of operations is selected with this + // probability. The group consists of either "add/remove" or "split/merge." + // This is the chance that the "add/remove" group is selected each + // iteration. + NvU32 add_remove_shrink_group_probability; + + // Probability (0-100) + // + // Probability of picking the shrink operation instead of add/remove if the + // add/remove/shrink group of operations is selected. + NvU32 shrink_probability; + + // The number of collision verification checks to make each main iteration + NvU32 collision_checks; // In + + // The number of tree iterator verification checks to make each main + // iteration. + NvU32 iterator_checks; // In + + // Highest range value to use + NvU64 max_end NV_ALIGN_BYTES(8); // In + + // Maximum number of range nodes to put in the tree + NvU64 max_ranges NV_ALIGN_BYTES(8); // In + + // Maximum number of range nodes to add or remove at one time + NvU64 max_batch_count NV_ALIGN_BYTES(8); // In + + // add, split, and merge operations all operate on randomly-selected ranges + // or nodes. It's possible, sometimes even likely, that the operation cannot + // be performed on the selected range or node. + // + // For example, when a range node is added its range is selected at random + // without regard to range nodes already in the tree. If a collision occurs + // when the test attempts to add that node to the tree, a new, smaller + // random range is selected and the attempt is made again. + // + // max_attempts is the maximum number of times to keep picking new ranges or + // nodes before giving up on the operation. 
+ NvU32 max_attempts; // In + + struct + { + NvU64 total_adds NV_ALIGN_BYTES(8); + NvU64 failed_adds NV_ALIGN_BYTES(8); + NvU64 max_attempts_add NV_ALIGN_BYTES(8); + NvU64 total_removes NV_ALIGN_BYTES(8); + NvU64 total_splits NV_ALIGN_BYTES(8); + NvU64 failed_splits NV_ALIGN_BYTES(8); + NvU64 max_attempts_split NV_ALIGN_BYTES(8); + NvU64 total_merges NV_ALIGN_BYTES(8); + NvU64 failed_merges NV_ALIGN_BYTES(8); + NvU64 max_attempts_merge NV_ALIGN_BYTES(8); + NvU64 total_shrinks NV_ALIGN_BYTES(8); + NvU64 failed_shrinks NV_ALIGN_BYTES(8); + } stats; // Out + + NV_STATUS rmStatus; // Out +} UVM_TEST_RANGE_TREE_RANDOM_PARAMS; + +// Keep this in sync with uvm_va_range_type_t in uvm_va_range.h +typedef enum +{ + UVM_TEST_VA_RANGE_TYPE_INVALID = 0, + UVM_TEST_VA_RANGE_TYPE_MANAGED, + UVM_TEST_VA_RANGE_TYPE_EXTERNAL, + UVM_TEST_VA_RANGE_TYPE_CHANNEL, + UVM_TEST_VA_RANGE_TYPE_SKED_REFLECTED, + UVM_TEST_VA_RANGE_TYPE_SEMAPHORE_POOL, + UVM_TEST_VA_RANGE_TYPE_MAX +} UVM_TEST_VA_RANGE_TYPE; + +// Keep this in sync with uvm_read_duplication_t in uvm_va_range.h +typedef enum +{ + UVM_TEST_READ_DUPLICATION_UNSET = 0, + UVM_TEST_READ_DUPLICATION_ENABLED, + UVM_TEST_READ_DUPLICATION_DISABLED, + UVM_TEST_READ_DUPLICATION_MAX +} UVM_TEST_READ_DUPLICATION_POLICY; + +typedef struct +{ + // Note: if this is a zombie or not owned by the calling process, the vma info + // will not be filled out and is invalid. + NvU64 vma_start NV_ALIGN_BYTES(8); // Out + NvU64 vma_end NV_ALIGN_BYTES(8); // Out, inclusive + NvBool is_zombie; // Out + // Note: if this is a zombie, this field is meaningless. + NvBool owned_by_calling_process; // Out +} UVM_TEST_VA_RANGE_INFO_MANAGED; + +#define UVM_TEST_VA_RANGE_INFO UVM_TEST_IOCTL_BASE(4) +typedef struct +{ + NvU64 lookup_address NV_ALIGN_BYTES(8); // In + + NvU64 va_range_start NV_ALIGN_BYTES(8); // Out + NvU64 va_range_end NV_ALIGN_BYTES(8); // Out, inclusive + NvU32 read_duplication; // Out (UVM_TEST_READ_DUPLICATION_POLICY) + NvProcessorUuid preferred_location; // Out + NvProcessorUuid accessed_by[UVM_MAX_PROCESSORS]; // Out + NvU32 accessed_by_count; // Out + NvU32 type; // Out (UVM_TEST_VA_RANGE_TYPE) + union + { + UVM_TEST_VA_RANGE_INFO_MANAGED managed NV_ALIGN_BYTES(8); // Out + // More here eventually + }; + + // NV_ERR_INVALID_ADDRESS lookup_address doesn't match a UVM range + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_RANGE_INFO_PARAMS; + +#define UVM_TEST_RM_MEM_SANITY UVM_TEST_IOCTL_BASE(5) +typedef struct +{ + // Out params + NV_STATUS rmStatus; +} UVM_TEST_RM_MEM_SANITY_PARAMS; + +#define UVM_TEST_GPU_SEMAPHORE_SANITY UVM_TEST_IOCTL_BASE(6) +typedef struct +{ + // Out params + NV_STATUS rmStatus; +} UVM_TEST_GPU_SEMAPHORE_SANITY_PARAMS; + +#define UVM_TEST_PEER_REF_COUNT UVM_TEST_IOCTL_BASE(7) +typedef struct +{ + // In params + NvProcessorUuid gpu_uuid_1; + NvProcessorUuid gpu_uuid_2; + + // Out params + NV_STATUS rmStatus; + NvU64 ref_count NV_ALIGN_BYTES(8); +} UVM_TEST_PEER_REF_COUNT_PARAMS; + +// Force an existing UVM range to split. split_address will be the new end of +// the existing range. A new range will be created covering +// [split_address+1, original end]. +// +// Error returns: +// NV_ERR_INVALID_ADDRESS +// - split_address+1 isn't page-aligned +// - split_address doesn't match a splittable UVM range +// - The range cannot be split at split_address because split_address is +// already the end of the range. 
+#define UVM_TEST_VA_RANGE_SPLIT UVM_TEST_IOCTL_BASE(8) +typedef struct +{ + NvU64 split_address NV_ALIGN_BYTES(8); // In + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_RANGE_SPLIT_PARAMS; + +// Forces the next range split on the range covering lookup_address to fail with +// an out-of-memory error. Only the next split will fail. Subsequent ones will +// succeed. The split can come from any source, such as vma splitting or +// UVM_TEST_VA_RANGE_SPLIT. +// +// Error returns: +// NV_ERR_INVALID_ADDRESS +// - lookup_address doesn't match a UVM range +#define UVM_TEST_VA_RANGE_INJECT_SPLIT_ERROR UVM_TEST_IOCTL_BASE(9) +typedef struct +{ + NvU64 lookup_address NV_ALIGN_BYTES(8); // In + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_RANGE_INJECT_SPLIT_ERROR_PARAMS; + +#define UVM_TEST_PAGE_TREE UVM_TEST_IOCTL_BASE(10) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_PAGE_TREE_PARAMS; + +// Given a VA and a target processor, forcibly set that processor's mapping to +// the VA to the given permissions. This may require changing other processors' +// mappings. For example, setting an atomic mapping for a given GPU might make +// other GPUs' mappings read-only. +// +// If the mapping changes from invalid to anything else, this call always +// attempts to create direct mappings from the given processor to the current +// physical memory backing the target address. If a direct mapping cannot be +// created, or no physical memory currently backs the VA, +// NV_ERR_INVALID_OPERATION is returned. +// +// uuid is allowed to be NV_PROCESSOR_UUID_CPU_DEFAULT. +// +// Error returns: +// NV_ERR_INVALID_DEVICE +// - uuid is an unknown value +// - uuid is a GPU that hasn't been registered with this process +// +// NV_ERR_INVALID_ADDRESS +// - VA is unknown to the kernel +// - VA isn't aligned to the system page size +// +// NV_ERR_INVALID_STATE +// - A mapping for va can't be accessed because it belongs to another process +// +// NV_ERR_INVALID_ARGUMENT +// - mapping is not a valid enum value +// +// NV_ERR_INVALID_ACCESS_TYPE +// - The mapping permissions aren't logically allowed. For example, +// UVM_TEST_PTE_MAPPING_READ_WRITE can't be set on a read-only mapping. +// +// NV_ERR_INVALID_OPERATION +// - mapping is not UVM_TEST_PTE_MAPPING_INVALID, and a direct mapping from the +// given processor to the physical memory currently backing VA cannot be +// created. 
+#define UVM_TEST_CHANGE_PTE_MAPPING UVM_TEST_IOCTL_BASE(11) + +typedef enum +{ + UVM_TEST_PTE_MAPPING_INVALID = 0, + UVM_TEST_PTE_MAPPING_READ_ONLY, + UVM_TEST_PTE_MAPPING_READ_WRITE, + UVM_TEST_PTE_MAPPING_READ_WRITE_ATOMIC, + UVM_TEST_PTE_MAPPING_MAX +} UVM_TEST_PTE_MAPPING; + +typedef struct +{ + NvProcessorUuid uuid NV_ALIGN_BYTES(8); // In + NvU64 va NV_ALIGN_BYTES(8); // In + NvU32 mapping; // In (UVM_TEST_PTE_MAPPING) + NV_STATUS rmStatus; // Out +} UVM_TEST_CHANGE_PTE_MAPPING_PARAMS; + +#define UVM_TEST_TRACKER_SANITY UVM_TEST_IOCTL_BASE(12) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_TRACKER_SANITY_PARAMS; + +#define UVM_TEST_PUSH_SANITY UVM_TEST_IOCTL_BASE(13) +typedef struct +{ + NvBool skipTimestampTest; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_PUSH_SANITY_PARAMS; + +#define UVM_TEST_CHANNEL_SANITY UVM_TEST_IOCTL_BASE(14) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_CHANNEL_SANITY_PARAMS; + +typedef enum +{ + UVM_TEST_CHANNEL_STRESS_MODE_NOOP_PUSH = 0, + UVM_TEST_CHANNEL_STRESS_MODE_UPDATE_CHANNELS, + UVM_TEST_CHANNEL_STRESS_MODE_STREAM, +} UVM_TEST_CHANNEL_STRESS_MODE; + +#define UVM_TEST_CHANNEL_STRESS UVM_TEST_IOCTL_BASE(15) +typedef struct +{ + NvU32 mode; // In + + // Number of iterations: + // mode == NOOP_PUSH: number of noop pushes + // mode == UPDATE_CHANNELS: number of updates + // mode == STREAM: number of iterations per stream + NvU32 iterations; + + NvU32 num_streams; // In, used only for mode == UVM_TEST_CHANNEL_STRESS_MODE_STREAM + NvU32 seed; // In + NvU32 verbose; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_CHANNEL_STRESS_PARAMS; + +#define UVM_TEST_CE_SANITY UVM_TEST_IOCTL_BASE(16) +typedef struct +{ + NvBool skipTimestampTest; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_CE_SANITY_PARAMS; + +#define UVM_TEST_VA_BLOCK_INFO UVM_TEST_IOCTL_BASE(17) + +// See UVM_VA_BLOCK_SIZE in uvm_va_block.h for an explanation of this number +#define UVM_TEST_VA_BLOCK_SIZE (2ull*1024*1024) + +typedef struct +{ + NvU64 lookup_address NV_ALIGN_BYTES(8); // In + + + NvU64 va_block_start NV_ALIGN_BYTES(8); // Out + NvU64 va_block_end NV_ALIGN_BYTES(8); // Out, inclusive + + // NV_ERR_INVALID_ADDRESS lookup_address doesn't match a UVM range + // + // NV_ERR_OBJECT_NOT_FOUND lookup_address matched a UVM range on this file + // but the corresponding block has not yet been + // populated + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_BLOCK_INFO_PARAMS; + +#define UVM_TEST_LOCK_SANITY UVM_TEST_IOCTL_BASE(18) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_LOCK_SANITY_PARAMS; + +#define UVM_TEST_PERF_UTILS_SANITY UVM_TEST_IOCTL_BASE(19) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_PERF_UTILS_SANITY_PARAMS; + +#define UVM_TEST_KVMALLOC UVM_TEST_IOCTL_BASE(20) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_KVMALLOC_PARAMS; + +#define UVM_TEST_PMM_QUERY UVM_TEST_IOCTL_BASE(21) +typedef enum +{ + // Get the value of valid user allocations as key + UVM_TEST_CHUNK_SIZE_GET_USER_SIZE +} uvm_test_pmm_query_key_t; + +typedef struct +{ + // In params + NvProcessorUuid gpu_uuid; + NvU64 key; + // Out params + NvU64 value; + NV_STATUS rmStatus; +} UVM_TEST_PMM_QUERY_PARAMS; + +#define UVM_TEST_PMM_CHECK_LEAK UVM_TEST_IOCTL_BASE(22) + +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU64 chunk_size; // In + NvS64 alloc_limit; // In. Number of chunks to allocate. -1 means unlimited + NvU64 allocated; // Out. 
Number of chunks actually allocated + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_CHECK_LEAK_PARAMS; + +#define UVM_TEST_PERF_EVENTS_SANITY UVM_TEST_IOCTL_BASE(23) +typedef struct +{ + // Out params + NV_STATUS rmStatus; +} UVM_TEST_PERF_EVENTS_SANITY_PARAMS; + +#define UVM_TEST_PERF_MODULE_SANITY UVM_TEST_IOCTL_BASE(24) +typedef struct +{ + // In params + NvU64 range_address NV_ALIGN_BYTES(8); + NvU32 range_size; + // Out params + NV_STATUS rmStatus; +} UVM_TEST_PERF_MODULE_SANITY_PARAMS; + +#define UVM_TEST_RANGE_ALLOCATOR_SANITY UVM_TEST_IOCTL_BASE(25) +typedef struct +{ + // In params + NvU32 verbose; + NvU32 seed; + NvU32 iters; + + // Out params + NV_STATUS rmStatus; +} UVM_TEST_RANGE_ALLOCATOR_SANITY_PARAMS; + +#define UVM_TEST_GET_RM_PTES UVM_TEST_IOCTL_BASE(26) +typedef enum +{ + UVM_TEST_GET_RM_PTES_SINGLE_GPU = 0, + UVM_TEST_GET_RM_PTES_MULTI_GPU_SUPPORTED, + UVM_TEST_GET_RM_PTES_MULTI_GPU_SLI_SUPPORTED, + UVM_TEST_GET_RM_PTES_MULTI_GPU_NOT_SUPPORTED, + UVM_TEST_GET_RM_PTES_MAX +} UVM_TEST_PTE_RM_PTES_TEST_MODE; + +typedef struct +{ + // In + NvS32 rmCtrlFd; // For future use. (security check) + NvHandle hClient; + NvHandle hMemory; + NvU32 test_mode; // (UVM_TEST_PTE_RM_PTES_TEST_MODE) + NvU64 size NV_ALIGN_BYTES(8); + NvProcessorUuid gpu_uuid; + + // Out + NV_STATUS rmStatus; +} UVM_TEST_GET_RM_PTES_PARAMS; + +#define UVM_TEST_FAULT_BUFFER_FLUSH UVM_TEST_IOCTL_BASE(27) +typedef struct +{ + NvU64 iterations; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_FAULT_BUFFER_FLUSH_PARAMS; + +#define UVM_TEST_INJECT_TOOLS_EVENT UVM_TEST_IOCTL_BASE(28) +typedef struct +{ + // In params + UvmEventEntry entry; // contains only NvUxx types + NvU32 count; + + // Out param + NV_STATUS rmStatus; +} UVM_TEST_INJECT_TOOLS_EVENT_PARAMS; + +#define UVM_TEST_INCREMENT_TOOLS_COUNTER UVM_TEST_IOCTL_BASE(29) +typedef struct +{ + // In params + NvU64 amount NV_ALIGN_BYTES(8); // amount to increment + NvU32 counter; // name of counter + NvProcessorUuid processor; + NvU32 count; // number of times to increment + + // Out param + NV_STATUS rmStatus; +} UVM_TEST_INCREMENT_TOOLS_COUNTER_PARAMS; + +#define UVM_TEST_MEM_SANITY UVM_TEST_IOCTL_BASE(30) +typedef struct +{ + // Out params + NV_STATUS rmStatus; +} UVM_TEST_MEM_SANITY_PARAMS; + +#define UVM_TEST_MAKE_CHANNEL_STOPS_IMMEDIATE UVM_TEST_IOCTL_BASE(32) +typedef struct +{ + // Out params + NV_STATUS rmStatus; +} UVM_TEST_MAKE_CHANNEL_STOPS_IMMEDIATE_PARAMS; + +// Inject an error into the VA block covering the lookup_address +// +// If page_table_allocation_retry_force_count is non-0 then the next count +// page table allocations under the VA block will be forced to do +// allocation-retry. +// +// If user_pages_allocation_retry_force_count is non-0 then the next count user +// memory allocations under the VA block will be forced to do allocation-retry. +// +// If eviction_failure is NV_TRUE, the next eviction attempt from the VA block +// will fail with NV_ERR_NO_MEMORY. +// +// If cpu_pages_allocation_error is NV_TRUE, the subsequent operations that +// need to allocate CPU pages will fail with NV_ERR_NO_MEMORY. +// +// If populate_failure is NV_TRUE, a retry error will be injected after the next +// successful user memory allocation under the VA block but before that +// allocation is used by the block. This is similar to +// user_pages_allocation_retry_force_count, but the injection point simulates +// driver metadata allocation failure. 
+// +// Error returns: +// NV_ERR_INVALID_ADDRESS +// - lookup_address doesn't match a UVM range +#define UVM_TEST_VA_BLOCK_INJECT_ERROR UVM_TEST_IOCTL_BASE(33) +typedef struct +{ + NvU64 lookup_address NV_ALIGN_BYTES(8); // In + NvU32 page_table_allocation_retry_force_count; // In + NvU32 user_pages_allocation_retry_force_count; // In + NvU32 cpu_chunk_allocation_size_mask; // In + NvBool eviction_error; // In + NvBool cpu_pages_allocation_error; // In + NvBool populate_error; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_BLOCK_INJECT_ERROR_PARAMS; + +#define UVM_TEST_PEER_IDENTITY_MAPPINGS UVM_TEST_IOCTL_BASE(34) +typedef struct +{ + // In params + NvProcessorUuid gpuA; + NvProcessorUuid gpuB; + // Out param + NV_STATUS rmStatus; +} UVM_TEST_PEER_IDENTITY_MAPPINGS_PARAMS; + +#define UVM_TEST_VA_RESIDENCY_INFO UVM_TEST_IOCTL_BASE(35) +typedef struct +{ + NvU64 lookup_address NV_ALIGN_BYTES(8); // In + + // Whether to wait on the block tracker before returning. Fields like + // resident_on and mapped_on represent state which will be valid when the + // block tracker is complete. If is_async is true, then those fields will + // still be filled out as if the tracker is done, but the actual residency + // or mapping changes may not have been performed yet. + NvBool is_async; // In + + // Array of processors which have a resident copy of the page containing + // lookup_address. + NvProcessorUuid resident_on[UVM_MAX_PROCESSORS]; // Out + NvU32 resident_on_count; // Out + + // The size of the physical allocation backing lookup_address. Only the + // system-page-sized portion of this allocation which contains + // lookup_address is guaranteed to be resident on the corresponding + // processor. + NvU32 resident_physical_size[UVM_MAX_PROCESSORS]; // Out + + // The physical address of the physical allocation backing lookup_address. + NvU64 resident_physical_address[UVM_MAX_PROCESSORS] NV_ALIGN_BYTES(8); // Out + + // Array of processors which have a virtual mapping covering lookup_address. + NvProcessorUuid mapped_on[UVM_MAX_PROCESSORS]; // Out + NvU32 mapping_type[UVM_MAX_PROCESSORS]; // Out + NvU32 mapped_on_count; // Out + + // The size of the virtual mapping covering lookup_address on each + // mapped_on processor. + NvU32 page_size[UVM_MAX_PROCESSORS]; // Out + + // Array of processors which have physical memory populated that would back + // lookup_address if it was resident. 
+ NvProcessorUuid populated_on[UVM_MAX_PROCESSORS]; // Out + NvU32 populated_on_count; // Out + + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_RESIDENCY_INFO_PARAMS; + +#define UVM_TEST_PMM_ASYNC_ALLOC UVM_TEST_IOCTL_BASE(36) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU32 num_chunks; // In + NvU32 num_work_iterations; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_ASYNC_ALLOC_PARAMS; + +typedef enum +{ + UVM_TEST_PREFETCH_FILTERING_MODE_FILTER_ALL, // Disable all prefetch faults + UVM_TEST_PREFETCH_FILTERING_MODE_FILTER_NONE, // Enable all prefetch faults +} UvmTestPrefetchFilteringMode; + +#define UVM_TEST_SET_PREFETCH_FILTERING UVM_TEST_IOCTL_BASE(37) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU32 filtering_mode; // In (UvmTestPrefetchFilteringMode) + NV_STATUS rmStatus; // Out +} UVM_TEST_SET_PREFETCH_FILTERING_PARAMS; + +typedef enum +{ + UvmTestPmmSanityModeFull = 1, + UvmTestPmmSanityModeBasic = 2, +} UvmTestPmmSanityMode; + +#define UVM_TEST_PMM_SANITY UVM_TEST_IOCTL_BASE(40) +typedef struct +{ + // Test mode of type UvmTestPmmSanityMode + NvU32 mode; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_SANITY_PARAMS; + +typedef enum +{ + UvmInvalidateTlbMemBarNone = 1, + UvmInvalidateTlbMemBarSys = 2, + UvmInvalidateTlbMemBarLocal = 3, +} UvmInvalidateTlbMembarType; + +typedef enum +{ + UvmInvalidatePageTableLevelAll = 1, + UvmInvalidatePageTableLevelPte = 2, + UvmInvalidatePageTableLevelPde0 = 3, + UvmInvalidatePageTableLevelPde1 = 4, + UvmInvalidatePageTableLevelPde2 = 5, + UvmInvalidatePageTableLevelPde3 = 6, + UvmInvalidatePageTableLevelPde4 = 7, +} UvmInvalidatePageTableLevel; + +typedef enum +{ + UvmTargetVaModeAll = 1, + UvmTargetVaModeTargeted = 2, +} UvmTargetVaMode; + +#define UVM_TEST_INVALIDATE_TLB UVM_TEST_IOCTL_BASE(41) +typedef struct +{ + // In params + NvProcessorUuid gpu_uuid; + NvU64 va NV_ALIGN_BYTES(8); + NvU32 target_va_mode; // UvmTargetVaMode + NvU32 page_table_level; // UvmInvalidatePageTableLevel + NvU32 membar; // UvmInvalidateTlbMembarType + NvBool disable_gpc_invalidate; + + // Out params + NV_STATUS rmStatus; +} UVM_TEST_INVALIDATE_TLB_PARAMS; + +#define UVM_TEST_VA_BLOCK UVM_TEST_IOCTL_BASE(42) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_BLOCK_PARAMS; + +typedef enum +{ + // Default policy based eviction + // + // Evicts a chunk that the default eviction path would pick. + UvmTestEvictModeDefault = 1, + + // Virtual address based eviction + // + // Evicts the root chunk that the chunk backing the provided virtual address + // belongs to. + UvmTestEvictModeVirtual, + + // Physical address based eviction + // + // Evicts the root chunk covering the provided physical address. + UvmTestEvictModePhysical, +} UvmTestEvictMode; + +// Evict a chunk chosen according to one the test eviction modes specified +// above. Eviction may not always be possible, but as long as the arguments are +// valid NV_OK will be returned. To check whether eviction happened, the +// chunk_was_evicted flag needs to be inspected. +#define UVM_TEST_EVICT_CHUNK UVM_TEST_IOCTL_BASE(43) +typedef struct +{ + // The GPU to evict from, has to be registered in the VA space. + NvProcessorUuid gpu_uuid; // In + + // UvmTestEvictMode + NvU32 eviction_mode; // In + + // Virtual or physical address if evictionMode is UvmTestEvictModeVirtual or + // UvmTestEvictModePhysical. + NvU64 address NV_ALIGN_BYTES(8); // In + + // Flag indicating whether the eviction was performed. 
+ NvBool chunk_was_evicted; // Out + + // Physical address of the evicted root chunk. Notably 0 is a valid physical address. + NvU64 evicted_physical_address NV_ALIGN_BYTES(8); // Out + + // For the virtual eviction mode, returns the size of the chunk that was + // backing the virtual address before being evicted. 0 otherwise. + NvU64 chunk_size_backing_virtual NV_ALIGN_BYTES(8); // Out + + NV_STATUS rmStatus; // Out +} UVM_TEST_EVICT_CHUNK_PARAMS; + +typedef enum +{ + // Flush deferred accessed by mappings + UvmTestDeferredWorkTypeAcessedByMappings = 1, +} UvmTestDeferredWorkType; + +#define UVM_TEST_FLUSH_DEFERRED_WORK UVM_TEST_IOCTL_BASE(44) +typedef struct +{ + // UvmTestDeferredWorkType + NvU32 work_type; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_FLUSH_DEFERRED_WORK_PARAMS; + +#define UVM_TEST_NV_KTHREAD_Q UVM_TEST_IOCTL_BASE(45) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_NV_KTHREAD_Q_PARAMS; + +typedef enum +{ + UVM_TEST_PAGE_PREFETCH_POLICY_ENABLE = 0, + UVM_TEST_PAGE_PREFETCH_POLICY_DISABLE, + UVM_TEST_PAGE_PREFETCH_POLICY_MAX +} UVM_TEST_PAGE_PREFETCH_POLICY; + +#define UVM_TEST_SET_PAGE_PREFETCH_POLICY UVM_TEST_IOCTL_BASE(46) +typedef struct +{ + NvU32 policy; // In (UVM_TEST_PAGE_PREFETCH_POLICY) + NV_STATUS rmStatus; // Out +} UVM_TEST_SET_PAGE_PREFETCH_POLICY_PARAMS; + +#define UVM_TEST_RANGE_GROUP_TREE UVM_TEST_IOCTL_BASE(47) +typedef struct +{ + NvU64 rangeGroupIds[4] NV_ALIGN_BYTES(8); // In + NV_STATUS rmStatus; // Out +} UVM_TEST_RANGE_GROUP_TREE_PARAMS; + +#define UVM_TEST_RANGE_GROUP_RANGE_INFO UVM_TEST_IOCTL_BASE(48) +typedef struct +{ + NvU64 lookup_address NV_ALIGN_BYTES(8); // In + + NvU64 range_group_range_start NV_ALIGN_BYTES(8); // Out + NvU64 range_group_range_end NV_ALIGN_BYTES(8); // Out, inclusive + NvU64 range_group_id NV_ALIGN_BYTES(8); // Out + NvU32 range_group_present; // Out + NV_STATUS rmStatus; // Out +} UVM_TEST_RANGE_GROUP_RANGE_INFO_PARAMS; + +#define UVM_TEST_RANGE_GROUP_RANGE_COUNT UVM_TEST_IOCTL_BASE(49) +typedef struct +{ + NvU64 rangeGroupId NV_ALIGN_BYTES(8); // In + NvU64 count NV_ALIGN_BYTES(8); // Out + NV_STATUS rmStatus; // Out +} UVM_TEST_RANGE_GROUP_RANGE_COUNT_PARAMS; + +#define UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE UVM_TEST_IOCTL_BASE(50) +typedef struct +{ + NvU32 reenable_lapse; // Out: Lapse in miliseconds + NV_STATUS rmStatus; // Out +} UVM_TEST_GET_PREFETCH_FAULTS_REENABLE_LAPSE_PARAMS; + +#define UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE UVM_TEST_IOCTL_BASE(51) +typedef struct +{ + NvU32 reenable_lapse; // In: Lapse in miliseconds + NV_STATUS rmStatus; // Out +} UVM_TEST_SET_PREFETCH_FAULTS_REENABLE_LAPSE_PARAMS; + +#define UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS UVM_TEST_IOCTL_BASE(52) +typedef struct +{ + NvU64 addr NV_ALIGN_BYTES(8); // Out + NV_STATUS rmStatus; // Out +} UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS_PARAMS; + +// Allocate and free memory directly from PMA with eviction enabled. This allows +// to simulate RM-like allocations, but without the RM API lock serializing +// everything. +#define UVM_TEST_PMA_ALLOC_FREE UVM_TEST_IOCTL_BASE(53) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU32 page_size; + NvBool contiguous; + NvU64 num_pages NV_ALIGN_BYTES(8); // In + NvU64 phys_begin NV_ALIGN_BYTES(8); // In + NvU64 phys_end NV_ALIGN_BYTES(8); // In + NvU32 nap_us_before_free; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_PMA_ALLOC_FREE_PARAMS; + +// Allocate and free user memory directly from PMM with eviction enabled. 
+// +// Provides a direct way of exercising PMM allocs, eviction and frees of user +// memory type. +#define UVM_TEST_PMM_ALLOC_FREE_ROOT UVM_TEST_IOCTL_BASE(54) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU32 nap_us_before_free; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_ALLOC_FREE_ROOT_PARAMS; + +// Inject a PMA eviction error after the specified number of chunks are +// evicted. +#define UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR UVM_TEST_IOCTL_BASE(55) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU32 error_after_num_chunks; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_INJECT_PMA_EVICT_ERROR_PARAMS; + +// Change configuration of access counters. This call will disable access +// counters and reenable them using the new configuration. All previous +// notifications will be lost +// +// The reconfiguration affects all VA spaces that rely on the access +// counters information for the same GPU. To avoid conflicting configurations, +// only one VA space is allowed to reconfigure the GPU at a time. +// +// Error returns: +// NV_ERR_INVALID_STATE +// - The GPU has already been reconfigured in a different VA space +#define UVM_TEST_RECONFIGURE_ACCESS_COUNTERS UVM_TEST_IOCTL_BASE(56) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + + // Type UVM_ACCESS_COUNTER_GRANULARITY from nv_uvm_types.h + NvU32 mimc_granularity; // In + NvU32 momc_granularity; // In + + // Type UVM_ACCESS_COUNTER_USE_LIMIT from nv_uvm_types.h + NvU32 mimc_use_limit; // In + NvU32 momc_use_limit; // In + + NvU32 threshold; // In + NvBool enable_mimc_migrations; // In + NvBool enable_momc_migrations; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_RECONFIGURE_ACCESS_COUNTERS_PARAMS; + +typedef enum +{ + UVM_TEST_ACCESS_COUNTER_RESET_MODE_ALL = 0, + UVM_TEST_ACCESS_COUNTER_RESET_MODE_TARGETED, + UVM_TEST_ACCESS_COUNTER_RESET_MODE_MAX +} UVM_TEST_ACCESS_COUNTER_RESET_MODE; + +typedef enum +{ + UVM_TEST_ACCESS_COUNTER_TYPE_MIMC = 0, + UVM_TEST_ACCESS_COUNTER_TYPE_MOMC, + UVM_TEST_ACCESS_COUNTER_TYPE_MAX +} UVM_TEST_ACCESS_COUNTER_TYPE; + +// Clear the contents of the access counters. This call supports different +// modes for targeted/global resets. +#define UVM_TEST_RESET_ACCESS_COUNTERS UVM_TEST_IOCTL_BASE(57) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + + // Type UVM_TEST_ACCESS_COUNTER_RESET_MODE + NvU32 mode; // In + + // Type UVM_TEST_ACCESS_COUNTER_TYPE + NvU32 counter_type; // In + + NvU32 bank; // In + NvU32 tag; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_RESET_ACCESS_COUNTERS_PARAMS; + +// Do not handle access counter notifications when they arrive. This call is +// used to force an overflow of the access counter notification buffer +#define UVM_TEST_SET_IGNORE_ACCESS_COUNTERS UVM_TEST_IOCTL_BASE(58) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvBool ignore; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_SET_IGNORE_ACCESS_COUNTERS_PARAMS; + +// Verifies that the given channel is registered under the UVM VA space of +// vaSpaceFd. Returns NV_OK if so, NV_ERR_INVALID_CHANNEL if not. 
+#define UVM_TEST_CHECK_CHANNEL_VA_SPACE UVM_TEST_IOCTL_BASE(59) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvS32 rm_ctrl_fd; // In + NvHandle client; // In + NvHandle channel; // In + NvU32 ve_id; // In + NvS32 va_space_fd; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_CHECK_CHANNEL_VA_SPACE_PARAMS; + +// +// UvmTestEnableNvlinkPeerAccess +// +#define UVM_TEST_ENABLE_NVLINK_PEER_ACCESS UVM_TEST_IOCTL_BASE(60) +typedef struct +{ + NvProcessorUuid gpuUuidA; // IN + NvProcessorUuid gpuUuidB; // IN + NV_STATUS rmStatus; // OUT +} UVM_TEST_ENABLE_NVLINK_PEER_ACCESS_PARAMS; + +// +// UvmTestDisableNvlinkPeerAccess +// +#define UVM_TEST_DISABLE_NVLINK_PEER_ACCESS UVM_TEST_IOCTL_BASE(61) +typedef struct +{ + NvProcessorUuid gpuUuidA; // IN + NvProcessorUuid gpuUuidB; // IN + NV_STATUS rmStatus; // OUT +} UVM_TEST_DISABLE_NVLINK_PEER_ACCESS_PARAMS; + +typedef enum +{ + UVM_TEST_PAGE_THRASHING_POLICY_ENABLE = 0, + UVM_TEST_PAGE_THRASHING_POLICY_DISABLE, + UVM_TEST_PAGE_THRASHING_POLICY_MAX +} UVM_TEST_PAGE_THRASHING_POLICY; + +// This ioctl returns the thrashing mitigation parameters on the current VA +// space. Note that these values may change after a simulated/emulated GPU is +// registered on the VA space. +#define UVM_TEST_GET_PAGE_THRASHING_POLICY UVM_TEST_IOCTL_BASE(62) +typedef struct +{ + NvU32 policy; // Out (UVM_TEST_PAGE_THRASHING_POLICY) + NvU64 nap_ns NV_ALIGN_BYTES(8); // Out + NvU64 pin_ns NV_ALIGN_BYTES(8); // Out + NvBool map_remote_on_native_atomics_fault; // Out + NV_STATUS rmStatus; // Out +} UVM_TEST_GET_PAGE_THRASHING_POLICY_PARAMS; + +#define UVM_TEST_SET_PAGE_THRASHING_POLICY UVM_TEST_IOCTL_BASE(63) +typedef struct +{ + NvU32 policy; // In (UVM_TEST_PAGE_THRASHING_POLICY) + NvU64 pin_ns NV_ALIGN_BYTES(8); // In + NV_STATUS rmStatus; // Out +} UVM_TEST_SET_PAGE_THRASHING_POLICY_PARAMS; + +#define UVM_TEST_PMM_SYSMEM UVM_TEST_IOCTL_BASE(64) +typedef struct +{ + NvU64 range_address1 NV_ALIGN_BYTES(8); // In + NvU64 range_address2 NV_ALIGN_BYTES(8); // In + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_SYSMEM_PARAMS; + +#define UVM_TEST_PMM_REVERSE_MAP UVM_TEST_IOCTL_BASE(65) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU64 range_address1 NV_ALIGN_BYTES(8); // In + NvU64 range_address2 NV_ALIGN_BYTES(8); // In + NvU64 range_size2 NV_ALIGN_BYTES(8); // In + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_REVERSE_MAP_PARAMS; + +#define UVM_TEST_PMM_INDIRECT_PEERS UVM_TEST_IOCTL_BASE(66) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_INDIRECT_PEERS_PARAMS; + +// Calls uvm_va_space_mm_retain on a VA space, operates on the mm, optionally +// sleeps for a while, then releases the va_space_mm and returns. The idea is to +// simulate retaining a va_space_mm from a thread like the GPU fault handler +// which operates outside of the normal context of the VA space. +#define UVM_TEST_VA_SPACE_MM_RETAIN UVM_TEST_IOCTL_BASE(67) +typedef struct +{ + // The kernel virtual address of the uvm_va_space on which to attempt + // retain. This can be obtained via UVM_TEST_GET_KERNEL_VIRTUAL_ADDRESS. + // + // The reason to use this instead of looking it up from an fd as normal is + // to allow testing of calling threads which race with UVM VA space destroy + // (file close). We wouldn't be able to test that path if this was an fd. + NvU64 va_space_ptr NV_ALIGN_BYTES(8); // In + + // User virtual address within the va_space_mm. If the va_space_mm is + // successfully retained, this address is read once before sleeping and once + // after (if sleep_us > 0). 
+ NvU64 addr NV_ALIGN_BYTES(8); // In + + // On success, this contains the value of addr read prior to the sleep. + NvU64 val_before NV_ALIGN_BYTES(8); // In + + // On success, and if sleep_us > 0, this contains the value of addr read + // after the sleep. This is invalid if sleep_us == 0. + NvU64 val_after NV_ALIGN_BYTES(8); // In + + // Approximate duration for which to sleep with the va_space_mm retained. + NvU64 sleep_us NV_ALIGN_BYTES(8); // In + + // NV_ERR_MISSING_TABLE_ENTRY va_space_ptr is not a valid VA space + // NV_ERR_PAGE_TABLE_NOT_AVAIL Could not retain va_space_mm + // (uvm_va_space_mm_retain returned NULL) + // NV_ERR_INVALID_ADDRESS addr is invalid in va_space_mm + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_SPACE_MM_RETAIN_PARAMS; + +// Forces the VA space mm_shutdown callback to delay until more than one thread +// has entered the callback. This provides a high probability of exercising code +// to handle this race condition between exit_mmap and file close. +// +// The delay has an upper bound to prevent an infinite stall. +#define UVM_TEST_VA_SPACE_MM_DELAY_SHUTDOWN UVM_TEST_IOCTL_BASE(68) +typedef struct +{ + NvBool verbose; + + // NV_ERR_PAGE_TABLE_NOT_AVAIL if no va_space_mm is present + NV_STATUS rmStatus; +} UVM_TEST_VA_SPACE_MM_DELAY_SHUTDOWN_PARAMS; + +#define UVM_TEST_PMM_CHUNK_WITH_ELEVATED_PAGE UVM_TEST_IOCTL_BASE(69) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_CHUNK_WITH_ELEVATED_PAGE_PARAMS; + +#define UVM_TEST_GET_GPU_TIME UVM_TEST_IOCTL_BASE(70) +typedef struct +{ + // GPU to query time from. GPU must have been previously registered + NvProcessorUuid gpu_uuid; // In + + NvU64 timestamp_ns NV_ALIGN_BYTES(8); // Out + NV_STATUS rmStatus; // Out +} UVM_TEST_GET_GPU_TIME_PARAMS; + +// Check if access counters are enabled upon registration of the given GPU +#define UVM_TEST_ACCESS_COUNTERS_ENABLED_BY_DEFAULT UVM_TEST_IOCTL_BASE(71) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvBool enabled; // Out + + NV_STATUS rmStatus; // Out +} UVM_TEST_ACCESS_COUNTERS_ENABLED_BY_DEFAULT_PARAMS; + +// Inject an error into the VA space +// +// If migrate_vma_allocation_fail_nth is greater than 0, the nth page +// allocation within migrate_vma will fail. +#define UVM_TEST_VA_SPACE_INJECT_ERROR UVM_TEST_IOCTL_BASE(72) +typedef struct +{ + NvU32 migrate_vma_allocation_fail_nth; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_SPACE_INJECT_ERROR_PARAMS; + +// Release to PMA all free root chunks +#define UVM_TEST_PMM_RELEASE_FREE_ROOT_CHUNKS UVM_TEST_IOCTL_BASE(73) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_RELEASE_FREE_ROOT_CHUNKS_PARAMS; + +// Wait until all pending replayable faults have been processed. If there are +// still pending packets when timeout_ns is reached, the ioctl returns +// NV_ERR_TIMEOUT. +// +// This function should be called after the kernel producing the faults has been +// synchronized. This should ensure that PUT != GET and faults will not be +// missed even if the driver has not started to process them, yet. 
+#define UVM_TEST_DRAIN_REPLAYABLE_FAULTS UVM_TEST_IOCTL_BASE(74) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU64 timeout_ns; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_DRAIN_REPLAYABLE_FAULTS_PARAMS; + +// Get module config PMA batch size in bytes +#define UVM_TEST_PMA_GET_BATCH_SIZE UVM_TEST_IOCTL_BASE(75) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvU64 pma_batch_size; NV_ALIGN_BYTES(8) // Out + + NV_STATUS rmStatus; // Out +} UVM_TEST_PMA_GET_BATCH_SIZE_PARAMS; + +// Request PMA's global statistics +#define UVM_TEST_PMM_QUERY_PMA_STATS UVM_TEST_IOCTL_BASE(76) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + UvmPmaStatistics pma_stats; // Out + + NV_STATUS rmStatus; // Out +} UVM_TEST_PMM_QUERY_PMA_STATS_PARAMS; + +#define UVM_TEST_NUMA_GET_CLOSEST_CPU_NODE_TO_GPU UVM_TEST_IOCTL_BASE(77) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvHandle client; // In + NvHandle smc_part_ref; // In + + // On kernels with NUMA support, this entry contains the closest CPU NUMA + // node to this GPU. Otherwise, the value will be -1. + NvS32 node_id; // Out + NV_STATUS rmStatus; // Out +} UVM_TEST_NUMA_GET_CLOSEST_CPU_NODE_TO_GPU_PARAMS; + +// Test whether the bottom halves have run on the correct CPUs based on the +// NUMA node locality of the GPU. +// +// Failure is reported if: +// 1. The GPU has serviced faults but the mask tracking which CPUs the +// bottom half ran on was empty, or +// 2. The set of CPUs where the bottom half ran is not a subset of the CPUs +// attached to the NUMA node. +// +// This IOCTL returns NV_OK on success, NV_ERR_INVALID_STATE on failure, or +// NV_ERR_NOT_SUPPORTED if UVM thread affinity is not supported. +#define UVM_TEST_NUMA_CHECK_AFFINITY UVM_TEST_IOCTL_BASE(78) +typedef struct +{ + NvProcessorUuid gpu_uuid; // In + NvHandle client; // In + NvHandle smc_part_ref; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_NUMA_CHECK_AFFINITY_PARAMS; + +#define UVM_TEST_VA_SPACE_ADD_DUMMY_THREAD_CONTEXTS UVM_TEST_IOCTL_BASE(79) +typedef struct +{ + // Number of thread contexts to add per thread context table entry + NvU32 num_dummy_thread_contexts; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_SPACE_ADD_DUMMY_THREAD_CONTEXTS_PARAMS; + +#define UVM_TEST_VA_SPACE_REMOVE_DUMMY_THREAD_CONTEXTS UVM_TEST_IOCTL_BASE(80) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_SPACE_REMOVE_DUMMY_THREAD_CONTEXTS_PARAMS; + +#define UVM_TEST_THREAD_CONTEXT_SANITY UVM_TEST_IOCTL_BASE(81) +typedef struct +{ + // Iterations to run. + NvU32 iterations; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_THREAD_CONTEXT_SANITY_PARAMS; + +#define UVM_TEST_THREAD_CONTEXT_PERF UVM_TEST_IOCTL_BASE(82) +typedef struct +{ + // Iterations to run. + NvU32 iterations; // In + + // Delay, in microseconds, between thread context addition and removal + NvU32 delay_us; // In + + // Median time, in nanoseconds, spent in adding and then deleting a thread + // context. + NvU64 ns NV_ALIGN_BYTES(8); // Out + + NV_STATUS rmStatus; // Out +} UVM_TEST_THREAD_CONTEXT_PERF_PARAMS; + +typedef enum +{ + UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_NONE = 0, + + // Pageable memory cannot be accessed, but there is an association between + // this VA space and its owning process. For example, this enables the GPU + // fault handler to establish CPU mappings. 
+ UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_MMU_NOTIFIER, + + UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_HMM, + UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_ATS_KERNEL, + UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_ATS_DRIVER, + UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_COUNT +} UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE; + +#define UVM_TEST_GET_PAGEABLE_MEM_ACCESS_TYPE UVM_TEST_IOCTL_BASE(83) +typedef struct +{ + // UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE + NvU32 type; // Out + + NV_STATUS rmStatus; // Out +} UVM_TEST_GET_PAGEABLE_MEM_ACCESS_TYPE_PARAMS; + +// Some events, like fault replays, may not immediately show up in the events +// queue despite calling UVM_TOOLS_FLUSH_EVENTS since that will only flush +// completed events but not pending events. Successful completion of this IOCTL +// guarantees that any replays issued on the given GPU prior to the call will +// have its event enqueued in all the tools sessions which have replay events +// enabled. Also, this IOCTL includes an implicit UVM_TOOLS_FLUSH_EVENTS call. +// Hence, this IOCTL is a superset of UVM_TOOLS_FLUSH_EVENTS. Since this call is +// more expensive than UVM_TOOLS_FLUSH_EVENTS, callers who don't need the above +// mentioned guarantee should consider calling UVM_TOOLS_FLUSH_EVENTS instead. +#define UVM_TEST_TOOLS_FLUSH_REPLAY_EVENTS UVM_TEST_IOCTL_BASE(84) +typedef struct +{ + NvProcessorUuid gpuUuid; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_TOOLS_FLUSH_REPLAY_EVENTS_PARAMS; + +// Many checks are performed when the driver is unloaded. In the event of an +// error, a warning message may be printed to the kernel log. In automated +// testing, a systematic way to check the state of the driver after it is +// unloaded is required for additional test coverage. One userland process may +// register to receive the driver state after its unload, since we cannot use +// /proc or /sys to retrieve driver-specific information for an unloaded driver. +// Any userland process registers the given address (unload_state_buf) with the +// UVM driver. On module unload, if an address has been registered, debugging +// state is written to that address. The data in the address is valid once +// module unload completes. +// Error returns: +// NV_ERR_IN_USE +// - The unload state buffer has already been registered. +// NV_ERR_INVALID_ADDRESS +// - unload_state_buf is invalid. +// - unload_state_buf is not 8-byte aligned. + +#define UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER UVM_TEST_IOCTL_BASE(85) + +// Unload debugging states: +#define UVM_TEST_UNLOAD_STATE_MEMORY_LEAK ((NvU64)0x1) + +typedef struct +{ + // unload_state_buf points to a 8-byte buf and must be aligned to 8 bytes. + NvU64 unload_state_buf; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_REGISTER_UNLOAD_STATE_BUFFER_PARAMS; + +#define UVM_TEST_RB_TREE_DIRECTED UVM_TEST_IOCTL_BASE(86) + +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_RB_TREE_DIRECTED_PARAMS; + +#define UVM_TEST_RB_TREE_RANDOM UVM_TEST_IOCTL_BASE(87) + +typedef struct +{ + NvU64 iterations NV_ALIGN_BYTES(8); // In + + // Upper key range bound. Randomly generated node keys will not exceed this + // value. + NvU64 range_max; // In + + // This parameter is used to control the size of the tree. + // The number of nodes in the tree will bounce between 0 and this limit. + // See uvm_rb_tree_test.c:rbtt_test_get_random_op() for full description. 
+ NvU32 node_limit; // In + NvU32 seed; // In + + NV_STATUS rmStatus; // Out +} UVM_TEST_RB_TREE_RANDOM_PARAMS; + +#define UVM_TEST_HOST_SANITY UVM_TEST_IOCTL_BASE(88) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_HOST_SANITY_PARAMS; + +#define UVM_TEST_GET_USER_SPACE_END_ADDRESS UVM_TEST_IOCTL_BASE(90) +typedef struct +{ + NvU64 user_space_end_address; // Out + NV_STATUS rmStatus; // Out +} UVM_TEST_GET_USER_SPACE_END_ADDRESS_PARAMS; + +#define UVM_TEST_GET_CPU_CHUNK_ALLOC_SIZES UVM_TEST_IOCTL_BASE(91) +typedef struct +{ + NvU32 alloc_size_mask; // Out + NvU32 rmStatus; // Out +} UVM_TEST_GET_CPU_CHUNK_ALLOC_SIZES_PARAMS; + +#define UVM_TEST_HMM_SANITY UVM_TEST_IOCTL_BASE(92) +typedef struct +{ + NvU64 hmm_address NV_ALIGN_BYTES(8); // In + NvU64 hmm_length NV_ALIGN_BYTES(8); // In + NvU64 uvm_address NV_ALIGN_BYTES(8); // In + NvU64 uvm_length NV_ALIGN_BYTES(8); // In + NV_STATUS rmStatus; // Out +} UVM_TEST_HMM_SANITY_PARAMS; + +// Forces the next range covering the lookup_address to fail in +// uvm_va_range_add_gpu_va_space() with an out-of-memory error. Only the next +// uvm_va_range_add_gpu_va_space() will fail. Subsequent ones will succeed. +// +// Error returns: +// NV_ERR_INVALID_ADDRESS +// - lookup_address doesn't match a UVM range +#define UVM_TEST_VA_RANGE_INJECT_ADD_GPU_VA_SPACE_ERROR UVM_TEST_IOCTL_BASE(93) +typedef struct +{ + NvU64 lookup_address NV_ALIGN_BYTES(8); // In + NV_STATUS rmStatus; // Out +} UVM_TEST_VA_RANGE_INJECT_ADD_GPU_VA_SPACE_ERROR_PARAMS; + +// Forces destroy_gpu_va_space() to delay execution. This provides a high +// probability of exercising the race condition between concurrent +// UvmRegisterGpuVaSpace() calls on the same {va_space, gpu} pair in the +// ATS_KERNEL case. +#define UVM_TEST_DESTROY_GPU_VA_SPACE_DELAY UVM_TEST_IOCTL_BASE(94) +typedef struct +{ + NvU64 delay_us; // In + NV_STATUS rmStatus; // Out +} UVM_TEST_DESTROY_GPU_VA_SPACE_DELAY_PARAMS; + + + + + + + + + +#define UVM_TEST_CGROUP_ACCOUNTING_SUPPORTED UVM_TEST_IOCTL_BASE(96) +typedef struct +{ + NV_STATUS rmStatus; // Out +} UVM_TEST_CGROUP_ACCOUNTING_SUPPORTED_PARAMS; + +#ifdef __cplusplus +} +#endif + +#endif // __UVM_TEST_IOCTL_H__ diff --git a/kernel-open/nvidia-uvm/uvm_test_rng.c b/kernel-open/nvidia-uvm/uvm_test_rng.c new file mode 100644 index 000000000..4e12854ef --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_test_rng.c @@ -0,0 +1,340 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_test_rng.h" +#include "uvm_linux.h" + +#include "uvm_test.h" + +// George Marsaglia's RNG: +// https://groups.google.com/forum/#!msg/sci.stat.math/5yb0jwf1stw/ApaXM3IRy-0J +// https://groups.google.com/forum/#!msg/sci.math.num-analysis/yoaCpGWKEk0/UXCxgufdTesJ +// +// This is intended for testing purposes ONLY, not for anything which needs to +// be secure. get_random_bytes is not sufficient for testing purposes because we +// need reproducible sequences for testing. The prandom family would work fine +// but they aren't available on kernels < 2.6.35. + +void uvm_test_rng_init(uvm_test_rng_t *rng, NvU32 seed) +{ + rng->z = 362436069; + rng->w = 521288629; + rng->jcong = 380116160; + rng->jsr = seed; +} + +NvU32 uvm_test_rng_32(uvm_test_rng_t *rng) +{ + unsigned int mwc; + + rng->z = 36969*(rng->z & 65535) + (rng->z >> 16); + + rng->w = 18000*(rng->w & 65535) + (rng->w >> 16); + + rng->jcong = 69069*rng->jcong + 1234567; + + rng->jsr ^= (rng->jsr << 17); + rng->jsr ^= (rng->jsr >> 13); + rng->jsr ^= (rng->jsr << 5); + + mwc = (rng->z << 16) + rng->w; + + return (mwc ^ rng->jcong) + rng->jsr; +} + +NvU64 uvm_test_rng_64(uvm_test_rng_t *rng) +{ + NvU64 val64; + val64 = uvm_test_rng_32(rng); + val64 <<= 32; + val64 |= uvm_test_rng_32(rng); + return val64; +} + +NvUPtr uvm_test_rng_ptr(uvm_test_rng_t *rng) +{ + if (sizeof(NvUPtr) == sizeof(NvU32)) + return uvm_test_rng_32(rng); + return (NvUPtr)uvm_test_rng_64(rng); +} + +// These range-based computations are subject to modulo bias, depending on the +// range. As described above, this is good enough for testing purposes. +NvU32 uvm_test_rng_range_32(uvm_test_rng_t *rng, NvU32 lo, NvU32 hi) +{ + if (lo == 0 && hi == ~0U) + return uvm_test_rng_32(rng); + return lo + (uvm_test_rng_32(rng) % (hi - lo + 1)); +} + +NvU64 uvm_test_rng_range_64(uvm_test_rng_t *rng, NvU64 lo, NvU64 hi) +{ + if (lo == 0 && hi == ~0ULL) + return uvm_test_rng_64(rng); + return lo + (uvm_test_rng_64(rng) % (hi - lo + 1)); +} + +NvUPtr uvm_test_rng_range_ptr(uvm_test_rng_t *rng, NvUPtr lo, NvUPtr hi) +{ + if (sizeof(NvUPtr) == sizeof(NvU32)) + return uvm_test_rng_range_32(rng, lo, hi); + return (NvUPtr)uvm_test_rng_range_64(rng, lo, hi); +} + + +// Logarithmic distribution + +NvU64 uvm_test_rng_log64(uvm_test_rng_t *rng) +{ + return uvm_test_rng_range_log64(rng, 0, ~0ULL); +} + +NvU64 uvm_test_rng_range_log64(uvm_test_rng_t *rng, NvU64 lo, NvU64 hi) +{ + NvU32 log2_lo, log2_hi, rand_exp; + NvU64 rand_lo, rand_hi; + + // This is a very rough approximation of a logarithmic distribution. It + // weights each power of 2 covered in the range [lo, hi] equally, then + // uses a uniform distribution to select a value with that power of 2. + // + // This means that if the input range is for example [32, 64], 64 will be + // selected 50% of the time. The other 50% will be equally distributed among + // the range [32, 63]. + // + // A more mathematically-correct distribution requires doing fixed-point + // exponentiation. That's more trouble than it's worth for the purposes of + // selecting random ranges for testing, which is the current goal of this + // implementation. 
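The bucketed approximation described in the comment above is easy to reproduce outside the driver. The userspace sketch below follows the same recipe with stdlib's rand() in place of uvm_test_rng_*: weight each power of two covered by [lo, hi] equally, then pick uniformly inside the chosen bucket. rough_log_range(), uniform_range() and the [32, 64] demo in main() are illustrative stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Uniform pick in [lo, hi]; modulo bias is acceptable for illustration,
 * just as it is in the driver's test RNG. */
static uint64_t uniform_range(uint64_t lo, uint64_t hi)
{
    uint64_t r = ((uint64_t)rand() << 32) | (uint64_t)rand();

    if (lo == 0 && hi == UINT64_MAX)
        return r;
    return lo + r % (hi - lo + 1);
}

static unsigned ilog2_u64(uint64_t v)
{
    unsigned l = 0;

    while (v >>= 1)
        l++;
    return l;
}

/* Rough logarithmic distribution over [lo, hi]: weight each power of two
 * equally, then pick uniformly inside the selected bucket. */
static uint64_t rough_log_range(uint64_t lo, uint64_t hi)
{
    unsigned log2_lo, log2_hi, exp;
    uint64_t bucket_lo, bucket_hi;

    if (hi == 0)
        return 0;   /* lo must also be 0 */

    log2_lo = lo ? ilog2_u64(lo) : 0;
    log2_hi = ilog2_u64(hi);

    /* If lo == 0, reserve one extra "exponent" slot that maps to 0. */
    exp = (unsigned)uniform_range(log2_lo, log2_hi + (lo == 0));
    if (lo == 0) {
        if (exp == 0)
            return 0;
        exp--;
    }

    /* Uniform pick inside the chosen power-of-two bucket, clamped to [lo, hi]. */
    bucket_lo = 1ULL << exp;
    bucket_hi = (exp == 63) ? UINT64_MAX : (1ULL << (exp + 1)) - 1;
    if (bucket_lo < lo)
        bucket_lo = lo;
    if (bucket_hi > hi)
        bucket_hi = hi;

    return uniform_range(bucket_lo, bucket_hi);
}

int main(void)
{
    /* For [32, 64], roughly half of the picks should land on 64 itself. */
    unsigned hits_64 = 0, i;

    srand(0);
    for (i = 0; i < 100000; i++)
        if (rough_log_range(32, 64) == 64)
            hits_64++;

    printf("64 picked %u times out of 100000\n", hits_64);
    return 0;
}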
+ + if (hi == 0) + return 0; // lo must also be 0 + + // Compute the log2 floor of both lo and hi + if (lo == 0) + log2_lo = 0; + else + log2_lo = ilog2(lo); + log2_hi = ilog2(hi); + + // Pick a random exponent. If lo is 0, offset things so we can use an + // "exponent" value of 0 to return 0. + rand_exp = uvm_test_rng_range_32(rng, log2_lo, log2_hi + (lo == 0)); + if (lo == 0) { + if (rand_exp == 0) + return 0; + --rand_exp; // Didn't pick 0 so re-normalize the exponent + } + + // Pick a random number in the range [2^rand_exp, 2^(rand_exp+1)) + rand_lo = 1ULL << rand_exp; + if (rand_exp == 63) // Overflow on left-shift is undefined + rand_hi = ~0ULL; + else + rand_hi = (1ULL << (rand_exp + 1)) - 1; + + // Clamp + rand_lo = max(rand_lo, lo); + rand_hi = min(rand_hi, hi); + + return uvm_test_rng_range_64(rng, rand_lo, rand_hi); +} + +void uvm_test_rng_memset(uvm_test_rng_t *rng, void *ptr, size_t size) +{ + // This implementation is optimized to generate as few random numbers as + // possible, and to write to memory in natively-aligned chunks. This means + // the code is somewhat ugly because it has to handle all starting + // alignments and sizes. + + // Easy casting + union + { + NvUPtr u; + void *pv; + NvU8 *p8; + NvUPtr *p_native; + } p, p_end; + + NvUPtr val; + p.pv = ptr; + p_end.u = p.u + size; + + // Initial bytes until we get aligned + if ((p.u % sizeof(*p.p_native)) && p.u < p_end.u) { + val = uvm_test_rng_ptr(rng); + do { + *p.p8++ = (NvU8)(val & 0xFF); + val >>= 8; + } while ((p.u % sizeof(*p.p_native)) && p.u < p_end.u); + } + + // Aligned steady state + while (p.p_native + 1 <= p_end.p_native) { + val = uvm_test_rng_ptr(rng); + *p.p_native++ = val; + } + + // Unaligned cleanup at end + if (p.p8 < p_end.p8) { + val = uvm_test_rng_ptr(rng); + do { + *p.p8++ = (NvU8)(val & 0xFF); + val >>= 8; + } while (p.p8 < p_end.p8); + } +} + +// -------- Unit test -------- + +#define RNG_RANGE_TRIALS 10 + +typedef struct test_range32_struct +{ + NvU32 lo, hi; +} test_range32_t; + +typedef struct test_range64_struct +{ + NvU64 lo, hi; +} test_range64_t; + +static const test_range32_t test_ranges32[] = +{ + {0, 0}, + {0, 1}, + {0, 100}, + {0, 0x7fffffff}, + {0, 0x80000000}, + {0, 0xffffffff}, + {1, 1}, + {1, 2}, + {100, 100}, + {100, 0x80000000}, + {0xfffffffe, 0xffffffff}, + {0xffffffff, 0xffffffff}, +}; + +static const test_range64_t test_ranges64[] = +{ + {0, 0}, + {0, 1}, + {0, 100}, + {0, 0xffffffff}, + {0, 0x100000000ull}, + {0, 0xffffffffffffffffull}, + {1, 1}, + {1, 2}, + {100, 100}, + {100, 0x800000000000ull}, + {0xfffffffffffffffeull, 0xffffffffffffffffull}, + {0xffffffffffffffffull, 0xffffffffffffffffull}, +}; + +// Known initial sequences with seed == 0 +static const NvU32 test_vals32[] = +{ + 0xfa0ad9e5, + 0x50328964, + 0x68745401, + 0x346765d1 +}; + +static const NvU64 test_vals64[] = +{ + 0xfa0ad9e550328964, + 0x68745401346765d1, + 0x5ce392ad7cdff94e, + 0x4c75b15ad18c8d81 +}; + +static const NvU64 test_vals_log64[] = +{ + 0x68745401, + 0x34e, + 0x23f4ea57, + 0x587e5f3fc99332b +}; + +NV_STATUS uvm_test_rng_sanity(UVM_TEST_RNG_SANITY_PARAMS *params, struct file *file) +{ + uvm_test_rng_t rng; + size_t i, j; + NvU32 seed = 0; + + // Check known initial sequences + uvm_test_rng_init(&rng, seed); + for (i = 0; i < ARRAY_SIZE(test_vals32); i++) + TEST_CHECK_RET(uvm_test_rng_32(&rng) == test_vals32[i]); + + uvm_test_rng_init(&rng, seed); + for (i = 0; i < ARRAY_SIZE(test_vals64); i++) + TEST_CHECK_RET(uvm_test_rng_64(&rng) == test_vals64[i]); + + uvm_test_rng_init(&rng, seed); + for (i 
= 0; i < ARRAY_SIZE(test_vals64); i++) + TEST_CHECK_RET(uvm_test_rng_ptr(&rng) == (NvUPtr)test_vals64[i]); + + uvm_test_rng_init(&rng, seed); + for (i = 0; i < ARRAY_SIZE(test_vals_log64); i++) + TEST_CHECK_RET(uvm_test_rng_log64(&rng) == test_vals_log64[i]); + + // Check memset + uvm_test_rng_init(&rng, seed); + for (i = 0; i < ARRAY_SIZE(test_vals64); i++) { + NvU64 r; + uvm_test_rng_memset(&rng, &r, sizeof(r)); + TEST_CHECK_RET(r == test_vals64[i]); + } + + // Check that values fall within specified ranges + uvm_test_rng_init(&rng, seed); + for (i = 0; i < ARRAY_SIZE(test_ranges32); i++) { + NvU32 lo = test_ranges32[i].lo; + NvU32 hi = test_ranges32[i].hi; + for (j = 0; j < RNG_RANGE_TRIALS; j++) { + NvU32 r = uvm_test_rng_range_32(&rng, lo, hi); + TEST_CHECK_RET(r >= lo && r <= hi); + } + } + + uvm_test_rng_init(&rng, seed); + for (i = 0; i < ARRAY_SIZE(test_ranges64); i++) { + NvU64 lo = test_ranges64[i].lo; + NvU64 hi = test_ranges64[i].hi; + for (j = 0; j < RNG_RANGE_TRIALS; j++) { + NvU64 r; + + r = uvm_test_rng_range_64(&rng, lo, hi); + TEST_CHECK_RET(r >= lo && r <= hi); + + r = uvm_test_rng_range_ptr(&rng, lo, hi); + TEST_CHECK_RET((NvUPtr)r >= (NvUPtr)lo && (NvUPtr)r <= (NvUPtr)hi); + + r = uvm_test_rng_range_log64(&rng, lo, hi); + TEST_CHECK_RET(r >= lo && r <= hi); + } + } + + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_test_rng.h b/kernel-open/nvidia-uvm/uvm_test_rng.h new file mode 100644 index 000000000..dbda3331d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_test_rng.h @@ -0,0 +1,62 @@ +/******************************************************************************* + Copyright (c) 2015 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_TEST_RNG_H__ +#define __UVM_TEST_RNG_H__ + +#include "uvm_test_ioctl.h" +#include "uvm_linux.h" + +// Seed-able RNG for generating test data + +typedef struct +{ + unsigned int z; + unsigned int w; + unsigned int jsr; + unsigned int jcong; +} uvm_test_rng_t; + +void uvm_test_rng_init(uvm_test_rng_t *rng, NvU32 seed); + +// Fill the input buffer with random data +void uvm_test_rng_memset(uvm_test_rng_t *rng, void *ptr, size_t size); + +// Uniform distribution + +NvU32 uvm_test_rng_32(uvm_test_rng_t *rng); +NvU64 uvm_test_rng_64(uvm_test_rng_t *rng); +NvUPtr uvm_test_rng_ptr(uvm_test_rng_t *rng); + +NvU32 uvm_test_rng_range_32(uvm_test_rng_t *rng, NvU32 lo, NvU32 hi); +NvU64 uvm_test_rng_range_64(uvm_test_rng_t *rng, NvU64 lo, NvU64 hi); +NvUPtr uvm_test_rng_range_ptr(uvm_test_rng_t *rng, NvUPtr lo, NvUPtr hi); + +// Logarithmic distribution + +NvU64 uvm_test_rng_log64(uvm_test_rng_t *rng); +NvU64 uvm_test_rng_range_log64(uvm_test_rng_t *rng, NvU64 lo, NvU64 hi); + +NV_STATUS uvm_test_rng_sanity(UVM_TEST_RNG_SANITY_PARAMS *params, struct file *file); + +#endif // __UVM_TEST_RNG_H__ diff --git a/kernel-open/nvidia-uvm/uvm_thread_context.c b/kernel-open/nvidia-uvm/uvm_thread_context.c new file mode 100644 index 000000000..03d006d8a --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_thread_context.c @@ -0,0 +1,670 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_forward_decl.h" +#include "uvm_thread_context.h" + +#include "uvm_linux.h" +#include "uvm_common.h" + +// Thread local storage implementation. +// +// The global data structure that contains the set of active thread contexts +// is a table of UVM_THREAD_CONTEXT_TABLE_SIZE entries of type +// uvm_thread_context_table_entry_t. +// Each entry contains a small array of UVM_THREAD_CONTEXT_ARRAY_SIZE entries, +// a red-black tree, and a lock protecting the tree. +// +// The thread_context_non_interrupt_table_entry() function maps the current task +// (i.e. the current thread context) to a table entry. That function also +// recommends a position within the entry's array, but that index can be safely +// ignored: the thread context can be located in any array slot, or in the +// red-black tree. 
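To make the task-to-slot mapping concrete: thread_context_non_interrupt_table_entry(), further down in this file, hashes the current task pointer once with jhash_2words() and splits the result, using the upper bits to select the table entry and the lower bits as the array-slot hint. The minimal userspace sketch below mirrors that split; hash32() is a stand-in mixer rather than jhash_2words(), and TABLE_SIZE/ARRAY_SIZE simply echo UVM_THREAD_CONTEXT_TABLE_SIZE and UVM_THREAD_CONTEXT_ARRAY_SIZE for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 64   /* mirrors UVM_THREAD_CONTEXT_TABLE_SIZE */
#define ARRAY_SIZE 8    /* mirrors UVM_THREAD_CONTEXT_ARRAY_SIZE */

/* Stand-in for jhash_2words(): any decent 32-bit mix of the two halves of
 * the task pointer works for spreading threads across entries. */
static uint32_t hash32(uint32_t lo, uint32_t hi)
{
    uint32_t h = lo ^ (hi * 0x9e3779b9u);

    h ^= h >> 16;
    h *= 0x85ebca6bu;
    h ^= h >> 13;
    return h;
}

/* Map a task pointer to (table index, array-slot hint): the upper 16 bits of
 * the hash pick the table entry, the lower bits pick the hint. */
static void map_task(const void *task, size_t *table_index, size_t *array_hint)
{
    uint64_t p = (uint64_t)(uintptr_t)task;
    uint32_t hash = hash32((uint32_t)p, (uint32_t)(p >> 32));

    *table_index = (hash >> 16) % TABLE_SIZE;
    *array_hint = hash % ARRAY_SIZE;
}

int main(void)
{
    int dummy[4];
    size_t i;

    for (i = 0; i < 4; i++) {
        size_t t, a;

        map_task(&dummy[i], &t, &a);
        printf("task %p -> table entry %zu, array hint %zu\n",
               (void *)&dummy[i], t, a);
    }
    return 0;
}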
+// +// The described global data structures try to minimize contention among +// threads at two levels. First, thread_context_non_interrupt_table_entry() +// relies on a hash function to evenly spread threads among table entries. +// Second, when several threads are mapped to the same table entry, the same +// hash function spreads them evenly among the array entries, which can +// be independently and atomically updated. If the array is full, the thread +// context of the current task is stored in the red-black tree of the table +// entry, which is protected by a single lock. +// +// Both the table and array entries are cache aligned to avoid false sharing +// overheads due to cache thrashing between concurrent operations on separate +// thread contexts. + +#define UVM_THREAD_CONTEXT_ARRAY_SIZE 8 + +typedef struct { + void *acquired[UVM_LOCK_ORDER_COUNT]; +} uvm_thread_context_lock_acquired_t; + +typedef struct { + // If zero, the entry is empty. Otherwise, task is equal to the value of + // get_current() for the thread associated with thread_context; + atomic64_t task; + + uvm_thread_context_t *thread_context; +} ____cacheline_aligned_in_smp uvm_thread_context_array_entry_t; + +// The thread's context information is stored in the array or the red-black +// tree. +typedef struct { + // Small array where thread contexts are stored first. Each array entry + // can be atomically claimed or released. + uvm_thread_context_array_entry_t array[UVM_THREAD_CONTEXT_ARRAY_SIZE]; + + // Red-black tree, used when the array is full. A red-black tree is chosen + // because additions and removals are frequent operations: every time the + // UVM module is entered, there is one addition, one removal, and one + // lookup. The same UVM call may result on additional lookups. + struct rb_root tree; + + // Spinlock protecting the tree. A raw lock is chosen because UVM locks + // rely on thread context information to be available for lock tracking. + spinlock_t tree_lock; +} ____cacheline_aligned_in_smp uvm_thread_context_table_entry_t; + +// Global data structure containing all the active thread contexts +static uvm_thread_context_table_entry_t g_thread_context_table[UVM_THREAD_CONTEXT_TABLE_SIZE]; + +static bool g_thread_context_table_initialized __read_mostly = false; + +// Per CPU context wrapper, used for interrupt context. Zero initialized. +static DEFINE_PER_CPU(uvm_thread_context_wrapper_t, interrupt_thread_context_wrapper); + +// Array of acquired locks, used in the interrupt path. The non-interrupt path +// allocates the array when locking for the first time. +static DEFINE_PER_CPU(uvm_thread_context_lock_acquired_t, interrupt_thread_context_lock_acquired); + +static void thread_context_non_interrupt_remove(uvm_thread_context_t *thread_context, + uvm_thread_context_table_entry_t *thread_context_entry); + +bool uvm_thread_context_wrapper_is_used() +{ + // The wrapper contains lock information. While uvm_record_lock_X + // routines are a no-op outside of debug mode, unit tests do invoke their + // internal counterparts __uvm_record_lock_X. To add coverage, lock + // information is made available in develop and release modes if the + // builtin tests are enabled. 
+ return UVM_IS_DEBUG() || uvm_enable_builtin_tests; +} + +bool uvm_thread_context_global_initialized(void) +{ + return g_thread_context_table_initialized; +} + +void uvm_thread_context_global_init(void) +{ + size_t table_index; + + UVM_ASSERT(!uvm_thread_context_global_initialized()); + + for (table_index = 0; table_index < UVM_THREAD_CONTEXT_TABLE_SIZE; table_index++) { + uvm_thread_context_table_entry_t *table_entry = g_thread_context_table + table_index; + + spin_lock_init(&table_entry->tree_lock); + table_entry->tree = RB_ROOT; + } + + g_thread_context_table_initialized = true; +} + +void uvm_thread_context_global_exit(void) +{ + size_t table_index; + uvm_thread_context_t *curr_thread_context = uvm_thread_context(); + + UVM_ASSERT(uvm_thread_context_global_initialized()); + + // Search for thread contexts that were added but never removed. + // There should be one thread context left: the one added by the UVM module + // exit routine that invoked this function. To prevent the exit routine from + // deleting its thread context after deinitialization of the global table, + // it is deleted here. uvm_thread_context_remove will detect that the global + // shutdown already happened and skip. + for (table_index = 0; table_index < UVM_THREAD_CONTEXT_TABLE_SIZE; table_index++) { + size_t array_index; + struct rb_node *node; + uvm_thread_context_table_entry_t *table_entry = g_thread_context_table + table_index; + + for (array_index = 0; array_index < UVM_THREAD_CONTEXT_ARRAY_SIZE; array_index++) { + uvm_thread_context_t *thread_context; + uvm_thread_context_array_entry_t *array_entry = table_entry->array + array_index; + + NvU64 task = atomic64_read(&array_entry->task); + + if (task == 0) + continue; + + thread_context = array_entry->thread_context; + + UVM_ASSERT_MSG(thread_context == curr_thread_context, + "Left-over thread_context 0x%llx task 0x%llx\n", + (NvU64) thread_context, + (NvU64) thread_context->task); + + thread_context_non_interrupt_remove(thread_context, table_entry); + } + + node = rb_first(&table_entry->tree); + + while (node) { + uvm_thread_context_t *thread_context = rb_entry(node, uvm_thread_context_t, node); + + UVM_ASSERT_MSG(thread_context == curr_thread_context, + "Left-over thread_context 0x%llx task 0x%llx\n", + (NvU64) thread_context, + (NvU64) thread_context->task); + + thread_context_non_interrupt_remove(thread_context, table_entry); + node = rb_first(&table_entry->tree); + } + } + + g_thread_context_table_initialized = false; +} + +static uvm_thread_context_t *thread_context_non_interrupt_tree_search(struct rb_root *root, struct task_struct *task) +{ + struct rb_node *node = root->rb_node; + uintptr_t task_uintptr = (uintptr_t) task; + + while (node) { + uvm_thread_context_t *thread_context = rb_entry(node, uvm_thread_context_t, node); + uintptr_t thread_context_task_uintptr = (uintptr_t) thread_context->task; + + if (thread_context_task_uintptr == task_uintptr) + return thread_context; + + node = (thread_context_task_uintptr > task_uintptr)? 
node->rb_left : node->rb_right; + } + + return NULL; +} + +static bool thread_context_non_interrupt_tree_insert(struct rb_root *root, uvm_thread_context_t *new_thread_context) +{ + struct rb_node **node_ptr = &root->rb_node; + struct rb_node *node = root->rb_node; + struct rb_node *parent = NULL; + const struct task_struct *task = new_thread_context->task; + uintptr_t task_uintptr = (uintptr_t) task; + + while (node) { + uvm_thread_context_t *thread_context = rb_entry(node, uvm_thread_context_t, node); + uintptr_t thread_context_task_uintptr = (uintptr_t) thread_context->task; + + if (thread_context_task_uintptr == task_uintptr) + return false; + + parent = node; + node_ptr = (thread_context_task_uintptr > task_uintptr) ? &node->rb_left : &node->rb_right; + node = *node_ptr; + } + + rb_link_node(&new_thread_context->node, parent, node_ptr); + rb_insert_color(&new_thread_context->node, root); + + return true; +} + +static void thread_context_lock_interrupt_patch_acquired(uvm_thread_context_lock_t *context_lock) +{ + uvm_thread_context_lock_acquired_t *thread_context_lock_acquired; + + UVM_ASSERT(in_interrupt()); + UVM_ASSERT(context_lock->acquired == NULL); + + // Stich the preallocated, per-CPU array to the thread context lock. + thread_context_lock_acquired = &get_cpu_var(interrupt_thread_context_lock_acquired); + put_cpu_var(interrupt_thread_context_lock_acquired); + context_lock->acquired = (void**) thread_context_lock_acquired; +} + +static uvm_thread_context_lock_t *thread_context_lock_of(uvm_thread_context_t *thread_context) +{ + uvm_thread_context_wrapper_t *thread_context_wrapper; + uvm_thread_context_lock_t *context_lock; + + if (!uvm_thread_context_wrapper_is_used()) + return NULL; + + thread_context_wrapper = container_of(thread_context, uvm_thread_context_wrapper_t, context); + context_lock = &thread_context_wrapper->context_lock; + + // When the wrapper is used, the thread context lock is always present but + // its acquired locks array may not, due to a failed allocation. Instead of + // working around the missing array, pretend that the entire lock context + // does not exist. This situation can only happen in non-interrupt paths. + if (context_lock->acquired == NULL) { + if (in_interrupt()) + thread_context_lock_interrupt_patch_acquired(context_lock); + else + return NULL; + } + + return context_lock; +} + +static void thread_context_non_interrupt_init(uvm_thread_context_t *thread_context) +{ + UVM_ASSERT(!in_interrupt()); + + thread_context->array_index = UVM_THREAD_CONTEXT_ARRAY_SIZE; + + if (uvm_thread_context_wrapper_is_used()) { + uvm_thread_context_wrapper_t *thread_context_wrapper; + uvm_thread_context_lock_t *context_lock; + + thread_context_wrapper = container_of(thread_context, uvm_thread_context_wrapper_t, context); + context_lock = &thread_context_wrapper->context_lock; + + memset(context_lock, 0, sizeof(*context_lock)); + + // If this allocation fails, the lock context will appear as not + // present, but the rest of the thread context is usable. 
+ context_lock->acquired = kmalloc(sizeof(context_lock->acquired[0]) * UVM_LOCK_ORDER_COUNT, NV_UVM_GFP_FLAGS); + } +} + +static void thread_context_non_interrupt_deinit(uvm_thread_context_t *thread_context) +{ + uvm_thread_context_lock_t *context_lock; + + UVM_ASSERT(!in_interrupt()); + + context_lock = thread_context_lock_of(thread_context); + if (context_lock != NULL) { + UVM_ASSERT(__uvm_check_all_unlocked(context_lock)); + + kfree(context_lock->acquired); + context_lock->acquired = NULL; + } +} + +// Return the table entry and array index within that entry where the thread +// context of the current task is located. +// +// The array index should be interpreted as a hint: the thread context of the +// current taks may be stored at a different array index, or in the tree. +static uvm_thread_context_table_entry_t *thread_context_non_interrupt_table_entry(size_t *array_index_hint) +{ + size_t table_index; + NvU64 current_ptr = (NvU64) current; + NvU32 hash = jhash_2words((NvU32) current_ptr, (NvU32) (current_ptr >> 32), 0); + + BUILD_BUG_ON(UVM_THREAD_CONTEXT_TABLE_SIZE > (1 << 16)); + BUILD_BUG_ON(UVM_THREAD_CONTEXT_ARRAY_SIZE > (1 << 16)); + UVM_ASSERT(!in_interrupt()); + + // The upper 16 bits of the hash value index the table; the lower 16 + // index the array + table_index = (hash >> 16) % UVM_THREAD_CONTEXT_TABLE_SIZE; + + if (array_index_hint != NULL) + *array_index_hint = hash % UVM_THREAD_CONTEXT_ARRAY_SIZE; + + return g_thread_context_table + table_index; +} + +static uvm_thread_context_t *thread_context_non_interrupt(void) +{ + unsigned long flags; + size_t i, array_index; + uvm_thread_context_t *thread_context; + uvm_thread_context_table_entry_t *table_entry = thread_context_non_interrupt_table_entry(&array_index); + + for (i = array_index; i < (UVM_THREAD_CONTEXT_ARRAY_SIZE + array_index); i++) { + size_t curr_array_index = i % UVM_THREAD_CONTEXT_ARRAY_SIZE; + uvm_thread_context_array_entry_t *array_entry = table_entry->array + curr_array_index; + + if (atomic64_read(&array_entry->task) == (NvU64) current) { + thread_context = array_entry->thread_context; + + UVM_ASSERT(thread_context != NULL); + UVM_ASSERT(thread_context->array_index == curr_array_index); + + return thread_context; + } + } + + spin_lock_irqsave(&table_entry->tree_lock, flags); + thread_context = thread_context_non_interrupt_tree_search(&table_entry->tree, current); + spin_unlock_irqrestore(&table_entry->tree_lock, flags); + + return thread_context; +} + +static uvm_thread_context_t *thread_context_interrupt(void) +{ + uvm_thread_context_wrapper_t *thread_context_wrapper; + + // As we are in interrupt anyway it would be best to just use this_cpu_ptr() + // but it was added in 2.6.33 and the interface is non-trivial to implement + // prior to that. + thread_context_wrapper = &get_cpu_var(interrupt_thread_context_wrapper); + put_cpu_var(interrupt_thread_context_wrapper); + + return &thread_context_wrapper->context; +} + +static uvm_thread_context_t *thread_context_current(void) +{ + return in_interrupt() ? thread_context_interrupt() : thread_context_non_interrupt(); +} + +bool uvm_thread_context_present(void) +{ + return thread_context_current() != NULL; +} + +uvm_thread_context_t *uvm_thread_context(void) +{ + uvm_thread_context_t *thread_context = thread_context_current(); + + // If this assertion fires is probably because an entry point into the + // UVM module has not been wrapped with a UVM_ENTRY_X macro. 
The entry point + // to wrap is the first nvidia-uvm function in the error call stack printed + // by the assertion. + UVM_ASSERT(thread_context != NULL); + + return thread_context; +} + +// The addition logic takes into account that there may be a different thread +// context already associated with the given task. This happens in the uncommon +// case of re-entering the UVM module. Therefore, it is worth approaching the +// addition in a optimistic (speculative) fashion: if a slot is empty in the +// array, it is immediately taken. Should we discover later on that the task +// already has a thread context associated with it in the rest of the array or +// the tree, the previously claimed array slot is released. +static bool thread_context_non_interrupt_add(uvm_thread_context_t *thread_context, + uvm_thread_context_table_entry_t *table_entry, + size_t array_index_hint) +{ + size_t i; + NvU64 task; + unsigned long flags; + bool added; + + UVM_ASSERT(!in_interrupt()); + UVM_ASSERT(thread_context != NULL); + UVM_ASSERT(table_entry != NULL); + UVM_ASSERT(table_entry - g_thread_context_table >= 0); + UVM_ASSERT(table_entry - g_thread_context_table < UVM_THREAD_CONTEXT_TABLE_SIZE); + UVM_ASSERT(array_index_hint < UVM_THREAD_CONTEXT_ARRAY_SIZE); + + thread_context_non_interrupt_init(thread_context); + UVM_ASSERT(thread_context->array_index == UVM_THREAD_CONTEXT_ARRAY_SIZE); + + task = (NvU64) thread_context->task; + UVM_ASSERT(task > 0); + + for (i = array_index_hint; i < (array_index_hint + UVM_THREAD_CONTEXT_ARRAY_SIZE); i++) { + const size_t curr_array_index = i % UVM_THREAD_CONTEXT_ARRAY_SIZE; + uvm_thread_context_array_entry_t *array_entry = table_entry->array + curr_array_index; + + if (thread_context->array_index == UVM_THREAD_CONTEXT_ARRAY_SIZE) { + NvU64 old = atomic64_cmpxchg(&array_entry->task, 0, task); + + // Task already added a different thread context. There is nothing + // to undo because the current thread context has not been inserted. + if (old == task) + return false; + + // Speculatively add the current thread context. + if (old == 0) + thread_context->array_index = curr_array_index; + } + else if (atomic64_read(&array_entry->task) == task) { + + // Task already added a different thread context to the array, so + // undo the speculative insertion + atomic64_set(&table_entry->array[thread_context->array_index].task, 0); + + return false; + } + } + + spin_lock_irqsave(&table_entry->tree_lock, flags); + + if (thread_context->array_index == UVM_THREAD_CONTEXT_ARRAY_SIZE) { + + // If the task already added a different thread context to the tree, + // there is nothing to undo because the current thread context has not + // been inserted. + added = thread_context_non_interrupt_tree_insert(&table_entry->tree, thread_context); + } + else if (thread_context_non_interrupt_tree_search(&table_entry->tree, thread_context->task) != NULL) { + + // Task already added a different thread context to the tree, so undo + // the speculative insertion + atomic64_set(&table_entry->array[thread_context->array_index].task, 0); + + added = false; + } + else { + + // Speculative insertion succeeded: a thread context associated with the + // same task has not been found in the array or the tree. 
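The claim-then-verify-then-undo pattern used by thread_context_non_interrupt_add() above can be pictured in isolation: an empty slot is taken optimistically with an atomic compare-and-swap, and the claim is released again if the same task turns out to be registered elsewhere. The C11 sketch below shows only that pattern; the slots array, the task ids and claim_slot() are invented for illustration and are not the driver's data structures.

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 8

/* 0 means "slot is free"; otherwise the slot holds a task id. */
static _Atomic uint64_t slots[NUM_SLOTS];

/* Claim a free slot for 'task' starting at 'hint'. Returns the slot index,
 * or NUM_SLOTS if every slot is taken or the task is already present. */
static size_t claim_slot(uint64_t task, size_t hint)
{
    size_t claimed = NUM_SLOTS;
    size_t i;

    for (i = hint; i < hint + NUM_SLOTS; i++) {
        size_t s = i % NUM_SLOTS;
        uint64_t expected = 0;

        if (claimed == NUM_SLOTS) {
            /* Optimistically take the first empty slot we see. */
            if (atomic_compare_exchange_strong(&slots[s], &expected, task))
                claimed = s;
            else if (expected == task)
                return NUM_SLOTS;   /* already registered, nothing to undo */
        }
        else if (atomic_load(&slots[s]) == task) {
            /* Found an earlier registration: undo the speculative claim. */
            atomic_store(&slots[claimed], 0);
            return NUM_SLOTS;
        }
    }

    return claimed;
}

int main(void)
{
    size_t a = claim_slot(42, 3);
    size_t b = claim_slot(42, 3);   /* duplicate: must not claim again */

    printf("first claim: slot %zu, second claim: %s\n",
           a, b == NUM_SLOTS ? "rejected" : "claimed");
    return 0;
}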
+ table_entry->array[thread_context->array_index].thread_context = thread_context; + added = true; + } + + spin_unlock_irqrestore(&table_entry->tree_lock, flags); + return added; +} + +bool uvm_thread_context_add(uvm_thread_context_t *thread_context) +{ + uvm_thread_context_table_entry_t *table_entry; + size_t array_index; + + UVM_ASSERT(thread_context != NULL); + UVM_ASSERT(!in_interrupt()); + + // Initialize the thread context table. This can only happen when loading + // the UVM module + if (!uvm_thread_context_global_initialized()) + uvm_thread_context_global_init(); + + thread_context->task = current; + table_entry = thread_context_non_interrupt_table_entry(&array_index); + return thread_context_non_interrupt_add(thread_context, table_entry, array_index); +} + +bool uvm_thread_context_add_at(uvm_thread_context_t *thread_context, size_t table_index) +{ + uvm_thread_context_table_entry_t *table_entry; + + UVM_ASSERT(uvm_enable_builtin_tests != 0); + UVM_ASSERT(uvm_thread_context_global_initialized()); + + table_entry = g_thread_context_table + table_index; + return thread_context_non_interrupt_add(thread_context, table_entry, 0); +} + +static void thread_context_non_interrupt_remove(uvm_thread_context_t *thread_context, + uvm_thread_context_table_entry_t *table_entry) +{ + NvU32 array_index; + + UVM_ASSERT(!in_interrupt()); + UVM_ASSERT(thread_context != NULL); + UVM_ASSERT(table_entry != NULL); + UVM_ASSERT(table_entry - g_thread_context_table >= 0); + UVM_ASSERT(table_entry - g_thread_context_table < UVM_THREAD_CONTEXT_TABLE_SIZE); + + array_index = thread_context->array_index; + UVM_ASSERT(array_index <= UVM_THREAD_CONTEXT_ARRAY_SIZE); + + // We cannot use RB_EMPTY_NODE to determine if the thread context is in the + // tree, because the tree lock is not held and we haven't called RB_CLEAR_NODE. + // If the thread context is indeed in the tree, concurrent operations on + // the parent pointer/color of the thread context's node could result in + // RB_EMPTY_NODE(thread_context->node) being true. + if (array_index != UVM_THREAD_CONTEXT_ARRAY_SIZE) { + + uvm_thread_context_array_entry_t *array_entry = table_entry->array + array_index; + + UVM_ASSERT(array_index < UVM_THREAD_CONTEXT_ARRAY_SIZE); + UVM_ASSERT(atomic64_read(&array_entry->task) == (NvU64) thread_context->task); + + // Clear the task. The memory barrier prevents the write from being + // moved before a previous (in program order) write to the entry's + // thread_context field in thread_context_non_interrupt_add. + // + // A more detailed explanation about why the memory barrier is needed + // before an atomic write, and why we are not using a different flavor + // of atomic write such as atomic64_set_release, can be found in + // uvm_gpu_semaphore.c:update_completed_value_locked(). + smp_mb__before_atomic(); + atomic64_set(&array_entry->task, 0); + } + else { + unsigned long flags; + + spin_lock_irqsave(&table_entry->tree_lock, flags); + rb_erase(&thread_context->node, &table_entry->tree); + spin_unlock_irqrestore(&table_entry->tree_lock, flags); + } + + thread_context_non_interrupt_deinit(thread_context); +} + +void uvm_thread_context_remove(uvm_thread_context_t *thread_context) +{ + uvm_thread_context_table_entry_t *table_entry; + + UVM_ASSERT(thread_context != NULL); + UVM_ASSERT(!in_interrupt()); + + // If the thread context table has been deinitialized, then we must be in + // the UVM module unload path, and the thread context added during the call + // of uvm_exit has already been removed in the global deinitialization. 
+ if (!uvm_thread_context_global_initialized()) + return; + + UVM_ASSERT(thread_context->task == current); + UVM_ASSERT(uvm_thread_context() == thread_context); + + table_entry = thread_context_non_interrupt_table_entry(NULL); + thread_context_non_interrupt_remove(thread_context, table_entry); +} + +void uvm_thread_context_remove_at(uvm_thread_context_t *thread_context, size_t table_index) +{ + uvm_thread_context_table_entry_t *table_entry = g_thread_context_table + table_index; + + UVM_ASSERT(uvm_enable_builtin_tests != 0); + + thread_context_non_interrupt_remove(thread_context, table_entry); +} + +// Move operation +// -Lock information is copied to the destination, and cleared in the source. +// -Locations in the global array or tree are not copied nor cleared, since +// they may be needed for a later removal of the source, and are no longer +// valid after it. +// -When adding new members to the thread context, consider if they need to be +// moved +static void thread_context_move(uvm_thread_context_t *dst, uvm_thread_context_t *src) +{ + uvm_thread_context_lock_t *src_context_lock, *dst_context_lock; + + UVM_ASSERT(uvm_enable_builtin_tests != 0); + + src_context_lock = thread_context_lock_of(src); + dst_context_lock = thread_context_lock_of(dst); + + if ((dst_context_lock != NULL) && (src_context_lock != NULL)) { + size_t acquired_size = sizeof(src_context_lock->acquired[0]) * UVM_LOCK_ORDER_COUNT; + + dst_context_lock->skip_lock_tracking = src_context_lock->skip_lock_tracking; + src_context_lock->skip_lock_tracking = false; + + // Note that the locks are not released, even when they appear as such + // if we query the source thread context. They are still acquired in the + // destination context. + bitmap_copy(dst_context_lock->acquired_lock_orders, + src_context_lock->acquired_lock_orders, + UVM_LOCK_ORDER_COUNT); + bitmap_zero(src_context_lock->acquired_lock_orders, UVM_LOCK_ORDER_COUNT); + + bitmap_copy(dst_context_lock->exclusive_acquired_lock_orders, + src_context_lock->exclusive_acquired_lock_orders, + UVM_LOCK_ORDER_COUNT); + bitmap_zero(src_context_lock->exclusive_acquired_lock_orders, UVM_LOCK_ORDER_COUNT); + + bitmap_copy(dst_context_lock->out_of_order_acquired_lock_orders, + src_context_lock->out_of_order_acquired_lock_orders, + UVM_LOCK_ORDER_COUNT); + bitmap_zero(src_context_lock->out_of_order_acquired_lock_orders, UVM_LOCK_ORDER_COUNT); + + memcpy(dst_context_lock->acquired, src_context_lock->acquired, acquired_size); + } +} + +void uvm_thread_context_save(uvm_thread_context_t *dst) +{ + thread_context_non_interrupt_init(dst); + thread_context_move(dst, uvm_thread_context()); +} + +void uvm_thread_context_restore(uvm_thread_context_t *src) +{ + thread_context_move(uvm_thread_context(), src); + thread_context_non_interrupt_deinit(src); +} + +uvm_thread_context_lock_t *uvm_thread_context_lock_get(void) +{ + return thread_context_lock_of(uvm_thread_context()); +} + +void uvm_thread_context_lock_disable_tracking(void) +{ + uvm_thread_context_lock_t *context_lock = thread_context_lock_of(uvm_thread_context()); + + if (context_lock == NULL) + return; + + ++context_lock->skip_lock_tracking; + + UVM_ASSERT(context_lock->skip_lock_tracking != 0); +} + +void uvm_thread_context_lock_enable_tracking(void) +{ + uvm_thread_context_lock_t *context_lock = thread_context_lock_of(uvm_thread_context()); + + if (context_lock == NULL) + return; + + UVM_ASSERT(context_lock->skip_lock_tracking > 0); + + --context_lock->skip_lock_tracking; +} diff --git 
a/kernel-open/nvidia-uvm/uvm_thread_context.h b/kernel-open/nvidia-uvm/uvm_thread_context.h new file mode 100644 index 000000000..bb101ca12 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_thread_context.h @@ -0,0 +1,166 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_THREAD_CONTEXT_H__ +#define __UVM_THREAD_CONTEXT_H__ + +#include "uvm_forward_decl.h" +#include "uvm_lock.h" +#include "uvm_common.h" +#include "uvm_linux.h" + +#define UVM_THREAD_CONTEXT_TABLE_SIZE 64 + +// Used to track lock correctness and store information about locks held by each +// thread. +struct uvm_thread_context_lock_struct +{ + // Opt-out of lock tracking if >0 + NvU32 skip_lock_tracking; + + // Bitmap of acquired lock orders + DECLARE_BITMAP(acquired_lock_orders, UVM_LOCK_ORDER_COUNT); + + // Bitmap of exclusively acquired lock orders + DECLARE_BITMAP(exclusive_acquired_lock_orders, UVM_LOCK_ORDER_COUNT); + + // Bitmap of lock orders acquired out of order (via trylock) + DECLARE_BITMAP(out_of_order_acquired_lock_orders, UVM_LOCK_ORDER_COUNT); + + // Array of pointers to acquired locks. Indexed by lock order, so the + // array size is UVM_LOCK_ORDER_COUNT + // + // The value at a given index is undefined if the corresponding bit is not + // set in acquired_locked_orders. + void **acquired; +}; + +// UVM thread contexts provide thread local storage for all logical threads +// executing in the UVM driver. Both user and interrupt contexts are supported. +struct uvm_thread_context_struct +{ + // Pointer to the thread (task) associated with the context + // + // This field is ignored in interrupt paths + struct task_struct *task; + + // This context is present at the given array index if array_index is less + // than UVM_THREAD_CONTEXT_ARRAY_SIZE; otherwise is in the tree. + // + // This field is ignored in interrupt paths + NvU32 array_index; + + // Pointer to enclosing node (if any) in red-black tree + // + // This field is ignored in interrupt paths + struct rb_node node; +}; + +bool uvm_thread_context_wrapper_is_used(void); + +// A thread context wrapper augments a thread context with additional +// information useful for debugging, profiling, etc. Since the extra information +// can take up considerable space, it is only allocated if +// uvm_thread_context_wrapper_is_used() returns true. 
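The jump from an embedded uvm_thread_context_t back to its enclosing wrapper is done with container_of(); see thread_context_lock_of() in uvm_thread_context.c. A minimal userspace re-creation of that pattern is below; the struct names and the local container_of definition are stand-ins for the kernel versions.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct context {
    int id;
};

/* The wrapper embeds the plain context and adds extra debug-only state,
 * analogous to uvm_thread_context_wrapper_t embedding uvm_thread_context_t. */
struct context_wrapper {
    struct context ctx;
    int lock_tracking_state;
};

static int wrapper_state_of(struct context *ctx)
{
    struct context_wrapper *w = container_of(ctx, struct context_wrapper, ctx);

    return w->lock_tracking_state;
}

int main(void)
{
    struct context_wrapper w = { .ctx = { .id = 7 }, .lock_tracking_state = 3 };

    /* Code that only sees the embedded context can still reach the wrapper. */
    printf("id %d, wrapper state %d\n", w.ctx.id, wrapper_state_of(&w.ctx));
    return 0;
}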
+struct uvm_thread_context_wrapper_struct +{ + uvm_thread_context_t context; + + uvm_thread_context_lock_t context_lock; +}; + +bool uvm_thread_context_global_initialized(void); +void uvm_thread_context_global_init(void); +void uvm_thread_context_global_exit(void); + +// Retrieve the current thread context. Asserts if it does not exist. +uvm_thread_context_t *uvm_thread_context(void); + +// Returns true if the current thread context is present: it has been added but +// not removed, or we are in a interrupt path. +bool uvm_thread_context_present(void); + +// Set the current thread context to be thread_context. Returns false if there +// is a different thread context already associated with the current task i.e. +// the insertion did not happen. This scenario arises when re-entering the UVM +// module, because the first entry point already associated the thread with a +// different thread context. +// +// Do not invoke this function in a interrupt path. +bool uvm_thread_context_add(uvm_thread_context_t *thread_context); + +// Reset the current thread context, which should be thread_context. +// +// The current thread context is passed, even if it is guaranteed to match +// uvm_thread_context(), to avoid the lookup overhead. +// +// Do not invoke this function in a interrupt path. +void uvm_thread_context_remove(uvm_thread_context_t *thread_context); + +// Add or remove thread contexts at the given global thread context table +// index. Used only in testing. +// +// Thread contexts inserted using uvm_thread_context_add_at cannot be retrieved +// using uvm_thread_context() +bool uvm_thread_context_add_at(uvm_thread_context_t *op_context, size_t table_index); +void uvm_thread_context_remove_at(uvm_thread_context_t *op_context, size_t table_index); + +// Save the state of the current thread context to the given thread context, and +// then clear the state of the current thread context. +// +// Used only in testing: for example, we may want to ensure that a test starts +// with no locks held in the current thread context, so we save its state at the +// beginning of the test, and restore the state at the end. +// +// This function breaks any expectations about what the current thread context +// should contain. For example, any IOCTL entry point into the UVM module +// results on a power management lock being acquired. Clearing the lock state +// in the current thread context will confuse code that assumes the lock is +// acquired at all times. +void uvm_thread_context_save(uvm_thread_context_t *dst); + +// Restore the state of the current thread context out of the given (backup) +// thread context. +void uvm_thread_context_restore(uvm_thread_context_t *src); + +// Get the current thread lock context. Returns NULL if there is no thread lock +// context (we are in release mode, or an internal allocation failed). +uvm_thread_context_lock_t *uvm_thread_context_lock_get(void); + +// Disable lock tracking in the current thread lock context +// Lock tracking is enabled by default, but can be disabled by using this +// function. +// The disable lock tracking calls are refcounted so to enable tracking back all +// of the disable calls have to be paired with an enable call. +// +// This is needed in some tests that need to violate lock ordering, e.g. one of +// the push tests acquires the push sema multiple times. +void uvm_thread_context_lock_disable_tracking(void); + +// Enable back lock tracking in the current thread lock context. 
Should be +// paired with a previous uvm_thread_lock_context_disable_lock_tracking() call. +// The lock tracking is enabled back only when all previous disable calls have +// been paired with an enable call. +void uvm_thread_context_lock_enable_tracking(void); + +#endif // __UVM_THREAD_CONTEXT_H__ diff --git a/kernel-open/nvidia-uvm/uvm_thread_context_test.c b/kernel-open/nvidia-uvm/uvm_thread_context_test.c new file mode 100644 index 000000000..9bf3df486 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_thread_context_test.c @@ -0,0 +1,139 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ +#include "uvm_api.h" +#include "uvm_thread_context.h" +#include "uvm_va_space.h" +#include "uvm_kvmalloc.h" +#include "uvm_test.h" + + +static NvU64 timed_udelay(NvU64 delay_us) +{ + NvU64 start = NV_GETTIME(); + + udelay(delay_us); + + return NV_GETTIME() - start; +} + +static NvU64 timed_udelay_entry(NvU64 delay_us) +{ + UVM_ENTRY_RET(timed_udelay(delay_us)); +} + +// Measure the overhead of wrapping entry functions i.e. the overhead of +// adding and removing a thread context. +NV_STATUS uvm_test_thread_context_perf(UVM_TEST_THREAD_CONTEXT_PERF_PARAMS *params, struct file *filp) +{ + NvU64 start; + NvU32 i; + uvm_thread_context_t *thread_context; + uvm_thread_context_wrapper_t thread_context_wrapper_backup; + const NvU64 delay_us = params->delay_us; + NvU64 total_delay_ns = 0; + + if (params->iterations == 0) + return NV_ERR_INVALID_ARGUMENT; + + TEST_CHECK_RET(uvm_thread_context_present()); + TEST_CHECK_RET(uvm_thread_context_wrapper_is_used()); + + thread_context = uvm_thread_context(); + uvm_thread_context_save(&thread_context_wrapper_backup.context); + + // Remove the current thread context, forcing UVM_ENTRY_VOID to register a + // new thread context instead of reusing the existing one. 
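The perf test continues below by timing a batch of wrapped calls, subtracting the deliberately inserted delay, and reporting the average per-iteration cost, which is the arithmetic behind params->ns. A userspace analogue of that bookkeeping is sketched here; now_ns() built on clock_gettime() and the dummy wrapped_call() stand in for NV_GETTIME() and the UVM_ENTRY_VOID() wrapper.

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Stand-in for a wrapped entry point; returns how long the body itself took
 * so the caller can subtract it from the total, as the test does with
 * timed_udelay_entry(). */
static uint64_t wrapped_call(uint64_t delay_us)
{
    uint64_t start = now_ns();

    if (delay_us)
        usleep(delay_us);
    return now_ns() - start;
}

int main(void)
{
    const uint64_t iterations = 1000;
    const uint64_t delay_us = 10;
    uint64_t total_delay_ns = 0;
    uint64_t start, i;

    start = now_ns();
    for (i = 0; i < iterations; i++)
        total_delay_ns += wrapped_call(delay_us);

    /* Average wrapping overhead per call, with the known delay removed. */
    printf("overhead per call: %llu ns\n",
           (unsigned long long)((now_ns() - start - total_delay_ns) / iterations));
    return 0;
}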
+ uvm_thread_context_remove(thread_context); + TEST_CHECK_RET(!uvm_thread_context_present()); + + start = NV_GETTIME(); + + for (i = 0; i < params->iterations; i++) { + if (delay_us > 0) + total_delay_ns += timed_udelay_entry(delay_us); + else + UVM_ENTRY_VOID(); + } + + // Report average iteration time + params->ns = ((NV_GETTIME() - start - total_delay_ns) / params->iterations); + + TEST_CHECK_RET(uvm_thread_context_add(thread_context)); + TEST_CHECK_RET(uvm_thread_context_present()); + + uvm_thread_context_restore(&thread_context_wrapper_backup.context); + + return NV_OK; +} + +static uvm_thread_context_t *inner_thread_context(void) +{ + UVM_ENTRY_RET(uvm_thread_context()); +} + +NV_STATUS uvm_test_thread_context_sanity(UVM_TEST_THREAD_CONTEXT_SANITY_PARAMS *params, struct file *filp) +{ + NvU32 i; + uvm_thread_context_t *thread_context, *nested_thread_context; + uvm_thread_context_wrapper_t thread_context_wrapper_backup; + + if (params->iterations == 0) + return NV_ERR_INVALID_ARGUMENT; + + TEST_CHECK_RET(uvm_thread_context_present()); + TEST_CHECK_RET(uvm_thread_context_wrapper_is_used()); + + thread_context = uvm_thread_context(); + + // Nested entry points do not add new thread contexts. Instead, they reuse + // the top-most thread context. + nested_thread_context = inner_thread_context(); + TEST_CHECK_RET(nested_thread_context == thread_context); + + uvm_thread_context_save(&thread_context_wrapper_backup.context); + + uvm_thread_context_remove(thread_context); + TEST_CHECK_RET(!uvm_thread_context_present()); + + // The removal of a thread context is expected to be the last operation on + // every UVM module entry point. This test breaks that expectation, so + // proceed carefully and avoid invoking any function that assumes the + // presence of the thread context. + + // The loop is used to detect concurrency errors when multiple threads add + // or remove their contexts to/from global data structures. + for (i = 0; i < params->iterations; i++) { + UVM_ENTRY_VOID(); + TEST_CHECK_RET(!uvm_thread_context_present()); + } + + // The restored thread context may be added to an array or tree location + // different from the original one. + TEST_CHECK_RET(uvm_thread_context_add(thread_context)); + + uvm_thread_context_restore(&thread_context_wrapper_backup.context); + + TEST_CHECK_RET(uvm_thread_context_present()); + + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_tlb_batch.c b/kernel-open/nvidia-uvm/uvm_tlb_batch.c new file mode 100644 index 000000000..ecd516637 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_tlb_batch.c @@ -0,0 +1,138 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_tlb_batch.h" +#include "uvm_hal.h" + +void uvm_tlb_batch_begin(uvm_page_tree_t *tree, uvm_tlb_batch_t *batch) +{ + memset(batch, 0, sizeof(*batch)); + batch->tree = tree; +} + +static NvU32 smallest_page_size(NvU32 page_sizes) +{ + UVM_ASSERT(page_sizes != 0); + + return 1u << __ffs(page_sizes); +} + +static NvU32 biggest_page_size(NvU32 page_sizes) +{ + UVM_ASSERT(page_sizes != 0); + + return 1u << __fls(page_sizes); +} + +static void tlb_batch_flush_invalidate_per_va(uvm_tlb_batch_t *batch, uvm_push_t *push) +{ + uvm_page_tree_t *tree = batch->tree; + uvm_gpu_phys_address_t pdb_addr = uvm_page_tree_pdb(tree)->addr; + uvm_membar_t membar = UVM_MEMBAR_NONE; + NvU32 i; + + for (i = 0; i < batch->count; ++i) { + uvm_tlb_batch_range_t *entry = &batch->ranges[i]; + NvU32 min_page_size = smallest_page_size(entry->page_sizes); + NvU32 max_page_size = biggest_page_size(entry->page_sizes); + + // Use the depth of the max page size as it's the broadest + NvU32 depth = tree->hal->page_table_depth(max_page_size); + + UVM_ASSERT(hweight32(entry->page_sizes) > 0); + + // Do the required membar only after the last invalidate + if (i == batch->count - 1) + membar = batch->membar; + + // Use the min page size for the targeted VA invalidate as each page + // needs to be invalidated separately. + tree->gpu->parent->host_hal->tlb_invalidate_va(push, + pdb_addr, + depth, + entry->start, + entry->size, + min_page_size, + membar); + } +} + +static void tlb_batch_flush_invalidate_all(uvm_tlb_batch_t *batch, uvm_push_t *push) +{ + uvm_page_tree_t *tree = batch->tree; + uvm_gpu_t *gpu = tree->gpu; + NvU32 page_table_depth = tree->hal->page_table_depth(batch->biggest_page_size); + + gpu->parent->host_hal->tlb_invalidate_all(push, uvm_page_tree_pdb(tree)->addr, page_table_depth, batch->membar); +} + +static bool tlb_batch_should_invalidate_all(uvm_tlb_batch_t *batch) +{ + if (!batch->tree->gpu->parent->tlb_batch.va_invalidate_supported) + return true; + + if (batch->count > UVM_TLB_BATCH_MAX_ENTRIES) + return true; + + if (batch->tree->gpu->parent->tlb_batch.va_range_invalidate_supported) + return batch->total_ranges > batch->tree->gpu->parent->tlb_batch.max_ranges; + + return batch->total_pages > batch->tree->gpu->parent->tlb_batch.max_pages; +} + +void uvm_tlb_batch_end(uvm_tlb_batch_t *batch, uvm_push_t *push, uvm_membar_t tlb_membar) +{ + if (batch->count == 0) + return; + + batch->membar = uvm_membar_max(tlb_membar, batch->membar); + + if (tlb_batch_should_invalidate_all(batch)) + tlb_batch_flush_invalidate_all(batch, push); + else + tlb_batch_flush_invalidate_per_va(batch, push); +} + +void uvm_tlb_batch_invalidate(uvm_tlb_batch_t *batch, NvU64 start, NvU64 size, NvU32 page_sizes, uvm_membar_t tlb_membar) +{ + uvm_tlb_batch_range_t *new_entry; + + batch->membar = uvm_membar_max(tlb_membar, batch->membar); + + ++batch->count; + + if (batch->tree->gpu->parent->tlb_batch.va_range_invalidate_supported) + batch->total_ranges++; + else + batch->total_pages += uvm_div_pow2_64(size, smallest_page_size(page_sizes)); + + batch->biggest_page_size = max(batch->biggest_page_size, biggest_page_size(page_sizes)); + + if (tlb_batch_should_invalidate_all(batch)) + 
return; + + new_entry = &batch->ranges[batch->count - 1]; + new_entry->start = start; + new_entry->size = size; + new_entry->page_sizes = page_sizes; +} diff --git a/kernel-open/nvidia-uvm/uvm_tlb_batch.h b/kernel-open/nvidia-uvm/uvm_tlb_batch.h new file mode 100644 index 000000000..c5f833bd6 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_tlb_batch.h @@ -0,0 +1,110 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_TLB_BATCH_H__ +#define __UVM_TLB_BATCH_H__ + +#include "uvm_forward_decl.h" +#include "uvm_hal_types.h" + +// Max number of separate VA ranges to track before falling back to invalidate all. +// TLB batches take space on the stack so this number should be big enough to +// cover our common cases, but not bigger. +// +// TODO: Bug 1767241: Once we have all the paths using TLB invalidates +// implemented, verify whether it makes sense. +#define UVM_TLB_BATCH_MAX_ENTRIES 4 + +typedef struct +{ + NvU64 start; + NvU64 size; + + // Min and max page size ored together + NvU32 page_sizes; +} uvm_tlb_batch_range_t; + +struct uvm_tlb_batch_struct +{ + uvm_page_tree_t *tree; + + union + { + // Total number of pages covered by the queued up ranges so far + NvU32 total_pages; + + // Total number of ranges that have been invalidated so far + // Each range can be invalidated using a single Host method on supported GPUs + NvU32 total_ranges; + }; + + // Queued up ranges to invalidate + uvm_tlb_batch_range_t ranges[UVM_TLB_BATCH_MAX_ENTRIES]; + NvU32 count; + + // Biggest page size across all queued up invalidates + NvU32 biggest_page_size; + + // Max membar across all queued up invalidates + uvm_membar_t membar; +}; + +// Begin a TLB invalidate batch +void uvm_tlb_batch_begin(uvm_page_tree_t *tree, uvm_tlb_batch_t *batch); + +// Queue up an invalidate of the [start, start + size) range that will invalidate +// all TLB cache entries for all page sizes included in the page sizes mask. +// The smallest page size in the mask affects the density of the per VA TLB +// invalidate (if one ends up being used) and the largest page size affects the +// depth of the issued TLB invalidates. 
+// +// If the membar parameter is not UVM_MEMBAR_NONE, the specified membar will +// be performed logically after the TLB invalidate such that all physical memory +// accesses using the old translations are ordered to the scope of the membar. +void uvm_tlb_batch_invalidate(uvm_tlb_batch_t *batch, NvU64 start, NvU64 size, NvU32 page_sizes, uvm_membar_t tlb_membar); + +// End a TLB invalidate batch +// +// This will push the required TLB invalidate to invalidate all the queued up +// ranges. +// +// The tlb_membar argument has the same behavior as in uvm_tlb_batch_invalidate. +// This allows callers which use the same membar for all calls to +// uvm_tlb_batch_invalidate to pass a single membar once at the end of the +// batch. +void uvm_tlb_batch_end(uvm_tlb_batch_t *batch, uvm_push_t *push, uvm_membar_t tlb_membar); + +// Helper for invalidating a single range immediately. +// +// Internally begins and ends a TLB batch. +static void uvm_tlb_batch_single_invalidate(uvm_page_tree_t *tree, uvm_push_t *push, + NvU64 start, NvU64 size, NvU32 page_sizes, uvm_membar_t tlb_membar) +{ + uvm_tlb_batch_t batch; + + uvm_tlb_batch_begin(tree, &batch); + uvm_tlb_batch_invalidate(&batch, start, size, page_sizes, UVM_MEMBAR_NONE); + uvm_tlb_batch_end(&batch, push, tlb_membar); +} + +#endif // __UVM_TLB_BATCH_H__ diff --git a/kernel-open/nvidia-uvm/uvm_tools.c b/kernel-open/nvidia-uvm/uvm_tools.c new file mode 100644 index 000000000..9363de8e8 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_tools.c @@ -0,0 +1,2333 @@ +/******************************************************************************* + Copyright (c) 2016-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ +#include "uvm_common.h" +#include "uvm_ioctl.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" +#include "uvm_tools.h" +#include "uvm_va_space.h" +#include "uvm_api.h" +#include "uvm_hal_types.h" +#include "uvm_va_block.h" +#include "uvm_va_range.h" +#include "uvm_push.h" +#include "uvm_forward_decl.h" +#include "uvm_range_group.h" +#include "uvm_mem.h" +#include "nv_speculation_barrier.h" + +// We limit the number of times a page can be retained by the kernel +// to prevent the user from maliciously passing UVM tools the same page +// over and over again in an attempt to overflow the refcount. 
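As a side note to the comment above: the guard it describes amounts to refusing to take yet another reference on a page whose reference count is already at the cap; the per-page check itself is applied inside map_user_pages() further down in this file. A minimal illustrative sketch, assuming a cap such as the MAX_PAGE_COUNT value defined next (the helper name is hypothetical):

#include <linux/mm.h>

/* Illustrative sketch only: refuse to retain a page whose refcount already
 * exceeds the cap, so repeatedly registering the same page cannot drive the
 * count toward overflow. */
static bool example_page_refcount_within_cap(struct page *page, int cap)
{
    return page_count(page) <= cap;
}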
+#define MAX_PAGE_COUNT (1 << 20) + +typedef struct +{ + NvU32 get_ahead; + NvU32 get_behind; + NvU32 put_ahead; + NvU32 put_behind; +} uvm_tools_queue_snapshot_t; + +typedef struct +{ + uvm_spinlock_t lock; + NvU64 subscribed_queues; + struct list_head queue_nodes[UvmEventNumTypesAll]; + + struct page **queue_buffer_pages; + UvmEventEntry *queue; + NvU32 queue_buffer_count; + NvU32 notification_threshold; + + struct page **control_buffer_pages; + UvmToolsEventControlData *control; + + wait_queue_head_t wait_queue; + bool is_wakeup_get_valid; + NvU32 wakeup_get; +} uvm_tools_queue_t; + +typedef struct +{ + struct list_head counter_nodes[UVM_TOTAL_COUNTERS]; + NvU64 subscribed_counters; + + struct page **counter_buffer_pages; + NvU64 *counters; + + bool all_processors; + NvProcessorUuid processor; +} uvm_tools_counter_t; + +// private_data for /dev/nvidia-uvm-tools +typedef struct +{ + bool is_queue; + struct file *uvm_file; + union + { + uvm_tools_queue_t queue; + uvm_tools_counter_t counter; + }; +} uvm_tools_event_tracker_t; + +// Delayed events +// +// Events that require gpu timestamps for asynchronous operations use a delayed +// notification mechanism. Each event type registers a callback that is invoked +// from the update_progress channel routines. The callback then enqueues a +// work item that takes care of notifying the events. This module keeps a +// global list of channels with pending events. Other modules or user apps (via +// ioctl) may call uvm_tools_flush_events to update the progress of the channels +// in the list, as needed. +// +// User apps will need to flush events before removing gpus to avoid getting +// events with gpus ids that have been removed. + +// This object describes the pending migrations operations within a VA block +typedef struct +{ + nv_kthread_q_item_t queue_item; + uvm_processor_id_t dst; + uvm_processor_id_t src; + uvm_va_space_t *va_space; + + uvm_channel_t *channel; + struct list_head events; + NvU64 start_timestamp_cpu; + NvU64 end_timestamp_cpu; + NvU64 *start_timestamp_gpu_addr; + NvU64 start_timestamp_gpu; + NvU64 range_group_id; +} block_migration_data_t; + +// This object represents a specific pending migration within a VA block +typedef struct +{ + struct list_head events_node; + NvU64 bytes; + NvU64 address; + NvU64 *end_timestamp_gpu_addr; + NvU64 end_timestamp_gpu; + UvmEventMigrationCause cause; +} migration_data_t; + +// This object represents a pending gpu faut replay operation +typedef struct +{ + nv_kthread_q_item_t queue_item; + uvm_channel_t *channel; + uvm_gpu_id_t gpu_id; + NvU32 batch_id; + uvm_fault_client_type_t client_type; + NvU64 timestamp; + NvU64 timestamp_gpu; + NvU64 *timestamp_gpu_addr; +} replay_data_t; + +// This object describes the pending map remote operations within a VA block +typedef struct +{ + nv_kthread_q_item_t queue_item; + uvm_processor_id_t src; + uvm_processor_id_t dst; + UvmEventMapRemoteCause cause; + NvU64 timestamp; + uvm_va_space_t *va_space; + + uvm_channel_t *channel; + struct list_head events; +} block_map_remote_data_t; + +// This object represents a pending map remote operation +typedef struct +{ + struct list_head events_node; + + NvU64 address; + NvU64 size; + NvU64 timestamp_gpu; + NvU64 *timestamp_gpu_addr; +} map_remote_data_t; + + +static struct cdev g_uvm_tools_cdev; +static LIST_HEAD(g_tools_va_space_list); +static NvU32 g_tools_enabled_event_count[UvmEventNumTypesAll]; +static uvm_rw_semaphore_t g_tools_va_space_list_lock; +static struct kmem_cache *g_tools_event_tracker_cache 
__read_mostly = NULL; +static struct kmem_cache *g_tools_block_migration_data_cache __read_mostly = NULL; +static struct kmem_cache *g_tools_migration_data_cache __read_mostly = NULL; +static struct kmem_cache *g_tools_replay_data_cache __read_mostly = NULL; +static struct kmem_cache *g_tools_block_map_remote_data_cache __read_mostly = NULL; +static struct kmem_cache *g_tools_map_remote_data_cache __read_mostly = NULL; +static uvm_spinlock_t g_tools_channel_list_lock; +static LIST_HEAD(g_tools_channel_list); +static nv_kthread_q_t g_tools_queue; + +static NV_STATUS tools_update_status(uvm_va_space_t *va_space); + +static uvm_tools_event_tracker_t *tools_event_tracker(struct file *filp) +{ + return (uvm_tools_event_tracker_t *)atomic_long_read((atomic_long_t *)&filp->private_data); +} + +static bool tracker_is_queue(uvm_tools_event_tracker_t *event_tracker) +{ + return event_tracker != NULL && event_tracker->is_queue; +} + +static bool tracker_is_counter(uvm_tools_event_tracker_t *event_tracker) +{ + return event_tracker != NULL && !event_tracker->is_queue; +} + +static uvm_va_space_t *tools_event_tracker_va_space(uvm_tools_event_tracker_t *event_tracker) +{ + uvm_va_space_t *va_space; + UVM_ASSERT(event_tracker->uvm_file); + va_space = uvm_va_space_get(event_tracker->uvm_file); + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + return va_space; +} + +static void uvm_put_user_pages_dirty(struct page **pages, NvU64 page_count) +{ + NvU64 i; + + for (i = 0; i < page_count; i++) { + set_page_dirty(pages[i]); + put_page(pages[i]); + } +} + +static void unmap_user_pages(struct page **pages, void *addr, NvU64 size) +{ + size = DIV_ROUND_UP(size, PAGE_SIZE); + vunmap((NvU8 *)addr); + uvm_put_user_pages_dirty(pages, size); + uvm_kvfree(pages); +} + +// Map virtual memory of data from [user_va, user_va + size) of current process into kernel. +// Sets *addr to kernel mapping and *pages to the array of struct pages that contain the memory. 
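A hypothetical caller sketch of how this map/unmap pair is meant to be used (the example_ name and the one-byte read are purely illustrative; the mapping function itself follows):

/* Sketch only: pin and map the user buffer, access it through the kernel
 * mapping, then release the pages with the matching unmap helper above. */
static NV_STATUS example_peek_user_buffer(NvU64 user_va, NvU64 size, NvU8 *out_first_byte)
{
    void *kaddr;
    struct page **pages;
    NV_STATUS status = map_user_pages(user_va, size, &kaddr, &pages);

    if (status != NV_OK)
        return status;

    *out_first_byte = *(NvU8 *)kaddr;

    unmap_user_pages(pages, kaddr, size);
    return NV_OK;
}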
+static NV_STATUS map_user_pages(NvU64 user_va, NvU64 size, void **addr, struct page ***pages) +{ + NV_STATUS status = NV_OK; + long ret = 0; + long num_pages; + long i; + struct vm_area_struct **vmas = NULL; + + *addr = NULL; + *pages = NULL; + num_pages = DIV_ROUND_UP(size, PAGE_SIZE); + + if (uvm_api_range_invalid(user_va, num_pages * PAGE_SIZE)) { + status = NV_ERR_INVALID_ADDRESS; + goto fail; + } + + *pages = uvm_kvmalloc(sizeof(struct page *) * num_pages); + if (*pages == NULL) { + status = NV_ERR_NO_MEMORY; + goto fail; + } + + vmas = uvm_kvmalloc(sizeof(struct vm_area_struct *) * num_pages); + if (vmas == NULL) { + status = NV_ERR_NO_MEMORY; + goto fail; + } + + nv_mmap_read_lock(current->mm); + ret = NV_GET_USER_PAGES(user_va, num_pages, 1, 0, *pages, vmas); + nv_mmap_read_unlock(current->mm); + if (ret != num_pages) { + status = NV_ERR_INVALID_ARGUMENT; + goto fail; + } + + for (i = 0; i < num_pages; i++) { + if (page_count((*pages)[i]) > MAX_PAGE_COUNT || uvm_file_is_nvidia_uvm(vmas[i]->vm_file)) { + status = NV_ERR_INVALID_ARGUMENT; + goto fail; + } + } + + *addr = vmap(*pages, num_pages, VM_MAP, PAGE_KERNEL); + if (*addr == NULL) + goto fail; + + uvm_kvfree(vmas); + return NV_OK; + +fail: + if (*pages == NULL) + return status; + + uvm_kvfree(vmas); + + if (ret > 0) + uvm_put_user_pages_dirty(*pages, ret); + else if (ret < 0) + status = errno_to_nv_status(ret); + + uvm_kvfree(*pages); + *pages = NULL; + return status; +} + +static void insert_event_tracker(uvm_va_space_t *va_space, + struct list_head *node, + NvU32 list_count, + NvU64 list_mask, + NvU64 *subscribed_mask, + struct list_head *lists, + NvU64 *inserted_lists) +{ + NvU32 i; + NvU64 insertable_lists = list_mask & ~*subscribed_mask; + + uvm_assert_rwsem_locked_write(&g_tools_va_space_list_lock); + uvm_assert_rwsem_locked_write(&va_space->tools.lock); + + for (i = 0; i < list_count; i++) { + if (insertable_lists & (1ULL << i)) { + ++g_tools_enabled_event_count[i]; + list_add(node + i, lists + i); + } + } + + *subscribed_mask |= list_mask; + *inserted_lists = insertable_lists; +} + +static void remove_event_tracker(uvm_va_space_t *va_space, + struct list_head *node, + NvU32 list_count, + NvU64 list_mask, + NvU64 *subscribed_mask) +{ + NvU32 i; + NvU64 removable_lists = list_mask & *subscribed_mask; + + uvm_assert_rwsem_locked_write(&g_tools_va_space_list_lock); + uvm_assert_rwsem_locked_write(&va_space->tools.lock); + + for (i = 0; i < list_count; i++) { + if (removable_lists & (1ULL << i)) { + UVM_ASSERT(g_tools_enabled_event_count[i] > 0); + --g_tools_enabled_event_count[i]; + list_del(node + i); + } + } + + *subscribed_mask &= ~list_mask; +} + +static bool queue_needs_wakeup(uvm_tools_queue_t *queue, uvm_tools_queue_snapshot_t *sn) +{ + NvU32 queue_mask = queue->queue_buffer_count - 1; + + uvm_assert_spinlock_locked(&queue->lock); + return ((queue->queue_buffer_count + sn->put_behind - sn->get_ahead) & queue_mask) >= queue->notification_threshold; +} + +static void destroy_event_tracker(uvm_tools_event_tracker_t *event_tracker) +{ + if (event_tracker->uvm_file != NULL) { + NV_STATUS status; + uvm_va_space_t *va_space = tools_event_tracker_va_space(event_tracker); + + uvm_down_write(&g_tools_va_space_list_lock); + uvm_down_write(&va_space->perf_events.lock); + uvm_down_write(&va_space->tools.lock); + + if (event_tracker->is_queue) { + uvm_tools_queue_t *queue = &event_tracker->queue; + + remove_event_tracker(va_space, + queue->queue_nodes, + UvmEventNumTypesAll, + queue->subscribed_queues, + 
&queue->subscribed_queues); + + if (queue->queue != NULL) { + unmap_user_pages(queue->queue_buffer_pages, + queue->queue, + queue->queue_buffer_count * sizeof(UvmEventEntry)); + } + + if (queue->control != NULL) { + unmap_user_pages(queue->control_buffer_pages, + queue->control, + sizeof(UvmToolsEventControlData)); + } + } + else { + uvm_tools_counter_t *counters = &event_tracker->counter; + + remove_event_tracker(va_space, + counters->counter_nodes, + UVM_TOTAL_COUNTERS, + counters->subscribed_counters, + &counters->subscribed_counters); + + if (counters->counters != NULL) { + unmap_user_pages(counters->counter_buffer_pages, + counters->counters, + UVM_TOTAL_COUNTERS * sizeof(NvU64)); + } + } + + // de-registration should not fail + status = tools_update_status(va_space); + UVM_ASSERT(status == NV_OK); + + uvm_up_write(&va_space->tools.lock); + uvm_up_write(&va_space->perf_events.lock); + uvm_up_write(&g_tools_va_space_list_lock); + + fput(event_tracker->uvm_file); + } + kmem_cache_free(g_tools_event_tracker_cache, event_tracker); +} + +static void enqueue_event(const UvmEventEntry *entry, uvm_tools_queue_t *queue) +{ + UvmToolsEventControlData *ctrl = queue->control; + uvm_tools_queue_snapshot_t sn; + NvU32 queue_size = queue->queue_buffer_count; + NvU32 queue_mask = queue_size - 1; + + // Prevent processor speculation prior to accessing user-mapped memory to + // avoid leaking information from side-channel attacks. There are many + // possible paths leading to this point and it would be difficult and error- + // prone to audit all of them to determine whether user mode could guide + // this access to kernel memory under speculative execution, so to be on the + // safe side we'll just always block speculation. + nv_speculation_barrier(); + + uvm_spin_lock(&queue->lock); + + // ctrl is mapped into user space with read and write permissions, + // so its values cannot be trusted. 
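/*
 * Illustrative aside: because the control page is writable from user space,
 * the get/put indices read just below are masked with queue_mask before use,
 * so even a corrupted value from the shared page cannot index outside the
 * queue. With a power-of-two queue_buffer_count the sanitization reduces to:
 *
 *     NvU32 queue_mask = queue_buffer_count - 1;
 *     NvU32 idx        = untrusted_index & queue_mask;  // always < queue_buffer_count
 */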
+ sn.get_behind = atomic_read((atomic_t *)&ctrl->get_behind) & queue_mask; + sn.put_behind = atomic_read((atomic_t *)&ctrl->put_behind) & queue_mask; + sn.put_ahead = (sn.put_behind + 1) & queue_mask; + + // one free element means that the queue is full + if (((queue_size + sn.get_behind - sn.put_behind) & queue_mask) == 1) { + atomic64_inc((atomic64_t *)&ctrl->dropped + entry->eventData.eventType); + goto unlock; + } + + memcpy(queue->queue + sn.put_behind, entry, sizeof(*entry)); + + sn.put_behind = sn.put_ahead; + // put_ahead and put_behind will always be the same outside of queue->lock + // this allows the user-space consumer to choose either a 2 or 4 pointer synchronization approach + atomic_set((atomic_t *)&ctrl->put_ahead, sn.put_behind); + atomic_set((atomic_t *)&ctrl->put_behind, sn.put_behind); + + sn.get_ahead = atomic_read((atomic_t *)&ctrl->get_ahead); + // if the queue needs to be woken up, only signal if we haven't signaled before for this value of get_ahead + if (queue_needs_wakeup(queue, &sn) && !(queue->is_wakeup_get_valid && queue->wakeup_get == sn.get_ahead)) { + queue->is_wakeup_get_valid = true; + queue->wakeup_get = sn.get_ahead; + wake_up_all(&queue->wait_queue); + } + +unlock: + uvm_spin_unlock(&queue->lock); +} + +static void uvm_tools_record_event(uvm_va_space_t *va_space, const UvmEventEntry *entry) +{ + NvU8 eventType = entry->eventData.eventType; + uvm_tools_queue_t *queue; + + UVM_ASSERT(eventType < UvmEventNumTypesAll); + + uvm_assert_rwsem_locked(&va_space->tools.lock); + + list_for_each_entry(queue, va_space->tools.queues + eventType, queue_nodes[eventType]) + enqueue_event(entry, queue); +} + +static void uvm_tools_broadcast_event(const UvmEventEntry *entry) +{ + uvm_va_space_t *va_space; + + uvm_down_read(&g_tools_va_space_list_lock); + list_for_each_entry(va_space, &g_tools_va_space_list, tools.node) { + uvm_down_read(&va_space->tools.lock); + uvm_tools_record_event(va_space, entry); + uvm_up_read(&va_space->tools.lock); + } + uvm_up_read(&g_tools_va_space_list_lock); +} + +static bool counter_matches_processor(UvmCounterName counter, const NvProcessorUuid *processor) +{ + // For compatibility with older counters, CPU faults for memory with a preferred location are reported + // for their preferred location as well as for the CPU device itself. + // This check prevents double counting in the aggregate count. + if (counter == UvmCounterNameCpuPageFaultCount) + return uvm_processor_uuid_eq(processor, &NV_PROCESSOR_UUID_CPU_DEFAULT); + return true; +} + +static void uvm_tools_inc_counter(uvm_va_space_t *va_space, + UvmCounterName counter, + NvU64 amount, + const NvProcessorUuid *processor) +{ + UVM_ASSERT((NvU32)counter < UVM_TOTAL_COUNTERS); + uvm_assert_rwsem_locked(&va_space->tools.lock); + + if (amount > 0) { + uvm_tools_counter_t *counters; + + // Prevent processor speculation prior to accessing user-mapped memory + // to avoid leaking information from side-channel attacks. There are + // many possible paths leading to this point and it would be difficult + // and error-prone to audit all of them to determine whether user mode + // could guide this access to kernel memory under speculative execution, + // so to be on the safe side we'll just always block speculation. 
+ nv_speculation_barrier(); + + list_for_each_entry(counters, va_space->tools.counters + counter, counter_nodes[counter]) { + if ((counters->all_processors && counter_matches_processor(counter, processor)) || + uvm_processor_uuid_eq(&counters->processor, processor)) { + atomic64_add(amount, (atomic64_t *)(counters->counters + counter)); + } + } + } +} + +static bool tools_is_counter_enabled(uvm_va_space_t *va_space, UvmCounterName counter) +{ + uvm_assert_rwsem_locked(&va_space->tools.lock); + + UVM_ASSERT(counter < UVM_TOTAL_COUNTERS); + return !list_empty(va_space->tools.counters + counter); +} + +static bool tools_is_event_enabled(uvm_va_space_t *va_space, UvmEventType event) +{ + uvm_assert_rwsem_locked(&va_space->tools.lock); + + UVM_ASSERT(event < UvmEventNumTypesAll); + return !list_empty(va_space->tools.queues + event); +} + +static bool tools_is_event_enabled_in_any_va_space(UvmEventType event) +{ + bool ret = false; + + uvm_down_read(&g_tools_va_space_list_lock); + ret = g_tools_enabled_event_count[event] != 0; + uvm_up_read(&g_tools_va_space_list_lock); + + return ret; +} + +static bool tools_are_enabled(uvm_va_space_t *va_space) +{ + NvU32 i; + + uvm_assert_rwsem_locked(&va_space->tools.lock); + + for (i = 0; i < UVM_TOTAL_COUNTERS; i++) { + if (tools_is_counter_enabled(va_space, i)) + return true; + } + for (i = 0; i < UvmEventNumTypesAll; i++) { + if (tools_is_event_enabled(va_space, i)) + return true; + } + return false; +} + +static bool tools_is_fault_callback_needed(uvm_va_space_t *va_space) +{ + return tools_is_event_enabled(va_space, UvmEventTypeCpuFault) || + tools_is_event_enabled(va_space, UvmEventTypeGpuFault) || + tools_is_counter_enabled(va_space, UvmCounterNameCpuPageFaultCount) || + tools_is_counter_enabled(va_space, UvmCounterNameGpuPageFaultCount); +} + +static bool tools_is_migration_callback_needed(uvm_va_space_t *va_space) +{ + return tools_is_event_enabled(va_space, UvmEventTypeMigration) || + tools_is_event_enabled(va_space, UvmEventTypeReadDuplicate) || + tools_is_counter_enabled(va_space, UvmCounterNameBytesXferDtH) || + tools_is_counter_enabled(va_space, UvmCounterNameBytesXferHtD); +} + +static int uvm_tools_open(struct inode *inode, struct file *filp) +{ + filp->private_data = NULL; + return -nv_status_to_errno(uvm_global_get_status()); +} + +static int uvm_tools_open_entry(struct inode *inode, struct file *filp) +{ + UVM_ENTRY_RET(uvm_tools_open(inode, filp)); +} + +static int uvm_tools_release(struct inode *inode, struct file *filp) +{ + uvm_tools_event_tracker_t *event_tracker = tools_event_tracker(filp); + if (event_tracker != NULL) { + destroy_event_tracker(event_tracker); + filp->private_data = NULL; + } + return -nv_status_to_errno(uvm_global_get_status()); +} + +static int uvm_tools_release_entry(struct inode *inode, struct file *filp) +{ + UVM_ENTRY_RET(uvm_tools_release(inode, filp)); +} + +static long uvm_tools_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TOOLS_INIT_EVENT_TRACKER, uvm_api_tools_init_event_tracker); + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TOOLS_SET_NOTIFICATION_THRESHOLD, uvm_api_tools_set_notification_threshold); + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TOOLS_EVENT_QUEUE_ENABLE_EVENTS, uvm_api_tools_event_queue_enable_events); + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TOOLS_EVENT_QUEUE_DISABLE_EVENTS, uvm_api_tools_event_queue_disable_events); + UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TOOLS_ENABLE_COUNTERS, uvm_api_tools_enable_counters); + 
UVM_ROUTE_CMD_STACK_NO_INIT_CHECK(UVM_TOOLS_DISABLE_COUNTERS, uvm_api_tools_disable_counters); + } + + uvm_thread_assert_all_unlocked(); + + return -EINVAL; +} + +static long uvm_tools_unlocked_ioctl_entry(struct file *filp, unsigned int cmd, unsigned long arg) +{ + UVM_ENTRY_RET(uvm_tools_unlocked_ioctl(filp, cmd, arg)); +} + +static unsigned uvm_tools_poll(struct file *filp, poll_table *wait) +{ + int flags = 0; + uvm_tools_queue_snapshot_t sn; + uvm_tools_event_tracker_t *event_tracker; + UvmToolsEventControlData *ctrl; + + if (uvm_global_get_status() != NV_OK) + return POLLERR; + + event_tracker = tools_event_tracker(filp); + if (!tracker_is_queue(event_tracker)) + return POLLERR; + + uvm_spin_lock(&event_tracker->queue.lock); + + event_tracker->queue.is_wakeup_get_valid = false; + ctrl = event_tracker->queue.control; + sn.get_ahead = atomic_read((atomic_t *)&ctrl->get_ahead); + sn.put_behind = atomic_read((atomic_t *)&ctrl->put_behind); + + if (queue_needs_wakeup(&event_tracker->queue, &sn)) + flags = POLLIN | POLLRDNORM; + + uvm_spin_unlock(&event_tracker->queue.lock); + + poll_wait(filp, &event_tracker->queue.wait_queue, wait); + return flags; +} + +static unsigned uvm_tools_poll_entry(struct file *filp, poll_table *wait) +{ + UVM_ENTRY_RET(uvm_tools_poll(filp, wait)); +} + +static UvmEventFaultType g_hal_to_tools_fault_type_table[UVM_FAULT_TYPE_COUNT] = { + [UVM_FAULT_TYPE_INVALID_PDE] = UvmFaultTypeInvalidPde, + [UVM_FAULT_TYPE_INVALID_PTE] = UvmFaultTypeInvalidPte, + [UVM_FAULT_TYPE_ATOMIC] = UvmFaultTypeAtomic, + [UVM_FAULT_TYPE_WRITE] = UvmFaultTypeWrite, + [UVM_FAULT_TYPE_PDE_SIZE] = UvmFaultTypeInvalidPdeSize, + [UVM_FAULT_TYPE_VA_LIMIT_VIOLATION] = UvmFaultTypeLimitViolation, + [UVM_FAULT_TYPE_UNBOUND_INST_BLOCK] = UvmFaultTypeUnboundInstBlock, + [UVM_FAULT_TYPE_PRIV_VIOLATION] = UvmFaultTypePrivViolation, + [UVM_FAULT_TYPE_PITCH_MASK_VIOLATION] = UvmFaultTypePitchMaskViolation, + [UVM_FAULT_TYPE_WORK_CREATION] = UvmFaultTypeWorkCreation, + [UVM_FAULT_TYPE_UNSUPPORTED_APERTURE] = UvmFaultTypeUnsupportedAperture, + [UVM_FAULT_TYPE_COMPRESSION_FAILURE] = UvmFaultTypeCompressionFailure, + [UVM_FAULT_TYPE_UNSUPPORTED_KIND] = UvmFaultTypeUnsupportedKind, + [UVM_FAULT_TYPE_REGION_VIOLATION] = UvmFaultTypeRegionViolation, + [UVM_FAULT_TYPE_POISONED] = UvmFaultTypePoison, +}; + +// TODO: add new value for weak atomics in tools +static UvmEventMemoryAccessType g_hal_to_tools_fault_access_type_table[UVM_FAULT_ACCESS_TYPE_COUNT] = { + [UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG] = UvmEventMemoryAccessTypeAtomic, + [UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK] = UvmEventMemoryAccessTypeAtomic, + [UVM_FAULT_ACCESS_TYPE_WRITE] = UvmEventMemoryAccessTypeWrite, + [UVM_FAULT_ACCESS_TYPE_READ] = UvmEventMemoryAccessTypeRead, + [UVM_FAULT_ACCESS_TYPE_PREFETCH] = UvmEventMemoryAccessTypePrefetch +}; + +static UvmEventApertureType g_hal_to_tools_aperture_table[UVM_APERTURE_MAX] = { + [UVM_APERTURE_PEER_0] = UvmEventAperturePeer0, + [UVM_APERTURE_PEER_1] = UvmEventAperturePeer1, + [UVM_APERTURE_PEER_2] = UvmEventAperturePeer2, + [UVM_APERTURE_PEER_3] = UvmEventAperturePeer3, + [UVM_APERTURE_PEER_4] = UvmEventAperturePeer4, + [UVM_APERTURE_PEER_5] = UvmEventAperturePeer5, + [UVM_APERTURE_PEER_6] = UvmEventAperturePeer6, + [UVM_APERTURE_PEER_7] = UvmEventAperturePeer7, + [UVM_APERTURE_SYS] = UvmEventApertureSys, + [UVM_APERTURE_VID] = UvmEventApertureVid, +}; + +static UvmEventFaultClientType g_hal_to_tools_fault_client_type_table[UVM_FAULT_CLIENT_TYPE_COUNT] = { + [UVM_FAULT_CLIENT_TYPE_GPC] = 
UvmEventFaultClientTypeGpc, + [UVM_FAULT_CLIENT_TYPE_HUB] = UvmEventFaultClientTypeHub, +}; + +static void record_gpu_fault_instance(uvm_gpu_t *gpu, + uvm_va_space_t *va_space, + const uvm_fault_buffer_entry_t *fault_entry, + NvU64 batch_id, + NvU64 timestamp) +{ + UvmEventEntry entry; + UvmEventGpuFaultInfo *info = &entry.eventData.gpuFault; + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeGpuFault; + info->gpuIndex = uvm_id_value(gpu->id); + info->faultType = g_hal_to_tools_fault_type_table[fault_entry->fault_type]; + info->accessType = g_hal_to_tools_fault_access_type_table[fault_entry->fault_access_type]; + info->clientType = g_hal_to_tools_fault_client_type_table[fault_entry->fault_source.client_type]; + if (fault_entry->is_replayable) + info->gpcId = fault_entry->fault_source.gpc_id; + else + info->channelId = fault_entry->fault_source.channel_id; + info->clientId = fault_entry->fault_source.client_id; + info->address = fault_entry->fault_address; + info->timeStamp = timestamp; + info->timeStampGpu = fault_entry->timestamp; + info->batchId = batch_id; + + uvm_tools_record_event(va_space, &entry); +} + +static void uvm_tools_record_fault(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + uvm_va_space_t *va_space = event_data->fault.space; + + UVM_ASSERT(event_id == UVM_PERF_EVENT_FAULT); + UVM_ASSERT(event_data->fault.space); + + uvm_assert_rwsem_locked(&va_space->lock); + uvm_assert_rwsem_locked(&va_space->perf_events.lock); + UVM_ASSERT(va_space->tools.enabled); + + uvm_down_read(&va_space->tools.lock); + UVM_ASSERT(tools_is_fault_callback_needed(va_space)); + + if (UVM_ID_IS_CPU(event_data->fault.proc_id)) { + if (tools_is_event_enabled(va_space, UvmEventTypeCpuFault)) { + UvmEventEntry entry; + UvmEventCpuFaultInfo *info = &entry.eventData.cpuFault; + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeCpuFault; + if (event_data->fault.cpu.is_write) + info->accessType = UvmEventMemoryAccessTypeWrite; + else + info->accessType = UvmEventMemoryAccessTypeRead; + + info->address = event_data->fault.cpu.fault_va; + info->timeStamp = NV_GETTIME(); + // assume that current owns va_space + info->pid = uvm_get_stale_process_id(); + info->threadId = uvm_get_stale_thread_id(); + info->pc = event_data->fault.cpu.pc; + + uvm_tools_record_event(va_space, &entry); + } + if (tools_is_counter_enabled(va_space, UvmCounterNameCpuPageFaultCount)) { + uvm_processor_id_t preferred_location; + + // The UVM Lite tools interface did not represent the CPU as a UVM + // device. It reported CPU faults against the corresponding + // allocation's 'home location'. Though this driver's tools + // interface does include a CPU device, for compatibility, the + // driver still reports faults against a buffer's preferred + // location, in addition to the CPU. 
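/*
 * Illustrative aside: the calls below report the same CPU fault twice, once
 * against the CPU UUID and once against the preferred-location GPU (for
 * compatibility with the older interface). counter_matches_processor() above
 * keeps the aggregate ("all processors") tracker from counting it twice: for
 * UvmCounterNameCpuPageFaultCount, the aggregate only accepts the report made
 * against NV_PROCESSOR_UUID_CPU_DEFAULT, while per-GPU trackers still see the
 * preferred-location report.
 */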
+ uvm_tools_inc_counter(va_space, UvmCounterNameCpuPageFaultCount, 1, &NV_PROCESSOR_UUID_CPU_DEFAULT); + + preferred_location = event_data->fault.preferred_location; + if (UVM_ID_IS_GPU(preferred_location)) { + uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, preferred_location); + uvm_tools_inc_counter(va_space, UvmCounterNameCpuPageFaultCount, 1, uvm_gpu_uuid(gpu)); + } + } + } + else { + uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, event_data->fault.proc_id); + UVM_ASSERT(gpu); + + if (tools_is_event_enabled(va_space, UvmEventTypeGpuFault)) { + NvU64 timestamp = NV_GETTIME(); + uvm_fault_buffer_entry_t *fault_entry = event_data->fault.gpu.buffer_entry; + uvm_fault_buffer_entry_t *fault_instance; + + record_gpu_fault_instance(gpu, va_space, fault_entry, event_data->fault.gpu.batch_id, timestamp); + + list_for_each_entry(fault_instance, &fault_entry->merged_instances_list, merged_instances_list) + record_gpu_fault_instance(gpu, va_space, fault_instance, event_data->fault.gpu.batch_id, timestamp); + } + + if (tools_is_counter_enabled(va_space, UvmCounterNameGpuPageFaultCount)) + uvm_tools_inc_counter(va_space, UvmCounterNameGpuPageFaultCount, 1, uvm_gpu_uuid(gpu)); + } + uvm_up_read(&va_space->tools.lock); +} + +static void add_pending_event_for_channel(uvm_channel_t *channel) +{ + uvm_assert_spinlock_locked(&g_tools_channel_list_lock); + + if (channel->tools.pending_event_count++ == 0) + list_add_tail(&channel->tools.channel_list_node, &g_tools_channel_list); +} + +static void remove_pending_event_for_channel(uvm_channel_t *channel) +{ + uvm_assert_spinlock_locked(&g_tools_channel_list_lock); + UVM_ASSERT(channel->tools.pending_event_count > 0); + if (--channel->tools.pending_event_count == 0) + list_del_init(&channel->tools.channel_list_node); +} + + +static void record_migration_events(void *args) +{ + block_migration_data_t *block_mig = (block_migration_data_t *)args; + migration_data_t *mig; + migration_data_t *next; + UvmEventEntry entry; + UvmEventMigrationInfo *info = &entry.eventData.migration; + uvm_va_space_t *va_space = block_mig->va_space; + + NvU64 gpu_timestamp = block_mig->start_timestamp_gpu; + + // Initialize fields that are constant throughout the whole block + memset(&entry, 0, sizeof(entry)); + info->eventType = UvmEventTypeMigration; + info->srcIndex = uvm_id_value(block_mig->src); + info->dstIndex = uvm_id_value(block_mig->dst); + info->beginTimeStamp = block_mig->start_timestamp_cpu; + info->endTimeStamp = block_mig->end_timestamp_cpu; + info->rangeGroupId = block_mig->range_group_id; + + uvm_down_read(&va_space->tools.lock); + list_for_each_entry_safe(mig, next, &block_mig->events, events_node) { + UVM_ASSERT(mig->bytes > 0); + list_del(&mig->events_node); + + info->address = mig->address; + info->migratedBytes = mig->bytes; + info->beginTimeStampGpu = gpu_timestamp; + info->endTimeStampGpu = mig->end_timestamp_gpu; + info->migrationCause = mig->cause; + gpu_timestamp = mig->end_timestamp_gpu; + kmem_cache_free(g_tools_migration_data_cache, mig); + + uvm_tools_record_event(va_space, &entry); + } + uvm_up_read(&va_space->tools.lock); + + UVM_ASSERT(list_empty(&block_mig->events)); + kmem_cache_free(g_tools_block_migration_data_cache, block_mig); +} + +static void record_migration_events_entry(void *args) +{ + UVM_ENTRY_VOID(record_migration_events(args)); +} + +static void on_block_migration_complete(void *ptr) +{ + migration_data_t *mig; + block_migration_data_t *block_mig = (block_migration_data_t *)ptr; + + block_mig->end_timestamp_cpu = NV_GETTIME(); + 
block_mig->start_timestamp_gpu = *block_mig->start_timestamp_gpu_addr; + list_for_each_entry(mig, &block_mig->events, events_node) + mig->end_timestamp_gpu = *mig->end_timestamp_gpu_addr; + + nv_kthread_q_item_init(&block_mig->queue_item, record_migration_events_entry, block_mig); + + // The UVM driver may notice that work in a channel is complete in a variety of situations + // and the va_space lock is not always held in all of them, nor can it always be taken safely on them. + // Dispatching events requires the va_space lock to be held in at least read mode, so + // this callback simply enqueues the dispatching onto a queue, where the + // va_space lock is always safe to acquire. + uvm_spin_lock(&g_tools_channel_list_lock); + remove_pending_event_for_channel(block_mig->channel); + nv_kthread_q_schedule_q_item(&g_tools_queue, &block_mig->queue_item); + uvm_spin_unlock(&g_tools_channel_list_lock); +} + +static void record_replay_event_helper(uvm_gpu_id_t gpu_id, + NvU32 batch_id, + uvm_fault_client_type_t client_type, + NvU64 timestamp, + NvU64 timestamp_gpu) +{ + UvmEventEntry entry; + + memset(&entry, 0, sizeof(entry)); + entry.eventData.gpuFaultReplay.eventType = UvmEventTypeGpuFaultReplay; + entry.eventData.gpuFaultReplay.gpuIndex = uvm_id_value(gpu_id); + entry.eventData.gpuFaultReplay.batchId = batch_id; + entry.eventData.gpuFaultReplay.clientType = g_hal_to_tools_fault_client_type_table[client_type]; + entry.eventData.gpuFaultReplay.timeStamp = timestamp; + entry.eventData.gpuFaultReplay.timeStampGpu = timestamp_gpu; + + uvm_tools_broadcast_event(&entry); +} + +static void record_replay_events(void *args) +{ + replay_data_t *replay = (replay_data_t *)args; + + record_replay_event_helper(replay->gpu_id, + replay->batch_id, + replay->client_type, + replay->timestamp, + replay->timestamp_gpu); + + kmem_cache_free(g_tools_replay_data_cache, replay); +} + +static void record_replay_events_entry(void *args) +{ + UVM_ENTRY_VOID(record_replay_events(args)); +} + +static void on_replay_complete(void *ptr) +{ + replay_data_t *replay = (replay_data_t *)ptr; + replay->timestamp_gpu = *replay->timestamp_gpu_addr; + + nv_kthread_q_item_init(&replay->queue_item, record_replay_events_entry, ptr); + + uvm_spin_lock(&g_tools_channel_list_lock); + remove_pending_event_for_channel(replay->channel); + nv_kthread_q_schedule_q_item(&g_tools_queue, &replay->queue_item); + uvm_spin_unlock(&g_tools_channel_list_lock); + +} + +static UvmEventMigrationCause g_make_resident_to_tools_migration_cause[UVM_MAKE_RESIDENT_CAUSE_MAX] = { + [UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT] = UvmEventMigrationCauseCoherence, + [UVM_MAKE_RESIDENT_CAUSE_NON_REPLAYABLE_FAULT] = UvmEventMigrationCauseCoherence, + [UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER] = UvmEventMigrationCauseAccessCounters, + [UVM_MAKE_RESIDENT_CAUSE_PREFETCH] = UvmEventMigrationCausePrefetch, + [UVM_MAKE_RESIDENT_CAUSE_EVICTION] = UvmEventMigrationCauseEviction, + [UVM_MAKE_RESIDENT_CAUSE_API_TOOLS] = UvmEventMigrationCauseInvalid, + [UVM_MAKE_RESIDENT_CAUSE_API_MIGRATE] = UvmEventMigrationCauseUser, + [UVM_MAKE_RESIDENT_CAUSE_API_SET_RANGE_GROUP] = UvmEventMigrationCauseCoherence, + [UVM_MAKE_RESIDENT_CAUSE_API_HINT] = UvmEventMigrationCauseUser, +}; + +// This event is notified asynchronously when all the migrations pushed to the +// same uvm_push_t object in a call to block_copy_resident_pages_between have +// finished +static void uvm_tools_record_migration(uvm_perf_event_t event_id, uvm_perf_event_data_t *event_data) +{ + uvm_va_block_t *va_block = 
event_data->migration.block; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + + UVM_ASSERT(event_id == UVM_PERF_EVENT_MIGRATION); + + uvm_assert_mutex_locked(&va_block->lock); + uvm_assert_rwsem_locked(&va_space->perf_events.lock); + UVM_ASSERT(va_space->tools.enabled); + + uvm_down_read(&va_space->tools.lock); + UVM_ASSERT(tools_is_migration_callback_needed(va_space)); + + if (tools_is_event_enabled(va_space, UvmEventTypeMigration)) { + migration_data_t *mig; + uvm_push_info_t *push_info = uvm_push_info_from_push(event_data->migration.push); + block_migration_data_t *block_mig = (block_migration_data_t *)push_info->on_complete_data; + + if (push_info->on_complete != NULL) { + mig = kmem_cache_alloc(g_tools_migration_data_cache, NV_UVM_GFP_FLAGS); + if (mig == NULL) + goto done_unlock; + + mig->address = event_data->migration.address; + mig->bytes = event_data->migration.bytes; + mig->end_timestamp_gpu_addr = uvm_push_timestamp(event_data->migration.push); + mig->cause = g_make_resident_to_tools_migration_cause[event_data->migration.cause]; + + list_add_tail(&mig->events_node, &block_mig->events); + } + } + + // Increment counters + if (UVM_ID_IS_CPU(event_data->migration.src) && + tools_is_counter_enabled(va_space, UvmCounterNameBytesXferHtD)) { + uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, event_data->migration.dst); + uvm_tools_inc_counter(va_space, + UvmCounterNameBytesXferHtD, + event_data->migration.bytes, + uvm_gpu_uuid(gpu)); + } + if (UVM_ID_IS_CPU(event_data->migration.dst) && + tools_is_counter_enabled(va_space, UvmCounterNameBytesXferDtH)) { + uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, event_data->migration.src); + uvm_tools_inc_counter(va_space, + UvmCounterNameBytesXferDtH, + event_data->migration.bytes, + uvm_gpu_uuid(gpu)); + } + +done_unlock: + uvm_up_read(&va_space->tools.lock); +} + +// This event is notified asynchronously when it is marked as completed in the +// pushbuffer the replay method belongs to. 
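To make the deferred-notification flow used by uvm_tools_broadcast_replay() below (and by the migration and map-remote paths elsewhere in this file) easier to follow, here is a condensed sketch of the pattern; the example_* names are hypothetical stand-ins for the real replay_data_t plumbing, and the real callbacks additionally track per-channel pending events under g_tools_channel_list_lock:

/* Sketch only: a small record carries the address of a GPU-written timestamp;
 * the push's on_complete callback snapshots that timestamp and defers the
 * actual event dispatch to the g_tools_queue worker, where the tools locks
 * can be taken safely. */
typedef struct
{
    nv_kthread_q_item_t queue_item;
    NvU64 *timestamp_gpu_addr;   /* filled in by the GPU via uvm_push_timestamp() */
    NvU64 timestamp_gpu;
} example_deferred_event_t;

static void example_dispatch(void *args)
{
    example_deferred_event_t *ev = (example_deferred_event_t *)args;

    /* Safe point to take the tools locks, build an UvmEventEntry from
     * ev->timestamp_gpu and enqueue or broadcast it, then free ev. */
    (void)ev;
}

static void example_on_complete(void *ptr)
{
    example_deferred_event_t *ev = (example_deferred_event_t *)ptr;

    ev->timestamp_gpu = *ev->timestamp_gpu_addr;
    nv_kthread_q_item_init(&ev->queue_item, example_dispatch, ev);
    nv_kthread_q_schedule_q_item(&g_tools_queue, &ev->queue_item);
}

A producer wires this up by setting push_info->on_complete_data to the record and push_info->on_complete to the callback before the push completes, which is the same shape as the real replay, migration and map-remote paths.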
+void uvm_tools_broadcast_replay(uvm_gpu_t *gpu, + uvm_push_t *push, + NvU32 batch_id, + uvm_fault_client_type_t client_type) +{ + uvm_push_info_t *push_info = uvm_push_info_from_push(push); + replay_data_t *replay; + + // Perform delayed notification only if some VA space has signed up for + // UvmEventTypeGpuFaultReplay + if (!tools_is_event_enabled_in_any_va_space(UvmEventTypeGpuFaultReplay)) + return; + + replay = kmem_cache_alloc(g_tools_replay_data_cache, NV_UVM_GFP_FLAGS); + if (replay == NULL) + return; + + UVM_ASSERT(push_info->on_complete == NULL && push_info->on_complete_data == NULL); + + replay->timestamp_gpu_addr = uvm_push_timestamp(push); + replay->gpu_id = gpu->id; + replay->batch_id = batch_id; + replay->client_type = client_type; + replay->timestamp = NV_GETTIME(); + replay->channel = push->channel; + + push_info->on_complete_data = replay; + push_info->on_complete = on_replay_complete; + + uvm_spin_lock(&g_tools_channel_list_lock); + add_pending_event_for_channel(replay->channel); + uvm_spin_unlock(&g_tools_channel_list_lock); +} + + +void uvm_tools_broadcast_replay_sync(uvm_gpu_t *gpu, + NvU32 batch_id, + uvm_fault_client_type_t client_type) +{ + UVM_ASSERT(!gpu->parent->has_clear_faulted_channel_method); + + if (!tools_is_event_enabled_in_any_va_space(UvmEventTypeGpuFaultReplay)) + return; + + record_replay_event_helper(gpu->id, + batch_id, + client_type, + NV_GETTIME(), + gpu->parent->host_hal->get_time(gpu)); +} + +void uvm_tools_broadcast_access_counter(uvm_gpu_t *gpu, + const uvm_access_counter_buffer_entry_t *buffer_entry, + bool on_managed) +{ + UvmEventEntry entry; + UvmEventTestAccessCounterInfo *info = &entry.testEventData.accessCounter; + + // Perform delayed notification only if some VA space has signed up for + // UvmEventTypeAccessCounter + if (!tools_is_event_enabled_in_any_va_space(UvmEventTypeTestAccessCounter)) + return; + + if (!buffer_entry->address.is_virtual) + UVM_ASSERT(UVM_ID_IS_VALID(buffer_entry->physical_info.resident_id)); + + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeTestAccessCounter; + info->srcIndex = uvm_id_value(gpu->id); + info->address = buffer_entry->address.address; + info->isVirtual = buffer_entry->address.is_virtual? 1: 0; + if (buffer_entry->address.is_virtual) { + info->instancePtr = buffer_entry->virtual_info.instance_ptr.address; + info->instancePtrAperture = g_hal_to_tools_aperture_table[buffer_entry->virtual_info.instance_ptr.aperture]; + info->veId = buffer_entry->virtual_info.ve_id; + } + else { + info->aperture = g_hal_to_tools_aperture_table[buffer_entry->address.aperture]; + } + info->isFromCpu = buffer_entry->counter_type == UVM_ACCESS_COUNTER_TYPE_MOMC? 1: 0; + info->onManaged = on_managed? 1 : 0; + info->value = buffer_entry->counter_value; + info->subGranularity = buffer_entry->sub_granularity; + info->bank = buffer_entry->bank; + info->tag = buffer_entry->tag; + + uvm_tools_broadcast_event(&entry); +} + +// This function is used as a begin marker to group all migrations within a VA +// block that are performed in the same call to +// block_copy_resident_pages_between. All of these are pushed to the same +// uvm_push_t object, and will be notified in burst when the last one finishes. 
+void uvm_tools_record_block_migration_begin(uvm_va_block_t *va_block, + uvm_push_t *push, + uvm_processor_id_t dst_id, + uvm_processor_id_t src_id, + NvU64 start, + uvm_make_resident_cause_t cause) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_range_group_range_t *range; + + // Calls from tools read/write functions to make_resident must not trigger + // any migration + UVM_ASSERT(cause != UVM_MAKE_RESIDENT_CAUSE_API_TOOLS); + + // During evictions the va_space lock is not held. + if (cause != UVM_MAKE_RESIDENT_CAUSE_EVICTION) + uvm_assert_rwsem_locked(&va_space->lock); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + + // Perform delayed notification only if the VA space has signed up for + // UvmEventTypeMigration + if (tools_is_event_enabled(va_space, UvmEventTypeMigration)) { + block_migration_data_t *block_mig; + uvm_push_info_t *push_info = uvm_push_info_from_push(push); + + UVM_ASSERT(push_info->on_complete == NULL && push_info->on_complete_data == NULL); + + block_mig = kmem_cache_alloc(g_tools_block_migration_data_cache, NV_UVM_GFP_FLAGS); + if (block_mig == NULL) + goto done_unlock; + + block_mig->start_timestamp_gpu_addr = uvm_push_timestamp(push); + block_mig->channel = push->channel; + block_mig->start_timestamp_cpu = NV_GETTIME(); + block_mig->dst = dst_id; + block_mig->src = src_id; + block_mig->range_group_id = UVM_RANGE_GROUP_ID_NONE; + + // During evictions, it is not safe to uvm_range_group_range_find() because the va_space lock is not held. + if (cause != UVM_MAKE_RESIDENT_CAUSE_EVICTION) { + range = uvm_range_group_range_find(va_space, start); + if (range != NULL) + block_mig->range_group_id = range->range_group->id; + } + block_mig->va_space = va_space; + + INIT_LIST_HEAD(&block_mig->events); + push_info->on_complete_data = block_mig; + push_info->on_complete = on_block_migration_complete; + + uvm_spin_lock(&g_tools_channel_list_lock); + add_pending_event_for_channel(block_mig->channel); + uvm_spin_unlock(&g_tools_channel_list_lock); + } + +done_unlock: + uvm_up_read(&va_space->tools.lock); +} + +void uvm_tools_record_read_duplicate(uvm_va_block_t *va_block, + uvm_processor_id_t dst, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + if (tools_is_event_enabled(va_space, UvmEventTypeReadDuplicate)) { + // Read-duplication events + UvmEventEntry entry; + UvmEventReadDuplicateInfo *info_read_duplicate = &entry.eventData.readDuplicate; + uvm_page_index_t page_index; + memset(&entry, 0, sizeof(entry)); + + info_read_duplicate->eventType = UvmEventTypeReadDuplicate; + info_read_duplicate->size = PAGE_SIZE; + info_read_duplicate->timeStamp = NV_GETTIME(); + + for_each_va_block_page_in_region_mask(page_index, page_mask, region) { + uvm_processor_id_t id; + uvm_processor_mask_t resident_processors; + + info_read_duplicate->address = uvm_va_block_cpu_page_address(va_block, page_index); + info_read_duplicate->processors = 0; + + uvm_va_block_page_resident_processors(va_block, page_index, &resident_processors); + for_each_id_in_mask(id, &resident_processors) + info_read_duplicate->processors |= (1 << uvm_id_value(id)); + + uvm_tools_record_event(va_space, &entry); + } + } + uvm_up_read(&va_space->tools.lock); +} + +void uvm_tools_record_read_duplicate_invalidate(uvm_va_block_t *va_block, + uvm_processor_id_t dst, + uvm_va_block_region_t 
region, + const uvm_page_mask_t *page_mask) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + if (tools_is_event_enabled(va_space, UvmEventTypeReadDuplicateInvalidate)) { + UvmEventEntry entry; + uvm_page_index_t page_index; + UvmEventReadDuplicateInvalidateInfo *info = &entry.eventData.readDuplicateInvalidate; + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeReadDuplicateInvalidate; + info->residentIndex = uvm_id_value(dst); + info->size = PAGE_SIZE; + info->timeStamp = NV_GETTIME(); + + for_each_va_block_page_in_region_mask(page_index, page_mask, region) { + UVM_ASSERT(uvm_page_mask_test(&va_block->read_duplicated_pages, page_index)); + + info->address = uvm_va_block_cpu_page_address(va_block, page_index); + uvm_tools_record_event(va_space, &entry); + } + } + uvm_up_read(&va_space->tools.lock); +} + +static void tools_schedule_completed_events(void) +{ + uvm_channel_t *channel; + uvm_channel_t *next_channel; + NvU64 channel_count = 0; + NvU64 i; + + uvm_spin_lock(&g_tools_channel_list_lock); + + // retain every channel list entry currently in the list and keep track of their count. + list_for_each_entry(channel, &g_tools_channel_list, tools.channel_list_node) { + ++channel->tools.pending_event_count; + ++channel_count; + } + uvm_spin_unlock(&g_tools_channel_list_lock); + + if (channel_count == 0) + return; + + // new entries always appear at the end, and all the entries seen in the first loop have been retained + // so it is safe to go through them + channel = list_first_entry(&g_tools_channel_list, uvm_channel_t, tools.channel_list_node); + for (i = 0; i < channel_count; i++) { + uvm_channel_update_progress_all(channel); + channel = list_next_entry(channel, tools.channel_list_node); + } + + // now release all the entries we retained in the beginning + i = 0; + uvm_spin_lock(&g_tools_channel_list_lock); + list_for_each_entry_safe(channel, next_channel, &g_tools_channel_list, tools.channel_list_node) { + if (i++ == channel_count) + break; + + remove_pending_event_for_channel(channel); + } + uvm_spin_unlock(&g_tools_channel_list_lock); +} + +void uvm_tools_record_cpu_fatal_fault(uvm_va_space_t *va_space, + NvU64 address, + bool is_write, + UvmEventFatalReason reason) +{ + uvm_assert_rwsem_locked(&va_space->lock); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + if (tools_is_event_enabled(va_space, UvmEventTypeFatalFault)) { + UvmEventEntry entry; + UvmEventFatalFaultInfo *info = &entry.eventData.fatalFault; + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeFatalFault; + info->processorIndex = UVM_ID_CPU_VALUE; + info->timeStamp = NV_GETTIME(); + info->address = address; + info->accessType = is_write? 
UvmEventMemoryAccessTypeWrite: UvmEventMemoryAccessTypeRead; + // info->faultType is not valid for cpu faults + info->reason = reason; + + uvm_tools_record_event(va_space, &entry); + } + uvm_up_read(&va_space->tools.lock); +} + +void uvm_tools_record_gpu_fatal_fault(uvm_gpu_id_t gpu_id, + uvm_va_space_t *va_space, + const uvm_fault_buffer_entry_t *buffer_entry, + UvmEventFatalReason reason) +{ + uvm_assert_rwsem_locked(&va_space->lock); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + if (tools_is_event_enabled(va_space, UvmEventTypeFatalFault)) { + UvmEventEntry entry; + UvmEventFatalFaultInfo *info = &entry.eventData.fatalFault; + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeFatalFault; + info->processorIndex = uvm_id_value(gpu_id); + info->timeStamp = NV_GETTIME(); + info->address = buffer_entry->fault_address; + info->accessType = g_hal_to_tools_fault_access_type_table[buffer_entry->fault_access_type]; + info->faultType = g_hal_to_tools_fault_type_table[buffer_entry->fault_type]; + info->reason = reason; + + uvm_tools_record_event(va_space, &entry); + } + uvm_up_read(&va_space->tools.lock); +} + +void uvm_tools_record_thrashing(uvm_va_space_t *va_space, + NvU64 address, + size_t region_size, + const uvm_processor_mask_t *processors) +{ + UVM_ASSERT(address); + UVM_ASSERT(PAGE_ALIGNED(address)); + UVM_ASSERT(region_size > 0); + + uvm_assert_rwsem_locked(&va_space->lock); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + if (tools_is_event_enabled(va_space, UvmEventTypeThrashingDetected)) { + UvmEventEntry entry; + UvmEventThrashingDetectedInfo *info = &entry.eventData.thrashing; + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeThrashingDetected; + info->address = address; + info->size = region_size; + info->timeStamp = NV_GETTIME(); + bitmap_copy((long unsigned *)&info->processors, processors->bitmap, UVM_ID_MAX_PROCESSORS); + + uvm_tools_record_event(va_space, &entry); + } + uvm_up_read(&va_space->tools.lock); +} + +void uvm_tools_record_throttling_start(uvm_va_space_t *va_space, NvU64 address, uvm_processor_id_t processor) +{ + UVM_ASSERT(address); + UVM_ASSERT(PAGE_ALIGNED(address)); + UVM_ASSERT(UVM_ID_IS_VALID(processor)); + + uvm_assert_rwsem_locked(&va_space->lock); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + if (tools_is_event_enabled(va_space, UvmEventTypeThrottlingStart)) { + UvmEventEntry entry; + UvmEventThrottlingStartInfo *info = &entry.eventData.throttlingStart; + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeThrottlingStart; + info->processorIndex = uvm_id_value(processor); + info->address = address; + info->timeStamp = NV_GETTIME(); + + uvm_tools_record_event(va_space, &entry); + } + uvm_up_read(&va_space->tools.lock); +} + +void uvm_tools_record_throttling_end(uvm_va_space_t *va_space, NvU64 address, uvm_processor_id_t processor) +{ + UVM_ASSERT(address); + UVM_ASSERT(PAGE_ALIGNED(address)); + UVM_ASSERT(UVM_ID_IS_VALID(processor)); + + uvm_assert_rwsem_locked(&va_space->lock); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + if (tools_is_event_enabled(va_space, UvmEventTypeThrottlingEnd)) { + UvmEventEntry entry; + UvmEventThrottlingEndInfo *info = &entry.eventData.throttlingEnd; + memset(&entry, 0, sizeof(entry)); + + info->eventType = UvmEventTypeThrottlingEnd; + info->processorIndex = uvm_id_value(processor); + info->address = address; + 
info->timeStamp = NV_GETTIME(); + + uvm_tools_record_event(va_space, &entry); + } + uvm_up_read(&va_space->tools.lock); +} + +static void record_map_remote_events(void *args) +{ + block_map_remote_data_t *block_map_remote = (block_map_remote_data_t *)args; + map_remote_data_t *map_remote, *next; + UvmEventEntry entry; + uvm_va_space_t *va_space = block_map_remote->va_space; + + memset(&entry, 0, sizeof(entry)); + + entry.eventData.mapRemote.eventType = UvmEventTypeMapRemote; + entry.eventData.mapRemote.srcIndex = uvm_id_value(block_map_remote->src); + entry.eventData.mapRemote.dstIndex = uvm_id_value(block_map_remote->dst); + entry.eventData.mapRemote.mapRemoteCause = block_map_remote->cause; + entry.eventData.mapRemote.timeStamp = block_map_remote->timestamp; + + uvm_down_read(&va_space->tools.lock); + list_for_each_entry_safe(map_remote, next, &block_map_remote->events, events_node) { + list_del(&map_remote->events_node); + + entry.eventData.mapRemote.address = map_remote->address; + entry.eventData.mapRemote.size = map_remote->size; + entry.eventData.mapRemote.timeStampGpu = map_remote->timestamp_gpu; + kmem_cache_free(g_tools_map_remote_data_cache, map_remote); + + uvm_tools_record_event(va_space, &entry); + } + uvm_up_read(&va_space->tools.lock); + + UVM_ASSERT(list_empty(&block_map_remote->events)); + kmem_cache_free(g_tools_block_map_remote_data_cache, block_map_remote); +} + +static void record_map_remote_events_entry(void *args) +{ + UVM_ENTRY_VOID(record_map_remote_events(args)); +} + +static void on_map_remote_complete(void *ptr) +{ + block_map_remote_data_t *block_map_remote = (block_map_remote_data_t *)ptr; + map_remote_data_t *map_remote; + + // Only GPU mappings use the deferred mechanism + UVM_ASSERT(UVM_ID_IS_GPU(block_map_remote->src)); + list_for_each_entry(map_remote, &block_map_remote->events, events_node) + map_remote->timestamp_gpu = *map_remote->timestamp_gpu_addr; + + nv_kthread_q_item_init(&block_map_remote->queue_item, record_map_remote_events_entry, ptr); + + uvm_spin_lock(&g_tools_channel_list_lock); + remove_pending_event_for_channel(block_map_remote->channel); + nv_kthread_q_schedule_q_item(&g_tools_queue, &block_map_remote->queue_item); + uvm_spin_unlock(&g_tools_channel_list_lock); +} + +void uvm_tools_record_map_remote(uvm_va_block_t *va_block, + uvm_push_t *push, + uvm_processor_id_t processor, + uvm_processor_id_t residency, + NvU64 address, + size_t region_size, + UvmEventMapRemoteCause cause) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + + UVM_ASSERT(UVM_ID_IS_VALID(processor)); + UVM_ASSERT(UVM_ID_IS_VALID(residency)); + UVM_ASSERT(cause != UvmEventMapRemoteCauseInvalid); + + uvm_assert_rwsem_locked(&va_space->lock); + + if (!va_space->tools.enabled) + return; + + uvm_down_read(&va_space->tools.lock); + if (!tools_is_event_enabled(va_space, UvmEventTypeMapRemote)) + goto done; + + if (UVM_ID_IS_CPU(processor)) { + UvmEventEntry entry; + memset(&entry, 0, sizeof(entry)); + + entry.eventData.mapRemote.eventType = UvmEventTypeMapRemote; + entry.eventData.mapRemote.srcIndex = uvm_id_value(processor); + entry.eventData.mapRemote.dstIndex = uvm_id_value(residency); + entry.eventData.mapRemote.mapRemoteCause = cause; + entry.eventData.mapRemote.timeStamp = NV_GETTIME(); + entry.eventData.mapRemote.address = address; + entry.eventData.mapRemote.size = region_size; + entry.eventData.mapRemote.timeStampGpu = 0; + + UVM_ASSERT(entry.eventData.mapRemote.mapRemoteCause != UvmEventMapRemoteCauseInvalid); + + uvm_tools_record_event(va_space, 
&entry); + } + else { + uvm_push_info_t *push_info = uvm_push_info_from_push(push); + block_map_remote_data_t *block_map_remote; + map_remote_data_t *map_remote; + + // The first call on this pushbuffer creates the per-VA block structure + if (push_info->on_complete == NULL) { + UVM_ASSERT(push_info->on_complete_data == NULL); + + block_map_remote = kmem_cache_alloc(g_tools_block_map_remote_data_cache, NV_UVM_GFP_FLAGS); + if (block_map_remote == NULL) + goto done; + + block_map_remote->src = processor; + block_map_remote->dst = residency; + block_map_remote->cause = cause; + block_map_remote->timestamp = NV_GETTIME(); + block_map_remote->va_space = va_space; + block_map_remote->channel = push->channel; + INIT_LIST_HEAD(&block_map_remote->events); + + push_info->on_complete_data = block_map_remote; + push_info->on_complete = on_map_remote_complete; + + uvm_spin_lock(&g_tools_channel_list_lock); + add_pending_event_for_channel(block_map_remote->channel); + uvm_spin_unlock(&g_tools_channel_list_lock); + } + else { + block_map_remote = push_info->on_complete_data; + } + UVM_ASSERT(block_map_remote); + + map_remote = kmem_cache_alloc(g_tools_map_remote_data_cache, NV_UVM_GFP_FLAGS); + if (map_remote == NULL) + goto done; + + map_remote->address = address; + map_remote->size = region_size; + map_remote->timestamp_gpu_addr = uvm_push_timestamp(push); + + list_add_tail(&map_remote->events_node, &block_map_remote->events); + } + +done: + uvm_up_read(&va_space->tools.lock); +} + +NV_STATUS uvm_api_tools_init_event_tracker(UVM_TOOLS_INIT_EVENT_TRACKER_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_tools_event_tracker_t *event_tracker; + + event_tracker = nv_kmem_cache_zalloc(g_tools_event_tracker_cache, NV_UVM_GFP_FLAGS); + if (event_tracker == NULL) + return NV_ERR_NO_MEMORY; + + event_tracker->uvm_file = fget(params->uvmFd); + if (event_tracker->uvm_file == NULL) { + status = NV_ERR_INSUFFICIENT_PERMISSIONS; + goto fail; + } + + if (!uvm_file_is_nvidia_uvm(event_tracker->uvm_file)) { + fput(event_tracker->uvm_file); + event_tracker->uvm_file = NULL; + status = NV_ERR_INSUFFICIENT_PERMISSIONS; + goto fail; + } + + status = uvm_va_space_initialized(uvm_va_space_get(event_tracker->uvm_file)); + if (status != NV_OK) { + fput(event_tracker->uvm_file); + event_tracker->uvm_file = NULL; + goto fail; + } + + event_tracker->is_queue = params->queueBufferSize != 0; + if (event_tracker->is_queue) { + uvm_tools_queue_t *queue = &event_tracker->queue; + uvm_spin_lock_init(&queue->lock, UVM_LOCK_ORDER_LEAF); + init_waitqueue_head(&queue->wait_queue); + + if (params->queueBufferSize > UINT_MAX) { + status = NV_ERR_INVALID_ARGUMENT; + goto fail; + } + + queue->queue_buffer_count = (NvU32)params->queueBufferSize; + queue->notification_threshold = queue->queue_buffer_count / 2; + + // queue_buffer_count must be a power of 2, of at least 2 + if (!is_power_of_2(queue->queue_buffer_count) || queue->queue_buffer_count < 2) { + status = NV_ERR_INVALID_ARGUMENT; + goto fail; + } + + status = map_user_pages(params->queueBuffer, + queue->queue_buffer_count * sizeof(UvmEventEntry), + (void **)&queue->queue, + &queue->queue_buffer_pages); + if (status != NV_OK) + goto fail; + + status = map_user_pages(params->controlBuffer, + sizeof(UvmToolsEventControlData), + (void **)&queue->control, + &queue->control_buffer_pages); + + if (status != NV_OK) + goto fail; + } + else { + uvm_tools_counter_t *counter = &event_tracker->counter; + counter->all_processors = params->allProcessors; + counter->processor = 
params->processor; + status = map_user_pages(params->controlBuffer, + sizeof(NvU64) * UVM_TOTAL_COUNTERS, + (void **)&counter->counters, + &counter->counter_buffer_pages); + if (status != NV_OK) + goto fail; + } + + if (nv_atomic_long_cmpxchg((atomic_long_t *)&filp->private_data, 0, (long)event_tracker) != 0) { + status = NV_ERR_INVALID_ARGUMENT; + goto fail; + } + + return NV_OK; + +fail: + destroy_event_tracker(event_tracker); + return status; +} + +NV_STATUS uvm_api_tools_set_notification_threshold(UVM_TOOLS_SET_NOTIFICATION_THRESHOLD_PARAMS *params, struct file *filp) +{ + UvmToolsEventControlData *ctrl; + uvm_tools_queue_snapshot_t sn; + uvm_tools_event_tracker_t *event_tracker = tools_event_tracker(filp); + + if (!tracker_is_queue(event_tracker)) + return NV_ERR_INVALID_ARGUMENT; + + uvm_spin_lock(&event_tracker->queue.lock); + + event_tracker->queue.notification_threshold = params->notificationThreshold; + + ctrl = event_tracker->queue.control; + sn.put_behind = atomic_read((atomic_t *)&ctrl->put_behind); + sn.get_ahead = atomic_read((atomic_t *)&ctrl->get_ahead); + + if (queue_needs_wakeup(&event_tracker->queue, &sn)) + wake_up_all(&event_tracker->queue.wait_queue); + + uvm_spin_unlock(&event_tracker->queue.lock); + + return NV_OK; +} + +static NV_STATUS tools_update_perf_events_callbacks(uvm_va_space_t *va_space) +{ + NV_STATUS status; + + uvm_assert_rwsem_locked_write(&va_space->perf_events.lock); + uvm_assert_rwsem_locked_write(&va_space->tools.lock); + + if (tools_is_fault_callback_needed(va_space)) { + if (!uvm_perf_is_event_callback_registered(&va_space->perf_events, UVM_PERF_EVENT_FAULT, uvm_tools_record_fault)) { + status = uvm_perf_register_event_callback_locked(&va_space->perf_events, + UVM_PERF_EVENT_FAULT, + uvm_tools_record_fault); + + if (status != NV_OK) + return status; + } + } + else { + if (uvm_perf_is_event_callback_registered(&va_space->perf_events, UVM_PERF_EVENT_FAULT, uvm_tools_record_fault)) { + uvm_perf_unregister_event_callback_locked(&va_space->perf_events, + UVM_PERF_EVENT_FAULT, + uvm_tools_record_fault); + } + } + + if (tools_is_migration_callback_needed(va_space)) { + if (!uvm_perf_is_event_callback_registered(&va_space->perf_events, UVM_PERF_EVENT_MIGRATION, uvm_tools_record_migration)) { + status = uvm_perf_register_event_callback_locked(&va_space->perf_events, + UVM_PERF_EVENT_MIGRATION, + uvm_tools_record_migration); + + if (status != NV_OK) + return status; + } + } + else { + if (uvm_perf_is_event_callback_registered(&va_space->perf_events, UVM_PERF_EVENT_MIGRATION, uvm_tools_record_migration)) { + uvm_perf_unregister_event_callback_locked(&va_space->perf_events, + UVM_PERF_EVENT_MIGRATION, + uvm_tools_record_migration); + } + } + + return NV_OK; +} + +static NV_STATUS tools_update_status(uvm_va_space_t *va_space) +{ + NV_STATUS status; + bool should_be_enabled; + uvm_assert_rwsem_locked_write(&g_tools_va_space_list_lock); + uvm_assert_rwsem_locked_write(&va_space->perf_events.lock); + uvm_assert_rwsem_locked_write(&va_space->tools.lock); + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + + status = tools_update_perf_events_callbacks(va_space); + if (status != NV_OK) + return status; + + should_be_enabled = tools_are_enabled(va_space); + if (should_be_enabled != va_space->tools.enabled) { + if (should_be_enabled) + list_add(&va_space->tools.node, &g_tools_va_space_list); + else + list_del(&va_space->tools.node); + + va_space->tools.enabled = should_be_enabled; + } + + return NV_OK; +} + +#define EVENT_FLAGS_BITS (sizeof(NvU64) * 8) + 
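// The event_flags / eventTypeFlags value handled below is a plain bitmask with
// one bit per UvmEventType value: bit 0 is reserved for the invalid event type
// and is never accepted, and the test-only event bits are only accepted when
// the module is loaded with uvm_enable_builtin_tests=1. A minimal, hypothetical
// user-space sketch of building such a mask (struct and enum names as used in
// this file) could look like:
//
//     UVM_TOOLS_EVENT_QUEUE_ENABLE_EVENTS_PARAMS params = { 0 };
//     params.eventTypeFlags = 1ULL << UvmEventTypeMapRemote;  // one bit per subscribed event type
//
// mask_contains_invalid_events() below rejects any mask with bits set outside
// this valid range.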
+static bool mask_contains_invalid_events(NvU64 event_flags) +{ + const unsigned long *event_mask = (const unsigned long *)&event_flags; + DECLARE_BITMAP(helper_mask, EVENT_FLAGS_BITS); + DECLARE_BITMAP(valid_events_mask, EVENT_FLAGS_BITS); + DECLARE_BITMAP(tests_events_mask, EVENT_FLAGS_BITS); + + bitmap_zero(tests_events_mask, EVENT_FLAGS_BITS); + bitmap_set(tests_events_mask, + UvmEventTestTypesFirst, + UvmEventTestTypesLast - UvmEventTestTypesFirst + 1); + + bitmap_zero(valid_events_mask, EVENT_FLAGS_BITS); + bitmap_set(valid_events_mask, 1, UvmEventNumTypes - 1); + + if (uvm_enable_builtin_tests) + bitmap_or(valid_events_mask, valid_events_mask, tests_events_mask, EVENT_FLAGS_BITS); + + // Make sure that test event ids do not overlap with regular events + BUILD_BUG_ON(UvmEventTestTypesFirst < UvmEventNumTypes); + BUILD_BUG_ON(UvmEventTestTypesFirst > UvmEventTestTypesLast); + BUILD_BUG_ON(UvmEventTestTypesLast >= UvmEventNumTypesAll); + + // Make sure that no test event ever changes the size of UvmEventEntry + BUILD_BUG_ON(sizeof(((UvmEventEntry *)NULL)->testEventData) > + sizeof(((UvmEventEntry *)NULL)->eventData)); + BUILD_BUG_ON(UvmEventNumTypesAll > EVENT_FLAGS_BITS); + + if (!bitmap_andnot(helper_mask, event_mask, valid_events_mask, EVENT_FLAGS_BITS)) + return false; + + if (!uvm_enable_builtin_tests && bitmap_and(helper_mask, event_mask, tests_events_mask, EVENT_FLAGS_BITS)) + UVM_INFO_PRINT("Event index not found. Did you mean to insmod with uvm_enable_builtin_tests=1?\n"); + + return true; +} + +NV_STATUS uvm_api_tools_event_queue_enable_events(UVM_TOOLS_EVENT_QUEUE_ENABLE_EVENTS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space; + uvm_tools_event_tracker_t *event_tracker = tools_event_tracker(filp); + NV_STATUS status = NV_OK; + NvU64 inserted_lists; + + if (!tracker_is_queue(event_tracker)) + return NV_ERR_INVALID_ARGUMENT; + + if (mask_contains_invalid_events(params->eventTypeFlags)) + return NV_ERR_INVALID_ARGUMENT; + + va_space = tools_event_tracker_va_space(event_tracker); + + uvm_down_write(&g_tools_va_space_list_lock); + uvm_down_write(&va_space->perf_events.lock); + uvm_down_write(&va_space->tools.lock); + + insert_event_tracker(va_space, + event_tracker->queue.queue_nodes, + UvmEventNumTypesAll, + params->eventTypeFlags, + &event_tracker->queue.subscribed_queues, + va_space->tools.queues, + &inserted_lists); + + // perform any necessary registration + status = tools_update_status(va_space); + if (status != NV_OK) { + // on error, unregister any newly registered event + remove_event_tracker(va_space, + event_tracker->queue.queue_nodes, + UvmEventNumTypes, + inserted_lists, + &event_tracker->queue.subscribed_queues); + } + + uvm_up_write(&va_space->tools.lock); + uvm_up_write(&va_space->perf_events.lock); + uvm_up_write(&g_tools_va_space_list_lock); + + return status; +} + +NV_STATUS uvm_api_tools_event_queue_disable_events(UVM_TOOLS_EVENT_QUEUE_DISABLE_EVENTS_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space; + uvm_tools_event_tracker_t *event_tracker = tools_event_tracker(filp); + + if (!tracker_is_queue(event_tracker)) + return NV_ERR_INVALID_ARGUMENT; + + va_space = tools_event_tracker_va_space(event_tracker); + + uvm_down_write(&g_tools_va_space_list_lock); + uvm_down_write(&va_space->perf_events.lock); + uvm_down_write(&va_space->tools.lock); + remove_event_tracker(va_space, + event_tracker->queue.queue_nodes, + UvmEventNumTypesAll, + params->eventTypeFlags, + &event_tracker->queue.subscribed_queues); + + // 
de-registration should not fail + status = tools_update_status(va_space); + UVM_ASSERT(status == NV_OK); + + uvm_up_write(&va_space->tools.lock); + uvm_up_write(&va_space->perf_events.lock); + uvm_up_write(&g_tools_va_space_list_lock); + return NV_OK; +} + +NV_STATUS uvm_api_tools_enable_counters(UVM_TOOLS_ENABLE_COUNTERS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space; + uvm_tools_event_tracker_t *event_tracker = tools_event_tracker(filp); + NV_STATUS status = NV_OK; + NvU64 inserted_lists; + + if (!tracker_is_counter(event_tracker)) + return NV_ERR_INVALID_ARGUMENT; + + va_space = tools_event_tracker_va_space(event_tracker); + + uvm_down_write(&g_tools_va_space_list_lock); + uvm_down_write(&va_space->perf_events.lock); + uvm_down_write(&va_space->tools.lock); + + insert_event_tracker(va_space, + event_tracker->counter.counter_nodes, + UVM_TOTAL_COUNTERS, + params->counterTypeFlags, + &event_tracker->counter.subscribed_counters, + va_space->tools.counters, + &inserted_lists); + + // perform any necessary registration + status = tools_update_status(va_space); + if (status != NV_OK) { + remove_event_tracker(va_space, + event_tracker->counter.counter_nodes, + UVM_TOTAL_COUNTERS, + inserted_lists, + &event_tracker->counter.subscribed_counters); + } + + uvm_up_write(&va_space->tools.lock); + uvm_up_write(&va_space->perf_events.lock); + uvm_up_write(&g_tools_va_space_list_lock); + + return status; +} + +NV_STATUS uvm_api_tools_disable_counters(UVM_TOOLS_DISABLE_COUNTERS_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space; + uvm_tools_event_tracker_t *event_tracker = tools_event_tracker(filp); + + if (!tracker_is_counter(event_tracker)) + return NV_ERR_INVALID_ARGUMENT; + + va_space = tools_event_tracker_va_space(event_tracker); + + uvm_down_write(&g_tools_va_space_list_lock); + uvm_down_write(&va_space->perf_events.lock); + uvm_down_write(&va_space->tools.lock); + remove_event_tracker(va_space, + event_tracker->counter.counter_nodes, + UVM_TOTAL_COUNTERS, + params->counterTypeFlags, + &event_tracker->counter.subscribed_counters); + + // de-registration should not fail + status = tools_update_status(va_space); + UVM_ASSERT(status == NV_OK); + + uvm_up_write(&va_space->tools.lock); + uvm_up_write(&va_space->perf_events.lock); + uvm_up_write(&g_tools_va_space_list_lock); + + return NV_OK; +} + +static NV_STATUS tools_access_va_block(uvm_va_block_t *va_block, + uvm_va_block_context_t *block_context, + NvU64 target_va, + NvU64 size, + bool is_write, + uvm_mem_t *stage_mem) +{ + if (is_write) { + return UVM_VA_BLOCK_LOCK_RETRY(va_block, + NULL, + uvm_va_block_write_from_cpu(va_block, block_context, target_va, stage_mem, size)); + } + else { + return UVM_VA_BLOCK_LOCK_RETRY(va_block, + NULL, + uvm_va_block_read_to_cpu(va_block, stage_mem, target_va, size)); + + } +} + +static NV_STATUS tools_access_process_memory(uvm_va_space_t *va_space, + NvU64 target_va, + NvU64 size, + NvU64 user_va, + NvU64 *bytes, + bool is_write) +{ + NV_STATUS status; + uvm_mem_t *stage_mem = NULL; + void *stage_addr; + uvm_global_processor_mask_t *retained_global_gpus = NULL; + uvm_global_processor_mask_t *global_gpus = NULL; + uvm_va_block_context_t *block_context = NULL; + struct mm_struct *mm = NULL; + + retained_global_gpus = uvm_kvmalloc(sizeof(*retained_global_gpus)); + if (retained_global_gpus == NULL) + return NV_ERR_NO_MEMORY; + + uvm_global_processor_mask_zero(retained_global_gpus); + + global_gpus = uvm_kvmalloc(sizeof(*global_gpus)); + if (global_gpus == NULL) { + 
status = NV_ERR_NO_MEMORY; + goto exit; + } + + mm = uvm_va_space_mm_or_current_retain(va_space); + + status = uvm_mem_alloc_sysmem_and_map_cpu_kernel(PAGE_SIZE, mm, &stage_mem); + if (status != NV_OK) + goto exit; + + if (is_write) { + block_context = uvm_va_block_context_alloc(mm); + if (!block_context) { + status = NV_ERR_NO_MEMORY; + goto exit; + } + } + + stage_addr = uvm_mem_get_cpu_addr_kernel(stage_mem); + *bytes = 0; + + while (*bytes < size) { + uvm_gpu_t *gpu; + uvm_va_block_t *block; + void *user_va_start = (void *) (user_va + *bytes); + NvU64 target_va_start = target_va + *bytes; + NvU64 bytes_left = size - *bytes; + NvU64 page_offset = target_va_start & (PAGE_SIZE - 1); + NvU64 bytes_now = min(bytes_left, (NvU64)(PAGE_SIZE - page_offset)); + + if (is_write) { + NvU64 remaining = nv_copy_from_user(stage_addr, user_va_start, bytes_now); + if (remaining != 0) { + status = NV_ERR_INVALID_ARGUMENT; + goto exit; + } + } + + // The RM flavor of the lock is needed to perform ECC checks. + uvm_va_space_down_read_rm(va_space); + status = uvm_va_block_find_create_managed(va_space, target_va_start, &block); + if (status != NV_OK) { + uvm_va_space_up_read_rm(va_space); + goto exit; + } + + uvm_va_space_global_gpus(va_space, global_gpus); + + for_each_global_gpu_in_mask(gpu, global_gpus) { + if (uvm_global_processor_mask_test_and_set(retained_global_gpus, gpu->global_id)) + continue; + + // The retention of each GPU ensures that the staging memory is + // freed before the unregistration of any of the GPUs is mapped on. + // Each GPU is retained once. + uvm_gpu_retain(gpu); + + // Accessing the VA block may result in copying data between the CPU + // and a GPU. Conservatively add virtual mappings to all the GPUs + // (even if those mappings may never be used) as tools read/write is + // not on a performance critical path. + status = uvm_mem_map_gpu_kernel(stage_mem, gpu); + if (status != NV_OK) { + uvm_va_space_up_read_rm(va_space); + goto exit; + } + } + + status = tools_access_va_block(block, block_context, target_va_start, bytes_now, is_write, stage_mem); + + // For simplicity, check for ECC errors on all GPUs registered in the VA + // space + if (status == NV_OK) + status = uvm_global_mask_check_ecc_error(global_gpus); + + uvm_va_space_up_read_rm(va_space); + if (status != NV_OK) + goto exit; + + if (!is_write) { + NvU64 remaining; + + // Prevent processor speculation prior to accessing user-mapped + // memory to avoid leaking information from side-channel attacks. + // Under speculation, a valid VA range which does not contain + // target_va could be used, and the block index could run off the + // end of the array. Information about the state of that kernel + // memory could be inferred if speculative execution gets to the + // point where the data is copied out. 
+ nv_speculation_barrier(); + + remaining = nv_copy_to_user(user_va_start, stage_addr, bytes_now); + if (remaining > 0) { + status = NV_ERR_INVALID_ARGUMENT; + goto exit; + } + } + + *bytes += bytes_now; + } + +exit: + uvm_va_block_context_free(block_context); + + uvm_mem_free(stage_mem); + + uvm_global_mask_release(retained_global_gpus); + + if (mm) + uvm_va_space_mm_or_current_release(va_space, mm); + + uvm_kvfree(global_gpus); + uvm_kvfree(retained_global_gpus); + + return status; +} + +NV_STATUS uvm_api_tools_read_process_memory(UVM_TOOLS_READ_PROCESS_MEMORY_PARAMS *params, struct file *filp) +{ + return tools_access_process_memory(uvm_va_space_get(filp), + params->targetVa, + params->size, + params->buffer, + ¶ms->bytesRead, + false); +} + +NV_STATUS uvm_api_tools_write_process_memory(UVM_TOOLS_WRITE_PROCESS_MEMORY_PARAMS *params, struct file *filp) +{ + return tools_access_process_memory(uvm_va_space_get(filp), + params->targetVa, + params->size, + params->buffer, + ¶ms->bytesWritten, + true); +} + +NV_STATUS uvm_test_inject_tools_event(UVM_TEST_INJECT_TOOLS_EVENT_PARAMS *params, struct file *filp) +{ + NvU32 i; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + if (params->entry.eventData.eventType >= UvmEventNumTypesAll) + return NV_ERR_INVALID_ARGUMENT; + + uvm_down_read(&va_space->tools.lock); + for (i = 0; i < params->count; i++) + uvm_tools_record_event(va_space, ¶ms->entry); + uvm_up_read(&va_space->tools.lock); + return NV_OK; +} + +NV_STATUS uvm_test_increment_tools_counter(UVM_TEST_INCREMENT_TOOLS_COUNTER_PARAMS *params, struct file *filp) +{ + NvU32 i; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + if (params->counter >= UVM_TOTAL_COUNTERS) + return NV_ERR_INVALID_ARGUMENT; + + uvm_down_read(&va_space->tools.lock); + for (i = 0; i < params->count; i++) + uvm_tools_inc_counter(va_space, params->counter, params->amount, ¶ms->processor); + uvm_up_read(&va_space->tools.lock); + + return NV_OK; +} + +NV_STATUS uvm_api_tools_get_processor_uuid_table(UVM_TOOLS_GET_PROCESSOR_UUID_TABLE_PARAMS *params, struct file *filp) +{ + NvProcessorUuid *uuids; + NvU64 remaining; + uvm_gpu_t *gpu; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uuids = uvm_kvmalloc_zero(sizeof(NvProcessorUuid) * UVM_ID_MAX_PROCESSORS); + if (uuids == NULL) + return NV_ERR_NO_MEMORY; + + uvm_processor_uuid_copy(&uuids[UVM_ID_CPU_VALUE], &NV_PROCESSOR_UUID_CPU_DEFAULT); + params->count = 1; + + uvm_va_space_down_read(va_space); + for_each_va_space_gpu(gpu, va_space) { + uvm_processor_uuid_copy(&uuids[uvm_id_value(gpu->id)], uvm_gpu_uuid(gpu)); + if (uvm_id_value(gpu->id) + 1 > params->count) + params->count = uvm_id_value(gpu->id) + 1; + } + uvm_va_space_up_read(va_space); + + remaining = nv_copy_to_user((void *)params->tablePtr, uuids, sizeof(NvProcessorUuid) * params->count); + uvm_kvfree(uuids); + + if (remaining != 0) + return NV_ERR_INVALID_ADDRESS; + + return NV_OK; +} + +void uvm_tools_flush_events() +{ + tools_schedule_completed_events(); + + nv_kthread_q_flush(&g_tools_queue); +} + +NV_STATUS uvm_api_tools_flush_events(UVM_TOOLS_FLUSH_EVENTS_PARAMS *params, struct file *filp) +{ + uvm_tools_flush_events(); + return NV_OK; +} + +NV_STATUS uvm_test_tools_flush_replay_events(UVM_TEST_TOOLS_FLUSH_REPLAY_EVENTS_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu = NULL; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, ¶ms->gpuUuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + // Wait for 
register-based fault clears to queue the replay event + if (!gpu->parent->has_clear_faulted_channel_method) { + uvm_gpu_non_replayable_faults_isr_lock(gpu->parent); + uvm_gpu_non_replayable_faults_isr_unlock(gpu->parent); + } + + // Wait for pending fault replay methods to complete (replayable faults on + // all GPUs, and non-replayable faults on method-based GPUs). + status = uvm_channel_manager_wait(gpu->channel_manager); + + // Flush any pending events even if (status != NV_OK) + uvm_tools_flush_events(); + uvm_gpu_release(gpu); + + return status; +} + +static const struct file_operations uvm_tools_fops = +{ + .open = uvm_tools_open_entry, + .release = uvm_tools_release_entry, + .unlocked_ioctl = uvm_tools_unlocked_ioctl_entry, +#if NVCPU_IS_X86_64 + .compat_ioctl = uvm_tools_unlocked_ioctl_entry, +#endif + .poll = uvm_tools_poll_entry, + .owner = THIS_MODULE, +}; + +static void _uvm_tools_destroy_cache_all(void) +{ + // The pointers are initialized to NULL, + // it's safe to call destroy on all of them. + kmem_cache_destroy_safe(&g_tools_event_tracker_cache); + kmem_cache_destroy_safe(&g_tools_block_migration_data_cache); + kmem_cache_destroy_safe(&g_tools_migration_data_cache); + kmem_cache_destroy_safe(&g_tools_replay_data_cache); + kmem_cache_destroy_safe(&g_tools_block_map_remote_data_cache); + kmem_cache_destroy_safe(&g_tools_map_remote_data_cache); +} + +int uvm_tools_init(dev_t uvm_base_dev) +{ + dev_t uvm_tools_dev = MKDEV(MAJOR(uvm_base_dev), NVIDIA_UVM_TOOLS_MINOR_NUMBER); + int ret = -ENOMEM; // This will be updated later if allocations succeed + + uvm_init_rwsem(&g_tools_va_space_list_lock, UVM_LOCK_ORDER_TOOLS_VA_SPACE_LIST); + + g_tools_event_tracker_cache = NV_KMEM_CACHE_CREATE("uvm_tools_event_tracker_t", + uvm_tools_event_tracker_t); + if (!g_tools_event_tracker_cache) + goto err_cache_destroy; + + g_tools_block_migration_data_cache = NV_KMEM_CACHE_CREATE("uvm_tools_block_migration_data_t", + block_migration_data_t); + if (!g_tools_block_migration_data_cache) + goto err_cache_destroy; + + g_tools_migration_data_cache = NV_KMEM_CACHE_CREATE("uvm_tools_migration_data_t", + migration_data_t); + if (!g_tools_migration_data_cache) + goto err_cache_destroy; + + g_tools_replay_data_cache = NV_KMEM_CACHE_CREATE("uvm_tools_replay_data_t", + replay_data_t); + if (!g_tools_replay_data_cache) + goto err_cache_destroy; + + g_tools_block_map_remote_data_cache = NV_KMEM_CACHE_CREATE("uvm_tools_block_map_remote_data_t", + block_map_remote_data_t); + if (!g_tools_block_map_remote_data_cache) + goto err_cache_destroy; + + g_tools_map_remote_data_cache = NV_KMEM_CACHE_CREATE("uvm_tools_map_remote_data_t", + map_remote_data_t); + if (!g_tools_map_remote_data_cache) + goto err_cache_destroy; + + uvm_spin_lock_init(&g_tools_channel_list_lock, UVM_LOCK_ORDER_LEAF); + + ret = nv_kthread_q_init(&g_tools_queue, "UVM Tools Event Queue"); + if (ret < 0) + goto err_cache_destroy; + + uvm_init_character_device(&g_uvm_tools_cdev, &uvm_tools_fops); + ret = cdev_add(&g_uvm_tools_cdev, uvm_tools_dev, 1); + if (ret != 0) { + UVM_ERR_PRINT("cdev_add (major %u, minor %u) failed: %d\n", MAJOR(uvm_tools_dev), + MINOR(uvm_tools_dev), ret); + goto err_stop_thread; + } + + return ret; + +err_stop_thread: + nv_kthread_q_stop(&g_tools_queue); + +err_cache_destroy: + _uvm_tools_destroy_cache_all(); + return ret; +} + +void uvm_tools_exit(void) +{ + unsigned i; + cdev_del(&g_uvm_tools_cdev); + + nv_kthread_q_stop(&g_tools_queue); + + for (i = 0; i < UvmEventNumTypesAll; ++i) + 
UVM_ASSERT(g_tools_enabled_event_count[i] == 0); + + UVM_ASSERT(list_empty(&g_tools_va_space_list)); + + _uvm_tools_destroy_cache_all(); +} diff --git a/kernel-open/nvidia-uvm/uvm_tools.h b/kernel-open/nvidia-uvm/uvm_tools.h new file mode 100644 index 000000000..7b5a5be9e --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_tools.h @@ -0,0 +1,121 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_TOOLS_H__ +#define __UVM_TOOLS_H__ + +#include "uvm_types.h" +#include "uvm_processors.h" +#include "uvm_forward_decl.h" +#include "uvm_test_ioctl.h" +#include "uvm_hal_types.h" +#include "uvm_va_block_types.h" + +NV_STATUS uvm_test_inject_tools_event(UVM_TEST_INJECT_TOOLS_EVENT_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_increment_tools_counter(UVM_TEST_INCREMENT_TOOLS_COUNTER_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_tools_flush_replay_events(UVM_TEST_TOOLS_FLUSH_REPLAY_EVENTS_PARAMS *params, struct file *filp); + +NV_STATUS uvm_api_tools_read_process_memory(UVM_TOOLS_READ_PROCESS_MEMORY_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_write_process_memory(UVM_TOOLS_WRITE_PROCESS_MEMORY_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_get_processor_uuid_table(UVM_TOOLS_GET_PROCESSOR_UUID_TABLE_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_tools_flush_events(UVM_TOOLS_FLUSH_EVENTS_PARAMS *params, struct file *filp); + +static UvmEventFatalReason uvm_tools_status_to_fatal_fault_reason(NV_STATUS status) +{ + switch (status) { + case NV_OK: + return UvmEventFatalReasonInvalid; + case NV_ERR_NO_MEMORY: + return UvmEventFatalReasonOutOfMemory; + case NV_ERR_INVALID_ADDRESS: + return UvmEventFatalReasonInvalidAddress; + case NV_ERR_INVALID_ACCESS_TYPE: + return UvmEventFatalReasonInvalidPermissions; + case NV_ERR_INVALID_OPERATION: + return UvmEventFatalReasonInvalidOperation; + default: + return UvmEventFatalReasonInternalError; + } +} + +void uvm_tools_record_cpu_fatal_fault(uvm_va_space_t *va_space, + NvU64 address, + bool is_write, + UvmEventFatalReason reason); + +void uvm_tools_record_gpu_fatal_fault(uvm_gpu_id_t gpu_id, + uvm_va_space_t *va_space, + const uvm_fault_buffer_entry_t *fault_entry, + UvmEventFatalReason reason); + +void uvm_tools_record_thrashing(uvm_va_space_t *va_space, + NvU64 address, + size_t region_size, + const 
uvm_processor_mask_t *processors); + +void uvm_tools_record_throttling_start(uvm_va_space_t *va_space, NvU64 address, uvm_processor_id_t processor); + +void uvm_tools_record_throttling_end(uvm_va_space_t *va_space, NvU64 address, uvm_processor_id_t processor); + +void uvm_tools_record_map_remote(uvm_va_block_t *va_block, + uvm_push_t *push, + uvm_processor_id_t processor, + uvm_processor_id_t residency, + NvU64 address, + size_t region_size, + UvmEventMapRemoteCause cause); + +void uvm_tools_record_block_migration_begin(uvm_va_block_t *va_block, + uvm_push_t *push, + uvm_processor_id_t dst_id, + uvm_processor_id_t src_id, + NvU64 start, + uvm_make_resident_cause_t cause); + +void uvm_tools_record_read_duplicate(uvm_va_block_t *va_block, + uvm_processor_id_t dst, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask); + +void uvm_tools_record_read_duplicate_invalidate(uvm_va_block_t *va_block, + uvm_processor_id_t dst, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask); + +void uvm_tools_broadcast_replay(uvm_gpu_t *gpu, + uvm_push_t *push, + NvU32 batch_id, + uvm_fault_client_type_t client_type); + +void uvm_tools_broadcast_replay_sync(uvm_gpu_t *gpu, + NvU32 batch_id, + uvm_fault_client_type_t client_type); + +void uvm_tools_broadcast_access_counter(uvm_gpu_t *gpu, + const uvm_access_counter_buffer_entry_t *buffer_entry, + bool on_managed); + +// schedules completed events and then waits for them to be dispatched +void uvm_tools_flush_events(void); + +#endif // __UVM_TOOLS_H__ diff --git a/kernel-open/nvidia-uvm/uvm_tools_init.h b/kernel-open/nvidia-uvm/uvm_tools_init.h new file mode 100644 index 000000000..252e70ca8 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_tools_init.h @@ -0,0 +1,33 @@ +/******************************************************************************* + Copyright (c) 2016 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef _UVM_TOOLS_INIT_H_ +#define _UVM_TOOLS_INIT_H_ + +#include "uvm_common.h" + +int uvm_tools_init(dev_t uvm_base_dev); + +void uvm_tools_exit(void); + +#endif // _UVM_TOOLS_INIT_H_ diff --git a/kernel-open/nvidia-uvm/uvm_tracker.c b/kernel-open/nvidia-uvm/uvm_tracker.c new file mode 100644 index 000000000..542e5e154 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_tracker.c @@ -0,0 +1,438 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_tracker.h" +#include "uvm_push.h" +#include "uvm_channel.h" +#include "uvm_kvmalloc.h" +#include "uvm_gpu.h" +#include "uvm_global.h" +#include "uvm_common.h" +#include "uvm_linux.h" + +static bool tracker_is_using_static_entries(uvm_tracker_t *tracker) +{ + return tracker->max_size == ARRAY_SIZE(tracker->static_entries); +} + +static void free_entries(uvm_tracker_t *tracker) +{ + if (tracker_is_using_static_entries(tracker)) + return; + uvm_kvfree(tracker->dynamic_entries); +} + +uvm_tracker_entry_t *uvm_tracker_get_entries(uvm_tracker_t *tracker) +{ + if (tracker_is_using_static_entries(tracker)) { + return tracker->static_entries; + } + else { + UVM_ASSERT(tracker->dynamic_entries != NULL); + return tracker->dynamic_entries; + } +} + +static uvm_tracker_entry_t *get_new_entry(uvm_tracker_t *tracker) +{ + NV_STATUS status = uvm_tracker_reserve(tracker, 1); + if (status != NV_OK) + return NULL; + UVM_ASSERT(tracker->size < tracker->max_size); + + return &uvm_tracker_get_entries(tracker)[tracker->size++]; +} + +NV_STATUS uvm_tracker_init_from(uvm_tracker_t *dst, uvm_tracker_t *src) +{ + NV_STATUS status; + uvm_tracker_init(dst); + status = uvm_tracker_overwrite(dst, src); + if (status != NV_OK) { + uvm_tracker_deinit(dst); + uvm_tracker_init(dst); + } + return status; +} + +void uvm_tracker_deinit(uvm_tracker_t *tracker) +{ + free_entries(tracker); + memset(tracker, 0, sizeof(*tracker)); +} + +NV_STATUS uvm_tracker_overwrite(uvm_tracker_t *dst, uvm_tracker_t *src) +{ + NV_STATUS status; + + uvm_tracker_clear(dst); + + status = uvm_tracker_reserve(dst, src->size); + if (status != NV_OK) + return status; + + dst->size = src->size; + memcpy(uvm_tracker_get_entries(dst), + uvm_tracker_get_entries(src), + src->size * sizeof(*uvm_tracker_get_entries(dst))); + 
+ return NV_OK; +} + +NV_STATUS uvm_tracker_reserve(uvm_tracker_t *tracker, NvU32 min_free_entries) +{ + if (tracker->size + min_free_entries > tracker->max_size) { + // Special case the first resize to jump from 1 all the way to 8. + // This is based on a guess that if a tracker needs more than 1 + // entry it likely needs much more. + // TODO: Bug 1764961: Verify that guess. + NvU32 new_max_size = max((NvU32)8, (NvU32)roundup_pow_of_two(tracker->size + min_free_entries)); + uvm_tracker_entry_t *new_entries; + + if (tracker_is_using_static_entries(tracker)) { + new_entries = uvm_kvmalloc(sizeof(*new_entries) * new_max_size); + if (new_entries) + memcpy(new_entries, tracker->static_entries, sizeof(*new_entries) * tracker->size); + } else { + new_entries = uvm_kvrealloc(tracker->dynamic_entries, sizeof(*new_entries) * new_max_size); + } + if (!new_entries) + return NV_ERR_NO_MEMORY; + tracker->dynamic_entries = new_entries; + tracker->max_size = new_max_size; + } + UVM_ASSERT(tracker->size + min_free_entries <= tracker->max_size); + return NV_OK; +} + +NV_STATUS uvm_tracker_add_push(uvm_tracker_t *tracker, uvm_push_t *push) +{ + uvm_tracker_entry_t entry; + + uvm_push_get_tracker_entry(push, &entry); + + return uvm_tracker_add_entry(tracker, &entry); +} + +NV_STATUS uvm_tracker_add_entry(uvm_tracker_t *tracker, uvm_tracker_entry_t *new_entry) +{ + uvm_tracker_entry_t *tracker_entry; + + for_each_tracker_entry(tracker_entry, tracker) { + if (tracker_entry->channel == new_entry->channel) { + tracker_entry->value = max(tracker_entry->value, new_entry->value); + return NV_OK; + } + } + + tracker_entry = get_new_entry(tracker); + if (tracker_entry == NULL) + return NV_ERR_NO_MEMORY; + + *tracker_entry = *new_entry; + + return NV_OK; +} + +void uvm_tracker_overwrite_with_entry(uvm_tracker_t *tracker, uvm_tracker_entry_t *entry) +{ + NV_STATUS status; + + uvm_tracker_clear(tracker); + + // An empty tracker always has space for at least one entry so this cannot + // fail. 
+ status = uvm_tracker_add_entry(tracker, entry); + UVM_ASSERT(status == NV_OK); +} + +void uvm_tracker_overwrite_with_push(uvm_tracker_t *tracker, uvm_push_t *push) +{ + uvm_tracker_entry_t entry; + + uvm_push_get_tracker_entry(push, &entry); + + uvm_tracker_overwrite_with_entry(tracker, &entry); +} + +static NV_STATUS reserve_for_entries_from_tracker(uvm_tracker_t *dst, uvm_tracker_t *src) +{ + NvU32 needed_free_entries = 0; + uvm_tracker_entry_t *src_entry, *dst_entry; + + for_each_tracker_entry(src_entry, src) { + bool found = false; + for_each_tracker_entry(dst_entry, dst) { + if (dst_entry->channel == src_entry->channel) { + found = true; + break; + } + } + if (!found) + needed_free_entries++; + } + + return uvm_tracker_reserve(dst, needed_free_entries); +} + +NV_STATUS uvm_tracker_add_tracker(uvm_tracker_t *dst, uvm_tracker_t *src) +{ + NV_STATUS status; + uvm_tracker_entry_t *src_entry; + + if (src == dst) + return NV_OK; + + status = uvm_tracker_reserve(dst, src->size); + if (status == NV_ERR_NO_MEMORY) { + uvm_tracker_remove_completed(dst); + uvm_tracker_remove_completed(src); + status = reserve_for_entries_from_tracker(dst, src); + } + if (status != NV_OK) { + return status; + } + + for_each_tracker_entry(src_entry, src) { + status = uvm_tracker_add_entry(dst, src_entry); + UVM_ASSERT_MSG(status == NV_OK, "Expected success with reserved memory but got error %d\n", status); + } + + return NV_OK; +} + +NV_STATUS uvm_tracker_overwrite_safe(uvm_tracker_t *dst, uvm_tracker_t *src) +{ + NV_STATUS status = uvm_tracker_overwrite(dst, src); + if (status == NV_ERR_NO_MEMORY) { + UVM_DBG_PRINT_RL("Failed to overwrite tracker, waiting\n"); + status = uvm_tracker_wait(src); + } + return status; +} + +NV_STATUS uvm_tracker_add_push_safe(uvm_tracker_t *tracker, uvm_push_t *push) +{ + NV_STATUS status = uvm_tracker_add_push(tracker, push); + if (status == NV_ERR_NO_MEMORY) { + UVM_DBG_PRINT_RL("Failed to add push to tracker, waiting\n"); + status = uvm_push_wait(push); + } + return status; +} + +NV_STATUS uvm_tracker_add_entry_safe(uvm_tracker_t *tracker, uvm_tracker_entry_t *new_entry) +{ + NV_STATUS status = uvm_tracker_add_entry(tracker, new_entry); + if (status == NV_ERR_NO_MEMORY) { + UVM_DBG_PRINT_RL("Failed to add entry to tracker, waiting\n"); + status = uvm_tracker_wait_for_entry(new_entry); + } + return status; +} + +NV_STATUS uvm_tracker_add_tracker_safe(uvm_tracker_t *dst, uvm_tracker_t *src) +{ + NV_STATUS status = uvm_tracker_add_tracker(dst, src); + if (status == NV_ERR_NO_MEMORY) { + UVM_DBG_PRINT_RL("Failed to add tracker to tracker, waiting\n"); + status = uvm_tracker_wait(src); + } + return status; +} + +bool uvm_tracker_is_entry_completed(uvm_tracker_entry_t *tracker_entry) +{ + if (!tracker_entry->channel) + return true; + + return uvm_channel_is_value_completed(tracker_entry->channel, tracker_entry->value); +} + +static void uvm_tracker_entry_print_pending_pushes(uvm_tracker_entry_t *entry) +{ + uvm_channel_t *channel = entry->channel; + uvm_gpu_t *gpu = uvm_channel_get_gpu(channel); + + UVM_DBG_PRINT("Tracker entry for value %llu (sema VA 0x%llx) channel %s GPU %s\n", + entry->value, + uvm_channel_tracking_semaphore_get_gpu_va(channel), + channel->name, + uvm_gpu_name(gpu)); + + uvm_channel_print_pending_pushes(channel); +} + +static void uvm_tracker_print_pending_pushes(uvm_tracker_t *tracker) +{ + uvm_tracker_entry_t *entry; + for_each_tracker_entry(entry, tracker) + uvm_tracker_entry_print_pending_pushes(entry); +} + +static NV_STATUS 
wait_for_entry_with_spin(uvm_tracker_entry_t *tracker_entry, uvm_spin_loop_t *spin) +{ + NV_STATUS status = NV_OK; + + while (!uvm_tracker_is_entry_completed(tracker_entry) && status == NV_OK) { + if (UVM_SPIN_LOOP(spin) == NV_ERR_TIMEOUT_RETRY) + uvm_tracker_entry_print_pending_pushes(tracker_entry); + + status = uvm_channel_check_errors(tracker_entry->channel); + if (status == NV_OK) + status = uvm_global_get_status(); + } + + if (status != NV_OK) { + UVM_ASSERT(status == uvm_global_get_status()); + tracker_entry->channel = NULL; + tracker_entry->value = 0; + } + + return status; +} + +NV_STATUS uvm_tracker_wait_for_entry(uvm_tracker_entry_t *tracker_entry) +{ + uvm_spin_loop_t spin; + uvm_spin_loop_init(&spin); + return wait_for_entry_with_spin(tracker_entry, &spin); +} + +NV_STATUS uvm_tracker_wait(uvm_tracker_t *tracker) +{ + NV_STATUS status = NV_OK; + uvm_spin_loop_t spin; + + uvm_spin_loop_init(&spin); + while (!uvm_tracker_is_completed(tracker) && status == NV_OK) { + if (UVM_SPIN_LOOP(&spin) == NV_ERR_TIMEOUT_RETRY) + uvm_tracker_print_pending_pushes(tracker); + + status = uvm_tracker_check_errors(tracker); + } + + if (status != NV_OK) { + UVM_ASSERT(status == uvm_global_get_status()); + + // Just clear the tracker without printing anything extra. If one of the + // entries from this tracker caused a channel error, + // uvm_tracker_check_errors() would have already printed it. And if we + // hit a global error for some other reason, we don't want to spam the + // log with all other pending entries. + // + // See the comment for uvm_tracker_wait() on why the entries are cleared. + uvm_tracker_clear(tracker); + } + + return status; +} + +NV_STATUS uvm_tracker_wait_for_other_gpus(uvm_tracker_t *tracker, uvm_gpu_t *gpu) +{ + NV_STATUS status = NV_OK; + uvm_tracker_entry_t *entry; + uvm_spin_loop_t spin; + + uvm_spin_loop_init(&spin); + + for_each_tracker_entry(entry, tracker) { + if (uvm_tracker_entry_gpu(entry) == gpu) + continue; + + status = wait_for_entry_with_spin(entry, &spin); + if (status != NV_OK) + break; + } + + if (status == NV_OK) { + uvm_tracker_remove_completed(tracker); + } + else { + UVM_ASSERT(status == uvm_global_get_status()); + uvm_tracker_clear(tracker); + } + + return status; +} + +NV_STATUS uvm_tracker_check_errors(uvm_tracker_t *tracker) +{ + uvm_tracker_entry_t *tracker_entry; + NV_STATUS status = uvm_global_get_status(); + + if (status != NV_OK) + return status; + + for_each_tracker_entry(tracker_entry, tracker) { + status = uvm_channel_check_errors(tracker_entry->channel); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +NV_STATUS uvm_tracker_query(uvm_tracker_t *tracker) +{ + NV_STATUS status; + bool completed = uvm_tracker_is_completed(tracker); + + status = uvm_tracker_check_errors(tracker); + if (status != NV_OK) + return status; + + return completed ? 
NV_OK : NV_WARN_MORE_PROCESSING_REQUIRED; +} + +void uvm_tracker_remove_completed(uvm_tracker_t *tracker) +{ + NvU32 i = 0; + + uvm_tracker_entry_t *entries = uvm_tracker_get_entries(tracker); + + // Keep removing completed entries until we run out of entries + while (i < tracker->size) { + if (uvm_tracker_is_entry_completed(&entries[i])) { + --tracker->size; + if (i != tracker->size) + entries[i] = entries[tracker->size]; + } + else { + ++i; + } + } +} + +bool uvm_tracker_is_completed(uvm_tracker_t *tracker) +{ + uvm_tracker_remove_completed(tracker); + + return tracker->size == 0; +} + +uvm_gpu_t *uvm_tracker_entry_gpu(uvm_tracker_entry_t *entry) +{ + return uvm_channel_get_gpu(entry->channel); +} + diff --git a/kernel-open/nvidia-uvm/uvm_tracker.h b/kernel-open/nvidia-uvm/uvm_tracker.h new file mode 100644 index 000000000..de9f703fd --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_tracker.h @@ -0,0 +1,219 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_TRACKER_H__ +#define __UVM_TRACKER_H__ + +#include "uvm_forward_decl.h" + +#include "uvm_linux.h" +#include "nvtypes.h" +#include "nvstatus.h" + +typedef struct +{ + // Tracked channel + // + // If NULL the entry is considered to be completed. + uvm_channel_t *channel; + + // Tracked channel's tracking semaphore value + NvU64 value; +} uvm_tracker_entry_t; + +typedef struct +{ + union + { + // The default static storage can fit a single entry as that's likely + // the most common use-case. If the tracker ever needs more space, a + // dynamic allocation will be made as part of adding an entry and + // dynamic_entries below will be used. + uvm_tracker_entry_t static_entries[1]; + + // Pointer to the array with dynamically allocated entries + uvm_tracker_entry_t *dynamic_entries; + }; + + // Number of used entries in the tracker + NvU32 size; + + // Max number of entries that the entries array can store currently + NvU32 max_size; + +} uvm_tracker_t; + +// Static initializer for a tracker. +// Importantly max_size needs to be set to the size of the static_entries array +// so that uvm_tracker_get_entries() works correctly. 
+// Note that the extra braces are necessary to avoid missing braces warning all the way down to: +// (near initialization for tracker..static_entries[0]) [-Wmissing-braces] +#define UVM_TRACKER_INIT() { { { { 0 } } }, 0, ARRAY_SIZE(((uvm_tracker_t *)0)->static_entries) } + +// Initialize a tracker +// This is guaranteed not to allocate any memory. +static void uvm_tracker_init(uvm_tracker_t *tracker) +{ + *tracker = (uvm_tracker_t)UVM_TRACKER_INIT(); +} + +// Deinitialize a tracker +// This will free any dynamic entries from the tracker +void uvm_tracker_deinit(uvm_tracker_t *tracker); + +// Overwrite a tracker using different tracker, which must be previously initialized. +// This may require allocating memory to fit entries in the tracker. On failure, +// dst is cleared and src is left unmodified. +NV_STATUS uvm_tracker_overwrite(uvm_tracker_t *dst, uvm_tracker_t *src); + +// Initialize tracker from another tracker. +// This may require allocating memory to fit entries in the tracker. +// On failure, uvm_tracker_deinit(dst) is safe but not required. +NV_STATUS uvm_tracker_init_from(uvm_tracker_t *dst, uvm_tracker_t *src); + +// Clear a tracker +// Remove all entries from tracker +// +// This won't change the max size of the tracker. +static void uvm_tracker_clear(uvm_tracker_t *tracker) +{ + tracker->size = 0; +} + +// Reserve enough space so min_free_entries can be added to the tracker +// without requiring memory allocation. +NV_STATUS uvm_tracker_reserve(uvm_tracker_t *tracker, NvU32 min_free_entries); + +// Add a push to the tracker +// The push needs be finished, i.e. uvm_push_end*() has been called on it. +// This may require allocating memory to fit a new entry in the tracker +NV_STATUS uvm_tracker_add_push(uvm_tracker_t *tracker, uvm_push_t *push); + +// Add a uvm_tracker_entry_t to a tracker +// This may require allocating memory to fit a new entry in the tracker +NV_STATUS uvm_tracker_add_entry(uvm_tracker_t *tracker, uvm_tracker_entry_t *new_entry); + +// Overwrite the tracker with a single entry +// This will never allocate memory as an empty tracker always has space for at +// least one entry and hence can never fail. +void uvm_tracker_overwrite_with_entry(uvm_tracker_t *tracker, uvm_tracker_entry_t *new_entry); + +// Overwrite the tracker with an entry from a push +// This will never allocate memory as an empty tracker always has space for at +// least one entry and hence can never fail. +void uvm_tracker_overwrite_with_push(uvm_tracker_t *tracker, uvm_push_t *push); + +// Add all entries from another tracker +// This may require allocating memory to fit a new entry in the tracker. +// On error no entries are added to destination tracker. +NV_STATUS uvm_tracker_add_tracker(uvm_tracker_t *dst, uvm_tracker_t *src); + +// "Safe" versions of the above. If memory cannot be allocated to add the new +// entries, these functions stall until entries are free. +NV_STATUS uvm_tracker_overwrite_safe(uvm_tracker_t *dst, uvm_tracker_t *src); +NV_STATUS uvm_tracker_add_push_safe(uvm_tracker_t *tracker, uvm_push_t *push); +NV_STATUS uvm_tracker_add_entry_safe(uvm_tracker_t *tracker, uvm_tracker_entry_t *new_entry); +NV_STATUS uvm_tracker_add_tracker_safe(uvm_tracker_t *dst, uvm_tracker_t *src); + +// Query whether all entries in the tracker are complete +// +// This won't change the max size of the tracker. 
+bool uvm_tracker_is_completed(uvm_tracker_t *tracker); + +// Wait for all tracker entries to complete +// This can only fail if a fatal error is hit that uvm_tracker_check_errors() +// would return. +// Both, on success and failure, all the entries will be removed from the +// tracker. This is done even on failure as otherwise it could be impossible to +// remove some entries from the tracker and they would eventually become invalid +// after the channels they track are destroyed. +// +// This won't change the max size of the tracker. +NV_STATUS uvm_tracker_wait(uvm_tracker_t *tracker); + +// Wait for all tracker entries for other GPUs to complete +// +// This can only fail if a fatal error is hit that uvm_tracker_check_errors() +// would return. On success, all the entries for GPUs other than the passed in +// GPU will be removed from the tracker. On failure, all entries will be removed +// from the tracker, same as for uvm_tracker_wait(). +// +// This won't change the max size of the tracker. +NV_STATUS uvm_tracker_wait_for_other_gpus(uvm_tracker_t *tracker, uvm_gpu_t *gpu); + +// Helper to wait for a tracker, then deinit it. +static NV_STATUS uvm_tracker_wait_deinit(uvm_tracker_t *tracker) +{ + NV_STATUS status = uvm_tracker_wait(tracker); + uvm_tracker_deinit(tracker); + return status; +} + +// Wait for a single tracker entry +// +// Similarly to uvm_tracker_wait(), if a global error is hit, the tracker_entry +// will be set to an empty value that's considered complete (its channel set to NULL). +NV_STATUS uvm_tracker_wait_for_entry(uvm_tracker_entry_t *tracker_entry); + +bool uvm_tracker_is_entry_completed(uvm_tracker_entry_t *tracker_entry); + +// Check for a global error and errors on all the channels that are tracked by the tracker +// +// This won't change the max size of the tracker. +NV_STATUS uvm_tracker_check_errors(uvm_tracker_t *tracker); + +// Check whether a tracker is complete and check for any errors +// This is a shortcut for uvm_tracker_is_completed() + uvm_tracker_check_errors(). +// If the tracker is complete and there are no errors, NV_OK is returned. +// If the tracker is not complete and there are no errors, NV_WARN_MORE_PROCESSING_REQUIRED is returned. +// If there are any errors, the error is returned. +// +// This won't change the max size of the tracker. +// +// Warning: If you call this in a spin loop, you should call schedule() +// periodically to break deadlock between RM and UVM. See the comments in +// uvm_spin_loop. +NV_STATUS uvm_tracker_query(uvm_tracker_t *tracker); + +// Query all entries for completion and remove the completed ones +// +// This won't change the max size of the tracker. 
+void uvm_tracker_remove_completed(uvm_tracker_t *tracker); + +// Get the array of tracker entries +uvm_tracker_entry_t *uvm_tracker_get_entries(uvm_tracker_t *tracker); + +static bool uvm_tracker_is_empty(uvm_tracker_t *tracker) +{ + return tracker->size == 0; +} + +uvm_gpu_t *uvm_tracker_entry_gpu(uvm_tracker_entry_t *entry); + +// Helper to iterate over all tracker entries +#define for_each_tracker_entry(entry, tracker) \ + for (entry = &uvm_tracker_get_entries(tracker)[0]; \ + entry != &uvm_tracker_get_entries(tracker)[(tracker)->size]; \ + ++entry) + +#endif // __UVM_TRACKER_H__ diff --git a/kernel-open/nvidia-uvm/uvm_tracker_test.c b/kernel-open/nvidia-uvm/uvm_tracker_test.c new file mode 100644 index 000000000..40576be3c --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_tracker_test.c @@ -0,0 +1,444 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_channel.h" +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "uvm_test.h" +#include "uvm_tracker.h" +#include "uvm_va_space.h" + +static NV_STATUS assert_tracker_is_completed(uvm_tracker_t *tracker) +{ + TEST_NV_CHECK_RET(uvm_tracker_query(tracker)); + TEST_CHECK_RET(uvm_tracker_is_completed(tracker)); + TEST_NV_CHECK_RET(uvm_tracker_wait(tracker)); + TEST_NV_CHECK_RET(uvm_tracker_check_errors(tracker)); + TEST_CHECK_RET(tracker->size == 0); + uvm_tracker_remove_completed(tracker); + uvm_tracker_clear(tracker); + + return NV_OK; +} + +static NV_STATUS assert_tracker_is_not_completed(uvm_tracker_t *tracker) +{ + uvm_tracker_remove_completed(tracker); + TEST_CHECK_RET(uvm_tracker_query(tracker) == NV_WARN_MORE_PROCESSING_REQUIRED); + TEST_CHECK_RET(!uvm_tracker_is_completed(tracker)); + TEST_NV_CHECK_RET(uvm_tracker_check_errors(tracker)); + TEST_CHECK_RET(tracker->size != 0); + + return NV_OK; +} + +// This test schedules some GPU work behind a semaphore and then allows the GPU +// to progress one tracker entry at a time verifying that the tracker entries +// are completed as expected. 
+static NV_STATUS test_tracker_completion(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + uvm_tracker_t tracker; + uvm_gpu_semaphore_t sema; + NvU32 count = 0; + NvU32 payload; + NV_STATUS status = NV_OK; + uvm_spin_loop_t spin; + + gpu = uvm_va_space_find_first_gpu(va_space); + TEST_CHECK_RET(gpu != NULL); + + TEST_NV_CHECK_RET(uvm_gpu_semaphore_alloc(gpu->semaphore_pool, &sema)); + + uvm_tracker_init(&tracker); + TEST_NV_CHECK_GOTO(assert_tracker_is_completed(&tracker), done); + + // The following assumes that it's possible to begin a small push that won't + // be able to finish (it's behind a semaphore that will be released from the + // CPU later) for each channel on a each GPU. + for_each_va_space_gpu(gpu, va_space) { + uvm_channel_pool_t *pool; + + uvm_for_each_pool(pool, gpu->channel_manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) { + uvm_push_t push; + NvU64 semaphore_gpu_va; + + ++count; + TEST_NV_CHECK_GOTO(uvm_push_begin_on_channel(channel, &push, "Test push"), done); + + semaphore_gpu_va = uvm_gpu_semaphore_get_gpu_va(&sema, gpu, uvm_channel_is_proxy(channel)); + + // Acquire increasing semaphore payloads on all channels so that they can be completed one by one + gpu->parent->host_hal->semaphore_acquire(&push, semaphore_gpu_va, count); + + uvm_push_end(&push); + + if (count & 1) + TEST_NV_CHECK_GOTO(uvm_tracker_add_push_safe(&tracker, &push), done); + else + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + } + } + } + + TEST_NV_CHECK_GOTO(assert_tracker_is_not_completed(&tracker), done); + + for (payload = 0; payload < count; ++payload) { + TEST_CHECK_GOTO(tracker.size == count - payload, done); + TEST_NV_CHECK_GOTO(assert_tracker_is_not_completed(&tracker), done); + + // Release the next payload allowing a single channel to complete + uvm_gpu_semaphore_set_payload(&sema, payload + 1); + + uvm_spin_loop_init(&spin); + while (tracker.size == count - payload) { + UVM_SPIN_LOOP(&spin); + uvm_tracker_remove_completed(&tracker); + } + + TEST_CHECK_GOTO(tracker.size == count - payload - 1, done); + } + + TEST_NV_CHECK_GOTO(assert_tracker_is_completed(&tracker), done); + +done: + uvm_gpu_semaphore_free(&sema); + uvm_tracker_wait_deinit(&tracker); + return status; +} + +static NV_STATUS test_tracker_basic(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + uvm_channel_t *channel; + uvm_tracker_t tracker; + uvm_tracker_entry_t entry; + NvU32 count = 0; + NV_STATUS status = NV_OK; + + gpu = uvm_va_space_find_first_gpu(va_space); + if (gpu == NULL) + return NV_ERR_INVALID_STATE; + + channel = uvm_channel_any(gpu->channel_manager); + if (channel == NULL) + return NV_ERR_INVALID_STATE; + + uvm_tracker_init(&tracker); + TEST_CHECK_GOTO(assert_tracker_is_completed(&tracker) == NV_OK, done); + + // Some channel + entry.channel = channel; + entry.value = 1; + + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + TEST_CHECK_RET(tracker.size == 1); + + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + TEST_CHECK_RET(tracker.size == 1); + TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].channel == entry.channel); + TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].value == entry.value); + + entry.value = 10; + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + TEST_CHECK_RET(tracker.size == 1); + TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].channel == entry.channel); + 
TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].value == entry.value); + + // Adding an older value for the same channel should have no effect + entry.value = 5; + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + TEST_CHECK_RET(tracker.size == 1); + TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].value == 10); + + uvm_tracker_clear(&tracker); + + TEST_CHECK_GOTO(assert_tracker_is_completed(&tracker) == NV_OK, done); + + for_each_va_space_gpu(gpu, va_space) { + uvm_channel_pool_t *pool; + + uvm_for_each_pool(pool, gpu->channel_manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) { + entry.channel = channel; + entry.value = uvm_channel_update_completed_value(channel); + if (count & 1) + status = uvm_tracker_add_entry_safe(&tracker, &entry); + else + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + ++count; + } + } + } + + TEST_CHECK_GOTO(tracker.size == count, done); + + // All the entries that we added are already completed + TEST_CHECK_GOTO(assert_tracker_is_completed(&tracker) == NV_OK, done); + + count = 0; + for_each_va_space_gpu(gpu, va_space) { + uvm_channel_pool_t *pool; + + uvm_for_each_pool(pool, gpu->channel_manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) { + uvm_push_t push; + status = uvm_push_begin_on_channel(channel, &push, "Test push"); + TEST_CHECK_GOTO(status == NV_OK, done); + + uvm_push_end(&push); + + TEST_NV_CHECK_GOTO(uvm_tracker_add_push(&tracker, &push), done); + ++count; + } + } + } + + TEST_CHECK_GOTO(tracker.size == count, done); + TEST_CHECK_GOTO(uvm_tracker_wait(&tracker) == NV_OK, done); + // After a wait, the tracker should be complete + TEST_CHECK_GOTO(assert_tracker_is_completed(&tracker) == NV_OK, done); + +done: + uvm_tracker_deinit(&tracker); + return status; +} + +static NV_STATUS test_tracker_overwrite(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + uvm_channel_t *channel; + uvm_tracker_t tracker, dup_tracker; + uvm_tracker_entry_t entry; + uvm_tracker_entry_t *entry_iter, *dup_entry_iter; + NV_STATUS status = NV_OK; + bool dup_tracker_init = false; + NvU32 count = 0; + + gpu = uvm_va_space_find_first_gpu(va_space); + if (gpu == NULL) + return NV_ERR_INVALID_STATE; + + channel = uvm_channel_any(gpu->channel_manager); + if (channel == NULL) + return NV_ERR_INVALID_STATE; + + uvm_tracker_init(&tracker); + TEST_CHECK_GOTO(assert_tracker_is_completed(&tracker) == NV_OK, done); + + // Some channel + entry.channel = channel; + entry.value = 1; + + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + TEST_CHECK_RET(tracker.size == 1); + TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].channel == entry.channel); + TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].value == entry.value); + + status = uvm_tracker_init_from(&dup_tracker, &tracker); + TEST_CHECK_GOTO(status == NV_OK, done); + dup_tracker_init = true; + TEST_CHECK_RET(dup_tracker.size == 1); + TEST_CHECK_RET(uvm_tracker_get_entries(&dup_tracker)[0].channel == entry.channel); + TEST_CHECK_RET(uvm_tracker_get_entries(&dup_tracker)[0].value == entry.value); + + entry.value = 2; + + uvm_tracker_overwrite_with_entry(&dup_tracker, &entry); + TEST_CHECK_RET(dup_tracker.size == 1); + TEST_CHECK_RET(uvm_tracker_get_entries(&dup_tracker)[0].channel == entry.channel); + TEST_CHECK_RET(uvm_tracker_get_entries(&dup_tracker)[0].value == entry.value); + + for_each_va_space_gpu(gpu, va_space) { + 
uvm_channel_pool_t *pool; + + uvm_for_each_pool(pool, gpu->channel_manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) { + entry.channel = channel; + entry.value = uvm_channel_update_completed_value(channel); + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + ++count; + } + } + } + TEST_CHECK_GOTO(tracker.size == count, done); + + status = uvm_tracker_overwrite(&dup_tracker, &tracker); + TEST_CHECK_GOTO(dup_tracker.size == count, done); + for_each_tracker_entry(dup_entry_iter, &dup_tracker) { + bool found = false; + for_each_tracker_entry(entry_iter, &tracker) { + if (entry_iter->channel == dup_entry_iter->channel && entry_iter->value == dup_entry_iter->value) { + found = true; + break; + } + } + TEST_CHECK_RET(found); + } + for_each_tracker_entry(entry_iter, &tracker) { + bool found = false; + for_each_tracker_entry(dup_entry_iter, &dup_tracker) { + if (entry_iter->channel == dup_entry_iter->channel && entry_iter->value == dup_entry_iter->value) { + found = true; + break; + } + } + TEST_CHECK_RET(found); + } + +done: + uvm_tracker_deinit(&tracker); + if (dup_tracker_init) + uvm_tracker_deinit(&dup_tracker); + return status; +} + +static NV_STATUS test_tracker_add_tracker(uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + uvm_channel_t *channel; + uvm_tracker_t tracker, dup_tracker; + uvm_tracker_entry_t entry; + uvm_tracker_entry_t *entry_iter, *dup_entry_iter; + NV_STATUS status = NV_OK; + NvU32 count = 0; + + gpu = uvm_va_space_find_first_gpu(va_space); + if (gpu == NULL) + return NV_ERR_INVALID_STATE; + + channel = uvm_channel_any(gpu->channel_manager); + if (channel == NULL) + return NV_ERR_INVALID_STATE; + + uvm_tracker_init(&tracker); + uvm_tracker_init(&dup_tracker); + TEST_CHECK_GOTO(assert_tracker_is_completed(&tracker) == NV_OK, done); + + // Some channel + entry.channel = channel; + entry.value = 1; + + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + TEST_CHECK_RET(tracker.size == 1); + TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].channel == entry.channel); + TEST_CHECK_RET(uvm_tracker_get_entries(&tracker)[0].value == entry.value); + + status = uvm_tracker_add_tracker(&dup_tracker, &tracker); + TEST_CHECK_GOTO(status == NV_OK, done); + TEST_CHECK_RET(dup_tracker.size == 1); + TEST_CHECK_RET(uvm_tracker_get_entries(&dup_tracker)[0].channel == entry.channel); + TEST_CHECK_RET(uvm_tracker_get_entries(&dup_tracker)[0].value == entry.value); + + for_each_va_space_gpu(gpu, va_space) { + uvm_channel_pool_t *pool; + + uvm_for_each_pool(pool, gpu->channel_manager) { + uvm_channel_t *channel; + + uvm_for_each_channel_in_pool(channel, pool) { + entry.channel = channel; + entry.value = uvm_channel_update_completed_value(channel); + status = uvm_tracker_add_entry(&tracker, &entry); + TEST_CHECK_GOTO(status == NV_OK, done); + ++count; + } + } + } + TEST_CHECK_GOTO(tracker.size == count, done); + + status = uvm_tracker_add_tracker_safe(&dup_tracker, &tracker); + TEST_CHECK_GOTO(dup_tracker.size == count, done); + for_each_tracker_entry(dup_entry_iter, &dup_tracker) { + bool found = false; + for_each_tracker_entry(entry_iter, &tracker) { + if (entry_iter->channel == dup_entry_iter->channel && entry_iter->value == dup_entry_iter->value) { + found = true; + break; + } + } + TEST_CHECK_RET(found); + } + for_each_tracker_entry(entry_iter, &tracker) { + bool found = false; + for_each_tracker_entry(dup_entry_iter, &dup_tracker) { + if (entry_iter->channel == 
dup_entry_iter->channel && entry_iter->value == dup_entry_iter->value) { + found = true; + break; + } + } + TEST_CHECK_RET(found); + } + +done: + uvm_tracker_deinit(&tracker); + uvm_tracker_deinit(&dup_tracker); + return status; +} + +NV_STATUS uvm_test_tracker_sanity(UVM_TEST_TRACKER_SANITY_PARAMS *params, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_read_rm(va_space); + + status = test_tracker_basic(va_space); + if (status != NV_OK) + goto done; + + status = test_tracker_completion(va_space); + if (status != NV_OK) + goto done; + + status = test_tracker_overwrite(va_space); + if (status != NV_OK) + goto done; + + status = test_tracker_add_tracker(va_space); + if (status != NV_OK) + goto done; + +done: + uvm_va_space_up_read_rm(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_turing.c b/kernel-open/nvidia-uvm/uvm_turing.c new file mode 100644 index 000000000..c7e3c1618 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_turing.c @@ -0,0 +1,94 @@ +/******************************************************************************* + Copyright (c) 2017-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_gpu.h" +#include "uvm_mem.h" +#include "uvm_turing_fault_buffer.h" + +void uvm_hal_turing_arch_init_properties(uvm_parent_gpu_t *parent_gpu) +{ + parent_gpu->tlb_batch.va_invalidate_supported = true; + + parent_gpu->tlb_batch.va_range_invalidate_supported = true; + + // TODO: Bug 1767241: Run benchmarks to figure out a good number + parent_gpu->tlb_batch.max_ranges = 8; + + parent_gpu->utlb_per_gpc_count = uvm_turing_get_utlbs_per_gpc(parent_gpu); + + parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.gpcCount * parent_gpu->utlb_per_gpc_count; + { + uvm_fault_buffer_entry_t *dummy; + UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8))); + } + + // A single top level PDE on Turing covers 128 TB and that's the minimum + // size that can be used. 
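// ---------------------------------------------------------------------------
// Illustrative sketch: the virtual address carve-outs assigned just below are
// whole multiples of 128 TiB. A quick standalone check of the arithmetic:
// 128 TiB is exactly one 47-bit region, and the UVM-internal base at 384 TiB
// sits three such 128 TiB slices up from address zero.
// ---------------------------------------------------------------------------
#define DEMO_TiB (1024ull * 1024 * 1024 * 1024)

_Static_assert(128 * DEMO_TiB == (1ull << 47), "128 TiB spans exactly 47 bits of VA");
_Static_assert(384 * DEMO_TiB == 3 * (1ull << 47), "384 TiB == 3 x 128 TiB");
// ------------------------------- end of sketch ------------------------------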
+ parent_gpu->rm_va_base = 0; + parent_gpu->rm_va_size = 128ull * 1024 * 1024 * 1024 * 1024; + + parent_gpu->uvm_mem_va_base = 384ull * 1024 * 1024 * 1024 * 1024; + parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE; + + parent_gpu->peer_copy_mode = UVM_GPU_PEER_COPY_MODE_VIRTUAL; + + // Not all units on Turing support 49-bit addressing, including those which + // access channel buffers. + parent_gpu->max_channel_va = 1ULL << 40; + + parent_gpu->max_host_va = 1ULL << 40; + + // Turing can map sysmem with any page size + parent_gpu->can_map_sysmem_with_large_pages = true; + + // Prefetch instructions will generate faults + parent_gpu->prefetch_fault_supported = true; + + // Turing can place GPFIFO in vidmem + parent_gpu->gpfifo_in_vidmem_supported = true; + + parent_gpu->replayable_faults_supported = true; + + parent_gpu->non_replayable_faults_supported = true; + + parent_gpu->access_counters_supported = true; + + parent_gpu->fault_cancel_va_supported = true; + + parent_gpu->scoped_atomics_supported = true; + + // SW method is not currently supported on Turing. + // See Bug 3254782: [RM] Support clear_faulted SW method on Volta and Turing + parent_gpu->has_clear_faulted_channel_sw_method = false; + + parent_gpu->has_clear_faulted_channel_method = true; + + parent_gpu->sparse_mappings_supported = true; + + parent_gpu->map_remap_larger_page_promotion = false; + + parent_gpu->smc.supported = false; + + parent_gpu->plc_supported = true; +} diff --git a/kernel-open/nvidia-uvm/uvm_turing_access_counter_buffer.c b/kernel-open/nvidia-uvm/uvm_turing_access_counter_buffer.c new file mode 100644 index 000000000..3f441832f --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_turing_access_counter_buffer.c @@ -0,0 +1,66 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" + +static void clear_access_counter_notifications_interrupt(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *reg; + NvU32 mask; + + reg = parent_gpu->access_counter_buffer_info.rm_info.pHubIntr; + mask = parent_gpu->access_counter_buffer_info.rm_info.accessCounterMask; + + UVM_GPU_WRITE_ONCE(*reg, mask); +} + +void uvm_hal_turing_disable_access_counter_notifications(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *reg; + NvU32 mask; + + reg = parent_gpu->access_counter_buffer_info.rm_info.pHubIntrEnClear; + mask = parent_gpu->access_counter_buffer_info.rm_info.accessCounterMask; + + UVM_GPU_WRITE_ONCE(*reg, mask); + + wmb(); + + // See the comment in uvm_hal_turing_disable_replayable_faults + clear_access_counter_notifications_interrupt(parent_gpu); +} + +void uvm_hal_turing_clear_access_counter_notifications(uvm_parent_gpu_t *parent_gpu, NvU32 get) +{ + clear_access_counter_notifications_interrupt(parent_gpu); + + wmb(); + + // Write GET to force the re-evaluation of the interrupt condition after the + // interrupt bit has been cleared. + UVM_GPU_WRITE_ONCE(*parent_gpu->access_counter_buffer_info.rm_info.pAccessCntrBufferGet, get); +} + diff --git a/kernel-open/nvidia-uvm/uvm_turing_fault_buffer.c b/kernel-open/nvidia-uvm/uvm_turing_fault_buffer.c new file mode 100644 index 000000000..eb0cda72c --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_turing_fault_buffer.c @@ -0,0 +1,67 @@ +/******************************************************************************* + Copyright (c) 2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" + +static void clear_replayable_faults_interrupt(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *reg; + NvU32 mask; + + reg = parent_gpu->fault_buffer_info.rm_info.replayable.pPmcIntr; + mask = parent_gpu->fault_buffer_info.rm_info.replayable.replayableFaultMask; + + UVM_GPU_WRITE_ONCE(*reg, mask); +} + +void uvm_hal_turing_clear_replayable_faults(uvm_parent_gpu_t *parent_gpu, NvU32 get) +{ + clear_replayable_faults_interrupt(parent_gpu); + + wmb(); + + // Write GET to force the re-evaluation of the interrupt condition after the + // interrupt bit has been cleared. 
+ parent_gpu->fault_buffer_hal->write_get(parent_gpu, get); +} + +void uvm_hal_turing_disable_replayable_faults(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *reg; + NvU32 mask; + + reg = parent_gpu->fault_buffer_info.rm_info.replayable.pPmcIntrEnClear; + mask = parent_gpu->fault_buffer_info.rm_info.replayable.replayableFaultMask; + + UVM_GPU_WRITE_ONCE(*reg, mask); + + wmb(); + + // We clear the interrupts right after disabling them in order to avoid + // triggering unnecessary new interrupts after re-enabling them if the + // interrupt condition is no longer true. + clear_replayable_faults_interrupt(parent_gpu); +} diff --git a/kernel-open/nvidia-uvm/uvm_turing_fault_buffer.h b/kernel-open/nvidia-uvm/uvm_turing_fault_buffer.h new file mode 100644 index 000000000..95b1ea0c9 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_turing_fault_buffer.h @@ -0,0 +1,59 @@ +/******************************************************************************* + Copyright (c) 2017-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_HAL_TURING_FAULT_BUFFER_H__ +#define __UVM_HAL_TURING_FAULT_BUFFER_H__ + +#include "nvtypes.h" +#include "uvm_common.h" +#include "uvm_gpu.h" + +// There are up to 8 TPCs per GPC in Turing, and there is 1 LTP uTLB per TPC. Besides, there is one RGG uTLB per GPC. +// Each TPC has a number of clients that can make requests to its uTLB: 1xTPCCS, 1xPE, 2xT1. The client ids are local +// to their GPC and the id mapping is linear across TPCs: +// TPC_n has TPCCS_n, PE_n, T1_p, and T1_q, where p=2*n and q=p+1. +// +// NV_PFAULT_CLIENT_GPC_LTP_UTLB_n and NV_PFAULT_CLIENT_GPC_RGG_UTLB enums can be ignored. These will never be reported +// in a fault message, and should never be used in an invalidate. Therefore, we define our own values. 
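// ---------------------------------------------------------------------------
// Illustrative sketch of the arithmetic described above: for GPC-local TPC
// index n, the two T1 clients are T1_(2n) and T1_(2n+1), and a GPC with the
// maximum of 8 TPCs has 8 LTP uTLBs plus the single RGG uTLB. The names below
// are invented for illustration.
// ---------------------------------------------------------------------------
#include <assert.h>

enum { DEMO_MAX_TPC_PER_GPC = 8 };                       /* from the comment above */
enum { DEMO_UTLBS_PER_GPC = DEMO_MAX_TPC_PER_GPC + 1 };  /* +1 for the RGG uTLB    */

_Static_assert(DEMO_UTLBS_PER_GPC == 9, "matches the 9 ids (RGG + LTP0..LTP7) enumerated below");

int main(void)
{
    unsigned n = 3;                     // GPC-local TPC index
    unsigned t1_lo = 2 * n;             // first T1 client of TPC_n
    unsigned t1_hi = 2 * n + 1;         // second T1 client of TPC_n

    assert(t1_lo == 6 && t1_hi == 7);
    return 0;
}
// ------------------------------- end of sketch ------------------------------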
+typedef enum { + UVM_TURING_GPC_UTLB_ID_RGG = 0, + UVM_TURING_GPC_UTLB_ID_LTP0 = 1, + UVM_TURING_GPC_UTLB_ID_LTP1 = 2, + UVM_TURING_GPC_UTLB_ID_LTP2 = 3, + UVM_TURING_GPC_UTLB_ID_LTP3 = 4, + UVM_TURING_GPC_UTLB_ID_LTP4 = 5, + UVM_TURING_GPC_UTLB_ID_LTP5 = 6, + UVM_TURING_GPC_UTLB_ID_LTP6 = 7, + UVM_TURING_GPC_UTLB_ID_LTP7 = 8, + + UVM_TURING_GPC_UTLB_COUNT, +} uvm_turing_gpc_utlb_id_t; + +static NvU32 uvm_turing_get_utlbs_per_gpc(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 utlbs = parent_gpu->rm_info.maxTpcPerGpcCount + 1; + UVM_ASSERT(utlbs <= UVM_TURING_GPC_UTLB_COUNT); + return utlbs; +} + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_turing_host.c b/kernel-open/nvidia-uvm/uvm_turing_host.c new file mode 100644 index 000000000..295cabcf1 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_turing_host.c @@ -0,0 +1,100 @@ +/******************************************************************************* + Copyright (c) 2017-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_push.h" +#include "uvm_user_channel.h" +#include "clc46f.h" + +void uvm_hal_turing_host_semaphore_release(uvm_push_t *push, NvU64 gpu_va, NvU32 payload) +{ + NvU32 sem_lo; + UVM_ASSERT(!(NvOffset_LO32(gpu_va) & ~HWSHIFTMASK(C46F, SEM_ADDR_LO, OFFSET))); + sem_lo = READ_HWVALUE(NvOffset_LO32(gpu_va), C46F, SEM_ADDR_LO, OFFSET); + + uvm_hal_wfi_membar(push, uvm_push_get_and_reset_membar_flag(push)); + + NV_PUSH_5U(C46F, SEM_ADDR_LO, HWVALUE(C46F, SEM_ADDR_LO, OFFSET, sem_lo), + SEM_ADDR_HI, HWVALUE(C46F, SEM_ADDR_HI, OFFSET, NvOffset_HI32(gpu_va)), + SEM_PAYLOAD_LO, payload, + SEM_PAYLOAD_HI, 0, + SEM_EXECUTE, HWCONST(C46F, SEM_EXECUTE, OPERATION, RELEASE) | + HWCONST(C46F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) | + HWCONST(C46F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS) | + HWCONST(C46F, SEM_EXECUTE, RELEASE_WFI, DIS)); +} + +void uvm_hal_turing_host_semaphore_acquire(uvm_push_t *push, NvU64 gpu_va, NvU32 payload) +{ + NvU32 sem_lo; + UVM_ASSERT(!(NvOffset_LO32(gpu_va) & ~HWSHIFTMASK(C46F, SEM_ADDR_LO, OFFSET))); + sem_lo = READ_HWVALUE(NvOffset_LO32(gpu_va), C46F, SEM_ADDR_LO, OFFSET); + NV_PUSH_5U(C46F, SEM_ADDR_LO, HWVALUE(C46F, SEM_ADDR_LO, OFFSET, sem_lo), + SEM_ADDR_HI, HWVALUE(C46F, SEM_ADDR_HI, OFFSET, NvOffset_HI32(gpu_va)), + SEM_PAYLOAD_LO, payload, + SEM_PAYLOAD_HI, 0, + SEM_EXECUTE, HWCONST(C46F, SEM_EXECUTE, OPERATION, ACQ_CIRC_GEQ) | + HWCONST(C46F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) | + HWCONST(C46F, SEM_EXECUTE, ACQUIRE_SWITCH_TSG, EN)); +} + +void uvm_hal_turing_host_clear_faulted_channel_method(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *fault) +{ + NvU32 clear_type_value = 0; + + UVM_ASSERT(user_channel->gpu->parent->has_clear_faulted_channel_method); + + if (fault->fault_source.mmu_engine_type == UVM_MMU_ENGINE_TYPE_HOST) { + clear_type_value = HWCONST(C46F, CLEAR_FAULTED, TYPE, PBDMA_FAULTED); + } + else if (fault->fault_source.mmu_engine_type == UVM_MMU_ENGINE_TYPE_CE) { + clear_type_value = HWCONST(C46F, CLEAR_FAULTED, TYPE, ENG_FAULTED); + } + else { + UVM_ASSERT_MSG(false, "Unsupported MMU engine type %s\n", + uvm_mmu_engine_type_string(fault->fault_source.mmu_engine_type)); + } + + NV_PUSH_1U(C46F, CLEAR_FAULTED, HWVALUE(C46F, CLEAR_FAULTED, HANDLE, user_channel->clear_faulted_token) | + clear_type_value); +} + +// Direct copy of uvm_hal_maxwell_host_set_gpfifo_entry(). It removes +// GP_ENTRY1_PRIV_KERNEL, which has been deprecated in Turing+. 
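// ---------------------------------------------------------------------------
// Illustrative sketch: the semaphore acquire method above programs
// SEM_EXECUTE with OPERATION ACQ_CIRC_GEQ, i.e. the channel stalls until the
// 32-bit payload reaches the acquire value using a wrap-tolerant comparison.
// One common way to model such a "circular greater-or-equal" on the CPU side
// is a signed 32-bit difference; this is an assumption about the comparison,
// not a statement of the exact hardware behavior.
// ---------------------------------------------------------------------------
#include <assert.h>
#include <stdint.h>

static int demo_circ_geq(uint32_t payload, uint32_t acquire_value)
{
    return (int32_t)(payload - acquire_value) >= 0;
}

int main(void)
{
    assert(demo_circ_geq(10, 10));              // equal: acquire passes
    assert(!demo_circ_geq(9, 10));              // not yet released
    assert(demo_circ_geq(5, 0xFFFFFFF0u));      // still passes across the 32-bit wrap
    return 0;
}
// ------------------------------- end of sketch ------------------------------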
+void uvm_hal_turing_host_set_gpfifo_entry(NvU64 *fifo_entry, NvU64 pushbuffer_va, NvU32 pushbuffer_length) +{ + NvU64 fifo_entry_value; + + UVM_ASSERT(!uvm_global_is_suspended()); + UVM_ASSERT_MSG(pushbuffer_va % 4 == 0, "pushbuffer va unaligned: %llu\n", pushbuffer_va); + UVM_ASSERT_MSG(pushbuffer_length % 4 == 0, "pushbuffer length unaligned: %u\n", pushbuffer_length); + + fifo_entry_value = HWVALUE(C46F, GP_ENTRY0, GET, NvU64_LO32(pushbuffer_va) >> 2); + fifo_entry_value |= (NvU64)(HWVALUE(C46F, GP_ENTRY1, GET_HI, NvU64_HI32(pushbuffer_va)) | + HWVALUE(C46F, GP_ENTRY1, LENGTH, pushbuffer_length >> 2)) << 32; + + *fifo_entry = fifo_entry_value; +} + diff --git a/kernel-open/nvidia-uvm/uvm_turing_mmu.c b/kernel-open/nvidia-uvm/uvm_turing_mmu.c new file mode 100644 index 000000000..d0324b72d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_turing_mmu.c @@ -0,0 +1,184 @@ +/******************************************************************************* + Copyright (c) 2017-2020 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_types.h" +#include "uvm_forward_decl.h" +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_mmu.h" +#include "hwref/turing/tu102/dev_mmu.h" +#include "hwref/turing/tu102/dev_fault.h" + +// This is mainly a copy of make_pte_volta in uvm_volta_mmu. This version +// sets NV_MMU_PTE_KIND_GENERIC_MEMORY, instead, since NV_MMU_PTE_KIND_PITCH +// no longer exists. 
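// ---------------------------------------------------------------------------
// Illustrative sketch, before the function below: the PTE is assembled by
// OR-ing field values shifted into hardware-defined bit positions, which is
// what the HWCONST64()/HWVALUE64() macros expand to from the dev_mmu.h
// definitions. The field layout used here is invented purely to show the
// mechanics and is not the real Turing PTE layout.
// ---------------------------------------------------------------------------
#include <assert.h>
#include <stdint.h>

#define DEMO_PTE_VALID_SHIFT     0
#define DEMO_PTE_VALID_MASK      0x1ull
#define DEMO_PTE_APERTURE_SHIFT  1
#define DEMO_PTE_APERTURE_MASK   0x3ull

static uint64_t demo_field(uint64_t value, unsigned shift, uint64_t mask)
{
    assert((value & ~mask) == 0);       // the value must fit in the field
    return value << shift;
}

int main(void)
{
    uint64_t pte = 0;

    pte |= demo_field(1, DEMO_PTE_VALID_SHIFT, DEMO_PTE_VALID_MASK);        // valid = 1
    pte |= demo_field(2, DEMO_PTE_APERTURE_SHIFT, DEMO_PTE_APERTURE_MASK);  // aperture = 2

    assert(pte == 0x5);                 // 0b101: aperture field next to the valid bit
    return 0;
}
// ------------------------------- end of sketch ------------------------------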
+static NvU64 make_pte_turing(uvm_aperture_t aperture, NvU64 address, uvm_prot_t prot, NvU64 flags) +{ + NvU8 aperture_bits = 0; + NvU64 pte_bits = 0; + + UVM_ASSERT(prot != UVM_PROT_NONE); + UVM_ASSERT((flags & ~UVM_MMU_PTE_FLAGS_MASK) == 0); + + // valid 0:0 + pte_bits |= HWCONST64(_MMU_VER2, PTE, VALID, TRUE); + + // aperture 2:1 + if (aperture == UVM_APERTURE_SYS) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_SYSTEM_COHERENT_MEMORY; + else if (aperture == UVM_APERTURE_VID) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_VIDEO_MEMORY; + else if (aperture >= UVM_APERTURE_PEER_0 && aperture <= UVM_APERTURE_PEER_7) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_PEER_MEMORY; + else + UVM_ASSERT_MSG(0, "Invalid aperture: %d\n", aperture); + + pte_bits |= HWVALUE64(_MMU_VER2, PTE, APERTURE, aperture_bits); + + // volatile 3:3 + if (flags & UVM_MMU_PTE_FLAGS_CACHED) + pte_bits |= HWCONST64(_MMU_VER2, PTE, VOL, FALSE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, VOL, TRUE); + + // encrypted 4:4 + pte_bits |= HWCONST64(_MMU_VER2, PTE, ENCRYPTED, FALSE); + + // privilege 5:5 + pte_bits |= HWCONST64(_MMU_VER2, PTE, PRIVILEGE, FALSE); + + // read only 6:6 + if (prot == UVM_PROT_READ_ONLY) + pte_bits |= HWCONST64(_MMU_VER2, PTE, READ_ONLY, TRUE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, READ_ONLY, FALSE); + + // atomic disable 7:7 + if (prot == UVM_PROT_READ_WRITE_ATOMIC) + pte_bits |= HWCONST64(_MMU_VER2, PTE, ATOMIC_DISABLE, FALSE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, ATOMIC_DISABLE, TRUE); + + address >>= NV_MMU_VER2_PTE_ADDRESS_SHIFT; + if (aperture == UVM_APERTURE_SYS) { + // sys address 53:8 + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_SYS, address); + } + else { + NvU64 addr_lo = address & HWMASK64(_MMU_VER2, PTE, ADDRESS_VID); + NvU64 addr_hi = address >> HWSIZE(_MMU_VER2, PTE, ADDRESS_VID); + + + // vid address 32:8 for bits 36:12 of the physical address + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_VID, addr_lo); + + // comptagline 53:36 - this can be overloaded in some cases to reference + // a 47-bit physical address. Currently, the only known cases of this + // is for nvswitch, where peer id is the fabric id programmed for + // such peer mappings + pte_bits |= HWVALUE64(_MMU_VER2, PTE, COMPTAGLINE, addr_hi); + + // peer id 35:33 + if (aperture != UVM_APERTURE_VID) + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_VID_PEER, UVM_APERTURE_PEER_ID(aperture)); + } + + pte_bits |= HWVALUE64(_MMU_VER2, PTE, KIND, NV_MMU_PTE_KIND_GENERIC_MEMORY); + + return pte_bits; +} + +static NvU64 make_sked_reflected_pte_turing(void) +{ + NvU64 pte_bits = 0; + + pte_bits |= HWCONST64(_MMU_VER2, PTE, VALID, TRUE); + pte_bits |= HWVALUE64(_MMU_VER2, PTE, KIND, NV_MMU_PTE_KIND_SMSKED_MESSAGE); + + return pte_bits; +} + +static NvU64 poisoned_pte_turing(void) +{ + // An invalid PTE won't be fatal from faultable units like SM, which is the + // most likely source of bad PTE accesses. + + // Engines with priv accesses won't fault on the priv PTE, so add a backup + // mechanism using an impossible memory address. MMU will trigger an + // interrupt when it detects a bad physical address. + // + // This address has to fit within 37 bits (max address width of vidmem) and + // be aligned to page_size. 
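// ---------------------------------------------------------------------------
// Illustrative sketch: a standalone check of the two constraints spelled out
// above, applied to the constant chosen just below.
// ---------------------------------------------------------------------------
#define DEMO_POISON_PHYS_ADDR 0x1bad000000ull

_Static_assert(DEMO_POISON_PHYS_ADDR < (1ull << 37),
               "fits within the 37-bit vidmem physical address width");
_Static_assert((DEMO_POISON_PHYS_ADDR & ((1ull << 24) - 1)) == 0,
               "aligned to 16 MiB, which covers the 4 KiB, 64 KiB and 2 MiB page sizes");
// ------------------------------- end of sketch ------------------------------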
+ NvU64 phys_addr = 0x1bad000000ULL; + + NvU64 pte_bits = make_pte_turing(UVM_APERTURE_VID, phys_addr, UVM_PROT_READ_ONLY, UVM_MMU_PTE_FLAGS_NONE); + return WRITE_HWCONST64(pte_bits, _MMU_VER2, PTE, PRIVILEGE, TRUE); +} + + +static uvm_mmu_mode_hal_t turing_mmu_mode_hal; + +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_turing(NvU32 big_page_size) +{ + static bool initialized = false; + + UVM_ASSERT(big_page_size == UVM_PAGE_SIZE_64K || big_page_size == UVM_PAGE_SIZE_128K); + + // TODO: Bug 1789555: RM should reject the creation of GPU VA spaces with + // 128K big page size for Pascal+ GPUs + if (big_page_size == UVM_PAGE_SIZE_128K) + return NULL; + + if (!initialized) { + uvm_mmu_mode_hal_t *volta_mmu_mode_hal = uvm_hal_mmu_mode_volta(big_page_size); + UVM_ASSERT(volta_mmu_mode_hal); + + // The assumption made is that arch_hal->mmu_mode_hal() will be + // called under the global lock the first time, so check it here. + uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + turing_mmu_mode_hal = *volta_mmu_mode_hal; + turing_mmu_mode_hal.make_pte = make_pte_turing; + turing_mmu_mode_hal.make_sked_reflected_pte = make_sked_reflected_pte_turing; + turing_mmu_mode_hal.poisoned_pte = poisoned_pte_turing; + + initialized = true; + } + + return &turing_mmu_mode_hal; +} + +uvm_mmu_engine_type_t uvm_hal_turing_mmu_engine_id_to_type(NvU16 mmu_engine_id) +{ + if (mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_HOST0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_HOST14) + return UVM_MMU_ENGINE_TYPE_HOST; + + if (mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_CE0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_CE8) + return UVM_MMU_ENGINE_TYPE_CE; + + // We shouldn't be servicing faults from any other engines + UVM_ASSERT_MSG(mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_GRAPHICS && mmu_engine_id < NV_PFAULT_MMU_ENG_ID_BAR1, + "Unexpected engine ID: 0x%x\n", mmu_engine_id); + + return UVM_MMU_ENGINE_TYPE_GRAPHICS; +} diff --git a/kernel-open/nvidia-uvm/uvm_types.h b/kernel-open/nvidia-uvm/uvm_types.h new file mode 100644 index 000000000..088ecc89a --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_types.h @@ -0,0 +1,1081 @@ +/******************************************************************************* + Copyright (c) 2013-2020 NVidia Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +// +// uvm_types.h +// +// This file contains basic datatypes that UVM requires. 
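// ---------------------------------------------------------------------------
// Illustrative sketch (referring back to uvm_hal_mmu_mode_turing() in
// uvm_turing_mmu.c above): the Turing MMU HAL is built by copying the Volta
// ops table and overriding only the entries that changed. A standalone model
// of that copy-and-override pattern; the types and functions below are
// invented for illustration.
// ---------------------------------------------------------------------------
#include <assert.h>

typedef struct {
    int (*make_pte)(void);
    int (*poisoned_pte)(void);
} demo_ops_t;

static int demo_base_make_pte(void)     { return 1; }
static int demo_base_poisoned_pte(void) { return 2; }
static int demo_new_make_pte(void)      { return 3; }

static const demo_ops_t demo_base_ops = { demo_base_make_pte, demo_base_poisoned_pte };

int main(void)
{
    // Start from a copy of the previous generation's table, then override
    // only the entries whose behavior changed.
    demo_ops_t derived = demo_base_ops;
    derived.make_pte = demo_new_make_pte;

    assert(derived.make_pte() == 3);        // overridden
    assert(derived.poisoned_pte() == 2);    // inherited unchanged
    return 0;
}
// ------------------------------- end of sketch ------------------------------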
+// + +#ifndef _UVM_TYPES_H_ +#define _UVM_TYPES_H_ + +#include "nvlimits.h" +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvCpuUuid.h" + +#ifndef __KERNEL__ + +#endif + +/******************************************************************************* + UVM stream types +*******************************************************************************/ + +typedef enum +{ + UvmStreamTypeRegular = 0, + UvmStreamTypeAll = 1, + UvmStreamTypeNone = 2 +} UvmStreamType; + +#define UVM_STREAM_INVALID ((UvmStream)0ULL) +#define UVM_STREAM_ALL ((UvmStream)2ULL) +#define UVM_STREAM_NONE ((UvmStream)3ULL) + +typedef unsigned long long UvmStream; + +#define UVM_MAX_GPUS NV_MAX_DEVICES +#define UVM_MAX_PROCESSORS (UVM_MAX_GPUS + 1) + +#define UVM_INIT_FLAGS_DISABLE_HMM ((NvU64)0x1) +#define UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE ((NvU64)0x2) +#define UVM_INIT_FLAGS_MASK ((NvU64)0x3) + +#define UVM_RANGE_GROUP_ID_NONE ((NvU64)0) + +//------------------------------------------------------------------------------ +// UVM GPU mapping types +// +// These types indicate the kinds of accesses allowed from a given GPU at the +// specified virtual address range. There are 3 basic kinds of accesses: read, +// write and atomics. Each type indicates what kinds of accesses are allowed. +// Accesses of any disallowed kind are fatal. The "Default" type specifies that +// the UVM driver should decide on the types of accesses allowed. +//------------------------------------------------------------------------------ +typedef enum +{ + UvmGpuMappingTypeDefault = 0, + UvmGpuMappingTypeReadWriteAtomic = 1, + UvmGpuMappingTypeReadWrite = 2, + UvmGpuMappingTypeReadOnly = 3, + UvmGpuMappingTypeCount = 4 +} UvmGpuMappingType; + +//------------------------------------------------------------------------------ +// UVM GPU caching types +// +// These types indicate the cacheability of the specified virtual address range +// from a given GPU. The "Default" type specifies that the UVM driver should +// set caching on or off as required to follow the UVM coherence model. The +// "ForceUncached" and "ForceCached" types will always turn caching off or on +// respectively. These two types override the cacheability specified by the UVM +// coherence model. +//------------------------------------------------------------------------------ +typedef enum +{ + UvmGpuCachingTypeDefault = 0, + UvmGpuCachingTypeForceUncached = 1, + UvmGpuCachingTypeForceCached = 2, + UvmGpuCachingTypeCount = 3 +} UvmGpuCachingType; + +//------------------------------------------------------------------------------ +// UVM GPU format types +// +// These types indicate the memory format of the specified virtual address +// range for a given GPU. The "Default" type specifies that the UVM driver will +// detect the format based on the allocation and is mutually inclusive with +// UvmGpuFormatElementBitsDefault. +//------------------------------------------------------------------------------ +typedef enum { + UvmGpuFormatTypeDefault = 0, + UvmGpuFormatTypeBlockLinear = 1, + UvmGpuFormatTypeCount = 2 +} UvmGpuFormatType; + +//------------------------------------------------------------------------------ +// UVM GPU Element bits types +// +// These types indicate the element size of the specified virtual address range +// for a given GPU. The "Default" type specifies that the UVM driver will +// detect the element size based on the allocation and is mutually inclusive +// with UvmGpuFormatTypeDefault. 
The element size is specified in bits: +// UvmGpuFormatElementBits8 uses the 8-bits format. +//------------------------------------------------------------------------------ +typedef enum { + UvmGpuFormatElementBitsDefault = 0, + UvmGpuFormatElementBits8 = 1, + UvmGpuFormatElementBits16 = 2, + // Cuda does not support 24-bit width + UvmGpuFormatElementBits32 = 4, + UvmGpuFormatElementBits64 = 5, + UvmGpuFormatElementBits128 = 6, + UvmGpuFormatElementBitsCount = 7 +} UvmGpuFormatElementBits; + +//------------------------------------------------------------------------------ +// UVM GPU Compression types +// +// These types indicate the compression type of the specified virtual address +// range for a given GPU. The "Default" type specifies that the UVM driver will +// detect the compression attributes based on the allocation. Any type other +// than the default will override the compression behavior of the physical +// allocation. UvmGpuCompressionTypeEnabledNoPlc will disable PLC but enables +// generic compression. UvmGpuCompressionTypeEnabledNoPlc type is only supported +// on Turing plus GPUs. Since UvmGpuCompressionTypeEnabledNoPlc type enables +// generic compression, it can only be used when the compression attribute of +// the underlying physical allocation is enabled. +//------------------------------------------------------------------------------ +typedef enum { + UvmGpuCompressionTypeDefault = 0, + UvmGpuCompressionTypeEnabledNoPlc = 1, + UvmGpuCompressionTypeCount = 2 +} UvmGpuCompressionType; + +typedef struct +{ + NvProcessorUuid gpuUuid; + NvU32 gpuMappingType; // UvmGpuMappingType + NvU32 gpuCachingType; // UvmGpuCachingType + NvU32 gpuFormatType; // UvmGpuFormatType + NvU32 gpuElementBits; // UvmGpuFormatElementBits + NvU32 gpuCompressionType; // UvmGpuCompressionType +} UvmGpuMappingAttributes; + +// forward declaration of OS-dependent structure +struct UvmGlobalState_tag; + +// Platform specific parameters for UvmRegisterGpu* +typedef union +{ + struct { + // File descriptor for RM's control file + int ctrlFd; + // RM client handle + NvHandle hClient; + // RM SMC partition reference + NvHandle hSmcPartRef; + } rm_linux; +} UvmGpuPlatformParams; + +// Platform specific parameters for UvmRegisterGpuVaSpace +typedef union +{ + struct { + // File descriptor for RM's control file + int ctrlFd; + // RM client handle + NvHandle hClient; + // RM GPU VA space handle + NvHandle hVaSpace; + } rm_linux; + struct { + // RM client handle + NvHandle hClient; + // RM GPU VA space handle + NvHandle hVaSpace; + } rm_windows; +} UvmGpuVaSpacePlatformParams; + +// Platform specific parameters for UvmRegisterChannel and UvmUnregisterChannel +typedef union +{ + struct { + // File descriptor for RM's control file + int ctrlFd; + // RM client handle + NvHandle hClient; + // RM channel handle + NvHandle hChannel; + } rm_linux; +} UvmChannelPlatformParams; + +// Platform specific parameters for UvmMapExternalAllocation +typedef union +{ + struct { + // File descriptor for RM's control file + int ctrlFd; + // RM client handle + NvHandle hClient; + // RM allocation handle + NvHandle hMemory; + } rm_linux; +} UvmAllocationPlatformParams; + +//------------------------------------------------------------------------------ +// Tools API types +//------------------------------------------------------------------------------ + +#define UVM_DEBUG_V1 0x00000001 + +typedef NvUPtr UvmDebugSession; + +//------------------------------------------------------------------------------ +// Counter scope: It can be 
one of the following: +// - Single GPU for a process (UvmCounterScopeProcessSingleGpu) +// - Aggregate of all GPUs for a process (UvmCounterScopeProcessAllGpu) +// - Single GPU system-wide (UvmCounterScopeGlobalSingleGpu) +// (UvmCounterScopeGlobalSingleGpu is not supported for CUDA 6.0) +// +// Note: The user must not assume that the counter values are equal to zero +// at the time of enabling counters. +// Difference between end state counter value and start state counter value +// should be used to find out the correct value over a given period of time. +//------------------------------------------------------------------------------ +typedef enum +{ + UvmCounterScopeProcessSingleGpu = 0, + UvmCounterScopeProcessAllGpu = 1, + UvmCounterScopeGlobalSingleGpu = 2, + UvmCounterScopeSize +} UvmCounterScope; + +//------------------------------------------------------------------------------ +// Following numbers assigned to the counter name are used to index their value +// in the counter array. +//------------------------------------------------------------------------------ +typedef enum +{ + UvmCounterNameBytesXferHtD = 0, // host to device + UvmCounterNameBytesXferDtH = 1, // device to host + UvmCounterNameCpuPageFaultCount = 2, +#ifdef __windows__ + UvmCounterNameWddmBytesXferBtH = 3, // backing store to host + UvmCounterNameWddmBytesXferHtB = 4, // host to backing store + // + // eviction (device to backing store) + // + UvmCounterNameWddmBytesXferDtB = 5, + // + // restoration (backing store to device) + // + UvmCounterNameWddmBytesXferBtD = 6, +#endif + // + // bytes prefetched host to device. + // These bytes are also counted in + // UvmCounterNameBytesXferHtD + // + UvmCounterNamePrefetchBytesXferHtD = 7, + // + // bytes prefetched device to host. + // These bytes are also counted in + // UvmCounterNameBytesXferDtH + // + UvmCounterNamePrefetchBytesXferDtH = 8, + // + // number of faults reported on the GPU + // + UvmCounterNameGpuPageFaultCount = 9, + UVM_TOTAL_COUNTERS +} UvmCounterName; + +#define UVM_COUNTER_NAME_FLAG_BYTES_XFER_HTD 0x1 +#define UVM_COUNTER_NAME_FLAG_BYTES_XFER_DTH 0x2 +#define UVM_COUNTER_NAME_FLAG_CPU_PAGE_FAULT_COUNT 0x4 +#define UVM_COUNTER_NAME_FLAG_WDDM_BYTES_XFER_BTH 0x8 +#define UVM_COUNTER_NAME_FLAG_WDDM_BYTES_XFER_HTB 0x10 +#define UVM_COUNTER_NAME_FLAG_BYTES_XFER_DTB 0x20 +#define UVM_COUNTER_NAME_FLAG_BYTES_XFER_BTD 0x40 +#define UVM_COUNTER_NAME_FLAG_PREFETCH_BYTES_XFER_HTD 0x80 +#define UVM_COUNTER_NAME_FLAG_PREFETCH_BYTES_XFER_DTH 0x100 +#define UVM_COUNTER_NAME_FLAG_GPU_PAGE_FAULT_COUNT 0x200 + +//------------------------------------------------------------------------------ +// UVM counter config structure +// +// - scope: Please see the UvmCounterScope enum (above), for details. +// - name: Name of the counter. Please check UvmCounterName for list. +// - gpuid: Identifies the GPU for which the counter will be enabled/disabled +// This parameter is ignored in AllGpu scopes. +// - state: A value of 0 will disable the counter, a value of 1 will enable +// the counter. 
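// ---------------------------------------------------------------------------
// Illustrative sketch: filling in a counter configuration per the field
// descriptions above, using the UvmCounterConfig structure defined just
// below. Assumes uvm_types.h and the nv* headers it includes are on the
// include path; 'gpu_uuid' is a placeholder for a UUID obtained elsewhere.
// ---------------------------------------------------------------------------
#include "uvm_types.h"

// Enable the host-to-device byte-transfer counter for one specific GPU.
static UvmCounterConfig demo_counter_config(NvProcessorUuid gpu_uuid)
{
    UvmCounterConfig config = {0};

    config.scope = UvmCounterScopeProcessSingleGpu;     // UvmCounterScope
    config.name  = UvmCounterNameBytesXferHtD;          // UvmCounterName
    config.gpuid = gpu_uuid;                            // ignored for the *AllGpu scopes
    config.state = UVM_COUNTER_CONFIG_STATE_ENABLE_REQUESTED;

    return config;
}
// ------------------------------- end of sketch ------------------------------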
+//------------------------------------------------------------------------------ +typedef struct +{ + NvU32 scope; //UVM_DEBUG_V1 (UvmCounterScope) + NvU32 name; //UVM_DEBUG_V1 (UvmCounterName) + NvProcessorUuid gpuid; //UVM_DEBUG_V1 + NvU32 state; //UVM_DEBUG_V1 +} UvmCounterConfig; + +#define UVM_COUNTER_CONFIG_STATE_DISABLE_REQUESTED 0 +#define UVM_COUNTER_CONFIG_STATE_ENABLE_REQUESTED 1 + +typedef enum +{ + UvmEventMemoryAccessTypeInvalid = 0, + UvmEventMemoryAccessTypeRead = 1, + UvmEventMemoryAccessTypeWrite = 2, + UvmEventMemoryAccessTypeAtomic = 3, + UvmEventMemoryAccessTypePrefetch = 4, + // ---- Add new values above this line + UvmEventNumMemoryAccessTypes +} UvmEventMemoryAccessType; + +typedef enum +{ + UvmEventTypeInvalid = 0, + + UvmEventTypeMemoryViolation = 1, + UvmEventTypeCpuFault = UvmEventTypeMemoryViolation, + UvmEventTypeMigration = 2, + UvmEventTypeGpuFault = 3, + UvmEventTypeGpuFaultReplay = 4, + UvmEventTypeFaultBufferOverflow = 5, + UvmEventTypeFatalFault = 6, + UvmEventTypeReadDuplicate = 7, + UvmEventTypeReadDuplicateInvalidate = 8, + UvmEventTypePageSizeChange = 9, + UvmEventTypeThrashingDetected = 10, + UvmEventTypeThrottlingStart = 11, + UvmEventTypeThrottlingEnd = 12, + UvmEventTypeMapRemote = 13, + UvmEventTypeEviction = 14, + + // ---- Add new values above this line + UvmEventNumTypes, + + // ---- Private event types for uvm tests + UvmEventTestTypesFirst = 63, + + UvmEventTypeTestAccessCounter = UvmEventTestTypesFirst, + + UvmEventTestTypesLast = UvmEventTypeTestAccessCounter, + + UvmEventNumTypesAll +} UvmEventType; + +//------------------------------------------------------------------------------ +// Bit flags used to enable/ disable events: +//------------------------------------------------------------------------------ +#define UVM_EVENT_ENABLE_MEMORY_VIOLATION ((NvU64)1 << UvmEventTypeMemoryViolation) +#define UVM_EVENT_ENABLE_CPU_FAULT ((NvU64)1 << UvmEventTypeCpuFault) +#define UVM_EVENT_ENABLE_MIGRATION ((NvU64)1 << UvmEventTypeMigration) +#define UVM_EVENT_ENABLE_GPU_FAULT ((NvU64)1 << UvmEventTypeGpuFault) +#define UVM_EVENT_ENABLE_GPU_FAULT_REPLAY ((NvU64)1 << UvmEventTypeGpuFaultReplay) +#define UVM_EVENT_ENABLE_FAULT_BUFFER_OVERFLOW ((NvU64)1 << UvmEventTypeFaultBufferOverflow) +#define UVM_EVENT_ENABLE_FATAL_FAULT ((NvU64)1 << UvmEventTypeFatalFault) +#define UVM_EVENT_ENABLE_READ_DUPLICATE ((NvU64)1 << UvmEventTypeReadDuplicate) +#define UVM_EVENT_ENABLE_READ_DUPLICATE_INVALIDATE ((NvU64)1 << UvmEventTypeReadDuplicateInvalidate) +#define UVM_EVENT_ENABLE_PAGE_SIZE_CHANGE ((NvU64)1 << UvmEventTypePageSizeChange) +#define UVM_EVENT_ENABLE_THRASHING_DETECTED ((NvU64)1 << UvmEventTypeThrashingDetected) +#define UVM_EVENT_ENABLE_THROTTLING_START ((NvU64)1 << UvmEventTypeThrottlingStart) +#define UVM_EVENT_ENABLE_THROTTLING_END ((NvU64)1 << UvmEventTypeThrottlingEnd) +#define UVM_EVENT_ENABLE_MAP_REMOTE ((NvU64)1 << UvmEventTypeMapRemote) +#define UVM_EVENT_ENABLE_EVICTION ((NvU64)1 << UvmEventTypeEviction) +#define UVM_EVENT_ENABLE_TEST_ACCESS_COUNTER ((NvU64)1 << UvmEventTypeTestAccessCounter) + +//------------------------------------------------------------------------------ +// Information associated with a memory violation event +//------------------------------------------------------------------------------ +typedef struct +{ + // + // eventType has to be 1st argument of this structure. Setting eventType to + // UvmEventTypeMemoryViolation helps to identify event data in a queue. 
+ // + NvU8 eventType; + NvU8 accessType; // read/write violation (UvmEventMemoryAccessType) + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets. + // + NvU16 padding16Bits; + NvU32 padding32Bits; + NvU64 address; // faulting address + NvU64 timeStamp; // cpu time when the fault occurred + NvU32 pid; // process id causing the fault + NvU32 threadId; // thread id causing the fault + NvU64 pc; // address of the instruction causing the fault +} UvmEventCpuFaultInfo; + +typedef enum +{ + UvmEventMigrationDirectionInvalid = 0, + UvmEventMigrationDirectionCpuToGpu = 1, + UvmEventMigrationDirectionGpuToCpu = 2, + // ---- Add new values above this line + UvmEventNumMigrationDirections +} UvmEventMigrationDirection; + +//------------------------------------------------------------------------------ +// Information associated with a migration event +//------------------------------------------------------------------------------ +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeMigration helps to identify event data in + // a queue. + // + NvU8 eventType; + // direction of migration (UvmEventMigrationDirection ) + // this field is deprecated, in favor of (src|dst)Index + NvU8 direction; + // + // Indices are used for the source and destination of migration instead of + // using gpu uuid/cpu id. This reduces the size of each event. gpuIndex to + // gpuUuid relation can be obtained from UvmEventGetGpuUuidTable. + // Currently we do not distinguish between CPUs so they all use index 0xFF. + // + NvU8 srcIndex; // source CPU/GPU index + NvU8 dstIndex; // destination CPU/GPU index + // + // This structure is shared between UVM kernel and tools. 
+ // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU32 padding32Bits; + NvU64 address; // base virtual addr used for migration + NvU64 migratedBytes; // number of bytes migrated + NvU64 beginTimeStamp; // cpu time stamp when the migration was + // queued on the gpu + NvU64 endTimeStamp; // cpu time stamp when the migration + // finalization was communicated to the cpu + NvU64 streamId; // stream causing the migration +} UvmEventMigrationInfo_Lite; + +typedef enum +{ + // These fault types are handled and may be "fixed" by the UVM driver + UvmFaultTypeInvalid = 0, + UvmFaultTypeInvalidPde = 1, + UvmFaultTypeInvalidPte = 2, + UvmFaultTypeWrite = 3, + UvmFaultTypeAtomic = 4, + // The next fault types are fatal and cannot be serviced by the UVM driver + UvmFaultTypeFatal = 5, + UvmFaultTypeInvalidPdeSize = UvmFaultTypeFatal, + UvmFaultTypeLimitViolation = 6, + UvmFaultTypeUnboundInstBlock = 7, + UvmFaultTypePrivViolation = 8, + UvmFaultTypePitchMaskViolation = 9, + UvmFaultTypeWorkCreation = 10, + UvmFaultTypeUnsupportedAperture = 11, + UvmFaultTypeCompressionFailure = 12, + UvmFaultTypeUnsupportedKind = 13, + UvmFaultTypeRegionViolation = 14, + UvmFaultTypePoison = 15, + // ---- Add new values above this line + UvmEventNumFaultTypes +} UvmEventFaultType; + +typedef enum +{ + UvmEventFatalReasonInvalid = 0, + UvmEventFatalReasonInvalidAddress = 1, + UvmEventFatalReasonInvalidPermissions = 2, + UvmEventFatalReasonInvalidFaultType = 3, + UvmEventFatalReasonOutOfMemory = 4, + UvmEventFatalReasonInternalError = 5, + + // This value is reported when a fault is triggered in an invalid context + // Example: CPU fault on a managed allocation while a kernel is running on a pre-Pascal GPU + UvmEventFatalReasonInvalidOperation = 6, + // ---- Add new values above this line + UvmEventNumFatalReasons +} UvmEventFatalReason; + +typedef enum +{ + UvmEventMigrationCauseInvalid = 0, + + // The migration was initiated by the user via UvmMigrate/UvmMigrateAsync + UvmEventMigrationCauseUser = 1, + + // The UVM runtime initiated the migration to ensure that processors can + // access data coherently + UvmEventMigrationCauseCoherence = 2, + + // Speculative migration of pages that are likely to be accessed in the + // near future. Initiated by the UVM driver performance heuristics. + UvmEventMigrationCausePrefetch = 3, + + // Migration performed to evict memory from the GPU. + UvmEventMigrationCauseEviction = 4, + + // Migration of pages that are being accessed remotely by the GPU and + // detected via access counter notifications. + UvmEventMigrationCauseAccessCounters = 5, + + // ---- Add new values above this line + UvmEventNumMigrationCauses +} UvmEventMigrationCause; + +//------------------------------------------------------------------------------ +// Information associated with a migration event UVM onwards +//------------------------------------------------------------------------------ +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. Setting eventType + // to UvmEventTypeMigration helps to identify event data in a queue. + // + NvU8 eventType; + // + // Cause that triggered the migration + // + NvU8 migrationCause; + // + // Indices are used for the source and destination of migration instead of + // using gpu uuid/cpu id. This reduces the size of each event. The index to + // gpuUuid relation can be obtained from UvmToolsGetProcessorUuidTable. 
+ // Currently we do not distinguish between CPUs so they all use index 0. + // + NvU8 srcIndex; // source CPU/GPU index + NvU8 dstIndex; // destination CPU/GPU index + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU32 padding32Bits; + NvU64 address; // base virtual addr used for migration + NvU64 migratedBytes; // number of bytes migrated + NvU64 beginTimeStamp; // cpu time stamp when the memory transfer + // was queued on the gpu + NvU64 endTimeStamp; // cpu time stamp when the memory transfer + // finalization was communicated to the cpu + // For asynchronous operations this field + // will be zero + NvU64 rangeGroupId; // range group tied with this migration + NvU64 beginTimeStampGpu; // time stamp when the migration started + // on the gpu + NvU64 endTimeStampGpu; // time stamp when the migration finished + // on the gpu +} UvmEventMigrationInfo; + +typedef enum +{ + UvmEventFaultClientTypeInvalid = 0, + UvmEventFaultClientTypeGpc = 1, + UvmEventFaultClientTypeHub = 2, + + // ---- Add new values above this line + UvmEventNumFaultClientTypes +} UvmEventFaultClientType; + +//------------------------------------------------------------------------------ +// This info is provided per gpu fault +// This event can be treated as a start event for gpu fault handling +//------------------------------------------------------------------------------ +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeGpuFault helps to identify event data in + // a queue. + // + NvU8 eventType; + NvU8 faultType; // type of gpu fault, refer UvmEventFaultType + NvU8 accessType; // memory access type, refer UvmEventMemoryAccessType + NvU8 gpuIndex; // GPU that experienced the fault + union + { + NvU16 gpcId; // If this is a replayable fault, this field contains + // the physical GPC index where the fault was + // triggered + + NvU16 channelId; // If this is a non-replayable fault, this field + // contains the id of the channel that launched the + // operation that caused the fault. + // + // TODO: Bug 3283289: this field is ambiguous for + // Ampere+ GPUs, but it is never consumed by clients. + }; + NvU16 clientId; // Id of the MMU client that triggered the fault. This + // is the value provided by HW and is architecture- + // specific. There are separate client ids for + // different client types (See dev_fault.h). + NvU64 address; // virtual address at which gpu faulted + NvU64 timeStamp; // time stamp when the cpu started processing the + // fault + NvU64 timeStampGpu; // gpu time stamp when the fault entry was written + // in the fault buffer + NvU32 batchId; // Per-GPU unique id to identify the faults serviced + // in batch before: + // - Issuing a replay for replayable faults + // - Re-scheduling the channel for non-replayable + // faults. + NvU8 clientType; // Volta+ GPUs can fault on clients other than GR. + // UvmEventFaultClientTypeGpc indicates replayable + // fault, while UvmEventFaultClientTypeHub indicates + // non-replayable fault. + + // + // This structure is shared between UVM kernel and tools. 
+ // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU8 padding8Bits; + NvU16 padding16Bits; +} UvmEventGpuFaultInfo; + +//------------------------------------------------------------------------------ +// This info is provided when a gpu fault is replayed (for replayable faults) +// or when the channel that launched the operation that triggered the fault is +// rescheduled for execution (for non-replayable faults). +// +// This event can be treated as an end event for gpu fault handling. +// Any other events eg migration events caused as a side-effect of the gpu fault +// would lie between the start and end event. +//------------------------------------------------------------------------------ +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeGpuFaultReplay helps to identify event + // data in a queue. + // + NvU8 eventType; + NvU8 gpuIndex; // GPU that experienced the fault + NvU8 clientType; // See clientType in UvmEventGpuFaultInfo + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU8 padding8bits; + NvU32 batchId; // Per-GPU unique id to identify the faults that have + // been serviced in batch + NvU64 timeStamp; // cpu time when the replay of the faulting memory + // accesses is queued on the gpu + NvU64 timeStampGpu; // gpu time stamp when the replay operation finished + // executing on the gpu +} UvmEventGpuFaultReplayInfo; + +//------------------------------------------------------------------------------ +// This info is provided per fatal fault +//------------------------------------------------------------------------------ +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeFatalFault helps to identify event data in + // a queue. + // + NvU8 eventType; + NvU8 faultType; // type of gpu fault, refer UvmEventFaultType. Only valid + // if processorIndex is a GPU + NvU8 accessType; // memory access type, refer UvmEventMemoryAccessType + NvU8 processorIndex; // processor that experienced the fault + NvU8 reason; // reason why the fault is fatal, refer UvmEventFatalReason + NvU8 padding8bits; + NvU16 padding16bits; + NvU64 address; // virtual address at which the processor faulted + NvU64 timeStamp; // CPU time when the fault is detected to be fatal +} UvmEventFatalFaultInfo; + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeReadDuplicate helps to identify event + // data in a queue. + // + NvU8 eventType; + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU8 padding8bits; + NvU16 padding16bits; + NvU32 padding32bits; + NvU64 processors; // mask that specifies in which processors this + // memory region is read-duplicated + NvU64 address; // virtual address of the memory region that is + // read-duplicated + NvU64 size; // size in bytes of the memory region that is + // read-duplicated + NvU64 timeStamp; // cpu time stamp when the memory region becomes + // read-duplicate. 
Since many processors can + // participate in read-duplicate this is time stamp + // when all the operations have been pushed to all + // the processors. +} UvmEventReadDuplicateInfo; + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeReadDuplicateInvalidate helps to + // identify event data in a queue. + // + NvU8 eventType; + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU8 residentIndex; // index of the cpu/gpu that now contains the only + // valid copy of the memory region + NvU16 padding16bits; + NvU32 padding32bits; + NvU64 address; // virtual address of the memory region that is + // read-duplicated + NvU64 size; // size of the memory region that is + // read-duplicated + NvU64 timeStamp; // cpu time stamp when the memory region is no + // longer read-duplicate. Since many processors can + // participate in read-duplicate this is time stamp + // when all the operations have been pushed to all + // the processors. +} UvmEventReadDuplicateInvalidateInfo; + + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypePageSizeChange helps to identify event + // data in a queue. + // + NvU8 eventType; + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU8 processorIndex; // cpu/gpu processor index for which the page size + // changed + NvU16 padding16bits; + NvU32 size; // new page size + NvU64 address; // virtual address of the page whose size has + // changed + NvU64 timeStamp; // cpu time stamp when the new page size is + // queued on the gpu +} UvmEventPageSizeChangeInfo; + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeThrashingDetected helps to identify event + // data in a queue. + // + NvU8 eventType; + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU8 padding8bits; + NvU16 padding16bits; + NvU32 padding32bits; + NvU64 processors; // mask that specifies which processors are + // fighting for this memory region + NvU64 address; // virtual address of the memory region that is + // thrashing + NvU64 size; // size of the memory region that is thrashing + NvU64 timeStamp; // cpu time stamp when thrashing is detected +} UvmEventThrashingDetectedInfo; + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeThrottlingStart helps to identify event data + // in a queue. + // + NvU8 eventType; + NvU8 processorIndex; // index of the cpu/gpu that was throttled + // + // This structure is shared between UVM kernel and tools. 
+ // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU16 padding16bits; + NvU32 padding32bits; + NvU64 address; // address of the page whose servicing is being + // throttled + NvU64 timeStamp; // cpu start time stamp for the throttling operation +} UvmEventThrottlingStartInfo; + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeThrottlingEnd helps to identify event data + // in a queue. + // + NvU8 eventType; + NvU8 processorIndex; // index of the cpu/gpu that was throttled + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU16 padding16bits; + NvU32 padding32bits; + NvU64 address; // address of the page whose servicing is being + // throttled + NvU64 timeStamp; // cpu end time stamp for the throttling operation +} UvmEventThrottlingEndInfo; + +typedef enum +{ + UvmEventMapRemoteCauseInvalid = 0, + + // The remote mapping is created to ensure coherence on systems with no + // GPU fault support (UVM-Lite) + UvmEventMapRemoteCauseCoherence = 1, + + // The thrashing mitigation policy pinned a memory region on a specific + // processor memory. This cause is used for the remote mappings created + // on the rest of processors to the pinned location. + UvmEventMapRemoteCauseThrashing = 2, + + // The remote mapping was created to enforce the PreferredLocation or + // AccessedBy hints provided by the user. + UvmEventMapRemoteCausePolicy = 3, + + // There is no available memory on the system so a remote mapping was + // created to the current location. + UvmEventMapRemoteCauseOutOfMemory = 4, + + // On GPUs with access counters, memory evicted to sysmem is always mapped + // from the GPU. The UVM driver will invalidate the mapping if the region + // is heavily accessed by the GPU later on. + UvmEventMapRemoteCauseEviction = 5, +} UvmEventMapRemoteCause; + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeMapRemote helps to identify event data + // in a queue. + // + NvU8 eventType; + NvU8 srcIndex; // index of the cpu/gpu being remapped + NvU8 dstIndex; // index of the cpu/gpu memory that contains the + // memory region data + NvU8 mapRemoteCause; // field to type UvmEventMapRemoteCause that tells + // the cause for the page to be mapped remotely + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU32 padding32bits; + NvU64 address; // virtual address of the memory region that is + // thrashing + NvU64 size; // size of the memory region that is thrashing + NvU64 timeStamp; // cpu time stamp when all the required operations + // have been pushed to the processor + NvU64 timeStampGpu; // time stamp when the new mapping is effective in + // the processor specified by srcIndex. If srcIndex + // is a cpu, this field will be zero. +} UvmEventMapRemoteInfo; + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeEviction helps to identify event data + // in a queue. 
+ // + NvU8 eventType; + NvU8 srcIndex; // index of the cpu/gpu from which data is being + // evicted + NvU8 dstIndex; // index of the cpu/gpu memory to which data is + // going to be stored + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + NvU8 padding8bits; + NvU32 padding32bits; + NvU64 addressOut; // virtual address of the memory region that is + // being evicted + NvU64 addressIn; // virtual address that caused the eviction + NvU64 size; // size of the memory region that being evicted + NvU64 timeStamp; // cpu time stamp when eviction starts on the cpu +} UvmEventEvictionInfo; + +// TODO: Bug 1870362: [uvm] Provide virtual address and processor index in +// AccessCounter events +// +// Currently we are just passing raw information from the notification buffer +// entries, which includes physical address + aperture. Instead, translate the +// information to something more useful such as virtual address and then index +// of the processor where the accessed data is resident. Most of the +// implementation is required to service access counter notifications +// themselves. +typedef enum +{ + UvmEventAperturePeer0 = 1, + UvmEventAperturePeer1 = 2, + UvmEventAperturePeer2 = 3, + UvmEventAperturePeer3 = 4, + UvmEventAperturePeer4 = 5, + UvmEventAperturePeer5 = 6, + UvmEventAperturePeer6 = 7, + UvmEventAperturePeer7 = 8, + UvmEventAperturePeerMax = UvmEventAperturePeer7, + UvmEventApertureSys = 9, + UvmEventApertureVid = 10, +} UvmEventApertureType; + +typedef struct +{ + // + // eventType has to be the 1st argument of this structure. + // Setting eventType = UvmEventTypeAccessCounter helps to identify event data + // in a queue. + // + NvU8 eventType; + NvU8 srcIndex; // index of the gpu that received the access counter + // notification + // + // This structure is shared between UVM kernel and tools. + // Manually padding the structure so that compiler options like pragma pack + // or malign-double will have no effect on the field offsets + // + // See uvm_access_counter_buffer_entry_t for details + NvU8 aperture; + NvU8 instancePtrAperture; + + NvU8 isVirtual; + NvU8 isFromCpu; + + NvU8 veId; + NvU8 onManaged; // The access counter notification was triggered on + // a managed memory region + + NvU32 value; + NvU32 subGranularity; + NvU32 tag; + NvU32 bank; + NvU64 address; + NvU64 instancePtr; +} UvmEventTestAccessCounterInfo; + +//------------------------------------------------------------------------------ +// Entry added in the event queue buffer when an enabled event occurs. For +// compatibility with all tools ensure that this structure is 64 bit aligned. 
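// Every member of the eventData union below starts with its eventType byte,
// so a tools-side reader can look at that byte first and then pick the right
// view of the entry. Illustrative sketch only, not code from this driver
// (handle_replay() and handle_fatal() are placeholder callbacks):
//
//     static void consume_entry(const UvmEventEntry *entry)
//     {
//         switch (entry->eventData.eventType) {
//             case UvmEventTypeGpuFaultReplay:
//                 handle_replay(&entry->eventData.gpuFaultReplay);
//                 break;
//             case UvmEventTypeFatalFault:
//                 handle_fatal(&entry->eventData.fatalFault);
//                 break;
//             default:
//                 break;
//         }
//     }
//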
+//------------------------------------------------------------------------------ +typedef struct +{ + union + { + union + { + NvU8 eventType; + UvmEventMigrationInfo_Lite migration_Lite; + + UvmEventCpuFaultInfo cpuFault; + UvmEventMigrationInfo migration; + UvmEventGpuFaultInfo gpuFault; + UvmEventGpuFaultReplayInfo gpuFaultReplay; + UvmEventFatalFaultInfo fatalFault; + UvmEventReadDuplicateInfo readDuplicate; + UvmEventReadDuplicateInvalidateInfo readDuplicateInvalidate; + UvmEventPageSizeChangeInfo pageSizeChange; + UvmEventThrashingDetectedInfo thrashing; + UvmEventThrottlingStartInfo throttlingStart; + UvmEventThrottlingEndInfo throttlingEnd; + UvmEventMapRemoteInfo mapRemote; + UvmEventEvictionInfo eviction; + } eventData; + + union + { + NvU8 eventType; + + UvmEventTestAccessCounterInfo accessCounter; + } testEventData; + }; +} UvmEventEntry; + +//------------------------------------------------------------------------------ +// Type of time stamp used in the event entry: +// +// On windows we support QPC type which uses RDTSC if possible else fallbacks to +// HPET. +// +// On Linux ClockGetTime provides similar functionality. +// In UvmEventTimeStampTypeAuto the system decides which time stamp best suites +// current environment. +//------------------------------------------------------------------------------ +typedef enum +{ + UvmEventTimeStampTypeInvalid = 0, + UvmEventTimeStampTypeWin32QPC = 1, + UvmEventTimeStampTypePosixClockGetTime = 2, + UvmEventTimeStampTypeAuto = 3, + // ---- Add new values above this line + UvmEventNumTimeStampTypes +} UvmEventTimeStampType; + +//------------------------------------------------------------------------------ +// An opaque queue handle is returned to the user when a queue is created. +//------------------------------------------------------------------------------ +typedef NvUPtr UvmEventQueueHandle; + +//------------------------------------------------------------------------------ +// Setting default page size to 4k, +// this can be updated to 64k in case of power PC +//------------------------------------------------------------------------------ +#define UVM_DEBUG_ACCESS_PAGE_SIZE (1 << 12) // 4k page + +typedef enum +{ + UvmDebugAccessTypeRead = 0, + UvmDebugAccessTypeWrite = 1, +} UvmDebugAccessType; + +typedef struct UvmEventControlData_tag { + // entries between get_ahead and get_behind are currently being read + volatile NvU32 get_ahead; + volatile NvU32 get_behind; + // entries between put_ahead and put_behind are currently being written + volatile NvU32 put_ahead; + volatile NvU32 put_behind; + + // counter of dropped events + NvU64 dropped[UvmEventNumTypesAll]; +} UvmToolsEventControlData; + +//------------------------------------------------------------------------------ +// UVM Tools forward types (handles) definitions +//------------------------------------------------------------------------------ +struct UvmToolsSession_tag; +struct UvmToolsEventQueue_tag; +struct UvmToolsCounters_tag; + +typedef struct UvmToolsSession_tag UvmToolsSession; +typedef struct UvmToolsEventQueue_tag UvmToolsEventQueue; +typedef struct UvmToolsCounters_tag UvmToolsCounters; + +typedef UvmToolsSession *UvmToolsSessionHandle; +typedef UvmToolsEventQueue *UvmToolsEventQueueHandle; +typedef UvmToolsCounters *UvmToolsCountersHandle; + +#endif // _UVM_TYPES_H_ diff --git a/kernel-open/nvidia-uvm/uvm_unit_test.h b/kernel-open/nvidia-uvm/uvm_unit_test.h new file mode 100644 index 000000000..a87d019e8 --- /dev/null +++ 
b/kernel-open/nvidia-uvm/uvm_unit_test.h @@ -0,0 +1,97 @@ +/******************************************************************************* + Copyright (c) 2015-2019 NVidia Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*******************************************************************************/ + +#ifndef __UVM_UNIT_TEST_H__ +#define __UVM_UNIT_TEST_H__ + +#include "nvstatus.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//----------------------------------------------------------------------------- +// UvmGetGlobalStatePointer +// +// This allows an application and any number of shared libraries to effectively +// share an instance of UVM, even though it is a static library. +// +// Note that calling this function causes initialization of the global state +// if it has not already occurred. +// +//----------------------------------------------------------------------------- +struct UvmGlobalState_tag *UvmGetGlobalStatePointer(void); + +//----------------------------------------------------------------------------- +// UvmSetGlobalStatePointer +// +// This allows an instance of the UVM user mode library to share state with +// another, making it behave as a single instance. Because it is a static +// library, this is needed for an application and a shared library, or multiple +// shared libraries, to share a UVM instance. It is only used in testing. +// +// This method must be called before the global state has been initialized, so +// effectively before any other call to this library. The global state used by +// the library is fixed once it has been set. +// +// Error codes: +// NV_ERR_INVALID_STATE: +// The global state has already been initialized. +// +// NV_ERR_INVALID_ARGUMENT: +// pGlobalState is NULL. +// +//----------------------------------------------------------------------------- +NV_STATUS UvmSetGlobalStatePointer(struct UvmGlobalState_tag *pGlobalState); + +// +// TODO: Bug 1766104: Remove this with uvmfull +// +// ioctl command numbers for the debug-build-only tests that +// live in uvm_gpu_op_testc.c +// +// This type should be really put into user-kernel shared types specific file, +// e.g. uvm_test_ioctl.h. Leaving it here temporarily to keep it compatibile +// with older drivers. 
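//
// The UvmGetGlobalStatePointer/UvmSetGlobalStatePointer declarations above are
// meant to be used together when a test application and a shared library each
// link the static UVM library but need to behave as a single instance.
// Illustrative sketch only, with the call order inferred from the comments
// above (lib_report_failure() is a placeholder):
//
//     // In the application, before handing control to the shared library:
//     struct UvmGlobalState_tag *state = UvmGetGlobalStatePointer();
//
//     // In the shared library, before it makes any other UVM call:
//     NV_STATUS status = UvmSetGlobalStatePointer(state);
//     if (status != NV_OK)
//         lib_report_failure(status);
//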
+// +typedef enum +{ + UVM_GPU_OPS_SAMPLE_TEST = 0, + UVM_CHANNEL_MGMT_API_BASIC_MIGRATION_TEST, + UVM_CHANNEL_MGMT_API_PUSHBUFFER_SIMPLE_SANITY_TEST, + UVM_REGION_TRACKER_SANITY_TEST, + UVM_CHANNEL_DIRECTED_TEST, + UVM_CHANNEL_MGMT_API_INLINE_REGION_SANITY_TEST, + UVM_CHANNEL_PHYSICAL_MEMCOPY_TEST, + UVM_CHANNEL_PAGESIZE_4K_TO_128K_DIRECTED_TEST, + UVM_CHANNEL_PAGESIZE_4K_TO_2M_DIRECTED_TEST, + UVM_CHANNEL_PAGESIZE_4K_TO_128K_CONTIGUOUS_DIRECTED_TEST, + UVM_CHANNEL_PAGESIZE_4K_TO_2M_CONTIGUOUS_DIRECTED_TEST, + UVM_CHANNEL_P2P_MEMCOPY_TEST, + UVM_TEST_END +} UvmTests; + +#ifdef __cplusplus +} +#endif + +#endif // __UVM_UNIT_TEST_H__ diff --git a/kernel-open/nvidia-uvm/uvm_user_channel.c b/kernel-open/nvidia-uvm/uvm_user_channel.c new file mode 100644 index 000000000..a2234f256 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_user_channel.c @@ -0,0 +1,1056 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_lock.h" +#include "uvm_hal_types.h" +#include "uvm_mmu.h" +#include "uvm_user_channel.h" +#include "uvm_kvmalloc.h" +#include "uvm_api.h" +#include "uvm_gpu.h" +#include "uvm_tracker.h" +#include "uvm_map_external.h" +#include "nv_uvm_interface.h" +#include "uvm_test.h" + +#include + +// Sort channel resources from highest to lowest alignments +static int resource_align_high_cmp(const void *a, const void *b) +{ + const UvmGpuChannelResourceInfo *resource_a = a; + const UvmGpuChannelResourceInfo *resource_b = b; + + if (resource_a->alignment > resource_b->alignment) + return -1; + if (resource_a->alignment < resource_b->alignment) + return 1; + return 0; +} + +static NV_STATUS get_rm_channel_resources(uvm_user_channel_t *user_channel, UvmGpuChannelInstanceInfo *channel_info) +{ + UvmGpuChannelResourceInfo *resources = NULL; + NvU32 i, num_resources = user_channel->num_resources; + + // Note that num_resources may be 0, in which case resources will be + // ZERO_SIZE_PTR. This is preferred to setting resources to NULL, since we + // use NULL to indicate error conditions in various cleanup paths. 
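    //
    // (Background on that convention: a zero-byte request to the kernel
    // allocators returns ZERO_SIZE_PTR, a small non-NULL sentinel that the
    // free functions accept as a no-op. Assuming uvm_kvmalloc_zero() and
    // uvm_kvfree() behave the same way, "resources == NULL" can then mean
    // allocation failure and nothing else, e.g.:
    //
    //     void *p = kzalloc(0, GFP_KERNEL);  // p == ZERO_SIZE_PTR, not NULL
    //     kfree(p);                          // accepted, does nothing
    // )
    //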
+ + resources = uvm_kvmalloc_zero(num_resources * sizeof(resources[0])); + if (!resources) + return NV_ERR_NO_MEMORY; + + memcpy(resources, channel_info->resourceInfo, num_resources * sizeof(resources[0])); + + // Info fix-up + for (i = 0; i < num_resources; i++) { + UvmGpuMemoryInfo *mem_info = &resources[i].resourceInfo; + + // RM can return alignments of 0, so make sure it's at least page size + // before we start using it. + resources[i].alignment = max(resources[i].alignment, (NvU64)mem_info->pageSize); + + // RM tracks logical size, so the size might not be a multiple of page + // size. This would cause problems in our tracking. + mem_info->size = UVM_ALIGN_UP(mem_info->size, mem_info->pageSize); + } + + // Sort the resources from highest to lowest alignment. This should + // guarantee that they fit in the provided VA space, regardless of the order + // used to calculate the total size. + sort(resources, num_resources, sizeof(resources[0]), resource_align_high_cmp, NULL); + + user_channel->resources = resources; + return NV_OK; +} + +static NV_STATUS uvm_user_channel_create(uvm_va_space_t *va_space, + const NvProcessorUuid *uuid, + uvm_rm_user_object_t *user_rm_channel, + uvm_user_channel_t **out_channel, + NvU64 base, + NvU64 length) +{ + UvmGpuChannelInstanceInfo *channel_info = NULL; + uvm_user_channel_t *user_channel = NULL; + NV_STATUS status = NV_OK; + NvU32 rm_client = user_rm_channel->user_client; + NvU32 rm_channel = user_rm_channel->user_object; + uvm_gpu_t *gpu; + void *rm_retained_channel = NULL; + + *out_channel = NULL; + + gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + user_channel = uvm_kvmalloc_zero(sizeof(*user_channel)); + if (!user_channel) + return NV_ERR_NO_MEMORY; + + user_channel->gpu = gpu; + memcpy(&user_channel->user_rm_channel, user_rm_channel, sizeof(*user_rm_channel)); + INIT_LIST_HEAD(&user_channel->list_node); + UVM_RB_TREE_CLEAR_NODE(&user_channel->instance_ptr.node); + nv_kref_init(&user_channel->kref); + + uvm_tracker_init(&user_channel->clear_faulted_tracker); + + user_channel->gpu_va_space = uvm_gpu_va_space_get(va_space, gpu); + UVM_ASSERT(user_channel->gpu_va_space); + + // Convert the user channel handles into a handle safe for kernel use. This + // also takes a ref-count on the instance pointer, though not on other + // channel state. + // + // TODO: Bug 1624521: This interface needs to use rm_control_fd to do + // validation. 
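    //
    // Note that rm_client and rm_channel are only trusted for this single
    // nvUvmInterfaceRetainChannel() call. Afterwards UVM operates on the
    // rm_retained_channel handle it gets back; the user-supplied hClient and
    // hChannel pair is kept only for the unregister-time lookup (see the
    // user_rm_channel field documentation in uvm_user_channel.h).
    //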
+ channel_info = uvm_kvmalloc_zero(sizeof(*channel_info)); + if (!channel_info) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + (void)user_channel->user_rm_channel.rm_control_fd; + status = uvm_rm_locked_call(nvUvmInterfaceRetainChannel(user_channel->gpu_va_space->duped_gpu_va_space, + rm_client, + rm_channel, + &rm_retained_channel, + channel_info)); + if (status != NV_OK) { + UVM_DBG_PRINT("Failed to retain channel {0x%x, 0x%x}: %s, GPU: %s\n", + rm_client, + rm_channel, + nvstatusToString(status), + uvm_gpu_name(gpu)); + goto error; + } + + if (channel_info->sysmem) + user_channel->instance_ptr.addr.aperture = UVM_APERTURE_SYS; + else + user_channel->instance_ptr.addr.aperture = UVM_APERTURE_VID; + + user_channel->instance_ptr.addr.address = channel_info->base; + user_channel->rm_retained_channel = rm_retained_channel; + user_channel->hw_runlist_id = channel_info->runlistId; + user_channel->hw_channel_id = channel_info->chId; + user_channel->num_resources = channel_info->resourceCount; + user_channel->engine_type = channel_info->channelEngineType; + user_channel->in_subctx = channel_info->bInSubctx == NV_TRUE; + user_channel->subctx_id = channel_info->subctxId; + user_channel->tsg.valid = channel_info->bTsgChannel == NV_TRUE; + user_channel->tsg.id = channel_info->tsgId; + user_channel->tsg.max_subctx_count = channel_info->tsgMaxSubctxCount; + user_channel->work_submission_token = channel_info->workSubmissionToken; + user_channel->work_submission_offset = channel_info->workSubmissionOffset; + user_channel->clear_faulted_token = channel_info->clearFaultedToken; + user_channel->chram_channel_register = channel_info->pChramChannelRegister; + user_channel->smc_engine_id = channel_info->smcEngineId; + user_channel->smc_engine_ve_id_offset = channel_info->smcEngineVeIdOffset; + + if (!gpu->parent->smc.supported) { + UVM_ASSERT(user_channel->smc_engine_id == 0); + UVM_ASSERT(user_channel->smc_engine_ve_id_offset == 0); + } + + // Only GR supports subcontexts. + if (user_channel->in_subctx) { + UVM_ASSERT(user_channel->engine_type == UVM_GPU_CHANNEL_ENGINE_TYPE_GR); + UVM_ASSERT(user_channel->tsg.valid); + UVM_ASSERT(user_channel->subctx_id < user_channel->tsg.max_subctx_count); + } + + if (user_channel->tsg.valid) + UVM_ASSERT(user_channel->tsg.max_subctx_count <= gpu->max_subcontexts); + + // If num_resources == 0, as can happen with CE channels, we ignore base and + // length. + if (user_channel->num_resources > 0 && uvm_api_range_invalid(base, length)) { + status = NV_ERR_INVALID_ADDRESS; + goto error; + } + + status = get_rm_channel_resources(user_channel, channel_info); + if (status != NV_OK) + goto error; + + uvm_kvfree(channel_info); + + *out_channel = user_channel; + return NV_OK; + +error: + // uvm_user_channel_destroy_detached expects this + user_channel->gpu_va_space = NULL; + uvm_user_channel_destroy_detached(user_channel); + uvm_kvfree(channel_info); + return status; +} + +static uvm_user_channel_t *find_user_channel(uvm_va_space_t *va_space, uvm_rm_user_object_t *user_rm_channel) +{ + uvm_user_channel_t *user_channel; + uvm_gpu_va_space_t *gpu_va_space; + + // This is a pretty naive search but it's unlikely to show up in a perf- + // critical path. We could optimize it in the future with a table lookup + // instead, if it becomes a problem. 
+ for_each_gpu_va_space(gpu_va_space, va_space) { + list_for_each_entry(user_channel, &gpu_va_space->registered_channels, list_node) { + if (user_channel->user_rm_channel.user_client == user_rm_channel->user_client && + user_channel->user_rm_channel.user_object == user_rm_channel->user_object) + return user_channel; + } + } + + return NULL; +} + +// Find a pre-existing channel VA range which already maps this rm_descriptor. +// The criteria are: +// 1) gpu_va_space must match +// 2) rm_descriptor must match +// 3) new_user_channel's TSG matches existing mappings +static uvm_va_range_t *find_va_range(uvm_user_channel_t *new_user_channel, NvP64 rm_descriptor) +{ + uvm_gpu_va_space_t *gpu_va_space = new_user_channel->gpu_va_space; + uvm_va_range_t *range; + + // We can only allow sharing within a TSG + if (!new_user_channel->tsg.valid) + return NULL; + + list_for_each_entry(range, &gpu_va_space->channel_va_ranges, channel.list_node) { + UVM_ASSERT(range->type == UVM_VA_RANGE_TYPE_CHANNEL); + UVM_ASSERT(range->channel.ref_count > 0); + + if (range->channel.tsg.valid && + range->channel.tsg.id == new_user_channel->tsg.id && + range->channel.rm_descriptor == rm_descriptor) + return range; + } + + return NULL; +} + +// Find an unallocated VA region of the given size and alignment witin the range +// [base, end]. base must not be 0. If no such region exists, 0 is returned. +static NvU64 find_va_in_range(uvm_va_space_t *va_space, NvU64 base, NvU64 end, NvU64 size, NvU64 alignment) +{ + NvU64 curr_start = base, curr_end; + uvm_va_range_t *va_range; + + UVM_ASSERT(base); + UVM_ASSERT(base < end); + UVM_ASSERT(size); + UVM_ASSERT(alignment); + + while (1) { + // Find the next aligned addr + curr_start = UVM_ALIGN_UP(curr_start, alignment); + curr_end = curr_start + size - 1; + + // Check for exceeding end and for arithmetic overflow + if (curr_start < base || curr_end > end || curr_start > curr_end) + return 0; + + // Check if the range is free + va_range = uvm_va_space_iter_first(va_space, curr_start, curr_end); + if (!va_range) + return curr_start; + + // Advance to the next available slot + curr_start = va_range->node.end + 1; + } +} + +// Allocate or reuse a VA range for the given channel resource, but don't map +// it. If a new VA range is allocated, the VA used is the first unallocated VA +// in the range [base, end] which has the appropriate alignment and size for the +// given resource. +static NV_STATUS create_va_range(struct mm_struct *mm, + uvm_user_channel_t *user_channel, + NvU64 base, + NvU64 end, + NvU32 resource_index) +{ + uvm_gpu_va_space_t *gpu_va_space = user_channel->gpu_va_space; + UvmGpuChannelResourceInfo *resource = &user_channel->resources[resource_index]; + UvmGpuMemoryInfo *mem_info = &resource->resourceInfo; + uvm_va_range_t *range = NULL; + NvU64 start; + uvm_aperture_t aperture; + NV_STATUS status; + + uvm_assert_rwsem_locked_write(&gpu_va_space->va_space->lock); + + if (mem_info->sysmem) + aperture = UVM_APERTURE_SYS; + else + aperture = UVM_APERTURE_VID; + + // See if we've already mapped this resource + range = find_va_range(user_channel, resource->resourceDescriptor); + if (range) { + // We've already mapped this resource, so just bump the ref count + UVM_ASSERT(IS_ALIGNED(range->node.start, resource->alignment)); + UVM_ASSERT(uvm_va_range_size(range) >= mem_info->size); + UVM_ASSERT(range->channel.aperture == aperture); + + ++range->channel.ref_count; + user_channel->va_ranges[resource_index] = range; + return NV_OK; + } + + // This is a new VA range. 
Find an available VA in the input region and + // allocate it there. + start = find_va_in_range(gpu_va_space->va_space, base, end, mem_info->size, resource->alignment); + if (!start) { + UVM_DBG_PRINT("Range exceeded: allowed [0x%llx, 0x%llx], align: 0x%llx size: 0x%llx\n", + base, + end, + resource->alignment, + mem_info->size); + return NV_ERR_INVALID_ADDRESS; + } + + // TODO: Bug 1734586: RM computes alignments incorrectly + + status = uvm_va_range_create_channel(gpu_va_space->va_space, + mm, + start, + start + mem_info->size - 1, + &range); + if (status != NV_OK) { + UVM_ASSERT(status != NV_ERR_UVM_ADDRESS_IN_USE); + goto error; + } + + range->channel.gpu_va_space = gpu_va_space; + range->channel.aperture = aperture; + range->channel.rm_descriptor = resource->resourceDescriptor; + range->channel.rm_id = resource->resourceId; + range->channel.tsg.valid = user_channel->tsg.valid; + range->channel.tsg.id = user_channel->tsg.id; + range->channel.ref_count = 1; + list_add(&range->channel.list_node, &gpu_va_space->channel_va_ranges); + + user_channel->va_ranges[resource_index] = range; + return NV_OK; + +error: + if (range) { + range->channel.ref_count = 0; // Destroy assumes this + uvm_va_range_destroy(range, NULL); + } + return status; +} + +static void destroy_va_ranges(uvm_user_channel_t *user_channel) +{ + size_t i; + + if (!user_channel || !user_channel->va_ranges) + return; + + for (i = 0; i < user_channel->num_resources; i++) { + uvm_va_range_t *resource_range = user_channel->va_ranges[i]; + if (!resource_range) + continue; + + UVM_ASSERT(resource_range->type == UVM_VA_RANGE_TYPE_CHANNEL); + UVM_ASSERT(resource_range->channel.rm_descriptor == user_channel->resources[i].resourceDescriptor); + UVM_ASSERT(resource_range->channel.rm_id == user_channel->resources[i].resourceId); + UVM_ASSERT(resource_range->channel.tsg.valid == user_channel->tsg.valid); + UVM_ASSERT(resource_range->channel.tsg.id == user_channel->tsg.id); + + // Drop the ref count on each each range + UVM_ASSERT(resource_range->channel.ref_count > 0); + if (!resource_range->channel.tsg.valid) + UVM_ASSERT(resource_range->channel.ref_count == 1); + + --resource_range->channel.ref_count; + if (resource_range->channel.ref_count == 0) + uvm_va_range_destroy(resource_range, NULL); + } + + uvm_kvfree(user_channel->va_ranges); + user_channel->va_ranges = NULL; +} + +// Channels need virtual allocations to operate, but we don't know about them. +// This function carves out a chunk within [base, end] for each allocation for +// later mapping. +static NV_STATUS create_va_ranges(struct mm_struct *mm, + uvm_user_channel_t *user_channel, + NvU64 base, + NvU64 end) +{ + NvU32 i; + NV_STATUS status; + + user_channel->va_ranges = uvm_kvmalloc_zero(user_channel->num_resources * sizeof(user_channel->va_ranges[0])); + if (!user_channel->va_ranges) + return NV_ERR_NO_MEMORY; + + for (i = 0; i < user_channel->num_resources; i++) { + status = create_va_range(mm, user_channel, base, end, i); + if (status != NV_OK) + goto error; + } + + return NV_OK; + +error: + destroy_va_ranges(user_channel); + return status; +} + +// "Binding" the resouces tells RM the virtual address of each allocation so it +// can in turn tell the HW where they are. 
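//
// Taken together with create_va_ranges() above and
// uvm_user_channel_map_resources() below, channel registration proceeds in
// three steps: carve out a VA range for each RM-reported resource (VA space
// lock held in write mode), map each range with the PTE data RM hands back
// (lock downgraded to read mode, since mapping calls into RM), and then bind
// the resulting virtual addresses here so RM can program the hardware, after
// which the channel is marked is_bound.
//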
+static NV_STATUS bind_channel_resources(uvm_user_channel_t *user_channel) +{ + UvmGpuChannelResourceBindParams *resource_va_list = NULL; + NV_STATUS status = NV_OK; + NvU32 i; + + resource_va_list = uvm_kvmalloc_zero(user_channel->num_resources * sizeof(resource_va_list[0])); + if (!resource_va_list) { + status = NV_ERR_NO_MEMORY; + goto out; + } + + for (i = 0; i < user_channel->num_resources; i++) { + uvm_va_range_t *resource_range = user_channel->va_ranges[i]; + + UVM_ASSERT(resource_range); + UVM_ASSERT(resource_range->type == UVM_VA_RANGE_TYPE_CHANNEL); + UVM_ASSERT(resource_range->channel.rm_descriptor == user_channel->resources[i].resourceDescriptor); + UVM_ASSERT(resource_range->channel.rm_id == user_channel->resources[i].resourceId); + UVM_ASSERT(resource_range->channel.tsg.valid == user_channel->tsg.valid); + UVM_ASSERT(resource_range->channel.tsg.id == user_channel->tsg.id); + + resource_va_list[i].resourceId = resource_range->channel.rm_id; + resource_va_list[i].resourceVa = resource_range->node.start; + } + + status = uvm_rm_locked_call(nvUvmInterfaceBindChannelResources(user_channel->rm_retained_channel, + resource_va_list)); + if (status != NV_OK) { + UVM_DBG_PRINT("Failed to bind channel resources for {0x%x, 0x%x}: %s, GPU: %s\n", + user_channel->user_rm_channel.user_client, + user_channel->user_rm_channel.user_object, + nvstatusToString(status), + uvm_gpu_name(user_channel->gpu_va_space->gpu)); + goto out; + } + + atomic_set(&user_channel->is_bound, 1); + +out: + uvm_kvfree(resource_va_list); + return status; +} + +// Map the already-created VA ranges by getting the PTEs for each allocation +// from RM. The caller is responsible for destroying the VA ranges if the +// mappings fail. +static NV_STATUS uvm_user_channel_map_resources(uvm_user_channel_t *user_channel) +{ + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + NvU32 i; + NV_STATUS status = NV_OK, tracker_status; + uvm_map_rm_params_t map_rm_params = + { + // Some of these resources need to be privileged and/or read- only, so + // use default types to let RM set those fields. + .map_offset = 0, + .mapping_type = UvmGpuMappingTypeDefault, + .caching_type = UvmGpuCachingTypeDefault, + .format_type = UvmGpuFormatTypeDefault, + .element_bits = UvmGpuFormatElementBitsDefault, + .compression_type = UvmGpuCompressionTypeDefault, + }; + + for (i = 0; i < user_channel->num_resources; i++) { + UvmGpuMemoryInfo *mem_info; + uvm_va_range_t *range = user_channel->va_ranges[i]; + + // Skip already-mapped VA ranges. Note that the ref count might not be + // 1 even if the range is unmapped, because a thread which fails to map + // will drop and re-take the VA space lock in uvm_register_channel + // leaving a shareable VA range in the list unmapped. This thread could + // have attached to it during that window, so we'll do the mapping + // instead. + if (range->channel.pt_range_vec.ranges) { + UVM_ASSERT(range->channel.ref_count >= 1); + continue; + } + + mem_info = &user_channel->resources[i].resourceInfo; + status = uvm_va_range_map_rm_allocation(range, user_channel->gpu, mem_info, &map_rm_params, NULL, &tracker); + if (status != NV_OK) { + // We can't destroy the VA ranges here since we only have the VA + // space lock in read mode, so let the caller handle it. + break; + } + } + + // Always wait for the tracker even on error so we don't have any pending + // map operations happening during the subsequent destroy. + tracker_status = uvm_tracker_wait_deinit(&tracker); + return status == NV_OK ? 
tracker_status : status; +} + +static NV_STATUS uvm_register_channel_under_write(struct mm_struct *mm, + uvm_user_channel_t *user_channel, + NvU64 base, + NvU64 length) +{ + uvm_gpu_va_space_t *gpu_va_space = user_channel->gpu_va_space; + uvm_va_space_t *va_space = gpu_va_space->va_space; + NV_STATUS status; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + // Currently all user channels are stopped when any process using the VA + // space is torn down, unless it passed + // UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE, but the VA space could be + // shared and some other process could still try registering a channel. + // Just disallow it for now. + if (atomic_read(&va_space->user_channels_stopped)) + return NV_ERR_INVALID_STATE; + + // If the VA space's mm has been torn down we can't allow more work. mm + // teardown (when available) stops all channels, so the check above should + // provide this guarantee. + UVM_ASSERT(!va_space->disallow_new_registers); + + // The GPU VA space is on its way out, so act as if it's already been + // unregistered. See gpu_va_space_stop_all_channels. + if (atomic_read(&gpu_va_space->disallow_new_channels)) + return NV_ERR_INVALID_DEVICE; + + // Verify that this handle pair wasn't already registered. This is just to + // keep our internal state consistent, since a match doesn't mean that the + // handles still represent the same channel. See the comment in + // uvm_user_channel.h. + if (find_user_channel(va_space, &user_channel->user_rm_channel)) + return NV_ERR_INVALID_CHANNEL; + + // TODO: Bug 1757136: Check that this handle pair also wasn't already + // registered on other GPUs in the GPU's SLI group. + + if (user_channel->num_resources > 0) { + NvU64 end = base + length - 1; + + if (end >= gpu_va_space->gpu->parent->max_channel_va) + return NV_ERR_OUT_OF_RANGE; + + // Create and insert the VA ranges, but don't map them yet since we + // can't call RM until we downgrade the lock to read mode. + status = create_va_ranges(mm, user_channel, base, end); + if (status != NV_OK) + return status; + } + + list_add(&user_channel->list_node, &gpu_va_space->registered_channels); + + return NV_OK; +} + +static NV_STATUS uvm_register_channel(uvm_va_space_t *va_space, + const NvProcessorUuid *uuid, + uvm_rm_user_object_t *user_rm_channel, + NvU64 base, + NvU64 length) +{ + NV_STATUS status; + uvm_gpu_t *gpu; + struct mm_struct *mm; + uvm_gpu_va_space_t *gpu_va_space; + uvm_user_channel_t *user_channel = NULL; + LIST_HEAD(deferred_free_list); + + uvm_va_space_down_read_rm(va_space); + + status = uvm_user_channel_create(va_space, uuid, user_rm_channel, &user_channel, base, length); + if (status != NV_OK) { + uvm_va_space_up_read_rm(va_space); + return status; + } + + // Retain the GPU VA space so our channel's gpu_va_space pointer remains + // valid after we drop the lock. + uvm_gpu_va_space_retain(user_channel->gpu_va_space); + + // Retain the GPU since retaining the gpu_va_space doesn't prevent it from + // going away after we drop the lock. + gpu = user_channel->gpu; + uvm_gpu_retain(gpu); + + uvm_va_space_up_read_rm(va_space); + + // The mm needs to be locked in order to remove stale HMM va_blocks. + mm = uvm_va_space_mm_retain_lock(va_space); + + // We have the RM objects now so we know what the VA range layout should be. + // Re-take the VA space lock in write mode to create and insert them. + uvm_va_space_down_write(va_space); + + // We dropped the lock so we have to re-verify that this gpu_va_space is + // still valid. 
If so, then the GPU is also still registered under the VA + // space. + gpu_va_space = user_channel->gpu_va_space; + if (uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_DEAD) { + status = NV_ERR_INVALID_DEVICE; + user_channel->gpu_va_space = NULL; + + // uvm_user_channel_detach expects a valid VA space, so we can't call it + // here. Just add this channel to the list directly so it gets + // destroyed in the error handler. + uvm_deferred_free_object_add(&deferred_free_list, + &user_channel->deferred_free, + UVM_DEFERRED_FREE_OBJECT_TYPE_CHANNEL); + } + + uvm_gpu_va_space_release(gpu_va_space); + if (status != NV_OK) + goto error_under_write; + + UVM_ASSERT(gpu == uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, uuid)); + UVM_ASSERT(gpu_va_space == uvm_gpu_va_space_get(va_space, gpu)); + + // Performs verification checks and inserts the channel's VA ranges into the + // VA space, but doesn't map them. + status = uvm_register_channel_under_write(mm, user_channel, base, length); + if (status != NV_OK) + goto error_under_write; + + if (mm) { + uvm_up_read_mmap_lock_out_of_order(mm); + uvm_va_space_mm_release(va_space); + } + + // The subsequent mappings will need to call into RM, which means we must + // downgrade the VA space lock to read mode. Although we're in read mode no + // other threads could modify this channel or its VA ranges: other threads + // which call channel register will first take the VA space lock in write + // mode above, and threads which call channel unregister or GPU VA space + // unregister unmap operate entirely with the lock in write mode. + uvm_va_space_downgrade_write_rm(va_space); + + status = uvm_user_channel_map_resources(user_channel); + if (status != NV_OK) + goto error_under_read; + + // Tell the GPU page fault handler about this instance_ptr -> user_channel + // mapping + status = uvm_gpu_add_user_channel(gpu, user_channel); + if (status != NV_OK) + goto error_under_read; + + status = bind_channel_resources(user_channel); + if (status != NV_OK) + goto error_under_read; + + uvm_va_space_up_read_rm(va_space); + uvm_gpu_release(gpu); + return NV_OK; + +error_under_write: + if (user_channel->gpu_va_space) + uvm_user_channel_detach(user_channel, &deferred_free_list); + uvm_va_space_up_write(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + uvm_deferred_free_object_list(&deferred_free_list); + uvm_gpu_release(gpu); + return status; + +error_under_read: + // We have to destroy the VA ranges, which means we need to re-take the VA + // space lock in write mode. That in turn means we must retain the channel + // so its memory doesn't get freed from under us (though it could get + // unregistered). Note that we also still have the GPU retained. + uvm_user_channel_retain(user_channel); + uvm_va_space_up_read_rm(va_space); + + uvm_va_space_down_write(va_space); + + // If a new channel was registered which uses our unmapped VA ranges, that + // new channel is responsible for mapping them if we haven't gotten there + // yet. See uvm_user_channel_map_resources. It will take a reference on them + // anyway so they won't go away. + + // If the channel was unregistered by another thread (explicitly or via GPU + // VA space unregister), the thread which did the unregister is responsible + // for destroying the channel. 
+ if (user_channel->gpu_va_space) { + uvm_user_channel_detach(user_channel, &deferred_free_list); + uvm_va_space_up_write(va_space); + uvm_deferred_free_object_list(&deferred_free_list); + } + else { + uvm_va_space_up_write(va_space); + } + + uvm_user_channel_release(user_channel); + uvm_gpu_release(gpu); + return status; +} + +NV_STATUS uvm_api_register_channel(UVM_REGISTER_CHANNEL_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_rm_user_object_t user_rm_channel = + { + .rm_control_fd = params->rmCtrlFd, + .user_client = params->hClient, + .user_object = params->hChannel + }; + return uvm_register_channel(va_space, ¶ms->gpuUuid, &user_rm_channel, params->base, params->length); +} + +static void free_user_channel(nv_kref_t *nv_kref) +{ + uvm_user_channel_t *user_channel = container_of(nv_kref, uvm_user_channel_t, kref); + UVM_ASSERT(!user_channel->gpu_va_space); + UVM_ASSERT(!user_channel->va_ranges); + UVM_ASSERT(!atomic_read(&user_channel->is_bound)); + uvm_kvfree(user_channel); +} + +void uvm_user_channel_release(uvm_user_channel_t *user_channel) +{ + if (user_channel) + nv_kref_put(&user_channel->kref, free_user_channel); +} + +void uvm_user_channel_stop(uvm_user_channel_t *user_channel) +{ + uvm_va_space_t *va_space = user_channel->gpu_va_space->va_space; + + if (!user_channel->rm_retained_channel) + return; + + // Skip if this channel was never bound, or if it's already been stopped. + // Note that since we only hold the VA space lock in read mode here, two + // threads could race and both call nvUvmInterfaceStopChannel concurrently. + // RM handles that with internal locking, so it's not a problem. + if (!atomic_read(&user_channel->is_bound)) + return; + + // TODO: Bug 1799173: Normal lock tracking should handle this assert once + // all RM calls have been moved out from under the VA space lock in + // write mode. + uvm_assert_rwsem_locked_read(&va_space->lock); + + // TODO: Bug 1737765. This doesn't stop the user from putting the + // channel back on the runlist, which could put stale instance + // pointers back in the fault buffer. + uvm_rm_locked_call_void(nvUvmInterfaceStopChannel(user_channel->rm_retained_channel, + va_space->user_channel_stops_are_immediate)); + + // Multiple threads could perform this set concurrently, but is_bound never + // transitions back to 1 after being set to 0 so that's not a problem. + atomic_set(&user_channel->is_bound, 0); +} + +void uvm_user_channel_detach(uvm_user_channel_t *user_channel, struct list_head *deferred_free_list) +{ + uvm_va_space_t *va_space; + uvm_gpu_va_space_t *gpu_va_space; + + if (!user_channel) + return; + + gpu_va_space = user_channel->gpu_va_space; + UVM_ASSERT(gpu_va_space); + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + va_space = gpu_va_space->va_space; + uvm_assert_rwsem_locked_write(&va_space->lock); + + // The caller is required to have already stopped the channel. We can't do + // it here since we're holding the VA space lock in write mode. + UVM_ASSERT(!atomic_read(&user_channel->is_bound)); + + if (!UVM_RB_TREE_EMPTY_NODE(&user_channel->instance_ptr.node)) { + // Prevent the bottom half from servicing faults on this channel. Note + // that this only prevents new faults from being serviced. It doesn't + // flush out faults currently being serviced, nor prior faults still + // pending in the fault buffer. Those are handled separately. 
+ uvm_gpu_remove_user_channel(user_channel->gpu_va_space->gpu, user_channel); + + // We can't release the channel back to RM here because leftover state + // for this channel (such as the instance pointer) could still be in the + // GPU fault buffer, so we need to prevent that state from being + // reallocated until we can flush the buffer. Flushing the buffer means + // taking the GPU isr_lock, so the caller is required to do that. + } + + list_del(&user_channel->list_node); + + uvm_deferred_free_object_add(deferred_free_list, + &user_channel->deferred_free, + UVM_DEFERRED_FREE_OBJECT_TYPE_CHANNEL); + + destroy_va_ranges(user_channel); + + user_channel->gpu_va_space = NULL; +} + +void uvm_user_channel_destroy_detached(uvm_user_channel_t *user_channel) +{ + // Check that this channel was already detached + UVM_ASSERT(user_channel->gpu_va_space == NULL); + + // On Volta+ GPUs, clearing non-replayable faults requires pushing the + // channel id into a method. The bottom half fault handler adds all such + // methods to a per-user_channel clear_faulted_tracker. We need to wait for + // this tracker before calling nvUvmInterfaceReleaseChannel, since that + // will release the channel id back to RM. + uvm_tracker_wait_deinit(&user_channel->clear_faulted_tracker); + + if (user_channel->resources) { + UVM_ASSERT(!user_channel->va_ranges); + + uvm_kvfree(user_channel->resources); + } + + if (user_channel->rm_retained_channel) + uvm_rm_locked_call_void(nvUvmInterfaceReleaseChannel(user_channel->rm_retained_channel)); + + uvm_user_channel_release(user_channel); +} + +static NV_STATUS uvm_unregister_channel(uvm_va_space_t *va_space, uvm_rm_user_object_t *user_rm_channel) +{ + uvm_gpu_t *gpu = NULL; + uvm_user_channel_t *user_channel = NULL; + NV_STATUS status = NV_OK; + LIST_HEAD(deferred_free_list); + + // Despite taking the VA space lock in read mode, since this also takes the + // serialize_writers_lock it also flushes out threads which may be about to + // bind this channel. Without that we might stop the channel first, then the + // other thread could come in and re-bind the channel. + uvm_va_space_down_read_rm(va_space); + + // Tell RM to kill the channel before we start unmapping its allocations. + // This is to prevent spurious MMU faults during teardown. + user_channel = find_user_channel(va_space, user_rm_channel); + if (user_channel) { + uvm_user_channel_retain(user_channel); + uvm_user_channel_stop(user_channel); + } + + uvm_va_space_up_read_rm(va_space); + + if (!user_channel) + return NV_ERR_INVALID_CHANNEL; + + // Re-take the lock in write mode to detach the channel + uvm_va_space_down_write(va_space); + + // We dropped the lock so we have to re-verify that someone else didn't come + // in and detach us. 
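    //
    // Teardown is split into three phases: uvm_user_channel_stop() was called
    // above with the VA space lock in read mode, uvm_user_channel_detach()
    // below runs with the lock in write mode and queues the channel on the
    // deferred free list, and uvm_user_channel_destroy_detached() later
    // releases the RM handle and resources while processing that list with no
    // VA space lock held.
    //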
+ if (user_channel->gpu_va_space) { + gpu = user_channel->gpu_va_space->gpu; + uvm_user_channel_detach(user_channel, &deferred_free_list); + uvm_gpu_retain(gpu); + } + else { + status = NV_ERR_INVALID_CHANNEL; + } + + uvm_va_space_up_write(va_space); + + if (status == NV_OK) { + uvm_deferred_free_object_list(&deferred_free_list); + uvm_gpu_release(gpu); + } + + uvm_user_channel_release(user_channel); + + return status; +} + +NV_STATUS uvm_api_unregister_channel(UVM_UNREGISTER_CHANNEL_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_rm_user_object_t user_rm_channel = + { + .rm_control_fd = -1, // Not needed for a UVM-internal handle lookup + .user_client = params->hClient, + .user_object = params->hChannel + }; + return uvm_unregister_channel(va_space, &user_rm_channel); +} + +static NV_STATUS uvm_test_check_channel_va_space_get_info(uvm_va_space_t *va_space, + UVM_TEST_CHECK_CHANNEL_VA_SPACE_PARAMS *params, + UvmGpuChannelInstanceInfo *channel_info) +{ + uvm_gpu_t *gpu; + uvm_gpu_va_space_t *gpu_va_space; + void *rm_retained_channel; + NV_STATUS status; + + uvm_va_space_down_read_rm(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, ¶ms->gpu_uuid); + if (!gpu) { + status = NV_ERR_INVALID_DEVICE; + goto out; + } + + gpu_va_space = uvm_gpu_va_space_get(va_space, gpu); + UVM_ASSERT(gpu_va_space); + + // Look up the instance pointer + // + // TODO: Bug 1624521: This interface needs to use rmCtrlFd to do validation + memset(channel_info, 0, sizeof(*channel_info)); + status = uvm_rm_locked_call(nvUvmInterfaceRetainChannel(gpu_va_space->duped_gpu_va_space, + params->client, + params->channel, + &rm_retained_channel, + channel_info)); + if (status != NV_OK) + goto out; + + uvm_rm_locked_call_void(nvUvmInterfaceReleaseChannel(rm_retained_channel)); + +out: + uvm_va_space_up_read_rm(va_space); + return status; +} + +NV_STATUS uvm_test_check_channel_va_space(UVM_TEST_CHECK_CHANNEL_VA_SPACE_PARAMS *params, struct file *filp) +{ + struct file *va_space_filp = NULL; + uvm_va_space_t *va_space = NULL; + uvm_va_space_t *channel_va_space; + uvm_gpu_t *gpu; + uvm_fault_buffer_entry_t fault_entry; + UvmGpuChannelInstanceInfo *channel_info; + NV_STATUS status; + + memset(&fault_entry, 0, sizeof(fault_entry)); + + channel_info = uvm_kvmalloc_zero(sizeof(*channel_info)); + if (!channel_info) { + status = NV_ERR_NO_MEMORY; + goto out; + } + + // The channel is owned by this file, so we have to query it using this + // file's VA space. + status = uvm_test_check_channel_va_space_get_info(uvm_va_space_get(filp), params, channel_info); + if (status != NV_OK) + goto out; + + // We need to do the lookup using the input file's VA space + va_space_filp = fget(params->va_space_fd); + if (!uvm_file_is_nvidia_uvm(va_space_filp)) { + status = NV_ERR_INVALID_ARGUMENT; + goto out; + } + + va_space = uvm_va_space_get(va_space_filp); + uvm_va_space_down_read(va_space); + + // We can do this query outside of the lock, but doing it within the lock + // simplifies error handling. 
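    //
    // Overall strategy of this test ioctl: retain the channel through the
    // owning file's VA space to learn its instance pointer and engine type,
    // synthesize a minimal fault buffer entry from that information, and run
    // it through uvm_gpu_fault_entry_to_va_space() against the target VA
    // space. The check passes only if the translation lands on that VA space.
    //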
+ status = uvm_va_space_initialized(va_space); + if (status != NV_OK) + goto out; + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpu_uuid); + if (!gpu || !uvm_processor_mask_test(&va_space->faultable_processors, gpu->id)) { + status = NV_ERR_INVALID_DEVICE; + goto out; + } + + if (params->ve_id >= gpu->max_subcontexts) { + status = NV_ERR_INVALID_ARGUMENT; + goto out; + } + + // Craft enough of the fault entry to do a VA space translation + fault_entry.fault_type = UVM_FAULT_TYPE_INVALID_PTE; + + if (channel_info->sysmem) + fault_entry.instance_ptr.aperture = UVM_APERTURE_SYS; + else + fault_entry.instance_ptr.aperture = UVM_APERTURE_VID; + fault_entry.instance_ptr.address = channel_info->base; + + if (channel_info->channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_GR) { + fault_entry.fault_source.client_type = UVM_FAULT_CLIENT_TYPE_GPC; + fault_entry.fault_source.mmu_engine_type = UVM_MMU_ENGINE_TYPE_GRAPHICS; + fault_entry.fault_source.ve_id = params->ve_id; + // Translated to the SMC engine-local VEID + fault_entry.fault_source.ve_id += channel_info->smcEngineVeIdOffset; + + } + else if (channel_info->channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_CE && + gpu->parent->non_replayable_faults_supported) { + fault_entry.fault_source.client_type = UVM_FAULT_CLIENT_TYPE_HUB; + fault_entry.fault_source.mmu_engine_type = UVM_MMU_ENGINE_TYPE_CE; + fault_entry.fault_source.ve_id = 0; + } + else { + status = NV_ERR_INVALID_CHANNEL; + goto out; + } + + // We can ignore the return code because this ioctl only cares about whether + // the provided channel + VEID matches the provided VA space. In all of the + // non-NV_OK cases the translation will fail and we should return + // NV_ERR_INVALID_CHANNEL. channel_va_space == NULL for all such cases. + (void)uvm_gpu_fault_entry_to_va_space(gpu, &fault_entry, &channel_va_space); + + if (channel_va_space == va_space) + status = NV_OK; + else + status = NV_ERR_INVALID_CHANNEL; + +out: + if (va_space_filp) { + if (va_space) + uvm_va_space_up_read(va_space); + fput(va_space_filp); + } + + uvm_kvfree(channel_info); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_user_channel.h b/kernel-open/nvidia-uvm/uvm_user_channel.h new file mode 100644 index 000000000..214140fe3 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_user_channel.h @@ -0,0 +1,261 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_USER_CHANNEL_H__ +#define __UVM_USER_CHANNEL_H__ + +#include "uvm_forward_decl.h" +#include "uvm_va_space.h" +#include "uvm_hal_types.h" +#include "uvm_rb_tree.h" +#include "nv-kref.h" + +// This structure contains the VA spaces of all the subcontexts in a TSG. It +// is stored in a per-GPU UVM RB tree and is required to perform instance_ptr +// to VA space translations when channels are registered in a subcontext, +// since SM fault/access counter notification packets may report any +// instance_ptr in the TSG. +typedef struct +{ + // Number of instance pointers referencing this subcontext info descriptor + NvU32 total_refcount; + + // All channels in a TSG must be bound to the same SMC Engine + NvU32 smc_engine_id; + + // Array of per-subcontext information + struct + { + uvm_va_space_t *va_space; + + // Number of instance pointers referencing this specific subcontext + NvU32 refcount; + } *subctxs; + + // UVM RB tree node for insertion into the parent GPU's tsg_table. + uvm_rb_tree_node_t node; +} uvm_user_channel_subctx_info_t; + +struct uvm_user_channel_struct +{ + // Parent GPU VA space + uvm_gpu_va_space_t *gpu_va_space; + + // Parent GPU. This is also available in gpu_va_space->gpu, but we need a + // separate pointer which outlives the gpu_va_space during deferred channel + // teardown. + uvm_gpu_t *gpu; + + // RM handles used to register this channel. We store them for UVM-internal + // purposes to look up the uvm_user_channel_t for unregistration, rather + // than introducing a new "UVM channel handle" object for user space. + // + // DO NOT TRUST THESE VALUES AFTER UVM_REGISTER_CHANNEL. They are passed by + // user-space at channel registration time to validate the channel with RM, + // but afterwards the user could free and reallocate either of the client or + // object handles, so we can't pass them to RM trusting that they still + // represent this channel. + // + // That's ok because we never pass these handles to RM again after + // registration. + uvm_rm_user_object_t user_rm_channel; + + // Type of the engine the channel is bound to + UVM_GPU_CHANNEL_ENGINE_TYPE engine_type; + + // true if the channel belongs to a subcontext or false if the channel + // belongs to a regular context + bool in_subctx; + + // Subcontext ID, aka VEID + NvU32 subctx_id; + + struct + { + // Whether the channel belongs to a TSG or not + bool valid; + + // If valid is true, tsg_id contains the ID of the TSG + NvU32 id; + + // If valid is true, this is the maximum number of subcontexts in the TSG + NvU32 max_subctx_count; + } tsg; + + // This is the value that needs to be used when ringing the channel's + // doorbell + NvU32 work_submission_token; + + // This is the address of the channel's doorbell + volatile NvU32 *work_submission_offset; + + // On Turing+, the CLEAR_FAULTED method requires passing a RM-provided + // handle to identify the channel. + NvU32 clear_faulted_token; + + // Tracker used to aggregate clear faulted operations, needed for user + // channel removal + uvm_tracker_t clear_faulted_tracker; + + // Address of the NV_CHRAM_CHANNEL register. 
Only valid on GPUs with + // non_replayable_faults_supported && !has_clear_faulted_channel_method + volatile NvU32 *chram_channel_register; + + // Id of the SMC engine this channel is bound to, or zero if the GPU + // does not support SMC or it is a CE channel + NvU32 smc_engine_id; + + // VEIDs are partitioned under SMC (each SMC engine owns a subrange of + // VEIDs), but the VEID reported in fault packets* is the global (although + // GFID-local) VEID. In order to compute the SMC engine-local VEID, we need + // to subtract smc_engine_ve_id_offset from the reported one. + // + // *Access counter packets already report SMC engine-local VEIDs. + NvU32 smc_engine_ve_id_offset; + + // If in_subctx is true, subctx_info will point at a per-TSG data structure + // that contains the VA spaces of all the subcontexts in the TSG. This value + // is assigned in uvm_gpu_add_user_channel. + uvm_user_channel_subctx_info_t *subctx_info; + + // Number of resources reported by RM. This is the size of both the + // resources and va_ranges arrays. + size_t num_resources; + + // Array of all resources for this channel, shared or not. Virtual mappings + // for matching physical resources are shared across all channels in the + // same GPU VA space and TSG. Each channel will retain the mappings (VA + // ranges) it uses at channel register and will release them at + // uvm_user_channel_destroy_detached, so these physical resources outlive + // the corresponding VA ranges. + UvmGpuChannelResourceInfo *resources; + + // Array of all VA ranges associated with this channel. Entry i in this + // array corresponds to resource i in the resources array above and has the + // same descriptor. uvm_user_channel_detach will drop the ref counts for + // these VA ranges, potentially destroying them. + uvm_va_range_t **va_ranges; + + // Physical instance pointer. There is a 1:1 mapping between instance + // pointer and channel. GPU faults report an instance pointer, and the GPU + // fault handler converts this instance pointer into the parent + // uvm_va_space_t. + struct + { + // Physical address of the instance pointer. + uvm_gpu_phys_address_t addr; + + // Node for inserting the user channel in the parent GPU instance + // pointer table. The node will be initialized as an empty UVM RB node + // on user channel creation and will transition to not empty when + // instance_ptr -> user_channel translation has been added + // to the per-GPU UVM RB tree + uvm_rb_tree_node_t node; + } instance_ptr; + + // Opaque object which refers to this channel in the nvUvmInterface APIs + void *rm_retained_channel; + + // Hardware runlist and channel IDs, used for debugging and fault processing + NvU32 hw_runlist_id; + NvU32 hw_channel_id; + + // Node in the owning gpu_va_space's registered_channels list. Cleared once + // the channel is detached. + struct list_head list_node; + + // Boolean which is set during the window between + // nvUvmInterfaceBindChannelResources and nvUvmInterfaceStopChannel. This is + // an atomic_t because multiple threads may call nvUvmInterfaceStopChannel + // and clear this concurrently. + atomic_t is_bound; + + // Node for the deferred free list where this channel is stored upon being + // detached. + uvm_deferred_free_object_t deferred_free; + + // Reference count for this user channel. This only protects the memory + // object itself, for use in cases when user channel needs to be accessed + // across dropping and re-acquiring the VA space lock. 
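    //
    // Typical pattern (see uvm_unregister_channel()): retain the channel,
    // drop the VA space lock, re-take it in write mode, then re-check
    // gpu_va_space before touching the channel, since another thread may have
    // detached it during the window; finally release the kref.
    //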
+ nv_kref_t kref; + + struct + { + bool scheduled; + + nv_kthread_q_item_t kill_channel_q_item; + + uvm_va_space_t *va_space; + + char fault_packet[UVM_GPU_MMU_MAX_FAULT_PACKET_SIZE]; + } kill_channel; +}; + +// Retains the user channel memory object. uvm_user_channel_destroy_detached and +// uvm_user_channel_release drop the count. This is used to keep the +// user channel object allocated when dropping and re-taking the VA space lock. +// If another thread called uvm_user_channel_detach in the meantime, +// user_channel->gpu_va_space will be NULL. +static inline void uvm_user_channel_retain(uvm_user_channel_t *user_channel) +{ + nv_kref_get(&user_channel->kref); +} + +// This only frees the user channel object itself, so the user channel must have +// been detached and destroyed prior to the final release. +void uvm_user_channel_release(uvm_user_channel_t *user_channel); + +// User-facing APIs (uvm_api_register_channel, uvm_api_unregister_channel) are +// declared in uvm_api.h. + +// First phase of user channel destroy which stops a user channel, forcibly if +// necessary. After calling this function no new GPU faults targeting this +// channel will arrive, but old faults may continue to be serviced. +// +// LOCKING: The owning VA space must be locked in read mode, not write mode. +void uvm_user_channel_stop(uvm_user_channel_t *user_channel); + +// Second phase of user channel destroy which detaches the channel from the +// parent gpu_va_space and adds it to the list of pending objects to be freed. +// uvm_user_channel_stop must have already been called on this channel. +// +// All virtual mappings associated with the channel are torn down. The +// user_channel object and the instance pointer and resources it contains are +// not destroyed. The caller must use uvm_user_channel_destroy_detached to do +// that. +// +// This multi-phase approach allows the caller to drop the VA space lock and +// flush the fault buffer before removing the instance pointer. See +// uvm_gpu_destroy_detached_channels. +// +// LOCKING: The owning VA space must be locked in write mode. +void uvm_user_channel_detach(uvm_user_channel_t *user_channel, struct list_head *deferred_free_list); + +// Third phase of user channel destroy which frees the user_channel object and +// releases the corresponding resources and instance pointer. The channel must +// have been detached first. +// +// LOCKING: No lock is required, but the owning GPU must be retained. +void uvm_user_channel_destroy_detached(uvm_user_channel_t *user_channel); + +#endif // __UVM_USER_CHANNEL_H__ diff --git a/kernel-open/nvidia-uvm/uvm_va_block.c b/kernel-open/nvidia-uvm/uvm_va_block.c new file mode 100644 index 000000000..6aa0c5e1d --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_block.c @@ -0,0 +1,10593 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_common.h" +#include "uvm_api.h" +#include "uvm_gpu.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_va_block.h" +#include "uvm_hal_types.h" +#include "uvm_kvmalloc.h" +#include "uvm_tools.h" +#include "uvm_push.h" +#include "uvm_hal.h" +#include "uvm_perf_thrashing.h" +#include "uvm_perf_prefetch.h" +#include "uvm_mem.h" +#include "uvm_gpu_access_counters.h" +#include "uvm_va_space_mm.h" +#include "uvm_test_ioctl.h" + + + + +typedef enum +{ + BLOCK_PTE_OP_MAP, + BLOCK_PTE_OP_REVOKE, + BLOCK_PTE_OP_COUNT +} block_pte_op_t; + +static NvU64 uvm_perf_authorized_cpu_fault_tracking_window_ns = 300000; + +static struct kmem_cache *g_uvm_va_block_cache __read_mostly; +static struct kmem_cache *g_uvm_va_block_gpu_state_cache __read_mostly; +static struct kmem_cache *g_uvm_page_mask_cache __read_mostly; +static struct kmem_cache *g_uvm_va_block_context_cache __read_mostly; + +static int uvm_fault_force_sysmem __read_mostly = 0; +module_param(uvm_fault_force_sysmem, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(uvm_fault_force_sysmem, "Force (1) using sysmem storage for pages that faulted. Default: 0."); + +static int uvm_perf_map_remote_on_eviction __read_mostly = 1; +module_param(uvm_perf_map_remote_on_eviction, int, S_IRUGO); + +// Caching is always disabled for mappings to remote memory. The following two +// module parameters can be used to force caching for GPU peer/sysmem mappings. +// +// However, it is important to note that it may not be safe to enable caching +// in the general case so the enablement should only be used for experiments. +static unsigned uvm_exp_gpu_cache_peermem __read_mostly = 0; +module_param(uvm_exp_gpu_cache_peermem, uint, S_IRUGO); +MODULE_PARM_DESC(uvm_exp_gpu_cache_peermem, + "Force caching for mappings to peer memory. " + "This is an experimental parameter that may cause correctness issues if used."); + +static unsigned uvm_exp_gpu_cache_sysmem __read_mostly = 0; +module_param(uvm_exp_gpu_cache_sysmem, uint, S_IRUGO); +MODULE_PARM_DESC(uvm_exp_gpu_cache_sysmem, + "Force caching for mappings to system memory. 
" + "This is an experimental parameter that may cause correctness issues if used."); + +static void block_deferred_eviction_mappings_entry(void *args); + +uvm_va_space_t *uvm_va_block_get_va_space_maybe_dead(uvm_va_block_t *va_block) +{ +#if UVM_IS_CONFIG_HMM() + if (va_block->hmm.va_space) + return va_block->hmm.va_space; +#endif + + if (va_block->va_range) + return va_block->va_range->va_space; + + return NULL; +} + +uvm_va_space_t *uvm_va_block_get_va_space(uvm_va_block_t *va_block) +{ + uvm_va_space_t *va_space; + + UVM_ASSERT(!uvm_va_block_is_dead(va_block)); + + va_space = uvm_va_block_get_va_space_maybe_dead(va_block); + UVM_ASSERT(va_space); + + return va_space; +} + +static NvU64 block_gpu_pte_flag_cacheable(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_processor_id_t resident_id) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + UVM_ASSERT(UVM_ID_IS_VALID(resident_id)); + + // Local vidmem is always cached + if (uvm_id_equal(resident_id, gpu->id)) + return UVM_MMU_PTE_FLAGS_CACHED; + + if (UVM_ID_IS_CPU(resident_id)) + return uvm_exp_gpu_cache_sysmem == 0 ? UVM_MMU_PTE_FLAGS_NONE : UVM_MMU_PTE_FLAGS_CACHED; + + UVM_ASSERT(uvm_processor_mask_test(&va_space->can_access[uvm_id_value(gpu->id)], resident_id)); + + return uvm_exp_gpu_cache_peermem == 0 ? UVM_MMU_PTE_FLAGS_NONE : UVM_MMU_PTE_FLAGS_CACHED; +} + +static uvm_gpu_t *block_get_gpu(uvm_va_block_t *block, uvm_gpu_id_t gpu_id) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + return uvm_va_space_get_gpu(va_space, gpu_id); +} + +static const char *block_processor_name(uvm_va_block_t *block, uvm_processor_id_t id) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + return uvm_va_space_processor_name(va_space, id); +} + +static bool block_processor_has_memory(uvm_va_block_t *block, uvm_processor_id_t id) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + return uvm_va_space_processor_has_memory(va_space, id); +} + +static bool is_uvm_fault_force_sysmem_set(void) +{ + // Only enforce this during testing + return uvm_enable_builtin_tests && uvm_fault_force_sysmem != 0; +} + +static bool va_space_map_remote_on_eviction(uvm_va_space_t *va_space) +{ + return uvm_perf_map_remote_on_eviction && + uvm_va_space_has_access_counter_migrations(va_space); +} + +static const uvm_processor_mask_t *block_get_uvm_lite_gpus(uvm_va_block_t *va_block) +{ + // Note that for HMM we always return a pointer to a zero bitmap + // (not allocated on the stack) since uvm_lite GPUs are not supported. 
+ static const uvm_processor_mask_t uvm_lite_gpus = {}; + + if (uvm_va_block_is_hmm(va_block)) + return &uvm_lite_gpus; + else + return &va_block->va_range->uvm_lite_gpus; +} + +void uvm_va_block_retry_init(uvm_va_block_retry_t *retry) +{ + if (!retry) + return; + + uvm_tracker_init(&retry->tracker); + INIT_LIST_HEAD(&retry->used_chunks); + INIT_LIST_HEAD(&retry->free_chunks); +} + +static bool block_verify_cpu_chunks(uvm_va_block_t *block) +{ + uvm_cpu_chunk_t *chunk; + size_t alloced_pages = 0; + NvU64 tracking_virt_addr = block->start; + uvm_page_mask_t region_mask; + uvm_page_index_t page_index; + uvm_va_block_region_t block_region = uvm_va_block_region_from_block(block); + + for_each_cpu_chunk_in_block(chunk, page_index, block) { + NvU64 chunk_virt_addr = uvm_va_block_cpu_page_address(block, page_index); + size_t num_chunk_pages = uvm_cpu_chunk_num_pages(chunk); + uvm_page_index_t chunk_page; + + UVM_ASSERT(tracking_virt_addr <= chunk_virt_addr); + if (tracking_virt_addr > chunk_virt_addr) + return false; + + UVM_ASSERT(uvm_va_block_contains_address(block, chunk_virt_addr)); + if (!uvm_va_block_contains_address(block, chunk_virt_addr)) + return false; + + alloced_pages += uvm_cpu_chunk_num_pages(chunk); + uvm_page_mask_init_from_region(®ion_mask, + uvm_va_block_region(page_index, page_index + uvm_cpu_chunk_num_pages(chunk)), + NULL); + UVM_ASSERT(uvm_page_mask_intersects(&block->cpu.allocated, ®ion_mask)); + if (!uvm_page_mask_intersects(&block->cpu.allocated, ®ion_mask)) + return false; + + tracking_virt_addr = chunk_virt_addr; + + for (chunk_page = page_index; chunk_page < page_index + num_chunk_pages; chunk_page++) { + UVM_ASSERT(uvm_cpu_chunk_get_chunk_for_page(block, chunk_page) == chunk); + if (uvm_cpu_chunk_get_chunk_for_page(block, chunk_page) != chunk) + return false; + } + } + + UVM_ASSERT(alloced_pages == uvm_page_mask_weight(&block->cpu.allocated)); + if (alloced_pages != uvm_page_mask_weight(&block->cpu.allocated)) + return false; + + for_each_va_block_page_in_region_mask(page_index, &block->cpu.allocated, block_region) { + uvm_cpu_chunk_t *next; + uvm_page_index_t next_page_index; + + chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index); + UVM_ASSERT(chunk); + if (!chunk) + return false; + + next_page_index = uvm_va_block_next_page_in_mask(block_region, + &block->cpu.allocated, + page_index + uvm_cpu_chunk_num_pages(chunk) - 1); + next = uvm_cpu_chunk_next(block, &next_page_index); + + if (next_page_index < block_region.outer) { + UVM_ASSERT(next && + uvm_va_block_cpu_page_address(block, page_index) + uvm_cpu_chunk_get_size(chunk) <= + uvm_va_block_cpu_page_address(block, next_page_index)); + if (!next || + (uvm_va_block_cpu_page_address(block, page_index) + uvm_cpu_chunk_get_size(chunk) > + uvm_va_block_cpu_page_address(block, next_page_index))) + return false; + } + else { + UVM_ASSERT(next == NULL); + if (next != NULL) + return false; + } + } + + return true; +} + +// Frees any left-over free chunks and unpins all the used chunks +void uvm_va_block_retry_deinit(uvm_va_block_retry_t *retry, uvm_va_block_t *va_block) +{ + uvm_gpu_t *gpu; + uvm_gpu_chunk_t *gpu_chunk; + uvm_gpu_chunk_t *next_chunk; + + if (!retry) + return; + + uvm_tracker_deinit(&retry->tracker); + + // Free any unused chunks + list_for_each_entry_safe(gpu_chunk, next_chunk, &retry->free_chunks, list) { + list_del_init(&gpu_chunk->list); + gpu = uvm_gpu_chunk_get_gpu(gpu_chunk); + uvm_pmm_gpu_free(&gpu->pmm, gpu_chunk, NULL); + } + + // Unpin all the used chunks now that we are done + 
list_for_each_entry_safe(gpu_chunk, next_chunk, &retry->used_chunks, list) { + list_del_init(&gpu_chunk->list); + gpu = uvm_gpu_chunk_get_gpu(gpu_chunk); + uvm_pmm_gpu_unpin_temp(&gpu->pmm, gpu_chunk, va_block); + } +} + +static void block_retry_add_free_chunk(uvm_va_block_retry_t *retry, uvm_gpu_chunk_t *gpu_chunk) +{ + list_add_tail(&gpu_chunk->list, &retry->free_chunks); +} + +static void block_retry_add_used_chunk(uvm_va_block_retry_t *retry, uvm_gpu_chunk_t *gpu_chunk) +{ + list_add_tail(&gpu_chunk->list, &retry->used_chunks); +} + +static uvm_gpu_chunk_t *block_retry_get_free_chunk(uvm_va_block_retry_t *retry, uvm_gpu_t *gpu, uvm_chunk_size_t size) +{ + uvm_gpu_chunk_t *gpu_chunk; + + list_for_each_entry(gpu_chunk, &retry->free_chunks, list) { + if (uvm_gpu_chunk_get_gpu(gpu_chunk) == gpu && uvm_gpu_chunk_get_size(gpu_chunk) == size) { + list_del_init(&gpu_chunk->list); + return gpu_chunk; + } + } + + return NULL; +} + +// Encapsulates a reference to a physical page belonging to a specific processor +// within a VA block. +typedef struct +{ + // Processor the page is on + uvm_processor_id_t processor; + + // The page index + uvm_page_index_t page_index; +} block_phys_page_t; + +static block_phys_page_t block_phys_page(uvm_processor_id_t processor, uvm_page_index_t page_index) +{ + return (block_phys_page_t){ processor, page_index }; +} + +NV_STATUS uvm_va_block_init(void) +{ + if (uvm_enable_builtin_tests) + g_uvm_va_block_cache = NV_KMEM_CACHE_CREATE("uvm_va_block_wrapper_t", uvm_va_block_wrapper_t); + else + g_uvm_va_block_cache = NV_KMEM_CACHE_CREATE("uvm_va_block_t", uvm_va_block_t); + + if (!g_uvm_va_block_cache) + return NV_ERR_NO_MEMORY; + + g_uvm_va_block_gpu_state_cache = NV_KMEM_CACHE_CREATE("uvm_va_block_gpu_state_t", uvm_va_block_gpu_state_t); + if (!g_uvm_va_block_gpu_state_cache) + return NV_ERR_NO_MEMORY; + + g_uvm_page_mask_cache = NV_KMEM_CACHE_CREATE("uvm_page_mask_t", uvm_page_mask_t); + if (!g_uvm_page_mask_cache) + return NV_ERR_NO_MEMORY; + + g_uvm_va_block_context_cache = NV_KMEM_CACHE_CREATE("uvm_va_block_context_t", uvm_va_block_context_t); + if (!g_uvm_va_block_context_cache) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_va_block_exit(void) +{ + kmem_cache_destroy_safe(&g_uvm_va_block_context_cache); + kmem_cache_destroy_safe(&g_uvm_page_mask_cache); + kmem_cache_destroy_safe(&g_uvm_va_block_gpu_state_cache); + kmem_cache_destroy_safe(&g_uvm_va_block_cache); +} + +uvm_va_block_context_t *uvm_va_block_context_alloc(struct mm_struct *mm) +{ + uvm_va_block_context_t *block_context = kmem_cache_alloc(g_uvm_va_block_context_cache, NV_UVM_GFP_FLAGS); + if (block_context) + uvm_va_block_context_init(block_context, mm); + + return block_context; +} + +void uvm_va_block_context_free(uvm_va_block_context_t *va_block_context) +{ + if (va_block_context) + kmem_cache_free(g_uvm_va_block_context_cache, va_block_context); +} + +// Convert from page_index to chunk_index. The goal is for each system page in +// the region [start, start + size) to be covered by the largest naturally- +// aligned user chunk size. 
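+//
+// [Editorial illustration, not part of the original commit] Hedged examples of
+// the mapping, assuming a GPU whose user chunk sizes are {4K, 64K, 2M}:
+//
+//   - start 2M-aligned and size == 2M: the whole block is one 2M chunk, so every
+//     page_index maps to chunk index 0 and *out_chunk_size is UVM_CHUNK_SIZE_2M.
+//   - start and size both 64K-aligned (but not a full 2M block): the chunk index
+//     is (page_index * PAGE_SIZE) / 64K and *out_chunk_size is UVM_CHUNK_SIZE_64K.
+//   - If only PAGE_SIZE were supported, the chunk index would equal page_index.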
+size_t uvm_va_block_gpu_chunk_index_range(NvU64 start, + NvU64 size, + uvm_gpu_t *gpu, + uvm_page_index_t page_index, + uvm_chunk_size_t *out_chunk_size) +{ + uvm_chunk_sizes_mask_t chunk_sizes = gpu->parent->mmu_user_chunk_sizes; + uvm_chunk_size_t chunk_size, final_chunk_size; + size_t num_chunks, num_chunks_total; + NvU64 addr, end, aligned_start, aligned_addr, aligned_end, temp_size; + + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(size)); + UVM_ASSERT(size > 0); + UVM_ASSERT(size <= UVM_CHUNK_SIZE_2M); + UVM_ASSERT(UVM_ALIGN_DOWN(start, UVM_CHUNK_SIZE_2M) == UVM_ALIGN_DOWN(start + size - 1, UVM_CHUNK_SIZE_2M)); + BUILD_BUG_ON(UVM_VA_BLOCK_SIZE != UVM_CHUNK_SIZE_2M); + + // PAGE_SIZE needs to be the lowest natively-supported chunk size in the + // mask, since we never deal with chunk sizes smaller than that (although we + // may have PTEs mapping pages smaller than that). + UVM_ASSERT(uvm_chunk_find_first_size(chunk_sizes) == PAGE_SIZE); + + // Optimize the ideal Pascal+ case: the whole block is covered by a single + // 2M page. + if ((chunk_sizes & UVM_CHUNK_SIZE_2M) && size == UVM_CHUNK_SIZE_2M) { + UVM_ASSERT(IS_ALIGNED(start, UVM_CHUNK_SIZE_2M)); + final_chunk_size = UVM_CHUNK_SIZE_2M; + num_chunks_total = 0; + goto out; + } + + // Only one 2M chunk can fit within a VA block on any GPU architecture, so + // remove that size from consideration. + chunk_sizes &= ~UVM_CHUNK_SIZE_2M; + + // Next common case: the whole block is aligned and sized to perfectly fit + // the largest page size. + // + // TODO: Bug 1750144: This might not be the common case for HMM. Verify that + // this helps performance more than it hurts. + final_chunk_size = uvm_chunk_find_last_size(chunk_sizes); + if (IS_ALIGNED(start, final_chunk_size) && IS_ALIGNED(size, final_chunk_size)) { + num_chunks_total = (size_t)uvm_div_pow2_64(page_index * PAGE_SIZE, final_chunk_size); + goto out; + } + + // We didn't hit our special paths. Do it the hard way. + + num_chunks_total = 0; + addr = start + page_index * PAGE_SIZE; + end = start + size; + final_chunk_size = 0; + UVM_ASSERT(addr < end); + + // The below loop collapses almost completely when chunk_size == PAGE_SIZE + // since in that lowest-common-denominator case everything is already + // aligned. Skip it and handle that specially after the loop. + // + // Note that since we removed 2M already above, this loop will only iterate + // once on x86 Pascal+ since only 64K is left. + chunk_sizes &= ~PAGE_SIZE; + + // This loop calculates the number of chunks between start and addr by + // calculating the number of whole chunks of each size between them, + // starting with the largest allowed chunk size. This requires fewer + // iterations than if we began from start and kept calculating the next + // larger chunk size boundary. + for_each_chunk_size_rev(chunk_size, chunk_sizes) { + aligned_start = UVM_ALIGN_UP(start, chunk_size); + aligned_addr = UVM_ALIGN_DOWN(addr, chunk_size); + aligned_end = UVM_ALIGN_DOWN(end, chunk_size); + + // If addr and start are within the same chunk, try smaller + if (aligned_start > aligned_addr) + continue; + + // If addr and end are not in the same chunk, then addr is covered by a + // single chunk of the current size. Ignore smaller boundaries between + // addr and aligned_addr. + if (aligned_addr < aligned_end && final_chunk_size == 0) { + addr = aligned_addr; + final_chunk_size = chunk_size; + } + + // How many chunks of this size are between start and addr? 
Note that + // this might be 0 since aligned_addr and aligned_start could be in the + // same chunk. + num_chunks = uvm_div_pow2_32(((NvU32)aligned_addr - aligned_start), chunk_size); + num_chunks_total += num_chunks; + + // We've already accounted for these chunks, so "remove" them by + // bringing start, addr, and end closer together to calculate the + // remaining chunk sizes. + temp_size = num_chunks * chunk_size; + addr -= temp_size; + end -= temp_size; + + // Once there's no separation between addr and start, and we've + // successfully found the right chunk size when taking end into account, + // we're done. + if (addr == start && final_chunk_size) + break; + } + + // Handle PAGE_SIZE cleanup since we skipped it in the loop + num_chunks_total += (addr - start) / PAGE_SIZE; + if (final_chunk_size == 0) + final_chunk_size = PAGE_SIZE; + +out: + if (out_chunk_size) + *out_chunk_size = final_chunk_size; + + return num_chunks_total; +} + +static size_t block_gpu_chunk_index(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_page_index_t page_index, + uvm_chunk_size_t *out_chunk_size) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_chunk_size_t size; + uvm_gpu_chunk_t *chunk; + + size_t index = uvm_va_block_gpu_chunk_index_range(block->start, uvm_va_block_size(block), gpu, page_index, &size); + + UVM_ASSERT(size >= PAGE_SIZE); + + if (gpu_state) { + UVM_ASSERT(gpu_state->chunks); + chunk = gpu_state->chunks[index]; + if (chunk) { + UVM_ASSERT(uvm_gpu_chunk_get_size(chunk) == size); + UVM_ASSERT(chunk->state != UVM_PMM_GPU_CHUNK_STATE_PMA_OWNED); + UVM_ASSERT(chunk->state != UVM_PMM_GPU_CHUNK_STATE_FREE); + } + } + + if (out_chunk_size) + *out_chunk_size = size; + + return index; +} + +// Compute the size of the chunk known to start at start_page_index +static uvm_chunk_size_t block_gpu_chunk_size(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_page_index_t start_page_index) +{ + uvm_chunk_sizes_mask_t chunk_sizes = gpu->parent->mmu_user_chunk_sizes; + uvm_chunk_sizes_mask_t start_alignments, pow2_leq_size, allowed_sizes; + NvU64 start = uvm_va_block_cpu_page_address(block, start_page_index); + NvU64 size = block->end - start + 1; + + // Create a mask of all sizes for which start is aligned. x ^ (x-1) yields a + // mask of the rightmost 1 bit in x, as well as all trailing 0 bits in x. + // Example: 1011000 -> 0001111 + start_alignments = (uvm_chunk_sizes_mask_t)(start ^ (start - 1)); + + // Next, compute all sizes (powers of two) which are <= size. + pow2_leq_size = (uvm_chunk_sizes_mask_t)rounddown_pow_of_two(size); + pow2_leq_size |= pow2_leq_size - 1; + + // Now and them all together to get our list of GPU-supported chunk sizes + // which are aligned to start and will fit within size. + allowed_sizes = chunk_sizes & start_alignments & pow2_leq_size; + + // start and size must always be aligned to at least the smallest supported + // chunk size (PAGE_SIZE). 
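+    //
+    // [Editorial illustration, not part of the original commit] With hypothetical
+    // values start = 0x110000 (17 * 64K), size = 192K and chunk_sizes = {4K, 64K, 2M}:
+    // start_alignments = 0x1FFFF and pow2_leq_size = 0x3FFFF, so allowed_sizes keeps
+    // only 4K and 64K and the function returns 64K.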
+ UVM_ASSERT(allowed_sizes >= PAGE_SIZE); + + // Take the largest allowed size + return uvm_chunk_find_last_size(allowed_sizes); +} + +static size_t block_num_gpu_chunks(uvm_va_block_t *block, uvm_gpu_t *gpu) +{ + return block_gpu_chunk_index(block, gpu, uvm_va_block_cpu_page_index(block, block->end), NULL) + 1; +} + +static size_t block_num_gpu_chunks_range(NvU64 start, NvU64 size, uvm_gpu_t *gpu) +{ + uvm_page_index_t last_page_index = (size_t)((size / PAGE_SIZE) - 1); + return uvm_va_block_gpu_chunk_index_range(start, size, gpu, last_page_index, NULL) + 1; +} + +uvm_gpu_chunk_t *uvm_va_block_lookup_gpu_chunk(uvm_va_block_t *va_block, uvm_gpu_t *gpu, NvU64 address) +{ + size_t chunk_index; + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id); + uvm_page_index_t page_index = uvm_va_block_cpu_page_index(va_block, address); + + uvm_assert_mutex_locked(&va_block->lock); + + if (!gpu_state) + return NULL; + + chunk_index = block_gpu_chunk_index(va_block, gpu, page_index, NULL); + + return gpu_state->chunks[chunk_index]; +} + +NV_STATUS uvm_va_block_create(uvm_va_range_t *va_range, + NvU64 start, + NvU64 end, + uvm_va_block_t **out_block) +{ + uvm_va_block_t *block = NULL; + NvU64 size = end - start + 1; + NV_STATUS status; + + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(end + 1)); + UVM_ASSERT(PAGE_ALIGNED(size)); + UVM_ASSERT(size > 0); + UVM_ASSERT(size <= UVM_VA_BLOCK_SIZE); + + if (va_range) { + // Create a UVM managed va_block. + UVM_ASSERT(start >= va_range->node.start); + UVM_ASSERT(end <= va_range->node.end); + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + } + + // Blocks can't span a block alignment boundary + UVM_ASSERT(UVM_VA_BLOCK_ALIGN_DOWN(start) == UVM_VA_BLOCK_ALIGN_DOWN(end)); + + if (uvm_enable_builtin_tests) { + uvm_va_block_wrapper_t *block_wrapper = nv_kmem_cache_zalloc(g_uvm_va_block_cache, NV_UVM_GFP_FLAGS); + + if (block_wrapper) + block = &block_wrapper->block; + } + else { + block = nv_kmem_cache_zalloc(g_uvm_va_block_cache, NV_UVM_GFP_FLAGS); + } + + if (!block) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + nv_kref_init(&block->kref); + uvm_mutex_init(&block->lock, UVM_LOCK_ORDER_VA_BLOCK); + block->start = start; + block->end = end; + block->va_range = va_range; + uvm_tracker_init(&block->tracker); + + nv_kthread_q_item_init(&block->eviction_mappings_q_item, block_deferred_eviction_mappings_entry, block); + + *out_block = block; + return NV_OK; + +error: + uvm_va_block_release(block); + return status; +} + +static void block_gpu_unmap_phys_all_cpu_pages(uvm_va_block_t *block, uvm_gpu_t *gpu) +{ + uvm_cpu_chunk_t *chunk; + uvm_page_index_t page_index; + + for_each_cpu_chunk_in_block(chunk, page_index, block) { + NvU64 gpu_mapping_addr; + + UVM_ASSERT(chunk); + gpu_mapping_addr = uvm_cpu_chunk_get_gpu_mapping_addr(block, page_index, chunk, gpu->id); + if (gpu_mapping_addr != 0) { + uvm_pmm_sysmem_mappings_remove_gpu_mapping(&gpu->pmm_reverse_sysmem_mappings, gpu_mapping_addr); + uvm_gpu_unmap_cpu_pages(gpu, gpu_mapping_addr, uvm_cpu_chunk_get_size(chunk)); + uvm_cpu_chunk_set_gpu_mapping_addr(block, page_index, chunk, gpu->id, 0); + } + } +} + +static NV_STATUS block_gpu_map_phys_all_cpu_pages(uvm_va_block_t *block, uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_cpu_chunk_t *chunk; + NvU64 block_mapping_size = uvm_va_block_size(block); + uvm_page_index_t page_index; + + UVM_ASSERT(IS_ALIGNED(block_mapping_size, UVM_PAGE_SIZE_4K)); + + for_each_cpu_chunk_in_block(chunk, page_index, block) { + 
uvm_chunk_size_t chunk_size = uvm_cpu_chunk_get_size(chunk); + NvU64 gpu_mapping_addr = uvm_cpu_chunk_get_gpu_mapping_addr(block, page_index, chunk, gpu->id); + + UVM_ASSERT_MSG(gpu_mapping_addr == 0, "GPU%u DMA address 0x%llx\n", uvm_id_value(gpu->id), gpu_mapping_addr); + + status = uvm_gpu_map_cpu_pages(gpu, + uvm_cpu_chunk_get_cpu_page(block, chunk, page_index), + chunk_size, + &gpu_mapping_addr); + if (status != NV_OK) + goto error; + + uvm_cpu_chunk_set_gpu_mapping_addr(block, page_index, chunk, gpu->id, gpu_mapping_addr); + + // In some configurations such as SR-IOV heavy, the chunk cannot be + // referenced using its physical address. Create a kernel mapping. + status = uvm_mmu_sysmem_map(gpu, gpu_mapping_addr, chunk_size); + if (status != NV_OK) + goto error; + + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&gpu->pmm_reverse_sysmem_mappings, + uvm_cpu_chunk_get_gpu_mapping_addr(block, + page_index, + chunk, + gpu->id), + uvm_va_block_cpu_page_address(block, page_index), + chunk_size, + block, + UVM_ID_CPU); + if (status != NV_OK) + goto error; + } + + return NV_OK; + +error: + block_gpu_unmap_phys_all_cpu_pages(block, gpu); + return status; +} + +static NV_STATUS block_sysmem_mappings_add_gpu_chunk(uvm_va_block_t *block, + uvm_gpu_t *local_gpu, + uvm_gpu_chunk_t *chunk, + uvm_gpu_t *accessing_gpu) +{ + NvU64 peer_addr = uvm_pmm_gpu_indirect_peer_addr(&local_gpu->pmm, chunk, accessing_gpu); + return uvm_pmm_sysmem_mappings_add_gpu_chunk_mapping(&accessing_gpu->pmm_reverse_sysmem_mappings, + peer_addr, + block->start + chunk->va_block_page_index * PAGE_SIZE, + uvm_gpu_chunk_get_size(chunk), + block, + local_gpu->id); +} + +static void block_sysmem_mappings_remove_gpu_chunk(uvm_gpu_t *local_gpu, + uvm_gpu_chunk_t *chunk, + uvm_gpu_t *accessing_gpu) +{ + NvU64 peer_addr = uvm_pmm_gpu_indirect_peer_addr(&local_gpu->pmm, chunk, accessing_gpu); + uvm_pmm_sysmem_mappings_remove_gpu_chunk_mapping(&accessing_gpu->pmm_reverse_sysmem_mappings, peer_addr); +} + +static NV_STATUS block_gpu_map_all_chunks_indirect_peer(uvm_va_block_t *block, + uvm_gpu_t *local_gpu, + uvm_gpu_t *accessing_gpu) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, local_gpu->id); + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + size_t num_chunks, i; + NV_STATUS status; + + UVM_ASSERT(uvm_processor_mask_test(&va_space->indirect_peers[uvm_id_value(local_gpu->id)], + accessing_gpu->id)); + + // If no chunks are allocated currently, the mappings will be created later + // at chunk allocation. + if (!gpu_state || !gpu_state->chunks) + return NV_OK; + + num_chunks = block_num_gpu_chunks(block, local_gpu); + for (i = 0; i < num_chunks; i++) { + uvm_gpu_chunk_t *chunk = gpu_state->chunks[i]; + if (!chunk) + continue; + + status = uvm_pmm_gpu_indirect_peer_map(&local_gpu->pmm, chunk, accessing_gpu); + if (status != NV_OK) + goto error; + + status = block_sysmem_mappings_add_gpu_chunk(block, local_gpu, chunk, accessing_gpu); + if (status != NV_OK) + goto error; + } + + return NV_OK; + +error: + while (i-- > 0) { + uvm_gpu_chunk_t *chunk = gpu_state->chunks[i]; + if (chunk) { + // Indirect peer mappings are removed lazily by PMM, so if an error + // occurs the mappings established above will be removed when the + // chunk is freed later on. We only need to remove the sysmem + // reverse mappings. 
+ block_sysmem_mappings_remove_gpu_chunk(local_gpu, chunk, accessing_gpu); + } + } + + return status; +} + +// Mappings for indirect peers are removed lazily by PMM, but we need to remove +// the entries from the reverse map. +static void block_gpu_unmap_all_chunks_indirect_peer(uvm_va_block_t *block, + uvm_gpu_t *local_gpu, + uvm_gpu_t *accessing_gpu) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, local_gpu->id); + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + size_t num_chunks, i; + + UVM_ASSERT(uvm_processor_mask_test(&va_space->indirect_peers[uvm_id_value(local_gpu->id)], + accessing_gpu->id)); + + // Exit if no chunks are allocated currently. + if (!gpu_state || !gpu_state->chunks) + return; + + num_chunks = block_num_gpu_chunks(block, local_gpu); + for (i = 0; i < num_chunks; i++) { + uvm_gpu_chunk_t *chunk = gpu_state->chunks[i]; + if (chunk) + block_sysmem_mappings_remove_gpu_chunk(local_gpu, chunk, accessing_gpu); + } +} + +// Retrieves the gpu_state for the given GPU, allocating it if it doesn't exist +static uvm_va_block_gpu_state_t *block_gpu_state_get_alloc(uvm_va_block_t *block, uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + + if (gpu_state) + return gpu_state; + + gpu_state = nv_kmem_cache_zalloc(g_uvm_va_block_gpu_state_cache, NV_UVM_GFP_FLAGS); + if (!gpu_state) + return NULL; + + gpu_state->chunks = uvm_kvmalloc_zero(block_num_gpu_chunks(block, gpu) * sizeof(gpu_state->chunks[0])); + if (!gpu_state->chunks) + goto error; + + block->gpus[uvm_id_gpu_index(gpu->id)] = gpu_state; + + status = uvm_cpu_chunk_gpu_mapping_alloc(block, gpu->id); + if (status != NV_OK) + goto error; + + status = block_gpu_map_phys_all_cpu_pages(block, gpu); + if (status != NV_OK) + goto error; + + return gpu_state; + +error: + if (gpu_state) { + if (gpu_state->chunks) + uvm_kvfree(gpu_state->chunks); + uvm_cpu_chunk_gpu_mapping_free(block, gpu->id); + kmem_cache_free(g_uvm_va_block_gpu_state_cache, gpu_state); + } + block->gpus[uvm_id_gpu_index(gpu->id)] = NULL; + + return NULL; +} + +static void block_unmap_cpu_chunk_on_gpus(uvm_va_block_t *block, uvm_cpu_chunk_t *chunk, uvm_page_index_t page_index) +{ + uvm_gpu_id_t id; + + for_each_gpu_id(id) { + NvU64 gpu_mapping_addr; + uvm_gpu_t *gpu; + + if (!uvm_va_block_gpu_state_get(block, id)) + continue; + + gpu_mapping_addr = uvm_cpu_chunk_get_gpu_mapping_addr(block, page_index, chunk, id); + if (gpu_mapping_addr == 0) + continue; + + gpu = block_get_gpu(block, id); + uvm_pmm_sysmem_mappings_remove_gpu_mapping(&gpu->pmm_reverse_sysmem_mappings, gpu_mapping_addr); + uvm_gpu_unmap_cpu_pages(gpu, gpu_mapping_addr, uvm_cpu_chunk_get_size(chunk)); + uvm_cpu_chunk_set_gpu_mapping_addr(block, page_index, chunk, id, 0); + } +} + +static NV_STATUS block_map_cpu_chunk_on_gpus(uvm_va_block_t *block, uvm_page_index_t page_index) +{ + NV_STATUS status; + uvm_gpu_id_t id; + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index); + uvm_chunk_size_t chunk_size = uvm_cpu_chunk_get_size(chunk); + uvm_va_block_region_t chunk_region = uvm_va_block_chunk_region(block, chunk_size, page_index); + + UVM_ASSERT(chunk); + + // We can't iterate over va_space->registered_gpus because we might be + // on the eviction path, which does not have the VA space lock held. We have + // the VA block lock held however, so the gpu_states can't change. + uvm_assert_mutex_locked(&block->lock); + + // Only physical chunks can be mapped. 
+ UVM_ASSERT(uvm_cpu_chunk_is_physical(chunk)); + + for_each_gpu_id(id) { + NvU64 gpu_mapping_addr; + uvm_gpu_t *gpu; + + if (!uvm_va_block_gpu_state_get(block, id)) + continue; + + gpu_mapping_addr = uvm_cpu_chunk_get_gpu_mapping_addr(block, page_index, chunk, id); + UVM_ASSERT_MSG(gpu_mapping_addr == 0, "GPU%u DMA address 0x%llx\n", uvm_id_value(id), gpu_mapping_addr); + + gpu = block_get_gpu(block, id); + status = uvm_gpu_map_cpu_pages(gpu, + uvm_cpu_chunk_get_cpu_page(block, chunk, chunk_region.first), + chunk_size, + &gpu_mapping_addr); + if (status != NV_OK) + goto error; + + uvm_cpu_chunk_set_gpu_mapping_addr(block, chunk_region.first, chunk, id, gpu_mapping_addr); + + // In some configurations such as SR-IOV heavy, the chunk cannot be + // referenced using its physical address. Create a kernel mapping. + status = uvm_mmu_sysmem_map(gpu, gpu_mapping_addr, chunk_size); + if (status != NV_OK) + goto error; + + status = uvm_pmm_sysmem_mappings_add_gpu_mapping(&gpu->pmm_reverse_sysmem_mappings, + uvm_cpu_chunk_get_gpu_mapping_addr(block, + chunk_region.first, + chunk, + id), + uvm_va_block_cpu_page_address(block, chunk_region.first), + chunk_size, + block, + UVM_ID_CPU); + if (status != NV_OK) + goto error; + } + + return NV_OK; + +error: + block_unmap_cpu_chunk_on_gpus(block, chunk, page_index); + return status; +} + +// Create physical mappings to allow other GPUs to access this chunk. +static NV_STATUS block_map_indirect_peers_to_gpu_chunk(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_gpu_chunk_t *chunk) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_gpu_t *accessing_gpu, *remove_gpu; + NV_STATUS status; + + // Unlike block_map_cpu_chunk_on_gpus, this function isn't called on the + // eviction path, so we can assume that the VA space is locked. + // + // TODO: Bug 2007346: In the future we may want to enable eviction to peers, + // meaning we may need to allocate peer memory and map it on the + // eviction path. That will require making sure that peers can't be + // enabled or disabled either in the VA space or globally within this + // function. + uvm_assert_rwsem_locked(&va_space->lock); + uvm_assert_mutex_locked(&block->lock); + + for_each_va_space_gpu_in_mask(accessing_gpu, va_space, &va_space->indirect_peers[uvm_id_value(gpu->id)]) { + status = uvm_pmm_gpu_indirect_peer_map(&gpu->pmm, chunk, accessing_gpu); + if (status != NV_OK) + goto error; + + status = block_sysmem_mappings_add_gpu_chunk(block, gpu, chunk, accessing_gpu); + if (status != NV_OK) + goto error; + } + + return NV_OK; + +error: + for_each_va_space_gpu_in_mask(remove_gpu, va_space, &va_space->indirect_peers[uvm_id_value(gpu->id)]) { + if (remove_gpu == accessing_gpu) + break; + + // Indirect peer mappings are removed lazily by PMM, so if an error + // occurs the mappings established above will be removed when the + // chunk is freed later on. We only need to remove the sysmem + // reverse mappings. + block_sysmem_mappings_remove_gpu_chunk(gpu, chunk, remove_gpu); + } + + return status; +} + +static void block_unmap_indirect_peers_from_gpu_chunk(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_gpu_chunk_t *chunk) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_gpu_t *peer_gpu; + + uvm_assert_rwsem_locked(&va_space->lock); + uvm_assert_mutex_locked(&block->lock); + + // Indirect peer mappings are removed lazily by PMM, so we only need to + // remove the sysmem reverse mappings. 
+ for_each_va_space_gpu_in_mask(peer_gpu, va_space, &va_space->indirect_peers[uvm_id_value(gpu->id)]) + block_sysmem_mappings_remove_gpu_chunk(gpu, chunk, peer_gpu); +} + +// Mark a CPU page as dirty. +static void block_mark_cpu_page_dirty(uvm_va_block_t *block, uvm_page_index_t page_index) +{ + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index); + uvm_cpu_chunk_mark_dirty(chunk, page_index); +} + +// Mark a CPU page as clean. +static void block_mark_cpu_page_clean(uvm_va_block_t *block, uvm_page_index_t page_index) +{ + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index); + uvm_cpu_chunk_mark_clean(chunk, page_index); +} + +// Check if a CPU page is dirty. +static bool block_cpu_page_is_dirty(uvm_va_block_t *block, uvm_page_index_t page_index) +{ + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index); + return uvm_cpu_chunk_is_dirty(chunk, page_index); +} + +// Allocates the input page in the block, if it doesn't already exist +// +// Also maps the page for physical access by all GPUs used by the block, which +// is required for IOMMU support. +// +// TODO: Bug 1995015: Optimize this function and its callers to avoid calling for +// each page index. +static NV_STATUS block_populate_page_cpu(uvm_va_block_t *block, uvm_page_index_t page_index, struct mm_struct *mm) +{ + NV_STATUS status; + uvm_cpu_chunk_t *chunk = NULL; + uvm_va_block_test_t *block_test = uvm_va_block_get_test(block); + + if (uvm_page_mask_test(&block->cpu.allocated, page_index)) + return NV_OK; + + UVM_ASSERT(!uvm_page_mask_test(&block->cpu.resident, page_index)); + + // Return out of memory error if the tests have requested it. As opposed to + // other error injection settings, this one is persistent. + if (block_test && block_test->inject_cpu_pages_allocation_error) + return NV_ERR_NO_MEMORY; + + status = uvm_cpu_chunk_alloc(block, page_index, mm, &chunk); + if (status != NV_OK) + goto error; + + status = block_map_cpu_chunk_on_gpus(block, page_index); + +error: + if (status != NV_OK && chunk) { + uvm_cpu_chunk_remove_from_block(block, chunk, page_index); + uvm_cpu_chunk_put(chunk); + } + + return status; +} + +// Try allocating a chunk. If eviction was required, +// NV_ERR_MORE_PROCESSING_REQUIRED will be returned since the block's lock was +// unlocked and relocked. The caller is responsible for adding the chunk to the +// retry used_chunks list. +static NV_STATUS block_alloc_gpu_chunk(uvm_va_block_t *block, + uvm_va_block_retry_t *retry, + uvm_gpu_t *gpu, + uvm_chunk_size_t size, + uvm_gpu_chunk_t **out_gpu_chunk) +{ + NV_STATUS status = NV_OK; + uvm_gpu_chunk_t *gpu_chunk; + + // First try getting a free chunk from previously-made allocations. + gpu_chunk = block_retry_get_free_chunk(retry, gpu, size); + if (!gpu_chunk) { + uvm_va_block_test_t *block_test = uvm_va_block_get_test(block); + if (block_test && block_test->user_pages_allocation_retry_force_count > 0) { + // Force eviction by pretending the allocation failed with no memory + --block_test->user_pages_allocation_retry_force_count; + status = NV_ERR_NO_MEMORY; + } + else { + // Try allocating a new one without eviction + status = uvm_pmm_gpu_alloc_user(&gpu->pmm, 1, size, UVM_PMM_ALLOC_FLAGS_NONE, &gpu_chunk, &retry->tracker); + } + + if (status == NV_ERR_NO_MEMORY) { + // If that fails with no memory, try allocating with eviction and + // return back to the caller immediately so that the operation can + // be restarted. 
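+            //
+            // [Editorial sketch, not part of the original commit] The caller-side
+            // pattern this enables, with a hypothetical alloc_op() standing in for
+            // whichever operation ends up calling block_alloc_gpu_chunk():
+            //
+            //     do {
+            //         status = alloc_op(block, &retry, ...);
+            //     } while (status == NV_ERR_MORE_PROCESSING_REQUIRED);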
+ uvm_mutex_unlock(&block->lock); + + status = uvm_pmm_gpu_alloc_user(&gpu->pmm, 1, size, UVM_PMM_ALLOC_FLAGS_EVICT, &gpu_chunk, &retry->tracker); + if (status == NV_OK) { + block_retry_add_free_chunk(retry, gpu_chunk); + status = NV_ERR_MORE_PROCESSING_REQUIRED; + } + + uvm_mutex_lock(&block->lock); + return status; + } + else if (status != NV_OK) { + return status; + } + } + + *out_gpu_chunk = gpu_chunk; + return NV_OK; +} + +static bool block_gpu_has_page_tables(uvm_va_block_t *block, uvm_gpu_t *gpu) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + + if (!gpu_state) + return false; + + return gpu_state->page_table_range_4k.table || + gpu_state->page_table_range_big.table || + gpu_state->page_table_range_2m.table; +} + +// A helper to get a known-to-be-present GPU VA space given a VA block that's +// locked. In order to use this function, the caller must know that at least one +// of these conditions is true: +// +// 1) The VA space lock is held +// 2) The VA block has active page tables for the GPU +// +// If the VA space lock is held (#1), then the gpu_va_space obviously can't go +// away. +// +// On the eviction path, we don't have a lock on the VA space state. However, +// since remove_gpu_va_space walks each block to unmap the GPU and free GPU page +// tables before destroying the gpu_va_space, we're guaranteed that if this GPU +// has page tables (#2), the gpu_va_space can't go away while we're holding the +// block lock. +static uvm_gpu_va_space_t *uvm_va_block_get_gpu_va_space(uvm_va_block_t *va_block, uvm_gpu_t *gpu) +{ + uvm_gpu_va_space_t *gpu_va_space; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + + UVM_ASSERT(gpu); + + if (!block_gpu_has_page_tables(va_block, gpu)) + uvm_assert_rwsem_locked(&va_space->lock); + + UVM_ASSERT(uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->id)); + + gpu_va_space = va_space->gpu_va_spaces[uvm_id_gpu_index(gpu->id)]; + + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + UVM_ASSERT(gpu_va_space->va_space == va_space); + UVM_ASSERT(gpu_va_space->gpu == gpu); + + return gpu_va_space; +} + +static bool block_gpu_supports_2m(uvm_va_block_t *block, uvm_gpu_t *gpu) +{ + uvm_gpu_va_space_t *gpu_va_space; + + if (uvm_va_block_size(block) < UVM_PAGE_SIZE_2M) + return false; + + UVM_ASSERT(uvm_va_block_size(block) == UVM_PAGE_SIZE_2M); + + gpu_va_space = uvm_va_block_get_gpu_va_space(block, gpu); + return uvm_mmu_page_size_supported(&gpu_va_space->page_tables, UVM_PAGE_SIZE_2M); +} + +NvU32 uvm_va_block_gpu_big_page_size(uvm_va_block_t *va_block, uvm_gpu_t *gpu) +{ + uvm_gpu_va_space_t *gpu_va_space; + + gpu_va_space = uvm_va_block_get_gpu_va_space(va_block, gpu); + return gpu_va_space->page_tables.big_page_size; +} + +static uvm_va_block_region_t range_big_page_region_all(NvU64 start, NvU64 end, NvU32 big_page_size) +{ + NvU64 first_addr = UVM_ALIGN_UP(start, big_page_size); + NvU64 outer_addr = UVM_ALIGN_DOWN(end + 1, big_page_size); + + // The range must fit within a VA block + UVM_ASSERT(UVM_VA_BLOCK_ALIGN_DOWN(start) == UVM_VA_BLOCK_ALIGN_DOWN(end)); + + if (outer_addr <= first_addr) + return uvm_va_block_region(0, 0); + + return uvm_va_block_region((first_addr - start) / PAGE_SIZE, (outer_addr - start) / PAGE_SIZE); +} + +static size_t range_num_big_pages(NvU64 start, NvU64 end, NvU32 big_page_size) +{ + uvm_va_block_region_t region = range_big_page_region_all(start, end, big_page_size); + return 
(size_t)uvm_div_pow2_64(uvm_va_block_region_size(region), big_page_size); +} + +uvm_va_block_region_t uvm_va_block_big_page_region_all(uvm_va_block_t *va_block, NvU32 big_page_size) +{ + return range_big_page_region_all(va_block->start, va_block->end, big_page_size); +} + +size_t uvm_va_block_num_big_pages(uvm_va_block_t *va_block, NvU32 big_page_size) +{ + return range_num_big_pages(va_block->start, va_block->end, big_page_size); +} + +NvU64 uvm_va_block_big_page_addr(uvm_va_block_t *va_block, size_t big_page_index, NvU32 big_page_size) +{ + NvU64 addr = UVM_ALIGN_UP(va_block->start, big_page_size) + (big_page_index * big_page_size); + UVM_ASSERT(addr >= va_block->start); + UVM_ASSERT(addr < va_block->end); + return addr; +} + +uvm_va_block_region_t uvm_va_block_big_page_region(uvm_va_block_t *va_block, size_t big_page_index, NvU32 big_page_size) +{ + NvU64 page_addr = uvm_va_block_big_page_addr(va_block, big_page_index, big_page_size); + + // Assume that we don't have to handle multiple big PTEs per system page. + // It's not terribly difficult to implement, but we don't currently have a + // use case. + UVM_ASSERT(big_page_size >= PAGE_SIZE); + + return uvm_va_block_region_from_start_size(va_block, page_addr, big_page_size); +} + +// Returns the big page index (the bit index within +// uvm_va_block_gpu_state_t::big_ptes) corresponding to page_index. If +// page_index cannot be covered by a big PTE due to alignment or block size, +// MAX_BIG_PAGES_PER_UVM_VA_BLOCK is returned. +size_t uvm_va_block_big_page_index(uvm_va_block_t *va_block, uvm_page_index_t page_index, NvU32 big_page_size) +{ + uvm_va_block_region_t big_region_all = uvm_va_block_big_page_region_all(va_block, big_page_size); + size_t big_index; + + // Note that this condition also handles the case of having no big pages in + // the block, in which case .first >= .outer. + if (page_index < big_region_all.first || page_index >= big_region_all.outer) + return MAX_BIG_PAGES_PER_UVM_VA_BLOCK; + + big_index = (size_t)uvm_div_pow2_64((page_index - big_region_all.first) * PAGE_SIZE, big_page_size); + + UVM_ASSERT(uvm_va_block_big_page_addr(va_block, big_index, big_page_size) >= va_block->start); + UVM_ASSERT(uvm_va_block_big_page_addr(va_block, big_index, big_page_size) + big_page_size <= va_block->end + 1); + + return big_index; +} + +static void uvm_page_mask_init_from_big_ptes(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_page_mask_t *mask_out, + const unsigned long *big_ptes_in) +{ + uvm_va_block_region_t big_region; + size_t big_page_index; + NvU32 big_page_size = uvm_va_block_gpu_big_page_size(block, gpu); + + uvm_page_mask_zero(mask_out); + + for_each_set_bit(big_page_index, big_ptes_in, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) { + big_region = uvm_va_block_big_page_region(block, big_page_index, big_page_size); + uvm_page_mask_region_fill(mask_out, big_region); + } +} + +NvU32 uvm_va_block_page_size_cpu(uvm_va_block_t *va_block, uvm_page_index_t page_index) +{ + if (!uvm_page_mask_test(&va_block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ], page_index)) + return 0; + + UVM_ASSERT(uvm_processor_mask_test(&va_block->mapped, UVM_ID_CPU)); + + // Despite the fact that physical CPU memory can be allocated at sizes + // greater than PAGE_SIZE, vm_insert_page(s)() always maps CPU memory + // with 4K PTEs. Until the core kernel adds support for PMD mappings, + // the return value of this function will remain at PAGE_SIZE. 
+ return PAGE_SIZE; +} + +NvU32 uvm_va_block_page_size_gpu(uvm_va_block_t *va_block, uvm_gpu_id_t gpu_id, uvm_page_index_t page_index) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu_id); + size_t big_page_size, big_page_index; + + if (!gpu_state) + return 0; + + if (!uvm_page_mask_test(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ], page_index)) + return 0; + + UVM_ASSERT(uvm_processor_mask_test(&va_block->mapped, gpu_id)); + + if (gpu_state->pte_is_2m) + return UVM_PAGE_SIZE_2M; + + big_page_size = uvm_va_block_gpu_big_page_size(va_block, block_get_gpu(va_block, gpu_id)); + big_page_index = uvm_va_block_big_page_index(va_block, page_index, big_page_size); + if (big_page_index != MAX_BIG_PAGES_PER_UVM_VA_BLOCK && test_bit(big_page_index, gpu_state->big_ptes)) + return big_page_size; + + return UVM_PAGE_SIZE_4K; +} + +// Get the size of the physical allocation backing the page, or 0 if not +// resident. Note that this is different from uvm_va_block_page_size_* because +// those return the size of the PTE which maps the page index, which may be +// smaller than the physical allocation. +static NvU32 block_phys_page_size(uvm_va_block_t *block, block_phys_page_t page) +{ + uvm_va_block_gpu_state_t *gpu_state; + uvm_chunk_size_t chunk_size; + + if (UVM_ID_IS_CPU(page.processor)) { + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page.page_index); + + if (!uvm_page_mask_test(&block->cpu.resident, page.page_index)) + return 0; + + UVM_ASSERT(uvm_processor_mask_test(&block->resident, UVM_ID_CPU)); + return (NvU32)uvm_cpu_chunk_get_size(chunk); + } + + gpu_state = uvm_va_block_gpu_state_get(block, page.processor); + if (!gpu_state || !uvm_page_mask_test(&gpu_state->resident, page.page_index)) + return 0; + + UVM_ASSERT(uvm_processor_mask_test(&block->resident, page.processor)); + block_gpu_chunk_index(block, block_get_gpu(block, page.processor), page.page_index, &chunk_size); + return (NvU32)chunk_size; +} + +static uvm_pte_bits_cpu_t get_cpu_pte_bit_index(uvm_prot_t prot) +{ + uvm_pte_bits_cpu_t pte_bit_index = UVM_PTE_BITS_CPU_MAX; + + // ATOMIC and WRITE are synonyms for the CPU + if (prot == UVM_PROT_READ_WRITE_ATOMIC || prot == UVM_PROT_READ_WRITE) + pte_bit_index = UVM_PTE_BITS_CPU_WRITE; + else if (prot == UVM_PROT_READ_ONLY) + pte_bit_index = UVM_PTE_BITS_CPU_READ; + else + UVM_ASSERT_MSG(false, "Invalid access permissions %s\n", uvm_prot_string(prot)); + + return pte_bit_index; +} + +static uvm_pte_bits_gpu_t get_gpu_pte_bit_index(uvm_prot_t prot) +{ + uvm_pte_bits_gpu_t pte_bit_index = UVM_PTE_BITS_GPU_MAX; + + if (prot == UVM_PROT_READ_WRITE_ATOMIC) + pte_bit_index = UVM_PTE_BITS_GPU_ATOMIC; + else if (prot == UVM_PROT_READ_WRITE) + pte_bit_index = UVM_PTE_BITS_GPU_WRITE; + else if (prot == UVM_PROT_READ_ONLY) + pte_bit_index = UVM_PTE_BITS_GPU_READ; + else + UVM_ASSERT_MSG(false, "Invalid access permissions %s\n", uvm_prot_string(prot)); + + return pte_bit_index; +} + +uvm_page_mask_t *uvm_va_block_resident_mask_get(uvm_va_block_t *block, uvm_processor_id_t processor) +{ + uvm_va_block_gpu_state_t *gpu_state; + + if (UVM_ID_IS_CPU(processor)) + return &block->cpu.resident; + + gpu_state = uvm_va_block_gpu_state_get(block, processor); + + UVM_ASSERT(gpu_state); + return &gpu_state->resident; +} + +// Get the page residency mask for a processor +// +// Notably this will allocate GPU state if not yet present and if that fails +// NULL is returned. 
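+//
+// [Editorial sketch, not part of the original commit] A typical read-only query
+// built on the non-allocating getter above, assuming the block lock is held and
+// the processor's state (if a GPU) has already been allocated:
+//
+//     bool is_resident = uvm_page_mask_test(uvm_va_block_resident_mask_get(block, processor),
+//                                           page_index);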
+static uvm_page_mask_t *block_resident_mask_get_alloc(uvm_va_block_t *block, uvm_processor_id_t processor) +{ + uvm_va_block_gpu_state_t *gpu_state; + + if (UVM_ID_IS_CPU(processor)) + return &block->cpu.resident; + + gpu_state = block_gpu_state_get_alloc(block, block_get_gpu(block, processor)); + if (!gpu_state) + return NULL; + + return &gpu_state->resident; +} + +static const uvm_page_mask_t *block_map_with_prot_mask_get(uvm_va_block_t *block, + uvm_processor_id_t processor, + uvm_prot_t prot) +{ + uvm_va_block_gpu_state_t *gpu_state; + + if (UVM_ID_IS_CPU(processor)) + return &block->cpu.pte_bits[get_cpu_pte_bit_index(prot)]; + + gpu_state = uvm_va_block_gpu_state_get(block, processor); + + UVM_ASSERT(gpu_state); + return &gpu_state->pte_bits[get_gpu_pte_bit_index(prot)]; +} + +const uvm_page_mask_t *uvm_va_block_map_mask_get(uvm_va_block_t *block, uvm_processor_id_t processor) +{ + return block_map_with_prot_mask_get(block, processor, UVM_PROT_READ_ONLY); +} + +static const uvm_page_mask_t *block_evicted_mask_get(uvm_va_block_t *block, uvm_gpu_id_t gpu_id) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu_id); + UVM_ASSERT(gpu_state); + + return &gpu_state->evicted; +} + +static bool block_is_page_resident_anywhere(uvm_va_block_t *block, uvm_page_index_t page_index) +{ + uvm_processor_id_t id; + for_each_id_in_mask(id, &block->resident) { + if (uvm_page_mask_test(uvm_va_block_resident_mask_get(block, id), page_index)) + return true; + } + + return false; +} + +static bool block_processor_page_is_populated(uvm_va_block_t *block, uvm_processor_id_t proc, uvm_page_index_t page_index) +{ + uvm_va_block_gpu_state_t *gpu_state; + size_t chunk_index; + + if (UVM_ID_IS_CPU(proc)) + return uvm_page_mask_test(&block->cpu.allocated, page_index); + + gpu_state = uvm_va_block_gpu_state_get(block, proc); + if (!gpu_state) + return false; + + chunk_index = block_gpu_chunk_index(block, block_get_gpu(block, proc), page_index, NULL); + return gpu_state->chunks[chunk_index] != NULL; +} + +static bool block_processor_page_is_resident_on(uvm_va_block_t *block, uvm_processor_id_t proc, uvm_page_index_t page_index) +{ + const uvm_page_mask_t *resident_mask; + + if (UVM_ID_IS_CPU(proc)) { + resident_mask = &block->cpu.resident; + } + else { + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, proc); + if (!gpu_state) + return false; + + resident_mask = &gpu_state->resident; + } + + return uvm_page_mask_test(resident_mask, page_index); +} + +void uvm_va_block_region_authorized_gpus(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_prot_t access_permission, + uvm_processor_mask_t *authorized_gpus) +{ + uvm_gpu_id_t gpu_id; + uvm_pte_bits_gpu_t search_gpu_bit = get_gpu_pte_bit_index(access_permission); + + uvm_processor_mask_zero(authorized_gpus); + + // Test all GPUs with mappings on the block + for_each_gpu_id_in_mask(gpu_id, &va_block->mapped) { + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu_id); + if (gpu_state && !uvm_page_mask_region_empty(&gpu_state->pte_bits[search_gpu_bit], region)) + uvm_processor_mask_set(authorized_gpus, gpu_id); + } +} + +void uvm_va_block_region_authorized_processors(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_prot_t access_permission, + uvm_processor_mask_t *authorized_processors) +{ + uvm_pte_bits_cpu_t search_cpu_bit = get_cpu_pte_bit_index(access_permission); + + // Compute GPUs + uvm_va_block_region_authorized_gpus(va_block, region, access_permission, 
authorized_processors); + + // Test CPU + if (uvm_processor_mask_test(&va_block->mapped, UVM_ID_CPU) && + !uvm_page_mask_region_empty(&va_block->cpu.pte_bits[search_cpu_bit], region)) { + uvm_processor_mask_set(authorized_processors, UVM_ID_CPU); + } +} + +void uvm_va_block_page_authorized_gpus(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_prot_t access_permission, + uvm_processor_mask_t *authorized_gpus) +{ + uvm_va_block_region_authorized_gpus(va_block, + uvm_va_block_region_for_page(page_index), + access_permission, + authorized_gpus); +} + +void uvm_va_block_page_authorized_processors(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_prot_t access_permission, + uvm_processor_mask_t *authorized_processors) +{ + uvm_va_block_region_authorized_processors(va_block, + uvm_va_block_region_for_page(page_index), + access_permission, + authorized_processors); +} + +bool uvm_va_block_is_gpu_authorized_on_whole_region(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_gpu_id_t gpu_id, + uvm_prot_t required_prot) +{ + uvm_pte_bits_gpu_t search_gpu_bit = get_gpu_pte_bit_index(required_prot); + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu_id); + + if (!gpu_state) + return false; + + return uvm_page_mask_region_full(&gpu_state->pte_bits[search_gpu_bit], region); +} + +bool uvm_va_block_is_processor_authorized_on_whole_region(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_processor_id_t processor_id, + uvm_prot_t required_prot) +{ + if (UVM_ID_IS_CPU(processor_id)) { + uvm_pte_bits_cpu_t search_cpu_bit = get_cpu_pte_bit_index(required_prot); + + return uvm_page_mask_region_full(&va_block->cpu.pte_bits[search_cpu_bit], region); + } + else { + return uvm_va_block_is_gpu_authorized_on_whole_region(va_block, region, processor_id, required_prot); + } +} + +bool uvm_va_block_page_is_gpu_authorized(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_gpu_id_t gpu_id, + uvm_prot_t required_prot) +{ + return uvm_va_block_is_gpu_authorized_on_whole_region(va_block, + uvm_va_block_region_for_page(page_index), + gpu_id, + required_prot); +} + +bool uvm_va_block_page_is_processor_authorized(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t processor_id, + uvm_prot_t required_prot) +{ + return uvm_va_block_is_processor_authorized_on_whole_region(va_block, + uvm_va_block_region_for_page(page_index), + processor_id, + required_prot); +} + +void uvm_va_block_page_resident_gpus(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_mask_t *resident_gpus) +{ + uvm_gpu_id_t id; + uvm_processor_mask_zero(resident_gpus); + + for_each_gpu_id_in_mask(id, &va_block->resident) { + if (uvm_page_mask_test(uvm_va_block_resident_mask_get(va_block, id), page_index)) { + UVM_ASSERT(block_processor_page_is_populated(va_block, id, page_index)); + uvm_processor_mask_set(resident_gpus, id); + } + } +} + +void uvm_va_block_page_resident_processors(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_mask_t *resident_processors) +{ + uvm_va_block_page_resident_gpus(va_block, page_index, resident_processors); + + if (uvm_page_mask_test(uvm_va_block_resident_mask_get(va_block, UVM_ID_CPU), page_index)) { + UVM_ASSERT(block_processor_page_is_populated(va_block, UVM_ID_CPU, page_index)); + uvm_processor_mask_set(resident_processors, UVM_ID_CPU); + } +} + +NvU32 uvm_va_block_page_resident_processors_count(uvm_va_block_t *va_block, uvm_page_index_t page_index) +{ + 
uvm_processor_mask_t resident_processors; + uvm_va_block_page_resident_processors(va_block, page_index, &resident_processors); + + return uvm_processor_mask_get_count(&resident_processors); +} + +uvm_processor_id_t uvm_va_block_page_get_closest_resident(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t processor) +{ + return uvm_va_block_page_get_closest_resident_in_mask(va_block, page_index, processor, NULL); +} + +uvm_processor_id_t uvm_va_block_page_get_closest_resident_in_mask(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t processor, + const uvm_processor_mask_t *processor_mask) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_processor_mask_t search_mask; + uvm_processor_id_t id; + + if (processor_mask) + uvm_processor_mask_and(&search_mask, processor_mask, &va_block->resident); + else + uvm_processor_mask_copy(&search_mask, &va_block->resident); + + for_each_closest_id(id, &search_mask, processor, va_space) { + if (uvm_page_mask_test(uvm_va_block_resident_mask_get(va_block, id), page_index)) + return id; + } + + return UVM_ID_INVALID; +} + +// We don't track the specific aperture of each mapped page. Instead, we assume +// that each virtual mapping from a given processor always targets the closest +// processor on which that page is resident (with special rules for UVM-Lite). +// +// This function verifies that assumption: before a page becomes resident on a +// new location, assert that no processor has a valid mapping to a farther +// processor on that page. +static bool block_check_resident_proximity(uvm_va_block_t *block, uvm_page_index_t page_index, uvm_processor_id_t new_residency) +{ + uvm_processor_mask_t resident_procs, mapped_procs; + uvm_processor_id_t mapped_id, closest_id; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + uvm_processor_mask_andnot(&mapped_procs, &block->mapped, block_get_uvm_lite_gpus(block)); + + for_each_id_in_mask(mapped_id, &mapped_procs) { + if (!uvm_page_mask_test(uvm_va_block_map_mask_get(block, mapped_id), page_index)) + continue; + + uvm_va_block_page_resident_processors(block, page_index, &resident_procs); + UVM_ASSERT(!uvm_processor_mask_empty(&resident_procs)); + UVM_ASSERT(!uvm_processor_mask_test(&resident_procs, new_residency)); + uvm_processor_mask_set(&resident_procs, new_residency); + closest_id = uvm_processor_mask_find_closest_id(va_space, &resident_procs, mapped_id); + UVM_ASSERT(!uvm_id_equal(closest_id, new_residency)); + } + + return true; +} + +// Returns the processor to which page_index should be mapped on gpu +static uvm_processor_id_t block_gpu_get_processor_to_map(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_page_index_t page_index) +{ + uvm_processor_id_t dest_id; + + // UVM-Lite GPUs can only map pages on the preferred location + if (uvm_processor_mask_test(block_get_uvm_lite_gpus(block), gpu->id)) + return uvm_va_range_get_policy(block->va_range)->preferred_location; + + // Otherwise we always map the closest resident processor + dest_id = uvm_va_block_page_get_closest_resident(block, page_index, gpu->id); + UVM_ASSERT(UVM_ID_IS_VALID(dest_id)); + return dest_id; +} + +// Returns the processor to which page_index should be mapped on mapping_id +static uvm_processor_id_t block_get_processor_to_map(uvm_va_block_t *block, + uvm_processor_id_t mapping_id, + uvm_page_index_t page_index) +{ + + if (UVM_ID_IS_CPU(mapping_id)) + return uvm_va_block_page_get_closest_resident(block, page_index, mapping_id); + + return 
block_gpu_get_processor_to_map(block, block_get_gpu(block, mapping_id), page_index); +} + +static void block_get_mapped_processors(uvm_va_block_t *block, + uvm_processor_id_t resident_id, + uvm_page_index_t page_index, + uvm_processor_mask_t *mapped_procs) +{ + uvm_processor_id_t mapped_id; + + uvm_processor_mask_zero(mapped_procs); + + for_each_id_in_mask(mapped_id, &block->mapped) { + if (uvm_page_mask_test(uvm_va_block_map_mask_get(block, mapped_id), page_index)) { + uvm_processor_id_t to_map_id = block_get_processor_to_map(block, mapped_id, page_index); + + if (uvm_id_equal(to_map_id, resident_id)) + uvm_processor_mask_set(mapped_procs, mapped_id); + } + } +} + +// We use block_gpu_get_processor_to_map to find the destination processor of a +// given GPU mapping. This function is called when the mapping is established to +// sanity check that the destination of the mapping matches the query. +static bool block_check_mapping_residency_region(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_processor_id_t mapping_dest, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + uvm_page_index_t page_index; + for_each_va_block_page_in_region_mask(page_index, page_mask, region) { + NvU64 va = uvm_va_block_cpu_page_address(block, page_index); + uvm_processor_id_t proc_to_map = block_gpu_get_processor_to_map(block, gpu, page_index); + UVM_ASSERT_MSG(uvm_id_equal(mapping_dest, proc_to_map), + "VA 0x%llx on %s: mapping %s, supposed to map %s", + va, + uvm_gpu_name(gpu), + block_processor_name(block, mapping_dest), + block_processor_name(block, proc_to_map)); + } + return true; +} + +static bool block_check_mapping_residency(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_processor_id_t mapping_dest, + const uvm_page_mask_t *page_mask) +{ + return block_check_mapping_residency_region(block, + gpu, + mapping_dest, + uvm_va_block_region_from_block(block), + page_mask); +} + +// Check that there are no mappings targeting resident_id from any processor in +// the block. +static bool block_check_processor_not_mapped(uvm_va_block_t *block, uvm_processor_id_t resident_id) +{ + uvm_processor_id_t mapped_id; + uvm_page_index_t page_index; + + for_each_id_in_mask(mapped_id, &block->mapped) { + const uvm_page_mask_t *map_mask = uvm_va_block_map_mask_get(block, mapped_id); + + for_each_va_block_page_in_mask(page_index, map_mask, block) { + uvm_processor_id_t to_map_id = block_get_processor_to_map(block, mapped_id, page_index); + UVM_ASSERT(!uvm_id_equal(to_map_id, resident_id)); + } + } + + return true; +} + +// Zero all pages of the newly-populated chunk which are not resident anywhere +// else in the system, adding that work to the block's tracker. In all cases, +// this function adds a dependency on passed in tracker to the block's tracker. 
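+//
+// Note that pages which are already resident on some other processor are not
+// zeroed here: their contents will be provided by the subsequent copy. Only
+// pages with no resident copy anywhere need to be scrubbed.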
+static NV_STATUS block_zero_new_gpu_chunk(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_gpu_chunk_t *chunk, + uvm_va_block_region_t chunk_region, + uvm_tracker_t *tracker) +{ + uvm_va_block_gpu_state_t *gpu_state; + NV_STATUS status; + uvm_gpu_address_t memset_addr_base, memset_addr; + uvm_push_t push; + uvm_gpu_id_t id; + uvm_va_block_region_t subregion; + uvm_page_mask_t *zero_mask; + + UVM_ASSERT(uvm_va_block_region_size(chunk_region) == uvm_gpu_chunk_get_size(chunk)); + + if (chunk->is_zero) + return NV_OK; + + gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + zero_mask = kmem_cache_alloc(g_uvm_page_mask_cache, NV_UVM_GFP_FLAGS); + + if (!zero_mask) + return NV_ERR_NO_MEMORY; + + // Tradeoff: zeroing entire chunk vs zeroing only the pages needed for the + // operation. + // + // We may over-zero the page with this approach. For example, we might be + // populating a 2MB chunk because only a single page within that chunk needs + // to be made resident. If we also zero non-resident pages outside of the + // strict region, we could waste the effort if those pages are populated on + // another processor later and migrated here. + // + // We zero all non-resident pages in the chunk anyway for two reasons: + // + // 1) Efficiency. It's better to do all zeros as pipelined transfers once + // rather than scatter them around for each populate operation. + // + // 2) Optimizing the common case of block_populate_gpu_chunk being called + // for already-populated chunks. If we zero once at initial populate, we + // can simply check whether the chunk is present in the array. Otherwise + // we'd have to recompute the "is any page resident" mask every time. + + // Roll up all pages in chunk_region which are resident somewhere + uvm_page_mask_zero(zero_mask); + for_each_id_in_mask(id, &block->resident) + uvm_page_mask_or(zero_mask, zero_mask, uvm_va_block_resident_mask_get(block, id)); + + // If all pages in the chunk are resident somewhere, we don't need to clear + // anything. Just make sure the chunk is tracked properly. + if (uvm_page_mask_region_full(zero_mask, chunk_region)) { + status = uvm_tracker_add_tracker_safe(&block->tracker, tracker); + goto out; + } + + // Complement to get the pages which are not resident anywhere. These + // are the pages which must be zeroed. 
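+ // For example, if only the first page of chunk_region is resident
+ // somewhere, the complemented mask selects every remaining page of the
+ // chunk for the memsets below.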
+ uvm_page_mask_complement(zero_mask, zero_mask); + + if (uvm_mmu_gpu_needs_static_vidmem_mapping(gpu) || uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)) + memset_addr_base = uvm_gpu_address_virtual_from_vidmem_phys(gpu, chunk->address); + else + memset_addr_base = uvm_gpu_address_physical(UVM_APERTURE_VID, chunk->address); + + memset_addr = memset_addr_base; + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_INTERNAL, + tracker, + &push, + "Zero out chunk [0x%llx, 0x%llx) for region [0x%llx, 0x%llx) in va block [0x%llx, 0x%llx)", + chunk->address, + chunk->address + uvm_gpu_chunk_get_size(chunk), + uvm_va_block_region_start(block, chunk_region), + uvm_va_block_region_end(block, chunk_region) + 1, + block->start, + block->end + 1); + if (status != NV_OK) + goto out; + + for_each_va_block_subregion_in_mask(subregion, zero_mask, chunk_region) { + // Pipeline the memsets since they never overlap with each other + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + + // We'll push one membar later for all memsets in this loop + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + + memset_addr.address = memset_addr_base.address + (subregion.first - chunk_region.first) * PAGE_SIZE; + gpu->parent->ce_hal->memset_8(&push, memset_addr, 0, uvm_va_block_region_size(subregion)); + } + + // A membar from this GPU is required between this memset and any PTE write + // pointing this or another GPU to this chunk. Otherwise an engine could + // read the PTE then access the page before the memset write is visible to + // that engine. + // + // This memset writes GPU memory, so local mappings need only a GPU-local + // membar. We can't easily determine here whether a peer GPU will ever map + // this page in the future, so always use a sysmembar. uvm_push_end provides + // one by default. + // + // TODO: Bug 1766424: Use GPU-local membars if no peer can currently map + // this page. When peer access gets enabled, do a MEMBAR_SYS at that + // point. + uvm_push_end(&push); + status = uvm_tracker_add_push_safe(&block->tracker, &push); + +out: + if (zero_mask) + kmem_cache_free(g_uvm_page_mask_cache, zero_mask); + + return status; +} + +static NV_STATUS block_populate_gpu_chunk(uvm_va_block_t *block, + uvm_va_block_retry_t *retry, + uvm_gpu_t *gpu, + size_t chunk_index, + uvm_va_block_region_t chunk_region) +{ + uvm_va_block_gpu_state_t *gpu_state = block_gpu_state_get_alloc(block, gpu); + uvm_gpu_chunk_t *chunk = NULL; + uvm_chunk_size_t chunk_size = uvm_va_block_region_size(chunk_region); + uvm_va_block_test_t *block_test = uvm_va_block_get_test(block); + NV_STATUS status; + + if (!gpu_state) + return NV_ERR_NO_MEMORY; + + uvm_assert_mutex_locked(&block->lock); + UVM_ASSERT(chunk_index < block_num_gpu_chunks(block, gpu)); + UVM_ASSERT(chunk_size & gpu->parent->mmu_user_chunk_sizes); + + // We zero chunks as necessary at initial population, so if the chunk is + // already populated we're done. See the comment in + // block_zero_new_gpu_chunk. + if (gpu_state->chunks[chunk_index]) + return NV_OK; + + UVM_ASSERT(uvm_page_mask_region_empty(&gpu_state->resident, chunk_region)); + + status = block_alloc_gpu_chunk(block, retry, gpu, chunk_size, &chunk); + if (status != NV_OK) + return status; + + // In some configurations such as SR-IOV heavy, the chunk cannot be + // referenced using its physical address. Create a virtual mapping. 
+ status = uvm_mmu_chunk_map(chunk); + if (status != NV_OK) + goto chunk_free; + + status = block_zero_new_gpu_chunk(block, gpu, chunk, chunk_region, &retry->tracker); + if (status != NV_OK) + goto chunk_unmap; + + // It is safe to modify the page index field without holding any PMM locks + // because the chunk is pinned, which means that none of the other fields in + // the bitmap can change. + chunk->va_block_page_index = chunk_region.first; + + // va_block_page_index is a bitfield of size PAGE_SHIFT. Make sure at + // compile-time that it can store VA Block page indexes. + BUILD_BUG_ON(PAGES_PER_UVM_VA_BLOCK >= PAGE_SIZE); + + status = block_map_indirect_peers_to_gpu_chunk(block, gpu, chunk); + if (status != NV_OK) + goto chunk_unmap; + + if (block_test && block_test->inject_populate_error) { + block_test->inject_populate_error = false; + + // Use NV_ERR_MORE_PROCESSING_REQUIRED to force a retry rather than + // causing a fatal OOM failure. + status = NV_ERR_MORE_PROCESSING_REQUIRED; + goto chunk_unmap_indirect_peers; + } + + // Record the used chunk so that it can be unpinned at the end of the whole + // operation. + block_retry_add_used_chunk(retry, chunk); + gpu_state->chunks[chunk_index] = chunk; + + return NV_OK; + +chunk_unmap_indirect_peers: + block_unmap_indirect_peers_from_gpu_chunk(block, gpu, chunk); + +chunk_unmap: + uvm_mmu_chunk_unmap(chunk, &block->tracker); + +chunk_free: + // block_zero_new_gpu_chunk may have pushed memsets on this chunk which it + // placed in the block tracker. + uvm_pmm_gpu_free(&gpu->pmm, chunk, &block->tracker); + + return status; +} + +// Populate all chunks which cover the given region and page mask. +static NV_STATUS block_populate_pages_gpu(uvm_va_block_t *block, + uvm_va_block_retry_t *retry, + uvm_gpu_t *gpu, + uvm_va_block_region_t region, + const uvm_page_mask_t *populate_mask) +{ + uvm_va_block_region_t chunk_region, check_region; + size_t chunk_index; + uvm_page_index_t page_index; + uvm_chunk_size_t chunk_size; + NV_STATUS status; + + page_index = uvm_va_block_first_page_in_mask(region, populate_mask); + if (page_index == region.outer) + return NV_OK; + + chunk_index = block_gpu_chunk_index(block, gpu, page_index, &chunk_size); + chunk_region = uvm_va_block_chunk_region(block, chunk_size, page_index); + + while (1) { + check_region = uvm_va_block_region(max(chunk_region.first, region.first), + min(chunk_region.outer, region.outer)); + page_index = uvm_va_block_first_page_in_mask(check_region, populate_mask); + if (page_index != check_region.outer) { + status = block_populate_gpu_chunk(block, retry, gpu, chunk_index, chunk_region); + if (status != NV_OK) + return status; + } + + if (check_region.outer == region.outer) + break; + + ++chunk_index; + chunk_size = block_gpu_chunk_size(block, gpu, chunk_region.outer); + chunk_region = uvm_va_block_region(chunk_region.outer, chunk_region.outer + (chunk_size / PAGE_SIZE)); + } + + return NV_OK; +} + +static NV_STATUS block_populate_pages(uvm_va_block_t *block, + uvm_va_block_retry_t *retry, + uvm_va_block_context_t *block_context, + uvm_processor_id_t dest_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + NV_STATUS status = NV_OK; + const uvm_page_mask_t *resident_mask = block_resident_mask_get_alloc(block, dest_id); + uvm_page_index_t page_index; + uvm_page_mask_t *populate_page_mask = &block_context->make_resident.page_mask; + uvm_memcg_context_t memcg_context; + + if (!resident_mask) + return NV_ERR_NO_MEMORY; + + if (page_mask) + 
uvm_page_mask_andnot(populate_page_mask, page_mask, resident_mask); + else + uvm_page_mask_complement(populate_page_mask, resident_mask); + + if (UVM_ID_IS_GPU(dest_id)) + return block_populate_pages_gpu(block, retry, block_get_gpu(block, dest_id), region, populate_page_mask); + + uvm_memcg_context_start(&memcg_context, block_context->mm); + + for_each_va_block_page_in_region_mask(page_index, populate_page_mask, region) { + status = block_populate_page_cpu(block, page_index, block_context->mm); + if (status != NV_OK) + break; + } + + uvm_memcg_context_end(&memcg_context); + return status; +} + +static const uvm_processor_mask_t *block_get_can_copy_from_mask(uvm_va_block_t *block, uvm_processor_id_t from) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + return &va_space->can_copy_from[uvm_id_value(from)]; +} + +static bool block_can_copy_from(uvm_va_block_t *va_block, uvm_processor_id_t from, uvm_processor_id_t to) +{ + return uvm_processor_mask_test(block_get_can_copy_from_mask(va_block, to), from); +} + +// Get the chunk containing the given page, along with the offset of that page +// within the chunk. +static uvm_gpu_chunk_t *block_phys_page_chunk(uvm_va_block_t *block, block_phys_page_t block_page, size_t *chunk_offset) +{ + uvm_gpu_t *gpu = block_get_gpu(block, block_page.processor); + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, block_page.processor); + size_t chunk_index; + uvm_gpu_chunk_t *chunk; + uvm_chunk_size_t chunk_size; + + UVM_ASSERT(gpu_state); + + chunk_index = block_gpu_chunk_index(block, gpu, block_page.page_index, &chunk_size); + chunk = gpu_state->chunks[chunk_index]; + UVM_ASSERT(chunk); + + if (chunk_offset) { + size_t page_offset = block_page.page_index - + uvm_va_block_chunk_region(block,chunk_size, block_page.page_index).first; + *chunk_offset = page_offset * PAGE_SIZE; + } + + return chunk; +} + +// Get the physical GPU address of a block's page from the POV of the specified GPU +// This is the address that should be used for making PTEs for the specified GPU. +static uvm_gpu_phys_address_t block_phys_page_address(uvm_va_block_t *block, + block_phys_page_t block_page, + uvm_gpu_t *gpu) +{ + uvm_va_block_gpu_state_t *accessing_gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + size_t chunk_offset; + uvm_gpu_chunk_t *chunk; + + UVM_ASSERT(accessing_gpu_state); + + if (UVM_ID_IS_CPU(block_page.processor)) { + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, block_page.page_index); + NvU64 dma_addr = uvm_cpu_chunk_get_gpu_mapping_addr(block, block_page.page_index, chunk, gpu->id); + uvm_va_block_region_t chunk_region = uvm_va_block_chunk_region(block, + uvm_cpu_chunk_get_size(chunk), + block_page.page_index); + + // The page should be mapped for physical access already as we do that + // eagerly on CPU page population and GPU state alloc. 
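+ // dma_addr is treated here as the mapping address of the chunk's first
+ // page: the addition below offsets it to the requested page within the
+ // chunk.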
+ UVM_ASSERT(dma_addr != 0); + dma_addr += (block_page.page_index - chunk_region.first) * PAGE_SIZE; + + return uvm_gpu_phys_address(UVM_APERTURE_SYS, dma_addr); + } + + chunk = block_phys_page_chunk(block, block_page, &chunk_offset); + + if (uvm_id_equal(block_page.processor, gpu->id)) { + return uvm_gpu_phys_address(UVM_APERTURE_VID, chunk->address + chunk_offset); + } + else { + uvm_gpu_phys_address_t phys_addr; + uvm_gpu_t *owning_gpu = block_get_gpu(block, block_page.processor); + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + UVM_ASSERT(uvm_va_space_peer_enabled(va_space, gpu, owning_gpu)); + phys_addr = uvm_pmm_gpu_peer_phys_address(&owning_gpu->pmm, chunk, gpu); + phys_addr.address += chunk_offset; + return phys_addr; + } +} + +// Get the physical GPU address of a block's page from the POV of the specified +// GPU, suitable for accessing the memory from UVM-internal CE channels. +// +// Notably this is may be different from block_phys_page_address() to handle CE +// limitations in addressing physical memory directly. +static uvm_gpu_address_t block_phys_page_copy_address(uvm_va_block_t *block, + block_phys_page_t block_page, + uvm_gpu_t *gpu) +{ + uvm_gpu_t *owning_gpu; + size_t chunk_offset; + uvm_gpu_chunk_t *chunk; + uvm_gpu_address_t copy_addr; + uvm_va_space_t *va_space; + bool page_in_cpu, page_in_local_gpu; + + UVM_ASSERT_MSG(block_can_copy_from(block, gpu->id, block_page.processor), + "from %s to %s\n", + block_processor_name(block, gpu->id), + block_processor_name(block, block_page.processor)); + + page_in_cpu = UVM_ID_IS_CPU(block_page.processor); + page_in_local_gpu = uvm_id_equal(block_page.processor, gpu->id); + + // CPU and local GPU accesses can rely on block_phys_page_address, but the + // resulting physical address may need to be converted into virtual. + if (page_in_cpu || page_in_local_gpu) { + uvm_gpu_phys_address_t gpu_phys_address = block_phys_page_address(block, block_page, gpu); + + if (page_in_cpu && uvm_mmu_gpu_needs_dynamic_sysmem_mapping(gpu)) + return uvm_gpu_address_virtual_from_sysmem_phys(gpu, gpu_phys_address.address); + + if (page_in_local_gpu && uvm_mmu_gpu_needs_dynamic_vidmem_mapping(gpu)) + return uvm_gpu_address_virtual_from_vidmem_phys(gpu, gpu_phys_address.address); + + return uvm_gpu_address_from_phys(gpu_phys_address); + } + + va_space = uvm_va_block_get_va_space(block); + + // See the comments on the peer_identity_mappings_supported assignments in + // the HAL for why we disable direct copies between peers. + owning_gpu = block_get_gpu(block, block_page.processor); + + UVM_ASSERT(uvm_va_space_peer_enabled(va_space, gpu, owning_gpu)); + + chunk = block_phys_page_chunk(block, block_page, &chunk_offset); + copy_addr = uvm_pmm_gpu_peer_copy_address(&owning_gpu->pmm, chunk, gpu); + copy_addr.address += chunk_offset; + return copy_addr; +} + +uvm_gpu_phys_address_t uvm_va_block_gpu_phys_page_address(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_gpu_t *gpu) +{ + uvm_assert_mutex_locked(&va_block->lock); + + return block_phys_page_address(va_block, block_phys_page(gpu->id, page_index), gpu); +} + +// Begin a push appropriate for copying data from src_id processor to dst_id processor. +// One of src_id and dst_id needs to be a GPU. 
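+//
+// The channel type follows the copy direction: CPU_TO_GPU or GPU_TO_CPU when
+// one end is the CPU, and GPU_TO_GPU otherwise, in which case the push is
+// done on the source GPU (see the comment about P2P over PCI-E below).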
+static NV_STATUS block_copy_begin_push(uvm_va_block_t *va_block, + uvm_processor_id_t dst_id, + uvm_processor_id_t src_id, + uvm_tracker_t *tracker, + uvm_push_t *push) +{ + uvm_channel_type_t channel_type; + uvm_gpu_t *gpu; + + UVM_ASSERT_MSG(!uvm_id_equal(src_id, dst_id), + "Unexpected copy to self, processor %s\n", + block_processor_name(va_block, src_id)); + + if (UVM_ID_IS_CPU(src_id)) { + gpu = block_get_gpu(va_block, dst_id); + channel_type = UVM_CHANNEL_TYPE_CPU_TO_GPU; + } + else if (UVM_ID_IS_CPU(dst_id)) { + gpu = block_get_gpu(va_block, src_id); + channel_type = UVM_CHANNEL_TYPE_GPU_TO_CPU; + } + else { + // For GPU to GPU copies, prefer to "push" the data from the source as + // that works better at least for P2P over PCI-E. + gpu = block_get_gpu(va_block, src_id); + + channel_type = UVM_CHANNEL_TYPE_GPU_TO_GPU; + } + + UVM_ASSERT_MSG(block_can_copy_from(va_block, gpu->id, dst_id), + "GPU %s dst %s src %s\n", + block_processor_name(va_block, gpu->id), + block_processor_name(va_block, dst_id), + block_processor_name(va_block, src_id)); + UVM_ASSERT_MSG(block_can_copy_from(va_block, gpu->id, src_id), + "GPU %s dst %s src %s\n", + block_processor_name(va_block, gpu->id), + block_processor_name(va_block, dst_id), + block_processor_name(va_block, src_id)); + + if (channel_type == UVM_CHANNEL_TYPE_GPU_TO_GPU) { + uvm_gpu_t *dst_gpu = block_get_gpu(va_block, dst_id); + return uvm_push_begin_acquire_gpu_to_gpu(gpu->channel_manager, + dst_gpu, + tracker, + push, + "Copy from %s to %s for block [0x%llx, 0x%llx]", + block_processor_name(va_block, src_id), + block_processor_name(va_block, dst_id), + va_block->start, + va_block->end); + } + + return uvm_push_begin_acquire(gpu->channel_manager, + channel_type, + tracker, + push, + "Copy from %s to %s for block [0x%llx, 0x%llx]", + block_processor_name(va_block, src_id), + block_processor_name(va_block, dst_id), + va_block->start, + va_block->end); +} + +// A page is clean iff... +// the destination is the preferred location and +// the source is the CPU and +// the destination does not support faults/eviction and +// the CPU page is not dirty +static bool block_page_is_clean(uvm_va_block_t *block, + uvm_processor_id_t dst_id, + uvm_processor_id_t src_id, + uvm_page_index_t page_index) +{ + return !uvm_va_block_is_hmm(block) && + uvm_id_equal(dst_id, uvm_va_range_get_policy(block->va_range)->preferred_location) && + UVM_ID_IS_CPU(src_id) && + !block_get_gpu(block, dst_id)->parent->isr.replayable_faults.handling && + !block_cpu_page_is_dirty(block, page_index); +} + +// When the destination is the CPU... +// if the source is the preferred location, mark as clean +// otherwise, mark as dirty +static void block_update_page_dirty_state(uvm_va_block_t *block, + uvm_processor_id_t dst_id, + uvm_processor_id_t src_id, + uvm_page_index_t page_index) +{ + if (UVM_ID_IS_GPU(dst_id) || uvm_va_block_is_hmm(block)) + return; + + if (uvm_id_equal(src_id, uvm_va_range_get_policy(block->va_range)->preferred_location)) + block_mark_cpu_page_clean(block, page_index); + else + block_mark_cpu_page_dirty(block, page_index); +} + +static void block_mark_memory_used(uvm_va_block_t *block, uvm_processor_id_t id) +{ + uvm_gpu_t *gpu; + + if (UVM_ID_IS_CPU(id)) + return; + + gpu = block_get_gpu(block, id); + + // If the block is of the max size and the GPU supports eviction, mark the + // root chunk as used in PMM. 
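+ // This feeds the PMM eviction heuristics; see the "Update eviction
+ // heuristics" comments in uvm_va_block_make_resident() further below.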
+ if (uvm_va_block_size(block) == UVM_CHUNK_SIZE_MAX && uvm_gpu_supports_eviction(gpu)) { + // The chunk has to be there if this GPU is resident + UVM_ASSERT(uvm_processor_mask_test(&block->resident, id)); + uvm_pmm_gpu_mark_root_chunk_used(&gpu->pmm, uvm_va_block_gpu_state_get(block, gpu->id)->chunks[0]); + } +} + +static void block_set_resident_processor(uvm_va_block_t *block, uvm_processor_id_t id) +{ + UVM_ASSERT(!uvm_page_mask_empty(uvm_va_block_resident_mask_get(block, id))); + + if (uvm_processor_mask_test_and_set(&block->resident, id)) + return; + + block_mark_memory_used(block, id); +} + +static void block_clear_resident_processor(uvm_va_block_t *block, uvm_processor_id_t id) +{ + uvm_gpu_t *gpu; + + UVM_ASSERT(uvm_page_mask_empty(uvm_va_block_resident_mask_get(block, id))); + + if (!uvm_processor_mask_test_and_clear(&block->resident, id)) + return; + + if (UVM_ID_IS_CPU(id)) + return; + + gpu = block_get_gpu(block, id); + + // If the block is of the max size and the GPU supports eviction, mark the + // root chunk as unused in PMM. + if (uvm_va_block_size(block) == UVM_CHUNK_SIZE_MAX && uvm_gpu_supports_eviction(gpu)) { + // The chunk may not be there any more when residency is cleared. + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + if (gpu_state && gpu_state->chunks[0]) + uvm_pmm_gpu_mark_root_chunk_unused(&gpu->pmm, gpu_state->chunks[0]); + } +} + +typedef enum +{ + BLOCK_TRANSFER_MODE_INTERNAL_MOVE = 1, + BLOCK_TRANSFER_MODE_INTERNAL_COPY = 2, + BLOCK_TRANSFER_MODE_INTERNAL_MOVE_TO_STAGE = 3, + BLOCK_TRANSFER_MODE_INTERNAL_MOVE_FROM_STAGE = 4, + BLOCK_TRANSFER_MODE_INTERNAL_COPY_TO_STAGE = 5, + BLOCK_TRANSFER_MODE_INTERNAL_COPY_FROM_STAGE = 6 +} block_transfer_mode_internal_t; + +static uvm_va_block_transfer_mode_t get_block_transfer_mode_from_internal(block_transfer_mode_internal_t transfer_mode) +{ + switch (transfer_mode) { + case BLOCK_TRANSFER_MODE_INTERNAL_MOVE: + case BLOCK_TRANSFER_MODE_INTERNAL_MOVE_TO_STAGE: + case BLOCK_TRANSFER_MODE_INTERNAL_MOVE_FROM_STAGE: + return UVM_VA_BLOCK_TRANSFER_MODE_MOVE; + + case BLOCK_TRANSFER_MODE_INTERNAL_COPY: + case BLOCK_TRANSFER_MODE_INTERNAL_COPY_TO_STAGE: + case BLOCK_TRANSFER_MODE_INTERNAL_COPY_FROM_STAGE: + return UVM_VA_BLOCK_TRANSFER_MODE_COPY; + } + + UVM_ASSERT_MSG(0, "Invalid transfer mode %u\n", transfer_mode); + return 0; +} + +static bool block_phys_copy_contig_check(uvm_va_block_t *block, + uvm_page_index_t page_index, + const uvm_gpu_address_t *base_address, + uvm_processor_id_t proc_id, + uvm_gpu_t *copying_gpu) +{ + uvm_gpu_address_t page_address; + uvm_gpu_address_t contig_address = *base_address; + + contig_address.address += page_index * PAGE_SIZE; + + page_address = block_phys_page_copy_address(block, block_phys_page(proc_id, page_index), copying_gpu); + + return uvm_gpu_addr_cmp(page_address, contig_address) == 0; +} + +// Check if the VA block has a single physically-contiguous chunk of storage +// on the processor. 
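+//
+// For a GPU this means the block is backed by a single GPU chunk covering the
+// whole block; for the CPU it means the first CPU chunk is at least as large
+// as the block.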
+static bool is_block_phys_contig(uvm_va_block_t *block, uvm_processor_id_t id) +{ + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_first_in_block(block, NULL); + + if (UVM_ID_IS_GPU(id)) + return uvm_va_block_size(block) == block_gpu_chunk_size(block, block_get_gpu(block, id), 0); + + return chunk && (uvm_va_block_size(block) <= uvm_cpu_chunk_get_size(chunk)); +} + +static uvm_va_block_region_t block_phys_contig_region(uvm_va_block_t *block, + uvm_page_index_t page_index, + uvm_processor_id_t resident_id) +{ + if (UVM_ID_IS_CPU(resident_id)) { + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index); + return uvm_va_block_region(page_index, page_index + uvm_cpu_chunk_num_pages(chunk)); + } + else { + uvm_chunk_size_t chunk_size; + (void)block_gpu_chunk_index(block, block_get_gpu(block, resident_id), page_index, &chunk_size); + return uvm_va_block_chunk_region(block, chunk_size, page_index); + } +} + +// Copies pages resident on the src_id processor to the dst_id processor +// +// The function adds the pages that were successfully copied to the output +// migrated_pages mask and returns the number of pages in copied_pages. These +// fields are reliable even if an error is returned. +// +// Acquires the block's tracker and adds all of its pushes to the copy_tracker. +static NV_STATUS block_copy_resident_pages_between(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_processor_id_t dst_id, + uvm_processor_id_t src_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + const uvm_page_mask_t *prefetch_page_mask, + block_transfer_mode_internal_t transfer_mode, + uvm_page_mask_t *migrated_pages, + NvU32 *copied_pages, + uvm_tracker_t *copy_tracker) +{ + NV_STATUS tracker_status, status = NV_OK; + uvm_page_mask_t *src_resident_mask = uvm_va_block_resident_mask_get(block, src_id); + uvm_page_mask_t *dst_resident_mask = uvm_va_block_resident_mask_get(block, dst_id); + uvm_gpu_t *copying_gpu = NULL; + uvm_push_t push; + uvm_page_index_t page_index; + uvm_page_index_t contig_start_index = region.outer; + uvm_page_index_t last_index = region.outer; + uvm_page_mask_t *copy_mask = &block_context->make_resident.copy_resident_pages_between_mask; + uvm_range_group_range_t *rgr = NULL; + bool rgr_has_changed = false; + uvm_make_resident_cause_t cause = block_context->make_resident.cause; + uvm_make_resident_cause_t contig_cause = cause; + const bool may_prefetch = (cause == UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT || + cause == UVM_MAKE_RESIDENT_CAUSE_NON_REPLAYABLE_FAULT || + cause == UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER) && !!prefetch_page_mask; + const bool is_src_phys_contig = is_block_phys_contig(block, src_id); + const bool is_dst_phys_contig = is_block_phys_contig(block, dst_id); + uvm_gpu_address_t contig_src_address = {0}; + uvm_gpu_address_t contig_dst_address = {0}; + uvm_va_range_t *va_range = block->va_range; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + const uvm_va_block_transfer_mode_t block_transfer_mode = get_block_transfer_mode_from_internal(transfer_mode); + + *copied_pages = 0; + + if (uvm_id_equal(dst_id, src_id)) + return NV_OK; + + uvm_page_mask_init_from_region(copy_mask, region, src_resident_mask); + + if (page_mask) + uvm_page_mask_and(copy_mask, copy_mask, page_mask); + + // If there are not pages to be copied, exit early + if (!uvm_page_mask_andnot(copy_mask, copy_mask, dst_resident_mask)) + return NV_OK; + + // uvm_range_group_range_iter_first should only be called when the va_space + // lock is held, 
which is always the case unless an eviction is taking + // place. + if (cause != UVM_MAKE_RESIDENT_CAUSE_EVICTION) { + rgr = uvm_range_group_range_iter_first(va_space, + uvm_va_block_region_start(block, region), + uvm_va_block_region_end(block, region)); + rgr_has_changed = true; + } + + for_each_va_block_page_in_region_mask(page_index, copy_mask, region) { + NvU64 page_start = uvm_va_block_cpu_page_address(block, page_index); + uvm_make_resident_cause_t page_cause = (may_prefetch && uvm_page_mask_test(prefetch_page_mask, page_index))? + UVM_MAKE_RESIDENT_CAUSE_PREFETCH: + cause; + + UVM_ASSERT(block_check_resident_proximity(block, page_index, dst_id)); + + if (UVM_ID_IS_CPU(dst_id)) { + uvm_memcg_context_t memcg_context; + + // To support staging through CPU, populate CPU pages on demand. + // GPU destinations should have their pages populated already, but + // that might change if we add staging through GPUs. + uvm_memcg_context_start(&memcg_context, block_context->mm); + status = block_populate_page_cpu(block, page_index, block_context->mm); + uvm_memcg_context_end(&memcg_context); + if (status != NV_OK) + break; + } + + UVM_ASSERT(block_processor_page_is_populated(block, dst_id, page_index)); + + // If we're not evicting and we're migrating away from the preferred + // location, then we should add the range group range to the list of + // migrated ranges in the range group. It's safe to skip this because + // the use of range_group's migrated_ranges list is a UVM-Lite + // optimization - eviction is not supported on UVM-Lite GPUs. + if (cause != UVM_MAKE_RESIDENT_CAUSE_EVICTION && !uvm_va_block_is_hmm(block) && + uvm_id_equal(src_id, uvm_va_range_get_policy(va_range)->preferred_location)) { + // rgr_has_changed is used to minimize the number of times the + // migrated_ranges_lock is taken. It is set to false when the range + // group range pointed by rgr is added to the migrated_ranges list, + // and it is just set back to true when we move to a different + // range group range. + + // The current page could be after the end of rgr. Iterate over the + // range group ranges until rgr's end location is greater than or + // equal to the current page. + while (rgr && rgr->node.end < page_start) { + rgr = uvm_range_group_range_iter_next(va_space, rgr, uvm_va_block_region_end(block, region)); + rgr_has_changed = true; + } + + // Check whether the current page lies within rgr. A single page + // must entirely reside within a range group range. Since we've + // incremented rgr until its end is higher than page_start, we now + // check if page_start lies within rgr. + if (rgr && rgr_has_changed && page_start >= rgr->node.start && page_start <= rgr->node.end) { + uvm_spin_lock(&rgr->range_group->migrated_ranges_lock); + if (list_empty(&rgr->range_group_migrated_list_node)) + list_move_tail(&rgr->range_group_migrated_list_node, &rgr->range_group->migrated_ranges); + uvm_spin_unlock(&rgr->range_group->migrated_ranges_lock); + + rgr_has_changed = false; + } + } + + // No need to copy pages that haven't changed. 
Just clear residency + // information + if (block_page_is_clean(block, dst_id, src_id, page_index)) + continue; + + if (!copying_gpu) { + status = block_copy_begin_push(block, dst_id, src_id, &block->tracker, &push); + if (status != NV_OK) + break; + copying_gpu = uvm_push_get_gpu(&push); + + // Record all processors involved in the copy + uvm_processor_mask_set(&block_context->make_resident.all_involved_processors, copying_gpu->id); + uvm_processor_mask_set(&block_context->make_resident.all_involved_processors, dst_id); + uvm_processor_mask_set(&block_context->make_resident.all_involved_processors, src_id); + + // This function is called just once per VA block and needs to + // receive the "main" cause for the migration (it mainly checks if + // we are in the eviction path). Therefore, we pass cause instead + // of contig_cause + uvm_tools_record_block_migration_begin(block, &push, dst_id, src_id, page_start, cause); + } + else { + uvm_push_set_flag(&push, UVM_PUSH_FLAG_CE_NEXT_PIPELINED); + } + + block_update_page_dirty_state(block, dst_id, src_id, page_index); + + if (last_index == region.outer) { + contig_start_index = page_index; + contig_cause = page_cause; + + // Computing the physical address is a non-trivial operation and + // seems to be a performance limiter on systems with 2 or more + // NVLINK links. Therefore, for physically-contiguous block + // storage, we cache the start address and compute the page address + // using the page index. + if (is_src_phys_contig) + contig_src_address = block_phys_page_copy_address(block, block_phys_page(src_id, 0), copying_gpu); + if (is_dst_phys_contig) + contig_dst_address = block_phys_page_copy_address(block, block_phys_page(dst_id, 0), copying_gpu); + } + else if ((page_index != last_index + 1) || contig_cause != page_cause) { + uvm_va_block_region_t contig_region = uvm_va_block_region(contig_start_index, last_index + 1); + size_t contig_region_size = uvm_va_block_region_size(contig_region); + UVM_ASSERT(uvm_va_block_region_contains_region(region, contig_region)); + + // If both src and dst are physically-contiguous, consolidate copies + // of contiguous pages into a single method. 
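+ // "Single method" means one CE memcopy covering the whole contiguous run
+ // [contig_start_index, last_index] rather than one PAGE_SIZE copy per
+ // page.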
+ if (is_src_phys_contig && is_dst_phys_contig) { + uvm_gpu_address_t src_address = contig_src_address; + uvm_gpu_address_t dst_address = contig_dst_address; + + src_address.address += contig_start_index * PAGE_SIZE; + dst_address.address += contig_start_index * PAGE_SIZE; + + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + + + + + + copying_gpu->parent->ce_hal->memcopy(&push, dst_address, src_address, contig_region_size); + } + + uvm_perf_event_notify_migration(&va_space->perf_events, + &push, + block, + dst_id, + src_id, + uvm_va_block_region_start(block, contig_region), + contig_region_size, + block_transfer_mode, + contig_cause, + &block_context->make_resident); + + contig_start_index = page_index; + contig_cause = page_cause; + } + + if (is_src_phys_contig) + UVM_ASSERT(block_phys_copy_contig_check(block, page_index, &contig_src_address, src_id, copying_gpu)); + if (is_dst_phys_contig) + UVM_ASSERT(block_phys_copy_contig_check(block, page_index, &contig_dst_address, dst_id, copying_gpu)); + + if (!is_src_phys_contig || !is_dst_phys_contig) { + uvm_gpu_address_t src_address; + uvm_gpu_address_t dst_address; + + if (is_src_phys_contig) { + src_address = contig_src_address; + src_address.address += page_index * PAGE_SIZE; + } + else { + src_address = block_phys_page_copy_address(block, block_phys_page(src_id, page_index), copying_gpu); + } + + if (is_dst_phys_contig) { + dst_address = contig_dst_address; + dst_address.address += page_index * PAGE_SIZE; + } + else { + dst_address = block_phys_page_copy_address(block, block_phys_page(dst_id, page_index), copying_gpu); + } + + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + copying_gpu->parent->ce_hal->memcopy(&push, dst_address, src_address, PAGE_SIZE); + } + + last_index = page_index; + } + + // Copy the remaining pages + if (copying_gpu) { + uvm_va_block_region_t contig_region = uvm_va_block_region(contig_start_index, last_index + 1); + size_t contig_region_size = uvm_va_block_region_size(contig_region); + UVM_ASSERT(uvm_va_block_region_contains_region(region, contig_region)); + + if (is_src_phys_contig && is_dst_phys_contig) { + uvm_gpu_address_t src_address = contig_src_address; + uvm_gpu_address_t dst_address = contig_dst_address; + + src_address.address += contig_start_index * PAGE_SIZE; + dst_address.address += contig_start_index * PAGE_SIZE; + + uvm_push_set_flag(&push, UVM_PUSH_FLAG_NEXT_MEMBAR_NONE); + copying_gpu->parent->ce_hal->memcopy(&push, dst_address, src_address, contig_region_size); + } + + uvm_perf_event_notify_migration(&va_space->perf_events, + &push, + block, + dst_id, + src_id, + uvm_va_block_region_start(block, contig_region), + contig_region_size, + block_transfer_mode, + contig_cause, + &block_context->make_resident); + + // TODO: Bug 1766424: If the destination is a GPU and the copy was done + // by that GPU, use a GPU-local membar if no peer can currently + // map this page. When peer access gets enabled, do a MEMBAR_SYS + // at that point. 
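+ // uvm_push_end() provides a sysmembar by default (see the membar
+ // discussion in block_zero_new_gpu_chunk() above), which covers the
+ // conservative case described in the TODO.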
+ uvm_push_end(&push); + tracker_status = uvm_tracker_add_push_safe(copy_tracker, &push); + if (status == NV_OK) + status = tracker_status; + } + + // Update VA block status bits + // + // Only update the bits for the pages that succeded + if (status != NV_OK) + uvm_page_mask_region_clear(copy_mask, uvm_va_block_region(page_index, PAGES_PER_UVM_VA_BLOCK)); + + *copied_pages = uvm_page_mask_weight(copy_mask); + + if (*copied_pages) { + uvm_page_mask_or(migrated_pages, migrated_pages, copy_mask); + + uvm_page_mask_or(dst_resident_mask, dst_resident_mask, copy_mask); + block_set_resident_processor(block, dst_id); + + if (transfer_mode == BLOCK_TRANSFER_MODE_INTERNAL_MOVE_FROM_STAGE) { + // Check whether there are any resident pages left on src + if (!uvm_page_mask_andnot(src_resident_mask, src_resident_mask, copy_mask)) + block_clear_resident_processor(block, src_id); + } + + // If we are staging the copy due to read duplication, we keep the copy there + if (transfer_mode == BLOCK_TRANSFER_MODE_INTERNAL_COPY || + transfer_mode == BLOCK_TRANSFER_MODE_INTERNAL_COPY_TO_STAGE) + uvm_page_mask_or(&block->read_duplicated_pages, &block->read_duplicated_pages, copy_mask); + + if (transfer_mode == BLOCK_TRANSFER_MODE_INTERNAL_COPY_FROM_STAGE) + UVM_ASSERT(uvm_page_mask_subset(copy_mask, &block->read_duplicated_pages)); + + // Any move operation implies that mappings have been removed from all + // non-UVM-Lite GPUs + if (transfer_mode == BLOCK_TRANSFER_MODE_INTERNAL_MOVE || + transfer_mode == BLOCK_TRANSFER_MODE_INTERNAL_MOVE_TO_STAGE) + uvm_page_mask_andnot(&block->maybe_mapped_pages, &block->maybe_mapped_pages, copy_mask); + + // Record ReadDuplicate events here, after the residency bits have been + // updated + if (block_transfer_mode == UVM_VA_BLOCK_TRANSFER_MODE_COPY) + uvm_tools_record_read_duplicate(block, dst_id, region, copy_mask); + + // If we are migrating due to an eviction, set the GPU as evicted and + // mark the evicted pages. If we are migrating away from the CPU this + // means that those pages are not evicted. + if (cause == UVM_MAKE_RESIDENT_CAUSE_EVICTION) { + uvm_va_block_gpu_state_t *src_gpu_state = uvm_va_block_gpu_state_get(block, src_id); + UVM_ASSERT(src_gpu_state); + UVM_ASSERT(UVM_ID_IS_CPU(dst_id)); + + uvm_page_mask_or(&src_gpu_state->evicted, &src_gpu_state->evicted, copy_mask); + uvm_processor_mask_set(&block->evicted_gpus, src_id); + } + else if (UVM_ID_IS_GPU(dst_id) && uvm_processor_mask_test(&block->evicted_gpus, dst_id)) { + uvm_va_block_gpu_state_t *dst_gpu_state = uvm_va_block_gpu_state_get(block, dst_id); + UVM_ASSERT(dst_gpu_state); + + if (!uvm_page_mask_andnot(&dst_gpu_state->evicted, &dst_gpu_state->evicted, copy_mask)) + uvm_processor_mask_clear(&block->evicted_gpus, dst_id); + } + } + + return status; +} + +// Copy resident pages to the destination from all source processors in the +// src_processor_mask +// +// The function adds the pages that were successfully copied to the output +// migrated_pages mask and returns the number of pages in copied_pages. These +// fields are reliable even if an error is returned. 
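+//
+// Source processors are visited in order of increasing distance from dst_id
+// (see for_each_closest_id), so the closest resident copy of each page is
+// preferred.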
+static NV_STATUS block_copy_resident_pages_mask(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_processor_id_t dst_id, + const uvm_processor_mask_t *src_processor_mask, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + const uvm_page_mask_t *prefetch_page_mask, + block_transfer_mode_internal_t transfer_mode, + NvU32 max_pages_to_copy, + uvm_page_mask_t *migrated_pages, + NvU32 *copied_pages_out, + uvm_tracker_t *tracker_out) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_processor_id_t src_id; + uvm_processor_mask_t search_mask; + + uvm_processor_mask_copy(&search_mask, src_processor_mask); + + *copied_pages_out = 0; + + for_each_closest_id(src_id, &search_mask, dst_id, va_space) { + NV_STATUS status; + NvU32 copied_pages_from_src; + + UVM_ASSERT(!uvm_id_equal(src_id, dst_id)); + + status = block_copy_resident_pages_between(block, + block_context, + dst_id, + src_id, + region, + page_mask, + prefetch_page_mask, + transfer_mode, + migrated_pages, + &copied_pages_from_src, + tracker_out); + *copied_pages_out += copied_pages_from_src; + UVM_ASSERT(*copied_pages_out <= max_pages_to_copy); + + if (status != NV_OK) + return status; + + // Break out once we copied max pages already + if (*copied_pages_out == max_pages_to_copy) + break; + } + + return NV_OK; +} + +static void break_read_duplication_in_region(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_processor_id_t dst_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + uvm_processor_id_t id; + uvm_page_mask_t *break_pages_in_region = &block_context->scratch_page_mask; + + uvm_page_mask_init_from_region(break_pages_in_region, region, page_mask); + + UVM_ASSERT(uvm_page_mask_subset(break_pages_in_region, uvm_va_block_resident_mask_get(block, dst_id))); + + // Clear read_duplicated bit for all pages in region + uvm_page_mask_andnot(&block->read_duplicated_pages, &block->read_duplicated_pages, break_pages_in_region); + + // Clear residency bits for all processors other than dst_id + for_each_id_in_mask(id, &block->resident) { + uvm_page_mask_t *other_resident_mask; + + if (uvm_id_equal(id, dst_id)) + continue; + + other_resident_mask = uvm_va_block_resident_mask_get(block, id); + + if (!uvm_page_mask_andnot(other_resident_mask, other_resident_mask, break_pages_in_region)) + block_clear_resident_processor(block, id); + } +} + +static void block_copy_set_first_touch_residency(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_processor_id_t dst_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + uvm_page_index_t page_index; + uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(block, dst_id); + uvm_page_mask_t *first_touch_mask = &block_context->make_resident.page_mask; + + if (page_mask) + uvm_page_mask_andnot(first_touch_mask, page_mask, resident_mask); + else + uvm_page_mask_complement(first_touch_mask, resident_mask); + + uvm_page_mask_region_clear_outside(first_touch_mask, region); + + for_each_va_block_page_in_mask(page_index, first_touch_mask, block) { + UVM_ASSERT(!block_is_page_resident_anywhere(block, page_index)); + UVM_ASSERT(block_processor_page_is_populated(block, dst_id, page_index)); + UVM_ASSERT(block_check_resident_proximity(block, page_index, dst_id)); + } + + uvm_page_mask_or(resident_mask, resident_mask, first_touch_mask); + if (!uvm_page_mask_empty(resident_mask)) + block_set_resident_processor(block, dst_id); + + // Add them to the output mask, too + 
uvm_page_mask_or(&block_context->make_resident.pages_changed_residency, + &block_context->make_resident.pages_changed_residency, + first_touch_mask); +} + +// Copy resident pages from other processors to the destination and mark any +// pages not resident anywhere as resident on the destination. +// All the pages on the destination need to be populated by the caller first. +// Pages not resident anywhere else need to be zeroed out as well. +// +// If UVM_VA_BLOCK_TRANSFER_MODE_COPY is passed, processors that already have a +// copy of the page will keep it. Conversely, if UVM_VA_BLOCK_TRANSFER_MODE_MOVE +// is passed, the page will no longer be resident in any processor other than dst_id. +static NV_STATUS block_copy_resident_pages(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_processor_id_t dst_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + const uvm_page_mask_t *prefetch_page_mask, + uvm_va_block_transfer_mode_t transfer_mode) +{ + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(block, dst_id); + NvU32 missing_pages_count; + NvU32 pages_copied; + NvU32 pages_copied_to_cpu; + uvm_processor_mask_t src_processor_mask; + uvm_page_mask_t *copy_page_mask = &block_context->make_resident.page_mask; + uvm_page_mask_t *migrated_pages = &block_context->make_resident.pages_migrated; + uvm_page_mask_t *staged_pages = &block_context->make_resident.pages_staged; + block_transfer_mode_internal_t transfer_mode_internal; + + uvm_page_mask_zero(migrated_pages); + + if (page_mask) + uvm_page_mask_andnot(copy_page_mask, page_mask, resident_mask); + else + uvm_page_mask_complement(copy_page_mask, resident_mask); + + missing_pages_count = uvm_page_mask_region_weight(copy_page_mask, region); + + // If nothing needs to be copied, just check if we need to break + // read-duplication (i.e. transfer_mode is UVM_VA_BLOCK_TRANSFER_MODE_MOVE) + if (missing_pages_count == 0) + goto out; + + // TODO: Bug 1753731: Add P2P2P copies staged through a GPU + // TODO: Bug 1753731: When a page is resident in multiple locations due to + // read-duplication, spread out the source of the copy so we don't + // bottleneck on a single location. + + uvm_processor_mask_zero(&src_processor_mask); + + if (!uvm_id_equal(dst_id, UVM_ID_CPU)) { + // If the destination is a GPU, first move everything from processors + // with copy access supported. Notably this will move pages from the CPU + // as well even if later some extra copies from CPU are required for + // staged copies. + uvm_processor_mask_and(&src_processor_mask, block_get_can_copy_from_mask(block, dst_id), &block->resident); + uvm_processor_mask_clear(&src_processor_mask, dst_id); + + status = block_copy_resident_pages_mask(block, + block_context, + dst_id, + &src_processor_mask, + region, + copy_page_mask, + prefetch_page_mask, + transfer_mode == UVM_VA_BLOCK_TRANSFER_MODE_COPY? + BLOCK_TRANSFER_MODE_INTERNAL_COPY: + BLOCK_TRANSFER_MODE_INTERNAL_MOVE, + missing_pages_count, + migrated_pages, + &pages_copied, + &local_tracker); + + UVM_ASSERT(missing_pages_count >= pages_copied); + missing_pages_count -= pages_copied; + + if (status != NV_OK) + goto out; + + if (missing_pages_count == 0) + goto out; + + if (pages_copied) + uvm_page_mask_andnot(copy_page_mask, copy_page_mask, migrated_pages); + } + + // Now copy from everywhere else to the CPU. 
This is both for when the + // destination is the CPU (src_processor_mask empty) and for a staged copy + // (src_processor_mask containing processors with copy access to dst_id). + uvm_processor_mask_andnot(&src_processor_mask, &block->resident, &src_processor_mask); + uvm_processor_mask_clear(&src_processor_mask, dst_id); + uvm_processor_mask_clear(&src_processor_mask, UVM_ID_CPU); + + uvm_page_mask_zero(staged_pages); + + if (UVM_ID_IS_CPU(dst_id)) { + transfer_mode_internal = transfer_mode == UVM_VA_BLOCK_TRANSFER_MODE_COPY? + BLOCK_TRANSFER_MODE_INTERNAL_COPY: + BLOCK_TRANSFER_MODE_INTERNAL_MOVE; + } + else { + transfer_mode_internal = transfer_mode == UVM_VA_BLOCK_TRANSFER_MODE_COPY? + BLOCK_TRANSFER_MODE_INTERNAL_COPY_TO_STAGE: + BLOCK_TRANSFER_MODE_INTERNAL_MOVE_TO_STAGE; + } + + status = block_copy_resident_pages_mask(block, + block_context, + UVM_ID_CPU, + &src_processor_mask, + region, + copy_page_mask, + prefetch_page_mask, + transfer_mode_internal, + missing_pages_count, + staged_pages, + &pages_copied_to_cpu, + &local_tracker); + if (status != NV_OK) + goto out; + + // If destination is the CPU then we copied everything there above + if (UVM_ID_IS_CPU(dst_id)) { + uvm_page_mask_or(migrated_pages, migrated_pages, staged_pages); + missing_pages_count -= pages_copied_to_cpu; + + goto out; + } + + // Add everything to the block's tracker so that the + // block_copy_resident_pages_between() call below will acquire it. + status = uvm_tracker_add_tracker_safe(&block->tracker, &local_tracker); + if (status != NV_OK) + goto out; + uvm_tracker_clear(&local_tracker); + + // Now copy staged pages from the CPU to the destination. + status = block_copy_resident_pages_between(block, + block_context, + dst_id, + UVM_ID_CPU, + region, + staged_pages, + prefetch_page_mask, + transfer_mode == UVM_VA_BLOCK_TRANSFER_MODE_COPY? + BLOCK_TRANSFER_MODE_INTERNAL_COPY_FROM_STAGE: + BLOCK_TRANSFER_MODE_INTERNAL_MOVE_FROM_STAGE, + migrated_pages, + &pages_copied, + &local_tracker); + + UVM_ASSERT(missing_pages_count >= pages_copied); + missing_pages_count -= pages_copied; + + if (status != NV_OK) + goto out; + + // If we get here, that means we were staging the copy through the CPU and + // we should copy as many pages from the CPU as we copied to the CPU. + UVM_ASSERT(pages_copied == pages_copied_to_cpu); + +out: + // Pages that weren't resident anywhere else were populated at the + // destination directly. Mark them as resident now. We only do it if there + // have been no errors because we cannot identify which pages failed. 
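+ // These are the "first touch" pages: no processor had a resident copy, so
+ // no data needed to be copied and residency is simply set on dst_id.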
+ if (status == NV_OK && missing_pages_count > 0) + block_copy_set_first_touch_residency(block, block_context, dst_id, region, page_mask); + + // Break read duplication + if (transfer_mode == UVM_VA_BLOCK_TRANSFER_MODE_MOVE) { + const uvm_page_mask_t *break_read_duplication_mask; + + if (status == NV_OK) { + break_read_duplication_mask = page_mask; + } + else { + // We reuse this mask since copy_page_mask is no longer used in the + // function + + if (page_mask) + uvm_page_mask_and(&block_context->make_resident.page_mask, resident_mask, page_mask); + else + uvm_page_mask_copy(&block_context->make_resident.page_mask, resident_mask); + + break_read_duplication_mask = &block_context->make_resident.page_mask; + } + break_read_duplication_in_region(block, block_context, dst_id, region, break_read_duplication_mask); + } + + // Accumulate the pages that migrated into the output mask + uvm_page_mask_or(&block_context->make_resident.pages_changed_residency, + &block_context->make_resident.pages_changed_residency, + migrated_pages); + + // Add everything from the local tracker to the block's tracker. + // Notably this is also needed for handling block_copy_resident_pages_between() + // failures in the first loop. + tracker_status = uvm_tracker_add_tracker_safe(&block->tracker, &local_tracker); + uvm_tracker_deinit(&local_tracker); + + return status == NV_OK ? tracker_status : status; +} + +NV_STATUS uvm_va_block_make_resident(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t dest_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + const uvm_page_mask_t *prefetch_page_mask, + uvm_make_resident_cause_t cause) +{ + NV_STATUS status; + uvm_processor_mask_t unmap_processor_mask; + uvm_page_mask_t *unmap_page_mask = &va_block_context->make_resident.page_mask; + uvm_page_mask_t *resident_mask; + + va_block_context->make_resident.dest_id = dest_id; + va_block_context->make_resident.cause = cause; + + if (prefetch_page_mask) { + UVM_ASSERT(cause == UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT || + cause == UVM_MAKE_RESIDENT_CAUSE_NON_REPLAYABLE_FAULT || + cause == UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER); + } + + uvm_assert_mutex_locked(&va_block->lock); + UVM_ASSERT(uvm_va_block_is_hmm(va_block) || va_block->va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + + resident_mask = block_resident_mask_get_alloc(va_block, dest_id); + if (!resident_mask) + return NV_ERR_NO_MEMORY; + + // Unmap all mapped processors except for UVM-Lite GPUs as their mappings + // are largely persistent. 
+ uvm_processor_mask_andnot(&unmap_processor_mask, &va_block->mapped, block_get_uvm_lite_gpus(va_block)); + + if (page_mask) + uvm_page_mask_andnot(unmap_page_mask, page_mask, resident_mask); + else + uvm_page_mask_complement(unmap_page_mask, resident_mask); + + // Unmap all pages not resident on the destination + status = uvm_va_block_unmap_mask(va_block, va_block_context, &unmap_processor_mask, region, unmap_page_mask); + if (status != NV_OK) + return status; + + if (page_mask) + uvm_page_mask_and(unmap_page_mask, page_mask, &va_block->read_duplicated_pages); + else + uvm_page_mask_init_from_region(unmap_page_mask, region, &va_block->read_duplicated_pages); + + // Also unmap read-duplicated pages excluding dest_id + uvm_processor_mask_clear(&unmap_processor_mask, dest_id); + status = uvm_va_block_unmap_mask(va_block, va_block_context, &unmap_processor_mask, region, unmap_page_mask); + if (status != NV_OK) + return status; + + uvm_tools_record_read_duplicate_invalidate(va_block, + dest_id, + region, + unmap_page_mask); + + // Note that block_populate_pages and block_move_resident_pages also use + // va_block_context->make_resident.page_mask. + unmap_page_mask = NULL; + + status = block_populate_pages(va_block, va_block_retry, va_block_context, dest_id, region, page_mask); + if (status != NV_OK) + return status; + + status = block_copy_resident_pages(va_block, + va_block_context, + dest_id, + region, + page_mask, + prefetch_page_mask, + UVM_VA_BLOCK_TRANSFER_MODE_MOVE); + if (status != NV_OK) + return status; + + // Update eviction heuristics, if needed. Notably this could repeat the call + // done in block_set_resident_processor(), but that doesn't do anything bad + // and it's simpler to keep it in both places. + // + // Skip this if we didn't do anything (the input region and/or page mask was + // empty). + if (uvm_processor_mask_test(&va_block->resident, dest_id)) + block_mark_memory_used(va_block, dest_id); + + return NV_OK; +} + +// Combination function which prepares the input {region, page_mask} for +// entering read-duplication. It: +// - Unmaps all processors but revoke_id +// - Revokes write access from revoke_id +static NV_STATUS block_prep_read_duplicate_mapping(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t revoke_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + uvm_processor_mask_t unmap_processor_mask; + uvm_processor_id_t unmap_id; + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + NV_STATUS status, tracker_status; + + // Unmap everybody except revoke_id + uvm_processor_mask_andnot(&unmap_processor_mask, &va_block->mapped, block_get_uvm_lite_gpus(va_block)); + uvm_processor_mask_clear(&unmap_processor_mask, revoke_id); + + for_each_id_in_mask(unmap_id, &unmap_processor_mask) { + status = uvm_va_block_unmap(va_block, + va_block_context, + unmap_id, + region, + page_mask, + &local_tracker); + if (status != NV_OK) + goto out; + } + + // Revoke WRITE/ATOMIC access permissions from the remaining mapped + // processor. + status = uvm_va_block_revoke_prot(va_block, + va_block_context, + revoke_id, + region, + page_mask, + UVM_PROT_READ_WRITE, + &local_tracker); + if (status != NV_OK) + goto out; + +out: + tracker_status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker); + uvm_tracker_deinit(&local_tracker); + return status == NV_OK ? 
tracker_status : status; +} + +NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t dest_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + const uvm_page_mask_t *prefetch_page_mask, + uvm_make_resident_cause_t cause) +{ + NV_STATUS status = NV_OK; + uvm_processor_id_t src_id; + + va_block_context->make_resident.dest_id = dest_id; + va_block_context->make_resident.cause = cause; + + if (prefetch_page_mask) { + // TODO: Bug 1877578: investigate automatic read-duplicate policies + UVM_ASSERT(cause == UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT || + cause == UVM_MAKE_RESIDENT_CAUSE_NON_REPLAYABLE_FAULT || + cause == UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER); + } + + uvm_assert_mutex_locked(&va_block->lock); + UVM_ASSERT(!uvm_va_block_is_dead(va_block)); + + // For pages that are entering read-duplication we need to unmap remote + // mappings and revoke RW and higher access permissions. + // + // The current implementation: + // - Unmaps pages from all processors but the one with the resident copy + // - Revokes write access from the processor with the resident copy + for_each_id_in_mask(src_id, &va_block->resident) { + // Note that the below calls to block_populate_pages and + // block_move_resident_pages also use + // va_block_context->make_resident.page_mask. + uvm_page_mask_t *preprocess_page_mask = &va_block_context->make_resident.page_mask; + const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, src_id); + UVM_ASSERT(!uvm_page_mask_empty(resident_mask)); + + if (page_mask) + uvm_page_mask_andnot(preprocess_page_mask, page_mask, &va_block->read_duplicated_pages); + else + uvm_page_mask_complement(preprocess_page_mask, &va_block->read_duplicated_pages); + + // If there are no pages that need to be unmapped/revoked, skip to the + // next processor + if (!uvm_page_mask_and(preprocess_page_mask, preprocess_page_mask, resident_mask)) + continue; + + status = block_prep_read_duplicate_mapping(va_block, va_block_context, src_id, region, preprocess_page_mask); + if (status != NV_OK) + return status; + } + + status = block_populate_pages(va_block, va_block_retry, va_block_context, dest_id, region, page_mask); + if (status != NV_OK) + return status; + + status = block_copy_resident_pages(va_block, + va_block_context, + dest_id, + region, + page_mask, + prefetch_page_mask, + UVM_VA_BLOCK_TRANSFER_MODE_COPY); + if (status != NV_OK) + return status; + + // Update eviction heuristics, if needed. Notably this could repeat the call + // done in block_set_resident_processor(), but that doesn't do anything bad + // and it's simpler to keep it in both places. + // + // Skip this if we didn't do anything (the input region and/or page mask was + // empty). + if (uvm_processor_mask_test(&va_block->resident, dest_id)) + block_mark_memory_used(va_block, dest_id); + + return NV_OK; +} + +// Looks up the current CPU mapping state of page from the +// block->cpu.pte_bits bitmaps. If write access is enabled, +// UVM_PROT_READ_WRITE_ATOMIC is returned instead of UVM_PROT_READ_WRITE, since +// write access implies atomic access for CPUs. 
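+//
+// Summary:
+//   WRITE PTE bit set -> UVM_PROT_READ_WRITE_ATOMIC
+//   READ bit only     -> UVM_PROT_READ_ONLY
+//   neither bit set   -> UVM_PROT_NONE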
+static uvm_prot_t block_page_prot_cpu(uvm_va_block_t *block, uvm_page_index_t page_index) +{ + uvm_prot_t prot; + + UVM_ASSERT(!uvm_va_block_is_dead(block)); + + if (uvm_page_mask_test(&block->cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE], page_index)) + prot = UVM_PROT_READ_WRITE_ATOMIC; + else if (uvm_page_mask_test(&block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ], page_index)) + prot = UVM_PROT_READ_ONLY; + else + prot = UVM_PROT_NONE; + + return prot; +} + +// Looks up the current GPU mapping state of page from the +// block->gpus[i]->pte_bits bitmaps. +static uvm_prot_t block_page_prot_gpu(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_page_index_t page_index) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_prot_t prot; + + UVM_ASSERT(!uvm_va_block_is_dead(block)); + + if (!gpu_state) + return UVM_PROT_NONE; + + if (uvm_page_mask_test(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_ATOMIC], page_index)) + prot = UVM_PROT_READ_WRITE_ATOMIC; + else if (uvm_page_mask_test(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_WRITE], page_index)) + prot = UVM_PROT_READ_WRITE; + else if (uvm_page_mask_test(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ], page_index)) + prot = UVM_PROT_READ_ONLY; + else + prot = UVM_PROT_NONE; + + return prot; +} + +static uvm_prot_t block_page_prot(uvm_va_block_t *block, uvm_processor_id_t id, uvm_page_index_t page_index) +{ + if (UVM_ID_IS_CPU(id)) + return block_page_prot_cpu(block, page_index); + else + return block_page_prot_gpu(block, block_get_gpu(block, id), page_index); +} + +// Returns true if the block has any valid CPU PTE mapping in the block region. +static bool block_has_valid_mapping_cpu(uvm_va_block_t *block, uvm_va_block_region_t region) +{ + size_t valid_page; + + UVM_ASSERT(region.outer <= uvm_va_block_num_cpu_pages(block)); + + // Early-out: check whether any address in this block has a CPU mapping + if (!uvm_processor_mask_test(&block->mapped, UVM_ID_CPU)) { + UVM_ASSERT(uvm_page_mask_empty(&block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ])); + UVM_ASSERT(uvm_page_mask_empty(&block->cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE])); + return false; + } + + // All valid mappings have at least read permissions so we only need to + // inspect the read bits. 
+ valid_page = uvm_va_block_first_page_in_mask(region, &block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ]); + if (valid_page == region.outer) + return false; + + UVM_ASSERT(block_page_prot_cpu(block, valid_page) != UVM_PROT_NONE); + return true; +} + +static bool block_check_chunk_indirect_peers(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_t *accessing_gpu; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + if (!uvm_pmm_sysmem_mappings_indirect_supported()) + return true; + + for_each_va_space_gpu_in_mask(accessing_gpu, va_space, &va_space->indirect_peers[uvm_id_value(gpu->id)]) { + NvU64 peer_addr = uvm_pmm_gpu_indirect_peer_addr(&gpu->pmm, chunk, accessing_gpu); + uvm_reverse_map_t reverse_map; + size_t num_mappings; + + num_mappings = uvm_pmm_sysmem_mappings_dma_to_virt(&accessing_gpu->pmm_reverse_sysmem_mappings, + peer_addr, + uvm_gpu_chunk_get_size(chunk), + &reverse_map, + 1); + UVM_ASSERT(num_mappings == 1); + UVM_ASSERT(reverse_map.va_block == block); + UVM_ASSERT(reverse_map.region.first == chunk->va_block_page_index); + UVM_ASSERT(uvm_va_block_region_size(reverse_map.region) == uvm_gpu_chunk_get_size(chunk)); + + uvm_va_block_release_no_destroy(reverse_map.va_block); + } + + return true; +} + +// Sanity check the given GPU's chunks array +static bool block_check_chunks(uvm_va_block_t *block, uvm_gpu_id_t id) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, id); + uvm_gpu_t *gpu; + size_t i, num_chunks; + uvm_page_index_t page_index; + uvm_chunk_size_t chunk_size; + + if (!gpu_state) + return true; + + gpu = block_get_gpu(block, id); + + num_chunks = block_num_gpu_chunks(block, gpu); + for (page_index = 0, i = 0; i < num_chunks; i++) { + uvm_gpu_chunk_t *chunk = gpu_state->chunks[i]; + size_t chunk_index = block_gpu_chunk_index(block, gpu, page_index, &chunk_size); + + if (chunk_index != i) { + UVM_ERR_PRINT("chunk index mismatch: calculated %zu, is in %zu. VA block [0x%llx, 0x%llx) GPU %u page_index: %u\n", + chunk_index, + i, + block->start, + block->end + 1, + uvm_id_value(id), + page_index); + return false; + } + + if (chunk) { + if (chunk_size != uvm_gpu_chunk_get_size(chunk)) { + UVM_ERR_PRINT("chunk size mismatch: calc %u, actual %u. VA block [0x%llx, 0x%llx) GPU: %u page_index: %u chunk index: %zu\n", + chunk_size, + uvm_gpu_chunk_get_size(chunk), + block->start, + block->end + 1, + uvm_id_value(id), + page_index, + i); + return false; + } + + if (chunk->state != UVM_PMM_GPU_CHUNK_STATE_ALLOCATED) { + UVM_ERR_PRINT("Invalid chunk state %s. 
VA block [0x%llx, 0x%llx) GPU: %u page_index: %u chunk index: %zu chunk_size: %u\n", + uvm_pmm_gpu_chunk_state_string(chunk->state), + block->start, + block->end + 1, + uvm_id_value(id), + page_index, + i, + chunk_size); + return false; + } + + UVM_ASSERT(chunk->va_block == block); + UVM_ASSERT(chunk->va_block_page_index == page_index); + + UVM_ASSERT(block_check_chunk_indirect_peers(block, gpu, chunk)); + } + + page_index += chunk_size / PAGE_SIZE; + } + + return true; +} + +// Sanity checks for page mappings +static bool block_check_mappings_page(uvm_va_block_t *block, uvm_page_index_t page_index) +{ + uvm_processor_mask_t atomic_mappings, write_mappings, read_mappings; + uvm_processor_mask_t lite_read_mappings, lite_atomic_mappings; + uvm_processor_mask_t remaining_mappings, temp_mappings; + uvm_processor_mask_t resident_processors; + const uvm_processor_mask_t *residency_accessible_from = NULL; + const uvm_processor_mask_t *residency_has_native_atomics = NULL; + uvm_processor_id_t residency, id; + uvm_va_range_t *va_range = block->va_range; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_processor_id_t preferred_location = va_range ? + uvm_va_range_get_policy(va_range)->preferred_location : + UVM_ID_INVALID; + const uvm_processor_mask_t *uvm_lite_gpus = block_get_uvm_lite_gpus(block); + + uvm_va_block_page_authorized_processors(block, page_index, UVM_PROT_READ_WRITE_ATOMIC, + &atomic_mappings); + uvm_va_block_page_authorized_processors(block, page_index, UVM_PROT_READ_WRITE, + &write_mappings); + uvm_va_block_page_authorized_processors(block, page_index, UVM_PROT_READ_ONLY, + &read_mappings); + + // Each access bit implies all accesses below it + UVM_ASSERT(uvm_processor_mask_subset(&atomic_mappings, &write_mappings)); + UVM_ASSERT(uvm_processor_mask_subset(&write_mappings, &read_mappings)); + UVM_ASSERT(uvm_processor_mask_subset(&read_mappings, &block->mapped)); + + uvm_va_block_page_resident_processors(block, page_index, &resident_processors); + UVM_ASSERT(uvm_processor_mask_subset(&resident_processors, &block->resident)); + + // Sanity check block_get_mapped_processors + uvm_processor_mask_copy(&remaining_mappings, &read_mappings); + for_each_id_in_mask(residency, &resident_processors) { + block_get_mapped_processors(block, residency, page_index, &temp_mappings); + UVM_ASSERT(uvm_processor_mask_subset(&temp_mappings, &remaining_mappings)); + uvm_processor_mask_andnot(&remaining_mappings, &remaining_mappings, &temp_mappings); + } + + // Any remaining mappings point to non-resident locations, so they must be + // UVM-Lite mappings. 
+ UVM_ASSERT(uvm_processor_mask_subset(&remaining_mappings, uvm_lite_gpus)); + + residency = uvm_processor_mask_find_first_id(&resident_processors); + + if (uvm_processor_mask_get_count(&resident_processors) > 0) { + residency_accessible_from = &va_space->accessible_from[uvm_id_value(residency)]; + residency_has_native_atomics = &va_space->has_native_atomics[uvm_id_value(residency)]; + } + + // If the page is not resident, there should be no valid mappings + UVM_ASSERT_MSG(uvm_processor_mask_get_count(&resident_processors) > 0 || + uvm_processor_mask_get_count(&read_mappings) == 0, + "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx - SWA: 0x%lx - RD: 0x%lx\n", + *resident_processors.bitmap, + *read_mappings.bitmap, *write_mappings.bitmap, *atomic_mappings.bitmap, + *va_space->system_wide_atomics_enabled_processors.bitmap, + *block->read_duplicated_pages.bitmap); + + // Test read_duplicated_pages mask + UVM_ASSERT_MSG((uvm_processor_mask_get_count(&resident_processors) <= 1 && + !uvm_page_mask_test(&block->read_duplicated_pages, page_index)) || + (uvm_processor_mask_get_count(&resident_processors) > 1 && + uvm_page_mask_test(&block->read_duplicated_pages, page_index)), + "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx - SWA: 0x%lx - RD: 0x%lx\n", + *resident_processors.bitmap, + *read_mappings.bitmap, *write_mappings.bitmap, *atomic_mappings.bitmap, + *va_space->system_wide_atomics_enabled_processors.bitmap, + *block->read_duplicated_pages.bitmap); + + if (!uvm_processor_mask_empty(uvm_lite_gpus)) + UVM_ASSERT(UVM_ID_IS_VALID(preferred_location)); + + // UVM-Lite checks. Since the range group is made non-migratable before the + // actual migrations for that range group happen, we can only make those + // checks which are valid on both migratable and non-migratable range + // groups. + uvm_processor_mask_and(&lite_read_mappings, &read_mappings, uvm_lite_gpus); + uvm_processor_mask_and(&lite_atomic_mappings, &atomic_mappings, uvm_lite_gpus); + + // Any mapping from a UVM-Lite GPU must be atomic... + UVM_ASSERT(uvm_processor_mask_equal(&lite_read_mappings, &lite_atomic_mappings)); + + // ... and must have access to preferred_location + if (UVM_ID_IS_VALID(preferred_location)) { + const uvm_processor_mask_t *preferred_location_accessible_from; + + preferred_location_accessible_from = &va_space->accessible_from[uvm_id_value(preferred_location)]; + UVM_ASSERT(uvm_processor_mask_subset(&lite_atomic_mappings, preferred_location_accessible_from)); + } + + for_each_id_in_mask(id, &lite_atomic_mappings) + UVM_ASSERT(uvm_processor_mask_test(&va_space->can_access[uvm_id_value(id)], preferred_location)); + + // Exclude uvm_lite_gpus from mappings' masks after UVM-Lite tests + uvm_processor_mask_andnot(&read_mappings, &read_mappings, uvm_lite_gpus); + uvm_processor_mask_andnot(&write_mappings, &write_mappings, uvm_lite_gpus); + uvm_processor_mask_andnot(&atomic_mappings, &atomic_mappings, uvm_lite_gpus); + + // Pages set to zero in maybe_mapped_pages must not be mapped on any + // non-UVM-Lite GPU + if (!uvm_page_mask_test(&block->maybe_mapped_pages, page_index)) { + UVM_ASSERT_MSG(uvm_processor_mask_get_count(&read_mappings) == 0, + "Resident: 0x%lx - Mappings Block: 0x%lx / Page R: 0x%lx W: 0x%lx A: 0x%lx\n", + *resident_processors.bitmap, + *block->mapped.bitmap, + *read_mappings.bitmap, *write_mappings.bitmap, *atomic_mappings.bitmap); + } + + // atomic mappings from GPUs with disabled system-wide atomics are treated + // as write mappings. 
Therefore, we remove them from the atomic mappings mask + uvm_processor_mask_and(&atomic_mappings, &atomic_mappings, &va_space->system_wide_atomics_enabled_processors); + + if (!uvm_processor_mask_empty(&read_mappings)) { + // Read-duplicate: if a page is resident in multiple locations, it + // must be resident locally on each mapped processor. + if (uvm_processor_mask_get_count(&resident_processors) > 1) { + UVM_ASSERT_MSG(uvm_processor_mask_subset(&read_mappings, &resident_processors), + "Read-duplicate copies from remote processors\n" + "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx - SWA: 0x%lx - RD: 0x%lx\n", + *resident_processors.bitmap, + *read_mappings.bitmap, *write_mappings.bitmap, *atomic_mappings.bitmap, + *va_space->system_wide_atomics_enabled_processors.bitmap, + *block->read_duplicated_pages.bitmap); + } + else { + // Processors with mappings must have access to the processor that + // has the valid copy + UVM_ASSERT_MSG(uvm_processor_mask_subset(&read_mappings, residency_accessible_from), + "Not all processors have access to %s\n", + "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx -" + "Access: 0x%lx - Native Atomics: 0x%lx - SWA: 0x%lx\n", + uvm_va_space_processor_name(va_space, residency), + *resident_processors.bitmap, + *read_mappings.bitmap, + *write_mappings.bitmap, + *atomic_mappings.bitmap, + *residency_accessible_from->bitmap, + *residency_has_native_atomics->bitmap, + *va_space->system_wide_atomics_enabled_processors.bitmap); + for_each_id_in_mask(id, &read_mappings) { + UVM_ASSERT(uvm_processor_mask_test(&va_space->can_access[uvm_id_value(id)], residency)); + + if (uvm_processor_mask_test(&va_space->indirect_peers[uvm_id_value(residency)], id)) { + uvm_gpu_t *resident_gpu = uvm_va_space_get_gpu(va_space, residency); + uvm_gpu_t *mapped_gpu = uvm_va_space_get_gpu(va_space, id); + uvm_gpu_chunk_t *chunk = block_phys_page_chunk(block, block_phys_page(residency, page_index), NULL); + + // This function will assert if no mapping exists + (void)uvm_pmm_gpu_indirect_peer_addr(&resident_gpu->pmm, chunk, mapped_gpu); + } + } + } + } + + // If any processor has a writable mapping, there must only be one copy of + // the page in the system + if (!uvm_processor_mask_empty(&write_mappings)) { + UVM_ASSERT_MSG(uvm_processor_mask_get_count(&resident_processors) == 1, + "Too many resident copies for pages with write_mappings\n" + "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx - SWA: 0x%lx - RD: 0x%lx\n", + *resident_processors.bitmap, + *read_mappings.bitmap, + *write_mappings.bitmap, + *atomic_mappings.bitmap, + *va_space->system_wide_atomics_enabled_processors.bitmap, + *block->read_duplicated_pages.bitmap); + } + + if (!uvm_processor_mask_empty(&atomic_mappings)) { + uvm_processor_mask_t native_atomics; + + uvm_processor_mask_and(&native_atomics, &atomic_mappings, residency_has_native_atomics); + + if (uvm_processor_mask_empty(&native_atomics)) { + // No other faultable processor should be able to write + uvm_processor_mask_and(&write_mappings, &write_mappings, &va_space->faultable_processors); + + UVM_ASSERT_MSG(uvm_processor_mask_get_count(&write_mappings) == 1, + "Too many write mappings to %s from processors with non-native atomics\n" + "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx -" + "Access: 0x%lx - Native Atomics: 0x%lx - SWA: 0x%lx\n", + uvm_va_space_processor_name(va_space, residency), + *resident_processors.bitmap, + *read_mappings.bitmap, + *write_mappings.bitmap, + *atomic_mappings.bitmap, + *residency_accessible_from->bitmap, + 
*residency_has_native_atomics->bitmap, + *va_space->system_wide_atomics_enabled_processors.bitmap); + + // Only one processor outside of the native group can have atomics enabled + UVM_ASSERT_MSG(uvm_processor_mask_get_count(&atomic_mappings) == 1, + "Too many atomics mappings to %s from processors with non-native atomics\n" + "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx -" + "Access: 0x%lx - Native Atomics: 0x%lx - SWA: 0x%lx\n", + uvm_va_space_processor_name(va_space, residency), + *resident_processors.bitmap, + *read_mappings.bitmap, + *write_mappings.bitmap, + *atomic_mappings.bitmap, + *residency_accessible_from->bitmap, + *residency_has_native_atomics->bitmap, + *va_space->system_wide_atomics_enabled_processors.bitmap); + } + else { + uvm_processor_mask_t non_native_atomics; + + // One or more processors within the native group have atomics enabled. + // All processors outside of that group may have write but not atomic + // permissions. + uvm_processor_mask_andnot(&non_native_atomics, &atomic_mappings, residency_has_native_atomics); + + UVM_ASSERT_MSG(uvm_processor_mask_empty(&non_native_atomics), + "atomic mappings to %s from processors native and non-native\n" + "Resident: 0x%lx - Mappings R: 0x%lx W: 0x%lx A: 0x%lx -" + "Access: 0x%lx - Native Atomics: 0x%lx - SWA: 0x%lx\n", + uvm_va_space_processor_name(va_space, residency), + *resident_processors.bitmap, + *read_mappings.bitmap, + *write_mappings.bitmap, + *atomic_mappings.bitmap, + *residency_accessible_from->bitmap, + *residency_has_native_atomics->bitmap, + *va_space->system_wide_atomics_enabled_processors.bitmap); + } + } + + return true; +} + +static bool block_check_mappings_ptes(uvm_va_block_t *block, uvm_gpu_t *gpu) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_va_block_gpu_state_t *resident_gpu_state; + uvm_pte_bits_gpu_t pte_bit; + uvm_processor_id_t resident_id; + uvm_prot_t prot; + NvU32 big_page_size; + size_t num_big_pages, big_page_index; + uvm_va_block_region_t big_region, chunk_region; + uvm_gpu_chunk_t *chunk; + + if (!gpu_state->page_table_range_4k.table) + UVM_ASSERT(!gpu_state->activated_4k); + + if (!gpu_state->page_table_range_big.table) { + UVM_ASSERT(!gpu_state->initialized_big); + UVM_ASSERT(!gpu_state->activated_big); + } + + // It's only safe to check the PTE mappings if we have page tables. See + // uvm_va_block_get_gpu_va_space. + if (!block_gpu_has_page_tables(block, gpu)) { + UVM_ASSERT(!uvm_processor_mask_test(&block->mapped, gpu->id)); + return true; + } + + big_page_size = uvm_va_block_gpu_big_page_size(block, gpu); + num_big_pages = uvm_va_block_num_big_pages(block, big_page_size); + + if (block_gpu_supports_2m(block, gpu)) { + if (gpu_state->page_table_range_big.table || gpu_state->page_table_range_4k.table) { + // 2M blocks require the 2M entry to be allocated for the lower + // ranges to also be allocated. + UVM_ASSERT(gpu_state->page_table_range_2m.table); + } + else if (gpu_state->page_table_range_2m.table) { + // If the 2M entry is present but the lower ones aren't, the PTE + // must be 2M. + UVM_ASSERT(gpu_state->pte_is_2m); + } + } + else { + UVM_ASSERT(!gpu_state->page_table_range_2m.table); + if (num_big_pages == 0) + UVM_ASSERT(!gpu_state->page_table_range_big.table); + } + + // If we have the big table and it's in use then it must have been + // initialized, even if it doesn't currently contain active PTEs. 
+ if ((!block_gpu_supports_2m(block, gpu) && gpu_state->page_table_range_big.table) || + (block_gpu_supports_2m(block, gpu) && !gpu_state->pte_is_2m && gpu_state->activated_big)) + UVM_ASSERT(gpu_state->initialized_big); + + if (gpu_state->pte_is_2m) { + UVM_ASSERT(block_gpu_supports_2m(block, gpu)); + UVM_ASSERT(gpu_state->page_table_range_2m.table); + UVM_ASSERT(bitmap_empty(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + UVM_ASSERT(!gpu_state->force_4k_ptes); + + // GPU architectures which support 2M pages only support 64K as the big + // page size. All of the 2M code assumes that + // MAX_BIG_PAGES_PER_UVM_VA_BLOCK covers a 2M PTE exactly (bitmap_full, + // bitmap_complement, etc). + BUILD_BUG_ON((UVM_PAGE_SIZE_2M / UVM_PAGE_SIZE_64K) != MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + prot = block_page_prot_gpu(block, gpu, 0); + + // All page permissions match + for (pte_bit = 0; pte_bit < UVM_PTE_BITS_GPU_MAX; pte_bit++) { + if (prot == UVM_PROT_NONE || pte_bit > get_gpu_pte_bit_index(prot)) + UVM_ASSERT(uvm_page_mask_empty(&gpu_state->pte_bits[pte_bit])); + else + UVM_ASSERT(uvm_page_mask_full(&gpu_state->pte_bits[pte_bit])); + } + + if (prot != UVM_PROT_NONE) { + resident_id = block_gpu_get_processor_to_map(block, gpu, 0); + + // block_check_resident_proximity verifies that no closer processor + // has a resident page, so we don't need to check that all pages + // have the same resident_id. + + // block_check_mappings_page verifies that all pages marked resident + // are backed by populated memory. + + // The mapped processor should be fully resident and physically- + // contiguous. + UVM_ASSERT(uvm_page_mask_full(uvm_va_block_resident_mask_get(block, resident_id))); + + if (UVM_ID_IS_GPU(resident_id)) { + resident_gpu_state = uvm_va_block_gpu_state_get(block, resident_id); + UVM_ASSERT(resident_gpu_state); + UVM_ASSERT(uvm_gpu_chunk_get_size(resident_gpu_state->chunks[0]) == UVM_CHUNK_SIZE_2M); + } + else { + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_first_in_block(block, NULL); + + UVM_ASSERT(uvm_page_mask_full(&block->cpu.allocated)); + UVM_ASSERT(chunk); + UVM_ASSERT(uvm_cpu_chunk_get_size(chunk) == UVM_CHUNK_SIZE_2M); + } + } + } + else if (!bitmap_empty(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) { + UVM_ASSERT(gpu_state->page_table_range_big.table); + UVM_ASSERT(!gpu_state->force_4k_ptes); + UVM_ASSERT(num_big_pages > 0); + UVM_ASSERT(gpu_state->initialized_big); + + for (big_page_index = 0; big_page_index < num_big_pages; big_page_index++) { + big_region = uvm_va_block_big_page_region(block, big_page_index, big_page_size); + + if (!test_bit(big_page_index, gpu_state->big_ptes)) { + // If there are valid mappings but this isn't a big PTE, the + // mapping must be using the 4k PTEs. + if (!uvm_page_mask_region_empty(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ], big_region)) + UVM_ASSERT(gpu_state->page_table_range_4k.table); + continue; + } + + prot = block_page_prot_gpu(block, gpu, big_region.first); + + // All page permissions match + for (pte_bit = 0; pte_bit < UVM_PTE_BITS_GPU_MAX; pte_bit++) { + if (prot == UVM_PROT_NONE || pte_bit > get_gpu_pte_bit_index(prot)) + UVM_ASSERT(uvm_page_mask_region_empty(&gpu_state->pte_bits[pte_bit], big_region)); + else + UVM_ASSERT(uvm_page_mask_region_full(&gpu_state->pte_bits[pte_bit], big_region)); + } + + if (prot != UVM_PROT_NONE) { + resident_id = block_gpu_get_processor_to_map(block, gpu, big_region.first); + + // The mapped processor should be fully resident and physically- + // contiguous. 
Exception: UVM-Lite GPUs always map the preferred + // location even if the memory is resident elsewhere. Skip the + // residency check but still verify contiguity. + if (!uvm_processor_mask_test(block_get_uvm_lite_gpus(block), gpu->id)) { + UVM_ASSERT(uvm_page_mask_region_full(uvm_va_block_resident_mask_get(block, resident_id), + big_region)); + } + + if (UVM_ID_IS_CPU(resident_id)) { + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, big_region.first); + + UVM_ASSERT(gpu->parent->can_map_sysmem_with_large_pages); + UVM_ASSERT(uvm_cpu_chunk_get_size(chunk) >= uvm_va_block_region_size(big_region)); + } + else { + // Check GPU chunks + chunk = block_phys_page_chunk(block, block_phys_page(resident_id, big_region.first), NULL); + chunk_region = uvm_va_block_chunk_region(block, uvm_gpu_chunk_get_size(chunk), big_region.first); + UVM_ASSERT(uvm_va_block_region_contains_region(chunk_region, big_region)); + } + } + } + } + + return true; +} + +static bool block_check_mappings(uvm_va_block_t *block) +{ + uvm_page_index_t page_index; + uvm_processor_id_t id; + + // Verify the master masks, since block_check_mappings_page relies on them + for_each_processor_id(id) { + const uvm_page_mask_t *resident_mask, *map_mask; + + if (UVM_ID_IS_GPU(id) && !uvm_va_block_gpu_state_get(block, id)) { + UVM_ASSERT(!uvm_processor_mask_test(&block->resident, id)); + UVM_ASSERT(!uvm_processor_mask_test(&block->mapped, id)); + UVM_ASSERT(!uvm_processor_mask_test(&block->evicted_gpus, id)); + continue; + } + + resident_mask = uvm_va_block_resident_mask_get(block, id); + UVM_ASSERT(uvm_processor_mask_test(&block->resident, id) == !uvm_page_mask_empty(resident_mask)); + + map_mask = uvm_va_block_map_mask_get(block, id); + UVM_ASSERT(uvm_processor_mask_test(&block->mapped, id) == !uvm_page_mask_empty(map_mask)); + + if (UVM_ID_IS_GPU(id)) { + const uvm_page_mask_t *evicted_mask = block_evicted_mask_get(block, id); + UVM_ASSERT(uvm_processor_mask_test(&block->evicted_gpus, id) == !uvm_page_mask_empty(evicted_mask)); + + // Pages cannot be resident if they are marked as evicted + UVM_ASSERT(!uvm_page_mask_intersects(evicted_mask, resident_mask)); + + // Pages cannot be resident on a GPU with no memory + if (!block_processor_has_memory(block, id)) + UVM_ASSERT(!uvm_processor_mask_test(&block->resident, id)); + } + } + + // Check that every page has coherent mappings + for_each_va_block_page(page_index, block) + block_check_mappings_page(block, page_index); + + for_each_gpu_id(id) { + if (uvm_va_block_gpu_state_get(block, id)) { + uvm_gpu_t *gpu = block_get_gpu(block, id); + + // Check big and/or 2M PTE state + block_check_mappings_ptes(block, gpu); + } + } + + return true; +} + +// See the comments on uvm_va_block_unmap +static void block_unmap_cpu(uvm_va_block_t *block, uvm_va_block_region_t region, const uvm_page_mask_t *unmap_pages) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_pte_bits_cpu_t pte_bit; + bool unmapped_something = false; + uvm_va_block_region_t subregion; + NvU32 num_mapped_processors; + + // Early-out if nothing in the region is mapped + if (!block_has_valid_mapping_cpu(block, region)) + return; + + num_mapped_processors = uvm_processor_mask_get_count(&block->mapped); + + // If we are unmapping a page which we are tracking due to CPU faults with + // correct permissions, clear the info. 
This will cover both the unmap and + // revoke cases (since we implement CPU revocation by unmap + map) + if (block->cpu.fault_authorized.first_fault_stamp && + uvm_page_mask_region_test(unmap_pages, region, block->cpu.fault_authorized.page_index)) + block->cpu.fault_authorized.first_fault_stamp = 0; + + for_each_va_block_subregion_in_mask(subregion, unmap_pages, region) { + if (!block_has_valid_mapping_cpu(block, subregion)) + continue; + + unmap_mapping_range(&va_space->mapping, + uvm_va_block_region_start(block, subregion), + uvm_va_block_region_size(subregion), 1); + + for (pte_bit = 0; pte_bit < UVM_PTE_BITS_CPU_MAX; pte_bit++) + uvm_page_mask_region_clear(&block->cpu.pte_bits[pte_bit], subregion); + + // If the CPU is the only processor with mappings we can safely mark + // the pages as fully unmapped + if (num_mapped_processors == 1) + uvm_page_mask_region_clear(&block->maybe_mapped_pages, subregion); + + unmapped_something = true; + } + + if (!unmapped_something) + return; + + // Check whether the block has any more mappings + if (uvm_page_mask_empty(&block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ])) { + UVM_ASSERT(uvm_page_mask_empty(&block->cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE])); + uvm_processor_mask_clear(&block->mapped, UVM_ID_CPU); + } + + UVM_ASSERT(block_check_mappings(block)); +} + +// Given a mask of mapped pages, returns true if any of the pages in the mask +// are mapped remotely by the given GPU. +static bool block_has_remote_mapping_gpu(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_id_t gpu_id, + const uvm_page_mask_t *mapped_pages) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu_id); + + if (!gpu_state) + return false; + + // The caller must ensure that all pages of the input mask are really mapped + UVM_ASSERT(uvm_page_mask_subset(mapped_pages, &gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ])); + + // UVM-Lite GPUs map the preferred location if it's accessible, regardless + // of the resident location. + if (uvm_processor_mask_test(block_get_uvm_lite_gpus(block), gpu_id)) { + if (uvm_page_mask_empty(mapped_pages)) + return false; + + return !uvm_id_equal(uvm_va_range_get_policy(block->va_range)->preferred_location, gpu_id); + } + + // Remote pages are pages which are mapped but not resident locally + return uvm_page_mask_andnot(&block_context->scratch_page_mask, mapped_pages, &gpu_state->resident); +} + +// Writes pte_clear_val to the 4k PTEs covered by clear_page_mask. If +// clear_page_mask is NULL, all 4k PTEs in the {block, gpu} are written. +// +// If tlb_batch is provided, the 4k PTEs written are added to the batch. The +// caller is responsible for ending the TLB batch with the appropriate membar. 
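+// block_gpu_pte_clear_4k() below walks clear_page_mask as contiguous runs of
+// pages and issues one batched clear per run. A self-contained sketch of that
+// run-walking pattern over a single unsigned long (hypothetical example_*
+// helper; the driver uses the uvm_page_mask_t subregion iterators):
+static void example_for_each_run(unsigned long mask,
+                                 void (*handle_run)(unsigned first, unsigned count))
+{
+    const unsigned nbits = 8 * sizeof(mask);
+    unsigned bit = 0;
+
+    while (bit < nbits) {
+        unsigned first;
+
+        // Find the start of the next run of set bits
+        while (bit < nbits && !(mask & (1UL << bit)))
+            bit++;
+        if (bit == nbits)
+            break;
+        first = bit;
+
+        // Extend the run across consecutive set bits
+        while (bit < nbits && (mask & (1UL << bit)))
+            bit++;
+
+        // One batched operation per contiguous run of pages
+        handle_run(first, bit - first);
+    }
+}
+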
+static void block_gpu_pte_clear_4k(uvm_va_block_t *block, + uvm_gpu_t *gpu, + const uvm_page_mask_t *clear_page_mask, + NvU64 pte_clear_val, + uvm_pte_batch_t *pte_batch, + uvm_tlb_batch_t *tlb_batch) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_gpu_phys_address_t pte_addr; + NvU32 pte_size = uvm_mmu_pte_size(tree, UVM_PAGE_SIZE_4K); + uvm_va_block_region_t region = uvm_va_block_region_from_block(block); + uvm_va_block_region_t subregion; + size_t num_ptes, ptes_per_page = PAGE_SIZE / UVM_PAGE_SIZE_4K; + + for_each_va_block_subregion_in_mask(subregion, clear_page_mask, region) { + num_ptes = uvm_va_block_region_num_pages(subregion) * ptes_per_page; + + pte_addr = uvm_page_table_range_entry_address(tree, + &gpu_state->page_table_range_4k, + subregion.first * ptes_per_page); + + uvm_pte_batch_clear_ptes(pte_batch, pte_addr, pte_clear_val, pte_size, num_ptes); + + if (tlb_batch) { + uvm_tlb_batch_invalidate(tlb_batch, + uvm_va_block_region_start(block, subregion), + uvm_va_block_region_size(subregion), + UVM_PAGE_SIZE_4K, + UVM_MEMBAR_NONE); + } + } +} + +// Writes the 4k PTEs covered by write_page_mask using memory from resident_id +// with new_prot permissions. new_prot must not be UVM_PROT_NONE: use +// block_gpu_pte_clear_4k instead. +// +// If write_page_mask is NULL, all 4k PTEs in the {block, gpu} are written. +// +// If tlb_batch is provided, the 4k PTEs written are added to the batch. The +// caller is responsible for ending the TLB batch with the appropriate membar. +static void block_gpu_pte_write_4k(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_processor_id_t resident_id, + uvm_prot_t new_prot, + const uvm_page_mask_t *write_page_mask, + uvm_pte_batch_t *pte_batch, + uvm_tlb_batch_t *tlb_batch) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + NvU32 pte_size = uvm_mmu_pte_size(tree, UVM_PAGE_SIZE_4K); + const size_t ptes_per_page = PAGE_SIZE / UVM_PAGE_SIZE_4K; + uvm_va_block_region_t contig_region = {0}; + uvm_gpu_phys_address_t contig_addr = {0}; + uvm_gpu_phys_address_t page_addr = {0}; + uvm_page_index_t page_index; + NvU64 pte_flags = block_gpu_pte_flag_cacheable(block, gpu, resident_id); + + UVM_ASSERT(new_prot != UVM_PROT_NONE); + UVM_ASSERT(UVM_ID_IS_VALID(resident_id)); + + for_each_va_block_page_in_mask(page_index, write_page_mask, block) { + uvm_gpu_phys_address_t pte_addr; + size_t i; + + // Assume that this mapping will be used to write to the page + if (new_prot > UVM_PROT_READ_ONLY && UVM_ID_IS_CPU(resident_id)) + block_mark_cpu_page_dirty(block, page_index); + + if (page_index >= contig_region.outer) { + contig_region = block_phys_contig_region(block, page_index, resident_id); + contig_addr = block_phys_page_address(block, block_phys_page(resident_id, contig_region.first), gpu); + page_addr = contig_addr; + } + + page_addr.address = contig_addr.address + (page_index - contig_region.first) * PAGE_SIZE; + + pte_addr = uvm_page_table_range_entry_address(tree, + &gpu_state->page_table_range_4k, + page_index * ptes_per_page); + + // Handle PAGE_SIZE > GPU PTE size + for (i = 0; i < ptes_per_page; i++) { + NvU64 pte_val = tree->hal->make_pte(page_addr.aperture, page_addr.address, new_prot, pte_flags); + uvm_pte_batch_write_pte(pte_batch, pte_addr, pte_val, pte_size); + page_addr.address += UVM_PAGE_SIZE_4K; + 
pte_addr.address += pte_size; + } + + if (tlb_batch) { + NvU64 page_virt_addr = uvm_va_block_cpu_page_address(block, page_index); + uvm_tlb_batch_invalidate(tlb_batch, page_virt_addr, PAGE_SIZE, UVM_PAGE_SIZE_4K, UVM_MEMBAR_NONE); + } + } +} + +// Writes all 4k PTEs under the big PTE regions described by big_ptes_covered. +// This is used to initialize the 4k PTEs when splitting 2M and big PTEs. It +// only writes 4k PTEs, not big PTEs. +// +// For those 4k PTEs, new_pages_mask indicates which ones should inherit the +// mapping from the corresponding big page (0) and which ones should be written +// using memory from resident_id and new_prot (1). Unlike the other pte_write +// functions, new_prot may be UVM_PROT_NONE. +// +// If resident_id is UVM_ID_INVALID, this function looks up the resident ID +// which should inherit the current permissions. new_prot must be UVM_PROT_NONE +// in this case. +// +// new_pages_mask must not be NULL. +// +// No TLB invalidates are required since we've set up the lower PTEs to never be +// cached by the GPU's MMU when covered by larger PTEs. +static void block_gpu_pte_big_split_write_4k(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + uvm_processor_id_t resident_id, + uvm_prot_t new_prot, + const unsigned long *big_ptes_covered, + const uvm_page_mask_t *new_pages_mask, + uvm_pte_batch_t *pte_batch) +{ + uvm_va_block_region_t big_region; + size_t big_page_index; + uvm_processor_id_t curr_resident_id; + uvm_prot_t curr_prot; + NvU32 big_page_size = uvm_va_block_gpu_big_page_size(block, gpu); + + if (UVM_ID_IS_INVALID(resident_id)) + UVM_ASSERT(new_prot == UVM_PROT_NONE); + + for_each_set_bit(big_page_index, big_ptes_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) { + big_region = uvm_va_block_big_page_region(block, big_page_index, big_page_size); + + curr_prot = block_page_prot_gpu(block, gpu, big_region.first); + + // The unmap path doesn't know the current residency ahead of time, so + // we have to look it up. + if (UVM_ID_IS_INVALID(resident_id)) { + curr_resident_id = block_gpu_get_processor_to_map(block, gpu, big_region.first); + } + else { + // Check that we aren't changing the aperture of the existing + // mappings. It could be legal in some cases (switching from {RO, A} + // to {RO, B} for example) but we'd need to issue TLB membars. 
+ if (curr_prot != UVM_PROT_NONE) + UVM_ASSERT(uvm_id_equal(block_gpu_get_processor_to_map(block, gpu, big_region.first), resident_id)); + + curr_resident_id = resident_id; + } + + // pages in new_pages_mask under this big page get new_prot + uvm_page_mask_zero(&block_context->scratch_page_mask); + uvm_page_mask_region_fill(&block_context->scratch_page_mask, big_region); + if (uvm_page_mask_and(&block_context->scratch_page_mask, &block_context->scratch_page_mask, new_pages_mask)) { + if (new_prot == UVM_PROT_NONE) { + block_gpu_pte_clear_4k(block, gpu, &block_context->scratch_page_mask, 0, pte_batch, NULL); + } + else { + block_gpu_pte_write_4k(block, + gpu, + curr_resident_id, + new_prot, + &block_context->scratch_page_mask, + pte_batch, + NULL); + } + } + + // All other pages under this big page inherit curr_prot + uvm_page_mask_zero(&block_context->scratch_page_mask); + uvm_page_mask_region_fill(&block_context->scratch_page_mask, big_region); + if (uvm_page_mask_andnot(&block_context->scratch_page_mask, &block_context->scratch_page_mask, new_pages_mask)) { + if (curr_prot == UVM_PROT_NONE) { + block_gpu_pte_clear_4k(block, gpu, &block_context->scratch_page_mask, 0, pte_batch, NULL); + } + else { + block_gpu_pte_write_4k(block, + gpu, + curr_resident_id, + curr_prot, + &block_context->scratch_page_mask, + pte_batch, + NULL); + } + } + } +} + +// Writes pte_clear_val to the big PTEs in big_ptes_mask. If big_ptes_mask is +// NULL, all big PTEs in the {block, gpu} are cleared. +// +// If tlb_batch is provided, the big PTEs written are added to the batch. The +// caller is responsible for ending the TLB batch with the appropriate membar. +static void block_gpu_pte_clear_big(uvm_va_block_t *block, + uvm_gpu_t *gpu, + const unsigned long *big_ptes_mask, + NvU64 pte_clear_val, + uvm_pte_batch_t *pte_batch, + uvm_tlb_batch_t *tlb_batch) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_gpu_va_space_t *gpu_va_space = uvm_va_block_get_gpu_va_space(block, gpu); + NvU32 big_page_size = gpu_va_space->page_tables.big_page_size; + uvm_gpu_phys_address_t pte_addr; + NvU32 pte_size = uvm_mmu_pte_size(&gpu_va_space->page_tables, big_page_size); + size_t big_page_index; + DECLARE_BITMAP(big_ptes_to_clear, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + if (big_ptes_mask) + bitmap_copy(big_ptes_to_clear, big_ptes_mask, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + else + bitmap_set(big_ptes_to_clear, 0, uvm_va_block_num_big_pages(block, big_page_size)); + + for_each_set_bit(big_page_index, big_ptes_to_clear, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) { + pte_addr = uvm_page_table_range_entry_address(&gpu_va_space->page_tables, + &gpu_state->page_table_range_big, + big_page_index); + uvm_pte_batch_clear_ptes(pte_batch, pte_addr, pte_clear_val, pte_size, 1); + + if (tlb_batch) { + uvm_tlb_batch_invalidate(tlb_batch, + uvm_va_block_big_page_addr(block, big_page_index, big_page_size), + big_page_size, + big_page_size, + UVM_MEMBAR_NONE); + } + } +} + +// Writes the big PTEs in big_ptes_mask using memory from resident_id with +// new_prot permissions. new_prot must not be UVM_PROT_NONE: use +// block_gpu_pte_clear_big instead. +// +// Unlike block_gpu_pte_clear_big, big_ptes_mask must not be NULL. +// +// If tlb_batch is provided, the big PTEs written are added to the batch. The +// caller is responsible for ending the TLB batch with the appropriate membar. 
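+// Both block_gpu_pte_write_4k() above and block_gpu_pte_write_big() below
+// derive each PTE's physical address from the start of the current
+// physically-contiguous run rather than translating every page individually.
+// A self-contained sketch of that arithmetic (hypothetical example_* name and
+// a flat NvU64 address instead of the aperture-tagged UVM addresses):
+static NvU64 example_page_phys_addr(NvU64 contig_start_addr,
+                                    size_t contig_first_page,
+                                    size_t page_index,
+                                    NvU64 page_size)
+{
+    // page_index is assumed to fall within the contiguous run that begins at
+    // contig_first_page, so the offset within the run is linear.
+    return contig_start_addr + (page_index - contig_first_page) * page_size;
+}
+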
+static void block_gpu_pte_write_big(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_processor_id_t resident_id, + uvm_prot_t new_prot, + const unsigned long *big_ptes_mask, + uvm_pte_batch_t *pte_batch, + uvm_tlb_batch_t *tlb_batch) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_gpu_va_space_t *gpu_va_space = uvm_va_block_get_gpu_va_space(block, gpu); + uvm_page_tree_t *tree = &gpu_va_space->page_tables; + NvU32 big_page_size = tree->big_page_size; + NvU32 pte_size = uvm_mmu_pte_size(tree, big_page_size); + size_t big_page_index; + uvm_va_block_region_t contig_region = {0}; + uvm_gpu_phys_address_t contig_addr = {0}; + uvm_gpu_phys_address_t page_addr = {0}; + NvU64 pte_flags = block_gpu_pte_flag_cacheable(block, gpu, resident_id); + + UVM_ASSERT(new_prot != UVM_PROT_NONE); + UVM_ASSERT(UVM_ID_IS_VALID(resident_id)); + UVM_ASSERT(big_ptes_mask); + + if (!bitmap_empty(big_ptes_mask, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) { + UVM_ASSERT(uvm_va_block_num_big_pages(block, big_page_size) > 0); + + if (!gpu->parent->can_map_sysmem_with_large_pages) + UVM_ASSERT(UVM_ID_IS_GPU(resident_id)); + } + + for_each_set_bit(big_page_index, big_ptes_mask, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) { + NvU64 pte_val; + uvm_gpu_phys_address_t pte_addr; + uvm_va_block_region_t big_region = uvm_va_block_big_page_region(block, big_page_index, big_page_size); + + // Assume that this mapping will be used to write to the page + if (new_prot > UVM_PROT_READ_ONLY && UVM_ID_IS_CPU(resident_id)) { + uvm_page_index_t page_index; + + for_each_va_block_page_in_region(page_index, big_region) + block_mark_cpu_page_dirty(block, page_index); + } + + if (big_region.first >= contig_region.outer) { + contig_region = block_phys_contig_region(block, big_region.first, resident_id); + contig_addr = block_phys_page_address(block, block_phys_page(resident_id, contig_region.first), gpu); + page_addr = contig_addr; + } + + page_addr.address = contig_addr.address + (big_region.first - contig_region.first) * PAGE_SIZE; + + pte_addr = uvm_page_table_range_entry_address(tree, &gpu_state->page_table_range_big, big_page_index); + pte_val = tree->hal->make_pte(page_addr.aperture, page_addr.address, new_prot, pte_flags); + uvm_pte_batch_write_pte(pte_batch, pte_addr, pte_val, pte_size); + + if (tlb_batch) { + uvm_tlb_batch_invalidate(tlb_batch, + uvm_va_block_region_start(block, big_region), + big_page_size, + big_page_size, + UVM_MEMBAR_NONE); + } + } +} + +// Switches any mix of valid or invalid 4k PTEs under the big PTEs in +// big_ptes_to_merge to an unmapped big PTE. This also ends both pte_batch and +// tlb_batch in order to poison the now-unused 4k PTEs. +// +// The 4k PTEs are invalidated with the specified membar. 
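+// The ordering constraint this path relies on can be sketched as a short
+// sequence: the big PTE is first written as unmapped, the TLB is invalidated
+// for both the big and 4k page sizes, and only then may a valid big PTE be
+// written. A self-contained sketch (hypothetical example_* callbacks, not the
+// UVM pte/tlb batch APIs):
+typedef struct {
+    void (*write_big_unmapped)(void);      // stop lookups through the 4k PTEs
+    void (*invalidate_big_and_4k)(void);   // flush both entry sizes
+    void (*write_big_valid)(void);         // safe only after the invalidate
+} example_merge_ops_t;
+
+static void example_merge_4k_to_big(const example_merge_ops_t *ops)
+{
+    // Writing a valid big PTE before the invalidate could let the TLB cache
+    // the same VA in differently-sized entries, breaking memory ordering.
+    ops->write_big_unmapped();
+    ops->invalidate_big_and_4k();
+    ops->write_big_valid();
+}
+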
+static void block_gpu_pte_merge_big_and_end(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + const unsigned long *big_ptes_to_merge, + uvm_push_t *push, + uvm_pte_batch_t *pte_batch, + uvm_tlb_batch_t *tlb_batch, + uvm_membar_t tlb_membar) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + NvU32 big_page_size = tree->big_page_size; + NvU64 unmapped_pte_val = tree->hal->unmapped_pte(big_page_size); + size_t big_page_index; + DECLARE_BITMAP(dummy_big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + UVM_ASSERT(!bitmap_empty(big_ptes_to_merge, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + UVM_ASSERT(!bitmap_and(dummy_big_ptes, gpu_state->big_ptes, big_ptes_to_merge, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + + // We can be called with the 4k PTEs in two cases: + // 1) 4k PTEs allocated. In this case the 4k PTEs are currently active. + // + // 2) 4k PTEs unallocated. In this case the GPU may not have invalid 4k PTEs + // active under the big PTE, depending on whether neighboring blocks + // caused the page tables to be allocated. + // + // In both cases we need to invalidate the 4k PTEs in case the GPU MMU has + // them cached. + + // Each big PTE is currently invalid so the 4ks are active (or unallocated). + // First make the big PTEs unmapped to disable future lookups of the 4ks + // under it. We can't directly transition the entry from valid 4k PTEs to + // valid big PTEs, because that could cause the GPU TLBs to cache the same + // VA in different cache lines. That could cause memory ordering to not be + // maintained. + block_gpu_pte_clear_big(block, gpu, big_ptes_to_merge, unmapped_pte_val, pte_batch, tlb_batch); + + // Now invalidate the big PTEs we just wrote as well as all 4ks under them. + // Subsequent MMU fills will stop at the now-unmapped big PTEs, so we only + // need to invalidate the 4k PTEs without actually writing them. + for_each_set_bit(big_page_index, big_ptes_to_merge, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) { + uvm_tlb_batch_invalidate(tlb_batch, + uvm_va_block_big_page_addr(block, big_page_index, big_page_size), + big_page_size, + big_page_size | UVM_PAGE_SIZE_4K, + UVM_MEMBAR_NONE); + } + + // End the batches for the caller. We need to do this here in order to + // poison the 4ks below. + uvm_pte_batch_end(pte_batch); + uvm_tlb_batch_end(tlb_batch, push, tlb_membar); + + // As a guard against bad PTE writes/TLB invalidates, fill the now-unused + // PTEs with a pattern which will trigger fatal faults on access. We have to + // do this after the TLB invalidate of the big PTEs, or the GPU might use + // the new values. + if (UVM_IS_DEBUG() && gpu_state->page_table_range_4k.table) { + uvm_page_mask_init_from_big_ptes(block, gpu, &block_context->scratch_page_mask, big_ptes_to_merge); + uvm_pte_batch_begin(push, pte_batch); + block_gpu_pte_clear_4k(block, + gpu, + &block_context->scratch_page_mask, + tree->hal->poisoned_pte(), + pte_batch, + NULL); + uvm_pte_batch_end(pte_batch); + } +} + +// Writes 0 (invalid) to the 2M PTE for this {block, gpu}. +// +// If tlb_batch is provided, the 2M PTE is added to the batch. The caller is +// responsible for ending the TLB batch with the appropriate membar. 
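+// block_gpu_pte_clear_2m() below must clear the full 16-byte PTE: the entry
+// is only treated as invalid when both 64-bit halves are zero. A
+// self-contained sketch of that constraint (hypothetical example_* type, not
+// the real PTE layout):
+typedef struct {
+    NvU64 lo;
+    NvU64 hi;
+} example_pte_16_t;
+
+static void example_clear_pte_16(example_pte_16_t *pte)
+{
+    // Clearing only pte->lo could leave stale bits in pte->hi, so the entry
+    // might not read back as invalid.
+    pte->lo = 0;
+    pte->hi = 0;
+}
+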
+static void block_gpu_pte_clear_2m(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_pte_batch_t *pte_batch, + uvm_tlb_batch_t *tlb_batch) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_gpu_phys_address_t pte_addr = uvm_page_table_range_entry_address(tree, &gpu_state->page_table_range_2m, 0); + NvU32 pte_size = uvm_mmu_pte_size(tree, UVM_PAGE_SIZE_2M); + + // uvm_pte_batch_write_pte only writes the lower 8 bytes of the 16-byte PTE, + // which would cause a problem when trying to make the entry invalid since + // both halves must be 0. Using uvm_pte_batch_clear_ptes writes the entire + // 16 bytes. + uvm_pte_batch_clear_ptes(pte_batch, pte_addr, 0, pte_size, 1); + + if (tlb_batch) + uvm_tlb_batch_invalidate(tlb_batch, block->start, UVM_PAGE_SIZE_2M, UVM_PAGE_SIZE_2M, UVM_MEMBAR_NONE); +} + +// Writes the 2M PTE for {block, gpu} using memory from resident_id with +// new_prot permissions. new_prot must not be UVM_PROT_NONE: use +// block_gpu_pte_clear_2m instead. +// +// If tlb_batch is provided, the 2M PTE is added to the batch. The caller is +// responsible for ending the TLB batch with the appropriate membar. +static void block_gpu_pte_write_2m(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_processor_id_t resident_id, + uvm_prot_t new_prot, + uvm_pte_batch_t *pte_batch, + uvm_tlb_batch_t *tlb_batch) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_gpu_phys_address_t pte_addr = uvm_page_table_range_entry_address(tree, &gpu_state->page_table_range_2m, 0); + uvm_gpu_phys_address_t page_addr; + NvU32 pte_size = uvm_mmu_pte_size(tree, UVM_PAGE_SIZE_2M); + NvU64 pte_val; + NvU64 pte_flags = block_gpu_pte_flag_cacheable(block, gpu, resident_id); + + UVM_ASSERT(new_prot != UVM_PROT_NONE); + UVM_ASSERT(UVM_ID_IS_VALID(resident_id)); + + if (UVM_ID_IS_CPU(resident_id)) + block_mark_cpu_page_dirty(block, 0); + + page_addr = block_phys_page_address(block, block_phys_page(resident_id, 0), gpu); + pte_val = tree->hal->make_pte(page_addr.aperture, page_addr.address, new_prot, pte_flags); + uvm_pte_batch_write_pte(pte_batch, pte_addr, pte_val, pte_size); + + if (tlb_batch) + uvm_tlb_batch_invalidate(tlb_batch, block->start, UVM_PAGE_SIZE_2M, UVM_PAGE_SIZE_2M, UVM_MEMBAR_NONE); +} + +static bool block_gpu_needs_to_activate_table(uvm_va_block_t *block, uvm_gpu_t *gpu) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + + if (!block_gpu_supports_2m(block, gpu)) + return false; + + if ((gpu_state->page_table_range_big.table && !gpu_state->activated_big) || + (gpu_state->page_table_range_4k.table && !gpu_state->activated_4k)) + return true; + + return false; +} + +// Only used if 2M PTEs are supported. Either transitions a 2M PTE to a PDE, or +// activates a newly-allocated page table (big or 4k) while the other is already +// active. The caller must have already written the new PTEs under the table +// with the appropriate membar. 
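+// block_gpu_write_pde() below picks the membar scope for ordering the PDE
+// write against the TLB invalidate based on where the page tables live. A
+// self-contained sketch of that choice (hypothetical example_* names):
+typedef enum {
+    EXAMPLE_MEMBAR_GPU,  // sufficient when the page tables are in this GPU's vidmem
+    EXAMPLE_MEMBAR_SYS,  // needed when the page tables are in sysmem
+} example_membar_t;
+
+static example_membar_t example_pde_write_membar(bool tables_in_vidmem)
+{
+    return tables_in_vidmem ? EXAMPLE_MEMBAR_GPU : EXAMPLE_MEMBAR_SYS;
+}
+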
+static void block_gpu_write_pde(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_push_t *push, uvm_tlb_batch_t *tlb_batch) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + + if (!gpu_state->pte_is_2m) + UVM_ASSERT(block_gpu_needs_to_activate_table(block, gpu)); + + UVM_ASSERT(gpu_state->page_table_range_big.table || gpu_state->page_table_range_4k.table); + + // We always need a membar to order PDE/PTE writes with the TLB invalidate. + // write_pde will do a MEMBAR_SYS by default. + if (uvm_page_table_range_aperture(&gpu_state->page_table_range_2m) == UVM_APERTURE_VID) + uvm_push_set_flag(push, UVM_PUSH_FLAG_NEXT_MEMBAR_GPU); + uvm_page_tree_write_pde(tree, &gpu_state->page_table_range_2m, push); + + gpu->parent->host_hal->wait_for_idle(push); + + // Invalidate just the PDE + uvm_tlb_batch_invalidate(tlb_batch, block->start, UVM_PAGE_SIZE_2M, UVM_PAGE_SIZE_2M, UVM_MEMBAR_NONE); + + if (gpu_state->page_table_range_big.table) + gpu_state->activated_big = true; + + if (gpu_state->page_table_range_4k.table) + gpu_state->activated_4k = true; +} + +// Called to switch the 2M PTE (valid or invalid) to a PDE. The caller should +// have written all lower PTEs as appropriate into the given pte_batch already. +// This function ends the PTE batch, activates the 2M PDE, and does a TLB +// invalidate. +// +// The caller does not need to do any TLB invalidates since none of the lower +// PTEs could be cached. +static void block_gpu_pte_finish_split_2m(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_push_t *push, + uvm_pte_batch_t *pte_batch, + uvm_tlb_batch_t *tlb_batch, + uvm_membar_t tlb_membar) +{ + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_prot_t curr_prot = block_page_prot_gpu(block, gpu, 0); + + // Step 1: Make the 2M entry invalid. We can't directly transition from a + // valid 2M PTE to valid lower PTEs, because that could cause the + // GPU TLBs to cache the same VA in different cache lines. That + // could cause memory ordering to not be maintained. + // + // If the 2M PTE is already invalid, no TLB invalidate is needed. + + if (curr_prot == UVM_PROT_NONE) { + // If we aren't downgrading, then we don't need a membar. + UVM_ASSERT(tlb_membar == UVM_MEMBAR_NONE); + + // End the batch, which pushes a membar to ensure that the caller's PTE + // writes below 2M are observed before the PDE write we're about to do. + uvm_pte_batch_end(pte_batch); + } + else { + // The 64k and 4k PTEs can't possibly be cached since the 2M entry is + // not yet a PDE, so we just need to invalidate this single 2M entry. + uvm_tlb_batch_begin(tree, tlb_batch); + block_gpu_pte_clear_2m(block, gpu, pte_batch, tlb_batch); + + // Make sure the PTE writes are observed before the TLB invalidate + uvm_pte_batch_end(pte_batch); + uvm_tlb_batch_end(tlb_batch, push, tlb_membar); + } + + // Step 2: Switch the 2M entry from invalid to a PDE. This activates the + // smaller PTEs. + uvm_tlb_batch_begin(tree, tlb_batch); + block_gpu_write_pde(block, gpu, push, tlb_batch); + uvm_tlb_batch_end(tlb_batch, push, UVM_MEMBAR_NONE); +} + +// Switches any mix of valid or invalid 4k or 64k PTEs to an invalid 2M PTE. +// Any lower PTEs are invalidated with the specified membar. 
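+// block_gpu_pte_merge_2m() below decides which page sizes the TLB invalidate
+// must cover. A self-contained sketch of that decision (hypothetical
+// EXAMPLE_SIZE_* flags, not the UVM_PAGE_SIZE_* values):
+#define EXAMPLE_SIZE_2M  (1u << 0)
+#define EXAMPLE_SIZE_64K (1u << 1)
+#define EXAMPLE_SIZE_4K  (1u << 2)
+
+static unsigned example_merge_2m_inval_sizes(bool big_table_allocated, bool all_ptes_are_big)
+{
+    // The 2M entry itself always needs invalidating. Lower levels only need
+    // it if they could have been walked: 64K entries whenever the big-page
+    // table exists, and 4K entries whenever some region was not covered by a
+    // big PTE.
+    unsigned sizes = EXAMPLE_SIZE_2M;
+
+    if (big_table_allocated)
+        sizes |= EXAMPLE_SIZE_64K;
+    if (!all_ptes_are_big)
+        sizes |= EXAMPLE_SIZE_4K;
+
+    return sizes;
+}
+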
+static void block_gpu_pte_merge_2m(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + uvm_push_t *push, + uvm_membar_t tlb_membar) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch; + uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch; + NvU32 tlb_inval_sizes; + + UVM_ASSERT(!gpu_state->pte_is_2m); + UVM_ASSERT(gpu_state->page_table_range_big.table || gpu_state->page_table_range_4k.table); + + // The 2M entry is currently a PDE, so first make it invalid. We can't + // directly transition the entry from a valid PDE to a valid 2M PTE, because + // that could cause the GPU TLBs to cache the same VA in different cache + // lines. That could cause memory ordering to not be maintained. + uvm_pte_batch_begin(push, pte_batch); + block_gpu_pte_clear_2m(block, gpu, pte_batch, NULL); + uvm_pte_batch_end(pte_batch); + + // Now invalidate both the 2M entry we just wrote as well as all lower-level + // entries which could be cached. Subsequent MMU fills will stop at the now- + // invalid 2M entry, so we only need to invalidate the lower PTEs without + // actually writing them. + tlb_inval_sizes = UVM_PAGE_SIZE_2M; + if (gpu_state->page_table_range_big.table) + tlb_inval_sizes |= UVM_PAGE_SIZE_64K; + + // Strictly-speaking we only need to invalidate those 4k ranges which are + // not covered by a big pte. However, any such invalidate will require + // enough 4k invalidates to force the TLB batching to invalidate everything + // anyway, so just do the simpler thing. + if (!bitmap_full(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) + tlb_inval_sizes |= UVM_PAGE_SIZE_4K; + + uvm_tlb_batch_begin(tree, tlb_batch); + uvm_tlb_batch_invalidate(tlb_batch, block->start, UVM_PAGE_SIZE_2M, tlb_inval_sizes, UVM_MEMBAR_NONE); + uvm_tlb_batch_end(tlb_batch, push, tlb_membar); + + // As a guard against bad PTE writes/TLB invalidates, fill the now-unused + // PTEs with a pattern which will trigger fatal faults on access. We have to + // do this after the TLB invalidate of the 2M entry, or the GPU might use + // the new values. + if (UVM_IS_DEBUG()) { + uvm_pte_batch_begin(push, pte_batch); + + if (gpu_state->page_table_range_big.table) { + block_gpu_pte_clear_big(block, + gpu, + NULL, + tree->hal->poisoned_pte(), + pte_batch, + NULL); + } + + if (gpu_state->page_table_range_4k.table) { + block_gpu_pte_clear_4k(block, + gpu, + NULL, + tree->hal->poisoned_pte(), + pte_batch, + NULL); + } + + uvm_pte_batch_end(pte_batch); + } +} + +static uvm_membar_t block_pte_op_membar(block_pte_op_t pte_op, uvm_gpu_t *gpu, uvm_processor_id_t resident_id) +{ + // Permissions upgrades (MAP) don't need membars + if (pte_op == BLOCK_PTE_OP_MAP) + return UVM_MEMBAR_NONE; + + UVM_ASSERT(UVM_ID_IS_VALID(resident_id)); + UVM_ASSERT(pte_op == BLOCK_PTE_OP_REVOKE); + + // Permissions downgrades always need a membar on TLB invalidate. If the + // mapped memory was local, we only need a GPU-local membar. + if (uvm_id_equal(gpu->id, resident_id)) + return UVM_MEMBAR_GPU; + + // Otherwise, remote memory needs a sysmembar + return UVM_MEMBAR_SYS; +} + +// Write the 2M PTE for {block, gpu} to the memory on resident_id with new_prot +// permissions. If the 2M entry is currently a PDE, it is first merged into a +// PTE. +// +// new_prot must not be UVM_PROT_NONE: use block_gpu_unmap_to_2m instead. 
+// +// pte_op specifies whether this is a MAP or REVOKE operation, which determines +// the TLB membar required. +static void block_gpu_map_to_2m(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + uvm_processor_id_t resident_id, + uvm_prot_t new_prot, + uvm_push_t *push, + block_pte_op_t pte_op) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_gpu_va_space_t *gpu_va_space = uvm_va_block_get_gpu_va_space(block, gpu); + uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch; + uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch; + uvm_membar_t tlb_membar; + + UVM_ASSERT(new_prot != UVM_PROT_NONE); + + // If we have a mix of big and 4k PTEs, we have to first merge them to an + // invalid 2M PTE. + if (!gpu_state->pte_is_2m) { + block_gpu_pte_merge_2m(block, block_context, gpu, push, UVM_MEMBAR_NONE); + + gpu_state->pte_is_2m = true; + bitmap_zero(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + } + + // Write the new permissions + uvm_pte_batch_begin(push, pte_batch); + uvm_tlb_batch_begin(&gpu_va_space->page_tables, tlb_batch); + + block_gpu_pte_write_2m(block, gpu, resident_id, new_prot, pte_batch, tlb_batch); + + uvm_pte_batch_end(pte_batch); + + tlb_membar = block_pte_op_membar(pte_op, gpu, resident_id); + uvm_tlb_batch_end(tlb_batch, push, tlb_membar); +} + +// Combination split + map operation, called when only part of a 2M PTE mapping +// is being changed. This splits an existing valid or invalid 2M PTE into the +// mix of big and 4k PTEs described by block_context->mapping.new_pte_state. +// +// The PTEs covering the pages in pages_to_write are written to the memory on +// resident_id with new_prot permissions. new_prot must not be UVM_PROT_NONE. +// +// The PTEs covering the pages not set in pages_to_write inherit the mapping of +// the current 2M PTE. If the current mapping is valid, it must target +// resident_id. +// +// pte_op specifies whether this is a MAP or REVOKE operation, which determines +// the TLB membar required. +static void block_gpu_map_split_2m(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + uvm_processor_id_t resident_id, + const uvm_page_mask_t *pages_to_write, + uvm_prot_t new_prot, + uvm_push_t *push, + block_pte_op_t pte_op) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state; + uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch; + uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch; + uvm_prot_t curr_prot = block_page_prot_gpu(block, gpu, 0); + uvm_membar_t tlb_membar; + DECLARE_BITMAP(big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + DECLARE_BITMAP(big_ptes_inherit, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + DECLARE_BITMAP(big_ptes_new_prot, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + UVM_ASSERT(gpu_state->pte_is_2m); + + if (!gpu_state->page_table_range_4k.table) + UVM_ASSERT(bitmap_full(new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + + uvm_pte_batch_begin(push, pte_batch); + + // Since the 2M entry is active as a PTE, the GPU MMU can't fetch entries + // from the lower levels. This means we don't need to issue a TLB invalidate + // when writing those levels. 
+ + // Cases to handle: + // 1) Big PTEs which inherit curr_prot + // 2) Big PTEs which get new_prot + // 3) Big PTEs which are split to 4k + // a) 4k PTEs which inherit curr_prot under the split big PTEs + // b) 4k PTEs which get new_prot under the split big PTEs + + // Compute the big PTEs which will need to be split to 4k, if any. + bitmap_complement(big_ptes_split, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + if (gpu_state->page_table_range_big.table) { + // Case 1: Write the big PTEs which will inherit the 2M permissions, if + // any. These are the big PTEs which are unchanged (uncovered) by the + // operation. + bitmap_andnot(big_ptes_inherit, + new_pte_state->big_ptes, + new_pte_state->big_ptes_covered, + MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + if (curr_prot == UVM_PROT_NONE) { + block_gpu_pte_clear_big(block, + gpu, + big_ptes_inherit, + tree->hal->unmapped_pte(UVM_PAGE_SIZE_64K), + pte_batch, + NULL); + } + else { + block_gpu_pte_write_big(block, gpu, resident_id, curr_prot, big_ptes_inherit, pte_batch, NULL); + } + + // Case 2: Write the new big PTEs + bitmap_and(big_ptes_new_prot, + new_pte_state->big_ptes, + new_pte_state->big_ptes_covered, + MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + block_gpu_pte_write_big(block, gpu, resident_id, new_prot, big_ptes_new_prot, pte_batch, NULL); + + // Case 3: Write the big PTEs which cover 4k PTEs + block_gpu_pte_clear_big(block, gpu, big_ptes_split, 0, pte_batch, NULL); + + // We just wrote all possible big PTEs, so mark them as initialized + gpu_state->initialized_big = true; + } + else { + UVM_ASSERT(bitmap_empty(new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + } + + // Cases 3a and 3b: Write all 4k PTEs under all now-split big PTEs + block_gpu_pte_big_split_write_4k(block, + block_context, + gpu, + resident_id, + new_prot, + big_ptes_split, + pages_to_write, + pte_batch); + + // Activate the 2M PDE. This ends the pte_batch and issues a single TLB + // invalidate for the 2M entry. + tlb_membar = block_pte_op_membar(pte_op, gpu, resident_id); + block_gpu_pte_finish_split_2m(block, gpu, push, pte_batch, tlb_batch, tlb_membar); + + gpu_state->pte_is_2m = false; + bitmap_copy(gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); +} + +// Split the existing 2M PTE into big and 4k PTEs. No permissions are changed. +// +// new_big_ptes specifies which PTEs should be big. NULL means all PTEs should +// be 4k. 
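+// The case analysis in block_gpu_map_split_2m() above reduces to three bitmask
+// computations over the requested big PTEs and the pages covered by the
+// operation. A self-contained sketch over plain unsigned longs (hypothetical
+// example_* names; the driver uses DECLARE_BITMAP bitmaps of
+// MAX_BIG_PAGES_PER_UVM_VA_BLOCK bits):
+typedef struct {
+    unsigned long inherit;   // big PTEs kept with the current 2M permissions
+    unsigned long new_prot;  // big PTEs rewritten with the new permissions
+    unsigned long split;     // regions that fall back to 4k PTEs
+} example_split_masks_t;
+
+static example_split_masks_t example_compute_split_masks(unsigned long want_big, unsigned long covered)
+{
+    example_split_masks_t m;
+
+    m.inherit  = want_big & ~covered;
+    m.new_prot = want_big & covered;
+    m.split    = ~want_big;
+
+    return m;
+}
+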
+static void block_gpu_split_2m(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + const unsigned long *new_big_ptes, + uvm_push_t *push) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch; + uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch; + uvm_prot_t curr_prot = block_page_prot_gpu(block, gpu, 0); + DECLARE_BITMAP(new_big_ptes_local, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + DECLARE_BITMAP(big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + NvU64 unmapped_pte_val; + uvm_processor_id_t curr_residency; + + UVM_ASSERT(gpu_state->pte_is_2m); + + if (new_big_ptes) + bitmap_copy(new_big_ptes_local, new_big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + else + bitmap_zero(new_big_ptes_local, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + if (!bitmap_empty(new_big_ptes_local, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) + UVM_ASSERT(gpu_state->page_table_range_big.table); + + // We're splitting from 2M to big only, so we'll be writing all big PTEs + if (gpu_state->page_table_range_big.table) + gpu_state->initialized_big = true; + + // Cases to handle: + // 1) Big PTEs which inherit curr_prot + // 2) Big PTEs which are split to 4k + // a) 4k PTEs inherit curr_prot under the split big PTEs + + // big_ptes_split will cover the 4k regions + bitmap_complement(big_ptes_split, new_big_ptes_local, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + uvm_page_mask_init_from_big_ptes(block, gpu, &block_context->scratch_page_mask, big_ptes_split); + + uvm_pte_batch_begin(push, pte_batch); + + // Since the 2M entry is active as a PTE, the GPU MMU can't fetch entries + // from the lower levels. This means we don't need to issue a TLB invalidate + // when writing those levels. + + if (curr_prot == UVM_PROT_NONE) { + unmapped_pte_val = tree->hal->unmapped_pte(tree->big_page_size); + + // Case 2a: Clear the 4k PTEs under big_ptes_split + block_gpu_pte_clear_4k(block, gpu, &block_context->scratch_page_mask, 0, pte_batch, NULL); + + // Case 1: Make the remaining big PTEs unmapped + block_gpu_pte_clear_big(block, gpu, new_big_ptes_local, unmapped_pte_val, pte_batch, NULL); + } + else { + curr_residency = block_gpu_get_processor_to_map(block, gpu, 0); + + // Case 2a: Write the new 4k PTEs under big_ptes_split + block_gpu_pte_write_4k(block, + gpu, + curr_residency, + curr_prot, + &block_context->scratch_page_mask, + pte_batch, + NULL); + + // Case 1: Write the new big PTEs + block_gpu_pte_write_big(block, gpu, curr_residency, curr_prot, new_big_ptes_local, pte_batch, NULL); + } + + // Case 2: Make big_ptes_split invalid to activate the 4k PTEs + if (gpu_state->page_table_range_big.table) + block_gpu_pte_clear_big(block, gpu, big_ptes_split, 0, pte_batch, NULL); + + // Activate the 2M PDE. This ends the pte_batch and issues a single TLB + // invalidate for the 2M entry. No membar is necessary since we aren't + // changing permissions. + block_gpu_pte_finish_split_2m(block, gpu, push, pte_batch, tlb_batch, UVM_MEMBAR_NONE); + + gpu_state->pte_is_2m = false; + bitmap_copy(gpu_state->big_ptes, new_big_ptes_local, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); +} + +// Split the big PTEs in big_ptes_to_split into 4k PTEs. No permissions are +// changed. +// +// big_ptes_to_split must not be NULL. 
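+//
+// Unlike block_gpu_split_2m, the 2M entry must not be active as a PTE here;
+// only the big PTEs in big_ptes_to_split and the 4k PTEs beneath them are
+// rewritten.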
+static void block_gpu_split_big(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + const unsigned long *big_ptes_to_split, + uvm_push_t *push) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch; + uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch; + NvU32 big_page_size = tree->big_page_size; + uvm_va_block_region_t big_region; + uvm_processor_id_t resident_id; + size_t big_page_index; + uvm_prot_t curr_prot; + DECLARE_BITMAP(big_ptes_valid, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + UVM_ASSERT(!gpu_state->pte_is_2m); + UVM_ASSERT(bitmap_subset(big_ptes_to_split, gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + UVM_ASSERT(!bitmap_empty(big_ptes_to_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + + uvm_pte_batch_begin(push, pte_batch); + uvm_tlb_batch_begin(tree, tlb_batch); + + // Write all 4k PTEs under all big PTEs which are being split. We'll make + // the big PTEs inactive below after flushing these writes. No TLB + // invalidate is needed since the big PTE is active. + bitmap_zero(big_ptes_valid, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + for_each_set_bit(big_page_index, big_ptes_to_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) { + big_region = uvm_va_block_big_page_region(block, big_page_index, big_page_size); + curr_prot = block_page_prot_gpu(block, gpu, big_region.first); + + uvm_page_mask_zero(&block_context->scratch_page_mask); + uvm_page_mask_region_fill(&block_context->scratch_page_mask, big_region); + if (curr_prot == UVM_PROT_NONE) { + block_gpu_pte_clear_4k(block, gpu, &block_context->scratch_page_mask, 0, pte_batch, NULL); + } + else { + __set_bit(big_page_index, big_ptes_valid); + + resident_id = block_gpu_get_processor_to_map(block, gpu, big_region.first); + + block_gpu_pte_write_4k(block, + gpu, + resident_id, + curr_prot, + &block_context->scratch_page_mask, + pte_batch, + NULL); + } + } + + // Unmap the big PTEs which are valid and are being split to 4k. We can't + // directly transition from a valid big PTE to valid lower PTEs, because + // that could cause the GPU TLBs to cache the same VA in different cache + // lines. That could cause memory ordering to not be maintained. + block_gpu_pte_clear_big(block, gpu, big_ptes_valid, tree->hal->unmapped_pte(big_page_size), pte_batch, tlb_batch); + + // End the batches. We have to commit the membars and TLB invalidates + // before we finish splitting formerly-big PTEs. No membar is necessary + // since we aren't changing permissions. + uvm_pte_batch_end(pte_batch); + uvm_tlb_batch_end(tlb_batch, push, UVM_MEMBAR_NONE); + + // Finish the split by switching the big PTEs from unmapped to invalid. This + // causes the GPU MMU to start reading the 4k PTEs instead of stopping at + // the unmapped big PTEs. 
+    uvm_pte_batch_begin(push, pte_batch);
+    uvm_tlb_batch_begin(tree, tlb_batch);
+
+    block_gpu_pte_clear_big(block, gpu, big_ptes_to_split, 0, pte_batch, tlb_batch);
+
+    uvm_pte_batch_end(pte_batch);
+
+    // Finally, activate the page tables if they're inactive
+    if (block_gpu_needs_to_activate_table(block, gpu))
+        block_gpu_write_pde(block, gpu, push, tlb_batch);
+
+    uvm_tlb_batch_end(tlb_batch, push, UVM_MEMBAR_NONE);
+
+    bitmap_andnot(gpu_state->big_ptes, gpu_state->big_ptes, big_ptes_to_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+}
+
+// Changes permissions on some pre-existing mix of big and 4k PTEs into some
+// other mix of big and 4k PTEs, as described by
+// block_context->mapping.new_pte_state.
+//
+// The PTEs covering the pages in pages_to_write are written to the memory on
+// resident_id with new_prot permissions. new_prot must not be UVM_PROT_NONE.
+//
+// pte_op specifies whether this is a MAP or REVOKE operation, which determines
+// the TLB membar required.
+static void block_gpu_map_big_and_4k(uvm_va_block_t *block,
+                                     uvm_va_block_context_t *block_context,
+                                     uvm_gpu_t *gpu,
+                                     uvm_processor_id_t resident_id,
+                                     const uvm_page_mask_t *pages_to_write,
+                                     uvm_prot_t new_prot,
+                                     uvm_push_t *push,
+                                     block_pte_op_t pte_op)
+{
+    uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id);
+    uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables;
+    uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state;
+    uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch;
+    uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch;
+    DECLARE_BITMAP(big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+    DECLARE_BITMAP(big_ptes_before_or_after, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+    DECLARE_BITMAP(big_ptes_merge, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+    DECLARE_BITMAP(big_ptes_mask, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+    uvm_va_block_region_t big_region;
+    size_t big_page_index;
+    NvU32 big_page_size = tree->big_page_size;
+    uvm_membar_t tlb_membar = block_pte_op_membar(pte_op, gpu, resident_id);
+
+    UVM_ASSERT(!gpu_state->pte_is_2m);
+
+    uvm_pte_batch_begin(push, pte_batch);
+    uvm_tlb_batch_begin(tree, tlb_batch);
+
+    // All of these cases might be performed in the same call:
+    // 1) Split currently-big PTEs to 4k
+    //    a) Write new 4k PTEs which inherit curr_prot under the split big PTEs
+    //    b) Write new 4k PTEs which get new_prot under the split big PTEs
+    // 2) Merge currently-4k PTEs to big with new_prot
+    // 3) Write currently-big PTEs which wholly get new_prot
+    // 4) Write currently-4k PTEs which get new_prot
+    // 5) Initialize big PTEs which are not covered by this operation
+
+    // Cases 1a and 1b: Write all 4k PTEs under all currently-big PTEs which are
+    // being split. We'll make the big PTEs inactive below after flushing these
+    // writes. No TLB invalidate is needed since the big PTE is active.
+    //
+    // Mask computation: big_before && !big_after
+    bitmap_andnot(big_ptes_split, gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+
+    block_gpu_pte_big_split_write_4k(block,
+                                     block_context,
+                                     gpu,
+                                     resident_id,
+                                     new_prot,
+                                     big_ptes_split,
+                                     pages_to_write,
+                                     pte_batch);
+
+    // Case 4: Write the 4k PTEs which weren't covered by a big PTE before, and
+    // remain uncovered after the operation.
+ // + // Mask computation: !big_before && !big_after + bitmap_or(big_ptes_before_or_after, gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + uvm_page_mask_init_from_big_ptes(block, gpu, &block_context->scratch_page_mask, big_ptes_before_or_after); + if (uvm_page_mask_andnot(&block_context->scratch_page_mask, pages_to_write, &block_context->scratch_page_mask)) { + block_gpu_pte_write_4k(block, + gpu, + resident_id, + new_prot, + &block_context->scratch_page_mask, + pte_batch, + tlb_batch); + } + + // Case 5: If the big page table is newly-allocated, make sure that all big + // PTEs we aren't otherwise writing (that is, those which cover 4k PTEs) are + // all initialized to invalid. + // + // The similar case of making newly-allocated big PTEs unmapped when no + // lower 4k table is present is handled by having + // block_gpu_compute_new_pte_state set new_pte_state->big_ptes + // appropriately. + if (gpu_state->page_table_range_big.table && !gpu_state->initialized_big) { + // TODO: Bug 1766424: If we have the 4k page table already, we could + // attempt to merge all uncovered big PTE regions when first + // allocating the big table. That's probably not worth doing. + UVM_ASSERT(gpu_state->page_table_range_4k.table); + UVM_ASSERT(bitmap_empty(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + bitmap_complement(big_ptes_mask, new_pte_state->big_ptes, uvm_va_block_num_big_pages(block, big_page_size)); + block_gpu_pte_clear_big(block, gpu, big_ptes_mask, 0, pte_batch, tlb_batch); + gpu_state->initialized_big = true; + } + + // Case 1 (step 1): Unmap the currently-big PTEs which are valid and are + // being split to 4k. We can't directly transition from a valid big PTE to + // valid lower PTEs, because that could cause the GPU TLBs to cache the same + // VA in different cache lines. That could cause memory ordering to not be + // maintained. + bitmap_zero(big_ptes_mask, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + for_each_set_bit(big_page_index, big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) { + big_region = uvm_va_block_big_page_region(block, big_page_index, big_page_size); + if (uvm_page_mask_test(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ], big_region.first)) + __set_bit(big_page_index, big_ptes_mask); + } + + block_gpu_pte_clear_big(block, gpu, big_ptes_mask, tree->hal->unmapped_pte(big_page_size), pte_batch, tlb_batch); + + // Case 3: Write the currently-big PTEs which remain big PTEs, and are + // wholly changing permissions. + // + // Mask computation: big_before && big_after && covered + bitmap_and(big_ptes_mask, gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + if (bitmap_and(big_ptes_mask, big_ptes_mask, new_pte_state->big_ptes_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) + block_gpu_pte_write_big(block, gpu, resident_id, new_prot, big_ptes_mask, pte_batch, tlb_batch); + + // Case 2 (step 1): Merge the new big PTEs and end the batches, now that + // we've done all of the independent PTE writes we can. This also merges + // newly-allocated uncovered big PTEs to unmapped (see + // block_gpu_compute_new_pte_state). + // + // Mask computation: !big_before && big_after + if (bitmap_andnot(big_ptes_merge, new_pte_state->big_ptes, gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) { + // This writes the newly-big PTEs to unmapped and ends the PTE and TLB + // batches. + block_gpu_pte_merge_big_and_end(block, + block_context, + gpu, + big_ptes_merge, + push, + pte_batch, + tlb_batch, + tlb_membar); + + // Remove uncovered big PTEs. 
We needed to merge them to unmapped above, + // but they shouldn't get new_prot below. + bitmap_and(big_ptes_merge, big_ptes_merge, new_pte_state->big_ptes_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + } + else { + // End the batches. We have to commit the membars and TLB invalidates + // before we finish splitting formerly-big PTEs. + uvm_pte_batch_end(pte_batch); + uvm_tlb_batch_end(tlb_batch, push, tlb_membar); + } + + if (!bitmap_empty(big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) || + !bitmap_empty(big_ptes_merge, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) || + block_gpu_needs_to_activate_table(block, gpu)) { + + uvm_pte_batch_begin(push, pte_batch); + uvm_tlb_batch_begin(tree, tlb_batch); + + // Case 1 (step 2): Finish splitting our big PTEs, if we have any, by + // switching them from unmapped to invalid. This causes the GPU MMU to + // start reading the 4k PTEs instead of stopping at the unmapped big + // PTEs. + block_gpu_pte_clear_big(block, gpu, big_ptes_split, 0, pte_batch, tlb_batch); + + // Case 2 (step 2): Finish merging our big PTEs, if we have any, by + // switching them from unmapped to new_prot. + block_gpu_pte_write_big(block, gpu, resident_id, new_prot, big_ptes_merge, pte_batch, tlb_batch); + + uvm_pte_batch_end(pte_batch); + + // Finally, activate the page tables if they're inactive + if (block_gpu_needs_to_activate_table(block, gpu)) + block_gpu_write_pde(block, gpu, push, tlb_batch); + + uvm_tlb_batch_end(tlb_batch, push, UVM_MEMBAR_NONE); + } + + // Update gpu_state + bitmap_copy(gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); +} + +// Unmap all PTEs for {block, gpu}. If the 2M entry is currently a PDE, it is +// merged into a PTE. +static void block_gpu_unmap_to_2m(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + uvm_push_t *push, + uvm_membar_t tlb_membar) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_gpu_va_space_t *gpu_va_space = uvm_va_block_get_gpu_va_space(block, gpu); + uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch; + uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch; + + if (gpu_state->pte_is_2m) { + // If we're already mapped as a valid 2M PTE, just write it to invalid + uvm_pte_batch_begin(push, pte_batch); + uvm_tlb_batch_begin(&gpu_va_space->page_tables, tlb_batch); + + block_gpu_pte_clear_2m(block, gpu, pte_batch, tlb_batch); + + uvm_pte_batch_end(pte_batch); + uvm_tlb_batch_end(tlb_batch, push, tlb_membar); + } + else { + // Otherwise we have a mix of big and 4K PTEs which need to be merged + // into an invalid 2M PTE. + block_gpu_pte_merge_2m(block, block_context, gpu, push, tlb_membar); + + gpu_state->pte_is_2m = true; + bitmap_zero(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + } +} + +// Combination split + unmap operation, called when only part of a valid 2M PTE +// mapping is being unmapped. The 2M PTE is split into a mix of valid and +// invalid big and/or 4k PTEs, as described by +// block_context->mapping.new_pte_state. +// +// The PTEs covering the pages in pages_to_unmap are cleared (unmapped). +// +// The PTEs covering the pages not set in pages_to_unmap inherit the mapping of +// the current 2M PTE. 
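+//
+// This is the unmap counterpart of block_gpu_map_split_2m: uncovered big and
+// 4k PTEs are written with the current 2M permissions and residency, while the
+// covered ones are written as unmapped.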
+static void block_gpu_unmap_split_2m(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + const uvm_page_mask_t *pages_to_unmap, + uvm_push_t *push, + uvm_membar_t tlb_membar) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables; + uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state; + uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch; + uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch; + uvm_prot_t curr_prot = block_page_prot_gpu(block, gpu, 0); + uvm_processor_id_t resident_id; + DECLARE_BITMAP(big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + DECLARE_BITMAP(big_ptes_inherit, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + DECLARE_BITMAP(big_ptes_new_prot, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + UVM_ASSERT(gpu_state->pte_is_2m); + + resident_id = block_gpu_get_processor_to_map(block, gpu, 0); + + uvm_pte_batch_begin(push, pte_batch); + + // Since the 2M entry is active as a PTE, the GPU MMU can't fetch entries + // from the lower levels. This means we don't need to issue a TLB invalidate + // when writing those levels. + + // Cases to handle: + // 1) Big PTEs which inherit curr_prot + // 2) Big PTEs which get unmapped + // 3) Big PTEs which are split to 4k + // a) 4k PTEs which inherit curr_prot under the split big PTEs + // b) 4k PTEs which get unmapped under the split big PTEs + + // Compute the big PTEs which will need to be split to 4k, if any. + bitmap_complement(big_ptes_split, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + if (gpu_state->page_table_range_big.table) { + // Case 1: Write the big PTEs which will inherit the 2M permissions, if + // any. These are the big PTEs which are unchanged (uncovered) by the + // operation. + bitmap_andnot(big_ptes_inherit, + new_pte_state->big_ptes, + new_pte_state->big_ptes_covered, + MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + block_gpu_pte_write_big(block, gpu, resident_id, curr_prot, big_ptes_inherit, pte_batch, NULL); + + // Case 2: Clear the new big PTEs which get unmapped (those not covering + // 4ks) + bitmap_and(big_ptes_new_prot, + new_pte_state->big_ptes, + new_pte_state->big_ptes_covered, + MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + block_gpu_pte_clear_big(block, + gpu, + big_ptes_new_prot, + tree->hal->unmapped_pte(UVM_PAGE_SIZE_64K), + pte_batch, + NULL); + + // Case 3: Write the big PTEs which cover 4k PTEs + block_gpu_pte_clear_big(block, gpu, big_ptes_split, 0, pte_batch, NULL); + + // We just wrote all possible big PTEs, so mark them as initialized + gpu_state->initialized_big = true; + } + else { + UVM_ASSERT(bitmap_empty(new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + UVM_ASSERT(bitmap_full(new_pte_state->big_ptes_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + } + + // Cases 3a and 3b: Write all 4k PTEs under all now-split big PTEs + block_gpu_pte_big_split_write_4k(block, + block_context, + gpu, + resident_id, + UVM_PROT_NONE, + big_ptes_split, + pages_to_unmap, + pte_batch); + + // And activate the 2M PDE. This ends the pte_batch and issues a single TLB + // invalidate for the 2M entry. + block_gpu_pte_finish_split_2m(block, gpu, push, pte_batch, tlb_batch, tlb_membar); + + gpu_state->pte_is_2m = false; + bitmap_copy(gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); +} + +// Unmap some pre-existing mix of big and 4k PTEs into some other mix of big +// and 4k PTEs. 
+//
+// The PTEs covering the pages in pages_to_unmap are cleared (unmapped).
+static void block_gpu_unmap_big_and_4k(uvm_va_block_t *block,
+                                       uvm_va_block_context_t *block_context,
+                                       uvm_gpu_t *gpu,
+                                       const uvm_page_mask_t *pages_to_unmap,
+                                       uvm_push_t *push,
+                                       uvm_membar_t tlb_membar)
+{
+    uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id);
+    uvm_page_tree_t *tree = &uvm_va_block_get_gpu_va_space(block, gpu)->page_tables;
+    uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state;
+    uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch;
+    uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch;
+    DECLARE_BITMAP(big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+    DECLARE_BITMAP(big_ptes_before_or_after, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+    DECLARE_BITMAP(big_ptes_mask, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+    NvU32 big_page_size = tree->big_page_size;
+    NvU64 unmapped_pte_val = tree->hal->unmapped_pte(big_page_size);
+
+    UVM_ASSERT(!gpu_state->pte_is_2m);
+
+    uvm_pte_batch_begin(push, pte_batch);
+    uvm_tlb_batch_begin(tree, tlb_batch);
+
+    // All of these cases might be performed in the same call:
+    // 1) Split currently-big PTEs to 4k
+    //    a) Write new 4k PTEs which inherit curr_prot under the split big PTEs
+    //    b) Clear new 4k PTEs which get unmapped under the split big PTEs
+    // 2) Merge currently-4k PTEs to unmapped big
+    // 3) Clear currently-big PTEs which wholly get unmapped
+    // 4) Clear currently-4k PTEs which get unmapped
+    // 5) Initialize big PTEs which are not covered by this operation
+
+    // Cases 1a and 1b: Write all 4k PTEs under all currently-big PTEs which are
+    // being split. We'll make the big PTEs inactive below after flushing these
+    // writes. No TLB invalidate is needed since the big PTE is active.
+    //
+    // Mask computation: big_before && !big_after
+    bitmap_andnot(big_ptes_split, gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+
+    block_gpu_pte_big_split_write_4k(block,
+                                     block_context,
+                                     gpu,
+                                     UVM_ID_INVALID,
+                                     UVM_PROT_NONE,
+                                     big_ptes_split,
+                                     pages_to_unmap,
+                                     pte_batch);
+
+    // Case 4: Clear the 4k PTEs which weren't covered by a big PTE before, and
+    // remain uncovered after the unmap.
+    //
+    // Mask computation: !big_before && !big_after
+    bitmap_or(big_ptes_before_or_after, gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK);
+    uvm_page_mask_init_from_big_ptes(block, gpu, &block_context->scratch_page_mask, big_ptes_before_or_after);
+    if (uvm_page_mask_andnot(&block_context->scratch_page_mask, pages_to_unmap, &block_context->scratch_page_mask))
+        block_gpu_pte_clear_4k(block, gpu, &block_context->scratch_page_mask, 0, pte_batch, tlb_batch);
+
+    // Case 5: If the big page table is newly-allocated, make sure that all big
+    // PTEs we aren't otherwise writing (that is, those which cover 4k PTEs) are
+    // all initialized to invalid.
+    //
+    // The similar case of making newly-allocated big PTEs unmapped when no
+    // lower 4k table is present is handled by having
+    // block_gpu_compute_new_pte_state set new_pte_state->big_ptes
+    // appropriately.
+    if (gpu_state->page_table_range_big.table && !gpu_state->initialized_big) {
+        // TODO: Bug 1766424: If we have the 4k page table already, we could
+        //       attempt to merge all uncovered big PTE regions when first
+        //       allocating the big table. That's probably not worth doing.
+ UVM_ASSERT(gpu_state->page_table_range_4k.table); + UVM_ASSERT(bitmap_empty(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + bitmap_complement(big_ptes_mask, new_pte_state->big_ptes, uvm_va_block_num_big_pages(block, big_page_size)); + block_gpu_pte_clear_big(block, gpu, big_ptes_mask, 0, pte_batch, tlb_batch); + gpu_state->initialized_big = true; + } + + // Case 3 and step 1 of case 1: Unmap both currently-big PTEs which are + // getting wholly unmapped, and those currently-big PTEs which are being + // split to 4k. We can't directly transition from a valid big PTE to valid + // lower PTEs, because that could cause the GPU TLBs to cache the same VA in + // different cache lines. That could cause memory ordering to not be + // maintained. + // + // Mask computation: (big_before && big_after && covered) || + // (big_before && !big_after) + bitmap_and(big_ptes_mask, gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + bitmap_and(big_ptes_mask, big_ptes_mask, new_pte_state->big_ptes_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + bitmap_or(big_ptes_mask, big_ptes_mask, big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + block_gpu_pte_clear_big(block, gpu, big_ptes_mask, unmapped_pte_val, pte_batch, tlb_batch); + + // Case 2: Merge the new big PTEs and end the batches, now that we've done + // all of the independent PTE writes we can. + // + // Mask computation: !big_before && big_after + if (bitmap_andnot(big_ptes_mask, new_pte_state->big_ptes, gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) { + // This writes the newly-big PTEs to unmapped and ends the PTE and TLB + // batches. + block_gpu_pte_merge_big_and_end(block, + block_context, + gpu, + big_ptes_mask, + push, + pte_batch, + tlb_batch, + tlb_membar); + } + else { + // End the batches. We have to commit the membars and TLB invalidates + // before we finish splitting formerly-big PTEs. + uvm_pte_batch_end(pte_batch); + uvm_tlb_batch_end(tlb_batch, push, tlb_membar); + } + + if (!bitmap_empty(big_ptes_split, MAX_BIG_PAGES_PER_UVM_VA_BLOCK) || + block_gpu_needs_to_activate_table(block, gpu)) { + uvm_pte_batch_begin(push, pte_batch); + uvm_tlb_batch_begin(tree, tlb_batch); + + // Case 1 (step 2): Finish splitting our big PTEs, if we have any, by + // switching them from unmapped to invalid. This causes the GPU MMU to + // start reading the 4k PTEs instead of stopping at the unmapped big + // PTEs. + block_gpu_pte_clear_big(block, gpu, big_ptes_split, 0, pte_batch, tlb_batch); + + uvm_pte_batch_end(pte_batch); + + // Finally, activate the page tables if they're inactive + if (block_gpu_needs_to_activate_table(block, gpu)) + block_gpu_write_pde(block, gpu, push, tlb_batch); + + uvm_tlb_batch_end(tlb_batch, push, UVM_MEMBAR_NONE); + } + + // Update gpu_state + bitmap_copy(gpu_state->big_ptes, new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); +} + +// When PTE state is about to change (for example due to a map/unmap/revoke +// operation), this function decides how to split and merge the PTEs in response +// to that operation. +// +// The operation is described with the two page masks: +// +// - pages_changing indicates which pages will have their PTE mappings changed +// on the GPU in some way as a result of the operation (for example, which +// pages will actually have their mapping permissions upgraded). +// +// - page_mask_after indicates which pages on this GPU will have exactly the +// same PTE attributes (permissions, residency) as pages_changing after the +// operation is applied. 
+// +// PTEs are merged eagerly. +static void block_gpu_compute_new_pte_state(uvm_va_block_t *block, + uvm_gpu_t *gpu, + uvm_processor_id_t resident_id, + const uvm_page_mask_t *pages_changing, + const uvm_page_mask_t *page_mask_after, + uvm_va_block_new_pte_state_t *new_pte_state) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_va_block_region_t big_region_all, big_page_region, region; + NvU32 big_page_size; + uvm_page_index_t page_index; + size_t big_page_index; + DECLARE_BITMAP(big_ptes_not_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + bool can_make_new_big_ptes, region_full; + + memset(new_pte_state, 0, sizeof(*new_pte_state)); + new_pte_state->needs_4k = true; + + // TODO: Bug 1676485: Force a specific page size for perf testing + + if (gpu_state->force_4k_ptes) + return; + + UVM_ASSERT(uvm_page_mask_subset(pages_changing, page_mask_after)); + + if (block_gpu_supports_2m(block, gpu)) { + // If all pages in the 2M mask have the same attributes after the + // operation is applied, we can use a 2M PTE. + if (uvm_page_mask_full(page_mask_after) && + (!UVM_ID_IS_CPU(resident_id) || is_block_phys_contig(block, UVM_ID_CPU))) { + new_pte_state->pte_is_2m = true; + new_pte_state->needs_4k = false; + return; + } + } + + // Find big PTEs with matching attributes + + // Can this block fit any big pages? + big_page_size = uvm_va_block_gpu_big_page_size(block, gpu); + big_region_all = uvm_va_block_big_page_region_all(block, big_page_size); + if (big_region_all.first >= big_region_all.outer) + return; + + new_pte_state->needs_4k = false; + + can_make_new_big_ptes = true; + + // Big pages can be used when mapping sysmem if the GPU supports it (Pascal+). + if (UVM_ID_IS_CPU(resident_id) && !gpu->parent->can_map_sysmem_with_large_pages) + can_make_new_big_ptes = false; + + // We must not fail during teardown: unmap (resident_id == UVM_ID_INVALID) + // with no splits required. That means we should avoid allocating PTEs + // which are only needed for merges. + // + // This only matters if we're merging to big PTEs. If we're merging to 2M, + // then we must already have the 2M level (since it has to be allocated + // before the lower levels). + // + // If pte_is_2m already and we don't have a big table, we're splitting so we + // have to allocate. + if (UVM_ID_IS_INVALID(resident_id) && !gpu_state->page_table_range_big.table && !gpu_state->pte_is_2m) + can_make_new_big_ptes = false; + + for_each_va_block_page_in_region_mask(page_index, pages_changing, big_region_all) { + uvm_va_block_region_t contig_region = {0}; + + big_page_index = uvm_va_block_big_page_index(block, page_index, big_page_size); + big_page_region = uvm_va_block_big_page_region(block, big_page_index, big_page_size); + + if (!UVM_ID_IS_INVALID(resident_id)) + contig_region = block_phys_contig_region(block, page_index, resident_id); + + __set_bit(big_page_index, new_pte_state->big_ptes_covered); + + region_full = uvm_page_mask_region_full(page_mask_after, big_page_region); + if (region_full && UVM_ID_IS_INVALID(resident_id)) + __set_bit(big_page_index, new_pte_state->big_ptes_fully_unmapped); + + // When mapping sysmem, we can use big pages only if we are mapping all pages + // in the big page subregion and the CPU pages backing the subregion are + // physically contiguous. 
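+        //
+        // region_full (computed above) covers the first requirement; the
+        // contig_region bounds check below covers the second.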
+ if (can_make_new_big_ptes && region_full && + (!UVM_ID_IS_CPU(resident_id) || + (contig_region.first <= big_page_region.first && contig_region.outer >= big_page_region.outer))) { + __set_bit(big_page_index, new_pte_state->big_ptes); + } + + if (!test_bit(big_page_index, new_pte_state->big_ptes)) + new_pte_state->needs_4k = true; + + // Skip to the end of the region + page_index = big_page_region.outer - 1; + } + + if (!new_pte_state->needs_4k) { + // All big page regions in pages_changing will be big PTEs. Now check if + // there are any unaligned pages outside of big_region_all which are + // changing. + region = uvm_va_block_region(0, big_region_all.first); + if (!uvm_page_mask_region_empty(pages_changing, region)) { + new_pte_state->needs_4k = true; + } + else { + region = uvm_va_block_region(big_region_all.outer, uvm_va_block_num_cpu_pages(block)); + if (!uvm_page_mask_region_empty(pages_changing, region)) + new_pte_state->needs_4k = true; + } + } + + // Now add in the PTEs which should be big but weren't covered by this + // operation. + // + // Note that we can't assume that a given page table range has been + // initialized if it's present here, since it could have been allocated by a + // thread which had to restart its operation due to allocation retry. + if (gpu_state->pte_is_2m || (block_gpu_supports_2m(block, gpu) && !gpu_state->page_table_range_2m.table)) { + // We're splitting a 2M PTE so all of the uncovered big PTE regions will + // become big PTEs which inherit the 2M permissions. If we haven't + // allocated the 2M table yet, it will start as a 2M PTE until the lower + // levels are allocated, so it's the same split case regardless of + // whether this operation will need to retry a later allocation. + bitmap_complement(big_ptes_not_covered, new_pte_state->big_ptes_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + } + else if (!gpu_state->page_table_range_4k.table && !new_pte_state->needs_4k) { + // If we don't have 4k PTEs and we won't be allocating them for this + // operation, all of our PTEs need to be big. + UVM_ASSERT(!bitmap_empty(new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + bitmap_zero(big_ptes_not_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + bitmap_set(big_ptes_not_covered, 0, uvm_va_block_num_big_pages(block, big_page_size)); + } + else { + // Otherwise, add in all of the currently-big PTEs which are unchanging. + // They won't be written, but they need to be carried into the new + // gpu_state->big_ptes when it's updated. + bitmap_andnot(big_ptes_not_covered, + gpu_state->big_ptes, + new_pte_state->big_ptes_covered, + MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + } + + bitmap_or(new_pte_state->big_ptes, new_pte_state->big_ptes, big_ptes_not_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); +} + +// Wrapper around uvm_page_tree_get_ptes() and uvm_page_tree_alloc_table() that +// handles allocation retry. If the block lock has been unlocked and relocked as +// part of the allocation, NV_ERR_MORE_PROCESSING_REQUIRED is returned to signal +// to the caller that the operation likely needs to be restarted. If that +// happens, the pending tracker is added to the block's tracker. 
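+//
+// Illustrative sketch of the expected call pattern (not a verbatim caller; see
+// block_alloc_ptes_with_retry below for the real one):
+//
+//     status = block_alloc_pt_range_with_retry(va_block, gpu, page_size,
+//                                              range, pending_tracker);
+//     if (status == NV_ERR_MORE_PROCESSING_REQUIRED) {
+//         // The block lock was dropped and re-acquired, so block state may
+//         // have changed and the overall operation should be restarted.
+//     }
+//     else if (status != NV_OK) {
+//         return status;
+//     }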
+static NV_STATUS block_alloc_pt_range_with_retry(uvm_va_block_t *va_block, + uvm_gpu_t *gpu, + NvU32 page_size, + uvm_page_table_range_t *page_table_range, + uvm_tracker_t *pending_tracker) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id); + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_gpu_va_space_t *gpu_va_space = uvm_va_block_get_gpu_va_space(va_block, gpu); + uvm_page_tree_t *page_tables = &gpu_va_space->page_tables; + uvm_va_block_test_t *va_block_test = uvm_va_block_get_test(va_block); + uvm_page_table_range_t local_range; + NV_STATUS status; + + // Blocks may contain large PTEs without starting on a PTE boundary or + // having an aligned size. Cover the PTEs of this size in the block's + // interior so we match uvm_va_block_gpu_state_t::big_ptes. + NvU64 start = UVM_ALIGN_UP(va_block->start, page_size); + NvU64 size = UVM_ALIGN_DOWN(va_block->end + 1, page_size) - start; + + // VA blocks which can use the 2MB level as either a PTE or a PDE need to + // account for the PDE specially, so they must use uvm_page_tree_alloc_table + // to allocate the lower levels. + bool use_alloc_table = block_gpu_supports_2m(va_block, gpu) && page_size < UVM_PAGE_SIZE_2M; + + uvm_assert_rwsem_locked(&va_space->lock); + UVM_ASSERT(page_table_range->table == NULL); + + if (va_block_test && va_block_test->page_table_allocation_retry_force_count > 0) { + --va_block_test->page_table_allocation_retry_force_count; + status = NV_ERR_NO_MEMORY; + } + else if (use_alloc_table) { + // Pascal+: 4k/64k tables under a 2M entry + UVM_ASSERT(gpu_state->page_table_range_2m.table); + status = uvm_page_tree_alloc_table(page_tables, + page_size, + UVM_PMM_ALLOC_FLAGS_NONE, + &gpu_state->page_table_range_2m, + page_table_range); + } + else { + // 4k/big tables on pre-Pascal, and the 2M entry on Pascal+ + status = uvm_page_tree_get_ptes(page_tables, + page_size, + start, + size, + UVM_PMM_ALLOC_FLAGS_NONE, + page_table_range); + } + + if (status == NV_OK) + goto allocated; + + if (status != NV_ERR_NO_MEMORY) + return status; + + // Before unlocking the block lock, any pending work on the block has to be + // added to the block's tracker. + if (pending_tracker) { + status = uvm_tracker_add_tracker_safe(&va_block->tracker, pending_tracker); + if (status != NV_OK) + return status; + } + + // Unlock the va block and retry with eviction enabled + uvm_mutex_unlock(&va_block->lock); + + if (use_alloc_table) { + // Although we don't hold the block lock here, it's safe to pass + // gpu_state->page_table_range_2m to the page tree code because we know + // that the 2m range has already been allocated, and that it can't go + // away while we have the va_space lock held. + status = uvm_page_tree_alloc_table(page_tables, + page_size, + UVM_PMM_ALLOC_FLAGS_EVICT, + &gpu_state->page_table_range_2m, + &local_range); + } + else { + status = uvm_page_tree_get_ptes(page_tables, + page_size, + start, + size, + UVM_PMM_ALLOC_FLAGS_EVICT, + &local_range); + } + + uvm_mutex_lock(&va_block->lock); + + if (status != NV_OK) + return status; + + status = NV_ERR_MORE_PROCESSING_REQUIRED; + + if (page_table_range->table) { + // A different caller allocated the page tables in the meantime, release the + // local copy. + uvm_page_tree_put_ptes(page_tables, &local_range); + return status; + } + + *page_table_range = local_range; + +allocated: + // Mark the 2M PTE as active when we first allocate it, since we don't have + // any PTEs below it yet. 
+ if (page_size == UVM_PAGE_SIZE_2M) { + UVM_ASSERT(!gpu_state->pte_is_2m); + gpu_state->pte_is_2m = true; + } + else if (page_size != UVM_PAGE_SIZE_4K) { + // uvm_page_tree_get_ptes initializes big PTEs to invalid. + // uvm_page_tree_alloc_table does not, so we'll have to do it later. + if (use_alloc_table) + UVM_ASSERT(!gpu_state->initialized_big); + else + gpu_state->initialized_big = true; + } + + return status; +} + +// Helper which allocates all page table ranges necessary for the given page +// sizes. See block_alloc_pt_range_with_retry. +static NV_STATUS block_alloc_ptes_with_retry(uvm_va_block_t *va_block, + uvm_gpu_t *gpu, + NvU32 page_sizes, + uvm_tracker_t *pending_tracker) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id); + uvm_gpu_va_space_t *gpu_va_space = uvm_va_block_get_gpu_va_space(va_block, gpu); + uvm_page_table_range_t *range; + NvU32 page_size; + NV_STATUS status, final_status = NV_OK; + + UVM_ASSERT(gpu_state); + + // Blocks which can map 2M PTE/PDEs must always allocate the 2MB level first + // in order to allocate the levels below. + if (block_gpu_supports_2m(va_block, gpu)) + page_sizes |= UVM_PAGE_SIZE_2M; + + UVM_ASSERT((page_sizes & gpu_va_space->page_tables.hal->page_sizes()) == page_sizes); + + for_each_chunk_size_rev(page_size, page_sizes) { + if (page_size == UVM_PAGE_SIZE_2M) + range = &gpu_state->page_table_range_2m; + else if (page_size == UVM_PAGE_SIZE_4K) + range = &gpu_state->page_table_range_4k; + else + range = &gpu_state->page_table_range_big; + + if (range->table) + continue; + + if (page_size == UVM_PAGE_SIZE_2M) { + UVM_ASSERT(!gpu_state->pte_is_2m); + UVM_ASSERT(!gpu_state->page_table_range_big.table); + UVM_ASSERT(!gpu_state->page_table_range_4k.table); + } + else if (page_size != UVM_PAGE_SIZE_4K) { + UVM_ASSERT(uvm_va_block_num_big_pages(va_block, uvm_va_block_gpu_big_page_size(va_block, gpu)) > 0); + UVM_ASSERT(bitmap_empty(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + } + + status = block_alloc_pt_range_with_retry(va_block, gpu, page_size, range, pending_tracker); + + // Keep going to allocate the remaining levels even if the allocation + // requires a retry, since we'll likely still need them when we retry + // anyway. + if (status == NV_ERR_MORE_PROCESSING_REQUIRED) + final_status = NV_ERR_MORE_PROCESSING_REQUIRED; + else if (status != NV_OK) + return status; + } + + return final_status; +} + +static NV_STATUS block_alloc_ptes_new_state(uvm_va_block_t *va_block, + uvm_gpu_t *gpu, + uvm_va_block_new_pte_state_t *new_pte_state, + uvm_tracker_t *pending_tracker) +{ + NvU32 page_sizes = 0; + + if (new_pte_state->pte_is_2m) { + page_sizes |= UVM_PAGE_SIZE_2M; + } + else { + if (!bitmap_empty(new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) + page_sizes |= uvm_va_block_gpu_big_page_size(va_block, gpu); + + if (new_pte_state->needs_4k) + page_sizes |= UVM_PAGE_SIZE_4K; + else + UVM_ASSERT(!bitmap_empty(new_pte_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)); + } + + return block_alloc_ptes_with_retry(va_block, gpu, page_sizes, pending_tracker); +} + +// Make sure that GMMU PDEs down to PDE1 are populated for the given VA block. +// This is currently used on ATS systems to prevent GPUs from inadvertently +// accessing sysmem via ATS because there is no PDE1 in the GMMU page tables, +// which is where the NOATS bit resides. +// +// The current implementation simply pre-allocates the PTEs for the VA Block, +// which is wasteful because the GPU may never need them. 
+// +// TODO: Bug 2064188: Change the MMU code to be able to directly refcount PDE1 +// page table entries without having to request PTEs. +static NV_STATUS block_pre_populate_pde1_gpu(uvm_va_block_t *block, + uvm_gpu_va_space_t *gpu_va_space, + uvm_tracker_t *pending_tracker) +{ + NvU32 page_sizes = 0; + uvm_gpu_t *gpu = gpu_va_space->gpu; + uvm_va_block_gpu_state_t *gpu_state = block_gpu_state_get_alloc(block, gpu); + + UVM_ASSERT(gpu_state); + UVM_ASSERT(gpu_va_space); + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + UVM_ASSERT(gpu_va_space->ats.enabled); + + // If the VA Block supports 2M pages, allocate the 2M PTE only, as it + // requires less memory + if (block_gpu_supports_2m(block, gpu)) { + page_sizes = UVM_PAGE_SIZE_2M; + } + else { + // ATS is only enabled on P9 + Volta, therefore, PAGE_SIZE should + // be 64K and should match Volta big page size + UVM_ASSERT(uvm_va_block_gpu_big_page_size(block, gpu) == PAGE_SIZE); + page_sizes = UVM_PAGE_SIZE_64K; + } + + return block_alloc_ptes_with_retry(block, gpu, page_sizes, pending_tracker); +} + +static NV_STATUS block_pre_populate_pde1_all_gpus(uvm_va_block_t *block, uvm_tracker_t *pending_tracker) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + NV_STATUS status = NV_OK; + + // Pre-populate PDEs down to PDE1 for all GPU VA spaces on ATS systems. See + // comments in block_pre_populate_pde1_gpu. + if (g_uvm_global.ats.enabled && !block->cpu.ever_mapped) { + uvm_gpu_va_space_t *gpu_va_space; + + for_each_gpu_va_space(gpu_va_space, va_space) { + // We only care about systems where ATS is supported and the application + // enabled it. + if (!gpu_va_space->ats.enabled) + continue; + + status = block_pre_populate_pde1_gpu(block, gpu_va_space, pending_tracker); + if (status != NV_OK) + break; + } + } + + return status; +} + +static NV_STATUS block_unmap_gpu(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + const uvm_page_mask_t *unmap_page_mask, + uvm_tracker_t *out_tracker) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id); + uvm_pte_bits_gpu_t pte_bit; + uvm_push_t push; + uvm_membar_t tlb_membar = UVM_MEMBAR_GPU; + uvm_page_mask_t *pages_to_unmap = &block_context->mapping.page_mask; + NV_STATUS status; + uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state; + bool mask_empty; + + // We have to check gpu_state before looking at any VA space state like our + // gpu_va_space, because we could be on the eviction path where we don't + // have a lock on that state. However, since remove_gpu_va_space walks each + // block to unmap the GPU before destroying the gpu_va_space, we're + // guaranteed that if this GPU has page tables, the gpu_va_space can't go + // away while we're holding the block lock. + if (!block_gpu_has_page_tables(block, gpu)) + return NV_OK; + + if (!uvm_page_mask_and(pages_to_unmap, unmap_page_mask, &gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ])) + return NV_OK; + + // block_gpu_compute_new_pte_state needs a mask of pages which will have + // matching attributes after the operation is performed. In the case of + // unmap, those are the pages with unset bits. 
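+    //
+    // Pages which keep a mapping are (READ & ~pages_to_unmap), so the pages
+    // which end up unmapped after this operation, including those which never
+    // had a mapping, are the complement of that.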
+ uvm_page_mask_andnot(&block_context->scratch_page_mask, &gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ], pages_to_unmap); + uvm_page_mask_complement(&block_context->scratch_page_mask, &block_context->scratch_page_mask); + block_gpu_compute_new_pte_state(block, + gpu, + UVM_ID_INVALID, + pages_to_unmap, + &block_context->scratch_page_mask, + new_pte_state); + + status = block_alloc_ptes_new_state(block, gpu, new_pte_state, out_tracker); + if (status != NV_OK) + return status; + + // All PTE downgrades need a membar. If any of the unmapped PTEs pointed to + // remote memory, we must use a sysmembar. + if (block_has_remote_mapping_gpu(block, block_context, gpu->id, pages_to_unmap)) + tlb_membar = UVM_MEMBAR_SYS; + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &block->tracker, + &push, + "Unmapping pages in block [0x%llx, 0x%llx)", + block->start, + block->end + 1); + if (status != NV_OK) + return status; + + if (new_pte_state->pte_is_2m) { + // We're either unmapping a whole valid 2M PTE, or we're unmapping all + // remaining pages in a split 2M PTE. + block_gpu_unmap_to_2m(block, block_context, gpu, &push, tlb_membar); + } + else if (gpu_state->pte_is_2m) { + // The block is currently mapped as a valid 2M PTE and we're unmapping + // some pages within the 2M, so we have to split it into the appropriate + // mix of big and 4k PTEs. + block_gpu_unmap_split_2m(block, block_context, gpu, pages_to_unmap, &push, tlb_membar); + } + else { + // We're unmapping some pre-existing mix of big and 4K PTEs into some + // other mix of big and 4K PTEs. + block_gpu_unmap_big_and_4k(block, block_context, gpu, pages_to_unmap, &push, tlb_membar); + } + + uvm_push_end(&push); + + if (!uvm_processor_mask_test(block_get_uvm_lite_gpus(block), gpu->id)) { + uvm_processor_mask_t non_uvm_lite_gpus; + uvm_processor_mask_andnot(&non_uvm_lite_gpus, &block->mapped, block_get_uvm_lite_gpus(block)); + + UVM_ASSERT(uvm_processor_mask_test(&non_uvm_lite_gpus, gpu->id)); + + // If the GPU is the only non-UVM-Lite processor with mappings, we can + // safely mark pages as fully unmapped + if (uvm_processor_mask_get_count(&non_uvm_lite_gpus) == 1) + uvm_page_mask_andnot(&block->maybe_mapped_pages, &block->maybe_mapped_pages, pages_to_unmap); + } + + // Clear block PTE state + for (pte_bit = 0; pte_bit < UVM_PTE_BITS_GPU_MAX; pte_bit++) { + mask_empty = !uvm_page_mask_andnot(&gpu_state->pte_bits[pte_bit], + &gpu_state->pte_bits[pte_bit], + pages_to_unmap); + if (pte_bit == UVM_PTE_BITS_GPU_READ && mask_empty) + uvm_processor_mask_clear(&block->mapped, gpu->id); + } + + UVM_ASSERT(block_check_mappings(block)); + + return uvm_tracker_add_push_safe(out_tracker, &push); +} + +NV_STATUS uvm_va_block_unmap(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t id, + uvm_va_block_region_t region, + const uvm_page_mask_t *unmap_page_mask, + uvm_tracker_t *out_tracker) +{ + uvm_page_mask_t *region_page_mask = &va_block_context->mapping.map_running_page_mask; + + UVM_ASSERT(!uvm_va_block_is_dead(va_block)); + uvm_assert_mutex_locked(&va_block->lock); + + if (UVM_ID_IS_CPU(id)) { + block_unmap_cpu(va_block, region, unmap_page_mask); + return NV_OK; + } + + uvm_page_mask_init_from_region(region_page_mask, region, unmap_page_mask); + + return block_unmap_gpu(va_block, va_block_context, block_get_gpu(va_block, id), region_page_mask, out_tracker); +} + +// This function essentially works as a wrapper around vm_insert_page (hence +// the similar function prototype). 
This is needed since vm_insert_page +// doesn't take permissions as input, but uses vma->vm_page_prot instead. +// Since we may have multiple VA blocks under one VMA which need to map +// with different permissions, we have to manually change vma->vm_page_prot for +// each call to vm_insert_page. Multiple faults under one VMA in separate +// blocks can be serviced concurrently, so the VMA wrapper lock is used +// to protect access to vma->vm_page_prot. +static NV_STATUS uvm_cpu_insert_page(struct vm_area_struct *vma, + NvU64 addr, + struct page *page, + uvm_prot_t new_prot) +{ + uvm_vma_wrapper_t *vma_wrapper; + unsigned long target_flags; + pgprot_t target_pgprot; + int ret; + + UVM_ASSERT(vma); + UVM_ASSERT(vma->vm_private_data); + + vma_wrapper = vma->vm_private_data; + target_flags = vma->vm_flags; + + if (new_prot == UVM_PROT_READ_ONLY) + target_flags &= ~VM_WRITE; + + target_pgprot = vm_get_page_prot(target_flags); + + // Take VMA wrapper lock to check vma->vm_page_prot + uvm_down_read(&vma_wrapper->lock); + + // Take a write lock if we need to modify the VMA vm_page_prot + // - vma->vm_page_prot creates writable PTEs but new prot is RO + // - vma->vm_page_prot creates read-only PTEs but new_prot is RW + if (pgprot_val(vma->vm_page_prot) != pgprot_val(target_pgprot)) { + uvm_up_read(&vma_wrapper->lock); + uvm_down_write(&vma_wrapper->lock); + + vma->vm_page_prot = target_pgprot; + + uvm_downgrade_write(&vma_wrapper->lock); + } + + ret = vm_insert_page(vma, addr, page); + uvm_up_read(&vma_wrapper->lock); + if (ret) { + UVM_ASSERT_MSG(ret == -ENOMEM, "ret: %d\n", ret); + return errno_to_nv_status(ret); + } + + return NV_OK; +} + +// Creates or upgrades a CPU mapping for the given page, updating the block's +// mapping and pte_bits bitmaps as appropriate. Upon successful return, the page +// will be mapped with at least new_prot permissions. +// +// This never downgrades mappings, so new_prot must not be UVM_PROT_NONE. Use +// block_unmap_cpu or uvm_va_block_revoke_prot instead. +// +// If the existing mapping is >= new_prot already, this is a no-op. 
+// +// It is the caller's responsibility to: +// - Revoke mappings from other processors as appropriate so the CPU can map +// with new_prot permissions +// - Guarantee that vm_insert_page is safe to use (vma->vm_mm has a reference +// and mmap_lock is held in at least read mode) +// - Ensure that the struct page corresponding to the physical memory being +// mapped exists +// - Manage the block's residency bitmap +// - Ensure that the block hasn't been killed (block->va_range is present) +// - Update the pte/mapping tracking state on success +static NV_STATUS block_map_cpu_page_to(uvm_va_block_t *block, + uvm_processor_id_t resident_id, + uvm_page_index_t page_index, + uvm_prot_t new_prot) +{ + uvm_prot_t curr_prot = block_page_prot_cpu(block, page_index); + uvm_va_range_t *va_range = block->va_range; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + struct vm_area_struct *vma; + NV_STATUS status; + NvU64 addr; + struct page *page; + + UVM_ASSERT(uvm_va_block_is_hmm(block) || va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + UVM_ASSERT(new_prot != UVM_PROT_NONE); + UVM_ASSERT(new_prot < UVM_PROT_MAX); + UVM_ASSERT(uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(resident_id)], UVM_ID_CPU)); + + uvm_assert_mutex_locked(&block->lock); + if (UVM_ID_IS_CPU(resident_id)) + UVM_ASSERT(uvm_page_mask_test(&block->cpu.allocated, page_index)); + + // For the CPU, write implies atomic + if (new_prot == UVM_PROT_READ_WRITE) + new_prot = UVM_PROT_READ_WRITE_ATOMIC; + + // Only upgrades are supported in this function + UVM_ASSERT(curr_prot <= new_prot); + + if (new_prot == curr_prot) + return NV_OK; + + // Check for existing VMA permissions. They could have been modified after + // the initial mmap by mprotect. + if (!uvm_va_block_is_hmm(block) && new_prot > uvm_va_range_logical_prot(va_range)) + return NV_ERR_INVALID_ACCESS_TYPE; + + if (uvm_va_block_is_hmm(block)) { + // Do not map CPU pages because they belong to the Linux kernel. + return NV_OK; + } + + UVM_ASSERT(va_range); + + if (UVM_ID_IS_CPU(resident_id) && UVM_ID_IS_CPU(uvm_va_range_get_policy(va_range)->preferred_location)) { + // Add the page's range group range to the range group's migrated list. + uvm_range_group_range_t *rgr = uvm_range_group_range_find(va_space, + uvm_va_block_cpu_page_address(block, page_index)); + if (rgr != NULL) { + uvm_spin_lock(&rgr->range_group->migrated_ranges_lock); + if (list_empty(&rgr->range_group_migrated_list_node)) + list_move_tail(&rgr->range_group_migrated_list_node, &rgr->range_group->migrated_ranges); + uvm_spin_unlock(&rgr->range_group->migrated_ranges_lock); + } + } + + // It's possible here that current->mm != vma->vm_mm. That can happen for + // example due to access_process_vm (ptrace) or get_user_pages from another + // driver. + // + // In such cases the caller has taken care of ref counting vma->vm_mm for + // us, so we can safely operate on the vma but we can't use + // uvm_va_range_vma_current. + vma = uvm_va_range_vma(va_range); + uvm_assert_mmap_lock_locked(vma->vm_mm); + UVM_ASSERT(!uvm_va_space_mm_enabled(va_space) || va_space->va_space_mm.mm == vma->vm_mm); + + // Add the mapping + addr = uvm_va_block_cpu_page_address(block, page_index); + + // This unmap handles upgrades as vm_insert_page returns -EBUSY when + // there's already a mapping present at fault_addr, so we have to unmap + // first anyway when upgrading from RO -> RW. 
+ if (curr_prot != UVM_PROT_NONE) + unmap_mapping_range(&va_space->mapping, addr, PAGE_SIZE, 1); + + // Don't map the CPU until prior copies and GPU PTE updates finish, + // otherwise we might not stay coherent. + status = uvm_tracker_wait(&block->tracker); + if (status != NV_OK) + return status; + + if (UVM_ID_IS_CPU(resident_id)) { + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index); + + // TODO: Bug 3283417: This can be removed if vm_insert_pages() is used instead of + // vm_insert_page(). + page = uvm_cpu_chunk_get_cpu_page(block, chunk, page_index); + UVM_ASSERT(page); + } + else { + uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, resident_id); + size_t chunk_offset; + uvm_gpu_chunk_t *chunk = block_phys_page_chunk(block, block_phys_page(resident_id, page_index), &chunk_offset); + + UVM_ASSERT(gpu->parent->numa_info.enabled); + + page = uvm_gpu_chunk_to_page(&gpu->pmm, chunk) + chunk_offset / PAGE_SIZE; + } + + return uvm_cpu_insert_page(vma, addr, page, new_prot); +} + +// Maps the CPU to the given pages which are resident on resident_id. +// map_page_mask is an in/out parameter: the pages which are mapped to +// resident_id are removed from the mask before returning. +// +// Caller must ensure that: +// - Pages in map_page_mask must not be set in the corresponding cpu.pte_bits +// mask for the requested protection. +static NV_STATUS block_map_cpu_to(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_processor_id_t resident_id, + uvm_va_block_region_t region, + uvm_page_mask_t *map_page_mask, + uvm_prot_t new_prot, + uvm_tracker_t *out_tracker) +{ + NV_STATUS status = NV_OK; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_page_index_t page_index; + uvm_page_mask_t *pages_to_map = &block_context->mapping.page_mask; + const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(block, resident_id); + uvm_pte_bits_cpu_t prot_pte_bit = get_cpu_pte_bit_index(new_prot); + uvm_pte_bits_cpu_t pte_bit; + + UVM_ASSERT(uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(resident_id)], UVM_ID_CPU)); + + // TODO: Bug 1766424: Check if optimizing the unmap_mapping_range calls + // within block_map_cpu_page_to by doing them once here is helpful. + + UVM_ASSERT(!uvm_page_mask_and(&block_context->scratch_page_mask, + map_page_mask, + &block->cpu.pte_bits[prot_pte_bit])); + + // The pages which will actually change are those in the input page mask + // which are resident on the target. + if (!uvm_page_mask_and(pages_to_map, map_page_mask, resident_mask)) + return NV_OK; + + status = block_pre_populate_pde1_all_gpus(block, out_tracker); + if (status != NV_OK) + return status; + + block->cpu.ever_mapped = true; + + for_each_va_block_page_in_region_mask(page_index, pages_to_map, region) { + status = block_map_cpu_page_to(block, + resident_id, + page_index, + new_prot); + if (status != NV_OK) + break; + + uvm_processor_mask_set(&block->mapped, UVM_ID_CPU); + } + + // If there was some error, shrink the region so that we only update the + // pte/mapping tracking bits for the pages that succeeded + if (status != NV_OK) { + region = uvm_va_block_region(region.first, page_index); + uvm_page_mask_region_clear_outside(pages_to_map, region); + } + + // If pages are mapped from a remote residency, notify the remote mapping + // events to tools. We skip event notification if the cause is Invalid. We + // use it to signal that this function is being called from the revocation + // path to avoid reporting duplicate events. 
+ if (UVM_ID_IS_GPU(resident_id) && + va_space->tools.enabled && + block_context->mapping.cause != UvmEventMapRemoteCauseInvalid) { + uvm_va_block_region_t subregion; + for_each_va_block_subregion_in_mask(subregion, pages_to_map, region) { + uvm_tools_record_map_remote(block, + NULL, + UVM_ID_CPU, + resident_id, + uvm_va_block_region_start(block, subregion), + uvm_va_block_region_size(subregion), + block_context->mapping.cause); + } + } + + // Update CPU mapping state + for (pte_bit = 0; pte_bit <= prot_pte_bit; pte_bit++) + uvm_page_mask_or(&block->cpu.pte_bits[pte_bit], &block->cpu.pte_bits[pte_bit], pages_to_map); + + uvm_page_mask_or(&block->maybe_mapped_pages, &block->maybe_mapped_pages, pages_to_map); + + UVM_ASSERT(block_check_mappings(block)); + + // Remove all pages that were newly-mapped from the input mask + uvm_page_mask_andnot(map_page_mask, map_page_mask, pages_to_map); + + return status; +} + +// Maps the GPU to the given pages which are resident on resident_id. +// map_page_mask is an in/out parameter: the pages which are mapped +// to resident_id are removed from the mask before returning. +// +// Caller must ensure that: +// - Pages in map_page_mask must not be set in the corresponding pte_bits mask +// for the requested protection on the mapping GPU. +static NV_STATUS block_map_gpu_to(uvm_va_block_t *va_block, + uvm_va_block_context_t *block_context, + uvm_gpu_t *gpu, + uvm_processor_id_t resident_id, + uvm_page_mask_t *map_page_mask, + uvm_prot_t new_prot, + uvm_tracker_t *out_tracker) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id); + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_push_t push; + NV_STATUS status; + uvm_page_mask_t *pages_to_map = &block_context->mapping.page_mask; + const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, resident_id); + uvm_pte_bits_gpu_t pte_bit; + uvm_pte_bits_gpu_t prot_pte_bit = get_gpu_pte_bit_index(new_prot); + uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state; + block_pte_op_t pte_op; + + UVM_ASSERT(map_page_mask); + UVM_ASSERT(uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(resident_id)], gpu->id)); + + if (uvm_processor_mask_test(block_get_uvm_lite_gpus(va_block), gpu->id)) + UVM_ASSERT(uvm_id_equal(resident_id, uvm_va_range_get_policy(va_block->va_range)->preferred_location)); + + UVM_ASSERT(!uvm_page_mask_and(&block_context->scratch_page_mask, + map_page_mask, + &gpu_state->pte_bits[prot_pte_bit])); + + // The pages which will actually change are those in the input page mask + // which are resident on the target. + if (!uvm_page_mask_and(pages_to_map, map_page_mask, resident_mask)) + return NV_OK; + + UVM_ASSERT(block_check_mapping_residency(va_block, gpu, resident_id, pages_to_map)); + + // For PTE merge/split computation, compute all resident pages which will + // have exactly new_prot after performing the mapping. 
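+    //
+    // That is: (pages which already have new_prot | pages_to_map), minus pages
+    // which already have a higher protection, clamped to the pages resident on
+    // resident_id.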
+ uvm_page_mask_or(&block_context->scratch_page_mask, &gpu_state->pte_bits[prot_pte_bit], pages_to_map); + if (prot_pte_bit < UVM_PTE_BITS_GPU_ATOMIC) { + uvm_page_mask_andnot(&block_context->scratch_page_mask, + &block_context->scratch_page_mask, + &gpu_state->pte_bits[prot_pte_bit + 1]); + } + uvm_page_mask_and(&block_context->scratch_page_mask, &block_context->scratch_page_mask, resident_mask); + + block_gpu_compute_new_pte_state(va_block, + gpu, + resident_id, + pages_to_map, + &block_context->scratch_page_mask, + new_pte_state); + + status = block_alloc_ptes_new_state(va_block, gpu, new_pte_state, out_tracker); + if (status != NV_OK) + return status; + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &va_block->tracker, + &push, + "Mapping pages in block [0x%llx, 0x%llx) as %s", + va_block->start, + va_block->end + 1, + uvm_prot_string(new_prot)); + if (status != NV_OK) + return status; + + pte_op = BLOCK_PTE_OP_MAP; + if (new_pte_state->pte_is_2m) { + // We're either modifying permissions of a pre-existing 2M PTE, or all + // permissions match so we can merge to a new 2M PTE. + block_gpu_map_to_2m(va_block, block_context, gpu, resident_id, new_prot, &push, pte_op); + } + else if (gpu_state->pte_is_2m) { + // Permissions on a subset of the existing 2M PTE are being upgraded, so + // we have to split it into the appropriate mix of big and 4k PTEs. + block_gpu_map_split_2m(va_block, block_context, gpu, resident_id, pages_to_map, new_prot, &push, pte_op); + } + else { + // We're upgrading permissions on some pre-existing mix of big and 4K + // PTEs into some other mix of big and 4K PTEs. + block_gpu_map_big_and_4k(va_block, block_context, gpu, resident_id, pages_to_map, new_prot, &push, pte_op); + } + + // If we are mapping remotely, record the event + if (va_space->tools.enabled && !uvm_id_equal(resident_id, gpu->id)) { + uvm_va_block_region_t subregion, region = uvm_va_block_region_from_block(va_block); + + UVM_ASSERT(block_context->mapping.cause != UvmEventMapRemoteCauseInvalid); + + for_each_va_block_subregion_in_mask(subregion, pages_to_map, region) { + uvm_tools_record_map_remote(va_block, + &push, + gpu->id, + resident_id, + uvm_va_block_region_start(va_block, subregion), + uvm_va_block_region_size(subregion), + block_context->mapping.cause); + } + } + + uvm_push_end(&push); + + // Update GPU mapping state + for (pte_bit = 0; pte_bit <= prot_pte_bit; pte_bit++) + uvm_page_mask_or(&gpu_state->pte_bits[pte_bit], &gpu_state->pte_bits[pte_bit], pages_to_map); + + uvm_processor_mask_set(&va_block->mapped, gpu->id); + + // If we are mapping a UVM-Lite GPU do not update maybe_mapped_pages + if (!uvm_processor_mask_test(block_get_uvm_lite_gpus(va_block), gpu->id)) + uvm_page_mask_or(&va_block->maybe_mapped_pages, &va_block->maybe_mapped_pages, pages_to_map); + + // Remove all pages resident on this processor from the input mask, which + // were newly-mapped. 
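+    // Clearing these bits is what lets the caller's per-residency loop in
+    // uvm_va_block_map() stop early once every requested page has been
+    // handled.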
+ uvm_page_mask_andnot(map_page_mask, map_page_mask, pages_to_map); + + UVM_ASSERT(block_check_mappings(va_block)); + + return uvm_tracker_add_push_safe(out_tracker, &push); +} + +static void map_get_allowed_destinations(uvm_va_block_t *block, + uvm_va_policy_t *policy, + uvm_processor_id_t id, + uvm_processor_mask_t *allowed_mask) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + if (uvm_processor_mask_test(block_get_uvm_lite_gpus(block), id)) { + // UVM-Lite can only map resident pages on the preferred location + uvm_processor_mask_zero(allowed_mask); + uvm_processor_mask_set(allowed_mask, policy->preferred_location); + } + else if ((uvm_va_policy_is_read_duplicate(policy, va_space) || uvm_id_equal(policy->preferred_location, id)) && + uvm_va_space_processor_has_memory(va_space, id)) { + // When operating under read-duplication we should only map the local + // processor to cause fault-and-duplicate of remote pages. + // + // The same holds when this processor is the preferred location: only + // create local mappings to force remote pages to fault-and-migrate. + uvm_processor_mask_zero(allowed_mask); + uvm_processor_mask_set(allowed_mask, id); + } + else { + // Common case: Just map wherever the memory happens to reside + uvm_processor_mask_and(allowed_mask, &block->resident, &va_space->can_access[uvm_id_value(id)]); + return; + } + + // Clamp to resident and accessible processors + uvm_processor_mask_and(allowed_mask, allowed_mask, &block->resident); + uvm_processor_mask_and(allowed_mask, allowed_mask, &va_space->can_access[uvm_id_value(id)]); +} + +NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t id, + uvm_va_block_region_t region, + const uvm_page_mask_t *map_page_mask, + uvm_prot_t new_prot, + UvmEventMapRemoteCause cause, + uvm_tracker_t *out_tracker) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_gpu_t *gpu = NULL; + uvm_processor_mask_t allowed_destinations; + uvm_processor_id_t resident_id; + const uvm_page_mask_t *pte_mask; + uvm_page_mask_t *running_page_mask = &va_block_context->mapping.map_running_page_mask; + NV_STATUS status; + + va_block_context->mapping.cause = cause; + + UVM_ASSERT(new_prot != UVM_PROT_NONE); + UVM_ASSERT(new_prot < UVM_PROT_MAX); + uvm_assert_mutex_locked(&va_block->lock); + + // Mapping is not supported on the eviction path that doesn't hold the VA + // space lock. + uvm_assert_rwsem_locked(&va_space->lock); + + if (UVM_ID_IS_CPU(id)) { + uvm_pte_bits_cpu_t prot_pte_bit; + + // Check if the current thread is allowed to call vm_insert_page + if (!uvm_va_block_is_hmm(va_block) && !uvm_va_range_vma_check(va_block->va_range, va_block_context->mm)) + return NV_OK; + + prot_pte_bit = get_cpu_pte_bit_index(new_prot); + pte_mask = &va_block->cpu.pte_bits[prot_pte_bit]; + } + else { + uvm_va_block_gpu_state_t *gpu_state; + uvm_pte_bits_gpu_t prot_pte_bit; + + gpu = uvm_va_space_get_gpu(va_space, id); + + // Although this GPU UUID is registered in the VA space, it might not have a + // GPU VA space registered. 
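+        // In that case there are no GPU page tables to update, so treat the
+        // request as a successful no-op rather than an error.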
+ if (!uvm_gpu_va_space_get(va_space, gpu)) + return NV_OK; + + gpu_state = block_gpu_state_get_alloc(va_block, gpu); + if (!gpu_state) + return NV_ERR_NO_MEMORY; + + prot_pte_bit = get_gpu_pte_bit_index(new_prot); + pte_mask = &gpu_state->pte_bits[prot_pte_bit]; + } + + uvm_page_mask_init_from_region(running_page_mask, region, map_page_mask); + + if (!uvm_page_mask_andnot(running_page_mask, running_page_mask, pte_mask)) + return NV_OK; + + // Map per resident location so we can more easily detect physically- + // contiguous mappings. + map_get_allowed_destinations(va_block, va_block_context->policy, id, &allowed_destinations); + + for_each_closest_id(resident_id, &allowed_destinations, id, va_space) { + if (UVM_ID_IS_CPU(id)) { + status = block_map_cpu_to(va_block, + va_block_context, + resident_id, + region, + running_page_mask, + new_prot, + out_tracker); + } + else { + status = block_map_gpu_to(va_block, + va_block_context, + gpu, + resident_id, + running_page_mask, + new_prot, + out_tracker); + } + + if (status != NV_OK) + return status; + + // If we've mapped all requested pages, we're done + if (uvm_page_mask_region_empty(running_page_mask, region)) + break; + } + + return NV_OK; +} + +// Revokes the given pages mapped by cpu. This is implemented by unmapping all +// pages and mapping them later with the lower permission. This is required +// because vm_insert_page can only be used for upgrades from Invalid. +// +// Caller must ensure that: +// - Pages in revoke_page_mask must be set in the +// cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE] mask. +static NV_STATUS block_revoke_cpu_write(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_va_block_region_t region, + const uvm_page_mask_t *revoke_page_mask, + uvm_tracker_t *out_tracker) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_va_block_region_t subregion; + + UVM_ASSERT(revoke_page_mask); + + UVM_ASSERT(uvm_page_mask_subset(revoke_page_mask, &block->cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE])); + + block_unmap_cpu(block, region, revoke_page_mask); + + // Coalesce revocation event notification + for_each_va_block_subregion_in_mask(subregion, revoke_page_mask, region) { + uvm_perf_event_notify_revocation(&va_space->perf_events, + block, + UVM_ID_CPU, + uvm_va_block_region_start(block, subregion), + uvm_va_block_region_size(subregion), + UVM_PROT_READ_WRITE_ATOMIC, + UVM_PROT_READ_ONLY); + } + + // uvm_va_block_map will skip this remap if we aren't holding the right mm + // lock. 
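+    // The remap downgrades to READ_ONLY. UvmEventMapRemoteCauseInvalid is
+    // passed so the map path knows this call comes from the revocation path
+    // and doesn't report a duplicate map-remote event (see block_map_cpu_to).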
+    return uvm_va_block_map(block,
+                            block_context,
+                            UVM_ID_CPU,
+                            region,
+                            revoke_page_mask,
+                            UVM_PROT_READ_ONLY,
+                            UvmEventMapRemoteCauseInvalid,
+                            out_tracker);
+}
+
+static void block_revoke_prot_gpu_perf_notify(uvm_va_block_t *block,
+                                              uvm_va_block_context_t *block_context,
+                                              uvm_gpu_t *gpu,
+                                              uvm_prot_t prot_revoked,
+                                              const uvm_page_mask_t *pages_revoked)
+{
+    uvm_va_space_t *va_space = uvm_va_block_get_va_space(block);
+    uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, gpu->id);
+    uvm_va_block_region_t subregion, region = uvm_va_block_region_from_block(block);
+    uvm_pte_bits_gpu_t pte_bit;
+
+    for (pte_bit = UVM_PTE_BITS_GPU_ATOMIC; pte_bit >= get_gpu_pte_bit_index(prot_revoked); pte_bit--) {
+        uvm_prot_t old_prot;
+
+        if (!uvm_page_mask_and(&block_context->scratch_page_mask, &gpu_state->pte_bits[pte_bit], pages_revoked))
+            continue;
+
+        if (pte_bit == UVM_PTE_BITS_GPU_ATOMIC)
+            old_prot = UVM_PROT_READ_WRITE_ATOMIC;
+        else
+            old_prot = UVM_PROT_READ_WRITE;
+
+        for_each_va_block_subregion_in_mask(subregion, &block_context->scratch_page_mask, region) {
+            uvm_perf_event_notify_revocation(&va_space->perf_events,
+                                             block,
+                                             gpu->id,
+                                             uvm_va_block_region_start(block, subregion),
+                                             uvm_va_block_region_size(subregion),
+                                             old_prot,
+                                             prot_revoked - 1);
+        }
+    }
+}
+
+// Revokes the given pages mapped by gpu which are resident on resident_id.
+// revoke_page_mask is an in/out parameter: the pages which have the appropriate
+// permissions and are mapped to resident_id are removed from the mask before
+// returning.
+//
+// Caller must ensure that:
+// - Pages in revoke_page_mask must be set in the corresponding pte_bits mask
+//   for the protection to be revoked on the mapping GPU.
+static NV_STATUS block_revoke_prot_gpu_to(uvm_va_block_t *va_block,
+                                          uvm_va_block_context_t *block_context,
+                                          uvm_gpu_t *gpu,
+                                          uvm_processor_id_t resident_id,
+                                          uvm_page_mask_t *revoke_page_mask,
+                                          uvm_prot_t prot_to_revoke,
+                                          uvm_tracker_t *out_tracker)
+{
+    uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id);
+    uvm_push_t push;
+    NV_STATUS status;
+    uvm_pte_bits_gpu_t pte_bit;
+    uvm_pte_bits_gpu_t prot_pte_bit = get_gpu_pte_bit_index(prot_to_revoke);
+    uvm_prot_t new_prot = prot_to_revoke - 1;
+    uvm_va_block_new_pte_state_t *new_pte_state = &block_context->mapping.new_pte_state;
+    block_pte_op_t pte_op;
+    const uvm_page_mask_t *resident_mask = uvm_va_block_resident_mask_get(va_block, resident_id);
+    uvm_page_mask_t *pages_to_revoke = &block_context->mapping.page_mask;
+
+    UVM_ASSERT(revoke_page_mask);
+    UVM_ASSERT(uvm_page_mask_subset(revoke_page_mask, &gpu_state->pte_bits[prot_pte_bit]));
+
+    // The pages which will actually change are those in the input page mask
+    // which are resident on the target.
+    if (!uvm_page_mask_and(pages_to_revoke, revoke_page_mask, resident_mask))
+        return NV_OK;
+
+    UVM_ASSERT(block_check_mapping_residency(va_block, gpu, resident_id, pages_to_revoke));
+
+    // For PTE merge/split computation, compute all resident pages which will
+    // have exactly prot_to_revoke-1 after performing the revocation.
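+    // Sketch of the mask algebra below, read off the calls: pages ending up at
+    // exactly prot_to_revoke-1 = (pte_bits[prot_to_revoke - 1] &
+    // ~(pte_bits[prot_to_revoke] & ~pages_to_revoke)) & resident_mask.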
+    uvm_page_mask_andnot(&block_context->scratch_page_mask, &gpu_state->pte_bits[prot_pte_bit], pages_to_revoke);
+    uvm_page_mask_andnot(&block_context->scratch_page_mask,
+                         &gpu_state->pte_bits[prot_pte_bit - 1],
+                         &block_context->scratch_page_mask);
+    uvm_page_mask_and(&block_context->scratch_page_mask, &block_context->scratch_page_mask, resident_mask);
+
+    block_gpu_compute_new_pte_state(va_block,
+                                    gpu,
+                                    resident_id,
+                                    pages_to_revoke,
+                                    &block_context->scratch_page_mask,
+                                    new_pte_state);
+
+    status = block_alloc_ptes_new_state(va_block, gpu, new_pte_state, out_tracker);
+    if (status != NV_OK)
+        return status;
+
+    status = uvm_push_begin_acquire(gpu->channel_manager,
+                                    UVM_CHANNEL_TYPE_MEMOPS,
+                                    &va_block->tracker,
+                                    &push,
+                                    "Revoking %s access privileges in block [0x%llx, 0x%llx) ",
+                                    uvm_prot_string(prot_to_revoke),
+                                    va_block->start,
+                                    va_block->end + 1);
+    if (status != NV_OK)
+        return status;
+
+    pte_op = BLOCK_PTE_OP_REVOKE;
+    if (new_pte_state->pte_is_2m) {
+        // We're either modifying permissions of a pre-existing 2M PTE, or all
+        // permissions match so we can merge to a new 2M PTE.
+        block_gpu_map_to_2m(va_block, block_context, gpu, resident_id, new_prot, &push, pte_op);
+    }
+    else if (gpu_state->pte_is_2m) {
+        // Permissions on a subset of the existing 2M PTE are being downgraded,
+        // so we have to split it into the appropriate mix of big and 4k PTEs.
+        block_gpu_map_split_2m(va_block, block_context, gpu, resident_id, pages_to_revoke, new_prot, &push, pte_op);
+    }
+    else {
+        // We're downgrading permissions on some pre-existing mix of big and 4K
+        // PTEs into some other mix of big and 4K PTEs.
+        block_gpu_map_big_and_4k(va_block, block_context, gpu, resident_id, pages_to_revoke, new_prot, &push, pte_op);
+    }
+
+    uvm_push_end(&push);
+
+    block_revoke_prot_gpu_perf_notify(va_block, block_context, gpu, prot_to_revoke, pages_to_revoke);
+
+    // Update GPU mapping state
+    for (pte_bit = UVM_PTE_BITS_GPU_ATOMIC; pte_bit >= prot_pte_bit; pte_bit--)
+        uvm_page_mask_andnot(&gpu_state->pte_bits[pte_bit], &gpu_state->pte_bits[pte_bit], pages_to_revoke);
+
+    // Remove all pages resident on this processor from the input mask: both
+    // pages which were revoked and pages which already had the correct
+    // permissions.
+ uvm_page_mask_andnot(revoke_page_mask, revoke_page_mask, pages_to_revoke); + + UVM_ASSERT(block_check_mappings(va_block)); + + return uvm_tracker_add_push_safe(out_tracker, &push); +} + +NV_STATUS uvm_va_block_revoke_prot(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t id, + uvm_va_block_region_t region, + const uvm_page_mask_t *revoke_page_mask, + uvm_prot_t prot_to_revoke, + uvm_tracker_t *out_tracker) +{ + uvm_gpu_t *gpu; + uvm_va_block_gpu_state_t *gpu_state; + uvm_processor_mask_t resident_procs; + uvm_processor_id_t resident_id; + uvm_page_mask_t *running_page_mask = &va_block_context->mapping.revoke_running_page_mask; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_pte_bits_gpu_t prot_pte_bit; + + UVM_ASSERT(prot_to_revoke > UVM_PROT_READ_ONLY); + UVM_ASSERT(prot_to_revoke < UVM_PROT_MAX); + uvm_assert_mutex_locked(&va_block->lock); + + if (UVM_ID_IS_CPU(id)) { + if (prot_to_revoke == UVM_PROT_READ_WRITE_ATOMIC) + return NV_OK; + + uvm_page_mask_init_from_region(running_page_mask, region, revoke_page_mask); + + if (uvm_page_mask_and(running_page_mask, running_page_mask, &va_block->cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE])) + return block_revoke_cpu_write(va_block, va_block_context, region, running_page_mask, out_tracker); + + return NV_OK; + } + + gpu = uvm_va_space_get_gpu(va_space, id); + + // UVM-Lite GPUs should never have access revoked + UVM_ASSERT_MSG(!uvm_processor_mask_test(block_get_uvm_lite_gpus(va_block), gpu->id), + "GPU %s\n", uvm_gpu_name(gpu)); + + // Return early if there are no mappings for the GPU present in the block + if (!uvm_processor_mask_test(&va_block->mapped, gpu->id)) + return NV_OK; + + gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id); + prot_pte_bit = get_gpu_pte_bit_index(prot_to_revoke); + + uvm_page_mask_init_from_region(running_page_mask, region, revoke_page_mask); + + if (!uvm_page_mask_and(running_page_mask, running_page_mask, &gpu_state->pte_bits[prot_pte_bit])) + return NV_OK; + + // Revoke per resident location so we can more easily detect physically- + // contiguous mappings. + uvm_processor_mask_copy(&resident_procs, &va_block->resident); + + for_each_closest_id(resident_id, &resident_procs, gpu->id, va_space) { + NV_STATUS status = block_revoke_prot_gpu_to(va_block, + va_block_context, + gpu, + resident_id, + running_page_mask, + prot_to_revoke, + out_tracker); + if (status != NV_OK) + return status; + + // If we've revoked all requested pages, we're done + if (uvm_page_mask_region_empty(running_page_mask, region)) + break; + } + + return NV_OK; +} + +NV_STATUS uvm_va_block_map_mask(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + const uvm_processor_mask_t *map_processor_mask, + uvm_va_block_region_t region, + const uvm_page_mask_t *map_page_mask, + uvm_prot_t new_prot, + UvmEventMapRemoteCause cause) +{ + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + uvm_processor_id_t id; + + for_each_id_in_mask(id, map_processor_mask) { + status = uvm_va_block_map(va_block, + va_block_context, + id, + region, + map_page_mask, + new_prot, + cause, + &local_tracker); + if (status != NV_OK) + break; + } + + // Regardless of error, add the successfully-pushed mapping operations into + // the block's tracker. Note that we can't overwrite the tracker because we + // aren't guaranteed that the map actually pushed anything (in which case it + // would've acquired the block tracker first). 
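+    // The first failure wins: if any map failed we return that status, and
+    // tracker_status is only surfaced when every map succeeded.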
+ tracker_status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker); + uvm_tracker_deinit(&local_tracker); + + return status == NV_OK ? tracker_status : status; +} + +NV_STATUS uvm_va_block_unmap_mask(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + const uvm_processor_mask_t *unmap_processor_mask, + uvm_va_block_region_t region, + const uvm_page_mask_t *unmap_page_mask) +{ + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + uvm_processor_id_t id; + + // Watch out, unmap_mask could change during iteration since it could be + // va_block->mapped. + for_each_id_in_mask(id, unmap_processor_mask) { + // Errors could either be a system-fatal error (ECC) or an allocation + // retry due to PTE splitting. In either case we should stop after + // hitting the first one. + status = uvm_va_block_unmap(va_block, va_block_context, id, region, unmap_page_mask, &local_tracker); + if (status != NV_OK) + break; + } + + // See the comment in uvm_va_block_map_mask for adding to the tracker. + tracker_status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker); + uvm_tracker_deinit(&local_tracker); + + return status == NV_OK ? tracker_status : status; +} + +NV_STATUS uvm_va_block_revoke_prot_mask(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + const uvm_processor_mask_t *revoke_processor_mask, + uvm_va_block_region_t region, + const uvm_page_mask_t *revoke_page_mask, + uvm_prot_t prot_to_revoke) +{ + uvm_tracker_t local_tracker = UVM_TRACKER_INIT(); + NV_STATUS status = NV_OK; + NV_STATUS tracker_status; + uvm_processor_id_t id; + + for_each_id_in_mask(id, revoke_processor_mask) { + status = uvm_va_block_revoke_prot(va_block, + va_block_context, + id, + region, + revoke_page_mask, + prot_to_revoke, + &local_tracker); + if (status != NV_OK) + break; + } + + // See the comment in uvm_va_block_map_mask for adding to the tracker. + tracker_status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker); + uvm_tracker_deinit(&local_tracker); + + return status == NV_OK ? tracker_status : status; +} + +// Updates the read_duplicated_pages mask in the block when the state of GPU id +// is being destroyed +static void update_read_duplicated_pages_mask(uvm_va_block_t *block, + uvm_gpu_id_t id, + uvm_va_block_gpu_state_t *gpu_state) +{ + uvm_gpu_id_t running_id; + bool first = true; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, NULL); + uvm_page_mask_t *running_page_mask = &block_context->update_read_duplicated_pages.running_page_mask; + uvm_page_mask_t *tmp_page_mask = &block_context->scratch_page_mask; + + uvm_page_mask_zero(&block->read_duplicated_pages); + + for_each_id_in_mask(running_id, &block->resident) { + const uvm_page_mask_t *running_residency_mask; + + if (uvm_id_equal(running_id, id)) + continue; + + running_residency_mask = uvm_va_block_resident_mask_get(block, running_id); + + if (first) { + uvm_page_mask_copy(running_page_mask, running_residency_mask); + first = false; + continue; + } + + if (uvm_page_mask_and(tmp_page_mask, running_page_mask, running_residency_mask)) + uvm_page_mask_or(&block->read_duplicated_pages, &block->read_duplicated_pages, tmp_page_mask); + + uvm_page_mask_or(running_page_mask, running_page_mask, running_residency_mask); + } +} + +// Unmaps all GPU mappings under this block, frees the page tables, and frees +// all the GPU chunks. 
This simply drops the chunks on the floor, so the caller +// must take care of copying the data elsewhere if it needs to remain intact. +// +// This serializes on the block tracker since it must unmap page tables. +static void block_destroy_gpu_state(uvm_va_block_t *block, uvm_gpu_id_t id) +{ + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, id); + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + uvm_gpu_va_space_t *gpu_va_space; + uvm_gpu_t *gpu, *other_gpu; + + if (!gpu_state) + return; + + uvm_assert_mutex_locked(&block->lock); + + // Unmap PTEs and free page tables + gpu = uvm_va_space_get_gpu(va_space, id); + gpu_va_space = uvm_gpu_va_space_get(va_space, gpu); + if (gpu_va_space) + uvm_va_block_remove_gpu_va_space(block, gpu_va_space, NULL); + + UVM_ASSERT(!uvm_processor_mask_test(&block->mapped, id)); + + // No processor should have this GPU mapped at this point + UVM_ASSERT(block_check_processor_not_mapped(block, id)); + + // We need to remove the mappings of the indirect peers from the reverse + // map when the GPU state is being destroyed (for example, on + // unregister_gpu) and when peer access between indirect peers is disabled. + // However, we need to avoid double mapping removals. There are two + // possible scenarios: + // - Disable peer access first. This will remove all mappings between A and + // B GPUs, and the indirect_peers bit is cleared. Thus, the later call to + // unregister_gpu will not operate on that pair of GPUs. + // - Unregister GPU first. This will remove all mappings from all indirect + // peers to the GPU being unregistered. It will also destroy its GPU state. + // Subsequent calls to disable peers will remove the mappings from the GPU + // being unregistered, but never to the GPU being unregistered (since it no + // longer has a valid GPU state). + for_each_va_space_gpu_in_mask(other_gpu, va_space, &va_space->indirect_peers[uvm_id_value(gpu->id)]) + block_gpu_unmap_all_chunks_indirect_peer(block, gpu, other_gpu); + + if (gpu_state->chunks) { + size_t i, num_chunks; + + update_read_duplicated_pages_mask(block, id, gpu_state); + uvm_page_mask_zero(&gpu_state->resident); + block_clear_resident_processor(block, id); + + num_chunks = block_num_gpu_chunks(block, gpu); + for (i = 0; i < num_chunks; i++) { + uvm_gpu_chunk_t *chunk = gpu_state->chunks[i]; + if (!chunk) + continue; + + uvm_mmu_chunk_unmap(chunk, &block->tracker); + uvm_pmm_gpu_free(&gpu->pmm, chunk, &block->tracker); + } + + uvm_kvfree(gpu_state->chunks); + } + else { + UVM_ASSERT(!uvm_processor_mask_test(&block->resident, id)); + } + + + // Pending operations may still need the DMA memory to be mapped. + uvm_tracker_wait(&block->tracker); + + block_gpu_unmap_phys_all_cpu_pages(block, gpu); + uvm_cpu_chunk_gpu_mapping_free(block, gpu->id); + uvm_processor_mask_clear(&block->evicted_gpus, id); + + kmem_cache_free(g_uvm_va_block_gpu_state_cache, gpu_state); + block->gpus[uvm_id_gpu_index(id)] = NULL; +} + +static void block_put_ptes_safe(uvm_page_tree_t *tree, uvm_page_table_range_t *range) +{ + if (range->table) { + uvm_page_tree_put_ptes(tree, range); + memset(range, 0, sizeof(*range)); + } +} + +NV_STATUS uvm_va_block_add_gpu_va_space(uvm_va_block_t *va_block, uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_assert_mutex_locked(&va_block->lock); + + if (!gpu_va_space->ats.enabled || !va_block->cpu.ever_mapped) + return NV_OK; + + // Pre-populate PDEs down to PDE1 for all GPU VA spaces on ATS systems. See + // comments in pre_populate_pde1_gpu. 
+    return block_pre_populate_pde1_gpu(va_block, gpu_va_space, NULL);
+}
+
+void uvm_va_block_remove_gpu_va_space(uvm_va_block_t *va_block, uvm_gpu_va_space_t *gpu_va_space, struct mm_struct *mm)
+{
+    uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
+    uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, mm);
+    uvm_pte_batch_t *pte_batch = &block_context->mapping.pte_batch;
+    uvm_tlb_batch_t *tlb_batch = &block_context->mapping.tlb_batch;
+    uvm_gpu_t *gpu = gpu_va_space->gpu;
+    uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id);
+    uvm_va_block_region_t region = uvm_va_block_region_from_block(va_block);
+    uvm_push_t push;
+    NV_STATUS status;
+
+    uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
+
+    if (!gpu_state)
+        return;
+
+    uvm_assert_mutex_locked(&va_block->lock);
+
+    // Unmapping the whole block won't cause a page table split, so this should
+    // only fail if we have a system-fatal error.
+    status = uvm_va_block_unmap(va_block, block_context, gpu->id, region, NULL, &local_tracker);
+    if (status != NV_OK) {
+        UVM_ASSERT(status == uvm_global_get_status());
+        return; // Just leak
+    }
+
+    UVM_ASSERT(!uvm_processor_mask_test(&va_block->mapped, gpu->id));
+
+    // Reset the page tables if other allocations could reuse them
+    if (!block_gpu_supports_2m(va_block, gpu) &&
+        !bitmap_empty(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) {
+
+        status = uvm_push_begin_acquire(gpu->channel_manager,
+                                        UVM_CHANNEL_TYPE_MEMOPS,
+                                        &local_tracker,
+                                        &push,
+                                        "Resetting PTEs for block [0x%llx, 0x%llx)",
+                                        va_block->start,
+                                        va_block->end + 1);
+        if (status != NV_OK) {
+            UVM_ASSERT(status == uvm_global_get_status());
+            return; // Just leak
+        }
+
+        uvm_pte_batch_begin(&push, pte_batch);
+        uvm_tlb_batch_begin(&gpu_va_space->page_tables, tlb_batch);
+
+        // When a big PTE is active, the 4k PTEs under it are garbage. Make
+        // them invalid so the page tree code can reuse them for other
+        // allocations on this VA. These don't need TLB invalidates since the
+        // big PTEs above them are active.
+        if (gpu_state->page_table_range_4k.table) {
+            uvm_page_mask_init_from_big_ptes(va_block, gpu, &block_context->scratch_page_mask, gpu_state->big_ptes);
+            block_gpu_pte_clear_4k(va_block, gpu, &block_context->scratch_page_mask, 0, pte_batch, NULL);
+        }
+
+        // We unmapped all big PTEs above, which means they have the unmapped
+        // pattern so the GPU MMU won't read 4k PTEs under them. Set them to
+        // invalid to activate the 4ks below so new allocations using just those
+        // 4k PTEs will work.
+        block_gpu_pte_clear_big(va_block, gpu, gpu_state->big_ptes, 0, pte_batch, tlb_batch);
+
+        uvm_pte_batch_end(pte_batch);
+        uvm_tlb_batch_end(tlb_batch, &push, UVM_MEMBAR_NONE);
+
+        uvm_push_end(&push);
+        uvm_tracker_overwrite_with_push(&local_tracker, &push);
+    }
+
+    // The unmap must finish before we free the page tables
+    status = uvm_tracker_wait_deinit(&local_tracker);
+    if (status != NV_OK)
+        return; // System-fatal error, just leak
+
+    // Note that if the PTE is currently 2M with lower tables allocated but not
+    // in use, calling put_ptes on those lower ranges will re-write the 2M entry
+    // to be a PDE.
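+    // All three range sizes are released here. block_put_ptes_safe() skips a
+    // range that was never allocated (table == NULL) and clears the range
+    // struct so a later teardown doesn't put it twice.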
+ block_put_ptes_safe(&gpu_va_space->page_tables, &gpu_state->page_table_range_4k); + block_put_ptes_safe(&gpu_va_space->page_tables, &gpu_state->page_table_range_big); + block_put_ptes_safe(&gpu_va_space->page_tables, &gpu_state->page_table_range_2m); + + gpu_state->pte_is_2m = false; + gpu_state->initialized_big = false; + gpu_state->activated_big = false; + gpu_state->activated_4k = false; + bitmap_zero(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + UVM_ASSERT(block_check_mappings(va_block)); +} + +NV_STATUS uvm_va_block_enable_peer(uvm_va_block_t *va_block, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + + UVM_ASSERT(uvm_gpu_peer_caps(gpu0, gpu1)->link_type != UVM_GPU_LINK_INVALID); + uvm_assert_rwsem_locked_write(&va_space->lock); + uvm_assert_mutex_locked(&va_block->lock); + + if (uvm_processor_mask_test(&va_space->indirect_peers[uvm_id_value(gpu0->id)], gpu1->id)) { + status = block_gpu_map_all_chunks_indirect_peer(va_block, gpu0, gpu1); + if (status != NV_OK) + return status; + + status = block_gpu_map_all_chunks_indirect_peer(va_block, gpu1, gpu0); + if (status != NV_OK) { + block_gpu_unmap_all_chunks_indirect_peer(va_block, gpu0, gpu1); + return status; + } + } + + // TODO: Bug 1767224: Refactor the uvm_va_block_set_accessed_by logic so we + // call it here. + + return NV_OK; +} + +void uvm_va_block_disable_peer(uvm_va_block_t *va_block, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + NV_STATUS status; + uvm_tracker_t tracker = UVM_TRACKER_INIT(); + uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, NULL); + uvm_page_mask_t *unmap_page_mask = &block_context->caller_page_mask; + const uvm_page_mask_t *resident0; + const uvm_page_mask_t *resident1; + + uvm_assert_mutex_locked(&va_block->lock); + + // See comment in block_destroy_gpu_state + if (uvm_processor_mask_test(&va_space->indirect_peers[uvm_id_value(gpu0->id)], gpu1->id)) { + block_gpu_unmap_all_chunks_indirect_peer(va_block, gpu0, gpu1); + block_gpu_unmap_all_chunks_indirect_peer(va_block, gpu1, gpu0); + } + + // If either of the GPUs doesn't have GPU state then nothing could be mapped + // between them. + if (!uvm_va_block_gpu_state_get(va_block, gpu0->id) || !uvm_va_block_gpu_state_get(va_block, gpu1->id)) + return; + + resident0 = uvm_va_block_resident_mask_get(va_block, gpu0->id); + resident1 = uvm_va_block_resident_mask_get(va_block, gpu1->id); + + // Unmap all pages resident on gpu1, but not on gpu0, from gpu0 + if (uvm_page_mask_andnot(unmap_page_mask, resident1, resident0)) { + status = block_unmap_gpu(va_block, block_context, gpu0, unmap_page_mask, &tracker); + if (status != NV_OK) { + // Since all PTEs unmapped by this call have the same aperture, page + // splits should never be required so any failure should be the + // result of a system-fatal error. 
+ UVM_ASSERT_MSG(status == uvm_global_get_status(), + "Unmapping failed: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu0)); + } + } + + // Unmap all pages resident on gpu0, but not on gpu1, from gpu1 + if (uvm_page_mask_andnot(unmap_page_mask, resident0, resident1)) { + status = block_unmap_gpu(va_block, block_context, gpu1, unmap_page_mask, &tracker); + if (status != NV_OK) { + UVM_ASSERT_MSG(status == uvm_global_get_status(), + "Unmapping failed: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu0)); + } + } + + status = uvm_tracker_add_tracker_safe(&va_block->tracker, &tracker); + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); + + status = uvm_tracker_wait_deinit(&tracker); + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); +} + +void uvm_va_block_unmap_preferred_location_uvm_lite(uvm_va_block_t *va_block, uvm_gpu_t *gpu) +{ + NV_STATUS status; + uvm_va_range_t *va_range = va_block->va_range; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, NULL); + uvm_va_block_region_t region = uvm_va_block_region_from_block(va_block); + + uvm_assert_mutex_locked(&va_block->lock); + UVM_ASSERT(uvm_processor_mask_test(&va_range->uvm_lite_gpus, gpu->id)); + + // If the GPU doesn't have GPU state then nothing could be mapped. + if (!uvm_va_block_gpu_state_get(va_block, gpu->id)) + return; + + // In UVM-Lite mode, mappings to the preferred location are not tracked + // directly, so just unmap the whole block. + status = uvm_va_block_unmap(va_block, block_context, gpu->id, region, NULL, &va_block->tracker); + if (status != NV_OK) { + // Unmapping the whole block should not cause page splits so any failure + // should be the result of a system-fatal error. + UVM_ASSERT_MSG(status == uvm_global_get_status(), + "Unmapping failed: %s, GPU %s\n", + nvstatusToString(status), uvm_gpu_name(gpu)); + } + + status = uvm_tracker_wait(&va_block->tracker); + if (status != NV_OK) { + UVM_ASSERT_MSG(status == uvm_global_get_status(), + "Unmapping failed: %s, GPU %s\n", + nvstatusToString(status), uvm_gpu_name(gpu)); + } +} + +// Evict pages from the GPU by moving each resident region to the CPU +// +// Notably the caller needs to support allocation-retry as +// uvm_va_block_migrate_locked() requires that. +static NV_STATUS block_evict_pages_from_gpu(uvm_va_block_t *va_block, uvm_gpu_t *gpu, struct mm_struct *mm) +{ + NV_STATUS status = NV_OK; + const uvm_page_mask_t *resident = uvm_va_block_resident_mask_get(va_block, gpu->id); + uvm_va_block_region_t region = uvm_va_block_region_from_block(va_block); + uvm_va_block_region_t subregion; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, mm); + + if (!uvm_va_block_is_hmm(va_block)) + block_context->policy = uvm_va_range_get_policy(va_block->va_range); + + // Move all subregions resident on the GPU to the CPU + for_each_va_block_subregion_in_mask(subregion, resident, region) { + // Need to set block_context->policy for HMM. + if (uvm_va_block_is_hmm(va_block)) { + uvm_va_policy_node_t *node; + + node = uvm_va_policy_node_find(va_block, uvm_va_block_region_start(va_block, subregion)); + if (node) { + uvm_page_index_t outer = uvm_va_block_cpu_page_index(va_block, + node->node.end) + 1; + // If the policy doesn't cover the subregion, truncate the + // subregion. 
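+                // That is, clamp subregion.outer to one past the last page
+                // covered by the policy node so the migrate call below stays
+                // within a single policy.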
+ if (subregion.outer > outer) + subregion.outer = outer; + block_context->policy = &node->policy; + } + else + block_context->policy = &uvm_va_policy_default; + } + status = uvm_va_block_migrate_locked(va_block, + NULL, + block_context, + subregion, + UVM_ID_CPU, + UVM_MIGRATE_MODE_MAKE_RESIDENT_AND_MAP, + NULL); + if (status != NV_OK) + return status; + } + + UVM_ASSERT(!uvm_processor_mask_test(&va_block->resident, gpu->id)); + return NV_OK; +} + +// This handles allocation-retry internally and hence might unlock and relock +// block's lock. +static void block_unregister_gpu_locked(uvm_va_block_t *va_block, uvm_gpu_t *gpu, struct mm_struct *mm) +{ + NV_STATUS status; + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id); + + uvm_assert_mutex_locked(&va_block->lock); + + if (!gpu_state) + return; + + // The mappings should've already been torn down by GPU VA space unregister + UVM_ASSERT(!uvm_processor_mask_test(&va_block->mapped, gpu->id)); + UVM_ASSERT(uvm_page_mask_empty(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ])); + UVM_ASSERT(!block_gpu_has_page_tables(va_block, gpu)); + + // Use UVM_VA_BLOCK_RETRY_LOCKED() as the va block lock is already taken and + // we don't rely on any state of the block across the call. + status = UVM_VA_BLOCK_RETRY_LOCKED(va_block, NULL, block_evict_pages_from_gpu(va_block, gpu, mm)); + if (status != NV_OK) { + UVM_ERR_PRINT("Failed to evict GPU pages on GPU unregister: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu)); + uvm_global_set_fatal_error(status); + } + + // This function will copy the block's tracker into each chunk then free the + // chunk to PMM. If we do this before waiting for the block tracker below + // we'll populate PMM's free chunks with tracker entries, which gives us + // better testing coverage of chunk synchronization on GPU unregister. + block_destroy_gpu_state(va_block, gpu->id); + + // Any time a GPU is unregistered we need to make sure that there are no + // pending (direct or indirect) tracker entries for that GPU left in the + // block's tracker. The only way to ensure that is to wait for the whole + // tracker. + status = uvm_tracker_wait(&va_block->tracker); + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); +} + +void uvm_va_block_unregister_gpu(uvm_va_block_t *va_block, uvm_gpu_t *gpu, struct mm_struct *mm) +{ + // Take the lock internally to not expose the caller to allocation-retry. + uvm_mutex_lock(&va_block->lock); + + block_unregister_gpu_locked(va_block, gpu, mm); + + uvm_mutex_unlock(&va_block->lock); +} + +static void block_mark_region_cpu_dirty(uvm_va_block_t *va_block, uvm_va_block_region_t region) +{ + uvm_page_index_t page_index; + + uvm_assert_mutex_locked(&va_block->lock); + + for_each_va_block_page_in_region_mask (page_index, &va_block->cpu.resident, region) + block_mark_cpu_page_dirty(va_block, page_index); +} + +// Tears down everything within the block, but doesn't free the block itself. +// Note that when uvm_va_block_kill is called, this is called twice: once for +// the initial kill itself, then again when the block's ref count is eventually +// destroyed. block->va_range is used to track whether the block has already +// been killed. 
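+// The second call is therefore a no-op: block_kill() clears block->va_range
+// (and hmm.va_space for HMM blocks) at the end, so the uvm_va_block_is_dead()
+// check at the top bails out on the second call.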
+static void block_kill(uvm_va_block_t *block) +{ + uvm_va_space_t *va_space; + uvm_perf_event_data_t event_data; + uvm_cpu_chunk_t *chunk; + uvm_gpu_id_t id; + NV_STATUS status; + uvm_va_block_region_t region = uvm_va_block_region_from_block(block); + uvm_page_index_t page_index; + + if (uvm_va_block_is_dead(block)) + return; + + va_space = uvm_va_block_get_va_space(block); + event_data.block_destroy.block = block; + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_BLOCK_DESTROY, &event_data); + + // Unmap all processors in parallel first. Unmapping the whole block won't + // cause a page table split, so this should only fail if we have a system- + // fatal error. + if (!uvm_processor_mask_empty(&block->mapped)) { + uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, NULL); + + // We could only be killed with mapped GPU state by VA range free or VA + // space teardown, so it's safe to use the va_space's block_context + // because both of those have the VA space lock held in write mode. + status = uvm_va_block_unmap_mask(block, block_context, &block->mapped, region, NULL); + UVM_ASSERT(status == uvm_global_get_status()); + } + + UVM_ASSERT(uvm_processor_mask_empty(&block->mapped)); + + // Free the GPU page tables and chunks + for_each_gpu_id(id) + block_destroy_gpu_state(block, id); + + // Wait for the GPU PTE unmaps before freeing CPU memory + uvm_tracker_wait_deinit(&block->tracker); + + // No processor should have the CPU mapped at this point + UVM_ASSERT(block_check_processor_not_mapped(block, UVM_ID_CPU)); + + // Free CPU pages + for_each_va_block_page(page_index, block) { + chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index); + if (!chunk) + continue; + + // be conservative. + // Tell the OS we wrote to the page because we sometimes clear the dirty bit after writing to it. + uvm_cpu_chunk_mark_dirty(chunk, page_index); + uvm_cpu_chunk_remove_from_block(block, chunk, page_index); + uvm_cpu_chunk_put(chunk); + } + + uvm_kvfree((void *)block->cpu.chunks); + block->cpu.chunks = 0; + + // Clearing the resident bit isn't strictly necessary since this block + // is getting destroyed, but it keeps state consistent for assertions. 
+ uvm_page_mask_zero(&block->cpu.resident); + block_clear_resident_processor(block, UVM_ID_CPU); + + if (uvm_va_block_is_hmm(block)) + uvm_va_policy_clear(block, block->start, block->end); + + block->va_range = NULL; +#if UVM_IS_CONFIG_HMM() + block->hmm.va_space = NULL; +#endif +} + +// Called when the block's ref count drops to 0 +void uvm_va_block_destroy(nv_kref_t *nv_kref) +{ + uvm_va_block_t *block = container_of(nv_kref, uvm_va_block_t, kref); + + // Nobody else should have a reference when freeing + uvm_assert_mutex_unlocked(&block->lock); + + uvm_mutex_lock(&block->lock); + block_kill(block); + uvm_mutex_unlock(&block->lock); + + if (uvm_enable_builtin_tests) { + uvm_va_block_wrapper_t *block_wrapper = container_of(block, uvm_va_block_wrapper_t, block); + + kmem_cache_free(g_uvm_va_block_cache, block_wrapper); + } + else { + kmem_cache_free(g_uvm_va_block_cache, block); + } +} + +void uvm_va_block_kill(uvm_va_block_t *va_block) +{ + uvm_mutex_lock(&va_block->lock); + block_kill(va_block); + uvm_mutex_unlock(&va_block->lock); + + // May call block_kill again + uvm_va_block_release(va_block); +} + +static NV_STATUS block_split_presplit_ptes_gpu(uvm_va_block_t *existing, uvm_va_block_t *new, uvm_gpu_t *gpu) +{ + uvm_va_block_gpu_state_t *existing_gpu_state = uvm_va_block_gpu_state_get(existing, gpu->id); + uvm_va_space_t *va_space = uvm_va_block_get_va_space(existing); + uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, NULL); + NvU32 big_page_size = uvm_va_block_gpu_big_page_size(existing, gpu); + NvU32 alloc_sizes; + DECLARE_BITMAP(new_big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + uvm_page_index_t new_start_page_index = uvm_va_block_cpu_page_index(existing, new->start); + size_t big_page_index; + uvm_push_t push; + NV_STATUS status; + + // We only have to split to big PTEs if we're currently a 2M PTE + if (existing_gpu_state->pte_is_2m) { + // We can skip the split if the 2M PTE is invalid and we have no lower + // PTEs. + if (block_page_prot_gpu(existing, gpu, 0) == UVM_PROT_NONE && + !existing_gpu_state->page_table_range_big.table && + !existing_gpu_state->page_table_range_4k.table) + return NV_OK; + + alloc_sizes = big_page_size; + bitmap_fill(new_big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + if (!IS_ALIGNED(new->start, big_page_size)) { + alloc_sizes |= UVM_PAGE_SIZE_4K; + + big_page_index = uvm_va_block_big_page_index(existing, new_start_page_index, big_page_size); + __clear_bit(big_page_index, new_big_ptes); + } + + status = block_alloc_ptes_with_retry(existing, gpu, alloc_sizes, NULL); + if (status != NV_OK) + return status; + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &existing->tracker, + &push, + "Splitting 2M PTE, existing [0x%llx, 0x%llx) new [0x%llx, 0x%llx)", + existing->start, existing->end + 1, + new->start, new->end + 1); + if (status != NV_OK) + return status; + + block_gpu_split_2m(existing, block_context, gpu, new_big_ptes, &push); + } + else { + big_page_index = uvm_va_block_big_page_index(existing, new_start_page_index, big_page_size); + + // If the split point is on a big page boundary, or if the split point + // is not currently covered by a big PTE, we don't have to split + // anything. 
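+        // Illustrative numbers only: with a 64K big page size, a split at a
+        // 64K-aligned address leaves every big PTE wholly on one side of the
+        // split, so there is nothing to do here.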
+ if (IS_ALIGNED(new->start, big_page_size) || + big_page_index == MAX_BIG_PAGES_PER_UVM_VA_BLOCK || + !test_bit(big_page_index, existing_gpu_state->big_ptes)) + return NV_OK; + + status = block_alloc_ptes_with_retry(existing, gpu, UVM_PAGE_SIZE_4K, NULL); + if (status != NV_OK) + return status; + + bitmap_zero(new_big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + __set_bit(big_page_index, new_big_ptes); + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &existing->tracker, + &push, + "Splitting big PTE, existing [0x%llx, 0x%llx) new [0x%llx, 0x%llx)", + existing->start, existing->end + 1, + new->start, new->end + 1); + if (status != NV_OK) + return status; + + block_gpu_split_big(existing, block_context, gpu, new_big_ptes, &push); + } + + uvm_push_end(&push); + + // Adding this push to existing block tracker will cause all GPU PTE splits + // to serialize on each other, but it's simpler than maintaining a separate + // tracker and this path isn't performance-critical. + return uvm_tracker_add_push_safe(&existing->tracker, &push); +} + +static NV_STATUS block_split_presplit_ptes(uvm_va_block_t *existing, uvm_va_block_t *new) +{ + uvm_gpu_t *gpu; + uvm_gpu_id_t id; + NV_STATUS status; + + for_each_gpu_id(id) { + if (!uvm_va_block_gpu_state_get(existing, id)) + continue; + + gpu = block_get_gpu(existing, id); + + if (block_gpu_has_page_tables(existing, gpu)) { + status = block_split_presplit_ptes_gpu(existing, new, gpu); + if (status != NV_OK) + return status; + } + } + + return NV_OK; +} + +typedef struct +{ + // Number of chunks contained by this VA block + size_t num_chunks; + + // Index of the "interesting" chunk, either adjacent to or spanning the + // split point depending on which block this is. + size_t chunk_index; + + // Size of the chunk referenced by chunk_index + uvm_chunk_size_t chunk_size; +} block_gpu_chunk_split_state_t; + +static void block_gpu_chunk_get_split_state(block_gpu_chunk_split_state_t *state, + NvU64 start, + NvU64 end, + uvm_page_index_t page_index, + uvm_gpu_t *gpu) +{ + NvU64 size = end - start + 1; + state->num_chunks = block_num_gpu_chunks_range(start, size, gpu); + state->chunk_index = uvm_va_block_gpu_chunk_index_range(start, size, gpu, page_index, &state->chunk_size); +} + +static void block_merge_chunk(uvm_va_block_t *block, uvm_gpu_t *gpu, uvm_gpu_chunk_t *chunk) +{ + uvm_gpu_t *accessing_gpu; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + uvm_pmm_gpu_merge_chunk(&gpu->pmm, chunk); + + for_each_va_space_gpu_in_mask(accessing_gpu, va_space, &va_space->indirect_peers[uvm_id_value(gpu->id)]) { + NvU64 peer_addr = uvm_pmm_gpu_indirect_peer_addr(&gpu->pmm, chunk, accessing_gpu); + + uvm_pmm_sysmem_mappings_merge_gpu_chunk_mappings(&accessing_gpu->pmm_reverse_sysmem_mappings, + peer_addr, + uvm_gpu_chunk_get_size(chunk)); + } +} + +// Perform any chunk splitting and array growing required for this block split, +// but don't actually move chunk pointers anywhere. 
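+// The pointer movement is done later by block_copy_split_gpu_chunks(), once
+// the split is past the preallocate stage and can no longer fail.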
+static NV_STATUS block_presplit_gpu_chunks(uvm_va_block_t *existing, uvm_va_block_t *new, uvm_gpu_t *gpu) +{ + uvm_va_block_gpu_state_t *existing_gpu_state = uvm_va_block_gpu_state_get(existing, gpu->id); + uvm_gpu_t *accessing_gpu; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(existing); + uvm_gpu_chunk_t **temp_chunks; + uvm_gpu_chunk_t *original_chunk, *curr_chunk; + uvm_page_index_t split_page_index = uvm_va_block_cpu_page_index(existing, new->start); + uvm_chunk_sizes_mask_t split_sizes; + uvm_chunk_size_t subchunk_size; + NV_STATUS status; + block_gpu_chunk_split_state_t existing_before_state, existing_after_state, new_state; + + block_gpu_chunk_get_split_state(&existing_before_state, existing->start, existing->end, split_page_index, gpu); + block_gpu_chunk_get_split_state(&existing_after_state, existing->start, new->start - 1, split_page_index - 1, gpu); + block_gpu_chunk_get_split_state(&new_state, new->start, new->end, 0, gpu); + + // Even though we're splitting existing, we could wind up requiring a larger + // chunks array if we split a large chunk into many smaller ones. + if (existing_after_state.num_chunks > existing_before_state.num_chunks) { + temp_chunks = uvm_kvrealloc(existing_gpu_state->chunks, + existing_after_state.num_chunks * sizeof(existing_gpu_state->chunks[0])); + if (!temp_chunks) + return NV_ERR_NO_MEMORY; + existing_gpu_state->chunks = temp_chunks; + } + + original_chunk = existing_gpu_state->chunks[existing_before_state.chunk_index]; + + // If the chunk covering the split point is not populated, we're done. We've + // already grown the array to cover any new chunks which may be populated + // later. + if (!original_chunk) + return NV_OK; + + // Figure out the splits we need to perform. Remove all sizes >= the current + // size, and all sizes < the target size. Note that the resulting mask will + // be 0 if the sizes match (we're already splitting at a chunk boundary). + UVM_ASSERT(uvm_gpu_chunk_get_size(original_chunk) == existing_before_state.chunk_size); + UVM_ASSERT(existing_before_state.chunk_size >= new_state.chunk_size); + split_sizes = gpu->parent->mmu_user_chunk_sizes; + split_sizes &= existing_before_state.chunk_size - 1; + split_sizes &= ~(new_state.chunk_size - 1); + + // Keep splitting the chunk covering the split point until we hit the target + // size. + curr_chunk = original_chunk; + for_each_chunk_size_rev(subchunk_size, split_sizes) { + size_t last_index, num_subchunks; + + status = uvm_pmm_gpu_split_chunk(&gpu->pmm, curr_chunk, subchunk_size, NULL); + if (status != NV_OK) + goto error; + + // Split physical GPU mappings for indirect peers + for_each_va_space_gpu_in_mask(accessing_gpu, va_space, &va_space->indirect_peers[uvm_id_value(gpu->id)]) { + NvU64 peer_addr = uvm_pmm_gpu_indirect_peer_addr(&gpu->pmm, curr_chunk, accessing_gpu); + + status = uvm_pmm_sysmem_mappings_split_gpu_chunk_mappings(&accessing_gpu->pmm_reverse_sysmem_mappings, + peer_addr, + subchunk_size); + if (status != NV_OK) + goto error; + } + + if (subchunk_size == new_state.chunk_size) + break; + + // Compute the last subchunk index prior to the split point. Divide the + // entire address space into units of subchunk_size, then mod by the + // number of subchunks within the parent. 
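+        // Made-up example: parent chunk of 2M starting at 0x200000,
+        // subchunk_size of 64K, new->start of 0x230000: (0x230000 - 1) / 64K
+        // is 34, and 34 mod 32 subchunks is 2, so subchunk 2 is the last one
+        // entirely before the split point.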
+ last_index = (size_t)uvm_div_pow2_64(new->start - 1, subchunk_size); + num_subchunks = (size_t)uvm_div_pow2_64(uvm_gpu_chunk_get_size(curr_chunk), subchunk_size); + UVM_ASSERT(num_subchunks > 1); + last_index &= num_subchunks - 1; + + uvm_pmm_gpu_get_subchunks(&gpu->pmm, curr_chunk, last_index, 1, &curr_chunk); + UVM_ASSERT(uvm_gpu_chunk_get_size(curr_chunk) == subchunk_size); + } + + // Note that existing's chunks array still has a pointer to original_chunk, + // not to any newly-split subchunks. If a subsequent split failure occurs on + // a later GPU we'll have to merge it back. Once we're past the preallocate + // stage we'll remove it from the chunks array and move the new split chunks + // in. + + return NV_OK; + +error: + // On error we need to leave the chunk in its initial state + block_merge_chunk(existing, gpu, original_chunk); + + return status; +} + +// Perform any CPU chunk splitting that may be required for this block split. +// Just like block_presplit_gpu_chunks, no chunks are moved to the new block. +static NV_STATUS block_presplit_cpu_chunks(uvm_va_block_t *existing, uvm_va_block_t *new) +{ + uvm_page_index_t page_index = uvm_va_block_cpu_page_index(existing, new->start); + uvm_cpu_chunk_t *splitting_chunk; + uvm_chunk_size_t split_sizes = uvm_cpu_chunk_get_allocation_sizes(); + uvm_chunk_size_t subchunk_size; + NV_STATUS status = NV_OK; + + UVM_ASSERT(!IS_ALIGNED(new->start, UVM_VA_BLOCK_SIZE)); + + // If the page covering the split point has not been populated, there is no + // need to split. + if (!uvm_page_mask_test(&existing->cpu.allocated, page_index)) + return NV_OK; + + splitting_chunk = uvm_cpu_chunk_get_chunk_for_page(existing, page_index); + + // If the chunk spanning the split point is already at the correct size, + // there is nothing to do. + if (IS_ALIGNED(new->start, uvm_cpu_chunk_get_size(splitting_chunk))) + return NV_OK; + + // Remove all sizes above the chunk's current size. + split_sizes &= uvm_cpu_chunk_get_size(splitting_chunk) - 1; + // Remove all sizes below the alignment of the new block's start. + split_sizes &= ~(IS_ALIGNED(new->start, UVM_CHUNK_SIZE_64K) ? UVM_CHUNK_SIZE_64K - 1 : 0); + + for_each_chunk_size_rev(subchunk_size, split_sizes) { + uvm_gpu_id_t id; + + UVM_ASSERT(IS_ALIGNED(uvm_cpu_chunk_get_size(splitting_chunk), subchunk_size)); + + for_each_gpu_id(id) { + uvm_gpu_t *gpu; + + if (!uvm_va_block_gpu_state_get(existing, id)) + continue; + + // If the parent chunk has not been mapped, there is nothing to split. + if (uvm_cpu_chunk_get_gpu_mapping_addr(existing, page_index, splitting_chunk, id) == 0) + continue; + + gpu = block_get_gpu(existing, id); + status = uvm_pmm_sysmem_mappings_split_gpu_mappings(&gpu->pmm_reverse_sysmem_mappings, + uvm_cpu_chunk_get_gpu_mapping_addr(existing, + page_index, + splitting_chunk, + id), + subchunk_size); + if (status != NV_OK) + return status; + } + + status = uvm_cpu_chunk_split(existing, splitting_chunk, subchunk_size); + if (status != NV_OK) + return status; + + splitting_chunk = uvm_cpu_chunk_get_chunk_for_page(existing, page_index); + } + + return NV_OK; +} + +static void block_merge_cpu_chunks(uvm_va_block_t *existing, uvm_va_block_t *new) +{ + uvm_page_index_t page_index = uvm_va_block_cpu_page_index(existing, new->start); + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(existing, page_index); + uvm_va_space_t *va_space = existing->va_range->va_space; + uvm_gpu_id_t id; + + if (!chunk) + return; + + // Merge the CPU chunk. If a merge was not done, nothing else needs to be done. 
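+    // Judging by the usage here, uvm_cpu_chunk_merge() returns the merged
+    // parent chunk on success and NULL when no merge was possible, in which
+    // case the reverse-map updates below aren't needed either.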
+ chunk = uvm_cpu_chunk_merge(existing, chunk); + if (!chunk) + return; + + for_each_gpu_id(id) { + NvU64 gpu_mapping_addr; + uvm_gpu_t *gpu; + + if (!uvm_va_block_gpu_state_get(existing, id)) + continue; + + gpu_mapping_addr = uvm_cpu_chunk_get_gpu_mapping_addr(existing, page_index, chunk, id); + if (gpu_mapping_addr == 0) + continue; + + gpu = uvm_va_space_get_gpu(va_space, id); + uvm_pmm_sysmem_mappings_merge_gpu_mappings(&gpu->pmm_reverse_sysmem_mappings, + gpu_mapping_addr, + uvm_cpu_chunk_get_size(chunk)); + } +} + +// Pre-allocate everything which doesn't require retry on both existing and new +// which will be needed to handle a split. If this fails, existing must remain +// functionally unmodified. +static NV_STATUS block_split_preallocate_no_retry(uvm_va_block_t *existing, uvm_va_block_t *new) +{ + NV_STATUS status; + uvm_gpu_t *gpu; + uvm_gpu_id_t id; + uvm_page_index_t split_page_index; + uvm_va_range_t *existing_va_range = existing->va_range; + + status = block_presplit_cpu_chunks(existing, new); + if (status != NV_OK) + goto error; + + for_each_gpu_id(id) { + if (!uvm_va_block_gpu_state_get(existing, id)) + continue; + + gpu = block_get_gpu(existing, id); + + status = block_presplit_gpu_chunks(existing, new, gpu); + if (status != NV_OK) + goto error; + + if (!block_gpu_state_get_alloc(new, gpu)) { + status = NV_ERR_NO_MEMORY; + goto error; + } + } + + if (existing_va_range && existing_va_range->inject_split_error) { + existing_va_range->inject_split_error = false; + status = NV_ERR_NO_MEMORY; + goto error; + } + + if (uvm_va_block_is_hmm(existing)) { + uvm_va_policy_node_t *node = uvm_va_policy_node_find(existing, new->start); + + if (node && node->node.start != new->start) { + status = uvm_va_policy_node_split(existing, node, new->start - 1, NULL); + if (status != NV_OK) + goto error; + } + } + + return NV_OK; + +error: + // Merge back the chunks we split + split_page_index = uvm_va_block_cpu_page_index(existing, new->start); + + for_each_gpu_id(id) { + uvm_gpu_chunk_t *chunk; + size_t chunk_index; + uvm_va_block_gpu_state_t *existing_gpu_state = uvm_va_block_gpu_state_get(existing, id); + + if (!existing_gpu_state) + continue; + + // If the chunk spanning the split point was split, merge it back + gpu = block_get_gpu(existing, id); + chunk_index = block_gpu_chunk_index(existing, gpu, split_page_index, NULL); + chunk = existing_gpu_state->chunks[chunk_index]; + if (!chunk || chunk->state != UVM_PMM_GPU_CHUNK_STATE_IS_SPLIT) + continue; + + block_merge_chunk(existing, gpu, chunk); + + // We could attempt to shrink the chunks array back down, but it doesn't + // hurt much to have it larger than necessary, and we'd have to handle + // the shrink call failing anyway on this error path. + + } + + block_merge_cpu_chunks(existing, new); + + return status; +} + +// Re-calculate the block's top-level processor masks: +// - block->mapped +// - block->resident +// +// This is called on block split. 
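+// These masks are summary bits derived from the per-page masks. A split can
+// leave either half with no mapped or resident pages for a given processor,
+// so each half recomputes them from its own, now smaller, page masks.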
+static void block_set_processor_masks(uvm_va_block_t *block) +{ + size_t num_pages = uvm_va_block_num_cpu_pages(block); + uvm_va_block_region_t block_region = uvm_va_block_region(0, num_pages); + uvm_gpu_id_t id; + + if (uvm_page_mask_region_empty(&block->cpu.pte_bits[UVM_PTE_BITS_CPU_READ], block_region)) { + UVM_ASSERT(uvm_page_mask_region_empty(&block->cpu.pte_bits[UVM_PTE_BITS_CPU_WRITE], block_region)); + uvm_processor_mask_clear(&block->mapped, UVM_ID_CPU); + } + else { + uvm_processor_mask_set(&block->mapped, UVM_ID_CPU); + } + + if (uvm_page_mask_region_empty(&block->cpu.resident, block_region)) { + uvm_va_space_t *va_space = uvm_va_block_get_va_space(block); + + if (uvm_processor_mask_get_gpu_count(&va_space->can_access[UVM_ID_CPU_VALUE]) == 0) + UVM_ASSERT(!uvm_processor_mask_test(&block->mapped, UVM_ID_CPU)); + + block_clear_resident_processor(block, UVM_ID_CPU); + } + else { + block_set_resident_processor(block, UVM_ID_CPU); + } + + for_each_gpu_id(id) { + uvm_va_block_gpu_state_t *gpu_state = uvm_va_block_gpu_state_get(block, id); + if (!gpu_state) + continue; + + if (uvm_page_mask_region_empty(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_READ], block_region)) { + UVM_ASSERT(uvm_page_mask_region_empty(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_WRITE], block_region)); + UVM_ASSERT(uvm_page_mask_region_empty(&gpu_state->pte_bits[UVM_PTE_BITS_GPU_ATOMIC], block_region)); + uvm_processor_mask_clear(&block->mapped, id); + } + else { + uvm_processor_mask_set(&block->mapped, id); + } + + if (uvm_page_mask_region_empty(&gpu_state->resident, block_region)) + block_clear_resident_processor(block, id); + else + block_set_resident_processor(block, id); + + if (uvm_page_mask_region_empty(&gpu_state->evicted, block_region)) + uvm_processor_mask_clear(&block->evicted_gpus, id); + else + uvm_processor_mask_set(&block->evicted_gpus, id); + } +} + +// Split a PAGES_PER_UVM_VA_BLOCK sized bitmap into new and existing parts +// corresponding to a block split. +static void block_split_page_mask(uvm_page_mask_t *existing_mask, + size_t existing_pages, + uvm_page_mask_t *new_mask, + size_t new_pages) +{ + UVM_ASSERT_MSG(existing_pages + new_pages <= PAGES_PER_UVM_VA_BLOCK, "existing %zu new %zu\n", + existing_pages, new_pages); + + // The new block is always in the upper region of existing, so shift the bit + // vectors down. + // + // Note that bitmap_shift_right requires both dst and src to be the same + // size. That's ok since we don't scale them by block size. + uvm_page_mask_shift_right(new_mask, existing_mask, existing_pages); + uvm_page_mask_region_clear(existing_mask, uvm_va_block_region(existing_pages, existing_pages + new_pages)); +} + +// Split the CPU state within the existing block. existing's start is correct +// but its end has not yet been adjusted. 
+static void block_split_cpu(uvm_va_block_t *existing, uvm_va_block_t *new) +{ + size_t existing_pages, new_pages = uvm_va_block_num_cpu_pages(new); + uvm_pte_bits_cpu_t pte_bit; + uvm_va_block_region_t block_region = uvm_va_block_region_from_block(existing); + uvm_page_index_t split_page_index = uvm_va_block_cpu_page_index(existing, new->start); + uvm_page_index_t page_index; + uvm_cpu_chunk_t *chunk; + uvm_va_range_t *existing_va_range = existing->va_range; + + if (existing_va_range) { + UVM_ASSERT(existing->va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + UVM_ASSERT(existing->va_range->type == new->va_range->type); + } + UVM_ASSERT(existing->start < new->start); + UVM_ASSERT(existing->end == new->end); + + UVM_ASSERT(PAGE_ALIGNED(new->start)); + UVM_ASSERT(PAGE_ALIGNED(existing->start)); + + existing_pages = (new->start - existing->start) / PAGE_SIZE; + + // We don't have to unmap the CPU since its virtual -> physical mappings + // don't change. + + page_index = uvm_va_block_next_page_in_mask(block_region, &existing->cpu.allocated, split_page_index - 1); + + while (page_index < block_region.outer) { + uvm_page_index_t new_chunk_page_index; + NV_STATUS status; + + chunk = uvm_cpu_chunk_get_chunk_for_page(existing, page_index); + UVM_ASSERT(chunk); + + uvm_cpu_chunk_remove_from_block(existing, chunk, page_index); + + // The chunk has to be adjusted for the new block before inserting it. + new_chunk_page_index = page_index - split_page_index; + status = uvm_cpu_chunk_insert_in_block(new, chunk, new_chunk_page_index); + UVM_ASSERT(status == NV_OK); + page_index = uvm_va_block_next_page_in_mask(block_region, + &existing->cpu.allocated, + page_index + uvm_cpu_chunk_num_pages(chunk) - 1); + } + + new->cpu.ever_mapped = existing->cpu.ever_mapped; + + block_split_page_mask(&existing->cpu.resident, existing_pages, &new->cpu.resident, new_pages); + + for (pte_bit = 0; pte_bit < UVM_PTE_BITS_CPU_MAX; pte_bit++) + block_split_page_mask(&existing->cpu.pte_bits[pte_bit], existing_pages, &new->cpu.pte_bits[pte_bit], new_pages); +} + +// Fill out the blocks' chunks arrays with the chunks split by +// block_presplit_gpu_chunks. +static void block_copy_split_gpu_chunks(uvm_va_block_t *existing, uvm_va_block_t *new, uvm_gpu_t *gpu) +{ + uvm_va_block_gpu_state_t *existing_gpu_state = uvm_va_block_gpu_state_get(existing, gpu->id); + uvm_va_block_gpu_state_t *new_gpu_state = uvm_va_block_gpu_state_get(new, gpu->id); + uvm_gpu_chunk_t **temp_chunks; + uvm_gpu_chunk_t *original_chunk; + block_gpu_chunk_split_state_t existing_before_state, existing_after_state, new_state; + size_t num_pre_chunks, num_post_chunks, num_split_chunks_existing, num_split_chunks_new; + uvm_page_index_t split_page_index = uvm_va_block_cpu_page_index(existing, new->start); + size_t i; + + block_gpu_chunk_get_split_state(&existing_before_state, existing->start, existing->end, split_page_index, gpu); + block_gpu_chunk_get_split_state(&existing_after_state, existing->start, new->start - 1, split_page_index - 1, gpu); + block_gpu_chunk_get_split_state(&new_state, new->start, new->end, 0, gpu); + + // General case (B is original_chunk): + // split + // v + // existing (before) [------ A -----][------ B -----][------ C -----] + // existing (after) [------ A -----][- B0 -] + // new [- B1 -][------ C -----] + // + // Note that the logic below also handles the case of the split happening at + // a chunk boundary. That case behaves as though there is no B0 chunk. + + // Number of chunks to the left and right of original_chunk (A and C above). 
+ // Either or both of these may be 0. + num_pre_chunks = existing_before_state.chunk_index; + num_post_chunks = existing_before_state.num_chunks - num_pre_chunks - 1; + + // Number of subchunks under existing's portion of original_chunk (B0 above) + num_split_chunks_existing = existing_after_state.num_chunks - num_pre_chunks; + + // Number of subchunks under new's portion of original_chunk (B1 above) + num_split_chunks_new = new_state.num_chunks - num_post_chunks; + + UVM_ASSERT(num_pre_chunks + num_split_chunks_existing > 0); + UVM_ASSERT(num_split_chunks_new > 0); + + // Copy post chunks from the end of existing into new (C above) + memcpy(&new_gpu_state->chunks[num_split_chunks_new], + &existing_gpu_state->chunks[existing_before_state.chunk_index + 1], + num_post_chunks * sizeof(new_gpu_state->chunks[0])); + + // Save off the original split chunk since we may overwrite the array + original_chunk = existing_gpu_state->chunks[existing_before_state.chunk_index]; + + // Fill out the new pointers + if (original_chunk) { + // Note that if the split happened at a chunk boundary, original_chunk + // will not be split. In that case, num_split_chunks_existing will be 0 + // and num_split_chunks_new will be 1, so the left copy will be skipped + // and the right copy will pick up the chunk. + + // Copy left newly-split chunks into existing (B0 above). The array was + // re-sized in block_presplit_gpu_chunks as necessary. + size_t num_subchunks; + + num_subchunks = uvm_pmm_gpu_get_subchunks(&gpu->pmm, + original_chunk, + 0, // start_index + num_split_chunks_existing, + &existing_gpu_state->chunks[existing_before_state.chunk_index]); + UVM_ASSERT(num_subchunks == num_split_chunks_existing); + + // Copy right newly-split chunks into new (B1 above), overwriting the + // pointer to the original chunk. + num_subchunks = uvm_pmm_gpu_get_subchunks(&gpu->pmm, + original_chunk, + num_split_chunks_existing, // start_index + num_split_chunks_new, + &new_gpu_state->chunks[0]); + UVM_ASSERT(num_subchunks == num_split_chunks_new); + } + else { + // If the chunk wasn't already populated we don't need to copy pointers + // anywhere, but we need to clear out stale pointers from existing's + // array covering the new elements. new's chunks array was already zero- + // initialized. + memset(&existing_gpu_state->chunks[existing_before_state.chunk_index], + 0, + num_split_chunks_existing * sizeof(existing_gpu_state->chunks[0])); + } + + // Since we update the reverse map information, protect it against a + // concurrent lookup + uvm_spin_lock(&gpu->pmm.list_lock); + + // Update the reverse map of all the chunks that are now under the new block + for (i = 0; i < new_state.num_chunks; ++i) { + if (new_gpu_state->chunks[i]) { + UVM_ASSERT(new_gpu_state->chunks[i]->va_block == existing); + new_gpu_state->chunks[i]->va_block = new; + + // Adjust the page_index within the VA block for the new subchunks in + // the new VA block + UVM_ASSERT(new_gpu_state->chunks[i]->va_block_page_index >= split_page_index); + new_gpu_state->chunks[i]->va_block_page_index -= split_page_index; + } + } + + uvm_spin_unlock(&gpu->pmm.list_lock); + + // Attempt to shrink existing's chunk allocation. If the realloc fails, just + // keep on using the old larger one. 
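+ //
+ // For example, if existing needed 32 chunk slots before the split but only
+ // 20 afterwards, the array is reallocated down to 20 entries; on failure the
+ // extra slots are simply left unused.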
+ if (existing_after_state.num_chunks < existing_before_state.num_chunks) { + temp_chunks = uvm_kvrealloc(existing_gpu_state->chunks, + existing_after_state.num_chunks * sizeof(existing_gpu_state->chunks[0])); + if (temp_chunks) + existing_gpu_state->chunks = temp_chunks; + } +} + +static void block_split_gpu(uvm_va_block_t *existing, uvm_va_block_t *new, uvm_gpu_id_t gpu_id) +{ + uvm_va_block_gpu_state_t *existing_gpu_state = uvm_va_block_gpu_state_get(existing, gpu_id); + uvm_va_block_gpu_state_t *new_gpu_state = uvm_va_block_gpu_state_get(new, gpu_id); + uvm_va_space_t *va_space = uvm_va_block_get_va_space(existing); + uvm_gpu_va_space_t *gpu_va_space; + uvm_gpu_t *gpu; + uvm_gpu_t *accessing_gpu; + size_t new_pages = uvm_va_block_num_cpu_pages(new); + size_t existing_pages, existing_pages_4k, existing_pages_big, new_pages_big; + uvm_pte_bits_gpu_t pte_bit; + size_t num_chunks, i; + uvm_cpu_chunk_t *cpu_chunk; + uvm_page_index_t page_index; + + if (!existing_gpu_state) + return; + + gpu = uvm_va_space_get_gpu(va_space, gpu_id); + UVM_ASSERT(new_gpu_state); + + new_gpu_state->force_4k_ptes = existing_gpu_state->force_4k_ptes; + + UVM_ASSERT(PAGE_ALIGNED(new->start)); + UVM_ASSERT(PAGE_ALIGNED(existing->start)); + existing_pages = (new->start - existing->start) / PAGE_SIZE; + + uvm_cpu_chunk_gpu_mapping_split(existing, new, gpu_id); + + for_each_cpu_chunk_in_block(cpu_chunk, page_index, new) { + uvm_pmm_sysmem_mappings_reparent_gpu_mapping(&gpu->pmm_reverse_sysmem_mappings, + uvm_cpu_chunk_get_gpu_mapping_addr(new, + page_index, + cpu_chunk, + gpu_id), + new); + } + + block_copy_split_gpu_chunks(existing, new, gpu); + + num_chunks = block_num_gpu_chunks(new, gpu); + + // Reparent GPU mappings for indirect peers + for (i = 0; i < num_chunks; ++i) { + uvm_gpu_chunk_t *chunk = new_gpu_state->chunks[i]; + if (!chunk) + continue; + + for_each_va_space_gpu_in_mask(accessing_gpu, va_space, &va_space->indirect_peers[uvm_id_value(gpu->id)]) { + NvU64 peer_addr = uvm_pmm_gpu_indirect_peer_addr(&gpu->pmm, chunk, accessing_gpu); + + uvm_pmm_sysmem_mappings_reparent_gpu_chunk_mapping(&accessing_gpu->pmm_reverse_sysmem_mappings, + peer_addr, + new); + } + } + + block_split_page_mask(&existing_gpu_state->resident, + existing_pages, + &new_gpu_state->resident, + new_pages); + + for (pte_bit = 0; pte_bit < UVM_PTE_BITS_GPU_MAX; pte_bit++) { + block_split_page_mask(&existing_gpu_state->pte_bits[pte_bit], existing_pages, + &new_gpu_state->pte_bits[pte_bit], new_pages); + } + + // Adjust page table ranges. + gpu_va_space = uvm_gpu_va_space_get(va_space, gpu); + if (gpu_va_space) { + if (existing_gpu_state->page_table_range_big.table) { + NvU32 big_page_size = uvm_va_block_gpu_big_page_size(existing, gpu); + + // existing's end has not been adjusted yet + existing_pages_big = range_num_big_pages(existing->start, new->start - 1, big_page_size); + + // Take references on all big pages covered by new + new_pages_big = uvm_va_block_num_big_pages(new, big_page_size); + if (new_pages_big) { + uvm_page_table_range_get_upper(&gpu_va_space->page_tables, + &existing_gpu_state->page_table_range_big, + &new_gpu_state->page_table_range_big, + new_pages_big); + + // If the split point is within a big page region, we might have + // a gap since neither existing nor new can use it anymore. + // Get the top N bits from existing's mask to handle that. 
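+ //
+ // For example, with a 64K big page size and a split point in the middle of
+ // a big page, that big page is fully covered by neither block: new takes
+ // only the top new_pages_big bits of existing's mask here, and existing
+ // clears everything above existing_pages_big further down, so the bit for
+ // the spanning big page is dropped from both.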
+ bitmap_shift_right(new_gpu_state->big_ptes, + existing_gpu_state->big_ptes, + uvm_va_block_num_big_pages(existing, big_page_size) - new_pages_big, + MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + new_gpu_state->initialized_big = existing_gpu_state->initialized_big; + } + + // Drop existing's references on the big PTEs it no longer covers + // now that new has references on them. Note that neither existing + // nor new might have big PTEs after the split. In that case, this + // shrink will free the entire old range. + uvm_page_table_range_shrink(&gpu_va_space->page_tables, + &existing_gpu_state->page_table_range_big, + existing_pages_big); + + if (existing_pages_big == 0) { + memset(&existing_gpu_state->page_table_range_big, 0, sizeof(existing_gpu_state->page_table_range_big)); + existing_gpu_state->initialized_big = false; + } + + bitmap_clear(existing_gpu_state->big_ptes, + existing_pages_big, + MAX_BIG_PAGES_PER_UVM_VA_BLOCK - existing_pages_big); + } + + if (existing_gpu_state->page_table_range_4k.table) { + // Since existing and new share the same PDE we just need to bump + // the ref-count on new's sub-range. + uvm_page_table_range_get_upper(&gpu_va_space->page_tables, + &existing_gpu_state->page_table_range_4k, + &new_gpu_state->page_table_range_4k, + uvm_va_block_size(new) / UVM_PAGE_SIZE_4K); + + // Drop existing's references on the PTEs it no longer covers now + // that new has references on them. + existing_pages_4k = existing_pages * (PAGE_SIZE / UVM_PAGE_SIZE_4K); + uvm_page_table_range_shrink(&gpu_va_space->page_tables, + &existing_gpu_state->page_table_range_4k, + existing_pages_4k); + } + + // We have to set this explicitly to handle the case of splitting an + // invalid, active 2M PTE with no lower page tables allocated. + if (existing_gpu_state->pte_is_2m) { + UVM_ASSERT(!existing_gpu_state->page_table_range_big.table); + UVM_ASSERT(!existing_gpu_state->page_table_range_4k.table); + existing_gpu_state->pte_is_2m = false; + } + + // existing can't possibly cover 2MB after a split, so drop any 2M PTE + // references it has. We've taken the necessary references on the lower + // tables above. 
+ block_put_ptes_safe(&gpu_va_space->page_tables, &existing_gpu_state->page_table_range_2m); + existing_gpu_state->activated_big = false; + existing_gpu_state->activated_4k = false; + } + + block_split_page_mask(&existing_gpu_state->evicted, existing_pages, &new_gpu_state->evicted, new_pages); +} + +NV_STATUS uvm_va_block_split(uvm_va_block_t *existing_va_block, + NvU64 new_end, + uvm_va_block_t **new_va_block, + uvm_va_range_t *new_va_range) +{ + uvm_va_space_t *va_space; + uvm_va_block_t *new_block = NULL; + NV_STATUS status; + + va_space = new_va_range->va_space; + UVM_ASSERT(existing_va_block->va_range); + UVM_ASSERT(existing_va_block->va_range->va_space == va_space); + UVM_ASSERT(!uvm_va_block_is_hmm(existing_va_block)); + + // External range types can't be split + UVM_ASSERT(existing_va_block->va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + UVM_ASSERT(new_va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + uvm_assert_rwsem_locked_write(&va_space->lock); + + UVM_ASSERT(new_end > existing_va_block->start); + UVM_ASSERT(new_end < existing_va_block->end); + UVM_ASSERT(PAGE_ALIGNED(new_end + 1)); + + status = uvm_va_block_create(new_va_range, new_end + 1, existing_va_block->end, &new_block); + if (status != NV_OK) + return status; + + // We're protected from other splits and faults by the va_space lock being + // held in write mode, but that doesn't stop the reverse mapping (eviction + // path) from inspecting the existing block. Stop those threads by taking + // the block lock. When a reverse mapping thread takes this lock after the + // split has been performed, it will have to re-inspect state and may see + // that it should use the newly-split block instead. + uvm_mutex_lock(&existing_va_block->lock); + + status = uvm_va_block_split_locked(existing_va_block, new_end, new_block, new_va_range); + + uvm_mutex_unlock(&existing_va_block->lock); + + if (status != NV_OK) + uvm_va_block_release(new_block); + else if (new_va_block) + *new_va_block = new_block; + + return status; +} + +NV_STATUS uvm_va_block_split_locked(uvm_va_block_t *existing_va_block, + NvU64 new_end, + uvm_va_block_t *new_block, + uvm_va_range_t *new_va_range) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(existing_va_block); + uvm_gpu_id_t id; + NV_STATUS status; + uvm_perf_event_data_t event_data; + + for_each_gpu_id(id) + UVM_ASSERT(block_check_chunks(existing_va_block, id)); + + // As soon as we update existing's reverse mappings to point to the newly- + // split block, the eviction path could try to operate on the new block. + // Lock that out too until new is ready. + // + // Note that we usually shouldn't nest block locks, but it's ok here because + // we just created new_block so no other thread could possibly take it out + // of order with existing's lock. + uvm_mutex_lock_no_tracking(&new_block->lock); + + // The split has to be transactional, meaning that if we fail, the existing + // block must not be modified. Handle that by pre-allocating everything we + // might need under both existing and new at the start so we only have a + // single point of failure. + + // Since pre-allocation might require allocating new PTEs, we have to handle + // allocation retry which might drop existing's block lock. The + // preallocation is split into two steps for that: the first part which + // allocates and splits PTEs can handle having the block lock dropped then + // re-taken. It won't modify existing_va_block other than adding new PTE + // allocations and splitting existing PTEs, which is always safe. 
+ status = UVM_VA_BLOCK_RETRY_LOCKED(existing_va_block, + NULL, + block_split_presplit_ptes(existing_va_block, new_block)); + if (status != NV_OK) + goto out; + + // Pre-allocate, stage two. This modifies existing_va_block in ways which + // violate many assumptions (such as changing chunk size), but it will put + // things back into place on a failure without dropping the block lock. + status = block_split_preallocate_no_retry(existing_va_block, new_block); + if (status != NV_OK) + goto out; + + // We'll potentially be freeing page tables, so we need to wait for any + // outstanding work before we start + status = uvm_tracker_wait(&existing_va_block->tracker); + if (status != NV_OK) + goto out; + + // Update existing's state only once we're past all failure points + + event_data.block_shrink.block = existing_va_block; + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_BLOCK_SHRINK, &event_data); + + block_split_cpu(existing_va_block, new_block); + + for_each_gpu_id(id) + block_split_gpu(existing_va_block, new_block, id); + + // Update the size of the existing block first so that + // block_set_processor_masks can use block_{set,clear}_resident_processor + // that relies on the size to be correct. + existing_va_block->end = new_end; + + block_split_page_mask(&existing_va_block->read_duplicated_pages, + uvm_va_block_num_cpu_pages(existing_va_block), + &new_block->read_duplicated_pages, + uvm_va_block_num_cpu_pages(new_block)); + + block_split_page_mask(&existing_va_block->maybe_mapped_pages, + uvm_va_block_num_cpu_pages(existing_va_block), + &new_block->maybe_mapped_pages, + uvm_va_block_num_cpu_pages(new_block)); + + block_set_processor_masks(existing_va_block); + block_set_processor_masks(new_block); + + if (uvm_va_block_is_hmm(existing_va_block)) + uvm_va_policy_node_split_move(existing_va_block, new_block); + +out: + // Run checks on existing_va_block even on failure, since an error must + // leave the block in a consistent state. + for_each_gpu_id(id) { + UVM_ASSERT(block_check_chunks(existing_va_block, id)); + if (status == NV_OK) + UVM_ASSERT(block_check_chunks(new_block, id)); + } + + UVM_ASSERT(block_check_mappings(existing_va_block)); + UVM_ASSERT(block_verify_cpu_chunks(existing_va_block)); + if (status == NV_OK) { + UVM_ASSERT(block_check_mappings(new_block)); + UVM_ASSERT(block_verify_cpu_chunks(new_block)); + } + + uvm_mutex_unlock_no_tracking(&new_block->lock); + + return status; +} + +static bool block_region_might_read_duplicate(uvm_va_block_t *va_block, + uvm_va_block_region_t region) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_va_range_t *va_range = va_block->va_range; + + if (!uvm_va_space_can_read_duplicate(va_space, NULL)) + return false; + + // TODO: Bug 2046423: need to implement read duplication support in Linux. + if (uvm_va_block_is_hmm(va_block) || + uvm_va_range_get_policy(va_range)->read_duplication == UVM_READ_DUPLICATION_DISABLED) + return false; + + if (uvm_va_range_get_policy(va_range)->read_duplication == UVM_READ_DUPLICATION_UNSET + && uvm_page_mask_region_weight(&va_block->read_duplicated_pages, region) == 0) + return false; + + return true; +} + +// Returns the new access permission for the processor that faulted or +// triggered access counter notifications on the given page +// +// TODO: Bug 1766424: this function works on a single page at a time. This +// could be changed in the future to optimize multiple faults/counters on +// contiguous pages. 
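+//
+// For example, a read fault can be promoted to READ_WRITE when the logical
+// protection allows writes, the page won't be read-duplicated, and no
+// faultable processor would need its atomic mapping revoked; a write fault
+// can be promoted to READ_WRITE_ATOMIC when the faulting processor has native
+// atomics to the new residency.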
+static uvm_prot_t compute_new_permission(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t fault_processor_id, + uvm_processor_id_t new_residency, + uvm_fault_access_type_t access_type) +{ + uvm_va_range_t *va_range; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_prot_t logical_prot, new_prot; + + // TODO: Bug 1766432: Refactor into policies. Current policy is + // query_promote: upgrade access privileges to avoid future faults IF + // they don't trigger further revocations. + va_range = va_block->va_range; + + new_prot = uvm_fault_access_type_to_prot(access_type); + logical_prot = uvm_va_range_logical_prot(va_range); + + UVM_ASSERT(logical_prot >= new_prot); + + if (logical_prot > UVM_PROT_READ_ONLY && new_prot == UVM_PROT_READ_ONLY && + !block_region_might_read_duplicate(va_block, uvm_va_block_region_for_page(page_index))) { + uvm_processor_mask_t processors_with_atomic_mapping; + uvm_processor_mask_t revoke_processors; + + uvm_va_block_page_authorized_processors(va_block, + page_index, + UVM_PROT_READ_WRITE_ATOMIC, + &processors_with_atomic_mapping); + + uvm_processor_mask_andnot(&revoke_processors, + &processors_with_atomic_mapping, + &va_space->has_native_atomics[uvm_id_value(new_residency)]); + + // Only check if there are no faultable processors in the revoke processors mask + uvm_processor_mask_and(&revoke_processors, &revoke_processors, &va_space->faultable_processors); + + if (uvm_processor_mask_empty(&revoke_processors)) + new_prot = UVM_PROT_READ_WRITE; + } + if (logical_prot == UVM_PROT_READ_WRITE_ATOMIC && new_prot == UVM_PROT_READ_WRITE) { + if (uvm_processor_mask_test(&va_space->has_native_atomics[uvm_id_value(new_residency)], fault_processor_id)) + new_prot = UVM_PROT_READ_WRITE_ATOMIC; + } + + return new_prot; +} + +static NV_STATUS do_block_add_mappings_after_migration(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t new_residency, + uvm_processor_id_t processor_id, + const uvm_processor_mask_t *map_processors, + uvm_va_block_region_t region, + const uvm_page_mask_t *map_page_mask, + uvm_prot_t max_prot, + const uvm_processor_mask_t *thrashing_processors, + uvm_tracker_t *tracker) +{ + NV_STATUS status; + uvm_processor_id_t map_processor_id; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_prot_t new_map_prot = max_prot; + uvm_processor_mask_t map_processors_local; + + uvm_processor_mask_copy(&map_processors_local, map_processors); + + // Handle atomic mappings separately + if (max_prot == UVM_PROT_READ_WRITE_ATOMIC) { + bool this_processor_has_native_atomics; + + this_processor_has_native_atomics = + uvm_processor_mask_test(&va_space->has_native_atomics[uvm_id_value(new_residency)], processor_id); + + if (this_processor_has_native_atomics) { + uvm_processor_mask_t map_atomic_processors; + + // Compute processors with native atomics to the residency + uvm_processor_mask_and(&map_atomic_processors, + &map_processors_local, + &va_space->has_native_atomics[uvm_id_value(new_residency)]); + + // Filter out these mapped processors for the next steps + uvm_processor_mask_andnot(&map_processors_local, &map_processors_local, &map_atomic_processors); + + for_each_id_in_mask(map_processor_id, &map_atomic_processors) { + UvmEventMapRemoteCause cause = UvmEventMapRemoteCausePolicy; + if (thrashing_processors && uvm_processor_mask_test(thrashing_processors, map_processor_id)) + cause = UvmEventMapRemoteCauseThrashing; + + status = uvm_va_block_map(va_block, + 
va_block_context,
+ map_processor_id,
+ region,
+ map_page_mask,
+ UVM_PROT_READ_WRITE_ATOMIC,
+ cause,
+ tracker);
+ if (status != NV_OK)
+ return status;
+ }
+
+ new_map_prot = UVM_PROT_READ_WRITE;
+ }
+ else {
+ if (UVM_ID_IS_CPU(processor_id))
+ new_map_prot = UVM_PROT_READ_WRITE;
+ else
+ new_map_prot = UVM_PROT_READ_ONLY;
+ }
+ }
+
+ // Map the rest of processors
+ for_each_id_in_mask(map_processor_id, &map_processors_local) {
+ UvmEventMapRemoteCause cause = UvmEventMapRemoteCausePolicy;
+ uvm_prot_t final_map_prot;
+ bool map_processor_has_enabled_system_wide_atomics =
+ uvm_processor_mask_test(&va_space->system_wide_atomics_enabled_processors, map_processor_id);
+
+ // Write mappings from processors with disabled system-wide atomics are treated like atomics
+ if (new_map_prot == UVM_PROT_READ_WRITE && !map_processor_has_enabled_system_wide_atomics)
+ final_map_prot = UVM_PROT_READ_WRITE_ATOMIC;
+ else
+ final_map_prot = new_map_prot;
+
+ if (thrashing_processors && uvm_processor_mask_test(thrashing_processors, map_processor_id))
+ cause = UvmEventMapRemoteCauseThrashing;
+
+ status = uvm_va_block_map(va_block,
+ va_block_context,
+ map_processor_id,
+ region,
+ map_page_mask,
+ final_map_prot,
+ cause,
+ tracker);
+ if (status != NV_OK)
+ return status;
+ }
+
+ return NV_OK;
+}
+
+NV_STATUS uvm_va_block_add_mappings_after_migration(uvm_va_block_t *va_block,
+ uvm_va_block_context_t *va_block_context,
+ uvm_processor_id_t new_residency,
+ uvm_processor_id_t processor_id,
+ uvm_va_block_region_t region,
+ const uvm_page_mask_t *map_page_mask,
+ uvm_prot_t max_prot,
+ const uvm_processor_mask_t *thrashing_processors)
+{
+ NV_STATUS tracker_status, status = NV_OK;
+ uvm_processor_mask_t map_other_processors, map_uvm_lite_gpus;
+ uvm_processor_id_t map_processor_id;
+ uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
+ const uvm_page_mask_t *final_page_mask = map_page_mask;
+ uvm_tracker_t local_tracker = UVM_TRACKER_INIT();
+ uvm_va_policy_t *policy = va_block_context->policy;
+ uvm_processor_id_t preferred_location;
+
+ // Read duplication takes precedence over SetAccessedBy.
+ //
+ // Exclude ranges with read duplication set...
+ if (uvm_va_policy_is_read_duplicate(policy, va_space)) {
+ status = NV_OK;
+ goto out;
+ }
+
+ // ... 
and pages read-duplicated by performance heuristics + if (policy->read_duplication == UVM_READ_DUPLICATION_UNSET) { + if (map_page_mask) { + uvm_page_mask_andnot(&va_block_context->mapping.filtered_page_mask, + map_page_mask, + &va_block->read_duplicated_pages); + } + else { + uvm_page_mask_complement(&va_block_context->mapping.filtered_page_mask, &va_block->read_duplicated_pages); + } + final_page_mask = &va_block_context->mapping.filtered_page_mask; + } + + // Add mappings for accessed_by processors and the given processor mask + if (thrashing_processors) + uvm_processor_mask_or(&map_other_processors, &policy->accessed_by, thrashing_processors); + else + uvm_processor_mask_copy(&map_other_processors, &policy->accessed_by); + + // Only processors that can access the new location must be considered + uvm_processor_mask_and(&map_other_processors, + &map_other_processors, + &va_space->accessible_from[uvm_id_value(new_residency)]); + + // Exclude caller processor as it must have already been mapped + uvm_processor_mask_clear(&map_other_processors, processor_id); + + // Exclude preferred location so it won't get remote mappings + preferred_location = policy->preferred_location; + if (UVM_ID_IS_VALID(preferred_location) && + !uvm_id_equal(new_residency, preferred_location) && + uvm_va_space_processor_has_memory(va_space, preferred_location)) { + uvm_processor_mask_clear(&map_other_processors, preferred_location); + } + + // Map the UVM-Lite GPUs if the new location is the preferred location. This + // will only create mappings on first touch. After that they're persistent + // so uvm_va_block_map will be a no-op. + uvm_processor_mask_and(&map_uvm_lite_gpus, &map_other_processors, block_get_uvm_lite_gpus(va_block)); + if (!uvm_processor_mask_empty(&map_uvm_lite_gpus) && + uvm_id_equal(new_residency, preferred_location)) { + for_each_id_in_mask(map_processor_id, &map_uvm_lite_gpus) { + status = uvm_va_block_map(va_block, + va_block_context, + map_processor_id, + region, + final_page_mask, + UVM_PROT_READ_WRITE_ATOMIC, + UvmEventMapRemoteCauseCoherence, + &local_tracker); + if (status != NV_OK) + goto out; + } + } + + uvm_processor_mask_andnot(&map_other_processors, &map_other_processors, block_get_uvm_lite_gpus(va_block)); + + // We can't map non-migratable pages to the CPU. If we have any, build a + // new mask of migratable pages and map the CPU separately. 
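+ //
+ // For example, if part of the region belongs to a non-migratable range
+ // group, the CPU is mapped below using only the intersection of
+ // final_page_mask with the migratable page mask, and is then removed from
+ // map_other_processors so the combined mapping call further down skips it.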
+ if (uvm_processor_mask_test(&map_other_processors, UVM_ID_CPU) && + !uvm_range_group_all_migratable(va_space, + uvm_va_block_region_start(va_block, region), + uvm_va_block_region_end(va_block, region))) { + uvm_page_mask_t *migratable_mask = &va_block_context->mapping.migratable_mask; + + uvm_range_group_migratable_page_mask(va_block, region, migratable_mask); + if (uvm_page_mask_and(migratable_mask, migratable_mask, final_page_mask)) { + uvm_processor_mask_t cpu_mask; + uvm_processor_mask_zero(&cpu_mask); + uvm_processor_mask_set(&cpu_mask, UVM_ID_CPU); + + status = do_block_add_mappings_after_migration(va_block, + va_block_context, + new_residency, + processor_id, + &cpu_mask, + region, + migratable_mask, + max_prot, + thrashing_processors, + &local_tracker); + if (status != NV_OK) + goto out; + } + + uvm_processor_mask_clear(&map_other_processors, UVM_ID_CPU); + } + + status = do_block_add_mappings_after_migration(va_block, + va_block_context, + new_residency, + processor_id, + &map_other_processors, + region, + final_page_mask, + max_prot, + thrashing_processors, + &local_tracker); + if (status != NV_OK) + goto out; + +out: + tracker_status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker); + uvm_tracker_deinit(&local_tracker); + return status == NV_OK ? tracker_status : status; +} + +// TODO: Bug 1750144: check logical permissions from HMM to know what's the +// maximum allowed. +uvm_prot_t uvm_va_block_page_compute_highest_permission(uvm_va_block_t *va_block, + uvm_processor_id_t processor_id, + uvm_page_index_t page_index) +{ + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_processor_mask_t resident_processors; + NvU32 resident_processors_count; + + if (uvm_processor_mask_test(block_get_uvm_lite_gpus(va_block), processor_id)) + return UVM_PROT_READ_WRITE_ATOMIC; + + uvm_va_block_page_resident_processors(va_block, page_index, &resident_processors); + resident_processors_count = uvm_processor_mask_get_count(&resident_processors); + + if (resident_processors_count == 0) { + return UVM_PROT_NONE; + } + else if (resident_processors_count > 1) { + // If there are many copies, we can only map READ ONLY + // + // The block state doesn't track the mapping target (aperture) of each + // individual PTE, just the permissions and where the data is resident. + // If the data is resident in multiple places, then we have a problem + // since we can't know where the PTE points. This means we won't know + // what needs to be unmapped for cases like UvmUnregisterGpu and + // UvmDisablePeerAccess. + // + // The simple way to solve this is to enforce that a read-duplication + // mapping always points to local memory. 
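+ //
+ // For example, if a page is resident on both the CPU and GPU A, then GPU A
+ // can map its local copy read-only here, while GPU B (which holds no copy)
+ // gets UVM_PROT_NONE.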
+ if (uvm_processor_mask_test(&resident_processors, processor_id)) + return UVM_PROT_READ_ONLY; + + return UVM_PROT_NONE; + } + else { + uvm_processor_id_t atomic_id; + uvm_processor_id_t residency; + uvm_processor_mask_t atomic_mappings; + uvm_processor_mask_t write_mappings; + + // Search the id of the processor with the only resident copy + residency = uvm_processor_mask_find_first_id(&resident_processors); + UVM_ASSERT(UVM_ID_IS_VALID(residency)); + + // If we cannot map the processor with the resident copy, exit + if (!uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(residency)], processor_id)) + return UVM_PROT_NONE; + + // Fast path: if the page is not mapped anywhere else, it can be safely + // mapped with RWA permission + if (!uvm_page_mask_test(&va_block->maybe_mapped_pages, page_index)) + return UVM_PROT_READ_WRITE_ATOMIC; + + uvm_va_block_page_authorized_processors(va_block, page_index, UVM_PROT_READ_WRITE_ATOMIC, &atomic_mappings); + + // Exclude processors with system-wide atomics disabled from atomic_mappings + uvm_processor_mask_and(&atomic_mappings, + &atomic_mappings, + &va_space->system_wide_atomics_enabled_processors); + + // Exclude the processor for which the mapping protections are being computed + uvm_processor_mask_clear(&atomic_mappings, processor_id); + + // If there is any processor with atomic mapping, check if it has native atomics to the processor + // with the resident copy. If it does not, we can only map READ ONLY + atomic_id = uvm_processor_mask_find_first_id(&atomic_mappings); + if (UVM_ID_IS_VALID(atomic_id) && + !uvm_processor_mask_test(&va_space->has_native_atomics[uvm_id_value(residency)], atomic_id)) { + return UVM_PROT_READ_ONLY; + } + + uvm_va_block_page_authorized_processors(va_block, page_index, UVM_PROT_READ_WRITE, &write_mappings); + + // Exclude the processor for which the mapping protections are being computed + uvm_processor_mask_clear(&write_mappings, processor_id); + + // At this point, any processor with atomic mappings either has native atomics support to the + // processor with the resident copy or has disabled system-wide atomics. If the requesting + // processor has disabled system-wide atomics or has native atomics to that processor, we can + // map with ATOMIC privileges. Likewise, if there are no other processors with WRITE or ATOMIC + // mappings, we can map with ATOMIC privileges. 
+ if (!uvm_processor_mask_test(&va_space->system_wide_atomics_enabled_processors, processor_id) || + uvm_processor_mask_test(&va_space->has_native_atomics[uvm_id_value(residency)], processor_id) || + uvm_processor_mask_empty(&write_mappings)) { + return UVM_PROT_READ_WRITE_ATOMIC; + } + + return UVM_PROT_READ_WRITE; + } +} + +NV_STATUS uvm_va_block_add_mappings(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t processor_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + UvmEventMapRemoteCause cause) +{ + uvm_va_range_t *va_range = va_block->va_range; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + NV_STATUS status = NV_OK; + uvm_page_index_t page_index; + uvm_range_group_range_iter_t iter; + uvm_prot_t prot_to_map; + + if (UVM_ID_IS_CPU(processor_id) && !uvm_va_block_is_hmm(va_block)) { + if (!uvm_va_range_vma_check(va_range, va_block_context->mm)) + return NV_OK; + + uvm_range_group_range_migratability_iter_first(va_space, + uvm_va_block_region_start(va_block, region), + uvm_va_block_region_end(va_block, region), + &iter); + } + + for (prot_to_map = UVM_PROT_READ_ONLY; prot_to_map <= UVM_PROT_READ_WRITE_ATOMIC; ++prot_to_map) + va_block_context->mask_by_prot[prot_to_map - 1].count = 0; + + for_each_va_block_page_in_region_mask(page_index, page_mask, region) { + // Read duplication takes precedence over SetAccesedBy. Exclude pages + // read-duplicated by performance heuristics + if (uvm_page_mask_test(&va_block->read_duplicated_pages, page_index)) + continue; + + prot_to_map = uvm_va_block_page_compute_highest_permission(va_block, processor_id, page_index); + if (prot_to_map == UVM_PROT_NONE) + continue; + + if (UVM_ID_IS_CPU(processor_id) && !uvm_va_block_is_hmm(va_block)) { + while (uvm_va_block_cpu_page_index(va_block, iter.end) < page_index) { + uvm_range_group_range_migratability_iter_next(va_space, + &iter, + uvm_va_block_region_end(va_block, region)); + } + + if (!iter.migratable) + continue; + } + + if (va_block_context->mask_by_prot[prot_to_map - 1].count++ == 0) + uvm_page_mask_zero(&va_block_context->mask_by_prot[prot_to_map - 1].page_mask); + + uvm_page_mask_set(&va_block_context->mask_by_prot[prot_to_map - 1].page_mask, page_index); + } + + for (prot_to_map = UVM_PROT_READ_ONLY; prot_to_map <= UVM_PROT_READ_WRITE_ATOMIC; ++prot_to_map) { + if (va_block_context->mask_by_prot[prot_to_map - 1].count == 0) + continue; + + status = uvm_va_block_map(va_block, + va_block_context, + processor_id, + region, + &va_block_context->mask_by_prot[prot_to_map - 1].page_mask, + prot_to_map, + cause, + &va_block->tracker); + if (status != NV_OK) + break; + } + + return status; +} + +static bool can_read_duplicate(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_va_policy_t *policy, + const uvm_perf_thrashing_hint_t *thrashing_hint) +{ + if (uvm_va_policy_is_read_duplicate(policy, uvm_va_block_get_va_space(va_block))) + return true; + + if (policy->read_duplication != UVM_READ_DUPLICATION_DISABLED && + uvm_page_mask_test(&va_block->read_duplicated_pages, page_index) && + thrashing_hint->type != UVM_PERF_THRASHING_HINT_TYPE_PIN) + return true; + + return false; +} + +// TODO: Bug 1827400: If the faulting processor has support for native +// atomics to the current location and the faults on the page were +// triggered by atomic accesses only, we keep the current residency. 
+// This is a short-term solution to exercise remote atomics over +// NVLINK when possible (not only when preferred location is set to +// the remote GPU) as they are much faster than relying on page +// faults and permission downgrades, which cause thrashing. In the +// future, the thrashing detection/prevention heuristics should +// detect and handle this case. +static bool map_remote_on_atomic_fault(uvm_va_space_t *va_space, + NvU32 access_type_mask, + uvm_processor_id_t processor_id, + uvm_processor_id_t residency) +{ + // This policy can be enabled/disabled using a module parameter + if (!uvm_perf_map_remote_on_native_atomics_fault) + return false; + + // Only consider atomics faults + if (uvm_fault_access_type_mask_lowest(access_type_mask) < UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK) + return false; + + // We cannot differentiate CPU writes from atomics. We exclude CPU faults + // from the logic explained above in order to avoid mapping CPU to vidmem + // memory due to a write. + if (UVM_ID_IS_CPU(processor_id)) + return false; + + // On P9 systems (which have native HW support for system-wide atomics), we + // have determined experimentally that placing memory on a GPU yields the + // best performance on most cases (since CPU can cache vidmem but not vice + // versa). Therefore, don't map remotely if the current residency is + // sysmem. + if (UVM_ID_IS_CPU(residency)) + return false; + + return uvm_processor_mask_test(&va_space->has_native_atomics[uvm_id_value(residency)], processor_id); +} + +// TODO: Bug 1766424: this function works on a single page at a time. This +// could be changed in the future to optimize multiple faults or access +// counter notifications on contiguous pages. +static uvm_processor_id_t block_select_residency(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t processor_id, + NvU32 access_type_mask, + uvm_va_policy_t *policy, + const uvm_perf_thrashing_hint_t *thrashing_hint, + uvm_service_operation_t operation, + bool *read_duplicate) +{ + uvm_processor_id_t closest_resident_processor; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + bool may_read_duplicate; + uvm_processor_id_t preferred_location; + + if (is_uvm_fault_force_sysmem_set()) { + *read_duplicate = false; + return UVM_ID_CPU; + } + + may_read_duplicate = can_read_duplicate(va_block, page_index, policy, thrashing_hint); + + // Read/prefetch faults on a VA range with read duplication enabled + // always create a copy of the page on the faulting processor's memory. + // Note that access counters always use UVM_FAULT_ACCESS_TYPE_PREFETCH, + // which will lead to read duplication if it is enabled. 
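+ //
+ // For example, a read fault from a GPU on such a page that is resident on
+ // the CPU leaves the CPU copy in place and adds a copy in the faulting
+ // GPU's memory, whereas a write or atomic fault falls through below and
+ // selects the faulting processor as the new residency.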
+ *read_duplicate = may_read_duplicate && + (uvm_fault_access_type_mask_highest(access_type_mask) <= UVM_FAULT_ACCESS_TYPE_READ); + + if (*read_duplicate) + return processor_id; + + *read_duplicate = false; + + // If read-duplication is active in the page but we are not + // read-duplicating because the access type is not a read or a prefetch, + // the faulting processor should get a local copy + if (may_read_duplicate) + return processor_id; + + // If the faulting processor is the preferred location always migrate + preferred_location = policy->preferred_location; + if (uvm_id_equal(processor_id, preferred_location)) { + if (thrashing_hint->type != UVM_PERF_THRASHING_HINT_TYPE_NONE) { + UVM_ASSERT(thrashing_hint->type == UVM_PERF_THRASHING_HINT_TYPE_PIN); + if (uvm_va_space_processor_has_memory(va_space, processor_id)) + UVM_ASSERT(uvm_id_equal(thrashing_hint->pin.residency, processor_id)); + } + + return processor_id; + } + + if (thrashing_hint->type == UVM_PERF_THRASHING_HINT_TYPE_PIN) { + UVM_ASSERT(uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(thrashing_hint->pin.residency)], + processor_id)); + return thrashing_hint->pin.residency; + } + + closest_resident_processor = uvm_va_block_page_get_closest_resident(va_block, page_index, processor_id); + + // If the page is not resident anywhere, select the preferred location as + // long as the preferred location is accessible from the faulting processor. + // Otherwise select the faulting processor. + if (UVM_ID_IS_INVALID(closest_resident_processor)) { + if (UVM_ID_IS_VALID(preferred_location) && + uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(preferred_location)], + processor_id)) { + return preferred_location; + } + + return processor_id; + } + + // AccessedBy mappings might have not been created for the CPU if the thread + // which made the memory resident did not have the proper references on the + // mm_struct (for example, the GPU fault handling path when + // uvm_va_space_mm_enabled() is false). + // + // Also, in uvm_migrate_*, we implement a two-pass scheme in which + // AccessedBy mappings may be delayed to the second pass. This can produce + // faults even if the faulting processor is in the accessed_by mask. + // + // Here, we keep it on the current residency and we just add the missing + // mapping. + if (uvm_processor_mask_test(&policy->accessed_by, processor_id) && + uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(closest_resident_processor)], processor_id) && + operation != UVM_SERVICE_OPERATION_ACCESS_COUNTERS) { + return closest_resident_processor; + } + + // Check if we should map the closest resident processor remotely on atomic + // fault + if (map_remote_on_atomic_fault(va_space, access_type_mask, processor_id, closest_resident_processor)) + return closest_resident_processor; + + // If the processor has access to the preferred location, and the page is + // not resident on the accessing processor, move it to the preferred + // location. + if (!uvm_id_equal(closest_resident_processor, processor_id) && + UVM_ID_IS_VALID(preferred_location) && + uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(preferred_location)], processor_id)) + return preferred_location; + + // If the page is resident on a processor other than the preferred location, + // or the faulting processor can't access the preferred location, we select + // the faulting processor as the new residency. 
+ return processor_id; +} + +uvm_processor_id_t uvm_va_block_select_residency(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t processor_id, + NvU32 access_type_mask, + uvm_va_policy_t *policy, + const uvm_perf_thrashing_hint_t *thrashing_hint, + uvm_service_operation_t operation, + bool *read_duplicate) +{ + uvm_processor_id_t id = block_select_residency(va_block, + page_index, + processor_id, + access_type_mask, + policy, + thrashing_hint, + operation, + read_duplicate); + + // If the intended residency doesn't have memory, fall back to the CPU. + if (!block_processor_has_memory(va_block, id)) { + *read_duplicate = false; + return UVM_ID_CPU; + } + + return id; +} + +static bool check_access_counters_dont_revoke(uvm_va_block_t *block, + uvm_va_block_context_t *block_context, + uvm_va_block_region_t region, + const uvm_processor_mask_t *revoke_processors, + const uvm_page_mask_t *revoke_page_mask, + uvm_prot_t revoke_prot) +{ + uvm_processor_id_t id; + for_each_id_in_mask(id, revoke_processors) { + const uvm_page_mask_t *mapped_with_prot = block_map_with_prot_mask_get(block, id, revoke_prot); + + uvm_page_mask_and(&block_context->caller_page_mask, revoke_page_mask, mapped_with_prot); + + UVM_ASSERT(uvm_page_mask_region_weight(&block_context->caller_page_mask, region) == 0); + } + + return true; +} + +NV_STATUS uvm_va_block_service_locked(uvm_processor_id_t processor_id, + uvm_va_block_t *va_block, + uvm_va_block_retry_t *block_retry, + uvm_service_block_context_t *service_context) +{ + NV_STATUS status = NV_OK; + uvm_processor_id_t new_residency; + uvm_prot_t new_prot; + uvm_va_range_t *va_range = va_block->va_range; + uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block); + uvm_perf_prefetch_hint_t prefetch_hint = UVM_PERF_PREFETCH_HINT_NONE(); + uvm_processor_mask_t processors_involved_in_cpu_migration; + + uvm_assert_mutex_locked(&va_block->lock); + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + + // GPU fault servicing must be done under the VA space read lock. GPU fault + // servicing is required for RM to make forward progress, and we allow other + // threads to call into RM while holding the VA space lock in read mode. If + // we took the VA space lock in write mode on the GPU fault service path, + // we could deadlock because the thread in RM which holds the VA space lock + // for read wouldn't be able to complete until fault servicing completes. + if (service_context->operation != UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS || UVM_ID_IS_CPU(processor_id)) + uvm_assert_rwsem_locked(&va_space->lock); + else + uvm_assert_rwsem_locked_read(&va_space->lock); + + // Performance heuristics policy: we only consider prefetching when there + // are migrations to a single processor, only. 
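+ //
+ // For example, if every faulted page in this batch is migrating to the same
+ // GPU, the prefetch hint below may add extra pages (marked with
+ // UVM_FAULT_ACCESS_TYPE_PREFETCH) and grow service_context->region so they
+ // migrate together with the faulted pages.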
+ if (uvm_processor_mask_get_count(&service_context->resident_processors) == 1) { + uvm_page_index_t page_index; + uvm_page_mask_t *new_residency_mask; + uvm_va_policy_t *policy = service_context->block_context.policy; + + new_residency = uvm_processor_mask_find_first_id(&service_context->resident_processors); + new_residency_mask = &service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency; + + // Update prefetch tracking structure with the pages that will migrate + // due to faults + uvm_perf_prefetch_prenotify_fault_migrations(va_block, + &service_context->block_context, + new_residency, + new_residency_mask, + service_context->region); + + prefetch_hint = uvm_perf_prefetch_get_hint(va_block, new_residency_mask); + + // Obtain the prefetch hint and give a fake fault access type to the + // prefetched pages + if (UVM_ID_IS_VALID(prefetch_hint.residency)) { + UVM_ASSERT(prefetch_hint.prefetch_pages_mask != NULL); + + for_each_va_block_page_in_mask(page_index, prefetch_hint.prefetch_pages_mask, va_block) { + UVM_ASSERT(!uvm_page_mask_test(new_residency_mask, page_index)); + + service_context->access_type[page_index] = UVM_FAULT_ACCESS_TYPE_PREFETCH; + + if (uvm_va_policy_is_read_duplicate(policy, va_space) || + (policy->read_duplication != UVM_READ_DUPLICATION_DISABLED && + uvm_page_mask_test(&va_block->read_duplicated_pages, page_index))) { + if (service_context->read_duplicate_count++ == 0) + uvm_page_mask_zero(&service_context->read_duplicate_mask); + + uvm_page_mask_set(&service_context->read_duplicate_mask, page_index); + } + } + + service_context->region = uvm_va_block_region_from_block(va_block); + } + } + + for (new_prot = UVM_PROT_READ_ONLY; new_prot < UVM_PROT_MAX; ++new_prot) + service_context->mappings_by_prot[new_prot-1].count = 0; + + uvm_processor_mask_zero(&processors_involved_in_cpu_migration); + + // 1- Migrate pages and compute mapping protections + for_each_id_in_mask(new_residency, &service_context->resident_processors) { + uvm_processor_mask_t *all_involved_processors = &service_context->block_context.make_resident.all_involved_processors; + uvm_page_mask_t *new_residency_mask = &service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency; + uvm_page_mask_t *did_migrate_mask = &service_context->block_context.make_resident.pages_changed_residency; + uvm_page_index_t page_index; + uvm_make_resident_cause_t cause; + + UVM_ASSERT_MSG(service_context->operation == UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS || + service_context->operation == UVM_SERVICE_OPERATION_NON_REPLAYABLE_FAULTS || + service_context->operation == UVM_SERVICE_OPERATION_ACCESS_COUNTERS, + "Invalid operation value %u\n", service_context->operation); + + if (service_context->operation == UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS) + cause = UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT; + else if (service_context->operation == UVM_SERVICE_OPERATION_NON_REPLAYABLE_FAULTS) + cause = UVM_MAKE_RESIDENT_CAUSE_NON_REPLAYABLE_FAULT; + else + cause = UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER; + + // 1.1- Migrate pages + + // Reset masks before all of the make_resident calls + uvm_page_mask_zero(did_migrate_mask); + uvm_processor_mask_zero(all_involved_processors); + + if (UVM_ID_IS_VALID(prefetch_hint.residency)) { + UVM_ASSERT(uvm_id_equal(prefetch_hint.residency, new_residency)); + UVM_ASSERT(prefetch_hint.prefetch_pages_mask != NULL); + + uvm_page_mask_or(new_residency_mask, new_residency_mask, prefetch_hint.prefetch_pages_mask); + } + + if (service_context->read_duplicate_count == 
0 || + uvm_page_mask_andnot(&service_context->block_context.caller_page_mask, + new_residency_mask, + &service_context->read_duplicate_mask)) { + status = uvm_va_block_make_resident(va_block, + block_retry, + &service_context->block_context, + new_residency, + service_context->region, + service_context->read_duplicate_count == 0? + new_residency_mask: + &service_context->block_context.caller_page_mask, + prefetch_hint.prefetch_pages_mask, + cause); + if (status != NV_OK) + return status; + } + + if (service_context->read_duplicate_count != 0 && + uvm_page_mask_and(&service_context->block_context.caller_page_mask, + new_residency_mask, + &service_context->read_duplicate_mask)) { + status = uvm_va_block_make_resident_read_duplicate(va_block, + block_retry, + &service_context->block_context, + new_residency, + service_context->region, + &service_context->block_context.caller_page_mask, + prefetch_hint.prefetch_pages_mask, + cause); + if (status != NV_OK) + return status; + } + + if (UVM_ID_IS_CPU(new_residency)) { + // Save all the processors involved in migrations to the CPU for + // an ECC check before establishing the CPU mappings. + uvm_processor_mask_copy(&processors_involved_in_cpu_migration, all_involved_processors); + } + + if (UVM_ID_IS_CPU(processor_id) && !uvm_processor_mask_empty(all_involved_processors)) + service_context->cpu_fault.did_migrate = true; + + uvm_page_mask_andnot(&service_context->did_not_migrate_mask, new_residency_mask, did_migrate_mask); + + // 1.2 - Compute mapping protections for the requesting processor on + // the new residency + for_each_va_block_page_in_region_mask(page_index, new_residency_mask, service_context->region) { + new_prot = compute_new_permission(va_block, + page_index, + processor_id, + new_residency, + service_context->access_type[page_index]); + + if (service_context->mappings_by_prot[new_prot-1].count++ == 0) + uvm_page_mask_zero(&service_context->mappings_by_prot[new_prot-1].page_mask); + + uvm_page_mask_set(&service_context->mappings_by_prot[new_prot-1].page_mask, page_index); + } + + // 1.3- Revoke permissions + // + // NOTE: uvm_va_block_make_resident destroys mappings to old locations. + // Thus, we need to revoke only if residency did not change and we + // are mapping higher than READ ONLY. + for (new_prot = UVM_PROT_READ_WRITE; new_prot <= UVM_PROT_READ_WRITE_ATOMIC; ++new_prot) { + bool pages_need_revocation; + uvm_processor_mask_t revoke_processors; + uvm_prot_t revoke_prot; + bool this_processor_has_enabled_atomics; + + if (service_context->mappings_by_prot[new_prot-1].count == 0) + continue; + + pages_need_revocation = uvm_page_mask_and(&service_context->revocation_mask, + &service_context->did_not_migrate_mask, + &service_context->mappings_by_prot[new_prot-1].page_mask); + if (!pages_need_revocation) + continue; + + uvm_processor_mask_and(&revoke_processors, &va_block->mapped, &va_space->faultable_processors); + + // Do not revoke the processor that took the fault + uvm_processor_mask_clear(&revoke_processors, processor_id); + + this_processor_has_enabled_atomics = uvm_processor_mask_test(&va_space->system_wide_atomics_enabled_processors, + processor_id); + + // Atomic operations on processors with system-wide atomics + // disabled or with native atomics access to new_residency + // behave like writes. 
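+ //
+ // For example, when the faulting processor has native atomics to
+ // new_residency, the revocation mask below excludes processors that also
+ // have native atomics to new_residency as well as processors with
+ // system-wide atomics disabled, so only the remaining mappings are
+ // downgraded.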
+ if (new_prot == UVM_PROT_READ_WRITE || + !this_processor_has_enabled_atomics || + uvm_processor_mask_test(&va_space->has_native_atomics[uvm_id_value(new_residency)], processor_id)) { + + // Exclude processors with native atomics on the resident copy + uvm_processor_mask_andnot(&revoke_processors, + &revoke_processors, + &va_space->has_native_atomics[uvm_id_value(new_residency)]); + + // Exclude processors with disabled system-wide atomics + uvm_processor_mask_and(&revoke_processors, + &revoke_processors, + &va_space->system_wide_atomics_enabled_processors); + } + + if (UVM_ID_IS_CPU(processor_id)) { + revoke_prot = UVM_PROT_READ_WRITE_ATOMIC; + } + else { + revoke_prot = (new_prot == UVM_PROT_READ_WRITE_ATOMIC)? UVM_PROT_READ_WRITE: + UVM_PROT_READ_WRITE_ATOMIC; + } + + // UVM-Lite processors must always have RWA mappings + if (uvm_processor_mask_andnot(&revoke_processors, &revoke_processors, block_get_uvm_lite_gpus(va_block))) { + // Access counters should never trigger revocations apart from + // read-duplication, which are performed in the calls to + // uvm_va_block_make_resident_read_duplicate, above. + if (service_context->operation == UVM_SERVICE_OPERATION_ACCESS_COUNTERS) { + UVM_ASSERT(check_access_counters_dont_revoke(va_block, + &service_context->block_context, + service_context->region, + &revoke_processors, + &service_context->revocation_mask, + revoke_prot)); + } + + // Downgrade other processors' mappings + status = uvm_va_block_revoke_prot_mask(va_block, + &service_context->block_context, + &revoke_processors, + service_context->region, + &service_context->revocation_mask, + revoke_prot); + if (status != NV_OK) + return status; + } + } + } + + // 2- Check for ECC errors on all GPUs involved in the migration if CPU is + // the destination. Migrations in response to CPU faults are special + // because they're on the only path (apart from tools) where CUDA is not + // involved and wouldn't have a chance to do its own ECC checking. + if (service_context->operation == UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS && + UVM_ID_IS_CPU(processor_id) && + !uvm_processor_mask_empty(&processors_involved_in_cpu_migration)) { + uvm_gpu_t *gpu; + + // Before checking for ECC errors, make sure all of the GPU work + // is finished. Creating mappings on the CPU would have to wait + // for the tracker anyway so this shouldn't hurt performance. + status = uvm_tracker_wait(&va_block->tracker); + if (status != NV_OK) + return status; + + for_each_va_space_gpu_in_mask(gpu, va_space, &processors_involved_in_cpu_migration) { + // We cannot call into RM here so use the no RM ECC check. + status = uvm_gpu_check_ecc_error_no_rm(gpu); + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) { + // In case we need to call into RM to be sure whether + // there is an ECC error or not, signal that to the + // caller by adding the GPU to the mask. + // + // In that case the ECC error might be noticed only after + // the CPU mappings have been already created below, + // exposing different CPU threads to the possibly corrupt + // data, but this thread will fault eventually and that's + // considered to be an acceptable trade-off between + // performance and ECC error containment. 
+ uvm_processor_mask_set(&service_context->cpu_fault.gpus_to_check_for_ecc, gpu->id); + status = NV_OK; + } + if (status != NV_OK) + return status; + } + } + + // 3- Map requesting processor with the necessary privileges + for (new_prot = UVM_PROT_READ_ONLY; new_prot <= UVM_PROT_READ_WRITE_ATOMIC; ++new_prot) { + const uvm_page_mask_t *map_prot_mask = &service_context->mappings_by_prot[new_prot-1].page_mask; + + if (service_context->mappings_by_prot[new_prot-1].count == 0) + continue; + + // 3.1 - Unmap CPU pages + if (service_context->operation != UVM_SERVICE_OPERATION_ACCESS_COUNTERS && UVM_ID_IS_CPU(processor_id)) { + // The kernel can downgrade our CPU mappings at any time without + // notifying us, which means our PTE state could be stale. We + // handle this by unmapping the CPU PTE and re-mapping it again. + // + // A CPU fault is unexpected if: + // curr_prot == RW || (!is_write && curr_prot == RO) + status = uvm_va_block_unmap(va_block, + &service_context->block_context, + UVM_ID_CPU, + service_context->region, + map_prot_mask, + NULL); + if (status != NV_OK) + return status; + } + + // 3.2 - Add new mappings + + // The faulting processor can be mapped remotely due to user policy or + // the thrashing mitigation heuristics. Therefore, we set the cause + // accordingly in each case. + + // Map pages that are thrashing first + if (service_context->thrashing_pin_count > 0 && va_space->tools.enabled) { + uvm_page_mask_t *helper_page_mask = &service_context->block_context.caller_page_mask; + bool pages_need_mapping = uvm_page_mask_and(helper_page_mask, + map_prot_mask, + &service_context->thrashing_pin_mask); + if (pages_need_mapping) { + status = uvm_va_block_map(va_block, + &service_context->block_context, + processor_id, + service_context->region, + helper_page_mask, + new_prot, + UvmEventMapRemoteCauseThrashing, + &va_block->tracker); + if (status != NV_OK) + return status; + + // Remove thrashing pages from the map mask + pages_need_mapping = uvm_page_mask_andnot(helper_page_mask, + map_prot_mask, + &service_context->thrashing_pin_mask); + if (!pages_need_mapping) + continue; + + map_prot_mask = helper_page_mask; + } + } + + status = uvm_va_block_map(va_block, + &service_context->block_context, + processor_id, + service_context->region, + map_prot_mask, + new_prot, + UvmEventMapRemoteCausePolicy, + &va_block->tracker); + if (status != NV_OK) + return status; + } + + // 4- If pages did migrate, map SetAccessedBy processors, except for UVM-Lite + for_each_id_in_mask(new_residency, &service_context->resident_processors) { + const uvm_page_mask_t *new_residency_mask; + new_residency_mask = &service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency; + + for (new_prot = UVM_PROT_READ_ONLY; new_prot <= UVM_PROT_READ_WRITE_ATOMIC; ++new_prot) { + uvm_page_mask_t *map_prot_mask = &service_context->block_context.caller_page_mask; + bool pages_need_mapping; + + if (service_context->mappings_by_prot[new_prot-1].count == 0) + continue; + + pages_need_mapping = uvm_page_mask_and(map_prot_mask, + new_residency_mask, + &service_context->mappings_by_prot[new_prot-1].page_mask); + if (!pages_need_mapping) + continue; + + // Map pages that are thrashing + if (service_context->thrashing_pin_count > 0) { + uvm_page_index_t page_index; + + for_each_va_block_page_in_region_mask(page_index, + &service_context->thrashing_pin_mask, + service_context->region) { + uvm_processor_mask_t *map_thrashing_processors = NULL; + NvU64 page_addr = uvm_va_block_cpu_page_address(va_block, 
                                                                    page_index);
+
+                    // Check protection type
+                    if (!uvm_page_mask_test(map_prot_mask, page_index))
+                        continue;
+
+                    map_thrashing_processors = uvm_perf_thrashing_get_thrashing_processors(va_block, page_addr);
+
+                    status = uvm_va_block_add_mappings_after_migration(va_block,
+                                                                       &service_context->block_context,
+                                                                       new_residency,
+                                                                       processor_id,
+                                                                       uvm_va_block_region_for_page(page_index),
+                                                                       map_prot_mask,
+                                                                       new_prot,
+                                                                       map_thrashing_processors);
+                    if (status != NV_OK)
+                        return status;
+                }
+
+                pages_need_mapping = uvm_page_mask_andnot(map_prot_mask,
+                                                          map_prot_mask,
+                                                          &service_context->thrashing_pin_mask);
+                if (!pages_need_mapping)
+                    continue;
+            }
+
+            // Map the rest of the pages in a single shot
+            status = uvm_va_block_add_mappings_after_migration(va_block,
+                                                               &service_context->block_context,
+                                                               new_residency,
+                                                               processor_id,
+                                                               service_context->region,
+                                                               map_prot_mask,
+                                                               new_prot,
+                                                               NULL);
+            if (status != NV_OK)
+                return status;
+        }
+    }
+
+    return NV_OK;
+}
+
+// Check if we are faulting on a page that already has valid permissions, in
+// which case fault handling can be skipped. See
+// uvm_va_block_t::cpu::fault_authorized for more details.
+static bool skip_cpu_fault_with_valid_permissions(uvm_va_block_t *va_block,
+                                                  uvm_page_index_t page_index,
+                                                  uvm_fault_access_type_t fault_access_type)
+{
+    if (uvm_va_block_page_is_processor_authorized(va_block,
+                                                  page_index,
+                                                  UVM_ID_CPU,
+                                                  uvm_fault_access_type_to_prot(fault_access_type))) {
+        NvU64 now = NV_GETTIME();
+        pid_t pid = current->pid;
+
+        // Latch the pid/timestamp/page_index values for the first time
+        if (!va_block->cpu.fault_authorized.first_fault_stamp) {
+            va_block->cpu.fault_authorized.first_fault_stamp = now;
+            va_block->cpu.fault_authorized.first_pid = pid;
+            va_block->cpu.fault_authorized.page_index = page_index;
+
+            return true;
+        }
+
+        // If the same thread shows up again, this means that the kernel
+        // downgraded the page's PTEs. Service the fault to force a remap of
+        // the page.
+        if (va_block->cpu.fault_authorized.first_pid == pid &&
+            va_block->cpu.fault_authorized.page_index == page_index) {
+            va_block->cpu.fault_authorized.first_fault_stamp = 0;
+        }
+        else {
+            // If the window has expired, clear the information and service the
+            // fault. Otherwise, just return
+            if (now - va_block->cpu.fault_authorized.first_fault_stamp > uvm_perf_authorized_cpu_fault_tracking_window_ns)
+                va_block->cpu.fault_authorized.first_fault_stamp = 0;
+            else
+                return true;
+        }
+    }
+
+    return false;
+}
+
+static NV_STATUS block_cpu_fault_locked(uvm_va_block_t *va_block,
+                                        uvm_va_block_retry_t *va_block_retry,
+                                        NvU64 fault_addr,
+                                        uvm_fault_access_type_t fault_access_type,
+                                        uvm_service_block_context_t *service_context)
+{
+    uvm_va_range_t *va_range = va_block->va_range;
+    uvm_va_space_t *va_space = uvm_va_block_get_va_space(va_block);
+    NV_STATUS status = NV_OK;
+    uvm_page_index_t page_index;
+    uvm_perf_thrashing_hint_t thrashing_hint;
+    uvm_processor_id_t new_residency;
+    bool read_duplicate;
+
+    uvm_assert_rwsem_locked(&va_space->lock);
+    UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED);
+
+    UVM_ASSERT(fault_addr >= va_block->start);
+    UVM_ASSERT(fault_addr <= va_block->end);
+
+    // There are up to three mm_structs to worry about, and they might all be
+    // different:
+    //
+    //   1) vma->vm_mm
+    //   2) current->mm
+    //   3) va_space->va_space_mm.mm (though note that if this is valid, then it
+    //      must match vma->vm_mm).
+ // + // The kernel guarantees that vma->vm_mm has a reference taken with + // mmap_lock held on the CPU fault path, so tell the fault handler to use + // that one. current->mm might differ if we're on the access_process_vm + // (ptrace) path or if another driver is calling get_user_pages. + service_context->block_context.mm = uvm_va_range_vma(va_range)->vm_mm; + uvm_assert_mmap_lock_locked(service_context->block_context.mm); + + service_context->block_context.policy = uvm_va_policy_get(va_block, fault_addr); + + if (service_context->num_retries == 0) { + // notify event to tools/performance heuristics + uvm_perf_event_notify_cpu_fault(&va_space->perf_events, + va_block, + service_context->block_context.policy->preferred_location, + fault_addr, + fault_access_type > UVM_FAULT_ACCESS_TYPE_READ, + KSTK_EIP(current)); + } + + // Check logical permissions + status = uvm_va_range_check_logical_permissions(va_block->va_range, + UVM_ID_CPU, + fault_access_type, + uvm_range_group_address_migratable(va_space, fault_addr)); + if (status != NV_OK) + return status; + + uvm_processor_mask_zero(&service_context->cpu_fault.gpus_to_check_for_ecc); + + page_index = uvm_va_block_cpu_page_index(va_block, fault_addr); + if (skip_cpu_fault_with_valid_permissions(va_block, page_index, fault_access_type)) + return NV_OK; + + thrashing_hint = uvm_perf_thrashing_get_hint(va_block, fault_addr, UVM_ID_CPU); + // Throttling is implemented by sleeping in the fault handler on the CPU + if (thrashing_hint.type == UVM_PERF_THRASHING_HINT_TYPE_THROTTLE) { + service_context->cpu_fault.wakeup_time_stamp = thrashing_hint.throttle.end_time_stamp; + return NV_WARN_MORE_PROCESSING_REQUIRED; + } + + service_context->read_duplicate_count = 0; + service_context->thrashing_pin_count = 0; + service_context->operation = UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS; + + if (thrashing_hint.type == UVM_PERF_THRASHING_HINT_TYPE_PIN) { + uvm_page_mask_zero(&service_context->thrashing_pin_mask); + uvm_page_mask_set(&service_context->thrashing_pin_mask, page_index); + service_context->thrashing_pin_count = 1; + } + + // Compute new residency and update the masks + new_residency = uvm_va_block_select_residency(va_block, + page_index, + UVM_ID_CPU, + uvm_fault_access_type_mask_bit(fault_access_type), + service_context->block_context.policy, + &thrashing_hint, + UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS, + &read_duplicate); + + // Initialize the minimum necessary state in the fault service context + uvm_processor_mask_zero(&service_context->resident_processors); + + // Set new residency and update the masks + uvm_processor_mask_set(&service_context->resident_processors, new_residency); + + // The masks need to be fully zeroed as the fault region may grow due to prefetching + uvm_page_mask_zero(&service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency); + uvm_page_mask_set(&service_context->per_processor_masks[uvm_id_value(new_residency)].new_residency, page_index); + + if (read_duplicate) { + uvm_page_mask_zero(&service_context->read_duplicate_mask); + uvm_page_mask_set(&service_context->read_duplicate_mask, page_index); + service_context->read_duplicate_count = 1; + } + + service_context->access_type[page_index] = fault_access_type; + + service_context->region = uvm_va_block_region_for_page(page_index); + + status = uvm_va_block_service_locked(UVM_ID_CPU, va_block, va_block_retry, service_context); + + ++service_context->num_retries; + + return status; +} + +NV_STATUS uvm_va_block_cpu_fault(uvm_va_block_t *va_block, + NvU64 
fault_addr, + bool is_write, + uvm_service_block_context_t *service_context) +{ + NV_STATUS status; + uvm_va_block_retry_t va_block_retry; + uvm_fault_access_type_t fault_access_type; + + if (is_write) + fault_access_type = UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG; + else + fault_access_type = UVM_FAULT_ACCESS_TYPE_READ; + + service_context->num_retries = 0; + service_context->cpu_fault.did_migrate = false; + + // We have to use vm_insert_page instead of handing the page to the kernel + // and letting it insert the mapping, and we must do that while holding the + // lock on this VA block. Otherwise there will be a window in which we think + // we've mapped the page but the CPU mapping hasn't actually been created + // yet. During that window a GPU fault event could arrive and claim + // ownership of that VA, "unmapping" it. Then later the kernel would + // eventually establish the mapping, and we'd end up with both CPU and GPU + // thinking they each owned the page. + // + // This function must only be called when it's safe to call vm_insert_page. + // That is, there must be a reference held on the vma's vm_mm, and + // vm_mm->mmap_lock is held in at least read mode. Note that current->mm + // might not be vma->vm_mm. + status = UVM_VA_BLOCK_LOCK_RETRY(va_block, + &va_block_retry, + block_cpu_fault_locked(va_block, + &va_block_retry, + fault_addr, + fault_access_type, + service_context)); + return status; +} + +NV_STATUS uvm_va_block_find(uvm_va_space_t *va_space, NvU64 addr, uvm_va_block_t **out_block) +{ + uvm_va_range_t *va_range; + uvm_va_block_t *block; + size_t index; + + va_range = uvm_va_range_find(va_space, addr); + if (!va_range) + return uvm_hmm_va_block_find(va_space, addr, out_block); + + UVM_ASSERT(uvm_hmm_va_block_find(va_space, addr, out_block) == NV_ERR_INVALID_ADDRESS || + uvm_hmm_va_block_find(va_space, addr, out_block) == NV_ERR_OBJECT_NOT_FOUND); + + if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED) + return NV_ERR_INVALID_ADDRESS; + + index = uvm_va_range_block_index(va_range, addr); + block = uvm_va_range_block(va_range, index); + if (!block) + return NV_ERR_OBJECT_NOT_FOUND; + + *out_block = block; + return NV_OK; +} + +NV_STATUS uvm_va_block_find_create(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 addr, + uvm_va_block_context_t *va_block_context, + uvm_va_block_t **out_block) +{ + uvm_va_range_t *va_range; + size_t index; + + va_range = uvm_va_range_find(va_space, addr); + if (!va_range) { + if (!mm) + return NV_ERR_INVALID_ADDRESS; + return uvm_hmm_va_block_find_create(va_space, addr, va_block_context, out_block); + } + + UVM_ASSERT(uvm_hmm_va_block_find(va_space, addr, out_block) == NV_ERR_INVALID_ADDRESS || + uvm_hmm_va_block_find(va_space, addr, out_block) == NV_ERR_OBJECT_NOT_FOUND); + + if (va_range->type != UVM_VA_RANGE_TYPE_MANAGED) + return NV_ERR_INVALID_ADDRESS; + + index = uvm_va_range_block_index(va_range, addr); + return uvm_va_range_block_create(va_range, index, out_block); +} + +NV_STATUS uvm_va_block_write_from_cpu(uvm_va_block_t *va_block, + uvm_va_block_context_t *block_context, + NvU64 dst, + uvm_mem_t *src_mem, + size_t size) +{ + NV_STATUS status; + uvm_page_index_t page_index = uvm_va_block_cpu_page_index(va_block, dst); + NvU64 page_offset = dst & (PAGE_SIZE - 1); + uvm_processor_id_t proc = uvm_va_block_page_get_closest_resident(va_block, page_index, UVM_ID_CPU); + uvm_va_block_region_t region = uvm_va_block_region_for_page(page_index); + void *src = uvm_mem_get_cpu_addr_kernel(src_mem); + uvm_gpu_t *gpu; + uvm_gpu_address_t 
src_gpu_address; + uvm_gpu_address_t dst_gpu_address; + uvm_push_t push; + + uvm_assert_mutex_locked(&va_block->lock); + UVM_ASSERT_MSG(UVM_ALIGN_DOWN(dst, PAGE_SIZE) == UVM_ALIGN_DOWN(dst + size - 1, PAGE_SIZE), + "dst 0x%llx size 0x%zx\n", dst, size); + + if (UVM_ID_IS_INVALID(proc)) + proc = UVM_ID_CPU; + + // Use make_resident() in all cases to break read-duplication, but + // block_retry can be NULL as if the page is not resident yet we will make + // it resident on the CPU. + // Notably we don't care about coherence with respect to atomics from other + // processors. + status = uvm_va_block_make_resident(va_block, + NULL, + block_context, + proc, + region, + NULL, + NULL, + UVM_MAKE_RESIDENT_CAUSE_API_TOOLS); + + if (status != NV_OK) + return status; + + if (UVM_ID_IS_CPU(proc)) { + char *mapped_page; + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(va_block, page_index); + struct page *page = uvm_cpu_chunk_get_cpu_page(va_block, chunk, page_index); + + status = uvm_tracker_wait(&va_block->tracker); + if (status != NV_OK) + return status; + + mapped_page = (char *)kmap(page); + memcpy(mapped_page + page_offset, src, size); + kunmap(page); + + return NV_OK; + } + + gpu = block_get_gpu(va_block, proc); + + dst_gpu_address = block_phys_page_copy_address(va_block, block_phys_page(proc, page_index), gpu); + dst_gpu_address.address += page_offset; + + src_gpu_address = uvm_mem_gpu_address_virtual_kernel(src_mem, gpu); + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_CPU_TO_GPU, + &va_block->tracker, + &push, + "Direct write to [0x%llx, 0x%llx)", + dst, + dst + size); + if (status != NV_OK) + return status; + + gpu->parent->ce_hal->memcopy(&push, dst_gpu_address, src_gpu_address, size); + return uvm_push_end_and_wait(&push); +} + +NV_STATUS uvm_va_block_read_to_cpu(uvm_va_block_t *va_block, uvm_mem_t *dst_mem, NvU64 src, size_t size) +{ + NV_STATUS status; + uvm_page_index_t page_index = uvm_va_block_cpu_page_index(va_block, src); + NvU64 page_offset = src & (PAGE_SIZE - 1); + uvm_processor_id_t proc = uvm_va_block_page_get_closest_resident(va_block, page_index, UVM_ID_CPU); + void *dst = uvm_mem_get_cpu_addr_kernel(dst_mem); + uvm_gpu_t *gpu; + uvm_gpu_address_t src_gpu_address; + uvm_gpu_address_t dst_gpu_address; + uvm_push_t push; + + uvm_assert_mutex_locked(&va_block->lock); + UVM_ASSERT_MSG(UVM_ALIGN_DOWN(src, PAGE_SIZE) == UVM_ALIGN_DOWN(src + size - 1, PAGE_SIZE), + "src 0x%llx size 0x%zx\n", src, size); + + if (UVM_ID_IS_INVALID(proc)) { + memset(dst, 0, size); + return NV_OK; + } + + if (UVM_ID_IS_CPU(proc)) { + char *mapped_page; + uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(va_block, page_index); + struct page *page = uvm_cpu_chunk_get_cpu_page(va_block, chunk, page_index); + + status = uvm_tracker_wait(&va_block->tracker); + if (status != NV_OK) + return status; + + mapped_page = (char *)kmap(page); + memcpy(dst, mapped_page + page_offset, size); + kunmap(page); + + return NV_OK; + } + + gpu = block_get_gpu(va_block, proc); + + dst_gpu_address = uvm_mem_gpu_address_virtual_kernel(dst_mem, gpu); + + src_gpu_address = block_phys_page_copy_address(va_block, block_phys_page(proc, page_index), gpu); + src_gpu_address.address += page_offset; + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_GPU_TO_CPU, + &va_block->tracker, + &push, + "Direct read from [0x%llx, 0x%llx)", + src, + src + size); + if (status != NV_OK) + return status; + + gpu->parent->ce_hal->memcopy(&push, dst_gpu_address, src_gpu_address, 
size); + + return uvm_push_end_and_wait(&push); +} + +// Deferred work item reestablishing accessed by mappings after eviction. On +// GPUs with access counters enabled, the evicted GPU will also get remote +// mappings. +static void block_deferred_eviction_mappings(void *args) +{ + uvm_va_block_t *va_block = (uvm_va_block_t*)args; + uvm_va_space_t *va_space; + uvm_processor_id_t id; + uvm_va_block_context_t *block_context = NULL; + struct mm_struct *mm = NULL; + + uvm_mutex_lock(&va_block->lock); + va_space = uvm_va_block_get_va_space_maybe_dead(va_block); + uvm_mutex_unlock(&va_block->lock); + + if (!va_space) { + // Block has been killed in the meantime + goto done; + } + + mm = uvm_va_space_mm_retain_lock(va_space); + + block_context = uvm_va_block_context_alloc(mm); + if (!block_context) + goto done; + + // The block wasn't dead when we checked above and that's enough to + // guarantee that the VA space is still around, because + // uvm_va_space_destroy() flushes the associated nv_kthread_q, and that + // flush waits for this function call to finish. + uvm_va_space_down_read(va_space); + + // Now that we have the VA space lock held, we can check whether the block + // is still alive since the VA space write lock is needed to kill blocks. + if (uvm_va_block_is_dead(va_block)) + goto unlock; + + if (!uvm_va_block_is_hmm(va_block)) { + uvm_va_range_t *va_range = va_block->va_range; + NV_STATUS status = NV_OK; + + block_context->policy = uvm_va_range_get_policy(va_range); + for_each_id_in_mask(id, &uvm_va_range_get_policy(va_range)->accessed_by) { + status = uvm_va_block_set_accessed_by(va_block, block_context, id); + if (status != NV_OK) + break; + } + + // On Volta+ GPUs, we can map evicted memory since we can pull it back + // thanks to the access counters notifications + if (status == NV_OK && va_space_map_remote_on_eviction(va_space)) { + uvm_processor_mask_t map_processors; + + // Exclude the processors that have been already mapped due to + // AccessedBy + uvm_processor_mask_andnot(&map_processors, + &va_block->evicted_gpus, + &uvm_va_range_get_policy(va_range)->accessed_by); + + for_each_gpu_id_in_mask(id, &map_processors) { + uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, id); + uvm_va_block_gpu_state_t *gpu_state; + + if (!gpu->parent->access_counters_supported) + continue; + + gpu_state = uvm_va_block_gpu_state_get(va_block, id); + UVM_ASSERT(gpu_state); + + // TODO: Bug 2096389: uvm_va_block_add_mappings does not add + // remote mappings to read-duplicated pages. Add support for it + // or create a new function. 
+ status = UVM_VA_BLOCK_LOCK_RETRY(va_block, NULL, + uvm_va_block_add_mappings(va_block, + block_context, + id, + uvm_va_block_region_from_block(va_block), + &gpu_state->evicted, + UvmEventMapRemoteCauseEviction)); + if (status != NV_OK) + break; + } + } + + if (status != NV_OK) { + UVM_ERR_PRINT("Deferred mappings to evicted memory for block [0x%llx, 0x%llx] failed %s, processor %s\n", + va_block->start, + va_block->end, + nvstatusToString(status), + uvm_va_space_processor_name(va_space, id)); + } + } + +unlock: + uvm_va_space_up_read(va_space); + uvm_va_block_context_free(block_context); + +done: + uvm_va_space_mm_release_unlock(va_space, mm); + uvm_va_block_release(va_block); +} + +static void block_deferred_eviction_mappings_entry(void *args) +{ + UVM_ENTRY_VOID(block_deferred_eviction_mappings(args)); +} + +NV_STATUS uvm_va_block_evict_chunks(uvm_va_block_t *va_block, + uvm_gpu_t *gpu, + uvm_gpu_chunk_t *root_chunk, + uvm_tracker_t *tracker) +{ + NV_STATUS status = NV_OK; + NvU32 i; + uvm_va_block_gpu_state_t *gpu_state; + uvm_va_block_region_t chunk_region; + size_t num_gpu_chunks = block_num_gpu_chunks(va_block, gpu); + size_t chunks_to_evict = 0; + uvm_va_block_context_t *block_context; + uvm_page_mask_t *pages_to_evict; + uvm_va_block_test_t *va_block_test = uvm_va_block_get_test(va_block); + uvm_va_space_t *va_space = uvm_va_block_get_va_space_maybe_dead(va_block); + struct mm_struct *mm; + + uvm_assert_mutex_locked(&va_block->lock); + + // The block might have been killed in the meantime + if (!va_space) + return NV_OK; + + gpu_state = uvm_va_block_gpu_state_get(va_block, gpu->id); + if (!gpu_state) + return NV_OK; + + if (va_block_test && va_block_test->inject_eviction_error) { + va_block_test->inject_eviction_error = false; + return NV_ERR_NO_MEMORY; + } + + // We cannot take this block's VA space or mmap_lock locks on the eviction + // path, however, we retain mm in order to support accounting of CPU memory + // allocations. If mappings need to be created, + // block_deferred_eviction_mappings() will be scheduled below. + mm = uvm_va_space_mm_retain(va_space); + block_context = uvm_va_block_context_alloc(mm); + if (!block_context) { + if (mm) + uvm_va_space_mm_release(va_space); + return NV_ERR_NO_MEMORY; + } + + pages_to_evict = &block_context->caller_page_mask; + uvm_page_mask_zero(pages_to_evict); + chunk_region.outer = 0; + + // Find all chunks that are subchunks of the root chunk + for (i = 0; i < num_gpu_chunks; ++i) { + uvm_chunk_size_t chunk_size; + size_t chunk_index = block_gpu_chunk_index(va_block, gpu, chunk_region.outer, &chunk_size); + UVM_ASSERT(chunk_index == i); + chunk_region.first = chunk_region.outer; + chunk_region.outer = chunk_region.first + chunk_size / PAGE_SIZE; + + if (!gpu_state->chunks[i]) + continue; + if (!uvm_gpu_chunk_same_root(gpu_state->chunks[i], root_chunk)) + continue; + + uvm_page_mask_region_fill(pages_to_evict, chunk_region); + ++chunks_to_evict; + } + + if (chunks_to_evict == 0) + goto out; + + // Only move pages resident on the GPU + uvm_page_mask_and(pages_to_evict, pages_to_evict, uvm_va_block_resident_mask_get(va_block, gpu->id)); + + block_context->policy = uvm_va_range_get_policy(va_block->va_range); + + // TODO: Bug 1765193: make_resident() breaks read-duplication, but it's not + // necessary to do so for eviction. Add a version that unmaps only the + // processors that have mappings to the pages being evicted. 
+ status = uvm_va_block_make_resident(va_block, + NULL, + block_context, + UVM_ID_CPU, + uvm_va_block_region_from_block(va_block), + pages_to_evict, + NULL, + UVM_MAKE_RESIDENT_CAUSE_EVICTION); + if (status != NV_OK) + goto out; + + // VA space lock may not be held and hence we cannot reestablish any + // mappings here and need to defer it to a work queue. + // + // Reading the accessed_by mask without the VA space lock is safe because + // adding a new processor to the mask triggers going over all the VA blocks + // in the range and locking them. And we hold one of the VA block's locks. + // + // If uvm_va_range_set_accessed_by() hasn't called + // uvm_va_block_set_accessed_by() for this block yet then it will take care + // of adding the mapping after we are done. If it already did then we are + // guaranteed to see the new processor in the accessed_by mask because we + // locked the block's lock that the thread calling + // uvm_va_range_set_accessed_by() unlocked after updating the mask. + // + // If a processor gets removed from the mask then we might not notice and + // schedule the work item anyway, but that's benign as + // block_deferred_eviction_mappings() re-examines the mask. + // + // Checking if access counters migrations are enabled on a VA space is racy + // without holding the VA space lock. However, this is fine as + // block_deferred_eviction_mappings() reexamines the value with the VA space + // lock being held. + if (uvm_processor_mask_get_count(&block_context->policy->accessed_by) > 0 || + (gpu->parent->access_counters_supported && + va_space_map_remote_on_eviction(va_space) && + !uvm_va_block_is_hmm(va_block))) { + // Always retain the VA block first so that it's safe for the deferred + // callback to release it immediately after it runs. + uvm_va_block_retain(va_block); + + if (!nv_kthread_q_schedule_q_item(&g_uvm_global.global_q, + &va_block->eviction_mappings_q_item)) { + // And release it if no new callback was scheduled + uvm_va_block_release_no_destroy(va_block); + } + } + + status = uvm_tracker_add_tracker_safe(tracker, &va_block->tracker); + if (status != NV_OK) + goto out; + + for (i = 0; i < num_gpu_chunks; ++i) { + uvm_gpu_id_t accessing_gpu_id; + uvm_gpu_chunk_t *chunk = gpu_state->chunks[i]; + + if (!chunk) + continue; + if (!uvm_gpu_chunk_same_root(chunk, root_chunk)) + continue; + + // Remove the mappings of indirect peers from the reverse map. We + // access the indirect peer mask from the VA space without holding the + // VA space lock. Therefore, we can race with enable_peer/disable_peer + // operations. However this is fine: + // + // The enable_peer sequence is as follows: + // + // set_bit in va_space->indirect_peers + // uvm_va_block_enable_peer; + // + // - If we read the mask BEFORE it is set or AFTER the mapping has + // been added to the map there is no race. + // - If we read the mask AFTER it is set but BEFORE adding the mapping + // to the reverse map, we will try to remove it although it is not + // there yet. Therefore, we use + // uvm_pmm_sysmem_mappings_remove_gpu_mapping_on_eviction, which does + // not check if the mapping is present in the reverse map. + // + // The disable_peer sequence is as follows: + // + // uvm_va_block_disable_peer; + // clear_bit in va_space->indirect_peers + // + // - If we read the mask BEFORE the mapping has been added to the map + // or AFTER the bit has been cleared, there is no race. 
+ // - If we read the mask AFTER the mapping has been removed and BEFORE + // the bit is cleared, we will try to remove the mapping, too. + // Again, uvm_pmm_sysmem_mappings_remove_gpu_mapping_on_eviction works + // in this scenario. + // Obtain the uvm_gpu_t directly via the parent GPU's id since indirect + // peers are not supported when SMC is enabled. + for_each_gpu_id_in_mask(accessing_gpu_id, &va_space->indirect_peers[uvm_id_value(gpu->id)]) { + uvm_gpu_t *accessing_gpu = uvm_va_space_get_gpu(va_space, accessing_gpu_id); + NvU64 peer_addr = uvm_pmm_gpu_indirect_peer_addr(&gpu->pmm, chunk, accessing_gpu); + + uvm_pmm_sysmem_mappings_remove_gpu_mapping_on_eviction(&accessing_gpu->pmm_reverse_sysmem_mappings, + peer_addr); + } + + uvm_mmu_chunk_unmap(chunk, tracker); + + uvm_pmm_gpu_mark_chunk_evicted(&gpu->pmm, gpu_state->chunks[i]); + gpu_state->chunks[i] = NULL; + } + +out: + uvm_va_block_context_free(block_context); + if (mm) + uvm_va_space_mm_release(va_space); + + return status; +} + +static NV_STATUS block_gpu_force_4k_ptes(uvm_va_block_t *block, uvm_va_block_context_t *block_context, uvm_gpu_t *gpu) +{ + uvm_va_block_gpu_state_t *gpu_state = block_gpu_state_get_alloc(block, gpu); + uvm_push_t push; + NV_STATUS status; + + // See comment in uvm_va_block_set_cancel + UVM_ASSERT(!gpu->parent->fault_cancel_va_supported); + + if (!gpu_state) + return NV_ERR_NO_MEMORY; + + // Force all pages to be 4K and prevent future upgrades during cancel + gpu_state->force_4k_ptes = true; + + // If we have no page tables we're done. For fault cancel we need to make + // sure that fatal faults are on different 4k PTEs than non-fatal faults, + // and we need to service all non-fatal faults before issuing the cancel. So + // either all faults are fatal and we have no PTEs (we're PROT_NONE), or + // we'll allocate PTEs later when we service the non-fatal faults. Those + // PTEs will be 4k since force_4k_ptes is set. + if (!block_gpu_has_page_tables(block, gpu)) + return NV_OK; + + // Are we 4k already? + if (!gpu_state->pte_is_2m && bitmap_empty(gpu_state->big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK)) + return NV_OK; + + status = block_alloc_ptes_with_retry(block, gpu, UVM_PAGE_SIZE_4K, NULL); + if (status != NV_OK) + return status; + + status = uvm_push_begin_acquire(gpu->channel_manager, + UVM_CHANNEL_TYPE_MEMOPS, + &block->tracker, + &push, + "Forcing 4k PTEs on block [0x%llx, 0x%llx)", + block->start, + block->end + 1); + if (status != NV_OK) + return status; + + if (gpu_state->pte_is_2m) + block_gpu_split_2m(block, block_context, gpu, NULL, &push); + else + block_gpu_split_big(block, block_context, gpu, gpu_state->big_ptes, &push); + + uvm_push_end(&push); + + UVM_ASSERT(block_check_mappings(block)); + + return uvm_tracker_add_push_safe(&block->tracker, &push); +} + +NV_STATUS uvm_va_block_set_cancel(uvm_va_block_t *va_block, uvm_va_block_context_t *block_context, uvm_gpu_t *gpu) +{ + uvm_assert_mutex_locked(&va_block->lock); + + // Volta+ devices support a global VA cancel method that does not require + // 4k PTEs. Thus, skip doing this PTE splitting, particularly because it + // could result in 4k PTEs on P9 systems which otherwise would never need + // them. 
+ if (gpu->parent->fault_cancel_va_supported) + return NV_OK; + + return block_gpu_force_4k_ptes(va_block, block_context, gpu); +} + +NV_STATUS uvm_test_va_block_inject_error(UVM_TEST_VA_BLOCK_INJECT_ERROR_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + struct mm_struct *mm; + uvm_va_block_t *va_block; + uvm_va_block_test_t *va_block_test; + NV_STATUS status = NV_OK; + + mm = uvm_va_space_mm_retain_lock(va_space); + uvm_va_space_down_read(va_space); + + status = uvm_va_block_find_create(va_space, mm, params->lookup_address, NULL, &va_block); + if (status != NV_OK) + goto out; + + va_block_test = uvm_va_block_get_test(va_block); + UVM_ASSERT(va_block_test); + + uvm_mutex_lock(&va_block->lock); + + if (params->page_table_allocation_retry_force_count) + va_block_test->page_table_allocation_retry_force_count = params->page_table_allocation_retry_force_count; + + if (params->user_pages_allocation_retry_force_count) + va_block_test->user_pages_allocation_retry_force_count = params->user_pages_allocation_retry_force_count; + + if (params->cpu_chunk_allocation_size_mask) + va_block_test->cpu_chunk_allocation_size_mask = params->cpu_chunk_allocation_size_mask; + + if (params->eviction_error) + va_block_test->inject_eviction_error = params->eviction_error; + + if (params->cpu_pages_allocation_error) + va_block_test->inject_cpu_pages_allocation_error = params->cpu_pages_allocation_error; + + if (params->populate_error) + va_block_test->inject_populate_error = params->populate_error; + + uvm_mutex_unlock(&va_block->lock); + +out: + uvm_va_space_up_read(va_space); + uvm_va_space_mm_release_unlock(va_space, mm); + return status; +} + +static uvm_prot_t g_uvm_test_pte_mapping_to_prot[UVM_TEST_PTE_MAPPING_MAX] = +{ + [UVM_TEST_PTE_MAPPING_INVALID] = UVM_PROT_NONE, + [UVM_TEST_PTE_MAPPING_READ_ONLY] = UVM_PROT_READ_ONLY, + [UVM_TEST_PTE_MAPPING_READ_WRITE] = UVM_PROT_READ_WRITE, + [UVM_TEST_PTE_MAPPING_READ_WRITE_ATOMIC] = UVM_PROT_READ_WRITE_ATOMIC, +}; + +static UVM_TEST_PTE_MAPPING g_uvm_prot_to_test_pte_mapping[UVM_PROT_MAX] = +{ + [UVM_PROT_NONE] = UVM_TEST_PTE_MAPPING_INVALID, + [UVM_PROT_READ_ONLY] = UVM_TEST_PTE_MAPPING_READ_ONLY, + [UVM_PROT_READ_WRITE] = UVM_TEST_PTE_MAPPING_READ_WRITE, + [UVM_PROT_READ_WRITE_ATOMIC] = UVM_TEST_PTE_MAPPING_READ_WRITE_ATOMIC, +}; + +NV_STATUS uvm_test_change_pte_mapping(UVM_TEST_CHANGE_PTE_MAPPING_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_block_t *block; + struct mm_struct *mm; + NV_STATUS status = NV_OK; + uvm_prot_t curr_prot, new_prot; + uvm_gpu_t *gpu = NULL; + uvm_processor_id_t id; + uvm_tracker_t local_tracker; + uvm_va_block_region_t region; + uvm_va_block_context_t *block_context = NULL; + + if (!PAGE_ALIGNED(params->va)) + return NV_ERR_INVALID_ADDRESS; + + if (params->mapping >= UVM_TEST_PTE_MAPPING_MAX) + return NV_ERR_INVALID_ARGUMENT; + + new_prot = g_uvm_test_pte_mapping_to_prot[params->mapping]; + + // mmap_lock isn't needed for invalidating CPU mappings, but it will be + // needed for inserting them. 
+    mm = uvm_va_space_mm_or_current_retain_lock(va_space);
+    uvm_va_space_down_read(va_space);
+
+    if (uvm_uuid_is_cpu(&params->uuid)) {
+        id = UVM_ID_CPU;
+    }
+    else {
+        gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, &params->uuid);
+        if (!gpu) {
+            status = NV_ERR_INVALID_DEVICE;
+            goto out;
+        }
+
+        // Check if the GPU can access the VA
+        if (!uvm_gpu_can_address(gpu, params->va, PAGE_SIZE)) {
+            status = NV_ERR_OUT_OF_RANGE;
+            goto out;
+        }
+
+        id = gpu->id;
+    }
+
+    block_context = uvm_va_block_context_alloc(mm);
+    if (!block_context) {
+        status = NV_ERR_NO_MEMORY;
+        goto out;
+    }
+
+    status = uvm_va_block_find_create(va_space, mm, params->va, block_context, &block);
+    if (status != NV_OK)
+        goto out;
+
+    uvm_mutex_lock(&block->lock);
+
+    region = uvm_va_block_region_from_start_size(block, params->va, PAGE_SIZE);
+    curr_prot = block_page_prot(block, id, region.first);
+
+    if (new_prot == curr_prot) {
+        status = NV_OK;
+        goto out_block;
+    }
+
+    // TODO: Bug 1766124: Upgrades might require revoking other processors'
+    //       access privileges. We just fail for now. Only downgrades are
+    //       supported. If we allowed upgrades, we would need to check the mm
+    //       like we do for revocation below.
+    if (new_prot > curr_prot) {
+        status = NV_ERR_INVALID_OPERATION;
+        goto out_block;
+    }
+
+    block_context->policy = uvm_va_policy_get(block, params->va);
+
+    if (new_prot == UVM_PROT_NONE) {
+        status = uvm_va_block_unmap(block, block_context, id, region, NULL, &block->tracker);
+    }
+    else {
+        UVM_ASSERT(block_is_page_resident_anywhere(block, region.first));
+
+        // Revoking CPU mappings performs a combination of unmap + map. The map
+        // portion requires a valid mm.
+        if (UVM_ID_IS_CPU(id) && !uvm_va_block_is_hmm(block) && !uvm_va_range_vma_check(block->va_range, mm)) {
+            status = NV_ERR_INVALID_STATE;
+        }
+        else {
+            status = uvm_va_block_revoke_prot(block,
+                                              block_context,
+                                              id,
+                                              region,
+                                              NULL,
+                                              new_prot + 1,
+                                              &block->tracker);
+        }
+    }
+
+out_block:
+    if (status == NV_OK)
+        status = uvm_tracker_init_from(&local_tracker, &block->tracker);
+
+    uvm_mutex_unlock(&block->lock);
+
+    if (status == NV_OK)
+        status = uvm_tracker_wait_deinit(&local_tracker);
+
+out:
+    uvm_va_space_up_read(va_space);
+    uvm_va_space_mm_or_current_release_unlock(va_space, mm);
+
+    uvm_va_block_context_free(block_context);
+
+    return status;
+}
+
+NV_STATUS uvm_test_va_block_info(UVM_TEST_VA_BLOCK_INFO_PARAMS *params, struct file *filp)
+{
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+    uvm_va_block_t *va_block;
+    NV_STATUS status = NV_OK;
+
+    BUILD_BUG_ON(UVM_TEST_VA_BLOCK_SIZE != UVM_VA_BLOCK_SIZE);
+
+    uvm_va_space_down_read(va_space);
+
+    status = uvm_va_block_find(va_space, params->lookup_address, &va_block);
+    if (status != NV_OK)
+        goto out;
+
+    params->va_block_start = va_block->start;
+    params->va_block_end = va_block->end;
+
+out:
+    uvm_va_space_up_read(va_space);
+    return status;
+}
+
+NV_STATUS uvm_test_va_residency_info(UVM_TEST_VA_RESIDENCY_INFO_PARAMS *params, struct file *filp)
+{
+    NV_STATUS status = NV_OK;
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+    uvm_va_range_t *va_range = NULL;
+    uvm_va_block_t *block = NULL;
+    NvU32 count = 0;
+    uvm_processor_mask_t resident_on_mask;
+    uvm_processor_id_t id;
+    uvm_page_index_t page_index;
+    unsigned release_block_count = 0;
+    NvU64 addr = UVM_ALIGN_DOWN(params->lookup_address, PAGE_SIZE);
+
+    uvm_va_space_down_read(va_space);
+
+    va_range = uvm_va_range_find(va_space, addr);
+    if (!va_range || va_range->type != UVM_VA_RANGE_TYPE_MANAGED) {
+        status = NV_ERR_INVALID_ADDRESS;
+        goto out;
+    }
+
+    status = uvm_va_block_find(va_space, addr, &block);
+    if (status != NV_OK) {
+        UVM_ASSERT(status == NV_ERR_OBJECT_NOT_FOUND);
+
+        params->resident_on_count = 0;
+        params->populated_on_count = 0;
+        params->mapped_on_count = 0;
+
+        status = NV_OK;
+
+        goto out;
+    }
+
+    uvm_mutex_lock(&block->lock);
+
+    page_index = uvm_va_block_cpu_page_index(block, addr);
+    uvm_va_block_page_resident_processors(block, page_index, &resident_on_mask);
+
+    for_each_id_in_mask(id, &resident_on_mask) {
+        block_phys_page_t block_page = block_phys_page(id, page_index);
+        uvm_va_space_processor_uuid(va_space, &params->resident_on[count], id);
+        params->resident_physical_size[count] = block_phys_page_size(block, block_page);
+        if (UVM_ID_IS_CPU(id)) {
+            uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index);
+
+            params->resident_physical_address[count] = page_to_phys(uvm_cpu_chunk_get_cpu_page(block,
+                                                                                               chunk,
+                                                                                               page_index));
+        }
+        else {
+            params->resident_physical_address[count] =
+                block_phys_page_address(block, block_page, uvm_va_space_get_gpu(va_space, id)).address;
+        }
+        ++count;
+    }
+    params->resident_on_count = count;
+
+    count = 0;
+    for_each_id_in_mask(id, &block->mapped) {
+        NvU32 page_size = uvm_va_block_page_size_processor(block, id, page_index);
+        if (page_size == 0)
+            continue;
+
+        uvm_va_space_processor_uuid(va_space, &params->mapped_on[count], id);
+
+        params->mapping_type[count] = g_uvm_prot_to_test_pte_mapping[block_page_prot(block, id, page_index)];
+        UVM_ASSERT(params->mapping_type[count] != UVM_TEST_PTE_MAPPING_INVALID);
+
+        params->page_size[count] = page_size;
+        ++count;
+    }
+
+    if (params->resident_on_count == 1) {
+        if (uvm_processor_mask_test(&resident_on_mask, UVM_ID_CPU)) {
+            if (uvm_pmm_sysmem_mappings_indirect_supported()) {
+                for_each_gpu_id(id) {
+                    NvU32 page_size = uvm_va_block_page_size_processor(block, id, page_index);
+                    uvm_reverse_map_t sysmem_page;
+                    uvm_cpu_chunk_t *chunk = uvm_cpu_chunk_get_chunk_for_page(block, page_index);
+                    size_t num_pages;
+                    uvm_gpu_t *gpu;
+
+                    if (!uvm_va_block_gpu_state_get(block, id))
+                        continue;
+
+                    gpu = uvm_va_space_get_gpu(va_space, id);
+
+                    if (!gpu->parent->access_counters_supported)
+                        continue;
+
+                    num_pages = uvm_pmm_sysmem_mappings_dma_to_virt(&gpu->pmm_reverse_sysmem_mappings,
+                                                                    uvm_cpu_chunk_get_gpu_mapping_addr(block,
+                                                                                                       page_index,
+                                                                                                       chunk,
+                                                                                                       id),
+                                                                    uvm_cpu_chunk_get_size(chunk),
+                                                                    &sysmem_page,
+                                                                    1);
+                    if (page_size > 0)
+                        UVM_ASSERT(num_pages == 1);
+                    else
+                        UVM_ASSERT(num_pages <= 1);
+
+                    if (num_pages == 1) {
+                        UVM_ASSERT(sysmem_page.va_block == block);
+                        UVM_ASSERT(uvm_reverse_map_start(&sysmem_page) <= addr);
+                        UVM_ASSERT(uvm_reverse_map_end(&sysmem_page) > addr);
+
+                        ++release_block_count;
+                    }
+                }
+            }
+        }
+        else {
+            uvm_gpu_id_t id = uvm_processor_mask_find_first_id(&resident_on_mask);
+            uvm_reverse_map_t gpu_mapping;
+            size_t num_pages;
+            uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, id);
+            uvm_gpu_phys_address_t phys_addr;
+
+            phys_addr = uvm_va_block_gpu_phys_page_address(block, page_index, gpu);
+            num_pages = uvm_pmm_gpu_phys_to_virt(&gpu->pmm, phys_addr.address, PAGE_SIZE, &gpu_mapping);
+
+            // Chunk may be in TEMP_PINNED state so it may not have a VA block
+            // assigned. In that case, we don't get a valid translation.
+            if (num_pages > 0) {
+                UVM_ASSERT(num_pages == 1);
+                UVM_ASSERT(gpu_mapping.va_block == block);
+                UVM_ASSERT(uvm_reverse_map_start(&gpu_mapping) == addr);
+
+                ++release_block_count;
+            }
+        }
+    }
+
+    params->mapped_on_count = count;
+
+    count = 0;
+    for_each_processor_id(id) {
+        if (!block_processor_page_is_populated(block, id, page_index))
+            continue;
+
+        uvm_va_space_processor_uuid(va_space, &params->populated_on[count], id);
+        ++count;
+    }
+    params->populated_on_count = count;
+
+out:
+    if (block) {
+        if (!params->is_async && status == NV_OK)
+            status = uvm_tracker_wait(&block->tracker);
+        uvm_mutex_unlock(&block->lock);
+        while (release_block_count--)
+            uvm_va_block_release(block);
+    }
+    uvm_va_space_up_read(va_space);
+    return status;
+}
+
+void uvm_va_block_mark_cpu_dirty(uvm_va_block_t *va_block)
+{
+    block_mark_region_cpu_dirty(va_block, uvm_va_block_region_from_block(va_block));
+}
diff --git a/kernel-open/nvidia-uvm/uvm_va_block.h b/kernel-open/nvidia-uvm/uvm_va_block.h
new file mode 100644
index 000000000..3ea0e992d
--- /dev/null
+++ b/kernel-open/nvidia-uvm/uvm_va_block.h
@@ -0,0 +1,1981 @@
+/*******************************************************************************
+    Copyright (c) 2015-2022 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef __UVM_VA_BLOCK_H__
+#define __UVM_VA_BLOCK_H__
+
+#include "uvm_forward_decl.h"
+#include "uvm_types.h"
+#include "uvm_linux.h"
+#include "nv-kref.h"
+#include "uvm_common.h"
+#include "uvm_perf_module.h"
+#include "uvm_processors.h"
+#include "uvm_lock.h"
+#include "uvm_test_ioctl.h"
+#include "uvm_tracker.h"
+#include "uvm_pmm_gpu.h"
+#include "uvm_perf_thrashing.h"
+#include "uvm_perf_utils.h"
+#include "uvm_va_block_types.h"
+#include "uvm_range_tree.h"
+#include "uvm_mmu.h"
+#include "nv-kthread-q.h"
+
+#include 
+
+// VA blocks are the leaf nodes in the uvm_va_space tree for managed allocations
+// (VA ranges with type == UVM_VA_RANGE_TYPE_MANAGED):
+//
+//  UVM: uvm_va_space -> uvm_va_range -> uvm_va_block
+//  HMM: uvm_va_space -> uvm_va_block
+//
+// Each VA block is contained within a single VA range, and contains state on
+// VAs covered by that block. Most importantly, the block tracks the current
+// state of the virtual-to-physical mappings for all VAs within that block
+// across all processors, along with the physical residency location for each
+// VA.
+// +// The block serializes both CPU and GPU operations on all VAs under that block. +// The CPU work is serialized with the block lock, and the GPU work is +// serialized by the block work tracker which itself is protected by the block +// lock. +// +// The size of each block varies from the size of the smallest VA range +// (PAGE_SIZE) to the max block size specified by UVM_VA_BLOCK_BITS. No block +// will span a 2^UVM_VA_BLOCK_BITS boundary in VA space. The size of the block +// is determined by the alignment of the parent VA range and the block's +// placement within the range. +// +// Note that this means user space will get best allocation efficiency if it +// allocates memory in 2^UVM_VA_BLOCK_BITS naturally-aligned chunks. + +// enums used for indexing into the array of pte_bits bitmaps in the VA block +// which hold the current state of each PTE. For a given {processor, PTE}, the +// bits represented here must be enough to re-create the non-address portion of +// the PTE for that processor. + +// If _READ is not set, the PTE mapping is not valid. +// If _WRITE is set, _READ is also set (_WRITE implies _READ). +typedef enum +{ + UVM_PTE_BITS_CPU_READ, + UVM_PTE_BITS_CPU_WRITE, + UVM_PTE_BITS_CPU_MAX +} uvm_pte_bits_cpu_t; + +// If _READ is not set, the PTE mapping is not valid. +// If _WRITE is set, _READ is also set (_WRITE implies _READ). +// If _ATOMIC is set, _WRITE is also set (_ATOMIC implies _WRITE and _READ). +// +// TODO: Bug 1764925: Track volatile here too if we add GPU L2 caching +typedef enum +{ + UVM_PTE_BITS_GPU_READ, + UVM_PTE_BITS_GPU_WRITE, + UVM_PTE_BITS_GPU_ATOMIC, + UVM_PTE_BITS_GPU_MAX +} uvm_pte_bits_gpu_t; + +typedef struct +{ + // Per-page residency bit vector, used for fast traversal + // of resident pages. + // + // This follows the same semantics as the CPU residency bit vector and + // notably each bit still represents a PAGE_SIZE amount of data, but the + // physical GPU memory is tracked by an array of GPU chunks below. + uvm_page_mask_t resident; + + // Pages that have been evicted to sysmem + uvm_page_mask_t evicted; + + NvU64 *cpu_chunks_dma_addrs; + + // Array of naturally-aligned chunks. Each chunk has the largest possible + // size which can fit within the block, so they are not uniform size. + // + // The number of chunks in the array is calculated using + // block_num_gpu_chunks. The size of each chunk is calculated using + // block_gpu_chunk_index. + uvm_gpu_chunk_t **chunks; + + // These page table ranges are not necessarily all used at the same time. + // The block might also be too small or not aligned properly to use the + // larger ranges, in which case they're never allocated. + // + // Once a range is allocated we keep it around to avoid constant allocation + // overhead when doing PTE splitting and merging. + // + // Check range.table to see if a given range has been allocated yet. + // + // page_table_range_big's range covers the big PTEs which fit within the + // interior of this block. See the big_ptes field. + uvm_page_table_range_t page_table_range_2m; + uvm_page_table_range_t page_table_range_big; + uvm_page_table_range_t page_table_range_4k; + + // These flags are ignored unless the {block, gpu} pair supports a 2M page + // size. In that case it's the responsibility of the block code to make the + // lower page tables active by calling uvm_page_tree_write_pde. + // + // They can be allocated and activated separately, so we have to track them + // separately. 
+ // + // Activated only means that uvm_page_tree_write_pde has been called at some + // point in the past with the appropriate range allocated. It does not imply + // that the 2M entry is a PDE (see pte_is_2m). + bool activated_big; + bool activated_4k; + + // For {block, gpu} pairs which support the 2M page size, the page table + // ranges are uninitialized on allocation. This flag tracks whether the big + // PTEs have been initialized. + // + // We don't need an equivalent flag for the 4k range because we always write + // just the 4k PTEs not covered by higher-level PTEs. Big PTEs however can + // be allocated and activated late while the 4k PTEs are already active, in + // which case we need to initialize the entire big range. + bool initialized_big; + + // Sticky state to split PTEs to 4k and keep them there. Used when a fatal + // fault has been detected on this GPU to avoid false dependencies within + // the uTLB for fatal and non-fatal faults on the same larger PTE, which + // could lead to wrong fault attribution. + bool force_4k_ptes; + + // This table shows the HW PTE states given all permutations of pte_is_2m, + // big_ptes, and pte_bits. Note that the first row assumes that the 4k page + // tables have been allocated (if not, then no PDEs are allocated either). + // + // |-------------- SW state --------------|------------------- HW state --------------------| + // pte_is_2m pte_is_big pte_bits[READ] | Page size PDE0(2M only) Big PTE 4k PTE + // ---------------------------------------------------------------------------------------- + // 0 0 0 | 4k Valid PDE Invalid [1] Invalid + // 0 0 1 | 4k Valid PDE Invalid [1] Valid + // 0 1 0 | Big Valid PDE Unmapped [2] x + // 0 1 1 | Big Valid PDE Valid x + // 1 must be 0 0 | 2M Invalid x x + // 1 must be 0 1 | 2M Valid PTE x x + // + // [1]: The big PTE may be unallocated, in which case its pointer won't be + // valid in the parent PDE. If the big PTE is allocated, it will be + // invalid so the 4k PTEs are active. + // + // [2]: The unmapped big PTE pattern differs from the invalid pattern, and + // it prevents HW from reading the 4k entries. See the unmapped_pte() + // MMU HAL function. + + // If pte_is_2m is true, there is a 2M PTE covering this VA block (valid or + // invalid). If false then we're in one of the following scenarios: + // 1) This {block, gpu} does not support 2M pages. + // 2) 2M pages are supported but the page_table_range_2m has not been + // allocated (implying that the other page table ranges have not been + // allocated either). + // 3) page_table_range_2m has been allocated, but the big_ptes bitmap should + // be used to determine the mix of big and 4k PTEs. + bool pte_is_2m; + + // When pte_is_2m is false, this block consists of any possible mix of big + // and 4k PTEs. This bitmap describes that mix. A set bit indicates that the + // corresponding big-page-sized region of the block is covered by a big PTE. + // A cleared bit indicates that it is covered by 4k PTEs. + // + // Neither setting implies that the PTE currently has a valid mapping, it + // just indicates which PTE is read by the GPU (see the table above). + // + // The indices represent the corresponding big PTEs in the block's interior. + // For example, a block with alignment and size of one 4k page on either + // side of a big page will only use bit 0. Use uvm_va_block_big_page_index to look + // the big_ptes index of a page. + // + // The block might not be able to fit any big PTEs, in which case this + // bitmap is always zero. 
Use uvm_va_block_gpu_num_big_pages to find the number of + // valid bits in this mask. + DECLARE_BITMAP(big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + // See the comments for uvm_va_block_mmap_t::cpu.pte_bits. + // + // The major difference is that these bits are always accurate since, unlike + // the CPU PTEs, the UVM driver is in full control of these mappings. + // + // Note that the granularity is always PAGE_SIZE, not whatever GPU PTE size + // happens to currently map these regions. PAGE_SIZE is the minimum + // granularity of operations on the VA blocks. As a future optimization we + // could consider sub-PAGE_SIZE operations if PAGE_SIZE > 4K and the CPU + // isn't involved, for example false sharing among peer GPUs. + uvm_page_mask_t pte_bits[UVM_PTE_BITS_GPU_MAX]; + +} uvm_va_block_gpu_state_t; + +// TODO: Bug 1766180: Worst-case we could have one of these per system page. +// Options: +// 1) Rely on the OOM killer to prevent the user from trying to do that +// 2) Be much more space-conscious in this struct (difficult) +// 3) Cap the per-process range and/or block count, like vm.max_map_count +// does for vmas +struct uvm_va_block_struct +{ + // Reference count for this block. References are held by: + // - The parent VA range for managed blocks or VA space for HMM blocks + // - The reverse map + // - The eviction path temporarily when attempting to evict a GPU page under + // this block + // + // This isn't protected by the lock on the eviction path, so it must be + // atomic. nv_kref provides that. + nv_kref_t kref; + + // Lock protecting the block. See the comment at the top of uvm.c. + uvm_mutex_t lock; + + // Parent VA range. UVM managed blocks have this set. HMM blocks will have + // va_range set to NULL and hmm.va_space set instead. Dead blocks that are + // waiting for the last ref count to be removed have va_range and + // hmm.va_space set to NULL (could be either type of block). + // + // This field can be read while holding either the block lock or just the VA + // space lock in read mode, since it can only change when the VA space lock + // is held in write mode. + uvm_va_range_t *va_range; + + // Virtual address [start, end] covered by this block. These fields can be + // read while holding either the block lock or just the VA space lock in + // read mode, since they can only change when the VA space lock is held in + // write mode. + NvU64 start; + NvU64 end; + + // Per-processor residency bit vector, used for fast lookup of which + // processors are active in this block. + // + // A set bit means the corresponding processor has a coherent physical copy + // of memory somewhere in the block. The per-processor state must then be + // inspected to find out which pages. The processor may or may not have a + // mapping to that physical memory, however. + // + // A cleared bit means the corresponding processor does not have a coherent + // physical copy of any pages under this block. The processor may still have + // cached pages allocated for future use, however. It also may have mappings + // to pages resident on other processors. + uvm_processor_mask_t resident; + + // Per-processor mapping bit vector, used for fast lookup of which + // processors are active in this block. + // + // A set bit means the corresponding processor has an active, valid page + // table mapping to some VA in this block. The per-processor pte_bits state + // must then be inspected to find out the mapping address and permissions. 
+ // + // A cleared bit means the corresponding processor has no virtual mappings + // within this block (all pte_bits entries are 0). + uvm_processor_mask_t mapped; + + // Per-processor evicted bit vector, used for fast lookup of which GPUs + // have evicted pages in this block. + // + // A set bit means the corresponding processor was the residency of some of + // the pages in the block when they were evicted due to memory capacity + // limitations. The per-processor state must then be inspected to find out + // which pages. + // + // A cleared bit means the corresponding processor has no evicted pages + // within this block (all evicted entries are 0). + uvm_processor_mask_t evicted_gpus; + + struct + { + // Per-page residency bit vector, used for fast traversal of resident + // pages. + // + // A set bit means the CPU has a coherent copy of the physical page + // resident in its memory, and that the corresponding entry in the pages + // array is present. This does not mean that the coherent copy is + // currently mapped anywhere, however. A page may be resident on + // multiple processors when in read-duplicate mode. + // + // A cleared bit means the CPU does not have a coherent copy of that + // page resident. The corresponding entry in the pages array may or may + // not present. If the entry is present, it's a cached page which can be + // reused in the future. + // + // Allocating PAGES_PER_UVM_VA_BLOCK is overkill when the block is + // smaller than UVM_VA_BLOCK_SIZE, but it's not much extra memory + // overhead on the whole. + uvm_page_mask_t resident; + + // CPU memory chunks represent physically contiguous CPU memory + // allocations. See uvm_pmm_sysmem.h for more details on CPU chunks. + // This member is meant to hold an opaque value indicating the CPU + // chunk storage method. For more details on CPU chunk storage, + // see uvm_cpu_chunk_storage_type_t in uvm_pmm_sysmem.c. + unsigned long chunks; + + // Per-page allocation bit vector. + // + // A set bit means that a CPU page has been allocated for the + // corresponding page index. + uvm_page_mask_t allocated; + + // Per-page mapping bit vectors, one per bit we need to track. These are + // used for fast traversal of valid mappings in the block. These contain + // all non-address bits needed to establish a virtual mapping on this + // processor (permissions, cacheability, etc). + // + // A cleared bit in UVM_PTE_BITS_CPU_READ means the CPU has no valid + // virtual mapping to that address (the access will fault). Further, + // UVM_PTE_BITS_CPU_WRITE is guaranteed to also be clear. + // + // A set bit in UVM_PTE_BITS_CPU_READ means the CPU has a valid mapping + // at that address with at least read permissions. The physical page for + // that mapping is contained in the pages array. If + // UVM_PTE_BITS_CPU_WRITE is not set, the mapping is read-only. + // Otherwise, the mapping is read-write. + // + // Note that this is the maximum permissions a PTE could have, but not + // necessarily the actual current permissions of the CPU PTEs. The UVM + // driver will never change the PTEs without updating this state, but + // the kernel can downgrade our CPU mappings at any time without + // notifying the UVM driver (for example in response to user space + // calling madvise with MADV_DONTNEED). + uvm_page_mask_t pte_bits[UVM_PTE_BITS_CPU_MAX]; + + // Whether the CPU has ever mapped a page on this VA block. This is + // used to force GMMU PDE1 pre-population on ATS systems. 
See + // pre_populate_gpu_pde1 in uvm_va_block.c for more information. + NvU8 ever_mapped : 1; + + // We can get "unexpected" faults if multiple CPU threads fault on the + // same address simultaneously and race to create the mapping. Since + // our CPU fault handler always unmaps to handle the case where the + // kernel downgrades our CPU mappings, we can introduce an infinite + // stream of CPU faults in multi-threaded workloads. + // + // In order to handle this scenario, we keep track of the first thread + // that faulted on a page with valid permissions and the timestamp. + // Then, we keep track of the subsequent faults on that page during a + // window of time. If the first thread faults again on the page, that + // will indicate that the mapping has been downgraded by the kernel and + // we need to remap it. Faults from the rest of threads are just + // ignored. The information is also cleared on the following events: + // - The tracking window finishes + // - The page is unmapped + struct + { + // Timestamp when the first fault was detected. This also is used + // as a flag that the contents of this struct are valid + NvU64 first_fault_stamp; + + // First thread that faulted while having valid permissions. we + // don't take a reference on the pid so we shouldn't ever use it + // for task-lookup in the kernel. We only use it as a heuristic so + // it's OK if the pid gets destroyed or reused. + pid_t first_pid; + + // Index of the page whose faults are being tracked + uvm_page_index_t page_index; + } fault_authorized; + } cpu; + + // Per-GPU residency and mapping state + // + // TODO: Bug 1766180: Even though these are pointers, making this a static + // array will use up a non-trivial amount of storage for small blocks. + // In most cases we won't have anywhere near this many GPUs active + // anyway. Consider using a dense array of just the GPUs registered in + // this VA space, depending on the perf of accessing that array and on + // how noticeable this memory overhead actually is. + uvm_va_block_gpu_state_t *gpus[UVM_ID_MAX_GPUS]; + + // Mask to keep track of the pages that are read-duplicate + uvm_page_mask_t read_duplicated_pages; + + // Mask to keep track of the pages that are not mapped on any non-UVM-Lite + // processor. + // 0: Page is definitely not mapped by any processors + // 1: Page may or may not be mapped by a processor + // + // This mask sets the bit when the page is mapped on any non-UVM-Lite + // processor but it is not always unset on unmap (to avoid a performance + // impact). Therefore, it can contain false negatives. It should be only + // used for opportunistic optimizations that have a fast path for pages + // that are not mapped anywhere (see uvm_va_block_migrate_locked, for + // example), but not the other way around. + uvm_page_mask_t maybe_mapped_pages; + + // Tracks all outstanding GPU work related to this block: GPU copies, PTE + // updates, TLB invalidates, etc. The residency and mapping state is only + // valid once this tracker is done. + // + // CPU operations need to wait for this tracker to be done. GPU operations + // need to acquire it before pushing their work, then that work must be + // added to this tracker before the block's lock is dropped. 
+ uvm_tracker_t tracker; + + // A queue item for establishing eviction mappings in a deferred way + nv_kthread_q_item_t eviction_mappings_q_item; + + uvm_perf_module_data_desc_t perf_modules_data[UVM_PERF_MODULE_TYPE_COUNT]; + +#if UVM_IS_CONFIG_HMM() + struct + { + + // The MMU notifier is registered per va_block. + struct mmu_interval_notifier notifier; + + + // Parent VA space pointer. It is NULL for UVM managed blocks or if + // the HMM block is dead. This field can be read while holding the + // block lock and is only modified while holding the va_space write + // lock and va_block lock (same as the va_range pointer). + uvm_va_space_t *va_space; + + // Tree of uvm_va_policy_node_t. The policy node ranges always cover + // all or part of a VMA range or a contiguous range of VMAs within the + // va_block. Policy nodes are resized or deleted when the underlying + // VMA range is changed by Linux via the invalidate() callback. + // Otherwise, policies could be stale after munmap(). + // Locking: The va_block lock is needed to access or modify the tree. + uvm_range_tree_t va_policy_tree; + + // Storage node for range tree of va_blocks. + uvm_range_tree_node_t node; + } hmm; +#endif +}; + +// We define additional per-VA Block fields for testing. When +// uvm_enable_builtin_tests is defined, all VA Blocks will have +// uvm_va_block_wrapper_t size. Otherwise, the test fields are not available. +// Use the uvm_va_block_get_test function defined below to obtain a safe +// pointer to uvm_va_block_test_t from a uvm_va_block_t pointer. +struct uvm_va_block_wrapper_struct +{ + uvm_va_block_t block; + + struct uvm_va_block_test_struct + { + // Count of how many page table allocations should be forced to retry + // with eviction enabled. Used for testing only. + NvU32 page_table_allocation_retry_force_count; + + // Count of how many user pages allocations should be forced to retry + // with eviction enabled. Used for testing only. + NvU32 user_pages_allocation_retry_force_count; + + // Mask of chunk sizes to be used for CPU chunk allocations. + // The actual set of chunk sizes to be used will be the set resulting + // from AND'ing this value with the value of + // uvm_cpu_chunk_allocation_sizes module parameter. + NvU32 cpu_chunk_allocation_size_mask; + + // Force the next eviction attempt on this block to fail. Used for + // testing only. + bool inject_eviction_error; + + // Subsequent operations that need to allocate CPU pages will fail. As + // opposed to other error injection settings, this one is persistent. + // This is because this error is supposed to be fatal and tests verify + // the state of the VA blocks after the failure. However, some tests + // use kernels to trigger migrations and a fault replay could trigger + // a successful migration if this error flag is cleared. + bool inject_cpu_pages_allocation_error; + + // Force the next successful chunk allocation to then fail. Used for testing + // only to simulate driver metadata allocation failure. + bool inject_populate_error; + } test; +}; + +// Tracking needed for supporting allocation-retry of user GPU memory +typedef struct +{ + // A tracker used for all allocations from PMM. + uvm_tracker_t tracker; + + // List of allocated chunks (uvm_gpu_chunk_t). Currently all chunks are of + // the same size. However it can contain chunks from multiple GPUs. All + // remaining free chunks are freed when the operation is finished with + // uvm_va_block_retry_deinit(). 
+ struct list_head free_chunks; + + // List of chunks allocated and used during the block operation. This list + // can contain chunks from multiple GPUs. All the used chunks are unpinned + // when the operation is finished with uvm_va_block_retry_deinit(). + struct list_head used_chunks; +} uvm_va_block_retry_t; + +// Module load/exit +NV_STATUS uvm_va_block_init(void); +void uvm_va_block_exit(void); + +// Allocates and initializes the block. The block's ref count is initialized to +// 1. The caller is responsible for inserting the block into its parent +// va_range. +// +// The caller must be holding the VA space lock in at least read mode. +// +// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED. +NV_STATUS uvm_va_block_create(uvm_va_range_t *va_range, + NvU64 start, + NvU64 end, + uvm_va_block_t **out_block); + +// Internal function called only when uvm_va_block_release drops the ref count +// to 0. Do not call directly. +void uvm_va_block_destroy(nv_kref_t *kref); + +static inline void uvm_va_block_retain(uvm_va_block_t *va_block) +{ + nv_kref_get(&va_block->kref); +} + +static inline void uvm_va_block_release(uvm_va_block_t *va_block) +{ + if (va_block) { + // The calling thread shouldn't be holding the block's mutex when + // releasing the block as it might get destroyed. + uvm_assert_unlocked_order(UVM_LOCK_ORDER_VA_BLOCK); + nv_kref_put(&va_block->kref, uvm_va_block_destroy); + } +} + +// Same as uvm_va_block_release but the caller may be holding the VA block lock. +// The caller must ensure that the refcount will not get to zero in this call. +static inline void uvm_va_block_release_no_destroy(uvm_va_block_t *va_block) +{ + int destroyed = nv_kref_put(&va_block->kref, uvm_va_block_destroy); + UVM_ASSERT(!destroyed); +} + +// Returns true if the block is managed by HMM. +// Locking: This can be called while holding either the block lock or just the +// VA space lock in read mode, since it can only change when the VA space lock +// is held in write mode. +static inline bool uvm_va_block_is_hmm(uvm_va_block_t *va_block) +{ +#if UVM_IS_CONFIG_HMM() + return va_block->hmm.va_space; +#else + return false; +#endif +} + +// Return true if the block is dead. +// Locking: This can be called while holding either the block lock or just the +// VA space lock in read mode, since it can only change when the VA space lock +// is held in write mode. +static inline bool uvm_va_block_is_dead(uvm_va_block_t *va_block) +{ + if (va_block->va_range) + return false; + +#if UVM_IS_CONFIG_HMM() + if (va_block->hmm.va_space) + return false; +#endif + + return true; +} + +static inline uvm_va_block_gpu_state_t *uvm_va_block_gpu_state_get(uvm_va_block_t *va_block, uvm_gpu_id_t gpu_id) +{ + return va_block->gpus[uvm_id_gpu_index(gpu_id)]; +} + +// Return the va_space pointer of the given block or NULL if the block is dead. +// Locking: This can be called while holding either the block lock or just the +// VA space lock in read mode, since it can only change when the VA space lock +// is held in write mode. +uvm_va_space_t *uvm_va_block_get_va_space_maybe_dead(uvm_va_block_t *va_block); + +// Return the va_space pointer of the given block assuming the block is not dead +// (asserts that it is not dead and asserts va_space is not NULL). +// Locking: This can be called while holding either the block lock or just the +// VA space lock in read mode, since it can only change when the VA space lock +// is held in write mode. 
+uvm_va_space_t *uvm_va_block_get_va_space(uvm_va_block_t *va_block);
+
+// Dynamic cache-based allocation for uvm_va_block_context_t.
+//
+// See uvm_va_block_context_init() for a description of the mm parameter.
+uvm_va_block_context_t *uvm_va_block_context_alloc(struct mm_struct *mm);
+void uvm_va_block_context_free(uvm_va_block_context_t *va_block_context);
+
+// Initialization of an already-allocated uvm_va_block_context_t.
+//
+// mm is used to initialize the value of va_block_context->mm. NULL is allowed.
+static void uvm_va_block_context_init(uvm_va_block_context_t *va_block_context, struct mm_struct *mm)
+{
+    UVM_ASSERT(va_block_context);
+
+    // Write garbage into the VA block context to ensure that the UVM code
+    // clears masks appropriately
+    if (UVM_IS_DEBUG())
+        memset(va_block_context, 0xff, sizeof(*va_block_context));
+
+    va_block_context->mm = mm;
+}
+
+// TODO: Bug 1766480: Using only page masks instead of a combination of regions
+// and page masks could simplify the below APIs and their implementations
+// at the cost of having to scan the whole mask for small regions.
+// Investigate the performance effects of doing that.
+
+// Moves the physical pages of the given region onto the destination processor.
+// If page_mask is non-NULL, the movement is further restricted to only those
+// pages in the region which are present in the mask.
+//
+// prefetch_page_mask may be passed as a subset of page_mask when cause is
+// UVM_MAKE_RESIDENT_CAUSE_FAULT to indicate pages that have been pulled due
+// to automatic page prefetching heuristics. For pages in this mask,
+// UVM_MAKE_RESIDENT_CAUSE_PREFETCH will be reported in migration events,
+// instead.
+//
+// This function breaks read duplication for all given pages even if they
+// don't migrate. Pages which are not resident on the destination processor
+// will also be unmapped from all existing processors, be populated in the
+// destination processor's memory, and copied to the new physical location.
+// Any new memory will be zeroed if it is the first allocation for that page
+// in the system.
+//
+// This function does not create any new virtual mappings.
+//
+// This function acquires/waits for the va_block tracker and updates that
+// tracker with any new work pushed.
+//
+// Allocation-retry: this operation may need to perform eviction to be able to
+// allocate GPU memory successfully and if that happens,
+// NV_ERR_MORE_PROCESSING_REQUIRED will be returned. That also means that the
+// block's lock has been unlocked and relocked as part of the call and that the
+// whole sequence of operations performed under the block's lock needs to be
+// attempted again. To facilitate that, the caller needs to provide the same
+// va_block_retry struct for each attempt; it must be initialized before the
+// first attempt and deinitialized after the last one. Most callers can just
+// use UVM_VA_BLOCK_LOCK_RETRY(), which takes care of that for the caller.
+//
+// If dest_id is the CPU then va_block_retry can be NULL and allocation-retry of
+// user memory is guaranteed not to happen. Allocation-retry of page tables can
+// still occur though.
+//
+// va_block_context must be non-NULL. This function will set a bit in
+// va_block_context->make_resident.pages_changed_residency for each page that
+// changed residency (due to a migration or first population) as a result of the
+// operation. This function only sets bits in that mask. It is the caller's
+// responsibility to zero the mask beforehand if needed.
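+//
+// Illustrative usage (a sketch, not part of the original header): a caller
+// migrating the whole block to dest_id can let UVM_VA_BLOCK_LOCK_RETRY()
+// (defined below) handle the locking and allocation-retry loop. This assumes
+// an already-initialized va_block_context "ctx" and some
+// uvm_make_resident_cause_t value "cause":
+//
+//     uvm_va_block_retry_t retry;
+//     NV_STATUS status;
+//
+//     status = UVM_VA_BLOCK_LOCK_RETRY(va_block, &retry,
+//         uvm_va_block_make_resident(va_block,
+//                                    &retry,
+//                                    ctx,
+//                                    dest_id,
+//                                    uvm_va_block_region_from_block(va_block),
+//                                    NULL,
+//                                    NULL,
+//                                    cause));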
+// +// Notably any status other than NV_OK indicates that the block's lock might +// have been unlocked and relocked. +// +// LOCKING: The caller must hold the va_block lock. +NV_STATUS uvm_va_block_make_resident(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t dest_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + const uvm_page_mask_t *prefetch_page_mask, + uvm_make_resident_cause_t cause); + +// Similar to uvm_va_block_make_resident (read documentation there). The main +// differences are: +// - Pages are copied not moved (i.e. other copies of the page are not +// unmapped) +// - Processors with a resident copy of pages that migrated have write and +// atomic access permission revoked, unlike in uvm_va_block_make_resident +// where they are unmapped +// - All remote mappings (due to either SetAccessedBy or performance heuristics) +// are broken +// - LOCKING: If va_block_context->mm != NULL, va_block_context->mm->mmap_lock +// must be held in at least read mode. +NV_STATUS uvm_va_block_make_resident_read_duplicate(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t dest_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + const uvm_page_mask_t *prefetch_page_mask, + uvm_make_resident_cause_t cause); + +// Creates or upgrades a mapping from the input processor to the given virtual +// address region. Pages which already have new_prot permissions or higher are +// skipped, so this call ensures that the range is mapped with at least new_prot +// permissions. new_prot must not be UVM_PROT_NONE. uvm_va_block_unmap or +// uvm_va_block_revoke_prot should be used to downgrade permissions instead. +// +// The mapped pages are described by the region parameter and the map page mask +// that allows the caller to restrict the map operation to specific pages within +// the region. If the page mask is NULL then the whole region is mapped. +// +// If the input processor is a GPU with no GPU VA space registered, or if the +// input processor is the CPU and this thread is not allowed to create CPU +// mappings, this function does nothing. CPU mappings are only allowed if +// uvm_va_range_vma_check(va_block_context->mm) is valid, so the caller must +// set va_block_context->mm before calling this function. +// +// cause specifies the cause to be reported in events in case a remote mapping +// is created. +// +// Any CPU mappings will wait for the va_block tracker. If this function pushes +// GPU work it will first acquire the va_block tracker, then add the pushed work +// to out_tracker. It is the caller's responsibility to add this work to +// va_block's tracker. Note that while it is generally safe to run map +// operations on different GPUs concurrently, two PTE operations (map, unmap, +// revoke) on the same GPU must be serialized even if they target different +// pages because the earlier operation can cause a PTE split or merge which is +// assumed by the later operation. +// +// va_block_context must not be NULL. +// +// If allocation-retry was required as part of the operation and was successful, +// NV_ERR_MORE_PROCESSING_REQUIRED is returned. In this case, the entries in the +// out_tracker were added to the block's tracker and then the block's lock was +// unlocked and relocked. 
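+//
+// As an illustrative sketch (not part of the original header, and assuming the
+// uvm_tracker helpers uvm_tracker_init(), uvm_tracker_add_tracker_safe() and
+// uvm_tracker_deinit() behave as their names suggest), a caller typically
+// funnels the pushed GPU work back into the block's tracker:
+//
+//     uvm_tracker_t local_tracker;
+//     NV_STATUS status;
+//
+//     uvm_tracker_init(&local_tracker);
+//     status = uvm_va_block_map(va_block, ctx, id, region, NULL,
+//                               UVM_PROT_READ_ONLY, cause, &local_tracker);
+//     if (status == NV_OK)
+//         status = uvm_tracker_add_tracker_safe(&va_block->tracker, &local_tracker);
+//     uvm_tracker_deinit(&local_tracker);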
+//
+// In general, any status other than NV_OK indicates that the block's lock might
+// have been unlocked and relocked.
+//
+// LOCKING: The caller must hold the va block lock. If va_block_context->mm !=
+//          NULL, va_block_context->mm->mmap_lock must be held in at least read
+//          mode.
+NV_STATUS uvm_va_block_map(uvm_va_block_t *va_block,
+                           uvm_va_block_context_t *va_block_context,
+                           uvm_processor_id_t id,
+                           uvm_va_block_region_t region,
+                           const uvm_page_mask_t *map_page_mask,
+                           uvm_prot_t new_prot,
+                           UvmEventMapRemoteCause cause,
+                           uvm_tracker_t *out_tracker);
+
+// Like uvm_va_block_map, except it maps all processors in the input mask. The
+// VA block tracker contains all map operations on return.
+//
+// Note that this can return NV_ERR_MORE_PROCESSING_REQUIRED just like
+// uvm_va_block_map() indicating that the operation needs to be retried.
+NV_STATUS uvm_va_block_map_mask(uvm_va_block_t *va_block,
+                                uvm_va_block_context_t *va_block_context,
+                                const uvm_processor_mask_t *map_processor_mask,
+                                uvm_va_block_region_t region,
+                                const uvm_page_mask_t *map_page_mask,
+                                uvm_prot_t new_prot,
+                                UvmEventMapRemoteCause cause);
+
+// Unmaps virtual regions from a single processor. This does not free page
+// tables or physical memory. This is safe to call on the eviction path, but the
+// caller must ensure that the block hasn't been killed.
+//
+// The unmapped pages are described by the region parameter and the unmap page
+// mask that allows the caller to restrict the unmap operation to specific pages
+// within the region. If the page mask is NULL then the whole region is
+// unmapped.
+//
+// If id is UVM_ID_CPU, this is guaranteed to return NV_OK, and this is safe to
+// call without holding a reference on the mm which owns the associated vma.
+//
+// Any CPU unmappings will wait for the va_block tracker. If this function
+// pushes GPU work it will first acquire the va_block tracker, then add the
+// pushed work to out_tracker. It is the caller's responsibility to add this
+// work to va_block's tracker. Note that while it is generally safe to run unmap
+// operations on different GPUs concurrently, two PTE operations (map, unmap,
+// revoke) on the same GPU must be serialized even if they target different
+// pages because the earlier operation can cause a PTE split or merge which is
+// assumed by the later operation.
+//
+// va_block_context must not be NULL.
+//
+// If allocation-retry was required as part of the operation and was successful,
+// NV_ERR_MORE_PROCESSING_REQUIRED is returned. In this case, the entries in the
+// out_tracker were added to the block's tracker and then the block's lock was
+// unlocked and relocked. It is guaranteed that retry will not be required if
+// the unmap does not cause a PTE split. Examples of operations which will not
+// cause a PTE split include unmapping the entire block, unmapping all PTEs with
+// matching attributes, and unmapping all PTEs which point to the same physical
+// chunk.
+//
+// LOCKING: The caller must hold the va_block lock.
+NV_STATUS uvm_va_block_unmap(uvm_va_block_t *va_block,
+                             uvm_va_block_context_t *va_block_context,
+                             uvm_processor_id_t id,
+                             uvm_va_block_region_t region,
+                             const uvm_page_mask_t *unmap_page_mask,
+                             uvm_tracker_t *out_tracker);
+
+// Like uvm_va_block_unmap, except it unmaps all processors in the input mask.
+// The VA block tracker contains all unmap operations on return.
+NV_STATUS uvm_va_block_unmap_mask(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + const uvm_processor_mask_t *unmap_processor_mask, + uvm_va_block_region_t region, + const uvm_page_mask_t *unmap_page_mask); + +// Function called when the preferred location changes. Notably: +// - Mark all CPU pages as dirty because the new processor may not have +// up-to-date data. +// - Unmap the preferred location's processor from any pages in this region +// which are not resident on the preferred location. +// LOCKING: The caller must hold the VA block lock. +NV_STATUS uvm_va_block_set_preferred_location_locked(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context); + +// Maps the given processor to all resident pages in this block, as allowed by +// location and policy. Waits for the operation to complete before returning. +// +// LOCKING: This takes and releases the VA block lock. If va_block_context->mm +// != NULL, va_block_context->mm->mmap_lock must be held in at least +// read mode. +NV_STATUS uvm_va_block_set_accessed_by(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t processor_id); + +// Breaks SetAccessedBy and remote mappings +// +// va_block_context must NOT be NULL +// +// LOCKING: This takes and releases the VA block lock. If va_block_context->mm +// != NULL, va_block_context->mm->mmap_lock must be held in at least +// read mode. +NV_STATUS uvm_va_block_set_read_duplication(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context); + +// Restores SetAccessedBy mappings +// +// va_block_context must NOT be NULL +// +// LOCKING: This takes and releases the VA block lock. If va_block_context->mm +// != NULL, va_block_context->mm->mmap_lock must be held in at least +// read mode. +NV_STATUS uvm_va_block_unset_read_duplication(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context); + +// API for access privilege revocation +// +// Revoke prot_to_revoke access permissions for the given processor. +// +// The revoked pages are described by the region parameter and the revoke page +// mask that allows the caller to restrict the revoke operation to specific +// pages within the region. +// +// prot_to_revoke must be greater than UVM_PROT_READ_ONLY. Caller should call +// unmap explicitly if it wants to revoke all access privileges. +// +// If id is UVM_ID_CPU, and prot_to_revoke is UVM_PROT_READ_WRITE_ATOMIC, no +// action is performed. If the processor id corresponds to the CPU and the +// caller cannot establish CPU mappings because it does not have a reference on +// vma->vm_mm (va_block_context->mm != vma->vm_mm), the page will be simply +// unmapped. Caller should call unmap explicitly if it wants to revoke all +// access privileges. +// +// Any CPU revocation will wait for the va_block tracker. If this function +// pushes GPU work it will first acquire the va_block tracker, then add the +// pushed work to out_tracker. It is the caller's responsibility to add this +// work to va_block's tracker. Note that while it is generally safe to run +// revocation operations on different GPUs concurrently, two PTE operations +// (map, unmap, revoke) on the same GPU must be serialized even if they target +// different pages because the earlier operation can cause a PTE split or merge +// which is assumed by the later operation. +// +// va_block_context must not be NULL. 
+// +// If allocation-retry was required as part of the operation and was successful, +// NV_ERR_MORE_PROCESSING_REQUIRED is returned. In this case, the entries in the +// out_tracker were added to the block's tracker and then the block's lock was +// unlocked and relocked. +// +// In general, any status other than NV_OK indicates that the block's lock might +// have been unlocked and relocked. +// +// LOCKING: The caller must hold the va block lock. If va_block_context->mm != +// NULL, va_block_context->mm->mmap_lock must be held in at least read +// mode. +NV_STATUS uvm_va_block_revoke_prot(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t id, + uvm_va_block_region_t region, + const uvm_page_mask_t *revoke_page_mask, + uvm_prot_t prot_to_revoke, + uvm_tracker_t *out_tracker); + +// Like uvm_va_block_revoke_prot(), except it revokes all processors in the +// input mask. The VA block tracker contains all revocation operations on +// return. +// +// Note that this can return NV_ERR_MORE_PROCESSING_REQUIRED just like +// uvm_va_block_revoke_prot() indicating that the operation needs to be retried. +NV_STATUS uvm_va_block_revoke_prot_mask(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + const uvm_processor_mask_t *revoke_processor_mask, + uvm_va_block_region_t region, + const uvm_page_mask_t *revoke_page_mask, + uvm_prot_t prot_to_revoke); + +// Tries to map all pages in the given region and map_page_mask with at most +// max_prot privileges for appropriate processors as determined by the +// accessed_by mask, heuristics and the given processor mask (excluding +// processor_id, which triggered the migration and should have already been +// mapped). +// +// va_block_context must not be NULL. +// +// This function acquires/waits for the va_block tracker and updates that +// tracker with any new work pushed. +// +// Note that this can return NV_ERR_MORE_PROCESSING_REQUIRED just like +// uvm_va_block_map() indicating that the operation needs to be retried. +// +// LOCKING: The caller must hold the va block lock. If va_block_context->mm != +// NULL, va_block_context->mm->mmap_lock must be held in at least read +// mode. +NV_STATUS uvm_va_block_add_mappings_after_migration(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t new_residency, + uvm_processor_id_t processor_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *map_page_mask, + uvm_prot_t max_prot, + const uvm_processor_mask_t *processor_mask); + +// Maps processors using SetAccessedBy to all resident pages in the region +// parameter. On Volta+ it is also used to map evicted pages that can be later +// pulled back by using access counters. +// +// This function acquires/waits for the va_block tracker and updates that +// tracker with any new work pushed. +// +// Note that this can return NV_ERR_MORE_PROCESSING_REQUIRED just like +// uvm_va_block_map() indicating that the operation needs to be retried. +// +// va_block_context must not be NULL. +// +// LOCKING: The caller must hold the va block lock. If va_block_context->mm != +// NULL, va_block_context->mm->mmap_lock must be held in at least read +// mode. +NV_STATUS uvm_va_block_add_mappings(uvm_va_block_t *va_block, + uvm_va_block_context_t *va_block_context, + uvm_processor_id_t processor_id, + uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + UvmEventMapRemoteCause cause); + +// Notifies the VA block that a new GPU VA space has been created. 
+// LOCKING: The caller must hold the va_block lock +NV_STATUS uvm_va_block_add_gpu_va_space(uvm_va_block_t *va_block, uvm_gpu_va_space_t *gpu_va_space); + +// Destroys the VA block's mappings and page tables on the GPU, if it has any. +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// LOCKING: The caller must hold the va_block lock. If mm != NULL, the caller +// must hold mm->mmap_lock in at least read mode. +void uvm_va_block_remove_gpu_va_space(uvm_va_block_t *va_block, uvm_gpu_va_space_t *gpu_va_space, struct mm_struct *mm); + +// Creates any mappings necessary in this VA block between the two GPUs, in +// either direction. +// LOCKING: The caller must hold the va_block lock +NV_STATUS uvm_va_block_enable_peer(uvm_va_block_t *va_block, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1); + +// Unmaps all page tables in this VA block which have peer mappings between +// the two GPUs, in either direction. +// LOCKING: The caller must hold the va_block lock +void uvm_va_block_disable_peer(uvm_va_block_t *va_block, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1); + +// Unmap any mappings from GPU to the preferred location. +// +// The GPU has to be in UVM-Lite mode. +// +// LOCKING: The caller must hold the va_block lock +void uvm_va_block_unmap_preferred_location_uvm_lite(uvm_va_block_t *va_block, uvm_gpu_t *gpu); + +// Frees all memory under this block associated with this GPU. Any portion of +// the block which is resident on the GPU is evicted to sysmem before being +// freed. +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// LOCKING: This takes and releases the VA block lock. If mm != NULL, the caller +// must hold mm->mmap_lock in at least read mode. +void uvm_va_block_unregister_gpu(uvm_va_block_t *va_block, uvm_gpu_t *gpu, struct mm_struct *mm); + +// Unmaps all memory associated with the block and drops the ref count of the +// block. This allows the caller to free resources associated with this block +// regardless of the block's current ref count. Most importantly it allows the +// VA covered by this block to be immediately available for other page table +// mappings upon return. +// +// This clears block->va_range, so only the VA range destroy path should call +// it. Other paths with references on this block, specifically the eviction path +// which temporarily takes a reference to the block, must always check the block +// state after taking the block lock to see if their mapping is still in place. +// +// All of the unmap and state destruction steps are also performed when the ref +// count goes to 0, so this function only needs to be called if the block's +// resources need to be reclaimed immediately. +// +// The caller should not lock the block before calling this function. +// +// This performs a uvm_va_block_release. +void uvm_va_block_kill(uvm_va_block_t *va_block); + +// Exactly the same split semantics as uvm_va_range_split, including error +// handling. See that function's comments for details. +// +// new_va_block's va_range is set to new_va_range before any reverse mapping is +// established to the new block, but the caller is responsible for inserting the +// new block into the range. 
+NV_STATUS uvm_va_block_split(uvm_va_block_t *existing_va_block, + NvU64 new_end, + uvm_va_block_t **new_va_block, + uvm_va_range_t *new_va_range); + +// Exactly the same split semantics as uvm_va_block_split, including error +// handling except the existing_va_block block lock needs to be held and +// the new_va_block has to be preallocated. +// +// new_va_block's va_range is set to new_va_range before any reverse mapping is +// established to the new block, but the caller is responsible for inserting the +// new block into the range. +NV_STATUS uvm_va_block_split_locked(uvm_va_block_t *existing_va_block, + NvU64 new_end, + uvm_va_block_t *new_va_block, + uvm_va_range_t *new_va_range); + +// Handles a CPU fault in the given VA block, performing any operations +// necessary to establish a coherent CPU mapping (migrations, cache invalidates, +// etc.). +// +// Locking: +// - vma->vm_mm->mmap_lock must be held in at least read mode. Note, that +// might not be the same as current->mm->mmap_lock. +// - va_space lock must be held in at least read mode +// +// service_context->block_context.mm is ignored and vma->vm_mm is used instead. +// +// Returns NV_ERR_INVALID_ACCESS_TYPE if a CPU mapping to fault_addr cannot be +// accessed, for example because it's within a range group which is non- +// migratable. +NV_STATUS uvm_va_block_cpu_fault(uvm_va_block_t *va_block, + NvU64 fault_addr, + bool is_write, + uvm_service_block_context_t *service_context); + +// Performs any operations necessary to establish a coherent mapping +// (migrations, cache invalidates, etc.) in response to the given service block +// context +// +// Locking: +// - service_context->block_context.mm->mmap_lock must be held in at least +// read mode, if valid. +// - va_space lock must be held in at least read mode +// - va_block lock must be held +// +// If allocation-retry was required as part of the operation and was successful, +// NV_ERR_MORE_PROCESSING_REQUIRED is returned. In this case, the block's lock was +// unlocked and relocked. +// +// NV_WARN_MORE_PROCESSING_REQUIRED indicates that thrashing has been detected +// and the performance heuristics logic decided to throttle execution. +// Any other error code different than NV_OK indicates OOM or a global fatal +// error. +NV_STATUS uvm_va_block_service_locked(uvm_processor_id_t processor_id, + uvm_va_block_t *va_block, + uvm_va_block_retry_t *block_retry, + uvm_service_block_context_t *service_context); + +// Size of the block in bytes. Guaranteed to be a page-aligned value between +// PAGE_SIZE and UVM_VA_BLOCK_SIZE. +static inline NvU64 uvm_va_block_size(uvm_va_block_t *block) +{ + NvU64 size = block->end - block->start + 1; + UVM_ASSERT(PAGE_ALIGNED(size)); + UVM_ASSERT(size >= PAGE_SIZE); + UVM_ASSERT(size <= UVM_VA_BLOCK_SIZE); + return size; +} + +// Number of pages with PAGE_SIZE in the block +static inline size_t uvm_va_block_num_cpu_pages(uvm_va_block_t *block) +{ + return uvm_va_block_size(block) / PAGE_SIZE; +} + +// VA of the given page using CPU page size. page_index must be valid +static inline NvU64 uvm_va_block_cpu_page_address(uvm_va_block_t *block, uvm_page_index_t page_index) +{ + UVM_ASSERT(page_index < uvm_va_block_num_cpu_pages(block)); + return block->start + PAGE_SIZE * page_index; +} + +// Get the page physical address on the given GPU +// +// This will assert that GPU state is indeed present. 
+uvm_gpu_phys_address_t uvm_va_block_gpu_phys_page_address(uvm_va_block_t *va_block, uvm_page_index_t page_index, uvm_gpu_t *gpu); + +static bool uvm_va_block_contains_address(uvm_va_block_t *block, NvU64 address) +{ + return address >= block->start && address <= block->end; +} + +// Obtain a pointer to the uvm_va_block_test_t structure for the given VA +// block. If uvm_enable_builtin_tests is unset, NULL will be returned. +static uvm_va_block_test_t *uvm_va_block_get_test(uvm_va_block_t *va_block) +{ + if (uvm_enable_builtin_tests) + return &container_of(va_block, uvm_va_block_wrapper_t, block)->test; + + return NULL; +} + +// Get the page residency mask for a processor if it's known to be there. +// +// If the processor is a GPU, this will assert that GPU state is indeed present. +uvm_page_mask_t *uvm_va_block_resident_mask_get(uvm_va_block_t *block, uvm_processor_id_t processor); + +// Get the page mapped mask for a processor. The returned mask cannot be +// directly modified by the caller +// +// If the processor is a GPU, this will assert that GPU state is indeed present. +const uvm_page_mask_t *uvm_va_block_map_mask_get(uvm_va_block_t *block, uvm_processor_id_t processor); + +// VA block lookup functions. There are a number of permutations which might be +// useful, such as looking up the block from {va_space, va_range} x {addr, +// block index}. The ones implemented here and in uvm_va_range.h support the +// primary two use cases, which are: +// 1) Iterating over all VA blocks in a VA range. This uses block indices on the +// VA range: +// uvm_va_range_num_blocks +// uvm_va_range_block_index +// uvm_va_range_block +// uvm_va_range_block_create +// 2) Operating on a single VA block (fault). This looks up the block using the +// VA space and address: +// uvm_va_block_find +// uvm_va_block_find_create + +// Finds the UVM or HMM VA block containing addr, if any. The va_space->lock +// must be held in at least read mode. Return values: +// NV_ERR_INVALID_ADDRESS addr is not a UVM_VA_RANGE_TYPE_MANAGED va_range nor +// a HMM enabled VMA. +// +// NV_ERR_OBJECT_NOT_FOUND addr is valid but no block has been allocated to +// cover it yet +// +// NV_OK The block was returned successfully +NV_STATUS uvm_va_block_find(uvm_va_space_t *va_space, NvU64 addr, uvm_va_block_t **out_block); + +// Same as uvm_va_block_find except that the block is created if not found. +// If addr is covered by a UVM_VA_RANGE_TYPE_MANAGED va_range, a managed block +// will be created. Otherwise, if addr is not covered by any va_range, mm is +// non-NULL, and HMM is enabled in the va_space, an HMM block will be created. +// In either case, if mm is non-NULL, it must be retained and locked in at +// least read mode. Return values: +// NV_ERR_INVALID_ADDRESS addr is not a UVM_VA_RANGE_TYPE_MANAGED va_range nor +// a HMM enabled VMA. +// NV_ERR_NO_MEMORY memory could not be allocated. +NV_STATUS uvm_va_block_find_create(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 addr, + uvm_va_block_context_t *va_block_context, + uvm_va_block_t **out_block); + +// Same as uvm_va_block_find_create except that only UVM managed va_blocks are +// created if not already present in the VA range. +static NV_STATUS uvm_va_block_find_create_managed(uvm_va_space_t *va_space, + NvU64 addr, + uvm_va_block_t **out_block) +{ + return uvm_va_block_find_create(va_space, NULL, addr, NULL, out_block); +} + +// Look up a chunk backing a specific address within the VA block. Returns NULL if none. 
+uvm_gpu_chunk_t *uvm_va_block_lookup_gpu_chunk(uvm_va_block_t *va_block, uvm_gpu_t *gpu, NvU64 address); + +typedef enum +{ + UVM_MIGRATE_MODE_MAKE_RESIDENT, + UVM_MIGRATE_MODE_MAKE_RESIDENT_AND_MAP, +} uvm_migrate_mode_t; + +// Implementation of the UvmMigrate() API at the VA block scope. +// +// The out_tracker can be NULL. +// +// If do_mappings is false, mappings are not added after pages have been +// migrated. +// +// The caller needs to handle allocation-retry. va_block_retry can be NULL if +// the destination is the CPU. +// +// va_block_context must not be NULL. +// +// LOCKING: The caller must hold the va_block lock. If va_block_context->mm != +// NULL, va_block_context->mm->mmap_lock must be held in at least +// read mode. +NV_STATUS uvm_va_block_migrate_locked(uvm_va_block_t *va_block, + uvm_va_block_retry_t *va_block_retry, + uvm_va_block_context_t *va_block_context, + uvm_va_block_region_t region, + uvm_processor_id_t dest_id, + uvm_migrate_mode_t mode, + uvm_tracker_t *out_tracker); + +// Write block's data from a CPU buffer +// +// The [dst, dst + size) range has to fit within a single PAGE_SIZE page. +// +// The caller needs to support allocation-retry of page tables. +// +// LOCKING: The caller must hold the va_block lock +NV_STATUS uvm_va_block_write_from_cpu(uvm_va_block_t *va_block, + uvm_va_block_context_t *block_context, + NvU64 dst, + uvm_mem_t *src, + size_t size); + +// Read block's data into a CPU buffer +// +// The [src, src + size) range has to fit within a single PAGE_SIZE page. +// +// LOCKING: The caller must hold the va_block lock +NV_STATUS uvm_va_block_read_to_cpu(uvm_va_block_t *va_block, uvm_mem_t *dst, NvU64 src, size_t size); + +// Initialize va block retry tracking +void uvm_va_block_retry_init(uvm_va_block_retry_t *uvm_va_block_retry); + +// Deinitialize va block retry tracking after a block operation +// +// Frees all the remaining free chunks and unpins all the used chunks. +void uvm_va_block_retry_deinit(uvm_va_block_retry_t *uvm_va_block_retry, uvm_va_block_t *va_block); + +// Evict all chunks from the block that are subchunks of the passed in root_chunk. +// +// Add all the work tracking the eviction to the tracker. +// +// Returns NV_OK if the block is dead or doesn't have any subchunks of the +// root_chunk. +// +// LOCKING: The caller must hold the va_block lock +NV_STATUS uvm_va_block_evict_chunks(uvm_va_block_t *va_block, + uvm_gpu_t *gpu, + uvm_gpu_chunk_t *root_chunk, + uvm_tracker_t *tracker); + +NV_STATUS uvm_test_va_block_inject_error(UVM_TEST_VA_BLOCK_INJECT_ERROR_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_change_pte_mapping(UVM_TEST_CHANGE_PTE_MAPPING_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_va_block_info(UVM_TEST_VA_BLOCK_INFO_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_va_residency_info(UVM_TEST_VA_RESIDENCY_INFO_PARAMS *params, struct file *filp); + +// Compute the offset in system pages of addr from the start of va_block. +static uvm_page_index_t uvm_va_block_cpu_page_index(uvm_va_block_t *va_block, NvU64 addr) +{ + UVM_ASSERT(addr >= va_block->start); + UVM_ASSERT(addr <= va_block->end); + return (addr - va_block->start) / PAGE_SIZE; +} + +// Computes the size and index in the gpu_state chunks array of the GPU chunk +// which corresponds to the given page_index of the VA region. 
+size_t uvm_va_block_gpu_chunk_index_range(NvU64 start, + NvU64 size, + uvm_gpu_t *gpu, + uvm_page_index_t page_index, + uvm_chunk_size_t *out_chunk_size); + +// If there are any resident CPU pages in the block, mark them as dirty +void uvm_va_block_mark_cpu_dirty(uvm_va_block_t *va_block); + +// Sets the internal state required to handle fault cancellation +// +// This function may require allocating page tables to split big pages into 4K +// pages. If allocation-retry was required as part of the operation and was +// successful, NV_ERR_MORE_PROCESSING_REQUIRED is returned. In this case the +// block's lock was unlocked and relocked. +// +// LOCKING: The caller must hold the va_block lock. +NV_STATUS uvm_va_block_set_cancel(uvm_va_block_t *va_block, uvm_va_block_context_t *block_context, uvm_gpu_t *gpu); + +// +// uvm_va_block_region_t helpers +// + +static uvm_va_block_region_t uvm_va_block_region(uvm_page_index_t first, uvm_page_index_t outer) +{ + BUILD_BUG_ON(PAGES_PER_UVM_VA_BLOCK >= (1 << (sizeof(first) * 8))); + + UVM_ASSERT(first <= outer); + + return (uvm_va_block_region_t){ .first = first, .outer = outer }; +} + +static uvm_va_block_region_t uvm_va_block_region_for_page(uvm_page_index_t page_index) +{ + return uvm_va_block_region(page_index, page_index + 1); +} + +static size_t uvm_va_block_region_num_pages(uvm_va_block_region_t region) +{ + return region.outer - region.first; +} + +static NvU64 uvm_va_block_region_size(uvm_va_block_region_t region) +{ + return uvm_va_block_region_num_pages(region) * PAGE_SIZE; +} + +static NvU64 uvm_va_block_region_start(uvm_va_block_t *va_block, uvm_va_block_region_t region) +{ + return va_block->start + region.first * PAGE_SIZE; +} + +static NvU64 uvm_va_block_region_end(uvm_va_block_t *va_block, uvm_va_block_region_t region) +{ + return va_block->start + region.outer * PAGE_SIZE - 1; +} + +static bool uvm_va_block_region_contains_region(uvm_va_block_region_t region, uvm_va_block_region_t subregion) +{ + return subregion.first >= region.first && subregion.outer <= region.outer; +} + +static bool uvm_va_block_region_contains_page(uvm_va_block_region_t region, uvm_page_index_t page_index) +{ + return uvm_va_block_region_contains_region(region, uvm_va_block_region_for_page(page_index)); +} + +// Create a block range from a va block and start and end virtual addresses +// within the block. 
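+//
+// For example (illustrative only, assuming a 4KB PAGE_SIZE): for a block
+// starting at 0x200000, start == 0x201000 and end == 0x202fff produce the
+// region [1, 3), i.e. pages 1 and 2 of the block.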
+static uvm_va_block_region_t uvm_va_block_region_from_start_end(uvm_va_block_t *va_block, NvU64 start, NvU64 end) +{ + uvm_va_block_region_t region; + + UVM_ASSERT(start < end); + UVM_ASSERT(start >= va_block->start); + UVM_ASSERT(end <= va_block->end); + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(end + 1)); + + region.first = uvm_va_block_cpu_page_index(va_block, start); + region.outer = uvm_va_block_cpu_page_index(va_block, end) + 1; + + return region; +} + +static uvm_va_block_region_t uvm_va_block_region_from_start_size(uvm_va_block_t *va_block, NvU64 start, NvU64 size) +{ + return uvm_va_block_region_from_start_end(va_block, start, start + size - 1); +} + +static uvm_va_block_region_t uvm_va_block_region_from_block(uvm_va_block_t *va_block) +{ + return uvm_va_block_region(0, uvm_va_block_num_cpu_pages(va_block)); +} + +static bool uvm_page_mask_test(const uvm_page_mask_t *mask, uvm_page_index_t page_index) +{ + UVM_ASSERT(page_index < PAGES_PER_UVM_VA_BLOCK); + + return test_bit(page_index, mask->bitmap); +} + +static bool uvm_page_mask_test_and_set(uvm_page_mask_t *mask, uvm_page_index_t page_index) +{ + UVM_ASSERT(page_index < PAGES_PER_UVM_VA_BLOCK); + + return __test_and_set_bit(page_index, mask->bitmap); +} + +static bool uvm_page_mask_test_and_clear(uvm_page_mask_t *mask, uvm_page_index_t page_index) +{ + UVM_ASSERT(page_index < PAGES_PER_UVM_VA_BLOCK); + + return __test_and_clear_bit(page_index, mask->bitmap); +} + +static void uvm_page_mask_set(uvm_page_mask_t *mask, uvm_page_index_t page_index) +{ + UVM_ASSERT(page_index < PAGES_PER_UVM_VA_BLOCK); + + __set_bit(page_index, mask->bitmap); +} + +static void uvm_page_mask_clear(uvm_page_mask_t *mask, uvm_page_index_t page_index) +{ + UVM_ASSERT(page_index < PAGES_PER_UVM_VA_BLOCK); + + __clear_bit(page_index, mask->bitmap); +} + +static bool uvm_page_mask_region_test(const uvm_page_mask_t *mask, + uvm_va_block_region_t region, + uvm_page_index_t page_index) +{ + if (!uvm_va_block_region_contains_page(region, page_index)) + return false; + + return !mask || uvm_page_mask_test(mask, page_index); +} + +static NvU32 uvm_page_mask_region_weight(const uvm_page_mask_t *mask, uvm_va_block_region_t region) +{ + NvU32 weight_before = 0; + + if (region.first > 0) + weight_before = bitmap_weight(mask->bitmap, region.first); + + return bitmap_weight(mask->bitmap, region.outer) - weight_before; +} + +static bool uvm_page_mask_region_empty(const uvm_page_mask_t *mask, uvm_va_block_region_t region) +{ + return find_next_bit(mask->bitmap, region.outer, region.first) == region.outer; +} + +static bool uvm_page_mask_region_full(const uvm_page_mask_t *mask, uvm_va_block_region_t region) +{ + return find_next_zero_bit(mask->bitmap, region.outer, region.first) == region.outer; +} + +static void uvm_page_mask_region_fill(uvm_page_mask_t *mask, uvm_va_block_region_t region) +{ + bitmap_set(mask->bitmap, region.first, region.outer - region.first); +} + +static void uvm_page_mask_region_clear(uvm_page_mask_t *mask, uvm_va_block_region_t region) +{ + bitmap_clear(mask->bitmap, region.first, region.outer - region.first); +} + +static void uvm_page_mask_region_clear_outside(uvm_page_mask_t *mask, uvm_va_block_region_t region) +{ + if (region.first > 0) + bitmap_clear(mask->bitmap, 0, region.first); + if (region.outer < PAGES_PER_UVM_VA_BLOCK) + bitmap_clear(mask->bitmap, region.outer, PAGES_PER_UVM_VA_BLOCK - region.outer); +} + +static void uvm_page_mask_zero(uvm_page_mask_t *mask) +{ + bitmap_zero(mask->bitmap, PAGES_PER_UVM_VA_BLOCK); +} 
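+
+// Illustrative sketch (not part of the original header) of how the region and
+// page mask helpers above compose, e.g. to mark pages [4, 8) of a block and
+// then query a single page:
+//
+//     uvm_page_mask_t mask;
+//     uvm_va_block_region_t region = uvm_va_block_region(4, 8);
+//
+//     uvm_page_mask_zero(&mask);
+//     uvm_page_mask_region_fill(&mask, region);
+//     UVM_ASSERT(uvm_page_mask_test(&mask, 4));
+//     UVM_ASSERT(!uvm_page_mask_test(&mask, 8));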
+ +static bool uvm_page_mask_empty(const uvm_page_mask_t *mask) +{ + return bitmap_empty(mask->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static bool uvm_page_mask_full(const uvm_page_mask_t *mask) +{ + return bitmap_full(mask->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static bool uvm_page_mask_and(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in1, const uvm_page_mask_t *mask_in2) +{ + return bitmap_and(mask_out->bitmap, mask_in1->bitmap, mask_in2->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static bool uvm_page_mask_andnot(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in1, const uvm_page_mask_t *mask_in2) +{ + return bitmap_andnot(mask_out->bitmap, mask_in1->bitmap, mask_in2->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static void uvm_page_mask_or(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in1, const uvm_page_mask_t *mask_in2) +{ + bitmap_or(mask_out->bitmap, mask_in1->bitmap, mask_in2->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static void uvm_page_mask_complement(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in) +{ + bitmap_complement(mask_out->bitmap, mask_in->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static void uvm_page_mask_copy(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in) +{ + bitmap_copy(mask_out->bitmap, mask_in->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static NvU32 uvm_page_mask_weight(const uvm_page_mask_t *mask) +{ + return bitmap_weight(mask->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static bool uvm_page_mask_subset(const uvm_page_mask_t *subset, const uvm_page_mask_t *mask) +{ + return bitmap_subset(subset->bitmap, mask->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +static bool uvm_page_mask_init_from_region(uvm_page_mask_t *mask_out, + uvm_va_block_region_t region, + const uvm_page_mask_t *mask_in) +{ + uvm_page_mask_zero(mask_out); + uvm_page_mask_region_fill(mask_out, region); + + if (mask_in) + return uvm_page_mask_and(mask_out, mask_out, mask_in); + + return true; +} + +static void uvm_page_mask_shift_right(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in, unsigned shift) +{ + bitmap_shift_right(mask_out->bitmap, mask_in->bitmap, shift, PAGES_PER_UVM_VA_BLOCK); +} + +static void uvm_page_mask_shift_left(uvm_page_mask_t *mask_out, const uvm_page_mask_t *mask_in, unsigned shift) +{ + bitmap_shift_left(mask_out->bitmap, mask_in->bitmap, shift, PAGES_PER_UVM_VA_BLOCK); +} + +static bool uvm_page_mask_intersects(const uvm_page_mask_t *mask1, const uvm_page_mask_t *mask2) +{ + return bitmap_intersects(mask1->bitmap, mask2->bitmap, PAGES_PER_UVM_VA_BLOCK); +} + +// Print the given page mask on the given buffer using hex symbols. The +// minimum required size of the buffer is UVM_PAGE_MASK_PRINT_MIN_BUFFER_SIZE. 
+static void uvm_page_mask_print(const uvm_page_mask_t *mask, char *buffer) +{ + // There are two cases, which depend on PAGE_SIZE + if (PAGES_PER_UVM_VA_BLOCK > 32) { + NvLength current_long_idx = UVM_PAGE_MASK_WORDS - 1; + const char *buffer_end = buffer + UVM_PAGE_MASK_PRINT_MIN_BUFFER_SIZE; + + UVM_ASSERT(sizeof(*mask->bitmap) == 8); + + // For 4KB pages, we need to iterate over multiple words + do { + NvU64 current_long = mask->bitmap[current_long_idx]; + + buffer += sprintf(buffer, "%016llx", current_long); + if (current_long_idx != 0) + buffer += sprintf(buffer, ":"); + } while (current_long_idx-- != 0); + + UVM_ASSERT(buffer <= buffer_end); + } + else { + NvU32 value = (unsigned)*mask->bitmap; + + UVM_ASSERT(PAGES_PER_UVM_VA_BLOCK == 32); + + // For 64KB pages, a single print suffices + sprintf(buffer, "%08x", value); + } +} + +static uvm_va_block_region_t uvm_va_block_first_subregion_in_mask(uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + uvm_va_block_region_t subregion; + + if (!page_mask) + return region; + + subregion.first = find_next_bit(page_mask->bitmap, region.outer, region.first); + subregion.outer = find_next_zero_bit(page_mask->bitmap, region.outer, subregion.first + 1); + return subregion; +} + +static uvm_va_block_region_t uvm_va_block_next_subregion_in_mask(uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + uvm_va_block_region_t previous_subregion) +{ + uvm_va_block_region_t subregion; + + if (!page_mask) { + subregion.first = region.outer; + subregion.outer = region.outer; + return subregion; + } + + subregion.first = find_next_bit(page_mask->bitmap, region.outer, previous_subregion.outer + 1); + subregion.outer = find_next_zero_bit(page_mask->bitmap, region.outer, subregion.first + 1); + return subregion; +} + +// Iterate over contiguous subregions of the region given by the page mask. +// If the page mask is NULL then it behaves as if it was a fully set mask and +// the only subregion iterated over will be the region itself. 
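+//
+// Illustrative usage (a sketch, not part of the original header):
+//
+//     uvm_va_block_region_t subregion;
+//
+//     for_each_va_block_subregion_in_mask(subregion, page_mask, region) {
+//         // Each subregion is a maximal run of contiguous set pages within
+//         // region (or region itself if page_mask is NULL).
+//     }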
+#define for_each_va_block_subregion_in_mask(subregion, page_mask, region) \ + for ((subregion) = uvm_va_block_first_subregion_in_mask((region), (page_mask)); \ + (subregion).first != (region).outer; \ + (subregion) = uvm_va_block_next_subregion_in_mask((region), (page_mask), (subregion))) + +static uvm_page_index_t uvm_va_block_first_page_in_mask(uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + if (page_mask) + return find_next_bit(page_mask->bitmap, region.outer, region.first); + else + return region.first; +} + +static uvm_page_index_t uvm_va_block_next_page_in_mask(uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + uvm_page_index_t previous_page) +{ + if (page_mask) { + return find_next_bit(page_mask->bitmap, region.outer, previous_page + 1); + } + else { + UVM_ASSERT(previous_page < region.outer); + return previous_page + 1; + } +} + +static uvm_page_index_t uvm_va_block_first_unset_page_in_mask(uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask) +{ + if (page_mask) + return find_next_zero_bit(page_mask->bitmap, region.outer, region.first); + else + return region.first; +} + +static uvm_page_index_t uvm_va_block_next_unset_page_in_mask(uvm_va_block_region_t region, + const uvm_page_mask_t *page_mask, + uvm_page_index_t previous_page) +{ + if (page_mask) { + return find_next_zero_bit(page_mask->bitmap, region.outer, previous_page + 1); + } + else { + UVM_ASSERT(previous_page < region.outer); + return previous_page + 1; + } +} + +static NvU64 uvm_reverse_map_start(const uvm_reverse_map_t *reverse_map) +{ + return uvm_va_block_cpu_page_address(reverse_map->va_block, reverse_map->region.first); +} + +static NvU64 uvm_reverse_map_end(const uvm_reverse_map_t *reverse_map) +{ + return uvm_va_block_cpu_page_address(reverse_map->va_block, reverse_map->region.first) + + uvm_va_block_region_size(reverse_map->region) - 1; +} + +// Iterate over contiguous pages of the region given by the page mask. +// If the page mask is NULL then it behaves as if it was a fully set mask and +// it will iterate over all pages within the region. +#define for_each_va_block_page_in_region_mask(page_index, page_mask, region) \ + for ((page_index) = uvm_va_block_first_page_in_mask((region), (page_mask)); \ + (page_index) != (region).outer; \ + (page_index) = uvm_va_block_next_page_in_mask((region), (page_mask), (page_index))) + +// Same as for_each_va_block_page_in_region_mask, but the region spans the +// whole given VA block +#define for_each_va_block_page_in_mask(page_index, page_mask, va_block) \ + for_each_va_block_page_in_region_mask(page_index, page_mask, uvm_va_block_region_from_block(va_block)) + +// Similar to for_each_va_block_page_in_region_mask, but iterating over pages +// whose bit is unset. +#define for_each_va_block_unset_page_in_region_mask(page_index, page_mask, region) \ + for ((page_index) = uvm_va_block_first_unset_page_in_mask((region), (page_mask)); \ + (page_index) != (region).outer; \ + (page_index) = uvm_va_block_next_unset_page_in_mask((region), (page_mask), (page_index))) + +// Similar to for_each_va_block_page_in_mask, but iterating over pages whose +// bit is unset. 
+#define for_each_va_block_unset_page_in_mask(page_index, page_mask, va_block) \ + for_each_va_block_unset_page_in_region_mask(page_index, page_mask, uvm_va_block_region_from_block(va_block)) + +// Iterate over all pages within the given region +#define for_each_va_block_page_in_region(page_index, region) \ + for_each_va_block_page_in_region_mask((page_index), NULL, (region)) + +// Iterate over all pages within the given VA block +#define for_each_va_block_page(page_index, va_block) \ + for_each_va_block_page_in_region((page_index), uvm_va_block_region_from_block(va_block)) + +static void uvm_va_block_bitmap_tree_init_from_page_count(uvm_va_block_bitmap_tree_t *bitmap_tree, size_t page_count) +{ + bitmap_tree->leaf_count = page_count; + bitmap_tree->level_count = ilog2(roundup_pow_of_two(page_count)) + 1; + uvm_page_mask_zero(&bitmap_tree->pages); +} + +static void uvm_va_block_bitmap_tree_init(uvm_va_block_bitmap_tree_t *bitmap_tree, uvm_va_block_t *va_block) +{ + size_t num_pages = uvm_va_block_num_cpu_pages(va_block); + uvm_va_block_bitmap_tree_init_from_page_count(bitmap_tree, num_pages); +} + +static void uvm_va_block_bitmap_tree_iter_init(const uvm_va_block_bitmap_tree_t *bitmap_tree, + uvm_page_index_t page_index, + uvm_va_block_bitmap_tree_iter_t *iter) +{ + UVM_ASSERT(bitmap_tree->level_count > 0); + UVM_ASSERT_MSG(page_index < bitmap_tree->leaf_count, + "%zd vs %zd", + (size_t)page_index, + (size_t)bitmap_tree->leaf_count); + + iter->level_idx = bitmap_tree->level_count - 1; + iter->node_idx = page_index; +} + +static uvm_va_block_region_t uvm_va_block_bitmap_tree_iter_get_range(const uvm_va_block_bitmap_tree_t *bitmap_tree, + const uvm_va_block_bitmap_tree_iter_t *iter) +{ + NvU16 range_leaves = uvm_perf_tree_iter_leaf_range(bitmap_tree, iter); + NvU16 range_start = uvm_perf_tree_iter_leaf_range_start(bitmap_tree, iter); + uvm_va_block_region_t subregion = uvm_va_block_region(range_start, range_start + range_leaves); + + UVM_ASSERT(iter->level_idx >= 0); + UVM_ASSERT(iter->level_idx < bitmap_tree->level_count); + + return subregion; +} + +static NvU16 uvm_va_block_bitmap_tree_iter_get_count(const uvm_va_block_bitmap_tree_t *bitmap_tree, + const uvm_va_block_bitmap_tree_iter_t *iter) +{ + uvm_va_block_region_t subregion = uvm_va_block_bitmap_tree_iter_get_range(bitmap_tree, iter); + + return uvm_page_mask_region_weight(&bitmap_tree->pages, subregion); +} + +#define uvm_va_block_bitmap_tree_traverse_counters(counter,tree,page,iter) \ + for (uvm_va_block_bitmap_tree_iter_init((tree), (page), (iter)), \ + (counter) = uvm_va_block_bitmap_tree_iter_get_count((tree), (iter)); \ + (iter)->level_idx >= 0; \ + (counter) = --(iter)->level_idx < 0? 0: \ + uvm_va_block_bitmap_tree_iter_get_count((tree), (iter))) + +// Return the block region covered by the given chunk size. page_index must be +// any page within the block known to be covered by the chunk. +static uvm_va_block_region_t uvm_va_block_chunk_region(uvm_va_block_t *block, + uvm_chunk_size_t chunk_size, + uvm_page_index_t page_index) +{ + NvU64 page_addr = uvm_va_block_cpu_page_address(block, page_index); + NvU64 chunk_start_addr = UVM_ALIGN_DOWN(page_addr, chunk_size); + uvm_page_index_t first = (uvm_page_index_t)((chunk_start_addr - block->start) / PAGE_SIZE); + return uvm_va_block_region(first, first + (chunk_size / PAGE_SIZE)); +} + +// +// Helpers for page state (permissions, size, residency) +// + +// Compute the gpus that have at least the given access permissions for the +// range described by region and page_mask. 
The function sets the bit if any +// page in the region has the permissions. +void uvm_va_block_region_authorized_gpus(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_prot_t access_permission, + uvm_processor_mask_t *authorized_gpus); + +// Compute the processors that have at least the given access permissions for the +// range described by region and page_mask. The function sets the bit if any +// page in the region has the permissions. +void uvm_va_block_region_authorized_processors(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_prot_t access_permission, + uvm_processor_mask_t *authorized_processors); + +void uvm_va_block_page_authorized_gpus(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_prot_t access_permission, + uvm_processor_mask_t *authorized_gpus); + +void uvm_va_block_page_authorized_processors(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_prot_t access_permission, + uvm_processor_mask_t *authorized_processors); + +bool uvm_va_block_is_gpu_authorized_on_whole_region(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_gpu_id_t gpu_id, + uvm_prot_t required_prot); + +bool uvm_va_block_is_processor_authorized_on_whole_region(uvm_va_block_t *va_block, + uvm_va_block_region_t region, + uvm_processor_id_t processor_id, + uvm_prot_t required_prot); + +bool uvm_va_block_page_is_gpu_authorized(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_gpu_id_t gpu_id, + uvm_prot_t required_prot); + +bool uvm_va_block_page_is_processor_authorized(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t processor_id, + uvm_prot_t required_prot); + +// Compute the gpus that have a copy of the given page resident in their memory +void uvm_va_block_page_resident_gpus(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_mask_t *resident_gpus); + +// Compute the processors that have a copy of the given page resident in their memory +void uvm_va_block_page_resident_processors(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_mask_t *resident_processors); + +// Count how many processors have a copy of the given page resident in their memory +NvU32 uvm_va_block_page_resident_processors_count(uvm_va_block_t *va_block, uvm_page_index_t page_index); + +// Get the processor with a resident copy of a page closest to the given processor +uvm_processor_id_t uvm_va_block_page_get_closest_resident(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t processor); + +uvm_processor_id_t uvm_va_block_page_get_closest_resident_in_mask(uvm_va_block_t *va_block, + uvm_page_index_t page_index, + uvm_processor_id_t processor, + const uvm_processor_mask_t *processor_mask); + +// Get CPU page size or 0 if it is not mapped +NvU32 uvm_va_block_page_size_cpu(uvm_va_block_t *va_block, uvm_page_index_t page_index); + +// Get GPU page size or 0 if it is not mapped on the given GPU +NvU32 uvm_va_block_page_size_gpu(uvm_va_block_t *va_block, uvm_gpu_id_t gpu_id, uvm_page_index_t page_index); + +// Get page size or 0 if it is not mapped on the given processor +static NvU32 uvm_va_block_page_size_processor(uvm_va_block_t *va_block, + uvm_processor_id_t processor_id, + uvm_page_index_t page_index) +{ + if (UVM_ID_IS_CPU(processor_id)) + return uvm_va_block_page_size_cpu(va_block, page_index); + else + return uvm_va_block_page_size_gpu(va_block, processor_id, page_index); +} + +// Returns the big page size for the GPU VA space of the block +NvU32 
uvm_va_block_gpu_big_page_size(uvm_va_block_t *va_block, uvm_gpu_t *gpu);
+
+// Returns the number of big pages in the VA block for the given size
+size_t uvm_va_block_num_big_pages(uvm_va_block_t *va_block, NvU32 big_page_size);
+
+// Returns the number of big pages in the VA block for the big page size on the
+// given GPU
+static size_t uvm_va_block_gpu_num_big_pages(uvm_va_block_t *va_block, uvm_gpu_t *gpu)
+{
+    return uvm_va_block_num_big_pages(va_block, uvm_va_block_gpu_big_page_size(va_block, gpu));
+}
+
+// Returns the start address of the given big page index and big page size
+NvU64 uvm_va_block_big_page_addr(uvm_va_block_t *va_block, size_t big_page_index, NvU32 big_page_size);
+
+// Returns the region [start, end] of the given big page index and big page size
+uvm_va_block_region_t uvm_va_block_big_page_region(uvm_va_block_t *va_block,
+                                                   size_t big_page_index,
+                                                   NvU32 big_page_size);
+
+// Returns the largest sub-region of [start, end] which can fit big pages. If
+// the region cannot fit any big pages, an invalid region (0, 0) is returned.
+uvm_va_block_region_t uvm_va_block_big_page_region_all(uvm_va_block_t *va_block, NvU32 big_page_size);
+
+// Returns the big page index (the bit index within
+// uvm_va_block_gpu_state_t::big_ptes) corresponding to page_index. If
+// page_index cannot be covered by a big PTE due to alignment or block size,
+// MAX_BIG_PAGES_PER_UVM_VA_BLOCK is returned.
+size_t uvm_va_block_big_page_index(uvm_va_block_t *va_block, uvm_page_index_t page_index, NvU32 big_page_size);
+
+// Returns the new residency for a page that faulted or triggered access
+// counter notifications. The read_duplicate output parameter indicates whether
+// the page meets the requirements to be read-duplicated.
+uvm_processor_id_t uvm_va_block_select_residency(uvm_va_block_t *va_block,
+                                                 uvm_page_index_t page_index,
+                                                 uvm_processor_id_t processor_id,
+                                                 NvU32 access_type_mask,
+                                                 uvm_va_policy_t *policy,
+                                                 const uvm_perf_thrashing_hint_t *thrashing_hint,
+                                                 uvm_service_operation_t operation,
+                                                 bool *read_duplicate);
+
+// Returns the maximum mapping protection for processor_id that will not require
+// any permission revocation on the rest of the processors.
+uvm_prot_t uvm_va_block_page_compute_highest_permission(uvm_va_block_t *va_block,
+                                                        uvm_processor_id_t processor_id,
+                                                        uvm_page_index_t page_index);
+
+// A helper macro for handling allocation-retry
+//
+// The macro takes a VA block, a uvm_va_block_retry_t struct and a function call
+// to retry as long as it returns NV_ERR_MORE_PROCESSING_REQUIRED.
+//
+// block_retry can be NULL if it's not necessary for the function call,
+// otherwise it will be initialized and deinitialized by the macro.
+//
+// The macro also locks and unlocks the block's lock internally, as the block's
+// lock is expected to have been unlocked and relocked whenever the function
+// call returns NV_ERR_MORE_PROCESSING_REQUIRED. This makes it clear that the
+// block's state is not locked across these calls.
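+//
+// Illustrative usage (a sketch only; block_populate_region() stands in for any
+// hypothetical callee that can return NV_ERR_MORE_PROCESSING_REQUIRED):
+//
+//     uvm_va_block_retry_t block_retry;
+//     NV_STATUS status;
+//
+//     status = UVM_VA_BLOCK_LOCK_RETRY(va_block,
+//                                      &block_retry,
+//                                      block_populate_region(va_block, &block_retry, region));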
+#define UVM_VA_BLOCK_LOCK_RETRY(va_block, block_retry, call) ({ \ + NV_STATUS status; \ + uvm_va_block_t *__block = (va_block); \ + uvm_va_block_retry_t *__retry = (block_retry); \ + \ + uvm_va_block_retry_init(__retry); \ + \ + uvm_mutex_lock(&__block->lock); \ + \ + do { \ + status = (call); \ + } while (status == NV_ERR_MORE_PROCESSING_REQUIRED); \ + \ + uvm_mutex_unlock(&__block->lock); \ + \ + uvm_va_block_retry_deinit(__retry, __block); \ + \ + status; \ +}) + +// A helper macro for handling allocation-retry +// +// The macro takes a VA block, uvm_va_block_retry_t struct and a function call +// to retry as long as it returns NV_ERR_MORE_PROCESSING_REQUIRED. +// +// block_retry can be NULL if it's not necessary for the function call, +// otherwise it will be initialized and deinitialized by the macro. +// +// This macro, as opposed to UVM_VA_BLOCK_LOCK_RETRY(), expects the block lock +// to be already taken. Notably the block's lock might be unlocked and relocked +// as part of the call. +#define UVM_VA_BLOCK_RETRY_LOCKED(va_block, block_retry, call) ({ \ + NV_STATUS status; \ + uvm_va_block_t *__block = (va_block); \ + uvm_va_block_retry_t *__retry = (block_retry); \ + \ + uvm_va_block_retry_init(__retry); \ + \ + uvm_assert_mutex_locked(&__block->lock); \ + \ + do { \ + status = (call); \ + } while (status == NV_ERR_MORE_PROCESSING_REQUIRED); \ + \ + uvm_va_block_retry_deinit(__retry, __block); \ + \ + status; \ +}) + +#endif // __UVM_VA_BLOCK_H__ diff --git a/kernel-open/nvidia-uvm/uvm_va_block_test.c b/kernel-open/nvidia-uvm/uvm_va_block_test.c new file mode 100644 index 000000000..138963a07 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_block_test.c @@ -0,0 +1,112 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_test.h" +#include "uvm_test_ioctl.h" +#include "uvm_va_block.h" +#include "uvm_va_space.h" +#include "uvm_mmu.h" + +static NV_STATUS test_chunk_index_range(NvU64 start, NvU64 size, uvm_gpu_t *gpu) +{ + size_t chunk_index, last_chunk_index = 0; + uvm_chunk_size_t chunk_size, test_chunk_size; + NvU64 addr, next_addr, outer_addr; + + if (fatal_signal_pending(current)) + return NV_ERR_SIGNAL_PENDING; + + outer_addr = start + size; + + for (addr = start; addr < outer_addr;) { + uvm_page_index_t start_page_index = (uvm_page_index_t)((addr - start) / PAGE_SIZE); + + chunk_index = uvm_va_block_gpu_chunk_index_range(start, size, gpu, start_page_index, &chunk_size); + if (addr == start) + TEST_CHECK_RET(chunk_index == 0); + else + TEST_CHECK_RET(chunk_index == last_chunk_index + 1); + + last_chunk_index = chunk_index; + + TEST_CHECK_RET(chunk_size >= PAGE_SIZE); + TEST_CHECK_RET(IS_ALIGNED(addr, chunk_size)); + next_addr = addr + chunk_size; + TEST_CHECK_RET(next_addr <= outer_addr); + + // Verify that this is the largest allowed chunk size + for_each_chunk_size_rev(test_chunk_size, gpu->parent->mmu_user_chunk_sizes) { + if (IS_ALIGNED(addr, test_chunk_size) && addr + test_chunk_size <= outer_addr) { + TEST_CHECK_RET(test_chunk_size == chunk_size); + break; + } + } + + // Check that every page covered by this chunk returns the same values + for (addr += PAGE_SIZE; addr < next_addr; addr += PAGE_SIZE) { + uvm_page_index_t page_index = (uvm_page_index_t)((addr - start) / PAGE_SIZE); + last_chunk_index = uvm_va_block_gpu_chunk_index_range(start, size, gpu, page_index, &test_chunk_size); + TEST_CHECK_RET(last_chunk_index == chunk_index); + TEST_CHECK_RET(test_chunk_size == chunk_size); + } + } + + return NV_OK; +} + +static NV_STATUS test_chunk_index(uvm_gpu_t *gpu) +{ + // Fake a random address aligned to a block boundary + const NvU64 start = 17 * UVM_VA_BLOCK_SIZE; + uvm_va_block_region_t region; + + for (region.first = 0; region.first < PAGES_PER_UVM_VA_BLOCK; ++region.first) { + for (region.outer = region.first + 1; region.outer <= PAGES_PER_UVM_VA_BLOCK; ++region.outer) { + NV_STATUS status = test_chunk_index_range(start + region.first * PAGE_SIZE, + uvm_va_block_region_size(region), + gpu); + if (status != NV_OK) + return status; + } + } + + return NV_OK; +} + +NV_STATUS uvm_test_va_block(UVM_TEST_VA_BLOCK_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_gpu_t *gpu; + NV_STATUS status = NV_OK; + + uvm_va_space_down_read(va_space); + + for_each_va_space_gpu(gpu, va_space) + TEST_NV_CHECK_GOTO(test_chunk_index(gpu), out); + +out: + uvm_va_space_up_read(va_space); + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_va_block_types.h b/kernel-open/nvidia-uvm/uvm_va_block_types.h new file mode 100644 index 000000000..d83862d66 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_block_types.h @@ -0,0 +1,297 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the 
Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_VA_BLOCK_TYPES_H__ +#define __UVM_VA_BLOCK_TYPES_H__ + +#include "uvm_common.h" +#include "uvm_pte_batch.h" +#include "uvm_tlb_batch.h" + +// UVM_VA_BLOCK_BITS is 21, meaning the maximum block size is 2MB. Rationale: +// - 2MB matches the largest Pascal GPU page size so it's a natural fit +// - 2MB won't span more than one PDE on any chip, so the VA blocks never need +// to track more than a single GPU PDE. +// - 2MB is a decent tradeoff between memory overhead and serialization +// contention. +// +#define UVM_VA_BLOCK_BITS 21 + +// Max size of a block in bytes +#define UVM_VA_BLOCK_SIZE (1ULL << UVM_VA_BLOCK_BITS) + +#define UVM_VA_BLOCK_ALIGN_DOWN(addr) UVM_ALIGN_DOWN(addr, UVM_VA_BLOCK_SIZE) +#define UVM_VA_BLOCK_ALIGN_UP(addr) UVM_ALIGN_UP(addr, UVM_VA_BLOCK_SIZE) + +#define PAGES_PER_UVM_VA_BLOCK (UVM_VA_BLOCK_SIZE / PAGE_SIZE) + +#define UVM_MIN_BIG_PAGE_SIZE UVM_PAGE_SIZE_64K +#define MAX_BIG_PAGES_PER_UVM_VA_BLOCK (UVM_VA_BLOCK_SIZE / UVM_MIN_BIG_PAGE_SIZE) + +// Prefetch heuristics shift the VA Block page mask so that it is always +// aligned to big page granularity. Big page is guaranteed not to exceed +// UVM_VA_BLOCK_SIZE, so it will use 2 * PAGES_PER_UVM_VA_BLOCK pages at +// most. Note that uvm_page_index_t needs to be able to hold outer page +// indices (one beyond the last one), for example in uvm_va_block_region_t. +#if (2 * PAGES_PER_UVM_VA_BLOCK) <= NV_U8_MAX + typedef NvU8 uvm_page_index_t; +#elif (2 * PAGES_PER_UVM_VA_BLOCK) <= NV_U16_MAX + typedef NvU16 uvm_page_index_t; +#else + #warning "Suspicious value for PAGES_PER_UVM_VA_BLOCK" + typedef NvU32 uvm_page_index_t; +#endif + +// Encapsulates a [first, outer) region of pages within a va block +typedef struct +{ + // Page indices within the va block + uvm_page_index_t first; + uvm_page_index_t outer; +} uvm_va_block_region_t; + +typedef struct +{ + DECLARE_BITMAP(bitmap, PAGES_PER_UVM_VA_BLOCK); +} uvm_page_mask_t; + +// Encapsulates a counter tree built on top of a page mask bitmap in +// which each leaf represents a page in the block. It contains +// leaf_count and level_count so that it can use some macros for +// perf trees +typedef struct +{ + uvm_page_mask_t pages; + + NvU16 leaf_count; + + NvU8 level_count; +} uvm_va_block_bitmap_tree_t; + +// Iterator for the bitmap tree. It contains level_idx and node_idx so +// that it can use some macros for perf trees +typedef struct +{ + s8 level_idx; + + uvm_page_index_t node_idx; +} uvm_va_block_bitmap_tree_iter_t; + +// When updating GPU PTEs, this struct describes the new arrangement of PTE +// sizes. It is calculated before the operation is applied so we know which PTE +// sizes to allocate. +// +// This only decribes the new layout. 
The operation page mask describes the new +// permissions of each of these PTEs. +typedef struct +{ + // Whether the new PTE should remain 2m (if already 2m) or merged to 2m. + // The meaning is the same as uvm_va_block_gpu_state_t::pte_is_2m. If this + // is set, the other fields can be ignored. + bool pte_is_2m; + + // Whether the operation requires writing 4k PTEs and thus needs them + // allocated. Mutually exclusive to pte_is_2m, but not to big_ptes. + bool needs_4k; + + // These are the PTEs which will be big after the operation is done. This + // field will become the new value of uvm_va_block_gpu_state_t::big_ptes, so + // it contains both those big PTEs which are being modified by the + // operation, and any pre-existing big PTEs which remain unchanged. The + // latter will not have the corresponding bit set in big_ptes_covered. + DECLARE_BITMAP(big_ptes, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + // These are the big PTE regions which the operation is touching. These may + // or may not be big PTEs: use the big_ptes bitmap to determine that. For + // example, a bit set here but not in big_ptes means that the PTE size for + // that region should be 4k, and that some of those 4k PTEs will be written + // by the operation. + DECLARE_BITMAP(big_ptes_covered, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); + + // These are the big PTE regions which will no longer have any valid + // mappings after the operation. Only the bits which are set in + // big_ptes_covered are valid. + DECLARE_BITMAP(big_ptes_fully_unmapped, MAX_BIG_PAGES_PER_UVM_VA_BLOCK); +} uvm_va_block_new_pte_state_t; + +// Event that triggered the call to uvm_va_block_make_resident/ +// uvm_va_block_make_resident_read_duplicate +typedef enum +{ + UVM_MAKE_RESIDENT_CAUSE_REPLAYABLE_FAULT, + UVM_MAKE_RESIDENT_CAUSE_NON_REPLAYABLE_FAULT, + UVM_MAKE_RESIDENT_CAUSE_ACCESS_COUNTER, + UVM_MAKE_RESIDENT_CAUSE_PREFETCH, + UVM_MAKE_RESIDENT_CAUSE_EVICTION, + UVM_MAKE_RESIDENT_CAUSE_API_TOOLS, + UVM_MAKE_RESIDENT_CAUSE_API_MIGRATE, + UVM_MAKE_RESIDENT_CAUSE_API_SET_RANGE_GROUP, + UVM_MAKE_RESIDENT_CAUSE_API_HINT, + + UVM_MAKE_RESIDENT_CAUSE_MAX +} uvm_make_resident_cause_t; + +// Page masks are printed using hex digits printing last to first from left to +// right. For readability, a colon is added to separate each group of pages +// stored in the same word of the bitmap. +#define UVM_PAGE_MASK_WORDS (PAGES_PER_UVM_VA_BLOCK / BITS_PER_LONG) +#define UVM_PAGE_MASK_PRINT_NUM_COLONS (UVM_PAGE_MASK_WORDS > 0? UVM_PAGE_MASK_WORDS - 1 : 0) +#define UVM_PAGE_MASK_PRINT_MIN_BUFFER_SIZE (PAGES_PER_UVM_VA_BLOCK / 4 + UVM_PAGE_MASK_PRINT_NUM_COLONS + 1) + +typedef struct +{ + // Pages that need to be mapped with the corresponding protection + uvm_page_mask_t page_mask; + + // Number of pages that need to be mapped with the corresponding + // protections. This is the same value as the result of + // bitmap_weight(page_mask) + unsigned count; +} uvm_prot_page_mask_array_t[UVM_PROT_MAX - 1]; + +// In the worst case some VA block operations require more state than we should +// reasonably store on the stack. Instead, we dynamically allocate VA block +// contexts. These are used for almost all operations on VA blocks. +typedef struct +{ + // Available as scratch space for the caller. Not used by any of the VA + // block APIs. + uvm_page_mask_t caller_page_mask; + + // Available as scratch space for the internal APIs. This is like a caller- + // save register: it shouldn't be used across function calls which also take + // this block_context. 
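+    // For example, if a hypothetical helper_a(block_context) calls
+    // helper_b(block_context), helper_a cannot assume that scratch_page_mask
+    // still holds its value once helper_b returns.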
+ uvm_page_mask_t scratch_page_mask; + + // State used by uvm_va_block_make_resident + struct uvm_make_resident_context_struct + { + // Masks used internally + uvm_page_mask_t page_mask; + uvm_page_mask_t copy_resident_pages_between_mask; + uvm_page_mask_t pages_staged; + uvm_page_mask_t pages_migrated; + + // Out mask filled in by uvm_va_block_make_resident to indicate which + // pages actually changed residency. + uvm_page_mask_t pages_changed_residency; + + // Out mask of all processors involved in the migration either as + // source, destination or the processor performing the copy. + // Used to perform ECC checks after the migration is done. + uvm_processor_mask_t all_involved_processors; + + // Final residency for the data. This is useful for callees to know if + // a migration is part of a staging copy + uvm_processor_id_t dest_id; + + // Event that triggered the call + uvm_make_resident_cause_t cause; + } make_resident; + + // State used by the mapping APIs (unmap, map, revoke). This could be used + // at the same time as the state in make_resident. + struct + { + // Master mask used by uvm_va_block_map and uvm_va_block_unmap, but + // they are never called concurrently. Bits are removed as the operation + // progresses. + uvm_page_mask_t map_running_page_mask; + + // Master mask used by uvm_va_block_revoke. It can be used concurrently + // with map_running_page_mask since revoke calls unmap and map. Bits + // are removed as the operation progresses. + uvm_page_mask_t revoke_running_page_mask; + + uvm_page_mask_t page_mask; + uvm_page_mask_t filtered_page_mask; + uvm_page_mask_t migratable_mask; + + uvm_va_block_new_pte_state_t new_pte_state; + + uvm_pte_batch_t pte_batch; + uvm_tlb_batch_t tlb_batch; + + // Event that triggered the call to the mapping function + UvmEventMapRemoteCause cause; + } mapping; + + // Used when adding page mappings with using different protections + uvm_prot_page_mask_array_t mask_by_prot; + + struct + { + uvm_page_mask_t running_page_mask; + } update_read_duplicated_pages; + + // mm to use for the operation. If this is non-NULL, the caller guarantees + // that the mm will be valid (reference held) for the duration of the + // block operation. + // + // If this is NULL, the block operation skips anything which would require + // the mm, such as creating CPU mappings. + struct mm_struct *mm; + + uvm_va_policy_t *policy; + + +#if UVM_IS_CONFIG_HMM() + struct + { + // Cached VMA pointer. This is only valid while holding the mmap_lock. 
+ struct vm_area_struct *vma; + } hmm; +#endif + + + // Convenience buffer for page mask prints + char page_mask_string_buffer[UVM_PAGE_MASK_PRINT_MIN_BUFFER_SIZE]; +} uvm_va_block_context_t; + +typedef enum +{ + UVM_VA_BLOCK_TRANSFER_MODE_MOVE = 1, + UVM_VA_BLOCK_TRANSFER_MODE_COPY = 2 +} uvm_va_block_transfer_mode_t; + +struct uvm_reverse_map_struct +{ + // VA block where the VA region of this Phys/DMA -> Virt translation + // belongs to + uvm_va_block_t *va_block; + + // VA block region covered by this translation + uvm_va_block_region_t region; + + // Processor the physical memory range belongs to + uvm_processor_id_t owner; +}; + +typedef enum +{ + UVM_SERVICE_OPERATION_REPLAYABLE_FAULTS, + UVM_SERVICE_OPERATION_NON_REPLAYABLE_FAULTS, + UVM_SERVICE_OPERATION_ACCESS_COUNTERS, +} uvm_service_operation_t; + +#endif diff --git a/kernel-open/nvidia-uvm/uvm_va_policy.c b/kernel-open/nvidia-uvm/uvm_va_policy.c new file mode 100644 index 000000000..770a9c06b --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_policy.c @@ -0,0 +1,469 @@ +/******************************************************************************* + Copyright (c) 2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_types.h" +#include "uvm_va_policy.h" +#include "uvm_va_block.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" + +uvm_va_policy_t uvm_va_policy_default __read_mostly = { + .preferred_location = UVM_ID_INVALID, + .read_duplication = UVM_READ_DUPLICATION_UNSET, +}; + +bool uvm_va_policy_is_read_duplicate(uvm_va_policy_t *policy, uvm_va_space_t *va_space) +{ + return policy->read_duplication == UVM_READ_DUPLICATION_ENABLED && + uvm_va_space_can_read_duplicate(va_space, NULL); +} + +uvm_va_policy_t *uvm_va_policy_get(uvm_va_block_t *va_block, NvU64 addr) +{ + uvm_assert_mutex_locked(&va_block->lock); + + if (uvm_va_block_is_hmm(va_block)) { + uvm_va_policy_node_t *node = uvm_va_policy_node_find(va_block, addr); + + return node ? 
&node->policy : &uvm_va_policy_default; + } + else + return uvm_va_range_get_policy(va_block->va_range); +} + +#if UVM_IS_CONFIG_HMM() + +static struct kmem_cache *g_uvm_va_policy_node_cache __read_mostly; + +static uvm_va_policy_node_t *uvm_va_policy_node_container(uvm_range_tree_node_t *tree_node) +{ + return container_of(tree_node, uvm_va_policy_node_t, node); +} + +static uvm_va_policy_t *uvm_va_policy_container(uvm_range_tree_node_t *tree_node) +{ + if (!tree_node) + return NULL; + + return &uvm_va_policy_node_container(tree_node)->policy; +} + +NV_STATUS uvm_va_policy_init(void) +{ + g_uvm_va_policy_node_cache = NV_KMEM_CACHE_CREATE("uvm_va_policy_node_t", uvm_va_policy_node_t); + if (!g_uvm_va_policy_node_cache) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +void uvm_va_policy_exit(void) +{ + kmem_cache_destroy_safe(&g_uvm_va_policy_node_cache); +} + +static uvm_va_policy_node_t *uvm_va_policy_node_alloc(NvU64 start, NvU64 end) +{ + uvm_va_policy_node_t *node; + + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(end + 1)); + + node = nv_kmem_cache_zalloc(g_uvm_va_policy_node_cache, NV_UVM_GFP_FLAGS); + if (!node) + return NULL; + + node->node.start = start; + node->node.end = end; + + return node; +} + +static void uvm_va_policy_node_free(uvm_va_policy_node_t *node) +{ + kmem_cache_free(g_uvm_va_policy_node_cache, node); +} + +static uvm_va_policy_node_t *uvm_va_policy_node_create(uvm_va_block_t *va_block, NvU64 start, NvU64 end) +{ + uvm_va_policy_node_t *node; + NV_STATUS status; + + UVM_ASSERT(uvm_va_block_is_hmm(va_block)); + uvm_assert_mutex_locked(&va_block->lock); + UVM_ASSERT(start >= va_block->start); + UVM_ASSERT(end <= va_block->end); + + node = uvm_va_policy_node_alloc(start, end); + if (!node) + return NULL; + + node->policy = uvm_va_policy_default; + + status = uvm_range_tree_add(&va_block->hmm.va_policy_tree, &node->node); + UVM_ASSERT(status == NV_OK); + + return node; +} + +uvm_va_policy_node_t *uvm_va_policy_node_find(uvm_va_block_t *va_block, NvU64 addr) +{ + uvm_range_tree_node_t *tree_node; + + UVM_ASSERT(uvm_va_block_is_hmm(va_block)); + uvm_assert_mutex_locked(&va_block->lock); + + tree_node = uvm_range_tree_find(&va_block->hmm.va_policy_tree, addr); + if (!tree_node) + return NULL; + + return uvm_va_policy_node_container(tree_node); +} + +uvm_va_policy_node_t *uvm_va_policy_node_iter_first(uvm_va_block_t *va_block, NvU64 start, NvU64 end) +{ + uvm_range_tree_node_t *tree_node; + + UVM_ASSERT(uvm_va_block_is_hmm(va_block)); + uvm_assert_mutex_locked(&va_block->lock); + + tree_node = uvm_range_tree_iter_first(&va_block->hmm.va_policy_tree, start, end); + if (!tree_node) + return NULL; + + return uvm_va_policy_node_container(tree_node); +} + +uvm_va_policy_node_t *uvm_va_policy_node_iter_next(uvm_va_block_t *va_block, + uvm_va_policy_node_t *node, + NvU64 end) +{ + uvm_range_tree_node_t *tree_node; + + if (!node) + return NULL; + + tree_node = uvm_range_tree_iter_next(&va_block->hmm.va_policy_tree, &node->node, end); + if (!tree_node) + return NULL; + + return uvm_va_policy_node_container(tree_node); +} + +NV_STATUS uvm_va_policy_node_split(uvm_va_block_t *va_block, + uvm_va_policy_node_t *old, + NvU64 new_end, + uvm_va_policy_node_t **new_ptr) +{ + uvm_va_policy_node_t *new; + + UVM_ASSERT(uvm_va_block_is_hmm(va_block)); + uvm_assert_mutex_locked(&va_block->lock); + + UVM_ASSERT(new_end > old->node.start); + UVM_ASSERT(new_end < old->node.end); + + new = uvm_va_policy_node_alloc(new_end + 1, old->node.end); + if (!new) + return NV_ERR_NO_MEMORY; + 
+ new->policy = old->policy; + + uvm_range_tree_split(&va_block->hmm.va_policy_tree, &old->node, &new->node); + + if (new_ptr) + *new_ptr = new; + + return NV_OK; +} + +void uvm_va_policy_node_split_move(uvm_va_block_t *old_va_block, + uvm_va_block_t *new_va_block) +{ + uvm_va_policy_node_t *node, *next; + NV_STATUS status; + + UVM_ASSERT(uvm_va_block_is_hmm(old_va_block)); + UVM_ASSERT(uvm_va_block_is_hmm(new_va_block)); + uvm_assert_mutex_locked(&old_va_block->lock); + + UVM_ASSERT(old_va_block->end + 1 == new_va_block->start); + + uvm_for_each_va_policy_node_in_safe(node, next, old_va_block, new_va_block->start, new_va_block->end) { + uvm_range_tree_remove(&old_va_block->hmm.va_policy_tree, &node->node); + UVM_ASSERT(node->node.start >= new_va_block->start); + UVM_ASSERT(node->node.end <= new_va_block->end); + status = uvm_range_tree_add(&new_va_block->hmm.va_policy_tree, &node->node); + UVM_ASSERT(status == NV_OK); + } +} + +void uvm_va_policy_clear(uvm_va_block_t *va_block, NvU64 start, NvU64 end) +{ + uvm_va_policy_node_t *node, *new; + uvm_range_tree_node_t *tree_node; + NV_STATUS status; + + UVM_ASSERT(uvm_va_block_is_hmm(va_block)); + uvm_assert_mutex_locked(&va_block->lock); + + tree_node = uvm_range_tree_iter_first(&va_block->hmm.va_policy_tree, start, end); + while (tree_node) { + node = uvm_va_policy_node_container(tree_node); + tree_node = uvm_range_tree_iter_next(&va_block->hmm.va_policy_tree, &node->node, end); + + if (node->node.start < start) { + if (node->node.end <= end) { + uvm_range_tree_shrink_node(&va_block->hmm.va_policy_tree, &node->node, node->node.start, start - 1); + continue; + } + + status = uvm_va_policy_node_split(va_block, node, start - 1, &new); + // If we can't split, save the policy before the part being cleared + // but forget the policy after the range to be cleared. + // Since policy isn't a guarantee, at least this is safe. 
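+            // For example (hypothetical addresses): clearing [0x201000, 0x202fff]
+            // from a node covering [0x200000, 0x204fff] would, on allocation
+            // failure, shrink the node to [0x200000, 0x200fff], dropping the
+            // policy for [0x203000, 0x204fff] even though that part was not
+            // asked to be cleared.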
+ if (status != NV_OK) { + uvm_range_tree_shrink_node(&va_block->hmm.va_policy_tree, &node->node, node->node.start, start - 1); + continue; + } + + node = new; + } + + if (node->node.end > end) { + uvm_range_tree_shrink_node(&va_block->hmm.va_policy_tree, &node->node, end + 1, node->node.end); + continue; + } + + uvm_range_tree_remove(&va_block->hmm.va_policy_tree, &node->node); + uvm_va_policy_node_free(node); + } +} + +static void uvm_va_policy_node_set(uvm_va_policy_node_t *node, + uvm_va_policy_type_t which, + uvm_processor_id_t processor_id, + uvm_read_duplication_policy_t new_policy) +{ + switch (which) { + case UVM_VA_POLICY_PREFERRED_LOCATION: + UVM_ASSERT(!UVM_ID_IS_INVALID(processor_id)); + node->policy.preferred_location = processor_id; + break; + + case UVM_VA_POLICY_ACCESSED_BY: + UVM_ASSERT(!UVM_ID_IS_INVALID(processor_id)); + uvm_processor_mask_set(&node->policy.accessed_by, processor_id); + break; + + case UVM_VA_POLICY_READ_DUPLICATION: + UVM_ASSERT(new_policy == UVM_READ_DUPLICATION_ENABLED || + new_policy == UVM_READ_DUPLICATION_DISABLED); + node->policy.read_duplication = new_policy; + break; + + default: + UVM_ASSERT_MSG(0, "Unknown policy type %u\n", which); + break; + } +} + +static void uvm_va_policy_node_clear(uvm_va_block_t *va_block, + uvm_va_policy_node_t *node, + uvm_va_policy_type_t which, + uvm_processor_id_t processor_id, + uvm_read_duplication_policy_t new_policy) +{ + switch (which) { + case UVM_VA_POLICY_PREFERRED_LOCATION: + UVM_ASSERT(UVM_ID_IS_INVALID(processor_id)); + node->policy.preferred_location = processor_id; + break; + + case UVM_VA_POLICY_ACCESSED_BY: + UVM_ASSERT(!UVM_ID_IS_INVALID(processor_id)); + uvm_processor_mask_clear(&node->policy.accessed_by, processor_id); + break; + + case UVM_VA_POLICY_READ_DUPLICATION: + default: + // Read duplication is never set back to UVM_READ_DUPLICATION_UNSET. + UVM_ASSERT(0); + break; + } + + // Check to see if the node is now the default and can be removed. + if (UVM_ID_IS_INVALID(node->policy.preferred_location) && + uvm_processor_mask_empty(&node->policy.accessed_by) && + node->policy.read_duplication == UVM_READ_DUPLICATION_UNSET) { + uvm_range_tree_remove(&va_block->hmm.va_policy_tree, &node->node); + uvm_va_policy_node_free(node); + } +} + +static uvm_va_policy_node_t *create_node_and_set(uvm_va_block_t *va_block, + NvU64 start, + NvU64 end, + uvm_va_policy_type_t which, + uvm_processor_id_t processor_id, + uvm_read_duplication_policy_t new_policy) +{ + uvm_va_policy_node_t *node; + + // Create a new node for the missing range. + node = uvm_va_policy_node_create(va_block, start, end); + if (!node) + return node; + + uvm_va_policy_node_set(node, which, processor_id, new_policy); + + return node; +} + +static bool va_policy_node_split_needed(uvm_va_policy_node_t *node, + NvU64 start, + NvU64 end, + uvm_va_policy_type_t which, + bool is_default, + uvm_processor_id_t processor_id, + uvm_read_duplication_policy_t new_policy) +{ + // If the node doesn't extend beyond the range being set, it doesn't need + // to be split. + if (node->node.start >= start && node->node.end <= end) + return false; + + // If the new policy value doesn't match the old value, a split is needed. 
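+    //
+    // For example, when setting UVM_VA_POLICY_PREFERRED_LOCATION, a node that
+    // already prefers 'processor_id' can simply be left covering its current
+    // range, whereas a node that prefers a different processor has to be split
+    // at the boundary so only the overlapping part is changed.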
+ switch (which) { + case UVM_VA_POLICY_PREFERRED_LOCATION: + return !uvm_id_equal(node->policy.preferred_location, processor_id); + + case UVM_VA_POLICY_ACCESSED_BY: + if (is_default) + return uvm_processor_mask_test(&node->policy.accessed_by, processor_id); + else + return !uvm_processor_mask_test(&node->policy.accessed_by, processor_id); + + case UVM_VA_POLICY_READ_DUPLICATION: + return node->policy.read_duplication != new_policy; + + default: + UVM_ASSERT(0); + return false; + } +} + +NV_STATUS uvm_va_policy_set_range(uvm_va_block_t *va_block, + NvU64 start, + NvU64 end, + uvm_va_policy_type_t which, + bool is_default, + uvm_processor_id_t processor_id, + uvm_read_duplication_policy_t new_policy) +{ + uvm_va_policy_node_t *node, *next, *new; + NvU64 addr; + NvU64 node_start; + NvU64 node_end; + + UVM_ASSERT(uvm_va_block_is_hmm(va_block)); + uvm_assert_mutex_locked(&va_block->lock); + UVM_ASSERT(PAGE_ALIGNED(start)); + UVM_ASSERT(PAGE_ALIGNED(end + 1)); + UVM_ASSERT(start < end); + UVM_ASSERT(start >= va_block->start); + UVM_ASSERT(start < va_block->end); + UVM_ASSERT(end <= va_block->end); + + // Note that the policy range ends have already been split so we only need + // to fill in the middle or remove nodes. + node = uvm_va_policy_node_iter_first(va_block, start, end); + + if (!node) { + // There is no policy in the given range so it is already the default. + if (is_default) + return NV_OK; + + // Create a new node for the missing range. + node = create_node_and_set(va_block, + start, + end, + which, + processor_id, + new_policy); + if (!node) + return NV_ERR_NO_MEMORY; + + return NV_OK; + } + + for (addr = start; node; addr = node_end + 1, node = next) { + node_start = node->node.start; + node_end = node->node.end; + + // Nodes should have been split before setting policy so verify that. + UVM_ASSERT(!va_policy_node_split_needed(node, start, end, which, is_default, processor_id, new_policy)); + + next = uvm_va_policy_node_iter_next(va_block, node, end); + + if (is_default) { + uvm_va_policy_node_clear(va_block, node, which, processor_id, new_policy); + // Note that node may have been deleted. + } + else { + uvm_va_policy_node_set(node, which, processor_id, new_policy); + + // TODO: Bug 1707562: Add support for merging policy ranges. + } + + if (!is_default && addr < node_start) { + // Create a new node for the missing range on the left. + new = create_node_and_set(va_block, + addr, + node_start - 1, + which, + processor_id, + new_policy); + if (!new) + return NV_ERR_NO_MEMORY; + } + else if (!is_default && !next && node_end < end) { + // Create a new node for the missing range on the right. 
+            new = create_node_and_set(va_block,
+                                      node_end + 1,
+                                      end,
+                                      which,
+                                      processor_id,
+                                      new_policy);
+            if (!new)
+                return NV_ERR_NO_MEMORY;
+            break;
+        }
+    }
+
+    return NV_OK;
+}
+
+#endif // UVM_IS_CONFIG_HMM()
diff --git a/kernel-open/nvidia-uvm/uvm_va_policy.h b/kernel-open/nvidia-uvm/uvm_va_policy.h
new file mode 100644
index 000000000..b9e84cf3e
--- /dev/null
+++ b/kernel-open/nvidia-uvm/uvm_va_policy.h
@@ -0,0 +1,218 @@
+/*******************************************************************************
+    Copyright (c) 2022 NVIDIA Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+        The above copyright notice and this permission notice shall be
+        included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+
+*******************************************************************************/
+
+#ifndef __UVM_VA_POLICY_H__
+#define __UVM_VA_POLICY_H__
+
+#include "uvm_linux.h"
+#include "uvm_forward_decl.h"
+#include "uvm_processors.h"
+#include "uvm_range_tree.h"
+
+// This enum must be kept in sync with UVM_TEST_READ_DUPLICATION_POLICY in
+// uvm_test_ioctl.h
+typedef enum
+{
+    UVM_READ_DUPLICATION_UNSET = 0,
+    UVM_READ_DUPLICATION_ENABLED,
+    UVM_READ_DUPLICATION_DISABLED,
+    UVM_READ_DUPLICATION_MAX
+} uvm_read_duplication_policy_t;
+
+typedef enum
+{
+    UVM_VA_POLICY_PREFERRED_LOCATION = 0,
+    UVM_VA_POLICY_ACCESSED_BY,
+    UVM_VA_POLICY_READ_DUPLICATION,
+} uvm_va_policy_type_t;
+
+//
+// A policy covers one or more contiguous Linux VMAs, or a portion of a VMA,
+// and does not cover non-existent VMAs.
+// The VA range is determined from either the uvm_va_range_t for UVM managed
+// allocations or the uvm_va_policy_node_t for HMM allocations.
+//
+typedef struct uvm_va_policy_struct
+{
+    // Read duplication policy for this VA range (unset, enabled, or disabled).
+    uvm_read_duplication_policy_t read_duplication;
+
+    // Processor ID of the preferred location for this VA range.
+    // This is set to UVM_ID_INVALID if no preferred location is set.
+    uvm_processor_id_t preferred_location;
+
+    // Mask of processors that are accessing this VA range and should have
+    // their page tables updated to access the (possibly remote) pages.
+    uvm_processor_mask_t accessed_by;
+
+} uvm_va_policy_t;
+
+// Policy nodes are used for storing policies in HMM va_blocks.
+// The va_block lock protects the tree so that invalidation callbacks can
+// update the VA policy tree.
+typedef struct uvm_va_policy_node_struct
+{
+    // Storage for the policy tree node. It also contains the range start and
+    // end. Start and end + 1 have to be PAGE_SIZE aligned.
+    uvm_range_tree_node_t node;
+
+    uvm_va_policy_t policy;
+
+} uvm_va_policy_node_t;
+
+// Function pointer prototype for uvm_hmm_split_as_needed() callback.
+typedef bool (*uvm_va_policy_is_split_needed_t)(uvm_va_policy_t *policy, void *data);
+
+// Default policy to save uvm_va_policy_node_t space in HMM va_blocks.
+extern uvm_va_policy_t uvm_va_policy_default;
+
+bool uvm_va_policy_is_read_duplicate(uvm_va_policy_t *policy, uvm_va_space_t *va_space);
+
+// Returns the uvm_va_policy_t containing addr or default policy if not found.
+// The va_block can be either a UVM or HMM va_block.
+// Locking: The va_block lock must be held.
+uvm_va_policy_t *uvm_va_policy_get(uvm_va_block_t *va_block, NvU64 addr);
+
+#if UVM_IS_CONFIG_HMM()
+
+// Module load/exit
+NV_STATUS uvm_va_policy_init(void);
+void uvm_va_policy_exit(void);
+
+// Returns the uvm_va_policy_node_t containing addr or NULL if not found.
+// The va_block must be a HMM va_block.
+// Locking: The va_block lock must be held.
+uvm_va_policy_node_t *uvm_va_policy_node_find(uvm_va_block_t *va_block, NvU64 addr);
+
+// Split the old node. The old node will end at 'new_end' and the new node will
+// start at 'new_end' + 1 and end at the old end.
+// The va_block must be a HMM va_block.
+// Locking: The va_block lock must be held.
+NV_STATUS uvm_va_policy_node_split(uvm_va_block_t *va_block,
+                                   uvm_va_policy_node_t *old,
+                                   NvU64 new_end,
+                                   uvm_va_policy_node_t **new_ptr);
+
+// Move hints from 'old' to 'new', which must both be HMM va_blocks.
+// The old va_block policies should have been pre-split and, since we don't
+// merge policy ranges, they should still be split after locking/unlocking
+// 'old'. This should be called after splitting a block.
+// TODO: Bug 1707562: Add support for merging policy ranges.
+// Locking: The va_block lock must be held for both old and new.
+void uvm_va_policy_node_split_move(uvm_va_block_t *old_va_block,
+                                   uvm_va_block_t *new_va_block);
+
+// Remove all policy in the given range start/end, where 'end' is inclusive.
+// This function may clear a range larger than start/end if clearing the range
+// requires memory allocation and the memory allocation fails.
+// The va_block must be a HMM va_block.
+// Locking: The va_block lock must be held.
+void uvm_va_policy_clear(uvm_va_block_t *va_block, NvU64 start, NvU64 end);
+
+// Fill in any missing policy nodes for the given range and set the policy
+// to the given value. The caller is expected to split any policy nodes
+// before calling this function so that the range being set does not partially
+// cover any existing node.
+// The va_block must be a HMM va_block.
+// Note that start and end + 1 must be page aligned, 'end' is inclusive.
+// TODO: Bug 1707562: Add support for merging policy ranges.
+// Locking: The va_block lock must be held.
+NV_STATUS uvm_va_policy_set_range(uvm_va_block_t *va_block,
+                                  NvU64 start,
+                                  NvU64 end,
+                                  uvm_va_policy_type_t which,
+                                  bool is_default,
+                                  uvm_processor_id_t processor_id,
+                                  uvm_read_duplication_policy_t new_policy);
+
+// Iterators for specific VA policy ranges.
+
+// Returns the first policy node in the range [start, end], if any.
+// The va_block must be a HMM va_block.
+// Locking: The va_block lock must be held.
+uvm_va_policy_node_t *uvm_va_policy_node_iter_first(uvm_va_block_t *va_block, NvU64 start, NvU64 end);
+
+// Returns the next VA policy following the provided policy in address order,
+// if that policy's start <= the provided end.
+// The va_block must be a HMM va_block.
+// Locking: The va_block lock must be held.
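+//
+// A typical iteration over [start, end] (sketch; the uvm_for_each_va_policy_node_in
+// macro below wraps exactly this pattern):
+//
+//     uvm_va_policy_node_t *node;
+//
+//     for (node = uvm_va_policy_node_iter_first(va_block, start, end);
+//          node;
+//          node = uvm_va_policy_node_iter_next(va_block, node, end)) {
+//         // node->policy applies to [node->node.start, node->node.end], which
+//         // may extend beyond [start, end].
+//     }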
+uvm_va_policy_node_t *uvm_va_policy_node_iter_next(uvm_va_block_t *va_block, uvm_va_policy_node_t *node, NvU64 end); + +#define uvm_for_each_va_policy_node_in(node, va_block, start, end) \ + for ((node) = uvm_va_policy_node_iter_first((va_block), (start), (end)); \ + (node); \ + (node) = uvm_va_policy_node_iter_next((va_block), (node), (end))) + +#define uvm_for_each_va_policy_node_in_safe(node, next, va_block, start, end) \ + for ((node) = uvm_va_policy_node_iter_first((va_block), (start), (end)), \ + (next) = uvm_va_policy_node_iter_next((va_block), (node), (end)); \ + (node); \ + (node) = (next)) + +#else // UVM_IS_CONFIG_HMM() + +static NV_STATUS uvm_va_policy_init(void) +{ + return NV_OK; +} + +static void uvm_va_policy_exit(void) +{ +} + +static uvm_va_policy_node_t *uvm_va_policy_node_find(uvm_va_block_t *va_block, NvU64 addr) +{ + UVM_ASSERT(0); + return NULL; +} + +static NV_STATUS uvm_va_policy_node_split(uvm_va_block_t *va_block, + uvm_va_policy_node_t *old, + NvU64 new_end, + uvm_va_policy_node_t **new_ptr) +{ + return NV_OK; +} + +static void uvm_va_policy_node_split_move(uvm_va_block_t *old_va_block, + uvm_va_block_t *new_va_block) +{ +} + +static void uvm_va_policy_clear(uvm_va_block_t *va_block, NvU64 start, NvU64 end) +{ +} + +static NV_STATUS uvm_va_policy_set_range(uvm_va_block_t *va_block, + NvU64 start, + NvU64 end, + uvm_va_policy_type_t which, + bool is_default, + uvm_processor_id_t processor_id, + uvm_read_duplication_policy_t new_policy) +{ + return NV_OK; +} + +#endif // UVM_IS_CONFIG_HMM() + +#endif // __UVM_VA_POLICY_H__ diff --git a/kernel-open/nvidia-uvm/uvm_va_range.c b/kernel-open/nvidia-uvm/uvm_va_range.c new file mode 100644 index 000000000..be000f7e7 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_range.c @@ -0,0 +1,2086 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_linux.h" +#include "uvm_types.h" +#include "uvm_api.h" +#include "uvm_va_range.h" +#include "uvm_va_block.h" +#include "uvm_kvmalloc.h" +#include "uvm_map_external.h" +#include "uvm_perf_thrashing.h" + + + +#include "nv_uvm_interface.h" + +static struct kmem_cache *g_uvm_va_range_cache __read_mostly; +static struct kmem_cache *g_uvm_vma_wrapper_cache __read_mostly; + +NV_STATUS uvm_va_range_init(void) +{ + g_uvm_va_range_cache = NV_KMEM_CACHE_CREATE("uvm_va_range_t", uvm_va_range_t); + if (!g_uvm_va_range_cache) + return NV_ERR_NO_MEMORY; + + g_uvm_vma_wrapper_cache = NV_KMEM_CACHE_CREATE("uvm_vma_wrapper_t", uvm_vma_wrapper_t); + if (!g_uvm_vma_wrapper_cache) + return NV_ERR_NO_MEMORY; + + return uvm_va_block_init(); +} + +void uvm_va_range_exit(void) +{ + uvm_va_block_exit(); + kmem_cache_destroy_safe(&g_uvm_va_range_cache); + kmem_cache_destroy_safe(&g_uvm_vma_wrapper_cache); +} + +static NvU64 block_calc_start(uvm_va_range_t *va_range, size_t index) +{ + NvU64 range_start = UVM_VA_BLOCK_ALIGN_DOWN(va_range->node.start); + NvU64 block_start = range_start + index * UVM_VA_BLOCK_SIZE; + NvU64 start = max(va_range->node.start, block_start); + UVM_ASSERT(start < va_range->node.end); + return start; +} + +static NvU64 block_calc_end(uvm_va_range_t *va_range, size_t index) +{ + NvU64 start = block_calc_start(va_range, index); + NvU64 block_end = UVM_VA_BLOCK_ALIGN_UP(start + 1) - 1; // Inclusive end + NvU64 end = min(va_range->node.end, block_end); + UVM_ASSERT(end > va_range->node.start); + return end; +} + +// Called before the range's bounds have been adjusted. This may not actually +// shrink the blocks array. For example, if the shrink attempt fails then +// va_range's old array is left intact. This may waste memory, but it means this +// function cannot fail. +static void blocks_array_shrink(uvm_va_range_t *va_range, size_t new_num_blocks) +{ + size_t new_size = new_num_blocks * sizeof(va_range->blocks[0]); + atomic_long_t *new_blocks; + + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + UVM_ASSERT(va_range->blocks); + UVM_ASSERT(uvm_kvsize(va_range->blocks) >= uvm_va_range_num_blocks(va_range) * sizeof(va_range->blocks[0])); + UVM_ASSERT(new_num_blocks); + UVM_ASSERT(new_num_blocks <= uvm_va_range_num_blocks(va_range)); + + // TODO: Bug 1766579: This could be optimized by only shrinking the array + // when the new size is half of the old size or some similar + // threshold. Need to profile this on real apps to see if that's worth + // doing. 
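+    //       A possible shape for that heuristic (illustrative sketch only, not
+    //       implemented here):
+    //
+    //           if (new_size > uvm_kvsize(va_range->blocks) / 2)
+    //               return;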
+ + new_blocks = uvm_kvrealloc(va_range->blocks, new_size); + if (!new_blocks) { + // If we failed to allocate a smaller array, just leave the old one as-is + UVM_DBG_PRINT("Failed to shrink range [0x%llx, 0x%llx] from %zu blocks to %zu blocks\n", + va_range->node.start, + va_range->node.end, + uvm_kvsize(va_range->blocks) / sizeof(va_range->blocks[0]), + new_num_blocks); + return; + } + + va_range->blocks = new_blocks; +} + +static uvm_va_range_t *uvm_va_range_alloc(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ + uvm_va_range_t *va_range = nv_kmem_cache_zalloc(g_uvm_va_range_cache, NV_UVM_GFP_FLAGS); + if (!va_range) + return NULL; + + uvm_assert_rwsem_locked_write(&va_space->lock); + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + + va_range->va_space = va_space; + va_range->node.start = start; + va_range->node.end = end; + + // The range is inserted into the VA space tree only at the end of creation, + // so clear the node so the destroy path knows whether to remove it. + RB_CLEAR_NODE(&va_range->node.rb_node); + + return va_range; +} + +static NV_STATUS uvm_va_range_alloc_reclaim(uvm_va_space_t *va_space, + struct mm_struct *mm, + uvm_va_range_type_t type, + NvU64 start, + NvU64 end, + uvm_va_range_t **out_va_range) +{ + uvm_va_range_t *va_range; + NV_STATUS status; + + // Check for no overlap with HMM blocks. + status = uvm_hmm_va_block_reclaim(va_space, mm, start, end); + if (status != NV_OK) + return status; + + va_range = uvm_va_range_alloc(va_space, start, end); + if (!va_range) + return NV_ERR_NO_MEMORY; + + va_range->type = type; + + *out_va_range = va_range; + return NV_OK; +} + +static uvm_va_range_t *uvm_va_range_alloc_managed(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ + uvm_va_range_t *va_range = NULL; + + va_range = uvm_va_range_alloc(va_space, start, end); + if (!va_range) + goto error; + + va_range->type = UVM_VA_RANGE_TYPE_MANAGED; + + uvm_va_range_get_policy(va_range)->read_duplication = UVM_READ_DUPLICATION_UNSET; + uvm_va_range_get_policy(va_range)->preferred_location = UVM_ID_INVALID; + + va_range->blocks = uvm_kvmalloc_zero(uvm_va_range_num_blocks(va_range) * sizeof(va_range->blocks[0])); + if (!va_range->blocks) { + UVM_DBG_PRINT("Failed to allocate %zu blocks\n", uvm_va_range_num_blocks(va_range)); + goto error; + } + + return va_range; + +error: + uvm_va_range_destroy(va_range, NULL); + return NULL; +} + +NV_STATUS uvm_va_range_create_mmap(uvm_va_space_t *va_space, + struct mm_struct *mm, + uvm_vma_wrapper_t *vma_wrapper, + uvm_va_range_t **out_va_range) +{ + NV_STATUS status; + struct vm_area_struct *vma = vma_wrapper->vma; + uvm_va_range_t *va_range = NULL; + + // Check for no overlap with HMM blocks. 
+ status = uvm_hmm_va_block_reclaim(va_space, mm, vma->vm_start, vma->vm_end - 1); + if (status != NV_OK) + return status; + + // vma->vm_end is exclusive but va_range end is inclusive + va_range = uvm_va_range_alloc_managed(va_space, vma->vm_start, vma->vm_end - 1); + if (!va_range) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + va_range->managed.vma_wrapper = vma_wrapper; + + status = uvm_range_tree_add(&va_space->va_range_tree, &va_range->node); + if (status != NV_OK) + goto error; + + if (out_va_range) + *out_va_range = va_range; + + return NV_OK; + +error: + uvm_va_range_destroy(va_range, NULL); + return status; +} + +NV_STATUS uvm_va_range_create_external(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 length, + uvm_va_range_t **out_va_range) +{ + NV_STATUS status; + uvm_va_range_t *va_range = NULL; + NvU32 i; + + status = uvm_va_range_alloc_reclaim(va_space, + mm, + UVM_VA_RANGE_TYPE_EXTERNAL, + start, + start + length - 1, + &va_range); + if (status != NV_OK) + return status; + + for (i = 0; i < ARRAY_SIZE(va_range->external.gpu_ranges); i++) { + uvm_mutex_init(&va_range->external.gpu_ranges[i].lock, UVM_LOCK_ORDER_EXT_RANGE_TREE); + uvm_range_tree_init(&va_range->external.gpu_ranges[i].tree); + } + + status = uvm_range_tree_add(&va_space->va_range_tree, &va_range->node); + if (status != NV_OK) + goto error; + + if (out_va_range) + *out_va_range = va_range; + + return NV_OK; + +error: + uvm_va_range_destroy(va_range, NULL); + return status; +} + +NV_STATUS uvm_va_range_create_channel(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 end, + uvm_va_range_t **out_va_range) +{ + NV_STATUS status; + uvm_va_range_t *va_range = NULL; + + status = uvm_va_range_alloc_reclaim(va_space, + mm, + UVM_VA_RANGE_TYPE_CHANNEL, + start, + end, + &va_range); + if (status != NV_OK) + return status; + + INIT_LIST_HEAD(&va_range->channel.list_node); + + status = uvm_range_tree_add(&va_space->va_range_tree, &va_range->node); + if (status != NV_OK) + goto error; + + if (out_va_range) + *out_va_range = va_range; + + return NV_OK; + +error: + uvm_va_range_destroy(va_range, NULL); + return status; +} + +NV_STATUS uvm_va_range_create_sked_reflected(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 length, + uvm_va_range_t **out_va_range) +{ + NV_STATUS status; + uvm_va_range_t *va_range = NULL; + + status = uvm_va_range_alloc_reclaim(va_space, + mm, + UVM_VA_RANGE_TYPE_SKED_REFLECTED, + start, + start + length - 1, + &va_range); + if (status != NV_OK) + return status; + + status = uvm_range_tree_add(&va_space->va_range_tree, &va_range->node); + if (status != NV_OK) + goto error; + + if (out_va_range) + *out_va_range = va_range; + + return NV_OK; + +error: + uvm_va_range_destroy(va_range, NULL); + return status; +} + +NV_STATUS uvm_va_range_create_semaphore_pool(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 length, + const UvmGpuMappingAttributes *per_gpu_attrs, + NvU32 per_gpu_attrs_count, + uvm_va_range_t **out_va_range) +{ + static const uvm_mem_gpu_mapping_attrs_t default_attrs = { + .protection = UVM_PROT_READ_WRITE_ATOMIC, + .is_cacheable = false + }; + + NV_STATUS status; + uvm_va_range_t *va_range = NULL; + uvm_mem_alloc_params_t mem_alloc_params = { 0 }; + NvU32 i; + uvm_gpu_id_t gpu_id; + + status = uvm_va_range_alloc_reclaim(va_space, + mm, + UVM_VA_RANGE_TYPE_SEMAPHORE_POOL, + start, + start + length - 1, + &va_range); + if (status != NV_OK) + return status; + + 
uvm_tracker_init(&va_range->semaphore_pool.tracker); + uvm_mutex_init(&va_range->semaphore_pool.tracker_lock, UVM_LOCK_ORDER_SEMA_POOL_TRACKER); + + status = uvm_range_tree_add(&va_space->va_range_tree, &va_range->node); + if (status != NV_OK) + goto error; + + // The semaphore pool memory is located in sysmem, and must be zeroed upon + // allocation because it may be mapped on the user VA space. + mem_alloc_params.page_size = UVM_PAGE_SIZE_DEFAULT; + mem_alloc_params.size = length; + mem_alloc_params.zero = true; + mem_alloc_params.mm = mm; + + va_range->semaphore_pool.default_gpu_attrs = default_attrs; + va_range->semaphore_pool.owner = NULL; + + for_each_gpu_id(gpu_id) + va_range->semaphore_pool.gpu_attrs[uvm_id_gpu_index(gpu_id)] = default_attrs; + + for (i = 0; i < per_gpu_attrs_count; i++) { + uvm_gpu_t *gpu; + uvm_mem_gpu_mapping_attrs_t attrs = default_attrs; + + status = uvm_mem_translate_gpu_attributes(&per_gpu_attrs[i], va_space, &gpu, &attrs); + if (status != NV_OK) + goto error; + + + + + + + if (attrs.is_cacheable) { + // At most 1 GPU can have this memory cached, in which case it is + // the 'owner' GPU. + if (va_range->semaphore_pool.owner != NULL) { + UVM_DBG_PRINT("Caching of semaphore pool requested on >1 GPU."); + status = NV_ERR_INVALID_ARGUMENT; + goto error; + } + + va_range->semaphore_pool.owner = gpu; + } + + va_range->semaphore_pool.gpu_attrs[uvm_id_gpu_index(gpu->id)] = attrs; + } + + status = uvm_mem_alloc(&mem_alloc_params, &va_range->semaphore_pool.mem); + if (status != NV_OK) + goto error; + + status = uvm_mem_map_cpu_kernel(va_range->semaphore_pool.mem); + if (status != NV_OK) + goto error; + + if (out_va_range) + *out_va_range = va_range; + + return NV_OK; + +error: + uvm_va_range_destroy(va_range, NULL); + return status; +} + +static void uvm_va_range_destroy_managed(uvm_va_range_t *va_range) +{ + uvm_va_block_t *block; + uvm_va_block_t *block_tmp; + uvm_perf_event_data_t event_data; + NV_STATUS status; + + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + + if (va_range->blocks) { + // Unmap and drop our ref count on each block + for_each_va_block_in_va_range_safe(va_range, block, block_tmp) + uvm_va_block_kill(block); + + uvm_kvfree(va_range->blocks); + } + + event_data.range_destroy.range = va_range; + uvm_perf_event_notify(&va_range->va_space->perf_events, UVM_PERF_EVENT_RANGE_DESTROY, &event_data); + + status = uvm_range_group_assign_range(va_range->va_space, NULL, va_range->node.start, va_range->node.end); + UVM_ASSERT(status == NV_OK); +} + +static void uvm_va_range_destroy_external(uvm_va_range_t *va_range, struct list_head *deferred_free_list) +{ + uvm_gpu_t *gpu; + + if (uvm_processor_mask_empty(&va_range->external.mapped_gpus)) + return; + + UVM_ASSERT(deferred_free_list); + + for_each_va_space_gpu_in_mask(gpu, va_range->va_space, &va_range->external.mapped_gpus) { + uvm_ext_gpu_range_tree_t *range_tree = uvm_ext_gpu_range_tree(va_range, gpu); + uvm_ext_gpu_map_t *ext_map, *ext_map_next; + + uvm_mutex_lock(&range_tree->lock); + uvm_ext_gpu_map_for_each_safe(ext_map, ext_map_next, va_range, gpu) + uvm_ext_gpu_map_destroy(va_range, ext_map, deferred_free_list); + uvm_mutex_unlock(&range_tree->lock); + } + + UVM_ASSERT(uvm_processor_mask_empty(&va_range->external.mapped_gpus)); +} + +static void uvm_va_range_destroy_channel(uvm_va_range_t *va_range) +{ + uvm_gpu_va_space_t *gpu_va_space = va_range->channel.gpu_va_space; + uvm_membar_t membar; + + UVM_ASSERT(va_range->channel.ref_count == 0); + + // Unmap the buffer + if (gpu_va_space && 
va_range->channel.pt_range_vec.ranges) { + if (va_range->channel.aperture == UVM_APERTURE_VID) + membar = UVM_MEMBAR_GPU; + else + membar = UVM_MEMBAR_SYS; + + uvm_page_table_range_vec_clear_ptes(&va_range->channel.pt_range_vec, membar); + uvm_page_table_range_vec_deinit(&va_range->channel.pt_range_vec); + } + + list_del(&va_range->channel.list_node); + + // Channel unregister handles releasing this descriptor back to RM + va_range->channel.rm_descriptor = 0; +} + +static void uvm_va_range_destroy_sked_reflected(uvm_va_range_t *va_range) +{ + uvm_gpu_va_space_t *gpu_va_space = va_range->sked_reflected.gpu_va_space; + + if (!gpu_va_space || !va_range->sked_reflected.pt_range_vec.ranges) + return; + + // The SKED reflected mapping has no physical backing and hence no physical + // accesses can be pending to it and no membar is needed. + uvm_page_table_range_vec_clear_ptes(&va_range->sked_reflected.pt_range_vec, UVM_MEMBAR_NONE); + uvm_page_table_range_vec_deinit(&va_range->sked_reflected.pt_range_vec); + + va_range->sked_reflected.gpu_va_space = NULL; +} + +static void uvm_va_range_destroy_semaphore_pool(uvm_va_range_t *va_range) +{ + NV_STATUS status = uvm_tracker_wait_deinit(&va_range->semaphore_pool.tracker); + if (status != NV_OK) { + UVM_ASSERT_MSG(status == uvm_global_get_status(), + "uvm_tracker_wait() returned %d (%s) in uvm_va_range_destroy_semaphore_pool()\n", + status, + nvstatusToString(status)); + } + uvm_mem_free(va_range->semaphore_pool.mem); + va_range->semaphore_pool.mem = NULL; +} + +void uvm_va_range_destroy(uvm_va_range_t *va_range, struct list_head *deferred_free_list) +{ + if (!va_range) + return; + + if (!RB_EMPTY_NODE(&va_range->node.rb_node)) + uvm_range_tree_remove(&va_range->va_space->va_range_tree, &va_range->node); + + switch (va_range->type) { + case UVM_VA_RANGE_TYPE_INVALID: + // Skip partially-created ranges with unset types + break; + case UVM_VA_RANGE_TYPE_MANAGED: + uvm_va_range_destroy_managed(va_range); + break; + case UVM_VA_RANGE_TYPE_EXTERNAL: + uvm_va_range_destroy_external(va_range, deferred_free_list); + break; + case UVM_VA_RANGE_TYPE_CHANNEL: + uvm_va_range_destroy_channel(va_range); + break; + case UVM_VA_RANGE_TYPE_SKED_REFLECTED: + uvm_va_range_destroy_sked_reflected(va_range); + break; + case UVM_VA_RANGE_TYPE_SEMAPHORE_POOL: + uvm_va_range_destroy_semaphore_pool(va_range); + break; + default: + UVM_ASSERT_MSG(0, "[0x%llx, 0x%llx] has type %d\n", + va_range->node.start, va_range->node.end, va_range->type); + } + + kmem_cache_free(g_uvm_va_range_cache, va_range); +} + +void uvm_va_range_zombify(uvm_va_range_t *va_range) +{ + if (!va_range) + return; + + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + UVM_ASSERT(va_range->managed.vma_wrapper); + + // Destroy will be done by uvm_destroy_vma_managed + va_range->managed.vma_wrapper = NULL; +} + +NV_STATUS uvm_api_clean_up_zombie_resources(UVM_CLEAN_UP_ZOMBIE_RESOURCES_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_range_t *va_range, *va_range_next; + + uvm_va_space_down_write(va_space); + + uvm_for_each_va_range_safe(va_range, va_range_next, va_space) { + if (uvm_va_range_is_managed_zombie(va_range)) + uvm_va_range_destroy(va_range, NULL); + } + + uvm_va_space_up_write(va_space); + + return NV_OK; +} + +NV_STATUS uvm_api_validate_va_range(UVM_VALIDATE_VA_RANGE_PARAMS *params, struct file *filp) +{ + NV_STATUS status = NV_ERR_INVALID_ADDRESS; + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_range_t *va_range; + + 
uvm_va_space_down_read(va_space); + + va_range = uvm_va_range_find(va_space, params->base); + if (va_range && va_range->node.start == params->base && va_range->node.end + 1 == params->base + params->length) + status = NV_OK; + + uvm_va_space_up_read(va_space); + + return status; +} + +static NV_STATUS va_range_add_gpu_va_space_managed(uvm_va_range_t *va_range, + uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm) +{ + uvm_va_space_t *va_space = va_range->va_space; + uvm_gpu_t *gpu = gpu_va_space->gpu; + NV_STATUS status = NV_OK; + const bool should_add_remote_mappings = + uvm_processor_mask_test(&uvm_va_range_get_policy(va_range)->accessed_by, gpu->id) || + uvm_processor_mask_test(&va_range->uvm_lite_gpus, gpu->id); + + // By this time, the gpu is already in the registration mask. + const bool should_disable_read_duplication = + uvm_va_range_get_policy(va_range)->read_duplication == UVM_READ_DUPLICATION_ENABLED && + (uvm_va_space_can_read_duplicate(va_space, NULL) != uvm_va_space_can_read_duplicate(va_space, gpu)); + + // Combine conditions to perform a single VA block traversal + if (gpu_va_space->ats.enabled || should_add_remote_mappings || should_disable_read_duplication) { + uvm_va_block_t *va_block; + uvm_va_block_context_t *va_block_context = uvm_va_space_block_context(va_space, mm); + + va_block_context->policy = uvm_va_range_get_policy(va_range); + + // TODO: Bug 2090378. Consolidate all per-VA block operations within + // uvm_va_block_add_gpu_va_space so we only need to take the VA block + // once. + for_each_va_block_in_va_range(va_range, va_block) { + if (gpu_va_space->ats.enabled) { + // Notify that a new GPU VA space has been created. This is only + // currently used for PDE1 pre-population on ATS systems. + status = UVM_VA_BLOCK_LOCK_RETRY(va_block, NULL, uvm_va_block_add_gpu_va_space(va_block, gpu_va_space)); + if (status != NV_OK) + break; + } + + if (should_add_remote_mappings) { + // Now that we have a GPU VA space, map any VA ranges for which + // this GPU is a UVM-Lite GPU or has accessed_by set. 
+ status = uvm_va_block_set_accessed_by(va_block, va_block_context, gpu->id); + if (status != NV_OK) + break; + } + + if (should_disable_read_duplication) { + status = uvm_va_block_unset_read_duplication(va_block, va_block_context); + if (status != NV_OK) + break; + } + } + } + + return status; +} + +static NV_STATUS va_range_add_gpu_va_space_semaphore_pool(uvm_va_range_t *va_range, uvm_gpu_t *gpu) +{ + uvm_mem_gpu_mapping_attrs_t *attrs; + + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL); + UVM_ASSERT(uvm_mem_mapped_on_gpu_kernel(va_range->semaphore_pool.mem, gpu)); + + attrs = &va_range->semaphore_pool.gpu_attrs[uvm_id_gpu_index(gpu->id)]; + + return uvm_mem_map_gpu_user(va_range->semaphore_pool.mem, + gpu, + va_range->va_space, + (void *)va_range->node.start, + attrs); +} + +NV_STATUS uvm_va_range_add_gpu_va_space(uvm_va_range_t *va_range, + uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm) +{ + UVM_ASSERT(va_range->type < UVM_VA_RANGE_TYPE_MAX); + + if (va_range->inject_add_gpu_va_space_error) { + va_range->inject_add_gpu_va_space_error = false; + return NV_ERR_NO_MEMORY; + } + + switch (va_range->type) { + case UVM_VA_RANGE_TYPE_MANAGED: + return va_range_add_gpu_va_space_managed(va_range, gpu_va_space, mm); + case UVM_VA_RANGE_TYPE_SEMAPHORE_POOL: + return va_range_add_gpu_va_space_semaphore_pool(va_range, gpu_va_space->gpu); + default: + return NV_OK; + } +} + +static void va_range_remove_gpu_va_space_managed(uvm_va_range_t *va_range, + uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm) +{ + uvm_va_block_t *va_block; + uvm_va_space_t *va_space = va_range->va_space; + bool should_enable_read_duplicate; + uvm_va_block_context_t *va_block_context = uvm_va_space_block_context(va_space, mm); + + va_block_context->policy = uvm_va_range_get_policy(va_range); + should_enable_read_duplicate = + uvm_va_range_get_policy(va_range)->read_duplication == UVM_READ_DUPLICATION_ENABLED && + uvm_va_space_can_read_duplicate(va_space, NULL) != uvm_va_space_can_read_duplicate(va_space, gpu_va_space->gpu); + + for_each_va_block_in_va_range(va_range, va_block) { + uvm_mutex_lock(&va_block->lock); + uvm_va_block_remove_gpu_va_space(va_block, gpu_va_space, mm); + uvm_mutex_unlock(&va_block->lock); + + if (should_enable_read_duplicate) + uvm_va_block_set_read_duplication(va_block, va_block_context); + } +} + +static void va_range_remove_gpu_va_space_external(uvm_va_range_t *va_range, + uvm_gpu_t *gpu, + struct list_head *deferred_free_list) +{ + uvm_ext_gpu_range_tree_t *range_tree; + uvm_ext_gpu_map_t *ext_map, *ext_map_next; + + UVM_ASSERT(deferred_free_list); + + range_tree = uvm_ext_gpu_range_tree(va_range, gpu); + uvm_mutex_lock(&range_tree->lock); + + uvm_ext_gpu_map_for_each_safe(ext_map, ext_map_next, va_range, gpu) + uvm_ext_gpu_map_destroy(va_range, ext_map, deferred_free_list); + + uvm_mutex_unlock(&range_tree->lock); +} + +static void va_range_remove_gpu_va_space_semaphore_pool(uvm_va_range_t *va_range, uvm_gpu_t *gpu) +{ + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL); + + + + + + + + uvm_mem_unmap_gpu_user(va_range->semaphore_pool.mem, gpu); + +} + +void uvm_va_range_remove_gpu_va_space(uvm_va_range_t *va_range, + uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm, + struct list_head *deferred_free_list) +{ + switch (va_range->type) { + case UVM_VA_RANGE_TYPE_MANAGED: + va_range_remove_gpu_va_space_managed(va_range, gpu_va_space, mm); + break; + case UVM_VA_RANGE_TYPE_EXTERNAL: + va_range_remove_gpu_va_space_external(va_range, 
gpu_va_space->gpu, deferred_free_list); + break; + case UVM_VA_RANGE_TYPE_CHANNEL: + // All channels under this GPU VA space should've been removed before + // removing the GPU VA space. + UVM_ASSERT(va_range->channel.gpu_va_space != gpu_va_space); + break; + case UVM_VA_RANGE_TYPE_SKED_REFLECTED: + if (va_range->sked_reflected.gpu_va_space == gpu_va_space) + uvm_va_range_destroy_sked_reflected(va_range); + break; + case UVM_VA_RANGE_TYPE_SEMAPHORE_POOL: + va_range_remove_gpu_va_space_semaphore_pool(va_range, gpu_va_space->gpu); + break; + default: + UVM_ASSERT_MSG(0, "[0x%llx, 0x%llx] has type %d\n", + va_range->node.start, va_range->node.end, va_range->type); + } +} + +static NV_STATUS uvm_va_range_enable_peer_managed(uvm_va_range_t *va_range, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + NV_STATUS status; + uvm_va_block_t *va_block; + bool gpu0_accessed_by = uvm_processor_mask_test(&uvm_va_range_get_policy(va_range)->accessed_by, gpu0->id); + bool gpu1_accessed_by = uvm_processor_mask_test(&uvm_va_range_get_policy(va_range)->accessed_by, gpu1->id); + uvm_va_space_t *va_space = va_range->va_space; + uvm_va_block_context_t *va_block_context = uvm_va_space_block_context(va_space, NULL); + + va_block_context->policy = uvm_va_range_get_policy(va_range); + + for_each_va_block_in_va_range(va_range, va_block) { + // TODO: Bug 1767224: Refactor the uvm_va_block_set_accessed_by logic + // into uvm_va_block_enable_peer. + uvm_mutex_lock(&va_block->lock); + status = uvm_va_block_enable_peer(va_block, gpu0, gpu1); + uvm_mutex_unlock(&va_block->lock); + + if (status != NV_OK) + return status; + + // For UVM-Lite at most one GPU needs to map the peer GPU if it's the + // preferred location, but it doesn't hurt to just try mapping both. + if (gpu0_accessed_by) { + status = uvm_va_block_set_accessed_by(va_block, + va_block_context, + gpu0->id); + if (status != NV_OK) + return status; + } + + if (gpu1_accessed_by) { + status = uvm_va_block_set_accessed_by(va_block, + va_block_context, + gpu1->id); + if (status != NV_OK) + return status; + } + } + + return NV_OK; +} + +NV_STATUS uvm_va_range_enable_peer(uvm_va_range_t *va_range, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + switch (va_range->type) { + case UVM_VA_RANGE_TYPE_MANAGED: + return uvm_va_range_enable_peer_managed(va_range, gpu0, gpu1); + case UVM_VA_RANGE_TYPE_EXTERNAL: + // UVM_VA_RANGE_TYPE_EXTERNAL doesn't create new mappings when enabling peer access + return NV_OK; + case UVM_VA_RANGE_TYPE_CHANNEL: + // UVM_VA_RANGE_TYPE_CHANNEL should never have peer mappings + return NV_OK; + case UVM_VA_RANGE_TYPE_SKED_REFLECTED: + // UVM_VA_RANGE_TYPE_SKED_REFLECTED should never have peer mappings + return NV_OK; + case UVM_VA_RANGE_TYPE_SEMAPHORE_POOL: + // UVM_VA_RANGE_TYPE_SEMAPHORE_POOL should never have peer mappings + return NV_OK; + default: + UVM_ASSERT_MSG(0, "[0x%llx, 0x%llx] has type %d\n", + va_range->node.start, va_range->node.end, va_range->type); + return NV_ERR_NOT_SUPPORTED; + } +} + +static void uvm_va_range_disable_peer_external(uvm_va_range_t *va_range, + uvm_gpu_t *mapping_gpu, + uvm_gpu_t *owning_gpu, + struct list_head *deferred_free_list) +{ + uvm_ext_gpu_range_tree_t *range_tree; + uvm_ext_gpu_map_t *ext_map, *ext_map_next; + + range_tree = uvm_ext_gpu_range_tree(va_range, mapping_gpu); + uvm_mutex_lock(&range_tree->lock); + uvm_ext_gpu_map_for_each_safe(ext_map, ext_map_next, va_range, mapping_gpu) { + if (ext_map->owning_gpu == owning_gpu && !ext_map->is_sysmem) { + UVM_ASSERT(deferred_free_list); + uvm_ext_gpu_map_destroy(va_range, 
ext_map, deferred_free_list); + } + } + uvm_mutex_unlock(&range_tree->lock); +} + +static void uvm_va_range_disable_peer_managed(uvm_va_range_t *va_range, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + uvm_va_block_t *va_block; + uvm_gpu_t *uvm_lite_gpu_to_unmap = NULL; + + bool uvm_lite_mode = uvm_processor_mask_test(&va_range->uvm_lite_gpus, gpu0->id) && + uvm_processor_mask_test(&va_range->uvm_lite_gpus, gpu1->id); + + if (uvm_lite_mode) { + // In UVM-Lite mode, the UVM-Lite GPUs can only have mappings to the the + // preferred location. If peer mappings are being disabled to the + // preferred location, then unmap the other GPU. + // Nothing to do otherwise. + if (uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, gpu0->id)) + uvm_lite_gpu_to_unmap = gpu1; + else if (uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, gpu1->id)) + uvm_lite_gpu_to_unmap = gpu0; + else + return; + } + + for_each_va_block_in_va_range(va_range, va_block) { + uvm_mutex_lock(&va_block->lock); + if (uvm_lite_mode) + uvm_va_block_unmap_preferred_location_uvm_lite(va_block, uvm_lite_gpu_to_unmap); + else + uvm_va_block_disable_peer(va_block, gpu0, gpu1); + uvm_mutex_unlock(&va_block->lock); + } + + if (uvm_lite_mode && !uvm_range_group_all_migratable(va_range->va_space, va_range->node.start, va_range->node.end)) { + UVM_ASSERT(uvm_lite_gpu_to_unmap); + + // Migration is prevented, but we had to unmap a UVM-Lite GPU. Update + // the accessed by and UVM-Lite GPUs masks as it cannot be considered a + // UVM-Lite GPU any more. + uvm_va_range_unset_accessed_by(va_range, uvm_lite_gpu_to_unmap->id, NULL); + } +} + +void uvm_va_range_disable_peer(uvm_va_range_t *va_range, + uvm_gpu_t *gpu0, + uvm_gpu_t *gpu1, + struct list_head *deferred_free_list) +{ + + switch (va_range->type) { + case UVM_VA_RANGE_TYPE_MANAGED: + uvm_va_range_disable_peer_managed(va_range, gpu0, gpu1); + break; + case UVM_VA_RANGE_TYPE_EXTERNAL: + // If GPU 0 has a mapping to GPU 1, remove GPU 0's mapping + uvm_va_range_disable_peer_external(va_range, gpu0, gpu1, deferred_free_list); + // If GPU 1 has a mapping to GPU 0, remove GPU 1's mapping + uvm_va_range_disable_peer_external(va_range, gpu1, gpu0, deferred_free_list); + break; + case UVM_VA_RANGE_TYPE_CHANNEL: + // UVM_VA_RANGE_TYPE_CHANNEL should never have peer mappings + break; + case UVM_VA_RANGE_TYPE_SKED_REFLECTED: + // UVM_VA_RANGE_TYPE_SKED_REFLECTED should never have peer mappings + break; + case UVM_VA_RANGE_TYPE_SEMAPHORE_POOL: + // UVM_VA_RANGE_TYPE_SEMAPHORE_POOL should never have peer mappings + break; + default: + UVM_ASSERT_MSG(0, "[0x%llx, 0x%llx] has type %d\n", + va_range->node.start, va_range->node.end, va_range->type); + } +} + +static NV_STATUS va_range_register_gpu_semaphore_pool(uvm_va_range_t *va_range, uvm_gpu_t *gpu) +{ + // TODO: Bug 1812419: pass GPU mapping attributes to uvm_mem_map_gpu_kernel + // once that function accepts them. 
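+    // Note the split between GPU register and GPU VA space register: the
+    // kernel-internal mapping below is created as soon as the GPU is
+    // registered, while the user-space mapping is deferred to
+    // va_range_add_gpu_va_space_semaphore_pool() once a GPU VA space is added.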
+ return uvm_mem_map_gpu_kernel(va_range->semaphore_pool.mem, gpu); +} + +NV_STATUS uvm_va_range_register_gpu(uvm_va_range_t *va_range, uvm_gpu_t *gpu) +{ + UVM_ASSERT(va_range->type < UVM_VA_RANGE_TYPE_MAX); + uvm_assert_rwsem_locked_write(&va_range->va_space->lock); + + if (va_range->type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL) + return va_range_register_gpu_semaphore_pool(va_range, gpu); + + return NV_OK; +} + +static void va_range_unregister_gpu_managed(uvm_va_range_t *va_range, uvm_gpu_t *gpu, struct mm_struct *mm) +{ + uvm_va_block_t *va_block; + + // Reset preferred location and accessed-by of VA ranges if needed + // Note: ignoring the return code of uvm_va_range_set_preferred_location since this + // will only return on error when setting a preferred location, not on a reset + if (uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, gpu->id)) + (void)uvm_va_range_set_preferred_location(va_range, UVM_ID_INVALID, mm, NULL); + + uvm_va_range_unset_accessed_by(va_range, gpu->id, NULL); + + // Migrate and free any remaining resident allocations on this GPU + for_each_va_block_in_va_range(va_range, va_block) + uvm_va_block_unregister_gpu(va_block, gpu, mm); +} + +// The GPU being unregistered can't have any remaining mappings, since those +// were removed when the corresponding GPU VA space was removed. However, other +// GPUs could still have mappings to memory resident on this GPU, so we have to +// unmap those. +static void va_range_unregister_gpu_external(uvm_va_range_t *va_range, + uvm_gpu_t *gpu, + struct list_head *deferred_free_list) +{ + uvm_ext_gpu_map_t *ext_map, *ext_map_next; + uvm_gpu_t *other_gpu; + + for_each_va_space_gpu_in_mask(other_gpu, va_range->va_space, &va_range->external.mapped_gpus) { + uvm_ext_gpu_range_tree_t *range_tree = uvm_ext_gpu_range_tree(va_range, other_gpu); + UVM_ASSERT(other_gpu != gpu); + + uvm_mutex_lock(&range_tree->lock); + uvm_ext_gpu_map_for_each_safe(ext_map, ext_map_next, va_range, other_gpu) { + if (ext_map->owning_gpu == gpu) { + UVM_ASSERT(deferred_free_list); + uvm_ext_gpu_map_destroy(va_range, ext_map, deferred_free_list); + } + } + uvm_mutex_unlock(&range_tree->lock); + } +} + +static void va_range_unregister_gpu_semaphore_pool(uvm_va_range_t *va_range, uvm_gpu_t *gpu) +{ + NV_STATUS status; + + // Ranges for this GPU should have been previously unmapped from the user VA + // space during GPU VA space unregister, which should have already happened. 
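+    // At this point only the kernel-internal mapping should remain. The code
+    // below waits for any outstanding GPU work tracked for the pool, unmaps
+    // the physical backing from this GPU, and restores the default mapping
+    // attributes, clearing the cached-owner GPU if it was this one.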
+ UVM_ASSERT(!uvm_mem_mapped_on_gpu_user(va_range->semaphore_pool.mem, gpu)); + UVM_ASSERT(uvm_mem_mapped_on_gpu_kernel(va_range->semaphore_pool.mem, gpu)); + + uvm_mutex_lock(&va_range->semaphore_pool.tracker_lock); + status = uvm_tracker_wait(&va_range->semaphore_pool.tracker); + uvm_mutex_unlock(&va_range->semaphore_pool.tracker_lock); + if (status != NV_OK) + UVM_ASSERT(status == uvm_global_get_status()); + + uvm_mem_unmap_gpu_phys(va_range->semaphore_pool.mem, gpu); + + va_range->semaphore_pool.gpu_attrs[uvm_id_gpu_index(gpu->id)] = va_range->semaphore_pool.default_gpu_attrs; + if (va_range->semaphore_pool.owner == gpu) + va_range->semaphore_pool.owner = NULL; +} + +void uvm_va_range_unregister_gpu(uvm_va_range_t *va_range, + uvm_gpu_t *gpu, + struct mm_struct *mm, + struct list_head *deferred_free_list) +{ + switch (va_range->type) { + case UVM_VA_RANGE_TYPE_MANAGED: + va_range_unregister_gpu_managed(va_range, gpu, mm); + break; + case UVM_VA_RANGE_TYPE_EXTERNAL: + va_range_unregister_gpu_external(va_range, gpu, deferred_free_list); + break; + case UVM_VA_RANGE_TYPE_CHANNEL: + // All ranges should have been destroyed by GPU VA space unregister, + // which should have already happened. + UVM_ASSERT(va_range->channel.gpu_va_space->gpu != gpu); + break; + case UVM_VA_RANGE_TYPE_SKED_REFLECTED: + // All ranges for this GPU should have been unmapped by GPU VA space + // unregister (uvm_va_range_destroy_sked_reflected), which should + // have already happened. + if (va_range->sked_reflected.gpu_va_space != NULL) + UVM_ASSERT(va_range->sked_reflected.gpu_va_space->gpu != gpu); + break; + case UVM_VA_RANGE_TYPE_SEMAPHORE_POOL: + va_range_unregister_gpu_semaphore_pool(va_range, gpu); + break; + default: + UVM_ASSERT_MSG(0, "[0x%llx, 0x%llx] has type %d\n", + va_range->node.start, va_range->node.end, va_range->type); + } +} + +// Split existing's blocks into new. new's blocks array has already been +// allocated. This is called before existing's range node is split, so it +// overlaps new. new is always in the upper region of existing. +// +// The caller will do the range tree split. +// +// If this fails it leaves existing unchanged. +static NV_STATUS uvm_va_range_split_blocks(uvm_va_range_t *existing, uvm_va_range_t *new) +{ + uvm_va_block_t *old_block, *block = NULL; + size_t existing_blocks, split_index, new_index = 0; + NV_STATUS status; + + UVM_ASSERT(new->node.start > existing->node.start); + UVM_ASSERT(new->node.end <= existing->node.end); + + split_index = uvm_va_range_block_index(existing, new->node.start); + + // Handle a block spanning the split point + if (block_calc_start(existing, split_index) != new->node.start) { + // If a populated block actually spans the split point, we have to split + // the block. Otherwise just account for the extra entry in the arrays. + old_block = uvm_va_range_block(existing, split_index); + if (old_block) { + UVM_ASSERT(old_block->start < new->node.start); + status = uvm_va_block_split(old_block, new->node.start - 1, &block, new); + if (status != NV_OK) + return status; + + // No memory barrier is needed since we're holding the va_space lock in + // write mode, so no other thread can access the blocks array. + atomic_long_set(&new->blocks[0], (long)block); + } + + new_index = 1; + } + + // uvm_va_block_split gets first crack at injecting an error. If it did so, + // we wouldn't be here. However, not all splits will call uvm_va_block_split + // so we need an extra check here. 
We can't push this injection later since + // all paths past this point assume success, so they modify existing's + // state. + if (existing->inject_split_error) { + UVM_ASSERT(!block); + existing->inject_split_error = false; + return NV_ERR_NO_MEMORY; + } + + existing_blocks = split_index + new_index; + + // Copy existing's blocks over to new, accounting for the explicit + // assignment above in case we did a block split. There are two general + // cases: + // + // No split: + // split_index + // v + // existing (before) [----- A ----][----- B ----][----- C ----] + // existing (after) [----- A ----] + // new [----- B ----][----- C ----] + // + // Split: + // split_index + // v + // existing (before) [----- A ----][----- B ----][----- C ----] + // existing (after [----- A ----][- B -] + // new [- N -][----- C ----] + // ^new->blocks[0] + + // Note, if we split the last block of existing, this won't iterate at all. + for (; new_index < uvm_va_range_num_blocks(new); new_index++) { + block = uvm_va_range_block(existing, split_index + new_index); + if (!block) { + // new's array was cleared at allocation + UVM_ASSERT(uvm_va_range_block(new, new_index) == NULL); + continue; + } + + // As soon as we make this assignment and drop the lock, the reverse + // mapping code can start looking at new, so new must be ready to go. + uvm_mutex_lock(&block->lock); + UVM_ASSERT(block->va_range == existing); + block->va_range = new; + uvm_mutex_unlock(&block->lock); + + // No memory barrier is needed since we're holding the va_space lock in + // write mode, so no other thread can access the blocks array. + atomic_long_set(&new->blocks[new_index], (long)block); + atomic_long_set(&existing->blocks[split_index + new_index], (long)NULL); + } + + blocks_array_shrink(existing, existing_blocks); + + return NV_OK; +} + +NV_STATUS uvm_va_range_split(uvm_va_range_t *existing_va_range, + NvU64 new_end, + uvm_va_range_t **new_va_range) +{ + uvm_va_space_t *va_space = existing_va_range->va_space; + uvm_va_range_t *new = NULL; + uvm_perf_event_data_t event_data; + NV_STATUS status; + + UVM_ASSERT(existing_va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + UVM_ASSERT(new_end > existing_va_range->node.start); + UVM_ASSERT(new_end < existing_va_range->node.end); + UVM_ASSERT(PAGE_ALIGNED(new_end + 1)); + uvm_assert_rwsem_locked_write(&va_space->lock); + + new = uvm_va_range_alloc_managed(va_space, new_end + 1, existing_va_range->node.end); + if (!new) { + status = NV_ERR_NO_MEMORY; + goto error; + } + + // The new va_range is under the same vma. If this is a uvm_vm_open, the + // caller takes care of updating existing's vma_wrapper for us. + new->managed.vma_wrapper = existing_va_range->managed.vma_wrapper; + + // Copy over state before splitting blocks so any block lookups happening + // concurrently on the eviction path will see the new range's data. 
+ uvm_va_range_get_policy(new)->read_duplication = uvm_va_range_get_policy(existing_va_range)->read_duplication; + uvm_va_range_get_policy(new)->preferred_location = uvm_va_range_get_policy(existing_va_range)->preferred_location; + uvm_processor_mask_copy(&uvm_va_range_get_policy(new)->accessed_by, + &uvm_va_range_get_policy(existing_va_range)->accessed_by); + uvm_processor_mask_copy(&new->uvm_lite_gpus, &existing_va_range->uvm_lite_gpus); + + status = uvm_va_range_split_blocks(existing_va_range, new); + if (status != NV_OK) + goto error; + + // Finally, update the VA range tree + uvm_range_tree_split(&va_space->va_range_tree, &existing_va_range->node, &new->node); + + if (new->type == UVM_VA_RANGE_TYPE_MANAGED) { + event_data.range_shrink.range = new; + uvm_perf_event_notify(&va_space->perf_events, UVM_PERF_EVENT_RANGE_SHRINK, &event_data); + } + + if (new_va_range) + *new_va_range = new; + return NV_OK; + +error: + uvm_va_range_destroy(new, NULL); + return status; + +} + +static inline uvm_va_range_t *uvm_va_range_container(uvm_range_tree_node_t *node) +{ + if (!node) + return NULL; + return container_of(node, uvm_va_range_t, node); +} + +uvm_va_range_t *uvm_va_range_find(uvm_va_space_t *va_space, NvU64 addr) +{ + uvm_assert_rwsem_locked(&va_space->lock); + return uvm_va_range_container(uvm_range_tree_find(&va_space->va_range_tree, addr)); +} + +uvm_va_range_t *uvm_va_space_iter_first(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ + uvm_range_tree_node_t *node = uvm_range_tree_iter_first(&va_space->va_range_tree, start, end); + return uvm_va_range_container(node); +} + +uvm_va_range_t *uvm_va_space_iter_next(uvm_va_range_t *va_range, NvU64 end) +{ + uvm_range_tree_node_t *node; + + // Handling a NULL va_range here makes uvm_for_each_va_range_in_safe much + // less messy + if (!va_range) + return NULL; + + node = uvm_range_tree_iter_next(&va_range->va_space->va_range_tree, &va_range->node, end); + return uvm_va_range_container(node); +} + +size_t uvm_va_range_num_blocks(uvm_va_range_t *va_range) +{ + NvU64 start = UVM_VA_BLOCK_ALIGN_DOWN(va_range->node.start); + NvU64 end = UVM_VA_BLOCK_ALIGN_UP(va_range->node.end); // End is inclusive + return (end - start) / UVM_VA_BLOCK_SIZE; +} + +size_t uvm_va_range_block_index(uvm_va_range_t *va_range, NvU64 addr) +{ + size_t addr_index, start_index, index; + + UVM_ASSERT(addr >= va_range->node.start); + UVM_ASSERT(addr <= va_range->node.end); + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + + // Each block will cover as much space as possible within the aligned + // UVM_VA_BLOCK_SIZE, up to the parent VA range boundaries. In other words, + // the entire VA space can be broken into UVM_VA_BLOCK_SIZE chunks. Even if + // there are multiple ranges (and thus multiple blocks) per actual + // UVM_VA_BLOCK_SIZE chunk, none of those will have more than 1 block unless + // they span a UVM_VA_BLOCK_SIZE alignment boundary. + addr_index = (size_t)(addr / UVM_VA_BLOCK_SIZE); + start_index = (size_t)(va_range->node.start / UVM_VA_BLOCK_SIZE); + + index = addr_index - start_index; + UVM_ASSERT(index < uvm_va_range_num_blocks(va_range)); + return index; +} + +NV_STATUS uvm_va_range_block_create(uvm_va_range_t *va_range, size_t index, uvm_va_block_t **out_block) +{ + uvm_va_block_t *block, *old; + NV_STATUS status; + + block = uvm_va_range_block(va_range, index); + if (!block) { + // No block has been created here yet, so allocate one and attempt to + // insert it. 
Note that this runs the risk of an out-of-memory error + // when multiple threads race and all concurrently allocate a block for + // the same address. This should be extremely rare. There is also + // precedent in the Linux kernel, which does the same thing for demand- + // allocation of anonymous pages. + status = uvm_va_block_create(va_range, + block_calc_start(va_range, index), + block_calc_end(va_range, index), + &block); + if (status != NV_OK) + return status; + + // Try to insert it + old = (uvm_va_block_t *)nv_atomic_long_cmpxchg(&va_range->blocks[index], + (long)NULL, + (long)block); + if (old) { + // Someone else beat us on the insert + uvm_va_block_release(block); + block = old; + } + } + + *out_block = block; + return NV_OK; +} + +uvm_va_block_t *uvm_va_range_block_next(uvm_va_range_t *va_range, uvm_va_block_t *va_block) +{ + uvm_va_space_t *va_space = va_range->va_space; + size_t i = 0; + + uvm_assert_rwsem_locked(&va_space->lock); + + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + + if (va_block) + i = uvm_va_range_block_index(va_range, va_block->start) + 1; + + for (; i < uvm_va_range_num_blocks(va_range); i++) { + va_block = uvm_va_range_block(va_range, i); + if (va_block) { + UVM_ASSERT(va_block->va_range == va_range); + UVM_ASSERT(uvm_va_range_block_index(va_range, va_block->start) == i); + return va_block; + } + } + + return NULL; +} + +static NV_STATUS range_unmap_mask(uvm_va_range_t *va_range, + const uvm_processor_mask_t *mask, + uvm_tracker_t *out_tracker) +{ + uvm_va_space_t *va_space = va_range->va_space; + uvm_va_block_context_t *block_context = uvm_va_space_block_context(va_space, NULL); + uvm_va_block_t *block; + + UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_MANAGED, "type 0x%x\n", va_range->type); + + if (uvm_processor_mask_empty(mask)) + return NV_OK; + + block_context->policy = uvm_va_range_get_policy(va_range); + + for_each_va_block_in_va_range(va_range, block) { + NV_STATUS status; + uvm_va_block_region_t region = uvm_va_block_region_from_block(block); + + uvm_mutex_lock(&block->lock); + status = uvm_va_block_unmap_mask(block, block_context, mask, region, NULL); + if (out_tracker) + uvm_tracker_add_tracker_safe(out_tracker, &block->tracker); + + uvm_mutex_unlock(&block->lock); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +static NV_STATUS range_unmap(uvm_va_range_t *va_range, uvm_processor_id_t processor, uvm_tracker_t *out_tracker) +{ + uvm_processor_mask_t mask; + + UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_MANAGED, "type 0x%x\n", va_range->type); + + uvm_processor_mask_zero(&mask); + uvm_processor_mask_set(&mask, processor); + + return range_unmap_mask(va_range, &mask, out_tracker); +} + +static NV_STATUS range_map_uvm_lite_gpus(uvm_va_range_t *va_range, uvm_tracker_t *out_tracker) +{ + NV_STATUS status = NV_OK; + uvm_va_block_t *va_block; + uvm_va_block_context_t *va_block_context = uvm_va_space_block_context(va_range->va_space, NULL); + + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + + if (uvm_processor_mask_empty(&va_range->uvm_lite_gpus)) + return NV_OK; + + va_block_context->policy = uvm_va_range_get_policy(va_range); + + for_each_va_block_in_va_range(va_range, va_block) { + // UVM-Lite GPUs always map with RWA + uvm_mutex_lock(&va_block->lock); + status = UVM_VA_BLOCK_RETRY_LOCKED(va_block, NULL, + uvm_va_block_map_mask(va_block, + va_block_context, + &va_range->uvm_lite_gpus, + uvm_va_block_region_from_block(va_block), + NULL, + UVM_PROT_READ_WRITE_ATOMIC, + 
UvmEventMapRemoteCauseCoherence)); + if (status == NV_OK && out_tracker) + status = uvm_tracker_add_tracker(out_tracker, &va_block->tracker); + + uvm_mutex_unlock(&va_block->lock); + if (status != NV_OK) + break; + } + + return status; +} + +// Calculate the mask of GPUs that should follow the UVM-Lite behaviour +static void calc_uvm_lite_gpus_mask(uvm_va_space_t *va_space, + uvm_processor_id_t preferred_location, + const uvm_processor_mask_t *accessed_by_mask, + uvm_processor_mask_t *uvm_lite_gpus) +{ + uvm_gpu_id_t gpu_id; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + // Zero out the mask first + uvm_processor_mask_zero(uvm_lite_gpus); + + // If no preferred location is set then there are no GPUs following the UVM-Lite behavior + if (UVM_ID_IS_INVALID(preferred_location)) + return; + + // If the preferred location is a faultable GPU, then no GPUs should follow + // the UVM-Lite behaviour. + if (UVM_ID_IS_GPU(preferred_location) && + uvm_processor_mask_test(&va_space->faultable_processors, preferred_location)) { + return; + } + + // Otherwise add all non-faultable GPUs to the UVM-Lite mask that have + // accessed by set. + for_each_gpu_id_in_mask(gpu_id, accessed_by_mask) { + if (!uvm_processor_mask_test(&va_space->faultable_processors, gpu_id)) + uvm_processor_mask_set(uvm_lite_gpus, gpu_id); + } + + // And the preferred location if it's a GPU + if (UVM_ID_IS_GPU(preferred_location)) + uvm_processor_mask_set(uvm_lite_gpus, preferred_location); +} + +// Update the mask of GPUs that follow the UVM-Lite behaviour +static void range_update_uvm_lite_gpus_mask(uvm_va_range_t *va_range) +{ + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + calc_uvm_lite_gpus_mask(va_range->va_space, + uvm_va_range_get_policy(va_range)->preferred_location, + &uvm_va_range_get_policy(va_range)->accessed_by, + &va_range->uvm_lite_gpus); +} + +NV_STATUS uvm_va_range_set_preferred_location(uvm_va_range_t *va_range, + uvm_processor_id_t preferred_location, + struct mm_struct *mm, + uvm_tracker_t *out_tracker) +{ + NV_STATUS status; + uvm_processor_mask_t all_uvm_lite_gpus; + uvm_processor_mask_t new_uvm_lite_gpus; + uvm_processor_mask_t set_accessed_by_processors; + uvm_range_group_range_iter_t iter; + uvm_range_group_range_t *rgr = NULL; + uvm_va_space_t *va_space = va_range->va_space; + uvm_va_block_t *va_block; + uvm_va_block_context_t *va_block_context; + + uvm_assert_rwsem_locked_write(&va_space->lock); + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + + if (uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, preferred_location)) + return NV_OK; + + // Mark all range group ranges within this VA range as migrated since the preferred location has changed. + uvm_range_group_for_each_range_in(rgr, va_space, va_range->node.start, va_range->node.end) { + uvm_spin_lock(&rgr->range_group->migrated_ranges_lock); + if (list_empty(&rgr->range_group_migrated_list_node)) + list_move_tail(&rgr->range_group_migrated_list_node, &rgr->range_group->migrated_ranges); + uvm_spin_unlock(&rgr->range_group->migrated_ranges_lock); + } + + // Calculate the new UVM-Lite GPUs mask, but don't update va_range state so + // that we can keep block_page_check_mappings() happy while updating the + // mappings. + calc_uvm_lite_gpus_mask(va_space, + preferred_location, + &uvm_va_range_get_policy(va_range)->accessed_by, + &new_uvm_lite_gpus); + + // If the range contains non-migratable range groups, check that new UVM-Lite GPUs + // can all map the new preferred location. 
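+    // For example, if the new preferred location is the vidmem of a GPU that
+    // one of the new UVM-Lite (non-faultable, accessed-by) GPUs cannot map,
+    // the request is rejected with NV_ERR_INVALID_DEVICE rather than leaving a
+    // non-migratable range unmappable from that GPU.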
+ if (!uvm_range_group_all_migratable(va_space, va_range->node.start, va_range->node.end) && + UVM_ID_IS_VALID(preferred_location) && + !uvm_processor_mask_subset(&new_uvm_lite_gpus, &va_space->accessible_from[uvm_id_value(preferred_location)])) { + return NV_ERR_INVALID_DEVICE; + } + + if (UVM_ID_IS_INVALID(preferred_location)) { + uvm_range_group_for_each_migratability_in_safe(&iter, + va_space, + va_range->node.start, + va_range->node.end) { + if (!iter.migratable) { + // Clear the range group assocation for any unmigratable ranges if there is no preferred location + status = uvm_range_group_assign_range(va_space, NULL, iter.start, iter.end); + if (status != NV_OK) + return status; + } + } + } + + // Unmap all old and new UVM-Lite GPUs + // - GPUs that stop being UVM-Lite need to be unmapped so that they don't + // have stale mappings to the old preferred location. + // - GPUs that will continue to be UVM-Lite GPUs or are new UVM-Lite GPUs + // need to be unmapped so that the new preferred location can be mapped. + uvm_processor_mask_or(&all_uvm_lite_gpus, &va_range->uvm_lite_gpus, &new_uvm_lite_gpus); + status = range_unmap_mask(va_range, &all_uvm_lite_gpus, out_tracker); + if (status != NV_OK) + return status; + + // GPUs that stop being UVM-Lite, but are in the accessed_by mask need to + // have any possible mappings established. + uvm_processor_mask_andnot(&set_accessed_by_processors, &va_range->uvm_lite_gpus, &new_uvm_lite_gpus); + + // A GPU which had been in UVM-Lite mode before must still be in UVM-Lite + // mode if it is the new preferred location. Otherwise we'd have to be more + // careful below to not establish remote mappings to the new preferred + // location. + if (UVM_ID_IS_GPU(preferred_location)) + UVM_ASSERT(!uvm_processor_mask_test(&set_accessed_by_processors, preferred_location)); + + // The old preferred location should establish new remote mappings if it has + // accessed-by set. + if (UVM_ID_IS_VALID(uvm_va_range_get_policy(va_range)->preferred_location)) + uvm_processor_mask_set(&set_accessed_by_processors, uvm_va_range_get_policy(va_range)->preferred_location); + + uvm_processor_mask_and(&set_accessed_by_processors, + &set_accessed_by_processors, + &uvm_va_range_get_policy(va_range)->accessed_by); + + // Now update the va_range state + uvm_va_range_get_policy(va_range)->preferred_location = preferred_location; + uvm_processor_mask_copy(&va_range->uvm_lite_gpus, &new_uvm_lite_gpus); + + va_block_context = uvm_va_space_block_context(va_space, mm); + va_block_context->policy = uvm_va_range_get_policy(va_range); + + for_each_va_block_in_va_range(va_range, va_block) { + uvm_processor_id_t id; + + for_each_id_in_mask(id, &set_accessed_by_processors) { + status = uvm_va_block_set_accessed_by(va_block, va_block_context, id); + if (status != NV_OK) + return status; + } + + // Also, mark CPU pages as dirty and remove remote mappings from the new + // preferred location + uvm_mutex_lock(&va_block->lock); + status = UVM_VA_BLOCK_RETRY_LOCKED(va_block, + NULL, + uvm_va_block_set_preferred_location_locked(va_block, va_block_context)); + + if (out_tracker) + uvm_tracker_add_tracker_safe(out_tracker, &va_block->tracker); + + uvm_mutex_unlock(&va_block->lock); + + if (status != NV_OK) + return status; + + } + + // And lastly map all of the current UVM-Lite GPUs to the resident pages on + // the new preferred location. Anything that's not resident right now will + // get mapped on the next PreventMigration(). 
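+    // range_map_uvm_lite_gpus() is a no-op when the UVM-Lite mask is empty,
+    // and otherwise maps each block with UVM_PROT_READ_WRITE_ATOMIC, so this
+    // call only establishes mappings when UVM-Lite GPUs are actually in use.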
+ return range_map_uvm_lite_gpus(va_range, out_tracker); +} + +NV_STATUS uvm_va_range_set_accessed_by(uvm_va_range_t *va_range, + uvm_processor_id_t processor_id, + struct mm_struct *mm, + uvm_tracker_t *out_tracker) +{ + NV_STATUS status; + uvm_va_block_t *va_block; + uvm_processor_mask_t new_uvm_lite_gpus; + uvm_va_space_t *va_space = va_range->va_space; + uvm_va_policy_t *policy = uvm_va_range_get_policy(va_range); + uvm_va_block_context_t *va_block_context; + + // If the range belongs to a non-migratable range group and that processor_id is a non-faultable GPU, + // check it can map the preferred location + if (!uvm_range_group_all_migratable(va_space, va_range->node.start, va_range->node.end) && + UVM_ID_IS_GPU(processor_id) && + !uvm_processor_mask_test(&va_space->faultable_processors, processor_id) && + !uvm_processor_mask_test(&va_space->accessible_from[uvm_id_value(policy->preferred_location)], processor_id)) + return NV_ERR_INVALID_DEVICE; + + uvm_processor_mask_set(&policy->accessed_by, processor_id); + + // If a GPU is already a UVM-Lite GPU then there is nothing else to do. + if (uvm_processor_mask_test(&va_range->uvm_lite_gpus, processor_id)) + return NV_OK; + + // Calculate the new UVM-Lite GPUs mask, but don't update it in the va range + // yet so that we can keep block_page_check_mappings() happy while updating + // the mappings. + calc_uvm_lite_gpus_mask(va_space, policy->preferred_location, &policy->accessed_by, &new_uvm_lite_gpus); + + if (uvm_processor_mask_test(&new_uvm_lite_gpus, processor_id)) { + // GPUs that become UVM-Lite GPUs need to unmap everything so that they + // can map the preferred location. + status = range_unmap(va_range, processor_id, out_tracker); + if (status != NV_OK) + return status; + } + + uvm_processor_mask_copy(&va_range->uvm_lite_gpus, &new_uvm_lite_gpus); + va_block_context = uvm_va_space_block_context(va_space, mm); + va_block_context->policy = policy; + + for_each_va_block_in_va_range(va_range, va_block) { + status = uvm_va_block_set_accessed_by(va_block, va_block_context, processor_id); + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +void uvm_va_range_unset_accessed_by(uvm_va_range_t *va_range, + uvm_processor_id_t processor_id, + uvm_tracker_t *out_tracker) +{ + uvm_range_group_range_t *rgr = NULL; + + // Mark all range group ranges within this VA range as migrated. We do this to force + // uvm_range_group_set_migration_policy to re-check the policy state since we're changing it here. + uvm_range_group_for_each_range_in(rgr, va_range->va_space, va_range->node.start, va_range->node.end) { + uvm_spin_lock(&rgr->range_group->migrated_ranges_lock); + if (list_empty(&rgr->range_group_migrated_list_node)) + list_move_tail(&rgr->range_group_migrated_list_node, &rgr->range_group->migrated_ranges); + uvm_spin_unlock(&rgr->range_group->migrated_ranges_lock); + } + + uvm_processor_mask_clear(&uvm_va_range_get_policy(va_range)->accessed_by, processor_id); + + // If a UVM-Lite GPU is being removed from the accessed_by mask, it will + // also stop being a UVM-Lite GPU unless it's also the preferred location. 
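+    // For example, a non-faultable GPU that was UVM-Lite purely because it was
+    // in accessed_by gets unmapped here and drops out of the UVM-Lite mask
+    // below; if it is also the preferred location it keeps its mappings and
+    // remains UVM-Lite.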
+ if (uvm_processor_mask_test(&va_range->uvm_lite_gpus, processor_id) && + !uvm_id_equal(uvm_va_range_get_policy(va_range)->preferred_location, processor_id)) { + range_unmap(va_range, processor_id, out_tracker); + } + + range_update_uvm_lite_gpus_mask(va_range); +} + +NV_STATUS uvm_va_range_set_read_duplication(uvm_va_range_t *va_range, struct mm_struct *mm) +{ + uvm_va_block_t *va_block; + uvm_va_block_context_t *va_block_context; + + if (uvm_va_range_get_policy(va_range)->read_duplication == UVM_READ_DUPLICATION_ENABLED) + return NV_OK; + + va_block_context = uvm_va_space_block_context(va_range->va_space, mm); + va_block_context->policy = uvm_va_range_get_policy(va_range); + + for_each_va_block_in_va_range(va_range, va_block) { + NV_STATUS status = uvm_va_block_set_read_duplication(va_block, va_block_context); + + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +NV_STATUS uvm_va_range_unset_read_duplication(uvm_va_range_t *va_range, struct mm_struct *mm) +{ + uvm_va_block_t *va_block; + uvm_va_block_context_t *va_block_context; + NV_STATUS status; + + if (uvm_va_range_get_policy(va_range)->read_duplication == UVM_READ_DUPLICATION_DISABLED) + return NV_OK; + + va_block_context = uvm_va_space_block_context(va_range->va_space, mm); + va_block_context->policy = uvm_va_range_get_policy(va_range); + + for_each_va_block_in_va_range(va_range, va_block) { + status = uvm_va_block_unset_read_duplication(va_block, va_block_context); + + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +uvm_vma_wrapper_t *uvm_vma_wrapper_alloc(struct vm_area_struct *vma) +{ + uvm_vma_wrapper_t *vma_wrapper = nv_kmem_cache_zalloc(g_uvm_vma_wrapper_cache, NV_UVM_GFP_FLAGS); + if (!vma_wrapper) + return NULL; + + vma_wrapper->vma = vma; + uvm_init_rwsem(&vma_wrapper->lock, UVM_LOCK_ORDER_LEAF); + + return vma_wrapper; +} + +void uvm_vma_wrapper_destroy(uvm_vma_wrapper_t *vma_wrapper) +{ + if (!vma_wrapper) + return; + + uvm_assert_rwsem_unlocked(&vma_wrapper->lock); + + kmem_cache_free(g_uvm_vma_wrapper_cache, vma_wrapper); +} + +uvm_prot_t uvm_va_range_logical_prot(uvm_va_range_t *va_range) +{ + uvm_prot_t logical_prot; + struct vm_area_struct *vma; + + UVM_ASSERT(va_range); + UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_MANAGED, "type: %d\n", va_range->type); + + // Zombified VA ranges no longer have a vma, so they have no permissions + if (uvm_va_range_is_managed_zombie(va_range)) + return UVM_PROT_NONE; + + vma = uvm_va_range_vma(va_range); + + if (!(vma->vm_flags & VM_READ)) + logical_prot = UVM_PROT_NONE; + else if (!(vma->vm_flags & VM_WRITE)) + logical_prot = UVM_PROT_READ_ONLY; + else + logical_prot = UVM_PROT_READ_WRITE_ATOMIC; + + return logical_prot; +} + +static bool fault_check_range_permission(uvm_va_range_t *va_range, uvm_fault_access_type_t access_type) +{ + uvm_prot_t logical_prot = uvm_va_range_logical_prot(va_range); + uvm_prot_t fault_prot = uvm_fault_access_type_to_prot(access_type); + + return fault_prot <= logical_prot; +} + +NV_STATUS uvm_va_range_check_logical_permissions(uvm_va_range_t *va_range, + uvm_processor_id_t processor_id, + uvm_fault_type_t access_type, + bool allow_migration) +{ + // CPU permissions are checked later by block_map_cpu_page. + // + // TODO: Bug 1766124: permissions are checked by block_map_cpu_page because + // it can also be called from change_pte. Make change_pte call this + // function and only check CPU permissions here. 
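+    // As an illustration of the GPU-side check below: a vma with VM_READ but
+    // not VM_WRITE in vm_flags yields UVM_PROT_READ_ONLY from
+    // uvm_va_range_logical_prot() above, so with uvm_enable_builtin_tests set
+    // a GPU write or atomic fault on that range fails the check with
+    // NV_ERR_INVALID_ACCESS_TYPE.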
+ if (UVM_ID_IS_GPU(processor_id)) { + // Zombified VA ranges no longer have a vma, so they have no permissions + if (uvm_va_range_is_managed_zombie(va_range)) + return NV_ERR_INVALID_ADDRESS; + + // GPU faults only check vma permissions if uvm_enable_builtin_tests is + // set, because the Linux kernel can change vm_flags at any moment (for + // example on mprotect) and here we are not guaranteed to have + // vma->vm_mm->mmap_lock. During tests we ensure that this scenario + // does not happen + // + // TODO: Bug 1896799: On HMM/ATS we could look up the mm here and do + // this check safely. + if (uvm_enable_builtin_tests && !fault_check_range_permission(va_range, access_type)) + return NV_ERR_INVALID_ACCESS_TYPE; + } + + // Non-migratable range: + // - CPU accesses are always fatal, regardless of the VA range residency + // - GPU accesses are fatal if the GPU can't map the preferred location + if (!allow_migration) { + if (UVM_ID_IS_CPU(processor_id)) { + return NV_ERR_INVALID_OPERATION; + } + else { + uvm_processor_id_t preferred_location = uvm_va_range_get_policy(va_range)->preferred_location; + + return uvm_processor_mask_test(&va_range->va_space->accessible_from[uvm_id_value(preferred_location)], + processor_id) ? + NV_OK : NV_ERR_INVALID_ACCESS_TYPE; + } + } + + return NV_OK; +} + + + +static NvU64 sked_reflected_pte_maker(uvm_page_table_range_vec_t *range_vec, NvU64 offset, void *caller_data) +{ + (void)caller_data; + + return range_vec->tree->hal->make_sked_reflected_pte(); +} + +static NV_STATUS uvm_map_sked_reflected_range(uvm_va_space_t *va_space, UVM_MAP_DYNAMIC_PARALLELISM_REGION_PARAMS *params) +{ + NV_STATUS status; + uvm_va_range_t *va_range = NULL; + uvm_gpu_t *gpu; + uvm_gpu_va_space_t *gpu_va_space; + uvm_page_tree_t *page_tables; + struct mm_struct *mm; + + if (uvm_api_range_invalid_4k(params->base, params->length)) + return NV_ERR_INVALID_ADDRESS; + + // The mm needs to be locked in order to remove stale HMM va_blocks. 
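+    // Lock ordering below: the mm is retained and locked first, then the VA
+    // space lock is taken in write mode; they are dropped in the reverse order
+    // at the end of the function.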
+    mm = uvm_va_space_mm_retain_lock(va_space);
+    uvm_va_space_down_write(va_space);
+
+    gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, &params->gpuUuid);
+    if (!gpu) {
+        status = NV_ERR_INVALID_DEVICE;
+        goto done;
+    }
+
+    // Check if the GPU can access the VA
+    if (!uvm_gpu_can_address(gpu, params->base, params->length)) {
+        status = NV_ERR_OUT_OF_RANGE;
+        goto done;
+    }
+
+    gpu_va_space = va_space->gpu_va_spaces[uvm_id_gpu_index(gpu->id)];
+    page_tables = &gpu_va_space->page_tables;
+
+    // The VA range must exactly cover one supported GPU page
+    if (!is_power_of_2(params->length) ||
+        !IS_ALIGNED(params->base, params->length) ||
+        !uvm_mmu_page_size_supported(page_tables, params->length)) {
+        status = NV_ERR_INVALID_ADDRESS;
+        goto done;
+    }
+
+    status = uvm_va_range_create_sked_reflected(va_space, mm, params->base, params->length, &va_range);
+    if (status != NV_OK) {
+        UVM_DBG_PRINT_RL("Failed to create sked reflected VA range [0x%llx, 0x%llx)\n",
+                         params->base, params->base + params->length);
+        goto done;
+    }
+
+    va_range->sked_reflected.gpu_va_space = gpu_va_space;
+
+    status = uvm_page_table_range_vec_init(page_tables,
+                                           va_range->node.start,
+                                           uvm_va_range_size(va_range),
+                                           params->length,
+                                           UVM_PMM_ALLOC_FLAGS_EVICT,
+                                           &va_range->sked_reflected.pt_range_vec);
+    if (status != NV_OK)
+        goto done;
+
+    status = uvm_page_table_range_vec_write_ptes(&va_range->sked_reflected.pt_range_vec,
+                                                 UVM_MEMBAR_NONE, sked_reflected_pte_maker, NULL);
+
+    if (status != NV_OK)
+        goto done;
+
+done:
+    if (status != NV_OK && va_range != NULL)
+        uvm_va_range_destroy(va_range, NULL);
+
+    uvm_va_space_up_write(va_space);
+    uvm_va_space_mm_release_unlock(va_space, mm);
+
+    return status;
+}
+
+NV_STATUS uvm_api_map_dynamic_parallelism_region(UVM_MAP_DYNAMIC_PARALLELISM_REGION_PARAMS *params, struct file *filp)
+{
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+
+    // Notably the ranges created by the UvmMapDynamicParallelismRegion() API
+    // are referred to as "SKED reflected ranges" internally as it's more
+    // descriptive.
+    return uvm_map_sked_reflected_range(va_space, params);
+}
+
+NV_STATUS uvm_api_alloc_semaphore_pool(UVM_ALLOC_SEMAPHORE_POOL_PARAMS *params, struct file *filp)
+{
+    NV_STATUS status;
+    uvm_va_space_t *va_space = uvm_va_space_get(filp);
+    uvm_va_range_t *va_range = NULL;
+    uvm_gpu_t *gpu;
+    struct mm_struct *mm;
+
+    if (uvm_api_range_invalid(params->base, params->length))
+        return NV_ERR_INVALID_ADDRESS;
+    if (params->gpuAttributesCount > UVM_MAX_GPUS)
+        return NV_ERR_INVALID_ARGUMENT;
+
+
+
+
+
+    // The mm needs to be locked in order to remove stale HMM va_blocks.
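+    // Overall flow: create the semaphore pool VA range, then for each
+    // registered GPU map the pool into UVM's internal address space, and
+    // additionally into the user VA space for GPUs that already have a GPU VA
+    // space registered. Any failure destroys the partially set up range.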
+    mm = uvm_va_space_mm_retain_lock(va_space);
+    uvm_va_space_down_write(va_space);
+
+    status = uvm_va_range_create_semaphore_pool(va_space,
+                                                mm,
+                                                params->base,
+                                                params->length,
+                                                params->perGpuAttributes,
+                                                params->gpuAttributesCount,
+                                                &va_range);
+    if (status != NV_OK)
+        goto unlock;
+
+    for_each_va_space_gpu(gpu, va_space) {
+        status = va_range_register_gpu_semaphore_pool(va_range, gpu);
+        if (status != NV_OK)
+            goto done;
+
+        if (!uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->id))
+            continue;
+
+        status = va_range_add_gpu_va_space_semaphore_pool(va_range, gpu);
+        if (status != NV_OK)
+            goto done;
+    }
+
+done:
+    if (status != NV_OK)
+        uvm_va_range_destroy(va_range, NULL);
+
+unlock:
+    uvm_va_space_up_write(va_space);
+    uvm_va_space_mm_release_unlock(va_space, mm);
+    return status;
+}
+
+NV_STATUS uvm_test_va_range_info(UVM_TEST_VA_RANGE_INFO_PARAMS *params, struct file *filp)
+{
+    uvm_va_space_t *va_space;
+    uvm_va_range_t *va_range;
+    uvm_processor_id_t processor_id;
+    struct vm_area_struct *vma;
+    NV_STATUS status = NV_OK;
+
+    va_space = uvm_va_space_get(filp);
+
+    uvm_down_read_mmap_lock(current->mm);
+    uvm_va_space_down_read(va_space);
+
+    va_range = uvm_va_range_find(va_space, params->lookup_address);
+    if (!va_range) {
+        status = NV_ERR_INVALID_ADDRESS;
+        goto out;
+    }
+
+    params->va_range_start = va_range->node.start;
+    params->va_range_end = va_range->node.end;
+
+    // -Wall implies -Wenum-compare, so cast through int to avoid warnings
+    BUILD_BUG_ON((int)UVM_READ_DUPLICATION_UNSET != (int)UVM_TEST_READ_DUPLICATION_UNSET);
+    BUILD_BUG_ON((int)UVM_READ_DUPLICATION_ENABLED != (int)UVM_TEST_READ_DUPLICATION_ENABLED);
+    BUILD_BUG_ON((int)UVM_READ_DUPLICATION_DISABLED != (int)UVM_TEST_READ_DUPLICATION_DISABLED);
+    BUILD_BUG_ON((int)UVM_READ_DUPLICATION_MAX != (int)UVM_TEST_READ_DUPLICATION_MAX);
+    params->read_duplication = uvm_va_range_get_policy(va_range)->read_duplication;
+
+    if (UVM_ID_IS_INVALID(uvm_va_range_get_policy(va_range)->preferred_location))
+        memset(&params->preferred_location, 0, sizeof(params->preferred_location));
+    else
+        uvm_va_space_processor_uuid(va_space,
+                                    &params->preferred_location,
+                                    uvm_va_range_get_policy(va_range)->preferred_location);
+
+    params->accessed_by_count = 0;
+    for_each_id_in_mask(processor_id, &uvm_va_range_get_policy(va_range)->accessed_by)
+        uvm_va_space_processor_uuid(va_space, &params->accessed_by[params->accessed_by_count++], processor_id);
+
+    // -Wall implies -Wenum-compare, so cast through int to avoid warnings
+    BUILD_BUG_ON((int)UVM_TEST_VA_RANGE_TYPE_INVALID != (int)UVM_VA_RANGE_TYPE_INVALID);
+    BUILD_BUG_ON((int)UVM_TEST_VA_RANGE_TYPE_MANAGED != (int)UVM_VA_RANGE_TYPE_MANAGED);
+    BUILD_BUG_ON((int)UVM_TEST_VA_RANGE_TYPE_EXTERNAL != (int)UVM_VA_RANGE_TYPE_EXTERNAL);
+    BUILD_BUG_ON((int)UVM_TEST_VA_RANGE_TYPE_CHANNEL != (int)UVM_VA_RANGE_TYPE_CHANNEL);
+    BUILD_BUG_ON((int)UVM_TEST_VA_RANGE_TYPE_SKED_REFLECTED != (int)UVM_VA_RANGE_TYPE_SKED_REFLECTED);
+    BUILD_BUG_ON((int)UVM_TEST_VA_RANGE_TYPE_SEMAPHORE_POOL != (int)UVM_VA_RANGE_TYPE_SEMAPHORE_POOL);
+    BUILD_BUG_ON((int)UVM_TEST_VA_RANGE_TYPE_MAX != (int)UVM_VA_RANGE_TYPE_MAX);
+    params->type = va_range->type;
+
+    switch (va_range->type) {
+        case UVM_VA_RANGE_TYPE_MANAGED:
+            if (!va_range->managed.vma_wrapper) {
+                params->managed.is_zombie = NV_TRUE;
+                goto out;
+            }
+            params->managed.is_zombie = NV_FALSE;
+            vma = uvm_va_range_vma_current(va_range);
+            if (!vma) {
+                // We aren't in the same mm as the one which owns the vma
+                params->managed.owned_by_calling_process =
NV_FALSE; + goto out; + } + params->managed.owned_by_calling_process = NV_TRUE; + params->managed.vma_start = vma->vm_start; + params->managed.vma_end = vma->vm_end - 1; + break; + default: + break; + } + +out: + uvm_va_space_up_read(va_space); + uvm_up_read_mmap_lock(current->mm); + return status; +} + +NV_STATUS uvm_test_va_range_split(UVM_TEST_VA_RANGE_SPLIT_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_range_t *va_range; + NV_STATUS status = NV_OK; + + if (!PAGE_ALIGNED(params->split_address + 1)) + return NV_ERR_INVALID_ADDRESS; + + uvm_va_space_down_write(va_space); + + va_range = uvm_va_range_find(va_space, params->split_address); + if (!va_range || + va_range->node.end == params->split_address || + va_range->type != UVM_VA_RANGE_TYPE_MANAGED) { + status = NV_ERR_INVALID_ADDRESS; + goto out; + } + + status = uvm_va_range_split(va_range, params->split_address, NULL); + +out: + uvm_va_space_up_write(va_space); + return status; +} + +NV_STATUS uvm_test_va_range_inject_split_error(UVM_TEST_VA_RANGE_INJECT_SPLIT_ERROR_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_range_t *va_range; + NV_STATUS status = NV_OK; + + uvm_va_space_down_write(va_space); + + va_range = uvm_va_range_find(va_space, params->lookup_address); + if (!va_range || va_range->type != UVM_VA_RANGE_TYPE_MANAGED) { + status = NV_ERR_INVALID_ADDRESS; + goto out; + } + + va_range->inject_split_error = true; + +out: + uvm_va_space_up_write(va_space); + return status; +} + +NV_STATUS uvm_test_va_range_inject_add_gpu_va_space_error(UVM_TEST_VA_RANGE_INJECT_ADD_GPU_VA_SPACE_ERROR_PARAMS *params, + struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_range_t *va_range; + NV_STATUS status = NV_OK; + + uvm_va_space_down_write(va_space); + + va_range = uvm_va_range_find(va_space, params->lookup_address); + if (!va_range) { + status = NV_ERR_INVALID_ADDRESS; + goto out; + } + + va_range->inject_add_gpu_va_space_error = true; + +out: + uvm_va_space_up_write(va_space); + return status; +} + diff --git a/kernel-open/nvidia-uvm/uvm_va_range.h b/kernel-open/nvidia-uvm/uvm_va_range.h new file mode 100644 index 000000000..ee01863d2 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_range.h @@ -0,0 +1,884 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_VA_RANGE_H__ +#define __UVM_VA_RANGE_H__ + +#include "uvm_linux.h" +#include "nv-kref.h" +#include "uvm_common.h" +#include "uvm_perf_module.h" +#include "uvm_processors.h" +#include "uvm_gpu.h" +#include "uvm_lock.h" +#include "uvm_va_space.h" +#include "uvm_range_tree.h" +#include "uvm_va_policy.h" +#include "uvm_test_ioctl.h" +#include "uvm_range_group.h" +#include "uvm_forward_decl.h" +#include "uvm_mmu.h" +#include "uvm_hal_types.h" +#include "uvm_mem.h" +#include "uvm_tracker.h" +#include "uvm_ioctl.h" + +// VA Ranges are the UVM driver equivalent of Linux kernel vmas. They represent +// user allocations of any page-aligned size. We maintain these as a separate +// data structure from the vma tree for several reasons: +// +// 1) RM allocations mapped to the GPU by UVM don't have associated UVM vmas +// +// 2) We don't always have a separate reference on the vma's mm_struct, so we +// can't always lock mmap_lock on paths where current->mm != vma->vm_mm. +// +// 3) HMM vmas aren't ours, so we can't use their vm_private_data pointers. +// +// The tree as a whole is protected by va_space->lock. Faults and mappings only +// need to take the lock in read mode. +// Modification of the range state (such as changes to logical permissions or +// location preferences) must take the lock in write mode. +// +// VA ranges with type == UVM_VA_RANGE_TYPE_MANAGED: +// Each va_range is contained completely within a parent vma. There can be +// multiple va_ranges under the same vma, but not vice versa. All VAs within +// the va_range share the same policy state. +// +// Each va_range is a collection of VA blocks. The VA blocks each have +// individual locks, and they hold the current mapping and location state +// for their block across all processors (CPU and all GPUs). +// +// VA ranges with type == UVM_VA_RANGE_TYPE_EXTERNAL: +// These ranges track physical allocations made by RM. The UVM driver is +// responsible for mapping them to the GPU(s), but not to the CPU. These +// ranges do not support faulting nor migration, and they do not necessarily +// correspond to valid vmas. +// +// These ranges do not have blocks. All state (page tables, mapping handles, +// etc) is maintained within the range. +// +// VA ranges with type == UVM_VA_RANGE_TYPE_CHANNEL: +// These are similar to EXTERNAL ranges, except they represent internal +// allocations required for user channels to operate (context save areas, +// for example). +// +// VA ranges with type == UVM_VA_RANGE_TYPE_SKED_REFLECTED: +// These ranges track special SKED reflected mappings required for CNP. The +// mappings don't have any physical backing. They just use PTEs with a +// special kind, see make_sked_reflected_pte_pascal() for an example of the +// PTE encoding. +// Notably the API that creates these ranges calls them "dynamic parallelism +// regions", but we use "SKED reflected ranges" internally as it's more +// descriptive. +// +// VA ranges with type == UVM_VA_RANGE_TYPE_SEMAPHORE_POOL: +// These ranges track semaphore pool allocations. They are backed by sysmem, +// and persistently mapped on the CPU and all GPUs (with registered VA +// spaces) in a user VA space. The ranges are also mapped on UVM internal VA +// space on the CPU and all registered GPUs. +// +// These ranges do not have blocks. 
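+// As a rough usage sketch of the managed path (illustration only, not a
+// complete or compilable example; it uses only helpers that appear elsewhere
+// in this patch, and omits error handling): a lookup takes the VA space lock
+// in read mode, finds the range, and then works on its blocks, each of which
+// is protected by its own lock:
+//
+//     uvm_va_space_down_read(va_space);
+//     va_range = uvm_va_range_find(va_space, addr);
+//     if (va_range && va_range->type == UVM_VA_RANGE_TYPE_MANAGED) {
+//         uvm_va_block_t *va_block;
+//         for_each_va_block_in_va_range(va_range, va_block) {
+//             // Per-block work is done under uvm_mutex_lock(&va_block->lock)
+//         }
+//     }
+//     uvm_va_space_up_read(va_space);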
+// + +// This enum must be kept in sync with UVM_TEST_VA_RANGE_TYPE in +// uvm_test_ioctl.h +typedef enum +{ + UVM_VA_RANGE_TYPE_INVALID = 0, + UVM_VA_RANGE_TYPE_MANAGED, + UVM_VA_RANGE_TYPE_EXTERNAL, + UVM_VA_RANGE_TYPE_CHANNEL, + UVM_VA_RANGE_TYPE_SKED_REFLECTED, + UVM_VA_RANGE_TYPE_SEMAPHORE_POOL, + UVM_VA_RANGE_TYPE_MAX +} uvm_va_range_type_t; + +// Wrapper to protect access to VMA's vm_page_prot +typedef struct +{ + // Needed for creating CPU mappings on the va_range. Do not access this + // directly, instead use uvm_va_range_vma and friends. + struct vm_area_struct *vma; + + uvm_rw_semaphore_t lock; +} uvm_vma_wrapper_t; + +// TODO: Bug 1733295. VA range types should really be inverted. Instead of +// maintaining common node state with a union of structs, we should have +// separate C types for each VA range type. Each type would embed a common +// VA range node. +// +// There's a lot of state in the top-level uvm_va_range_t struct below +// which really belongs in the per-type structs (for example, blocks). +// We're deferring that cleanup to the full refactor. + +// va_range state when va_range.type == UVM_VA_RANGE_TYPE_MANAGED +typedef struct +{ + // This is null in the case of a zombie allocation. Zombie allocations are + // created from unfreed allocations at termination of a process which used + // UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE, when at least one other + // process is sharing the UVM file descriptor. + uvm_vma_wrapper_t *vma_wrapper; + + // UVM managed allocations only use this policy and never use the policy + // stored in the va_block for HMM allocations. + uvm_va_policy_t policy; + + uvm_perf_module_data_desc_t perf_modules_data[UVM_PERF_MODULE_TYPE_COUNT]; +} uvm_va_range_managed_t; + +typedef struct +{ + // GPU mapping the allocation. The GPU's RM address space is required when + // releasing the handle. + uvm_gpu_t *gpu; + + // RM handle to the physical allocation. This handle is dup'd into our client + // once - on initial mapping of the external allocation. If the allocation is + // ever split, its ref_count is incremented. The allocation is not released + // until the ref_count drops to 0. + NvHandle rm_handle; + + // Refcount for this handle/allocation. The refcount is used when external + // ranges are split, resulting in two ranges using the same physical allocation. + nv_kref_t ref_count; +} uvm_ext_gpu_mem_handle; + +typedef struct +{ + uvm_range_tree_node_t node; + + // Handle to the physical user allocation dup'd into our client. This + // prevents the allocation from being removed until we free it, even if the + // user frees their handle without telling us. + // This will be NULL for sparse mappings, which don't correspond to actual + // allocations. + uvm_ext_gpu_mem_handle *mem_handle; + + // Tracks completion of PTE writes on pt_range_vec. The tree lock + // protecting this ext_gpu_map may be dropped before those writes are + // complete, so subsequent operations on this ext_gpu_map must acquire this + // tracker before operating on pt_range_vec. + uvm_tracker_t tracker; + + // GPU on which this allocation is mapped. + uvm_gpu_t *gpu; + + // GPU which owns the allocation. For sysmem, this is the GPU that the + // sysmem was originally allocated under. For the allocation to remain valid + // we need to prevent the GPU from going away, similarly to P2P mapped + // memory. + // + // This field is not used for sparse mappings as they don't have an + // allocation and, hence, owning GPU. 
+ // + // TODO: Bug 1811006: The semantics of sysmem might change depending on the + // resolution of this bug. + // + // TODO: Bug 1757136: For SLI, this is any GPU in the SLI group. We may need + // to handle that specially. + uvm_gpu_t *owning_gpu; + + // We need to know whether this memory is actually located on owning_gpu so + // we know what type of membar is needed at TLB invalidate time, and to know + // if the mapping GPU has to be unmapped on UvmDisablePeerAccess. + // + // This field is not used for sparse mappings as they don't have physical + // backing. + bool is_sysmem; + + // GPU page tables mapping the allocation + uvm_page_table_range_vec_t pt_range_vec; + + // Node for the deferred free list where this allocation is stored upon + // unmapped. + // + // This field is unused for sparse mappings. Since they don't have physical + // backing there is no RM object to be freed when the mapping is unmapped. + uvm_deferred_free_object_t deferred_free; +} uvm_ext_gpu_map_t; + +typedef struct +{ + // Lock protecting the range tree. + uvm_mutex_t lock; + + // Range tree that contains all of the mapped portions of an External VA + // range. The tree holds uvm_ext_gpu_map_t instances. + uvm_range_tree_t tree; +} uvm_ext_gpu_range_tree_t; + +typedef struct +{ + // Mask of GPUs which have mappings to this VA range. If a bit in this mask + // is set, the corresponding pointer in gpu_ranges is valid. + // The bitmap can be safely accessed by following the locking rules: + // * If the VA space lock is held for write, the mask can be read or written + // normally. + // * If the VA space lock is held for read, and one of the range tree locks is + // held, only the bit corresponding to that GPU range tree can be accessed. + // Writes must use uvm_processor_mask_set_atomic and + // uvm_processor_mask_clear_atomic to avoid clobbering other bits in the + // mask. If no range tree lock is held, the mask cannot be accessed. + // * If the VA space lock is not held, the mask cannot be accessed + uvm_processor_mask_t mapped_gpus; + + // Per-GPU tree of mapped external allocations. This has to be per-GPU in the VA + // range because each GPU is able to map a completely different set of + // allocations to the same VA range. + uvm_ext_gpu_range_tree_t gpu_ranges[UVM_ID_MAX_GPUS]; +} uvm_va_range_external_t; + +// va_range state when va_range.type == UVM_VA_RANGE_TYPE_CHANNEL. This +// represents a channel buffer resource and mapping. +typedef struct +{ + // Only a single GPU can map a channel resource, so we only need one GPU + // VA space parent. + uvm_gpu_va_space_t *gpu_va_space; + + // Page tables mapped by this range + uvm_page_table_range_vec_t pt_range_vec; + + // Physical location of this channel resource. All pages have the same + // aperture. + uvm_aperture_t aperture; + + // Note that this is not a normal RM object handle. It is a non-zero opaque + // identifier underneath the GPU VA space which represents this channel + // resource. Each channel using this VA range has retained this descriptor + // and is responsible for releasing it. That's safe because channels outlive + // their VA ranges. + NvP64 rm_descriptor; + + // This is an ID assigned by RM to each resource descriptor. + NvU32 rm_id; + + // The TSG which owns this mapping. Sharing of VA ranges is only allowed + // within the same TSG. If valid == false, no sharing is allowed because the + // channel is not in a TSG. 
+ struct + { + bool valid; + NvU32 id; + } tsg; + + NvU64 ref_count; + + // Storage in the corresponding uvm_gpu_va_space's channel_va_ranges list + struct list_head list_node; +} uvm_va_range_channel_t; + +// va_range state when va_range.type == UVM_VA_RANGE_TYPE_SKED_REFLECTED. This +// represents a sked reflected mapping. +typedef struct +{ + // Each SKED reflected range is unique to a single GPU so only a single GPU + // VA space needs to be tracked. + uvm_gpu_va_space_t *gpu_va_space; + + // Page tables mapped by this range + uvm_page_table_range_vec_t pt_range_vec; +} uvm_va_range_sked_reflected_t; + +typedef struct +{ + uvm_mem_t *mem; + + // The optional owner is a GPU (at most one) that has the allocation cached - + // in this case, all writes must be done from this GPU. + // protected by va_space lock + uvm_gpu_t *owner; + + // Per-gpu attributes + uvm_mem_gpu_mapping_attrs_t gpu_attrs[UVM_ID_MAX_GPUS]; + + // Default attributes to assign when a new GPU is registered + uvm_mem_gpu_mapping_attrs_t default_gpu_attrs; + + // Tracks all outstanding GPU work using this allocation. + uvm_tracker_t tracker; + uvm_mutex_t tracker_lock; +} uvm_va_range_semaphore_pool_t; + +struct uvm_va_range_struct +{ + // Parent uvm_va_space. + uvm_va_space_t *va_space; + + // Storage in VA range tree. Also contains range start and end. + // start and end + 1 have to be PAGE_SIZED aligned. + uvm_range_tree_node_t node; + + // Force the next split on this range to fail. Set by error injection ioctl + // (testing purposes only). + bool inject_split_error; + + // Force the next register_gpu_va_space to fail while adding this va_range. + // Set by error injection ioctl (testing purposes only). + bool inject_add_gpu_va_space_error; + + // Mask of UVM-Lite GPUs for the VA range + // + // If the preferred location is set to a non-faultable GPU or the CPU, + // this mask contains all non-faultable GPUs that are in the accessed by + // mask and the preferred location itself if it's a GPU. Empty otherwise. + // + // All UVM-Lite GPUs have mappings only to the preferred location. The + // mappings are initially established only when the pages are resident on + // the preferred location, but persist after that until the preferred + // location is changed or a GPU stops being a UVM-Lite GPU. + uvm_processor_mask_t uvm_lite_gpus; + + // This is a uvm_va_block_t ** array of all VA block pointers under this + // range. The pointers can be accessed using the functions + // uvm_va_range_block() and uvm_va_range_block_create(). The latter + // allocates the block if it doesn't already exist. Once allocated, the + // blocks persist in the array until the parent VA range is destroyed. + // + // Concurrent on-demand allocation requires the use of either atomics or a + // spin lock. Given that we don't want to take a spin lock for every lookup, + // and that the blocks are persistent, atomics are preferred. + // + // The number of blocks is calculated from the range size using + // uvm_va_range_num_blocks(). + // + // TODO: Bug 1766585: Compare perf of up-front allocation and demand- + // allocation of blocks in the common case (lots of accessed blocks) + // and the sparse case. If the common case is hurt by demand- + // allocation, or if the sparse case isn't helped much, just allocate + // them all at range allocation. 
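+    //
+    // As a simplified sketch of the atomics approach (this is not the actual
+    // implementation; see the definition of uvm_va_range_block_create()), the
+    // demand-allocation path boils down to publishing a freshly-allocated
+    // block with a compare-and-swap and discarding it if another thread raced
+    // ahead. free_block() below is a hypothetical placeholder:
+    //
+    //     old = atomic_long_cmpxchg(&va_range->blocks[index], 0, (long)new_block);
+    //     if (old != 0) {
+    //         // Another thread won the race; use its block instead
+    //         free_block(new_block);
+    //         new_block = (uvm_va_block_t *)old;
+    //     }
+    //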
+ atomic_long_t *blocks; + + uvm_va_range_type_t type; + union + { + uvm_va_range_managed_t managed; + uvm_va_range_external_t external; + uvm_va_range_channel_t channel; + uvm_va_range_sked_reflected_t sked_reflected; + uvm_va_range_semaphore_pool_t semaphore_pool; + }; +}; + +// Module load/exit +NV_STATUS uvm_va_range_init(void); +void uvm_va_range_exit(void); + +static NvU64 uvm_va_range_size(uvm_va_range_t *va_range) +{ + return uvm_range_tree_node_size(&va_range->node); +} + +static bool uvm_va_range_is_aligned(uvm_va_range_t *va_range, NvU64 alignment) +{ + return IS_ALIGNED(va_range->node.start, alignment) && IS_ALIGNED(uvm_va_range_size(va_range), alignment); +} + +static bool uvm_va_range_is_managed_zombie(uvm_va_range_t *va_range) +{ + return va_range->type == UVM_VA_RANGE_TYPE_MANAGED && va_range->managed.vma_wrapper == NULL; +} + +// Create a va_range with type UVM_VA_RANGE_TYPE_MANAGED. The out va_range pointer +// is optional. +// +// Returns NV_ERR_UVM_ADDRESS_IN_USE if the vma overlaps with an existing range +// in the va_space tree. +NV_STATUS uvm_va_range_create_mmap(uvm_va_space_t *va_space, + struct mm_struct *mm, + uvm_vma_wrapper_t *vma_wrapper, + uvm_va_range_t **out_va_range); + +// Create a va_range with type UVM_VA_RANGE_TYPE_EXTERNAL. The out va_range +// pointer is optional. +// +// Returns NV_ERR_UVM_ADDRESS_IN_USE if the range overlaps with an existing +// range in the va_space tree. +NV_STATUS uvm_va_range_create_external(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 length, + uvm_va_range_t **out_va_range); + +// Create a va_range with type UVM_VA_RANGE_TYPE_CHANNEL. The out va_range +// pointer is optional. +// +// Returns NV_ERR_UVM_ADDRESS_IN_USE if the range overlaps with an existing +// range in the va_space tree. +NV_STATUS uvm_va_range_create_channel(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 end, + uvm_va_range_t **out_va_range); + +NV_STATUS uvm_va_range_create_sked_reflected(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 length, + uvm_va_range_t **out_va_range); + +NV_STATUS uvm_va_range_create_semaphore_pool(uvm_va_space_t *va_space, + struct mm_struct *mm, + NvU64 start, + NvU64 length, + const UvmGpuMappingAttributes *per_gpu_attrs, + NvU32 per_gpu_attrs_count, + uvm_va_range_t **out_va_range); + +// Destroys any state associated with this VA range, removes the VA range from +// the VA space, and frees the VA range. +// +// deferred_free_list may be NULL if the VA range type is known to not require +// deferred free. Otherwise this function adds entries to the list for later +// processing by uvm_deferred_free_object_list. +void uvm_va_range_destroy(uvm_va_range_t *va_range, struct list_head *deferred_free_list); + +void uvm_va_range_zombify(uvm_va_range_t *va_range); + +NV_STATUS uvm_api_clean_up_zombie_resources(UVM_CLEAN_UP_ZOMBIE_RESOURCES_PARAMS *params, struct file *filp); +NV_STATUS uvm_api_validate_va_range(UVM_VALIDATE_VA_RANGE_PARAMS *params, struct file *filp); + +// Inform the VA range that a GPU VA space is now available for them to map, if +// the VA range is supposed to proactively map GPUs (UvmAllocSemaphorePool, +// UvmSetAccessedBy). +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read +// mode. 
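+// As a usage sketch, the uvm_va_range_create_*() functions above insert a new
+// range into the VA space tree, and uvm_va_range_destroy() removes it again.
+// The helper below is purely hypothetical (it is not part of this header); it
+// creates an external range and immediately tears it down, processing the
+// deferred free list only after the VA space lock has been dropped:
+//
+//     static NV_STATUS example_external_range_lifetime(uvm_va_space_t *va_space,
+//                                                      struct mm_struct *mm,
+//                                                      NvU64 base,
+//                                                      NvU64 length)
+//     {
+//         uvm_va_range_t *va_range;
+//         NV_STATUS status;
+//         LIST_HEAD(deferred_free_list);
+//
+//         // The tree is being modified, so the lock is needed in write mode
+//         uvm_va_space_down_write(va_space);
+//
+//         status = uvm_va_range_create_external(va_space, mm, base, length, &va_range);
+//         if (status == NV_OK)
+//             uvm_va_range_destroy(va_range, &deferred_free_list);
+//
+//         uvm_va_space_up_write(va_space);
+//
+//         // Deferred frees cannot run while the VA space lock is held
+//         uvm_deferred_free_object_list(&deferred_free_list);
+//
+//         return status;
+//     }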
+NV_STATUS uvm_va_range_add_gpu_va_space(uvm_va_range_t *va_range, + uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm); + +// Destroy the VA range's mappings on the GPU, if it has any +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read +// mode. +void uvm_va_range_remove_gpu_va_space(uvm_va_range_t *va_range, + uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm, + struct list_head *deferred_free_list); + +// Inform the VA range that peer mappings can now be established between the +// GPUs, if the VA range is supposed to proactively create them (UvmSetAccessedBy). +NV_STATUS uvm_va_range_enable_peer(uvm_va_range_t *va_range, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1); + +// Unmap all page tables in this VA range which have peer mappings between these +// two GPUs, in either direction. +void uvm_va_range_disable_peer(uvm_va_range_t *va_range, + uvm_gpu_t *gpu0, + uvm_gpu_t *gpu1, + struct list_head *deferred_free_list); + +// Notify the VA range of a newly registered GPU. +// +// LOCKING: the lock of the enclosing VA space is held in R/W mode +NV_STATUS uvm_va_range_register_gpu(uvm_va_range_t *va_range, uvm_gpu_t *gpu); + +// Unmap all page tables in this VA range which map memory owned by this GPU. +// Managed ranges will have any memory still resident on this GPU evicted to +// system memory. +// +// deferred_free_list may be NULL if the VA range type is known to not require +// deferred free. Otherwise this function adds entries to the list for later +// processing by uvm_deferred_free_object_list. +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read +// mode. +void uvm_va_range_unregister_gpu(uvm_va_range_t *va_range, + uvm_gpu_t *gpu, + struct mm_struct *mm, + struct list_head *deferred_free_list); + +// Splits existing_va_range into two pieces, with new_va_range always after +// existing. existing is updated to have new_end. new_end+1 must be page- +// aligned. +// +// Before: [----------- existing ------------] +// After: [---- existing ----][---- new ----] +// ^new_end +// +// On error, existing_va_range is still accessible and is left in its original +// functional state. +// +// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED. 
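+// For instance (illustrative numbers only, assuming a 4KB page size), splitting
+// a managed range spanning [0x100000, 0x4FFFFF] with new_end == 0x2FFFFF leaves
+// the existing range as [0x100000, 0x2FFFFF] and creates a new range covering
+// [0x300000, 0x4FFFFF]:
+//
+//     uvm_va_range_t *new_range;
+//     NV_STATUS status = uvm_va_range_split(existing, 0x2FFFFF, &new_range);
+//
+//     // On success, existing->node.end is now 0x2FFFFF and
+//     // new_range->node.start is 0x300000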
+NV_STATUS uvm_va_range_split(uvm_va_range_t *existing_va_range, + NvU64 new_end, + uvm_va_range_t **new_va_range); + +// TODO: Bug 1707562: Merge va ranges + +// Returns the va_range containing addr, if any +uvm_va_range_t *uvm_va_range_find(uvm_va_space_t *va_space, NvU64 addr); + +static uvm_ext_gpu_map_t *uvm_ext_gpu_map_container(uvm_range_tree_node_t *node) +{ + if (!node) + return NULL; + return container_of(node, uvm_ext_gpu_map_t, node); +} + +// Iterators for all va_ranges + +#define uvm_for_each_va_range(va_range, va_space) \ + list_for_each_entry((va_range), &(va_space)->va_range_tree.head, node.list) + +#define uvm_for_each_va_range_safe(va_range, va_range_next, va_space) \ + list_for_each_entry_safe((va_range), (va_range_next), &(va_space)->va_range_tree.head, node.list) + + +// Iterators for specific ranges + +// Returns the first va_range in the range [start, end], if any +uvm_va_range_t *uvm_va_space_iter_first(uvm_va_space_t *va_space, NvU64 start, NvU64 end); + +// Returns the va_range following the provided va_range in address order, if +// that va_range's start <= the provided end. +uvm_va_range_t *uvm_va_space_iter_next(uvm_va_range_t *va_range, NvU64 end); + +// Like uvm_va_space_iter_next, but also returns NULL if the next va_range +// is not adjacent to the provided va_range. +static uvm_va_range_t *uvm_va_space_iter_next_contig(uvm_va_range_t *va_range, NvU64 end) +{ + uvm_va_range_t *next = uvm_va_space_iter_next(va_range, end); + if (next && next->node.start != va_range->node.end + 1) + return NULL; + return next; +} + +// Returns whether the range [start, end] has any VA ranges within it +static bool uvm_va_space_range_empty(uvm_va_space_t *va_space, NvU64 start, NvU64 end) +{ + return uvm_va_space_iter_first(va_space, start, end) == NULL; +} + +#define uvm_for_each_va_range_in(va_range, va_space, start, end) \ + for ((va_range) = uvm_va_space_iter_first((va_space), (start), (end)); \ + (va_range); \ + (va_range) = uvm_va_space_iter_next((va_range), (end))) + +#define uvm_for_each_va_range_in_safe(va_range, va_range_next, va_space, start, end) \ + for ((va_range) = uvm_va_space_iter_first((va_space), (start), (end)), \ + (va_range_next) = uvm_va_space_iter_next((va_range), (end)); \ + (va_range); \ + (va_range) = (va_range_next), (va_range_next) = uvm_va_space_iter_next((va_range), (end))) + +// Iterator for all contiguous VA ranges between [start, end]. If any part of +// [start, end] is not covered by a VA range, iteration stops. +#define uvm_for_each_va_range_in_contig(va_range, va_space, start, end) \ + for ((va_range) = uvm_va_space_iter_first((va_space), (start), (start)); \ + (va_range); \ + (va_range) = uvm_va_space_iter_next_contig((va_range), (end))) + +#define uvm_for_each_va_range_in_contig_from(va_range, va_space, first_va_range, end) \ + for ((va_range) = (first_va_range); \ + (va_range); \ + (va_range) = uvm_va_space_iter_next_contig((va_range), (end))) + +// Like uvm_for_each_va_range_in_contig but also stops iteration if any VA range +// has a type other than UVM_VA_RANGE_TYPE_MANAGED. 
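+// As a usage sketch (the helper is hypothetical and not part of this header),
+// the managed contiguous iterator defined just below can be used to check
+// whether [start, end] is fully covered by contiguous managed VA ranges, simply
+// by watching how far iteration gets before it stops:
+//
+//     static bool example_is_covered_by_managed(uvm_va_space_t *va_space,
+//                                               NvU64 start,
+//                                               NvU64 end)
+//     {
+//         uvm_va_range_t *va_range;
+//
+//         uvm_assert_rwsem_locked(&va_space->lock);
+//
+//         uvm_for_each_managed_va_range_in_contig(va_range, va_space, start, end) {
+//             if (va_range->node.end >= end)
+//                 return true;
+//         }
+//
+//         // Iteration stopped early: a gap or a non-managed range was hit
+//         return false;
+//     }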
+#define uvm_for_each_managed_va_range_in_contig(va_range, va_space, start, end) \ + for ((va_range) = uvm_va_space_iter_first((va_space), (start), (start)); \ + (va_range) && (va_range)->type == UVM_VA_RANGE_TYPE_MANAGED; \ + (va_range) = uvm_va_space_iter_next_contig((va_range), (end))) + +#define uvm_for_each_va_range_in_vma(va_range, vma) \ + uvm_for_each_va_range_in(va_range, \ + uvm_va_space_get(vma->vm_file), \ + vma->vm_start, \ + vma->vm_end - 1) + +#define uvm_for_each_va_range_in_vma_safe(va_range, va_range_next, vma) \ + uvm_for_each_va_range_in_safe(va_range, \ + va_range_next, \ + uvm_va_space_get(vma->vm_file), \ + vma->vm_start, \ + vma->vm_end - 1) + +// Only call this if you're sure that either: +// 1) You have a reference on the vma's vm_mm and that vma->vm_mm's mmap_lock is +// held; or +// 2) You won't be operating on the vma (as with vm_insert_page) or accessing +// any fields in the vma that can change without va_space->lock being held +// (such as vm_flags). +// +// Otherwise, use uvm_va_range_vma_current or uvm_va_range_vma_check and be +// prepared to handle a NULL return value. +static struct vm_area_struct *uvm_va_range_vma(uvm_va_range_t *va_range) +{ + struct vm_area_struct *vma; + UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_MANAGED, "type: %d", va_range->type); + UVM_ASSERT(va_range->managed.vma_wrapper); + + uvm_assert_rwsem_locked(&va_range->va_space->lock); + + // vm_file, vm_private_data, vm_start, and vm_end are all safe to access + // here because they can't change without the kernel calling vm_ops->open + // or vm_ops->close, which both take va_space->lock. + vma = va_range->managed.vma_wrapper->vma; + UVM_ASSERT(vma); + UVM_ASSERT_MSG(vma->vm_private_data == va_range->managed.vma_wrapper, + "vma: 0x%llx [0x%lx, 0x%lx] has vm_private_data 0x%llx\n", + (NvU64)vma, + vma->vm_start, + vma->vm_end - 1, + (NvU64)vma->vm_private_data); + UVM_ASSERT_MSG(va_range->va_space == uvm_va_space_get(vma->vm_file), + "va_range va_space: 0x%llx vm_file: 0x%llx vm_file va_space: 0x%llx", + (NvU64)va_range->va_space, + (NvU64)vma->vm_file, + (NvU64)uvm_va_space_get(vma->vm_file)); + UVM_ASSERT_MSG(va_range->node.start >= vma->vm_start, + "Range mismatch: va_range: [0x%llx, 0x%llx] vma: [0x%lx, 0x%lx]\n", + va_range->node.start, + va_range->node.end, + vma->vm_start, + vma->vm_end - 1); + UVM_ASSERT_MSG(va_range->node.end <= vma->vm_end - 1, + "Range mismatch: va_range: [0x%llx, 0x%llx] vma: [0x%lx, 0x%lx]\n", + va_range->node.start, + va_range->node.end, + vma->vm_start, + vma->vm_end - 1); + + return vma; +} + +// Check that the VA range's vma is safe to use under mm. If not, NULL is +// returned. If the vma is returned, there must be a reference on mm and +// mm->mmap_lock must be held. +static struct vm_area_struct *uvm_va_range_vma_check(uvm_va_range_t *va_range, struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + UVM_ASSERT_MSG(va_range->type == UVM_VA_RANGE_TYPE_MANAGED, "type: %d\n", va_range->type); + + // Zombies don't have a vma_wrapper. + if (!va_range->managed.vma_wrapper) + return NULL; + + vma = uvm_va_range_vma(va_range); + + // Examples of mm on various paths: + // - CPU fault vma->vm_mm + // - GPU fault current->mm or va_space->va_space_mm.mm + // - IOCTL current->mm or va_space->va_space_mm.mm + // - Process teardown NULL + // + // Since the "safe" mm varies based on the path, we may not have a reference + // on the vma's owning mm_struct. We won't know that until we look at the + // vma. 
By then it's too late to take mmap_lock since mmap_lock is above the + // va_space lock in our lock ordering, and we must be holding the va_space + // lock to query the va_range. Hence the need to detect the cases in which + // it's safe to operate on the vma. + // + // When we can't detect for certain that mm is safe to use, we shouldn't + // operate on the vma at all. The vma can't be outright freed until we drop + // the va_space lock so the pointer itself will remain valid, but its fields + // (like vm_start and vm_end) could be modified behind our back. We also + // aren't allowed to call vm_insert_page unless we hold the vma's mmap_lock. + // + // Note that if uvm_va_space_mm_enabled() is true, then vma->vm_mm must be + // va_space->va_space_mm.mm because we enforce that at mmap. + // + // An interesting case is when vma->vm_mm != current->mm. This can happen + // due to fork, ptrace, process teardown, etc. It will also be the case in + // the GPU fault handler. + if (mm != vma->vm_mm) + return NULL; + + uvm_assert_mmap_lock_locked(vma->vm_mm); + return vma; +} + +// Helper for use when the only mm which is known is current->mm +static struct vm_area_struct *uvm_va_range_vma_current(uvm_va_range_t *va_range) +{ + return uvm_va_range_vma_check(va_range, current->mm); +} + +// Returns the maximum number of VA blocks which could be contained with the +// given va_range (number of elements in the va_range->blocks array). +// va_range->node.start and .end must be set. +// +// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED. +size_t uvm_va_range_num_blocks(uvm_va_range_t *va_range); + +// Get the index within the va_range->blocks array of the VA block +// corresponding to addr. The block pointer is not guaranteed to be valid. Use +// either uvm_va_range_block or uvm_va_range_block_create to look up the block. +// +// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED. +size_t uvm_va_range_block_index(uvm_va_range_t *va_range, NvU64 addr); + +// Looks up the VA block at va_range->blocks[index]. If no block is present at +// that index, NULL is returned. +// +// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED. +static uvm_va_block_t *uvm_va_range_block(uvm_va_range_t *va_range, size_t index) +{ + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + UVM_ASSERT(index < uvm_va_range_num_blocks(va_range)); + uvm_assert_rwsem_locked(&va_range->va_space->lock); + + return (uvm_va_block_t *)atomic_long_read(&va_range->blocks[index]); +} + +// Same as uvm_va_range_block except that the block is created if not already +// present in the array. If NV_OK is returned, the block has been allocated +// successfully. +// +// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED. +NV_STATUS uvm_va_range_block_create(uvm_va_range_t *va_range, size_t index, uvm_va_block_t **out_block); + +// Returns the first populated VA block in the VA range after the input +// va_block, or NULL if none. If the input va_block is NULL, this returns the +// first VA block in the VA range, if any exists. +uvm_va_block_t *uvm_va_range_block_next(uvm_va_range_t *va_range, uvm_va_block_t *va_block); + +// Iterate over populated VA blocks in the range. Does not create new VA blocks. +#define for_each_va_block_in_va_range(__va_range, __va_block) \ + for (__va_block = uvm_va_range_block_next(__va_range, NULL); \ + __va_block; \ + __va_block = uvm_va_range_block_next(__va_range, __va_block)) + +// Iterate over populated VA blocks in the range. Does not create new VA blocks. 
Safe version +#define for_each_va_block_in_va_range_safe(__va_range, __va_block, __va_block_next) \ + for (__va_block = uvm_va_range_block_next(__va_range, NULL), \ + __va_block_next = uvm_va_range_block_next(__va_range, __va_block); \ + __va_block; \ + __va_block = __va_block_next, \ + __va_block_next = __va_block? uvm_va_range_block_next(__va_range, __va_block) : NULL) + +// Set the VA range preferred location (or unset it if preferred location is +// UVM_ID_INVALID). +// +// Unsetting the preferred location potentially changes the range group +// association to UVM_RANGE_GROUP_ID_NONE if the VA range was previously +// associated with a non-migratable range group. +// +// Changing the preferred location also updates the mask and mappings of GPUs +// in UVM-Lite mode. +// +// The va_range must have type UVM_VA_RANGE_TYPE_MANAGED. +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// If out_tracker != NULL any block work will be added to that tracker. +// +// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read +// mode. +NV_STATUS uvm_va_range_set_preferred_location(uvm_va_range_t *va_range, + uvm_processor_id_t preferred_location, + struct mm_struct *mm, + uvm_tracker_t *out_tracker); + +// Add a processor to the accessed_by mask and establish any new required +// mappings. +// +// Also update the mask of UVM-Lite GPUs if needed. +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// If out_tracker != NULL any block work will be added to that tracker. +// +// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read +// mode. +NV_STATUS uvm_va_range_set_accessed_by(uvm_va_range_t *va_range, + uvm_processor_id_t processor_id, + struct mm_struct *mm, + uvm_tracker_t *out_tracker); + +// Remove a processor from the accessed_by mask +// +// If out_tracker != NULL any block work will be added to that tracker. +// +// This also updates the mask and mappings of the UVM-Lite GPUs if required. +void uvm_va_range_unset_accessed_by(uvm_va_range_t *va_range, + uvm_processor_id_t processor_id, + uvm_tracker_t *out_tracker); + +// Set read-duplication and remove any existing accessed_by and remote mappings +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read +// mode. +NV_STATUS uvm_va_range_set_read_duplication(uvm_va_range_t *va_range, struct mm_struct *mm); + +// Unset read-duplication and establish accessed_by mappings +// +// If mm != NULL, that mm is used for any CPU mappings which may be created as +// a result of this call. See uvm_va_block_context_t::mm for details. +// +// LOCKING: If mm != NULL, the caller must hold mm->mmap_lock in at least read +// mode. 
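+// As a usage sketch (the helper is hypothetical and not part of this header),
+// the policy setters above combine naturally. A caller that wants a managed
+// range to prefer a given GPU while keeping CPU mappings established could do
+// the following, with the VA space lock held in write mode per the locking rule
+// at the top of this file (and mm->mmap_lock held in read mode if mm != NULL):
+//
+//     static NV_STATUS example_prefer_gpu(uvm_va_range_t *va_range,
+//                                         uvm_gpu_t *gpu,
+//                                         struct mm_struct *mm)
+//     {
+//         NV_STATUS status;
+//
+//         status = uvm_va_range_set_preferred_location(va_range, gpu->id, mm, NULL);
+//         if (status != NV_OK)
+//             return status;
+//
+//         // Ask for CPU mappings to be established and kept up to date
+//         return uvm_va_range_set_accessed_by(va_range, UVM_ID_CPU, mm, NULL);
+//     }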
+NV_STATUS uvm_va_range_unset_read_duplication(uvm_va_range_t *va_range, struct mm_struct *mm); + +// Create and destroy vma wrappers +uvm_vma_wrapper_t *uvm_vma_wrapper_alloc(struct vm_area_struct *vma); +void uvm_vma_wrapper_destroy(uvm_vma_wrapper_t *vma_wrapper); + +// Return the memory access permissions for the vma bound to the given VA range +uvm_prot_t uvm_va_range_logical_prot(uvm_va_range_t *va_range); + +// Check if processor_id is allowed to access the managed va_range with +// access_type permissions. Return values: +// +// NV_ERR_INVALID_ADDRESS The VA range is logically dead (zombie) +// NV_ERR_INVALID_ACCESS_TYPE The vma corresponding to the VA range does not +// allow access_type permissions, or migration is +// disallowed and processor_id cannot access the +// range remotely (UVM-Lite). +// NV_ERR_INVALID_OPERATION The access would violate the policies specified +// by UvmPreventMigrationRangeGroups. +NV_STATUS uvm_va_range_check_logical_permissions(uvm_va_range_t *va_range, + uvm_processor_id_t processor_id, + uvm_fault_type_t access_type, + bool allow_migration); + +static uvm_va_policy_t *uvm_va_range_get_policy(uvm_va_range_t *va_range) +{ + UVM_ASSERT(va_range->type == UVM_VA_RANGE_TYPE_MANAGED); + return &va_range->managed.policy; +} + +NV_STATUS uvm_test_va_range_info(UVM_TEST_VA_RANGE_INFO_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_va_range_split(UVM_TEST_VA_RANGE_SPLIT_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_va_range_inject_split_error(UVM_TEST_VA_RANGE_INJECT_SPLIT_ERROR_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_va_range_inject_add_gpu_va_space_error(UVM_TEST_VA_RANGE_INJECT_ADD_GPU_VA_SPACE_ERROR_PARAMS *params, + struct file *filp); + +#endif // __UVM_VA_RANGE_H__ diff --git a/kernel-open/nvidia-uvm/uvm_va_space.c b/kernel-open/nvidia-uvm/uvm_va_space.c new file mode 100644 index 000000000..c767003a6 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_space.c @@ -0,0 +1,2096 @@ +/******************************************************************************* + Copyright (c) 2015-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_api.h" +#include "uvm_va_space.h" +#include "uvm_va_range.h" +#include "uvm_lock.h" +#include "uvm_global.h" +#include "uvm_kvmalloc.h" +#include "uvm_perf_heuristics.h" +#include "uvm_user_channel.h" +#include "uvm_tools.h" +#include "uvm_thread_context.h" +#include "uvm_hal.h" +#include "uvm_map_external.h" +#include "uvm_ats.h" +#include "uvm_gpu_access_counters.h" +#include "uvm_hmm.h" +#include "uvm_va_space_mm.h" +#include "uvm_test.h" +#include "uvm_common.h" +#include "nv_uvm_interface.h" +#include "nv-kthread-q.h" + +static bool processor_mask_array_test(const uvm_processor_mask_t *mask, + uvm_processor_id_t mask_id, + uvm_processor_id_t id) +{ + return uvm_processor_mask_test(&mask[uvm_id_value(mask_id)], id); +} + +static void processor_mask_array_clear(uvm_processor_mask_t *mask, + uvm_processor_id_t mask_id, + uvm_processor_id_t id) +{ + uvm_processor_mask_clear(&mask[uvm_id_value(mask_id)], id); +} + +static void processor_mask_array_set(uvm_processor_mask_t *mask, + uvm_processor_id_t mask_id, + uvm_processor_id_t id) +{ + uvm_processor_mask_set(&mask[uvm_id_value(mask_id)], id); +} + +static bool processor_mask_array_empty(const uvm_processor_mask_t *mask, uvm_processor_id_t mask_id) +{ + return uvm_processor_mask_empty(&mask[uvm_id_value(mask_id)]); +} + +static NV_STATUS enable_peers(uvm_va_space_t *va_space, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1); +static void disable_peers(uvm_va_space_t *va_space, + uvm_gpu_t *gpu0, + uvm_gpu_t *gpu1, + struct list_head *deferred_free_list); +static void remove_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm, + struct list_head *deferred_free_list); +static void va_space_remove_dummy_thread_contexts(uvm_va_space_t *va_space); + +static void init_tools_data(uvm_va_space_t *va_space) +{ + int i; + + uvm_init_rwsem(&va_space->tools.lock, UVM_LOCK_ORDER_VA_SPACE_TOOLS); + + for (i = 0; i < ARRAY_SIZE(va_space->tools.counters); i++) + INIT_LIST_HEAD(va_space->tools.counters + i); + for (i = 0; i < ARRAY_SIZE(va_space->tools.queues); i++) + INIT_LIST_HEAD(va_space->tools.queues + i); +} + +static NV_STATUS register_gpu_nvlink_peers(uvm_va_space_t *va_space, uvm_gpu_t *gpu) +{ + uvm_gpu_t *other_gpu; + + uvm_assert_rwsem_locked(&va_space->lock); + + for_each_va_space_gpu(other_gpu, va_space) { + uvm_gpu_peer_t *peer_caps; + + if (uvm_id_equal(other_gpu->id, gpu->id)) + continue; + + peer_caps = uvm_gpu_peer_caps(gpu, other_gpu); + + if (peer_caps->link_type >= UVM_GPU_LINK_NVLINK_1) { + NV_STATUS status = enable_peers(va_space, gpu, other_gpu); + if (status != NV_OK) + return status; + } + } + + return NV_OK; +} + +static bool va_space_check_processors_masks(uvm_va_space_t *va_space) +{ + uvm_processor_id_t processor; + uvm_processor_mask_t processors; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + uvm_processor_mask_copy(&processors, &va_space->registered_gpus); + uvm_processor_mask_set(&processors, UVM_ID_CPU); + + for_each_id_in_mask(processor, &processors) { + uvm_processor_id_t other_processor; + + UVM_ASSERT(processor_mask_array_test(va_space->can_access, processor, processor)); + UVM_ASSERT(processor_mask_array_test(va_space->accessible_from, processor, processor)); + UVM_ASSERT(processor_mask_array_test(va_space->can_copy_from, processor, processor)); + UVM_ASSERT(processor_mask_array_test(va_space->can_copy_from, processor, UVM_ID_CPU)); + UVM_ASSERT(processor_mask_array_test(va_space->can_copy_from, 
UVM_ID_CPU, processor)); + + // NVLINK + UVM_ASSERT(!processor_mask_array_test(va_space->has_nvlink, processor, processor)); + UVM_ASSERT(uvm_processor_mask_subset(&va_space->has_nvlink[uvm_id_value(processor)], + &va_space->can_copy_from[uvm_id_value(processor)])); + + // Peers + UVM_ASSERT(!processor_mask_array_test(va_space->indirect_peers, processor, processor)); + UVM_ASSERT(uvm_processor_mask_subset(&va_space->indirect_peers[uvm_id_value(processor)], + &va_space->has_native_atomics[uvm_id_value(processor)])); + + // Atomics + UVM_ASSERT(processor_mask_array_test(va_space->has_native_atomics, processor, processor)); + UVM_ASSERT(uvm_processor_mask_subset(&va_space->has_native_atomics[uvm_id_value(processor)], + &va_space->can_copy_from[uvm_id_value(processor)])); + UVM_ASSERT(uvm_processor_mask_subset(&va_space->has_native_atomics[uvm_id_value(processor)], + &va_space->can_access[uvm_id_value(processor)])); + + for_each_id_in_mask(other_processor, &va_space->can_access[uvm_id_value(processor)]) + UVM_ASSERT(processor_mask_array_test(va_space->accessible_from, other_processor, processor)); + + for_each_id_in_mask(other_processor, &va_space->accessible_from[uvm_id_value(processor)]) + UVM_ASSERT(processor_mask_array_test(va_space->can_access, other_processor, processor)); + } + + return true; +} + +NV_STATUS uvm_va_space_create(struct inode *inode, struct file *filp) +{ + NV_STATUS status; + uvm_va_space_t *va_space = uvm_kvmalloc_zero(sizeof(*va_space)); + uvm_gpu_id_t gpu_id; + + if (!va_space) + return NV_ERR_NO_MEMORY; + + uvm_init_rwsem(&va_space->lock, UVM_LOCK_ORDER_VA_SPACE); + uvm_mutex_init(&va_space->serialize_writers_lock, UVM_LOCK_ORDER_VA_SPACE_SERIALIZE_WRITERS); + uvm_mutex_init(&va_space->read_acquire_write_release_lock, + UVM_LOCK_ORDER_VA_SPACE_READ_ACQUIRE_WRITE_RELEASE_LOCK); + uvm_spin_lock_init(&va_space->va_space_mm.lock, UVM_LOCK_ORDER_LEAF); + uvm_range_tree_init(&va_space->va_range_tree); + uvm_ats_init_va_space(va_space); + + // By default all struct files on the same inode share the same + // address_space structure (the inode's) across all processes. This means + // unmap_mapping_range would unmap virtual mappings across all processes on + // that inode. + // + // Since the UVM driver uses the mapping offset as the VA of the file's + // process, we need to isolate the mappings to each process. + address_space_init_once(&va_space->mapping); + va_space->mapping.host = inode; + + // Some paths in the kernel, for example force_page_cache_readahead which + // can be invoked from user-space via madvise MADV_WILLNEED and fadvise + // POSIX_FADV_WILLNEED, check the function pointers within + // file->f_mapping->a_ops for validity. However, those paths assume that a_ops + // itself is always valid. Handle that by using the inode's a_ops pointer, + // which is what f_mapping->a_ops would point to anyway if we weren't re- + // assigning f_mapping. 
+ va_space->mapping.a_ops = inode->i_mapping->a_ops; + +#if defined(NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO) + va_space->mapping.backing_dev_info = inode->i_mapping->backing_dev_info; +#endif + + // Init to 0 since we rely on atomic_inc_return behavior to return 1 as the first ID + atomic64_set(&va_space->range_group_id_counter, 0); + + INIT_RADIX_TREE(&va_space->range_groups, NV_UVM_GFP_FLAGS); + uvm_range_tree_init(&va_space->range_group_ranges); + + bitmap_zero(va_space->enabled_peers, UVM_MAX_UNIQUE_GPU_PAIRS); + + // CPU is not explicitly registered in the va space + processor_mask_array_set(va_space->can_access, UVM_ID_CPU, UVM_ID_CPU); + processor_mask_array_set(va_space->accessible_from, UVM_ID_CPU, UVM_ID_CPU); + processor_mask_array_set(va_space->can_copy_from, UVM_ID_CPU, UVM_ID_CPU); + processor_mask_array_set(va_space->has_native_atomics, UVM_ID_CPU, UVM_ID_CPU); + + // CPU always participates in system-wide atomics + uvm_processor_mask_set(&va_space->system_wide_atomics_enabled_processors, UVM_ID_CPU); + uvm_processor_mask_set(&va_space->faultable_processors, UVM_ID_CPU); + + // Initialize the CPU/GPU affinity array. New CPU NUMA nodes are added at + // GPU registration time, but they are never freed on unregister_gpu + // (although the GPU is removed from the corresponding mask). + for_each_gpu_id(gpu_id) { + uvm_cpu_gpu_affinity_t *affinity = &va_space->gpu_cpu_numa_affinity[uvm_id_gpu_index(gpu_id)]; + + affinity->numa_node = -1; + uvm_processor_mask_zero(&affinity->gpus); + } + + init_waitqueue_head(&va_space->va_space_mm.last_retainer_wait_queue); + init_waitqueue_head(&va_space->gpu_va_space_deferred_free.wait_queue); + + filp->private_data = va_space; + filp->f_mapping = &va_space->mapping; + + va_space->test.page_prefetch_enabled = true; + + init_tools_data(va_space); + + uvm_va_space_down_write(va_space); + + status = uvm_perf_init_va_space_events(va_space, &va_space->perf_events); + if (status != NV_OK) + goto fail; + + status = uvm_perf_heuristics_load(va_space); + if (status != NV_OK) + goto fail; + + status = uvm_gpu_init_va_space(va_space); + if (status != NV_OK) + goto fail; + + UVM_ASSERT(va_space_check_processors_masks(va_space)); + + uvm_va_space_up_write(va_space); + + uvm_mutex_lock(&g_uvm_global.va_spaces.lock); + list_add_tail(&va_space->list_node, &g_uvm_global.va_spaces.list); + uvm_mutex_unlock(&g_uvm_global.va_spaces.lock); + + return NV_OK; + +fail: + uvm_perf_heuristics_unload(va_space); + uvm_perf_destroy_va_space_events(&va_space->perf_events); + uvm_va_space_up_write(va_space); + + uvm_kvfree(va_space); + + return status; +} + +// This function does *not* release the GPU, nor the GPU's PCIE peer pairings. +// Those are returned so the caller can do it after dropping the VA space lock. +static void unregister_gpu(uvm_va_space_t *va_space, + uvm_gpu_t *gpu, + struct mm_struct *mm, + struct list_head *deferred_free_list, + uvm_global_processor_mask_t *peers_to_release) +{ + uvm_gpu_t *peer_gpu; + uvm_va_range_t *va_range; + NvU32 peer_table_index; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + if (peers_to_release) + uvm_global_processor_mask_zero(peers_to_release); + + // If a GPU VA Space was explicitly registered, but not explicitly + // unregistered, unregister it and add all of its objects to the free list. 
+ remove_gpu_va_space(uvm_gpu_va_space_get(va_space, gpu), mm, deferred_free_list); + + uvm_for_each_va_range(va_range, va_space) + uvm_va_range_unregister_gpu(va_range, gpu, mm, deferred_free_list); + + // If this GPU has any peer-to-peer pair that was explicitly enabled, but + // not explicitly disabled, disable it. + // Notably do this only after unregistering the GPU from VA ranges to make + // sure there is no pending work using the peer mappings within the VA + // blocks (in particular migrations using the peer identity mappings). + for_each_va_space_gpu(peer_gpu, va_space) { + if (gpu == peer_gpu) + continue; + + peer_table_index = uvm_gpu_peer_table_index(gpu->id, peer_gpu->id); + if (test_bit(peer_table_index, va_space->enabled_peers)) { + disable_peers(va_space, gpu, peer_gpu, deferred_free_list); + + // Only PCIE peers need to be globally released. NVLINK peers are + // brought up and torn down automatically within add_gpu and + // remove_gpu. + if (peers_to_release && g_uvm_global.peers[peer_table_index].link_type == UVM_GPU_LINK_PCIE) + uvm_global_processor_mask_set(peers_to_release, peer_gpu->global_id); + } + } + + if (gpu->parent->isr.replayable_faults.handling) + uvm_processor_mask_clear(&va_space->faultable_processors, gpu->id); + + uvm_processor_mask_clear(&va_space->system_wide_atomics_enabled_processors, gpu->id); + + processor_mask_array_clear(va_space->can_access, gpu->id, gpu->id); + processor_mask_array_clear(va_space->can_access, gpu->id, UVM_ID_CPU); + processor_mask_array_clear(va_space->can_access, UVM_ID_CPU, gpu->id); + UVM_ASSERT(processor_mask_array_empty(va_space->can_access, gpu->id)); + + processor_mask_array_clear(va_space->accessible_from, gpu->id, gpu->id); + processor_mask_array_clear(va_space->accessible_from, gpu->id, UVM_ID_CPU); + processor_mask_array_clear(va_space->accessible_from, UVM_ID_CPU, gpu->id); + UVM_ASSERT(processor_mask_array_empty(va_space->accessible_from, gpu->id)); + + processor_mask_array_clear(va_space->can_copy_from, gpu->id, gpu->id); + processor_mask_array_clear(va_space->can_copy_from, gpu->id, UVM_ID_CPU); + processor_mask_array_clear(va_space->can_copy_from, UVM_ID_CPU, gpu->id); + UVM_ASSERT(processor_mask_array_empty(va_space->can_copy_from, gpu->id)); + + processor_mask_array_clear(va_space->has_nvlink, gpu->id, UVM_ID_CPU); + processor_mask_array_clear(va_space->has_nvlink, UVM_ID_CPU, gpu->id); + UVM_ASSERT(processor_mask_array_empty(va_space->has_nvlink, gpu->id)); + + UVM_ASSERT(processor_mask_array_empty(va_space->indirect_peers, gpu->id)); + + processor_mask_array_clear(va_space->has_native_atomics, gpu->id, gpu->id); + processor_mask_array_clear(va_space->has_native_atomics, gpu->id, UVM_ID_CPU); + processor_mask_array_clear(va_space->has_native_atomics, UVM_ID_CPU, gpu->id); + UVM_ASSERT(processor_mask_array_empty(va_space->has_native_atomics, gpu->id)); + + uvm_processor_mask_clear(&va_space->registered_gpus, gpu->id); + va_space->registered_gpus_table[uvm_id_gpu_index(gpu->id)] = NULL; + + // Remove the GPU from the CPU/GPU affinity masks + if (gpu->parent->closest_cpu_numa_node != -1) { + uvm_gpu_id_t gpu_id; + + for_each_gpu_id(gpu_id) { + uvm_cpu_gpu_affinity_t *affinity = &va_space->gpu_cpu_numa_affinity[uvm_id_gpu_index(gpu_id)]; + + if (affinity->numa_node == gpu->parent->closest_cpu_numa_node) { + uvm_processor_mask_clear(&affinity->gpus, gpu->id); + break; + } + } + } + + + + + + + + + va_space_check_processors_masks(va_space); +} + +static void gpu_va_space_stop_all_channels(uvm_gpu_va_space_t 
*gpu_va_space) +{ + uvm_user_channel_t *user_channel; + + list_for_each_entry(user_channel, &gpu_va_space->registered_channels, list_node) + uvm_user_channel_stop(user_channel); + + // Prevent new channels from being registered since we'll be dropping the + // VA space lock shortly with the expectation that no more channels will + // arrive. + atomic_set(&gpu_va_space->disallow_new_channels, 1); +} + +// Detaches (unregisters) all user channels in a GPU VA space. The channels must +// have previously been stopped. +// +// The detached channels are added to the input list. The caller is expected to +// drop the VA space lock and call uvm_deferred_free_object_list to complete the +// destroy operation. +static void uvm_gpu_va_space_detach_all_user_channels(uvm_gpu_va_space_t *gpu_va_space, + struct list_head *deferred_free_list) +{ + uvm_user_channel_t *user_channel, *next_channel; + list_for_each_entry_safe(user_channel, next_channel, &gpu_va_space->registered_channels, list_node) + uvm_user_channel_detach(user_channel, deferred_free_list); +} + +void uvm_va_space_detach_all_user_channels(uvm_va_space_t *va_space, struct list_head *deferred_free_list) +{ + uvm_gpu_va_space_t *gpu_va_space; + for_each_gpu_va_space(gpu_va_space, va_space) + uvm_gpu_va_space_detach_all_user_channels(gpu_va_space, deferred_free_list); +} + +void uvm_va_space_destroy(uvm_va_space_t *va_space) +{ + uvm_va_range_t *va_range, *va_range_next; + uvm_gpu_t *gpu; + uvm_gpu_id_t gpu_id; + uvm_global_gpu_id_t global_gpu_id; + uvm_global_processor_mask_t retained_gpus; + LIST_HEAD(deferred_free_list); + + // Remove the VA space from the global list before we start tearing things + // down so other threads can't see the VA space in a partially-valid state. + uvm_mutex_lock(&g_uvm_global.va_spaces.lock); + list_del(&va_space->list_node); + uvm_mutex_unlock(&g_uvm_global.va_spaces.lock); + + uvm_perf_heuristics_stop(va_space); + + // Stop all channels before unmapping anything. This kills the channels and + // prevents spurious MMU faults from being generated (bug 1722021), but + // doesn't prevent the bottom half from servicing old faults for those + // channels. + // + // This involves making RM calls, so we have to do that with the VA space + // lock in read mode. + uvm_va_space_down_read_rm(va_space); + uvm_va_space_stop_all_user_channels(va_space); + uvm_va_space_up_read_rm(va_space); + + // The bottom half GPU page fault handler(s) could still look up and use + // this va_space via the GPU's instance_ptr_table. Lock them out while we + // tear down. Once we're done, the bottom half will fail to find any + // registered GPUs in the VA space, so those faults will be canceled. + uvm_va_space_down_write(va_space); + + uvm_va_space_global_gpus(va_space, &retained_gpus); + + bitmap_copy(va_space->enabled_peers_teardown, va_space->enabled_peers, UVM_MAX_UNIQUE_GPU_PAIRS); + + uvm_va_space_detach_all_user_channels(va_space, &deferred_free_list); + + // Destroy all VA ranges. We do this before unregistering the GPUs for + // performance, since GPU unregister will walk all VA ranges in the VA space + // multiple times. + uvm_for_each_va_range_safe(va_range, va_range_next, va_space) { + // All channel ranges should've been destroyed by the channel unregister + // above + UVM_ASSERT(va_range->type != UVM_VA_RANGE_TYPE_CHANNEL); + uvm_va_range_destroy(va_range, &deferred_free_list); + } + + uvm_hmm_va_space_destroy(va_space); + + uvm_range_group_radix_tree_destroy(va_space); + + // Unregister all GPUs in the VA space. 
Note that this does not release the + // GPUs nor peers. We do that below. + for_each_va_space_gpu(gpu, va_space) + unregister_gpu(va_space, gpu, NULL, &deferred_free_list, NULL); + + uvm_perf_heuristics_unload(va_space); + uvm_perf_destroy_va_space_events(&va_space->perf_events); + + va_space_remove_dummy_thread_contexts(va_space); + + uvm_va_space_up_write(va_space); + + UVM_ASSERT(uvm_processor_mask_empty(&va_space->registered_gpus)); + UVM_ASSERT(uvm_processor_mask_empty(&va_space->registered_gpu_va_spaces)); + + for_each_gpu_id(gpu_id) + UVM_ASSERT(va_space->registered_gpus_table[uvm_id_gpu_index(gpu_id)] == NULL); + + // The instance pointer mappings for this VA space have been removed so no + // new bottom halves can get to this VA space, but there could still be + // bottom halves running from before we removed the mapping. Rather than + // ref-count the VA space, just wait for them to finish. + // + // This is also required to synchronize any pending + // block_deferred_accessed_by() work items. + + nv_kthread_q_flush(&g_uvm_global.global_q); + + for_each_global_gpu_in_mask(gpu, &retained_gpus) { + if (!gpu->parent->isr.replayable_faults.handling) { + UVM_ASSERT(!gpu->parent->isr.non_replayable_faults.handling); + continue; + } + + nv_kthread_q_flush(&gpu->parent->isr.bottom_half_q); + + // The same applies to the kill channel kthreads. However, they need to + // be flushed after their bottom-half counterparts since the latter may + // schedule a channel kill. + if (gpu->parent->isr.non_replayable_faults.handling) + nv_kthread_q_flush(&gpu->parent->isr.kill_channel_q); + + if (gpu->parent->access_counters_supported) + uvm_gpu_access_counters_disable(gpu, va_space); + } + + // Check that all CPU/GPU affinity masks are empty + for_each_gpu_id(gpu_id) { + const uvm_cpu_gpu_affinity_t *affinity = &va_space->gpu_cpu_numa_affinity[uvm_id_gpu_index(gpu_id)]; + + UVM_ASSERT(uvm_processor_mask_empty(&affinity->gpus)); + } + + // ensure that there are no pending events that refer to this va_space + uvm_tools_flush_events(); + + // Perform cleanup we can't do while holding the VA space lock + + uvm_deferred_free_object_list(&deferred_free_list); + + // Remove the mm_struct association on this VA space, if any. This may + // invoke uvm_va_space_mm_shutdown(), which in turn will disable all + // channels and wait for any retainers to finish, so it has to be done + // outside of the VA space lock. + // + // Since we must already handle mm shutdown being called at any point prior + // to this call, this call can be made at any point in + // uvm_va_space_destroy(). It's beneficial to do it late after doing all + // deferred frees for GPU VA spaces and channels, because then + // uvm_va_space_mm_shutdown() will have minimal work to do. + uvm_va_space_mm_unregister(va_space); + + uvm_mutex_lock(&g_uvm_global.global_lock); + + // Release the GPUs and their peer counts. Do not use + // for_each_global_gpu_in_mask for the outer loop as it reads the GPU + // state, which might get destroyed. 
+ for_each_global_gpu_id_in_mask(global_gpu_id, &retained_gpus) { + uvm_gpu_t *peer_gpu; + + gpu = uvm_gpu_get(global_gpu_id); + + uvm_global_processor_mask_clear(&retained_gpus, global_gpu_id); + + for_each_global_gpu_in_mask(peer_gpu, &retained_gpus) { + NvU32 peer_table_index = uvm_gpu_peer_table_index(gpu->id, peer_gpu->id); + if (test_bit(peer_table_index, va_space->enabled_peers_teardown)) { + uvm_gpu_peer_t *peer_caps = &g_uvm_global.peers[peer_table_index]; + + if (peer_caps->link_type == UVM_GPU_LINK_PCIE) + uvm_gpu_release_pcie_peer_access(gpu, peer_gpu); + + __clear_bit(peer_table_index, va_space->enabled_peers_teardown); + } + } + + uvm_gpu_release_locked(gpu); + } + + UVM_ASSERT(bitmap_empty(va_space->enabled_peers, UVM_MAX_UNIQUE_GPU_PAIRS)); + UVM_ASSERT(bitmap_empty(va_space->enabled_peers_teardown, UVM_MAX_UNIQUE_GPU_PAIRS)); + + uvm_mutex_unlock(&g_uvm_global.global_lock); + + uvm_kvfree(va_space); +} + +NV_STATUS uvm_va_space_initialize(uvm_va_space_t *va_space, NvU64 flags) +{ + NV_STATUS status = NV_OK; + + if (flags & ~UVM_INIT_FLAGS_MASK) + return NV_ERR_INVALID_ARGUMENT; + + uvm_down_write_mmap_lock(current->mm); + uvm_va_space_down_write(va_space); + + if (atomic_read(&va_space->initialized)) { + // Already initialized - check if parameters match + if (flags != va_space->initialization_flags) + status = NV_ERR_INVALID_ARGUMENT; + } + else { + va_space->initialization_flags = flags; + + status = uvm_va_space_mm_register(va_space); + if (status != NV_OK) + goto out; + + status = uvm_hmm_va_space_initialize(va_space); + if (status != NV_OK) + goto unreg; + + // Use release semantics to match the acquire semantics in + // uvm_va_space_initialized. See that function for details. All + // initialization must be complete by this point. + atomic_set_release(&va_space->initialized, 1); + } + +out: + uvm_va_space_up_write(va_space); + uvm_up_write_mmap_lock(current->mm); + return status; + +unreg: + uvm_va_space_up_write(va_space); + uvm_up_write_mmap_lock(current->mm); + // See the comment in uvm_va_space_mm_unregister() for why this has to be + // called after releasing the locks. + uvm_va_space_mm_unregister(va_space); + return status; +} + +void uvm_va_space_stop_all_user_channels(uvm_va_space_t *va_space) +{ + uvm_gpu_va_space_t *gpu_va_space; + uvm_user_channel_t *user_channel; + + // Skip if all channels have been already stopped. + if (atomic_read(&va_space->user_channels_stopped)) + return; + + uvm_assert_rwsem_locked_read(&va_space->lock); + + for_each_gpu_va_space(gpu_va_space, va_space) { + list_for_each_entry(user_channel, &gpu_va_space->registered_channels, list_node) + uvm_user_channel_stop(user_channel); + } + + // Since we're holding the VA space lock in read mode, multiple threads + // could set this concurrently. user_channels_stopped never transitions back + // to 0 after being set to 1 so that's not a problem. 
+ atomic_set(&va_space->user_channels_stopped, 1); +} + +uvm_gpu_t *uvm_va_space_get_gpu_by_uuid(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid) +{ + uvm_gpu_t *gpu; + + for_each_va_space_gpu(gpu, va_space) { + if (uvm_processor_uuid_eq(uvm_gpu_uuid(gpu), gpu_uuid)) + return gpu; + } + + return NULL; +} + +uvm_gpu_t *uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(uvm_va_space_t *va_space, + const NvProcessorUuid *gpu_uuid) +{ + uvm_gpu_t *gpu; + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, gpu_uuid); + if (!gpu || !uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->id)) + return NULL; + + return gpu; +} + +uvm_gpu_t *uvm_va_space_retain_gpu_by_uuid(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid) +{ + uvm_gpu_t *gpu; + + uvm_va_space_down_read(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, gpu_uuid); + if (gpu) + uvm_gpu_retain(gpu); + + uvm_va_space_up_read(va_space); + + return gpu; +} + +bool uvm_va_space_can_read_duplicate(uvm_va_space_t *va_space, uvm_gpu_t *changing_gpu) +{ + uvm_processor_mask_t changing_gpu_mask; + uvm_processor_mask_t non_faultable_gpus; + uvm_processor_mask_t registered_gpu_va_spaces; + + uvm_processor_mask_zero(&changing_gpu_mask); + + if (changing_gpu) + uvm_processor_mask_set(&changing_gpu_mask, changing_gpu->id); + + // flip the bit of the changing GPU to represent the state change in progress + uvm_processor_mask_xor(®istered_gpu_va_spaces, &changing_gpu_mask, &va_space->registered_gpu_va_spaces); + + // Can't enable read-duplication if any non-fault-capable GPUs have GPU VA spaces registered + return !uvm_processor_mask_andnot(&non_faultable_gpus, ®istered_gpu_va_spaces, &va_space->faultable_processors); +} + +// Note that the "VA space" in the function name refers to a UVM per-process VA space. +// (This is different from a per-GPU VA space.) +NV_STATUS uvm_va_space_register_gpu(uvm_va_space_t *va_space, + const NvProcessorUuid *gpu_uuid, + const uvm_rm_user_object_t *user_rm_device, + NvBool *numa_enabled, + NvS32 *numa_node_id) +{ + NV_STATUS status; + uvm_va_range_t *va_range; + uvm_gpu_t *gpu; + uvm_gpu_t *other_gpu; + + status = uvm_gpu_retain_by_uuid(gpu_uuid, user_rm_device, &gpu); + if (status != NV_OK) + return status; + + // Enabling access counters requires taking the ISR lock, so it is done + // without holding the (deeper order) VA space lock. Enabling the counters + // after dropping the VA space lock would create a window of time in which + // another thread could see the GPU as registered, but access counters would + // be disabled. Therefore, the counters are enabled before taking the VA + // space lock. + if (uvm_gpu_access_counters_required(gpu->parent)) { + status = uvm_gpu_access_counters_enable(gpu, va_space); + if (status != NV_OK) { + uvm_gpu_release(gpu); + return status; + } + } + + uvm_va_space_down_write(va_space); + + // Make sure the gpu hasn't been already registered in this va space + if (uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + + // Mixing Volta and Pascal GPUs is not supported on P9 systems. 
+ for_each_va_space_gpu(other_gpu, va_space) { + if ((gpu->parent->sysmem_link >= UVM_GPU_LINK_NVLINK_2 && + other_gpu->parent->sysmem_link < UVM_GPU_LINK_NVLINK_2) || + (gpu->parent->sysmem_link < UVM_GPU_LINK_NVLINK_2 && + other_gpu->parent->sysmem_link >= UVM_GPU_LINK_NVLINK_2)) { + status = NV_ERR_INVALID_DEVICE; + goto done; + } + } + + // The VA space's mm is being torn down, so don't allow more work + if (va_space->disallow_new_registers) { + status = NV_ERR_PAGE_TABLE_NOT_AVAIL; + goto done; + } + + + + + + + + + + + + + uvm_processor_mask_set(&va_space->registered_gpus, gpu->id); + va_space->registered_gpus_table[uvm_id_gpu_index(gpu->id)] = gpu; + + if (gpu->parent->isr.replayable_faults.handling) { + uvm_processor_mask_set(&va_space->faultable_processors, gpu->id); + // System-wide atomics are enabled by default + uvm_processor_mask_set(&va_space->system_wide_atomics_enabled_processors, gpu->id); + } + + // All GPUs have native atomics on their own memory + processor_mask_array_set(va_space->has_native_atomics, gpu->id, gpu->id); + + + + + if (gpu->parent->sysmem_link >= UVM_GPU_LINK_NVLINK_1) { + processor_mask_array_set(va_space->has_nvlink, gpu->id, UVM_ID_CPU); + processor_mask_array_set(va_space->has_nvlink, UVM_ID_CPU, gpu->id); + } + + if (gpu->parent->sysmem_link >= UVM_GPU_LINK_NVLINK_2) { + processor_mask_array_set(va_space->has_native_atomics, gpu->id, UVM_ID_CPU); + + if (gpu->parent->numa_info.enabled) { + processor_mask_array_set(va_space->can_access, UVM_ID_CPU, gpu->id); + processor_mask_array_set(va_space->accessible_from, gpu->id, UVM_ID_CPU); + processor_mask_array_set(va_space->has_native_atomics, UVM_ID_CPU, gpu->id); + } + } + + // All processors have direct access to their own memory + processor_mask_array_set(va_space->can_access, gpu->id, gpu->id); + processor_mask_array_set(va_space->accessible_from, gpu->id, gpu->id); + + + + + + + + + // All GPUs have direct access to sysmem + processor_mask_array_set(va_space->can_access, gpu->id, UVM_ID_CPU); + processor_mask_array_set(va_space->accessible_from, UVM_ID_CPU, gpu->id); + + + processor_mask_array_set(va_space->can_copy_from, gpu->id, gpu->id); + processor_mask_array_set(va_space->can_copy_from, gpu->id, UVM_ID_CPU); + processor_mask_array_set(va_space->can_copy_from, UVM_ID_CPU, gpu->id); + + // Update the CPU/GPU affinity masks + if (gpu->parent->closest_cpu_numa_node != -1) { + uvm_gpu_id_t gpu_id; + + for_each_gpu_id(gpu_id) { + uvm_cpu_gpu_affinity_t *affinity = &va_space->gpu_cpu_numa_affinity[uvm_id_gpu_index(gpu_id)]; + + // If this is the first time this node is seen, take a new entry of + // the array. Entries are never released in order to avoid having + // to deal with holes. 
+ if (affinity->numa_node == -1) { + UVM_ASSERT(uvm_processor_mask_empty(&affinity->gpus)); + affinity->numa_node = gpu->parent->closest_cpu_numa_node; + } + + if (affinity->numa_node == gpu->parent->closest_cpu_numa_node) { + uvm_processor_mask_set(&affinity->gpus, gpu->id); + break; + } + } + } + + status = register_gpu_nvlink_peers(va_space, gpu); + if (status != NV_OK) + goto cleanup; + + status = uvm_perf_heuristics_register_gpu(va_space, gpu); + if (status != NV_OK) + goto cleanup; + + uvm_for_each_va_range(va_range, va_space) { + status = uvm_va_range_register_gpu(va_range, gpu); + if (status != NV_OK) + goto cleanup; + } + + if (gpu->parent->numa_info.enabled) { + *numa_enabled = NV_TRUE; + *numa_node_id = (NvS32)uvm_gpu_numa_info(gpu)->node_id; + } + else { + *numa_enabled = NV_FALSE; + *numa_node_id = -1; + } + + goto done; + +cleanup: + // Clear out all of the processor mask bits. No VA ranges have mapped or + // allocated anything on this GPU yet if we fail here, so we don't need + // a deferred_free_list, mm, etc. + unregister_gpu(va_space, gpu, NULL, NULL, NULL); + +done: + UVM_ASSERT(va_space_check_processors_masks(va_space)); + + uvm_va_space_up_write(va_space); + + if (status != NV_OK) { + // There is no risk of disabling access counters on a previously + // registered GPU: the enablement step would have failed before even + // discovering that the GPU is already registed. + if (uvm_gpu_access_counters_required(gpu->parent)) + uvm_gpu_access_counters_disable(gpu, va_space); + + uvm_gpu_release(gpu); + } + + return status; +} + +NV_STATUS uvm_va_space_unregister_gpu(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid) +{ + uvm_gpu_t *gpu; + uvm_gpu_va_space_t *gpu_va_space; + struct mm_struct *mm; + uvm_global_gpu_id_t peer_gpu_id; + uvm_global_processor_mask_t peers_to_release; + LIST_HEAD(deferred_free_list); + + // Stopping channels requires holding the VA space lock in read mode, so do + // it first. We start in write mode then drop to read in order to flush out + // other threads which are in the read-mode portion of any of the register + // or unregister operations. + uvm_va_space_down_write(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid(va_space, gpu_uuid); + if (!gpu) { + uvm_va_space_up_write(va_space); + return NV_ERR_INVALID_DEVICE; + } + + // We have to drop the VA space lock below mid-unregister. We have to + // prevent any other threads from coming in during that window and allowing + // new channels to enter the GPU. That means we must disallow: + // - GPU VA space register + // - GPU unregister (which would allow new GPU registers) + if (uvm_processor_mask_test(&va_space->gpu_unregister_in_progress, gpu->id)) { + uvm_va_space_up_write(va_space); + return NV_ERR_INVALID_DEVICE; + } + + uvm_processor_mask_set(&va_space->gpu_unregister_in_progress, gpu->id); + + uvm_va_space_downgrade_write_rm(va_space); + + gpu_va_space = uvm_gpu_va_space_get(va_space, gpu); + if (gpu_va_space) + gpu_va_space_stop_all_channels(gpu_va_space); + + // We need to drop the lock to re-take it in write mode. We don't have to + // retain the GPU because we've prevented other threads from unregistering + // it from the VA space until we're done. + uvm_va_space_up_read_rm(va_space); + + // If uvm_gpu_access_counters_required(gpu->parent) is true, a concurrent + // registration could enable access counters after they are disabled here. 
+ // The concurrent registration will fail later on if it acquires the VA + // space lock before the unregistration does (because the GPU is still + // registered) and undo the access counters enablement, or succeed if it + // acquires the VA space lock after the unregistration does. Both outcomes + // result on valid states. + if (gpu->parent->access_counters_supported) + uvm_gpu_access_counters_disable(gpu, va_space); + + // mmap_lock is needed to establish CPU mappings to any pages evicted from + // the GPU if accessed by CPU is set for them. + mm = uvm_va_space_mm_or_current_retain_lock(va_space); + + uvm_va_space_down_write(va_space); + + // We blocked out other GPU unregisters, so this GPU must still be + // registered. However, the GPU VA space might have been unregistered on us. + UVM_ASSERT(uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)); + if (uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->id)) + UVM_ASSERT(uvm_gpu_va_space_get(va_space, gpu) == gpu_va_space); + + // This will call disable_peers for all GPU's peers, including NVLink + unregister_gpu(va_space, gpu, mm, &deferred_free_list, &peers_to_release); + + UVM_ASSERT(uvm_processor_mask_test(&va_space->gpu_unregister_in_progress, gpu->id)); + uvm_processor_mask_clear(&va_space->gpu_unregister_in_progress, gpu->id); + + uvm_va_space_up_write(va_space); + uvm_va_space_mm_or_current_release_unlock(va_space, mm); + + uvm_deferred_free_object_list(&deferred_free_list); + + // Release the VA space's GPU and peer counts + uvm_mutex_lock(&g_uvm_global.global_lock); + + // Do not use for_each_global_gpu_in_mask as it reads the peer GPU state, + // which might get destroyed when we release the peer entry. + for_each_global_gpu_id_in_mask(peer_gpu_id, &peers_to_release) { + uvm_gpu_t *peer_gpu = uvm_gpu_get(peer_gpu_id); + UVM_ASSERT(uvm_gpu_peer_caps(gpu, peer_gpu)->link_type == UVM_GPU_LINK_PCIE); + uvm_gpu_release_pcie_peer_access(gpu, peer_gpu); + } + + uvm_gpu_release_locked(gpu); + + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return NV_OK; +} + +// This does *not* release the global GPU peer entry +static void disable_peers(uvm_va_space_t *va_space, + uvm_gpu_t *gpu0, + uvm_gpu_t *gpu1, + struct list_head *deferred_free_list) +{ + NvU32 table_index; + uvm_va_range_t *va_range; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + table_index = uvm_gpu_peer_table_index(gpu0->id, gpu1->id); + + if (!test_bit(table_index, va_space->enabled_peers)) + return; + + // Unmap all page tables in this VA space which have peer mappings between + // these two GPUs. 
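Editor's note: the unregister path above relies on the gpu_unregister_in_progress mask to keep other threads from racing in while the VA space lock is temporarily dropped mid-unregister. A toy, single-threaded model of that guard, using hypothetical names and plain bitmasks instead of uvm_processor_mask_t:

```c
#include <stdint.h>
#include <stdio.h>

/* Toy model of the gpu_unregister_in_progress guard: an unregister (or GPU VA
 * space register) that observes the bit already set must fail instead of
 * racing with the thread that dropped the lock mid-unregister. */
struct toy_va_space {
    uint64_t registered_gpus;
    uint64_t unregister_in_progress;
};

static int begin_unregister(struct toy_va_space *vas, int gpu)
{
    uint64_t bit = 1ull << gpu;

    if (!(vas->registered_gpus & bit))
        return -1;                       /* not registered */
    if (vas->unregister_in_progress & bit)
        return -1;                       /* raced with another unregister */

    vas->unregister_in_progress |= bit;  /* the lock may be dropped after this */
    return 0;
}

static void finish_unregister(struct toy_va_space *vas, int gpu)
{
    uint64_t bit = 1ull << gpu;

    vas->registered_gpus &= ~bit;
    vas->unregister_in_progress &= ~bit;
}

int main(void)
{
    struct toy_va_space vas = { .registered_gpus = 0x1 };

    printf("first:  %d\n", begin_unregister(&vas, 0)); /*  0: proceeds          */
    printf("second: %d\n", begin_unregister(&vas, 0)); /* -1: already in flight */
    finish_unregister(&vas, 0);
    return 0;
}
```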
+ uvm_for_each_va_range(va_range, va_space) + uvm_va_range_disable_peer(va_range, gpu0, gpu1, deferred_free_list); + + processor_mask_array_clear(va_space->can_access, gpu0->id, gpu1->id); + processor_mask_array_clear(va_space->can_access, gpu1->id, gpu0->id); + processor_mask_array_clear(va_space->accessible_from, gpu0->id, gpu1->id); + processor_mask_array_clear(va_space->accessible_from, gpu1->id, gpu0->id); + processor_mask_array_clear(va_space->can_copy_from, gpu0->id, gpu1->id); + processor_mask_array_clear(va_space->can_copy_from, gpu1->id, gpu0->id); + processor_mask_array_clear(va_space->has_nvlink, gpu0->id, gpu1->id); + processor_mask_array_clear(va_space->has_nvlink, gpu1->id, gpu0->id); + processor_mask_array_clear(va_space->indirect_peers, gpu0->id, gpu1->id); + processor_mask_array_clear(va_space->indirect_peers, gpu1->id, gpu0->id); + processor_mask_array_clear(va_space->has_native_atomics, gpu0->id, gpu1->id); + processor_mask_array_clear(va_space->has_native_atomics, gpu1->id, gpu0->id); + + __clear_bit(table_index, va_space->enabled_peers); + + va_space_check_processors_masks(va_space); +} + +static NV_STATUS enable_peers(uvm_va_space_t *va_space, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + NV_STATUS status = NV_OK; + uvm_gpu_va_space_t *gpu_va_space0, *gpu_va_space1; + NvU32 table_index = 0; + uvm_gpu_peer_t *peer_caps; + uvm_va_range_t *va_range; + LIST_HEAD(deferred_free_list); + + uvm_assert_rwsem_locked_write(&va_space->lock); + + // We know the GPUs were retained already, so now verify that they've been + // registered by this specific VA space. + if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu0->id) || + !uvm_processor_mask_test(&va_space->registered_gpus, gpu1->id)) { + return NV_ERR_INVALID_DEVICE; + } + + table_index = uvm_gpu_peer_table_index(gpu0->id, gpu1->id); + peer_caps = &g_uvm_global.peers[table_index]; + + UVM_ASSERT(!test_bit(table_index, va_space->enabled_peers)); + + // If both GPUs have registered GPU VA spaces already, their big page sizes + // must match. 
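Editor's note: enable_peers() and disable_peers() key all of their per-pair state off a single slot in the enabled_peers bitmap, looked up through uvm_gpu_peer_table_index(). That helper is defined elsewhere in the driver; the sketch below only assumes that each unordered GPU pair maps to one stable bit, using a triangular-number index as a stand-in:

```c
#include <stdio.h>

/* Standalone stand-in for uvm_gpu_peer_table_index(): map each unordered GPU
 * pair to one stable bit using a triangular-number index. The real helper is
 * defined elsewhere in the driver; only the "one bit per pair" idea matters
 * for enabled_peers. */
static unsigned pair_index(unsigned a, unsigned b)
{
    unsigned lo = a < b ? a : b;
    unsigned hi = a < b ? b : a;

    /* Slots for pairs (0,1), (0,2), (1,2), (0,3), ... */
    return hi * (hi - 1) / 2 + lo;
}

int main(void)
{
    unsigned long enabled_peers = 0;

    enabled_peers |= 1ul << pair_index(2, 5);                      /* enable_peers(2, 5)   */
    printf("%d\n", !!(enabled_peers & (1ul << pair_index(5, 2)))); /* 1: order-insensitive */

    enabled_peers &= ~(1ul << pair_index(5, 2));                   /* disable_peers(5, 2)  */
    printf("%d\n", !!(enabled_peers & (1ul << pair_index(2, 5)))); /* 0                    */
    return 0;
}
```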
+ gpu_va_space0 = uvm_gpu_va_space_get(va_space, gpu0); + gpu_va_space1 = uvm_gpu_va_space_get(va_space, gpu1); + if (gpu_va_space0 && + gpu_va_space1 && + gpu_va_space0->page_tables.big_page_size != gpu_va_space1->page_tables.big_page_size) { + return NV_ERR_NOT_COMPATIBLE; + } + + processor_mask_array_set(va_space->can_access, gpu0->id, gpu1->id); + processor_mask_array_set(va_space->can_access, gpu1->id, gpu0->id); + processor_mask_array_set(va_space->accessible_from, gpu0->id, gpu1->id); + processor_mask_array_set(va_space->accessible_from, gpu1->id, gpu0->id); + + if (gpu0->parent->peer_copy_mode != UVM_GPU_PEER_COPY_MODE_UNSUPPORTED) { + UVM_ASSERT_MSG(gpu1->parent->peer_copy_mode == gpu0->parent->peer_copy_mode, + "GPU %s GPU %s\n", + uvm_gpu_name(gpu0), + uvm_gpu_name(gpu1)); + + processor_mask_array_set(va_space->can_copy_from, gpu1->id, gpu0->id); + processor_mask_array_set(va_space->can_copy_from, gpu0->id, gpu1->id); + } + + // Pre-compute nvlink and native atomic masks for the new peers + if (peer_caps->link_type >= UVM_GPU_LINK_NVLINK_1) { + processor_mask_array_set(va_space->has_nvlink, gpu0->id, gpu1->id); + processor_mask_array_set(va_space->has_nvlink, gpu1->id, gpu0->id); + + processor_mask_array_set(va_space->has_native_atomics, gpu0->id, gpu1->id); + processor_mask_array_set(va_space->has_native_atomics, gpu1->id, gpu0->id); + + if (peer_caps->is_indirect_peer) { + UVM_ASSERT(peer_caps->link_type >= UVM_GPU_LINK_NVLINK_2); + UVM_ASSERT(gpu0->parent->numa_info.enabled); + UVM_ASSERT(gpu1->parent->numa_info.enabled); + + processor_mask_array_set(va_space->indirect_peers, gpu0->id, gpu1->id); + processor_mask_array_set(va_space->indirect_peers, gpu1->id, gpu0->id); + } + } + + UVM_ASSERT(va_space_check_processors_masks(va_space)); + __set_bit(table_index, va_space->enabled_peers); + + uvm_for_each_va_range(va_range, va_space) { + status = uvm_va_range_enable_peer(va_range, gpu0, gpu1); + if (status != NV_OK) + break; + } + + if (status != NV_OK) { + disable_peers(va_space, gpu0, gpu1, &deferred_free_list); + + // uvm_va_range_disable_peer adds only external allocations to the list, + // but uvm_va_range_enable_peer doesn't do anything for them. + UVM_ASSERT(list_empty(&deferred_free_list)); + } + + return status; +} + +// On success the GPUs and the P2P access have been retained, but the caller +// must not assume that the GPUs are still registered in the VA space after the +// call since the VA space lock is dropped. 
+static NV_STATUS retain_pcie_peers_from_uuids(uvm_va_space_t *va_space, + const NvProcessorUuid *gpu_uuid_1, + const NvProcessorUuid *gpu_uuid_2, + uvm_gpu_t **gpu0, + uvm_gpu_t **gpu1) +{ + NV_STATUS status = NV_OK; + + uvm_va_space_down_read_rm(va_space); + + // The UUIDs should have already been registered + *gpu0 = uvm_va_space_get_gpu_by_uuid(va_space, gpu_uuid_1); + *gpu1 = uvm_va_space_get_gpu_by_uuid(va_space, gpu_uuid_2); + + if (*gpu0 && *gpu1 && !uvm_id_equal((*gpu0)->id, (*gpu1)->id)) + status = uvm_gpu_retain_pcie_peer_access(*gpu0, *gpu1); + else + status = NV_ERR_INVALID_DEVICE; + + uvm_va_space_up_read_rm(va_space); + + return status; +} + +static bool uvm_va_space_pcie_peer_enabled(uvm_va_space_t *va_space, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + return !processor_mask_array_test(va_space->has_nvlink, gpu0->id, gpu1->id) && + uvm_va_space_peer_enabled(va_space, gpu0, gpu1); +} + +static bool uvm_va_space_nvlink_peer_enabled(uvm_va_space_t *va_space, uvm_gpu_t *gpu0, uvm_gpu_t *gpu1) +{ + return processor_mask_array_test(va_space->has_nvlink, gpu0->id, gpu1->id); +} + +static void free_gpu_va_space(nv_kref_t *nv_kref) +{ + uvm_gpu_va_space_t *gpu_va_space = container_of(nv_kref, uvm_gpu_va_space_t, kref); + uvm_gpu_va_space_state_t state = uvm_gpu_va_space_state(gpu_va_space); + UVM_ASSERT(state == UVM_GPU_VA_SPACE_STATE_INIT || state == UVM_GPU_VA_SPACE_STATE_DEAD); + uvm_kvfree(gpu_va_space); +} + +void uvm_gpu_va_space_release(uvm_gpu_va_space_t *gpu_va_space) +{ + if (gpu_va_space) + nv_kref_put(&gpu_va_space->kref, free_gpu_va_space); +} + +static void uvm_gpu_va_space_acquire_mmap_lock(struct mm_struct *mm) +{ + if (mm) { + // uvm_ats_register_gpu_va_space() requires mmap_lock to be held in + // write mode if IBM ATS support is provided through the kernel. + // mmap_lock is optional if IBM ATS support is provided through the + // driver. In all cases, We need mmap_lock at least in read mode to + // handle potential CPU mapping changes in + // uvm_va_range_add_gpu_va_space(). + if (UVM_ATS_IBM_SUPPORTED_IN_KERNEL()) + uvm_down_write_mmap_lock(mm); + else + uvm_down_read_mmap_lock(mm); + } +} + +static void uvm_gpu_va_space_release_mmap_lock(struct mm_struct *mm) +{ + if (mm) { + if (UVM_ATS_IBM_SUPPORTED_IN_KERNEL()) + uvm_up_write_mmap_lock(mm); + else + uvm_up_read_mmap_lock(mm); + } +} + +static NV_STATUS uvm_gpu_va_space_set_page_dir(uvm_gpu_va_space_t *gpu_va_space) +{ + NV_STATUS status; + uvm_gpu_phys_address_t pdb_phys; + NvU64 num_pdes; + NvU32 pasid = -1U; + + if (gpu_va_space->ats.enabled) { + pasid = gpu_va_space->ats.pasid; + UVM_ASSERT(pasid != -1U); + } + + // Replace the existing PDB, if present, with the new one allocated by UVM. + // This will fail if nvUvmInterfaceSetPageDirectory has already been called + // on the RM VA space object, which prevents the user from registering twice + // and corrupting our state. + // + // TODO: Bug 1733664: RM needs to preempt and disable channels during this + // operation. + pdb_phys = uvm_page_tree_pdb(&gpu_va_space->page_tables)->addr; + num_pdes = uvm_mmu_page_tree_entries(&gpu_va_space->page_tables, 0, UVM_PAGE_SIZE_AGNOSTIC); + status = uvm_rm_locked_call(nvUvmInterfaceSetPageDirectory(gpu_va_space->duped_gpu_va_space, + pdb_phys.address, + num_pdes, + pdb_phys.aperture == UVM_APERTURE_VID, + pasid)); + if (status != NV_OK) { + if (status == NV_ERR_NOT_SUPPORTED) { + // Convert to the return code specified by uvm.h for + // already-registered PDBs. 
+ status = NV_ERR_INVALID_DEVICE; + } + else { + UVM_DBG_PRINT("nvUvmInterfaceSetPageDirectory() failed: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu_va_space->gpu)); + } + + return status; + } + + gpu_va_space->did_set_page_directory = true; + return status; +} + +void uvm_gpu_va_space_unset_page_dir(uvm_gpu_va_space_t *gpu_va_space) +{ + if (uvm_gpu_va_space_state(gpu_va_space) != UVM_GPU_VA_SPACE_STATE_INIT) + uvm_assert_rwsem_locked_read(&gpu_va_space->va_space->lock); + + if (gpu_va_space->did_set_page_directory) { + NV_STATUS status = uvm_rm_locked_call(nvUvmInterfaceUnsetPageDirectory(gpu_va_space->duped_gpu_va_space)); + UVM_ASSERT_MSG(status == NV_OK, + "nvUvmInterfaceUnsetPageDirectory() failed: %s, GPU %s\n", + nvstatusToString(status), + uvm_gpu_name(gpu_va_space->gpu)); + gpu_va_space->did_set_page_directory = false; + } +} + +static void destroy_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) +{ + NvU64 delay_us = 0; + uvm_va_space_t *va_space; + uvm_gpu_va_space_state_t state; + + if (!gpu_va_space) + return; + + state = uvm_gpu_va_space_state(gpu_va_space); + UVM_ASSERT(state == UVM_GPU_VA_SPACE_STATE_INIT || state == UVM_GPU_VA_SPACE_STATE_DEAD); + + va_space = gpu_va_space->va_space; + UVM_ASSERT(va_space); + + delay_us = atomic64_read(&va_space->test.destroy_gpu_va_space_delay_us); + + if (delay_us) + udelay(delay_us); + + // Serialize this uvm_gpu_va_space_unset_page_dir call with the one in + // uvm_va_space_mm_shutdown, which also starts with the VA space lock in + // write mode. RM will serialize the calls internally, so we lock here only + // to avoid getting benign errors from nvUvmInterfaceUnsetPageDirectory. + // + // If we never got to add_gpu_va_space, then gpu_va_space was never + // registered within the va_space, so uvm_va_space_mm_shutdown couldn't see + // it and we don't have to take the lock. state is guaranteed to be + // UVM_GPU_VA_SPACE_STATE_INIT if add_gpu_va_space wasn't reached. + if (state != UVM_GPU_VA_SPACE_STATE_INIT) { + uvm_va_space_down_write(va_space); + uvm_va_space_downgrade_write_rm(va_space); + } + + uvm_gpu_va_space_unset_page_dir(gpu_va_space); + + if (state != UVM_GPU_VA_SPACE_STATE_INIT) + uvm_va_space_up_read_rm(va_space); + + if (gpu_va_space->page_tables.root) + uvm_page_tree_deinit(&gpu_va_space->page_tables); + + if (gpu_va_space->duped_gpu_va_space) + uvm_rm_locked_call_void(nvUvmInterfaceAddressSpaceDestroy(gpu_va_space->duped_gpu_va_space)); + + // If the state is DEAD, then this GPU VA space is tracked in + // va_space->gpu_va_space_deferred_free. uvm_ats_unregister_gpu_va_space may + // wait for this count to go to 0 via uvm_va_space_mm_shutdown, so we must + // decrement it before calling that function. + if (gpu_va_space->state == UVM_GPU_VA_SPACE_STATE_DEAD) { + int num_pending = atomic_dec_return(&va_space->gpu_va_space_deferred_free.num_pending); + if (num_pending == 0) + wake_up_all(&va_space->gpu_va_space_deferred_free.wait_queue); + else + UVM_ASSERT(num_pending > 0); + } + + // Note that this call may wait for faults to finish being serviced, which + // means it may depend on the VA space lock and mmap_lock. 
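Editor's note: destroy_gpu_va_space() above participates in the gpu_va_space_deferred_free accounting: each deferred destroy decrements num_pending and wakes any waiter once the count reaches zero, so a shutdown path can block until all destroys have finished. A userspace sketch of the same idea, using a mutex and condition variable in place of the kernel's atomic counter and wait queue (all names are made up for illustration):

```c
#include <pthread.h>
#include <stdio.h>

/* Userspace model of the gpu_va_space_deferred_free accounting: a shutdown
 * path waits until every pending deferred destroy has completed. */
struct pending_tracker {
    pthread_mutex_t lock;
    pthread_cond_t  all_done;
    int             num_pending;
};

static void tracker_add(struct pending_tracker *t)
{
    pthread_mutex_lock(&t->lock);
    t->num_pending++;
    pthread_mutex_unlock(&t->lock);
}

static void tracker_complete(struct pending_tracker *t)
{
    pthread_mutex_lock(&t->lock);
    if (--t->num_pending == 0)
        pthread_cond_broadcast(&t->all_done);   /* analogous to wake_up_all() */
    pthread_mutex_unlock(&t->lock);
}

static void tracker_wait(struct pending_tracker *t)
{
    pthread_mutex_lock(&t->lock);
    while (t->num_pending > 0)
        pthread_cond_wait(&t->all_done, &t->lock);
    pthread_mutex_unlock(&t->lock);
}

static void *worker(void *arg)
{
    tracker_complete(arg);   /* stands in for a deferred destroy finishing */
    return NULL;
}

int main(void)
{
    struct pending_tracker t = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };
    pthread_t thread;

    tracker_add(&t);                    /* the unregister path would do this */
    pthread_create(&thread, NULL, worker, &t);
    tracker_wait(&t);                   /* the shutdown path blocks here     */
    pthread_join(thread, NULL);
    puts("all deferred destroys finished");
    return 0;
}
```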
+ uvm_ats_unregister_gpu_va_space(gpu_va_space); + + uvm_ats_unbind_gpu(gpu_va_space); + + + uvm_gpu_va_space_release(gpu_va_space); +} + +static NV_STATUS create_gpu_va_space(uvm_gpu_t *gpu, + uvm_va_space_t *va_space, + uvm_rm_user_object_t *user_rm_va_space, + uvm_gpu_va_space_t **out_gpu_va_space) +{ + NV_STATUS status; + uvm_gpu_va_space_t *gpu_va_space; + UvmGpuAddressSpaceInfo gpu_address_space_info; + + *out_gpu_va_space = NULL; + + gpu_va_space = uvm_kvmalloc_zero(sizeof(*gpu_va_space)); + if (!gpu_va_space) + return NV_ERR_NO_MEMORY; + + gpu_va_space->gpu = gpu; + gpu_va_space->va_space = va_space; + INIT_LIST_HEAD(&gpu_va_space->registered_channels); + INIT_LIST_HEAD(&gpu_va_space->channel_va_ranges); + nv_kref_init(&gpu_va_space->kref); + + // TODO: Bug 1624521: This interface needs to use rm_control_fd to do + // validation. + (void)user_rm_va_space->rm_control_fd; + status = uvm_rm_locked_call(nvUvmInterfaceDupAddressSpace(uvm_gpu_device_handle(gpu), + user_rm_va_space->user_client, + user_rm_va_space->user_object, + &gpu_va_space->duped_gpu_va_space, + &gpu_address_space_info)); + if (status != NV_OK) { + UVM_DBG_PRINT("failed to dup address space with error: %s, for GPU:%s \n", + nvstatusToString(status), uvm_gpu_name(gpu)); + goto error; + } + + gpu_va_space->ats.enabled = gpu_address_space_info.atsEnabled; + + // If ATS support in the UVM driver isn't enabled, fail registration of GPU + // VA spaces which have ATS enabled. + if (!g_uvm_global.ats.enabled && gpu_va_space->ats.enabled) { + UVM_INFO_PRINT("GPU VA space requires ATS, but ATS is not supported or enabled\n"); + status = NV_ERR_INVALID_FLAGS; + goto error; + } + + // If this GPU VA space uses ATS then pageable memory access must not have + // been disabled in the VA space. + if (gpu_va_space->ats.enabled && !uvm_va_space_pageable_mem_access_supported(va_space)) { + UVM_INFO_PRINT("GPU VA space requires ATS, but pageable memory access is not supported\n"); + status = NV_ERR_INVALID_FLAGS; + goto error; + } + + // RM allows the creation of VA spaces on Pascal with 128k big pages. We + // don't support that, so just fail those attempts. + // + // TODO: Bug 1789555: Remove this check once RM disallows this case. 
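Editor's note: create_gpu_va_space() funnels every failure through a single error label that calls destroy_gpu_va_space(), which in turn tolerates a partially constructed object by checking each resource before tearing it down. A self-contained sketch of that create/destroy idiom, with hypothetical resources standing in for the duped RM VA space handle and the page tree:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the single-error-label create/destroy idiom: every failure jumps
 * to one cleanup point, and the destroy routine copes with a partially
 * constructed object. The two resources are hypothetical stand-ins. */
struct toy_gpu_va_space {
    char *dup_handle;    /* stands in for the duped RM VA space handle */
    char *page_tables;   /* stands in for the page tree                */
};

static void toy_destroy(struct toy_gpu_va_space *s)
{
    if (!s)
        return;
    free(s->page_tables);   /* each teardown step is safe even if the   */
    free(s->dup_handle);    /* corresponding init step never ran (NULL) */
    free(s);
}

static struct toy_gpu_va_space *toy_create(int fail_page_tables)
{
    struct toy_gpu_va_space *s = calloc(1, sizeof(*s));

    if (!s)
        return NULL;

    s->dup_handle = strdup("rm-va-space");
    if (!s->dup_handle)
        goto error;

    if (fail_page_tables)   /* simulate the page tree init failing */
        goto error;
    s->page_tables = strdup("page-tree");
    if (!s->page_tables)
        goto error;

    return s;

error:
    toy_destroy(s);
    return NULL;
}

int main(void)
{
    struct toy_gpu_va_space *s = toy_create(0);

    printf("create ok: %d\n", s != NULL);
    toy_destroy(s);
    printf("create fails cleanly: %d\n", toy_create(1) == NULL);
    return 0;
}
```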
+ if (!gpu->parent->arch_hal->mmu_mode_hal(gpu_address_space_info.bigPageSize)) { + status = NV_ERR_INVALID_FLAGS; + goto error; + } + + // Set up this GPU's page tables + UVM_ASSERT(gpu_va_space->page_tables.root == NULL); + status = uvm_page_tree_init(gpu, + gpu_va_space, + UVM_PAGE_TREE_TYPE_USER, + gpu_address_space_info.bigPageSize, + uvm_gpu_page_tree_init_location(gpu), + &gpu_va_space->page_tables); + if (status != NV_OK) { + UVM_ERR_PRINT("Initializing the page tree failed: %s, GPU %s\n", nvstatusToString(status), uvm_gpu_name(gpu)); + goto error; + } + + status = uvm_ats_bind_gpu(gpu_va_space); + if (status != NV_OK) + goto error; + + *out_gpu_va_space = gpu_va_space; + return NV_OK; + +error: + destroy_gpu_va_space(gpu_va_space); + return status; +} + +static void add_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_va_space_t *va_space = gpu_va_space->va_space; + uvm_gpu_t *gpu = gpu_va_space->gpu; + + UVM_ASSERT(va_space); + uvm_assert_rwsem_locked_write(&va_space->lock); + + uvm_processor_mask_set(&va_space->registered_gpu_va_spaces, gpu->id); + va_space->gpu_va_spaces[uvm_id_gpu_index(gpu->id)] = gpu_va_space; + gpu_va_space->state = UVM_GPU_VA_SPACE_STATE_ACTIVE; +} + +static NV_STATUS check_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space) +{ + uvm_va_space_t *va_space = gpu_va_space->va_space; + uvm_gpu_t *gpu = gpu_va_space->gpu; + uvm_gpu_t *other_gpu; + uvm_gpu_va_space_t *other_gpu_va_space; + + UVM_ASSERT(va_space); + uvm_assert_rwsem_locked_write(&va_space->lock); + + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_INIT); + + if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) + return NV_ERR_INVALID_DEVICE; + + // RM will return an error from create_gpu_va_space if the given RM VA space + // object has already been registered by any VA space. Now we just need to + // check if a different VA space has already been registered. + if (uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, gpu->id)) + return NV_ERR_INVALID_DEVICE; + + // If a GPU unregister is in progress but temporarily dropped the VA space + // lock, we can't register new GPU VA spaces. + if (uvm_processor_mask_test(&va_space->gpu_unregister_in_progress, gpu->id)) + return NV_ERR_INVALID_DEVICE; + + // The VA space's mm is being torn down, so don't allow more work + if (va_space->disallow_new_registers) + return NV_ERR_PAGE_TABLE_NOT_AVAIL; + + // This GPU VA space must match its big page size with all enabled peers. 
+ // Also, the new GPU VA space must have the same ATS setting as previously- + // registered GPU VA spaces + for_each_va_space_gpu_in_mask(other_gpu, va_space, &va_space->registered_gpu_va_spaces) { + UVM_ASSERT(other_gpu != gpu); + + other_gpu_va_space = uvm_gpu_va_space_get(va_space, other_gpu); + if (other_gpu_va_space->ats.enabled != gpu_va_space->ats.enabled) + return NV_ERR_INVALID_FLAGS; + + if (!test_bit(uvm_gpu_peer_table_index(gpu->id, other_gpu->id), va_space->enabled_peers)) + continue; + + if (gpu_va_space->page_tables.big_page_size != other_gpu_va_space->page_tables.big_page_size) + return NV_ERR_NOT_COMPATIBLE; + } + + return NV_OK; +} + +NV_STATUS uvm_va_space_register_gpu_va_space(uvm_va_space_t *va_space, + uvm_rm_user_object_t *user_rm_va_space, + const NvProcessorUuid *gpu_uuid) +{ + NV_STATUS status; + uvm_gpu_t *gpu; + uvm_gpu_va_space_t *gpu_va_space; + uvm_va_range_t *va_range; + struct mm_struct *mm; + LIST_HEAD(deferred_free_list); + + gpu = uvm_va_space_retain_gpu_by_uuid(va_space, gpu_uuid); + if (!gpu) + return NV_ERR_INVALID_DEVICE; + + mm = uvm_va_space_mm_or_current_retain(va_space); + + status = create_gpu_va_space(gpu, va_space, user_rm_va_space, &gpu_va_space); + if (status != NV_OK) + goto error_gpu_release; + + uvm_gpu_va_space_acquire_mmap_lock(mm); + uvm_va_space_down_write(va_space); + + status = check_gpu_va_space(gpu_va_space); + if (status != NV_OK) + goto error_unlock; + + status = uvm_ats_register_gpu_va_space(gpu_va_space); + if (status != NV_OK) + goto error_unlock; + + uvm_va_space_up_write(va_space); + uvm_gpu_va_space_release_mmap_lock(mm); + + status = uvm_gpu_va_space_set_page_dir(gpu_va_space); + if (status != NV_OK) + goto error_destroy; + + uvm_gpu_va_space_acquire_mmap_lock(mm); + uvm_va_space_down_write(va_space); + + // va_space state might have changed before the lock reacquire for write. + // So, check the state again. + status = check_gpu_va_space(gpu_va_space); + if (status != NV_OK) + goto error_unlock; + + add_gpu_va_space(gpu_va_space); + + // Tell the VA ranges that they can map this GPU, if they need to. + // + // Ideally we'd downgrade the VA space lock to read mode while adding new + // mappings, but that would complicate error handling since we have to + // remove the GPU VA space if any of these mappings fail. + uvm_for_each_va_range(va_range, va_space) { + status = uvm_va_range_add_gpu_va_space(va_range, gpu_va_space, mm); + if (status != NV_OK) + goto error; + } + + uvm_va_space_up_write(va_space); + uvm_gpu_va_space_release_mmap_lock(mm); + + uvm_va_space_mm_or_current_release(va_space, mm); + uvm_gpu_release(gpu); + + return NV_OK; + +error: + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + remove_gpu_va_space(gpu_va_space, mm, &deferred_free_list); + + // Nothing else could've been attached to this gpu_va_space (channels, + // external allocations) since we're still holding the VA space lock + // since add_gpu_va_space(). Therefore the GPU VA space itself should be + // the only item in the list, and we can just destroy it directly below. + UVM_ASSERT(list_is_singular(&deferred_free_list)); +error_unlock: + uvm_va_space_up_write(va_space); + uvm_gpu_va_space_release_mmap_lock(mm); +error_destroy: + destroy_gpu_va_space(gpu_va_space); +error_gpu_release: + uvm_va_space_mm_or_current_release(va_space, mm); + uvm_gpu_release(gpu); + return status; +} + +// The caller must have stopped all channels under this gpu_va_space before +// calling this function. 
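Editor's note: uvm_va_space_register_gpu_va_space() below has to drop the VA space lock around the slow nvUvmInterfaceSetPageDirectory() call, so it runs check_gpu_va_space() a second time after reacquiring the lock, since the VA space state may have changed in between. A minimal sketch of that validate/drop/revalidate pattern, with a pthread mutex standing in for the VA space lock and all names hypothetical:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Skeleton of the validate / drop the lock for slow work / revalidate
 * pattern. The boolean stands in for whatever precondition another thread
 * might invalidate while the lock is dropped. */
static pthread_mutex_t space_lock = PTHREAD_MUTEX_INITIALIZER;
static bool gpu_registered = true;  /* may change while the lock is dropped */

static int check_preconditions(void)
{
    return gpu_registered ? 0 : -1;
}

static int register_with_slow_step(void)
{
    int status;

    pthread_mutex_lock(&space_lock);
    status = check_preconditions();
    pthread_mutex_unlock(&space_lock);
    if (status != 0)
        return status;

    /* The slow step (e.g. an RM call) runs here with the lock dropped. */

    pthread_mutex_lock(&space_lock);
    status = check_preconditions();   /* state may have changed: check again */
    if (status == 0) {
        /* commit the registration */
    }
    pthread_mutex_unlock(&space_lock);
    return status;
}

int main(void)
{
    printf("%d\n", register_with_slow_step());
    return 0;
}
```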
+static void remove_gpu_va_space(uvm_gpu_va_space_t *gpu_va_space, + struct mm_struct *mm, + struct list_head *deferred_free_list) +{ + uvm_va_space_t *va_space; + uvm_va_range_t *va_range; + uvm_va_range_t *va_range_next; + + if (!gpu_va_space || uvm_gpu_va_space_state(gpu_va_space) != UVM_GPU_VA_SPACE_STATE_ACTIVE) + return; + + va_space = gpu_va_space->va_space; + UVM_ASSERT(va_space); + + uvm_assert_rwsem_locked_write(&va_space->lock); + + uvm_gpu_va_space_detach_all_user_channels(gpu_va_space, deferred_free_list); + + // Removing all registered channels should've removed all VA ranges used by + // those channels. + UVM_ASSERT(list_empty(&gpu_va_space->channel_va_ranges)); + + // Unmap all page tables in this VA space on this GPU. + // TODO: Bug 1799173: This will need to add objects to deferred_free_list + uvm_for_each_va_range_safe(va_range, va_range_next, va_space) + uvm_va_range_remove_gpu_va_space(va_range, gpu_va_space, mm, deferred_free_list); + + uvm_deferred_free_object_add(deferred_free_list, + &gpu_va_space->deferred_free, + UVM_DEFERRED_FREE_OBJECT_GPU_VA_SPACE); + + // Let uvm_va_space_mm_shutdown know that it has to wait for this GPU VA + // space to be destroyed. + atomic_inc(&va_space->gpu_va_space_deferred_free.num_pending); + + uvm_processor_mask_clear(&va_space->registered_gpu_va_spaces, gpu_va_space->gpu->id); + va_space->gpu_va_spaces[uvm_id_gpu_index(gpu_va_space->gpu->id)] = NULL; + gpu_va_space->state = UVM_GPU_VA_SPACE_STATE_DEAD; +} + +NV_STATUS uvm_va_space_unregister_gpu_va_space(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid) +{ + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu; + uvm_gpu_va_space_t *gpu_va_space; + struct mm_struct *mm; + LIST_HEAD(deferred_free_list); + + // Stopping channels requires holding the VA space lock in read mode, so do + // it first. This also takes the serialize_writers_lock, so we'll serialize + // with other threads about to perform channel binds in + // uvm_register_channel since. + uvm_va_space_down_read_rm(va_space); + + gpu = uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, gpu_uuid); + if (!gpu) { + uvm_va_space_up_read_rm(va_space); + return NV_ERR_INVALID_DEVICE; + } + + gpu_va_space = uvm_gpu_va_space_get(va_space, gpu); + UVM_ASSERT(gpu_va_space); + + gpu_va_space_stop_all_channels(gpu_va_space); + + // We need to drop the lock to re-take it in write mode + uvm_gpu_va_space_retain(gpu_va_space); + uvm_gpu_retain(gpu); + uvm_va_space_up_read_rm(va_space); + + mm = uvm_va_space_mm_or_current_retain_lock(va_space); + uvm_va_space_down_write(va_space); + + // We dropped the lock so we have to re-verify that this gpu_va_space is + // still valid. If so, then the GPU is also still registered under the VA + // space. If not, we raced with another unregister thread, so return an + // an error for double-unregister. 
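Editor's note: the unregister path above leans on the gpu_va_space kref: a thread retains the object before dropping the VA space lock so the memory stays valid, then checks whether the state became DEAD once the lock is retaken. A toy refcount model of that pattern (only the shape of the idiom, not the kernel's kref API):

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy refcount model of the retain-across-lock-drop pattern: keep the object
 * alive while the lock is down, then read its state to learn whether another
 * thread killed it in the meantime. */
enum toy_state { ACTIVE, DEAD };

struct toy_obj {
    int            refcount;
    enum toy_state state;
};

static void toy_retain(struct toy_obj *o)
{
    o->refcount++;
}

static void toy_release(struct toy_obj *o)
{
    if (--o->refcount == 0)
        free(o);
}

int main(void)
{
    struct toy_obj *o = malloc(sizeof(*o));

    if (!o)
        return 1;
    o->refcount = 1;      /* reference owned by the registration itself */
    o->state = ACTIVE;

    toy_retain(o);        /* taken before dropping the lock                    */
    o->state = DEAD;      /* another thread unregisters while the lock is down */
    toy_release(o);       /* ...and drops the registration's reference         */

    /* After retaking the lock the memory is still valid thanks to our
     * reference, and the state says what happened in the meantime. */
    printf("%s\n", o->state == DEAD ? "raced with unregister" : "still active");
    toy_release(o);
    return 0;
}
```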
+ if (uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_DEAD) { + status = NV_ERR_INVALID_DEVICE; + } + else { + UVM_ASSERT(gpu == uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(va_space, gpu_uuid)); + UVM_ASSERT(gpu_va_space == uvm_gpu_va_space_get(va_space, gpu)); + + remove_gpu_va_space(gpu_va_space, mm, &deferred_free_list); + } + + uvm_va_space_up_write(va_space); + uvm_va_space_mm_or_current_release_unlock(va_space, mm); + + uvm_deferred_free_object_list(&deferred_free_list); + uvm_gpu_va_space_release(gpu_va_space); + uvm_gpu_release(gpu); + return status; +} + +bool uvm_va_space_peer_enabled(uvm_va_space_t *va_space, uvm_gpu_t *gpu1, uvm_gpu_t *gpu2) +{ + size_t table_index; + + UVM_ASSERT(uvm_processor_mask_test(&va_space->registered_gpus, gpu1->id)); + UVM_ASSERT(uvm_processor_mask_test(&va_space->registered_gpus, gpu2->id)); + + table_index = uvm_gpu_peer_table_index(gpu1->id, gpu2->id); + return !!test_bit(table_index, va_space->enabled_peers); +} + +uvm_processor_id_t uvm_processor_mask_find_closest_id(uvm_va_space_t *va_space, + const uvm_processor_mask_t *candidates, + uvm_processor_id_t src) +{ + uvm_processor_mask_t mask; + uvm_processor_id_t id; + + // Highest priority: the local processor itself + if (uvm_processor_mask_test(candidates, src)) + return src; + + // NvLink peers + if (uvm_processor_mask_and(&mask, candidates, &va_space->has_nvlink[uvm_id_value(src)])) { + uvm_processor_mask_t *indirect_peers; + uvm_processor_mask_t direct_peers; + + indirect_peers = &va_space->indirect_peers[uvm_id_value(src)]; + + // Direct peers, prioritizing GPU peers over CPU + if (uvm_processor_mask_andnot(&direct_peers, &mask, indirect_peers)) { + id = uvm_processor_mask_find_first_gpu_id(&direct_peers); + return UVM_ID_IS_INVALID(id)? UVM_ID_CPU : id; + } + + // Indirect peers + UVM_ASSERT(UVM_ID_IS_GPU(src)); + UVM_ASSERT(!uvm_processor_mask_test(&mask, UVM_ID_CPU)); + + return uvm_processor_mask_find_first_gpu_id(&mask); + } + + // If source is GPU, prioritize PCIe peers over CPU + if (uvm_processor_mask_and(&mask, candidates, &va_space->can_access[uvm_id_value(src)])) { + // CPUs only have direct access to GPU memory over NVLINK, not PCIe, and + // should have been selected above + UVM_ASSERT(UVM_ID_IS_GPU(src)); + + id = uvm_processor_mask_find_first_gpu_id(&mask); + return UVM_ID_IS_INVALID(id)? UVM_ID_CPU : id; + } + + // No GPUs with direct access are in the mask. Just pick the first + // processor in the mask, if any. + return uvm_processor_mask_find_first_id(candidates); +} + +static void uvm_deferred_free_object_channel(uvm_deferred_free_object_t *object, uvm_processor_mask_t *flushed_gpus) +{ + uvm_user_channel_t *channel = container_of(object, uvm_user_channel_t, deferred_free); + uvm_gpu_t *gpu = channel->gpu; + + // Flush out any faults with this instance pointer still in the buffer. This + // prevents us from re-allocating the same instance pointer for a new + // channel and mis-attributing old faults to it. 
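Editor's note: uvm_processor_mask_find_closest_id() above encodes a preference order: the source processor itself, then NVLink peers, then anything directly accessible, then any candidate at all. The sketch below models that ordering with plain bitmasks; it deliberately omits the GPU-versus-CPU tie-breaking and the indirect-peer handling, so it is a simplification rather than a faithful reimplementation:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified model of the closest-processor preference order. Plain bitmasks
 * stand in for uvm_processor_mask_t. Requires a compiler with
 * __builtin_ctzll (GCC/Clang). */
#define NONE -1

static int first_bit(uint64_t mask)
{
    return mask ? __builtin_ctzll(mask) : NONE;
}

static int find_closest(uint64_t candidates, int src,
                        uint64_t nvlink_from_src, uint64_t access_from_src)
{
    if (candidates & (1ull << src))
        return src;
    if (candidates & nvlink_from_src)
        return first_bit(candidates & nvlink_from_src);
    if (candidates & access_from_src)
        return first_bit(candidates & access_from_src);
    return first_bit(candidates);
}

int main(void)
{
    /* Processor 0 is the CPU; 1..3 are GPUs. GPU 1 has NVLink to GPU 2 and
     * PCIe access to GPU 3. */
    uint64_t nvlink = 1ull << 2;
    uint64_t access = (1ull << 2) | (1ull << 3);

    printf("%d\n", find_closest((1ull << 2) | (1ull << 3), 1, nvlink, access)); /* 2 */
    printf("%d\n", find_closest(1ull << 3, 1, nvlink, access));                 /* 3 */
    return 0;
}
```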
+ if (gpu->parent->replayable_faults_supported && !uvm_processor_mask_test(flushed_gpus, gpu->id)) { + uvm_gpu_fault_buffer_flush(gpu); + uvm_processor_mask_set(flushed_gpus, gpu->id); + } + + uvm_user_channel_destroy_detached(channel); +} + +void uvm_deferred_free_object_list(struct list_head *deferred_free_list) +{ + uvm_deferred_free_object_t *object, *next; + uvm_processor_mask_t flushed_gpus; + + // Used if there are any channels in the list + uvm_processor_mask_zero(&flushed_gpus); + + list_for_each_entry_safe(object, next, deferred_free_list, list_node) { + list_del(&object->list_node); + + switch (object->type) { + case UVM_DEFERRED_FREE_OBJECT_TYPE_CHANNEL: + uvm_deferred_free_object_channel(object, &flushed_gpus); + break; + case UVM_DEFERRED_FREE_OBJECT_GPU_VA_SPACE: + destroy_gpu_va_space(container_of(object, uvm_gpu_va_space_t, deferred_free)); + break; + case UVM_DEFERRED_FREE_OBJECT_TYPE_EXTERNAL_ALLOCATION: + uvm_ext_gpu_map_free(container_of(object, uvm_ext_gpu_map_t, deferred_free)); + break; + default: + UVM_ASSERT_MSG(0, "Invalid type %d\n", object->type); + } + } +} + +uvm_user_channel_t *uvm_gpu_va_space_get_user_channel(uvm_gpu_va_space_t *gpu_va_space, + uvm_gpu_phys_address_t instance_ptr) +{ + uvm_user_channel_t *user_channel; + uvm_va_space_t *va_space = gpu_va_space->va_space; + + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + uvm_assert_rwsem_locked(&va_space->lock); + + // TODO: Bug 1880191: This is called on every non-replayable fault service. + // Evaluate the performance impact of this list traversal and potentially + // replace it with something better. + list_for_each_entry(user_channel, &gpu_va_space->registered_channels, list_node) { + if (user_channel->instance_ptr.addr.address == instance_ptr.address && + user_channel->instance_ptr.addr.aperture == instance_ptr.aperture) { + return user_channel; + } + } + + return NULL; +} + +NV_STATUS uvm_api_enable_peer_access(UVM_ENABLE_PEER_ACCESS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu0 = NULL; + uvm_gpu_t *gpu1 = NULL; + size_t table_index; + + uvm_mutex_lock(&g_uvm_global.global_lock); + status = retain_pcie_peers_from_uuids(va_space, ¶ms->gpuUuidA, ¶ms->gpuUuidB, &gpu0, &gpu1); + uvm_mutex_unlock(&g_uvm_global.global_lock); + if (status != NV_OK) + return status; + + uvm_va_space_down_write(va_space); + + table_index = uvm_gpu_peer_table_index(gpu0->id, gpu1->id); + if (test_bit(table_index, va_space->enabled_peers)) + status = NV_ERR_INVALID_DEVICE; + else + status = enable_peers(va_space, gpu0, gpu1); + + uvm_va_space_up_write(va_space); + + if (status != NV_OK) { + uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_gpu_release_pcie_peer_access(gpu0, gpu1); + uvm_mutex_unlock(&g_uvm_global.global_lock); + } + + return status; +} + +NV_STATUS uvm_api_disable_peer_access(UVM_DISABLE_PEER_ACCESS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu0, *gpu1; + LIST_HEAD(deferred_free_list); + + uvm_va_space_down_write(va_space); + + gpu0 = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpuUuidA); + gpu1 = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpuUuidB); + + if (!gpu0 || !gpu1) { + status = NV_ERR_INVALID_DEVICE; + goto error; + } + + if (uvm_id_equal(gpu0->id, gpu1->id)) { + status = NV_ERR_INVALID_DEVICE; + goto error; + } + + if (!uvm_va_space_pcie_peer_enabled(va_space, gpu0, gpu1)) { + status = 
NV_ERR_INVALID_DEVICE; + goto error; + } + + disable_peers(va_space, gpu0, gpu1, &deferred_free_list); + + // disable_peers doesn't release the GPU peer ref count, which means the two + // GPUs will remain retained even if another thread unregisters them from + // this VA space after we drop the lock. + uvm_va_space_up_write(va_space); + + uvm_deferred_free_object_list(&deferred_free_list); + + uvm_mutex_lock(&g_uvm_global.global_lock); + uvm_gpu_release_pcie_peer_access(gpu0, gpu1); + uvm_mutex_unlock(&g_uvm_global.global_lock); + + return NV_OK; + +error: + uvm_va_space_up_write(va_space); + return status; +} + +bool uvm_va_space_pageable_mem_access_supported(uvm_va_space_t *va_space) +{ + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + + // Any pageable memory access requires that we have mm_struct association + // via va_space_mm. + if (!uvm_va_space_mm_enabled(va_space)) + return false; + + // We might have systems with both ATS and HMM support. ATS gets priority. + if (g_uvm_global.ats.supported) + return g_uvm_global.ats.enabled; + + return uvm_hmm_is_enabled(va_space); +} + +NV_STATUS uvm_test_get_pageable_mem_access_type(UVM_TEST_GET_PAGEABLE_MEM_ACCESS_TYPE_PARAMS *params, + struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + params->type = UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_NONE; + + if (uvm_va_space_pageable_mem_access_supported(va_space)) { + if (g_uvm_global.ats.enabled) { + if (UVM_ATS_IBM_SUPPORTED_IN_KERNEL()) + params->type = UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_ATS_KERNEL; + else + params->type = UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_ATS_DRIVER; + } + else { + params->type = UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_HMM; + } + } + else if (uvm_va_space_mm_enabled(va_space)) { + params->type = UVM_TEST_PAGEABLE_MEM_ACCESS_TYPE_MMU_NOTIFIER; + } + + return NV_OK; +} + +NV_STATUS uvm_test_flush_deferred_work(UVM_TEST_FLUSH_DEFERRED_WORK_PARAMS *params, struct file *filp) +{ + UvmTestDeferredWorkType work_type = params->work_type; + + switch (work_type) { + case UvmTestDeferredWorkTypeAcessedByMappings: + nv_kthread_q_flush(&g_uvm_global.global_q); + return NV_OK; + default: + return NV_ERR_INVALID_ARGUMENT; + } +} + +NV_STATUS uvm_test_enable_nvlink_peer_access(UVM_TEST_ENABLE_NVLINK_PEER_ACCESS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu0 = NULL; + uvm_gpu_t *gpu1 = NULL; + size_t table_index; + uvm_gpu_peer_t *peer_caps = NULL; + + uvm_va_space_down_write(va_space); + + gpu0 = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpuUuidA); + gpu1 = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpuUuidB); + + if (gpu0 && gpu1 && !uvm_id_equal(gpu0->id, gpu1->id)) + peer_caps = uvm_gpu_peer_caps(gpu0, gpu1); + + if (!peer_caps || peer_caps->link_type < UVM_GPU_LINK_NVLINK_1) { + uvm_va_space_up_write(va_space); + return NV_ERR_INVALID_DEVICE; + } + + table_index = uvm_gpu_peer_table_index(gpu0->id, gpu1->id); + + // NVLink peers are automatically enabled in the VA space at VA space + // registration time. In order to avoid tests having to keep track of the + // different initial state for PCIe and NVLink peers, we just return NV_OK + // if NVLink peer were already enabled. 
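Editor's note: uvm_deferred_free_object_list(), shown a little earlier, dispatches on an embedded header: each freeable object carries a uvm_deferred_free_object_t, and the consumer uses container_of() to recover the full object once all locks are dropped. A standalone model of that pattern with a simple singly linked list in place of list_head (toy types, not the driver's):

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Standalone model of the deferred free list: each freeable object embeds a
 * small header (type + linkage), and the consumer uses container_of() to
 * recover the full object after all locks are dropped. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

enum obj_type { OBJ_CHANNEL, OBJ_GPU_VA_SPACE };

struct deferred_free_object {
    enum obj_type type;
    struct deferred_free_object *next;
};

struct toy_channel {
    int id;
    struct deferred_free_object deferred_free;
};

static void deferred_free_add(struct deferred_free_object **list,
                              struct deferred_free_object *obj,
                              enum obj_type type)
{
    obj->type = type;
    obj->next = *list;
    *list = obj;
}

static void deferred_free_list_run(struct deferred_free_object *list)
{
    while (list) {
        struct deferred_free_object *obj = list;

        list = obj->next;
        switch (obj->type) {
        case OBJ_CHANNEL: {
            struct toy_channel *ch = container_of(obj, struct toy_channel, deferred_free);

            printf("destroying channel %d\n", ch->id);
            free(ch);
            break;
        }
        default:
            break;
        }
    }
}

int main(void)
{
    struct deferred_free_object *list = NULL;
    struct toy_channel *ch = malloc(sizeof(*ch));

    if (!ch)
        return 1;
    ch->id = 7;
    deferred_free_add(&list, &ch->deferred_free, OBJ_CHANNEL);
    deferred_free_list_run(list);   /* runs after every lock is dropped */
    return 0;
}
```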
+ if (test_bit(table_index, va_space->enabled_peers)) + status = NV_OK; + else + status = enable_peers(va_space, gpu0, gpu1); + + uvm_va_space_up_write(va_space); + + return status; +} + +NV_STATUS uvm_test_disable_nvlink_peer_access(UVM_TEST_DISABLE_NVLINK_PEER_ACCESS_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + NV_STATUS status = NV_OK; + uvm_gpu_t *gpu0, *gpu1; + LIST_HEAD(deferred_free_list); + + uvm_va_space_down_write(va_space); + + gpu0 = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpuUuidA); + gpu1 = uvm_va_space_get_gpu_by_uuid(va_space, ¶ms->gpuUuidB); + + if (!gpu0 || !gpu1) { + status = NV_ERR_INVALID_DEVICE; + goto error; + } + + if (uvm_id_equal(gpu0->id, gpu1->id)) { + status = NV_ERR_INVALID_DEVICE; + goto error; + } + + if (!uvm_va_space_nvlink_peer_enabled(va_space, gpu0, gpu1)) { + status = NV_ERR_INVALID_DEVICE; + goto error; + } + + disable_peers(va_space, gpu0, gpu1, &deferred_free_list); + + uvm_va_space_up_write(va_space); + + uvm_deferred_free_object_list(&deferred_free_list); + + return NV_OK; + +error: + uvm_va_space_up_write(va_space); + return status; +} + +NV_STATUS uvm_test_va_space_inject_error(UVM_TEST_VA_SPACE_INJECT_ERROR_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + atomic_set(&va_space->test.migrate_vma_allocation_fail_nth, params->migrate_vma_allocation_fail_nth); + + return NV_OK; +} + +// Add a fixed number of dummy thread contexts to each thread context table. +// The newly added thread contexts are removed by calling +// uvm_test_va_space_remove_dummy_thread_contexts, or during VA space shutdown. +NV_STATUS uvm_test_va_space_add_dummy_thread_contexts(UVM_TEST_VA_SPACE_ADD_DUMMY_THREAD_CONTEXTS_PARAMS *params, + struct file *filp) +{ + size_t i; + uvm_va_space_t *va_space; + size_t total_dummy_thread_contexts = params->num_dummy_thread_contexts * UVM_THREAD_CONTEXT_TABLE_SIZE; + NV_STATUS status = NV_OK; + + if (params->num_dummy_thread_contexts == 0) + return NV_OK; + + va_space = uvm_va_space_get(filp); + + uvm_va_space_down_write(va_space); + + if (va_space->test.dummy_thread_context_wrappers != NULL) { + status = NV_ERR_INVALID_STATE; + goto out; + } + + if (va_space->test.num_dummy_thread_context_wrappers > 0) { + status = NV_ERR_INVALID_STATE; + goto out; + } + + if (!uvm_thread_context_wrapper_is_used()) { + status = NV_ERR_INVALID_STATE; + goto out; + } + + va_space->test.dummy_thread_context_wrappers = uvm_kvmalloc(sizeof(*va_space->test.dummy_thread_context_wrappers) * + total_dummy_thread_contexts); + if (va_space->test.dummy_thread_context_wrappers == NULL) { + status = NV_ERR_NO_MEMORY; + goto out; + } + + va_space->test.num_dummy_thread_context_wrappers = total_dummy_thread_contexts; + + for (i = 0; i < total_dummy_thread_contexts; i++) { + uvm_thread_context_t *thread_context = &va_space->test.dummy_thread_context_wrappers[i].context; + + // The context pointer is used to fill the task. 
+ thread_context->task = (struct task_struct *) thread_context; + + uvm_thread_context_add_at(thread_context, i % UVM_THREAD_CONTEXT_TABLE_SIZE); + } + +out: + uvm_va_space_up_write(va_space); + + return status; +} + +static void va_space_remove_dummy_thread_contexts(uvm_va_space_t *va_space) +{ + size_t i; + + uvm_assert_rwsem_locked_write(&va_space->lock); + + if (va_space->test.dummy_thread_context_wrappers == NULL) { + UVM_ASSERT(va_space->test.num_dummy_thread_context_wrappers == 0); + return; + } + + UVM_ASSERT(uvm_thread_context_wrapper_is_used()); + UVM_ASSERT(uvm_enable_builtin_tests != 0); + UVM_ASSERT(va_space->test.num_dummy_thread_context_wrappers > 0); + + for (i = 0; i < va_space->test.num_dummy_thread_context_wrappers; i++) { + uvm_thread_context_t *thread_context = &va_space->test.dummy_thread_context_wrappers[i].context; + + uvm_thread_context_remove_at(thread_context, i % UVM_THREAD_CONTEXT_TABLE_SIZE); + } + + uvm_kvfree(va_space->test.dummy_thread_context_wrappers); + va_space->test.dummy_thread_context_wrappers = NULL; + va_space->test.num_dummy_thread_context_wrappers = 0; +} + +NV_STATUS uvm_test_va_space_remove_dummy_thread_contexts(UVM_TEST_VA_SPACE_REMOVE_DUMMY_THREAD_CONTEXTS_PARAMS *params, + struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + uvm_va_space_down_write(va_space); + + va_space_remove_dummy_thread_contexts(va_space); + + uvm_va_space_up_write(va_space); + + return NV_OK; +} + +NV_STATUS uvm_test_destroy_gpu_va_space_delay(UVM_TEST_DESTROY_GPU_VA_SPACE_DELAY_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + + // va_space lock is not needed here. + atomic64_set(&va_space->test.destroy_gpu_va_space_delay_us, params->delay_us); + + return NV_OK; +} diff --git a/kernel-open/nvidia-uvm/uvm_va_space.h b/kernel-open/nvidia-uvm/uvm_va_space.h new file mode 100644 index 000000000..92b680bc8 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_space.h @@ -0,0 +1,859 @@ +/******************************************************************************* + Copyright (c) 2015-2022 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#ifndef __UVM_VA_SPACE_H__ +#define __UVM_VA_SPACE_H__ + +#include "uvm_processors.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_range_tree.h" +#include "uvm_range_group.h" +#include "uvm_forward_decl.h" +#include "uvm_mmu.h" +#include "uvm_linux.h" +#include "uvm_common.h" +#include "nv-kref.h" +#include "nv-linux.h" +#include "uvm_perf_events.h" +#include "uvm_perf_module.h" +#include "uvm_va_block_types.h" +#include "uvm_va_block.h" +#include "uvm_hmm.h" +#include "uvm_test_ioctl.h" +#include "uvm_ats.h" +#include "uvm_va_space_mm.h" + + + + +// uvm_deferred_free_object provides a mechanism for building and later freeing +// a list of objects which are owned by a VA space, but can't be freed while the +// VA space lock is held. + +typedef enum +{ + UVM_DEFERRED_FREE_OBJECT_TYPE_CHANNEL, + UVM_DEFERRED_FREE_OBJECT_GPU_VA_SPACE, + UVM_DEFERRED_FREE_OBJECT_TYPE_EXTERNAL_ALLOCATION, + UVM_DEFERRED_FREE_OBJECT_TYPE_COUNT +} uvm_deferred_free_object_type_t; + +typedef struct +{ + uvm_deferred_free_object_type_t type; + struct list_head list_node; +} uvm_deferred_free_object_t; + +static void uvm_deferred_free_object_add(struct list_head *list, + uvm_deferred_free_object_t *object, + uvm_deferred_free_object_type_t type) +{ + object->type = type; + list_add_tail(&object->list_node, list); +} + +// Walks the list of pending objects and frees each one as appropriate to its +// type. +// +// LOCKING: May take the GPU isr_lock and the RM locks. +void uvm_deferred_free_object_list(struct list_head *deferred_free_list); + +typedef enum +{ + // The GPU VA space has been initialized but not yet inserted into the + // parent VA space. + UVM_GPU_VA_SPACE_STATE_INIT = 0, + + // The GPU VA space is active in the VA space. + UVM_GPU_VA_SPACE_STATE_ACTIVE, + + // The GPU VA space is no longer active in the VA space. This state can be + // observed when threads retain the gpu_va_space then drop the VA space + // lock. After re-taking the VA space lock, the state must be inspected to + // see if another thread unregistered the gpu_va_space in the meantime. + UVM_GPU_VA_SPACE_STATE_DEAD, + + UVM_GPU_VA_SPACE_STATE_COUNT +} uvm_gpu_va_space_state_t; + +struct uvm_gpu_va_space_struct +{ + // Parent pointers + uvm_va_space_t *va_space; + uvm_gpu_t *gpu; + + uvm_gpu_va_space_state_t state; + + // Handle to the duped GPU VA space + // to be used for all further GPU VA space related UVM-RM interactions. + uvmGpuAddressSpaceHandle duped_gpu_va_space; + bool did_set_page_directory; + + uvm_page_tree_t page_tables; + + // List of all uvm_user_channel_t's under this GPU VA space + struct list_head registered_channels; + + // List of all uvm_va_range_t's under this GPU VA space with type == + // UVM_VA_RANGE_TYPE_CHANNEL. Used at channel registration time to find + // shareable VA ranges without having to iterate through all VA ranges in + // the VA space. + struct list_head channel_va_ranges; + + // Boolean which is 1 if no new channel registration is allowed. This is set + // when all the channels under the GPU VA space have been stopped to prevent + // new ones from entering after we drop the VA space lock. It is an atomic_t + // because multiple threads may set it to 1 concurrently. + atomic_t disallow_new_channels; + + // On VMA destruction, the fault buffer needs to be flushed for all the GPUs + // registered in the VA space to avoid leaving stale entries of the VA range + // that is going to be destroyed. 
Otherwise, these fault entries can be + // attributed to new VA ranges reallocated at the same addresses. However, + // uvm_vm_close is called with mm->mmap_lock taken and we cannot take the ISR + // lock. Therefore, we use a flag no notify the GPU fault handler that the + // fault buffer needs to be flushed, before servicing the faults that belong + // to the va_space. + bool needs_fault_buffer_flush; + + // Node for the deferred free list where this GPU VA space is stored upon + // being unregistered. + uvm_deferred_free_object_t deferred_free; + + // Reference count for this gpu_va_space. This only protects the memory + // object itself, for use in cases when the gpu_va_space needs to be + // accessed across dropping and re-acquiring the VA space lock. + nv_kref_t kref; + + // ATS specific state + uvm_ats_gpu_va_space_t ats; +}; + +typedef struct +{ + int numa_node; + + uvm_processor_mask_t gpus; +} uvm_cpu_gpu_affinity_t; + +struct uvm_va_space_struct +{ + // Mask of gpus registered with the va space + uvm_processor_mask_t registered_gpus; + + // Array of pointers to the uvm_gpu_t objects that correspond to the + // uvm_processor_id_t index. + // + // With SMC, GPUs can be partitioned so the number of uvm_gpu_t objects can + // be larger than UVM_ID_MAX_GPUS. However, each VA space can only + // subscribe to a single partition per GPU, so it is fine to have a regular + // processor mask. + uvm_gpu_t *registered_gpus_table[UVM_ID_MAX_GPUS]; + + // Mask of processors registered with the va space that support replayable faults + uvm_processor_mask_t faultable_processors; + + // Semaphore protecting the state of the va space + uvm_rw_semaphore_t lock; + + // Lock taken prior to taking the VA space lock in write mode, or prior to + // taking the VA space lock in read mode on a path which will call in RM. + // See UVM_LOCK_ORDER_VA_SPACE_SERIALIZE_WRITERS in uvm_lock.h. + uvm_mutex_t serialize_writers_lock; + + // Lock taken to serialize down_reads on the VA space lock with up_writes in + // other threads. See + // UVM_LOCK_ORDER_VA_SPACE_READ_ACQUIRE_WRITE_RELEASE_LOCK in uvm_lock.h. + uvm_mutex_t read_acquire_write_release_lock; + + // Tree of uvm_va_range_t's + uvm_range_tree_t va_range_tree; + + // Kernel mapping structure passed to unmap_mapping range to unmap CPU PTEs + // in this process. + struct address_space mapping; + + // Storage in g_uvm_global.va_spaces.list + struct list_head list_node; + + // Monotonically increasing counter for range groups IDs + atomic64_t range_group_id_counter; + + // Range groups + struct radix_tree_root range_groups; + uvm_range_tree_t range_group_ranges; + + // Peer to peer table + // A bitmask of peer to peer pairs enabled in this va_space + // indexed by a peer_table_index returned by uvm_gpu_peer_table_index(). + DECLARE_BITMAP(enabled_peers, UVM_MAX_UNIQUE_GPU_PAIRS); + + // Temporary copy of the above state used to avoid allocation during VA + // space destroy. + DECLARE_BITMAP(enabled_peers_teardown, UVM_MAX_UNIQUE_GPU_PAIRS); + + // Interpreting these processor masks: + // uvm_processor_mask_test(foo[A], B) + // ...should be read as "test if A foo B." For example: + // uvm_processor_mask_test(accessible_from[B], A) + // means "test if B is accessible_from A." + + // Pre-computed masks that contain, for each processor, a mask of processors + // which that processor can directly access. 
In other words, this will test + // whether A has direct access to B: + // uvm_processor_mask_test(can_access[A], B) + uvm_processor_mask_t can_access[UVM_ID_MAX_PROCESSORS]; + + // Pre-computed masks that contain, for each processor memory, a mask with + // the processors that have direct access enabled to its memory. This is the + // opposite direction as can_access. In other words, this will test whether + // A has direct access to B: + // uvm_processor_mask_test(accessible_from[B], A) + uvm_processor_mask_t accessible_from[UVM_ID_MAX_PROCESSORS]; + + // Pre-computed masks that contain, for each processor memory, a mask with + // the processors that can directly copy to and from its memory. This is + // almost the same as accessible_from masks, but also requires peer identity + // mappings to be supported for peer access. + uvm_processor_mask_t can_copy_from[UVM_ID_MAX_PROCESSORS]; + + // Pre-computed masks that contain, for each processor, a mask of processors + // to which that processor has NVLINK access. In other words, this will test + // whether A has NVLINK access to B: + // uvm_processor_mask_test(has_nvlink[A], B) + // This is a subset of can_access. + uvm_processor_mask_t has_nvlink[UVM_ID_MAX_PROCESSORS]; + + // Pre-computed masks that contain, for each processor memory, a mask with + // the processors that have direct access to its memory and native support + // for atomics in HW. This is a subset of accessible_from. + uvm_processor_mask_t has_native_atomics[UVM_ID_MAX_PROCESSORS]; + + // Pre-computed masks that contain, for each processor memory, a mask with + // the processors that are indirect peers. Indirect peers can access each + // other's memory like regular peers, but with additional latency and/or bw + // penalty. + uvm_processor_mask_t indirect_peers[UVM_ID_MAX_PROCESSORS]; + + // Mask of gpu_va_spaces registered with the va space + // indexed by gpu->id + uvm_processor_mask_t registered_gpu_va_spaces; + + // Mask of GPUs which have temporarily dropped the VA space lock mid- + // unregister. Used to make other paths return an error rather than + // corrupting state. + uvm_processor_mask_t gpu_unregister_in_progress; + + // Mask of processors that are participating in system-wide atomics + uvm_processor_mask_t system_wide_atomics_enabled_processors; + + // Mask of GPUs where access counters are enabled on this VA space + uvm_processor_mask_t access_counters_enabled_processors; + + // Array with information regarding CPU/GPU NUMA affinity. There is one + // entry per CPU NUMA node. Entries in the array are populated sequentially + // as new CPU NUMA nodes are discovered on GPU registration. Each entry + // contains a CPU NUMA node id, and a mask with the GPUs attached to it. + // Since each GPU can only be attached to one CPU node id, the array can + // contain information for up to UVM_ID_MAX_GPUS nodes. The information is + // stored in the VA space to avoid taking the global lock. + uvm_cpu_gpu_affinity_t gpu_cpu_numa_affinity[UVM_ID_MAX_GPUS]; + + + + + + + + + // Array of GPU VA spaces + uvm_gpu_va_space_t *gpu_va_spaces[UVM_ID_MAX_GPUS]; + + // Tracking of GPU VA spaces which have dropped the VA space lock and are + // pending destruction. uvm_va_space_mm_shutdown has to wait for those + // destroy operations to be completely done. 
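Editor's note: the can_access and accessible_from arrays documented above describe the same reachability relation from opposite directions, which is why every update in the .c file sets (or clears) both sides. A small sketch of that transpose relationship using one bitmask row per processor (illustrative names only):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the paired masks: can_access and accessible_from encode the same
 * relation from opposite directions, so every update touches both. One
 * uint32_t row per processor stands in for uvm_processor_mask_t. */
#define MAX_PROCESSORS 8

static uint32_t can_access[MAX_PROCESSORS];      /* row A: whose memory A can reach */
static uint32_t accessible_from[MAX_PROCESSORS]; /* row B: who can reach B's memory */

static void grant_access(unsigned from, unsigned to)
{
    can_access[from]    |= 1u << to;
    accessible_from[to] |= 1u << from;
}

static bool a_can_access_b(unsigned a, unsigned b)
{
    /* Either array answers the question; they are transposes of each other. */
    return (can_access[a] >> b) & 1;
}

int main(void)
{
    grant_access(1, 0);   /* e.g. GPU 1 gains direct access to CPU (id 0) memory */
    printf("%d %d\n", a_can_access_b(1, 0), a_can_access_b(0, 1));   /* 1 0 */
    return 0;
}
```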
+ struct + { + atomic_t num_pending; + wait_queue_head_t wait_queue; + } gpu_va_space_deferred_free; + + // Per-va_space event notification information for performance heuristics + uvm_perf_va_space_events_t perf_events; + + uvm_perf_module_data_desc_t perf_modules_data[UVM_PERF_MODULE_TYPE_COUNT]; + + // Array of modules that are loaded in the va_space, indexed by module type + uvm_perf_module_t *perf_modules[UVM_PERF_MODULE_TYPE_COUNT]; + + // Lists of counters listening for events on this VA space + // Protected by lock + struct + { + bool enabled; + + uvm_rw_semaphore_t lock; + + // Lists of counters listening for events on this VA space + struct list_head counters[UVM_TOTAL_COUNTERS]; + struct list_head queues[UvmEventNumTypesAll]; + + // Node for this va_space in global subscribers list + struct list_head node; + } tools; + + // Boolean which is 1 if all user channels have been already stopped. This + // is an atomic_t because multiple threads may call + // uvm_va_space_stop_all_user_channels concurrently. + atomic_t user_channels_stopped; + + // Prevent future registrations of any kind (GPU, GPU VA space, channel). + // This is used when the associated va_space_mm is torn down, which has to + // prevent any new work from being started in this VA space. + bool disallow_new_registers; + + bool user_channel_stops_are_immediate; + + // Block context used for GPU unmap operations so that allocation is not + // required on the teardown path. This can only be used while the VA space + // lock is held in write mode. Access using uvm_va_space_block_context(). + uvm_va_block_context_t va_block_context; + + // UVM_INITIALIZE has been called. Until this is set, the VA space is + // inoperable. Use uvm_va_space_initialized() to check whether the VA space + // has been initialized. + atomic_t initialized; + NvU64 initialization_flags; + + // The mm currently associated with this VA space, if any. + uvm_va_space_mm_t va_space_mm; + + union + { + uvm_ats_va_space_t ats; + + // HMM information about this VA space. 
+ uvm_hmm_va_space_t hmm; + }; + + struct + { + bool page_prefetch_enabled; + + atomic_t migrate_vma_allocation_fail_nth; + + uvm_thread_context_wrapper_t *dummy_thread_context_wrappers; + size_t num_dummy_thread_context_wrappers; + + atomic64_t destroy_gpu_va_space_delay_us; + } test; + + // Queue item for deferred f_ops->release() handling + nv_kthread_q_item_t deferred_release_q_item; +}; + +static uvm_gpu_t *uvm_va_space_get_gpu(uvm_va_space_t *va_space, uvm_gpu_id_t gpu_id) +{ + uvm_gpu_t *gpu; + + UVM_ASSERT(uvm_processor_mask_test(&va_space->registered_gpus, gpu_id)); + + gpu = va_space->registered_gpus_table[uvm_id_gpu_index(gpu_id)]; + + UVM_ASSERT(gpu); + UVM_ASSERT(uvm_gpu_get(gpu->global_id) == gpu); + + return gpu; +} + +static const char *uvm_va_space_processor_name(uvm_va_space_t *va_space, uvm_processor_id_t id) +{ + if (UVM_ID_IS_CPU(id)) + return "0: CPU"; + else + return uvm_gpu_name(uvm_va_space_get_gpu(va_space, id)); +} + +static void uvm_va_space_processor_uuid(uvm_va_space_t *va_space, NvProcessorUuid *uuid, uvm_processor_id_t id) +{ + if (UVM_ID_IS_CPU(id)) { + memcpy(uuid, &NV_PROCESSOR_UUID_CPU_DEFAULT, sizeof(*uuid)); + } + else { + uvm_gpu_t *gpu = uvm_va_space_get_gpu(va_space, id); + UVM_ASSERT(gpu); + memcpy(uuid, uvm_gpu_uuid(gpu), sizeof(*uuid)); + } +} + +static bool uvm_va_space_processor_has_memory(uvm_va_space_t *va_space, uvm_processor_id_t id) +{ + if (UVM_ID_IS_CPU(id)) + return true; + + return uvm_va_space_get_gpu(va_space, id)->mem_info.size > 0; +} + +// Checks if the VA space has been fully initialized (UVM_INITIALIZE has been +// called). Returns NV_OK if so, NV_ERR_ILLEGAL_ACTION otherwise. +// +// Locking: No requirements. The VA space lock does NOT need to be held when +// calling this function, though it is allowed. +static NV_STATUS uvm_va_space_initialized(uvm_va_space_t *va_space) +{ + // The common case by far is for the VA space to have already been + // initialized. This combined with the fact that some callers may never hold + // the VA space lock means we don't want the VA space lock to be taken to + // perform this check. + // + // Instead of locks, we rely on acquire/release memory ordering semantics. + // The release is done at the end of uvm_api_initialize() when the + // UVM_INITIALIZE ioctl completes. That opens the gate for any other + // threads. + // + // Using acquire semantics as opposed to a normal read will add slight + // overhead to every entry point on platforms with relaxed ordering. Should + // that overhead become noticeable we could have UVM_INITIALIZE use + // on_each_cpu to broadcast memory barriers. + if (likely(atomic_read_acquire(&va_space->initialized))) + return NV_OK; + + return NV_ERR_ILLEGAL_ACTION; +} + +NV_STATUS uvm_va_space_create(struct inode *inode, struct file *filp); +void uvm_va_space_destroy(uvm_va_space_t *va_space); + +// All VA space locking should be done with these wrappers. They're macros so +// lock assertions are attributed to line numbers correctly. 
+ +#define uvm_va_space_down_write(__va_space) \ + do { \ + uvm_mutex_lock(&(__va_space)->serialize_writers_lock); \ + uvm_mutex_lock(&(__va_space)->read_acquire_write_release_lock); \ + uvm_down_write(&(__va_space)->lock); \ + } while (0) + +#define uvm_va_space_up_write(__va_space) \ + do { \ + uvm_up_write(&(__va_space)->lock); \ + uvm_mutex_unlock(&(__va_space)->read_acquire_write_release_lock); \ + uvm_mutex_unlock(&(__va_space)->serialize_writers_lock); \ + } while (0) + +#define uvm_va_space_downgrade_write(__va_space) \ + do { \ + uvm_downgrade_write(&(__va_space)->lock); \ + uvm_mutex_unlock_out_of_order(&(__va_space)->read_acquire_write_release_lock); \ + uvm_mutex_unlock_out_of_order(&(__va_space)->serialize_writers_lock); \ + } while (0) + +// Call this when holding the VA space lock for write in order to downgrade to +// read on a path which also needs to make RM calls. +#define uvm_va_space_downgrade_write_rm(__va_space) \ + do { \ + uvm_assert_mutex_locked(&(__va_space)->serialize_writers_lock); \ + uvm_downgrade_write(&(__va_space)->lock); \ + uvm_mutex_unlock_out_of_order(&(__va_space)->read_acquire_write_release_lock); \ + } while (0) + +#define uvm_va_space_down_read(__va_space) \ + do { \ + uvm_mutex_lock(&(__va_space)->read_acquire_write_release_lock); \ + uvm_down_read(&(__va_space)->lock); \ + uvm_mutex_unlock_out_of_order(&(__va_space)->read_acquire_write_release_lock); \ + } while (0) + +// Call this if RM calls need to be made while holding the VA space lock in read +// mode. Note that taking read_acquire_write_release_lock is unnecessary since +// the down_read is serialized with another thread's up_write by the +// serialize_writers_lock. +#define uvm_va_space_down_read_rm(__va_space) \ + do { \ + uvm_mutex_lock(&(__va_space)->serialize_writers_lock); \ + uvm_down_read(&(__va_space)->lock); \ + } while (0) + +#define uvm_va_space_up_read(__va_space) uvm_up_read(&(__va_space)->lock) + +#define uvm_va_space_up_read_rm(__va_space) \ + do { \ + uvm_up_read(&(__va_space)->lock); \ + uvm_mutex_unlock(&(__va_space)->serialize_writers_lock); \ + } while (0) + +// Initialize the VA space with the user-provided flags, enabling ioctls and +// mmap. +NV_STATUS uvm_va_space_initialize(uvm_va_space_t *va_space, NvU64 flags); + +// Get a registered gpu by uuid. This restricts the search for GPUs, to those that +// have been registered with a va_space. This returns NULL if the GPU is not present, or not +// registered with the va_space. +// +// LOCKING: The VA space lock must be held. +uvm_gpu_t *uvm_va_space_get_gpu_by_uuid(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid); + +// Like uvm_va_space_get_gpu_by_uuid, but also returns NULL if the GPU does +// not have a GPU VA space registered in the UVM va_space. +// +// LOCKING: The VA space lock must be held. +uvm_gpu_t *uvm_va_space_get_gpu_by_uuid_with_gpu_va_space(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid); + +// Same as uvm_va_space_get_gpu_by_uuid but it also retains the GPU. The caller +// cannot assume that the GPU is still registered in the VA space after the +// function returns. +// +// LOCKING: The function takes and releases the VA space lock in read mode. +uvm_gpu_t *uvm_va_space_retain_gpu_by_uuid(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid); + +// Returns whether read-duplication is supported +// If gpu is NULL, returns the current state. 
+// otherwise, it returns what the result would be once the gpu's va space is added or removed
+// (by inverting the gpu's current state)
+bool uvm_va_space_can_read_duplicate(uvm_va_space_t *va_space, uvm_gpu_t *changing_gpu);
+
+// Register a gpu in the va space
+// Note that each gpu can only be registered once in a va space
+//
+// This call returns whether the GPU memory is a NUMA node in the kernel and the
+// corresponding node id.
+NV_STATUS uvm_va_space_register_gpu(uvm_va_space_t *va_space,
+                                    const NvProcessorUuid *gpu_uuid,
+                                    const uvm_rm_user_object_t *user_rm_va_space,
+                                    NvBool *numa_enabled,
+                                    NvS32 *numa_node_id);
+
+// Unregister a gpu from the va space
+NV_STATUS uvm_va_space_unregister_gpu(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid);
+
+// Registers a GPU VA space with the UVM VA space.
+NV_STATUS uvm_va_space_register_gpu_va_space(uvm_va_space_t *va_space,
+                                             uvm_rm_user_object_t *user_rm_va_space,
+                                             const NvProcessorUuid *gpu_uuid);
+
+// Unregisters a GPU VA space from the UVM VA space.
+NV_STATUS uvm_va_space_unregister_gpu_va_space(uvm_va_space_t *va_space, const NvProcessorUuid *gpu_uuid);
+
+// Stop all user channels
+//
+// This function sets a flag in the VA space indicating that all the channels
+// have already been stopped and should only be used when no new user channels
+// can be registered.
+//
+// LOCKING: The VA space lock must be held in read mode, not write.
+void uvm_va_space_stop_all_user_channels(uvm_va_space_t *va_space);
+
+// Calls uvm_user_channel_detach on all user channels in a VA space.
+//
+// The detached channels are added to the input list. The caller is expected to
+// drop the VA space lock and call uvm_deferred_free_object_list to complete the
+// destroy operation.
+//
+// LOCKING: The owning VA space must be locked in write mode.
+void uvm_va_space_detach_all_user_channels(uvm_va_space_t *va_space, struct list_head *deferred_free_list);
+
+// Returns whether peer access between these two GPUs has been enabled in this
+// VA space. Both GPUs must be registered in the VA space.
+bool uvm_va_space_peer_enabled(uvm_va_space_t *va_space, uvm_gpu_t *gpu1, uvm_gpu_t *gpu2);
+
+static uvm_va_space_t *uvm_va_space_get(struct file *filp)
+{
+    UVM_ASSERT(uvm_file_is_nvidia_uvm(filp));
+    UVM_ASSERT_MSG(filp->private_data != NULL, "filp: 0x%llx", (NvU64)filp);
+
+    return (uvm_va_space_t *)filp->private_data;
+}
+
+static uvm_va_block_context_t *uvm_va_space_block_context(uvm_va_space_t *va_space, struct mm_struct *mm)
+{
+    uvm_assert_rwsem_locked_write(&va_space->lock);
+    if (mm)
+        uvm_assert_mmap_lock_locked(mm);
+
+    uvm_va_block_context_init(&va_space->va_block_context, mm);
+    return &va_space->va_block_context;
+}
+
+// Retains the GPU VA space memory object. destroy_gpu_va_space and
+// uvm_gpu_va_space_release drop the count. This is used to keep the GPU VA
+// space object allocated when dropping and re-taking the VA space lock. If
+// another thread called remove_gpu_va_space in the meantime,
+// gpu_va_space->state will be UVM_GPU_VA_SPACE_STATE_DEAD.
+static inline void uvm_gpu_va_space_retain(uvm_gpu_va_space_t *gpu_va_space)
+{
+    nv_kref_get(&gpu_va_space->kref);
+}
+
+// This only frees the GPU VA space object itself, so it must have been removed
+// from its VA space and destroyed prior to the final release.
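+//
+// As an illustrative sketch (the surrounding caller and the work done while
+// unlocked are assumed), a non-final retain/release pair keeps the object
+// alive across a VA space lock drop:
+//
+//     uvm_gpu_va_space_retain(gpu_va_space);
+//     uvm_va_space_up_read(va_space);
+//     ... work which cannot be done while holding the VA space lock ...
+//     uvm_va_space_down_read(va_space);
+//     if (gpu_va_space->state == UVM_GPU_VA_SPACE_STATE_DEAD)
+//         ... another thread removed the GPU VA space in the meantime ...
+//     uvm_gpu_va_space_release(gpu_va_space);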
+void uvm_gpu_va_space_release(uvm_gpu_va_space_t *gpu_va_space); + +// Wrapper for nvUvmInterfaceUnsetPageDirectory +void uvm_gpu_va_space_unset_page_dir(uvm_gpu_va_space_t *gpu_va_space); + +static uvm_gpu_va_space_state_t uvm_gpu_va_space_state(uvm_gpu_va_space_t *gpu_va_space) +{ + UVM_ASSERT(gpu_va_space->gpu); + UVM_ASSERT(gpu_va_space->va_space); + + return gpu_va_space->state; +} + +static uvm_gpu_va_space_t *uvm_gpu_va_space_get_by_parent_gpu(uvm_va_space_t *va_space, uvm_parent_gpu_t *parent_gpu) +{ + uvm_gpu_va_space_t *gpu_va_space; + + uvm_assert_rwsem_locked(&va_space->lock); + + if (!parent_gpu || !uvm_processor_mask_test(&va_space->registered_gpu_va_spaces, parent_gpu->id)) + return NULL; + + gpu_va_space = va_space->gpu_va_spaces[uvm_id_gpu_index(parent_gpu->id)]; + UVM_ASSERT(uvm_gpu_va_space_state(gpu_va_space) == UVM_GPU_VA_SPACE_STATE_ACTIVE); + UVM_ASSERT(gpu_va_space->va_space == va_space); + UVM_ASSERT(gpu_va_space->gpu->parent == parent_gpu); + + return gpu_va_space; +} + +static uvm_gpu_va_space_t *uvm_gpu_va_space_get(uvm_va_space_t *va_space, uvm_gpu_t *gpu) +{ + uvm_gpu_va_space_t *gpu_va_space; + + if (!gpu) + return NULL; + + gpu_va_space = uvm_gpu_va_space_get_by_parent_gpu(va_space, gpu->parent); + if (gpu_va_space) + UVM_ASSERT(gpu_va_space->gpu == gpu); + + return gpu_va_space; +} + +#define for_each_gpu_va_space(__gpu_va_space, __va_space) \ + for (__gpu_va_space = \ + uvm_gpu_va_space_get( \ + __va_space, \ + uvm_processor_mask_find_first_va_space_gpu(&__va_space->registered_gpu_va_spaces, va_space) \ + ); \ + __gpu_va_space; \ + __gpu_va_space = \ + uvm_gpu_va_space_get( \ + __va_space, \ + __uvm_processor_mask_find_next_va_space_gpu(&__va_space->registered_gpu_va_spaces, \ + va_space, \ + __gpu_va_space->gpu) \ + ) \ + ) + +// Return the first GPU set in the given mask or NULL. The caller must ensure +// that the GPUs set in the mask are registered in the VA space and cannot be +// unregistered during this call. +static uvm_gpu_t *uvm_processor_mask_find_first_va_space_gpu(const uvm_processor_mask_t *mask, uvm_va_space_t *va_space) +{ + uvm_gpu_t *gpu; + uvm_gpu_id_t gpu_id; + + UVM_ASSERT(uvm_processor_mask_gpu_subset(mask, &va_space->registered_gpus)); + + gpu_id = uvm_processor_mask_find_first_gpu_id(mask); + if (UVM_ID_IS_INVALID(gpu_id)) + return NULL; + + gpu = uvm_va_space_get_gpu(va_space, gpu_id); + UVM_ASSERT_MSG(gpu, "gpu_id %u\n", uvm_id_value(gpu_id)); + + return gpu; +} + +static uvm_gpu_t *uvm_va_space_find_first_gpu(uvm_va_space_t *va_space) +{ + uvm_assert_rwsem_locked(&va_space->lock); + + return uvm_processor_mask_find_first_va_space_gpu(&va_space->registered_gpus, va_space); +} + +// Same as uvm_processor_mask_find_next_va_space_gpu below, but gpu cannot be +// NULL +static uvm_gpu_t *__uvm_processor_mask_find_next_va_space_gpu(const uvm_processor_mask_t *mask, + uvm_va_space_t *va_space, + uvm_gpu_t *gpu) +{ + uvm_gpu_id_t gpu_id; + + UVM_ASSERT(gpu != NULL); + UVM_ASSERT(uvm_processor_mask_gpu_subset(mask, &va_space->registered_gpus)); + + gpu_id = uvm_processor_mask_find_next_id(mask, uvm_gpu_id_next(gpu->id)); + if (UVM_ID_IS_INVALID(gpu_id)) + return NULL; + + gpu = uvm_va_space_get_gpu(va_space, gpu_id); + UVM_ASSERT_MSG(gpu, "gpu_id %u\n", uvm_id_value(gpu_id)); + + return gpu; +} + +// Return the next GPU with an id larger than gpu->id set in the given mask. +// The function returns NULL if gpu is NULL. 
The caller must ensure that the
+// GPUs set in the mask are registered in the VA space and cannot be
+// unregistered during this call.
+static uvm_gpu_t *uvm_processor_mask_find_next_va_space_gpu(const uvm_processor_mask_t *mask,
+                                                            uvm_va_space_t *va_space,
+                                                            uvm_gpu_t *gpu)
+{
+    if (gpu == NULL)
+        return NULL;
+
+    return __uvm_processor_mask_find_next_va_space_gpu(mask, va_space, gpu);
+}
+
+#define for_each_va_space_gpu_in_mask(gpu, va_space, mask)                                         \
+    for (({uvm_assert_rwsem_locked(&(va_space)->lock);                                             \
+           gpu = uvm_processor_mask_find_first_va_space_gpu(mask, va_space);});                    \
+           gpu != NULL;                                                                            \
+           gpu = __uvm_processor_mask_find_next_va_space_gpu(mask, va_space, gpu))
+
+// Helper to iterate over all GPUs registered in a UVM VA space
+#define for_each_va_space_gpu(gpu, va_space) \
+    for_each_va_space_gpu_in_mask(gpu, va_space, &(va_space)->registered_gpus)
+
+static void uvm_va_space_global_gpus_in_mask(uvm_va_space_t *va_space,
+                                             uvm_global_processor_mask_t *global_mask,
+                                             const uvm_processor_mask_t *mask)
+{
+    uvm_gpu_t *gpu;
+
+    uvm_global_processor_mask_zero(global_mask);
+
+    for_each_va_space_gpu_in_mask(gpu, va_space, mask)
+        uvm_global_processor_mask_set(global_mask, gpu->global_id);
+}
+
+static void uvm_va_space_global_gpus(uvm_va_space_t *va_space, uvm_global_processor_mask_t *global_mask)
+{
+    uvm_va_space_global_gpus_in_mask(va_space, global_mask, &va_space->registered_gpus);
+}
+
+// Return the processor in the candidates mask that is "closest" to src, or
+// UVM_ID_MAX_PROCESSORS if candidates is empty. The order is:
+// - src itself
+// - Direct NVLINK GPU peers if src is CPU or GPU (1)
+// - NVLINK CPU if src is GPU
+// - Indirect NVLINK GPU peers if src is GPU
+// - PCIe peers if src is GPU (2)
+// - CPU if src is GPU
+// - Deterministic selection from the pool of candidates
+//
+// (1) When src is a GPU, NVLINK GPU peers are preferred over the CPU because in
+//     NUMA systems the CPU processor may refer to multiple CPU NUMA nodes, and
+//     the bandwidth between src and the farthest CPU node can be substantially
+//     lower than the bandwidth between src and its peer GPUs.
+// (2) TODO: Bug 1764943: Is copying from a PCI peer always better than copying
+//     from CPU?
+uvm_processor_id_t uvm_processor_mask_find_closest_id(uvm_va_space_t *va_space,
+                                                      const uvm_processor_mask_t *candidates,
+                                                      uvm_processor_id_t src);
+
+// Iterate over each ID in mask in order of proximity to src. This is
+// destructive to mask.
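+//
+// For example (an illustrative sketch; use_processor() is a placeholder), to
+// visit the registered GPUs in order of proximity to the CPU, iterate over a
+// local copy since the mask is consumed:
+//
+//     uvm_processor_mask_t candidates;
+//     uvm_processor_id_t id;
+//
+//     uvm_processor_mask_copy(&candidates, &va_space->registered_gpus);
+//     for_each_closest_id(id, &candidates, UVM_ID_CPU, va_space)
+//         use_processor(id);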
+#define for_each_closest_id(id, mask, src, va_space) \ + for (id = uvm_processor_mask_find_closest_id(va_space, mask, src); \ + UVM_ID_IS_VALID(id); \ + uvm_processor_mask_clear(mask, id), id = uvm_processor_mask_find_closest_id(va_space, mask, src)) + +// Return the GPU whose memory corresponds to the given node_id +static uvm_gpu_t *uvm_va_space_find_gpu_with_memory_node_id(uvm_va_space_t *va_space, int node_id) +{ + uvm_gpu_t *gpu; + + UVM_ASSERT(nv_numa_node_has_memory(node_id)); + + if (!g_uvm_global.ats.supported) + return NULL; + + for_each_va_space_gpu(gpu, va_space) { + UVM_ASSERT(gpu->parent->numa_info.enabled); + + if (uvm_gpu_numa_info(gpu)->node_id == node_id) + return gpu; + } + + return NULL; +} + +static bool uvm_va_space_memory_node_is_gpu(uvm_va_space_t *va_space, int node_id) +{ + return uvm_va_space_find_gpu_with_memory_node_id(va_space, node_id) != NULL; +} + +// Return a processor mask with the GPUs attached to the node_id CPU memory +// node +static void uvm_va_space_get_gpus_attached_to_cpu_node(uvm_va_space_t *va_space, + int node_id, + uvm_processor_mask_t *gpus) +{ + uvm_gpu_id_t gpu_id; + + UVM_ASSERT(!uvm_va_space_memory_node_is_gpu(va_space, node_id)); + + for_each_gpu_id(gpu_id) { + const uvm_cpu_gpu_affinity_t *affinity = &va_space->gpu_cpu_numa_affinity[uvm_id_gpu_index(gpu_id)]; + if (affinity->numa_node == node_id) { + uvm_processor_mask_copy(gpus, &affinity->gpus); + return; + } + } + + uvm_processor_mask_zero(gpus); +} + +// Helper that returns the first GPU in the mask returned by +// uvm_va_space_get_gpus_attached_to_cpu_node or NULL if empty +static uvm_gpu_t *uvm_va_space_find_first_gpu_attached_to_cpu_node(uvm_va_space_t *va_space, int node_id) +{ + uvm_processor_mask_t gpus; + + uvm_va_space_get_gpus_attached_to_cpu_node(va_space, node_id, &gpus); + + return uvm_processor_mask_find_first_va_space_gpu(&gpus, va_space); +} + +// Obtain the user channel with the given instance_ptr. This is used during +// non-replayable fault service. This function needs to be called with the va +// space lock held in order to prevent channels from being removed. +uvm_user_channel_t *uvm_gpu_va_space_get_user_channel(uvm_gpu_va_space_t *gpu_va_space, + uvm_gpu_phys_address_t instance_ptr); + +// Whether some form of pageable access (ATS, HMM) is supported by the system on +// this VA space. This does NOT check whether GPUs with pageable support are +// present, just whether system + VA space support exists. 
+bool uvm_va_space_pageable_mem_access_supported(uvm_va_space_t *va_space); + +NV_STATUS uvm_test_get_pageable_mem_access_type(UVM_TEST_GET_PAGEABLE_MEM_ACCESS_TYPE_PARAMS *params, + struct file *filp); +NV_STATUS uvm_test_enable_nvlink_peer_access(UVM_TEST_ENABLE_NVLINK_PEER_ACCESS_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_disable_nvlink_peer_access(UVM_TEST_DISABLE_NVLINK_PEER_ACCESS_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_destroy_gpu_va_space_delay(UVM_TEST_DESTROY_GPU_VA_SPACE_DELAY_PARAMS *params, struct file *filp); +#endif // __UVM_VA_SPACE_H__ diff --git a/kernel-open/nvidia-uvm/uvm_va_space_mm.c b/kernel-open/nvidia-uvm/uvm_va_space_mm.c new file mode 100644 index 000000000..5b63d3b90 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_space_mm.c @@ -0,0 +1,668 @@ +/******************************************************************************* + Copyright (c) 2018-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_common.h" +#include "uvm_kvmalloc.h" +#include "uvm_va_space.h" +#include "uvm_va_space_mm.h" +#include "uvm_ats.h" +#include "uvm_api.h" +#include "uvm_test.h" +#include "uvm_test_ioctl.h" + +// +// This comment block describes some implementation rationale. See the header +// for the API descriptions. +// +// ========================= Retain count vs mm_users ========================== +// +// uvm_va_space_mm manages its own retained ref count and wait queue. When the +// mm is disabled via mmu_notifier_release, we use the wait queue to wait for +// the ref count to go to 0. +// +// An alternative is to scrap all that and just use mm_users. Retain would call +// something like mmget_not_zero(mm), and release would use mmput(). No +// additional count would be needed, since we'd be guaranteed that the mm_struct +// couldn't start teardown while we have the count. +// +// There are two reasons we don't take that approach. The first is that we +// cannot call mmput() on all paths which retain a va_space_mm, particularly the +// GPU fault handler. mmput() may result in exit_mmap(), which could result in +// RM calls and VA space destroy. Those need to wait for the GPU fault handler +// to finish, so if we took that approach we'd deadlock. +// +// There is a recent mmput_async() addition to the kernel which would resolve +// this problem. As of v5.2 it was not exported for drivers, although that +// appears to be in the works. 
However, even if exported we would have to +// implement a fallback path for earlier kernels, and the complexity of such a +// path would be significant. +// +// The second problem is that mmget_not_zero() approach is susceptible to +// livelock problems. Two threads, say the fault handlers for two GPUs, could +// conceivably keep mm_users high by constantly retaining and releasing, +// preventing the mm from ever being torn down. The va_space_mm implementation +// below does not have this problem because mm shutdown causes later retains to +// fail, which guarantees that the count will eventually go to 0. +// +// ============================ Handling mm teardown =========================== +// +// mmu_notifiers call the mm release callback both when the mm is really getting +// shut down, and whenever mmu_notifier_unregister is called. This has several +// consequences, including that these two paths can race. If they do race, they +// wait for each other to finish (real teardown of the mm won't start until the +// mmu_notifier_unregister's callback has returned, and mmu_notifier_unregister +// won't return until the mm release callback has returned). +// +// When the mm is really getting torn down, uvm_va_space_mm_shutdown is expected +// to stop all GPU memory accesses to that mm and stop servicing faults in that +// mm. This essentially shuts down the VA space for new work. The VA space +// object remains valid for most teardown ioctls until the file is closed, +// because it's legal for the associated process to die then for another process +// with a reference on the file to perform the unregisters or associated ioctls. +// This is particularly true for tools users. +// +// An exception to the above is UvmUnregisterChannel. Since channels are +// completely removed from the VA space on mm teardown, later channel +// unregisters will fail to find the handles and will return an error. +// +// The UVM driver will only call mmu_notifier_unregister during VA space destroy +// (file close). +// +// Here is a table of the various teardown scenarios: +// +// Can race with +// Scenario mm teardown +// ----------------------------------------------------------------------------- +// 1) Process exit (mm teardown, file open) - +// 2) Explicit file close in original mm No +// 3) Explicit file close in different mm Yes +// 4) Implicit file close (exit) in original mm No +// 5) Implicit file close (exit) in different mm Yes +// +// At a high level, the sequence of operations to perform during mm teardown is: +// +// 1) Stop all channels +// - Prevents new faults and accesses on non-MPS +// 2) Detach all channels +// - Prevents pending faults from being translated to this VA space +// - Non-replayable faults will be dropped so no new ones can arrive +// - Access counter notifications will be prevented from getting new +// translations to this VA space. Pending entries may attempt to retain +// the mm, but will drop the notification if they can't be serviced. +// 3) Flush the fault buffer +// - The only reason to flush the fault buffer is to avoid spurious +// cancels. If we didn't flush the fault buffer before marking the mm +// as dead, then remaining faults which require the mm would be +// cancelled. Since the faults might be stale, we would record cancel +// events which didn't really happen (the access didn't happen after +// the mm died). By flushing we clear out all stale faults, and in +// the case of MPS, cancel real faults after. 
+// 4) UnsetPageDir +// - Prevents new accesses on MPS +// 5) Mark the va_space_mm as dead +// - Prevents new retainers from using the mm. There won't be any more on +// the fault handling paths, but there could be others in worker threads. +// +// Here are some tables of each step in the sequence, and what operations can +// still be performed after each step. This is all from the perspective of a +// single VA space. "Untranslated" means that the fault entry has not been +// translated to a uvm_va_space yet. +// +// Replayable non-MPS Behavior: +// +// Can Pending Pending Can be +// access Can untranslated translated servicing +// memory fault faults faults faults +// ----------------------------------------------------------------------------- +// Shutdown start Yes Yes Service Service Yes +// Stop channels No No Service [1] Service [1] Yes [1] +// Detach channels No No Flush buffer Service [1] Yes [1], [2] +// Flush buffer No No None possible None possible No +// UnsetPageDir No No None possible None possible No +// +// +// Replayable MPS Behavior: +// +// Can Pending Pending Can be +// access Can untranslated translated servicing +// memory fault faults faults faults +// ----------------------------------------------------------------------------- +// Shutdown start Yes Yes Service Service Yes +// Stop channels Yes Yes Service Service Yes +// Detach channels Yes Yes Cancel, flush Service Yes +// Flush buffer Yes Yes Cancel, flush None possible No +// UnsetPageDir No [3] Yes Cancel, flush None possible No +// +// +// [1]: All pending faults in this VA space are stale since channel stop +// preempted the context. +// [2]: Faults in this VA space can't be serviced concurrently with detach since +// detach holds the VA space lock in write mode. Faults in other VA spaces +// can be serviced, and stale faults in this VA space can resume service +// after detach is done. +// [3]: Due to the nature of MPS, remaining work which had started under the VA +// space could still execute and attempt to make memory accesses. However, +// since the PDB at that point is empty and ATS is disabled (if available), +// all accesses will fault and be cancelled rather than successfully +// translate to physical memory. +// +// ============================================================================= + +#define UVM_VA_SPACE_MM_SHUTDOWN_DELAY_MAX_MS 100 + +static int uvm_enable_va_space_mm = 1; +module_param(uvm_enable_va_space_mm, int, S_IRUGO); +MODULE_PARM_DESC(uvm_enable_va_space_mm, + "Set to 0 to disable UVM from using mmu_notifiers to create " + "an association between a UVM VA space and a process. This " + "will also disable pageable memory access via either ATS or " + "HMM."); + +bool uvm_va_space_mm_enabled_system(void) +{ + return UVM_CAN_USE_MMU_NOTIFIERS() && uvm_enable_va_space_mm; +} + +bool uvm_va_space_mm_enabled(uvm_va_space_t *va_space) +{ + // A va_space doesn't have any association with an mm in multi-process + // sharing mode. + if (va_space->initialization_flags & UVM_INIT_FLAGS_MULTI_PROCESS_SHARING_MODE) + return false; + + return uvm_va_space_mm_enabled_system(); +} + +static void uvm_va_space_mm_shutdown(uvm_va_space_t *va_space); + +#if UVM_CAN_USE_MMU_NOTIFIERS() + + static uvm_va_space_t *get_va_space(struct mmu_notifier *mn) + { + // This may be called without a thread context present, so be careful + // what is used here. 
+ return container_of(mn, uvm_va_space_t, va_space_mm.mmu_notifier); + } + + static void uvm_mmu_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) + { + UVM_ENTRY_VOID(uvm_va_space_mm_shutdown(get_va_space(mn))); + } + + static void uvm_mmu_notifier_invalidate_range_ats(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) + { + // In most cases ->invalidate_range() is called with exclusive end. + // uvm_ats_invalidate() expects an inclusive end so we have to + // convert it. + // + // There's a special case however. Kernel TLB gathering sometimes + // identifies "fullmm" invalidates by setting both start and end to ~0. + // + // It's unclear if there are any other cases in which the kernel will + // call us with start == end. Since we can't definitively say no, we + // conservatively treat all such calls as full invalidates. + if (start == end) { + start = 0; + end = ~0UL; + } + else { + --end; + } + + UVM_ENTRY_VOID(uvm_ats_invalidate(get_va_space(mn), start, end)); + } + + static struct mmu_notifier_ops uvm_mmu_notifier_ops_release = + { + .release = uvm_mmu_notifier_release, + }; + + static struct mmu_notifier_ops uvm_mmu_notifier_ops_ats = + { + .release = uvm_mmu_notifier_release, + .invalidate_range = uvm_mmu_notifier_invalidate_range_ats, + }; + + static int uvm_mmu_notifier_register(uvm_va_space_mm_t *va_space_mm) + { + UVM_ASSERT(va_space_mm->mm); + uvm_assert_mmap_lock_locked_write(va_space_mm->mm); + + if (UVM_ATS_IBM_SUPPORTED_IN_DRIVER() && g_uvm_global.ats.enabled) + va_space_mm->mmu_notifier.ops = &uvm_mmu_notifier_ops_ats; + else + va_space_mm->mmu_notifier.ops = &uvm_mmu_notifier_ops_release; + + return __mmu_notifier_register(&va_space_mm->mmu_notifier, va_space_mm->mm); + } + + static void uvm_mmu_notifier_unregister(uvm_va_space_mm_t *va_space_mm) + { + mmu_notifier_unregister(&va_space_mm->mmu_notifier, va_space_mm->mm); + } +#else + static int uvm_mmu_notifier_register(uvm_va_space_mm_t *va_space_mm) + { + UVM_ASSERT(0); + return 0; + } + + static void uvm_mmu_notifier_unregister(uvm_va_space_mm_t *va_space_mm) + { + UVM_ASSERT(0); + } +#endif // UVM_CAN_USE_MMU_NOTIFIERS() + +NV_STATUS uvm_va_space_mm_register(uvm_va_space_t *va_space) +{ + uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm; + int ret; + + uvm_assert_mmap_lock_locked_write(current->mm); + uvm_assert_rwsem_locked_write(&va_space->lock); + + UVM_ASSERT(uvm_va_space_initialized(va_space) != NV_OK); + if (!uvm_va_space_mm_enabled(va_space)) + return NV_OK; + + UVM_ASSERT(!va_space_mm->mm); + va_space_mm->mm = current->mm; + + // We must be prepared to handle callbacks as soon as we make this call, + // except for ->release() which can't be called since the mm belongs to + // current. + ret = uvm_mmu_notifier_register(va_space_mm); + if (ret) { + // Inform uvm_va_space_mm_unregister() that it has nothing to do. + va_space_mm->mm = NULL; + return errno_to_nv_status(ret); + } + + uvm_spin_lock(&va_space_mm->lock); + va_space_mm->alive = true; + uvm_spin_unlock(&va_space_mm->lock); + + return NV_OK; +} + +void uvm_va_space_mm_unregister(uvm_va_space_t *va_space) +{ + uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm; + + // We can't hold the VA space lock or mmap_lock across this function since + // mmu_notifier_unregister() may trigger uvm_va_space_mm_shutdown(), which + // takes those locks and also waits for other threads which may take those + // locks. 
+ uvm_assert_unlocked_order(UVM_LOCK_ORDER_MMAP_LOCK); + uvm_assert_unlocked_order(UVM_LOCK_ORDER_VA_SPACE); + + if (!va_space_mm->mm) + return; + + UVM_ASSERT(uvm_va_space_mm_enabled(va_space)); + uvm_mmu_notifier_unregister(va_space_mm); + + // We're guaranteed that upon return from mmu_notifier_unregister(), + // uvm_va_space_mm_shutdown() will have been called (though perhaps not by + // this thread). Therefore all retainers have been flushed. + UVM_ASSERT(!va_space_mm->alive); + UVM_ASSERT(va_space_mm->retained_count == 0); + va_space_mm->mm = NULL; +} + +struct mm_struct *uvm_va_space_mm_retain(uvm_va_space_t *va_space) +{ + uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm; + struct mm_struct *mm = NULL; + + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + + if (!uvm_va_space_mm_enabled(va_space)) + return NULL; + + uvm_spin_lock(&va_space_mm->lock); + + if (va_space_mm->alive) { + ++va_space_mm->retained_count; + mm = va_space_mm->mm; + UVM_ASSERT(mm); + } + + uvm_spin_unlock(&va_space_mm->lock); + + return mm; +} + +struct mm_struct *uvm_va_space_mm_or_current_retain(uvm_va_space_t *va_space) +{ + uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm; + + // We should only attempt to use current->mm from a user thread + UVM_ASSERT(!(current->flags & PF_KTHREAD)); + + UVM_ASSERT(uvm_va_space_initialized(va_space) == NV_OK); + + // current->mm is NULL when we're in process teardown. In that case it + // doesn't make sense to use any mm. + if (!current->mm) + return NULL; + + // If the va_space_mm matches current->mm then it would be safe but sub- + // optimal to call uvm_va_space_mm_retain(). current->mm is always valid to + // use when non-NULL so there is no need to retain it. + if (!uvm_va_space_mm_enabled(va_space) || va_space_mm->mm == current->mm) + return current->mm; + + return uvm_va_space_mm_retain(va_space); +} + +void uvm_va_space_mm_release(uvm_va_space_t *va_space) +{ + uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm; + bool do_wake = false; + + UVM_ASSERT(uvm_va_space_mm_enabled(va_space)); + + // The mm must not have been torn down while we have it retained + UVM_ASSERT(va_space_mm->mm); + + uvm_spin_lock(&va_space_mm->lock); + + UVM_ASSERT(va_space_mm->retained_count > 0); + --va_space_mm->retained_count; + + // If we're the last retainer on a dead mm, signal any potential waiters + if (va_space_mm->retained_count == 0 && !va_space_mm->alive) + do_wake = true; + + uvm_spin_unlock(&va_space_mm->lock); + + // There could be multiple threads in uvm_va_space_mm_shutdown() waiting on + // us, so we have to wake up all waiters. 
+ if (do_wake) + wake_up_all(&va_space_mm->last_retainer_wait_queue); +} + +void uvm_va_space_mm_or_current_release(uvm_va_space_t *va_space, struct mm_struct *mm) +{ + if (mm && mm != current->mm) + uvm_va_space_mm_release(va_space); +} + +static void uvm_va_space_mm_shutdown_delay(uvm_va_space_t *va_space) +{ + uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm; + NvU64 start_time; + int num_threads; + bool timed_out = false; + + if (!va_space_mm->test.delay_shutdown) + return; + + start_time = NV_GETTIME(); + + num_threads = atomic_inc_return(&va_space_mm->test.num_mm_shutdown_threads); + UVM_ASSERT(num_threads > 0); + + if (num_threads == 1) { + // Wait for another thread to arrive unless we time out + while (atomic_read(&va_space_mm->test.num_mm_shutdown_threads) == 1) { + if (NV_GETTIME() - start_time >= 1000*1000*UVM_VA_SPACE_MM_SHUTDOWN_DELAY_MAX_MS) { + timed_out = true; + break; + } + } + + if (va_space_mm->test.verbose) + UVM_TEST_PRINT("Multiple threads: %d\n", !timed_out); + } + + // No need to decrement num_mm_shutdown_threads since this va_space_mm is + // being shut down. +} + +// Handles the va_space's mm being torn down while the VA space still exists. +// This function won't return until all in-flight retainers have called +// uvm_va_space_mm_release(). Subsequent calls to uvm_va_space_mm_retain() will +// return NULL. +// +// uvm_va_space_mm_unregister() must still be called. It is guaranteed that +// uvm_va_space_mm_shutdown() will not be called after +// uvm_va_space_mm_unregister() returns, though they may execute concurrently. +// If so, uvm_va_space_mm_unregister() will not return until +// uvm_va_space_mm_shutdown() is done. +// +// After this call returns the VA space is essentially dead. GPUs cannot make +// any new memory accesses in registered GPU VA spaces, and no more GPU faults +// which are attributed to this VA space will arrive. Additionally, no more +// registration within the VA space is allowed (GPU, GPU VA space, or channel). +// +// The requirements for this callback are that, once we return, the GPU and +// driver are completely done using the associated mm_struct. This includes: +// +// 1) GPUs will not issue any more memory accesses under this mm +// 2) [ATS only] GPUs will not issue any more ATRs under this mm +// 3) The driver will not ask the kernel to service faults on this mm +// +static void uvm_va_space_mm_shutdown(uvm_va_space_t *va_space) +{ + uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm; + uvm_gpu_va_space_t *gpu_va_space; + uvm_gpu_t *gpu; + uvm_global_processor_mask_t gpus_to_flush; + LIST_HEAD(deferred_free_list); + + // The mm must not have been torn down completely yet, but it may have been + // marked as dead by a concurrent thread. + UVM_ASSERT(uvm_va_space_mm_enabled(va_space)); + UVM_ASSERT(va_space_mm->mm); + + // Inject a delay for testing if requested + uvm_va_space_mm_shutdown_delay(va_space); + + // There can be at most two threads here concurrently: + // + // 1) Thread A in process teardown of the original process + // + // 2) Thread B must be in the file close path of another process (either + // implicit or explicit), having already stopped all GPU accesses and + // having called uvm_va_space_mm_unregister. + // + // This corresponds to scenario #5 in the mm teardown block comment at the + // top of the file. We serialize between these threads with the VA space + // lock, but otherwise don't have any special handling: both threads will + // execute the full teardown sequence below. 
Also, remember that the threads + // won't return to their callers until both threads have returned from this + // function (following the rules for mmu_notifier_unregister). + + uvm_va_space_down_write(va_space); + + // Prevent future registrations of any kind. We'll be iterating over all + // GPUs and GPU VA spaces below but taking and dropping the VA space lock. + // It's ok for other threads to unregister those objects, but not to + // register new ones. + // + // We also need to prevent new channel work from arriving since we're trying + // to stop memory accesses. + va_space->disallow_new_registers = true; + + uvm_va_space_downgrade_write_rm(va_space); + + // Stop channels to prevent new accesses and new faults on non-MPS + uvm_va_space_stop_all_user_channels(va_space); + + uvm_va_space_up_read_rm(va_space); + + // Detach all channels to prevent pending untranslated faults from getting + // to this VA space. This also removes those channels from the VA space and + // puts them on the deferred free list, so only one thread will do this. + uvm_va_space_down_write(va_space); + uvm_va_space_detach_all_user_channels(va_space, &deferred_free_list); + uvm_va_space_global_gpus_in_mask(va_space, &gpus_to_flush, &va_space->faultable_processors); + uvm_global_mask_retain(&gpus_to_flush); + uvm_va_space_up_write(va_space); + + // Flush the fault buffer on all GPUs. This will avoid spurious cancels + // of stale pending translated faults after we clear va_space_mm->alive + // later. + for_each_global_gpu_in_mask(gpu, &gpus_to_flush) + uvm_gpu_fault_buffer_flush(gpu); + + uvm_global_mask_release(&gpus_to_flush); + + // Call nvUvmInterfaceUnsetPageDirectory. This has no effect on non-MPS. + // Under MPS this guarantees that no new GPU accesses will be made using + // this mm. + // + // We need only one thread to make this call, but two threads in here could + // race for it, or we could have one thread in here and one in + // destroy_gpu_va_space. Serialize these by starting in write mode then + // downgrading to read. + uvm_va_space_down_write(va_space); + uvm_va_space_downgrade_write_rm(va_space); + for_each_gpu_va_space(gpu_va_space, va_space) + uvm_gpu_va_space_unset_page_dir(gpu_va_space); + uvm_va_space_up_read_rm(va_space); + + // The above call to uvm_gpu_va_space_unset_page_dir handles the GPU VA + // spaces which are known to be registered. However, we could've raced with + // a concurrent uvm_va_space_unregister_gpu_va_space, giving this sequence: + // + // unregister_gpu_va_space uvm_va_space_mm_shutdown + // uvm_va_space_down_write + // remove_gpu_va_space + // uvm_va_space_up_write + // uvm_va_space_down_write(va_space); + // // No GPU VA spaces + // Unlock, return + // uvm_deferred_free_object_list + // uvm_gpu_va_space_unset_page_dir + // + // We have to be sure that all accesses in this GPU VA space are done before + // returning, so we have to wait for the other thread to finish its + // uvm_gpu_va_space_unset_page_dir call. + // + // We can be sure that num_pending will eventually go to zero because we've + // prevented new GPU VA spaces from being registered above. + wait_event(va_space->gpu_va_space_deferred_free.wait_queue, + atomic_read(&va_space->gpu_va_space_deferred_free.num_pending) == 0); + + // Now that there won't be any new GPU faults, prevent subsequent retainers + // from accessing this mm. + uvm_spin_lock(&va_space_mm->lock); + va_space_mm->alive = false; + uvm_spin_unlock(&va_space_mm->lock); + + // Finish channel destroy. 
This can be done at any point after detach as + // long as we don't hold the VA space lock. + uvm_deferred_free_object_list(&deferred_free_list); + + // Flush out all pending retainers + wait_event(va_space_mm->last_retainer_wait_queue, va_space_mm->retained_count == 0); +} + +static NV_STATUS mm_read64(struct mm_struct *mm, NvU64 addr, NvU64 *val) +{ + long ret; + int write = 0, force = 0; + struct page *page; + NvU64 *mapping; + + UVM_ASSERT(IS_ALIGNED(addr, sizeof(*val))); + + uvm_down_read_mmap_lock(mm); + ret = NV_GET_USER_PAGES_REMOTE(NULL, mm, (unsigned long)addr, 1, write, force, &page, NULL); + uvm_up_read_mmap_lock(mm); + + if (ret < 0) + return errno_to_nv_status(ret); + + UVM_ASSERT(ret == 1); + + mapping = (NvU64 *)((char *)kmap(page) + (addr % PAGE_SIZE)); + *val = *mapping; + kunmap(page); + put_page(page); + + return NV_OK; +} + +NV_STATUS uvm_test_va_space_mm_retain(UVM_TEST_VA_SPACE_MM_RETAIN_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = NULL; + struct mm_struct *mm = NULL; + NV_STATUS status = NV_OK; + + if (!IS_ALIGNED(params->addr, sizeof(params->val_before))) + return NV_ERR_INVALID_ARGUMENT; + + uvm_mutex_lock(&g_uvm_global.va_spaces.lock); + + list_for_each_entry(va_space, &g_uvm_global.va_spaces.list, list_node) { + if ((uintptr_t)va_space == params->va_space_ptr && uvm_va_space_initialized(va_space) == NV_OK) { + mm = uvm_va_space_mm_retain(va_space); + break; + } + } + + uvm_mutex_unlock(&g_uvm_global.va_spaces.lock); + + if ((uintptr_t)va_space != params->va_space_ptr) + return NV_ERR_MISSING_TABLE_ENTRY; + + if (!mm) + return NV_ERR_PAGE_TABLE_NOT_AVAIL; + + status = mm_read64(mm, params->addr, ¶ms->val_before); + + if (status == NV_OK && params->sleep_us) { + usleep_range(params->sleep_us, params->sleep_us + 1000); + status = mm_read64(mm, params->addr, ¶ms->val_after); + } + + uvm_va_space_mm_release(va_space); + return status; +} + +NV_STATUS uvm_test_va_space_mm_delay_shutdown(UVM_TEST_VA_SPACE_MM_DELAY_SHUTDOWN_PARAMS *params, struct file *filp) +{ + uvm_va_space_t *va_space = uvm_va_space_get(filp); + uvm_va_space_mm_t *va_space_mm = &va_space->va_space_mm; + NV_STATUS status = NV_ERR_PAGE_TABLE_NOT_AVAIL; + + uvm_va_space_down_write(va_space); + + if (uvm_va_space_mm_retain(va_space)) { + va_space_mm->test.delay_shutdown = true; + va_space_mm->test.verbose = params->verbose; + uvm_va_space_mm_release(va_space); + status = NV_OK; + } + + uvm_va_space_up_write(va_space); + + return status; +} diff --git a/kernel-open/nvidia-uvm/uvm_va_space_mm.h b/kernel-open/nvidia-uvm/uvm_va_space_mm.h new file mode 100644 index 000000000..c866e16e8 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_va_space_mm.h @@ -0,0 +1,186 @@ +/******************************************************************************* + Copyright (c) 2018-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_VA_SPACE_MM_H__ +#define __UVM_VA_SPACE_MM_H__ + +#include "uvm_linux.h" +#include "uvm_forward_decl.h" +#include "uvm_lock.h" +#include "uvm_test_ioctl.h" +#include "nv-kref.h" + +#include + +struct uvm_va_space_mm_struct +{ + // The mm currently associated with this VA space. Most callers shouldn't + // access this directly, but should instead use uvm_va_space_mm_retain()/ + // uvm_va_space_mm_release(). + // + // The pointer itself is valid between uvm_va_space_mm_register() and + // uvm_va_space_mm_unregister(), but should only be considered usable when + // retained or current. + struct mm_struct *mm; + +#if UVM_CAN_USE_MMU_NOTIFIERS() + struct mmu_notifier mmu_notifier; +#endif + + // Lock protecting the alive and retained_count fields. + uvm_spinlock_t lock; + + // Whether the mm is usable. uvm_va_space_mm_register() marks the mm as + // alive and uvm_va_space_mm_shutdown() marks it as dead. + bool alive; + + // Refcount for uvm_va_space_mm_retain()/uvm_va_space_mm_release() + NvU32 retained_count; + + // Wait queue for threads waiting for retainers to finish (retained_count + // going to 0 when not alive). + wait_queue_head_t last_retainer_wait_queue; + + // State which is only injected by test ioctls + struct + { + // Whether uvm_va_space_mm_shutdown() should do a timed wait for other + // threads to arrive. + bool delay_shutdown; + + bool verbose; + + // Number of threads which have called uvm_va_space_mm_shutdown(). Only + // used when delay_shutdown is true. + atomic_t num_mm_shutdown_threads; + } test; +}; + +// Whether the system can support creating an association between a VA space and +// an mm. +bool uvm_va_space_mm_enabled_system(void); + +// Whether this VA space is associated with an mm. This must not be called +// before uvm_va_space_initialize(). +bool uvm_va_space_mm_enabled(uvm_va_space_t *va_space); + +// Registers current->mm with the va_space. A reference is taken on the mm, +// meaning that until uvm_va_space_mm_unregister() is called the mm will remain +// a valid object in memory (mm_count), but is not guaranteed to remain alive +// (mm_users). +// +// Use uvm_va_space_mm_retain() to retrieve the mm. +// +// Locking: mmap_lock and the VA space lock must both be held for write. +NV_STATUS uvm_va_space_mm_register(uvm_va_space_t *va_space); + +// De-associate the mm from the va_space. This function won't return until all +// in-flight retainers have called uvm_va_space_mm_release(). Subsequent calls +// to uvm_va_space_mm_retain() will return NULL. +// +// This function may invoke uvm_va_space_mm_shutdown() so the caller must not +// hold either mmap_lock or the VA space lock. Since this API must provide the +// same guarantees as uvm_va_space_mm_shutdown(), the caller must also guarantee +// prior to calling this function that all GPUs in this VA space have stopped +// making accesses under this mm and will not be able to start again under that +// VA space. 
+// +// Locking: This function may take both mmap_lock and the VA space lock. +void uvm_va_space_mm_unregister(uvm_va_space_t *va_space); + +// Retains the current mm registered with this VA space. If no mm is currently +// registered, or if the registered mm is in the process of tearing down, NULL +// is returned. Otherwise, the returned mm will remain valid for normal use +// (locking mmap_lock, find_vma, get_user_pages, cgroup-accounted allocations, +// etc) until uvm_va_space_mm_release() is called. +// +// Please, note that a retained mm could have mm->users == 0. +// +// It is NOT necessary to hold the VA space lock when calling this function. +struct mm_struct *uvm_va_space_mm_retain(uvm_va_space_t *va_space); + +// Similar to uvm_va_space_mm_retain(), but falls back to returning current->mm +// when there is no mm registered with the VA space (that is, +// uvm_va_space_mm_enabled() would return false). This is both a convenience and +// an optimization of the common case in which current->mm == va_space_mm. +// uvm_va_space_mm_or_current_release() must be called to release the mm, and it +// must be called from the same thread which called +// uvm_va_space_mm_or_current_retain(). +// +// If a non-NULL mm is returned, the guarantees described by +// uvm_va_space_mm_retain() apply. If uvm_va_space_mm_enabled() is false the +// caller is responsible for validating that the returned mm matches the desired +// mm before performing an operation such as vm_insert_page(). See +// uvm_va_range_vma_check(). +// +// This should not be called from a kernel thread. +struct mm_struct *uvm_va_space_mm_or_current_retain(uvm_va_space_t *va_space); + +// Convenience wrapper around uvm_va_space_mm_retain() which also locks +// mmap_lock for read if valid. +static struct mm_struct *uvm_va_space_mm_retain_lock(uvm_va_space_t *va_space) +{ + struct mm_struct *mm = uvm_va_space_mm_retain(va_space); + if (mm) + uvm_down_read_mmap_lock(mm); + return mm; +} + +// Convenience wrapper around uvm_va_space_mm_or_current_retain() which also +// locks mmap_lock for read if valid. +static struct mm_struct *uvm_va_space_mm_or_current_retain_lock(uvm_va_space_t *va_space) +{ + struct mm_struct *mm = uvm_va_space_mm_or_current_retain(va_space); + if (mm) + uvm_down_read_mmap_lock(mm); + return mm; +} + +// Counterpart to uvm_va_space_mm_retain(). After this call, the mm must not be +// used again without another call to uvm_va_space_mm_retain(). +void uvm_va_space_mm_release(uvm_va_space_t *va_space); + +// Counterpart to uvm_va_space_mm_or_current_retain(). Must be called from the +// same thread which called uvm_va_space_mm_or_current_retain(). mm may be NULL, +// in which case this is a no-op. 
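+//
+// As an illustrative sketch (the body of the critical section is assumed), the
+// uvm_va_space_mm_or_current_retain_lock()/..._release_unlock() convenience
+// helpers pair as:
+//
+//     struct mm_struct *mm = uvm_va_space_mm_or_current_retain_lock(va_space);
+//
+//     ... use mm, which may be NULL; mmap_lock is held for read when it is not ...
+//
+//     uvm_va_space_mm_or_current_release_unlock(va_space, mm);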
+void uvm_va_space_mm_or_current_release(uvm_va_space_t *va_space, struct mm_struct *mm); + +static void uvm_va_space_mm_release_unlock(uvm_va_space_t *va_space, struct mm_struct *mm) +{ + if (mm) { + uvm_up_read_mmap_lock(mm); + uvm_va_space_mm_release(va_space); + } +} + +static void uvm_va_space_mm_or_current_release_unlock(uvm_va_space_t *va_space, struct mm_struct *mm) +{ + if (mm) + uvm_up_read_mmap_lock(mm); + uvm_va_space_mm_or_current_release(va_space, mm); +} + +NV_STATUS uvm_test_va_space_mm_retain(UVM_TEST_VA_SPACE_MM_RETAIN_PARAMS *params, struct file *filp); +NV_STATUS uvm_test_va_space_mm_delay_shutdown(UVM_TEST_VA_SPACE_MM_DELAY_SHUTDOWN_PARAMS *params, struct file *filp); + +#endif // __UVM_VA_SPACE_MM_H__ diff --git a/kernel-open/nvidia-uvm/uvm_volta.c b/kernel-open/nvidia-uvm/uvm_volta.c new file mode 100644 index 000000000..5fb96bb24 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_volta.c @@ -0,0 +1,99 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_hal.h" +#include "uvm_gpu.h" +#include "uvm_mem.h" +#include "uvm_volta_fault_buffer.h" + +void uvm_hal_volta_arch_init_properties(uvm_parent_gpu_t *parent_gpu) +{ + parent_gpu->tlb_batch.va_invalidate_supported = true; + + parent_gpu->tlb_batch.va_range_invalidate_supported = true; + + // TODO: Bug 1767241: Run benchmarks to figure out a good number + parent_gpu->tlb_batch.max_ranges = 8; + + parent_gpu->utlb_per_gpc_count = uvm_volta_get_utlbs_per_gpc(parent_gpu); + + parent_gpu->fault_buffer_info.replayable.utlb_count = parent_gpu->rm_info.gpcCount * parent_gpu->utlb_per_gpc_count; + { + uvm_fault_buffer_entry_t *dummy; + UVM_ASSERT(parent_gpu->fault_buffer_info.replayable.utlb_count <= (1 << (sizeof(dummy->fault_source.utlb_id) * 8))); + } + + // A single top level PDE on Volta covers 128 TB and that's the minimum + // size that can be used. + parent_gpu->rm_va_base = 0; + parent_gpu->rm_va_size = 128ull * 1024 * 1024 * 1024 * 1024; + + parent_gpu->uvm_mem_va_base = 384ull * 1024 * 1024 * 1024 * 1024; + parent_gpu->uvm_mem_va_size = UVM_MEM_VA_SIZE; + + parent_gpu->peer_copy_mode = UVM_GPU_PEER_COPY_MODE_VIRTUAL; + + // Not all units on Volta support 49-bit addressing, including those which + // access channel buffers. 
+ parent_gpu->max_channel_va = 1ULL << 40; + + parent_gpu->max_host_va = 1ULL << 40; + + // Volta can map sysmem with any page size + parent_gpu->can_map_sysmem_with_large_pages = true; + + // Prefetch instructions will generate faults + parent_gpu->prefetch_fault_supported = true; + + // Pascal and Volta require post-invalidate membars to flush out HSHUB. See + // bug 1975028. All GV100-class chips supported by UVM have HSHUB. + UVM_ASSERT(parent_gpu->rm_info.gpuArch == NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GV100); + parent_gpu->num_hshub_tlb_invalidate_membars = 2; + + // Volta can place GPFIFO in vidmem + parent_gpu->gpfifo_in_vidmem_supported = true; + + parent_gpu->replayable_faults_supported = true; + + parent_gpu->non_replayable_faults_supported = true; + + parent_gpu->access_counters_supported = true; + + parent_gpu->fault_cancel_va_supported = true; + + parent_gpu->scoped_atomics_supported = true; + + // SW method is not currently supported on Volta. + // See Bug 3254782: [RM] Support clear_faulted SW method on Volta and Turing + parent_gpu->has_clear_faulted_channel_sw_method = false; + + parent_gpu->has_clear_faulted_channel_method = true; + + parent_gpu->sparse_mappings_supported = true; + + parent_gpu->map_remap_larger_page_promotion = false; + + parent_gpu->smc.supported = false; + + parent_gpu->plc_supported = false; +} diff --git a/kernel-open/nvidia-uvm/uvm_volta_access_counter_buffer.c b/kernel-open/nvidia-uvm/uvm_volta_access_counter_buffer.c new file mode 100644 index 000000000..049a13681 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_volta_access_counter_buffer.c @@ -0,0 +1,228 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" +#include "clc365.h" +#include "uvm_volta_fault_buffer.h" + +typedef struct { + NvU8 bufferEntry[NVC365_NOTIFY_BUF_SIZE]; +} access_counter_buffer_entry_c365_t; + +void uvm_hal_volta_enable_access_counter_notifications(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *reg; + NvU32 mask; + + reg = parent_gpu->access_counter_buffer_info.rm_info.pHubIntrEnSet; + mask = parent_gpu->access_counter_buffer_info.rm_info.accessCounterMask; + + UVM_GPU_WRITE_ONCE(*reg, mask); +} + +void uvm_hal_volta_disable_access_counter_notifications(uvm_parent_gpu_t *parent_gpu) +{ + volatile NvU32 *reg; + NvU32 mask; + + reg = parent_gpu->access_counter_buffer_info.rm_info.pHubIntrEnClear; + mask = parent_gpu->access_counter_buffer_info.rm_info.accessCounterMask; + + UVM_GPU_WRITE_ONCE(*reg, mask); +} + +void uvm_hal_volta_clear_access_counter_notifications(uvm_parent_gpu_t *parent_gpu, NvU32 get) +{ + // No-op, this function is only used by pulse-based interrupt GPUs. +} + +NvU32 uvm_hal_volta_access_counter_buffer_entry_size(uvm_parent_gpu_t *parent_gpu) +{ + return NVC365_NOTIFY_BUF_SIZE; +} + +static uvm_aperture_t get_access_counter_inst_aperture(NvU32 *access_counter_entry) +{ + NvU32 hw_aperture_value = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, INST_APERTURE); + + switch (hw_aperture_value) { + case NVC365_NOTIFY_BUF_ENTRY_APERTURE_VID_MEM: + return UVM_APERTURE_VID; + case NVC365_NOTIFY_BUF_ENTRY_APERTURE_SYS_MEM_COHERENT: + case NVC365_NOTIFY_BUF_ENTRY_APERTURE_SYS_MEM_NONCOHERENT: + return UVM_APERTURE_SYS; + } + + UVM_ASSERT_MSG(false, "Invalid inst aperture value: %d\n", hw_aperture_value); + return UVM_APERTURE_MAX; +} + +static uvm_aperture_t get_access_counter_aperture(NvU32 *access_counter_entry) +{ + NvU32 hw_aperture_value = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, APERTURE); + NvU32 peer_id = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, PEER_ID); + + switch (hw_aperture_value) { + case NVC365_NOTIFY_BUF_ENTRY_APERTURE_VID_MEM: + return UVM_APERTURE_VID; + case NVC365_NOTIFY_BUF_ENTRY_APERTURE_PEER_MEM: + return UVM_APERTURE_PEER(peer_id); + case NVC365_NOTIFY_BUF_ENTRY_APERTURE_SYS_MEM_COHERENT: + case NVC365_NOTIFY_BUF_ENTRY_APERTURE_SYS_MEM_NONCOHERENT: + return UVM_APERTURE_SYS; + } + + UVM_ASSERT_MSG(false, "Invalid aperture value: %d\n", hw_aperture_value); + return UVM_APERTURE_MAX; +} + +static uvm_gpu_address_t get_address(uvm_parent_gpu_t *parent_gpu, NvU32 *access_counter_entry) +{ + NvU64 address; + bool is_virtual; + NvU64 addr_hi = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, ADDR_HI); + NvU64 addr_lo = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, ADDR_LO); + NvU32 addr_type_value = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, ADDR_TYPE); + + address = addr_lo + (addr_hi << HWSIZE_MW(C365, NOTIFY_BUF_ENTRY, ADDR_LO)); + is_virtual = (addr_type_value == NVC365_NOTIFY_BUF_ENTRY_ADDR_TYPE_GVA); + + if (is_virtual) { + address = uvm_parent_gpu_canonical_address(parent_gpu, address); + return uvm_gpu_address_virtual(address); + } + else { + uvm_aperture_t aperture = get_access_counter_aperture(access_counter_entry); + UVM_ASSERT_MSG(addr_type_value == NVC365_NOTIFY_BUF_ENTRY_ADDR_TYPE_GPA, + "Invalid address type%u\n", addr_type_value); + + return uvm_gpu_address_physical(aperture, address); + } +} + 
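+// Note on get_address() above: the notification packet stores the accessed
+// address split across the ADDR_LO and ADDR_HI fields, so the 64-bit value is
+// reassembled by shifting the high field by the width of ADDR_LO. Virtual
+// addresses are then converted to the GPU's canonical form, while physical
+// addresses are tagged with the aperture decoded from the entry.
+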
+static uvm_access_counter_type_t get_access_counter_type(NvU32 *access_counter_entry) +{ + NvU32 type_value = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, TYPE); + if (type_value == NVC365_NOTIFY_BUF_ENTRY_TYPE_CPU) + return UVM_ACCESS_COUNTER_TYPE_MOMC; + else + return UVM_ACCESS_COUNTER_TYPE_MIMC; +} + +static NvU32 *get_access_counter_buffer_entry(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + access_counter_buffer_entry_c365_t *buffer_start; + NvU32 *access_counter_entry; + + UVM_ASSERT(index < parent_gpu->access_counter_buffer_info.max_notifications); + + buffer_start = (access_counter_buffer_entry_c365_t *)parent_gpu->access_counter_buffer_info.rm_info.bufferAddress; + access_counter_entry = (NvU32 *)&buffer_start[index]; + + return access_counter_entry; +} + +bool uvm_hal_volta_access_counter_buffer_entry_is_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + NvU32 *access_counter_entry; + bool is_valid; + + access_counter_entry = get_access_counter_buffer_entry(parent_gpu, index); + + is_valid = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, VALID); + + return is_valid; +} + +void uvm_hal_volta_access_counter_buffer_entry_clear_valid(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + NvU32 *access_counter_entry; + + access_counter_entry = get_access_counter_buffer_entry(parent_gpu, index); + + WRITE_HWCONST_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, VALID, FALSE); +} + +void uvm_hal_volta_access_counter_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_access_counter_buffer_entry_t *buffer_entry) +{ + NvU32 *access_counter_entry; + + // Valid bit must be set before this function is called + UVM_ASSERT(uvm_hal_volta_access_counter_buffer_entry_is_valid(parent_gpu, index)); + + access_counter_entry = get_access_counter_buffer_entry(parent_gpu, index); + + buffer_entry->counter_type = get_access_counter_type(access_counter_entry); + + buffer_entry->address = get_address(parent_gpu, access_counter_entry); + + if (buffer_entry->address.is_virtual) { + NvU64 inst_hi, inst_lo; + + inst_hi = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, INST_HI); + inst_lo = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, INST_LO); + buffer_entry->virtual_info.instance_ptr.address = + inst_lo + (inst_hi << HWSIZE_MW(C365, NOTIFY_BUF_ENTRY, INST_LO)); + + // HW value contains the 4K page number. Shift to build the full address + buffer_entry->virtual_info.instance_ptr.address <<= 12; + + buffer_entry->virtual_info.instance_ptr.aperture = get_access_counter_inst_aperture(access_counter_entry); + + buffer_entry->virtual_info.mmu_engine_id = + READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, MMU_ENGINE_ID); + + // MMU engine id aligns with the fault buffer packets. Therefore, we + // reuse the helpers to compute the MMU engine type and the VE ID from + // the fault buffer class + buffer_entry->virtual_info.mmu_engine_type = + parent_gpu->arch_hal->mmu_engine_id_to_type(buffer_entry->virtual_info.mmu_engine_id); + + buffer_entry->virtual_info.ve_id = + parent_gpu->fault_buffer_hal->get_ve_id(buffer_entry->virtual_info.mmu_engine_id, + buffer_entry->virtual_info.mmu_engine_type); + } + else if (buffer_entry->counter_type == UVM_ACCESS_COUNTER_TYPE_MIMC) { + // Ignore any set bit beyond 47 since it is the maximum physical address + // supported by the GPU. See the definition of + // uvm_gpu_t::dma_addressable_start for why higher bits might be set. 
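+        // Illustrative note: (0x1UL << 47) - 1 evaluates to 0x7fffffffffff,
+        // i.e. it keeps physical address bits 46:0 and drops anything above
+        // the GPU's 47-bit physical address range described above.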
+ const NvU64 mask_46_0 = (0x1UL << 47) - 1; + buffer_entry->address.address &= mask_46_0; + } + + buffer_entry->counter_value = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, COUNTER_VAL); + + buffer_entry->sub_granularity = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, SUB_GRANULARITY); + + buffer_entry->bank = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, BANK); + + buffer_entry->tag = READ_HWVALUE_MW(access_counter_entry, C365, NOTIFY_BUF_ENTRY, NOTIFY_TAG); + + // Automatically clear valid bit for the entry in the access counter buffer + uvm_hal_volta_access_counter_buffer_entry_clear_valid(parent_gpu, index); +} diff --git a/kernel-open/nvidia-uvm/uvm_volta_fault_buffer.c b/kernel-open/nvidia-uvm/uvm_volta_fault_buffer.c new file mode 100644 index 000000000..07ca09263 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_volta_fault_buffer.c @@ -0,0 +1,347 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_gpu.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "hwref/volta/gv100/dev_fault.h" +#include "hwref/volta/gv100/dev_fb.h" +#include "clc369.h" +#include "uvm_volta_fault_buffer.h" + +typedef struct { + NvU8 bufferEntry[NVC369_BUF_SIZE]; +} fault_buffer_entry_c369_t; + +NvU32 uvm_hal_volta_fault_buffer_read_put(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 put = UVM_GPU_READ_ONCE(*parent_gpu->fault_buffer_info.rm_info.replayable.pFaultBufferPut); + NvU32 index = READ_HWVALUE(put, _PFB_PRI_MMU, FAULT_BUFFER_PUT, PTR); + UVM_ASSERT(READ_HWVALUE(put, _PFB_PRI_MMU, FAULT_BUFFER_PUT, GETPTR_CORRUPTED) == + NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED_NO); + + return index; +} + +NvU32 uvm_hal_volta_fault_buffer_read_get(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 get = UVM_GPU_READ_ONCE(*parent_gpu->fault_buffer_info.rm_info.replayable.pFaultBufferGet); + UVM_ASSERT(get < parent_gpu->fault_buffer_info.replayable.max_faults); + + return READ_HWVALUE(get, _PFB_PRI_MMU, FAULT_BUFFER_GET, PTR); +} + +void uvm_hal_volta_fault_buffer_write_get(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + NvU32 get = HWVALUE(_PFB_PRI_MMU, FAULT_BUFFER_GET, PTR, index); + UVM_ASSERT(index < parent_gpu->fault_buffer_info.replayable.max_faults); + + // If HW has detected an overflow condition (PUT == GET - 1 and a fault has + // arrived, which is dropped due to no more space in the fault buffer), it will + // not deliver any more faults into the buffer until the overflow condition has + // been cleared. The overflow condition is cleared by updating the GET index to + // indicate space in the buffer and writing 1 to the OVERFLOW bit in GET. + // Unfortunately, this can not be done in the same write because it can collide + // with an arriving fault on the same cycle, resulting in the overflow condition + // being instantly reasserted. + // However, if the index is updated first and then the OVERFLOW bit is cleared + // such a collision will not cause a reassertion of the overflow condition. + UVM_GPU_WRITE_ONCE(*parent_gpu->fault_buffer_info.rm_info.replayable.pFaultBufferGet, get); + + // Clear the getptr_corrupted/overflow bits. + get |= HWCONST(_PFB_PRI_MMU, FAULT_BUFFER_GET, GETPTR_CORRUPTED, CLEAR) | + HWCONST(_PFB_PRI_MMU, FAULT_BUFFER_GET, OVERFLOW, CLEAR); + UVM_GPU_WRITE_ONCE(*parent_gpu->fault_buffer_info.rm_info.replayable.pFaultBufferGet, get); +} + +// TODO: Bug 1835884: [uvm] Query the maximum number of subcontexts from RM +// ... 
to validate the ve_id +#define MAX_SUBCONTEXTS 64 +NvU8 uvm_hal_volta_fault_buffer_get_ve_id(NvU16 mmu_engine_id, uvm_mmu_engine_type_t mmu_engine_type) +{ + // Only graphics engines can generate MMU faults from different subcontexts + if (mmu_engine_type == UVM_MMU_ENGINE_TYPE_GRAPHICS) { + NvU16 ve_id = mmu_engine_id - NV_PFAULT_MMU_ENG_ID_GRAPHICS; + UVM_ASSERT(ve_id < MAX_SUBCONTEXTS); + + return (NvU8)ve_id; + } + else { + return 0; + } +} + +static uvm_fault_access_type_t get_fault_access_type(const NvU32 *fault_entry) +{ + NvU32 hw_access_type_value = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, ACCESS_TYPE); + + switch (hw_access_type_value) + { + case NV_PFAULT_ACCESS_TYPE_PHYS_READ: + case NV_PFAULT_ACCESS_TYPE_VIRT_READ: + return UVM_FAULT_ACCESS_TYPE_READ; + case NV_PFAULT_ACCESS_TYPE_PHYS_WRITE: + case NV_PFAULT_ACCESS_TYPE_VIRT_WRITE: + return UVM_FAULT_ACCESS_TYPE_WRITE; + case NV_PFAULT_ACCESS_TYPE_PHYS_ATOMIC: + case NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_STRONG: + return UVM_FAULT_ACCESS_TYPE_ATOMIC_STRONG; + case NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_WEAK: + return UVM_FAULT_ACCESS_TYPE_ATOMIC_WEAK; + case NV_PFAULT_ACCESS_TYPE_PHYS_PREFETCH: + case NV_PFAULT_ACCESS_TYPE_VIRT_PREFETCH: + return UVM_FAULT_ACCESS_TYPE_PREFETCH; + } + + UVM_ASSERT_MSG(false, "Invalid fault access type value: %d\n", hw_access_type_value); + + return UVM_FAULT_ACCESS_TYPE_COUNT; +} + +static bool is_fault_address_virtual(const NvU32 *fault_entry) +{ + NvU32 hw_access_type_value = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, ACCESS_TYPE); + + switch (hw_access_type_value) + { + case NV_PFAULT_ACCESS_TYPE_PHYS_READ: + case NV_PFAULT_ACCESS_TYPE_PHYS_WRITE: + case NV_PFAULT_ACCESS_TYPE_PHYS_ATOMIC: + case NV_PFAULT_ACCESS_TYPE_PHYS_PREFETCH: + return false; + case NV_PFAULT_ACCESS_TYPE_VIRT_READ: + case NV_PFAULT_ACCESS_TYPE_VIRT_WRITE: + case NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_STRONG: + case NV_PFAULT_ACCESS_TYPE_VIRT_ATOMIC_WEAK: + case NV_PFAULT_ACCESS_TYPE_VIRT_PREFETCH: + return true; + } + + UVM_ASSERT_MSG(false, "Invalid fault access type value: %d\n", hw_access_type_value); + + return UVM_FAULT_ACCESS_TYPE_COUNT; +} + +static uvm_fault_type_t get_fault_type(const NvU32 *fault_entry) +{ + NvU32 hw_fault_type_value = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, FAULT_TYPE); + + switch (hw_fault_type_value) + { + case NV_PFAULT_FAULT_TYPE_PDE: + return UVM_FAULT_TYPE_INVALID_PDE; + case NV_PFAULT_FAULT_TYPE_PTE: + return UVM_FAULT_TYPE_INVALID_PTE; + case NV_PFAULT_FAULT_TYPE_RO_VIOLATION: + return UVM_FAULT_TYPE_WRITE; + case NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION: + return UVM_FAULT_TYPE_ATOMIC; + case NV_PFAULT_FAULT_TYPE_WO_VIOLATION: + return UVM_FAULT_TYPE_READ; + + case NV_PFAULT_FAULT_TYPE_PDE_SIZE: + return UVM_FAULT_TYPE_PDE_SIZE; + case NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION: + return UVM_FAULT_TYPE_VA_LIMIT_VIOLATION; + case NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK: + return UVM_FAULT_TYPE_UNBOUND_INST_BLOCK; + case NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION: + return UVM_FAULT_TYPE_PRIV_VIOLATION; + case NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION: + return UVM_FAULT_TYPE_PITCH_MASK_VIOLATION; + case NV_PFAULT_FAULT_TYPE_WORK_CREATION: + return UVM_FAULT_TYPE_WORK_CREATION; + case NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE: + return UVM_FAULT_TYPE_UNSUPPORTED_APERTURE; + case NV_PFAULT_FAULT_TYPE_COMPRESSION_FAILURE: + return UVM_FAULT_TYPE_COMPRESSION_FAILURE; + case NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND: + return UVM_FAULT_TYPE_UNSUPPORTED_KIND; + case NV_PFAULT_FAULT_TYPE_REGION_VIOLATION: + 
return UVM_FAULT_TYPE_REGION_VIOLATION; + case NV_PFAULT_FAULT_TYPE_POISONED: + return UVM_FAULT_TYPE_POISONED; + } + + UVM_ASSERT_MSG(false, "Invalid fault type value: %d\n", hw_fault_type_value); + + return UVM_FAULT_TYPE_COUNT; +} + +static uvm_fault_client_type_t get_fault_client_type(const NvU32 *fault_entry) +{ + NvU32 hw_client_type_value = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, MMU_CLIENT_TYPE); + + switch (hw_client_type_value) + { + case NV_PFAULT_MMU_CLIENT_TYPE_GPC: + return UVM_FAULT_CLIENT_TYPE_GPC; + case NV_PFAULT_MMU_CLIENT_TYPE_HUB: + return UVM_FAULT_CLIENT_TYPE_HUB; + } + + UVM_ASSERT_MSG(false, "Invalid mmu client type value: %d\n", hw_client_type_value); + + return UVM_FAULT_CLIENT_TYPE_COUNT; +} + +static uvm_aperture_t get_fault_inst_aperture(const NvU32 *fault_entry) +{ + NvU32 hw_aperture_value = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, INST_APERTURE); + + switch (hw_aperture_value) + { + case NVC369_BUF_ENTRY_INST_APERTURE_VID_MEM: + return UVM_APERTURE_VID; + case NVC369_BUF_ENTRY_INST_APERTURE_SYS_MEM_COHERENT: + case NVC369_BUF_ENTRY_INST_APERTURE_SYS_MEM_NONCOHERENT: + return UVM_APERTURE_SYS; + } + + UVM_ASSERT_MSG(false, "Invalid inst aperture value: %d\n", hw_aperture_value); + + return UVM_APERTURE_MAX; +} + +static NvU32 *get_fault_buffer_entry(uvm_parent_gpu_t *parent_gpu, NvU32 index) +{ + fault_buffer_entry_c369_t *buffer_start; + NvU32 *fault_entry; + + UVM_ASSERT(index < parent_gpu->fault_buffer_info.replayable.max_faults); + + buffer_start = (fault_buffer_entry_c369_t *)parent_gpu->fault_buffer_info.rm_info.replayable.bufferAddress; + fault_entry = (NvU32 *)&buffer_start[index]; + + return fault_entry; +} + +static void parse_fault_entry_common(uvm_parent_gpu_t *parent_gpu, + NvU32 *fault_entry, + uvm_fault_buffer_entry_t *buffer_entry) +{ + NV_STATUS status; + NvU64 addr_hi, addr_lo; + NvU64 timestamp_hi, timestamp_lo; + bool replayable_fault_enabled; + + status = NV_OK; + + addr_hi = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, INST_HI); + addr_lo = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, INST_LO); + buffer_entry->instance_ptr.address = addr_lo + (addr_hi << HWSIZE_MW(C369, BUF_ENTRY, INST_LO)); + // HW value contains the 4K page number. Shift to build the full address + buffer_entry->instance_ptr.address <<= 12; + + buffer_entry->instance_ptr.aperture = get_fault_inst_aperture(fault_entry); + + addr_hi = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, ADDR_HI); + addr_lo = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, ADDR_LO); + // HW value contains the 4K page number. 
Shift to build the full address + buffer_entry->fault_address = (addr_lo + (addr_hi << HWSIZE_MW(C369, BUF_ENTRY, ADDR_LO))) << 12; + buffer_entry->fault_address = uvm_parent_gpu_canonical_address(parent_gpu, buffer_entry->fault_address); + + timestamp_hi = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, TIMESTAMP_HI); + timestamp_lo = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, TIMESTAMP_LO); + buffer_entry->timestamp = timestamp_lo + (timestamp_hi << HWSIZE_MW(C369, BUF_ENTRY, TIMESTAMP_LO)); + + buffer_entry->fault_type = get_fault_type(fault_entry); + + buffer_entry->fault_access_type = get_fault_access_type(fault_entry); + + buffer_entry->fault_source.client_type = get_fault_client_type(fault_entry); + + buffer_entry->fault_source.client_id = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, CLIENT); + BUILD_BUG_ON(sizeof(buffer_entry->fault_source.client_id) * 8 < DRF_SIZE_MW(NVC369_BUF_ENTRY_CLIENT)); + + buffer_entry->fault_source.gpc_id = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, GPC_ID); + BUILD_BUG_ON(sizeof(buffer_entry->fault_source.gpc_id) * 8 < DRF_SIZE_MW(NVC369_BUF_ENTRY_GPC_ID)); + + buffer_entry->is_replayable = (READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, REPLAYABLE_FAULT) == + NVC369_BUF_ENTRY_REPLAYABLE_FAULT_TRUE); + + // Compute global uTLB id + if (buffer_entry->fault_source.client_type == UVM_FAULT_CLIENT_TYPE_GPC) { + NvU16 gpc_utlb_id = parent_gpu->arch_hal->mmu_client_id_to_utlb_id(buffer_entry->fault_source.client_id); + NvU32 utlb_id; + UVM_ASSERT(gpc_utlb_id < parent_gpu->utlb_per_gpc_count); + + utlb_id = buffer_entry->fault_source.gpc_id * parent_gpu->utlb_per_gpc_count + gpc_utlb_id; + UVM_ASSERT(utlb_id < parent_gpu->fault_buffer_info.replayable.utlb_count); + + buffer_entry->fault_source.utlb_id = utlb_id; + } + else if (buffer_entry->fault_source.client_type == UVM_FAULT_CLIENT_TYPE_HUB) { + buffer_entry->fault_source.utlb_id = 0; + } + + buffer_entry->fault_source.mmu_engine_id = READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, ENGINE_ID); + BUILD_BUG_ON(sizeof(buffer_entry->fault_source.mmu_engine_id) * 8 < DRF_SIZE_MW(NVC369_BUF_ENTRY_ENGINE_ID)); + + buffer_entry->fault_source.mmu_engine_type = + parent_gpu->arch_hal->mmu_engine_id_to_type(buffer_entry->fault_source.mmu_engine_id); + + buffer_entry->fault_source.ve_id = + parent_gpu->fault_buffer_hal->get_ve_id(buffer_entry->fault_source.mmu_engine_id, + buffer_entry->fault_source.mmu_engine_type); + BUILD_BUG_ON(1 << (sizeof(buffer_entry->fault_source.ve_id) * 8) < MAX_SUBCONTEXTS); + + buffer_entry->is_virtual = is_fault_address_virtual(fault_entry); + + buffer_entry->in_protected_mode = (READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, PROTECTED_MODE) == + NVC369_BUF_ENTRY_PROTECTED_MODE_TRUE); + + replayable_fault_enabled = (READ_HWVALUE_MW(fault_entry, C369, BUF_ENTRY, REPLAYABLE_FAULT_EN) == + NVC369_BUF_ENTRY_REPLAYABLE_FAULT_EN_TRUE); + UVM_ASSERT_MSG(replayable_fault_enabled, "Fault with REPLAYABLE_FAULT_EN bit unset\n"); +} + +void uvm_hal_volta_fault_buffer_parse_entry(uvm_parent_gpu_t *parent_gpu, + NvU32 index, + uvm_fault_buffer_entry_t *buffer_entry) +{ + NvU32 *fault_entry; + BUILD_BUG_ON(NVC369_BUF_SIZE > UVM_GPU_MMU_MAX_FAULT_PACKET_SIZE); + + fault_entry = get_fault_buffer_entry(parent_gpu, index); + + // Valid bit must be set before this function is called + UVM_ASSERT(parent_gpu->fault_buffer_hal->entry_is_valid(parent_gpu, index)); + + parse_fault_entry_common(parent_gpu, fault_entry, buffer_entry); + + // Automatically clear valid bit for the entry in the fault buffer + 
parent_gpu->fault_buffer_hal->entry_clear_valid(parent_gpu, index); +} + +void uvm_hal_volta_fault_buffer_parse_non_replayable_entry(uvm_parent_gpu_t *parent_gpu, + void *fault_packet, + uvm_fault_buffer_entry_t *buffer_entry) +{ + parse_fault_entry_common(parent_gpu, fault_packet, buffer_entry); + + // No need to clear the valid bit since the fault buffer for non-replayable + // faults is owned by RM and we are just parsing a copy of the packet +} diff --git a/kernel-open/nvidia-uvm/uvm_volta_fault_buffer.h b/kernel-open/nvidia-uvm/uvm_volta_fault_buffer.h new file mode 100644 index 000000000..b95252285 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_volta_fault_buffer.h @@ -0,0 +1,58 @@ +/******************************************************************************* + Copyright (c) 2016-2019 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#ifndef __UVM_HAL_VOLTA_FAULT_BUFFER_H__ +#define __UVM_HAL_VOLTA_FAULT_BUFFER_H__ + +#include "nvtypes.h" +#include "uvm_common.h" +#include "uvm_gpu.h" + +// There are up to 8 TPCs per GPC in Volta, and there is 1 LTP uTLB per TPC. Besides, there is one RGG uTLB per GPC. +// Each TPC has a number of clients that can make requests to its uTLB: 1xTPCCS, 1xPE, 2xT1. The client ids are local +// to their GPC and the id mapping is linear across TPCs: +// TPC_n has TPCCS_n, PE_n, T1_p, and T1_q, where p=2*n and q=p+1. +// +// NV_PFAULT_CLIENT_GPC_LTP_UTLB_n and NV_PFAULT_CLIENT_GPC_RGG_UTLB enums can be ignored. These will never be reported +// in a fault message, and should never be used in an invalidate. Therefore, we define our own values. 
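+//
+// Worked example of the mapping above: for TPC 3, the clients PE_3, TPCCS_3,
+// T1_6 and T1_7 (p = 2*3 = 6, q = p+1 = 7) all share that TPC's LTP uTLB,
+// i.e. UVM_VOLTA_GPC_UTLB_ID_LTP3 below. The global uTLB id used by the fault
+// servicing code is then gpc_id * utlbs_per_gpc + gpc_utlb_id (see
+// parse_fault_entry_common() in uvm_volta_fault_buffer.c).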
+typedef enum { + UVM_VOLTA_GPC_UTLB_ID_RGG = 0, + UVM_VOLTA_GPC_UTLB_ID_LTP0 = 1, + UVM_VOLTA_GPC_UTLB_ID_LTP1 = 2, + UVM_VOLTA_GPC_UTLB_ID_LTP2 = 3, + UVM_VOLTA_GPC_UTLB_ID_LTP3 = 4, + UVM_VOLTA_GPC_UTLB_ID_LTP4 = 5, + UVM_VOLTA_GPC_UTLB_ID_LTP5 = 6, + UVM_VOLTA_GPC_UTLB_ID_LTP6 = 7, + UVM_VOLTA_GPC_UTLB_ID_LTP7 = 8, + + UVM_VOLTA_GPC_UTLB_COUNT, +} uvm_volta_gpc_utlb_id_t; + +static NvU32 uvm_volta_get_utlbs_per_gpc(uvm_parent_gpu_t *parent_gpu) +{ + NvU32 utlbs = parent_gpu->rm_info.maxTpcPerGpcCount + 1; + UVM_ASSERT(utlbs <= UVM_VOLTA_GPC_UTLB_COUNT); + return utlbs; +} +#endif diff --git a/kernel-open/nvidia-uvm/uvm_volta_host.c b/kernel-open/nvidia-uvm/uvm_volta_host.c new file mode 100644 index 000000000..b54931e8c --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_volta_host.c @@ -0,0 +1,340 @@ +/******************************************************************************* + Copyright (c) 2016-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "uvm_linux.h" +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_push.h" +#include "uvm_user_channel.h" +#include "clc36f.h" + +void uvm_hal_volta_host_write_gpu_put(uvm_channel_t *channel, NvU32 gpu_put) +{ + // We need to add a BAR1 read if GPPut is located in sysmem. This + // guarantees that any in-flight BAR1 writes from the CPU will have reached + // the GPU by the time the GPU reads the updated GPPut. Read the provided + // BAR1 mapping in channel_info. + if (channel->channel_info.dummyBar1Mapping) + UVM_GPU_READ_ONCE(*channel->channel_info.dummyBar1Mapping); + + UVM_GPU_WRITE_ONCE(*channel->channel_info.gpPut, gpu_put); + + wmb(); + + UVM_GPU_WRITE_ONCE(*channel->channel_info.workSubmissionOffset, channel->channel_info.workSubmissionToken); +} + +static NvU32 fault_cancel_va_mode_to_cancel_access_type(uvm_fault_cancel_va_mode_t cancel_va_mode) +{ + // There are only two logical cases from the perspective of UVM. Accesses to + // an invalid address, which will cancel all accesses on the page, and + // accesses with an invalid type on a read-only page, which will cancel all + // write/atomic accesses on the page. 
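+    // Note: the value returned here is OR'd into MEM_OP_C by
+    // uvm_hal_volta_cancel_faults_va() below, so the targeted cancel either
+    // drops every outstanding access to the page (VIRT_ALL) or only the
+    // write/atomic accesses (VIRT_WRITE_AND_ATOMIC).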
+ switch (cancel_va_mode) + { + case UVM_FAULT_CANCEL_VA_MODE_ALL: + return HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_ACCESS_TYPE, VIRT_ALL); + case UVM_FAULT_CANCEL_VA_MODE_WRITE_AND_ATOMIC: + return HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_ACCESS_TYPE, VIRT_WRITE_AND_ATOMIC); + default: + UVM_ASSERT_MSG(false, "Invalid cancel_va_mode %d\n", cancel_va_mode); + } + + return 0; +} + +void uvm_hal_volta_cancel_faults_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + const uvm_fault_buffer_entry_t *fault_entry, + uvm_fault_cancel_va_mode_t cancel_va_mode) +{ + NvU32 aperture_value; + NvU32 pdb_lo; + NvU32 pdb_hi; + NvU32 addr_lo; + NvU32 addr_hi; + NvU32 access_type_value; + NvU64 addr = fault_entry->fault_address; + NvU32 mmu_engine_id = fault_entry->fault_source.mmu_engine_id; + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx not aligned to 4KB\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + access_type_value = fault_cancel_va_mode_to_cancel_access_type(cancel_va_mode); + + UVM_ASSERT_MSG(IS_ALIGNED(addr, 1 << 12), "addr 0x%llx not aligned to 4KB\n", addr); + addr >>= 12; + + addr_lo = addr & HWMASK(C36F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + addr_hi = addr >> HWSIZE(C36F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + + NV_PUSH_4U(C36F, MEM_OP_A, HWCONST(C36F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWVALUE(C36F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO, addr_lo) | + HWVALUE(C36F, MEM_OP_A, TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID, mmu_engine_id), + MEM_OP_B, HWVALUE(C36F, MEM_OP_B, TLB_INVALIDATE_TARGET_ADDR_HI, addr_hi), + MEM_OP_C, HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_REPLAY, CANCEL_VA_GLOBAL) | + HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_ACK_TYPE, NONE) | + access_type_value | + aperture_value, + MEM_OP_D, HWCONST(C36F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE_TARGETED) | + HWVALUE(C36F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); +} + +void uvm_hal_volta_host_clear_faulted_channel_method(uvm_push_t *push, + uvm_user_channel_t *user_channel, + const uvm_fault_buffer_entry_t *fault) +{ + NvU32 clear_type_value = 0; + + UVM_ASSERT(user_channel->gpu->parent->has_clear_faulted_channel_method); + + if (fault->fault_source.mmu_engine_type == UVM_MMU_ENGINE_TYPE_HOST) { + clear_type_value = HWCONST(C36F, CLEAR_FAULTED, TYPE, PBDMA_FAULTED); + } + else if (fault->fault_source.mmu_engine_type == UVM_MMU_ENGINE_TYPE_CE) { + clear_type_value = HWCONST(C36F, CLEAR_FAULTED, TYPE, ENG_FAULTED); + } + else { + UVM_ASSERT_MSG(false, "Unsupported MMU engine type %s\n", + uvm_mmu_engine_type_string(fault->fault_source.mmu_engine_type)); + } + + NV_PUSH_1U(C36F, CLEAR_FAULTED, HWVALUE(C36F, CLEAR_FAULTED, CHID, user_channel->hw_channel_id) | + clear_type_value); +} + +void uvm_hal_volta_access_counter_clear_all(uvm_push_t *push) +{ + NV_PUSH_4U(C36F, MEM_OP_A, 0, + MEM_OP_B, 0, + MEM_OP_C, 0, + MEM_OP_D, HWCONST(C36F, MEM_OP_D, 
OPERATION, ACCESS_COUNTER_CLR) | + HWCONST(C36F, MEM_OP_D, ACCESS_COUNTER_CLR_TYPE, ALL)); +} + +static NvU32 get_access_counter_type_value(uvm_access_counter_type_t type) +{ + if (type == UVM_ACCESS_COUNTER_TYPE_MIMC) + return NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC; + else if (type == UVM_ACCESS_COUNTER_TYPE_MOMC) + return NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC; + else + UVM_ASSERT_MSG(false, "Invalid access counter type %u\n", type); + + return 0; +} + +static NvU32 get_access_counter_targeted_type_value(uvm_access_counter_type_t type) +{ + if (type == UVM_ACCESS_COUNTER_TYPE_MIMC) + return NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC; + else if (type == UVM_ACCESS_COUNTER_TYPE_MOMC) + return NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC; + else + UVM_ASSERT_MSG(false, "Invalid access counter type %u\n", type); + + return 0; +} + +void uvm_hal_volta_access_counter_clear_type(uvm_push_t *push, uvm_access_counter_type_t type) +{ + NvU32 type_value = get_access_counter_type_value(type); + + NV_PUSH_4U(C36F, MEM_OP_A, 0, + MEM_OP_B, 0, + MEM_OP_C, 0, + MEM_OP_D, HWCONST(C36F, MEM_OP_D, OPERATION, ACCESS_COUNTER_CLR) | + HWVALUE(C36F, MEM_OP_D, ACCESS_COUNTER_CLR_TYPE, type_value)); +} + +void uvm_hal_volta_access_counter_clear_targeted(uvm_push_t *push, + const uvm_access_counter_buffer_entry_t *buffer_entry) +{ + NvU32 targeted_type_value = get_access_counter_targeted_type_value(buffer_entry->counter_type); + + NV_PUSH_4U(C36F, MEM_OP_A, 0, + MEM_OP_B, 0, + MEM_OP_C, HWVALUE(C36F, MEM_OP_C, ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG, buffer_entry->tag), + MEM_OP_D, HWCONST(C36F, MEM_OP_D, OPERATION, ACCESS_COUNTER_CLR) | + HWCONST(C36F, MEM_OP_D, ACCESS_COUNTER_CLR_TYPE, TARGETED) | + HWVALUE(C36F, MEM_OP_D, ACCESS_COUNTER_CLR_TARGETED_TYPE, targeted_type_value) | + HWVALUE(C36F, MEM_OP_D, ACCESS_COUNTER_CLR_TARGETED_BANK, buffer_entry->bank)); +} + +void uvm_hal_volta_host_tlb_invalidate_va(uvm_push_t *push, + uvm_gpu_phys_address_t pdb, + NvU32 depth, + NvU64 base, + NvU64 size, + NvU32 page_size, + uvm_membar_t membar) +{ + NvU32 aperture_value; + NvU32 page_table_level; + NvU32 pdb_lo; + NvU32 pdb_hi; + NvU32 ack_value = 0; + NvU32 va_lo; + NvU32 va_hi; + NvU64 end; + NvU64 actual_base; + NvU64 actual_size; + NvU64 actual_end; + NvU32 log2_invalidation_size; + uvm_gpu_t *gpu = uvm_push_get_gpu(push); + + UVM_ASSERT_MSG(IS_ALIGNED(page_size, 1 << 12), "page_size 0x%x\n", page_size); + UVM_ASSERT_MSG(IS_ALIGNED(base, page_size), "base 0x%llx page_size 0x%x\n", base, page_size); + UVM_ASSERT_MSG(IS_ALIGNED(size, page_size), "size 0x%llx page_size 0x%x\n", size, page_size); + UVM_ASSERT_MSG(size > 0, "size 0x%llx\n", size); + + // The invalidation size must be a power-of-two number of pages containing + // the passed interval + end = base + size - 1; + log2_invalidation_size = __fls((unsigned long)(end ^ base)) + 1; + + if (log2_invalidation_size == 64) { + // Invalidate everything + gpu->parent->host_hal->tlb_invalidate_all(push, pdb, depth, membar); + return; + } + + // The hardware aligns the target address down to the invalidation size. + actual_size = 1ULL << log2_invalidation_size; + actual_base = UVM_ALIGN_DOWN(base, actual_size); + actual_end = actual_base + actual_size - 1; + UVM_ASSERT(actual_end >= end); + + // The invalidation size field expects log2(invalidation size in 4K), not + // log2(invalidation size in bytes) + log2_invalidation_size -= 12; + + // Address to invalidate, as a multiple of 4K. 
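+    // Illustrative example of the sizing above: with base = 0x200000 and
+    // size = 0x3000, end = 0x202fff and (end ^ base) = 0x2fff, so
+    // log2_invalidation_size = __fls(0x2fff) + 1 = 14, i.e. an aligned 16KB
+    // range [0x200000, 0x203fff] that covers the requested interval. After
+    // subtracting 12 above, the INVALIDATION_SIZE field programmed below is 2
+    // (log2 of the size in 4K pages).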
+ base >>= 12; + va_lo = base & HWMASK(C36F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + va_hi = base >> HWSIZE(C36F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO); + + UVM_ASSERT_MSG(pdb.aperture == UVM_APERTURE_VID || pdb.aperture == UVM_APERTURE_SYS, "aperture: %u", pdb.aperture); + + if (pdb.aperture == UVM_APERTURE_VID) + aperture_value = HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM); + else + aperture_value = HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, SYS_MEM_COHERENT); + + UVM_ASSERT_MSG(IS_ALIGNED(pdb.address, 1 << 12), "pdb 0x%llx\n", pdb.address); + pdb.address >>= 12; + + pdb_lo = pdb.address & HWMASK(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + pdb_hi = pdb.address >> HWSIZE(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO); + + // PDE3 is the highest level on Pascal and Volta, see the comment in + // uvm_pascal_mmu.c for details. + UVM_ASSERT_MSG(depth < NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3, "depth %u", depth); + page_table_level = NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 - depth; + + if (membar != UVM_MEMBAR_NONE) { + // If a GPU or SYS membar is needed, ACK_TYPE needs to be set to + // GLOBALLY to make sure all the pending accesses can be picked up by + // the membar. + ack_value = HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_ACK_TYPE, GLOBALLY); + } + + NV_PUSH_4U(C36F, MEM_OP_A, HWVALUE(C36F, MEM_OP_A, TLB_INVALIDATE_INVALIDATION_SIZE, log2_invalidation_size) | + HWCONST(C36F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWVALUE(C36F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO, va_lo), + MEM_OP_B, HWVALUE(C36F, MEM_OP_B, TLB_INVALIDATE_TARGET_ADDR_HI, va_hi), + MEM_OP_C, HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_REPLAY, NONE) | + HWVALUE(C36F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, page_table_level) | + aperture_value | + ack_value, + MEM_OP_D, HWCONST(C36F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE_TARGETED) | + HWVALUE(C36F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); + + uvm_hal_tlb_invalidate_membar(push, membar); +} + +void uvm_hal_volta_replay_faults(uvm_push_t *push, uvm_fault_replay_type_t type) +{ + NvU32 replay_value = 0; + const NvU32 va_lo = 0; + const NvU32 va_hi = 0; + const NvU32 pdb_lo = 0; + const NvU32 pdb_hi = 0; + + // On Volta+ the MMU will forward the replay to the uTLBs even if the PDB + // is not in the MMU PDB_ID cache. Therefore, we target a dummy PDB to + // avoid any VA invalidation, which could impact on the performance. 
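+    // Note that va_lo/va_hi and pdb_lo/pdb_hi are all zero: the PDB
+    // aperture/address programmed in MEM_OP_C/D is only a placeholder, and
+    // the REPLAY field (START or START_ACK_ALL) is the part of the method
+    // that carries meaning here.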
+ UVM_ASSERT_MSG(type == UVM_FAULT_REPLAY_TYPE_START || type == UVM_FAULT_REPLAY_TYPE_START_ACK_ALL, + "replay_type: %u\n", type); + + if (type == UVM_FAULT_REPLAY_TYPE_START) + replay_value = HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_REPLAY, START); + else if (type == UVM_FAULT_REPLAY_TYPE_START_ACK_ALL) + replay_value = HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_REPLAY, START_ACK_ALL); + + NV_PUSH_4U(C36F, MEM_OP_A, HWCONST(C36F, MEM_OP_A, TLB_INVALIDATE_SYSMEMBAR, DIS) | + HWVALUE(C36F, MEM_OP_A, TLB_INVALIDATE_TARGET_ADDR_LO, va_lo), + MEM_OP_B, HWVALUE(C36F, MEM_OP_B, TLB_INVALIDATE_TARGET_ADDR_HI, va_hi), + MEM_OP_C, HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PDB, ONE) | + HWVALUE(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_ADDR_LO, pdb_lo) | + HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_GPC, ENABLE) | + HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PAGE_TABLE_LEVEL, PTE_ONLY) | + HWCONST(C36F, MEM_OP_C, TLB_INVALIDATE_PDB_APERTURE, VID_MEM) | + replay_value, + MEM_OP_D, HWCONST(C36F, MEM_OP_D, OPERATION, MMU_TLB_INVALIDATE_TARGETED) | + HWVALUE(C36F, MEM_OP_D, TLB_INVALIDATE_PDB_ADDR_HI, pdb_hi)); +} + +void uvm_hal_volta_host_semaphore_timestamp(uvm_push_t *push, NvU64 gpu_va) +{ + NvU32 sem_lo; + UVM_ASSERT(!(NvOffset_LO32(gpu_va) & ~HWSHIFTMASK(C36F, SEM_ADDR_LO, OFFSET))); + sem_lo = READ_HWVALUE(NvOffset_LO32(gpu_va), C36F, SEM_ADDR_LO, OFFSET); + + uvm_hal_wfi_membar(push, uvm_push_get_and_reset_membar_flag(push)); + + NV_PUSH_5U(C36F, SEM_ADDR_LO, HWVALUE(C36F, SEM_ADDR_LO, OFFSET, sem_lo), + SEM_ADDR_HI, HWVALUE(C36F, SEM_ADDR_HI, OFFSET, NvOffset_HI32(gpu_va)), + SEM_PAYLOAD_LO, 0xdeadbeef, + SEM_PAYLOAD_HI, 0, + SEM_EXECUTE, HWCONST(C36F, SEM_EXECUTE, OPERATION, RELEASE) | + HWCONST(C36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) | + HWCONST(C36F, SEM_EXECUTE, RELEASE_TIMESTAMP, EN) | + HWCONST(C36F, SEM_EXECUTE, RELEASE_WFI, DIS)); +} diff --git a/kernel-open/nvidia-uvm/uvm_volta_mmu.c b/kernel-open/nvidia-uvm/uvm_volta_mmu.c new file mode 100644 index 000000000..e06436002 --- /dev/null +++ b/kernel-open/nvidia-uvm/uvm_volta_mmu.c @@ -0,0 +1,343 @@ +/******************************************************************************* + Copyright (c) 2017-2021 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "uvm_types.h" +#include "uvm_forward_decl.h" +#include "uvm_global.h" +#include "uvm_hal.h" +#include "uvm_mmu.h" +#include "uvm_volta_fault_buffer.h" +#include "hwref/volta/gv100/dev_mmu.h" +#include "hwref/volta/gv100/dev_fault.h" + +// Direct copy of make_pde_pascal and helpers, but adds NO_ATS in PDE1 +#define MMU_BIG 0 +#define MMU_SMALL 1 + +static NvU32 entries_per_index_volta(NvU32 depth) +{ + UVM_ASSERT(depth < 5); + if (depth == 3) + return 2; + return 1; +} + +static NvLength entry_offset_volta(NvU32 depth, NvU32 page_size) +{ + UVM_ASSERT(depth < 5); + if (page_size == UVM_PAGE_SIZE_4K && depth == 3) + return MMU_SMALL; + return MMU_BIG; +} + +static NvU64 single_pde_volta(uvm_mmu_page_table_alloc_t *phys_alloc, NvU32 depth) +{ + NvU64 pde_bits = 0; + + if (phys_alloc != NULL) { + NvU64 address = phys_alloc->addr.address >> NV_MMU_VER2_PDE_ADDRESS_SHIFT; + pde_bits |= HWCONST64(_MMU_VER2, PDE, IS_PDE, TRUE) | + HWCONST64(_MMU_VER2, PDE, VOL, TRUE); + + switch (phys_alloc->addr.aperture) { + case UVM_APERTURE_SYS: + pde_bits |= HWCONST64(_MMU_VER2, PDE, APERTURE, SYSTEM_COHERENT_MEMORY) | + HWVALUE64(_MMU_VER2, PDE, ADDRESS_SYS, address); + break; + case UVM_APERTURE_VID: + pde_bits |= HWCONST64(_MMU_VER2, PDE, APERTURE, VIDEO_MEMORY) | + HWVALUE64(_MMU_VER2, PDE, ADDRESS_VID, address); + break; + default: + UVM_ASSERT_MSG(0, "Invalid aperture: %d\n", phys_alloc->addr.aperture); + break; + } + + // Volta GPUs on ATS-enabled systems, perform a parallel lookup on both + // ATS and GMMU page tables. For managed memory we need to prevent this + // parallel lookup since we would not get any GPU fault if the CPU has + // a valid mapping. Also, for external ranges that are known to be + // mapped entirely on the GMMU page table we can skip the ATS lookup + // for performance reasons. This bit is set in PDE1 (depth 2) and, + // therefore, it applies to the underlying 512MB VA range. + // + // UVM sets NO_ATS for all Volta+ mappings on ATS systems. This is fine + // because CUDA ensures that all managed and external allocations are + // properly compartmentalized in 512MB-aligned VA regions. For + // cudaHostRegister CUDA cannot control the VA range, but we rely on + // ATS for those allocations so they can't use the NO_ATS bit. 
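+        // Sanity check on the 512MB figure above (assuming the usual
+        // Pascal/Volta page table geometry): each PDE1 entry spans 256
+        // dual-PDE0 entries, each covering 2MB of VA, so 256 * 2MB = 512MB.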
+ if (depth == 2 && g_uvm_global.ats.enabled) + pde_bits |= HWCONST64(_MMU_VER2, PDE, NO_ATS, TRUE); + } + + return pde_bits; +} + +static NvU64 big_half_pde_volta(uvm_mmu_page_table_alloc_t *phys_alloc) +{ + NvU64 pde_bits = 0; + + if (phys_alloc != NULL) { + NvU64 address = phys_alloc->addr.address >> NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT; + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, VOL_BIG, TRUE); + + switch (phys_alloc->addr.aperture) { + case UVM_APERTURE_SYS: + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, APERTURE_BIG, SYSTEM_COHERENT_MEMORY) | + HWVALUE64(_MMU_VER2, DUAL_PDE, ADDRESS_BIG_SYS, address); + break; + case UVM_APERTURE_VID: + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, APERTURE_BIG, VIDEO_MEMORY) | + HWVALUE64(_MMU_VER2, DUAL_PDE, ADDRESS_BIG_VID, address); + break; + default: + UVM_ASSERT_MSG(0, "Invalid big aperture %d\n", phys_alloc->addr.aperture); + break; + } + } + + return pde_bits; +} + +static NvU64 small_half_pde_volta(uvm_mmu_page_table_alloc_t *phys_alloc) +{ + NvU64 pde_bits = 0; + + if (phys_alloc != NULL) { + NvU64 address = phys_alloc->addr.address >> NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT; + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, VOL_SMALL, TRUE); + + switch (phys_alloc->addr.aperture) { + case UVM_APERTURE_SYS: + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, APERTURE_SMALL, SYSTEM_COHERENT_MEMORY); + pde_bits |= HWVALUE64(_MMU_VER2, DUAL_PDE, ADDRESS_SMALL_SYS, address); + break; + case UVM_APERTURE_VID: + pde_bits |= HWCONST64(_MMU_VER2, DUAL_PDE, APERTURE_SMALL, VIDEO_MEMORY); + pde_bits |= HWVALUE64(_MMU_VER2, DUAL_PDE, ADDRESS_SMALL_VID, address); + break; + default: + UVM_ASSERT_MSG(0, "Invalid small aperture %d\n", phys_alloc->addr.aperture); + break; + } + } + + return pde_bits; +} + +static void make_pde_volta(void *entry, uvm_mmu_page_table_alloc_t **phys_allocs, NvU32 depth) +{ + NvU32 entry_count = entries_per_index_volta(depth); + NvU64 *entry_bits = (NvU64 *)entry; + + if (entry_count == 1) { + *entry_bits = single_pde_volta(*phys_allocs, depth); + } + else if (entry_count == 2) { + entry_bits[MMU_BIG] = big_half_pde_volta(phys_allocs[MMU_BIG]); + entry_bits[MMU_SMALL] = small_half_pde_volta(phys_allocs[MMU_SMALL]); + + // This entry applies to the whole dual PDE but is stored in the lower + // bits + entry_bits[MMU_BIG] |= HWCONST64(_MMU_VER2, DUAL_PDE, IS_PDE, TRUE); + } + else { + UVM_ASSERT_MSG(0, "Invalid number of entries per index: %d\n", entry_count); + } +} + +// Direct copy of make_pte_pascal, but adds the bits necessary for 47-bit +// physical addressing +static NvU64 make_pte_volta(uvm_aperture_t aperture, NvU64 address, uvm_prot_t prot, NvU64 flags) +{ + NvU8 aperture_bits = 0; + NvU64 pte_bits = 0; + + UVM_ASSERT(prot != UVM_PROT_NONE); + UVM_ASSERT((flags & ~UVM_MMU_PTE_FLAGS_MASK) == 0); + + // valid 0:0 + pte_bits |= HWCONST64(_MMU_VER2, PTE, VALID, TRUE); + + // aperture 2:1 + if (aperture == UVM_APERTURE_SYS) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_SYSTEM_COHERENT_MEMORY; + else if (aperture == UVM_APERTURE_VID) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_VIDEO_MEMORY; + else if (aperture >= UVM_APERTURE_PEER_0 && aperture <= UVM_APERTURE_PEER_7) + aperture_bits = NV_MMU_VER2_PTE_APERTURE_PEER_MEMORY; + else + UVM_ASSERT_MSG(0, "Invalid aperture: %d\n", aperture); + + pte_bits |= HWVALUE64(_MMU_VER2, PTE, APERTURE, aperture_bits); + + // volatile 3:3 + if (flags & UVM_MMU_PTE_FLAGS_CACHED) + pte_bits |= HWCONST64(_MMU_VER2, PTE, VOL, FALSE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, VOL, TRUE); + + // encrypted 4:4 + 
pte_bits |= HWCONST64(_MMU_VER2, PTE, ENCRYPTED, FALSE); + + // privilege 5:5 + pte_bits |= HWCONST64(_MMU_VER2, PTE, PRIVILEGE, FALSE); + + // read only 6:6 + if (prot == UVM_PROT_READ_ONLY) + pte_bits |= HWCONST64(_MMU_VER2, PTE, READ_ONLY, TRUE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, READ_ONLY, FALSE); + + // atomic disable 7:7 + if (prot == UVM_PROT_READ_WRITE_ATOMIC) + pte_bits |= HWCONST64(_MMU_VER2, PTE, ATOMIC_DISABLE, FALSE); + else + pte_bits |= HWCONST64(_MMU_VER2, PTE, ATOMIC_DISABLE, TRUE); + + address >>= NV_MMU_VER2_PTE_ADDRESS_SHIFT; + if (aperture == UVM_APERTURE_SYS) { + // sys address 53:8 + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_SYS, address); + } + else { + NvU64 addr_lo = address & HWMASK64(_MMU_VER2, PTE, ADDRESS_VID); + NvU64 addr_hi = address >> HWSIZE(_MMU_VER2, PTE, ADDRESS_VID); + + // vid address 32:8 for bits 36:12 of the physical address + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_VID, addr_lo); + + // comptagline 53:36 - this can be overloaded in some cases to reference + // a 47-bit physical address. Currently, the only known cases of this + // is for nvswitch, where peer id is the fabric id programmed for + // such peer mappings + pte_bits |= HWVALUE64(_MMU_VER2, PTE, COMPTAGLINE, addr_hi); + + // peer id 35:33 + if (aperture != UVM_APERTURE_VID) + pte_bits |= HWVALUE64(_MMU_VER2, PTE, ADDRESS_VID_PEER, UVM_APERTURE_PEER_ID(aperture)); + } + + pte_bits |= HWVALUE64(_MMU_VER2, PTE, KIND, NV_MMU_PTE_KIND_PITCH); + + return pte_bits; +} + +static uvm_mmu_mode_hal_t volta_mmu_mode_hal; + +uvm_mmu_mode_hal_t *uvm_hal_mmu_mode_volta(NvU32 big_page_size) +{ + static bool initialized = false; + + UVM_ASSERT(big_page_size == UVM_PAGE_SIZE_64K || big_page_size == UVM_PAGE_SIZE_128K); + + // TODO: Bug 1789555: RM should reject the creation of GPU VA spaces with + // 128K big page size for Pascal+ GPUs + if (big_page_size == UVM_PAGE_SIZE_128K) + return NULL; + + if (!initialized) { + uvm_mmu_mode_hal_t *pascal_mmu_mode_hal = uvm_hal_mmu_mode_pascal(big_page_size); + UVM_ASSERT(pascal_mmu_mode_hal); + + // The assumption made is that arch_hal->mmu_mode_hal() will be + // called under the global lock the first time, so check it here. 
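+        // Illustrative usage: callers are expected to do something like
+        //   uvm_mmu_mode_hal_t *hal = uvm_hal_mmu_mode_volta(UVM_PAGE_SIZE_64K);
+        // while holding g_uvm_global.global_lock. Requesting a 128K big page
+        // size returns NULL, as checked above.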
+ uvm_assert_mutex_locked(&g_uvm_global.global_lock); + + volta_mmu_mode_hal = *pascal_mmu_mode_hal; + volta_mmu_mode_hal.make_pte = make_pte_volta; + volta_mmu_mode_hal.make_pde = make_pde_volta; + + initialized = true; + } + + return &volta_mmu_mode_hal; +} + +uvm_mmu_engine_type_t uvm_hal_volta_mmu_engine_id_to_type(NvU16 mmu_engine_id) +{ + if (mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_HOST0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_HOST13) + return UVM_MMU_ENGINE_TYPE_HOST; + + if (mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_CE0 && mmu_engine_id <= NV_PFAULT_MMU_ENG_ID_CE8) + return UVM_MMU_ENGINE_TYPE_CE; + + // We shouldn't be servicing faults from any other engines + UVM_ASSERT_MSG(mmu_engine_id >= NV_PFAULT_MMU_ENG_ID_GRAPHICS, "Unexpected engine ID: 0x%x\n", mmu_engine_id); + + return UVM_MMU_ENGINE_TYPE_GRAPHICS; +} + +NvU16 uvm_hal_volta_mmu_client_id_to_utlb_id(NvU16 client_id) +{ + switch (client_id) { + case NV_PFAULT_CLIENT_GPC_RAST: + case NV_PFAULT_CLIENT_GPC_GCC: + case NV_PFAULT_CLIENT_GPC_GPCCS: + return UVM_VOLTA_GPC_UTLB_ID_RGG; + case NV_PFAULT_CLIENT_GPC_PE_0: + case NV_PFAULT_CLIENT_GPC_TPCCS_0: + case NV_PFAULT_CLIENT_GPC_T1_0: + case NV_PFAULT_CLIENT_GPC_T1_1: + return UVM_VOLTA_GPC_UTLB_ID_LTP0; + case NV_PFAULT_CLIENT_GPC_PE_1: + case NV_PFAULT_CLIENT_GPC_TPCCS_1: + case NV_PFAULT_CLIENT_GPC_T1_2: + case NV_PFAULT_CLIENT_GPC_T1_3: + return UVM_VOLTA_GPC_UTLB_ID_LTP1; + case NV_PFAULT_CLIENT_GPC_PE_2: + case NV_PFAULT_CLIENT_GPC_TPCCS_2: + case NV_PFAULT_CLIENT_GPC_T1_4: + case NV_PFAULT_CLIENT_GPC_T1_5: + return UVM_VOLTA_GPC_UTLB_ID_LTP2; + case NV_PFAULT_CLIENT_GPC_PE_3: + case NV_PFAULT_CLIENT_GPC_TPCCS_3: + case NV_PFAULT_CLIENT_GPC_T1_6: + case NV_PFAULT_CLIENT_GPC_T1_7: + return UVM_VOLTA_GPC_UTLB_ID_LTP3; + case NV_PFAULT_CLIENT_GPC_PE_4: + case NV_PFAULT_CLIENT_GPC_TPCCS_4: + case NV_PFAULT_CLIENT_GPC_T1_8: + case NV_PFAULT_CLIENT_GPC_T1_9: + return UVM_VOLTA_GPC_UTLB_ID_LTP4; + case NV_PFAULT_CLIENT_GPC_PE_5: + case NV_PFAULT_CLIENT_GPC_TPCCS_5: + case NV_PFAULT_CLIENT_GPC_T1_10: + case NV_PFAULT_CLIENT_GPC_T1_11: + return UVM_VOLTA_GPC_UTLB_ID_LTP5; + case NV_PFAULT_CLIENT_GPC_PE_6: + case NV_PFAULT_CLIENT_GPC_TPCCS_6: + case NV_PFAULT_CLIENT_GPC_T1_12: + case NV_PFAULT_CLIENT_GPC_T1_13: + return UVM_VOLTA_GPC_UTLB_ID_LTP6; + case NV_PFAULT_CLIENT_GPC_PE_7: + case NV_PFAULT_CLIENT_GPC_TPCCS_7: + case NV_PFAULT_CLIENT_GPC_T1_14: + case NV_PFAULT_CLIENT_GPC_T1_15: + return UVM_VOLTA_GPC_UTLB_ID_LTP7; + default: + UVM_ASSERT_MSG(false, "Invalid client value: 0x%x\n", client_id); + } + + return 0; +} diff --git a/kernel-open/nvidia/export_nvswitch.h b/kernel-open/nvidia/export_nvswitch.h new file mode 100644 index 000000000..489e87064 --- /dev/null +++ b/kernel-open/nvidia/export_nvswitch.h @@ -0,0 +1,983 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVSWITCH_EXPORT_H_ +#define _NVSWITCH_EXPORT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nv_stdarg.h" +#include "nvlink_common.h" +#include "ioctl_common_nvswitch.h" + +#define NVSWITCH_DRIVER_NAME "nvidia-nvswitch" + +#define NVSWITCH_MAX_BARS 1 + +#define NVSWITCH_DEVICE_INSTANCE_MAX 64 + +#define PCI_CLASS_BRIDGE_NVSWITCH 0x0680 + +#ifndef PCI_VENDOR_ID_NVIDIA +#define PCI_VENDOR_ID_NVIDIA 0x10DE +#endif + +#define PCI_ADDR_OFFSET_VENDOR 0 +#define PCI_ADDR_OFFSET_DEVID 2 + +#define NVSWITCH_NSEC_PER_SEC 1000000000ULL + +#define NVSWITCH_DBG_LEVEL_MMIO 0x0 +#define NVSWITCH_DBG_LEVEL_INFO 0x1 +#define NVSWITCH_DBG_LEVEL_SETUP 0x2 +#define NVSWITCH_DBG_LEVEL_WARN 0x3 +#define NVSWITCH_DBG_LEVEL_ERROR 0x4 + +#define NVSWITCH_LOG_BUFFER_SIZE 512 + +#define NVSWITCH_DMA_DIR_TO_SYSMEM 0 +#define NVSWITCH_DMA_DIR_FROM_SYSMEM 1 +#define NVSWITCH_DMA_DIR_BIDIRECTIONAL 2 + +#define NVSWITCH_I2C_CMD_READ 0 +#define NVSWITCH_I2C_CMD_WRITE 1 +#define NVSWITCH_I2C_CMD_SMBUS_READ 2 +#define NVSWITCH_I2C_CMD_SMBUS_WRITE 3 +#define NVSWITCH_I2C_CMD_SMBUS_QUICK_READ 4 +#define NVSWITCH_I2C_CMD_SMBUS_QUICK_WRITE 5 + +typedef struct nvswitch_device nvswitch_device; +typedef struct NVSWITCH_CLIENT_EVENT NVSWITCH_CLIENT_EVENT; + +/* + * @Brief : The interface will check if the client's version is supported by the + * driver. + * + * @param[in] user_version Version of the interface that the client is + * compiled with. + * @param[out] kernel_version Version of the interface that the kernel driver + * is compiled with. This information will be + * filled even if the CTRL call returns + * -NVL_ERR_NOT_SUPPORTED due to version mismatch. + * @param[in] length Version string buffer length + * + * @returns NVL_SUCCESS if the client is using compatible + * interface. + * -NVL_ERR_NOT_SUPPORTED if the client is using + * incompatible interface. + * Or, Other NVL_XXX status value. + */ +NvlStatus +nvswitch_lib_check_api_version +( + const char *user_version, + char *kernel_version, + NvU32 length +); + +/* + * @Brief : Allocate a new nvswitch lib device instance. + * + * @Description : Creates and registers a new nvswitch device and registers + * with the nvlink library. This only initializes software state, + * it does not initialize the hardware state. 
+ * + * @param[in] pci_domain pci domain of the device + * @param[in] pci_bus pci bus of the device + * @param[in] pci_device pci device of the device + * @param[in] pci_func pci function of the device + * @param[in] device_id pci device ID of the device + * @param[in] os_handle Device handle used to interact with OS layer + * @param[in] os_instance instance number of this device + * @param[out] device return device handle for interfacing with library + * + * @returns NVL_SUCCESS if the action succeeded + * an NVL error code otherwise + */ +NvlStatus +nvswitch_lib_register_device +( + NvU16 pci_domain, + NvU8 pci_bus, + NvU8 pci_device, + NvU8 pci_func, + NvU16 device_id, + void *os_handle, + NvU32 os_instance, + nvswitch_device **device +); + +/* + * @Brief : Clean-up the software state for a nvswitch device. + * + * @Description : + * + * @param[in] device device handle to destroy + * + * @returns none + */ +void +nvswitch_lib_unregister_device +( + nvswitch_device *device +); + +/* + * @Brief : Initialize the hardware for a nvswitch device. + * + * @Description : + * + * @param[in] device a reference to the device to initialize + * + * @returns NVL_SUCCESS if the action succeeded + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if bar info unable to be retrieved + */ +NvlStatus +nvswitch_lib_initialize_device +( + nvswitch_device *device +); + +/* + * @Brief : Shutdown the hardware for a nvswitch device. + * + * @Description : + * + * @param[in] device a reference to the device to initialize + * + * @returns NVL_SUCCESS if the action succeeded + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if bar info unable to be retrieved + */ +NvlStatus +nvswitch_lib_shutdown_device +( + nvswitch_device *device +); + +/* + * @Brief Control call (ioctl) interface. + * + * @param[in] device device to operate on + * @param[in] cmd Enumerated command to execute. + * @param[in] params Params structure to pass to the command. + * @param[in] params_size Size of the parameter structure. + * @param[in] osPrivate The private data structure for OS. + * + * @return NVL_SUCCESS on a successful command + * -NVL_NOT_FOUND if target device unable to be found + * -NVL_BAD_ARGS if an invalid cmd is provided + * -NVL_BAD_ARGS if a null arg is provided + * -NVL_ERR_GENERIC otherwise + */ +NvlStatus nvswitch_lib_ctrl +( + nvswitch_device *device, + NvU32 cmd, + void *params, + NvU64 size, + void *osPrivate +); + +/* + * @Brief: Retrieve PCI information for a switch based from device instance + * + * @Description : + * + * @param[in] lib_handle device to query + * @param[out] pciInfo return pointer to nvswitch lib copy of device info + */ +void nvswitch_lib_get_device_info +( + nvswitch_device *lib_handle, + struct nvlink_pci_info **pciInfo +); + +/* + * @Brief: Retrieve BIOS version for an nvswitch device + * + * @Description: For devices with a BIOS, this retrieves the BIOS version. 
+ * + * @param[in] device device to query + * @param[out] version BIOS version is stored here + * + * @returns NVL_SUCCESS BIOS version was retrieved successfully + * -NVL_BAD_ARGS an invalid device is provided + * -NVL_ERR_INVALID_STATE an error occurred reading BIOS info + * -NVL_ERR_NOT_SUPPORTED device doesn't support this feature + */ + +NvlStatus +nvswitch_lib_get_bios_version +( + nvswitch_device *device, + NvU64 *version +); + + +/* + * @Brief: Retrieve whether the device supports PCI pin interrupts + * + * @Description: Returns whether the device can use PCI pin IRQs + * + * + * @returns NV_TRUE device can use PCI pin IRQs + * NV_FALSE device cannot use PCI pin IRQs + */ + +NvlStatus +nvswitch_lib_use_pin_irq +( + nvswitch_device *device +); + + +/* + * @Brief: Load platform information (emulation, simulation etc.). + * + * @param[in] lib_handle device + * + * @return NVL_SUCCESS on a successful command + * -NVL_BAD_ARGS if an invalid device is provided + */ +NvlStatus nvswitch_lib_load_platform_info +( + nvswitch_device *lib_handle +); + +/* + * @Brief : Enable interrupts for this device + * + * @Description : + * + * @param[in] device device to enable + * + * @returns NVL_SUCCESS + * -NVL_PCI_ERROR if there was a register access error + */ +void +nvswitch_lib_enable_interrupts +( + nvswitch_device *device +); + +/* + * @Brief : Disable interrupts for this device + * + * @Description : + * + * @param[in] device device to enable + * + * @returns NVL_SUCCESS + * -NVL_PCI_ERROR if there was a register access error + */ +void +nvswitch_lib_disable_interrupts +( + nvswitch_device *device +); + +/* + * @Brief : Check if interrupts are pending on this device + * + * @Description : + * + * @param[in] device device to check + * + * @returns NVL_SUCCESS if there were no errors and interrupts were handled + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if there was a register access error + * -NVL_MORE_PROCESSING_REQUIRED no interrupts were found for this device + */ +NvlStatus +nvswitch_lib_check_interrupts +( + nvswitch_device *device +); + +/* + * @Brief : Services interrupts for this device + * + * @Description : + * + * @param[in] device device to service + * + * @returns NVL_SUCCESS if there were no errors and interrupts were handled + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if there was a register access error + * -NVL_MORE_PROCESSING_REQUIRED no interrupts were found for this device + */ +NvlStatus +nvswitch_lib_service_interrupts +( + nvswitch_device *device +); + +/* + * @Brief : Get depth of error logs + * + * @Description : + * + * @param[in] device device to check + * + * @param[out] fatal Count of fatal errors + * @param[out] nonfatal Count of non-fatal errors + * + * @returns NVL_SUCCESS if there were no errors and interrupts were handled + * -NVL_NOT_FOUND if bad arguments provided + */ +NvlStatus +nvswitch_lib_get_log_count +( + nvswitch_device *device, + NvU32 *fatal, NvU32 *nonfatal +); + +/* + * @Brief : Periodic thread-based dispatcher for kernel functions + * + * @Description : Its purpose is to do any background subtasks (data collection, thermal + * monitoring, etc. These subtasks may need to run at varying intervals, and + * may even wish to adjust their execution period based on other factors. + * Each subtask's entry notes the last time it was executed and its desired + * execution period. This function returns back to the dispatcher the desired + * time interval before it should be called again. 
+ * + * @param[in] device The device to run background tasks on + * + * @returns nsec interval to wait before the next call. + */ +NvU64 +nvswitch_lib_deferred_task_dispatcher +( + nvswitch_device *device +); + +/* + * @Brief : Perform post init tasks + * + * @Description : Any device initialization/tests which need the device to be + * initialized to a sane state go here. + * + * @param[in] device The device to run the post-init on + * + * @returns returns NvlStatus code, see nvlink_errors.h + */ +NvlStatus +nvswitch_lib_post_init_device +( + nvswitch_device *device +); + +/* + * @Brief : Perform post init tasks for a blacklisted device + * + * @Description : Any initialization tasks that should be run after a + * blacklisted item should go here. + * + * @param[in] device The device to run the post-init-blacklist on + * + * @returns void + */ +void +nvswitch_lib_post_init_blacklist_device +( + nvswitch_device *device +); + +/* + * @Brief : Get the UUID of the device + * + * @Description : Copies out the device's UUID into the uuid field + * + * @param[in] device The device to get the UUID from + * + * @param[out] uuid A pointer to a uuid struct in which the UUID is written to + * + * @returns void + */ +void +nvswitch_lib_get_uuid +( + nvswitch_device *device, + NvUuid *uuid +); + +/* + * @Brief : Get the Physical ID of the device + * + * @Description : Copies out the device's Physical ID into the phys_id field + * + * @param[in] device The device to get the UUID from + * + * @param[out] phys_id A pointer to a NvU32 which the physical ID is written to + * + * @returns NVL_SUCCESS if successful + * -NVL_BAD_ARGS if bad arguments provided + */ +NvlStatus +nvswitch_lib_get_physid +( + nvswitch_device *device, + NvU32 *phys_id +); + +/* + * @Brief : Read the Fabric State for a nvswitch device. 
+ * + * @Description : Returns the Fabric State for the device + * + * @param[in] device a reference to the device + * @param[in] *ptrs references to the fabric state + * + * @returns NVL_SUCCESS if the action succeeded + * -NVL_BAD_ARGS if bad arguments provided + */ +NvlStatus +nvswitch_lib_read_fabric_state +( + nvswitch_device *device, + NVSWITCH_DEVICE_FABRIC_STATE *device_fabric_state, + NVSWITCH_DEVICE_BLACKLIST_REASON *device_blacklist_reason, + NVSWITCH_DRIVER_FABRIC_STATE *driver_fabric_state +); + +/* + * @Brief : Validates PCI device id + * + * @Description : Validates PCI device id + * + * @param[in] device The device id to be validated + * + * @returns True if device id is valid + */ +NvBool +nvswitch_lib_validate_device_id +( + NvU32 device_id +); + +/* + * @Brief : Gets an event if it exists in the Event list + * + * @Description : Gets an event if it is in the Device's Client + * Event list + * + * @param[in] device Device to operate on + * @param[in] osPrivate The private data structure for the OS + * @param[out] ppClientEvent Double pointer to client event + * + * @returns NVL_SUCCESS if client event found + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_NOT_FOUND if no client event found + */ +NvlStatus +nvswitch_lib_get_client_event +( + nvswitch_device *device, + void *osPrivate, + NVSWITCH_CLIENT_EVENT **ppClientEvent +); + +/* + * @Brief : Adds a single entry into the Event list + * + * @Description : Adds an entry into the front of the Device's + * Client Event List + * + * @param[in] device Device to operate on + * @param[in] osPrivate The private data structure for OS + * @param[in] pParams The parameters for the client event + * + * @returns NVL_SUCCESS if event added + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_NO_MEM if allocation fails + */ +NvlStatus +nvswitch_lib_add_client_event +( + nvswitch_device *device, + void *osPrivate, + NvU32 eventId +); + +/* + * @Brief : Removes entries from the Event list + * + * @Description : Removes the entries associated with osPrivate + * from the Device's Client Event List + * + * @param[in] device Device to operate on + * @param[in] osPrivate The private data structure for OS + * + * @returns NVL_SUCCESS if event removed + */ +NvlStatus +nvswitch_lib_remove_client_events +( + nvswitch_device *device, + void *osPrivate +); + +/* + * @Brief : Notifies all events with a matching event Id in the Client Event list + * + * @Description : Notifies all events with a matching event Id in the Client Event list + * + * @param[in] device Device to operate on + * @param[in] eventId The event ID to notify + * + * @returns NVL_SUCCESS if arguments are valid + * -NVL_BAD_ARGS if bad arguments provided + */ +NvlStatus +nvswitch_lib_notify_client_events +( + nvswitch_device *device, + NvU32 eventId +); + +/* + * @Brief : Gets a mask of valid I2C ports for the device + * + * @Description : Gets a mask of valid I2C ports for the device + * + * @param[in] device Device to operate on + * @param[out] validPortsMask A pointer to a mask of valid ports + * + * @returns NVL_SUCCESS if successfuly + * -NVL_BAD_ARGS if bad arguments provided + */ +NvlStatus +nvswitch_lib_get_valid_ports_mask +( + nvswitch_device *device, + NvU32 *validPortsMask +); + +/* + * @Brief : Returns a boolean if the I2C interface is supported for the device + * + * @Description : Returns a boolean if the I2C interface is supported for the device + * + * @param[in] device Device to operate on + * + * @returns NV_TRUE device can use the I2C interface + * NV_FALSE 
device cannot use the I2C interface + */ +NvBool +nvswitch_lib_is_i2c_supported +( + nvswitch_device *device +); + +/* + * @Brief : Performs an I2C transaction + * + * @Description : Performs an I2C transaction + * + * @param[in] device Device to operate on + * @param[in] port Port to issue I2C transaction + * @param[in] type Type of I2C transaction + * @param[in] addr Device address to perform I2C transaction on + * @param[in] command I2C command to perform on + * @param[in] len Length of the I2C transaction message + * @param[in/out] pData A pointer to the buffer containing the input/output data + * + * @returns NVL_SUCCESS if I2C transaction completes + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_ERR_INVALID_STATE if something internal went wrong + */ +NvlStatus +nvswitch_lib_i2c_transfer +( + nvswitch_device *device, + NvU32 port, + NvU8 type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +); + +/* + * Returns count of registered NvSwitch devices. + */ +NvU32 +nvswitch_os_get_device_count +( + void +); + +/* + * Get current time in nanoseconds + * The time is since epoch time (midnight UTC of January 1, 1970) + */ +NvU64 +nvswitch_os_get_platform_time +( + void +); + +#if (defined(_WIN32) || defined(_WIN64)) +#define NVSWITCH_PRINT_ATTRIB(str, arg1) +#else +#define NVSWITCH_PRINT_ATTRIB(str, arg1) \ + __attribute__ ((format (printf, (str), (arg1)))) +#endif // (defined(_WIN32) || defined(_WIN64)) + +/* + * printf wrapper + */ +void +NVSWITCH_PRINT_ATTRIB(2, 3) +nvswitch_os_print +( + int log_level, + const char *pFormat, + ... +); + +/* + * "Registry" interface for dword + */ +NvlStatus +nvswitch_os_read_registry_dword +( + void *os_handle, + const char *name, + NvU32 *data +); + +/* + * "Registry" interface for binary data + */ +NvlStatus +nvswitch_os_read_registery_binary +( + void *os_handle, + const char *name, + NvU8 *data, + NvU32 length +); + +NvBool +nvswitch_os_is_uuid_in_blacklist +( + NvUuid *uuid +); + + +/* + * Override platform/simulation settings for cases + */ +void +nvswitch_os_override_platform +( + void *os_handle, + NvBool *rtlsim +); + +/* + * Memory management interface + */ +NvlStatus +nvswitch_os_alloc_contig_memory +( + void *os_handle, + void **virt_addr, + NvU32 size, + NvBool force_dma32 +); + +void +nvswitch_os_free_contig_memory +( + void *os_handle, + void *virt_addr, + NvU32 size +); + +NvlStatus +nvswitch_os_map_dma_region +( + void *os_handle, + void *cpu_addr, + NvU64 *dma_handle, + NvU32 size, + NvU32 direction +); + +NvlStatus +nvswitch_os_unmap_dma_region +( + void *os_handle, + void *cpu_addr, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +); + +NvlStatus +nvswitch_os_set_dma_mask +( + void *os_handle, + NvU32 dma_addr_width +); + +NvlStatus +nvswitch_os_sync_dma_region_for_cpu +( + void *os_handle, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +); + +NvlStatus +nvswitch_os_sync_dma_region_for_device +( + void *os_handle, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +); + +void * +nvswitch_os_malloc_trace +( + NvLength size, + const char *file, + NvU32 line +); + +void +nvswitch_os_free +( + void *pMem +); + +NvLength +nvswitch_os_strlen +( + const char *str +); + +char* +nvswitch_os_strncpy +( + char *pDest, + const char *pSrc, + NvLength length +); + +int +nvswitch_os_strncmp +( + const char *s1, + const char *s2, + NvLength length +); + +void * +nvswitch_os_memset +( + void *pDest, + int value, + NvLength size +); + +void * +nvswitch_os_memcpy +( + void *pDest, + const void *pSrc, + NvLength size +); + +int 
+nvswitch_os_memcmp +( + const void *s1, + const void *s2, + NvLength size +); + +/* + * Memory read / write interface + */ +NvU32 +nvswitch_os_mem_read32 +( + const volatile void * pAddress +); + +void +nvswitch_os_mem_write32 +( + volatile void *pAddress, + NvU32 data +); + +NvU64 +nvswitch_os_mem_read64 +( + const volatile void *pAddress +); + +void +nvswitch_os_mem_write64 +( + volatile void *pAddress, + NvU64 data +); + +/* + * Interface to write formatted output to sized buffer + */ +int +nvswitch_os_snprintf +( + char *pString, + NvLength size, + const char *pFormat, + ... +); + +/* + * Interface to write formatted output to sized buffer + */ +int +nvswitch_os_vsnprintf +( + char *buf, + NvLength size, + const char *fmt, + va_list arglist +); + +/* + * Debug assert and log interface + */ +void +nvswitch_os_assert_log +( + int cond, + const char *pFormat, + ... +); + +/* + * Interface to sleep for specified milliseconds. Yields the CPU to scheduler. + */ +void +nvswitch_os_sleep +( + unsigned int ms +); + +NvlStatus +nvswitch_os_acquire_fabric_mgmt_cap +( + void *osPrivate, + NvU64 capDescriptor +); + +int +nvswitch_os_is_fabric_manager +( + void *osPrivate +); + +int +nvswitch_os_is_admin +( + void +); + +NvlStatus +nvswitch_os_get_os_version +( + NvU32 *pMajorVer, + NvU32 *pMinorVer, + NvU32 *pBuildNum +); + +void +nvswitch_lib_smbpbi_log_sxid +( + nvswitch_device *device, + NvU32 sxid, + const char *pFormat, + ... +); + +/*! + * @brief: OS Specific handling to add an event. + */ +NvlStatus +nvswitch_os_add_client_event +( + void *osHandle, + void *osPrivate, + NvU32 eventId +); + +/*! + * @brief: OS specific handling to remove all events corresponding to osPrivate. + */ +NvlStatus +nvswitch_os_remove_client_event +( + void *osHandle, + void *osPrivate +); + +/*! + * @brief: OS specific handling to notify an event. + */ +NvlStatus +nvswitch_os_notify_client_event +( + void *osHandle, + void *osPrivate, + NvU32 eventId +); + +/*! + * @brief: Gets OS specific support for the REGISTER_EVENTS ioctl + */ +NvlStatus +nvswitch_os_get_supported_register_events_params +( + NvBool *bSupportsManyEvents, + NvBool *bUserSuppliesOsData +); + +#ifdef __cplusplus +} +#endif +#endif //_NVSWITCH_EXPORT_H_ diff --git a/kernel-open/nvidia/i2c_nvswitch.c b/kernel-open/nvidia/i2c_nvswitch.c new file mode 100644 index 000000000..592d5d846 --- /dev/null +++ b/kernel-open/nvidia/i2c_nvswitch.c @@ -0,0 +1,350 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "linux_nvswitch.h" +#include + +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + +#define NVSWITCH_I2C_GET_PARENT(adapter) \ + (NVSWITCH_DEV *)pci_get_drvdata(to_pci_dev((adapter)->dev.parent)); + +#define NVSWITCH_I2C_GET_ALGO_DATA(adapter) \ + (nvswitch_i2c_algo_data *)(adapter)->algo_data; + +typedef struct +{ + NvU32 port; +} nvswitch_i2c_algo_data; + +static int +nvswitch_i2c_algo_master_xfer +( + struct i2c_adapter *adapter, + struct i2c_msg msgs[], + int num +) +{ + int rc; + int i; + NvU32 port; + NvlStatus status = NVL_SUCCESS; + nvswitch_i2c_algo_data *i2c_algo_data; + NVSWITCH_DEV *nvswitch_dev; + const unsigned int supported_i2c_flags = I2C_M_RD +#if defined (I2C_M_DMA_SAFE) + | I2C_M_DMA_SAFE +#endif + ; + + nvswitch_dev = NVSWITCH_I2C_GET_PARENT(adapter); + if (nvswitch_dev == NULL) + { + return -ENODEV; + } + + rc = mutex_lock_interruptible(&nvswitch_dev->device_mutex); + if (rc) + { + return rc; + } + + if (nvswitch_dev->unusable) + { + printk(KERN_INFO "%s: a stale fd detected\n", nvswitch_dev->name); + status = NVL_ERR_INVALID_STATE; + goto nvswitch_i2c_algo_master_xfer_exit; + } + + i2c_algo_data = NVSWITCH_I2C_GET_ALGO_DATA(adapter); + if (i2c_algo_data == NULL) + { + status = NVL_ERR_INVALID_STATE; + goto nvswitch_i2c_algo_master_xfer_exit; + } + + port = i2c_algo_data->port; + + for (i = 0; (i < num) && (status == NVL_SUCCESS); i++) + { + if (msgs[i].flags & ~supported_i2c_flags) + { + status = NVL_ERR_NOT_SUPPORTED; + } + else + { + status = nvswitch_lib_i2c_transfer(nvswitch_dev->lib_device, port, + (msgs[i].flags & I2C_M_RD) ? + NVSWITCH_I2C_CMD_READ : NVSWITCH_I2C_CMD_WRITE, + (NvU8)(msgs[i].addr & 0x7f), 0, + (NvU32)(msgs[i].len & 0xffffUL), + (NvU8 *)msgs[i].buf); + } + } + +nvswitch_i2c_algo_master_xfer_exit: + mutex_unlock(&nvswitch_dev->device_mutex); + + rc = nvswitch_map_status(status); + return (rc == 0) ? num : rc; +} + +static int +nvswitch_i2c_algo_smbus_xfer +( + struct i2c_adapter *adapter, + u16 addr, + unsigned short flags, + char read_write, + u8 command, + int protocol, + union i2c_smbus_data *data +) +{ + int rc = -EIO; + NvU32 port; + NvU8 cmd; + NvU32 len; + NvU8 type; + NvU8 *xfer_data; + NvlStatus status = NVL_SUCCESS; + nvswitch_i2c_algo_data *i2c_algo_data; + NVSWITCH_DEV *nvswitch_dev; + + nvswitch_dev = NVSWITCH_I2C_GET_PARENT(adapter); + if (nvswitch_dev == NULL) + { + return -ENODEV; + } + + rc = mutex_lock_interruptible(&nvswitch_dev->device_mutex); + if (rc) + { + return rc; + } + + if (nvswitch_dev->unusable) + { + printk(KERN_INFO "%s: a stale fd detected\n", nvswitch_dev->name); + status = NVL_ERR_INVALID_STATE; + goto nvswitch_i2c_algo_smbus_xfer_exit; + } + + i2c_algo_data = NVSWITCH_I2C_GET_ALGO_DATA(adapter); + if (i2c_algo_data == NULL) + { + status = NVL_ERR_INVALID_STATE; + goto nvswitch_i2c_algo_smbus_xfer_exit; + } + + port = i2c_algo_data->port; + + switch (protocol) + { + case I2C_SMBUS_QUICK: + { + cmd = 0; + len = 0; + type = (read_write == I2C_SMBUS_READ) ? 
+ NVSWITCH_I2C_CMD_SMBUS_QUICK_READ : + NVSWITCH_I2C_CMD_SMBUS_QUICK_WRITE; + xfer_data = NULL; + break; + } + case I2C_SMBUS_BYTE: + { + cmd = 0; + len = 1; + + if (read_write == I2C_SMBUS_READ) + { + type = NVSWITCH_I2C_CMD_READ; + xfer_data = (NvU8 *)&data->byte; + } + else + { + type = NVSWITCH_I2C_CMD_WRITE; + xfer_data = &command; + } + break; + } + case I2C_SMBUS_BYTE_DATA: + { + cmd = (NvU8)command; + len = 1; + type = (read_write == I2C_SMBUS_READ) ? + NVSWITCH_I2C_CMD_SMBUS_READ : + NVSWITCH_I2C_CMD_SMBUS_WRITE; + cmd = (NvU8)command; + xfer_data = (NvU8 *)&data->byte; + break; + } + case I2C_SMBUS_WORD_DATA: + { + cmd = (NvU8)command; + len = 2; + type = (read_write == I2C_SMBUS_READ) ? + NVSWITCH_I2C_CMD_SMBUS_READ : + NVSWITCH_I2C_CMD_SMBUS_WRITE; + xfer_data = (NvU8 *)&data->word; + break; + } + default: + { + status = NVL_BAD_ARGS; + goto nvswitch_i2c_algo_smbus_xfer_exit; + } + } + + status = nvswitch_lib_i2c_transfer(nvswitch_dev->lib_device, port, + type, (NvU8)(addr & 0x7f), + cmd, len, (NvU8 *)xfer_data); + +nvswitch_i2c_algo_smbus_xfer_exit: + mutex_unlock(&nvswitch_dev->device_mutex); + + return nvswitch_map_status(status); +} + +static u32 nvswitch_i2c_algo_functionality(struct i2c_adapter *adapter) +{ + return (I2C_FUNC_I2C | + I2C_FUNC_SMBUS_QUICK | + I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA); +} + +static struct i2c_algorithm nvswitch_i2c_algo = { + .master_xfer = nvswitch_i2c_algo_master_xfer, + .smbus_xfer = nvswitch_i2c_algo_smbus_xfer, + .functionality = nvswitch_i2c_algo_functionality, +}; + +struct i2c_adapter nvswitch_i2c_adapter_prototype = { + .owner = THIS_MODULE, + .algo = &nvswitch_i2c_algo, + .algo_data = NULL, +}; + +struct i2c_adapter * +nvswitch_i2c_add_adapter +( + NVSWITCH_DEV *nvswitch_dev, + NvU32 port +) +{ + struct i2c_adapter *adapter = NULL; + int rc = 0; + struct pci_dev *pci_dev; + nvswitch_i2c_algo_data *i2c_algo_data = NULL; + + if (nvswitch_dev == NULL) + { + printk(KERN_ERR "nvswitch_dev is NULL!\n"); + return NULL; + } + + adapter = nvswitch_os_malloc(sizeof(struct i2c_adapter)); + if (adapter == NULL) + { + return NULL; + } + + nvswitch_os_memcpy(adapter, + &nvswitch_i2c_adapter_prototype, + sizeof(struct i2c_adapter)); + + i2c_algo_data = nvswitch_os_malloc(sizeof(nvswitch_i2c_algo_data)); + if (i2c_algo_data == NULL) + { + goto cleanup; + } + + i2c_algo_data->port = port; + pci_dev = nvswitch_dev->pci_dev; + adapter->dev.parent = &pci_dev->dev; + adapter->algo_data = (void *)i2c_algo_data; + + rc = nvswitch_os_snprintf(adapter->name, + sizeof(adapter->name), + "NVIDIA NVSwitch i2c adapter %u at %x:%02x.%u", + port, + NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), + PCI_FUNC(pci_dev->devfn)); + if ((rc < 0) && (rc >= sizeof(adapter->name))) + { + goto cleanup; + } + + rc = i2c_add_adapter(adapter); + if (rc < 0) + { + goto cleanup; + } + + return adapter; + +cleanup: + nvswitch_os_free(i2c_algo_data); + nvswitch_os_free(adapter); + + return NULL; +} + +void +nvswitch_i2c_del_adapter +( + struct i2c_adapter *adapter +) +{ + if (adapter != NULL) + { + nvswitch_os_free(adapter->algo_data); + i2c_del_adapter(adapter); + nvswitch_os_free(adapter); + } +} + +#else // (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)) + +struct i2c_adapter * +nvswitch_i2c_add_adapter +( + NVSWITCH_DEV *nvswitch_dev, + NvU32 port +) +{ + return NULL; +} + +void +nvswitch_i2c_del_adapter +( + struct i2c_adapter *adapter +) +{ +} + +#endif // (defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)) diff --git 
a/kernel-open/nvidia/ioctl_common_nvswitch.h b/kernel-open/nvidia/ioctl_common_nvswitch.h new file mode 100644 index 000000000..2f23260aa --- /dev/null +++ b/kernel-open/nvidia/ioctl_common_nvswitch.h @@ -0,0 +1,144 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _IOCTL_COMMON_NVSWITCH_H_ +#define _IOCTL_COMMON_NVSWITCH_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define NVSWITCH_DEV_IO_TYPE 'd' +#define NVSWITCH_CTL_IO_TYPE 'c' + +/* + * Defines for IOCTL Hints + * + * NVSWITCH_IO_READ_ONLY : + * Only reads parameters from the kernel and does not pass any to it + * + * NVSWITCH_IO_WRITE_ONLY : + * Only writes parameters to the kernel, but does not want anything back. + * + * NVSWITCH_IO_WRITE_READ : + * Writes data to the kernel and wants information back + * + * NVSWITCH_IO_DEFAULT : + * Don't copy anything into the kernel, nor copy anything back. + */ +#define NVSWITCH_IO_READ_ONLY 0x0 +#define NVSWITCH_IO_WRITE_ONLY 0x1 +#define NVSWITCH_IO_WRITE_READ 0x2 +#define NVSWITCH_IO_DEFAULT 0x3 + +#if (defined(_WIN32) || defined(_WIN64)) +/* + * Values of less than 0x800 are reserved for Microsoft. + * Values of 0x800 and higher can be used by vendors. + */ +#define IOCTL_START_INDEX 0x800 + +/* + * Macro for defining new IOCTLs in a platform independent way. + */ +#define NVSWITCH_IOCTL_CODE(ioType, ctrl, paramType, direction) \ + CTL_CODE(FILE_DEVICE_UNKNOWN, IOCTL_START_INDEX + ctrl, METHOD_BUFFERED, \ + (FILE_READ_DATA | FILE_WRITE_DATA)) +#else + +/* + * Macro for defining new IOCTLs in a platform independent way. + * + * Select Linux specific IOCTL defining macro (_IO, _IOR, _IOW, _IOWR) + * based on IOCTL direction. + */ +#define NVSWITCH_IOCTL_CODE(ioType, ctrl, paramType, direction) \ + ((direction == NVSWITCH_IO_READ_ONLY) ? _IOR(ioType, ctrl, paramType) : \ + (direction == NVSWITCH_IO_WRITE_ONLY) ? _IOW(ioType, ctrl, paramType) : \ + (direction == NVSWITCH_IO_WRITE_READ) ? _IOWR(ioType, ctrl, paramType) : \ + _IO(ioType, ctrl)) + +#endif // (defined(_WIN32) || defined(_WIN64)) + +/* + * NVSWITCH_NVLINK_MAX_LANES is used by both internal and exteranl IOCTLs. + */ +#define NVSWITCH_NVLINK_MAX_LANES 4 + +/* + * Common Fabric State enums + * + * Definitions: + * Driver Fabric State is intended to reflect the state of the driver and + * fabric manager. 
Once FM sets the Driver State to CONFIGURED, it is + * expected the FM will send heartbeat updates. If the heartbeat is not + * received before the session timeout, then the driver reports status + * as MANAGER_TIMEOUT. + * + * Device Fabric State reflects the state of the nvswitch device. + * FM sets the Device Fabric State to CONFIGURED once FM is managing the + * device. If the Device Fabric State is BLACKLISTED then the device is + * not available for use; opens fail for a blacklisted device, and interrupts + * are disabled. + * + * Blacklist Reason provides additional detail of why a device is blacklisted. + */ +typedef enum nvswitch_driver_fabric_state +{ + NVSWITCH_DRIVER_FABRIC_STATE_OFFLINE = 0, // offline (No driver loaded) + NVSWITCH_DRIVER_FABRIC_STATE_STANDBY, // driver up, no FM + NVSWITCH_DRIVER_FABRIC_STATE_CONFIGURED, // driver up, FM up + NVSWITCH_DRIVER_FABRIC_STATE_MANAGER_TIMEOUT, // driver up, FM timed out + NVSWITCH_DRIVER_FABRIC_STATE_MANAGER_ERROR, // driver up, FM in error state + NVSWITCH_DRIVER_FABRIC_STATE_COUNT +} NVSWITCH_DRIVER_FABRIC_STATE; + +typedef enum nvswitch_device_fabric_state +{ + NVSWITCH_DEVICE_FABRIC_STATE_OFFLINE = 0, // offline: No driver, no FM + NVSWITCH_DEVICE_FABRIC_STATE_STANDBY, // driver up, no FM, not blacklisted + NVSWITCH_DEVICE_FABRIC_STATE_CONFIGURED, // driver up, FM up, not blacklisted + NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED, // device is blacklisted + NVSWITCH_DEVICE_FABRIC_STATE_COUNT +} NVSWITCH_DEVICE_FABRIC_STATE; + +typedef enum nvswitch_device_blacklist_mode +{ + NVSWITCH_DEVICE_BLACKLIST_REASON_NONE = 0, // device is not blacklisted + NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_OUT_OF_BAND, // manually blacklisted by out-of-band client + NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_IN_BAND, // manually blacklisted by in-band OS config + NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_PEER, // FM indicates blacklisted due to peer manual blacklisted + NVSWITCH_DEVICE_BLACKLIST_REASON_TRUNK_LINK_FAILURE, // FM indicates blacklisted due to trunk link failure + NVSWITCH_DEVICE_BLACKLIST_REASON_TRUNK_LINK_FAILURE_PEER, // FM indicates blacklisted due to trunk link failure of peer + NVSWITCH_DEVICE_BLACKLIST_REASON_ACCESS_LINK_FAILURE, // FM indicates blacklisted due to access link failure + NVSWITCH_DEVICE_BLACKLIST_REASON_ACCESS_LINK_FAILURE_PEER, // FM indicates blacklisted due to access link failure of peer + NVSWITCH_DEVICE_BLACKLIST_REASON_UNSPEC_DEVICE_FAILURE, // FM indicates blacklisted due to unspecified device failure + NVSWITCH_DEVICE_BLACKLIST_REASON_UNSPEC_DEVICE_FAILURE_PEER // FM indicates blacklisted due to unspec device failure of peer +} NVSWITCH_DEVICE_BLACKLIST_REASON; + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif //_IOCTL_COMMON_NVSWITCH_H_ diff --git a/kernel-open/nvidia/ioctl_nvswitch.h b/kernel-open/nvidia/ioctl_nvswitch.h new file mode 100644 index 000000000..fcae542c9 --- /dev/null +++ b/kernel-open/nvidia/ioctl_nvswitch.h @@ -0,0 +1,238 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _IOCTL_NVSWITCH_H_ +#define _IOCTL_NVSWITCH_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + +#include "ioctl_common_nvswitch.h" +#include "nvCpuUuid.h" + +/* 4 chars for "SWX-" prefix + 36 chars for UUID string + 1 char for '\0' */ +#define NVSWITCH_UUID_STRING_LENGTH 41 + +#define NVSWITCH_NIBBLE_TO_CHAR(nibble) \ + (((nibble) > 9) ? (((nibble) - 10) + 'A') : ((nibble) + '0')) + +static NV_INLINE +NvU32 nvswitch_uuid_to_string(NvUuid *uuid, char *str, NvU32 strLen) +{ + int i; + int j = 0; + + if ((uuid == NULL) || (str == NULL) || (strLen < NVSWITCH_UUID_STRING_LENGTH)) + { + return 0; + } + + str[j++] = 'S'; + str[j++] = 'W'; + str[j++] = 'X'; + str[j++] = '-'; + + for (i = 0; i < NV_UUID_LEN; i++) + { + if ((i == 4) || (i == 6) || (i == 8) || (i == 10)) + { + str[j++] = '-'; + } + + str[j++] = NVSWITCH_NIBBLE_TO_CHAR((uuid->uuid[i] & 0xF0) >> 4); + str[j++] = NVSWITCH_NIBBLE_TO_CHAR(uuid->uuid[i] & 0x0F); + } + + str[j++] = '\0'; + + return j; +} + +/* + * This file defines IOCTL calls that work with nvidia-nvswitchctl + * (device agnostic) node. + */ + +#define NVSWITCH_VERSION_STRING_LENGTH 64 + +/* + * Version string + */ +typedef struct +{ + char version[NVSWITCH_VERSION_STRING_LENGTH]; +} NVSWITCH_VERSION; + +/* + * NVSWITCH_CTL_CHECK_VERSION + * + * The interface will check if the client's version is supported by the driver. + * + * Parameters: + * user[in] + * Version of the interface that the client is compiled with. + * kernel[out] + * Version of the interface that the kernel driver is compiled with. + * is_compatible[out] + * Set to true, if user and kernel version are compatible. + */ +typedef struct +{ + NVSWITCH_VERSION user; + NVSWITCH_VERSION kernel; + NvBool is_compatible; +} NVSWITCH_CHECK_VERSION_PARAMS; + +/* + * Max devices supported by the driver + * + * See ctrl_dev_nvswitch.h for preprocessor definition modification guidelines. + */ +#define NVSWITCH_MAX_DEVICES 64 + +/* + * NVSWITCH_CTL_GET_DEVICES + * + * This control call will be removed soon. Use NVSWITCH_CTL_GET_DEVICES_V2 instead. + * + * Provides information about registered NvSwitch devices. + * + * Parameters: + * deviceInstance[out] + * Device instance of the device. This is same as the device minor number + * for Linux platforms. 
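+ *
+ * For illustration, a user-space client might issue this control call as
+ * follows (a minimal sketch; the file descriptor and error handling are
+ * hypothetical, and new code should prefer NVSWITCH_CTL_GET_DEVICES_V2):
+ *
+ *     NVSWITCH_GET_DEVICES_PARAMS p = { 0 };
+ *     NvU32 i;
+ *
+ *     if (ioctl(ctl_fd, IOCTL_NVSWITCH_GET_DEVICES, &p) == 0)
+ *     {
+ *         for (i = 0; i < p.deviceCount; i++)
+ *             printf("nvswitch minor %u\n", p.info[i].deviceInstance);
+ *     }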
+ */ +typedef struct +{ + NvU32 deviceInstance; + NvU32 pciDomain; + NvU32 pciBus; + NvU32 pciDevice; + NvU32 pciFunction; + /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */ +} NVSWITCH_DEVICE_INSTANCE_INFO; + +typedef struct +{ + NvU32 deviceCount; + NVSWITCH_DEVICE_INSTANCE_INFO info[NVSWITCH_MAX_DEVICES]; + /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */ +} NVSWITCH_GET_DEVICES_PARAMS; + +/* + * NVSWITCH_CTL_GET_DEVICES_V2 + * + * Provides information about registered NvSwitch devices. + * V2 adds a UUID field to the device instance info struct + * + * Parameters: + * deviceInstance[out] + * Device instance of the device. This is same as the device minor number + * for Linux platforms. + */ +typedef struct +{ + NvU32 deviceInstance; + NvUuid uuid; + NvU32 pciDomain; + NvU32 pciBus; + NvU32 pciDevice; + NvU32 pciFunction; + NVSWITCH_DRIVER_FABRIC_STATE driverState; + NVSWITCH_DEVICE_FABRIC_STATE deviceState; + NVSWITCH_DEVICE_BLACKLIST_REASON deviceReason; + NvU32 physId; + + /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */ +} NVSWITCH_DEVICE_INSTANCE_INFO_V2; + +#define NVSWITCH_INVALID_PHYS_ID NV_U32_MAX + +typedef struct +{ + NvU32 deviceCount; + NVSWITCH_DEVICE_INSTANCE_INFO_V2 info[NVSWITCH_MAX_DEVICES]; + /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */ +} NVSWITCH_GET_DEVICES_V2_PARAMS; + +#define NVSWITCH_DEVICE_NAME_STRING_LENGTH 10 + +/* + * CTRL_NVSWITCH_GET_DEVICE_NODES + * + * Provides a mapping of the VMWare kernel device names (vmfgx[N]) and registered + * NVSwitch devices (nvidia-nvswitch[N]). + * + * This IOCTL is only implemented for VMWare. + * + * Parameters: + * deviceInstance[out] + * Device instance of the device. This is same as the device minor number + * for VMWare platforms. 
+ * dev_name[out] + * VMWare kernel device name of the nvswitch device (vmfgx[N]) + */ +typedef struct +{ + NvU32 deviceInstance; + NvUuid uuid; + NvU8 dev_name[NVSWITCH_DEVICE_NAME_STRING_LENGTH]; + /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */ +} NVSWITCH_DEVICE_NODE_INFO; + +typedef struct +{ + NvU32 deviceCount; + NVSWITCH_DEVICE_NODE_INFO info[NVSWITCH_MAX_DEVICES]; + /* See ctrl_dev_nvswitch.h for struct definition modification guidelines */ +} NVSWITCH_GET_DEVICE_NODES_PARAMS; + +#define CTRL_NVSWITCH_GET_DEVICES 0x01 +#define CTRL_NVSWITCH_CHECK_VERSION 0x02 +#define CTRL_NVSWITCH_GET_DEVICES_V2 0x03 +#define CTRL_NVSWITCH_GET_DEVICE_NODES 0x04 + +/* + * Nvswitchctl (device agnostic) IOCTLs + */ + +#define IOCTL_NVSWITCH_GET_DEVICES \ + NVSWITCH_IOCTL_CODE(NVSWITCH_CTL_IO_TYPE, CTRL_NVSWITCH_GET_DEVICES, NVSWITCH_GET_DEVICES_PARAMS, \ + NVSWITCH_IO_READ_ONLY) +#define IOCTL_NVSWITCH_CHECK_VERSION \ + NVSWITCH_IOCTL_CODE(NVSWITCH_CTL_IO_TYPE, CTRL_NVSWITCH_CHECK_VERSION, NVSWITCH_CHECK_VERSION_PARAMS, \ + NVSWITCH_IO_WRITE_READ) +#define IOCTL_NVSWITCH_GET_DEVICES_V2 \ + NVSWITCH_IOCTL_CODE(NVSWITCH_CTL_IO_TYPE, CTRL_NVSWITCH_GET_DEVICES_V2, NVSWITCH_GET_DEVICES_V2_PARAMS, \ + NVSWITCH_IO_READ_ONLY) +#define IOCTL_NVSWITCH_GET_DEVICE_NODES \ + NVSWITCH_IOCTL_CODE(NVSWITCH_CTL_IO_TYPE, CTRL_NVSWITCH_GET_DEVICE_NODES, NVSWITCH_GET_DEVICE_NODES_PARAMS, \ + NVSWITCH_IO_READ_ONLY) + +#ifdef __cplusplus +} +#endif + +#endif //_IOCTL_NVSWITCH_H_ diff --git a/kernel-open/nvidia/linux_nvswitch.c b/kernel-open/nvidia/linux_nvswitch.c new file mode 100644 index 000000000..ab2fb9886 --- /dev/null +++ b/kernel-open/nvidia/linux_nvswitch.c @@ -0,0 +1,2673 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "linux_nvswitch.h" + +#include + +#include "conftest.h" +#include "nvlink_errors.h" +#include "nvlink_linux.h" +#include "nvCpuUuid.h" +#include "nv-time.h" +#include "nvlink_caps.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ioctl_nvswitch.h" + +const static struct +{ + NvlStatus status; + int err; +} nvswitch_status_map[] = { + { NVL_ERR_GENERIC, -EIO }, + { NVL_NO_MEM, -ENOMEM }, + { NVL_BAD_ARGS, -EINVAL }, + { NVL_ERR_INVALID_STATE, -EIO }, + { NVL_ERR_NOT_SUPPORTED, -EOPNOTSUPP }, + { NVL_NOT_FOUND, -EINVAL }, + { NVL_ERR_STATE_IN_USE, -EBUSY }, + { NVL_ERR_NOT_IMPLEMENTED, -ENOSYS }, + { NVL_ERR_INSUFFICIENT_PERMISSIONS, -EPERM }, + { NVL_ERR_OPERATING_SYSTEM, -EIO }, + { NVL_MORE_PROCESSING_REQUIRED, -EAGAIN }, + { NVL_SUCCESS, 0 }, +}; + +int +nvswitch_map_status +( + NvlStatus status +) +{ + int err = -EIO; + NvU32 i; + NvU32 limit = sizeof(nvswitch_status_map) / sizeof(nvswitch_status_map[0]); + + for (i = 0; i < limit; i++) + { + if (nvswitch_status_map[i].status == status || + nvswitch_status_map[i].status == -status) + { + err = nvswitch_status_map[i].err; + break; + } + } + + return err; +} + +#if !defined(IRQF_SHARED) +#define IRQF_SHARED SA_SHIRQ +#endif + +#define NV_FILE_INODE(file) (file)->f_inode + +static int nvswitch_probe(struct pci_dev *, const struct pci_device_id *); +static void nvswitch_remove(struct pci_dev *); + +static struct pci_device_id nvswitch_pci_table[] = +{ + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_BRIDGE_OTHER << 8), + .class_mask = ~0 + }, + {} +}; + +static struct pci_driver nvswitch_pci_driver = +{ + .name = NVSWITCH_DRIVER_NAME, + .id_table = nvswitch_pci_table, + .probe = nvswitch_probe, + .remove = nvswitch_remove, + .shutdown = nvswitch_remove +}; + +// +// nvidia_nvswitch_mknod uses minor number 255 to create nvidia-nvswitchctl +// node. Hence, if NVSWITCH_CTL_MINOR is changed, then NV_NVSWITCH_CTL_MINOR +// should be updated. See nvdia-modprobe-utils.h +// +#define NVSWITCH_CTL_MINOR 255 +#define NVSWITCH_MINOR_COUNT (NVSWITCH_CTL_MINOR + 1) + +// 32 bit hex value - including 0x prefix. (10 chars) +#define NVSWITCH_REGKEY_VALUE_LEN 10 + +static char *NvSwitchRegDwords; +module_param(NvSwitchRegDwords, charp, 0); +MODULE_PARM_DESC(NvSwitchRegDwords, "NvSwitch regkey"); + +static char *NvSwitchBlacklist; +module_param(NvSwitchBlacklist, charp, 0); +MODULE_PARM_DESC(NvSwitchBlacklist, "NvSwitchBlacklist=uuid[,uuid...]"); + +// +// Locking: +// We handle nvswitch driver locking in the OS layer. The nvswitch lib +// layer does not have its own locking. It relies on the OS layer for +// atomicity. +// +// All locking is done with sleep locks. We use threaded MSI interrupts to +// facilitate this. +// +// When handling a request from a user context we use the interruptible +// version to enable a quick ^C return if there is lock contention. +// +// nvswitch.driver_mutex is used to protect driver's global state, "struct +// NVSWITCH". The driver_mutex is taken during .probe, .remove, .open, +// .close, and nvswitch-ctl .ioctl operations. +// +// nvswitch_dev.device_mutex is used to protect per-device state, "struct +// NVSWITCH_DEV", once a device is opened. The device_mutex is taken during +// .ioctl, .poll and other background tasks. 
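+//
+// For illustration, per-device entry points in this file follow this shape
+// (a sketch of the pattern used by .ioctl, .poll and the I2C transfer paths):
+//
+//     rc = mutex_lock_interruptible(&nvswitch_dev->device_mutex);
+//     if (rc)
+//         return rc;              /* interrupted by a signal, e.g. ^C */
+//     /* ... operate on per-device state ... */
+//     mutex_unlock(&nvswitch_dev->device_mutex);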
+// +// The kernel guarantees that .close won't happen while .ioctl and .poll +// are going on and without successful .open one can't execute any file ops. +// This behavior guarantees correctness of the locking model. +// +// If .close is invoked and holding the lock which is also used by threaded +// tasks such as interrupt, driver will deadlock while trying to stop such +// tasks. For example, when threaded interrupts are enabled, free_irq() calls +// kthread_stop() to flush pending interrupt tasks. The locking model +// makes sure that such deadlock cases don't happen. +// +// Lock ordering: +// nvswitch.driver_mutex +// nvswitch_dev.device_mutex +// +// Note: +// Due to bug 2856314, nvswitch_dev.device_mutex is taken when calling +// nvswitch_post_init_device() in nvswitch_probe(). +// + +// Per-chip driver state is defined in linux_nvswitch.h + +// Global driver state +typedef struct +{ + NvBool initialized; + struct cdev cdev; + struct cdev cdev_ctl; + dev_t devno; + atomic_t count; + struct mutex driver_mutex; + struct list_head devices; +} NVSWITCH; + +static NVSWITCH nvswitch = {0}; + +// NvSwitch event +typedef struct nvswitch_event_t +{ + wait_queue_head_t wait_q_event; + NvBool event_pending; +} nvswitch_event_t; + +typedef struct nvswitch_file_private +{ + NVSWITCH_DEV *nvswitch_dev; + nvswitch_event_t file_event; + struct + { + /* A duped file descriptor for fabric_mgmt capability */ + int fabric_mgmt; + } capability_fds; +} nvswitch_file_private_t; + +#define NVSWITCH_SET_FILE_PRIVATE(filp, data) ((filp)->private_data = (data)) +#define NVSWITCH_GET_FILE_PRIVATE(filp) ((nvswitch_file_private_t *)(filp)->private_data) + +static int nvswitch_device_open(struct inode *inode, struct file *file); +static int nvswitch_device_release(struct inode *inode, struct file *file); +static unsigned int nvswitch_device_poll(struct file *file, poll_table *wait); +static int nvswitch_device_ioctl(struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long arg); +static long nvswitch_device_unlocked_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg); + +static int nvswitch_ctl_ioctl(struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long arg); +static long nvswitch_ctl_unlocked_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg); + +struct file_operations device_fops = +{ + .owner = THIS_MODULE, +#if defined(NV_FILE_OPERATIONS_HAS_IOCTL) + .ioctl = nvswitch_device_ioctl, +#endif + .unlocked_ioctl = nvswitch_device_unlocked_ioctl, + .open = nvswitch_device_open, + .release = nvswitch_device_release, + .poll = nvswitch_device_poll +}; + +struct file_operations ctl_fops = +{ + .owner = THIS_MODULE, +#if defined(NV_FILE_OPERATIONS_HAS_IOCTL) + .ioctl = nvswitch_ctl_ioctl, +#endif + .unlocked_ioctl = nvswitch_ctl_unlocked_ioctl, +}; + +static int nvswitch_initialize_device_interrupt(NVSWITCH_DEV *nvswitch_dev); +static void nvswitch_shutdown_device_interrupt(NVSWITCH_DEV *nvswitch_dev); +static void nvswitch_load_bar_info(NVSWITCH_DEV *nvswitch_dev); +static void nvswitch_task_dispatch(NVSWITCH_DEV *nvswitch_dev); + +static NvBool +nvswitch_is_device_blacklisted +( + NVSWITCH_DEV *nvswitch_dev +) +{ + NVSWITCH_DEVICE_FABRIC_STATE device_fabric_state = 0; + NvlStatus status; + + status = nvswitch_lib_read_fabric_state(nvswitch_dev->lib_device, + &device_fabric_state, NULL, NULL); + + if (status != NVL_SUCCESS) + { + printk(KERN_INFO "%s: Failed to read fabric state, %x\n", nvswitch_dev->name, status); + return NV_FALSE; + } + + 
return device_fabric_state == NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED; +} + +static void +nvswitch_deinit_background_tasks +( + NVSWITCH_DEV *nvswitch_dev +) +{ + NV_ATOMIC_SET(nvswitch_dev->task_q_ready, 0); + + wake_up(&nvswitch_dev->wait_q_shutdown); + + nv_kthread_q_stop(&nvswitch_dev->task_q); +} + +static int +nvswitch_init_background_tasks +( + NVSWITCH_DEV *nvswitch_dev +) +{ + int rc; + + rc = nv_kthread_q_init(&nvswitch_dev->task_q, nvswitch_dev->sname); + if (rc) + { + printk(KERN_ERR "%s: Failed to create task queue\n", nvswitch_dev->name); + return rc; + } + + NV_ATOMIC_SET(nvswitch_dev->task_q_ready, 1); + + nv_kthread_q_item_init(&nvswitch_dev->task_item, + (nv_q_func_t) &nvswitch_task_dispatch, + nvswitch_dev); + + if (!nv_kthread_q_schedule_q_item(&nvswitch_dev->task_q, + &nvswitch_dev->task_item)) + { + printk(KERN_ERR "%s: Failed to schedule an item\n",nvswitch_dev->name); + rc = -ENODEV; + goto init_background_task_failed; + } + + return 0; + +init_background_task_failed: + nvswitch_deinit_background_tasks(nvswitch_dev); + + return rc; +} + +static NVSWITCH_DEV* +nvswitch_find_device(int minor) +{ + struct list_head *cur; + NVSWITCH_DEV *nvswitch_dev = NULL; + + list_for_each(cur, &nvswitch.devices) + { + nvswitch_dev = list_entry(cur, NVSWITCH_DEV, list_node); + if (nvswitch_dev->minor == minor) + { + return nvswitch_dev; + } + } + + return NULL; +} + +static int +nvswitch_find_minor(void) +{ + struct list_head *cur; + NVSWITCH_DEV *nvswitch_dev; + int minor; + int minor_in_use; + + for (minor = 0; minor < NVSWITCH_DEVICE_INSTANCE_MAX; minor++) + { + minor_in_use = 0; + + list_for_each(cur, &nvswitch.devices) + { + nvswitch_dev = list_entry(cur, NVSWITCH_DEV, list_node); + if (nvswitch_dev->minor == minor) + { + minor_in_use = 1; + break; + } + } + + if (!minor_in_use) + { + return minor; + } + } + + return NVSWITCH_DEVICE_INSTANCE_MAX; +} + +static int +nvswitch_init_i2c_adapters +( + NVSWITCH_DEV *nvswitch_dev +) +{ + NvlStatus retval; + NvU32 i, valid_ports_mask; + struct i2c_adapter *adapter; + nvswitch_i2c_adapter_entry *adapter_entry; + + if (!nvswitch_lib_is_i2c_supported(nvswitch_dev->lib_device)) + { + return 0; + } + + retval = nvswitch_lib_get_valid_ports_mask(nvswitch_dev->lib_device, + &valid_ports_mask); + if (retval != NVL_SUCCESS) + { + printk(KERN_ERR "Failed to get valid I2C ports mask.\n"); + return -ENODEV; + } + + FOR_EACH_INDEX_IN_MASK(32, i, valid_ports_mask) + { + adapter = nvswitch_i2c_add_adapter(nvswitch_dev, i); + if (adapter == NULL) + { + continue; + } + + adapter_entry = nvswitch_os_malloc(sizeof(*adapter_entry)); + if (adapter_entry == NULL) + { + printk(KERN_ERR "Failed to create I2C adapter entry.\n"); + nvswitch_i2c_del_adapter(adapter); + continue; + } + + adapter_entry->adapter = adapter; + + list_add_tail(&adapter_entry->entry, &nvswitch_dev->i2c_adapter_list); + } + FOR_EACH_INDEX_IN_MASK_END; + + return 0; +} + +static void +nvswitch_deinit_i2c_adapters +( + NVSWITCH_DEV *nvswitch_dev +) +{ + nvswitch_i2c_adapter_entry *curr; + nvswitch_i2c_adapter_entry *next; + + list_for_each_entry_safe(curr, + next, + &nvswitch_dev->i2c_adapter_list, + entry) + { + nvswitch_i2c_del_adapter(curr->adapter); + list_del(&curr->entry); + nvswitch_os_free(curr); + } +} + +static int +nvswitch_init_device +( + NVSWITCH_DEV *nvswitch_dev +) +{ + struct pci_dev *pci_dev = nvswitch_dev->pci_dev; + NvlStatus retval; + int rc; + + INIT_LIST_HEAD(&nvswitch_dev->i2c_adapter_list); + + retval = nvswitch_lib_register_device(NV_PCI_DOMAIN_NUMBER(pci_dev), + 
NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), + PCI_FUNC(pci_dev->devfn), + pci_dev->device, + pci_dev, + nvswitch_dev->minor, + &nvswitch_dev->lib_device); + if (NVL_SUCCESS != retval) + { + printk(KERN_ERR "%s: Failed to register device : %d\n", + nvswitch_dev->name, + retval); + return -ENODEV; + } + + nvswitch_load_bar_info(nvswitch_dev); + + retval = nvswitch_lib_initialize_device(nvswitch_dev->lib_device); + if (NVL_SUCCESS != retval) + { + printk(KERN_ERR "%s: Failed to initialize device : %d\n", + nvswitch_dev->name, + retval); + rc = -ENODEV; + goto init_device_failed; + } + + nvswitch_lib_get_uuid(nvswitch_dev->lib_device, &nvswitch_dev->uuid); + + if (nvswitch_lib_get_bios_version(nvswitch_dev->lib_device, + &nvswitch_dev->bios_ver) != NVL_SUCCESS) + { + nvswitch_dev->bios_ver = 0; + } + + if (nvswitch_lib_get_physid(nvswitch_dev->lib_device, + &nvswitch_dev->phys_id) != NVL_SUCCESS) + { + nvswitch_dev->phys_id = NVSWITCH_INVALID_PHYS_ID; + } + + rc = nvswitch_initialize_device_interrupt(nvswitch_dev); + if (rc) + { + printk(KERN_ERR "%s: Failed to initialize interrupt : %d\n", + nvswitch_dev->name, + rc); + goto init_intr_failed; + } + + if (nvswitch_is_device_blacklisted(nvswitch_dev)) + { + printk(KERN_ERR "%s: Blacklisted nvswitch device\n", nvswitch_dev->name); + // Keep device registered for HAL access and Fabric State updates + return 0; + } + + nvswitch_lib_enable_interrupts(nvswitch_dev->lib_device); + + return 0; + +init_intr_failed: + nvswitch_lib_shutdown_device(nvswitch_dev->lib_device); + +init_device_failed: + nvswitch_lib_unregister_device(nvswitch_dev->lib_device); + nvswitch_dev->lib_device = NULL; + + return rc; +} + +static int +nvswitch_post_init_device +( + NVSWITCH_DEV *nvswitch_dev +) +{ + int rc; + NvlStatus retval; + + rc = nvswitch_init_i2c_adapters(nvswitch_dev); + if (rc < 0) + { + return rc; + } + + retval = nvswitch_lib_post_init_device(nvswitch_dev->lib_device); + if (retval != NVL_SUCCESS) + { + return -ENODEV; + } + + return 0; +} + +static void +nvswitch_post_init_blacklisted +( + NVSWITCH_DEV *nvswitch_dev +) +{ + nvswitch_lib_post_init_blacklist_device(nvswitch_dev->lib_device); +} + +static void +nvswitch_deinit_device +( + NVSWITCH_DEV *nvswitch_dev +) +{ + nvswitch_lib_disable_interrupts(nvswitch_dev->lib_device); + + nvswitch_shutdown_device_interrupt(nvswitch_dev); + + nvswitch_lib_shutdown_device(nvswitch_dev->lib_device); + + nvswitch_lib_unregister_device(nvswitch_dev->lib_device); + nvswitch_dev->lib_device = NULL; +} + +static void +nvswitch_init_file_event +( + nvswitch_file_private_t *private +) +{ + init_waitqueue_head(&private->file_event.wait_q_event); + private->file_event.event_pending = NV_FALSE; +} + +// +// Basic device open to support IOCTL interface +// +static int +nvswitch_device_open +( + struct inode *inode, + struct file *file +) +{ + NVSWITCH_DEV *nvswitch_dev; + int rc = 0; + nvswitch_file_private_t *private = NULL; + + // + // Get the major/minor device + // We might want this for routing requests to multiple nvswitches + // + printk(KERN_INFO "nvidia-nvswitch%d: open (major=%d)\n", + MINOR(inode->i_rdev), + MAJOR(inode->i_rdev)); + + rc = mutex_lock_interruptible(&nvswitch.driver_mutex); + if (rc) + { + return rc; + } + + nvswitch_dev = nvswitch_find_device(MINOR(inode->i_rdev)); + if (!nvswitch_dev) + { + rc = -ENODEV; + goto done; + } + + if (nvswitch_is_device_blacklisted(nvswitch_dev)) + { + rc = -ENODEV; + goto done; + } + + private = nvswitch_os_malloc(sizeof(*private)); + if (private == NULL) 
+ { + rc = -ENOMEM; + goto done; + } + + private->nvswitch_dev = nvswitch_dev; + + nvswitch_init_file_event(private); + + private->capability_fds.fabric_mgmt = -1; + NVSWITCH_SET_FILE_PRIVATE(file, private); + + NV_ATOMIC_INC(nvswitch_dev->ref_count); + +done: + mutex_unlock(&nvswitch.driver_mutex); + + return rc; +} + +// +// Basic device release to support IOCTL interface +// +static int +nvswitch_device_release +( + struct inode *inode, + struct file *file +) +{ + nvswitch_file_private_t *private = NVSWITCH_GET_FILE_PRIVATE(file); + NVSWITCH_DEV *nvswitch_dev = private->nvswitch_dev; + + printk(KERN_INFO "nvidia-nvswitch%d: release (major=%d)\n", + MINOR(inode->i_rdev), + MAJOR(inode->i_rdev)); + + mutex_lock(&nvswitch.driver_mutex); + + nvswitch_lib_remove_client_events(nvswitch_dev->lib_device, (void *)private); + + // + // If there are no outstanding references and the device is marked + // unusable, free it. + // + if (NV_ATOMIC_DEC_AND_TEST(nvswitch_dev->ref_count) && + nvswitch_dev->unusable) + { + kfree(nvswitch_dev); + } + + if (private->capability_fds.fabric_mgmt > 0) + { + nvlink_cap_release(private->capability_fds.fabric_mgmt); + private->capability_fds.fabric_mgmt = -1; + } + + nvswitch_os_free(file->private_data); + NVSWITCH_SET_FILE_PRIVATE(file, NULL); + + mutex_unlock(&nvswitch.driver_mutex); + + return 0; +} + +static unsigned int +nvswitch_device_poll +( + struct file *file, + poll_table *wait +) +{ + nvswitch_file_private_t *private = NVSWITCH_GET_FILE_PRIVATE(file); + NVSWITCH_DEV *nvswitch_dev = private->nvswitch_dev; + int rc = 0; + NvlStatus status; + struct NVSWITCH_CLIENT_EVENT *client_event; + + rc = mutex_lock_interruptible(&nvswitch_dev->device_mutex); + if (rc) + { + return rc; + } + + if (nvswitch_dev->unusable) + { + printk(KERN_INFO "%s: a stale fd detected\n", nvswitch_dev->name); + rc = POLLHUP; + goto done; + } + + status = nvswitch_lib_get_client_event(nvswitch_dev->lib_device, + (void *) private, &client_event); + if (status != NVL_SUCCESS) + { + printk(KERN_INFO "%s: no events registered for fd\n", nvswitch_dev->name); + rc = POLLERR; + goto done; + } + + poll_wait(file, &private->file_event.wait_q_event, wait); + + if (private->file_event.event_pending) + { + rc = POLLPRI | POLLIN; + private->file_event.event_pending = NV_FALSE; + } + +done: + mutex_unlock(&nvswitch_dev->device_mutex); + + return rc; +} + +typedef struct { + void *kernel_params; // Kernel copy of ioctl parameters + unsigned long kernel_params_size; // Size of ioctl params according to user +} IOCTL_STATE; + +// +// Clean up any dynamically allocated memory for ioctl state +// +static void +nvswitch_ioctl_state_cleanup +( + IOCTL_STATE *state +) +{ + kfree(state->kernel_params); + state->kernel_params = NULL; +} + +// +// Initialize buffer state for ioctl. +// +// This handles allocating memory and copying user data into kernel space. The +// ioctl params structure only is supported. Nested data pointers are not handled. +// +// State is maintained in the IOCTL_STATE struct for use by the ioctl, _sync and +// _cleanup calls. +// +static int +nvswitch_ioctl_state_start(IOCTL_STATE *state, int cmd, unsigned long user_arg) +{ + int rc; + + state->kernel_params = NULL; + state->kernel_params_size = _IOC_SIZE(cmd); + + if (0 == state->kernel_params_size) + { + return 0; + } + + state->kernel_params = kzalloc(state->kernel_params_size, GFP_KERNEL); + if (NULL == state->kernel_params) + { + rc = -ENOMEM; + goto nvswitch_ioctl_state_start_fail; + } + + // Copy params to kernel buffers. 
Simple _IOR() ioctls can skip this step. + if (_IOC_DIR(cmd) & _IOC_WRITE) + { + rc = copy_from_user(state->kernel_params, + (const void *)user_arg, + state->kernel_params_size); + if (rc) + { + rc = -EFAULT; + goto nvswitch_ioctl_state_start_fail; + } + } + + return 0; + +nvswitch_ioctl_state_start_fail: + nvswitch_ioctl_state_cleanup(state); + return rc; +} + +// +// Synchronize any ioctl output in the kernel buffers to the user mode buffers. +// +static int +nvswitch_ioctl_state_sync +( + IOCTL_STATE *state, + int cmd, + unsigned long user_arg +) +{ + int rc; + + // Nothing to do if no buffer or write-only ioctl + if ((0 == state->kernel_params_size) || (0 == (_IOC_DIR(cmd) & _IOC_READ))) + { + return 0; + } + + // Copy params structure back to user mode + rc = copy_to_user((void *)user_arg, + state->kernel_params, + state->kernel_params_size); + if (rc) + { + rc = -EFAULT; + } + + return rc; +} + +static int +nvswitch_device_ioctl +( + struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long arg +) +{ + nvswitch_file_private_t *private = NVSWITCH_GET_FILE_PRIVATE(file); + NVSWITCH_DEV *nvswitch_dev = private->nvswitch_dev; + IOCTL_STATE state = {0}; + NvlStatus retval; + int rc = 0; + + if (_IOC_TYPE(cmd) != NVSWITCH_DEV_IO_TYPE) + { + return -EINVAL; + } + + rc = mutex_lock_interruptible(&nvswitch_dev->device_mutex); + if (rc) + { + return rc; + } + + if (nvswitch_dev->unusable) + { + printk(KERN_INFO "%s: a stale fd detected\n", nvswitch_dev->name); + rc = -ENODEV; + goto nvswitch_device_ioctl_exit; + } + + if (nvswitch_is_device_blacklisted(nvswitch_dev)) + { + printk(KERN_INFO "%s: ioctl attempted on blacklisted device\n", nvswitch_dev->name); + rc = -ENODEV; + goto nvswitch_device_ioctl_exit; + } + + rc = nvswitch_ioctl_state_start(&state, cmd, arg); + if (rc) + { + goto nvswitch_device_ioctl_exit; + } + + retval = nvswitch_lib_ctrl(nvswitch_dev->lib_device, + _IOC_NR(cmd), + state.kernel_params, + state.kernel_params_size, + file->private_data); + rc = nvswitch_map_status(retval); + if (!rc) + { + rc = nvswitch_ioctl_state_sync(&state, cmd, arg); + } + + nvswitch_ioctl_state_cleanup(&state); + +nvswitch_device_ioctl_exit: + mutex_unlock(&nvswitch_dev->device_mutex); + + return rc; +} + +static long +nvswitch_device_unlocked_ioctl +( + struct file *file, + unsigned int cmd, + unsigned long arg +) +{ + return nvswitch_device_ioctl(NV_FILE_INODE(file), file, cmd, arg); +} + +static int +nvswitch_ctl_check_version(NVSWITCH_CHECK_VERSION_PARAMS *p) +{ + NvlStatus retval; + + p->is_compatible = 0; + p->user.version[NVSWITCH_VERSION_STRING_LENGTH - 1] = '\0'; + + retval = nvswitch_lib_check_api_version(p->user.version, p->kernel.version, + NVSWITCH_VERSION_STRING_LENGTH); + if (retval == NVL_SUCCESS) + { + p->is_compatible = 1; + } + else if (retval == -NVL_ERR_NOT_SUPPORTED) + { + printk(KERN_ERR "nvidia-nvswitch: Version mismatch, " + "kernel version %s user version %s\n", + p->kernel.version, p->user.version); + } + else + { + // An unexpected failure + return nvswitch_map_status(retval); + } + + return 0; +} + +static void +nvswitch_ctl_get_devices(NVSWITCH_GET_DEVICES_PARAMS *p) +{ + int index = 0; + NVSWITCH_DEV *nvswitch_dev; + struct list_head *cur; + + BUILD_BUG_ON(NVSWITCH_DEVICE_INSTANCE_MAX != NVSWITCH_MAX_DEVICES); + + list_for_each(cur, &nvswitch.devices) + { + nvswitch_dev = list_entry(cur, NVSWITCH_DEV, list_node); + p->info[index].deviceInstance = nvswitch_dev->minor; + p->info[index].pciDomain = NV_PCI_DOMAIN_NUMBER(nvswitch_dev->pci_dev); + 
p->info[index].pciBus = NV_PCI_BUS_NUMBER(nvswitch_dev->pci_dev); + p->info[index].pciDevice = NV_PCI_SLOT_NUMBER(nvswitch_dev->pci_dev); + p->info[index].pciFunction = PCI_FUNC(nvswitch_dev->pci_dev->devfn); + index++; + } + + p->deviceCount = index; +} + +static void +nvswitch_ctl_get_devices_v2(NVSWITCH_GET_DEVICES_V2_PARAMS *p) +{ + int index = 0; + NVSWITCH_DEV *nvswitch_dev; + struct list_head *cur; + + BUILD_BUG_ON(NVSWITCH_DEVICE_INSTANCE_MAX != NVSWITCH_MAX_DEVICES); + + list_for_each(cur, &nvswitch.devices) + { + nvswitch_dev = list_entry(cur, NVSWITCH_DEV, list_node); + p->info[index].deviceInstance = nvswitch_dev->minor; + memcpy(&p->info[index].uuid, &nvswitch_dev->uuid, sizeof(nvswitch_dev->uuid)); + p->info[index].pciDomain = NV_PCI_DOMAIN_NUMBER(nvswitch_dev->pci_dev); + p->info[index].pciBus = NV_PCI_BUS_NUMBER(nvswitch_dev->pci_dev); + p->info[index].pciDevice = NV_PCI_SLOT_NUMBER(nvswitch_dev->pci_dev); + p->info[index].pciFunction = PCI_FUNC(nvswitch_dev->pci_dev->devfn); + p->info[index].physId = nvswitch_dev->phys_id; + + if (nvswitch_dev->lib_device != NULL) + { + mutex_lock(&nvswitch_dev->device_mutex); + (void)nvswitch_lib_read_fabric_state(nvswitch_dev->lib_device, + &p->info[index].deviceState, + &p->info[index].deviceReason, + &p->info[index].driverState); + mutex_unlock(&nvswitch_dev->device_mutex); + } + index++; + } + + p->deviceCount = index; +} + +#define NVSWITCH_CTL_CHECK_PARAMS(type, size) (sizeof(type) == size ? 0 : -EINVAL) + +static int +nvswitch_ctl_cmd_dispatch +( + unsigned int cmd, + void *params, + unsigned int param_size +) +{ + int rc; + + switch(cmd) + { + case CTRL_NVSWITCH_CHECK_VERSION: + rc = NVSWITCH_CTL_CHECK_PARAMS(NVSWITCH_CHECK_VERSION_PARAMS, + param_size); + if (!rc) + { + rc = nvswitch_ctl_check_version(params); + } + break; + case CTRL_NVSWITCH_GET_DEVICES: + rc = NVSWITCH_CTL_CHECK_PARAMS(NVSWITCH_GET_DEVICES_PARAMS, + param_size); + if (!rc) + { + nvswitch_ctl_get_devices(params); + } + break; + case CTRL_NVSWITCH_GET_DEVICES_V2: + rc = NVSWITCH_CTL_CHECK_PARAMS(NVSWITCH_GET_DEVICES_V2_PARAMS, + param_size); + if (!rc) + { + nvswitch_ctl_get_devices_v2(params); + } + break; + + default: + rc = -EINVAL; + break; + } + + return rc; +} + +static int +nvswitch_ctl_ioctl +( + struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long arg +) +{ + int rc = 0; + IOCTL_STATE state = {0}; + + if (_IOC_TYPE(cmd) != NVSWITCH_CTL_IO_TYPE) + { + return -EINVAL; + } + + rc = mutex_lock_interruptible(&nvswitch.driver_mutex); + if (rc) + { + return rc; + } + + rc = nvswitch_ioctl_state_start(&state, cmd, arg); + if (rc) + { + goto nvswitch_ctl_ioctl_exit; + } + + rc = nvswitch_ctl_cmd_dispatch(_IOC_NR(cmd), + state.kernel_params, + state.kernel_params_size); + if (!rc) + { + rc = nvswitch_ioctl_state_sync(&state, cmd, arg); + } + + nvswitch_ioctl_state_cleanup(&state); + +nvswitch_ctl_ioctl_exit: + mutex_unlock(&nvswitch.driver_mutex); + + return rc; +} + +static long +nvswitch_ctl_unlocked_ioctl +( + struct file *file, + unsigned int cmd, + unsigned long arg +) +{ + return nvswitch_ctl_ioctl(NV_FILE_INODE(file), file, cmd, arg); +} + +static irqreturn_t +nvswitch_isr_pending +( + int irq, + void *arg +) +{ + + NVSWITCH_DEV *nvswitch_dev = (NVSWITCH_DEV *)arg; + NvlStatus retval; + + // + // On silicon MSI must be enabled. Since interrupts will not be shared + // with MSI, we can simply signal the thread. 
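+    //
+    // For illustration, this hard handler and nvswitch_isr_thread() are
+    // assumed to be registered as a threaded-IRQ pair (sketch only; the
+    // actual registration lives in nvswitch_initialize_device_interrupt(),
+    // and the flags depend on whether MSI or pin-based interrupts are used):
+    //
+    //     request_threaded_irq(irq, nvswitch_isr_pending, nvswitch_isr_thread,
+    //                          flags, nvswitch_dev->name, nvswitch_dev);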
+ // + if (nvswitch_dev->irq_mechanism == NVSWITCH_IRQ_MSI) + { + return IRQ_WAKE_THREAD; + } + + if (nvswitch_dev->irq_mechanism == NVSWITCH_IRQ_PIN) + { + // + // We do not take mutex in the interrupt context. The interrupt + // check is safe to driver state. + // + retval = nvswitch_lib_check_interrupts(nvswitch_dev->lib_device); + + // Wake interrupt thread if there is an interrupt pending + if (-NVL_MORE_PROCESSING_REQUIRED == retval) + { + nvswitch_lib_disable_interrupts(nvswitch_dev->lib_device); + return IRQ_WAKE_THREAD; + } + + // PCI errors are handled else where. + if (-NVL_PCI_ERROR == retval) + { + return IRQ_NONE; + } + + if (NVL_SUCCESS != retval) + { + pr_err("nvidia-nvswitch: unrecoverable error in ISR\n"); + NVSWITCH_OS_ASSERT(0); + } + return IRQ_NONE; + } + + pr_err("nvidia-nvswitch: unsupported IRQ mechanism in ISR\n"); + NVSWITCH_OS_ASSERT(0); + + return IRQ_NONE; +} + +static irqreturn_t +nvswitch_isr_thread +( + int irq, + void *arg +) +{ + NVSWITCH_DEV *nvswitch_dev = (NVSWITCH_DEV *)arg; + NvlStatus retval; + + mutex_lock(&nvswitch_dev->device_mutex); + + retval = nvswitch_lib_service_interrupts(nvswitch_dev->lib_device); + + wake_up(&nvswitch_dev->wait_q_errors); + + if (nvswitch_dev->irq_mechanism == NVSWITCH_IRQ_PIN) + { + nvswitch_lib_enable_interrupts(nvswitch_dev->lib_device); + } + + mutex_unlock(&nvswitch_dev->device_mutex); + + if (WARN_ON(retval != NVL_SUCCESS)) + { + printk(KERN_ERR "%s: Interrupts disabled to avoid a storm\n", + nvswitch_dev->name); + } + + return IRQ_HANDLED; +} + +static void +nvswitch_task_dispatch +( + NVSWITCH_DEV *nvswitch_dev +) +{ + NvU64 nsec; + NvU64 timeout; + NvS64 rc; + + if (NV_ATOMIC_READ(nvswitch_dev->task_q_ready) == 0) + { + return; + } + + mutex_lock(&nvswitch_dev->device_mutex); + + nsec = nvswitch_lib_deferred_task_dispatcher(nvswitch_dev->lib_device); + + mutex_unlock(&nvswitch_dev->device_mutex); + + timeout = usecs_to_jiffies(nsec / NSEC_PER_USEC); + + rc = wait_event_interruptible_timeout(nvswitch_dev->wait_q_shutdown, + (NV_ATOMIC_READ(nvswitch_dev->task_q_ready) == 0), + timeout); + + // + // These background tasks should rarely, if ever, get interrupted. We use + // the "interruptible" variant of wait_event in order to avoid contributing + // to the system load average (/proc/loadavg), and to avoid softlockup + // warnings that can occur if a kernel thread lingers too long in an + // uninterruptible state. If this does get interrupted, we'd like to debug + // and find out why, so WARN in that case. + // + WARN_ON(rc < 0); + + // + // Schedule a work item only if the above actually timed out or got + // interrupted, without the condition becoming true. 
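+    // wait_event_interruptible_timeout() returns 0 if the timeout expired, a
+    // negative value if the wait was interrupted, and a positive value if
+    // task_q_ready was cleared for shutdown, in which case no further
+    // dispatch is needed.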
+ // + if (rc <= 0) + { + if (!nv_kthread_q_schedule_q_item(&nvswitch_dev->task_q, + &nvswitch_dev->task_item)) + { + printk(KERN_ERR "%s: Failed to re-schedule background task\n", + nvswitch_dev->name); + } + } +} + +static int +nvswitch_probe +( + struct pci_dev *pci_dev, + const struct pci_device_id *id_table +) +{ + NVSWITCH_DEV *nvswitch_dev = NULL; + int rc = 0; + int minor; + + if (!nvswitch_lib_validate_device_id(pci_dev->device)) + { + return -EINVAL; + } + + printk(KERN_INFO "nvidia-nvswitch: Probing device %04x:%02x:%02x.%x, " + "Vendor Id = 0x%x, Device Id = 0x%x, Class = 0x%x \n", + NV_PCI_DOMAIN_NUMBER(pci_dev), + NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), + PCI_FUNC(pci_dev->devfn), + pci_dev->vendor, + pci_dev->device, + pci_dev->class); + + mutex_lock(&nvswitch.driver_mutex); + + minor = nvswitch_find_minor(); + if (minor >= NVSWITCH_DEVICE_INSTANCE_MAX) + { + rc = -ERANGE; + goto find_minor_failed; + } + + nvswitch_dev = kzalloc(sizeof(*nvswitch_dev), GFP_KERNEL); + if (NULL == nvswitch_dev) + { + rc = -ENOMEM; + goto kzalloc_failed; + } + + mutex_init(&nvswitch_dev->device_mutex); + init_waitqueue_head(&nvswitch_dev->wait_q_errors); + init_waitqueue_head(&nvswitch_dev->wait_q_shutdown); + + snprintf(nvswitch_dev->name, sizeof(nvswitch_dev->name), + NVSWITCH_DRIVER_NAME "%d", minor); + + snprintf(nvswitch_dev->sname, sizeof(nvswitch_dev->sname), + NVSWITCH_SHORT_NAME "%d", minor); + + rc = pci_enable_device(pci_dev); + if (rc) + { + printk(KERN_ERR "%s: Failed to enable PCI device : %d\n", + nvswitch_dev->name, + rc); + goto pci_enable_device_failed; + } + + pci_set_master(pci_dev); + + rc = pci_request_regions(pci_dev, nvswitch_dev->name); + if (rc) + { + printk(KERN_ERR "%s: Failed to request memory regions : %d\n", + nvswitch_dev->name, + rc); + goto pci_request_regions_failed; + } + + nvswitch_dev->bar0 = pci_iomap(pci_dev, 0, 0); + if (!nvswitch_dev->bar0) + { + rc = -ENOMEM; + printk(KERN_ERR "%s: Failed to map BAR0 region : %d\n", + nvswitch_dev->name, + rc); + goto pci_iomap_failed; + } + + nvswitch_dev->pci_dev = pci_dev; + nvswitch_dev->minor = minor; + + rc = nvswitch_init_device(nvswitch_dev); + if (rc) + { + printk(KERN_ERR "%s: Failed to initialize device : %d\n", + nvswitch_dev->name, + rc); + goto init_device_failed; + } + + if (nvswitch_is_device_blacklisted(nvswitch_dev)) + { + nvswitch_post_init_blacklisted(nvswitch_dev); + goto blacklisted; + } + + // + // device_mutex held here because post_init entries may call soeService_HAL() + // with IRQs on. 
see bug 2856314 for more info + // + mutex_lock(&nvswitch_dev->device_mutex); + rc = nvswitch_post_init_device(nvswitch_dev); + mutex_unlock(&nvswitch_dev->device_mutex); + if (rc) + { + printk(KERN_ERR "%s:Failed during device post init : %d\n", + nvswitch_dev->name, rc); + goto post_init_device_failed; + } + +blacklisted: + rc = nvswitch_init_background_tasks(nvswitch_dev); + if (rc) + { + printk(KERN_ERR "%s: Failed to initialize background tasks : %d\n", + nvswitch_dev->name, + rc); + goto init_background_task_failed; + } + + pci_set_drvdata(pci_dev, nvswitch_dev); + + nvswitch_procfs_device_add(nvswitch_dev); + + list_add_tail(&nvswitch_dev->list_node, &nvswitch.devices); + + NV_ATOMIC_INC(nvswitch.count); + + mutex_unlock(&nvswitch.driver_mutex); + + return 0; + +init_background_task_failed: +post_init_device_failed: + nvswitch_deinit_device(nvswitch_dev); + +init_device_failed: + pci_iounmap(pci_dev, nvswitch_dev->bar0); + +pci_iomap_failed: + pci_release_regions(pci_dev); + +pci_request_regions_failed: +#ifdef CONFIG_PCI + pci_clear_master(pci_dev); +#endif + pci_disable_device(pci_dev); + +pci_enable_device_failed: + kfree(nvswitch_dev); + +kzalloc_failed: +find_minor_failed: + mutex_unlock(&nvswitch.driver_mutex); + + return rc; +} + +void +nvswitch_remove +( + struct pci_dev *pci_dev +) +{ + NVSWITCH_DEV *nvswitch_dev; + + mutex_lock(&nvswitch.driver_mutex); + + nvswitch_dev = pci_get_drvdata(pci_dev); + + if (nvswitch_dev == NULL) + { + goto done; + } + + printk(KERN_INFO "%s: removing device %04x:%02x:%02x.%x\n", + nvswitch_dev->name, + NV_PCI_DOMAIN_NUMBER(pci_dev), + NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), + PCI_FUNC(pci_dev->devfn)); + + // + // Synchronize with device operations such as .ioctls/.poll, and then mark + // the device unusable. + // + mutex_lock(&nvswitch_dev->device_mutex); + nvswitch_dev->unusable = NV_TRUE; + mutex_unlock(&nvswitch_dev->device_mutex); + + NV_ATOMIC_DEC(nvswitch.count); + + list_del(&nvswitch_dev->list_node); + + nvswitch_deinit_i2c_adapters(nvswitch_dev); + + WARN_ON(!list_empty(&nvswitch_dev->i2c_adapter_list)); + + pci_set_drvdata(pci_dev, NULL); + + nvswitch_deinit_background_tasks(nvswitch_dev); + + nvswitch_deinit_device(nvswitch_dev); + + pci_iounmap(pci_dev, nvswitch_dev->bar0); + + pci_release_regions(pci_dev); + +#ifdef CONFIG_PCI + pci_clear_master(pci_dev); +#endif + + pci_disable_device(pci_dev); + + nvswitch_procfs_device_remove(nvswitch_dev); + + // Free nvswitch_dev only if it is not in use. 
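+    // A non-zero ref_count means the device is still referenced (e.g. by an
+    // open file descriptor), so it cannot be freed here. It has already been
+    // marked unusable above, so any further ioctl on a stale descriptor
+    // fails with -ENODEV.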
+ if (NV_ATOMIC_READ(nvswitch_dev->ref_count) == 0) + { + kfree(nvswitch_dev); + } + +done: + mutex_unlock(&nvswitch.driver_mutex); + + return; +} + +static void +nvswitch_load_bar_info +( + NVSWITCH_DEV *nvswitch_dev +) +{ + struct pci_dev *pci_dev = nvswitch_dev->pci_dev; + nvlink_pci_info *info; + NvU32 bar = 0; + + nvswitch_lib_get_device_info(nvswitch_dev->lib_device, &info); + + info->bars[0].offset = NVRM_PCICFG_BAR_OFFSET(0); + pci_read_config_dword(pci_dev, info->bars[0].offset, &bar); + + info->bars[0].busAddress = (bar & PCI_BASE_ADDRESS_MEM_MASK); + if (NV_PCI_RESOURCE_FLAGS(pci_dev, 0) & PCI_BASE_ADDRESS_MEM_TYPE_64) + { + pci_read_config_dword(pci_dev, info->bars[0].offset + 4, &bar); + info->bars[0].busAddress |= (((NvU64)bar) << 32); + } + + info->bars[0].baseAddr = NV_PCI_RESOURCE_START(pci_dev, 0); + + info->bars[0].barSize = NV_PCI_RESOURCE_SIZE(pci_dev, 0); + + info->bars[0].pBar = nvswitch_dev->bar0; +} + +static int +_nvswitch_initialize_msix_interrupt +( + NVSWITCH_DEV *nvswitch_dev +) +{ + // Not supported (bug 3018806) + return -EINVAL; +} + +static int +_nvswitch_initialize_msi_interrupt +( + NVSWITCH_DEV *nvswitch_dev +) +{ +#ifdef CONFIG_PCI_MSI + struct pci_dev *pci_dev = nvswitch_dev->pci_dev; + int rc; + + rc = pci_enable_msi(pci_dev); + if (rc) + { + return rc; + } + + return 0; +#else + return -EINVAL; +#endif +} + +static int +_nvswitch_get_irq_caps(NVSWITCH_DEV *nvswitch_dev, unsigned long *irq_caps) +{ + struct pci_dev *pci_dev; + + if (!nvswitch_dev || !irq_caps) + return -EINVAL; + + pci_dev = nvswitch_dev->pci_dev; + + if (pci_find_capability(pci_dev, PCI_CAP_ID_MSIX)) + set_bit(NVSWITCH_IRQ_MSIX, irq_caps); + + if (pci_find_capability(pci_dev, PCI_CAP_ID_MSI)) + set_bit(NVSWITCH_IRQ_MSI, irq_caps); + + if (nvswitch_lib_use_pin_irq(nvswitch_dev->lib_device)) + set_bit(NVSWITCH_IRQ_PIN, irq_caps); + + return 0; +} + +static int +nvswitch_initialize_device_interrupt +( + NVSWITCH_DEV *nvswitch_dev +) +{ + struct pci_dev *pci_dev = nvswitch_dev->pci_dev; + int flags = 0; + unsigned long irq_caps = 0; + int rc; + + if (_nvswitch_get_irq_caps(nvswitch_dev, &irq_caps)) + { + pr_err("%s: failed to retrieve device interrupt capabilities\n", + nvswitch_dev->name); + return -EINVAL; + } + + nvswitch_dev->irq_mechanism = NVSWITCH_IRQ_NONE; + + if (test_bit(NVSWITCH_IRQ_MSIX, &irq_caps)) + { + rc = _nvswitch_initialize_msix_interrupt(nvswitch_dev); + if (!rc) + { + nvswitch_dev->irq_mechanism = NVSWITCH_IRQ_MSIX; + pr_info("%s: using MSI-X\n", nvswitch_dev->name); + } + } + + if (nvswitch_dev->irq_mechanism == NVSWITCH_IRQ_NONE + && test_bit(NVSWITCH_IRQ_MSI, &irq_caps)) + { + rc = _nvswitch_initialize_msi_interrupt(nvswitch_dev); + if (!rc) + { + nvswitch_dev->irq_mechanism = NVSWITCH_IRQ_MSI; + pr_info("%s: using MSI\n", nvswitch_dev->name); + } + } + + if (nvswitch_dev->irq_mechanism == NVSWITCH_IRQ_NONE + && test_bit(NVSWITCH_IRQ_PIN, &irq_caps)) + { + flags |= IRQF_SHARED; + nvswitch_dev->irq_mechanism = NVSWITCH_IRQ_PIN; + pr_info("%s: using PCI pin\n", nvswitch_dev->name); + } + + if (nvswitch_dev->irq_mechanism == NVSWITCH_IRQ_NONE) + { + pr_err("%s: No supported interrupt mechanism was found. 
This device supports:\n", + nvswitch_dev->name); + + if (test_bit(NVSWITCH_IRQ_MSIX, &irq_caps)) + pr_err("%s: MSI-X\n", nvswitch_dev->name); + if (test_bit(NVSWITCH_IRQ_MSI, &irq_caps)) + pr_err("%s: MSI\n", nvswitch_dev->name); + if (test_bit(NVSWITCH_IRQ_PIN, &irq_caps)) + pr_err("%s: PCI Pin\n", nvswitch_dev->name); + + return -EINVAL; + } + + rc = request_threaded_irq(pci_dev->irq, + nvswitch_isr_pending, + nvswitch_isr_thread, + flags, nvswitch_dev->sname, + nvswitch_dev); + if (rc) + { +#ifdef CONFIG_PCI_MSI + if (nvswitch_dev->irq_mechanism == NVSWITCH_IRQ_MSI) + { + pci_disable_msi(pci_dev); + } +#endif + printk(KERN_ERR "%s: failed to get IRQ\n", + nvswitch_dev->name); + + return rc; + } + + return 0; +} + +void +nvswitch_shutdown_device_interrupt +( + NVSWITCH_DEV *nvswitch_dev +) +{ + struct pci_dev *pci_dev = nvswitch_dev->pci_dev; + + free_irq(pci_dev->irq, nvswitch_dev); +#ifdef CONFIG_PCI_MSI + if (nvswitch_dev->irq_mechanism == NVSWITCH_IRQ_MSI) + { + pci_disable_msi(pci_dev); + } +#endif +} + +static void +nvswitch_ctl_exit +( + void +) +{ + cdev_del(&nvswitch.cdev_ctl); +} + +static int +nvswitch_ctl_init +( + int major +) +{ + int rc = 0; + dev_t nvswitch_ctl = MKDEV(major, NVSWITCH_CTL_MINOR); + + cdev_init(&nvswitch.cdev_ctl, &ctl_fops); + + nvswitch.cdev_ctl.owner = THIS_MODULE; + + rc = cdev_add(&nvswitch.cdev_ctl, nvswitch_ctl, 1); + if (rc < 0) + { + printk(KERN_ERR "nvidia-nvswitch: Unable to create cdev ctl\n"); + return rc; + } + + return 0; +} + +// +// Initialize nvswitch driver SW state. This is currently called +// from the RM as a backdoor interface, and not by the Linux device +// manager +// +int +nvswitch_init +( + void +) +{ + int rc; + + if (nvswitch.initialized) + { + printk(KERN_ERR "nvidia-nvswitch: Interface already initialized\n"); + return -EBUSY; + } + + BUILD_BUG_ON(NVSWITCH_DEVICE_INSTANCE_MAX >= NVSWITCH_MINOR_COUNT); + + mutex_init(&nvswitch.driver_mutex); + + INIT_LIST_HEAD(&nvswitch.devices); + + rc = alloc_chrdev_region(&nvswitch.devno, + 0, + NVSWITCH_MINOR_COUNT, + NVSWITCH_DRIVER_NAME); + if (rc < 0) + { + printk(KERN_ERR "nvidia-nvswitch: Unable to create cdev region\n"); + goto alloc_chrdev_region_fail; + } + + printk(KERN_ERR, "nvidia-nvswitch: Major: %d Minor: %d\n", + MAJOR(nvswitch.devno), + MINOR(nvswitch.devno)); + + cdev_init(&nvswitch.cdev, &device_fops); + nvswitch.cdev.owner = THIS_MODULE; + rc = cdev_add(&nvswitch.cdev, nvswitch.devno, NVSWITCH_DEVICE_INSTANCE_MAX); + if (rc < 0) + { + printk(KERN_ERR "nvidia-nvswitch: Unable to create cdev\n"); + goto cdev_add_fail; + } + + rc = nvswitch_procfs_init(); + if (rc < 0) + { + goto nvswitch_procfs_init_fail; + } + + rc = pci_register_driver(&nvswitch_pci_driver); + if (rc < 0) + { + printk(KERN_ERR "nvidia-nvswitch: Failed to register driver : %d\n", rc); + goto pci_register_driver_fail; + } + + rc = nvswitch_ctl_init(MAJOR(nvswitch.devno)); + if (rc < 0) + { + goto nvswitch_ctl_init_fail; + } + + nvswitch.initialized = NV_TRUE; + + return 0; + +nvswitch_ctl_init_fail: + pci_unregister_driver(&nvswitch_pci_driver); + +pci_register_driver_fail: +nvswitch_procfs_init_fail: + cdev_del(&nvswitch.cdev); + +cdev_add_fail: + unregister_chrdev_region(nvswitch.devno, NVSWITCH_MINOR_COUNT); + +alloc_chrdev_region_fail: + + return rc; +} + +// +// Clean up driver state on exit. Currently called from RM backdoor call, +// and not by the Linux device manager. 
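+// Teardown releases, in order: procfs entries, the ctl cdev, the PCI driver,
+// the per-device cdev, and finally the chrdev region allocated in
+// nvswitch_init().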
+// +void +nvswitch_exit +( + void +) +{ + if (NV_FALSE == nvswitch.initialized) + { + return; + } + + nvswitch_procfs_exit(); + + nvswitch_ctl_exit(); + + pci_unregister_driver(&nvswitch_pci_driver); + + cdev_del(&nvswitch.cdev); + + unregister_chrdev_region(nvswitch.devno, NVSWITCH_MINOR_COUNT); + + WARN_ON(!list_empty(&nvswitch.devices)); + + nvswitch.initialized = NV_FALSE; +} + +// +// Get current time in seconds.nanoseconds +// In this implementation, the time is from epoch time +// (midnight UTC of January 1, 1970) +// +NvU64 +nvswitch_os_get_platform_time +( + void +) +{ + struct timespec64 ts; + + ktime_get_raw_ts64(&ts); + return (NvU64) timespec64_to_ns(&ts); +} + +void +nvswitch_os_print +( + const int log_level, + const char *fmt, + ... +) +{ + va_list arglist; + char *kern_level; + char fmt_printk[NVSWITCH_LOG_BUFFER_SIZE]; + + switch (log_level) + { + case NVSWITCH_DBG_LEVEL_MMIO: + kern_level = KERN_DEBUG; + break; + case NVSWITCH_DBG_LEVEL_INFO: + kern_level = KERN_INFO; + break; + case NVSWITCH_DBG_LEVEL_SETUP: + kern_level = KERN_INFO; + break; + case NVSWITCH_DBG_LEVEL_WARN: + kern_level = KERN_WARNING; + break; + case NVSWITCH_DBG_LEVEL_ERROR: + kern_level = KERN_ERR; + break; + default: + kern_level = KERN_DEFAULT; + break; + } + + va_start(arglist, fmt); + snprintf(fmt_printk, sizeof(fmt_printk), "%s%s", kern_level, fmt); + vprintk(fmt_printk, arglist); + va_end(arglist); +} + +void +nvswitch_os_override_platform +( + void *os_handle, + NvBool *rtlsim +) +{ + // Never run on RTL + *rtlsim = NV_FALSE; +} + +NvlStatus +nvswitch_os_read_registery_binary +( + void *os_handle, + const char *name, + NvU8 *data, + NvU32 length +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvU32 +nvswitch_os_get_device_count +( + void +) +{ + return NV_ATOMIC_READ(nvswitch.count); +} + +// +// A helper to convert a string to an unsigned int. +// +// The string should be NULL terminated. +// Only works with base16 values. 
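+// An optional "0x"/"0X" prefix is accepted; any character outside
+// [0-9a-fA-F], or a value that does not fit in 32 bits, is rejected.
+// For example, "0x1A", "1a" and "DEADBEEF" all parse successfully,
+// while "0x1G" and "0x100000000" return -EINVAL.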
+// +static int +nvswitch_os_strtouint +( + char *str, + unsigned int *data +) +{ + char *p; + unsigned long long val; + + if (!str || !data) + { + return -EINVAL; + } + + *data = 0; + val = 0; + p = str; + + while (*p != '\0') + { + if ((tolower(*p) == 'x') && (*str == '0') && (p == str + 1)) + { + p++; + } + else if (*p >='0' && *p <= '9') + { + val = val * 16 + (*p - '0'); + p++; + } + else if (tolower(*p) >= 'a' && tolower(*p) <= 'f') + { + val = val * 16 + (tolower(*p) - 'a' + 10); + p++; + } + else + { + return -EINVAL; + } + } + + if (val > 0xFFFFFFFF) + { + return -EINVAL; + } + + *data = (unsigned int)val; + + return 0; +} + +NvlStatus +nvswitch_os_read_registry_dword +( + void *os_handle, + const char *name, + NvU32 *data +) +{ + char *regkey, *regkey_val_start, *regkey_val_end; + char regkey_val[NVSWITCH_REGKEY_VALUE_LEN + 1]; + NvU32 regkey_val_len = 0; + + *data = 0; + + if (!NvSwitchRegDwords) + { + return -NVL_ERR_GENERIC; + } + + regkey = strstr(NvSwitchRegDwords, name); + if (!regkey) + { + return -NVL_ERR_GENERIC; + } + + regkey = strchr(regkey, '='); + if (!regkey) + { + return -NVL_ERR_GENERIC; + } + + regkey_val_start = regkey + 1; + + regkey_val_end = strchr(regkey, ';'); + if (!regkey_val_end) + { + regkey_val_end = strchr(regkey, '\0'); + } + + regkey_val_len = regkey_val_end - regkey_val_start; + if (regkey_val_len > NVSWITCH_REGKEY_VALUE_LEN || regkey_val_len == 0) + { + return -NVL_ERR_GENERIC; + } + + strncpy(regkey_val, regkey_val_start, regkey_val_len); + regkey_val[regkey_val_len] = '\0'; + + if (nvswitch_os_strtouint(regkey_val, data) != 0) + { + return -NVL_ERR_GENERIC; + } + + return NVL_SUCCESS; +} + +static NvBool +_nvswitch_is_space(const char ch) +{ + return ((ch == ' ') || ((ch >= '\t') && (ch <= '\r'))); +} + +static char * +_nvswitch_remove_spaces(const char *in) +{ + unsigned int len = nvswitch_os_strlen(in) + 1; + const char *in_ptr; + char *out, *out_ptr; + + out = nvswitch_os_malloc(len); + if (out == NULL) + return NULL; + + in_ptr = in; + out_ptr = out; + + while (*in_ptr != '\0') + { + if (!_nvswitch_is_space(*in_ptr)) + *out_ptr++ = *in_ptr; + in_ptr++; + } + *out_ptr = '\0'; + + return out; +} + +/* + * Compare given string UUID with the NvSwitchBlacklist registry parameter string and + * return whether the UUID is in the NvSwitch blacklist + */ +NvBool +nvswitch_os_is_uuid_in_blacklist +( + NvUuid *uuid +) +{ + char *list; + char *ptr; + char *token; + NvU8 uuid_string[NVSWITCH_UUID_STRING_LENGTH]; + + if (NvSwitchBlacklist == NULL) + return NV_FALSE; + + if (nvswitch_uuid_to_string(uuid, uuid_string, NVSWITCH_UUID_STRING_LENGTH) == 0) + return NV_FALSE; + + if ((list = _nvswitch_remove_spaces(NvSwitchBlacklist)) == NULL) + return NV_FALSE; + + ptr = list; + + while ((token = strsep(&ptr, ",")) != NULL) + { + if (strcmp(token, uuid_string) == 0) + { + nvswitch_os_free(list); + return NV_TRUE; + } + } + nvswitch_os_free(list); + return NV_FALSE; +} + + +NvlStatus +nvswitch_os_alloc_contig_memory +( + void *os_handle, + void **virt_addr, + NvU32 size, + NvBool force_dma32 +) +{ + NvU32 gfp_flags; + unsigned long nv_gfp_addr = 0; + + if (!virt_addr) + return -NVL_BAD_ARGS; + + gfp_flags = GFP_KERNEL | (force_dma32 ? 
GFP_DMA32 : 0); + NV_GET_FREE_PAGES(nv_gfp_addr, get_order(size), gfp_flags); + + if(!nv_gfp_addr) + { + pr_err("nvidia-nvswitch: unable to allocate kernel memory\n"); + return -NVL_NO_MEM; + } + + *virt_addr = (void *)nv_gfp_addr; + + return NVL_SUCCESS; +} + +void +nvswitch_os_free_contig_memory +( + void *os_handle, + void *virt_addr, + NvU32 size +) +{ + NV_FREE_PAGES((unsigned long)virt_addr, get_order(size)); +} + +static inline int +_nvswitch_to_pci_dma_direction +( + NvU32 direction +) +{ + if (direction == NVSWITCH_DMA_DIR_TO_SYSMEM) + return DMA_FROM_DEVICE; + else if (direction == NVSWITCH_DMA_DIR_FROM_SYSMEM) + return DMA_TO_DEVICE; + else + return DMA_BIDIRECTIONAL; +} + +NvlStatus +nvswitch_os_map_dma_region +( + void *os_handle, + void *cpu_addr, + NvU64 *dma_handle, + NvU32 size, + NvU32 direction +) +{ + int dma_dir; + struct pci_dev *pdev = (struct pci_dev *)os_handle; + + if (!pdev || !cpu_addr || !dma_handle) + return -NVL_BAD_ARGS; + + dma_dir = _nvswitch_to_pci_dma_direction(direction); + + *dma_handle = (NvU64)dma_map_single(&pdev->dev, cpu_addr, size, dma_dir); + + if (dma_mapping_error(&pdev->dev, *dma_handle)) + { + pr_err("nvidia-nvswitch: unable to create PCI DMA mapping\n"); + return -NVL_ERR_GENERIC; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_os_unmap_dma_region +( + void *os_handle, + void *cpu_addr, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +) +{ + int dma_dir; + struct pci_dev *pdev = (struct pci_dev *)os_handle; + + if (!pdev || !cpu_addr) + return -NVL_BAD_ARGS; + + dma_dir = _nvswitch_to_pci_dma_direction(direction); + + dma_unmap_single(&pdev->dev, dma_handle, size, dma_dir); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_os_set_dma_mask +( + void *os_handle, + NvU32 dma_addr_width +) +{ + struct pci_dev *pdev = (struct pci_dev *)os_handle; + + if (!pdev) + return -NVL_BAD_ARGS; + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_addr_width))) + return -NVL_ERR_GENERIC; + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_os_sync_dma_region_for_cpu +( + void *os_handle, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +) +{ + int dma_dir; + struct pci_dev *pdev = (struct pci_dev *)os_handle; + + if (!pdev) + return -NVL_BAD_ARGS; + + dma_dir = _nvswitch_to_pci_dma_direction(direction); + + dma_sync_single_for_cpu(&pdev->dev, dma_handle, size, dma_dir); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_os_sync_dma_region_for_device +( + void *os_handle, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +) +{ + int dma_dir; + struct pci_dev *pdev = (struct pci_dev *)os_handle; + + if (!pdev) + return -NVL_BAD_ARGS; + + dma_dir = _nvswitch_to_pci_dma_direction(direction); + + dma_sync_single_for_device(&pdev->dev, dma_handle, size, dma_dir); + + return NVL_SUCCESS; +} + +static inline void * +_nvswitch_os_malloc +( + NvLength size +) +{ + void *ptr = NULL; + + if (!NV_MAY_SLEEP()) + { + if (size <= NVSWITCH_KMALLOC_LIMIT) + { + ptr = kmalloc(size, NV_GFP_ATOMIC); + } + } + else + { + if (size <= NVSWITCH_KMALLOC_LIMIT) + { + ptr = kmalloc(size, NV_GFP_NO_OOM); + } + + if (ptr == NULL) + { + ptr = vmalloc(size); + } + } + + return ptr; +} + +void * +nvswitch_os_malloc_trace +( + NvLength size, + const char *file, + NvU32 line +) +{ +#if defined(NV_MEM_LOGGER) + void *ptr = _nvswitch_os_malloc(size); + if (ptr) + { + nv_memdbg_add(ptr, size, file, line); + } + + return ptr; +#else + return _nvswitch_os_malloc(size); +#endif +} + +static inline void +_nvswitch_os_free +( + void *ptr +) +{ + if (!ptr) + return; + + if 
(is_vmalloc_addr(ptr)) + { + vfree(ptr); + } + else + { + kfree(ptr); + } +} + +void +nvswitch_os_free +( + void *ptr +) +{ +#if defined (NV_MEM_LOGGER) + if (ptr == NULL) + return; + + nv_memdbg_remove(ptr, 0, NULL, 0); + + return _nvswitch_os_free(ptr); +#else + return _nvswitch_os_free(ptr); +#endif +} + +NvLength +nvswitch_os_strlen +( + const char *str +) +{ + return strlen(str); +} + +char* +nvswitch_os_strncpy +( + char *dest, + const char *src, + NvLength length +) +{ + return strncpy(dest, src, length); +} + +int +nvswitch_os_strncmp +( + const char *s1, + const char *s2, + NvLength length +) +{ + return strncmp(s1, s2, length); +} + +void * +nvswitch_os_memset +( + void *dest, + int value, + NvLength size +) +{ + return memset(dest, value, size); +} + +void * +nvswitch_os_memcpy +( + void *dest, + const void *src, + NvLength size +) +{ + return memcpy(dest, src, size); +} + +int +nvswitch_os_memcmp +( + const void *s1, + const void *s2, + NvLength size +) +{ + return memcmp(s1, s2, size); +} + +NvU32 +nvswitch_os_mem_read32 +( + const volatile void * address +) +{ + return (*(const volatile NvU32*)(address)); +} + +void +nvswitch_os_mem_write32 +( + volatile void *address, + NvU32 data +) +{ + (*(volatile NvU32 *)(address)) = data; +} + +NvU64 +nvswitch_os_mem_read64 +( + const volatile void * address +) +{ + return (*(const volatile NvU64 *)(address)); +} + +void +nvswitch_os_mem_write64 +( + volatile void *address, + NvU64 data +) +{ + (*(volatile NvU64 *)(address)) = data; +} + +int +nvswitch_os_snprintf +( + char *dest, + NvLength size, + const char *fmt, + ... +) +{ + va_list arglist; + int chars_written; + + va_start(arglist, fmt); + chars_written = vsnprintf(dest, size, fmt, arglist); + va_end(arglist); + + return chars_written; +} + +int +nvswitch_os_vsnprintf +( + char *buf, + NvLength size, + const char *fmt, + va_list arglist +) +{ + return vsnprintf(buf, size, fmt, arglist); +} + +void +nvswitch_os_assert_log +( + int cond, + const char *fmt, + ... +) +{ + if(cond == 0x0) + { + if (printk_ratelimit()) + { + va_list arglist; + char fmt_printk[NVSWITCH_LOG_BUFFER_SIZE]; + + va_start(arglist, fmt); + vsnprintf(fmt_printk, sizeof(fmt_printk), fmt, arglist); + va_end(arglist); + nvswitch_os_print(NVSWITCH_DBG_LEVEL_ERROR, fmt_printk); + WARN_ON(1); + } + dbg_breakpoint(); + } +} + +/* + * Sleep for specified milliseconds. Yields the CPU to scheduler. 
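+ * If nv_sleep_ms() reports a failure, the rate-limited message below
+ * attributes it to the request exceeding NV_MAX_ISR_DELAY_MS.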
+ */ +void +nvswitch_os_sleep +( + unsigned int ms +) +{ + NV_STATUS status; + status = nv_sleep_ms(ms); + + if (status != NV_OK) + { + if (printk_ratelimit()) + { + nvswitch_os_print(NVSWITCH_DBG_LEVEL_ERROR, "NVSwitch: requested" + " sleep duration %d msec exceeded %d msec\n", + ms, NV_MAX_ISR_DELAY_MS); + WARN_ON(1); + } + } +} + +NvlStatus +nvswitch_os_acquire_fabric_mgmt_cap +( + void *osPrivate, + NvU64 capDescriptor +) +{ + int dup_fd = -1; + nvswitch_file_private_t *private_data = (nvswitch_file_private_t *)osPrivate; + + if (private_data == NULL) + { + return -NVL_BAD_ARGS; + } + + dup_fd = nvlink_cap_acquire((int)capDescriptor, + NVLINK_CAP_FABRIC_MANAGEMENT); + if (dup_fd < 0) + { + return -NVL_ERR_OPERATING_SYSTEM; + } + + private_data->capability_fds.fabric_mgmt = dup_fd; + return NVL_SUCCESS; +} + +int +nvswitch_os_is_fabric_manager +( + void *osPrivate +) +{ + nvswitch_file_private_t *private_data = (nvswitch_file_private_t *)osPrivate; + + /* Make sure that fabric mgmt capbaility fd is valid */ + if ((private_data == NULL) || + (private_data->capability_fds.fabric_mgmt < 0)) + { + return 0; + } + + return 1; +} + +int +nvswitch_os_is_admin +( + void +) +{ + return NV_IS_SUSER(); +} + +#define NV_KERNEL_RELEASE ((LINUX_VERSION_CODE >> 16) & 0x0ff) +#define NV_KERNEL_VERSION ((LINUX_VERSION_CODE >> 8) & 0x0ff) +#define NV_KERNEL_SUBVERSION ((LINUX_VERSION_CODE) & 0x0ff) + +NvlStatus +nvswitch_os_get_os_version +( + NvU32 *pMajorVer, + NvU32 *pMinorVer, + NvU32 *pBuildNum +) +{ + if (pMajorVer) + *pMajorVer = NV_KERNEL_RELEASE; + if (pMinorVer) + *pMinorVer = NV_KERNEL_VERSION; + if (pBuildNum) + *pBuildNum = NV_KERNEL_SUBVERSION; + + return NVL_SUCCESS; +} + +/*! + * @brief: OS specific handling to add an event. + */ +NvlStatus +nvswitch_os_add_client_event +( + void *osHandle, + void *osPrivate, + NvU32 eventId +) +{ + return NVL_SUCCESS; +} + +/*! + * @brief: OS specific handling to remove all events corresponding to osPrivate. + */ +NvlStatus +nvswitch_os_remove_client_event +( + void *osHandle, + void *osPrivate +) +{ + return NVL_SUCCESS; +} + +/*! + * @brief: OS specific handling to notify an event. + */ +NvlStatus +nvswitch_os_notify_client_event +( + void *osHandle, + void *osPrivate, + NvU32 eventId +) +{ + nvswitch_file_private_t *private_data = (nvswitch_file_private_t *)osPrivate; + + if (private_data == NULL) + { + return -NVL_BAD_ARGS; + } + + private_data->file_event.event_pending = NV_TRUE; + wake_up_interruptible(&private_data->file_event.wait_q_event); + + return NVL_SUCCESS; +} + +/*! + * @brief: Gets OS specific support for the REGISTER_EVENTS ioctl + */ +NvlStatus +nvswitch_os_get_supported_register_events_params +( + NvBool *many_events, + NvBool *os_descriptor +) +{ + *many_events = NV_FALSE; + *os_descriptor = NV_FALSE; + return NVL_SUCCESS; +} diff --git a/kernel-open/nvidia/linux_nvswitch.h b/kernel-open/nvidia/linux_nvswitch.h new file mode 100644 index 000000000..14b1fa8da --- /dev/null +++ b/kernel-open/nvidia/linux_nvswitch.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LINUX_NVSWITCH_H +#define LINUX_NVSWITCH_H + +#include "nvmisc.h" +#include "nv-linux.h" +#include "nv-kthread-q.h" +#include "export_nvswitch.h" + +#define NVSWITCH_SHORT_NAME "nvswi" + +#define NVSWITCH_IRQ_NONE 0 +#define NVSWITCH_IRQ_MSIX 1 +#define NVSWITCH_IRQ_MSI 2 +#define NVSWITCH_IRQ_PIN 3 + +#define NVSWITCH_OS_ASSERT(_cond) \ + nvswitch_os_assert_log((_cond), "NVSwitch: Assertion failed in %s() at %s:%d\n", \ + __FUNCTION__ , __FILE__, __LINE__) + +#define NVSWITCH_KMALLOC_LIMIT (128 * 1024) + +#define nvswitch_os_malloc(_size) nvswitch_os_malloc_trace(_size, __FILE__, __LINE__) + +typedef struct +{ + struct list_head entry; + struct i2c_adapter *adapter; +} nvswitch_i2c_adapter_entry; + +// Per-chip driver state +typedef struct +{ + char name[sizeof(NVSWITCH_DRIVER_NAME) + 4]; + char sname[sizeof(NVSWITCH_SHORT_NAME) + 4]; /* short name */ + int minor; + NvUuid uuid; + struct mutex device_mutex; + nvswitch_device *lib_device; /* nvswitch library device */ + wait_queue_head_t wait_q_errors; + void *bar0; + struct nv_kthread_q task_q; /* Background task queue */ + struct nv_kthread_q_item task_item; /* Background dispatch task */ + atomic_t task_q_ready; + wait_queue_head_t wait_q_shutdown; + struct pci_dev *pci_dev; + atomic_t ref_count; + struct list_head list_node; + NvBool unusable; + NvU32 phys_id; + NvU64 bios_ver; +#if defined(CONFIG_PROC_FS) + struct proc_dir_entry *procfs_dir; +#endif + NvU8 irq_mechanism; + struct list_head i2c_adapter_list; +} NVSWITCH_DEV; + + +int nvswitch_map_status(NvlStatus status); +int nvswitch_procfs_init(void); +void nvswitch_procfs_exit(void); +int nvswitch_procfs_device_add(NVSWITCH_DEV *nvswitch_dev); +void nvswitch_procfs_device_remove(NVSWITCH_DEV *nvswitch_dev); +struct i2c_adapter *nvswitch_i2c_add_adapter(NVSWITCH_DEV *nvswitch_dev, NvU32 port); +void nvswitch_i2c_del_adapter(struct i2c_adapter *adapter); + +#endif // LINUX_NVSWITCH_H diff --git a/kernel-open/nvidia/nv-acpi.c b/kernel-open/nvidia/nv-acpi.c new file mode 100644 index 000000000..1169c1465 --- /dev/null +++ b/kernel-open/nvidia/nv-acpi.c @@ -0,0 +1,1880 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-reg.h" + +#include + +#if defined(NV_LINUX_ACPI_EVENTS_SUPPORTED) +static NV_STATUS nv_acpi_extract_integer (const union acpi_object *, void *, NvU32, NvU32 *); +static NV_STATUS nv_acpi_extract_buffer (const union acpi_object *, void *, NvU32, NvU32 *); +static NV_STATUS nv_acpi_extract_package (const union acpi_object *, void *, NvU32, NvU32 *); +static NV_STATUS nv_acpi_extract_object (const union acpi_object *, void *, NvU32, NvU32 *); + +static int nv_acpi_add (struct acpi_device *); +static int nv_acpi_remove (struct acpi_device *device); +static void nv_acpi_event (acpi_handle, u32, void *); +static void nv_acpi_powersource_hotplug_event(acpi_handle, u32, void *); +static acpi_status nv_acpi_find_methods (acpi_handle, u32, void *, void **); +static NV_STATUS nv_acpi_nvif_method (NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); + +static NV_STATUS nv_acpi_wmmx_method (NvU32, NvU8 *, NvU16 *); + +static const struct acpi_device_id nv_video_device_ids[] = { + { + .id = ACPI_VIDEO_HID, + .driver_data = 0, + }, + { + .id = "", + .driver_data = 0, + }, +}; + +static struct acpi_driver *nv_acpi_driver; +static acpi_handle nvif_handle = NULL; +static acpi_handle nvif_parent_gpu_handle = NULL; +static acpi_handle wmmx_handle = NULL; + +// Used for AC Power Source Hotplug Handling +static acpi_handle psr_handle = NULL; +static acpi_handle psr_device_handle = NULL; +static nv_acpi_t *psr_nv_acpi_object = NULL; + +static NvBool battery_present = NV_FALSE; + +#define BIX_BATTERY_TECHNOLOGY_OFFSET 0x4 +#define BIF_BATTERY_TECHNOLOGY_OFFSET 0x3 +#define BATTERY_RECHARGABLE 0x1 + +/* Moved into acpi/video.h in Linux 4.10 */ +#ifndef ACPI_VIDEO_NOTIFY_PROBE +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 +#endif + +/* Added to acpi/video.h in Linux 3.1 */ +#ifndef ACPI_VIDEO_CLASS +#define ACPI_VIDEO_CLASS "video" +#endif + +static const struct acpi_driver nv_acpi_driver_template = { + .name = "NVIDIA ACPI Video Driver", + .class = ACPI_VIDEO_CLASS, + .ids = nv_video_device_ids, + .ops = { + .add = nv_acpi_add, + .remove = nv_acpi_remove, + }, +}; + +static int nv_acpi_get_device_handle(nv_state_t *nv, acpi_handle *dev_handle) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + +#if defined(DEVICE_ACPI_HANDLE) + *dev_handle = DEVICE_ACPI_HANDLE(nvl->dev); + return NV_TRUE; +#elif defined 
(ACPI_HANDLE) + *dev_handle = ACPI_HANDLE(nvl->dev); + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +static int nv_acpi_notify(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct acpi_bus_event *info = data; + nv_stack_t *sp = NULL; + nv_linux_state_t *nvl = container_of(nb, nv_linux_state_t, acpi_nb); + nv_state_t *nv = NV_STATE_PTR(nvl); + + if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { + if (nv_kmem_cache_alloc_stack(&sp) == 0) { + /* + * Function to handle device specific ACPI events + * such as display hotplug and D-notifier events. + */ + rm_acpi_notify(sp, nv, info->type); + nv_kmem_cache_free_stack(sp); + } + else + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_notify: failed to allocate stack\n"); + + /* + * Special case for ACPI_VIDEO_NOTIFY_PROBE event: intentionally return + * NOTIFY_BAD to inform acpi-video to stop generating keypresses for + * this event. + */ + if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { + return NOTIFY_BAD; + } + } + + return NOTIFY_DONE; +} + +void nv_acpi_register_notifier(nv_linux_state_t *nvl) +{ + nvl->acpi_nb.notifier_call = nv_acpi_notify; + register_acpi_notifier(&nvl->acpi_nb); +} + +void nv_acpi_unregister_notifier(nv_linux_state_t *nvl) +{ + unregister_acpi_notifier(&nvl->acpi_nb); +} + +int nv_acpi_init(void) +{ + /* + * This function will register the RM with the Linux + * ACPI subsystem. + */ + int status; + nvidia_stack_t *sp = NULL; + NvU32 acpi_event_config = 0; + NV_STATUS rmStatus; + + status = nv_kmem_cache_alloc_stack(&sp); + if (status != 0) + { + return status; + } + + rmStatus = rm_read_registry_dword(sp, NULL, + NV_REG_REGISTER_FOR_ACPI_EVENTS, &acpi_event_config); + nv_kmem_cache_free_stack(sp); + + if ((rmStatus == NV_OK) && (acpi_event_config == 0)) + return 0; + + if (nv_acpi_driver != NULL) + return -EBUSY; + + rmStatus = os_alloc_mem((void **)&nv_acpi_driver, + sizeof(struct acpi_driver)); + if (rmStatus != NV_OK) + return -ENOMEM; + + memcpy((void *)nv_acpi_driver, (void *)&nv_acpi_driver_template, + sizeof(struct acpi_driver)); + + status = acpi_bus_register_driver(nv_acpi_driver); + if (status < 0) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_init: acpi_bus_register_driver() failed (%d)!\n", status); + os_free_mem(nv_acpi_driver); + nv_acpi_driver = NULL; + } + + return status; +} + +int nv_acpi_uninit(void) +{ + nvidia_stack_t *sp = NULL; + NvU32 acpi_event_config = 0; + NV_STATUS rmStatus; + int rc; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + rmStatus = rm_read_registry_dword(sp, NULL, + NV_REG_REGISTER_FOR_ACPI_EVENTS, &acpi_event_config); + nv_kmem_cache_free_stack(sp); + + if ((rmStatus == NV_OK) && (acpi_event_config == 0)) + return 0; + + if (nv_acpi_driver == NULL) + return -ENXIO; + + acpi_bus_unregister_driver(nv_acpi_driver); + os_free_mem(nv_acpi_driver); + + nv_acpi_driver = NULL; + + return 0; +} + +static int nv_acpi_add(struct acpi_device *device) +{ + /* + * This function will cause RM to initialize the things it needs for acpi interaction + * on the display device. 
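+ * It records handles for the child display devices reported via _ADR,
+ * enables ACPI display-switch events with _DOS (argument 0), and installs
+ * the device notify handler.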
+ */ + int status = -1; + NV_STATUS rmStatus = NV_ERR_GENERIC; + nv_acpi_t *pNvAcpiObject = NULL; + union acpi_object control_argument_0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list control_argument_list = { 0, NULL }; + nvidia_stack_t *sp = NULL; + struct list_head *node, *next; + unsigned long long device_id = 0; + int device_counter = 0; + + status = nv_kmem_cache_alloc_stack(&sp); + if (status != 0) + { + return status; + } + + // allocate data structure we need + rmStatus = os_alloc_mem((void **) &pNvAcpiObject, sizeof(nv_acpi_t)); + if (rmStatus != NV_OK) + { + nv_kmem_cache_free_stack(sp); + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_add: failed to allocate ACPI device management data!\n"); + return -ENOMEM; + } + + os_mem_set((void *)pNvAcpiObject, 0, sizeof(nv_acpi_t)); + + device->driver_data = pNvAcpiObject; + pNvAcpiObject->device = device; + + pNvAcpiObject->sp = sp; + + // grab handles to all the important nodes representing devices + + list_for_each_safe(node, next, &device->children) + { + struct acpi_device *dev = + list_entry(node, struct acpi_device, node); + + if (!dev) + continue; + + if (device_counter == NV_MAXNUM_DISPLAY_DEVICES) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_add: Total number of devices cannot exceed %d\n", + NV_MAXNUM_DISPLAY_DEVICES); + break; + } + + status = + acpi_evaluate_integer(dev->handle, "_ADR", NULL, &device_id); + if (ACPI_FAILURE(status)) + /* Couldnt query device_id for this device */ + continue; + + device_id = (device_id & 0xffff); + + if ((device_id != 0x100) && /* Not a known CRT device-id */ + (device_id != 0x200) && /* Not a known TV device-id */ + (device_id != 0x0110) && (device_id != 0x0118) && (device_id != 0x0400) && /* Not an LCD*/ + (device_id != 0x0111) && (device_id != 0x0120) && (device_id != 0x0300)) /* Not a known DVI device-id */ + { + /* This isnt a known device Id. + Do default switching on this system. */ + pNvAcpiObject->default_display_mask = 1; + break; + } + + pNvAcpiObject->pNvVideo[device_counter].dev_id = device_id; + pNvAcpiObject->pNvVideo[device_counter].dev_handle = dev->handle; + + device_counter++; + + } + + // arg 0, bits 1:0, 0 = enable events + control_argument_0.integer.type = ACPI_TYPE_INTEGER; + control_argument_0.integer.value = 0x0; + + // listify it + control_argument_list.count = 1; + control_argument_list.pointer = &control_argument_0; + + // _DOS method takes 1 argument and returns nothing + status = acpi_evaluate_object(device->handle, "_DOS", &control_argument_list, NULL); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_add: failed to enable display switch events (%d)!\n", status); + } + + status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + nv_acpi_event, pNvAcpiObject); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_add: failed to install event notification handler (%d)!\n", status); + } + else + { + try_module_get(THIS_MODULE); + pNvAcpiObject->notify_handler_installed = 1; + } + + return 0; +} + +static int nv_acpi_remove(struct acpi_device *device) +{ + /* + * This function will cause RM to relinquish control of the VGA ACPI device. 
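+ * It disables display-switch events with _DOS (argument 1) and removes the
+ * notify handler installed by nv_acpi_add(), freeing the per-device state.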
+ */ + acpi_status status; + union acpi_object control_argument_0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list control_argument_list = { 0, NULL }; + nv_acpi_t *pNvAcpiObject = device->driver_data; + + + pNvAcpiObject->default_display_mask = 0; + + // arg 0, bits 1:0, 1 = disable events + control_argument_0.integer.type = ACPI_TYPE_INTEGER; + control_argument_0.integer.value = 0x1; + + // listify it + control_argument_list.count = 1; + control_argument_list.pointer = &control_argument_0; + + // _DOS method takes 1 argument and returns nothing + status = acpi_evaluate_object(device->handle, "_DOS", &control_argument_list, NULL); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_remove: failed to disable display switch events (%d)!\n", status); + } + + if (pNvAcpiObject->notify_handler_installed) + { + // remove event notifier + status = acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, nv_acpi_event); + } + + if (pNvAcpiObject->notify_handler_installed && + ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_remove: failed to remove event notification handler (%d)!\n", status); + } + else + { + nv_kmem_cache_free_stack(pNvAcpiObject->sp); + os_free_mem((void *)pNvAcpiObject); + module_put(THIS_MODULE); + device->driver_data = NULL; + } + + return status; +} + +/* + * The ACPI specification defines IDs for various ACPI video + * extension events like display switch events, AC/battery + * events, docking events, etc.. + * Whenever an ACPI event is received by the corresponding + * event handler installed within the core NVIDIA driver, the + * code can verify the event ID before processing it. + */ +#define ACPI_DISPLAY_DEVICE_CHANGE_EVENT 0x80 +#define NVIF_NOTIFY_DISPLAY_DETECT 0xCB +#define NVIF_DISPLAY_DEVICE_CHANGE_EVENT NVIF_NOTIFY_DISPLAY_DETECT +static void nv_acpi_event(acpi_handle handle, u32 event_type, void *data) +{ + /* + * This function will handle acpi events from the linux kernel, used + * to detect notifications from the VGA device. + */ + nv_acpi_t *pNvAcpiObject = data; + u32 event_val = 0; + unsigned long long state; + int status = 0; + int device_counter = 0; + + if (event_type == NVIF_DISPLAY_DEVICE_CHANGE_EVENT) + { + /* We are getting NVIF events on this machine. We arent putting a very + extensive handling in-place to communicate back with SBIOS, know + the next enabled devices, and then do the switch. We just + pass a default display switch event, so that X-driver decides + the switching policy itself. 
*/ + rm_system_event(pNvAcpiObject->sp, NV_SYSTEM_ACPI_DISPLAY_SWITCH_EVENT, 0); + } + if (event_type == ACPI_DISPLAY_DEVICE_CHANGE_EVENT) + { + if (pNvAcpiObject->default_display_mask != 1) + { + while ((device_counter < NV_MAXNUM_DISPLAY_DEVICES) && + (pNvAcpiObject->pNvVideo[device_counter].dev_handle)) + { + acpi_handle dev_handle = pNvAcpiObject->pNvVideo[device_counter].dev_handle; + int dev_id = pNvAcpiObject->pNvVideo[device_counter].dev_id; + + status = acpi_evaluate_integer(dev_handle, + "_DGS", + NULL, + &state); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_event: failed to query _DGS method for display device 0x%x\n", + dev_id); + } + else if (state) + { + /* Check if the device is a CRT ...*/ + if (dev_id == 0x0100) + { + event_val |= NV_HOTKEY_STATUS_DISPLAY_ENABLE_CRT; + } + /* device-id for a TV */ + else if (dev_id == 0x0200) + { + event_val |= NV_HOTKEY_STATUS_DISPLAY_ENABLE_TV; + } + else if ((dev_id == 0x0110) || /* device id for internal LCD */ + (dev_id == 0x0118) || /* alternate ACPI ID for the + internal LCD */ + (dev_id == 0x0400)) /* ACPI spec 3.0 specified + device id for a internal LCD*/ + { + event_val |= NV_HOTKEY_STATUS_DISPLAY_ENABLE_LCD; + } + else if ((dev_id == 0x0111) || /* the set + of possible device-ids for a DFP */ + (dev_id == 0x0120) || + (dev_id == 0x0300)) /* ACPI spec 3.0 specified + device id for non-LVDS DFP */ + { + event_val |= NV_HOTKEY_STATUS_DISPLAY_ENABLE_DFP; + } + } + device_counter++; + } + } + + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_event: Event-type 0x%x, Event-val 0x%x\n", + event_type, event_val); + + rm_system_event(pNvAcpiObject->sp, NV_SYSTEM_ACPI_DISPLAY_SWITCH_EVENT, event_val); + } + + // no unsubscription or re-enable necessary. Once DOD has been set, we are go. + // once we are subscribed to ACPI events, we don't have to re-subscribe unless + // unsubscribe. 
+} + +NV_STATUS NV_API_CALL nv_acpi_get_powersource(NvU32 *ac_plugged) +{ + unsigned long long val; + int status = 0; + + if (!ac_plugged) + return NV_ERR_INVALID_ARGUMENT; + + if (!psr_device_handle) + return NV_ERR_INVALID_ARGUMENT; + + // Check whether or not AC power is plugged in + status = acpi_evaluate_integer(psr_device_handle, "_PSR", NULL, &val); + if (ACPI_FAILURE(status)) + return NV_ERR_GENERIC; + + // AC Power Source Plug State + // - 0x0 unplugged + // - 0x1 plugged + *ac_plugged = (val == 0x1); + + return NV_OK; +} + +#define ACPI_POWER_SOURCE_CHANGE_EVENT 0x80 +static void nv_acpi_powersource_hotplug_event(acpi_handle handle, u32 event_type, void *data) +{ + /* + * This function will handle acpi events from the linux kernel, used + * to detect notifications from Power Source device + */ + nv_acpi_t *pNvAcpiObject = data; + u32 ac_plugged = 0; + + if (event_type == ACPI_POWER_SOURCE_CHANGE_EVENT) + { + if (nv_acpi_get_powersource(&ac_plugged) != NV_OK) + return; + + rm_system_event(pNvAcpiObject->sp, NV_SYSTEM_ACPI_BATTERY_POWER_EVENT, !ac_plugged); + } +} +/* + * End of ACPI event handler functions + */ + +/* Do the necessary allocations and install notifier "handler" on the device-node "device" */ +static nv_acpi_t* nv_install_notifier(struct acpi_device *device, acpi_notify_handler handler) +{ + nvidia_stack_t *sp = NULL; + nv_acpi_t *pNvAcpiObject = NULL; + NV_STATUS rmStatus = NV_ERR_GENERIC; + acpi_status status = -1; + + if (!device) + return NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NULL; + } + + rmStatus = os_alloc_mem((void **) &pNvAcpiObject, sizeof(nv_acpi_t)); + if (rmStatus != NV_OK) + goto return_error; + + os_mem_set((void *)pNvAcpiObject, 0, sizeof(nv_acpi_t)); + + // store a device reference in our object + pNvAcpiObject->device = device; + pNvAcpiObject->sp = sp; + + status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, + handler, pNvAcpiObject); + if (!ACPI_FAILURE(status)) + { + pNvAcpiObject->notify_handler_installed = 1; + + return pNvAcpiObject; + } + +return_error: + nv_kmem_cache_free_stack(sp); + if (pNvAcpiObject) + os_free_mem((void *)pNvAcpiObject); + + return NULL; +} + +/* Tear-down and remove whatever nv_install_notifier did */ +static void nv_uninstall_notifier(nv_acpi_t *pNvAcpiObject, acpi_notify_handler handler) +{ + acpi_status status; + + if (pNvAcpiObject && pNvAcpiObject->notify_handler_installed) + { + status = acpi_remove_notify_handler(pNvAcpiObject->device->handle, ACPI_DEVICE_NOTIFY, handler); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_methods_uninit: failed to remove event notification handler (%d)!\n", status); + } + else + { + nv_kmem_cache_free_stack(pNvAcpiObject->sp); + os_free_mem((void *)pNvAcpiObject); + } + } + + return; +} + +/* + * acpi methods init function. + * check if the NVIF, _DSM and WMMX methods are present in the acpi namespace. + * store NVIF, _DSM and WMMX handle if found. + */ + +void NV_API_CALL nv_acpi_methods_init(NvU32 *handlesPresent) +{ +#if defined(NV_ACPI_BUS_GET_DEVICE_PRESENT) + struct acpi_device *device = NULL; + int retVal = -1; +#endif + + + if (!handlesPresent) // Caller passed us invalid pointer. 
+ return; + + + *handlesPresent = 0; + + NV_ACPI_WALK_NAMESPACE(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, nv_acpi_find_methods, NULL, NULL); + +#if defined(NV_ACPI_BUS_GET_DEVICE_PRESENT) + if (nvif_handle) + { + *handlesPresent = NV_ACPI_NVIF_HANDLE_PRESENT; + do + { + if (!nvif_parent_gpu_handle) /* unknown error */ + break; + + retVal = acpi_bus_get_device(nvif_parent_gpu_handle, &device); + + if (ACPI_FAILURE(retVal) || !device) + break; + + if (device->driver_data) + { + nvif_parent_gpu_handle = NULL; + break; /* Someone else has already populated this device + nodes' structures. So nothing more to be done */ + } + + device->driver_data = nv_install_notifier(device, nv_acpi_event); + + + if (!device->driver_data) + nvif_parent_gpu_handle = NULL; + + } while (0); + } +#endif + + if (wmmx_handle) + *handlesPresent = *handlesPresent | NV_ACPI_WMMX_HANDLE_PRESENT; + +#if defined(NV_ACPI_BUS_GET_DEVICE_PRESENT) + if (psr_handle) + { + // Since _PSR is not a per-GPU construct we only need to register a + // single notifier for the _PSR event. Skip registration for subsequent + // devices + if (psr_nv_acpi_object == NULL) + { + retVal = acpi_bus_get_device(psr_device_handle, &device); + + if (!(ACPI_FAILURE(retVal) || !device)) + { + psr_nv_acpi_object = nv_install_notifier(device, nv_acpi_powersource_hotplug_event); + } + } + } +#endif + + return; +} + +acpi_status nv_acpi_find_methods( + acpi_handle handle, + u32 nest_level, + void *dummy1, + void **dummy2 +) +{ + acpi_handle method_handle; + + if (!acpi_get_handle(handle, "NVIF", &method_handle)) + { + nvif_handle = method_handle; + nvif_parent_gpu_handle = handle; + } + + if (!acpi_get_handle(handle, "WMMX", &method_handle)) + { + wmmx_handle = method_handle; + } + + if (!acpi_get_handle(handle, "_PSR", &method_handle)) + { + psr_handle = method_handle; + psr_device_handle = handle; + } + + return 0; +} + +void NV_API_CALL nv_acpi_methods_uninit(void) +{ + struct acpi_device *device = NULL; + + nvif_handle = NULL; + wmmx_handle = NULL; + + if (psr_nv_acpi_object != NULL) + { + nv_uninstall_notifier(psr_nv_acpi_object, nv_acpi_powersource_hotplug_event); + + psr_handle = NULL; + psr_device_handle = NULL; + psr_nv_acpi_object = NULL; + } + + if (nvif_parent_gpu_handle == NULL) + return; + +#if defined(NV_ACPI_BUS_GET_DEVICE_PRESENT) + acpi_bus_get_device(nvif_parent_gpu_handle, &device); + + nv_uninstall_notifier(device->driver_data, nv_acpi_event); +#endif + + device->driver_data = NULL; + nvif_parent_gpu_handle = NULL; + + return; +} + +static NV_STATUS nv_acpi_extract_integer( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + if (acpi_object->type != ACPI_TYPE_INTEGER) + return NV_ERR_INVALID_ARGUMENT; + + if (acpi_object->integer.value & ~0xffffffffULL) + *data_size = sizeof(acpi_object->integer.value); + else + *data_size = sizeof(NvU32); + + if ((buffer_size < sizeof(NvU32)) || + ((buffer_size < sizeof(acpi_object->integer.value)) && + (acpi_object->integer.value & ~0xffffffffULL))) + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + memcpy(buffer, &acpi_object->integer.value, *data_size); + + return NV_OK; +} + +static NV_STATUS nv_acpi_extract_buffer( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + if (acpi_object->type != ACPI_TYPE_BUFFER) + return NV_ERR_INVALID_ARGUMENT; + + *data_size = acpi_object->buffer.length; + + if (buffer_size < acpi_object->buffer.length) + return NV_ERR_BUFFER_TOO_SMALL; + + 
memcpy(buffer, acpi_object->buffer.pointer, *data_size); + + return NV_OK; +} + +static NV_STATUS nv_acpi_extract_package( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + NV_STATUS status = NV_OK; + NvU32 i, element_size = 0; + + if (acpi_object->type != ACPI_TYPE_PACKAGE) + return NV_ERR_INVALID_ARGUMENT; + + *data_size = 0; + for (i = 0; i < acpi_object->package.count; i++) + { + buffer = ((char *)buffer + element_size); + buffer_size -= element_size; + + status = nv_acpi_extract_object(&acpi_object->package.elements[i], + buffer, buffer_size, &element_size); + if (status != NV_OK) + break; + + *data_size += element_size; + } + + return status; +} + +static NV_STATUS nv_acpi_extract_object( + const union acpi_object *acpi_object, + void *buffer, + NvU32 buffer_size, + NvU32 *data_size +) +{ + NV_STATUS status; + + switch (acpi_object->type) + { + case ACPI_TYPE_INTEGER: + status = nv_acpi_extract_integer(acpi_object, buffer, + buffer_size, data_size); + break; + + case ACPI_TYPE_BUFFER: + status = nv_acpi_extract_buffer(acpi_object, buffer, + buffer_size, data_size); + break; + + case ACPI_TYPE_PACKAGE: + status = nv_acpi_extract_package(acpi_object, buffer, + buffer_size, data_size); + break; + + case ACPI_TYPE_ANY: + /* + * ACPI_TYPE_ANY is used to represent a NULL/Uninitialized object which is objectType 0 + * in the ACPI SPEC. This should not be treated as error. + */ + status = NV_OK; + break; + + default: + status = NV_ERR_NOT_SUPPORTED; + } + + return status; +} + +NV_STATUS NV_API_CALL nv_acpi_method( + NvU32 acpi_method, + NvU32 function, + NvU32 subFunction, + void *inParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *outData, + NvU16 *outDataSize +) +{ + NV_STATUS status; + + switch (acpi_method) + { + case NV_EVAL_ACPI_METHOD_NVIF: + status = nv_acpi_nvif_method(function, + subFunction, + inParams, + inParamSize, + outStatus, + outData, + outDataSize); + break; + + case NV_EVAL_ACPI_METHOD_WMMX: + status = nv_acpi_wmmx_method(function, outData, outDataSize); + break; + + default: + status = NV_ERR_NOT_SUPPORTED; + } + + return status; +} + +/* + * This function executes an NVIF ACPI method. 
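+ * The method takes (function, subFunction, parameter buffer) as arguments
+ * and returns a buffer whose first four bytes form a little-endian status
+ * word; the remaining bytes are the payload copied back to the caller.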
+ */ +static NV_STATUS nv_acpi_nvif_method( + NvU32 function, + NvU32 subFunction, + void *inParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *outData, + NvU16 *outDataSize +) +{ + acpi_status status; + struct acpi_object_list input; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *nvif = NULL; + union acpi_object nvif_params[3]; + NvU16 localOutDataSize; + NvU8 localInParams[8]; + + if (!nvif_handle) + return NV_ERR_NOT_SUPPORTED; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_nvif_method: invalid context!\n"); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + nvif_params[0].integer.type = ACPI_TYPE_INTEGER; + nvif_params[0].integer.value = function; + + nvif_params[1].integer.type = ACPI_TYPE_INTEGER; + nvif_params[1].integer.value = subFunction; + + nvif_params[2].buffer.type = ACPI_TYPE_BUFFER; + + if (inParams && (inParamSize > 0)) + { + nvif_params[2].buffer.length = inParamSize; + nvif_params[2].buffer.pointer = inParams; + } + else + { + memset(localInParams, 0, 8); + nvif_params[2].buffer.length = 8; + nvif_params[2].buffer.pointer = localInParams; + } + + input.count = 3; + input.pointer = nvif_params; + + status = acpi_evaluate_object(nvif_handle, NULL, &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_nvif_method: failed to get NVIF data, " + "status 0x%x, function 0x%x, subFunction 0x%x!\n", + status, function, subFunction); + return NV_ERR_GENERIC; + } + + nvif = output.pointer; + if (nvif && (nvif->type == ACPI_TYPE_BUFFER) && (nvif->buffer.length >= 4)) + { + if (outStatus) + { + *outStatus = nvif->buffer.pointer[3] << 24 | + nvif->buffer.pointer[2] << 16 | + nvif->buffer.pointer[1] << 8 | + nvif->buffer.pointer[0]; + } + + if (outData && outDataSize) + { + localOutDataSize = nvif->buffer.length - 4; + if (localOutDataSize <= *outDataSize) + { + *outDataSize = NV_MIN(*outDataSize, localOutDataSize); + memcpy(outData, &nvif->buffer.pointer[4], *outDataSize); + } + else + { + *outDataSize = localOutDataSize; + kfree(output.pointer); + return NV_ERR_BUFFER_TOO_SMALL; + } + } + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_nvif_method: NVIF data invalid, function 0x%x, " + "subFunction 0x%x!\n", function, subFunction); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + kfree(output.pointer); + return NV_OK; +} + +#define MAX_INPUT_PARAM_SIZE 1024 +/* + * This function executes a _DSM ACPI method. 
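+ * Arguments follow the standard _DSM convention: a 16-byte GUID, a revision
+ * ID, a sub-function index, and a parameter buffer. NVPCF requests are
+ * evaluated by absolute pathname (\_SB.NPCF._DSM) instead of through the
+ * GPU's device handle.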
+ */ +NV_STATUS NV_API_CALL nv_acpi_dsm_method( + nv_state_t *nv, + NvU8 *pAcpiDsmGuid, + NvU32 acpiDsmRev, + NvBool acpiNvpcfDsmFunction, + NvU32 acpiDsmSubFunction, + void *pInParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *pOutData, + NvU16 *pSize +) +{ + NV_STATUS status = NV_ERR_OPERATING_SYSTEM; + acpi_status acpi_status; + struct acpi_object_list input; + union acpi_object *dsm = NULL; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object dsm_params[4]; + NvU8 *argument3 = NULL; + NvU32 data_size; + acpi_handle dev_handle = NULL; + + if (!nv_acpi_get_device_handle(nv, &dev_handle)) + return NV_ERR_NOT_SUPPORTED; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if ((!pInParams) || (inParamSize > MAX_INPUT_PARAM_SIZE) || (!pOutData) || (!pSize)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: invalid argument(s)!\n", __FUNCTION__); + return NV_ERR_INVALID_ARGUMENT; + } + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_INFO, + "NVRM: %s: invalid argument(s)!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + status = os_alloc_mem((void **)&argument3, inParamSize); + if (status != NV_OK) + return status; + + // + // dsm_params[0].buffer.pointer and dsm_params[1].integer.value set in + // switch below based on acpiDsmFunction + // + + dsm_params[0].buffer.type = ACPI_TYPE_BUFFER; + dsm_params[0].buffer.length = 0x10; + dsm_params[0].buffer.pointer = pAcpiDsmGuid; + + dsm_params[1].integer.type = ACPI_TYPE_INTEGER; + dsm_params[1].integer.value = acpiDsmRev; + + dsm_params[2].integer.type = ACPI_TYPE_INTEGER; + dsm_params[2].integer.value = acpiDsmSubFunction; + + dsm_params[3].buffer.type = ACPI_TYPE_BUFFER; + dsm_params[3].buffer.length = inParamSize; + memcpy(argument3, pInParams, dsm_params[3].buffer.length); + dsm_params[3].buffer.pointer = argument3; + + // parameters for dsm calls (GUID, rev, subfunction, data) + input.count = 4; + input.pointer = dsm_params; + + if (acpiNvpcfDsmFunction) + { + // + // acpi_evaluate_object() can operate with either valid object pathname or + // valid object handle. For NVPCF DSM function, use valid pathname as we do + // not have device handle for NVPCF device + // + dev_handle = NULL; + acpi_status = acpi_evaluate_object(dev_handle, "\\_SB.NPCF._DSM", &input, &output); + } + else + { + acpi_status = acpi_evaluate_object(dev_handle, "_DSM", &input, &output); + } + + if (ACPI_FAILURE(acpi_status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed to evaluate _DSM method!\n", __FUNCTION__); + goto exit; + } + + dsm = output.pointer; + if (dsm != NULL) + { + if (outStatus) + { + *outStatus = dsm->buffer.pointer[3] << 24 | + dsm->buffer.pointer[2] << 16 | + dsm->buffer.pointer[1] << 8 | + dsm->buffer.pointer[0]; + } + + status = nv_acpi_extract_object(dsm, pOutData, *pSize, &data_size); + *pSize = data_size; + + kfree(output.pointer); + } + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: DSM data invalid!\n", __FUNCTION__); + } + +exit: + os_free_mem(argument3); + return status; +} + +/* + * This function executes a _DDC ACPI method. 
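+ * The GPU's ACPI children are walked and their _ADR values compared against
+ * the known LCD device IDs (0x0110, 0x0118, 0x0400, 0xA420). On the matching
+ * handle _DDC is evaluated with Arg0 = 2 (256-byte EDID) when multi-block
+ * reads are requested, falling back to Arg0 = 1 (128-byte EDID) on failure.
+ * The resulting buffer is copied into pEdidBuffer if it fits; otherwise
+ * NV_ERR_BUFFER_TOO_SMALL is returned.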
+ */ +NV_STATUS NV_API_CALL nv_acpi_ddc_method( + nv_state_t *nv, + void *pEdidBuffer, + NvU32 *pSize, + NvBool bReadMultiBlock +) +{ + acpi_status status; + struct acpi_device *device = NULL; + union acpi_object *ddc = NULL; + struct list_head *node, *next; + NvU32 i, largestEdidSize; + acpi_handle dev_handle = NULL; + acpi_handle lcd_dev_handle = NULL; + + if (!nv_acpi_get_device_handle(nv, &dev_handle)) + return NV_ERR_NOT_SUPPORTED; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + +#if defined(NV_ACPI_BUS_GET_DEVICE_PRESENT) + status = acpi_bus_get_device(dev_handle, &device); +#else + return NV_ERR_NOT_SUPPORTED; +#endif + + if (ACPI_FAILURE(status) || !device) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: invalid context!\n", + __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + list_for_each_safe(node, next, &device->children) + { + unsigned long long device_id = 0; + struct acpi_device *dev = + list_entry(node, struct acpi_device, node); + + if (!dev) + continue; + + status = acpi_evaluate_integer(dev->handle, "_ADR", NULL, &device_id); + if (ACPI_FAILURE(status)) + /* Couldnt query device_id for this device */ + continue; + + switch (device_id & 0xffff) { + case 0x0110: + case 0x0118: + case 0x0400: + case 0xA420: + lcd_dev_handle = dev->handle; + nv_printf(NV_DBG_INFO, "NVRM: %s Found LCD: %x\n", + __FUNCTION__, device_id); + break; + default: + break; + } + + if (lcd_dev_handle != NULL) + break; + } + + if (lcd_dev_handle == NULL) + { + nv_printf(NV_DBG_INFO, "NVRM: %s LCD not found\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + + // + // As per ACPI Spec 3.0: + // ARG0 = 0x1 for 128 bytes edid buffer + // ARG0 = 0x2 for 256 bytes edid buffer + // + + largestEdidSize = bReadMultiBlock ? 2 : 1; + + for (i = largestEdidSize; i >= 1; i--) + { + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object ddc_arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list input = { 1, &ddc_arg0 }; + + ddc_arg0.integer.value = i; + status = acpi_evaluate_object(lcd_dev_handle, "_DDC", &input, &output); + if (ACPI_SUCCESS(status)) { + ddc = output.pointer; + break; + } + } + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed status: %08x \n", + __FUNCTION__, + status); + return NV_ERR_GENERIC; + } + else + { + if (ddc && (ddc->type == ACPI_TYPE_BUFFER) && (ddc->buffer.length > 0)) + { + if (ddc->buffer.length <= *pSize) + { + *pSize = NV_MIN(*pSize, ddc->buffer.length); + memcpy(pEdidBuffer, ddc->buffer.pointer, *pSize); + } + else + { + kfree(ddc); + return NV_ERR_BUFFER_TOO_SMALL; + } + } + } + + kfree(ddc); + return NV_OK; +} + +/* + * This function executes a _ROM ACPI method. 
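+ * pInData[0] supplies the ROM offset and pInData[1] the length to read;
+ * both are passed to _ROM as ACPI integers. The returned object must be a
+ * buffer of at least that length, and its contents are copied into
+ * pOutData.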
+ */ +NV_STATUS NV_API_CALL nv_acpi_rom_method( + nv_state_t *nv, + NvU32 *pInData, + NvU32 *pOutData +) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *rom; + union acpi_object rom_arg[2]; + struct acpi_object_list input = { 2, rom_arg }; + acpi_handle dev_handle = NULL; + uint32_t offset, length; + + if (!nv_acpi_get_device_handle(nv, &dev_handle)) + return NV_ERR_NOT_SUPPORTED; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + offset = pInData[0]; + length = pInData[1]; + + rom_arg[0].type = ACPI_TYPE_INTEGER; + rom_arg[0].integer.value = offset; + rom_arg[1].type = ACPI_TYPE_INTEGER; + rom_arg[1].integer.value = length; + + status = acpi_evaluate_object(dev_handle, "_ROM", &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed to evaluate _ROM method!\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + else + { + rom = output.pointer; + + if ((rom != NULL) && (rom->type == ACPI_TYPE_BUFFER) && + (rom->buffer.length >= length)) + { + memcpy(pOutData, rom->buffer.pointer, length); + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: Invalid _ROM data\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + } + + kfree(output.pointer); + return NV_OK; +} + +/* + * This function executes a _DOD ACPI method. + */ +NV_STATUS NV_API_CALL nv_acpi_dod_method( + nv_state_t *nv, + NvU32 *pOutData, + NvU32 *pSize +) +{ + acpi_status status; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *dod; + acpi_handle dev_handle = NULL; + NvU32 i, count = (*pSize / sizeof(NvU32)); + + if (!nv_acpi_get_device_handle(nv, &dev_handle)) + return NV_ERR_NOT_SUPPORTED; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + status = acpi_evaluate_object(dev_handle, "_DOD", NULL, &output); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: failed to evaluate _DOD method!\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + else + { + dod = output.pointer; + *pSize = 0; + + if ((dod != NULL) && (dod->type == ACPI_TYPE_PACKAGE) && + (dod->package.count <= count)) + { + for (i = 0; i < dod->package.count; i++) + { + if (dod->package.elements[i].type != ACPI_TYPE_INTEGER) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: _DOD entry invalid!\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + pOutData[i] = dod->package.elements[i].integer.value; + *pSize += sizeof(NvU32); + } + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: _DOD data too large!\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + } + + kfree(output.pointer); + return NV_OK; +} + +/* + * This function executes a WMMX ACPI method. 
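+ * Only argument 2 is meaningful to WMMX; arguments 0 and 1 are passed as
+ * zero. The method must return a non-empty ACPI buffer, which is copied
+ * into outData when it fits in *outDataSize; otherwise
+ * NV_ERR_BUFFER_TOO_SMALL is returned.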
+ */ +static NV_STATUS nv_acpi_wmmx_method( + NvU32 arg2, + NvU8 *outData, + NvU16 *outDataSize +) +{ + acpi_status status; + struct acpi_object_list input; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *mmx = NULL; + union acpi_object mmx_params[3]; + + if (!wmmx_handle) + return NV_ERR_NOT_SUPPORTED; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_wmmx_method: invalid context!\n"); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + /* argument 0 and argument 1 are not used in WMMX method, passing 0 */ + + mmx_params[0].integer.type = ACPI_TYPE_INTEGER; + mmx_params[0].integer.value = 0; + + mmx_params[1].integer.type = ACPI_TYPE_INTEGER; + mmx_params[1].integer.value = 0; + + mmx_params[2].integer.type = ACPI_TYPE_INTEGER; + mmx_params[2].integer.value = arg2; + + input.count = 3; + input.pointer = mmx_params; + + status = acpi_evaluate_object(wmmx_handle, NULL, &input, &output); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, + "NVRM: nv_acpi_wmmx_method: failed to get WMMX data, " + "status 0x%x!\n", status); + return NV_ERR_GENERIC; + } + + mmx = output.pointer; + if (mmx && (mmx->type == ACPI_TYPE_BUFFER) && (mmx->buffer.length > 0)) + { + if (outData && outDataSize) + { + if (mmx->buffer.length <= *outDataSize) + { + *outDataSize = NV_MIN(*outDataSize, mmx->buffer.length); + memcpy(outData, mmx->buffer.pointer, *outDataSize); + } + else + { + kfree(output.pointer); + return NV_ERR_BUFFER_TOO_SMALL; + } + } + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nv_acpi_wmmx_method: WMMX data invalid.\n"); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + + kfree(output.pointer); + return NV_OK; +} + +NvBool nv_acpi_power_resource_method_present( + struct pci_dev *pdev +) +{ + acpi_handle handle = NULL; + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *object_package, *object_reference; + acpi_status status; + +#if defined(DEVICE_ACPI_HANDLE) + handle = DEVICE_ACPI_HANDLE(&pdev->dev); +#elif defined (ACPI_HANDLE) + handle = ACPI_HANDLE(&pdev->dev); +#endif + + if (!handle) + return NV_FALSE; + + status = acpi_evaluate_object(handle, "_PR3", NULL, &buf); + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO,"NVRM: Failed to evaluate _PR3 object\n"); + return NV_FALSE; + } + + if (!buf.pointer) + { + nv_printf(NV_DBG_INFO, "NVRM: output buffer pointer is null" + " for _PR3 method\n"); + return NV_FALSE; + } + + object_package = buf.pointer; + + /* + * _PR3 object should be of type package and + * it should contain only one reference + */ + if ((object_package->type != ACPI_TYPE_PACKAGE) && + (object_package->package.count != 0x1)) + { + nv_printf(NV_DBG_ERRORS,"NVRM: _PR3 object is not a type 'package'\n"); + return NV_FALSE; + } + + object_reference = object_package->package.elements; + + /* Check for the reference and the actual type of the reference. */ + if ((object_reference->reference.actual_type != ACPI_TYPE_POWER) && + (object_reference->type != ACPI_TYPE_LOCAL_REFERENCE)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: _PR3 object does not contain POWER Reference\n"); + return NV_FALSE; + } + return NV_TRUE; +} + +/* + * This function executes MUX ACPI methods. 
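+ * Only the MXDS and MXDM methods are supported. The mux device is located
+ * by walking the GPU's ACPI children and matching _ADR against muxAcpiId;
+ * the method is then evaluated with *pInOut as its integer argument, and
+ * the integer it returns is written back to *pInOut.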
+ */ +NV_STATUS NV_API_CALL nv_acpi_mux_method( + nv_state_t *nv, + NvU32 *pInOut, + NvU32 muxAcpiId, + const char *pMethodName +) +{ + acpi_status status; + struct acpi_device *device = NULL; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *mux = NULL; + union acpi_object mux_arg = { ACPI_TYPE_INTEGER }; + struct acpi_object_list input = { 1, &mux_arg }; + acpi_handle dev_handle = NULL; + acpi_handle mux_dev_handle = NULL; + unsigned long long device_id = 0; + struct list_head *node, *next; + + if ((strcmp(pMethodName, "MXDS") != 0) + && (strcmp(pMethodName, "MXDM") != 0)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: Unsupported ACPI method %s\n", + __FUNCTION__, pMethodName); + return NV_ERR_NOT_SUPPORTED; + } + else + { + nv_printf(NV_DBG_INFO, "NVRM: %s: Call for %s ACPI method \n", + __FUNCTION__, pMethodName); + } + + if (!nv_acpi_get_device_handle(nv, &dev_handle)) + return NV_ERR_NOT_SUPPORTED; + + if (!dev_handle) + return NV_ERR_INVALID_ARGUMENT; + +#if defined(NV_ACPI_BUS_GET_DEVICE_PRESENT) + status = acpi_bus_get_device(dev_handle, &device); +#else + return NV_ERR_NOT_SUPPORTED; +#endif + + + if (ACPI_FAILURE(status) || !device) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV_MAY_SLEEP()) + { +#if defined(DEBUG) + nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid context!\n", __FUNCTION__); +#endif + return NV_ERR_NOT_SUPPORTED; + } + + list_for_each_safe(node, next, &device->children) + { + struct acpi_device *dev = list_entry(node, struct acpi_device, node); + + if (!dev) + continue; + + status = acpi_evaluate_integer(dev->handle, "_ADR", NULL, &device_id); + if (ACPI_FAILURE(status)) + /* Could not query device_id for this device */ + continue; + + if (device_id == muxAcpiId) + { + mux_dev_handle = dev->handle; + break; + } + } + + if (mux_dev_handle == NULL) + { + nv_printf(NV_DBG_INFO, + "NVRM: %s Mux device handle not found\n", __FUNCTION__); + return NV_ERR_GENERIC; + } + + mux_arg.integer.type = ACPI_TYPE_INTEGER; + mux_arg.integer.value = (NvU64) *pInOut; + + status = acpi_evaluate_object(mux_dev_handle, (acpi_string)pMethodName, + &input, &output); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, "NVRM: %s: Failed to evaluate %s method!\n", + __FUNCTION__, pMethodName); + return NV_ERR_GENERIC; + } + else + { + mux = output.pointer; + + if (mux && (mux->type == ACPI_TYPE_INTEGER)) + { + *pInOut = mux->integer.value; + } + else + { + nv_printf(NV_DBG_INFO, + "NVRM: %s: Invalid MUX data\n", __FUNCTION__); + kfree(output.pointer); + return NV_ERR_GENERIC; + } + } + + kfree(output.pointer); + return NV_OK; +} + +static acpi_status nv_acpi_find_battery_info( + acpi_handle handle, + NvBool bUseBix +) +{ + acpi_status status = AE_OK; + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *object_package; + NvU32 battery_technology_offset; + + status = acpi_evaluate_object(handle, NULL, NULL, &buf); + + if (ACPI_FAILURE(status)) + { + nv_printf(NV_DBG_INFO, "NVRM: Failed to evaluate battery's object\n"); + return AE_OK; + } + + if (!buf.pointer) + { + nv_printf(NV_DBG_INFO, "NVRM: Battery object output buffer is null\n"); + return AE_OK; + } + + object_package = buf.pointer; + + if (object_package->type != ACPI_TYPE_PACKAGE) + { + nv_printf(NV_DBG_INFO, "NVRM: Battery method output is not package\n"); + return AE_OK; + } + + if (bUseBix) + { + battery_technology_offset = BIX_BATTERY_TECHNOLOGY_OFFSET; + } + else + { + battery_technology_offset = BIF_BATTERY_TECHNOLOGY_OFFSET; + } + + /* + * Only checking here for 
Battery technology type. + * Other fields like Battery Model/Serial number could also be checked but + * driver need to support the case where user has removed battery from the + * system. + * _STA method on the battery device handle couldn't be used due to the same + * reason. + * Hence just cheking if battery technology of slot is rechargable or not. + */ + + if ((object_package->package.elements[battery_technology_offset].type != ACPI_TYPE_INTEGER) || + (object_package->package.elements[battery_technology_offset].integer.value != BATTERY_RECHARGABLE)) + { + return AE_OK; + } + + battery_present = NV_TRUE; + + /* Stop traversing acpi tree. */ + return AE_CTRL_TERMINATE; +} + +static acpi_status nv_acpi_find_battery_device( + acpi_handle handle, + u32 nest_level, + void *dummy1, + void **dummy2 +) +{ + acpi_handle bif_method_handle; + acpi_handle bix_method_handle; + acpi_status status = AE_OK; + + // Find method Battery Information /Extended/ (_BIX or _BIF) and then Battery type. + if (!acpi_get_handle(handle, "_BIX", &bix_method_handle)) + { + status = nv_acpi_find_battery_info(bix_method_handle, NV_TRUE/*bUseBix*/); + } + + if ((battery_present == NV_FALSE) && + !acpi_get_handle(handle, "_BIF", &bif_method_handle)) + { + status = nv_acpi_find_battery_info(bif_method_handle, NV_FALSE/*bUseBix*/); + } + + return status; +} + +NvBool NV_API_CALL nv_acpi_is_battery_present(void) +{ + NV_ACPI_WALK_NAMESPACE(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, + nv_acpi_find_battery_device, NULL, NULL); + + if (battery_present == NV_TRUE) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +#else // NV_LINUX_ACPI_EVENTS_SUPPORTED + +int nv_acpi_init(void) +{ + return 0; +} + +int nv_acpi_uninit(void) +{ + return 0; +} + +void NV_API_CALL nv_acpi_methods_init(NvU32 *handlePresent) +{ + *handlePresent = 0; +} + +void NV_API_CALL nv_acpi_methods_uninit(void) +{ + return; +} + +NV_STATUS NV_API_CALL nv_acpi_method( + NvU32 acpi_method, + NvU32 function, + NvU32 subFunction, + void *inParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *outData, + NvU16 *outDataSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_acpi_dsm_method( + nv_state_t *nv, + NvU8 *pAcpiDsmGuid, + NvU32 acpiDsmRev, + NvBool acpiNvpcfDsmFunction, + NvU32 acpiDsmSubFunction, + void *pInParams, + NvU16 inParamSize, + NvU32 *outStatus, + void *pOutData, + NvU16 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_acpi_ddc_method( + nv_state_t *nv, + void *pEdidBuffer, + NvU32 *pSize, + NvBool bReadMultiBlock +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_acpi_rom_method( + nv_state_t *nv, + NvU32 *pInData, + NvU32 *pOutData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_acpi_dod_method( + nv_state_t *nv, + NvU32 *pOutData, + NvU32 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool nv_acpi_power_resource_method_present( + struct pci_dev *pdev +) +{ + return NV_FALSE; +} + +NV_STATUS NV_API_CALL nv_acpi_get_powersource(NvU32 *ac_plugged) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void nv_acpi_register_notifier(nv_linux_state_t *nvl) +{ + return; +} + +void nv_acpi_unregister_notifier(nv_linux_state_t *nvl) +{ + return; +} + +NV_STATUS NV_API_CALL nv_acpi_mux_method( + nv_state_t *nv, + NvU32 *pInOut, + NvU32 muxAcpiId, + const char *pMethodName +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool NV_API_CALL nv_acpi_is_battery_present(void) +{ + return NV_FALSE; +} +#endif diff --git a/kernel-open/nvidia/nv-caps.c 
b/kernel-open/nvidia/nv-caps.c new file mode 100644 index 000000000..673f8493e --- /dev/null +++ b/kernel-open/nvidia/nv-caps.c @@ -0,0 +1,821 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-linux.h" +#include "nv-caps.h" +#include "nv-procfs.h" +#include "nv-hash.h" + +extern int NVreg_ModifyDeviceFiles; + +/* sys_close() or __close_fd() */ +#include + +#define NV_CAP_DRV_MINOR_COUNT 8192 + +/* Hash table with 512 buckets */ +#define NV_CAP_HASH_BITS 9 +NV_DECLARE_HASHTABLE(g_nv_cap_hash_table, NV_CAP_HASH_BITS); + +#define NV_CAP_HASH_SIZE NV_HASH_SIZE(g_nv_cap_hash_table) + +#define nv_cap_hash_key(path) (nv_string_hash(path) % NV_CAP_HASH_SIZE) + +typedef struct nv_cap_table_entry +{ + /* name must be the first element */ + const char *name; + int minor; + struct hlist_node hlist; +} nv_cap_table_entry_t; + +#define NV_CAP_NUM_ENTRIES(_table) (sizeof(_table) / sizeof(_table[0])) + +static nv_cap_table_entry_t g_nv_cap_nvlink_table[] = +{ + {"/driver/nvidia-nvlink/capabilities/fabric-mgmt"} +}; + +static nv_cap_table_entry_t g_nv_cap_mig_table[] = +{ + {"/driver/nvidia/capabilities/mig/config"}, + {"/driver/nvidia/capabilities/mig/monitor"} +}; + +#define NV_CAP_MIG_CI_ENTRIES(_gi) \ + {_gi "/ci0/access"}, \ + {_gi "/ci1/access"}, \ + {_gi "/ci2/access"}, \ + {_gi "/ci3/access"}, \ + {_gi "/ci4/access"}, \ + {_gi "/ci5/access"}, \ + {_gi "/ci6/access"}, \ + {_gi "/ci7/access"} + +#define NV_CAP_MIG_GI_ENTRIES(_gpu) \ + {_gpu "/gi0/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi0"), \ + {_gpu "/gi1/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi1"), \ + {_gpu "/gi2/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi2"), \ + {_gpu "/gi3/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi3"), \ + {_gpu "/gi4/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi4"), \ + {_gpu "/gi5/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi5"), \ + {_gpu "/gi6/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi6"), \ + {_gpu "/gi7/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi7"), \ + {_gpu "/gi8/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi8"), \ + {_gpu "/gi9/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi9"), \ + {_gpu "/gi10/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi10"), \ + {_gpu "/gi11/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi11"), \ + {_gpu "/gi12/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi12"), \ + {_gpu "/gi13/access"}, \ + 
NV_CAP_MIG_CI_ENTRIES(_gpu "/gi13"), \ + {_gpu "/gi14/access"}, \ + NV_CAP_MIG_CI_ENTRIES(_gpu "/gi14") + +static nv_cap_table_entry_t g_nv_cap_mig_gpu_table[] = +{ + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu0/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu1/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu2/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu3/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu4/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu5/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu6/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu7/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu8/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu9/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu10/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu11/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu12/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu13/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu14/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu15/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu16/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu17/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu18/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu19/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu20/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu21/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu22/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu23/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu24/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu25/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu26/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu27/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu28/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu29/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu30/mig"), + NV_CAP_MIG_GI_ENTRIES("/driver/nvidia/capabilities/gpu31/mig") +}; + +struct nv_cap +{ + char *path; + char *name; + int minor; + int permissions; + int modify; + struct proc_dir_entry *parent; + struct proc_dir_entry *entry; +}; + +#define NV_CAP_PROCFS_WRITE_BUF_SIZE 128 + +typedef struct nv_cap_file_private +{ + int minor; + int permissions; + int modify; + char buffer[NV_CAP_PROCFS_WRITE_BUF_SIZE]; + off_t offset; +} nv_cap_file_private_t; + +struct +{ + NvBool initialized; + struct cdev cdev; + dev_t devno; +} g_nv_cap_drv; + +#define NV_CAP_PROCFS_DIR "driver/nvidia-caps" +#define NV_CAP_NAME_BUF_SIZE 128 + +static struct proc_dir_entry *nv_cap_procfs_dir; +static struct proc_dir_entry *nv_cap_procfs_nvlink_minors; +static struct proc_dir_entry *nv_cap_procfs_mig_minors; + +static int nv_procfs_read_nvlink_minors(struct seq_file *s, void *v) +{ + int i, count; + char name[NV_CAP_NAME_BUF_SIZE]; + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_nvlink_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_nvlink_table[i].name, + "/driver/nvidia-nvlink/capabilities/%s", name) == 1) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "%s %d\n", name, g_nv_cap_nvlink_table[i].minor); + } + } + + return 0; +} + +static int nv_procfs_read_mig_minors(struct seq_file *s, void *v) +{ + int i, count, gpu; + char 
name[NV_CAP_NAME_BUF_SIZE]; + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_mig_table[i].name, + "/driver/nvidia/capabilities/mig/%s", name) == 1) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "%s %d\n", name, g_nv_cap_mig_table[i].minor); + } + } + + count = NV_CAP_NUM_ENTRIES(g_nv_cap_mig_gpu_table); + for (i = 0; i < count; i++) + { + if (sscanf(g_nv_cap_mig_gpu_table[i].name, + "/driver/nvidia/capabilities/gpu%d/mig/%s", &gpu, name) == 2) + { + name[sizeof(name) - 1] = '\0'; + seq_printf(s, "gpu%d/%s %d\n", + gpu, name, g_nv_cap_mig_gpu_table[i].minor); + } + } + + return 0; +} + +NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(nvlink_minors, nv_system_pm_lock); + +NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(mig_minors, nv_system_pm_lock); + +static void nv_cap_procfs_exit(void) +{ + if (!nv_cap_procfs_dir) + { + return; + } + + nv_procfs_unregister_all(nv_cap_procfs_dir, nv_cap_procfs_dir); + nv_cap_procfs_dir = NULL; +} + +int nv_cap_procfs_init(void) +{ + nv_cap_procfs_dir = NV_CREATE_PROC_DIR(NV_CAP_PROCFS_DIR, NULL); + if (nv_cap_procfs_dir == NULL) + { + return -EACCES; + } + + nv_cap_procfs_mig_minors = NV_CREATE_PROC_FILE("mig-minors", + nv_cap_procfs_dir, + mig_minors, + NULL); + if (nv_cap_procfs_mig_minors == NULL) + { + goto cleanup; + } + + nv_cap_procfs_nvlink_minors = NV_CREATE_PROC_FILE("nvlink-minors", + nv_cap_procfs_dir, + nvlink_minors, + NULL); + if (nv_cap_procfs_nvlink_minors == NULL) + { + goto cleanup; + } + + return 0; + +cleanup: + nv_cap_procfs_exit(); + + return -EACCES; +} + +static int nv_cap_find_minor(char *path) +{ + unsigned int key = nv_cap_hash_key(path); + nv_cap_table_entry_t *entry; + + nv_hash_for_each_possible(g_nv_cap_hash_table, entry, hlist, key) + { + if (strcmp(path, entry->name) == 0) + { + return entry->minor; + } + } + + return -1; +} + +static void _nv_cap_table_init(nv_cap_table_entry_t *table, int count) +{ + int i; + unsigned int key; + static int minor = 0; + + for (i = 0; i < count; i++) + { + table[i].minor = minor++; + INIT_HLIST_NODE(&table[i].hlist); + key = nv_cap_hash_key(table[i].name); + nv_hash_add(g_nv_cap_hash_table, &table[i].hlist, key); + } + + WARN_ON(minor > NV_CAP_DRV_MINOR_COUNT); +} + +#define nv_cap_table_init(table) \ + _nv_cap_table_init(table, NV_CAP_NUM_ENTRIES(table)) + +static void nv_cap_tables_init(void) +{ + BUILD_BUG_ON(offsetof(nv_cap_table_entry_t, name) != 0); + + nv_hash_init(g_nv_cap_hash_table); + + nv_cap_table_init(g_nv_cap_nvlink_table); + nv_cap_table_init(g_nv_cap_mig_table); + nv_cap_table_init(g_nv_cap_mig_gpu_table); +} + +static ssize_t nv_cap_procfs_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *pos) +{ + nv_cap_file_private_t *private = NULL; + unsigned long bytes_left; + char *proc_buffer; + + private = ((struct seq_file *)file->private_data)->private; + bytes_left = (sizeof(private->buffer) - private->offset - 1); + + if (count == 0) + { + return -EINVAL; + } + + if ((bytes_left == 0) || (count > bytes_left)) + { + return -ENOSPC; + } + + proc_buffer = &private->buffer[private->offset]; + + if (copy_from_user(proc_buffer, buffer, count)) + { + nv_printf(NV_DBG_ERRORS, "nv-caps: failed to copy in proc data!\n"); + return -EFAULT; + } + + private->offset += count; + proc_buffer[count] = '\0'; + + *pos = private->offset; + + return count; +} + +static int nv_cap_procfs_read(struct seq_file *s, void *v) +{ + nv_cap_file_private_t *private = s->private; + + seq_printf(s, "%s: %d\n", "DeviceFileMinor", 
private->minor); + seq_printf(s, "%s: %d\n", "DeviceFileMode", private->permissions); + seq_printf(s, "%s: %d\n", "DeviceFileModify", private->modify); + + return 0; +} + +static int nv_cap_procfs_open(struct inode *inode, struct file *file) +{ + nv_cap_file_private_t *private = NULL; + int rc; + nv_cap_t *cap = NV_PDE_DATA(inode); + + NV_KMALLOC(private, sizeof(nv_cap_file_private_t)); + if (private == NULL) + { + return -ENOMEM; + } + + private->minor = cap->minor; + private->permissions = cap->permissions; + private->offset = 0; + private->modify = cap->modify; + + rc = single_open(file, nv_cap_procfs_read, private); + if (rc < 0) + { + NV_KFREE(private, sizeof(nv_cap_file_private_t)); + return rc; + } + + rc = nv_down_read_interruptible(&nv_system_pm_lock); + if (rc < 0) + { + single_release(inode, file); + NV_KFREE(private, sizeof(nv_cap_file_private_t)); + } + + return rc; +} + +static int nv_cap_procfs_release(struct inode *inode, struct file *file) +{ + struct seq_file *s = file->private_data; + nv_cap_file_private_t *private = NULL; + char *buffer; + int modify; + nv_cap_t *cap = NV_PDE_DATA(inode); + + if (s != NULL) + { + private = s->private; + } + + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + if (private != NULL) + { + buffer = private->buffer; + + if (private->offset != 0) + { + if (sscanf(buffer, "DeviceFileModify: %d", &modify) == 1) + { + cap->modify = modify; + } + } + + NV_KFREE(private, sizeof(nv_cap_file_private_t)); + } + + /* + * All open files using the proc entry will be invalidated + * if the entry is removed. + */ + file->private_data = NULL; + + return 0; +} + +static nv_proc_ops_t g_nv_cap_procfs_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_cap_procfs_open, + .NV_PROC_OPS_RELEASE = nv_cap_procfs_release, + .NV_PROC_OPS_WRITE = nv_cap_procfs_write, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_LSEEK = seq_lseek, +}; + +/* forward declaration of g_nv_cap_drv_fops */ +static struct file_operations g_nv_cap_drv_fops; + +int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd) +{ + struct file *file; + int dup_fd; + struct inode *inode = NULL; + dev_t rdev = 0; + struct files_struct *files = current->files; + struct fdtable *fdt; + + if (cap == NULL) + { + return -1; + } + + file = fget(fd); + if (file == NULL) + { + return -1; + } + + inode = NV_FILE_INODE(file); + if (inode == NULL) + { + goto err; + } + + /* Make sure the fd belongs to the nv-cap-drv */ + if (file->f_op != &g_nv_cap_drv_fops) + { + goto err; + } + + /* Make sure the fd has the expected capability */ + rdev = inode->i_rdev; + if (MINOR(rdev) != cap->minor) + { + goto err; + } + + dup_fd = NV_GET_UNUSED_FD_FLAGS(O_CLOEXEC); + if (dup_fd < 0) + { + dup_fd = NV_GET_UNUSED_FD(); + if (dup_fd < 0) + { + goto err; + } + + /* + * Set CLOEXEC before installing the FD. + * + * If fork() happens in between, the opened unused FD will have + * a NULL struct file associated with it, which is okay. + * + * The only well known bug here is the race with dup(2), which is + * already documented in the kernel, see fd_install()'s description. 
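+ * In this fallback path (taken when NV_GET_UNUSED_FD_FLAGS(O_CLOEXEC)
+ * fails) the descriptor is reserved without flags, marked close-on-exec
+ * directly in the fdtable under files->file_lock, and only then published
+ * with fd_install().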
+ */ + + spin_lock(&files->file_lock); + fdt = files_fdtable(files); + NV_SET_CLOSE_ON_EXEC(dup_fd, fdt); + spin_unlock(&files->file_lock); + } + + fd_install(dup_fd, file); + return dup_fd; + +err: + fput(file); + return -1; +} + +void NV_API_CALL nv_cap_close_fd(int fd) +{ + if (fd == -1) + { + return; + } + + /* + * Acquire task_lock as we access current->files explicitly (__close_fd) + * and implicitly (sys_close), and it will race with the exit path. + */ + task_lock(current); + + /* Nothing to do, we are in exit path */ + if (current->files == NULL) + { + task_unlock(current); + return; + } + +/* + * From v4.17-rc1 (to v5.10.8) kernels have stopped exporting sys_close(fd) + * and started exporting __close_fd, as of this commit: + * 2018-04-02 2ca2a09d6215 ("fs: add ksys_close() wrapper; remove in-kernel + * calls to sys_close()") + * Kernels v5.11-rc1 onwards have stopped exporting __close_fd, and started + * exporting close_fd, as of this commit: + * 2020-12-20 8760c909f54a ("file: Rename __close_fd to close_fd and remove + * the files parameter") + */ +#if NV_IS_EXPORT_SYMBOL_PRESENT_close_fd + close_fd(fd); +#elif NV_IS_EXPORT_SYMBOL_PRESENT___close_fd + __close_fd(current->files, fd); +#else + sys_close(fd); +#endif + + task_unlock(current); +} + +static nv_cap_t* nv_cap_alloc(nv_cap_t *parent_cap, const char *name) +{ + nv_cap_t *cap; + int len; + + if (parent_cap == NULL || name == NULL) + { + return NULL; + } + + NV_KMALLOC(cap, sizeof(nv_cap_t)); + if (cap == NULL) + { + return NULL; + } + + len = strlen(name) + strlen(parent_cap->path) + 2; + NV_KMALLOC(cap->path, len); + if (cap->path == NULL) + { + NV_KFREE(cap, sizeof(nv_cap_t)); + return NULL; + } + + strcpy(cap->path, parent_cap->path); + strcat(cap->path, "/"); + strcat(cap->path, name); + + len = strlen(name) + 1; + NV_KMALLOC(cap->name, len); + if (cap->name == NULL) + { + NV_KFREE(cap->path, strlen(cap->path) + 1); + NV_KFREE(cap, sizeof(nv_cap_t)); + return NULL; + } + + strcpy(cap->name, name); + + cap->minor = -1; + cap->modify = NVreg_ModifyDeviceFiles; + + return cap; +} + +static void nv_cap_free(nv_cap_t *cap) +{ + if (cap == NULL) + { + return; + } + + NV_KFREE(cap->path, strlen(cap->path) + 1); + NV_KFREE(cap->name, strlen(cap->name) + 1); + NV_KFREE(cap, sizeof(nv_cap_t)); +} + +nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, + const char *name, int mode) +{ + nv_cap_t *cap = NULL; + int minor; + + cap = nv_cap_alloc(parent_cap, name); + if (cap == NULL) + { + return NULL; + } + + cap->parent = parent_cap->entry; + cap->permissions = mode; + + mode = (S_IFREG | S_IRUGO); + + minor = nv_cap_find_minor(cap->path); + if (minor < 0) + { + nv_cap_free(cap); + return NULL; + } + + cap->minor = minor; + + cap->entry = proc_create_data(name, mode, parent_cap->entry, + &g_nv_cap_procfs_fops, (void*)cap); + if (cap->entry == NULL) + { + nv_cap_free(cap); + return NULL; + } + + return cap; +} + +nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, + const char *name, int mode) +{ + nv_cap_t *cap = NULL; + + cap = nv_cap_alloc(parent_cap, name); + if (cap == NULL) + { + return NULL; + } + + cap->parent = parent_cap->entry; + cap->permissions = mode; + cap->minor = -1; + + mode = (S_IFDIR | S_IRUGO | S_IXUGO); + + cap->entry = NV_PROC_MKDIR_MODE(name, mode, parent_cap->entry); + if (cap->entry == NULL) + { + nv_cap_free(cap); + return NULL; + } + + return cap; +} + +nv_cap_t* NV_API_CALL nv_cap_init(const char *path) +{ + nv_cap_t parent_cap; + nv_cap_t *cap; + int mode; + char *name 
= NULL; + char dir[] = "/capabilities"; + + if (path == NULL) + { + return NULL; + } + + NV_KMALLOC(name, (strlen(path) + strlen(dir)) + 1); + if (name == NULL) + { + return NULL; + } + + strcpy(name, path); + strcat(name, dir); + parent_cap.entry = NULL; + parent_cap.path = ""; + parent_cap.name = ""; + mode = S_IRUGO | S_IXUGO; + cap = nv_cap_create_dir_entry(&parent_cap, name, mode); + + NV_KFREE(name, strlen(name) + 1); + return cap; +} + +void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap) +{ + if (WARN_ON(cap == NULL)) + { + return; + } + + remove_proc_entry(cap->name, cap->parent); + nv_cap_free(cap); +} + +static int nv_cap_drv_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int nv_cap_drv_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static struct file_operations g_nv_cap_drv_fops = +{ + .owner = THIS_MODULE, + .open = nv_cap_drv_open, + .release = nv_cap_drv_release +}; + +int NV_API_CALL nv_cap_drv_init(void) +{ + int rc; + + nv_cap_tables_init(); + + if (g_nv_cap_drv.initialized) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-drv is already initialized.\n"); + return -EBUSY; + } + + rc = alloc_chrdev_region(&g_nv_cap_drv.devno, + 0, + NV_CAP_DRV_MINOR_COUNT, + "nvidia-caps"); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev region.\n"); + return rc; + } + + cdev_init(&g_nv_cap_drv.cdev, &g_nv_cap_drv_fops); + + g_nv_cap_drv.cdev.owner = THIS_MODULE; + + rc = cdev_add(&g_nv_cap_drv.cdev, g_nv_cap_drv.devno, + NV_CAP_DRV_MINOR_COUNT); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-drv failed to create cdev.\n"); + goto cdev_add_fail; + } + + rc = nv_cap_procfs_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "nv-caps-drv: unable to init proc\n"); + goto proc_init_fail; + } + + g_nv_cap_drv.initialized = NV_TRUE; + + return 0; + +proc_init_fail: + cdev_del(&g_nv_cap_drv.cdev); + +cdev_add_fail: + unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT); + + return rc; +} + +void NV_API_CALL nv_cap_drv_exit(void) +{ + if (!g_nv_cap_drv.initialized) + { + return; + } + + nv_cap_procfs_exit(); + + cdev_del(&g_nv_cap_drv.cdev); + + unregister_chrdev_region(g_nv_cap_drv.devno, NV_CAP_DRV_MINOR_COUNT); + + g_nv_cap_drv.initialized = NV_FALSE; +} diff --git a/kernel-open/nvidia/nv-cray.c b/kernel-open/nvidia/nv-cray.c new file mode 100644 index 000000000..ad7f1f53c --- /dev/null +++ b/kernel-open/nvidia/nv-cray.c @@ -0,0 +1,217 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(CONFIG_CRAY_XT) +enum { + NV_FORMAT_STATE_ORDINARY, + NV_FORMAT_STATE_INTRODUCTION, + NV_FORMAT_STATE_FLAGS, + NV_FORMAT_STATE_FIELD_WIDTH, + NV_FORMAT_STATE_PRECISION, + NV_FORMAT_STATE_LENGTH_MODIFIER, + NV_FORMAT_STATE_CONVERSION_SPECIFIER +}; + +enum { + NV_LENGTH_MODIFIER_NONE, + NV_LENGTH_MODIFIER_CHAR, + NV_LENGTH_MODIFIER_SHORT_INT, + NV_LENGTH_MODIFIER_LONG_INT, + NV_LENGTH_MODIFIER_LONG_LONG_INT +}; + +#define NV_IS_FLAG(c) \ + ((c) == '#' || (c) == '0' || (c) == '-' || (c) == ' ' || (c) == '+') +#define NV_IS_LENGTH_MODIFIER(c) \ + ((c) == 'h' || (c) == 'l' || (c) == 'L' || (c) == 'q' || (c) == 'j' || \ + (c) == 'z' || (c) == 't') +#define NV_IS_CONVERSION_SPECIFIER(c) \ + ((c) == 'd' || (c) == 'i' || (c) == 'o' || (c) == 'u' || (c) == 'x' || \ + (c) == 'X' || (c) == 'e' || (c) == 'E' || (c) == 'f' || (c) == 'F' || \ + (c) == 'g' || (c) == 'G' || (c) == 'a' || (c) == 'A' || (c) == 'c' || \ + (c) == 's' || (c) == 'p') + +#define NV_MAX_NUM_INFO_MMRS 6 + +NV_STATUS nvos_forward_error_to_cray( + struct pci_dev *dev, + NvU32 error_number, + const char *format, + va_list ap +) +{ + NvU32 num_info_mmrs; + NvU64 x = 0, info_mmrs[NV_MAX_NUM_INFO_MMRS]; + int state = NV_FORMAT_STATE_ORDINARY; + int modifier = NV_LENGTH_MODIFIER_NONE; + NvU32 i, n = 0, m = 0; + + memset(info_mmrs, 0, sizeof(info_mmrs)); + while (*format != '\0') + { + switch (state) + { + case NV_FORMAT_STATE_ORDINARY: + if (*format == '%') + state = NV_FORMAT_STATE_INTRODUCTION; + break; + case NV_FORMAT_STATE_INTRODUCTION: + if (*format == '%') + { + state = NV_FORMAT_STATE_ORDINARY; + break; + } + case NV_FORMAT_STATE_FLAGS: + if (NV_IS_FLAG(*format)) + { + state = NV_FORMAT_STATE_FLAGS; + break; + } + else if (*format == '*') + { + state = NV_FORMAT_STATE_FIELD_WIDTH; + break; + } + case NV_FORMAT_STATE_FIELD_WIDTH: + if ((*format >= '0') && (*format <= '9')) + { + state = NV_FORMAT_STATE_FIELD_WIDTH; + break; + } + else if (*format == '.') + { + state = NV_FORMAT_STATE_PRECISION; + break; + } + case NV_FORMAT_STATE_PRECISION: + if ((*format >= '0') && (*format <= '9')) + { + state = NV_FORMAT_STATE_PRECISION; + break; + } + else if (NV_IS_LENGTH_MODIFIER(*format)) + { + state = NV_FORMAT_STATE_LENGTH_MODIFIER; + break; + } + else if (NV_IS_CONVERSION_SPECIFIER(*format)) + { + state = NV_FORMAT_STATE_CONVERSION_SPECIFIER; + break; + } + case NV_FORMAT_STATE_LENGTH_MODIFIER: + if ((*format == 'h') || (*format == 'l')) + { + state = NV_FORMAT_STATE_LENGTH_MODIFIER; + break; + } + else if (NV_IS_CONVERSION_SPECIFIER(*format)) + { + state = NV_FORMAT_STATE_CONVERSION_SPECIFIER; + break; + } + } + switch (state) + { + case NV_FORMAT_STATE_INTRODUCTION: + modifier = NV_LENGTH_MODIFIER_NONE; + break; + case NV_FORMAT_STATE_LENGTH_MODIFIER: + switch (*format) + { + case 'h': + modifier = (modifier == NV_LENGTH_MODIFIER_NONE) + ? NV_LENGTH_MODIFIER_SHORT_INT + : NV_LENGTH_MODIFIER_CHAR; + break; + case 'l': + modifier = (modifier == NV_LENGTH_MODIFIER_NONE) + ? 
NV_LENGTH_MODIFIER_LONG_INT + : NV_LENGTH_MODIFIER_LONG_LONG_INT; + break; + case 'q': + modifier = NV_LENGTH_MODIFIER_LONG_LONG_INT; + default: + return NV_ERR_INVALID_ARGUMENT; + } + break; + case NV_FORMAT_STATE_CONVERSION_SPECIFIER: + switch (*format) + { + case 'c': + case 'd': + case 'i': + x = (unsigned int)va_arg(ap, int); + break; + case 'o': + case 'u': + case 'x': + case 'X': + switch (modifier) + { + case NV_LENGTH_MODIFIER_LONG_LONG_INT: + x = va_arg(ap, unsigned long long int); + break; + case NV_LENGTH_MODIFIER_LONG_INT: + x = va_arg(ap, unsigned long int); + break; + case NV_LENGTH_MODIFIER_CHAR: + case NV_LENGTH_MODIFIER_SHORT_INT: + case NV_LENGTH_MODIFIER_NONE: + x = va_arg(ap, unsigned int); + break; + } + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + state = NV_FORMAT_STATE_ORDINARY; + for (i = 0; i < ((modifier == NV_LENGTH_MODIFIER_LONG_LONG_INT) + ? 2 : 1); i++) + { + if (m == NV_MAX_NUM_INFO_MMRS) + return NV_ERR_INSUFFICIENT_RESOURCES; + info_mmrs[m] = ((info_mmrs[m] << 32) | (x & 0xffffffff)); + x >>= 32; + if (++n == 2) + { + m++; + n = 0; + } + } + } + format++; + } + + num_info_mmrs = (m + (n != 0)); + if (num_info_mmrs > 0) + cray_nvidia_report_error(dev, error_number, num_info_mmrs, info_mmrs); + + return NV_OK; +} +#endif diff --git a/kernel-open/nvidia/nv-dma.c b/kernel-open/nvidia/nv-dma.c new file mode 100644 index 000000000..71ac10c94 --- /dev/null +++ b/kernel-open/nvidia/nv-dma.c @@ -0,0 +1,1305 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#define NV_DMA_DEV_PRINTF(debuglevel, dma_dev, format, ... ) \ + nv_printf(debuglevel, "NVRM: %s: " format, \ + (((dma_dev) && ((dma_dev)->dev)) ? 
dev_name((dma_dev)->dev) : \ + NULL), \ + ## __VA_ARGS__) + +NV_STATUS nv_create_dma_map_scatterlist (nv_dma_map_t *dma_map); +void nv_destroy_dma_map_scatterlist(nv_dma_map_t *dma_map); +NV_STATUS nv_map_dma_map_scatterlist (nv_dma_map_t *dma_map); +void nv_unmap_dma_map_scatterlist (nv_dma_map_t *dma_map); +static void nv_dma_unmap_contig (nv_dma_map_t *dma_map); +static void nv_dma_unmap_scatterlist (nv_dma_map_t *dma_map); + +static inline NvBool nv_dma_is_addressable( + nv_dma_device_t *dma_dev, + NvU64 start, + NvU64 size +) +{ + NvU64 limit = start + size - 1; + + return (start >= dma_dev->addressable_range.start) && + (limit <= dma_dev->addressable_range.limit) && + (limit >= start); +} + +static NV_STATUS nv_dma_map_contig( + nv_dma_device_t *dma_dev, + nv_dma_map_t *dma_map, + NvU64 *va +) +{ +#if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT) && defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT) + *va = dma_map_page_attrs(dma_map->dev, dma_map->pages[0], 0, + dma_map->page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, + (dma_map->cache_type == NV_MEMORY_UNCACHED) ? + DMA_ATTR_SKIP_CPU_SYNC : 0); +#else + *va = dma_map_page(dma_map->dev, dma_map->pages[0], 0, + dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL); +#endif + if (dma_mapping_error(dma_map->dev, *va)) + { + return NV_ERR_OPERATING_SYSTEM; + } + + dma_map->mapping.contig.dma_addr = *va; + + if (!nv_dma_is_addressable(dma_dev, *va, dma_map->page_count * PAGE_SIZE)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA address not in addressable range of device " + "(0x%llx-0x%llx, 0x%llx-0x%llx)\n", + *va, *va + (dma_map->page_count * PAGE_SIZE - 1), + dma_dev->addressable_range.start, + dma_dev->addressable_range.limit); + nv_dma_unmap_contig(dma_map); + return NV_ERR_INVALID_ADDRESS; + } + + return NV_OK; +} + +static void nv_dma_unmap_contig(nv_dma_map_t *dma_map) +{ +#if defined(NV_DMA_MAP_PAGE_ATTRS_PRESENT) && defined(NV_DMA_ATTR_SKIP_CPU_SYNC_PRESENT) + dma_unmap_page_attrs(dma_map->dev, dma_map->mapping.contig.dma_addr, + dma_map->page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, + (dma_map->cache_type == NV_MEMORY_UNCACHED) ? + DMA_ATTR_SKIP_CPU_SYNC : 0); +#else + dma_unmap_page(dma_map->dev, dma_map->mapping.contig.dma_addr, + dma_map->page_count * PAGE_SIZE, DMA_BIDIRECTIONAL); +#endif +} + +static void nv_fill_scatterlist +( + struct scatterlist *sgl, + struct page **pages, + unsigned int page_count +) +{ + unsigned int i; + struct scatterlist *sg; +#if defined(for_each_sg) + for_each_sg(sgl, sg, page_count, i) + { + sg_set_page(sg, pages[i], PAGE_SIZE, 0); + } +#else + for (i = 0; i < page_count; i++) + { + sg = &(sgl)[i]; + sg->page = pages[i]; + sg->length = PAGE_SIZE; + sg->offset = 0; + } +#endif +} + +NV_STATUS nv_create_dma_map_scatterlist(nv_dma_map_t *dma_map) +{ + /* + * We need to split our mapping into at most 4GB - PAGE_SIZE chunks. + * The Linux kernel stores the length (and offset) of a scatter-gather + * segment as an unsigned int, so it will overflow if we try to do + * anything larger. + */ + NV_STATUS status; + nv_dma_submap_t *submap; + NvU32 i; + NvU64 allocated_size = 0; + NvU64 num_submaps = dma_map->page_count + NV_DMA_SUBMAP_MAX_PAGES - 1; + NvU64 total_size = dma_map->page_count << PAGE_SHIFT; + + /* + * This turns into 64-bit division, which the ARMv7 kernel doesn't provide + * implicitly. Instead, we need to use the platform's do_div() to perform + * the division. 
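+ * Note that do_div() divides in place: num_submaps (biased upward by
+ * NV_DMA_SUBMAP_MAX_PAGES - 1 above so the result rounds up) is replaced
+ * with the quotient, and the remainder returned by the macro is ignored
+ * here.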
+ */ + do_div(num_submaps, NV_DMA_SUBMAP_MAX_PAGES); + + WARN_ON(NvU64_HI32(num_submaps) != 0); + + if (dma_map->import_sgt && (num_submaps != 1)) + { + return -EINVAL; + } + + dma_map->mapping.discontig.submap_count = NvU64_LO32(num_submaps); + + status = os_alloc_mem((void **)&dma_map->mapping.discontig.submaps, + sizeof(nv_dma_submap_t) * dma_map->mapping.discontig.submap_count); + if (status != NV_OK) + { + return status; + } + + os_mem_set((void *)dma_map->mapping.discontig.submaps, 0, + sizeof(nv_dma_submap_t) * dma_map->mapping.discontig.submap_count); + + /* If we have an imported SGT, just use that directly. */ + if (dma_map->import_sgt) + { + dma_map->mapping.discontig.submaps[0].page_count = dma_map->page_count; + dma_map->mapping.discontig.submaps[0].sgt = *dma_map->import_sgt; + dma_map->mapping.discontig.submaps[0].imported = NV_TRUE; + + return status; + } + + NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i) + { + NvU64 submap_size = NV_MIN(NV_DMA_SUBMAP_MAX_PAGES << PAGE_SHIFT, + total_size - allocated_size); + + submap->page_count = (NvU32)(submap_size >> PAGE_SHIFT); + + status = NV_ALLOC_DMA_SUBMAP_SCATTERLIST(dma_map, submap, i); + if (status != NV_OK) + { + submap->page_count = 0; + break; + } + +#if !defined(NV_SG_ALLOC_TABLE_FROM_PAGES_PRESENT) || \ + defined(NV_DOM0_KERNEL_PRESENT) + { + NvU64 page_idx = NV_DMA_SUBMAP_IDX_TO_PAGE_IDX(i); + nv_fill_scatterlist(submap->sgt.sgl, + &dma_map->pages[page_idx], submap->page_count); + } +#endif + + allocated_size += submap_size; + } + + WARN_ON(allocated_size != total_size); + + if (status != NV_OK) + { + nv_destroy_dma_map_scatterlist(dma_map); + } + + return status; +} + +NV_STATUS nv_map_dma_map_scatterlist(nv_dma_map_t *dma_map) +{ + NV_STATUS status = NV_OK; + nv_dma_submap_t *submap; + NvU64 i; + + NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i) + { + /* Imported SGTs will have already been mapped by the exporter. */ + submap->sg_map_count = submap->imported ? + submap->sgt.orig_nents : + dma_map_sg(dma_map->dev, + submap->sgt.sgl, + submap->sgt.orig_nents, + DMA_BIDIRECTIONAL); + if (submap->sg_map_count == 0) + { + status = NV_ERR_OPERATING_SYSTEM; + break; + } + } + + if (status != NV_OK) + { + nv_unmap_dma_map_scatterlist(dma_map); + } + + return status; +} + +void nv_unmap_dma_map_scatterlist(nv_dma_map_t *dma_map) +{ + nv_dma_submap_t *submap; + NvU64 i; + + NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i) + { + if (submap->sg_map_count == 0) + { + break; + } + + if (submap->imported) + { + /* Imported SGTs will be unmapped by the exporter. */ + continue; + } + + dma_unmap_sg(dma_map->dev, submap->sgt.sgl, + submap->sgt.orig_nents, + DMA_BIDIRECTIONAL); + } +} + +void nv_destroy_dma_map_scatterlist(nv_dma_map_t *dma_map) +{ + nv_dma_submap_t *submap; + NvU64 i; + + NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i) + { + if ((submap->page_count == 0) || submap->imported) + { + break; + } + + sg_free_table(&submap->sgt); + } + + os_free_mem(dma_map->mapping.discontig.submaps); +} + +void nv_load_dma_map_scatterlist( + nv_dma_map_t *dma_map, + NvU64 *va_array +) +{ + unsigned int i, j; + struct scatterlist *sg; + nv_dma_submap_t *submap; + NvU64 sg_addr, sg_off, sg_len, k, l = 0; + + NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i) + { + for_each_sg(submap->sgt.sgl, sg, submap->sg_map_count, j) + { + /* + * It is possible for pci_map_sg() to merge scatterlist entries, so + * make sure we account for that here. 
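+ * Each mapped segment is therefore walked in PAGE_SIZE strides, so a
+ * merged segment still produces one va_array entry per backing page, up
+ * to the submap's page count.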
+ */ + for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), + sg_off = 0, k = 0; + (sg_off < sg_len) && (k < submap->page_count); + sg_off += PAGE_SIZE, l++, k++) + { + va_array[l] = sg_addr + sg_off; + } + } + } +} + +static NV_STATUS nv_dma_map_scatterlist( + nv_dma_device_t *dma_dev, + nv_dma_map_t *dma_map, + NvU64 *va_array +) +{ + NV_STATUS status; + NvU64 i; + + status = nv_create_dma_map_scatterlist(dma_map); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate DMA mapping scatterlist!\n"); + return status; + } + + status = nv_map_dma_map_scatterlist(dma_map); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to create a DMA mapping!\n"); + nv_destroy_dma_map_scatterlist(dma_map); + return status; + } + + nv_load_dma_map_scatterlist(dma_map, va_array); + + for (i = 0; i < dma_map->page_count; i++) + { + if (!nv_dma_is_addressable(dma_dev, va_array[i], PAGE_SIZE)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA address not in addressable range of device " + "(0x%llx, 0x%llx-0x%llx)\n", + va_array[i], dma_dev->addressable_range.start, + dma_dev->addressable_range.limit); + nv_dma_unmap_scatterlist(dma_map); + return NV_ERR_INVALID_ADDRESS; + } + } + + return NV_OK; +} + +static void nv_dma_unmap_scatterlist(nv_dma_map_t *dma_map) +{ + nv_unmap_dma_map_scatterlist(dma_map); + nv_destroy_dma_map_scatterlist(dma_map); +} + +static void nv_dma_nvlink_addr_compress +( + nv_dma_device_t *dma_dev, + NvU64 *va_array, + NvU64 page_count, + NvBool contig +) +{ +#if defined(NVCPU_PPC64LE) + NvU64 addr = 0; + NvU64 i; + + /* + * On systems that support NVLink sysmem links, apply the required address + * compression scheme when links are trained. Otherwise check that PCIe and + * NVLink DMA mappings are equivalent as per requirements of Bug 1920398. + */ + if (dma_dev->nvlink) + { + for (i = 0; i < (contig ? 1 : page_count); i++) + { + va_array[i] = nv_compress_nvlink_addr(va_array[i]); + } + + return; + } + + for (i = 0; i < (contig ? 1 : page_count); i++) + { + addr = nv_compress_nvlink_addr(va_array[i]); + if (WARN_ONCE(va_array[i] != addr, + "unexpected DMA address compression (0x%llx, 0x%llx)\n", + va_array[i], addr)) + { + break; + } + } +#endif +} + +static void nv_dma_nvlink_addr_decompress +( + nv_dma_device_t *dma_dev, + NvU64 *va_array, + NvU64 page_count, + NvBool contig +) +{ +#if defined(NVCPU_PPC64LE) + NvU64 i; + + if (dma_dev->nvlink) + { + for (i = 0; i < (contig ? 
1 : page_count); i++) + { + va_array[i] = nv_expand_nvlink_addr(va_array[i]); + } + } +#endif +} + +NV_STATUS NV_API_CALL nv_dma_map_sgt( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvU32 cache_type, + void **priv +) +{ + NV_STATUS status; + nv_dma_map_t *dma_map = NULL; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (page_count > os_get_num_phys_pages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA mapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + status = os_alloc_mem((void **)&dma_map, sizeof(nv_dma_map_t)); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate nv_dma_map_t!\n"); + return status; + } + + dma_map->dev = dma_dev->dev; + dma_map->pages = NULL; + dma_map->import_sgt = (struct sg_table *) *priv; + dma_map->page_count = page_count; + dma_map->contiguous = NV_FALSE; + dma_map->cache_type = cache_type; + + dma_map->mapping.discontig.submap_count = 0; + status = nv_dma_map_scatterlist(dma_dev, dma_map, va_array); + + if (status != NV_OK) + { + os_free_mem(dma_map); + } + else + { + *priv = dma_map; + nv_dma_nvlink_addr_compress(dma_dev, va_array, dma_map->page_count, + dma_map->contiguous); + } + + return status; +} + +NV_STATUS NV_API_CALL nv_dma_unmap_sgt( + nv_dma_device_t *dma_dev, + void **priv +) +{ + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + *priv = NULL; + + nv_dma_unmap_scatterlist(dma_map); + + os_free_mem(dma_map); + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_dma_map_pages( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvBool contig, + NvU32 cache_type, + void **priv +) +{ + NV_STATUS status; + nv_dma_map_t *dma_map = NULL; + + if (priv == NULL) + { + /* + * IOMMU path has not been implemented yet to handle + * anything except a nv_dma_map_t as the priv argument. + */ + return NV_ERR_NOT_SUPPORTED; + } + + if (page_count > os_get_num_phys_pages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA mapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + status = os_alloc_mem((void **)&dma_map, sizeof(nv_dma_map_t)); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate nv_dma_map_t!\n"); + return status; + } + + dma_map->dev = dma_dev->dev; + dma_map->pages = *priv; + dma_map->import_sgt = NULL; + dma_map->page_count = page_count; + dma_map->contiguous = contig; + dma_map->cache_type = cache_type; + + if (dma_map->page_count > 1 && !dma_map->contiguous) + { + dma_map->mapping.discontig.submap_count = 0; + status = nv_dma_map_scatterlist(dma_dev, dma_map, va_array); + } + else + { + /* + * Force single-page mappings to be contiguous to avoid scatterlist + * overhead. + */ + dma_map->contiguous = NV_TRUE; + + status = nv_dma_map_contig(dma_dev, dma_map, va_array); + } + + if (status != NV_OK) + { + os_free_mem(dma_map); + } + else + { + *priv = dma_map; + nv_dma_nvlink_addr_compress(dma_dev, va_array, dma_map->page_count, + dma_map->contiguous); + } + + return status; +} + +NV_STATUS NV_API_CALL nv_dma_unmap_pages( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + void **priv +) +{ + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + /* + * IOMMU path has not been implemented yet to handle + * anything except a nv_dma_map_t as the priv argument. 
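+ * On entry *priv must be the nv_dma_map_t produced by nv_dma_map_pages();
+ * on success it is replaced with the original page array so the caller
+ * can reclaim it.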
+ */ + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + if (page_count > os_get_num_phys_pages()) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "DMA unmapping request too large!\n"); + return NV_ERR_INVALID_REQUEST; + } + + if (page_count != dma_map->page_count) + { + NV_DMA_DEV_PRINTF(NV_DBG_WARNINGS, dma_dev, + "Requested to DMA unmap %llu pages, but there are %llu " + "in the mapping\n", page_count, dma_map->page_count); + return NV_ERR_INVALID_REQUEST; + } + + *priv = dma_map->pages; + + if (dma_map->contiguous) + { + nv_dma_unmap_contig(dma_map); + } + else + { + nv_dma_unmap_scatterlist(dma_map); + } + + os_free_mem(dma_map); + + return NV_OK; +} + +/* + * Wrappers used for DMA-remapping an nv_alloc_t during transition to more + * generic interfaces. + */ +NV_STATUS NV_API_CALL nv_dma_map_alloc +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + NvBool contig, + void **priv +) +{ + NV_STATUS status; + NvU64 i; + nv_alloc_t *at = *priv; + struct page **pages = NULL; + NvU32 cache_type = NV_MEMORY_CACHED; + NvU64 pages_size = sizeof(struct page *) * (contig ? 1 : page_count); + + /* If we have an imported SGT, just use that directly. */ + if (at && at->import_sgt) + { + *priv = at->import_sgt; + status = nv_dma_map_sgt(dma_dev, page_count, va_array, at->cache_type, + priv); + if (status != NV_OK) + { + *priv = at; + } + return status; + } + + /* + * Convert the nv_alloc_t into a struct page * array for + * nv_dma_map_pages(). + */ + status = os_alloc_mem((void **)&pages, pages_size); + if (status != NV_OK) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to allocate page array for DMA mapping!\n"); + return status; + } + + os_mem_set(pages, 0, pages_size); + + if (at != NULL) + { + WARN_ON(page_count != at->num_pages); + + if (at->flags.user) + { + pages[0] = at->user_pages[0]; + if (!contig) + { + for (i = 1; i < page_count; i++) + { + pages[i] = at->user_pages[i]; + } + } + } + else if (at->flags.physical && contig) + { + /* Supplied pages hold physical address */ + pages[0] = pfn_to_page(PFN_DOWN(va_array[0])); + } + cache_type = at->cache_type; + } + + if (pages[0] == NULL) + { + pages[0] = NV_GET_PAGE_STRUCT(va_array[0]); + if (!contig) + { + for (i = 1; i < page_count; i++) + { + pages[i] = NV_GET_PAGE_STRUCT(va_array[i]); + } + } + } + + *priv = pages; + status = nv_dma_map_pages(dma_dev, page_count, va_array, contig, cache_type, + priv); + if (status != NV_OK) + { + *priv = at; + os_free_mem(pages); + } + + return status; +} + +NV_STATUS NV_API_CALL nv_dma_unmap_alloc +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va_array, + void **priv +) +{ + NV_STATUS status = NV_OK; + nv_dma_map_t *dma_map; + + if (priv == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + dma_map = *priv; + + if (!dma_map->import_sgt) + { + status = nv_dma_unmap_pages(dma_dev, page_count, va_array, priv); + if (status != NV_OK) + { + /* + * If nv_dma_unmap_pages() fails, we hit an assert condition and the + * priv argument won't be the page array we allocated in + * nv_dma_map_alloc(), so we skip the free here. But note that since + * this is an assert condition it really should never happen. 
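+             * Leaking the page array in that case is preferable to freeing
+             * memory whose ownership can no longer be determined.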
+ */ + return status; + } + + /* Free the struct page * array allocated by nv_dma_map_alloc() */ + os_free_mem(*priv); + } else { + status = nv_dma_unmap_sgt(dma_dev, priv); + } + + return status; +} + +static NvBool nv_dma_is_map_resource_implemented +( + nv_dma_device_t *dma_dev +) +{ +#if defined(NV_DMA_MAP_RESOURCE_PRESENT) + const struct dma_map_ops *ops = get_dma_ops(dma_dev->dev); + + if (ops == NULL) + { + /* On pre-5.0 kernels, if dma_map_resource() is present, then we + * assume that ops != NULL. With direct_dma handling swiotlb on 5.0+ + * kernels, ops == NULL. + */ +#if defined(NV_DMA_IS_DIRECT_PRESENT) + return NV_TRUE; +#else + return NV_FALSE; +#endif + } + + return (ops->map_resource != NULL); +#else + return NV_FALSE; +#endif +} + +/* DMA-map a peer PCI device's BAR for peer access. */ +NV_STATUS NV_API_CALL nv_dma_map_peer +( + nv_dma_device_t *dma_dev, + nv_dma_device_t *peer_dma_dev, + NvU8 bar_index, + NvU64 page_count, + NvU64 *va +) +{ + struct pci_dev *peer_pci_dev = to_pci_dev(peer_dma_dev->dev); + struct resource *res; + NV_STATUS status; + + if (peer_pci_dev == NULL) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Not a PCI device"); + return NV_ERR_INVALID_REQUEST; + } + + BUG_ON(bar_index >= NV_GPU_NUM_BARS); + res = &peer_pci_dev->resource[bar_index]; + if (res->start == 0) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Resource %u not valid", + bar_index); + return NV_ERR_INVALID_REQUEST; + } + + if ((*va < res->start) || ((*va + (page_count * PAGE_SIZE)) > res->end)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, peer_dma_dev, + "Mapping requested (start = 0x%llx, page_count = 0x%llx)" + " outside of resource bounds (start = 0x%llx, end = 0x%llx)\n", + *va, page_count, res->start, res->end); + return NV_ERR_INVALID_REQUEST; + } + + if (nv_dma_is_map_resource_implemented(dma_dev)) + { + status = nv_dma_map_mmio(dma_dev, page_count, va); + } + else + { + /* + * Best effort - can't map through the iommu but at least try to + * convert to a bus address. + */ + NvU64 offset = *va - res->start; + *va = nv_pci_bus_address(peer_pci_dev, bar_index) + offset; + status = NV_OK; + } + + return status; +} + +void NV_API_CALL nv_dma_unmap_peer +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 va +) +{ + if (nv_dma_is_map_resource_implemented(dma_dev)) + { + nv_dma_unmap_mmio(dma_dev, page_count, va); + } +} + +/* DMA-map another anonymous device's MMIO region for peer access. */ +NV_STATUS NV_API_CALL nv_dma_map_mmio +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 *va +) +{ +#if defined(NV_DMA_MAP_RESOURCE_PRESENT) + NvU64 mmio_addr; + + BUG_ON(!va); + + mmio_addr = *va; + + *va = dma_map_resource(dma_dev->dev, mmio_addr, page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dma_dev->dev, *va)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Failed to DMA map MMIO range [0x%llx-0x%llx]\n", + mmio_addr, mmio_addr + page_count * PAGE_SIZE - 1); + return NV_ERR_OPERATING_SYSTEM; + } + + /* + * The default implementation passes through the source address + * without failing. Adjust it using the DMA start address to keep RM's + * validation schemes happy. 
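+     * nv_dma_unmap_mmio() applies the matching subtraction before the
+     * address is handed back to dma_unmap_resource().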
+ */ + if (!nv_dma_is_map_resource_implemented(dma_dev)) + { + *va = *va + dma_dev->addressable_range.start; + } + + nv_dma_nvlink_addr_compress(dma_dev, va, page_count, NV_TRUE); + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void NV_API_CALL nv_dma_unmap_mmio +( + nv_dma_device_t *dma_dev, + NvU64 page_count, + NvU64 va +) +{ +#if defined(NV_DMA_MAP_RESOURCE_PRESENT) + if (!nv_dma_is_map_resource_implemented(dma_dev)) + { + va = va - dma_dev->addressable_range.start; + } + + nv_dma_nvlink_addr_decompress(dma_dev, &va, page_count, NV_TRUE); + + dma_unmap_resource(dma_dev->dev, va, page_count * PAGE_SIZE, + DMA_BIDIRECTIONAL, 0); +#endif +} + +/* + * Invalidate DMA mapping in CPU caches by "syncing" to the device. + * + * This is only implemented for ARM platforms, since other supported + * platforms are cache coherent and have not required this (we + * explicitly haven't supported SWIOTLB bounce buffering either where + * this would be needed). + */ +void NV_API_CALL nv_dma_cache_invalidate +( + nv_dma_device_t *dma_dev, + void *priv +) +{ +#if defined(NVCPU_AARCH64) + nv_dma_map_t *dma_map = priv; + + if (dma_map->contiguous) + { + dma_sync_single_for_device(dma_dev->dev, + dma_map->mapping.contig.dma_addr, + (size_t) PAGE_SIZE * dma_map->page_count, + DMA_FROM_DEVICE); + } + else + { + nv_dma_submap_t *submap; + NvU64 i; + + NV_FOR_EACH_DMA_SUBMAP(dma_map, submap, i) + { + dma_sync_sg_for_device(dma_dev->dev, + submap->sgt.sgl, + submap->sgt.orig_nents, + DMA_FROM_DEVICE); + } + } +#endif +} + +/* Enable DMA-mapping over NVLink */ +void NV_API_CALL nv_dma_enable_nvlink +( + nv_dma_device_t *dma_dev +) +{ + dma_dev->nvlink = NV_TRUE; +} + +#if defined(NV_LINUX_DMA_BUF_H_PRESENT) && \ + defined(NV_DRM_AVAILABLE) && defined(NV_DRM_DRM_GEM_H_PRESENT) + +/* + * drm_gem_object_{get/put}() added by commit + * e6b62714e87c8811d5564b6a0738dcde63a51774 (2017-02-28) and + * drm_gem_object_{reference/unreference}() removed by commit + * 3e70fd160cf0b1945225eaa08dd2cb8544f21cb8 (2018-11-15). 
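+ * The wrappers below select whichever reference-counting API the target
+ * kernel exposes, keyed off the NV_DRM_GEM_OBJECT_*_PRESENT defines.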
+ */ + +static inline void +nv_dma_gem_object_unreference_unlocked(struct drm_gem_object *gem) +{ +#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT) + +#if defined(NV_DRM_GEM_OBJECT_PUT_UNLOCK_PRESENT) + drm_gem_object_put_unlocked(gem); +#else + drm_gem_object_put(gem); +#endif + +#else + drm_gem_object_unreference_unlocked(gem); +#endif +} + +static inline void +nv_dma_gem_object_reference(struct drm_gem_object *gem) +{ +#if defined(NV_DRM_GEM_OBJECT_GET_PRESENT) + drm_gem_object_get(gem); +#else + drm_gem_object_reference(gem); +#endif +} + +NV_STATUS NV_API_CALL nv_dma_import_sgt +( + nv_dma_device_t *dma_dev, + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + if ((dma_dev == NULL) || + (sgt == NULL) || + (gem == NULL)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Import arguments are NULL!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // Prevent the kernel module controlling GEM from being unloaded + if (!try_module_get(gem->dev->driver->fops->owner)) + { + NV_DMA_DEV_PRINTF(NV_DBG_ERRORS, dma_dev, + "Couldn't reference the GEM object's owner!\n"); + return NV_ERR_INVALID_DEVICE; + } + + // Do nothing with SGT, it is already mapped and pinned by the exporter + + nv_dma_gem_object_reference(gem); + + return NV_OK; +} + +void NV_API_CALL nv_dma_release_sgt +( + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + if (gem == NULL) + { + return; + } + + // Do nothing with SGT, it will be unmapped and unpinned by the exporter + WARN_ON(sgt == NULL); + + nv_dma_gem_object_unreference_unlocked(gem); + + module_put(gem->dev->driver->fops->owner); +} + +#else + +NV_STATUS NV_API_CALL nv_dma_import_sgt +( + nv_dma_device_t *dma_dev, + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void NV_API_CALL nv_dma_release_sgt +( + struct sg_table *sgt, + struct drm_gem_object *gem +) +{ +} +#endif /* NV_LINUX_DMA_BUF_H_PRESENT && NV_DRM_AVAILABLE && NV_DRM_DRM_GEM_H_PRESENT */ + +#if defined(NV_LINUX_DMA_BUF_H_PRESENT) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#endif /* NV_LINUX_DMA_BUF_H_PRESENT */ + +#ifndef IMPORT_DMABUF_FUNCTIONS_DEFINED + +NV_STATUS NV_API_CALL nv_dma_import_dma_buf +( + nv_dma_device_t *dma_dev, + struct dma_buf *dma_buf, + NvU32 *size, + void **user_pages, + struct sg_table **sgt, + nv_dma_buf_t **import_priv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_dma_import_from_fd +( + nv_dma_device_t *dma_dev, + NvS32 fd, + NvU32 *size, + void **user_pages, + struct sg_table **sgt, + nv_dma_buf_t **import_priv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void NV_API_CALL nv_dma_release_dma_buf +( + void *user_pages, + nv_dma_buf_t *import_priv +) +{ +} +#endif /* !IMPORT_DMABUF_FUNCTIONS_DEFINED */ diff --git a/kernel-open/nvidia/nv-dmabuf.c b/kernel-open/nvidia/nv-dmabuf.c new file mode 100644 index 000000000..1d2073b8c --- /dev/null +++ b/kernel-open/nvidia/nv-dmabuf.c @@ -0,0 +1,896 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include +#include "nv-dmabuf.h" + + + +#if defined(CONFIG_DMA_SHARED_BUFFER) +typedef struct nv_dma_buf_mem_handle +{ + NvHandle h_memory; + NvU64 offset; + NvU64 size; + NvU64 bar1_va; +} nv_dma_buf_mem_handle_t; + +typedef struct nv_dma_buf_file_private +{ + nv_state_t *nv; + NvHandle h_client; + NvHandle h_device; + NvHandle h_subdevice; + NvU32 total_objects; + NvU32 num_objects; + NvU64 total_size; + NvU64 attached_size; + struct mutex lock; + nv_dma_buf_mem_handle_t *handles; + NvU64 bar1_va_ref_count; + void *mig_info; +} nv_dma_buf_file_private_t; + +static void +nv_dma_buf_free_file_private( + nv_dma_buf_file_private_t *priv +) +{ + if (priv == NULL) + { + return; + } + + if (priv->handles != NULL) + { + NV_KFREE(priv->handles, priv->total_objects * sizeof(priv->handles[0])); + priv->handles = NULL; + } + + mutex_destroy(&priv->lock); + + NV_KFREE(priv, sizeof(nv_dma_buf_file_private_t)); +} + +static nv_dma_buf_file_private_t* +nv_dma_buf_alloc_file_private( + NvU32 num_handles +) +{ + nv_dma_buf_file_private_t *priv = NULL; + + NV_KMALLOC(priv, sizeof(nv_dma_buf_file_private_t)); + if (priv == NULL) + { + return NULL; + } + + memset(priv, 0, sizeof(nv_dma_buf_file_private_t)); + + mutex_init(&priv->lock); + + NV_KMALLOC(priv->handles, num_handles * sizeof(priv->handles[0])); + if (priv->handles == NULL) + { + goto failed; + } + + memset(priv->handles, 0, num_handles * sizeof(priv->handles[0])); + + return priv; + +failed: + nv_dma_buf_free_file_private(priv); + + return NULL; +} + +// Must be called with RMAPI lock and GPU lock taken +static void +nv_dma_buf_undup_mem_handles_unlocked( + nvidia_stack_t *sp, + NvU32 index, + NvU32 num_objects, + nv_dma_buf_file_private_t *priv +) +{ + NvU32 i = 0; + + for (i = index; i < num_objects; i++) + { + if (priv->handles[i].h_memory == 0) + { + continue; + } + + rm_dma_buf_undup_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[i].h_memory); + + priv->attached_size -= priv->handles[i].size; + priv->handles[i].h_memory = 0; + priv->handles[i].offset = 0; + priv->handles[i].size = 0; + priv->num_objects--; + } +} + +static void +nv_dma_buf_undup_mem_handles( + nvidia_stack_t *sp, + NvU32 index, + NvU32 num_objects, + nv_dma_buf_file_private_t *priv +) +{ + NV_STATUS status; + + status = rm_acquire_api_lock(sp); + if (WARN_ON(status != NV_OK)) + { + return; + } + + status = rm_acquire_all_gpus_lock(sp); + if (WARN_ON(status 
!= NV_OK)) + { + goto unlock_api_lock; + } + + nv_dma_buf_undup_mem_handles_unlocked(sp, index, num_objects, priv); + + rm_release_all_gpus_lock(sp); + +unlock_api_lock: + rm_release_api_lock(sp); +} + +static NV_STATUS +nv_dma_buf_dup_mem_handles( + nvidia_stack_t *sp, + nv_dma_buf_file_private_t *priv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ + NV_STATUS status = NV_OK; + NvU32 index = params->index; + NvU32 count = 0; + NvU32 i = 0; + + status = rm_acquire_api_lock(sp); + if (status != NV_OK) + { + return status; + } + + status = rm_acquire_gpu_lock(sp, priv->nv); + if (status != NV_OK) + { + goto unlock_api_lock; + } + + for (i = 0; i < params->numObjects; i++) + { + NvHandle h_memory_duped = 0; + + if (priv->handles[index].h_memory != 0) + { + status = NV_ERR_IN_USE; + goto failed; + } + + if (params->sizes[i] > priv->total_size - priv->attached_size) + { + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + status = rm_dma_buf_dup_mem_handle(sp, priv->nv, + params->hClient, + priv->h_client, + priv->h_device, + priv->h_subdevice, + priv->mig_info, + params->handles[i], + params->offsets[i], + params->sizes[i], + &h_memory_duped); + if (status != NV_OK) + { + goto failed; + } + + priv->attached_size += params->sizes[i]; + priv->handles[index].h_memory = h_memory_duped; + priv->handles[index].offset = params->offsets[i]; + priv->handles[index].size = params->sizes[i]; + priv->num_objects++; + index++; + count++; + } + + if ((priv->num_objects == priv->total_objects) && + (priv->attached_size != priv->total_size)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + rm_release_gpu_lock(sp, priv->nv); + + rm_release_api_lock(sp); + + return NV_OK; + +failed: + nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv); + + rm_release_gpu_lock(sp, priv->nv); + +unlock_api_lock: + rm_release_api_lock(sp); + + return status; +} + +// Must be called with RMAPI lock and GPU lock taken +static void +nv_dma_buf_unmap_unlocked( + nvidia_stack_t *sp, + nv_dma_device_t *peer_dma_dev, + nv_dma_buf_file_private_t *priv, + struct sg_table *sgt, + NvU32 count +) +{ + NV_STATUS status; + NvU32 i; + NvU64 dma_len; + NvU64 dma_addr; + NvU64 bar1_va; + NvBool bar1_unmap_needed; + struct scatterlist *sg = NULL; + + bar1_unmap_needed = (priv->bar1_va_ref_count == 0); + + for_each_sg(sgt->sgl, sg, count, i) + { + dma_addr = sg_dma_address(sg); + dma_len = priv->handles[i].size; + bar1_va = priv->handles[i].bar1_va; + + WARN_ON(sg_dma_len(sg) != priv->handles[i].size); + + nv_dma_unmap_peer(peer_dma_dev, (dma_len / os_page_size), dma_addr); + + if (bar1_unmap_needed) + { + status = rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[i].h_memory, + priv->handles[i].size, + priv->handles[i].bar1_va); + WARN_ON(status != NV_OK); + } + } +} + +static struct sg_table* +nv_dma_buf_map( + struct dma_buf_attachment *attachment, + enum dma_data_direction direction +) +{ + NV_STATUS status; + nvidia_stack_t *sp = NULL; + struct scatterlist *sg = NULL; + struct sg_table *sgt = NULL; + struct dma_buf *buf = attachment->dmabuf; + struct device *dev = attachment->dev; + nv_dma_buf_file_private_t *priv = buf->priv; + nv_dma_device_t peer_dma_dev = {{ 0 }}; + NvBool bar1_map_needed; + NvBool bar1_unmap_needed; + NvU32 count = 0; + NvU32 i = 0; + int rc = 0; + + // + // We support importers that are able to handle MMIO resources + // not backed by struct page. This will need to be revisited + // when dma-buf support for P9 will be added. 
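+    // Importers advertise that capability through the peer2peer flag on
+    // dynamic attachments, which is checked below.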
+ // +#if defined(NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT) && \ + defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER) + if (dma_buf_attachment_is_dynamic(attachment) && + !attachment->peer2peer) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to map dynamic attachment with no P2P support\n"); + return NULL; + } +#endif + + mutex_lock(&priv->lock); + + if (priv->num_objects != priv->total_objects) + { + goto unlock_priv; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + goto unlock_priv; + } + + status = rm_acquire_api_lock(sp); + if (status != NV_OK) + { + goto free_sp; + } + + status = rm_acquire_gpu_lock(sp, priv->nv); + if (status != NV_OK) + { + goto unlock_api_lock; + } + + NV_KMALLOC(sgt, sizeof(struct sg_table)); + if (sgt == NULL) + { + goto unlock_gpu_lock; + } + + memset(sgt, 0, sizeof(struct sg_table)); + + // + // RM currently returns contiguous BAR1, so we create as many + // sg entries as the number of handles being mapped. + // When RM can alloc discontiguous BAR1, this code will need to be revisited. + // + rc = sg_alloc_table(sgt, priv->num_objects, GFP_KERNEL); + if (rc != 0) + { + goto free_sgt; + } + + peer_dma_dev.dev = dev; + peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask; + bar1_map_needed = bar1_unmap_needed = (priv->bar1_va_ref_count == 0); + + for_each_sg(sgt->sgl, sg, priv->num_objects, i) + { + NvU64 dma_addr; + NvU64 dma_len; + + if (bar1_map_needed) + { + status = rm_dma_buf_map_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[i].h_memory, + priv->handles[i].offset, + priv->handles[i].size, + &priv->handles[i].bar1_va); + if (status != NV_OK) + { + goto unmap_handles; + } + } + + dma_addr = priv->handles[i].bar1_va; + dma_len = priv->handles[i].size; + + status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev, + 0x1, (dma_len / os_page_size), &dma_addr); + if (status != NV_OK) + { + if (bar1_unmap_needed) + { + // Unmap the recently mapped memory handle + (void) rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client, + priv->handles[i].h_memory, + priv->handles[i].size, + priv->handles[i].bar1_va); + } + + // Unmap remaining memory handles + goto unmap_handles; + } + + sg_set_page(sg, NULL, dma_len, 0); + sg_dma_address(sg) = (dma_addr_t)dma_addr; + sg_dma_len(sg) = dma_len; + count++; + } + + priv->bar1_va_ref_count++; + + rm_release_gpu_lock(sp, priv->nv); + + rm_release_api_lock(sp); + + nv_kmem_cache_free_stack(sp); + + mutex_unlock(&priv->lock); + + return sgt; + +unmap_handles: + nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, count); + + sg_free_table(sgt); + +free_sgt: + NV_KFREE(sgt, sizeof(struct sg_table)); + +unlock_gpu_lock: + rm_release_gpu_lock(sp, priv->nv); + +unlock_api_lock: + rm_release_api_lock(sp); + +free_sp: + nv_kmem_cache_free_stack(sp); + +unlock_priv: + mutex_unlock(&priv->lock); + + return NULL; +} + +static void +nv_dma_buf_unmap( + struct dma_buf_attachment *attachment, + struct sg_table *sgt, + enum dma_data_direction direction +) +{ + NV_STATUS status; + struct dma_buf *buf = attachment->dmabuf; + struct device *dev = attachment->dev; + nvidia_stack_t *sp = NULL; + nv_dma_buf_file_private_t *priv = buf->priv; + nv_dma_device_t peer_dma_dev = {{ 0 }}; + int rc = 0; + + mutex_lock(&priv->lock); + + if (priv->num_objects != priv->total_objects) + { + goto unlock_priv; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (WARN_ON(rc != 0)) + { + goto unlock_priv; + } + + status = rm_acquire_api_lock(sp); + if (WARN_ON(status != NV_OK)) + { + goto free_sp; + } + + status = rm_acquire_gpu_lock(sp, 
priv->nv); + if (WARN_ON(status != NV_OK)) + { + goto unlock_api_lock; + } + + peer_dma_dev.dev = dev; + peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask; + + priv->bar1_va_ref_count--; + + nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, priv->num_objects); + + sg_free_table(sgt); + + NV_KFREE(sgt, sizeof(struct sg_table)); + + rm_release_gpu_lock(sp, priv->nv); + +unlock_api_lock: + rm_release_api_lock(sp); + +free_sp: + nv_kmem_cache_free_stack(sp); + +unlock_priv: + mutex_unlock(&priv->lock); +} + +static void +nv_dma_buf_release( + struct dma_buf *buf +) +{ + int rc = 0; + nvidia_stack_t *sp = NULL; + nv_dma_buf_file_private_t *priv = buf->priv; + nv_state_t *nv; + + if (priv == NULL) + { + return; + } + + nv = priv->nv; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (WARN_ON(rc != 0)) + { + return; + } + + nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv); + + rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device, + priv->h_subdevice, priv->mig_info); + + nv_dma_buf_free_file_private(priv); + buf->priv = NULL; + + nvidia_dev_put(nv->gpu_id, sp); + + nv_kmem_cache_free_stack(sp); + + return; +} + +static int +nv_dma_buf_mmap( + struct dma_buf *buf, + struct vm_area_struct *vma +) +{ + return -ENOTSUPP; +} + +#if defined(NV_DMA_BUF_OPS_HAS_KMAP) || \ + defined(NV_DMA_BUF_OPS_HAS_MAP) +static void* +nv_dma_buf_kmap_stub( + struct dma_buf *buf, + unsigned long page_num +) +{ + return NULL; +} + +static void +nv_dma_buf_kunmap_stub( + struct dma_buf *buf, + unsigned long page_num, + void *addr +) +{ + return; +} +#endif + +#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC) || \ + defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC) +static void* +nv_dma_buf_kmap_atomic_stub( + struct dma_buf *buf, + unsigned long page_num +) +{ + return NULL; +} + +static void +nv_dma_buf_kunmap_atomic_stub( + struct dma_buf *buf, + unsigned long page_num, + void *addr +) +{ + return; +} +#endif + +// +// Note: Some of the dma-buf operations are mandatory in some kernels. +// So stubs are added to prevent dma_buf_export() failure. +// The actual implementations of these interfaces is not really required +// for the export operation to work. 
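+// The kmap/map stubs below simply return NULL and the corresponding
+// kunmap/unmap stubs are no-ops.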
+// +// Same functions are used for kmap*/map* because of this commit: +// f9b67f0014cb: dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic +// +static const struct dma_buf_ops nv_dma_buf_ops = { + .map_dma_buf = nv_dma_buf_map, + .unmap_dma_buf = nv_dma_buf_unmap, + .release = nv_dma_buf_release, + .mmap = nv_dma_buf_mmap, +#if defined(NV_DMA_BUF_OPS_HAS_KMAP) + .kmap = nv_dma_buf_kmap_stub, + .kunmap = nv_dma_buf_kunmap_stub, +#endif +#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC) + .kmap_atomic = nv_dma_buf_kmap_atomic_stub, + .kunmap_atomic = nv_dma_buf_kunmap_atomic_stub, +#endif +#if defined(NV_DMA_BUF_OPS_HAS_MAP) + .map = nv_dma_buf_kmap_stub, + .unmap = nv_dma_buf_kunmap_stub, +#endif +#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC) + .map_atomic = nv_dma_buf_kmap_atomic_stub, + .unmap_atomic = nv_dma_buf_kunmap_atomic_stub, +#endif +}; + +static NV_STATUS +nv_dma_buf_create( + nv_state_t *nv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ + int rc = 0; + NV_STATUS status; + nvidia_stack_t *sp = NULL; + struct dma_buf *buf = NULL; + nv_dma_buf_file_private_t *priv = NULL; + NvU32 gpu_id = nv->gpu_id; + + if (!nv->dma_buf_supported) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (params->index > (params->totalObjects - params->numObjects)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + priv = nv_dma_buf_alloc_file_private(params->totalObjects); + if (priv == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate dma-buf private\n"); + return NV_ERR_NO_MEMORY; + } + + priv->total_objects = params->totalObjects; + priv->total_size = params->totalSize; + priv->nv = nv; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + status = NV_ERR_NO_MEMORY; + goto cleanup_priv; + } + + rc = nvidia_dev_get(gpu_id, sp); + if (rc != 0) + { + status = NV_ERR_OPERATING_SYSTEM; + goto cleanup_sp; + } + + status = rm_dma_buf_get_client_and_device(sp, priv->nv, + params->hClient, + &priv->h_client, + &priv->h_device, + &priv->h_subdevice, + &priv->mig_info); + if (status != NV_OK) + { + goto cleanup_device; + } + + status = nv_dma_buf_dup_mem_handles(sp, priv, params); + if (status != NV_OK) + { + goto cleanup_client_and_device; + } + +#if (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 1) + { + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &nv_dma_buf_ops; + exp_info.size = params->totalSize; + exp_info.flags = O_RDWR | O_CLOEXEC; + exp_info.priv = priv; + + buf = dma_buf_export(&exp_info); + } +#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 4) + buf = dma_buf_export(priv, &nv_dma_buf_ops, + params->totalSize, O_RDWR | O_CLOEXEC); +#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 5) + buf = dma_buf_export(priv, &nv_dma_buf_ops, + params->totalSize, O_RDWR | O_CLOEXEC, NULL); +#endif + + if (IS_ERR(buf)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to create dma-buf\n"); + + status = NV_ERR_OPERATING_SYSTEM; + + goto cleanup_handles; + } + + nv_kmem_cache_free_stack(sp); + + rc = dma_buf_fd(buf, O_RDWR | O_CLOEXEC); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf file descriptor\n"); + + // + // If dma-buf is successfully created, the dup'd handles + // clean-up should be done by the release callback. 
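+        // Dropping the last reference below triggers nv_dma_buf_release(),
+        // which undups the handles and frees the file private data.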
+ // + dma_buf_put(buf); + + return NV_ERR_OPERATING_SYSTEM; + } + + params->fd = rc; + + return NV_OK; + +cleanup_handles: + nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv); + +cleanup_client_and_device: + rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device, + priv->h_subdevice, priv->mig_info); + +cleanup_device: + nvidia_dev_put(gpu_id, sp); + +cleanup_sp: + nv_kmem_cache_free_stack(sp); + +cleanup_priv: + nv_dma_buf_free_file_private(priv); + + return status; +} + +static NV_STATUS +nv_dma_buf_reuse( + nv_state_t *nv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ + int rc = 0; + NV_STATUS status = NV_OK; + nvidia_stack_t *sp = NULL; + struct dma_buf *buf = NULL; + nv_dma_buf_file_private_t *priv = NULL; + + buf = dma_buf_get(params->fd); + if (IS_ERR(buf)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + priv = buf->priv; + + if (priv == NULL) + { + status = NV_ERR_OPERATING_SYSTEM; + goto cleanup_dmabuf; + } + + rc = mutex_lock_interruptible(&priv->lock); + if (rc != 0) + { + status = NV_ERR_OPERATING_SYSTEM; + goto cleanup_dmabuf; + } + + if (params->index > (priv->total_objects - params->numObjects)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto unlock_priv; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + status = NV_ERR_NO_MEMORY; + goto unlock_priv; + } + + status = nv_dma_buf_dup_mem_handles(sp, priv, params); + if (status != NV_OK) + { + goto cleanup_sp; + } + +cleanup_sp: + nv_kmem_cache_free_stack(sp); + +unlock_priv: + mutex_unlock(&priv->lock); + +cleanup_dmabuf: + dma_buf_put(buf); + + return status; +} +#endif // CONFIG_DMA_SHARED_BUFFER + +NV_STATUS +nv_dma_buf_export( + nv_state_t *nv, + nv_ioctl_export_to_dma_buf_fd_t *params +) +{ +#if defined(CONFIG_DMA_SHARED_BUFFER) + NV_STATUS status; + + if ((params == NULL) || + (params->totalSize == 0) || + (params->numObjects == 0) || + (params->totalObjects == 0) || + (params->numObjects > NV_DMABUF_EXPORT_MAX_HANDLES) || + (params->numObjects > params->totalObjects)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // If fd >= 0, dma-buf already exists with this fd, so get dma-buf from fd. + // If fd == -1, dma-buf is not created yet, so create it and then store + // additional handles. + // + if (params->fd == -1) + { + status = nv_dma_buf_create(nv, params); + } + else if (params->fd >= 0) + { + status = nv_dma_buf_reuse(nv, params); + } + else + { + status = NV_ERR_INVALID_ARGUMENT; + } + + return status; +#else + return NV_ERR_NOT_SUPPORTED; +#endif // CONFIG_DMA_SHARED_BUFFER +} + + + + + + + + + + + + + diff --git a/kernel-open/nvidia/nv-frontend.c b/kernel-open/nvidia/nv-frontend.c new file mode 100644 index 000000000..3aa684ef8 --- /dev/null +++ b/kernel-open/nvidia/nv-frontend.c @@ -0,0 +1,412 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-reg.h" +#include "nv-frontend.h" + +#if defined(MODULE_LICENSE) + +MODULE_LICENSE("Dual MIT/GPL"); + + + +#endif +#if defined(MODULE_INFO) +MODULE_INFO(supported, "external"); +#endif +#if defined(MODULE_VERSION) +MODULE_VERSION(NV_VERSION_STRING); +#endif + +#ifdef MODULE_ALIAS_CHARDEV_MAJOR +MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER); +#endif + +/* + * MODULE_IMPORT_NS() is added by commit id 8651ec01daeda + * ("module: add support for symbol namespaces") in 5.4 + */ +#if defined(MODULE_IMPORT_NS) + + +/* + * DMA_BUF namespace is added by commit id 16b0314aa746 + * ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16 + */ +MODULE_IMPORT_NS(DMA_BUF); + + +#endif + +static NvU32 nv_num_instances; + +// lock required to protect table. 
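+// Serializes module registration and updates to the minor number table.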
+struct semaphore nv_module_table_lock; + +// minor number table +nvidia_module_t *nv_minor_num_table[NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX + 1]; + +int nvidia_init_module(void); +void nvidia_exit_module(void); + +/* EXPORTS to Linux Kernel */ + +int nvidia_frontend_open(struct inode *, struct file *); +int nvidia_frontend_close(struct inode *, struct file *); +unsigned int nvidia_frontend_poll(struct file *, poll_table *); +int nvidia_frontend_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +long nvidia_frontend_unlocked_ioctl(struct file *, unsigned int, unsigned long); +long nvidia_frontend_compat_ioctl(struct file *, unsigned int, unsigned long); +int nvidia_frontend_mmap(struct file *, struct vm_area_struct *); + +/* character driver entry points */ +static struct file_operations nv_frontend_fops = { + .owner = THIS_MODULE, + .poll = nvidia_frontend_poll, +#if defined(NV_FILE_OPERATIONS_HAS_IOCTL) + .ioctl = nvidia_frontend_ioctl, +#endif + .unlocked_ioctl = nvidia_frontend_unlocked_ioctl, +#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64 + .compat_ioctl = nvidia_frontend_compat_ioctl, +#endif + .mmap = nvidia_frontend_mmap, + .open = nvidia_frontend_open, + .release = nvidia_frontend_close, +}; + +/* Helper functions */ + +static int add_device(nvidia_module_t *module, nv_linux_state_t *device, NvBool all) +{ + NvU32 i; + int rc = -1; + + // look for free a minor number and assign unique minor number to this device + for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++) + { + if (nv_minor_num_table[i] == NULL) + { + nv_minor_num_table[i] = module; + device->minor_num = i; + if (all == NV_TRUE) + { + device = device->next; + if (device == NULL) + { + rc = 0; + break; + } + } + else + { + rc = 0; + break; + } + } + } + return rc; +} + +static int remove_device(nvidia_module_t *module, nv_linux_state_t *device) +{ + int rc = -1; + + // remove this device from minor_number table + if ((device != NULL) && (nv_minor_num_table[device->minor_num] != NULL)) + { + nv_minor_num_table[device->minor_num] = NULL; + device->minor_num = 0; + rc = 0; + } + return rc; +} + +/* Export functions */ + +int nvidia_register_module(nvidia_module_t *module) +{ + int rc = 0; + NvU32 ctrl_minor_num; + + down(&nv_module_table_lock); + if (module->instance >= NV_MAX_MODULE_INSTANCES) + { + printk("NVRM: NVIDIA module instance %d registration failed.\n", + module->instance); + rc = -EINVAL; + goto done; + } + + ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance; + nv_minor_num_table[ctrl_minor_num] = module; + nv_num_instances++; +done: + up(&nv_module_table_lock); + + return rc; +} +EXPORT_SYMBOL(nvidia_register_module); + +int nvidia_unregister_module(nvidia_module_t *module) +{ + int rc = 0; + NvU32 ctrl_minor_num; + + down(&nv_module_table_lock); + + ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance; + if (nv_minor_num_table[ctrl_minor_num] == NULL) + { + printk("NVRM: NVIDIA module for %d instance does not exist\n", + module->instance); + rc = -1; + } + else + { + nv_minor_num_table[ctrl_minor_num] = NULL; + nv_num_instances--; + } + + up(&nv_module_table_lock); + + return rc; +} +EXPORT_SYMBOL(nvidia_unregister_module); + +int nvidia_frontend_add_device(nvidia_module_t *module, nv_linux_state_t * device) +{ + int rc = -1; + NvU32 ctrl_minor_num; + + down(&nv_module_table_lock); + ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance; + if (nv_minor_num_table[ctrl_minor_num] == NULL) + { + printk("NVRM: NVIDIA module for %d 
instance does not exist\n", + module->instance); + rc = -1; + } + else + { + rc = add_device(module, device, NV_FALSE); + } + up(&nv_module_table_lock); + + return rc; +} +EXPORT_SYMBOL(nvidia_frontend_add_device); + +int nvidia_frontend_remove_device(nvidia_module_t *module, nv_linux_state_t * device) +{ + int rc = 0; + NvU32 ctrl_minor_num; + + down(&nv_module_table_lock); + ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance; + if (nv_minor_num_table[ctrl_minor_num] == NULL) + { + printk("NVRM: NVIDIA module for %d instance does not exist\n", + module->instance); + rc = -1; + } + else + { + rc = remove_device(module, device); + } + up(&nv_module_table_lock); + + return rc; +} +EXPORT_SYMBOL(nvidia_frontend_remove_device); + +int nvidia_frontend_open( + struct inode *inode, + struct file *file +) +{ + int rc = -ENODEV; + nvidia_module_t *module = NULL; + + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + + down(&nv_module_table_lock); + module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->open != NULL)) + { + // Increment the reference count of module to ensure that module does + // not get unloaded if its corresponding device file is open, for + // example nvidiaN.ko should not get unloaded if /dev/nvidiaN is open. + if (!try_module_get(module->owner)) + { + up(&nv_module_table_lock); + return -ENODEV; + } + rc = module->open(inode, file); + if (rc < 0) + { + module_put(module->owner); + } + } + + up(&nv_module_table_lock); + return rc; +} + +int nvidia_frontend_close( + struct inode *inode, + struct file *file +) +{ + int rc = -ENODEV; + nvidia_module_t *module = NULL; + + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + + module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->close != NULL)) + { + rc = module->close(inode, file); + + // Decrement the reference count of module. 
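+        // This pairs with the try_module_get() taken in nvidia_frontend_open().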
+ module_put(module->owner); + } + + return rc; +} + +unsigned int nvidia_frontend_poll( + struct file *file, + poll_table *wait +) +{ + unsigned int mask = 0; + struct inode *inode = NV_FILE_INODE(file); + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + nvidia_module_t *module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->poll != NULL)) + mask = module->poll(file, wait); + + return mask; +} + +int nvidia_frontend_ioctl( + struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long i_arg) +{ + int rc = -ENODEV; + nvidia_module_t *module = NULL; + + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->ioctl != NULL)) + rc = module->ioctl(inode, file, cmd, i_arg); + + return rc; +} + +long nvidia_frontend_unlocked_ioctl( + struct file *file, + unsigned int cmd, + unsigned long i_arg +) +{ + return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg); +} + +long nvidia_frontend_compat_ioctl( + struct file *file, + unsigned int cmd, + unsigned long i_arg +) +{ + return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg); +} + +int nvidia_frontend_mmap( + struct file *file, + struct vm_area_struct *vma +) +{ + int rc = -ENODEV; + struct inode *inode = NV_FILE_INODE(file); + NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode); + nvidia_module_t *module = nv_minor_num_table[minor_num]; + + if ((module != NULL) && (module->mmap != NULL)) + rc = module->mmap(file, vma); + + return rc; +} + +static int __init nvidia_frontend_init_module(void) +{ + int status = 0; + + // initialise nvidia module table; + nv_num_instances = 0; + memset(nv_minor_num_table, 0, sizeof(nv_minor_num_table)); + NV_INIT_MUTEX(&nv_module_table_lock); + + status = nvidia_init_module(); + if (status < 0) + { + return status; + } + + // register char device + status = register_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend", &nv_frontend_fops); + if (status < 0) + { + printk("NVRM: register_chrdev() failed!\n"); + nvidia_exit_module(); + } + + return status; +} + +static void __exit nvidia_frontend_exit_module(void) +{ + /* + * If this is the last nvidia_module to be unregistered, cleanup and + * unregister char dev + */ + if (nv_num_instances == 1) + { + unregister_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend"); + } + + nvidia_exit_module(); +} + +module_init(nvidia_frontend_init_module); +module_exit(nvidia_frontend_exit_module); + diff --git a/kernel-open/nvidia/nv-frontend.h b/kernel-open/nvidia/nv-frontend.h new file mode 100644 index 000000000..1ce72a0b0 --- /dev/null +++ b/kernel-open/nvidia/nv-frontend.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_FRONTEND_H_ +#define _NV_FRONTEND_H_ + +#include "nvtypes.h" +#include "nv-linux.h" +#include "nv-register-module.h" + +#define NV_MAX_MODULE_INSTANCES 8 + +#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev) + +#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255 +#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \ + NV_MAX_MODULE_INSTANCES) + +#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \ + (x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN)) + +int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *); +int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *); + +extern nvidia_module_t *nv_minor_num_table[]; + +#endif diff --git a/kernel-open/nvidia/nv-i2c.c b/kernel-open/nvidia/nv-i2c.c new file mode 100644 index 000000000..f227d180e --- /dev/null +++ b/kernel-open/nvidia/nv-i2c.c @@ -0,0 +1,552 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + +static int nv_i2c_algo_master_xfer(struct i2c_adapter *adapter, struct i2c_msg msgs[], int num) +{ + nv_state_t *nv = (nv_state_t *)adapter->algo_data; + unsigned int i = 0; + int rc; + NV_STATUS rmStatus = NV_OK; + nvidia_stack_t *sp = NULL; + const unsigned int supported_i2c_flags = I2C_M_RD +#if defined(I2C_M_DMA_SAFE) + | I2C_M_DMA_SAFE +#endif + ; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + rc = -EIO; + + for (i = 0; ((i < (unsigned int)num) && (rmStatus == NV_OK)); i++) + { + if (msgs[i].flags & ~supported_i2c_flags) + { + /* we only support basic I2C reads/writes, reject any other commands */ + rc = -EINVAL; + nv_printf(NV_DBG_ERRORS, "NVRM: Unsupported I2C flags used. 
(flags:0x%08x)\n", + msgs[i].flags); + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + else + { + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (msgs[i].flags & I2C_M_RD) ? + NV_I2C_CMD_READ : NV_I2C_CMD_WRITE, + (NvU8)(msgs[i].addr & 0x7f), 0, + (NvU32)(msgs[i].len & 0xffffUL), + (NvU8 *)msgs[i].buf); + } + } + + nv_kmem_cache_free_stack(sp); + + return (rmStatus != NV_OK) ? rc : num; +} + +static int nv_i2c_algo_smbus_xfer( + struct i2c_adapter *adapter, + u16 addr, + unsigned short flags, + char read_write, + u8 command, + int size, + union i2c_smbus_data *data +) +{ + nv_state_t *nv = (nv_state_t *)adapter->algo_data; + int rc; + NV_STATUS rmStatus = NV_OK; + nvidia_stack_t *sp = NULL; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + rc = -EIO; + + switch (size) + { + case I2C_SMBUS_QUICK: + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? + NV_I2C_CMD_SMBUS_QUICK_READ : + NV_I2C_CMD_SMBUS_QUICK_WRITE, + (NvU8)(addr & 0x7f), 0, 0, NULL); + break; + + case I2C_SMBUS_BYTE: + if (read_write == I2C_SMBUS_READ) + { + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + NV_I2C_CMD_READ, + (NvU8)(addr & 0x7f), 0, 1, + (NvU8 *)&data->byte); + } + else + { + u8 data = command; + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + NV_I2C_CMD_WRITE, + (NvU8)(addr & 0x7f), 0, 1, + (NvU8 *)&data); + } + break; + + case I2C_SMBUS_BYTE_DATA: + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? + NV_I2C_CMD_SMBUS_READ : + NV_I2C_CMD_SMBUS_WRITE, + (NvU8)(addr & 0x7f), (NvU8)command, 1, + (NvU8 *)&data->byte); + break; + + case I2C_SMBUS_WORD_DATA: + if (read_write != I2C_SMBUS_READ) + { + data->block[1] = (data->word & 0xff); + data->block[2] = (data->word >> 8); + } + + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? + NV_I2C_CMD_SMBUS_READ : + NV_I2C_CMD_SMBUS_WRITE, + (NvU8)(addr & 0x7f), (NvU8)command, 2, + (NvU8 *)&data->block[1]); + + if (read_write == I2C_SMBUS_READ) + { + data->word = ((NvU16)data->block[1]) | + ((NvU16)data->block[2] << 8); + } + break; + + case I2C_SMBUS_BLOCK_DATA: + rmStatus = rm_i2c_transfer(sp, nv, (void *)adapter, + (read_write == I2C_SMBUS_READ) ? + NV_I2C_CMD_SMBUS_BLOCK_READ : + NV_I2C_CMD_SMBUS_BLOCK_WRITE, + (NvU8)(addr & 0x7f), (NvU8)command, + sizeof(data->block), + (NvU8 *)data->block); + break; + default: + rc = -EINVAL; + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + + nv_kmem_cache_free_stack(sp); + + return (rmStatus != NV_OK) ? 
rc : 0; +} + +static u32 nv_i2c_algo_functionality(struct i2c_adapter *adapter) +{ + nv_state_t *nv = (nv_state_t *)adapter->algo_data; + u32 ret = I2C_FUNC_I2C; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 0; + } + + if (rm_i2c_is_smbus_capable(sp, nv, adapter)) + { + ret |= (I2C_FUNC_SMBUS_QUICK | + I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA); + } + + nv_kmem_cache_free_stack(sp); + + return ret; +} + +static struct i2c_algorithm nv_i2c_algo = { + .master_xfer = nv_i2c_algo_master_xfer, + .smbus_xfer = nv_i2c_algo_smbus_xfer, + .functionality = nv_i2c_algo_functionality, +}; + +struct i2c_adapter nv_i2c_adapter_prototype = { + .owner = THIS_MODULE, + .algo = &nv_i2c_algo, + .algo_data = NULL, +}; + +void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port) +{ + NV_STATUS rmStatus; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct i2c_adapter *pI2cAdapter = NULL; + int osstatus = 0; + + // get a i2c adapter + rmStatus = os_alloc_mem((void **)&pI2cAdapter,sizeof(struct i2c_adapter)); + + if (rmStatus != NV_OK) + return NULL; + + // fill in with default structure + os_mem_copy(pI2cAdapter, &nv_i2c_adapter_prototype, sizeof(struct i2c_adapter)); + + pI2cAdapter->dev.parent = nvl->dev; + + if (nvl->pci_dev != NULL) + { + snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name), + "NVIDIA i2c adapter %u at %x:%02x.%u", port, nv->pci_info.bus, + nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn)); + } + else + { + snprintf(pI2cAdapter->name, sizeof(pI2cAdapter->name), + "NVIDIA SOC i2c adapter %u", port); + } + + // add our data to the structure + pI2cAdapter->algo_data = (void *)nv; + + // attempt to register with the kernel + osstatus = i2c_add_adapter(pI2cAdapter); + + if (osstatus) + { + // free the memory and NULL the ptr + os_free_mem(pI2cAdapter); + + pI2cAdapter = NULL; + } + + return ((void *)pI2cAdapter); +} + +void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data) +{ + struct i2c_adapter *pI2cAdapter = (struct i2c_adapter *)data; + + if (pI2cAdapter) + { + // release with the OS + i2c_del_adapter(pI2cAdapter); + os_free_mem(pI2cAdapter); + } +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#else // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) + +void NV_API_CALL nv_i2c_del_adapter(nv_state_t *nv, void *data) +{ +} + +void* NV_API_CALL nv_i2c_add_adapter(nv_state_t *nv, NvU32 port) +{ + return NULL; +} + + + + + + + + + + + + + + + + + + + + + + + + + + +#endif // defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) diff --git a/kernel-open/nvidia/nv-ibmnpu.c b/kernel-open/nvidia/nv-ibmnpu.c new file mode 100644 index 000000000..fcf5dc45e --- /dev/null +++ b/kernel-open/nvidia/nv-ibmnpu.c @@ -0,0 +1,448 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * nv-ibmnpu.c - interface with the ibmnpu (IBM NVLink Processing Unit) "module" + */ +#include "nv-linux.h" + +#if defined(NVCPU_PPC64LE) +#include "nv-ibmnpu.h" +#include "nv-rsync.h" + +/* + * Temporary query to get the L1D cache block size directly from the device + * tree for the offline cache flush workaround, since the ppc64_caches symbol + * is unavailable to us. + */ +const NvU32 P9_L1D_CACHE_DEFAULT_BLOCK_SIZE = 0x80; + +#if defined(NV_OF_GET_PROPERTY_PRESENT) +static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void) +{ + const __be32 *block_size_prop; + + /* + * Attempt to look up the block size from device tree. If unavailable, just + * return the default that we see on these systems. + */ + struct device_node *cpu = of_find_node_by_type(NULL, "cpu"); + if (!cpu) + { + return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE; + } + + block_size_prop = of_get_property(cpu, "d-cache-block-size", NULL); + if (!block_size_prop) + { + return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE; + } + + return be32_to_cpu(*block_size_prop); +} +#else +static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void) +{ + return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE; +} +#endif + +/* + * GPU device memory can be exposed to the kernel as NUMA node memory via the + * IBMNPU devices associated with the GPU. The platform firmware will specify + * the parameters of where the memory lives in the system address space via + * firmware properties on the IBMNPU devices. These properties specify what + * memory can be accessed through the IBMNPU device, and the driver can online + * a GPU device's memory into the range accessible by its associated IBMNPU + * devices. + * + * This function calls over to the IBMNPU driver to query the parameters from + * firmware, and validates that the resulting parameters are acceptable. + */ +static void nv_init_ibmnpu_numa_info(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_npu_numa_info_t *npu_numa_info = &nvl->npu->numa_info; + struct pci_dev *npu_dev = nvl->npu->devs[0]; + NvU64 spa, gpa, aper_size; + + /* + * Terminology: + * - system physical address (spa): 47-bit NVIDIA physical address, which + * is the CPU real address with the NVLink address compression scheme + * already applied in firmware. + * - guest physical address (gpa): 56-bit physical address as seen by the + * operating system. 
This is the base address that we should use for + * onlining device memory. + */ + nvl->numa_info.node_id = ibmnpu_device_get_memory_config(npu_dev, &spa, &gpa, + &aper_size); + if (nvl->numa_info.node_id == NUMA_NO_NODE) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "No NUMA memory aperture found\n"); + return; + } + + /* Validate that the compressed system physical address is not too wide */ + if (spa & (~(BIT_ULL(nv_volta_dma_addr_size) - 1))) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Invalid NUMA memory system pa 0x%llx" + " on IBM-NPU device %04x:%02x:%02x.%u\n", + spa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev), + NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn)); + goto invalid_numa_config; + } + + /* + * Validate that the guest physical address is aligned to 128GB. + * This alignment requirement comes from the Volta address space + * size on POWER9. + */ + if (!IS_ALIGNED(gpa, BIT_ULL(nv_volta_addr_space_width))) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Invalid alignment in NUMA memory guest pa 0x%llx" + " on IBM-NPU device %04x:%02x:%02x.%u\n", + gpa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev), + NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn)); + goto invalid_numa_config; + } + + /* Validate that the aperture can map all of the device's framebuffer */ + if (aper_size < nv->fb->size) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Insufficient NUMA memory aperture size 0x%llx" + " on IBM-NPU device %04x:%02x:%02x.%u (0x%llx required)\n", + aper_size, NV_PCI_DOMAIN_NUMBER(npu_dev), + NV_PCI_BUS_NUMBER(npu_dev), NV_PCI_SLOT_NUMBER(npu_dev), + PCI_FUNC(npu_dev->devfn), nv->fb->size); + goto invalid_numa_config; + } + + npu_numa_info->compr_sys_phys_addr = spa; + npu_numa_info->guest_phys_addr = gpa; + + if (NVreg_EnableUserNUMAManagement) + { + NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE); + } + else + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "User-mode NUMA onlining disabled.\n"); + nvl->numa_info.node_id = NUMA_NO_NODE; + } + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "NUMA memory aperture: " + "[spa = 0x%llx, gpa = 0x%llx, aper_size = 0x%llx]\n", + spa, gpa, aper_size); + + /* Get the CPU's L1D cache block size for offlining cache flush */ + npu_numa_info->l1d_cache_block_size = nv_ibm_get_cpu_l1d_cache_block_size(); + + return; + +invalid_numa_config: + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "NUMA memory aperture disabled due to invalid firmware configuration\n"); + nvl->numa_info.node_id = NUMA_NO_NODE; +} + +void nv_init_ibmnpu_info(nv_state_t *nv) +{ +#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct pci_dev *npu_dev = pnv_pci_get_npu_dev(nvl->pci_dev, 0); + NvU8 dev_count; + + if (!npu_dev) + { + return; + } + + if (os_alloc_mem((void **)&nvl->npu, sizeof(nv_ibmnpu_info_t)) != NV_OK) + { + return; + } + + os_mem_set(nvl->npu, 0, sizeof(nv_ibmnpu_info_t)); + + /* Find any other IBMNPU devices attached to this GPU */ + for (nvl->npu->devs[0] = npu_dev, dev_count = 1; + dev_count < NV_MAX_ATTACHED_IBMNPUS; dev_count++) + { + nvl->npu->devs[dev_count] = pnv_pci_get_npu_dev(nvl->pci_dev, dev_count); + if (!nvl->npu->devs[dev_count]) + { + break; + } + } + + nvl->npu->dev_count = dev_count; + + /* + * If we run out of space for IBMNPU devices, NV_MAX_ATTACHED_IBMNPUS will + * need to be bumped. 
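+     * The WARN_ON below fires if another device is still present just past
+     * the end of the devs[] array.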
+ */ + WARN_ON((dev_count == NV_MAX_ATTACHED_IBMNPUS) && + pnv_pci_get_npu_dev(nvl->pci_dev, dev_count)); + + ibmnpu_device_get_genregs_info(npu_dev, &nvl->npu->genregs); + + if (nvl->npu->genregs.size > 0) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, + "IBM-NPU device %04x:%02x:%02x.%u associated with GPU " + " has a generation register space 0x%llx-0x%llx\n", + NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev), + NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn), + nvl->npu->genregs.start_addr, + nvl->npu->genregs.start_addr + nvl->npu->genregs.size - 1); + } + else + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, + "IBM-NPU device %04x:%02x:%02x.%u associated with GPU " + "does not support generation registers\n", + NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev), + NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn)); + } + + nv_init_ibmnpu_numa_info(nv); +#endif +} + +void nv_destroy_ibmnpu_info(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu != NULL) + { + os_free_mem(nvl->npu); + nvl->npu = NULL; + } +} + +int nv_init_ibmnpu_devices(nv_state_t *nv) +{ + NvU8 i; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (!nvl->npu) + { + return 0; + } + + for (i = 0; i < nvl->npu->dev_count; i++) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, + "Initializing IBM-NPU device %04x:%02x:%02x.%u\n", + NV_PCI_DOMAIN_NUMBER(nvl->npu->devs[i]), + NV_PCI_BUS_NUMBER(nvl->npu->devs[i]), + NV_PCI_SLOT_NUMBER(nvl->npu->devs[i]), + PCI_FUNC(nvl->npu->devs[i]->devfn)); + + if (ibmnpu_init_device(nvl->npu->devs[i]) != NVL_SUCCESS) + { + nv_unregister_ibmnpu_devices(nv); + return -EIO; + } + + nvl->npu->initialized_dev_count++; + } + + return 0; +} + +void nv_unregister_ibmnpu_devices(nv_state_t *nv) +{ + NvU8 i; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (!nvl->npu) + { + return; + } + + for (i = 0; i < nvl->npu->initialized_dev_count; i++) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, + "Unregistering IBM-NPU device %04x:%02x:%02x.%u\n", + NV_PCI_DOMAIN_NUMBER(nvl->npu->devs[i]), + NV_PCI_BUS_NUMBER(nvl->npu->devs[i]), + NV_PCI_SLOT_NUMBER(nvl->npu->devs[i]), + PCI_FUNC(nvl->npu->devs[i]->devfn)); + + ibmnpu_unregister_device(nvl->npu->devs[i]); + } + + nvl->npu->initialized_dev_count = 0; +} + +NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr, + NvU64 *size, void **device) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu == NULL || nvl->npu->genregs.size == 0) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (addr) + { + *addr = nvl->npu->genregs.start_addr; + } + + if (size) + { + *size = nvl->npu->genregs.size; + } + + if (device) + { + *device = (void*)nvl->npu->devs[0]; + } + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, + NvBool *mode) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu == NULL || nvl->npu->genregs.size == 0) + { + return NV_ERR_NOT_SUPPORTED; + } + + *mode = nv_get_rsync_relaxed_ordering_mode(nv); + + return NV_OK; +} + +void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu == NULL || nvl->npu->genregs.size == 0) + { + return; + } + + nv_wait_for_rsync(nv); +} + +int nv_get_ibmnpu_chip_id(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nvl->npu == NULL) + { + return -1; + } + + return ibmnpu_device_get_chip_id(nvl->npu->devs[0]); +} + +void NV_API_CALL 
nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 offset, cbsize; + + /* + * The range is commonly an ioremap()ed mapping of the GPU's ATS range and + * needs to be compared against the created mappings. Alternatively, kernel + * page tables can be dumped through sysfs if CONFIG_PPC_PTDUMP is enabled. + */ + NV_DEV_PRINTF(NV_DBG_INFO, nv, + "Flushing CPU virtual range [0x%llx, 0x%llx)\n", + cpu_virtual, cpu_virtual + size); + + cbsize = nvl->npu->numa_info.l1d_cache_block_size; + + CACHE_FLUSH(); + + /* Force eviction of any cache lines from the NUMA-onlined region. */ + for (offset = 0; offset < size; offset += cbsize) + { + asm volatile("dcbf %0,%1" :: "r" (cpu_virtual), "r" (offset) : "memory"); + + /* Reschedule if necessary to avoid lockup warnings */ + cond_resched(); + } + + CACHE_FLUSH(); +} + +#else + +void nv_init_ibmnpu_info(nv_state_t *nv) +{ +} + +void nv_destroy_ibmnpu_info(nv_state_t *nv) +{ +} + +int nv_init_ibmnpu_devices(nv_state_t *nv) +{ + return 0; +} + +void nv_unregister_ibmnpu_devices(nv_state_t *nv) +{ +} + +NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr, + NvU64 *size, void **device) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, + NvBool *mode) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv) +{ +} + +int nv_get_ibmnpu_chip_id(nv_state_t *nv) +{ + return -1; +} + +void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 virtual, NvU64 size) +{ +} + +void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv) +{ +} + +#endif diff --git a/kernel-open/nvidia/nv-ibmnpu.h b/kernel-open/nvidia/nv-ibmnpu.h new file mode 100644 index 000000000..413b16b7b --- /dev/null +++ b/kernel-open/nvidia/nv-ibmnpu.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_IBMNPU_H_ +#define _NV_IBMNPU_H_ + +#if defined(NVCPU_PPC64LE) + +#include "ibmnpu_linux.h" + +#define NV_MAX_ATTACHED_IBMNPUS 6 + +typedef struct nv_npu_numa_info +{ + /* + * 47-bit NVIDIA 'system physical address': the hypervisor real 56-bit + * address with NVLink address compression scheme applied. 
+ */ + NvU64 compr_sys_phys_addr; + + /* + * 56-bit NVIDIA 'guest physical address'/host virtual address. On + * unvirtualized systems, applying the NVLink address compression scheme + * to this address should be the same as compr_sys_phys_addr. + */ + NvU64 guest_phys_addr; + + /* + * L1 data cache block size on P9 - needed to manually flush/invalidate the + * NUMA region from the CPU caches after offlining. + */ + NvU32 l1d_cache_block_size; +} nv_npu_numa_info_t; + +struct nv_ibmnpu_info +{ + NvU8 dev_count; + NvU8 initialized_dev_count; + struct pci_dev *devs[NV_MAX_ATTACHED_IBMNPUS]; + ibmnpu_genregs_info_t genregs; + nv_npu_numa_info_t numa_info; +}; + +/* + * TODO: These parameters are specific to Volta/P9 configurations, and may + * need to be determined dynamically in the future. + */ +static const NvU32 nv_volta_addr_space_width = 37; +static const NvU32 nv_volta_dma_addr_size = 47; + +#endif + +void nv_init_ibmnpu_info(nv_state_t *nv); +void nv_destroy_ibmnpu_info(nv_state_t *nv); +int nv_init_ibmnpu_devices(nv_state_t *nv); +void nv_unregister_ibmnpu_devices(nv_state_t *nv); +int nv_get_ibmnpu_chip_id(nv_state_t *nv); +void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv); + +#endif diff --git a/kernel-open/nvidia/nv-kthread-q.c b/kernel-open/nvidia/nv-kthread-q.c new file mode 100644 index 000000000..5a95f4a40 --- /dev/null +++ b/kernel-open/nvidia/nv-kthread-q.c @@ -0,0 +1,335 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-kthread-q.h" +#include "nv-list-helpers.h" + +#include +#include +#include +#include +#include + +#if defined(NV_LINUX_BUG_H_PRESENT) + #include +#else + #include +#endif + +// Today's implementation is a little simpler and more limited than the +// API description allows for in nv-kthread-q.h. Details include: +// +// 1. Each nv_kthread_q instance is a first-in, first-out queue. +// +// 2. Each nv_kthread_q instance is serviced by exactly one kthread. +// +// You can create any number of queues, each of which gets its own +// named kernel thread (kthread). You can then insert arbitrary functions +// into the queue, and those functions will be run in the context of the +// queue's kthread. + +#ifndef WARN + // Only *really* old kernels (2.6.9) end up here. Just use a simple printk + // to implement this, because such kernels won't be supported much longer. 
+ #define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + printk(KERN_ERR format); \ + unlikely(__ret_warn_on); \ + }) +#endif + +#define NVQ_WARN(fmt, ...) \ + do { \ + if (in_interrupt()) { \ + WARN(1, "nv_kthread_q: [in interrupt]: " fmt, \ + ##__VA_ARGS__); \ + } \ + else { \ + WARN(1, "nv_kthread_q: task: %s: " fmt, \ + current->comm, \ + ##__VA_ARGS__); \ + } \ + } while (0) + +static int _main_loop(void *args) +{ + nv_kthread_q_t *q = (nv_kthread_q_t *)args; + nv_kthread_q_item_t *q_item = NULL; + unsigned long flags; + + while (1) { + // Normally this thread is never interrupted. However, + // down_interruptible (instead of down) is called here, + // in order to avoid being classified as a potentially + // hung task, by the kernel watchdog. + while (down_interruptible(&q->q_sem)) + NVQ_WARN("Interrupted during semaphore wait\n"); + + if (atomic_read(&q->main_loop_should_exit)) + break; + + spin_lock_irqsave(&q->q_lock, flags); + + // The q_sem semaphore prevents us from getting here unless there is + // at least one item in the list, so an empty list indicates a bug. + if (unlikely(list_empty(&q->q_list_head))) { + spin_unlock_irqrestore(&q->q_lock, flags); + NVQ_WARN("_main_loop: Empty queue: q: 0x%p\n", q); + continue; + } + + // Consume one item from the queue + q_item = list_first_entry(&q->q_list_head, + nv_kthread_q_item_t, + q_list_node); + + list_del_init(&q_item->q_list_node); + + spin_unlock_irqrestore(&q->q_lock, flags); + + // Run the item + q_item->function_to_run(q_item->function_args); + + // Make debugging a little simpler by clearing this between runs: + q_item = NULL; + } + + while (!kthread_should_stop()) + schedule(); + + return 0; +} + +void nv_kthread_q_stop(nv_kthread_q_t *q) +{ + // check if queue has been properly initialized + if (unlikely(!q->q_kthread)) + return; + + nv_kthread_q_flush(q); + + // If this assertion fires, then a caller likely either broke the API rules, + // by adding items after calling nv_kthread_q_stop, or possibly messed up + // with inadequate flushing of self-rescheduling q_items. + if (unlikely(!list_empty(&q->q_list_head))) + NVQ_WARN("list not empty after flushing\n"); + + if (likely(!atomic_read(&q->main_loop_should_exit))) { + + atomic_set(&q->main_loop_should_exit, 1); + + // Wake up the kthread so that it can see that it needs to stop: + up(&q->q_sem); + + kthread_stop(q->q_kthread); + q->q_kthread = NULL; + } +} + +// When CONFIG_VMAP_STACK is defined, the kernel thread stack allocator used by +// kthread_create_on_node relies on a 2 entry, per-core cache to minimize +// vmalloc invocations. The cache is NUMA-unaware, so when there is a hit, the +// stack location ends up being a function of the core assigned to the current +// thread, instead of being a function of the specified NUMA node. The cache was +// added to the kernel in commit ac496bf48d97f2503eaa353996a4dd5e4383eaf0 +// ("fork: Optimize task creation by caching two thread stacks per CPU if +// CONFIG_VMAP_STACK=y") +// +// To work around the problematic cache, we create up to three kernel threads +// -If the first thread's stack is resident on the preferred node, return this +// thread. +// -Otherwise, create a second thread. If its stack is resident on the +// preferred node, stop the first thread and return this one. +// -Otherwise, create a third thread. The stack allocator does not find a +// cached stack, and so falls back to vmalloc, which takes the NUMA hint into +// consideration. 
The first two threads are then stopped. +// +// When CONFIG_VMAP_STACK is not defined, the first kernel thread is returned. +// +// This function is never invoked when there is no NUMA preference (preferred +// node is NUMA_NO_NODE). +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 +static struct task_struct *thread_create_on_node(int (*threadfn)(void *data), + nv_kthread_q_t *q, + int preferred_node, + const char *q_name) +{ + + unsigned i, j; + const static unsigned attempts = 3; + struct task_struct *thread[3]; + + for (i = 0;; i++) { + struct page *stack; + + thread[i] = kthread_create_on_node(threadfn, q, preferred_node, q_name); + + if (unlikely(IS_ERR(thread[i]))) { + + // Instead of failing, pick the previous thread, even if its + // stack is not allocated on the preferred node. + if (i > 0) + i--; + + break; + } + + // vmalloc is not used to allocate the stack, so simply return the + // thread, even if its stack may not be allocated on the preferred node + if (!is_vmalloc_addr(thread[i]->stack)) + break; + + // Ran out of attempts - return thread even if its stack may not be + // allocated on the preferred node + if ((i == (attempts - 1))) + break; + + // Get the NUMA node where the first page of the stack is resident. If + // it is the preferred node, select this thread. + stack = vmalloc_to_page(thread[i]->stack); + if (page_to_nid(stack) == preferred_node) + break; + } + + for (j = i; j > 0; j--) + kthread_stop(thread[j - 1]); + + return thread[i]; +} +#endif + +int nv_kthread_q_init_on_node(nv_kthread_q_t *q, const char *q_name, int preferred_node) +{ + memset(q, 0, sizeof(*q)); + + INIT_LIST_HEAD(&q->q_list_head); + spin_lock_init(&q->q_lock); + sema_init(&q->q_sem, 0); + + if (preferred_node == NV_KTHREAD_NO_NODE) { + q->q_kthread = kthread_create(_main_loop, q, q_name); + } + else { +#if NV_KTHREAD_Q_SUPPORTS_AFFINITY() == 1 + q->q_kthread = thread_create_on_node(_main_loop, q, preferred_node, q_name); +#else + return -ENOTSUPP; +#endif + } + + if (IS_ERR(q->q_kthread)) { + int err = PTR_ERR(q->q_kthread); + + // Clear q_kthread before returning so that nv_kthread_q_stop() can be + // safely called on it making error handling easier. + q->q_kthread = NULL; + + return err; + } + + wake_up_process(q->q_kthread); + + return 0; +} + +// Returns true (non-zero) if the item was actually scheduled, and false if the +// item was already pending in a queue. +static int _raw_q_schedule(nv_kthread_q_t *q, nv_kthread_q_item_t *q_item) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&q->q_lock, flags); + + if (likely(list_empty(&q_item->q_list_node))) + list_add_tail(&q_item->q_list_node, &q->q_list_head); + else + ret = 0; + + spin_unlock_irqrestore(&q->q_lock, flags); + + if (likely(ret)) + up(&q->q_sem); + + return ret; +} + +void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item, + nv_q_func_t function_to_run, + void *function_args) +{ + INIT_LIST_HEAD(&q_item->q_list_node); + q_item->function_to_run = function_to_run; + q_item->function_args = function_args; +} + +// Returns true (non-zero) if the q_item got scheduled, false otherwise. 
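/*
 * Editorial illustration, not part of the original file: a minimal usage
 * sketch of the queue API implemented here (nv_kthread_q_init_on_node,
 * nv_kthread_q_item_init, nv_kthread_q_schedule_q_item, nv_kthread_q_flush,
 * nv_kthread_q_stop, NV_KTHREAD_NO_NODE). The names example_work,
 * example_use_kthread_q, and the queue name string are hypothetical.
 */
/* Work function: runs in the queue's kthread context, never in interrupt context. */
static void example_work(void *args)
{
    int *payload = (int *)args;
    pr_info("nv_kthread_q example: payload = %d\n", *payload);
}

static int example_use_kthread_q(void)
{
    nv_kthread_q_t q;
    nv_kthread_q_item_t item;
    static int payload = 42;
    int ret;

    /* Create the queue and its dedicated kthread (no NUMA preference). */
    ret = nv_kthread_q_init_on_node(&q, "nv_queue_example", NV_KTHREAD_NO_NODE);
    if (ret != 0)
        return ret;

    /* Bind a function and its argument to a work item, then enqueue it. */
    nv_kthread_q_item_init(&item, example_work, &payload);
    if (!nv_kthread_q_schedule_q_item(&q, &item))
        pr_warn("nv_kthread_q example: item was already pending\n");

    /* Wait for everything queued so far to finish, then tear down the queue. */
    nv_kthread_q_flush(&q);
    nv_kthread_q_stop(&q);
    return 0;
}
/* The driver's nv_kthread_q_schedule_q_item(), documented above, follows. */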
+int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q, + nv_kthread_q_item_t *q_item) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_schedule_q_item was " + "called with a non-alive q: 0x%p\n", q); + return 0; + } + + return _raw_q_schedule(q, q_item); +} + +static void _q_flush_function(void *args) +{ + struct completion *completion = (struct completion *)args; + complete(completion); +} + + +static void _raw_q_flush(nv_kthread_q_t *q) +{ + nv_kthread_q_item_t q_item; + DECLARE_COMPLETION(completion); + + nv_kthread_q_item_init(&q_item, _q_flush_function, &completion); + + _raw_q_schedule(q, &q_item); + + // Wait for the flush item to run. Once it has run, then all of the + // previously queued items in front of it will have run, so that means + // the flush is complete. + wait_for_completion(&completion); +} + +void nv_kthread_q_flush(nv_kthread_q_t *q) +{ + if (unlikely(atomic_read(&q->main_loop_should_exit))) { + NVQ_WARN("Not allowed: nv_kthread_q_flush was called after " + "nv_kthread_q_stop. q: 0x%p\n", q); + return; + } + + // This 2x flush is not a typing mistake. The queue really does have to be + // flushed twice, in order to take care of the case of a q_item that + // reschedules itself. + _raw_q_flush(q); + _raw_q_flush(q); +} diff --git a/kernel-open/nvidia/nv-memdbg.c b/kernel-open/nvidia/nv-memdbg.c new file mode 100644 index 000000000..033a42160 --- /dev/null +++ b/kernel-open/nvidia/nv-memdbg.c @@ -0,0 +1,232 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-memdbg.h" +#include "nv-linux.h" + +/* track who's allocating memory and print out a list of leaked allocations at + * teardown. 
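 *
 * Editorial note, illustrative and not part of this file: nv_memdbg_add() and
 * nv_memdbg_remove() take file/line parameters so that each allocation can be
 * attributed to its call site. A hypothetical wrapper macro in the allocation
 * paths might look like the sketch below; the driver's actual wrapper names
 * may differ.
 *
 *     #define EXAMPLE_TRACKED_ALLOC(ptr, size)                         \
 *         do {                                                         \
 *             (ptr) = kmalloc((size), GFP_KERNEL);                     \
 *             if ((ptr) != NULL)                                       \
 *                 nv_memdbg_add((ptr), (size), __FILE__, __LINE__);    \
 *         } while (0)
 *
 *     #define EXAMPLE_TRACKED_FREE(ptr, size)                          \
 *         do {                                                         \
 *             nv_memdbg_remove((ptr), (size), __FILE__, __LINE__);     \
 *             kfree(ptr);                                              \
 *         } while (0)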
+ */ + +typedef struct { + struct rb_node rb_node; + void *addr; + NvU64 size; + NvU32 line; + const char *file; +} nv_memdbg_node_t; + +struct +{ + struct rb_root rb_root; + NvU64 untracked_bytes; + NvU64 num_untracked_allocs; + nv_spinlock_t lock; +} g_nv_memdbg; + +void nv_memdbg_init(void) +{ + NV_SPIN_LOCK_INIT(&g_nv_memdbg.lock); + g_nv_memdbg.rb_root = RB_ROOT; +} + +static nv_memdbg_node_t *nv_memdbg_node_entry(struct rb_node *rb_node) +{ + return rb_entry(rb_node, nv_memdbg_node_t, rb_node); +} + +static void nv_memdbg_insert_node(nv_memdbg_node_t *new) +{ + nv_memdbg_node_t *node; + struct rb_node **rb_node = &g_nv_memdbg.rb_root.rb_node; + struct rb_node *rb_parent = NULL; + + while (*rb_node) + { + node = nv_memdbg_node_entry(*rb_node); + + WARN_ON(new->addr == node->addr); + + rb_parent = *rb_node; + + if (new->addr < node->addr) + rb_node = &(*rb_node)->rb_left; + else + rb_node = &(*rb_node)->rb_right; + } + + rb_link_node(&new->rb_node, rb_parent, rb_node); + rb_insert_color(&new->rb_node, &g_nv_memdbg.rb_root); +} + +static nv_memdbg_node_t *nv_memdbg_remove_node(void *addr) +{ + nv_memdbg_node_t *node = NULL; + struct rb_node *rb_node = g_nv_memdbg.rb_root.rb_node; + + while (rb_node) + { + node = nv_memdbg_node_entry(rb_node); + if (addr == node->addr) + break; + else if (addr < node->addr) + rb_node = rb_node->rb_left; + else + rb_node = rb_node->rb_right; + } + + WARN_ON(!node || node->addr != addr); + + rb_erase(&node->rb_node, &g_nv_memdbg.rb_root); + return node; +} + +void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line) +{ + nv_memdbg_node_t *node; + unsigned long flags; + + WARN_ON(addr == NULL); + + /* If node allocation fails, we can still update the untracked counters */ + node = kmalloc(sizeof(*node), + NV_MAY_SLEEP() ? 
NV_GFP_KERNEL : NV_GFP_ATOMIC); + if (node) + { + node->addr = addr; + node->size = size; + node->file = file; + node->line = line; + } + + NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags); + + if (node) + { + nv_memdbg_insert_node(node); + } + else + { + ++g_nv_memdbg.num_untracked_allocs; + g_nv_memdbg.untracked_bytes += size; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags); +} + +void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line) +{ + nv_memdbg_node_t *node; + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&g_nv_memdbg.lock, flags); + + node = nv_memdbg_remove_node(addr); + if (!node) + { + WARN_ON(g_nv_memdbg.num_untracked_allocs == 0); + WARN_ON(g_nv_memdbg.untracked_bytes < size); + --g_nv_memdbg.num_untracked_allocs; + g_nv_memdbg.untracked_bytes -= size; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&g_nv_memdbg.lock, flags); + + if (node) + { + if ((size != 0) && (node->size != size)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: size mismatch on free: %llu != %llu\n", + size, node->size); + if (node->file) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: allocation: 0x%p @ %s:%d\n", + node->addr, node->file, node->line); + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: allocation: 0x%p\n", + node->addr); + } + os_dbg_breakpoint(); + } + + kfree(node); + } +} + +void nv_memdbg_exit(void) +{ + nv_memdbg_node_t *node; + NvU64 leaked_bytes = 0, num_leaked_allocs = 0; + + if (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: list of leaked memory allocations:\n"); + } + + while (!RB_EMPTY_ROOT(&g_nv_memdbg.rb_root)) + { + node = nv_memdbg_node_entry(rb_first(&g_nv_memdbg.rb_root)); + + leaked_bytes += node->size; + ++num_leaked_allocs; + + if (node->file) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes, 0x%p @ %s:%d\n", + node->size, node->addr, node->file, node->line); + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes, 0x%p\n", + node->size, node->addr); + } + + rb_erase(&node->rb_node, &g_nv_memdbg.rb_root); + kfree(node); + } + + /* If we failed to allocate a node at some point, we may have leaked memory + * even if the tree is empty */ + if (num_leaked_allocs > 0 || g_nv_memdbg.num_untracked_allocs > 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: total leaked memory: %llu bytes in %llu allocations\n", + leaked_bytes + g_nv_memdbg.untracked_bytes, + num_leaked_allocs + g_nv_memdbg.num_untracked_allocs); + + if (g_nv_memdbg.num_untracked_allocs > 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %llu bytes in %llu allocations untracked\n", + g_nv_memdbg.untracked_bytes, g_nv_memdbg.num_untracked_allocs); + } + } +} diff --git a/kernel-open/nvidia/nv-mmap.c b/kernel-open/nvidia/nv-mmap.c new file mode 100644 index 000000000..5c0f764c1 --- /dev/null +++ b/kernel-open/nvidia/nv-mmap.c @@ -0,0 +1,780 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv_speculation_barrier.h" + +/* + * The 'struct vm_operations' open() callback is called by the Linux + * kernel when the parent VMA is split or copied, close() when the + * current VMA is about to be deleted. + * + * We implement these callbacks to keep track of the number of user + * mappings of system memory allocations. This was motivated by a + * subtle interaction problem between the driver and the kernel with + * respect to the bookkeeping of pages marked reserved and later + * mapped with mmap(). + * + * Traditionally, the Linux kernel ignored reserved pages, such that + * when they were mapped via mmap(), the integrity of their usage + * counts depended on the reserved bit being set for as long as user + * mappings existed. + * + * Since we mark system memory pages allocated for DMA reserved and + * typically map them with mmap(), we need to ensure they remain + * reserved until the last mapping has been torn down. This worked + * correctly in most cases, but in a few, the RM API called into the + * RM to free memory before calling munmap() to unmap it. + * + * In the past, we allowed nv_free_pages() to remove the 'at' from + * the parent device's allocation list in this case, but didn't + * release the underlying pages until the last user mapping had been + * destroyed: + * + * In nvidia_vma_release(), we freed any resources associated with + * the allocation (IOMMU mappings, etc.) and cleared the + * underlying pages' reserved bits, but didn't free them. The kernel + * was expected to do this. + * + * This worked in practice, but made dangerous assumptions about the + * kernel's behavior and could fail in some cases. We now handle + * this case differently (see below). + */ +static void +nvidia_vma_open(struct vm_area_struct *vma) +{ + nv_alloc_t *at = NV_VMA_PRIVATE(vma); + + NV_PRINT_VMA(NV_DBG_MEMINFO, vma); + + if (at != NULL) + { + NV_ATOMIC_INC(at->usage_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + } +} + +/* + * (see above for additional information) + * + * If the 'at' usage count drops to zero with the updated logic, the + * allocation is recorded in the free list of the private + * data associated with the file pointer; nvidia_close() uses this + * list to perform deferred free operations when the parent file + * descriptor is closed. This will typically happen when the process + * exits. + * + * Since this is technically a workaround to handle possible fallout + * from misbehaving clients, we additionally print a warning.
+ */ +static void +nvidia_vma_release(struct vm_area_struct *vma) +{ + nv_alloc_t *at = NV_VMA_PRIVATE(vma); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + static int count = 0; + + NV_PRINT_VMA(NV_DBG_MEMINFO, vma); + + if (at != NULL && nv_alloc_release(nvlfp, at)) + { + if ((at->pid == os_get_current_process()) && + (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: late unmap, comm: %s, 0x%p\n", + __FUNCTION__, current->comm, at); + } + } +} + +static int +nvidia_vma_access( + struct vm_area_struct *vma, + unsigned long addr, + void *buffer, + int length, + int write +) +{ + nv_alloc_t *at = NULL; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + nv_state_t *nv = NV_STATE_PTR(nvlfp->nvptr); + NvU32 pageIndex, pageOffset; + void *kernel_mapping; + const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context; + NvU64 offset; + + pageIndex = ((addr - vma->vm_start) >> PAGE_SHIFT); + pageOffset = (addr & ~PAGE_MASK); + + if (!mmap_context->valid) + { + nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap context\n"); + return -EINVAL; + } + + offset = mmap_context->mmap_start; + + if (nv->flags & NV_FLAG_CONTROL) + { + at = NV_VMA_PRIVATE(vma); + + /* + * at can be NULL for peer IO mem. + */ + if (!at) + return -EINVAL; + + if (pageIndex >= at->num_pages) + return -EINVAL; + + /* + * For PPC64LE build, nv_array_index_no_speculate() is not defined + * therefore call nv_speculation_barrier(). + * When this definition is added, this platform check should be removed. + */ +#if !defined(NVCPU_PPC64LE) + pageIndex = nv_array_index_no_speculate(pageIndex, at->num_pages); +#else + nv_speculation_barrier(); +#endif + kernel_mapping = (void *)(at->page_table[pageIndex]->virt_addr + pageOffset); + } + else if (IS_FB_OFFSET(nv, offset, length)) + { + addr = (offset & PAGE_MASK); + kernel_mapping = os_map_kernel_space(addr, PAGE_SIZE, NV_MEMORY_UNCACHED); + if (kernel_mapping == NULL) + return -ENOMEM; + + kernel_mapping = ((char *)kernel_mapping + pageOffset); + } + else + return -EINVAL; + + length = NV_MIN(length, (int)(PAGE_SIZE - pageOffset)); + + if (write) + memcpy(kernel_mapping, buffer, length); + else + memcpy(buffer, kernel_mapping, length); + + if (at == NULL) + { + kernel_mapping = ((char *)kernel_mapping - pageOffset); + os_unmap_kernel_space(kernel_mapping, PAGE_SIZE); + } + + return length; +} + +static vm_fault_t nvidia_fault( +#if !defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + struct vm_area_struct *vma, +#endif + struct vm_fault *vmf +) +{ +#if defined(NV_VM_OPS_FAULT_REMOVED_VMA_ARG) + struct vm_area_struct *vma = vmf->vma; +#endif + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(NV_VMA_FILE(vma)); + nv_linux_state_t *nvl = nvlfp->nvptr; + nv_state_t *nv = NV_STATE_PTR(nvl); + vm_fault_t ret = VM_FAULT_NOPAGE; + + NvU64 page; + NvU64 num_pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT; + NvU64 pfn_start = + (nvlfp->mmap_context.mmap_start >> PAGE_SHIFT) + vma->vm_pgoff; + + // Mapping revocation is only supported for GPU mappings. + if (NV_IS_CTL_DEVICE(nv)) + { + return VM_FAULT_SIGBUS; + } + + // Wake up GPU and reinstate mappings only if we are not in S3/S4 entry + if (!down_read_trylock(&nv_system_pm_lock)) + { + return VM_FAULT_NOPAGE; + } + + down(&nvl->mmap_lock); + + // Wake up the GPU if it is not currently safe to mmap. + if (!nvl->safe_to_mmap) + { + NV_STATUS status; + + if (!nvl->gpu_wakeup_callback_needed) + { + // GPU wakeup callback already scheduled. 
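            // (A previous fault cleared gpu_wakeup_callback_needed when it
            // scheduled the wakeup work, so drop the locks and return
            // VM_FAULT_NOPAGE; the access is retried once safe_to_mmap is
            // set again.)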
+ up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_NOPAGE; + } + + /* + * GPU wakeup cannot be completed directly in the fault handler due to the + * inability to take the GPU lock while mmap_lock is held. + */ + status = rm_schedule_gpu_wakeup(nvl->sp[NV_DEV_STACK_GPU_WAKEUP], nv); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: rm_schedule_gpu_wakeup failed: %x\n", status); + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_SIGBUS; + } + // Ensure that we do not schedule duplicate GPU wakeup callbacks. + nvl->gpu_wakeup_callback_needed = NV_FALSE; + + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + return VM_FAULT_NOPAGE; + } + + // Safe to mmap, map all pages in this VMA. + for (page = 0; page < num_pages; page++) + { + NvU64 virt_addr = vma->vm_start + (page << PAGE_SHIFT); + NvU64 pfn = pfn_start + page; + + ret = nv_insert_pfn(vma, virt_addr, pfn, + nvlfp->mmap_context.remap_prot_extra); + if (ret != VM_FAULT_NOPAGE) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: nv_insert_pfn failed: %x\n", ret); + break; + } + + nvl->all_mappings_revoked = NV_FALSE; + } + up(&nvl->mmap_lock); + up_read(&nv_system_pm_lock); + + return ret; +} + +static struct vm_operations_struct nv_vm_ops = { + .open = nvidia_vma_open, + .close = nvidia_vma_release, + .fault = nvidia_fault, + .access = nvidia_vma_access, +}; + +int nv_encode_caching( + pgprot_t *prot, + NvU32 cache_type, + nv_memory_type_t memory_type +) +{ + pgprot_t tmp; + + if (prot == NULL) + { + tmp = __pgprot(0); + prot = &tmp; + } + + switch (cache_type) + { + case NV_MEMORY_UNCACHED_WEAK: +#if defined(NV_PGPROT_UNCACHED_WEAK) + *prot = NV_PGPROT_UNCACHED_WEAK(*prot); + break; +#endif + case NV_MEMORY_UNCACHED: + *prot = (memory_type == NV_MEMORY_TYPE_SYSTEM) ? + NV_PGPROT_UNCACHED(*prot) : + NV_PGPROT_UNCACHED_DEVICE(*prot); + break; +#if defined(NV_PGPROT_WRITE_COMBINED) && \ + defined(NV_PGPROT_WRITE_COMBINED_DEVICE) + case NV_MEMORY_WRITECOMBINED: + if (NV_ALLOW_WRITE_COMBINING(memory_type)) + { + *prot = (memory_type == NV_MEMORY_TYPE_FRAMEBUFFER) ? + NV_PGPROT_WRITE_COMBINED_DEVICE(*prot) : + NV_PGPROT_WRITE_COMBINED(*prot); + break; + } + + /* + * If WC support is unavailable, we need to return an error + * code to the caller, but need not print a warning. + * + * For frame buffer memory, callers are expected to use the + * UC- memory type if we report WC as unsupported, which + * translates to the effective memory type WC if a WC MTRR + * exists or else UC. + */ + return 1; +#endif + case NV_MEMORY_CACHED: + if (NV_ALLOW_CACHING(memory_type)) + break; + // Intentional fallthrough. 
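        // (Caching is not permitted for this memory type, so fall through to
        //  the "not supported" error path below.)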
+ default: + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: cache type %d not supported for memory type %d!\n", + cache_type, memory_type); + return 1; + } + return 0; +} + +int static nvidia_mmap_peer_io( + struct vm_area_struct *vma, + nv_alloc_t *at, + NvU64 page_index, + NvU64 pages +) +{ + int ret; + NvU64 start; + NvU64 size; + + BUG_ON(!at->flags.contig); + + start = at->page_table[page_index]->phys_addr; + size = pages * PAGE_SIZE; + + ret = nv_io_remap_page_range(vma, start, size, 0); + + return ret; +} + +int static nvidia_mmap_sysmem( + struct vm_area_struct *vma, + nv_alloc_t *at, + NvU64 page_index, + NvU64 pages +) +{ + NvU64 j; + int ret = 0; + unsigned long start = 0; + + NV_ATOMIC_INC(at->usage_count); + + start = vma->vm_start; + for (j = page_index; j < (page_index + pages); j++) + { + /* + * For PPC64LE build, nv_array_index_no_speculate() is not defined + * therefore call nv_speculation_barrier(). + * When this definition is added, this platform check should be removed. + */ +#if !defined(NVCPU_PPC64LE) + j = nv_array_index_no_speculate(j, (page_index + pages)); +#else + nv_speculation_barrier(); +#endif + +#if defined(NV_VGPU_KVM_BUILD) + if (at->flags.guest) + { + ret = nv_remap_page_range(vma, start, at->page_table[j]->phys_addr, + PAGE_SIZE, vma->vm_page_prot); + } + else +#endif + { + vma->vm_page_prot = nv_adjust_pgprot(vma->vm_page_prot, 0); + ret = vm_insert_page(vma, start, + NV_GET_PAGE_STRUCT(at->page_table[j]->phys_addr)); + } + + if (ret) + { + NV_ATOMIC_DEC(at->usage_count); + return -EAGAIN; + } + start += PAGE_SIZE; + } + + return ret; +} + +static int nvidia_mmap_numa( + struct vm_area_struct *vma, + const nv_alloc_mapping_context_t *mmap_context) +{ + NvU64 start, addr; + unsigned int pages; + NvU64 i; + + pages = NV_VMA_SIZE(vma) >> PAGE_SHIFT; + start = vma->vm_start; + + if (mmap_context->num_pages < pages) + { + return -EINVAL; + } + + // Needed for the linux kernel for mapping compound pages + vma->vm_flags |= VM_MIXEDMAP; + + for (i = 0, addr = mmap_context->page_array[0]; i < pages; + addr = mmap_context->page_array[++i], start += PAGE_SIZE) + { + if (vm_insert_page(vma, start, NV_GET_PAGE_STRUCT(addr)) != 0) + { + return -EAGAIN; + } + } + + return 0; +} + +int nvidia_mmap_helper( + nv_state_t *nv, + nv_linux_file_private_t *nvlfp, + nvidia_stack_t *sp, + struct vm_area_struct *vma, + void *vm_priv +) +{ + NvU32 prot = 0; + int ret; + const nv_alloc_mapping_context_t *mmap_context = &nvlfp->mmap_context; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NV_STATUS status; + + if (nvlfp == NULL) + return NV_ERR_INVALID_ARGUMENT; + + /* + * If mmap context is not valid on this file descriptor, this mapping wasn't + * previously validated with the RM so it must be rejected. + */ + if (!mmap_context->valid) + { + nv_printf(NV_DBG_ERRORS, "NVRM: VM: invalid mmap\n"); + return -EINVAL; + } + + NV_PRINT_VMA(NV_DBG_MEMINFO, vma); + + status = nv_check_gpu_state(nv); + if (status != NV_OK) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, + "GPU is lost, skipping nvidia_mmap_helper\n"); + return status; + } + + NV_VMA_PRIVATE(vma) = vm_priv; + + prot = mmap_context->prot; + + /* + * Nvidia device node(nvidia#) maps device's BAR memory, + * Nvidia control node(nvidiactrl) maps system memory. 
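 *
 * The !NV_IS_CTL_DEVICE(nv) branch below therefore remaps GPU BAR apertures
 * (registers, framebuffer, or NUMA-onlined framebuffer pages) into the VMA,
 * while the control-node branch maps RM-allocated system memory or peer IO
 * pages tracked by an nv_alloc_t.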
+ */ + if (!NV_IS_CTL_DEVICE(nv)) + { + NvU32 remap_prot_extra = mmap_context->remap_prot_extra; + NvU64 mmap_start = mmap_context->mmap_start; + NvU64 mmap_length = mmap_context->mmap_size; + NvU64 access_start = mmap_context->access_start; + NvU64 access_len = mmap_context->access_size; + + if (IS_REG_OFFSET(nv, access_start, access_len)) + { + if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED, + NV_MEMORY_TYPE_REGISTERS)) + { + return -ENXIO; + } + } + else if (IS_FB_OFFSET(nv, access_start, access_len)) + { + if (IS_UD_OFFSET(nv, access_start, access_len)) + { + if (nv_encode_caching(&vma->vm_page_prot, NV_MEMORY_UNCACHED, + NV_MEMORY_TYPE_FRAMEBUFFER)) + { + return -ENXIO; + } + } + else + { + if (nv_encode_caching(&vma->vm_page_prot, + rm_disable_iomap_wc() ? NV_MEMORY_UNCACHED : NV_MEMORY_WRITECOMBINED, + NV_MEMORY_TYPE_FRAMEBUFFER)) + { + if (nv_encode_caching(&vma->vm_page_prot, + NV_MEMORY_UNCACHED_WEAK, NV_MEMORY_TYPE_FRAMEBUFFER)) + { + return -ENXIO; + } + } + } + } + + down(&nvl->mmap_lock); + if (nvl->safe_to_mmap) + { + nvl->all_mappings_revoked = NV_FALSE; + + // + // This path is similar to the sysmem mapping code. + // TODO: Refactor is needed as part of bug#2001704. + // Use pfn_valid to determine whether the physical address has + // backing struct page. This is used to isolate P8 from P9. + // + if ((nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) && + !IS_REG_OFFSET(nv, access_start, access_len) && + (pfn_valid(PFN_DOWN(mmap_start)))) + { + ret = nvidia_mmap_numa(vma, mmap_context); + if (ret) + { + up(&nvl->mmap_lock); + return ret; + } + } + else + { + if (nv_io_remap_page_range(vma, mmap_start, mmap_length, + remap_prot_extra) != 0) + { + up(&nvl->mmap_lock); + return -EAGAIN; + } + } + } + up(&nvl->mmap_lock); + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND; + } + else + { + nv_alloc_t *at; + NvU64 page_index; + NvU64 pages; + NvU64 mmap_size; + + at = (nv_alloc_t *)mmap_context->alloc; + page_index = mmap_context->page_index; + mmap_size = NV_VMA_SIZE(vma); + pages = mmap_size >> PAGE_SHIFT; + + if ((page_index + pages) > at->num_pages) + { + return -ERANGE; + } + + /* + * Callers that pass in non-NULL VMA private data must never reach this + * code. They should be mapping on a non-control node. + */ + BUG_ON(NV_VMA_PRIVATE(vma)); + + if (at->flags.peer_io) + { + if (nv_encode_caching(&vma->vm_page_prot, + at->cache_type, + NV_MEMORY_TYPE_DEVICE_MMIO)) + { + return -ENXIO; + } + + /* + * There is no need to keep 'peer IO at' alive till vma_release like + * 'sysmem at' because there are no security concerns where a client + * could free RM allocated sysmem before unmapping it. Hence, vm_ops + * are NOP, and at->usage_count is never being used. 
+ */ + NV_VMA_PRIVATE(vma) = NULL; + + ret = nvidia_mmap_peer_io(vma, at, page_index, pages); + + BUG_ON(NV_VMA_PRIVATE(vma)); + } + else + { + if (nv_encode_caching(&vma->vm_page_prot, + at->cache_type, + NV_MEMORY_TYPE_SYSTEM)) + { + return -ENXIO; + } + + NV_VMA_PRIVATE(vma) = at; + + ret = nvidia_mmap_sysmem(vma, at, page_index, pages); + } + + if (ret) + { + return ret; + } + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED); + vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP); + } + + if ((prot & NV_PROTECT_WRITEABLE) == 0) + { + vma->vm_page_prot = NV_PGPROT_READ_ONLY(vma->vm_page_prot); + vma->vm_flags &= ~VM_WRITE; + vma->vm_flags &= ~VM_MAYWRITE; + } + + vma->vm_ops = &nv_vm_ops; + + return 0; +} + +int nvidia_mmap( + struct file *file, + struct vm_area_struct *vma +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nvidia_stack_t *sp = NULL; + int status; + + // + // Do not allow mmap operation if this is a fd into + // which rm objects have been exported. + // + if (nvlfp->nvfp.handles != NULL) + { + return -EINVAL; + } + + down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]); + + sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_MMAP]; + + status = nvidia_mmap_helper(nv, nvlfp, sp, vma, NULL); + + up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_MMAP]); + + return status; +} + +void +nv_revoke_gpu_mappings_locked( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_linux_file_private_t *nvlfp; + + /* Revoke all mappings for every open file */ + list_for_each_entry (nvlfp, &nvl->open_files, entry) + { + unmap_mapping_range(&nvlfp->mapping, 0, ~0, 1); + } + + nvl->all_mappings_revoked = NV_TRUE; +} + +NV_STATUS NV_API_CALL nv_revoke_gpu_mappings( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Mapping revocation is only supported for GPU mappings. + if (NV_IS_CTL_DEVICE(nv)) + { + return NV_ERR_NOT_SUPPORTED; + } + + down(&nvl->mmap_lock); + + nv_revoke_gpu_mappings_locked(nv); + + up(&nvl->mmap_lock); + + return NV_OK; +} + +void NV_API_CALL nv_acquire_mmap_lock( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + down(&nvl->mmap_lock); +} + +void NV_API_CALL nv_release_mmap_lock( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + up(&nvl->mmap_lock); +} + +NvBool NV_API_CALL nv_get_all_mappings_revoked_locked( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Caller must hold nvl->mmap_lock for all decisions based on this + return nvl->all_mappings_revoked; +} + +void NV_API_CALL nv_set_safe_to_mmap_locked( + nv_state_t *nv, + NvBool safe_to_mmap +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // Caller must hold nvl->mmap_lock + + /* + * If nvl->safe_to_mmap is transitioning from TRUE to FALSE, we expect to + * need to schedule a GPU wakeup callback when we fault. + * + * nvl->gpu_wakeup_callback_needed will be set to FALSE in nvidia_fault() + * after scheduling the GPU wakeup callback, preventing us from scheduling + * duplicates. 
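 *
 * Editorial note, illustrative and not part of this file: a caller revoking
 * mappings for a power transition, using only the helpers defined in this
 * file, would roughly do:
 *
 *     nv_acquire_mmap_lock(nv);
 *     nv_set_safe_to_mmap_locked(nv, NV_FALSE);   // future faults defer to a wakeup
 *     nv_revoke_gpu_mappings_locked(nv);          // zap existing user mappings
 *     nv_release_mmap_lock(nv);
 *
 * and, once the GPU is powered up again:
 *
 *     nv_acquire_mmap_lock(nv);
 *     nv_set_safe_to_mmap_locked(nv, NV_TRUE);    // faults may now re-establish PTEs
 *     nv_release_mmap_lock(nv);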
+ */ + if (!safe_to_mmap && nvl->safe_to_mmap) + { + nvl->gpu_wakeup_callback_needed = NV_TRUE; + } + + nvl->safe_to_mmap = safe_to_mmap; +} diff --git a/kernel-open/nvidia/nv-modeset-interface.c b/kernel-open/nvidia/nv-modeset-interface.c new file mode 100644 index 000000000..5a8911c68 --- /dev/null +++ b/kernel-open/nvidia/nv-modeset-interface.c @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-modeset-interface.h" + +#include "os-interface.h" +#include "nv-linux.h" +#include "nvstatus.h" +#include "nv.h" + +static const nvidia_modeset_callbacks_t *nv_modeset_callbacks; + +static int nvidia_modeset_rm_ops_alloc_stack(nvidia_stack_t **sp) +{ + return nv_kmem_cache_alloc_stack(sp); +} + +static void nvidia_modeset_rm_ops_free_stack(nvidia_stack_t *sp) +{ + if (sp != NULL) + { + nv_kmem_cache_free_stack(sp); + } +} + +static int nvidia_modeset_set_callbacks(const nvidia_modeset_callbacks_t *cb) +{ + if ((nv_modeset_callbacks != NULL && cb != NULL) || + (nv_modeset_callbacks == NULL && cb == NULL)) + { + return -EINVAL; + } + + nv_modeset_callbacks = cb; + return 0; +} + +void nvidia_modeset_suspend(NvU32 gpuId) +{ + if (nv_modeset_callbacks) + { + nv_modeset_callbacks->suspend(gpuId); + } +} + +void nvidia_modeset_resume(NvU32 gpuId) +{ + if (nv_modeset_callbacks) + { + nv_modeset_callbacks->resume(gpuId); + } +} + +static NvU32 nvidia_modeset_enumerate_gpus(nv_gpu_info_t *gpu_info) +{ + nv_linux_state_t *nvl; + unsigned int count; + + LOCK_NV_LINUX_DEVICES(); + + count = 0; + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + + /* + * The gpu_info[] array has NV_MAX_GPUS elements. Fail if there + * are more GPUs than that. 
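 * (count is reset to zero in that case, so the caller sees an empty
 * enumeration rather than a silently truncated GPU list.)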
+ */ + if (count >= NV_MAX_GPUS) { + nv_printf(NV_DBG_WARNINGS, "NVRM: More than %d GPUs found.", + NV_MAX_GPUS); + count = 0; + break; + } + + gpu_info[count].gpu_id = nv->gpu_id; + + gpu_info[count].pci_info.domain = nv->pci_info.domain; + gpu_info[count].pci_info.bus = nv->pci_info.bus; + gpu_info[count].pci_info.slot = nv->pci_info.slot; + gpu_info[count].pci_info.function = nv->pci_info.function; + + gpu_info[count].os_device_ptr = nvl->dev; + + count++; + } + + UNLOCK_NV_LINUX_DEVICES(); + + return count; +} + +NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops) +{ + const nvidia_modeset_rm_ops_t local_rm_ops = { + .version_string = NV_VERSION_STRING, + .system_info = { + .allow_write_combining = NV_FALSE, + }, + .alloc_stack = nvidia_modeset_rm_ops_alloc_stack, + .free_stack = nvidia_modeset_rm_ops_free_stack, + .enumerate_gpus = nvidia_modeset_enumerate_gpus, + .open_gpu = nvidia_dev_get, + .close_gpu = nvidia_dev_put, + .op = rm_kernel_rmapi_op, /* provided by nv-kernel.o */ + .set_callbacks = nvidia_modeset_set_callbacks, + }; + + if (strcmp(rm_ops->version_string, NV_VERSION_STRING) != 0) + { + rm_ops->version_string = NV_VERSION_STRING; + return NV_ERR_GENERIC; + } + + *rm_ops = local_rm_ops; + + if (NV_ALLOW_WRITE_COMBINING(NV_MEMORY_TYPE_FRAMEBUFFER)) { + rm_ops->system_info.allow_write_combining = NV_TRUE; + } + + return NV_OK; +} + +EXPORT_SYMBOL(nvidia_get_rm_ops); diff --git a/kernel-open/nvidia/nv-msi.c b/kernel-open/nvidia/nv-msi.c new file mode 100644 index 000000000..7efaeb4cb --- /dev/null +++ b/kernel-open/nvidia/nv-msi.c @@ -0,0 +1,169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nv-msi.h" +#include "nv-proto.h" + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) +void NV_API_CALL nv_init_msi(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc = 0; + + rc = pci_enable_msi(nvl->pci_dev); + if (rc == 0) + { + nv->interrupt_line = nvl->pci_dev->irq; + nv->flags |= NV_FLAG_USES_MSI; + nvl->num_intr = 1; + NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * nvl->num_intr); + + if (nvl->irq_count == NULL) + { + nv->flags &= ~NV_FLAG_USES_MSI; + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Failed to allocate counter for MSI entry; " + "falling back to PCIe virtual-wire interrupts.\n"); + } + else + { + memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * nvl->num_intr); + nvl->current_num_irq_tracked = 0; + } + } + else + { + nv->flags &= ~NV_FLAG_USES_MSI; + if (nvl->pci_dev->irq != 0) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Failed to enable MSI; " + "falling back to PCIe virtual-wire interrupts.\n"); + } + } + + return; +} + +void NV_API_CALL nv_init_msix(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int num_intr = 0; + struct msix_entry *msix_entries; + int rc = 0; + int i; + + NV_SPIN_LOCK_INIT(&nvl->msix_isr_lock); + + rc = os_alloc_mutex(&nvl->msix_bh_mutex); + if (rc != 0) + goto failed; + + num_intr = nv_get_max_irq(nvl->pci_dev); + + if (num_intr > NV_RM_MAX_MSIX_LINES) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Reducing MSI-X count from %d to the " + "driver-supported maximum %d.\n", num_intr, NV_RM_MAX_MSIX_LINES); + num_intr = NV_RM_MAX_MSIX_LINES; + } + + NV_KMALLOC(nvl->msix_entries, sizeof(struct msix_entry) * num_intr); + if (nvl->msix_entries == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate MSI-X entries.\n"); + goto failed; + } + + for (i = 0, msix_entries = nvl->msix_entries; i < num_intr; i++, msix_entries++) + { + msix_entries->entry = i; + } + + NV_KMALLOC(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr); + + if (nvl->irq_count == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to allocate counter for MSI-X entries.\n"); + goto failed; + } + else + { + memset(nvl->irq_count, 0, sizeof(nv_irq_count_info_t) * num_intr); + nvl->current_num_irq_tracked = 0; + } + rc = nv_pci_enable_msix(nvl, num_intr); + if (rc != NV_OK) + goto failed; + + nv->flags |= NV_FLAG_USES_MSIX; + return; + +failed: + nv->flags &= ~NV_FLAG_USES_MSIX; + + if (nvl->msix_entries) + { + NV_KFREE(nvl->msix_entries, sizeof(struct msix_entry) * num_intr); + } + + if (nvl->irq_count) + { + NV_KFREE(nvl->irq_count, sizeof(nv_irq_count_info_t) * num_intr); + } + + if (nvl->msix_bh_mutex) + { + os_free_mutex(nvl->msix_bh_mutex); + nvl->msix_bh_mutex = NULL; + } + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to enable MSI-X.\n"); +} + +NvS32 NV_API_CALL nv_request_msix_irq(nv_linux_state_t *nvl) +{ + int i; + int j; + struct msix_entry *msix_entries; + int rc = NV_ERR_INVALID_ARGUMENT; + nv_state_t *nv = NV_STATE_PTR(nvl); + + for (i = 0, msix_entries = nvl->msix_entries; i < nvl->num_intr; + i++, msix_entries++) + { + rc = request_threaded_irq(msix_entries->vector, nvidia_isr_msix, + nvidia_isr_msix_kthread_bh, nv_default_irq_flags(nv), + nv_device_name, (void *)nvl); + if (rc) + { + for( j = 0; j < i; j++) + { + free_irq(nvl->msix_entries[i].vector, (void *)nvl); + } + break; + } + } + + return rc; +} +#endif diff --git a/kernel-open/nvidia/nv-p2p.c b/kernel-open/nvidia/nv-p2p.c new file mode 100644 index 000000000..1f090fbd1 --- /dev/null +++ b/kernel-open/nvidia/nv-p2p.c @@ -0,0 +1,956 @@ +/* 
+ * SPDX-FileCopyrightText: Copyright (c) 2011-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-ibmnpu.h" +#include "nv-rsync.h" + +#include "nv-p2p.h" +#include "rmp2pdefines.h" + +typedef struct nv_p2p_dma_mapping { + struct list_head list_node; + struct nvidia_p2p_dma_mapping *dma_mapping; +} nv_p2p_dma_mapping_t; + +typedef struct nv_p2p_mem_info { + void (*free_callback)(void *data); + void *data; + struct nvidia_p2p_page_table page_table; + struct { + struct list_head list_head; + struct semaphore lock; + } dma_mapping_list; + NvBool bPersistent; + void *private; +} nv_p2p_mem_info_t; + +int nvidia_p2p_cap_persistent_pages = 1; +EXPORT_SYMBOL(nvidia_p2p_cap_persistent_pages); + +// declared and created in nv.c +extern void *nvidia_p2p_page_t_cache; + +static struct nvidia_status_mapping { + NV_STATUS status; + int error; +} nvidia_status_mappings[] = { + { NV_ERR_GENERIC, -EIO }, + { NV_ERR_INSUFFICIENT_RESOURCES, -ENOMEM }, + { NV_ERR_NO_MEMORY, -ENOMEM }, + { NV_ERR_INVALID_ARGUMENT, -EINVAL }, + { NV_ERR_INVALID_OBJECT_HANDLE, -EINVAL }, + { NV_ERR_INVALID_STATE, -EIO }, + { NV_ERR_NOT_SUPPORTED, -ENOTSUPP }, + { NV_ERR_OBJECT_NOT_FOUND, -EINVAL }, + { NV_ERR_STATE_IN_USE, -EBUSY }, + { NV_ERR_GPU_UUID_NOT_FOUND, -ENODEV }, + { NV_OK, 0 }, +}; + +#define NVIDIA_STATUS_MAPPINGS \ + (sizeof(nvidia_status_mappings) / sizeof(struct nvidia_status_mapping)) + +static int nvidia_p2p_map_status(NV_STATUS status) +{ + int error = -EIO; + uint8_t i; + + for (i = 0; i < NVIDIA_STATUS_MAPPINGS; i++) + { + if (nvidia_status_mappings[i].status == status) + { + error = nvidia_status_mappings[i].error; + break; + } + } + return error; +} + +static NvU32 nvidia_p2p_page_size_mappings[NVIDIA_P2P_PAGE_SIZE_COUNT] = { + NVRM_P2P_PAGESIZE_SMALL_4K, NVRM_P2P_PAGESIZE_BIG_64K, NVRM_P2P_PAGESIZE_BIG_128K +}; + +static NV_STATUS nvidia_p2p_map_page_size(NvU32 page_size, NvU32 *page_size_index) +{ + NvU32 i; + + for (i = 0; i < NVIDIA_P2P_PAGE_SIZE_COUNT; i++) + { + if (nvidia_p2p_page_size_mappings[i] == page_size) + { + *page_size_index = i; + break; + } + } + + if (i == NVIDIA_P2P_PAGE_SIZE_COUNT) + return NV_ERR_GENERIC; + + return NV_OK; +} + +static NV_STATUS nv_p2p_insert_dma_mapping( + struct nv_p2p_mem_info *mem_info, + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + NV_STATUS status; 
+ struct nv_p2p_dma_mapping *node; + + status = os_alloc_mem((void**)&node, sizeof(*node)); + if (status != NV_OK) + { + return status; + } + + down(&mem_info->dma_mapping_list.lock); + + node->dma_mapping = dma_mapping; + list_add_tail(&node->list_node, &mem_info->dma_mapping_list.list_head); + + up(&mem_info->dma_mapping_list.lock); + + return NV_OK; +} + +static struct nvidia_p2p_dma_mapping* nv_p2p_remove_dma_mapping( + struct nv_p2p_mem_info *mem_info, + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + struct nv_p2p_dma_mapping *cur; + struct nvidia_p2p_dma_mapping *ret_dma_mapping = NULL; + + down(&mem_info->dma_mapping_list.lock); + + list_for_each_entry(cur, &mem_info->dma_mapping_list.list_head, list_node) + { + if (dma_mapping == NULL || dma_mapping == cur->dma_mapping) + { + ret_dma_mapping = cur->dma_mapping; + list_del(&cur->list_node); + os_free_mem(cur); + break; + } + } + + up(&mem_info->dma_mapping_list.lock); + + return ret_dma_mapping; +} + +static void nv_p2p_free_dma_mapping( + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + nv_dma_device_t peer_dma_dev = {{ 0 }}; + NvU32 page_size; + NV_STATUS status; + NvU32 i; + + peer_dma_dev.dev = &dma_mapping->pci_dev->dev; + peer_dma_dev.addressable_range.limit = dma_mapping->pci_dev->dma_mask; + + page_size = nvidia_p2p_page_size_mappings[dma_mapping->page_size_type]; + + if (dma_mapping->private != NULL) + { + WARN_ON(page_size != PAGE_SIZE); + + status = nv_dma_unmap_alloc(&peer_dma_dev, + dma_mapping->entries, + dma_mapping->dma_addresses, + &dma_mapping->private); + WARN_ON(status != NV_OK); + } + else + { + for (i = 0; i < dma_mapping->entries; i++) + { + nv_dma_unmap_peer(&peer_dma_dev, page_size / PAGE_SIZE, + dma_mapping->dma_addresses[i]); + } + } + + os_free_mem(dma_mapping->dma_addresses); + + os_free_mem(dma_mapping); +} + +static void nv_p2p_free_page_table( + struct nvidia_p2p_page_table *page_table +) +{ + NvU32 i; + struct nvidia_p2p_dma_mapping *dma_mapping; + struct nv_p2p_mem_info *mem_info = NULL; + + mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + + dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL); + while (dma_mapping != NULL) + { + nv_p2p_free_dma_mapping(dma_mapping); + + dma_mapping = nv_p2p_remove_dma_mapping(mem_info, NULL); + } + + for (i = 0; i < page_table->entries; i++) + { + NV_KMEM_CACHE_FREE(page_table->pages[i], nvidia_p2p_page_t_cache); + } + + if (page_table->gpu_uuid != NULL) + { + os_free_mem(page_table->gpu_uuid); + } + + if (page_table->pages != NULL) + { + os_free_mem(page_table->pages); + } + + os_free_mem(mem_info); +} + +static NV_STATUS nv_p2p_put_pages( + nvidia_stack_t * sp, + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + struct nvidia_p2p_page_table **page_table +) +{ + NV_STATUS status; + struct nv_p2p_mem_info *mem_info = NULL; + + mem_info = container_of(*page_table, nv_p2p_mem_info_t, page_table); + + /* + * rm_p2p_put_pages returns NV_OK if the page_table was found and + * got unlinked from the RM's tracker (atomically). This ensures that + * RM's tear-down path does not race with this path. + * + * rm_p2p_put_pages returns NV_ERR_OBJECT_NOT_FOUND if the page_table + * was already unlinked. 
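 *
 * In the non-persistent NV_ERR_OBJECT_NOT_FOUND case, the RM's tear-down path
 * is expected to free the page table via the registered free callback, so the
 * status is converted to NV_OK below and only the caller's pointer is cleared.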
+ */ + if (mem_info->bPersistent) + { + status = rm_p2p_put_pages_persistent(sp, mem_info->private, *page_table); + } + else + { + status = rm_p2p_put_pages(sp, p2p_token, va_space, + virtual_address, *page_table); + } + + if (status == NV_OK) + { + nv_p2p_free_page_table(*page_table); + *page_table = NULL; + } + else if (!mem_info->bPersistent && (status == NV_ERR_OBJECT_NOT_FOUND)) + { + status = NV_OK; + *page_table = NULL; + } + else + { + WARN_ON(status != NV_OK); + } + + return status; +} + +void NV_API_CALL nv_p2p_free_platform_data( + void *data +) +{ + if (data == NULL) + { + WARN_ON(data == NULL); + return; + } + + nv_p2p_free_page_table((struct nvidia_p2p_page_table*)data); +} + +int nvidia_p2p_init_mapping( + uint64_t p2p_token, + struct nvidia_p2p_params *params, + void (*destroy_callback)(void *data), + void *data +) +{ + return -ENOTSUPP; +} + +EXPORT_SYMBOL(nvidia_p2p_init_mapping); + +int nvidia_p2p_destroy_mapping(uint64_t p2p_token) +{ + return -ENOTSUPP; +} + +EXPORT_SYMBOL(nvidia_p2p_destroy_mapping); + +static void nv_p2p_mem_info_free_callback(void *data) +{ + nv_p2p_mem_info_t *mem_info = (nv_p2p_mem_info_t*) data; + + mem_info->free_callback(mem_info->data); + + nv_p2p_free_platform_data(&mem_info->page_table); +} + +int nvidia_p2p_get_pages( + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + uint64_t length, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void * data), + void *data +) +{ + NV_STATUS status; + nvidia_stack_t *sp = NULL; + struct nvidia_p2p_page *page; + struct nv_p2p_mem_info *mem_info = NULL; + NvU32 entries; + NvU32 *wreqmb_h = NULL; + NvU32 *rreqmb_h = NULL; + NvU64 *physical_addresses = NULL; + NvU32 page_count; + NvU32 i = 0; + NvBool bGetPages = NV_FALSE; + NvBool bGetUuid = NV_FALSE; + NvU32 page_size = NVRM_P2P_PAGESIZE_BIG_64K; + NvU32 page_size_index; + NvU64 temp_length; + NvU8 *gpu_uuid = NULL; + NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0}; + int rc; + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + *page_table = NULL; + status = os_alloc_mem((void **)&mem_info, sizeof(*mem_info)); + if (status != NV_OK) + { + goto failed; + } + + memset(mem_info, 0, sizeof(*mem_info)); + + INIT_LIST_HEAD(&mem_info->dma_mapping_list.list_head); + NV_INIT_MUTEX(&mem_info->dma_mapping_list.lock); + + *page_table = &(mem_info->page_table); + + mem_info->bPersistent = (free_callback == NULL); + + //asign length to temporary variable since do_div macro does in-place division + temp_length = length; + do_div(temp_length, page_size); + page_count = temp_length; + + if (length & (page_size - 1)) + { + page_count++; + } + + status = os_alloc_mem((void **)&physical_addresses, + (page_count * sizeof(NvU64))); + if (status != NV_OK) + { + goto failed; + } + status = os_alloc_mem((void **)&wreqmb_h, (page_count * sizeof(NvU32))); + if (status != NV_OK) + { + goto failed; + } + status = os_alloc_mem((void **)&rreqmb_h, (page_count * sizeof(NvU32))); + if (status != NV_OK) + { + goto failed; + } + + if (mem_info->bPersistent) + { + void *gpu_info = NULL; + + if ((p2p_token != 0) || (va_space != 0)) + { + status = -ENOTSUPP; + goto failed; + } + + status = rm_p2p_get_gpu_info(sp, virtual_address, length, &gpu_uuid, &gpu_info); + if (status != NV_OK) + { + goto failed; + } + + rc = nvidia_dev_get_uuid(gpu_uuid, sp); + if (rc != 0) + { + status = NV_ERR_GPU_UUID_NOT_FOUND; + goto failed; + } + + os_mem_copy(uuid, gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN); + + bGetUuid = NV_TRUE; + + status = 
rm_p2p_get_pages_persistent(sp, virtual_address, length, &mem_info->private, + physical_addresses, &entries, *page_table, gpu_info); + if (status != NV_OK) + { + goto failed; + } + } + else + { + // Get regular old-style, non-persistent mappings + status = rm_p2p_get_pages(sp, p2p_token, va_space, + virtual_address, length, physical_addresses, wreqmb_h, + rreqmb_h, &entries, &gpu_uuid, *page_table); + if (status != NV_OK) + { + goto failed; + } + } + + bGetPages = NV_TRUE; + (*page_table)->gpu_uuid = gpu_uuid; + + status = os_alloc_mem((void *)&(*page_table)->pages, + (entries * sizeof(page))); + if (status != NV_OK) + { + goto failed; + } + + (*page_table)->version = NVIDIA_P2P_PAGE_TABLE_VERSION; + + for (i = 0; i < entries; i++) + { + page = NV_KMEM_CACHE_ALLOC(nvidia_p2p_page_t_cache); + if (page == NULL) + { + status = NV_ERR_NO_MEMORY; + goto failed; + } + + memset(page, 0, sizeof(*page)); + + page->physical_address = physical_addresses[i]; + page->registers.fermi.wreqmb_h = wreqmb_h[i]; + page->registers.fermi.rreqmb_h = rreqmb_h[i]; + + (*page_table)->pages[i] = page; + (*page_table)->entries++; + } + + status = nvidia_p2p_map_page_size(page_size, &page_size_index); + if (status != NV_OK) + { + goto failed; + } + + (*page_table)->page_size = page_size_index; + + os_free_mem(physical_addresses); + os_free_mem(wreqmb_h); + os_free_mem(rreqmb_h); + + if (free_callback != NULL) + { + mem_info->free_callback = free_callback; + mem_info->data = data; + + status = rm_p2p_register_callback(sp, p2p_token, virtual_address, length, + *page_table, nv_p2p_mem_info_free_callback, mem_info); + if (status != NV_OK) + { + goto failed; + } + } + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); + +failed: + if (physical_addresses != NULL) + { + os_free_mem(physical_addresses); + } + if (wreqmb_h != NULL) + { + os_free_mem(wreqmb_h); + } + if (rreqmb_h != NULL) + { + os_free_mem(rreqmb_h); + } + + if (bGetPages) + { + (void)nv_p2p_put_pages(sp, p2p_token, va_space, + virtual_address, page_table); + } + + if (bGetUuid) + { + nvidia_dev_put_uuid(uuid, sp); + } + + if (*page_table != NULL) + { + nv_p2p_free_page_table(*page_table); + } + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); +} + +EXPORT_SYMBOL(nvidia_p2p_get_pages); + +/* + * This function is a no-op, but is left in place (for now), in order to allow + * third-party callers to build and run without errors or warnings. This is OK, + * because the missing functionality is provided by nv_p2p_free_platform_data, + * which is being called as part of the RM's cleanup path. 
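+ *
+ * Third-party callers may therefore keep invoking nvidia_p2p_free_page_table()
+ * (for example from their registered free_callback) and it will simply
+ * return 0 without touching the page table.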
+ */ +int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table) +{ + return 0; +} + +EXPORT_SYMBOL(nvidia_p2p_free_page_table); + +int nvidia_p2p_put_pages( + uint64_t p2p_token, + uint32_t va_space, + uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table +) +{ + struct nv_p2p_mem_info *mem_info = NULL; + NvU8 uuid[NVIDIA_P2P_GPU_UUID_LEN] = {0}; + NV_STATUS status; + nvidia_stack_t *sp = NULL; + int rc = 0; + + os_mem_copy(uuid, page_table->gpu_uuid, NVIDIA_P2P_GPU_UUID_LEN); + + mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return -ENOMEM; + } + + status = nv_p2p_put_pages(sp, p2p_token, va_space, + virtual_address, &page_table); + + if (mem_info->bPersistent) + { + nvidia_dev_put_uuid(uuid, sp); + } + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); +} + +EXPORT_SYMBOL(nvidia_p2p_put_pages); + +int nvidia_p2p_dma_map_pages( + struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping **dma_mapping +) +{ + NV_STATUS status; + nv_dma_device_t peer_dma_dev = {{ 0 }}; + nvidia_stack_t *sp = NULL; + NvU64 *dma_addresses = NULL; + NvU32 page_count; + NvU32 page_size; + enum nvidia_p2p_page_size_type page_size_type; + struct nv_p2p_mem_info *mem_info = NULL; + NvU32 i; + void *priv; + int rc; + + if (peer == NULL || page_table == NULL || dma_mapping == NULL || + page_table->gpu_uuid == NULL) + { + return -EINVAL; + } + + mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + return rc; + } + + *dma_mapping = NULL; + status = os_alloc_mem((void **)dma_mapping, sizeof(**dma_mapping)); + if (status != NV_OK) + { + goto failed; + } + memset(*dma_mapping, 0, sizeof(**dma_mapping)); + + page_count = page_table->entries; + + status = os_alloc_mem((void **)&dma_addresses, + (page_count * sizeof(NvU64))); + if (status != NV_OK) + { + goto failed; + } + + page_size_type = page_table->page_size; + + BUG_ON((page_size_type <= NVIDIA_P2P_PAGE_SIZE_4KB) || + (page_size_type >= NVIDIA_P2P_PAGE_SIZE_COUNT)); + + peer_dma_dev.dev = &peer->dev; + peer_dma_dev.addressable_range.limit = peer->dma_mask; + + page_size = nvidia_p2p_page_size_mappings[page_size_type]; + + for (i = 0; i < page_count; i++) + { + dma_addresses[i] = page_table->pages[i]->physical_address; + } + + status = rm_p2p_dma_map_pages(sp, &peer_dma_dev, + page_table->gpu_uuid, page_size, page_count, dma_addresses, &priv); + if (status != NV_OK) + { + goto failed; + } + + (*dma_mapping)->version = NVIDIA_P2P_DMA_MAPPING_VERSION; + (*dma_mapping)->page_size_type = page_size_type; + (*dma_mapping)->entries = page_count; + (*dma_mapping)->dma_addresses = dma_addresses; + (*dma_mapping)->private = priv; + (*dma_mapping)->pci_dev = peer; + + /* + * All success, it is safe to insert dma_mapping now. 
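+ * If the insert itself fails, the fully constructed mapping is unwound
+ * through the failed_insert path below.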
+ */ + status = nv_p2p_insert_dma_mapping(mem_info, *dma_mapping); + if (status != NV_OK) + { + goto failed_insert; + } + + nv_kmem_cache_free_stack(sp); + + return 0; + +failed_insert: + nv_p2p_free_dma_mapping(*dma_mapping); + dma_addresses = NULL; + *dma_mapping = NULL; + +failed: + if (dma_addresses != NULL) + { + os_free_mem(dma_addresses); + } + + if (*dma_mapping != NULL) + { + os_free_mem(*dma_mapping); + *dma_mapping = NULL; + } + + nv_kmem_cache_free_stack(sp); + + return nvidia_p2p_map_status(status); +} + +EXPORT_SYMBOL(nvidia_p2p_dma_map_pages); + +int nvidia_p2p_dma_unmap_pages( + struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + struct nv_p2p_mem_info *mem_info = NULL; + + if (peer == NULL || dma_mapping == NULL || page_table == NULL) + { + return -EINVAL; + } + + mem_info = container_of(page_table, nv_p2p_mem_info_t, page_table); + + /* + * nv_p2p_remove_dma_mapping returns dma_mapping if the dma_mapping was + * found and got unlinked from the mem_info->dma_mapping_list (atomically). + * This ensures that the RM's tear-down path does not race with this path. + * + * nv_p2p_remove_dma_mappings returns NULL if the dma_mapping was already + * unlinked. + */ + if (nv_p2p_remove_dma_mapping(mem_info, dma_mapping) == NULL) + { + return 0; + } + + WARN_ON(peer != dma_mapping->pci_dev); + + BUG_ON((dma_mapping->page_size_type <= NVIDIA_P2P_PAGE_SIZE_4KB) || + (dma_mapping->page_size_type >= NVIDIA_P2P_PAGE_SIZE_COUNT)); + + nv_p2p_free_dma_mapping(dma_mapping); + + return 0; +} + +EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages); + +/* + * This function is a no-op, but is left in place (for now), in order to allow + * third-party callers to build and run without errors or warnings. This is OK, + * because the missing functionality is provided by nv_p2p_free_platform_data, + * which is being called as part of the RM's cleanup path. 
+ */ +int nvidia_p2p_free_dma_mapping( + struct nvidia_p2p_dma_mapping *dma_mapping +) +{ + return 0; +} + +EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping); + +int nvidia_p2p_register_rsync_driver( + nvidia_p2p_rsync_driver_t *driver, + void *data +) +{ + if (driver == NULL) + { + return -EINVAL; + } + + if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver)) + { + return -EINVAL; + } + + if (driver->get_relaxed_ordering_mode == NULL || + driver->put_relaxed_ordering_mode == NULL || + driver->wait_for_rsync == NULL) + { + return -EINVAL; + } + + return nv_register_rsync_driver(driver->get_relaxed_ordering_mode, + driver->put_relaxed_ordering_mode, + driver->wait_for_rsync, data); +} + +EXPORT_SYMBOL(nvidia_p2p_register_rsync_driver); + +void nvidia_p2p_unregister_rsync_driver( + nvidia_p2p_rsync_driver_t *driver, + void *data +) +{ + if (driver == NULL) + { + WARN_ON(1); + return; + } + + if (!NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(driver)) + { + WARN_ON(1); + return; + } + + if (driver->get_relaxed_ordering_mode == NULL || + driver->put_relaxed_ordering_mode == NULL || + driver->wait_for_rsync == NULL) + { + WARN_ON(1); + return; + } + + nv_unregister_rsync_driver(driver->get_relaxed_ordering_mode, + driver->put_relaxed_ordering_mode, + driver->wait_for_rsync, data); +} + +EXPORT_SYMBOL(nvidia_p2p_unregister_rsync_driver); + +int nvidia_p2p_get_rsync_registers( + nvidia_p2p_rsync_reg_info_t **reg_info +) +{ + nv_linux_state_t *nvl; + nv_state_t *nv; + NV_STATUS status; + void *ptr = NULL; + NvU64 addr; + NvU64 size; + struct pci_dev *ibmnpu = NULL; + NvU32 index = 0; + NvU32 count = 0; + nvidia_p2p_rsync_reg_info_t *info = NULL; + nvidia_p2p_rsync_reg_t *regs = NULL; + + if (reg_info == NULL) + { + return -EINVAL; + } + + status = os_alloc_mem((void**)&info, sizeof(*info)); + if (status != NV_OK) + { + return -ENOMEM; + } + + memset(info, 0, sizeof(*info)); + + info->version = NVIDIA_P2P_RSYNC_REG_INFO_VERSION; + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + count++; + } + + status = os_alloc_mem((void**)®s, (count * sizeof(*regs))); + if (status != NV_OK) + { + nvidia_p2p_put_rsync_registers(info); + UNLOCK_NV_LINUX_DEVICES(); + return -ENOMEM; + } + + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + + addr = 0; + size = 0; + + status = nv_get_ibmnpu_genreg_info(nv, &addr, &size, (void**)&ibmnpu); + if (status != NV_OK) + { + continue; + } + + ptr = nv_ioremap_nocache(addr, size); + if (ptr == NULL) + { + continue; + } + + regs[index].ptr = ptr; + regs[index].size = size; + regs[index].gpu = nvl->pci_dev; + regs[index].ibmnpu = ibmnpu; + regs[index].cluster_id = 0; + regs[index].socket_id = nv_get_ibmnpu_chip_id(nv); + + index++; + } + + UNLOCK_NV_LINUX_DEVICES(); + + info->regs = regs; + info->entries = index; + + if (info->entries == 0) + { + nvidia_p2p_put_rsync_registers(info); + return -ENODEV; + } + + *reg_info = info; + + return 0; +} + +EXPORT_SYMBOL(nvidia_p2p_get_rsync_registers); + +void nvidia_p2p_put_rsync_registers( + nvidia_p2p_rsync_reg_info_t *reg_info +) +{ + NvU32 i; + nvidia_p2p_rsync_reg_t *regs = NULL; + + if (reg_info == NULL) + { + return; + } + + if (reg_info->regs) + { + for (i = 0; i < reg_info->entries; i++) + { + regs = ®_info->regs[i]; + + if (regs->ptr) + { + nv_iounmap(regs->ptr, regs->size); + } + } + + os_free_mem(reg_info->regs); + } + + os_free_mem(reg_info); +} + +EXPORT_SYMBOL(nvidia_p2p_put_rsync_registers); diff --git a/kernel-open/nvidia/nv-p2p.h 
b/kernel-open/nvidia/nv-p2p.h new file mode 100644 index 000000000..c2059145d --- /dev/null +++ b/kernel-open/nvidia/nv-p2p.h @@ -0,0 +1,427 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_P2P_H_ +#define _NV_P2P_H_ + +/* + * NVIDIA P2P Structure Versioning + * + * For the nvidia_p2p_*_t structures allocated by the NVIDIA driver, it will + * set the version field of the structure according to the definition used by + * the NVIDIA driver. The "major" field of the version is defined as the upper + * 16 bits, and the "minor" field of the version is defined as the lower 16 + * bits. The version field will always be the first 4 bytes of the structure, + * and third-party drivers should check the value of this field in structures + * allocated by the NVIDIA driver to ensure runtime compatibility. + * + * In general, version numbers will be incremented as follows: + * - When a backwards-compatible change is made to the structure layout, the + * minor version for that structure will be incremented. Third-party drivers + * built against an older minor version will continue to work with the newer + * minor version used by the NVIDIA driver, without recompilation. + * - When a breaking change is made to the structure layout, the major version + * will be incremented. Third-party drivers built against an older major + * version require at least recompilation and potentially additional updates + * to use the new API. 
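+ *
+ * For example (illustrative sketch), a third-party driver that has just
+ * obtained a page table from nvidia_p2p_get_pages() can validate the layout
+ * before reading any other field:
+ *
+ *   if (!NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(page_table)) {
+ *       // major version mismatch, or the running NVIDIA driver is older
+ *       // than the headers this driver was built against
+ *       return -EINVAL;
+ *   }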
+ */ +#define NVIDIA_P2P_MAJOR_VERSION_MASK 0xffff0000 +#define NVIDIA_P2P_MINOR_VERSION_MASK 0x0000ffff + +#define NVIDIA_P2P_MAJOR_VERSION(v) \ + (((v) & NVIDIA_P2P_MAJOR_VERSION_MASK) >> 16) + +#define NVIDIA_P2P_MINOR_VERSION(v) \ + (((v) & NVIDIA_P2P_MINOR_VERSION_MASK)) + +#define NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) \ + (NVIDIA_P2P_MAJOR_VERSION((p)->version) == NVIDIA_P2P_MAJOR_VERSION(v)) + +#define NVIDIA_P2P_VERSION_COMPATIBLE(p, v) \ + (NVIDIA_P2P_MAJOR_VERSION_MATCHES(p, v) && \ + (NVIDIA_P2P_MINOR_VERSION((p)->version) >= (NVIDIA_P2P_MINOR_VERSION(v)))) + +enum { + NVIDIA_P2P_ARCHITECTURE_TESLA = 0, + NVIDIA_P2P_ARCHITECTURE_FERMI, + NVIDIA_P2P_ARCHITECTURE_CURRENT = NVIDIA_P2P_ARCHITECTURE_FERMI +}; + +#define NVIDIA_P2P_PARAMS_VERSION 0x00010001 + +enum { + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_GPU = 0, + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE, + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX = \ + NVIDIA_P2P_PARAMS_ADDRESS_INDEX_THIRD_PARTY_DEVICE +}; + +#define NVIDIA_P2P_GPU_UUID_LEN 16 + +typedef +struct nvidia_p2p_params { + uint32_t version; + uint32_t architecture; + union nvidia_p2p_mailbox_addresses { + struct { + uint64_t wmb_addr; + uint64_t wmb_data; + uint64_t rreq_addr; + uint64_t rcomp_addr; + uint64_t reserved[2]; + } fermi; + } addresses[NVIDIA_P2P_PARAMS_ADDRESS_INDEX_MAX+1]; +} nvidia_p2p_params_t; + +/* + * Capability flag for users to detect + * driver support for persistent pages. + */ +extern int nvidia_p2p_cap_persistent_pages; +#define NVIDIA_P2P_CAP_PERSISTENT_PAGES + +/* + * This API is not supported. + */ +int nvidia_p2p_init_mapping(uint64_t p2p_token, + struct nvidia_p2p_params *params, + void (*destroy_callback)(void *data), + void *data); + +/* + * This API is not supported. + */ +int nvidia_p2p_destroy_mapping(uint64_t p2p_token); + +enum nvidia_p2p_page_size_type { + NVIDIA_P2P_PAGE_SIZE_4KB = 0, + NVIDIA_P2P_PAGE_SIZE_64KB, + NVIDIA_P2P_PAGE_SIZE_128KB, + NVIDIA_P2P_PAGE_SIZE_COUNT +}; + +typedef +struct nvidia_p2p_page { + uint64_t physical_address; + union nvidia_p2p_request_registers { + struct { + uint32_t wreqmb_h; + uint32_t rreqmb_h; + uint32_t rreqmb_0; + uint32_t reserved[3]; + } fermi; + } registers; +} nvidia_p2p_page_t; + +#define NVIDIA_P2P_PAGE_TABLE_VERSION 0x00010002 + +#define NVIDIA_P2P_PAGE_TABLE_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_PAGE_TABLE_VERSION) + +typedef +struct nvidia_p2p_page_table { + uint32_t version; + uint32_t page_size; /* enum nvidia_p2p_page_size_type */ + struct nvidia_p2p_page **pages; + uint32_t entries; + uint8_t *gpu_uuid; +} nvidia_p2p_page_table_t; + +/* + * @brief + * Make the pages underlying a range of GPU virtual memory + * accessible to a third-party device. + * + * This API only supports pinned, GPU-resident memory, such as that provided + * by cudaMalloc(). + * + * This API may sleep. + * + * @param[in] p2p_token + * A token that uniquely identifies the P2P mapping. + * @param[in] va_space + * A GPU virtual address space qualifier. + * @param[in] virtual_address + * The start address in the specified virtual address space. + * Address must be aligned to the 64KB boundary. + * @param[in] length + * The length of the requested P2P mapping. + * Length must be a multiple of 64KB. + * @param[out] page_table + * A pointer to an array of structures with P2P PTEs. + * @param[in] free_callback + * A pointer to the function to be invoked when the pages + * underlying the virtual address range are freed + * implicitly. + * If NULL, persistent pages will be returned. 
+ * This means the pages underlying the range of GPU virtual memory + * will persist until explicitly freed by nvidia_p2p_put_pages(). + * Persistent GPU memory mappings are not supported on PowerPC, + + + + * MIG-enabled devices and vGPU. + + * @param[in] data + * A non-NULL opaque pointer to private data to be passed to the + * callback function. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -ENOMEM if the driver failed to allocate memory or if + * insufficient resources were available to complete the operation. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_get_pages(uint64_t p2p_token, uint32_t va_space, + uint64_t virtual_address, + uint64_t length, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void *data), + void *data); + +#define NVIDIA_P2P_DMA_MAPPING_VERSION 0x00020003 + +#define NVIDIA_P2P_DMA_MAPPING_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_DMA_MAPPING_VERSION) + +struct pci_dev; + +typedef +struct nvidia_p2p_dma_mapping { + uint32_t version; + enum nvidia_p2p_page_size_type page_size_type; + uint32_t entries; + uint64_t *dma_addresses; + void *private; + struct pci_dev *pci_dev; +} nvidia_p2p_dma_mapping_t; + +/* + * @brief + * Make the physical pages retrieved using nvidia_p2p_get_pages accessible to + * a third-party device. + * + * @param[in] peer + * The struct pci_dev * of the peer device that needs to DMA to/from the + * mapping. + * @param[in] page_table + * The page table outlining the physical pages underlying the mapping, as + * retrieved with nvidia_p2p_get_pages(). + * @param[out] dma_mapping + * The DMA mapping containing the DMA addresses to use on the third-party + * device. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -ENOTSUPP if the requested operation is not supported. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_dma_map_pages(struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping **dma_mapping); + +/* + * @brief + * Unmap the physical pages previously mapped to the third-party device by + * nvidia_p2p_dma_map_pages(). + * + * @param[in] peer + * The struct pci_dev * of the peer device that the DMA mapping belongs to. + * @param[in] page_table + * The page table backing the DMA mapping to be unmapped. + * @param[in] dma_mapping + * The DMA mapping containing the DMA addresses used by the third-party + * device, as retrieved with nvidia_p2p_dma_map_pages(). After this call + * returns, neither this struct nor the addresses contained within will be + * valid for use by the third-party device. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_dma_unmap_pages(struct pci_dev *peer, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping *dma_mapping); + +/* + * @brief + * Release a set of pages previously made accessible to + * a third-party device. + * + * @param[in] p2p_token + * A token that uniquely identifies the P2P mapping. + * @param[in] va_space + * A GPU virtual address space qualifier. + * @param[in] virtual_address + * The start address in the specified virtual address space. + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * + * @return + * 0 upon successful completion. 
+ * -EINVAL if an invalid argument was supplied. + * -EIO if an unknown error occurred. + */ +int nvidia_p2p_put_pages(uint64_t p2p_token, uint32_t va_space, + uint64_t virtual_address, + struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Free a third-party P2P page table. (This function is a no-op.) + * + * @param[in] page_table + * A pointer to the array of structures with P2P PTEs. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + */ +int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Free a third-party P2P DMA mapping. (This function is a no-op.) + * + * @param[in] dma_mapping + * A pointer to the DMA mapping structure. + * + * @return + * 0 upon successful completion. + * -EINVAL if an invalid argument was supplied. + */ +int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping); + +#define NVIDIA_P2P_RSYNC_DRIVER_VERSION 0x00010001 + +#define NVIDIA_P2P_RSYNC_DRIVER_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_DRIVER_VERSION) + +typedef +struct nvidia_p2p_rsync_driver { + uint32_t version; + int (*get_relaxed_ordering_mode)(int *mode, void *data); + void (*put_relaxed_ordering_mode)(int mode, void *data); + void (*wait_for_rsync)(struct pci_dev *gpu, void *data); +} nvidia_p2p_rsync_driver_t; + +/* + * @brief + * Registers the rsync driver. + * + * @param[in] driver + * A pointer to the rsync driver structure. The NVIDIA driver would use, + * + * get_relaxed_ordering_mode to obtain a reference to the current relaxed + * ordering mode (treated as a boolean) from the rsync driver. + * + * put_relaxed_ordering_mode to release a reference to the current relaxed + * ordering mode back to the rsync driver. The NVIDIA driver will call this + * function once for each successful call to get_relaxed_ordering_mode, and + * the relaxed ordering mode must not change until the last reference is + * released. + * + * wait_for_rsync to call into the rsync module to issue RSYNC. This callback + * can't sleep or re-schedule as it may arrive under spinlocks. + * @param[in] data + * A pointer to the rsync driver's private data. + * + * @Returns + * 0 upon successful completion. + * -EINVAL parameters are incorrect. + * -EBUSY if a module is already registered or GPU devices are in use. + */ +int nvidia_p2p_register_rsync_driver(nvidia_p2p_rsync_driver_t *driver, + void *data); + +/* + * @brief + * Unregisters the rsync driver. + * + * @param[in] driver + * A pointer to the rsync driver structure. + * @param[in] data + * A pointer to the rsync driver's private data. + */ +void nvidia_p2p_unregister_rsync_driver(nvidia_p2p_rsync_driver_t *driver, + void *data); + +#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION 0x00020001 + +#define NVIDIA_P2P_RSYNC_REG_INFO_VERSION_COMPATIBLE(p) \ + NVIDIA_P2P_VERSION_COMPATIBLE(p, NVIDIA_P2P_RSYNC_REG_INFO_VERSION) + +typedef struct nvidia_p2p_rsync_reg { + void *ptr; + size_t size; + struct pci_dev *ibmnpu; + struct pci_dev *gpu; + uint32_t cluster_id; + uint32_t socket_id; +} nvidia_p2p_rsync_reg_t; + +typedef struct nvidia_p2p_rsync_reg_info { + uint32_t version; + nvidia_p2p_rsync_reg_t *regs; + size_t entries; +} nvidia_p2p_rsync_reg_info_t; + +/* + * @brief + * Gets rsync (GEN-ID) register information associated with the supported + * NPUs. 
+ * + * The caller would use the returned information {GPU device, NPU device, + * socket-id, cluster-id} to pick the optimal generation registers to issue + * RSYNC (NVLink HW flush). + * + * The interface allocates structures to return the information, hence + * nvidia_p2p_put_rsync_registers() must be called to free the structures. + * + * Note, cluster-id is hardcoded to zero as early system configurations would + * only support cluster mode i.e. all devices would share the same cluster-id + * (0). In the future, appropriate kernel support would be needed to query + * cluster-ids. + * + * @param[out] reg_info + * A pointer to the rsync reg info structure. + * + * @Returns + * 0 Upon successful completion. Otherwise, returns negative value. + */ +int nvidia_p2p_get_rsync_registers(nvidia_p2p_rsync_reg_info_t **reg_info); + +/* + * @brief + * Frees the structures allocated by nvidia_p2p_get_rsync_registers(). + * + * @param[in] reg_info + * A pointer to the rsync reg info structure. + */ +void nvidia_p2p_put_rsync_registers(nvidia_p2p_rsync_reg_info_t *reg_info); + +#endif /* _NV_P2P_H_ */ diff --git a/kernel-open/nvidia/nv-pat.c b/kernel-open/nvidia/nv-pat.c new file mode 100644 index 000000000..1fa530d9c --- /dev/null +++ b/kernel-open/nvidia/nv-pat.c @@ -0,0 +1,478 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-reg.h" +#include "nv-pat.h" + +int nv_pat_mode = NV_PAT_MODE_DISABLED; + +#if defined(NV_ENABLE_PAT_SUPPORT) +/* + * Private PAT support for use by the NVIDIA driver. This is used on + * kernels that do not modify the PAT to include a write-combining + * entry. + * + * On kernels that have CONFIG_X86_PAT, the NVIDIA driver still checks that the + * WC entry is as expected before using PAT. 
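+ *
+ * For reference (simplified sketch of the check performed by
+ * nv_determine_pat_mode() below): the x86 PAT is an eight-entry table held
+ * in the IA32_PAT MSR (0x277), one byte per entry, split across two 32-bit
+ * halves. The driver reuses the kernel's layout only when entry 1 is
+ * already write-combining (0x01):
+ *
+ *   rdmsr(0x277, pat1, pat2);
+ *   if (((pat1 >> 8) & 0xff) == 0x01)
+ *       // entry 1 is WC: NV_PAT_MODE_KERNEL, no PAT rewrite is needed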
+ */ + +#if defined(CONFIG_X86_PAT) +#define NV_ENABLE_BUILTIN_PAT_SUPPORT 0 +#else +#define NV_ENABLE_BUILTIN_PAT_SUPPORT 1 +#endif + + +#define NV_READ_PAT_ENTRIES(pat1, pat2) rdmsr(0x277, (pat1), (pat2)) +#define NV_WRITE_PAT_ENTRIES(pat1, pat2) wrmsr(0x277, (pat1), (pat2)) +#define NV_PAT_ENTRY(pat, index) \ + (((pat) & (0xff << ((index)*8))) >> ((index)*8)) + +#if NV_ENABLE_BUILTIN_PAT_SUPPORT + +static unsigned long orig_pat1, orig_pat2; + +static inline void nv_disable_caches(unsigned long *cr4) +{ + unsigned long cr0 = read_cr0(); + write_cr0(((cr0 & (0xdfffffff)) | 0x40000000)); + wbinvd(); + *cr4 = NV_READ_CR4(); + if (*cr4 & 0x80) NV_WRITE_CR4(*cr4 & ~0x80); + __flush_tlb(); +} + +static inline void nv_enable_caches(unsigned long cr4) +{ + unsigned long cr0 = read_cr0(); + wbinvd(); + __flush_tlb(); + write_cr0((cr0 & 0x9fffffff)); + if (cr4 & 0x80) NV_WRITE_CR4(cr4); +} + +static void nv_setup_pat_entries(void *info) +{ + unsigned long pat1, pat2, cr4; + unsigned long eflags; + +#if defined(NV_ENABLE_HOTPLUG_CPU) + int cpu = (NvUPtr)info; + if ((cpu != 0) && (cpu != (int)smp_processor_id())) + return; +#endif + + NV_SAVE_FLAGS(eflags); + NV_CLI(); + nv_disable_caches(&cr4); + + NV_READ_PAT_ENTRIES(pat1, pat2); + + pat1 &= 0xffff00ff; + pat1 |= 0x00000100; + + NV_WRITE_PAT_ENTRIES(pat1, pat2); + + nv_enable_caches(cr4); + NV_RESTORE_FLAGS(eflags); +} + +static void nv_restore_pat_entries(void *info) +{ + unsigned long cr4; + unsigned long eflags; + +#if defined(NV_ENABLE_HOTPLUG_CPU) + int cpu = (NvUPtr)info; + if ((cpu != 0) && (cpu != (int)smp_processor_id())) + return; +#endif + + NV_SAVE_FLAGS(eflags); + NV_CLI(); + nv_disable_caches(&cr4); + + NV_WRITE_PAT_ENTRIES(orig_pat1, orig_pat2); + + nv_enable_caches(cr4); + NV_RESTORE_FLAGS(eflags); +} + +/* + * NOTE 1: + * Functions register_cpu_notifier(), unregister_cpu_notifier(), + * macros register_hotcpu_notifier, register_hotcpu_notifier, + * and CPU states CPU_DOWN_FAILED, CPU_DOWN_PREPARE + * were removed by the following commit: + * 2016 Dec 25: b272f732f888d4cf43c943a40c9aaa836f9b7431 + * + * NV_REGISTER_CPU_NOTIFIER_PRESENT is true when + * register_cpu_notifier() is present. + * + * The functions cpuhp_setup_state() and cpuhp_remove_state() should be + * used as an alternative to register_cpu_notifier() and + * unregister_cpu_notifier() functions. The following + * commit introduced these functions as well as the enum cpuhp_state. + * 2016 Feb 26: 5b7aa87e0482be768486e0c2277aa4122487eb9d + * + * NV_CPUHP_CPUHP_STATE_PRESENT is true when cpuhp_setup_state() is present. + * + * For kernels where both cpuhp_setup_state() and register_cpu_notifier() + * are present, we still use register_cpu_notifier(). 
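+ *
+ * The online/teardown callbacks below re-apply or restore the driver's PAT
+ * entries on the CPU that is coming up or going down, using
+ * smp_call_function() when that CPU is not the one running the callback.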
+ */ + +static int +nvidia_cpu_teardown(unsigned int cpu) +{ +#if defined(NV_ENABLE_HOTPLUG_CPU) + unsigned int this_cpu = get_cpu(); + + if (this_cpu == cpu) + nv_restore_pat_entries(NULL); + else + smp_call_function(nv_restore_pat_entries, &cpu, 1); + + put_cpu(); +#endif + return 0; +} + +static int +nvidia_cpu_online(unsigned int cpu) +{ +#if defined(NV_ENABLE_HOTPLUG_CPU) + unsigned int this_cpu = get_cpu(); + + if (this_cpu == cpu) + nv_setup_pat_entries(NULL); + else + smp_call_function(nv_setup_pat_entries, &cpu, 1); + + put_cpu(); +#endif + return 0; +} + +static int nv_enable_builtin_pat_support(void) +{ + unsigned long pat1, pat2; + + NV_READ_PAT_ENTRIES(orig_pat1, orig_pat2); + nv_printf(NV_DBG_SETUP, "saved orig pats as 0x%lx 0x%lx\n", orig_pat1, orig_pat2); + + on_each_cpu(nv_setup_pat_entries, NULL, 1); + + NV_READ_PAT_ENTRIES(pat1, pat2); + nv_printf(NV_DBG_SETUP, "changed pats to 0x%lx 0x%lx\n", pat1, pat2); + return 1; +} + +static void nv_disable_builtin_pat_support(void) +{ + unsigned long pat1, pat2; + + on_each_cpu(nv_restore_pat_entries, NULL, 1); + + nv_pat_mode = NV_PAT_MODE_DISABLED; + + NV_READ_PAT_ENTRIES(pat1, pat2); + nv_printf(NV_DBG_SETUP, "restored orig pats as 0x%lx 0x%lx\n", pat1, pat2); +} + +static int +nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) +{ +/* CPU_DOWN_FAILED was added by the following commit + * 2004 Oct 18: 71da3667be80d30121df3972caa0bf5684228379 + * + * CPU_DOWN_PREPARE was added by the following commit + * 2004 Oct 18: d13d28de21d913aacd3c91e76e307fa2eb7835d8 + * + * We use one ifdef for both macros since they were added on the same day. + */ +#if defined(CPU_DOWN_FAILED) + switch (action) + { + case CPU_DOWN_FAILED: + case CPU_ONLINE: + nvidia_cpu_online((NvUPtr)hcpu); + break; + case CPU_DOWN_PREPARE: + nvidia_cpu_teardown((NvUPtr)hcpu); + break; + } +#endif + return NOTIFY_OK; +} + +/* + * See NOTE 1. + * In order to avoid warnings for unused variable when compiling against + * kernel versions which include changes of commit id + * b272f732f888d4cf43c943a40c9aaa836f9b7431, we have to protect declaration + * of nv_hotcpu_nfb with #if. + * + * NV_REGISTER_CPU_NOTIFIER_PRESENT is checked before + * NV_CPUHP_SETUP_STATE_PRESENT to avoid compilation warnings for unused + * variable nvidia_pat_online for kernels where both + * NV_REGISTER_CPU_NOTIFIER_PRESENT and NV_CPUHP_SETUP_STATE_PRESENT + * are true. + */ +#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU) +static struct notifier_block nv_hotcpu_nfb = { + .notifier_call = nvidia_cpu_callback, + .priority = 0 +}; +#elif defined(NV_CPUHP_SETUP_STATE_PRESENT) +static enum cpuhp_state nvidia_pat_online; +#endif + +static int +nvidia_register_cpu_hotplug_notifier(void) +{ + int ret; +/* See NOTE 1 */ +#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU) + /* register_hotcpu_notiifer() returns 0 on success or -ENOENT on failure */ + ret = register_hotcpu_notifier(&nv_hotcpu_nfb); +#elif defined(NV_CPUHP_SETUP_STATE_PRESENT) + /* + * cpuhp_setup_state() returns positive number on success when state is + * CPUHP_AP_ONLINE_DYN. On failure, it returns a negative number. + */ + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "nvidia/pat:online", + nvidia_cpu_online, + nvidia_cpu_teardown); + if (ret < 0) + { + /* + * If cpuhp_setup_state() fails, the cpuhp_remove_state() + * should never be called. If it gets called, we might remove + * some other state. Hence, explicitly set + * nvidia_pat_online to zero. 
This will trigger a BUG() + * in cpuhp_remove_state(). + */ + nvidia_pat_online = 0; + } + else + { + nvidia_pat_online = ret; + } +#else + + /* + * This function should be a no-op for kernels which + * - do not have CONFIG_HOTPLUG_CPU enabled, + * - do not have PAT support, + * - do not have the cpuhp_setup_state() function. + * + * On such kernels, returning an error here would result in module init + * failure. Hence, return 0 here. + */ + if (nv_pat_mode == NV_PAT_MODE_BUILTIN) + { + ret = 0; + } + else + { + ret = -EIO; + } +#endif + + if (ret < 0) + { + nv_disable_pat_support(); + nv_printf(NV_DBG_ERRORS, + "NVRM: CPU hotplug notifier registration failed!\n"); + return -EIO; + } + return 0; +} + +static void +nvidia_unregister_cpu_hotplug_notifier(void) +{ +/* See NOTE 1 */ +#if defined(NV_REGISTER_CPU_NOTIFIER_PRESENT) && defined(CONFIG_HOTPLUG_CPU) + unregister_hotcpu_notifier(&nv_hotcpu_nfb); +#elif defined(NV_CPUHP_SETUP_STATE_PRESENT) + cpuhp_remove_state(nvidia_pat_online); +#endif + return; +} + + +#else /* NV_ENABLE_BUILTIN_PAT_SUPPORT */ + +static int nv_enable_builtin_pat_support(void) +{ + return 0; +} +static void nv_disable_builtin_pat_support(void) +{ +} +static int nvidia_register_cpu_hotplug_notifier(void) +{ + return -EIO; +} +static void nvidia_unregister_cpu_hotplug_notifier(void) +{ +} + +#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */ + +static int nv_determine_pat_mode(void) +{ + unsigned int pat1, pat2, i; + NvU8 PAT_WC_index; + + if (!test_bit(X86_FEATURE_PAT, + (volatile unsigned long *)&boot_cpu_data.x86_capability)) + { + if ((boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) || + (boot_cpu_data.cpuid_level < 1) || + ((cpuid_edx(1) & (1 << 16)) == 0) || + (boot_cpu_data.x86 != 6) || (boot_cpu_data.x86_model >= 15)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: CPU does not support the PAT.\n"); + return NV_PAT_MODE_DISABLED; + } + } + + NV_READ_PAT_ENTRIES(pat1, pat2); + PAT_WC_index = 0xf; + + for (i = 0; i < 4; i++) + { + if (NV_PAT_ENTRY(pat1, i) == 0x01) + { + PAT_WC_index = i; + break; + } + + if (NV_PAT_ENTRY(pat2, i) == 0x01) + { + PAT_WC_index = (i + 4); + break; + } + } + + if (PAT_WC_index == 1) + { + return NV_PAT_MODE_KERNEL; + } + else if (PAT_WC_index != 0xf) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: PAT configuration unsupported.\n"); + return NV_PAT_MODE_DISABLED; + } + else + { +#if NV_ENABLE_BUILTIN_PAT_SUPPORT + return NV_PAT_MODE_BUILTIN; +#else + return NV_PAT_MODE_DISABLED; +#endif /* NV_ENABLE_BUILTIN_PAT_SUPPORT */ + } +} + + +int nv_enable_pat_support(void) +{ + if (nv_pat_mode != NV_PAT_MODE_DISABLED) + return 1; + + nv_pat_mode = nv_determine_pat_mode(); + + switch (nv_pat_mode) + { + case NV_PAT_MODE_DISABLED: + /* avoid the PAT if unavailable/unusable */ + return 0; + case NV_PAT_MODE_KERNEL: + /* inherit the kernel's PAT layout */ + return 1; + case NV_PAT_MODE_BUILTIN: + /* use builtin code to modify the PAT layout */ + break; + } + + return nv_enable_builtin_pat_support(); +} + +void nv_disable_pat_support(void) +{ + if (nv_pat_mode != NV_PAT_MODE_BUILTIN) + return; + + nv_disable_builtin_pat_support(); +} + +int nv_init_pat_support(nvidia_stack_t *sp) +{ + NV_STATUS status; + NvU32 data; + int disable_pat = 0; + int ret = 0; + + status = rm_read_registry_dword(sp, NULL, + NV_USE_PAGE_ATTRIBUTE_TABLE, &data); + if ((status == NV_OK) && ((int)data != ~0)) + { + disable_pat = (data == 0); + } + + if (!disable_pat) + { + nv_enable_pat_support(); + if (nv_pat_mode == NV_PAT_MODE_BUILTIN) + { + ret = nvidia_register_cpu_hotplug_notifier(); + 
return ret; + } + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: builtin PAT support disabled.\n"); + } + + return 0; +} + +void nv_teardown_pat_support(void) +{ + if (nv_pat_mode == NV_PAT_MODE_BUILTIN) + { + nv_disable_pat_support(); + nvidia_unregister_cpu_hotplug_notifier(); + } +} +#endif /* defined(NV_ENABLE_PAT_SUPPORT) */ diff --git a/kernel-open/nvidia/nv-pat.h b/kernel-open/nvidia/nv-pat.h new file mode 100644 index 000000000..0d26a84e1 --- /dev/null +++ b/kernel-open/nvidia/nv-pat.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_PAT_H_ +#define _NV_PAT_H_ + +#include "nv-linux.h" + + +#if defined(NV_ENABLE_PAT_SUPPORT) +extern int nv_init_pat_support(nvidia_stack_t *sp); +extern void nv_teardown_pat_support(void); +extern int nv_enable_pat_support(void); +extern void nv_disable_pat_support(void); +#else +static inline int nv_init_pat_support(nvidia_stack_t *sp) +{ + (void)sp; + return 0; +} + +static inline void nv_teardown_pat_support(void) +{ + return; +} + +static inline int nv_enable_pat_support(void) +{ + return 1; +} + +static inline void nv_disable_pat_support(void) +{ + return; +} +#endif + +#endif /* _NV_PAT_H_ */ diff --git a/kernel-open/nvidia/nv-pci-table.c b/kernel-open/nvidia/nv-pci-table.c new file mode 100644 index 000000000..e3494735a --- /dev/null +++ b/kernel-open/nvidia/nv-pci-table.c @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include + +#include "nv-pci-table.h" + +/* Devices supported by RM */ +struct pci_device_id nv_pci_table[] = { + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_VGA << 8), + .class_mask = ~0 + }, + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_3D << 8), + .class_mask = ~0 + }, + { } +}; + +/* Devices supported by all drivers in nvidia.ko */ +struct pci_device_id nv_module_device_table[] = { + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_VGA << 8), + .class_mask = ~0 + }, + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_3D << 8), + .class_mask = ~0 + }, + { + .vendor = PCI_VENDOR_ID_NVIDIA, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_BRIDGE_OTHER << 8), + .class_mask = ~0 + }, + { } +}; + +MODULE_DEVICE_TABLE(pci, nv_module_device_table); diff --git a/kernel-open/nvidia/nv-pci-table.h b/kernel-open/nvidia/nv-pci-table.h new file mode 100644 index 000000000..b28483bb6 --- /dev/null +++ b/kernel-open/nvidia/nv-pci-table.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_PCI_TABLE_H_ +#define _NV_PCI_TABLE_H_ + +#include + +extern struct pci_device_id nv_pci_table[]; + +#endif /* _NV_PCI_TABLE_H_ */ diff --git a/kernel-open/nvidia/nv-pci.c b/kernel-open/nvidia/nv-pci.c new file mode 100644 index 000000000..2e1401511 --- /dev/null +++ b/kernel-open/nvidia/nv-pci.c @@ -0,0 +1,1092 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-pci-table.h" +#include "nv-pci-types.h" +#include "nv-pci.h" +#include "nv-ibmnpu.h" +#include "nv-frontend.h" +#include "nv-msi.h" +#include "nv-hypervisor.h" + +#if defined(NV_VGPU_KVM_BUILD) +#include "nv-vgpu-vfio-interface.h" +#endif + +#if defined(NV_SEQ_READ_ITER_PRESENT) +#include +#include +#endif + +static void +nv_check_and_exclude_gpu( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + char *uuid_str; + + uuid_str = rm_get_gpu_uuid(sp, nv); + if (uuid_str == NULL) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Unable to read UUID"); + return; + } + + if (nv_is_uuid_in_gpu_exclusion_list(uuid_str)) + { + NV_STATUS rm_status = rm_exclude_adapter(sp, nv); + if (rm_status != NV_OK) + { + NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, rm_status, + "Failed to exclude GPU %s", uuid_str); + goto done; + } + nv->flags |= NV_FLAG_EXCLUDE; + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Excluded GPU %s successfully\n", + uuid_str); + } + +done: + os_free_mem(uuid_str); +} + +static NvBool nv_treat_missing_irq_as_error(void) +{ +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + return (nv_get_hypervisor_type() != OS_HYPERVISOR_HYPERV); +#else + return NV_TRUE; +#endif +} + +static void nv_init_dynamic_power_management +( + nvidia_stack_t *sp, + struct pci_dev *pci_dev +) +{ + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NV_STATE_PTR(nvl); + char filename[50]; + int ret; + NvBool pr3_acpi_method_present = NV_FALSE; + + nvl->sysfs_config_file = NULL; + + ret = snprintf(filename, sizeof(filename), + "/sys/bus/pci/devices/%04x:%02x:%02x.0/config", + NV_PCI_DOMAIN_NUMBER(pci_dev), + NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev)); + if (ret > 0 || ret < sizeof(filename)) + { + struct file *file = filp_open(filename, O_RDONLY, 0); + if (!IS_ERR(file)) + { +#if defined(NV_SEQ_READ_ITER_PRESENT) + /* + * Sanity check for confirming if file path is mounted over + * sysfs file system. + */ + if ((file->f_inode != NULL) && (file->f_inode->i_sb != NULL) && + (strcmp(file->f_inode->i_sb->s_id, "sysfs") == 0)) + { + struct seq_file *sf = file->private_data; + + /* + * Sanity check for confirming if 'file->private_data' + * actually points to 'struct seq_file'. + */ + if ((sf != NULL) && (sf->file == file) && (sf->op == NULL)) + { + struct kernfs_open_file *of = sf->private; + + /* + * Sanity check for confirming if 'sf->private' + * actually points to 'struct kernfs_open_file'. 
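+ *
+ * Only when every back-pointer in the chain is consistent
+ * (file -> seq_file -> kernfs_open_file -> file) is the handle cached
+ * in nvl->sysfs_config_file; otherwise the file is closed again below.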
+ */ + if ((of != NULL) && (of->file == file) && + (of->seq_file == sf)) + { + nvl->sysfs_config_file = file; + } + } + } + + if (nvl->sysfs_config_file == NULL) + { + filp_close(file, NULL); + } +#else + nvl->sysfs_config_file = file; +#endif + } + } + + if (nv_get_hypervisor_type() != OS_HYPERVISOR_UNKNOWN) + { + pr3_acpi_method_present = nv_acpi_power_resource_method_present(pci_dev); + } + else if (pci_dev->bus && pci_dev->bus->self) + { + pr3_acpi_method_present = nv_acpi_power_resource_method_present(pci_dev->bus->self); + } + + rm_init_dynamic_power_management(sp, nv, pr3_acpi_method_present); +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +/* find nvidia devices and set initial state */ +static int +nv_pci_probe +( + struct pci_dev *pci_dev, + const struct pci_device_id *id_table +) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = NULL; + unsigned int i, j; + int flags = 0; + nvidia_stack_t *sp = NULL; + NvBool prev_nv_ats_supported = nv_ats_supported; + NV_STATUS status; + NvBool last_bar_64bit = NV_FALSE; + + nv_printf(NV_DBG_SETUP, "NVRM: probing 0x%x 0x%x, class 0x%x\n", + pci_dev->vendor, pci_dev->device, pci_dev->class); + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return -1; + } + + +#ifdef NV_PCI_SRIOV_SUPPORT + if (pci_dev->is_virtfn) + { +#if defined(NV_VGPU_KVM_BUILD) + nvl = pci_get_drvdata(pci_dev->physfn); + if (!nvl) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Aborting probe for VF %04x:%02x:%02x.%x " + "since PF is not bound to nvidia driver.\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + goto failed; + } + + if (pci_dev->dev.bus->iommu_ops == NULL) + { + nv = NV_STATE_PTR(nvl); + if (rm_is_iommu_needed_for_sriov(sp, nv)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Aborting probe for VF %04x:%02x:%02x.%x " + "since IOMMU is not present on the system.\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + goto failed; + } + } + + if (nvidia_vgpu_vfio_probe(pci_dev) != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Failed to register device to vGPU VFIO module"); + goto failed; + } + + nv_kmem_cache_free_stack(sp); + return 0; +#else + nv_printf(NV_DBG_ERRORS, "NVRM: Ignoring probe for VF %04x:%02x:%02x.%x ", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + + goto failed; +#endif /* NV_VGPU_KVM_BUILD */ + } +#endif /* NV_PCI_SRIOV_SUPPORT */ + + + if (!rm_is_supported_pci_device( + (pci_dev->class >> 16) & 0xFF, + (pci_dev->class >> 8) & 0xFF, + pci_dev->vendor, + pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device, + NV_FALSE /* print_legacy_warning */)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: ignoring the legacy GPU %04x:%02x:%02x.%x\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + goto failed; + } + + num_probed_nv_devices++; + + if (pci_enable_device(pci_dev) != 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: pci_enable_device failed, aborting\n"); + goto failed; + } + + if ((pci_dev->irq == 0 && !pci_find_capability(pci_dev, PCI_CAP_ID_MSIX)) + && nv_treat_missing_irq_as_error()) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Can't find an IRQ for your NVIDIA card!\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: Please check your BIOS settings.\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: 
[Plug & Play OS] should be set to NO\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: [Assign IRQ to VGA] should be set to YES \n"); + goto failed; + } + + for (i = 0, j = 0; i < NVRM_PCICFG_NUM_BARS && j < NV_GPU_NUM_BARS; i++) + { + if (NV_PCI_RESOURCE_VALID(pci_dev, i)) + { +#if defined(NV_PCI_MAX_MMIO_BITS_SUPPORTED) + if ((NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_TYPE_64) && + ((NV_PCI_RESOURCE_START(pci_dev, i) >> NV_PCI_MAX_MMIO_BITS_SUPPORTED))) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: This is a 64-bit BAR mapped above %dGB by the system\n" + "NVRM: BIOS or the %s kernel. This PCI I/O region assigned\n" + "NVRM: to your NVIDIA device is not supported by the kernel.\n" + "NVRM: BAR%d is %dM @ 0x%llx (PCI:%04x:%02x:%02x.%x)\n", + (1 << (NV_PCI_MAX_MMIO_BITS_SUPPORTED - 30)), + NV_KERNEL_NAME, i, + (NV_PCI_RESOURCE_SIZE(pci_dev, i) >> 20), + (NvU64)NV_PCI_RESOURCE_START(pci_dev, i), + NV_PCI_DOMAIN_NUMBER(pci_dev), + NV_PCI_BUS_NUMBER(pci_dev), NV_PCI_SLOT_NUMBER(pci_dev), + PCI_FUNC(pci_dev->devfn)); + goto failed; + } +#endif + if ((NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_TYPE_64) && + (NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_MEM_PREFETCH)) + { + struct pci_dev *bridge = pci_dev->bus->self; + NvU32 base_upper, limit_upper; + + last_bar_64bit = NV_TRUE; + + if (bridge == NULL) + goto next_bar; + + pci_read_config_dword(pci_dev, NVRM_PCICFG_BAR_OFFSET(i) + 4, + &base_upper); + if (base_upper == 0) + goto next_bar; + + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, + &base_upper); + pci_read_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, + &limit_upper); + + if ((base_upper != 0) && (limit_upper != 0)) + goto next_bar; + + nv_printf(NV_DBG_ERRORS, + "NVRM: This is a 64-bit BAR mapped above 4GB by the system\n" + "NVRM: BIOS or the %s kernel, but the PCI bridge\n" + "NVRM: immediately upstream of this GPU does not define\n" + "NVRM: a matching prefetchable memory window.\n", + NV_KERNEL_NAME); + nv_printf(NV_DBG_ERRORS, + "NVRM: This may be due to a known Linux kernel bug. Please\n" + "NVRM: see the README section on 64-bit BARs for additional\n" + "NVRM: information.\n"); + goto failed; + } + +next_bar: + // + // If we are here, then we have found a valid BAR -- 32 or 64-bit. + // + j++; + continue; + } + + // + // If last_bar_64bit is "true" then, we are looking at the 2nd (upper) + // half of the 64-bit BAR. This is typically all 0s which looks invalid + // but it's normal and not a problem and we can ignore it and continue. + // + if (last_bar_64bit) + { + last_bar_64bit = NV_FALSE; + continue; + } + + // Invalid 32 or 64-bit BAR. + nv_printf(NV_DBG_ERRORS, + "NVRM: This PCI I/O region assigned to your NVIDIA device is invalid:\n" + "NVRM: BAR%d is %dM @ 0x%llx (PCI:%04x:%02x:%02x.%x)\n", i, + (NV_PCI_RESOURCE_SIZE(pci_dev, i) >> 20), + (NvU64)NV_PCI_RESOURCE_START(pci_dev, i), + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + goto failed; + } + + if (!request_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS), + NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS), + nv_device_name)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: request_mem_region failed for %dM @ 0x%llx. 
This can\n" + "NVRM: occur when a driver such as rivatv is loaded and claims\n" + "NVRM: ownership of the device's registers.\n", + (NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS) >> 20), + (NvU64)NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS)); + goto failed; + } + + NV_KMALLOC(nvl, sizeof(nv_linux_state_t)); + if (nvl == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate memory\n"); + goto err_not_supported; + } + + os_mem_set(nvl, 0, sizeof(nv_linux_state_t)); + + nv = NV_STATE_PTR(nvl); + + pci_set_drvdata(pci_dev, (void *)nvl); + + /* default to 32-bit PCI bus address space */ + pci_dev->dma_mask = 0xffffffffULL; + + nvl->dev = &pci_dev->dev; + nvl->pci_dev = pci_dev; + nvl->dma_dev.dev = nvl->dev; + + nv->pci_info.vendor_id = pci_dev->vendor; + nv->pci_info.device_id = pci_dev->device; + nv->subsystem_id = pci_dev->subsystem_device; + nv->subsystem_vendor = pci_dev->subsystem_vendor; + nv->os_state = (void *) nvl; + nv->dma_dev = &nvl->dma_dev; + nv->pci_info.domain = NV_PCI_DOMAIN_NUMBER(pci_dev); + nv->pci_info.bus = NV_PCI_BUS_NUMBER(pci_dev); + nv->pci_info.slot = NV_PCI_SLOT_NUMBER(pci_dev); + nv->handle = pci_dev; + nv->flags |= flags; + + if (!nv_lock_init_locks(sp, nv)) + { + goto err_not_supported; + } + + nvl->all_mappings_revoked = NV_TRUE; + nvl->safe_to_mmap = NV_TRUE; + nvl->gpu_wakeup_callback_needed = NV_TRUE; + INIT_LIST_HEAD(&nvl->open_files); + + for (i = 0, j = 0; i < NVRM_PCICFG_NUM_BARS && j < NV_GPU_NUM_BARS; i++) + { + if ((NV_PCI_RESOURCE_VALID(pci_dev, i)) && + (NV_PCI_RESOURCE_FLAGS(pci_dev, i) & PCI_BASE_ADDRESS_SPACE) + == PCI_BASE_ADDRESS_SPACE_MEMORY) + { + nv->bars[j].offset = NVRM_PCICFG_BAR_OFFSET(i); + nv->bars[j].cpu_address = NV_PCI_RESOURCE_START(pci_dev, i); + nv->bars[j].size = NV_PCI_RESOURCE_SIZE(pci_dev, i); + j++; + } + } + nv->regs = &nv->bars[NV_GPU_BAR_INDEX_REGS]; + nv->fb = &nv->bars[NV_GPU_BAR_INDEX_FB]; + + nv->interrupt_line = pci_dev->irq; + + NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_DISABLED); + nvl->numa_info.node_id = NUMA_NO_NODE; + + nv_init_ibmnpu_info(nv); + + + + + +#if defined(NVCPU_PPC64LE) + // Use HW NUMA support as a proxy for ATS support. This is true in the only + // PPC64LE platform where ATS is currently supported (IBM P9). + nv_ats_supported &= nv_platform_supports_numa(nvl); +#else + + + + + +#endif + if (nv_ats_supported) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "ATS supported by this GPU!\n"); + } + else + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "ATS not supported by this GPU. 
" + "Disabling ATS support for all the GPUs in the system!\n"); + } + + pci_set_master(pci_dev); + +#if defined(CONFIG_VGA_ARB) && !defined(NVCPU_PPC64LE) +#if defined(VGA_DEFAULT_DEVICE) +#if defined(NV_VGA_TRYGET_PRESENT) + vga_tryget(VGA_DEFAULT_DEVICE, VGA_RSRC_LEGACY_MASK); +#endif +#endif + vga_set_legacy_decoding(pci_dev, VGA_RSRC_NONE); +#endif + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping nv_pci_probe\n"); + goto err_not_supported; + } + + if ((rm_is_supported_device(sp, nv)) != NV_OK) + goto err_not_supported; + + if (!rm_init_private_state(sp, nv)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "rm_init_private_state() failed!\n"); + goto err_zero_dev; + } + + nv_printf(NV_DBG_INFO, + "NVRM: PCI:%04x:%02x:%02x.%x (%04x:%04x): BAR0 @ 0x%llx (%lluMB)\n", + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot, + PCI_FUNC(pci_dev->devfn), nv->pci_info.vendor_id, nv->pci_info.device_id, + nv->regs->cpu_address, (nv->regs->size >> 20)); + nv_printf(NV_DBG_INFO, + "NVRM: PCI:%04x:%02x:%02x.%x (%04x:%04x): BAR1 @ 0x%llx (%lluMB)\n", + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot, + PCI_FUNC(pci_dev->devfn), nv->pci_info.vendor_id, nv->pci_info.device_id, + nv->fb->cpu_address, (nv->fb->size >> 20)); + + num_nv_devices++; + + /* + * The newly created nvl object is added to the nv_linux_devices global list + * only after all the initialization operations for that nvl object are + * completed, so as to protect against simultaneous lookup operations which + * may discover a partially initialized nvl object in the list + */ + LOCK_NV_LINUX_DEVICES(); + + nv_linux_add_device_locked(nvl); + + UNLOCK_NV_LINUX_DEVICES(); + + if (nvidia_frontend_add_device((void *)&nv_fops, nvl) != 0) + goto err_remove_device; + +#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT) + pm_vt_switch_required(nvl->dev, NV_TRUE); +#endif + + nv_init_dynamic_power_management(sp, pci_dev); + + nv_procfs_add_gpu(nvl); + + /* Parse and set any per-GPU registry keys specified. 
*/ + nv_parse_per_device_option_string(sp); + +#if defined(NV_VGPU_KVM_BUILD) + if (nvidia_vgpu_vfio_probe(nvl->pci_dev) != NV_OK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to register device to vGPU VFIO module"); + nvidia_frontend_remove_device((void *)&nv_fops, nvl); + goto err_remove_device; + } +#endif + + nv_check_and_exclude_gpu(sp, nv); + + rm_set_rm_firmware_requested(sp, nv); + +#if defined(DPM_FLAG_NO_DIRECT_COMPLETE) + dev_pm_set_driver_flags(nvl->dev, DPM_FLAG_NO_DIRECT_COMPLETE); +#elif defined(DPM_FLAG_NEVER_SKIP) + dev_pm_set_driver_flags(nvl->dev, DPM_FLAG_NEVER_SKIP); +#endif + + nv_kmem_cache_free_stack(sp); + + return 0; + +err_remove_device: + LOCK_NV_LINUX_DEVICES(); + nv_linux_remove_device_locked(nvl); + UNLOCK_NV_LINUX_DEVICES(); + rm_cleanup_dynamic_power_management(sp, nv); +err_zero_dev: + rm_free_private_state(sp, nv); +err_not_supported: + nv_ats_supported = prev_nv_ats_supported; + nv_destroy_ibmnpu_info(nv); + nv_lock_destroy_locks(sp, nv); + if (nvl != NULL) + { + NV_KFREE(nvl, sizeof(nv_linux_state_t)); + } + release_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS), + NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS)); + NV_PCI_DISABLE_DEVICE(pci_dev); + pci_set_drvdata(pci_dev, NULL); +failed: + nv_kmem_cache_free_stack(sp); + return -1; +} + +static void +nv_pci_remove(struct pci_dev *pci_dev) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + nvidia_stack_t *sp = NULL; + + nv_printf(NV_DBG_SETUP, "NVRM: removing GPU %04x:%02x:%02x.%x\n", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + + +#ifdef NV_PCI_SRIOV_SUPPORT + if (pci_dev->is_virtfn) + { +#if defined(NV_VGPU_KVM_BUILD) + /* Arg 2 == NV_TRUE means that the PCI device should be removed */ + nvidia_vgpu_vfio_remove(pci_dev, NV_TRUE); +#endif /* NV_VGPU_KVM_BUILD */ + return; + } +#endif /* NV_PCI_SRIOV_SUPPORT */ + + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return; + } + + LOCK_NV_LINUX_DEVICES(); + nvl = pci_get_drvdata(pci_dev); + if (!nvl || (nvl->pci_dev != pci_dev)) + { + goto done; + } + + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + + /* + * Sanity check: A removed device shouldn't have a non-zero usage_count. + * For eGPU, fall off the bus along with clients active is a valid scenario. + * Hence skipping the sanity check for eGPU. + */ + if ((NV_ATOMIC_READ(nvl->usage_count) != 0) && !(nv->is_external_gpu)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Attempting to remove minor device %u with non-zero usage count!\n", + nvl->minor_num); + + /* + * We can't return from this function without corrupting state, so we wait for + * the usage count to go to zero. + */ + while (NV_ATOMIC_READ(nvl->usage_count) != 0) + { + + /* + * While waiting, release the locks so that other threads can make + * forward progress. 
+ */ + up(&nvl->ldata_lock); + UNLOCK_NV_LINUX_DEVICES(); + + os_delay(500); + + /* Re-acquire the locks before checking again */ + LOCK_NV_LINUX_DEVICES(); + nvl = pci_get_drvdata(pci_dev); + if (!nvl) + { + /* The device was not found, which should not happen */ + nv_printf(NV_DBG_ERRORS, + "NVRM: Failed removal of minor device %u!\n", + nvl->minor_num); + WARN_ON(1); + goto done; + } + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + } + + nv_printf(NV_DBG_ERRORS, + "NVRM: Continuing with GPU removal for minor device %u\n", + nvl->minor_num); + } + + rm_check_for_gpu_surprise_removal(sp, nv); + + nv_linux_remove_device_locked(nvl); + + /* Remove proc entry for this GPU */ + nv_procfs_remove_gpu(nvl); + + rm_cleanup_dynamic_power_management(sp, nv); + + nv->removed = NV_TRUE; + + UNLOCK_NV_LINUX_DEVICES(); + +#if defined(NV_PM_VT_SWITCH_REQUIRED_PRESENT) + pm_vt_switch_unregister(&pci_dev->dev); +#endif + +#if defined(NV_VGPU_KVM_BUILD) + /* Arg 2 == NV_TRUE means that the PCI device should be removed */ + nvidia_vgpu_vfio_remove(pci_dev, NV_TRUE); +#endif + + /* Update the frontend data structures */ + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + nvidia_frontend_remove_device((void *)&nv_fops, nvl); + } + + if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) || (nv->flags & NV_FLAG_OPEN)) + { + nv_acpi_unregister_notifier(nvl); + if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + rm_disable_gpu_state_persistence(sp, nv); + } + nv_shutdown_adapter(sp, nv, nvl); + nv_dev_free_stacks(nvl); + } + + if (nvl->sysfs_config_file != NULL) + { + filp_close(nvl->sysfs_config_file, NULL); + nvl->sysfs_config_file = NULL; + } + + nv_unregister_ibmnpu_devices(nv); + nv_destroy_ibmnpu_info(nv); + + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + nv_lock_destroy_locks(sp, nv); + } + + num_probed_nv_devices--; + + pci_set_drvdata(pci_dev, NULL); + + rm_i2c_remove_adapters(sp, nv); + rm_free_private_state(sp, nv); + release_mem_region(NV_PCI_RESOURCE_START(pci_dev, NV_GPU_BAR_INDEX_REGS), + NV_PCI_RESOURCE_SIZE(pci_dev, NV_GPU_BAR_INDEX_REGS)); + + num_nv_devices--; + + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + NV_PCI_DISABLE_DEVICE(pci_dev); + NV_KFREE(nvl, sizeof(nv_linux_state_t)); + } + else + { + up(&nvl->ldata_lock); + } + + nv_kmem_cache_free_stack(sp); + return; + +done: + UNLOCK_NV_LINUX_DEVICES(); + nv_kmem_cache_free_stack(sp); +} + +static void +nv_pci_shutdown(struct pci_dev *pci_dev) +{ + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + + if ((nvl != NULL) && nvl->is_forced_shutdown) + { + nvl->is_forced_shutdown = NV_FALSE; + return; + } + + /* pci_clear_master is not defined for !CONFIG_PCI */ +#ifdef CONFIG_PCI + pci_clear_master(pci_dev); +#endif + + /* SHH HW mandates 1us delay to realise the effects of + * Bus Mater Enable(BME) disable. Adding 1us delay for + * all the chips as the delay is not in the data path + * and not big. Creating HAL for this would be a overkill. + */ + udelay(1); +} + +/*! + * @brief This function accepts pci information corresponding to a GPU + * and returns a reference to the nv_linux_state_t corresponding to that GPU. + * + * @param[in] domain Pci domain number for the GPU to be found. + * @param[in] bus Pci bus number for the GPU to be found. + * @param[in] slot Pci slot number for the GPU to be found. + * @param[in] function Pci function number for the GPU to be found. + * + * @return Pointer to nv_linux_state_t for the GPU if it is found, or NULL otherwise. 
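+ *
+ * @note The nv_linux_devices list is walked under LOCK_NV_LINUX_DEVICES();
+ *       the lock is released again before the pointer is returned.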
+ */ +nv_linux_state_t * find_pci(NvU32 domain, NvU8 bus, NvU8 slot, NvU8 function) +{ + nv_linux_state_t *nvl = NULL; + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + + if (nv->pci_info.domain == domain && + nv->pci_info.bus == bus && + nv->pci_info.slot == slot && + nv->pci_info.function == function) + { + break; + } + } + + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +int nvidia_dev_get_pci_info(const NvU8 *uuid, struct pci_dev **pci_dev_out, + NvU64 *dma_start, NvU64 *dma_limit) +{ + nv_linux_state_t *nvl; + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return -ENODEV; + + *pci_dev_out = nvl->pci_dev; + *dma_start = nvl->dma_dev.addressable_range.start; + *dma_limit = nvl->dma_dev.addressable_range.limit; + + up(&nvl->ldata_lock); + + return 0; +} + +NvU8 nv_find_pci_capability(struct pci_dev *pci_dev, NvU8 capability) +{ + u16 status = 0; + u8 cap_ptr = 0, cap_id = 0xff; + + pci_read_config_word(pci_dev, PCI_STATUS, &status); + status &= PCI_STATUS_CAP_LIST; + if (!status) + return 0; + + switch (pci_dev->hdr_type) { + case PCI_HEADER_TYPE_NORMAL: + case PCI_HEADER_TYPE_BRIDGE: + pci_read_config_byte(pci_dev, PCI_CAPABILITY_LIST, &cap_ptr); + break; + default: + return 0; + } + + do { + cap_ptr &= 0xfc; + pci_read_config_byte(pci_dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); + if (cap_id == capability) + return cap_ptr; + pci_read_config_byte(pci_dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr); + } while (cap_ptr && cap_id != 0xff); + + return 0; +} + +/* make sure the pci_driver called probe for all of our devices. + * we've seen cases where rivafb claims the device first and our driver + * doesn't get called. + */ +int +nv_pci_count_devices(void) +{ + struct pci_dev *pci_dev; + int count = 0; + + if (NVreg_RegisterPCIDriver == 0) + { + return 0; + } + + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL); + while (pci_dev) + { + if (rm_is_supported_pci_device( + PCI_BASE_CLASS_DISPLAY, + PCI_CLASS_DISPLAY_VGA & 0xFF, + pci_dev->vendor, + pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device, + NV_TRUE /* print_legacy_warning */)) + { + count++; + } + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pci_dev); + } + + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, NULL); + while (pci_dev) + { + if (rm_is_supported_pci_device( + (pci_dev->class >> 16) & 0xFF, + (pci_dev->class >> 8) & 0xFF, + pci_dev->vendor, + pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device, + NV_TRUE /* print_legacy_warning */)) + { + count++; + } + pci_dev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pci_dev); + } + + return count; +} + +#if defined(NV_PCI_ERROR_RECOVERY) +static pci_ers_result_t +nv_pci_error_detected( + struct pci_dev *pci_dev, + nv_pci_channel_state_t error +) +{ + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + + if ((nvl == NULL) || (nvl->pci_dev != pci_dev)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid device!\n", __FUNCTION__); + return PCI_ERS_RESULT_NONE; + } + + /* + * Tell Linux to continue recovery of the device. The kernel will enable + * MMIO for the GPU and call the mmio_enabled callback. 
+ */ + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static pci_ers_result_t +nv_pci_mmio_enabled( + struct pci_dev *pci_dev +) +{ + NV_STATUS status = NV_OK; + nv_stack_t *sp = NULL; + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NULL; + + if ((nvl == NULL) || (nvl->pci_dev != pci_dev)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: invalid device!\n", __FUNCTION__); + goto done; + } + + nv = NV_STATE_PTR(nvl); + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: failed to allocate stack!\n", + __FUNCTION__); + goto done; + } + + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "A fatal error was detected.\n"); + + /* + * MMIO should be re-enabled now. If we still get bad reads, there's + * likely something wrong with the adapter itself that will require a + * reset. This should let us know whether the GPU has completely fallen + * off the bus or just did something the host didn't like. + */ + status = rm_is_supported_device(sp, nv); + if (status != NV_OK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "The kernel has enabled MMIO for the device,\n" + "NVRM: but it still appears unreachable. The device\n" + "NVRM: will not function properly until it is reset.\n"); + } + + status = rm_log_gpu_crash(sp, nv); + if (status != NV_OK) + { + NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, status, + "Failed to log crash data\n"); + goto done; + } + +done: + if (sp != NULL) + { + nv_kmem_cache_free_stack(sp); + } + + /* + * Tell Linux to abandon recovery of the device. The kernel might be able + * to recover the device, but RM and clients don't yet support that. + */ + return PCI_ERS_RESULT_DISCONNECT; +} + +struct pci_error_handlers nv_pci_error_handlers = { + .error_detected = nv_pci_error_detected, + .mmio_enabled = nv_pci_mmio_enabled, +}; +#endif + +#if defined(CONFIG_PM) +extern struct dev_pm_ops nv_pm_ops; +#endif + +struct pci_driver nv_pci_driver = { + .name = MODULE_NAME, + .id_table = nv_pci_table, + .probe = nv_pci_probe, + .remove = nv_pci_remove, + .shutdown = nv_pci_shutdown, +#if defined(CONFIG_PM) + .driver.pm = &nv_pm_ops, +#endif +#if defined(NV_PCI_ERROR_RECOVERY) + .err_handler = &nv_pci_error_handlers, +#endif +}; + +void nv_pci_unregister_driver(void) +{ + if (NVreg_RegisterPCIDriver == 0) + { + return; + } + return pci_unregister_driver(&nv_pci_driver); +} + +int nv_pci_register_driver(void) +{ + if (NVreg_RegisterPCIDriver == 0) + { + return 0; + } + return pci_register_driver(&nv_pci_driver); +} diff --git a/kernel-open/nvidia/nv-procfs-utils.c b/kernel-open/nvidia/nv-procfs-utils.c new file mode 100644 index 000000000..b9d8524ad --- /dev/null +++ b/kernel-open/nvidia/nv-procfs-utils.c @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if defined(CONFIG_PROC_FS) + +#include "nv-procfs-utils.h" + +void +nv_procfs_unregister_all(struct proc_dir_entry *entry, struct proc_dir_entry *delimiter) +{ +#if defined(NV_PROC_REMOVE_PRESENT) + proc_remove(entry); +#else + while (entry) + { + struct proc_dir_entry *next = entry->next; + if (entry->subdir) + nv_procfs_unregister_all(entry->subdir, delimiter); + remove_proc_entry(entry->name, entry->parent); + if (entry == delimiter) + break; + entry = next; + } +#endif +} +#endif + diff --git a/kernel-open/nvidia/nv-procfs.c b/kernel-open/nvidia/nv-procfs.c new file mode 100644 index 000000000..01009907b --- /dev/null +++ b/kernel-open/nvidia/nv-procfs.c @@ -0,0 +1,1477 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(CONFIG_PROC_FS) + +#include "nv-procfs.h" +#include "nv_compiler.h" +#include "nv-reg.h" +#include "conftest/patches.h" +#include "nv-ibmnpu.h" + +#define NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(name) \ + NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nv_system_pm_lock) + +static const char *__README_warning = \ + "The NVIDIA graphics driver tries to detect potential problems\n" + "with the host system and warns about them using the system's\n" + "logging mechanisms. Important warning message are also logged\n" + "to dedicated text files in this directory.\n"; + +static const char *__README_patches = \ + "The NVIDIA graphics driver's kernel interface files can be\n" + "patched to improve compatibility with new Linux kernels or to\n" + "fix bugs in these files. 
When applied, each official patch\n" + "provides a short text file with a short description of itself\n" + "in this directory.\n"; + +static struct proc_dir_entry *proc_nvidia; +static struct proc_dir_entry *proc_nvidia_warnings; +static struct proc_dir_entry *proc_nvidia_patches; +static struct proc_dir_entry *proc_nvidia_gpus; + +extern char *NVreg_RegistryDwords; +extern char *NVreg_RegistryDwordsPerDevice; +extern char *NVreg_RmMsg; +extern char *NVreg_GpuBlacklist; +extern char *NVreg_TemporaryFilePath; +extern char *NVreg_ExcludedGpus; + +static char nv_registry_keys[NV_MAX_REGISTRY_KEYS_LENGTH]; + +#if defined(CONFIG_PM) +static nv_pm_action_depth_t nv_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT; +#endif + +static int nv_procfs_read_registry(struct seq_file *s, void *v); + +#define NV_NUMA_STATUS_MSG_LEN (32) +#define NV_PROC_WRITE_BUFFER_SIZE (512 * PAGE_SIZE) + +typedef struct +{ + nvidia_stack_t *sp; + struct semaphore sp_lock; + + nv_state_t *nv; + + void *data; + off_t off; +} nv_procfs_private_t; + +/* + * Status messages directly corresponding to states in nv_numa_states_t. + */ +static const char *nv_numa_status_messages[] = +{ + "disabled", + "offline", + "online_in_progress", + "online", + "online_failed", + "offline_in_progress", + "offline_failed", +}; + +static int +nv_procfs_read_gpu_info( + struct seq_file *s, + void *v +) +{ + nv_state_t *nv = s->private; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct pci_dev *pci_dev = nvl->pci_dev; + char *type; + const char *name; + char *uuid; + char vbios_version[15]; + nvidia_stack_t *sp = NULL; + char firmware_version[64] = { 0 }; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 0; + } + + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE) != NV_OK) + { + nv_kmem_cache_free_stack(sp); + return 0; + } + + name = rm_get_device_name(pci_dev->device, + pci_dev->subsystem_vendor, + pci_dev->subsystem_device); + + seq_printf(s, "Model: \t\t %s\n", name); + seq_printf(s, "IRQ: \t\t %d\n", nv->interrupt_line); + + uuid = rm_get_gpu_uuid(sp, nv); + + if (uuid != NULL) + { + seq_printf(s, "GPU UUID: \t %s\n", uuid); + os_free_mem(uuid); + uuid = NULL; + } + + rm_get_vbios_version(sp, nv, vbios_version); + + seq_printf(s, "Video BIOS: \t %s\n", vbios_version); + + if (nv_find_pci_capability(pci_dev, PCI_CAP_ID_EXP)) + type = "PCIe"; + else + type = "PCI"; + seq_printf(s, "Bus Type: \t %s\n", type); + + seq_printf(s, "DMA Size: \t %d bits\n", + nv_count_bits(pci_dev->dma_mask)); + seq_printf(s, "DMA Mask: \t 0x%llx\n", pci_dev->dma_mask); + seq_printf(s, "Bus Location: \t %04x:%02x:%02x.%x\n", + nv->pci_info.domain, nv->pci_info.bus, + nv->pci_info.slot, PCI_FUNC(pci_dev->devfn)); + seq_printf(s, "Device Minor: \t %u\n", nvl->minor_num); + + rm_get_firmware_version(sp, nv, firmware_version, sizeof(firmware_version)); + if (firmware_version[0] != '\0') + { + seq_printf(s, "GPU Firmware: \t %s\n", firmware_version); + } + +#if defined(DEBUG) + do + { + int j; + for (j = 0; j < NV_GPU_NUM_BARS; j++) + { + seq_printf(s, "BAR%u: \t\t 0x%llx (%lluMB)\n", + j, nv->bars[j].cpu_address, (nv->bars[j].size >> 20)); + } + } while (0); +#endif + + seq_printf(s, "GPU Excluded:\t %s\n", + ((nv->flags & NV_FLAG_EXCLUDE) != 0) ? 
"Yes" : "No"); + + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + + nv_kmem_cache_free_stack(sp); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(gpu_info); + +static int +nv_procfs_read_power( + struct seq_file *s, + void *v +) +{ + nv_state_t *nv = s->private; + nvidia_stack_t *sp = NULL; + const char *vidmem_power_status; + const char *dynamic_power_status; + const char *gc6_support; + const char *gcoff_support; + NvU32 limitRated, limitCurr; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 0; + } + + dynamic_power_status = rm_get_dynamic_power_management_status(sp, nv); + seq_printf(s, "Runtime D3 status: %s\n", dynamic_power_status); + + vidmem_power_status = rm_get_vidmem_power_status(sp, nv); + seq_printf(s, "Video Memory: %s\n\n", vidmem_power_status); + + seq_printf(s, "GPU Hardware Support:\n"); + gc6_support = rm_get_gpu_gcx_support(sp, nv, NV_TRUE); + seq_printf(s, " Video Memory Self Refresh: %s\n", gc6_support); + + gcoff_support = rm_get_gpu_gcx_support(sp, nv, NV_FALSE); + seq_printf(s, " Video Memory Off: %s\n\n", gcoff_support); + + seq_printf(s, "Power Limits:\n"); + status = rm_get_clientnvpcf_power_limits(sp, nv, &limitRated, &limitCurr); + if (status != NV_OK) + { + seq_printf(s, " Default: N/A milliwatts\n"); + seq_printf(s, " GPU Boost: N/A milliwatts\n"); + } + else + { + seq_printf(s, " Default: %u milliwatts\n", limitRated); + seq_printf(s, " GPU Boost: %u milliwatts\n", limitCurr); + } + + nv_kmem_cache_free_stack(sp); + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(power); + +static int +nv_procfs_read_version( + struct seq_file *s, + void *v +) +{ + seq_printf(s, "NVRM version: %s\n", pNVRM_ID); + seq_printf(s, "GCC version: %s\n", NV_COMPILER); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(version); + +static void +nv_procfs_close_file( + nv_procfs_private_t *nvpp +) +{ + nvidia_stack_t *sp; + + if (nvpp->data != NULL) + { + os_free_mem(nvpp->data); + } + + sp = nvpp->sp; + if (sp != NULL) + { + nv_kmem_cache_free_stack(sp); + } + + NV_KFREE(nvpp, sizeof(*nvpp)); +} + +static int +nv_procfs_open_file( + struct inode *inode, + struct file *file, + nv_procfs_private_t **pnvpp +) +{ + int retval = 0; + NV_STATUS status; + nv_procfs_private_t *nvpp = NULL; + nvidia_stack_t *sp = NULL; + + NV_KMALLOC(nvpp, sizeof(nv_procfs_private_t)); + if (nvpp == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate procfs private!\n"); + return -ENOMEM; + } + memset(nvpp, 0, sizeof(*nvpp)); + + NV_INIT_MUTEX(&nvpp->sp_lock); + + nvpp->nv = NV_PDE_DATA(inode); + + if (0 == (file->f_mode & FMODE_WRITE)) + goto done; + + retval = nv_kmem_cache_alloc_stack(&sp); + if (retval != 0) + { + goto done; + } + + status = os_alloc_mem((void **)&nvpp->data, NV_PROC_WRITE_BUFFER_SIZE); + if (status != NV_OK) + { + retval = -ENOMEM; + goto done; + } + + os_mem_set((void *)nvpp->data, 0, NV_PROC_WRITE_BUFFER_SIZE); + nvpp->sp = sp; + +done: + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + *pnvpp = nvpp; + + return 0; +} + +static int +nv_procfs_open_registry( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_registry, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + 
nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_registry( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv; + nv_linux_state_t *nvl = NULL; + nvidia_stack_t *sp = nvpp->sp; + char *key_name, *key_value, *registry_keys; + size_t key_len, len; + long count; + NV_STATUS rm_status; + int rc = 0; + + if (0 != nvpp->off) + { + nv = nvpp->nv; + if (nv != NULL) + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + key_value = (char *)nvpp->data; + + key_name = strsep(&key_value, "="); + + if (NULL == key_name || NULL == key_value) + { + rc = -EINVAL; + goto done; + } + + key_len = (strlen(key_name) + 1); + count = (nvpp->off - key_len); + + if (count <= 0) + { + rc = -EINVAL; + goto done; + } + + rm_status = rm_write_registry_binary(sp, nv, key_name, + key_value, count); + if (rm_status != NV_OK) + { + rc = -EFAULT; + goto done; + } + + registry_keys = ((nvl != NULL) ? + nvl->registry_keys : nv_registry_keys); + if (strstr(registry_keys, key_name) != NULL) + goto done; + len = strlen(registry_keys); + + if ((len + key_len + 2) <= NV_MAX_REGISTRY_KEYS_LENGTH) + { + if (len != 0) + strcat(registry_keys, ", "); + strcat(registry_keys, key_name); + } + } + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return rc; +} + +static int +nv_procfs_read_params( + struct seq_file *s, + void *v +) +{ + unsigned int i; + nv_parm_t *entry; + + for (i = 0; (entry = &nv_parms[i])->name != NULL; i++) + seq_printf(s, "%s: %u\n", entry->name, *entry->data); + + seq_printf(s, "RegistryDwords: \"%s\"\n", + (NVreg_RegistryDwords != NULL) ? NVreg_RegistryDwords : ""); + seq_printf(s, "RegistryDwordsPerDevice: \"%s\"\n", + (NVreg_RegistryDwordsPerDevice != NULL) ? NVreg_RegistryDwordsPerDevice : ""); + seq_printf(s, "RmMsg: \"%s\"\n", + (NVreg_RmMsg != NULL) ? NVreg_RmMsg : ""); + seq_printf(s, "GpuBlacklist: \"%s\"\n", + (NVreg_GpuBlacklist != NULL) ? NVreg_GpuBlacklist : ""); + seq_printf(s, "TemporaryFilePath: \"%s\"\n", + (NVreg_TemporaryFilePath != NULL) ? NVreg_TemporaryFilePath : ""); + seq_printf(s, "ExcludedGpus: \"%s\"\n", + (NVreg_ExcludedGpus != NULL) ? NVreg_ExcludedGpus : ""); + + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(params); + +static int +nv_procfs_read_registry( + struct seq_file *s, + void *v +) +{ + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NULL; + char *registry_keys; + + if (nv != NULL) + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + registry_keys = ((nvl != NULL) ? 
+ nvl->registry_keys : nv_registry_keys); + + seq_printf(s, "Binary: \"%s\"\n", registry_keys); + return 0; +} + +static ssize_t +nv_procfs_write_file( + struct file *file, + const char __user *buffer, + size_t count, + loff_t *pos +) +{ + int status = 0; + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + char *proc_buffer; + unsigned long bytes_left; + + down(&nvpp->sp_lock); + + bytes_left = (NV_PROC_WRITE_BUFFER_SIZE - nvpp->off - 1); + + if (count == 0) + { + status = -EINVAL; + goto done; + } + else if ((bytes_left == 0) || (count > bytes_left)) + { + status = -ENOSPC; + goto done; + } + + proc_buffer = &((char *)nvpp->data)[nvpp->off]; + + if (copy_from_user(proc_buffer, buffer, count)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in proc data!\n"); + status = -EFAULT; + } + else + { + nvpp->off += count; + } + + *pos = nvpp->off; + +done: + up(&nvpp->sp_lock); + + return ((status < 0) ? status : (int)count); +} + +static nv_proc_ops_t nv_procfs_registry_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_registry, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_registry, +}; + +#if defined(CONFIG_PM) +static int +nv_procfs_show_suspend_depth( + struct seq_file *m, + void *v +) +{ + seq_printf(m, "default modeset uvm\n"); + return 0; +} + +static ssize_t +nv_procfs_write_suspend_depth( + struct file *file, + const char __user *buf, + size_t count, + loff_t *pos +) +{ + char kbuf[sizeof("modeset\n")]; + unsigned i; + + if (!NV_IS_SUSER()) + { + return -EPERM; + } + + if (count < strlen("uvm") || count > sizeof(kbuf)) + { + return -EINVAL; + } + + if (copy_from_user(kbuf, buf, count)) + { + return -EFAULT; + } + + count = min(count, sizeof(kbuf) - 1); + for (i = 0; i < count && isalpha(kbuf[i]); i++); + kbuf[i] = '\0'; + + if (strcasecmp(kbuf, "uvm") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_UVM; + } + else if (strcasecmp(kbuf, "modeset") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_MODESET; + } + else if (strcasecmp(kbuf, "default") == 0) + { + nv_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT; + } + else + { + return -EINVAL; + } + + return count; +} + +static int +nv_procfs_open_suspend_depth( + struct inode *inode, + struct file *file +) +{ + return single_open(file, nv_procfs_show_suspend_depth, NULL); +} + +static nv_proc_ops_t nv_procfs_suspend_depth_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_suspend_depth, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_suspend_depth, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = single_release +}; + +static int +nv_procfs_show_suspend( + struct seq_file *m, + void *v +) +{ + seq_printf(m, "suspend hibernate resume\n"); + return 0; +} + +static ssize_t +nv_procfs_write_suspend( + struct file *file, + const char __user *buf, + size_t count, + loff_t *pos +) +{ + NV_STATUS status; + char kbuf[sizeof("hibernate\n")]; + nv_power_state_t power_state; + unsigned i; + + if (!NV_IS_SUSER()) + { + return -EPERM; + } + + if (count < strlen("resume") || count > sizeof(kbuf)) + { + return -EINVAL; + } + + if (copy_from_user(kbuf, buf, count)) + { + return -EFAULT; + } + + count = min(count, sizeof(kbuf) - 1); + for (i = 0; i < count && isalpha(kbuf[i]); i++); + kbuf[i] = '\0'; + + if (strcasecmp(kbuf, "suspend") == 0) + { + power_state = NV_POWER_STATE_IN_STANDBY; + } + else if (strcasecmp(kbuf, "hibernate") == 
0) + { + power_state = NV_POWER_STATE_IN_HIBERNATE; + } + else if (strcasecmp(kbuf, "resume") == 0) + { + power_state = NV_POWER_STATE_RUNNING; + } + else + { + return -EINVAL; + } + + status = nv_set_system_power_state(power_state, nv_pm_action_depth); + + return (status != NV_OK) ? -EIO : count; +} + +static int +nv_procfs_open_suspend( + struct inode *inode, + struct file *file +) +{ + return single_open(file, nv_procfs_show_suspend, NULL); +} + +static nv_proc_ops_t nv_procfs_suspend_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_suspend, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_suspend, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = single_release +}; + +#endif + +/* + * Forwards error to nv_log_error which exposes data to vendor callback + */ +void +exercise_error_forwarding_va( + nv_state_t *nv, + NvU32 err, + const char *fmt, + ... +) +{ + va_list arguments; + + va_start(arguments, fmt); + nv_log_error(nv, err, fmt, arguments); + va_end(arguments); +} + +static int +nv_procfs_show_exercise_error_forwarding( + struct seq_file *m, + void *v +) +{ + return 0; +} + +static int +nv_procfs_open_exercise_error_forwarding( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_show_exercise_error_forwarding, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_exercise_error_forwarding( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + char *proc_buffer = &((char *)nvpp->data)[0]; + size_t count = nvpp->off; + int i = 0, status = 0; + NvU32 xid = 0; + const NvU8 MAX_XID_DIGITS = 3; + + while (i < count && i <= MAX_XID_DIGITS && proc_buffer[i] != ',') + { + if (proc_buffer[i] < '0' || proc_buffer[i] > '9') + { + status = -EINVAL; + goto done; + } + + xid = xid * 10 + (proc_buffer[i++] - '0'); + } + + if (count > (i + 1) && proc_buffer[i] == ',') + exercise_error_forwarding_va(nv, xid, &proc_buffer[i + 1], 0xdeadbee0, + 0xdeadbee1, 0xdeadbee2, 0xdeadbee3, 0xdeadbee4, 0xdeadbee5); + else + status = -EINVAL; + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return status; +} + +static nv_proc_ops_t nv_procfs_exercise_error_forwarding_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_exercise_error_forwarding, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_RELEASE = nv_procfs_close_exercise_error_forwarding, +}; + +static int +nv_procfs_read_unbind_lock( + struct seq_file *s, + void *v +) +{ + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + down(&nvl->ldata_lock); + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + seq_printf(s, "1\n"); + } + else + { + seq_printf(s, "0\n"); + } + up(&nvl->ldata_lock); + + return 0; +} + +static int +nv_procfs_open_unbind_lock( + struct inode *inode, + struct file *file +) +{ + nv_procfs_private_t *nvpp = NULL; + int retval; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; 
+ } + + retval = single_open(file, nv_procfs_read_unbind_lock, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_unbind_lock( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nv_state_t *nv; + nvidia_stack_t *sp = nvpp->sp; + int rc = 0; + nv_linux_state_t * nvl; + int value; + + if (0 != nvpp->off) + { + nv = nvpp->nv; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (NULL == nvpp->data || NULL == nv) + { + rc = -EINVAL; + goto done; + } + + if (sscanf((char *)nvpp->data, "%u\n", &value) != 1) + { + rc = -EINVAL; + goto done; + } + + down(&nvl->ldata_lock); + if ((value == 1) && !(nv->flags & NV_FLAG_UNBIND_LOCK)) + { + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + rm_unbind_lock(sp, nv); + + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "UnbindLock acquired\n"); + } + else + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Could not acquire UnbindLock\n"); + } + } + else if ((value == 0) && (nv->flags & NV_FLAG_UNBIND_LOCK)) + { + nv->flags &= ~NV_FLAG_UNBIND_LOCK; + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "UnbindLock released\n"); + } + up(&nvl->ldata_lock); + } + +done: + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return rc; +} + +static nv_proc_ops_t nv_procfs_unbind_lock_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_unbind_lock, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_unbind_lock, +}; + +static const char* +numa_status_describe(nv_numa_status_t state) +{ + if (state < 0 || state >= NV_NUMA_STATUS_COUNT) + return "invalid"; + + return nv_numa_status_messages[state]; +} + +static NvBool +numa_is_change_allowed(nv_numa_status_t current_state, nv_numa_status_t requested) +{ + NvBool allowed = NV_TRUE; + + switch (requested) { + case NV_NUMA_STATUS_OFFLINE: + case NV_NUMA_STATUS_OFFLINE_FAILED: + allowed = (current_state == NV_NUMA_STATUS_OFFLINE_IN_PROGRESS); + break; + + /* All except Offline. */ + case NV_NUMA_STATUS_OFFLINE_IN_PROGRESS: + allowed = (current_state != NV_NUMA_STATUS_OFFLINE); + break; + + case NV_NUMA_STATUS_ONLINE: + allowed = (current_state == NV_NUMA_STATUS_ONLINE_IN_PROGRESS); + break; + + case NV_NUMA_STATUS_ONLINE_FAILED: + allowed = (current_state == NV_NUMA_STATUS_ONLINE_IN_PROGRESS) || + (current_state == NV_NUMA_STATUS_ONLINE); + break; + + case NV_NUMA_STATUS_ONLINE_IN_PROGRESS: + allowed = (current_state == NV_NUMA_STATUS_OFFLINE); + break; + + /* Fallthrough. */ + case NV_NUMA_STATUS_DISABLED: + default: + return NV_FALSE; + } + + return allowed; +} + +static NV_STATUS +numa_status_read( + nv_state_t *nv, + nv_stack_t *sp, + NvS32 *nid, + NvS32 *status, + NvU64 *numa_mem_addr, + NvU64 *numa_mem_size, + nv_offline_addresses_t *list +) +{ + NV_STATUS rm_status; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + down(&nvl->ldata_lock); + + /* + * If GPU has not been initialized but NUMA info is valid, populate + * NUMA node ID and status. Memory range and offline addresses cannot + * be read at this point so fill in dummy values. 
+ */ + if (!(nv->flags & NV_FLAG_OPEN)) + { + if (nv_platform_supports_numa(nvl)) + { + *nid = nvl->numa_info.node_id; + *status = nv_get_numa_status(nvl); + *numa_mem_addr = 0; + *numa_mem_size = 0; + memset(list, 0x0, sizeof(*list)); + } + + rm_status = NV_ERR_NOT_READY; + goto done; + } + + list->numEntries = ARRAY_SIZE(list->addresses); + + rm_status = rm_get_gpu_numa_info(sp, nv, + nid, numa_mem_addr, numa_mem_size, + list->addresses, &list->numEntries); + *status = nv_get_numa_status(nvl); + +done: + up(&nvl->ldata_lock); + return rm_status; +} + +static int +nv_procfs_read_offline_pages( + struct seq_file *s, + void *v +) +{ + NvU32 i; + int retval = 0; + NV_STATUS rm_status; + nv_ioctl_numa_info_t numa_info; + nv_procfs_private_t *nvpp = s->private; + nv_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + + rm_status = numa_status_read(nv, sp, + &numa_info.nid, + &numa_info.status, + &numa_info.numa_mem_addr, + &numa_info.numa_mem_size, + &numa_info.offline_addresses); + + if (rm_status != NV_OK) + return -EIO; + + for (i = 0; i < numa_info.offline_addresses.numEntries; ++i) + { + seq_printf(s, "%p\n", + (void*) numa_info.offline_addresses.addresses[i]); + } + + return retval; +} + +static int +nv_procfs_open_offline_pages( + struct inode *inode, + struct file *file +) +{ + int retval; + nv_procfs_private_t *nvpp = NULL; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_offline_pages, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_offline_pages( + struct inode *inode, + struct file *file +) +{ + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return 0; +} + +static int +nv_procfs_read_numa_status( + struct seq_file *s, + void *v +) +{ + int retval = 0; + NV_STATUS rm_status; + nv_ioctl_numa_info_t numa_info; + nv_procfs_private_t *nvpp = s->private; + nv_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + + rm_status = numa_status_read(nv, sp, + &numa_info.nid, + &numa_info.status, + &numa_info.numa_mem_addr, + &numa_info.numa_mem_size, + &numa_info.offline_addresses); + + if ((rm_status != NV_OK) && (rm_status != NV_ERR_NOT_READY)) + return -EIO; + + /* Note: RM clients need to read block size from sysfs. 
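+     * The content generated below looks like this (values purely
+     * illustrative; Address and Size appear only once the GPU has been
+     * initialized):
+     *
+     *   Node: 1
+     *   Status: online
+     *   Address: 280000000000
+     *   Size: fa0000000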
*/ + seq_printf(s, "Node: %d\n", numa_info.nid); + seq_printf(s, "Status: %s\n", numa_status_describe(numa_info.status)); + + if (rm_status == NV_OK) + { + seq_printf(s, "Address: %llx\n", numa_info.numa_mem_addr); + seq_printf(s, "Size: %llx\n", numa_info.numa_mem_size); + } + + return retval; +} + +static int +nv_procfs_open_numa_status( + struct inode *inode, + struct file *file +) +{ + int retval; + nv_procfs_private_t *nvpp = NULL; + + retval = nv_procfs_open_file(inode, file, &nvpp); + if (retval < 0) + { + return retval; + } + + retval = single_open(file, nv_procfs_read_numa_status, nvpp); + if (retval < 0) + { + nv_procfs_close_file(nvpp); + return retval; + } + + retval = nv_down_read_interruptible(&nv_system_pm_lock); + if (retval < 0) + { + single_release(inode, file); + nv_procfs_close_file(nvpp); + } + + return retval; +} + +static int +nv_procfs_close_numa_status( + struct inode *inode, + struct file *file +) +{ + int retval = 0; + struct seq_file *s = file->private_data; + nv_procfs_private_t *nvpp = s->private; + nvidia_stack_t *sp = nvpp->sp; + nv_state_t *nv = nvpp->nv; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + const size_t MAX_STATES = ARRAY_SIZE(nv_numa_status_messages); + nv_numa_status_t current_state = nv_get_numa_status(nvl); + char *cmd = nvpp->data; + + down(&nvl->ldata_lock); + + if (nvpp->off != 0) + { + NvU32 state; + nv_numa_status_t requested = NV_NUMA_STATUS_DISABLED; + NV_STATUS rm_status = NV_OK; + + for (state = 0; state < MAX_STATES; ++state) + { + if (strncmp(nv_numa_status_messages[state], + cmd, + NV_NUMA_STATUS_MSG_LEN) == 0) + { + requested = state; + break; + } + } + + if (requested != current_state) + { + /* Validate state transition. */ + if (!numa_is_change_allowed(current_state, requested)) + { + retval = -EINVAL; + goto done; + } + + if (requested == NV_NUMA_STATUS_OFFLINE_IN_PROGRESS) + { + /* + * If this call fails, RM is not ready to offline + * memory => retain status. + */ + rm_status = rm_gpu_numa_offline(sp, nv); + } + + if (rm_status == NV_OK) + { + retval = nv_set_numa_status(nvl, requested); + if (retval < 0) + goto done; + + if (requested == NV_NUMA_STATUS_ONLINE) + { + rm_status = rm_gpu_numa_online(sp, nv); + } + } + + retval = (rm_status == NV_OK) ? 
retval: -EBUSY; + } + } + +done: + up(&nvl->ldata_lock); + + up_read(&nv_system_pm_lock); + + single_release(inode, file); + + nv_procfs_close_file(nvpp); + + return retval; +} + +static const nv_proc_ops_t nv_procfs_numa_status_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_numa_status, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_WRITE = nv_procfs_write_file, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_numa_status, +}; + +static const nv_proc_ops_t nv_procfs_offline_pages_fops = { + NV_PROC_OPS_SET_OWNER() + .NV_PROC_OPS_OPEN = nv_procfs_open_offline_pages, + .NV_PROC_OPS_READ = seq_read, + .NV_PROC_OPS_LSEEK = seq_lseek, + .NV_PROC_OPS_RELEASE = nv_procfs_close_offline_pages, +}; + +static int +nv_procfs_read_text_file( + struct seq_file *s, + void *v +) +{ + seq_puts(s, s->private); + return 0; +} + +NV_DEFINE_SINGLE_NVRM_PROCFS_FILE(text_file); + +static void +nv_procfs_add_text_file( + struct proc_dir_entry *parent, + const char *filename, + const char *text +) +{ + NV_CREATE_PROC_FILE(filename, parent, text_file, (void *)text); +} +#endif + +void nv_procfs_add_warning( + const char *filename, + const char *text +) +{ +#if defined(CONFIG_PROC_FS) + nv_procfs_add_text_file(proc_nvidia_warnings, filename, text); +#endif +} + +int nv_procfs_init(void) +{ +#if defined(CONFIG_PROC_FS) + NvU32 i = 0; + char nv_dir_name[20]; + struct proc_dir_entry *entry; + + snprintf(nv_dir_name, sizeof(nv_dir_name), "driver/%s", nv_device_name); + + nv_dir_name[sizeof(nv_dir_name) - 1] = '\0'; + + proc_nvidia = NV_CREATE_PROC_DIR(nv_dir_name, NULL); + + if (!proc_nvidia) + goto failed; + + entry = NV_CREATE_PROC_FILE("params", proc_nvidia, params, NULL); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("registry", proc_nvidia, registry, NULL); + if (!entry) + goto failed; + +#if defined(CONFIG_PM) + entry = NV_CREATE_PROC_FILE("suspend_depth", proc_nvidia, suspend_depth, NULL); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("suspend", proc_nvidia, suspend, NULL); + if (!entry) + goto failed; +#endif + + proc_nvidia_warnings = NV_CREATE_PROC_DIR("warnings", proc_nvidia); + if (!proc_nvidia_warnings) + goto failed; + nv_procfs_add_text_file(proc_nvidia_warnings, "README", __README_warning); + + proc_nvidia_patches = NV_CREATE_PROC_DIR("patches", proc_nvidia); + if (!proc_nvidia_patches) + goto failed; + + for (i = 0; __nv_patches[i].short_description; i++) + { + nv_procfs_add_text_file(proc_nvidia_patches, + __nv_patches[i].short_description, __nv_patches[i].description); + } + + nv_procfs_add_text_file(proc_nvidia_patches, "README", __README_patches); + + entry = NV_CREATE_PROC_FILE("version", proc_nvidia, version, NULL); + if (!entry) + goto failed; + + proc_nvidia_gpus = NV_CREATE_PROC_DIR("gpus", proc_nvidia); + if (!proc_nvidia_gpus) + goto failed; +#endif + return 0; +#if defined(CONFIG_PROC_FS) +failed: + nv_procfs_unregister_all(proc_nvidia, proc_nvidia); + return -ENOMEM; +#endif +} + +void nv_procfs_exit(void) +{ +#if defined(CONFIG_PROC_FS) + nv_procfs_unregister_all(proc_nvidia, proc_nvidia); +#endif +} + +int nv_procfs_add_gpu(nv_linux_state_t *nvl) +{ +#if defined(CONFIG_PROC_FS) + nv_state_t *nv; + + /* Buffer size is 32 in order to fit the full name when PCI domain is 32 bit. 
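+     * A worst-case name such as "ffffffff:ff:1f.7" is 16 characters plus the
+     * terminating NUL, so 32 bytes leaves comfortable headroom.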
*/ + char name[32]; + struct proc_dir_entry *proc_nvidia_gpu, *entry; + + nv = NV_STATE_PTR(nvl); + + snprintf(name, sizeof(name), "%04x:%02x:%02x.%1x", + nv->pci_info.domain, nv->pci_info.bus, + nv->pci_info.slot, PCI_FUNC(nvl->pci_dev->devfn)); + + proc_nvidia_gpu = NV_CREATE_PROC_DIR(name, proc_nvidia_gpus); + if (!proc_nvidia_gpu) + goto failed; + + entry = NV_CREATE_PROC_FILE("information", proc_nvidia_gpu, gpu_info, + nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("registry", proc_nvidia_gpu, registry, nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("power", proc_nvidia_gpu, power, nv); + if (!entry) + goto failed; + + if (IS_EXERCISE_ERROR_FORWARDING_ENABLED()) + { + entry = NV_CREATE_PROC_FILE("exercise_error_forwarding", proc_nvidia_gpu, + exercise_error_forwarding, nv); + if (!entry) + goto failed; + } + + if (os_is_vgx_hyper()) + { + entry = NV_CREATE_PROC_FILE("unbindLock", proc_nvidia_gpu, unbind_lock, nv); + if (!entry) + goto failed; + } + + if (nv_get_numa_status(nvl) != NV_IOCTL_NUMA_STATUS_DISABLED) + { + entry = NV_CREATE_PROC_FILE("numa_status", proc_nvidia_gpu, numa_status, + nv); + if (!entry) + goto failed; + + entry = NV_CREATE_PROC_FILE("offline_pages", proc_nvidia_gpu, offline_pages, + nv); + if (!entry) + goto failed; + } + + nvl->proc_dir = proc_nvidia_gpu; +#endif + return 0; +#if defined(CONFIG_PROC_FS) +failed: + if (proc_nvidia_gpu) + { + nv_procfs_unregister_all(proc_nvidia_gpu, proc_nvidia_gpu); + } + return -1; +#endif +} + +void nv_procfs_remove_gpu(nv_linux_state_t *nvl) +{ +#if defined(CONFIG_PROC_FS) + nv_procfs_unregister_all(nvl->proc_dir, nvl->proc_dir); +#endif +} diff --git a/kernel-open/nvidia/nv-reg.h b/kernel-open/nvidia/nv-reg.h new file mode 100644 index 000000000..43cebbb1c --- /dev/null +++ b/kernel-open/nvidia/nv-reg.h @@ -0,0 +1,930 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RM_REG_H_ +#define _RM_REG_H_ + +#include "nvtypes.h" + +/* + * use NV_REG_STRING to stringify a registry key when using that registry key + */ + +#define __NV_REG_STRING(regkey) #regkey +#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey) + +/* + * use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition + * of registry keys in the kernel module source code. 
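+ *
+ * Illustrative usage (a sketch only; "FooBar" is a hypothetical key name, not
+ * one defined by this driver), based on the macro expansions below:
+ *
+ *   NV_DEFINE_REG_ENTRY(FooBar, 1);        => static NvU32 NVreg_FooBar = (1);
+ *   NV_DEFINE_PARAMS_TABLE_ENTRY(FooBar)   => { "FooBar", &NVreg_FooBar }
+ *
+ * (plus the module-parameter registration, where the platform supports it)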
+ */ + +#define __NV_REG_VAR(regkey) NVreg_##regkey + +#if defined(NV_MODULE_PARAMETER) +#define NV_DEFINE_REG_ENTRY(regkey, default_value) \ + static NvU32 __NV_REG_VAR(regkey) = (default_value); \ + NV_MODULE_PARAMETER(__NV_REG_VAR(regkey)) +#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \ + NvU32 __NV_REG_VAR(regkey) = (default_value); \ + NV_MODULE_PARAMETER(__NV_REG_VAR(regkey)) +#else +#define NV_DEFINE_REG_ENTRY(regkey, default_value) \ + static NvU32 __NV_REG_VAR(regkey) = (default_value) +#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \ + NvU32 __NV_REG_VAR(regkey) = (default_value) +#endif + +#if defined(NV_MODULE_STRING_PARAMETER) +#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \ + char *__NV_REG_VAR(regkey) = (default_value); \ + NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey)) +#else +#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \ + char *__NV_REG_VAR(regkey) = (default_value) +#endif + +#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \ + { NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) } + +/* + * Like NV_DEFINE_PARMS_TABLE_ENTRY, but allows a mismatch between the name of + * the regkey and the name of the module parameter. When using this macro, the + * name of the parameter is passed to the extra "parameter" argument, and it is + * this name that must be used in the NV_DEFINE_REG_ENTRY() macro. + */ + +#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \ + { NV_REG_STRING(regkey), &__NV_REG_VAR(parameter)} + +/* + *----------------- registry key definitions-------------------------- + */ + +/* + * Option: ModifyDeviceFiles + * + * Description: + * + * When this option is enabled, the NVIDIA driver will verify the validity + * of the NVIDIA device files in /dev and attempt to dynamically modify + * and/or (re-)create them, if necessary. If you don't wish for the NVIDIA + * driver to touch the device files, you can use this registry key. + * + * This module parameter is only honored by the NVIDIA GPU driver and NVIDIA + * capability driver. Furthermore, the NVIDIA capability driver provides + * modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of + * this module parameter per device file. + * + * Possible Values: + * 0 = disable dynamic device file management + * 1 = enable dynamic device file management (default) + */ + +#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles +#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES) + +/* + * Option: DeviceFileUID + * + * Description: + * + * This registry key specifies the UID assigned to the NVIDIA device files + * created and/or modified by the NVIDIA driver when dynamic device file + * management is enabled. + * + * This module parameter is only honored by the NVIDIA GPU driver. + * + * The default UID is 0 ('root'). + */ + +#define __NV_DEVICE_FILE_UID DeviceFileUID +#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID) + +/* + * Option: DeviceFileGID + * + * Description: + * + * This registry key specifies the GID assigned to the NVIDIA device files + * created and/or modified by the NVIDIA driver when dynamic device file + * management is enabled. + * + * This module parameter is only honored by the NVIDIA GPU driver. + * + * The default GID is 0 ('root'). 
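+ *
+ * For example, a hypothetical /etc/modprobe.d entry (the GID of the "video"
+ * group differs between distributions; 44 is purely illustrative) might be:
+ *
+ *   options nvidia NVreg_DeviceFileGID=44 NVreg_DeviceFileMode=0660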
+ */ + +#define __NV_DEVICE_FILE_GID DeviceFileGID +#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID) + +/* + * Option: DeviceFileMode + * + * Description: + * + * This registry key specifies the device file mode assigned to the NVIDIA + * device files created and/or modified by the NVIDIA driver when dynamic + * device file management is enabled. + * + * This module parameter is only honored by the NVIDIA GPU driver. + * + * The default mode is 0666 (octal, rw-rw-rw-). + */ + +#define __NV_DEVICE_FILE_MODE DeviceFileMode +#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE) + +/* + * Option: ResmanDebugLevel + * + * Default value: ~0 + */ + +#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel +#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL) + +/* + * Option: RmLogonRC + * + * Default value: 1 + */ + +#define __NV_RM_LOGON_RC RmLogonRC +#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC) + +/* + * Option: InitializeSystemMemoryAllocations + * + * Description: + * + * The NVIDIA Linux driver normally clears system memory it allocates + * for use with GPUs or within the driver stack. This is to ensure + * that potentially sensitive data is not rendered accessible by + * arbitrary user applications. + * + * Owners of single-user systems or similar trusted configurations may + * choose to disable the aforementioned clears using this option and + * potentially improve performance. + * + * Possible values: + * + * 1 = zero out system memory allocations (default) + * 0 = do not perform memory clears + */ + +#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \ + InitializeSystemMemoryAllocations +#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \ + NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS) + +/* + * Option: RegistryDwords + * + * Description: + * + * This option accepts a semicolon-separated list of key=value pairs. Each + * key name is checked against the table of static options; if a match is + * found, the static option value is overridden, but invalid options remain + * invalid. Pairs that do not match an entry in the static option table + * are passed on to the RM directly. + * + * Format: + * + * NVreg_RegistryDwords=";;..." + */ + +#define __NV_REGISTRY_DWORDS RegistryDwords +#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS) + +/* + * Option: RegistryDwordsPerDevice + * + * Description: + * + * This option allows to specify registry keys per GPU device. It helps to + * control registry at GPU level of granularity. It accepts a semicolon + * separated list of key=value pairs. The first key value pair MUST be + * "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot + * number and F is the Function. This PCI BDF is used to identify which GPU to + * assign the registry keys that follows next. + * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT + * found, then all the registry keys that follows are skipped, until we find next + * valid pci identified "pci=DDDD:BB:DD.F;". Following are the valid formats for + * the value of the "pci" string: + * 1) bus:slot : Domain and function defaults to 0. + * 2) domain:bus:slot : Function defaults to 0. + * 3) domain:bus:slot.func : Complete PCI dev id string. + * + * For each of the registry keys that follows, key name is checked against the + * table of static options; if a match is found, the static option value is + * overridden, but invalid options remain invalid. 
Pairs that do not match an + * entry in the static option table are passed on to the RM directly. + * + * Format: + * + * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;;;..; \ + * pci=DDDD:BB:DD.F;;..;" + */ + +#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice +#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE) + +#define __NV_RM_MSG RmMsg +#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG) + +/* + * Option: UsePageAttributeTable + * + * Description: + * + * Enable/disable use of the page attribute table (PAT) available in + * modern x86/x86-64 processors to set the effective memory type of memory + * mappings to write-combining (WC). + * + * If enabled, an x86 processor with PAT support is present and the host + * system's Linux kernel did not configure one of the PAT entries to + * indicate the WC memory type, the driver will change the second entry in + * the PAT from its default (write-through (WT)) to WC at module load + * time. If the kernel did update one of the PAT entries, the driver will + * not modify the PAT. + * + * In both cases, the driver will honor attempts to map memory with the WC + * memory type by selecting the appropriate PAT entry using the correct + * set of PTE flags. + * + * Possible values: + * + * ~0 = use the NVIDIA driver's default logic (default) + * 1 = enable use of the PAT for WC mappings. + * 0 = disable use of the PAT for WC mappings. + */ + +#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable +#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE) + +/* + * Option: EnableMSI + * + * Description: + * + * When this option is enabled and the host kernel supports the MSI feature, + * the NVIDIA driver will enable the PCI-E MSI capability of GPUs with the + * support for this feature instead of using PCI-E wired interrupt. + * + * Possible Values: + * + * 0 = disable MSI interrupt + * 1 = enable MSI interrupt (default) + * + */ + +#define __NV_ENABLE_MSI EnableMSI +#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI) + +/* + * Option: RegisterForACPIEvents + * + * Description: + * + * When this option is enabled, the NVIDIA driver will register with the + * ACPI subsystem to receive notification of ACPI events. + * + * Possible values: + * + * 1 - register for ACPI events (default) + * 0 - do not register for ACPI events + */ + +#define __NV_REGISTER_FOR_ACPI_EVENTS RegisterForACPIEvents +#define NV_REG_REGISTER_FOR_ACPI_EVENTS NV_REG_STRING(__NV_REGISTER_FOR_ACPI_EVENTS) + +/* + * Option: EnablePCIeGen3 + * + * Description: + * + * Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs + * when configured on SandyBridge E desktop platforms, NVIDIA feels that + * delivering a reliable, high-quality experience is not currently possible in + * PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and + * NVS Kepler products operate in PCIe Gen2 mode by default. You may use this + * option to enable PCIe Gen3 support. + * + * This is completely unsupported! 
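Returning to the RegistryDwords and RegistryDwordsPerDevice formats described above: the key names and values below are hypothetical, chosen only to illustrate the syntax, not as recommended settings.

    NVreg_RegistryDwords="SomeRegistryKey=0x1;AnotherRegistryKey=0"
    NVreg_RegistryDwordsPerDevice="pci=0000:03:00.0;SomeRegistryKey=0x1;pci=0000:82:00.0;SomeRegistryKey=0x0;"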
+ * + * Possible Values: + * + * 0: disable PCIe Gen3 support (default) + * 1: enable PCIe Gen3 support + */ + +#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3 +#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3) + +/* + * Option: MemoryPoolSize + * + * Description: + * + * When set to a non-zero value, this option specifies the size of the + * memory pool, given as a multiple of 1 GB, created on VMware ESXi to + * satisfy any system memory allocations requested by the NVIDIA kernel + * module. + */ + +#define __NV_MEMORY_POOL_SIZE MemoryPoolSize +#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE) + +/* + * Option: KMallocHeapMaxSize + * + * Description: + * + * When set to a non-zero value, this option specifies the maximum size of the + * heap memory space reserved for kmalloc operations. Given as a + * multiple of 1 MB created on VMware ESXi to satisfy any system memory + * allocations requested by the NVIDIA kernel module. + */ + +#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize +#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE) + +/* + * Option: VMallocHeapMaxSize + * + * Description: + * + * When set to a non-zero value, this option specifies the maximum size of the + * heap memory space reserved for vmalloc operations. Given as a + * multiple of 1 MB created on VMware ESXi to satisfy any system memory + * allocations requested by the NVIDIA kernel module. + */ + +#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize +#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE) + +/* + * Option: IgnoreMMIOCheck + * + * Description: + * + * When this option is enabled, the NVIDIA kernel module will ignore + * MMIO limit check during device probe on VMWare ESXi kernel. This is + * typically necessary when VMware ESXi MMIO limit differs between any + * base version and its updates. Customer using updates can set regkey + * to avoid probe failure. + */ + +#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck +#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK) + +/* + * Option: TCEBypassMode + * + * Description: + * + * When this option is enabled, the NVIDIA kernel module will attempt to setup + * all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass + * the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically + * necessary for CUDA applications in which large system memory mappings may + * exceed the default TCE remapping capacity when operated in non-bypass mode. + * + * This option has no effect on non-POWER platforms. + * + * Possible Values: + * + * 0: system default TCE mode on all GPUs + * 1: enable TCE bypass mode on all GPUs + * 2: disable TCE bypass mode on all GPUs + */ +#define __NV_TCE_BYPASS_MODE TCEBypassMode +#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE) + +#define NV_TCE_BYPASS_MODE_DEFAULT 0 +#define NV_TCE_BYPASS_MODE_ENABLE 1 +#define NV_TCE_BYPASS_MODE_DISABLE 2 + +/* + * Option: pci + * + * Description: + * + * On Unix platforms, per GPU based registry key can be specified as: + * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,". + * where DDDD:BB:DD.F refers to Domain:Bus:Device.Function. + * We need this key "pci" to identify what follows next is a PCI BDF identifier, + * for which the registry keys are to be applied. + * + * This define is not used on non-UNIX platforms. + * + * Possible Formats for value: + * + * 1) bus:slot : Domain and function defaults to 0. + * 2) domain:bus:slot : Function defaults to 0. 
+ * 3) domain:bus:slot.func : Complete PCI BDF identifier string.
+ */
+#define __NV_PCI_DEVICE_BDF pci
+#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF)
+
+/*
+ * Option: EnableStreamMemOPs
+ *
+ * Description:
+ *
+ * When this option is enabled, the CUDA driver will enable support for
+ * CUDA Stream Memory Operations in user-mode applications, which currently
+ * remain disabled by default due to limited support in devtools.
+ *
+ * Note: this is treated as a hint. MemOPs may still be left disabled by the
+ * CUDA driver for other reasons.
+ *
+ * Possible Values:
+ *
+ * 0 = disable feature (default)
+ * 1 = enable feature
+ */
+#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs
+#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS)
+
+/*
+ * Option: EnableUserNUMAManagement
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will require the
+ * user-mode NVIDIA Persistence daemon to manage the onlining and offlining
+ * of its NUMA device memory.
+ *
+ * This option has no effect on platforms that do not support onlining
+ * device memory to a NUMA node (this feature is only supported on certain
+ * POWER9 systems).
+ *
+ * Possible Values:
+ *
+ * 0: disable user-mode NUMA management
+ * 1: enable user-mode NUMA management (default)
+ */
+#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement
+#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT)
+
+/*
+ * Option: GpuBlacklist
+ *
+ * Description:
+ *
+ * This option accepts a list of blacklisted GPUs, separated by commas, that
+ * cannot be attached or used. Each blacklisted GPU is identified by a UUID in
+ * the ASCII format with leading "GPU-". An exact match is required; no partial
+ * UUIDs. This regkey is deprecated and will be removed in the future. Use
+ * NV_REG_EXCLUDED_GPUS instead.
+ */
+#define __NV_GPU_BLACKLIST GpuBlacklist
+#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST)
+
+/*
+ * Option: ExcludedGpus
+ *
+ * Description:
+ *
+ * This option accepts a list of excluded GPUs, separated by commas, that
+ * cannot be attached or used. Each excluded GPU is identified by a UUID in
+ * the ASCII format with leading "GPU-". An exact match is required; no partial
+ * UUIDs.
+ */
+#define __NV_EXCLUDED_GPUS ExcludedGpus
+#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS)
+
+/*
+ * Option: NvLinkDisable
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will not attempt to
+ * initialize or train NVLink connections for any GPUs. A system reboot is
+ * required for changes to take effect.
+ *
+ * This option has no effect if no GPUs support NVLink.
+ *
+ * Possible Values:
+ *
+ * 0: Do not disable NVLink (default)
+ * 1: Disable NVLink
+ */
+#define __NV_NVLINK_DISABLE NvLinkDisable
+#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE)
+
+/*
+ * Option: RestrictProfilingToAdminUsers
+ *
+ * Description:
+ *
+ * When this option is enabled, the NVIDIA kernel module will prevent users
+ * without administrative access (i.e., the CAP_SYS_ADMIN capability) from
+ * using GPU performance counters.
+ * + * Possible Values: + * + * 0: Do not restrict GPU counters (default) + * 1: Restrict GPU counters to system administrators only + */ + +#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly +#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers +#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY) + +/* + * Option: TemporaryFilePath + * + * Description: + * + * When specified, this option changes the location in which the + * NVIDIA kernel module will create unnamed temporary files (e.g. to + * save the contents of video memory in). The indicated file must + * be a directory. By default, temporary files are created in /tmp. + */ +#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath +#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH) + +/* + * Option: PreserveVideoMemoryAllocations + * + * If enabled, this option prompts the NVIDIA kernel module to save and + * restore all video memory allocations across system power management + * cycles, i.e. suspend/resume and hibernate/restore. Otherwise, + * only select allocations are preserved. + * + * Possible Values: + * + * 0: Preserve only select video memory allocations (default) + * 1: Preserve all video memory allocations + */ +#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations +#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \ + NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS) + + +/* + * Option: EnableS0ixPowerManagement + * + * When this option is enabled, the NVIDIA driver will use S0ix-based + * power management for system suspend/resume, if both the platform and + * the GPU support S0ix. + * + * During system suspend, if S0ix is enabled and + * video memory usage is above the threshold configured by + * 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept + * in self-refresh mode while the rest of the GPU is powered down. + * + * Otherwise, the driver will copy video memory contents to system memory + * and power off the video memory along with the GPU. + * + * Possible Values: + * + * 0: Disable S0ix based power management (default) + * 1: Enable S0ix based power management + */ + +#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement +#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \ + NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT) + +/* + * Option: S0ixPowerManagementVideoMemoryThreshold + * + * This option controls the threshold that the NVIDIA driver will use during + * S0ix-based system power management. + * + * When S0ix is enabled and the system is suspended, the driver will + * compare the amount of video memory in use with this threshold, + * to decide whether to keep video memory in self-refresh or copy video + * memory content to system memory. + * + * See the 'EnableS0ixPowerManagement' option. + * + * Values are expressed in Megabytes (1048576 bytes). + * + * Default value for this option is 256MB. + * + */ +#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + S0ixPowerManagementVideoMemoryThreshold +#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD) + + +/* + * Option: DynamicPowerManagement + * + * This option controls how aggressively the NVIDIA kernel module will manage + * GPU power through kernel interfaces. + * + * Possible Values: + * + * 0: Never allow the GPU to be powered down (default). + * 1: Power down the GPU when it is not initialized. 
+ * 2: Power down the GPU after it has been inactive for some time. + * 3: (Default) Power down the GPU after a period of inactivity (i.e., + * mode 2) on Ampere or later notebooks. Otherwise, do not power down + * the GPU. + */ +#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement +#define NV_REG_DYNAMIC_POWER_MANAGEMENT \ + NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT) + +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3 + +/* + * Option: DynamicPowerManagementVideoMemoryThreshold + * + * This option controls the threshold that the NVIDIA driver will use + * when selecting the dynamic power management scheme. + * + * When the driver detects that the GPU is idle, it will compare the amount + * of video memory in use with this threshold. + * + * If the current video memory usage is less than the threshold, the + * driver may preserve video memory contents in system memory and power off + * the video memory along with the GPU itself, if supported. Otherwise, + * the video memory will be kept in self-refresh mode while powering down + * the rest of the GPU, if supported. + * + * Values are expressed in Megabytes (1048576 bytes). + * + * If the requested value is greater than 200MB (the default), then it + * will be capped to 200MB. + */ +#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + DynamicPowerManagementVideoMemoryThreshold +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD) + +/* + * Option: RegisterPCIDriver + * + * Description: + * + * When this option is enabled, the NVIDIA driver will register with + * PCI subsystem. + * + * Possible values: + * + * 1 - register as PCI driver (default) + * 0 - do not register as PCI driver + */ + +#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver +#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER) + +/* + * Option: EnablePCIERelaxedOrderingMode + * + * Description: + * + * When this option is enabled, the registry key RmSetPCIERelaxedOrdering will + * be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing + * every device to set the relaxed ordering bit to 1 in all outbound MWr + * transaction-layer packets. This is equivalent to setting the regkey to + * FORCE_ENABLE as a non-per-device registry key. + * + * Possible values: + * 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default) + * 1 - Enable PCIe TLP relaxed ordering bit-setting + */ +#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode +#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \ + NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE) + +/* + * Option: EnableGpuFirmware + * + * Description: + * + * When this option is enabled, the NVIDIA driver will enable use of GPU + * firmware. + * + * Possible mode values: + * 0 - Do not enable GPU firmware + * 1 - Enable GPU firmware + * 2 - (Default) Use the default enablement policy for GPU firmware + * + * Setting this to anything other than 2 will alter driver firmware- + * enablement policies, possibly disabling GPU firmware where it would + * have otherwise been enabled by default. + * + * If this key is set globally to the system, the driver may still attempt + * to apply some policies to maintain uniform firmware modes across all + * GPUS. 
This may result in the driver failing initialization on some GPUs
+ * to maintain such a policy.
+ *
+ * If this key is set using NVreg_RegistryDwordsPerDevice, then the driver
+ * will attempt to honor whatever configuration is specified without applying
+ * additional policies. This may also result in failed GPU initializations if
+ * the configuration is not possible (for example if the firmware is missing
+ * from the filesystem, or the GPU is not capable).
+ *
+ * Policy bits:
+ *
+ * POLICY_ALLOW_FALLBACK:
+ * The normal behavior is to fail GPU initialization if this registry entry
+ * is set in a way that results in an invalid configuration. If instead the
+ * user would like the driver to automatically fall back to initializing the
+ * failing GPU with firmware disabled, then this bit can be set (ex: 0x11
+ * means try to enable GPU firmware but fall back if needed).
+ * Note that this can result in a mixed mode configuration (ex: GPU0 has
+ * firmware enabled, but GPU1 does not).
+ *
+ */
+
+#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware
+#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE)
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
+#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
+#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
+#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF
+
+/*
+ * Option: EnableGpuFirmwareLogs
+ *
+ * When this option is enabled, the NVIDIA driver will send GPU firmware logs
+ * to the system log, when possible.
+ *
+ * Possible values:
+ * 0 - Do not send GPU firmware logs to the system log
+ * 1 - Enable sending of GPU firmware logs to the system log
+ * 2 - (Default) Enable sending of GPU firmware logs to the system log for
+ * the debug kernel driver build only
+ */
+#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS)
+
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
+#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
+
+/*
+ * Option: EnableDbgBreakpoint
+ *
+ * When this option is set to a non-zero value, and the kernel is configured
+ * appropriately, assertions within resman will trigger a CPU breakpoint (e.g.,
+ * INT3 on x86_64), assumed to be caught by an attached debugger.
+ *
+ * When this option is set to the value zero (the default), assertions within
+ * resman will print to the system log, but no CPU breakpoint will be triggered.
+ */
+#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint
+
+
+/*
+ * Option: OpenRmEnableUnsupportedGpus
+ *
+ * Open nvidia.ko support for features beyond what is used on Data Center GPUs
+ * is still fairly immature, so for now require users to opt into use of open
+ * nvidia.ko with a special registry key, if not on a Data Center GPU.
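A minimal sketch of how the EnableGpuFirmware mode and policy fields combine, using the masks defined above; the two helper functions are invented for illustration and are not part of the driver.

/*
 * Example: NVreg_EnableGpuFirmware=0x11
 *   0x11 & NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK   == 0x01 (MODE_ENABLED)
 *   0x11 & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK == 0x10 (POLICY_ALLOW_FALLBACK)
 * The default value 0x12 requests the default enablement policy (0x02) plus
 * the fallback policy bit (0x10).
 */
static inline NvU32 nv_example_gpu_firmware_mode(NvU32 regval)
{
    return regval & NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK;
}

static inline NvU32 nv_example_gpu_firmware_policy(NvU32 regval)
{
    return regval & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK;
}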
+ */ + +#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS) +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000 +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001 +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE + + +#if defined(NV_DEFINE_REGISTRY_KEY_TABLE) + +/* + *---------registry key parameter declarations-------------- + */ + +NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0); +NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666); +NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1); +NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0); +NV_DEFINE_REG_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS, 1); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1); +NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0); +NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1); +NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0); + +NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0); +NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256); + +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3); +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG); +NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT); + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0); + + + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1); + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0); + +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL); + +/* + *----------------registry database definition---------------------- + */ + +/* + * You can enable any of the registry options disabled by default by + * editing their respective entries in the table below. The last field + * determines if the option is considered valid - in order for the + * changes to take effect, you need to recompile and reload the NVIDIA + * kernel module. 
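For readers tracing how the nv_parms[] table below is populated, here is a rough expansion sketch for one entry; it assumes NV_REG_STRING() stringizes its argument and that NV_MODULE_PARAMETER() is provided by the OS port layer, as the definitions earlier in this header suggest.

/*
 *   NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1)
 *       -> static NvU32 NVreg_EnableMSI = (1);
 *          NV_MODULE_PARAMETER(NVreg_EnableMSI)
 *
 *   NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI)
 *       -> { "EnableMSI", &NVreg_EnableMSI }
 *
 * Each nv_parms[] entry therefore pairs a registry key name with the address
 * of the NvU32 that backs the corresponding module parameter.
 */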
+ */ +nv_parm_t nv_parms[] = { + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY, + __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS), + + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS), + {NULL, NULL} +}; + +#elif defined(NVRM) + +extern nv_parm_t nv_parms[]; + +#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */ + +#endif /* _RM_REG_H_ */ diff --git a/kernel-open/nvidia/nv-report-err.c b/kernel-open/nvidia/nv-report-err.c new file mode 100644 index 000000000..eec5af3e7 --- /dev/null +++ b/kernel-open/nvidia/nv-report-err.c @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ +#include "nv-linux.h" +#include "os-interface.h" + +#include "nv-report-err.h" + +nv_report_error_cb_t nv_error_cb_handle = NULL; + +int nv_register_error_cb(nv_report_error_cb_t report_error_cb) +{ + if (report_error_cb == NULL) + return -EINVAL; + + if (nv_error_cb_handle != NULL) + return -EBUSY; + + nv_error_cb_handle = report_error_cb; + return 0; +} + +EXPORT_SYMBOL(nv_register_error_cb); + +int nv_unregister_error_cb(void) +{ + if (nv_error_cb_handle == NULL) + return -EPERM; + + nv_error_cb_handle = NULL; + return 0; +} + +EXPORT_SYMBOL(nv_unregister_error_cb); + +struct pci_dev; + +void nv_report_error( + struct pci_dev *dev, + NvU32 error_number, + const char *format, + va_list ap +) +{ + va_list ap_copy; + char *buffer; + int length = 0; + int status = NV_OK; + + if (nv_error_cb_handle != NULL) + { + va_copy(ap_copy, ap); + length = vsnprintf(NULL, 0, format, ap); + va_end(ap_copy); + + if (length > 0) + { + status = os_alloc_mem((void *)&buffer, (length + 1)*sizeof(char)); + + if (status == NV_OK) + { + vsnprintf(buffer, length, format, ap); + nv_error_cb_handle(dev, error_number, buffer, length + 1); + os_free_mem(buffer); + } + } + } +} diff --git a/kernel-open/nvidia/nv-report-err.h b/kernel-open/nvidia/nv-report-err.h new file mode 100644 index 000000000..d48870921 --- /dev/null +++ b/kernel-open/nvidia/nv-report-err.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_REPORT_ERR_H_ +#define _NV_REPORT_ERR_H_ + +/* + * @brief + * Callback definition for obtaining XID error string and data. + * + * @param[in] pci_dev * + * Structure describring GPU PCI device. + * @param[in] uint32_t + * XID number + * @param[in] char * + * Error string with HWERR info. + * @param[in] int + * Length of error string. + */ +typedef void (*nv_report_error_cb_t)(struct pci_dev *, uint32_t, char *, int); + +/* + * @brief + * Register callback function to obtain XID error string and data. + * + * @param[in] report_error_cb + * A function pointer to recieve callback. 
+ * + * @return + * 0 upon successful completion. + * -EINVAL callback handle is NULL. + * -EBUSY callback handle is already registered. + */ +int nv_register_error_cb(nv_report_error_cb_t report_error_cb); + +/* + * @brief + * Unregisters callback function handle. + * + * @return + * 0 upon successful completion. + * -EPERM unregister not permitted on NULL callback handle. + */ +int nv_unregister_error_cb(void); + +#endif /* _NV_REPORT_ERR_H_ */ diff --git a/kernel-open/nvidia/nv-rsync.c b/kernel-open/nvidia/nv-rsync.c new file mode 100644 index 000000000..57860baa1 --- /dev/null +++ b/kernel-open/nvidia/nv-rsync.c @@ -0,0 +1,201 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
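A hedged sketch of how a consumer might use the callback interface declared in nv-report-err.h above; the function names are hypothetical, and the usual module boilerplate and includes (linux/pci.h, linux/printk.h) are assumed.

static void example_xid_logger(struct pci_dev *dev, uint32_t xid, char *msg, int len)
{
    /* 'msg' is a NUL-terminated buffer of 'len' bytes owned by the caller;
     * copy it if it must outlive this callback. */
    printk(KERN_INFO "NVRM consumer: %04x:%02x:%02x.%u XID %u: %s\n",
           pci_domain_nr(dev->bus), dev->bus->number,
           PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), xid, msg);
}

static int example_register(void)
{
    /* Returns -EBUSY if another callback is already registered. */
    return nv_register_error_cb(example_xid_logger);
}

static void example_unregister(void)
{
    nv_unregister_error_cb();
}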
+ */ + +#include "nv-linux.h" +#include "nv-rsync.h" + +nv_rsync_info_t g_rsync_info; + +void nv_init_rsync_info( + void +) +{ + g_rsync_info.relaxed_ordering_mode = NV_FALSE; + g_rsync_info.usage_count = 0; + g_rsync_info.data = NULL; + NV_INIT_MUTEX(&g_rsync_info.lock); +} + +void nv_destroy_rsync_info( + void +) +{ + WARN_ON(g_rsync_info.data); + WARN_ON(g_rsync_info.usage_count); + WARN_ON(g_rsync_info.relaxed_ordering_mode); +} + +int nv_get_rsync_info( + void +) +{ + int mode; + int rc = 0; + + down(&g_rsync_info.lock); + + if (g_rsync_info.usage_count == 0) + { + if (g_rsync_info.get_relaxed_ordering_mode) + { + rc = g_rsync_info.get_relaxed_ordering_mode(&mode, + g_rsync_info.data); + if (rc != 0) + { + goto done; + } + + g_rsync_info.relaxed_ordering_mode = !!mode; + } + } + + g_rsync_info.usage_count++; + +done: + up(&g_rsync_info.lock); + + return rc; +} + +void nv_put_rsync_info( + void +) +{ + int mode; + + down(&g_rsync_info.lock); + + g_rsync_info.usage_count--; + + if (g_rsync_info.usage_count == 0) + { + if (g_rsync_info.put_relaxed_ordering_mode) + { + mode = g_rsync_info.relaxed_ordering_mode; + g_rsync_info.put_relaxed_ordering_mode(mode, g_rsync_info.data); + g_rsync_info.relaxed_ordering_mode = NV_FALSE; + } + } + + up(&g_rsync_info.lock); +} + +int nv_register_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data +) +{ + int rc = 0; + + down(&g_rsync_info.lock); + + if (g_rsync_info.get_relaxed_ordering_mode != NULL) + { + rc = -EBUSY; + goto done; + } + + if (g_rsync_info.usage_count != 0) + { + rc = -EBUSY; + goto done; + } + + g_rsync_info.get_relaxed_ordering_mode = get_relaxed_ordering_mode; + g_rsync_info.put_relaxed_ordering_mode = put_relaxed_ordering_mode; + g_rsync_info.wait_for_rsync = wait_for_rsync; + g_rsync_info.data = data; + +done: + up(&g_rsync_info.lock); + + return rc; +} + +void nv_unregister_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data +) +{ + down(&g_rsync_info.lock); + + WARN_ON(g_rsync_info.usage_count != 0); + + WARN_ON(g_rsync_info.get_relaxed_ordering_mode != + get_relaxed_ordering_mode); + WARN_ON(g_rsync_info.put_relaxed_ordering_mode != + put_relaxed_ordering_mode); + WARN_ON(g_rsync_info.wait_for_rsync != wait_for_rsync); + WARN_ON(g_rsync_info.data != data); + + g_rsync_info.get_relaxed_ordering_mode = NULL; + g_rsync_info.put_relaxed_ordering_mode = NULL; + g_rsync_info.wait_for_rsync = NULL; + g_rsync_info.data = NULL; + + up(&g_rsync_info.lock); +} + +NvBool nv_get_rsync_relaxed_ordering_mode( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* shouldn't be called without opening a device */ + WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0); + + /* + * g_rsync_info.relaxed_ordering_mode can be safely accessed outside of + * g_rsync_info.lock once a device is opened. During nvidia_open(), we + * lock the relaxed ordering state by ref-counting the rsync module + * through get_relaxed_ordering_mode. 
+ */ + return g_rsync_info.relaxed_ordering_mode; +} + +void nv_wait_for_rsync( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* shouldn't be called without opening a device */ + WARN_ON(NV_ATOMIC_READ(nvl->usage_count) == 0); + + /* + * g_rsync_info.relaxed_ordering_mode can be safely accessed outside of + * g_rsync_info.lock once a device is opened. During nvidia_open(), we + * block unregistration of the rsync driver by ref-counting the module + * through get_relaxed_ordering_mode. + */ + if (g_rsync_info.relaxed_ordering_mode) + { + WARN_ON(g_rsync_info.wait_for_rsync == NULL); + g_rsync_info.wait_for_rsync(nvl->pci_dev, g_rsync_info.data); + } +} diff --git a/kernel-open/nvidia/nv-rsync.h b/kernel-open/nvidia/nv-rsync.h new file mode 100644 index 000000000..6e262e62f --- /dev/null +++ b/kernel-open/nvidia/nv-rsync.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_RSYNC_H_ +#define _NV_RSYNC_H_ + +#include "nv-linux.h" + +typedef struct nv_rsync_info +{ + struct semaphore lock; + uint32_t usage_count; + NvBool relaxed_ordering_mode; + int (*get_relaxed_ordering_mode)(int *mode, void *data); + void (*put_relaxed_ordering_mode)(int mode, void *data); + void (*wait_for_rsync)(struct pci_dev *gpu, void *data); + void *data; +} nv_rsync_info_t; + +void nv_init_rsync_info(void); +void nv_destroy_rsync_info(void); +int nv_get_rsync_info(void); +void nv_put_rsync_info(void); +int nv_register_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data); +void nv_unregister_rsync_driver( + int (*get_relaxed_ordering_mode)(int *mode, void *data), + void (*put_relaxed_ordering_mode)(int mode, void *data), + void (*wait_for_rsync)(struct pci_dev *gpu, void *data), + void *data); +NvBool nv_get_rsync_relaxed_ordering_mode(nv_state_t *nv); +void nv_wait_for_rsync(nv_state_t *nv); + +#endif diff --git a/kernel-open/nvidia/nv-usermap.c b/kernel-open/nvidia/nv-usermap.c new file mode 100644 index 000000000..2d1a84c9d --- /dev/null +++ b/kernel-open/nvidia/nv-usermap.c @@ -0,0 +1,160 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
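Similarly, a sketch of how a companion rsync driver might register against the nv_register_rsync_driver() interface declared in nv-rsync.h above; all names are invented for illustration and error handling is elided.

static int example_get_ro_mode(int *mode, void *data)
{
    *mode = 1; /* ask for PCIe relaxed ordering while GPUs are in use */
    return 0;
}

static void example_put_ro_mode(int mode, void *data)
{
    /* undo whatever example_get_ro_mode() set up */
}

static void example_wait_for_rsync(struct pci_dev *gpu, void *data)
{
    /* flush or fence outstanding relaxed-ordered writes for this GPU */
}

static int example_attach(void)
{
    /* Fails with -EBUSY if another rsync driver is registered or the
     * interface is already in use. */
    return nv_register_rsync_driver(example_get_ro_mode,
                                    example_put_ro_mode,
                                    example_wait_for_rsync, NULL);
}

static void example_detach(void)
{
    nv_unregister_rsync_driver(example_get_ro_mode,
                               example_put_ro_mode,
                               example_wait_for_rsync, NULL);
}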
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-frontend.h" + +NV_STATUS NV_API_CALL nv_add_mapping_context_to_file( + nv_state_t *nv, + nv_usermap_access_params_t *nvuap, + NvU32 prot, + void *pAllocPriv, + NvU64 pageIndex, + NvU32 fd +) +{ + NV_STATUS status = NV_OK; + nv_alloc_mapping_context_t *nvamc = NULL; + nv_file_private_t *nvfp = NULL; + nv_linux_file_private_t *nvlfp = NULL; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + void *priv = NULL; + + nvfp = nv_get_file_private(fd, NV_IS_CTL_DEVICE(nv), &priv); + if (nvfp == NULL) + return NV_ERR_INVALID_ARGUMENT; + + nvlfp = nv_get_nvlfp_from_nvfp(nvfp); + + nvamc = &nvlfp->mmap_context; + + if (nvamc->valid) + { + status = NV_ERR_STATE_IN_USE; + goto done; + } + + if (NV_IS_CTL_DEVICE(nv)) + { + nvamc->alloc = pAllocPriv; + nvamc->page_index = pageIndex; + } + else + { + if (NV_STATE_PTR(nvlfp->nvptr) != nv) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + nvamc->mmap_start = nvuap->mmap_start; + nvamc->mmap_size = nvuap->mmap_size; + if (nv_get_numa_status(nvl) == NV_NUMA_STATUS_ONLINE) + { + nvamc->page_array = nvuap->page_array; + nvamc->num_pages = nvuap->num_pages; + } + nvamc->access_start = nvuap->access_start; + nvamc->access_size = nvuap->access_size; + nvamc->remap_prot_extra = nvuap->remap_prot_extra; + } + + nvamc->prot = prot; + nvamc->valid = NV_TRUE; + +done: + nv_put_file_private(priv); + + return status; +} + +NV_STATUS NV_API_CALL nv_alloc_user_mapping( + nv_state_t *nv, + void *pAllocPrivate, + NvU64 pageIndex, + NvU32 pageOffset, + NvU64 size, + NvU32 protect, + NvU64 *pUserAddress, + void **ppPrivate +) +{ + nv_alloc_t *at = pAllocPrivate; + + if (at->flags.contig) + *pUserAddress = (at->page_table[0]->phys_addr + (pageIndex * PAGE_SIZE) + pageOffset); + else + *pUserAddress = (at->page_table[pageIndex]->phys_addr + pageOffset); + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_free_user_mapping( + nv_state_t *nv, + void *pAllocPrivate, + NvU64 userAddress, + void *pPrivate +) +{ + return NV_OK; +} + +/* + * This function adjust the {mmap,access}_{start,size} to reflect platform-specific + * mechanisms for isolating mappings at a finer granularity than the os_page_size + */ +NV_STATUS NV_API_CALL nv_get_usermap_access_params( + nv_state_t *nv, + nv_usermap_access_params_t *nvuap +) +{ + NvU64 addr = nvuap->addr; + NvU64 size = nvuap->size; 
+ + nvuap->remap_prot_extra = 0; + + /* + * Do verification and cache encoding based on the original + * (ostensibly smaller) mmap request, since accesses should be + * restricted to that range. + */ + if (rm_gpu_need_4k_page_isolation(nv) && + NV_4K_PAGE_ISOLATION_REQUIRED(addr, size)) + { +#if defined(NV_4K_PAGE_ISOLATION_PRESENT) + nvuap->remap_prot_extra = NV_PROT_4K_PAGE_ISOLATION; + nvuap->access_start = (NvU64)NV_4K_PAGE_ISOLATION_ACCESS_START(addr); + nvuap->access_size = NV_4K_PAGE_ISOLATION_ACCESS_LEN(addr, size); + nvuap->mmap_start = (NvU64)NV_4K_PAGE_ISOLATION_MMAP_ADDR(addr); + nvuap->mmap_size = NV_4K_PAGE_ISOLATION_MMAP_LEN(size); +#else + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "4K page isolation required but not available!\n"); + return NV_ERR_OPERATING_SYSTEM; +#endif + } + + return NV_OK; +} diff --git a/kernel-open/nvidia/nv-vm.c b/kernel-open/nvidia/nv-vm.c new file mode 100644 index 000000000..f59e81023 --- /dev/null +++ b/kernel-open/nvidia/nv-vm.c @@ -0,0 +1,726 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os-interface.h" +#include "nv.h" +#include "nv-linux.h" + +static inline void nv_set_contig_memory_uc(nvidia_pte_t *page_ptr, NvU32 num_pages) +{ +#if defined(NV_SET_MEMORY_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + unsigned long addr = (unsigned long)page_address(page); + set_memory_uc(addr, num_pages); +#elif defined(NV_SET_PAGES_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + set_pages_uc(page, num_pages); +#endif +} + +static inline void nv_set_contig_memory_wb(nvidia_pte_t *page_ptr, NvU32 num_pages) +{ +#if defined(NV_SET_MEMORY_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + unsigned long addr = (unsigned long)page_address(page); + set_memory_wb(addr, num_pages); +#elif defined(NV_SET_PAGES_UC_PRESENT) + struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); + set_pages_wb(page, num_pages); +#endif +} + +static inline int nv_set_memory_array_type_present(NvU32 type) +{ + switch (type) + { +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + return 1; + case NV_MEMORY_WRITEBACK: + return 1; +#endif + default: + return 0; + } +} + +static inline int nv_set_pages_array_type_present(NvU32 type) +{ + switch (type) + { +#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + return 1; + case NV_MEMORY_WRITEBACK: + return 1; +#endif + default: + return 0; + } +} + +static inline void nv_set_memory_array_type( + unsigned long *pages, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + set_memory_array_uc(pages, num_pages); + break; + case NV_MEMORY_WRITEBACK: + set_memory_array_wb(pages, num_pages); + break; +#endif + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + break; + } +} + +static inline void nv_set_pages_array_type( + struct page **pages, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { +#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + case NV_MEMORY_UNCACHED: + set_pages_array_uc(pages, num_pages); + break; + case NV_MEMORY_WRITEBACK: + set_pages_array_wb(pages, num_pages); + break; +#endif + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + break; + } +} + +static inline void nv_set_contig_memory_type( + nvidia_pte_t *page_ptr, + NvU32 num_pages, + NvU32 type +) +{ + switch (type) + { + case NV_MEMORY_UNCACHED: + nv_set_contig_memory_uc(page_ptr, num_pages); + break; + case NV_MEMORY_WRITEBACK: + nv_set_contig_memory_wb(page_ptr, num_pages); + break; + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): type %d unimplemented\n", + __FUNCTION__, type); + } +} + +static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type) +{ + NvU32 i; + NV_STATUS status = NV_OK; +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + unsigned long *pages = NULL; +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + struct page **pages = NULL; +#else + unsigned long *pages = NULL; +#endif + + nvidia_pte_t *page_ptr; + struct page *page; + + if (nv_set_memory_array_type_present(type)) + { + status = os_alloc_mem((void **)&pages, + at->num_pages * sizeof(unsigned long)); + + } + else if (nv_set_pages_array_type_present(type)) + { + status = os_alloc_mem((void **)&pages, + at->num_pages * sizeof(struct page*)); + } + + if (status != NV_OK) + pages = NULL; + + // + // If the set_{memory,page}_array_* functions are in the kernel interface, + // it's faster to use them since 
they work on non-contiguous memory, + // whereas the set_{memory,page}_* functions do not. + // + if (pages) + { + for (i = 0; i < at->num_pages; i++) + { + page_ptr = at->page_table[i]; + page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr); +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + pages[i] = (unsigned long)page_address(page); +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + pages[i] = page; +#endif + } +#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT) + nv_set_memory_array_type(pages, at->num_pages, type); +#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT) + nv_set_pages_array_type(pages, at->num_pages, type); +#endif + os_free_mem(pages); + } + + // + // If the set_{memory,page}_array_* functions aren't present in the kernel + // interface, each page has to be set individually, which has been measured + // to be ~10x slower than using the set_{memory,page}_array_* functions. + // + else + { + for (i = 0; i < at->num_pages; i++) + nv_set_contig_memory_type(at->page_table[i], 1, type); + } +} + +static NvU64 nv_get_max_sysmem_address(void) +{ + NvU64 global_max_pfn = 0ULL; + int node_id; + + for_each_online_node(node_id) + { + global_max_pfn = max(global_max_pfn, node_end_pfn(node_id)); + } + + return ((global_max_pfn + 1) << PAGE_SHIFT) - 1; +} + +static unsigned int nv_compute_gfp_mask( + nv_state_t *nv, + nv_alloc_t *at +) +{ + unsigned int gfp_mask = NV_GFP_KERNEL; + struct device *dev = at->dev; + + /* + * If we know that SWIOTLB is enabled (and therefore we avoid calling the + * kernel to DMA-remap the pages), or if we are using dma_direct (which may + * transparently use the SWIOTLB for pages that are unaddressable by the + * device, in kernel versions 5.0 and later), limit our allocation pool + * to the first 4GB to avoid allocating pages outside of our device's + * addressable limit. + * Also, limit the allocation to the first 4GB if explicitly requested by + * setting the "nv->force_dma32_alloc" variable. + */ + if (!nv || !nv_requires_dma_remap(nv) || nv_is_dma_direct(dev) || nv->force_dma32_alloc) + { + NvU64 max_sysmem_address = nv_get_max_sysmem_address(); + if ((dev && dev->dma_mask && (*(dev->dma_mask) < max_sysmem_address)) || + (nv && nv->force_dma32_alloc)) + { + gfp_mask = NV_GFP_DMA32; + } + } +#if defined(__GFP_RETRY_MAYFAIL) + gfp_mask |= __GFP_RETRY_MAYFAIL; +#elif defined(__GFP_NORETRY) + gfp_mask |= __GFP_NORETRY; +#endif +#if defined(__GFP_ZERO) + if (at->flags.zeroed) + gfp_mask |= __GFP_ZERO; +#endif +#if defined(__GFP_THISNODE) + if (at->flags.node0) + gfp_mask |= __GFP_THISNODE; +#endif + // Compound pages are required by vm_insert_page for high-order page + // allocations + if (at->order > 0) + gfp_mask |= __GFP_COMP; + + return gfp_mask; +} + +/* + * This function is needed for allocating contiguous physical memory in xen + * dom0. Because of the use of xen sw iotlb in xen dom0, memory allocated by + * NV_GET_FREE_PAGES may not be machine contiguous when size is more than + * 1 page. nv_alloc_coherent_pages() will give us machine contiguous memory. + * Even though we get dma_address directly in this function, we will + * still call pci_map_page() later to get dma address. This is fine as it + * will return the same machine address. 
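A quick numeric illustration of nv_get_max_sysmem_address() and how nv_compute_gfp_mask() above uses it; the numbers are hypothetical.

/*
 * With 4 KiB pages (PAGE_SHIFT == 12) and a highest online pfn of 0xFFFFF,
 *   ((0xFFFFF + 1) << 12) - 1 == 0xFFFFFFFF,
 * i.e. the last byte below 4 GiB.  In the SWIOTLB/dma-direct cases handled
 * above, nv_compute_gfp_mask() compares the device's dma_mask against this
 * value and selects NV_GFP_DMA32 when the device cannot address all of
 * system memory.
 */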
+ */ +static NV_STATUS nv_alloc_coherent_pages( + nv_state_t *nv, + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + NvU32 i; + unsigned int gfp_mask; + unsigned long virt_addr = 0; + dma_addr_t bus_addr; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + gfp_mask = nv_compute_gfp_mask(nv, at); + + virt_addr = (unsigned long)dma_alloc_coherent(dev, + at->num_pages * PAGE_SIZE, + &bus_addr, + gfp_mask); + if (!virt_addr) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__); + return NV_ERR_NO_MEMORY; + } + + for (i = 0; i < at->num_pages; i++) + { + page_ptr = at->page_table[i]; + + page_ptr->virt_addr = virt_addr + i * PAGE_SIZE; + page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr); + page_ptr->dma_addr = bus_addr + i * PAGE_SIZE; + } + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_contig_memory_type(at->page_table[0], + at->num_pages, + NV_MEMORY_UNCACHED); + } + + at->flags.coherent = NV_TRUE; + return NV_OK; +} + +static void nv_free_coherent_pages( + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + struct device *dev = at->dev; + + page_ptr = at->page_table[0]; + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_contig_memory_type(at->page_table[0], + at->num_pages, + NV_MEMORY_WRITEBACK); + } + + dma_free_coherent(dev, at->num_pages * PAGE_SIZE, + (void *)page_ptr->virt_addr, page_ptr->dma_addr); +} + +NV_STATUS nv_alloc_contig_pages( + nv_state_t *nv, + nv_alloc_t *at +) +{ + NV_STATUS status; + nvidia_pte_t *page_ptr; + NvU32 i, j; + unsigned int gfp_mask; + unsigned long virt_addr = 0; + NvU64 phys_addr; + struct device *dev = at->dev; + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages); + + // TODO: This is a temporary WAR, and will be removed after fixing bug 200732409. 
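    /*
     * Allocation flow below: Xen dom0 and unencrypted requests take the
     * dma_alloc_coherent() path; everything else is served by the page
     * allocator at 'order' granularity, falling back to coherent
     * allocations on vGPU hypervisors if the high-order allocation fails.
     */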
+ if (os_is_xen_dom0() || at->flags.unencrypted) + return nv_alloc_coherent_pages(nv, at); + + + + + + + + + at->order = get_order(at->num_pages * PAGE_SIZE); + gfp_mask = nv_compute_gfp_mask(nv, at); + + if (at->flags.node0) + { + NV_ALLOC_PAGES_NODE(virt_addr, 0, at->order, gfp_mask); + } + else + { + NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask); + } + if (virt_addr == 0) + { + if (os_is_vgx_hyper()) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory, trying coherent memory \n", __FUNCTION__); + + status = nv_alloc_coherent_pages(nv, at); + return status; + } + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__); + return NV_ERR_NO_MEMORY; + } +#if !defined(__GFP_ZERO) + if (at->flags.zeroed) + memset((void *)virt_addr, 0, (at->num_pages * PAGE_SIZE)); +#endif + + for (i = 0; i < at->num_pages; i++, virt_addr += PAGE_SIZE) + { + phys_addr = nv_get_kern_phys_address(virt_addr); + if (phys_addr == 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: %s: failed to look up physical address\n", + __FUNCTION__); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + page_ptr = at->page_table[i]; + page_ptr->phys_addr = phys_addr; + page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr); + page_ptr->virt_addr = virt_addr; + page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr); + + NV_MAYBE_RESERVE_PAGE(page_ptr); + } + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_contig_memory_type(at->page_table[0], + at->num_pages, + NV_MEMORY_UNCACHED); + } + + at->flags.coherent = NV_FALSE; + + return NV_OK; + +failed: + if (i > 0) + { + for (j = 0; j < i; j++) + NV_MAYBE_UNRESERVE_PAGE(at->page_table[j]); + } + + page_ptr = at->page_table[0]; + NV_FREE_PAGES(page_ptr->virt_addr, at->order); + + return status; +} + +void nv_free_contig_pages( + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + unsigned int i; + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages); + + if (at->flags.coherent) + return nv_free_coherent_pages(at); + + if (at->cache_type != NV_MEMORY_CACHED) + { + nv_set_contig_memory_type(at->page_table[0], + at->num_pages, + NV_MEMORY_WRITEBACK); + } + + for (i = 0; i < at->num_pages; i++) + { + page_ptr = at->page_table[i]; + + if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count) + { + static int count = 0; + if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: %s: page count != initial page count (%u,%u)\n", + __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr), + page_ptr->page_count); + } + } + NV_MAYBE_UNRESERVE_PAGE(page_ptr); + } + + page_ptr = at->page_table[0]; + + NV_FREE_PAGES(page_ptr->virt_addr, at->order); +} + +NV_STATUS nv_alloc_system_pages( + nv_state_t *nv, + nv_alloc_t *at +) +{ + NV_STATUS status; + nvidia_pte_t *page_ptr; + NvU32 i, j; + unsigned int gfp_mask; + unsigned long virt_addr = 0; + NvU64 phys_addr; + struct device *dev = at->dev; + dma_addr_t bus_addr; + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %u: %u pages\n", __FUNCTION__, at->num_pages); + + gfp_mask = nv_compute_gfp_mask(nv, at); + + for (i = 0; i < at->num_pages; i++) + { + if (at->flags.unencrypted && (dev != NULL)) + { + virt_addr = (unsigned long)dma_alloc_coherent(dev, + PAGE_SIZE, + &bus_addr, + gfp_mask); + at->flags.coherent = NV_TRUE; + } + else if (at->flags.node0) + { + NV_ALLOC_PAGES_NODE(virt_addr, 0, 0, gfp_mask); + } + else + { + NV_GET_FREE_PAGES(virt_addr, 0, gfp_mask); + } + + if (virt_addr == 0) + { + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: 
failed to allocate memory\n", __FUNCTION__); + status = NV_ERR_NO_MEMORY; + goto failed; + } +#if !defined(__GFP_ZERO) + if (at->flags.zeroed) + memset((void *)virt_addr, 0, PAGE_SIZE); +#endif + + phys_addr = nv_get_kern_phys_address(virt_addr); + if (phys_addr == 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: %s: failed to look up physical address\n", + __FUNCTION__); + NV_FREE_PAGES(virt_addr, 0); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + +#if defined(_PAGE_NX) + if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) && + (phys_addr < 0x400000)) + { + nv_printf(NV_DBG_SETUP, + "NVRM: VM: %s: discarding page @ 0x%llx\n", + __FUNCTION__, phys_addr); + --i; + continue; + } +#endif + + page_ptr = at->page_table[i]; + page_ptr->phys_addr = phys_addr; + page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr); + page_ptr->virt_addr = virt_addr; + + // + // Use unencrypted dma_addr returned by dma_alloc_coherent() as + // nv_phys_to_dma() returns encrypted dma_addr when AMD SEV is enabled. + // + if (at->flags.coherent) + page_ptr->dma_addr = bus_addr; + else if (dev) + page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr); + else + page_ptr->dma_addr = page_ptr->phys_addr; + + NV_MAYBE_RESERVE_PAGE(page_ptr); + } + + if (at->cache_type != NV_MEMORY_CACHED) + nv_set_memory_type(at, NV_MEMORY_UNCACHED); + + return NV_OK; + +failed: + if (i > 0) + { + for (j = 0; j < i; j++) + { + page_ptr = at->page_table[j]; + NV_MAYBE_UNRESERVE_PAGE(page_ptr); + if (at->flags.coherent) + { + dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr, + page_ptr->dma_addr); + } + else + { + NV_FREE_PAGES(page_ptr->virt_addr, 0); + } + } + } + + return status; +} + +void nv_free_system_pages( + nv_alloc_t *at +) +{ + nvidia_pte_t *page_ptr; + unsigned int i; + struct device *dev = at->dev; + + nv_printf(NV_DBG_MEMINFO, + "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages); + + if (at->cache_type != NV_MEMORY_CACHED) + nv_set_memory_type(at, NV_MEMORY_WRITEBACK); + + for (i = 0; i < at->num_pages; i++) + { + page_ptr = at->page_table[i]; + + if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count) + { + static int count = 0; + if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: VM: %s: page count != initial page count (%u,%u)\n", + __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr), + page_ptr->page_count); + } + } + + NV_MAYBE_UNRESERVE_PAGE(page_ptr); + if (at->flags.coherent) + { + dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr, + page_ptr->dma_addr); + } + else + { + NV_FREE_PAGES(page_ptr->virt_addr, 0); + } + } +} + +NvUPtr nv_vm_map_pages( + struct page **pages, + NvU32 count, + NvBool cached, + NvBool unencrypted +) +{ + NvUPtr virt_addr = 0; + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: can't map %d pages, invalid context!\n", + __FUNCTION__, count); + os_dbg_breakpoint(); + return virt_addr; + } + + virt_addr = nv_vmap(pages, count, cached, unencrypted); + return virt_addr; +} + +void nv_vm_unmap_pages( + NvUPtr virt_addr, + NvU32 count +) +{ + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: can't unmap %d pages at 0x%0llx, " + "invalid context!\n", __FUNCTION__, count, virt_addr); + os_dbg_breakpoint(); + return; + } + + nv_vunmap(virt_addr, count); +} + +void nv_address_space_init_once(struct address_space *mapping) +{ +#if defined(NV_ADDRESS_SPACE_INIT_ONCE_PRESENT) + address_space_init_once(mapping); +#else + memset(mapping, 0, sizeof(*mapping)); + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); + 
+#if defined(NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK) + // + // The .tree_lock member variable was changed from type rwlock_t, to + // spinlock_t, on 25 July 2008, by mainline commit + // 19fd6231279be3c3bdd02ed99f9b0eb195978064. + // + rwlock_init(&mapping->tree_lock); +#else + spin_lock_init(&mapping->tree_lock); +#endif + + spin_lock_init(&mapping->i_mmap_lock); + INIT_LIST_HEAD(&mapping->private_list); + spin_lock_init(&mapping->private_lock); + INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); + INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); +#endif /* !NV_ADDRESS_SPACE_INIT_ONCE_PRESENT */ +} diff --git a/kernel-open/nvidia/nv-vtophys.c b/kernel-open/nvidia/nv-vtophys.c new file mode 100644 index 000000000..df2a01e0e --- /dev/null +++ b/kernel-open/nvidia/nv-vtophys.c @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +NvU64 NV_API_CALL nv_get_kern_phys_address(NvU64 address) +{ + /* direct-mapped kernel address */ + if (virt_addr_valid(address)) + return __pa(address); + + nv_printf(NV_DBG_ERRORS, + "NVRM: can't translate address in %s()!\n", __FUNCTION__); + return 0; +} + diff --git a/kernel-open/nvidia/nv.c b/kernel-open/nvidia/nv.c new file mode 100644 index 000000000..dbcd8c04f --- /dev/null +++ b/kernel-open/nvidia/nv.c @@ -0,0 +1,5628 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvmisc.h" +#include "os-interface.h" +#include "nv-linux.h" +#include "nv-p2p.h" +#include "nv-reg.h" +#include "nv-msi.h" +#include "nv-pci-table.h" + +#if defined(NV_UVM_ENABLE) +#include "nv_uvm_interface.h" +#endif + +#if defined(NV_VGPU_KVM_BUILD) +#include "nv-vgpu-vfio-interface.h" +#endif + + +#include "nvlink_proto.h" +#include "nvlink_caps.h" + + +#include "nv-frontend.h" +#include "nv-hypervisor.h" +#include "nv-ibmnpu.h" +#include "nv-rsync.h" +#include "nv-kthread-q.h" +#include "nv-pat.h" +#include "nv-dmabuf.h" + +#if !defined(CONFIG_RETPOLINE) +#include "nv-retpoline.h" +#endif + +#include + +#include /* HDA struct snd_card */ + +#include + +#if defined(NV_SOUND_HDAUDIO_H_PRESENT) +#include "sound/hdaudio.h" +#endif + +#if defined(NV_SOUND_HDA_CODEC_H_PRESENT) +#include +#include +#include +#endif + +#if defined(NV_SEQ_READ_ITER_PRESENT) +#include +#include +#include +#endif + +#include /* System DMI info */ + +#include + +#include "conftest/patches.h" + +#define RM_THRESHOLD_TOTAL_IRQ_COUNT 100000 +#define RM_THRESHOLD_UNAHNDLED_IRQ_COUNT 99900 +#define RM_UNHANDLED_TIMEOUT_US 100000 + +const NvBool nv_is_rm_firmware_supported_os = NV_TRUE; + +// Deprecated, use NV_REG_ENABLE_GPU_FIRMWARE instead +char *rm_firmware_active = NULL; +NV_MODULE_STRING_PARAMETER(rm_firmware_active); + +#define NV_FIRMWARE_GSP_FILENAME "nvidia/" NV_VERSION_STRING "/gsp.bin" +#define NV_FIRMWARE_GSP_LOG_FILENAME "nvidia/" NV_VERSION_STRING "/gsp_log.bin" + +MODULE_FIRMWARE(NV_FIRMWARE_GSP_FILENAME); + +/* + * Global NVIDIA capability state, for GPU driver + */ +nv_cap_t *nvidia_caps_root = NULL; + +/* + * our global state; one per device + */ +NvU32 num_nv_devices = 0; +NvU32 num_probed_nv_devices = 0; + +nv_linux_state_t *nv_linux_devices; + +/* + * And one for the control device + */ +nv_linux_state_t nv_ctl_device = { { 0 } }; + +nv_kthread_q_t nv_kthread_q; +nv_kthread_q_t nv_deferred_close_kthread_q; + +struct rw_semaphore nv_system_pm_lock; + +#if defined(CONFIG_PM) +static nv_power_state_t nv_system_power_state; +static nv_pm_action_depth_t nv_system_pm_action_depth; +struct semaphore nv_system_power_state_lock; +#endif + +void *nvidia_p2p_page_t_cache; +static void *nvidia_pte_t_cache; +void *nvidia_stack_t_cache; +static nvidia_stack_t *__nv_init_sp; + +static int nv_tce_bypass_mode = NV_TCE_BYPASS_MODE_DEFAULT; + +struct semaphore nv_linux_devices_lock; + +static NvTristate nv_chipset_is_io_coherent = NV_TRISTATE_INDETERMINATE; + +// True if all the successfully probed devices support ATS +// Assigned at device probe (module init) time +NvBool nv_ats_supported = NVCPU_IS_PPC64LE + + + + + +; + +// allow an easy way to convert all debug printfs related to events +// back and forth between 'info' and 'errors' +#if defined(NV_DBG_EVENTS) +#define NV_DBG_EVENTINFO NV_DBG_ERRORS +#else +#define NV_DBG_EVENTINFO NV_DBG_INFO +#endif + +#if defined(HDA_MAX_CODECS) +#define NV_HDA_MAX_CODECS HDA_MAX_CODECS +#else +#define NV_HDA_MAX_CODECS 8 +#endif + +/*** + *** STATIC functions, only in this file + ***/ + +/* nvos_ functions.. 
do not take a state device parameter */ +static int nvos_count_devices(void); + +static nv_alloc_t *nvos_create_alloc(struct device *, int); +static int nvos_free_alloc(nv_alloc_t *); + +/*** + *** EXPORTS to Linux Kernel + ***/ + +static irqreturn_t nvidia_isr_common_bh (void *); +static void nvidia_isr_bh_unlocked (void *); +static int nvidia_ctl_open (struct inode *, struct file *); +static int nvidia_ctl_close (struct inode *, struct file *); + +const char *nv_device_name = MODULE_NAME; +static const char *nvidia_stack_cache_name = MODULE_NAME "_stack_cache"; +static const char *nvidia_pte_cache_name = MODULE_NAME "_pte_cache"; +static const char *nvidia_p2p_page_cache_name = MODULE_NAME "_p2p_page_cache"; + +static int nvidia_open (struct inode *, struct file *); +static int nvidia_close (struct inode *, struct file *); +static unsigned int nvidia_poll (struct file *, poll_table *); +static int nvidia_ioctl (struct inode *, struct file *, unsigned int, unsigned long); + +/* character device entry points*/ +nvidia_module_t nv_fops = { + .owner = THIS_MODULE, + .module_name = MODULE_NAME, + .instance = MODULE_INSTANCE_NUMBER, + .open = nvidia_open, + .close = nvidia_close, + .ioctl = nvidia_ioctl, + .mmap = nvidia_mmap, + .poll = nvidia_poll, +}; + +#if defined(CONFIG_PM) +static int nv_pmops_suspend (struct device *dev); +static int nv_pmops_resume (struct device *dev); +static int nv_pmops_freeze (struct device *dev); +static int nv_pmops_thaw (struct device *dev); +static int nv_pmops_restore (struct device *dev); +static int nv_pmops_poweroff (struct device *dev); +static int nv_pmops_runtime_suspend (struct device *dev); +static int nv_pmops_runtime_resume (struct device *dev); + +struct dev_pm_ops nv_pm_ops = { + .suspend = nv_pmops_suspend, + .resume = nv_pmops_resume, + .freeze = nv_pmops_freeze, + .thaw = nv_pmops_thaw, + .poweroff = nv_pmops_poweroff, + .restore = nv_pmops_restore, + .runtime_suspend = nv_pmops_runtime_suspend, + .runtime_resume = nv_pmops_runtime_resume, +}; +#endif + +/*** + *** see nv.h for functions exported to other parts of resman + ***/ + +/*** + *** STATIC functions + ***/ + +#if defined(NVCPU_X86_64) +#define NV_AMD_SEV_BIT BIT(1) + +static +NvBool nv_is_sev_supported( + void +) +{ + unsigned int eax, ebx, ecx, edx; + + /* Check for the SME/SEV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return NV_FALSE; + + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + /* Check whether SEV is supported */ + if (!(eax & NV_AMD_SEV_BIT)) + return NV_FALSE; + + return NV_TRUE; +} +#endif + +static +void nv_sev_init( + void +) +{ +#if defined(MSR_AMD64_SEV) && defined(NVCPU_X86_64) + NvU32 lo_val, hi_val; + + if (!nv_is_sev_supported()) + return; + + rdmsr(MSR_AMD64_SEV, lo_val, hi_val); + + os_sev_status = lo_val; +#if defined(MSR_AMD64_SEV_ENABLED) + os_sev_enabled = (os_sev_status & MSR_AMD64_SEV_ENABLED); +#endif +#endif +} + +static +nv_alloc_t *nvos_create_alloc( + struct device *dev, + int num_pages +) +{ + nv_alloc_t *at; + unsigned int pt_size, i; + + NV_KMALLOC(at, sizeof(nv_alloc_t)); + if (at == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate alloc info\n"); + return NULL; + } + + memset(at, 0, sizeof(nv_alloc_t)); + + at->dev = dev; + pt_size = num_pages * sizeof(nvidia_pte_t *); + if (os_alloc_mem((void **)&at->page_table, pt_size) != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate page table\n"); + NV_KFREE(at, sizeof(nv_alloc_t)); + 
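+        // Only the descriptor itself has been allocated at this point, so
+        // freeing it is the only cleanup needed before bailing out.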
return NULL; + } + + memset(at->page_table, 0, pt_size); + at->num_pages = num_pages; + NV_ATOMIC_SET(at->usage_count, 0); + + for (i = 0; i < at->num_pages; i++) + { + at->page_table[i] = NV_KMEM_CACHE_ALLOC(nvidia_pte_t_cache); + if (at->page_table[i] == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate page table entry\n"); + nvos_free_alloc(at); + return NULL; + } + memset(at->page_table[i], 0, sizeof(nvidia_pte_t)); + } + + at->pid = os_get_current_process(); + + return at; +} + +static +int nvos_free_alloc( + nv_alloc_t *at +) +{ + unsigned int i; + + if (at == NULL) + return -1; + + if (NV_ATOMIC_READ(at->usage_count)) + return 1; + + for (i = 0; i < at->num_pages; i++) + { + if (at->page_table[i] != NULL) + NV_KMEM_CACHE_FREE(at->page_table[i], nvidia_pte_t_cache); + } + os_free_mem(at->page_table); + + NV_KFREE(at, sizeof(nv_alloc_t)); + + return 0; +} + +static void +nv_module_resources_exit(nv_stack_t *sp) +{ + nv_kmem_cache_free_stack(sp); + + NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_pte_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache); +} + +static int __init +nv_module_resources_init(nv_stack_t **sp) +{ + int rc = -ENOMEM; + + nvidia_stack_t_cache = NV_KMEM_CACHE_CREATE(nvidia_stack_cache_name, + nvidia_stack_t); + if (nvidia_stack_t_cache == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nvidia_stack_t cache allocation failed.\n"); + goto exit; + } + + nvidia_pte_t_cache = NV_KMEM_CACHE_CREATE(nvidia_pte_cache_name, + nvidia_pte_t); + if (nvidia_pte_t_cache == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nvidia_pte_t cache allocation failed.\n"); + goto exit; + } + + nvidia_p2p_page_t_cache = NV_KMEM_CACHE_CREATE(nvidia_p2p_page_cache_name, + nvidia_p2p_page_t); + if (nvidia_p2p_page_t_cache == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: nvidia_p2p_page_t cache allocation failed.\n"); + goto exit; + } + + rc = nv_kmem_cache_alloc_stack(sp); + if (rc < 0) + { + goto exit; + } + +exit: + if (rc < 0) + { + nv_kmem_cache_free_stack(*sp); + + NV_KMEM_CACHE_DESTROY(nvidia_p2p_page_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_pte_t_cache); + NV_KMEM_CACHE_DESTROY(nvidia_stack_t_cache); + } + + return rc; +} + + +static void +nvlink_drivers_exit(void) +{ + +#if NVCPU_IS_64_BITS + nvswitch_exit(); +#endif + + +#if defined(NVCPU_PPC64LE) + ibmnpu_exit(); +#endif + + nvlink_core_exit(); +} + + + +static int __init +nvlink_drivers_init(void) +{ + int rc = 0; + + rc = nvlink_core_init(); + if (rc < 0) + { + nv_printf(NV_DBG_INFO, "NVRM: NVLink core init failed.\n"); + return rc; + } + +#if defined(NVCPU_PPC64LE) + rc = ibmnpu_init(); + if (rc < 0) + { + nv_printf(NV_DBG_INFO, "NVRM: IBM NPU init failed.\n"); + nvlink_core_exit(); + return rc; + } +#endif + + +#if NVCPU_IS_64_BITS + rc = nvswitch_init(); + if (rc < 0) + { + nv_printf(NV_DBG_INFO, "NVRM: NVSwitch init failed.\n"); +#if defined(NVCPU_PPC64LE) + ibmnpu_exit(); +#endif + nvlink_core_exit(); + } +#endif + + + return rc; +} + + +static void +nv_module_state_exit(nv_stack_t *sp) +{ + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + + nv_teardown_pat_support(); + + nv_kthread_q_stop(&nv_deferred_close_kthread_q); + nv_kthread_q_stop(&nv_kthread_q); + + nv_lock_destroy_locks(sp, nv); +} + +static int +nv_module_state_init(nv_stack_t *sp) +{ + int rc; + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + + nv->os_state = (void *)&nv_ctl_device; + + if (!nv_lock_init_locks(sp, nv)) + { + return -ENOMEM; + } + + rc = nv_kthread_q_init(&nv_kthread_q, "nv_queue"); + if (rc != 0) + 
{ + goto exit; + } + + rc = nv_kthread_q_init(&nv_deferred_close_kthread_q, "nv_queue"); + if (rc != 0) + { + nv_kthread_q_stop(&nv_kthread_q); + goto exit; + } + + rc = nv_init_pat_support(sp); + if (rc < 0) + { + nv_kthread_q_stop(&nv_deferred_close_kthread_q); + nv_kthread_q_stop(&nv_kthread_q); + goto exit; + } + + nv_linux_devices = NULL; + NV_INIT_MUTEX(&nv_linux_devices_lock); + init_rwsem(&nv_system_pm_lock); + +#if defined(CONFIG_PM) + NV_INIT_MUTEX(&nv_system_power_state_lock); + nv_system_power_state = NV_POWER_STATE_RUNNING; + nv_system_pm_action_depth = NV_PM_ACTION_DEPTH_DEFAULT; +#endif + + NV_SPIN_LOCK_INIT(&nv_ctl_device.snapshot_timer_lock); + +exit: + if (rc < 0) + { + nv_lock_destroy_locks(sp, nv); + } + + return rc; +} + +static void __init +nv_registry_keys_init(nv_stack_t *sp) +{ + NV_STATUS status; + nv_state_t *nv = NV_STATE_PTR(&nv_ctl_device); + NvU32 data; + + /* + * Determine the TCE bypass mode here so it can be used during + * device probe. Also determine whether we should allow + * user-mode NUMA onlining of device memory. + */ + if (NVCPU_IS_PPC64LE) + { + status = rm_read_registry_dword(sp, nv, + NV_REG_TCE_BYPASS_MODE, + &data); + if ((status == NV_OK) && ((int)data != NV_TCE_BYPASS_MODE_DEFAULT)) + { + nv_tce_bypass_mode = data; + } + + if (NVreg_EnableUserNUMAManagement) + { + /* Force on the core RM registry key to match. */ + status = rm_write_registry_dword(sp, nv, "RMNumaOnlining", 1); + WARN_ON(status != NV_OK); + } + } +} + +static void __init +nv_report_applied_patches(void) +{ + unsigned i; + + for (i = 0; __nv_patches[i].short_description; i++) + { + if (i == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Applied patches:\n"); + } + + nv_printf(NV_DBG_ERRORS, + "NVRM: Patch #%d: %s\n", i + 1, __nv_patches[i].short_description); + } +} + +static void +nv_drivers_exit(void) +{ + + + + nv_pci_unregister_driver(); + + nvidia_unregister_module(&nv_fops); +} + +static int __init +nv_drivers_init(void) +{ + int rc; + + rc = nvidia_register_module(&nv_fops); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to register character device.\n"); + return rc; + } + + rc = nv_pci_register_driver(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA PCI devices found.\n"); + rc = -ENODEV; + goto exit; + } + + + + + + + + + + + +exit: + if (rc < 0) + { + nvidia_unregister_module(&nv_fops); + } + + return rc; +} + +static void +nv_module_exit(nv_stack_t *sp) +{ + nv_module_state_exit(sp); + + rm_shutdown_rm(sp); + + nv_destroy_rsync_info(); + + nvlink_drivers_exit(); + + + nv_cap_drv_exit(); + + nv_module_resources_exit(sp); +} + +static int __init +nv_module_init(nv_stack_t **sp) +{ + int rc; + + rc = nv_module_resources_init(sp); + if (rc < 0) + { + return rc; + } + + rc = nv_cap_drv_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: nv-cap-drv init failed.\n"); + goto cap_drv_exit; + } + + + rc = nvlink_drivers_init(); + if (rc < 0) + { + goto cap_drv_exit; + } + + + nv_init_rsync_info(); + nv_sev_init(); + + if (!rm_init_rm(*sp)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: rm_init_rm() failed!\n"); + rc = -EIO; + goto nvlink_exit; + } + + rc = nv_module_state_init(*sp); + if (rc < 0) + { + goto init_rm_exit; + } + + return rc; + +init_rm_exit: + rm_shutdown_rm(*sp); + +nvlink_exit: + nv_destroy_rsync_info(); + + nvlink_drivers_exit(); + + +cap_drv_exit: + nv_cap_drv_exit(); + nv_module_resources_exit(*sp); + + return rc; +} + +/* + * In this function we check for the cases where GPU exclusion is not + * honored, and issue a warning. 
+ * + * Only GPUs that support a mechanism to query UUID prior to + * initializing the GPU can be excluded, so that we can detect and + * exclude them during device probe. This function checks that an + * initialized GPU was not specified in the exclusion list, and issues a + * warning if so. + */ +static void +nv_assert_not_in_gpu_exclusion_list( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + char *uuid = rm_get_gpu_uuid(sp, nv); + + if (uuid == NULL) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Unable to read UUID"); + return; + } + + if (nv_is_uuid_in_gpu_exclusion_list(uuid)) + { + NV_DEV_PRINTF(NV_DBG_WARNINGS, nv, + "Could not exclude GPU %s because PBI is not supported\n", + uuid); + WARN_ON(1); + } + + os_free_mem(uuid); + + return; +} + +static int __init nv_caps_root_init(void) +{ + nvidia_caps_root = os_nv_cap_init("driver/" MODULE_NAME); + + return (nvidia_caps_root == NULL) ? -ENOENT : 0; +} + +static void nv_caps_root_exit(void) +{ + os_nv_cap_destroy_entry(nvidia_caps_root); + nvidia_caps_root = NULL; +} + +int __init nvidia_init_module(void) +{ + int rc; + NvU32 count; + nvidia_stack_t *sp = NULL; + const NvBool is_nvswitch_present = os_is_nvswitch_present(); + + nv_memdbg_init(); + + rc = nv_procfs_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize procfs.\n"); + return rc; + } + + rc = nv_caps_root_init(); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize capabilities.\n"); + goto procfs_exit; + } + + rc = nv_module_init(&sp); + if (rc < 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to initialize module.\n"); + goto caps_root_exit; + } + + count = nvos_count_devices(); + if ((count == 0) && (!is_nvswitch_present)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA GPU found.\n"); + rc = -ENODEV; + goto module_exit; + } + + rc = nv_drivers_init(); + if (rc < 0) + { + goto module_exit; + } + + if (num_probed_nv_devices != count) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA probe routine was not called for %d device(s).\n", + count - num_probed_nv_devices); + nv_printf(NV_DBG_ERRORS, + "NVRM: This can occur when a driver such as: \n" + "NVRM: nouveau, rivafb, nvidiafb or rivatv " + "\nNVRM: was loaded and obtained ownership of the NVIDIA device(s).\n"); + nv_printf(NV_DBG_ERRORS, + "NVRM: Try unloading the conflicting kernel module (and/or\n" + "NVRM: reconfigure your kernel without the conflicting\n" + "NVRM: driver(s)), then try loading the NVIDIA kernel module\n" + "NVRM: again.\n"); + } + + if ((num_probed_nv_devices == 0) && (!is_nvswitch_present)) + { + rc = -ENODEV; + nv_printf(NV_DBG_ERRORS, "NVRM: No NVIDIA devices probed.\n"); + goto drivers_exit; + } + + if (num_probed_nv_devices != num_nv_devices) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA probe routine failed for %d device(s).\n", + num_probed_nv_devices - num_nv_devices); + } + + if ((num_nv_devices == 0) && (!is_nvswitch_present)) + { + rc = -ENODEV; + nv_printf(NV_DBG_ERRORS, + "NVRM: None of the NVIDIA devices were initialized.\n"); + goto drivers_exit; + } + + /* + * Initialize registry keys after PCI driver registration has + * completed successfully to support per-device module + * parameters. 
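+     * (The "registry keys" referred to here are the driver's internal
+     * NVreg_* settings, not an operating-system registry.)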
+ */ + nv_registry_keys_init(sp); + + nv_report_applied_patches(); + + nv_printf(NV_DBG_ERRORS, "NVRM: loading %s\n", pNVRM_ID); + +#if defined(NV_UVM_ENABLE) + rc = nv_uvm_init(); + if (rc != 0) + { + goto drivers_exit; + } +#endif + + __nv_init_sp = sp; + + return 0; + +drivers_exit: + nv_drivers_exit(); + +module_exit: + nv_module_exit(sp); + +caps_root_exit: + nv_caps_root_exit(); + +procfs_exit: + nv_procfs_exit(); + + return rc; +} + +void nvidia_exit_module(void) +{ + nvidia_stack_t *sp = __nv_init_sp; + +#if defined(NV_UVM_ENABLE) + nv_uvm_exit(); +#endif + + nv_drivers_exit(); + + nv_module_exit(sp); + + nv_caps_root_exit(); + + nv_procfs_exit(); + + nv_memdbg_exit(); +} + +static void *nv_alloc_file_private(void) +{ + nv_linux_file_private_t *nvlfp; + unsigned int i; + + NV_KMALLOC(nvlfp, sizeof(nv_linux_file_private_t)); + if (!nvlfp) + return NULL; + + memset(nvlfp, 0, sizeof(nv_linux_file_private_t)); + + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + NV_INIT_MUTEX(&nvlfp->fops_sp_lock[i]); + } + init_waitqueue_head(&nvlfp->waitqueue); + NV_SPIN_LOCK_INIT(&nvlfp->fp_lock); + + return nvlfp; +} + +static void nv_free_file_private(nv_linux_file_private_t *nvlfp) +{ + nvidia_event_t *nvet; + + if (nvlfp == NULL) + return; + + for (nvet = nvlfp->event_data_head; nvet != NULL; nvet = nvlfp->event_data_head) + { + nvlfp->event_data_head = nvlfp->event_data_head->next; + NV_KFREE(nvet, sizeof(nvidia_event_t)); + } + + if (nvlfp->mmap_context.page_array != NULL) + { + os_free_mem(nvlfp->mmap_context.page_array); + } + + NV_KFREE(nvlfp, sizeof(nv_linux_file_private_t)); +} + + +static int nv_is_control_device( + struct inode *inode +) +{ + return (minor((inode)->i_rdev) == NV_CONTROL_DEVICE_MINOR); +} + +/* + * Search the global list of nv devices for the one with the given minor device + * number. If found, nvl is returned with nvl->ldata_lock taken. + */ +static nv_linux_state_t *find_minor(NvU32 minor) +{ + nv_linux_state_t *nvl; + + LOCK_NV_LINUX_DEVICES(); + nvl = nv_linux_devices; + while (nvl != NULL) + { + if (nvl->minor_num == minor) + { + down(&nvl->ldata_lock); + break; + } + nvl = nvl->next; + } + + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices for the one with the given gpu_id. + * If found, nvl is returned with nvl->ldata_lock taken. + */ +static nv_linux_state_t *find_gpu_id(NvU32 gpu_id) +{ + nv_linux_state_t *nvl; + + LOCK_NV_LINUX_DEVICES(); + nvl = nv_linux_devices; + while (nvl != NULL) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + if (nv->gpu_id == gpu_id) + { + down(&nvl->ldata_lock); + break; + } + nvl = nvl->next; + } + + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices for the one with the given UUID. Devices + * with missing UUID information are ignored. If found, nvl is returned with + * nvl->ldata_lock taken. + */ +nv_linux_state_t *find_uuid(const NvU8 *uuid) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + const NvU8 *dev_uuid; + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + dev_uuid = nv_get_cached_uuid(nv); + if (dev_uuid && memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + goto out; + up(&nvl->ldata_lock); + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +/* + * Search the global list of nv devices. 
The search logic is: + * + * 1) If any device has the given UUID, return it + * + * 2) If no device has the given UUID but at least one device is missing + * its UUID (for example because rm_init_adapter has not run on it yet), + * return that device. + * + * 3) If no device has the given UUID and all UUIDs are present, return NULL. + * + * In cases 1 and 2, nvl is returned with nvl->ldata_lock taken. + * + * The reason for this weird logic is because UUIDs aren't always available. See + * bug 1642200. + */ +static nv_linux_state_t *find_uuid_candidate(const NvU8 *uuid) +{ + nv_linux_state_t *nvl = NULL; + nv_state_t *nv; + const NvU8 *dev_uuid; + int use_missing; + int has_missing = 0; + + LOCK_NV_LINUX_DEVICES(); + + /* + * Take two passes through the list. The first pass just looks for the UUID. + * The second looks for the target or missing UUIDs. It would be nice if + * this could be done in a single pass by remembering which nvls are missing + * UUIDs, but we have to hold the nvl lock after we check for the UUID. + */ + for (use_missing = 0; use_missing <= 1; use_missing++) + { + for (nvl = nv_linux_devices; nvl; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + down(&nvl->ldata_lock); + dev_uuid = nv_get_cached_uuid(nv); + if (dev_uuid) + { + /* Case 1: If a device has the given UUID, return it */ + if (memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + goto out; + } + else + { + /* Case 2: If no device has the given UUID but at least one + * device is missing its UUID, return that device. */ + if (use_missing) + goto out; + has_missing = 1; + } + up(&nvl->ldata_lock); + } + + /* Case 3: If no device has the given UUID and all UUIDs are present, + * return NULL. */ + if (!has_missing) + break; + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return nvl; +} + +void nv_dev_free_stacks(nv_linux_state_t *nvl) +{ + NvU32 i; + for (i = 0; i < NV_DEV_STACK_COUNT; i++) + { + if (nvl->sp[i]) + { + nv_kmem_cache_free_stack(nvl->sp[i]); + nvl->sp[i] = NULL; + } + } +} + +static int nv_dev_alloc_stacks(nv_linux_state_t *nvl) +{ + NvU32 i; + int rc; + + for (i = 0; i < NV_DEV_STACK_COUNT; i++) + { + rc = nv_kmem_cache_alloc_stack(&nvl->sp[i]); + if (rc != 0) + { + nv_dev_free_stacks(nvl); + return rc; + } + } + + return 0; +} + +static int validate_numa_start_state(nv_linux_state_t *nvl) +{ + int rc = 0; + int numa_status = nv_get_numa_status(nvl); + + if (numa_status != NV_IOCTL_NUMA_STATUS_DISABLED) + { + if (nv_ctl_device.numa_memblock_size == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: numa memblock size of zero " + "found during device start"); + rc = -EINVAL; + } + else + { + /* Keep the individual devices consistent with the control device */ + nvl->numa_memblock_size = nv_ctl_device.numa_memblock_size; + } + } + + return rc; +} + +NV_STATUS NV_API_CALL nv_get_num_dpaux_instances(nv_state_t *nv, NvU32 *num_instances) +{ + *num_instances = nv->num_dpaux_instance; + return NV_OK; +} + +void NV_API_CALL +nv_schedule_uvm_isr(nv_state_t *nv) +{ +#if defined(NV_UVM_ENABLE) + nv_uvm_event_interrupt(nv_get_cached_uuid(nv)); +#endif +} + +/* + * Brings up the device on the first file open. Assumes nvl->ldata_lock is held. 
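+ * This takes a dynamic power reference, sets up MSI/MSI-X or legacy IRQ
+ * delivery, and calls rm_init_adapter(); nv_stop_device() reverses these
+ * steps on the last close.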
+ */ +static int nv_start_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + NvU32 msi_config = 0; +#endif + int rc = 0; + NvBool kthread_init = NV_FALSE; + NvBool power_ref = NV_FALSE; + + rc = nv_get_rsync_info(); + if (rc != 0) + { + return rc; + } + + rc = validate_numa_start_state(nvl); + if (rc != 0) + { + goto failed; + } + + if (nv_dev_is_pci(nvl->dev) && (nv->pci_info.device_id == 0)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: open of non-existent GPU with minor number %d\n", nvl->minor_num); + rc = -ENXIO; + goto failed; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE) != NV_OK) + { + rc = -EINVAL; + goto failed; + } + power_ref = NV_TRUE; + } + else + { + if (rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE) != NV_OK) + { + rc = -EINVAL; + goto failed; + } + power_ref = NV_TRUE; + } + + rc = nv_init_ibmnpu_devices(nv); + if (rc != 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to initialize ibmnpu devices attached to GPU with minor number %d\n", + nvl->minor_num); + goto failed; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rc = nv_dev_alloc_stacks(nvl); + if (rc != 0) + goto failed; + } + +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + if (nv_dev_is_pci(nvl->dev)) + { + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rm_read_registry_dword(sp, nv, NV_REG_ENABLE_MSI, &msi_config); + if (msi_config == 1) + { + if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSIX)) + { + nv_init_msix(nv); + } + if (pci_find_capability(nvl->pci_dev, PCI_CAP_ID_MSI) && + !(nv->flags & NV_FLAG_USES_MSIX)) + { + nv_init_msi(nv); + } + } + } + } +#endif + + if (((!(nv->flags & NV_FLAG_USES_MSI)) && (!(nv->flags & NV_FLAG_USES_MSIX))) + && (nv->interrupt_line == 0) && !(nv->flags & NV_FLAG_SOC_DISPLAY)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "No interrupts of any type are available. 
Cannot use this GPU.\n"); + rc = -EIO; + goto failed; + } + + rc = 0; + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + + + + } + else if (!(nv->flags & NV_FLAG_USES_MSIX)) + { + rc = request_threaded_irq(nv->interrupt_line, nvidia_isr, + nvidia_isr_kthread_bh, nv_default_irq_flags(nv), + nv_device_name, (void *)nvl); + } +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + rc = nv_request_msix_irq(nvl); + } +#endif + } + if (rc != 0) + { + if ((nv->interrupt_line != 0) && (rc == -EBUSY)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Tried to get IRQ %d, but another driver\n", + (unsigned int) nv->interrupt_line); + nv_printf(NV_DBG_ERRORS, "NVRM: has it and is not sharing it.\n"); + nv_printf(NV_DBG_ERRORS, "NVRM: You may want to verify that no audio driver"); + nv_printf(NV_DBG_ERRORS, " is using the IRQ.\n"); + } + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "request_irq() failed (%d)\n", rc); + goto failed; + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rc = os_alloc_mutex(&nvl->isr_bh_unlocked_mutex); + if (rc != 0) + goto failed; + nv_kthread_q_item_init(&nvl->bottom_half_q_item, nvidia_isr_bh_unlocked, (void *)nv); + rc = nv_kthread_q_init(&nvl->bottom_half_q, nv_device_name); + if (rc != 0) + goto failed; + kthread_init = NV_TRUE; + + rc = nv_kthread_q_init(&nvl->queue.nvk, "nv_queue"); + if (rc) + goto failed; + nv->queue = &nvl->queue; + } + + if (!rm_init_adapter(sp, nv)) + { + if (!(nv->flags & NV_FLAG_USES_MSIX) && + !(nv->flags & NV_FLAG_SOC_DISPLAY)) + { + free_irq(nv->interrupt_line, (void *) nvl); + } + else if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + + + + } +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + nv_free_msix_irq(nvl); + } +#endif + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "rm_init_adapter failed, device minor number %d\n", + nvl->minor_num); + rc = -EIO; + goto failed; + } + + { + const NvU8 *uuid = rm_get_gpu_uuid_raw(sp, nv); + + if (uuid != NULL) + { +#if defined(NV_UVM_ENABLE) + nv_uvm_notify_start_device(uuid); +#endif + } + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv_acpi_register_notifier(nvl); + } + + nv->flags |= NV_FLAG_OPEN; + + /* + * Now that RM init is done, allow dynamic power to control the GPU in FINE + * mode, if enabled. (If the mode is COARSE, this unref will do nothing + * which will cause the GPU to remain powered up.) + * This is balanced by a FINE ref increment at the beginning of + * nv_stop_device(). 
+ */ + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + + return 0; + +failed: +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + if (nv->flags & NV_FLAG_USES_MSI) + { + nv->flags &= ~NV_FLAG_USES_MSI; + NV_PCI_DISABLE_MSI(nvl->pci_dev); + if(nvl->irq_count) + NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t)); + } + if (nv->flags & NV_FLAG_USES_MSIX) + { + nv->flags &= ~NV_FLAG_USES_MSIX; + pci_disable_msix(nvl->pci_dev); + NV_KFREE(nvl->irq_count, nvl->num_intr*sizeof(nv_irq_count_info_t)); + NV_KFREE(nvl->msix_entries, nvl->num_intr*sizeof(struct msix_entry)); + } + + if (nvl->msix_bh_mutex) + { + os_free_mutex(nvl->msix_bh_mutex); + nvl->msix_bh_mutex = NULL; + } +#endif + + if (nv->queue && !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv->queue = NULL; + nv_kthread_q_stop(&nvl->queue.nvk); + } + + if (kthread_init && !(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + nv_kthread_q_stop(&nvl->bottom_half_q); + + if (nvl->isr_bh_unlocked_mutex) + { + os_free_mutex(nvl->isr_bh_unlocked_mutex); + nvl->isr_bh_unlocked_mutex = NULL; + } + + nv_dev_free_stacks(nvl); + + nv_unregister_ibmnpu_devices(nv); + + if (power_ref) + { + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + } + + nv_put_rsync_info(); + + return rc; +} + +/* + * Makes sure the device is ready for operations and increases nvl->usage_count. + * Assumes nvl->ldata_lock is held. + */ +static int nv_open_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int rc; + NV_STATUS status; + + if (os_is_vgx_hyper()) + { + /* fail open if GPU is being unbound */ + if (nv->flags & NV_FLAG_UNBIND_LOCK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Open failed as GPU is locked for unbind operation\n"); + return -ENODEV; + } + } + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Opening GPU with minor number %d\n", + nvl->minor_num); + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "Device in removal process\n"); + return -ENODEV; + } + + if ( ! (nv->flags & NV_FLAG_OPEN)) + { + /* Sanity check: !NV_FLAG_OPEN requires usage_count == 0 */ + if (NV_ATOMIC_READ(nvl->usage_count) != 0) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "Minor device %u is referenced without being open!\n", + nvl->minor_num); + WARN_ON(1); + return -EBUSY; + } + + rc = nv_start_device(nv, sp); + if (rc != 0) + return rc; + } + else if (rm_is_device_sequestered(sp, nv)) + { + /* Do not increment the usage count of sequestered devices. */ + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Device is currently unavailable\n"); + return -EBUSY; + } + + NV_ATOMIC_INC(nvl->usage_count); + return 0; +} + +static void nv_init_mapping_revocation(nv_linux_state_t *nvl, + struct file *file, + nv_linux_file_private_t *nvlfp, + struct inode *inode) +{ + down(&nvl->mmap_lock); + + /* Set up struct address_space for use with unmap_mapping_range() */ + nv_address_space_init_once(&nvlfp->mapping); + nvlfp->mapping.host = inode; + nvlfp->mapping.a_ops = inode->i_mapping->a_ops; +#if defined(NV_ADDRESS_SPACE_HAS_BACKING_DEV_INFO) + nvlfp->mapping.backing_dev_info = inode->i_mapping->backing_dev_info; +#endif + file->f_mapping = &nvlfp->mapping; + + /* Add nvlfp to list of open files in nvl for mapping revocation */ + list_add(&nvlfp->entry, &nvl->open_files); + + up(&nvl->mmap_lock); +} + +/* +** nvidia_open +** +** nv driver open entry point. Sessions are created here. 
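+**
+** Control-device opens are handed off to nvidia_ctl_open(); GPU-device opens
+** look up the device by minor number and bring it up via nv_open_device().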
+*/ +int +nvidia_open( + struct inode *inode, + struct file *file +) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = NULL; + int rc = 0; + nv_linux_file_private_t *nvlfp = NULL; + nvidia_stack_t *sp = NULL; + unsigned int i; + unsigned int k; + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_open...\n"); + + nvlfp = nv_alloc_file_private(); + if (nvlfp == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate file private!\n"); + return -ENOMEM; + } + + rc = nv_kmem_cache_alloc_stack(&sp); + if (rc != 0) + { + nv_free_file_private(nvlfp); + return rc; + } + + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + rc = nv_kmem_cache_alloc_stack(&nvlfp->fops_sp[i]); + if (rc != 0) + { + nv_kmem_cache_free_stack(sp); + for (k = 0; k < i; ++k) + { + nv_kmem_cache_free_stack(nvlfp->fops_sp[k]); + } + nv_free_file_private(nvlfp); + return rc; + } + } + + NV_SET_FILE_PRIVATE(file, nvlfp); + nvlfp->sp = sp; + + /* for control device, just jump to its open routine */ + /* after setting up the private data */ + if (nv_is_control_device(inode)) + { + rc = nvidia_ctl_open(inode, file); + if (rc != 0) + goto failed; + return rc; + } + + rc = nv_down_read_interruptible(&nv_system_pm_lock); + if (rc < 0) + goto failed; + + /* Takes nvl->ldata_lock */ + nvl = find_minor(NV_DEVICE_MINOR_NUMBER(inode)); + if (!nvl) + { + rc = -ENODEV; + up_read(&nv_system_pm_lock); + goto failed; + } + + nvlfp->nvptr = nvl; + nv = NV_STATE_PTR(nvl); + + if ((nv->flags & NV_FLAG_EXCLUDE) != 0) + { + char *uuid = rm_get_gpu_uuid(sp, nv); + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "open() not permitted for excluded %s\n", + (uuid != NULL) ? uuid : "GPU"); + if (uuid != NULL) + os_free_mem(uuid); + rc = -EPERM; + goto failed1; + } + + rc = nv_open_device(nv, sp); + /* Fall-through on error */ + + nv_assert_not_in_gpu_exclusion_list(sp, nv); + +failed1: + up(&nvl->ldata_lock); + + up_read(&nv_system_pm_lock); +failed: + if (rc != 0) + { + if (nvlfp != NULL) + { + nv_kmem_cache_free_stack(sp); + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + nv_kmem_cache_free_stack(nvlfp->fops_sp[i]); + } + nv_free_file_private(nvlfp); + NV_SET_FILE_PRIVATE(file, NULL); + } + } + else + { + nv_init_mapping_revocation(nvl, file, nvlfp, inode); + } + + return rc; +} + +static void validate_numa_shutdown_state(nv_linux_state_t *nvl) +{ + int numa_status = nv_get_numa_status(nvl); + WARN_ON((numa_status != NV_IOCTL_NUMA_STATUS_OFFLINE) && + (numa_status != NV_IOCTL_NUMA_STATUS_DISABLED)); +} + +void nv_shutdown_adapter(nvidia_stack_t *sp, + nv_state_t *nv, + nv_linux_state_t *nvl) +{ + validate_numa_shutdown_state(nvl); + + rm_disable_adapter(sp, nv); + + // It's safe to call nv_kthread_q_stop even if queue is not initialized + nv_kthread_q_stop(&nvl->bottom_half_q); + + if (nv->queue != NULL) + { + nv->queue = NULL; + nv_kthread_q_stop(&nvl->queue.nvk); + } + + if (nvl->isr_bh_unlocked_mutex) + { + os_free_mutex(nvl->isr_bh_unlocked_mutex); + nvl->isr_bh_unlocked_mutex = NULL; + } + + if (!(nv->flags & NV_FLAG_USES_MSIX) && + !(nv->flags & NV_FLAG_SOC_DISPLAY)) + { + free_irq(nv->interrupt_line, (void *)nvl); + if (nv->flags & NV_FLAG_USES_MSI) + { + NV_PCI_DISABLE_MSI(nvl->pci_dev); + if(nvl->irq_count) + NV_KFREE(nvl->irq_count, nvl->num_intr * sizeof(nv_irq_count_info_t)); + } + } + else if (nv->flags & NV_FLAG_SOC_DISPLAY) + { + + + + } +#if defined(NV_LINUX_PCIE_MSI_SUPPORTED) + else + { + nv_free_msix_irq(nvl); + pci_disable_msix(nvl->pci_dev); + nv->flags &= ~NV_FLAG_USES_MSIX; + NV_KFREE(nvl->msix_entries, nvl->num_intr*sizeof(struct 
msix_entry)); + NV_KFREE(nvl->irq_count, nvl->num_intr*sizeof(nv_irq_count_info_t)); + } +#endif + + if (nvl->msix_bh_mutex) + { + os_free_mutex(nvl->msix_bh_mutex); + nvl->msix_bh_mutex = NULL; + } + + rm_shutdown_adapter(sp, nv); +} + +/* + * Tears down the device on the last file close. Assumes nvl->ldata_lock is + * held. + */ +static void nv_stop_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + static int persistence_mode_notice_logged; + + /* + * The GPU needs to be powered on to go through the teardown sequence. + * This balances the FINE unref at the end of nv_start_device(). + */ + rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + +#if defined(NV_UVM_ENABLE) + { + const NvU8* uuid; + // Inform UVM before disabling adapter. Use cached copy + uuid = nv_get_cached_uuid(nv); + if (uuid != NULL) + { + // this function cannot fail + nv_uvm_notify_stop_device(uuid); + } + } +#endif + /* Adapter is already shutdown as part of nvidia_pci_remove */ + if (!nv->removed) + { + if (nv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + rm_disable_adapter(sp, nv); + } + else + { + nv_acpi_unregister_notifier(nvl); + nv_shutdown_adapter(sp, nv, nvl); + } + } + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + nv_dev_free_stacks(nvl); + } + + if ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) && + (!persistence_mode_notice_logged) && (!os_is_vgx_hyper())) + { + nv_printf(NV_DBG_ERRORS, "NVRM: Persistence mode is deprecated and" + " will be removed in a future release. Please use" + " nvidia-persistenced instead.\n"); + persistence_mode_notice_logged = 1; + } + + /* leave INIT flag alone so we don't reinit every time */ + nv->flags &= ~NV_FLAG_OPEN; + + nv_unregister_ibmnpu_devices(nv); + + if (!(nv->flags & NV_FLAG_PERSISTENT_SW_STATE)) + { + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_COARSE); + } + else + { + /* If in legacy persistence mode, only unref FINE refcount. */ + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + } + + nv_put_rsync_info(); +} + +/* + * Decreases nvl->usage_count, stopping the device when it reaches 0. Assumes + * nvl->ldata_lock is held. + */ +static void nv_close_device(nv_state_t *nv, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Attempting to close unopened minor device %u!\n", + nvl->minor_num); + WARN_ON(1); + return; + } + + if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count)) + nv_stop_device(nv, sp); +} + +/* +** nvidia_close +** +** Primary driver close entry point. +*/ + +static void +nvidia_close_callback( + nv_linux_file_private_t *nvlfp +) +{ + nv_linux_state_t *nvl = nvlfp->nvptr; + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = nvlfp->sp; + unsigned int i; + NvBool bRemove = NV_FALSE; + + rm_cleanup_file_private(sp, nv, &nvlfp->nvfp); + + down(&nvl->mmap_lock); + list_del(&nvlfp->entry); + up(&nvl->mmap_lock); + + down(&nvl->ldata_lock); + nv_close_device(nv, sp); + + bRemove = (!NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv)) && + (NV_ATOMIC_READ(nvl->usage_count) == 0) && + rm_get_device_remove_flag(sp, nv->gpu_id); + + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + nv_kmem_cache_free_stack(nvlfp->fops_sp[i]); + } + + nv_free_file_private(nvlfp); + + /* + * In case of surprise removal of device, we have 2 cases as below: + * + * 1> When nvidia_pci_remove is scheduled prior to nvidia_close. 
+ * nvidia_pci_remove will not destroy linux layer locks & nv linux state + * struct but will set variable nv->removed for nvidia_close. + * Once all the clients are closed, last nvidia_close will clean up linux + * layer locks and nv linux state struct. + * + * 2> When nvidia_close is scheduled prior to nvidia_pci_remove. + * This will be treated as normal working case. nvidia_close will not do + * any cleanup related to linux layer locks and nv linux state struct. + * nvidia_pci_remove when scheduled will do necessary cleanup. + */ + if ((NV_ATOMIC_READ(nvl->usage_count) == 0) && nv->removed) + { + nvidia_frontend_remove_device((void *)&nv_fops, nvl); + nv_lock_destroy_locks(sp, nv); + NV_KFREE(nvl, sizeof(nv_linux_state_t)); + } + else + { + up(&nvl->ldata_lock); + +#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE) + if (bRemove) + { + NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(nvl->pci_dev); + } +#endif + } + + nv_kmem_cache_free_stack(sp); +} + +static void nvidia_close_deferred(void *data) +{ + nv_linux_file_private_t *nvlfp = data; + + down_read(&nv_system_pm_lock); + + nvidia_close_callback(nvlfp); + + up_read(&nv_system_pm_lock); +} + +int +nvidia_close( + struct inode *inode, + struct file *file +) +{ + int rc; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nv_linux_state_t *nvl = nvlfp->nvptr; + nv_state_t *nv = NV_STATE_PTR(nvl); + + NV_DEV_PRINTF(NV_DBG_INFO, nv, "nvidia_close on GPU with minor number %d\n", NV_DEVICE_MINOR_NUMBER(inode)); + + if (nv_is_control_device(inode)) + { + return nvidia_ctl_close(inode, file); + } + + NV_SET_FILE_PRIVATE(file, NULL); + + rc = nv_down_read_interruptible(&nv_system_pm_lock); + if (rc == 0) + { + nvidia_close_callback(nvlfp); + up_read(&nv_system_pm_lock); + } + else + { + nv_kthread_q_item_init(&nvlfp->deferred_close_q_item, + nvidia_close_deferred, + nvlfp); + rc = nv_kthread_q_schedule_q_item(&nv_deferred_close_kthread_q, + &nvlfp->deferred_close_q_item); + WARN_ON(rc == 0); + } + + return 0; +} + +unsigned int +nvidia_poll( + struct file *file, + poll_table *wait +) +{ + unsigned int mask = 0; + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + unsigned long eflags; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + NV_STATUS status; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping nvidia_poll\n"); + return POLLHUP; + } + + if ((file->f_flags & O_NONBLOCK) == 0) + poll_wait(file, &nvlfp->waitqueue, wait); + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + if ((nvlfp->event_data_head != NULL) || nvlfp->dataless_event_pending) + { + mask = (POLLPRI | POLLIN); + nvlfp->dataless_event_pending = NV_FALSE; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + return mask; +} + +#define NV_CTL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) == 0) \ + { \ + status = -EINVAL; \ + goto done; \ + } \ +} + +#define NV_ACTUAL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) != 0) \ + { \ + status = -EINVAL; \ + goto done; \ + } \ +} + +/* + * Fills the ci array with the state of num_entries devices. Returns -EINVAL if + * num_entries isn't big enough to hold all available devices. 
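+ * Excluded GPUs are skipped, so callers should check each entry's 'valid'
+ * flag rather than assuming the array is densely populated.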
+ */ +static int nvidia_read_card_info(nv_ioctl_card_info_t *ci, size_t num_entries) +{ + nv_state_t *nv; + nv_linux_state_t *nvl; + size_t i = 0; + int rc = 0; + + /* Clear each card's flags field the lazy way */ + memset(ci, 0, num_entries * sizeof(ci[0])); + + LOCK_NV_LINUX_DEVICES(); + + if (num_entries < num_nv_devices) + { + rc = -EINVAL; + goto out; + } + + for (nvl = nv_linux_devices; nvl && i < num_entries; nvl = nvl->next) + { + nv = NV_STATE_PTR(nvl); + + /* We do not include excluded GPUs in the list... */ + if ((nv->flags & NV_FLAG_EXCLUDE) != 0) + continue; + + ci[i].valid = NV_TRUE; + ci[i].pci_info.domain = nv->pci_info.domain; + ci[i].pci_info.bus = nv->pci_info.bus; + ci[i].pci_info.slot = nv->pci_info.slot; + ci[i].pci_info.vendor_id = nv->pci_info.vendor_id; + ci[i].pci_info.device_id = nv->pci_info.device_id; + ci[i].gpu_id = nv->gpu_id; + ci[i].interrupt_line = nv->interrupt_line; + ci[i].reg_address = nv->regs->cpu_address; + ci[i].reg_size = nv->regs->size; + ci[i].minor_number = nvl->minor_num; + if (nv_dev_is_pci(nvl->dev)) + { + ci[i].fb_address = nv->fb->cpu_address; + ci[i].fb_size = nv->fb->size; + } + i++; + } + +out: + UNLOCK_NV_LINUX_DEVICES(); + return rc; +} + +int +nvidia_ioctl( + struct inode *inode, + struct file *file, + unsigned int cmd, + unsigned long i_arg) +{ + NV_STATUS rmStatus; + int status = 0; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nvidia_stack_t *sp = NULL; + nv_ioctl_xfer_t ioc_xfer; + void *arg_ptr = (void *) i_arg; + void *arg_copy = NULL; + size_t arg_size = 0; + int arg_cmd; + + nv_printf(NV_DBG_INFO, "NVRM: ioctl(0x%x, 0x%x, 0x%x)\n", + _IOC_NR(cmd), (unsigned int) i_arg, _IOC_SIZE(cmd)); + + status = nv_down_read_interruptible(&nv_system_pm_lock); + if (status < 0) + return status; + + down(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_IOCTL]); + sp = nvlfp->fops_sp[NV_FOPS_STACK_INDEX_IOCTL]; + + rmStatus = nv_check_gpu_state(nv); + if (rmStatus == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping nvidia_ioctl\n"); + status = -EINVAL; + goto done; + } + + arg_size = _IOC_SIZE(cmd); + arg_cmd = _IOC_NR(cmd); + + if (arg_cmd == NV_ESC_IOCTL_XFER_CMD) + { + if (arg_size != sizeof(nv_ioctl_xfer_t)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: invalid ioctl XFER structure size!\n"); + status = -EINVAL; + goto done; + } + + if (NV_COPY_FROM_USER(&ioc_xfer, arg_ptr, sizeof(ioc_xfer))) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to copy in ioctl XFER data!\n"); + status = -EFAULT; + goto done; + } + + arg_cmd = ioc_xfer.cmd; + arg_size = ioc_xfer.size; + arg_ptr = NvP64_VALUE(ioc_xfer.ptr); + + if (arg_size > NV_ABSOLUTE_MAX_IOCTL_SIZE) + { + nv_printf(NV_DBG_ERRORS, "NVRM: invalid ioctl XFER size!\n"); + status = -EINVAL; + goto done; + } + } + + NV_KMALLOC(arg_copy, arg_size); + if (arg_copy == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate ioctl memory\n"); + status = -ENOMEM; + goto done; + } + + if (NV_COPY_FROM_USER(arg_copy, arg_ptr, arg_size)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy in ioctl data!\n"); + status = -EFAULT; + goto done; + } + + switch (arg_cmd) + { + case NV_ESC_QUERY_DEVICE_INTR: + { + nv_ioctl_query_device_intr *query_intr = arg_copy; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if ((arg_size < sizeof(*query_intr)) || + (!nv->regs->map)) + { + status = -EINVAL; + goto done; + } + + query_intr->intrStatus = + *(nv->regs->map + (NV_RM_DEVICE_INTR_ADDRESS >> 
2)); + query_intr->status = NV_OK; + break; + } + + /* pass out info about the card */ + case NV_ESC_CARD_INFO: + { + size_t num_arg_devices = arg_size / sizeof(nv_ioctl_card_info_t); + + NV_CTL_DEVICE_ONLY(nv); + + status = nvidia_read_card_info(arg_copy, num_arg_devices); + break; + } + + case NV_ESC_ATTACH_GPUS_TO_FD: + { + size_t num_arg_gpus = arg_size / sizeof(NvU32); + size_t i; + + NV_CTL_DEVICE_ONLY(nv); + + if (num_arg_gpus == 0 || nvlfp->num_attached_gpus != 0 || + arg_size % sizeof(NvU32) != 0) + { + status = -EINVAL; + goto done; + } + + NV_KMALLOC(nvlfp->attached_gpus, arg_size); + if (nvlfp->attached_gpus == NULL) + { + status = -ENOMEM; + goto done; + } + memcpy(nvlfp->attached_gpus, arg_copy, arg_size); + nvlfp->num_attached_gpus = num_arg_gpus; + + for (i = 0; i < nvlfp->num_attached_gpus; i++) + { + if (nvlfp->attached_gpus[i] == 0) + { + continue; + } + + if (nvidia_dev_get(nvlfp->attached_gpus[i], sp)) + { + while (i--) + { + if (nvlfp->attached_gpus[i] != 0) + nvidia_dev_put(nvlfp->attached_gpus[i], sp); + } + NV_KFREE(nvlfp->attached_gpus, arg_size); + nvlfp->num_attached_gpus = 0; + + status = -EINVAL; + break; + } + } + + break; + } + + case NV_ESC_CHECK_VERSION_STR: + { + NV_CTL_DEVICE_ONLY(nv); + + rmStatus = rm_perform_version_check(sp, arg_copy, arg_size); + status = ((rmStatus == NV_OK) ? 0 : -EINVAL); + break; + } + + case NV_ESC_SYS_PARAMS: + { + nv_ioctl_sys_params_t *api = arg_copy; + + NV_CTL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_sys_params_t)) + { + status = -EINVAL; + goto done; + } + + /* numa_memblock_size should only be set once */ + if (nvl->numa_memblock_size == 0) + { + nvl->numa_memblock_size = api->memblock_size; + } + else + { + status = (nvl->numa_memblock_size == api->memblock_size) ? + 0 : -EBUSY; + goto done; + } + break; + } + + case NV_ESC_NUMA_INFO: + { + nv_ioctl_numa_info_t *api = arg_copy; + rmStatus = NV_OK; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_numa_info_t)) + { + status = -EINVAL; + goto done; + } + + api->offline_addresses.numEntries = + ARRAY_SIZE(api->offline_addresses.addresses), + + rmStatus = rm_get_gpu_numa_info(sp, nv, + &(api->nid), + &(api->numa_mem_addr), + &(api->numa_mem_size), + (api->offline_addresses.addresses), + &(api->offline_addresses.numEntries)); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto done; + } + + api->status = nv_get_numa_status(nvl); + api->memblock_size = nv_ctl_device.numa_memblock_size; + break; + } + + case NV_ESC_SET_NUMA_STATUS: + { + nv_ioctl_set_numa_status_t *api = arg_copy; + rmStatus = NV_OK; + + if (!NV_IS_SUSER()) + { + status = -EACCES; + goto done; + } + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (arg_size != sizeof(nv_ioctl_set_numa_status_t)) + { + status = -EINVAL; + goto done; + } + + /* + * The nv_linux_state_t for the device needs to be locked + * in order to prevent additional open()/close() calls from + * manipulating the usage count for the device while we + * determine if NUMA state can be changed. + */ + down(&nvl->ldata_lock); + + if (nv_get_numa_status(nvl) != api->status) + { + if (api->status == NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS) + { + /* + * Only the current client should have an open file + * descriptor for the device, to allow safe offlining. + */ + if (NV_ATOMIC_READ(nvl->usage_count) > 1) + { + status = -EBUSY; + goto unlock; + } + else + { + /* + * If this call fails, it indicates that RM + * is not ready to offline memory, and we should keep + * the current NUMA status of ONLINE. 
+ */ + rmStatus = rm_gpu_numa_offline(sp, nv); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto unlock; + } + } + } + + status = nv_set_numa_status(nvl, api->status); + if (status < 0) + { + if (api->status == NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS) + (void) rm_gpu_numa_online(sp, nv); + goto unlock; + } + + if (api->status == NV_IOCTL_NUMA_STATUS_ONLINE) + { + rmStatus = rm_gpu_numa_online(sp, nv); + if (rmStatus != NV_OK) + { + status = -EBUSY; + goto unlock; + } + } + } + +unlock: + up(&nvl->ldata_lock); + + break; + } + + case NV_ESC_EXPORT_TO_DMABUF_FD: + { + nv_ioctl_export_to_dma_buf_fd_t *params = arg_copy; + + if (arg_size != sizeof(nv_ioctl_export_to_dma_buf_fd_t)) + { + status = -EINVAL; + goto done; + } + + NV_ACTUAL_DEVICE_ONLY(nv); + + params->status = nv_dma_buf_export(nv, params); + + break; + } + + default: + rmStatus = rm_ioctl(sp, nv, &nvlfp->nvfp, arg_cmd, arg_copy, arg_size); + status = ((rmStatus == NV_OK) ? 0 : -EINVAL); + break; + } + +done: + up(&nvlfp->fops_sp_lock[NV_FOPS_STACK_INDEX_IOCTL]); + + up_read(&nv_system_pm_lock); + + if (arg_copy != NULL) + { + if (status != -EFAULT) + { + if (NV_COPY_TO_USER(arg_ptr, arg_copy, arg_size)) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to copy out ioctl data\n"); + status = -EFAULT; + } + } + NV_KFREE(arg_copy, arg_size); + } + + return status; +} + +irqreturn_t +nvidia_isr_msix( + int irq, + void *arg +) +{ + irqreturn_t ret; + nv_linux_state_t *nvl = (void *) arg; + + // nvidia_isr_msix() is called for each of the MSI-X vectors and they can + // run in parallel on different CPUs (cores), but this is not currently + // supported by nvidia_isr() and its children. As a big hammer fix just + // spinlock around the nvidia_isr() call to serialize them. + // + // At this point interrupts are disabled on the CPU running our ISR (see + // comments for nv_default_irq_flags()) so a plain spinlock is enough. + NV_SPIN_LOCK(&nvl->msix_isr_lock); + + ret = nvidia_isr(irq, arg); + + NV_SPIN_UNLOCK(&nvl->msix_isr_lock); + + return ret; +} + +/* + * driver receives an interrupt + * if someone waiting, then hand it off. + */ +irqreturn_t +nvidia_isr( + int irq, + void *arg +) +{ + nv_linux_state_t *nvl = (void *) arg; + nv_state_t *nv = NV_STATE_PTR(nvl); + NvU32 need_to_run_bottom_half_gpu_lock_held = 0; + NvBool rm_handled = NV_FALSE, uvm_handled = NV_FALSE, rm_fault_handling_needed = NV_FALSE; + NvU32 rm_serviceable_fault_cnt = 0; + NvU32 sec, usec; + NvU16 index = 0; + NvU64 currentTime = 0; + NvBool found_irq = NV_FALSE; + + rm_gpu_copy_mmu_faults_unlocked(nvl->sp[NV_DEV_STACK_ISR], nv, &rm_serviceable_fault_cnt); + rm_fault_handling_needed = (rm_serviceable_fault_cnt != 0); + +#if defined (NV_UVM_ENABLE) + // + // Returns NV_OK if the UVM driver handled the interrupt + // + // Returns NV_ERR_NO_INTR_PENDING if the interrupt is not for + // the UVM driver. + // + // Returns NV_WARN_MORE_PROCESSING_REQUIRED if the UVM top-half ISR was + // unable to get its lock(s), due to other (UVM) threads holding them. + // + // RM can normally treat NV_WARN_MORE_PROCESSING_REQUIRED the same as + // NV_ERR_NO_INTR_PENDING, but in some cases the extra information may + // be helpful. 
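+    // Only NV_OK marks the interrupt as handled by UVM here; the other two
+    // return codes leave uvm_handled set to NV_FALSE.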
+ // + if (nv_uvm_event_interrupt(nv_get_cached_uuid(nv)) == NV_OK) + uvm_handled = NV_TRUE; +#endif + + rm_handled = rm_isr(nvl->sp[NV_DEV_STACK_ISR], nv, + &need_to_run_bottom_half_gpu_lock_held); + + /* Replicating the logic in linux kernel to track unhandled interrupt crossing a threshold */ + if ((nv->flags & NV_FLAG_USES_MSI) || (nv->flags & NV_FLAG_USES_MSIX)) + { + if (nvl->irq_count != NULL) + { + for (index = 0; index < nvl->current_num_irq_tracked; index++) + { + if (nvl->irq_count[index].irq == irq) + { + found_irq = NV_TRUE; + break; + } + + found_irq = NV_FALSE; + } + + if (!found_irq && nvl->current_num_irq_tracked < nvl->num_intr) + { + index = nvl->current_num_irq_tracked; + nvl->irq_count[index].irq = irq; + nvl->current_num_irq_tracked++; + found_irq = NV_TRUE; + } + + if (found_irq) + { + nvl->irq_count[index].total++; + + if(rm_handled == NV_FALSE) + { + os_get_current_time(&sec, &usec); + currentTime = ((NvU64)sec) * 1000000 + (NvU64)usec; + + /* Reset unhandled count if it's been more than 0.1 seconds since the last unhandled IRQ */ + if ((currentTime - nvl->irq_count[index].last_unhandled) > RM_UNHANDLED_TIMEOUT_US) + nvl->irq_count[index].unhandled = 1; + else + nvl->irq_count[index].unhandled++; + + nvl->irq_count[index].last_unhandled = currentTime; + rm_handled = NV_TRUE; + } + + if (nvl->irq_count[index].total >= RM_THRESHOLD_TOTAL_IRQ_COUNT) + { + if (nvl->irq_count[index].unhandled > RM_THRESHOLD_UNAHNDLED_IRQ_COUNT) + nv_printf(NV_DBG_ERRORS,"NVRM: Going over RM unhandled interrupt threshold for irq %d\n", irq); + + nvl->irq_count[index].total = 0; + nvl->irq_count[index].unhandled = 0; + nvl->irq_count[index].last_unhandled = 0; + } + } + else + nv_printf(NV_DBG_ERRORS,"NVRM: IRQ number out of valid range\n"); + } + } + + if (need_to_run_bottom_half_gpu_lock_held) + { + return IRQ_WAKE_THREAD; + } + else + { + // + // If rm_isr does not need to run a bottom half and mmu_faults_copied + // indicates that bottom half is needed, then we enqueue a kthread based + // bottom half, as this specific bottom_half will acquire the GPU lock + // + if (rm_fault_handling_needed) + nv_kthread_q_schedule_q_item(&nvl->bottom_half_q, &nvl->bottom_half_q_item); + } + + return IRQ_RETVAL(rm_handled || uvm_handled || rm_fault_handling_needed); +} + +irqreturn_t +nvidia_isr_kthread_bh( + int irq, + void *data +) +{ + return nvidia_isr_common_bh(data); +} + +irqreturn_t +nvidia_isr_msix_kthread_bh( + int irq, + void *data +) +{ + NV_STATUS status; + irqreturn_t ret; + nv_state_t *nv = (nv_state_t *) data; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + // + // Synchronize kthreads servicing bottom halves for different MSI-X vectors + // as they share same pre-allocated alt-stack. 
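+    // Acquiring msix_bh_mutex serializes them; sleeping is permitted in this
+    // threaded-IRQ context, so taking a mutex here is safe.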
+ // + status = os_acquire_mutex(nvl->msix_bh_mutex); + // os_acquire_mutex can only fail if we cannot sleep and we can + WARN_ON(status != NV_OK); + + ret = nvidia_isr_common_bh(data); + + os_release_mutex(nvl->msix_bh_mutex); + + return ret; +} + +static irqreturn_t +nvidia_isr_common_bh( + void *data +) +{ + nv_state_t *nv = (nv_state_t *) data; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nvidia_stack_t *sp = nvl->sp[NV_DEV_STACK_ISR_BH]; + NV_STATUS status; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, "NVRM: GPU is lost, skipping ISR bottom half\n"); + } + else + { + rm_isr_bh(sp, nv); + } + + return IRQ_HANDLED; +} + +static void +nvidia_isr_bh_unlocked( + void * args +) +{ + nv_state_t *nv = (nv_state_t *) args; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nvidia_stack_t *sp; + NV_STATUS status; + + // + // Synchronize kthreads servicing unlocked bottom half as they + // share same pre-allocated stack for alt-stack + // + status = os_acquire_mutex(nvl->isr_bh_unlocked_mutex); + if (status != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: Unable to take bottom_half mutex!\n", + __FUNCTION__); + WARN_ON(1); + } + + sp = nvl->sp[NV_DEV_STACK_ISR_BH_UNLOCKED]; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, + "NVRM: GPU is lost, skipping unlocked ISR bottom half\n"); + } + else + { + rm_isr_bh_unlocked(sp, nv); + } + + os_release_mutex(nvl->isr_bh_unlocked_mutex); +} + +static void +nvidia_rc_timer_callback( + struct nv_timer *nv_timer +) +{ + nv_linux_state_t *nvl = container_of(nv_timer, nv_linux_state_t, rc_timer); + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = nvl->sp[NV_DEV_STACK_TIMER]; + NV_STATUS status; + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + nv_printf(NV_DBG_INFO, + "NVRM: GPU is lost, skipping device timer callbacks\n"); + return; + } + + if (rm_run_rc_callback(sp, nv) == NV_OK) + { + // set another timeout 1 sec in the future: + mod_timer(&nvl->rc_timer.kernel_timer, jiffies + HZ); + } +} + +/* +** nvidia_ctl_open +** +** nv control driver open entry point. Sessions are created here. 
+*/ +static int +nvidia_ctl_open( + struct inode *inode, + struct file *file +) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + static int count = 0; + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_open\n"); + + down(&nvl->ldata_lock); + + /* save the nv away in file->private_data */ + nvlfp->nvptr = nvl; + + if (NV_ATOMIC_READ(nvl->usage_count) == 0) + { + nv->flags |= (NV_FLAG_OPEN | NV_FLAG_CONTROL); + + if ((nv_acpi_init() < 0) && + (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to register with the ACPI subsystem!\n"); + } + } + + NV_ATOMIC_INC(nvl->usage_count); + up(&nvl->ldata_lock); + + return 0; +} + + +/* +** nvidia_ctl_close +*/ +static int +nvidia_ctl_close( + struct inode *inode, + struct file *file +) +{ + nv_alloc_t *at, *next; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_FILEP(file); + nv_state_t *nv = NV_STATE_PTR(nvl); + nv_linux_file_private_t *nvlfp = NV_GET_LINUX_FILE_PRIVATE(file); + nvidia_stack_t *sp = nvlfp->sp; + static int count = 0; + unsigned int i; + + nv_printf(NV_DBG_INFO, "NVRM: nvidia_ctl_close\n"); + + down(&nvl->ldata_lock); + if (NV_ATOMIC_DEC_AND_TEST(nvl->usage_count)) + { + nv->flags &= ~NV_FLAG_OPEN; + + if ((nv_acpi_uninit() < 0) && + (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to unregister from the ACPI subsystem!\n"); + } + } + up(&nvl->ldata_lock); + + rm_cleanup_file_private(sp, nv, &nvlfp->nvfp); + + if (nvlfp->free_list != NULL) + { + at = nvlfp->free_list; + while (at != NULL) + { + next = at->next; + if (at->pid == os_get_current_process()) + NV_PRINT_AT(NV_DBG_MEMINFO, at); + nv_free_pages(nv, at->num_pages, + at->flags.contig, + at->cache_type, + (void *)at); + at = next; + } + } + + if (nvlfp->num_attached_gpus != 0) + { + size_t i; + + for (i = 0; i < nvlfp->num_attached_gpus; i++) + { + if (nvlfp->attached_gpus[i] != 0) + nvidia_dev_put(nvlfp->attached_gpus[i], sp); + } + + NV_KFREE(nvlfp->attached_gpus, sizeof(NvU32) * nvlfp->num_attached_gpus); + nvlfp->num_attached_gpus = 0; + } + + for (i = 0; i < NV_FOPS_STACK_INDEX_COUNT; ++i) + { + nv_kmem_cache_free_stack(nvlfp->fops_sp[i]); + } + + nv_free_file_private(nvlfp); + NV_SET_FILE_PRIVATE(file, NULL); + + nv_kmem_cache_free_stack(sp); + + return 0; +} + + +void NV_API_CALL +nv_set_dma_address_size( + nv_state_t *nv, + NvU32 phys_addr_bits +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 start_addr = nv_get_dma_start_address(nv); + NvU64 new_mask = (((NvU64)1) << phys_addr_bits) - 1; + + nvl->dma_dev.addressable_range.limit = start_addr + new_mask; + + /* + * The only scenario in which we definitely should not update the DMA mask + * is on POWER, when using TCE bypass mode (see nv_get_dma_start_address() + * for details), since the meaning of the DMA mask is overloaded in that + * case. 
+ */ + if (!nvl->tce_bypass_enabled) + { + dma_set_mask(&nvl->pci_dev->dev, new_mask); + /* Certain kernels have a bug which causes pci_set_consistent_dma_mask + * to call GPL sme_active symbol, this bug has already been fixed in a + * minor release update but detect the failure scenario here to prevent + * an installation regression */ +#if !NV_IS_EXPORT_SYMBOL_GPL_sme_active + dma_set_coherent_mask(&nvl->pci_dev->dev, new_mask); +#endif + } +} + +static NvUPtr +nv_map_guest_pages(nv_alloc_t *at, + NvU64 address, + NvU32 page_count, + NvU32 page_idx) +{ + struct page **pages; + NvU32 j; + NvUPtr virt_addr; + + NV_KMALLOC(pages, sizeof(struct page *) * page_count); + if (pages == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate vmap() page descriptor table!\n"); + return 0; + } + + for (j = 0; j < page_count; j++) + { + pages[j] = NV_GET_PAGE_STRUCT(at->page_table[page_idx+j]->phys_addr); + } + + virt_addr = nv_vm_map_pages(pages, page_count, + at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted); + NV_KFREE(pages, sizeof(struct page *) * page_count); + + return virt_addr; +} + +NV_STATUS NV_API_CALL +nv_alias_pages( + nv_state_t *nv, + NvU32 page_cnt, + NvU32 contiguous, + NvU32 cache_type, + NvU64 guest_id, + NvU64 *pte_array, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU32 i=0; + nvidia_pte_t *page_ptr = NULL; + + at = nvos_create_alloc(nvl->dev, page_cnt); + + if (at == NULL) + { + return NV_ERR_NO_MEMORY; + } + + at->cache_type = cache_type; + if (contiguous) + at->flags.contig = NV_TRUE; +#if defined(NVCPU_AARCH64) + if (at->cache_type != NV_MEMORY_CACHED) + at->flags.aliased = NV_TRUE; +#endif + + at->flags.guest = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + for (i=0; i < at->num_pages; ++i) + { + page_ptr = at->page_table[i]; + + if (contiguous && i>0) + { + page_ptr->dma_addr = pte_array[0] + (i << PAGE_SHIFT); + } + else + { + page_ptr->dma_addr = pte_array[i]; + } + + page_ptr->phys_addr = page_ptr->dma_addr; + + /* aliased pages will be mapped on demand. */ + page_ptr->virt_addr = 0x0; + } + + at->guest_id = guest_id; + *priv_data = at; + NV_ATOMIC_INC(at->usage_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +/* + * This creates a dummy nv_alloc_t for peer IO mem, so that it can + * be mapped using NvRmMapMemory. + */ +NV_STATUS NV_API_CALL nv_register_peer_io_mem( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 i; + NvU64 addr; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + + // IO regions should be uncached and contiguous + at->cache_type = NV_MEMORY_UNCACHED; + at->flags.contig = NV_TRUE; +#if defined(NVCPU_AARCH64) + at->flags.aliased = NV_TRUE; +#endif + at->flags.peer_io = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + addr = phys_addr[0]; + + for (i = 0; i < page_count; i++) + { + at->page_table[i]->phys_addr = addr; + addr += PAGE_SIZE; + } + + // No struct page array exists for this memory. 
+ at->user_pages = NULL; + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_peer_io_mem( + nv_state_t *nv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + nvos_free_alloc(at); +} + +/* + * By registering user pages, we create a dummy nv_alloc_t for it, so that the + * rest of the RM can treat it like any other alloc. + * + * This also converts the page array to an array of physical addresses. + */ +NV_STATUS NV_API_CALL nv_register_user_pages( + nv_state_t *nv, + NvU64 page_count, + NvU64 *phys_addr, + void *import_priv, + void **priv_data +) +{ + nv_alloc_t *at; + NvU64 i; + struct page **user_pages; + nv_linux_state_t *nvl; + nvidia_pte_t *page_ptr; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_register_user_pages: 0x%x\n", page_count); + user_pages = *priv_data; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + { + return NV_ERR_NO_MEMORY; + } + + /* + * Anonymous memory currently must be write-back cacheable, and we can't + * enforce contiguity. + */ + at->cache_type = NV_MEMORY_UNCACHED; +#if defined(NVCPU_AARCH64) + at->flags.aliased = NV_TRUE; +#endif + + at->flags.user = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + for (i = 0; i < page_count; i++) + { + /* + * We only assign the physical address and not the DMA address, since + * this allocation hasn't been DMA-mapped yet. + */ + page_ptr = at->page_table[i]; + page_ptr->phys_addr = page_to_phys(user_pages[i]); + + phys_addr[i] = page_ptr->phys_addr; + } + + /* Save off the user pages array to be restored later */ + at->user_pages = user_pages; + + /* Save off the import private data to be returned later */ + if (import_priv != NULL) + { + at->import_priv = import_priv; + } + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_user_pages( + nv_state_t *nv, + NvU64 page_count, + void **import_priv, + void **priv_data +) +{ + nv_alloc_t *at = *priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_user_pages: 0x%x\n", page_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + WARN_ON(!at->flags.user); + + /* Restore the user pages array for the caller to handle */ + *priv_data = at->user_pages; + + /* Return the import private data for the caller to handle */ + if (import_priv != NULL) + { + *import_priv = at->import_priv; + } + + nvos_free_alloc(at); +} + +/* + * This creates a dummy nv_alloc_t for existing physical allocations, so + * that it can be mapped using NvRmMapMemory and BAR2 code path. + */ +NV_STATUS NV_API_CALL nv_register_phys_pages( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + NvU32 cache_type, + void **priv_data +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NvU64 i; + NvU64 addr; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + /* + * Setting memory flags to cacheable and discontiguous. 
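Aside: nv_register_peer_io_mem(), nv_register_user_pages(), and nv_register_phys_pages() above all follow the same pattern: memory the driver did not allocate is wrapped in a descriptor so the rest of the RM can map it like any other allocation, with one-bit flags recording where the pages came from. A rough sketch of that idea, using hypothetical names rather than the driver's nv_alloc_t:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-in for the driver's allocation descriptor. */
struct alloc_desc {
    struct {
        unsigned contig   : 1;  /* physically contiguous         */
        unsigned peer_io  : 1;  /* peer device IO memory         */
        unsigned user     : 1;  /* pinned user (anonymous) pages */
        unsigned physical : 1;  /* raw physical range            */
    } flags;
    uint64_t  page_count;
    uint64_t *phys_addr;        /* one entry per page */
};

/* Wrap a contiguous IO range: record base + i * page_size for each page. */
static struct alloc_desc *register_io_range(uint64_t base, uint64_t page_count,
                                            uint64_t page_size)
{
    struct alloc_desc *d = calloc(1, sizeof(*d));
    if (d == NULL)
        return NULL;

    d->phys_addr = calloc(page_count, sizeof(*d->phys_addr));
    if (d->phys_addr == NULL)
    {
        free(d);
        return NULL;
    }

    d->flags.contig  = 1;
    d->flags.peer_io = 1;
    d->page_count    = page_count;

    for (uint64_t i = 0; i < page_count; i++)
        d->phys_addr[i] = base + i * page_size;

    return d;
}

A caller would pair this with a matching unregister that frees the descriptor once mappings are torn down, in the spirit of nv_unregister_peer_io_mem() above.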
+ */ + at->cache_type = cache_type; + + /* + * Only physical address is available so we don't try to reuse existing + * mappings + */ + at->flags.physical = NV_TRUE; + + at->order = get_order(at->num_pages * PAGE_SIZE); + + for (i = 0, addr = phys_addr[0]; i < page_count; addr = phys_addr[++i]) + { + at->page_table[i]->phys_addr = addr; + } + + at->user_pages = NULL; + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_register_sgt( + nv_state_t *nv, + NvU64 *phys_addr, + NvU64 page_count, + NvU32 cache_type, + void **priv_data, + struct sg_table *import_sgt, + void *import_priv +) +{ + nv_alloc_t *at; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + unsigned int i, j = 0; + NvU64 sg_addr, sg_off, sg_len; + struct scatterlist *sg; + + at = nvos_create_alloc(nvl->dev, page_count); + + if (at == NULL) + return NV_ERR_NO_MEMORY; + + /* Populate phys addrs with DMA addrs from SGT */ + for_each_sg(import_sgt->sgl, sg, import_sgt->nents, i) + { + /* + * It is possible for dma_map_sg() to merge scatterlist entries, so + * make sure we account for that here. + */ + for (sg_addr = sg_dma_address(sg), sg_len = sg_dma_len(sg), sg_off = 0; + (sg_off < sg_len) && (j < page_count); + sg_off += PAGE_SIZE, j++) + { + phys_addr[j] = sg_addr + sg_off; + } + } + + /* + * Setting memory flags to cacheable and discontiguous. + */ + at->cache_type = cache_type; + + at->import_sgt = import_sgt; + + /* Save off the import private data to be returned later */ + if (import_priv != NULL) + { + at->import_priv = import_priv; + } + + at->order = get_order(at->num_pages * PAGE_SIZE); + + *priv_data = at; + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; +} + +void NV_API_CALL nv_unregister_sgt( + nv_state_t *nv, + struct sg_table **import_sgt, + void **import_priv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_unregister_sgt\n"); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + /* Restore the imported SGT for the caller to handle */ + *import_sgt = at->import_sgt; + + /* Return the import private data for the caller to handle */ + if (import_priv != NULL) + { + *import_priv = at->import_priv; + } + + nvos_free_alloc(at); +} + +void NV_API_CALL nv_unregister_phys_pages( + nv_state_t *nv, + void *priv_data +) +{ + nv_alloc_t *at = priv_data; + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + nvos_free_alloc(at); +} + +NV_STATUS NV_API_CALL nv_get_num_phys_pages( + void *pAllocPrivate, + NvU32 *pNumPages +) +{ + nv_alloc_t *at = pAllocPrivate; + + if (!pNumPages) { + return NV_ERR_INVALID_ARGUMENT; + } + + *pNumPages = at->num_pages; + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_get_phys_pages( + void *pAllocPrivate, + void *pPages, + NvU32 *pNumPages +) +{ + nv_alloc_t *at = pAllocPrivate; + struct page **pages = (struct page **)pPages; + NvU32 page_count; + int i; + + if (!pNumPages || !pPages) { + return NV_ERR_INVALID_ARGUMENT; + } + + page_count = NV_MIN(*pNumPages, at->num_pages); + + for (i = 0; i < page_count; i++) { + pages[i] = NV_GET_PAGE_STRUCT(at->page_table[i]->phys_addr); + } + + *pNumPages = page_count; + + return NV_OK; +} + +void* NV_API_CALL nv_alloc_kernel_mapping( + nv_state_t *nv, + void *pAllocPrivate, + NvU64 pageIndex, + NvU32 pageOffset, + NvU64 size, + void **pPrivate +) +{ + nv_alloc_t *at = pAllocPrivate; + NvU32 j, page_count; + NvUPtr virt_addr; + struct page **pages; + NvBool isUserAllocatedMem; + + // + // For User allocated memory (like ErrorNotifier's) which is NOT allocated + // nor 
owned by RM, the RM driver just stores the physical address + // corresponding to that memory and does not map it until required. + // In that case, in page tables the virt_addr == 0, so first we need to map + // those pages to obtain virtual address. + // + isUserAllocatedMem = at->flags.user && + !at->page_table[pageIndex]->virt_addr && + at->page_table[pageIndex]->phys_addr; + + // + // User memory may NOT have kernel VA. So check this and fallback to else + // case to create one. + // + if (((size + pageOffset) <= PAGE_SIZE) && + !at->flags.guest && !at->flags.aliased && + !isUserAllocatedMem && !at->flags.physical) + { + *pPrivate = NULL; + return (void *)(at->page_table[pageIndex]->virt_addr + pageOffset); + } + else + { + size += pageOffset; + page_count = (size >> PAGE_SHIFT) + ((size & ~NV_PAGE_MASK) ? 1 : 0); + + if (at->flags.guest) + { + virt_addr = nv_map_guest_pages(at, + nv->bars[NV_GPU_BAR_INDEX_REGS].cpu_address, + page_count, pageIndex); + } + else + { + NV_KMALLOC(pages, sizeof(struct page *) * page_count); + if (pages == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate vmap() page descriptor table!\n"); + return NULL; + } + + for (j = 0; j < page_count; j++) + pages[j] = NV_GET_PAGE_STRUCT(at->page_table[pageIndex+j]->phys_addr); + + virt_addr = nv_vm_map_pages(pages, page_count, + at->cache_type == NV_MEMORY_CACHED, at->flags.unencrypted); + NV_KFREE(pages, sizeof(struct page *) * page_count); + } + + if (virt_addr == 0) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to map pages!\n"); + return NULL; + } + + *pPrivate = (void *)(NvUPtr)page_count; + return (void *)(virt_addr + pageOffset); + } + + return NULL; +} + +NV_STATUS NV_API_CALL nv_free_kernel_mapping( + nv_state_t *nv, + void *pAllocPrivate, + void *address, + void *pPrivate +) +{ + nv_alloc_t *at = pAllocPrivate; + NvUPtr virt_addr; + NvU32 page_count; + + virt_addr = ((NvUPtr)address & NV_PAGE_MASK); + page_count = (NvUPtr)pPrivate; + + if (at->flags.guest) + { + nv_iounmap((void *)virt_addr, (page_count * PAGE_SIZE)); + } + else if (pPrivate != NULL) + { + nv_vm_unmap_pages(virt_addr, page_count); + } + + return NV_OK; +} + +NV_STATUS NV_API_CALL nv_alloc_pages( + nv_state_t *nv, + NvU32 page_count, + NvBool contiguous, + NvU32 cache_type, + NvBool zeroed, + NvBool unencrypted, + NvU64 *pte_array, + void **priv_data +) +{ + nv_alloc_t *at; + NV_STATUS status = NV_ERR_NO_MEMORY; + nv_linux_state_t *nvl = NULL; + NvBool will_remap = NV_FALSE; + NvU32 i; + struct device *dev = NULL; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_alloc_pages: %d pages\n", page_count); + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: contig %d cache_type %d\n", + contiguous, cache_type); + + // + // system memory allocation can be associated with a client instead of a gpu + // handle the case where per device state is NULL + // + if(nv) + { + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + will_remap = nv_requires_dma_remap(nv); + dev = nvl->dev; + } + + if (nv_encode_caching(NULL, cache_type, NV_MEMORY_TYPE_SYSTEM)) + return NV_ERR_NOT_SUPPORTED; + + at = nvos_create_alloc(dev, page_count); + if (at == NULL) + return NV_ERR_NO_MEMORY; + + at->cache_type = cache_type; + + if (contiguous) + at->flags.contig = NV_TRUE; + if (zeroed) + at->flags.zeroed = NV_TRUE; +#if defined(NVCPU_AARCH64) + if (at->cache_type != NV_MEMORY_CACHED) + at->flags.aliased = NV_TRUE; +#endif + if (unencrypted) + at->flags.unencrypted = NV_TRUE; + +#if defined(NVCPU_PPC64LE) + /* + * Starting on Power9 systems, DMA addresses for NVLink are no longer the + * same 
as used over PCIe. There is an address compression scheme required + * for NVLink ONLY which impacts the upper address bits of the DMA address. + * + * This divergence between PCIe and NVLink DMA mappings breaks assumptions + * in the driver where during initialization we allocate system memory + * for the GPU to access over PCIe before NVLink is trained -- and some of + * these mappings persist on the GPU. If these persistent mappings are not + * equivalent they will cause invalid DMA accesses from the GPU once we + * switch to NVLink. + * + * To work around this we limit all system memory allocations from the driver + * during the period before NVLink is enabled to be from NUMA node 0 (CPU 0) + * which has a CPU real address with the upper address bits (above bit 42) + * set to 0. Effectively making the PCIe and NVLink DMA mappings equivalent + * allowing persistent system memory mappings already programmed on the GPU + * to remain valid after NVLink is enabled. + * + * See Bug 1920398 for more details. + */ + if (nv && nvl->npu && !nvl->dma_dev.nvlink) + at->flags.node0 = NV_TRUE; +#endif + + if (at->flags.contig) + status = nv_alloc_contig_pages(nv, at); + else + status = nv_alloc_system_pages(nv, at); + + if (status != NV_OK) + goto failed; + + for (i = 0; i < ((contiguous) ? 1 : page_count); i++) + { + /* + * The contents of the pte_array[] depend on whether or not this device + * requires DMA-remapping. If it does, it should be the phys addresses + * used by the DMA-remapping paths, otherwise it should be the actual + * address that the device should use for DMA (which, confusingly, may + * be different than the CPU physical address, due to a static DMA + * offset). + */ + if ((nv == NULL) || will_remap) + { + pte_array[i] = at->page_table[i]->phys_addr; + } + else + { + pte_array[i] = nv_phys_to_dma(dev, + at->page_table[i]->phys_addr); + } + } + + *priv_data = at; + NV_ATOMIC_INC(at->usage_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + return NV_OK; + +failed: + nvos_free_alloc(at); + + return status; +} + +NV_STATUS NV_API_CALL nv_free_pages( + nv_state_t *nv, + NvU32 page_count, + NvBool contiguous, + NvU32 cache_type, + void *priv_data +) +{ + NV_STATUS rmStatus = NV_OK; + nv_alloc_t *at = priv_data; + + nv_printf(NV_DBG_MEMINFO, "NVRM: VM: nv_free_pages: 0x%x\n", page_count); + + NV_PRINT_AT(NV_DBG_MEMINFO, at); + + /* + * If the 'at' usage count doesn't drop to zero here, not all of + * the user mappings have been torn down in time - we can't + * safely free the memory. We report success back to the RM, but + * defer the actual free operation until later. + * + * This is described in greater detail in the comments above the + * nvidia_vma_(open|release)() callbacks in nv-mmap.c. 
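Aside: nv_alloc_kernel_mapping() above sizes its temporary mapping by rounding (pageOffset + size) up to whole pages before deciding whether an existing kernel virtual address can be reused. The rounding itself is the usual divide-and-round-up on page boundaries, as in this small sketch, which assumes the common 4 KiB page configuration rather than the driver's NV_PAGE_MASK definition:

#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12                        /* 4 KiB pages assumed */
#define SKETCH_PAGE_SIZE  (1u << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK  (~((uint64_t)SKETCH_PAGE_SIZE - 1))

/* Pages needed to cover 'size' bytes starting at 'offset' within a page. */
static uint64_t pages_needed(uint64_t offset, uint64_t size)
{
    size += offset;
    return (size >> SKETCH_PAGE_SHIFT) + ((size & ~SKETCH_PAGE_MASK) ? 1 : 0);
}

int main(void)
{
    assert(pages_needed(0, 1) == 1);                 /* one byte -> one page   */
    assert(pages_needed(0, SKETCH_PAGE_SIZE) == 1);  /* exactly one page       */
    assert(pages_needed(8, SKETCH_PAGE_SIZE) == 2);  /* offset pushes it over  */
    return 0;
}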
+ */ + if (!NV_ATOMIC_DEC_AND_TEST(at->usage_count)) + return NV_OK; + + if (!at->flags.guest) + { + if (at->flags.contig) + nv_free_contig_pages(at); + else + nv_free_system_pages(at); + } + + nvos_free_alloc(at); + + return rmStatus; +} + +NvBool nv_lock_init_locks +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + nv_linux_state_t *nvl; + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + NV_INIT_MUTEX(&nvl->ldata_lock); + NV_INIT_MUTEX(&nvl->mmap_lock); + + NV_ATOMIC_SET(nvl->usage_count, 0); + + if (!rm_init_event_locks(sp, nv)) + return NV_FALSE; + + return NV_TRUE; +} + +void nv_lock_destroy_locks +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + rm_destroy_event_locks(sp, nv); +} + +void NV_API_CALL nv_post_event( + nv_event_t *event, + NvHandle handle, + NvU32 index, + NvU32 info32, + NvU16 info16, + NvBool data_valid +) +{ + nv_linux_file_private_t *nvlfp = nv_get_nvlfp_from_nvfp(event->nvfp); + unsigned long eflags; + nvidia_event_t *nvet; + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + if (data_valid) + { + NV_KMALLOC_ATOMIC(nvet, sizeof(nvidia_event_t)); + if (nvet == NULL) + { + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + return; + } + + if (nvlfp->event_data_tail != NULL) + nvlfp->event_data_tail->next = nvet; + if (nvlfp->event_data_head == NULL) + nvlfp->event_data_head = nvet; + nvlfp->event_data_tail = nvet; + nvet->next = NULL; + + nvet->event = *event; + nvet->event.hObject = handle; + nvet->event.index = index; + nvet->event.info32 = info32; + nvet->event.info16 = info16; + } + // + // 'event_pending' is interpreted by nvidia_poll() and nv_get_event() to + // mean that an event without data is pending. Therefore, only set it to + // true here if newly posted event is dataless. + // + else + { + nvlfp->dataless_event_pending = NV_TRUE; + } + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + wake_up_interruptible(&nvlfp->waitqueue); +} + +NvBool NV_API_CALL nv_is_rm_firmware_active( + nv_state_t *nv +) +{ + if (rm_firmware_active) + { + // "all" here means all GPUs + if (strcmp(rm_firmware_active, "all") == 0) + return NV_TRUE; + } + return NV_FALSE; +} + +const char *nv_firmware_path( + nv_firmware_t fw_type +) +{ + switch (fw_type) + { + case NV_FIRMWARE_GSP: + return NV_FIRMWARE_GSP_FILENAME; + case NV_FIRMWARE_GSP_LOG: + return NV_FIRMWARE_GSP_LOG_FILENAME; + } + return ""; +} + +const void* NV_API_CALL nv_get_firmware( + nv_state_t *nv, + nv_firmware_t fw_type, + const void **fw_buf, + NvU32 *fw_size +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + const struct firmware *fw; + + // path is relative to /lib/firmware + // if this fails it will print an error to dmesg + if (request_firmware(&fw, nv_firmware_path(fw_type), nvl->dev) != 0) + return NULL; + + *fw_size = fw->size; + *fw_buf = fw->data; + + return fw; +} + +void NV_API_CALL nv_put_firmware( + const void *fw_handle +) +{ + release_firmware(fw_handle); +} + +nv_file_private_t* NV_API_CALL nv_get_file_private( + NvS32 fd, + NvBool ctl, + void **os_private +) +{ + struct file *filp = NULL; + nv_linux_file_private_t *nvlfp = NULL; + dev_t rdev = 0; + + filp = fget(fd); + + if (filp == NULL || !NV_FILE_INODE(filp)) + { + goto fail; + } + + rdev = (NV_FILE_INODE(filp))->i_rdev; + + if (MAJOR(rdev) != NV_MAJOR_DEVICE_NUMBER) + { + goto fail; + } + + if (ctl) + { + if (MINOR(rdev) != NV_CONTROL_DEVICE_MINOR) + goto fail; + } + else + { + NvBool found = NV_FALSE; + int i; + + for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++) + { + if ((nv_minor_num_table[i] != NULL) && 
(MINOR(rdev) == i)) + { + found = NV_TRUE; + break; + } + } + + if (!found) + goto fail; + } + + nvlfp = NV_GET_LINUX_FILE_PRIVATE(filp); + + *os_private = filp; + + return &nvlfp->nvfp; + +fail: + + if (filp != NULL) + { + fput(filp); + } + + return NULL; +} + +void NV_API_CALL nv_put_file_private( + void *os_private +) +{ + struct file *filp = os_private; + fput(filp); +} + +int NV_API_CALL nv_get_event( + nv_file_private_t *nvfp, + nv_event_t *event, + NvU32 *pending +) +{ + nv_linux_file_private_t *nvlfp = nv_get_nvlfp_from_nvfp(nvfp); + nvidia_event_t *nvet; + unsigned long eflags; + + NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags); + + nvet = nvlfp->event_data_head; + if (nvet == NULL) + { + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + return NV_ERR_GENERIC; + } + + *event = nvet->event; + + if (nvlfp->event_data_tail == nvet) + nvlfp->event_data_tail = NULL; + nvlfp->event_data_head = nvet->next; + + *pending = (nvlfp->event_data_head != NULL); + + NV_SPIN_UNLOCK_IRQRESTORE(&nvlfp->fp_lock, eflags); + + NV_KFREE(nvet, sizeof(nvidia_event_t)); + + return NV_OK; +} + +int NV_API_CALL nv_start_rc_timer( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (nv->rc_timer_enabled) + return -1; + + nv_printf(NV_DBG_INFO, "NVRM: initializing rc timer\n"); + + nv_timer_setup(&nvl->rc_timer, nvidia_rc_timer_callback); + + nv->rc_timer_enabled = 1; + + // set the timeout for 1 second in the future: + mod_timer(&nvl->rc_timer.kernel_timer, jiffies + HZ); + + nv_printf(NV_DBG_INFO, "NVRM: rc timer initialized\n"); + + return 0; +} + +int NV_API_CALL nv_stop_rc_timer( + nv_state_t *nv +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + if (!nv->rc_timer_enabled) + return -1; + + nv_printf(NV_DBG_INFO, "NVRM: stopping rc timer\n"); + nv->rc_timer_enabled = 0; + del_timer_sync(&nvl->rc_timer.kernel_timer); + nv_printf(NV_DBG_INFO, "NVRM: rc timer stopped\n"); + + return 0; +} + +#define SNAPSHOT_TIMER_FREQ (jiffies + HZ / NV_SNAPSHOT_TIMER_HZ) + +static void snapshot_timer_callback(struct nv_timer *timer) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + if (nvl->snapshot_callback != NULL) + { + nvl->snapshot_callback(nv->profiler_context); + mod_timer(&timer->kernel_timer, SNAPSHOT_TIMER_FREQ); + } + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); +} + +void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context)) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + + nvl->snapshot_callback = snapshot_callback; + nv_timer_setup(&nvl->snapshot_timer, snapshot_timer_callback); + mod_timer(&nvl->snapshot_timer.kernel_timer, SNAPSHOT_TIMER_FREQ); +} + +void NV_API_CALL nv_stop_snapshot_timer(void) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + NvBool timer_active; + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + timer_active = nvl->snapshot_callback != NULL; + nvl->snapshot_callback = NULL; + NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); + + if (timer_active) + del_timer_sync(&nvl->snapshot_timer.kernel_timer); +} + +void NV_API_CALL nv_flush_snapshot_timer(void) +{ + nv_linux_state_t *nvl = &nv_ctl_device; + nv_state_t *nv = NV_STATE_PTR(nvl); + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nvl->snapshot_timer_lock, flags); + if (nvl->snapshot_callback != NULL) + nvl->snapshot_callback(nv->profiler_context); + 
NV_SPIN_UNLOCK_IRQRESTORE(&nvl->snapshot_timer_lock, flags); +} + +static int __init +nvos_count_devices(void) +{ + int count; + + count = nv_pci_count_devices(); + + + + + return count; +} + +NvBool nvos_is_chipset_io_coherent(void) +{ + if (nv_chipset_is_io_coherent == NV_TRISTATE_INDETERMINATE) + { + nvidia_stack_t *sp = NULL; + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: cannot allocate stack for platform coherence check callback \n"); + WARN_ON(1); + return NV_FALSE; + } + + nv_chipset_is_io_coherent = rm_is_chipset_io_coherent(sp); + + nv_kmem_cache_free_stack(sp); + } + + return nv_chipset_is_io_coherent; +} + +#if defined(CONFIG_PM) +static NV_STATUS +nv_power_management( + nv_state_t *nv, + nv_pm_action_t pm_action +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + int status = NV_OK; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = nv_check_gpu_state(nv); + if (status == NV_ERR_GPU_IS_LOST) + { + NV_DEV_PRINTF(NV_DBG_INFO, nv, "GPU is lost, skipping PM event\n"); + goto failure; + } + + switch (pm_action) + { + case NV_PM_ACTION_STANDBY: + /* fall through */ + case NV_PM_ACTION_HIBERNATE: + { + status = rm_power_management(sp, nv, pm_action); + + nv_kthread_q_stop(&nvl->bottom_half_q); + + nv_disable_pat_support(); + break; + } + case NV_PM_ACTION_RESUME: + { + nv_enable_pat_support(); + + nv_kthread_q_item_init(&nvl->bottom_half_q_item, + nvidia_isr_bh_unlocked, (void *)nv); + + status = nv_kthread_q_init(&nvl->bottom_half_q, nv_device_name); + if (status != NV_OK) + break; + + status = rm_power_management(sp, nv, pm_action); + break; + } + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + } + +failure: + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nv_restore_user_channels( + nv_state_t *nv +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_OPEN) == 0) + { + goto done; + } + + status = rm_restart_user_channels(sp, nv); + WARN_ON(status != NV_OK); + + down(&nvl->mmap_lock); + + nv_set_safe_to_mmap_locked(nv, NV_TRUE); + + up(&nvl->mmap_lock); + + rm_unref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + +done: + up(&nvl->ldata_lock); + + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nv_preempt_user_channels( + nv_state_t *nv +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + nv_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_OPEN) == 0) + { + goto done; + } + + status = rm_ref_dynamic_power(sp, nv, NV_DYNAMIC_PM_FINE); + WARN_ON(status != NV_OK); + + down(&nvl->mmap_lock); + + nv_set_safe_to_mmap_locked(nv, NV_FALSE); + nv_revoke_gpu_mappings_locked(nv); + + up(&nvl->mmap_lock); + + status = rm_stop_user_channels(sp, nv); + WARN_ON(status != NV_OK); + +done: + up(&nvl->ldata_lock); + + nv_kmem_cache_free_stack(sp); + + return status; +} + +static NV_STATUS +nvidia_suspend( + struct device *dev, + nv_pm_action_t pm_action, + NvBool is_procfs_suspend +) +{ + NV_STATUS status = NV_OK; + struct pci_dev *pci_dev = NULL; + nv_linux_state_t *nvl; + nv_state_t *nv; + + if (nv_dev_is_pci(dev)) + { + pci_dev = to_pci_dev(dev); + nvl = pci_get_drvdata(pci_dev); + } + 
else + { + nvl = dev_get_drvdata(dev); + } + nv = NV_STATE_PTR(nvl); + + down(&nvl->ldata_lock); + + if (((nv->flags & NV_FLAG_OPEN) == 0) && + ((nv->flags & NV_FLAG_PERSISTENT_SW_STATE) == 0)) + { + goto done; + } + + if ((nv->flags & NV_FLAG_SUSPENDED) != 0) + { + nvl->suspend_count++; + goto pci_pm; + } + + if (nv->preserve_vidmem_allocations && !is_procfs_suspend) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "PreserveVideoMemoryAllocations module parameter is set. " + "System Power Management attempted without driver procfs suspend interface. " + "Please refer to the 'Configuring Power Management Support' section in the driver README.\n"); + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + nvidia_modeset_suspend(nv->gpu_id); + + status = nv_power_management(nv, pm_action); + + if (status != NV_OK) + { + nvidia_modeset_resume(nv->gpu_id); + goto done; + } + else + { + nv->flags |= NV_FLAG_SUSPENDED; + } + +pci_pm: + /* + * Check if PCI power state should be D0 during system suspend. The PCI PM + * core will change the power state only if the driver has not saved the + * state in it's suspend callback. + */ + if ((nv->d0_state_in_suspend) && (pci_dev != NULL) && + !is_procfs_suspend && (pm_action == NV_PM_ACTION_STANDBY)) + { + pci_save_state(pci_dev); + } + +done: + up(&nvl->ldata_lock); + + return status; +} + +static NV_STATUS +nvidia_resume( + struct device *dev, + nv_pm_action_t pm_action +) +{ + NV_STATUS status = NV_OK; + struct pci_dev *pci_dev; + nv_linux_state_t *nvl; + nv_state_t *nv; + + if (nv_dev_is_pci(dev)) + { + pci_dev = to_pci_dev(dev); + nvl = pci_get_drvdata(pci_dev); + } + else + { + nvl = dev_get_drvdata(dev); + } + nv = NV_STATE_PTR(nvl); + + down(&nvl->ldata_lock); + + if ((nv->flags & NV_FLAG_SUSPENDED) == 0) + { + goto done; + } + + if (nvl->suspend_count != 0) + { + nvl->suspend_count--; + } + else + { + status = nv_power_management(nv, pm_action); + + if (status == NV_OK) + { + nvidia_modeset_resume(nv->gpu_id); + nv->flags &= ~NV_FLAG_SUSPENDED; + } + } + +done: + up(&nvl->ldata_lock); + + return status; +} + +static NV_STATUS +nv_resume_devices( + nv_pm_action_t pm_action, + nv_pm_action_depth_t pm_action_depth +) +{ + nv_linux_state_t *nvl; + NvBool resume_devices = NV_TRUE; + NV_STATUS status; + + if (pm_action_depth == NV_PM_ACTION_DEPTH_MODESET) + { + goto resume_modeset; + } + + if (pm_action_depth == NV_PM_ACTION_DEPTH_UVM) + { + resume_devices = NV_FALSE; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + if (resume_devices) + { + status = nvidia_resume(nvl->dev, pm_action); + WARN_ON(status != NV_OK); + } + } + + UNLOCK_NV_LINUX_DEVICES(); + + status = nv_uvm_resume(); + WARN_ON(status != NV_OK); + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + status = nv_restore_user_channels(NV_STATE_PTR(nvl)); + WARN_ON(status != NV_OK); + } + + UNLOCK_NV_LINUX_DEVICES(); + +resume_modeset: + nvidia_modeset_resume(0); + + return NV_OK; +} + +static NV_STATUS +nv_suspend_devices( + nv_pm_action_t pm_action, + nv_pm_action_depth_t pm_action_depth +) +{ + nv_linux_state_t *nvl; + NvBool resume_devices = NV_FALSE; + NV_STATUS status = NV_OK; + + nvidia_modeset_suspend(0); + + if (pm_action_depth == NV_PM_ACTION_DEPTH_MODESET) + { + return NV_OK; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next) + { + status = nv_preempt_user_channels(NV_STATE_PTR(nvl)); + WARN_ON(status != NV_OK); + } + + 
UNLOCK_NV_LINUX_DEVICES(); + + if (status == NV_OK) + { + status = nv_uvm_suspend(); + WARN_ON(status != NV_OK); + } + if (status != NV_OK) + { + goto done; + } + + if (pm_action_depth == NV_PM_ACTION_DEPTH_UVM) + { + return NV_OK; + } + + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL && status == NV_OK; nvl = nvl->next) + { + status = nvidia_suspend(nvl->dev, pm_action, NV_TRUE); + WARN_ON(status != NV_OK); + } + if (status != NV_OK) + { + resume_devices = NV_TRUE; + } + + UNLOCK_NV_LINUX_DEVICES(); + +done: + if (status != NV_OK) + { + LOCK_NV_LINUX_DEVICES(); + + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + if (resume_devices) + { + nvidia_resume(nvl->dev, pm_action); + } + + nv_restore_user_channels(NV_STATE_PTR(nvl)); + } + + UNLOCK_NV_LINUX_DEVICES(); + } + + return status; +} + +NV_STATUS +nv_set_system_power_state( + nv_power_state_t power_state, + nv_pm_action_depth_t pm_action_depth +) +{ + NV_STATUS status; + nv_pm_action_t pm_action; + + switch (power_state) + { + case NV_POWER_STATE_IN_HIBERNATE: + pm_action = NV_PM_ACTION_HIBERNATE; + break; + case NV_POWER_STATE_IN_STANDBY: + pm_action = NV_PM_ACTION_STANDBY; + break; + case NV_POWER_STATE_RUNNING: + pm_action = NV_PM_ACTION_RESUME; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + down(&nv_system_power_state_lock); + + if (nv_system_power_state == power_state) + { + status = NV_OK; + goto done; + } + + if (power_state == NV_POWER_STATE_RUNNING) + { + status = nv_resume_devices(pm_action, nv_system_pm_action_depth); + up_write(&nv_system_pm_lock); + } + else + { + if (nv_system_power_state != NV_POWER_STATE_RUNNING) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + nv_system_pm_action_depth = pm_action_depth; + + down_write(&nv_system_pm_lock); + status = nv_suspend_devices(pm_action, nv_system_pm_action_depth); + if (status != NV_OK) + { + up_write(&nv_system_pm_lock); + goto done; + } + } + + nv_system_power_state = power_state; + +done: + up(&nv_system_power_state_lock); + + return status; +} + +int nv_pmops_suspend( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_suspend(dev, NV_PM_ACTION_STANDBY, NV_FALSE); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_resume( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_resume(dev, NV_PM_ACTION_RESUME); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_freeze( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_suspend(dev, NV_PM_ACTION_HIBERNATE, NV_FALSE); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_thaw( + struct device *dev +) +{ + return 0; +} + +int nv_pmops_restore( + struct device *dev +) +{ + NV_STATUS status; + + status = nvidia_resume(dev, NV_PM_ACTION_RESUME); + return (status == NV_OK) ? 0 : -EIO; +} + +int nv_pmops_poweroff( + struct device *dev +) +{ + return 0; +} + +static int +nvidia_transition_dynamic_power( + struct device *dev, + NvBool enter +) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + nv_linux_state_t *nvl = pci_get_drvdata(pci_dev); + nv_state_t *nv = NV_STATE_PTR(nvl); + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if ((nv->flags & (NV_FLAG_OPEN | NV_FLAG_PERSISTENT_SW_STATE)) == 0) + { + return 0; + } + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return -ENOMEM; + } + + status = rm_transition_dynamic_power(sp, nv, enter); + + nv_kmem_cache_free_stack(sp); + + return (status == NV_OK) ? 
0 : -EIO; +} + +int nv_pmops_runtime_suspend( + struct device *dev +) +{ + return nvidia_transition_dynamic_power(dev, NV_TRUE); +} + +int nv_pmops_runtime_resume( + struct device *dev +) +{ + return nvidia_transition_dynamic_power(dev, NV_FALSE); +} +#endif /* defined(CONFIG_PM) */ + +nv_state_t* NV_API_CALL nv_get_adapter_state( + NvU32 domain, + NvU8 bus, + NvU8 slot +) +{ + nv_linux_state_t *nvl; + + LOCK_NV_LINUX_DEVICES(); + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + if (nv->pci_info.domain == domain && nv->pci_info.bus == bus + && nv->pci_info.slot == slot) + { + UNLOCK_NV_LINUX_DEVICES(); + return nv; + } + } + UNLOCK_NV_LINUX_DEVICES(); + + return NULL; +} + +nv_state_t* NV_API_CALL nv_get_ctl_state(void) +{ + return NV_STATE_PTR(&nv_ctl_device); +} + +NV_STATUS NV_API_CALL nv_log_error( + nv_state_t *nv, + NvU32 error_number, + const char *format, + va_list ap +) +{ + NV_STATUS status = NV_OK; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + nv_report_error(nvl->pci_dev, error_number, format, ap); +#if defined(CONFIG_CRAY_XT) + status = nvos_forward_error_to_cray(nvl->pci_dev, error_number, + format, ap); +#endif + + return status; +} + +NvU64 NV_API_CALL nv_get_dma_start_address( + nv_state_t *nv +) +{ +#if defined(NVCPU_PPC64LE) + struct pci_dev *pci_dev; + dma_addr_t dma_addr; + NvU64 saved_dma_mask; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* + * If TCE bypass is disabled via a module parameter, then just return + * the default (which is 0). + * + * Otherwise, the DMA start address only needs to be set once, and it + * won't change afterward. Just return the cached value if asked again, + * to avoid the kernel printing redundant messages to the kernel + * log when we call pci_set_dma_mask(). + */ + if ((nv_tce_bypass_mode == NV_TCE_BYPASS_MODE_DISABLE) || + (nvl->tce_bypass_enabled)) + { + return nvl->dma_dev.addressable_range.start; + } + + pci_dev = nvl->pci_dev; + + /* + * Linux on IBM POWER8 offers 2 different DMA set-ups, sometimes + * referred to as "windows". + * + * The "default window" provides a 2GB region of PCI address space + * located below the 32-bit line. The IOMMU is used to provide a + * "rich" mapping--any page in system memory can be mapped at an + * arbitrary address within this window. The mappings are dynamic + * and pass in and out of being as pci_map*()/pci_unmap*() calls + * are made. + * + * Dynamic DMA Windows (sometimes "Huge DDW") provides a linear + * mapping of the system's entire physical address space at some + * fixed offset above the 59-bit line. IOMMU is still used, and + * pci_map*()/pci_unmap*() are still required, but mappings are + * static. They're effectively set up in advance, and any given + * system page will always map to the same PCI bus address. I.e. + * physical 0x00000000xxxxxxxx => PCI 0x08000000xxxxxxxx + * + * This driver does not support the 2G default window because + * of its limited size, and for reasons having to do with UVM. + * + * Linux on POWER8 will only provide the DDW-style full linear + * mapping when the driver claims support for 64-bit DMA addressing + * (a pre-requisite because the PCI addresses used in this case will + * be near the top of the 64-bit range). The linear mapping + * is not available in all system configurations. + * + * Detect whether the linear mapping is present by claiming + * 64-bit support and then mapping physical page 0. 
For historical + * reasons, Linux on POWER8 will never map a page to PCI address 0x0. + * In the "default window" case page 0 will be mapped to some + * non-zero address below the 32-bit line. In the + * DDW/linear-mapping case, it will be mapped to address 0 plus + * some high-order offset. + * + * If the linear mapping is present and sane then return the offset + * as the starting address for all DMA mappings. + */ + saved_dma_mask = pci_dev->dma_mask; + if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64)) != 0) + { + goto done; + } + + dma_addr = pci_map_single(pci_dev, NULL, 1, DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pci_dev, dma_addr)) + { + pci_set_dma_mask(pci_dev, saved_dma_mask); + goto done; + } + + pci_unmap_single(pci_dev, dma_addr, 1, DMA_BIDIRECTIONAL); + + /* + * From IBM: "For IODA2, native DMA bypass or KVM TCE-based implementation + * of full 64-bit DMA support will establish a window in address-space + * with the high 14 bits being constant and the bottom up-to-50 bits + * varying with the mapping." + * + * Unfortunately, we don't have any good interfaces or definitions from + * the kernel to get information about the DMA offset assigned by OS. + * However, we have been told that the offset will be defined by the top + * 14 bits of the address, and bits 40-49 will not vary for any DMA + * mappings until 1TB of system memory is surpassed; this limitation is + * essential for us to function properly since our current GPUs only + * support 40 physical address bits. We are in a fragile place where we + * need to tell the OS that we're capable of 64-bit addressing, while + * relying on the assumption that the top 24 bits will not vary in this + * case. + * + * The way we try to compute the window, then, is mask the trial mapping + * against the DMA capabilities of the device. That way, devices with + * greater addressing capabilities will only take the bits it needs to + * define the window. + */ + if ((dma_addr & DMA_BIT_MASK(32)) != 0) + { + /* + * Huge DDW not available - page 0 mapped to non-zero address below + * the 32-bit line. + */ + nv_printf(NV_DBG_WARNINGS, + "NVRM: DMA window limited by platform\n"); + pci_set_dma_mask(pci_dev, saved_dma_mask); + goto done; + } + else if ((dma_addr & saved_dma_mask) != 0) + { + NvU64 memory_size = os_get_num_phys_pages() * PAGE_SIZE; + if ((dma_addr & ~saved_dma_mask) != + ((dma_addr + memory_size) & ~saved_dma_mask)) + { + /* + * The physical window straddles our addressing limit boundary, + * e.g., for an adapter that can address up to 1TB, the window + * crosses the 40-bit limit so that the lower end of the range + * has different bits 63:40 than the higher end of the range. + * We can only handle a single, static value for bits 63:40, so + * we must fall back here. 
+ */ + nv_printf(NV_DBG_WARNINGS, + "NVRM: DMA window limited by memory size\n"); + pci_set_dma_mask(pci_dev, saved_dma_mask); + goto done; + } + } + + nvl->tce_bypass_enabled = NV_TRUE; + nvl->dma_dev.addressable_range.start = dma_addr & ~(saved_dma_mask); + + /* Update the coherent mask to match */ + dma_set_coherent_mask(&pci_dev->dev, pci_dev->dma_mask); + +done: + return nvl->dma_dev.addressable_range.start; +#else + return 0; +#endif +} + +NV_STATUS NV_API_CALL nv_set_primary_vga_status( + nv_state_t *nv +) +{ + /* IORESOURCE_ROM_SHADOW wasn't added until 2.6.10 */ +#if defined(IORESOURCE_ROM_SHADOW) + nv_linux_state_t *nvl; + struct pci_dev *pci_dev; + + nvl = NV_GET_NVL_FROM_NV_STATE(nv); + pci_dev = nvl->pci_dev; + + nv->primary_vga = ((NV_PCI_RESOURCE_FLAGS(pci_dev, PCI_ROM_RESOURCE) & + IORESOURCE_ROM_SHADOW) == IORESOURCE_ROM_SHADOW); + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL nv_pci_trigger_recovery( + nv_state_t *nv +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; +#if defined(NV_PCI_ERROR_RECOVERY) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + /* + * Calling readl() on PPC64LE will allow the kernel to check its state for + * the device and update it accordingly. This needs to be done before + * checking if the PCI channel is offline, so that we don't check stale + * state. + * + * This will also kick off the recovery process for the device. + */ + if (NV_PCI_ERROR_RECOVERY_ENABLED()) + { + if (readl(nv->regs->map) == 0xFFFFFFFF) + { + if (pci_channel_offline(nvl->pci_dev)) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "PCI channel for the device is offline\n"); + status = NV_OK; + } + } + } +#endif + return status; +} + +NvBool NV_API_CALL nv_requires_dma_remap( + nv_state_t *nv +) +{ + NvBool dma_remap = NV_FALSE; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + dma_remap = !nv_dma_maps_swiotlb(nvl->dev); + return dma_remap; +} + +/* + * Intended for use by external kernel modules to list nvidia gpu ids. + */ +NvBool nvidia_get_gpuid_list(NvU32 *gpu_ids, NvU32 *gpu_count) +{ + nv_linux_state_t *nvl; + unsigned int count; + NvBool ret = NV_TRUE; + + LOCK_NV_LINUX_DEVICES(); + + count = 0; + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + count++; + + if (*gpu_count == 0) + { + goto done; + } + else if ((*gpu_count) < count) + { + ret = NV_FALSE; + goto done; + } + + count = 0; + for (nvl = nv_linux_devices; nvl != NULL; nvl = nvl->next) + { + nv_state_t *nv = NV_STATE_PTR(nvl); + gpu_ids[count++] = nv->gpu_id; + } + + +done: + + *gpu_count = count; + + UNLOCK_NV_LINUX_DEVICES(); + + return ret; +} + +/* + * Kernel-level analog to nvidia_open, intended for use by external + * kernel modules. This increments the ref count of the device with + * the given gpu_id and makes sure the device has been initialized. + * + * Clients of this interface are counted by the RM reset path, to ensure a + * GPU is not reset while the GPU is active. + * + * Returns -ENODEV if the given gpu_id does not exist. + */ +int nvidia_dev_get(NvU32 gpu_id, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + int rc; + + /* Takes nvl->ldata_lock */ + nvl = find_gpu_id(gpu_id); + if (!nvl) + return -ENODEV; + + rc = nv_open_device(NV_STATE_PTR(nvl), sp); + + if (rc == 0) + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_TRUE) != NV_OK); + + up(&nvl->ldata_lock); + return rc; +} + +/* + * Kernel-level analog to nvidia_close, intended for use by external + * kernel modules. 
This decrements the ref count of the device with + * the given gpu_id, potentially tearing it down. + */ +void nvidia_dev_put(NvU32 gpu_id, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + + /* Takes nvl->ldata_lock */ + nvl = find_gpu_id(gpu_id); + if (!nvl) + return; + + nv_close_device(NV_STATE_PTR(nvl), sp); + + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_FALSE) != NV_OK); + + up(&nvl->ldata_lock); +} + +/* + * Like nvidia_dev_get but uses UUID instead of gpu_id. Note that this may + * trigger initialization and teardown of unrelated devices to look up their + * UUIDs. + * + * Clients of this interface are counted by the RM reset path, to ensure a + * GPU is not reset while the GPU is active. + */ +int nvidia_dev_get_uuid(const NvU8 *uuid, nvidia_stack_t *sp) +{ + nv_state_t *nv = NULL; + nv_linux_state_t *nvl = NULL; + const NvU8 *dev_uuid; + int rc = 0; + + /* Takes nvl->ldata_lock */ + nvl = find_uuid_candidate(uuid); + while (nvl) + { + nv = NV_STATE_PTR(nvl); + + /* + * If the device is missing its UUID, this call exists solely so + * rm_get_gpu_uuid_raw will be called and we can inspect the UUID. + */ + rc = nv_open_device(nv, sp); + if (rc != 0) + goto out; + + /* The UUID should always be present following nv_open_device */ + dev_uuid = nv_get_cached_uuid(nv); + WARN_ON(!dev_uuid); + if (dev_uuid && memcmp(dev_uuid, uuid, GPU_UUID_LEN) == 0) + break; + + /* No match, try again. */ + nv_close_device(nv, sp); + up(&nvl->ldata_lock); + nvl = find_uuid_candidate(uuid); + } + + if (nvl) + { + rc = 0; + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_TRUE) != NV_OK); + } + else + rc = -ENODEV; + +out: + if (nvl) + up(&nvl->ldata_lock); + return rc; +} + +/* + * Like nvidia_dev_put but uses UUID instead of gpu_id. 
+ */ +void nvidia_dev_put_uuid(const NvU8 *uuid, nvidia_stack_t *sp) +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return; + + nv_close_device(NV_STATE_PTR(nvl), sp); + + WARN_ON(rm_set_external_kernel_client_count(sp, NV_STATE_PTR(nvl), NV_FALSE) != NV_OK); + + up(&nvl->ldata_lock); +} + +int nvidia_dev_block_gc6(const NvU8 *uuid, nvidia_stack_t *sp) + +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return -ENODEV; + + if (rm_ref_dynamic_power(sp, NV_STATE_PTR(nvl), NV_DYNAMIC_PM_FINE) != NV_OK) + { + up(&nvl->ldata_lock); + return -EINVAL; + } + + up(&nvl->ldata_lock); + + return 0; +} + +int nvidia_dev_unblock_gc6(const NvU8 *uuid, nvidia_stack_t *sp) + +{ + nv_linux_state_t *nvl; + + /* Callers must already have called nvidia_dev_get_uuid() */ + + /* Takes nvl->ldata_lock */ + nvl = find_uuid(uuid); + if (!nvl) + return -ENODEV; + + rm_unref_dynamic_power(sp, NV_STATE_PTR(nvl), NV_DYNAMIC_PM_FINE); + + up(&nvl->ldata_lock); + + return 0; +} + +NV_STATUS NV_API_CALL nv_get_device_memory_config( + nv_state_t *nv, + NvU64 *compr_addr_sys_phys, + NvU64 *addr_guest_phys, + NvU32 *addr_width, + NvU32 *granularity, + NvS32 *node_id +) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (!nv_platform_supports_numa(nvl)) + { + return NV_ERR_NOT_SUPPORTED; + } + +#if defined(NVCPU_PPC64LE) + nv_npu_numa_info_t *numa_info; + + numa_info = &nvl->npu->numa_info; + + if (node_id != NULL) + { + *node_id = nvl->numa_info.node_id; + } + + if (compr_addr_sys_phys != NULL) + { + *compr_addr_sys_phys = + numa_info->compr_sys_phys_addr; + } + + if (addr_guest_phys != NULL) + { + *addr_guest_phys = + numa_info->guest_phys_addr; + } + + if (addr_width != NULL) + { + *addr_width = nv_volta_dma_addr_size - nv_volta_addr_space_width; + } + + if (granularity != NULL) + { + *granularity = nv_volta_addr_space_width; + } + + status = NV_OK; +#endif + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + return status; +} + +#if defined(NVCPU_PPC64LE) + +NV_STATUS NV_API_CALL nv_get_nvlink_line_rate( + nv_state_t *nvState, + NvU32 *linerate +) +{ +#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT) && defined(NV_OF_GET_PROPERTY_PRESENT) + + nv_linux_state_t *nvl; + struct pci_dev *npuDev; + NvU32 *pSpeedPtr = NULL; + NvU32 speed; + int len; + + if (nvState != NULL) + nvl = NV_GET_NVL_FROM_NV_STATE(nvState); + else + return NV_ERR_INVALID_ARGUMENT; + + if (!nvl->npu) + { + return NV_ERR_NOT_SUPPORTED; + } + + npuDev = nvl->npu->devs[0]; + if (!npuDev->dev.of_node) + { + nv_printf(NV_DBG_ERRORS, "NVRM: %s: OF Node not found in IBM-NPU device node\n", + __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + pSpeedPtr = (NvU32 *) of_get_property(npuDev->dev.of_node, "ibm,nvlink-speed", &len); + + if (pSpeedPtr) + { + speed = (NvU32) be32_to_cpup(pSpeedPtr); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + + if (!speed) + { + return NV_ERR_NOT_SUPPORTED; + } + else + { + *linerate = speed; + } + + return NV_OK; + +#endif + + return NV_ERR_NOT_SUPPORTED; +} + +#endif + +NV_STATUS NV_API_CALL nv_indicate_idle( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + struct file *file = nvl->sysfs_config_file; + loff_t f_pos = 0; + char buf; + + 
pm_runtime_put_noidle(dev); + +#if defined(NV_SEQ_READ_ITER_PRESENT) + { + struct kernfs_open_file *of = ((struct seq_file *)file->private_data)->private; + struct kernfs_node *kn; + + mutex_lock(&of->mutex); + kn = of->kn; + if (kn != NULL && atomic_inc_unless_negative(&kn->active)) + { + if ((kn->attr.ops != NULL) && (kn->attr.ops->read != NULL)) + { + kn->attr.ops->read(of, &buf, 1, f_pos); + } + atomic_dec(&kn->active); + } + mutex_unlock(&of->mutex); + } +#else +#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG) + kernel_read(file, &buf, 1, &f_pos); +#else + kernel_read(file, f_pos, &buf, 1); +#endif +#endif + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL nv_indicate_not_idle( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + pm_runtime_get_noresume(dev); + + nvl->is_forced_shutdown = NV_TRUE; + pci_bus_type.shutdown(dev); + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void NV_API_CALL nv_idle_holdoff( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + pm_runtime_get_noresume(dev); +#endif +} + +NvBool NV_API_CALL nv_dynamic_power_available( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + return nvl->sysfs_config_file != NULL; +#else + return NV_FALSE; +#endif +} + +/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */ +void nv_linux_add_device_locked(nv_linux_state_t *nvl) +{ + if (nv_linux_devices == NULL) { + nv_linux_devices = nvl; + } + else + { + nv_linux_state_t *tnvl; + for (tnvl = nv_linux_devices; tnvl->next != NULL; tnvl = tnvl->next); + tnvl->next = nvl; + } +} + +/* caller should hold nv_linux_devices_lock using LOCK_NV_LINUX_DEVICES */ +void nv_linux_remove_device_locked(nv_linux_state_t *nvl) +{ + if (nvl == nv_linux_devices) { + nv_linux_devices = nvl->next; + } + else + { + nv_linux_state_t *tnvl; + for (tnvl = nv_linux_devices; tnvl->next != nvl; tnvl = tnvl->next); + tnvl->next = nvl->next; + } +} + +void NV_API_CALL nv_control_soc_irqs(nv_state_t *nv, NvBool bEnable) +{ + int count; + + if (bEnable) + { + for (count = 0; count < nv->num_soc_irqs; count++) + { + nv->soc_irq_info[count].bh_pending = NV_FALSE; + nv->current_soc_irq = -1; + enable_irq(nv->soc_irq_info[count].irq_num); + } + } + else + { + for (count = 0; count < nv->num_soc_irqs; count++) + { + disable_irq_nosync(nv->soc_irq_info[count].irq_num); + } + } +} + +NvU32 NV_API_CALL nv_get_dev_minor(nv_state_t *nv) +{ + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + + return nvl->minor_num; +} + +NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap(int fd, int *duped_fd) +{ + + *duped_fd = nvlink_cap_acquire(fd, NVLINK_CAP_FABRIC_MANAGEMENT); + if (*duped_fd < 0) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; + + + +} + +/* + * Wakes up the NVIDIA GPU HDA codec and contoller by reading + * codec proc file. + */ +void NV_API_CALL nv_audio_dynamic_power( + nv_state_t *nv +) +{ +/* + * The runtime power management for nvidia HDA controller can be possible + * after commit 07f4f97d7b4b ("vga_switcheroo: Use device link for HDA + * controller"). This commit has also moved 'PCI_CLASS_MULTIMEDIA_HD_AUDIO' + * macro from to . 
+ * If 'NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT' is not defined, then + * this function will be stub function. + * + * Also, check if runtime PM is enabled in the kernel (with + * 'NV_PM_RUNTIME_AVAILABLE') and stub this function if it is disabled. This + * function uses kernel fields only present when the kconfig has runtime PM + * enabled. + */ +#if defined(NV_PCI_CLASS_MULTIMEDIA_HD_AUDIO_PRESENT) && defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + struct pci_dev *audio_pci_dev, *pci_dev; + struct snd_card *card; + + if (!nv_dev_is_pci(dev)) + return; + + pci_dev = to_pci_dev(dev); + + audio_pci_dev = os_pci_init_handle(NV_PCI_DOMAIN_NUMBER(pci_dev), + NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), + 1, NULL, NULL); + + if (audio_pci_dev == NULL) + return; + + /* + * Check if HDA controller is in pm suspended state. The HDA contoller + * can not be runtime resumed if this API is called during system + * suspend/resume time and HDA controller is in pm suspended state. + */ + if (audio_pci_dev->dev.power.is_suspended) + return; + + card = pci_get_drvdata(audio_pci_dev); + if (card == NULL) + return; + + /* + * Commit be57bfffb7b5 ("ALSA: hda: move hda_codec.h to include/sound") + * in v4.20-rc1 moved "hda_codec.h" header file from the private sound + * folder to include/sound. + */ +#if defined(NV_SOUND_HDA_CODEC_H_PRESENT) + { + struct list_head *p; + struct hda_codec *codec = NULL; + unsigned int cmd, res; + + /* + * Traverse the list of devices which the sound card maintains and + * search for HDA codec controller. + */ + list_for_each_prev(p, &card->devices) + { + struct snd_device *pdev = list_entry(p, struct snd_device, list); + + if (pdev->type == SNDRV_DEV_CODEC) + { + codec = pdev->device_data; + + /* + * NVIDIA HDA codec controller uses linux kernel HDA codec + * driver. Commit 05852448690d ("ALSA: hda - Support indirect + * execution of verbs") added support for overriding exec_verb. + * This codec->core.exec_verb will be codec_exec_verb() for + * NVIDIA HDA codec driver. + */ + if (codec->core.exec_verb == NULL) + { + return; + } + + break; + } + } + + if (codec == NULL) + { + return; + } + + /* If HDA codec controller is already runtime active, then return */ + if (snd_hdac_is_power_on(&codec->core)) + { + return; + } + + /* + * Encode codec verb for getting vendor ID from root node. + * Refer Intel High Definition Audio Specification for more details. + */ + cmd = (codec->addr << 28) | (AC_NODE_ROOT << 20) | + (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; + + /* + * It will internally increment the runtime PM refcount, + * wake-up the audio codec controller and send the HW + * command for getting vendor ID. Once the vendor ID will be + * returned back, then it will decrement the runtime PM refcount + * and runtime suspend audio codec controller again (If refcount is + * zero) once auto suspend counter expires. + */ + codec->core.exec_verb(&codec->core, cmd, 0, &res); + } +#else + { + int codec_addr; + + /* + * The filp_open() call below depends on the current task's fs_struct + * (current->fs), which may already be NULL if this is called during + * process teardown. 
+ */ + if (current->fs == NULL) + return; + + /* If device is runtime active, then return */ + if (audio_pci_dev->dev.power.runtime_status == RPM_ACTIVE) + return; + + for (codec_addr = 0; codec_addr < NV_HDA_MAX_CODECS; codec_addr++) + { + char filename[48]; + NvU8 buf; + int ret; + + ret = snprintf(filename, sizeof(filename), + "/proc/asound/card%d/codec#%d", + card->number, codec_addr); + + if (ret > 0 && ret < sizeof(filename) && + (os_open_and_read_file(filename, &buf, 1) == NV_OK)) + { + break; + } + } + } +#endif +#endif +} + +static int nv_match_dev_state(const void *data, struct file *filp, unsigned fd) +{ + nv_linux_state_t *nvl = NULL; + dev_t rdev = 0; + + if (filp == NULL || + filp->private_data == NULL || + NV_FILE_INODE(filp) == NULL) + return 0; + + rdev = (NV_FILE_INODE(filp))->i_rdev; + if (MAJOR(rdev) != NV_MAJOR_DEVICE_NUMBER) + return 0; + + nvl = NV_GET_NVL_FROM_FILEP(filp); + if (nvl == NULL) + return 0; + + return (data == nvl); +} + +NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *nv) +{ + struct files_struct *files = current->files; + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + +#ifdef NV_ITERATE_FD_PRESENT + return !!iterate_fd(files, 0, nv_match_dev_state, nvl); +#else + struct fdtable *fdtable; + int ret_val = 0; + int fd = 0; + + if (files == NULL) + return 0; + + spin_lock(&files->file_lock); + + for (fdtable = files_fdtable(files); fd < fdtable->max_fds; fd++) + { + struct file *filp; + +#ifdef READ_ONCE + filp = READ_ONCE(fdtable->fd[fd]); +#else + filp = ACCESS_ONCE(fdtable->fd[fd]); + smp_read_barrier_depends(); +#endif + if (filp == NULL) + continue; + + ret_val = nv_match_dev_state(nvl, filp, fd); + if (ret_val) + break; + } + + spin_unlock(&files->file_lock); + + return !!ret_val; +#endif +} + + +NvBool NV_API_CALL nv_platform_supports_s0ix(void) +{ +#if defined(CONFIG_ACPI) + return (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) != 0; +#else + return NV_FALSE; +#endif +} + +NvBool NV_API_CALL nv_s2idle_pm_configured(void) +{ + NvU8 buf[8]; + +#if defined(NV_SEQ_READ_ITER_PRESENT) + struct file *file; + ssize_t num_read; + struct kiocb kiocb; + struct iov_iter iter; + struct kvec iov = { + .iov_base = &buf, + .iov_len = sizeof(buf), + }; + + if (os_open_readonly_file("/sys/power/mem_sleep", (void **)&file) != NV_OK) + { + return NV_FALSE; + } + + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = 0; + iov_iter_kvec(&iter, READ, &iov, 1, sizeof(buf)); + + num_read = seq_read_iter(&kiocb, &iter); + + os_close_file((void *)file); + + if (num_read != sizeof(buf)) + { + return NV_FALSE; + } +#else + if (os_open_and_read_file("/sys/power/mem_sleep", buf, + sizeof(buf)) != NV_OK) + { + return NV_FALSE; + } +#endif + + return (memcmp(buf, "[s2idle]", 8) == 0); +} + + +/* + * Function query system chassis info, to figure out if the platform is + * Laptop or Notebook. + * This function should be used when querying GPU form factor information is + * not possible via core RM or if querying both system and GPU form factor + * information is necessary. 
+ */ +NvBool NV_API_CALL nv_is_chassis_notebook(void) +{ + const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); + + // + // Return true only for Laptop & Notebook + // As per SMBIOS spec Laptop = 9 and Notebook = 10 + // + return (chassis_type && (!strcmp(chassis_type, "9") || !strcmp(chassis_type, "10"))); +} + +void NV_API_CALL nv_allow_runtime_suspend +( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.runtime_auto == false) + { + dev->power.runtime_auto = true; + atomic_add_unless(&dev->power.usage_count, -1, 0); + } + + spin_unlock_irq(&dev->power.lock); +#endif +} + +void NV_API_CALL nv_disallow_runtime_suspend +( + nv_state_t *nv +) +{ +#if defined(NV_PM_RUNTIME_AVAILABLE) + nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv); + struct device *dev = nvl->dev; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.runtime_auto == true) + { + dev->power.runtime_auto = false; + atomic_inc(&dev->power.usage_count); + } + + spin_unlock_irq(&dev->power.lock); +#endif +} + +NvU32 NV_API_CALL nv_get_os_type(void) +{ + return OS_TYPE_LINUX; +} + +void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size) +{ +#if NVCPU_IS_PPC64LE + return nv_ibmnpu_cache_flush_range(nv, cpu_virtual, size); +#elif NVCPU_IS_AARCH64 + + NvU64 va, cbsize; + NvU64 end_cpu_virtual = cpu_virtual + size; + + nv_printf(NV_DBG_INFO, + "Flushing CPU virtual range [0x%llx, 0x%llx)\n", + cpu_virtual, end_cpu_virtual); + + cbsize = cache_line_size(); + // Align address to line size + cpu_virtual = NV_ALIGN_UP(cpu_virtual, cbsize); + + // Force eviction of any cache lines from the NUMA-onlined region. + for (va = cpu_virtual; va < end_cpu_virtual; va += cbsize) + { + asm volatile("dc civac, %0" : : "r" (va): "memory"); + // Reschedule if necessary to avoid lockup warnings + cond_resched(); + } + asm volatile("dsb sy" : : : "memory"); + +#endif +} + +static struct resource *nv_next_resource(struct resource *p) +{ + if (p->child != NULL) + return p->child; + + while ((p->sibling == NULL) && (p->parent != NULL)) + p = p->parent; + + return p->sibling; +} + +/* + * Function to get the correct PCI Bus memory window which can be mapped + * in the real mode emulator (emu). + * The function gets called during the initialization of the emu before + * remapping it to OS. + */ +void NV_API_CALL nv_get_updated_emu_seg( + NvU32 *start, + NvU32 *end +) +{ + struct resource *p; + + if (*start >= *end) + return; + + for (p = iomem_resource.child; (p != NULL); p = nv_next_resource(p)) + { + /* If we passed the resource we are looking for, stop */ + if (p->start > *end) + { + p = NULL; + break; + } + + /* Skip until we find a range that matches what we look for */ + if (p->end < *start) + continue; + + if ((p->end > *end) && (p->child)) + continue; + + if ((p->flags & IORESOURCE_MEM) != IORESOURCE_MEM) + continue; + + /* Found a match, break */ + break; + } + + if (p != NULL) + { + *start = max((resource_size_t)*start, p->start); + *end = min((resource_size_t)*end, p->end); + } +} diff --git a/kernel-open/nvidia/nv_gpu_ops.h b/kernel-open/nvidia/nv_gpu_ops.h new file mode 100644 index 000000000..6c09dda74 --- /dev/null +++ b/kernel-open/nvidia/nv_gpu_ops.h @@ -0,0 +1,301 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* + * nv_gpu_ops.h + * + * This file defines the interface between the common RM layer + * and the OS specific platform layers. (Currently supported + * are Linux and KMD) + * + */ + +#ifndef _NV_GPU_OPS_H_ +#define _NV_GPU_OPS_H_ +#include "nvgputypes.h" +#include "nv_uvm_types.h" + +typedef struct gpuSession *gpuSessionHandle; +typedef struct gpuDevice *gpuDeviceHandle; +typedef struct gpuAddressSpace *gpuAddressSpaceHandle; +typedef struct gpuChannel *gpuChannelHandle; +typedef struct gpuObject *gpuObjectHandle; + +typedef struct gpuRetainedChannel_struct gpuRetainedChannel; + +NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session); + +NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session); + +NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session, + const gpuInfo *pGpuInfo, + const NvProcessorUuid *gpuGuid, + struct gpuDevice **device, + NvBool bCreateSmcPartition); + +NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device); + +NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device, + NvU64 vaBase, + NvU64 vaSize, + gpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1, + gpuDeviceHandle device2, + getP2PCapsParams *p2pCaps); + +void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace); + +NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace, + NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo); + +NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace, + NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo); + +NV_STATUS nvGpuOpsPmaAllocPages(void *pPma, + NvLength pageCount, + NvU32 pageSize, + gpuPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages); + +void nvGpuOpsPmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags); + +NV_STATUS nvGpuOpsPmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags); + +NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize); + +NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace, + const gpuChannelAllocParams *params, + gpuChannelHandle *channelHandle, + gpuChannelInfo *channelInfo); + +NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace, + NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset); + +void 
nvGpuOpsChannelDestroy(struct gpuChannel *channel); + +void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace, + NvU64 pointer); + +NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace, + NvU64 memory, NvLength length, + void **cpuPtr, NvU32 pageSize); + +void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace, + void* cpuPtr); + +NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device, + gpuCaps *caps); + +NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device, + gpuCesCaps *caps); + +NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuAddressSpace *dstVaSpace, + NvU64 *dstAddress); + +NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + gpuMemoryInfo *pGpuMemoryInfo); + +NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice, + NvHandle hSubDevice, NvU8 *gpuGuid, + unsigned guidLength); + +NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid, + const NvU8 *gpuUuid, + NvHandle *hClient, + NvHandle *hDevice, + NvHandle *hSubDevice); + +NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device, + NvHandle hPhysHandle); + +NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus); + +NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *gpuUuid, + const gpuClientInfo *pGpuClientInfo, + gpuInfo *pGpuInfo); + +NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId, + NvU32 *pSubdeviceId); + +NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts); + +NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device); + +NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet); + +NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid); + +NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace); + +NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt); + +NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace); + +NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo); + +NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo); + +NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo); + +NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device, + gpuFaultInfo *pFaultInfo); + +NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults); + +NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults); + +NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device, + NvHandle hUserClient, + NvHandle hUserVASpace, + struct gpuAddressSpace **vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats); + +NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session, + gpuAccessCntrInfo *pAccessCntrInfo, + NvBool bOwnInterrupts); + +NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo, + gpuAccessCntrConfig *pAccessCntrConfig); + +NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo 
*pAccessCntrInfo); + +NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1, + struct gpuDevice *device2, + NvHandle *hP2pObject); + +NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session, + NvHandle hP2pObject); + +NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo); + +NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace, + NvHandle hClient, + NvHandle hChannel, + gpuRetainedChannel **retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo); + +void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel); + +NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel, + gpuChannelResourceBindParams *channelResourceBindParams); + +void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate); + +NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo); + +NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device, + const void *pFaultPacket); + +// Private interface used for windows only + +#if defined(NV_WINDOWS) +NV_STATUS nvGpuOpsGetRmHandleForSession(gpuSessionHandle hSession, NvHandle *hRmClient); + +NV_STATUS nvGpuOpsGetRmHandleForChannel(gpuChannelHandle hChannel, NvHandle *hRmChannel); +#endif // WINDOWS + +// Interface used for SR-IOV heavy + +NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device, + const gpuPagingChannelAllocParams *params, + gpuPagingChannelHandle *channelHandle, + gpuPagingChannelInfo *channelinfo); + +void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel); + +NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device, + NvU64 *dstAddress); + +void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device); + +NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel, + char *methodStream, + NvU32 methodStreamSize); + + + + + + + + + + + + + + + + + + + + + + + + +#endif /* _NV_GPU_OPS_H_*/ diff --git a/kernel-open/nvidia/nv_uvm_interface.c b/kernel-open/nvidia/nv_uvm_interface.c new file mode 100644 index 000000000..7e895a959 --- /dev/null +++ b/kernel-open/nvidia/nv_uvm_interface.c @@ -0,0 +1,1544 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file sets up the communication between the UVM driver and RM. RM will + * call the UVM driver providing to it the set of OPS it supports. UVM will + * then return by filling out the structure with the callbacks it supports. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#if defined(NV_UVM_ENABLE) + +#include "nv_uvm_interface.h" +#include "nv_gpu_ops.h" +#include "rm-gpu-ops.h" + +// This is really a struct UvmOpsUvmEvents *. It needs to be an atomic because +// it can be read outside of the g_pNvUvmEventsLock. Use getUvmEvents and +// setUvmEvents to access it. +static atomic_long_t g_pNvUvmEvents; +static struct semaphore g_pNvUvmEventsLock; + +static struct UvmOpsUvmEvents *getUvmEvents(void) +{ + return (struct UvmOpsUvmEvents *)atomic_long_read(&g_pNvUvmEvents); +} + +static void setUvmEvents(struct UvmOpsUvmEvents *newEvents) +{ + atomic_long_set(&g_pNvUvmEvents, (long)newEvents); +} + +static nvidia_stack_t *g_sp; +static struct semaphore g_spLock; + +// Use these to test g_sp usage. When DEBUG_GLOBAL_STACK, one out of every +// DEBUG_GLOBAL_STACK_THRESHOLD calls to nvUvmGetSafeStack will use g_sp. +#define DEBUG_GLOBAL_STACK 0 +#define DEBUG_GLOBAL_STACK_THRESHOLD 2 + +static atomic_t g_debugGlobalStackCount = ATOMIC_INIT(0); + +// Called at module load, not by an external client +int nv_uvm_init(void) +{ + int rc = nv_kmem_cache_alloc_stack(&g_sp); + if (rc != 0) + return rc; + + NV_INIT_MUTEX(&g_spLock); + NV_INIT_MUTEX(&g_pNvUvmEventsLock); + return 0; +} + +void nv_uvm_exit(void) +{ + // If this fires, the dependent driver never unregistered its callbacks with + // us before going away, leaving us potentially making callbacks to garbage + // memory. + WARN_ON(getUvmEvents() != NULL); + + nv_kmem_cache_free_stack(g_sp); +} + + +// Testing code to force use of the global stack every now and then +static NvBool forceGlobalStack(void) +{ + // Make sure that we do not try to allocate memory in interrupt or atomic + // context + if (DEBUG_GLOBAL_STACK || !NV_MAY_SLEEP()) + { + if ((atomic_inc_return(&g_debugGlobalStackCount) % + DEBUG_GLOBAL_STACK_THRESHOLD) == 0) + return NV_TRUE; + } + return NV_FALSE; +} + +// Guaranteed to always return a valid stack. It first attempts to allocate one +// from the pool. If that fails, it falls back to the global pre-allocated +// stack. This fallback will serialize. +// +// This is required so paths that free resources do not themselves require +// allocation of resources. 
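// A stack returned by nvUvmGetSafeStack() must be released with
// nvUvmFreeSafeStack() rather than nv_kmem_cache_free_stack(): when the
// fallback path was taken, the returned pointer is the global g_sp and the
// caller is still holding g_spLock, which only nvUvmFreeSafeStack() knows to
// drop. A minimal sketch of the intended pairing, mirroring the teardown
// wrappers further down in this file:
//
//     nvidia_stack_t *sp = nvUvmGetSafeStack();   // never fails; may hand back g_sp
//     rm_gpu_ops_channel_destroy(sp, channel);    // RM call on a resource-free path
//     nvUvmFreeSafeStack(sp);                     // releases g_spLock if g_sp was used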
+static nvidia_stack_t *nvUvmGetSafeStack(void) +{ + nvidia_stack_t *sp; + if (forceGlobalStack() || nv_kmem_cache_alloc_stack(&sp) != 0) + { + sp = g_sp; + down(&g_spLock); + } + return sp; +} + +static void nvUvmFreeSafeStack(nvidia_stack_t *sp) +{ + if (sp == g_sp) + up(&g_spLock); + else + nv_kmem_cache_free_stack(sp); +} + +NV_STATUS nvUvmInterfaceRegisterGpu(const NvProcessorUuid *gpuUuid, UvmGpuPlatformInfo *gpuInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + int rc; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + rc = nvidia_dev_get_uuid(gpuUuid->uuid, sp); + if (rc == 0) + { + rc = nvidia_dev_get_pci_info(gpuUuid->uuid, + &gpuInfo->pci_dev, + &gpuInfo->dma_addressable_start, + &gpuInfo->dma_addressable_limit); + + // Block GPU from entering GC6 while used by UVM. + if (rc == 0) + rc = nvidia_dev_block_gc6(gpuUuid->uuid, sp); + + // Avoid leaking reference on GPU if we failed. + if (rc != 0) + nvidia_dev_put_uuid(gpuUuid->uuid, sp); + } + + switch (rc) + { + case 0: + status = NV_OK; + break; + case -ENOMEM: + status = NV_ERR_NO_MEMORY; + break; + case -ENODEV: + status = NV_ERR_GPU_UUID_NOT_FOUND; + break; + default: + status = NV_ERR_GENERIC; + break; + } + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRegisterGpu); + +void nvUvmInterfaceUnregisterGpu(const NvProcessorUuid *gpuUuid) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + nvidia_dev_unblock_gc6(gpuUuid->uuid, sp); + nvidia_dev_put_uuid(gpuUuid->uuid, sp); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceUnregisterGpu); + +NV_STATUS nvUvmInterfaceSessionCreate(uvmGpuSessionHandle *session, + UvmPlatformInfo *platformInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + memset(platformInfo, 0, sizeof(*platformInfo)); + platformInfo->atsSupported = nv_ats_supported; + + + + + + status = rm_gpu_ops_create_session(sp, (gpuSessionHandle *)session); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSessionCreate); + +NV_STATUS nvUvmInterfaceSessionDestroy(uvmGpuSessionHandle session) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_destroy_session(sp, (gpuSessionHandle)session); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSessionDestroy); + +NV_STATUS nvUvmInterfaceDeviceCreate(uvmGpuSessionHandle session, + const UvmGpuInfo *pGpuInfo, + const NvProcessorUuid *gpuUuid, + uvmGpuDeviceHandle *device, + NvBool bCreateSmcPartition) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_device_create(sp, + (gpuSessionHandle)session, + (const gpuInfo *)pGpuInfo, + gpuUuid, + (gpuDeviceHandle *)device, + bCreateSmcPartition); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDeviceCreate); + +void nvUvmInterfaceDeviceDestroy(uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_device_destroy(sp, (gpuDeviceHandle)device); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceDeviceDestroy); + +NV_STATUS nvUvmInterfaceDupAddressSpace(uvmGpuDeviceHandle device, + NvHandle hUserClient, + NvHandle hUserVASpace, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return 
NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_address_space(sp, + (gpuDeviceHandle)device, + hUserClient, + hUserVASpace, + (gpuAddressSpaceHandle *)vaSpace, + vaSpaceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupAddressSpace); + +NV_STATUS nvUvmInterfaceAddressSpaceCreate(uvmGpuDeviceHandle device, + unsigned long long vaBase, + unsigned long long vaSize, + uvmGpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_address_space_create(sp, + (gpuDeviceHandle)device, + vaBase, + vaSize, + (gpuAddressSpaceHandle *)vaSpace, + vaSpaceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceAddressSpaceCreate); + +void nvUvmInterfaceAddressSpaceDestroy(uvmGpuAddressSpaceHandle vaSpace) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_address_space_destroy( + sp, (gpuAddressSpaceHandle)vaSpace); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceAddressSpaceDestroy); + +NV_STATUS nvUvmInterfaceMemoryAllocFB(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_alloc_fb( + sp, (gpuAddressSpaceHandle)vaSpace, + length, (NvU64 *) gpuPointer, + allocInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryAllocFB); + +NV_STATUS nvUvmInterfaceMemoryAllocSys(uvmGpuAddressSpaceHandle vaSpace, + NvLength length, UvmGpuPointer * gpuPointer, + UvmGpuAllocInfo * allocInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_alloc_sys( + sp, (gpuAddressSpaceHandle)vaSpace, + length, (NvU64 *) gpuPointer, + allocInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceMemoryAllocSys); + +NV_STATUS nvUvmInterfaceGetP2PCaps(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + UvmGpuP2PCapsParams * p2pCapsParams) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_p2p_caps(sp, + (gpuDeviceHandle)device1, + (gpuDeviceHandle)device2, + p2pCapsParams); + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceGetP2PCaps); + +NV_STATUS nvUvmInterfaceGetPmaObject(uvmGpuDeviceHandle device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_pma_object(sp, (gpuDeviceHandle)device, pPma, (const nvgpuPmaStatistics_t *)pPmaPubStats); + + nv_kmem_cache_free_stack(sp); + return status; +} + +EXPORT_SYMBOL(nvUvmInterfaceGetPmaObject); + +NV_STATUS nvUvmInterfacePmaRegisterEvictionCallbacks(void *pPma, + uvmPmaEvictPagesCallback evictPages, + uvmPmaEvictRangeCallback evictRange, + void *callbackData) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_register_callbacks(sp, pPma, evictPages, evictRange, callbackData); + + 
nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaRegisterEvictionCallbacks); + +void nvUvmInterfacePmaUnregisterEvictionCallbacks(void *pPma) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_pma_unregister_callbacks(sp, pPma); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePmaUnregisterEvictionCallbacks); + +NV_STATUS nvUvmInterfacePmaAllocPages(void *pPma, + NvLength pageCount, + NvU32 pageSize, + UvmPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_alloc_pages( + sp, pPma, + pageCount, + pageSize, + (nvgpuPmaAllocationOptions_t)pPmaAllocOptions, + pPages); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaAllocPages); + +NV_STATUS nvUvmInterfacePmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_pin_pages(sp, pPma, pPages, pageCount, pageSize, flags); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaPinPages); + +NV_STATUS nvUvmInterfacePmaUnpinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_pma_unpin_pages(sp, pPma, pPages, pageCount, pageSize); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePmaUnpinPages); + +void nvUvmInterfaceMemoryFree(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_memory_free( + sp, (gpuAddressSpaceHandle)vaSpace, + (NvU64) gpuPointer); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryFree); + +void nvUvmInterfacePmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_pma_free_pages(sp, pPma, pPages, pageCount, pageSize, flags); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePmaFreePages); + +NV_STATUS nvUvmInterfaceMemoryCpuMap(uvmGpuAddressSpaceHandle vaSpace, + UvmGpuPointer gpuPointer, NvLength length, void **cpuPtr, + NvU32 pageSize) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_memory_cpu_map( + sp, (gpuAddressSpaceHandle)vaSpace, + (NvU64) gpuPointer, length, cpuPtr, pageSize); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryCpuMap); + +void nvUvmInterfaceMemoryCpuUnMap(uvmGpuAddressSpaceHandle vaSpace, + void *cpuPtr) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_memory_cpu_ummap(sp, (gpuAddressSpaceHandle)vaSpace, cpuPtr); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceMemoryCpuUnMap); + +NV_STATUS nvUvmInterfaceChannelAllocate(uvmGpuAddressSpaceHandle vaSpace, + const UvmGpuChannelAllocParams *allocParams, + uvmGpuChannelHandle *channel, + UvmGpuChannelInfo *channelInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_channel_allocate(sp, + 
(gpuAddressSpaceHandle)vaSpace, + allocParams, + (gpuChannelHandle *)channel, + channelInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceChannelAllocate); + +void nvUvmInterfaceChannelDestroy(uvmGpuChannelHandle channel) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_channel_destroy(sp, (gpuChannelHandle)channel); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceChannelDestroy); + +NV_STATUS nvUvmInterfaceQueryCaps(uvmGpuDeviceHandle device, + UvmGpuCaps * caps) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_query_caps(sp, (gpuDeviceHandle)device, caps); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceQueryCaps); + +NV_STATUS nvUvmInterfaceQueryCopyEnginesCaps(uvmGpuDeviceHandle device, + UvmGpuCopyEnginesCaps *caps) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_query_ces_caps(sp, (gpuDeviceHandle)device, caps); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceQueryCopyEnginesCaps); + +NV_STATUS nvUvmInterfaceGetGpuInfo(const NvProcessorUuid *gpuUuid, + const UvmGpuClientInfo *pGpuClientInfo, + UvmGpuInfo *pGpuInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_gpu_info(sp, gpuUuid, pGpuClientInfo, pGpuInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetGpuInfo); + +NV_STATUS nvUvmInterfaceServiceDeviceInterruptsRM(uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_service_device_interrupts_rm(sp, + (gpuDeviceHandle)device); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceServiceDeviceInterruptsRM); + +NV_STATUS nvUvmInterfaceSetPageDirectory(uvmGpuAddressSpaceHandle vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_set_page_directory(sp, (gpuAddressSpaceHandle)vaSpace, + physAddress, numEntries, bVidMemAperture, pasid); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceSetPageDirectory); + +NV_STATUS nvUvmInterfaceUnsetPageDirectory(uvmGpuAddressSpaceHandle vaSpace) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = + rm_gpu_ops_unset_page_directory(sp, (gpuAddressSpaceHandle)vaSpace); + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceUnsetPageDirectory); + +NV_STATUS nvUvmInterfaceDupAllocation(uvmGpuAddressSpaceHandle srcVaSpace, + NvU64 srcAddress, + uvmGpuAddressSpaceHandle dstVaSpace, + NvU64 *dstAddress) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_allocation(sp, + (gpuAddressSpaceHandle)srcVaSpace, + srcAddress, + (gpuAddressSpaceHandle)dstVaSpace, + dstAddress); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupAllocation); + +NV_STATUS nvUvmInterfaceDupMemory(uvmGpuDeviceHandle device, + 
NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + UvmGpuMemoryInfo *pGpuMemoryInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_dup_memory(sp, + (gpuDeviceHandle)device, + hClient, + hPhysMemory, + hDupMemory, + pGpuMemoryInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDupMemory); + + +NV_STATUS nvUvmInterfaceFreeDupedHandle(uvmGpuDeviceHandle device, + NvHandle hPhysHandle) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_free_duped_handle(sp, + (gpuDeviceHandle)device, + hPhysHandle); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceFreeDupedHandle); + +NV_STATUS nvUvmInterfaceGetFbInfo(uvmGpuDeviceHandle device, + UvmGpuFbInfo * fbInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_fb_info(sp, (gpuDeviceHandle)device, fbInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetFbInfo); + +NV_STATUS nvUvmInterfaceGetEccInfo(uvmGpuDeviceHandle device, + UvmGpuEccInfo * eccInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_ecc_info(sp, (gpuDeviceHandle)device, eccInfo); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetEccInfo); + +NV_STATUS nvUvmInterfaceOwnPageFaultIntr(uvmGpuDeviceHandle device, NvBool bOwnInterrupts) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_own_page_fault_intr(sp, (gpuDeviceHandle)device, bOwnInterrupts); + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceOwnPageFaultIntr); + + +NV_STATUS nvUvmInterfaceInitFaultInfo(uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_init_fault_info(sp, + (gpuDeviceHandle)device, + pFaultInfo); + + // Preallocate a stack for functions called from ISR top half + pFaultInfo->nonReplayable.isr_sp = NULL; + pFaultInfo->nonReplayable.isr_bh_sp = NULL; + if (status == NV_OK) + { + // NOTE: nv_kmem_cache_alloc_stack does not allocate a stack on PPC. + // Therefore, the pointer can be NULL on success. Always use the + // returned error code to determine if the operation was successful. 
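        //
        // Two stacks are preallocated here, presumably because the two
        // interrupt-time consumers visible below cannot share one: the
        // top-half query nvUvmInterfaceHasPendingNonReplayableFaults() uses
        // isr_sp, the bottom-half drain nvUvmInterfaceGetNonReplayableFaults()
        // uses isr_bh_sp, and neither path is allowed to allocate a stack at
        // interrupt time.
        //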
+ int err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_sp); + if (!err) + { + err = nv_kmem_cache_alloc_stack((nvidia_stack_t **)&pFaultInfo->nonReplayable.isr_bh_sp); + if (err) + { + nv_kmem_cache_free_stack(pFaultInfo->nonReplayable.isr_sp); + pFaultInfo->nonReplayable.isr_sp = NULL; + } + } + + if (err) + { + rm_gpu_ops_destroy_fault_info(sp, + (gpuDeviceHandle)device, + pFaultInfo); + + status = NV_ERR_NO_MEMORY; + } + } + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceInitFaultInfo); + +NV_STATUS nvUvmInterfaceInitAccessCntrInfo(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_init_access_cntr_info(sp, + (gpuDeviceHandle)device, + pAccessCntrInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceInitAccessCntrInfo); + +NV_STATUS nvUvmInterfaceEnableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo, + UvmGpuAccessCntrConfig *pAccessCntrConfig) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_enable_access_cntr (sp, + (gpuDeviceHandle)device, + pAccessCntrInfo, + pAccessCntrConfig); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceEnableAccessCntr); + +NV_STATUS nvUvmInterfaceDestroyFaultInfo(uvmGpuDeviceHandle device, + UvmGpuFaultInfo *pFaultInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + // Free the preallocated stack for functions called from ISR + if (pFaultInfo->nonReplayable.isr_sp != NULL) + { + nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_sp); + pFaultInfo->nonReplayable.isr_sp = NULL; + } + + if (pFaultInfo->nonReplayable.isr_bh_sp != NULL) + { + nv_kmem_cache_free_stack((nvidia_stack_t *)pFaultInfo->nonReplayable.isr_bh_sp); + pFaultInfo->nonReplayable.isr_bh_sp = NULL; + } + + status = rm_gpu_ops_destroy_fault_info(sp, + (gpuDeviceHandle)device, + pFaultInfo); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDestroyFaultInfo); + +NV_STATUS nvUvmInterfaceHasPendingNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo, + NvBool *hasPendingFaults) +{ + return rm_gpu_ops_has_pending_non_replayable_faults(pFaultInfo->nonReplayable.isr_sp, + pFaultInfo, + hasPendingFaults); +} +EXPORT_SYMBOL(nvUvmInterfaceHasPendingNonReplayableFaults); + +NV_STATUS nvUvmInterfaceGetNonReplayableFaults(UvmGpuFaultInfo *pFaultInfo, + void *pFaultBuffer, + NvU32 *numFaults) +{ + return rm_gpu_ops_get_non_replayable_faults(pFaultInfo->nonReplayable.isr_bh_sp, + pFaultInfo, + pFaultBuffer, + numFaults); +} +EXPORT_SYMBOL(nvUvmInterfaceGetNonReplayableFaults); + +NV_STATUS nvUvmInterfaceDestroyAccessCntrInfo(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_destroy_access_cntr_info(sp, + (gpuDeviceHandle)device, + pAccessCntrInfo); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDestroyAccessCntrInfo); + +NV_STATUS nvUvmInterfaceDisableAccessCntr(uvmGpuDeviceHandle device, + UvmGpuAccessCntrInfo *pAccessCntrInfo) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_disable_access_cntr(sp, + 
(gpuDeviceHandle)device, + pAccessCntrInfo); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceDisableAccessCntr); + +// this function is called by the UVM driver to register the ops +NV_STATUS nvUvmInterfaceRegisterUvmCallbacks(struct UvmOpsUvmEvents *importedUvmOps) +{ + NV_STATUS status = NV_OK; + + if (!importedUvmOps) + { + return NV_ERR_INVALID_ARGUMENT; + } + + down(&g_pNvUvmEventsLock); + if (getUvmEvents() != NULL) + { + status = NV_ERR_IN_USE; + } + else + { + // Be careful: as soon as the pointer is assigned, top half ISRs can + // start reading it to make callbacks, even before we drop the lock. + setUvmEvents(importedUvmOps); + } + up(&g_pNvUvmEventsLock); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRegisterUvmCallbacks); + +static void flush_top_half(void *info) +{ + // Prior top halves on this core must have completed for this callback to + // run at all, so we're done. + return; +} + +void nvUvmInterfaceDeRegisterUvmOps(void) +{ + // Taking the lock forces us to wait for non-interrupt callbacks to finish + // up. + down(&g_pNvUvmEventsLock); + setUvmEvents(NULL); + up(&g_pNvUvmEventsLock); + + // We cleared the pointer so nv_uvm_event_interrupt can't invoke any new + // top half callbacks, but prior ones could still be executing on other + // cores. We can wait for them to finish by waiting for a context switch to + // happen on every core. + // + // This is slow, but since nvUvmInterfaceDeRegisterUvmOps is very rare + // (module unload) it beats having the top half synchronize with a spin lock + // every time. + // + // Note that since we dropped the lock, another set of callbacks could have + // already been registered. That's ok, since we just need to wait for old + // ones to finish. + on_each_cpu(flush_top_half, NULL, 1); +} +EXPORT_SYMBOL(nvUvmInterfaceDeRegisterUvmOps); + +NV_STATUS nv_uvm_suspend(void) +{ + NV_STATUS status = NV_OK; + struct UvmOpsUvmEvents *events; + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. + events = getUvmEvents(); + if (events && events->suspend) + { + status = events->suspend(); + } + + up(&g_pNvUvmEventsLock); + + return status; +} + +NV_STATUS nv_uvm_resume(void) +{ + NV_STATUS status = NV_OK; + struct UvmOpsUvmEvents *events; + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. + events = getUvmEvents(); + if (events && events->resume) + { + status = events->resume(); + } + + up(&g_pNvUvmEventsLock); + + return status; +} + +void nv_uvm_notify_start_device(const NvU8 *pUuid) +{ + NvProcessorUuid uvmUuid; + struct UvmOpsUvmEvents *events; + + memcpy(uvmUuid.uuid, pUuid, UVM_UUID_LEN); + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. 
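    //
    // The same read-once-through-getUvmEvents() pattern matters much more in
    // nv_uvm_event_interrupt() below, which runs without g_pNvUvmEventsLock
    // and relies on that single atomic read as its only protection against
    // the pointer being cleared while the callback is in flight.
    //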
+ events = getUvmEvents(); + if(events && events->startDevice) + { + events->startDevice(&uvmUuid); + } + up(&g_pNvUvmEventsLock); +} + +void nv_uvm_notify_stop_device(const NvU8 *pUuid) +{ + NvProcessorUuid uvmUuid; + struct UvmOpsUvmEvents *events; + + memcpy(uvmUuid.uuid, pUuid, UVM_UUID_LEN); + + // Synchronize callbacks with unregistration + down(&g_pNvUvmEventsLock); + + // It's not strictly necessary to use a cached local copy of the events + // pointer here since it can't change under the lock, but we'll do it for + // consistency. + events = getUvmEvents(); + if(events && events->stopDevice) + { + events->stopDevice(&uvmUuid); + } + up(&g_pNvUvmEventsLock); +} + +NV_STATUS nv_uvm_event_interrupt(const NvU8 *pUuid) +{ + // + // This is called from interrupt context, so we can't take + // g_pNvUvmEventsLock to prevent the callbacks from being unregistered. Even + // if we could take the lock, we don't want to slow down the ISR more than + // absolutely necessary. + // + // Instead, we allow this function to be called concurrently with + // nvUvmInterfaceDeRegisterUvmOps. That function will clear the events + // pointer, then wait for all top halves to finish out. This means the + // pointer may change out from under us, but the callbacks are still safe to + // invoke while we're in this function. + // + // This requires that we read the pointer exactly once here so neither we + // nor the compiler make assumptions about the pointer remaining valid while + // in this function. + // + struct UvmOpsUvmEvents *events = getUvmEvents(); + + if (events && events->isrTopHalf) + return events->isrTopHalf((const NvProcessorUuid *)pUuid); + + // + // NV_OK means that the interrupt was for the UVM driver, so use + // NV_ERR_NO_INTR_PENDING to tell the caller that we didn't do anything. 
+ // + return NV_ERR_NO_INTR_PENDING; +} + +NV_STATUS nvUvmInterfaceP2pObjectCreate(uvmGpuDeviceHandle device1, + uvmGpuDeviceHandle device2, + NvHandle *hP2pObject) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_p2p_object_create(sp, + (gpuDeviceHandle)device1, + (gpuDeviceHandle)device2, + hP2pObject); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceP2pObjectCreate); + +void nvUvmInterfaceP2pObjectDestroy(uvmGpuSessionHandle session, + NvHandle hP2pObject) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_p2p_object_destroy(sp, (gpuSessionHandle)session, hP2pObject); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceP2pObjectDestroy); + +NV_STATUS nvUvmInterfaceGetExternalAllocPtes(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *gpuExternalMappingInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_external_alloc_ptes(sp, + (gpuAddressSpaceHandle)vaSpace, + hDupedMemory, + offset, + size, + gpuExternalMappingInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetExternalAllocPtes); + +NV_STATUS nvUvmInterfaceRetainChannel(uvmGpuAddressSpaceHandle vaSpace, + NvHandle hClient, + NvHandle hChannel, + void **retainedChannel, + UvmGpuChannelInstanceInfo *channelInstanceInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_retain_channel(sp, + (gpuAddressSpaceHandle)vaSpace, + hClient, + hChannel, + retainedChannel, + channelInstanceInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceRetainChannel); + +NV_STATUS nvUvmInterfaceBindChannelResources(void *retainedChannel, + UvmGpuChannelResourceBindParams *channelResourceBindParams) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_bind_channel_resources(sp, + retainedChannel, + channelResourceBindParams); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceBindChannelResources); + +void nvUvmInterfaceReleaseChannel(void *retainedChannel) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_release_channel(sp, retainedChannel); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceReleaseChannel); + +void nvUvmInterfaceStopChannel(void *retainedChannel, NvBool bImmediate) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + + rm_gpu_ops_stop_channel(sp, retainedChannel, bImmediate); + + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfaceStopChannel); + +NV_STATUS nvUvmInterfaceGetChannelResourcePtes(uvmGpuAddressSpaceHandle vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + UvmGpuExternalMappingInfo *externalMappingInfo) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_get_channel_resource_ptes(sp, + (gpuAddressSpaceHandle)vaSpace, + resourceDescriptor, + offset, + size, + externalMappingInfo); + + nv_kmem_cache_free_stack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceGetChannelResourcePtes); + +NV_STATUS 
nvUvmInterfaceReportNonReplayableFault(uvmGpuDeviceHandle device, + const void *pFaultPacket) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + NV_STATUS status; + + status = rm_gpu_ops_report_non_replayable_fault(sp, (gpuDeviceHandle)device, pFaultPacket); + + nvUvmFreeSafeStack(sp); + return status; +} +EXPORT_SYMBOL(nvUvmInterfaceReportNonReplayableFault); + +NV_STATUS nvUvmInterfacePagingChannelAllocate(uvmGpuDeviceHandle device, + const UvmGpuPagingChannelAllocParams *allocParams, + UvmGpuPagingChannelHandle *channel, + UvmGpuPagingChannelInfo *channelInfo) +{ + nvidia_stack_t *sp = NULL; + nvidia_stack_t *pushStreamSp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + if (nv_kmem_cache_alloc_stack(&pushStreamSp) != 0) + { + nv_kmem_cache_free_stack(sp); + return NV_ERR_NO_MEMORY; + } + + status = rm_gpu_ops_paging_channel_allocate(sp, + (gpuDeviceHandle)device, + allocParams, + (gpuPagingChannelHandle *)channel, + channelInfo); + + if (status == NV_OK) + (*channel)->pushStreamSp = pushStreamSp; + else + nv_kmem_cache_free_stack(pushStreamSp); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelAllocate); + +void nvUvmInterfacePagingChannelDestroy(UvmGpuPagingChannelHandle channel) +{ + nvidia_stack_t *sp; + + if (channel == NULL) + return; + + sp = nvUvmGetSafeStack(); + nv_kmem_cache_free_stack(channel->pushStreamSp); + rm_gpu_ops_paging_channel_destroy(sp, (gpuPagingChannelHandle)channel); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelDestroy); + +NV_STATUS nvUvmInterfacePagingChannelsMap(uvmGpuAddressSpaceHandle srcVaSpace, + UvmGpuPointer srcAddress, + uvmGpuDeviceHandle device, + NvU64 *dstAddress) +{ + nvidia_stack_t *sp = NULL; + NV_STATUS status; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + return NV_ERR_NO_MEMORY; + + status = rm_gpu_ops_paging_channels_map(sp, + (gpuAddressSpaceHandle)srcVaSpace, + (NvU64)srcAddress, + (gpuDeviceHandle)device, + dstAddress); + + nv_kmem_cache_free_stack(sp); + + return status; +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelsMap); + +void nvUvmInterfacePagingChannelsUnmap(uvmGpuAddressSpaceHandle srcVaSpace, + UvmGpuPointer srcAddress, + uvmGpuDeviceHandle device) +{ + nvidia_stack_t *sp = nvUvmGetSafeStack(); + rm_gpu_ops_paging_channels_unmap(sp, + (gpuAddressSpaceHandle)srcVaSpace, + (NvU64)srcAddress, + (gpuDeviceHandle)device); + nvUvmFreeSafeStack(sp); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelsUnmap); + +NV_STATUS nvUvmInterfacePagingChannelPushStream(UvmGpuPagingChannelHandle channel, + char *methodStream, + NvU32 methodStreamSize) +{ + return rm_gpu_ops_paging_channel_push_stream(channel->pushStreamSp, + (gpuPagingChannelHandle)channel, + methodStream, + methodStreamSize); +} +EXPORT_SYMBOL(nvUvmInterfacePagingChannelPushStream); + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +#else // NV_UVM_ENABLE + +NV_STATUS nv_uvm_suspend(void) +{ + return NV_OK; +} + +NV_STATUS nv_uvm_resume(void) +{ + return NV_OK; +} + +#endif // NV_UVM_ENABLE diff --git a/kernel-open/nvidia/nvidia-sources.Kbuild b/kernel-open/nvidia/nvidia-sources.Kbuild new file mode 100644 index 000000000..910a91cea --- /dev/null +++ b/kernel-open/nvidia/nvidia-sources.Kbuild @@ -0,0 +1,39 @@ +NVIDIA_SOURCES ?= +NVIDIA_SOURCES_CXX ?= + +NVIDIA_SOURCES += nvidia/nv.c 
+NVIDIA_SOURCES += nvidia/nv-pci.c +NVIDIA_SOURCES += nvidia/nv-dmabuf.c +NVIDIA_SOURCES += nvidia/nv-acpi.c +NVIDIA_SOURCES += nvidia/nv-cray.c +NVIDIA_SOURCES += nvidia/nv-dma.c +NVIDIA_SOURCES += nvidia/nv-i2c.c +NVIDIA_SOURCES += nvidia/nv-mmap.c +NVIDIA_SOURCES += nvidia/nv-p2p.c +NVIDIA_SOURCES += nvidia/nv-pat.c +NVIDIA_SOURCES += nvidia/nv-procfs.c +NVIDIA_SOURCES += nvidia/nv-procfs-utils.c +NVIDIA_SOURCES += nvidia/nv-usermap.c +NVIDIA_SOURCES += nvidia/nv-vm.c +NVIDIA_SOURCES += nvidia/nv-vtophys.c +NVIDIA_SOURCES += nvidia/os-interface.c +NVIDIA_SOURCES += nvidia/os-mlock.c +NVIDIA_SOURCES += nvidia/os-pci.c +NVIDIA_SOURCES += nvidia/os-registry.c +NVIDIA_SOURCES += nvidia/os-usermap.c +NVIDIA_SOURCES += nvidia/nv-modeset-interface.c +NVIDIA_SOURCES += nvidia/nv-pci-table.c +NVIDIA_SOURCES += nvidia/nv-kthread-q.c +NVIDIA_SOURCES += nvidia/nv-memdbg.c +NVIDIA_SOURCES += nvidia/nv-ibmnpu.c +NVIDIA_SOURCES += nvidia/nv-report-err.c +NVIDIA_SOURCES += nvidia/nv-rsync.c +NVIDIA_SOURCES += nvidia/nv-msi.c +NVIDIA_SOURCES += nvidia/nv-caps.c +NVIDIA_SOURCES += nvidia/nv-frontend.c +NVIDIA_SOURCES += nvidia/nv_uvm_interface.c +NVIDIA_SOURCES += nvidia/nvlink_linux.c +NVIDIA_SOURCES += nvidia/nvlink_caps.c +NVIDIA_SOURCES += nvidia/linux_nvswitch.c +NVIDIA_SOURCES += nvidia/procfs_nvswitch.c +NVIDIA_SOURCES += nvidia/i2c_nvswitch.c diff --git a/kernel-open/nvidia/nvidia.Kbuild b/kernel-open/nvidia/nvidia.Kbuild new file mode 100644 index 000000000..5eee12034 --- /dev/null +++ b/kernel-open/nvidia/nvidia.Kbuild @@ -0,0 +1,258 @@ +########################################################################### +# Kbuild fragment for nvidia.ko +########################################################################### + +# +# Define NVIDIA_{SOURCES,OBJECTS} +# + +include $(src)/nvidia/nvidia-sources.Kbuild +NVIDIA_OBJECTS = $(patsubst %.c,%.o,$(NVIDIA_SOURCES)) + +obj-m += nvidia.o +nvidia-y := $(NVIDIA_OBJECTS) + +NVIDIA_KO = nvidia/nvidia.ko + + +# +# nv-kernel.o_binary is the core binary component of nvidia.ko, shared +# across all UNIX platforms. Create a symlink, "nv-kernel.o" that +# points to nv-kernel.o_binary, and add nv-kernel.o to the list of +# objects to link into nvidia.ko. +# +# Note that: +# - The kbuild "clean" rule will delete all objects in nvidia-y (which +# is why we use a symlink instead of just adding nv-kernel.o_binary +# to nvidia-y). +# - kbuild normally uses the naming convention of ".o_shipped" for +# binary files. That is not used here, because the kbuild rule to +# create the "normal" object file from ".o_shipped" does a copy, not +# a symlink. This file is quite large, so a symlink is preferred. +# - The file added to nvidia-y should be relative to gmake's cwd. +# But, the target for the symlink rule should be prepended with $(obj). +# - The "symlink" command is called using kbuild's if_changed macro to +# generate an .nv-kernel.o.cmd file which can be used on subsequent +# runs to determine if the command line to create the symlink changed +# and needs to be re-executed. +# + +NVIDIA_BINARY_OBJECT := $(src)/nvidia/nv-kernel.o_binary +NVIDIA_BINARY_OBJECT_O := nvidia/nv-kernel.o + +quiet_cmd_symlink = SYMLINK $@ + cmd_symlink = ln -sf $< $@ + +targets += $(NVIDIA_BINARY_OBJECT_O) + +$(obj)/$(NVIDIA_BINARY_OBJECT_O): $(NVIDIA_BINARY_OBJECT) FORCE + $(call if_changed,symlink) + +nvidia-y += $(NVIDIA_BINARY_OBJECT_O) + + +# +# Define nvidia.ko-specific CFLAGS. 
+# + +NVIDIA_CFLAGS += -I$(src)/nvidia +NVIDIA_CFLAGS += -DNVIDIA_UNDEF_LEGACY_BIT_MACROS + +ifeq ($(NV_BUILD_TYPE),release) + NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG +endif + +ifeq ($(NV_BUILD_TYPE),develop) + NVIDIA_CFLAGS += -UDEBUG -U_DEBUG -DNDEBUG -DNV_MEM_LOGGER +endif + +ifeq ($(NV_BUILD_TYPE),debug) + NVIDIA_CFLAGS += -DDEBUG -D_DEBUG -UNDEBUG -DNV_MEM_LOGGER +endif + +$(call ASSIGN_PER_OBJ_CFLAGS, $(NVIDIA_OBJECTS), $(NVIDIA_CFLAGS)) + + +# +# nv-procfs.c requires nv-compiler.h +# + +NV_COMPILER_VERSION_HEADER = $(obj)/nv_compiler.h + +$(NV_COMPILER_VERSION_HEADER): + @echo \#define NV_COMPILER \"`$(CC) -v 2>&1 | tail -n 1`\" > $@ + +$(obj)/nvidia/nv-procfs.o: $(NV_COMPILER_VERSION_HEADER) + +clean-files += $(NV_COMPILER_VERSION_HEADER) + + +# +# Build nv-interface.o from the kernel interface layer objects, suitable +# for further processing by the top-level makefile to produce a precompiled +# kernel interface file. +# + +NVIDIA_INTERFACE := nvidia/nv-interface.o + +# Linux kernel v5.12 and later looks at "always-y", Linux kernel versions +# before v5.6 looks at "always"; kernel versions between v5.12 and v5.6 +# look at both. + +always += $(NVIDIA_INTERFACE) +always-y += $(NVIDIA_INTERFACE) + +$(obj)/$(NVIDIA_INTERFACE): $(addprefix $(obj)/,$(NVIDIA_OBJECTS)) + $(LD) -r -o $@ $^ + + +# +# Register the conftests needed by nvidia.ko +# + +NV_OBJECTS_DEPEND_ON_CONFTEST += $(NVIDIA_OBJECTS) + +NV_CONFTEST_FUNCTION_COMPILE_TESTS += hash__remap_4k_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += list_is_first +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_memory_array_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_pages_array_uc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_cache +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ioremap_wc +NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_alloc_table +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_get_domain_bus_and_slot +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_num_physpages +NV_CONFTEST_FUNCTION_COMPILE_TESTS += efi_enabled +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pde_data +NV_CONFTEST_FUNCTION_COMPILE_TESTS += proc_remove +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pm_vt_switch_required +NV_CONFTEST_FUNCTION_COMPILE_TESTS += xen_ioemu_inject_msi +NV_CONFTEST_FUNCTION_COMPILE_TESTS += phys_to_dma +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_dma_ops +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_attr_macros +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_page_attrs +NV_CONFTEST_FUNCTION_COMPILE_TESTS += write_cr4 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_property +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_node_by_phandle +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_node_to_nid +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pnv_pci_get_npu_dev +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_ibm_chip_id +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_bus_address +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_stop_and_remove_bus_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_remove_bus_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += register_cpu_notifier +NV_CONFTEST_FUNCTION_COMPILE_TESTS += cpuhp_setup_state +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_map_resource +NV_CONFTEST_FUNCTION_COMPILE_TESTS += get_backlight_device_by_name +NV_CONFTEST_FUNCTION_COMPILE_TESTS += timer_setup +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_msix_range +NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_read_has_pointer_pos_arg +NV_CONFTEST_FUNCTION_COMPILE_TESTS += kernel_write 
+NV_CONFTEST_FUNCTION_COMPILE_TESTS += kthread_create_on_node +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_find_matching_node +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dev_is_pci +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_direct_map_resource +NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_get_platform +NV_CONFTEST_FUNCTION_COMPILE_TESTS += tegra_bpmp_send_receive +NV_CONFTEST_FUNCTION_COMPILE_TESTS += flush_cache_all +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vmf_insert_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += jiffies_to_timespec +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_raw_ts64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += ktime_get_real_ts64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += full_name_hash +NV_CONFTEST_FUNCTION_COMPILE_TESTS += hlist_for_each_entry +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pci_enable_atomic_ops_to_root +NV_CONFTEST_FUNCTION_COMPILE_TESTS += vga_tryget +NV_CONFTEST_FUNCTION_COMPILE_TESTS += pgprot_decrypted +NV_CONFTEST_FUNCTION_COMPILE_TESTS += cc_mkdec +NV_CONFTEST_FUNCTION_COMPILE_TESTS += iterate_fd +NV_CONFTEST_FUNCTION_COMPILE_TESTS += seq_read_iter +NV_CONFTEST_FUNCTION_COMPILE_TESTS += sg_page_iter_page +NV_CONFTEST_FUNCTION_COMPILE_TESTS += unsafe_follow_pfn +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += drm_gem_object_put_unlocked +NV_CONFTEST_FUNCTION_COMPILE_TESTS += set_close_on_exec +NV_CONFTEST_FUNCTION_COMPILE_TESTS += add_memory_driver_managed +NV_CONFTEST_FUNCTION_COMPILE_TESTS += device_property_read_u64 +NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_of_platform_populate +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_dma_configure +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_count_elems_of_size +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_property_read_variable_u8_array +NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_new_client_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += i2c_unregister_device +NV_CONFTEST_FUNCTION_COMPILE_TESTS += of_get_named_gpio +NV_CONFTEST_FUNCTION_COMPILE_TESTS += devm_gpio_request_one +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_input +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_direction_output +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_get_value +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_set_value +NV_CONFTEST_FUNCTION_COMPILE_TESTS += gpio_to_irq +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_get +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_put +NV_CONFTEST_FUNCTION_COMPILE_TESTS += icc_set_bw +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_export_args +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_kmap_atomic +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_ops_has_map_atomic +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_has_dynamic_attachment +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_buf_attachment_has_peer2peer +NV_CONFTEST_FUNCTION_COMPILE_TESTS += dma_set_mask_and_coherent +NV_CONFTEST_FUNCTION_COMPILE_TESTS += acpi_bus_get_device + +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_of_node_to_nid +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_gpl_sme_active +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_map_sg_attrs +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_swiotlb_dma_ops +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present___close_fd +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_close_fd +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd 
+NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_unused_fd_flags +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_get_default_device +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_byte_offset +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_nvhost_syncpt_unit_interface_get_aperture +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_register_ipc_client +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_unregister_ipc_client +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dce_client_ipc_send_recv +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_dram_clk_to_mc_clk +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_get_dram_num_channels +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_tegra_dram_types +NV_CONFTEST_SYMBOL_COMPILE_TESTS += is_export_symbol_present_pxm_to_node + +NV_CONFTEST_TYPE_COMPILE_TESTS += file_operations +NV_CONFTEST_TYPE_COMPILE_TESTS += kuid_t +NV_CONFTEST_TYPE_COMPILE_TESTS += dma_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += swiotlb_dma_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += noncoherent_swiotlb_dma_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_has_address +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_insert_pfn_prot +NV_CONFTEST_TYPE_COMPILE_TESTS += vmf_insert_pfn_prot +NV_CONFTEST_TYPE_COMPILE_TESTS += address_space_init_once +NV_CONFTEST_TYPE_COMPILE_TESTS += vm_ops_fault_removed_vma_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += vmbus_channel_has_ringbuffer_page +NV_CONFTEST_TYPE_COMPILE_TESTS += device_driver_of_match_table +NV_CONFTEST_TYPE_COMPILE_TESTS += device_of_node +NV_CONFTEST_TYPE_COMPILE_TESTS += node_states_n_memory +NV_CONFTEST_TYPE_COMPILE_TESTS += kmem_cache_has_kobj_remove_work +NV_CONFTEST_TYPE_COMPILE_TESTS += sysfs_slab_unlink +NV_CONFTEST_TYPE_COMPILE_TESTS += proc_ops +NV_CONFTEST_TYPE_COMPILE_TESTS += timespec64 +NV_CONFTEST_TYPE_COMPILE_TESTS += vmalloc_has_pgprot_t_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += mm_has_mmap_lock +NV_CONFTEST_TYPE_COMPILE_TESTS += pci_channel_state +NV_CONFTEST_TYPE_COMPILE_TESTS += pci_dev_has_ats_enabled +NV_CONFTEST_TYPE_COMPILE_TESTS += mt_device_gre +NV_CONFTEST_TYPE_COMPILE_TESTS += remove_memory_has_nid_arg +NV_CONFTEST_TYPE_COMPILE_TESTS += add_memory_driver_managed_has_mhp_flags_arg + +NV_CONFTEST_GENERIC_COMPILE_TESTS += dom0_kernel_present +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_vgpu_kvm_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += nvidia_grid_csp_build +NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages +NV_CONFTEST_GENERIC_COMPILE_TESTS += get_user_pages_remote +NV_CONFTEST_GENERIC_COMPILE_TESTS += pm_runtime_available +NV_CONFTEST_GENERIC_COMPILE_TESTS += vm_fault_t +NV_CONFTEST_GENERIC_COMPILE_TESTS += pci_class_multimedia_hd_audio +NV_CONFTEST_GENERIC_COMPILE_TESTS += drm_available diff --git a/kernel-open/nvidia/nvlink_caps.c b/kernel-open/nvidia/nvlink_caps.c new file mode 100644 index 000000000..ea329170a --- /dev/null +++ b/kernel-open/nvidia/nvlink_caps.c @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink_os.h" +#include "nvlink_linux.h" +#include "nvlink_caps.h" +#include "nv-caps.h" + +#define NVLINK_CAP_FABRIC_MGMT "fabric-mgmt" + +typedef struct +{ + nv_cap_t *root; + nv_cap_t *fabric_mgmt; +} nvlink_caps_t; + +static nvlink_caps_t nvlink_caps = {0}; + +int nvlink_cap_acquire(int fd, NvU32 type) +{ + int dup_fd = -1; + + if (fd < 0) + { + return -1; + } + + switch(type) + { + case NVLINK_CAP_FABRIC_MANAGEMENT: + { + dup_fd = nv_cap_validate_and_dup_fd(nvlink_caps.fabric_mgmt, fd); + if (dup_fd < 0) + { + nvlink_print(NVLINK_DBG_ERRORS, + "Failed to validate the fabric mgmt capability\n"); + return -1; + } + break; + } + default: + { + nvlink_print(NVLINK_DBG_ERRORS, "Unknown capability specified\n"); + return -1; + } + } + + return dup_fd; +} + +void nvlink_cap_release(int fd) +{ + if (fd < 0) + { + return; + } + + nv_cap_close_fd(fd); +} + +void nvlink_cap_exit(void) +{ + if (nvlink_caps.fabric_mgmt != NULL) + { + nv_cap_destroy_entry(nvlink_caps.fabric_mgmt); + nvlink_caps.fabric_mgmt = NULL; + } + + if (nvlink_caps.root != NULL) + { + nv_cap_destroy_entry(nvlink_caps.root); + nvlink_caps.root = NULL; + } +} + +int nvlink_cap_init(const char *path) +{ + if (path == NULL) + { + nvlink_print(NVLINK_DBG_ERRORS, "Invalid path: %s\n", path); + return -1; + } + + nvlink_caps.root = nv_cap_init(path); + if (nvlink_caps.root == NULL) + { + nvlink_print(NVLINK_DBG_ERRORS, "Failed to initialize capabilities\n"); + return -1; + } + + nvlink_caps.fabric_mgmt = nv_cap_create_file_entry(nvlink_caps.root, + NVLINK_CAP_FABRIC_MGMT, + S_IRUSR); + if (nvlink_caps.fabric_mgmt == NULL) + { + nvlink_print(NVLINK_DBG_ERRORS, "Failed to create fabric-mgmt entry\n"); + nvlink_cap_exit(); + return -1; + } + + return 0; +} diff --git a/kernel-open/nvidia/nvlink_caps.h b/kernel-open/nvidia/nvlink_caps.h new file mode 100644 index 000000000..4d60ce95b --- /dev/null +++ b/kernel-open/nvidia/nvlink_caps.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVLINK_CAPS_H_ +#define _NVLINK_CAPS_H_ + +/* List of supported capability type */ +#define NVLINK_CAP_FABRIC_MANAGEMENT 0 + +/* Max supported capabilities count */ +#define NVLINK_CAP_COUNT 1 + +int nvlink_cap_init(const char *path); +void nvlink_cap_exit(void); +int nvlink_cap_acquire(int fd, NvU32 type); +void nvlink_cap_release(int fd); + +#endif //_NVLINK_CAPS_H_ diff --git a/kernel-open/nvidia/nvlink_common.h b/kernel-open/nvidia/nvlink_common.h new file mode 100644 index 000000000..8cf88b490 --- /dev/null +++ b/kernel-open/nvidia/nvlink_common.h @@ -0,0 +1,176 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVLINK_COMMON_H_ +#define _NVLINK_COMMON_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvCpuUuid.h" +#include "nvlink_errors.h" + + +#ifndef NULL +#define NULL ((void *)0) +#endif + +// nvlink pci bar information +struct nvlink_pci_bar_info +{ + NvU64 busAddress; + NvU64 baseAddr; + NvU64 barSize; + NvU32 offset; + void *pBar; +}; + +#define MAX_NVLINK_BARS 2 + +// nvlink pci information +struct nvlink_pci_info +{ + NvU32 domain; + NvU8 bus; + NvU8 device; + NvU8 function; + NvU32 pciDeviceId; + NvU32 irq; + NvBool intHooked; + struct nvlink_pci_bar_info bars[MAX_NVLINK_BARS]; +}; + +// nvlink detailed device information +struct nvlink_detailed_device_info +{ + char *deviceName; + NvU64 deviceType; + NvU8 *devUuid; + NvBool bInitialized; + + + + void *dev_info; // Endpoint driver device info opaque + // to core lib. Passed from end point + // driver to core + + struct nvlink_pci_info *pciInfo; +}; + +// nvlink device registration parameters +struct nvlink_device_register_params +{ + // + // Core lib device info opaque to endpoint driver + // Passed from core lib to endpoint driver + // + void **deviceHandle; + char *driverName; + + struct nvlink_detailed_device_info *device_params; +}; + +// nvlink detailed link information +struct nvlink_detailed_link_info +{ + void *deviceHandle; // Core library device handle passed + // to endpoint driver during device + // registration + + void *link_info; // End point driver link info opaque + // to core lib. Passed from end point + // driver to core + + char *linkName; + NvU32 linkNumber; + NvU32 version; + NvBool bAcCoupled; + const void *link_handlers; +}; + +// nvlink link registration parameters +struct nvlink_link_register_params +{ + // + // Core lib link info opaque to endpoint driver + // Passed from core lib to endpoint driver + // + void **linkHandle; + + struct nvlink_detailed_link_info *link_params; +}; + +// nvlink client device handle +struct nvlink_device_handle +{ + NvU32 linkMask; + struct nvlink_pci_info pciInfo; +}; + +#define NVLINK_PCI_DEV_FMT "%04x:%02x:%02x.%x" +#define NVLINK_PCI_DEV_FMT_ARGS(info) (info)->domain, \ + (info)->bus, \ + (info)->device, \ + (info)->function + +// nvlink connection information +struct nvlink_conn_info +{ + NvU32 domain; + NvU16 bus; + NvU16 device; + NvU16 function; + NvU32 pciDeviceId; + NvU8 devUuid[NV_UUID_LEN]; + NvU64 deviceType; + NvU32 linkNumber; + NvBool bConnected; + NvU64 chipSid; +}; + +// nvlink ioctrl params +struct nvlink_ioctrl_params +{ + void *osPrivate; + NvU32 cmd; + void *buf; + NvU32 size; +}; + +// Typedefs +typedef struct nvlink_pci_bar_info nvlink_pci_bar_info; +typedef struct nvlink_pci_info nvlink_pci_info; +typedef struct nvlink_detailed_device_info nvlink_detailed_device_info; +typedef struct nvlink_detailed_link_info nvlink_detailed_link_info; +typedef struct nvlink_device_register_params nvlink_device_register_params; +typedef struct nvlink_link_register_params nvlink_link_register_params; +typedef struct nvlink_conn_info nvlink_conn_info; +typedef struct nvlink_ioctrl_params nvlink_ioctrl_params; + +#ifdef __cplusplus +} +#endif + +#endif //_NVLINK_COMMON_H_ diff --git a/kernel-open/nvidia/nvlink_errors.h b/kernel-open/nvidia/nvlink_errors.h new file mode 100644 index 000000000..d6784a39f --- /dev/null +++ b/kernel-open/nvidia/nvlink_errors.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVLINK_ERRORS_H_ +#define _NVLINK_ERRORS_H_ + +typedef int NvlStatus; + +#define NVL_SUCCESS (NvlStatus) 0 +#define NVL_BAD_ARGS (NvlStatus) 1 +#define NVL_NO_MEM (NvlStatus) 2 +#define NVL_NOT_FOUND (NvlStatus) 3 +#define NVL_INITIALIZATION_PARTIAL_FAILURE (NvlStatus) 4 +#define NVL_INITIALIZATION_TOTAL_FAILURE (NvlStatus) 5 +#define NVL_PCI_ERROR (NvlStatus) 6 +#define NVL_ERR_GENERIC (NvlStatus) 7 +#define NVL_ERR_INVALID_STATE (NvlStatus) 8 +#define NVL_UNBOUND_DEVICE (NvlStatus) 9 +#define NVL_MORE_PROCESSING_REQUIRED (NvlStatus)10 +#define NVL_IO_ERROR (NvlStatus)11 +#define NVL_ERR_STATE_IN_USE (NvlStatus)12 +#define NVL_ERR_NOT_SUPPORTED (NvlStatus)13 +#define NVL_ERR_NOT_IMPLEMENTED (NvlStatus)14 +#define NVL_ERR_INSUFFICIENT_PERMISSIONS (NvlStatus)15 +#define NVL_ERR_OPERATING_SYSTEM (NvlStatus)16 + +#endif // _NVLINK_ERRORS_H_ diff --git a/kernel-open/nvidia/nvlink_export.h b/kernel-open/nvidia/nvlink_export.h new file mode 100644 index 000000000..471ec8380 --- /dev/null +++ b/kernel-open/nvidia/nvlink_export.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVLINK_EXPORT_H_ +#define _NVLINK_EXPORT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvlink_common.h" + +/* + * Initializes core lib and does all that is needed + * to access NVLINK functionality on the current platform. + */ +NvlStatus nvlink_lib_initialize(void); + +/* + * Frees any related resources and then unloads core lib. + */ +NvlStatus nvlink_lib_unload(void); + +/* +* Entry point for nvlink ioctl calls. +*/ +NvlStatus nvlink_lib_ioctl_ctrl(nvlink_ioctrl_params *ctrl_params); + +#ifdef __cplusplus +} +#endif + +#endif //_NVLINK_EXPORT_H_ diff --git a/kernel-open/nvidia/nvlink_linux.c b/kernel-open/nvidia/nvlink_linux.c new file mode 100644 index 000000000..f1dcc5522 --- /dev/null +++ b/kernel-open/nvidia/nvlink_linux.c @@ -0,0 +1,643 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "conftest.h" + +#include "nvlink_os.h" +#include "nvlink_linux.h" +#include "nvlink_errors.h" +#include "nvlink_export.h" +#include "nv-linux.h" +#include "nv-procfs.h" +#include "nv-time.h" +#include "nvlink_caps.h" + +#include +#include +#include +#include +#include +#include + +#define MAX_ERROR_STRING 512 + +typedef struct nvlink_file_private +{ + struct + { + /* A duped file descriptor for fabric_mgmt capability */ + int fabric_mgmt; + } capability_fds; +} nvlink_file_private_t; + +#define NVLINK_SET_FILE_PRIVATE(filp, data) ((filp)->private_data = (data)) +#define NVLINK_GET_FILE_PRIVATE(filp) ((nvlink_file_private_t *)(filp)->private_data) + +typedef struct +{ + struct mutex lock; + NvBool initialized; + struct cdev cdev; + dev_t devno; + int opened; + int major_devnum; +} _nvlink_drvctx; + + +// nvlink driver local state +static _nvlink_drvctx nvlink_drvctx; + +#if defined(CONFIG_PROC_FS) +#define NV_DEFINE_SINGLE_NVLINK_PROCFS_FILE(name) \ + NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nv_system_pm_lock) +#endif + +#define NVLINK_PROCFS_DIR "driver/nvidia-nvlink" + +static struct proc_dir_entry *nvlink_procfs_dir = NULL; + +#if defined(CONFIG_PROC_FS) + static int nvlink_is_procfs_available = 1; +#else + static int nvlink_is_procfs_available = 0; +#endif + +static struct proc_dir_entry *nvlink_permissions = NULL; + +static int nv_procfs_read_permissions(struct seq_file *s, void *v) +{ + // Restrict device node permissions - 0666. 
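+    // 438 decimal is 0666 octal (rw-rw-rw-); the value is printed in decimal
+    // for readers of this procfs file.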
+ seq_printf(s, "%s: %u\n", "DeviceFileMode", 438); + + return 0; +} + +NV_DEFINE_SINGLE_NVLINK_PROCFS_FILE(permissions); + +static void nvlink_permissions_exit(void) +{ + if (!nvlink_permissions) + { + return; + } + + NV_REMOVE_PROC_ENTRY(nvlink_permissions); + nvlink_permissions = NULL; +} + +static int nvlink_permissions_init(void) +{ + if (!nvlink_procfs_dir) + { + return -EACCES; + } + + nvlink_permissions = NV_CREATE_PROC_FILE("permissions", + nvlink_procfs_dir, + permissions, + NULL); + if (!nvlink_permissions) + { + return -EACCES; + } + + return 0; +} + +static void nvlink_procfs_exit(void) +{ + nvlink_permissions_exit(); + + if (!nvlink_procfs_dir) + { + return; + } + + NV_REMOVE_PROC_ENTRY(nvlink_procfs_dir); + nvlink_procfs_dir = NULL; +} + +static int nvlink_procfs_init(void) +{ + int rc = 0; + + if (!nvlink_is_procfs_available) + { + return -EACCES; + } + + nvlink_procfs_dir = NV_CREATE_PROC_DIR(NVLINK_PROCFS_DIR, NULL); + if (!nvlink_procfs_dir) + { + return -EACCES; + } + + rc = nvlink_permissions_init(); + if (rc < 0) + { + goto cleanup; + } + + return 0; + +cleanup: + + nvlink_procfs_exit(); + + return rc; +} + +static int nvlink_fops_open(struct inode *inode, struct file *filp) +{ + int rc = 0; + nvlink_file_private_t *private = NULL; + + nvlink_print(NVLINK_DBG_INFO, "nvlink driver open\n"); + + mutex_lock(&nvlink_drvctx.lock); + + // nvlink lib driver is currently exclusive open. + if (nvlink_drvctx.opened) + { + rc = -EBUSY; + goto open_error; + } + + private = (nvlink_file_private_t *)nvlink_malloc(sizeof(*private)); + if (private == NULL) + { + rc = -ENOMEM; + goto open_error; + } + + private->capability_fds.fabric_mgmt = -1; + NVLINK_SET_FILE_PRIVATE(filp, private); + + // mark our state as opened + nvlink_drvctx.opened = NV_TRUE; + +open_error: + mutex_unlock(&nvlink_drvctx.lock); + return rc; +} + +static int nvlink_fops_release(struct inode *inode, struct file *filp) +{ + nvlink_file_private_t *private = NVLINK_GET_FILE_PRIVATE(filp); + + nvlink_print(NVLINK_DBG_INFO, "nvlink driver close\n"); + + WARN_ON(private == NULL); + + mutex_lock(&nvlink_drvctx.lock); + + if (private->capability_fds.fabric_mgmt > 0) + { + nvlink_cap_release(private->capability_fds.fabric_mgmt); + private->capability_fds.fabric_mgmt = -1; + } + + nvlink_free(filp->private_data); + NVLINK_SET_FILE_PRIVATE(filp, NULL); + + // mark the device as not opened + nvlink_drvctx.opened = NV_FALSE; + + mutex_unlock(&nvlink_drvctx.lock); + + return 0; +} + +static int nvlink_fops_ioctl(struct inode *inode, + struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + nvlink_ioctrl_params ctrl_params = {0}; + int param_size = _IOC_SIZE(cmd); + void *param_buf = NULL; + NvlStatus ret_val = 0; + int rc = 0; + + // no buffer for simple _IO types + if (param_size) + { + // allocate a buffer to hold user input + param_buf = kzalloc(param_size, GFP_KERNEL); + if (NULL == param_buf) + { + rc = -ENOMEM; + goto nvlink_ioctl_fail; + } + + // copy user input to kernel buffers. Simple _IOR() ioctls can skip this step. 
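+        // _IOC_DIR(cmd) encodes the transfer direction from the ioctl number:
+        // _IOC_WRITE means userspace passes data in (copied from user here),
+        // _IOC_READ means data is returned (copied back out after the call to
+        // nvlink_lib_ioctl_ctrl() below); an _IOWR() command takes both paths.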
+ if (_IOC_DIR(cmd) & _IOC_WRITE) + { + // copy user input to local buffer + if (copy_from_user(param_buf, (const void *)arg, param_size)) + { + rc = -EFAULT; + goto nvlink_ioctl_fail; + } + } + } + + ctrl_params.osPrivate = filp->private_data; + ctrl_params.cmd = _IOC_NR(cmd); + ctrl_params.buf = param_buf; + ctrl_params.size = param_size; + + ret_val = nvlink_lib_ioctl_ctrl(&ctrl_params); + if (NVL_SUCCESS != ret_val) + { + rc = -EINVAL; + goto nvlink_ioctl_fail; + } + + // no copy for write-only ioctl + if ((param_size) && (_IOC_DIR(cmd) & _IOC_READ)) + { + if (copy_to_user((void *)arg, ctrl_params.buf, ctrl_params.size)) + { + rc = -EFAULT; + goto nvlink_ioctl_fail; + } + } + +nvlink_ioctl_fail: + if (param_buf) + { + kfree(param_buf); + } + return rc; +} + +#define NV_FILE_INODE(file) (file)->f_inode + +static long nvlink_fops_unlocked_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + return nvlink_fops_ioctl(NV_FILE_INODE(file), file, cmd, arg); +} + + +static const struct file_operations nvlink_fops = { + .owner = THIS_MODULE, + .open = nvlink_fops_open, + .release = nvlink_fops_release, +#if defined(NV_FILE_OPERATIONS_HAS_IOCTL) + .ioctl = nvlink_fops_ioctl, +#endif + .unlocked_ioctl = nvlink_fops_unlocked_ioctl, +}; + +int __init nvlink_core_init(void) +{ + NvlStatus ret_val; + int rc; + + if (NV_TRUE == nvlink_drvctx.initialized) + { + nvlink_print(NVLINK_DBG_ERRORS, "nvlink core interface already initialized\n"); + return -EBUSY; + } + + mutex_init(&nvlink_drvctx.lock); + + ret_val = nvlink_lib_initialize(); + if (NVL_SUCCESS != ret_val) + { + nvlink_print(NVLINK_DBG_ERRORS, "Failed to initialize driver : %d\n", ret_val); + rc = -ENODEV; + goto nvlink_lib_initialize_fail; + } + + rc = alloc_chrdev_region(&nvlink_drvctx.devno, 0, NVLINK_NUM_MINOR_DEVICES, + NVLINK_DEVICE_NAME); + if (rc < 0) + { + nvlink_print(NVLINK_DBG_ERRORS, "alloc_chrdev_region failed: %d\n", rc); + goto alloc_chrdev_region_fail; + } + + nvlink_drvctx.major_devnum = MAJOR(nvlink_drvctx.devno); + nvlink_print(NVLINK_DBG_INFO, "Nvlink Core is being initialized, major device number %d\n", + nvlink_drvctx.major_devnum); + + cdev_init(&nvlink_drvctx.cdev, &nvlink_fops); + nvlink_drvctx.cdev.owner = THIS_MODULE; + rc = cdev_add(&nvlink_drvctx.cdev, nvlink_drvctx.devno, NVLINK_NUM_MINOR_DEVICES); + if (rc < 0) + { + nvlink_print(NVLINK_DBG_ERRORS, " Unable to create cdev\n"); + goto cdev_add_fail; + } + + rc = nvlink_procfs_init(); + if (rc < 0) + { + goto procfs_init_fail; + } + + rc = nvlink_cap_init(NVLINK_PROCFS_DIR); + if (rc < 0) + { + nvlink_print(NVLINK_DBG_ERRORS, " Unable to create capability\n"); + goto cap_init_fail; + } + + nvlink_drvctx.initialized = NV_TRUE; + + return 0; + +cap_init_fail: + nvlink_procfs_exit(); + +procfs_init_fail: + cdev_del(&nvlink_drvctx.cdev); + +cdev_add_fail: + unregister_chrdev_region(nvlink_drvctx.devno, NVLINK_NUM_MINOR_DEVICES); + +alloc_chrdev_region_fail: + nvlink_lib_unload(); + +nvlink_lib_initialize_fail: + nv_mutex_destroy(&nvlink_drvctx.lock); + return rc; +} + +void nvlink_core_exit(void) +{ + if (NV_FALSE == nvlink_drvctx.initialized) + { + return; + } + + nvlink_cap_exit(); + + nvlink_procfs_exit(); + + cdev_del(&nvlink_drvctx.cdev); + + unregister_chrdev_region(nvlink_drvctx.devno, NVLINK_NUM_MINOR_DEVICES); + + nvlink_lib_unload(); + + nv_mutex_destroy(&nvlink_drvctx.lock); + + nvlink_print(NVLINK_DBG_INFO, "Unregistered the Nvlink Core, major device number %d\n", + nvlink_drvctx.major_devnum); +} + +void +nvlink_print +( + const char 
*file, + int line, + const char *function, + int log_level, + const char *fmt, + ... +) +{ + va_list arglist; + char nv_string[MAX_ERROR_STRING]; + char *sys_log_level; + + switch (log_level) { + case NVLINK_DBG_LEVEL_INFO: + sys_log_level = KERN_INFO; + break; + case NVLINK_DBG_LEVEL_SETUP: + sys_log_level = KERN_DEBUG; + break; + case NVLINK_DBG_LEVEL_USERERRORS: + sys_log_level = KERN_NOTICE; + break; + case NVLINK_DBG_LEVEL_WARNINGS: + sys_log_level = KERN_WARNING; + break; + case NVLINK_DBG_LEVEL_ERRORS: + sys_log_level = KERN_ERR; + break; + default: + sys_log_level = KERN_INFO; + break; + } + + va_start(arglist, fmt); + vsnprintf(nv_string, sizeof(nv_string), fmt, arglist); + va_end(arglist); + + nv_string[sizeof(nv_string) - 1] = '\0'; + printk("%snvidia-nvlink: %s", sys_log_level, nv_string); +} + +void * nvlink_malloc(NvLength size) +{ + return kmalloc(size, GFP_KERNEL); +} + +void nvlink_free(void *ptr) +{ + return kfree(ptr); +} + +char * nvlink_strcpy(char *dest, const char *src) +{ + return strcpy(dest, src); +} + +int nvlink_strcmp(const char *dest, const char *src) +{ + return strcmp(dest, src); +} + +NvLength nvlink_strlen(const char *s) +{ + return strlen(s); +} + +int nvlink_snprintf(char *dest, NvLength size, const char *fmt, ...) +{ + va_list arglist; + int chars_written; + + va_start(arglist, fmt); + chars_written = vsnprintf(dest, size, fmt, arglist); + va_end(arglist); + + return chars_written; +} + +NvU32 nvlink_memRd32(const volatile void * address) +{ + return (*(const volatile NvU32*)(address)); +} + +void nvlink_memWr32(volatile void *address, NvU32 data) +{ + (*(volatile NvU32 *)(address)) = data; +} + +NvU64 nvlink_memRd64(const volatile void * address) +{ + return (*(const volatile NvU64 *)(address)); +} + +void nvlink_memWr64(volatile void *address, NvU64 data) +{ + (*(volatile NvU64 *)(address)) = data; +} + +void * nvlink_memset(void *dest, int value, NvLength size) +{ + return memset(dest, value, size); +} + +void * nvlink_memcpy(void *dest, const void *src, NvLength size) +{ + return memcpy(dest, src, size); +} + +int nvlink_memcmp(const void *s1, const void *s2, NvLength size) +{ + return memcmp(s1, s2, size); +} + +/* + * Sleep for specified milliseconds. Yields the CPU to scheduler. 
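+ *
+ * The warning below fires when nv_sleep_ms() cannot honor the request,
+ * which (per the message printed there) is expected when the duration
+ * exceeds NV_MAX_ISR_DELAY_MS in a context that cannot actually sleep.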
+ */ +void nvlink_sleep(unsigned int ms) +{ + NV_STATUS status; + + status = nv_sleep_ms(ms); + + if (status != NV_OK) + { + if (printk_ratelimit()) + { + nvlink_print(NVLINK_DBG_ERRORS, "NVLink: requested sleep duration" + " %d msec exceeded %d msec\n", + ms, NV_MAX_ISR_DELAY_MS); + WARN_ON(1); + } + } +} + +void nvlink_assert(int cond) +{ + if ((cond) == 0x0) + { + if (printk_ratelimit()) + { + nvlink_print(NVLINK_DBG_ERRORS, "NVLink: Assertion failed!\n"); + WARN_ON(1); + } + + dbg_breakpoint(); + } +} + +void * nvlink_allocLock() +{ + struct semaphore *sema; + + sema = nvlink_malloc(sizeof(*sema)); + if (sema == NULL) + { + nvlink_print(NVLINK_DBG_ERRORS, "Failed to allocate sema!\n"); + return NULL; + } + sema_init(sema, 1); + + return sema; +} + +void nvlink_acquireLock(void *hLock) +{ + down(hLock); +} + +void nvlink_releaseLock(void *hLock) +{ + up(hLock); +} + +void nvlink_freeLock(void *hLock) +{ + if (NULL == hLock) + { + return; + } + + NVLINK_FREE(hLock); +} + +NvBool nvlink_isLockOwner(void *hLock) +{ + return NV_TRUE; +} + +NvlStatus nvlink_acquire_fabric_mgmt_cap(void *osPrivate, NvU64 capDescriptor) +{ + int dup_fd = -1; + nvlink_file_private_t *private_data = (nvlink_file_private_t *)osPrivate; + + if (private_data == NULL) + { + return NVL_BAD_ARGS; + } + + dup_fd = nvlink_cap_acquire((int)capDescriptor, + NVLINK_CAP_FABRIC_MANAGEMENT); + if (dup_fd < 0) + { + return NVL_ERR_OPERATING_SYSTEM; + } + + private_data->capability_fds.fabric_mgmt = dup_fd; + return NVL_SUCCESS; +} + +int nvlink_is_fabric_manager(void *osPrivate) +{ + nvlink_file_private_t *private_data = (nvlink_file_private_t *)osPrivate; + + /* Make sure that fabric mgmt capbaility fd is valid */ + if ((private_data == NULL) || + (private_data->capability_fds.fabric_mgmt < 0)) + { + return 0; + } + + return 1; +} + +int nvlink_is_admin(void) +{ + return NV_IS_SUSER(); +} diff --git a/kernel-open/nvidia/nvlink_linux.h b/kernel-open/nvidia/nvlink_linux.h new file mode 100644 index 000000000..58a2a2278 --- /dev/null +++ b/kernel-open/nvidia/nvlink_linux.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVLINK_LINUX_H_ +#define _NVLINK_LINUX_H_ + +#include // for entry/exit macros +#include // for "struct task_struct" +#include // for printk priority macros +#include + + +#define NVLINK_DEVICE_NAME "nvidia-nvlink" +#define NVLINK_NUM_MINOR_DEVICES 1 + +/* + * @Brief : Debug Breakpoint implementation + * + * @Description : + * + * @returns void + */ +static inline void +dbg_breakpoint(void) +{ + /* OS specific breakpoint implemented for NVLink library */ + #if defined(DEBUG) + #if defined(CONFIG_X86_REMOTE_DEBUG) || defined(CONFIG_KGDB) || defined(CONFIG_XMON) + #if defined(NVCPU_X86) || defined(NVCPU_X86_64) + __asm__ __volatile__ ("int $3"); + #elif defined(NVCPU_ARM) + __asm__ __volatile__ (".word %c0" :: "i" (KGDB_COMPILED_BREAK)); + #elif defined(NVCPU_AARCH64) + # warning "Need to implement dbg_breakpoint() for aarch64" + #elif defined(NVCPU_PPC64LE) + __asm__ __volatile__ ("trap"); + #endif /* NVCPU_X86 || NVCPU_X86_64 */ + #elif defined(CONFIG_KDB) + KDB_ENTER(); + #endif /* CONFIG_X86_REMOTE_DEBUG || CONFIG_KGDB || CONFIG_XMON */ + #endif /* DEBUG */ +} + +#endif //_NVLINK_LINUX_H_ diff --git a/kernel-open/nvidia/nvlink_os.h b/kernel-open/nvidia/nvlink_os.h new file mode 100644 index 000000000..4130bf98d --- /dev/null +++ b/kernel-open/nvidia/nvlink_os.h @@ -0,0 +1,86 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVLINK_OS_H_ +#define _NVLINK_OS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvlink_common.h" + +#define NVLINK_FREE(x) nvlink_free((void *)x) + +// Memory management functions +void * nvlink_malloc(NvLength); +void nvlink_free(void *); +void * nvlink_memset(void *, int, NvLength); +void * nvlink_memcpy(void *, const void *, NvLength); +int nvlink_memcmp(const void *, const void *, NvLength); +NvU32 nvlink_memRd32(const volatile void *); +void nvlink_memWr32(volatile void *, NvU32); +NvU64 nvlink_memRd64(const volatile void *); +void nvlink_memWr64(volatile void *, NvU64); + +// String management functions +char * nvlink_strcpy(char *, const char *); +NvLength nvlink_strlen(const char *); +int nvlink_strcmp(const char *, const char *); +int nvlink_snprintf(char *, NvLength, const char *, ...); + +// Locking support functions +void * nvlink_allocLock(void); +void nvlink_acquireLock(void *); +NvBool nvlink_isLockOwner(void *); +void nvlink_releaseLock(void *); +void nvlink_freeLock(void *); + +// Miscellaneous functions +void nvlink_assert(int expression); +void nvlink_sleep(unsigned int ms); +void nvlink_print(const char *, int, const char *, int, const char *, ...); +int nvlink_is_admin(void); + +// Capability functions +NvlStatus nvlink_acquire_fabric_mgmt_cap(void *osPrivate, NvU64 capDescriptor); +int nvlink_is_fabric_manager(void *osPrivate); + +#define NVLINK_DBG_LEVEL_INFO 0x0 +#define NVLINK_DBG_LEVEL_SETUP 0x1 +#define NVLINK_DBG_LEVEL_USERERRORS 0x2 +#define NVLINK_DBG_LEVEL_WARNINGS 0x3 +#define NVLINK_DBG_LEVEL_ERRORS 0x4 + +#define NVLINK_DBG_WHERE __FILE__, __LINE__, __FUNCTION__ +#define NVLINK_DBG_INFO NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_INFO +#define NVLINK_DBG_SETUP NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_SETUP +#define NVLINK_DBG_USERERRORS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_USERERRORS +#define NVLINK_DBG_WARNINGS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_WARNINGS +#define NVLINK_DBG_ERRORS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_ERRORS + +#ifdef __cplusplus +} +#endif + +#endif //_NVLINK_OS_H_ diff --git a/kernel-open/nvidia/nvlink_pci.h b/kernel-open/nvidia/nvlink_pci.h new file mode 100644 index 000000000..53e6a418d --- /dev/null +++ b/kernel-open/nvidia/nvlink_pci.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVLINK_PCI_H_ +#define _NVLINK_PCI_H_ + +#include +#include "nvlink_common.h" + +#define NV_PCI_RESOURCE_START(dev, bar) pci_resource_start(dev, (bar)) +#define NV_PCI_RESOURCE_SIZE(dev, bar) pci_resource_len(dev, (bar)) +#define NV_PCI_RESOURCE_FLAGS(dev, bar) pci_resource_flags(dev, (bar)) + +#if defined(NVCPU_X86) +#define NV_PCI_RESOURCE_VALID(dev, bar) \ + ((NV_PCI_RESOURCE_START(dev, bar) != 0) && \ + (NV_PCI_RESOURCE_SIZE(dev, bar) != 0) && \ + (!((NV_PCI_RESOURCE_FLAGS(dev, bar) & PCI_BASE_ADDRESS_MEM_TYPE_64) && \ + ((NV_PCI_RESOURCE_START(dev, bar) >> PAGE_SHIFT) > 0xfffffULL)))) +#else +#define NV_PCI_RESOURCE_VALID(dev, bar) \ + ((NV_PCI_RESOURCE_START(dev, bar) != 0) && \ + (NV_PCI_RESOURCE_SIZE(dev, bar) != 0)) +#endif + +#define NV_PCI_DOMAIN_NUMBER(dev) (NvU32)pci_domain_nr(dev->bus) +#define NV_PCI_BUS_NUMBER(dev) (dev)->bus->number +#define NV_PCI_DEVFN(dev) (dev)->devfn +#define NV_PCI_SLOT_NUMBER(dev) PCI_SLOT(NV_PCI_DEVFN(dev)) + +#define NV_PCI_DEV_FMT NVLINK_PCI_DEV_FMT +#define NV_PCI_DEV_FMT_ARGS(dev) \ + NV_PCI_DOMAIN_NUMBER(dev), NV_PCI_BUS_NUMBER(dev), \ + NV_PCI_SLOT_NUMBER(dev), PCI_FUNC((dev)->devfn) + +#define NVRM_PCICFG_NUM_BARS 6 +#define NVRM_PCICFG_BAR_OFFSET(i) (0x10 + (i) * 4) + +#define NV_PCIE_CFG_MAX_OFFSET 0x1000 + +#endif // _NVLINK_PCI_H_ diff --git a/kernel-open/nvidia/nvlink_proto.h b/kernel-open/nvidia/nvlink_proto.h new file mode 100644 index 000000000..b161ba92b --- /dev/null +++ b/kernel-open/nvidia/nvlink_proto.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVLINK_PROTO_H_ +#define _NVLINK_PROTO_H_ + +#include "nvlink_common.h" + +/* + * Functions defined in nvlink_linux.c + */ + +int nvlink_core_init (void); +void nvlink_core_exit (void); + + +/* + * Functions defined in nvswitch_linux.c + */ +int nvswitch_init (void); +void nvswitch_exit (void); + + +#if defined(NVCPU_AARCH64) +/* + * Functions defined in tegrashim_linux.c (Tegra only) + */ +int tegrashim_init (void); +void tegrashim_exit (void); +NvlStatus tegrashim_init_device (struct pci_dev *); +#endif + +#endif /* _NVLINK_PROTO_H_ */ diff --git a/kernel-open/nvidia/os-interface.c b/kernel-open/nvidia/os-interface.c new file mode 100644 index 000000000..f8810c338 --- /dev/null +++ b/kernel-open/nvidia/os-interface.c @@ -0,0 +1,2136 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +#include "nv-time.h" + + + + + + + + + +extern char *NVreg_TemporaryFilePath; + +#define MAX_ERROR_STRING 512 +static char nv_error_string[MAX_ERROR_STRING]; +nv_spinlock_t nv_error_string_lock; + +extern nv_linux_state_t nv_ctl_device; + +extern nv_kthread_q_t nv_kthread_q; + +NvU32 os_page_size = PAGE_SIZE; +NvU64 os_page_mask = NV_PAGE_MASK; +NvU8 os_page_shift = PAGE_SHIFT; +NvU32 os_sev_status = 0; +NvBool os_sev_enabled = 0; + + +#if defined(CONFIG_DMA_SHARED_BUFFER) +NvBool os_dma_buf_enabled = NV_TRUE; +#else +NvBool os_dma_buf_enabled = NV_FALSE; +#endif // CONFIG_DMA_SHARED_BUFFER + + + + +void NV_API_CALL os_disable_console_access(void) +{ + console_lock(); +} + +void NV_API_CALL os_enable_console_access(void) +{ + console_unlock(); +} + +typedef struct semaphore os_mutex_t; + +// +// os_alloc_mutex - Allocate the RM mutex +// +// ppMutex - filled in with pointer to opaque structure to mutex data type +// +NV_STATUS NV_API_CALL os_alloc_mutex +( + void **ppMutex +) +{ + NV_STATUS rmStatus; + os_mutex_t *os_mutex; + + rmStatus = os_alloc_mem(ppMutex, sizeof(os_mutex_t)); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate mutex!\n"); + return rmStatus; + } + os_mutex = (os_mutex_t *)*ppMutex; + NV_INIT_MUTEX(os_mutex); + + return NV_OK; +} + +// +// os_free_mutex - Free resources associated with mutex allocated +// via os_alloc_mutex above. 
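+// Only the memory obtained in os_alloc_mutex is released here (via
+// os_free_mem); the caller must ensure the mutex is no longer held or
+// being waited on.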
+// +// pMutex - Pointer to opaque structure to mutex data type +// +void NV_API_CALL os_free_mutex +( + void *pMutex +) +{ + os_mutex_t *os_mutex = (os_mutex_t *)pMutex; + + if (os_mutex != NULL) + { + os_free_mem(pMutex); + } +} + +// +// pMutex - Pointer to opaque structure to mutex data type +// + +NV_STATUS NV_API_CALL os_acquire_mutex +( + void *pMutex +) +{ + os_mutex_t *os_mutex = (os_mutex_t *)pMutex; + + if (!NV_MAY_SLEEP()) + { + return NV_ERR_INVALID_REQUEST; + } + down(os_mutex); + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_cond_acquire_mutex +( + void * pMutex +) +{ + os_mutex_t *os_mutex = (os_mutex_t *)pMutex; + if (!NV_MAY_SLEEP()) + { + return NV_ERR_INVALID_REQUEST; + } + + if (down_trylock(os_mutex)) + { + return NV_ERR_TIMEOUT_RETRY; + } + + return NV_OK; +} + + +void NV_API_CALL os_release_mutex +( + void *pMutex +) +{ + os_mutex_t *os_mutex = (os_mutex_t *)pMutex; + up(os_mutex); +} + +typedef struct semaphore os_semaphore_t; + + +void* NV_API_CALL os_alloc_semaphore +( + NvU32 initialValue +) +{ + NV_STATUS rmStatus; + os_semaphore_t *os_sema; + + rmStatus = os_alloc_mem((void *)&os_sema, sizeof(os_semaphore_t)); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate semaphore!\n"); + return NULL; + } + + NV_INIT_SEMA(os_sema, initialValue); + + return (void *)os_sema; +} + +void NV_API_CALL os_free_semaphore +( + void *pSema +) +{ + os_semaphore_t *os_sema = (os_semaphore_t *)pSema; + + os_free_mem(os_sema); +} + +NV_STATUS NV_API_CALL os_acquire_semaphore +( + void *pSema +) +{ + os_semaphore_t *os_sema = (os_semaphore_t *)pSema; + + if (!NV_MAY_SLEEP()) + { + return NV_ERR_INVALID_REQUEST; + } + down(os_sema); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_cond_acquire_semaphore +( + void * pSema +) +{ + os_semaphore_t *os_sema = (os_semaphore_t *)pSema; + // + // NOTE: down_trylock() is safe to call from IRQ, se we don't need an + // NV_MAY_SLEEP() check here. We do check it in os_cond_acquire_mutex(), + // even though it is also calling down_trylock(), since that keeps it + // in line with the kernel's 'struct mutex' API. 
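+    // down_trylock() returns nonzero when the semaphore cannot be taken
+    // without blocking; that case is mapped to NV_ERR_TIMEOUT_RETRY below.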
+ // + if (down_trylock(os_sema)) + { + return NV_ERR_TIMEOUT_RETRY; + } + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_release_semaphore +( + void *pSema +) +{ + os_semaphore_t *os_sema = (os_semaphore_t *)pSema; + up(os_sema); + return NV_OK; +} + +NvBool NV_API_CALL os_semaphore_may_sleep(void) +{ + return NV_MAY_SLEEP(); +} + +NvBool NV_API_CALL os_is_isr(void) +{ + return (in_irq()); +} + +// return TRUE if the caller is the super-user +NvBool NV_API_CALL os_is_administrator(void) +{ + return NV_IS_SUSER(); +} + +NvBool NV_API_CALL os_allow_priority_override(void) +{ + return capable(CAP_SYS_NICE); +} + +NvU64 NV_API_CALL os_get_num_phys_pages(void) +{ + return (NvU64)NV_NUM_PHYSPAGES; +} + +char* NV_API_CALL os_string_copy( + char *dst, + const char *src +) +{ + return strcpy(dst, src); +} + +NvU32 NV_API_CALL os_string_length( + const char* str +) +{ + return strlen(str); +} + +NvU32 NV_API_CALL os_strtoul(const char *str, char **endp, NvU32 base) +{ + return (NvU32)simple_strtoul(str, endp, base); +} + +NvS32 NV_API_CALL os_string_compare(const char *str1, const char *str2) +{ + return strcmp(str1, str2); +} + +void *os_mem_copy_custom( + void *dstPtr, + const void *srcPtr, + NvU32 length +) +{ + void *ret = dstPtr; + NvU32 dwords, bytes = length; + NvU8 *dst = dstPtr; + const NvU8 *src = srcPtr; + + if ((length >= 128) && + (((NvUPtr)dst & 3) == 0) & (((NvUPtr)src & 3) == 0)) + { + dwords = (length / sizeof(NvU32)); + bytes = (length % sizeof(NvU32)); + + while (dwords != 0) + { + *(NvU32 *)dst = *(const NvU32 *)src; + dst += sizeof(NvU32); + src += sizeof(NvU32); + dwords--; + } + } + + while (bytes != 0) + { + *dst = *src; + dst++; + src++; + bytes--; + } + + return ret; +} + +void *NV_API_CALL os_mem_copy( + void *dst, + const void *src, + NvU32 length +) +{ +#if defined(NVCPU_AARCH64) + /* + * TODO: Remove once memset/memcpy restructure is complete + * + * When performing memcpy for memory mapped as device, memcpy_[to/from]io + * must be used. WAR to check the source and destination to determine the + * correct memcpy_io to use. + * + * This WAR is limited to just aarch64 for now because the address range used + * to map ioremap and vmalloc is different on ppc64le, and is_vmalloc_addr() + * does not correctly handle this. is_ioremap_addr() is needed instead. This + * will have to be addressed when reorganizing RM to use the new memset model. + */ + if (is_vmalloc_addr(dst) && !is_vmalloc_addr(src)) + { + memcpy_toio(dst, src, length); + return dst; + } + else if (!is_vmalloc_addr(dst) && is_vmalloc_addr(src)) + { + memcpy_fromio(dst, src, length); + return dst; + } + else if (is_vmalloc_addr(dst) && is_vmalloc_addr(src)) + { + return os_mem_copy_custom(dst, src, length); + } + else +#endif + { +#if defined(CONFIG_CC_OPTIMIZE_FOR_SIZE) + /* + * When the kernel is configured with CC_OPTIMIZE_FOR_SIZE=y, Kbuild uses + * -Os universally. With -Os, GCC will aggressively inline builtins, even + * if -fno-builtin is specified, including memcpy with a tiny byte-copy + * loop on x86 (rep movsb). This is horrible for performance - a strict + * dword copy is much faster - so when we detect this case, just provide + * our own implementation. + */ + return os_mem_copy_custom(dst, src, length); +#else + /* + * Generally speaking, the kernel-provided memcpy will be the fastest, + * (optimized much better for the target architecture than the above + * loop), so we want to use that whenever we can get to it. 
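+         *
+         * Both this path and os_mem_copy_custom() above return the
+         * destination pointer, so callers can treat os_mem_copy() just
+         * like memcpy().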
+ */ + return memcpy(dst, src, length); +#endif + } +} + +NV_STATUS NV_API_CALL os_memcpy_from_user( + void *to, + const void *from, + NvU32 n +) +{ + return (NV_COPY_FROM_USER(to, from, n) ? NV_ERR_INVALID_ADDRESS : NV_OK); +} + +NV_STATUS NV_API_CALL os_memcpy_to_user( + void *to, + const void *from, + NvU32 n +) +{ + return (NV_COPY_TO_USER(to, from, n) ? NV_ERR_INVALID_ADDRESS : NV_OK); +} + +void* NV_API_CALL os_mem_set( + void *dst, + NvU8 c, + NvU32 length +) +{ +#if defined(NVCPU_AARCH64) + /* + * TODO: Remove once memset/memcpy restructure is complete + * + * WAR to check the destination to determine if the memory is of type Device + * or Normal, and use the correct memset. + * + * This WAR is limited to just aarch64 for now because the address range used + * to map ioremap and vmalloc is different on ppc64le, and is_vmalloc_addr() + * does not correctly handle this. is_ioremap_addr() is needed instead. This + * will have to be addressed when reorganizing RM to use the new memset model. + */ + if (is_vmalloc_addr(dst)) + { + memset_io(dst, (int)c, length); + return dst; + } + else +#endif + return memset(dst, (int)c, length); +} + +NvS32 NV_API_CALL os_mem_cmp( + const NvU8 *buf0, + const NvU8* buf1, + NvU32 length +) +{ + return memcmp(buf0, buf1, length); +} + + +/* + * Operating System Memory Functions + * + * There are 2 interesting aspects of resource manager memory allocations + * that need special consideration on Linux: + * + * 1. They are typically very large, (e.g. single allocations of 164KB) + * + * 2. The resource manager assumes that it can safely allocate memory in + * interrupt handlers. + * + * The first requires that we call vmalloc, the second kmalloc. We decide + * which one to use at run time, based on the size of the request and the + * context. Allocations larger than 128KB require vmalloc, in the context + * of an ISR they fail. + */ + +#if defined(NV_VGX_HYPER) +/* + * Citrix Hypervisor-8.0 Dom0 sysmem ends up getting fragmented because + * of which high-order kmalloc allocations fail. We try to avoid it by + * requesting allocations not larger than 8K. + * + * KVM will be affected low memory pressure situation a lot, + * particularly if hugetlbfs hugepages are being used. Hence, 8K applies + * here too. + */ +#define KMALLOC_LIMIT 8192 +#else +#define KMALLOC_LIMIT 131072 +#endif + +#define VMALLOC_ALLOCATION_SIZE_FLAG (1 << 0) + +NV_STATUS NV_API_CALL os_alloc_mem( + void **address, + NvU64 size +) +{ + unsigned long alloc_size; + + if (address == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *address = NULL; + NV_MEM_TRACKING_PAD_SIZE(size); + + // + // NV_KMALLOC, nv_vmalloc take an input of 4 bytes in x86. To avoid + // truncation and wrong allocation, below check is required. + // + alloc_size = size; + + if (alloc_size != size) + return NV_ERR_INVALID_PARAMETER; + + if (!NV_MAY_SLEEP()) + { + if (alloc_size <= KMALLOC_LIMIT) + NV_KMALLOC_ATOMIC(*address, alloc_size); + } + else + { + if (alloc_size <= KMALLOC_LIMIT) + { + NV_KMALLOC_NO_OOM(*address, alloc_size); + } + if (*address == NULL) + { + *address = nv_vmalloc(alloc_size); + alloc_size |= VMALLOC_ALLOCATION_SIZE_FLAG; + } + } + + NV_MEM_TRACKING_HIDE_SIZE(address, alloc_size); + + return ((*address != NULL) ? 
NV_OK : NV_ERR_NO_MEMORY); +} + +void NV_API_CALL os_free_mem(void *address) +{ + NvU32 size; + + NV_MEM_TRACKING_RETRIEVE_SIZE(address, size); + + if (size & VMALLOC_ALLOCATION_SIZE_FLAG) + { + size &= ~VMALLOC_ALLOCATION_SIZE_FLAG; + nv_vfree(address, size); + } + else + NV_KFREE(address, size); +} + + +/***************************************************************************** +* +* Name: osGetCurrentTime +* +*****************************************************************************/ + +NV_STATUS NV_API_CALL os_get_current_time( + NvU32 *seconds, + NvU32 *useconds +) +{ + struct timespec64 tm; + + ktime_get_real_ts64(&tm); + + *seconds = tm.tv_sec; + *useconds = tm.tv_nsec / NSEC_PER_USEC; + + return NV_OK; +} + +// +// Get the High resolution tick count of the system uptime +// +NvU64 NV_API_CALL os_get_current_tick_hr(void) +{ + struct timespec64 tm; + ktime_get_raw_ts64(&tm); + return (NvU64) timespec64_to_ns(&tm); +} + +#if BITS_PER_LONG >= 64 + +NvU64 NV_API_CALL os_get_current_tick(void) +{ +#if defined(NV_JIFFIES_TO_TIMESPEC_PRESENT) + struct timespec ts; + jiffies_to_timespec(jiffies, &ts); + return (NvU64) timespec_to_ns(&ts); +#else + struct timespec64 ts; + jiffies_to_timespec64(jiffies, &ts); + return (NvU64) timespec64_to_ns(&ts); +#endif +} + +NvU64 NV_API_CALL os_get_tick_resolution(void) +{ + return (NvU64)jiffies_to_usecs(1) * NSEC_PER_USEC; +} + +#else + +NvU64 NV_API_CALL os_get_current_tick(void) +{ + /* + * 'jiffies' overflows regularly on 32-bit builds (unsigned long is 4 bytes + * instead of 8 bytes), so it's unwise to build a tick counter on it, since + * the rest of the Resman assumes the 'tick' returned from this function is + * monotonically increasing and never overflows. + * + * Instead, use the previous implementation that we've lived with since the + * beginning, which uses system clock time to calculate the tick. This is + * subject to problems if the system clock time changes dramatically + * (more than a second or so) while the Resman is actively tracking a + * timeout. + */ + NvU32 seconds, useconds; + + (void) os_get_current_time(&seconds, &useconds); + + return ((NvU64)seconds * NSEC_PER_SEC + + (NvU64)useconds * NSEC_PER_USEC); +} + +NvU64 NV_API_CALL os_get_tick_resolution(void) +{ + /* + * os_get_current_tick() uses os_get_current_time(), which has + * microsecond resolution. + */ + return 1000ULL; +} + +#endif + +//--------------------------------------------------------------------------- +// +// Misc services. 
+// +//--------------------------------------------------------------------------- + +NV_STATUS NV_API_CALL os_delay_us(NvU32 MicroSeconds) +{ + return nv_sleep_us(MicroSeconds); +} + +NV_STATUS NV_API_CALL os_delay(NvU32 MilliSeconds) +{ + return nv_sleep_ms(MilliSeconds); +} + +NvU64 NV_API_CALL os_get_cpu_frequency(void) +{ + NvU64 cpu_hz = 0; +#if defined(CONFIG_CPU_FREQ) + cpu_hz = (cpufreq_get(0) * 1000); +#elif defined(NVCPU_X86_64) + NvU64 tsc[2]; + + tsc[0] = nv_rdtsc(); + mdelay(250); + tsc[1] = nv_rdtsc(); + + cpu_hz = ((tsc[1] - tsc[0]) * 4); +#endif + return cpu_hz; +} + +NvU32 NV_API_CALL os_get_current_process(void) +{ + return NV_GET_CURRENT_PROCESS(); +} + +void NV_API_CALL os_get_current_process_name(char *buf, NvU32 len) +{ + task_lock(current); + strncpy(buf, current->comm, len - 1); + buf[len - 1] = '\0'; + task_unlock(current); +} + +NV_STATUS NV_API_CALL os_get_current_thread(NvU64 *threadId) +{ + if (in_interrupt()) + *threadId = 0; + else + *threadId = (NvU64) current->pid; + + return NV_OK; +} + +/*******************************************************************************/ +/* */ +/* Debug and logging utilities follow */ +/* */ +/*******************************************************************************/ + +// The current debug display level (default to maximum debug level) +NvU32 cur_debuglevel = 0xffffffff; + +/* + * The binary core of RM (nv-kernel.o) calls both out_string, and nv_printf. + */ +inline void NV_API_CALL out_string(const char *str) +{ + printk("%s", str); +} + +/* + * nv_printf() prints to the kernel log for the driver. + * Returns the number of characters written. + */ +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...) +{ + va_list arglist; + int chars_written = 0; + + if (debuglevel >= ((cur_debuglevel >> 4) & 0x3)) + { + size_t length; + char *temp; + + // When printk is called to extend the output of the previous line + // (i.e. when the previous line did not end in \n), the printk call + // must contain KERN_CONT. Older kernels still print the line + // correctly, but KERN_CONT was technically always required. + + // This means that every call to printk() needs to have a KERN_xxx + // prefix. The only way to get this is to rebuild the format string + // into a new buffer, with a KERN_xxx prefix prepended. + + // Unfortunately, we can't guarantee that two calls to nv_printf() + // won't be interrupted by a printk from another driver. So to be + // safe, we always append KERN_CONT. It's still technically wrong, + // but it works. + + // The long-term fix is to modify all NV_PRINTF-ish calls so that the + // string always contains only one \n (at the end) and NV_PRINTF_EX + // is deleted. But that is unlikely to ever happen. + + length = strlen(printf_format); + if (length < 1) + return 0; + + temp = kmalloc(length + sizeof(KERN_CONT), GFP_ATOMIC); + if (!temp) + return 0; + + // KERN_CONT changed in the 3.6 kernel, so we can't assume its + // composition or size. + memcpy(temp, KERN_CONT, sizeof(KERN_CONT) - 1); + memcpy(temp + sizeof(KERN_CONT) - 1, printf_format, length + 1); + + va_start(arglist, printf_format); + chars_written = vprintk(temp, arglist); + va_end(arglist); + + kfree(temp); + } + + return chars_written; +} + +NvS32 NV_API_CALL os_snprintf(char *buf, NvU32 size, const char *fmt, ...) 
+{ + va_list arglist; + int chars_written; + + va_start(arglist, fmt); + chars_written = vsnprintf(buf, size, fmt, arglist); + va_end(arglist); + + return chars_written; +} + +NvS32 NV_API_CALL os_vsnprintf(char *buf, NvU32 size, const char *fmt, va_list arglist) +{ + return vsnprintf(buf, size, fmt, arglist); +} + +void NV_API_CALL os_log_error(const char *fmt, va_list ap) +{ + unsigned long flags; + + NV_SPIN_LOCK_IRQSAVE(&nv_error_string_lock, flags); + + vsnprintf(nv_error_string, MAX_ERROR_STRING, fmt, ap); + nv_error_string[MAX_ERROR_STRING - 1] = 0; + printk(KERN_ERR "%s", nv_error_string); + + NV_SPIN_UNLOCK_IRQRESTORE(&nv_error_string_lock, flags); +} + +void NV_API_CALL os_io_write_byte( + NvU32 address, + NvU8 value +) +{ + outb(value, address); +} + +void NV_API_CALL os_io_write_word( + NvU32 address, + NvU16 value +) +{ + outw(value, address); +} + +void NV_API_CALL os_io_write_dword( + NvU32 address, + NvU32 value +) +{ + outl(value, address); +} + +NvU8 NV_API_CALL os_io_read_byte( + NvU32 address +) +{ + return inb(address); +} + +NvU16 NV_API_CALL os_io_read_word( + NvU32 address +) +{ + return inw(address); +} + +NvU32 NV_API_CALL os_io_read_dword( + NvU32 address +) +{ + return inl(address); +} + + +static NvBool NV_API_CALL xen_support_fully_virtualized_kernel(void) +{ +#if defined(NV_XEN_SUPPORT_FULLY_VIRTUALIZED_KERNEL) + return (os_is_vgx_hyper()); +#endif + return NV_FALSE; +} + +void* NV_API_CALL os_map_kernel_space( + NvU64 start, + NvU64 size_bytes, + NvU32 mode +) +{ + void *vaddr; + + if (!xen_support_fully_virtualized_kernel() && start == 0) + { + if (mode != NV_MEMORY_CACHED) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: won't map address 0x%0llx UC!\n", start); + return NULL; + } + else + return (void *)PAGE_OFFSET; + } + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: can't map 0x%0llx, invalid context!\n", start); + os_dbg_breakpoint(); + return NULL; + } + + switch (mode) + { + case NV_MEMORY_CACHED: + vaddr = nv_ioremap_cache(start, size_bytes); + break; + case NV_MEMORY_WRITECOMBINED: + vaddr = rm_disable_iomap_wc() ? + nv_ioremap_nocache(start, size_bytes) : + nv_ioremap_wc(start, size_bytes); + break; + case NV_MEMORY_UNCACHED: + case NV_MEMORY_DEFAULT: + vaddr = nv_ioremap_nocache(start, size_bytes); + break; + default: + nv_printf(NV_DBG_ERRORS, + "NVRM: os_map_kernel_space: unsupported mode!\n"); + return NULL; + } + + return vaddr; +} + +void NV_API_CALL os_unmap_kernel_space( + void *addr, + NvU64 size_bytes +) +{ + if (addr == (void *)PAGE_OFFSET) + return; + + nv_iounmap(addr, size_bytes); +} + +// flush the cpu's cache, uni-processor version +NV_STATUS NV_API_CALL os_flush_cpu_cache(void) +{ + CACHE_FLUSH(); + return NV_OK; +} + +// flush the cache of all cpus +NV_STATUS NV_API_CALL os_flush_cpu_cache_all(void) +{ +#if defined(NVCPU_AARCH64) + CACHE_FLUSH_ALL(); + return NV_OK; +#endif + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL os_flush_user_cache(void) +{ +#if defined(NVCPU_AARCH64) + if (!NV_MAY_SLEEP()) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // The Linux kernel does not export an interface for flushing a range, + // although it is possible. For now, just flush the entire cache to be + // safe. 
+ // + CACHE_FLUSH_ALL(); + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void NV_API_CALL os_flush_cpu_write_combine_buffer(void) +{ + WRITE_COMBINE_FLUSH(); +} + +// override initial debug level from registry +void NV_API_CALL os_dbg_init(void) +{ + NvU32 new_debuglevel; + nvidia_stack_t *sp = NULL; + + NV_SPIN_LOCK_INIT(&nv_error_string_lock); + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return; + } + + if (NV_OK == rm_read_registry_dword(sp, NULL, + "ResmanDebugLevel", + &new_debuglevel)) + { + if (new_debuglevel != (NvU32)~0) + cur_debuglevel = new_debuglevel; + } + + nv_kmem_cache_free_stack(sp); +} + +void NV_API_CALL os_dbg_set_level(NvU32 new_debuglevel) +{ + nv_printf(NV_DBG_SETUP, "NVRM: Changing debuglevel from 0x%x to 0x%x\n", + cur_debuglevel, new_debuglevel); + cur_debuglevel = new_debuglevel; +} + +NV_STATUS NV_API_CALL os_schedule(void) +{ + if (NV_MAY_SLEEP()) + { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(1); + return NV_OK; + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: os_schedule: Attempted to yield" + " the CPU while in atomic or interrupt" + " context\n"); + return NV_ERR_ILLEGAL_ACTION; + } +} + +typedef struct { + nv_kthread_q_item_t item; + void *data; +} os_queue_data_t; + +static void os_execute_work_item(void *_oqd) +{ + os_queue_data_t *oqd = _oqd; + nvidia_stack_t *sp = NULL; + void *data = oqd->data; + + NV_KFREE(oqd, sizeof(os_queue_data_t)); + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return; + } + + rm_execute_work_item(sp, data); + + nv_kmem_cache_free_stack(sp); +} + +NV_STATUS NV_API_CALL os_queue_work_item(struct os_work_queue *queue, void *data) +{ + os_queue_data_t *oqd; + nv_kthread_q_t *kthread; + + /* Use the global queue unless a valid queue was provided */ + kthread = queue ? &queue->nvk : &nv_kthread_q; + + /* Make sure the kthread is active */ + if (unlikely(!kthread->q_kthread)) { + nv_printf(NV_DBG_ERRORS, "NVRM: queue is not enabled\n"); + return NV_ERR_NOT_READY; + } + + /* Allocate atomically just in case we're called in atomic context. */ + NV_KMALLOC_ATOMIC(oqd, sizeof(os_queue_data_t)); + if (!oqd) + return NV_ERR_NO_MEMORY; + + nv_kthread_q_item_init(&oqd->item, os_execute_work_item, oqd); + oqd->data = data; + + nv_kthread_q_schedule_q_item(kthread, &oqd->item); + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_flush_work_queue(struct os_work_queue *queue) +{ + nv_kthread_q_t *kthread; + + /* Use the global queue unless a valid queue was provided */ + kthread = queue ? 
&queue->nvk : &nv_kthread_q; + + if (NV_MAY_SLEEP()) + { + if (kthread->q_kthread) + nv_kthread_q_flush(kthread); + + return NV_OK; + } + else + { + nv_printf(NV_DBG_ERRORS, + "NVRM: os_flush_work_queue: attempted to execute passive" + "work from an atomic or interrupt context.\n"); + return NV_ERR_ILLEGAL_ACTION; + } +} + +extern NvU32 NVreg_EnableDbgBreakpoint; + +void NV_API_CALL os_dbg_breakpoint(void) +{ + if (NVreg_EnableDbgBreakpoint == 0) + { + return; + } + +#if defined(CONFIG_X86_REMOTE_DEBUG) || defined(CONFIG_KGDB) || defined(CONFIG_XMON) + #if defined(NVCPU_X86_64) + __asm__ __volatile__ ("int $3"); + #elif defined(NVCPU_ARM) + __asm__ __volatile__ (".word %c0" :: "i" (KGDB_COMPILED_BREAK)); + #elif defined(NVCPU_AARCH64) + # warning "Need to implement os_dbg_breakpoint() for aarch64" + #elif defined(NVCPU_PPC64LE) + __asm__ __volatile__ ("trap"); + #endif // NVCPU_* +#elif defined(CONFIG_KDB) + KDB_ENTER(); +#endif // CONFIG_X86_REMOTE_DEBUG || CONFIG_KGDB || CONFIG_XMON +} + +NvU32 NV_API_CALL os_get_cpu_number() +{ + NvU32 cpu_id = get_cpu(); + put_cpu(); + return cpu_id; +} + +NvU32 NV_API_CALL os_get_cpu_count() +{ + return NV_NUM_CPUS(); +} + +NvBool NV_API_CALL os_pat_supported(void) +{ + return (nv_pat_mode != NV_PAT_MODE_DISABLED); +} + +NvBool NV_API_CALL os_is_efi_enabled(void) +{ + return NV_EFI_ENABLED(); +} + +void NV_API_CALL os_get_screen_info( + NvU64 *pPhysicalAddress, + NvU16 *pFbWidth, + NvU16 *pFbHeight, + NvU16 *pFbDepth, + NvU16 *pFbPitch, + NvU64 consoleBar1Address, + NvU64 consoleBar2Address +) +{ +#if defined(CONFIG_FB) + int i; + *pPhysicalAddress = 0; + *pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = 0; + + for (i = 0; i < num_registered_fb; i++) + { + if (!registered_fb[i]) + continue; + + /* Make sure base address is mapped to GPU BAR */ + if ((registered_fb[i]->fix.smem_start == consoleBar1Address) || + (registered_fb[i]->fix.smem_start == consoleBar2Address)) + { + *pPhysicalAddress = registered_fb[i]->fix.smem_start; + *pFbWidth = registered_fb[i]->var.xres; + *pFbHeight = registered_fb[i]->var.yres; + *pFbDepth = registered_fb[i]->var.bits_per_pixel; + *pFbPitch = registered_fb[i]->fix.line_length; + break; + } + } +#else + *pPhysicalAddress = 0; + *pFbWidth = *pFbHeight = *pFbDepth = *pFbPitch = 0; +#endif +} + +void NV_API_CALL os_dump_stack() +{ + dump_stack(); +} + +typedef struct os_spinlock_s +{ + nv_spinlock_t lock; + unsigned long eflags; +} os_spinlock_t; + +NV_STATUS NV_API_CALL os_alloc_spinlock(void **ppSpinlock) +{ + NV_STATUS rmStatus; + os_spinlock_t *os_spinlock; + + rmStatus = os_alloc_mem(ppSpinlock, sizeof(os_spinlock_t)); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate spinlock!\n"); + return rmStatus; + } + + os_spinlock = (os_spinlock_t *)*ppSpinlock; + NV_SPIN_LOCK_INIT(&os_spinlock->lock); + os_spinlock->eflags = 0; + return NV_OK; +} + +void NV_API_CALL os_free_spinlock(void *pSpinlock) +{ + os_free_mem(pSpinlock); +} + +NvU64 NV_API_CALL os_acquire_spinlock(void *pSpinlock) +{ + os_spinlock_t *os_spinlock = (os_spinlock_t *)pSpinlock; + unsigned long eflags; + + NV_SPIN_LOCK_IRQSAVE(&os_spinlock->lock, eflags); + os_spinlock->eflags = eflags; + +#if defined(NVCPU_X86_64) + eflags &= X86_EFLAGS_IF; +#elif defined(NVCPU_AARCH64) + eflags &= PSR_I_BIT; +#endif + return eflags; +} + +void NV_API_CALL os_release_spinlock(void *pSpinlock, NvU64 oldIrql) +{ + os_spinlock_t *os_spinlock = (os_spinlock_t *)pSpinlock; + unsigned long eflags; + + eflags = os_spinlock->eflags; + 
os_spinlock->eflags = 0; + NV_SPIN_UNLOCK_IRQRESTORE(&os_spinlock->lock, eflags); +} + +#define NV_KERNEL_RELEASE ((LINUX_VERSION_CODE >> 16) & 0x0ff) +#define NV_KERNEL_VERSION ((LINUX_VERSION_CODE >> 8) & 0x0ff) +#define NV_KERNEL_SUBVERSION ((LINUX_VERSION_CODE) & 0x0ff) + +NV_STATUS NV_API_CALL os_get_version_info(os_version_info * pOsVersionInfo) +{ + NV_STATUS status = NV_OK; + + pOsVersionInfo->os_major_version = NV_KERNEL_RELEASE; + pOsVersionInfo->os_minor_version = NV_KERNEL_VERSION; + pOsVersionInfo->os_build_number = NV_KERNEL_SUBVERSION; + +#if defined(UTS_RELEASE) + pOsVersionInfo->os_build_version_str = UTS_RELEASE; +#endif + +#if defined(UTS_VERSION) + pOsVersionInfo->os_build_date_plus_str = UTS_VERSION; +#endif + + return status; +} + +NvBool NV_API_CALL os_is_xen_dom0(void) +{ +#if defined(NV_DOM0_KERNEL_PRESENT) + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +NvBool NV_API_CALL os_is_vgx_hyper(void) +{ +#if defined(NV_VGX_HYPER) + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +NV_STATUS NV_API_CALL os_inject_vgx_msi(NvU16 guestID, NvU64 msiAddr, NvU32 msiData) +{ +#if defined(NV_VGX_HYPER) && defined(NV_DOM0_KERNEL_PRESENT) && \ + defined(NV_XEN_IOEMU_INJECT_MSI) + int rc = 0; + rc = xen_ioemu_inject_msi(guestID, msiAddr, msiData); + if (rc) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s: can't inject MSI to guest:%d, addr:0x%x, data:0x%x, err:%d\n", + __FUNCTION__, guestID, msiAddr, msiData, rc); + return NV_ERR_OPERATING_SYSTEM; + } + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NvBool NV_API_CALL os_is_grid_supported(void) +{ +#if defined(NV_GRID_BUILD) + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +NvU32 NV_API_CALL os_get_grid_csp_support(void) +{ +#if defined(NV_GRID_BUILD_CSP) + return NV_GRID_BUILD_CSP; +#else + return 0; +#endif +} + +void NV_API_CALL os_bug_check(NvU32 bugCode, const char *bugCodeStr) +{ + panic(bugCodeStr); +} + +NV_STATUS NV_API_CALL os_get_euid(NvU32 *pSecToken) +{ + *pSecToken = NV_CURRENT_EUID(); + return NV_OK; +} + +// These functions are needed only on x86_64 platforms. 
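+// They locate the SMBIOS entry point structure by searching for its "_SM3_"
+// (64-bit) or "_SM_" (32-bit) anchor string and validating the entry point
+// checksum.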
+#if defined(NVCPU_X86_64) + +static NvBool os_verify_checksum(const NvU8 *pMappedAddr, NvU32 length) +{ + NvU8 sum = 0; + NvU32 iter = 0; + + for (iter = 0; iter < length; iter++) + sum += pMappedAddr[iter]; + + return sum == 0; +} + +#define _VERIFY_SMBIOS3(_pMappedAddr) \ + _pMappedAddr && \ + (os_mem_cmp(_pMappedAddr, "_SM3_", 5) == 0 && \ + _pMappedAddr[6] < 32 && \ + _pMappedAddr[6] > 0 && \ + os_verify_checksum(_pMappedAddr, _pMappedAddr[6])) + +#define OS_VERIFY_SMBIOS3(pMappedAddr) _VERIFY_SMBIOS3((pMappedAddr)) + +#define _VERIFY_SMBIOS(_pMappedAddr) \ + _pMappedAddr && \ + (os_mem_cmp(_pMappedAddr, "_SM_", 4) == 0 && \ + _pMappedAddr[5] < 32 && \ + _pMappedAddr[5] > 0 && \ + os_verify_checksum(_pMappedAddr, _pMappedAddr[5]) && \ + os_mem_cmp((_pMappedAddr + 16), "_DMI_", 5) == 0 && \ + os_verify_checksum((_pMappedAddr + 16), 15)) + +#define OS_VERIFY_SMBIOS(pMappedAddr) _VERIFY_SMBIOS((pMappedAddr)) + +#define SMBIOS_LEGACY_BASE 0xF0000 +#define SMBIOS_LEGACY_SIZE 0x10000 + +static NV_STATUS os_get_smbios_header_legacy(NvU64 *pSmbsAddr) +{ + NV_STATUS status = NV_ERR_OPERATING_SYSTEM; + NvU8 *pMappedAddr = NULL; + NvU8 *pIterAddr = NULL; + + pMappedAddr = (NvU8*)os_map_kernel_space(SMBIOS_LEGACY_BASE, + SMBIOS_LEGACY_SIZE, + NV_MEMORY_CACHED); + if (pMappedAddr == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pIterAddr = pMappedAddr; + + for (; pIterAddr < (pMappedAddr + SMBIOS_LEGACY_SIZE); pIterAddr += 16) + { + if (OS_VERIFY_SMBIOS3(pIterAddr)) + { + *pSmbsAddr = SMBIOS_LEGACY_BASE + (pIterAddr - pMappedAddr); + status = NV_OK; + break; + } + + if (OS_VERIFY_SMBIOS(pIterAddr)) + { + *pSmbsAddr = SMBIOS_LEGACY_BASE + (pIterAddr - pMappedAddr); + status = NV_OK; + break; + } + } + + os_unmap_kernel_space(pMappedAddr, SMBIOS_LEGACY_SIZE); + + return status; +} + +// This function is needed only if "efi" is enabled. +#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI)) +static NV_STATUS os_verify_smbios_header_uefi(NvU64 smbsAddr) +{ + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + NvU64 start= 0, offset =0 , size = 32; + NvU8 *pMappedAddr = NULL, *pBufAddr = NULL; + + start = smbsAddr; + offset = (start & ~os_page_mask); + start &= os_page_mask; + size = ((size + offset + ~os_page_mask) & os_page_mask); + + pBufAddr = (NvU8*)os_map_kernel_space(start, + size, + NV_MEMORY_CACHED); + if (pBufAddr == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pMappedAddr = pBufAddr + offset; + + if (OS_VERIFY_SMBIOS3(pMappedAddr)) + { + status = NV_OK; + goto done; + } + + if (OS_VERIFY_SMBIOS(pMappedAddr)) + { + status = NV_OK; + } + +done: + os_unmap_kernel_space(pBufAddr, size); + return status; +} +#endif + +static NV_STATUS os_get_smbios_header_uefi(NvU64 *pSmbsAddr) +{ + NV_STATUS status = NV_ERR_OPERATING_SYSTEM; + +// Make sure that efi.h is present before using "struct efi". +#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI)) + +// Make sure that efi.h has SMBIOS3_TABLE_GUID present. +#if defined(SMBIOS3_TABLE_GUID) + if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) + { + status = os_verify_smbios_header_uefi(efi.smbios3); + if (status == NV_OK) + { + *pSmbsAddr = efi.smbios3; + return NV_OK; + } + } +#endif + + if (efi.smbios != EFI_INVALID_TABLE_ADDR) + { + status = os_verify_smbios_header_uefi(efi.smbios); + if (status == NV_OK) + { + *pSmbsAddr = efi.smbios; + return NV_OK; + } + } +#endif + + return status; +} + +#endif // defined(NVCPU_X86_64) + +// The function locates the SMBIOS entry point. 
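+// When EFI is enabled, the address is taken from the UEFI system table
+// (preferring the SMBIOS 3.x table when available); otherwise the legacy
+// BIOS window at 0xF0000-0xFFFFF is scanned. Non-x86_64 builds return
+// NV_ERR_NOT_SUPPORTED.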
+NV_STATUS NV_API_CALL os_get_smbios_header(NvU64 *pSmbsAddr) +{ + +#if !defined(NVCPU_X86_64) + return NV_ERR_NOT_SUPPORTED; +#else + NV_STATUS status = NV_OK; + + if (os_is_efi_enabled()) + { + status = os_get_smbios_header_uefi(pSmbsAddr); + } + else + { + status = os_get_smbios_header_legacy(pSmbsAddr); + } + + return status; +#endif +} + +NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi +( + NvU32 *pRsdpAddr +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (pRsdpAddr == NULL) + { + return NV_ERR_INVALID_STATE; + } + + *pRsdpAddr = 0; + +// Make sure that efi.h is present before using "struct efi". +#if (defined(NV_LINUX_EFI_H_PRESENT) && defined(CONFIG_EFI)) + + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + { + *pRsdpAddr = efi.acpi20; + status = NV_OK; + } + else if (efi.acpi != EFI_INVALID_TABLE_ADDR) + { + *pRsdpAddr = efi.acpi; + status = NV_OK; + } + else + { + nv_printf(NV_DBG_ERRORS, "NVRM: RSDP Not found!\n"); + status = NV_ERR_OPERATING_SYSTEM; + } +#endif + + return status; +} + +void NV_API_CALL os_add_record_for_crashLog(void *pbuffer, NvU32 size) +{ +} + +void NV_API_CALL os_delete_record_for_crashLog(void *pbuffer) +{ +} + +#if !defined(NV_VGPU_KVM_BUILD) +NV_STATUS NV_API_CALL os_call_vgpu_vfio(void *pvgpu_vfio_info, NvU32 cmd_type) +{ + return NV_ERR_NOT_SUPPORTED; +} +#endif + +NV_STATUS NV_API_CALL os_alloc_pages_node +( + NvS32 nid, + NvU32 size, + NvU32 flag, + NvU64 *pAddress +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + +#if defined(__GFP_THISNODE) && defined(GFP_HIGHUSER_MOVABLE) && \ + defined(__GFP_COMP) && defined(__GFP_NORETRY) && defined(__GFP_NOWARN) + gfp_t gfp_mask; + struct page *alloc_addr; + unsigned int order = get_order(size); + + /* + * Explanation of flags used: + * + * 1. __GFP_THISNODE: This will make sure the allocation happens + * on the node specified by nid. + * + * 2. GFP_HIGHUSER_MOVABLE: This makes allocations from ZONE_MOVABLE. + * + * 3. __GFP_COMP: This will make allocations with compound + * pages, which is needed in order to use + * vm_insert_page API. + * + * 4. __GFP_NORETRY: Used to avoid the Linux kernel OOM killer. + * + * 5. __GFP_NOWARN: Used to avoid a WARN_ON in the slowpath if + * the requested order is too large (just fail + * instead). + * + * 6. (Optional) __GFP_RECLAIM: Used to allow/forbid reclaim. + * This is part of GFP_USER and consequently + * GFP_HIGHUSER_MOVABLE. + * + * Some of these flags are relatively more recent, with the last of them + * (GFP_HIGHUSER_MOVABLE) having been added with this Linux kernel commit: + * + * 2007-07-17 769848c03895b63e5662eb7e4ec8c4866f7d0183 + * + * Assume that this feature will only be used on kernels that support all + * of the needed GFP flags. + */ + + gfp_mask = __GFP_THISNODE | GFP_HIGHUSER_MOVABLE | __GFP_COMP | + __GFP_NORETRY | __GFP_NOWARN; + +#if defined(__GFP_RECLAIM) + if (flag & NV_ALLOC_PAGES_NODE_SKIP_RECLAIM) + { + gfp_mask &= ~(__GFP_RECLAIM); + } +#endif // defined(__GFP_RECLAIM) + + alloc_addr = alloc_pages_node(nid, gfp_mask, order); + if (alloc_addr == NULL) + { + nv_printf(NV_DBG_INFO, + "NVRM: alloc_pages_node(node = %d, order = %u) failed\n", + nid, order); + status = NV_ERR_NO_MEMORY; + } + else if (page_to_nid(alloc_addr) != nid) + { + // + // We can hit this case when a Linux kernel bug is not patched. 
+ // The needed patch is https://patchwork.kernel.org/patch/10427387/ + // + nv_printf(NV_DBG_ERRORS, + "NVRM: alloc_pages_node(node = %d, order = %u) wrong node ID.\n", + nid, order); + __free_pages(alloc_addr, order); + status = NV_ERR_NO_MEMORY; + } + else + { + *pAddress = (NvU64)page_to_phys(alloc_addr); + status = NV_OK; + } +#endif // GFP flags + + return status; +} + +NV_STATUS NV_API_CALL os_get_page +( + NvU64 address +) +{ + get_page(NV_GET_PAGE_STRUCT(address)); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_put_page +( + NvU64 address +) +{ + put_page(NV_GET_PAGE_STRUCT(address)); + return NV_OK; +} + +NvU32 NV_API_CALL os_get_page_refcount +( + NvU64 address +) +{ + return NV_PAGE_COUNT(NV_GET_PAGE_STRUCT(address)); +} + +NvU32 NV_API_CALL os_count_tail_pages +( + NvU64 address +) +{ + NvU32 order = compound_order(compound_head(NV_GET_PAGE_STRUCT(address))); + + return 1 << order; +} + +void NV_API_CALL os_free_pages_phys +( + NvU64 address, + NvU32 size +) +{ + __free_pages(NV_GET_PAGE_STRUCT(address), get_order(size)); +} + +NV_STATUS NV_API_CALL os_numa_memblock_size +( + NvU64 *memblock_size +) +{ + if (nv_ctl_device.numa_memblock_size == 0) + return NV_ERR_INVALID_STATE; + *memblock_size = nv_ctl_device.numa_memblock_size; + return NV_OK; +} + +NV_STATUS NV_API_CALL os_call_nv_vmbus(NvU32 vmbus_cmd, void *input) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL os_open_temporary_file +( + void **ppFile +) +{ +#if defined(O_TMPFILE) + struct file *file; + const char *default_path = "/tmp"; + const int flags = O_TMPFILE | O_LARGEFILE | O_RDWR; + const char *path = NVreg_TemporaryFilePath; + + /* + * The filp_open() call below depends on the current task's fs_struct + * (current->fs), which may already be NULL if this is called during + * process teardown. + */ + if (current->fs == NULL) + { + return NV_ERR_OPERATING_SYSTEM; + } + + if (!path) + { + path = default_path; + } + + file = filp_open(path, flags, 0); + if (IS_ERR(file)) + { + if ((path != default_path) && (PTR_ERR(file) == -ENOENT)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The temporary file path specified via the NVreg_TemporaryFilePath\n" + "NVRM: module parameter does not exist. 
Defaulting to /tmp.\n"); + + file = filp_open(default_path, flags, 0); + } + } + + if (IS_ERR(file)) + { + return NV_ERR_OPERATING_SYSTEM; + } + + *ppFile = (void *)file; + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void NV_API_CALL os_close_file +( + void *pFile +) +{ + filp_close(pFile, NULL); +} + +#define NV_MAX_NUM_FILE_IO_RETRIES 10 + +NV_STATUS NV_API_CALL os_write_file +( + void *pFile, + NvU8 *pBuffer, + NvU64 size, + NvU64 offset +) +{ +#if defined(NV_KERNEL_WRITE_PRESENT) + loff_t f_pos = offset; + ssize_t num_written; + int num_retries = NV_MAX_NUM_FILE_IO_RETRIES; + +retry: +#if defined(NV_KERNEL_WRITE_HAS_POINTER_POS_ARG) + num_written = kernel_write(pFile, pBuffer, size, &f_pos); +#else + num_written = kernel_write(pFile, pBuffer, size, f_pos); +#endif + if (num_written < 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + else if (num_written < size) + { + if (num_written > 0) + { + pBuffer += num_written; + size -= num_written; + } + if (--num_retries > 0) + { + cond_resched(); + goto retry; + } + return NV_ERR_OPERATING_SYSTEM; + } + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS NV_API_CALL os_read_file +( + void *pFile, + NvU8 *pBuffer, + NvU64 size, + NvU64 offset +) +{ + loff_t f_pos = offset; + ssize_t num_read; + int num_retries = NV_MAX_NUM_FILE_IO_RETRIES; + +retry: +#if defined(NV_KERNEL_READ_HAS_POINTER_POS_ARG) + num_read = kernel_read(pFile, pBuffer, size, &f_pos); +#else + num_read = kernel_read(pFile, f_pos, pBuffer, size); +#endif + if (num_read < 0) + { + return NV_ERR_OPERATING_SYSTEM; + } + else if (num_read < size) + { + if (num_read > 0) + { + pBuffer += num_read; + size -= num_read; + } + if (--num_retries > 0) + { + cond_resched(); + goto retry; + } + return NV_ERR_OPERATING_SYSTEM; + } + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_open_readonly_file +( + const char *filename, + void **ppFile +) +{ + struct file *file; + + /* + * The filp_open() call below depends on the current task's fs_struct + * (current->fs), which may already be NULL if this is called during + * process teardown. 
+ */ + if (current->fs == NULL) + { + return NV_ERR_OPERATING_SYSTEM; + } + + file = filp_open(filename, O_RDONLY, 0); + if (IS_ERR(file)) + { + return NV_ERR_OPERATING_SYSTEM; + } + + *ppFile = (void *)file; + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_open_and_read_file +( + const char *filename, + NvU8 *buf, + NvU64 count +) +{ + void *fileHandle; + NV_STATUS status; + + status = os_open_readonly_file(filename, &fileHandle); + if (status != NV_OK) + { + return status; + } + + status = os_read_file(fileHandle, buf, count, 0); + + os_close_file(fileHandle); + + return status; +} + +NvBool NV_API_CALL os_is_nvswitch_present(void) +{ + struct pci_device_id nvswitch_pci_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), + .class = PCI_CLASS_BRIDGE_OTHER << 8, + .class_mask = PCI_ANY_ID + }, + {0} + }; + + return !!pci_dev_present(nvswitch_pci_table); +} + +void NV_API_CALL os_get_random_bytes +( + NvU8 *bytes, + NvU16 numBytes +) +{ + get_random_bytes(bytes, numBytes); +} + +NV_STATUS NV_API_CALL os_alloc_wait_queue +( + os_wait_queue **wq +) +{ + NV_KMALLOC(*wq, sizeof(os_wait_queue)); + if (*wq == NULL) + return NV_ERR_NO_MEMORY; + + init_completion(&(*wq)->q); + + return NV_OK; +} + +void NV_API_CALL os_free_wait_queue +( + os_wait_queue *wq +) +{ + NV_KFREE(wq, sizeof(os_wait_queue)); +} + +void NV_API_CALL os_wait_uninterruptible +( + os_wait_queue *wq +) +{ + wait_for_completion(&wq->q); +} + +void NV_API_CALL os_wait_interruptible +( + os_wait_queue *wq +) +{ + wait_for_completion_interruptible(&wq->q); +} + +void NV_API_CALL os_wake_up +( + os_wait_queue *wq +) +{ + complete_all(&wq->q); +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +nv_cap_t* NV_API_CALL os_nv_cap_init +( + const char *path +) +{ + return nv_cap_init(path); +} + +nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry +( + nv_cap_t *parent_cap, + const char *name, + int mode +) +{ + return nv_cap_create_dir_entry(parent_cap, name, mode); +} + +nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry +( + nv_cap_t *parent_cap, + const char *name, + int mode +) +{ + return nv_cap_create_file_entry(parent_cap, name, mode); +} + +void NV_API_CALL os_nv_cap_destroy_entry +( + nv_cap_t *cap +) +{ + nv_cap_destroy_entry(cap); +} + +int NV_API_CALL os_nv_cap_validate_and_dup_fd +( + const nv_cap_t *cap, + int fd +) +{ + return nv_cap_validate_and_dup_fd(cap, fd); +} + +void NV_API_CALL os_nv_cap_close_fd +( + int fd +) +{ + nv_cap_close_fd(fd); +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/kernel-open/nvidia/os-mlock.c b/kernel-open/nvidia/os-mlock.c new file mode 100644 index 000000000..2369baebf --- /dev/null +++ b/kernel-open/nvidia/os-mlock.c @@ -0,0 +1,287 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +static inline int nv_follow_pfn(struct vm_area_struct *vma, + unsigned long address, + unsigned long *pfn) +{ +#if defined(NV_UNSAFE_FOLLOW_PFN_PRESENT) + return unsafe_follow_pfn(vma, address, pfn); +#else + return follow_pfn(vma, address, pfn); +#endif +} + +/*! + * @brief Locates the PFNs for a user IO address range, and converts those to + * their associated PTEs. + * + * @param[in] vma VMA that contains the virtual address range given by the + * start and page count parameters. + * @param[in] start Beginning of the virtual address range of the IO PTEs. + * @param[in] page_count Number of pages containing the IO range being + * mapped. + * @param[in,out] pte_array Storage array for PTE addresses. Must be large + * enough to contain at least page_count pointers. + * + * @return NV_OK if the PTEs were identified successfully, error otherwise. + */ +static NV_STATUS get_io_ptes(struct vm_area_struct *vma, + NvUPtr start, + NvU64 page_count, + NvU64 **pte_array) +{ + NvU64 i; + unsigned long pfn; + + for (i = 0; i < page_count; i++) + { + if (nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0) + { + return NV_ERR_INVALID_ADDRESS; + } + + pte_array[i] = (NvU64 *)(pfn << PAGE_SHIFT); + + if (i == 0) + continue; + + // + // This interface is to be used for contiguous, uncacheable I/O regions. + // Internally, osCreateOsDescriptorFromIoMemory() checks the user-provided + // flags against this, and creates a single memory descriptor with the same + // attributes. This check ensures the actual mapping supplied matches the + // user's declaration. Ensure the PFNs represent a contiguous range, + // error if they do not. + // + if ((NvU64)pte_array[i] != (((NvU64)pte_array[i-1]) + PAGE_SIZE)) + { + return NV_ERR_INVALID_ADDRESS; + } + } + return NV_OK; +} + +/*! + * @brief Pins user IO pages that have been mapped to the user processes virtual + * address space with remap_pfn_range. + * + * @param[in] vma VMA that contains the virtual address range given by the + * start and the page count. + * @param[in] start Beginning of the virtual address range of the IO pages. + * @param[in] page_count Number of pages to pin from start. + * @param[in,out] page_array Storage array for pointers to the pinned pages. + * Must be large enough to contain at least page_count + * pointers. 
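+ *
+ * @note The pages are expected to have been inserted into the VMA with
+ *       remap_pfn_range(), so they are looked up with nv_follow_pfn() and
+ *       pinned with get_page() rather than through get_user_pages().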
+ * + * @return NV_OK if the pages were pinned successfully, error otherwise. + */ +static NV_STATUS get_io_pages(struct vm_area_struct *vma, + NvUPtr start, + NvU64 page_count, + struct page **page_array) +{ + NV_STATUS rmStatus = NV_OK; + NvU64 i, pinned = 0; + unsigned long pfn; + + for (i = 0; i < page_count; i++) + { + if ((nv_follow_pfn(vma, (start + (i * PAGE_SIZE)), &pfn) < 0) || + (!pfn_valid(pfn))) + { + rmStatus = NV_ERR_INVALID_ADDRESS; + break; + } + + // Page-backed memory mapped to userspace with remap_pfn_range + page_array[i] = pfn_to_page(pfn); + get_page(page_array[i]); + pinned++; + } + + if (pinned < page_count) + { + for (i = 0; i < pinned; i++) + put_page(page_array[i]); + rmStatus = NV_ERR_INVALID_ADDRESS; + } + + return rmStatus; +} + +NV_STATUS NV_API_CALL os_lookup_user_io_memory( + void *address, + NvU64 page_count, + NvU64 **pte_array, + void **page_array +) +{ + NV_STATUS rmStatus; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long pfn; + NvUPtr start = (NvUPtr)address; + void **result_array; + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): invalid context!\n", __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + rmStatus = os_alloc_mem((void **)&result_array, (page_count * sizeof(NvP64))); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate page table!\n"); + return rmStatus; + } + + nv_mmap_read_lock(mm); + + // find the first VMA which intersects the interval start_addr..end_addr-1, + vma = find_vma_intersection(mm, start, start+1); + + // Verify that the given address range is contained in a single vma + if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0) || + !((vma->vm_start <= start) && + ((vma->vm_end - start) >> PAGE_SHIFT >= page_count))) + { + nv_printf(NV_DBG_ERRORS, + "Cannot map memory with base addr 0x%llx and size of 0x%llx pages\n", + start ,page_count); + rmStatus = NV_ERR_INVALID_ADDRESS; + goto done; + } + + if (nv_follow_pfn(vma, start, &pfn) < 0) + { + rmStatus = NV_ERR_INVALID_ADDRESS; + goto done; + } + + if (pfn_valid(pfn)) + { + rmStatus = get_io_pages(vma, start, page_count, (struct page **)result_array); + if (rmStatus == NV_OK) + *page_array = (void *)result_array; + } + else + { + rmStatus = get_io_ptes(vma, start, page_count, (NvU64 **)result_array); + if (rmStatus == NV_OK) + *pte_array = (NvU64 *)result_array; + } + +done: + nv_mmap_read_unlock(mm); + + if (rmStatus != NV_OK) + { + os_free_mem(result_array); + } + + return rmStatus; +} + +NV_STATUS NV_API_CALL os_lock_user_pages( + void *address, + NvU64 page_count, + void **page_array, + NvU32 flags +) +{ + NV_STATUS rmStatus; + struct mm_struct *mm = current->mm; + struct page **user_pages; + NvU64 i, pinned; + NvBool write = DRF_VAL(_LOCK_USER_PAGES, _FLAGS, _WRITE, flags), force = 0; + int ret; + + if (!NV_MAY_SLEEP()) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: %s(): invalid context!\n", __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + rmStatus = os_alloc_mem((void **)&user_pages, + (page_count * sizeof(*user_pages))); + if (rmStatus != NV_OK) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: failed to allocate page table!\n"); + return rmStatus; + } + + nv_mmap_read_lock(mm); + ret = NV_GET_USER_PAGES((unsigned long)address, + page_count, write, force, user_pages, NULL); + nv_mmap_read_unlock(mm); + pinned = ret; + + if (ret < 0) + { + os_free_mem(user_pages); + return NV_ERR_INVALID_ADDRESS; + } + else if (pinned < page_count) + { + for (i = 0; i < pinned; i++) + 
put_page(user_pages[i]); + os_free_mem(user_pages); + return NV_ERR_INVALID_ADDRESS; + } + + *page_array = user_pages; + + return NV_OK; +} + +NV_STATUS NV_API_CALL os_unlock_user_pages( + NvU64 page_count, + void *page_array +) +{ + NvBool write = 1; + struct page **user_pages = page_array; + NvU32 i; + + for (i = 0; i < page_count; i++) + { + if (write) + set_page_dirty_lock(user_pages[i]); + put_page(user_pages[i]); + } + + os_free_mem(user_pages); + + return NV_OK; +} diff --git a/kernel-open/nvidia/os-pci.c b/kernel-open/nvidia/os-pci.c new file mode 100644 index 000000000..3fdf487f5 --- /dev/null +++ b/kernel-open/nvidia/os-pci.c @@ -0,0 +1,206 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +void* NV_API_CALL os_pci_init_handle( + NvU32 domain, + NvU8 bus, + NvU8 slot, + NvU8 function, + NvU16 *vendor, + NvU16 *device +) +{ + struct pci_dev *dev; + unsigned int devfn = PCI_DEVFN(slot, function); + + if (!NV_MAY_SLEEP()) + return NULL; + + dev = NV_GET_DOMAIN_BUS_AND_SLOT(domain, bus, devfn); + if (dev != NULL) + { + if (vendor) *vendor = dev->vendor; + if (device) *device = dev->device; + pci_dev_put(dev); /* TODO: Fix me! 
(hotplug) */ + } + return (void *) dev; +} + +NV_STATUS NV_API_CALL os_pci_read_byte( + void *handle, + NvU32 offset, + NvU8 *pReturnValue +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + { + *pReturnValue = 0xff; + return NV_ERR_NOT_SUPPORTED; + } + pci_read_config_byte( (struct pci_dev *) handle, offset, pReturnValue); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_read_word( + void *handle, + NvU32 offset, + NvU16 *pReturnValue +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + { + *pReturnValue = 0xffff; + return NV_ERR_NOT_SUPPORTED; + } + pci_read_config_word( (struct pci_dev *) handle, offset, pReturnValue); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_read_dword( + void *handle, + NvU32 offset, + NvU32 *pReturnValue +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + { + *pReturnValue = 0xffffffff; + return NV_ERR_NOT_SUPPORTED; + } + pci_read_config_dword( (struct pci_dev *) handle, offset, pReturnValue); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_write_byte( + void *handle, + NvU32 offset, + NvU8 value +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + return NV_ERR_NOT_SUPPORTED; + + pci_write_config_byte( (struct pci_dev *) handle, offset, value); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_write_word( + void *handle, + NvU32 offset, + NvU16 value +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + return NV_ERR_NOT_SUPPORTED; + + pci_write_config_word( (struct pci_dev *) handle, offset, value); + return NV_OK; +} + +NV_STATUS NV_API_CALL os_pci_write_dword( + void *handle, + NvU32 offset, + NvU32 value +) +{ + if (offset >= NV_PCIE_CFG_MAX_OFFSET) + return NV_ERR_NOT_SUPPORTED; + + pci_write_config_dword( (struct pci_dev *) handle, offset, value); + return NV_OK; +} + +NvBool NV_API_CALL os_pci_remove_supported(void) +{ +#if defined NV_PCI_STOP_AND_REMOVE_BUS_DEVICE + return NV_TRUE; +#else + return NV_FALSE; +#endif +} + +void NV_API_CALL os_pci_remove( + void *handle +) +{ +#if defined(NV_PCI_STOP_AND_REMOVE_BUS_DEVICE) + NV_PCI_STOP_AND_REMOVE_BUS_DEVICE(handle); +#elif defined(DEBUG) + nv_printf(NV_DBG_ERRORS, + "NVRM: %s() is called even though NV_PCI_STOP_AND_REMOVE_BUS_DEVICE is not defined\n", + __FUNCTION__); + os_dbg_breakpoint(); +#endif +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/kernel-open/nvidia/os-registry.c b/kernel-open/nvidia/os-registry.c new file mode 100644 index 000000000..ed0d09e93 --- /dev/null +++ b/kernel-open/nvidia/os-registry.c @@ -0,0 +1,336 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define __NO_VERSION__
+#define NV_DEFINE_REGISTRY_KEY_TABLE
+#include "os-interface.h"
+#include "nv-linux.h"
+#include "nv-reg.h"
+#include "nv-gpu-info.h"
+
+/*!
+ * @brief This function parses the PCI BDF identifier string and returns the
+ * domain, bus, device and function components from the PCI BDF string.
+ *
+ * This parser is flexible and hence accepts the PCI BDF string in any of the
+ * following 3 formats.
+ *
+ * 1) bus:slot             : Domain and function default to 0.
+ * 2) domain:bus:slot      : Function defaults to 0.
+ * 3) domain:bus:slot.func : Complete PCI dev id string.
+ *
+ * @param[in]  pci_dev_str  String containing the BDF to be parsed.
+ * @param[out] pci_domain   Pointer where pci_domain is to be returned.
+ * @param[out] pci_bus      Pointer where pci_bus is to be returned.
+ * @param[out] pci_slot     Pointer where pci_slot is to be returned.
+ * @param[out] pci_func     Pointer where pci_func is to be returned.
+ *
+ * @return NV_OK if parsing succeeds, or an NV_STATUS error code otherwise.
+ */
+static NV_STATUS pci_str_to_bdf(char *pci_dev_str, NvU32 *pci_domain,
+    NvU32 *pci_bus, NvU32 *pci_slot, NvU32 *pci_func)
+{
+    char *option_string = NULL;
+    char *token, *string;
+    NvU32 domain, bus, slot;
+    NV_STATUS status = NV_OK;
+
+    //
+    // rm_remove_spaces() allocates memory, hence we need to keep a pointer
+    // to the original string for freeing at the end of the function.
+    //
+    if ((option_string = rm_remove_spaces(pci_dev_str)) == NULL)
+    {
+        // memory allocation failed, returning
+        return NV_ERR_GENERIC;
+    }
+
+    string = option_string;
+
+    if (!strlen(string) || !pci_domain || !pci_bus || !pci_slot || !pci_func)
+    {
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto done;
+    }
+
+    if ((token = strsep(&string, ".")) != NULL)
+    {
+        // A PCI device can have a maximum of 8 functions.
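+        // Anything that follows the '.' separator must therefore be a single
+        // digit in the range '0'-'7'.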
+ if ((string != NULL) && (!(*string >= '0' && *string <= '7') || + (strlen(string) > 1))) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Invalid PCI function in token %s\n", + pci_dev_str); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + else if (string == NULL) + { + *pci_func = 0; + } + else + { + *pci_func = (NvU32)(*string - '0'); + } + + domain = simple_strtoul(token, &string, 16); + + if ((string == NULL) || (*string != ':') || (*(string + 1) == '\0')) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Invalid PCI domain/bus in token %s\n", + pci_dev_str); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + token = string; + bus = simple_strtoul((token + 1), &string, 16); + + if (string == NULL) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Invalid PCI bus/slot in token %s\n", + pci_dev_str); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (*string != '\0') + { + if ((*string != ':') || (*(string + 1) == '\0')) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Invalid PCI slot in token %s\n", + pci_dev_str); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + token = string; + slot = (NvU32)simple_strtoul(token + 1, &string, 16); + if ((slot == 0) && ((token + 1) == string)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Invalid PCI slot in token %s\n", + pci_dev_str); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + *pci_domain = domain; + *pci_bus = bus; + *pci_slot = slot; + } + else + { + *pci_slot = bus; + *pci_bus = domain; + *pci_domain = 0; + } + status = NV_OK; + } + else + { + status = NV_ERR_INVALID_ARGUMENT; + } + +done: + // Freeing the memory allocated by remove_spaces(). + os_free_mem(option_string); + return status; +} + +/*! + * @brief This function parses the registry keys per GPU device. It accepts a + * semicolon separated list of key=value pairs. The first key value pair MUST be + * "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot + * number and F is the Function. This PCI BDF is used to identify which GPU to + * assign the registry keys that follows next. + * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT + * found, then all the registry keys that follows are skipped, until we find next + * valid pci identified "pci=DDDD:BB:DD.F;". Following are the valid formats for + * the value of the "pci" string: + * 1) bus:slot : Domain and function defaults to 0. + * 2) domain:bus:slot : Function defaults to 0. + * 3) domain:bus:slot.func : Complete PCI dev id string. + * + * + * @param[in] sp pointer to nvidia_stack_t struct. + * + * @return NV_OK if succeeds, or NV_STATUS error code otherwise. + */ +NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp) +{ + NV_STATUS status = NV_OK; + char *option_string = NULL; + char *ptr, *token; + char *name, *value; + NvU32 data, domain, bus, slot, func; + nv_linux_state_t *nvl = NULL; + nv_state_t *nv = NULL; + + if (NVreg_RegistryDwordsPerDevice != NULL) + { + if ((option_string = rm_remove_spaces(NVreg_RegistryDwordsPerDevice)) == NULL) + { + return NV_ERR_GENERIC; + } + + ptr = option_string; + + while ((token = strsep(&ptr, ";")) != NULL) + { + if (!(name = strsep(&token, "=")) || !strlen(name)) + { + continue; + } + + if (!(value = strsep(&token, "=")) || !strlen(value)) + { + continue; + } + + if (strsep(&token, "=") != NULL) + { + continue; + } + + // If this key is "pci", then value is pci_dev id string + // which needs special parsing as it is NOT a dword. 
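+            // For example (illustrative), setting
+            //   NVreg_RegistryDwordsPerDevice="pci=0000:01:00.0;SomeKey=1"
+            // applies "SomeKey" only to the GPU at PCI BDF 0000:01:00.0.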
+ if (strcmp(name, NV_REG_PCI_DEVICE_BDF) == 0) + { + status = pci_str_to_bdf(value, &domain, &bus, &slot, &func); + + // Check if PCI_DEV id string was in a valid format or NOT. + if (NV_OK != status) + { + // lets reset cached pci dev + nv = NULL; + } + else + { + nvl = find_pci(domain, bus, slot, func); + // + // If NO GPU found corresponding to this GPU, then reset + // cached state. This helps ignore the following registry + // keys until valid PCI BDF is found in the commandline. + // + if (!nvl) + { + nv = NULL; + } + else + { + nv = NV_STATE_PTR(nvl); + } + } + continue; + } + + // + // Check if cached pci_dev string in the commandline is in valid + // format, else we will skip all the successive registry entries + // ( pairs) until a valid PCI_DEV string is encountered + // in the commandline. + // + if (!nv) + continue; + + data = (NvU32)simple_strtoul(value, NULL, 0); + + rm_write_registry_dword(sp, nv, name, data); + } + + os_free_mem(option_string); + } + return status; +} + +/* + * Compare given string UUID with the GpuBlacklist or ExcludedGpus registry + * parameter string and return whether the UUID is in the GPU exclusion list + */ +NvBool nv_is_uuid_in_gpu_exclusion_list(const char *uuid) +{ + const char *input; + char *list; + char *ptr; + char *token; + + // + // When both NVreg_GpuBlacklist and NVreg_ExcludedGpus are defined + // NVreg_ExcludedGpus takes precedence. + // + if (NVreg_ExcludedGpus != NULL) + input = NVreg_ExcludedGpus; + else if (NVreg_GpuBlacklist != NULL) + input = NVreg_GpuBlacklist; + else + return NV_FALSE; + + if ((list = rm_remove_spaces(input)) == NULL) + return NV_FALSE; + + ptr = list; + + while ((token = strsep(&ptr, ",")) != NULL) + { + if (strcmp(token, uuid) == 0) + { + os_free_mem(list); + return NV_TRUE; + } + } + os_free_mem(list); + return NV_FALSE; +} + +NV_STATUS NV_API_CALL os_registry_init(void) +{ + nv_parm_t *entry; + unsigned int i; + nvidia_stack_t *sp = NULL; + + if (nv_kmem_cache_alloc_stack(&sp) != 0) + { + return NV_ERR_NO_MEMORY; + } + + if (NVreg_RmMsg != NULL) + { + rm_write_registry_string(sp, NULL, + "RmMsg", NVreg_RmMsg, strlen(NVreg_RmMsg)); + } + + rm_parse_option_string(sp, NVreg_RegistryDwords); + + for (i = 0; (entry = &nv_parms[i])->name != NULL; i++) + { + rm_write_registry_dword(sp, NULL, entry->name, *entry->data); + } + + nv_kmem_cache_free_stack(sp); + + return NV_OK; +} diff --git a/kernel-open/nvidia/os-usermap.c b/kernel-open/nvidia/os-usermap.c new file mode 100644 index 000000000..2022e0fac --- /dev/null +++ b/kernel-open/nvidia/os-usermap.c @@ -0,0 +1,78 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define __NO_VERSION__ + +#include "os-interface.h" +#include "nv-linux.h" + +void* NV_API_CALL os_map_user_space( + NvU64 start, + NvU64 size_bytes, + NvU32 mode, + NvU32 protect, + void **priv_data +) +{ + return (void *)(NvUPtr)start; +} + +void NV_API_CALL os_unmap_user_space( + void *address, + NvU64 size, + void *priv_data +) +{ +} + +NV_STATUS NV_API_CALL os_match_mmap_offset( + void *pAllocPrivate, + NvU64 offset, + NvU64 *pPageIndex +) +{ + nv_alloc_t *at = pAllocPrivate; + NvU64 i; + + for (i = 0; i < at->num_pages; i++) + { + if (at->flags.contig) + { + if (offset == (at->page_table[0]->phys_addr + (i * PAGE_SIZE))) + { + *pPageIndex = i; + return NV_OK; + } + } + else + { + if (offset == at->page_table[i]->phys_addr) + { + *pPageIndex = i; + return NV_OK; + } + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} diff --git a/kernel-open/nvidia/procfs_nvswitch.c b/kernel-open/nvidia/procfs_nvswitch.c new file mode 100644 index 000000000..a1230456e --- /dev/null +++ b/kernel-open/nvidia/procfs_nvswitch.c @@ -0,0 +1,205 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "linux_nvswitch.h" +#include "nv-procfs.h" + +#include + +#if defined(CONFIG_PROC_FS) + +#define NV_DEFINE_SINGLE_NVSWITCH_PROCFS_FILE(name) \ + NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, nv_system_pm_lock) + +#define NVSWITCH_PROCFS_DIR "driver/nvidia-nvswitch" + +static struct proc_dir_entry *nvswitch_procfs_dir; +static struct proc_dir_entry *nvswitch_permissions; +static struct proc_dir_entry *nvswitch_procfs_devices; + +static int +nv_procfs_read_permissions +( + struct seq_file *s, + void *v +) +{ + // Restrict device node permissions - 0666. Used by nvidia-modprobe. 
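+    // (438 decimal == 0666 octal.)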
+ seq_printf(s, "%s: %u\n", "DeviceFileMode", 438); + + return 0; +} + +NV_DEFINE_SINGLE_NVSWITCH_PROCFS_FILE(permissions); + +static int +nv_procfs_read_device_info +( + struct seq_file *s, + void *v +) +{ + NVSWITCH_DEV *nvswitch_dev = s->private; + + if (!nvswitch_dev) + { + NVSWITCH_OS_ASSERT(0); + return -EFAULT; + } + + seq_printf(s, "BIOS Version: "); + + if (nvswitch_dev->bios_ver) + { + seq_printf(s, "%02llx.%02llx.%02llx.%02llx.%02llx\n", + nvswitch_dev->bios_ver >> 32, + (nvswitch_dev->bios_ver >> 24) & 0xFF, + (nvswitch_dev->bios_ver >> 16) & 0xFF, + (nvswitch_dev->bios_ver >> 8) & 0xFF, + nvswitch_dev->bios_ver & 0xFF); + } + else + { + seq_printf(s, "N/A\n"); + } + + return 0; +} + +NV_DEFINE_SINGLE_NVSWITCH_PROCFS_FILE(device_info); + +void +nvswitch_procfs_device_remove +( + NVSWITCH_DEV *nvswitch_dev +) +{ + if (!nvswitch_dev || !nvswitch_dev->procfs_dir) + { + NVSWITCH_OS_ASSERT(0); + return; + } + + nv_procfs_unregister_all(nvswitch_dev->procfs_dir, nvswitch_dev->procfs_dir); + nvswitch_dev->procfs_dir = NULL; +} + +int +nvswitch_procfs_device_add +( + NVSWITCH_DEV *nvswitch_dev +) +{ + struct pci_dev *pci_dev; + struct proc_dir_entry *device_dir, *entry; + char name[32]; + + if (!nvswitch_dev || !nvswitch_dev->pci_dev) + { + NVSWITCH_OS_ASSERT(0); + return -1; + } + + pci_dev = nvswitch_dev->pci_dev; + + snprintf(name, sizeof(name), "%04x:%02x:%02x.%1x", + NV_PCI_DOMAIN_NUMBER(pci_dev), NV_PCI_BUS_NUMBER(pci_dev), + NV_PCI_SLOT_NUMBER(pci_dev), PCI_FUNC(pci_dev->devfn)); + + device_dir = NV_CREATE_PROC_DIR(name, nvswitch_procfs_devices); + if (!device_dir) + return -1; + + nvswitch_dev->procfs_dir = device_dir; + + entry = NV_CREATE_PROC_FILE("information", device_dir, device_info, + nvswitch_dev); + if (!entry) + goto failed; + + return 0; + +failed: + nvswitch_procfs_device_remove(nvswitch_dev); + return -1; +} + +void +nvswitch_procfs_exit +( + void +) +{ + if (!nvswitch_procfs_dir) + { + return; + } + + nv_procfs_unregister_all(nvswitch_procfs_dir, nvswitch_procfs_dir); + nvswitch_procfs_dir = NULL; +} + +int +nvswitch_procfs_init +( + void +) +{ + nvswitch_procfs_dir = NV_CREATE_PROC_DIR(NVSWITCH_PROCFS_DIR, NULL); + if (!nvswitch_procfs_dir) + { + return -EACCES; + } + + nvswitch_permissions = NV_CREATE_PROC_FILE("permissions", + nvswitch_procfs_dir, + permissions, + NULL); + if (!nvswitch_permissions) + { + goto cleanup; + } + + nvswitch_procfs_devices = NV_CREATE_PROC_DIR("devices", nvswitch_procfs_dir); + if (!nvswitch_procfs_devices) + { + goto cleanup; + } + + return 0; + +cleanup: + + nvswitch_procfs_exit(); + + return -EACCES; +} + +#else // !CONFIG_PROC_FS + +int nvswitch_procfs_init(void) { return 0; } +void nvswitch_procfs_exit(void) { } +int nvswitch_procfs_device_add(NVSWITCH_DEV *nvswitch_dev) { return 0; } +void nvswitch_procfs_device_remove(NVSWITCH_DEV *nvswitch_dev) { } + +#endif // CONFIG_PROC_FS diff --git a/kernel-open/nvidia/rmp2pdefines.h b/kernel-open/nvidia/rmp2pdefines.h new file mode 100644 index 000000000..2ef8458d6 --- /dev/null +++ b/kernel-open/nvidia/rmp2pdefines.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMP2PDEFINES_H_ +#define _RMP2PDEFINES_H_ + +#define NVRM_P2P_PAGESIZE_SMALL_4K (4 << 10) +#define NVRM_P2P_PAGESIZE_BIG_64K (64 << 10) +#define NVRM_P2P_PAGESIZE_BIG_128K (128 << 10) + +#endif diff --git a/src/common/displayport/inc/dp_address.h b/src/common/displayport/inc/dp_address.h new file mode 100644 index 000000000..505632df0 --- /dev/null +++ b/src/common/displayport/inc/dp_address.h @@ -0,0 +1,284 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_address.h * +* Basic class for AUX Address * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_ADDRESS_H +#define INCLUDED_DP_ADDRESS_H + +#include "dp_internal.h" + +namespace DisplayPort +{ + class Address + { + public: + enum + { + maxHops = 15, // update DP_MAX_ADDRESS_HOPS when changed (in displayportCommon.h) + maxHopsHDCP = 7, + maxPortCount = 15 + }; + + Address() + { + clear(); + } + + Address(unsigned hop0) + { + clear(); + hop[hops++] = hop0; + } + + Address(unsigned hop0, unsigned hop1) + { + clear(); + hop[hops++] = hop0; + hop[hops++] = hop1; + } + + Address(const Address & other) + { + clear(); + for(unsigned i = 0; i < other.size(); i++) + { + append(other[i]); + } + } + + void clear() + { + hops = 0; + for (unsigned i = 0; i < maxHops; i++) + { + hop[i] = 0; + } + } + + Address parent() const + { + DP_ASSERT(hops != 0); + Address addr = *this; + addr.hops --; + return addr; + } + + unsigned tail() const + { + if (hops == 0) + { + DP_ASSERT(hops != 0); + return 0; + } + return hop[hops-1]; + } + + void append(unsigned port) + { + if (hops >= maxHops) + { + DP_ASSERT(0); + return; + } + hop[hops++] = port; + } + + void prepend(unsigned port) + { + if (hops >= maxHops) + { + DP_ASSERT(0); + return; + } + hops++; + for (unsigned i = hops - 1; i > 0; i--) + hop[i] = hop[i-1]; + hop[0] = port; + } + + void pop() + { + if (hops == 0) + { + DP_ASSERT(0); + return; + } + hops--; + } + + // Just to keep clear copy + Address & operator = (const Address & other) + { + clear(); + for(unsigned i = 0; i < other.size(); i++) + { + append(other[i]); + } + + return *this; + } + + bool operator == (const Address & other) const + { + if (other.size() != size()) + return false; + + for (unsigned i = 0; i < hops; i++) + if (other[i] != (*this)[i]) + return false; + + return true; + } + + // + // Sort by size first, then "alphabetically" (lexicographical see wikipedia) + // + bool operator > (const Address & other) const + { + if (size() > other.size()) + return true; + else if (size() < other.size()) + return false; + + for (unsigned i = 0; i < hops; i++) + { + if ((*this)[i] > other[i]) + return true; + else if ((*this)[i] < other[i]) + return false; + } + + return false; + } + + // + // Sort by size first, then "alphabetically" (lexicographical see wikipedia) + // + bool operator < (const Address & other) const + { + if (size() < other.size()) + return true; + else if (size() > other.size()) + return false; + + for (unsigned i = 0; i < hops; i++) + { + if ((*this)[i] < other[i]) + return true; + else if ((*this)[i] > other[i]) + return false; + } + + return false; + } + + bool operator >= (const Address & other) const + { + return !((*this) < other); + } + + bool operator <= (const Address & other) const + { + return !((*this) > other); + } + + bool operator != (const Address & other) const + { + return !((*this) == other); + } + + unsigned size() const + { + return hops; + } + + unsigned & operator [](unsigned index) + { + DP_ASSERT(index < hops); + return hop[index]; + } + + const unsigned & operator [](unsigned index) const + { + DP_ASSERT(index < hops); + return hop[index]; + } + + bool under(const Address & root) const + { + if (size() < root.size()) + return false; + + for (unsigned i = 0; i < root.size(); i++) + if ((*this)[i] != root[i]) + return false; + + return true; + } + + typedef char StringBuffer[maxHops*3+1]; + char * 
toString(StringBuffer & buffer, bool removeLeadingZero = false) const + { + char * p = &buffer[0]; + int hopsWritten = 0; + for (unsigned i = 0; i < hops; i++) + { + if (i == 0 && hop[0] == 0 && removeLeadingZero) + continue; + if (hopsWritten > 0) + *p++ = '.'; + if (hop[i] >= 10) + *p++ = (char)(hop[i] / 10 +'0'); + *p++ = (char)(hop[i] % 10 + '0'); + hopsWritten++; + } + + *p++= 0; + return (char *)&buffer[0]; + } + + // Large enough to fit 4 hops into every NvU32 + typedef NvU32 NvU32Buffer[(maxHops-1)/4+1 < 4 ? 4 : (maxHops-1)/4+1]; + NvU32 * toNvU32Buffer(NvU32Buffer & buffer) const + { + for (unsigned i = 0; i < hops; i++) + { + buffer[i/4] |= ((NvU8) hop[i]) << (i % 4) * 8; + } + + return (NvU32 *)&buffer[0]; + } + + private: + unsigned hop[maxHops]; + unsigned hops; + }; +} + +#endif //INCLUDED_DP_ADDRESS_H diff --git a/src/common/displayport/inc/dp_auxbus.h b/src/common/displayport/inc/dp_auxbus.h new file mode 100644 index 000000000..12d03881d --- /dev/null +++ b/src/common/displayport/inc/dp_auxbus.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_auxbus.h * +* Interface for low level access to the aux bus. * +* This is the synchronous version of the interface. 
* +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_AUXBUS_H +#define INCLUDED_DP_AUXBUS_H + +namespace DisplayPort +{ + class AuxBus : virtual public Object + { + public: + enum status + { + success, + defer, + nack, + unSupported, + }; + + enum Action + { + read, + write, + writeStatusUpdateRequest, // I2C only + }; + + enum Type + { + native, + i2c, + i2cMot + }; + + virtual status transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason = NULL, + NvU8 offset = 0, NvU8 nWriteTransactions = 0) = 0; + + virtual unsigned transactionSize() = 0; + virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) { return nack; } + virtual void setDevicePlugged(bool) {} + virtual ~AuxBus() {} + }; + + // + // Wraps an auxbus interface with one that prints all the input and output + // + AuxBus * CreateAuxLogger(AuxBus * auxBus); +} + +#endif //INCLUDED_DP_AUXBUS_H diff --git a/src/common/displayport/inc/dp_auxdefs.h b/src/common/displayport/inc/dp_auxdefs.h new file mode 100644 index 000000000..26f34c927 --- /dev/null +++ b/src/common/displayport/inc/dp_auxdefs.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_auxdefs.h * +* Definitions for DPCD AUX offsets * +* Should be used sparingly (DPCD HAL preferred) * +* * +\***************************************************************************/ + +#ifndef __DP_AUXDEFS_H__ +#define __DP_AUXDEFS_H__ + +#define DPCD_MESSAGEBOX_SIZE 48 + +// +// This definitions are being used for orin Hdcp opensourcing. Ideally this +// should be replaced with build flags. 
Bug ID: 200733434 +// +#define DP_OPTION_HDCP_SUPPORT_ENABLE 1 /* HDCP Enable */ + +#define DP_OPTION_HDCP_12_ENABLED 1 /* DP1.2 HDCP ENABLE */ + +#define DP_OPTION_QSE_ENABLED 1 /* Remove here when QSE p4r check-in */ + +// +// If a message is outstanding for at least 4 seconds +// assume no reply is coming through +// +#define DPCD_MESSAGE_REPLY_TIMEOUT 4000 + +#define DPCD_LINK_ADDRESS_MESSAGE_RETRIES 20 // 20 retries +#define DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN 10 // 10ms between attempts + +// pointing to the defaults for LAM settings to start with +#define DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES DPCD_LINK_ADDRESS_MESSAGE_RETRIES +#define DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN + +#define DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES 7 // 7 retries +#define DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN +#define DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV 20 // 20ms between attempts + +#define DPCD_QUERY_STREAM_MESSAGE_RETRIES 7 // 7 retries +#define DPCD_QUERY_STREAM_MESSAGE_COOLDOWN 20 // 20ms between attempts + +#define MST_EDID_RETRIES 20 +#define MST_EDID_COOLDOWN 10 + +#define MST_ALLOCATE_RETRIES 10 +#define MST_ALLOCATE_COOLDOWN 10 + +#define HDCP_AUTHENTICATION_RETRIES 6 // 6 retries +#define HDCP_CPIRQ_RXSTAUS_RETRIES 3 +#define HDCP_AUTHENTICATION_COOLDOWN 1000// 1 sec between attempts +#define HDCP22_AUTHENTICATION_COOLDOWN 2000// 2 sec between attempts +#define HDCP_AUTHENTICATION_COOLDOWN_HPD 3000// 3 sec for first stream Add +#define HDCP_CPIRQ_RXSTATUS_COOLDOWN 20 // 20ms between attempts + +// Need to re-submit Stream Validation request to falcon microcontroller after 1 sec if current request fails +#define HDCP_STREAM_VALIDATION_RESUBMIT_COOLDOWN 1000 + +// +// Wait till 8secs for completion of the KSV and Stream Validation, if that doesn't complete +// then timeout. +// +#define HDCP_STREAM_VALIDATION_REQUEST_COOLDOWN 8000 + +#define DPCD_OUI_NVIDIA 0x00044B + +// +// Define maximum retry count that checking Payload ID table updated before +// trigger ACT sequence. +// +#define PAYLOADIDTABLE_UPDATED_CHECK_RETRIES 300 + +#endif // __DP_AUXDEFS_H__ diff --git a/src/common/displayport/inc/dp_auxretry.h b/src/common/displayport/inc/dp_auxretry.h new file mode 100644 index 000000000..2f20949e6 --- /dev/null +++ b/src/common/displayport/inc/dp_auxretry.h @@ -0,0 +1,181 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_auxretry.h * +* Adapter interface for friendlier AuxBus * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_AUXRETRY_H +#define INCLUDED_DP_AUXRETRY_H + +#include "dp_auxbus.h" +#include "dp_timeout.h" + +namespace DisplayPort +{ + enum + { + minimumRetriesOnDefer = 7 + }; + + class AuxRetry + { + AuxBus * aux; + public: + AuxRetry(AuxBus * aux = 0) + : aux(aux) + { + } + + AuxBus * getDirect() + { + return aux; + } + + enum status + { + ack, + nack, + unsupportedRegister, + defer + }; + + // + // Perform an aux read transaction. + // - Automatically handles defers up to retry limit + // - Retries on partial read + // + virtual status readTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + + // + // Similar to readTransaction except that it supports reading + // larger spans than AuxBus::transactionSize() + // + virtual status read(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + + // + // Perform an aux write transaction. + // - Automatically handles defers up to retry limit + // - Retries on partial write + // + virtual status writeTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + + // + // Similar to writeTransaction except that it supports writin + // larger spans than AuxBus::transactionSize() + // + virtual status write(int address, NvU8 * buffer, unsigned size, unsigned retries = minimumRetriesOnDefer); + }; + + class AuxLogger : public AuxBus + { + AuxBus * bus; + char hex[256]; + char hex_body[256]; + char hint[128]; + + public: + AuxLogger(AuxBus * bus) : bus(bus) + { + } + + const char * getAction(Action action) + { + if (action == read) + return "rd "; + else if (action == write) + return "wr "; + else if (action == writeStatusUpdateRequest) + return "writeStatusUpdateRequest "; + else + DP_ASSERT(0); + return "???"; + } + + const char * getType(Type typ) + { + if (typ == native) + return ""; + else if (typ == i2c) + return "i2c "; + else if (typ == i2cMot) + return "i2cMot "; + else + DP_ASSERT(0); + return "???"; + } + + const char * getStatus(status stat) + { + if (stat == success) + return ""; + else if (stat == nack) + return "(nack) "; + else if (stat == defer) + return "(defer) "; + else + DP_ASSERT(0); + return "???"; + } + + const char * getRequestId(unsigned requestIdentifier) + { + switch(requestIdentifier) + { + case 0x1: return "LINK_ADDRESS"; + case 0x4: return "CLEAR_PAT"; + case 0x10: return "ENUM_PATH"; + case 0x11: return "ALLOCATE"; + case 0x12: return "QUERY"; + case 0x20: return "DPCD_READ"; + case 0x21: return "DPCD_WRITE"; + case 0x22: return "I2C_READ"; + case 0x23: return "I2C_WRITE"; + case 0x24: return "POWER_UP_PHY"; + case 0x25: return "POWER_DOWN_PHY"; + case 0x38: return "HDCP_STATUS"; + default: return ""; + } + } + + virtual status transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, unsigned * pNakReason, + NvU8 offset, NvU8 nWriteTransactions); + + virtual unsigned transactionSize() + { + return 
bus->transactionSize(); + } + + virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) + { + return bus->fecTransaction(fecStatus, fecErrorCount, flags); + } + }; +} + +#endif //INCLUDED_DP_AUXRETRY_H diff --git a/src/common/displayport/inc/dp_bitstream.h b/src/common/displayport/inc/dp_bitstream.h new file mode 100644 index 000000000..3d01f7415 --- /dev/null +++ b/src/common/displayport/inc/dp_bitstream.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_bitstream.h * +* This is an implementation of the big endian bit stream * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_BITSTREAM_H +#define INCLUDED_DP_BITSTREAM_H + +#include "dp_buffer.h" + +namespace DisplayPort +{ + // + // Bitstream reader interface + // - reads a packed stream of bits in Big Endian format + // - handles alignment, buffering, and buffer bounds checking + // + class BitStreamReader + { + Buffer * sourceBuffer; + unsigned bitsOffset; + unsigned bitsEnd; + + public: + // Read 1-32 bits from the stream into *value. Returns true on success + bool read(unsigned * value, unsigned bits); + + // Read 1-32 bits from stream. Returns 'default' on failure. + unsigned readOrDefault(unsigned bits, unsigned defaultValue); + + // Skip bits until we're aligned to the power of two alignment + bool align(unsigned align); + + unsigned offset(); + Buffer * buffer(); + BitStreamReader(Buffer * buffer, unsigned bitsOffset, unsigned bitsCount); + }; + + // + // Bitstream writer interface + // + class BitStreamWriter + { + Buffer * targetBuffer; + unsigned bitsOffset; + public: + // + // Create a bitstream writer at a specific bit offset + // into an already existing buffer + // + BitStreamWriter(Buffer * buffer, unsigned bitsOffset = 0); + + // + // Write n bits to the buffer in big endian format. + // No buffering is performed. + // + bool write(unsigned value, unsigned bits); + + // + // Emit zero's until the offset is divisible by align. 
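+        // (For example, with 3 bits already written, align(8) would emit
+        //  5 zero bits to reach the next byte boundary.)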
+ // CAVEAT: align must be a power of 2 (eg 8) + // + bool align(unsigned align); + + // + // Get current offset and buffer target + // + unsigned offset(); + Buffer * buffer(); + }; +} + +#endif //INCLUDED_DP_BITSTREAM_H diff --git a/src/common/displayport/inc/dp_buffer.h b/src/common/displayport/inc/dp_buffer.h new file mode 100644 index 000000000..6d3e6f501 --- /dev/null +++ b/src/common/displayport/inc/dp_buffer.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_buffer.h * +* Resizable byte buffer and stream classes * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_BUFFER_H +#define INCLUDED_DP_BUFFER_H + +#include "dp_internal.h" + +namespace DisplayPort +{ + class Buffer + { + public: + NvU8 *data; // Data buffer + unsigned length; // bytes used + unsigned capacity; // size of allocation + bool errorState; // did we lose a malloc in there? + public: + // + // Write will only fail if we're unable to reallocate the buffer. In this case + // the buffer will be reset to its empty state. + // + const NvU8 * getData() const { return data; } + NvU8 * getData() { return data; } + bool resize(unsigned newSize); + void memZero(); + void reset(); + unsigned getLength() const { return length; } + + // Is in error state? This happens if malloc fails. Error state is + // held until reset is called. 
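+        // (Per the note above, a failed reallocation empties the buffer and
+        //  isError() keeps returning true until reset() is called.)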
+ bool isError() const; + + Buffer(const Buffer & other); + Buffer(NvU8 * data, unsigned size); + Buffer & operator = (const Buffer & other); + Buffer(); + ~Buffer(); + + void swap(Buffer & other) { + swap_args(other.data, data); + swap_args(other.length, length); + swap_args(other.capacity, capacity); + swap_args(other.errorState, errorState); + } + + bool operator== (const Buffer & other) const; + }; + + class Stream + { + protected: + Buffer * parent; + unsigned byteOffset; + public: + Stream(Buffer * buffer); + bool seek(unsigned where); + bool read(NvU8 * buffer, unsigned size); + bool write(NvU8 * buffer, unsigned size); + + // returns error state of buffer + bool isError() const; + unsigned remaining(); + unsigned offset(); + }; + + void swapBuffers(Buffer & left, Buffer & right); +} + +#endif //INCLUDED_DP_BUFFER_H diff --git a/src/common/displayport/inc/dp_configcaps.h b/src/common/displayport/inc/dp_configcaps.h new file mode 100644 index 000000000..bf563aa0f --- /dev/null +++ b/src/common/displayport/inc/dp_configcaps.h @@ -0,0 +1,535 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_configcaps.h * +* Abstraction for basic caps registers * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_CONFIGCAPS_H +#define INCLUDED_DP_CONFIGCAPS_H + +#include "dp_connector.h" +#include "dp_auxretry.h" +#include "dp_linkconfig.h" +#include "dp_regkeydatabase.h" + +namespace DisplayPort +{ + enum PowerState + { + PowerStateD0 = 1, + PowerStateD3 = 2, + PowerStateD3AuxOn = 5 + }; + + // Extended caps = offset 0x80 + enum DwnStreamPortType + { + DISPLAY_PORT = 0, + ANALOG_VGA, + DVI, + HDMI, + WITHOUT_EDID, + DISPLAY_PORT_PLUSPLUS + } ; + + enum DwnStreamPortAttribute + { + RESERVED = 0, + IL_720_480_60HZ, + IL_720_480_50HZ, + IL_1920_1080_60HZ, + IL_1920_1080_50HZ, + PG_1280_720_60HZ, + PG_1280_720_50_HZ, + } ; + + // DPCD Offset 102 enums + enum TrainingPatternSelectType + { + TRAINING_DISABLED, + TRAINING_PAT_ONE, + TRAINING_PAT_TWO, + TRAINING_PAT_THREE, + }; + + enum SymbolErrorSelectType + { + DISPARITY_ILLEGAL_SYMBOL_ERROR, + DISPARITY_ERROR, + ILLEGAL_SYMBOL_ERROR, + }; + + // DPCD Offset 1A1 enums + enum MultistreamHotplugMode + { + HPD_LONG_PULSE, + IRQ_HPD, + }; + + // DPCD Offset 220 + enum TestPatternType + { + NO_PATTERN, + COLOR_RAMPS, + BLACK_WHITE, + COLOR_SQUARE, + } ; + + // DPCD Offset 232, 233 + enum ColorFormatType + { + RGB, + YCbCr_422, + YCbCr_444, + } ; + + enum DynamicRangeType + { + VESA, + CEA, + } ; + + enum YCBCRCoeffType + { + ITU601, + ITU709, + } ; + + #define HDCP_BCAPS_SIZE (0x1) + #define HDCP_VPRIME_SIZE (0x14) + #define HDCP_KSV_FIFO_SIZE (0xF) + #define HDCP_KSV_FIFO_WINDOWS_RETRY (0x3) + #define HDCP22_BCAPS_SIZE (0x1) + + // Bstatus DPCD offset 0x68029 + #define HDCPREADY (0x1) + #define R0PRIME_AVAILABLE (0x2) + #define LINK_INTEGRITY_FAILURE (0x4) + #define REAUTHENTICATION_REQUEST (0x8) + + struct BInfo + { + bool maxCascadeExceeded; + unsigned depth; + bool maxDevsExceeded; + unsigned deviceCount; + }; + + struct BCaps + { + bool repeater; + bool HDCPCapable; + }; + + enum + { + PHYSICAL_PORT_START = 0x0, + PHYSICAL_PORT_END = 0x7, + LOGICAL_PORT_START = 0x8, + LOGICAL_PORT_END = 0xF + }; + + class LaneStatus + { + public: + // + // Lane Status + // CAUTION: Only updated on IRQ/HPD right now + // + virtual bool getLaneStatusClockRecoveryDone(int lane) = 0; // DPCD offset 202, 203 + virtual bool getLaneStatusSymbolLock(int lane)= 0; + virtual bool getInterlaneAlignDone() = 0; + virtual bool getDownStreamPortStatusChange() = 0; + }; + + class TestRequest + { + public: + virtual bool getPendingTestRequestTraining() = 0; // DPCD offset 218 + virtual void getTestRequestTraining(LinkRate & rate, unsigned & lanes) = 0; // DPCD offset 219, 220 + virtual bool getPendingAutomatedTestRequest() = 0; // DPCD offset 218 + virtual bool getPendingTestRequestEdidRead() = 0; // DPCD offset 218 + virtual bool getPendingTestRequestPhyCompliance() = 0; // DPCD offset 218 + virtual LinkQualityPatternType getPhyTestPattern() = 0; // DPCD offset 248 + virtual AuxRetry::status setTestResponse(bool ack, bool edidChecksumWrite = false) = 0; + virtual AuxRetry::status setTestResponseChecksum(NvU8 checksum) = 0; + }; + + class LegacyPort + { + public: + virtual DwnStreamPortType getDownstreamPortType() = 0; + virtual DwnStreamPortAttribute getDownstreamNonEDIDPortAttribute() = 0; + + // For port type = HDMI + virtual NvU64 getMaxTmdsClkRate() = 0; + }; + + class LinkState + { + public: + // + // Link state + 
// + virtual bool isPostLtAdjustRequestSupported() = 0; + virtual void setPostLtAdjustRequestGranted(bool bGrantPostLtRequest) = 0; + virtual bool getIsPostLtAdjRequestInProgress() = 0; // DPCD offset 204 + virtual TrainingPatternSelectType getTrainingPatternSelect() = 0; // DPCD offset 102 + + virtual bool setTrainingMultiLaneSet(NvU8 numLanes, + NvU8 *voltSwingSet, + NvU8 *preEmphasisSet) = 0; + + virtual bool readTraining(NvU8* voltageSwingLane, + NvU8* preemphasisLane = 0, + NvU8* trainingScoreLane = 0, + NvU8* postCursor = 0, + NvU8 activeLaneCount = 0) = 0; + + virtual bool isLaneSettingsChanged(NvU8* oldVoltageSwingLane, + NvU8* newVoltageSwingLane, + NvU8* oldPreemphasisLane, + NvU8* newPreemphasisLane, + NvU8 activeLaneCount) = 0; + + virtual AuxRetry::status setIgnoreMSATimingParamters(bool msaTimingParamIgnoreEn) = 0; + virtual AuxRetry::status setLinkQualLaneSet(unsigned lane, LinkQualityPatternType linkQualPattern) = 0; + virtual AuxRetry::status setLinkQualPatternSet(LinkQualityPatternType linkQualPattern, unsigned laneCount = 0) = 0; + }; + + class LinkCapabilities + { + public: + + // + // Physical layer feature set + // + virtual NvU64 getMaxLinkRate() = 0; // Maximum byte-block in Hz + virtual unsigned getMaxLaneCount() = 0; // DPCD offset 2 + virtual unsigned getMaxLaneCountSupportedAtLinkRate(LinkRate linkRate) = 0; + virtual bool getEnhancedFraming() = 0; + virtual bool getSupportsNoHandshakeTraining() = 0; + virtual bool getMsaTimingparIgnored() = 0; + virtual bool getDownstreamPort(NvU8 *portType) = 0; // DPCD offset 5 + virtual bool getSupportsMultistream() = 0; // DPCD offset 21h + virtual bool getNoLinkTraining() = 0; // DPCD offset 330h + virtual unsigned getPhyRepeaterCount() = 0; // DPCD offset F0002h + }; + + class OUI + { + public: + virtual bool getOuiSupported() = 0; + virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, size_t modelNameLength, NvU8 chipRevision) = 0; + virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0; + }; + + class HDCP + { + public: + virtual bool getBKSV(NvU8 *bKSV) = 0; // DPCD offset 0x68000 + virtual bool getBCaps(BCaps &bCaps, NvU8 * rawByte = 0) = 0; // DPCD offset 0x68028 + virtual bool getHdcp22BCaps(BCaps &bCaps, NvU8 * rawByte = 0) = 0; // DPCD offset 0x6921D + virtual bool getBinfo(BInfo &bInfo) = 0; // DPCD offset 0x6802A + + // Generic interfaces for HDCP 1.x / 2.2 + virtual bool getRxStatus(const HDCPState &hdcpState, NvU8 *data) = 0; + }; + + class DPCDHAL : + virtual public Object, + public TestRequest, + public LaneStatus, + public LinkState, + public LinkCapabilities, + public OUI, + public HDCP + { + public: + // + // Notifications of external events + // We sent IRQ/HPD events to the HAL so that it knows + // when to re-read the registers. 
All the remaining + // calls are either accessors to cached state (caps), + // or DPCD get/setters + // + virtual void notifyIRQ() = 0; + virtual void notifyHPD(bool status, bool bSkipDPCDRead = false) = 0; + + virtual void populateFakeDpcd() = 0; + + // DPCD override routines + virtual void overrideMaxLinkRate(NvU32 overrideMaxLinkRate) = 0; + virtual void overrideMaxLaneCount(NvU32 maxLaneCount) = 0; + virtual void skipCableBWCheck(NvU32 maxLaneAtHighRate, NvU32 maxLaneAtLowRate) = 0; + virtual void overrideOptimalLinkCfg(LinkRate optimalLinkRate, NvU32 optimalLaneCount) = 0; + virtual void overrideOptimalLinkRate(LinkRate optimalLinkRate) = 0; + + virtual bool isDpcdOffline() = 0; + virtual void setAuxBus(AuxBus * bus) = 0; + virtual NvU32 getVideoFallbackSupported() = 0; + // + // Cached CAPS + // These are only re-read when notifyHPD is called + // + virtual unsigned getRevisionMajor() = 0; + virtual unsigned getRevisionMinor() = 0; + + virtual unsigned lttprGetRevisionMajor() = 0; + virtual unsigned lttprGetRevisionMinor() = 0; + + virtual bool getSDPExtnForColorimetry() = 0; + + bool isAtLeastVersion(unsigned major, unsigned minor) + { + if (getRevisionMajor() > major) + return true; + + if (getRevisionMajor() < major) + return false; + + return getRevisionMinor() >= minor; + } + + bool isVersion(unsigned major, unsigned minor) + { + if ((getRevisionMajor() == major) && + (getRevisionMinor() == minor)) + return true; + + return false; + } + + bool lttprIsAtLeastVersion(unsigned major, unsigned minor) + { + if (lttprGetRevisionMajor() > major) + return true; + + if (lttprGetRevisionMinor() < major) + return false; + + return lttprGetRevisionMinor() >= minor; + } + + bool lttprIsVersion(unsigned major, unsigned minor) + { + if ((lttprGetRevisionMajor() == major) && + (lttprGetRevisionMinor() == minor)) + return true; + + return false; + } + + // Convert Link Bandwidth read from DPCD register to Linkrate + NvU64 mapLinkBandiwdthToLinkrate(NvU32 linkBandwidth) + { + if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _1_62_GBPS, linkBandwidth)) + return RBR; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _2_70_GBPS, linkBandwidth)) + return HBR; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _5_40_GBPS, linkBandwidth)) + return HBR2; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_BANDWIDTH, _VAL, _8_10_GBPS, linkBandwidth)) + return HBR3; + else + { + DP_ASSERT(0 && "Unknown link bandwidth. 
Assuming HBR"); + return HBR; + } + } + + // + // Native aux transaction size (16 for AUX) + // + virtual size_t getTransactionSize() = 0; + + // + // SST Branching device/dongle/repeater + // - Describes downstream port limitations + // - Not for use with MST + // - Primarily used for dongles (look at port 0 for pclk limits) + // + virtual LegacyPort * getLegacyPort(unsigned index) = 0; + virtual unsigned getLegacyPortCount() = 0; + + virtual PCONCaps * getPCONCaps() = 0; + + // + // Single stream specific caps + // + virtual unsigned getNumberOfAudioEndpoints() = 0; + virtual int getSinkCount() = 0; + virtual void setSinkCount(int sinkCount) = 0; + + // + // MISC + // + virtual bool isPC2Disabled() = 0; + virtual void setPC2Disabled(bool disabled) = 0; + + virtual void setDPCDOffline(bool enable) = 0; + virtual void updateDPCDOffline() = 0; + + virtual void setSupportsESI(bool bIsESISupported) = 0; + virtual void setLttprSupported(bool isLttprSupported) = 0; + + // + // Intermediate Link Rate (eDP ILR) + // + virtual void setIndexedLinkrateEnabled(bool newVal) = 0; + virtual bool isIndexedLinkrateEnabled() = 0; + virtual bool isIndexedLinkrateCapable() = 0; + virtual NvU16 *getLinkRateTable() = 0; + virtual bool getRawLinkRateTable(NvU8 *buffer = NULL) = 0; + + // + // Link power state management + // + virtual bool setPowerState(PowerState newState) = 0; + virtual PowerState getPowerState() = 0; + // + // Multistream + // + virtual bool getGUID(GUID & guid) = 0; // DPCD offset 30 + virtual AuxRetry::status setGUID(GUID & guid) = 0; + virtual AuxRetry::status setMessagingEnable(bool uprequestEnable, bool upstreamIsSource) = 0; + virtual AuxRetry::status setMultistreamLink(bool bMultistream) = 0; + virtual void payloadTableClearACT() = 0; + virtual bool payloadWaitForACTReceived() = 0; + virtual bool payloadAllocate(unsigned streamId, unsigned begin, unsigned count) = 0; + virtual bool clearPendingMsg() = 0; + virtual bool isMessagingEnabled() = 0; + + // + // If set to IRQ we'll receive CSN messages on hotplugs (which are actually easy to miss). + // If set to HPD mode we'll always receive an HPD whenever the topology changes. + // The library supports using both modes. 
+ // + virtual AuxRetry::status setMultistreamHotplugMode(MultistreamHotplugMode notifyType) = 0; + + // + // Interrupts + // + virtual bool interruptContentProtection() = 0; + virtual void clearInterruptContentProtection() = 0; + + virtual bool intteruptMCCS() = 0; + virtual void clearInterruptMCCS() = 0; + + virtual bool interruptDownReplyReady() = 0; + virtual void clearInterruptDownReplyReady() = 0; + + virtual bool interruptUpRequestReady() = 0; + virtual void clearInterruptUpRequestReady() = 0; + + virtual bool interruptCapabilitiesChanged() = 0; + virtual void clearInterruptCapabilitiesChanged() = 0; + + virtual bool getLinkStatusChanged() = 0; + virtual void clearLinkStatusChanged() = 0; + + virtual bool getHdmiLinkStatusChanged() = 0; + virtual void clearHdmiLinkStatusChanged() = 0; + + virtual bool getStreamStatusChanged() = 0; + virtual void clearStreamStatusChanged() =0; + + virtual void setDirtyLinkStatus(bool dirty) = 0; + virtual void refreshLinkStatus() = 0; + virtual bool isLinkStatusValid(unsigned lanes) = 0; + + virtual void getCustomTestPattern(NvU8 *testPattern) = 0; // DPCD offset 250 - 259 + + // + // Message Boxes + // + virtual AuxRetry::status writeDownRequestMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getDownRequestMessageBoxSize() = 0; + + virtual AuxRetry::status writeUpReplyMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getUpReplyMessageBoxSize() = 0; + + virtual AuxRetry::status readDownReplyMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0; + virtual size_t getDownReplyMessageBoxSize() = 0; + + virtual AuxRetry::status readUpRequestMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0; + virtual size_t getUpRequestMessageBoxSize() = 0; + + // MST<->SST override + virtual void overrideMultiStreamCap(bool mstCapable) = 0; + virtual bool getMultiStreamCapOverride() = 0; + + virtual bool getDpcdMultiStreamCap(void) = 0; + + // Set GPU DP support capability + virtual void setGpuDPSupportedVersions(bool supportDp1_2, bool supportDp1_4) = 0; + + // Set GPU FEC support capability + virtual void setGpuFECSupported(bool bSupportFEC) = 0; + + virtual void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) = 0; + + // PCON configuration + + // Reset PCON (to default state) + virtual void resetProtocolConverter() = 0; + // Source control mode and FRL/HDMI mode selection. 
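+        // (Assumption: bEnableFRLMode selects HDMI FRL output on the protocol
+        //  converter when true, TMDS otherwise.)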
+ virtual bool setSourceControlMode(bool bEnableSourceControlMode, bool bEnableFRLMode) = 0; + + virtual bool checkPCONFrlReady(bool *bFrlReady) = 0; + virtual bool setupPCONFrlLinkAssessment(NvU32 linkBw, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) = 0; + + virtual bool checkPCONFrlLinkStatus(NvU32 *frlRate) = 0; + + virtual bool queryHdmiLinkStatus(bool *bLinkActive, bool *bLinkReady) = 0; + virtual NvU32 restorePCONFrlLink(NvU32 linkBwMask, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) = 0; + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) = 0; + virtual bool updatePsrConfiguration(vesaPsrConfig config) = 0; + virtual bool readPsrConfiguration(vesaPsrConfig *config) = 0; + virtual bool readPsrState(vesaPsrState *psrState) = 0; + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) = 0; + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr) = 0; + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr) = 0; + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr) = 0; + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr) = 0; + + virtual ~DPCDHAL() {} + + }; + + // + // Implement interface + // + DPCDHAL * MakeDPCDHAL(AuxBus * bus, Timer * timer); +} + +#endif //INCLUDED_DP_CONFIGCAPS_H diff --git a/src/common/displayport/inc/dp_connector.h b/src/common/displayport/inc/dp_connector.h new file mode 100644 index 000000000..24dff412c --- /dev/null +++ b/src/common/displayport/inc/dp_connector.h @@ -0,0 +1,678 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_connector.h * +* This is the primary client interface. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_CONNECTOR_H +#define INCLUDED_DP_CONNECTOR_H + +#include "dp_auxdefs.h" +#include "dp_object.h" +#include "dp_mainlink.h" +#include "dp_auxbus.h" +#include "dp_address.h" +#include "dp_guid.h" +#include "dp_evoadapter.h" +#include "dp_auxbus.h" +#include "dp_auxretry.h" +#include "displayport.h" +#include "dp_vrr.h" +#include "../../modeset/timing/nvt_dsc_pps.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" + +namespace DisplayPort +{ + class EvoInterface; + + typedef enum + { + DP_TESTMESSAGE_STATUS_SUCCESS = 0, + DP_TESTMESSAGE_STATUS_ERROR = 0xDEADBEEF, + DP_TESTMESSAGE_STATUS_ERROR_INSUFFICIENT_INPUT_BUFFER = 0xDEADBEED, + DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM = 0xDEADBEEC + // new error code should be here + } DP_TESTMESSAGE_STATUS; + + typedef enum + { + False = 0, + True = 1, + Indeterminate = 2 + } TriState; + + enum ConnectorType + { + connectorDisplayPort, + connectorHDMI, + connectorDVI, + connectorVGA + }; + + typedef struct portMap + { + NvU16 validMap; // port i is valid = bit i is high + NvU16 inputMap; // port i is input port = bit i is high && validMap bit i is high + NvU16 internalMap; // port i is internal = bit i is high && validMap bit i is high + } PortMap; + + enum ForceDsc + { + DSC_DEFAULT, + DSC_FORCE_ENABLE, + DSC_FORCE_DISABLE + }; + + struct DpModesetParams + { + unsigned headIndex; + ModesetInfo modesetInfo; + DP_COLORFORMAT colorFormat; + NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS msaparams; + + DpModesetParams() : headIndex(0), modesetInfo(), colorFormat(dpColorFormat_Unknown), msaparams() {} + + DpModesetParams(unsigned newHeadIndex, + ModesetInfo newModesetInfo, + DP_COLORFORMAT newColorFormat = dpColorFormat_Unknown) : + headIndex(newHeadIndex), + modesetInfo(newModesetInfo), + colorFormat(newColorFormat), + msaparams() {} + + DpModesetParams(unsigned newHeadIndex, + ModesetInfo *newModesetInfo, + DP_COLORFORMAT newColorFormat = dpColorFormat_Unknown) : + headIndex(newHeadIndex), + modesetInfo(*newModesetInfo), + colorFormat(newColorFormat), + msaparams() {} + + }; + + struct DscOutParams + { + unsigned PPS[DSC_MAX_PPS_SIZE_DWORD]; // Out - PPS SDP data + }; + + struct DscParams + { + bool bCheckWithDsc; // [IN] - Client telling DP Library to check with DSC. + ForceDsc forceDsc; // [IN] - Client telling DP Library to force enable/disable DSC + DSC_INFO::FORCED_DSC_PARAMS* forcedParams; // [IN] - Client telling DP Library to force certain DSC params. + bool bEnableDsc; // [OUT] - DP Library telling client that DSC is needed for this mode. + unsigned bitsPerPixelX16; // [IN/OUT] - Bits per pixel value multiplied by 16 + DscOutParams *pDscOutParams; // [OUT] - DSC parameters + + DscParams() : bCheckWithDsc(false), forceDsc(DSC_DEFAULT), forcedParams(NULL), bEnableDsc(false), bitsPerPixelX16(0), pDscOutParams(NULL) {} + }; + + class Group; + + bool SetConfigSingleHeadMultiStreamMode(Group **targets, // Array of group pointers given for getting configured in single head multistream mode. + NvU32 displayIDs[], // Array of displayIDs given for getting configured in single head multistream mode. + NvU32 numStreams, // Number of streams driven out from single head. + DP_SINGLE_HEAD_MULTI_STREAM_MODE mode, // Configuration mode : SST or MST + bool bSetConfig, // Set or clear the configuration. 
+ NvU8 vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, // VBIOS primary display ID index in displayIDs[] array + bool bEnableAudioOverRightPanel = false); // Audio MUX config : right or left panel + + // + // Device object + // This object represents a displayport device. Devices are not reported + // to clients until the EDID is already on file. + // + class Device : virtual public Object + { + public: + + virtual bool isPlugged() = 0; + virtual bool isVideoSink() = 0; // Invariant: won't change once reported + virtual bool isAudioSink() = 0; // Invariant + + virtual bool isLoop() = 0; // the address starts and ends at th same device + virtual bool isRedundant() = 0; + virtual bool isMustDisconnect() = 0; // Is this monitor's head being attach preventing + // us from enumerating other panels? + virtual bool isZombie() = 0; // Head is attached but we're not connected + virtual bool isCableOk() = 0; // cable can be not ok, whenwe saw hpd, device connected, but can't talk over aux + + virtual bool isLogical() = 0; // Is device connected to logical port + + virtual Address getTopologyAddress() const = 0; // Invariant + virtual bool isMultistream() = 0; + + virtual ConnectorType getConnectorType() = 0; // Invariant + + virtual unsigned getEDIDSize() const= 0; // Invariant + // Copies EDID into client buffer. Fails if the buffer is too small + virtual bool getEDID(char * buffer, unsigned size) const = 0; + + virtual unsigned getRawEDIDSize() const= 0; + // Copies RawEDID into client buffer. Fails if the buffer is too small + virtual bool getRawEDID(char * buffer, unsigned size) const = 0; + + virtual bool getPCONCaps(PCONCaps *pPCONCaps) = 0; + + virtual bool isFallbackEdid() = 0; // is the device edid a fallback one? + virtual GUID getGUID() const = 0; // Returns the GUID for the device + virtual bool isPowerSuspended() = 0; + virtual bool isActive() = 0; // Whether the device has a head attached to it + virtual TriState hdcpAvailableHop() = 0; // Whether the device support HDCP, + // regardless of whether the path leading to it supports HDCP. + virtual TriState hdcpAvailable() = 0; // Whether HDCP can be enabled. + // Note this checks that the entire path to the node support HDCP. + + virtual PortMap getPortMap() const = 0; + + virtual void setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn) = 0; + virtual Group * getOwningGroup() = 0; // Return the group this device is currently a member of + + virtual AuxBus * getRawAuxChannel() = 0; // No automatic retry on DEFER. See limitations in dp_auxbus.h + virtual AuxRetry * getAuxChannel() = 0; // User friendly AUX interface + + virtual Device * getParent() = 0; + virtual Device * getChild(unsigned portNumber) = 0; + + virtual void dpcdOverrides() = 0; // Apply DPCD overrides if required + + virtual bool getDpcdRevision(unsigned * major, unsigned * minor) = 0; // get the dpcd revision (maybe cached) + + virtual bool getSDPExtnForColorimetrySupported() = 0; + + virtual bool getIgnoreMSACap() = 0; + + virtual AuxRetry::status setIgnoreMSAEnable(bool msaTimingParamIgnoreEn) = 0; + + virtual NvBool isDSCPossible() = 0; + + virtual NvBool isDSCSupported() = 0; + + virtual DscCaps getDscCaps() = 0; + + // + // This function returns the device itself or its parent device that is doing + // DSC decompression for it. 
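+        // (For example, a non-DSC sink behind a DSC-capable branch would
+        //  presumably report that branch device here.)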
+ // + virtual Device* getDevDoingDscDecompression() = 0; + virtual void markDeviceForDeletion() = 0; + + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) = 0; + + // This interface is still nascent. Please don't use it. Read size limit is 16 bytes. + virtual AuxBus::status getDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason = NULL) = 0; + + virtual AuxBus::status setDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason = NULL) = 0; + + virtual AuxBus::status dscCrcControl(NvBool bEnable, gpuDscCrc *gpuData, sinkDscCrc *sinkData) = 0; + virtual AuxBus::status queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) = 0; + + // + // The address send here will be right shifted by the library. DD should + // send the DDC address without the shift. + // Parameter bForceMot in both getI2cData and setI2cData is used to forfully set + // the MOT bit. It is needed for some special cases where the MOT bit shouldn't + // be set but some customers need it to please their monitors. + // + virtual bool getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false) = 0; + virtual bool setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false) = 0; + + // + // Calls VRR enablement implementation in dp_vrr.cpp. + // The enablement steps include interaction over DPAux in the vendor specific + // DPCD space. + // + virtual bool startVrrEnablement() = 0; // VF: calls actual enablement code. + virtual void resetVrrEnablement() = 0; // VF: resets enablement state. + virtual bool isVrrMonitorEnabled() = 0; // VF: gets monitor enablement state. + virtual bool isVrrDriverEnabled() = 0; // VF: gets driver enablement state. + + // If the sink support MSA override in MST environment. + virtual bool isMSAOverMSTCapable() = 0; + virtual bool isFakedMuxDevice() = 0; + + protected: + virtual ~Device() {} + + }; + + class Group : virtual public Object + { + public: + + // + // Routines for changing which panels are in a group. To move a stream to a new + // monitor without a modeset: + // remove(old_panel) + // insert(new_panel) + // The library will automatically switch over to the new configuration + // + virtual void insert(Device * dev) = 0; + virtual void remove(Device * dev) = 0; + + // + // group->enumDevices(0) - Get first element + // group->enumDevices(i) - Get next element + // + // for (Device * i = group->enumDevices(0); i; i = group->enumDevices(i)) + // + virtual Device * enumDevices(Device * previousDevice) = 0; + + virtual void destroy() = 0; // Destroy the group object + + // Toggles the encryption status for the stream. + // Returns whether encryption is currently enabled. + virtual bool hdcpGetEncrypted() = 0; + + protected: + virtual ~Group() {} + + }; + + class Connector : virtual public Object + { + public: + // + // Normally the Connector::EventSink callsback can occur in response to the following + // 1. Timer callbacks + // 2. 
notifyLongPulse/notifyShortPulse + // + class EventSink + { + public: + virtual void newDevice(Device * dev) = 0; // New device appears in topology + virtual void lostDevice(Device * dev) = 0; // Lost device from topology + // Device object ceases to exist after this call + + virtual void notifyMustDisconnect(Group * grp) = 0; // Notification that an attached head is preventing + // us from completing detection of a newly connected device + + virtual void notifyDetectComplete() = 0; // Rolling call. Happens every time we've done another full + // detect on the topology + + virtual void bandwidthChangeNotification(Device * dev, bool isComplianceMode) = 0; // Available bandwidth to panel has changed, or panel has + // become a zombie + + virtual void notifyZombieStateChange(Device * dev, bool zombied) = 0; // Notification that zombie device was attached or dettached + virtual void notifyCableOkStateChange(Device * dev, bool cableOk) = 0; // Notification that device got cable state chagne (true - cable is good, false - cables is bad) + virtual void notifyHDCPCapDone(Device * dev, bool hdcpCap) = 0; // Notification that device's HDCP cap detection is done and get state change. + virtual void notifyMCCSEvent(Device * dev) = 0; // Notification that an MCCS event is coming + }; + + // Query current Device topology + virtual Device * enumDevices(Device * previousDevice) = 0; + + // Called before system enters an S3 state + virtual void pause() = 0; + + // Get maximum link configuration + virtual LinkConfiguration getMaxLinkConfig() = 0; + + // Get currently active link configuration + virtual LinkConfiguration getActiveLinkConfig() = 0; + + // Get Current link configuration + virtual void getCurrentLinkConfig(unsigned & laneCount, NvU64 & linkRate) = 0; + + // Get the clock calculation supported by the panel + virtual unsigned getPanelDataClockMultiplier() = 0; + + // Get the clock calculation supported by the GPU + virtual unsigned getGpuDataClockMultiplier() = 0; + + // Resume from standby/initial boot notification + // The library is considered to start up in the suspended state. You must make this + // API call to enable the library. None of the library APIs are functional before + // this call. + // + // Returns the group representing the firmware panel if any is active. + // + // plugged Does RM report the root-port DisplayId in + // its plugged connector mask + // + // firmwareLinkHandsOff RM does NOT report the rootport displayId as active, + // but one of the active panels shares the same SOR. + // + // firmwareDPActive RM reports the rootport displayId in the active device list + // but display-driver hasn't yet performed its first modeset. + // + // isUefiSystem DD tells the library whether this system is a UEFI based + // one so that the library can get the current and max link config + // from RM/UEFI instead of trying to determine them on its own. + // + // firmwareHead Head being used to drive the firmware + // display, if firmwareDPActive is true. + // + // bFirmwareLinkUseMultistream + // Specifies whether the firmware connector is being driven in SST + // (false) or MST (true) mode. + // + // bDisableVbiosScratchRegisterUpdate + // Disables update of + // NV_PDISP_SOR_DP_SCRATCH_RAD/MISC scratch + // pad registers with last lit up display + // address. This address is used by VBIOS in + // case of driver unload or BSOD. 
+ // + // bAllowMST Allow/Disallow Multi-streaming + // + virtual Group * resume(bool firmwareLinkHandsOff, + bool firmwareDPActive, + bool plugged, + bool isUefiSystem = false, + unsigned firmwareHead = 0, + bool bFirmwareLinkUseMultistream = false, + bool bDisableVbiosScratchRegisterUpdate = false, + bool bAllowMST = true) = 0; + + // The display-driver should enable hands off mode when attempting + // to use a shared resource (such as the SOR) in a non-DP configuration. + virtual void enableLinkHandsOff() = 0; + virtual void releaseLinkHandsOff() = 0; + + // Usage scenario: + // beginCompoundQuery() + // compoundQueryAttach(1280x1024) + // compoundQueryAttach(1920x1080) + // .endCompoundQuery() + // Will tell you if you have sufficient bandwidth to operate + // two panels at 1920x1080 and 1280x1024 assuming all currently + // attached panels are detached. + virtual void beginCompoundQuery() = 0; + + // + // twoChannelAudioHz + // If you need 192khz stereo specify 192000 here. + // + // eightChannelAudioHz + // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + // + // pixelClockHz + // Requested pixel clock for the mode + // + // depth + // Requested color depth + // + virtual bool compoundQueryAttach(Group * target, + unsigned twoChannelAudioHz, + unsigned eightChannelAudioHz, + NvU64 pixelClockHz, + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) = 0; + + virtual bool compoundQueryAttach(Group * target, + const DpModesetParams &modesetParams, // Modeset info + DscParams *pDscParams) = 0; // DSC parameters + + virtual bool endCompoundQuery() = 0; + + // Interface to indicate if clients need to perform a head shutdown before a modeset + virtual bool isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) = 0; + + // Interface to indicate if clients need to perform a head shutdown before a modeset + virtual bool isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + ModesetInfo modesetInfo) = 0; // Modeset info relevant DSC data + + // + // Interface for clients to query library if the link is going to be trained during notifyAttachBegin(modeset). + // Note: This API is not intended to know if a link training will be performed during assessment of the link. + // This API is added to see if library can avoid link training during modeset so that client can take necessary decision + // to avoid a destructive modeset from UEFI mode at post to a GPU driver detected mode + // (thus prevent a visible glitch - i.e. 
Smooth Transition) + // + // How isLinkTrainingNeededForModeset API is different from isHeadShutDownNeeded API - + // In case of MST : we always shutdown head and link train if link is inactive, so both APIs return TRUE + // In case of SST : + // - If requested link config < active link config, we shutdown head to prevent overflow + // as head will still be driving at higher mode during link training to lower mode + // So both APIs return TRUE + // - If requested link config >= active link config, we don't need a head shutdown since + // SOR clocks can be changed by entering flush mode but will need to link train for mode change + // So isHeadShutDownNeeded returns FALSE and isLinkTrainingNeededForModeset returns TRUE + // + virtual bool isLinkTrainingNeededForModeset (ModesetInfo modesetInfo) = 0; + + // Notify library before/after modeset (update) + virtual bool notifyAttachBegin(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) = 0; + + // Group of panels we're attaching to this head + virtual bool notifyAttachBegin(Group * target, const DpModesetParams &modesetParams) = 0; + + virtual void readRemoteHdcpCaps() = 0; + + // modeset might be cancelled when NAB failed + virtual void notifyAttachEnd(bool modesetCancelled) = 0; + + // + // Client needs to be notified about the SST<->MST transition, + // based on which null modeset will be sent. + // + virtual bool isLinkAwaitingTransition() = 0; + + virtual void resetLinkTrainingCounter() = 0; + + // Notify library before/after shutdown (update) + virtual void notifyDetachBegin(Group * target) = 0; + virtual void notifyDetachEnd(bool bKeepOdAlive = false) = 0; + + // Notify library to assess PCON link capability + virtual bool assessPCONLinkCapability(PCONLinkControl *params) = 0; + + // Notify library of hotplug/IRQ + virtual void notifyLongPulse(bool statusConnected) = 0; + virtual void notifyShortPulse() = 0; + + // Notify Library when ACPI initialization is done + virtual void notifyAcpiInitDone() = 0; + + // Notify Library when GPU capability changes. Usually because power event. + virtual void notifyGPUCapabilityChange() = 0; + virtual void notifyHBR2WAREngage() = 0; + + // Create a new Group. Note that if you wish to do a modeset but send the + // stream nowhere, you may do a modeset with an EMPTY group. This is expected + // to be the mechanism by which monitor faking is implemented. + virtual Group * newGroup() = 0; + + // Shutdown and the destroy the connector manager + virtual void destroy() = 0; + + virtual void createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize) = 0; + virtual void deleteFakeMuxDevice() = 0; + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) = 0; + + // + // OS Modeset Order mitigation causes the library to delay the reporting + // of new devices until they can be safely turned on. + // When enabled the library client will not see connection events until + // MustDisconnect messages are processed. + // + // Policy state should be set before the library is brought out of + // the suspended state. + // + // Important Note: This option changes the definition of QueryMode. 
+        //   Without OS order mitigation, query mode assumes that you will
+        //   detach all of the heads from any zombied monitors *before*
+        //   activating the new panel. If your driver cannot guarantee
+        //   this invariant, then it must enable order mitigation.
+        //
+        virtual void setPolicyModesetOrderMitigation(bool enabled) = 0;
+
+        //
+        // force LT at NAB for compliance test (Power Management) in Win10 RS2+ (WDDM 2.2)
+        //
+        // RS2 no longer sends an explicit call for setPanelPowerParams during the Resume.
+        // It does that by specifying an additional flag during the call to SetTimings. Because of
+        // this, the DP library doesn't get a chance to perform this transition from setPanelPowerParams,
+        // and since it was already skipping LT in NAB/modeswitch, LT gets missed on the
+        // compliance device during resume from S3/S4.
+        //
+        virtual void setPolicyForceLTAtNAB(bool enabled) = 0;
+
+        //
+        // There are cases where the OS does not detach heads from the connector immediately after hot-unplug;
+        // on the next hot-plug there is no guarantee that the newly connected sink is capable of driving the existing
+        // raster timings. Flush mode has the following restriction:
+        //     When exiting flush mode, S/W should ensure that the final
+        //     link clock & lane count are able to support the existing raster.
+        // If we run into this situation and use flush mode, the display engine will hang.
+        // This policy ensures the link is assessed safely in this situation: instead of using flush mode,
+        // ask DD to detach/reattach heads for link training.
+        //
+        virtual void setPolicyAssessLinkSafely(bool enabled) = 0;
+
+        //
+        // These interfaces are meant to be used *ONLY* for tool purposes.
+        // Clients should *NOT* use them for their own implementation.
+        //
+        // Sets the preferred link config which the tool has requested to train to.
+        // Each set call should be paired with a reset call. Also, preferred link configs won't persist across HPDs.
+        // It is advisable to do compound queries before setting a mode on a preferred config.
+        // Compound queries and notify attaches (link train) would use the preferred link config unless it is reset again.
+        // (It is not advisable to leave a preferred link config always ON.)
+        //
+        virtual bool setPreferredLinkConfig(LinkConfiguration & lc, bool commit,
+                                            bool force = false,
+                                            LinkTrainingType forceTrainType = NORMAL_LINK_TRAINING) = 0;
+
+        //
+        // Resets the preferred link config and lets the library go back to default LT policy.
+        // Should follow a previous set call.
+        //
+        virtual bool resetPreferredLinkConfig(bool force=false) = 0;
+
+        //
+        // These interfaces are used by the client to allow/disallow
+        // Multi-streaming.
+        //
+        // If the connected sink is MST capable:
+        // The client should detach all active MST video/audio streams before
+        // disallowing MST; vice versa, the client should detach the active SST
+        // stream before allowing MST.
+        //
+        virtual void setAllowMultiStreaming(bool bAllowMST) = 0;
+        virtual bool getAllowMultiStreaming(void) = 0;
+
+        // This function reads sink MST capability from DPCD register(s).
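+        //
+        // Illustrative usage sketch (assumes "connector" is the Connector * returned by
+        // createConnector()); a client might gate MST enablement on the sink capability:
+        //
+        //     if (connector->getSinkMultiStreamCap())
+        //         connector->setAllowMultiStreaming(true); // after detaching any active SST stream
+        //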
+ virtual bool getSinkMultiStreamCap(void) = 0; + + // These interfaces are Deprecated, use setAllowMultiStreaming() + virtual void setDp11ProtocolForced() = 0; + virtual void resetDp11ProtocolForced() = 0; + virtual bool isDp11ProtocolForced() = 0; + + virtual bool getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12) = 0; + + virtual bool getOuiSink(unsigned &ouiId, char * modelName, + size_t modelNameBufferSize, NvU8 & chipRevision) = 0; + + virtual bool getIgnoreSourceOuiHandshake() = 0; + virtual void setIgnoreSourceOuiHandshake(bool bIgnore) = 0; + + // + // The following function is to be used to get the capability bit that tells the client whether the connector + // can do multistream. + // + virtual bool isMultiStreamCapable() = 0; + virtual bool isFlushSupported() = 0; + virtual bool isStreamCloningEnabled() = 0; + virtual NvU32 maxLinkRateSupported() = 0; + virtual bool isFECSupported() = 0; + virtual bool isFECCapable() = 0; + + // Following APIs are for link test/config for DP Test Utility + virtual bool getTestPattern(NV0073_CTRL_DP_TESTPATTERN *pTestPattern) = 0; + virtual bool setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, + NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, + NvBool bIsHBR2, NvBool bSkipLaneDataOverride) = 0; + // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + virtual bool getLaneConfig(NvU32 *numLanes, NvU32 *data) = 0; + // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + virtual bool setLaneConfig(NvU32 numLanes, NvU32 *data) = 0; + + virtual DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus) = 0; + + virtual DP_TESTMESSAGE_STATUS getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount) = 0; + // Function to configure power up/down for DP Main Link + virtual void configurePowerState(bool bPowerUp) = 0; + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) = 0; + virtual bool updatePsrConfiguration(vesaPsrConfig config) = 0; + virtual bool readPsrConfiguration(vesaPsrConfig *config) = 0; + virtual bool readPsrState(vesaPsrState *psrState) = 0; + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) = 0; + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr) = 0; + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr) = 0; + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr) = 0; + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr) = 0; + virtual bool updatePsrLinkState(bool bTrainLink) = 0; + + protected: + virtual ~Connector() {} + }; + + // + // Library routine to create primary port interface + // (Not intended to be used by display driver) + Connector * createConnector(MainLink * mainInterface, // DisplayDriver implemented MainLink object + AuxBus * auxInterface, // DisplayDriver implemented AuxRetry wrapper + Timer * timerInterface, // DisplayDriver provided Timer services + Connector::EventSink * sink); // Interface to notify DisplayDriver of events +} +#endif //INCLUDED_DP_CONNECTOR_H diff --git a/src/common/displayport/inc/dp_connectorimpl.h b/src/common/displayport/inc/dp_connectorimpl.h new file mode 100644 index 000000000..b649e0d1f --- /dev/null +++ b/src/common/displayport/inc/dp_connectorimpl.h @@ -0,0 +1,632 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_connectorimpl.cpp * +* DP connector implementation * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_CONNECTORIMPL_H +#define INCLUDED_DP_CONNECTORIMPL_H + +#include "dp_internal.h" +#include "dp_guid.h" +#include "dp_connector.h" +#include "dp_configcaps.h" +#include "dp_list.h" +#include "dp_buffer.h" +#include "dp_auxdefs.h" +#include "dp_watermark.h" +#include "dp_edid.h" +#include "dp_discovery.h" +#include "dp_groupimpl.h" +#include "dp_deviceimpl.h" +#include "./dptestutil/dp_testmessage.h" + +// HDCP abort codes +#define HDCP_FLAGS_ABORT_DEVICE_REVOKED 0x00000800 // Abort due to a revoked device in DP1.2 topology +#define HDCP_FLAGS_ABORT_DEVICE_INVALID 0x00080000 // Abort due to an invalid device in DP1.2 topology +#define HDCP_FLAGS_ABORT_HOP_LIMIT_EXCEEDED 0x80000000 // Abort, number of devices in DP1.2 topology exceeds supported limit + +static inline unsigned getDataClockMultiplier(NvU64 linkRate, NvU64 laneCount) +{ + // + // To get the clock multiplier: + // - Convert the linkRate from Hz to 10kHz by dividing it by 10000. + // - Multiply the 10kHz linkRate by the laneCount. + // - Multiply by 10.0/8, to account for the 8b/10b encoding overhead in the DP protocol layer. 
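+    //
+    // Worked example (illustrative): an HBR link (2.7 Gbps per lane) with 4 lanes gives
+    // 2700000000 / 10000 * 4 * 10 / 8 = 1350000, the same value as
+    // (linkRate * laneCount) / 8000 computed below.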
+ // + // Avoid floating point in the arithmetic in the calculation + // through the following conversions: + // linkRate/10000.0 * laneCount * 10.0/8 + // (linkRate * laneCount * 10) / (10000 * 8) + // (linkRate * laneCount) / (1000 * 8) + // + return (unsigned) DisplayPort::axb_div_c_64(linkRate, laneCount, 8000); +} + +namespace DisplayPort +{ + + typedef enum + { + DP_TRANSPORT_MODE_INIT = 0, + DP_TRANSPORT_MODE_SINGLE_STREAM = 1, + DP_TRANSPORT_MODE_MULTI_STREAM = 2, + } DP_TRANSPORT_MODE; + + struct ConnectorImpl : public Connector, DiscoveryManager::DiscoveryManagerEventSink, Timer::TimerCallback, MessageManager::MessageReceiver::MessageReceiverEventSink + { + // DPCD HAL Layer - We should use this in place of direct register accesses + DPCDHAL * hal; + + MainLink * main; // Main link controls + AuxBus * auxBus; + + TestMessage testMessage; // TestMessage instance + + Timer * timer; // OS provided timer services + Connector::EventSink * sink; // Event Sink + + unsigned ouiId; // Sink ouiId + char modelName[NV_DPCD_SOURCE_DEV_ID_STRING__SIZE + 1]; // Device Model-name + bool bIgnoreSrcOuiHandshake; // Skip writing source OUI + + LinkPolicy linkPolicy; + bool linkGuessed; // True when link was "guessed" during HPD in TMDS mode + bool isLinkQuiesced; // True when link was set to quiet mode by TMDS modeset + + bool bNoLtDoneAfterHeadDetach; // True when head is disconnected in NDE + + bool isDP12AuthCap; // To tell whether this DP1.2 connector/ upmost device has the authentication Cap. + bool isHDCPAuthOn; // To tell whether this connector has the authentication on. + bool isHDCPReAuthPending; // To tell whether HDCP Auth is pending (at every stream addition and cleared at handler). + bool isHDCPAuthTriggered; // To tell whether HDCP Auth is triggered and only cleared at unplug/device detach for MST. + bool isHopLimitExceeded; // To tell the current topology is over limitation. + bool bIsDiscoveryDetectActive; // To tell device discovery is active ( isDiscoveryDetectComplete is also used as DD notify and not want to impacts that. ) + bool isDiscoveryDetectComplete; // To tell device discovery is finished. + bool bDeferNotifyLostDevice; // To tell if we should defer notify lost device event to client. + + HDCPValidateData hdcpValidateData; // Cache the HDCP ValidateData. + unsigned authRetries; // Retry counter for the authentication. + unsigned retryLT; // Retry counter for link training in case of link lost in PostLQA + unsigned hdcpCapsRetries; // Retry counter for Hdcp Caps read. + unsigned hdcpCpIrqRxStatusRetries; // Retry counter for CPIRQ RxStatus read. + bool bLTPhyRepeater; // Link Train PHY Repeaters between Source and Sink + bool bFromResumeToNAB; // True if from resume to NAB, WAR flag for unblocking GA1.5 + bool bAttachOnResume; // True if notifyLongPulse is called for resume (reboot/S3/S4) + bool bSkipAssessLinkForEDP; // Skip assessLink() for eDP. Assuming max is reachable. + bool bPConConnected; // HDMI2.1-Protocol Converter (Support SRC control mode) connected. + bool bSkipAssessLinkForPCon; // Skip assessLink() for PCON. DD will call assessFRLLink later. + bool bHdcpAuthOnlyOnDemand; // True if only initiate Hdcp authentication on demand and MST won't auto-trigger authenticate at device attach. + + bool constructorFailed; + + // + // OS Modeset Order mitigation causes the library to delay the reporting + // of new devices until they can be safely turned on. 
+ // When enabled the library client will not see connection events until + // MustDisconnect messages are processed. + // + // Policy state should be set before the library is brought out of + // the suspended state. + // + bool policyModesetOrderMitigation; + + // + // force LT at NAB for compliance test (Power Management) in Win10 RS2+ (WDDM 2.2) + // + // RS2 no longer sends an explicit call for setPanelPowerParams during the Resume. + // It does that by specifying an additional flag during the call to SetTimings. Due to + // this DP lib doesn't get chance to perform this transition from setPanelPowerParams + // and since it was already skipping LT in NAB/modeswitch, so LT get missed out on the + // compliance device during resume from S3/S4. + // + bool policyForceLTAtNAB; + + // + // There are cases where OS does not detach heads from connector immediately after hot-unplug, + // on next hot-plug there is no guarantee that newly connected sink is capable to drive existing + // raster timings. Flush mode has following restriction + // When exiting flush mode S/W should ensure that the final + // link clock & lane count should be able to support existing raster. + // If we run into this situation and use flush mode then that will cause display engine to hang. + // This variable ensures to assess link safely in this situation: if newly connected sink is + // not capable to drive existing raster then just restore link configuration which was there + // before enabling flush mode, through fake link training. + // + bool policyAssessLinkSafely; + + bool bDisableVbiosScratchRegisterUpdate; + + // Only works when policyModesetOrderMitigation is true. + // To record if we should report newDevice. + bool modesetOrderMitigation; + + List deviceList; + List activeGroups; + LinkedList intransitionGroups; + LinkedList addStreamMSTIntransitionGroups; + List inactiveGroups; + + // Compound query + bool compoundQueryActive; + bool compoundQueryResult; + unsigned compoundQueryCount; + unsigned compoundQueryLocalLinkPBN; + + unsigned freeSlots, maximumSlots; + + // Multistream messaging + MessageManager * messageManager; + DiscoveryManager * discoveryManager; + + // Multistream timeslot management (on local link) + LinkConfiguration highestAssessedLC; // As of last assess, the highest possible link configuration + + LinkConfiguration activeLinkConfig; // Current link config. + + // this is the link config requested by a client. + // can be set and reset by the client for a given operation. + LinkConfiguration preferredLinkConfig; + + // + // Desired link configuration of single head multiple sst secondary connector. + // + LinkConfiguration oneHeadSSTSecPrefLnkCfg; + + // All possible link configs + LinkConfiguration * allPossibleLinkCfgs; + unsigned numPossibleLnkCfg; + + PCONLinkControl activePConLinkControl; + + // + // We're waiting for an MST<->SST transition + // The transition cannot be made without the DD + // disconnecting all heads. All devices are reported + // as must_disconnect. Once the last device blocking + // the transition is deattached from a head - we transition. 
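+        // Clients can query this pending transition through
+        // Connector::isLinkAwaitingTransition() (see dp_connector.h).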
+ // + bool linkAwaitingTransition; + + // Unless we're awaiting transition this is identical to hal->getSupportsMultistream() + DP_TRANSPORT_MODE linkState; + + bool bAudioOverRightPanel; + + bool previousPlugged; + bool connectorActive; // Keep track of if connector is active to serve any IRQ + + Group * firmwareGroup; // The group used for book-keeping when we're in firmware mode + + List pendingEdidReads; // List of DevicePendingEDIDRead structures. + // This list tracks the currently in progress MST Edid Reads + + Device * lastDeviceSetForVbios; + + // Flag which gets set when ACPI init is done. DD calls notifyAcpiInitDone to tell client that ACPI init is completed + // & client can now initiate DDC EDID read for a device which supports EDID through SBIOS + bool bAcpiInitDone; + + // Flag to check if the system is UEFI. + bool bIsUefiSystem; + + // Flag to check if LT should be skipped. + bool bSkipLt; + + // Flag to make sure that zombie gets triggred when a powerChange event happens + bool bMitigateZombie; + + // + // HP Valor QHD+ N15P-Q3 EDP needs 50ms delay after D3 + // during trainLinkOptimized to come up on S4 + // + bool bDelayAfterD3; + + // + // ASUS and Samsung monitors have inconsistent behavior when + // DPCD 0x600 updated to D3. Skip D3 only in case these monitors + // are driven in SST config + // + bool bKeepLinkAlive; + + // + // HP Trump dock link training is unstable during S4 resume, which causes + // system to hang. Keep the link alive to increase stability. + // See Bug 2109823. + // + bool bKeepLinkAliveMST; + + // Keep the link alive when connector is in SST + bool bKeepLinkAliveSST; + + // + // HTC Vive Link box is not happy when we power down the link + // during link training when there is no stream present. It requests + // for a link retraining pulse which is not required. + // WAR to address this - NV Bug# 1793084 + // + bool bKeepOptLinkAlive; + + // Keep both DP and FRL link alive to save time. + bool bKeepLinkAliveForPCON; + + // + // Remote HDCP DCPD access should be D0 but won't introduce extra Dx + // state toggle. Use the counter to avoid powerdownlink when HDCP probe. + // + unsigned pendingRemoteHdcpDetections; + + // + // ASUS PQ 321 tiled monitor sometimes loses link while assessing link + // or link training .So if we lower config from HBR2 to HBR and when + // we retrain the link , we see black screen. + // So WAR is to retry link training with same config for 3 times before + // lowering link config. NV Bug #1846925 + // + bool bNoFallbackInPostLQA; + + bool bReportDeviceLostBeforeNew; + bool bEnableAudioBeyond48K; + bool bDisableSSC; + bool bEnableFastLT; + NvU32 maxLinkRateFromRegkey; + + // + // Latency(ms) to apply between link-train and FEC enable for bug + // 2561206. + // + NvU32 LT2FecLatencyMs; + + // + // Dual SST Partner connector object pointer + ConnectorImpl *pCoupledConnector; + + // Set to true when a DSC mode is requested. + bool bFECEnable; + + // Save link config before entering PSR. + LinkConfiguration psrLinkConfig; + + // + // Apply MST DSC caps WAR based on OUI ID of sink + // + bool bDscMstCapBug3143315; + + // + // Enable DSC Pass through support in driver based on regkey. 
+ // + bool bDscMstEnablePassThrough; + + // + // Synaptics branch device doesn't support Virtual Peer Devices so DSC + // capability of downstream device should be decided based on device's own + // and its parent's DSC capability + // + bool bDscCapBasedOnParent; + + void sharedInit(); + ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink); + + void setPolicyModesetOrderMitigation(bool enabled); + void setPolicyForceLTAtNAB(bool enabled); + void setPolicyAssessLinkSafely(bool enabled); + + void discoveryDetectComplete(); + void discoveryNewDevice(const DiscoveryManager::Device & device); + void discoveryLostDevice(const Address & address); + void processNewDevice(const DiscoveryManager::Device & device, + const Edid & edid, + bool isMultistream, + DwnStreamPortType portType, + DwnStreamPortAttribute portAttribute, + bool isCompliance = false); + + void applyEdidWARs(Edid & edid, DiscoveryManager::Device device); + void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase); + + ResStatusNotifyMessage ResStatus; + + void messageProcessed(MessageManager::MessageReceiver * from); + + ~ConnectorImpl(); + + // + // Utility functions + // + virtual void hardwareWasReset(); + virtual LinkConfiguration getMaxLinkConfig(); + virtual LinkConfiguration getActiveLinkConfig(); + virtual void powerdownLink(bool bPowerdownPanel = false); + + GroupImpl * getActiveGroupForSST(); + bool detectSinkCountChange(); + bool handlePhyPatternRequest(); + void applyOuiWARs(); + bool linkUseMultistream() + { + return (linkState == DP_TRANSPORT_MODE_MULTI_STREAM); + } + + void populateAllDpConfigs(); + + // + // Suspend resume API + // + virtual Group * resume(bool firmwareLinkHandsOff, + bool firmwareDPActive, + bool plugged, + bool isUefiSystem = false, + unsigned firmwareHead = 0, + bool bFirmwareLinkUseMultistream = false, + bool bDisableVbiosScratchRegisterUpdate = false, + bool bAllowMST = true); + virtual void pause(); + + virtual Device * enumDevices(Device * previousDevice) ; + + + virtual void beginCompoundQuery() ; + virtual bool compoundQueryAttach(Group * target, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth); + + virtual bool compoundQueryAttach(Group * target, + const DpModesetParams &modesetParams, // Modeset info + DscParams *pDscParams = NULL); // DSC parameters + + virtual bool endCompoundQuery(); + + // + // Timer callback tags. + // (we pass the address of these variables as context to ::expired) + char tagFireEvents; + char tagDelayedLinkTrain; + char tagHDCPReauthentication; + char tagDelayedHdcpCapRead; + char tagDelayedHDCPCPIrqHandling; + + // + // Enable disable TMDS mode + // + virtual void enableLinkHandsOff(); + virtual void releaseLinkHandsOff(); + + // + // Timer callback for event management + // Uses: fireEvents() + virtual void expired(const void * tag); + + // Generate Events. + // useTimer specifies whether we fire the events on the timer + // context, or this context. + void fireEvents(); + + // returns the number of pending notifications. 
+ void fireEventsInternal(); + + virtual bool isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + ModesetInfo modesetInfo); + + virtual bool isLinkTrainingNeededForModeset(ModesetInfo modesetInfo); + + virtual bool notifyAttachBegin(Group * target, // Group of panels we're attaching to this head + const DpModesetParams &modesetParams); + + virtual bool isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) ; + + virtual bool notifyAttachBegin(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) ; + + virtual void readRemoteHdcpCaps(); + virtual void notifyAttachEnd(bool modesetCancelled); + virtual void notifyDetachBegin(Group * target); + virtual void notifyDetachEnd(bool bKeepOdAlive = false); + + bool performIeeeOuiHandshake(); + void setIgnoreSourceOuiHandshake(bool bIgnore); + bool getIgnoreSourceOuiHandshake(); + bool willLinkSupportModeSST(const LinkConfiguration & linkConfig, const ModesetInfo & modesetInfo); + void forceLinkTraining(); + + void assessLink(LinkTrainingType trainType = NORMAL_LINK_TRAINING); + + bool isLinkInD3(); + bool isLinkActive(); + bool isLinkLost(); + bool trainSingleHeadMultipleSSTLinkNotAlive(GroupImpl *pGroupAttached); + bool isLinkAwaitingTransition(); + bool isNoActiveStreamAndPowerdown(); + void incPendingRemoteHdcpDetection() + { + pendingRemoteHdcpDetections++; + } + void decPendingRemoteHdcpDetection() + { + if (pendingRemoteHdcpDetections > 0) + { + pendingRemoteHdcpDetections--; + } + } + bool trainLinkOptimized(LinkConfiguration lConfig); + bool trainLinkOptimizedSingleHeadMultipleSST(GroupImpl * group); + bool getValidLowestLinkConfig(LinkConfiguration & lConfig, LinkConfiguration & lowestSelected, ModesetInfo queryModesetInfo); + bool postLTAdjustment(const LinkConfiguration &, bool force); + void populateUpdatedLaneSettings(NvU8* voltageSwingLane, NvU8* preemphasisLane, NvU32 *data); + void populateDscCaps(DSC_INFO* dscInfo, DeviceImpl * dev, DSC_INFO::FORCED_DSC_PARAMS* forcedParams); + void populateDscGpuCaps(DSC_INFO* dscInfo); + void populateForcedDscParams(DSC_INFO* dscInfo, DSC_INFO::FORCED_DSC_PARAMS* forcedParams); + void populateDscSinkCaps(DSC_INFO* dscInfo, DeviceImpl * dev); + void populateDscModesetInfo(MODESET_INFO * pModesetInfo, const DpModesetParams * pModesetParams); + + bool train(const LinkConfiguration & lConfig, bool force, LinkTrainingType trainType = NORMAL_LINK_TRAINING); + bool validateLinkConfiguration(const LinkConfiguration & lConfig); + + virtual bool assessPCONLinkCapability(PCONLinkControl *params); + bool trainPCONFrlLink(PCONLinkControl *pConControl); + + // Set Device DSC state based on current DSC 
state of all active devices on this connector + bool setDeviceDscState(Device * dev, bool bEnableDsc); + + // the lowest level function(nearest to the hal) for the connector. + bool rawTrain(const LinkConfiguration & lConfig, bool force, LinkTrainingType linkTrainingType); + + bool enableFlush(); + bool beforeAddStream(GroupImpl * group, bool force=false, bool forFlushMode = false); + void afterAddStream(GroupImpl * group); + void beforeDeleteStream(GroupImpl * group, bool forFlushMode = false); + void afterDeleteStream(GroupImpl * group); + void disableFlush(bool test=false); + + bool beforeAddStreamMST(GroupImpl * group, bool force = false, bool forFlushMode = false); + + bool deleteAllVirtualChannels(); + void clearTimeslices(); + bool allocateTimeslice(GroupImpl * targetGroup); + void freeTimeslice(GroupImpl * targetGroup); + void flushTimeslotsToHardware(); + bool getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12); + bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision); + bool hdcpValidateKsv(const NvU8 *ksv, NvU32 Size); + void cancelHdcpCallbacks(); + bool handleCPIRQ(); + void handleSSC(); + void handleMCCSIRQ(); + void handleHdmiLinkStatusChanged(); + void sortActiveGroups(bool ascending); + void configInit(); + + virtual DeviceImpl* findDeviceInList(const Address & address); + virtual void disconnectDeviceList(); + void notifyLongPulseInternal(bool statusConnected); + virtual void notifyLongPulse(bool status); + virtual void notifyShortPulse(); + virtual Group * newGroup() ; + virtual void destroy(); + virtual void createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize); + virtual void deleteFakeMuxDevice(); + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize); + virtual bool isMultiStreamCapable(); + virtual bool isFlushSupported(); + virtual bool isStreamCloningEnabled(); + virtual bool isFECSupported(); + virtual bool isFECCapable(); + virtual NvU32 maxLinkRateSupported(); + virtual bool setPreferredLinkConfig(LinkConfiguration & lc, bool commit, bool force = false, LinkTrainingType trainType = NORMAL_LINK_TRAINING); + virtual bool resetPreferredLinkConfig(bool force = false); + virtual void setAllowMultiStreaming(bool bAllowMST); + virtual bool getAllowMultiStreaming(void); + virtual bool getSinkMultiStreamCap(void); + virtual void setDp11ProtocolForced(); + virtual void resetDp11ProtocolForced(); + virtual bool isDp11ProtocolForced(); + bool isAcpiInitDone(); + virtual void notifyAcpiInitDone(); + Group * createFirmwareGroup(); + virtual void notifyGPUCapabilityChange(); + virtual void notifyHBR2WAREngage(); + + bool getTestPattern(NV0073_CTRL_DP_TESTPATTERN *testPattern); + bool setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride = false); + bool getLaneConfig(NvU32 *numLanes, NvU32 *data); // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + bool setLaneConfig(NvU32 numLanes, NvU32 *data); // "data" is an array of NV0073_CTRL_MAX_LANES unsigned ints + void getCurrentLinkConfig(unsigned & laneCount, NvU64 & linkRate); // CurrentLink Configuration + unsigned getPanelDataClockMultiplier(); + unsigned getGpuDataClockMultiplier(); + void configurePowerState(bool bPowerUp); + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps); + virtual bool updatePsrConfiguration(vesaPsrConfig config); + virtual bool readPsrConfiguration(vesaPsrConfig *config); + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState); + virtual 
bool writePsrErrorStatus(vesaPsrErrorStatus psrErr); + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr); + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr); + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr); + virtual bool readPsrState(vesaPsrState *psrState); + virtual bool updatePsrLinkState(bool bTrainLink); + + // for dp test utility. pBuffer is the request buffer of type DP_STATUS_REQUEST_xxxx + DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus); + + DP_TESTMESSAGE_STATUS getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount); // for dp test utility, called by DD + + // Reset link training counter for the active link configuration. + virtual void resetLinkTrainingCounter() + { + activeLinkConfig.setLTCounter(0); + } + }; + + // + // New devices do not get a DeviceImpl created until after + // the EDID read has completed. This object is used + // to track the necessary state. + // + struct DevicePendingEDIDRead : protected EdidReadMultistream::EdidReadMultistreamEventSink, public ListElement + { + EdidReadMultistream reader; + DiscoveryManager::Device device; + ConnectorImpl * parent; + + void mstEdidCompleted(EdidReadMultistream * from); + void mstEdidReadFailed(EdidReadMultistream * from); + + public: + DevicePendingEDIDRead(ConnectorImpl * _parent, MessageManager * manager, DiscoveryManager::Device dev) + : reader(_parent->timer, manager, this, dev.address), device(dev), parent(_parent) + { + } + }; +} + +#endif //INCLUDED_DP_CONNECTORIMPL_H diff --git a/src/common/displayport/inc/dp_crc.h b/src/common/displayport/inc/dp_crc.h new file mode 100644 index 000000000..27d53413a --- /dev/null +++ b/src/common/displayport/inc/dp_crc.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_crc.h * +* CRC Algorithms for the messaging subsystem. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_CRC_H +#define INCLUDED_DP_CRC_H + +#include "dp_bitstream.h" + +namespace DisplayPort +{ + unsigned dpCalculateHeaderCRC(BitStreamReader * reader); + unsigned dpCalculateBodyCRC(BitStreamReader * writer); +} + +#endif //INCLUDED_DP_CRC_H diff --git a/src/common/displayport/inc/dp_deviceimpl.h b/src/common/displayport/inc/dp_deviceimpl.h new file mode 100644 index 000000000..19c2ac0c7 --- /dev/null +++ b/src/common/displayport/inc/dp_deviceimpl.h @@ -0,0 +1,498 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort*********************************\ +* * +* Module: dp_connector.cpp * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_DEVICEIMPL_H +#define INCLUDED_DP_DEVICEIMPL_H + +#include "dp_connector.h" +#include "dp_internal.h" +#include "dp_edid.h" +#include "dp_list.h" +#include "dp_auxdefs.h" +#include "dp_vrr.h" + +namespace DisplayPort +{ + #define PREDEFINED_DSC_MST_BPPX16 160; + #define HDCP_BCAPS_DDC_OFFSET 0x40 + #define HDCP_BCAPS_DDC_EN_BIT 0x80 + #define HDCP_BCAPS_DP_EN_BIT 0x01 + #define HDCP_I2C_CLIENT_ADDR 0x74 + + struct GroupImpl; + struct ConnectorImpl; + class DeviceHDCPDetection; + class VrrEnablement; + + struct DeviceImpl : public Device, + public AuxBus, + public ListElement + { + // + // Shadow state: This is the last state delivered to DD. + // see the ConnectorImpl::fireEvents() function for handling. + // + // State is double buffered to allow for allow announces + // to happen at the end of the state updates. We assume + // the DD can call any Connector API in response to the + // event. + // + struct Shadow + { + bool plugged; + bool zombie; + bool cableOk; + bool mustDisconnect; + bool hdcpCapDone; + LinkConfiguration highestAssessedLC; + } shadow; + + struct BandWidth + { + struct _Enum_Path + { + unsigned total, free; + bool bPathFECCapable; + bool dataValid; // Is the cache valid? + } enum_path; + + struct Compound_Query_State + { + unsigned totalTimeSlots; // Total timeslots available for allocation across this node + + unsigned timeslots_used_by_query; // Timeslots accounted for. 
+ + unsigned bandwidthAllocatedForIndex; // Compound query is compromised of several + // qeuery attaches. These query attaches + // may have more than one device associated. + // this mask keeps track of which queryAttach's + // have already had the stream "rounted" past + // this node. + } compound_query_state; + + LinkConfiguration lastHopLinkConfig; // inferred from enum_path.total + + } bandwidth; + + enum rawEprState + { + software, + hardware + }; + + void resetCacheInferredLink(); + LinkConfiguration * inferLeafLink(unsigned * totalLinkSlots); + + + DeviceImpl * parent; // Upstream parent device + DeviceImpl * children[16]; + PortMap portMap; + + Edid rawEDID; + Edid processedEdid; + Edid ddcEdid; + DPCDHAL * hal; + GroupImpl * activeGroup; + ConnectorImpl * connector; + ConnectorType connectorType; + Address address; + GUID guid; + NvU8 peerDevice; + NvU8 dpcdRevisionMajor; + NvU8 dpcdRevisionMinor; + bool multistream; + bool videoSink, audioSink; + bool plugged; + + + AuxRetry friendlyAux; + bool payloadAllocated; // did the allocate payload go through? + + unsigned char BCAPS[HDCP_BCAPS_SIZE]; // Hdcp1.x bCaps raw data + unsigned char BKSV[HDCP_KSV_SIZE]; // Hdcp1.x bKsv raw data + unsigned char nvBCaps[HDCP_BCAPS_SIZE]; // NV generic HDCP BCAPS including 1.x, 2.2, ... + NvU64 maxTmdsClkRate; + + + bool isPendingNewDevice(); + bool isPendingLostDevice(); + bool isPendingZombie(); + bool isPendingCableOk(); + bool isPendingBandwidthChange(); + bool isPendingHDCPCapDone(); + + TriState isHDCPCap; + bool isDeviceHDCPDetectionAlive; + DeviceHDCPDetection * deviceHDCPDetection; + + PCONCaps pconCaps; + + // this flag signifies that the compliance device has requested EDID read test and may follow + // hidden and lazy zombie policy. + bool complianceDeviceEdidReadTest; + + bool lazyExitNow; + + // VRR Enablement structure + VrrEnablement *vrrEnablement; + + // DSC fields + NvU8 rawDscCaps[16]; + DscCaps dscCaps; + + // Panel replay Caps + PanelReplayCaps prCaps; + + bool bIsFakedMuxDevice; + bool bIsPreviouslyFakedMuxDevice; + bool bisMarkedForDeletion; + + // + // Device doing the DSC decompression for this device. This could be device itself + // or its parent + // + DeviceImpl* devDoingDscDecompression; + // + // If DSC stream can be sent to this device or not. 
Either device itself or it's + // parent can do DSC decompression + // + bool bDSCPossible; + + bool bFECSupported; + bool bFECUncorrectedSupported; + bool bFECCorrectedSupported; + bool bFECBitSupported; + bool bFECParityBlockSupported; + bool bFECParitySupported; + + TriState bSdpExtCapable; + bool bMSAOverMSTCapable; + + DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent); + ~DeviceImpl(); + + virtual bool isCableOk(); + virtual bool isLogical(); + virtual bool isZombie(); + + virtual unsigned getEDIDSize() const; + virtual bool getEDID(char * buffer, unsigned size) const; + virtual unsigned getRawEDIDSize() const; + virtual bool getRawEDID(char * buffer, unsigned size) const; + + virtual bool getPCONCaps(PCONCaps *pPCONCaps); + + virtual Group * getOwningGroup() + { + return (Group *)activeGroup; + } + + bool isActive(); + + void applyOUIOverrides(); + + virtual Device * getParent() + { + return parent; + } + + virtual Device * getChild(unsigned portNumber) + { + return children[portNumber]; + } + + virtual bool isMultistream() // Sink supports multistream, remember we can have 1.1 targets + { + return address.size() != 0; + } + + virtual bool isNativeDPCD() + { + return (address.size() < 2); + } + + virtual bool isVideoSink() + { + return videoSink; + } + + virtual bool isAudioSink() + { + return audioSink; + } + + virtual bool isLoop() + { + DP_LOG(("isLoop implementation is pending (bug 791059)")); + return false; + } + + virtual bool isRedundant() + { + DP_LOG(("isRedundant implementation is pending (bug 791059)")); + return false; + } + + virtual bool isMustDisconnect(); + + virtual bool isPlugged() + { + return plugged; + } + + virtual Address getTopologyAddress() const + { + return address; + } + + virtual ConnectorType getConnectorType() + { + return connectorType; + } + + virtual bool isFallbackEdid() + { + return this->processedEdid.isFallbackEdid(); + } + + virtual GUID getGUID() const + { + return guid; + } + + virtual PortMap getPortMap() const + { + return portMap; + } + + virtual TriState hdcpAvailableHop(); + virtual TriState hdcpAvailable(); + + virtual bool isMSAOverMSTCapable() + { + return bMSAOverMSTCapable; + } + + virtual bool isFakedMuxDevice(); + virtual bool isPreviouslyFakedMuxDevice(); + + bool bypassDpcdPowerOff() + { + return processedEdid.WARFlags.disableDpcdPowerOff; + } + + bool powerOnMonitorBeforeLt() + { + return processedEdid.WARFlags.powerOnBeforeLt; + } + + bool forceMaxLinkConfig() + { + return processedEdid.WARFlags.forceMaxLinkConfig; + } + + bool skipRedundantLt() + { + return processedEdid.WARFlags.skipRedundantLt; + } + + bool ignoreRedundantHotplug() + { + return processedEdid.WARFlags.ignoreRedundantHotplug; + } + + bool isOptimalLinkConfigOverridden() + { + return processedEdid.WARFlags.overrideOptimalLinkCfg; + } + + // Apply DPCD overrides if required + void dpcdOverrides(); + + bool getDpcdRevision(unsigned * major, unsigned * minor) + { + if (!major || !minor) + { + DP_ASSERT(0 && "Null pointers passed in."); + return false; + } + + *major = this->dpcdRevisionMajor; + *minor = this->dpcdRevisionMinor; + return true; + } + + bool getIgnoreMSACap() + { + return hal->getMsaTimingparIgnored(); + } + + AuxRetry::status setIgnoreMSAEnable(bool msaTimingParamIgnoreEn) + { + return hal->setIgnoreMSATimingParamters(msaTimingParamIgnoreEn); + } + + virtual bool getSDPExtnForColorimetrySupported(); + + virtual bool isPowerSuspended(); + + virtual void setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn); + + 
virtual status transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned *pNakReason= NULL, + NvU8 offset= 0, NvU8 nWriteTransactions= 0); + virtual unsigned transactionSize(); + // default behaviour is querying first three registers for every lane --> flags = 0x7 + virtual status fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags = NV_DP_FEC_FLAGS_SELECT_ALL); + virtual AuxBus * getRawAuxChannel() { return this; } + virtual AuxRetry * getAuxChannel() { return &friendlyAux; } + virtual AuxBus::status getDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason=NULL); + virtual AuxBus::status setDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason=NULL); + virtual AuxBus::status queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags); + + virtual DscCaps getDscCaps(); + + // + // This function returns the device itself or its parent device that is doing + // DSC decompression for it. + // + virtual Device* getDevDoingDscDecompression(); + virtual void markDeviceForDeletion() {bisMarkedForDeletion = true;}; + virtual bool isMarkedForDeletion() {return bisMarkedForDeletion;}; + virtual bool getRawDscCaps(NvU8 *buffer, NvU32 bufferSize); + + virtual AuxBus::status dscCrcControl(NvBool bEnable, gpuDscCrc *dataGpu, sinkDscCrc *dataSink); + + // + // Parameter bForceMot in both getI2cData and setI2cData is used to forfully set + // the MOT bit. It is needed for some special cases where the MOT bit shouldn't + // be set but some customers need it to please their monitors. + // + virtual bool getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false); + virtual bool setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot = false); + virtual bool getRawEpr(unsigned * totalEpr, unsigned * freeEpr, rawEprState eprState); + + void switchToComplianceFallback(); + + // VRR Display Enablement Functions + bool startVrrEnablement(void); + void resetVrrEnablement(void); + bool isVrrMonitorEnabled(void); + bool isVrrDriverEnabled(void); + + // Panel replay related functions + bool isPanelReplaySupported(void); + void getPanelReplayCaps(void); + bool setPanelReplayConfig(panelReplayConfig prcfg); + + NvBool getDSCSupport(); + bool getFECSupport(); + NvBool isDSCPassThroughSupported(); + NvBool isDSCSupported(); + NvBool isDSCPossible(); + bool isFECSupported(); + bool readAndParseDSCCaps(); + bool parseDscCaps(const NvU8 *buffer, NvU32 bufferSize); + bool setDscEnable(bool enable); + bool getDscEnable(bool *pEnable); + unsigned getDscVersionMajor(); + unsigned getDscVersionMinor(); + unsigned getDscRcBufferSize(); + unsigned getDscRcBufferBlockSize(); + unsigned getDscMaxSlicesPerSink(); + unsigned getDscLineBufferBitDepth(); + NvBool isDscBlockPredictionSupported(); + unsigned getDscMaxBitsPerPixel(); + NvBool isDscRgbSupported(); + NvBool isDscYCbCr444Supported(); + NvBool isDscYCbCrSimple422Supported(); + NvBool isDscYCbCr422NativeSupported(); + NvBool isDscYCbCr420NativeSupported(); + unsigned getDscPeakThroughputMode0(); + unsigned getDscPeakThroughputModel(); + unsigned getDscMaxSliceWidth(); + unsigned getDscDecoderColorDepthSupportMask(); + }; + class DeviceHDCPDetection : public Object, MessageManager::Message::MessageEventSink, Timer::TimerCallback + { + DeviceImpl* 
parent; + RemoteDpcdReadMessage remoteBKSVReadMessage; + RemoteDpcdReadMessage remoteBCapsReadMessage; + RemoteDpcdReadMessage remote22BCapsReadMessage; + MessageManager * messageManager; // For transmit and receive + Timer * timer; + bool bksvReadCompleted; + bool bCapsReadCompleted; + bool isValidBKSV; + bool isBCapsHDCP; + unsigned retriesRemoteBKSVReadMessage; + unsigned retriesRemoteBCapsReadMessage; + unsigned retriesRemote22BCapsReadMessage; + bool retryRemoteBKSVReadMessage; + bool retryRemoteBCapsReadMessage; + bool retryRemote22BCapsReadMessage; + bool bBKSVReadMessagePending; + bool bBCapsReadMessagePending; + + public: + + DeviceHDCPDetection(DeviceImpl * parent, MessageManager * messageManager, Timer * timer) + : bksvReadCompleted(false),bCapsReadCompleted(false),isValidBKSV(false), + isBCapsHDCP(false), retriesRemoteBKSVReadMessage(0), retriesRemoteBCapsReadMessage(0), + retriesRemote22BCapsReadMessage(0), retryRemoteBKSVReadMessage(false), + retryRemoteBCapsReadMessage(false), retryRemote22BCapsReadMessage(false), + bBKSVReadMessagePending(false), bBCapsReadMessagePending(false) + + { + this->parent = parent; + this->messageManager = messageManager; + this->timer = timer; + } + + ~DeviceHDCPDetection(); + void expired(const void * tag); + void start(); + void waivePendingHDCPCapDoneNotification(); + + bool hdcpValidateKsv(const NvU8 *ksv, NvU32 Size); + void handleRemoteDpcdReadDownReply(MessageManager::Message * from); + void messageFailed(MessageManager::Message * from, NakData * nakData); + void messageCompleted(MessageManager::Message * from); + }; +} + +#endif //INCLUDED_DP_DEVICEIMPL_H + diff --git a/src/common/displayport/inc/dp_discovery.h b/src/common/displayport/inc/dp_discovery.h new file mode 100644 index 000000000..b0aa1705f --- /dev/null +++ b/src/common/displayport/inc/dp_discovery.h @@ -0,0 +1,328 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_discovery.h * +* Class definition for discovery manager. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_DISCOVERY_H +#define INCLUDED_DP_DISCOVERY_H + +#include "dp_address.h" +#include "dp_list.h" +#include "dp_messages.h" +#include "dp_messagecodings.h" + +namespace DisplayPort +{ + class DiscoveryManager : virtual public Object + { + public: + struct Device + { + Address address; // direct topology address + + bool legacy; // legacy (NON DP) device emulated on this port + bool branch; // DP 1.2 style branching device + PeerDevice peerDevice; // connector type of the device on this port + unsigned dpcdRevisionMajor; + unsigned dpcdRevisionMinor; + GUID peerGuid; // device guid + unsigned SDPStreams; // maximum number of audio streams supported + unsigned SDPStreamSinks; // number of outputs to select from + bool dirty; // got updates for the same device + PortMap portMap; + bool videoSink; // Should be true when a video sink is supported + NvU64 maxTmdsClkRate; + + Device():peerDevice(None),SDPStreams(0),SDPStreamSinks(0),dirty(false),videoSink(false) + { + portMap.validMap = portMap.inputMap = portMap.internalMap = 0; + } + + ~Device(){} + + }; + + struct ReceiverSink : + virtual public Object, + public MessageManager::MessageReceiver::MessageReceiverEventSink + { + DiscoveryManager * parent; + + // will handle CSN (up_req) and generate a up_reply for it. + virtual void messageProcessed(MessageManager::MessageReceiver * from); + void handleCSN(MessageManager::MessageReceiver * from); + + ReceiverSink(DiscoveryManager * parent) + :parent(parent) + {} + + virtual ~ReceiverSink() + {} + }; + + // This will account for upreplies and their failures/retries. + struct CsnUpReplyContainer : ListElement, Timer::TimerCallback, MessageManager::Message::MessageEventSink + { + struct CsnUpReply: public GenericUpReplyMessage + { + CsnUpReplyContainer * container; + + CsnUpReply(CsnUpReplyContainer * container, const Address & target) + : GenericUpReplyMessage(target, 0x2), container(container) + {} + + ~CsnUpReply() + {} + + }; + + DiscoveryManager * parent; + CsnUpReply upReplyMessage; + unsigned delayInUsec; + unsigned retries; + Address target; + + virtual void messageFailed(MessageManager::Message * from, NakData * nakData) + { + // if reason of failure is not timeout or defer; just forget trying again. + if (!(nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + messageCompleted(from); + return; + } + + // queue a callback to reset and send again + queueUpReply(); + return; + } + + virtual void messageCompleted(MessageManager::Message * from) + { + // don't delete now. Queue callback to delete later + retries = 0; + parent->timer->queueCallback(this, "CSNF", 5000); + } + + void queueUpReply() + { + parent->timer->queueCallback(this, "CSNF", delayInUsec/1000); + } + + void postUpReply() + { + upReplyMessage.set(target); + parent->messageManager->postReply(&this->upReplyMessage, this); + } + + virtual void expired(const void * tag) + { + if (retries) + retries--; + + if (retries) + postUpReply(); + else + { + // enough retries. wrap up. 
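+                    // Deleting the container also cancels its pending timer callbacks
+                    // and removes it from pendingCsnUpReplies (see ~CsnUpReplyContainer below).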
+ delete this; + } + } + + CsnUpReplyContainer(DiscoveryManager * parent) + :parent(parent), upReplyMessage(this, target), delayInUsec(200000), retries(4), target(Address(0)) + {} + + virtual ~CsnUpReplyContainer() + { + // remove self from queue and delete + // cancel all pending callbacks + parent->timer->cancelCallbacks(this); + parent->pendingCsnUpReplies.remove(this); + } + + }; + + ReceiverSink receiverSink; + + ConnStatusNotifyMessage connectionStatusNotifyProcessor; + + GUIDBuilder guidBuilder; + + List pendingCsnUpReplies; + + public: + + struct DiscoveryManagerEventSink + { + virtual void discoveryDetectComplete() = 0; // reply to processDetect + virtual void discoveryNewDevice(const DiscoveryManager::Device & device) = 0; // these can go out anytime + virtual void discoveryLostDevice(const Address & address) = 0; + }; + + enum { + maximumTopologyNodes = 128 + }; + + Device currentDevices[maximumTopologyNodes]; + unsigned currentDevicesCount; + + Device * findDevice(const Address & address); + Device * findDevice(GUID & guid); + void addDevice(const Device & device); + void removeDevice(Device * device); + void removeDeviceTree(const Address & prefix); + Device * findChildDeviceForBranchWithGuid(GUID guid, unsigned port, Address & childAddr); + + // + // This is responsible for a "complete" detection of a sink. Specifically using remote dpcd reads and writes + // + struct SinkDetection : MessageManager::Message::MessageEventSink, ListElement, Timer::TimerCallback + { + Device device; + Address address; + RemoteDpcdWriteMessage remoteDpcdWriteMessage; + RemoteDpcdReadMessage remoteDpcdReadMessage; + PowerUpPhyMessage powerUpPhyMessage; + LinkAddressMessage linkAddressMessage; + DiscoveryManager * parent; + bool completed; + unsigned retriesRemoteDpcdWriteMessage; + bool retryRemoteDpcdWriteMessage; + unsigned retriesRemoteDpcdReadMessage; + bool retryRemoteDpcdReadMessage; + unsigned retriesLinkAddressMessage; + bool retryLinkAddressMessage; + + bool bFromCSN; + + SinkDetection(DiscoveryManager * parent, const Device & device, bool bFromCSN) + : device(device), address(device.address), parent(parent), completed(false), + retriesRemoteDpcdWriteMessage(0), retryRemoteDpcdWriteMessage(false), + retriesRemoteDpcdReadMessage(0), retryRemoteDpcdReadMessage(false), + bFromCSN(bFromCSN) + {} + + ~SinkDetection(); + void expired(const void * tag); + void start(); + + void detectCompleted(bool passed); + void messageFailed(MessageManager::Message * from, NakData * nakData); + void handleRemoteDpcdReadDownReply(); + void handleRemoteDpcdWriteDownReply(); + void handleLinkAddressDownReply(); + + void messageCompleted(MessageManager::Message * from); + + }; + + // + // This object represents an address in some stage of detection + // + struct BranchDetection : MessageManager::Message::MessageEventSink, ListElement, Timer::TimerCallback + { + Device parentDevice; + Address address; + LinkAddressMessage::Result child[16]; + unsigned childCount; + + LinkAddressMessage linkAddressMessage; + RemoteDpcdWriteMessage remoteDpcdWriteMessage; + + DiscoveryManager * parent; + bool completed; + bool retryLinkAddressMessage; + unsigned retriesLinkAddressMessage; + unsigned retriesRemoteDpcdWriteMessage; + bool retryRemoteDpcdWriteMessage; + + BranchDetection(DiscoveryManager * parent, const Device & device) + : parentDevice(device), address(parentDevice.address), + parent(parent), completed(false), + retryLinkAddressMessage(false), retriesLinkAddressMessage(0), + retriesRemoteDpcdWriteMessage(0), 
retryRemoteDpcdWriteMessage(false) + {} + + void expired(const void * tag); + void start(); + ~BranchDetection(); + + void detectCompleted(bool present); + void messageFailed(MessageManager::Message * from, NakData * nakData) ; + void handleLinkAddressDownReply(); + void handleRemoteDpcdReadDownReply(); + void messageCompleted(MessageManager::Message * from); + }; + + void detect(const Address & address); + void detectBranch(Device device); + void detectSink(Device newDevice, bool bFromCSN); + +public: + + List outstandingBranchDetections; + List outstandingSinkDetections; + DiscoveryManagerEventSink * sink; // To call NotifyDetectComplete() + MessageManager * messageManager; // For transmit and receive + Timer * timer; + DPCDHAL * hal; + + DiscoveryManager(MessageManager * messageManager, DiscoveryManagerEventSink * sink, Timer * timer, DPCDHAL * hal) + : receiverSink(this), + connectionStatusNotifyProcessor(&receiverSink), + guidBuilder(timer, 0x10DE9070), + currentDevicesCount(0), + sink(sink), + messageManager(messageManager), + timer(timer), + hal(hal) + { + + // + // Register to filter all the upmessages. We want to know when + // connection status notify events are on their way. + // + messageManager->registerReceiver(&connectionStatusNotifyProcessor); + } + + ~DiscoveryManager() + { + while (!this->outstandingBranchDetections.isEmpty()) + delete this->outstandingBranchDetections.front(); + + while (!this->outstandingSinkDetections.isEmpty()) + delete this->outstandingSinkDetections.front(); + + while (!this->pendingCsnUpReplies.isEmpty()) + delete this->pendingCsnUpReplies.front(); + } + + void notifyLongPulse(bool status); + + }; +} +#endif //INCLUDED_DP_DISCOVERY_H diff --git a/src/common/displayport/inc/dp_edid.h b/src/common/displayport/inc/dp_edid.h new file mode 100644 index 000000000..149f76839 --- /dev/null +++ b/src/common/displayport/inc/dp_edid.h @@ -0,0 +1,321 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_edid.h * +* reading EDID from SST/MST Device * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_EDID_H +#define INCLUDED_DP_EDID_H + +#include "dp_buffer.h" +#include "dp_auxbus.h" +#include "dp_address.h" +#include "dp_messages.h" +#include "dp_messagecodings.h" +#include "dp_timer.h" + +namespace DisplayPort +{ + class Edid; + + // + // Shared utility object for MST/SST edid reading. + // This object handles the retry, CRC validating, + // identification of EDID length, DDC ping, etc. + // + // It's designed as an asynchronous state machine + // because of the way MST EDID reads are built. + // + class EdidAssembler + { + public: + EdidAssembler(Edid * const edid, bool bPatchCrc = false); + + // + // returns false - when existing data in Edid is invalid + // returns seg - segment from which to read next block + // returns offset - offset within block from which to start reading next block + // + bool readNextRequest(NvU8 & seg, NvU8 & offset); + + // returns false when Edid read is completed + void postReply(const Buffer & buffer, unsigned sizeCompleted, bool success); + void postReply(unsigned char * data, unsigned sizeCompleted, bool success); + + // returns true when it read all the required blocks + bool readIsComplete(); + void reset(); + private: + Edid * edid; + Stream stream; + + NvU8 oldBlockChecksum; + unsigned blocksRead; + unsigned totalBlockCnt; + unsigned retriesCount; + bool bPatchCrc; + }; + + // + // EDID + // + class Edid + { + public: + Edid(); + ~Edid(); + + Buffer * getBuffer() const { return &buffer; } + NvU8 getFirstPageChecksum(); // Get checksum byte + NvU8 getLastPageChecksum(); // Get checksum byte for last block + + bool verifyCRC(); + unsigned getEdidVersion(); + unsigned getBlockCount(); + const char * getName() const; + unsigned getEdidSize() const; + bool isChecksumValid() const; + bool isJunkEdid() const; + bool isFallbackEdid() const; + void swap(Edid & right); + void applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDenylistData); + void patchCrc(); + void setForcedEdidChecksum(bool set) + { + this->forcedCheckSum = set; + } + + void setFallbackFlag(bool set) + { + this->fallbackEdid = set; + } + + void setPatchedChecksum(bool set) + { + this->patchedChecksum = set; + } + + bool isPatchedChecksum() const + { + return this->patchedChecksum; + } + + bool isValidHeader() const + { + NvU8 validHeaderData[8] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x00}; + + if (buffer.getLength() < 0x8) + return false; + + for (unsigned i = 0; i < 8; i++) + { + if (buffer.data[i] != validHeaderData[i]) + { + DP_LOG(("DP-EDID> Invalid EDID Header")); + return false; + } + } + + return true; + } + + unsigned getManufId() const + { + if (buffer.getLength() < 0xa) + return 0; + + return ((buffer.data[0x9] << 8) | (buffer.data[0x8])); + } + + unsigned getProductId() const + { + if (buffer.getLength() < 0xc) + return 0; + + return ((buffer.data[0xb] << 8) | (buffer.data[0xa])); + } + + unsigned getYearWeek() const + { + if (buffer.getLength() < 0x12) + return 0; + + return ((buffer.data[0x11] << 8) | (buffer.data[0x10])); + } + + typedef struct + { + bool extensionCountDisabled; + bool dataForced; + bool disableDpcdPowerOff; + bool forceMaxLinkConfig; + bool powerOnBeforeLt; + bool skipRedundantLt; + bool skipCableBWCheck; + bool overrideOptimalLinkCfg; + bool overrideMaxLaneCount; + bool 
ignoreRedundantHotplug; + bool delayAfterD3; + bool keepLinkAlive; + bool useLegacyAddress; + bool reassessMaxLink; + bool bIgnoreDscCap; // Ignore DSC even if sink reports DSC capability + }_WARFlags; + + _WARFlags WARFlags; + + typedef struct + { + unsigned maxLaneCount; // Max lane count value to override + unsigned maxLaneAtHighRate; // Max lane count supported at HBR + unsigned maxLaneAtLowRate; // Max lane count supported at RBR + unsigned optimalLinkRate; // Optimal link rate value to override + unsigned optimalLaneCount; // Optimal lane count value to override + }_WARData; + + _WARData WARData; + + void resetData() + { + buffer.reset(); + checkSumValid = false; + forcedCheckSum = false; + fallbackEdid = false; + // clear the WARFlags + _WARFlags temp = {0}; + WARFlags = temp; + } + + bool operator== (const Edid & other) + { + return (buffer == other.buffer); + } + + bool operator!= (const Edid & other) + { + return !(buffer == other.buffer); + } + + private: + void validateCheckSum(); + + mutable Buffer buffer; + bool checkSumValid; + bool forcedCheckSum; + bool fallbackEdid; + bool patchedChecksum; + }; + + // + // SST EDID Read API + // + bool EdidReadSST(Edid & edid, AuxBus * aux, Timer * timer, bool pendingTestRequestEdidRead = false, bool bBypassAssembler = false, MainLink *main = NULL); + + enum EDID_DDC + { + EDID_DDC_NONE = 0x00, + EDID_DDC_ADR0 = 0xA0, + EDID_DDC_ADR1 = 0xA2, + EDID_DDC_ADR2 = 0xA6, + EDID_SEG_SELECTOR_OFFSET = 0x60, + }; + EDID_DDC sstDDCPing(AuxBus & dpAux); + + // + // MST EDID Read API + // + + class EdidReadMultistream : public Object, protected MessageManager::Message::MessageEventSink, Timer::TimerCallback + { + public: + class EdidReadMultistreamEventSink // Connector will inherit from this + { + public: + virtual void mstEdidCompleted(EdidReadMultistream * from) = 0; + virtual void mstEdidReadFailed(EdidReadMultistream * from) = 0; + }; + + EdidReadMultistream(Timer * timer, MessageManager * manager, EdidReadMultistream::EdidReadMultistreamEventSink * sink, Address topologyAddress) + : topologyAddress(topologyAddress), manager(manager), edidReaderManager(&edid), ddcIndex(0), + retries(0), timer(timer), sink(sink) + { + startReadingEdid(); + } + + Edid edid; + Address topologyAddress; + ~EdidReadMultistream(); + + private: + void startReadingEdid(); + + MessageManager * manager; + RemoteI2cReadMessage remoteI2cRead; + EdidAssembler edidReaderManager; // come up another word besides edidReaderManager eg Manager + NvU8 DDCAddress; + NvU8 ddcIndex; + unsigned retries; + Timer * timer; + + void readNextBlock(NvU8 seg, NvU8 offset); + void failedToReadEdid(); + void expired(const void * tag); + + EdidReadMultistreamEventSink * sink; + + virtual void messageFailed(MessageManager::Message * from, NakData * nakData); + virtual void messageCompleted(MessageManager::Message * from); + void edidAttemptDone(bool succeeded); + }; + + // + // Useful defines + // + enum + { + EDID_BLOCK_SIZE = 0x80, + EDID_SEGMENT_SIZE = 2*EDID_BLOCK_SIZE, + EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT = 3, + // DID EDID CTS v1.3 d12 currently outlines that Source shall support up to 16 blocks of EDID data. 
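+        // (16 blocks of EDID_BLOCK_SIZE (0x80) bytes each, i.e. 2 KiB of EDID data.)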
+ EDID_MAX_BLOCK_COUNT = 16, + }; + + static const NvU8 ddcAddrList[] = {EDID_DDC_ADR0, EDID_DDC_ADR1, EDID_DDC_ADR2}; + const NvU8 ddcAddrListSize = sizeof(ddcAddrList)/sizeof(NvU8); + const NvU8 EDID_READ_MAX_RETRY_COUNT = 6; + const NvU8 EDID_MAX_AUX_RETRIES = 10; + const NvU8 EDID_AUX_WAIT_TIME = 1; + NvU8 getEDIDBlockChecksum(const Buffer &); + + void makeEdidFallback(Edid & edid, NvU32 fallbackFormatSupported = 0); + void makeEdidFallbackVGA(Edid & edid); + +} + +#endif //INCLUDED_DP_EDID_H diff --git a/src/common/displayport/inc/dp_evoadapter.h b/src/common/displayport/inc/dp_evoadapter.h new file mode 100644 index 000000000..139306eda --- /dev/null +++ b/src/common/displayport/inc/dp_evoadapter.h @@ -0,0 +1,410 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* List **************************************\ +* * +* Module: dp_evoadapter.h * +* Interface for low level access to the aux bus. * +* This is the synchronous version of the interface. * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_EVOADAPTER_H +#define INCLUDED_DP_EVOADAPTER_H + +#include "dp_timer.h" +#include "dp_auxbus.h" +#include "dp_mainlink.h" +#include "dp_wardatabase.h" +#include "dp_auxdefs.h" +#include "dp_regkeydatabase.h" + +#include + +#define HDCP_DUMMY_CN (0x1) +#define HDCP_DUMMY_CKSV (0xFFFFF) + + +namespace DisplayPort +{ + class EvoInterface + { + public: + // + // IOCTL access to RM class DISPLAY_COMMON and NV50_DISPLAY + // + virtual NvU32 rmControl0073(NvU32 command, void * params, NvU32 paramSize) = 0; + virtual NvU32 rmControl5070(NvU32 command, void * params, NvU32 paramSize) = 0; + + virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount) + { + linkRate = 0; laneCount = 0; + return true; + } + + // + // Call to tell DD that linkTraining will be performed. + // Required when head is attached & we enter in flush mode GPUs. + // Required to enable/disable Audio. + // + // Derived classes that override these functions must call down to + // DisplayPort::EvoInterface::pre/postLinkTraining() to inherit this + // implementation. 
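+        // A minimal sketch of such an override ("MyEvoInterface" is a
+        // hypothetical client subclass, not part of this header):
+        //
+        //     void MyEvoInterface::preLinkTraining(NvU32 head)
+        //     {
+        //         // client-specific work around entering flush mode
+        //         DisplayPort::EvoInterface::preLinkTraining(head);
+        //     }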
+ // + virtual void preLinkTraining(NvU32 head) + { + } + virtual void postLinkTraining(NvU32 head) + { + } + + virtual NvU32 getSubdeviceIndex() = 0; + virtual NvU32 getDisplayId() = 0; + virtual NvU32 getSorIndex() = 0; + virtual NvU32 getLinkIndex() = 0; // Link A = 0, Link B = 1 + // + // Query the value of a registry key. Implementations should return 0 + // if the regkey is not set. + // + virtual NvU32 getRegkeyValue(const char *key) + { + return 0; + } + virtual NvU32 monitorDenylistInfo(NvU32 manufId, NvU32 productId, DpMonitorDenylistData *pDenylistData) + { + return 0; + } + + virtual bool isInbandStereoSignalingSupported() + { + return false; + } + }; + + MainLink * MakeEvoMainLink(EvoInterface * provider, Timer * timer); + AuxBus * MakeEvoAuxBus(EvoInterface * provider, Timer * timer); + + class EvoAuxBus : public AuxBus + { + public: + EvoAuxBus(EvoInterface * provider, Timer * timer) + : provider(provider), + timer(timer), + displayId(provider->getDisplayId()), + subdeviceIndex(provider->getSubdeviceIndex()), + devicePlugged(false) + { + } + + virtual status transaction(Action action, Type type, int address, NvU8 * buffer, + unsigned sizeRequested, unsigned * sizeCompleted, + unsigned * pNakReason = NULL, + NvU8 offset = 0, NvU8 nWriteTransactions = 0); + virtual unsigned transactionSize(); + virtual void setDevicePlugged(bool); + + private: + EvoInterface * provider; + Timer * timer; + NvU32 displayId; + NvU32 subdeviceIndex; + bool devicePlugged; + }; + + class EvoMainLink : public MainLink + { + EvoInterface * provider; + Timer * timer; + NvU32 displayId; + NvU32 subdeviceIndex; + NvU32 _maxLinkRateSupportedGpu; + NvU32 _maxLinkRateSupportedDfp; + unsigned allHeadMask; + bool _hasIncreasedWatermarkLimits; + bool _hasMultistream; + bool _isPC2Disabled; + bool _isEDP; + bool _isDP1_2Supported; + bool _isDP1_4Supported; + bool _isStreamCloningEnabled; + bool _needForceRmEdid; + bool _skipPowerdownEDPPanelWhenHeadDetach; + bool _isDscDisabledByRegkey; + bool _isMstDisabledByRegkey; + bool _isFECSupported; + bool _useDfpMaxLinkRateCaps; + bool _applyLinkBwOverrideWarRegVal; + bool _isDynamicMuxCapable; + bool _enableMSAOverrideOverMST; + + bool _isLTPhyRepeaterSupported; + // + // LTTPR count reported by RM, it might not be the same with DPLib probe + // For example, some Intel LTTPR might not be ready to response 0xF0000 probe + // done by RM, but when DPLib checks the same DPCD offsets it responses + // properly. This will cause serious LT problem. 
+ // + unsigned _rmPhyRepeaterCount; + + struct DSC + { + bool isDscSupported; + unsigned encoderColorFormatMask; + unsigned lineBufferSizeKB; + unsigned rateBufferSizeKB; + unsigned bitsPerPixelPrecision; + unsigned maxNumHztSlices; + unsigned lineBufferBitDepth; + }_DSC; + + private: + virtual void initializeRegkeyDatabase(); + virtual void applyRegkeyOverrides(); + + public: + EvoMainLink(EvoInterface * provider, Timer * timer); + + virtual bool hasIncreasedWatermarkLimits() + { + return _hasIncreasedWatermarkLimits; + } + + virtual bool hasMultistream() + { + return _hasMultistream; + } + + virtual bool isPC2Disabled() + { + return _isPC2Disabled; + } + + virtual bool isDP1_2Supported() + { + return _isDP1_2Supported; + } + virtual bool isDP1_4Supported() + { + return _isDP1_4Supported; + } + virtual bool isFECSupported() + { + return _isFECSupported; + } + + virtual bool isStreamCloningEnabled() + { + return _isStreamCloningEnabled; + } + + virtual NvU32 maxLinkRateSupported() + { + // + // For cases where RM asks dplib to honor the maxLinkRate limit defined in DCB, always use + // this as the limit. Regkey has no meaning in this case. + // In other cases, based on regkey either honor the dcb limit or the max link rate for the + // specific GPU architecture. This is needed to avoid regressions on existing chips. + // + if ((_applyLinkBwOverrideWarRegVal || _useDfpMaxLinkRateCaps) && + (_maxLinkRateSupportedDfp < _maxLinkRateSupportedGpu)) + { + return _maxLinkRateSupportedDfp; + } + + return _maxLinkRateSupportedGpu; + } + + virtual bool isForceRmEdidRequired() + { + return _needForceRmEdid; + } + + virtual bool fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize); + virtual bool applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize); + + virtual bool isDynamicMuxCapable() + { + return _isDynamicMuxCapable; + } + + virtual bool isInternalPanelDynamicMuxCapable() + { + return (_isDynamicMuxCapable && _isEDP); + } + + // Get GPU DSC capabilities + virtual void getDscCaps(bool *pbDscSupported, + unsigned *pEncoderColorFormatMask, + unsigned *pLineBufferSizeKB, + unsigned *pRateBufferSizeKB, + unsigned *pBitsPerPixelPrecision, + unsigned *pMaxNumHztSlices, + unsigned *pLineBufferBitDepth) + { + if (pbDscSupported) + { + *pbDscSupported = _DSC.isDscSupported; + } + + if (pEncoderColorFormatMask) + { + *pEncoderColorFormatMask = _DSC.encoderColorFormatMask; + } + + if (pLineBufferSizeKB) + { + *pLineBufferSizeKB = _DSC.lineBufferSizeKB; + } + + if (pRateBufferSizeKB) + { + *pRateBufferSizeKB = _DSC.rateBufferSizeKB; + } + + if (pBitsPerPixelPrecision) + { + *pBitsPerPixelPrecision = _DSC.bitsPerPixelPrecision; + } + + if (pMaxNumHztSlices) + { + *pMaxNumHztSlices = _DSC.maxNumHztSlices; + } + + if (pLineBufferBitDepth) + { + *pLineBufferBitDepth = _DSC.lineBufferBitDepth; + } + } + + virtual NvU32 getRootDisplayId() + { + return this->displayId; + } + + virtual bool isLttprSupported() + { + return this->_isLTPhyRepeaterSupported; + } + + // Return the current mux state. 
Returns false if device is not mux capable + bool getDynamicMuxState(NvU32 *muxState); + + virtual bool aquireSema(); + virtual void releaseSema(); + virtual bool physicalLayerSetTestPattern(PatternInfo * patternInfo); + + virtual void preLinkTraining(NvU32 head); + virtual void postLinkTraining(NvU32 head); + virtual NvU32 getRegkeyValue(const char *key); + virtual const DP_REGKEY_DATABASE& getRegkeyDatabase(); + virtual NvU32 getSorIndex(); + virtual bool isInbandStereoSignalingSupported(); + virtual bool train(const LinkConfiguration & link, bool force, LinkTrainingType linkTrainingType, + LinkConfiguration *retLink, bool bSkipLt = false, bool isPostLtAdjRequestGranted = false, + unsigned phyRepeaterCount = 0); + virtual bool retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords); + virtual void getLinkConfig(unsigned & laneCount, NvU64 & linkRate); + virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount); + virtual bool setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams); + virtual bool setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams); + virtual bool setFlushMode(); + virtual void clearFlushMode(unsigned headMask, bool testMode=false); + + virtual bool dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex); + + void triggerACT(); + void configureHDCPRenegotiate(NvU64 cN = HDCP_DUMMY_CN, NvU64 cKsv = HDCP_DUMMY_CKSV, bool bForceReAuth = false, + bool bRxIDMsgPending = false); + void configureHDCPGetHDCPState(HDCPState &hdcpState); + bool rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache * dfpCache, NvBool bResetDfp); + + virtual NvU32 streamToHead(NvU32 streamId, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + + virtual NvU32 headToStream(NvU32 head, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + + void configureSingleStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + bool bEnhancedFraming, + NvU32 tuSize, + NvU32 waterMark, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false); + + void configureMultiStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + NvU32 slotStart, + NvU32 slotEnd, + NvU32 PBN, + NvU32 Timeslice, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false); + + void configureSingleHeadMultiStreamMode(NvU32 displayIDs[], + NvU32 numStreams, + NvU32 mode, + bool bSetConfig, + NvU8 vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + + void configureMsScratchRegisters(NvU32 address, + NvU32 hopCount, + NvU32 driverState); + + bool isActive(); + bool isEDP(); + bool skipPowerdownEdpPanelWhenHeadDetach(); + bool supportMSAOverMST(); + bool queryAndUpdateDfpParams(); + bool controlRateGoverning(NvU32 head, bool enable, bool updateNow); + + bool getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN *testPattern); + bool setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN 
testPattern, + NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, + NvBool bIsHBR2, NvBool bSkipLaneDataOverride); + bool getDpLaneData(NvU32 *numLanes, NvU32 *data); + bool setDpLaneData(NvU32 numLanes, NvU32 *data); + void configurePowerState(bool bPowerUp); + NvU32 monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData); + NvU32 allocDisplayId(); + bool freeDisplayId(NvU32 displayId); + void queryGPUCapability(); + bool getEdpPowerData(bool *panelPowerOn, bool *dpcdPowerStateD0); + virtual bool vrrRunEnablementStage(unsigned stage, NvU32 *status); + + void configureTriggerSelect(NvU32 head, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY); + void configureTriggerAll(NvU32 head, bool enable); + bool configureLinkRateTable(const NvU16 *pLinkRateTable, LinkRates *pLinkRates); + bool configureFec(const bool bEnableFec); + }; + +} + +#endif //INCLUDED_DP_EVOADAPTER_H diff --git a/src/common/displayport/inc/dp_groupimpl.h b/src/common/displayport/inc/dp_groupimpl.h new file mode 100644 index 000000000..a4680aa1c --- /dev/null +++ b/src/common/displayport/inc/dp_groupimpl.h @@ -0,0 +1,121 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_groupimpl.h * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_GROUPIMPL_H +#define INCLUDED_DP_GROUPIMPL_H + +#include "dp_connector.h" +#include "dp_deviceimpl.h" +#include "dp_linkedlist.h" +#include "dp_watermark.h" +#include "dp_auxdefs.h" + +namespace DisplayPort +{ + + struct GroupImpl : public Group, ListElement, Timer::TimerCallback + { + ConnectorImpl * parent; + LinkedList members; + List elements; + unsigned headIndex; + unsigned streamIndex; + bool streamValidationDone; + bool headInFirmware; // Set if this is a firmware run mode. 
If set lastModesetInfo is NOT valid + bool bIsHeadShutdownNeeded; // Set if head shutdown is requested during modeset + bool hdcpEnabled; + bool hdcpPreviousStatus; + bool bWaitForDeAllocACT; + bool bDeferredPayloadAlloc; + ModesetInfo lastModesetInfo; + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID singleHeadMultiStreamID; + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultiStreamMode; + DP_COLORFORMAT colorFormat; + + struct + { + unsigned PBN; + int count; + int begin; + bool hardwareDirty; // Does the configureStream need to be called again? + Watermark watermarks; // Cached watermark calculations + } timeslot; + + bool bIsCurrentModesetGroup; // Group that is getting attached + + GroupImpl(ConnectorImpl * parent, bool isFirmwareGroup = false) + : parent(parent), + streamValidationDone(true), + headInFirmware(false), + bIsHeadShutdownNeeded(true), + hdcpEnabled(false), + hdcpPreviousStatus(false), + bWaitForDeAllocACT(false), + singleHeadMultiStreamID(DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY), + singleHeadMultiStreamMode(DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE), + bIsCurrentModesetGroup(false), + headAttached(false) + { + timeslot.count = 0; + } + + ~GroupImpl() + { + } + + virtual void insert(Device * dev); + virtual void remove(Device * dev); + void update(Device * dev, bool allocationState); // send the allocatepayload/deallocatepayload message + bool contains(Device * dev) { return members.contains(dev); } + virtual Device * enumDevices(Device * previousDevice); + + void updateVbiosScratchRegister(Device * lastDevice); // Update the VBIOS scratch register with last lit display + + // + // Timer callback tags. + // (we pass the address of these variables as context to ::expired) + // + char tagHDCPReauthentication; + char tagStreamValidation; + + unsigned authRetries; // Retry counter for the authentication. + + virtual void expired(const void * tag); + virtual bool hdcpGetEncrypted(); + virtual void destroy(); + void cancelHdcpCallbacks(); + + bool isHeadAttached() { return headAttached; } + void setHeadAttached(bool attached); + + private: + bool headAttached; // True if modeset started (during NAB). Sets back to False during NDE + }; +} + +#endif //INCLUDED_DP_GROUPIMPL_H diff --git a/src/common/displayport/inc/dp_guid.h b/src/common/displayport/inc/dp_guid.h new file mode 100644 index 000000000..2a1318da4 --- /dev/null +++ b/src/common/displayport/inc/dp_guid.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_guid.h * +* GUID struct and builder class * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_GUID_H +#define INCLUDED_DP_GUID_H + +#include "dp_internal.h" +#include "dp_timer.h" + +namespace DisplayPort +{ +#define DPCD_GUID_SIZE 16 + + struct GUID + { + NvU8 data[DPCD_GUID_SIZE]; + + GUID() + { + dpMemZero(&data, sizeof(data)); + } + + bool isGuidZero() + { + for (unsigned i = 0 ; i < DPCD_GUID_SIZE; i++) + if (data[i]) + return false; + + return true; + } + + bool operator == (const GUID & other) const + { + for (unsigned i = 0 ; i < DPCD_GUID_SIZE; i++) + if (data[i] != other.data[i]) + return false; + + return true; + } + + bool operator != (const GUID & other) const + { + return !((*this) == other); + } + + void copyFrom(const NvU8 * buffer) + { + dpMemCopy(&this->data[0], buffer, sizeof data); + } + + // XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + // Two Xs per byte, plus four dashes and a NUL byte. + typedef char StringBuffer[DPCD_GUID_SIZE*2 + 5]; + char * toString(StringBuffer & buffer) const + { + char *p = &buffer[0]; + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) { + dpByteToHexChar(p, data[i]); + p += 2; + if (i == 3 || i == 5 || i == 7 || i == 9) + *p++ = '-'; + } + + *p++ = '\0'; + + DP_ASSERT(p == buffer + sizeof(buffer)); + + return buffer; + } + }; + + class GUIDBuilder + { + NvU32 salt; + NvU32 previousRandom; + Timer * source; + + + // + // Linear congruential random number generator + // Seed values chosen from numerical methods + // + NvU32 random(); + + public: + GUIDBuilder(Timer * source, NvU32 salt); + + void makeGuid(GUID & guid); + }; +} + +#endif //INCLUDED_DP_GUID_H diff --git a/src/common/displayport/inc/dp_hostimp.h b/src/common/displayport/inc/dp_hostimp.h new file mode 100644 index 000000000..aa96fabda --- /dev/null +++ b/src/common/displayport/inc/dp_hostimp.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_hostimp.h * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_HOSTIMP_H +#define INCLUDED_DP_HOSTIMP_H + +#include "nvtypes.h" +#include "dp_tracing.h" + +extern "C" void * dpMalloc(NvLength size); +extern "C" void dpFree(void * ptr); +extern "C" void dpDebugBreakpoint(); +// Note: dpPrint() implementations are expected to append a newline themselves. +extern "C" void dpPrint(const char * formatter, ...); +extern "C" void dpTraceEvent(NV_DP_TRACING_EVENT event, + NV_DP_TRACING_PRIORITY priority, NvU32 numArgs, ...); + +#if defined(_DEBUG) || defined(DEBUG) + #define NV_DP_ASSERT_ENABLED 1 +#else + #define NV_DP_ASSERT_ENABLED 0 +#endif + +#if NV_DP_ASSERT_ENABLED +extern "C" void dpAssert(const char *expression, const char *file, + const char *function, int line); +#endif + +#endif // INCLUDED_DP_HOSTIMP_H diff --git a/src/common/displayport/inc/dp_internal.h b/src/common/displayport/inc/dp_internal.h new file mode 100644 index 000000000..e233a7555 --- /dev/null +++ b/src/common/displayport/inc/dp_internal.h @@ -0,0 +1,139 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_internal.h * +* RM stubs to allow unit testing. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_INTERNAL_H +#define INCLUDED_DP_INTERNAL_H + +// +// Clients should not include this file +// This file provides the private malloc implementation. +// + +#include +#include // size_t + +#include "dp_object.h" +#include "dp_ringbuffer.h" + +static inline void dpByteToHexChar(char *output, NvU8 c) +{ + char dig = (c>>4) & 0xF; + output[0] = dig < 10 ? dig + '0' : dig + 'A' - 10; + dig = c & 0xF; + output[1] = dig < 10 ? 
dig + '0' : dig + 'A' - 10; +} + +static inline void dpHexDump(char * output, unsigned outSize, NvU8 * buffer, unsigned size) +{ + char * tail = output; + if (outSize < size * 3 + 1) + return; + + for (unsigned i = 0; i < size; i++) + { + dpByteToHexChar(tail, buffer[i]); + tail += 2; + *tail++ = ' '; + } + *tail = 0; +} + +namespace DisplayPort +{ + template + inline void swap_args(T & left, T & right) + { + T temp = left; + left = right; + right = temp; + } + + inline NvU64 divide_ceil(NvU64 a, NvU64 b) + { + return (a + b - 1) / b; + } + + inline NvU64 divide_floor(NvU64 a, NvU64 b) + { + return a / b; + } + + inline NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c) + { + // NvU64 arithmetic to keep precision and avoid floats + // a*b/c = (a/c)*b + ((a%c)*b + c/2)/c + return ((a/c)*b + ((a%c)*b + c/2)/c); + } +} + +#define DP_MIN(x,y) ((x)<(y)?(x):(y)) +#define DP_MAX(x,y) ((x)<(y)?(y):(x)) + +// +// Macro to suppress unused local variable +// +template void dp_used(const T & /*x*/) {} +#define DP_USED(x) dp_used(x) + + +// +// Basic debug logging facility +// + +#if NV_DP_ASSERT_ENABLED +#define DP_LOG(x) \ + do \ + { \ + dpPrint x; \ + addDpLogRecord x; \ + }while (false) + +#define DP_ASSERT(x) \ + if (!(x)) \ + { \ + addDpAssertRecord(); \ + dpAssert(#x, __FILE__, __FUNCTION__, __LINE__); \ + dpDebugBreakpoint(); \ + } +#else + +#define DP_LOG(x) + +#define DP_ASSERT(x) \ + { \ + DP_USED(x); \ + if (!(x)) \ + { \ + addDpAssertRecord(); \ + } \ + } +#endif + +#endif //INCLUDED_DP_INTERNAL_H diff --git a/src/common/displayport/inc/dp_linkconfig.h b/src/common/displayport/inc/dp_linkconfig.h new file mode 100644 index 000000000..ae29499ef --- /dev/null +++ b/src/common/displayport/inc/dp_linkconfig.h @@ -0,0 +1,449 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* List **************************************\ +* * +* Module: dp_linkconfig.h * +* Link Configuration object implementation * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_LINKCONFIG_H +#define INCLUDED_DP_LINKCONFIG_H + +#include "dp_auxdefs.h" +#include "dp_internal.h" +#include "dp_watermark.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" // NV0073_CTRL_HDCP_VPRIME_SIZE +#include "displayport.h" + +namespace DisplayPort +{ + typedef NvU64 LinkRate; + + class LinkRates : virtual public Object + { + public: + // Store link rate in multipler of 270MBPS to save space + NvU8 element[NV_DPCD_SUPPORTED_LINK_RATES__SIZE]; + NvU8 entries; + + LinkRates() : entries(0) {} + + void clear() + { + entries = 0; + for (int i = 0; i < NV_DPCD_SUPPORTED_LINK_RATES__SIZE; i++) + { + element[i] = 0; + } + } + + bool import(NvU8 linkBw) + { + if (entries < NV_DPCD_SUPPORTED_LINK_RATES__SIZE) + { + element[entries] = linkBw; + entries++; + return true; + } + else + return false; + } + + NvU8 getNumLinkRates() + { + return entries; + } + + LinkRate getLowerRate(LinkRate rate) + { + int i; + NvU8 linkBw = (NvU8)(rate / DP_LINK_BW_FREQ_MULTI_MBPS); + + if ((entries == 0) || (linkBw <= element[0])) + return 0; + + for (i = entries - 1; i > 0; i--) + { + if (linkBw > element[i]) + break; + } + + rate = (LinkRate)element[i] * DP_LINK_BW_FREQ_MULTI_MBPS; + return rate; + } + + LinkRate getMaxRate() + { + LinkRate rate = 0; + if ((entries > 0) && + (entries <= NV_DPCD_SUPPORTED_LINK_RATES__SIZE)) + { + rate = (LinkRate)element[entries - 1] * DP_LINK_BW_FREQ_MULTI_MBPS; + } + + return rate; + } + }; + + class LinkPolicy : virtual public Object + { + bool bNoFallback; // No fallback when LT fails + LinkRates linkRates; + + public: + LinkPolicy() : bNoFallback(false) + { + } + bool skipFallback() + { + return bNoFallback; + } + void setSkipFallBack(bool bSkipFallback) + { + bNoFallback = bSkipFallback; + } + + LinkRates *getLinkRates() + { + return &linkRates; + } + }; + enum + { + totalTimeslots = 64, + totalUsableTimeslots = totalTimeslots - 1 + }; + + // in MBps + enum + { + RBR = 162000000, + EDP_2_16GHZ = 216000000, + EDP_2_43GHZ = 243000000, + HBR = 270000000, + EDP_3_24GHZ = 324000000, + EDP_4_32GHZ = 432000000, + HBR2 = 540000000, + HBR3 = 810000000 + }; + + struct HDCPState + { + bool HDCP_State_Encryption; + bool HDCP_State_1X_Capable; + bool HDCP_State_22_Capable; + bool HDCP_State_Authenticated; + bool HDCP_State_Repeater_Capable; + }; + + struct HDCPValidateData + { + }; + + typedef enum + { + DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST, + }DP_SINGLE_HEAD_MULTI_STREAM_MODE; + +#define HEAD_INVALID_STREAMS 0 +#define HEAD_DEFAULT_STREAMS 1 + + typedef enum + { + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY = 0, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY = 1, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_MAX = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY, + } DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID; + +#define DP_INVALID_SOR_INDEX 0xFFFFFFFF +#define DSC_DEPTH_FACTOR 16 + + + class LinkConfiguration : virtual public Object + { + public: + LinkPolicy policy; + unsigned lanes; + LinkRate peakRatePossible; + LinkRate peakRate; + LinkRate minRate; + bool enhancedFraming; + bool multistream; + bool disablePostLTRequest; + bool bEnableFEC; + bool bDisableLTTPR; + + // + // The counter to record how many times link training happens. 
+ // Client can reset the counter by calling setLTCounter(0) + // + unsigned linkTrainCounter; + + LinkConfiguration() : + lanes(0), peakRatePossible(0), peakRate(0), minRate(0), + enhancedFraming(false), multistream(false), disablePostLTRequest(false), + bEnableFEC(false), bDisableLTTPR(false), linkTrainCounter(0) {}; + + LinkConfiguration(LinkPolicy * p, unsigned lanes, LinkRate peakRate, + bool enhancedFraming, bool MST, bool disablePostLTRequest = false, + bool bEnableFEC = false, bool bDisableLTTPR = false) : + lanes(lanes), peakRatePossible(peakRate), peakRate(peakRate), + enhancedFraming(enhancedFraming), multistream(MST), + disablePostLTRequest(disablePostLTRequest), + bEnableFEC(bEnableFEC), bDisableLTTPR(bDisableLTTPR), + linkTrainCounter(0) + { + // downrate for spread and FEC + minRate = linkOverhead(peakRate); + if (p) + { + policy = *p; + } + } + + void setLTCounter(unsigned counter) + { + linkTrainCounter = counter; + } + + unsigned getLTCounter() + { + return linkTrainCounter; + } + + NvU64 linkOverhead(NvU64 rate) + { + if(bEnableFEC) + { + + // if FEC is enabled, we have to account for 3% overhead + // for FEC+downspread according to DP 1.4 spec + + return rate - 3 * rate/ 100; + } + else + { + // if FEC is not enabled, link overhead comprises only of + // 0.05% downspread. + return rate - 5 * rate/ 1000; + + } + } + + void enableFEC(bool setFEC) + { + bEnableFEC = setFEC; + + // If FEC is enabled, update minRate with FEC+downspread overhead. + minRate = linkOverhead(peakRate); + } + + LinkConfiguration(unsigned long TotalLinkPBN) + : enhancedFraming(true), + multistream(true), + disablePostLTRequest(false), + bEnableFEC(false), + bDisableLTTPR(false), + linkTrainCounter(0) + { + // Reverse engineer a link configuration from Total TotalLinkPBN + // Note that HBR2 twice HBR. The table below treats HBR2x1 and HBRx2, etc. 
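+            // For illustration, using the same 54/64 MBps-per-PBN figure as
+            // slotsForPBN() below: a 4-lane HBR link carries 4 * 270 MBps, so its
+            // total PBN is 4 * 270 / (54/64) = 1280, matching the 270 MBps,
+            // 4-lane row in the table below.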
+ + // + // BW Effective Lanes Total TotalLinkPBN + // 165 1 195.5555556 + // 165 2 391.1111111 + // 165 4 782.2222222 + // 270 1 320 + // 270 2 640 + // 270 4 1280 + // 270 8 2560 + // + + if (TotalLinkPBN <= 90) + peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes=0; // FAIL + if (TotalLinkPBN <= 195) + peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes=1; + else if (TotalLinkPBN <= 320) + peakRatePossible = peakRate = HBR, minRate=linkOverhead(HBR), lanes = 1; + else if (TotalLinkPBN <= 391) + peakRatePossible = peakRate = RBR, minRate=linkOverhead(RBR), lanes = 2; + else if (TotalLinkPBN <= 640) + peakRatePossible = peakRate = HBR, minRate=linkOverhead(HBR), lanes = 2; // could be HBR2x1, but TotalLinkPBN works out same + else if (TotalLinkPBN <= 782) + peakRatePossible = peakRate = RBR, minRate=linkOverhead(RBR), lanes = 4; + else if (TotalLinkPBN <= 960) + peakRatePossible = peakRate = HBR3, minRate=linkOverhead(HBR3), lanes = 1; + else if (TotalLinkPBN <= 1280) + peakRatePossible = peakRate = HBR, minRate=linkOverhead(HBR), lanes = 4; // could be HBR2x2 + else if (TotalLinkPBN <= 1920) + peakRatePossible = peakRate = HBR3, minRate=linkOverhead(HBR3), lanes = 2; // could be HBR2x + else if (TotalLinkPBN <= 2560) + peakRatePossible = peakRate = HBR2, minRate=linkOverhead(HBR2), lanes = 4; + else if (TotalLinkPBN <= 3840) + peakRatePossible = peakRate = HBR3, minRate=linkOverhead(HBR3), lanes = 4; + else { + peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 0; // FAIL + DP_ASSERT(0 && "Unknown configuration"); + } + } + + void setEnhancedFraming(bool newEnhancedFraming) + { + enhancedFraming = newEnhancedFraming; + } + + bool isValid() + { + return lanes != laneCount_0; + } + + bool lowerConfig(bool bReduceLaneCnt = false) + { + // + // TODO: bReduceLaneCnt is set to fallback to 4 lanes with lower + // valid link rate. But we should reset to max lane count + // sink supports instead. + // + + LinkRate lowerRate = policy.getLinkRates()->getLowerRate(peakRate); + + if(bReduceLaneCnt) + { + // Reduce laneCount before reducing linkRate + if(lanes == laneCount_1) + { + if (lowerRate) + { + lanes = laneCount_4; + peakRate = lowerRate; + } + else + { + lanes = laneCount_0; + } + } + else + { + lanes /= 2; + } + } + else + { + // Reduce the link rate instead of lane count + if (lowerRate) + { + peakRate = lowerRate; + } + else + { + lanes /= 2; + } + } + + minRate = linkOverhead(peakRate); + + return lanes != laneCount_0; + } + + void setLaneRate(LinkRate newRate, unsigned newLanes) + { + peakRate = newRate; + lanes = newLanes; + minRate = linkOverhead(peakRate); + } + + unsigned pbnTotal() + { + return PBNForSlots(totalUsableTimeslots); + } + + void pbnRequired(const ModesetInfo & modesetInfo, unsigned & base_pbn, unsigned & slots, unsigned & slots_pbn) + { + base_pbn = pbnForMode(modesetInfo); + slots = slotsForPBN(base_pbn); + slots_pbn = PBNForSlots(slots); + } + + NvU32 slotsForPBN(NvU32 allocatedPBN, bool usable = false) + { + NvU64 bytes_per_pbn = 54 * 1000000 / 64; // this comes out exact + NvU64 bytes_per_timeslot = peakRate * lanes / 64; + + if (bytes_per_timeslot == 0) + return (NvU32)-1; + + if (usable) + { + // round down to find the usable integral slots for a given value of PBN. 
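+                // For example, on a 4-lane HBR link: bytes_per_timeslot =
+                // 270000000 * 4 / 64 = 16,875,000 and bytes_per_pbn = 843,750,
+                // so allocatedPBN = 640 yields
+                // floor(640 * 843,750 / 16,875,000) = 32 usable time slots.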
+ NvU32 slots = (NvU32)divide_floor(allocatedPBN * bytes_per_pbn, bytes_per_timeslot); + DP_ASSERT(slots <= 64); + + return slots; + } + else + return (NvU32)divide_ceil(allocatedPBN * bytes_per_pbn, bytes_per_timeslot); + } + + NvU32 PBNForSlots(NvU32 slots) // Rounded down + { + NvU64 bytes_per_pbn = 54 * 1000000 / 64; // this comes out exact + NvU64 bytes_per_timeslot = peakRate * lanes / 64; + + return (NvU32)(bytes_per_timeslot * slots/ bytes_per_pbn); + } + + bool operator!= (const LinkConfiguration & right) const + { + return !(*this == right); + } + + bool operator== (const LinkConfiguration & right) const + { + return (this->lanes == right.lanes && + this->peakRate == right.peakRate && + this->enhancedFraming == right.enhancedFraming && + this->multistream == right.multistream && + this->bEnableFEC == right.bEnableFEC); + } + + bool operator< (const LinkConfiguration & right) const + { + NvU64 leftMKBps = peakRate * lanes; + NvU64 rightMKBps = right.peakRate * right.lanes; + + if (leftMKBps == rightMKBps) + { + return (lanes < right.lanes); + } + else + { + return (leftMKBps < rightMKBps); + } + } + }; +} +#endif //INCLUDED_DP_LINKCONFIG_H diff --git a/src/common/displayport/inc/dp_linkedlist.h b/src/common/displayport/inc/dp_linkedlist.h new file mode 100644 index 000000000..cb0b6f28a --- /dev/null +++ b/src/common/displayport/inc/dp_linkedlist.h @@ -0,0 +1,143 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/******************************* DisplayPort *******************************\ +* * +* Module: dp_linkedlist.h * +* A linked list that uses DislayPort::List as a backend, but which * +* allocates the list backbone dynamically. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_LINKEDLIST_H +#define INCLUDED_DP_LINKEDLIST_H + +#include "dp_list.h" + +namespace DisplayPort +{ + template + class LinkedList : public Object + { + // The Element class forms the list backbone and contains pointers to + // each item in the list. + class Element : public ListElement + { + public: + Element(T *item) : item(item) { } + T *item; + }; + + List list; + + // No public copy constructor. + LinkedList(LinkedList &other) { } + + // Find the Element containing an item. 
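+        // Linear scan of the backbone; returns NULL when the item is not
+        // present, which is how remove(), next() and contains() detect a
+        // missing entry.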
+ Element *containing(T *item) + { + for (ListElement *le = list.begin(); le != list.end(); le = le->next) + { + Element *e = static_cast(le); + if (e->item == item) + return e; + } + return NULL; + } + + public: + // The list starts out empty. + LinkedList() { } + + // Insert an item at the front of the list. + void insertFront(T *item) + { + // Construct an element and add it to the list. + Element *e = new Element(item); + DP_ASSERT(e); + if (e) + { + list.insertFront(e); + } + } + + // Remove an item from the list. + // O(n) to find the item to remove. + // It is an error to try to remove an item that is not in the list. + void remove(T *item) + { + Element *e = containing(item); + DP_ASSERT(e && "Item was not a member of the list"); + delete e; + } + + // Find the next item in the list after the specified item. If item is + // NULL, this returns the first item. + T *next(T *prev) + { + if (list.isEmpty()) + return NULL; + + // If prev is NULL or not in the list, return the first item. + Element *e = containing(prev); + if (!e) + { + e = static_cast(list.begin()); + return e->item; + } + else if (e->next != list.end()) + { + e = static_cast(e->next); + return e->item; + } + else + { + // prev was the last element in the list. + return NULL; + } + } + + // Query whether an item is a member of the list. + // O(n) + bool contains(T *item) + { + Element *e = containing(item); + return e != NULL; + } + + bool isEmpty() + { + return list.isEmpty(); + } + + T *pop() + { + DP_ASSERT(!list.isEmpty()); + Element *e = static_cast(list.last()); + T *item = e->item; + delete e; + return item; + } + }; +} + +#endif // INCLUDED_DP_LINKEDLIST_H diff --git a/src/common/displayport/inc/dp_list.h b/src/common/displayport/inc/dp_list.h new file mode 100644 index 000000000..77fd759e8 --- /dev/null +++ b/src/common/displayport/inc/dp_list.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_list.h * +* Simple doubly linked list queue * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_LIST_H +#define INCLUDED_DP_LIST_H + +#include "dp_object.h" + +namespace DisplayPort +{ + // + // List is an intrusive container, it may + // only contain elements that derive from ListElement + // + // NOTE! Deleting an element automatically unlinks it + // from the enclosing container. + // + struct ListElement : virtual public Object + { + ListElement * next, * prev; + + ListElement(); + virtual ~ListElement(); + }; + + + class List : public ListElement + { + public: + bool isEmpty(); + void insertFront(ListElement * item); + void insertBack(ListElement * item); + void insertBefore(ListElement * insertBeforeThis, ListElement * item); + void clear(); + ListElement* front(); + ListElement* last(); + + ListElement* begin() { return this->next; } + ListElement* end() { return this; } + + static ListElement * remove(ListElement * item); // Removes but does not delete + bool contains(ListElement * item); + ListElement * replace(ListElement * replacement, ListElement * replacee); + List(); + ~List(); + + unsigned size() + { + unsigned count = 0; + for (ListElement * i = begin(); i!=end(); i = i->next) + count++; + return count; + } + }; +} + +#endif //INCLUDED_DP_LIST_H diff --git a/src/common/displayport/inc/dp_mainlink.h b/src/common/displayport/inc/dp_mainlink.h new file mode 100644 index 000000000..219e110b0 --- /dev/null +++ b/src/common/displayport/inc/dp_mainlink.h @@ -0,0 +1,265 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* List **************************************\ +* * +* Module: dp_mainlink.h * +* Mainlink interface implemented by client. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_MAINLINK_H +#define INCLUDED_DP_MAINLINK_H + +#include "dp_linkconfig.h" +#include "dp_vrr.h" +#include "dp_wardatabase.h" +#include "dp_auxdefs.h" +#include "displayport.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "dp_regkeydatabase.h" + +#define HDCP_DUMMY_CN (0x1) +#define HDCP_DUMMY_CKSV (0xFFFFF) + +namespace DisplayPort +{ + typedef enum + { + NONE, //Abort it manually + UNTRUST, //Abort due to Kp mismatch + UNRELBL, //Abort due to repeated link failure + KSV_LEN, //Abort due to KSV length + KSV_SIG, //Abort due to KSV signature + SRM_SIG, //Abort due to SRM signature + SRM_REV, //Abort due to SRM revocation + NORDY, //Abort due to repeater not ready + KSVTOP, //Abort due to KSV topology error + BADBKSV //Abort due to invalid Bksv + }AbortAuthReason; + + // This is also used for DPCD offset 10B. 249 + enum LinkQualityPatternType + { + LINK_QUAL_DISABLED, + LINK_QUAL_D10_2, + LINK_QUAL_SYM_ERROR, + LINK_QUAL_PRBS7, + LINK_QUAL_80BIT_CUST, + LINK_QUAL_HBR2_COMPLIANCE_EYE, + LINK_QUAL_CP2520PAT3, + }; + + typedef struct + { + LinkQualityPatternType lqsPattern; + + // + // 80 bits DP CSTM Test Pattern data; + // ctsmLower takes bits 31:0 (lowest 32 bits) + // ctsmMiddle takes bits 63:32 (middle 32 bits) + // ctsmUpper takes bits 79:64 (highest 16 bits) + // + int ctsmLower; + int ctsmMiddle; + int ctsmUpper; + } PatternInfo; + + typedef struct + { + unsigned char bcaps; + unsigned char bksv[5]; + bool hdcpCapable; + unsigned char updMask; + }RmDfpCache; + + typedef enum + { + NORMAL_LINK_TRAINING, // full LT + NO_LINK_TRAINING, + FAST_LINK_TRAINING, + }LinkTrainingType; + + class MainLink : virtual public Object + { + private: + virtual void initializeRegkeyDatabase() = 0; + virtual void applyRegkeyOverrides() = 0; + + public: + virtual bool physicalLayerSetTestPattern(PatternInfo * patternInfo) = 0; + + // + // Wrappers for existing link training RM control calls + // + virtual bool train(const LinkConfiguration & link, bool force, LinkTrainingType linkTrainingType, + LinkConfiguration *retLink, bool bSkipLt = false, bool isPostLtAdjRequestGranted = false, + unsigned phyRepeaterCount = 0) = 0; + + // RM control call to retrieve buffer from RM for DP Library to dump logs + virtual bool retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords) = 0; + + // + // Requests to DD to perform pre & post link training steps + // which may disconnect and later reconnect the head (For Pre-gf119 GPUs) + // + virtual void preLinkTraining(NvU32 head) = 0; + virtual void postLinkTraining(NvU32 head) = 0; + virtual NvU32 getRegkeyValue(const char *key) = 0; + virtual const DP_REGKEY_DATABASE& getRegkeyDatabase() = 0; + virtual NvU32 getSorIndex() = 0; + virtual bool isInbandStereoSignalingSupported() = 0; + + + virtual bool isEDP() = 0; + virtual bool supportMSAOverMST() = 0; + virtual bool isForceRmEdidRequired() = 0; + virtual bool fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize) = 0; + virtual bool applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize) = 0; + + // Return if Panel is Dynamic MUX capable + virtual bool isDynamicMuxCapable() = 0; + + // Return the current mux state. Returns false if not mux capable + virtual bool getDynamicMuxState(NvU32 *muxState) = 0; + + // Return if Internal panel is Dynamic Mux capable + virtual bool isInternalPanelDynamicMuxCapable() = 0; + + // Check if we should skip power down eDP when head detached. 
+ virtual bool skipPowerdownEdpPanelWhenHeadDetach() = 0; + + // Get GPU DSC capabilities + virtual void getDscCaps(bool *pbDscSupported = NULL, + unsigned *pEncoderColorFormatMask = NULL, + unsigned *pLineBufferSizeKB = NULL, + unsigned *pRateBufferSizeKB = NULL, + unsigned *pBitsPerPixelPrecision = NULL, + unsigned *pMaxNumHztSlices = NULL, + unsigned *pLineBufferBitDepth = NULL) = 0; + + // + // Get the current link config. + // (Used for the boot case where EFI/VBIOS may have already trained + // the link. We need this to confirm the programming since + // we cannot rely on the DPCD registers being correct or sane) + // + virtual void getLinkConfig(unsigned &laneCount, NvU64 & linkRate) = 0; + + // Get the max link config from UEFI. + virtual bool getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount) = 0; + // + // Query if a head is attached to this DisplayId + // + virtual bool isActive() = 0; + + virtual bool hasIncreasedWatermarkLimits() = 0; + virtual bool hasMultistream() = 0; + virtual bool isPC2Disabled() = 0; + virtual bool isDP1_2Supported() = 0; + virtual bool isDP1_4Supported() = 0; + virtual bool isStreamCloningEnabled() = 0; + virtual NvU32 maxLinkRateSupported() = 0; + virtual bool isLttprSupported() = 0; + virtual bool isFECSupported() = 0; + + virtual bool setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams) = 0; + virtual bool setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams) = 0; + virtual bool setFlushMode() = 0; + virtual void clearFlushMode(unsigned headMask, bool testMode=false) = 0; + + // + // HDCP Renegotiate and trigger ACT. + // + virtual void configureHDCPRenegotiate(NvU64 cN = HDCP_DUMMY_CN, NvU64 cKsv = HDCP_DUMMY_CKSV, bool bForceReAuth = false, bool bRxIDMsgPending = false) = 0; + virtual void triggerACT() = 0; + virtual void configureHDCPGetHDCPState(HDCPState &hdcpState) = 0; + + virtual NvU32 streamToHead(NvU32 streamId, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0; + virtual NvU32 headToStream(NvU32 head, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0; + + virtual void configureSingleStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + bool bEnhancedFraming, + NvU32 tuSize, + NvU32 waterMark, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false)= 0; + + virtual void configureMultiStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + NvU32 slotStart, + NvU32 slotEnd, + NvU32 PBN, + NvU32 Timeslice, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + bool bEnableAudioOverRightPanel = false, + bool bEnable2Head1Or = false)= 0; + + virtual void configureSingleHeadMultiStreamMode(NvU32 displayIDs[], + NvU32 numStreams, + NvU32 mode, + bool bSetConfig, + NvU8 vbiosPrimaryDispIdIndex = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY)= 0; + + virtual void configureMsScratchRegisters(NvU32 address, + NvU32 hopCount, + NvU32 driverState) = 0; + + virtual bool 
controlRateGoverning(NvU32 head, bool enable, bool updateNow = true) = 0; + virtual bool getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern) = 0; + virtual bool setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, + NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, + NvBool bIsHBR2, NvBool bSkipLaneDataOverride = false) = 0; + virtual bool getDpLaneData(NvU32 *numLanes, NvU32 *data) = 0; + virtual bool setDpLaneData(NvU32 numLanes, NvU32 *data) = 0; + virtual bool rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache * dfpCache, NvBool bResetDfp) = 0; + virtual void configurePowerState(bool bPowerUp) = 0; + virtual NvU32 monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData) = 0; + virtual NvU32 getRootDisplayId() = 0; + virtual NvU32 allocDisplayId() = 0; + virtual bool freeDisplayId(NvU32 displayId) = 0; + virtual void queryGPUCapability() = 0; + virtual bool queryAndUpdateDfpParams() = 0; + virtual bool getEdpPowerData(bool *panelPowerOn, bool *bDPCDPowerStateD0) = 0; + virtual bool vrrRunEnablementStage(unsigned stage, NvU32 *status) = 0; + + virtual void configureTriggerSelect(NvU32 head, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) = 0; + + virtual void configureTriggerAll(NvU32 head, bool enable) = 0; + virtual bool dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex){ return false; } + virtual bool configureLinkRateTable(const NvU16 *pLinkRateTable, LinkRates *pLinkRates) = 0; + virtual bool configureFec(const bool bEnableFec) = 0; + }; +} + +#endif //INCLUDED_DP_MAINLINK_H diff --git a/src/common/displayport/inc/dp_merger.h b/src/common/displayport/inc/dp_merger.h new file mode 100644 index 000000000..ff57de7a0 --- /dev/null +++ b/src/common/displayport/inc/dp_merger.h @@ -0,0 +1,148 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_merger.h * +* Asynchronous Message merger * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_MERGER_H +#define INCLUDED_DP_MERGER_H + +#include "dp_list.h" +#include "dp_auxretry.h" +#include "dp_timer.h" +#include "dp_bitstream.h" +#include "dp_address.h" +#include "dp_messageheader.h" +#include "dp_configcaps.h" + +namespace DisplayPort +{ + // after 4 secs delete dead transactions + #define DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC 4000000 + struct EncodedMessage; + + class MessageTransactionMerger : virtual public Object + { + class IncompleteMessage : public ListElement + { + public: + EncodedMessage message; + NvU64 lastUpdated; + + }; + + List incompleteMessages; + Timer * timer; + NvU64 incompleteMessageTimeoutMs; + IncompleteMessage * freeOnNextCall; // we don't need to delete it on destruct + // since this is ALSO a member of the list we own + + IncompleteMessage * getTransactionRecord(const Address & address, unsigned messageNumber); + public: + MessageTransactionMerger(Timer * timer, unsigned incompleteMessageTimeoutMs) + : timer(timer), incompleteMessageTimeoutMs(incompleteMessageTimeoutMs), freeOnNextCall(0) + { + } + + // + // Pushes data into the queue and returns an encoded + // message if an entire message is assembled. + // + EncodedMessage * pushTransaction(MessageHeader * header, Buffer * data); + }; + + class IncomingTransactionManager : virtual public Object + { + public: + class IncomingTransactionManagerEventSink + { + public: + virtual void messagedReceived(IncomingTransactionManager * from, EncodedMessage * message) = 0; + }; + + void mailboxInterrupt(); + + // + // Create a message merger object + // - sink is called whenever a new message is received + // Callback::fired is passed an IncompleteMessage as the data arg. + // + IncomingTransactionManager(Timer * timerInterface, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink); + virtual ~IncomingTransactionManager(); + + protected: + virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0; + virtual size_t getMessageBoxSize() = 0; + virtual size_t getTransactionSize() = 0; + virtual void clearMessageBoxInterrupt() = 0; + private: + MessageTransactionMerger incompleteMessages; // List + + Buffer localWindow; + Timer * timer; + IncomingTransactionManagerEventSink * sink; + Address addressPrefix; // This is the aux address of the downstream port + // This field will be prepended to the address decoded. 
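+        //
+        // (Illustrative example: with an addressPrefix of 1.2, a message whose
+        //  header decodes to relative address 3 would be handed to the event
+        //  sink at the absolute topology address 1.2.3.)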
+ }; + + class DownReplyManager : public IncomingTransactionManager + { + public: + DownReplyManager(DPCDHAL * hal, Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink) + : IncomingTransactionManager(timer, addressPrefix, sink), hal(hal) + { + } + virtual ~DownReplyManager() {} + + protected: + DPCDHAL * hal; + + virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + virtual size_t getTransactionSize(); + virtual void clearMessageBoxInterrupt(); + }; + + class UpRequestManager : public IncomingTransactionManager + { + public: + UpRequestManager(DPCDHAL * hal, Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink) + : IncomingTransactionManager(timer, addressPrefix, sink), hal(hal) + { + } + virtual ~UpRequestManager() {} + protected: + DPCDHAL * hal; + + virtual AuxRetry::status readMessageBox(NvU32 offset, NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + virtual size_t getTransactionSize(); + virtual void clearMessageBoxInterrupt(); + }; +} + +#endif //INCLUDED_DP_MERGER_H diff --git a/src/common/displayport/inc/dp_messagecodings.h b/src/common/displayport/inc/dp_messagecodings.h new file mode 100644 index 000000000..0ae4d88d7 --- /dev/null +++ b/src/common/displayport/inc/dp_messagecodings.h @@ -0,0 +1,559 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_messagecodings.h * +* Encoding routines for various messages. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_MESSAGECODINGS_H +#define INCLUDED_DP_MESSAGECODINGS_H + +#include "dp_messages.h" +#include "displayport.h" +#include "dp_auxdefs.h" + +/* Fields for the HDCP stream status */ +#define NV_DP_HDCP_STREAM_STATE 1:0 +#define NV_DP_HDCP_STREAM_STATE_NO_EXIST (0x00000000) +#define NV_DP_HDCP_STREAM_STATE_NOT_ACTIVE (0x00000001) +#define NV_DP_HDCP_STREAM_STATE_ACTIVE (0x00000002) +#define NV_DP_HDCP_STREAM_STATE_ERROR (0x00000003) +#define NV_DP_HDCP_STREAM_REPEATER 2:2 +#define NV_DP_HDCP_STREAM_REPEATER_SIMPLE (0x00000000) +#define NV_DP_HDCP_STREAM_REPEATER_REPEATER (0x00000001) +#define NV_DP_HDCP_STREAM_ENCRYPTION 3:3 +#define NV_DP_HDCP_STREAM_ENCRYPTION_OFF (0x00000000) +#define NV_DP_HDCP_STREAM_ENCRYPTION_ON (0x00000001) +#define NV_DP_HDCP_STREAM_AUTHENTICATION 4:4 +#define NV_DP_HDCP_STREAM_AUTHENTICATION_OFF (0x00000000) +#define NV_DP_HDCP_STREAM_AUTHENTICATION_IP (0x00000000) +#define NV_DP_HDCP_STREAM_AUTHENTICATION_ON (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY 8:8 +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_LEGACY_YES (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP 9:9 +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_NON_DP1_2_CP_YES (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI 10:10 +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_SINK_MULTI_YES (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X 11:11 +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP1X_YES (0x00000001) +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X 12:12 +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X_NO (0x00000000) +#define NV_DP_HDCP_STREAM_OUTPUT_CP_TYPE_HDCP2X_YES (0x00000001) + +namespace DisplayPort +{ + typedef NakData Message_NakData; + + enum + { + REMOTE_READ_BUFFER_SIZE = 128, + }; + + typedef enum + { + None, + UpstreamSourceOrSSTBranch, + DownstreamBranch, + DownstreamSink, + Dongle + }PeerDevice; + + struct I2cWriteTransaction + { + I2cWriteTransaction(unsigned WriteI2cDeviceId, unsigned NumBytes, + unsigned char * buffer, bool NoStopBit = false, + unsigned I2cTransactionDelay = 0); + I2cWriteTransaction(); + unsigned WriteI2cDeviceId; + unsigned NumBytes; + unsigned char *I2cData; + bool NoStopBit; + unsigned I2cTransactionDelay; + }; + + typedef enum + { + DoesNotExist = 0, + NotActive = 1, + Active = 2, + }StreamState; + + typedef enum + { + CP_IRQ_ON = 0, + No_EVENT = 1 + }StreamEvent; + + typedef enum + { + STREAM_BEHAVIOUR_MASK_OFF = 0, + STREAM_BEHAVIOUR_MASK_ON = 1 + }StreamBehaviorMask; + + typedef enum + { + STREAM_EVENT_MASK_OFF = 0, + STREAM_EVENT_MASK_ON = 1 + }StreamEventMask; + + typedef enum + { + Force_Reauth = 0, + BlockFlow = 1 + }StreamBehavior; + + + typedef enum + { + StreamUnconnected = 0, + NonAuthLegacyDevice = 1, // TV or CRT + DP_MST = 4 + }OutputSinkType; + + typedef enum + { + HDCP1x = 1, + HDCP2x = 2 + }OutputCPType; + + typedef enum + { + SinkEvent0, + SinkEvent255 = 0xFF + }SinkEvent; + + // + // LINK_ADDRESS 0x1 + // + class LinkAddressMessage : public MessageManager::Message + { + public: + struct Result + { + bool isInputPort; + PeerDevice peerDeviceType; + unsigned portNumber; + bool hasMessaging; + bool dpPlugged; + + bool legacyPlugged; + unsigned dpcdRevisionMajor; + 
unsigned dpcdRevisionMinor; + GUID peerGUID; + unsigned SDPStreams; + unsigned SDPStreamSinks; + }; + + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + GUID guid; // originating branch device + unsigned numberOfPorts; + Result res[16]; + } reply; + + public: + LinkAddressMessage() : Message(NV_DP_SBMSG_REQUEST_ID_LINK_ADDRESS, + NV_DP_SBMSG_PRIORITY_LEVEL_2) + { + dpMemZero(&reply, sizeof(reply)); + } + + // Second stage init kept separate from constructor (reusable message) + void set(const Address & target); + + void getGUID(GUID & guid){guid = reply.guid;} + + // Number of ports described + unsigned resultCount(){return reply.numberOfPorts;} + const Result * result(unsigned index) + { + return &reply.res[index]; + } + }; + + + // + // CONNECTION_STATUS_NOTIFY 0x2 + // + class ConnStatusNotifyMessage : public MessageManager::MessageReceiver + { + public: + typedef struct + { + GUID guid; + unsigned port; + bool legacyPlugged; + bool devicePlugged; + bool messagingCapability; + bool isInputPort; + PeerDevice peerDeviceType; + }Request; + + protected: + Request request; + + public: + Request * getUpRequestData(){ return &request; } + virtual bool processByType(EncodedMessage * message, BitStreamReader * reader); + ConnStatusNotifyMessage(MessageReceiverEventSink * sink); + }; + + // + // GENERIC_UP_REPLY 0xnn + // + class GenericUpReplyMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + virtual void expired(const void * tag) + { } + + public: + GenericUpReplyMessage(const Address & target, unsigned requestId, + bool bReplyIsNack = false, bool bBroadcast = true, + bool bPath = false); + GenericUpReplyMessage(unsigned requestId, bool bReplyIsNack, + bool bBroadcast, bool bPath); + void set(const Address & target, bool bReplyIsNack = false, + bool bBroadcast = true, bool bPath = false); + + }; + + // + // CLEAR_PAYLOAD_ID_TABLE 0x14 + // + class ClearPayloadIdTableMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + virtual ParseResponseStatus parseResponse(EncodedMessage * message); + public: + ClearPayloadIdTableMessage(); + }; + + // + // ENUM_PATH_RESOURCES 0x10 + // + class EnumPathResMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + public: + struct + { + unsigned portNumber; + bool bFECCapability; + unsigned TotalPBN; + unsigned FreePBN; + } reply; + EnumPathResMessage(const Address & target, unsigned port, bool point); + }; + + // + // ALLOCATE_PAYLOAD 0x11 + // + class AllocatePayloadMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + unsigned PBN; + unsigned virtualChannelPayloadId; + }reply; + + public: + + AllocatePayloadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_ALLOCATE_PAYLOAD, + NV_DP_SBMSG_PRIORITY_LEVEL_4) + { + dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned port, + unsigned nSDPStreams, + unsigned vcPayloadId, + unsigned PBN, + unsigned* SDPStreamSink, + bool entirePath); + + unsigned replyPortNumber(){return reply.portNumber;} + unsigned replyPBN(){return reply.PBN;} + unsigned replyVirtualChannelPayloadId(){return 
reply.virtualChannelPayloadId;} + + }; + + // + // QUERY_PAYLOAD 0x12 + // + class QueryPayloadMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + unsigned allocatedPBN; + } reply; + + public: + QueryPayloadMessage(const Address & target, + unsigned port, + unsigned vcPayloadId); + + unsigned replyPortNumber() {return reply.portNumber;} + unsigned replyAllocatedPBN() {return reply.allocatedPBN;} + }; + + // + // RESOURCE_STATUS_NOTIFY 0x13 + // + class ResStatusNotifyMessage : public MessageManager::MessageReceiver + { + virtual bool processByType(EncodedMessage * message, + BitStreamReader * reader); + public: + struct + { + unsigned port; + GUID guid; + unsigned PBN; + } request; + + public: + ResStatusNotifyMessage(MessageReceiverEventSink * sink); + }; + + // + // REMOTE_DPCD_READ 0x20 + // + class RemoteDpcdReadMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + unsigned numBytesReadDPCD; + unsigned char readData[REMOTE_READ_BUFFER_SIZE]; // Buffer + } reply; + + public: + void set(const Address & target, + unsigned port, + unsigned dpcdAddress, + unsigned nBytesToRead); + + RemoteDpcdReadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_READ, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + dpMemZero(&reply, sizeof(reply)); + } + + + unsigned replyPortNumber(){return reply.portNumber;} + unsigned replyNumOfBytesReadDPCD(){return reply.numBytesReadDPCD;} + + const NvU8 * replyGetData() + { + return reply.readData; + } + }; + + // + // REMOTE_DPCD_WRITE 0x21 + // + class RemoteDpcdWriteMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + public: + void set(const Address & target, + unsigned port, + unsigned dpcdAddress, + unsigned nBytesToWrite, + const NvU8 * writeData); + + RemoteDpcdWriteMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_WRITE, + NV_DP_SBMSG_PRIORITY_LEVEL_3) {} + }; + + // + // REMOTE_I2C_READ 0x22 + // + class RemoteI2cReadMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + private: + struct + { + unsigned portNumber; + unsigned numBytesReadI2C; + unsigned char readData[REMOTE_READ_BUFFER_SIZE]; + } reply; + + public: + + RemoteI2cReadMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_READ, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned nWriteTransactions, + unsigned port, + I2cWriteTransaction* transactions, + unsigned readI2cDeviceId, + unsigned nBytesToRead); + + unsigned replyPortNumber(){return reply.portNumber;} + unsigned replyNumOfBytesReadI2C(){return reply.numBytesReadI2C;} + unsigned char* replyGetI2CData(unsigned* numBytes) + { + *numBytes = this->replyNumOfBytesReadI2C(); + return reply.readData; + } + }; + + // + // REMOTE_I2C_WRITE 0x23 + // + class RemoteI2cWriteMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + private: + struct + { + unsigned portNumber; + } reply; + + public: + + RemoteI2cWriteMessage() : Message(NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_WRITE, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + 
dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned port, + unsigned writeI2cDeviceId, + unsigned nBytesToWrite, + unsigned char* writeData); + + unsigned replyPortNumber() {return reply.portNumber;} + }; + + // + // POWER_UP_PHY 0x24 + // + class PowerUpPhyMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + } reply; + + public: + PowerUpPhyMessage() : Message(NV_DP_SBMSG_REQUEST_ID_POWER_UP_PHY, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned port, + bool entirePath); + + unsigned replyPortNumber(){return reply.portNumber;} + }; + + // + // POWER_DOWN_PHY 0x25 + // + class PowerDownPhyMessage : public MessageManager::Message + { + virtual ParseResponseStatus parseResponseAck(EncodedMessage * message, + BitStreamReader * reader); + + private: + struct + { + unsigned portNumber; + } reply; + + public: + PowerDownPhyMessage() : Message(NV_DP_SBMSG_REQUEST_ID_POWER_DOWN_PHY, + NV_DP_SBMSG_PRIORITY_LEVEL_3) + { + dpMemZero(&reply, sizeof(reply)); + } + + void set(const Address & target, + unsigned port, + bool entirePath); + + unsigned replyPortNumber(){return reply.portNumber;} + }; + + // + // SINK_EVENT_NOTIFY 0x30 + // + class SinkEventNotifyMessage : public MessageManager::MessageReceiver + { + virtual bool processByType(EncodedMessage * message, BitStreamReader * reader); + + public: + SinkEventNotifyMessage(MessageReceiverEventSink * sink, unsigned requestId); + }; + +} + +#endif //INCLUDED_DP_MESSAGECODINGS_H diff --git a/src/common/displayport/inc/dp_messageheader.h b/src/common/displayport/inc/dp_messageheader.h new file mode 100644 index 000000000..3d09e6dc2 --- /dev/null +++ b/src/common/displayport/inc/dp_messageheader.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_messageheader.h * +* DP message header parser * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_MESSAGEHEADER_H +#define INCLUDED_DP_MESSAGEHEADER_H + +#include "dp_internal.h" +#include "dp_list.h" +#include "dp_auxretry.h" +#include "dp_timer.h" +#include "dp_bitstream.h" +#include "dp_address.h" + +namespace DisplayPort +{ + // + // User filled message structure + // + #define MAX_MESSAGE_SIZE 64 + struct EncodedMessage : public Object + { + unsigned messageNumber; // 0 or 1 + Address address; // target device for message (source for reply) + Buffer buffer; + bool isBroadcast; + bool isPathMessage; + + EncodedMessage() + : messageNumber(0), isBroadcast(false), isPathMessage(false) + {} + + void swap(EncodedMessage & other) + { + swap_args(messageNumber, other.messageNumber); + swap_args(address, other.address); + swap_args(isBroadcast, other.isBroadcast); + swap_args(isPathMessage, other.isPathMessage); + buffer.swap(other.buffer); + } + }; + + // + // Decoded message header + // + struct MessageHeader + { + Address address; + unsigned messageNumber; + unsigned payloadBytes; + bool isBroadcast; + bool isPathMessage; + bool isTransactionStart; + bool isTransactionEnd; + unsigned headerSizeBits; + }; + + bool decodeHeader(BitStreamReader * reader, MessageHeader * header, const Address & address); + + // + // Routines for maintaining a list of partially complete messages + // + + // after 4 secs delete dead transactions + #define DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC 4000000 + +} +#endif //INCLUDED_DP_MESSAGEHEADER_H diff --git a/src/common/displayport/inc/dp_messages.h b/src/common/displayport/inc/dp_messages.h new file mode 100644 index 000000000..04d320421 --- /dev/null +++ b/src/common/displayport/inc/dp_messages.h @@ -0,0 +1,322 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_messages.h * +* Encoding routines for aux common messages. 
* +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_MESSAGES_H +#define INCLUDED_DP_MESSAGES_H + +#include "dp_address.h" +#include "dp_bitstream.h" +#include "dp_splitter.h" +#include "dp_merger.h" +#include "dp_crc.h" +#include "dp_list.h" +#include "dp_connector.h" +#include "dp_messageheader.h" +#include "dp_auxdefs.h" + +namespace DisplayPort +{ + bool extractGUID(BitStreamReader * reader, GUID * guid); + + typedef enum + { + NakUndefined, + NakWriteFailure, + NakInvalidRAD, + NakCrcFailure, + NakBadParam, + NakDefer, + NakLinkFailure, + NakNoResources, + NakDpcdFail, + NakI2cNak, + NakAllocateFail, + + // Extensions + NakTimeout = 0x100 // Message was unable to be transmitted + + } NakReason; + + typedef struct + { + GUID guid; + NakReason reason; + unsigned nak_data; + } NakData; + + typedef enum + { + ParseResponseSuccess, + ParseResponseFailed, + ParseResponseWrong + } ParseResponseStatus; + + // + // Priority levels are defined to prioritize SBMs for DP1.4 (Highest Priority - LEVEL1, Lowest Priority - DEFAULT) + // Current implementation has the following priority levels + // CLEAR_PAYLOAD_ID_TABLE = NV_DP_SBMSG_PRIORITY_LEVEL_1 + // LINK_ADDRESS = NV_DP_SBMSG_PRIORITY_LEVEL_2 + // REMOTE_DPCD_READ, REMOTE_DPCD_WRITE = NV_DP_SBMSG_PRIORITY_LEVEL_3 + // REMOTE_I2C_READ, REMOTE_I2C_WRITE = NV_DP_SBMSG_PRIORITY_LEVEL_3 + // POWER_UP_PHY, POWER_DOWN_PHY = NV_DP_SBMSG_PRIORITY_LEVEL_3 + // ENUM_PATH_RESOURCES, ALLOCATE_PAYLOAD = NV_DP_SBMSG_PRIORITY_LEVEL_4 + // All other messages = NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT + // + // However, Message::setMessagePriority can be used to override this priority levels, if required. + // + typedef enum + { + NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT, + NV_DP_SBMSG_PRIORITY_LEVEL_4, + NV_DP_SBMSG_PRIORITY_LEVEL_3, + NV_DP_SBMSG_PRIORITY_LEVEL_2, + NV_DP_SBMSG_PRIORITY_LEVEL_1, + } DPSideBandMessagePriority; + + // + // CLASS: MessageManager + // + class MessageManager : + virtual public Object, + IncomingTransactionManager::IncomingTransactionManagerEventSink + { + + Timer * timer; + DPCDHAL * hal; + DownRequestManager splitterDownRequest; + UpReplyManager splitterUpReply; + UpRequestManager mergerUpRequest; + DownReplyManager mergerDownReply; + bool isBeingDestroyed; + bool isPaused; + + List messageReceivers; + List notYetSentDownRequest; // Down Messages yet to be processed + List notYetSentUpReply; // Up Reply Messages yet to be processed + List awaitingReplyDownRequest; // Transmitted, Split, but not yet replied to + + void onUpRequestReceived(bool status, EncodedMessage * message); + void onDownReplyReceived(bool status, EncodedMessage * message); + void transmitAwaitingDownRequests(); + void transmitAwaitingUpReplies(); + + // IncomingTransactionManager + void messagedReceived(IncomingTransactionManager * from, EncodedMessage * message); + + public: + class Message; + void cancelAllByType(unsigned type); + void cancelAll(Message * message); + + void pause() + { + isPaused = true; + } + + void clearPendingMsg() + { + hal->clearPendingMsg(); + } + void IRQUpReqest() + { + mergerUpRequest.mailboxInterrupt(); + } + + void IRQDownReply() + { + mergerDownReply.mailboxInterrupt(); + } + + MessageManager(DPCDHAL * hal, Timer * timer) + : timer(timer), hal(hal), + splitterDownRequest(hal, timer), + splitterUpReply(hal, timer), + mergerUpRequest(hal, timer, Address(0), this), + mergerDownReply(hal, timer, Address(0), this), + isBeingDestroyed(false) + { + } + + // + // CLASS: MessageReceiver + // + 
class MessageReceiver : public ListElement, OutgoingTransactionManager::OutgoingTransactionManagerEventSink + { + public: + class MessageReceiverEventSink + { + public: + virtual void messageProcessed(MessageReceiver * from) = 0; + }; + + // Returns false if the message should be passed to the next receiver + virtual bool process(EncodedMessage * message); + + // per message type should implement this + virtual bool processByType(EncodedMessage * message, BitStreamReader * reader) = 0; + + unsigned getRequestId() {return requestId;} + Address & getAddress() {return address;} + + MessageReceiver(MessageReceiverEventSink* sink, unsigned requestId) + : sink(sink), + requestId(requestId), + bProcessed(true), + address(0) // 0 to start with + {} + + virtual void splitterFailed(OutgoingTransactionManager * from) + { + DP_ASSERT(0 && "why did we send a reply"); + } + + virtual void splitterTransmitted(OutgoingTransactionManager * from) + { + DP_ASSERT(0 && "why did we send a reply"); + } + + protected: + MessageReceiverEventSink * sink; + unsigned requestId; + bool bProcessed; + Address address; + MessageManager * parent; + + }; + + // + // CLASS: Message + // + class Message : public ListElement, + OutgoingTransactionManager::OutgoingTransactionManagerEventSink, + Timer::TimerCallback /* countdown timer for reply */ + { + public: + class MessageEventSink + { + public: + virtual void messageFailed(Message * from, NakData * nakData) = 0; + virtual void messageCompleted(Message * from) = 0; + }; + unsigned getMsgType() {return requestIdentifier;} + unsigned getSinkPort() {return sinkPort;} + protected: + // Encoded message body (set in dp_messagecodings) + // this data structure is invalidated on post + // as the data gets swapped into the transmit buffer. + EncodedMessage encodedMessage; + MessageEventSink * sink; + + MessageManager * parent; + bool transmitReply; + bool bTransmitted; + unsigned requestIdentifier; + unsigned messagePriority; + unsigned sinkPort; + + // State updated by post operation + struct { + unsigned messageNumber; + Address target; + } state; + + virtual ParseResponseStatus parseResponseAck( + EncodedMessage * message, BitStreamReader * reader) = 0; + virtual ParseResponseStatus parseResponse(EncodedMessage * message); + virtual void splitterFailed(OutgoingTransactionManager * from); + virtual void expired(const void * tag); + virtual void splitterTransmitted(OutgoingTransactionManager * from); + + public: + friend class MessageManager; + + Message(int requestIdentifier, int messagePriority) + : sink(0), + parent(0), + transmitReply(false), + bTransmitted(false), + requestIdentifier(requestIdentifier), + messagePriority(messagePriority), + sinkPort(0xFF) + { + } + + void clear() + { + if (parent) { + parent->timer->cancelCallbacks(this); + parent->splitterDownRequest.cancel(this); + } + + parent = 0; + List::remove(this); + encodedMessage.buffer.reset(); + } + + // This function can be used to override the already set priority of the message from it's constructor. + void setMessagePriority(DPSideBandMessagePriority priorityLevel) + { + this->messagePriority = priorityLevel; + return; + } + + protected: + ~Message() + { + clear(); + } + }; + + // + // Register new receiver for unpair messages + // (eg. 
broadcast messages or sink->source messages) + // + void registerReceiver(MessageReceiver * receiver); + + // Post a message to be asynchronously transmitted + void post(Message * message, Message::MessageEventSink * sink, bool isReply = false); + void postReply(Message * message, Message::MessageEventSink * sink); + void cancel(Message * message); + + bool send(Message * message, NakData & nakData); + friend class Message; + ~MessageManager(); + }; + struct GenericMessageCompletion : public MessageManager::Message::MessageEventSink + { + bool failed; + bool completed; + NakData nakData; + GenericMessageCompletion(); + void messageFailed(MessageManager::Message * from, NakData * data); + void messageCompleted(MessageManager::Message * from); + }; +} + +#endif //INCLUDED_DP_MESSAGES_H diff --git a/src/common/displayport/inc/dp_object.h b/src/common/displayport/inc/dp_object.h new file mode 100644 index 000000000..9bb02e805 --- /dev/null +++ b/src/common/displayport/inc/dp_object.h @@ -0,0 +1,132 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_object.h * +* This is the object from which all other dynamically-allocated objects * +* must inherit. * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_OBJECT_H +#define INCLUDED_DP_OBJECT_H + +#include "nvtypes.h" +#include "stddef.h" +#include "dp_hostimp.h" + +static inline void dpMemCopy(void * target, const void * source, size_t len) +{ + NvU8 * t = (NvU8 *)target; + const NvU8 * s = (const NvU8 *)source; + + while (len--) + *t++=*s++; +} + +static inline void dpMemZero(void * target, size_t len) +{ + NvU8 * t = (NvU8 *)target; + + while (len--) + *t++=0; +} + +static inline bool dpMemCmp(void *pvBuf1, void *pvBuf2, size_t size) +{ + NvU8 *pBuf1 = (NvU8 *)pvBuf1; + NvU8 *pBuf2 = (NvU8 *)pvBuf2; + + if(!pBuf1 || !pBuf2 || !size) + return false; + + do + { + if(*pBuf1++ == *pBuf2++) + continue; + else + break; + }while(--size); + + if(!size) + return true; + else + return false; +} + +namespace DisplayPort +{ + // + // Any object allocated through "new" must virtually inherit from this type. + // This guarantees that the memory allocation goes through dpMalloc/dpFree. 
+ // Leak detection is implemented only on allocations of this type. Data + // structures may assume 0 initialization if allocated off the heap. + // + // You must use virtual inheritance because objects that inherit from + // multiple Object-derived classes would otherwise cause ambiguity when + // someone tries to use new or delete on them. + // + struct Object + { + virtual ~Object() {} + + void *operator new(size_t sz) + { + void * block = dpMalloc(sz); + if (block) + { + dpMemZero(block, sz); + } + return block; + } + + void *operator new[](size_t sz) + { + void * block = dpMalloc(sz); + if (block) + { + dpMemZero(block, sz); + } + return block; + } + + void operator delete(void * ptr) + { + if (ptr) + { + dpFree(ptr); + } + } + + void operator delete[](void * ptr) + { + if (ptr) + { + dpFree(ptr); + } + } + }; +} + +#endif // INCLUDED_DP_OBJECT_H diff --git a/src/common/displayport/inc/dp_regkeydatabase.h b/src/common/displayport/inc/dp_regkeydatabase.h new file mode 100644 index 000000000..5e4664460 --- /dev/null +++ b/src/common/displayport/inc/dp_regkeydatabase.h @@ -0,0 +1,108 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_regkeydatabase.h * +* Definition of the DP_REGKEY_DATABASE * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_REGKEYDATABASE_H +#define INCLUDED_DP_REGKEYDATABASE_H + +#include "dp_auxdefs.h" + +// Regkey Names +#define NV_DP_REGKEY_ENABLE_AUDIO_BEYOND_48K "ENABLE_AUDIO_BEYOND48K" +#define NV_DP_REGKEY_OVERRIDE_DPCD_REV "OVERRIDE_DPCD_REV" +#define NV_DP_REGKEY_DISABLE_SSC "DISABLE_SSC" +#define NV_DP_REGKEY_ENABLE_FAST_LINK_TRAINING "ENABLE_FAST_LINK_TRAINING" +#define NV_DP_REGKEY_DISABLE_MST "DISABLE_MST" +#define NV_DP_REGKEY_ENABLE_INBAND_STEREO_SIGNALING "ENABLE_INBAND_STEREO_SIGNALING" +#define NV_DP_REGKEY_SKIP_POWEROFF_EDP_IN_HEAD_DETACH "SKIP_POWEROFF_EDP_IN_HEAD_DETACH" +#define NV_DP_REGKEY_ENABLE_OCA_LOGGING "ENABLE_OCA_LOGGING" +#define NV_DP_REGKEY_REPORT_DEVICE_LOST_BEFORE_NEW "HP_WAR_1707690" +#define NV_DP_REGKEY_APPLY_LINK_BW_OVERRIDE_WAR "APPLY_LINK_BW_OVERRIDE_WAR" +#define NV_DP_REGKEY_APPLY_MAX_LINK_RATE_OVERRIDES "APPLY_OVERRIDES_FOR_BUG_2489143" +#define NV_DP_REGKEY_DISABLE_DSC "DISABLE_DSC" +#define NV_DP_REGKEY_SKIP_ASSESSLINK_FOR_EDP "HP_WAR_2189772" +#define NV_DP_REGKEY_HDCP_AUTH_ONLY_ON_DEMAND "DP_HDCP_AUTH_ONLY_ON_DEMAND" +#define NV_DP_REGKEY_ENABLE_MSA_OVER_MST "ENABLE_MSA_OVER_MST" + +// Keep link alive for SST and MST +#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE "DP_KEEP_OPT_LINK_ALIVE" +// Keep link alive when connector is in MST +#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_MST "DP_KEEP_OPT_LINK_ALIVE_MST" +// Keep link alive when connector is in SST +#define NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_SST "DP_KEEP_OPT_LINK_ALIVE_SST" + +#define NV_DP_REGKEY_FORCE_EDP_ILR "DP_BYPASS_EDP_ILR_REV_CHECK" + +// +// DSC capability of downstream device should be decided based on device's own +// and its parent's DSC capability. +// +#define NV_DP_DSC_MST_CAP_BUG_3143315 "DP_DSC_MST_CAP_BUG_3143315" + +// +// Enable DSC Pass through support in MST mode. +// +#define NV_DP_DSC_MST_ENABLE_PASS_THROUGH "DP_DSC_MST_ENABLE_PASS_THROUGH" + +// +// Data Base used to store all the regkey values. +// The actual data base is declared statically in dp_evoadapter.cpp. +// All entries set to 0 before initialized by the first EvoMainLink constructor. +// The first EvoMainLink constructor will populate that data base. +// Later EvoMainLink will use values from that data base. 
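+//
+// For example, the bMstDisabled field below is expected to be filled in by the
+// first EvoMainLink constructor from the NV_DP_REGKEY_DISABLE_MST key defined
+// above, and afterwards consulted directly instead of re-reading the registry.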
+// +struct DP_REGKEY_DATABASE +{ + bool bInitialized; // set to true after the first EvoMainLink instance is constructed + // Below are regkey values + bool bAudioBeyond48kEnabled; + NvU32 dpcdRevOveride; + bool bSscDisabled; + bool bFastLinkTrainingEnabled; + bool bMstDisabled; + bool bInbandStereoSignalingEnabled; + bool bPoweroffEdpInHeadDetachSkipped; + bool bOcaLoggingEnabled; + bool bReportDeviceLostBeforeNew; + bool bLinkBwOverrideWarApplied; + NvU32 applyMaxLinkRateOverrides; + bool bDscDisabled; + bool bAssesslinkForEdpSkipped; + bool bHdcpAuthOnlyOnDemand; + bool bMsaOverMstEnabled; + bool bOptLinkKeptAlive; + bool bOptLinkKeptAliveMst; + bool bOptLinkKeptAliveSst; + bool bBypassEDPRevCheck; + bool bDscMstCapBug3143315; + bool bDscMstEnablePassThrough; +}; + +#endif //INCLUDED_DP_REGKEYDATABASE_H + diff --git a/src/common/displayport/inc/dp_ringbuffer.h b/src/common/displayport/inc/dp_ringbuffer.h new file mode 100644 index 000000000..67fa9e0b4 --- /dev/null +++ b/src/common/displayport/inc/dp_ringbuffer.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include "dp_object.h" + +#define addToRingBufferCollection(x) {} +#define addDpLogRecord(x, ...) {} +#define addDpAssertRecord() {} +#define queryDpLogRecords(a, b, c) {} +#define resetDpAssertRingBuffer() {} + diff --git a/src/common/displayport/inc/dp_splitter.h b/src/common/displayport/inc/dp_splitter.h new file mode 100644 index 000000000..827ae1bb8 --- /dev/null +++ b/src/common/displayport/inc/dp_splitter.h @@ -0,0 +1,156 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_splitter.h * +* Asynchronous Message splitter * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_SPLITTER_H +#define INCLUDED_DP_SPLITTER_H + +#include "dp_list.h" +#include "dp_auxretry.h" +#include "dp_timer.h" +#include "dp_auxdefs.h" +#include "dp_messageheader.h" + +namespace DisplayPort +{ + + struct EncodedMessage; + class DPCDHAL; + + class MessageTransactionSplitter + { + EncodedMessage * messageOutstanding; // If set we've pulled an item out of the downQueue queue. + // One or more transactions have been sent as a result + // messageOutstanding->messageOffset show how far into + // the message we are. + unsigned assemblyTransmitted; + public: + void set(EncodedMessage * messageOutstanding) + { + this->messageOutstanding = messageOutstanding; + assemblyTransmitted = 0; + } + + // + // Encode the next transaction. + // returns false if there are no more transactions + // + bool get(Buffer & assemblyBuffer); + + MessageTransactionSplitter() + {} + }; + + class OutgoingTransactionManager: + virtual public Object, + private Timer::TimerCallback + { + public: + class OutgoingTransactionManagerEventSink + { + public: + virtual void splitterFailed(OutgoingTransactionManager * from) = 0; // Sink DEFER the writes + virtual void splitterTransmitted(OutgoingTransactionManager * from) = 0; // message was sent (may NACK later) + }; + + // Send the encoded message. 
This call is destructive to the EncodedMessage + // passed in + bool send( EncodedMessage & payload, OutgoingTransactionManagerEventSink * sink); + + OutgoingTransactionManager(Timer * timer); + virtual ~OutgoingTransactionManager() { timer->cancelCallbacks(this); } + + // Do not make any calls to the event sink + void cancel(OutgoingTransactionManagerEventSink * sink); + + protected: + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length) = 0; + virtual size_t getMessageBoxSize() = 0; + private: + void writeToWindow( bool firstAttempt); + void split(); + void expired(const void * tag); // timer callback + + unsigned retriesLeft; + + Buffer assemblyBuffer; + MessageTransactionSplitter transactionSplitter; + + // + // List of outgoing messages + // + struct OutgoingMessage : ListElement + { + OutgoingTransactionManagerEventSink* eventSink; + EncodedMessage message; + }; + + List queuedMessages; + + // + // Message currently assembled in transactionSplitter + // (if any) + // + OutgoingMessage * activeMessage; + Timer * timer; + }; + + + class DownRequestManager : public OutgoingTransactionManager + { + public: + DownRequestManager(DPCDHAL * hal, Timer * timer) + : OutgoingTransactionManager(timer), hal(hal) + { + } + + virtual ~DownRequestManager() {} + protected: + DPCDHAL * hal; + + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + }; + + class UpReplyManager : public OutgoingTransactionManager + { + public: + UpReplyManager(DPCDHAL * hal, Timer * timer) + : OutgoingTransactionManager(timer), hal(hal) + { + } + virtual ~UpReplyManager() {} + protected: + DPCDHAL * hal; + + virtual AuxRetry::status writeMessageBox(NvU8 * data, size_t length); + virtual size_t getMessageBoxSize(); + }; +} + +#endif //INCLUDED_DP_SPLITTER_H diff --git a/src/common/displayport/inc/dp_timeout.h b/src/common/displayport/inc/dp_timeout.h new file mode 100644 index 000000000..35f07abbf --- /dev/null +++ b/src/common/displayport/inc/dp_timeout.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_timeout.h * +* Local timeout management * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_TIMEOUT_H +#define INCLUDED_DP_TIMEOUT_H + +#include "dp_timer.h" + +namespace DisplayPort +{ + // + // Timeout management + // + class Timeout : virtual public Object + { + Timer * timer; + NvU64 timeoutTime; // What time to trigger the timeout at + + public: + + Timeout(Timer * _timer, int timeoutMilliseconds) + : timer(_timer), timeoutTime(_timer->getTimeUs() + timeoutMilliseconds*1000 + 1 /* counter could be about to roll */) + { + } + + NvS64 remainingUs() + { + NvS64 remaining = (NvS64)(timeoutTime - timer->getTimeUs()); + + // Rollover check + if (remaining < 0) + { + remaining = 0; + } + + DP_ASSERT(remaining < ((NvS64)1000000*3600) && "Timeout remaining over an hour"); + + return remaining; + } + + bool valid() + { + return remainingUs() > 0; + } + }; +} + +#endif //INCLUDED_DP_TIMEOUT_H diff --git a/src/common/displayport/inc/dp_timer.h b/src/common/displayport/inc/dp_timer.h new file mode 100644 index 000000000..bf8c3f617 --- /dev/null +++ b/src/common/displayport/inc/dp_timer.h @@ -0,0 +1,104 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_timer.h * +* Local timer interface * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_TIMER_H +#define INCLUDED_DP_TIMER_H + +#include "dp_list.h" + +namespace DisplayPort +{ + // + // RawTimer + // This API is expected to be implemented by the + // library client. 
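+    //       A host implementation typically wraps the platform's one-shot
+    //       timer and a monotonic clock.  A minimal client-side sketch
+    //       (hypothetical "HostRawTimer"; the name and bodies are assumed,
+    //       not part of this library) could look like:
+    //
+    //           class HostRawTimer : public DisplayPort::RawTimer
+    //           {
+    //           public:
+    //               virtual void queueCallback(Callback * cb, int ms)
+    //               {
+    //                   // arm a one-shot OS timer; call cb->expired() when it fires
+    //               }
+    //               virtual NvU64 getTimeUs() { return /* monotonic time in usec */ 0; }
+    //               virtual void  sleep(int ms) { /* block the caller for ms milliseconds */ }
+    //           };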
+ // + class RawTimer : virtual public Object + { + public: + struct Callback : virtual public Object + { + virtual void expired() = 0; + }; + virtual void queueCallback(Callback * callback, int milliseconds) = 0; + virtual NvU64 getTimeUs() = 0; + virtual void sleep(int milliseconds) = 0; + }; + + + // + // Timer + // + class Timer : public RawTimer::Callback + { + public: + struct TimerCallback + { + virtual void expired(const void * context) = 0; + }; + + private: + RawTimer * raw; + NvU64 nextTimestamp; + List pending; + struct PendingCallback : ListElement + { + TimerCallback * target; + const void * context; + NvU64 timestamp; // in usec + bool executeInSleep; + + }; + + virtual void expired(); + unsigned fire(bool fromSleep); + + void _pump(unsigned milliseconds, bool fromSleep); + public: + Timer(RawTimer * raw) : raw(raw) {} + virtual ~Timer() {} + + // + // Queue a timer callback. + // Unless the dont-execute-in-sleep flag is + // + void queueCallback(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep = true); + NvU64 getTimeUs(); + void sleep(unsigned milliseconds); + void cancelCallbacks(Timer::TimerCallback * to); + + void cancelCallback(Timer::TimerCallback * to, const void * context); + void queueCallbackInOrder(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep); + void cancelCallbacksWithoutContext(const void * context); + void cancelAllCallbacks(); + bool checkCallbacksOfSameContext(const void * context); + }; +} + +#endif //INCLUDED_DP_TIMER_H diff --git a/src/common/displayport/inc/dp_tracing.h b/src/common/displayport/inc/dp_tracing.h new file mode 100644 index 000000000..993320f0b --- /dev/null +++ b/src/common/displayport/inc/dp_tracing.h @@ -0,0 +1,128 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + /******************************* DisplayPort ******************************\ +* * +* Module: dp_tracing.h * +* Header file for support of tracing, implemented by a host provider * +* Because this is platform-agnostic, the tracing API * +* is left up to the host interface. 
* +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_TRACING_H +#define INCLUDED_DP_TRACING_H + +#include "nvtypes.h" + +typedef enum NV_DP_TRACING_EVENT +{ + TRACE_DP_ID_HOTPLUG, + TRACE_DP_ID_NEW_SINK_DETECTED, + TRACE_DP_ID_NEW_SINK_REPORTED, + TRACE_DP_ID_NEW_MST_DEVICE, + TRACE_DP_ID_LOST_DEVICE, + TRACE_DP_ID_LINK_ASSESSMENT, + TRACE_DP_ID_LINK_TRAINING_START, + TRACE_DP_ID_LINK_TRAINING_DONE, + TRACE_DP_ID_NOTIFY_ATTACH_BEGIN, + TRACE_DP_ID_NOTIFY_ATTACH_BEGIN_STATUS, + TRACE_DP_ID_NOTIFY_ATTACH_END, + TRACE_DP_ID_NOTIFY_DETACH_BEGIN, + TRACE_DP_ID_NOTIFY_DETACH_END, + TRACE_DP_ID_MESSAGE_EXPIRED +} NV_DP_TRACING_EVENT; + +typedef enum NV_DP_TRACING_PRIORITY +{ + TRACE_DP_PRIORITY_ERROR, + TRACE_DP_PRIORITY_WARNING, + TRACE_DP_PRIORITY_INFO +} NV_DP_TRACING_PRIORITY; + +#define NV_DPTRACE_MAX_PARAMS 8 + +#define _NV_DPTRACE_EXPAND_HELPER(x) x +#define _NV_DPTRACE_EXPAND(x) _NV_DPTRACE_EXPAND_HELPER(x) + +// +// _COUNT_ARGS: Counts the size of an argument list. +// +// For example, if the argument list is two-arguments "A, B", then call it like this: +// _COUNT_ARGS(_placeholder, A, B, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +// +// which maps to the argument names like this: +// _COUNT_ARGS(_0=_placeholder, _1=A, _2=B, _3=9, _4=8, _5=7, _6=6, _7=5, _8=4,, _9=3, _10=2, ...) +// +// and thus _COUNT_ARGS will return 2, the correct size of the argument list. +// +#define _NV_DPTRACE_COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, ...) _10 + +#define NV_DPTRACE_ERROR(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_ERROR, __VA_ARGS__) +#define NV_DPTRACE_WARNING(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_WARNING, __VA_ARGS__) +#define NV_DPTRACE_INFO(...) NV_DPTRACE_EVENT(TRACE_DP_PRIORITY_INFO, __VA_ARGS__) + +// +// When ##__VA_ARGS__ is used, it will delete a preceding comma (',') when +// __VA_ARGS__ is blank (i.e. zero-length argument list). This allows +// the zero-argument case to work without resulting in a syntax error. +// +// We have a placeholder argument as the first parameter to _COUNT_ARGS +// so that we can take advantage of this comma-deleting behavior. +// +// However, there shouldn't be a zero-arg case as of now, because the first arg is the event. +// +#define NV_DPTRACE_EVENT(priority, ...) \ + _NV_DPTRACE_SEND(priority, _NV_DPTRACE_EXPAND(_NV_DPTRACE_COUNT_ARGS(_0, ##__VA_ARGS__, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)), __VA_ARGS__) + +#define _NV_DPTRACE_SEND(priority, argc, ...) _NV_DPTRACE_EXPAND(_NV_DPTRACE_SEND_N(priority, argc, __VA_ARGS__)) +#define _NV_DPTRACE_SEND_N(priority, argc, ...) 
_NV_DPTRACE_EXPAND(_NV_DPTRACE_##argc(priority, __VA_ARGS__)) + +// The first argument is the event - macro number is one higher than num args passed to dpTraceEvent +#define _NV_DPTRACE_1(priority, event) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 0); + +#define _NV_DPTRACE_2(priority, event, p1) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 1, p1); + +#define _NV_DPTRACE_3(priority, event, p1, p2) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 2, p1, p2); + +#define _NV_DPTRACE_4(priority, event, p1, p2, p3) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 3, p1, p2, p3); + +#define _NV_DPTRACE_5(priority, event, p1, p2, p3, p4) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 4, p1, p2, p3, p4); + +#define _NV_DPTRACE_6(priority, event, p1, p2, p3, p4, p5) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 5, p1, p2, p3, p4, p5); + +#define _NV_DPTRACE_7(priority, event, p1, p2, p3, p4, p5, p6) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 6, p1, p2, p3, p4, p5, p6); + +#define _NV_DPTRACE_8(priority, event, p1, p2, p3, p4, p5, p6, p7) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 7, p1, p2, p3, p4, p5, p6, p7); + +#define _NV_DPTRACE_9(priority, event, p1, p2, p3, p4, p5, p6, p7, p8) \ + dpTraceEvent(TRACE_DP_ID_##event, priority, 8, p1, p2, p3, p4, p5, p6, p7, p8); + +#endif // INCLUDED_DP_TRACING_H diff --git a/src/common/displayport/inc/dp_vrr.h b/src/common/displayport/inc/dp_vrr.h new file mode 100644 index 000000000..4fa73aa4d --- /dev/null +++ b/src/common/displayport/inc/dp_vrr.h @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_vrr.h * +* Prototypes and definitions related to VRR enablement * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_VRR_H +#define INCLUDED_DP_VRR_H + +#include "dp_object.h" + +// Worstcase VRR enablement handshake timeout of 600ms (40x15ms) +#define VRR_ENABLE_STATUS_TIMEOUT_THRESHOLD 40 +#define VRR_ENABLE_STATUS_TIMEOUT_INTERVAL_MS 15 + +// Retry enablement threshold in notifyShortPulse() +#define VRR_MAX_RETRIES 3 + +namespace DisplayPort +{ + enum VrrEnableStage + { + VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN, + VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE, + VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK, + VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN, + VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE, + VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, + VRR_ENABLE_STAGE_RESET_MONITOR, + VRR_ENABLE_STAGE_INIT_PUBLIC_INFO, + VRR_ENABLE_STAGE_GET_PUBLIC_INFO, + VRR_ENABLE_STAGE_STATUS_CHECK, + }; + + struct DeviceImpl; + + class VrrEnablement : virtual public Object + { + private: + DeviceImpl *parent; + bool bMonitorEnabled; + + bool vrrGetPublicInfo(void); + bool vrrWaitOnEnableStatus(void); + bool vrrEnableMonitor(void); + bool vrrEnableDriver(void); + + public: + + VrrEnablement(DeviceImpl *parent) + : parent(parent) + { + reset(); + } + + ~VrrEnablement() + { + parent = NULL; + reset(); + } + + bool start(void); + void reset(void) + { + bMonitorEnabled = false; + } + bool isMonitorEnabled(void); + bool isDriverEnabled(void); + }; +} + +#endif diff --git a/src/common/displayport/inc/dp_wardatabase.h b/src/common/displayport/inc/dp_wardatabase.h new file mode 100644 index 000000000..9b54f9536 --- /dev/null +++ b/src/common/displayport/inc/dp_wardatabase.h @@ -0,0 +1,75 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_wardatabase.h * +* EDID and OUI based workarounds for panel/TCON issues * +* * +\***************************************************************************/ + +#ifndef INCLUDED_DP_WARDATABASE_H +#define INCLUDED_DP_WARDATABASE_H + +#include "dp_object.h" + +namespace DisplayPort +{ + #define WAR_MAX_REASSESS_ATTEMPT 3 + #define WAR_MAX_RETRAIN_ATTEMPT 3 + + typedef enum + { + DP_MONITOR_CAPABILITY_DP_SKIP_REDUNDANT_LT = (1 << 0), // Do not train if the link B/W and lane count are already set to the desired quantities + DP_MONITOR_CAPABILITY_DP_SKIP_CABLE_BW_CHECK = (1 << 1), // Skip the link training attempts to test cable bandwidth in CheckDpLink + DP_MONITOR_CAPABILITY_DP_MULTI_WRITE_DPCD_0x600 = (1 << 2), // Repeatedly write 0x1 to 0x600 with extra delays until the read verifies the write + DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT = (1 << 3), // Power on a monitor before every link training + DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG = (1 << 4), // Override optimal link config + DP_MONITOR_CAPABILITY_DP_OVERRIDE_MAX_LANE_COUNT = (1 << 5), // WAR for some DP monitors which claims more lane count than it really supports. It may generate interrupt storm if unsupported lane count is applied + DP_MONITOR_CAPABILITY_DP_AVOID_UPDATE_POWER_STATE = (1 << 6), // Don't update panel power state when head detach or lid closed + } DP_MONITOR_CAPABILITY; + + struct DpMonitorDenylistData: virtual public Object + { + // Max lane count supported override value + unsigned int dpMaxLaneCountOverride; + + // Link rate and Lane count value overrides + // when we need to skip BW check + struct + { + unsigned int maxLaneAtHighRate; + unsigned int maxLaneAtLowRate; + } dpSkipCheckLink; + + // Link rate and Lane count value overrides + // when we need to force optimal link config + struct + { + unsigned int linkRate; + unsigned int laneCount; + } dpOverrideOptimalLinkConfig; + }; +} + +#endif // INCLUDED_DP_WARDATABASE_H diff --git a/src/common/displayport/inc/dp_watermark.h b/src/common/displayport/inc/dp_watermark.h new file mode 100644 index 000000000..b9f05d094 --- /dev/null +++ b/src/common/displayport/inc/dp_watermark.h @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_watermark.h * +* DP watermark IsModePossible calculations. * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_WATERMARK_H +#define INCLUDED_DP_WATERMARK_H + +#include "displayport.h" + +#define WAR_AUDIOCLAMPING_FREQ 48000 // Audio freq. more than 48KHz are currently clamped due to bug 925211 + +namespace DisplayPort +{ + class LinkConfiguration; + + struct ModesetInfo + { + unsigned twoChannelAudioHz; // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz; // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz; // Requested pixel clock for the mode + unsigned rasterWidth; + unsigned rasterHeight; + unsigned surfaceWidth; // RasterBlankStartX - newRasterBlankEndX + unsigned surfaceHeight; // Active region height + unsigned depth; + unsigned rasterBlankStartX; + unsigned rasterBlankEndX; + unsigned bitsPerComponent; // Bits per component + bool bEnableDsc; // bEnableDsc=1 indicates DSC would be enabled for the mode + DSC_MODE mode; // DSC Mode + + ModesetInfo(): twoChannelAudioHz(0), + eightChannelAudioHz(0), + pixelClockHz(0), + rasterWidth(0), + rasterHeight(0), + surfaceWidth(0), + surfaceHeight(0), + depth(0), + rasterBlankStartX(0), + rasterBlankEndX(0), + bitsPerComponent(0), + bEnableDsc(false), + mode(DSC_SINGLE) {} + + ModesetInfo(unsigned newTwoChannelAudioHz, unsigned newEightChannelAudioHz, NvU64 newPixelClockHz, + unsigned newRasterWidth, unsigned newRasterHeight, + unsigned newSurfaceWidth, unsigned newSurfaceHeight, unsigned newDepth, + unsigned newRasterBlankStartX=0, unsigned newRasterBlankEndX=0, bool newBEnableDsc = false, + DSC_MODE newMode = DSC_SINGLE): + twoChannelAudioHz(newTwoChannelAudioHz), + eightChannelAudioHz(newEightChannelAudioHz), + pixelClockHz(newPixelClockHz), + rasterWidth(newRasterWidth), + rasterHeight(newRasterHeight), + surfaceWidth(newSurfaceWidth), + surfaceHeight(newSurfaceHeight), + depth(newDepth), + rasterBlankStartX(newRasterBlankStartX), + rasterBlankEndX(newRasterBlankEndX), + bitsPerComponent(0), + bEnableDsc(newBEnableDsc), + mode(newMode){} + }; + + struct Watermark + { + unsigned waterMark; + unsigned tuSize; + unsigned hBlankSym; + unsigned vBlankSym; + }; + + bool isModePossibleSST + ( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits = false + ); + + bool isModePossibleMST + ( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo + ); + + bool isModePossibleSSTWithFEC + ( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits = false + ); + + bool isModePossibleMSTWithFEC + ( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo + ); + + // Return Payload Bandwidth Number(PBN)for requested mode + unsigned pbnForMode(const ModesetInfo & modesetInfo); +} + +#endif //INCLUDED_DP_WATERMARK_H diff --git a/src/common/displayport/inc/dptestutil/dp_testmessage.h b/src/common/displayport/inc/dptestutil/dp_testmessage.h new file mode 100644 index 000000000..b841dc0b9 --- /dev/null +++ b/src/common/displayport/inc/dptestutil/dp_testmessage.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort*********************************\ +* * +* Module: dp_testmessage.h * +* * +\***************************************************************************/ +#ifndef INCLUDED_DP_TESTMESSAGE_H +#define INCLUDED_DP_TESTMESSAGE_H + +#include "dp_auxdefs.h" + +#include "dp_connector.h" + +#define DP_LPRIME_SIZE 20 +namespace DisplayPort +{ + // test request status, for DP and nvapi + typedef enum + { + DP_TESTMESSAGE_REQUEST_STATUS_PENDING = 0, // the request is still be processing + DP_TESTMESSAGE_REQUEST_STATUS_DONE = 1, // request has been processed + DP_TESTMESSAGE_REQUEST_STATUS_ERROR = 2, // error, Dp lib busy with other request + DP_TESTMESSAGE_REQUEST_STATUS_NEWREQUEST = 3, // new request for user + } DP_TESTMESSAGE_REQUEST_STATUS; + + // Request type enum. + typedef enum + { + } DP_TESTMESSAGE_REQUEST_TYPE; + + class TestMessage; + struct ConnectorImpl; + + struct DPTestMessageCompletion : public MessageManager::Message::MessageEventSink + { + TestMessage *parent; + + public: + void setParent(TestMessage *parent) + { + this->parent = parent; + } + // call back function if message fails, the status of the dp lib(testMessageStatus) + // need to be set to DONE + void messageFailed(MessageManager::Message * from, NakData * data); + + // call back function if message complete, the status of the dp lib(testMessageStatus) + // need to be set to DONE. + // If a message has a reply, it is necessary to record the reply in the dp lib to + // send back to user later + void messageCompleted(MessageManager::Message * from); + + }; + + class TestMessage : virtual public Object + { + private: + ConnectorImpl *pConnector; + // check if the user provided request struct is of valid size + inline bool isValidStruct(DP_TESTMESSAGE_REQUEST_TYPE requestType, NvU32 structSize) + { + switch (requestType) + { + default: + return false; + } + } + MessageManager *pMsgManager; + DPTestMessageCompletion diagCompl; + + // Data Structure for Generic Message. 
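+        // replyBytes holds the size of the reply recorded when
+        // messageCompleted() fires, so it can be handed back to the
+        // sendDPTestMessage() caller later.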
+ NvU32 replyBytes; + + public: + + DP_TESTMESSAGE_REQUEST_STATUS testMessageStatus; + + TestMessage() : testMessageStatus(DP_TESTMESSAGE_REQUEST_STATUS_DONE) + { + diagCompl.setParent(this); + pConnector = 0; + pMsgManager = 0; + replyBytes = 0; + } + DP_TESTMESSAGE_STATUS sendDPTestMessage(void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus); + MessageManager * getMessageManager(); + void setupTestMessage(MessageManager *msgManager, ConnectorImpl *connector) + { + pMsgManager = msgManager; + pConnector = connector; + } + + }; +} + + +#endif + diff --git a/src/common/displayport/src/dp_auxretry.cpp b/src/common/displayport/src/dp_auxretry.cpp new file mode 100644 index 000000000..bcc214352 --- /dev/null +++ b/src/common/displayport/src/dp_auxretry.cpp @@ -0,0 +1,315 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* Module: dp_auxretry.cpp * +* Interface implemented by library client. * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_auxbus.h" +#include "dp_auxretry.h" +#include "dp_messageheader.h" + +#include "displayport.h" + +using namespace DisplayPort; + +// +// Read a DPCD address. +// - allows size greater than single transaction/burst size +// - handles defer retries +// - handles nacks with incomplete data +// +AuxRetry::status AuxRetry::readTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + unsigned completed; + AuxBus::status s; + + DP_ASSERT( size <= aux->transactionSize() ); + + do + { + s = aux->transaction(AuxBus::read, AuxBus::native, address, buffer, size, &completed); + + // + // Got success & requested data. Also size of returned data is + // expected & non zero. + // + if ((s == AuxBus::success) && (completed == size) && (completed != 0)) + { + return ack; + } + else + { + // + // Handle defer case with a simple retry + // + if (s == AuxBus::defer) + { + if (retries) + { + --retries; + continue; + } + + return defer; + } + + // + // Nack shouldn't happen in general. Unsupported registers + // are supposed to ACK with size of 0. + // + if ( s == AuxBus::nack ) + { + return nack; + } + + if ( completed == 0 ) + { + return unsupportedRegister; + } + + // + // We got less data back than we requested... 
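+            // (for instance, completed == 3 when 16 bytes were requested)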
+ // It's unclear when this might happen in the spec. + // We can either + // 1. Split the read into multiple pieces + // (Dangerous since we may receive non-atomic updates) + // 2. Retry + // + if ( completed < size ) + { + // + // Retry + // + if (retries) + { + --retries; + continue; + } + else + { + // Closest approximation is a defer + return defer; + } + } + } + } while(retries); + + if ((s == AuxBus::defer) || (completed < size)) + { + return defer; + } + + return ack; +} + +// +// Write a DPCD address. +// - allows size greater than single transaction/burst size +// - handles defer retries +// - handles nacks with incomplete data +// +AuxRetry::status AuxRetry::writeTransaction(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + unsigned completed; + AuxBus::status s; + + DP_ASSERT( size <= aux->transactionSize() ); + + do + { + s = aux->transaction(AuxBus::write, AuxBus::native, address, buffer, size, &completed); + + // + // Got success & requested data. Also size of returned data is + // expected & non zero. + // + if ((s == AuxBus::success) && (completed == size) && (completed != 0)) + { + return ack; + } + else + { + // + // Handle defer case with a simple retry + // + if (s == AuxBus::defer) + { + if (retries) + { + --retries; + continue; + } + + return defer; + } + + // + // Nack shouldn't happen in general. Unsupported registers + // are supposed to ACK with size of 0. + // + if ( s == AuxBus::nack ) + { + return nack; + } + + DP_ASSERT( s == AuxBus::success); + + if ( completed == 0 ) + { + return unsupportedRegister; + } + + // + // Incomplete write? + // Shouldn't happen. Just retry if it does + // + if ( completed < size ) + { + // + // Retry + // + if (retries) + { + --retries; + continue; + } + else + { + // Closest approximation is a defer + return defer; + } + } + } + } while(retries); + + if ((s == AuxBus::defer) || (completed < size)) + { + return defer; + } + + return ack; +} + +// +// Similar to readTransaction except that it supports reading +// larger spans than AuxBus::transactionSize() +// +AuxRetry::status AuxRetry::read(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + for (unsigned i = 0 ; i < size; ) + { + int todo = DP_MIN(size - i, aux->transactionSize()); + status s = readTransaction(address+i, buffer+i, todo, retries); + + if (s != ack) + { + return s; + } + + i += todo; + } + + return ack; +} + +// +// Similar to writeTransaction except that it supports writing +// larger spans than AuxBus::transactionSize() +// +AuxRetry::status AuxRetry::write(int address, NvU8 * buffer, unsigned size, unsigned retries) +{ + for (unsigned i = 0 ; i < size; ) + { + int todo = DP_MIN(size - i, aux->transactionSize()); + status s = writeTransaction(address+i, buffer+i, todo, retries); + + if (s != ack) + { + return s; + } + + i += todo; + } + + return ack; +} + +AuxBus::status AuxLogger::transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, unsigned * pNakReason, + NvU8 offset, NvU8 nWriteTransactions) +{ + AuxBus::status result = bus->transaction(action, type, address, buffer, sizeRequested, sizeCompleted); + hint[0] = 0; + // + // Do the hex dump. 
+ // - We can't make library calls + // - We need to do this in one printf + if (result == success) + { + if (type == native) + if (address == NV_DPCD_MBOX_DOWN_REQ || address == NV_DPCD_MBOX_UP_REP || + address == NV_DPCD_MBOX_DOWN_REP || address == NV_DPCD_MBOX_UP_REQ) + { + unsigned len = *sizeCompleted; + Buffer storage(buffer, len); + BitStreamReader reader(&storage, 0, len*8); + MessageHeader header; + DisplayPort::decodeHeader(&reader, &header, Address(1)); + Address::StringBuffer sb; + DP_USED(sb); + dpHexDump(&hex[0], sizeof(hex), buffer, header.headerSizeBits/8); + dpHexDump(&hex_body[0], sizeof(hex), buffer + header.headerSizeBits/8, len - header.headerSizeBits/8); +#if defined(_DEBUG) || defined(DEBUG) + const char * name = ""; + if (header.isTransactionStart && action==write && len > header.headerSizeBits/8) + name = getRequestId(buffer[header.headerSizeBits/8]); + + DP_LOG(("DP-AUX> %s%s%s%s%04Xh hint(to:%s %s%s %s #%d) { %s| %s}", + sizeRequested == *sizeCompleted ? "" : "INCOMPLETE ", getStatus(result), + getAction(action), getType(type), address, + header.address.toString(sb), header.isTransactionStart ? "S" : "", + header.isTransactionEnd ? "E" : "", name, header.messageNumber, + hex, hex_body)); +#endif + return result; + } + } + else + hex[0] = 0; + + dpHexDump(&hex[0], sizeof(hex), buffer, *sizeCompleted); + DP_LOG(("DP-AUX> %s%s%s%s%04Xh { %s }", sizeRequested == *sizeCompleted ? "" : "INCOMPLETE ", + getStatus(result), getAction(action), getType(type), address, hex)); + + return result; +} + +AuxBus * DisplayPort::CreateAuxLogger(AuxBus * auxBus) +{ + return new AuxLogger(auxBus); +} diff --git a/src/common/displayport/src/dp_bitstream.cpp b/src/common/displayport/src/dp_bitstream.cpp new file mode 100644 index 000000000..39117de08 --- /dev/null +++ b/src/common/displayport/src/dp_bitstream.cpp @@ -0,0 +1,204 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_bitstream.c * +* Implementation of Big Endian bit streams. 
* +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" + +using namespace DisplayPort; +bool BitStreamReader::read(unsigned * value, unsigned bits) +{ + unsigned topbit = (7- (this->bitsOffset & 7)); + + if (this->bitsOffset + bits > this->bitsEnd) + { + return false; + } + + // + // We're filling the byte down from 'topbit' towards 0. + // Can we fit all of the bits starting at topbit before + // overflowing to the next byte? + // + if (bits <= (topbit+1)) + { + int bottombit = topbit - (bits-1); + *value = (this->buffer()->data[this->bitsOffset / 8] >> bottombit) & ((1 << bits)-1); + + this->bitsOffset+=bits; + return true; + } + + // + // We're either reading too many bits or we're straddling + // a byte boundary. Serialize bit by bit. + // NOTE: This scenario is entire unlikely. Don't optimize. + // + + *value = 0; + while (bits) + { + unsigned bit; + if (!read(&bit, 1)) + { + return false; + } + *value = *value * 2 + bit; + bits--; + } + + return true; +} + +unsigned BitStreamReader::readOrDefault(unsigned bits, unsigned defaultValue) +{ + unsigned value; + + if (read(&value, bits)) + { + return value; + } + else + { + return defaultValue; + } +} + + +bool BitStreamReader::align(unsigned align) +{ + // Verify alignment is a power of two + if (!(align && ((align & (align - 1)) == 0))) + { + DP_ASSERT(0); + } + else + { + if (this->bitsOffset & (align - 1)) + { + this->bitsOffset = (this->bitsOffset + align) &~ (align - 1); + } + } + return this->bitsOffset <= this->bitsEnd; +} + +bool BitStreamWriter::write(unsigned value, unsigned bits) +{ + DP_ASSERT((value < (1ULL << bits)) && "Value out of range"); + unsigned topbit = (7- (this->bitsOffset & 7)); + + if (this->bitsOffset + bits > this->buffer()->length * 8) + { + this->buffer()->resize((this->bitsOffset + bits+7)/8); + } + + // + // We're filling the byte down from 'topbit' towards 0. + // Can we fit all of the bits starting at topbit before + // overflowing to the next byte? + // + if (bits <= (topbit+1)) + { + int bottombit = topbit - (bits-1); + NvU8 clearmask = ((1 << bits)-1) << bottombit; + + this->buffer()->data[this->bitsOffset / 8] = (NvU8)((this->buffer()->data[this->bitsOffset / 8] &~ clearmask) | (value << bottombit)); + + this->bitsOffset+=bits; + return true; + } + + // + // We're either writing too many bits or we're straddling + // a byte boundary. Serialize bit by bit. + // NOTE: This scenario is entire unlikely. Don't optimize. 
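+    //   Worked example: with bitsOffset == 3, topbit is 4, so an 8-bit
+    //   write cannot fit in the current byte; the loop below emits the
+    //   value MSB-first, filling bits 4..0 of this byte and bits 7..5
+    //   of the next one.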
+ // + + while (bits) + { + bits --; + if (!write( (value >> bits) & 1, 1)) + { + return false; + } + } + + return true; +} + +bool BitStreamWriter::align(unsigned align) +{ + // Verify alignment is a power of two + if (!(align && ((align & (align - 1)) == 0))) + { + DP_ASSERT(0); + } + else + { + if (this->bitsOffset & (align - 1)) + return this->write(0, align - (this->bitsOffset & (align - 1))); + } + + return true; +} + +unsigned BitStreamReader::offset() +{ + return this->bitsOffset; +} + +unsigned BitStreamWriter::offset() +{ + return this->bitsOffset; +} + +Buffer * BitStreamWriter::buffer() +{ + return this->targetBuffer; +} + +Buffer * BitStreamReader::buffer() +{ + return this->sourceBuffer; +} + + +BitStreamWriter::BitStreamWriter(Buffer * buffer, unsigned bitsOffset) +{ + this->targetBuffer = buffer; + this->bitsOffset = bitsOffset; +} + + +BitStreamReader::BitStreamReader(Buffer * buffer, unsigned bitsOffset, unsigned bitsCount) +{ + this->sourceBuffer = buffer; + this->bitsOffset = bitsOffset; + this->bitsEnd = bitsCount + bitsOffset; +} diff --git a/src/common/displayport/src/dp_buffer.cpp b/src/common/displayport/src/dp_buffer.cpp new file mode 100644 index 000000000..abc0d0f9c --- /dev/null +++ b/src/common/displayport/src/dp_buffer.cpp @@ -0,0 +1,267 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_buffer.cpp * +* Resizable byte buffer and stream operations * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_buffer.h" + +using namespace DisplayPort; + +void DisplayPort::swapBuffers(Buffer & left, Buffer & right) +{ + NvU8 *tmpData = left.data; + unsigned tmpLength = left.length; + unsigned tmpCapacity = left.capacity; + bool tmpErrorState = left.errorState; + + left.data = right.data; + left.length = right.length; + left.capacity = right.capacity; + left.errorState = right.errorState; + + right.data = tmpData; + right.length = tmpLength; + right.capacity = tmpCapacity; + right.errorState= tmpErrorState; +} + + +bool Stream::seek(unsigned where) +{ + // + // Allow seek to any position in the file INCLUDING + // the first byte past the end of the file. 
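+    // For example, on a 10-byte buffer seek(10) succeeds (a subsequent
+    // write() will grow the buffer), while seek(11) fails.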
+ // + if (where > this->parent->length) + { + return false; + } + + this->byteOffset = where; + + return true; +} + +bool Stream::read(NvU8 * buffer, unsigned size) +{ + unsigned stopReadAt = this->byteOffset + size; + + if (stopReadAt > this->parent->length) + { + return false; + } + + dpMemCopy(buffer, this->parent->data + this->byteOffset, size); + this->byteOffset = stopReadAt; + + return true; +} + +bool Buffer::resize(unsigned stopWriteAt) +{ + bool mustIncrease = stopWriteAt > this->capacity; + + if (mustIncrease || (stopWriteAt * 4 < this->capacity) ) + { + unsigned newCapacity; + NvU8 * newBuffer; + + newCapacity = 32; + + while (newCapacity <= stopWriteAt) + { + newCapacity *= 2; + } + + if (newCapacity == this->capacity) { + this->length = stopWriteAt; + return true; + } + + newBuffer = (NvU8 *)dpMalloc(sizeof(NvU8) * newCapacity); + + if (!newBuffer) + { + if (mustIncrease) + { + if (this->data) + { + dpFree(this->data); + } + + this->errorState = true; + this->data = 0; + this->capacity = 0; + this->length = 0; + } + else + newCapacity = this->capacity; + + return false; + } + + if (this->data) + { + dpMemCopy(newBuffer, this->data, DP_MIN(newCapacity, this->length)); + dpFree(this->data); + } + + this->data = newBuffer; + this->capacity = newCapacity; + + } + + this->length = stopWriteAt; + return true; +} + +void Buffer::memZero() +{ + if (this->data) + dpMemZero(this->data, this->length); +} + +bool Stream::write(NvU8 * buffer, unsigned size) +{ + unsigned stopWriteAt = this->byteOffset + size; + + if (stopWriteAt > this->parent->length) + { + this->parent->resize(stopWriteAt); + } + + if (isError()) + return false; + + dpMemCopy( this->parent->data + this->byteOffset, buffer, size); + this->byteOffset = stopWriteAt; + this->parent->length = DP_MAX(this->parent->length, stopWriteAt); + + return true; +} + +unsigned Stream::remaining() +{ + return this->parent->length - this->byteOffset; +} + +unsigned Stream::offset() +{ + return this->byteOffset; +} + +Buffer::~Buffer() +{ + reset(); +} + +void Buffer::reset() +{ + if (this->data) + { + dpFree(this->data); + } + + length = 0; + capacity = 0; + data = 0; + errorState = false; +} + +bool Buffer::isError() const +{ + return this->errorState; +} + + +Stream::Stream(Buffer * buffer) + : parent(buffer), byteOffset(0) +{ +} + +bool Stream::isError() const +{ + return this->parent->errorState; +} + +Buffer::Buffer() + : data(0), length(0), capacity(0), errorState(false) +{ +} + +Buffer::Buffer(NvU8 * src, unsigned size) + : data(0), length(0), capacity(0), errorState(false) +{ + if (src && size && resize(size) && data) + dpMemCopy(data, src, size); +} + +Buffer::Buffer(const Buffer & other) + : data(0), length(0), capacity(0), errorState(false) +{ + if (other.isError()) + { + errorState = true; + } + else + { + if (resize(other.getLength()) && other.getData()) + dpMemCopy(getData(), other.getData(), getLength()); + } +} + +Buffer & Buffer::operator = (const Buffer & other) +{ + if (other.isError()) + { + errorState = true; + } + else + { + if (resize(other.getLength())) + dpMemCopy(getData(), other.getData(), getLength()); + } + return *this; +} + + +bool Buffer::operator== (const Buffer & other) const +{ + if (length != other.length) + return false; + + for (unsigned i = 0; i < length; i++) + { + if (data[i] != other.data[i]) + return false; + + } + + return true; +} diff --git a/src/common/displayport/src/dp_configcaps.cpp b/src/common/displayport/src/dp_configcaps.cpp new file mode 100644 index 000000000..a3e583e10 --- 
/dev/null +++ b/src/common/displayport/src/dp_configcaps.cpp @@ -0,0 +1,3170 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_configcaps.cpp * +* Abstraction for basic caps registers * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_auxbus.h" +#include "dp_configcaps.h" +#include "dp_auxdefs.h" +#include "displayport.h" + +using namespace DisplayPort; + +struct DPCDHALImpl : DPCDHAL +{ + AuxRetry bus; + Timer * timer; + bool dpcdOffline; + bool gpuDP1_2Supported; + bool gpuDP1_4Supported; + bool bGrantsPostLtRequest; + bool pc2Disabled; + bool uprequestEnable; + bool upstreamIsSource; + bool bMultistream; + bool bGpuFECSupported; + bool bLttprSupported; + bool bBypassILREdpRevCheck; + NvU32 overrideDpcdMaxLinkRate; + NvU32 overrideDpcdRev; + NvU32 overrideDpcdMaxLaneCount; + + struct _LegacyPort: public LegacyPort + { + DwnStreamPortType type; + DwnStreamPortAttribute nonEDID; + + NvU64 maxTmdsClkRate; + + DwnStreamPortType getDownstreamPortType() + { + return type; + } + + DwnStreamPortAttribute getDownstreamNonEDIDPortAttribute() + { + return nonEDID; + } + + NvU64 getMaxTmdsClkRate() + { + return maxTmdsClkRate; + } + + } legacyPort[16]; + + struct + { + unsigned revisionMajor, revisionMinor; // DPCD offset 0 + bool supportsESI; + LinkRate maxLinkRate; // DPCD offset 1 + unsigned maxLaneCount; // DPCD offset 2 + unsigned maxLanesAtHBR; + unsigned maxLanesAtRBR; + bool enhancedFraming; + bool bPostLtAdjustmentSupport; + + bool supportsNoHandshakeTraining; + bool bSupportsTPS4; + unsigned NORP; // DPCD offset 4 + + bool detailedCapInfo; // DPCD offset 5 + bool downStreamPortPresent; + NvU8 downStreamPortType; + + unsigned downStreamPortCount; // DPCD offset 7 + bool ouiSupported; + bool msaTimingParIgnored; + + NvU16 linkRateTable[NV_DPCD_SUPPORTED_LINK_RATES__SIZE]; // DPCD offset 10 ~ 1F + + bool supportsMultistream; // DPCD offset 21 + unsigned numberAudioEndpoints; // DPCD offset 22 + bool overrideToSST; // force to SST even if MST capable + bool noLinkTraining; // DPCD offset 330h + + bool extendedRxCapsPresent; // DPCD offset 000Eh [7] - Extended Receiver Capability present + + // DPCD Offset 2211h; + unsigned 
extendedSleepWakeTimeoutRequestMs; + // DPCD Offset 0119h [0] - If we grant the extendedSleepWakeTimeoutRequest + bool bExtendedSleepWakeTimeoutGranted; + + // 0x2206, if the sink supports 128b/132b + bool bDP20ChannelCodingSupported; + // 0x2215 + bool bUHBR_10GSupported; + bool bUHBR_13_5GSupported; + bool bUHBR_20GSupported; + + + // DPCD Offset F0002h - Number of Physical Repeaters present (after mapping) between Source and Sink + unsigned phyRepeaterCount; + // DPCD offset 700 - EDP_DPCD_REV + unsigned eDpRevision; + + struct + { + unsigned revisionMajor, revisionMinor; // DPCD offset F0000h + LinkRate maxLinkRate; // DPCD offset F0001h + unsigned maxLaneCount; // DPCD offset F0004h + unsigned phyRepeaterExtendedWakeTimeoutMs; // DPCD offset F0005h + + // 0xF0006, if the PHY Repeater supports 128b/132b + bool bDP20ChannelCodingSupported; + // 0xF0007 + bool UHBR_10GSupported; + bool UHBR_13_5GSupported; + bool UHBR_20GSupported; + } repeaterCaps; + + PCONCaps pconCaps; + vesaPsrSinkCaps psrCaps; + NvU32 videoFallbackFormats; // DPCD offset 0200h + + } caps; + + struct + { + unsigned sinkCount; // DPCD offset 200 + bool automatedTestRequest; + bool cpIRQ; + bool mccsIRQ; + bool downRepMsgRdy; + bool upReqMsgRdy; + bool rxCapChanged; // DPCD offset 2005 + bool linkStatusChanged; // DPCD offset 2005 + bool streamStatusChanged; // DPCD offset 2005 + bool hdmiLinkStatusChanged; // DPCD offset 2005 + NvU8 eightyBitCustomPat[10]; // DPCD offset 250 - 259 + + struct + { + struct + { + bool clockRecoveryDone; + bool channelEqualizationDone; + bool symbolLocked; + } laneStatus[4]; // DPCD offset 202, 203 + + bool interlaneAlignDone; // DPCD offset 204 + bool downstmPortChng; + bool linkStatusUpdated; + + // + // (ESI specific) signifies that we have link trained and should + // update the link status in the next query to isLinkLost. Keep in + // mind that linkStatusChanged might still be zero. + // + bool linkStatusDirtied; + } laneStatusIntr; + + struct + { + bool testRequestTraining; // DPCD offset 218 + LinkRate testRequestLinkRate; // DPCD offset 219 + unsigned testRequestLaneCount; // DPCD offset 220 + } testTraining; + + struct + { + bool testRequestEdidRead; // DPCD offset 218 + } testEdid; + + struct + { + bool testRequestPattern; // DPCD offset 218 + TestPatternType testPatRequested; // DPCD offset 221 + NvU16 testHorTotalPixels; // DPCD offset 222, 223 + NvU16 testVerTotalLines; // DPCD offset 224, 225 + NvU16 testHorStartPixels; // DPCD offset 226, 227 + NvU16 testVerStartLines; // DPCD offset 228, 229 + NvU16 testHsyncWidthPixels; // DPCD offset 22A, 22B + bool testHsyncPolarity; + NvU16 testVsyncWidthLines; // DPCD offset 22C, 22D + bool testVsyncPolarity; + NvU16 testActiveWidthPixels; // DPCD offset 22E, 22F + NvU16 testActiveHeightLines; // DPCD offset 230, 231 + } testPattern; + + struct + { + bool testRequestPhyCompliance; // DPCD offset 218 + LinkQualityPatternType phyTestPattern; // DPCD offset 248 + } testPhyCompliance; + + } interrupts; + + bool bIndexedLinkrateCapable, bIndexedLinkrateEnabled; + + public: + DPCDHALImpl(AuxBus * bus, Timer * timer) + : bus(bus), + timer(timer), + gpuDP1_2Supported(false), + gpuDP1_4Supported(false), + bGrantsPostLtRequest(false), + uprequestEnable(false), + upstreamIsSource(false), + bMultistream(false), + bGpuFECSupported(false), + bBypassILREdpRevCheck(false), + overrideDpcdMaxLinkRate(0), + overrideDpcdRev(0) + { + // start with default caps. 
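+        // These defaults stand in until parseAndReadCaps() reads the real
+        // DPCD; see the "reset the faked dpcd flag" handling there.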
+ populateFakeDpcd(); + } + + ~DPCDHALImpl() + { + } + + virtual void setAuxBus(AuxBus * bus) + { + this->bus = bus; + } + + bool isDpcdOffline() + { + return dpcdOffline; + } + + void setDPCDOffline(bool bOffline) + { + dpcdOffline = bOffline; + } + + void updateDPCDOffline() + { + NvU8 buffer[16]; + unsigned retries = 16; + // Burst read from 0x00 to 0x0F. + if (AuxRetry::ack != bus.read(NV_DPCD_REV, &buffer[0], sizeof buffer, retries)) + { + dpcdOffline = true; + } + else + { + dpcdOffline = false; + } + } + + void setPC2Disabled(bool disabled) + { + pc2Disabled = disabled; + } + + void setLttprSupported(bool isLttprSupported) + { + bLttprSupported = isLttprSupported; + } + + bool isPC2Disabled() + { + return pc2Disabled; + } + void parseAndReadCaps() + { + NvU8 buffer[16]; + NvU8 byte = 0; + AuxRetry::status status; + unsigned retries = 16; + // Burst read from 0x00 to 0x0F. + + // + // The Extended Receiver Capability field at DPCD Addresses 02200h through 022FFh is valid + // with DPCD Rev. 1.4 (and higher). + // + // A DPRX that supports the Extended Receiver Capability field must set the + // EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT bit in the TRAINING_AUX_RD_INTERVAL + // register (DPCD Address 0000Eh, bit 7) to 1 + // + caps.extendedRxCapsPresent = false; + if (AuxRetry::ack == bus.read(NV_DPCD_TRAINING_AUX_RD_INTERVAL, &byte, sizeof byte)) + { + caps.extendedRxCapsPresent = DRF_VAL(_DPCD14, _TRAINING_AUX_RD_INTERVAL, _EXTENDED_RX_CAP, byte); + } + + if (caps.extendedRxCapsPresent) + { + status = bus.read(NV_DPCD14_EXTENDED_REV, &buffer[0], sizeof buffer, retries); + } + else + { + status = bus.read(NV_DPCD_REV, &buffer[0], sizeof buffer, retries); + } + + if (AuxRetry::ack != status) + { + // Failed to read caps. + // Set an invalid state here and make sure we REMEMBER we couldn't get the caps + caps.revisionMajor = 0; + dpcdOffline = true; + return; + } + + // reset the faked dpcd flag since real LT should be possible now. + dpcdOffline = false; + + // reset edp revision to 0 + caps.eDpRevision = 0; + + if (overrideDpcdRev) + { + // Override the revision no. as DPCD override regkey is set + caps.revisionMajor = DRF_VAL(_DPCD, _REV, _MAJOR, overrideDpcdRev); + caps.revisionMinor = DRF_VAL(_DPCD, _REV, _MINOR, overrideDpcdRev); + } + else + { + caps.revisionMajor = DRF_VAL(_DPCD, _REV, _MAJOR, buffer[0]); + caps.revisionMinor = DRF_VAL(_DPCD, _REV, _MINOR, buffer[0]); + if (isAtLeastVersion(1, 2)) + { + // + // WAR required for panels with MSTAR chip as they report themselves as + // DP1.2 but they don't support DP1.2. Check OUI & ESI sinkCount. if OUI + // is not supported & sinkCount is "0", downgrade the revision to 1.1. + // + if (FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _OUI_SUPPORT, _NO, buffer[7])) + { + // Read the ESI sinkCount & overwrite revision no. if ESI not supported + NvU8 esiBuffer[1] = {0}; + NvU32 sinkCount; + AuxRetry::status status; + // + // Don't just check the transaction status as not-supporting ESI means it may + // NACK a transaction to ESI space or may return "0" as sinkCount. We need + // to override the revision Minor in both cases. 
+ // + status = bus.read(NV_DPCD_SINK_COUNT_ESI, &esiBuffer[0], sizeof esiBuffer); + sinkCount = DRF_VAL(_DPCD, _SINK_COUNT_ESI, _SINK_COUNT, esiBuffer[0]); + + if ((sinkCount == 0) || (status != AuxRetry::ack)) + { + // If ESI not supported then overwrite the revision + caps.revisionMajor = 1; + caps.revisionMinor = 1; + } + } + + // Check if DPCD_DISPLAY_CONTROL_CAPABLE = 1 + if (FLD_TEST_DRF(_DPCD, _EDP_CONFIG_CAP, _DISPLAY_CONTROL_CAPABLE, _YES, buffer[0x0D])) + { + NvU8 edpBuffer[1] = {0}; + status = bus.read(NV_DPCD_EDP_REV, &edpBuffer[0], sizeof edpBuffer); + caps.eDpRevision = DRF_VAL(_DPCD, _EDP, _REV_VAL, edpBuffer[0]); + } + } + } + + bIndexedLinkrateCapable = false; + + if (isAtLeastVersion(1,4) && caps.extendedRxCapsPresent == false) + { + DP_ASSERT(0 && "A DPRX with DPCD Rev. 1.4 (or higher) must have Extended Receiver Capability field."); + } + + caps.supportsESI = (isAtLeastVersion(1,2) && gpuDP1_2Supported); // Support ESI register space only when GPU support DP1.2MST + + if (caps.eDpRevision >= NV_DPCD_EDP_REV_VAL_1_4 || this->bBypassILREdpRevCheck) + { + NvU16 linkRate = 0; + if (getRawLinkRateTable((NvU8*)&caps.linkRateTable[0])) + { + // First entry must be non-zero for validation + if (caps.linkRateTable[0] != 0) + { + bIndexedLinkrateCapable = true; + for (int i = 0; caps.linkRateTable[i] && (i < NV_DPCD_SUPPORTED_LINK_RATES__SIZE); i++) + { + if (linkRate < caps.linkRateTable[i]) + linkRate = caps.linkRateTable[i]; + } + if (linkRate) + caps.maxLinkRate = LINK_RATE_KHZ_TO_MBPS((NvU64)linkRate * DP_LINK_RATE_TABLE_MULTIPLIER_KHZ); + } + } + } + if (!bIndexedLinkrateCapable) + { + if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _1_62_GBPS, buffer[1])) + caps.maxLinkRate = RBR; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _2_70_GBPS, buffer[1])) + caps.maxLinkRate = HBR; + else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _5_40_GBPS, buffer[1])) + caps.maxLinkRate = HBR2; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_BANDWIDTH, _VAL, _8_10_GBPS, buffer[1])) + caps.maxLinkRate = HBR3; + else + { + DP_ASSERT(0 && "Unknown max link rate. Assuming DP 1.1 defaults"); + caps.maxLinkRate = HBR; + } + } + + // + // To prevent WAR being overridden. + // + if (overrideDpcdMaxLaneCount) + { + caps.maxLaneCount = overrideDpcdMaxLaneCount; + } + else + { + caps.maxLaneCount = DRF_VAL(_DPCD, _MAX_LANE_COUNT, _LANE, buffer[0x2]); + } + + if (!IS_VALID_LANECOUNT(caps.maxLaneCount)) + { + DP_ASSERT(0 && "Invalid lane count. Assuming 1"); + caps.maxLaneCount = 1; + } + + caps.bPostLtAdjustmentSupport = FLD_TEST_DRF(_DPCD, _MAX_LANE_COUNT, _POST_LT_ADJ_REQ_SUPPORT, _YES, buffer[0x2]); + caps.enhancedFraming = FLD_TEST_DRF(_DPCD, _MAX_LANE_COUNT, _ENHANCED_FRAMING, _YES, buffer[0x2]); + if (isAtLeastVersion(1,1) && (!caps.enhancedFraming)) + { + DP_ASSERT(0 && "A DPRX with DPCD Rev. 1.1 (or higher) must have enhanced framing capability."); + } + + if (isAtLeastVersion(1,2) && gpuDP1_2Supported && caps.bPostLtAdjustmentSupport) + { + // Source grants post Link training adjustment support + bGrantsPostLtRequest = true; + } + else + { + // Disable post Link training adjustment support whenever sink does not report capability + // This covers the case of MST to SST transition during which initially this flag is set, we need to explicitly reset this + // in order to avoid PostLTAdjustment during LT. 
+ bGrantsPostLtRequest = false; + } + + caps.supportsNoHandshakeTraining = FLD_TEST_DRF(_DPCD, _MAX_DOWNSPREAD, _NO_AUX_HANDSHAKE_LT, _TRUE, buffer[0x3]); + caps.bSupportsTPS4 = FLD_TEST_DRF(_DPCD14, _MAX_DOWNSPREAD, _TPS4_SUPPORTED, _YES, buffer[0x3]); + + caps.NORP = DRF_VAL(_DPCD, _NORP, _VAL, buffer[0x4]) + 1; + + caps.downStreamPortPresent = FLD_TEST_DRF(_DPCD, _DOWNSTREAMPORT, _PRESENT, _YES, buffer[0x5]); + caps.detailedCapInfo = FLD_TEST_DRF(_DPCD, _DOWNSTREAMPORT, _DETAILED_CAP_INFO_AVAILABLE, _YES, buffer[0x5]); + caps.downStreamPortType = DRF_VAL(_DPCD, _DOWNSTREAMPORT, _TYPE, buffer[0x5]); + + switch (DRF_VAL(_DPCD, _DOWNSTREAMPORT, _TYPE, buffer[0x5])) + { + case 0: legacyPort[0].type = DISPLAY_PORT; break; + case 1: legacyPort[0].type = ANALOG_VGA; break; + case 2: legacyPort[0].type = DVI; break; + case 3: legacyPort[0].type = WITHOUT_EDID; break; + default: DP_ASSERT(0 && "Unknown port type"); break; + } + + caps.downStreamPortCount = DRF_VAL(_DPCD, _DOWN_STREAM_PORT, _COUNT, buffer[0x7]); + caps.msaTimingParIgnored = FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _MSA_TIMING_PAR_IGNORED, _YES, buffer[0x7]); + caps.ouiSupported = FLD_TEST_DRF(_DPCD, _DOWN_STREAM_PORT, _OUI_SUPPORT, _YES, buffer[0x7]); + + if (caps.downStreamPortPresent && !caps.downStreamPortCount) + { + DP_LOG(("DPHAL> Non-compliant device, reporting downstream port present, but no downstream ports. Overriding port count to 1.")); + caps.downStreamPortCount = 1; + } + + // Burst read from 0x20 to 0x22. + bus.read(NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS, &buffer[0], 0x22 - 0x20 + 1); + + caps.videoFallbackFormats = buffer[0]; + + caps.supportsMultistream = FLD_TEST_DRF(_DPCD, _MSTM, _CAP, _YES, buffer[0x1]); + + caps.numberAudioEndpoints = (unsigned)(DRF_VAL(_DPCD, _NUMBER_OF_AUDIO_ENDPOINTS, _VALUE, buffer[0x2])); + + // 02206h + if (AuxRetry::ack == bus.read(NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING, &buffer[0], 1)) + { + caps.bDP20ChannelCodingSupported = + FLD_TEST_DRF(_DPCD14, + _EXTENDED_MAIN_LINK_CHANNEL_CODING, + _ANSI_128B_132B, + _YES, + buffer[0]); + if (caps.bDP20ChannelCodingSupported == true) + { + // 0x2215 + if (AuxRetry::ack == bus.read(NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES, &buffer[0], 1)) + { + caps.bUHBR_10GSupported = + FLD_TEST_DRF(_DPCD20, + _128B_132B_SUPPORTED_LINK_RATES, + _UHBR10, + _YES, + buffer[0]); + + caps.bUHBR_13_5GSupported = + FLD_TEST_DRF(_DPCD20, + _128B_132B_SUPPORTED_LINK_RATES, + _UHBR13_5, + _YES, + buffer[0]); + + caps.bUHBR_20GSupported = + FLD_TEST_DRF(_DPCD20, + _128B_132B_SUPPORTED_LINK_RATES, + _UHBR20, + _YES, + buffer[0]); + } + DP_ASSERT(caps.bUHBR_10GSupported && "Unknown max link rate or HBR2 without at least DP 1.2. 
Assuming DP 1.1 defaults"); + } + } + + if (bLttprSupported) + { + // Burst read from 0xF0000 to 0xF0007 + if (AuxRetry::ack == bus.read(NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV, &buffer[0], 0x8, retries)) + { + caps.repeaterCaps.revisionMinor = DRF_VAL(_DPCD14, _LT_TUNABLE_PHY_REPEATER_REV, _MINOR, buffer[0x0]); + caps.repeaterCaps.revisionMajor = DRF_VAL(_DPCD14, _LT_TUNABLE_PHY_REPEATER_REV, _MAJOR, buffer[0x0]); + + if (lttprIsAtLeastVersion(1, 4)) + { + caps.phyRepeaterCount = mapPhyRepeaterVal(DRF_VAL(_DPCD14, _PHY_REPEATER_CNT, _VAL, buffer[0x2])); + + if (caps.phyRepeaterCount != 0) + { + if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _1_62_GBPS, buffer[1])) + caps.repeaterCaps.maxLinkRate = RBR; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _2_70_GBPS, buffer[1])) + caps.repeaterCaps.maxLinkRate = HBR; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _5_40_GBPS, buffer[1])) + caps.repeaterCaps.maxLinkRate = HBR2; + else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_RATE_PHY_REPEATER, _VAL, _8_10_GBPS, buffer[1])) + caps.repeaterCaps.maxLinkRate = HBR3; + else + { + DP_ASSERT(0 && "Unknown max link rate or HBR2 without at least DP 1.2. Assuming DP 1.1 defaults"); + caps.repeaterCaps.maxLinkRate = HBR; + } + + caps.repeaterCaps.maxLaneCount = + DRF_VAL(_DPCD14, _MAX_LANE_COUNT_PHY_REPEATER, + _VAL, buffer[0x4]); + + // The cumulative number of 10ms. + caps.repeaterCaps.phyRepeaterExtendedWakeTimeoutMs = + DRF_VAL(_DPCD14, + _PHY_REPEATER_EXTENDED_WAKE_TIMEOUT, + _REQ, buffer[0x5]) * 10; + + // An LTTPR that supports 128b/132b channel coding shall program this register to 20h. + if (lttprIsAtLeastVersion(2, 0)) + { + caps.repeaterCaps.bDP20ChannelCodingSupported = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_MAIN_LINK_CHANNEL_CODING, + _128B_132B_SUPPORTED, + _YES, + buffer[6]); + + caps.repeaterCaps.UHBR_10GSupported = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_128B_132B_RATES, + _10G_SUPPORTED, + _YES, + buffer[7]); + + caps.repeaterCaps.UHBR_13_5GSupported = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_128B_132B_RATES, + _13_5G_SUPPORTED, + _YES, + buffer[7]); + + caps.repeaterCaps.UHBR_20GSupported = + FLD_TEST_DRF(_DPCD14, + _PHY_REPEATER_128B_132B_RATES, + _20G_SUPPORTED, + _YES, + buffer[7]); + + if (buffer[7] && !caps.repeaterCaps.bDP20ChannelCodingSupported) + { + DP_ASSERT(0 && "UHBR is supported without 128b/132b Channel Encoding Supported!"); + } + } + } + else + { + caps.repeaterCaps.maxLinkRate = 0; + } + } + else + { + // not supported DP revision, we should not be doing LTTPR training + caps.phyRepeaterCount = 0; + caps.repeaterCaps.maxLinkRate = 0; + } + } + } + + // Check if the device requests extended sleep wake timeout + if (AuxRetry::ack == bus.read(NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST, &buffer[0], 1)) + { + if (buffer[0] == NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_1MS) + { + caps.extendedSleepWakeTimeoutRequestMs = DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_DEFAULT_MS; + } + else + { + caps.extendedSleepWakeTimeoutRequestMs = buffer[0] * 20; + } + } + else + { + caps.extendedSleepWakeTimeoutRequestMs = 0; + } + + byte = 0U; + dpMemZero(&caps.psrCaps, sizeof(vesaPsrSinkCaps)); + + status = bus.read(NV_DPCD_EDP_PSR_VERSION, &byte, sizeof byte); + if (status == AuxRetry::ack && byte > 0U) + { + caps.psrCaps.psrVersion = byte; + } + + if (caps.psrCaps.psrVersion) + { + unsigned psrSetupTimeMap[8] = { 330U, 275U, 220U, 165U, 110U, 55U, 0U }; + byte = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR_CAP, &byte, 
sizeof byte)) + { + caps.psrCaps.linkTrainingRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP, _LT_NEEDED, _YES, byte); + caps.psrCaps.psrSetupTime = + psrSetupTimeMap[DRF_VAL(_DPCD_EDP, _PSR_CAP,_SETUP_TIME, byte)]; + caps.psrCaps.yCoordinateRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP,_Y_COORD_NEEDED, _YES, byte); + caps.psrCaps.psr2UpdateGranularityRequired = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CAP,_GRAN_REQUIRED, _YES, byte); + } + + // Version 2 supports PSR2 and SU + if (caps.psrCaps.psrVersion == 2U) + { + NvU16 xGranular = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_X_GRANULARITY_H, &byte, sizeof byte)) + { + xGranular = byte; + } + + byte = 0U; + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_X_GRANULARITY_L, &byte, sizeof byte)) + { + xGranular = (xGranular << 8U) | byte; + } + + caps.psrCaps.suXGranularity = xGranular; + } + + // version 3 supports Y coordinate + if (caps.psrCaps.psrVersion > 2U) + { + if (AuxRetry::ack == bus.read(NV_DPCD_EDP_PSR2_Y_GRANULARITY, &byte, sizeof byte)) + { + caps.psrCaps.suYGranularity = byte; + } + } + } + + parsePortDescriptors(); + } + + virtual PCONCaps * getPCONCaps() + { + return &(caps.pconCaps); + } + + virtual unsigned getRevisionMajor() // DPCD offset 0 + { + return caps.revisionMajor; + } + + virtual unsigned getRevisionMinor() + { + return caps.revisionMinor; + } + + virtual unsigned lttprGetRevisionMajor() // DPCD offset F0000h + { + return caps.repeaterCaps.revisionMajor; + } + + virtual unsigned lttprGetRevisionMinor() + { + return caps.repeaterCaps.revisionMinor; + } + + virtual LinkRate getMaxLinkRate() // DPCD offset 1 * 27000000 + { + if (caps.phyRepeaterCount == 0) + return caps.maxLinkRate; + else + return DP_MIN(caps.maxLinkRate, caps.repeaterCaps.maxLinkRate); + } + + virtual unsigned getMaxLaneCount() // DPCD offset 2 + { + if (caps.phyRepeaterCount == 0) + return caps.maxLaneCount; + else + return DP_MIN(caps.maxLaneCount, caps.repeaterCaps.maxLaneCount); + } + + virtual bool getNoLinkTraining() + { + return caps.noLinkTraining; + } + + virtual unsigned getPhyRepeaterCount() + { + return caps.phyRepeaterCount; + } + + // Max lanes supported at the desired link rate. 
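+ // caps.maxLanesAtHBR / caps.maxLanesAtRBR are only populated by the
+ // skipCableBWCheck() override routine below; when they are zero, the
+ // regular getMaxLaneCount() value is returned unchanged.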
+ virtual unsigned getMaxLaneCountSupportedAtLinkRate(LinkRate linkRate) + { + if (linkRate == HBR) + { + if (caps.maxLanesAtHBR) + { + return DP_MIN(caps.maxLanesAtHBR, getMaxLaneCount()); + } + } + else if (linkRate == RBR) + { + if (caps.maxLanesAtRBR) + { + return DP_MIN(caps.maxLanesAtRBR, getMaxLaneCount()); + } + } + // None of the above cases got hit, simply return the max lane count + return getMaxLaneCount(); + } + + virtual bool getEnhancedFraming() + { + return caps.enhancedFraming; + } + + virtual bool getDownstreamPort(NvU8 *portType) // DPCD offset 5 + { + *portType = caps.downStreamPortType; + return caps.downStreamPortPresent; + } + + virtual bool getSupportsNoHandshakeTraining() + { + return caps.supportsNoHandshakeTraining; + } + + virtual unsigned getLegacyPortCount() // DPCD offset 7 + { + return caps.downStreamPortCount; + } + + virtual LegacyPort * getLegacyPort(unsigned index) + { + return &legacyPort[index]; + } + + virtual bool getMsaTimingparIgnored() + { + return caps.msaTimingParIgnored; + } + + virtual bool getOuiSupported() + { + return caps.ouiSupported; + } + + virtual bool getSDPExtnForColorimetry() + { + bool bSDPExtnForColorimetry = false; + NvU8 byte = 0; + if (caps.extendedRxCapsPresent) + { + if (AuxRetry::ack == bus.read(NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST, &byte, sizeof byte)) + { + bSDPExtnForColorimetry = FLD_TEST_DRF(_DPCD14, _EXTENDED_DPRX_FEATURE_ENUM_LIST, + _VSC_SDP_EXT_FOR_COLORIMETRY, _YES, byte); + } + } + return bSDPExtnForColorimetry; + } + + virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, size_t modelNameLength, NvU8 chipRevision) + { + NvU8 ouiBuffer[16]; + + // The first 3 bytes are IEEE_OUI. 2 hex digits per register. + ouiBuffer[0] = (ouiId >> 16) & 0xFF; + ouiBuffer[1] = (ouiId >> 8) & 0xFF; + ouiBuffer[2] = ouiId & 0xFF; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (modelNameLength > NV_DPCD_SOURCE_DEV_ID_STRING__SIZE) + { + DP_LOG(("DPHAL> setOuiSource(): modelNameLength should not be greater than 6")); + modelNameLength = NV_DPCD_SOURCE_DEV_ID_STRING__SIZE; + } + + // Next 6 bytes are Device Identification String. + for (unsigned int i = 0; i < modelNameLength; i++) + { + ouiBuffer[3+i] = *model; + if (*model) + model++; + } + ouiBuffer[9] = chipRevision; + + for (int i = 0xA; i<=0xF; ++i) + ouiBuffer[i] = 0; + + return bus.write(NV_DPCD_SOURCE_IEEE_OUI, &ouiBuffer[0], sizeof ouiBuffer); + } + + virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) + { + NvU8 ouiBuffer[16]; + int address = NV_DPCD_SINK_IEEE_OUI; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // If buffer size is larger than dev_id size, the extras are not used. + // If buffer size is smaller, than we can only get certain bytes. + if (modelNameBufferSize > NV_DPCD_SOURCE_DEV_ID_STRING__SIZE) + { + modelNameBufferSize = NV_DPCD_SOURCE_DEV_ID_STRING__SIZE; + } + + // + // Check if there is a downstream facing port (DFP) + // If DFP is present, device is a branch device - use branch offset + // Else device is a sink device - use sink offset + // + if(caps.downStreamPortPresent) + { + address = NV_DPCD_BRANCH_IEEE_OUI; + } + + if (AuxRetry::ack != bus.read(address, &ouiBuffer[0], sizeof ouiBuffer)) + { + *modelName = 0; + ouiId = 0; + chipRevision = 0; + return false; + } + // The first 3 bytes are IEEE_OUI. 2 hex digits per register. 
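+ // ouiBuffer[0..2] are packed into ouiId with byte 0 in the least
+ // significant position; the write path in setOuiSource() above packs
+ // byte 0 from the most significant byte of ouiId.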
+ ouiId = ouiBuffer[0] | (ouiBuffer[1] << 8) | (ouiBuffer[2] << 16); + + // Next 6 bytes are Device Identification String, copy as much as we can (limited buffer case). + unsigned int i; + for (i = 0; i < modelNameBufferSize; i++) + modelName[i] = ouiBuffer[3+i]; + + chipRevision = ouiBuffer[9]; + + return true; + } + + virtual bool getSupportsMultistream() // DPCD offset 21h + { + return caps.supportsMultistream && (!caps.overrideToSST); + } + + virtual void setSupportsESI(bool bIsESISupported) + { + caps.supportsESI = bIsESISupported; + } + + // + // Single stream specific caps + // + virtual unsigned getNumberOfAudioEndpoints() // DPCD offset 22h + { + if (caps.numberAudioEndpoints) + return caps.numberAudioEndpoints; + else + return caps.NORP > 1; + } + + virtual bool getGUID(GUID & guid) // DPCD offset 30h + { + NvU8 buffer[DPCD_GUID_SIZE]; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_GUID, &buffer[0], sizeof(buffer)); + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) + { + guid.data[i] = buffer[i]; + } + return true; + } + + virtual AuxRetry::status setGUID(GUID & guid) + { + NvU8 buffer[DPCD_GUID_SIZE]; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + for (unsigned i = 0; i < DPCD_GUID_SIZE; i++) + { + buffer[i] = guid.data[i]; + } + + return bus.write(NV_DPCD_GUID, buffer, sizeof(buffer)); + } + + void parsePortDescriptors() + { + NvU8 basicCaps[128]; + unsigned bytesPerPort = caps.detailedCapInfo ? 4 : 1; + // When Detailed_cap_info_available bit is set to 1, the max number + // of downstream port is limited to 32. Otherwise it supports up to 127 + unsigned maxPorts = caps.detailedCapInfo ? 32 : 127; + unsigned infoByte0; + if (caps.downStreamPortCount > maxPorts) + caps.downStreamPortCount = 1; + unsigned size = (bytesPerPort * caps.downStreamPortCount); + + if (AuxRetry::ack != bus.read(NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT(0), &basicCaps[0], size)) + { + DP_LOG(("DPHAL> Unable to read detailed caps!")); + caps.downStreamPortCount = 0; + return; + } + + if (!((isVersion(1,0)) || + (isVersion(1,1) && basicCaps[0] == 0 && + legacyPort[0].type == ANALOG_VGA))) + { + for (unsigned port = 0; port < caps.downStreamPortCount; port++) + { + // The index to access detailed info byte 0 + infoByte0 = port * bytesPerPort; + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO_DWNSTRM_PORT, _TX_TYPE, basicCaps[infoByte0])) + { + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DISPLAYPORT: + { + legacyPort[port].type = DISPLAY_PORT; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_ANALOG: + { + legacyPort[port].type = ANALOG_VGA; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DVI: + { + legacyPort[port].type = DVI; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_HDMI: + { + NvU8 pConCaps = basicCaps[infoByte0+2]; + + legacyPort[port].type = HDMI; + + caps.pconCaps.maxTmdsClkRate = basicCaps[infoByte0+1]; + + caps.pconCaps.bSourceControlModeSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _SRC_CONTROL_MODE_SUPPORT, _YES, pConCaps); + caps.pconCaps.bConcurrentLTSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _CONCURRENT_LT_SUPPORT, _YES, pConCaps); + caps.pconCaps.maxHdmiLinkBandwidthGbps = + DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, pConCaps); + + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_BITS_PER_COMPONENT_DEF, pConCaps)) + { + case 
NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC: + caps.pconCaps.maxBpc = 10; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC: + caps.pconCaps.maxBpc = 12; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC: + caps.pconCaps.maxBpc = 16; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC: + default: + caps.pconCaps.maxBpc = 8; + break; + } + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_OTHERS_NO_EDID: + { + legacyPort[port].type = WITHOUT_EDID; + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO_DWNSTRM_PORT, _NON_EDID_ATTR, basicCaps[infoByte0])) + { + default: + { + DP_ASSERT(0 && "Unknown non-edid type, assume Reserved"); + legacyPort[port].nonEDID = RESERVED; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_60HZ: + { + legacyPort[port].nonEDID = IL_720_480_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_50HZ: + { + legacyPort[port].nonEDID = IL_720_480_50HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_60HZ: + { + legacyPort[port].nonEDID = IL_1920_1080_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_50HZ: + { + legacyPort[port].nonEDID = IL_1920_1080_50HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_60HZ: + { + legacyPort[port].nonEDID = PG_1280_720_60HZ; + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_50HZ: + { + legacyPort[port].nonEDID = PG_1280_720_50_HZ; + break; + } + } + break; + } + case NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DP_PLUSPLUS: + { + legacyPort[port].type = DISPLAY_PORT_PLUSPLUS; + break; + } + default: + { + DP_ASSERT(0 && "Unknown port type"); + break; + } + } + + // Set the Init value to Zero + legacyPort[port].maxTmdsClkRate = 0; + + if (legacyPort[port].type == DVI || + legacyPort[port].type == HDMI || + legacyPort[port].type == DISPLAY_PORT_PLUSPLUS) + { + legacyPort[port].maxTmdsClkRate = ((NvU64)basicCaps[infoByte0 + 1]) * 2500000; + if (legacyPort[port].maxTmdsClkRate == 0) + { + DP_ASSERT(legacyPort[port].maxTmdsClkRate && "No Max TMDS clock rate limits."); + } + + /* + Bug : 3202060 + Parse Byte 2 as well to check the Dongle supports HDMI FRL Output + If HDMI FRL is supported, the maxTmdsClkRate limit should be removed. + */ + + if (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, basicCaps[infoByte0 + 2])) + { + // Disable the TMDS CLK Limit + legacyPort[port].maxTmdsClkRate = 0; + } + } + } + } + } + + // + // Notifications of external events + // + virtual void notifyIRQ() + { + parseAndReadInterrupts(); + } + + virtual void populateFakeDpcd() + { + dpcdOffline = true; + // fill out the bare minimum caps required ... this should be extended in for more dpcd offsets in future. + caps.revisionMajor = 0x1; + caps.revisionMinor = 0x1; + caps.supportsESI = false; + caps.maxLinkRate = HBR3; + caps.maxLaneCount = 4; + caps.enhancedFraming = true; + caps.downStreamPortPresent = true; + caps.downStreamPortCount = 1; + + // populate the sinkcount interrupt + interrupts.sinkCount = 1; + } + + // DPCD override routine: Max link rate override. + void overrideMaxLinkRate(NvU32 overrideMaxLinkRate) + { + if (overrideMaxLinkRate) + { + caps.maxLinkRate = mapLinkBandiwdthToLinkrate(overrideMaxLinkRate); + } + } + + // DPCD override routine: Max lane count override. 
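+ // The value is also cached in overrideDpcdMaxLaneCount so that a later
+ // parseAndReadCaps() restores the override instead of the sink-reported
+ // lane count (see "To prevent WAR being overridden" above).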
+ void overrideMaxLaneCount(NvU32 maxLaneCount) + { + caps.maxLaneCount = maxLaneCount; + overrideDpcdMaxLaneCount = maxLaneCount; + } + + // DPCD override routine: Max lane count override at a given link rate. + void skipCableBWCheck(NvU32 maxLaneAtHighRate, NvU32 maxLaneAtLowRate) + { + caps.maxLanesAtHBR = maxLaneAtHighRate; + caps.maxLanesAtRBR = maxLaneAtLowRate; + } + + // DPCD override routine: Optimal link config (link rate and lane count) override. + void overrideOptimalLinkCfg(LinkRate optimalLinkRate, + NvU32 optimalLaneCount) + { + caps.maxLinkRate = optimalLinkRate; + caps.maxLaneCount = optimalLaneCount; + } + + // DPCD override routine: Optimal link rate + void overrideOptimalLinkRate(LinkRate optimalLinkRate) + { + caps.maxLinkRate = optimalLinkRate; + } + + virtual void notifyHPD(bool status, bool bSkipDPCDRead) + { + if (!status) + { + // check if dpcd is alive + NvU8 buffer; + unsigned retries = 16; + if (AuxRetry::ack == bus.read(NV_DPCD_REV, &buffer, sizeof buffer, retries)) + return; + + // Support for EDID locking: + // Refill the cache with "default" dpcd data on an unplug event as later on + // the client may send a hot-plug event for edid locked fake device (no real dpcd). + // Also raise flag "dpcdOffline" so that dpcd accesses may be optimized. + populateFakeDpcd(); + return; + } + + // Skip DPCD read if requested. + if (!bSkipDPCDRead) + { + parseAndReadCaps(); + } + + // + // For Allienware eDp Panel more time is required to assert the HPD & + // power on the AUX link. Retry 1 more time if it has failed. This is + // a BAD way to do it but no EDID is available to differentiate here + // this is the first access which happens before EDID read to read caps. + // We also found that some LG panels on HP NBs goes in a bad state after + // factory reset. Retyring 3 times works for them. So making faultyRetries as 3. + // + NvU32 faultyRetries = 3; + while ((dpcdOffline) && (faultyRetries > 0)) + { + // Read the caps again + parseAndReadCaps(); + --faultyRetries; + } + + parseAndReadInterrupts(); + } + + virtual bool isPostLtAdjustRequestSupported() + { + // + // If the upstream DPTX and downstream DPRX both support TPS4, + // TPS4 shall be used instead of POST_LT_ADJ_REQ. 
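+ // In other words, POST_LT_ADJ_REQ is only reported as supported when
+ // the GPU does not support DP 1.4 or the sink does not advertise TPS4.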
+ // + NvBool bTps4Supported = gpuDP1_4Supported && caps.bSupportsTPS4; + return bGrantsPostLtRequest && !bTps4Supported; + } + + virtual void setPostLtAdjustRequestGranted(bool bGrantPostLtRequest) + { + NvU8 data = 0; + + bus.read(NV_DPCD_LANE_COUNT_SET, &data, sizeof data); + + if (bGrantPostLtRequest) + { + data = FLD_SET_DRF(_DPCD, _LANE_COUNT_SET, _POST_LT_ADJ_REQ_GRANTED, _YES, data); + } + + else + { + data = FLD_SET_DRF(_DPCD, _LANE_COUNT_SET, _POST_LT_ADJ_REQ_GRANTED, _NO, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD_LANE_COUNT_SET, &data, sizeof data)) + { + DP_LOG(("DPCONN> Failed to set POST_LT_ADJ_REQ_GRANTED bit.")); + } + } + + virtual bool getIsPostLtAdjRequestInProgress() // DPCD offset 204 + { + NvU8 buffer; + + if (AuxRetry::ack != bus.read(NV_DPCD_LANE_ALIGN_STATUS_UPDATED, &buffer, 1)) + { + DP_LOG(("DPCONN> Post Link Training : Failed to read POST_LT_ADJ_REQ_IN_PROGRESS")); + return false; + } + + return FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, + _POST_LT_ADJ_REQ_IN_PROGRESS, _YES, buffer); + } + + virtual TrainingPatternSelectType getTrainingPatternSelect() + { + NvU8 trainingPat = 0; + TrainingPatternSelectType pattern = TRAINING_DISABLED; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_TRAINING_PATTERN_SET, &trainingPat, sizeof trainingPat); + + trainingPat = DRF_VAL(_DPCD, _TRAINING_PATTERN_SET, _TPS, trainingPat); + + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_NONE) + pattern = TRAINING_DISABLED; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP1) + pattern = TRAINING_PAT_ONE; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP2) + pattern = TRAINING_PAT_TWO; + if (trainingPat == NV_DPCD_TRAINING_PATTERN_SET_TPS_TP3) + pattern = TRAINING_PAT_THREE; + + return pattern; + } + + virtual bool setTrainingMultiLaneSet(NvU8 numLanes, + NvU8 *voltSwingSet, + NvU8 *preEmphasisSet) + { + NvU8 trainingCtrl[DP_MAX_LANES] = {0}; + unsigned writeAddress = NV_DPCD_TRAINING_LANE_SET(0); + NvU8 laneIndex; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + for (laneIndex = 0; laneIndex < numLanes; laneIndex++) + { + if (voltSwingSet[laneIndex] <= NV_DPCD_MAX_VOLTAGE_SWING) + { + trainingCtrl[laneIndex] = FLD_SET_DRF_NUM(_DPCD, _TRAINING_LANE_SET, + _VOLTAGE_SWING, voltSwingSet[laneIndex], + trainingCtrl[laneIndex]); + } + else + { + DP_ASSERT(0 && "Out of bounds voltage swing. Assuming 0"); + } + + if (voltSwingSet[laneIndex] == NV_DPCD_MAX_VOLTAGE_SWING) + { + trainingCtrl[laneIndex] = FLD_SET_DRF(_DPCD, _TRAINING_LANE_SET, + _VOLTAGE_SWING_MAX_REACHED, + _TRUE, trainingCtrl[laneIndex]); + } + + if (preEmphasisSet[laneIndex] <= NV_DPCD_MAX_VOLTAGE_PREEMPHASIS) + { + trainingCtrl[laneIndex] = FLD_SET_DRF_NUM(_DPCD, _TRAINING_LANE_SET, + _PREEMPHASIS, preEmphasisSet[laneIndex], + trainingCtrl[laneIndex]); + } + else + { + DP_ASSERT(0 && "Out of bounds preemphasis. 
Assuming 0"); + } + + if (preEmphasisSet[laneIndex] == NV_DPCD_MAX_VOLTAGE_PREEMPHASIS) + { + trainingCtrl[laneIndex] = FLD_SET_DRF(_DPCD, _TRAINING_LANE_SET, + _PREEMPHASIS_MAX_REACHED, _TRUE, + trainingCtrl[laneIndex]); + } + } + + return(AuxRetry::ack == bus.write(writeAddress, trainingCtrl, (unsigned)numLanes)); + } + + virtual AuxRetry::status setIgnoreMSATimingParamters(bool msaTimingParamIgnoreEn) + { + + NvU8 downspreadCtrl = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + bus.read(NV_DPCD_DOWNSPREAD_CTRL, &downspreadCtrl, sizeof downspreadCtrl); + + if (msaTimingParamIgnoreEn) + downspreadCtrl = FLD_SET_DRF(_DPCD, _DOWNSPREAD_CTRL, _MSA_TIMING_PAR_IGNORED, _TRUE, downspreadCtrl); + else + downspreadCtrl = FLD_SET_DRF(_DPCD, _DOWNSPREAD_CTRL, _MSA_TIMING_PAR_IGNORED, _FALSE, downspreadCtrl); + + return bus.write(NV_DPCD_DOWNSPREAD_CTRL, &downspreadCtrl, sizeof downspreadCtrl); + } + + virtual AuxRetry::status setLinkQualPatternSet(LinkQualityPatternType linkQualPattern, unsigned laneCount) + { + if (caps.revisionMajor <= 0) + { + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + return AuxRetry::nack; + } + + if (this->isVersion(1, 1) == true) + { + NvU8 buffer = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_TRAINING_PATTERN_SET, &buffer, 1)) + { + DP_ASSERT(0 && "Can't read from NV_DPCD_TRAINING_PATTERN_SET."); + return AuxRetry::nack; + } + + // write on bits 3:2 + NvU8 value = ((linkQualPattern << 2) & 0xc) | (buffer & (~0xc)); + return bus.write(NV_DPCD_TRAINING_PATTERN_SET, &value, sizeof value); + } + else if (isAtLeastVersion(1,2) == true) + { + AuxRetry::status requestStatus = AuxRetry::nack ; + + // Set test patterns for all requested lanes + for (unsigned i = 0; i < laneCount; i++) + { + requestStatus = setLinkQualLaneSet(i, linkQualPattern); + if (requestStatus != AuxRetry::ack) + break; + } + + return requestStatus; + } + else + { + DP_ASSERT(0 && "Regs only supported for DP1.2"); + return AuxRetry::unsupportedRegister; + } + } + + virtual AuxRetry::status setLinkQualLaneSet(unsigned lane, LinkQualityPatternType linkQualPattern) + { + NvU8 linkQuality = 0; + unsigned writeAddress = NV_DPCD_LINK_QUAL_LANE_SET(lane); + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (isAtLeastVersion(1,2) == false) + { + DP_ASSERT(0 && "Regs only supported for DP1.2"); + return AuxRetry::unsupportedRegister; + } + + // check if parameter is valid + if (lane >= displayPort_LaneSupported) + { + DP_ASSERT(0 && "Unknown lane selected. 
Assuming Lane 0"); + writeAddress = NV_DPCD_LINK_QUAL_LANE_SET(0); + } + + if (linkQualPattern == LINK_QUAL_DISABLED) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _NO, linkQuality); + if (linkQualPattern == LINK_QUAL_D10_2) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _D10_2, linkQuality); + if (linkQualPattern == LINK_QUAL_SYM_ERROR) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _SYM_ERR_MEASUREMENT_CNT, linkQuality); + if (linkQualPattern == LINK_QUAL_PRBS7) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _PRBS7, linkQuality); + if (linkQualPattern == LINK_QUAL_80BIT_CUST) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _80_BIT_CUSTOM, linkQuality); + if (linkQualPattern == LINK_QUAL_HBR2_COMPLIANCE_EYE) + linkQuality = FLD_SET_DRF(_DPCD, _LINK_QUAL_LANE_SET, _LQS, _HBR2, linkQuality); + if (linkQualPattern == LINK_QUAL_CP2520PAT3) + linkQuality = FLD_SET_DRF(_DPCD14, _LINK_QUAL_LANE_SET, _LQS, _CP2520PAT3, linkQuality); + + return bus.write(writeAddress, &linkQuality, sizeof linkQuality); + } + + virtual AuxRetry::status setMessagingEnable(bool _uprequestEnable, bool _upstreamIsSource) + { + NvU8 mstmCtrl = 0; + + if (!this->isAtLeastVersion(1, 2)) + { + DP_ASSERT(!_uprequestEnable && "Can't enable multistream on DP 1.1"); + return AuxRetry::nack; + } + + uprequestEnable = _uprequestEnable; + upstreamIsSource = _upstreamIsSource; + + // + // Lets not touch the MST enable bit here. + // Branch might be getting driven in MST mode and we do not want to + // change that unless we are sure there are no more streams being driven. + // + if (AuxRetry::ack != bus.read(NV_DPCD_MSTM_CTRL, &mstmCtrl, 1)) + { + DP_LOG(("DPHAL> ERROR! Unable to read 00111h MSTM_CTRL.")); + } + + if (_uprequestEnable) + { + bMultistream = FLD_TEST_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl); + } + else + { + bMultistream = false; + } + mstmCtrl = 0; + if (bMultistream) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl); + if (uprequestEnable) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UP_REQ_EN, _YES, mstmCtrl); + if (upstreamIsSource) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, mstmCtrl); + + return bus.write(NV_DPCD_MSTM_CTRL, &mstmCtrl, sizeof mstmCtrl); + } + + virtual AuxRetry::status setMultistreamLink(bool enable) + { + NvU8 mstmCtrl = 0; + + if (!this->isAtLeastVersion(1, 2)) + { + DP_ASSERT(!enable && "Can't enable multistream on DP 1.1"); + return AuxRetry::nack; + } + + bMultistream = enable; + + if (bMultistream) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl); + if (uprequestEnable) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UP_REQ_EN, _YES, mstmCtrl); + if (upstreamIsSource) + mstmCtrl = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, mstmCtrl); + + return bus.write(NV_DPCD_MSTM_CTRL, &mstmCtrl, sizeof mstmCtrl); + } + + virtual AuxRetry::status setMultistreamHotplugMode(MultistreamHotplugMode notifyType) + { + NvU8 deviceCtrl = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // notifytype == HPD_LONG_PULSE, adapter 0 + if (notifyType == IRQ_HPD) + deviceCtrl = FLD_SET_DRF(_DPCD, _BRANCH_DEV_CTRL, _HOTPLUG_EVENT_TYPE, _IRQ_HPD, deviceCtrl); + + return bus.write(NV_DPCD_BRANCH_DEV_CTRL, &deviceCtrl, sizeof deviceCtrl); + } + + + bool parseTestRequestTraining(NvU8 * buffer /* 0x18-0x28 valid */) + { + if (buffer[1] == 0x6) + interrupts.testTraining.testRequestLinkRate = RBR; + else if (buffer[1] == 0xa) 
+ interrupts.testTraining.testRequestLinkRate = HBR; + else if (buffer[1] == 0x14) + interrupts.testTraining.testRequestLinkRate = HBR2; + else if (buffer[1] == 0x1E) + interrupts.testTraining.testRequestLinkRate = HBR3; + else + { + DP_ASSERT(0 && "Unknown max link rate. Assuming RBR"); + interrupts.testTraining.testRequestLinkRate = RBR; + } + + interrupts.testTraining.testRequestLaneCount = buffer[(0x220 - 0x218)] & 0xf; + + return true; + } + + void parseAutomatedTestRequest(bool testRequestPending) + { + NvU8 buffer[16]; + + interrupts.automatedTestRequest = false; + interrupts.testEdid.testRequestEdidRead = false; + interrupts.testTraining.testRequestTraining = false; + interrupts.testPhyCompliance.testRequestPhyCompliance = false; + + if (!testRequestPending) + { + return; + } + interrupts.automatedTestRequest = true; + + if (AuxRetry::ack != bus.read(NV_DPCD_TEST_REQUEST, &buffer[0], 16)) + { + DP_LOG(("DPHAL> ERROR! Automated test request found. Unable to read 0x218 register.")); + return; + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_LINK_TRAINING, _YES, buffer[0])) + { + interrupts.testTraining.testRequestTraining = parseTestRequestTraining(&buffer[0]); + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_EDID_READ, _YES, buffer[0])) + { + interrupts.testEdid.testRequestEdidRead = true; + } + + if (FLD_TEST_DRF(_DPCD, _TEST_REQUEST, _TEST_PHY_TEST_PATTERN, _YES, buffer[0])) + { + interrupts.testPhyCompliance.testRequestPhyCompliance = parseTestRequestPhy(); + } + } + + virtual bool parseTestRequestPhy() + { + NvU8 buffer = 0; + NvU8 bits = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_PHY_TEST_PATTERN, &buffer, 1)) + { + DP_LOG(("DPHAL> ERROR! Test pattern request found but unable to read NV_DPCD_PHY_TEST_PATTERN register.")); + return false; + } + + if (isVersion(1,0)) + bits = 0; + else + bits = DRF_VAL(_DPCD, _PHY_TEST_PATTERN_SEL, _DP12, buffer); + + if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_NO) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_DISABLED; + else if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_D10_2) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_D10_2; + else if (bits == NV_DPCD_PHY_TEST_PATTERN_SEL_SYM_ERR_MEASUREMENT_CNT) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_SYM_ERROR; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_PRBS7) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_PRBS7; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_80_BIT_CUSTOM) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_80BIT_CUST; + else if (bits == NV_DPCD_LINK_QUAL_LANE_SET_LQS_HBR2) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_HBR2_COMPLIANCE_EYE; + else if (bits == NV_DPCD14_PHY_TEST_PATTERN_SEL_CP2520PAT3) + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_CP2520PAT3; + else + { + DP_ASSERT(0 && "Unknown pattern type, assuming none"); + interrupts.testPhyCompliance.phyTestPattern = LINK_QUAL_DISABLED; + return false; + } + + if (interrupts.testPhyCompliance.phyTestPattern == LINK_QUAL_80BIT_CUST) + { + NvU8 buffer[NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE] = {0}; + if (AuxRetry::ack != bus.read(NV_DPCD_TEST_80BIT_CUSTOM_PATTERN(0), &buffer[0], + NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE)) + { + DP_LOG(("DPHAL> ERROR! Request for 80 bit custom pattern. 
Can't read from 250h.")); + return false; + } + + for (unsigned i = 0; i < NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE; i++) + { + interrupts.eightyBitCustomPat[i] = buffer[i]; + } + } + + return true; + } + + virtual bool interruptCapabilitiesChanged() + { + return interrupts.rxCapChanged; + } + + virtual void clearInterruptCapabilitiesChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _RX_CAP_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getLinkStatusChanged() + { + return interrupts.linkStatusChanged; + } + + virtual void clearLinkStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _LINK_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getHdmiLinkStatusChanged() + { + return interrupts.hdmiLinkStatusChanged; + } + + virtual void clearHdmiLinkStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool getStreamStatusChanged() + { + return interrupts.streamStatusChanged; + } + + virtual void clearStreamStatusChanged() + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _STREAM_STATUS_CHANGED, _YES, irqVector); + bus.write(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + + virtual bool isLinkStatusValid(unsigned lanes) + { + bool linkStatus = true; + + this->setDirtyLinkStatus(true); + this->refreshLinkStatus(); + + for (unsigned lane = 0; lane < lanes ; lane++) + { + linkStatus = linkStatus && interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone && + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone && + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked; + } + + linkStatus = linkStatus && interrupts.laneStatusIntr.interlaneAlignDone; + + return linkStatus; + } + + virtual void refreshLinkStatus() + { + if (interrupts.laneStatusIntr.linkStatusDirtied) + { + if (caps.supportsESI && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + this->fetchLinkStatusESI(); + } + else + { + this->fetchLinkStatusLegacy(); + } + } + } + + virtual void setDirtyLinkStatus(bool dirty) + { + interrupts.laneStatusIntr.linkStatusDirtied = dirty; + } + + void parseAndReadInterruptsESI() + { + NvU8 buffer[16] = {0}; + bool automatedTestRequest; + + if (AuxRetry::ack != bus.read(NV_DPCD_SINK_COUNT_ESI, &buffer[2], 0x2005 - 0x2002 + 1)) + return; + + interrupts.sinkCount = DRF_VAL(_DPCD, _SINK_COUNT_ESI, _SINK_COUNT, buffer[2]); + + // check if edp revision is v1.4 or v1.4a + if ((caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _AUTO_TEST, _YES, buffer[3]); + } + else + { + // if edp rev is v1.4 or v1.4a, then use legacy address for auto test. 
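+ // (i.e. the ESI copy of the AUTO_TEST bit is ignored for these eDP
+ // panels and DEVICE_SERVICE_IRQ_VECTOR at the legacy offset is read
+ // instead.)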
+ NvU8 legacy = 0; + if (AuxRetry::ack != bus.read(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &legacy, 1)) + return; + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _AUTO_TEST, _YES, legacy); + } + + interrupts.cpIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _CP, _YES, buffer[3]); + interrupts.mccsIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _MCCS_IRQ, _YES, buffer[3]); + interrupts.downRepMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, buffer[3]); + interrupts.upReqMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, buffer[3]); + + interrupts.rxCapChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _RX_CAP_CHANGED, _YES, buffer[5]); + interrupts.linkStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _LINK_STATUS_CHANGED, _YES, buffer[5]); + interrupts.streamStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _STREAM_STATUS_CHANGED, _YES, buffer[5]); + interrupts.hdmiLinkStatusChanged = FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _YES, buffer[5]); + // + // Link status changed bit is not necessarily set at all times when the sink + // loses the lane status. Refresh the lane status in any case on an IRQ + // + if ((caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4) && + (caps.eDpRevision != NV_DPCD_EDP_REV_VAL_1_4A)) + { + fetchLinkStatusESI(); + } + else + { + fetchLinkStatusLegacy(); + } + + if (interrupts.linkStatusChanged) + { + this->clearLinkStatusChanged(); + } + + if (interrupts.rxCapChanged) + { + + DP_LOG(("DPHAL> RX Capabilities have changed!")); + parseAndReadCaps(); + this->clearInterruptCapabilitiesChanged(); + } + + if (interrupts.hdmiLinkStatusChanged) + { + this->clearHdmiLinkStatusChanged(); + } + + parseAutomatedTestRequest(automatedTestRequest); + } + + void readLTTPRLinkStatus(NvS32 rxIndex, NvU8 *buffer) + { + int addrLane01Status; + // LINK_STATUS for LTTPR is 3 bytes. (NV_DPCD14_PHY_REPEATER_START(i) + 0x20 ~ 0x22) + int bytesToRead = 3; + + DP_ASSERT((rxIndex > 0 && rxIndex <= 8) && "Invalid rxIndex"); + // + // NV_DPCD14_PHY_REPEATER_START is 0-based. + // rxIndex is 1-based. + // + addrLane01Status = NV_DPCD14_PHY_REPEATER_START(rxIndex - 1) + + NV_DPCD14_LANE0_1_STATUS_PHY_REPEATER; + bus.read(addrLane01Status, buffer, bytesToRead); + } + + void resetIntrLaneStatus() + { + // + // Reset all laneStatus to true. + // These bits can only set to true when all DPRX (including sink and LTTPRs) set + // the corresponding bit to true. Set to true as init value, and later will do &= + // through all the lanes. + // + for (int lane = 0; lane < 4; lane++) + { + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone = true; + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone = true; + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked = true; + } + interrupts.laneStatusIntr.interlaneAlignDone = true; + interrupts.laneStatusIntr.downstmPortChng = true; + interrupts.laneStatusIntr.linkStatusUpdated = true; + } + + void fetchLinkStatusESI() + { + NvU8 buffer[16] = {0}; + NvS32 rxIndex; + + // LINK_STATUS_ESI from 0x200C to 0x200E + int bytesToRead = 3; + + // Reset all laneStatus to true. 
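+ // The loop below then ANDs in the per-lane status from the sink and
+ // from every LTTPR hop, so a single failing hop clears the aggregate
+ // clock-recovery / channel-eq / symbol-lock bits.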
+ resetIntrLaneStatus(); + + for (rxIndex = caps.phyRepeaterCount; rxIndex >= (NvS32) NV0073_CTRL_DP_DATA_TARGET_SINK; rxIndex--) + { + if (rxIndex != NV0073_CTRL_DP_DATA_TARGET_SINK) + { + readLTTPRLinkStatus(rxIndex, &buffer[0xC]); + } + else + { + bus.read(NV_DPCD_LANE0_1_STATUS_ESI, &buffer[0xC], bytesToRead); + } + + for (int lane = 0; lane < 4; lane++) + { + unsigned laneBits = buffer[0xC+lane/2] >> (4*(lane & 1)); + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone &= !!(laneBits & 1); + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone &= !!(laneBits & 2); + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked &= !!(laneBits & 4); + } + + interrupts.laneStatusIntr.interlaneAlignDone &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _INTERLANE_ALIGN_DONE, _YES, buffer[0xE]); + interrupts.laneStatusIntr.downstmPortChng &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _DOWNSTRM_PORT_STATUS_DONE, _YES, buffer[0xE]); + interrupts.laneStatusIntr.linkStatusUpdated &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED_ESI, _LINK_STATUS_UPDATED, _YES, buffer[0xE]); + } + this->setDirtyLinkStatus(false); + } + + void fetchLinkStatusLegacy() + { + NvU8 buffer[16] = {0}; + NvS32 rxIndex; + // LINK_STATUS from 0x202 to 0x204 + int bytesToRead = 3; + + // Reset all laneStatus to true. + resetIntrLaneStatus(); + + for (rxIndex = caps.phyRepeaterCount; rxIndex >= (NvS32) NV0073_CTRL_DP_DATA_TARGET_SINK; rxIndex--) + { + if (rxIndex != NV0073_CTRL_DP_DATA_TARGET_SINK) + { + readLTTPRLinkStatus(rxIndex, &buffer[2]); + } + else + { + bus.read(NV_DPCD_LANE0_1_STATUS, &buffer[2], bytesToRead); + } + + for (int lane = 0; lane < 4; lane++) + { + unsigned laneBits = buffer[2+lane/2] >> (4*(lane & 1)); + interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone &= !!(laneBits & 1); + interrupts.laneStatusIntr.laneStatus[lane].channelEqualizationDone &= !!(laneBits & 2); + interrupts.laneStatusIntr.laneStatus[lane].symbolLocked &= !!(laneBits & 4); + } + + interrupts.laneStatusIntr.interlaneAlignDone &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _INTERLANE_ALIGN_DONE, _YES, buffer[4]); + interrupts.laneStatusIntr.downstmPortChng &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _D0WNSTRM_PORT_STATUS_DONE, _YES, buffer[4]); + interrupts.laneStatusIntr.linkStatusUpdated &= + FLD_TEST_DRF(_DPCD, _LANE_ALIGN_STATUS_UPDATED, _LINK_STATUS_UPDATED, _YES, buffer[4]); + } + this->setDirtyLinkStatus(false); + } + + virtual bool readTraining(NvU8* voltageSwingLane, NvU8* preemphasisLane, + NvU8* trainingScoreLane, NvU8* postCursor, + NvU8 activeLaneCount) + { + NvU8 buffer[0xd] = {0}; + if (voltageSwingLane && preemphasisLane) + { + if (AuxRetry::ack != bus.read(NV_DPCD_LANE0_1_ADJUST_REQ, &buffer[0x6], 2)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_LANE0_1_ADJUST_REQ."); + return false; + } + voltageSwingLane[0] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_DRIVE_CURRENT, buffer[6]); + voltageSwingLane[1] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_DRIVE_CURRENT, buffer[6]); + voltageSwingLane[2] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_DRIVE_CURRENT, buffer[7]); + voltageSwingLane[3] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_DRIVE_CURRENT, buffer[7]); + + preemphasisLane[0] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEX_PREEMPHASIS, buffer[6]); + preemphasisLane[1] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_PREEMPHASIS, buffer[6]); + preemphasisLane[2] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, 
_LANEX_PREEMPHASIS, buffer[7]); + preemphasisLane[3] = DRF_VAL(_DPCD, _LANEX_XPLUS1_ADJUST_REQ, _LANEXPLUS1_PREEMPHASIS, buffer[7]); + + } + if (trainingScoreLane) + { + if (AuxRetry::ack != bus.read(NV_DPCD_TRAINING_SCORE_LANE(0), &buffer[0x8], 4)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_TRAINING_SCORE_LANE(0)."); + return false; + } + trainingScoreLane[0] = buffer[0x8]; + trainingScoreLane[1] = buffer[0x9]; + trainingScoreLane[2] = buffer[0xa]; + trainingScoreLane[3] = buffer[0xb]; + } + if (postCursor) + { + if (AuxRetry::ack != bus.read(NV_DPCD_ADJUST_REQ_POST_CURSOR2, &buffer[0xc], 1)) + { + DP_ASSERT(0 && "Can't read NV_DPCD_ADJUST_REQ_POST_CURSOR2."); + return false; + } + postCursor[0] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 0, buffer[0xc]); + postCursor[1] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 1, buffer[0xc]); + postCursor[2] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 2, buffer[0xc]); + postCursor[3] = DRF_IDX_VAL(_DPCD, _ADJUST_REQ_POST_CURSOR2, _LANE, 3, buffer[0xc]); + } + return true; + } + + virtual bool isLaneSettingsChanged(NvU8* oldVoltageSwingLane, + NvU8* newVoltageSwingLane, + NvU8* oldPreemphasisLane, + NvU8* newPreemphasisLane, + NvU8 activeLaneCount) + { + for (unsigned i = 0; i < activeLaneCount; i++) + { + if (oldVoltageSwingLane[i] != newVoltageSwingLane[i] || + oldPreemphasisLane[i] != newPreemphasisLane[i] ) + { + return true; + } + } + return false; + } + + void parseAndReadInterruptsLegacy() + { + bool automatedTestRequest = false; + NvU8 buffer[16] = {0}; + + if (AuxRetry::ack != bus.read(NV_DPCD_SINK_COUNT, &buffer[0], 2)) + return; + + interrupts.sinkCount = NV_DPCD_SINK_COUNT_VAL(buffer[0]); + + automatedTestRequest = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _AUTO_TEST, _YES, buffer[1]); + interrupts.cpIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _CP, _YES, buffer[1]); + interrupts.mccsIRQ = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _MCCS_IRQ, _YES, buffer[1]); + interrupts.downRepMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _DOWN_REP_MSG_RDY, _YES, buffer[1]); + interrupts.upReqMsgRdy = FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _UP_REQ_MSG_RDY, _YES, buffer[1]); + + fetchLinkStatusLegacy(); + this->setDirtyLinkStatus(false); + + parseAutomatedTestRequest(automatedTestRequest); + } + + void parseAndReadInterrupts() + { + if (caps.supportsESI) + parseAndReadInterruptsESI(); // DP 1.2 should use the new ESI region + else + parseAndReadInterruptsLegacy(); + + } + + virtual int getSinkCount() // DPCD offset 200 + { + return interrupts.sinkCount; + } + + // + // This was introduced as part of WAR for HP SDC Panel since their + // TCON sets DPCD 0x200 SINK_COUNT=0. It should never be called to + // set the SinkCount in other cases since SinkCount comes from DPCD. 
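+ // Normal operation refreshes interrupts.sinkCount from DPCD in
+ // parseAndReadInterrupts(); this setter only exists to patch over that
+ // non-compliant TCON.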
+ // + virtual void setSinkCount(int sinkCount) + { + interrupts.sinkCount = sinkCount; + } + + virtual bool interruptContentProtection() + { + return interrupts.cpIRQ; + } + + virtual void clearInterruptContentProtection() + { + if (caps.supportsESI) + { + NvU8 irqVector = 0; + + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _CP, _YES, irqVector); + + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _CP, _YES, irqVector); + + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } + } + + virtual bool intteruptMCCS() + { + return interrupts.mccsIRQ; + } + + virtual void clearInterruptMCCS() + { + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _MCCS_IRQ, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _MCCS_IRQ, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } + } + + virtual bool interruptDownReplyReady() + { + return interrupts.downRepMsgRdy; + } + + virtual bool interruptUpRequestReady() + { + return interrupts.upReqMsgRdy; + } + + virtual void clearInterruptDownReplyReady() + { + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _DOWN_REP_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } + } + + virtual void clearInterruptUpRequestReady() + { + if (caps.supportsESI) + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, &irqVector, sizeof irqVector); + } + else + { + NvU8 irqVector = 0; + irqVector = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR, _UP_REQ_MSG_RDY, _YES, irqVector); + bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR, &irqVector, sizeof irqVector); + } + } + + virtual bool getLaneStatusSymbolLock(int lane) + { + return interrupts.laneStatusIntr.laneStatus[lane].symbolLocked; + } + + virtual bool getLaneStatusClockRecoveryDone(int lane) + { + return interrupts.laneStatusIntr.laneStatus[lane].clockRecoveryDone; + } + + virtual bool getInterlaneAlignDone() // DPCD offset 204 + { + return interrupts.laneStatusIntr.interlaneAlignDone; + } + + virtual bool getDownStreamPortStatusChange() + { + return interrupts.laneStatusIntr.downstmPortChng; + } + + virtual bool getPendingTestRequestTraining() // DPCD offset 218 + { + return interrupts.testTraining.testRequestTraining; + } + + virtual bool getPendingAutomatedTestRequest() + { + return interrupts.automatedTestRequest; + } + + virtual bool getPendingTestRequestEdidRead() + { + return interrupts.testEdid.testRequestEdidRead; + } + + virtual bool getPendingTestRequestPhyCompliance() + { + return interrupts.testPhyCompliance.testRequestPhyCompliance; + } + + virtual void getTestRequestTraining(LinkRate & rate, unsigned & lanes) // DPCD offset 219, 220 + { + rate = interrupts.testTraining.testRequestLinkRate; + lanes = 
interrupts.testTraining.testRequestLaneCount; + } + + virtual LinkQualityPatternType getPhyTestPattern() // DPCD offset 248 + { + return interrupts.testPhyCompliance.phyTestPattern; + } + + virtual void getCustomTestPattern(NvU8 *testPattern) // DPCD offset 250 - 259 + { + int i; + + for (i = 0; i < 10; i++) + { + testPattern[i] = interrupts.eightyBitCustomPat[i]; + } + } + + virtual bool getBKSV(NvU8 *bKSV) //DPCD offset 0x68000 + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BKSV_OFFSET, &bKSV[0], HDCP_KSV_SIZE)) + { + DP_LOG(("Found HDCP Bksv= %02x %02x %02x %02x %02x", + bKSV[4], bKSV[3], bKSV[2], bKSV[1], bKSV[0])); + return true; + } + return false; + } + + virtual bool getBCaps(BCaps &bCaps, NvU8 * rawByte) //DPCD offset 0x68028 + { + NvU8 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BCAPS_OFFSET, &buffer, sizeof buffer)) + { + bCaps.HDCPCapable = FLD_TEST_DRF(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_CAPABLE, _YES, buffer); + bCaps.repeater = FLD_TEST_DRF(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER, _YES, buffer); + if (rawByte) + *rawByte = buffer; + return true; + } + + DP_ASSERT(!"Unable to get BCaps"); + return false; + } + + virtual bool getHdcp22BCaps(BCaps &bCaps, NvU8 *rawByte) //DPCD offset 0x6921D + { + NvU8 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP22_BCAPS_OFFSET, &buffer, sizeof buffer)) + { + bCaps.HDCPCapable = FLD_TEST_DRF(_DPCD, _HDCP22_BCAPS_OFFSET, _HDCP_CAPABLE, _YES, buffer); + bCaps.repeater = FLD_TEST_DRF(_DPCD, _HDCP22_BCAPS_OFFSET, _HDCP_REPEATER, _YES, buffer); + if (rawByte) + *rawByte = buffer; + return true; + } + + DP_ASSERT(!"Unable to get 22BCaps"); + return false; + } + + virtual bool getBinfo(BInfo &bInfo) //DPCD offset 0x6802A + { + NvU16 buffer; + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (AuxRetry::ack == bus.read(NV_DPCD_HDCP_BINFO_OFFSET, (NvU8*)&buffer, sizeof buffer)) + { + bInfo.maxCascadeExceeded = FLD_TEST_DRF(_DPCD_HDCP, _BINFO_OFFSET, _MAX_CASCADE_EXCEEDED, _TRUE, buffer); + bInfo.depth = DRF_VAL(_DPCD_HDCP, _BINFO_OFFSET, _DEPTH, buffer); + bInfo.maxDevsExceeded = FLD_TEST_DRF(_DPCD_HDCP, _BINFO_OFFSET, _MAX_DEVS_EXCEEDED, _TRUE, buffer); + bInfo.deviceCount = DRF_VAL(_DPCD_HDCP, _BINFO_OFFSET, _DEVICE_COUNT, buffer); + return true; + } + + DP_ASSERT(!"Unable to get Binfo"); + return false; + } + + // Get RxStatus per provided HDCP cap + virtual bool getRxStatus(const HDCPState &hdcpState, NvU8 *data) + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + NvU32 addr = hdcpState.HDCP_State_22_Capable ? 
+ NV_DPCD_HDCP22_RX_STATUS : NV_DPCD_HDCP_BSTATUS_OFFSET; + + if (AuxRetry::ack == bus.read(addr, data, sizeof(NvU8))) + { + return true; + } + + DP_ASSERT(!"Unable to get RxStatus//Bstatus"); + return false; + } + + virtual AuxRetry::status setTestResponseChecksum(NvU8 checksum) + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + return bus.write(NV_DPCD_TEST_EDID_CHKSUM, &checksum, sizeof checksum); + } + + virtual AuxRetry::status setTestResponse(bool ack, bool edidChecksumWrite) + { + NvU8 testResponse = 0; + + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + if (ack) + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_ACK, _YES, testResponse); + else + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_NACK, _YES, testResponse); + + if (edidChecksumWrite) + testResponse = FLD_SET_DRF(_DPCD, _TEST_RESPONSE, _TEST_EDID_CHKSUM_WRITE, _YES, testResponse); + + return bus.write(NV_DPCD_TEST_RESPONSE, &testResponse, sizeof testResponse); + } + + // Message box encoding + virtual AuxRetry::status writeDownRequestMessageBox(NvU8 * data, size_t length) + { + // + // We can assume no message was sent if this fails. + // Reasoning: + // Sinks are not allowed to DEFER except on the first 16 byte write. + // If there isn't enough room for the 48 byte packet, that write + // will defer. + // + return bus.write(NV_DPCD_MBOX_DOWN_REQ, data, (unsigned)length); + } + + virtual size_t getDownRequestMessageBoxSize() + { + return DP_MESSAGEBOX_SIZE; + } + + virtual AuxRetry::status writeUpReplyMessageBox(NvU8 * data, size_t length) + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + // + // We can assume no message was sent if this fails. + // Reasoning: + // Sinks are not allowed to DEFER except on the first 16 byte write. + // If there isn't enough room for the 48 byte packet, that write + // will defer. 
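+ // The same reasoning applies here as in writeDownRequestMessageBox()
+ // above: a DEFER on the first 16-byte chunk means the sink consumed
+ // nothing, so the caller may safely treat the message as unsent.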
+ // + return bus.write(NV_DPCD_MBOX_UP_REP, data, (unsigned)length); + } + + virtual size_t getUpReplyMessageBoxSize() + { + return 48; + } + + virtual AuxRetry::status readDownReplyMessageBox(NvU32 offset, NvU8 * data, size_t length) + { + // if (caps.revisionMajor <= 0) + // DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + DP_ASSERT(offset + length <= DP_MESSAGEBOX_SIZE); + + return bus.read(NV_DPCD_MBOX_DOWN_REP + offset, data, (unsigned)length); + } + + virtual size_t getDownReplyMessageBoxSize() + { + return DP_MESSAGEBOX_SIZE; + } + + virtual AuxRetry::status readUpRequestMessageBox(NvU32 offset, NvU8 * data, size_t length) + { + if (caps.revisionMajor <= 0) + DP_ASSERT(0 && "Something is wrong, revision major should be > 0"); + + DP_ASSERT(offset + length <= DP_MESSAGEBOX_SIZE); + + return bus.read(NV_DPCD_MBOX_UP_REQ + offset, data, (unsigned)length); + } + + virtual size_t getUpRequestMessageBoxSize() + { + return DP_MESSAGEBOX_SIZE; + } + + virtual size_t getTransactionSize() + { + return bus.getDirect()->transactionSize(); + } + + virtual PowerState getPowerState() + { + NvU8 data; + if (AuxRetry::ack != bus.read(NV_DPCD_SET_POWER, &data, sizeof data, 0)) + { + // Assume powerdown state + return PowerStateD3; + } + + switch (DRF_VAL(_DPCD, _SET_POWER, _VAL, data)) + { + case NV_DPCD_SET_POWER_VAL_D3_PWRDWN: + return PowerStateD3; + + case NV_DPCD_SET_POWER_VAL_D0_NORMAL: + return PowerStateD0; + + case NV_DPCD_SET_POWER_VAL_D3_AUX_ON: + { + DP_ASSERT(isAtLeastVersion(1, 2) && "DP 1.2 specific power state to be set on a non-DP1.2 system!?"); + return PowerStateD3AuxOn; + } + default: + DP_ASSERT(0 && "Unknown power state! Assuming device is asleep"); + return PowerStateD3; + } + } + + virtual bool setPowerState(PowerState newState) + { + NvU8 timeoutMs = 0; + + if (newState == PowerStateD0) + timeoutMs = caps.extendedSleepWakeTimeoutRequestMs; + + // Default behavior is 2ms for better tolerance. + if (timeoutMs < 2) + timeoutMs = 2; + + // + // A Branch Device must forward this value to its downstream devices. + // When set to D3 state, a Sink Device may put its AUX CH circuit in a "power + // saving" state. In this mode the AUX CH circuit may only detect the presence of a + // differential signal input without replying to an AUX CH request transaction. Upon + // detecting the presence of a differential signal input, the Sink Device must exit the + // "power saving" state within 1ms. + // + if (isAtLeastVersion(1, 1)) + { + NvU8 data = 0; + if (newState == PowerStateD0) + data |= NV_DPCD_SET_POWER_VAL_D0_NORMAL; + else if (newState == PowerStateD3) + { + if (caps.extendedSleepWakeTimeoutRequestMs > 1) + { + NvU8 grant = 0; + // Grant extended sleep wake timeout before go D3. + grant = FLD_SET_DRF(_DPCD, _EXTENDED_DPRX_WAKE_TIMEOUT, _PERIOD_GRANTED, _YES, grant); + if (AuxRetry::ack == bus.write(NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT, &grant, sizeof(grant))) + { + DP_LOG(("DisplayPort: Failed to grant extended sleep wake timeout before D3\n")); + } + } + data = NV_DPCD_SET_POWER_VAL_D3_PWRDWN; + } + else + { + DP_ASSERT(0 && "Unknown power state"); + } + + // + // If we're powering on, we need to allow up to 1ms for the power + // to come online. Ideally we'd handle this with a callback, + // but for now we're going to do a wait here. 
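+ // The retry loop below keeps writing SET_POWER until the write is
+ // ACKed, or until both the timeout (at least 2 ms, or the sink's
+ // extended wake timeout when entering D0) has expired and 40 retries
+ // have been exhausted.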
+ // + Timeout timeout(timer, timeoutMs); + unsigned retries = 0; + + do + { + if (AuxRetry::ack == bus.write(NV_DPCD_SET_POWER, &data, sizeof(data))) + { + return true; + } + retries++; + } + while (timeout.valid() || (retries < 40) /* some panels need up to 40 retries */); + + DP_LOG(("DisplayPort: Failed to bring panel back to wake state")); + } + else + { + // DP 1.0 devices cannot be put to sleep + if (newState == PowerStateD0) + return true; + } + + return false; + } + + virtual void payloadTableClearACT() + { + NvU8 byte = NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_YES; + bus.write(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &byte, sizeof byte); + } + + virtual bool payloadWaitForACTReceived() + { + NvU8 byte = 0; + int retries = 0; + + while (true) + { + if (++retries > 40) + { + DP_LOG(("DPHAL> ACT Not received by sink device!")); + return false; + } + + if (AuxRetry::ack == bus.read(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &byte, sizeof byte)) + { + if (FLD_TEST_DRF(_DPCD, _PAYLOAD_TABLE_UPDATE_STATUS, _ACT_HANDLED, _YES, byte)) + { + DP_LOG(("DPHAL> ACT Received")); + return true; + } + } + } + } + + virtual bool payloadAllocate(unsigned streamId, unsigned begin, unsigned count) + { + bool bResult = false; + NvU8 payloadAllocate[3]; + DP_ASSERT(streamId < 64 && "Invalid stream location"); + payloadAllocate[0] = (NvU8)streamId; + payloadAllocate[1] = (NvU8)begin; + payloadAllocate[2] = (NvU8)count; + + AuxRetry::status status = bus.write(NV_DPCD_PAYLOAD_ALLOC_SET, (NvU8*)&payloadAllocate, sizeof payloadAllocate); + + if (status == AuxRetry::ack) + { + // + // Bit 0 = VC Payload Table Updated(Change/Read only) + // 1 = Update, cleared to zero when u Packet Source writes 1 + // 0 = Not updated since the last time this bit was cleared + // + NvU8 payloadStatus; + int retries = 0; + + // + // Bug 1385165 that Synaptics branch revision 1.0 found to spend more than 200ms before table updated. + // Retries without delay is too soon for device to complete table update process. + // That will hit bug 1334070 and trigger monitor unplug/hotplug at early return. + // + do + { + if ((bus.read(NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS, &payloadStatus, sizeof(payloadStatus)) == AuxRetry::ack)) + { + if (FLD_TEST_DRF(_DPCD, _PAYLOAD_TABLE_UPDATE_STATUS, _UPDATED, _YES, payloadStatus)) + { + bResult = true; + break; + } + } + else + { + DP_LOG(("DPHAL> Read NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS failed.")); + } + + timer->sleep(1); + } while (++retries < PAYLOADIDTABLE_UPDATED_CHECK_RETRIES); + } + else + { + DP_LOG(("DPHAL> Send NV_DPCD_PAYLOAD_ALLOC_SET failed.")); + } + + DP_LOG(("DPHAL> Requesting allocation Stream:%d | First Slot:%d | Count:%d (%s)", streamId, begin, count, bResult ? 
"OK" : "FAILED")); + return bResult; + } + + void overrideMultiStreamCap(bool mstCapable) + { + caps.overrideToSST = !mstCapable; + } + + bool getMultiStreamCapOverride() + { + return caps.overrideToSST; + } + + bool getDpcdMultiStreamCap(void) + { + return caps.supportsMultistream; + } + + void setGpuDPSupportedVersions(bool supportDp1_2, bool supportDp1_4) + { + if (supportDp1_4) + DP_ASSERT(supportDp1_2 && "GPU supports DP1.4 should also support DP1.2!"); + + gpuDP1_2Supported = supportDp1_2; + gpuDP1_4Supported = supportDp1_4; + } + + void setGpuFECSupported(bool bSupportFEC) + { + bGpuFECSupported = bSupportFEC; + } + + void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) + { + DP_ASSERT(dpRegkeyDatabase.bInitialized && + "All regkeys are invalid because dpRegkeyDatabase is not initialized!"); + overrideDpcdRev = dpRegkeyDatabase.dpcdRevOveride; + bBypassILREdpRevCheck = dpRegkeyDatabase.bBypassEDPRevCheck; + } + + // To clear pending message {DOWN_REP/UP_REQ} and reply true if existed. + virtual bool clearPendingMsg() + { + NvU8 irqVector, data = 0; + if (AuxRetry::ack == bus.read(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, + &irqVector, sizeof(irqVector))) + { + if (FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, irqVector)) + { + // Clear pending DOWN_REP. + data = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _DOWN_REP_MSG_RDY, _YES, 0); + } + if (FLD_TEST_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, irqVector)) + { + // Clear pending UP_REQ + data = FLD_SET_DRF(_DPCD, _DEVICE_SERVICE_IRQ_VECTOR_ESI0, _UP_REQ_MSG_RDY, _YES, data); + } + if (!data || + (AuxRetry::ack != bus.write(NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0, + &data, sizeof(data)))) + { + DP_LOG(("DPCONN> %s(): No Pending Message or " + "Failed to clear pending message: irqVector/data = 0x%08x/0x%08x", + __FUNCTION__, irqVector, data)); + return false; + } + + return true; + } + else + { + DP_LOG(("DPCONN> Clear Pending MSG: Failed to read ESI0")); + } + + return false; + } + + virtual bool isMessagingEnabled() + { + NvU8 mstmCtrl; + + if ((AuxRetry::ack == bus.read(NV_DPCD_MSTM_CTRL, &mstmCtrl, 1)) && + (FLD_TEST_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, mstmCtrl))) + { + return true; + } + return false; + } + + virtual void setIndexedLinkrateEnabled(bool val) + { + bIndexedLinkrateEnabled = val; + } + + virtual bool isIndexedLinkrateEnabled() + { + return bIndexedLinkrateEnabled; + } + + virtual bool isIndexedLinkrateCapable() + { + return bIndexedLinkrateCapable; + } + + virtual NvU16 *getLinkRateTable() + { + if (!bIndexedLinkrateCapable) + { + DP_LOG(("DPCONN> link rate table should be invalid")); + } + return &caps.linkRateTable[0]; + } + + virtual NvU32 getVideoFallbackSupported() + { + return caps.videoFallbackFormats; + } + + virtual bool getRawLinkRateTable(NvU8 *buffer) + { + NvU16 temp[NV_DPCD_SUPPORTED_LINK_RATES__SIZE]; + NvU8 *data = (buffer == NULL) ? 
(NvU8*)&temp[0] : buffer; + + if (AuxRetry::ack != bus.read(NV_DPCD_SUPPORTED_LINK_RATES(0), data, + NV_DPCD_SUPPORTED_LINK_RATES__SIZE * sizeof(NvU16))) + { + return false; + } + return true; + } + + virtual void resetProtocolConverter() + { + NvU8 data = 0; + bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data)); + bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_2, &data, sizeof(data)); + + } + + virtual bool setSourceControlMode(bool bEnableSourceControlMode, bool bEnableFRLMode) + { + NvU8 data = 0; + + if (bEnableSourceControlMode) + { + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _SRC_CONTROL_MODE, _ENABLE, data); + if (bEnableFRLMode) + { + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _ENABLE, data); + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _ENABLE, data); + } + else + { + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _DISABLE, data); + } + } + else + { + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _SRC_CONTROL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _LINK_FRL_MODE, _DISABLE, data); + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _IRQ_LINK_FRL_MODE, _DISABLE, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + return true; + } + + virtual bool checkPCONFrlReady(bool *bFrlReady) + { + NvU8 data = 0; + + if (bFrlReady == NULL) + { + DP_ASSERT(0); + return true; + } + + *bFrlReady = false; + + if (AuxRetry::ack != bus.read(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &data, sizeof(data))) + { + return false; + } + + if (data == 0) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _NO, data)) + { + parseAndReadInterruptsESI(); + return false; + } + + // Clear only this interrupt bit. + this->clearHdmiLinkStatusChanged(); + + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data))) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _YES, data)) + { + *bFrlReady = true; + } + return true; + } + + virtual bool setupPCONFrlLinkAssessment(NvU32 linkBwMask, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) + { + NvU8 data = 0; + + NvU32 requestedMaxBw = (NvU32)(getMaxFrlBwFromMask(linkBwMask)) + 1; // +1 to convert PCONHdmiLinkBw enum to DPCD FRL BW cap definition + NvU32 targetBw = NV_MIN(caps.pconCaps.maxHdmiLinkBandwidthGbps, + requestedMaxBw); + + // Step 1: Configure FRL Link (FRL BW, BW mask / Concurrent) + if (bEnableExtendLTMode) + { + // + // Set FRL_LT_CONTROL to Extended mode: + // PCON FW trains for all Link BW selected in Link BW Mask (Bit 0~5) + // + data = linkBwMask; + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_2, _FRL_LT_CONTROL, + _EXTENDED, data); + } + else + { + // Set FRL_LT_CONTROL to Normal mode, so PCON stops when first FRL LT succeed. + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_2, _FRL_LT_CONTROL, + _NORMAL, data); + } + + if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_2, &data, sizeof(data))) + { + return false; + } + + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + + if (bEnableConcurrentMode && caps.pconCaps.bConcurrentLTSupported) + { + // Client selects concurrent. 
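+ // (For reference, a hedged sketch of how these PCON helpers are typically
+ //  sequenced by a caller; the ordering shown is illustrative, the real flow
+ //  is driven by the connector code:
+ //      setSourceControlMode(true /* source control */, true /* FRL */);
+ //      setupPCONFrlLinkAssessment(linkBwMask);
+ //      // then poll checkPCONFrlReady() / checkPCONFrlLinkStatus() until the
+ //      // trained FRL rate mask is reported back.
+ //  Concurrent LT is only requested when the PCON advertises
+ //  bConcurrentLTSupported, as checked above.)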
+ data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _CONCURRENT_LT_MODE, + _ENABLE, data); + } + else + { + // + // Don't do concurrent LT for now. + // + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _CONCURRENT_LT_MODE, + _DISABLE, data); + } + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK, + _ENABLE, data); + data = FLD_SET_DRF_NUM(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _MAX_LINK_BW, + targetBw, data); + + if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + + return true; + } + + virtual bool checkPCONFrlLinkStatus(NvU32 *frlRateMask) + { + NvU8 data = 0; + + if (frlRateMask == NULL) + { + DP_ASSERT(0); + return true; + } + + *frlRateMask = 0; + // Check if IRQ happens. + if (AuxRetry::ack != bus.read(NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0, &data, sizeof(data))) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD, _LINK_SERVICE_IRQ_VECTOR_ESI0, _HDMI_LINK_STATUS_CHANGED, _NO, data)) + { + return false; + } + // Check HDMI Link Active status (0x303B Bit 0) and Link Config (0x3036) + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data))) + { + return false; + } + + if (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, _LINK_ACTIVE, _YES, data)) + { + if (AuxRetry::ack == bus.read(NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS, &data, sizeof(data))) + { + *frlRateMask = DRF_VAL(_DPCD20, _PCON_HDMI_LINK_CONFIG_STATUS, _LT_RESULT, data); + } + + } + + return true; + } + + virtual bool queryHdmiLinkStatus(bool *bLinkActive, bool *bLinkReady) + { + NvU8 data = 0; + + if (bLinkReady == NULL && bLinkReady == NULL) + return false; + + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_HDMI_TX_LINK_STATUS, &data, sizeof(data))) + { + return false; + } + if (bLinkReady != NULL) + { + *bLinkReady = (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, + _LINK_READY, _YES, data)); + } + if (bLinkActive != NULL) + { + *bLinkActive = (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, + _LINK_ACTIVE, _YES, data)); + } + return true; + } + + virtual NvU32 restorePCONFrlLink(NvU32 linkBwMask, + bool bEnableExtendLTMode = false, + bool bEnableConcurrentMode = false) + { + // Restore HDMI Link. + // 1. Clear HDMI link enable bit (305A bit 7) + NvU8 data = 0; + NvU32 loopCount; + NvU32 frlRate; + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK, _DISABLE, data); + if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + // 2. Set FRL or TMDS (Optional if not changed) (305A bit 5) + // 3. Read FRL Ready Bit (303B bit 1) + + Timeout timeout(timer, 500 /* 500ms */); + data = 0; + do + { + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_HDMI_TX_LINK_STATUS, + &data, sizeof(data))) + continue; + if (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _YES, data)) + break; + } while (timeout.valid()); + + if (FLD_TEST_DRF(_DPCD20, _PCON_HDMI_TX_LINK_STATUS, _LINK_READY, _NO, data)) + { + return false; + } + + // 4. Configure FRL Link (Optional if not changed) + // 5. Set HDMI Enable Bit. + data = 0; + + if (AuxRetry::ack != bus.read(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + data = FLD_SET_DRF(_DPCD20, _PCON_FRL_LINK_CONFIG_1, _HDMI_LINK, _ENABLE, data); + if (AuxRetry::ack != bus.write(NV_DPCD20_PCON_FRL_LINK_CONFIG_1, &data, sizeof(data))) + { + return false; + } + + // 6. 
Read HDMI Link Status link active bit (2005 bit 3) + // 7. Read HDMI link active status bit and link config status (303b bit0 / 3036) + loopCount = NV_PCON_FRL_LT_TIMEOUT_THRESHOLD; + do + { + if (checkPCONFrlLinkStatus(&frlRate) == true) + { + break; + } + Timeout timeout(this->timer, NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } while (--loopCount); + + return frlRate; + } + + virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) + { + dpMemCopy(caps, &this->caps.psrCaps, sizeof(vesaPsrSinkCaps)); + } + + virtual bool updatePsrConfiguration(vesaPsrConfig psrcfg) + { + NvU8 config = 0U; + + if (psrcfg.psrCfgEnable) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SINK_ENABLE, _YES, config); + } + if (psrcfg.srcTxEnabledInPsrActive) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SOURCE_LINK_ACTIVE, _YES, config); + } + if (psrcfg.crcVerifEnabledInPsrActive) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _CRC_VERIFICATION_ACTIVE, _YES, config); + } + if (psrcfg.frameCaptureSecondActiveFrame) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _FRAME_CAPTURE_INDICATION, _SECOND, config); + } + if (psrcfg.selectiveUpdateOnSecondActiveline) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _SU_LINE_CAPTURE_INDICATION, _SECOND, config); + } + if (psrcfg.enableHpdIrqOnCrcMismatch) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _HPD_IRQ_ON_CRC_ERROR, _YES, config); + } + if (psrcfg.enablePsr2) + { + config = FLD_SET_DRF(_DPCD_EDP, _PSR_CONFIG, + _ENABLE_PSR2, _YES, config); + } + + return AuxRetry::ack == + bus.write(NV_DPCD_EDP_PSR_CONFIG, &config, 1); + } + + virtual bool readPsrConfiguration(vesaPsrConfig *psrcfg) + { + NvU8 config = 0U; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_EDP_PSR_CONFIG, &config, 1); + + psrcfg->psrCfgEnable = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _SINK_ENABLE, _YES, config); + psrcfg->srcTxEnabledInPsrActive = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _SOURCE_LINK_ACTIVE, _YES, config); + psrcfg->crcVerifEnabledInPsrActive = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _CRC_VERIFICATION_ACTIVE, + _YES, config); + psrcfg->frameCaptureSecondActiveFrame = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _FRAME_CAPTURE_INDICATION, + _SECOND, config); + psrcfg->selectiveUpdateOnSecondActiveline = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, + _SU_LINE_CAPTURE_INDICATION, _SECOND, config); + psrcfg->enableHpdIrqOnCrcMismatch = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _HPD_IRQ_ON_CRC_ERROR, _YES, config); + psrcfg->enablePsr2 = + FLD_TEST_DRF(_DPCD_EDP, _PSR_CONFIG, _ENABLE_PSR2, _YES, config); + + return retVal; + } + + virtual bool readPsrState(vesaPsrState *psrState) + { + NvU8 config = 0U; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_PANEL_SELF_REFRESH_STATUS, &config, 1); + + if (retVal) + { + *psrState = + (vesaPsrState)DRF_VAL(_DPCD, _PANEL_SELF_REFRESH_STATUS, + _VAL, config); + } + return retVal; + } + + virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) + { + NvU8 config[2] = { 0U , 0U }; + bool retVal = AuxRetry::ack == + bus.read(NV_DPCD_PANEL_SELF_REFRESH_DEBUG0, + &config[0], sizeof(config)); + + if (retVal) + { + psrDbgState->maxResyncFrames = + DRF_VAL(_DPCD_PANEL_SELF_REFRESH, + _DEBUG0, _MAX_RESYNC_FRAME_CNT, config[0]); + psrDbgState->actualResyncFrames = + DRF_VAL(_DPCD_PANEL_SELF_REFRESH, + _DEBUG0, _LAST_RESYNC_FRAME_CNT, config[0]); + + psrDbgState->lastSdpPsrState = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _PSR_STATE_BIT, config[1]); + psrDbgState->lastSdpUpdateRfb = + 
!!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _RFB_BIT, config[1]); + psrDbgState->lastSdpCrcValid = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _CRC_VALID_BIT, config[1]); + psrDbgState->lastSdpSuValid = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _SU_VALID_BIT, config[1]); + psrDbgState->lastSdpFirstSURcvd = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _SU_FIRST_LINE_RCVD, config[1]); + psrDbgState->lastSdpLastSURcvd = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _SU_LAST_LINE_RCVD, config[1]); + psrDbgState->lastSdpYCoordValid = + !!DRF_VAL(_DPCD_PANEL_SELF_REFRESH, _LAST_SDP, + _Y_CORD_VALID, config[1]); + } + return retVal; + } + + virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr) + { + NvU8 config = 0U; + config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH, + _ERR_STATUS, + _LINK_CRC_ERR, + psrErr.linkCrcError, + config); + config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH, + _ERR_STATUS, + _RFB_ERR, + psrErr.rfbStoreError, + config); + config = FLD_SET_DRF_NUM(_DPCD_PANEL_SELF_REFRESH, + _ERR_STATUS, + _VSC_SDP_ERR, + psrErr.vscSdpError, + config); + + return AuxRetry::ack == bus.write( + NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS, &config, 1); + } + + virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr) + { + NvU8 config = 0U; + bool retVal; + retVal = AuxRetry::ack == bus.read( + NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS, + &config, sizeof(config)); + + if (retVal) + { + psrErr->vscSdpError = FLD_TEST_DRF(_DPCD, + _PANEL_SELF_REFRESH_ERR_STATUS, + _LINK_CRC_ERR, _YES, config); + psrErr->rfbStoreError = FLD_TEST_DRF(_DPCD, + _PANEL_SELF_REFRESH_ERR_STATUS, + _RFB_ERR, _YES, config); + psrErr->linkCrcError = FLD_TEST_DRF(_DPCD, + _PANEL_SELF_REFRESH_ERR_STATUS, + _VSC_SDP_ERR, + _YES, config); + } + return retVal; + } + + virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrEvt) + { + NvU8 config = 0U; + + if (psrEvt.sinkCapChange) + { + config = FLD_SET_DRF(_DPCD, + _PANEL_SELF_REFRESH_EVENT_STATUS, + _CAP_CHANGE, + _YES, config); + } + return AuxRetry::ack == bus.write( + NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS, &config, 1); + } + + virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrEvt) + { + NvU8 config = 0U; + bool retVal; + retVal = AuxRetry::ack == bus.read( + NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS, + &config, sizeof(config)); + + if (retVal) + { + psrEvt->sinkCapChange = DRF_VAL(_DPCD, + _PANEL_SELF_REFRESH_EVENT_STATUS, + _CAP_CHANGE, + config); + } + return retVal; + } +}; + +DPCDHAL * DisplayPort::MakeDPCDHAL(AuxBus * bus, Timer * timer) +{ + return new DPCDHALImpl(bus, timer); +} diff --git a/src/common/displayport/src/dp_connectorimpl.cpp b/src/common/displayport/src/dp_connectorimpl.cpp new file mode 100644 index 000000000..dbeb2b453 --- /dev/null +++ b/src/common/displayport/src/dp_connectorimpl.cpp @@ -0,0 +1,6889 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_connectorimpl.cpp * +* DP connector implementation * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_guid.h" +#include "dp_configcaps.h" +#include "dp_list.h" +#include "dp_buffer.h" +#include "dp_auxdefs.h" +#include "dp_watermark.h" +#include "dp_edid.h" +#include "dp_discovery.h" +#include "dp_groupimpl.h" +#include "dp_deviceimpl.h" +#include "dp_connectorimpl.h" + +#include "dp_auxbus.h" +#include "dpringbuffertypes.h" + +#include "ctrl/ctrl0073/ctrl0073dfp.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "dp_tracing.h" + +using namespace DisplayPort; + +// These wrappers are specifically for DSC PPS library malloc and free callbacks +// Pointer to these functions are populated to dscMalloc/dscFree in DSC_InitializeCallBack and it is initialized from both DPLib and HDMiPacketLib. +// In HDMI case, callback function for malloc/free needs client handle so to match function prototype, in DP case, adding these wrappers. 
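+//
+// For reference, these wrappers are handed to the DSC PPS library from the
+// ConnectorImpl constructor below, roughly as:
+//
+//     DSC_CALLBACK callback;
+//     callback.clientHandle = NULL;   // DP does not use a client handle
+//     callback.dscPrint     = NULL;
+//     callback.dscMalloc    = dpMallocCb;
+//     callback.dscFree      = dpFreeCb;
+//     DSC_InitializeCallback(callback);
+//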
+extern "C" void * dpMallocCb(const void *clientHandle, NvLength size); +extern "C" void dpFreeCb(const void *clientHandle, void *pMemPtr); + +extern "C" void * dpMallocCb(const void *clientHandle, NvLength size) +{ + return dpMalloc(size); +} + +extern "C" void dpFreeCb(const void *clientHandle, void *pMemPtr) +{ + dpFree(pMemPtr); +} + +ConnectorImpl::ConnectorImpl(MainLink * main, AuxBus * auxBus, Timer * timer, Connector::EventSink * sink) + : main(main), + auxBus(auxBus), + timer(timer), + sink(sink), + bIgnoreSrcOuiHandshake(false), + linkPolicy(), + linkGuessed(false), + isLinkQuiesced(false), + bNoLtDoneAfterHeadDetach(false), + isDP12AuthCap(false), + isHDCPAuthOn(false), + isHDCPReAuthPending(false), + isHDCPAuthTriggered(false), + isHopLimitExceeded(false), + isDiscoveryDetectComplete(false), + bDeferNotifyLostDevice(false), + hdcpValidateData(), + authRetries(0), + retryLT(0), + hdcpCapsRetries(0), + hdcpCpIrqRxStatusRetries(0), + bFromResumeToNAB(false), + bAttachOnResume(false), + bHdcpAuthOnlyOnDemand(false), + constructorFailed(false), + policyModesetOrderMitigation(false), + policyForceLTAtNAB(false), + policyAssessLinkSafely(false), + bDisableVbiosScratchRegisterUpdate(false), + modesetOrderMitigation(false), + compoundQueryActive(false), + compoundQueryResult(false), + compoundQueryCount(0), + messageManager(0), + discoveryManager(0), + numPossibleLnkCfg(0), + linkAwaitingTransition(false), + linkState(DP_TRANSPORT_MODE_INIT), + bAudioOverRightPanel(false), + connectorActive(false), + firmwareGroup(0), + bAcpiInitDone(false), + bIsUefiSystem(false), + bSkipLt(false), + bMitigateZombie(false), + bDelayAfterD3(false), + bKeepOptLinkAlive(false), + bNoFallbackInPostLQA(false), + LT2FecLatencyMs(0), + bDscCapBasedOnParent(false), + ResStatus(this) +{ + clearTimeslices(); + hal = MakeDPCDHAL(auxBus, timer); + if (hal == NULL) + { + constructorFailed = true; + return; + } + highestAssessedLC = getMaxLinkConfig(); + firmwareGroup = createFirmwareGroup(); + + if (firmwareGroup == NULL) + { + constructorFailed = true; + return; + } + + hal->setPC2Disabled(main->isPC2Disabled()); + + // + // If a GPU is DP1.2 or DP1.4 supported then set these capalibilities. + // This is used for accessing DP1.2/DP1.4 specific register space & features + // + hal->setGpuDPSupportedVersions(main->isDP1_2Supported(), main->isDP1_4Supported()); + + // Set if GPU supports FEC. Check panel FEC caps only if GPU supports it. + hal->setGpuFECSupported(main->isFECSupported()); + + // Set if LTTPR training is supported per regKey + hal->setLttprSupported(main->isLttprSupported()); + + const DP_REGKEY_DATABASE& dpRegkeyDatabase = main->getRegkeyDatabase(); + this->applyRegkeyOverrides(dpRegkeyDatabase); + hal->applyRegkeyOverrides(dpRegkeyDatabase); + + // Initialize DSC callbacks + DSC_CALLBACK callback; + callback.clientHandle = NULL; + callback.dscPrint = NULL; + callback.dscMalloc = dpMallocCb; + callback.dscFree = dpFreeCb; + DSC_InitializeCallback(callback); +} + +void ConnectorImpl::applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) +{ + DP_ASSERT(dpRegkeyDatabase.bInitialized && + "All regkeys are invalid because dpRegkeyDatabase is not initialized!"); + + this->bSkipAssessLinkForEDP = dpRegkeyDatabase.bAssesslinkForEdpSkipped; + + // If Hdcp authenticatoin on demand regkey is set, override to the provided value. 
+ this->bHdcpAuthOnlyOnDemand = dpRegkeyDatabase.bHdcpAuthOnlyOnDemand; + + if (dpRegkeyDatabase.bOptLinkKeptAlive) + { + this->bKeepLinkAliveMST = true; + this->bKeepLinkAliveSST = true; + } + else + { + this->bKeepLinkAliveMST = dpRegkeyDatabase.bOptLinkKeptAliveMst; + this->bKeepLinkAliveSST = dpRegkeyDatabase.bOptLinkKeptAliveSst; + } + this->bReportDeviceLostBeforeNew = dpRegkeyDatabase.bReportDeviceLostBeforeNew; + this->maxLinkRateFromRegkey = dpRegkeyDatabase.applyMaxLinkRateOverrides; + this->bEnableAudioBeyond48K = dpRegkeyDatabase.bAudioBeyond48kEnabled; + this->bDisableSSC = dpRegkeyDatabase.bSscDisabled; + this->bEnableFastLT = dpRegkeyDatabase.bFastLinkTrainingEnabled; + this->bDscMstCapBug3143315 = dpRegkeyDatabase.bDscMstCapBug3143315; + this->bDscMstEnablePassThrough = dpRegkeyDatabase.bDscMstEnablePassThrough; +} + +void ConnectorImpl::setPolicyModesetOrderMitigation(bool enabled) +{ + policyModesetOrderMitigation = enabled; +} + +void ConnectorImpl::setPolicyForceLTAtNAB(bool enabled) +{ + policyForceLTAtNAB = enabled; +} + +void ConnectorImpl::setPolicyAssessLinkSafely(bool enabled) +{ + policyAssessLinkSafely = enabled; +} + +// +// This function is to re-read remote HDCP BKSV and BCAPS. +// +// Function is added for DP1.2 devices which don't have valid BKSV at HPD and +// make BKSV available after Payload Ack. +// +void ConnectorImpl::readRemoteHdcpCaps() +{ + if (hdcpCapsRetries) + { + fireEvents(); + return; + } + +} + +void ConnectorImpl::discoveryDetectComplete() +{ + fireEvents(); + // no outstanding EDID reads and branch/sink detections for MST + if (pendingEdidReads.isEmpty() && + (!discoveryManager || + (discoveryManager->outstandingBranchDetections.isEmpty() && + discoveryManager->outstandingSinkDetections.isEmpty()))) + { + bDeferNotifyLostDevice = false; + isDiscoveryDetectComplete = true; + bIsDiscoveryDetectActive = false; + + // Complete detection and see if can enter power saving state. 
+ isNoActiveStreamAndPowerdown(); + + fireEvents(); + } +} + +void ConnectorImpl::applyEdidWARs(Edid & edid, DiscoveryManager::Device device) +{ + DpMonitorDenylistData *pDenylistData = new DpMonitorDenylistData(); + NvU32 warFlag = 0; + warFlag = main->monitorDenylistInfo(edid.getManufId(), edid.getProductId(), pDenylistData); + + // Apply any edid overrides if required + edid.applyEdidWorkArounds(warFlag, pDenylistData); + + delete pDenylistData; +} + +void DisplayPort::DevicePendingEDIDRead::mstEdidCompleted(EdidReadMultistream * from) +{ + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-CONN> Edid read complete: %s %s", + from->topologyAddress.toString(sb), + from->edid.getName())); + ConnectorImpl * connector = parent; + parent->applyEdidWARs(from->edid, device); + parent->processNewDevice(device, from->edid, true, DISPLAY_PORT, RESERVED); + delete this; + connector->discoveryDetectComplete(); +} + +void DisplayPort::DevicePendingEDIDRead::mstEdidReadFailed(EdidReadMultistream * from) +{ + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-CONN> Edid read failed: %s (using fallback)", + from->topologyAddress.toString(sb))); + ConnectorImpl * connector = parent; + parent->processNewDevice(device, Edid(), true, DISPLAY_PORT, RESERVED); + delete this; + connector->discoveryDetectComplete(); +} + +void ConnectorImpl::messageProcessed(MessageManager::MessageReceiver * from) +{ + if (from == &ResStatus) + { + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + if (i->getGUID() == ResStatus.request.guid) + { + DeviceImpl * child = ((DeviceImpl *)i)->children[ResStatus.request.port]; + if (child) + { + child->resetCacheInferredLink(); + sink->bandwidthChangeNotification((DisplayPort::Device*)child, false); + return; + } + + break; + } + + // Child wasn't found... Invalidate all bandwidths on topology + for (Device * i = enumDevices(0); i; i = enumDevices(i)) { + ((DeviceImpl *)i)->resetCacheInferredLink(); + } + } + else + DP_ASSERT(0 && "Received unexpected upstream message that we AREN'T registered for"); +} + +void ConnectorImpl::discoveryNewDevice(const DiscoveryManager::Device & device) +{ + // + // We're guaranteed that there isn't already a device on the list with the same + // address. If we receive the same device announce again - it is considered + // a notification that the device underlying may have seen an HPD. + // + // We're going to queue an EDID read, and remember which device we did it on. + // If the EDID comes back different we'll have mark the old device object + // as disconnected - and create a new one. This is required because + // EDID is one of the fields considered to be immutable. + // + + if (!device.branch) + { + if (!device.videoSink) + { + // Don't read EDID on a device having no videoSink + processNewDevice(device, Edid(), false, DISPLAY_PORT, RESERVED); + return; + } + pendingEdidReads.insertBack(new DevicePendingEDIDRead(this, messageManager, device)); + } + else + { + // Don't try to read the EDID on a branch device + processNewDevice(device, Edid(), true, DISPLAY_PORT, RESERVED); + } +} + +void ConnectorImpl::processNewDevice(const DiscoveryManager::Device & device, + const Edid & edid, + bool isMultistream, + DwnStreamPortType portType, + DwnStreamPortAttribute portAttribute, + bool isCompliance) +{ + // + // Ideally we should read EDID here. 
But instead just report the device + // try to find device in list of devices + // + DeviceImpl * existingDev = findDeviceInList(device.address); + if (existingDev) + existingDev->resetCacheInferredLink(); + + // + // Process fallback EDID + // + Edid processedEdid = edid; + + if (!edid.getEdidSize() || !edid.isChecksumValid() || !edid.isValidHeader() || + edid.isPatchedChecksum()) + { + if (portType == WITHOUT_EDID) + { + switch(portAttribute) + { + case RESERVED: + case IL_720_480_60HZ: + case IL_720_480_50HZ: + case IL_1920_1080_60HZ: + case IL_1920_1080_50HZ: + case PG_1280_720_60HZ: + case PG_1280_720_50_HZ: + DP_ASSERT(0 && "Default EDID feature not supported!"); + break; + } + + } + if (portType == ANALOG_VGA) + makeEdidFallbackVGA(processedEdid); + else + { + makeEdidFallback(processedEdid, hal->getVideoFallbackSupported()); + } + } + + // + // Process caps + // + bool hasAudio = device.SDPStreams && device.SDPStreamSinks; + bool hasVideo = device.videoSink; + NvU64 maxTmdsClkRate = 0U; + ConnectorType connector = connectorDisplayPort; + + if (portType == DISPLAY_PORT_PLUSPLUS || portType == DVI || portType == HDMI) + { + maxTmdsClkRate = device.maxTmdsClkRate; + } + + switch(portType) + { + case DISPLAY_PORT: + case DISPLAY_PORT_PLUSPLUS: // DP port that supports DP and TMDS + connector = connectorDisplayPort; + break; + + case ANALOG_VGA: + connector = connectorVGA; + break; + + case DVI: + connector = connectorDVI; + break; + + case HDMI: + connector = connectorHDMI; + break; + + case WITHOUT_EDID: + connector = connectorDisplayPort; + break; + } + + // Dongle in SST mode. + if ((device.peerDevice == Dongle) && (device.address.size() == 0)) + hasAudio = hasVideo = false; + + if (device.branch) + hasAudio = hasVideo = false; + + if (!existingDev) + goto create; + + if (isCompliance && (existingDev->processedEdid == processedEdid)) + { + // unzombie the old device + } + else if (existingDev->audioSink != hasAudio || + existingDev->videoSink != hasVideo || + existingDev->rawEDID != edid || + existingDev->processedEdid != processedEdid || + existingDev->connectorType != connector || + existingDev->multistream != isMultistream || + existingDev->complianceDeviceEdidReadTest != isCompliance || + existingDev->maxTmdsClkRate != maxTmdsClkRate || + (existingDev->address.size() > 1 && !existingDev->getParent()) || + // If it is an Uninitialized Mux device, goto create so that we can properly + // initialize the device and all its caps + existingDev->isFakedMuxDevice()) + goto create; + + // Complete match, make sure its marked as plugged + existingDev->plugged = true; + if (existingDev->isActive()) + existingDev->activeGroup->update(existingDev, true); + + + fireEvents(); + return; +create: + // If there is an existing device, mark it as no longer available. + if (existingDev) + existingDev->plugged = false; + + // Find parent + DeviceImpl * parent = 0; + if (device.address.size() != 0) + { + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + { + if ((i->getTopologyAddress() == device.address.parent()) && + (((DeviceImpl *)i)->plugged)) + { + parent = (DeviceImpl*)i; + break; + } + } + } + + DP_ASSERT((parent || device.address.size() <= 1) && "Device was registered before parent"); + + DeviceImpl * newDev; + // + // If it is a faked Mux device, we have already notified DD of few of its caps. 
+ // Reuse the same device to make sure that DD updates the same device's parameters + // otherwise create a new device + // + if (existingDev && existingDev->isFakedMuxDevice()) + { + newDev = existingDev; + existingDev = NULL; + } + else + { + newDev = new DeviceImpl(hal, this, parent); + } + + if (parent) + parent->children[device.address.tail()] = newDev; + + if (!newDev) + { + DP_ASSERT(0 && "new failed"); + return; + } + + // Fill out the new device + newDev->address = device.address; + newDev->multistream = isMultistream; + newDev->videoSink = hasVideo; + newDev->audioSink = hasAudio; + newDev->plugged = true; + newDev->rawEDID = edid; + newDev->processedEdid = processedEdid; + newDev->connectorType = connector; + newDev->guid = device.peerGuid; + newDev->peerDevice = device.peerDevice; + newDev->portMap = device.portMap; + newDev->dpcdRevisionMajor = device.dpcdRevisionMajor; + newDev->dpcdRevisionMinor = device.dpcdRevisionMinor; + newDev->complianceDeviceEdidReadTest = isCompliance; + newDev->maxTmdsClkRate = maxTmdsClkRate; + + Address::NvU32Buffer addrBuffer; + dpMemZero(addrBuffer, sizeof(addrBuffer)); + newDev->address.toNvU32Buffer(addrBuffer); + NV_DPTRACE_INFO(NEW_SINK_DETECTED, newDev->address.size(), addrBuffer[0], addrBuffer[1], addrBuffer[2], addrBuffer[3], + newDev->multistream, newDev->rawEDID.getManufId(), newDev->rawEDID.getProductId()); + + // Apply any DPCD overrides if required + newDev->dpcdOverrides(); + + // + // Some 4K eDP panel needs HBR2 to support higher modes, Highest assessed LC + // remains in a stale state after applying DPCD overrides here. So we need to + // assess the link again. + // + if (newDev->isOptimalLinkConfigOverridden()) + { + this->assessLink(); + } + + // Panel has issues with LQA, reassess link + if (processedEdid.WARFlags.reassessMaxLink) + { + // + // If the highest assessed LC is not equal to max possible link config and + // panel is branch device which GPU is link training, re-assess link + // + int retries = 0; + + while((retries < WAR_MAX_REASSESS_ATTEMPT) && (highestAssessedLC != getMaxLinkConfig())) + { + DP_LOG(("DP> Assessed link is not equal to highest possible config. Reassess link.")); + this->assessLink(); + retries++; + } + } + + // Postpone the remote HDCPCap read for Dongles + DP_ASSERT(!isLinkInD3() && "Hdcp probe at D3"); + if (device.peerDevice != Dongle) + { + DP_ASSERT(newDev->isDeviceHDCPDetectionAlive == false); + if ((newDev->deviceHDCPDetection = new DeviceHDCPDetection(newDev, messageManager, timer))) + { + // + // We cannot move the hdcpDetection after stream added because DD + // needs hdcp Cap before stream added. + // + newDev->isDeviceHDCPDetectionAlive = true; + newDev->deviceHDCPDetection->start(); + } + else + { + // For the risk control, make the device as not HDCPCap. + DP_ASSERT(0 && "new failed"); + newDev->isDeviceHDCPDetectionAlive = false; + newDev->isHDCPCap = False; + + if (!newDev->isMultistream()) + newDev->shadow.hdcpCapDone = true; + } + } + + newDev->vrrEnablement = new VrrEnablement(newDev); + if (!newDev->vrrEnablement) + { + DP_ASSERT(0 && "new VrrEnablement failed"); + } + + BInfo bInfo; + if ((!isHopLimitExceeded) && (hal->getBinfo(bInfo))) + { + if (bInfo.maxCascadeExceeded || bInfo.maxDevsExceeded) + { + if (isHDCPAuthOn) + { + isHDCPAuthOn = false; + } + isHopLimitExceeded = true; + } + else + isHopLimitExceeded = false; + } + + // + // If the device is a faked Mux device, then we just initizlied it. 
+ // Reset its faked status and skip adding it to the deviceList + // + if (newDev->isFakedMuxDevice()) + { + newDev->bIsFakedMuxDevice = false; + newDev->bIsPreviouslyFakedMuxDevice = true; + } + else + { + deviceList.insertBack(newDev); + } + + // if a new device has replaced a previous compliance device; let this event be exposed to DD now. + // ie : the old device will be zombied/lost now ... lazily(instead of at an unplug which happened a while back.) + if (existingDev && existingDev->complianceDeviceEdidReadTest) + existingDev->lazyExitNow = true; + + // Read panel DSC support only if GPU supports DSC + bool bGpuDscSupported; + main->getDscCaps(&bGpuDscSupported); + if (bGpuDscSupported && newDev->getDSCSupport()) + { + // Read and parse DSC caps only if panel supports DSC + newDev->readAndParseDSCCaps(); + } + + // Decide if DSC stream can be sent to new device + newDev->bDSCPossible = false; + newDev->devDoingDscDecompression = NULL; + if (bGpuDscSupported && !processedEdid.WARFlags.bIgnoreDscCap) + { + if (newDev->multistream) + { + if ((newDev->peerDevice == Dongle) && + (newDev->dpcdRevisionMajor != 0) && + !bDscCapBasedOnParent) + { + // For Peer Type 4 device with LAM DPCD rev != 0.0, check only the device's own DSC capability. + if (newDev->isDSCSupported()) + { + newDev->bDSCPossible = true; + newDev->devDoingDscDecompression = newDev; + } + } + else + { + if (this->bDscMstEnablePassThrough) + { + // + // Check the device's own and its parent's DSC capability. + // - Sink device will do DSC cecompression when + // 1. Sink device is capable of DSC decompression and parent + // supports DSC pass through. + // + // - Sink device's parent will do DSC decompression + // 1. If sink device supports DSC decompression but it's parent does not support + // DSC Pass through, but supports DSC decompression. + // 2. If the device does not support DSC decompression, but parent supports it. + // + if (newDev->isDSCSupported()) + { + if (newDev->videoSink && newDev->parent) + { + if (newDev->parent->isDSCPassThroughSupported()) + { + // + // This condition takes care of DSC capable sink devices + // connected behind a DSC Pass through capable branch + // + newDev->devDoingDscDecompression = newDev; + newDev->bDSCPossible = true; + } + else if (newDev->parent->isDSCSupported()) + { + // + // This condition takes care of DSC capable sink devices + // connected behind a branch device that is not capable + // of DSC pass through but can do DSC decompression. + // + newDev->bDSCPossible = true; + newDev->devDoingDscDecompression = newDev->parent; + } + } + else + { + // This condition takes care of branch device capable of DSC. + newDev->devDoingDscDecompression = newDev; + newDev->bDSCPossible = true; + } + } + else if (newDev->parent && newDev->parent->isDSCSupported()) + { + // + // This condition takes care of sink devices not capable of DSC + // but parent is capable of DSC decompression. + // + newDev->bDSCPossible = true; + newDev->devDoingDscDecompression = newDev->parent; + } + } + else + { + // + // Revert to old code if DSC Pass through support is not requested. + // This code will be deleted once DSC Pass through support will be enabled + // by default which will be done when 2Head1OR MST (GR-133) will be in production. + // + // Check the device's own and its parent's DSC capability. 
Parent of the device can do + // DSC decompression and send uncompressed stream to downstream device + // + if (newDev->isDSCSupported() || (newDev->parent && newDev->parent->isDSCSupported())) + { + newDev->bDSCPossible = true; + } + + // For multistream device, determine who will do the DSC decompression + if (newDev->bDSCPossible) + { + if(!newDev->isDSCSupported()) + { + newDev->devDoingDscDecompression = newDev->parent; + } + else + { + newDev->devDoingDscDecompression = newDev; + } + } + } + } + } + else + { + if (newDev->isDSCSupported()) + { + newDev->bDSCPossible = true; + newDev->devDoingDscDecompression = newDev; + } + } + } + + // Read panel replay capabilities + newDev->getPanelReplayCaps(); + + // Get Panel FEC support only if GPU supports FEC + if (this->isFECSupported()) + { + newDev->getFECSupport(); + } + + if (main->supportMSAOverMST()) + { + newDev->bMSAOverMSTCapable = newDev->getSDPExtnForColorimetrySupported(); + } + else + { + newDev->bMSAOverMSTCapable = false; + } + + fireEvents(); +} + +void ConnectorImpl::populateAllDpConfigs() +{ + LinkRate linkRate; + LinkRates *pConnLinkRates; + unsigned laneCounts[] = {laneCount_1, laneCount_2, laneCount_4}; + unsigned laneSets = sizeof(laneCounts) / sizeof(laneCounts[0]); + + // + // Following sequence is to be followed for saving power by default; + // It may vary with sinks which support link rate table. + // + // Link Config MBPS + // 1*RBR 162 + // 1*HBR 270 + // 2*RBR 324 + // 1*HBR2 540 + // 2*HBR 540 + // 4*RBR 648 + // 1*HBR3 810 + // ... + // + if (numPossibleLnkCfg) + { + DP_LOG(("DP> DPCONN> Rebuild possible link rate confgiurations")); + delete[] allPossibleLinkCfgs; + numPossibleLnkCfg = 0; + } + + // Attempt to configure link rate table mode if supported + pConnLinkRates = linkPolicy.getLinkRates(); + if (hal->isIndexedLinkrateCapable() && + main->configureLinkRateTable(hal->getLinkRateTable(), pConnLinkRates)) + { + // Maximal link rate is limited with link rate table + hal->overrideOptimalLinkRate(pConnLinkRates->getMaxRate()); + hal->setIndexedLinkrateEnabled(true); + } + else + { + // Reset configured link rate table if ever enabled to get RM act right + if (hal->isIndexedLinkrateEnabled()) + { + main->configureLinkRateTable(NULL, NULL); + hal->setIndexedLinkrateEnabled(false); + } + + // Get maximal link rate supported by GPU + linkRate = main->maxLinkRateSupported(); + + // Insert by order + pConnLinkRates->clear(); + if (linkRate >= RBR) + pConnLinkRates->import(linkBW_1_62Gbps); + + if (linkRate >= HBR) + pConnLinkRates->import(linkBW_2_70Gbps); + + if (linkRate >= HBR2) + pConnLinkRates->import(linkBW_5_40Gbps); + + if (linkRate >= HBR3) + pConnLinkRates->import(linkBW_8_10Gbps); + } + + numPossibleLnkCfg = laneSets * pConnLinkRates->getNumLinkRates(); + if (numPossibleLnkCfg == 0) + { + DP_LOG(("DPCONN> %s: lane count %d or link rates %d!", + pConnLinkRates->getNumLinkRates(), laneSets, __FUNCTION__)); + DP_ASSERT(0 && "Invalid lane count %d or link rates %d!"); + return; + } + + allPossibleLinkCfgs = new LinkConfiguration[numPossibleLnkCfg](); + + if (allPossibleLinkCfgs == NULL) + { + DP_LOG(("DPCONN> %s: Failed to allocate allPossibleLinkCfgs array", + __FUNCTION__)); + numPossibleLnkCfg = 0; + return; + } + + // Populate all possible link configuration + linkRate = pConnLinkRates->getMaxRate(); + for (unsigned i = 0; i < pConnLinkRates->getNumLinkRates(); i++) + { + for (unsigned j = 0; j < laneSets; j++) + { + allPossibleLinkCfgs[i * laneSets + j].setLaneRate(linkRate, laneCounts[j]); + } + 
linkRate = pConnLinkRates->getLowerRate(linkRate); + } + + // Sort link configurations per bandwidth from low to high + for (unsigned i = 0; i < numPossibleLnkCfg - 1; i++) + { + LinkConfiguration *pLowCfg = &allPossibleLinkCfgs[i]; + for (unsigned j = i + 1; j < numPossibleLnkCfg; j++) + { + if (allPossibleLinkCfgs[j] < *pLowCfg) + pLowCfg = &allPossibleLinkCfgs[j]; + } + // Swap + if (pLowCfg != &allPossibleLinkCfgs[i]) + { + LinkRate swapRate = pLowCfg->peakRate; + unsigned swapLanes = pLowCfg->lanes; + pLowCfg->setLaneRate(allPossibleLinkCfgs[i].peakRate, + allPossibleLinkCfgs[i].lanes); + allPossibleLinkCfgs[i].setLaneRate(swapRate, swapLanes); + } + } +} + +void ConnectorImpl::discoveryLostDevice(const Address & address) +{ + DeviceImpl * existingDev = findDeviceInList(address); + + if (!existingDev) + { + DP_ASSERT(0 && "Device lost on device not in database?!"); + return; + } + + existingDev->plugged = false; + existingDev->devDoingDscDecompression = NULL; + fireEvents(); +} + +ConnectorImpl::~ConnectorImpl() +{ + if (numPossibleLnkCfg) + delete[] allPossibleLinkCfgs; + + timer->cancelCallbacks(this); + delete discoveryManager; + pendingEdidReads.clear(); + delete messageManager; + delete hal; +} + +// +// Clear all the state associated with the head attachment +// +void ConnectorImpl::hardwareWasReset() +{ + activeLinkConfig.lanes = 0; + + while (!activeGroups.isEmpty()) + { + GroupImpl * g = (GroupImpl *)activeGroups.front(); + activeGroups.remove(g); + inactiveGroups.insertBack(g); + + g->setHeadAttached(false); + } +} + +Group * ConnectorImpl::resume(bool firmwareLinkHandsOff, + bool firmwareDPActive, + bool plugged, + bool isUefiSystem, + unsigned firmwareHead, + bool bFirmwareLinkUseMultistream, + bool bDisableVbiosScratchRegisterUpdate, + bool bAllowMST) +{ + Group * result = 0; + hardwareWasReset(); + previousPlugged = false; + connectorActive = true; + bIsUefiSystem = isUefiSystem; + + this->bDisableVbiosScratchRegisterUpdate = bDisableVbiosScratchRegisterUpdate; + + bFromResumeToNAB = true; + + if (firmwareLinkHandsOff) + { + isLinkQuiesced = true; + } + else if (firmwareDPActive) + { + DP_LOG(("CONN> Detected firmware panel is active on head %d.", firmwareHead)); + ((GroupImpl *)firmwareGroup)->setHeadAttached(true); + ((GroupImpl *)firmwareGroup)->headIndex = firmwareHead; + ((GroupImpl *)firmwareGroup)->streamIndex = 1; + ((GroupImpl *)firmwareGroup)->headInFirmware = true; + + this->linkState = bFirmwareLinkUseMultistream ? DP_TRANSPORT_MODE_MULTI_STREAM : DP_TRANSPORT_MODE_SINGLE_STREAM; + + inactiveGroups.remove((GroupImpl *)firmwareGroup); + activeGroups.remove((GroupImpl *)firmwareGroup); + activeGroups.insertBack((GroupImpl *)firmwareGroup); + + result = firmwareGroup; + } + + hal->overrideMultiStreamCap(bAllowMST); + + // + // In resume code path, all devices on this connector gets lost and deleted on first fireEvents() + // and that could generate unnecessary new/lost device events. Therefore defer to lost devices + // until discovery detect gets completed, this allows processNewDevice() function to look + // at matching existing devices and optimize creation of new devices. We only have to set the flag + // to true when plugged = true, since if disconnected, we are not going to defer anything. 
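+ // (Hedged usage sketch: on S3/S4 resume a client typically calls something
+ //  like
+ //      Group *fwGroup = connector->resume(false /* firmwareLinkHandsOff */,
+ //                                         true  /* firmwareDPActive */,
+ //                                         true  /* plugged */, ...);
+ //  and later performs a full modeset on the returned firmware group; the
+ //  argument values here are illustrative only.)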
+ // + bDeferNotifyLostDevice = plugged; + bAttachOnResume = true; + notifyLongPulse(plugged); + bAttachOnResume = false; + + return result; +} + + +void ConnectorImpl::pause() +{ + connectorActive = false; + if (messageManager) + { + messageManager->pause(); + } +} + +// Query current Device topology +Device * ConnectorImpl::enumDevices(Device * previousDevice) +{ + if (previousDevice) + previousDevice = (DeviceImpl *)((DeviceImpl*)previousDevice)->next; + else + previousDevice = (DeviceImpl *)deviceList.begin(); + + if ((DeviceImpl*)previousDevice == deviceList.end()) + return 0; + else + return (DeviceImpl *)previousDevice; +} + +LinkConfiguration ConnectorImpl::getMaxLinkConfig() +{ + NvU64 maxLinkRate; + + DP_ASSERT(hal); + + if (main->isEDP()) + { + // Regkey is supported on eDP panels only + maxLinkRate = maxLinkRateFromRegkey; + // Check if valid value is present in regkey + if (maxLinkRate && (IS_VALID_LINKBW(maxLinkRate))) + { + maxLinkRate = maxLinkRate * DP_LINK_BW_FREQ_MULTI_MBPS; + } + else + { + maxLinkRate = hal->getMaxLinkRate(); + } + } + else + { + maxLinkRate = hal->getMaxLinkRate(); + } + + LinkRate linkRate = maxLinkRate ? + DP_MIN(maxLinkRate, main->maxLinkRateSupported()) : + main->maxLinkRateSupported(); + + unsigned laneCount = hal->getMaxLaneCount() ? + DP_MIN(hal->getMaxLaneCountSupportedAtLinkRate(linkRate), hal->getMaxLaneCount()) : + 4; + + return LinkConfiguration (&this->linkPolicy, + laneCount, linkRate, + this->hal->getEnhancedFraming(), + linkUseMultistream(), + false, /* disablePostLTRequest */ + this->bFECEnable); +} + +LinkConfiguration ConnectorImpl::getActiveLinkConfig() +{ + DP_ASSERT(hal); + + return activeLinkConfig; +} + +void ConnectorImpl::beginCompoundQuery() +{ + if (linkGuessed && (main->getSorIndex() != DP_INVALID_SOR_INDEX)) + { + assessLink(); + } + + DP_ASSERT( !compoundQueryActive && "Previous compoundQuery was not ended."); + compoundQueryActive = true; + compoundQueryCount = 0; + compoundQueryResult = true; + compoundQueryLocalLinkPBN = 0; + + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + + if (i->getTopologyAddress().size() <= 1) + { + dev->bandwidth.lastHopLinkConfig = highestAssessedLC; + dev->bandwidth.compound_query_state.totalTimeSlots = 63; + dev->bandwidth.compound_query_state.timeslots_used_by_query = 0; + continue; + } + + if (!this->linkUseMultistream()) + continue; + + // Initialize starting conditions + // + // Note: this compound query code assumes that the total bandwidth is + // available for the configuration being queried. This ignores the + // concentrator case where some bandwidth may be in use by streams not + // controlled by this driver instance. Concentrators are currently not + // supported. + dev->bandwidth.compound_query_state.timeslots_used_by_query = 0; + dev->inferLeafLink(&dev->bandwidth.compound_query_state.totalTimeSlots); + + // + // Some VBIOS leave the branch in stale state and allocatePayload request queued + // at branch end gets processed much later causing the FreePBN returned to be stale. 
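+ // (Background, as a rough worked example: PBN expresses stream bandwidth in
+ //  units of 54/64 MB/s with a 0.6% margin. A 1920x1080@60 stream at 24 bpp
+ //  (148.5 MHz pixel clock, ~445.5 MB/s) therefore needs about
+ //      445.5 * 64/54 * 1.006 ~= 531 PBN,
+ //  which inferLeafLink()/slotsForPBN() translate into MTP timeslots out of
+ //  the 63 usable timeslots per MTP (cf. totalTimeSlots above). Figures are
+ //  illustrative, not from this file.)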
+ // Clear the PBN in case EPR reports 0 free PBN when we have not explicitly requested + // for it, to clear up any previous stale allocations + // + if (dev->bandwidth.compound_query_state.totalTimeSlots == 0 && + !dev->payloadAllocated && dev->plugged) + { + GroupImpl *group = dev->activeGroup; + if (group != NULL) + { + NakData nakData; + Address devAddress = dev->getTopologyAddress(); + + AllocatePayloadMessage allocate; + unsigned sink = 0; // hardcode the audio sink to 0th in the device. + allocate.set(devAddress.parent(), devAddress.tail(), + dev->isAudioSink() ? 1 : 0, group->streamIndex, 0, &sink, true); + + ((DeviceImpl *)dev)->bandwidth.enum_path.dataValid = false; + + if (group->parent->messageManager->send(&allocate, nakData)) + dev->inferLeafLink(&dev->bandwidth.compound_query_state.totalTimeSlots); + } + } + + // Clear assement state + dev->bandwidth.compound_query_state.bandwidthAllocatedForIndex = 0; + } +} + +// +// This call will be deprecated as soon as all clients move to the new API +// +bool ConnectorImpl::compoundQueryAttach(Group * target, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) +{ + ModesetInfo modesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz, + rasterWidth, rasterHeight, (rasterBlankStartX - rasterBlankEndX), + 0/*surfaceHeight*/, depth, rasterBlankStartX, rasterBlankEndX); + + DpModesetParams modesetParams(0, modesetInfo); + return compoundQueryAttach(target, modesetParams); +} + +bool ConnectorImpl::compoundQueryAttach(Group * target, + const DpModesetParams &modesetParams, // Modeset info + DscParams *pDscParams) // DSC parameters +{ + DP_ASSERT( compoundQueryActive ); + ModesetInfo localModesetInfo = modesetParams.modesetInfo; + + compoundQueryCount++; + + if (!modesetParams.modesetInfo.depth || !modesetParams.modesetInfo.pixelClockHz) + { + DP_ASSERT(!"DP-CONN> Params with zero value passed to query!"); + compoundQueryResult = false; + return false; + } + + // + // Bug 925211: In some case we need to clamp the supporting frequencies to <= 48KHz. + // Check if audio frequency is greater than 48Khz & is not overridden by regkey + // "ENABLE_AUDIO_BEYOND48K" simply return false. + // + if (((modesetParams.modesetInfo.twoChannelAudioHz > WAR_AUDIOCLAMPING_FREQ) + || (modesetParams.modesetInfo.eightChannelAudioHz > WAR_AUDIOCLAMPING_FREQ)) + && !(bEnableAudioBeyond48K)) + { + compoundQueryResult = false; + return false; + } + + bool bGpuDscSupported; + main->getDscCaps(&bGpuDscSupported); + + if (linkUseMultistream()) + { + LinkConfiguration lc; + if (this->preferredLinkConfig.isValid()) + lc = preferredLinkConfig; + else + lc = highestAssessedLC; + + if (pDscParams && (pDscParams->forceDsc != DSC_FORCE_DISABLE)) + { + bool bFecCapable = false; + + Device * newDev = target->enumDevices(0); + DeviceImpl * dev = (DeviceImpl *)newDev; + + if (dev && dev->isDSCPossible()) + { + if (dev->devDoingDscDecompression != dev) + { + // + // If DSC decoding is going to happen at sink's parent then + // we have to make sure the path from source to sink's parent + // is fec is capable. 
+ // Refer DP 1.4 Spec 5.4.5 + // + if(dev->address.size() == 2) + { + // + // if there is only one branch between source and sink then branch + // should be directly connected to source (sst-case) and dpcd cap + // should already be available. + // + bFecCapable = dev->parent->isFECSupported(); + } + else + { + // + // If there are multiple branches in the path, we have to check + // fecCapability field in epr reply to sink's parent's parent. + // Epr reply for each branch should already be updated with inferLeafLink. + // fecCapability field being true here means up to sink's parent, + // which is "downstream end of path" for sink's parent's parent, + // is fec capable. + // Refer DP 1.4 Spec 2.11.9.4.1 + // + bFecCapable = dev->parent->parent->isFECSupported(); + } + } + else + { + bFecCapable = dev->isFECSupported(); + } + } + + // Make sure panel/it's parent & GPU supports DSC and the whole path supports FEC + if (bGpuDscSupported && // If GPU supports DSC + this->isFECSupported() && // If GPU supports FEC + pDscParams && // If client sent DSC info + pDscParams->bCheckWithDsc && // If client wants to check with DSC + (dev && dev->isDSCPossible()) && // Either device or it's parent supports DSC + bFecCapable && // If path up to dsc decoding device supports FEC + (modesetParams.modesetInfo.bitsPerComponent != 6)) // DSC doesn't support bpc = 6 + { + DSC_INFO dscInfo; + MODESET_INFO modesetInfoDSC; + WAR_DATA warData; + NvU64 availableBandwidthBitsPerSecond = 0; + unsigned PPS[DSC_MAX_PPS_SIZE_DWORD]; + unsigned bitsPerPixelX16 = 0; + + if (!pDscParams->bitsPerPixelX16) + { + // + // For now, we will keep a pre defined value for bitsPerPixel for MST = 10 + // bitsPerPixelX16 = 160 + // + pDscParams->bitsPerPixelX16 = PREDEFINED_DSC_MST_BPPX16; + } + + bitsPerPixelX16 = pDscParams->bitsPerPixelX16; + + if (!this->preferredLinkConfig.isValid()) + { + lc.enableFEC(true); + } + + dpMemZero(PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD); + dpMemZero(&dscInfo, sizeof(DSC_INFO)); + + // Populate DSC related info for PPS calculations + populateDscCaps(&dscInfo, dev->devDoingDscDecompression, pDscParams->forcedParams); + + // populate modeset related info for PPS calculations + populateDscModesetInfo(&modesetInfoDSC, &modesetParams); + + // checking for DSC v1.1 and YUV combination + if ( (dscInfo.sinkCaps.algorithmRevision.versionMajor == 1) && + (dscInfo.sinkCaps.algorithmRevision.versionMinor == 1) && + (modesetParams.colorFormat == dpColorFormat_YCbCr444 )) + { + DP_LOG(("WARNING: DSC v1.2 or higher is recommended for using YUV444")); + DP_LOG(("Current version is 1.1")); + } + + availableBandwidthBitsPerSecond = lc.minRate * 8 * lc.lanes; + + warData.dpData.linkRateHz = lc.peakRate; + warData.dpData.laneCount = lc.lanes; + warData.dpData.dpMode = DSC_DP_MST; + warData.dpData.hBlank = modesetParams.modesetInfo.rasterWidth - modesetParams.modesetInfo.surfaceWidth; + warData.connectorType = DSC_DP; + + if ((DSC_GeneratePPS(&dscInfo, &modesetInfoDSC, + &warData, availableBandwidthBitsPerSecond, + (NvU32*)(PPS), + (NvU32*)(&bitsPerPixelX16))) != NVT_STATUS_SUCCESS) + { + if (pDscParams->forceDsc == DSC_FORCE_ENABLE) + { + // If DSC is force enabled then return failure here + compoundQueryResult = false; + pDscParams->bEnableDsc = false; + return false; + } + else + { + // If PPS calculation failed then try without DSC + pDscParams->bEnableDsc = false; + lc.enableFEC(false); + goto nonDscDpIMP; + } + } + else + { + pDscParams->bEnableDsc = true; + compoundQueryResult = true; + 
localModesetInfo.bEnableDsc = true; + localModesetInfo.depth = bitsPerPixelX16; + + if (dev->devDoingDscDecompression != dev) + { + // + // Device's parent is doing DSC decompression so we need to check + // if device's parent can send uncompressed stream to Sink. + // + unsigned mode_pbn; + + mode_pbn = pbnForMode(modesetParams.modesetInfo); + + // + // As Device's Parent is doing DSC decompression, this is leaf device and + // complete available bandwidth at this node is available for requested mode. + // + if (mode_pbn > dev->bandwidth.enum_path.total) + { + compoundQueryResult = false; + pDscParams->bEnableDsc = false; + return false; + } + } + + if (pDscParams->pDscOutParams != NULL) + { + // + // If requested then DP Library is supposed to return if mode is + // possible with DSC and calculated PPS and bits per pixel. + // + dpMemCopy(pDscParams->pDscOutParams->PPS, PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD); + pDscParams->bitsPerPixelX16 = bitsPerPixelX16; + } + else + { + // + // Client only wants to know if mode is possible or not but doesn't + // need all calculated PPS parameters in case DSC is required. Do nothing. + // + } + } + } + } + +nonDscDpIMP: + // I. Evaluate use of local link bandwidth + + // Calculate the PBN required + unsigned base_pbn, slots, slots_pbn; + lc.pbnRequired(localModesetInfo, base_pbn, slots, slots_pbn); + + // Accumulate the amount of PBN rounded up to nearest timeslot + compoundQueryLocalLinkPBN += slots_pbn; + if (compoundQueryLocalLinkPBN > lc.pbnTotal()) + compoundQueryResult = false; + + // Verify the min blanking, etc + Watermark dpinfo; + + if (this->isFECSupported()) + { + if (!isModePossibleMSTWithFEC(lc, localModesetInfo, &dpinfo)) + { + compoundQueryResult = false; + } + } + else + { + if (!isModePossibleMST(lc, localModesetInfo, &dpinfo)) + { + compoundQueryResult = false; + } + } + + for(Device * d = target->enumDevices(0); d; d = target->enumDevices(d)) + { + DeviceImpl * i = (DeviceImpl *)d; + + // Allocate bandwidth for the entire path to the root + // NOTE: Above we're already handle the local link + DeviceImpl * tail = i; + while (tail && tail->getParent()) + { + // Have we already accounted for this stream? 
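+                // Each compoundQueryAttach() call sets its own bit in bandwidthAllocatedForIndex,
+                // so a stream's timeslots are charged to each branch device on the path only once.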
+                if (!(tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex & (1 << compoundQueryCount)))
+                {
+                    tail->bandwidth.compound_query_state.bandwidthAllocatedForIndex |= (1 << compoundQueryCount);
+
+                    LinkConfiguration * linkConfig = tail->inferLeafLink(NULL);
+                    tail->bandwidth.compound_query_state.timeslots_used_by_query += linkConfig->slotsForPBN(base_pbn);
+
+                    if ( tail->bandwidth.compound_query_state.timeslots_used_by_query > tail->bandwidth.compound_query_state.totalTimeSlots)
+                        compoundQueryResult = false;
+                }
+                tail = (DeviceImpl*)tail->getParent();
+            }
+        }
+    }
+    else    // SingleStream case
+    {
+        DeviceImpl * nativeDev = findDeviceInList(Address());
+
+        if (compoundQueryCount != 1)
+        {
+            compoundQueryResult = false;
+            return false;
+        }
+
+        if (nativeDev && (nativeDev->connectorType == connectorHDMI))
+        {
+            if (modesetParams.colorFormat == dpColorFormat_YCbCr420)
+            {
+                if ((nativeDev->maxTmdsClkRate) &&
+                    (nativeDev->maxTmdsClkRate <
+                    ((modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth /24)/2)))
+                {
+                    compoundQueryResult = false;
+                    return false;
+                }
+            }
+            else
+            {
+                if ((nativeDev->maxTmdsClkRate) &&
+                    (nativeDev->maxTmdsClkRate <
+                    (modesetParams.modesetInfo.pixelClockHz * modesetParams.modesetInfo.depth /24)))
+                {
+                    compoundQueryResult = false;
+                    return false;
+                }
+            }
+        }
+
+        LinkConfiguration lc = highestAssessedLC;
+
+        // Check if there is a special request from the client.
+        if (this->preferredLinkConfig.isValid())
+        {
+            lc = preferredLinkConfig;
+        }
+        else
+        {
+            //
+            // Always check for DP IMP without FEC overhead first before
+            // trying with DSC/FEC
+            //
+            lc.enableFEC(false);
+        }
+
+        // If no valid native device was found, force legacy DP IMP.
+        if (!nativeDev)
+        {
+            compoundQueryResult = this->willLinkSupportModeSST(lc, modesetParams.modesetInfo);
+        }
+        else if ((pDscParams && (pDscParams->forceDsc == DSC_FORCE_ENABLE)) ||      // DD has forced DSC Enable
+                 (modesetParams.modesetInfo.mode == DSC_DUAL) ||                    // DD decided to use 2 Head 1 OR mode
+                 (!this->willLinkSupportModeSST(lc, modesetParams.modesetInfo)))    // Mode is not possible without DSC
+        {
+            // If DP IMP fails without DSC or client requested to force DSC
+            if (pDscParams && pDscParams->forceDsc != DSC_FORCE_DISABLE)
+            {
+                // Check if panel and GPU both support DSC or not.
Also check if panel supports FEC + if (bGpuDscSupported && // if GPU supports DSC + this->isFECSupported() && // If GPU supports FEC + pDscParams && // if client sent DSC info + pDscParams->bCheckWithDsc && // if client wants to check with DSC + nativeDev->isDSCPossible() && // if device supports DSC decompression + (nativeDev->isFECSupported() || main->isEDP()) && // if device supports FEC decoding or is an DSC capable eDP panel which doesn't support FEC + (modesetParams.modesetInfo.bitsPerComponent != 6)) // DSC doesn't support bpc = 6 + { + DSC_INFO dscInfo; + MODESET_INFO modesetInfoDSC; + WAR_DATA warData; + NvU64 availableBandwidthBitsPerSecond = 0; + unsigned PPS[DSC_MAX_PPS_SIZE_DWORD]; + unsigned bitsPerPixelX16 = pDscParams->bitsPerPixelX16; + + if (!this->preferredLinkConfig.isValid() && nativeDev->isFECSupported()) + { + lc.enableFEC(true); + } + + dpMemZero(PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD); + dpMemZero(&dscInfo, sizeof(DSC_INFO)); + + // Populate DSC related info for PPS calculations + populateDscCaps(&dscInfo, nativeDev->devDoingDscDecompression, pDscParams->forcedParams); + + // Populate modeset related info for PPS calculations + populateDscModesetInfo(&modesetInfoDSC, &modesetParams); + + // checking for DSC v1.1 and YUV combination + if ( (dscInfo.sinkCaps.algorithmRevision.versionMajor == 1) && + (dscInfo.sinkCaps.algorithmRevision.versionMinor == 1) && + (modesetParams.colorFormat == dpColorFormat_YCbCr444 )) + { + DP_LOG(("WARNING: DSC v1.2 or higher is recommended for using YUV444")); + DP_LOG(("Current version is 1.1")); + } + + availableBandwidthBitsPerSecond = lc.minRate * 8 * lc.lanes; + + warData.dpData.linkRateHz = lc.peakRate; + warData.dpData.laneCount = lc.lanes; + warData.dpData.hBlank = modesetParams.modesetInfo.rasterWidth - modesetParams.modesetInfo.surfaceWidth; + warData.dpData.dpMode = DSC_DP_SST; + warData.connectorType = DSC_DP; + + if ((DSC_GeneratePPS(&dscInfo, &modesetInfoDSC, + &warData, availableBandwidthBitsPerSecond, + (NvU32*)(PPS), + (NvU32*)(&bitsPerPixelX16))) != NVT_STATUS_SUCCESS) + { + compoundQueryResult = false; + pDscParams->bEnableDsc = false; + } + else + { + localModesetInfo.bEnableDsc = true; + localModesetInfo.depth = bitsPerPixelX16; + LinkConfiguration lowestSelected; + bool bIsModeSupported = false; + + + if (this->preferredLinkConfig.isValid()) + { + // Check if mode is possible with preferred link config + bIsModeSupported = willLinkSupportModeSST(lc, localModesetInfo); + } + else + { + // + // Check if mode is possible with calculated bits_per_pixel. + // Check with all possible link configs and not just highest + // assessed because with DSC, mode can fail with higher + // link config and pass for lower one. This is because + // if raster parameters are really small and DP bandwidth is + // very high then we may end up with some TU with 0 active + // symbols in SST. This may cause HW hang and so DP IMP rejects + // this mode. Refer Bug 200379426. + // + bIsModeSupported = getValidLowestLinkConfig(lc, lowestSelected, localModesetInfo); + } + + if (!bIsModeSupported) + { + pDscParams->bEnableDsc = false; + compoundQueryResult = false; + } + else + { + pDscParams->bEnableDsc = true; + compoundQueryResult = true; + + if (pDscParams->pDscOutParams != NULL) + { + // + // If requested then DP Library is supposed to return if mode is + // possible with DSC and calculated PPS and bits per pixel. 
+ // + dpMemCopy(pDscParams->pDscOutParams->PPS, PPS, sizeof(unsigned) * DSC_MAX_PPS_SIZE_DWORD); + pDscParams->bitsPerPixelX16 = bitsPerPixelX16; + } + else + { + // + // Client only wants to know if mode is possible or not but doesn't + // need all calculated PPS parameters in case DSC is required. Do nothing. + // + } + } + } + } + else + { + // Either GPU or Sink doesn't support DSC + compoundQueryResult = false; + } + } + else + { + // Client hasn't sent DSC params info or has asked to force disable DSC. + compoundQueryResult = false; + } + } + else + { + // Mode was successful + compoundQueryResult = true; + } + } + + return compoundQueryResult; +} +void ConnectorImpl::populateDscModesetInfo(MODESET_INFO* pModesetInfo, const DpModesetParams* pModesetParams) +{ + pModesetInfo->pixelClockHz = pModesetParams->modesetInfo.pixelClockHz; + pModesetInfo->activeWidth = pModesetParams->modesetInfo.surfaceWidth; + pModesetInfo->activeHeight = pModesetParams->modesetInfo.surfaceHeight; + pModesetInfo->bitsPerComponent = pModesetParams->modesetInfo.bitsPerComponent; + + if (pModesetParams->colorFormat == dpColorFormat_RGB) + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_RGB; + } + else if (pModesetParams->colorFormat == dpColorFormat_YCbCr444) + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr444; + } + else if (pModesetParams->colorFormat == dpColorFormat_YCbCr422) + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr422; + } + else if (pModesetParams->colorFormat == dpColorFormat_YCbCr420) + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_YCbCr420; + } + else + { + pModesetInfo->colorFormat = NVT_COLOR_FORMAT_RGB; + } + + if (pModesetParams->modesetInfo.mode == DSC_DUAL) + { + pModesetInfo->bDualMode = true; + } + else + { + pModesetInfo->bDualMode = false; + } + + if (pModesetParams->modesetInfo.mode == DSC_DROP) + { + pModesetInfo->bDropMode = true; + } + else + { + pModesetInfo->bDropMode = false; + } +} + +void ConnectorImpl::populateDscGpuCaps(DSC_INFO* dscInfo) +{ + unsigned encoderColorFormatMask; + unsigned lineBufferSizeKB; + unsigned rateBufferSizeKB; + unsigned bitsPerPixelPrecision; + unsigned maxNumHztSlices; + unsigned lineBufferBitDepth; + + // Get GPU DSC capabilities + main->getDscCaps(NULL, + &encoderColorFormatMask, + &lineBufferSizeKB, + &rateBufferSizeKB, + &bitsPerPixelPrecision, + &maxNumHztSlices, + &lineBufferBitDepth); + + if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB) + { + dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_RGB; + } + + if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444) + { + dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444; + } + + if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) + { + dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422; + } + + if (encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) + { + dscInfo->gpuCaps.encoderColorFormatMask |= DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420; + } + + dscInfo->gpuCaps.lineBufferSize = lineBufferSizeKB; + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; + } + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = 
DSC_BITS_PER_PIXEL_PRECISION_1_8; + } + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4; + } + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2; + } + + if (bitsPerPixelPrecision == NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1) + { + dscInfo->gpuCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1; + } + + dscInfo->gpuCaps.maxNumHztSlices = maxNumHztSlices; + + dscInfo->gpuCaps.lineBufferBitDepth = lineBufferBitDepth; +} + +void ConnectorImpl::populateDscSinkCaps(DSC_INFO* dscInfo, DeviceImpl * dev) +{ + // Early return if dscInfo or dev is NULL + if ((dscInfo == NULL) || (dev == NULL)) + { + return; + } + + if (dev->dscCaps.dscDecoderColorFormatCaps.bRgb) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_RGB; + } + + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCr444) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444; + } + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422; + } + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422; + } + if (dev->dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420) + { + dscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420; + } + + if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_16) + { + dscInfo->sinkCaps.bitsPerPixelPrecision |= DSC_BITS_PER_PIXEL_PRECISION_1_16; + } + + if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_16) + { + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; + } + + if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_8) + { + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_8; + } + + if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_4) + { + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4; + } + + if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1_2) + { + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2; + } + + if (dev->dscCaps.dscBitsPerPixelIncrement == BITS_PER_PIXEL_PRECISION_1) + { + dscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1; + } + + // Decoder color depth mask + if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_12) + { + dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS; + } + + if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_10) + { + dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS; + } + + if (dev->dscCaps.dscDecoderColorDepthMask & DSC_BITS_PER_COLOR_MASK_8) + { + dscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS; + } + + dscInfo->sinkCaps.maxSliceWidth = dev->dscCaps.dscMaxSliceWidth; + dscInfo->sinkCaps.sliceCountSupportedMask = dev->dscCaps.sliceCountSupportedMask; + dscInfo->sinkCaps.maxNumHztSlices = dev->dscCaps.maxSlicesPerSink; + dscInfo->sinkCaps.lineBufferBitDepth = dev->dscCaps.lineBufferBitDepth; + dscInfo->sinkCaps.bBlockPrediction = dev->dscCaps.bDscBlockPredictionSupport; + 
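+    // The DSC algorithm revision recorded below is what compoundQueryAttach() checks
+    // when it warns about DSC 1.1 combined with YUV444.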
dscInfo->sinkCaps.algorithmRevision.versionMajor = dev->dscCaps.versionMajor; + dscInfo->sinkCaps.algorithmRevision.versionMinor = dev->dscCaps.versionMinor; + dscInfo->sinkCaps.peakThroughputMode0 = dev->dscCaps.dscPeakThroughputMode0; + dscInfo->sinkCaps.peakThroughputMode1 = dev->dscCaps.dscPeakThroughputMode1; + dscInfo->sinkCaps.maxBitsPerPixelX16 = dev->dscCaps.maxBitsPerPixelX16; + + if (main->isEDP()) + { + // If eDP panel does not populate peak DSC throughput, use _MODE0_340. + if (!dscInfo->sinkCaps.peakThroughputMode0) + { + dscInfo->sinkCaps.peakThroughputMode0 = NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_340; + } + + // If eDP panel does not populate max slice width, use 2560. + if (!dscInfo->sinkCaps.maxSliceWidth) + { + dscInfo->sinkCaps.maxSliceWidth = 2560; + } + } +} + +void ConnectorImpl::populateForcedDscParams(DSC_INFO* dscInfo, DSC_INFO::FORCED_DSC_PARAMS * forcedParams) +{ + if(forcedParams) + { + dscInfo->forcedDscParams.sliceWidth = forcedParams->sliceWidth; + dscInfo->forcedDscParams.sliceHeight = forcedParams->sliceHeight; + dscInfo->forcedDscParams.sliceCount = forcedParams->sliceCount; + dscInfo->forcedDscParams.dscRevision = forcedParams->dscRevision; + } + +} + +void ConnectorImpl::populateDscCaps(DSC_INFO* dscInfo, DeviceImpl * dev, DSC_INFO::FORCED_DSC_PARAMS * forcedParams) +{ + // Sink DSC capabilities + populateDscSinkCaps(dscInfo, dev); + + // GPU DSC capabilities + populateDscGpuCaps(dscInfo); + + // Forced DSC params + populateForcedDscParams(dscInfo, forcedParams); +} + +bool ConnectorImpl::endCompoundQuery() +{ + DP_ASSERT( compoundQueryActive && "Spurious compoundQuery end."); + compoundQueryActive = false; + return compoundQueryResult; +} + +// +// Set link to HDMI mode +// +void ConnectorImpl::enableLinkHandsOff() +{ + if (isLinkQuiesced) + { + DP_ASSERT(0 && "Link is already quiesced."); + return; + } + + isLinkQuiesced = true; + + // Set the Lane Count to 0 to shut down the link. + powerdownLink(); +} + +// +// Restore from HDMI mode +// +void ConnectorImpl::releaseLinkHandsOff() +{ + if (!isLinkQuiesced) + { + DP_ASSERT(0 && "Link is already in use."); + return; + } + + isLinkQuiesced = false; + assessLink(); +} + +// +// Timer callback for event management +// Uses: fireEvents() +void ConnectorImpl::expired(const void * tag) +{ + if (tag == &tagFireEvents) + fireEventsInternal(); + else + DP_ASSERT(0); +} + +// Generate Events. +// useTimer specifies whether we fire the events on the timer +// context, or this context. +void ConnectorImpl::fireEvents() +{ + bool eventsPending = false; + + // Don't fire any events if we're not done with the modeset + if (!intransitionGroups.isEmpty()) + { + return; + } + + // Walk through the devices looking for state changes + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next) + { + DeviceImpl * dev = (DeviceImpl *)e; + + if (dev->isPendingNewDevice() || + dev->isPendingLostDevice() || + dev->isPendingCableOk() || + dev->isPendingZombie() || + dev->isPendingHDCPCapDone()) + eventsPending = true; + } + + // If there were any queue an immediate callback to handle them + if (eventsPending || isDiscoveryDetectComplete) + { + // Queue the fireEventsInternal. 
+ // It's critical we don't allow this to be processed in a sleep + // since DD may do a modeset in response + timer->queueCallback(this, &tagFireEvents, 0, false /* not allowed in sleep */); + } +} + +void ConnectorImpl::fireEventsInternal() +{ + ListElement * next; + Address::StringBuffer sb; + DP_USED(sb); + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next) + { + next = e->next; + DeviceImpl * dev = (DeviceImpl *)e; + + if (dev->isPendingLostDevice()) + { + // + // For bug 2335599, where the connected monitor is switched to MST + // from SST after S3 resume, we need to disconnect SST monitor + // early before adding MST monitors. This will avoid client from + // mistaking the disconnection of SST monitor later as parent of + // MST monitors, which will wrongly disconnect MST monitors too. + // + if (!(!dev->multistream && linkUseMultistream()) && + bDeferNotifyLostDevice) + { + continue; + } + dev->shadow.plugged = false; + DP_LOG(("DPCONN> Lost device %s", dev->address.toString(sb))); + Address::NvU32Buffer addrBuffer; + dpMemZero(addrBuffer, sizeof(addrBuffer)); + dev->address.toNvU32Buffer(addrBuffer); + NV_DPTRACE_WARNING(LOST_DEVICE, dev->address.size(), addrBuffer[0], addrBuffer[1], + addrBuffer[2], addrBuffer[3]); + sink->lostDevice(dev); +#if defined(DEBUG) + // Assert that this device is not contained in any groups. + List* groupLists[] = { + &activeGroups, + &inactiveGroups + }; + + for (unsigned i = 0; i < sizeof(groupLists) / sizeof(groupLists[0]); i++) + { + List *groupList = groupLists[i]; + for (ListElement *e = groupList->begin(); e != groupList->end(); e = e->next) + { + GroupImpl *g = (GroupImpl *)e; + DP_ASSERT(!g->contains(dev)); + } + } +#endif + delete dev; + continue; + } + + if (dev->isPendingCableOk()) + { + dev->shadow.cableOk = dev->isCableOk(); + sink->notifyCableOkStateChange(dev, dev->shadow.cableOk); + } + + if (dev->isPendingZombie()) + { + dev->shadow.zombie = dev->isZombie(); + if (dev->complianceDeviceEdidReadTest) + { + // the zombie event will be hidden for DD/OS + DP_LOG(("DPCONN> Compliance: Device Internal Zombie? : %d 0x%x", dev->shadow.zombie ? 1 : 0, dev)); + return; + } + bMitigateZombie = false; + DP_LOG(("DPCONN> Zombie? : %d 0x%x", dev->shadow.zombie ? 1 : 0, dev)); + sink->notifyZombieStateChange(dev, dev->shadow.zombie); + } + + if (dev->isPendingHDCPCapDone()) + { + DP_ASSERT(dev->isHDCPCap != Indeterminate && "HDCPCap reading is not done!!"); + if (dev->isHDCPCap != Indeterminate) + { + // Notify RM about the new Bcaps.. 
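+                // Cache the sink's BCAPS/BKSV in the RM DFP cache so RM has them for HDCP on the active head.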
+                if (dev->isActive())
+                {
+                    RmDfpCache dfpCache = {0};
+                    dfpCache.updMask = 0;
+                    dfpCache.bcaps = *dev->BCAPS;
+                    for (unsigned i=0; i<HDCP_KSV_SIZE; i++)
+                        dfpCache.bksv[i] = dev->BKSV[i];
+
+                    dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS);
+                    dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV);
+                    dev->connector->main->rmUpdateDynamicDfpCache(dev->activeGroup->headIndex, &dfpCache, False);
+                }
+
+                sink->notifyHDCPCapDone(dev, !!dev->isHDCPCap);
+                DP_LOG(("DPCONN> Notify HDCP cap Done : %x", !!dev->isHDCPCap));
+            }
+            else
+            {
+                sink->notifyHDCPCapDone(dev, false);
+            }
+
+            dev->shadow.hdcpCapDone = true;
+        }
+
+        bool mustDisconnect = dev->isMustDisconnect();
+        if (dev->shadow.mustDisconnect != mustDisconnect && mustDisconnect)
+        {
+            dev->shadow.mustDisconnect = mustDisconnect;
+            sink->notifyMustDisconnect(dev->activeGroup);
+        }
+    }
+
+    for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next)
+    {
+        next = e->next;
+        DeviceImpl * dev = (DeviceImpl *)e;
+
+        if (dev->isPendingNewDevice())
+        {
+            if (bReportDeviceLostBeforeNew && bDeferNotifyLostDevice)
+            {
+                // Let's try to find if there's a device pending lost on the same address
+                DeviceImpl* _device = NULL;
+                for (ListElement * le = deviceList.begin(); le != deviceList.end(); le = le->next)
+                {
+                    _device = (DeviceImpl*)le;
+                    if ((_device->address == dev->address) && (_device->plugged != dev->plugged))
+                        break;
+                }
+                if (_device &&
+                    (_device->address == dev->address) &&
+                    (_device->plugged != dev->plugged))
+                {
+                    // If yes, then we need to report this lost device first.
+                    _device->shadow.plugged = false;
+                    DP_LOG(("DPCONN> Lost device 0x%x", _device));
+                    sink->lostDevice(_device);
+                    DP_ASSERT(!_device->activeGroup && "DD didn't remove panel from group");
+                    delete _device;
+                }
+            }
+            dev->shadow.plugged = true;
+            if (dev->isDSCPossible())
+            {
+                DP_LOG(("DPCONN> New device %s | Native DSC Capability - %s | DSC Decompression Device - %s",
+                        dev->address.toString(sb),
+                        (dev->isDSCSupported() ? "Capable" : "Not Capable"),
+                        (dev->devDoingDscDecompression) ? dev->devDoingDscDecompression->address.toString(sb):"NA"));
+            }
+            else
+            {
+                DP_LOG(("DPCONN> New device %s", dev->address.toString(sb)));
+            }
+
+            Address::NvU32Buffer addrBuffer;
+            dpMemZero(addrBuffer, sizeof(addrBuffer));
+            dev->address.toNvU32Buffer(addrBuffer);
+            NV_DPTRACE_INFO(NEW_SINK_REPORTED, dev->address.size(), addrBuffer[0], addrBuffer[1],
+                    addrBuffer[2], addrBuffer[3]);
+
+            sink->newDevice(dev);
+        }
+    }
+
+    if (isDiscoveryDetectComplete)
+    {
+        //
+        // Bug 200236666 :
+        // isDiscoveryDetectComplete can be set when we process a new device after
+        // completing last edid read.
In such scenario we will send notifyDetectComplete + // before newDevice for that sink has been sent to DD + // a/ sink->newDevice(dev) above can trigger the pending edid read + // b/ after last edid read completes (::mstEdidCompleted), ::processNewDevice + // will set the plugged flag for new device + // c/ this will queue pendingNewDevice event callback for the last device pending discovery + // d/ isDiscoveryDetectComplete flag set during b/ will trigger a + // premature notifyDetectComplete to DD before pendingNewDevice callback + // To fix above scenario : check if there is any newly pending new/lost device + // if yes, then defer sending notifyDetectComplete till next callback + // + bool bDeferNotifyDetectComplete = false; + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = next) + { + next = e->next; + DeviceImpl * dev = (DeviceImpl *)e; + + if (dev->isPendingNewDevice() || dev->isPendingLostDevice()) + { + bDeferNotifyDetectComplete = true; + DP_ASSERT(0 && "DP-CONN> Defer notifyDetectComplete as a new/lost device is pending!"); + break; + } + } + + if (!bDeferNotifyDetectComplete) + { + isDiscoveryDetectComplete = false; + DP_LOG(("DP-CONN> NotifyDetectComplete")); + sink->notifyDetectComplete(); + } + } + +} + +// +// This call will be deprecated as soon as all clients move to the new API +// +bool ConnectorImpl::isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) +{ + ModesetInfo modesetInfo = ModesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz, + rasterWidth, rasterHeight, (rasterBlankStartX - rasterBlankEndX), 0 /*surfaceHeight*/, + depth, rasterBlankStartX, rasterBlankEndX); + return isHeadShutDownNeeded(target, headIndex, modesetInfo); +} + +// +// Head shutdown will be needed if any of the following conditions are true: +// a. Link rate is going lower than current +// b. Head is activated as MST +// +bool ConnectorImpl::isHeadShutDownNeeded(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + ModesetInfo modesetInfo) +{ + if (linkUseMultistream()) + { + return true; + } + if (activeGroups.isEmpty()) + { + return false; + } + + bool bHeadShutdownNeeded = true; + LinkConfiguration lowestSelected; + + // Force highestLink config in SST + bool bSkipLowestConfigCheck = false; + bool bIsModeSupported = false; + LinkConfiguration maxLc = getMaxLinkConfig(); + lowestSelected = maxLc; + GroupImpl* targetImpl = (GroupImpl*)target; + + // Certain panels only work when link train to highest linkConfig in SST mode. + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->forceMaxLinkConfig()) + { + bSkipLowestConfigCheck = true; + } + } + + // + // Check if there is a special request from the client, + // If so, skip lowering down the link config. 
+ // + if (this->preferredLinkConfig.isValid()) + { + lowestSelected = preferredLinkConfig; + bSkipLowestConfigCheck = true; + } + + // If the flag is set, simply neglect downgrading to lowest possible linkConfig + if (!bSkipLowestConfigCheck) + { + LinkConfiguration lConfig = lowestSelected; + + bIsModeSupported = getValidLowestLinkConfig(lConfig, lowestSelected, modesetInfo); + } + else + { + if (this->willLinkSupportModeSST(lowestSelected, modesetInfo)) + { + bIsModeSupported = true; + } + } + + if (bIsModeSupported) + { + // + // This is to handle a case where we query current link config + // to UEFI during boot time and it fails to return. Currently + // we do not handle this scenario and head is not shut down + // though it's actually required. This is to allow head shutdown + // in such cases. + // + if (!isLinkActive()) + { + return true; + } + + // For dual DP while changing link config, we need to shut + // down the head + if (lowestSelected.lanes == 8) + { + // If link config is changing, head shutdown will be needed. + if ((activeLinkConfig.lanes == lowestSelected.lanes) && + (activeLinkConfig.peakRate == lowestSelected.peakRate)) + { + bHeadShutdownNeeded = false; + } + } + // + // If link config is going lower then we need to shut down the + // head. If we link train to a lower config before reducing the + // mode, we will hang the HW since head would still be driving + // the higher mode at the time of link train. + // + else if ((lowestSelected.peakRate * lowestSelected.lanes) >= (activeLinkConfig.peakRate * activeLinkConfig.lanes)) + { + bHeadShutdownNeeded = false; + } + } + else + { + DP_ASSERT( 0 && "DP-CONN> This mode is not possible at any link configuration!"); + } + + if (targetImpl) + { + targetImpl->bIsHeadShutdownNeeded = bHeadShutdownNeeded; + } + + return bHeadShutdownNeeded; +} + +bool ConnectorImpl::isLinkTrainingNeededForModeset (ModesetInfo modesetInfo) +{ + // Force highestLink config in SST + bool bSkipLowestConfigCheck = false; + bool bIsModeSupported = false; + LinkConfiguration lowestSelected = getMaxLinkConfig(); + + if (linkUseMultistream()) + { + if (!isLinkActive()) + { + // If MST, we always need to link train if link is not active + return true; + } + else if (getMaxLinkConfig() != activeLinkConfig) + { + // + // If the link is active, we have to retrain, if active Link Config is + // not the highest possible Link Config. + // + return true; + } + else + { + // + // We don't have to retrain if link is active and at highest possible config + // since for MST we should always link train to highest possible Link Config. + // + return false; + } + } + + // + // Link training is needed if link is not alive OR alive but inactive + // ie., lane status reports symbol lock/interlane align/CR failures + // + if (isLinkLost() || !isLinkActive()) + { + return true; + } + + // + // Link training is needed if link config was previously guessed (not assessed by the driver). + // The link config is marked as guessed in below cases - + // a. Invalid link rate returned by UEFI + // b. When max link config is HBR3 and currently assessed by UEFI != HBR3 + // c. If a SOR is not assigned to display during link assessment + // + if (this->linkGuessed) + { + return true; + } + + // Certain panels only work when link train to highest linkConfig in SST mode. 
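+    // Scan every device on the connector; if any one requests forceMaxLinkConfig(), skip the
+    // lowest-link-config search below and validate the mode against the max config instead.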
+ for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->forceMaxLinkConfig()) + { + bSkipLowestConfigCheck = true; + } + } + + // + // Check if there is a special request from the client, + // If so, skip lowering down the link config. + // + if (this->preferredLinkConfig.isValid()) + { + lowestSelected = preferredLinkConfig; + bSkipLowestConfigCheck = true; + } + + // If the flag is set, simply neglect downgrading to lowest possible linkConfig + if (!bSkipLowestConfigCheck) + { + LinkConfiguration lConfig = lowestSelected; + + bIsModeSupported = getValidLowestLinkConfig(lConfig, lowestSelected, modesetInfo); + } + else + { + if (this->willLinkSupportModeSST(lowestSelected, modesetInfo)) + { + bIsModeSupported = true; + } + } + + // + // Link training is needed if requested mode/link config is + // different from the active mode/link config + // + if (bIsModeSupported) + { + if ((activeLinkConfig.lanes != lowestSelected.lanes) || + (activeLinkConfig.peakRate != lowestSelected.peakRate)) + { + return true; + } + } + else + { + DP_ASSERT( 0 && "DP-CONN> This mode is not possible at any link configuration!"); + } + + return false; +} + +bool DisplayPort::SetConfigSingleHeadMultiStreamMode(Group **targets, + NvU32 displayIDs[], + NvU32 numStreams, + DP_SINGLE_HEAD_MULTI_STREAM_MODE mode, + bool bSetConfig, + NvU8 vbiosPrimaryDispIdIndex, + bool bEnableAudioOverRightPanel) +{ + GroupImpl *pTargetImpl = NULL; + ConnectorImpl *pConnectorImpl = NULL; + ConnectorImpl *pPrevConnectorImpl = NULL; + + if (numStreams > NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS || numStreams <= 0) + { + DP_LOG(("DP-CONN> ERROR: in configuring single head multistream mode " + "invalid number of streams")); + return false; + } + + for (NvU32 iter = 0; iter < numStreams; iter++) + { + pTargetImpl = (GroupImpl*)targets[iter]; + + if(pTargetImpl == NULL) + { + DP_LOG(("DP-CONN> ERROR: in configuring single head multistream mode:" + "invalid target passed by client")); + return false; + } + + pConnectorImpl = (ConnectorImpl*) (pTargetImpl->parent); + + if (bSetConfig) + { + if (DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST == mode) + { + // + // Detach any active firmware groups before configuring singleHead dual SST + // + if (pTargetImpl->isHeadAttached() && pTargetImpl->headInFirmware) + { + pConnectorImpl->notifyDetachBegin(NULL); + pConnectorImpl->notifyDetachEnd(); + } + + if (displayIDs[iter] != pConnectorImpl->main->getRootDisplayId()) + { + DP_ASSERT( 0 && "DP-CONN> invalid single head multistream SST configuration !"); + return false; + } + + // 0th index is primary connector index, + // 1st is secondary connector index so on + if (iter > DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) + { + pPrevConnectorImpl->pCoupledConnector = pConnectorImpl; + if (iter == (numStreams - 1)) + { + pConnectorImpl->pCoupledConnector = + (ConnectorImpl*)((GroupImpl*)targets[DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY])->parent; + } + // Clear secondary connector's link guessed state + pConnectorImpl->linkGuessed = false; + } + + pPrevConnectorImpl = pConnectorImpl; + } + + pTargetImpl->singleHeadMultiStreamMode = mode; + pTargetImpl->singleHeadMultiStreamID = (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID)iter; + + // Save the 'Audio over Right Pannel' configuration in Connector Impl + // Use this configuration when SF gets programed. 
+ if (bEnableAudioOverRightPanel) + { + pConnectorImpl->bAudioOverRightPanel = true; + } + } + else + { + pTargetImpl->singleHeadMultiStreamMode = DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE; + pTargetImpl->singleHeadMultiStreamID = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY; + pConnectorImpl->pCoupledConnector = NULL; + pConnectorImpl->bAudioOverRightPanel = false; + } + } + + pConnectorImpl->main->configureSingleHeadMultiStreamMode(displayIDs, + numStreams, + (NvU32)mode, + bSetConfig, + vbiosPrimaryDispIdIndex); + + return true; +} + +// +// This call will be deprecated as soon as all clients move to the new API +// +bool ConnectorImpl::notifyAttachBegin(Group * target, // Group of panels we're attaching to this head + unsigned headIndex, + unsigned twoChannelAudioHz, // if you need 192khz stereo specify 192000 here + unsigned eightChannelAudioHz, // Same setting for multi channel audio. + // DisplayPort encodes 3-8 channel streams as 8 channel + NvU64 pixelClockHz, // Requested pixel clock for the mode + unsigned rasterWidth, + unsigned rasterHeight, + unsigned rasterBlankStartX, + unsigned rasterBlankEndX, + unsigned depth) +{ + ModesetInfo modesetInfo(twoChannelAudioHz, eightChannelAudioHz, pixelClockHz, rasterWidth, + rasterHeight, (rasterBlankStartX - rasterBlankEndX), 0 /*surfaceHeight*/, + depth, rasterBlankStartX, rasterBlankEndX); + + DpModesetParams modesetParams(headIndex, modesetInfo); + + return notifyAttachBegin (target, modesetParams); +} + +bool ConnectorImpl::setDeviceDscState(Device * dev, bool bEnableDsc) +{ + if (!((DeviceImpl *)dev)->isDSCPossible()) + { + return true; + } + + if (bEnableDsc) + { + if(!(((DeviceImpl *)dev)->setDscEnable(true /*bEnableDsc*/))) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!"); + return false; + } + } + else + { + bool bCurrDscEnable = false; + // Get Current DSC Enable State + if (!((DeviceImpl *)dev)->getDscEnable(&bCurrDscEnable)) + { + DP_LOG(("DP> Not able to get DSC Enable State!")); + } + + if (bCurrDscEnable) + { + // Before disabling DSC check if any active device with same parent has DSC enabled or not + bool bDisableDsc = true; + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + { + if((dev != i) && (((DeviceImpl *)i)->parent == ((DeviceImpl *)dev)->parent) && + (((DeviceImpl *)i)->activeGroup) && + (((DeviceImpl *)i)->activeGroup->isHeadAttached()) && + (((DeviceImpl *)i)->activeGroup->lastModesetInfo.bEnableDsc)) + { + DP_LOG(("Parent is shared among devices and other device is active so we can't disable DSC")); + bDisableDsc = false; + break; + } + } + + if(bDisableDsc && !((DeviceImpl *)dev)->setDscEnable(false /*bEnableDsc*/)) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!"); + return false; + } + } + } + return true; +} + +// +// Notify library before/after modeset (update) +// Here is what NAB essentially does: +// 0. Makes sure TMDS is not attached +// 1. Trains link to optimized link config ("optimized" depends on DP1.1, DP1.2) +// 2. Performs quick watermark check for IMP. If IMP is not possible, forces link, zombies devices +// 3. 
if anything of above fails, marks devices in given group as zombies +// +// Return : true - NAB passed +// false - NAB failed due to invalid params or link training failure +// Link configs are forced in case of link training failure +// +bool ConnectorImpl::notifyAttachBegin(Group * target, // Group of panels we're attaching to this head + const DpModesetParams &modesetParams) +{ + unsigned twoChannelAudioHz = modesetParams.modesetInfo.twoChannelAudioHz; + unsigned eightChannelAudioHz = modesetParams.modesetInfo.eightChannelAudioHz; + NvU64 pixelClockHz = modesetParams.modesetInfo.pixelClockHz; + unsigned rasterWidth = modesetParams.modesetInfo.rasterWidth; + unsigned rasterHeight = modesetParams.modesetInfo.rasterHeight; + unsigned rasterBlankStartX = modesetParams.modesetInfo.rasterBlankStartX; + unsigned rasterBlankEndX = modesetParams.modesetInfo.rasterBlankEndX; + unsigned depth = modesetParams.modesetInfo.depth; + bool bLinkTrainingStatus = true; + bool bEnableDsc = modesetParams.modesetInfo.bEnableDsc; + bool bEnableFEC; + + if(preferredLinkConfig.isValid()) + { + bEnableFEC = preferredLinkConfig.bEnableFEC; + } + else + { + DeviceImpl * nativeDev = findDeviceInList(Address()); + if (main->isEDP() && nativeDev) + { + // eDP can support DSC with and without FEC + bEnableFEC = bEnableDsc && nativeDev->isFECSupported(); + } + else + { + bEnableFEC = bEnableDsc; + } + } + + DP_LOG(("DPCONN> Notify Attach Begin (Head %d, pclk %d raster %d x %d %d bpp", + modesetParams.headIndex, pixelClockHz, rasterWidth, rasterHeight, depth)); + NV_DPTRACE_INFO(NOTIFY_ATTACH_BEGIN, modesetParams.headIndex, pixelClockHz, rasterWidth, rasterHeight, + depth, bEnableDsc, bEnableFEC); + + if (!depth || !pixelClockHz) + { + DP_ASSERT(!"DP-CONN> Params with zero value passed to query!"); + return false; + } + + if ((modesetParams.modesetInfo.mode == DSC_DUAL) || + (modesetParams.modesetInfo.mode == DSC_DROP)) + { + if ((modesetParams.headIndex == NV_SECONDARY_HEAD_INDEX_1) || + (modesetParams.headIndex == NV_SECONDARY_HEAD_INDEX_3)) + { + DP_ASSERT(!"DP-CONN> For Two Head One OR, client should send Primary Head index!"); + return false; + } + } + + if (bEnableDsc) + { + DP_LOG(("DPCONN> DSC Mode = %s", (modesetParams.modesetInfo.mode == DSC_SINGLE) ? "SINGLE" : "DUAL")); + } + + for (Device * dev = target->enumDevices(0); dev; dev = target->enumDevices(dev)) + { + Address::StringBuffer buffer; + DP_USED(buffer); + DP_LOG(("DPCONN> | %s (%s) |", dev->getTopologyAddress().toString(buffer), dev->isVideoSink() ? "VIDEO" : "BRANCH")); + } + + if (firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware) + { + DP_ASSERT(bIsUefiSystem || (0 && "DPCONN> Firmware still active on head. 
De-activating")); + } + + GroupImpl* targetImpl = (GroupImpl*)target; + targetImpl->bIsCurrentModesetGroup = true; + + DP_ASSERT(!(targetImpl->isHeadAttached() && targetImpl->bIsHeadShutdownNeeded) && "Head should have been shut down but it is still active!"); + + targetImpl->headInFirmware = false; + if (firmwareGroup) + { + ((GroupImpl *)firmwareGroup)->headInFirmware = false; + } + + if (firmwareGroup && activeGroups.contains((GroupImpl*)firmwareGroup)) + { + if (((GroupImpl *)firmwareGroup)->isHeadAttached()) + { + targetImpl->setHeadAttached(true); + } + activeGroups.remove((GroupImpl*)firmwareGroup); + inactiveGroups.insertBack((GroupImpl*)firmwareGroup); + } + + if (this->linkGuessed && (targetImpl->singleHeadMultiStreamMode != DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) + { + DP_ASSERT(!(this->linkGuessed) && "Link was not assessed previously. Probable reason: system was not in driver mode. Assessing now."); + this->assessLink(); + } + + DP_ASSERT(this->isLinkQuiesced == 0 && "According to bracketting calls TMDS/alternate DP still active!"); + + // Transfer the group to active list + inactiveGroups.remove(targetImpl); + activeGroups.insertBack(targetImpl); + intransitionGroups.insertFront(targetImpl); + + targetImpl->lastModesetInfo = ModesetInfo(twoChannelAudioHz, eightChannelAudioHz, + pixelClockHz, rasterWidth, rasterHeight, + (rasterBlankStartX - rasterBlankEndX), modesetParams.modesetInfo.surfaceHeight, + depth, rasterBlankStartX, rasterBlankEndX, bEnableDsc, modesetParams.modesetInfo.mode); + + targetImpl->headIndex = modesetParams.headIndex; + targetImpl->streamIndex = main->headToStream(modesetParams.headIndex, targetImpl->singleHeadMultiStreamID); + targetImpl->colorFormat = modesetParams.colorFormat; + + DP_ASSERT(!this->isLinkQuiesced && "TMDS is attached, NABegin is impossible!"); + + // Update the FEC enabled flag according to the mode requested. + this->bFECEnable |= bEnableFEC; + highestAssessedLC.enableFEC(this->bFECEnable); + + // if failed, we're guaranteed that assessed link rate didn't meet the mode requirements + // isZombie() will catch this + bLinkTrainingStatus = trainLinkOptimized(getMaxLinkConfig()); + + // if LT is successful, see if panel supports DSC and if so, set DSC enabled/disabled + // according to the mode requested. + if(bLinkTrainingStatus) + { + for (Device * dev = target->enumDevices(0); dev; dev = target->enumDevices(dev)) + { + if(!setDeviceDscState(dev, bEnableDsc)) + { + DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!"); + } + } + } + +// TODO: Need to check if we can completely remove DP_OPTION_HDCP_12_ENABLED and remove it + + beforeAddStream(targetImpl); + + if (linkUseMultistream()) + { + // Which pipeline to take the affect out of trigger ACT + if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST != targetImpl->singleHeadMultiStreamMode) || + (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY == targetImpl->singleHeadMultiStreamID)) + { + main->configureTriggerSelect(targetImpl->headIndex, targetImpl->singleHeadMultiStreamID); + } + } + + if (!linkUseMultistream() || main->supportMSAOverMST()) + { + bool enableInbandStereoSignaling = false; + + DP_ASSERT(activeGroups.isEmpty() == false); + + if (main->isInbandStereoSignalingSupported()) + { + enableInbandStereoSignaling = true; + } + + // + // Bug 200362535 + // setDpStereoMSAParameters does not cache the msa params. It will immediately + // apply just the stereo specific parameters. 
+        // This is required because we
+        // can toggle the msa params using nvidia control panel and in that scenario
+        // we do not get supervisor interrupts. Since SV interrupts do not occur the
+        // msa parameters do not get applied. So to avoid having to reboot to apply the
+        // stereo msa params setDpStereoMSAParameters is called.
+        //
+        // setDpMSAParameters will contain all msa params, including stereo cached.
+        // These will be applied during supervisor interrupt. So if we will get
+        // SV interrupts later the same stereo settings will be applied twice.
+        // first by setDpStereoMSAParameters and later by setDpMSAParameters.
+        //
+        main->setDpStereoMSAParameters(!enableInbandStereoSignaling, modesetParams.msaparams);
+        main->setDpMSAParameters(!enableInbandStereoSignaling, modesetParams.msaparams);
+    }
+
+    NV_DPTRACE_INFO(NOTIFY_ATTACH_BEGIN_STATUS, bLinkTrainingStatus);
+
+    bFromResumeToNAB = false;
+    targetImpl->bIsCurrentModesetGroup = false;
+    return bLinkTrainingStatus;
+}
+
+
+//
+// modesetCancelled True, when DD respected NAB failure and cancelled modeset.
+//                  False, when NAB succeeded, or DD didn't honor NAB failure
+//
+// Here is what NAE is supposed to do:
+// 1. modesetCancelled == TRUE, NAB failed:
+//        unzombie all devices and set linkForced to false; We have Status Quo for next modeset
+// 2. modesetCancelled == False, NAB failed:
+//        If NAB failed, linkForced is TRUE. NAE finds zombied devices and notifies DD about them.
+// 3. modesetCancelled == False, NAB succeeded:
+//        NAE is no-op. (but we have some special sanity code)
+//
+void ConnectorImpl::notifyAttachEnd(bool modesetCancelled)
+{
+    GroupImpl* currentModesetDeviceGroup = NULL;
+    DP_LOG(("DPCONN> Notify Attach End"));
+    NV_DPTRACE_INFO(NOTIFY_ATTACH_END);
+
+    bFromResumeToNAB = false;
+
+    if (intransitionGroups.isEmpty())
+    {
+        DP_ASSERT( 0 && "INVALID STATE: Modeset Group is NULL");
+        return;
+    }
+
+    currentModesetDeviceGroup = intransitionGroups.pop();
+
+    if (modesetCancelled)
+    {
+        currentModesetDeviceGroup->setHeadAttached(false);
+    }
+
+    currentModesetDeviceGroup->setHeadAttached(true);
+    RmDfpCache dfpCache = {0};
+    dfpCache.updMask = 0;
+    if (currentModesetDeviceGroup->isHeadAttached())
+    {
+        for (DeviceImpl * dev = (DeviceImpl *)currentModesetDeviceGroup->enumDevices(0);
+             dev; dev = (DeviceImpl *)currentModesetDeviceGroup->enumDevices(dev))
+        {
+            dfpCache.bcaps = *dev->BCAPS;
+            for (unsigned i=0; i<HDCP_KSV_SIZE; i++)
+                dfpCache.bksv[i] = dev->BKSV[i];
+
+            dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS);
+            dfpCache.updMask |= (1 << NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV);
+            main->rmUpdateDynamicDfpCache(dev->activeGroup->headIndex, &dfpCache, True);
+
+            // Remove this while enabling HDCP for MSC
+            break;
+        }
+    }
+
+    //
+    // Add rest of the streams (other than primary) in notifyAE, since this can't be done
+    // unless a SOR is attached to a Head (part of modeset), and trigger ACT immediate
+    //
+    if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == currentModesetDeviceGroup->singleHeadMultiStreamMode) &&
+        (currentModesetDeviceGroup->singleHeadMultiStreamID > DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY))
+    {
+        DP_ASSERT(linkUseMultistream() && "it should be multistream link to configure single head MST");
+        hal->payloadTableClearACT();
+        hal->payloadAllocate(currentModesetDeviceGroup->streamIndex,
+            currentModesetDeviceGroup->timeslot.begin, currentModesetDeviceGroup->timeslot.count);
+        main->configureTriggerSelect(currentModesetDeviceGroup->headIndex, currentModesetDeviceGroup->singleHeadMultiStreamID);
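+        // Trigger ACT right away so the payload table programmed above takes effect for this non-primary stream.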
main->triggerACT(); + } + + afterAddStream(currentModesetDeviceGroup); + + // + // Turn on the Authentication/Encryption back if previous is on. + // For DP1.1, let the upstream to turn it back. + // For DP1.2, we should turn the modeset back if it was on. + // The authentication will be called off during the modeset. + // + HDCPState hdcpState = {0}; + main->configureHDCPGetHDCPState(hdcpState); + if ((!hdcpState.HDCP_State_Authenticated) && (isHDCPAuthOn == true) + && (currentModesetDeviceGroup->hdcpEnabled)) + { + if (!this->linkUseMultistream()) + { + currentModesetDeviceGroup->hdcpEnabled = isHDCPAuthOn = false; + } + } + + fireEvents(); +} + +// Notify library before/after shutdown (update) +void ConnectorImpl::notifyDetachBegin(Group * target) +{ + if (!target) + target = firmwareGroup; + + NV_DPTRACE_INFO(NOTIFY_DETACH_BEGIN); + + GroupImpl * group = (GroupImpl*)target; + + DP_LOG(("DPCONN> Notify detach begin")); + DP_ASSERT((group->headInFirmware || group->isHeadAttached()) && "Disconnecting an inactive device"); + + // check to see if a pattern request was on. if yes clear the pattern + PatternInfo pattern_info; + pattern_info.lqsPattern = hal->getPhyTestPattern(); + // send control call to rm for the pattern + if (pattern_info.lqsPattern != LINK_QUAL_DISABLED) + { + pattern_info.lqsPattern = LINK_QUAL_DISABLED; + if (!main->physicalLayerSetTestPattern(&pattern_info)) + DP_ASSERT(0 && "Could not set the PHY_TEST_PATTERN"); + } + + beforeDeleteStream(group); + + // + // Set the trigger select so as to which frontend corresponding to the stream + // to take the affect + // + if(linkUseMultistream()) + { + main->configureTriggerSelect(group->headIndex, group->singleHeadMultiStreamID); + + // Clear payload of other than primary streams and trigger ACT immediate + if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == group->singleHeadMultiStreamMode) && + (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY != group->singleHeadMultiStreamID)) + { + main->triggerACT(); + if (!hal->payloadWaitForACTReceived()) + { + DP_LOG(("DP-TS> Downstream device did not receive ACT during stream clear")); + DP_ASSERT(0); + } + } + } + + intransitionGroups.insertFront(group); +} + +// +// Here is what NDE does: +// 1. delete unplugged devices (they were zombies, if they're on this list) +// 2. unmark zombies (they were plugged zombies, they might want to get link trained next time) +// 3. 
+//    mark head as detached (so that we can delete any HPD unplugged devices)
+//
+void ConnectorImpl::notifyDetachEnd(bool bKeepOdAlive)
+{
+    GroupImpl* currentModesetDeviceGroup = NULL;
+    DP_LOG(("DPCONN> Notify detach end"));
+    NV_DPTRACE_INFO(NOTIFY_DETACH_END);
+
+    if (intransitionGroups.isEmpty())
+    {
+        DP_ASSERT( 0 && "INVALID STATE: Modeset Group is NULL");
+        return;
+    }
+
+    currentModesetDeviceGroup = intransitionGroups.pop();
+
+    afterDeleteStream(currentModesetDeviceGroup);
+
+    if (!linkUseMultistream())
+    {
+        Device * d = 0;
+        for (d = currentModesetDeviceGroup->enumDevices(0);
+             currentModesetDeviceGroup->enumDevices(d) != 0;
+             d = currentModesetDeviceGroup->enumDevices(d))
+        {
+            // only one device in the group
+            DP_ASSERT(d && (((DeviceImpl*)d)->activeGroup == currentModesetDeviceGroup));
+        }
+    }
+
+    // nullify last modeset info
+    dpMemZero(&currentModesetDeviceGroup->lastModesetInfo, sizeof(ModesetInfo));
+    currentModesetDeviceGroup->setHeadAttached(false);
+    currentModesetDeviceGroup->headInFirmware = false;
+
+    // Mark head as disconnected
+    bNoLtDoneAfterHeadDetach = true;
+
+    //
+    // Update the last modeset HDCP status here. Hdcp got disabled after modeset
+    // thus hdcpPreviousStatus would be false to SST after device inserted.
+    //
+    HDCPState hdcpState = {0};
+    main->configureHDCPGetHDCPState(hdcpState);
+    if (!(isHDCPAuthOn = hdcpState.HDCP_State_Authenticated))
+    {
+        currentModesetDeviceGroup->hdcpEnabled = false;
+    }
+
+    // Update Vbios scratch register
+    for (Device * d = currentModesetDeviceGroup->enumDevices(0); d;
+         d = currentModesetDeviceGroup->enumDevices(d))
+    {
+        currentModesetDeviceGroup->updateVbiosScratchRegister(d);
+    }
+
+    // Reset value of bIsHeadShutdownNeeded to get rid of false asserts
+    currentModesetDeviceGroup->bIsHeadShutdownNeeded = false;
+
+    // If this is eDP and the LCD power is not ON, we don't need to Disable DSC here
+    bool bPanelPwrSts = true;
+    if ((!main->isEDP()) || (main->getEdpPowerData(&bPanelPwrSts, NULL) && bPanelPwrSts))
+    {
+        // Disable DSC decompression on the panel if panel supports DSC and reset bFECEnable Flag
+        for (Device * dev = currentModesetDeviceGroup->enumDevices(0); dev; dev = currentModesetDeviceGroup->enumDevices(dev))
+        {
+            if(!(setDeviceDscState(dev, false/*bEnableDsc*/)))
+            {
+                DP_ASSERT(!"DP-CONN> Failed to configure DSC on Sink!");
+            }
+        }
+    }
+
+    // Transfer to inactive group and cancel pending callbacks for that group.
+    currentModesetDeviceGroup->cancelHdcpCallbacks();
+    activeGroups.remove(currentModesetDeviceGroup);
+    inactiveGroups.insertBack(currentModesetDeviceGroup);
+
+    if (activeGroups.isEmpty())
+    {
+        cancelHdcpCallbacks();
+
+        // We disconnected a panel, try to clear the transition
+        if (linkAwaitingTransition)
+        {
+            assessLink();
+        }
+        //
+        // Power down the links as we have switched away from the monitor.
+        // Only power down if we are in single stream
+        //
+        else
+        {
+            //
+            // Power down the links as we have switched away from the monitor.
+            // For shared SOR case, we need this to keep SW stats in DP instances in sync.
+            // Only power down the link when it's not a compliance test device.
+            //
+            // Some eDP panels are known to have problems when powered down.
+            // See bug 1425706, 1376753, 1347872, 1355592
+            //
+            // Hotplug may trigger detach before processNewDevice if the previous state has a
+            // lost device not yet detached. Avoid powering down in that case, for the sake of
+            // the following device discovery HDCP probe.
+ // + if (!bIsDiscoveryDetectActive) + powerdownLink(!main->skipPowerdownEdpPanelWhenHeadDetach() && !bKeepOdAlive); + } + if (this->policyModesetOrderMitigation && this->modesetOrderMitigation) + this->modesetOrderMitigation = false; + } + fireEvents(); +} + +bool ConnectorImpl::trainPCONFrlLink(PCONLinkControl *pconControl) +{ + NvU32 loopCount = NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_THRESHOLD; + NvU32 frlRateMask = 0; + bool bFrlReady = false; + bool result = false; + + // Initial return values. + pconControl->result.trainedFrlBwMask = 0; + pconControl->result.maxFrlBwTrained = PCON_HDMI_LINK_BW_FRL_INVALID; + + // Step 1: Setup PCON for later operation + + // Step 1.1: Set D0 power + hal->setPowerState(PowerStateD0); + + hal->resetProtocolConverter(); + + // Step 1.2: Enable Source Control Mode and FRL mode, enable FRL-Ready IRQ + hal->setSourceControlMode(true, true); + + do + { + // + // Step 1.3: Poll for HDMI-Link-Status Change (0x2005 Bit 3) + // Get FRL Ready Bit (0x303B Bit 1) + // + hal->checkPCONFrlReady(&bFrlReady); + if (bFrlReady == true) + { + break; + } + Timeout timeout(this->timer, NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } while (--loopCount); + + if (bFrlReady == false) + { + pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_TIMEOUT; + return false; + } + + // Step 2: Assess FRL Link capability. + + // + // Step 2.1: Configure FRL Link (FRL BW, BW mask / Concurrent) + // Start with mask for all bandwidth. Please refer to definition of DPCD 0x305B. + // + result = hal->setupPCONFrlLinkAssessment(pconControl->frlHdmiBwMask, + pconControl->flags.bExtendedLTMode, + pconControl->flags.bConcurrentMode); + if (result == false) + { + pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_GENERIC; + return false; + } + + // Step 2.2: Poll for HDMI-Link-Status Change (0x2005 Bit 3) + loopCount = NV_PCON_FRL_LT_TIMEOUT_THRESHOLD; + do + { + result = hal->checkPCONFrlLinkStatus(&frlRateMask); + if (result == true) + { + break; + } + Timeout timeout(this->timer, NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } while (--loopCount); + + if (result == true) + { + // + // frlRateMask is result from checkPCONFrlLinkStatus (0x3036) Bit 1~6. + // + pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_SUCCESS; + pconControl->result.trainedFrlBwMask = frlRateMask; + pconControl->result.maxFrlBwTrained = getMaxFrlBwFromMask(frlRateMask); + } + else + { + pconControl->result.status = NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_LT_FAILURE; + } + return result; +} + +bool ConnectorImpl::assessPCONLinkCapability(PCONLinkControl *pConControl) +{ + NvU32 status; + + if (pConControl == NULL || !this->previousPlugged) + return false; + + bool bIsFlushModeEnabled = enableFlush(); + + if (!bIsFlushModeEnabled) + { + return false; + } + + if (pConControl->flags.bSourceControlMode) + { + status = trainPCONFrlLink(pConControl); + if (status == false) + { + // restore Autonomous mode and treat this as an active DP dongle. + hal->resetProtocolConverter(); + // Exit flush mode + disableFlush(); + if (!pConControl->flags.bSkipFallback) + { + bSkipAssessLinkForPCon = false; + assessLink(); + } + return status; + } + activePConLinkControl.flags = pConControl->flags; + activePConLinkControl.frlHdmiBwMask = pConControl->frlHdmiBwMask; + activePConLinkControl.result = pConControl->result; + } + + // Step 3: Assess DP Link capability. 
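+    // Flush mode (enabled earlier in this function) stays on through the DP link assessment below
+    // and is released via disableFlush() on both the offline and trained paths.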
+ LinkConfiguration lConfig = getMaxLinkConfig(); + highestAssessedLC = getMaxLinkConfig(); + + hal->updateDPCDOffline(); + if (hal->isDpcdOffline()) + { + disableFlush(); + return false; + } + if (!train(lConfig, false /* do not force LT */)) + { + // + // Note that now train() handles fallback, activeLinkConfig + // has the max link config that was assessed. + // + lConfig = activeLinkConfig; + } + + highestAssessedLC = lConfig; + linkGuessed = false; + disableFlush(); + + this->bKeepLinkAliveForPCON = pConControl->flags.bKeepPCONLinkAlive; + return status; +} + +bool ConnectorImpl::getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) +{ + if (!previousPlugged || !hal->getOuiSupported()) + return false; + + return hal->getOuiSink(ouiId, modelName, modelNameBufferSize, chipRevision); +} + +void ConnectorImpl::setIgnoreSourceOuiHandshake(bool bIgnoreOuiHandShake) +{ + bIgnoreSrcOuiHandshake = bIgnoreOuiHandShake; +} + +bool ConnectorImpl::getIgnoreSourceOuiHandshake() +{ + return bIgnoreSrcOuiHandshake; +} + +bool ConnectorImpl::performIeeeOuiHandshake() +{ + const char *ieeeOuiDevId = "NVIDIA"; + + if (!hal->getOuiSupported() || getIgnoreSourceOuiHandshake()) + return false; + + if (hal->setOuiSource(DPCD_OUI_NVIDIA, ieeeOuiDevId, 6 /* string length of ieeeOuiDevId */, 0) == AuxRetry::ack) + { + NvU8 chipRevision = 0; + + // parse client OUI. + if (hal->getOuiSink(ouiId, &modelName[0], sizeof(modelName), chipRevision)) + { + DP_LOG(("DP> SINK-OUI id(0x%08x) %s: rev:%d.%d", ouiId, + (NvU8*)modelName, + (unsigned)DRF_VAL(_DPCD, _SINK_HARDWARE_REV, _MAJOR, chipRevision), + (unsigned)DRF_VAL(_DPCD, _SINK_HARDWARE_REV, _MINOR, chipRevision))); + return true; + } + } + return false; +} + + +bool ConnectorImpl::willLinkSupportModeSST(const LinkConfiguration & linkConfig, const ModesetInfo & modesetInfo) +{ + DP_ASSERT(!linkUseMultistream() && "IMP for SST only"); + + // + // mode is not known yet, we have to report is possible + // Otherwise we're going to mark all devices as zombies on first HPD(c), + // since modeset info is not available. + // + if (modesetInfo.pixelClockHz == 0) + return true; + + if (linkConfig.lanes == 0 || linkConfig.peakRate == 0) + return false; + + Watermark water; + + if (this->isFECSupported()) + { + if (!isModePossibleSSTWithFEC(linkConfig, modesetInfo, &water, main->hasIncreasedWatermarkLimits())) + { + // Verify audio + return false; + } + } + else + { + if (!isModePossibleSST(linkConfig, modesetInfo, &water, main->hasIncreasedWatermarkLimits())) + { + // Verify audio + return false; + } + } + return true; +} + +// gets max values for DPCD HAL and forces link trainig with that config +void ConnectorImpl::forceLinkTraining() +{ + LinkConfiguration forcedMaxConfig(getMaxLinkConfig()); + train(forcedMaxConfig, true); +} + +void ConnectorImpl::powerdownLink(bool bPowerdownPanel) +{ + LinkConfiguration powerOff = getMaxLinkConfig(); + bool bPanelPwrSts = true; + powerOff.lanes = 0; + // Inform Sink about Main Link Power Down. + + // + // 1> If it is eDP and the power is not on, we don't need to put it into D3 here + // 2> If FEC is enabled then we have to put panel in D3 after powering down mainlink + // as FEC disable has to be detected by panel which will happen as part of link + // power down, we need to keep panel in D0 for this. 
+ // + if (!this->bFECEnable && + ((!main->isEDP()) || (main->getEdpPowerData(&bPanelPwrSts, NULL) && bPanelPwrSts))) + { + hal->setPowerState(PowerStateD3); + } + + train(powerOff, !bPowerdownPanel); // Train to 0 links 0 BW + + // + // If FEC is enabled, put panel to D3 here for non-eDP. + // For eDP with FEC support, FEC state would be cleared as part of panel + // power down + // + if (this->bFECEnable && (!main->isEDP())) + { + hal->setPowerState(PowerStateD3); + } + + // Set FEC state as false in link power down + this->bFECEnable = false; + highestAssessedLC.enableFEC(false); +} + +GroupImpl * ConnectorImpl::getActiveGroupForSST() +{ + if (this->linkUseMultistream()) + return 0; + GroupImpl * groupAttached = 0; + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + // there should only be one group for the connector. + if (groupAttached) + { + DP_ASSERT(0 && "Multiple attached heads"); + return 0; + } + groupAttached = (GroupImpl * )e; + } + return groupAttached; +} + +bool ConnectorImpl::trainSingleHeadMultipleSSTLinkNotAlive(GroupImpl *pGroupAttached) +{ + GroupImpl *pPriGrpAttached = NULL; + GroupImpl *pSecGrpAttached = NULL; + ConnectorImpl *pPriConnImpl = NULL; + ConnectorImpl *pSecConnImpl = NULL; + + if ((pGroupAttached == NULL) || + (pCoupledConnector == NULL) || + (pGroupAttached->singleHeadMultiStreamMode != DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) + { + return false; + } + if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) + { + pSecGrpAttached = pCoupledConnector->getActiveGroupForSST(); + pPriGrpAttached = pGroupAttached; + pSecConnImpl = pCoupledConnector; + pPriConnImpl = this; + } + else if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY) + { + pPriGrpAttached = pCoupledConnector->getActiveGroupForSST(); + pSecGrpAttached = pGroupAttached; + pPriConnImpl = pCoupledConnector; + pSecConnImpl = this; + } + else + { + DP_ASSERT(0 && "Invalid 2-SST configuration "); + return false; + } + + if (!pPriGrpAttached || !pSecGrpAttached || !pPriConnImpl || !pSecConnImpl) + { + DP_ASSERT(0 && "Invalid 2-SST configuration "); + return false; + } + + if (!pPriConnImpl->trainLinkOptimizedSingleHeadMultipleSST(pPriGrpAttached)) + { + DP_ASSERT(0 && "not able to configure 2-SST mode on primary link"); + return false; + } + + if (!pSecConnImpl->trainLinkOptimizedSingleHeadMultipleSST(pSecGrpAttached)) + { + DP_ASSERT(0 && "not able to configure 2-SST mode for secondary link"); + return false; + } + + return true; +} + +void ConnectorImpl::assessLink(LinkTrainingType trainType) +{ + this->bSkipLt = false; // Assesslink should never skip LT, so let's reset it in case it was set. + + if (bSkipAssessLinkForPCon) + { + // Skip assessLink() for PCON. client should call assessPCONLinkCapability later. + return; + } + + if (trainType == NO_LINK_TRAINING) + { + train(preferredLinkConfig, false, trainType); + return; + } + + if (isLinkQuiesced || + (firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware)) + { + highestAssessedLC = getMaxLinkConfig(); + + if (bIsUefiSystem && !hal->getSupportsMultistream()) + { + // + // Since this is a UEFI based system which can provide max link config + // supported on this panel. So try to get the max supported link config + // and update the highestAssessedLC. Once done set linkGuessed as false. 
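+    // (The link rate reported by UEFI below is the DPCD LINK_BW_SET code in
+    //  270 Mbps units: 0x06 = RBR 1.62 Gbps, 0x0A = HBR 2.70 Gbps,
+    //  0x14 = HBR2 5.40 Gbps, 0x1E = HBR3 8.10 Gbps.)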
+ // + unsigned laneCount = 0; + NvU64 linkRate = 0; + NvU8 linkRateFromUefi, laneCountFromUefi; + + // Query the max link config if provided by UEFI. + if ((!linkGuessed) && (main->getMaxLinkConfigFromUefi(linkRateFromUefi, laneCountFromUefi))) + { + laneCount = laneCountFromUefi; + + if (linkRateFromUefi == 0x6) + { + linkRate = RBR; + } + else if (linkRateFromUefi == 0xA) + { + linkRate = HBR; + } + else if (linkRateFromUefi == 0x14) + { + linkRate = HBR2; + } + else if (linkRateFromUefi == 0x1E) + { + linkRate = HBR3; + } + else + { + DP_ASSERT(0 && "DP> Invalid link rate returned from UEFI!"); + linkGuessed = true; + } + + if ((getMaxLinkConfig().peakRate == HBR3) && + (linkRate != HBR3)) + { + // + // UEFI does not support HBR3 yet (The support will be added in Volta). + // Mark the link as guessed when max supported link config is HBR3 and + // the currently assessed link config, by UEFI is not the highest, to + // force the link assessment by driver. + // + linkGuessed = true; + } + else + { + // + // SW policy change: If the BIOS max link config isn't same as max of panel, mark DPlib for re-link + // assessment by marking linkGuessed as true. + // Re-link training is prefereable over glitchless and booting at low resolutions + // + if (laneCount != highestAssessedLC.lanes || linkRate != highestAssessedLC.peakRate) + { + linkGuessed = true; + } + else + { + linkGuessed = false; + // Update software state with latest link status info + hal->setDirtyLinkStatus(true); + hal->refreshLinkStatus(); + } + } + } + else if (!linkGuessed) + { + // We failed to query max link config from UEFI. Mark link as guessed. + DP_LOG(("DP CONN> Failed to query max link config from UEFI.")); + linkGuessed = true; + } + + if (!linkGuessed) + { + // Update SW state with UEFI provided max link config + highestAssessedLC = LinkConfiguration (&this->linkPolicy, + laneCount, linkRate, + this->hal->getEnhancedFraming(), + linkUseMultistream()); + + // Get the currently applied linkconfig and update SW state + getCurrentLinkConfig(laneCount, linkRate); + + activeLinkConfig = LinkConfiguration (&this->linkPolicy, + laneCount, linkRate, + this->hal->getEnhancedFraming(), + linkUseMultistream()); + } + } + else + { + linkGuessed = true; + } + + return; + } + + if (linkAwaitingTransition) + { + if (activeGroups.isEmpty()) + { + linkState = hal->getSupportsMultistream() ? + DP_TRANSPORT_MODE_MULTI_STREAM : DP_TRANSPORT_MODE_SINGLE_STREAM; + linkAwaitingTransition = false; + } + else + { + // + // If modesetOrderMitigation isn't on, we need to reassess + // immediately. This is because we will report the connects at the + // same time as the disconnects. IMP Query can be done immediately + // on connects. On the other hand if modeset order mitigation is + // off - all attached devices are going to be reported as + // disconnected and might as well use the old configuration. + // + if (this->policyModesetOrderMitigation && this->modesetOrderMitigation) + return; + } + } + else + { + if (hal->isDpcdOffline()) + linkState = DP_TRANSPORT_MODE_INIT; + } + + // + // Bug 1545352: This is done to avoid shutting down a display for freeing up a SOR for LT, + // when no SOR is assigned properly to the connector. It can happen when more + // than max supported number of display(s) is connected. + // It came as a requirement from some clients to avoid glitches when shutting + // down a display to make SOR availability for those monitors. 
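+    // When no SOR is assigned, skip the assessment entirely: assume the maximum
+    // configuration and leave linkGuessed set so a real assessment happens once
+    // an SOR becomes available.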
+ // + if (main->getSorIndex() == DP_INVALID_SOR_INDEX) + { + highestAssessedLC = getMaxLinkConfig(); + linkGuessed = true; + return; + } + + LinkConfiguration lConfig = getMaxLinkConfig(); + + LinkConfiguration preFlushModeActiveLinkConfig = activeLinkConfig; + + if (main->isInternalPanelDynamicMuxCapable()) + { + // Skip Link assessment for Dynamic MUX capable Internal Panel + if ((activeLinkConfig.lanes == lConfig.lanes) && + (activeLinkConfig.peakRate == lConfig.peakRate) && + (!isLinkInD3()) && (!isLinkLost())) + { + linkGuessed = false; + return; + } + } + + // + // Disconnect heads + // + bool bIsFlushModeEnabled = enableFlush(); + + if (!bIsFlushModeEnabled) + { + goto done; + } + + // + // if dpcd is offline; avoid assessing. Just consider max. + // keep lowering lane/rate config till train succeeds + // + hal->updateDPCDOffline(); + if (!hal->isDpcdOffline()) + { + if (!train(lConfig, false /* do not force LT */)) + { + // + // Note that now train() handles fallback, activeLinkConfig + // has the max link config that was assessed. + // + lConfig = activeLinkConfig; + } + + if (!this->linkUseMultistream() && this->policyAssessLinkSafely) + { + GroupImpl * groupAttached = this->getActiveGroupForSST(); + + if (groupAttached && groupAttached->isHeadAttached() && + !willLinkSupportModeSST(lConfig, groupAttached->lastModesetInfo)) + { + DP_ASSERT(0 && "DP> Maximum assessed link configuration is not capable to driver existing raster!"); + + train(preFlushModeActiveLinkConfig, true); + linkGuessed = true; + goto done; + } + } + } + + highestAssessedLC = lConfig; + + // It is critical that this restore the original (desired) configuration + trainLinkOptimized(lConfig); + + linkGuessed = false; + +done: + + NV_DPTRACE_INFO(LINK_ASSESSMENT, highestAssessedLC.peakRate, highestAssessedLC.lanes); + + if (bIsFlushModeEnabled) + { + disableFlush(); + } +} + +bool ConnectorImpl::handleCPIRQ() +{ + NvU8 bStatus; + HDCPState hdcpState = {0}; + + if (!isLinkActive()) + { + DP_LOG(("DP> CP_IRQ: Ignored with link down")); + return true; + } + + main->configureHDCPGetHDCPState(hdcpState); + if (hal->getRxStatus(hdcpState, &bStatus)) + { + NvBool bReAuthReq = NV_FALSE; + NvBool bRxIDMsgPending = NV_FALSE; + DP_LOG(("DP> CP_IRQ HDCP ver:%s RxStatus:0x%2x HDCP Authenticated:%s Encryption:%s", + hdcpState.HDCP_State_22_Capable ? "2.2" : "1.x", + bStatus, + hdcpState.HDCP_State_Authenticated ? "YES" : "NO", + hdcpState.HDCP_State_Encryption ? "ON" : "OFF")); + + // Check device if HDCP2.2 capable instead actual encryption status, + if (hdcpState.HDCP_State_22_Capable) + { + if (FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _REAUTH_REQUEST, _YES, bStatus) || + FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _LINK_INTEGRITY_FAILURE, _YES, bStatus)) + { + if (this->linkUseMultistream()) + { + // + // Bug 2860192: Some MST hub throw integrity failure before source trigger + // authentication. This may be stale data since Branch is + // doing protocol translation(DP to HDMI), and cannot treat + // as sink's fault. + // For MST, we would not lose anything here by ignoring either + // CP_Irq event since Auth never started after HPD high or + // LinkTraining start. 
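+    // (Hence the check below: the integrity failure is only honored once this
+    //  source has actually triggered HDCP authentication on the link.)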
+ // + if (isHDCPAuthTriggered) + { + bReAuthReq = NV_TRUE; + } + else + { + DP_LOG(("DP>Ignore integrity failure or ReAuth in transition or before AKE_INIT.")); + } + } + else + { + bReAuthReq = NV_TRUE; + } + } + + if (FLD_TEST_DRF(_DPCD, _HDCP22_RX_STATUS, _READY, _YES, bStatus)) + { + bRxIDMsgPending = NV_TRUE; + } + } + else + { + if (FLD_TEST_DRF(_DPCD, _HDCP_BSTATUS, _REAUTHENTICATION_REQUESET, _TRUE, bStatus) || + FLD_TEST_DRF(_DPCD, _HDCP_BSTATUS, _LINK_INTEGRITY_FAILURE, _TRUE, bStatus)) + { + bReAuthReq = NV_TRUE; + } + } + + if (bReAuthReq || bRxIDMsgPending) + { + DP_LOG(("DP> CP_IRQ: REAUTHENTICATION/RXIDPENDING REQUEST")); + + if (bReAuthReq) + { + authRetries = 0; + } + + if (!this->linkUseMultistream()) + { + // Get primary connector when multi-stream SST deployed. + GroupImpl *pGroupAttached = getActiveGroupForSST(); + ConnectorImpl *sstPrim = this; + + if (pGroupAttached && + (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) && + (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)) + { + DP_ASSERT(this->pCoupledConnector); + sstPrim = this->pCoupledConnector; + } + + sstPrim->main->configureHDCPRenegotiate(HDCP_DUMMY_CN, + HDCP_DUMMY_CKSV, + !!bReAuthReq, + !!bRxIDMsgPending); + sstPrim->main->configureHDCPGetHDCPState(hdcpState); + isHDCPAuthOn = hdcpState.HDCP_State_Authenticated; + } + } + + return true; + } + else + { + DP_LOG(("DP> CP_IRQ: RxStatus Read failed.")); + return false; + } +} + +void ConnectorImpl::handleSSC() +{ +} + +void ConnectorImpl::handleHdmiLinkStatusChanged() +{ + bool bLinkActive; + NvU32 newFrlRate; + // Check Link status + if (!hal->queryHdmiLinkStatus(&bLinkActive, NULL)) + { + return; + } + if (!bLinkActive) + { + newFrlRate = hal->restorePCONFrlLink(activePConLinkControl.frlHdmiBwMask, + activePConLinkControl.flags.bExtendedLTMode, + activePConLinkControl.flags.bConcurrentMode); + + if (newFrlRate != activePConLinkControl.result.trainedFrlBwMask) + { + activePConLinkControl.result.trainedFrlBwMask = newFrlRate; + activePConLinkControl.result.maxFrlBwTrained = getMaxFrlBwFromMask(newFrlRate); + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + if ((dev->activeGroup != NULL) && (dev->plugged)) + { + sink->bandwidthChangeNotification(dev, false); + } + } + } + } +} + +void ConnectorImpl::handleMCCSIRQ() +{ + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + if ((dev->activeGroup != NULL) && (dev->plugged)) + { + sink->notifyMCCSEvent(dev); + } + } +} + +// +// Checks if the link is still trained. +// Note that these hal registers are ONLY re-read in response to an IRQ. +// Calling this function returns the information from the last interrupt. +// +bool ConnectorImpl::isLinkLost() +{ + if (isLinkActive()) + { + // Bug 200320196: Add DPCD offline check to avoid link-train in unplugged state. + if (!hal->isDpcdOffline()) + { + unsigned laneCount; + NvU64 linkRate; + getCurrentLinkConfig(laneCount, linkRate); + // + // Check SW lane count in RM in case it's disabled beyond DPLib. 
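+    // A zero lane count reported here means the link was disabled outside of
+    // DPLib, so report it as lost.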
+ // Bug 1933751/2897747 + // + if (laneCount == laneCount_0) + return true; + } + + // update the sw cache if required + hal->refreshLinkStatus(); + if (!hal->getInterlaneAlignDone()) + return true; + + for (unsigned i = 0; i < activeLinkConfig.lanes; i++) + { + if (!hal->getLaneStatusSymbolLock(i)) + return true; + if (!hal->getLaneStatusClockRecoveryDone(i)) + return true; + } + + if (!hal->getInterlaneAlignDone()) + return true; + } + return false; +} + +bool ConnectorImpl::isLinkActive() +{ + return (activeLinkConfig.isValid()); +} + +bool ConnectorImpl::isLinkInD3() +{ + return (hal->getPowerState() == PowerStateD3); +} + +bool ConnectorImpl::trainLinkOptimizedSingleHeadMultipleSST(GroupImpl *pGroupAttached) +{ + if (!pGroupAttached) + { + DP_LOG(("DP-CONN> 2-sst group not valid")); + return false; + } + + if (preferredLinkConfig.isValid()) + { + ConnectorImpl *pSecConImpl = this->pCoupledConnector; + if (pSecConImpl->preferredLinkConfig.isValid() && + (preferredLinkConfig.lanes == laneCount_4) && (pSecConImpl->preferredLinkConfig.lanes == laneCount_4) && + (preferredLinkConfig.peakRate == pSecConImpl->preferredLinkConfig.peakRate)) + { + if (willLinkSupportModeSST(preferredLinkConfig, pGroupAttached->lastModesetInfo)) + { + if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY) + { + if (!this->enableFlush()) + return false; + } + preferredLinkConfig.policy.setSkipFallBack(true); + if (!train(preferredLinkConfig, false)) + { + DP_LOG(("DP-CONN> Unable to set preferred linkconfig on 2-SST display")); + return false; + } + if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY) + { + this->disableFlush(); + } + return true; + } + else + { + DP_LOG(("DP-CONN> Invalid 2-SST Preferred link configuration")); + return false; + } + } + } + + if (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + if (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY) + { + if (this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg.isValid()) + { + bool trainDone = false; + this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg.policy.setSkipFallBack(true); + if (!train(this->pCoupledConnector->oneHeadSSTSecPrefLnkCfg, false)) + { + DP_LOG(("DP-CONN> Unable set the primary configuration on secondary display")); + trainDone = false; + } + else + { + trainDone = true; + } + this->disableFlush(); + return trainDone; + } + } + + } + + // Order for 2-SST link training and must be with 4 lanes + unsigned linkRateList[] = {RBR, HBR, HBR2, HBR3}; + NvU8 linkRateCount = sizeof(linkRateList) / sizeof(unsigned); + + for (NvU8 i = 0; i < linkRateCount; i++) + { + LinkConfiguration linkCfg = LinkConfiguration(&this->linkPolicy, + laneCount_4, linkRateList[i], + hal->getEnhancedFraming(), false); + linkCfg.policy.setSkipFallBack(true); + if (willLinkSupportModeSST(linkCfg, pGroupAttached->lastModesetInfo)) + { + if (!this->enableFlush()) + return false; + if (!train(linkCfg, false)) + { + if (i == linkRateCount - 1) + { + // Re-train max link config + linkCfg = getMaxLinkConfig(); + linkCfg.policy.setSkipFallBack(true); + if (!train(linkCfg, false)) + { + DP_ASSERT(0 && "DPCONN> 2-SST setting max link configuration failed "); + break; + } + } + } + else + { + oneHeadSSTSecPrefLnkCfg = linkCfg; + break; + } + } + } + + return true; +} + +bool ConnectorImpl::isNoActiveStreamAndPowerdown() +{ + if (activeGroups.isEmpty()) + { + bool bKeepMSTLinkAlive = (this->bKeepLinkAliveMST 
&& activeLinkConfig.multistream); + bool bKeepSSTLinkAlive = (this->bKeepLinkAliveSST && !activeLinkConfig.multistream); + // + // Power saving unless: + // - Setting fake flag as true to prevent panel power down here. + // - Regkey sets to keep link alive for MST and it's in MST. + // - Regkey sets to keep link alive for SST and it's in SST. + // - bKeepOptLinkAlive is set to true - to avoid link retraining. + // - Device discovery processing that processNewDevice has HDCP probe. + // - Pending remote HDCP detection messages - prevent power down to access HDCP DCPD regs. + // - Keep link active with compliance device as we always do + // + if ((!bKeepMSTLinkAlive) && + (!bKeepSSTLinkAlive) && + (!bKeepOptLinkAlive) && + (!bKeepLinkAliveForPCON) && + (!bIsDiscoveryDetectActive) && + (pendingRemoteHdcpDetections == 0) && + (!main->isInternalPanelDynamicMuxCapable())) + { + powerdownLink(); + + // Sharp panel for HP Valor QHD+ needs 50 ms after D3 + if (bDelayAfterD3) + { + timer->sleep(50); + } + } + + return true; + } + + return false; +} + +bool ConnectorImpl::trainLinkOptimized(LinkConfiguration lConfig) +{ + LinkConfiguration lowestSelected; // initializes to 0 + bool bSkipLowestConfigCheck = false; // Force highestLink config in SST + bool bSkipRedundantLt = false; // Skip redundant LT + bool bEnteredFlushMode = false; + bool bLinkTrainingSuccessful = true; // status indicating if link training actually succeeded + // forced link training is considered a failure + bool bTwoHeadOneOrLinkRetrain = false; // force link re-train if any attached + // groups are in 2Head1OR mode. + + // Power off the link if no stream are active + if (isNoActiveStreamAndPowerdown()) + { + return true; + } + + // + // Split policy. + // If we're multistream we *always pick the highest link configuration available + // - we don't want to interrupt existing panels to light up new ones + // If we're singlestream we always pick the lowest power configurations + // - there can't be multiple streams, so the previous limitation doesn't apply + // + + // + // Find the active group(s) + // + GroupImpl * groupAttached = 0; + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + DP_ASSERT(bIsUefiSystem || (!groupAttached && "Multiple attached heads")); + groupAttached = (GroupImpl * )e; + + if ((groupAttached->lastModesetInfo.mode == DSC_DUAL) && groupAttached->bIsCurrentModesetGroup) + { + // + // If current modeset group requires 2Head1OR mode, we should retrain link. + // For SST, there will be only one group per connector. + // For MST, we need to re-run LT in case the current modeset group requires DSC_DUAL. + // + bTwoHeadOneOrLinkRetrain = true; + break; + } + } + + lowestSelected = getMaxLinkConfig(); + + if (!activeLinkConfig.multistream) + { + if (groupAttached && + groupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + return trainLinkOptimizedSingleHeadMultipleSST(groupAttached); + } + + if (preferredLinkConfig.isValid()) + { + if (activeLinkConfig != preferredLinkConfig) + { + // if a tool has requested a preferred link config; check if its possible; and train to it. + // else choose the normal path + if (groupAttached && + willLinkSupportModeSST(preferredLinkConfig, groupAttached->lastModesetInfo)) + { + if (!this->enableFlush()) + return false; + if (!train(preferredLinkConfig, false)) + { + DP_LOG(("DP-CONN> Preferred linkconfig could not be applied. 
Forcing on gpu side.")); + train(preferredLinkConfig, true); + } + this->disableFlush(); + return true; + } + else + { + DP_LOG(("DP-CONN> Preferred linkconfig does not support the mode")); + return false; + } + } + else + { + // We are already at preferred. Nothing to do here. Return. + return true; + } + } + + // + // This is required for making certain panels to work by training them in + // highest linkConfig in SST mode. + // + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->forceMaxLinkConfig()) + { + bSkipLowestConfigCheck = true; + } + if (dev->skipRedundantLt()) + { + bSkipRedundantLt = true; + } + } + + if (bPConConnected) + { + // When PCON is connected, always LT to max to avoid LT. + bSkipLowestConfigCheck = true; + } + + // If the flag is set, simply neglect downgrading to lowest possible linkConfig + if (!bSkipLowestConfigCheck) + { + lConfig = lowestSelected; + + if (groupAttached) + { + lConfig.enableFEC(this->bFECEnable); + // Find lowest link configuration supporting the mode + getValidLowestLinkConfig(lConfig, lowestSelected, groupAttached->lastModesetInfo); + } + } + + if (lowestSelected.isValid()) + { + // + // Check if we are already trained to the desired link config? + // Even if we are, we need to redo LT if FEC is enabled or DSC mode is DSC_DUAL + // since if current modeset requires 2H1OR, LT done during assessLink will not + // have 2H1Or flag set or if last modeset required DSC but not 2H1OR, still 2H1Or + // flag will not be set and modeset will lead to HW hang. + // + + // + // Set linkStatus to be dirty so that when isLinkLost() calls + // refreshLinkStatus() it will get real time status. This is to + // fix an issue that when UEFI-to-Driver transition, LTTPR is not + // link trainined but will be link trainined by RM. + // + hal->setDirtyLinkStatus(true); + if ((activeLinkConfig == lowestSelected) && + (!isLinkInD3()) && + (!isLinkLost()) && + !(this->bFECEnable) && + !bTwoHeadOneOrLinkRetrain) + { + if (bSkipRedundantLt || main->isInternalPanelDynamicMuxCapable()) + { + // Skip LT if the links are already trained to desired config. + DP_LOG(("DP-CONN> Skipping redundant LT.")); + return true; + } + else + { + // Make sure link status is still good. + if (activeLinkConfig.lanes && hal->isLinkStatusValid(activeLinkConfig.lanes)) + { + // Pass on a flag to RM ctrl call to skip LT at RM level. + DP_LOG(("DP-CONN> Skipping redundant LT from RM.")); + bSkipLt = true; + } + } + } + else + { + bSkipLt = false; + } + + if (groupAttached && groupAttached->isHeadAttached()) + { + // Enter flush mode/detach head before LT + if (!bSkipLt) + { + if (!(bEnteredFlushMode = this->enableFlush())) + return false; + } + } + + bLinkTrainingSuccessful = train(lowestSelected, false); + // + // If LT failed, check if skipLT was marked. If so, clear the flag and + // enable flush mode if required (headattached) and try real LT once. 
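+    // (bSkipLt is cleared first so the retry below performs a full link training
+    //  in hardware rather than being short-circuited again.)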
+ // + if (!bLinkTrainingSuccessful && bSkipLt) + { + bSkipLt = false; + if (groupAttached && groupAttached->isHeadAttached()) + { + if (!(bEnteredFlushMode = this->enableFlush())) + return false; + } + bLinkTrainingSuccessful = train(lowestSelected, false); + } + if (!bLinkTrainingSuccessful) + { + // Try fall back to max link config and if that fails try original assessed link configuration + if (!train(getMaxLinkConfig(), false)) + { + if (!willLinkSupportModeSST(activeLinkConfig, groupAttached->lastModesetInfo)) + { + train(lowestSelected, true); + + // Mark link training as failed since we forced it + bLinkTrainingSuccessful = false; + } + } + } + } + else + { + if (groupAttached && groupAttached->isHeadAttached()) + { + if (!(bEnteredFlushMode = this->enableFlush())) + return false; + } + + // Mode wasn't possible at any assessed configuration. + train(getMaxLinkConfig(), true); + + // Mark link training as failed since we forced it + bLinkTrainingSuccessful = false; + } + + lConfig = activeLinkConfig; + + if (bEnteredFlushMode) + { + this->disableFlush(); + } + + // In case this was set, we should reset it to prevent skipping LT next time. + bSkipLt = false; + } + else + { + bool bRetrainToEnsureLinkStatus; + + // + // Multistream: + // If we can't restore all streams after a link train - we need to make sure that + // we set RG_DIV to "slow down" the effective pclk for that head. RG_DIV does give + // us enough room to account for both the HBR2->RBR drop and the 4->1 drop. + // This should allow us to keep the link up and operating at a sane frequency. + // .. thus we'll allow training at any frequency .. + // + + // for MST; the setPreferred calls assessLink directly. + if (preferredLinkConfig.isValid() && (activeLinkConfig != preferredLinkConfig)) + { + if (!train(preferredLinkConfig, false)) + { + DP_LOG(("DP-CONN> Preferred linkconfig could not be applied. Forcing on gpu side.")); + train(preferredLinkConfig, true); + } + return true; + } + + // + // Make sure link is physically active and healthy, otherwise re-train. + // We need to retrain if the link is in 2Head1OR MST mode. For example, + // if we plug in a 2Head1OR panel to an active link that is already driving + // a MST panel in DSC mode, RM will assign a secondary OR to the 2Head1OR panel. + // But since there is no change required in linkConfig DPlib will skip + // LT, resutling in not adding secondary OR to LT; this will lead to HW hang. + // + bRetrainToEnsureLinkStatus = (isLinkActive() && isLinkInD3()) || + isLinkLost() || + (activeLinkConfig.bEnableFEC != this->bFECEnable) || + bTwoHeadOneOrLinkRetrain; + + if (bRetrainToEnsureLinkStatus || (!isLinkActive())) + { + // + // Train to the highestAssesed link config for MST cases to avoid redundant + // fallback. There is no point of trying to link train at highest link config + // when it failed during the assessment. + // train() handles fallback now. So we don't need to step down when LT fails. + // + LinkConfiguration desired = highestAssessedLC; + + NvU8 retries = DP_LT_MAX_FOR_MST_MAX_RETRIES; + + desired.enableFEC(this->bFECEnable); + + if (bRetrainToEnsureLinkStatus) + { + bEnteredFlushMode = enableFlush(); + } + + // + // In some cases, the FEC isn't enabled and link is not lost (e.g. DP_KEEP_OPT_LINK_ALIVE = 1), + // but we're going to enable DSC. We need to update bSkipLt for retraining the link with FEC. + // As the bSkipLt was set to true prviously while link is not lost. 
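+    // (Clearing bSkipLt forces a real LT below so the FEC state change actually
+    //  reaches the hardware.)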
+ // + if (activeLinkConfig.bEnableFEC != this->bFECEnable) + { + bSkipLt = false; + } + + train(desired, false); + if (!activeLinkConfig.isValid()) + { + DP_LOG(("DPCONN> Unable to train link (at all). Forcing training (picture won't show up)")); + train(getMaxLinkConfig(), true); + + // Mark link training as failed since we forced it + bLinkTrainingSuccessful = false; + } + + // + // Bug 2354318: On some MST branches, we might see a problem that LT failed during + // assessLink(), but somehow works later. In this case, we should not + // retry since highestAssessedLC is not a valid comparison now. + // + if (highestAssessedLC.isValid()) + { + while ((highestAssessedLC != activeLinkConfig) && retries > 0) + { + // Give it a few more chances. + train(desired, false); + retries--; + }; + } + + lConfig = activeLinkConfig; + + if (bEnteredFlushMode) + { + disableFlush(); + } + } + } + + return (bLinkTrainingSuccessful && lConfig.isValid()); +} + +bool ConnectorImpl::getValidLowestLinkConfig +( + LinkConfiguration &lConfig, + LinkConfiguration &lowestSelected, + ModesetInfo modesetInfo +) +{ + bool bIsModeSupported = false; + unsigned i; + LinkConfiguration selectedConfig; + + for (i = 0; i < numPossibleLnkCfg; i++) + { + if ((this->allPossibleLinkCfgs[i].lanes > lConfig.lanes) || (this->allPossibleLinkCfgs[i].peakRate > lConfig.peakRate)) + { + continue; + } + + // Update enhancedFraming for target config + this->allPossibleLinkCfgs[i].enhancedFraming = lConfig.enhancedFraming; + + selectedConfig = this->allPossibleLinkCfgs[i]; + + selectedConfig.enableFEC(lConfig.bEnableFEC); + + if (willLinkSupportModeSST(selectedConfig, modesetInfo)) + { + bIsModeSupported = true; + break; + } + } + + if (bIsModeSupported) + { + lowestSelected = selectedConfig; + } + else + { + // Invalidate link config if mode is not possible at all + lowestSelected.lanes = 0; + } + + return bIsModeSupported; +} + +bool ConnectorImpl::postLTAdjustment(const LinkConfiguration & lConfig, bool force) +{ + NvU8 lastVoltageSwingLane[DP_MAX_LANES] = {0}; + NvU8 lastPreemphasisLane[DP_MAX_LANES] = {0}; + NvU8 lastTrainingScoreLane[DP_MAX_LANES] = {0}; + NvU8 lastPostCursor[DP_MAX_LANES] = {0}; + NvU8 currVoltageSwingLane[DP_MAX_LANES] = {0}; + NvU8 currPreemphasisLane[DP_MAX_LANES] = {0}; + NvU8 currTrainingScoreLane[DP_MAX_LANES] = {0}; + NvU8 currPostCursor[DP_MAX_LANES] = {0}; + NvU32 updatedLaneSettings[DP_MAX_LANES] = {0}; + NvU8 adjReqCount = 0; + NvU64 startTime; + LinkConfiguration linkConfig = lConfig; + + // Cache Voltage Swing and Preemphasis value just after Link training + if (!hal->readTraining(lastVoltageSwingLane, + lastPreemphasisLane, + lastTrainingScoreLane, + lastPostCursor, + (NvU8)activeLinkConfig.lanes)) + { + DP_LOG(("DPCONN> Post Link Training : Unable to read current training values")); + } + + if (hal->getTrainingPatternSelect() != TRAINING_DISABLED) + { + DP_LOG(("DPCONN> Post Link Training : Training pattern is not disabled.")); + } + + // + // We have cleared DPCD 102h + // Now hardware will automatically send the idle pattern + // + startTime = timer->getTimeUs(); + + do + { + if (!hal->getIsPostLtAdjRequestInProgress()) + { + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + hal->setPostLtAdjustRequestGranted(false); + return true; + } + + // Wait for 2ms + Timeout timeout(timer, 2); + + // check if DPCD 00206h~00207h change has reached to ADJ_REQ_LIMIT + if (adjReqCount > DP_POST_LT_ADJ_REQ_LIMIT) + { + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + 
hal->setPostLtAdjustRequestGranted(false); + return true; + } + + if (!hal->readTraining(currVoltageSwingLane, + currPreemphasisLane, + currTrainingScoreLane, + currPostCursor, + (NvU8)activeLinkConfig.lanes)) + { + DP_LOG(("DPCONN> Post Link Training : Unable to read current training values")); + } + else + { + if (!hal->isLaneSettingsChanged(lastVoltageSwingLane, + currVoltageSwingLane, + lastPreemphasisLane, + currPreemphasisLane, + (NvU8)activeLinkConfig.lanes)) + { + // Check if we have exceeded DP_POST_LT_ADJ_REQ_TIMER (200 ms) + if ((timer->getTimeUs() - startTime) > DP_POST_LT_ADJ_REQ_TIMER) + { + DP_LOG(("DPCONN> Post Link Training : DP_POST_LT_ADJ_REQ_TIMER is timed out.")); + // Clear POST_LT_ADJ_REQ_GRANTED bit and start normal AV transmission + hal->setPostLtAdjustRequestGranted(false); + return true; + } + } + else + { + adjReqCount++; + + // Clear ADJ_REQ_TIMER + startTime = timer->getTimeUs(); + + // Change RX drive settings according to DPCD 00206h & 00207h + if (!hal->setTrainingMultiLaneSet((NvU8)activeLinkConfig.lanes, + currVoltageSwingLane, + currPreemphasisLane)) + { + DP_LOG(("DPCONN> Post Link Training : Failed to set RX drive setting according to DPCD 00206h & 00207h.")); + } + + // Populate updated lane settings for currently active lanes + populateUpdatedLaneSettings(currVoltageSwingLane, currPreemphasisLane, updatedLaneSettings); + + // Change TX drive settings according to DPCD 00206h & 00207h + if (!setLaneConfig(activeLinkConfig.lanes, updatedLaneSettings)) + { + DP_LOG(("DPCONN> Post Link Training : Failed to set TX drive setting according to DPCD 00206h & 00207h.")); + } + + // Update last Voltage Swing and Preemphasis values + if (!hal->readTraining(lastVoltageSwingLane, + lastPreemphasisLane, + lastTrainingScoreLane, + lastPostCursor, + (NvU8)activeLinkConfig.lanes)) + { + DP_LOG(("DPCONN> Post Link Training : Unable to read current training values")); + } + } + } + + // Mark the linkStatus as dirty since we need to retrain in case Rx has lost sync + hal->setDirtyLinkStatus(true); + }while (!isLinkLost()); + + // Clear POST_LT_ADJ_REQ_GRANTED bit + hal->setPostLtAdjustRequestGranted(false); + + if (isLinkLost()) + { + if (bNoFallbackInPostLQA && (retryLT < WAR_MAX_RETRAIN_ATTEMPT)) + { + // + // A monitor may lose link sometimes during assess link or link training. + // So retry for 3 times before fallback to lower config + // + retryLT++; + train(lConfig, force); + return true; + } + // + // If the link is not alive, then we need to retrain at a lower config + // There is no reason to try at the same link configuration. Follow the + // fallback policy that is followed for CR phase of LT + // + if (!linkConfig.lowerConfig()) + { + DP_LOG(("DPCONN> Post Link Training : Already at the lowest link rate. 
Cannot reduce further")); + return false; + } + train(linkConfig, force); + } + else if (bNoFallbackInPostLQA && (retryLT != 0)) + { + retryLT = 0; + } + + return true; +} + +void ConnectorImpl::populateUpdatedLaneSettings(NvU8* voltageSwingLane, NvU8* preemphasisLane, NvU32 *data) +{ + NvU32 laneIndex; + + for (laneIndex = 0; laneIndex < activeLinkConfig.lanes; laneIndex++) + { + switch (voltageSwingLane[laneIndex]) + { + case driveCurrent_Level0: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL0, data[laneIndex]); + break; + + case driveCurrent_Level1: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL1, data[laneIndex]); + break; + + case driveCurrent_Level2: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL2, data[laneIndex]); + break; + + case driveCurrent_Level3: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _DRIVECURRENT, _LEVEL3, data[laneIndex]); + break; + } + + switch (preemphasisLane[laneIndex]) + { + case preEmphasis_Level1: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL1, data[laneIndex]); + break; + + case preEmphasis_Level2: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL2, data[laneIndex]); + break; + + case preEmphasis_Level3: + data[laneIndex] = FLD_SET_DRF(0073_CTRL, _DP_LANE_DATA, _PREEMPHASIS, _LEVEL3, data[laneIndex]); + break; + } + } +} + +bool ConnectorImpl::validateLinkConfiguration(const LinkConfiguration & lConfig) +{ + if (!IS_VALID_LANECOUNT(lConfig.lanes)) + return false; + + if (lConfig.lanes > hal->getMaxLaneCount()) + return false; + + if (lConfig.lanes != 0) + { + if (!IS_VALID_LINKBW(lConfig.peakRate/DP_LINK_BW_FREQ_MULTI_MBPS)) + return false; + + if (lConfig.peakRate > hal->getMaxLinkRate()) + return false; + + if (IS_INTERMEDIATE_LINKBW(lConfig.peakRate/DP_LINK_BW_FREQ_MULTI_MBPS)) + { + NvU16 *ilrTable; + NvU32 i; + if (!hal->isIndexedLinkrateEnabled()) + return false; + + ilrTable = hal->getLinkRateTable(); + for (i = 0; i < NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES; i++) + { + // + // lConfig.peakRate is in MBPS and ilrTable entries are the values read from DPCD + // Convert the ilrTable value to MBPS before the comparison + // + if (LINK_RATE_KHZ_TO_MBPS(ilrTable[i] * DP_LINK_RATE_TABLE_MULTIPLIER_KHZ) == lConfig.peakRate) + break; + if (ilrTable[i] == 0) + return false; + } + if (i == NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES) + return false; + } + } + + return true; +} + +bool ConnectorImpl::train(const LinkConfiguration & lConfig, bool force, + LinkTrainingType trainType) +{ + LinkTrainingType preferredTrainingType = trainType; + bool result; + // + // Validate link config against caps + // + if (!force) + { + if (!validateLinkConfiguration(lConfig)) + return false; + } + + if (!lConfig.multistream) + { + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->powerOnMonitorBeforeLt() && lConfig.lanes != 0) + { + // + // Some panels expose that they are in D0 even when they are not. + // Explicit write to DPCD 0x600 is required to wake up such panel before LT. + // + hal->setPowerState(PowerStateD0); + } + } + // + // Enable special LT only when regkey 'ENABLE_FAST_LINK_TRAINING' set + // to 1 in DD's path. 
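+        // (The code below prefers no-link-training over fast, no-handshake link
+        //  training when both are supported, since NLT skips the training
+        //  sequence entirely.)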
+ // + if (bEnableFastLT) + { + // If the panel can support NLT or FLT, then let's try it first + if (hal->getNoLinkTraining()) + preferredTrainingType = NO_LINK_TRAINING; + else if (hal->getSupportsNoHandshakeTraining()) + preferredTrainingType = FAST_LINK_TRAINING; + } + + } + + // + // Don't set the stream if we're shutting off the link + // or forcing the config + // + if (!force && lConfig.lanes != 0) + { + if (isLinkActive()) + { + if (activeLinkConfig.multistream != lConfig.multistream) + { + activeLinkConfig.lanes = 0; + rawTrain(activeLinkConfig, true, NORMAL_LINK_TRAINING); + } + } + + if (AuxRetry::ack != hal->setMultistreamLink(lConfig.multistream)) + { + DP_LOG(("DP> Failed to enable multistream mode on current link")); + } + } + + // + // Read link rate table before link-train to assure on-board re-driver + // knows link rate going to be set in link rate table. + // If eDP's power has been shutdown here, don't query Link rate table, + // else it will cause panel wake up. + // + if (hal->isIndexedLinkrateEnabled() && (lConfig.lanes != 0)) + { + hal->getRawLinkRateTable(); + } + + activeLinkConfig = lConfig; + result = rawTrain(lConfig, force, preferredTrainingType); + + // If NLT or FLT failed, then fallback to normal LT again + if (!result && (preferredTrainingType != NORMAL_LINK_TRAINING)) + result = rawTrain(lConfig, force, NORMAL_LINK_TRAINING); + + if (!result) + activeLinkConfig.lanes = 0; + else + bNoLtDoneAfterHeadDetach = false; + + if (!force && result) + this->hal->setDirtyLinkStatus(true); + + // We don't need post LQA while powering down the lanes. + if ((lConfig.lanes != 0) && + hal->isPostLtAdjustRequestSupported() && + result) + { + result = postLTAdjustment(activeLinkConfig, force); + } + + if((lConfig.lanes != 0) && result && lConfig.bEnableFEC) + { + // + // Extended latency from link-train end to FEC enable pattern + // to avoid link lost or blank screen with Synaptics branch. + // (Bug 2561206) + // + if (LT2FecLatencyMs) + { + timer->sleep(LT2FecLatencyMs); + } + + result = main->configureFec(true /*bEnableFec*/); + DP_ASSERT(result); + } + + if (lConfig != activeLinkConfig) + { + // fallback happens, returns fail to make sure clients notice it. 
+ result = false; + } + return result; +} + +void ConnectorImpl::sortActiveGroups(bool ascending) +{ + List activeSortedGroups; + + while(!activeGroups.isEmpty()) + { + ListElement * e = activeGroups.begin(); + GroupImpl * g = (GroupImpl *)e; + + GroupImpl * groupToInsertBefore = NULL; + + // Remove from active group for sorting + activeGroups.remove(g); + + for (ListElement *e1 = activeSortedGroups.begin(); e1 != activeSortedGroups.end(); e1 = e1->next) + { + GroupImpl * g1 = (GroupImpl *)e1; + if ((g->headIndex < g1->headIndex) || + ((g->headIndex == g1->headIndex) && + ((ascending && (g->singleHeadMultiStreamID < g1->singleHeadMultiStreamID)) || + (!ascending && (g->singleHeadMultiStreamID > g1->singleHeadMultiStreamID))) + )) + { + groupToInsertBefore = g1; + break; + } + } + + if (NULL == groupToInsertBefore) + { + activeSortedGroups.insertBack(g); + } + else + { + activeSortedGroups.insertBefore(groupToInsertBefore, g); + } + } + + // Repopulate active group list + while (!activeSortedGroups.isEmpty()) + { + ListElement * e = activeSortedGroups.begin(); + + // Remove from sorted list + activeSortedGroups.remove(e); + // Insert back to active group list + activeGroups.insertBack(e); + } +} + +bool ConnectorImpl::enableFlush() +{ + bool bHeadAttached = false; + + if (activeGroups.isEmpty()) + return true; + + // + // If SST check that head should be attached with single group else if MST at least + // 1 group should have headAttached before calling flush on SOR + // + if (!this->linkUseMultistream()) + { + GroupImpl * activeGroup = this->getActiveGroupForSST(); + + if (activeGroup && !activeGroup->isHeadAttached() && intransitionGroups.isEmpty()) + { + DP_LOG(("DPCONN> SST-Flush mode should not be called when head is not attached. Returning early without enabling flush")); + return true; + } + } + else + { + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + GroupImpl * group = (GroupImpl *)e; + if (group->isHeadAttached()) + { + bHeadAttached = true; + break; + } + } + + if (!bHeadAttached) + { + DP_LOG(("DPCONN> MST-Flush mode should not be called when head is not attached. Returning early without enabling flush")); + return true; + } + } + + if (!main->setFlushMode()) + return false; + + // + // Enabling flush mode shuts down the link, so the next link training + // call must not skip programming the hardware. Otherwise, EVO will + // hang if the head is still active when flush mode is disabled. + // + bSkipLt = false; + + sortActiveGroups(false); + + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + GroupImpl * g = (GroupImpl *)e; + + if (!this->linkUseMultistream()) + { + GroupImpl * activeGroup = this->getActiveGroupForSST(); + DP_ASSERT(g == activeGroup); + } + + bool skipPreLinkTraining = (((g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) || + (g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) && + (g->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)); + if (!skipPreLinkTraining) + main->preLinkTraining(g->headIndex); + + beforeDeleteStream(g, true); + if (this->linkUseMultistream()) + { + main->configureTriggerSelect(g->headIndex, g->singleHeadMultiStreamID); + main->triggerACT(); + } + afterDeleteStream(g); + } + + return true; +} + +// +// This is a wrapper for call to mainlink::train(). 
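+// It forwards hal->isPostLtAdjustRequestSupported() to main->train() unless the
+// requested config explicitly disables the Post-LT adjustment request.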
+bool ConnectorImpl::rawTrain(const LinkConfiguration & lConfig, bool force, LinkTrainingType linkTrainingType) +{ + { + // + // this is the common path + // activeLinkConfig will be updated in main->train() in case fallback happens. + // if the link config sent has disable Post LT request set, we send false for corresponding flag + // + if (lConfig.disablePostLTRequest) + { + return (main->train(lConfig, force, linkTrainingType, &activeLinkConfig, bSkipLt, false, + hal->getPhyRepeaterCount())); + } + return (main->train(lConfig, force, linkTrainingType, &activeLinkConfig, bSkipLt, hal->isPostLtAdjustRequestSupported(), + hal->getPhyRepeaterCount())); + } +} + +// +// Timeslot management +// + +bool ConnectorImpl::deleteAllVirtualChannels() +{ + // Clear the payload table + hal->payloadTableClearACT(); + if (!hal->payloadAllocate(0, 0, 63)) + { + DP_LOG(("DPCONN> Payload table could not be cleared")); + } + + // send clear_payload_id_table + DP_LOG(("DPCONN> Sending CLEAR_PAYLOAD_ID_TABLE broadcast")); + + for (unsigned retries = 0 ; retries < 7; retries++) + { + ClearPayloadIdTableMessage clearPayload; + NakData nack; + + if (this->messageManager->send(&clearPayload, nack)) + return true; + } + + // we should not have reached here. + DP_ASSERT(0 && "DPCONN> CLEAR_PAYLOAD_ID failed!"); + return false; +} + +void ConnectorImpl::clearTimeslices() +{ + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *)((Group *)i); + group->timeslot.PBN = 0; + group->timeslot.count = 0; + group->timeslot.begin = 1; + group->timeslot.hardwareDirty = false; + } + + maximumSlots = 63; + freeSlots = maximumSlots; +} + + +void ConnectorImpl::freeTimeslice(GroupImpl * targetGroup) +{ + // compact timeslot allocation + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + GroupImpl * group = (GroupImpl *)e; + + if (group->timeslot.begin > targetGroup->timeslot.begin) { + group->timeslot.begin -= targetGroup->timeslot.count; + group->timeslot.hardwareDirty = true; + + // + // enable TRIGGER_ALL on SFs corresponding to the the single head MST driving heads + // as both both pipelines need to take the affect of the shift happening due to deactivating + // an MST display being driven through same SOR + // + if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == group->singleHeadMultiStreamMode) && + (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY == group->singleHeadMultiStreamID)) + { + main->configureTriggerAll(group->headIndex, true); + } + } + } + + // mark stream as free + freeSlots += targetGroup->timeslot.count; + targetGroup->timeslot.PBN = 0; + targetGroup->timeslot.count = 0; + targetGroup->timeslot.hardwareDirty = true; +} + +bool ConnectorImpl::allocateTimeslice(GroupImpl * targetGroup) +{ + unsigned base_pbn, slot_count, slots_pbn; + + DP_ASSERT(isLinkActive()); + if (this->isFECSupported()) + { + if (!isModePossibleMSTWithFEC(activeLinkConfig, + targetGroup->lastModesetInfo, + &targetGroup->timeslot.watermarks)) + { + DP_ASSERT(0 && "DisplayDriver bug! This mode is not possible at any " + "link configuration. It would have been reject at mode filtering time!"); + return false; + } + } + else + { + if (!isModePossibleMST(activeLinkConfig, + targetGroup->lastModesetInfo, + &targetGroup->timeslot.watermarks)) + { + DP_ASSERT(0 && "DisplayDriver bug! This mode is not possible at any " + "link configuration. 
It would have been reject at mode filtering time!"); + return false; + } + } + + activeLinkConfig.pbnRequired(targetGroup->lastModesetInfo, base_pbn, slot_count, slots_pbn); + + // Check for available timeslots + if (slot_count > freeSlots) + return false; + + int firstFreeSlot = 1; + + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *)i; + + if (group->timeslot.count != 0 && + (group->timeslot.begin + group->timeslot.count) >= firstFreeSlot) + { + firstFreeSlot = group->timeslot.begin + group->timeslot.count; + } + } + + DP_ASSERT((maximumSlots - firstFreeSlot + 1) == freeSlots && "Timeslot allocation table corrupted"); + + // Already allocated? + DP_ASSERT(!targetGroup->timeslot.count && "Reallocation of stream that is already present"); + + targetGroup->timeslot.count = slot_count; + targetGroup->timeslot.begin = firstFreeSlot; + targetGroup->timeslot.PBN = base_pbn; + targetGroup->timeslot.hardwareDirty = true; + freeSlots -= slot_count; + + return true; +} + + +void ConnectorImpl::flushTimeslotsToHardware() +{ + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *)i; + + if (group->timeslot.hardwareDirty) + { + group->timeslot.hardwareDirty = false; + bool bEnable2Head1Or = false; + + if ((group->lastModesetInfo.mode == DSC_DUAL) || + (group->lastModesetInfo.mode == DSC_DROP)) + { + bEnable2Head1Or = true; + } + + main->configureMultiStream(group->headIndex, + group->timeslot.watermarks.hBlankSym, + group->timeslot.watermarks.vBlankSym, + group->timeslot.begin, + group->timeslot.begin+group->timeslot.count-1, + group->timeslot.PBN, + activeLinkConfig.PBNForSlots(group->timeslot.count), + group->colorFormat, + group->singleHeadMultiStreamID, + group->singleHeadMultiStreamMode, + bAudioOverRightPanel, + bEnable2Head1Or); + } + } +} + +void ConnectorImpl::beforeDeleteStream(GroupImpl * group, bool forFlushMode) +{ + + // + // During flush entry, if the link is not trained, retrain + // the link so that ACT can be ack'd by the sink. + // (ACK is only for multistream case) + // + // Note: A re-training might be required even in cases where link is not + // alive in non-flush mode case (Eg: beforeDeleteStream called from NAB). + // However we cannot simply re-train is such cases, without ensuring that + // head is not actively driving pixels and this needs to be handled + // differently . + // + if(forFlushMode && linkUseMultistream()) + { + if(isLinkLost()) + { + train(activeLinkConfig, false); + } + } + + // check if this is a firmware group + if (group && group->isHeadAttached() && group->headInFirmware) + { + // check if MST is enabled and we have inited messagemanager + if (hal->getSupportsMultistream() && messageManager) + { + // Firmware group can be assumed to be taking up all 63 slots. + group->timeslot.begin = 1; + group->timeslot.count = 63; + this->freeSlots = 0; + + // 1. clear the timeslots using CLEAR_PAYLAOD_TABLE + // 2. clear gpu timeslots. + if (!deleteAllVirtualChannels()) + DP_ASSERT(0 && "Failed to delete VCs. 
Vbios state in branch could not be cleaned."); + + freeTimeslice(group); + flushTimeslotsToHardware(); + group->bWaitForDeAllocACT = false; + + return; + } + } + + if (linkUseMultistream() && group && group->isHeadAttached() && group->timeslot.count) + { + // Detach all the panels from payload + for (Device * d = group->enumDevices(0); d; d = group->enumDevices(d)) + { + group->update(d, false); + } + + freeTimeslice(group); + flushTimeslotsToHardware(); + group->bWaitForDeAllocACT = true; + + // Delete the stream + hal->payloadTableClearACT(); + hal->payloadAllocate(group->streamIndex, group->timeslot.begin, 0); + + // + // If entering flush mode, enable RG (with Immediate effect) otherwise for detaching a display, + // if not single heas MST, not required to enable RG. For single head MST streams deletion, enable + // RG at loadv + // + if (forFlushMode || + ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST == group->singleHeadMultiStreamMode) && + (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY != group->singleHeadMultiStreamID))) + { + main->controlRateGoverning(group->headIndex, true/*enable*/, forFlushMode /*Immediate/loadv*/); + } + } +} + +void ConnectorImpl::afterDeleteStream(GroupImpl * group) +{ + if (linkUseMultistream() && group->isHeadAttached() && group->bWaitForDeAllocACT) + { + if (!hal->payloadWaitForACTReceived()) + { + DP_LOG(("DP> Delete stream failed. Device did not acknowledge stream deletion ACT!")); + DP_ASSERT(0); + } + } +} + +void ConnectorImpl::afterAddStream(GroupImpl * group) +{ + // Skip this as there is no timeslot allocation + if (!linkUseMultistream() || !group->timeslot.count) + return; + + if (group->bDeferredPayloadAlloc) + { + DP_ASSERT(addStreamMSTIntransitionGroups.contains(group)); + hal->payloadTableClearACT(); + hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count); + main->triggerACT(); + } + group->bDeferredPayloadAlloc = false; + + if (addStreamMSTIntransitionGroups.contains(group)) { + addStreamMSTIntransitionGroups.remove(group); + } + + if (!hal->payloadWaitForACTReceived()) + { + DP_LOG(("ACT has not been received.Triggering ACT once more")); + DP_ASSERT(0); + + // + // Bug 1334070: During modeset for cloned displays on certain GPU family, + // ACT triggered during SOR attach is not being received due to timing issues. + // Also DP1.2 spec mentions that there is no harm in sending the ACT + // again if there is no change in payload table. 
Hence triggering ACT once more here + // + main->triggerACT(); + if (!hal->payloadWaitForACTReceived()) + { + DP_LOG(("DP-TS> Downstream device did not receive ACT during stream re-add.")); + return; + } + } + + for (Device * d = group->enumDevices(0); d; d = group->enumDevices(d)) + { + group->update((DeviceImpl *)d, true); + + lastDeviceSetForVbios = d; + } + + // Disable rate gov at the end of adding all streams + if ((DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST != group->singleHeadMultiStreamMode) || + (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_MAX == group->singleHeadMultiStreamID)) + { + main->controlRateGoverning(group->headIndex, false/*disable*/, false/*loadv*/); + } + + group->updateVbiosScratchRegister(lastDeviceSetForVbios); +} + +bool ConnectorImpl::beforeAddStream(GroupImpl * group, bool test, bool forFlushMode) +{ + bool res = false; + if (linkUseMultistream()) + { + res = beforeAddStreamMST(group, test, forFlushMode); + } + else + { + // SST + Watermark water; + bool bEnable2Head1Or = false; + bool bIsModePossible = false; + + if ((group->lastModesetInfo.mode == DSC_DUAL) || + (group->lastModesetInfo.mode == DSC_DROP)) + { + bEnable2Head1Or = true; + } + + if (this->isFECSupported()) + { + bIsModePossible = isModePossibleSSTWithFEC(activeLinkConfig, + group->lastModesetInfo, + &water, + main->hasIncreasedWatermarkLimits()); + } + else + { + bIsModePossible = isModePossibleSST(activeLinkConfig, + group->lastModesetInfo, + &water, + main->hasIncreasedWatermarkLimits()); + } + + if (bIsModePossible) + { + if (group->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + if (group->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY) + { + // + // configure sf parameters after secondary linktraining on primary link. + // + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + group->singleHeadMultiStreamMode, + bAudioOverRightPanel); + } + } + else + { + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + false /*bEnableAudioOverRightPanel*/, + bEnable2Head1Or); + } + } + else + { + if (test) + { + main->configureSingleStream(group->headIndex, + water.hBlankSym, + water.vBlankSym, + activeLinkConfig.enhancedFraming, + water.tuSize, + water.waterMark, + group->colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY, + DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE, + false /*bEnableAudioOverRightPanel*/, + bEnable2Head1Or); + DP_LOG(("DP-TS> Unable to allocate stream. 
Setting RG_DIV mode")); + res = true; + } + else + DP_ASSERT(0); + } + } + return res; +} + +bool ConnectorImpl::beforeAddStreamMST(GroupImpl * group, bool test, bool forFlushMode) +{ + bool res = false; + bool isPrimaryStream = (DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY == group->singleHeadMultiStreamID); + if (allocateTimeslice(group)) + { + flushTimeslotsToHardware(); + if (!forFlushMode && isPrimaryStream) + { + main->controlRateGoverning(group->headIndex, true /*enable*/); + } + + // If not single Head MST mode or if primary stream then program here + // other streams programmed in NAE + if (forFlushMode || + (isPrimaryStream && + addStreamMSTIntransitionGroups.isEmpty())) + { + hal->payloadTableClearACT(); + hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count); + } + else if (isPrimaryStream && + !addStreamMSTIntransitionGroups.isEmpty()) + { + + group->bDeferredPayloadAlloc = true; + } + + addStreamMSTIntransitionGroups.insertFront(group); + } + else + { + if (!test) + { + DP_LOG(("DP-TS> Unable to allocate stream. Should call mainLink->configureStream to trigger RG_DIV mode")); + main->configureMultiStream(group->headIndex, + group->timeslot.watermarks.hBlankSym, group->timeslot.watermarks.vBlankSym, + 1, 0, 0, 0, group->colorFormat, group->singleHeadMultiStreamID, group->singleHeadMultiStreamMode, bAudioOverRightPanel); + } + else + { + flushTimeslotsToHardware(); + + if (forFlushMode || + (DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST != group->singleHeadMultiStreamMode) || isPrimaryStream) + { + main->configureTriggerSelect(group->headIndex, group->singleHeadMultiStreamID); + hal->payloadTableClearACT(); + hal->payloadAllocate(group->streamIndex, group->timeslot.begin, group->timeslot.count); + } + + DP_LOG(("DP-TS> Unable to allocate stream. Setting RG_DIV mode")); + res = true; + } + } + + return res; +} + +void ConnectorImpl::disableFlush( bool test) +{ + bool bHeadAttached = false; + + if (activeGroups.isEmpty()) + return; + + sortActiveGroups(true); + + // + // If SST check that head should be attached with single group else if MST at least + // 1 group should have headAttached before calling disable flush on SOR + // + if (!this->linkUseMultistream()) + { + GroupImpl * activeGroup = this->getActiveGroupForSST(); + + if (activeGroup && !activeGroup->isHeadAttached() && intransitionGroups.isEmpty()) + { + DP_LOG(("DPCONN> SST-Flush mode disable should not be called when head is not attached. Returning early without disabling flush\n")); + return; + } + } + else + { + for (ListElement * e = activeGroups.begin(); e != activeGroups.end(); e = e->next) + { + GroupImpl * group = (GroupImpl *)e; + if (group->isHeadAttached()) + { + bHeadAttached = true; + break; + } + } + + if (!bHeadAttached) + { + DP_LOG(("DPCONN> MST-Flush mode disable should not be called when head is not attached. Returning early without disabling flush\n")); + return; + } + } + + // + // We need to rebuild the tiemslot configuration when exiting flush mode + // Bug 1550750: Change the order to proceed from last to front as they were added. + // Some tiled monitors are happy with this. 
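+    // (Hence the reverse iteration below: streams are re-added from the last
+    //  group back to the first.)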
+ // + for (ListElement * e = activeGroups.last(); e != activeGroups.end(); e = e->prev) + { + GroupImpl * g = (GroupImpl *)e; + bool force = false; + NvU32 headMask = 0; + + if (!g->isHeadAttached() && this->linkUseMultistream()) + continue; + + bool skipPostLinkTraining = (((g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST) || + (g->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST)) && + (g->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)); + + // + // Allocate the timeslot configuration + // + force = beforeAddStream(g, test, true); + if (this->linkUseMultistream()) + { + main->configureTriggerSelect(g->headIndex, g->singleHeadMultiStreamID); + } + + if (g->lastModesetInfo.mode == DSC_DUAL) + { + // For 2 Head 1 OR - Legal combinations are Head0 and Head1, Head2 and Head3 + headMask = (1 << g->headIndex) | (1 << (g->headIndex + 1)); + } + else + { + headMask = (1 << g->headIndex); + } + + main->clearFlushMode(headMask, force); // ACT is triggered here + if (!skipPostLinkTraining) + main->postLinkTraining(g->headIndex); + afterAddStream(g); + } +} + +DeviceImpl* ConnectorImpl::findDeviceInList(const Address & address) +{ + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next) + { + DeviceImpl* device = (DeviceImpl*)e; + + // + // There may be multiple hits with the same address. This can + // happen when the head is still attached to the old device.branch + // We never need to resurrect old unplugged devices - and their + // object will be destroyed as soon as the DD handles the + // notifyZombie message. + // + if ((device->address == address) && device->plugged) + return device; + } + + // + // If no plugged devices are found, we should search back through zombied devices. + // This is purely as an optimizations to allow the automatic restoration of a + // panel if it 'reappears' while its still being driven + // + for (ListElement * e = deviceList.begin(); e != deviceList.end(); e = e->next) + { + DeviceImpl* device = (DeviceImpl*)e; + + if (device->address == address) + return device; + } + + return 0; +} + +void ConnectorImpl::disconnectDeviceList() +{ + for (Device * d = enumDevices(0); d; d = enumDevices(d)) + { + ((DeviceImpl*)d)->plugged = false; + // Clear the active bit (payload_allocate) + ((DeviceImpl*)d)->payloadAllocated = false; + + // Deallocate object which may go stale after long pulse handling. 
+ if (((DeviceImpl*)d)->isDeviceHDCPDetectionAlive) + { + delete ((DeviceImpl*)d)->deviceHDCPDetection; + ((DeviceImpl*)d)->deviceHDCPDetection = NULL; + ((DeviceImpl*)d)->isHDCPCap = False; + } + } +} + +// status == true: attach, == false: detach +void ConnectorImpl::notifyLongPulse(bool statusConnected) +{ + NvU32 muxState = 0; + NV_DPTRACE_INFO(HOTPLUG, statusConnected, connectorActive, previousPlugged); + + if (!connectorActive) + { + DP_LOG(("DP> Got a long pulse before any connector is active!!")); + return; + } + + if (main->getDynamicMuxState(&muxState)) + { + DeviceImpl * existingDev = findDeviceInList(Address()); + bool bIsMuxOnDgpu = DRF_VAL(0073, _CTRL_DFP_DISP_MUX, _STATE, muxState) == NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU; + + if (existingDev && existingDev->isFakedMuxDevice() && !bIsMuxOnDgpu) + { + DP_LOG((" NotifyLongPulse ignored as mux is not pointing to dGPU and there is a faked device")); + return; + } + + if (existingDev && existingDev->isPreviouslyFakedMuxDevice() && !existingDev->isMarkedForDeletion()) + { + DP_LOG((" NotifyLongPulse ignored as there is a previously faked device but it is not marked for deletion")); + if (!statusConnected) + { + DP_LOG((" Calling notifyDetectComplete")); + sink->notifyDetectComplete(); + } + return; + } + } + + if (previousPlugged && statusConnected) + { + if (main->isInternalPanelDynamicMuxCapable()) + return; + + DP_LOG(("DP> Redundant plug")); + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DeviceImpl * dev = (DeviceImpl *)i; + if (dev->ignoreRedundantHotplug()) + { + DP_LOG(("DP> Skipping link assessment")); + return; + } + } + + // + // Exit early to avoid coonector re-initialization from breaking MST + // branch state when streams are allocated. + // Additional exceptions: + // - UEFI post(firmwareGroup->headInFirmware)for fresh init. + // - MST to SST transition for that unplug event may be filtered by RM. + // Messaging will be disabled in this case. + // + if (linkUseMultistream() && (!activeGroups.isEmpty()) && + (!(firmwareGroup && ((GroupImpl *)firmwareGroup)->headInFirmware)) && + (hal->isMessagingEnabled())) + { + DP_LOG(("DP> Bail out early on redundant hotplug with active" + "MST stream")); + return; + } + } + + this->notifyLongPulseInternal(statusConnected); +} + +// +// notifyLongPulse() filters redundant hotplug notifications and calls into +// notifyLongPulseInternal(). +// +// setAllowMultiStreaming() calls into notifyLongPulseInternal() in order to +// re-detect already connected sink after enabling/disabling +// MST support. +// +void ConnectorImpl::notifyLongPulseInternal(bool statusConnected) +{ + // start from scratch + preferredLinkConfig = LinkConfiguration(); + + bPConConnected = false; + bSkipAssessLinkForPCon = false; + + // + // Check if the panel is eDP and DPCD data for that is already parsed. + // Passing this as a parameter inside notifyHPD to skip reading of DPCD + // data in case of eDP after sleep/hibernate resume. + // + hal->notifyHPD(statusConnected, (!hal->isDpcdOffline() && main->isEDP())); + if (main->isLttprSupported()) + { + // + // Update LTTPR counts since it's only correct after HPD. + // If there are some other DFP parameters might change during HPD cycle + // then we can remove the isLttprSupported() check. 
+ // + main->queryAndUpdateDfpParams(); + } + + // For bug 2489143, max link rate needs to be forced on eDP through regkey + if (main->isEDP()) + { + hal->overrideMaxLinkRate(maxLinkRateFromRegkey); + } + + // Some panels whose TCON erroneously sets DPCD 0x200 SINK_COUNT=0. + if (main->isEDP() && hal->getSinkCount() == 0) + hal->setSinkCount(1); + + // disconnect all devices + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) { + GroupImpl * g = (GroupImpl *)i; + + // Clear the timeslot table + freeTimeslice(g); + } + + disconnectDeviceList(); + + auxBus->setDevicePlugged(statusConnected); + + if (statusConnected) + { + // Reset all settings for previous downstream device + configInit(); + + if (! hal->isAtLeastVersion(1, 0 ) ) + goto completed; + + DP_LOG(("DP> HPD v%d.%d", hal->getRevisionMajor(), hal->getRevisionMinor())); + + // + // Handle to clear pending CP_IRQ that throw short pulse before L-HPD. There's no + // more short pulse corresponding to CP_IRQ after HPD, but IRQ vector needs to be + // clear or block following CP_IRQ. + // + if (hal->interruptContentProtection()) + { + DP_LOG(("DP>clear pending CP interrupt at hpd")); + hal->clearInterruptContentProtection(); + } + + populateAllDpConfigs(); + + // + // Perform OUI authentication + // + if (!performIeeeOuiHandshake() && hal->isAtLeastVersion(1, 2)) + { + DP_LOG(("DP> OUI Noncompliance! Sink is DP 1.2 and is required to implement")); + } + + // Apply Oui WARs here + applyOuiWARs(); + + // Tear down old message manager + DP_ASSERT( !hal->getSupportsMultistream() || (hal->isAtLeastVersion(1, 2) && " Device supports multistream but not DP 1.2 !?!? ")); + + // Check if we should be attempting a transition between MST<->SST + if (main->hasMultistream()) + { + if (linkState == DP_TRANSPORT_MODE_INIT) + { + linkState = hal->getSupportsMultistream() ? + DP_TRANSPORT_MODE_MULTI_STREAM : + DP_TRANSPORT_MODE_SINGLE_STREAM; + linkAwaitingTransition = false; + } + else + { + if (linkUseMultistream() != hal->getSupportsMultistream()) + { + linkAwaitingTransition = true; + DP_LOG(("CONN> Link Awaiting Transition.")); + } + else + { + linkAwaitingTransition = false; + } + } + } + + // + // Only transition between multiStream and single stream when there + // are no active panels. Note: that if we're unable to transition + // we will mark all of the displays as MUST_DISCONNECT. + // + + // + // Shutdown the old message manager if there was one + // + delete discoveryManager; + isDiscoveryDetectComplete = false; + bIsDiscoveryDetectActive = true; + + pendingEdidReads.clear(); // destroy any half completed requests + delete messageManager; + messageManager = 0; + discoveryManager = 0; + + cancelHdcpCallbacks(); + if (hal->getSupportsMultistream() && main->hasMultistream()) + { + bool bDeleteFirmwareVC = false; + + DP_LOG(("DP> Multistream panel detected, building message manager")); + + // + // Rebuild the message manager to reset and half received messages + // that may be in the pipe. 
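+            // The sequence below rebuilds the MessageManager, enables sideband
+            // messaging on the branch device, switches hotplug reporting to IRQ_HPD,
+            // and then creates the DiscoveryManager that walks the MST topology.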
+ // + messageManager = new MessageManager(hal, timer); + messageManager->registerReceiver(&ResStatus); + + // + // Create a discovery manager to initiate detection + // + if (AuxRetry::ack != hal->setMessagingEnable(true, true)) + { + DP_LOG(("DP> Failed to enable messaging for multistream panel")); + } + + if (AuxRetry::ack != hal->setMultistreamHotplugMode(IRQ_HPD)) + { + DP_LOG(("DP> Failed to enable hotplug mode for multistream panel")); + } + + discoveryManager = new DiscoveryManager(messageManager, this, timer, hal); + + // Check and clear if any pending message here + if (hal->clearPendingMsg()) + { + DP_LOG(("DP> Stale MSG found: set branch to D3 and back to D0...")); + if (hal->isAtLeastVersion(1, 4)) + { + hal->setMessagingEnable(false, true); + } + hal->setPowerState(PowerStateD3); + hal->setPowerState(PowerStateD0); + if (hal->isAtLeastVersion(1, 4)) + { + hal->setMessagingEnable(true, true); + } + } + pendingRemoteHdcpDetections = 0; + + // + // We need to clear payload table and payload id table during a hotplug in cases + // where DD does not send a null modeset for a device that was plugged. Otherwise + // this will lead to issues where branch does not clear the PBN and sends stale + // available PBN values. One of the scenarios is BSOD in SLI mode, where the secondary + // GPUs are not used for primary boot by VBIOS + // + bDeleteFirmwareVC = ((GroupImpl *)firmwareGroup && + !((GroupImpl *)firmwareGroup)->isHeadAttached() && + !bIsUefiSystem); + + if (bDeleteFirmwareVC || !bAttachOnResume) + { + deleteAllVirtualChannels(); + } + + assessLink(); // Link assessment may re-add a stream + // and must be done AFTER the messaging system + // is restored. + discoveryManager->notifyLongPulse(true); + } + else // SST case + { + DiscoveryManager::Device dev; + Edid tmpEdid; + bool isComplianceForEdidTest = false; + dev.address = Address(); + + + // We will report a dongle as new device with videoSink flag as false. + if (hal->getSinkCount() == 0) + { + dev.peerDevice = Dongle; + } + else + { + dev.peerDevice = DownstreamSink; + + // Handle fallback EDID + if(!EdidReadSST(tmpEdid, auxBus, timer, + hal->getPendingTestRequestEdidRead(), + main->isForceRmEdidRequired(), + main->isForceRmEdidRequired() ? main : 0)) + { + bool status = false; + // + // For some DP2VGA dongle which is unable to get the right after several retries. + // Before library, we do give 26 times retries for DP2VGA dongle EDID retries. + // Give most 24 times here for another re-start in library. + // Bug 996248. + // + if (hal->getLegacyPortCount()) + { + LegacyPort * port = hal->getLegacyPort(0); + if (port->getDownstreamPortType() == ANALOG_VGA) + { + NvU8 retries = DP_READ_EDID_MAX_RETRIES; + for (NvU8 i = 0; i < retries; i++) + { + status = EdidReadSST(tmpEdid, auxBus, timer, + hal->getPendingTestRequestEdidRead(), + main->isForceRmEdidRequired(), + main->isForceRmEdidRequired() ? main : 0); + if (status) + break; + } + } + } + if (!status) + { + // corrupt edid + DP_LOG(("DP-CONN> Corrupt Edid!")); + + // Reading the EDID can fail if AUX is dead. + // So update DPCD state after max number of retries. + hal->updateDPCDOffline(); + } + } + + DP_LOG(("DP-CONN> Edid read complete: Manuf Id: 0x%x, Name: %s", tmpEdid.getManufId(), tmpEdid.getName())); + dev.branch = false; + dev.dpcdRevisionMajor = hal->getRevisionMajor(); + dev.dpcdRevisionMinor = hal->getRevisionMinor(); + dev.legacy = false; + dev.SDPStreams = hal->getNumberOfAudioEndpoints() ? 
1 : 0; + dev.SDPStreamSinks = hal->getNumberOfAudioEndpoints(); + dev.videoSink = true; + dev.maxTmdsClkRate = 0U; + + // Apply EDID based WARs and update the WAR flags if needed + applyEdidWARs(tmpEdid, dev); + + // + // HP Valor QHD+ needs 50ms delay after D3 + // to prevent black screen + // + if (tmpEdid.WARFlags.delayAfterD3) + { + bDelayAfterD3 = true; + } + + // Panels use Legacy address range for interrupt reporting + if (tmpEdid.WARFlags.useLegacyAddress) + { + hal->setSupportsESI(false); + } + + // + // For some devices short pulse comes in after we disconnect the + // link, so DPLib ignores the request and link trains after modeset + // happens. When modeset happens the link configuration picked may + // be different than what we assessed before. So we skip the link + // power down in assessLink() in such cases + // + if (tmpEdid.WARFlags.keepLinkAlive) + { + DP_LOG(("tmpEdid.WARFlags.keepLinkAlive = true, set bKeepOptLinkAlive to true. (keep link alive after assessLink())\n")); + bKeepOptLinkAlive = true; + } + // Ack the test response, no matter it is a ref sink or not + if (hal->getPendingTestRequestEdidRead()) + { + isComplianceForEdidTest = true; + hal->setTestResponseChecksum(tmpEdid.getLastPageChecksum()); + hal->setTestResponse(true, true); + } + } + + // + // If this is a zombie VRR device that was previously enabled, + // re-enable it now. This must happen before link training if + // VRR was enabled before the device became a zombie or else the + // monitor will report that it's in normal mode even if the GPU is + // driving it in VRR mode. + // + { + DeviceImpl * existingDev = findDeviceInList(dev.address); + if (existingDev && existingDev->isVrrMonitorEnabled() && + !existingDev->isVrrDriverEnabled()) + { + DP_LOG(("DP> Re-enabling previously enabled zombie VRR monitor")); + existingDev->resetVrrEnablement(); + existingDev->startVrrEnablement(); + } + } + + if ((hal->getPCONCaps())->bSourceControlModeSupported) + { + bPConConnected = true; + } + + if (bPConConnected || + (main->isEDP() && this->bSkipAssessLinkForEDP) || + (main->isInternalPanelDynamicMuxCapable())) + { + this->highestAssessedLC = getMaxLinkConfig(); + this->linkGuessed = bPConConnected; + this->bSkipAssessLinkForPCon = bPConConnected; + } + else + { + if (tmpEdid.WARFlags.powerOnBeforeLt) + { + // + // Some panels expose that they are in D0 even when they are not. + // Explicit write to DPCD 0x600 is required to wake up such panel before LT. + // + hal->setPowerState(PowerStateD0); + } + this->assessLink(); + } + + if (hal->getLegacyPortCount() != 0) + { + LegacyPort * port = hal->getLegacyPort(0); + DwnStreamPortType portType = port->getDownstreamPortType(); + dev.maxTmdsClkRate = port->getMaxTmdsClkRate(); + processNewDevice(dev, tmpEdid, false, portType, port->getDownstreamNonEDIDPortAttribute()); + } + else + { + processNewDevice(dev, tmpEdid, false, DISPLAY_PORT, RESERVED, isComplianceForEdidTest); + } + + // After processNewDevice, we should not defer any lost device. 
+ bDeferNotifyLostDevice = false; + } + } + else // HPD unplug + { + // + // Shutdown the old message manager if there was one + // + delete discoveryManager; + isDiscoveryDetectComplete = false; + pendingEdidReads.clear(); // destroy any half completed requests + bDeferNotifyLostDevice = false; + + delete messageManager; + messageManager = 0; + discoveryManager = 0; + bAcpiInitDone = false; + bKeepOptLinkAlive = false; + bNoFallbackInPostLQA = false; + bDscCapBasedOnParent = false; + + } +completed: + previousPlugged = statusConnected; + fireEvents(); + + if (!statusConnected) + { + sink->notifyDetectComplete(); + return; + } + if (!(hal->getSupportsMultistream() && main->hasMultistream())) + { + // Ensure NewDev will be processed before notifyDetectComplete on SST + discoveryDetectComplete(); + } +} + +void ConnectorImpl::notifyShortPulse() +{ + // + // Do nothing if device is not plugged or + // resume has not been called after hibernate + // to activate the connector + // + if (!connectorActive || !previousPlugged) + { + DP_LOG(("DP> Got a short pulse after an unplug or before any connector is active!!")); + return; + } + DP_LOG(("DP> IRQ")); + hal->notifyIRQ(); + + // Handle CP_IRQ + if (hal->interruptContentProtection()) + { + // Cancel previous queued delay handling and reset retry counter. + hdcpCpIrqRxStatusRetries = 0; + timer->cancelCallback(this, &tagDelayedHDCPCPIrqHandling); + + if (handleCPIRQ()) + { + hal->clearInterruptContentProtection(); + } + else + { + timer->queueCallback(this, &tagDelayedHDCPCPIrqHandling, HDCP_CPIRQ_RXSTATUS_COOLDOWN); + } + } + + if (hal->getStreamStatusChanged()) + { + if (!messageManager) + { + DP_LOG(("DP> Received Stream status changed Interrupt, but not in multistream mode. Ignoring.")); + } + else + { + handleSSC(); + hal->clearStreamStatusChanged(); + + // + // Handling of SSC takes longer time during which time we miss IRQs. + // Populate interrupts again. + // + hal->notifyIRQ(); + } + } + + if (hal->interruptCapabilitiesChanged()) + { + DP_LOG(("DP> Sink capabilities changed, re-reading caps and reinitializing the link.")); + // We need to set dpcdOffline to re-read the caps + hal->setDPCDOffline(true); + hal->clearInterruptCapabilitiesChanged(); + notifyLongPulse(true); + return; + } + + if (detectSinkCountChange()) + { + DP_LOG(("DP> Change in downstream sink count. Re-analysing link.")); + // We need to set dpcdOffline to re-read the caps + hal->setDPCDOffline(true); + notifyLongPulse(true); + return; + } + + if (hal->interruptDownReplyReady()) + { + if (!messageManager) + { + DP_LOG(("DP> Received DownReply Interrupt, but not in multistream mode. Ignoring.")); + } + else + { + messageManager->IRQDownReply(); + } + } + + if (hal->interruptUpRequestReady()) + { + if (!messageManager) + { + DP_LOG(("DP> Received UpRequest Interrupt, but not in multistream mode. 
Ignoring.")); + } + else + { + messageManager->IRQUpReqest(); + } + } + + if (hal->getDownStreamPortStatusChange() && hal->getSinkCount()) + { + Edid target; + if (!EdidReadSST(target, auxBus, timer, hal->getPendingTestRequestEdidRead())) + { + DP_LOG(("DP> Failed to read EDID.")); + } + + return; + } + + if (hal->getPendingAutomatedTestRequest()) + { + if (hal->getPendingTestRequestEdidRead()) + { + Edid target; + if (EdidReadSST(target, auxBus, timer, true)) + { + hal->setTestResponseChecksum(target.getLastPageChecksum()); + hal->setTestResponse(true, true); + } + else + hal->setTestResponse(false); + } + else if (hal->getPendingTestRequestTraining()) + { + if (activeLinkConfig.multistream) + { + hal->setTestResponse(false); + } + else + { + LinkRate requestedRate; + unsigned requestedLanes; + + hal->getTestRequestTraining(requestedRate, requestedLanes); + // if one of them is illegal; don't ack. let the box try again. + if (requestedRate == 0 || requestedLanes == 0) + { + DP_ASSERT(0 && "illegal requestedRate/Lane, retry.."); + hal->setTestResponse(false); + } + else + { + // Compliance shouldn't ask us to train above its caps + if (requestedRate == 0 || requestedRate > hal->getMaxLinkRate()) + { + DP_ASSERT(0 && "illegal requestedRate"); + requestedRate = hal->getMaxLinkRate(); + } + + if (requestedLanes == 0 || requestedLanes > hal->getMaxLaneCount()) + { + DP_ASSERT(0 && "illegal requestedLanes"); + requestedLanes = hal->getMaxLaneCount(); + } + + DeviceImpl * dev = findDeviceInList(Address()); + if (!dev || !dev->plugged || dev->multistream) + { + hal->setTestResponse(false); + } + else + { + GroupImpl * groupAttached = this->getActiveGroupForSST(); + DP_ASSERT(groupAttached && groupAttached->isHeadAttached()); + + if (!dev->activeGroup || (dev->activeGroup != groupAttached)) + { + DP_ASSERT(0 && "Compliance: no group attached"); + } + + DP_LOG(("DP> Compliance: LT on IRQ request: 0x%x, %d.", requestedRate, requestedLanes)); + // now see whether the current resolution is supported on the requested link config + LinkConfiguration lc(&linkPolicy, requestedLanes, requestedRate, hal->getEnhancedFraming(), false); + + if (groupAttached && groupAttached->isHeadAttached()) + { + if (willLinkSupportModeSST(lc, groupAttached->lastModesetInfo)) + { + DP_LOG(("DP> Compliance: Executing LT on IRQ: 0x%x, %d.", requestedRate, requestedLanes)); + // we need to force the requirement irrespective of whether is supported or not. + if (!enableFlush()) + { + hal->setTestResponse(false); + } + else + { + // + // Check if linkTraining fails, perform fake linktraining. This is required because + // if we simply fail linkTraining we will not configure the head which results in + // TDRs if any modset happens after this. + // + hal->setTestResponse(true); + if (!train(lc, false)) + train(lc, true); + disableFlush(); + // Don't force/commit. Only keep the request. + setPreferredLinkConfig(lc, false, false); + } + } + else // linkconfig is not supporting bandwidth. Fallback to default edid and notify DD. + { + // override the device with fallback edid and notify a bw change to DD. 
+ DP_LOG(("DP> Compliance: Switching to compliance fallback EDID after IMP failure.")); + dev->switchToComplianceFallback(); + + DP_LOG(("DP> Compliance: Notifying bandwidth change to DD after IMP failure.")); + // notify a bandwidth change to DD + sink->bandwidthChangeNotification(dev, true); + } + } + else + { + hal->setTestResponse(true); + DP_LOG(("DP> Compliance: Link Training when the head is not attached.")); + if (!train(lc, false)) + train(lc, true); + } + } + } + } + } + + else if (hal->getPendingTestRequestPhyCompliance()) + { + hal->setTestResponse(handlePhyPatternRequest()); + } + } + + // Handle MCCS_IRQ + if (hal->intteruptMCCS()) + { + DP_LOG(("DP> MCCS_IRQ")); + handleMCCSIRQ(); + hal->clearInterruptMCCS(); + } + + if (hal->getHdmiLinkStatusChanged()) + { + DP_LOG(("DP> HDMI Link Status Changed")); + handleHdmiLinkStatusChanged(); + } + + // + // Check to make sure sink is not in D3 low power mode + // and interlane alignment is good, etc + // if not - trigger training + // + if (!isLinkInD3() && isLinkLost()) + { + // If the link status of a VRR monitor has changed, we need to check the enablement again. + if (hal->getLinkStatusChanged()) + { + for (Device *i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *dev = (DeviceImpl *)i; + + if ((dev->plugged) && (dev->activeGroup != NULL) && (dev->isVrrMonitorEnabled())) + { + // Trigger the full enablement, if the monitor is in locked state. + NvU8 retries = VRR_MAX_RETRIES; + if (!dev->isVrrDriverEnabled()) + { + DP_LOG(("DP> VRR enablement state is not synced. Re-enable it.")); + do + { + if (!dev->startVrrEnablement()) + { + continue; + } + else + break; + }while(--retries); + + if (!retries) + { + DP_LOG(("DP> VRR enablement failed on multiple retries.")); + } + } + } + } + } + + DP_LOG(("DP> Link not alive, Try to restore link configuration")); + + if (trainSingleHeadMultipleSSTLinkNotAlive(getActiveGroupForSST())) + { + return; + } + //save the previous highest assessed LC + LinkConfiguration previousAssessedLC = highestAssessedLC; + + assessLink(); + + //If the highest assessed LC has changed, send notification + if(highestAssessedLC != previousAssessedLC) + { + DeviceImpl * dev = findDeviceInList(Address()); + if (dev) + { + sink->bandwidthChangeNotification(dev, false); + } + } + } +} + +bool ConnectorImpl::detectSinkCountChange() +{ + if (this->linkUseMultistream()) + return false; + + DeviceImpl * existingDev = findDeviceInList(Address()); + if (!existingDev) + return false; + + // detect a zero to non-zero sink count change or vice versa + bool hasSink = !!(hal->getSinkCount()); + return ((existingDev->videoSink || existingDev->audioSink) != hasSink); +} + +bool ConnectorImpl::setPreferredLinkConfig(LinkConfiguration & lc, bool commit, + bool force, LinkTrainingType trainType) +{ + bool bEnteredFlushMode; + Device *dev; + + dev = enumDevices(0); + DeviceImpl * nativeDev = (DeviceImpl *)dev; + if (preferredLinkConfig.lanes || preferredLinkConfig.peakRate || preferredLinkConfig.minRate) + DP_ASSERT(0 && "Missing reset call for a preveious set preferred call"); + + if (lc.bEnableFEC && + ((nativeDev && !nativeDev->isFECSupported()) || (!this->isFECSupported()))) + { + DP_ASSERT(0 && "Client requested to enable FEC but either panel or GPU doesn't support FEC"); + return false; + } + + if (!validateLinkConfiguration(lc)) + { + DP_LOG(("Client requested bad LinkConfiguration.")); + return false; + } + + preferredLinkConfig = lc; + preferredLinkConfig.enhancedFraming = hal->getEnhancedFraming(); + 
preferredLinkConfig.multistream = this->linkUseMultistream(); + preferredLinkConfig.policy = this->linkPolicy; + if (force) + { + // Do flushmode + if (!(bEnteredFlushMode = this->enableFlush())) + DP_ASSERT(0 && "Flush fails"); + if (this->train(preferredLinkConfig, false)) + activeLinkConfig = preferredLinkConfig; + if (bEnteredFlushMode) + this->disableFlush(true); + } + else + { + if (commit) + { + assessLink(trainType); + } + } + return true; +} + +bool ConnectorImpl::resetPreferredLinkConfig(bool force) +{ + preferredLinkConfig = LinkConfiguration(); + if (force) + assessLink(); + return true; +} + +bool ConnectorImpl::isAcpiInitDone() +{ + return (hal->getSupportsMultistream() ? false : bAcpiInitDone); +} + +void ConnectorImpl::notifyAcpiInitDone() +{ + Edid ddcReadEdid; + + // Initiate the EDID Read mechanism only if it is in SST mode & plugged + if (!hal->getSupportsMultistream() && previousPlugged) + { + // Read EDID using RM Control call - NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 + if (EdidReadSST(ddcReadEdid, auxBus, timer, false, true, main)) + { + // Fill the data in device's ddcEdid & mark ACPI Init done + for (Device * i = enumDevices(0); i; i=enumDevices(i)) + { + DP_LOG(("DPCONN> ACPI Init Done. DDC EDID Read completed!!")); + + DeviceImpl * dev = (DeviceImpl*)i; + dev->ddcEdid = ddcReadEdid; + + this->bAcpiInitDone = true; + break; + } + } + } + + return; +} + +bool ConnectorImpl::getHDCPAbortCodesDP12(NvU32 &hdcpAbortCodesDP12) +{ + hdcpAbortCodesDP12 = 0; + + return false; +} + +bool ConnectorImpl::hdcpValidateKsv(const NvU8 *ksv, NvU32 Size) +{ + + if (HDCP_KSV_SIZE <= Size) + { + NvU32 i, j; + NvU32 count_ones = 0; + for (i=0; i < HDCP_KSV_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (ksv[i] & (1 <<(j))) + { + count_ones++; + } + } + } + + if (count_ones == 20) + { + return true; + } + } + return false; +} + +void ConnectorImpl::cancelHdcpCallbacks() +{ + this->isHDCPReAuthPending = false; + this->isHDCPAuthTriggered = false; + this->authRetries = 0; + + timer->cancelCallback(this, &tagHDCPReauthentication); // Cancel any queue the auth callback. + timer->cancelCallback(this, &tagDelayedHdcpCapRead); // Cancel any HDCP cap callbacks. 
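+    // The loop below also propagates the cancellation to every active group so
+    // per-stream HDCP callbacks are cleared as well.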
+ + + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *)i; + group->cancelHdcpCallbacks(); + } +} + +// Create a new Group +Group * ConnectorImpl::newGroup() +{ + Group * g = new GroupImpl(this); + if (g) + { + inactiveGroups.insertBack((GroupImpl*)g); + } + return g; +} + +// Create a new Group +Group * ConnectorImpl::createFirmwareGroup() +{ + Group * g = new GroupImpl(this, true); + if (g) + { + inactiveGroups.insertBack((GroupImpl*)g); + } + return g; +} + +// Shutdown and the destroy the connector manager +void ConnectorImpl::destroy() +{ + delete this; +} + +void ConnectorImpl::createFakeMuxDevice(const NvU8 *buffer, NvU32 bufferSize) +{ + if (!buffer) + return; + + // Return immediately if DSC is not supported + if(FLD_TEST_DRF(_DPCD14, _DSC_SUPPORT, _DSC_SUPPORT, _YES, buffer[0]) != 1) + return; + + DeviceImpl * existingDev = findDeviceInList(Address()); + + // Return immediately if we already have a device + if (existingDev) + { + return; + } + + DeviceImpl *newDev = new DeviceImpl(hal, this, NULL); + if (!newDev) + { + return; + } + + newDev->connectorType = connectorDisplayPort; + newDev->plugged = true; + newDev->videoSink = true; + newDev->bIsFakedMuxDevice = true; + newDev->bIsPreviouslyFakedMuxDevice = false; + + // Initialize DSC state + newDev->dscCaps.bDSCSupported = true; + newDev->parseDscCaps(buffer, bufferSize); + dpMemCopy(newDev->rawDscCaps, buffer, DP_MIN(bufferSize, 16)); + newDev->bDSCPossible = true; + newDev->devDoingDscDecompression = newDev; + + populateAllDpConfigs(); + deviceList.insertBack(newDev); + sink->newDevice(newDev); + sink->notifyDetectComplete(); +} + +void ConnectorImpl::deleteFakeMuxDevice() +{ + DeviceImpl * existingDev = findDeviceInList(Address()); + if (!existingDev) + return; + + // If this is not a fake device then don't delete it + if (!existingDev->isPreviouslyFakedMuxDevice()) + return; + + existingDev->markDeviceForDeletion(); + notifyLongPulse(false); + + return; +} + +bool ConnectorImpl::getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) +{ + DeviceImpl * existingDev = findDeviceInList(Address()); + if (!existingDev) + return false; + + return existingDev->getRawDscCaps(buffer, bufferSize); +} + +bool ConnectorImpl::isMultiStreamCapable() +{ + return main->hasMultistream(); +} + +bool ConnectorImpl::isFlushSupported() +{ + return true; +} + +bool ConnectorImpl::isStreamCloningEnabled() +{ + return main->isStreamCloningEnabled(); +} + +bool ConnectorImpl::isFECSupported() +{ + return main->isFECSupported(); +} + +bool ConnectorImpl::isFECCapable() +{ + DeviceImpl *dev; + + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + { + dev = (DeviceImpl *)i; + // If it's SST, or if it's the first connected branch. 
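+        // (address.size() == 1 corresponds to a hop count of one, i.e. the device
+        // attached directly below this connector - the first branch/sink in the
+        // topology.)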
+ if (!this->linkUseMultistream() || dev->address.size() == 1) + { + return (dev->getFECSupport() && this->isFECSupported()); + } + } + return false; +} + +NvU32 ConnectorImpl::maxLinkRateSupported() +{ + return main->maxLinkRateSupported(); +} + +Connector * DisplayPort::createConnector +( + MainLink * main, + AuxBus * aux, + Timer * timer, + Connector::EventSink * sink +) +{ + ConnectorImpl *connector = new ConnectorImpl(main, aux, timer, sink); + + if (connector == NULL || connector->constructorFailed) { + delete connector; + return NULL; + } + + if (main->getRegkeyValue(NV_DP_REGKEY_ENABLE_OCA_LOGGING)) + { + main->retrieveRingBuffer(LOG_CALL, MAX_RECORD_COUNT); + main->retrieveRingBuffer(ASSERT_HIT, MAX_RECORD_COUNT); + } + + return connector; +} + +void ConnectorImpl::setAllowMultiStreaming(bool bAllowMST) +{ + // + // hal->getMultiStreamCapOverride() returns true, if MST cap has been + // overridden to SST. + // + if (!hal->getMultiStreamCapOverride() == bAllowMST) + return; + + if (previousPlugged && + getSinkMultiStreamCap() && + !activeGroups.isEmpty() && linkUseMultistream() != bAllowMST) + { + DP_ASSERT(!"If connected sink is MST capable then:" + "Client should detach all active MST video/audio streams " + "before disallowing MST, vise-versa client should detach " + "active SST stream before allowing MST."); + } + + // + // Disable MST messaging, if client has disallowed MST; + // notifyLongPulseInternal() enable back MST messaging when client + // allow MST. + // + if (previousPlugged && linkUseMultistream() && !bAllowMST) + hal->setMessagingEnable( + false /* _uprequestEnable */, true /* _upstreamIsSource */); + + hal->overrideMultiStreamCap(bAllowMST /* mstCapable */ ); + + // Re-detect already connected sink, and to keep software state in sync + if (previousPlugged && getSinkMultiStreamCap()) + { + isHDCPAuthOn = isDP12AuthCap = false; + notifyLongPulseInternal(true); + } +} + +bool ConnectorImpl::getAllowMultiStreaming(void) +{ + // + // hal->getMultiStreamCapOverride() returns true, if MST cap has been + // overridden to SST. 
+ // + return !hal->getMultiStreamCapOverride(); +} + +bool ConnectorImpl::getSinkMultiStreamCap(void) +{ + return hal->getDpcdMultiStreamCap(); +} + +void ConnectorImpl::setDp11ProtocolForced() +{ + if (!this->linkUseMultistream()) + { + return; + } + + this->notifyLongPulse(false); + hal->setMessagingEnable(false, true); + hal->setMultistreamLink(false); + hal->overrideMultiStreamCap(false /*no mst*/); + this->notifyLongPulse(true); +} + +void ConnectorImpl::resetDp11ProtocolForced() +{ + if (this->linkUseMultistream()) + { + return; + } + + this->notifyLongPulse(false); + hal->overrideMultiStreamCap(true /*mst capable*/); + this->notifyLongPulse(true); +} + +bool ConnectorImpl::isDp11ProtocolForced() +{ + return hal->getMultiStreamCapOverride(); +} + +bool ConnectorImpl::getTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern) +{ + return (main->getDpTestPattern(testPattern)); +} + +bool ConnectorImpl::setTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride) +{ + return (main->setDpTestPattern(testPattern, laneMask, cstm, bIsHBR2, bSkipLaneDataOverride)); +} + +bool ConnectorImpl::getLaneConfig(NvU32 *numLanes, NvU32 *data) +{ + return (main->getDpLaneData(numLanes, data)); +} + +bool ConnectorImpl::setLaneConfig(NvU32 numLanes, NvU32 *data) +{ + return (main->setDpLaneData(numLanes, data)); +} + +void ConnectorImpl::getCurrentLinkConfig(unsigned & laneCount, NvU64 & linkRate) +{ + main->getLinkConfig(laneCount, linkRate); +} + +unsigned ConnectorImpl::getPanelDataClockMultiplier() +{ + LinkConfiguration linkConfig = getMaxLinkConfig(); + return getDataClockMultiplier(linkConfig.peakRatePossible, linkConfig.lanes); +} + +unsigned ConnectorImpl::getGpuDataClockMultiplier() +{ + unsigned laneCount; + NvU64 linkRate; + // Need to get the GPU caps, not monitor caps. 
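+    // The GPU-side multiplier is therefore computed from the maximum link rate the
+    // GPU supports and a full 4-lane configuration (laneCount_4), independent of
+    // the currently trained link configuration.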
+ linkRate = maxLinkRateSupported(); + + laneCount = laneCount_4; + + return getDataClockMultiplier(linkRate, laneCount); +} + +void ConnectorImpl::configurePowerState(bool bPowerUp) +{ + main->configurePowerState(bPowerUp); +} + +bool ConnectorImpl::readPsrState(vesaPsrState *psrState) +{ + return hal->readPsrState(psrState); +} + +void ConnectorImpl::readPsrCapabilities(vesaPsrSinkCaps *caps) +{ + hal->readPsrCapabilities(caps); +} + +bool ConnectorImpl::readPsrConfiguration(vesaPsrConfig *psrConfig) +{ + return hal->readPsrConfiguration(psrConfig); +} + +bool ConnectorImpl::updatePsrConfiguration(vesaPsrConfig config) +{ + return hal->updatePsrConfiguration(config); +} + +bool ConnectorImpl::readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) +{ + return hal->readPsrDebugInfo(psrDbgState); +} + +bool ConnectorImpl::writePsrErrorStatus(vesaPsrErrorStatus psrErr) +{ + return hal->writePsrErrorStatus(psrErr); +} + +bool ConnectorImpl::readPsrErrorStatus(vesaPsrErrorStatus *psrErr) +{ + return hal->readPsrErrorStatus(psrErr); +} + +bool ConnectorImpl::writePsrEvtIndicator(vesaPsrEventIndicator psrEvt) +{ + return hal->writePsrEvtIndicator(psrEvt); +} + +bool ConnectorImpl::readPsrEvtIndicator(vesaPsrEventIndicator *psrEvt) +{ + return hal->readPsrEvtIndicator(psrEvt); +} + +bool ConnectorImpl::updatePsrLinkState(bool bTrainLink) +{ + bool bRet = true; + if (bTrainLink) + { + // Bug 3438892 If the panel is turned off the reciever on its side, + // force panel link on by writting 600 = 1 + if (this->isLinkLost()) + { + hal->setPowerState(PowerStateD0); + return false; + } + + // Check if Link config is valid + if (!this->psrLinkConfig.isValid()) + { + return false; + } + // Restore Link config/do Link Train + bRet = setPreferredLinkConfig(this->psrLinkConfig, false, true, NORMAL_LINK_TRAINING); + } + else + { + // Save the link config + this->psrLinkConfig = getActiveLinkConfig(); + } + return bRet; +} + +bool ConnectorImpl::handlePhyPatternRequest() +{ + + bool status = true; + PatternInfo pattern_info; + + pattern_info.lqsPattern = hal->getPhyTestPattern(); + + // Get lane count from most current link training + unsigned requestedLanes = this->activeLinkConfig.lanes; + + if (pattern_info.lqsPattern == LINK_QUAL_80BIT_CUST) + { + hal->getCustomTestPattern((NvU8 *)&pattern_info.ctsmLower); + } + + // send control call to rm for the pattern + if (!main->physicalLayerSetTestPattern(&pattern_info)) + { + DP_ASSERT(0 && "Could not set the PHY_TEST_PATTERN"); + status = false; + } + else + { + if (AuxRetry::ack != hal->setLinkQualPatternSet(pattern_info.lqsPattern, requestedLanes)) + { + DP_ASSERT(0 && "Could not set the LINK_QUAL_PATTERN"); + status = false; + } + } + return status; +} + +// +// This function is used to send dp test message. +// requestSize indicates the buffer size pointed by pBuffer +// +DP_TESTMESSAGE_STATUS ConnectorImpl::sendDPTestMessage +( + void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus +) +{ + if (messageManager) + { + testMessage.setupTestMessage(messageManager, this); + return testMessage.sendDPTestMessage(pBuffer, requestSize, pDpStatus); + } + else + { + return DP_TESTMESSAGE_STATUS_ERROR; + } +} + +// +// This function is designed for user to call twcie. The first time with NULL of +// pStreamIDs to get the number of streams. +// The second time, user would call the function with allocated buffer. 
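+// A minimal usage sketch of the two-call pattern (hypothetical caller, not part of
+// this library):
+//
+//     NvU32 count = 0;
+//     if (connector->getStreamIDs(NULL, &count) == DP_TESTMESSAGE_STATUS_SUCCESS && count > 0)
+//     {
+//         NvU32 *ids = new NvU32[count];
+//         if (connector->getStreamIDs(ids, &count) == DP_TESTMESSAGE_STATUS_SUCCESS)
+//         {
+//             // ids[0..count-1] now hold the streamIndex of each active group.
+//         }
+//         delete[] ids;
+//     }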
+// +DP_TESTMESSAGE_STATUS ConnectorImpl::getStreamIDs(NvU32 *pStreamIDs, NvU32 *pCount) +{ + DP_TESTMESSAGE_STATUS ret; + + NvU32 streamCnt = activeGroups.size(); + if (NULL == pStreamIDs) + { + ret = DP_TESTMESSAGE_STATUS_SUCCESS; + } + else if (*pCount >= streamCnt) + { + NvU32 n = 0; + for (ListElement * i = activeGroups.begin(); i != activeGroups.end(); i = i->next) + { + GroupImpl * group = (GroupImpl *)i; + pStreamIDs[n++] = group->streamIndex; + } + ret = DP_TESTMESSAGE_STATUS_SUCCESS; + } + else + { + //buffer size not enough, the return value will be mapped and returned to nvapi + ret = DP_TESTMESSAGE_STATUS_ERROR_INSUFFICIENT_INPUT_BUFFER; + } + + *pCount = streamCnt; + + return ret; +} + +void ConnectorImpl::notifyGPUCapabilityChange() +{ + // Query current GPU capabilities. + main->queryGPUCapability(); + +} + +void ConnectorImpl::notifyHBR2WAREngage() +{ + bool peakBwChanged = false; + LinkConfiguration preLc = getMaxLinkConfig(); + // Update GPU capabilities + this->notifyGPUCapabilityChange(); + LinkConfiguration postLc = getMaxLinkConfig(); + + peakBwChanged = (preLc.peakRatePossible != postLc.peakRatePossible); + + if (this->previousPlugged && peakBwChanged) + { + // Set caps change status to make sure device becomes zombie + this->bMitigateZombie = true; + + if (this->policyModesetOrderMitigation) + { + this->modesetOrderMitigation = true; + } + // NEED TO CHECK. MAY GO AFTER LONGPULSE TRUE ???? + // If multistream, delete the MST slots allocation in Branch device + if (this->linkUseMultistream()) + this->deleteAllVirtualChannels(); + + // Disconnect the device + this->notifyLongPulse(false); + + // Connect the device again + this->notifyLongPulse(true); + } + +} + +bool ConnectorImpl::isLinkAwaitingTransition() +{ + return this->linkAwaitingTransition; +} + +void ConnectorImpl::configInit() +{ + // Reset branch specific flags + bKeepOptLinkAlive = 0; + bNoFallbackInPostLQA = 0; + LT2FecLatencyMs = 0; + bDscCapBasedOnParent = false; +} + diff --git a/src/common/displayport/src/dp_crc.cpp b/src/common/displayport/src/dp_crc.cpp new file mode 100644 index 000000000..32e26f5d8 --- /dev/null +++ b/src/common/displayport/src/dp_crc.cpp @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort*********************************\ +* * +* Module: dp_crc.cpp * +* CRC Algorithms for the messaging subsystem. * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_crc.h" +using namespace DisplayPort; + +// +// DP CRC for transactions headers +// +unsigned DisplayPort::dpCalculateHeaderCRC(BitStreamReader * reader) +{ + unsigned remainder = 0; + unsigned bit, i; + + while (reader->read(&bit, 1)) + { + remainder <<= 1; + remainder |= bit; + if ((remainder & 0x10) == 0x10) + { + remainder ^= 0x13; + } + } + + for (i = 4; i != 0; i--) + { + remainder <<= 1; + if ((remainder & 0x10) != 0) + { + remainder ^= 0x13; + } + } + + return remainder & 0xF; +} + +// +// DP CRC for body +// +unsigned DisplayPort::dpCalculateBodyCRC(BitStreamReader * reader) +{ + unsigned remainder = 0; + unsigned bit, i; + + while (reader->read(&bit, 1)) + { + remainder <<= 1; + remainder |= bit; + if ((remainder & 0x100) == 0x100) + { + remainder ^= 0xD5; + } + } + + for (i = 8; i != 0; i--) + { + remainder <<= 1; + if ((remainder & 0x100) != 0) + { + remainder ^= 0xD5; + } + } + + return remainder & 0xFF; +} diff --git a/src/common/displayport/src/dp_deviceimpl.cpp b/src/common/displayport/src/dp_deviceimpl.cpp new file mode 100644 index 000000000..21c97c637 --- /dev/null +++ b/src/common/displayport/src/dp_deviceimpl.cpp @@ -0,0 +1,2552 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_deviceimpl.cpp * +* DP device implementation * +* * +\***************************************************************************/ + +#include "dp_connectorimpl.h" +#include "dp_deviceimpl.h" +#include "dp_auxdefs.h" +#include "dp_groupimpl.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +using namespace DisplayPort; + +bool DeviceImpl::isMustDisconnect() +{ + // + // Device is must disconnect if we're trying to make an SST<->MST transition + // + if ((this->isActive()) && connector->linkAwaitingTransition) + { + return true; + } + + return false; +} + +DeviceImpl::~DeviceImpl() +{ + if (isDeviceHDCPDetectionAlive && deviceHDCPDetection) + { + delete deviceHDCPDetection; + deviceHDCPDetection = nullptr; + } + + if (vrrEnablement) + { + delete vrrEnablement; + vrrEnablement = NULL; + } + + // Unlink this node from its children + for (unsigned int i = 0; i < sizeof(children)/sizeof(*children); i++) + if (children[i]) + children[i]->parent = 0; + + // Unlink this node from its parent when it's there + if (parent && (parent->children[this->address.tail()] == this)) + parent->children[this->address.tail()] = 0; + + devDoingDscDecompression = NULL; +} + + +DeviceImpl::DeviceImpl(DPCDHAL * hal, ConnectorImpl * connector, DeviceImpl * parent) + : parent(parent), + hal(hal), + activeGroup(0), + connector(connector), + address(), + plugged(false), + friendlyAux(this), + isHDCPCap(False), + isDeviceHDCPDetectionAlive(false), + deviceHDCPDetection(0), + vrrEnablement(0), + bIsFakedMuxDevice(false), + bIsPreviouslyFakedMuxDevice(false), + bisMarkedForDeletion(false), + bSdpExtCapable(Indeterminate) +{ + bandwidth.enum_path.dataValid = false; + shadow.plugged = false; + shadow.zombie = false; + shadow.cableOk = true; + shadow.hdcpCapDone = false; + shadow.highestAssessedLC = connector->highestAssessedLC; + dpMemZero(rawDscCaps, sizeof(rawDscCaps)); +} + +bool DeviceImpl::isZombie() +{ + // You can't be a zombie if nothing is attached + if (!(this->isActive())) + return false; + + if (!plugged) + return true; + + if (isMustDisconnect()) + return true; + + if (!isMultistream()) + { + if (connector->bMitigateZombie) + return true; + + return !connector->willLinkSupportModeSST(connector->highestAssessedLC, + ((GroupImpl*)activeGroup)->lastModesetInfo); + } + else + { + return !this->payloadAllocated; + } +} + +bool DeviceImpl::isCableOk() +{ + if (hal->isDpcdOffline()) + { + // Just say that the cable is ok since we do not have anything connected + return true; + } + else + { + return ! (connector->highestAssessedLC.peakRate < connector->getMaxLinkConfig().peakRate && + connector->highestAssessedLC.lanes < connector->getMaxLinkConfig().lanes); + } +} + +bool DeviceImpl::isLogical() +{ + if (this->address.size() == 0) + return false; + + DP_ASSERT((this->address.tail() <= LOGICAL_PORT_END) && "Invalid port number"); + + // Logical port numbers of a branching unit are from Port 0x08 up to Port 0xF + if (this->address.tail() >= LOGICAL_PORT_START) + return true; + + return false; +} + +bool DeviceImpl::isPendingNewDevice() +{ + if (shadow.plugged == plugged) + return false; + + if (!plugged) + return false; + + // Delay the newDevice event till all enabled heads are not detached. + if (connector->policyModesetOrderMitigation && connector->modesetOrderMitigation) + return false; + + return !connector->linkAwaitingTransition; +} + +bool DeviceImpl::isPendingLostDevice() +{ + // marked for lazy exit..to be done now. 
+ if (complianceDeviceEdidReadTest && lazyExitNow) + return true; + + if (isZombie()) + return false; + + if (shadow.plugged == plugged) + return false; + + return !plugged; +} + +bool DeviceImpl::isPendingZombie() +{ + if (isZombie() && !shadow.zombie) + return true; + else if (!isZombie() && shadow.zombie && plugged) + return (connector->policyModesetOrderMitigation ? false : true); + return false; +} + +bool DeviceImpl::isPendingHDCPCapDone() +{ + if ((isHDCPCap != Indeterminate) && !shadow.hdcpCapDone) + return true; + else + return false; +} + +bool DeviceImpl::isPendingCableOk() +{ + return isCableOk() != shadow.cableOk; +} + +bool DeviceImpl::isPendingBandwidthChange() +{ + return shadow.highestAssessedLC != connector->highestAssessedLC; +} + +bool DeviceImpl::getI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot) +{ + unsigned dataCompleted, sizeRemaining; + DisplayPort::AuxBus::status status; + Type transactionType; + + if (!buffer || !sizeCompleted) + return false; + + dataCompleted = 0; + *sizeCompleted = 0; + do + { + sizeRemaining = (sizeRequested - *sizeCompleted); + if ((this->address.size() < 2) && (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE)) + { + + // + // SST case + // if the transaction buffer is a multiple of 16 bytes (NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE). + // Break it to 16 bytes boundary (HW default) and the first transaction sets the middle of + // transaction bit (MOT). This will mark all the subsequent reads are all of a part of the + // same transaction (I2C restart). + // + status = transaction(AuxBus::read, AuxBus::i2cMot, offset, buffer + *sizeCompleted, + NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE, &dataCompleted); + } + else if (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE) + { + + // + // MST case + // For i2c transactions over MST devices, if the transaction buffer is divided into + // 16 bytes chunks, then read index keeps getting reset for subsequent 16B fetch. + // Refer Bug: 1233042. + // + status = transaction(AuxBus::read, AuxBus::i2cMot, offset, buffer + *sizeCompleted, + sizeRemaining, &dataCompleted); + } + else + { + // + // clear the MOT if it is a single transaction or the last bytes of + // a large, multiple of 16 bytes buffer (end of transaction). + // Note that for some customer specific needs they might force MOT bit + // when it shouldn't be set. So check if client forced the MOT bit and honour that. + // + transactionType = bForceMot ? AuxBus::i2cMot : AuxBus::i2c; + status = transaction(AuxBus::read, transactionType, offset, buffer + *sizeCompleted, + sizeRemaining, &dataCompleted); + } + + if (status != AuxBus::success) + { + DP_LOG(("DPDEV> %s: Failed read transaction", __FUNCTION__)); + break; + } + + if (dataCompleted == 0) + { + // Successfully read 0 bytes? Break out + break; + } + *sizeCompleted += dataCompleted; + } + while (*sizeCompleted < sizeRequested); + + return (status == AuxBus::success); +} + +bool DeviceImpl::setI2cData(unsigned offset, NvU8 * buffer, unsigned sizeRequested, unsigned * sizeCompleted, bool bForceMot) +{ + unsigned dataCompleted, sizeRemaining; + DisplayPort::AuxBus::status status; + Type transactionType; + + if (!buffer || !sizeCompleted) + return false; + + dataCompleted = 0; + *sizeCompleted = 0; + + // + // If the hop count is one, we're asking for DPCD to the root node. + // If hop count is zero, this is a DP 1.1 target. + // Hop Count Greater than or equal 2 is when we have a single or multiple branch + // device/s. 
This signifies REMOTE_I2C_WRITE transaction case. + // Here we should not divide the data to 16 byte boundary as if we + // do, the branch device will not know that it needs to set MOT=1. + // So we send the entire data up to a max payload of 255 Bytes. + // Please refer Bug 1964453 for more information. + // + if ((this->address.size() >= 2) && + (sizeRequested > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE)) + { + status = transaction(AuxBus::write, AuxBus::i2cMot, offset, buffer, + sizeRequested, &dataCompleted); + + if (status != AuxBus::success) + { + DP_LOG(("DPDEV> %s: Failed write transaction", __FUNCTION__)); + return false; + } + *sizeCompleted = dataCompleted; + DP_ASSERT(*sizeCompleted >= sizeRequested); + return (status == AuxBus::success); + } + + do + { + sizeRemaining = (sizeRequested - *sizeCompleted); + if (sizeRemaining > NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE) + { + + // + // if the transaction buffer is a multiple of 16 bytes (NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE). + // Break it to 16 bytes boundary (HW default) and the first transaction sets the middle of + // transaction bit (MOT). This will mark all the subsequent writes are all of a part of the + // same transaction (I2C restart). + // + status = transaction(AuxBus::write, AuxBus::i2cMot, offset, buffer + *sizeCompleted, + NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE, &dataCompleted); + } + else + { + // + // clear the MOT if it is a single transaction or the last bytes of + // a large, multiple of 16 bytes buffer (end of transaction). + // Note that for some customer specific needs they might force MOT bit + // when it shouldn't be set. So check if client forced the MOT bit and honour that. + // + transactionType = bForceMot ? AuxBus::i2cMot : AuxBus::i2c; + status = transaction(AuxBus::write, transactionType, offset, buffer + *sizeCompleted, + sizeRemaining, &dataCompleted); + } + + if (status != AuxBus::success) + { + DP_LOG(("DPDEV> %s: Failed write transaction", __FUNCTION__)); + break; + } + + if (dataCompleted == 0) + { + // Successfully read 0 bytes? Break out + break; + } + *sizeCompleted += dataCompleted; + } while (*sizeCompleted < sizeRequested); + + return (status == AuxBus::success); +} + +AuxBus::status DeviceImpl::getDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason) +{ + if (!buffer || !sizeCompleted) + { + // default param may be NULL + if (pNakReason) *pNakReason = NakUndefined; + return AuxBus::nack; + } + + // + // Remote DPCD doesn't work for Peer Device 4 i.e. DP-to-Legacy Dongle. + // But if a virtual DP peer device with Protocol Converter functionality + // populates the DPCD_Revision field of the LINK_ADDRESS Message reply + // then allow DPCD transaction + // + if ((this->peerDevice == Dongle) && (this->dpcdRevisionMajor == 0)) + { + if (pNakReason) *pNakReason = NakBadParam; + return AuxBus::nack; + } + + return (transaction(AuxBus::read, AuxBus::native, offset, buffer, + sizeRequested, sizeCompleted, pNakReason)); +} + +AuxBus::status DeviceImpl::setDpcdData(unsigned offset, NvU8 * buffer, + unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason) +{ + if (!buffer || !sizeCompleted) + { + // default param may be NULL + if (pNakReason) *pNakReason = NakUndefined; + return AuxBus::nack; + } + + // + // Remote DPCD doesn't work for Peer Device 4 i.e. 
DP-to-Legacy Dongle + // But if a virtual DP peer device with Protocol Converter functionality + // populates the DPCD_Revision field of the LINK_ADDRESS Message reply + // then allow DPCD transaction + // + if ((this->peerDevice == Dongle) && (this->dpcdRevisionMajor == 0)) + { + if (pNakReason) *pNakReason = NakBadParam; + return AuxBus::nack; + } + + return (transaction(AuxBus::write, AuxBus::native, offset, buffer, + sizeRequested, sizeCompleted, pNakReason)); +} + +AuxBus::status DeviceImpl::queryFecData(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) +{ + if (!fecStatus || !fecErrorCount) + { + return AuxBus::nack; + } + + return (fecTransaction(fecStatus, fecErrorCount, flags)); +} + +DscCaps DeviceImpl::getDscCaps() +{ + return dscCaps; +} + +// +// This function returns the device itself or its parent device that is doing +// DSC decompression for it. +// +Device* DeviceImpl::getDevDoingDscDecompression() +{ + return devDoingDscDecompression; +} + +bool DeviceImpl::getRawDscCaps(NvU8 *buffer, NvU32 bufferSize) +{ + if (bufferSize < sizeof(rawDscCaps)) + return false; + + dpMemCopy(buffer, &rawDscCaps, sizeof(rawDscCaps)); + return true; +} + +AuxBus::status DeviceImpl::transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned * pNakReason, + NvU8 offset, NvU8 nWriteTransactions) +{ + // In case of default implementation, the reason for transaction failure + // must be stored somewhere + unsigned defaultReason; + if (!pNakReason) pNakReason = &defaultReason; + // default failure reason is undefined + *pNakReason = NakUndefined; + + if (type == AuxBus::i2c || type == AuxBus::i2cMot) + { + address >>= 1; // right shifted DDC Address (request identifier in spec) + } + + // If the hop count is one, we're asking for DPCD to the root node. + // If hop count is zero, this is a DP 1.1 target. + if (this->address.size() >= 2) + { + NakData nak; + + if (connector == NULL || connector->messageManager == NULL) + { + return AuxBus::nack; + } + + if (action == AuxBus::read && type == AuxBus::native) + { + RemoteDpcdReadMessage read; + read.set(this->address.parent(), this->address.tail(), address, sizeRequested); + if (!connector->messageManager->send(&read, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakDefer) + return AuxBus::defer; + if (nak.reason == NakDpcdFail) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = read.replyNumOfBytesReadDPCD(); + + if (*sizeCompleted > sizeRequested) { + DP_LOG(("DPDEV> DPCD Read return more data than requested. Clamping buffer to requested size!")); + *sizeCompleted = sizeRequested; + } + + dpMemCopy(buffer, read.replyGetData(), *sizeCompleted); + + return AuxBus::success; + } + else if ((action == AuxBus::read) && ((type == AuxBus::i2c) || (type == AuxBus::i2cMot))) + { + bool isNoStopBit = (type == AuxBus::i2cMot) ? 
1:0; + RemoteI2cReadMessage remoteI2cRead; + I2cWriteTransaction i2cWriteTransactions[1]; + i2cWriteTransactions[0] = I2cWriteTransaction(address, + 0, + &offset, + isNoStopBit); + + if (nWriteTransactions > 1) + { + DP_LOG(("DPDEV> Set function will fail for transactions > 1, please increase the array size!")); + return AuxBus::nack; + } + + remoteI2cRead.set(this->address.parent(), // topology Address + nWriteTransactions, // number of write transactions + this->address.tail(), // port of Device + i2cWriteTransactions, // list of write transactions + address, // right shifted DDC Address (request identifier in spec) + sizeRequested); // requested size + + if (!connector->messageManager->send(&remoteI2cRead, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakI2cNak) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = remoteI2cRead.replyNumOfBytesReadI2C(); + + if (*sizeCompleted > sizeRequested) { + DP_LOG(("DPDEV> I2C Read returned more data than requested. Clamping buffer to requested size!")); + *sizeCompleted = sizeRequested; + } + + dpMemCopy(buffer, remoteI2cRead.replyGetI2CData(sizeCompleted), *sizeCompleted); + + return AuxBus::success; + } + else if (action == AuxBus::write && type == AuxBus::native) + { + RemoteDpcdWriteMessage write; + write.set(this->address.parent(), this->address.tail(), address, sizeRequested, buffer); + + if (!connector->messageManager->send(&write, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakDefer) + return AuxBus::defer; + if (nak.reason == NakDpcdFail) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = sizeRequested; + + return AuxBus::success; + } + else if ((action == AuxBus::write) && ((type == AuxBus::i2c) || (type == AuxBus::i2cMot))) + { + RemoteI2cWriteMessage remoteI2cWrite; + + remoteI2cWrite.set(this->address.parent(), // topology Address + this->address.tail(), // port of Device + address, // right shifted DDC Address (request identifier in spec) + sizeRequested, + buffer); + + if (!connector->messageManager->send(&remoteI2cWrite, nak)) { + // Copy reason back to caller + *pNakReason = nak.reason; + // Translate the DPCD error codes + if (nak.reason == NakI2cNak) + return AuxBus::nack; + + // This isn't quite right. We're translating unknown messaging related + // failure cases into defers. This is done so that the client will retry the operation + return AuxBus::defer; + } + + *sizeCompleted = sizeRequested; + + return AuxBus::success; + } + else + { + DP_ASSERT(0 && "Only aux native and i2c reads and writes supported"); + return AuxBus::nack; + } + } + else + { + return this->connector->auxBus->transaction(action, type, address, buffer, + sizeRequested, sizeCompleted, pNakReason); + } +} + +unsigned DeviceImpl::transactionSize() +{ + // + // Remote (DP 1.2) sinks can read much larger chunks at once due to messaging.
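As an aside on the two transaction sizes at play here (255 bytes for a remote MST device, 16 bytes for a native AUX transaction), the sketch below shows how a caller could split a large DPCD write by honouring transactionSize() and setDpcdData() as defined above. The helper name writeDpcdChunked is hypothetical and is not part of this change; treat it as an illustration only.

    // Illustrative sketch, not part of the driver sources: split a large DPCD
    // write into chunks no bigger than what the device accepts per transaction.
    static bool writeDpcdChunked(DeviceImpl *dev, unsigned offset,
                                 NvU8 *data, unsigned size)
    {
        unsigned done = 0;
        while (done < size)
        {
            unsigned chunk = size - done;
            if (chunk > dev->transactionSize())
                chunk = dev->transactionSize();   // 255 remote, AUX max otherwise

            unsigned written = 0;
            unsigned nakReason = NakUndefined;
            if (dev->setDpcdData(offset + done, data + done, chunk,
                                 &written, &nakReason) != AuxBus::success)
                return false;                     // caller may retry on a defer
            if (written == 0)
                break;                            // nothing accepted; avoid spinning
            done += written;
        }
        return done == size;
    }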
+ // + if (this->address.size() >= 2) + return 255; + else + return this->connector->auxBus->transactionSize(); +} + +static AuxBus::status _QueryFecStatus +( + DeviceImpl *bus, + NvU8 *pStatus +) +{ + AuxBus::status status = AuxBus::success; + + NvU32 addr = NV_DPCD14_FEC_STATUS; + unsigned size = 1; + + unsigned sizeCompleted = 0; + unsigned pNakReason = 0; + + status = bus->getDpcdData(addr, pStatus, size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_LOG(("DP> Error querying FEC status!")); + return AuxBus::nack; + } + return AuxBus::success; +} + +static AuxBus::status _QueryFecErrorCount +( + DeviceImpl *bus, + NvU16 *pErrorCount +) +{ + AuxBus::status status = AuxBus::success; + NvU32 addr = NV_DPCD14_FEC_ERROR_COUNT; + unsigned size = 2; + + unsigned sizeCompleted = 0; + NvU8 cnt[2] = {0, 0}; + unsigned pNakReason = 0; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_LOG(("DP> Error querying FEC error count!")); + return AuxBus::nack; + } + else + { + *pErrorCount = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + } + return AuxBus::success; +} + +static AuxBus::status _WriteFecConfiguration +( + DeviceImpl *bus, + NvU8 configuration +) +{ + AuxBus::status status = AuxBus::success; + + NvU32 addr = NV_DPCD14_FEC_CONFIGURATION; + unsigned size = 1; + + unsigned sizeCompleted = 0; + unsigned pNakReason = 0; + + status = bus->setDpcdData(addr, &configuration, size, &sizeCompleted, &pNakReason); + + if (status != AuxBus::success) + { + DP_LOG(("DP> Error setting FEC configuration!")); + return AuxBus::nack; + } + return AuxBus::success; +} + +AuxBus::status DeviceImpl::fecTransaction(NvU8 *fecStatus, NvU16 **fecErrorCount, NvU32 flags) +{ + AuxBus::status status; + // the capability needs to be checked first (bits 5:0 and 7 need to be set) + NvU8 data, lane, counter, laneData, offset; + if (!bFECSupported) + { + DP_LOG(("DP> FEC capability not correct!")); + return nack; + } + + if (!bFECUncorrectedSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _UNCORRECTED, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + if (!bFECCorrectedSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _CORRECTED, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + if (!bFECBitSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _BIT, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + if (!bFECParityBlockSupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _PARITY_BLOCK, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j 
= 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + if (!bFECParitySupported) + { + // check if this counter is specified in the flags + if (FLD_TEST_DRF(_DP, _PARITY_BIT, _ERROR, _YES, flags)) + { + for (int i = 0; i < NV_DP_MAX_NUM_OF_LANES; i++) + { + for (int j = 0; j < NV_DP_ERROR_COUNTERS_PER_LANE; j++) + { + // specific error value for error + fecErrorCount[i][j] = NV_DP_FEC_ERROR_COUNT_INVALID; + } + } + DP_LOG(("DP> FEC capability not correct!")); + return success; + } + } + + status = _QueryFecStatus(this, fecStatus); + if(status != AuxBus::success) + { + return status; + } + // setting configuration for querying error counters for every lane + for (lane = NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_0; lane < connector->activeLinkConfig.lanes; lane++) + { + // keeping FEC ready bit + laneData = DRF_DEF(_DPCD14, _FEC_CONFIGURATION, _FEC_READY, _YES); + // selecting specific lane + laneData |= DRF_NUM(_DPCD14, _FEC_CONFIGURATION, _LANE_SELECT, lane); + // setting configuration for querying all the error counters for a specific lane + for (counter = NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_UNCORRECTED_BLOCK_ERROR_COUNT; + counter <= NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BIT_ERROR_COUNT; counter++) + { + // address function for the current register (in the matrix registers start from 0 and in the bit mask from 1) + offset = counter - 1; + // if flag for corresponding register is not set skip querying + if ((flags & NVBIT(offset)) == 0) continue; + // selecting specific counter + data = laneData | DRF_NUM(_DPCD14, _FEC_CONFIGURATION, _FEC_ERROR_COUNT_SEL, counter) ; + status = _WriteFecConfiguration(this, data); + if (status != AuxBus::success) + { + return status; + } + // reading specific error counter register based on address function + status = _QueryFecErrorCount(this, fecErrorCount[lane] + offset); + if (status != AuxBus::success) + { + return status; + } + } + } + return AuxBus::success; +} + +// Apply DPCD overrides if required +void DeviceImpl::dpcdOverrides() +{ + if (this->parent) + { + // + // Device is behind a branch. SW can't perform overrides as branch will + // handle link training the device not source. Also hal can only override + // capability of sink, not the individual device behind the branch. + // + return; + } + if (processedEdid.WARFlags.overrideMaxLaneCount) + { + hal->overrideMaxLaneCount(processedEdid.WARData.maxLaneCount); + } + if (processedEdid.WARFlags.skipCableBWCheck) + { + hal->skipCableBWCheck(processedEdid.WARData.maxLaneAtHighRate, + processedEdid.WARData.maxLaneAtLowRate); + } + if (processedEdid.WARFlags.overrideOptimalLinkCfg) + { + LinkRate optimalLinkRate = 0; + + switch(processedEdid.WARData.optimalLinkRate) + { + case 0x6: + optimalLinkRate = RBR; + break; + case 0xa: + optimalLinkRate = HBR; + break; + case 0x14: + optimalLinkRate = HBR2; + break; + case 0x1E: + optimalLinkRate = HBR3; + break; + default: + optimalLinkRate = RBR; + DP_LOG(("DP-DEV> Invalid link rate supplied. Falling back to RBR")); + break; + } + hal->overrideOptimalLinkCfg(optimalLinkRate, processedEdid.WARData.optimalLaneCount); + } +} + +void DeviceImpl::applyOUIOverrides() +{ + // For now we only need this for Synaptic branch. 
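A note on the optimalLinkRate values handled in dpcdOverrides() above: they are the standard DPCD link-rate codes, expressed in units of 270 Mbps (0x6 = 1.62 Gbps RBR, 0xA = 2.7 Gbps HBR, 0x14 = 5.4 Gbps HBR2, 0x1E = 8.1 Gbps HBR3). The conversion helper below is only an illustration and is not part of this change:

    // DPCD link-rate code -> link rate in Mbps (the code is in 270 Mbps units).
    static unsigned linkRateCodeToMbps(unsigned code)
    {
        return code * 270;   // 0x6 -> 1620, 0xA -> 2700, 0x14 -> 5400, 0x1E -> 8100
    }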
+ if ((this->peerDevice == DownstreamBranch) || + (this->peerDevice == UpstreamSourceOrSSTBranch)) + { + NvU8 buffer[16] = {0}; + unsigned size = 13; // Read 0x500 ~ 0x50C + unsigned sizeCompleted = 0; + unsigned nakReason = NakUndefined; + + // + // Synaptic branch claims it supports MSA override, but some older firmware has problems + // on their decoder. We need to disable the feature in that case. + // + if (AuxBus::success != this->getDpcdData(NV_DPCD_BRANCH_IEEE_OUI, &buffer[0], + size, &sizeCompleted, &nakReason)) + return; + + // Check Branch IEEE_OUI (0x500h~0x502h) is Synaptic IEEE_OUI (0x90, 0xCC, 0x24) + if ((buffer[0] == 0x90) && (buffer[1] == 0xCC) && (buffer[2] == 0x24)) + { + // Check if Device Identification String (0x503~0x506) is "SYNA" + if ((buffer[3] == 0x53) && (buffer[4] == 0x59) && (buffer[5] == 0x4E) && (buffer[6] == 0x41)) + { + // For Synaptic VMM5331 and VMM5320, it only support MSA-Over-MST for DP after Firmware 5.4.5 + if (buffer[7] == 0x53 && + (buffer[8] == 0x31 || buffer[8] == 0x20)) + { + this->bSdpExtCapable = False; + + // + // Check firmware version + // 0x50A: FW/SW Major Revision. + // 0x50B: FW/SW Minor Revision. + // 0x50C: Build Number. + // + if ((buffer[10] >= 0x06) || + ((buffer[10] == 0x05) && (buffer[11] >= 0x05)) || + ((buffer[10] == 0x05) && (buffer[11] == 0x04) && (buffer[12] >= 0x05))) + { + this->bSdpExtCapable = True; + } + } + } + } + + } +} + +bool DeviceImpl::getSDPExtnForColorimetrySupported() +{ + DeviceImpl *targetDevice = NULL; + DeviceImpl *parentDevice = NULL; + + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + // + // On fakeed mux devices, we cannot check if the device has + // the capability as we don't have access to aux. + // + if (this->isFakedMuxDevice()) + { + return false; + } + + // If the capability is queried/set already. + if (this->bSdpExtCapable != Indeterminate) + { + return (this->bSdpExtCapable == True); + } + + if (!this->isMultistream()) + { + // If the device is directly connected to the source read the DPCD directly + this->bSdpExtCapable = hal->getSDPExtnForColorimetry() ? True : False; + return (this->bSdpExtCapable == True); + } + + // For MST devices + switch (this->peerDevice) + { + case DownstreamBranch: + case UpstreamSourceOrSSTBranch: + { + targetDevice = this; + break; + } + case DownstreamSink: + { + // + // When the device is type of DownstreamSink and with branch(es) + // between GPU and it, query goes to the device and its parent + // + targetDevice = this; + parentDevice = (DeviceImpl *)this->getParent(); + break; + } + case Dongle: + { + // + // Bug 2527026: When the device is type of dongle and with branch(es) + // between GPU and it, query goes to its parent. + // + targetDevice = (DeviceImpl *)this->getParent(); + break; + } + default: + { + DP_ASSERT(0 && "Unsupported Peer Type for SDP_EXT COLORIMETRY"); + return false; + break; + } + } + + // Send remote DPCD for devices behind the branch + if ((AuxBus::success == targetDevice->getDpcdData(NV_DPCD_TRAINING_AUX_RD_INTERVAL, + &byte, sizeof byte, &size, &nakReason)) && + (FLD_TEST_DRF(_DPCD14, _TRAINING_AUX_RD_INTERVAL, _EXTENDED_RX_CAP, _YES, byte))) + { + byte = 0; + size = 0; + nakReason = NakUndefined; + + if (AuxBus::success == targetDevice->getDpcdData(NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST, + &byte, sizeof byte, &size, &nakReason)) + { + this->bSdpExtCapable = FLD_TEST_DRF(_DPCD14, + _EXTENDED_DPRX_FEATURE_ENUM_LIST, + _VSC_SDP_EXT_FOR_COLORIMETRY, + _YES, byte) ? 
True : False; + } + } + this->applyOUIOverrides(); + if (parentDevice && (this->bSdpExtCapable == True)) + { + // + // Do not override bSdpExtCapable for the sink. Although result won't + // change but we can keep the value for debug purpose. + // + return parentDevice->getSDPExtnForColorimetrySupported(); + } + + return (this->bSdpExtCapable == True); +} + +bool DeviceImpl::isPowerSuspended() +{ + bool bPanelPowerOn, bDPCDPowerStateD0; + if (connector->main->isEDP()) + { + connector->main->getEdpPowerData(&bPanelPowerOn, &bDPCDPowerStateD0); + return !bDPCDPowerStateD0; + } + return (connector->hal->getPowerState() == PowerStateD3); +} + +void DeviceImpl::setPanelPowerParams(bool bSinkPowerStateD0, bool bPanelPowerStateOn) +{ + bool bPanelPowerOn, bDPCDPowerStateD0; + GroupImpl * pGroupAttached = connector->getActiveGroupForSST(); + + // + // For single head dual SST mode, set the panel power params for the + // secondary connector while updating the primary connector. + // + if (pGroupAttached && + connector->pCoupledConnector && + (pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) && + (pGroupAttached->singleHeadMultiStreamID == DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY)) + { + return; + } + + if (connector->main->isEDP()) + { + connector->main->getEdpPowerData(&bPanelPowerOn, &bDPCDPowerStateD0); + } + else + { + bDPCDPowerStateD0 = (connector->hal->getPowerState() == PowerStateD0)? + true : false; + } + + // Going to Suspend (D3) + if (!bSinkPowerStateD0) + { + if (this->bypassDpcdPowerOff()) + { + DP_LOG(("DP-DEV> Bypassing 600h write for this display")); + return; + } + + if (connector->main->isEDP()) + { + /* + * If it's an eDP panel, the setPowerState call below will turn on LCD_POWER + * if it's already off. So only call the function when panel power is on + * and DPCD_SET_POWER is set to _D0. + */ + if (bPanelPowerOn && bDPCDPowerStateD0) + { + // monitor to be put to sleep + if (connector->hal->setPowerState(PowerStateD3)) + shadow.highestAssessedLC = connector->highestAssessedLC; + } + } + else + { + + if (connector->pCoupledConnector) + { + // Put secondary connctor to sleep + connector->pCoupledConnector->hal->setPowerState(PowerStateD3); + } + + // monitor to be put to sleep + if (connector->hal->setPowerState(PowerStateD3)) + { + shadow.highestAssessedLC = connector->highestAssessedLC; + } + } + // + // If bPanelPowerStateOn is false and this + // is not a multistream device, then shut down the main link. Some eDP + // panels are known to need this in order to actually shut down. + // + if (!isMultistream() && !bPanelPowerStateOn) + { + if (connector->pCoupledConnector) + { + // configure power state on secondary + connector->pCoupledConnector->main->configurePowerState(false); + } + connector->main->configurePowerState(false); + } + } + else + { + if (connector->main->isEDP() && !bPanelPowerOn) + { + // Turn on the eDP panel if required. + connector->main->configurePowerState(true); + } + // monitor to be brought out of sleep + if (connector->hal->setPowerState(PowerStateD0)) + { + if (connector->pCoupledConnector) + { + // power up main link on secondary + connector->pCoupledConnector->hal->setPowerState(PowerStateD0); + } + + // Mark linkStatus as dirty as we need to read linkStatus again since we are resuming a power state D0, link might have lost. 
+ connector->hal->setDirtyLinkStatus(true); + if (connector->pCoupledConnector) + { + connector->pCoupledConnector->hal->setDirtyLinkStatus(true); + } + + if (connector->activeGroups.isEmpty()) + { + return; + } + if ((!connector->isLinkActive()) || + (connector->main->isEDP() && !bPanelPowerOn) || + (connector->isLinkLost()) || + (!bDPCDPowerStateD0)) + { + // + // If link is inactive, lost, or the panel was off before, then + // assess Link. Note that this'll detach head if required. + // + if (pGroupAttached && + pGroupAttached->singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + // Special handling for single head dual SST cases + connector->trainSingleHeadMultipleSSTLinkNotAlive(pGroupAttached); + } + else + { + connector->assessLink(); + } + } + } + else + DP_ASSERT(0 && "Could not bring the monitor back from sleep."); + } +} + +void DeviceImpl::switchToComplianceFallback() +{ + Edid fallbackEdid; + makeEdidFallback(fallbackEdid); + this->processedEdid.resetData(); + this->processedEdid = fallbackEdid; +} + +TriState DeviceImpl::hdcpAvailableHop() +{ + return this->isHDCPCap; +} + +TriState DeviceImpl::hdcpAvailable() +{ + if (isNativeDPCD()) + { + return this->hdcpAvailableHop(); + } + return False; +} + +void DeviceImpl::resetCacheInferredLink() +{ + this->bandwidth.enum_path.dataValid = false; +} + +LinkConfiguration * DeviceImpl::inferLeafLink(unsigned * totalLinkSlots) +{ + // update the EPR data + if (!bandwidth.enum_path.dataValid) + { + if (plugged) + { + NakData nack; + for (unsigned retries = 0; retries < 7; retries++) + { + EnumPathResMessage epr(getTopologyAddress().parent(), getTopologyAddress().tail(), true); + bool sendStatus = connector->messageManager->send(&epr, nack); + if (!sendStatus) + { + if (nack.reason == NakDefer || nack.reason == NakTimeout) + continue; + + bandwidth.enum_path.total = bandwidth.enum_path.free = 0; + break; + } + else + { + bandwidth.enum_path.total = epr.reply.TotalPBN; + bandwidth.enum_path.free = epr.reply.FreePBN; + bandwidth.enum_path.bPathFECCapable = epr.reply.bFECCapability; + break; + } + } + } + else + { + bandwidth.enum_path.total = bandwidth.enum_path.free = 0; + } + + bandwidth.enum_path.dataValid = true; + bandwidth.lastHopLinkConfig = LinkConfiguration(bandwidth.enum_path.total); + // Update FEC support of the device after EPR + this->getFECSupport(); + } + + if (totalLinkSlots) + { + *totalLinkSlots = bandwidth.lastHopLinkConfig.slotsForPBN(bandwidth.enum_path.total, true /*epr aware*/); + + // + // Override the totalLinkSlots returned to 63 only if peer device is + // 2 (branch), since TS-0 will be used for MTP header. + // Branch may return the total pbn corresponding to 64 timeslots. 
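To make the slot accounting described above concrete: the branch reports a total PBN that corresponds to all 64 timeslots, a payload's slot count is a proportional round-up against that total, and behind a branch only 63 slots can carry payload because TS-0 holds the MTP header. The sketch below illustrates that accounting only; it is not the library's slotsForPBN() implementation:

    // Rough illustration only: proportional, rounded-up slot count.
    static unsigned slotsNeeded(unsigned payloadPBN, unsigned totalLinkPBN)
    {
        // 64 timeslots span totalLinkPBN, so round up to whole slots.
        return (payloadPBN * 64 + totalLinkPBN - 1) / totalLinkPBN;
    }
    // Behind a branch, clamp the usable count to 63 (TS-0 = MTP header).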
+ // + if (*totalLinkSlots == 64 && peerDevice == DownstreamBranch) + { + *totalLinkSlots = 63; + } + } + + return &bandwidth.lastHopLinkConfig; +} + +bool DeviceImpl::isActive() +{ + DP_ASSERT(!activeGroup || activeGroup->isHeadAttached()); + return activeGroup != NULL; +} + +bool DeviceImpl::getRawEpr(unsigned * totalEpr, unsigned * freeEpr, rawEprState eprState) +{ + DP_ASSERT((totalEpr && freeEpr) && "Invalid arguments passed to function getRawEpr()"); + bool status = true; + *totalEpr = 0; + *freeEpr = 0; + + // If request has come for main link/Native branch device + // return main link PBNs as "0" & return + if (isNativeDPCD()) + return status; + + // Cached/Software state is queried + if (eprState == software) + { + *totalEpr = bandwidth.enum_path.total; + *freeEpr = bandwidth.enum_path.free; + + return status; + } + + // Hardware state is queried. Send a new EPR message to get the current state + EnumPathResMessage rawEpr(getTopologyAddress().parent(), getTopologyAddress().tail(), true); + NakData nack; + for (unsigned retries = 0; retries < 7; retries++) + { + bool sendStatus = connector->messageManager->send(&rawEpr, nack); + if (!sendStatus) + { + status = false; + if (nack.reason == NakDefer) + continue; + + DP_LOG(("DP-DEV> EPR message failed while getting RAW EPR")); + + break; + } + else + { + *totalEpr = rawEpr.reply.TotalPBN; + *freeEpr = rawEpr.reply.FreePBN; + status = true; + + break; + } + } + + return status; +} + +unsigned DeviceImpl::getEDIDSize() const +{ + // Return DDC EDID size only if we got a valid EDID there + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + return ddcEdid.getEdidSize(); + } + else + { + return processedEdid.getEdidSize(); + } +} + +bool DeviceImpl::getEDID(char * buffer, unsigned size) const +{ + // + // Return DDC EDID only if we got a valid EDID there + // This has priority on regular EDID read from panel + // + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + if (size < ddcEdid.getEdidSize()) + goto panelEdid; + + dpMemCopy(buffer, ddcEdid.getBuffer()->getData(), ddcEdid.getEdidSize()); + return true; + } + +panelEdid: + // No EDID read from SBIOS. Return panel EDID now. + if (size < processedEdid.getEdidSize()) + return false; + + dpMemCopy(buffer, processedEdid.getBuffer()->getData(), processedEdid.getEdidSize()); + return true; +} + +unsigned DeviceImpl::getRawEDIDSize() const +{ + // Return DDC EDID size only if we got a valid EDID there + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + return ddcEdid.getEdidSize(); + } + else + { + return rawEDID.getEdidSize(); + } +} + +bool DeviceImpl::getRawEDID(char * buffer, unsigned size) const +{ + // + // Return DDC EDID only if we got a valid EDID there + // This has priority on regular EDID read from panel + // + if (this->connector->isAcpiInitDone() && ddcEdid.isValidHeader()) + { + if (size >= ddcEdid.getEdidSize()) + { + dpMemCopy(buffer, ddcEdid.getBuffer()->getData(), ddcEdid.getEdidSize()); + return true; + } + } + + // No EDID read from SBIOS. Return panel EDID now. 
+ if (size < rawEDID.getEdidSize()) + return false; + + dpMemCopy(buffer, rawEDID.getBuffer()->getData(), rawEDID.getEdidSize()); + return true; +} + +bool DeviceImpl::startVrrEnablement() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->start(); + } + + return ret; +} + +void DeviceImpl::resetVrrEnablement() +{ + if (vrrEnablement) + { + vrrEnablement->reset(); + } +} + +bool DeviceImpl::isVrrMonitorEnabled() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->isMonitorEnabled(); + } + + return ret; +} + +bool DeviceImpl::isVrrDriverEnabled() +{ + bool ret = false; + + if (vrrEnablement) + { + ret = vrrEnablement->isDriverEnabled(); + } + + return ret; +} + +NvBool DeviceImpl::getDSCSupport() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + dscCaps.bDSCSupported = false; + + if(AuxBus::success == this->getDpcdData(NV_DPCD14_DSC_SUPPORT, + &byte, sizeof(byte), &size, &nakReason)) + { + if (FLD_TEST_DRF(_DPCD14, _DSC_SUPPORT, _DSC_SUPPORT, _YES, byte)) + { + dscCaps.bDSCSupported = true; + } + + if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _PASS_THROUGH_SUPPORT, _YES, byte)) + { + dscCaps.bDSCPassThroughSupported = true; + } + } + + return dscCaps.bDSCSupported; +} + +bool DeviceImpl::isPanelReplaySupported() +{ + return prCaps.panelReplaySupported; +} + +void DeviceImpl::getPanelReplayCaps() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + if (AuxBus::success == this->getDpcdData(NV_DPCD20_PANEL_REPLAY_CAPABILITY, + &byte, sizeof(byte), &size, &nakReason)) + { + prCaps.panelReplaySupported = + FLD_TEST_DRF(_DPCD20_PANEL, _REPLAY_CAPABILITY, _SUPPORTED, _YES, byte); + } +} + +bool DeviceImpl::setPanelReplayConfig(panelReplayConfig prcfg) +{ + NvU8 config = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + if (prcfg.enablePanelReplay) + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_PR_MODE, _YES, config); + } + else + { + config = FLD_SET_DRF(_DPCD20_PANEL, _REPLAY_CONFIGURATION, + _ENABLE_PR_MODE, _NO, config); + } + + if (AuxBus::success == this->setDpcdData(NV_DPCD20_PANEL_REPLAY_CONFIGURATION, + &config, sizeof(config), &size, &nakReason)) + { + return true; + } + + return false; +} + +bool DeviceImpl::getFECSupport() +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + + if(this->address.size() > 1) + { + bFECSupported = this->bandwidth.enum_path.bPathFECCapable; + } + + else if (AuxBus::success == this->getDpcdData(NV_DPCD14_FEC_CAPABILITY, + &byte, sizeof(byte), &size, &nakReason)) + { + bFECSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _FEC_CAPABLE, _YES, byte); + bFECUncorrectedSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECCorrectedSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _CORRECTED_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECBitSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _BIT_ERROR_COUNT_CAPABLE, _YES, byte); + bFECParityBlockSupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _PARITY_BLOCK_ERROR_COUNT_CAPABLE, _YES, byte); + bFECParitySupported = FLD_TEST_DRF(_DPCD14, _FEC_CAPABILITY, _PARITY_ERROR_COUNT_CAPABLE, _YES, byte); + } + + return bFECSupported; +} + +NvBool DeviceImpl::isDSCSupported() +{ + return dscCaps.bDSCSupported; +} + +NvBool DeviceImpl::isDSCPassThroughSupported() +{ + return dscCaps.bDSCPassThroughSupported; +} + +NvBool DeviceImpl::isDSCPossible() +{ + return this->bDSCPossible; +} + +bool 
DeviceImpl::isFECSupported() +{ + return bFECSupported; +} + +bool DeviceImpl::parseDscCaps(const NvU8 *buffer, NvU32 bufferSize) +{ + + if (bufferSize < 16) + { + DP_LOG((" DSC caps buffer must be greater than or equal to 16")); + return false; + } + + dscCaps.versionMajor = DRF_VAL(_DPCD14, _DSC_ALGORITHM_REVISION, _MAJOR, buffer[0x1]); + dscCaps.versionMinor = DRF_VAL(_DPCD14, _DSC_ALGORITHM_REVISION, _MINOR, buffer[0x1]); + + dscCaps.rcBufferBlockSize = DRF_VAL(_DPCD14, _DSC_RC_BUFFER_BLOCK, _SIZE, buffer[0x2]); + + dscCaps.rcBuffersize = DRF_VAL(_DPCD14, _DSC_RC_BUFFER, _SIZE, buffer[0x3]); + + dscCaps.sliceCountSupportedMask = (((buffer[0xD]) << 8) | buffer[0x4]); + if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_24, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_24; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_20, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_20; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_2, _SLICES_PER_SINK_16, _YES, buffer[0xD])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_16; + + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_12, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_12; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_10, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_10; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_8, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_8; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_6, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_6; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_4, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_4; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_2, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_2; + else if(FLD_TEST_DRF(_DPCD14, _DSC_SLICE_CAPABILITIES_1, _SLICES_PER_SINK_1, _YES, buffer[0x4])) + dscCaps.maxSlicesPerSink = DSC_SLICES_PER_SINK_1; + + if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _8, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 8; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _9, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 9; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _10, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 10; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _11, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 11; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _12, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 12; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _13, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 13; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _14, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 14; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _15, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 15; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_LINE_BUFFER, _BIT_DEPTH, _16, buffer[0x5])) + { + dscCaps.lineBufferBitDepth = 16; + } + + if(FLD_TEST_DRF(_DPCD14, _DSC_BLOCK_PREDICTION, _SUPPORT, _YES, buffer[0x6])) + dscCaps.bDscBlockPredictionSupport = true; + + unsigned maxBitsPerPixelLSB = DRF_VAL(_DPCD14, _DSC_MAXIMUM_BITS_PER_PIXEL_1, _LSB, buffer[0x7]); + unsigned 
maxBitsPerPixelMSB = DRF_VAL(_DPCD14, _DSC_MAXIMUM_BITS_PER_PIXEL_2, _MSB, buffer[0x8]); + + dscCaps.maxBitsPerPixelX16 = (maxBitsPerPixelMSB << 8) | maxBitsPerPixelLSB; + + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _RGB, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bRgb = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_444, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCr444 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_SIMPLE_422, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_NATIVE_422, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422 = true; + if(FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_FORMAT_CAPABILITIES, _YCbCr_NATIVE_420, _YES, buffer[0x9])) + dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420 = true; + + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _12_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_12; + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _10_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_10; + if (FLD_TEST_DRF(_DPCD14, _DSC_DECODER_COLOR_DEPTH_CAPABILITIES, _8_BITS_PER_COLOR, _YES, buffer[0xa])) + dscCaps.dscDecoderColorDepthMask |= DSC_BITS_PER_COLOR_MASK_8; + + dscCaps.dscPeakThroughputMode0 = DRF_VAL(_DPCD14, _DSC_PEAK_THROUGHPUT, _MODE0, buffer[0xb]); + dscCaps.dscPeakThroughputMode1 = DRF_VAL(_DPCD14, _DSC_PEAK_THROUGHPUT, _MODE1, buffer[0xb]); + + unsigned numOfPixels = DRF_VAL(_DPCD14, _DSC_MAXIMUM_SLICE_WIDTH, _MAX, buffer[0xc]); + dscCaps.dscMaxSliceWidth = numOfPixels * 320; + + if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_16, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_16; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_8, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_8; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_4, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_4; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1_2, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1_2; + } + else if (FLD_TEST_DRF(_DPCD14, _DSC_BITS_PER_PIXEL_INCREMENT, _SUPPORTED, _1, buffer[0xf])) + { + dscCaps.dscBitsPerPixelIncrement = BITS_PER_PIXEL_PRECISION_1; + } + + return true; +} + +bool DeviceImpl::readAndParseDSCCaps() +{ + // Allocate a buffer of 16 bytes to read DSC caps + + unsigned sizeCompleted = 0; + unsigned nakReason = NakUndefined; + + if(AuxBus::success != this->getDpcdData(NV_DPCD14_DSC_SUPPORT, + &rawDscCaps[0], sizeof(rawDscCaps), &sizeCompleted, &nakReason)) + { + return false; + } + + return parseDscCaps(&rawDscCaps[0], sizeof(rawDscCaps)); +} + +bool DeviceImpl::getDscEnable(bool *pEnable) +{ + AuxBus::status status = AuxBus::success; + unsigned sizeCompleted = 0; + unsigned pNakReason = 0; + NvU8 byte = 0; + + if (!pEnable || + !this->isDSCPossible() || + !this->devDoingDscDecompression || + !this->devDoingDscDecompression->plugged) + { + return false; + } + + status = this->devDoingDscDecompression->getDpcdData(NV_DPCD14_DSC_ENABLE, + &byte, + sizeof byte, + &sizeCompleted, + &pNakReason); + + if (status != 
AuxBus::success) + { + DP_LOG(("DP> Error querying DSC Enable State!")); + return false; + } + + *pEnable = FLD_TEST_DRF(_DPCD14, _DSC_ENABLE, _SINK, _YES, byte); + return true; +} + +bool DeviceImpl::setDscEnable(bool enable) +{ + NvU8 byte = 0; + unsigned size = 0; + unsigned nakReason = NakUndefined; + bool bCurrDscEnable = false; + bool bDscPassThrough = false; + Address::StringBuffer buffer; + DP_USED(buffer); + + if (!this->isDSCPossible() || !this->devDoingDscDecompression || + !this->devDoingDscDecompression->plugged) + { + return false; + } + + if ((this->devDoingDscDecompression == this) && this->parent != NULL && this->connector->bDscMstEnablePassThrough) + { + // + // If the device has a parent, the sink is on an MST link. On an MST link, + // if DSC is possible on the path and devDoingDscDecompression + // is the sink itself, then the parent should be DSC pass-through capable. + // + bDscPassThrough = true; + } + else + { + // + // Get Current DSC Enable State + // Ideally we don't need to check the current state, but the Synaptics DSC device, + // which was used for initial DSC code development, did not follow the spec, so + // we have added this code. Overwriting the same value should not have any + // impact as per the spec. Will remove this check once all DSC devices follow the spec. + // + if (!getDscEnable(&bCurrDscEnable)) + { + DP_LOG(("DP> Not able to get DSC Enable State!")); + return false; + } + } + + if(enable) + { + if(bDscPassThrough) + { + byte = FLD_SET_DRF(_DPCD20, _DSC_PASS_THROUGH, _ENABLE, _YES, byte); + DP_LOG(("DP-DEV> Enabling DSC Pass through on branch device - %s", + this->parent->getTopologyAddress().toString(buffer))); + } + else + { + if (!bCurrDscEnable) + { + byte = FLD_SET_DRF(_DPCD14, _DSC_ENABLE, _SINK, _YES, byte); + DP_LOG(("DP-DEV> Enabling DSC decompression on device - %s", + this->devDoingDscDecompression->getTopologyAddress().toString(buffer))); + } + else + { + DP_LOG(("DP-DEV> DSC decompression is already enabled on device - %s", + this->devDoingDscDecompression->getTopologyAddress().toString(buffer))); + return true; + } + } + } + else + { + if(bDscPassThrough) + { + byte = FLD_SET_DRF(_DPCD20, _DSC_PASS_THROUGH, _ENABLE, _NO, byte); + DP_LOG(("DP-DEV> Disabling DSC Pass through on branch device - %s", + this->parent->getTopologyAddress().toString(buffer))); + } + else + { + if (bCurrDscEnable) + { + byte = FLD_SET_DRF(_DPCD14, _DSC_ENABLE, _SINK, _NO, byte); + DP_LOG(("DP-DEV> Disabling DSC decompression on device - %s", + this->devDoingDscDecompression->getTopologyAddress().toString(buffer))); + } + else + { + DP_LOG(("DP-DEV> DSC decompression is already disabled on device - %s", + this->devDoingDscDecompression->getTopologyAddress().toString(buffer))); + return true; + } + } + } + + if (bDscPassThrough) + { + // + // When the sink is DSC decompression capable and the parent is DSC pass-through capable, + // the source only needs to enable DSC pass-through on the parent branch, and the parent + // branch will take care of enabling DSC decompression on the sink.
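One note on the return statements that follow: setDpcdData() returns an AuxBus::status, and the code relies on AuxBus::success being the first (zero-valued) enumerator so that negating the status yields true exactly on success. That ordering is inferred from how the value is used here rather than stated in this hunk, so the snippet below is only an illustration of the idiom:

    AuxBus::status st = AuxBus::success;  // assumed to have the value 0
    bool ok = !st;                        // true on success, false on nack/defer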
+ // + return (!this->parent->setDpcdData(NV_DPCD20_DSC_PASS_THROUGH, + &byte, sizeof byte, &size, &nakReason)); + } + else + { + return (!this->devDoingDscDecompression->setDpcdData(NV_DPCD14_DSC_ENABLE, + &byte, sizeof byte, &size, &nakReason)); + } +} + +unsigned DeviceImpl::getDscVersionMajor() +{ + return dscCaps.versionMajor; +} + +unsigned DeviceImpl::getDscVersionMinor() +{ + return dscCaps.versionMinor; +} + +unsigned DeviceImpl::getDscRcBufferSize() +{ + return dscCaps.rcBuffersize; +} + +unsigned DeviceImpl::getDscRcBufferBlockSize() +{ + return dscCaps.rcBufferBlockSize; +} + +unsigned DeviceImpl::getDscMaxSlicesPerSink() +{ + return dscCaps.maxSlicesPerSink; +} + +unsigned DeviceImpl::getDscLineBufferBitDepth() +{ + return dscCaps.lineBufferBitDepth; +} + +NvBool DeviceImpl::isDscBlockPredictionSupported() +{ + return dscCaps.bDscBlockPredictionSupport; +} + +unsigned DeviceImpl::getDscMaxBitsPerPixel() +{ + return dscCaps.maxBitsPerPixelX16; +} + +NvBool DeviceImpl::isDscRgbSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bRgb; +} + +NvBool DeviceImpl::isDscYCbCr444Supported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCr444; +} + +NvBool DeviceImpl::isDscYCbCrSimple422Supported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrSimple422; +} + +NvBool DeviceImpl::isDscYCbCr422NativeSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrNative422; +} + +NvBool DeviceImpl::isDscYCbCr420NativeSupported() +{ + return dscCaps.dscDecoderColorFormatCaps.bYCbCrNative420; +} + +unsigned DeviceImpl::getDscPeakThroughputMode0() +{ + return dscCaps.dscPeakThroughputMode0; +} + +unsigned DeviceImpl::getDscPeakThroughputModel() +{ + return dscCaps.dscPeakThroughputMode1; +} + +unsigned DeviceImpl::getDscMaxSliceWidth() +{ + return dscCaps.dscMaxSliceWidth; +} + +unsigned DeviceImpl::getDscDecoderColorDepthSupportMask() +{ + return dscCaps.dscDecoderColorDepthMask; +} + +bool DeviceImpl::isFakedMuxDevice() +{ + return connector->main->isDynamicMuxCapable() && bIsFakedMuxDevice; +} + +bool DeviceImpl::isPreviouslyFakedMuxDevice() +{ + return connector->main->isDynamicMuxCapable() && bIsPreviouslyFakedMuxDevice; +} + +static AuxBus::status _QueryCrcSink +( + DeviceImpl *bus, + NvU16 *sinkCrc0, + NvU16 *sinkCrc1, + NvU16 *sinkCrc2 +) +{ + AuxBus::status status = AuxBus::success; + // no sink op needs to be done if registers are NULL + if (sinkCrc0 == NULL) return status; + NvU32 addr = NV_DPCD14_DSC_CRC_0; + unsigned size = 2; + NvU8 cnt[2] = {0, 0}; + + unsigned sizeCompleted = 0; + unsigned nakReason = 0; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc0 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + + addr = NV_DPCD14_DSC_CRC_1; + size = 2; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc1 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + + addr = NV_DPCD14_DSC_CRC_2; + size = 2; + + status = bus->getDpcdData(addr, &cnt[0], size, &sizeCompleted, &nakReason); + + if (status != AuxBus::success) + { + return status; + } + *sinkCrc2 = (((NvU16) cnt[1]) << (sizeof(NvU8) * 8)) | cnt[0]; + return status; +} + +AuxBus::status DeviceImpl::dscCrcControl(NvBool bEnable, gpuDscCrc *gpuData, sinkDscCrc *sinkData) +{ + // GPU part + if (this->connector->main->dscCrcTransaction(bEnable, gpuData, (NvU16*) &(activeGroup->headIndex)) != true) + { + return 
AuxBus::nack; + } + + // sink part + if (!sinkData) + { + return AuxBus::success; + } + return _QueryCrcSink(this, &(sinkData->sinkCrc0), &(sinkData->sinkCrc1), &(sinkData->sinkCrc2)); +} + +bool DeviceImpl::getPCONCaps(PCONCaps *pPCONCaps) +{ + AuxBus::status status = AuxBus::success; + NvU32 addr = NV_DPCD_DETAILED_CAP_INFO_ONE(0); + NvU8 data = 0; + unsigned size = 1; + unsigned sizeCompleted = 0; + unsigned nakReason = 0; + + if (isMultistream()) + return false; + + status = getDpcdData(addr, &data, size, &sizeCompleted, &nakReason); + if (status != AuxBus::success) + { + return false; + } + pPCONCaps->maxTmdsClkRate = data; + + addr = NV_DPCD_DETAILED_CAP_INFO_TWO(0); + status = getDpcdData(addr, &data, size, &sizeCompleted, &nakReason); + if (status != AuxBus::success) + { + return false; + } + + pPCONCaps->bSourceControlModeSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _SRC_CONTROL_MODE_SUPPORT, _YES, data); + pPCONCaps->bConcurrentLTSupported = + FLD_TEST_DRF(_DPCD, _DETAILED_CAP_INFO, _CONCURRENT_LT_SUPPORT, _YES, data); + pPCONCaps->maxHdmiLinkBandwidthGbps = + DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_FRL_LINK_BW_SUPPORT, data); + + switch (DRF_VAL(_DPCD, _DETAILED_CAP_INFO, _MAX_BITS_PER_COMPONENT_DEF, data)) + { + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC: + pPCONCaps->maxBpc = 10; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC: + pPCONCaps->maxBpc = 12; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC: + pPCONCaps->maxBpc = 16; + break; + case NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC: + default: + pPCONCaps->maxBpc = 8; + break; + } + return true; +} + + +void +DeviceHDCPDetection::start() +{ + if (parent->isNativeDPCD()) + { + if (!parent->isMultistream()) + { + goto NativeDPCDHDCPCAPRead; + } + else + { + parent->isHDCPCap = False; + waivePendingHDCPCapDoneNotification(); + return; + } + +NativeDPCDHDCPCAPRead: + + BCaps bCaps = {0}; + + parent->hal->getBCaps(bCaps, parent->BCAPS); + *(parent->nvBCaps) = *(parent->BCAPS); + + if (bCaps.HDCPCapable) + { + NvU8 tempBKSV[HDCP_KSV_SIZE] = {0}; + if (parent->hal->getBKSV(tempBKSV)) + { + if (hdcpValidateKsv(tempBKSV, HDCP_KSV_SIZE)) + { + for (unsigned i=0; i < HDCP_KSV_SIZE; i++) + parent->BKSV[i] = tempBKSV[i]; + } + } + parent->isHDCPCap = True; + waivePendingHDCPCapDoneNotification(); + return; + } + else + { + unsigned char hdcp22BCAPS[HDCP22_BCAPS_SIZE]; + + // Check if this is an hdcp2.x-only device and probe hdcp22Bcaps. + parent->hal->getHdcp22BCaps(bCaps, hdcp22BCAPS); + if (bCaps.HDCPCapable) + { + parent->nvBCaps[0] = FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET, + _HDCP_CAPABLE, bCaps.HDCPCapable, + parent->nvBCaps[0]) | + FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER, + bCaps.repeater, parent->nvBCaps[0]); + + // + // No need to validate the 1.x bksv here; hdcp22 authentication will + // validate the certificate with the bksv in uproc.
+ // + parent->isHDCPCap = True; + waivePendingHDCPCapDoneNotification(); + return; + } + } + + parent->isHDCPCap = False; + waivePendingHDCPCapDoneNotification(); + } + else + { + parent->isHDCPCap = False; + waivePendingHDCPCapDoneNotification(); + } +} + +void +DeviceHDCPDetection::messageCompleted +( + MessageManager::Message *from +) +{ + if ((from == &remoteBKSVReadMessage) || + (from == &remoteBCapsReadMessage) || + (from == &remote22BCapsReadMessage)) + { + handleRemoteDpcdReadDownReply(from); + } +} + +void +DeviceHDCPDetection::handleRemoteDpcdReadDownReply +( + MessageManager::Message *from +) +{ + NvU8 i2cBcaps; + unsigned dataCompleted; + unsigned defaultReason; + Address::StringBuffer sb; + DP_USED(sb); + + if (from == &remoteBKSVReadMessage) + { + bksvReadCompleted = true; + bBKSVReadMessagePending = false; + DP_LOG(("DP-QM> REMOTE_DPCD_READ(BKSV) {%p} at '%s' completed", + (MessageManager::Message *)&remoteBKSVReadMessage, + parent->address.toString(sb))); + + if (remoteBKSVReadMessage.replyNumOfBytesReadDPCD() != HDCP_KSV_SIZE) + { + DP_ASSERT(0 && "Incomplete BKSV in remote DPCD read message"); + parent->isHDCPCap = False; + + // Destruct only when no message is pending + if (!(bBKSVReadMessagePending || bBCapsReadMessagePending)) + { + parent->isDeviceHDCPDetectionAlive = false; + delete this; + } + return; + } + + DP_ASSERT(remoteBKSVReadMessage.replyPortNumber() == parent->address.tail()); + if (hdcpValidateKsv(remoteBKSVReadMessage.replyGetData(), HDCP_KSV_SIZE)) + { + isValidBKSV = true; + for (unsigned i=0; i < HDCP_KSV_SIZE; i++) + parent->BKSV[i] = (remoteBKSVReadMessage.replyGetData())[i]; + + DP_LOG(("DP-QM> Device at '%s' is with valid BKSV.", + parent->address.toString(sb))); + } + } + else if (from == &remoteBCapsReadMessage) + { + bCapsReadCompleted = true; + bBCapsReadMessagePending = false; + DP_LOG(("DP-QM> REMOTE_DPCD_READ(BCaps) {%p} at '%s' completed", + (MessageManager::Message *)&remoteBCapsReadMessage, + parent->address.toString(sb))); + + if (remoteBCapsReadMessage.replyNumOfBytesReadDPCD() != HDCP_BCAPS_SIZE) + { + DP_ASSERT(0 && "Incomplete BCaps in remote DPCD read message"); + parent->isHDCPCap = False; + + // Destruct only when no message is pending + if (!(bBKSVReadMessagePending || bBCapsReadMessagePending)) + { + parent->isDeviceHDCPDetectionAlive = false; + delete this; + } + return; + } + + DP_ASSERT(remoteBCapsReadMessage.replyPortNumber() == parent->address.tail()); + if (!!(*remoteBCapsReadMessage.replyGetData() & 0x1)) + { + *(parent->nvBCaps) = *(parent->BCAPS) = *remoteBCapsReadMessage.replyGetData(); + isBCapsHDCP = true; + + DP_LOG(("DP-QM> Device at '%s' is with valid BCAPS : %x", + parent->address.toString(sb), *remoteBCapsReadMessage.replyGetData())); + } + else + { + if (isValidBKSV) + { + DP_LOG(("DP-QM> Device at '%s' is with valid BKSV but Invalid BCAPS : %x", + parent->address.toString(sb), *remoteBCapsReadMessage.replyGetData())); + + // Read the BCAPS DDC offset + parent->transaction(AuxBus::read, AuxBus::i2cMot, HDCP_I2C_CLIENT_ADDR, &i2cBcaps, + 1, &dataCompleted, &defaultReason, HDCP_BCAPS_DDC_OFFSET, 1); + + DP_LOG(("DP-QM> Device at '%s' is with DDC BCAPS: %x", + parent->address.toString(sb), i2cBcaps)); + + // If the Reserved Bit is SET, Device supports HDCP + if (i2cBcaps & HDCP_BCAPS_DDC_EN_BIT) + { + isBCapsHDCP = true; + // Set the HDCP cap BCAPS according to DP protocol + *(parent->BCAPS) |= HDCP_BCAPS_DP_EN_BIT; + *(parent->nvBCaps) = *(parent->BCAPS); + } + } + else + { + DP_LOG(("DP-QM> Device at '%s' is without valid BKSV and BCAPS, thus
try 22BCAPS")); + + Address parentAddress = parent->address.parent(); + remote22BCapsReadMessage.setMessagePriority(NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT); + remote22BCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP22_BCAPS_OFFSET, HDCP22_BCAPS_SIZE); + bCapsReadCompleted = false; + bBCapsReadMessagePending = true; + messageManager->post(&remote22BCapsReadMessage, this); + } + } + } + else if (from == &remote22BCapsReadMessage) + { + bCapsReadCompleted = true; + bBCapsReadMessagePending = false; + DP_LOG(("DP-QM> REMOTE_DPCD_READ(22BCaps) {%p} at '%s' completed", + (MessageManager::Message *)&remote22BCapsReadMessage, + parent->address.toString(sb))); + + if (remote22BCapsReadMessage.replyNumOfBytesReadDPCD() != HDCP22_BCAPS_SIZE) + { + DP_ASSERT(0 && "Incomplete 22BCaps in remote DPCD read message"); + parent->isHDCPCap = False; + + // Destruct only when no message is pending + if (!(bBKSVReadMessagePending || bBCapsReadMessagePending)) + { + parent->isDeviceHDCPDetectionAlive = false; + delete this; + } + return; + } + + DP_ASSERT(remote22BCapsReadMessage.replyPortNumber() == parent->address.tail()); + if (!!(*remote22BCapsReadMessage.replyGetData() & 0x2)) + { + unsigned char hdcp22BCAPS; + + hdcp22BCAPS = *remote22BCapsReadMessage.replyGetData(); + + parent->nvBCaps[0] = FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET, + _HDCP_CAPABLE, (hdcp22BCAPS & 0x2) ? 1 : 0, + parent->nvBCaps[0]) | + FLD_SET_DRF_NUM(_DPCD, _HDCP_BCAPS_OFFSET, _HDCP_REPEATER, + (hdcp22BCAPS & 0x1) ? 1 : 0, parent->nvBCaps[0]); + + // hdcp22 will validate certificate's bksv directly. + isBCapsHDCP = isValidBKSV = true; + + DP_LOG(("DP-QM> Device at '%s' is with valid 22BCAPS : %x", + parent->address.toString(sb), *remote22BCapsReadMessage.replyGetData())); + } + } + + if (bCapsReadCompleted && bksvReadCompleted) + { + // Complete remote HDCP probe and check if can power down again. 
+ if (parent->connector) + { + parent->connector->decPendingRemoteHdcpDetection(); + parent->connector->isNoActiveStreamAndPowerdown(); + } + + if (isValidBKSV && isBCapsHDCP) + { + parent->isHDCPCap = True; + } + else + { + parent->isHDCPCap = False; + } + + // Destruct only when no message is pending + if (!(bBKSVReadMessagePending || bBCapsReadMessagePending)) + { + parent->isDeviceHDCPDetectionAlive = false; + delete this; + } + } + else + { + parent->isHDCPCap = Indeterminate; + } +} + +bool +DeviceHDCPDetection::hdcpValidateKsv +( + const NvU8 *ksv, + NvU32 Size +) +{ + + if (HDCP_KSV_SIZE <= Size) + { + NvU32 i, j; + NvU32 count_ones = 0; + for (i=0; i < HDCP_KSV_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (ksv[i] & (1 <<(j))) + { + count_ones++; + } + } + } + + if (count_ones == 20) + { + return true; + } + } + return false; +} + +void +DeviceHDCPDetection::messageFailed +( + MessageManager::Message *from, + NakData *nakData +) +{ + if (from == &remoteBKSVReadMessage) + { + if ((retriesRemoteBKSVReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteBKSVReadMessage++; + retryRemoteBKSVReadMessage = bBKSVReadMessagePending = true; + timer->queueCallback(this, "BKSV", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV); + return; + } + // + // If message failed is called after all retries have expired or due + // to any other reason then reset the bBKSVReadMessagePending flag + // + bBKSVReadMessagePending = false; + } + + if (from == &remoteBCapsReadMessage) + { + if ((retriesRemoteBCapsReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteBCapsReadMessage++; + retryRemoteBCapsReadMessage = bBCapsReadMessagePending = true; + timer->queueCallback(this, "BCaps", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV); + return; + } + // + // If message failed is called after all retries have expired or due + // to any other reason then reset the bBCapsReadMessagePending flag + // + bBCapsReadMessagePending = false; + } + + if (from == &remote22BCapsReadMessage) + { + if ((retriesRemote22BCapsReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemote22BCapsReadMessage++; + retryRemote22BCapsReadMessage = bBCapsReadMessagePending = true; + timer->queueCallback(this, "22BCaps", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN_BKSV); + return; + } + // + // If message failed is called after all retries have expired or due to + // any other reason then reset the bBCapsReadMessagePending flag + // + bBCapsReadMessagePending = false; + } + + parent->isHDCPCap = False; + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-QM> Message %s {%p} at '%s' failed. Device marked as not HDCP support.", + from == &remoteBKSVReadMessage ? "REMOTE_DPCD_READ(BKSV)" : + from == &remoteBCapsReadMessage ? "REMOTE_DPC_READ(BCaps)" : + from == &remote22BCapsReadMessage ? "REMOTE_DPC_READ(22BCaps)" : "???", + from, parent->address.toString(sb))); + + // Destruct only when no message is pending + if (!(bBKSVReadMessagePending || bBCapsReadMessagePending)) + { + parent->isDeviceHDCPDetectionAlive = false; + + // Complete remote HDCP probe and check if can power down again. 
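For reference, hdcpValidateKsv() above accepts a KSV only when its 40 bits contain exactly twenty ones, which matches the HDCP requirement for key selection vectors. A compact equivalent using std::bitset, shown purely as an illustration (ksvHasTwentyOnes is not part of this change):

    #include <bitset>

    // Count the set bits across the five KSV bytes; exactly 20 must be set.
    static bool ksvHasTwentyOnes(const NvU8 ksv[5])
    {
        unsigned ones = 0;
        for (unsigned i = 0; i < 5; i++)
            ones += (unsigned)std::bitset<8>(ksv[i]).count();
        return ones == 20;  // e.g. FF FF 0F 00 00 passes, FF FF FF 00 00 does not
    }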
+ if (parent->connector) + { + parent->connector->decPendingRemoteHdcpDetection(); + parent->connector->isNoActiveStreamAndPowerdown(); + } + + delete this; + } +} + +void +DeviceHDCPDetection::expired +( + const void *tag +) +{ + // Clear stale HDCP states when monitor instance is already destroyed + if (!parent->plugged) + { + if (retryRemoteBKSVReadMessage) + { + retryRemoteBKSVReadMessage = false; + bBKSVReadMessagePending = false; + } + else if (retryRemoteBCapsReadMessage) + { + retryRemoteBCapsReadMessage = false; + bBCapsReadMessagePending = false; + } + else if (retryRemote22BCapsReadMessage) + { + retryRemote22BCapsReadMessage = false; + bBCapsReadMessagePending = false; + } + + if (!(bBKSVReadMessagePending || bBCapsReadMessagePending)) + { + parent->isDeviceHDCPDetectionAlive = false; + delete this; + } + return; + } + + if (retryRemoteBKSVReadMessage) + { + Address parentAddress = parent->address.parent(); + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-QM> Requeing REMOTE_DPCD_READ_MESSAGE(BKSV) to %s", parentAddress.toString(sb))); + + retryRemoteBKSVReadMessage = false; + remoteBKSVReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP_BKSV_OFFSET, HDCP_KSV_SIZE); + DP_LOG(("DP-QM> Get BKSV (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remoteBKSVReadMessage)); + + bBKSVReadMessagePending = true; + messageManager->post(&remoteBKSVReadMessage, this); + } + + if (retryRemoteBCapsReadMessage) + { + Address parentAddress = parent->address.parent(); + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-QM> Requeing REMOTE_DPCD_READ_MESSAGE(BCAPS) to %s", parentAddress.toString(sb))); + + retryRemoteBCapsReadMessage = false; + remoteBCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP_BCAPS_OFFSET, HDCP_BCAPS_SIZE); + DP_LOG(("DP-QM> Get BCaps (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remoteBCapsReadMessage)); + + bBCapsReadMessagePending = true; + messageManager->post(&remoteBCapsReadMessage, this); + } + + if (retryRemote22BCapsReadMessage) + { + Address parentAddress = parent->address.parent(); + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-QM> Requeing REMOTE_DPCD_READ_MESSAGE(22BCAPS) to %s", parentAddress.toString(sb))); + + retryRemote22BCapsReadMessage = false; + remote22BCapsReadMessage.set(parentAddress, parent->address.tail(), NV_DPCD_HDCP22_BCAPS_OFFSET, HDCP22_BCAPS_SIZE); + DP_LOG(("DP-QM> Get 22BCaps (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", parent->address.toString(sb), &remote22BCapsReadMessage)); + + bBCapsReadMessagePending = true; + messageManager->post(&remote22BCapsReadMessage, this); + } + +} + +DeviceHDCPDetection::~DeviceHDCPDetection() +{ + parent->isDeviceHDCPDetectionAlive = false; + + // Clear all pending callbacks/messages + if (this->timer) + { + this->timer->cancelCallbacks(this); + } + + if (this->messageManager) + { + this->messageManager->cancelAll(&remoteBKSVReadMessage); + this->messageManager->cancelAll(&remoteBCapsReadMessage); + this->messageManager->cancelAll(&remote22BCapsReadMessage); + } +} + +void +DeviceHDCPDetection::waivePendingHDCPCapDoneNotification() +{ + // Waive the pendingHDCPCapDone notification + parent->shadow.hdcpCapDone = true; + parent->isDeviceHDCPDetectionAlive = false; + delete this; +} diff --git a/src/common/displayport/src/dp_discovery.cpp b/src/common/displayport/src/dp_discovery.cpp new file mode 100644 index 000000000..fc81f4208 --- /dev/null +++ 
b/src/common/displayport/src/dp_discovery.cpp @@ -0,0 +1,928 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_discovery.cpp * +* The DP MST discovery manager. * +* * +\***************************************************************************/ + +#include "dp_discovery.h" +#include "dp_messages.h" +#include "dp_tracing.h" + +using namespace DisplayPort; + +void DiscoveryManager::notifyLongPulse(bool status) +{ + if (status) + { + Device device; + device.address = Address(0); + device.branch = hal->getSupportsMultistream(); + device.legacy = false; + + detectBranch(device); + } + else if (!status) + { + removeDeviceTree(Address()); + } +} + +void DiscoveryManager::detectBranch(Device device) +{ + Address::StringBuffer sb; + DP_USED(sb); + + // + // 1. Create a LINK_ADDRESS_MESSAGE to send to this target so that we can find who he is + // 2. Create a REMOTE_DPCD_WRITE to set the GUID for this target + // *alternatively* we may have to use the local DPCD HAL to write this + // 3. Enumerate any children that we may wish to queue detect on. 
+ // + DP_LOG(("%s(): target = %s", __FUNCTION__, device.address.toString(sb))); + + BranchDetection * branchDetection = new BranchDetection(this, device); + outstandingBranchDetections.insertBack(branchDetection); + branchDetection->start(); +} + +void DiscoveryManager::detectSink(DiscoveryManager::Device device, bool bFromCSN) +{ + Address::StringBuffer sb; + DP_USED(sb); + + DP_LOG(("%s(): target = %s", __FUNCTION__, device.address.toString(sb))); + SinkDetection * sinkDetection = new SinkDetection(this, device, bFromCSN); + sinkDetection->start(); +} + +DiscoveryManager::Device * DiscoveryManager::findDevice(const Address & address) +{ + for (unsigned i = 0; i < currentDevicesCount; i++) + if (currentDevices[i].address == address) + { + if (currentDevices[i].peerGuid.isGuidZero() && currentDevices[i].peerDevice != Dongle && + (currentDevices[i].dpcdRevisionMajor >= 1 && currentDevices[i].dpcdRevisionMinor >= 2)) + { + DP_ASSERT(0 && "Zero guid for device even though its not a dongle type."); + } + return &currentDevices[i]; + } + + return 0; +} + +DiscoveryManager::Device * DiscoveryManager::findDevice(GUID & guid) +{ + if (guid.isGuidZero()) + { + DP_ASSERT(0 && "zero guid search"); + return 0; + } + + for (unsigned i = 0; i < currentDevicesCount; i++) + { + if (currentDevices[i].dpcdRevisionMajor <= 1 && currentDevices[i].dpcdRevisionMinor < 2) + continue; + + if (currentDevices[i].peerGuid == guid) + return &currentDevices[i]; + } + + return 0; +} + +void DiscoveryManager::addDevice(const DiscoveryManager::Device & device) +{ + Address::StringBuffer sb; + DP_USED(sb); + + GUID guid = device.peerGuid; + if (guid.isGuidZero() && + (device.peerDevice != Dongle) && + (device.dpcdRevisionMajor >= 1 && device.dpcdRevisionMinor >= 2)) + { + DP_ASSERT(0 && "GUID missing for the device"); + } + DP_ASSERT(!findDevice(device.address) && "Redundant add"); + sink->discoveryNewDevice(device); + + DP_LOG(("DP-DM> New device '%s' %s %s %s", device.address.toString(sb), + device.branch ? "Branch" : "", device.legacy ? "Legacy" : "", + device.peerDevice == Dongle ? "Dongle" : + device.peerDevice == DownstreamSink ? "DownstreamSink" : "")); + + Address::NvU32Buffer addrBuffer; + dpMemZero(addrBuffer, sizeof(addrBuffer)); + device.address.toNvU32Buffer(addrBuffer); + NV_DPTRACE_INFO(NEW_MST_DEVICE, device.address.size(), addrBuffer[0], addrBuffer[1], + addrBuffer[2], addrBuffer[3], device.branch, device.legacy, device.peerDevice); + + if (currentDevicesCount < maximumTopologyNodes) + { + currentDevices[currentDevicesCount++] = device; + } +} + +void DiscoveryManager::removeDevice(Device * device) +{ + Address::StringBuffer sb; + DP_USED(sb); + + DP_LOG(("DP-DM> Lost device '%s' %s %s %s", device->address.toString(sb), + device->branch ? "Branch" : "", device->legacy ? "Legacy" : "", + device->peerDevice == Dongle ? "Dongle" : + device->peerDevice == DownstreamSink ?
"DownstreamSink" : "")); + + sink->discoveryLostDevice(device->address); + + for (unsigned i = (unsigned)(device-¤tDevices[0]); i < currentDevicesCount - 1; i++) + currentDevices[i] = currentDevices[i+1]; + currentDevicesCount--; +} + +void DiscoveryManager::removeDeviceTree(const Address & prefix) +{ + for (unsigned i = 0; i < currentDevicesCount;) + if (currentDevices[i].address.under(prefix)) + removeDevice(¤tDevices[i]); + else + i++; +} + +DiscoveryManager::Device * DiscoveryManager::findChildDeviceForBranchWithGuid +( + GUID guid, + unsigned port, + Address & childAddr +) +{ + // Find it in relevant parent's device list + DiscoveryManager::Device * parentDevice = findDevice(guid); + if (!parentDevice) + { + DP_LOG(("DM> No Parent present for the device in DB.")); + return 0; + } + + childAddr = parentDevice->address; + childAddr.append(port); + return (findDevice(childAddr)); +} + +void DiscoveryManager::SinkDetection::detectCompleted(bool passed) +{ + // we could not read or write the guid + if (!passed) + { + // + // DP1.2 monitors that do not support GUID get filtered and dropped as 'not present'. + // Instead we demote such monitors to DP1.1 and continue sink detection so that end + // user at least gets active display scanout on such monitors (albeit reduced to DP1.1). + // + if (device.dpcdRevisionMajor > 1 || device.dpcdRevisionMinor >= 2) + { + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> sink at '%s' failed GUID identification, demote to 1.1 sink.", + address.toString(sb))); + device.dpcdRevisionMajor = 1; + device.dpcdRevisionMinor = 1; + } + else + { + // Had it previously been reported as present? + if (Device * device = parent->findDevice(address)) + parent->removeDevice(device); + + delete this; + return; + } + } + + // at this point we are sure that we have a device GUID. + // We need to check whether the device is new to the DB. + // Had we previously reported the device? + + Device * oldDevice = parent->findDevice(device.address); + + if (!oldDevice) + { + // completely new device + parent->addDevice(device); + } + // If it was a branch and now isn't.. delete the tree of devices under it + else if (oldDevice && oldDevice->branch && !device.branch) + { + parent->removeDeviceTree(device.address); + } + // It changed, delete the previously reported + else if (oldDevice && (oldDevice->legacy != device.legacy || + oldDevice->dpcdRevisionMajor!= device.dpcdRevisionMajor || + oldDevice->dpcdRevisionMinor!= device.dpcdRevisionMinor || + oldDevice->peerDevice != device.peerDevice|| + oldDevice->peerGuid != device.peerGuid || + oldDevice->SDPStreams != device.SDPStreams|| + oldDevice->SDPStreamSinks != device.SDPStreamSinks || + oldDevice->videoSink != device.videoSink)) + { + parent->removeDevice(oldDevice); + } + + // otherwise.. it already existed, and still does + + // We're done + completed = true; + delete this; +} + +void DiscoveryManager::BranchDetection::detectCompleted(bool present) +{ + // + // Handle device not present + // + if (!present) + { + // Had it previously been reported as present? + if (Device * device = parent->findDevice(address)) + parent->removeDevice(device); + + delete this; + return; + } + + // + // We've got a linkAddressMessage and we were able to program the GUID! 
+ // Report the branch and queue any children that were enumerated for detection + // + parent->addDevice(parentDevice); + + unsigned portsToDelete = (1 << (Address::maxPortCount+1)) - 1; // 16 ports + for (unsigned i = 0; i < childCount; i++) + { + Device newDevice; + newDevice.address = address; + newDevice.address.append(child[i].portNumber); + + // + // Input port? Nothing plugged in? Delete the tree of all devices under this one + // DP 1.2 Spec : 2.11.9.5.x + // + if (child[i].isInputPort || !child[i].dpPlugged) { + continue; + } + + portsToDelete &= ~(1 << child[i].portNumber); + + newDevice.peerDevice = child[i].peerDeviceType; + newDevice.legacy = child[i].legacyPlugged && (newDevice.peerDevice == Dongle); + newDevice.dpcdRevisionMajor = child[i].dpcdRevisionMajor; + newDevice.dpcdRevisionMinor = child[i].dpcdRevisionMinor; + // if internal device; use parent's GUID which we ourselves generated or got from the LAM. + if (child[i].portNumber > PHYSICAL_PORT_END) + newDevice.peerGuid = parentDevice.peerGuid; + else + newDevice.peerGuid = child[i].peerGUID; + + newDevice.SDPStreams = child[i].SDPStreams; + newDevice.SDPStreamSinks = child[i].SDPStreamSinks; + + if (child[i].peerDeviceType == DownstreamBranch && + child[i].hasMessaging) + { + newDevice.branch = true; + newDevice.videoSink = false; + } + else + { + newDevice.branch = false; + newDevice.videoSink = ((child[i].peerDeviceType == Dongle) ? + child[i].legacyPlugged : true); + } + + // + // Had we previously reported the device? + // + Device * oldDevice = parent->findDevice(newDevice.address); + + // If it was a branch and now isn't.. delete the tree of devices under it + if (oldDevice && oldDevice->branch && !newDevice.branch) + { + parent->removeDeviceTree(newDevice.address); + } + // It changed, delete + else if (oldDevice && (oldDevice->legacy != newDevice.legacy || + oldDevice->dpcdRevisionMajor!= newDevice.dpcdRevisionMajor || + oldDevice->dpcdRevisionMinor!= newDevice.dpcdRevisionMinor || + oldDevice->peerDevice != newDevice.peerDevice|| + oldDevice->peerGuid != newDevice.peerGuid || + oldDevice->SDPStreams != newDevice.SDPStreams|| + oldDevice->SDPStreamSinks != newDevice.SDPStreamSinks || + oldDevice->videoSink != newDevice.videoSink)) + { + parent->removeDevice(oldDevice); + } + + // otherwise.. it already existed, and still does + if (newDevice.branch) + { + parent->detectBranch(newDevice); + } + else + { + // the new device is a sink. It may or may not have a guid. + // write the guid if needed. 
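+ // bFromCSN is false here: the DPCD revision was already obtained from this LAM result, so SinkDetection::start() (below) can skip the extra LINK_ADDRESS_MESSAGE and go straight to the GUID check / remote DPCD read.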
+ parent->detectSink(newDevice, false); + } + } + + for (unsigned i = 0; i <= Address::maxPortCount; i++) + if ((portsToDelete >> i) & 1) + { + Address a = address; + a.append(i); + parent->removeDeviceTree(a); + } + + // We're done + completed = true; + delete this; +} + +void DiscoveryManager::BranchDetection::expired(const void * tag) +{ + if (retryLinkAddressMessage) + { + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Requeing LINK_ADDRESS_MESSAGE to %s", address.toString(sb))); + + retryLinkAddressMessage = false; + linkAddressMessage.set(address); + parent->messageManager->post(&linkAddressMessage, this); + } + else if (retryRemoteDpcdWriteMessage) + { + Address parentAddress = address; + parentAddress.pop(); + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Requeing REMOTE_DPCD_WRITE_MESSAGE to %s", parentAddress.toString(sb))); + + retryRemoteDpcdWriteMessage = false; + remoteDpcdWriteMessage.set(parentAddress, parentAddress.tail(), NV_DPCD_GUID, sizeof(GUID), (NvU8 *)&parentDevice.peerGuid); + DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", address.toString(sb), &remoteDpcdWriteMessage)); + + parent->messageManager->post(&remoteDpcdWriteMessage, this); + } +} + +void DiscoveryManager::SinkDetection::expired(const void * tag) +{ + if (retryLinkAddressMessage) + { + Address parentAddress = address; + parentAddress.pop(); + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Requeueing LAM message to %s", parentAddress.toString(sb))); + + retryLinkAddressMessage = false; + linkAddressMessage.set(parentAddress); + + parent->messageManager->post(&linkAddressMessage, this); + } + else if (retryRemoteDpcdReadMessage) + { + Address parentAddress = address; + parentAddress.pop(); + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Requeueing REMOTE_DPCD_READ_MESSAGE to %s", parentAddress.toString(sb))); + + retryRemoteDpcdReadMessage = false; + remoteDpcdReadMessage.set(parentAddress, parentAddress.tail(), NV_DPCD_GUID, sizeof(GUID)); + DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_READ {%p}", address.toString(sb), &remoteDpcdReadMessage)); + + parent->messageManager->post(&remoteDpcdReadMessage, this); + } + else if (retryRemoteDpcdWriteMessage) + { + Address parentAddress = address; + parentAddress.pop(); + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Requeueing REMOTE_DPCD_WRITE_MESSAGE to %s", parentAddress.toString(sb))); + + retryRemoteDpcdWriteMessage = false; + remoteDpcdWriteMessage.set(parentAddress, + parentAddress.tail(), + NV_DPCD_GUID, sizeof(GUID), + (NvU8 *)&device.peerGuid); + DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", address.toString(sb), &remoteDpcdWriteMessage)); + + parent->messageManager->post(&remoteDpcdWriteMessage, this); + } +} + +void DiscoveryManager::BranchDetection::messageFailed(MessageManager::Message * from, NakData * nakData) +{ + // + // If any of our messages fail, we've completed detection on this buzzard. 
+ // The only exception is if we get a DEFER - then we retry indefinitely + // + if (from == &linkAddressMessage) + { + if (retriesLinkAddressMessage < DPCD_LINK_ADDRESS_MESSAGE_RETRIES && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesLinkAddressMessage++; + retryLinkAddressMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &remoteDpcdWriteMessage) + { + if ((retriesRemoteDpcdWriteMessage < DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdWriteMessage++; + retryRemoteDpcdWriteMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN); + return; + } + } + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Message %s {%p} at '%s' failed. Device marked not present.", + from == &linkAddressMessage ? "LINK_ADDRESS_MESSAGE" : + from == &remoteDpcdWriteMessage ? "REMOTE_DPCD_WRITE(GUID)" : "???", + from, address.toString(sb))); + + + // + // Detection is done and branch doesn't exist. + // (Note this automatically removes self from any list we're in) + // + detectCompleted(false); +} + +void DiscoveryManager::SinkDetection::messageFailed(MessageManager::Message * from, NakData * nakData) +{ + if (from == &remoteDpcdReadMessage) + { + if ((retriesRemoteDpcdReadMessage < DPCD_REMOTE_DPCD_READ_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdReadMessage++; + retryRemoteDpcdReadMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_READ_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &remoteDpcdWriteMessage) + { + if ((retriesRemoteDpcdWriteMessage < DPCD_REMOTE_DPCD_WRITE_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesRemoteDpcdWriteMessage++; + retryRemoteDpcdWriteMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_REMOTE_DPCD_WRITE_MESSAGE_COOLDOWN); + return; + } + } + + if (from == &linkAddressMessage) + { + if ((retriesLinkAddressMessage < DPCD_LINK_ADDRESS_MESSAGE_RETRIES) && + (nakData->reason == NakDefer || nakData->reason == NakTimeout)) + { + retriesLinkAddressMessage++; + retryLinkAddressMessage = true; + parent->timer->queueCallback(this, "DISC", DPCD_LINK_ADDRESS_MESSAGE_COOLDOWN); + return; + } + } + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Message %s {%p} at '%s' failed.", + from == &remoteDpcdWriteMessage ? "REMOTE_DPCD_WRITE(GUID)" : + from == &remoteDpcdReadMessage ? "REMOTE_DPCD_READ(GUID)" : + from == &linkAddressMessage ? 
"LINK_ADDRESS_MESSAGE" : "???", + from, address.toString(sb))); + + detectCompleted(false); +} + +void DiscoveryManager::SinkDetection::handleLinkAddressDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + LinkAddressMessage::Result child; + child = *linkAddressMessage.result(address.tail()); + + device.peerDevice = child.peerDeviceType; + device.dpcdRevisionMajor = child.dpcdRevisionMajor; + device.dpcdRevisionMinor = child.dpcdRevisionMinor; + + if (device.dpcdRevisionMajor == 0) + { + device.dpcdRevisionMajor = 1; + device.dpcdRevisionMinor = 1; + } + device.portMap.inputMap |= (1 << child.portNumber); + + DP_LOG(("DP-DM> handleLinkAddressDownReply for sink device on '%s': DPCD Rev = %d.%d", + address.toString(sb), device.dpcdRevisionMajor, device.dpcdRevisionMinor)); + + // Check if the device already has a GUID + // or it is a dongle or on a logical port ; in which case no GUID is required. + if ((!device.peerGuid.isGuidZero()) || + (device.peerDevice == Dongle) || + (device.dpcdRevisionMajor <= 1 && device.dpcdRevisionMinor < 2) || + (device.address.tail() > PHYSICAL_PORT_END)) + { + parent->addDevice(device); + delete this; + return; + } + + Address parentAddress = address.parent(); + remoteDpcdReadMessage.set(parentAddress, address.tail(), NV_DPCD_GUID, sizeof(GUID)); + + parent->messageManager->post(&remoteDpcdReadMessage, this); + +} + +void DiscoveryManager::SinkDetection::handleRemoteDpcdReadDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> REMOTE_DPCD_READ {%p} at '%s' completed", + (MessageManager::Message *)&remoteDpcdReadMessage, + address.toString(sb))); + if (remoteDpcdReadMessage.replyNumOfBytesReadDPCD() != sizeof(GUID)) + { + DP_ASSERT(0 && "Incomplete GUID in remote DPCD read message"); + detectCompleted(false); + return; + } + + DP_ASSERT(remoteDpcdReadMessage.replyPortNumber() == address.tail()); + device.peerGuid.copyFrom(remoteDpcdReadMessage.replyGetData()); + + if (!device.peerGuid.isGuidZero()) + { + // we got the GUID ... handle device add/remove + detectCompleted(true); + } + else + { + // + // We need to give ourselves a non-zero GUID! + // + parent->guidBuilder.makeGuid(device.peerGuid); + + Address parentAddress = address.parent(); + remoteDpcdWriteMessage.set(parentAddress, + address.tail(), + NV_DPCD_GUID, sizeof(GUID), + (NvU8 *)&device.peerGuid); + + DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", + address.toString(sb), &remoteDpcdWriteMessage)); + + parent->messageManager->post(&remoteDpcdWriteMessage, this); + } +} + +void DiscoveryManager::BranchDetection::handleLinkAddressDownReply() +{ + Address::StringBuffer sb; + DP_USED(sb); + + // + // Copy link address results out of the structure + // - We cannot process the contents until after + // we've programmed the GUID. The reasoning is + // that we need to make sure we do not enumerate + // devices not yet in a usable state. 
+ // + childCount = linkAddressMessage.resultCount(); + for (unsigned i = 0; i < childCount; i++) + { + child[i] = *linkAddressMessage.result(i); + + // also update the portmap + parentDevice.portMap.internalMap = 0xFF00; // ports 0x8 to 0xF are internal + parentDevice.portMap.validMap |= (1 << child[i].portNumber); + if (child[i].isInputPort) + { + parentDevice.peerDevice = child[i].peerDeviceType; + parentDevice.dpcdRevisionMajor = child[i].dpcdRevisionMajor; + parentDevice.dpcdRevisionMinor = child[i].dpcdRevisionMinor; + parentDevice.portMap.inputMap |= (1 << child[i].portNumber); + } + } + + linkAddressMessage.getGUID(parentDevice.peerGuid); + if (parentDevice.peerGuid.isGuidZero()) + { + // + // We need to give ourselves a non-zero GUID! + // + parent->guidBuilder.makeGuid(parentDevice.peerGuid); + + if (address == Address(0)) + { + DP_LOG(("DP-DM> Setting GUID (locally) for '%s'", address.toString(sb))); + // + // We're locally connected, use the DPCD HAL to write the new GUID + // + if (AuxRetry::ack != parent->hal->setGUID(parentDevice.peerGuid)) + { + detectCompleted(false); + return; + } + + detectCompleted(true); + } + else + { + // + // Let's build a remote DPCD request. Remember the target is the *parent* + // of the device we want to talk to + // + Address parentAddress = address; + parentAddress.pop(); + remoteDpcdWriteMessage.set(parentAddress, address.tail(), + NV_DPCD_GUID, sizeof(GUID), + (NvU8 *)&parentDevice.peerGuid); + + DP_LOG(("DP-DM> Setting GUID (remotely) for '%s' sent REMOTE_DPCD_WRITE {%p}", + address.toString(sb), &remoteDpcdWriteMessage)); + + parent->messageManager->post(&remoteDpcdWriteMessage, this); + } + } + else + { + // + // Already had a GUID + // + detectCompleted(true); + } + +} + +void DiscoveryManager::BranchDetection::messageCompleted(MessageManager::Message * from) +{ + if (from == &linkAddressMessage) + handleLinkAddressDownReply(); + else if (from == &remoteDpcdWriteMessage) + detectCompleted(true); +} + +void DiscoveryManager::SinkDetection::messageCompleted(MessageManager::Message * from) +{ + if (from == &remoteDpcdReadMessage) + handleRemoteDpcdReadDownReply(); + else if (from == &linkAddressMessage) + handleLinkAddressDownReply(); + else if (from == &remoteDpcdWriteMessage) + detectCompleted(true); +} + +void DiscoveryManager::BranchDetection::start() +{ + // + // 1. Create a LINK_ADDRESS_MESSAGE to send to this target so that we can find who he is + // 2. Create a REMOTE_DPCD_WRITE to set the GUID for this target + // *alternatively* we may have to use the local DPCD HAL to write this + // 3. Enumerate any children that we may wish to queue detect on. 
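+ // Only step 1 happens here; steps 2 and 3 are driven from handleLinkAddressDownReply() and detectCompleted() once the LINK_ADDRESS_MESSAGE reply comes back.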
+ // + linkAddressMessage.set(address); + + Address::StringBuffer sb; + DP_USED(sb); + DP_LOG(("DP-DM> Detecting '%s' (sending LINK_ADDRESS_MESSAGE {%p})", + address.toString(sb), + (MessageManager::Message *)&linkAddressMessage)); + + parent->messageManager->post(&linkAddressMessage, this); +} + +void DiscoveryManager::SinkDetection::start() +{ + // + // Per DP1.4 requirement: + // Send PowerUpPhy message first, to make sure device is ready to work + // + NakData nakData; + powerUpPhyMessage.set(address.parent(), address.tail(), NV_TRUE); + parent->messageManager->send(&powerUpPhyMessage, nakData); + + Address::StringBuffer sb; + DP_USED(sb); + + // The sink is found in CSN, missing dpcd revision + if (bFromCSN) + { + parent->outstandingSinkDetections.insertBack(this); + // Create a LINK_ADDRESS_MESSAGE to send to parent of this target + linkAddressMessage.set(address.parent()); + + DP_LOG(("DP-DM> Detecting '%s' (sending LINK_ADDRESS_MESSAGE {%p})", + address.toString(sb), + (MessageManager::Message *)&linkAddressMessage)); + parent->messageManager->post(&linkAddressMessage, this); + } + else // The sink is found in LAM sent for branch, and with DPCD rev. + { + // Check if the device already has a GUID + // or it is a dongle or on a logical port ; in which case no GUID is required. + if ((!device.peerGuid.isGuidZero()) || + (device.peerDevice == Dongle) || + (device.dpcdRevisionMajor <= 1 && device.dpcdRevisionMinor < 2) || + (device.address.tail() > PHYSICAL_PORT_END)) + { + parent->addDevice(device); + delete this; + return; + } + + parent->outstandingSinkDetections.insertBack(this); + Address parentAddress = address.parent(); + remoteDpcdReadMessage.set(parentAddress, address.tail(), NV_DPCD_GUID, sizeof(GUID)); + + parent->messageManager->post(&remoteDpcdReadMessage, this); + } + +} + +DiscoveryManager::BranchDetection::~BranchDetection() +{ + List::remove(this); + + if (parent->outstandingSinkDetections.isEmpty() && + parent->outstandingBranchDetections.isEmpty()) + parent->sink->discoveryDetectComplete(); + + parent->timer->cancelCallbacks(this); +} + +DiscoveryManager::SinkDetection::~SinkDetection() +{ + List::remove(this); + + if (parent->outstandingSinkDetections.isEmpty() && + parent->outstandingBranchDetections.isEmpty()) + parent->sink->discoveryDetectComplete(); + + parent->timer->cancelCallbacks(this); +} + +void DiscoveryManager::ReceiverSink::messageProcessed(MessageManager::MessageReceiver * from) +{ + DP_ASSERT((from->getRequestId() == 0x2) && "This receiver is only meant for CSNs"); + + // CSNs are broadcast messages. So replies will always go to immediate downstream branch + CsnUpReplyContainer * csnReplyContainer = new CsnUpReplyContainer(parent); + parent->pendingCsnUpReplies.insertBack(csnReplyContainer); + + //Send acknowledgement to the CSN sender. + csnReplyContainer->postUpReply(); + + ConnStatusNotifyMessage* csnMessage = static_cast<ConnStatusNotifyMessage*>(from); + + if (csnMessage->getUpRequestData()->isInputPort) + { + DP_LOG(("Concentrator??
Got CSN for an upstream port!")); + return; + } + + Address childAddr; + DiscoveryManager::Device * oldDevice = parent->findChildDeviceForBranchWithGuid(csnMessage->getUpRequestData()->guid, + csnMessage->getUpRequestData()->port, childAddr); + if (!csnMessage->getUpRequestData()->devicePlugged) // some device was unplugged or powered off + { + if (oldDevice) + parent->removeDeviceTree(childAddr); + return; + } + + handleCSN(from); +} + +void DiscoveryManager::ReceiverSink::handleCSN(MessageManager::MessageReceiver * from) +{ + ConnStatusNotifyMessage* csnMessage = static_cast<ConnStatusNotifyMessage*>(from); + + // There is no point in serving an upRequest when no device is present. + if (parent->currentDevicesCount == 0) + { + DP_ASSERT(0 && "DM> No Device in the Topology"); + return; + } + + // + // Check for non-zero GUID in CSN message. It is mandatory to find respective parent + // Branch should not send CSN with Zero GUID as a unique GUID is set before CSN + // + if ((csnMessage->getUpRequestData()->guid).isGuidZero()) + { + DP_ASSERT(0 && "Ignoring CSN. Invalid parent device due to zero-GUID."); + return; + } + + Address childAddr; + unsigned port = csnMessage->getUpRequestData()->port; + DiscoveryManager::Device * oldDevice = + parent->findChildDeviceForBranchWithGuid(csnMessage->getUpRequestData()->guid, + port, + childAddr); + + // Check if we already have a device + if (oldDevice) + { + oldDevice->dirty = true; + + // Set the videoSink status of oldDevice again as old device might be a legacy dongle + // and a video sink is now added with it + oldDevice->videoSink = ((csnMessage->getUpRequestData()->peerDeviceType == Dongle) ? + csnMessage->getUpRequestData()->legacyPlugged : true); + + parent->sink->discoveryNewDevice(*oldDevice); + return; + } + + // Exit if no valid address matched for further detection. + if ((childAddr.size() == 0) || + (childAddr.size() > Address::maxHops)) + { + DP_ASSERT(0 && "Ignoring CSN. Invalid parent device due to GUID not found in discovered topology"); + return; + } + + DiscoveryManager::Device newDevice; + newDevice.address = childAddr; + newDevice.branch = (csnMessage->getUpRequestData()->messagingCapability == true) && + (csnMessage->getUpRequestData()->peerDeviceType == DownstreamBranch); + + newDevice.peerDevice = csnMessage->getUpRequestData()->peerDeviceType; + newDevice.legacy = csnMessage->getUpRequestData()->legacyPlugged == true; + newDevice.SDPStreams = newDevice.SDPStreamSinks = 0; + + if (csnMessage->getUpRequestData()->devicePlugged) // Check for a new device only if it's plugged + { + if (newDevice.branch) + { + newDevice.videoSink = false; + // send a LAM and the whole nine yards + DP_ASSERT(newDevice.legacy == false); + parent->detectBranch(newDevice); + return; + } + else + { + newDevice.SDPStreams = newDevice.SDPStreamSinks = 1; + newDevice.videoSink = ((csnMessage->getUpRequestData()->peerDeviceType == Dongle) ? + csnMessage->getUpRequestData()->legacyPlugged : true); + + parent->detectSink(newDevice, true); + return; + } + } +} diff --git a/src/common/displayport/src/dp_edid.cpp b/src/common/displayport/src/dp_edid.cpp new file mode 100644 index 000000000..6a21db0fc --- /dev/null +++ b/src/common/displayport/src/dp_edid.cpp @@ -0,0 +1,625 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_edid.c * +* Implementation of SST/MST EDID reader * +* * +\***************************************************************************/ + +#include "dp_buffer.h" +#include "dp_internal.h" +#include "dp_edid.h" + +using namespace DisplayPort; + +EdidAssembler::EdidAssembler(Edid * const edid, bool bPatchCrc): + edid(edid), stream(edid->getBuffer()), oldBlockChecksum(0x00), + blocksRead(0), totalBlockCnt(0), retriesCount(0), + bPatchCrc(bPatchCrc) {} + + +bool EdidAssembler::readIsComplete() +{ + return (blocksRead > 0 && blocksRead == totalBlockCnt); +} + +void EdidAssembler::reset() +{ + oldBlockChecksum = 0x00; + blocksRead = 0; + totalBlockCnt = 0; + retriesCount = 0; + stream.seek(0); +} + +void EdidAssembler::postReply(const Buffer & buffer, unsigned sizeCompleted, bool success) +{ + if (!success || buffer.isError()) + { + retriesCount++; + return; + } + + // + // For SST: + // Check the Checksum Error Per Block reading, mark the EDID as "patched" if + // CRC is wrong. DPLib will return fallback EDID. + // + blocksRead++; + stream.write(buffer.data, sizeCompleted); + if (getEDIDBlockChecksum(buffer)) + { + if (bPatchCrc) + edid->patchCrc(); + edid->setPatchedChecksum(true); + } + return; +} + +void EdidAssembler::postReply(unsigned char * data, unsigned sizeCompleted, bool success) +{ + // + // For MST: When read of edid block failed, library will attempt to read + // same block again, but not more than EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT times + // + if (!success) + { + retriesCount++; + return; + } + + // + // Check the Checksum Error Per Block reading, + // library will attempt to read same block again, + // but not more than EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT times. + // + Buffer buffer(data, EDID_BLOCK_SIZE); + if (buffer.isError()) + { + retriesCount++; + return; + } + + NvU8 newBlockChecksum = getEDIDBlockChecksum(buffer); + if (newBlockChecksum) + { + if (this->oldBlockChecksum != newBlockChecksum) //First failure? 
+ { + this->oldBlockChecksum = newBlockChecksum; + retriesCount++; + return; + } + } + + this->oldBlockChecksum = 0; + retriesCount = 0; + blocksRead++; + stream.write(data, sizeCompleted); +} + +bool EdidAssembler::readNextRequest(NvU8 & seg, NvU8 & offset) +{ + // + // cache totalBlockCnt, + // In EDID 1.3 HF-EEODB, it might changes after 1 extension block read. + // + if ((blocksRead == 1) || (blocksRead == 2)) + totalBlockCnt = edid->getBlockCount(); + + // + // will return false in two scenarios + // 1. EDID read is complete, all extension blocks were read + // 2. First EDID block was corrupted, then totalBlockCnt = 0 + // + if (blocksRead >= totalBlockCnt) + return false; + + // Retry count exceeded for particular block? + if (retriesCount > EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT) + return false; + + seg = NvU8(blocksRead >> 1); + offset = NvU8((blocksRead & 0x1) * EDID_BLOCK_SIZE); + return true; +} + + +enum +{ + EDID_V1_IDX_EXTENSION = 0x7E, + EDID_V1_IDX_HEADER0 = 0x00, + EDID_V1_HEADER0 = 0x00, + + EDID_V1_IDX_HEADER1 = 0x01, + EDID_V1_HEADER1 = 0xFF, + + EDID_V1_IDX_VERSION = 0x12, + EDID_V1_VERSION_1 = 0x01, + EDID_V2_IDX_VERREV = 0x00, + + // + // from od_edid.h RM to identify VER 2, use 7:4 bits. + // #define EDID_V2_VERREV_VERSION 7:4 /* RW--F */ + // #define EDID_V2_VERREV_VERSION_2 0x02 /* RWI-V */ + // + // Avoiding FLD_* macros, thus shift VER2 value 4 bits to left + // + EDID_V2_VERREV_VERSION_2 = 0x02 << 4, + EDID_FLAGS_CHKSUM_ATTEMPTS_DP = 0x5, +}; + +enum +{ + // EDID CTA-EXT (CTA 861 Extension) block defines + EDID_CTA_EXT_HEADER_OFFSET = 0x00, + EDID_CTA_EXT_HEADER = 0x02, + EDID_CTA_EXT_VERSION_OFFSET = 0x01, + EDID_CTA_EXT_VERSION_3 = 0x03, + EDID_CTA_EXT_DATA_BLOCK_HEADER_OFFSET = 0x04, + EDID_CTA_EXT_DATA_BLOCK_HEADER_HF_EEODB = 0xE2, + EDID_CTA_EXT_DATA_BLOCK_TAG_OFFSET = 0x05, + EDID_CTA_EXT_DATA_BLOCK_TAG_HF_EEODB = 0x78, + EDID_CTA_EXT_DATA_BLOCK_EXT_COUNT_OFFSET = 0x06, +}; + +Edid::Edid(): buffer() +{ + // fill EDID buffer with zeroes + this->buffer.memZero(); + checkSumValid = false; + forcedCheckSum = false; + fallbackEdid = false; + patchedChecksum = false; + + // clear the WARFlags + _WARFlags temp = {0}; + WARFlags = temp; +} + +Edid::~Edid() +{ +} + +bool Edid::verifyCRC() +{ + if (getEdidSize() > 0) + { + this->validateCheckSum(); + return this->checkSumValid; + } + else + return false; +} + +// this routine patches the edid crc after it has been overridden for WARs. +void Edid::patchCrc() +{ + // we always override some bytes within the first 128 + // recalculate and fix the checksum for the first page only. + unsigned chksum = 0; + for (unsigned i = 0; i < 128; i++) + { + chksum += buffer.data[i]; + } + chksum = chksum & 0xFF; + + if (chksum) + buffer.data[127] = 0xFF & (buffer.data[127] + (0x100 - chksum)); +} + +bool Edid::isChecksumValid() const +{ + // return checksum valid if it is. + // else return checksum is valid if checksum wasn't valid but we will assume it to be. 
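+ // A block checksum passes when all 128 bytes of the block sum to 0 mod 256 (see validateCheckSum() and patchCrc()); for example, if the first 127 bytes of a block sum to 0x37, byte 127 must be 0xC9.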
+ return (checkSumValid || forcedCheckSum); +} + +bool Edid::isFallbackEdid() const +{ + return fallbackEdid; +} + +NvU8 Edid::getFirstPageChecksum() +{ + DP_ASSERT(buffer.getLength() >= 128); + if (buffer.getLength() < 128) + return 0; + else + return buffer.data[127]; +} + +NvU8 Edid::getLastPageChecksum() +{ + NvU32 bufferSize = buffer.getLength(); + NvU32 checksumLocation = this->getBlockCount() * 128 - 1; + + if (bufferSize == 0 || bufferSize < (this->getBlockCount() * 128)) + { + DP_LOG(("DP-EDID> Edid length is 0 or less than required")); + return 0; + } + + if (bufferSize % 128 != 0) + { + DP_LOG(("DP-EDID> Edid length is not a multiple of 128")); + return 0; + } + + return buffer.data[checksumLocation]; + +} + +void Edid::validateCheckSum() +{ + // Each page has its own checksum + checkSumValid = false; + for (unsigned chunk = 0; chunk < this->buffer.length; chunk += 128) + { + unsigned chksum = 0; + for (unsigned i = 0; i < 128; i++) + { + chksum += buffer.data[i+chunk]; + } + + if ((chksum & 0xFF) != 0) + return; + } + checkSumValid = true; +} + +unsigned Edid::getEdidVersion() +{ + if (buffer.isError() || buffer.length < EDID_BLOCK_SIZE) + { + return 0; + } + + // 0 version is "unknown" + unsigned version = 0; + + // Check for Version 1 EDID + if (this->buffer.data[EDID_V1_IDX_VERSION] == EDID_V1_VERSION_1) + { + version = 1; + } + // Check for version 2 EDID + else if (this->buffer.data[EDID_V2_IDX_VERREV] & EDID_V2_VERREV_VERSION_2) + { + // + // Version 2 has 256 bytes by default. + // There is a note about an extra 256 byte block if byte 0x7E + // bit 7 is set but there's no definition for it listed in + // the EDID Version 3 (971113). So, let's just skip it for now. + // + version = 2; + } + else + { + DP_ASSERT(version && "Unknown EDID version"); + } + + return version; +} + +const char * Edid::getName() const +{ + static char decodedName[16] = {0}; + int tail = 0; + if (buffer.length < 128) + return "?"; + + for (int i = 0; i < 4; i++) + if (buffer.data[0x39 + i * 18 + 0] == 0xFC) + { + for (int j = 0; j < 13; j++) + decodedName[tail++] = buffer.data[0x39 + i*18 + 2 + j]; + break; + } + decodedName[tail++] = 0; + return decodedName; +} + +unsigned Edid::getBlockCount() +{ + if (buffer.isError() || buffer.length < EDID_BLOCK_SIZE) + { + return 0; + } + + unsigned version = getEdidVersion(); + + if (version == 1) + { + NvU32 blockCount = (unsigned) this->buffer.data[EDID_V1_IDX_EXTENSION]+1; + + if (blockCount > EDID_MAX_BLOCK_COUNT) + { + DP_LOG(("DPEDID> %s: DDC read returned questionable results: " + "Total block Count too high: %d", + __FUNCTION__, blockCount)); + return 1; + } + // + // Check for the HF-EEODB defined in HDMI 2.1 specification. + // 1. It is EDID version 1.3 and the extension block count is 1 (total block count = 2) + // 2. The 1st EDID extension block is already read. (buffer.length > block size) + // 3. The 1st EDID extension block is CTA extension block. + // 4. It has HF-EEODB (1st extension block: byte4 == 0xE2 and byte5 == 0x78) + // + if ((blockCount == 2) && (buffer.length >= EDID_BLOCK_SIZE * 2)) + { + NvU8 *pExt = &(this->buffer.data[EDID_BLOCK_SIZE]); + + // + // If it's a CTA-EXT block version 3 and has HF-EEODB + // defined, update the total block count. 
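+ // Concretely, the check below looks at the first extension block: byte 0 == 0x02 (CTA-EXT header), byte 1 == 0x03 (version 3), byte 4 == 0xE2 and byte 5 == 0x78 (HF-EEODB data block); byte 6 then supplies the extension-block count used to recompute the total.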
+ // + if ((pExt[EDID_CTA_EXT_HEADER_OFFSET] == EDID_CTA_EXT_HEADER) && + (pExt[EDID_CTA_EXT_VERSION_OFFSET] == EDID_CTA_EXT_VERSION_3) && + (pExt[EDID_CTA_EXT_DATA_BLOCK_HEADER_OFFSET] == EDID_CTA_EXT_DATA_BLOCK_HEADER_HF_EEODB) && + (pExt[EDID_CTA_EXT_DATA_BLOCK_TAG_OFFSET] == EDID_CTA_EXT_DATA_BLOCK_TAG_HF_EEODB)) + { + blockCount = pExt[EDID_CTA_EXT_DATA_BLOCK_EXT_COUNT_OFFSET] + 1; + } + + } + return blockCount; + } + else if (version == 2) + { + // + // Version 2 has 256 bytes by default. + // There is a note about an extra 256 byte block + // if byte 0x7E bit 7 is set, but there's no + // definition for it listed in the + // EDID Version 3 (971113) So, let's just skip + // it for now. + // + return 2; + } + else + { + // Unknown EDID version. Skip it. + DP_LOG(("DPEDID> %s: Unknown EDID Version!",__FUNCTION__)); + DP_ASSERT(0 && "Unknown EDID version!"); + return 1; + } +} + +unsigned Edid::getEdidSize() const +{ + return this->buffer.length; +} + +void DisplayPort::Edid::swap(Edid & right) +{ + swapBuffers(buffer, right.buffer); + validateCheckSum(); +} + +const NvU8 fallbackEdidModes[5][EDID_BLOCK_SIZE] = { + // ID Manufacturer Name: NVD + // VIDEO INPUT DEFINITION: + // Digital Signal + // VESA DFP 1.x Compatible + + // + // The first 4 entries are for NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS (DPCD 0x20) + // 1024x768x60Hz: defined in bit 0. + // 1280x720x60Hz: defined in bit 1. + // 1920x1080x60Hz: defined in bit 2. [Mandatory] + // + { + // Bit 2: 1920x1080x60 only + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xDB + }, + { + // bit 2 + bit 0: 1920x1080x60 + 1024x768x60 + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x08, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD3 + }, + { + // bit 2 + bit 1: 1920x1080x60 + 1280x720x60 + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x81, 0xC0, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 
0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9C + }, + { + // bit2 + bit 1 + bit 0: All 3 modes. + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0xA5, 0x00, 0x00, 0x64, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x00, 0x08, 0x00, 0x81, 0xC0, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3A, + 0x80, 0x18, 0x71, 0x38, 0x2D, 0x40, 0x58, 0x2C, + 0x43, 0x00, 0xC0, 0x1C, 0x32, 0x00, 0x00, 0x1C, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94 + }, + { + // ESTABLISHED TIMING I: + // 640 X 480 @ 60Hz (IBM,VGA) + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x04, 0x95, 0x00, 0x00, 0x78, + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, + 0x50, 0x54, 0x00, 0x20, 0x00, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92 + } +}; + +// +// Definition of DPCD 0x20: +// 1024x768x60Hz: defined in bit 0. +// 1280x720x60Hz: defined in bit 1. +// 1920x1080x60Hz: defined in bit 2. [Mandatory] +// MIN value is 4 (only 1920x1080 supported) +// MAX value is 7 (supports all 3 modes) +// +#define SINK_VIDEO_FALLBACK_FORMATS_MIN_VALUE (0x00000004) +#define SINK_VIDEO_FALLBACK_FORMATS_MAX_VALUE (0x00000007) + +void DisplayPort::makeEdidFallback(Edid & edid, NvU32 fallbackFormatSupported) +{ + const NvU8 *data; + + // fallbackFormatSupported valid values = 4~7 + if (fallbackFormatSupported > SINK_VIDEO_FALLBACK_FORMATS_MAX_VALUE || + fallbackFormatSupported < SINK_VIDEO_FALLBACK_FORMATS_MIN_VALUE) + { + // 4 is default fallback mode. 
(only 640x480) + data = fallbackEdidModes[4]; + } + else + { + data = fallbackEdidModes[fallbackFormatSupported-4]; + } + if (!edid.getBuffer()->resize(EDID_BLOCK_SIZE)) + return; + + dpMemCopy(edid.getBuffer()->getData(), (const NvU8*)data, EDID_BLOCK_SIZE); + DP_ASSERT(edid.verifyCRC()); + edid.setFallbackFlag(true); +} + +/* +Fake EDID for DP2VGA dongle when the EDID of the real monitor is not available + +Established Timings [20 CE 00] + 640 x 480 @ 60Hz + 800 x 600 @ 72Hz + 800 x 600 @ 75Hz + 1024 x 768 @ 60Hz + 1024 x 768 @ 70Hz + 1024 x 768 @ 75Hz + +Standard Timings + Timing [3159] : 640 x 480 @ 85Hz (4:3) + Timing [4559] : 800 x 600 @ 85Hz (4:3) + Timing [6159] : 1024 x 768 @ 85Hz (4:3) + Timing [714F] : 1152 x 864 @ 75Hz (4:3) + +Detailed Timing [DTD] 1280 x 1024 @ 60.02Hz + Pixel Clock : 108.00Mhz + HBlank, HBorder : 408, 0 + HSyncStart, HSyncWidth : 48, 112 + VBlank, VBorder : 42, 0 + VSyncStart, VSyncWidth : 1, 3 + Image size : 376mm x 301mm + DigitalSeparate +/+ +*/ + +void DisplayPort::makeEdidFallbackVGA(Edid & edid) +{ + const NvU8 data[] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x3A, 0xC4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x13, 0x01, 0x03, 0x80, 0x26, 0x1E, 0x78, 0xEE, 0xCB, 0x05, 0xA3, 0x58, 0x4C, 0x9B, 0x25, + 0x13, 0x50, 0x54, 0x20, 0xCE, 0x00, 0x31, 0x59, 0x45, 0x59, 0x61, 0x59, 0x71, 0x4F, 0x81, 0x40, + 0x81, 0x80, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2A, 0x00, 0x98, 0x51, 0x00, 0x2A, 0x40, 0x30, 0x70, + 0x13, 0x00, 0x78, 0x2D, 0x11, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x30, 0x55, 0x1F, + 0x52, 0x0E, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x4C, + 0x43, 0x44, 0x5F, 0x56, 0x47, 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xD8 + }; + + if (!edid.getBuffer()->resize(sizeof(data))) + return; + + dpMemCopy(edid.getBuffer()->getData(), (const NvU8*)data, sizeof data); + DP_ASSERT(edid.verifyCRC()); + edid.setFallbackFlag(true); +} + +NvU8 DisplayPort::getEDIDBlockChecksum(const Buffer & buffer) +{ + DP_ASSERT(buffer.getLength() == 128); + + unsigned chksum = 0; + for (unsigned i = 0; i < buffer.getLength(); i++) + { + chksum += buffer.data[i]; + } + chksum = chksum & 0xFF; + return (NvU8)chksum; +} diff --git a/src/common/displayport/src/dp_evoadapter.cpp b/src/common/displayport/src/dp_evoadapter.cpp new file mode 100644 index 000000000..1a8e1e645 --- /dev/null +++ b/src/common/displayport/src/dp_evoadapter.cpp @@ -0,0 +1,1843 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_evoadapter.cpp * +* Interface for low level access to the aux bus. * +* This is the synchronous version of the interface. * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_evoadapter.h" +#include "dp_auxdefs.h" +#include "dp_tracing.h" +#include "dp_vrr.h" +#include + +#include +#include +#include +#include +#include + +using namespace DisplayPort; + +// +// Evo hardcodes the relationship between stream and head # +// Head#x is always stream x+1 +// +#define STREAM_TO_HEAD_ID(s) ((s) - 1) +#define HEAD_TO_STREAM_ID(s) ((s) + 1) + +// +// Data Base used to store all the regkey values. +// The type is defined in dp_regkeydatabase.h. +// All entries set to 0 before initialized by the first EvoMainLink constructor. +// The first EvoMainLink constructor will populate that data base. +// Later EvoMainLink will use values from that data base. +// +static struct DP_REGKEY_DATABASE dpRegkeyDatabase = {0}; + +enum DP_REG_VAL_TYPE +{ + DP_REG_VAL_BOOL = 0, + DP_REG_VAL_U32 = 1, + DP_REG_VAL_U16 = 2, + DP_REG_VAL_U8 = 3 +}; + +const struct +{ + const char* pName; + void* pValue; + DP_REG_VAL_TYPE valueType; +} DP_REGKEY_TABLE [] = +{ + {NV_DP_REGKEY_ENABLE_AUDIO_BEYOND_48K, &dpRegkeyDatabase.bAudioBeyond48kEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_OVERRIDE_DPCD_REV, &dpRegkeyDatabase.dpcdRevOveride, DP_REG_VAL_U32}, + {NV_DP_REGKEY_DISABLE_SSC, &dpRegkeyDatabase.bSscDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_FAST_LINK_TRAINING, &dpRegkeyDatabase.bFastLinkTrainingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_DISABLE_MST, &dpRegkeyDatabase.bMstDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_INBAND_STEREO_SIGNALING, &dpRegkeyDatabase.bInbandStereoSignalingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_SKIP_POWEROFF_EDP_IN_HEAD_DETACH, &dpRegkeyDatabase.bPoweroffEdpInHeadDetachSkipped, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_OCA_LOGGING, &dpRegkeyDatabase.bOcaLoggingEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_REPORT_DEVICE_LOST_BEFORE_NEW, &dpRegkeyDatabase.bReportDeviceLostBeforeNew, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_APPLY_LINK_BW_OVERRIDE_WAR, &dpRegkeyDatabase.bLinkBwOverrideWarApplied, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_APPLY_MAX_LINK_RATE_OVERRIDES, &dpRegkeyDatabase.applyMaxLinkRateOverrides, DP_REG_VAL_U32}, + {NV_DP_REGKEY_DISABLE_DSC, &dpRegkeyDatabase.bDscDisabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_SKIP_ASSESSLINK_FOR_EDP, &dpRegkeyDatabase.bAssesslinkForEdpSkipped, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_HDCP_AUTH_ONLY_ON_DEMAND, &dpRegkeyDatabase.bHdcpAuthOnlyOnDemand, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_ENABLE_MSA_OVER_MST, &dpRegkeyDatabase.bMsaOverMstEnabled, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE, &dpRegkeyDatabase.bOptLinkKeptAlive, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_MST, &dpRegkeyDatabase.bOptLinkKeptAliveMst, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_KEEP_OPT_LINK_ALIVE_SST, &dpRegkeyDatabase.bOptLinkKeptAliveSst, DP_REG_VAL_BOOL}, + {NV_DP_REGKEY_FORCE_EDP_ILR, &dpRegkeyDatabase.bBypassEDPRevCheck, DP_REG_VAL_BOOL}, + {NV_DP_DSC_MST_CAP_BUG_3143315, &dpRegkeyDatabase.bDscMstCapBug3143315, DP_REG_VAL_BOOL}, + 
{NV_DP_DSC_MST_ENABLE_PASS_THROUGH, &dpRegkeyDatabase.bDscMstEnablePassThrough, DP_REG_VAL_BOOL} +}; + +EvoMainLink::EvoMainLink(EvoInterface * provider, Timer * timer) : + provider(provider), + timer(timer), + displayId(provider->getDisplayId()), + subdeviceIndex(provider->getSubdeviceIndex()) +{ + // + // Process GPU caps (This needs to be replaced with a control call caps interface) + // + NvU32 code; + + // Initialize shared regkey data base, and apply the overrides + this->initializeRegkeyDatabase(); + this->applyRegkeyOverrides(); + + _isDynamicMuxCapable = false; + _isLTPhyRepeaterSupported = true; + _rmPhyRepeaterCount = 0; + dpMemZero(&_DSC, sizeof(_DSC)); + queryGPUCapability(); + + queryAndUpdateDfpParams(); + + // + // Tell RM to hands off on the DisplayPort hardware + // + NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS setManualParams = {0}; + setManualParams.subDeviceInstance = subdeviceIndex; + code = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT, &setManualParams, sizeof setManualParams); + DP_ASSERT (code == NVOS_STATUS_SUCCESS && "Unable to enable library mode"); + + // + // Get the mask of valid heads + // + NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS allHeadMaskParams; + dpMemZero(&allHeadMaskParams, sizeof allHeadMaskParams); + allHeadMaskParams.subDeviceInstance = subdeviceIndex; + code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, &allHeadMaskParams, sizeof(allHeadMaskParams)); + + if (code != NVOS_STATUS_SUCCESS) + { + DP_ASSERT(0 && "Unable to get head mask"); + allHeadMask = 3; + } + else + { + allHeadMask = allHeadMaskParams.headMask; + } +} + + +bool EvoMainLink::vrrRunEnablementStage(unsigned stage, NvU32 *status) +{ + NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS params = {0}; + params.subDeviceInstance = subdeviceIndex; + params.displayId = this->displayId; + + switch (stage) + { + case VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_BEGIN); + break; + case VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_CHALLENGE); + break; + case VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _MONITOR_ENABLE_CHECK); + break; + case VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_BEGIN); + break; + case VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_CHALLENGE); + break; + case VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _DRIVER_ENABLE_CHECK); + break; + case VRR_ENABLE_STAGE_RESET_MONITOR: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _RESET_MONITOR); + break; + case VRR_ENABLE_STAGE_INIT_PUBLIC_INFO: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _INIT_PUBLIC_INFO); + break; + case VRR_ENABLE_STAGE_GET_PUBLIC_INFO: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _GET_PUBLIC_INFO); + break; + case VRR_ENABLE_STAGE_STATUS_CHECK: + params.cmd |= DRF_DEF(0073_CTRL_DP_CMD, _ENABLE_VRR_CMD, _STAGE, _STATUS_CHECK); + break; + default: + DP_ASSERT(0 && "Undefined VRR Enablement Stage."); + return false; + } + NvU32 retVal = provider->rmControl0073(NV0073_CTRL_CMD_DP_ENABLE_VRR, &params, sizeof(params)); + if (status) + { + *status = params.result; + } + if (retVal !=
NVOS_STATUS_SUCCESS) + { + return false; + } + return true; +} + +bool EvoMainLink::getEdpPowerData(bool *panelPowerOn, bool *dpcdPowerStateD0) +{ + NV0073_CTRL_DP_GET_EDP_DATA_PARAMS params; + params.subDeviceInstance = subdeviceIndex; + params.displayId = this->displayId; + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_EDP_DATA, &params, sizeof(params)); + + if (code != NVOS_STATUS_SUCCESS) + { + DP_ASSERT(0 && "Unable to get eDP power data, assuming panel off."); + if (panelPowerOn) + { + *panelPowerOn = false; + } + if (dpcdPowerStateD0) + { + *dpcdPowerStateD0 = false; + } + return false; + } + else + { + if (panelPowerOn) + { + *panelPowerOn = FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _PANEL_POWER, _ON, + params.data); + } + if (dpcdPowerStateD0) + { + *dpcdPowerStateD0 = FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _DPCD_POWER_STATE, _D0, + params.data); + } + return true; + } +} + +NvU32 EvoMainLink::streamToHead(NvU32 streamId, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier) +{ + NvU32 headIndex = 0; + NvU32 maxHeads = allHeadMask; + NUMSETBITS_32(maxHeads); + headIndex = DP_MST_STREAMID_TO_HEAD(streamId, streamIdentifier, maxHeads); + + return headIndex; +} + +NvU32 EvoMainLink::headToStream(NvU32 head, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier) +{ + NvU32 streamIndex = 0; + + NvU32 maxHeads = allHeadMask; + NUMSETBITS_32(maxHeads); + streamIndex = DP_MST_HEAD_TO_STREAMID(head, streamIdentifier, maxHeads); + + return streamIndex; +} + +void EvoMainLink::queryGPUCapability() +{ + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS params; + + dpMemZero(&params, sizeof(params)); + params.subDeviceInstance = subdeviceIndex; + params.sorIndex = provider->getSorIndex(); + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_CAPS, &params, sizeof(params)); + if (code != NVOS_STATUS_SUCCESS) + { + DP_ASSERT(0 && "Unable to process GPU caps"); + } + else + { + // + // Check if MST feature needs to be disabled by regkey. This is requirement by few OEMs, they don't want to support + // MST feature on particular sku, whenever requested through INF. + // + _hasMultistream = (params.bIsMultistreamSupported == NV_TRUE) && !_isMstDisabledByRegkey; + _isDP1_2Supported = (params.bIsDp12Supported == NV_TRUE) ? true : false; + _isDP1_4Supported = (params.bIsDp14Supported == NV_TRUE) ? true : false; + _isStreamCloningEnabled = (params.bIsSCEnabled == NV_TRUE) ? true : false; + _hasIncreasedWatermarkLimits = (params.bHasIncreasedWatermarkLimits == NV_TRUE) ? true : false; + + _isFECSupported = (params.bFECSupported == NV_TRUE) ? true : false; + + _useDfpMaxLinkRateCaps = (params.bOverrideLinkBw == NV_TRUE) ? true : false; + + _isLTPhyRepeaterSupported = (params.bIsTrainPhyRepeater == NV_TRUE) ? true : false; + + if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _1_62, params.maxLinkRate)) + _maxLinkRateSupportedGpu = RBR; //in Hz + else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _2_70, params.maxLinkRate)) + _maxLinkRateSupportedGpu = HBR; //in Hz + else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _5_40, params.maxLinkRate)) + _maxLinkRateSupportedGpu = HBR2; //in Hz + else if (FLD_TEST_DRF(0073, _CTRL_CMD_DP_GET_CAPS, _MAX_LINK_RATE, _8_10, params.maxLinkRate)) + _maxLinkRateSupportedGpu = HBR3; //in Hz + else + { + DP_ASSERT(0 && "Unable to get max link rate"); + // Assume that we can at least support RBR. + _maxLinkRateSupportedGpu = RBR; + } + + if (!_isDscDisabledByRegkey) + { + _DSC.isDscSupported = params.DSC.bDscSupported ?
true : false; + _DSC.encoderColorFormatMask = params.DSC.encoderColorFormatMask; + _DSC.lineBufferSizeKB = params.DSC.lineBufferSizeKB; + _DSC.rateBufferSizeKB = params.DSC.rateBufferSizeKB; + _DSC.bitsPerPixelPrecision = params.DSC.bitsPerPixelPrecision; + _DSC.maxNumHztSlices = params.DSC.maxNumHztSlices; + _DSC.lineBufferBitDepth = params.DSC.lineBufferBitDepth; + } + } +} + +void EvoMainLink::triggerACT() +{ + NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS params = {0}; + params.subDeviceInstance = this->subdeviceIndex; + params.displayId = this->displayId; + + provider->rmControl0073(NV0073_CTRL_CMD_DP_SEND_ACT, &params, sizeof params); +} + +void EvoMainLink::configureHDCPRenegotiate(NvU64 cN, NvU64 cKSV, bool bForceReAuth, bool bRxIDMsgPending){} +void EvoMainLink::configureHDCPGetHDCPState(HDCPState &hdcpState) +{ + // HDCP Not Supported + hdcpState.HDCP_State_Repeater_Capable = false; + hdcpState.HDCP_State_22_Capable = false; + hdcpState.HDCP_State_Encryption = false; + hdcpState.HDCP_State_Authenticated = false; +} + +void EvoMainLink::configureSingleStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + bool bEnhancedFraming, + NvU32 tuSize, + NvU32 waterMark, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamId, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultiStreamMode, + bool bAudioOverRightPanel, + bool bEnable2Head1Or) +{ + NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS params = {0}; + params.subDeviceInstance = this->subdeviceIndex; + params.head = head; + params.sorIndex = provider->getSorIndex(); + params.bEnableTwoHeadOneOr = bEnable2Head1Or; + + if (singleHeadMultiStreamMode == DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST) + { + // In 2-SST mode configure Head-SF on primary link, so primary link configuration + // gets copied to secondary link.
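+ // Here the single-head multi-stream pipeline ID doubles as the DP link index; in every other mode the else branch below uses provider->getLinkIndex() instead.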
+ params.dpLink = streamId; + } + else + { + params.dpLink = provider->getLinkIndex(); + } + + params.bEnableOverride = NV_TRUE; + params.bMST = NV_FALSE; + params.hBlankSym = hBlankSym; + params.vBlankSym = vBlankSym; + params.colorFormat = colorFormat; + + params.SST.bEnhancedFraming = bEnhancedFraming; + params.SST.tuSize = tuSize; + params.SST.waterMark = waterMark; + params.SST.bEnableAudioOverRightPanel = bAudioOverRightPanel; + + provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_STREAM, ¶ms, sizeof params); +} + +void EvoMainLink::configureSingleHeadMultiStreamMode(NvU32 displayIDs[], + NvU32 numStreams, + NvU32 mode, + bool bSetConfig, + NvU8 vbiosPrimaryDispIdIndex) +{ + NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS params = {0}; + params.subDeviceInstance = this->subdeviceIndex; + + for (NvU32 pipelineID = 0; pipelineID < numStreams; pipelineID++) + { + params.displayIDs[pipelineID] = displayIDs[pipelineID]; + } + params.mode = mode; + params.bSetConfig = bSetConfig; + params.numStreams = numStreams; + params.vbiosPrimaryDispIdIndex = vbiosPrimaryDispIdIndex; + + provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM, + ¶ms, + sizeof params); +} + +void EvoMainLink::configureMultiStream(NvU32 head, + NvU32 hBlankSym, + NvU32 vBlankSym, + NvU32 slotStart, + NvU32 slotEnd, + NvU32 PBN, + NvU32 Timeslice, + DP_COLORFORMAT colorFormat, + DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier, + DP_SINGLE_HEAD_MULTI_STREAM_MODE singleHeadMultistreamMode, + bool bAudioOverRightPanel, + bool bEnable2Head1Or) +{ + NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS params = {0}; + params.head = head; + params.subDeviceInstance = this->subdeviceIndex; + params.sorIndex = provider->getSorIndex(); + params.dpLink = provider->getLinkIndex(); + params.bEnableOverride = NV_TRUE; + params.bMST = NV_TRUE; + params.hBlankSym = hBlankSym; + params.vBlankSym = vBlankSym; + params.colorFormat = colorFormat; + params.bEnableTwoHeadOneOr = bEnable2Head1Or; + params.singleHeadMultistreamMode = singleHeadMultistreamMode; + + params.MST.slotStart = slotStart; + params.MST.slotEnd = slotEnd; + params.MST.PBN = PBN; + params.MST.Timeslice = Timeslice; + params.MST.singleHeadMSTPipeline = streamIdentifier; + params.MST.bEnableAudioOverRightPanel = bAudioOverRightPanel; + + provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_STREAM, ¶ms, sizeof params); +} + +void EvoMainLink::configureMsScratchRegisters(NvU32 address, + NvU32 hopCount, + NvU32 dpMsDevAddrState) +{ + NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS params = {0}; + params.subDeviceInstance = this->subdeviceIndex; + params.displayId = this->displayId; + params.activeDevAddr = address; + params.sorIndex = provider->getSorIndex(); + params.dpLink = provider->getLinkIndex(); + params.hopCount = hopCount; + params.dpMsDevAddrState = dpMsDevAddrState; + + provider->rmControl0073(NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG, ¶ms, sizeof params); +} + +// +// EvoMainLink::setDpStereoMSAParameters does the DP library Stereo override for +// In-band signaling through the MSA MISC1 field and keeps the rest of the MSA +// params the same. +// +// On GK110 and later, when stereo is enabled, we send the stereo eye +// information to the sink device through the MSA MISC1 bits 2:1. Certain +// DP 1.2 non-compliant DP->VGA dongles cannot handle this information, and +// lose all signal when these bits are non-zero. This WAR uses a RM control +// to override those MSA bits to zero. It should be called whenever a DP->VGA +// dongle is in use. 
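+// +// (Added note: the override below only adds the MISC1 stereo field selected by +// DRF_SHIFTMASK(NV_DP_MSA_PROPERTIES_MISC1_STEREO) to the caller's mask; all other +// MSA values and masks supplied by the caller are passed through unchanged.)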
+// +bool EvoMainLink::setDpStereoMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams) +{ + NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS params = {0}; + params.subDeviceInstance = this->subdeviceIndex; + params.displayId = msaparams.displayId; + //clubbing the MSA params passed by DD with Dp Library Stereo Override + params.bStereoPhaseInverse = msaparams.bStereoPhaseInverse; + params.featureValues.misc[1] = msaparams.featureValues.misc[1]; + + if (bStereoEnable) { + params.bEnableMSA = NV_TRUE | msaparams.bEnableMSA; + params.featureMask.miscMask[1] = DRF_SHIFTMASK(NV_DP_MSA_PROPERTIES_MISC1_STEREO) | msaparams.featureMask.miscMask[1]; + } else { + params.bEnableMSA = NV_FALSE | msaparams.bEnableMSA; + params.featureMask.miscMask[1] |= msaparams.featureMask.miscMask[1]; + } + + NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES, ¶ms, sizeof params); + + // + // NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES is only implemented on GK110 and + // later, but this WAR is unnecessary on other GPUs, so ignore + // ERROR_NOT_SUPPORTED. + // + // XXX This may fail if a future GPU requires this WAR but does not + // implement this rmcontrol. To avoid that, this class would need to be + // aware of which evo display HAL is in use. + // + if (ret != NVOS_STATUS_SUCCESS && ret != NVOS_STATUS_ERROR_NOT_SUPPORTED) { + DP_ASSERT(!"Enabling MSA stereo override failed!"); + return false; + } + + return true; +} + +// +// EvoMainLink::setDpMSAParameters clubs MSA parameters passed by DD for format YCbCr4:2:0 +// with DP library Stereo override for In-band signaling through the MSA MISC1 field. +// +// On GK110 and later, when stereo is enabled, we send the stereo eye +// information to the sink device through the MSA MISC1 bits 2:1. Certain +// DP 1.2 non-compliant DP->VGA dongles cannot handle this information, and +// lose all signal when these bits are non-zero. This WAR uses a RM control +// to override those MSA bits to zero. It should be called whenever a DP->VGA +// dongle is in use. 
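+// +// A minimal, hypothetical call site (the names "msa", "bStereoActive" and "main" are +// illustrative, not part of this file): once the caller has filled an +// NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS structure "msa" for its YCbCr4:2:0 mode, +// it would invoke main->setDpMSAParameters(bStereoActive, msa); the routine below then +// copies the caller's values/masks and ORs in the stereo override before issuing the RM control.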
+// +bool EvoMainLink::setDpMSAParameters(bool bStereoEnable, const NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS &msaparams) +{ + NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS params = {0}; + params.subDeviceInstance = this->subdeviceIndex; + params.displayId = msaparams.displayId; + //clubbing the MSA params passed by DD with Dp Library Stereo Override + params.bStereoPhaseInverse = msaparams.bStereoPhaseInverse; + params.bCacheMsaOverrideForNextModeset = true; + params.featureValues.misc[0] = msaparams.featureValues.misc[0]; + params.featureValues.misc[1] = msaparams.featureValues.misc[1]; + params.featureMask.miscMask[0] = msaparams.featureMask.miscMask[0]; + + params.featureValues.rasterTotalHorizontal = msaparams.featureValues.rasterTotalHorizontal; + params.featureValues.rasterTotalVertical = msaparams.featureValues.rasterTotalVertical; + params.featureValues.activeStartHorizontal = msaparams.featureValues.activeStartHorizontal; + params.featureValues.activeStartVertical = msaparams.featureValues.activeStartVertical; + params.featureValues.surfaceTotalHorizontal = msaparams.featureValues.surfaceTotalHorizontal; + params.featureValues.surfaceTotalVertical = msaparams.featureValues.surfaceTotalVertical; + params.featureValues.syncWidthHorizontal = msaparams.featureValues.syncWidthHorizontal; + params.featureValues.syncPolarityHorizontal = msaparams.featureValues.syncPolarityHorizontal; + params.featureValues.syncHeightVertical = msaparams.featureValues.syncHeightVertical; + params.featureValues.syncPolarityVertical = msaparams.featureValues.syncPolarityVertical; + + params.featureMask.bRasterTotalHorizontal = msaparams.featureMask.bRasterTotalHorizontal; + params.featureMask.bRasterTotalVertical = msaparams.featureMask.bRasterTotalVertical; + params.featureMask.bActiveStartHorizontal = msaparams.featureMask.bActiveStartHorizontal; + params.featureMask.bActiveStartVertical = msaparams.featureMask.bActiveStartVertical; + params.featureMask.bSurfaceTotalHorizontal = msaparams.featureMask.bSurfaceTotalHorizontal; + params.featureMask.bSurfaceTotalVertical = msaparams.featureMask.bSurfaceTotalVertical; + params.featureMask.bSyncWidthHorizontal = msaparams.featureMask.bSyncWidthHorizontal; + params.featureMask.bSyncPolarityHorizontal = msaparams.featureMask.bSyncPolarityHorizontal; + params.featureMask.bSyncHeightVertical = msaparams.featureMask.bSyncHeightVertical; + params.featureMask.bSyncPolarityVertical = msaparams.featureMask.bSyncPolarityVertical; + + params.featureValues.reserved[0] = msaparams.featureValues.reserved[0]; + params.featureValues.reserved[1] = msaparams.featureValues.reserved[1]; + params.featureValues.reserved[2] = msaparams.featureValues.reserved[2]; + + params.pFeatureDebugValues = msaparams.pFeatureDebugValues; + + if (bStereoEnable) { + params.bEnableMSA = NV_TRUE | msaparams.bEnableMSA; + params.featureMask.miscMask[1] = DRF_SHIFTMASK(NV_DP_MSA_PROPERTIES_MISC1_STEREO) | msaparams.featureMask.miscMask[1]; + } else { + params.bEnableMSA = NV_FALSE | msaparams.bEnableMSA; + params.featureMask.miscMask[1] |= msaparams.featureMask.miscMask[1]; + } + + NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES, ¶ms, sizeof params); + + // + // NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES is only implemented on GK110 and + // later, but this WAR is unnecessary on other GPUs, so ignore + // ERROR_NOT_SUPPORTED. + // + // XXX This may fail if a future GPU requires this WAR but does not + // implement this rmcontrol. 
To avoid that, this class would need to be + // aware of which evo display HAL is in use. + // + if (ret != NVOS_STATUS_SUCCESS && ret != NVOS_STATUS_ERROR_NOT_SUPPORTED) { + DP_ASSERT(!"Enabling MSA stereo override failed!"); + return false; + } + + return true; +} + +bool EvoMainLink::setFlushMode() +{ + NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params; + dpMemZero(¶ms, sizeof(params)); + + params.bFireAndForget = NV_FALSE; + + params.base.subdeviceIndex = subdeviceIndex; + params.sorNumber = provider->getSorIndex(); + params.bEnable = NV_TRUE; + params.bForceRgDiv = NV_FALSE; + params.bImmediate = NV_FALSE; + params.headMask = 0; + + NvU32 ret = provider->rmControl5070(NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, ¶ms, sizeof params); + + DP_ASSERT((ret == NVOS_STATUS_SUCCESS) && "Enabling flush mode failed!"); + + return ret == NVOS_STATUS_SUCCESS; +} + +void EvoMainLink::clearFlushMode(unsigned headMask, bool testMode) +{ + NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params; + dpMemZero(¶ms, sizeof(params)); + + params.bFireAndForget = NV_FALSE; + params.base.subdeviceIndex = subdeviceIndex; + params.sorNumber = provider->getSorIndex(); + params.bEnable = NV_FALSE; + params.bImmediate = NV_FALSE; + params.headMask = headMask; + params.bForceRgDiv = testMode; + + NvU32 ret = provider->rmControl5070(NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, ¶ms, sizeof params); + if (ret != NVOS_STATUS_SUCCESS) + { + DP_LOG(("DP_EVO> Disabling flush mode failed!")); + } +} + + +bool EvoMainLink::physicalLayerSetTestPattern(PatternInfo * patternInfo) +{ + // Main parameter + NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS params; + + // To identify which test pattern to transmit. + NV0073_CTRL_DP_TESTPATTERN ctrlPattern; + + dpMemZero(¶ms, sizeof(params)); + dpMemZero(&ctrlPattern, sizeof(ctrlPattern)); + + switch (patternInfo->lqsPattern) + { + case LINK_QUAL_DISABLED: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_NONE; break; + case LINK_QUAL_D10_2: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_D10_2; break; + case LINK_QUAL_SYM_ERROR: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_SERMP; break; + case LINK_QUAL_PRBS7: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_PRBS_7; break; + case LINK_QUAL_CP2520PAT3: ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_CP2520PAT3; break; + case LINK_QUAL_80BIT_CUST: + { + ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_CSTM; + + params.cstm.lower = patternInfo->ctsmLower; + params.cstm.middle = patternInfo->ctsmMiddle; + params.cstm.upper = patternInfo->ctsmUpper; + break; + } +#ifdef NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE + case LINK_QUAL_HBR2_COMPLIANCE_EYE: + { + ctrlPattern.testPattern = NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE; + params.cstm.lower = 0; + params.cstm.middle = 0; + params.cstm.upper = 0; + break; + } +#endif + default: + DP_ASSERT(0 && "Unknown Phy Pattern"); + return false; + } + + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + params.testPattern = ctrlPattern; + + // + // Set the appropriate laneMask based on the current lane count. The laneMask is used for GF119+ chips + // only so it doesn't matter if we populate it for all chips. It is set to all lanes since + // setting the test pattern on a lane that is off is effectively a nop. + // The laneMask allows for setting the pattern on specific lanes to check for cross-talk, which is the + // phenomenon of observing the signal crossing over to a different lane where it's not set. 
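+ // (Illustrative example, not exercised by this function: a hypothetical single-lane + // check could pass a mask of 0x1 instead, driving the pattern on lane 0 only and + // watching whether it shows up on lanes 1-3; here all four lanes are always selected.)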
+ // + params.laneMask = 0xf; + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TESTPATTERN, ¶ms, sizeof(params)); + + return code == NVOS_STATUS_SUCCESS; +} + +AuxBus::status EvoAuxBus::transaction(Action action, Type type, int address, + NvU8 * buffer, unsigned sizeRequested, + unsigned * sizeCompleted, + unsigned *pNakReason, + NvU8 offset, NvU8 nWriteTransactions) +{ + NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params; + + DP_ASSERT(sizeRequested <= NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE); + + dpMemZero(¶ms, sizeof(params)); + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + + params.cmd = 0; + + if (type == native) + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX); + else + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _I2C); + + if (type == i2cMot) + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_I2C_MOT, _TRUE); + else + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_I2C_MOT, _FALSE); + + if (action == read) + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _READ); + else if (action == write) + { + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE); + dpMemCopy(params.data, buffer, sizeRequested); + } + else if (action == writeStatusUpdateRequest) + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE_STATUS); + else + DP_ASSERT(0 && "Unknown action"); + + params.addr = address; + + // + // By definition, an I2C-write-over-AUX request with + // zero bytes of data is an "address-only" transaction. + // + if ((sizeRequested == 0) && (type & (i2cMot | i2c)) && (action == write)) + { + DP_LOG(("DP> Client requested address-only transaction")); + params.bAddrOnly = NV_TRUE; + } + else if ((sizeRequested == 0) && (type == native)) + { + // Native aux transactions with size requested zero are not allowed. + DP_ASSERT(0 && "Native Aux transactions shouldn't have zero size requested"); + return nack; + } + + // Control call is taking size as 0-based. + if (sizeRequested == 0) + { + // + // I2c transactions with size requested zero. Decrementing by 1 will + // lead to 0xffffff(RM_INVALID_DATA). So keep size as zero only. + // + params.size = 0; + } + else + { + params.size = sizeRequested - 1; + } + + NvU32 code = 0; + NvU8 retries = 0; + do + { + retries++; + params.retryTimeMs = 0; + code = provider->rmControl0073(NV0073_CTRL_CMD_DP_AUXCH_CTRL, ¶ms, sizeof(params)); + // eDP is not fully powered up yet. Should not access the panel too early. + if (params.retryTimeMs > 0) + { + timer->sleep(params.retryTimeMs); + } + } while (NVOS_STATUS_SUCCESS != code && params.retryTimeMs && retries < 3); + + if (pNakReason != NULL) + { + *pNakReason = params.replyType; + } + + if (action == writeStatusUpdateRequest && code == NVOS_STATUS_ERROR_NOT_SUPPORTED) + { + // + // On some chips write status requests are generated implicitly by the + // hardware. So while the RmControl() will fail with a "not supported" + // error, the request still went out on the DPAUX channel as part of + // the last IC-over-AUX write transaction. So the error should be ignored. + // + DP_LOG(("DP> %s: Ignore ERROR_NOT_SUPPORTED for writeStatusUpdateRequest. Returning Success", __FUNCTION__)); + return AuxBus::success; + } + + // In case of Timeout we need to retry again for minimum no. 
of times + if (code != NVOS_STATUS_SUCCESS && code != NVOS_STATUS_ERROR_TIMEOUT) + { + if (devicePlugged) + { + DP_LOG(("DP> AuxChCtl Failing, if a device is connected you shouldn't be seeing this")); + } + return nack; + } + else if (code == NVOS_STATUS_ERROR_TIMEOUT) + { + return AuxBus::defer; + } + + *sizeCompleted = params.size; + + // Reset sizeCompleted if transaction failed. + if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER || + params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER) + *sizeCompleted = 0; + + if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK) + { + // if it was read operation copy read data to buffer + if (action == read) + { + // Check the size of data to be copied. Should not be + // more than available buffer + if (params.size > sizeRequested) + { + params.size = sizeRequested; + } + dpMemCopy(buffer, params.data, params.size); + } + + return AuxBus::success; + } + + if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK || + params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CNACK || + params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT) + return AuxBus::nack; + + if (params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER || + params.replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER) + return AuxBus::defer; + + DP_ASSERT(0 && "Unknown reply type"); + return AuxBus::nack; +} + +unsigned EvoAuxBus::transactionSize() +{ + return NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE; +} + +void EvoAuxBus::setDevicePlugged(bool plugged) +{ + devicePlugged = plugged; +} + +void EvoMainLink::preLinkTraining(NvU32 head) +{ + provider->preLinkTraining(head); +} + +void EvoMainLink::postLinkTraining(NvU32 head) +{ + provider->postLinkTraining(head); +} + +void EvoMainLink::initializeRegkeyDatabase() +{ + NvU32 i; + if (dpRegkeyDatabase.bInitialized) + return; + for (i = 0; i < sizeof(DP_REGKEY_TABLE)/sizeof(DP_REGKEY_TABLE[0]); i++) + { + NvU32 tempValue = 0; + tempValue = provider->getRegkeyValue(DP_REGKEY_TABLE[i].pName); + switch (DP_REGKEY_TABLE[i].valueType) + { + case DP_REG_VAL_U32: + *(NvU32*)(DP_REGKEY_TABLE[i].pValue) = tempValue; + break; + case DP_REG_VAL_U16: + *(NvU16*)(DP_REGKEY_TABLE[i].pValue) = tempValue & 0xFFFF; + break; + case DP_REG_VAL_U8: + *(NvU8*)(DP_REGKEY_TABLE[i].pValue) = tempValue & 0xFF; + break; + case DP_REG_VAL_BOOL: + *(bool*)(DP_REGKEY_TABLE[i].pValue) = !!tempValue; + break; + } + } + dpRegkeyDatabase.bInitialized = true; +} + +void EvoMainLink::applyRegkeyOverrides() +{ + if (!dpRegkeyDatabase.bInitialized) + { + DP_ASSERT(0 && "dpRegkeyDatabase is not initialized before calling applyRegkeyOverrides."); + this->initializeRegkeyDatabase(); + } + _isMstDisabledByRegkey = dpRegkeyDatabase.bMstDisabled; + _isDscDisabledByRegkey = dpRegkeyDatabase.bDscDisabled; + _skipPowerdownEDPPanelWhenHeadDetach = dpRegkeyDatabase.bPoweroffEdpInHeadDetachSkipped; + _applyLinkBwOverrideWarRegVal = dpRegkeyDatabase.bLinkBwOverrideWarApplied; + _enableMSAOverrideOverMST = dpRegkeyDatabase.bMsaOverMstEnabled; +} + +NvU32 EvoMainLink::getRegkeyValue(const char *key) +{ + NvU32 i; + if (!dpRegkeyDatabase.bInitialized) + { + DP_ASSERT(0 && "dpRegkeyDatabase is not initialized before calling getRegkeyValue."); + initializeRegkeyDatabase(); + } + if (key == NULL || key[0] == '\0') + return 0; + + for (i = 0; i < sizeof(DP_REGKEY_TABLE)/sizeof(DP_REGKEY_TABLE[0]); i++) + { + NvU32 j = 0; + bool strSame = true; + while (key[j] != '\0' && DP_REGKEY_TABLE[i].pName[j] != '\0') + { + if (key[j] != DP_REGKEY_TABLE[i].pName[j]) + { + 
strSame = false; + break; + } + ++j; + } + if (strSame && key[j] == '\0' && DP_REGKEY_TABLE[i].pName[j] == '\0') + { + switch (DP_REGKEY_TABLE[i].valueType) + { + case DP_REG_VAL_U32: + return *(NvU32*)(DP_REGKEY_TABLE[i].pValue); + case DP_REG_VAL_U16: + return (NvU32)*(NvU16*)(DP_REGKEY_TABLE[i].pValue); + case DP_REG_VAL_U8: + return (NvU32)*(NvU8*)(DP_REGKEY_TABLE[i].pValue); + case DP_REG_VAL_BOOL: + return (NvU32)*(bool*)(DP_REGKEY_TABLE[i].pValue); + } + } + } + DP_ASSERT(0 && "Requested regkey not found in dpRegkeyDatabase."); + return 0; +} + +const DP_REGKEY_DATABASE& EvoMainLink::getRegkeyDatabase() +{ + return dpRegkeyDatabase; +} + +NvU32 EvoMainLink::getSorIndex() +{ + return provider->getSorIndex(); +} + +bool EvoMainLink::isInbandStereoSignalingSupported() +{ + return provider->isInbandStereoSignalingSupported(); +} + +bool EvoMainLink::train(const LinkConfiguration & link, bool force, + LinkTrainingType linkTrainingType, + LinkConfiguration *retLink, bool bSkipLt, + bool isPostLtAdjRequestGranted, unsigned phyRepeaterCount) +{ + NvU32 targetIndex; + NvU32 ltCounter = retLink->getLTCounter(); + bool bTrainPhyRepeater = + (!link.bDisableLTTPR) && (_isLTPhyRepeaterSupported); + + if (provider->getSorIndex() == DP_INVALID_SOR_INDEX) + { + // bail out and Skip LT since SOR is not allocated for this displayID + return false; + } + NvU32 err = 0; + + NvU32 dpCtrlCmd = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) | + DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE); + + if (link.multistream) + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_FORMAT_MODE, _MULTI_STREAM ); + + if(link.bEnableFEC) + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _ENABLE_FEC, _TRUE); + + if (isPostLtAdjRequestGranted) + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _POST_LT_ADJ_REQ_GRANTED, _YES ); + + if (link.enhancedFraming) + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_ENHANCED_FRAMING, _TRUE ); + if (bSkipLt) + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SKIP_HW_PROGRAMMING, _YES ); + if (force) + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAKE_LINK_TRAINING, _DONOT_TOGGLE_TRANSMISSION ); + + if (linkTrainingType == NO_LINK_TRAINING) + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _NO_LINK_TRAINING, _YES ); + else if (linkTrainingType == FAST_LINK_TRAINING) + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAST_LINK_TRAINING, _YES ); + + targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK; + if (bTrainPhyRepeater && (_rmPhyRepeaterCount != phyRepeaterCount)) + { + // If LTTPR count is out of sync between DPLib and RM, do not link train LTTPRs. + bTrainPhyRepeater = false; + } + + if (bTrainPhyRepeater) + { + + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _TRAIN_PHY_REPEATER, _YES ); + // + // Start from the one closest to GPU. Note this is 1-based index. + // + targetIndex = phyRepeaterCount; + } + + NV_DPTRACE_INFO(LINK_TRAINING_START, link.multistream, link.peakRate, link.lanes, + phyRepeaterCount, _rmPhyRepeaterCount, bTrainPhyRepeater, targetIndex); + + NvU32 status = 0; + NvU8 retries = 0; + bool fallback = false; + + // + // Limited attempts to unblock infinite LT loop while CR failure restores + // high rate and lanes for EQ failure + // + NvU32 crHighRateFallbackCount = 0; + + // + // The rate and lane count we send to RM might be different than what client + // sent to us since fallback might happen. 
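+ // (Summary of the fallback implemented below: a clock-recovery failure lowers the + // link rate first, e.g. HBR3 -> HBR2 -> HBR -> RBR, and only trims lanes once RBR is + // reached, while a channel-equalization failure trims the lane count 4 -> 2 -> 1, + // unless no lane passed CR, in which case the link rate is lowered instead.)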
+ // + LinkConfiguration requestRmLC = link; + do + { + NvU32 dpCtrlData = 0; + NvU64 linkrate = requestRmLC.peakRate; + NvU64 linkBw = 0; + + switch (linkrate) + { + case RBR: + case EDP_2_16GHZ: + case EDP_2_43GHZ: + case HBR: + case EDP_3_24GHZ: + case EDP_4_32GHZ: + case HBR2: + case HBR3: + linkBw = linkrate / DP_LINK_BW_FREQ_MULTI_MBPS; + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LINK_BW, + linkBw, dpCtrlData); + break; + default: + if (requestRmLC.lanes != 0) + { + DP_ASSERT(0 && "Unknown rate"); + return false; + } + break; + } + + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, + requestRmLC.lanes, dpCtrlData); + + if (requestRmLC.lanes == 0) + { + // Only need to target sink when powering down the link. + targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK; + } + + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _TARGET, + targetIndex, dpCtrlData); + + // Properly wait eDP to power up before link training. + status = 0; + retries = 0; + fallback = false; + dpCtrlCmd = FLD_SET_DRF(0073_CTRL, _DP_CMD, _FALLBACK_CONFIG, _FALSE, dpCtrlCmd); + do + { + NV0073_CTRL_DP_CTRL_PARAMS params; + + dpMemZero(¶ms, sizeof(params)); + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + params.cmd = dpCtrlCmd; + params.data = dpCtrlData; + + retries++; + params.retryTimeMs = 0; + status = provider->rmControl0073(NV0073_CTRL_CMD_DP_CTRL, ¶ms, sizeof(params)); + ltCounter++; + err = params.err; + + if (params.retryTimeMs > 0) + { + timer->sleep(params.retryTimeMs); + } + + if (status == NVOS_STATUS_SUCCESS || bSkipLt) + { + // if LT failed when bSkipLt was marked, no point in attempting LT again. + break; + } + + if (!params.retryTimeMs || retries >= 3) + { + break; + } + + } while (true); + + if (NVOS_STATUS_SUCCESS == status) + { + if (targetIndex != NV0073_CTRL_DP_DATA_TARGET_SINK) + { + targetIndex -= 1; + continue; + } + else + { + // all done, leave the loop. + break; + } + } + + if (requestRmLC.policy.skipFallback() || bSkipLt) + { + // + // if LT failed when bSkipLT was marked, no point in falling back as the issue + // is not with LinkConfig. + // + break; + } + + if (FLD_TEST_DRF(0073_CTRL_DP, _CMD, _TRAIN_PHY_REPEATER, _YES, dpCtrlCmd) && + FLD_TEST_DRF(0073_CTRL_DP, _ERR, _INVALID_PARAMETER, _ERR, err) && + FLD_TEST_DRF(0073_CTRL_DP, _ERR, _TRAIN_PHY_REPEATER, _ERR, err)) + { + // + // RM has less LTTPR than DPLib expected. + // - Force to do transparent mode. + // + targetIndex = NV0073_CTRL_DP_DATA_TARGET_SINK; + dpCtrlCmd = FLD_SET_DRF(0073_CTRL, _DP_CMD, _TRAIN_PHY_REPEATER, + _NO, dpCtrlCmd); + continue; + } + + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FALLBACK_CONFIG, _TRUE); + + if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CLOCK_RECOVERY, _ERR, err)) + { + // If failed CR, check if we need to fallback. + if (requestRmLC.peakRate != RBR) + { + // + // We need to fallback on link rate if the following conditions are met: + // 1. CR or EQ phase failed. + // 2. The request link bandwidth is NOT RBR + // + fallback = true; + requestRmLC.lowerConfig(); + } + else + { + // Already RBR + // Check how many lanes is done. + requestRmLC.lanes = DRF_VAL(0073_CTRL_DP, _ERR, _CR_DONE_LANE, err); + + while (!IS_VALID_LANECOUNT(requestRmLC.lanes)) + { + requestRmLC.lanes--; + } + + if (requestRmLC.lanes == 0) + { + // This is to WAR some system that doesn't set CR_DONE or EQ_DONE at all. + // In this case, we just simply try half of lanes. 
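+ // (For example, a 4-lane request retries with 2 lanes and a 2-lane request with 1; + // a 1-lane request halves to 0 and we bail out just below.)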
+ requestRmLC.lanes = DRF_VAL(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, dpCtrlData) / 2; + if (requestRmLC.lanes == 0) + { + // Nothing to try. Bail out. + break; + } + } + // Set back to original desired rate. + requestRmLC.peakRate = link.peakRate; + fallback = true; + crHighRateFallbackCount++; + } + } + if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CHANNEL_EQUALIZATION, _ERR, err)) + { + // + // If Channel equalization fails, we need to use the fallback policy + // of reducing the lane count vs link rate, but in the special case + // when all lanes have failed CR, we resort to lowering link rate instead + // (this address the new Fallback SCR v2.0) + // + if (FLD_TEST_DRF(0073_CTRL_DP, _ERR, _CR_DONE_LANE, _0_LANE, err)) + { + //Per spec, if link rate has already been reduced to RBR, exit fallback + if(requestRmLC.peakRate == RBR || !requestRmLC.lowerConfig()) + break; + } + else + { + if(!requestRmLC.lowerConfig(true)) // bReduceLaneCnt = true + break; + } + fallback = true; + } + if (fallback == false) + { + // Nothing to fallback, give up. + break; + } + if ((phyRepeaterCount > 0) && (bTrainPhyRepeater)) + { + // If fallback, need to start from beginning. + targetIndex = phyRepeaterCount; + } + } while (crHighRateFallbackCount < NV_DP_RBR_FALLBACK_MAX_TRIES); + + // + // Result should be checked for only the control call status. 'err' + // doesn't represent failure in LT - some compliance tests such as 700.1.1.2 + // intentionally test against unexpected sink caps + // + bool result = (status == NVOS_STATUS_SUCCESS); + retLink->setLaneRate(requestRmLC.peakRate, result ? requestRmLC.lanes : 0); + retLink->setLTCounter(ltCounter); + + NV_DPTRACE_INFO(LINK_TRAINING_DONE, status, requestRmLC.peakRate, requestRmLC.lanes); + + return result; +} + +bool EvoMainLink::retrieveRingBuffer(NvU8 dpRingBuffertype, NvU32 numRecords) +{ + return false; +} + +// Return the current mux state. 
Returns false if device is not mux capable +bool EvoMainLink::getDynamicMuxState(NvU32 *muxState) +{ + bool bIsMuxCapable = false; + NvU32 ret = 0; + NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS muxStatusParams; + + if (!muxState) + return false; + + *muxState = 0; + + if (!isDynamicMuxCapable()) + return false; + + dpMemZero(&muxStatusParams, sizeof(muxStatusParams)); + muxStatusParams.subDeviceInstance = subdeviceIndex; + muxStatusParams.displayId = displayId; + muxStatusParams.muxStatus = 0; + + ret = provider->rmControl0073(NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS, + &muxStatusParams, sizeof(muxStatusParams)); + if (ret == NV_OK && + DRF_VAL(0073, _CTRL_DFP_DISP_MUX, _STATE, muxStatusParams.muxStatus) != NV0073_CTRL_DFP_DISP_MUX_STATE_INVALID) + { + bIsMuxCapable = true; + *muxState = muxStatusParams.muxStatus; + } + + return bIsMuxCapable; +} + +bool EvoMainLink::aquireSema() +{ + NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS params; + + dpMemZero(¶ms, sizeof(params)); + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + params.owner = NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RM; + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_AUXCH_SET_SEMA, ¶ms, sizeof(params)); + + return code == NVOS_STATUS_SUCCESS; +} + +void EvoMainLink::releaseSema() +{ + NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS params; + + dpMemZero(¶ms, sizeof(params)); + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + params.owner = NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RELEASE; + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_AUXCH_SET_SEMA, ¶ms, sizeof(params)); + + DP_USED(code); + DP_ASSERT(code == NVOS_STATUS_SUCCESS); +} + +void EvoMainLink::configurePowerState(bool bPowerUp) +{ + NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS params; + + dpMemZero(¶ms, sizeof(params)); + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + params.ctrl = bPowerUp ? 
NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP : + NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN; + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL, ¶ms, sizeof(params)); + + DP_ASSERT(code == NVOS_STATUS_SUCCESS); +} + +void EvoMainLink::getLinkConfig(unsigned &laneCount, NvU64 & linkRate) +{ + NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS params; + dpMemZero(¶ms, sizeof(params)); + + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_LINK_CONFIG, ¶ms, sizeof(params)); + + if (code == NVOS_STATUS_SUCCESS) + { + laneCount = params.laneCount; + linkRate = (NvU64)27000000 * params.linkBW; // BUG: Beware, turbo mode need to be taken into account + } + else + { + laneCount = 0; + linkRate = 0; + } +} + +bool EvoMainLink::getMaxLinkConfigFromUefi(NvU8 &linkRate, NvU8 &laneCount) +{ + if (provider->getMaxLinkConfigFromUefi(linkRate, laneCount)) + { + if (IS_VALID_LANECOUNT(laneCount) && IS_VALID_LINKBW(linkRate)) + { + return true; + } + } + return false; +} + +bool EvoMainLink::queryAndUpdateDfpParams() +{ + NV0073_CTRL_DFP_GET_INFO_PARAMS dfpParams; + NvU32 dfpFlags; + dpMemZero(&dfpParams, sizeof(dfpParams)); + dfpParams.subDeviceInstance = subdeviceIndex; + dfpParams.displayId = displayId; + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_DFP_GET_INFO, &dfpParams, sizeof(dfpParams)); + + if (code != NVOS_STATUS_SUCCESS) + { + DP_ASSERT(0 && "Unable to query DFP params."); + return false; + } + + dfpFlags = dfpParams.flags; + _isEDP = DRF_VAL(0073, _CTRL_DFP_FLAGS, _EMBEDDED_DISPLAYPORT, dfpFlags) == + NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE; + + if (_isLTPhyRepeaterSupported) + { + _rmPhyRepeaterCount = DRF_VAL(0073_CTRL_DFP, _FLAGS, + _DP_PHY_REPEATER_COUNT, dfpFlags); + } + + _needForceRmEdid = DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_FORCE_RM_EDID ,dfpFlags) == + NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE; + + _isPC2Disabled = DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_POST_CURSOR2_DISABLED, dfpFlags) == + NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE; + + + switch(DRF_VAL(0073, _CTRL_DFP_FLAGS, _DP_LINK_BW, dfpFlags)) + { + default: + DP_ASSERT(0 && "maxLinkRate is set improperly in dfp object."); + // intentionally fall-thru. + case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS: + _maxLinkRateSupportedDfp = RBR; + break; + case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS: + _maxLinkRateSupportedDfp = HBR; + break; + case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS: + _maxLinkRateSupportedDfp = HBR2; + break; + case NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS: + _maxLinkRateSupportedDfp = HBR3; + break; + } + + _isDynamicMuxCapable = FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _DYNAMIC_MUX_CAPABLE, _TRUE, dfpFlags); + + return true; +} + +bool EvoMainLink::fetchEdidByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize) +{ + NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams; + pEdidParams = (NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS*) dpMalloc(sizeof(*pEdidParams)); + + if (pEdidParams == NULL) { + return false; + } + + dpMemZero(pEdidParams, sizeof(*pEdidParams)); + pEdidParams->subDeviceInstance = subdeviceIndex; + pEdidParams->displayId = displayId; + pEdidParams->flags = 0; // use default settings. + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, pEdidParams, sizeof(*pEdidParams)); + + if (code == NVOS_STATUS_SUCCESS) + { + // Silently dropping part of a too-large output buffer matches the + // behavior of the "V1" of this control. 
+ // But it may make sense to revisit this behavior now that it's under + // control of this client. + NvU32 copySize = NV_MIN(pEdidParams->bufferSize, bufferSize); + dpMemCopy(edidBuffer, pEdidParams->edidBuffer, copySize); + } else { + DP_ASSERT(0 && "Unable to read EDID."); + } + + dpFree(pEdidParams); + return code == NVOS_STATUS_SUCCESS; +} + +bool EvoMainLink::applyEdidOverrideByRmCtrl(NvU8* edidBuffer, NvU32 bufferSize) +{ + NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams = + (NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *) + dpMalloc(sizeof(*pEdidOverrideParams)); + + if (pEdidOverrideParams == NULL) { + return false; + } + + dpMemZero(pEdidOverrideParams, sizeof(*pEdidOverrideParams)); + pEdidOverrideParams->subDeviceInstance = subdeviceIndex; + pEdidOverrideParams->displayId = displayId; + if (bufferSize > sizeof(pEdidOverrideParams->edidBuffer)) { + DP_ASSERT(0 && "EDID override too large for edidBuffer"); + dpFree(pEdidOverrideParams); + return false; + } + pEdidOverrideParams->bufferSize = bufferSize; + dpMemCopy(&pEdidOverrideParams->edidBuffer, edidBuffer, bufferSize); + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2, + pEdidOverrideParams, + sizeof(*pEdidOverrideParams)); + if (code != NVOS_STATUS_SUCCESS) + { + DP_ASSERT(0 && "Unable to apply EDID override."); + dpFree(pEdidOverrideParams); + return false; + } + + DP_ASSERT(pEdidOverrideParams->bufferSize == bufferSize); + dpMemCopy(edidBuffer, &pEdidOverrideParams->edidBuffer, bufferSize); + + dpFree(pEdidOverrideParams); + + return true; + +} + +bool EvoMainLink::isEDP() +{ + return _isEDP; +} + +bool EvoMainLink::supportMSAOverMST() +{ + return _enableMSAOverrideOverMST; +} + +bool EvoMainLink::skipPowerdownEdpPanelWhenHeadDetach() +{ + return _skipPowerdownEDPPanelWhenHeadDetach; +} + + +bool EvoMainLink::isActive() +{ + NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS params; + + for (int i = 0; i < 32; i++) + { + // + // Skip floorswept heads + // + if (!(allHeadMask & (1 << i))) + { + continue; + } + + dpMemZero(¶ms, sizeof params); + params.subDeviceInstance = 0; + params.head = i; + + NvU32 code = provider->rmControl0073(NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, ¶ms, sizeof(params)); + + if (code != NVOS_STATUS_SUCCESS) + { + DP_ASSERT(0 && "We can't get active displays, RM bug!"); + } + else if (params.displayId & displayId) + { + return true; + } + } + + return false; +} + +bool EvoMainLink::controlRateGoverning(NvU32 head, bool enable, bool updateNow) +{ + NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS params = {0}; + params.subDeviceInstance = this->subdeviceIndex; + params.head = head; + params.sorIndex = provider->getSorIndex(); + + if (enable) + { + params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _ENABLE_RG, _ON); + } + else + { + params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _ENABLE_RG, _OFF); + } + if (updateNow) + { + params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _TRIGGER_MODE, _IMMEDIATE); + } + else + { + params.flags |= DRF_DEF(0073_CTRL, _CMD_DP_SET_RATE_GOV_FLAGS, _TRIGGER_MODE, _LOADV); + } + + provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_RATE_GOV, ¶ms, sizeof params); + + return true; +} + +bool EvoMainLink::getDpTestPattern(NV0073_CTRL_DP_TESTPATTERN * testPattern) +{ + NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS params = {0}; + + params.subDeviceInstance = this->subdeviceIndex; + params.displayId = this->displayId; + + if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_TESTPATTERN, ¶ms, sizeof params))) + { 
+ testPattern->testPattern = params.testPattern.testPattern; + return true; + } + else + return false; +} + +bool EvoMainLink::setDpTestPattern(NV0073_CTRL_DP_TESTPATTERN testPattern, NvU8 laneMask, NV0073_CTRL_DP_CSTM cstm, NvBool bIsHBR2, NvBool bSkipLaneDataOverride) +{ + NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS params = {0}; + + params.subDeviceInstance = this->subdeviceIndex; + params.displayId = this->displayId; + params.testPattern = testPattern; + params.laneMask = laneMask; + params.cstm = cstm; + params.bIsHBR2 = bIsHBR2; + params.bSkipLaneDataOverride = bSkipLaneDataOverride; + + if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TESTPATTERN, ¶ms, sizeof params))) + return true; + else + return false; +} + +bool EvoMainLink::getDpLaneData(NvU32 *numLanes, NvU32 *data) +{ + NV0073_CTRL_DP_LANE_DATA_PARAMS params = {0}; + + params.subDeviceInstance = this->subdeviceIndex; + params.displayId = this->displayId; + + if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_GET_LANE_DATA, ¶ms, sizeof params))) + { + *numLanes = params.numLanes; + dpMemCopy(data, params.data, NV0073_CTRL_MAX_LANES*4); + return true; + } + else + return false; +} + +bool EvoMainLink::setDpLaneData(NvU32 numLanes, NvU32 *data) +{ + NV0073_CTRL_DP_LANE_DATA_PARAMS params = {0}; + + params.subDeviceInstance = this->subdeviceIndex; + params.displayId = this->displayId; + params.numLanes = numLanes; + dpMemCopy(params.data, data, NV0073_CTRL_MAX_LANES*4); + + if (!(provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_LANE_DATA, ¶ms, sizeof params))) + return true; + else + return false; +} + +NvU32 EvoMainLink::monitorDenylistInfo(NvU32 ManufacturerID, NvU32 ProductID, DpMonitorDenylistData *pDenylistData) +{ + return provider->monitorDenylistInfo(ManufacturerID, ProductID, pDenylistData); +} +bool EvoMainLink::rmUpdateDynamicDfpCache(NvU32 headIndex, RmDfpCache* dfpCache, NvBool bResetDfp) +{ + NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS params = {0}; + params.headIndex = headIndex; + params.bcaps = dfpCache->bcaps; + for (unsigned i=0; i<5; i++) + params.bksv[i] = dfpCache->bksv[i]; + + params.bHdcpCapable = dfpCache->hdcpCapable; + params.subDeviceInstance = subdeviceIndex; + params.updateMask = dfpCache->updMask; + if (bResetDfp) + params.bResetDfp = NV_TRUE; + + if (!(provider->rmControl0073(NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE, ¶ms, sizeof params))) + return true; + else + return false; +} + +NvU32 EvoMainLink::allocDisplayId() +{ + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS params = {0}; + + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + + NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, ¶ms, sizeof(params)); + if (ret == NVOS_STATUS_SUCCESS) + { + return params.displayIdAssigned; + } + + return 0; +} + +bool EvoMainLink::freeDisplayId(NvU32 displayId) +{ + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS params = {0}; + + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + + NvU32 ret = provider->rmControl0073(NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, ¶ms, sizeof(params)); + return ret == NVOS_STATUS_SUCCESS; +} + +void EvoMainLink::configureTriggerSelect(NvU32 head, DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID streamIdentifier) +{ + NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS params = {0}; + params.head = head; + params.subDeviceInstance = subdeviceIndex; + params.sorIndex = provider->getSorIndex(); + params.singleHeadMSTPipeline = streamIdentifier; + 
provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT, ¶ms, sizeof params); +} + +void EvoMainLink::configureTriggerAll(NvU32 head, bool enable) +{ + NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS params = {0}; + params.head = head; + params.subDeviceInstance = subdeviceIndex; + params.enable = enable; + provider->rmControl0073(NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL, ¶ms, sizeof params); +} + +MainLink * DisplayPort::MakeEvoMainLink(EvoInterface * provider, Timer * timer) +{ + return new EvoMainLink(provider, timer); +} + +AuxBus * DisplayPort::MakeEvoAuxBus(EvoInterface * provider, Timer * timer) +{ + return new EvoAuxBus(provider, timer); +} + +bool EvoMainLink::dscCrcTransaction(NvBool bEnable, gpuDscCrc *data, NvU16 *headIndex) +{ + NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS params; + NvU32 code; + + dpMemZero(¶ms, sizeof(params)); + params.bEnable = bEnable ? NV_TRUE : NV_FALSE; + params.subDeviceInstance = subdeviceIndex; + params.headIndex = *headIndex; + + // see if setup or querying needs to be specified + if (data == NULL) + { + params.cmd = DRF_DEF(0073_CTRL, _DP_CRC_CONTROL, _CMD, _SETUP); + } + else + { + params.cmd = DRF_DEF(0073_CTRL, _DP_CRC_CONTROL, _CMD, _QUERY); + } + + // GPU part of the call + code = provider->rmControl0073(NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL, ¶ms, sizeof(params)); + if (code != NVOS_STATUS_SUCCESS) + { + DP_LOG(("DP> Crc control failed.")); + return false; + } + + // if the command is setup, return immediately + if (data != NULL) + { + data->gpuCrc0 = params.gpuCrc0; + data->gpuCrc1 = params.gpuCrc1; + data->gpuCrc2 = params.gpuCrc2; + } + + return true; +} + +// +// @brief This is to request RM to setup/reset link rate table, and save valid +// link rates for use. +// +// @param pLinkRateTable Pointer to link rate table to configure +// @param pLinkRates Pointer to LinkRates to keep valid link rates +// @return +// true Link rate table configured with at least one valid link rate +// false Otherwise +// +bool EvoMainLink::configureLinkRateTable +( + const NvU16 *pLinkRateTable, + LinkRates *pLinkRates +) +{ + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS params; + dpMemZero(¶ms, sizeof(params)); + + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + + // Setup provided link rate table, otherwise it will be reset + if (pLinkRateTable) + { + for (NvU32 i = 0; i < NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES; i++) + { + params.linkRateTbl[i] = pLinkRateTable[i]; + } + } + + NvU32 code = provider->rmControl0073( + NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, + ¶ms, sizeof(params)); + + if ((pLinkRates != NULL ) && (code == NVOS_STATUS_SUCCESS) && + (params.linkBwCount <= NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES)) + { + pLinkRates->clear(); + for (int i = 0; i < params.linkBwCount; i++) + { + switch (params.linkBwTbl[i]) + { + case linkBW_1_62Gbps: + case linkBW_2_16Gbps: + case linkBW_2_43Gbps: + case linkBW_2_70Gbps: + case linkBW_3_24Gbps: + case linkBW_4_32Gbps: + case linkBW_5_40Gbps: + case linkBW_8_10Gbps: + pLinkRates->import(params.linkBwTbl[i]); + break; + + default: + DP_LOG(("DP_EVO> %s: Unsupported link rate received", + __FUNCTION__)); + DP_ASSERT(0); + break; + } + } + return true; + } + return false; +} + +// +// @brief This is to request RM to enable/disable Fec +// +// @param enableFec Indicates if enable/disable is requested +// @return +// true If FEC was configured successfully +// false Otherwise +// +bool EvoMainLink::configureFec +( + const bool bEnableFec +) +{ + NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS params; + 
dpMemZero(¶ms, sizeof(params)); + + params.subDeviceInstance = subdeviceIndex; + params.displayId = displayId; + params.bEnableFec = bEnableFec; + + NvU32 code = provider->rmControl0073( + NV0073_CTRL_CMD_DP_CONFIGURE_FEC, + ¶ms, sizeof(params)); + + if (code == NVOS_STATUS_SUCCESS) + { + return true; + } + + return false; +} diff --git a/src/common/displayport/src/dp_groupimpl.cpp b/src/common/displayport/src/dp_groupimpl.cpp new file mode 100644 index 000000000..80a182d19 --- /dev/null +++ b/src/common/displayport/src/dp_groupimpl.cpp @@ -0,0 +1,331 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_groupimpl.cpp * +* DP device group implementation * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_connector.h" +#include "dp_list.h" +#include "dp_auxdefs.h" +#include "dp_deviceimpl.h" +#include "dp_groupimpl.h" +#include "dp_connectorimpl.h" + +using namespace DisplayPort; + +void GroupImpl::update(Device * dev, bool allocationState) +{ + Address::StringBuffer sb; + Address devAddress = dev->getTopologyAddress(); + DP_USED(sb); + + // Do not map a stream that is not yet turned on in the gpu. An update shall be sent later during NAE. + if (allocationState && !this->isHeadAttached()) + return; + + // + // Do not enable the stream on an unplugged device but take care of + // detached devices. We need to clear PBNs allocated by such devices + // + if (allocationState && !((DeviceImpl *)dev)->plugged) + return; + + // + // Check if Parent's messageManager exist or not. This is required for cases + // where parent branch itself has been unplugged. No message can be sent in this case. 
+ // + if (!parent->messageManager) + return; + + if (timeslot.count == 0 || + ((DeviceImpl *)dev)->payloadAllocated == allocationState) + return; + + if (!dev->getParent() || !((dev->getParent())->isPlugged())) + { + DeviceImpl * parentDev = NULL; + + // + // Send ALLOCATE_PAYLOAD with pbn 0 to parent port of previous branch + // Find first plugged parent branch & send message to it + // + while(devAddress.size() > 2) + { + devAddress.pop(); + parentDev = parent->findDeviceInList(devAddress.parent()); + + if (parentDev && parentDev->isPlugged()) + break; + } + + // If no parent found simply return as we don't have a valid address to send message + if (!parentDev) + return; + } + + NakData nakData; + for (int retries = 0 ; retries < 7; retries++) + { + AllocatePayloadMessage allocate; + unsigned sink = 0; // hardcode the audio sink to 0th in the device. + allocate.set(devAddress.parent(), devAddress.tail(), + dev->isAudioSink() ? 1 : 0, streamIndex, allocationState ? timeslot.PBN : 0, + &sink, true); + + // Trigger a refetch of epr + ((DeviceImpl *)dev)->bandwidth.enum_path.dataValid = false; + DeviceImpl * tail = (DeviceImpl *) dev; + while (tail && tail->getParent()) + { + tail->bandwidth.enum_path.dataValid = false; + tail = (DeviceImpl *)tail->getParent(); + } + + if (parent->messageManager->send(&allocate, nakData)) + { + if (allocationState) + { + DP_LOG(("DP-TM> Attached stream:%d to %s", streamIndex, dev->getTopologyAddress().toString(sb))); + } + else + { + DP_LOG(("DP-TM> Detached stream:%d from %s", streamIndex, dev->getTopologyAddress().toString(sb))); + } + + ((DeviceImpl *)dev)->payloadAllocated = allocationState; + + return; + } + } + + // we should not have ideally reached here unless allocate payload failed. + if (allocationState) + { + DP_LOG(("DP-TM> Allocate_payload: Failed to ATTACH stream:%d to %s", streamIndex, dev->getTopologyAddress().toString(sb))); + DP_ASSERT(0); + } + else + { + DP_LOG(("DP-TM> Allocate_payload: Failed to DETACH stream:%d from %s", streamIndex, dev->getTopologyAddress().toString(sb))); + DP_ASSERT(0); + } + +} + +void GroupImpl::insert(Device * dev) +{ + DP_ASSERT(!headInFirmware && "Cannot add or remove from a firmware group. You must perform a modeset away from the device"); + DeviceImpl * di = (DeviceImpl *)dev; + + if (isHeadAttached()) + { + if (di->activeGroup && di->activeGroup != this) + { + DP_ASSERT(0 && "Device already in active group, cannot add to another active group!"); + return; + } + di->activeGroup = this; + } + + members.insertFront(di); + + update(dev, true); + +} + +void GroupImpl::remove(Device * dev) +{ + DP_ASSERT(!headInFirmware && "Cannot add or remove from a firmware group. You must perform a modeset away from the device"); + + DeviceImpl * di = (DeviceImpl *)dev; + + if (isHeadAttached()) + { + di->activeGroup = 0; + } + members.remove(di); + + update(dev, false); + + updateVbiosScratchRegister(dev); +} + +void GroupImpl::destroy() +{ + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + remove(i); + + // Cancel any queue the auth callback. 
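+ // (Specifically, any pending tagHDCPReauthentication and tagStreamValidation timer + // callbacks are cancelled before the group object is deleted.)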
+ cancelHdcpCallbacks(); + + delete this; +} + +void GroupImpl::cancelHdcpCallbacks() +{ + authRetries = 0; + + parent->timer->cancelCallback(this, &tagHDCPReauthentication); + parent->timer->cancelCallback(this, &tagStreamValidation); + +} + +Device * GroupImpl::enumDevices(Device * previousDevice) +{ + return members.next(previousDevice); +} + +void GroupImpl::expired(const void * tag) +{ + if (tag == &tagHDCPReauthentication) + { + HDCPState hdcpState = {0}; + parent->main->configureHDCPGetHDCPState(hdcpState); + + if (authRetries < HDCP_AUTHENTICATION_RETRIES) + { + this->hdcpEnabled = hdcpState.HDCP_State_Encryption; + if (hdcpState.HDCP_State_Authenticated) + { + parent->isHDCPAuthOn = true; + authRetries = 0; + } + else + { + unsigned authDelay = (hdcpState.HDCP_State_22_Capable ? + HDCP22_AUTHENTICATION_COOLDOWN : HDCP_AUTHENTICATION_COOLDOWN); + + authRetries++; + parent->main->configureHDCPRenegotiate(); + parent->isHDCPAuthOn = false; + parent->timer->queueCallback(this, &tagHDCPReauthentication, + authDelay); + } + } + else + { + parent->isHDCPAuthOn = this->hdcpEnabled = false; + } + } + else if ( tag == &tagStreamValidation) + { + if (!(this->streamValidationDone)) + { + // If we are here we need to debug what has caused the problem for not getting notification from DD. + DP_ASSERT(0 && "DP> Didn't get final notification." ); + } + } +} + +bool GroupImpl::hdcpGetEncrypted() +{ + // + // Returns whether encryption is currently enabled + // + if (parent->isHDCPAuthOn) + { + return this->hdcpEnabled; + } + else + { + return false; + } +} + +void GroupImpl::updateVbiosScratchRegister(Device * lastDev) +{ + if (!parent->bDisableVbiosScratchRegisterUpdate && + parent->lastDeviceSetForVbios == lastDev) + { + // Take a device which is part of a group + for (ListElement * e = parent->deviceList.begin(); + e != parent->deviceList.end(); e = e->next) + { + DeviceImpl * dev = (DeviceImpl *)e; + + if (dev->activeGroup && dev->activeGroup->isHeadAttached()) + { + NvU32 address = 0; + NvU32 addrSize = dev->getTopologyAddress().size(); + + // Set the MS_SCRATCH_REGISTER for lighted up display + for (NvU32 i = addrSize; i; --i) + { + address |= ((dev->address[i-1] & 0xF) << ((addrSize - i)*4)); + } + + parent->main->configureMsScratchRegisters(address, addrSize, 3); + + parent->lastDeviceSetForVbios = (Device *)dev; + + return; + } + } + } +} + +// +// Helper function for attaching and detaching heads. +// +// For attach, we will assert if group already has head attached but for +// some device in the group, active group did not point to current group. +// For detach, we will assert if the group does not have head attached but +// some device in group has an active group OR head is marked attached but +// not all devies in the group have the current group as active group. +// This also sets or clears dev->activeGroup for each contained +// device. 
+// +void GroupImpl::setHeadAttached(bool attached) +{ + for (Device * i = enumDevices(0); i; i = enumDevices(i)) + { + DeviceImpl *di = (DeviceImpl *)i; + + if (attached) + { + if (headAttached) + { + DP_ASSERT(di->activeGroup == this); + } + di->activeGroup = this; + } + else + { + if (!headAttached) + { + DP_ASSERT(di->activeGroup == NULL); + } + else + { + DP_ASSERT(di->activeGroup == this); + } + di->activeGroup = NULL; + } + } + headAttached = attached; +} diff --git a/src/common/displayport/src/dp_guid.cpp b/src/common/displayport/src/dp_guid.cpp new file mode 100644 index 000000000..271aada8f --- /dev/null +++ b/src/common/displayport/src/dp_guid.cpp @@ -0,0 +1,81 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_guid.cpp * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_guid.h" +#include "dp_buffer.h" + +using namespace DisplayPort; + +// +// Linear congruential random number generator +// Seed values chosen from numerical methods +// +NvU32 GUIDBuilder::random() +{ + previousRandom = static_cast<NvU32>(((NvU64)1664525 * previousRandom + 1013904223) & 0xFFFFFFFF); + return previousRandom; +} + + +GUIDBuilder::GUIDBuilder(Timer * source, NvU32 salt) + : salt(salt), source(source) +{ + previousRandom = static_cast<NvU32>(source->getTimeUs() & 0xFFFFFFFF); +} + +void GUIDBuilder::makeGuid(GUID & guid) +{ + NvU64 currentTimer = source->getTimeUs(); + guid.data[0] = static_cast<NvU8>((salt >> 24) & 0xFF); + guid.data[1] = static_cast<NvU8>((salt >> 16) & 0xFF); + guid.data[2] = static_cast<NvU8>((salt >> 8) & 0xFF); + guid.data[3] = static_cast<NvU8>((salt) & 0xFF); + + guid.data[4] = static_cast<NvU8>((currentTimer >> 56) & 0xFF); + guid.data[5] = static_cast<NvU8>((currentTimer >> 48) & 0xFF); + guid.data[6] = static_cast<NvU8>((currentTimer >> 40) & 0xFF); + guid.data[7] = static_cast<NvU8>((currentTimer >> 32) & 0xFF); + guid.data[8] = static_cast<NvU8>((currentTimer >> 24) & 0xFF); + guid.data[9] = static_cast<NvU8>((currentTimer >> 16) & 0xFF); + guid.data[10] = static_cast<NvU8>((currentTimer >> 8) & 0xFF); + guid.data[11] = static_cast<NvU8>((currentTimer) & 0xFF); + + unsigned rnd = random(); + guid.data[12] = static_cast<NvU8>((rnd >> 24) & 0xFF); + guid.data[13] = static_cast<NvU8>((rnd >> 16) & 0xFF); + guid.data[14] = static_cast<NvU8>((rnd >> 8) & 0xFF); + guid.data[15] = static_cast<NvU8>((rnd) & 0xFF); + + // + // Spin until we get a new timer counter + // This guarantees a monotonically increasing counter + // + while (source->getTimeUs() == currentTimer) + ; +} diff --git a/src/common/displayport/src/dp_list.cpp b/src/common/displayport/src/dp_list.cpp new file mode 100644 index 000000000..d8b3b86bf --- /dev/null +++ b/src/common/displayport/src/dp_list.cpp @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* List **************************************\ +* * +* Module: dp_list.cpp * +* Simple doubly linked list * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_list.h" + +using namespace DisplayPort; +ListElement::ListElement() + : next(0), + prev(0) +{ +} + +ListElement::~ListElement() +{ + if (this->next) + { + this->prev->next = this->next; + this->next->prev = this->prev; + this->next = 0; + } +} + +List::List() +{ + this->next = this; + this->prev = this; +} + +void List::clear() +{ + while(!isEmpty()) + delete begin(); +} + +List::~List() +{ + clear(); + this->next = this; + this->prev = this; +} + +bool List::isEmpty() +{ + return this->next == this; +} + +void List::insertFront(ListElement * item) +{ + DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list"); + item->prev = this; + item->next = this->next; + item->prev->next = item; + item->next->prev = item; +} + +void List::insertBack(ListElement * item) +{ + DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list"); + item->prev = this->prev; + item->next = this; + item->prev->next = item; + item->next->prev = item; +} + +void List::insertBefore(ListElement * insertBeforeThis, ListElement * item) +{ + DP_ASSERT(item->next == 0 && "Attempt to insert when it's already in a list"); + item->next = insertBeforeThis; + item->prev = insertBeforeThis->prev; + insertBeforeThis->prev->next = item; + insertBeforeThis->prev = item; +} + +ListElement* List::front() +{ + DP_ASSERT(!isEmpty()); + return this->next; +} + +ListElement* List::last() +{ + DP_ASSERT(!isEmpty()); + return this->prev; +} + +ListElement * List::remove(ListElement * item) +{ + // Skip if its not already in a list + if (!item->next) + return item; + + item->prev->next = item->next; + item->next->prev = item->prev; + item->next = 0; + item->prev = 0; + + return item; +} + +bool List::contains(ListElement * item) +{ + for (ListElement * i = begin(); i!=end(); i = i->next) + { + if (i == item) + return true; + } + return false; +} + +ListElement * List::replace(ListElement * replacement, ListElement * replacee) +{ + if (!(replacement && replacee)) + { + DP_ASSERT(0 && "replacement or replaces is NULL pointer"); + return 0; + } + + DP_ASSERT(replacement->next && replacement->prev); + + // we are assuming replacee does exist in the list. + replacement->next = replacee->next; + replacement->prev = replacee->prev; + + if (replacement->next) + replacement->next->prev = replacement; + + if (replacement->prev) + replacement->prev->next = replacement; + + return replacee; +} diff --git a/src/common/displayport/src/dp_merger.cpp b/src/common/displayport/src/dp_merger.cpp new file mode 100644 index 000000000..362fc030d --- /dev/null +++ b/src/common/displayport/src/dp_merger.cpp @@ -0,0 +1,310 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
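dp_list.cpp above implements an intrusive, circular doubly linked list: the List object itself is the sentinel node, elements join a list simply by deriving from ListElement, and deleting an element unlinks it automatically. A short usage sketch against the interface shown above (PendingItem is a hypothetical element type, not part of this change):

    #include "dp_internal.h"
    #include "dp_list.h"

    using namespace DisplayPort;

    // Any object that should live in a List derives from ListElement;
    // the prev/next links are embedded in the element itself.
    struct PendingItem : ListElement
    {
        unsigned id;
        explicit PendingItem(unsigned id) : id(id) {}
    };

    void listUsageSketch()
    {
        List pending;

        pending.insertBack(new PendingItem(1));
        pending.insertBack(new PendingItem(2));
        pending.insertFront(new PendingItem(0));

        // Iteration runs from begin() up to the sentinel end(); the cursor is
        // advanced before unlinking, exactly as the driver code does.
        for (ListElement * i = pending.begin(); i != pending.end(); )
        {
            PendingItem * item = (PendingItem *)i;
            i = i->next;
            if (item->id == 1)
            {
                pending.remove(item);   // unlink first...
                delete item;            // ...then free
            }
        }

        pending.clear();                // deletes whatever is still linked
    }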
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_merger.cpp * +* Asynchronous Message merger * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_merger.h" +#include "dp_auxdefs.h" +#include "dp_crc.h" +#include "dp_messageheader.h" + +using namespace DisplayPort; + + +EncodedMessage * MessageTransactionMerger::pushTransaction(MessageHeader * header, Buffer * data) +{ + if (freeOnNextCall) + { + delete freeOnNextCall; + freeOnNextCall = 0; + } + + IncompleteMessage * imsg = getTransactionRecord(header->address, header->messageNumber); + + if (!imsg) + { + DP_LOG(("DP-MM> Ignore message due to OOM")); + return 0; + } + + if (header->isTransactionStart) + { + imsg->message.isPathMessage = header->isPathMessage; + imsg->message.isBroadcast = header->isBroadcast; + } + else + { + if (imsg->message.buffer.length == 0) + { + DP_LOG(("DP-MM> Expected transaction-start, ignoring message transaction")); + return 0; + } + + if (imsg->message.isPathMessage != header->isPathMessage || + imsg->message.isBroadcast != header->isBroadcast) + { + DP_ASSERT(0 && "Message type changed during transmission"); + } + } + + // + // Check for redundant start + // + if (header->isTransactionStart && imsg->message.buffer.length) + { + DP_LOG(("DP-MM> Unexpected repeated transaction-start, resetting message state.")); + + // We must have seen a previous incomplete transaction from this device + // they've begun a new packet. 
Forget about the old thing + imsg->message.buffer.reset(); + } + + // + // Kill the buffer if we've got less payload than we should + // + if (header->payloadBytes > data->length) + { + freeOnNextCall = imsg; + imsg->message.buffer.reset(); + DP_LOG(("DP-MM> Received truncated or corrupted message transaction")); + return 0; + } + + // + // Verify transaction CRC + // + BitStreamReader bsr(data, header->headerSizeBits, (header->payloadBytes-1)*8); + NvU8 dataCrc = (NvU8)dpCalculateBodyCRC(&bsr); + + DP_ASSERT(header->headerSizeBits % 8 == 0 && "Header must be byte aligned"); + + if (dataCrc != data->data[header->headerSizeBits/8 + header->payloadBytes - 1] || + header->payloadBytes == 0) + { + DP_LOG(("DP-MM> Received corruption message transactions")); + freeOnNextCall = imsg; + imsg->message.buffer.reset(); + return 0; + } + + // Discount the processed CRC from the payload count + header->payloadBytes--; + + // + // Append active buffer + // + unsigned i = imsg->message.buffer.length; + imsg->message.buffer.resize(i + header->payloadBytes); + dpMemCopy(&imsg->message.buffer.data[i], &data->data[header->headerSizeBits/8], header->payloadBytes); + + // + // Check for end of message transaction + // + if (header->isTransactionEnd) + { + freeOnNextCall = imsg; + + return &imsg->message; + } + + return 0; +} + +MessageTransactionMerger::IncompleteMessage * MessageTransactionMerger::getTransactionRecord(const Address & address, unsigned messageNumber) +{ + IncompleteMessage * msg; + NvU64 currentTime = this->timer->getTimeUs(); + + // + // Search for existing record + // + for (ListElement * i = incompleteMessages.begin();i != incompleteMessages.end();) + { + msg = (IncompleteMessage *)i; + i = i->next; + if (msg->message.address == address && msg->message.messageNumber == messageNumber) + { + goto found; + } + + // + // Found a stale message in the list + // + if (msg->lastUpdated + incompleteMessageTimeoutMs < currentTime) + delete msg; + } + + // + // None exists? Add a new one + // + msg = new IncompleteMessage(); + msg->message.address = address; + msg->message.messageNumber = messageNumber; + this->incompleteMessages.insertFront(msg); + +found: + // + // Update the timestamp + // + msg->lastUpdated = currentTime; + + return msg; +} + +void IncomingTransactionManager::mailboxInterrupt() +{ + MessageHeader msg; + unsigned totalSize; + AuxRetry::status result; + unsigned txSize = (unsigned)getTransactionSize(); + + // + // Size the static aux window + // + this->localWindow.resize(DP_MAX((unsigned)getTransactionSize(), (unsigned)getMessageBoxSize())); + if (this->localWindow.isError()) + return; + + // + // Read one aux-transaction worth of data + // + result = readMessageBox(0, &this->localWindow.data[0], txSize); + + DP_ASSERT( result != AuxRetry::defer && "Unexpected?!" ); + + if (result != AuxRetry::ack) + return; + + BitStreamReader reader(&this->localWindow, 0, 8*txSize); + + + // + // Before decoding the header, start with the downstream + // ports address prefix + // + if (!decodeHeader(&reader, &msg, addressPrefix)) + { + // + // It's possible we should be NACKing here. Ignoring for now + // to allow the message originator to time out (can take seconds). 
+ // + DP_ASSERT(0 && "Not yet implemented"); + + return; + } + + // + // Let's get the entire sideband message in the localWindow + // + + totalSize = (msg.headerSizeBits / 8) + msg.payloadBytes; + + if (totalSize > txSize) + { + if (totalSize > DPCD_MESSAGEBOX_SIZE) + { + // + // Corrupt packet - total packet can't be larger than the window + // + return; + } + if (AuxRetry::ack!=readMessageBox(txSize, &this->localWindow.data[txSize], totalSize - txSize)) + { + // + // Failed to read second half of message + // + return; + } + } + + clearMessageBoxInterrupt(); + + EncodedMessage * em = incompleteMessages.pushTransaction(&msg, &this->localWindow); + + if (em) + { + this->sink->messagedReceived(this, em); + } +} + +IncomingTransactionManager::~IncomingTransactionManager() +{ +} + + +IncomingTransactionManager::IncomingTransactionManager(Timer * timer, const Address & addressPrefix, IncomingTransactionManagerEventSink * sink) + : incompleteMessages(timer, DP_INCOMPLETE_MESSAGE_TIMEOUT_USEC), addressPrefix(addressPrefix) +{ + this->sink = sink; + this->timer = timer; +} + + + +AuxRetry::status DownReplyManager::readMessageBox(NvU32 offset, NvU8 * data, size_t length) +{ + return hal->readDownReplyMessageBox(offset, data, length); +} + +size_t DownReplyManager::getMessageBoxSize() +{ + return hal->getDownReplyMessageBoxSize(); +} + +size_t DownReplyManager::getTransactionSize() +{ + return hal->getTransactionSize(); +} + +void DownReplyManager::clearMessageBoxInterrupt() +{ + hal->clearInterruptDownReplyReady(); +} + +AuxRetry::status UpRequestManager::readMessageBox(NvU32 offset, NvU8 * data, size_t length) +{ + return hal->readUpRequestMessageBox(offset, data, length); +} + +size_t UpRequestManager::getMessageBoxSize() +{ + return hal->getUpRequestMessageBoxSize(); +} + +size_t UpRequestManager::getTransactionSize() +{ + return hal->getTransactionSize(); +} + +void UpRequestManager::clearMessageBoxInterrupt() +{ + hal->clearInterruptUpRequestReady(); +} + diff --git a/src/common/displayport/src/dp_messagecodings.cpp b/src/common/displayport/src/dp_messagecodings.cpp new file mode 100644 index 000000000..0dc1b81d5 --- /dev/null +++ b/src/common/displayport/src/dp_messagecodings.cpp @@ -0,0 +1,692 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
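MessageTransactionMerger::pushTransaction above reassembles sideband messages per {address, message number}: the header flags mark the first and last transaction, each transaction's trailing body-CRC byte is verified and then dropped, and the assembled EncodedMessage is handed back only when the end flag arrives. A simplified standalone sketch of that accumulation rule (hypothetical types; header parsing and CRC checking omitted):

    #include <cstdint>
    #include <vector>

    // One decoded sideband transaction, CRC already stripped from the payload.
    struct Transaction
    {
        bool start;                    // isTransactionStart
        bool end;                      // isTransactionEnd
        std::vector<uint8_t> payload;  // payload bytes for this transaction
    };

    class Reassembler
    {
        std::vector<uint8_t> body;
    public:
        // Returns the full message body once the last transaction arrives,
        // otherwise nullptr.
        const std::vector<uint8_t> * push(const Transaction & t)
        {
            if (t.start)
                body.clear();          // redundant start: forget partial data
            else if (body.empty())
                return nullptr;        // fragment without a start: ignore it

            body.insert(body.end(), t.payload.begin(), t.payload.end());
            return t.end ? &body : nullptr;
        }
    };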
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_messagecodings.cpp * +* Encoding routines for various messages * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_messagecodings.h" +#include "dp_auxdefs.h" + +using namespace DisplayPort; + +// +// LINK_ADDRESS 0x1 +// +void LinkAddressMessage::set(const Address & target) +{ + clear(); + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // + // Write request identifier + // + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + encodedMessage.isPathMessage = false; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; +} + +ParseResponseStatus LinkAddressMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + DisplayPort::extractGUID(reader, &reply.guid); + reader->readOrDefault(4 /*zeroes*/, 0); + reply.numberOfPorts = reader->readOrDefault(4 /*Number_Of_Ports*/, 0xF); + + for (unsigned i = 0; i < reply.numberOfPorts; i++) + { + reply.res[i].isInputPort = !!reader->readOrDefault(1 /*Input_Port*/, 1); + reply.res[i].peerDeviceType = (PeerDevice) reader->readOrDefault(3 /*Peer_Device_Type*/, 0x0); + reply.res[i].portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reply.res[i].hasMessaging = !!reader->readOrDefault(1 /*Messaging_Capability_Status*/, 0x1); + reply.res[i].dpPlugged = !!reader->readOrDefault(1 /*DisplayPort_Device_Plug_Status*/, 0x1); + + if (reply.res[i].isInputPort == false) + { + reply.res[i].legacyPlugged = !!reader->readOrDefault(1 /*Legacy_Device_Plug_Status*/, 0x1); + + reader->readOrDefault(5 /*zeroes*/, 0x0); + + unsigned ver = reader->readOrDefault(8/*DPCD_Revision*/, 0); + reply.res[i].dpcdRevisionMajor = ver >> 4; + reply.res[i].dpcdRevisionMinor = ver & 0xF; + DisplayPort::extractGUID(reader, &reply.res[i].peerGUID); + reply.res[i].SDPStreams = reader->readOrDefault(4 /*Number_SDP_Streams*/, 0xF); + reply.res[i].SDPStreamSinks = reader->readOrDefault(4 /*Number_SDP_Stream_Sinks*/, 0xF); + } + else + { + reader->readOrDefault(6 /*zeroes*/, 0x0); + reply.res[i].dpcdRevisionMajor = 1; + reply.res[i].dpcdRevisionMinor = 2; + } + } + + return ParseResponseSuccess; +} + +// +// CONNECTION_STATUS_NOTIFY 0x2 +// +ConnStatusNotifyMessage::ConnStatusNotifyMessage(MessageReceiverEventSink * sink) +: MessageReceiver(sink, NV_DP_SBMSG_REQUEST_ID_CONNECTION_STATUS_NOTIFY /*request id*/) +{ +} + +bool ConnStatusNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader) +{ + // read the request body + request.port = reader->readOrDefault(4/*Port_Number*/, 0xF); + reader->readOrDefault(4/*zeroes*/, 0); + bool status = DisplayPort::extractGUID(reader/*GUID of the originating branch device*/, &request.guid); + reader->readOrDefault(1/*zero*/, 0); + request.legacyPlugged = !!reader->readOrDefault(1/*Legacy_Device_Plug_Status*/, 0); + request.devicePlugged = !!reader->readOrDefault(1/*DisplayPort_Device_Plug_Status*/, 0); + request.messagingCapability = !!reader->readOrDefault(1/*Messaging_Capability_Status*/, 0); + request.isInputPort = !!reader->readOrDefault(1/*Input_Port*/, 0); + request.peerDeviceType = (PeerDevice) reader->readOrDefault(3/*Peer_Device_Type*/, 0); + + // action will be implemented by evensink + this->sink->messageProcessed(this); + return status; +} + +// +// GENERIC_UP_REPLY 0xnn +// +void GenericUpReplyMessage::set(const Address & target, + bool bReplyIsNack, + bool bBroadcast, + bool 
bPath) +{ + clear(); + BitStreamWriter writer(&encodedMessage.buffer, 0); + + writer.write(bReplyIsNack?1:0, 1); + writer.write(requestIdentifier, 7); + + encodedMessage.isPathMessage = bPath; + encodedMessage.isBroadcast = bBroadcast; + encodedMessage.address = target; +} + +GenericUpReplyMessage::GenericUpReplyMessage(unsigned requestId, bool bReplyIsNack, bool bBroadcast, bool bPath) +: Message(requestId, NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT) +{ + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // + // Write request identifier + // + writer.write(bReplyIsNack?1:0, 1); + writer.write(requestId, 7); + + encodedMessage.isPathMessage = bPath; + encodedMessage.isBroadcast = bBroadcast; +} + +GenericUpReplyMessage::GenericUpReplyMessage(const Address & target, unsigned requestId, bool bReplyIsNack, bool bBroadcast, bool bPath) +: Message(requestId, NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT) +{ + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // + // Write request identifier + // + writer.write(bReplyIsNack?1:0, 1); + writer.write(requestId, 7); + + encodedMessage.isPathMessage = bPath; + encodedMessage.isBroadcast = bBroadcast; + encodedMessage.address = target; +} + +ParseResponseStatus GenericUpReplyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + // + // we are not expecting any replies here + // Since the corresponding post for this kind of message is of reply type; + // message manager won't queue an awaiting down reply for the same. + // + DP_ASSERT(0 && "We shouldn't be here!!"); + return ParseResponseSuccess; +} + +// +// CLEAR_PAYLOAD_ID_TABLE 0x14 +// +ClearPayloadIdTableMessage::ClearPayloadIdTableMessage() +: Message(NV_DP_SBMSG_REQUEST_ID_CLEAR_PAYLOAD_ID_TABLE /* request id */, NV_DP_SBMSG_PRIORITY_LEVEL_1) +{ + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // Write request identifier + writer.write(0/*zero*/, 1); + writer.write(requestIdentifier, 7); + + encodedMessage.isPathMessage = true; + encodedMessage.isBroadcast = true; + encodedMessage.address = Address(); +} + +ParseResponseStatus ClearPayloadIdTableMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + return ParseResponseSuccess; +} + +ParseResponseStatus ClearPayloadIdTableMessage::parseResponse(EncodedMessage * message) +{ + sink->messageCompleted(this); + return ParseResponseSuccess; +} + +// +// ENUM_PATH_RESOURCES 0x10 +// +EnumPathResMessage::EnumPathResMessage(const Address & target, unsigned port, bool point) +: Message(NV_DP_SBMSG_REQUEST_ID_ENUM_PATH_RESOURCES /* request identifier */, + NV_DP_SBMSG_PRIORITY_LEVEL_4) +{ + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // Write request identifier + writer.write(0/*zereo*/, 1); + writer.write(requestIdentifier, 7); + writer.write(port, 4); + writer.write(0/*zeroes*/, 4); + + encodedMessage.isPathMessage = !point; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; + sinkPort = port; + dpMemZero(&reply, sizeof(reply)); +} + +ParseResponseStatus EnumPathResMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reader->readOrDefault(3 /*zeroes*/, 0); + reply.bFECCapability = (reader->readOrDefault(1 /*FEC*/, 0x0) == 1) ? 
true : false; + reply.TotalPBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF); + reply.FreePBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// ALLOCATE_PAYLOAD 0x11 +// +void AllocatePayloadMessage::set +( + const Address & target, + unsigned port, + unsigned nSDPStreams, + unsigned vcPayloadId, + unsigned PBN, + unsigned* SDPStreamSink, + bool entirePath +) +{ + clear(); + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // Write request identifier + writer.write(0/*zero*/, 1); + writer.write(requestIdentifier, 7); + + DP_ASSERT(SDPStreamSink || (!nSDPStreams)); + + // Write message request body + writer.write(port, 4); + writer.write(nSDPStreams, 4); + writer.write(0/*zero*/, 1); + writer.write(vcPayloadId, 7); + writer.write(PBN, 16); + for (unsigned i=0; ireadOrDefault(4 /*Port_Number*/, 0xF); + reader->readOrDefault(5 /*zeroes*/, 0); + reply.virtualChannelPayloadId = reader->readOrDefault(7 /*Virtual_Channel_Payload_Identifier*/, 0x0); + reply.PBN = reader->readOrDefault(16 /*PBN*/, 0xFFFF); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} +// +// QUERY_PAYLOAD 0x12 +// +QueryPayloadMessage::QueryPayloadMessage +( + const Address & target, + unsigned port, + unsigned vcPayloadId +) + : Message(NV_DP_SBMSG_REQUEST_ID_QUERY_PAYLOAD /* request identifier*/, + NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT) +{ + BitStreamWriter writer(&encodedMessage.buffer, 0); + + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // Write message request + writer.write(port, 4); + writer.write(0 /*zeroes*/, 5); + writer.write(vcPayloadId, 7); + + encodedMessage.isPathMessage = false; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; + sinkPort = port; + dpMemZero(&reply, sizeof(reply)); +} + +ParseResponseStatus QueryPayloadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reader->readOrDefault(4 /*zeroes*/, 0); + reply.allocatedPBN = reader->readOrDefault(16 /*Allocated_PBN*/, 0xFFFF); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + + +// +// RESOURCE_STATUS_NOTIFY 0x13 +// + +ResStatusNotifyMessage::ResStatusNotifyMessage(MessageReceiverEventSink * sink) +: MessageReceiver(sink, NV_DP_SBMSG_REQUEST_ID_RESOURCE_STATUS_NOTIFY /*request id*/) +{ + dpMemZero(&request, sizeof(request)); +} + +bool ResStatusNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader) +{ + bool status; + + // read the request body + request.port = reader->readOrDefault(4/*Port_Number*/, 0xF); + reader->readOrDefault(4/*zeroes*/, 0); + status = DisplayPort::extractGUID(reader, &request.guid); + request.PBN = reader->readOrDefault(16/*Available_PBN*/, 0); + + // action will be implemented by evensink + this->sink->messageProcessed(this); + return status; +} + +// +// REMOTE_DPCD_READ 0x20 +// +void RemoteDpcdReadMessage::set +( + const Address & target, + unsigned port, + unsigned dpcdAddress, + unsigned nBytesToRead +) +{ + clear(); + + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // Write request identifier + writer.write(0/*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request data + writer.write(port, 4); + writer.write(dpcdAddress, 20); + 
writer.write(nBytesToRead, 8); + + encodedMessage.isPathMessage = false; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; + sinkPort = port; +} + +ParseResponseStatus RemoteDpcdReadMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + reader->readOrDefault(4 /*zeroes*/, 0); + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reply.numBytesReadDPCD = reader->readOrDefault(8 /*Num_Of_Bytes_Read*/, 0x0); + for (unsigned i=0; ireadOrDefault(8 /*data*/, 0x0); + } + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// REMOTE_DPCD_WRITE 0x21 +// +void RemoteDpcdWriteMessage::set +( + const Address & target, + unsigned port, + unsigned dpcdAddress, + unsigned nBytesToWrite, + const NvU8 * writeData +) +{ + clear(); + BitStreamWriter writer(&encodedMessage.buffer, 0); + + DP_ASSERT(writeData || (!nBytesToWrite)); + + // Write request identifier + writer.write(0/*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request data + writer.write(port, 4); + writer.write(dpcdAddress, 20); + writer.write(nBytesToWrite, 8); + + for (unsigned i=0; ireadOrDefault(4 /*zeroes*/, 0); + unsigned portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + + DP_ASSERT(portNumber == this->sinkPort); + DP_USED(portNumber); + + if (this->getSinkPort() != portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// REMOTE_I2C_READ 0x22 +// +void RemoteI2cReadMessage::set +( + const Address & target, + unsigned nWriteTransactions, + unsigned port, + I2cWriteTransaction* transactions, + unsigned readI2cDeviceId, + unsigned nBytesToRead +) +{ + clear(); + + BitStreamWriter writer(&encodedMessage.buffer, 0); + + DP_ASSERT(transactions || (!nWriteTransactions)); + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request specific data + writer.write(port, 4); + writer.write(0/*zeroes*/, 2); + writer.write(nWriteTransactions, 2); + + for (unsigned i=0; ireadOrDefault(4 /*zeroes*/, 0); + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reply.numBytesReadI2C = reader->readOrDefault(8 /*Num_Of_Bytes_Read*/, 0x0); + for (unsigned i=0; ireadOrDefault(8 /*data*/, 0x0); + } + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// REMOTE_I2C_WRITE 0x23 +// +void RemoteI2cWriteMessage::set +( + const Address & target, + unsigned port, + unsigned writeI2cDeviceId, + unsigned nBytesToWrite, + unsigned char* writeData +) +{ + clear(); + + BitStreamWriter writer(&encodedMessage.buffer, 0); + + DP_ASSERT(writeData || (!nBytesToWrite)); + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request data + writer.write(port, 4); + writer.write(0/*zero*/, 5); + writer.write(writeI2cDeviceId, 7); + writer.write(nBytesToWrite, 8); + + for (unsigned i=0; ireadOrDefault(4 /*zeroes*/, 0); + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} +// +// POWER_UP_PHY 0x24 +// +void PowerUpPhyMessage::set +( + const Address & target, + unsigned port, + bool entirePath +) +{ + clear(); + + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // 
write request specific data + writer.write(port, 4); + writer.write(0 /*zero*/, 4); + + encodedMessage.isPathMessage = entirePath; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; + sinkPort = port; +} + +// +// POWER_DOWN_PHY 0x25 +// +ParseResponseStatus PowerUpPhyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reader->readOrDefault(4 /*zeroes*/, 0); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +void PowerDownPhyMessage::set +( + const Address & target, + unsigned port, + bool entirePath +) +{ + BitStreamWriter writer(&encodedMessage.buffer, 0); + + // Write request identifier + writer.write(0 /*zero*/, 1); + writer.write(requestIdentifier, 7); + + // write request specific data + writer.write(port, 4); + writer.write(0/*zeros*/, 4); + + encodedMessage.isPathMessage = entirePath; + encodedMessage.isBroadcast = false; + encodedMessage.address = target; + sinkPort = port; +} + +ParseResponseStatus PowerDownPhyMessage::parseResponseAck(EncodedMessage * message, BitStreamReader * reader) +{ + reply.portNumber = reader->readOrDefault(4 /*Port_Number*/, 0xF); + reader->readOrDefault(4 /*zeroes*/, 0); + + if (this->getSinkPort() != reply.portNumber) + return ParseResponseWrong; + + return ParseResponseSuccess; +} + +// +// SINK_EVENT_NOTIFY 0x30 +// + +SinkEventNotifyMessage::SinkEventNotifyMessage(MessageReceiverEventSink * sink, unsigned requestId) +: MessageReceiver(sink, 0x30 /*request id*/) +{ +} + +bool SinkEventNotifyMessage::processByType(EncodedMessage * message, BitStreamReader * reader) +{ + return true; +} + + +I2cWriteTransaction::I2cWriteTransaction +( + unsigned WriteI2cDeviceId, + unsigned NumBytes, + unsigned char * buffer, + bool NoStopBit, + unsigned I2cTransactionDelay +) +{ + this->WriteI2cDeviceId = WriteI2cDeviceId; + this->NumBytes = NumBytes; + this->NoStopBit = NoStopBit; + this->I2cTransactionDelay = I2cTransactionDelay; + this->I2cData = buffer; +} + +I2cWriteTransaction::I2cWriteTransaction(): +WriteI2cDeviceId(0), NumBytes(0), I2cData(0), NoStopBit(0), I2cTransactionDelay(0) +{ +} + diff --git a/src/common/displayport/src/dp_messageheader.cpp b/src/common/displayport/src/dp_messageheader.cpp new file mode 100644 index 000000000..c453724cd --- /dev/null +++ b/src/common/displayport/src/dp_messageheader.cpp @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
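All of the request encoders above share the same framing: one zero bit, the 7-bit request identifier, then request-specific fields written MSB-first through BitStreamWriter. As a worked example, the two-byte body of an ENUM_PATH_RESOURCES request can be reproduced with a minimal stand-in packer (BitPacker below is illustrative only, not the driver's BitStreamWriter):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Minimal MSB-first bit packer mirroring how BitStreamWriter is used above.
    struct BitPacker
    {
        std::vector<uint8_t> bytes;
        unsigned bitsUsed = 0;

        void write(uint32_t value, unsigned bits)
        {
            for (unsigned i = bits; i-- > 0; )
            {
                if (bitsUsed % 8 == 0)
                    bytes.push_back(0);
                if ((value >> i) & 1)
                    bytes.back() |= 1 << (7 - (bitsUsed % 8));
                bitsUsed++;
            }
        }
    };

    // ENUM_PATH_RESOURCES (0x10), following the field order written by
    // EnumPathResMessage's constructor above.
    std::vector<uint8_t> encodeEnumPathResources(unsigned port)
    {
        BitPacker w;
        w.write(0, 1);       // zero / request bit
        w.write(0x10, 7);    // request identifier
        w.write(port, 4);    // port number
        w.write(0, 4);       // zero padding
        assert(w.bytes.size() == 2);   // e.g. {0x10, 0x10} for port 1
        return w.bytes;
    }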
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_messageheader.cpp * +* DP message header parser * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_crc.h" +#include "dp_messageheader.h" + + +bool DisplayPort::decodeHeader(BitStreamReader * reader, MessageHeader * header, const Address & address) +{ + unsigned startOffset = reader->offset(); + int LCT, i; + + // + // Read the RAD + // + LCT = reader->readOrDefault( 4, 0); + reader->readOrDefault( 4, 0); + + header->address = address; + + for (i = 0; i < LCT - 1; i++) + { + header->address.append(reader->readOrDefault( 4, 0)); + } + + reader->align( 8); + + // + // Read flags + // + header->isBroadcast = !!reader->readOrDefault( 1, 0); + header->isPathMessage = !!reader->readOrDefault( 1, 0); + header->payloadBytes = reader->readOrDefault( 6, 0) ; + + header->isTransactionStart = !!reader->readOrDefault( 1, 0); + header->isTransactionEnd = !!reader->readOrDefault( 1, 0); + reader->readOrDefault( 1, 0); + header->messageNumber = reader->readOrDefault( 1, 0); + + + // Build a bit reader for the slice of header we just processed + BitStreamReader crcReader(reader->buffer(), startOffset, reader->offset()); + + if (reader->readOrDefault( 4, (NvU32)~0) != dpCalculateHeaderCRC(&crcReader)) + { + // Corrupt packet received + char buffer[48*3+1]; + dpHexDump(&buffer[0], sizeof(buffer), (NvU8*)reader->buffer() + startOffset, reader->offset() - startOffset); + DP_LOG(("DP-MM> Corrupt message transaction. Expected CRC %d. Message = {%s}", dpCalculateHeaderCRC(&crcReader), buffer)); + + return false; + } + + header->headerSizeBits = reader->offset() - startOffset; + return true; +} diff --git a/src/common/displayport/src/dp_messages.cpp b/src/common/displayport/src/dp_messages.cpp new file mode 100644 index 000000000..0baaf364c --- /dev/null +++ b/src/common/displayport/src/dp_messages.cpp @@ -0,0 +1,606 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
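decodeHeader above reads the sideband header as one LCT/LCR byte, LCT-1 relative-address nibbles padded to a byte boundary, a broadcast/path/length byte, and a start/end/sequence byte that ends in the 4-bit header CRC. That layout is also what drives the header-size formula used by the splitter later in this change; a small sketch with the sizes worked out:

    #include <cassert>

    // Sideband header size in bits for a given Link-Count-Total (LCT):
    //   1 byte  : LCT (4 bits) + LCR (4 bits)
    //   RAD     : 4 bits per hop after the first, rounded up to whole bytes
    //   2 bytes : broadcast/path/length byte + start/end/seq/CRC byte
    unsigned headerSizeBits(unsigned LCT)
    {
        return 8 + (((4 * (LCT - 1)) + 4) & ~7u) + 16;
    }

    int main()
    {
        assert(headerSizeBits(1) == 24);  // no RAD nibbles: 3-byte header
        assert(headerSizeBits(3) == 32);  // two RAD nibbles share one byte
        assert(headerSizeBits(4) == 40);  // three RAD nibbles need two bytes
        return 0;
    }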
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_messages.cpp * +* Encoding for aux common messages. * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_splitter.h" +#include "dp_messages.h" +#include "dp_merger.h" +#include "dp_list.h" +#include "dp_tracing.h" + +using namespace DisplayPort; +namespace DisplayPort +{ + GenericMessageCompletion::GenericMessageCompletion() : + failed(false), completed(false) {} + void GenericMessageCompletion::messageFailed(MessageManager::Message * from, NakData * data) + { + nakData = *data; + failed = true; + completed = true; + } + void GenericMessageCompletion::messageCompleted(MessageManager::Message * from) + { + failed = false; + completed = true; + } +}; + +// +// Transmit a message and wait for the response in place. +// +bool MessageManager::send(MessageManager::Message * message, NakData & nakData) +{ + GenericMessageCompletion completion; + Address::StringBuffer sb; + DP_USED(sb); + + NvU64 startTime, elapsedTime; + post(message, &completion); + startTime = timer->getTimeUs(); + do + { + hal->notifyIRQ(); + if (hal->interruptDownReplyReady()) + IRQDownReply(); + + if (completion.completed) + { + nakData = completion.nakData; + break; + } + elapsedTime = timer->getTimeUs() - startTime; + + if (elapsedTime > (DPCD_MESSAGE_REPLY_TIMEOUT * 1000)) + { + message->expired(NULL); + nakData.reason = NakTimeout; + break; + } + + // Sleep while processing timer callbacks + timer->sleep(1); + } while(true); + + return !completion.failed; +} + +bool DisplayPort::extractGUID(BitStreamReader * reader, GUID * guid) +{ + for (unsigned i=0; i < 128; i += 8) + { + unsigned data; + if (!reader->read(&data, 8)) + { + return false; + } + + guid->data[i/8] = (NvU8)data; + } + + return true; +} + +void MessageManager::messagedReceived(IncomingTransactionManager * from, EncodedMessage * message) +{ + if (from == &mergerUpRequest) + { + onUpRequestReceived(true, message); + } + else + { + onDownReplyReceived(true, message); + } +} + +void MessageManager::Message::splitterFailed(OutgoingTransactionManager * from) +{ + // + // Message failed + // + NakData nakData; + nakData.reason = NakTimeout; + MessageManager * parent = this->parent; + + if (sink) + sink->messageFailed(this, &nakData); + + if (from == &parent->splitterDownRequest) + { + // + // Tell the message manager he may begin sending the next message + // + parent->transmitAwaitingDownRequests(); + } + else + { + parent->transmitAwaitingUpReplies(); + } +} + +void MessageManager::Message::splitterTransmitted(OutgoingTransactionManager * from) +{ + bTransmitted = true; + MessageManager * parent = this->parent; + + if (from == &parent->splitterDownRequest) + { + // + // Start the countdown timer for the reply + // + parent->timer->queueCallback(this, "SPLI", DPCD_MESSAGE_REPLY_TIMEOUT); + + // + // Tell the message manager he may begin sending the next message + // + parent->transmitAwaitingDownRequests(); + } + else // UpReply + { + if (sink) + sink->messageCompleted(this); // This is the end for an up reply + + parent->transmitAwaitingUpReplies(); + } + +} + +// Since transmit DPCD_MESSAGE_REPLY_TIMEOUT time has elapsed. 
+// - Let's assume the message was not replied to +void MessageManager::Message::expired(const void * tag) +{ + Address::StringBuffer sb; + DP_USED(sb); + + DP_LOG(("DP-MM> Message transmit time expired on message %p (ID = %02X, target = %s)", + (Message*)this, ((Message*)this)->requestIdentifier, (((Message*)this)->state.target).toString(sb))); + + Address::NvU32Buffer addrBuffer; + dpMemZero(addrBuffer, sizeof(addrBuffer)); + (((Message*)this)->state.target).toNvU32Buffer(addrBuffer); + NV_DPTRACE_WARNING(MESSAGE_EXPIRED, ((Message*)this)->requestIdentifier, (((Message*)this)->state.target).size(), + addrBuffer[0], addrBuffer[1], addrBuffer[2], addrBuffer[3]); + + NakData nakData; + nakData.reason = NakTimeout; + + MessageManager * parent = this->parent; + + DP_ASSERT(parent); + if (parent && !parent->isBeingDestroyed) + { + parent->awaitingReplyDownRequest.remove(this); + parent->clearPendingMsg(); + parent->transmitAwaitingDownRequests(); + parent->transmitAwaitingUpReplies(); + } + + if (sink) + sink->messageFailed(this, &nakData); +} + +// +// Enqueue the next message to the splitterDownRequest +// +void MessageManager::transmitAwaitingDownRequests() +{ + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; // Do this first since we may unlink the current node + + if (awaitingReplyDownRequest.isEmpty()) + { + // + // Set the message number, and unlink from the outgoing queue + // + m->encodedMessage.messageNumber = 0; + m->state.messageNumber = 0; + + notYetSentDownRequest.remove(m); + awaitingReplyDownRequest.insertBack(m); + + // + // This call can cause transmitAwaitingDownRequests to be called again + // + bool sent = splitterDownRequest.send(m->encodedMessage, m); + DP_ASSERT(sent); + + return; + } + } +} + +// +// Enqueue the next message to the splitterUpReply +// +void MessageManager::transmitAwaitingUpReplies() +{ + for (ListElement * i = notYetSentUpReply.begin(); i!=notYetSentUpReply.end(); ) + { + Message * m = (Message *)i; + i = i->next; // Do this first since we may unlink the current node + + notYetSentUpReply.remove(m); + + // + // This call can cause transmitAwaitingUpReplies to be called again + // + bool sent = splitterUpReply.send(m->encodedMessage, m); + DP_ASSERT(sent); + } +} + +void MessageManager::postReply(Message * message, Message::MessageEventSink * sink) +{ + post(message, sink, true); +} + +void MessageManager::cancelAllByType(unsigned type) +{ + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m->requestIdentifier == type) + notYetSentDownRequest.remove(m); + } + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m->requestIdentifier == type) + awaitingReplyDownRequest.remove(m); + } +} + +void MessageManager::cancelAll(Message * message) +{ + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m == message && m->requestIdentifier == message->requestIdentifier) + notYetSentDownRequest.remove(m); + } + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + Message * m = (Message *)i; + i = i->next; + + if (m == message && m->requestIdentifier == message->requestIdentifier) + awaitingReplyDownRequest.remove(m); + } +} + +void 
MessageManager::post(Message * message, Message::MessageEventSink * sink, bool transmitReply) +{ + DP_ASSERT(!isBeingDestroyed && "You may not post messages in response to a shutdown"); + + if (isPaused) + return; + + // + // Initialize the fields + // + message->sink = sink; + message->bTransmitted = false; + + // + // Queue the message for the outgoing queue. + // Later on we'll walk to the queue and make sure + // we have at most two outstanding messages PER + // target address. This is how the message + // number is decided. + // + + message->parent = this; + message->transmitReply = transmitReply; + if (message->encodedMessage.isBroadcast) + { + // if its a broadcast message; the target would be the immediate branch. + Address addr; + addr.clear(); + addr.append(0); + message->state.target = addr; + } + else + message->state.target = message->encodedMessage.address; + + if ( transmitReply ) + { + notYetSentUpReply.insertBack(message); + transmitAwaitingUpReplies(); + } + else + { + // + // If the list is empty or the incoming message has the least priority possible (DEFAULT priority), + // then just add the incoming message to the back of the list. + // Otherwise, find the right location by traversing the list. + // + if(message->messagePriority == NV_DP_SBMSG_PRIORITY_LEVEL_DEFAULT || notYetSentDownRequest.isEmpty()) + { + notYetSentDownRequest.insertBack(message); + } + else + { + ListElement *tmp = notYetSentDownRequest.last(); + Message *msg = (Message*) notYetSentDownRequest.last(); + while((msg->prev != tmp) && (msg->messagePriority < message->messagePriority)) + { + msg = (Message*)msg->prev; + } + notYetSentDownRequest.insertBefore(msg->next, message); + } + transmitAwaitingDownRequests(); + } +} + +void MessageManager::onUpRequestReceived(bool status, EncodedMessage * message) +{ + if (!status) + { + return; + } + + // + // Broadcast the up-request message to all + // the receivers on messageReceivers + // + for (ListElement * i = messageReceivers.begin(); i!=messageReceivers.end(); i=i->next) + { + MessageReceiver * rcr = (MessageReceiver *)i; + if (rcr->process((EncodedMessage *)message)) + { + return; + } + } + + DP_ASSERT(0 && "Warning: Unknown upstream UP_REQ message"); +} + + +void MessageManager::onDownReplyReceived(bool status, EncodedMessage * message) +{ + if (!status) + { + return; + } + + // + // Broadcast the down-request message to all + // the receivers on awaitingReplyDownRequest + // + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); i=i->next) + { + Message * messageAwaitingReply = (Message *)i; + + if( messageAwaitingReply->state.target == message->address && + messageAwaitingReply->state.messageNumber == message->messageNumber) + { + awaitingReplyDownRequest.remove(messageAwaitingReply); + if (messageAwaitingReply->parseResponse(message) == ParseResponseWrong) + { + // + // parseResponse() returns ParseResposeWrong when 'Request_Identifier' of down request + // message and down reply message are mis-matched. So insert message in waiting queue + // and wait for correct down reply message. 
+ // + awaitingReplyDownRequest.insertBack(messageAwaitingReply); + } + + goto nextMessage; + } + } + + DP_LOG(("DPMM> Warning: Unmatched reply message")); +nextMessage: + transmitAwaitingUpReplies(); + transmitAwaitingDownRequests(); +} + +MessageManager::~MessageManager() +{ + // This causes any posts they may attempt to do to fail + isBeingDestroyed = true; + + // + // The message manager should not be shut down until + // all outgoing messages are in the cancelled state + // + NakData nakUndef; + nakUndef.reason = NakUndefined; + + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + ListElement * next = i->next; + if (((Message *)i)->sink) + ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef); + i = next; + } + if (!notYetSentDownRequest.isEmpty()) + { + + for (ListElement * i = notYetSentDownRequest.begin(); i!=notYetSentDownRequest.end(); ) + { + ListElement * next = i->next; + DP_LOG(("Down request message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier)); + i = next; + } + } + + for (ListElement * i = notYetSentUpReply.begin(); i!=notYetSentUpReply.end();) + { + ListElement * next = i->next; + if (((Message *)i)->sink) + ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef); + i = next; + } + if (!notYetSentUpReply.isEmpty()) + { + + for (ListElement * i = notYetSentUpReply.begin(); i!=notYetSentUpReply.end(); ) + { + ListElement * next = i->next; + DP_LOG(("Up reply message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier)); + i = next; + } + } + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + ListElement * next = i->next; + if (((Message *)i)->sink) + ((Message *)i)->sink->messageFailed(((Message *)i), &nakUndef); + i = next; + } + if (!awaitingReplyDownRequest.isEmpty()) + { + + for (ListElement * i = awaitingReplyDownRequest.begin(); i!=awaitingReplyDownRequest.end(); ) + { + ListElement * next = i->next; + DP_LOG(("Down request message type 0x%x client is not cleaning up.", ((Message *)i)->requestIdentifier)); + i = next; + } + } + + // Do not reclaim the memory of our registered receivers + while (!messageReceivers.isEmpty()) + messageReceivers.remove(messageReceivers.front()); +} + +ParseResponseStatus MessageManager::Message::parseResponse(EncodedMessage * message) +{ + BitStreamReader reader(&message->buffer, 0, message->buffer.length*8); + + // Read ReplyType + bool replyNacked = !!reader.readOrDefault(1, true); + + // Read RequestIdentifier + unsigned requestId = reader.readOrDefault(7, 0); + if (requestId != requestIdentifier) + { + DP_LOG(("DP-MM> Requested = %x Received = %x", requestId, requestIdentifier)); + DP_ASSERT(0 && "Reply type doesn't match"); + return ParseResponseWrong; + } + + if (replyNacked) + { + NakData nakData; + + // failure handler will parse the NAK response and do the required action + if (DisplayPort::extractGUID(&reader, &nakData.guid) == false) + { + DP_ASSERT(0 && "Invalid GUID in NAK"); + } + + nakData.reason = (NakReason)reader.readOrDefault(8, 0); + nakData.nak_data = reader.readOrDefault(8, 0); + + // call specific handler after parsing. 
+ parent->timer->cancelCallbacks(this); + + MessageManager * parent = this->parent; + + if (sink) + sink->messageFailed(this, &nakData); + + parent->transmitAwaitingDownRequests(); + + return ParseResponseSuccess; + } + + ParseResponseStatus parseResult = parseResponseAck(message, &reader); + + if (parseResult == ParseResponseSuccess) + { + parent->timer->cancelCallbacks(this); + + if (this->sink) + { + MessageEventSink * msgSink = this->sink; + msgSink->messageCompleted(this); + } + } + + return parseResult; +} + +void MessageManager::Message::MessageEventSink::messageFailed(Message * from, NakData * nakData) +{ + +} + +void MessageManager::registerReceiver(MessageReceiver * receiver) +{ + messageReceivers.insertBack(receiver); +} + + +bool MessageManager::MessageReceiver::process(EncodedMessage * message) +{ + BitStreamReader reader(&message->buffer, 0, message->buffer.length*8); + + // Read RequestIdentifier + reader.readOrDefault(1, 0); + unsigned reqId = reader.readOrDefault(7, 0); + + if (reqId != this->getRequestId()) + { + // + // This receiver is not meant for this message; + // let the next in the queue handle it. + // + return false; + } + + this->address = message->address; + + // processByType should parse the request, create a response and queue it if needed + bool status = processByType(message, &reader); + if (!status) + { + // + // if we are here; we could get a receiver to handle the request + // but something else went wrong. + // + DP_ASSERT(0); + } + + return true; +} diff --git a/src/common/displayport/src/dp_mst_edid.cpp b/src/common/displayport/src/dp_mst_edid.cpp new file mode 100644 index 000000000..547316aa7 --- /dev/null +++ b/src/common/displayport/src/dp_mst_edid.cpp @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_mst_edid.c * +* Implementation Multi Stream EDID reads * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_edid.h" +#include "dp_address.h" +#include "dp_messagecodings.h" +#include "dp_messages.h" + +using namespace DisplayPort; + +EdidReadMultistream::~EdidReadMultistream() +{ + timer->cancelCallbacks(this); +} + +void EdidReadMultistream::startReadingEdid() +{ + NvU8 offset = 0; + I2cWriteTransaction i2cWriteTransactions[1]; + Address::StringBuffer buffer; + DP_USED(buffer); + DP_LOG(("%s(): start for %s", __FUNCTION__, + topologyAddress.toString(buffer))); + + edidReaderManager.reset(); + edid.resetData(); + + DDCAddress = ddcAddrList[ddcIndex]; + + // set offset within segment 0, no need to set segment, because we're starting reading EDID + i2cWriteTransactions[0] = I2cWriteTransaction(DDCAddress >> 1, + sizeof(offset), + &offset, + true); + NvU8 nWriteTransactions = 1; + + remoteI2cRead.set(topologyAddress.parent(), // topology Address + nWriteTransactions, // number of write transactions + topologyAddress.tail(), // port of Device + i2cWriteTransactions, // list of write transactions + DDCAddress >> 1, // right shifted DDC Address (request identifier in spec) + EDID_BLOCK_SIZE); // requested size + + manager->post(&remoteI2cRead, this); +} + +void EdidReadMultistream::messageCompleted(MessageManager::Message * from) +{ + RemoteI2cReadMessage* I2CReadMessage = (RemoteI2cReadMessage*)from; + unsigned char * data = 0; + unsigned numBytesRead; + Address::StringBuffer buffer; + DP_USED(buffer); + + NvU8 seg; + NvU8 offset; + DP_LOG(("%s for %s", __FUNCTION__, topologyAddress.toString(buffer))); + + DP_ASSERT(DDCAddress && "DDCAddress is 0, it is wrong"); + + data = I2CReadMessage->replyGetI2CData(&numBytesRead); + DP_ASSERT(data); + + // this is not required, but I'd like to keep things simple at first submission + DP_ASSERT(numBytesRead == EDID_BLOCK_SIZE); + edidReaderManager.postReply(data, numBytesRead, true); + + if (edidReaderManager.readNextRequest(seg, offset)) + { + readNextBlock(seg, offset); + } + else // EDID read is finished or failed. 
+ { + edidAttemptDone(edidReaderManager.readIsComplete() && edid.verifyCRC()); + } +} + +void EdidReadMultistream::edidAttemptDone(bool succeeded) +{ + if (succeeded) + sink->mstEdidCompleted(this); + else if (ddcIndex + 1 < ddcAddrListSize) + { + ddcIndex++; + startReadingEdid(); + } + else + sink->mstEdidReadFailed(this); +} + +void EdidReadMultistream::readNextBlock(NvU8 seg, NvU8 offset) +{ + I2cWriteTransaction i2cWriteTransactions[2]; + Address::StringBuffer buffer; + DP_USED(buffer); + + // ensure that init function for i2cWriteTranscation for segment and offset won't break + DP_ASSERT(sizeof(seg) == 1); + DP_ASSERT(sizeof(offset) == 1); + + DP_LOG(("%s(): for %s (seg/offset) = %d/%d", __FUNCTION__, + topologyAddress.toString(buffer), + seg, offset)); + + unsigned nWriteTransactions = 2; + if (seg) + { + // select segment + i2cWriteTransactions[0] = I2cWriteTransaction(EDID_SEG_SELECTOR_OFFSET >> 1, + 1, &seg, true); + // set offset within segment + i2cWriteTransactions[1] = I2cWriteTransaction(DDCAddress >> 1, + 1, &offset, true); + } + else + { + // set offset within segment 0 + i2cWriteTransactions[0] = I2cWriteTransaction(DDCAddress >> 1, 1, &offset, true); + nWriteTransactions = 1; + } + + remoteI2cRead.set(topologyAddress.parent(), // topology Address + nWriteTransactions, // number of write transactions + topologyAddress.tail(), // port of Device + i2cWriteTransactions, // list of write transactions + DDCAddress >> 1, // right shifted DDC Address (request identifier in spec) + EDID_BLOCK_SIZE); // requested size + + manager->post(&remoteI2cRead, this, false); +} + +void EdidReadMultistream::expired(const void * tag) +{ + Address::StringBuffer buffer; + DP_USED(buffer); + DP_LOG(("%s on %s", __FUNCTION__, topologyAddress.toString(buffer))); + startReadingEdid(); +} + +void EdidReadMultistream::messageFailed(MessageManager::Message * from, NakData * nakData) +{ + Address::StringBuffer buffer; + DP_USED(buffer); + DP_LOG(("%s on %s", __FUNCTION__, topologyAddress.toString(buffer))); + + if (nakData->reason == NakDefer || nakData->reason == NakTimeout) + { + if (retries < MST_EDID_RETRIES) + { + ++retries; + timer->queueCallback(this, "EDID", MST_EDID_COOLDOWN); + } + else + edidAttemptDone(false /* failed */); + } + else + { + edidAttemptDone(false /* failed */); + } +} diff --git a/src/common/displayport/src/dp_splitter.cpp b/src/common/displayport/src/dp_splitter.cpp new file mode 100644 index 000000000..8b5e40d7f --- /dev/null +++ b/src/common/displayport/src/dp_splitter.cpp @@ -0,0 +1,314 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
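The MessageTransactionSplitter::get routine that follows carves an encoded message into message-box sized transactions: each one carries the header, up to (box size - header - 1) bytes of the remaining body, and a one-byte body CRC. A small sketch of that arithmetic (48 bytes is the DPCD sideband message-box size used throughout this change; 3 bytes is the header size for a directly attached branch, per the formula shown earlier):

    #include <cassert>
    #include <cstddef>

    // How many sideband transactions a message body of a given size needs,
    // mirroring the payloadSize computation in MessageTransactionSplitter::get.
    std::size_t transactionsNeeded(std::size_t bodyBytes,
                                   std::size_t headerBytes = 3,
                                   std::size_t boxBytes = 48)
    {
        // Each transaction carries the header, one body-CRC byte, and up to
        // (boxBytes - headerBytes - 1) bytes of the remaining body.
        std::size_t perChunk = boxBytes - headerBytes - 1;
        return (bodyBytes + perChunk - 1) / perChunk;
    }

    int main()
    {
        assert(transactionsNeeded(2)  == 1);   // a LINK_ADDRESS request fits easily
        assert(transactionsNeeded(44) == 1);   // exactly one full transaction
        assert(transactionsNeeded(45) == 2);   // one byte over forces a second chunk
        return 0;
    }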
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort********************************\ +* * +* Module: dp_splitter.cpp * +* Asynchronous Message Splitter * +* * +\***************************************************************************/ + +#include "dp_internal.h" +#include "dp_bitstream.h" +#include "dp_splitter.h" +#include "dp_auxdefs.h" +#include "dp_crc.h" +#include "dp_configcaps.h" + +using namespace DisplayPort; + +#define DP_MAX_HEADER_SIZE 16 +// timeout after 110ms with a retry recurring every 5ms for 10 times +#define DOWNSTREAM_RETRY_ON_DEFER_TIMEOUT 110 +#define DOWNSTREAM_RETRY_ON_DEFER_PERIOD 5 +#define DOWNSTREAM_RETRY_ON_DEFER_COUNT 10 + +bool MessageTransactionSplitter::get(Buffer & assemblyBuffer) +{ + unsigned i; + unsigned payloadSize; + bool isTransactionStart, isTransactionEnd; + Address address; + unsigned LCT; + unsigned LCR; + unsigned headerSizeBits; + + assemblyBuffer.reset(); + + // + // Done? + // + if (this->messageOutstanding->buffer.length == this->assemblyTransmitted) + { + return false; + } + + address = this->messageOutstanding->address; + if (this->messageOutstanding->isBroadcast) + { + // no RAD + address.clear(); + LCT = 1; + } + else + { + LCT = address.size(); + } + + // Calculate header size + headerSizeBits = 8 + // LCT/LCR + (((4 * (LCT -1)) + 4) &~ 7) + // byte aligned RAD + 16; + + // + // Pick how much data to send. Header+payloadSize <= 48 bytes. + // + payloadSize = DP_MIN(DPCD_MESSAGEBOX_SIZE - (headerSizeBits+7)/8, /*crc*/1 + this->messageOutstanding->buffer.length - this->assemblyTransmitted); + + // + // Is the first or last transaction in the sequence? + // + isTransactionStart = assemblyTransmitted == 0; + isTransactionEnd = (assemblyTransmitted + payloadSize - 1) == messageOutstanding->buffer.length; + + BitStreamWriter writer(&assemblyBuffer, 0); + + // + // Write the header + // + writer.write(LCT, 4); + + LCR = this->messageOutstanding->isBroadcast ? 6 : LCT > 1 ? LCT - 1 : 0; + + writer.write(LCR, 4); + + // port at i=0 is the outport of source/gpu which should not be included in the RAD in outgoing message header + // if this is a broadcast message; LCT would be 1; hence no RAD. + for (i = 1; i < LCT; i++) + writer.write(address[i], 4); + writer.align(8); + + writer.write(this->messageOutstanding->isBroadcast, 1); + writer.write(this->messageOutstanding->isPathMessage, 1); + writer.write(payloadSize, 6); + + writer.write(isTransactionStart, 1); + writer.write(isTransactionEnd, 1); + writer.write(0, 1); + + DP_ASSERT(messageOutstanding->messageNumber == 0 || messageOutstanding->messageNumber == 1); + writer.write(messageOutstanding->messageNumber, 1); + + // + // Generate 4 bit CRC. 
(Nibble-wise CRC of previous values) + // + BitStreamReader reader(&assemblyBuffer, 0, writer.offset()); + writer.write(dpCalculateHeaderCRC(&reader), 4); + + DP_ASSERT(writer.offset() == headerSizeBits && "Header size mismatch"); + DP_ASSERT((writer.offset() & 7) == 0 && "Packet header must end byte aligned"); + + // + // Generate body CRC + // + BitStreamReader bodyReader(&this->messageOutstanding->buffer, this->assemblyTransmitted * 8, (payloadSize - 1) * 8); + NvU8 bodyCrc = (NvU8)dpCalculateBodyCRC(&bodyReader); + + // Copy in remaining buffer (leaving room for the CRC) + for (i = 0; i < payloadSize - 1; ++i) + writer.write(this->messageOutstanding->buffer.data[i + this->assemblyTransmitted], 8); + writer.write(bodyCrc, 8); + + this->assemblyTransmitted += payloadSize - 1; + + return true; +} + +void OutgoingTransactionManager::expired(const void * tag) +{ + writeToWindow(false); +} + +void OutgoingTransactionManager::cancel(OutgoingTransactionManagerEventSink * sink) +{ + if (activeMessage && activeMessage->eventSink == sink) + activeMessage->eventSink = 0; + + for (ListElement * el = queuedMessages.begin(); el && el!=queuedMessages.end(); el = el->next) + if (((OutgoingMessage *)el)->eventSink == sink) + ((OutgoingMessage *)el)->eventSink = 0; +} + +bool OutgoingTransactionManager::send( EncodedMessage & payload, OutgoingTransactionManagerEventSink * sink) +{ + OutgoingMessage * om = new OutgoingMessage(); + + if (!om) + { + return false; + } + + om->eventSink = sink; + om->message.swap(payload); + + if (!activeMessage) + { + activeMessage = om; + transactionSplitter.set(&om->message); + transactionSplitter.get(this->assemblyBuffer); + writeToWindow(true); + } + else + { + queuedMessages.insertBack(om); + } + + return true; +} + +void OutgoingTransactionManager::writeToWindow( bool firstAttempt) +{ + AuxRetry::status result; + + if (!activeMessage || !activeMessage->eventSink) + goto findNextMessage; + + result = this->writeMessageBox(assemblyBuffer.data, assemblyBuffer.length); + + if (result == AuxRetry::defer) + { + + // + // if retries left; queue one. + // + if (firstAttempt || retriesLeft ) + { + if (firstAttempt) + { + // initialize retriesLeft + retriesLeft = DOWNSTREAM_RETRY_ON_DEFER_COUNT; + } + + retriesLeft--; + DP_LOG(("DP-MM> Messagebox write defer-ed. Q-ing retry.")); + this->timer->queueCallback(this, "SPDE", DOWNSTREAM_RETRY_ON_DEFER_PERIOD); + + return; + } + + // + // Notify message sender of failure. Keep in mind sender + // might turn around immediately with a queue'd send. + // + if (activeMessage) + { + activeMessage->eventSink->splitterFailed(this); + } + + goto findNextMessage; + } + else if (result == AuxRetry::ack) + { + // + // Split off another chunk and transmit + // + if (transactionSplitter.get(assemblyBuffer)) + { + writeToWindow(true); + } + else + { + // + // Notify message sender of success. Keep in mind sender + // might turn around immediately with a queue'd send. + // + if (activeMessage) + { + activeMessage->eventSink->splitterTransmitted(this); + } + + goto findNextMessage; + } + + return; + } + + // + // Notify message sender of failure. Keep in mind sender + // might turn around immediately with a queued send. + // + if (activeMessage) + { + activeMessage->eventSink->splitterFailed(this); + } + +findNextMessage: + // + // The old transaction is complete. 
Free the memory + // + delete activeMessage; + activeMessage = 0; + + // + // Look for the next transaction + // + if (queuedMessages.isEmpty()) + { + return; + } + else + { + activeMessage = (OutgoingMessage *)queuedMessages.begin(); + queuedMessages.remove(activeMessage); + + transactionSplitter.set(&activeMessage->message); + transactionSplitter.get(this->assemblyBuffer); + writeToWindow(true); + } +} + +OutgoingTransactionManager::OutgoingTransactionManager(Timer * timer) + : timer(timer) +{ + this->activeMessage = 0; +} + +AuxRetry::status DownRequestManager::writeMessageBox(NvU8 * data, size_t length) +{ + return hal->writeDownRequestMessageBox(data, length); +} + +size_t DownRequestManager::getMessageBoxSize() +{ + return hal->getDownRequestMessageBoxSize(); +} + +AuxRetry::status UpReplyManager::writeMessageBox(NvU8 * data, size_t length) +{ + return hal->writeUpReplyMessageBox(data, length); +} + +size_t UpReplyManager::getMessageBoxSize() +{ + return hal->getUpReplyMessageBoxSize(); +} diff --git a/src/common/displayport/src/dp_sst_edid.cpp b/src/common/displayport/src/dp_sst_edid.cpp new file mode 100644 index 000000000..3985dba15 --- /dev/null +++ b/src/common/displayport/src/dp_sst_edid.cpp @@ -0,0 +1,336 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_sst_edid.c * +* Implementation Single Stream EDID reads * +* * +\***************************************************************************/ + +#include "dp_buffer.h" +#include "dp_auxbus.h" +#include "dp_internal.h" +#include "dp_edid.h" + +using namespace DisplayPort; + +/* +* seg -> 256 segment of EDID +* offset -> offset within segment +*/ +static bool readNextBlock(AuxBus * auxBus, NvU8 seg, NvU8 offset, Buffer & buffer, unsigned & totalRead, unsigned DDCAddress, Timer * timer) +{ + AuxBus::Type type = AuxBus::i2cMot; + AuxBus::status auxStatus; + + unsigned retries = 0; + unsigned sizeRequested; + unsigned sizeCompleted; + unsigned transactionSize = auxBus->transactionSize(); + totalRead = 0; + + DP_ASSERT(auxBus); + DP_ASSERT(transactionSize > 0); + + // ASSERT if edidOffset offset wasn't increased in block len sizes + DP_ASSERT(offset == 0 || offset == EDID_BLOCK_SIZE); + + sizeRequested = transactionSize; + if (!buffer.resize(EDID_BLOCK_SIZE)) + { + return false; + } + + DP_ASSERT(sizeof(seg) == 1); + DP_ASSERT(sizeof(offset) == 1); + + // only set segment if it is required + if (seg) + { + // start EDID read by specifying appropriate Edid segment id + for (unsigned retry = 0; retry < EDID_MAX_AUX_RETRIES; retry++) + { + auxStatus = auxBus->transaction(AuxBus::write, AuxBus::i2cMot, EDID_SEG_SELECTOR_OFFSET >> 1, + &seg, sizeof(seg), &sizeCompleted); + if (auxStatus == AuxBus::success) + break; + + // If deferred due to timeout + if (auxStatus == AuxBus::defer) + { + // Wait for sometime between retries + timer->sleep(EDID_AUX_WAIT_TIME); + continue; + } + + return false; + } + } + + auxStatus = AuxBus::nack; + + for (retries = 0; totalRead < EDID_BLOCK_SIZE;) + { + // + // For retry, rewrite the Offset for the internal read pointer + // except when the previous Read auxstatus was an Aux::defer + // since in that case, the offset was never incremented by sink + // + if ((auxStatus != AuxBus::success) && (auxStatus != AuxBus::defer)) + { + // start from this offset, need to verify with display with multiple edid blocks + for (unsigned retry = 0; retry < EDID_MAX_AUX_RETRIES; retry++) + { + auxStatus = auxBus->transaction(AuxBus::write, AuxBus::i2cMot, DDCAddress >> 1, + (NvU8*)(&offset), sizeof(offset), &sizeCompleted); + if (auxStatus == AuxBus::success) + break; + // If deferred due to timeout + if (auxStatus == AuxBus::defer) + { + // Wait for sometime between retries + timer->sleep(EDID_AUX_WAIT_TIME); + continue; + } + + return false; + } + // if retries exceed EDID_MAX_AUX_RETRIES, give up + if (auxStatus != AuxBus::success) + { + return false; + } + } + // need to change to I2C (not MOT) to read just one last part of EDID block + if (totalRead + transactionSize >= EDID_BLOCK_SIZE) + type = AuxBus::i2c; + + sizeRequested = DP_MIN(transactionSize, EDID_BLOCK_SIZE - totalRead); + auxStatus = auxBus->transaction(AuxBus::read, type, DDCAddress >> 1, + &(buffer.data[totalRead]), sizeRequested, &sizeCompleted); + + if (AuxBus::success != auxStatus || (sizeRequested && (sizeCompleted == 0))) + { + if (retries >= EDID_MAX_AUX_RETRIES) + return false; + + DP_LOG(("DisplayPort: %s: Retrying at totalRead 0x%08x (replyType %x, size %x)", + __FUNCTION__, totalRead, auxStatus, sizeRequested)); + + // Wait for sometime between retries + timer->sleep(EDID_AUX_WAIT_TIME); + retries++; + + continue; + } + + // Assert when size mismatches and it is not last block + if ((sizeRequested != 
sizeCompleted) && + (totalRead + transactionSize < EDID_BLOCK_SIZE)) + { + DP_LOG(("DisplayPort: %s: dpAux returned edid block smaller than expected. Read from totalRead 0x%08x (replyType %x, size %x)", + __FUNCTION__, totalRead, auxStatus, sizeRequested)); + DP_ASSERT(0); + } + + retries = 0; // reset the number of retries + totalRead += sizeCompleted; + offset += (NvU8)sizeCompleted; + } + + return true; +} + +/*! +* @return: true => EDID read is success, false => read is failure +*/ +static bool sstReadEdid(AuxBus * auxBus, Edid & edid, unsigned DDCAddr, Timer * timer, bool pendingTestRequestEdidRead) +{ + // + // If there is pending test request for edid read, + // ask edidReaderManager to take whatever posted, + // instead of discarding bytes read by a failed read. + // Because cert devices may need to see the checksum of these bytes, + // even if they seem corrupted. + // + EdidAssembler edidReaderManager(&edid, pendingTestRequestEdidRead); + NvU32 retryCount = 0; + Buffer buffer; + if (!buffer.resize(EDID_BLOCK_SIZE)) + { + return false; + } + + DP_ASSERT(auxBus); + + do + { + NvU8 seg = 0; + NvU8 offset = 0; + unsigned totalRead = 0; + edidReaderManager.reset(); + + // start by reading first EDID block, posting it and analyzing for next request + do + { + bool success = readNextBlock(auxBus, seg, offset, buffer, totalRead, DDCAddr, timer); + edidReaderManager.postReply(buffer, totalRead, success); + } + while (edidReaderManager.readNextRequest(seg, offset)); + if (!edid.isPatchedChecksum()) + break; + } while (retryCount++ < EDID_POLICY_BLOCK_READ_MAX_RETRY_COUNT); + + // + // EDID read is successful when + // 1. read was done to the end (i.e. no corruption, no blocks exceeding retry count) + // 2. EDID CRC is correct + // + return edidReaderManager.readIsComplete(); +} + +EDID_DDC DisplayPort::sstDDCPing(AuxBus & dpAux) +{ + unsigned sizeRequested = 0, sizeCompleted; + AuxBus::status auxStatus = AuxBus::nack; + NvU8 offset = 0; + unsigned ddcAddrIdx; + + for (ddcAddrIdx = 0; ddcAddrIdx < ddcAddrListSize; ddcAddrIdx++) + { + // + // Don't use an I2C write. Some devices erroneously ACK on the write + // + auxStatus = dpAux.transaction(AuxBus::read, AuxBus::i2c, ddcAddrList[ddcAddrIdx] >> 1, + &offset, sizeRequested, &sizeCompleted); + + if (AuxBus::success == auxStatus) + return (EDID_DDC)ddcAddrList[ddcAddrIdx]; + } + + return EDID_DDC_NONE; + +} + +bool DisplayPort::EdidReadSST(Edid & edid, AuxBus * auxBus, Timer* timer, + bool pendingTestRequestEdidRead, bool bBypassAssembler, + MainLink * main) +{ + Edid previousEdid; + Buffer *buffer; + bool status; + + for (unsigned i = 0; i < ddcAddrListSize; i++) + { + for (unsigned j = 0; j < EDID_READ_MAX_RETRY_COUNT; j++) + { + // + // Client asks to use RM control code to fetch EDID. + // + if (bBypassAssembler && main) + { + unsigned blockCnt; + buffer = edid.getBuffer(); + if (!buffer->resize(EDID_BLOCK_SIZE)) + { + return false; + } + status = main->fetchEdidByRmCtrl(buffer->getData(), buffer->getLength()); + + if (status) + { + blockCnt = edid.getBlockCount(); + + // If read successfully, check if there are two or more blocks. + if (blockCnt != 1) + { + if (!buffer->resize(EDID_BLOCK_SIZE * blockCnt)) + { + return false; + } + status = main->fetchEdidByRmCtrl(buffer->getData(), buffer->getLength()); + } + } + if (!status) + { + // + // If fetchEdidByRmCtrl fails for some reasons: + // Try to read again using DPLib read function. 
+ // One reason client to request read from RM is to making sure + // the EDID is overridden (regkey or others). So call the RM + // control call to apply the EDID overrides. + // + status = sstReadEdid(auxBus, edid, ddcAddrList[i], timer, + pendingTestRequestEdidRead); + if (status) + { + main->applyEdidOverrideByRmCtrl(buffer->getData(), + buffer->getLength()); + } + else + { + DP_LOG(("EDID> Failed to read EDID from RM and DPLib")); + } + } + } + else + { + // + // If there is pending test request for edid read, make sure we get the raw bytes without check. + // Because cert devices may need to see the checksum of whatever is read for edid, even if they seem corrupted. + // + status = sstReadEdid(auxBus, edid, ddcAddrList[i], timer, pendingTestRequestEdidRead); + + } + + if (status) + { + if (edid.verifyCRC()) + { + return true; + } + else + { + if (j == 0) // first failure? + { + previousEdid.swap(edid); + } + else + { + if (previousEdid == edid) + { + // we got the same invalid checksum again; we will assume it is valid. + edid.setForcedEdidChecksum(true); + return true; + } + } + } + } + } + } + + DP_LOG(("EDID> Failed to ping sst DDC addresses")); + + return false; +} diff --git a/src/common/displayport/src/dp_timer.cpp b/src/common/displayport/src/dp_timer.cpp new file mode 100644 index 000000000..f34431b8c --- /dev/null +++ b/src/common/displayport/src/dp_timer.cpp @@ -0,0 +1,199 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_timer.cpp * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_timer.h" +using namespace DisplayPort; + +void Timer::expired() +{ + fire(false); +} + +// Take care, this function is re-entrant. +// Consider that sleep() is effectively a call to fire(). +// Clients may sleep in response to a timer callback. 
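+//
+// For example, an expired() handler that calls timer->sleep() re-enters fire()
+// through _pump(); that nested dispatch may add or delete PendingCallback
+// entries, which is why the dispatch loop below restarts from the head of the
+// list after every callback instead of continuing with a possibly stale iterator.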
+unsigned Timer::fire(bool fromSleep) // returns min time to next item to be fired +{ + restart: + + NvU64 now = getTimeUs(); + NvU64 nearest = (NvU64)-1; + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); ) + { + if (fromSleep && !i->executeInSleep) { + i = (PendingCallback*)i->next; + continue; + } + + if (now >= i->timestamp) + { + const void * context = i->context; + TimerCallback * target = i->target; + delete i; + if (target) + target->expired(context); // Take care, the client may have made + // a recursive call to fire in here. + // Easy solution: Restart at front of list. + // current time may have also changed + // drastically from a nested sleep + goto restart; + } + else + { + if (i->timestamp < nearest) + nearest = i->timestamp; + i = (PendingCallback*)i->next; + } + } + unsigned minleft = (unsigned)((nearest - now + 999)/ 1000); + return minleft; +} + +void Timer::_pump(unsigned milliseconds, bool fromSleep) +{ + do + { + unsigned amt = fire(fromSleep); + if (amt >= milliseconds) { + raw->sleep(milliseconds); + return; + } + raw->sleep(amt); + milliseconds-=amt; + } while(milliseconds); +} + +// +// Queue a timer callback. +// Unless the dont-execute-in-sleep flag is set +// +void Timer::queueCallback(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep) +{ + NvU64 now = getTimeUs(); + PendingCallback * callback = new PendingCallback(); + if (callback == NULL) + { + DP_LOG(("DP> %s: Failed to allocate callback", + __FUNCTION__)); + return; + } + callback->target = target; + callback->context = context; + callback->timestamp = now + milliseconds * 1000; + callback->executeInSleep = executeInSleep; + pending.insertBack(callback); + raw->queueCallback(this, milliseconds); +} + +NvU64 Timer::getTimeUs() +{ + return raw->getTimeUs(); +} + +// Sleep a number of milliseconds. +// timer callbacks will be serviced! +void Timer::sleep(unsigned milliseconds) +{ + _pump(milliseconds, true); +} + +void Timer::cancelCallbacks(Timer::TimerCallback * to) +{ + if (!to) + return; + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + if (i->target == to) + i->target = 0; +} + +void Timer::cancelCallback(Timer::TimerCallback * to, const void * context) +{ + if (!to) + return; + for (PendingCallback * i = (PendingCallback *)pending.begin(); i!=pending.end(); i = (PendingCallback*)i->next) + if (i->target == to && i->context == context) + i->target = 0; +} + +// Queue callbacks in order. +void Timer::queueCallbackInOrder(Timer::TimerCallback * target, const void * context, unsigned milliseconds, bool executeInSleep) +{ + NvU64 now = getTimeUs(); + PendingCallback * callback = new PendingCallback(); + callback->target = target; + callback->context = context; + callback->timestamp = now + milliseconds * 1000; + callback->executeInSleep = executeInSleep; + + //Figure out where to insert the current callback + Timer::PendingCallback* i; + + for (i = (PendingCallback*)pending.begin(); i != pending.end();) + { + // only for the given context. 
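+        // Entries for other contexts are skipped; among entries with this
+        // context, stop at the first one scheduled later so the new callback is
+        // inserted before it, keeping same-context callbacks in timestamp order.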
+ if(i->context == context) + { + if(i->timestamp > callback->timestamp) + break; + } + i = (PendingCallback*) i->next; + } + if (i == pending.end()) + { + pending.insertBack(callback); + } + else + { + pending.insertBefore(i, callback); + } + + raw->queueCallback(this, milliseconds); +} + +void Timer::cancelAllCallbacks() +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + i->target = 0; +} + +void Timer::cancelCallbacksWithoutContext(const void * context) +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + if(i->context != context) + i->target = 0; +} + +bool Timer::checkCallbacksOfSameContext(const void * context) +{ + for (PendingCallback * i = (PendingCallback*)pending.begin(); i!=pending.end(); i = (PendingCallback *)i->next) + if(i->context == context) + return true; + + return false; +} diff --git a/src/common/displayport/src/dp_vrr.cpp b/src/common/displayport/src/dp_vrr.cpp new file mode 100644 index 000000000..f2253f835 --- /dev/null +++ b/src/common/displayport/src/dp_vrr.cpp @@ -0,0 +1,247 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_vrr.cpp * +* Implementation of VRR enablement * +* * +\***************************************************************************/ +#include "dp_connectorimpl.h" +#include "dp_vrr.h" + +using namespace DisplayPort; + +bool VrrEnablement::start() +{ + bool rc; + + DP_LOG(("DPHAL_VRR_ENABLE> **** VRR Enablement Started ****")); + rc = vrrGetPublicInfo(); + if(rc) + { + rc = vrrEnableMonitor(); + if(rc != true) + { + return false; + } + rc = vrrEnableDriver(); + if(rc != true) + { + return false; + } + } + else + { + return false; + } + + DP_LOG(("DPHAL_VRR_ENABLE> **** VRR Enablement Ends ****")); + + return true; +} + +bool VrrEnablement::vrrGetPublicInfo() +{ + MainLink *main = this->parent->connector->main; + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_INIT_PUBLIC_INFO, NULL) != true) + { + return false; + } + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + else + { + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_GET_PUBLIC_INFO, NULL) != true) + { + return false; + } + + return vrrWaitOnEnableStatus(); +} + +bool VrrEnablement::vrrEnableMonitor() +{ + MainLink *main = this->parent->connector->main; + + DP_LOG(("DPHAL_VRR_ENABLE> ** VRR_MON_ENABLE starts **")); + + // Always set the enable F/W state m/c to a known state. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_BEGIN, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_CHALLENGE, NULL); + + // Wait for VRR to be ready. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + // Compare and enable on successful comparison. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_MONITOR_ENABLE_CHECK, NULL) == true) + { + this->bMonitorEnabled = true; + } + + DP_LOG(("DPHAL_VRR_ENABLE> ** VRR_MON_ENABLE ends **")); + + return this->bMonitorEnabled; +} + +bool VrrEnablement::vrrEnableDriver() +{ + NvU32 enableResult; + + MainLink *main = this->parent->connector->main; + + DP_LOG(("DPHAL_VRR_ENABLE> ** VRR_DRV_ENABLE starts **")); + + // Always set the enable F/W state m/c to a known state. + if(main->vrrRunEnablementStage(VRR_ENABLE_STAGE_RESET_MONITOR, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_BEGIN, &enableResult) != true) + { + return false; + } + + if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING) + { + // Wait for VRR to be ready. + if (vrrWaitOnEnableStatus() != true) + { + return false; + } + } + else if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK) + { + return true; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHALLENGE, NULL) != true) + { + return false; + } + + // Wait for VRR to be 'ready'. 
+ if (vrrWaitOnEnableStatus() != true) + { + return false; + } + + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, NULL) != true) + { + return false; + } + + DP_LOG(("DPHAL_VRR_ENABLE> ** VRR_DRV_ENABLE ends **")); + + return true; +} + +bool VrrEnablement::vrrWaitOnEnableStatus(void) +{ + NvU32 timeout = VRR_ENABLE_STATUS_TIMEOUT_THRESHOLD; + NvU32 enableResult; + + MainLink *main = this->parent->connector->main; + ConnectorImpl *connector = this->parent->connector; + do + { + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_STATUS_CHECK, &enableResult) == true) + { + return true; + } + else + { + if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR) + { + return false; + } + else if (enableResult == NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING) + { + Timeout timeout(connector->timer, VRR_ENABLE_STATUS_TIMEOUT_INTERVAL_MS); + while(timeout.valid()); + continue; + } + else + { + return false; + } + } + }while(--timeout); + + return false; +} + +bool VrrEnablement::isMonitorEnabled(void) +{ + return (this->bMonitorEnabled); +} + +bool VrrEnablement::isDriverEnabled(void) +{ + NvU32 enableResult; + MainLink *main = this->parent->connector->main; + if (main->vrrRunEnablementStage(VRR_ENABLE_STAGE_DRIVER_ENABLE_CHECK, + &enableResult) == true) + { + return true; + } + return false; +} diff --git a/src/common/displayport/src/dp_wardatabase.cpp b/src/common/displayport/src/dp_wardatabase.cpp new file mode 100644 index 000000000..7e4ee9ab9 --- /dev/null +++ b/src/common/displayport/src/dp_wardatabase.cpp @@ -0,0 +1,645 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_wardatabase.cpp * +* EDID and OUI based workarounds for panel/TCON issues * +* * +\***************************************************************************/ +#include "dp_wardatabase.h" +#include "dp_edid.h" +#include "dp_connectorimpl.h" + +using namespace DisplayPort; + +void ConnectorImpl::applyOuiWARs() +{ + switch (ouiId) + { + // Megachips Mystique + case 0xE18000: + if (((modelName[0] == 'D') && (modelName[1] == 'p') && (modelName[2] == '1') && + (modelName[3] == '.') && (modelName[4] == '1'))) + { + // + // Mystique based link box for HTC Vive has a peculiar behaviour + // of sending a link retraining pulse if the link is powered down in the absence + // of an active stream. Bug# 1793084. Set the flag so that link is not powered down. + // + bKeepOptLinkAlive = true; + } + + if (((modelName[0] == 'D') && (modelName[1] == 'p') && (modelName[2] == '1') && + (modelName[3] == '.') && (modelName[4] == '2'))) + { + // + // ASUS monitor loses link sometimes during assessing link or link training. + // So if we retrain link by lowering config from HBR2 to HBR we see black screen + // Set the flag so that we first retry link training with same link config + // before following link training fallback. Bug #1846925 + // + bNoFallbackInPostLQA = true; + } + break; + + // Synaptics + case 0x24CC90: + if ((modelName[0] == 'S') && (modelName[1] == 'Y') && (modelName[2] == 'N') && + (modelName[3] == 'A') && (modelName[4] == 'S') && + ((modelName[5] == '1') || (modelName[5] == '2') || + (modelName[5] == '3') || (modelName[5] == '#') || + (modelName[5] == '\"'))) + { + // + // Extended latency from link-train end to FEC enable pattern + // to avoid link lost or blank screen with Synaptics branch. + // (Bug 2561206) + // + // Dock SKU ID: + // Dell Salomon-WD19TB SYNAS1 + // HP Hook SYNAS3 + // HP Adira-A SYNAS# + // Lenovo SYNAS" / SYNAS2 + // + LT2FecLatencyMs = 57; + + if (bDscMstCapBug3143315) + { + // + // Synaptics branch device doesn't support Virtual Peer Devices so DSC + // capability of downstream device should be decided based on device's own + // and its parent's DSC capability + // + bDscCapBasedOnParent = true; + } + } + break; + } +} + +void Edid::applyEdidWorkArounds(NvU32 warFlag, const DpMonitorDenylistData *pDenylistData) +{ + + unsigned ManufacturerID = this->getManufId(); + unsigned ProductID = this->getProductId(); + unsigned YearWeek = this->getYearWeek(); + + // + // Work around EDID problems, using manufacturer, product ID, and date of manufacture, + // to identify each case. + // + switch (ManufacturerID) + { + // Apple + case 0x1006: + if (0x9227 == ProductID) + { + this->WARFlags.powerOnBeforeLt = true; + DP_LOG(("DP-WAR> WAR for Apple thunderbolt J29 panel")); + DP_LOG(("DP-WAR> - Monitor needs to be powered up before LT. Bug 933051")); + } + break; + + // Acer + case 0x7204: + // Bug 451868: Acer AL1512 monitor has a wrong extension count: + if(0xad15 == ProductID && YearWeek <= 0x0d01) + { + // clear the extension count + buffer.data[0x7E] = 0; + this->WARFlags.extensionCountDisabled = true; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid override on Acer AL1512")); + DP_LOG(("DP-WAR> - Disabling extension count.Bug 451868")); + } + break; + + // Westinghouse + case 0x855C: + + // Westinghouse 37" 1080p TV. LVM-37w3 (Port DVI1 EDID). + // Westinghouse 42" 1080p TV. LVM-42w2 (Port DVI1 EDID). 
+ if (ProductID == 0x3703 || ProductID == 0x4202) + { + // Claims HDMI support, but audio causes picture corruption. + // Removing HDMI extension block + + if (buffer.getLength() > 0x80 && + buffer.data[0x7E] == 1 && // extension block present + buffer.data[0x80] == 0x02 && // CEA block + buffer.data[0x81] == 0x03 && // revision 3 + !(buffer.data[0x83] & 0x40)) // No basic audio, must not be the HDMI port + { + // clear the extension count + buffer.data[0x7E] = 0; + this->WARFlags.extensionCountDisabled = true; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on Westinghouse AL1512 LVM- <37/42> w <2/3>")); + DP_LOG(("DP-WAR> - Disabling extension count.")); + } + } + break; + + // IBM + case 0x4D24: + if(ProductID == 0x1A03) + { + // 2001 Week 50 + if (YearWeek == 0x0B32) + { + // Override IBM T210. IBM T210 reports 2048x1536x60Hz in the edid but it's + // actually 2048x1536x40Hz. See bug 76347. This hack was, earlier, in disp driver + // Now it's being moved down to keep all overrides in same place. + // This hack was also preventing disp driver from comparing entire edid when + // trying to figure out whether or not the edid for some device has changed. + buffer.data[0x36] = 0x32; + buffer.data[0x37] = 0x3E; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on IBM T210")); + DP_LOG(("DP-WAR> 2048x1536x60Hz(misreported) -> 2048x1536x40Hz. Bug 76347")); + } + } + break; + // GWY (Gateway) or EMA (eMachines) + case 0xF91E: // GWY + case 0xA115: // EMA + // Some Gateway monitors present the eMachines mfg code, so these two cases are combined. + // Future fixes may require the two cases to be separated. + // Fix for Bug 343870. NOTE: Problem found on G80; fix applied to all GPUs. + if ((ProductID >= 0x0776 ) && (ProductID <= 0x0779)) // Product id's range from decimal 1910 to 1913 + { + // if detailed pixel clock frequency = 106.50MHz + if ( (buffer.data[0x36] == 0x9A) && + (buffer.data[0x37] == 0x29) ) + { + // then change detailed pixel clock frequency to 106.54MHz to fix bug 343870 + buffer.data[0x36] = 0x9E; + buffer.data[0x37] = 0x29; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on GWY/EMA")); + DP_LOG(("DP-WAR> 106.50MHz(misreported) -> 106.50MHz.Bug 343870")); + } + } + break; + + // INX + case 0x2C0C: + // INX L15CX monitor has an invalid detailed timing 10x311 @ 78Hz. + if( ProductID == 0x1502) + { + // remove detailed timing #4: zero out the first 3 bytes of DTD#4 block + buffer.data[0x6c] = 0x0; + buffer.data[0x6d] = 0x0; + buffer.data[0x6e] = 0x0; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on INX L15CX")); + DP_LOG(("DP-WAR> Removing invalid detailed timing 10x311 @ 78Hz")); + } + break; + + // AUO + case 0xAF06: + if ((ProductID == 0x103C) || (ProductID == 0x113C)) + { + // + // Acer have faulty AUO eDP panels which have + // wrong HBlank in the EDID. Correcting it here. + // + buffer.data[0x39] = 0x4B; // new hblank width: 75 + buffer.data[0x3F] = 0x1B; // new hsync pulse width: 27 + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on AUO eDP panel")); + DP_LOG(("DP-WAR> Modifying HBlank and HSync pulse width.")); + DP_LOG(("DP-WAR> Bugs 907998, 1001160")); + } + else if (ProductID == 0x109B || ProductID == 0x119B) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> AUO eDP")); + DP_LOG(("implements only Legacy interrupt address range")); + + // Bug 1792962 - Panel got glitch on D3 write, apply this WAR. 
+ this->WARFlags.disableDpcdPowerOff = true; + DP_LOG(("DP-WAR> Disable DPCD Power Off")); + } + break; + + // LPL + case 0x0C32: + if (ProductID == 0x0000) + { + // + // Patch EDID for Quanta - Toshiba LG 1440x900 panel. See Bug 201428 + // Must 1st verify that we have that panel. It has MFG id 32, 0C + // BUT product ID for this (and other different LG panels) are 0000. + // So verify that the last "Custom Timing" area of the EDID has + // a "Monitor Description" of type FE = "ASCII Data String" which + // has this panel's name = "LP171WX2-A4K5". + // + if ( (buffer.data[0x71] == 0x4C) && + (buffer.data[0x72] == 0x50) && + (buffer.data[0x73] == 0x31) && + (buffer.data[0x74] == 0x37) && + (buffer.data[0x75] == 0x31) && + (buffer.data[0x76] == 0x57) && + (buffer.data[0x77] == 0x58) && + (buffer.data[0x78] == 0x32) && + (buffer.data[0x79] == 0x2D) && + (buffer.data[0x7A] == 0x41) && + (buffer.data[0x7B] == 0x34) && + (buffer.data[0x7C] == 0x4B) && + (buffer.data[0x7D] == 0x35) ) + { + // + // Was 0x95, 0x25 = -> 0x2595 = 9621 or 96.21 Mhz. + // 96,210,000 / 1760 / 912 = 59.939 Hz + // Want 60 * 1760 * 912 ~= 9631 or 96.31 MHz + // 9631 = 0x259F -> 0x9F 0x25. + // So, change byte 36 from 0x95 to 0x9F. + // + buffer.data[0x36] = 0x9F; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on Quanta - Toshiba LG 1440x900")); + DP_LOG(("DP-WAR> Correcting pclk. Bug 201428")); + } + } + else + if (ProductID == 0xE300) + { + // + // Patch EDID for MSI - LG LPL 1280x800 panel. See Bug 359313 + // Must 1st verify that we have that panel. It has MFG id 32, 0C + // BUT product ID for this (and other different LG panels) are E300. + // So verify that the last "Custom Timing" area of the EDID has + // a "Monitor Description" of type FE = "ASCII Data String" which + // has this panel's name = "LP154WX4-TLC3". + // + if ( (buffer.data[0x71] == 0x4C) && + (buffer.data[0x72] == 0x50) && + (buffer.data[0x73] == 0x31) && + (buffer.data[0x74] == 0x35) && + (buffer.data[0x75] == 0x34) && + (buffer.data[0x76] == 0x57) && + (buffer.data[0x77] == 0x58) && + (buffer.data[0x78] == 0x34) && + (buffer.data[0x79] == 0x2D) && + (buffer.data[0x7A] == 0x54) && + (buffer.data[0x7B] == 0x4C) && + (buffer.data[0x7C] == 0x43) && + (buffer.data[0x7D] == 0x33) ) + { + // + // Was 0xBC, 0x1B = -> 0x1BBC = 7100 or 71.00 Mhz. + // 71,000,000 / 1488 / 826 = 59.939 Hz + // Want 60 * 1488 * 826 ~= 7111 or 71.11 MHz + // 7111 = 0x1BC7 -> 0xC7 0x1B. + // So, change byte 36 from 0xBC to 0xC7. + // + buffer.data[0x36] = 0xC7; + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on MSI - LG LPL 1280x800")); + DP_LOG(("DP-WAR> Correcting pclk. Bug 359313")); + } + } + break; + + // SKY + case 0x794D: + if (ProductID == 0x9880) + { + // + // Override for Haier TV to remove resolution + // 1366x768 from EDID data. Refer bug 351680 & 327891 + // Overriding 18 bytes from offset 0x36. + // + buffer.data[0x36] = 0x01; + buffer.data[0x37] = 0x1D; + buffer.data[0x38] = 0x00; + buffer.data[0x39] = 0x72; + buffer.data[0x3A] = 0x51; + buffer.data[0x3B] = 0xD0; + buffer.data[0x3C] = 0x1E; + buffer.data[0x3D] = 0x20; + buffer.data[0x3E] = 0x6E; + buffer.data[0x3F] = 0x28; + buffer.data[0x40] = 0x55; + buffer.data[0x41] = 0x00; + buffer.data[0x42] = 0xC4; + buffer.data[0x43] = 0x8E; + buffer.data[0x44] = 0x21; + buffer.data[0x45] = 0x00; + buffer.data[0x46] = 0x00; + buffer.data[0x47] = 0x1E; + + this->WARFlags.dataForced = true; + DP_LOG(("DP-WAR> Edid overrid on Haier TV.")); + DP_LOG(("DP-WAR> Removing 1366x768. 
bug 351680 & 327891")); + + } + break; + // HP + case 0xF022: + switch (ProductID) + { + case 0x192F: + // + // WAR for bug 1643712 - Issue specific to HP Z1 G2 (Zeus) All-In-One + // Putting the Rx in power save mode before BL_EN is deasserted, makes this specific sink unhappy + // Bug 1559465 will address the right power down sequence. We need to revisit this WAR once Bug 1559465 is fixed. + // + this->WARFlags.disableDpcdPowerOff = true; + DP_LOG(("DP-WAR> Disable DPCD Power Off")); + DP_LOG(("DP-WAR> HP Z1 G2 (Zeus) AIO Bug 1643712")); + break; + } + break; + + // Sharp + case 0x104d: + switch (ProductID) + { + case 0x141c: // HP Valor QHD+ N15P-Q3 Sharp EDP + // + // HP Valor QHD+ N15P-Q3 EDP needs 50 ms delay + // after D3 to avoid black screen issues. + // + this->WARFlags.delayAfterD3 = true; + DP_LOG(("DP-WAR> HP Valor QHD+ N15P-Q3 Sharp EDP needs 50 ms after D3")); + DP_LOG(("DP-WAR> bug 1520011")); + break; + + //Sharp EDPs that declares DP1.2 but doesn't implement ESI address space + case 0x1414: + case 0x1430: + case 0x1445: + case 0x1446: + case 0x144C: + case 0x1450: + case 0x1467: + case 0x145e: + // + // Use Legacy address space for DP1.2 panel + // + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> Sharp EDP implements only Legacy interrupt address range")); + break; + + case 0x143B: + // + // Bug 200113041 + // Need to be unique to identify this Sharp panel. Besides + // manufacturer ID and ProductID, we have to add the mode + // name to make this happen as LQ156D1JW05 in ASCII. + // + if ((buffer.data[0x71] == 0x4C) && + (buffer.data[0x72] == 0x51) && + (buffer.data[0x73] == 0x31) && + (buffer.data[0x74] == 0x35) && + (buffer.data[0x75] == 0x36) && + (buffer.data[0x76] == 0x44) && + (buffer.data[0x77] == 0x31) && + (buffer.data[0x78] == 0x4A) && + (buffer.data[0x79] == 0x57) && + (buffer.data[0x7A] == 0x30) && + (buffer.data[0x7B] == 0x35) && + (buffer.data[0x7C] == 0x0A) && + (buffer.data[0x7D] == 0x20)) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> Sharp EDP implements only Legacy interrupt address range")); + } + break; + } + break; + + // EIZO + case 0xc315: + if (ProductID == 0x2227) + { + // + // The EIZO FlexScan SX2762W generates a redundant long HPD + // pulse after a modeset, which triggers another modeset on GPUs + // without flush mode, triggering an infinite link training + // loop. + // + this->WARFlags.ignoreRedundantHotplug = true; + DP_LOG(("DP-WAR> EIZO FlexScan SX2762W generates redundant")); + DP_LOG(("DP-WAR> hotplugs (bug 1048796)")); + break; + } + break; + + // MEI-Panasonic + case 0xa934: + if (ProductID == 0x96a2) + { + // + // Bug 200113041 + // Need to be unique to identify this MEI-Panasonic panel. + // Besides manufacturer ID and ProductID, we have to add the + // model name to make this happen as VVX17P051J00^ in ASCII. 
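+            // The byte-by-byte comparison below simply matches that model
+            // string's ASCII codes in the EDID display descriptor at offset 0x71.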
+ // + if ((buffer.data[0x71] == 0x56) && + (buffer.data[0x72] == 0x56) && + (buffer.data[0x73] == 0x58) && + (buffer.data[0x74] == 0x31) && + (buffer.data[0x75] == 0x37) && + (buffer.data[0x76] == 0x50) && + (buffer.data[0x77] == 0x30) && + (buffer.data[0x78] == 0x35) && + (buffer.data[0x79] == 0x31) && + (buffer.data[0x7A] == 0x4A) && + (buffer.data[0x7B] == 0x30) && + (buffer.data[0x7C] == 0x30) && + (buffer.data[0x7D] == 0x0A)) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> MEI-Panasonic EDP")); + DP_LOG(("implements only Legacy interrupt address range")); + } + } + break; + + // LG + case 0xE430: + if (ProductID == 0x0469) + { + // + // The LG display can't be driven at FHD with 2*RBR. + // Force max link config + // + this->WARFlags.forceMaxLinkConfig = true; + DP_LOG(("DP-WAR> Force maximum link config WAR required on LG panel.")); + DP_LOG(("DP-WAR> bug 1649626")); + break; + } + break; + case 0x8F34: + if (ProductID == 0xAA55) + { + this->WARFlags.forceMaxLinkConfig = true; + DP_LOG(("DP-WAR> Force maximum link config WAR required on Sharp-CerebrEx panel.")); + } + break; + + // Dell + case 0xAC10: + // Dell U2713H has problem with LQA. Disable it. + if ((ProductID == 0xA092) || (ProductID == 0xF046)) + { + this->WARFlags.reassessMaxLink = true; + } + break; + + // CMN + case 0xAE0D: + if (ProductID == 0x1747) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> CMN eDP")); + DP_LOG(("implements only Legacy interrupt address range")); + } + break; + + // BenQ + case 0xD109: + if ((ProductID == 0x7F2B) || (ProductID == 0x7F2F)) + { + this->WARFlags.ignoreRedundantHotplug = true; + DP_LOG(("DP-WAR> BenQ GSync power on/off redundant hotplug")); + } + break; + + // MSI + case 0x834C: + if (ProductID == 0x4C48) + { + this->WARFlags.useLegacyAddress = true; + DP_LOG(("DP-WAR> MSI eDP\n")); + DP_LOG(("implements only Legacy interrupt address range\n")); + } + break; + + // Unigraf + case 0xC754: + case 0x1863: + { + DP_LOG(("DP-WAR> Unigraf device, keep link alive during detection\n")); + this->WARFlags.keepLinkAlive = true; + } + break; + + // BOE + case 0xE509: + if ((ProductID == 0x977) || (ProductID == 0x974) || (ProductID == 0x9D9)) + { + this->WARFlags.bIgnoreDscCap = true; + DP_LOG(("DP-WAR> BOE panels incorrectly exposing DSC capability. Ignoring it.")); + } + break; + + // NCP + case 0x7038: + if ((ProductID == 0x005F)) + { + this->WARFlags.bIgnoreDscCap = true; + DP_LOG(("DP-WAR> NCP panels incorrectly exposing DSC capability. Ignoring it.")); + } + break; + + // + // This panel advertise DSC capabilities, but panel doesn't support DSC + // So ignoring DSC capability on this panel + // + case 0x6F0E: + if (ProductID == 0x1609) + { + this->WARFlags.bIgnoreDscCap = true; + DP_LOG(("DP-WAR> Ignoring DSC capability on Lenovo CSOT 1609 Panel.")); + DP_LOG(("DP-WAR> Bug 3444252")); + } + break; + + default: + break; + } + + // Find out if the monitor needs a WAR to applied. 
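+    // warFlag carries the DP_MONITOR_CAPABILITY_* bits supplied by the caller
+    // (typically matched from the monitor denylist), and pDenylistData provides
+    // the corresponding override parameters (link rate, lane count, etc.) used below.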
+ if (warFlag) + { + if (warFlag & DP_MONITOR_CAPABILITY_DP_SKIP_REDUNDANT_LT) + { + this->WARFlags.skipRedundantLt = true; + } + + if (warFlag & DP_MONITOR_CAPABILITY_DP_SKIP_CABLE_BW_CHECK) + { + this->WARFlags.skipCableBWCheck = true; + this->WARData.maxLaneAtHighRate = pDenylistData->dpSkipCheckLink.maxLaneAtHighRate; + this->WARData.maxLaneAtLowRate = pDenylistData->dpSkipCheckLink.maxLaneAtLowRate; + } + + if (warFlag & DP_MONITOR_CAPABILITY_DP_WRITE_0x600_BEFORE_LT) + { + // all HP monitors need to be powered up before link training + this->WARFlags.powerOnBeforeLt = true; + DP_LOG(("DP-WAR> HP monitors need to be powered up before LT")); + } + + if (warFlag & DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG) + { + // + // Instead of calculating the optimum link config + // based on timing, bpc etc. just used a default + // fixed link config for the monitor for all modes + // + this->WARFlags.overrideOptimalLinkCfg = true; + // Force the fix max LT + this->WARFlags.forceMaxLinkConfig = true; + this->WARData.optimalLinkRate = pDenylistData->dpOverrideOptimalLinkConfig.linkRate; + this->WARData.optimalLaneCount = pDenylistData->dpOverrideOptimalLinkConfig.laneCount; + DP_LOG(("DP-WAR> Overriding optimal link config on Dell U2410.")); + DP_LOG(("DP-WAR> bug 632801")); + } + + if (warFlag & DP_MONITOR_CAPABILITY_DP_OVERRIDE_MAX_LANE_COUNT) + { + // + // Some monitors claim more lanes than they actually support. + // This particular Lenovo monitos has just 2 lanes, but its DPCD says 4. + // This WAR is to override the max lane count read from DPCD. + // + this->WARFlags.overrideMaxLaneCount = true; + this->WARData.maxLaneCount = pDenylistData->dpMaxLaneCountOverride; + DP_LOG(("DP-WAR> Overriding max lane count on Lenovo L2440x.")); + DP_LOG(("DP-WAR> bug 687952")); + } + } + + if (this->WARFlags.dataForced) + { + DP_LOG(("DP-WAR> EDID was overridden for some data. Patching CRC.")); + this->patchCrc(); + } +} diff --git a/src/common/displayport/src/dp_watermark.cpp b/src/common/displayport/src/dp_watermark.cpp new file mode 100644 index 000000000..1dd43638d --- /dev/null +++ b/src/common/displayport/src/dp_watermark.cpp @@ -0,0 +1,872 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_watermark.cpp * +* DP watermark IsModePossible calculations * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_watermark.h" +#include "dp_linkconfig.h" +#include "displayport.h" + +#define FEC_TOTAL_SYMBOLS_PER_BLK(lanes) ((NvU32)((lanes == 1) ? 512U : 256U)) +#define FEC_PARITY_SYMBOLS_PER_BLK(lanes) ((NvU32)((lanes == 1) ? 12U : 6U)) +//return max number of FEC parity symbols in x link clock cycles +#define FEC_PARITY_SYM_SST(lanes, x) (DP_MIN((NvU32)(x) % FEC_TOTAL_SYMBOLS_PER_BLK(lanes), FEC_PARITY_SYMBOLS_PER_BLK(lanes)) + (NvU32)(x) / FEC_TOTAL_SYMBOLS_PER_BLK(lanes) * FEC_PARITY_SYMBOLS_PER_BLK(lanes) + FEC_PARITY_SYMBOLS_PER_BLK(lanes) + 1U) +#define FEC_PARITY_SYM_MST(lanes, x) (DP_MIN((NvU32)(x) % FEC_TOTAL_SYMBOLS_PER_BLK(lanes), FEC_PARITY_SYMBOLS_PER_BLK(lanes)) + (NvU32)(x) / FEC_TOTAL_SYMBOLS_PER_BLK(lanes) * FEC_PARITY_SYMBOLS_PER_BLK(lanes) + 1U) + + +bool DisplayPort::isModePossibleMST +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo +) +{ + // + // For MST, use downspread 0.6% + // + NvU64 linkFreq = linkConfig.peakRate * 994 / 1000; + + // + // This function is for multistream only! + // + DP_ASSERT( linkConfig.multistream ); + + if(!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK and DEPTH sent by the client "); + return false; + } + + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + + // Extra bits that we need to send + //(hActiveDiv4Remainder > 0 ? (4- hActiveDiv4Remainder) : 0) --> + // Number of extra pixels that we need to insert due to mapping pixels + // to the DP lanes. (4 lanes for MS) + // + // 160 --> Extra bits that we need to send during horizontal blanking + // (BS+VBID+MVID+MAUD+BE) => 5*8*num_lanes + // + // 6 * 4 --> Pixel padding worst case + // + NvU32 minHBlank = ( ((modesetInfo.surfaceWidth % 4) > 0) ? ((4-(modesetInfo.surfaceWidth % 4)) * modesetInfo.depth)/ DSC_FACTOR : 0 ) + (160 + 6 * 4); + + // Rounding to nearest multiple of 32 since we always send 32 bits in one time slice + minHBlank = minHBlank + (32 - minHBlank % 32); + + // bpp - 1 --> Rounding + minHBlank = ((minHBlank * DSC_FACTOR) + modesetInfo.depth - (1 * DSC_FACTOR))/modesetInfo.depth; + + if (minHBlank > modesetInfo.rasterWidth - modesetInfo.surfaceWidth) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Blanking Width is smaller than minimum permissible value.")); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Minimum Horizontal Active Width <= 60 not supported.")); + return false; + } + + NvS32 vblank_symbols; + NvS32 hblank_symbols = (NvS32)(((NvU64)(modesetInfo.rasterWidth - modesetInfo.surfaceWidth - minHBlank) * linkFreq) / modesetInfo.pixelClockHz); + + //reduce HBlank Symbols to account for secondary data packet + hblank_symbols -= 1; //Stuffer latency to send BS + hblank_symbols -= 3; //SPKT latency to send data to stuffer + + hblank_symbols -= linkConfig.lanes == 1 ? 9 : linkConfig.lanes == 2 ? 6 : 3; + + dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols; + + + // + // Audio IMP calculations + // Perform the related audio calculation to determine the number of extra symbols needed. 
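+    //
+    // Worked example (illustrative numbers only): a 48 kHz 2-channel stream on a
+    // 2200-pixel-wide raster at a 148.5 MHz pixel clock needs
+    // ceil(48000 * 2200 / 148500000) = 1 sample per line, rounded up to 2 for
+    // stuffing, giving 10 * 2 + 16 = 36 symbols that must fit within hBlankSym.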
+ // + NvU32 twoChannelAudio_symbols = 0; + + if (modesetInfo.twoChannelAudioHz != 0) + { + // 1-2 channel case + NvU32 samples = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + // Round to the next even sample to account for stuffing (2 ch, 4 lanes) + samples = samples + (2 - samples % 2); + + // Convert sample count to symbols + twoChannelAudio_symbols = 10 * samples + 16; + } + + NvU32 eightChannelAudio_symbols = 0; + if (modesetInfo.eightChannelAudioHz != 0) + { + // 3-8 channel case + NvU32 samples = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + // Convert sample count to symbols + eightChannelAudio_symbols = 40 * samples + 16; + } + + if (dpInfo->hBlankSym < DP_MAX(twoChannelAudio_symbols, eightChannelAudio_symbols)) + { + return false; + } + + // Refer to dev_disp.ref for more information. + // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1; + // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 + if (modesetInfo.surfaceWidth < 40) + { + vblank_symbols = 0; + } + else + { + vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 40) * linkFreq) / modesetInfo.pixelClockHz) - 1; + + vblank_symbols -= linkConfig.lanes == 1 ? 39 : linkConfig.lanes == 2 ? 21 : 12; + } + + dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols; + + return true; +} + + +bool DisplayPort::isModePossibleSST +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits +) +{ + // + // This function is for single stream only! + // + DP_ASSERT( !linkConfig.multistream ); + + unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST; + unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT; + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + + if(bUseIncreasedWatermarkLimits) + { + watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST; + watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT; + } + + if(!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK or DEPTH sent by the client "); + return false; + } + // number of link clocks per line. + int vblank_symbols = 0; + NvU64 PrecisionFactor, ratioF, watermarkF; + + NvU32 numLanesPerLink = linkConfig.lanes; + + DP_ASSERT(!linkConfig.multistream && "MST!"); + + // Check if we have a valid laneCount as currently we support only up to 4-lanes + if (!IS_VALID_LANECOUNT(linkConfig.lanes)) + { + // + // Print debug message and Assert. All calculations assume a max of 8 lanes + // & any increase in lanes should cause these calculation to be updated + // + DP_LOG(("NVRM: %s: ERROR: LaneCount - %d is not supported for waterMark calculations.", + __FUNCTION__, linkConfig.lanes)); + DP_LOG(("Current support is only up to 4-Lanes & any change/increase in supported lanes " + "should be reflected in waterMark calculations algorithm. " + "Ex: See calc for minHBlank variable below")); + + DP_ASSERT(0); + return false; + } + + if ((modesetInfo.pixelClockHz * modesetInfo.depth) >= (8 * linkConfig.minRate * linkConfig.lanes * DSC_FACTOR)) + { + return false; + } + + // + // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with + // 0 active symbols. This may cause HW hang. 
Bug 200379426 + // + if ((modesetInfo.bEnableDsc) && + ((modesetInfo.pixelClockHz * modesetInfo.depth) < ((8 * linkConfig.minRate * linkConfig.lanes * DSC_FACTOR) / 64))) + { + return false; + } + + // + // Perform the SST calculation. + // For auto mode the watermark calculation does not need to track accumulated error the + // formulas for manual mode will not work. So below calculation was extracted from the DTB. + // + dpInfo->tuSize = 64; + PrecisionFactor = 100000; + ratioF = ((NvU64)modesetInfo.pixelClockHz * modesetInfo.depth * PrecisionFactor) / DSC_FACTOR; + + ratioF /= 8 * (NvU64) linkConfig.minRate * linkConfig.lanes; + + if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below + return false; + + watermarkF = ratioF * dpInfo->tuSize * (PrecisionFactor - ratioF) / PrecisionFactor; + dpInfo->waterMark = (unsigned)(watermarkAdjust + ((2 * (modesetInfo.depth * PrecisionFactor / (8 * numLanesPerLink * DSC_FACTOR)) + watermarkF) / PrecisionFactor)); + + // + // Bounds check the watermark + // + NvU32 numSymbolsPerLine = (modesetInfo.surfaceWidth * modesetInfo.depth) / (8 * linkConfig.lanes * DSC_FACTOR); + + if (dpInfo->waterMark > 39 || dpInfo->waterMark > numSymbolsPerLine) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: watermark should not be greater than 39.")); + return false; + } + + // + // Clamp the low side + // + if (dpInfo->waterMark < watermarkMinimum) + dpInfo->waterMark = watermarkMinimum; + + //Bits to send BS/BE/Extra symbols due to pixel padding + //Also accounts for enhanced framing. + NvU32 BlankingBits = 3*8*numLanesPerLink + (linkConfig.enhancedFraming ? 3*8*numLanesPerLink : 0); + + //VBID/MVID/MAUD sent 4 times all the time + BlankingBits += 3*8*4; + + NvU32 surfaceWidthPerLink = modesetInfo.surfaceWidth; + + //Extra bits sent due to pixel steering + NvU32 PixelSteeringBits = (surfaceWidthPerLink % numLanesPerLink) ? (((numLanesPerLink - surfaceWidthPerLink % numLanesPerLink) * modesetInfo.depth) / DSC_FACTOR) : 0; + + BlankingBits += PixelSteeringBits; + NvU64 NumBlankingLinkClocks = (NvU64)BlankingBits * PrecisionFactor / (8 * numLanesPerLink); + NvU32 MinHBlank = (NvU32)(NumBlankingLinkClocks * modesetInfo.pixelClockHz/ linkConfig.minRate / PrecisionFactor); + MinHBlank += 12; + + if (MinHBlank > modesetInfo.rasterWidth - modesetInfo.surfaceWidth) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Blanking Width is smaller than minimum permissible value.")); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Minimum Horizontal Active Width <= 60 not supported.")); + return false; + } + + + NvS32 hblank_symbols = (NvS32)(((NvU64)(modesetInfo.rasterWidth - modesetInfo.surfaceWidth - MinHBlank) * linkConfig.minRate) / modesetInfo.pixelClockHz); + + //reduce HBlank Symbols to account for secondary data packet + hblank_symbols -= 1; //Stuffer latency to send BS + hblank_symbols -= 3; //SPKT latency to send data to stuffer + + hblank_symbols -= numLanesPerLink == 1 ? 9 : numLanesPerLink == 2 ? 6 : 3; + + dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols; + + // + // Audio IMP calculations + // + + // From dev_disp.ref: + + // The packet generation logic needs to know the length of the hblank period. If there is no room + // in the current hblank for a new packet, it will be delayed until the next blanking period. 
This + // field should be programmed during the second Supervisor interrupt based on the new raster + // dimensions. + + // ... + + // -------------------------------------- + // The following formulas can be used to calculate the maximum audio sampling rate that can + // be supported by DisplayPort given the current raster dimensions. DisplayPort has much more + // bandwidth during blanking periods than HDMI has, so hblank size is less of an issue. + + // ... + + // Size of a packet for 2ch audio = 20 symbols (up to 2 samples) + // Size of a packet for 8ch audio = 40 symbols + // Size of an audio packet header plus control symbols = 2*#lanes + 8 symbols (assuming < 32 samples per line) + // number of packets/hblank for 2ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 20) + // number of packets/hblank for 8ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 40) + + // Maximum audio sample rate possible: + // number of audio samples/line = SetRasterSize.Width * audio_fs / pclk + // number of audio packets needed for 2ch audio = Ceiling(SetRasterSize.Width * audio_fs / (pclk*2)) + // number of audio packets needed for 3-8ch audio = SetRasterSize.Width * audio_fs / pclk + + // If number of audio packets needed > number of packets/hblank, then you cannot support that audio frequency + + // Note that the hBlankSym calculated is per lane. So the number of symbols available for audio is + // (number of lanes * hBlankSym). + // The calculation of audio packets per Hblank needs to account for the following - + // 2 symbols for SS and SE; 8 symbols for header; and additional 2 symbols to account for actual values used by HW. + // -------------------------------------- + + if (modesetInfo.twoChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 twoChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 20); + + NvU32 twoChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz * 2); + + if (twoChannelAudioPackets > twoChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + if (modesetInfo.eightChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 eightChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 40); + + NvU32 eightChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + if (eightChannelAudioPackets > eightChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + + // Refer to dev_disp.ref for more information. + // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1; + // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 + if (modesetInfo.surfaceWidth < 40) + { + vblank_symbols = 0; + } + else + { + vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 40) * linkConfig.minRate) / modesetInfo.pixelClockHz) - 1; + + vblank_symbols -= numLanesPerLink == 1 ? 39 : numLanesPerLink == 2 ? 
21 : 12; + } + + dpInfo->vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols; + + return true; +} + +bool DisplayPort::isModePossibleSSTWithFEC +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo, + bool bUseIncreasedWatermarkLimits +) +{ + // + // This function is for single stream only! + // Refer to Bug 200406501 and 200401850 for algorithm + // + DP_ASSERT( !linkConfig.multistream ); + + unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST; + unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT; + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + + if(bUseIncreasedWatermarkLimits) + { + watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST; + watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT; + } + + if(!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK or DEPTH sent by the client "); + return false; + } + // number of link clocks per line. + int vblank_symbols = 0; + NvU64 PrecisionFactor, ratioF, watermarkF; + NvS32 w0, s; + + NvU32 numLanesPerLink = linkConfig.lanes; + + DP_ASSERT(!linkConfig.multistream && "MST!"); + + // Check if we have a valid laneCount as currently we support only up to 4-lanes + if (!IS_VALID_LANECOUNT(linkConfig.lanes)) + { + // + // Print debug message and Assert. All calculations assume a max of 8 lanes + // & any increase in lanes should cause these calculation to be updated + // + DP_LOG(("NVRM: %s: ERROR: LaneCount - %d is not supported for waterMark calculations.", + __FUNCTION__, linkConfig.lanes)); + DP_LOG(("Current support is only up to 4-Lanes & any change/increase in supported lanes " + "should be reflected in waterMark calculations algorithm. " + "Ex: See calc for minHBlank variable below")); + + DP_ASSERT(0); + return false; + } + + if ((modesetInfo.pixelClockHz * modesetInfo.depth) >= (8 * linkConfig.minRate * linkConfig.lanes * DSC_FACTOR)) + { + return false; + } + + // + // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with + // 0 active symbols. This may cause HW hang. Bug 200379426 + // + if ((modesetInfo.bEnableDsc) && + ((modesetInfo.pixelClockHz * modesetInfo.depth) < ((8 * linkConfig.minRate * linkConfig.lanes * DSC_FACTOR) / 64))) + { + return false; + } + + // + // Perform the SST calculation. + // For auto mode the watermark calculation does not need to track accumulated error the + // formulas for manual mode will not work. So below calculation was extracted from the DTB. + // + dpInfo->tuSize = 64; + PrecisionFactor = 100000; + ratioF = ((NvU64)modesetInfo.pixelClockHz * modesetInfo.depth * PrecisionFactor) / DSC_FACTOR; + + ratioF /= 8 * (NvU64)linkConfig.minRate * linkConfig.lanes; + + if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below + return false; + + watermarkF = (ratioF * dpInfo->tuSize * (PrecisionFactor - ratioF)) / PrecisionFactor; + + w0 = (8 / linkConfig.lanes); + if (linkConfig.bEnableFEC) + { + s = (linkConfig.lanes == 1) ? 
15 : 10; + } + else + { + s = 3 - w0; + } + + dpInfo->waterMark = (unsigned)(watermarkAdjust + ((3 * (modesetInfo.depth * PrecisionFactor / (8 * numLanesPerLink * DSC_FACTOR)) + watermarkF) / PrecisionFactor) + w0 + 3); + + s = ((NvS32)ratioF * s); + + dpInfo->waterMark = (unsigned)((NvS32)dpInfo->waterMark + (s / (NvS32)PrecisionFactor)); + + // + // Bounds check the watermark + // + NvU32 numSymbolsPerLine = (modesetInfo.surfaceWidth * modesetInfo.depth) / (8 * linkConfig.lanes * DSC_FACTOR); + + if (dpInfo->waterMark > numSymbolsPerLine) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: watermark = %d should not be greater than numSymbolsPerLine = %d.", dpInfo->waterMark, numSymbolsPerLine)); + return false; + } + + // + // Clamp the low side + // + if (dpInfo->waterMark < watermarkMinimum) + dpInfo->waterMark = watermarkMinimum; + + unsigned MinHBlank = 0; + unsigned MinHBlankFEC = 0; + NvU32 BlankingBits = 0; + NvU32 BlankingSymbolsPerLane = 0; + + BlankingBits = (3U * 8U * 4U) + (2U * 8U * numLanesPerLink); + + if (modesetInfo.bEnableDsc) + { + NvU32 sliceCount, sliceWidth, chunkSize; + + sliceCount = (modesetInfo.mode == DSC_DUAL) ? 8U : 4U; + sliceWidth = (NvU32)divide_ceil(modesetInfo.surfaceWidth, sliceCount); + chunkSize = (NvU32)divide_ceil(modesetInfo.depth * sliceWidth, 8U * DSC_FACTOR); + + if(((NvU64)(chunkSize + 1U) * sliceCount * modesetInfo.pixelClockHz) < (NvU64)(linkConfig.minRate * numLanesPerLink * modesetInfo.surfaceWidth)) + { + // BW is plenty, this is common case. + //EOC symbols, when BW enough, only last EOC needs to be considered. + BlankingBits += 8U * numLanesPerLink; //+BlankingBits_DSC_EOC + BlankingBits += (chunkSize * 8U) - (sliceWidth * modesetInfo.depth / DSC_FACTOR); //+BlankingBits_DSC_bytePadding, only need to consider last slice + BlankingBits += (NvU32)(sliceCount * 8U * (divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink - chunkSize)); //+BlankingBits_DSC_lane_padding + } + else + { // no extra room in link BW + //EOC symbols, EOC will be accumulated until hblank period. + BlankingBits += (sliceCount * 8U * numLanesPerLink); //+BlankingBits_EOC + //padding, can also use simplified but pessimistic version : BlankingBits += SliceNum * (logic_lanes *8-1); + BlankingBits += (NvU32)(sliceCount * ((divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink * 8U) - (NvU32)(sliceWidth * modesetInfo.depth / DSC_FACTOR))); //+BlankingBits_DSC_padding + } + } + else + { + NvU32 surfaceWidthPerLink = modesetInfo.surfaceWidth; + NvU32 surfaceWidthPerLane = (NvU32)divide_ceil(surfaceWidthPerLink, numLanesPerLink); + + // Padding + BlankingBits += (NvU32)divide_ceil(surfaceWidthPerLane * modesetInfo.depth, 8U) * 8U * numLanesPerLink - (NvU32)(surfaceWidthPerLink * modesetInfo.depth); //+BlankingBits_nonDSC_padding + } + + BlankingSymbolsPerLane = (NvU32)divide_ceil(BlankingBits , (8U * numLanesPerLink)); //in symbols per lane + BlankingSymbolsPerLane += (linkConfig.enhancedFraming ? 
3U : 0U); + + if (linkConfig.bEnableFEC) + { + // + // In worst case, FEC symbols fall into a narrow Hblank period, + // we have to consider this in HBlank checker, see bug 200496977 + // but we don't have to consider this in the calculation of hblank_symbols + // + + MinHBlankFEC = FEC_PARITY_SYM_SST(numLanesPerLink, BlankingSymbolsPerLane); //in symbols + BlankingSymbolsPerLane += MinHBlankFEC; + } + + // BlankingSymbolsPerLane is the MinHBlank in link clock cycles, + MinHBlank = (unsigned)(divide_ceil(BlankingSymbolsPerLane * modesetInfo.pixelClockHz, + linkConfig.peakRate)); //in pclk cycles + MinHBlank += 3U; //add some margin + + NvU32 HBlank = (modesetInfo.rasterWidth - modesetInfo.surfaceWidth); + + if (MinHBlank > HBlank) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Blanking Width is smaller than minimum permissible value.")); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Minimum Horizontal Active Width <= 60 not supported.")); + return false; + } + + NvU32 total_hblank_symbols = (NvS32)divide_ceil((HBlank * linkConfig.peakRate), modesetInfo.pixelClockHz); + NvS32 hblank_symbols = (NvS32)(((NvU64)(HBlank - MinHBlank) * linkConfig.peakRate) / modesetInfo.pixelClockHz); + + if (linkConfig.bEnableFEC) + { + hblank_symbols -= (FEC_PARITY_SYM_SST(numLanesPerLink, total_hblank_symbols)); + hblank_symbols += MinHBlankFEC; + } + + //reduce HBlank Symbols to account for secondary data packet + hblank_symbols -= 1; //Stuffer latency to send BS + hblank_symbols -= 3; //SPKT latency to send data to stuffer + hblank_symbols -= 3; //add some margin + + dpInfo->hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols; + + // + // Audio IMP calculations + // + + // From dev_disp.ref: + + // The packet generation logic needs to know the length of the hblank period. If there is no room + // in the current hblank for a new packet, it will be delayed until the next blanking period. This + // field should be programmed during the second Supervisor interrupt based on the new raster + // dimensions. + + // ... + + // -------------------------------------- + // The following formulas can be used to calculate the maximum audio sampling rate that can + // be supported by DisplayPort given the current raster dimensions. DisplayPort has much more + // bandwidth during blanking periods than HDMI has, so hblank size is less of an issue. + + // ... + + // Size of a packet for 2ch audio = 20 symbols (up to 2 samples) + // Size of a packet for 8ch audio = 40 symbols + // Size of an audio packet header plus control symbols = 2*#lanes + 8 symbols (assuming < 32 samples per line) + // number of packets/hblank for 2ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 20) + // number of packets/hblank for 8ch audio = Floor ((number of free symbols/hblank - (2*#lanes + 8) / 40) + + // Maximum audio sample rate possible: + // number of audio samples/line = SetRasterSize.Width * audio_fs / pclk + // number of audio packets needed for 2ch audio = Ceiling(SetRasterSize.Width * audio_fs / (pclk*2)) + // number of audio packets needed for 3-8ch audio = SetRasterSize.Width * audio_fs / pclk + + // If number of audio packets needed > number of packets/hblank, then you cannot support that audio frequency + + // Note that the hBlankSym calculated is per lane. So the number of symbols available for audio is + // (number of lanes * hBlankSym). 
+ // The calculation of audio packets per Hblank needs to account for the following - + // 2 symbols for SS and SE; 8 symbols for header; and additional 2 symbols to account for actual values used by HW. + // -------------------------------------- + + if (modesetInfo.twoChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < ((2 * numLanesPerLink) + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 twoChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 20); + + NvU32 twoChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.twoChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz * 2); + + if (twoChannelAudioPackets > twoChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + if (modesetInfo.eightChannelAudioHz != 0) + { + if ((dpInfo->hBlankSym * numLanesPerLink) < (2 * numLanesPerLink + 8)) + { + // There aren't enough symbols/hblank available. + return false; + } + + NvU32 eightChannelAudioPacketsPerHBlank = (NvU32)divide_floor(((dpInfo->hBlankSym * numLanesPerLink) - (2 * numLanesPerLink) - 8 - (2 * numLanesPerLink)), 40); + + NvU32 eightChannelAudioPackets = (NvU32)divide_ceil(modesetInfo.eightChannelAudioHz * modesetInfo.rasterWidth, modesetInfo.pixelClockHz); + + if (eightChannelAudioPackets > eightChannelAudioPacketsPerHBlank) + { + // There aren't enough packets/hblank available. + return false; + } + } + + // Refer to dev_disp.ref for more information. + // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1; + // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39 + if (modesetInfo.surfaceWidth < 40) + { + vblank_symbols = 0; + } + else + { + vblank_symbols = (NvS32)(((NvU64)(modesetInfo.surfaceWidth - 3) * linkConfig.peakRate) / modesetInfo.pixelClockHz); + + // + // The active region transmission is delayed because of lane fifo storage. + // compare to the negedge of hblank, all the BE will be delayed by watermark/ratio cycles. + // compare to the posedge of hblank(i.e. the time of sending out BS symbols in vblank period), + // all the BS after active pixels will be delayed by maximum 1.5 TU cycles, + // the delay of the BS will cause the 1st vblank line shorter than expected, + // but it will squeeze hblank period first, + // if hblank is short, the BS will be in hactive period and impact vblank_symbols. + // + + NvS32 squeezed_symbols = (dpInfo->tuSize * 3 / 2) - hblank_symbols; + squeezed_symbols = DP_MAX(squeezed_symbols, 0); + NvS32 msa_symbols = (36 / numLanesPerLink) + 3; + + // + // MSA can't be in the 1st vblank line, except v_front_porch=0 + // if we know v_front_porch != 0, + // we can use MAX(squeezed_symbols, msa_symbols) instead of squeezed_symbols+msa_symbols + // + vblank_symbols -= (squeezed_symbols + msa_symbols); + + if (linkConfig.bEnableFEC) + { + vblank_symbols -= FEC_PARITY_SYM_SST(numLanesPerLink, vblank_symbols); + } + vblank_symbols -= 3U; //add some margin + } + + dpInfo->vBlankSym = (vblank_symbols < 0) ? 
0 : vblank_symbols; + + if (modesetInfo.bEnableDsc) + { + // + // For DSC enabled case, the vblank_symbols must be large enough to accommodate DSC PPS SDP, see bug 2760673 + // For 1 lane, it requires at least 170+13 symbols + // For 2 lane, it requires at least 86+3 symbols + // For 4 lane, it requires at least 44+3 symbols + // normally, no need to check this, except in some small resolution test case. + // + if ((numLanesPerLink == 1U) && (dpInfo->vBlankSym < 183U)) + { + return false; + } + else if ((numLanesPerLink == 2U) && (dpInfo->vBlankSym < 89U)) + { + return false; + } + if ((numLanesPerLink == 4U) && (dpInfo->vBlankSym <47U)) + { + return false; + } + } + + return true; +} + +bool DisplayPort::isModePossibleMSTWithFEC +( + const LinkConfiguration & linkConfig, + const ModesetInfo & modesetInfo, + Watermark * dpInfo +) +{ + // + // This function is for multistream only! + // Refer to Bug 200406501 and 200401850 for algorithm + // + DP_ASSERT(linkConfig.multistream); + + if (!modesetInfo.pixelClockHz || !modesetInfo.depth) + { + DP_ASSERT(0 && "INVALID PIXEL CLOCK and DEPTH sent by the client "); + return false; + } + + if (linkConfig.lanes == 0) + { + DP_ASSERT(0 && "No Active link / link train failed "); + return false; + } + + // depth is multiplied by 16 in case of DSC enable + unsigned DSC_FACTOR = modesetInfo.bEnableDsc ? 16 : 1; + dpInfo->tuSize = 64; + + NvU32 BlankingBits, BlankingSymbolsPerLane; + NvU32 numLanesPerLink = 4U; + NvU32 MinHBlank; + + BlankingBits = (3U * 8U * 4U) + (2U * 8U * numLanesPerLink); + + if(modesetInfo.bEnableDsc) + { + NvU32 sliceCount, sliceWidth, chunkSize; + + sliceCount = (modesetInfo.mode == DSC_DUAL) ? 8U : 4U; + sliceWidth = (NvU32)divide_ceil(modesetInfo.surfaceWidth, sliceCount); + chunkSize = (NvU32)divide_ceil(modesetInfo.depth * sliceWidth, 8U * DSC_FACTOR); + + //EOC symbols, EOC will be accumulated until hblank period. + BlankingBits += (sliceCount * 8U * numLanesPerLink); //+BlankingBits_EOC + //+BlankingBits_DSC_padding + BlankingBits += (NvU32)(sliceCount * ((divide_ceil(chunkSize, numLanesPerLink) * numLanesPerLink * 8U) - (NvU32)(sliceWidth * modesetInfo.depth / DSC_FACTOR))); + } + else + { + NvU32 surfaceWidthPerLane = (NvU32)divide_ceil(modesetInfo.surfaceWidth, numLanesPerLink); + + //Extra bits sent due to pixel steering + BlankingBits = (NvU32)divide_ceil(surfaceWidthPerLane * modesetInfo.depth, 8U) * 8U * numLanesPerLink - (NvU32)(modesetInfo.surfaceWidth * modesetInfo.depth); //+BlankingBits_nonDSC_padding + } + + BlankingSymbolsPerLane = (NvU32)divide_ceil(BlankingBits, (8U * numLanesPerLink)); //in symbols per lane + + MinHBlank = (NvU32)divide_ceil(BlankingSymbolsPerLane * 8U * numLanesPerLink * DSC_FACTOR, modesetInfo.depth); + MinHBlank += 3U; //add some margin + + NvU32 HBlank = (modesetInfo.rasterWidth - modesetInfo.surfaceWidth); + + if (MinHBlank > HBlank) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Blanking Width is smaller than minimum permissible value.")); + return false; + } + + // Bug 702290 - Active Width should be greater than 60 + if (modesetInfo.surfaceWidth <= 60) + { + DP_LOG(("NVRM: %s:", __FUNCTION__)); + DP_LOG(("\t\tERROR: Minimum Horizontal Active Width <= 60 not supported.")); + return false; + } + + // MST can do SDP splitting so all audio configuration are possible. 
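+    // Because the SDPs can be split, the SST-style per-lane hblank/vblank symbol
+    // budgeting above is not required; report zero symbols to the client here.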
+ dpInfo->hBlankSym = 0U; + dpInfo->vBlankSym = 0U; + + return true; +} + +unsigned DisplayPort::pbnForMode(const ModesetInfo & modesetInfo) +{ + // + // Calculate PBN in terms of 54/64 mbyte/sec + // round up by .6% for spread de-rate. Note: if we're not spreading our link + // this MUST still be counted. It's also to allow downstream links to be spread. + // + unsigned pbnForMode = (NvU32)(divide_ceil(modesetInfo.pixelClockHz * modesetInfo.depth * 1006 * 64 / 8, + (NvU64)54000000 *1000)); + + if(modesetInfo.bEnableDsc) + { + // + // When DSC is enabled consider depth will multiplied by 16 and also 3% FEC Overhead + // as per DP1.4 spec + pbnForMode = (NvU32)(divide_ceil(pbnForMode * 100, 97 * DSC_DEPTH_FACTOR)); + } + + return pbnForMode; +} diff --git a/src/common/displayport/src/dptestutil/dp_testmessage.cpp b/src/common/displayport/src/dptestutil/dp_testmessage.cpp new file mode 100644 index 000000000..5813e3fe5 --- /dev/null +++ b/src/common/displayport/src/dptestutil/dp_testmessage.cpp @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* DisplayPort *******************************\ +* * +* Module: dp_testmessage.cpp * +* Used for DP Test Utility * +* * +\***************************************************************************/ +#include "dp_internal.h" +#include "dp_auxdefs.h" +#include "dp_messages.h" +#include "dp_testmessage.h" +#include "dp_connectorimpl.h" +using namespace DisplayPort; + +// the dp lib status must be set to DONE indicating there is no pending message +void DPTestMessageCompletion::messageFailed(MessageManager::Message * from, NakData * data) +{ + parent->testMessageStatus = DP_TESTMESSAGE_REQUEST_STATUS_DONE; + + { + { + DP_ASSERT(0 && "unknown msg type when msg failed"); + } + } +} + +void DPTestMessageCompletion::messageCompleted(MessageManager::Message * from) +{ + parent->testMessageStatus = DP_TESTMESSAGE_REQUEST_STATUS_DONE; + + { + { + DP_ASSERT(0 && "unknown msg type when msg complete"); + } + } +} + +MessageManager * TestMessage::getMessageManager() +{ + return pMsgManager; +} + +// +// The function request that the request struct size should be check first to ensure the right structure is used and +// no BSOD will happen. +// +// For each request type, the DP lib status for that type should be check in case of request conflict. 
At one time, +// for each request type, only ONE instance could be processed +// +DP_TESTMESSAGE_STATUS TestMessage::sendDPTestMessage +( + void *pBuffer, + NvU32 requestSize, + NvU32 *pDpStatus +) +{ + DP_ASSERT(pBuffer); + DP_TESTMESSAGE_REQUEST_TYPE type; + + // the buffer must contain a requestType field at least + if (requestSize < sizeof(DP_TESTMESSAGE_REQUEST_TYPE)) + return DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM; + + type = *(DP_TESTMESSAGE_REQUEST_TYPE *)pBuffer; + + if (!isValidStruct(type, requestSize)) + return DP_TESTMESSAGE_STATUS_ERROR_INVALID_PARAM; + + *pDpStatus = DP_TESTMESSAGE_REQUEST_STATUS_ERROR; + return DP_TESTMESSAGE_STATUS_ERROR; +} + diff --git a/src/common/inc/displayport/displayport.h b/src/common/inc/displayport/displayport.h new file mode 100644 index 000000000..1f9f8d061 --- /dev/null +++ b/src/common/inc/displayport/displayport.h @@ -0,0 +1,628 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DISPLAYPORT_H_ +#define _DISPLAYPORT_H_ + +#include "nvmisc.h" +#include "dpcd.h" +#include "dpcd14.h" +#include "dpcd20.h" + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: DISPLAYPORT.H * +* Defines DISPLAYPORT V1.2 * +* * +\***************************************************************************/ + +// Displayport interoperability with HDMI dongle i2c addr +#define DP2HDMI_DONGLE_I2C_ADDR 0x80 +#define DP2HDMI_DONGLE_DDC_BUFFER_ID_LEN 16 +#define DP2HDMI_DONGLE_CAP_BUFFER_LEN 32 + +// Offset to read the dongle identifier +#define NV_DP2HDMI_DONGLE_IDENTIFIER (0x00000010) +#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_REV 2:0 +#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_REV_TYPE2 (0x00000000) +#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_ID 7:4 +#define NV_DP2HDMI_DONGLE_IDENTIFIER_ADAPTER_ID_TYPE2 (0x0000000A) + +// Offset to read the dongle TMDS clock rate +#define NV_DP2HDMI_DONGLE_TMDS_CLOCK_RATE (0x0000001D) + +// HDMI dongle types +#define DP2HDMI_DONGLE_TYPE_1 0x1 +#define DP2HDMI_DONGLE_TYPE_2 0x2 + +// HDMI dongle frequency limits +#define DP2HDMI_DONGLE_TYPE_1_PCLK_LIMIT 165*1000*1000 +#define DP2HDMI_DONGLE_TYPE_2_PCLK_LIMIT 300*1000*1000 + +#define DPCD_VERSION_12 0x12 +#define DPCD_VERSION_13 0x13 +#define DPCD_VERSION_14 0x14 + +#define DP_LINKINDEX_0 0x0 +#define DP_LINKINDEX_1 0x1 + +// Two Head One OR +#define NV_PRIMARY_HEAD_INDEX_0 0 +#define NV_SECONDARY_HEAD_INDEX_1 1 +#define NV_PRIMARY_HEAD_INDEX_2 2 +#define NV_SECONDARY_HEAD_INDEX_3 3 + +typedef enum +{ + displayPort_Lane0 = 0, + displayPort_Lane1 = 1, + displayPort_Lane2 = 2, + displayPort_Lane3 = 3, + displayPort_Lane4 = 4, + displayPort_Lane5 = 5, + displayPort_Lane6 = 6, + displayPort_Lane7 = 7, + displayPort_LaneSupported +} DP_LANE; + +typedef enum +{ + laneCount_0 = 0x0, + laneCount_1 = 0x1, + laneCount_2 = 0x2, + laneCount_4 = 0x4, + laneCount_8 = 0x8, + laneCount_Supported +} DP_LANE_COUNT; + +typedef enum +{ + linkBW_1_62Gbps = 0x06, + linkBW_2_16Gbps = 0x08, + linkBW_2_43Gbps = 0x09, + linkBW_2_70Gbps = 0x0A, + linkBW_3_24Gbps = 0x0C, + linkBW_4_32Gbps = 0x10, + linkBW_5_40Gbps = 0x14, + linkBW_8_10Gbps = 0x1E, + linkBW_Supported +} DP_LINK_BANDWIDTH; + +typedef enum +{ + linkSpeedId_1_62Gbps = 0x00, + linkSpeedId_2_70Gbps = 0x01, + linkSpeedId_5_40Gbps = 0x02, + linkSpeedId_8_10Gbps = 0x03, + linkSpeedId_2_16Gbps = 0x04, + linkSpeedId_2_43Gbps = 0x05, + linkSpeedId_3_24Gbps = 0x06, + linkSpeedId_4_32Gbps = 0x07, + linkSpeedId_Supported +} DP_LINK_SPEED_INDEX; + + +typedef enum +{ + postCursor2_Level0 = 0, + postCursor2_Level1 = 1, + postCursor2_Level2 = 2, + postCursor2_Level3 = 3, + postCursor2_Supported +} DP_POSTCURSOR2; + +typedef enum +{ + preEmphasis_Disabled = 0, + preEmphasis_Level1 = 1, + preEmphasis_Level2 = 2, + preEmphasis_Level3 = 3, + preEmphasis_Supported +} DP_PREEMPHASIS; + +typedef enum +{ + driveCurrent_Level0 = 0, + driveCurrent_Level1 = 1, + driveCurrent_Level2 = 2, + driveCurrent_Level3 = 3, + driveCurrent_Supported +} DP_DRIVECURRENT; + +typedef enum +{ + trainingPattern_Disabled = 0x0, + trainingPattern_1 = 0x1, + trainingPattern_2 = 0x2, + trainingPattern_3 = 0x3, + trainingPattern_4 = 0xB +} DP_TRAININGPATTERN; + +typedef enum +{ + dpOverclock_Percentage_0 = 0, + dpOverclock_Percentage_10 = 10, + dpOverclock_Percentage_20 = 20 +}DP_OVERCLOCKPERCENTAGE; + +typedef enum +{ + dpColorFormat_RGB = 0, + dpColorFormat_YCbCr444 = 0x1, + dpColorFormat_YCbCr422 = 0x2, + dpColorFormat_YCbCr420 = 0x3, + 
dpColorFormat_Unknown = 0xF +} DP_COLORFORMAT; + +typedef enum +{ + dp_pktType_VideoStreamconfig = 0x7, + dp_pktType_CeaHdrMetaData = 0x21, + dp_pktType_SRInfoFrame = 0x7f, // Self refresh infoframe for eDP enter/exit self refresh, SRS 1698 + dp_pktType_Cea861BInfoFrame = 0x80, + dp_pktType_VendorSpecInfoFrame = 0x81, + dp_pktType_AviInfoFrame = 0x82, + dp_pktType_AudioInfoFrame = 0x84, + dp_pktType_SrcProdDescInfoFrame = 0x83, + dp_pktType_MpegSrcInfoFrame = 0x85, + dp_pktType_DynamicRangeMasteringInfoFrame = 0x87 +} DP_PACKET_TYPE; + +typedef enum +{ + DSC_SLICES_PER_SINK_1 = 1, + DSC_SLICES_PER_SINK_2 = 2, + DSC_SLICES_PER_SINK_4 = 4, + DSC_SLICES_PER_SINK_6 = 6, + DSC_SLICES_PER_SINK_8 = 8, + DSC_SLICES_PER_SINK_10 = 10, + DSC_SLICES_PER_SINK_12 = 12, + DSC_SLICES_PER_SINK_16 = 16, + DSC_SLICES_PER_SINK_20 = 20, + DSC_SLICES_PER_SINK_24 = 24 +} DscSliceCount; + +typedef enum +{ + DSC_BITS_PER_COLOR_MASK_8 = 1, + DSC_BITS_PER_COLOR_MASK_10 = 2, + DSC_BITS_PER_COLOR_MASK_12 = 4 +}DscBitsPerColorMask; + +enum DSC_MODE +{ + DSC_SINGLE, + DSC_DUAL, + DSC_DROP, + DSC_MODE_NONE +}; + +typedef enum +{ + BITS_PER_PIXEL_PRECISION_1_16 = 0, + BITS_PER_PIXEL_PRECISION_1_8 = 1, + BITS_PER_PIXEL_PRECISION_1_4 = 2, + BITS_PER_PIXEL_PRECISION_1_2 = 3, + BITS_PER_PIXEL_PRECISION_1 = 4 +}BITS_PER_PIXEL_INCREMENT; + +typedef enum +{ + NV_DP_FEC_UNCORRECTED = 0, + NV_DP_FEC_CORRECTED = 1, + NV_DP_FEC_BIT = 2, + NV_DP_FEC_PARITY_BLOCK = 3, + NV_DP_FEC_PARITY_BIT = 4 +}FEC_ERROR_COUNTER; + +typedef struct DscCaps +{ + NvBool bDSCSupported; + NvBool bDSCPassThroughSupported; + unsigned versionMajor, versionMinor; + unsigned rcBufferBlockSize; + unsigned rcBuffersize; + unsigned maxSlicesPerSink; + unsigned lineBufferBitDepth; + NvBool bDscBlockPredictionSupport; + unsigned maxBitsPerPixelX16; + unsigned sliceCountSupportedMask; + + struct + { + NvBool bRgb; + NvBool bYCbCr444; + NvBool bYCbCrSimple422; + NvBool bYCbCrNative422; + NvBool bYCbCrNative420; + }dscDecoderColorFormatCaps; + + unsigned dscDecoderColorDepthMask; + unsigned dscPeakThroughputMode0; + unsigned dscPeakThroughputMode1; + unsigned dscMaxSliceWidth; + + BITS_PER_PIXEL_INCREMENT dscBitsPerPixelIncrement; +} DscCaps; + +typedef struct GpuDscCrc +{ + NvU16 gpuCrc0; + NvU16 gpuCrc1; + NvU16 gpuCrc2; +} gpuDscCrc; + +typedef struct SinkDscCrc +{ + NvU16 sinkCrc0; + NvU16 sinkCrc1; + NvU16 sinkCrc2; +} sinkDscCrc; + +typedef struct +{ + NvBool bSourceControlModeSupported; + NvBool bConcurrentLTSupported; + NvU8 maxTmdsClkRate; + NvU8 maxBpc; + NvU8 maxHdmiLinkBandwidthGbps; +} PCONCaps; + +typedef enum +{ + PCON_HDMI_LINK_BW_FRL_9GBPS = 0, + PCON_HDMI_LINK_BW_FRL_18GBPS, + PCON_HDMI_LINK_BW_FRL_24GBPS, + PCON_HDMI_LINK_BW_FRL_32GBPS, + PCON_HDMI_LINK_BW_FRL_40GBPS, + PCON_HDMI_LINK_BW_FRL_48GBPS, + PCON_HDMI_LINK_BW_FRL_INVALID +} PCONHdmiLinkBw; + +typedef enum +{ + NV_DP_PCON_CONTROL_STATUS_SUCCESS = 0, + NV_DP_PCON_CONTROL_STATUS_ERROR_TIMEOUT = 0x80000001, + NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_LT_FAILURE = 0x80000002, + NV_DP_PCON_CONTROL_STATUS_ERROR_FRL_NOT_SUPPORTED = 0x80000003, + NV_DP_PCON_CONTROL_STATUS_ERROR_GENERIC = 0x8000000F +} NV_DP_PCON_CONTROL_STATUS; +// +// Poll HDMI-Link Status change and FRL Ready. +// Spec says it should be done in 500ms, we give it 20% extra time: +// 60 times with interval 10ms. +// +#define NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_THRESHOLD (60) +#define NV_PCON_SOURCE_CONTROL_MODE_TIMEOUT_INTERVAL_MS (10) +// +// Poll HDMI-Link Status change IRQ and Link Status. 
+// Spec says it should be done in 250ms, we give it 20% extra time: +// 30 times with interval 10ms. +// +#define NV_PCON_FRL_LT_TIMEOUT_THRESHOLD (30) +#define NV_PCON_FRL_LT_TIMEOUT_INTERVAL_MS (10) + +typedef struct _PCONLinkControl +{ + struct + { + // This struct is being passed in for assessPCONLink I/F + NvU32 bAssessLink : 1; + + // Specify if client wants to use src control - set it false DPLib can just do DP LT alone. + // By default it should be true. + NvU32 bSourceControlMode : 1; + + // Default is sequential mode, set this to choose concurrent mode + NvU32 bConcurrentMode : 1; + + // Default is normal link training mode (stop once FRL-LT succeed). + // Set this to link train all requested FRL Bw in allowedFrlBwMask. + NvU32 bExtendedLTMode : 1; + + // Keep PCON links (DP and FRL link) alive + NvU32 bKeepPCONLinkAlive : 1; + + // Default DPLib will fallback to autonomous mode and perform DP assessLink. + NvU32 bSkipFallback : 1; + } flags; + + // Input: Clients use this to specify the FRL BW PCON should try. + NvU32 frlHdmiBwMask; + + struct + { + NV_DP_PCON_CONTROL_STATUS status; + PCONHdmiLinkBw maxFrlBwTrained; + NvU32 trainedFrlBwMask; + } result; +} PCONLinkControl; + +static NV_INLINE PCONHdmiLinkBw getMaxFrlBwFromMask(NvU32 frlRateMask) +{ + if (frlRateMask == 0) + { + // Nothing is set. Assume TMDS + return PCON_HDMI_LINK_BW_FRL_INVALID; + } + + // find highest set bit (destructive operation) + HIGHESTBITIDX_32(frlRateMask); + + return (PCONHdmiLinkBw)frlRateMask; +} + +/* + EDP VESA PSR defines +*/ + +// PSR state transitions +typedef enum +{ + vesaPsrStatus_Inactive = 0, + vesaPsrStatus_Transition2Active = 1, + vesaPsrStatus_DisplayFromRfb = 2, + vesaPsrStatus_CaptureAndDisplay = 3, + vesaPsrStatus_Transition2Inactive = 4, + vesaPsrStatus_Undefined5 = 5, + vesaPsrStatus_Undefined6 = 6, + vesaPsrStatus_SinkError = 7 +} vesaPsrState; + +typedef struct VesaPsrConfig +{ + NvU8 psrCfgEnable : 1; + NvU8 srcTxEnabledInPsrActive : 1; + NvU8 crcVerifEnabledInPsrActive : 1; + NvU8 frameCaptureSecondActiveFrame : 1; + NvU8 selectiveUpdateOnSecondActiveline : 1; + NvU8 enableHpdIrqOnCrcMismatch : 1; + NvU8 enablePsr2 : 1; + NvU8 reserved : 1; +} vesaPsrConfig; + +typedef struct VesaPsrDebugStatus +{ + NvBool lastSdpPsrState; + NvBool lastSdpUpdateRfb; + NvBool lastSdpCrcValid; + NvBool lastSdpSuValid; + NvBool lastSdpFirstSURcvd; + NvBool lastSdpLastSURcvd; + NvBool lastSdpYCoordValid; + NvU8 maxResyncFrames; + NvU8 actualResyncFrames; +} vesaPsrDebugStatus; + +typedef struct VesaPsrErrorStatus +{ + NvU8 linkCrcError : 1; + NvU8 rfbStoreError : 1; + NvU8 vscSdpError : 1; + NvU8 rsvd : 5; +} vesaPsrErrorStatus; + +typedef struct VesaPsrEventIndicator +{ + NvU8 sinkCapChange : 1; + NvU8 rsvd : 7; +} vesaPsrEventIndicator; + +#pragma pack(1) +typedef struct VesaPsrSinkCaps +{ + NvU8 psrVersion; + NvU8 linkTrainingRequired : 1; + NvU8 psrSetupTime : 3; + NvU8 yCoordinateRequired : 1; + NvU8 psr2UpdateGranularityRequired : 1; + NvU8 reserved : 2; + NvU16 suXGranularity; + NvU8 suYGranularity; +} vesaPsrSinkCaps; +#pragma pack() + +typedef struct PanelReplayCaps +{ + NvBool panelReplaySupported; +} panelReplayCaps; + +typedef struct PanelReplayConfig +{ + NvBool enablePanelReplay; +} panelReplayConfig; + +// Multiplier constant to get link frequency in KHZ +// Maximum link rate of Main Link lanes = Value x 270M. +// To get it to KHz unit, we need to multiply 270K. 
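+// e.g. linkBW_5_40Gbps (0x14 = 20) * 270,000 KHz = 5,400,000 KHz, i.e. 5.40 Gbps per lane.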
+#define DP_LINK_BW_FREQUENCY_MULTIPLIER_KHZ (270*1000) + +// Multiplier constant to get link rate table's in KHZ +#define DP_LINK_RATE_TABLE_MULTIPLIER_KHZ 200 + +// +// Multiplier constant to get link frequency (multiplier of 270MHz) in MBps +// a * 270 * 1000 * 1000(270Mhz) * (8 / 10)(8b/10b) / 8(Byte) +// = a * 27000000 +// +#define DP_LINK_BW_FREQ_MULTI_MBPS 27000000 + +// +// Get link rate in multiplier of 270MHz from KHz: +// a * 1000(KHz) / 270 * 1000 * 1000(270Mhz) +// +#define LINK_RATE_KHZ_TO_MULTP(a) ((a) / 270000) + +// +// Get link rate in MBps from KHz: +// a * 1000 * (8 / 10)(8b/10b) / 8(Byte) +// = a * 100 +// +#define LINK_RATE_KHZ_TO_MBPS(a) ((a) * 100) + +#define DP_MAX_LANES 8 // This defines the maximum number of lanes supported on a chip. +#define DP_MAX_LANES_PER_LINK 4 // This defines the maximum number of lanes per link in a chip. +#define DP_AUX_CHANNEL_MAX_BYTES 16 +#define DP_CLOCK_RECOVERY_TOT_TRIES 10 +#define DP_CLOCK_RECOVERY_MAX_TRIES 5 +#define DP_CH_EQ_MAX_RETRIES 5 +#define DP_LT_MAX_FOR_MST_MAX_RETRIES 3 +#define DP_READ_EDID_MAX_RETRIES 7 +#define DP_AUX_CHANNEL_DEFAULT_DEFER_MAX_TRIES 7 +#define DP_AUX_CHANNEL_TIMEOUT_MAX_TRIES 2 +#define DP_SET_POWER_D0_NORMAL_MAX_TRIES 3 +#define DP_SW_AUTO_READ_REQ_SIZE 6 +#define NV_DP_RBR_FALLBACK_MAX_TRIES 3 + +#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_DEFAULT_MS 1 + +#define DP_AUX_CHANNEL_TIMEOUT_WAITIDLE 400 // source is required to wait at least 400us before it considers the AUX transaction to have timed out. +#define DP_AUX_CHANNEL_TIMEOUT_VALUE_DEFAULT 400 +#define DP_AUX_CHANNEL_TIMEOUT_VALUE_MAX 3200 + +#define DP_PHY_REPEATER_INDEX_FOR_SINK 0xFFFFFFFF + +#define DP_MESSAGEBOX_SIZE 48 +#define DP_POST_LT_ADJ_REQ_LIMIT 6 +#define DP_POST_LT_ADJ_REQ_TIMER 200000 + +#define DP_AUX_HYBRID_TIMEOUT 600 +#define DP_AUX_SEMA_ACQUIRE_TIMEOUT 20000 + +#define DP_CONFIG_WATERMARK_ADJUST 2 +#define DP_CONFIG_WATERMARK_LIMIT 20 +#define DP_CONFIG_INCREASED_WATERMARK_ADJUST 8 +#define DP_CONFIG_INCREASED_WATERMARK_LIMIT 22 + +#define NV_DP_MSA_PROPERTIES_MISC1_STEREO 2:1 + +#define DP_LANE_STATUS_ARRAY_SIZE ((displayPort_LaneSupported + 1) / 2) +#define DP_LANE_STATUS_ARRAY_INDEX(lane) ((lane) < displayPort_LaneSupported ? ((lane) / 2) : 0) + +#define IS_VALID_LANECOUNT(val) (((NvU32)(val)==0) || ((NvU32)(val)==1) || \ + ((NvU32)(val)==2) || ((NvU32)(val)==4) || \ + ((NvU32)(val)==8)) + +#define IS_STANDARD_LINKBW(val) (((NvU32)(val)==linkBW_1_62Gbps) || \ + ((NvU32)(val)==linkBW_2_70Gbps) || \ + ((NvU32)(val)==linkBW_5_40Gbps) || \ + ((NvU32)(val)==linkBW_8_10Gbps)) + +#define IS_INTERMEDIATE_LINKBW(val) (((NvU32)(val)==linkBW_2_16Gbps) || \ + ((NvU32)(val)==linkBW_2_43Gbps) || \ + ((NvU32)(val)==linkBW_3_24Gbps) || \ + ((NvU32)(val)==linkBW_4_32Gbps)) + +#define IS_VALID_LINKBW(val) (IS_STANDARD_LINKBW(val) || \ + IS_INTERMEDIATE_LINKBW(val)) +// +// Phy Repeater count read from DPCD offset F0002h is an +// 8 bit value where each bit represents the total count +// 80h = 1 repeater, 40h = 2 , 20h = 3 ... 
01h = 8 +// This function maps it to decimal system +// +static NV_INLINE NvU32 mapPhyRepeaterVal(NvU32 value) +{ + switch (value) + { + case NV_DPCD14_PHY_REPEATER_CNT_VAL_0: + return 0; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_1: + return 1; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_2: + return 2; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_3: + return 3; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_4: + return 4; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_5: + return 5; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_6: + return 6; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_7: + return 7; + case NV_DPCD14_PHY_REPEATER_CNT_VAL_8: + return 8; + default: + return 0; + } +} + +// HDCP specific definitions + +#define HDCP22_RTX_SIMPLE_PATTERN 0x12345678 +#define HDCP22_TX_CAPS_PATTERN_BIG_ENDIAN {0x02, 0x00, 0x00} + +#define DP_MST_HEAD_TO_STREAMID(head, pipeId, numHeads) ((head) + 1 + (pipeId) * (numHeads)) +#define DP_MST_STREAMID_TO_HEAD(streamid, pipeId, numHeads) ((streamid) - 1 - ((pipeId) * (numHeads))) +#define DP_MST_STREAMID_TO_PIPE(streamid, head, numHeads) (((streamid) - (head) - 1) / (numHeads)) + +typedef enum +{ + NV_DP_SBMSG_REQUEST_ID_GET_MESSAGE_TRANSACTION_VERSION = 0x00, + NV_DP_SBMSG_REQUEST_ID_LINK_ADDRESS = 0x01, + NV_DP_SBMSG_REQUEST_ID_CONNECTION_STATUS_NOTIFY = 0x02, + + NV_DP_SBMSG_REQUEST_ID_ENUM_PATH_RESOURCES = 0x10, + NV_DP_SBMSG_REQUEST_ID_ALLOCATE_PAYLOAD = 0x11, + NV_DP_SBMSG_REQUEST_ID_QUERY_PAYLOAD = 0x12, + NV_DP_SBMSG_REQUEST_ID_RESOURCE_STATUS_NOTIFY = 0x13, + NV_DP_SBMSG_REQUEST_ID_CLEAR_PAYLOAD_ID_TABLE = 0x14, + + NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_READ = 0x20, + NV_DP_SBMSG_REQUEST_ID_REMOTE_DPCD_WRITE = 0x21, + NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_READ = 0x22, + NV_DP_SBMSG_REQUEST_ID_REMOTE_I2C_WRITE = 0x23, + NV_DP_SBMSG_REQUEST_ID_POWER_UP_PHY = 0x24, + NV_DP_SBMSG_REQUEST_ID_POWER_DOWN_PHY = 0x25, + + NV_DP_SBMSG_REQUEST_ID_SINK_EVENT_NOTIFY = 0x30, + NV_DP_SBMSG_REQUEST_ID_QUERY_STREAM_ENCRYPTION_STATUS = 0x38, + + NV_DP_SBMSG_REQUEST_ID_UNDEFINED = 0xFF, +} NV_DP_SBMSG_REQUEST_ID; + +// FEC + +#define NV_DP_FEC_FLAGS_SELECT_ALL 0x7 +#define NV_DP_ERROR_COUNTERS_PER_LANE 5 +#define NV_DP_MAX_NUM_OF_LANES 4 +#define NV_DP_FEC_ERROR_COUNT_INVALID 0xbadf +#define NV_DP_UNCORRECTED_ERROR NV_DP_FEC_UNCORRECTED : NV_DP_FEC_UNCORRECTED +#define NV_DP_CORRECTED_ERROR NV_DP_FEC_CORRECTED : NV_DP_FEC_CORRECTED +#define NV_DP_BIT_ERROR NV_DP_FEC_BIT : NV_DP_FEC_BIT +#define NV_DP_PARITY_BLOCK_ERROR NV_DP_FEC_PARITY_BLOCK : NV_DP_FEC_PARITY_BLOCK +#define NV_DP_PARITY_BIT_ERROR NV_DP_FEC_PARITY_BIT : NV_DP_FEC_PARITY_BIT +#define NV_DP_UNCORRECTED_ERROR_NO 0 +#define NV_DP_UNCORRECTED_ERROR_YES 1 +#define NV_DP_CORRECTED_ERROR_NO 0 +#define NV_DP_CORRECTED_ERROR_YES 1 +#define NV_DP_BIT_ERROR_NO 0 +#define NV_DP_BIT_ERROR_YES 1 +#define NV_DP_PARITY_BLOCK_ERROR_NO 0 +#define NV_DP_PARITY_BLOCK_ERROR_YES 1 +#define NV_DP_PARITY_BIT_ERROR_NO 0 +#define NV_DP_PARITY_BIT_ERROR_YES 1 + + +#endif // #ifndef _DISPLAYPORT_H_ diff --git a/src/common/inc/displayport/dpcd.h b/src/common/inc/displayport/dpcd.h new file mode 100644 index 000000000..cb26349df --- /dev/null +++ b/src/common/inc/displayport/dpcd.h @@ -0,0 +1,1501 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DPCD_H_ +#define _DPCD_H_ + +#define NV_DPCD_CAP_LEGACY_BASE (0x00000000) + +#define NV_DPCD_REV (0x00000000) /* R-XUR */ +#define NV_DPCD_REV_MAJOR 7:4 /* R-XUF */ +#define NV_DPCD_REV_MAJOR_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD_REV_MINOR_0 (0x00000000) /* R-XUV */ +#define NV_DPCD_REV_MINOR_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_REV_MINOR_2 (0x00000002) /* R-XUV */ +#define NV_DPCD_REV_MINOR_4 (0x00000004) /* R-XUV */ + +#define NV_DPCD_MAX_LINK_BANDWIDTH (0x00000001) /* R-XUR */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL 4:0 /* R-XUF */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_1_62_GBPS (0x00000006) /* R-XUV */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_2_70_GBPS (0x0000000a) /* R-XUV */ +#define NV_DPCD_MAX_LINK_BANDWIDTH_VAL_5_40_GBPS (0x00000014) /* R-XUV */ + +#define NV_DPCD_MAX_LANE_COUNT (0x00000002) /* R-XUR */ +#define NV_DPCD_MAX_LANE_COUNT_LANE 4:0 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_1 (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_2 (0x00000002) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_4 (0x00000004) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_LANE_8 (0x00000008) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT 5:5 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING 7:7 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_ENHANCED_FRAMING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED 6:6 /* R-XUF */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_LANE_COUNT_TPS3_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_MAX_DOWNSPREAD (0x00000003) /* R-XUR */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL 0:0 /* R-XUF */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL_NONE (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_VAL_0_5_PCT (0x00000001) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT 6:6 /* R-XUF */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_TRUE (0x00000001) /* R-XUV */ + +// NORP = Number of Receiver Ports = Value + 1 +#define 
NV_DPCD_NORP (0x00000004) /* R-XUR */ +#define NV_DPCD_NORP_VAL 0:0 /* R-XUF */ +#define NV_DPCD_NORP_VAL_ONE (0x00000000) /* R-XUV */ +#define NV_DPCD_NORP_VAL_TWO (0x00000001) /* R-XUV */ +#define NV_DPCD_NORP_VAL_SST_MAX (0x00000001) /* R-XUV */ +#define NV_DPCD_NORP_DP_PWR_CAP_5V 5:5 /* R-XUF */ +#define NV_DPCD_NORP_DP_PWR_CAP_12V 6:6 /* R-XUF */ +#define NV_DPCD_NORP_DP_PWR_CAP_18V 7:7 /* R-XUF */ + +#define NV_DPCD_DOWNSTREAMPORT (0x00000005) /* R-XUR */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT 0:0 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_PRESENT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE 2:1 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_DISPLAYPORT (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_ANALOG (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_HDMI_DVI (0x00000002) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_TYPE_OTHERS (0x00000003) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION 3:3 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_FORMAT_CONVERSION_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE 4:4 /* R-XUF */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING (0x00000006) /* R-XUR */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B 0:0 /* R-XUF */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B 1:1 /* R-XUF */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DOWN_STREAM_PORT (0x00000007) /* R-XUR */ +#define NV_DPCD_DOWN_STREAM_PORT_COUNT 3:0 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED 6:6 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT 7:7 /* R-XUF */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DOWN_STREAM_PORT_OUI_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RECEIVE_PORT0_CAP_0 (0x00000008) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORT1_CAP_0 (0x0000000A) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID 1:1 /* R-XUF */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_LOCAL_EDID_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT 2:2 /* R-XUF */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RECEIVE_PORT0_CAP_1 (0x00000009) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORT1_CAP_1 (0x0000000B) /* R-XUR */ +#define NV_DPCD_RECEIVE_PORTX_CAP_1_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD_I2C_CTRL_CAP (0x0000000C) /* R-XUR */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED 7:0 /* R-XUF */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_1K (0x00000001) /* R-XUV */ 
+#define NV_DPCD_I2C_CTRL_CAP_SPEED_5K (0x00000002) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_10K (0x00000004) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_100K (0x00000008) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_400K (0x00000010) /* R-XUV */ +#define NV_DPCD_I2C_CTRL_CAP_SPEED_1M (0x00000020) /* R-XUV */ + +#define NV_DPCD_EDP_CONFIG_CAP (0x0000000D) /* R-XUR */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET 0:0 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE 1:1 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_FRAMING_CHANGE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT 2:2 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_INVERTED_TRAINING_BIT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE 3:3 /* R-XUF */ +#define NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_CONFIG_CAP_DISPLAY_CONTROL_CAPABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL (0x0000000E) /* R-XUR */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL 6:0 /* R-XUF */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_DEFAULT (0x00000000) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_4MS (0x00000001) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_8MS (0x00000002) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_12MS (0x00000003) /* R-XUV */ +#define NV_DPCD_TRAINING_AUX_RD_INTERVAL_VAL_16MS (0x00000004) /* R-XUV */ + +#define NV_DPCD_ADAPTER_CAP (0x0000000F) /* R-XUR */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE 0:0 /* R-XUF */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_FORCE_LOAD_SENSE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_ADAPTER_CAP_ALT_I2C_PATTERN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SUPPORTED_LINK_RATES(i) (0x00000010+(i)*2) /* R--2A */ +#define NV_DPCD_SUPPORTED_LINK_RATES__SIZE (0x00000008) /* R---S */ + +// 00010h-0001Fh: RESERVED. 
Reads all 0s + +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS (0x00000020) /* R-XUR */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768 0:0 /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1024_768_YES (0X00000001) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720 1:1 /* R-XUV */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1280_720_YES (0X00000001) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080 2:2 /* R-XUV */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080_NO (0X00000000) /* R-XUF */ +#define NV_DPCD_SINK_VIDEO_FALLBACK_FORMATS_1920_1080_YES (0X00000001) /* R-XUF */ + +#define NV_DPCD_MSTM (0x00000021) /* R-XUR */ +#define NV_DPCD_MSTM_CAP 0:0 /* R-XUF */ +#define NV_DPCD_MSTM_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_MSTM_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_NUMBER_OF_AUDIO_ENDPOINTS (0x00000022) /* R-XUR */ +#define NV_DPCD_NUMBER_OF_AUDIO_ENDPOINTS_VALUE 7:0 /* R-XUF */ + +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY (0x00000023) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR 3:0 /* R-XUF */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_3MS (0x00000000) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_2MS (0x00000001) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_1MS (0x00000002) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_500US (0x00000003) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_200US (0x00000004) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_100US (0x00000005) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_10US (0x00000006) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_1US (0x00000007) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_DEFAULT NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_AG_FACTOR_2MS +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR 7:4 /* R-XUF */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_3MS (0x00000000) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_2MS (0x00000001) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_1MS (0x00000002) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_500US (0x00000003) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_200US (0x00000004) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_100US (0x00000005) /* R-XUV */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_DEFAULT NV_DPCD_AV_SYNC_DATA_BLOCK_AV_GRANULARITY_VG_FACTOR_2MS + +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEC_LAT_0 (0x00000024) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEC_LAT_1 (0x00000025) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_PP_LAT_0 (0x00000026) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_PP_LAT_1 (0x00000027) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_VID_INTER_LAT (0x00000028) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_VID_PROG_LAT (0x00000029) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_REP_LAT (0x0000002A) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_0 (0x0000002B) /* R-XUR */ +#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_1 (0x0000002C) /* R-XUR */ 
+#define NV_DPCD_AV_SYNC_DATA_BLOCK_AUD_DEL_INS_2 (0x0000002D) /* R-XUR */ + +// 0002Eh - 0002Fh: RESERVED. Reads all 0s + +#define NV_DPCD_GUID (0x00000030) /* R-XUR */ + +// 00040h - 00053h: RESERVED. Reads all 0s + +#define NV_DPCD_RX_GTC_VALUE(i) (0x00000054+(i)) /* R--1A */ +#define NV_DPCD_RX_GTC_VALUE__SIZE 4 /* R---S */ + +#define NV_DPCD_RX_GTC_REQ (0x00000058) /* R-XUR */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ 0:0 /* R-XUF */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_RX_GTC_MSTR_REQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN 1:1 /* R-XUF */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_REQ_TX_GTC_VALUE_PHASE_SKEW_EN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_RX_GTC_FREQ_LOCK (0x00000059) /* R-XUR */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE 0:0 /* R-XUF */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_RX_GTC_FREQ_LOCK_DONE_YES (0x00000001) /* R-XUV */ + +// 0005Ah - 0006Fh: RESERVED Read all 0s + +#define NV_DPCD_EDP_PSR_VERSION (0x00000070) /* R-XUR */ + +#define NV_DPCD_EDP_PSR_CAP (0x00000071) /* R-XUR */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED 0:0 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED_YES (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_LT_NEEDED_NO (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME 3:1 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_330US (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_275US (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_220US (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_165US (0x00000003) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_110US (0x00000004) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_55US (0x00000005) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_SETUP_TIME_0US (0x00000006) /* R-XUV */ +#define NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED 4:4 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_Y_COORD_NEEDED_YES (0x00000001) /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED 5:5 /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD_EDP_PSR_CAP_GRAN_REQUIRED_YES (0x00000001) /* R-XUF*/ + +#define NV_DPCD_EDP_PSR2_X_GRANULARITY_L (0x00000072) /* R-XUR */ +#define NV_DPCD_EDP_PSR2_X_GRANULARITY_H (0x00000073) /* R-XUR */ +#define NV_DPCD_EDP_PSR2_Y_GRANULARITY (0x00000074) /* R-XUR */ + +// 00072h - 0007Fh: RESERVED Read all 0s + +/* + * When DETAILED_CAP_INFO_AVAILABLE = 0, 1 byte info per port. + * When DETAILED_CAP_INFO_AVAILABLE = 1, 4 bytes info per port. + * DETAILED_CAP_INFO_AVAILABLE located at 0x05h (DOWNSTREAMPORT_PRESENT), bit 5 + * + * Byte 0 definition. 
+*/ + +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT(i) (0x00000080+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT__SIZE 4 /* R---S */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE 2:0 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DISPLAYPORT (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_ANALOG (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DVI (0x00000002) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_HDMI (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_OTHERS_NO_EDID (0x00000004) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_TX_TYPE_DP_PLUSPLUS (0x00000005) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD 3:3 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD_NOT_AWARE (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_HPD_AWARE (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_ATTR 7:4 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_60HZ (0x00000001) /* R-XUV */ // 720x480i +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_480I_50HZ (0x00000002) /* R-XUV */ // 720x480i +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_60HZ (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_1080I_50HZ (0x00000004) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_60HZ (0x00000005) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT_NON_EDID_720P_50HZ (0x00000007) /* R-XUV */ + +/* + * Byte 1, Reserved for DisplayPort. + */ + +#define NV_DPCD_DETAILED_CAP_INFO_ONE(i) (0x00000081+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO__SIZE NV_DPCD_DETAILED_CAP_INFO_DWNSTRM_PORT__SIZE +#define NV_DPCD_DETAILED_CAP_INFO_ONE__SIZE 4 /* R---S */ +// For Analog VGA Donwstream Port. Maximum Pixel Rate in Mpixels per sec divided by 8 +#define NV_DPCD_DETAILED_CAP_INFO_VGA_MAX_PIXEL_RATE 7:0 /* R-XUF */ +/* + * For DVI/HDMI/DP++ Downstream Port, Maximum TMDS clock rate supported in Mbps divided by 2.5 + * e.g. 66 (0x42) for 165 MHz, 90 (0x5a) for 225 MHz + */ +#define NV_DPCD_DETAILED_CAP_INFO_TMDS_MAX_CLOCK_RATE 7:0 /* R-XUF */ + +// Byte 2, for VGA/DVI/HDMI/DP++ Downstream Port, reserved for DisplayPort. 
+#define NV_DPCD_DETAILED_CAP_INFO_TWO(i) (0x00000082+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO_TWO__SIZE 4 /* R---S */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF 1:0 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_8BPC (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_10BPC (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_12BPC (0x00000002) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_BITS_PER_COMPONENT_DEF_16BPC (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT 4:2 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_ZERO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_9G (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_18G (0x00000002) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_24G (0x00000003) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_32G (0x00000004) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_40G (0x00000005) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_MAX_FRL_LINK_BW_SUPPORT_48G (0x00000006) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT 5:5 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_SRC_CONTROL_MODE_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT 6:6 /* R-XUF */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DETAILED_CAP_INFO_CONCURRENT_LT_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_MAX_BPC_8 8 +#define NV_MAX_BPC_10 10 +#define NV_MAX_BPC_12 12 +#define NV_MAX_BPC_16 16 + +// Byte 3, Reserved for DisplayPort and VGA +#define NV_DPCD_DETAILED_CAP_INFO_THREE(i) (0x00000083+(i)*4) /* R--1A */ +#define NV_DPCD_DETAILED_CAP_INFO_THREE__SIZE 4 /* R---S */ +// For DVI + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK 1:1 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_DUAL_LINK_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH 2:2 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_DVI_HIGH_COLOR_DEPTH_YES (0x00000001) /* R-XUV */ +// For HDMI and DP++ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK 0:0 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_INFO_FRAME_SEQ_TO_FRAME_PACK_YES (0x00000001) /* R-XUV */ +// For HDMI-PCon + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED 1:1 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR422_PASS_THRU_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED 2:2 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_YCBCR420_PASS_THRU_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED 3:3 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define 
NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR422_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED 4:4 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_YCBCR444_TO_YCBCR420_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED 5:5 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB601_TO_YCBCR601_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED 6:6 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGB709_TO_YCBCR709_SUPPORTED_YES (0x00000001) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED 7:7 /* R-XUF */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED_NO (0x00000000) /* R-XUV */ + #define NV_DPCD_DETAILED_CAP_CONV_RGBBT2020_TO_YCBCRBT2020_SUPPORTED_YES (0x00000001) /* R-XUV */ + +/* +00090h - 000FFh: RESERVED for supporting up to 127 Downstream devices per Branch device. Read all 0s +Note: When DETAILED_CAP_INFO_AVAILABLE bit is set to 1, the maximum +number of Downstream ports will be limited to 32. +*/ + +#define NV_DPCD_LINK_BANDWIDTH_SET (0x00000100) /* RWXUR */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_1_62_GPBS (0x00000006) /* RWXUV */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_2_70_GPBS (0x0000000a) /* RWXUV */ +#define NV_DPCD_LINK_BANDWIDTH_SET_VAL_5_40_GPBS (0x00000014) /* RWXUV */ + +#define NV_DPCD_LANE_COUNT_SET (0x00000101) /* RWXUR */ +#define NV_DPCD_LANE_COUNT_SET_LANE 4:0 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_LANE_1 (0x00000001) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_LANE_2 (0x00000002) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_LANE_4 (0x00000004) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED 5:5 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING 7:7 /* RWXUF */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_LANE_COUNT_SET_ENHANCEDFRAMING_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_TRAINING_PATTERN_SET (0x00000102) /* RWXUR */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS 1:0 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_NONE (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP1 (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP2 (0x00000002) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_TPS_TP3 (0x00000003) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS 3:2 /* R-XUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_D10_2_TP (0x00000001) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_SYM_ERR_RATE_TP (0x00000002) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_LQPS_PRBS7 (0x00000003) /* R-XUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES (0x00000001) /* RWXUV */ 
+#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED 5:5 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL 7:6 /* RWXUF */ +#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ILLEGAL_SYMBOL_ERROR (0x00000000) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ERROR (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_PATTERN_SET_SYM_ERR_SEL_ILLEGAL_SYMBOL_ERROR (0x00000002) /* RWXUV */ + +#define NV_DPCD_TRAINING_LANE_SET(i) (0x00000103+(i)) /* RW-1A */ +#define NV_DPCD_TRAINING_LANE_SET__SIZE 4 /* RW--S */ +#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING 1:0 /* RWXUF */ +#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING_MAX_REACHED 2:2 /* RWXUF */ +#define NV_DPCD_TRAINING_LANE_SET_VOLTAGE_SWING_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS 4:3 /* RWXUF */ +#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS_MAX_REACHED 5:5 /* RWXUF */ +#define NV_DPCD_TRAINING_LANE_SET_PREEMPHASIS_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_LANE0_SET (0x00000103) /* RWXUR */ + +#define NV_DPCD_MAX_VOLTAGE_SWING (0x00000003) /* RWXUV */ +#define NV_DPCD_MAX_VOLTAGE_PREEMPHASIS (0x00000003) /* RWXUV */ + +#define NV_DPCD_TRAINING_LANE1_SET (0x00000104) /* RWXUR */ +#define NV_DPCD_TRAINING_LANE2_SET (0x00000105) /* RWXUR */ +#define NV_DPCD_TRAINING_LANE3_SET (0x00000106) /* RWXUR */ +#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT 1:0 /* RWXUF */ +#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT_MAX_REACHED 2:2 /* RWXUF */ +#define NV_DPCD_TRAINING_LANEX_SET_DRIVE_CURRENT_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS 4:3 /* RWXUF */ +#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS_MAX_REACHED 5:5 /* RWXUF */ +#define NV_DPCD_TRAINING_LANEX_SET_PREEMPHASIS_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_DOWNSPREAD_CTRL (0x00000107) /* RWXUR */ +#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP 4:4 /* RWXUF */ +#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_NONE (0x00000000) /* RWXUV */ +#define NV_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_LESS_THAN_0_5 (0x00000001) /* RWXUV */ +#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED 7:7 /* RWXUF */ +#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_DOWNSPREAD_CTRL_MSA_TIMING_PAR_IGNORED_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_MAIN_LINK_CHANNEL_CODING_SET (0x00000108) /* RWXUR */ +#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_8B_10B 0:0 /* RWXUF */ +#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_8B_10B_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_8B_10B_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_128B_132B 1:1 /* RWXUF */ +#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_128B_132B_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_MAIN_LINK_CNANNEL_CODING_SET_ANSI_128B_132B_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_I2C_CTRL_SET (0x00000109) /* RWXUR */ +#define NV_DPCD_I2C_CTRL_SET_SPEED 7:0 /* RWXUF */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_DEFAULT (0x00000000) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_1K (0x00000001) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_5K (0x00000002) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_10K (0x00000004) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_100K 
(0x00000008) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_400K (0x00000010) /* RWXUV */ +#define NV_DPCD_I2C_CTRL_SET_SPEED_1M (0x00000020) /* RWXUV */ + +#define NV_DPCD_EDP_CONFIG_SET (0x0000010A) /* RWXUR */ +#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET 0:0 /* RWXUF */ +#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_ALTERNATE_SCRAMBLER_RESET_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE 1:1 /* RWXUF */ +#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_FRAMING_CHANGE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT 2:2 /* RWXUF */ +#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_INVERTED_TRAINING_BIT_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST 7:7 /* RWXUF */ +#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_CONFIG_SET_PANEL_SELF_TEST_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD_LINK_QUAL_LANE_SET(i) (0x0000010B+(i)) /* RW-1A */ +#define NV_DPCD_LINK_QUAL_LANE_SET__SIZE 4 /* RW--S */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS 2:0 /* RWXUF */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_D10_2 (0x00000001) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_SYM_ERR_MEASUREMENT_CNT (0x00000002) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_PRBS7 (0x00000003) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_80_BIT_CUSTOM (0x00000004) /* RWXUV */ +#define NV_DPCD_LINK_QUAL_LANE_SET_LQS_HBR2 (0x00000005) /* RWXUV */ + +#define NV_DPCD_TRAINING_LANE0_1_SET2 (0x0000010F) /* RWXUR */ +#define NV_DPCD_TRAINING_LANE2_3_SET2 (0x00000110) /* RWXUR */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2 1:0 /* RWXUF */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2_MAX_REACHED 2:2 /* RWXUF */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEX_SET2_POST_CURSOR2_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2 5:4 /* RWXUF */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2_MAX_REACHED 6:6 /* RWXUF */ +#define NV_DPCD_LANEX_XPLUS1_TRAINING_LANEXPLUS1_SET2_POST_CURSOR2_MAX_REACHED_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_MSTM_CTRL (0x00000111) /* RWXUR */ +#define NV_DPCD_MSTM_CTRL_EN 0:0 /* RWXUF */ +#define NV_DPCD_MSTM_CTRL_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_EN_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_UP_REQ_EN 1:1 /* RWXUF */ +#define NV_DPCD_MSTM_CTRL_UP_REQ_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_UP_REQ_EN_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC 2:2 /* RWXUF */ +#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_AUDIO_DELAY(i) (0x00000112+(i)) /* RW-1A */ +#define NV_DPCD_AUDIO_DELAY__SIZE 3 /* NNNNS */ + +#define NV_DPCD_LINK_RATE_SET (0x00000115) /* RWXUR */ +#define NV_DPCD_LINK_RATE_SET_VAL 2:0 /* RWXUF */ + +// 00115h - 00117h: RESERVED. 
Reads all 0s + +#define NV_DPCD_UPSTREAM_DEV_DP_PWR (0x00000118) /* RWXUR */ +#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED 0:0 /* RWXUF */ +#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD_UPSTREAM_DEV_DP_PWR_NOT_NEEDED_TRUE (0x00000001) /* RWXUV */ + +#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT (0x00000119) /* RWXUR */ +#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED 0:0 /* RWXUF */ +#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EXTENDED_DPRX_WAKE_TIMEOUT_PERIOD_GRANTED_YES (0x00000001) /* RWXUV */ + +// 0011Ah - 0011Fh: RESERVED. Reads all 0s +// 00126h - 00153h: RESERVED. Reads all 0s + +#define NV_DPCD_TX_GTC_VALUE(i) (0x00000154+(i)) /* RW-1A */ +#define NV_DPCD_TX_GTC_VALUE__SIZE 4 /* R---S */ + +#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW (0x00000158) /* RWXUR */ +#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN 0:0 /* RWXUF */ +#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_RX_GTC_VALUE_PHASE_SKEW_EN_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_TX_GTC_FREQ_LOCK (0x00000159) /* RWXUR */ +#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE 0:0 /* RWXUF */ +#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TX_GTC_FREQ_LOCK_DONE_YES (0x00000001) /* RWXUV */ + +// 0015Ah - 0016Fh: RESERVED. Read all 0s + +#define NV_DPCD_EDP_PSR_CONFIG (0x00000170) /* RWXUR */ +#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE 0:0 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SINK_ENABLE_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE 1:1 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SOURCE_LINK_ACTIVE_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE 2:2 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_CRC_VERIFICATION_ACTIVE_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION 3:3 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION_IMM (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_FRAME_CAPTURE_INDICATION_SECOND (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION 4:4 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION_IMM (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_SU_LINE_CAPTURE_INDICATION_SECOND (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR 5:5 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_HPD_IRQ_ON_CRC_ERROR_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2 6:6 /* RWXUF */ +#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PSR_CONFIG_ENABLE_PSR2_YES (0x00000001) /* RWXUV */ + +// 00171h - 0019Fh: RESERVED. 
Read all 0s + + +#define NV_DPCD_ADAPTER_CTRL (0x000001A0) /* RWXUR */ +#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE 0:0 /* RWXUF */ +#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_ADAPTER_CTRL_FORCE_LOAD_SENSE_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_BRANCH_DEV_CTRL (0x000001A1) /* RWXUR */ +#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE 0:0 /* RWXUF */ +#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_LONGPULSE (0x00000000) /* RWXUV */ +#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_IRQ_HPD (0x00000001) /* RWXUV */ +#define NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_DEFAULT NV_DPCD_BRANCH_DEV_CTRL_HOTPLUG_EVENT_TYPE_LONGPULSE + +// 001A2h - 0019Fh: RESERVED. Read all 0s + +#define NV_DPCD_PAYLOAD_ALLOC_SET (0x000001C0) /* RWXUR */ +#define NV_DPCD_PAYLOAD_ALLOC_SET_PAYLOAD_ID 6:0 /* RWXUF */ + +#define NV_DPCD_PAYLOAD_ALLOC_START_TIME_SLOT (0x000001C1) /* RWXUR */ +#define NV_DPCD_PAYLOAD_ALLOC_START_TIME_SLOT_VAL 5:0 /* RWXUF */ + +#define NV_DPCD_PAYLOAD_ALLOC_TIME_SLOT_COUNT (0x000001C2) /* RWXUR */ +#define NV_DPCD_PAYLOAD_ALLOC_TIME_SLOT_COUNT_VAL 5:0 /* RWXUF */ + +// 001C3h - 001FFh: RESERVED. Reads all 0s + +#define NV_DPCD_SINK_COUNT (0x00000200) /* R-XUR */ +// Bits 7 and 5:0 = SINK_COUNT +#define NV_DPCD_SINK_COUNT_VAL_BIT_05_MASK (0x3F) +#define NV_DPCD_SINK_COUNT_VAL_BIT_7 (0x80) +#define NV_DPCD_SINK_COUNT_VAL(x) ((x & NV_DPCD_SINK_COUNT_VAL_BIT_05_MASK) \ + | ((x & NV_DPCD_SINK_COUNT_VAL_BIT_7) >> 1)) +#define NV_DPCD_SINK_COUNT_CP_READY 6:6 /* R-XUF */ +#define NV_DPCD_SINK_COUNT_CP_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_COUNT_CP_READY_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR (0x00000201) /* RWXUR */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL 0:0 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_REMOTE_CTRL_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST 1:1 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTO_TEST_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP 2:2 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ 3:3 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY 4:4 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_RDY_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY 5:5 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_RDY_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ 6:6 /* RWXUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_LANE0_1_STATUS (0x00000202) /* R-XUR */ + +#define NV_DPCD_LANE2_3_STATUS (0x00000203) /* R-XUR */ +#define 
NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE 1:1 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED 2:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEX_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE 4:4 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE 5:5 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED 6:6 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_LANEXPLUS1_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED (0x00000204) /* R-XUR */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS 1:1 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_POST_LT_ADJ_REQ_IN_PROGRESS_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_D0WNSTRM_PORT_STATUS_DONE 6:6 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_D0WNSTRM_PORT_STATUS_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_D0WNSTRM_PORT_STATUS_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED 7:7 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SINK_STATUS (0x00000205) /* R-XUR */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS 0:0 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS 1:1 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE0_1_ADJUST_REQ (0x00000206) /* R-XUR */ +#define NV_DPCD_LANE2_3_ADJUST_REQ (0x00000207) /* R-XUR */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEX_DRIVE_CURRENT 1:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEX_PREEMPHASIS 3:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEXPLUS1_DRIVE_CURRENT 5:4 /* R-XUF */ +#define 
NV_DPCD_LANEX_XPLUS1_ADJUST_REQ_LANEXPLUS1_PREEMPHASIS 7:6 /* R-XUF */ + +#define NV_DPCD_TRAINING_SCORE_LANE(i) (0x00000208+(i)) /* R--1A */ +#define NV_DPCD_TRAINING_SCORE_LANE__SIZE 4 /* R---S */ + +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2 (0x0000020C) /* R-XUR */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE(i) i%4*2+1:i%4*2 +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE0 1:0 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE1 3:2 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE2 5:4 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE3 7:6 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE4 1:0 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE5 3:2 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE6 5:4 /* R-XUF */ +#define NV_DPCD_ADJUST_REQ_POST_CURSOR2_LANE7 7:6 /* R-XUF */ + +// 0020Fh: RESERVED. Read all 0s + +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0(i) (0x00000210+(i)*2) /* R--1A */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0__SIZE 4 /* R---S */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE0_VALUE 7:0 /* R-XUF */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1(i) (0x00000211+(i)*2) /* R--1A */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1__SIZE 4 /* R---S */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_SYMBOL_ERROR_COUNT_LANEX_BYTE1_VALID 7:7 /* R-XUF */ + +#define NV_DPCD_TEST_REQUEST (0x00000218) /* R-XUR */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING 0:0 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_LINK_TRAINING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PATTERN_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ 2:2 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_EDID_READ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN 3:3 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_PHY_TEST_PATTERN_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_PHY_TEST_CHANNEL_CODING 5:4 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_PHY_TEST_CHANNEL_CODING_8B10B (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_PHY_TEST_CHANNEL_CODING_128B132B (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_PATTERN_REQ 6:6 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_PATTERN_REQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_PATTERN_REQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_DISABLED_VIDEO 7:7 /* R-XUF */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_DISABLED_VIDEO_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_REQUEST_TEST_AUDIO_DISABLED_VIDEO_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_TEST_LINK_RATE (0x00000219) /* R-XUR */ +#define NV_DPCD_TEST_LINK_RATE_TYPE 7:0 /* R-XUF */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_1_62G (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_2_70G (0x0000000A) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_5_40G (0x00000014) /* R-XUV */ +// +// For PHY Test 128b/132b channel coding (PHY_TEST_CHANNEL_CODING field in +// the TEST_REQUEST register (DPCD Address 00218h, bits 5:4) is programmed to 01b) +// +#define NV_DPCD_TEST_LINK_RATE_TYPE_UHBR10 (0x00000001) 
/* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_UHBR20 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_LINK_RATE_TYPE_UHBR135 (0x00000004) /* R-XUV */ + +// 0021Ah - 0021Fh: RESERVED. Read all 0s + +#define NV_DPCD_TEST_LANE_COUNT (0x00000220) /* R-XUR */ +#define NV_DPCD_TEST_LANE_COUNT_VALUE 4:0 /* R-XUF */ + +#define NV_DPCD_TEST_PATTERN (0x00000221) /* R-XUR */ +#define NV_DPCD_TEST_PATTERN_TYPE 1:0 /* R-XUF */ +#define NV_DPCD_TEST_PATTERN_TYPE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_COLOR_RAMPS (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_BW_VERTICAL_LINES (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_PATTERN_TYPE_COLOR_SQUARES (0x00000003) /* R-XUV */ + +#define NV_DPCD_TEST_H_TOTAL_HIGH_BYTE (0x00000222) /* R-XUR */ +#define NV_DPCD_TEST_H_TOTAL_LOW_BYTE (0x00000223) /* R-XUR */ + +#define NV_DPCD_TEST_V_TOTAL_HIGH_BYTE (0x00000224) /* R-XUR */ +#define NV_DPCD_TEST_V_TOTAL_LOW_BYTE (0x00000225) /* R-XUR */ + +#define NV_DPCD_TEST_H_START_HIGH_BYTE (0x00000226) /* R-XUR */ +#define NV_DPCD_TEST_H_START_LOW_BYTE (0x00000227) /* R-XUR */ + +#define NV_DPCD_TEST_V_START_HIGH_BYTE (0x00000228) /* R-XUR */ +#define NV_DPCD_TEST_V_START_LOW_BYTE (0x00000229) /* R-XUR */ + +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE (0x0000022A) /* R-XUR */ +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_TEST_HSYNC_HIGH_BYTE_POLARITY 7:7 /* R-XUF */ +#define NV_DPCD_TEST_HSYNC_LOW_BYTE (0x0000022B) /* R-XUR */ + +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE (0x0000022C) /* R-XUR */ +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE_VALUE 6:0 /* R-XUF */ +#define NV_DPCD_TEST_VSYNC_HIGH_BYTE_POLARITY 7:7 /* R-XUF */ +#define NV_DPCD_TEST_VSYNC_LOW_BYTE (0x0000022D) /* R-XUR */ + +#define NV_DPCD_TEST_H_WIDTH_HIGH_BYTE (0x0000022E) /* R-XUR */ +#define NV_DPCD_TEST_H_WIDTH_LOW_BYTE (0x0000022F) /* R-XUR */ + +#define NV_DPCD_TEST_V_HEIGHT_HIGH_BYTE (0x00000230) /* R-XUR */ +#define NV_DPCD_TEST_V_HEIGHT_LOW_BYTE (0x00000231) /* R-XUR */ + +#define NV_DPCD_TEST_MISC0 (0x00000232) /* R-XUR */ +#define NV_DPCD_TEST_MISC0_TEST_SYNC_CLOCK 0:0 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT 2:1 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_RGB (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_4_2_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_4_4_4 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_COLOR_FORMAT_RESERVED (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_DYNAMIC_RANGE 3:3 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_YCBCR_COEFF 4:4 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH 7:5 /* R-XUF */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_6BITS (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_8BITS (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_10BITS (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_12BITS (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_MISC0_TEST_BIT_DEPTH_16BITS (0x00000004) /* R-XUV */ + +#define NV_DPCD_TEST_MISC1 (0x00000233) /* R-XUR */ +#define NV_DPCD_TEST_MISC1_TEST_REFRESH_DENOMINATOR 0:0 /* R-XUF */ +#define NV_DPCD_TEST_MISC1_TEST_INTERLACED 1:1 /* R-XUF */ + +#define NV_DPCD_TEST_REFRESH_RATE_NUMERATOR (0x00000234) /* R-XUR */ + +// 00235h - 0023Fh: RESERVED for test automation extensions. 
Reads all 0s + +#define NV_DPCD_TEST_CRC_R_Cr_LOW_BYTE (0x00000240) /* R-XUR */ +#define NV_DPCD_TEST_CRC_R_Cr_HIGH_BYTE (0x00000241) /* R-XUR */ + +#define NV_DPCD_TEST_CRC_G_Y_LOW_BYTE (0x00000242) /* R-XUR */ +#define NV_DPCD_TEST_CRC_G_Y_HIGH_BYTE (0x00000243) /* R-XUR */ + +#define NV_DPCD_TEST_CRC_B_Cb_LOW_BYTE (0x00000244) /* R-XUR */ +#define NV_DPCD_TEST_CRC_B_Cb_HIGH_BYTE (0x00000245) /* R-XUR */ + +#define NV_DPCD_TEST_SINK_MISC (0x00000246) /* R-XUR */ +#define NV_DPCD_TEST_SINK_TEST_CRC_COUNT 3:0 /* R-XUF */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED 5:5 /* R-XUF */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED_NO (0X00000000) /* R-XUV */ +#define NV_DPCD_TEST_SINK_TEST_CRC_SUPPORTED_YES (0X00000001) /* R-XUV */ + +//00247h: RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_PHY_TEST_PATTERN (0x00000248) /* R-XUR */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_DP11 1:0 /* R-XUF */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_DP12 2:0 /* R-XUF */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_D10_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_SYM_ERR_MEASUREMENT_CNT (0x00000002) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_PRBS7 (0x00000003) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_80_BIT_CUSTOM (0x00000004) /* R-XUV */ +#define NV_DPCD_PHY_TEST_PATTERN_SEL_HBR2_COMPLIANCE_EYE (0x00000005) /* R-XUV */ + +#define NV_DPCD_HBR2_COMPLIANCE_SCRAMBLER_RESET_LOW_BYTE (0x0000024A) /* R-XUV */ +#define NV_DPCD_HBR2_COMPLIANCE_SCRAMBLER_RESET_HIGH_BYTE (0x0000024B) /* R-XUV */ + +// 0024Ch - 0024Fh RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_TEST_80BIT_CUSTOM_PATTERN(i) (0x00000250+(i)) /* R--1A */ +#define NV_DPCD_TEST_80BIT_CUSTOM_PATTERN__SIZE 10 /* R---S */ + +// 0025Ah - 0025Fh: RESERVED for test automation extensions. Reads all 0s + +#define NV_DPCD_TEST_RESPONSE (0x00000260) /* RWXUR */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK 0:0 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_ACK_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK 1:1 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_NACK_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE 2:2 /* RWXUF */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_RESPONSE_TEST_EDID_CHKSUM_WRITE_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD_TEST_EDID_CHKSUM (0x00000261) /* RWXUR */ + +// 00263h - 0026Fh: RESERVED for test automation extensions Read all 0s. 
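The TEST_REQUEST (00218h) and TEST_RESPONSE (00260h) registers above carry the DP test-automation handshake: the sink raises AUTO_TEST in the IRQ vector, the source reads TEST_REQUEST, performs the requested test, and answers through TEST_RESPONSE. A minimal sketch of the EDID-read case, assuming the definitions above are in scope and hypothetical auxRead()/auxWrite() single-byte DPCD helpers (not part of this header):

    #include <stdbool.h>
    #include <stdint.h>

    // Hypothetical AUX-channel accessors; a real driver would route these
    // through its AUX transaction layer.
    bool auxRead(uint32_t dpcdAddr, uint8_t *data);
    bool auxWrite(uint32_t dpcdAddr, uint8_t data);

    // Acknowledge a TEST_EDID_READ automation request: report the checksum
    // of the EDID block just read, then ACK the test.
    static bool dpAckEdidReadTest(uint8_t edidChecksum)
    {
        uint8_t request;

        if (!auxRead(NV_DPCD_TEST_REQUEST, &request))
            return false;

        // NV_DPCD_TEST_REQUEST_TEST_EDID_READ is bit 2 of TEST_REQUEST.
        if (!(request & (1 << 2)))
            return false;

        // Write the checksum of the last EDID block read to TEST_EDID_CHKSUM.
        if (!auxWrite(NV_DPCD_TEST_EDID_CHKSUM, edidChecksum))
            return false;

        // TEST_RESPONSE: set TEST_EDID_CHKSUM_WRITE (bit 2) and TEST_ACK (bit 0).
        return auxWrite(NV_DPCD_TEST_RESPONSE, (1 << 2) | (1 << 0));
    }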
+ +#define NV_DPCD_TEST_SINK (0x00000270) /* RWXUR */ +#define NV_DPCD_TEST_SINK_START 0:0 /* RWXUF */ +#define NV_DPCD_TEST_SINK_START_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_SINK_START_YES (0x00000001) /* RWXUV */ +#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_SEL 5:4 /* RWXUF */ +#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN 7:7 /* RWXUF */ +#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_TEST_SINK_PHY_SINK_TEST_LANE_EN_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD_TEST_AUDIO_MODE (0x00000271) /* R-XUR */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE 3:0 /* R-XUF */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_32_0KHZ (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_44_1KHZ (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_48_0KHZ (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_88_2KHZ (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_96_0KHZ (0x00000004) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_176_4KHZ (0x00000005) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_SAMPLING_RATE_192_0KHZ (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT 7:4 /* R-XUF */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_1 (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_3 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_4 (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_5 (0x00000004) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_6 (0x00000005) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_7 (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_MODE_CHANNEL_COUNT_8 (0x00000007) /* R-XUV */ + +#define NV_DPCD_TEST_AUDIO_PATTERN (0x00000272) /* R-XUR */ +#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE 7:0 /* R-XUF */ +#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE_OP_DEFINED (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PATTERN_TYPE_SAWTOOTH (0x00000001) /* R-XUV */ + +#define NV_DPCD_TEST_AUDIO_PERIOD_CH(i) (0x00000273+(i)) /* R--1A */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH__SIZE 8 /* R---S */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES 3:0 /* R-XUF */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_UNUSED (0x00000000) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_3 (0x00000001) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_6 (0x00000002) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_12 (0x00000003) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_24 (0x00000004) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_48 (0x00000005) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_96 (0x00000006) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_192 (0x00000007) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_384 (0x00000008) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_768 (0x00000009) /* R-XUV */ +#define NV_DPCD_TEST_AUDIO_PERIOD_CH_SAMPLES_1536 (0x0000000A) /* R-XUV */ + +// 0027Bh - 0027Fh: RESERVED. 
Read all 0s + +// For DP version 1.3 and above +#define NV_DPCD_FEC_STATUS (0x00000280) /* R-XUR */ +#define NV_DPCD_FEC_STATUS_DECODE_EN 0:0 /* R-XUF */ +#define NV_DPCD_FEC_STATUS_DECODE_EN_NOT_DETECTED (0x00000000) /* R-XUV */ +#define NV_DPCD_FEC_STATUS_DECODE_EN_DETECTED (0x00000001) /* R-XUV */ +#define NV_DPCD_FEC_STATUS_DECODE_DIS 1:1 /* R-XUF */ +#define NV_DPCD_FEC_STATUS_DECODE_DIS_NOT_DETECTED (0x00000000) /* R-XUV */ +#define NV_DPCD_FEC_STATUS_DECODE_DIS_DETECTED (0x00000001) /* R-XUV */ + + +// 00283h - 002BFh: RESERVED. Read all 0s. + +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS (0x000002C0) /* R-XUR */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED 0:0 /* R-XUF */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_UPDATED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED 1:1 /* R-XUF */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_VC_PAYLOAD_ID_SLOT(i) (0x000002C1+(i)) /* R--1A */ +#define NV_DPCD_VC_PAYLOAD_ID_SLOT__SIZE 63 /* R---S */ + +// Source Device-Specific Field, Burst write for 00300h-0030Bh +// 6 hex digits: 0x300~0x302. +#define NV_DPCD_SOURCE_IEEE_OUI (0x00000300) /* RWXUR */ +#define NV_DPCD_OUI_NVIDIA_LITTLE_ENDIAN 0x4B0400 + +// 6 bytes: 0x303~0x308 +#define NV_DPCD_SOURCE_DEV_ID_STRING(i) (0x00000303+(i)) /* RW-1A */ +#define NV_DPCD_SOURCE_DEV_ID_STRING__SIZE 6 /* RW--S */ + +#define NV_DPCD_SOURCE_HARDWARE_REV (0x00000309) /* RWXUR */ +#define NV_DPCD_SOURCE_HARDWARE_REV_MINOR 3:0 /* RWXUF */ +#define NV_DPCD_SOURCE_HARDWARE_REV_MAJOR 7:4 /* RWXUF */ + +#define NV_DPCD_SOURCE_SOFTWARE_REV_MAJOR (0x0000030A) /* RWXUR */ +#define NV_DPCD_SOURCE_SOFTWARE_REV_MINOR (0x0000030B) /* RWXUR */ + +// Sink Device-Specific Field. Read Only +// 6 hex digits: 0x400~0x402 +#define NV_DPCD_SINK_IEEE_OUI (0x00000400) /* R-XUR */ + +// 6 bytes: 0x403~0x408 +#define NV_DPCD_SINK_DEV_ID_STRING(i) (0x00000403+(i)) /* R--1A */ +#define NV_DPCD_SINK_DEV_ID_STRING__SIZE 6 /* R---S */ + +#define NV_DPCD_SINK_HARDWARE_REV (0x00000409) /* R-XUR */ +#define NV_DPCD_SINK_HARDWARE_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD_SINK_HARDWARE_REV_MAJOR 7:4 /* R-XUF */ + +#define NV_DPCD_SINK_SOFTWARE_REV_MAJOR (0x0000040A) /* R-XUR */ +#define NV_DPCD_SINK_SOFTWARE_REV_MINOR (0x0000040B) /* R-XUR */ + +// Branch Device-Specific Field +// 6 hex digits: 0x500~0x502 + +#define NV_DPCD_BRANCH_IEEE_OUI (0x00000500) /* R-XUR */ + +// 6 bytes: 0x503~0x508 +#define NV_DPCD_BRANCH_DEV_ID_STRING (0x00000503+(i)) /* R--1A */ +#define NV_DPCD_BRANCH_DEV_ID_STRING__SIZE 6 /* R---S */ + +#define NV_DPCD_BRANCH_HARDWARE_REV (0x00000509) /* R-XUR */ +#define NV_DPCD_BRANCH_HARDWARE_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD_BRANCH_HARDWARE_REV_MAJOR 7:4 /* R-XUF */ + +#define NV_DPCD_BRANCH_SOFTWARE_REV_MAJOR (0x0000050A) /* R-XUR */ +#define NV_DPCD_BRANCH_SOFTWARE_REV_MINOR (0x0000050B) /* R-XUR */ + +// Sink Control Field +#define NV_DPCD_SET_POWER (0x00000600) /* RWXUR */ +#define NV_DPCD_SET_POWER_VAL 2:0 /* RWXUF */ +#define NV_DPCD_SET_POWER_VAL_RESERVED (0x00000000) /* RWXUV */ +#define NV_DPCD_SET_POWER_VAL_D0_NORMAL (0x00000001) /* RWXUV */ +#define NV_DPCD_SET_POWER_VAL_D3_PWRDWN (0x00000002) /* RWXUV */ +#define NV_DPCD_SET_POWER_VAL_D3_AUX_ON (0x00000005) /* RWXUV */ + +/* + * 00601h - 006FFh: RESERVED. 
Read all 0s + */ + +// * 00700h - 007FFh: RESERVED for eDP, see eDP v1.4 and above +#define NV_DPCD_EDP_REV (0x00000700) /* R-XUR */ +#define NV_DPCD_EDP_REV_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_REV_VAL_1_1_OR_LOWER (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_2 (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_3 (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4 (0x00000003) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4A (0x00000004) /* R-XUV */ +#define NV_DPCD_EDP_REV_VAL_1_4B (0x00000005) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1 (0x00000701) /* R-XUR */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_TCON_BKLGHT_ADJUST_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP 1:1 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_PIN_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP 2:2 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_BKLGHT_AUX_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_PIN_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP 4:4 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_PANEL_SELF_TEST_AUX_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP 5:5 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_FRC_EN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP 6:6 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_COLOR_ENGINE_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP 7:7 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP1_SET_POWER_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP (0x00000702) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_PWM_PIN_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP 1:1 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_SET_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT 2:2 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT_2B (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_BRIGHT_AUX_BYTE_CNT_1B (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_AUX_PWM_PRODUCT_CAP_NO (0x00000000) /* R-XUV */ +#define 
NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP 4:4 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_PWM_PIN_PASSTHRU_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP 5:5 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_FREQ_AUX_SET_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP 6:6 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_DYNAMIC_BKLGHT_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP 7:7 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP_VBL (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_ADJUST_CAP_VBLANK_BKLGHT_UPDATE_CAP_IMM (0x00000000) /* R-XUV */ +#define NV_DPCP_EDP_GENERAL_CAP2 (0x00000703) /* R-XUR */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP 0:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_ENGINE_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT 2:1 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_MSB (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_BKLGHT_BRIGHT_BIT_ALIGNMENT_LSB (0x00000002) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP 3:3 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP2_OVERDRIVE_CONTROL_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3 (0x00000704) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3_X_REGION_CAP 3:0 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP3_X_REGION_CAP_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_GENERAL_CAP3_Y_REGION_CAP 7:4 /* R-XUF */ +#define NV_DPCD_EDP_GENERAL_CAP3_Y_REGION_CAP_NOT_SUPPORTED (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_DISPLAY_CTL (0x00000720) /* RWXUR */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN 0:0 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN 1:1 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_BLACK_VIDEO_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_FRC_EN 2:2 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_FRC_EN_2BIT (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN 3:3 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_COLOR_ENGINE_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL 5:4 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_AUTONOMOUS (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_DISABLE (0x00000002) 
/* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_OVERDRIVE_CTL_ENABLE (0x00000003) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN 7:7 /* RWXUF */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_DISPLAY_CTL_VBLANK_BKLGHT_UPDATE_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET (0x00000721) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE 1:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PWM_PIN (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PRESET_LV (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_AUX (0x00000002) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_BRIGHT_CTL_MODE_PWM_AND_AUX (0x00000003) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN 2:2 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_PWM_PIN_PASSTHRU_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN 3:3 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_FREQ_AUX_SET_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN 4:4 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_DYNAMIC_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN 5:5 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_REGIONAL_BKLGHT_EN_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS 6:6 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS_ENABLED (0x00000001) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_MODE_SET_UPDATE_REGION_BRIGHTNESS_DISABLED (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB (0x00000722) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_MSB_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB (0x00000723) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_BRIGHTNESS_LSB_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT (0x00000724) /* RWXUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN (0x00000725) /* R-XUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN_VAL 4:0 /* R-XUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MIN_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX (0x00000726) /* R-XUR */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX_VAL 4:0 /* R-XUF */ +#define NV_DPCD_EDP_PWMGEN_BIT_CNT_CAP_MAX_VAL_INIT (0x00000000) /* R-XUV */ 
+#define NV_DPCD_EDP_BKLGHT_CTL_STATUS (0x00000727) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION 0:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_FAULT (0x00000001) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_CTL_STATUS_FAULT_CONDITION_NORMAL (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET (0x00000728) /* RWXUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_SET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB (0x0000072A) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID (0x0000072B) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_MID_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB (0x0000072C) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB_VAL 1:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MIN_LSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB (0x0000072D) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID (0x0000072E) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID_VAL 7:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_MID_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB (0x0000072F) /* R-XUR */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB_VAL 1:0 /* R-XUF */ +#define NV_DPCD_EDP_BKLGHT_FREQ_CAP_MAX_LSB_VAL_INIT (0x00000000) /* R-XUV */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET (0x00000732) /* RWXUR */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_DBC_MINIMUM_BRIGHTNESS_SET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_SET (0x00000733) /* RWXUR */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_CAP_VAL 4:0 /* RWXUF */ +#define NV_DPCD_EDP_DBC_MAXIMUM_BRIGHTNESS_CAP_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE (0x00000740) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE_INDEX_OFFSET_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BKLGHT_BASE_INDEX_OFFSET_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0 (0x00000741) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_0_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1 (0x00000742) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_1_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2 (0x00000743) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_2_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3 (0x00000744) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_3_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4 
(0x00000745) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_4_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5 (0x00000746) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_5_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6 (0x00000747) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_6_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7 (0x00000748) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_7_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8 (0x00000749) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_8_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9 (0x0000074A) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_9_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10 (0x0000074B) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_10_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11 (0x0000074C) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_11_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12 (0x0000074D) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_12_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13 (0x0000074E) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_13_VAL_INIT (0x00000000) /* RWXUV */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14 (0x0000074F) /* RWXUR */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14_VAL 7:0 /* RWXUF */ +#define NV_DPCD_EDP_REGIONAL_BACKLIGHT_BRIGHTNESS_14_VAL_INIT (0x00000000) /* RWXUV */ + +/* + * 00800h - 00FFFh: RESERVED. 
Read all 0s + */ + +// Sideband MSG Buffers +#define NV_DPCD_MBOX_DOWN_REQ (0x00001000) /* RWXUR */ +#define NV_DPCD_MBOX_UP_REP (0x00001200) /* RWXUR */ +#define NV_DPCD_MBOX_DOWN_REP (0x00001400) /* R-XUR */ +#define NV_DPCD_MBOX_UP_REQ (0x00001600) /* R-XUR */ + +// 0x2000 & 0x2001 : RESERVED for USB-over-AUX + +// ESI (Event Status Indicator) Field +#define NV_DPCD_SINK_COUNT_ESI (0x00002002) /* R-XUR */ +#define NV_DPCD_SINK_COUNT_ESI_SINK_COUNT 5:0 /* R-XUF */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY 6:6 /* R-XUF */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_COUNT_ESI_CP_READY_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0 (0x00002003) /* R-XUR */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL 0:0 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_REMOTE_CTRL_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST 1:1 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_AUTO_TEST_YES (0x00000001) /* R-XUV */ +// for eDP v1.4 & v1.4a only +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ 1:1 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_TOUCH_IRQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP 2:2 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_CP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ 3:3 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_MCCS_IRQ_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY 4:4 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_DOWN_REP_MSG_RDY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY 5:5 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_UP_REQ_MSG_RDY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ 6:6 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI0_SINK_SPECIFIC_IRQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1 (0x00002004) /* R-XUR */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE 0:0 /* R-XUF */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_DEVICE_SERVICE_IRQ_VECTOR_ESI1_RX_GTC_MSTR_REQ_STATUS_CHANGE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0 (0x00002005) /* R-XUR */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED 0:0 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_RX_CAP_CHANGED_YES (0x00000001) /* R-XUV */ +#define 
NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED 1:1 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_LINK_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED 2:2 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_STREAM_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED 3:3 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_HDMI_LINK_STATUS_CHANGED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ 4:4 /* R-XUF */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LINK_SERVICE_IRQ_VECTOR_ESI0_CONNECTED_OFF_ENTRY_REQ_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS (0x00002006) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR 0:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_LINK_CRC_ERR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR 1:1 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_RFB_ERR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR 2:2 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_ERR_STATUS_VSC_SDP_ERR_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS (0x00002007) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE 0:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_EVENT_STATUS_CAP_CHANGE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS (0x00002008) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL 2:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_INACTIVE (0x00000000) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_TRANSITION_TO_ACTIVE (0x00000001) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_ACTIVE_DISP_FROM_RFB (0x00000002) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_ACTIVE_SINK_DEV_TIMING (0x00000003) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_TRANSITION_TO_INACTIVE (0x00000004) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_RESERVED0 (0x00000005) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_RESERVED1 (0x00000006) /* R-XUV */ +#define NV_DPCD_PANEL_SELF_REFRESH_STATUS_VAL_SINK_DEV_INTERNAL_ERR (0x00000007) /* R-XUV */ + +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG0 (0x00002009) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG0_MAX_RESYNC_FRAME_CNT 3:0 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG0_LAST_RESYNC_FRAME_CNT 7:4 /* R-XUF */ + +#define NV_DPCD_PANEL_SELF_REFRESH_DEBUG1 (0x0000200A) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP (0x0000200A) /* R-XUR */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_PSR_STATE_BIT 0:0 /* R-XUF */ +#define 
NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_RFB_BIT 1:1 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_CRC_VALID_BIT 2:2 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_VALID_BIT 3:3 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_FIRST_LINE_RCVD 4:4 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_SU_LAST_LINE_RCVD 5:5 /* R-XUF */ +#define NV_DPCD_PANEL_SELF_REFRESH_LAST_SDP_Y_CORD_VALID 6:6 /* R-XUF */ + +// 0200Bh: RESERVED. Read all 0s + +#define NV_DPCD_LANE0_1_STATUS_ESI (0x0000200C) /* R-XUR */ +#define NV_DPCD_LANE2_3_STATUS_ESI (0x0000200D) /* R-XUR */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE 1:1 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED 2:2 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEX_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE 4:4 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CR_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE 5:5 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_CHN_EQ_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED 6:6 /* R-XUF */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANEX_XPLUS1_STATUS_ESI_LANEXPLUS1_SYMBOL_LOCKED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI (0x0000200E) /* R-XUR */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE 0:0 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE 6:6 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_DOWNSTRM_PORT_STATUS_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED 7:7 /* R-XUF */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_LANE_ALIGN_STATUS_UPDATED_ESI_LINK_STATUS_UPDATED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD_SINK_STATUS_ESI (0x0000200F) /* R-XUR */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS 0:0 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_0_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS 1:1 /* R-XUF */ +#define NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS_IN_SYNC_NO (0x00000000) /* R-XUV */ +#define 
NV_DPCD_SINK_STATUS_ESI_RECEIVE_PORT_1_STATUS_IN_SYNC_YES (0x00000001) /* R-XUV */ + +// 0x00002010-0x0002025: RESERVED. Read all 0s + +#define NV_DPCD_OVERDRIVE_STATUS (0x00002026) /* R-XUR */ +#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS 0:0 /* R-XUF */ +#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS_NOT_ACTIVE (0x00000000) /* R-XUV */ +#define NV_DPCD_OVERDRIVE_STATUS_OVERDRIVE_ENGINE_STATUS_ACTIVE (0x00000001) /* R-XUV */ + +// 0x00002027-0x00067FF: RESERVED. Read all 0s + +#define NV_DPCD_HDCP_BKSV_OFFSET (0x00068000) /* R-XUR */ +#define NV_DPCD_HDCP_RPRIME_OFFSET (0x00068005) /* R-XUR */ +#define NV_DPCD_HDCP_AKSV_OFFSET (0x00068007) /* RWXUR */ +#define NV_DPCD_HDCP_AN_OFFSET (0x0006800C) /* RWXUR */ +#define NV_DPCD_HDCP_BKSV_S_OFFSET (0x00000300) /* RWXUV */ +#define NV_DPCD_HDCP_RPRIME_S_OFFSET (0x00000305) /* RWXUV */ +#define NV_DPCD_HDCP_AKSV_S_OFFSET (0x00000307) /* RWXUV */ +#define NV_DPCD_HDCP_AN_S_OFFSET (0x0000030c) /* RWXUV */ +#define NV_DPCD_HDCP_VPRIME_OFFSET (0x00068014) /* R-XUR */ +#define NV_DPCD_HDCP_BCAPS_OFFSET (0x00068028) /* R-XUR */ +#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE 0:0 /* R-XUF */ +#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER 1:1 /* R-XUF */ +#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP_BSTATUS_OFFSET (0x00068029) /* R-XUR */ +#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUESET 3:3 /* R-XUF */ +#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUESET_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUESET_TRUE (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE 2:2 /* R-XUF */ +#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE_TRUE (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE 1:1 /* R-XUF */ +#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP_BSTATUS_RPRIME_AVAILABLE_TRUE (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP_BSTATUS_READY 0:0 /* R-XUF */ +#define NV_DPCD_HDCP_BSTATUS_READY_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP_BSTATUS_READY_TRUE (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP_BINFO_OFFSET (0x0006802A) /* R-XUR */ +#define NV_DPCD_HDCP_BINFO_OFFSET_DEVICE_COUNT 6:0 /* R-XUF */ +#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED 7:7 /* R-XUF */ +#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_DEVS_EXCEEDED_TRUE (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP_BINFO_OFFSET_DEPTH 10:8 /* R-XUF */ +#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED 11:11 /* R-XUF */ +#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP_BINFO_OFFSET_MAX_CASCADE_EXCEEDED_TRUE (0x00000001) /* R-XUV */ + +#define NV_DPCD_HDCP_KSV_FIFO_OFFSET (0x0006802C) /* R-XUR */ + +#define NV_DPCD_HDCP_AINFO_OFFSET (0x0006803B) /* RWXUR */ +#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD 0:0 /* RWXUF */ +#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD_NO (0x00000000) /* RWXUV */ +#define NV_DPCD_HDCP_AINFO_OFFSET_REAUTHENTICATION_ENABLE_IRQ_HPD_YES (0x00000001) /* RWXUV */ 
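Since BINFO packs several fields into the two bytes starting at 0x6802A, a short parsing sketch may help. It is hypothetical: it assumes the two bytes were read in ascending-offset order and form the 16-bit value implied by the bit positions listed above (DEVICE_COUNT in bits 6:0, MAX_DEVS_EXCEEDED in bit 7, DEPTH in bits 10:8, MAX_CASCADE_EXCEEDED in bit 11).

#include <stdint.h>
#include <stdbool.h>

typedef struct {
    uint8_t deviceCount;        /* BINFO_OFFSET_DEVICE_COUNT, bits 6:0        */
    bool    maxDevsExceeded;    /* BINFO_OFFSET_MAX_DEVS_EXCEEDED, bit 7      */
    uint8_t depth;              /* BINFO_OFFSET_DEPTH, bits 10:8              */
    bool    maxCascadeExceeded; /* BINFO_OFFSET_MAX_CASCADE_EXCEEDED, bit 11  */
} HdcpBinfo;

/* binfo[0] is the byte read at DPCD 0x6802A, binfo[1] the byte at 0x6802B. */
static HdcpBinfo hdcpParseBinfo(const uint8_t binfo[2])
{
    uint16_t  v = (uint16_t)binfo[0] | ((uint16_t)binfo[1] << 8);
    HdcpBinfo out;

    out.deviceCount        = (uint8_t)(v & 0x7F);
    out.maxDevsExceeded    = ((v >> 7) & 0x1) != 0;
    out.depth              = (uint8_t)((v >> 8) & 0x7);
    out.maxCascadeExceeded = ((v >> 11) & 0x1) != 0;
    return out;
}

DEVICE_COUNT is what tells a repeater-aware transmitter how many 5-byte KSVs to expect when it subsequently drains the KSV FIFO at NV_DPCD_HDCP_KSV_FIFO_OFFSET.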
+ +// Eight-Lane DP Specific DPCD defines +#define NV_DPCD_SL_TRAINING_LANE0_1_SET2(baseAddr) (baseAddr + 0x0000010E) /* RWXUR */ +#define NV_DPCD_SL_TRAINING_LANE2_3_SET2(baseAddr) (baseAddr + 0x0000010F) /* RWXUR */ +#define NV_DPCD_SL_LANE4_5_STATUS(baseAddr) (baseAddr + 0x00000202) /* R-XUR */ +#define NV_DPCD_SL_LANE6_7_STATUS(baseAddr) (baseAddr + 0x00000203) /* R-XUR */ +#define NV_DPCD_DUAL_DP_CAP (0x000003B0) /* RWXUR */ // Dual DP Capability Register +#define NV_DPCD_DUAL_DP_CAP_DDC 0:0 /* RWXUF */ // Dual DP Capability +#define NV_DPCD_DUAL_DP_CAP_DDC_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_DUAL_DP_CAP_DDC_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD_DUAL_DP_CAP_DDCIC 1:1 /* RWXUF */ // DDCIC : Dual DP Column Interleave Mode Capability +#define NV_DPCD_DUAL_DP_CAP_DDCIC_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD_DUAL_DP_CAP_DDCIC_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD_DUAL_DP_CAP_DDPSBSC 2:2 /* RWXUF */ // DDPSBSC : Dual DP Pixel Side-by-Side Mode Capability +#define NV_DPCD_DUAL_DP_CAP_DDPSBSC_DISBALE (0x00000000) /* RWXUV */ +#define NV_DPCD_DUAL_DP_CAP_DDPSBSC_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD_DUAL_DP_BASE_ADDRESS 19:0 /* RWXUF */ +#define NV_DPCD_DUAL_DP_COLUMN_WIDTH 15:0 /* RWXUF */ +#define NV_DPCD_DUAL_DP_MAX_LANECOUNT 4:0 /* RWXUF */ +#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_1H 0x1 /* RWXUV */ +#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_2H 0x2 /* RWXUV */ +#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_4H 0x4 /* RWXUV */ +#define NV_DPCD_DUAL_DP_MAX_LANECOUNT_8H 0x8 /* RWXUV */ + +#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL(baseAddr) (baseAddr + 0x00000110) /* RWXUR */ // Dual Link Control Register +#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE 1:0 /* RWXUF */ // PIX_MODE : Pixel mode select +#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE_SIDE_BY_SIDE (0x00000000) /* RWXUV */ // Side by side Mode enabled +#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_PIX_MODE_COL_INTERLEAVE (0x00000001) /* RWXUV */ // Column Interleave Mode enabled +#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE 7:7 /* RWXUF */ // DD_ENABLE: Enable Dual DP mode. 
+#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD_DUAL_DP_DUAL_LINK_CONTROL_DD_ENABLE_FALSE (0x00000000) /* RWXUV */ + +#define NV_DPCD_DUAL_DP_PIXEL_OVERLAP(baseAddr) (baseAddr + 0x00000111) /* RWXUR */ // PIXEL_OVERLAP Register +#define NV_DPCD_DUAL_DP_PIXEL_OVERLAP_IGNORE_PIX_COUNT 6:0 /* RWXUF */ // Ignore Pix Count - Number of pixels to ignore + +#define NV_DPCD_HDCP22_BCAPS_OFFSET (0x0006921D) /* R-XUR */ +#define NV_DPCD_HDCP22_BCAPS_SIZE (0x00000003) /* R---S */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER 0:0 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_REPEATER_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE 1:1 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_HDCP_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_RECEIVER_CAPABILITY_MASK 15:2 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_RECEIVER_CAPABILITY_MASK_RESERVED (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_VERSION 23:16 /* R-XUF */ +#define NV_DPCD_HDCP22_BCAPS_OFFSET_VERSION_22 (0x00000002) /* R-XUV */ + +#define NV_DPCD_HDCP22_BINFO_OFFSET (0x00069330) /* R-XUR */ +#define NV_DPCD_HDCP22_BINFO_SIZE (0x00000002) /* R---S */ + +#define NV_DPCD_HDCP22_RX_STATUS (0x00069493) /* R-XUR */ +#define NV_DPCD_HDCP22_RX_STATUS_SIZE (0x00000001) /* R---S */ +#define NV_DPCD_HDCP22_RX_STATUS_READY 0:0 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_READY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE 1:1 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_HPRIME_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE 2:2 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_PAIRING_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST 3:3 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_REAUTH_REQUEST_NO (0x00000000) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE 4:4 /* R-XUF */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD_HDCP22_RX_STATUS_LINK_INTEGRITY_FAILURE_NO (0x00000000) /* R-XUV */ + +#define NV_DPCD_HDCP22_RTX_OFFSET (0x00069000) /* RWXUR */ +#define NV_DPCD_HDCP22_RTX_SIZE (0x00000008) /* R---S */ + +#define NV_DPCD_HDCP22_TXCAPS_OFFSET (0x00069008) /* RWXUR */ +#define NV_DPCD_HDCP22_TXCAPS_SIZE (0x00000003) /* R---S */ + +#define NV_DPCD_HDCP22_CERTRX (0x0006900B) /* R-XUR */ +#define NV_DPCD_HDCP22_CERTRX_SIZE (0x0000020A) /* R---S */ + +#define NV_DPCD_HDCP22_RRX (0x00069215) /* R-XUR */ +#define NV_DPCD_HDCP22_RRX_SIZE (0x00000008) /* R---S */ + +#endif // #ifndef _DPCD_H_ diff --git a/src/common/inc/displayport/dpcd14.h b/src/common/inc/displayport/dpcd14.h new file mode 100644 index 000000000..89c9172c5 --- /dev/null +++ b/src/common/inc/displayport/dpcd14.h @@ -0,0 +1,790 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DISPLAYPORT14_H_ +#define _DISPLAYPORT14_H_ + +#define NV_DPCD14_EXTEND_CAP_BASE (0x00002200) + +#define NV_DPCD14_MAX_LINK_BANDWIDTH (0x00000001) /* R-XUR */ +#define NV_DPCD14_MAX_LINK_BANDWIDTH_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_MAX_LINK_BANDWIDTH_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD14_MAX_DOWNSPREAD (0x00000003) /* R-XUR */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED 7:7 /* R-XUF */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_MAX_DOWNSPREAD_TPS4_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL (0x0000000E) /* R-XUR */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP 7:7 /* R-XUF */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_EXTENDED_RX_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_SUPPORT (0x00000060) /* R-XUR */ +#define NV_DPCD14_DSC_SUPPORT_DSC_SUPPORT 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_SUPPORT_DSC_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SUPPORT_DSC_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_ALGORITHM_REVISION (0x00000061) /* R-XUR */ +#define NV_DPCD14_DSC_ALGORITHM_REVISION_MAJOR 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_ALGORITHM_REVISION_MINOR 7:4 /* R-XUF */ + +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK (0x00000062) /* R-XUR */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE 1:0 /* R-XUF */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_1KB (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_4KB (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_16KB (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_RC_BUFFER_BLOCK_SIZE_64KB (0x00000003) /* R-XUV */ + +#define NV_DPCD14_DSC_RC_BUFFER (0x00000063) /* R-XUR */ +#define NV_DPCD14_DSC_RC_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1 (0x00000064) /* R-XUR */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_1_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2_NO (0x00000000) /* R-XUV */ +#define 
NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_2_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_4_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6 4:4 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_6_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8 5:5 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_8_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10 6:6 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_10_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12 7:7 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_1_SLICES_PER_SINK_12_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_LINE_BUFFER (0x00000065) /* R-XUR */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_9 (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_10 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_11 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_12 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_13 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_14 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_15 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_16 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_LINE_BUFFER_BIT_DEPTH_8 (0x00000008) /* R-XUV */ + +#define NV_DPCD14_DSC_BLOCK_PREDICTION (0x00000066) /* R-XUR */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_BLOCK_PREDICTION_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_1 (0x00000067) /* R-XUR */ +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_1_LSB 7:0 /* R-XUF */ + +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_2 (0x00000068) /* R-XUR */ +#define NV_DPCD14_DSC_MAXIMUM_BITS_PER_PIXEL_2_MSB 1:0 /* R-XUF */ + +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES (0x00000069) /* R-XUR */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_RGB_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_444_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422 2:2 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422_NO (0x00000000) /* R-XUV */ +#define 
NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_SIMPLE_422_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_422_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420 4:4 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_FORMAT_CAPABILITIES_YCbCr_NATIVE_420_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES (0x0000006A) /* R-XUR */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_8_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR 2:2 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_10_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR 3:3 /* R-XUF */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_DECODER_COLOR_DEPTH_CAPABILITIES_12_BITS_PER_COLOR_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_PEAK_THROUGHPUT (0x0000006B) /* R-XUR */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0 3:0 /* R-XUF */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_340 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_400 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_450 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_500 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_550 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_600 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_650 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_700 (0x00000008) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_750 (0x00000009) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_800 (0x0000000A) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_850 (0x0000000B) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_900 (0x0000000C) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_950 (0x0000000D) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE0_1000 (0x0000000E) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1 7:4 /* R-XUF */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_340 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_400 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_450 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_500 (0x00000004) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_550 (0x00000005) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_600 (0x00000006) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_650 (0x00000007) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_700 (0x00000008) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_750 (0x00000009) /* R-XUV */ +#define 
NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_800 (0x0000000A) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_850 (0x0000000B) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_900 (0x0000000C) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_950 (0x0000000D) /* R-XUV */ +#define NV_DPCD14_DSC_PEAK_THROUGHPUT_MODE1_1000 (0x0000000E) /* R-XUV */ + +#define NV_DPCD14_DSC_MAXIMUM_SLICE_WIDTH (0x0000006C) /* R-XUR */ +#define NV_DPCD14_DSC_MAXIMUM_SLICE_WIDTH_MAX 7:0 /* R-XUF */ + +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2 (0x0000006D) /* R-XUR */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_16_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20 1:1 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_20_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24 2:2 /* R-XUF */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_SLICE_CAPABILITIES_2_SLICES_PER_SINK_24_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT (0x0000006F) /* R-XUR */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED 2:0 /* R-XUF */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_16 (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_8 (0x00000001) /* R-XUV */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_4 (0x00000002) /* R-XUV */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1_2 (0x00000003) /* R-XUV */ +#define NV_DPCD14_DSC_BITS_PER_PIXEL_INCREMENT_SUPPORTED_1 (0x00000004) /* R-XUV */ + +// Field definition only used only with 128b/132b for DP2.0+ +#define NV_DPCD20_TRAINING_LANE_SET(i) (0x00000103+(i)) /* RW-1A */ +#define NV_DPCD20_TRAINING_LANE_SET__SIZE 4 /* RW--S */ +#define NV_DPCD20_TRAINING_LANE_SET_TX_FFE_PRESET_VALUE 3:0 /* RWXUF */ + +#define NV_DPCD14_DSC_ENABLE (0x00000160) /* R-XUR */ +#define NV_DPCD14_DSC_ENABLE_SINK 0:0 /* R-XUF */ +#define NV_DPCD14_DSC_ENABLE_SINK_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DSC_ENABLE_SINK_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_FEC_CAPABILITY (0x00000090) /* R-XUR */ +#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE 0:0 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_FEC_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE 1:1 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE 2:2 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_CORRECTED_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE 3:3 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_BIT_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */ +#define 
NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE 4:4 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_PARITY_BLOCK_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE 5:5 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_PARITY_ERROR_COUNT_CAPABLE_YES (0x00000001) /* R-XUV */ +// Bit 6 : RESERVED. Read 0 +#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED 7:7 /* R-XUF */ +#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_CAPABILITY_FEC_ERROR_REPORTING_POLICY_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_TRAINING_PATTERN_SET (0x00000102) /* RWXUR */ +#define NV_DPCD14_TRAINING_PATTERN_SET_TPS 3:0 /* RWXUF */ +#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_NONE (0x00000000) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP1 (0x00000001) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP2 (0x00000002) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP3 (0x00000003) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_TPS_TP4 (0x00000007) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 /* RWXUF */ +#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO (0x00000000) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES (0x00000001) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED 5:5 /* RWXUF */ +#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_FALSE (0x00000000) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_SCRAMBLING_DISABLED_TRUE (0x00000001) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL 7:6 /* RWXUF */ +#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ILLEGAL_SYMBOL_ERROR (0x00000000) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_DISPARITY_ERROR (0x00000001) /* RWXUV */ +#define NV_DPCD14_TRAINING_PATTERN_SET_SYM_ERR_SEL_ILLEGAL_SYMBOL_ERROR (0x00000002) /* RWXUV */ + +// Field definition only used only with 128b/132b for DP2.0+ +#define NV_DPCD20_128B_132B_TRAINING_PATTERN (0x00000102) /* RWXUR */ +#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT 3:0 /* RWXUF */ +#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT_NONE (0x00000000) /* RWXUV */ +#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT_TPS1 (0x00000001) /* RWXUV */ +#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT_TPS2 (0x00000002) /* RWXUV */ +#define NV_DPCD20_128B_132B_TRAINING_PATTERN_SELECT_TPS2_CDS (0x00000003) /* RWXUV */ +// Note: Bit 7:4 are reserved for 128b/132b. 
Driver should keep them 0 + +#define NV_DPCD14_LINK_QUAL_LANE_SET(i) (0x0000010B+(i)) /* RW-1A */ +#define NV_DPCD14_LINK_QUAL_LANE_SET__SIZE 4 /* R---S */ +#define NV_DPCD14_LINK_QUAL_LANE_SET_LQS 2:0 /* RWXUF */ +#define NV_DPCD14_LINK_QUAL_LANE_SET_LQS_CP2520PAT3 (0x00000007) /* RWXUV */ + +#define NV_DPCD14_FEC_CONFIGURATION (0x00000120) /* RWXUR */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY 0:0 /* RWXUF */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY_NO (0x00000000) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_READY_YES (0x00000001) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL 3:1 /* RWXUF */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_FEC_ERROR_COUNT_DIS (0x00000000) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_UNCORRECTED_BLOCK_ERROR_COUNT (0x00000001) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_CORRECTED_BLOCK_ERROR_COUNT (0x00000002) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_BIT_ERROR_COUNT (0x00000003) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BLOCK_ERROR_COUNT (0x00000004) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_FEC_ERROR_COUNT_SEL_PARITY_BIT_ERROR_COUNT (0x00000005) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT 5:4 /* RWXUF */ +#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_0 (0x00000000) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_1 (0x00000001) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_2 (0x00000002) /* RWXUV */ +#define NV_DPCD14_FEC_CONFIGURATION_LANE_SELECT_LANE_3 (0x00000003) /* RWXUV */ + +// Field definition only used only with 128b/132b for DP2.0+ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED (0x00000204) /* R-XUR */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE 2:2 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE 3:3 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_LT_FAILED 4:4 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_LT_FAILED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_128B_132B_LT_FAILED_YES (0x00000001) /* R-XUV */ + +// Field definition for 0x0206/0x0207h (ADJUST_REQUEST_LANEX), only used only with 128b/132b for DP2.0+ +#define NV_DPCD20_LANEX_XPLUS1_ADJUST_REQ_LANEX_TX_FFE_PRESET_VALUE 3:0 /* R-XUF */ +#define NV_DPCD20_LANEX_XPLUS1_ADJUST_REQ_LANEXPLUS1_TX_FFE_PRESET_VALUE 7:4 /* R-XUF */ + +// PANEL REPLAY RELATED DPCD +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY (0x000000B0) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED 0:0 +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_YES (0x00000001) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE 1:1 +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_YES (0x00000001) + +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION (0x000001B0) 
+#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE 0:0 +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_YES (0x00000001) + +#define NV_DPCD14_PHY_TEST_PATTERN (0x00000248) /* R-XUR */ +#define NV_DPCD14_PHY_TEST_PATTERN_SEL_CP2520PAT3 (0x00000007) /* R-XUV */ + +#define NV_DPCD14_DSC_CRC_0 (0x00000262) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_0_LOW_BYTE NV_DPCD14_DSC_CRC_0 +#define NV_DPCD14_DSC_CRC_0_HIGH_BYTE (0x00000263) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_1 (0x00000264) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_1_LOW_BYTE NV_DPCD14_DSC_CRC_1 +#define NV_DPCD14_DSC_CRC_1_HIGH_BYTE (0x00000265) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_2 (0x00000266) /* R-XUR */ +#define NV_DPCD14_DSC_CRC_2_LOW_BYTE NV_DPCD14_DSC_CRC_2 +#define NV_DPCD14_DSC_CRC_2_HIGH_BYTE (0x00000267) /* R-XUR */ + +#define NV_DPCD14_FEC_STATUS (0x00000280) /* R-XUR */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED 0:0 /* R-XUF */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_EN_DETECTED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED 1:1 /* R-XUF */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_STATUS_FEC_DECODE_DIS_DETECTED_YES (0x00000001) /* R-XUV */ +// Bits 7-2: RESERVED. +#define NV_DPCD14_FEC_STATUS_CLEAR (0x00000001) + +#define NV_DPCD14_FEC_ERROR_COUNT (0x00000281) /* R-XUR */ +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_LOW_BYTE NV_DPCD14_FEC_ERROR_COUNT +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_HIGH_BYTE (0x00000282) /* R-XUR */ +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID 7:7 /* R-XUF */ +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_FEC_ERROR_COUNT_FEC_ERROR_COUNT_VALID_YES (0x00000001) /* R-XUV */ + +// Field definition for 0x0200E (LANE_ALIGN_STATUS_UPDATED_ESI), used only when DP2.0+ 128b/132b is enabled. +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI (0x0000200E) /* R-XUR */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE 2:2 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_EQ_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE 3:3 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_DPRX_CDS_INTERLANE_ALIGN_DONE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_LT_FAILED 4:4 /* R-XUF */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_LT_FAILED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_LANE_ALIGN_STATUS_UPDATED_ESI_128B_132B_LT_FAILED_YES (0x00000001) /* R-XUV */ + +// Field definition for 0x0200F (SINK_STATUS_ESI), used only when DP2.0+ 128b/132b is enabled. 
+#define NV_DPCD20_SINK_STATUS_ESI (0x0000200F) /* R-XUR */ +#define NV_DPCD20_SINK_STATUS_ESI_INTRA_HOP_AUX_REPLY 3:3 /* R-XUF */ +#define NV_DPCD20_SINK_STATUS_ESI_INTRA_HOP_AUX_REPLY_DPRX (0x00000000) /* R-XUV */ +#define NV_DPCD20_SINK_STATUS_ESI_INTRA_HOP_AUX_REPLY_LTTPR (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_REV (0x00002200) /* R-XUR */ +#define NV_DPCD14_EXTENDED_REV_MAJOR 7:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_REV_MAJOR_1 (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_REV_MINOR_4 (0x00000004) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH (0x00002201) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LINK_BANDWIDTH_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT (0x00002202) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE 4:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_1 (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_2 (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_LANE_4 (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_POST_LT_ADJ_REQ_SUPPORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_TPS3_SUPPORTED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_LANE_COUNT_ENHANCED_FRAMING_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD (0x00002203) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL_NONE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_VAL_0_5_PCT (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_FALSE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LT_TRUE (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAX_DOWNSPREAD_TPS4_SUPPORTED_YES (0x00000001) /* R-XUV */ + +// NORP = Number of Receiver Ports = Value + 1 +#define NV_DPCD14_EXTENDED_NORP (0x00002204) /* R-XUR */ +#define NV_DPCD14_EXTENDED_NORP_VAL 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_VAL_ONE (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_VAL_TWO (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_VAL_SST_MAX (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_5V 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_12V 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_NORP_DP_PWR_CAP_18V 7:7 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT (0x00002205) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT_NO (0x00000000) /* R-XUV */ +#define 
NV_DPCD14_EXTENDED_DOWNSTREAMPORT_PRESENT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE 2:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_DISPLAYPORT (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_ANALOG (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_HDMI_DVI (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_TYPE_OTHERS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION 3:3 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_FORMAT_CONVERSION_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWNSTREAMPORT_DETAILED_CAP_INFO_AVAILABLE_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING (0x00002206) /* R-XUR */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_8B_10B_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B 1:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_MAIN_LINK_CHANNEL_CODING_ANSI_128B_132B_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT (0x00002207) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_COUNT 3:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_MSA_TIMING_PAR_IGNORED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DOWN_STREAM_PORT_OUI_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_RECEIVE_PORT0_CAP_0 (0x00002208) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORT1_CAP_0 (0x0000220A) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID 1:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_LOCAL_EDID_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT 2:2 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_ASSO_TO_PRECEDING_PORT_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE 3:3 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_HBLANK_EXPANSION_CAPABLE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT_PIXEL (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_UNIT_BYTE (0x00000001) /* R-XUV */ +#define 
NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_0_BUFFER_SIZE_PER_PORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_RECEIVE_PORT0_CAP_1 (0x00002209) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORT1_CAP_1 (0x0000220B) /* R-XUR */ +#define NV_DPCD14_EXTENDED_RECEIVE_PORTX_CAP_1_BUFFER_SIZE 7:0 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP (0x0000220C) /* R-XUR */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED 7:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_1K (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_5K (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_10K (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_100K (0x00000008) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_400K (0x00000010) /* R-XUV */ +#define NV_DPCD14_EXTENDED_I2C_CTRL_CAP_SPEED_1M (0x00000020) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP (0x0000220D) /* R-XUR */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_EDP_CONFIG_CAP_ALTERNATE_SCRAMBLER_RESET_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL (0x0000220E) /* R-XUR */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL 6:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_DEFAULT (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_4MS (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_8MS (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_12MS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_VAL_16MS (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_TRAINING_AUX_RD_INTERVAL_EXTENDED_RECEIVER_CAP_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_ADAPTER_CAP (0x0000220F) /* R-XUR */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_FORCE_LOAD_SENSE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN 1:1 /* R-XUF */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_ADAPTER_CAP_ALT_I2C_PATTERN_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST (0x00002210) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP 0:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_GTC_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP 2:2 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_AV_SYNC_CAP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY 3:3 /* R-XUF */ 
+#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_SDP_EXT_FOR_COLORIMETRY_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP 4:4 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING 5:5 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_VESA_SDP_CHAINING_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP 6:6 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING 7:7 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_FEATURE_ENUM_LIST_VSC_EXT_CTA_SDP_CHAINING_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST (0x00002211) /* R-XUR */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD 7:0 /* R-XUF */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_1MS (0x00000000) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_20MS (0x00000001) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_40MS (0x00000002) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_60MS (0x00000003) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_80MS (0x00000004) /* R-XUV */ +#define NV_DPCD14_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST_PERIOD_100MS (0x00000005) /* R-XUV */ + +#define NV_DPCD14_EXTENDED_VSC_EXT_VESA_SDP_MAX_CHAINING (0x00002212) /* R-XUR */ +#define NV_DPCD14_EXTENDED_VSC_EXT_VESA_SDP_MAX_CHAINING_VAL 7:0 /* R-XUF */ + +#define NV_DPCD14_EXTENDED_VSC_EXT_CTA_SDP_MAX_CHAINING (0x00002213) /* R-XUR */ +#define NV_DPCD14_EXTENDED_VSC_EXT_CTA_SDP_MAX_CHAINING_VAL 7:0 /* R-XUF */ + +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST (0x00002214) /* R-XUR */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED 0:0 /* R-XUF */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_ADAPTIVE_SYNC_SDP_SUPPORTED_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED 4:4 /* R-XUF */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_DPRX_FEATURE_ENUM_LIST_VSC_EXT_FRAMEWORK_V1_SUPPORTED_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES (0x00002215) /* R-XUR */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR10 0:0 /* R-XUF */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR10_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR10_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR20 1:1 /* R-XUF */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR20_NO 
(0x00000000) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR20_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR13_5 2:2 /* R-XUF */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR13_5_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_128B_132B_SUPPORTED_LINK_RATES_UHBR13_5_YES (0x00000001) /* R-XUV */ + +// +// The interval is (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * INTERVAL_UNIT. +// The maximum is 256 ms. +// +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL (0x00002216) /* R-XUR */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_VAL 6:0 /* R-XUF */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_UNIT 7:7 /* R-XUF */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_UNIT_2MS (0x00000000) /* R-XUV */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_UNIT_1MS (0x00000001) /* R-XUV */ +#define NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL_MAX_MS 256 + +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS (0x00003036) /* R-XUR */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_MODE 0:0 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_MODE_TMDS (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_MODE_FRL (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RESULT 6:1 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G 1:1 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_9G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G 2:2 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_18G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G 3:3 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_24G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G 4:4 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_32G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G 5:5 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_40G_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G 6:6 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_LINK_CONFIG_STATUS_LT_RES_48G_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE(i) (0x00003037+(i)) /* RW-1A */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE__SIZE 4 /* R---S */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT 3:0 /* R-XUF */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_ZERO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_THREE (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_TEN (0x00000002) /* R-XUV */ +#define NV_DPCD20_PCON_DOWNSTREAM_LINK_ERROR_LANE_COUNT_HUNDRED (0x00000004) /* R-XUV */ + +#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS (0x0000303B) /* R-XUR */ +#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE 0:0 /* R-XUF */ 
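The formula in the comment above NV_DPCD20_128B_132B_TRAINING_AUX_RD_INTERVAL (0x00002216) works out as follows: take the 7-bit _VAL field (bits 6:0), add one, and multiply by the unit selected by bit 7 (2 ms for _UNIT_2MS, 1 ms for _UNIT_1MS); with a 7-bit value the result never exceeds the 256 ms stated by _MAX_MS. A minimal sketch, again assuming a hypothetical dpcdReadByte() helper:

    #include "nvtypes.h"

    /* Hypothetical helper: returns the raw byte at the given DPCD address. */
    extern NvU8 dpcdReadByte(NvU32 addr);

    static NvU32 get128b132bTrainingAuxRdIntervalMs(void)
    {
        NvU8  raw    = dpcdReadByte(0x00002216);
        NvU32 val    = raw & 0x7F;                  /* _VAL, bits 6:0 */
        NvU32 unitMs = ((raw >> 7) & 0x1) ? 1 : 2;  /* _UNIT: 0 -> 2 ms, 1 -> 1 ms */
        NvU32 ms     = (val + 1) * unitMs;          /* (value + 1) * INTERVAL_UNIT */

        /* Maximum: (127 + 1) * 2 = 256 ms, matching _MAX_MS above. */
        return ms;
    }
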
+#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_ACTIVE_YES (0x00000001) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_READY 1:1 /* R-XUF */ +#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_READY_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_PCON_HDMI_TX_LINK_STATUS_LINK_READY_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_PCON_CONTROL_0 (0x00003050) /* RWXUR */ +#define NV_DPCD20_PCON_CONTROL_0_OUTPUT_CONFIG 0:0 /* RWXUF */ +#define NV_DPCD20_PCON_CONTROL_0_OUTPUT_CONFIG_DVI (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_0_OUTPUT_CONFIG_HDMI (0x00000001) /* RWXUV */ + +#define NV_DPCD20_PCON_CONTROL_1 (0x00003051) /* RWXUR */ +#define NV_DPCD20_PCON_CONTROL_1_CONVERT_YCBCR420 0:0 /* RWXUF */ +#define NV_DPCD20_PCON_CONTROL_1_CONVERT_YCBCR420_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_1_CONVERT_YCBCR420_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS 1:1 /* RWXUF */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS_NO (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_EDID_PROCESS_YES (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING 2:2 /* RWXUF */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING_NO (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_AUTO_SCRAMBLING_YES (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING 3:3 /* RWXUF */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING_NO (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_1_DISABLE_HDMI_FORCE_SCRAMBLING_YES (0x00000001) /* RWXUV */ + +#define NV_DPCD20_PCON_CONTROL_2 (0x00003052) /* RWXUR */ +#define NV_DPCD20_PCON_CONTROL_2_CONVERT_YCBCR422 0:0 /* RWXUF */ +#define NV_DPCD20_PCON_CONTROL_2_CONVERT_YCBCR422_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_2_CONVERT_YCBCR422_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD20_PCON_CONTROL_3 (0x00003053) /* RWXUR */ +#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH 1:0 /* RWXUF */ +#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_SAME_AS_INC (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_8BPC (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_10BPC (0x00000002) /* RWXUV */ +#define NV_DPCD20_PCON_CONTROL_3_COMPONENT_BIT_DEPTH_12BPC (0x00000003) /* RWXUV */ + +#define NV_DPCD14_OUTPUT_HTOTAL_LOW (0x00003054) /* RWXUR */ +#define NV_DPCD14_OUTPUT_HTOTAL_HIGH (0x00003055) /* RWXUR */ + +#define NV_DPCD14_OUTPUT_HSTART_LOW (0x00003056) /* RWXUR */ +#define NV_DPCD14_OUTPUT_HSTART_HIGH (0x00003057) /* RWXUR */ + +#define NV_DPCD14_OUTPUT_HSP_HSW_LOW (0x00003056) /* RWXUR */ +#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH (0x00003057) /* RWXUR */ +#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_VAL 6:0 /* RWXUF */ +#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP 7:7 /* RWXUF */ +#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP_POSITIVE (0x00000000) /* RWXUV */ +#define NV_DPCD14_OUTPUT_HSP_HSW_HIGH_OUTPUT_HSP_NEGATIVE (0x00000001) /* RWXUV */ + +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1 (0x0000305A) /* RWXUR */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW 2:0 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_ZERO (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_9G (0x00000001) /* RWXUV */ +#define 
NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_18G (0x00000002) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_24G (0x00000003) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_32G (0x00000004) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_40G (0x00000005) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_MAX_LINK_BW_48G (0x00000006) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE 3:3 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_SRC_CONTROL_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE 4:4 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_CONCURRENT_LT_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE 5:5 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_LINK_FRL_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE 6:6 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_IRQ_LINK_FRL_MODE_ENABLE (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_HDMI_LINK 7:7 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_HDMI_LINK_DISABLE (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_1_HDMI_LINK_ENABLE (0x00000001) /* RWXUV */ + +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2 (0x0000305B) /* RWXUR */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK 5:0 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_9G (0x00000001) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_18G (0x00000002) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_24G (0x00000004) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_32G (0x00000008) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_40G (0x00000010) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_LINK_BW_MASK_48G (0x00000020) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL 6:6 /* RWXUF */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL_NORMAL (0x00000000) /* RWXUV */ +#define NV_DPCD20_PCON_FRL_LINK_CONFIG_2_FRL_LT_CONTROL_EXTENDED (0x00000001) /* RWXUV */ + +// LT Tunable Repeater Related offsets + +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV (0x000F0000) /* R-XUR */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MINOR 3:0 /* R-XUF */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MINOR_0 (0x00000000) /* R-XUV */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MAJOR 7:4 /* R-XUF */ +#define NV_DPCD14_LT_TUNABLE_PHY_REPEATER_REV_MAJOR_1 (0x00000001) /* R-XUV */ + +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER (0x000F0001) /* R-XUR */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_1_62_GBPS (0x00000006) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_2_70_GBPS (0x0000000A) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_5_40_GBPS (0x00000014) /* R-XUV */ +#define NV_DPCD14_MAX_LINK_RATE_PHY_REPEATER_VAL_8_10_GBPS (0x0000001E) /* R-XUV */ + +#define NV_DPCD14_PHY_REPEATER_CNT (0x000F0002) /* R-XUR */ +#define 
NV_DPCD14_PHY_REPEATER_CNT_VAL 7:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_0 (0x00000000) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_1 (0x00000080) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_2 (0x00000040) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_3 (0x00000020) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_4 (0x00000010) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_5 (0x00000008) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_6 (0x00000004) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_7 (0x00000002) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_VAL_8 (0x00000001) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_CNT_MAX 8 + +#define NV_DPCD14_PHY_REPEATER_MODE (0x000F0003) /* R-XUR */ +#define NV_DPCD14_PHY_REPEATER_MODE_VAL_TRANSPARENT (0x00000055) /* R-XUV */ +#define NV_DPCD14_PHY_REPEATER_MODE_VAL_NON_TRANSPARENT (0x000000AA) /* R-XUV */ + +#define NV_DPCD14_MAX_LANE_COUNT_PHY_REPEATER (0x000F0004) /* R-XUR */ +#define NV_DPCD14_MAX_LANE_COUNT_PHY_REPEATER_VAL 4:0 /* R-XUF */ + +#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT (0x000F0005) /* RWXUR */ +#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT_REQ 6:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EXTENDED_WAKE_TIMEOUT_GRANT 7:7 /* RWXUF */ + +#define NV_DPCD14_PHY_REPEATER_MAIN_LINK_CHANNEL_CODING (0x000F0006) /* RWXUR */ +#define NV_DPCD14_PHY_REPEATER_MAIN_LINK_CHANNEL_CODING_128B_132B_SUPPORTED 0:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_MAIN_LINK_CHANNEL_CODING_128B_132B_SUPPORTED_NO (0x00000000) /* RWXUF */ +#define NV_DPCD14_PHY_REPEATER_MAIN_LINK_CHANNEL_CODING_128B_132B_SUPPORTED_YES (0x00000001) /* RWXUF */ + +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES (0x000F0007) /* R-XUR */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_10G_SUPPORTED 0:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_10G_SUPPORTED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_10G_SUPPORTED_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_20G_SUPPORTED 1:1 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_20G_SUPPORTED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_20G_SUPPORTED_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_13_5G_SUPPORTED 2:2 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_13_5G_SUPPORTED_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_128B_132B_RATES_13_5G_SUPPORTED_YES (0x00000001) /* R-XUF */ + +#define NV_DPCD14_PHY_REPEATER_EQ_DONE (0x000F0008) /* R-XUR */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR(i) (i):(i) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_0 0:0 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_0_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_0_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_1 1:1 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_1_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_1_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_2 2:2 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_2_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_2_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_3 3:3 /* R-XUF */ +#define 
NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_3_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_3_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_4 4:4 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_4_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_4_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_5 5:5 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_5_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_5_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_6 6:6 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_6_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_6_YES (0x00000001) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_7 7:7 /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_7_NO (0x00000000) /* R-XUF */ +#define NV_DPCD14_PHY_REPEATER_EQ_DONE_LTTPR_7_YES (0x00000001) /* R-XUF */ + + +#define NV_DPCD14_PHY_REPEATER_START(i) (0x000F0010+(i)*0x50) /* RW-1A */ +#define NV_DPCD14_PHY_REPEATER_START__SIZE 8 /* R---S */ +// Following defines are offsets +#define NV_DPCD14_TRAINING_PATTERN_SET_PHY_REPEATER (0x00000000) /* RWXUV */ +#define NV_DPCD14_TRAINING_LANE0_SET_PHY_REPEATER (0x00000001) /* RWXUV */ +#define NV_DPCD14_TRAINING_LANE1_SET_PHY_REPEATER (0x00000002) /* RWXUV */ +#define NV_DPCD14_TRAINING_LANE2_SET_PHY_REPEATER (0x00000003) /* RWXUV */ +#define NV_DPCD14_TRAINING_LANE3_SET_PHY_REPEATER (0x00000004) /* RWXUV */ + +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER (0x00000010) /* R-XUR */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL 6:0 /* R-XUF */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_4MS (0x00000001) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_8MS (0x00000002) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_12MS (0x00000003) /* R-XUV */ +#define NV_DPCD14_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER_VAL_16MS (0x00000004) /* R-XUV */ + +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER (0x00000011) /* R-XUR */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3 0:0 /* R-XUF */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_VOLTAGE_SWING_3_YES (0x00000001) /* R-XUV */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3 1:1 /* R-XUF */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3_NO (0x00000000) /* R-XUV */ +#define NV_DPCD14_TRANSMITTER_CAP_PHY_REPEATER_PRE_EMPHASIS_3_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD14_LANE0_1_STATUS_PHY_REPEATER (0x00000020) /* R-XUR */ +#define NV_DPCD14_LANE2_3_STATUS_PHY_REPEATER (0x00000021) /* R-XUR */ +#define NV_DPCD14_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER (0x00000022) /* R-XUR */ +#define NV_DPCD14_ADJUST_REQUEST_LANE0_1_PHY_REPEATER (0x00000023) /* R-XUR */ +#define NV_DPCD14_ADJUST_REQUEST_LANE2_3_PHY_REPEATER (0x00000024) /* R-XUR */ + +#endif // #ifndef _DISPLAYPORT14_H_ + diff --git a/src/common/inc/displayport/dpcd20.h b/src/common/inc/displayport/dpcd20.h new file mode 100644 index 000000000..10e759f88 --- /dev/null +++ b/src/common/inc/displayport/dpcd20.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define NV_DPCD20_DSC_SUPPORT (0x00000060) /* R-XUR */ +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT 1:1 /* R-XUF */ +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT_YES (0x00000001) /* R-XUV */ + +#define NV_DPCD20_DSC_PASS_THROUGH (0x00000160) /* R-XUR */ +#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE 1:1 /* R-XUF */ +#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE_NO (0x00000000) /* R-XUV */ +#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE_YES (0x00000001) /* R-XUV */ + +// PANEL REPLAY RELATED DPCD +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY (0x000000B0) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED 0:0 +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_YES (0x00000001) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE 1:1 +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_YES (0x00000001) + +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION (0x000001B0) +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE 0:0 +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_NO (0x00000000) +#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_YES (0x00000001) diff --git a/src/common/inc/hdmi_spec.h b/src/common/inc/hdmi_spec.h new file mode 100644 index 000000000..9371c1c4c --- /dev/null +++ b/src/common/inc/hdmi_spec.h @@ -0,0 +1,86 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _HDMI_SPEC_H_ +#define _HDMI_SPEC_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: HDMI_SPEC.H * +* Defines Common HDMI flags * +* * +\***************************************************************************/ + +/* +* RM will be moving to separate packet types for DP and HDMI +* since the SDP packet type differ between HDMI and DP. Going forward +* clients are expected to use the respective packet type. Once all the +* clients move to the new data types, we can remove the redundant +* PACKET_TYPE definition. +*/ + + +typedef enum +{ + pktType_AudioClkRegeneration = 0x01, + pktType_GeneralControl = 0x03, + pktType_GamutMetadata = 0x0a, + pktType_SRInfoFrame = 0x7f, // Self refresh infoframe for eDP enter/exit self refresh, SRS 1698 + pktType_Cea861BInfoFrame = 0x80, + pktType_VendorSpecInfoFrame = 0x81, + pktType_AviInfoFrame = 0x82, + pktType_AudioInfoFrame = 0x84, + pktType_SrcProdDescInfoFrame = 0x83, + pktType_MpegSrcInfoFrame = 0x85, + pktType_DynamicRangeMasteringInfoFrame = 0x87 +} PACKET_TYPE; + +typedef enum +{ + hdmi_pktType_AudioClkRegeneration = 0x01, + hdmi_pktType_GeneralControl = 0x03, + hdmi_pktType_GamutMetadata = 0x0a, + hdmi_pktType_ExtendedMetadata = 0x7f, + hdmi_pktType_Cea861BInfoFrame = 0x80, + hdmi_pktType_VendorSpecInfoFrame = 0x81, + hdmi_pktType_AviInfoFrame = 0x82, + hdmi_pktType_AudioInfoFrame = 0x84, + hdmi_pktType_SrcProdDescInfoFrame = 0x83, + hdmi_pktType_MpegSrcInfoFrame = 0x85, + hdmi_pktType_DynamicRangeMasteringInfoFrame = 0x87 +} HDMI_PACKET_TYPE; + + +#define HDMI_PKT_HDR_SIZE 3 + +#define HDMI_PKT_AVI_NUM_DBYTES 14 +#define HDMI_PKT_AUDIO_NUM_DBYTES 11 +#define HDMI_PKT_GENCTRL_NUM_DBYTES 7 +#define HDMI_PKT_ACR_NUM_DBYTES 7 +#define HDMI_PKT_GAMUT_METADATA_NUM_DBYTES 28 +#define HDMI_PKT_VS_MAX_NUM_DBYTES 28 + +#define HDMI_GENCTRL_PACKET_MUTE_ENABLE 0x01 +#define HDMI_GENCTRL_PACKET_MUTE_DISABLE 0x10 + +#endif // #ifndef _HDMI_SPEC_H_ diff --git a/src/common/inc/nvBinSegment.h b/src/common/inc/nvBinSegment.h new file mode 100644 index 000000000..1a8755107 --- /dev/null +++ b/src/common/inc/nvBinSegment.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVBINSEGMENT_H +#define NVBINSEGMENT_H + +#define PUSH_SEGMENTS +#define POP_SEGMENTS +#define CODE_SEGMENT(__seg) +#define DATA_SEGMENT(__seg) +#define BSS_SEGMENT(__seg) +#define CONS_SEGMENT(__seg) +#define PAGE_SEGMENT +#define NONPAGE_SEGMENT + +#endif // NVBINSEGMENT_H diff --git a/src/common/inc/nvBldVer.h b/src/common/inc/nvBldVer.h new file mode 100644 index 000000000..0979ac51b --- /dev/null +++ b/src/common/inc/nvBldVer.h @@ -0,0 +1,72 @@ +#ifndef _NVBLDVER_H_ +#define _NVBLDVER_H_ + +#ifndef NVBLDVER_STRINGIZE +#define NVBLDVER_STRINGIZE(t) #t +#endif +#ifndef STRINGIZE +#define STRINGIZE(t) NVBLDVER_STRINGIZE(t) +#endif + +// These variables can be overridden using ENV vars, see nvCommon.nvmk. +// If no env vars are set, then the defaults seen here will be used. +// In DVS builds, the ENV vars are used to control these values. +// Note- the value of NV_BUILD_CL and NV_BUILD_TYPE_NON_BM is only used in +// non-buildmeister builds, see override section below. +// DVS_SW_CHANGELIST has been added to ENV vars in bug 1486673 +#ifndef DVS_SW_CHANGELIST + #define DVS_SW_CHANGELIST 0 +#endif +#ifndef NV_BUILD_CL + #define NV_BUILD_CL (DVS_SW_CHANGELIST) +#endif +#if NV_BUILD_CL == 0 + #define NV_BUILD_CL (DVS_SW_CHANGELIST) +#endif +#ifndef NV_BUILD_TYPE_NON_BM + #define NV_BUILD_TYPE_NON_BM Private +#endif +#ifndef NV_BUILD_AUTHOR + #define NV_BUILD_AUTHOR unknown +#endif +// End ENV var section + + +// The values of the following strings are set via a buildmeister python script, +// and then checked back in. 
You cannot make changes to these sections without +// corresponding changes to the buildmeister script +#ifndef NV_BUILD_BRANCH + #define NV_BUILD_BRANCH r515_95 +#endif +#ifndef NV_PUBLIC_BRANCH + #define NV_PUBLIC_BRANCH r515_95 +#endif + +#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) +#define NV_BUILD_BRANCH_VERSION "rel/gpu_drv/r515/r515_95-155" +#define NV_BUILD_CHANGELIST_NUM (31261195) +#define NV_BUILD_TYPE "Official" +#define NV_BUILD_NAME "rel/gpu_drv/r515/r515_95-155" +#define NV_LAST_OFFICIAL_CHANGELIST_NUM (31261195) + +#else /* Windows builds */ +#define NV_BUILD_BRANCH_VERSION "r515_95-3" +#define NV_BUILD_CHANGELIST_NUM (31249857) +#define NV_BUILD_TYPE "Official" +#define NV_BUILD_NAME "516.01" +#define NV_LAST_OFFICIAL_CHANGELIST_NUM (31249857) +#endif +// End buildmeister python edited section + +// A few of the values are defined differently for non-buildmeister builds, +// this section redefines those defines +#ifndef NV_BUILDMEISTER_BLD + #undef NV_BUILD_TYPE + #define NV_BUILD_TYPE STRINGIZE(NV_BUILD_TYPE_NON_BM) + #undef NV_BUILD_CHANGELIST_NUM + #define NV_BUILD_CHANGELIST_NUM NV_BUILD_CL +#endif + +#define NV_DISPLAY_DRIVER_TITLE NV_BUILD_TYPE " " STRINGIZE(NV_BUILD_BRANCH) " " NV_BUILD_NAME " " STRINGIZE(NV_BUILD_AUTHOR) + +#endif diff --git a/src/common/inc/nvCpuUuid.h b/src/common/inc/nvCpuUuid.h new file mode 100644 index 000000000..0ab546b7c --- /dev/null +++ b/src/common/inc/nvCpuUuid.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_CPU_UUID_H_ +#define _NV_CPU_UUID_H_ + +#define NV_UUID_LEN 16 + +typedef struct nv_uuid +{ + NvU8 uuid[NV_UUID_LEN]; + +} NvUuid; + +#define NV_UUID_HI(pUuid) (*((NvU64*)((pUuid)->uuid + (NV_UUID_LEN >> 1)))) +#define NV_UUID_LO(pUuid) (*((NvU64*)((pUuid)->uuid + 0))) + +typedef NvUuid NvSystemUuid; + +typedef NvUuid NvProcessorUuid; + +extern const NvProcessorUuid NV_PROCESSOR_UUID_CPU_DEFAULT; + +#endif // _NV_CPU_UUID_H_ diff --git a/src/common/inc/nvHdmiFrlCommon.h b/src/common/inc/nvHdmiFrlCommon.h new file mode 100644 index 000000000..8c4d41626 --- /dev/null +++ b/src/common/inc/nvHdmiFrlCommon.h @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** @file nvHdmiFrlCommon.h + * @brief This file defines data needed for and returned by HDMI 2.1 spec FRL calculations + * It meant to be a spec layer within HDMI lib, without carrying any + * driver/hw related information + */ + +#ifndef _NVHDMIFRLCOMMON_H_ +#define _NVHDMIFRLCOMMON_H_ + +#include "nvmisc.h" + +//****************************************************************************** +// Constants/Structures +//****************************************************************************** +#define MAX_RECONSTRUCTED_HACTIVE_PIXELS 2720 + +// HDMI_BPC: Bits per component enums. +typedef enum tagHDMI_BPC +{ + HDMI_BPC8 = 8, + HDMI_BPC10 = 10, + HDMI_BPC12 = 12, + HDMI_BPC16 = 16 +} HDMI_BPC; + +// HDMI_PIXEL_PACKING: Pixel packing type enums +typedef enum tagHDMI_PIXEL_PACKING +{ + HDMI_PIXEL_PACKING_RGB = 0, + HDMI_PIXEL_PACKING_YCbCr444, + HDMI_PIXEL_PACKING_YCbCr422, + HDMI_PIXEL_PACKING_YCbCr420 +} HDMI_PIXEL_PACKING; + +// HDMI_FRL_DATA_RATE: FRL mode enums +typedef enum tagHDMI_FRL_DATA_RATE +{ + HDMI_FRL_DATA_RATE_NONE, + HDMI_FRL_DATA_RATE_3LANES_3GBPS, + HDMI_FRL_DATA_RATE_3LANES_6GBPS, + HDMI_FRL_DATA_RATE_4LANES_6GBPS, + HDMI_FRL_DATA_RATE_4LANES_8GBPS, + HDMI_FRL_DATA_RATE_4LANES_10GBPS, + HDMI_FRL_DATA_RATE_4LANES_12GBPS, + HDMI_FRL_DATA_RATE_UNSPECIFIED +} HDMI_FRL_DATA_RATE; + +typedef enum tagAUDIO_PKTTYPE +{ + AUDIO_PKTTYPE_LPCM_SAMPLE = 0, + AUDIO_PKTTYPE_ONE_BIT_LPCM_SAMPLE, + AUDIO_PKTTYPE_DST_AUDIO, + AUDIO_PKTTYPE_HBR_AUDIO, + AUDIO_PKTTYPE_MULTI_STREAM_AUDIO, + AUDIO_PKTTYPE_ONE_BIT_MULTI_STREAM_AUDIO, + AUDIO_PKTTYPE_3D_AUDIO, + AUDIO_PKTTYPE_ONE_BIT_3D_AUDIO, + NO_AUDIO +} AUDIO_PKTTYPE; + +typedef struct tagFRL_CAPACITY_COMPUTATION_PARAMS +{ + NvU32 numLanes; + NvU32 frlBitRateGbps; + NvU32 pclk10KHz; + NvU32 hTotal; + NvU32 hActive; + NvU32 bpc; + HDMI_PIXEL_PACKING pixelPacking; + AUDIO_PKTTYPE audioType; + NvU32 numAudioChannels; + NvU32 audioFreqKHz; + + struct + { + NvU32 bppTargetx16; + NvU32 hSlices; + NvU32 sliceWidth; + NvU32 dscTotalChunkKBytes; + } compressionInfo; + +} FRL_CAPACITY_COMPUTATION_PARAMS; + +typedef struct tagFRL_COMPUTATION_RESULT +{ + HDMI_FRL_DATA_RATE frlRate; + NvU32 bppTargetx16; + + NvBool engageCompression; + NvBool isAudioSupported; + NvBool dataFlowDisparityReqMet; + NvBool dataFlowMeteringReqMet; + NvBool isVideoTransportSupported; + NvU32 triBytesBorrowed; // uncompressed mode: num of active Tri-bytes to be 
transmitted at HBlank + NvU32 hcActiveBytes; // compressed mode: num of FRL character bytes in active region + NvU32 hcActiveTriBytes; // compressed mode: num of FRL tri-bytes in active region + NvU32 hcBlankTriBytes; // compressed mode: num of FRL tri-bytes in blanking region + NvU32 tBlankToTTotalX1k; // compressed mode: ratio of time spent on blanking to the total line time +} FRL_COMPUTATION_RESULT; + +typedef struct tagFRL_PRE_CALC_CONFIG +{ + NvU32 vic; + HDMI_PIXEL_PACKING packing; + HDMI_BPC bpc; + HDMI_FRL_DATA_RATE frlRate; + NvU32 bppX16; + NvBool bCompressedMode; +} FRL_PRE_CALC_CONFIG; + +#endif // _NVHDMIFRLCOMMON_H_ diff --git a/src/common/inc/nvPNPVendorIds.h b/src/common/inc/nvPNPVendorIds.h new file mode 100644 index 000000000..97cf29771 --- /dev/null +++ b/src/common/inc/nvPNPVendorIds.h @@ -0,0 +1,556 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. +*/ +/* + * This header file contains the 3-character Plug and Play Vendor IDs and + * their translation into Vendor names. + * + * If the includer defines NV_PNP_VENDOR_IDS_USE_TCHAR, then + * PNPVendorID::vendorName will have type const TCHAR*; otherwise, it will have + * type const char*. 
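Referring back to the FRL structures defined in nvHdmiFrlCommon.h above: FRL_CAPACITY_COMPUTATION_PARAMS describes a candidate timing/link combination and FRL_COMPUTATION_RESULT carries the answer. The HDMI-library entry point that performs the computation is not part of that header, so the sketch below only shows how the input structure might be populated for an uncompressed 3840x2160@60 RGB 8 bpc timing with 2-channel 48 kHz LPCM audio; the timing numbers (CTA-861 VIC 97: 594.00 MHz pixel clock, hTotal 4400) are quoted from CTA-861, not from this file.

    #include "nvHdmiFrlCommon.h"

    static void buildFrlCapacityParamsExample(FRL_CAPACITY_COMPUTATION_PARAMS *pParams)
    {
        /* Candidate link: 4 lanes at 6 Gbps (cf. HDMI_FRL_DATA_RATE_4LANES_6GBPS). */
        pParams->numLanes         = 4;
        pParams->frlBitRateGbps   = 6;

        /* Uncompressed 3840x2160@60 RGB 8 bpc (CTA-861 VIC 97). */
        pParams->pclk10KHz        = 59400;    /* 594.00 MHz in 10 kHz units */
        pParams->hTotal           = 4400;
        pParams->hActive          = 3840;
        pParams->bpc              = HDMI_BPC8;
        pParams->pixelPacking     = HDMI_PIXEL_PACKING_RGB;

        /* 2-channel 48 kHz LPCM audio. */
        pParams->audioType        = AUDIO_PKTTYPE_LPCM_SAMPLE;
        pParams->numAudioChannels = 2;
        pParams->audioFreqKHz     = 48;

        /* DSC-related fields stay zero for the uncompressed case. */
        pParams->compressionInfo.bppTargetx16        = 0;
        pParams->compressionInfo.hSlices             = 0;
        pParams->compressionInfo.sliceWidth          = 0;
        pParams->compressionInfo.dscTotalChunkKBytes = 0;
    }
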
+ * + * References: + * http://www.uefi.org/pnp_id_list + * + */ + +#ifndef __NV_PNP_VENDOR_IDS_H__ +#define __NV_PNP_VENDOR_IDS_H__ + +#if defined(NV_PNP_VENDOR_IDS_USE_TCHAR) + #define _VENDOR_NAME_TYPE const TCHAR + #define _VENDOR_NAME_ENTRY(x) _T(x) +#else + #define _VENDOR_NAME_TYPE const char + #define _VENDOR_NAME_ENTRY(x) (x) +#endif + +typedef struct tagPNPVendorID +{ + char vendorId[4]; // PNP Vendor ID (example: "SNY") + _VENDOR_NAME_TYPE* vendorName; // Vendor name for display (example: "Sony") +} PNPVendorId; + + +/* + * The PNPVendorIds[] table maps between the 3-character Plug and + * Play Vendor Identifiers and user-friendly vendor names + */ +static const PNPVendorId PNPVendorIds[] = +{ + { "___", _VENDOR_NAME_ENTRY("Targa") }, + { "@@@", _VENDOR_NAME_ENTRY("Sangyo") }, + + { "AAC", _VENDOR_NAME_ENTRY("Acer") }, + { "ABC", _VENDOR_NAME_ENTRY("AboCom System Inc") }, + { "ABP", _VENDOR_NAME_ENTRY("Advanced System Products") }, + { "ACE", _VENDOR_NAME_ENTRY("ACME") }, + { "ACC", _VENDOR_NAME_ENTRY("ACCTON") }, + { "ACI", _VENDOR_NAME_ENTRY("Ancor Communications Inc") }, + { "ACK", _VENDOR_NAME_ENTRY("ACKSYS") }, + { "ACN", _VENDOR_NAME_ENTRY("ACON") }, + { "ACR", _VENDOR_NAME_ENTRY("Acer") }, + { "ACS", _VENDOR_NAME_ENTRY("Altos/ACS") }, + { "ACT", _VENDOR_NAME_ENTRY("Actebis/Targa") }, + { "ADI", _VENDOR_NAME_ENTRY("ADI") }, + { "ADP", _VENDOR_NAME_ENTRY("Adaptec") }, + { "ADT", _VENDOR_NAME_ENTRY("ADTEK") }, + { "ADV", _VENDOR_NAME_ENTRY("AMD") }, + { "ADX", _VENDOR_NAME_ENTRY("ADAX") }, + { "AEI", _VENDOR_NAME_ENTRY("AIR") }, + { "AEM", _VENDOR_NAME_ENTRY("AEM") }, + { "AEO", _VENDOR_NAME_ENTRY("UHC") }, + { "AGI", _VENDOR_NAME_ENTRY("Artish Graphics") }, + { "AKB", _VENDOR_NAME_ENTRY("Akebia") }, + { "AIC", _VENDOR_NAME_ENTRY("Arnos Instruments") }, + { "AIR", _VENDOR_NAME_ENTRY("Advanced Integrated Research") }, + { "AKB", _VENDOR_NAME_ENTRY("Akebia") }, + { "ALA", _VENDOR_NAME_ENTRY("Alacron") }, + { "ALR", _VENDOR_NAME_ENTRY("Advanced Logic Research") }, + { "AMC", _VENDOR_NAME_ENTRY("Attachmate") }, + { "AMD", _VENDOR_NAME_ENTRY("Amdek") }, + { "AMI", _VENDOR_NAME_ENTRY("American Megatrends") }, + { "AMP", _VENDOR_NAME_ENTRY("Amptron") }, + { "AMT", _VENDOR_NAME_ENTRY("Amtrans") }, + { "ANC", _VENDOR_NAME_ENTRY("Ancot") }, + { "ANI", _VENDOR_NAME_ENTRY("Anigma") }, + { "AOC", _VENDOR_NAME_ENTRY("AOC") }, + { "APD", _VENDOR_NAME_ENTRY("Applidata") }, + { "API", _VENDOR_NAME_ENTRY("AcerView") }, + { "APP", _VENDOR_NAME_ENTRY("Apple") }, + { "APS", _VENDOR_NAME_ENTRY("Autologic") }, + { "ARC", _VENDOR_NAME_ENTRY("Alta Research") }, + { "ART", _VENDOR_NAME_ENTRY("ArtMedia") }, + { "ASE", _VENDOR_NAME_ENTRY("ASEM") }, + { "ASI", _VENDOR_NAME_ENTRY("Ahead Systems") }, + { "AST", _VENDOR_NAME_ENTRY("AST Research") }, + { "ASU", _VENDOR_NAME_ENTRY("ASUS") }, + { "ATI", _VENDOR_NAME_ENTRY("Allied Telesis") }, + { "ATO", _VENDOR_NAME_ENTRY("ASTRO DESIGN, INC.") }, + { "ATT", _VENDOR_NAME_ENTRY("AT&T") }, + { "ATX", _VENDOR_NAME_ENTRY("Athenix") }, + { "AUO", _VENDOR_NAME_ENTRY("AU Optronics Corporation") }, + { "AVI", _VENDOR_NAME_ENTRY("AIR") }, + { "AVO", _VENDOR_NAME_ENTRY("Avocent Corporation") }, + { "AZU", _VENDOR_NAME_ENTRY("Azura") }, + + { "BAN", _VENDOR_NAME_ENTRY("Banyan") }, + { "BCC", _VENDOR_NAME_ENTRY("Beaver Computer Corporation") }, + { "BCD", _VENDOR_NAME_ENTRY("Dr. 
Seufert GmbH") }, + { "BEO", _VENDOR_NAME_ENTRY("Bang & Olufsen") }, + { "BGT", _VENDOR_NAME_ENTRY("Budzetron") }, + { "BMM", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "BNQ", _VENDOR_NAME_ENTRY("BenQ") }, + { "BOE", _VENDOR_NAME_ENTRY("BOE Technology Group Co., Ltd") }, + { "BRG", _VENDOR_NAME_ENTRY("Bridge") }, + { "BTC", _VENDOR_NAME_ENTRY("Bit 3") }, + { "BTE", _VENDOR_NAME_ENTRY("Brilliant Technology") }, + { "BUS", _VENDOR_NAME_ENTRY("BusTek") }, + + { "CAL", _VENDOR_NAME_ENTRY("Acon") }, + { "CCI", _VENDOR_NAME_ENTRY("Cache") }, + { "CCP", _VENDOR_NAME_ENTRY("Epson") }, + { "CDP", _VENDOR_NAME_ENTRY("CalComp") }, + { "CFG", _VENDOR_NAME_ENTRY("Atlantis") }, + { "CHA", _VENDOR_NAME_ENTRY("Chase Research") }, + { "CIP", _VENDOR_NAME_ENTRY("Ciprico") }, + { "CLO", _VENDOR_NAME_ENTRY("Clone Computers/Analogy") }, + { "CLT", _VENDOR_NAME_ENTRY("automated computer control systems")}, + { "CMD", _VENDOR_NAME_ENTRY("CMD Technology") }, + { "CMO", _VENDOR_NAME_ENTRY("Chi Mei Optoelectronics corp.") }, + { "CNI", _VENDOR_NAME_ENTRY("Connect International") }, + { "CNT", _VENDOR_NAME_ENTRY("CNet Technology") }, + { "COM", _VENDOR_NAME_ENTRY("Comtrol") }, + { "CPC", _VENDOR_NAME_ENTRY("Ciprico") }, + { "CPD", _VENDOR_NAME_ENTRY("CompuAdd") }, + { "CPG", _VENDOR_NAME_ENTRY("DFI") }, + { "CPI", _VENDOR_NAME_ENTRY("Computer Peripherals") }, + { "CPL", _VENDOR_NAME_ENTRY("Compal") }, + { "CPQ", _VENDOR_NAME_ENTRY("Compaq") }, + { "CPT", _VENDOR_NAME_ENTRY("cPATH") }, + { "CPX", _VENDOR_NAME_ENTRY("Powermatic Data Systems") }, + { "CRD", _VENDOR_NAME_ENTRY("Cardinal Technologies") }, + { "CRN", _VENDOR_NAME_ENTRY("Cornerstone") }, + { "CRS", _VENDOR_NAME_ENTRY("Cisco") }, + { "CSE", _VENDOR_NAME_ENTRY("Compu Shack") }, + { "CSI", _VENDOR_NAME_ENTRY("Cabletron") }, + { "CSS", _VENDOR_NAME_ENTRY("CSS Laboratories") }, + { "CTN", _VENDOR_NAME_ENTRY("Computone") }, + { "CTX", _VENDOR_NAME_ENTRY("Chuntex/CTX") }, + { "CUB", _VENDOR_NAME_ENTRY("Cubix") }, + { "CUI", _VENDOR_NAME_ENTRY("CUI") }, + { "CYB", _VENDOR_NAME_ENTRY("CyberVision") }, + + { "DBI", _VENDOR_NAME_ENTRY("DigiBoard") }, + { "DBL", _VENDOR_NAME_ENTRY("Doble Engineering") }, + { "DCC", _VENDOR_NAME_ENTRY("Dale Computer") }, + { "DCE", _VENDOR_NAME_ENTRY("Mylex") }, + { "DCM", _VENDOR_NAME_ENTRY("DCM Data Products") }, + { "DEC", _VENDOR_NAME_ENTRY("DEC") }, + { "DEI", _VENDOR_NAME_ENTRY("Deico Electronics") }, + { "DEL", _VENDOR_NAME_ENTRY("Dell") }, + { "DFI", _VENDOR_NAME_ENTRY("DFI") }, + { "DGC", _VENDOR_NAME_ENTRY("Data General") }, + { "DGS", _VENDOR_NAME_ENTRY("Diagsoft") }, + { "DIA", _VENDOR_NAME_ENTRY("Diadem") }, + { "DIO", _VENDOR_NAME_ENTRY("DIO") }, + { "DIS", _VENDOR_NAME_ENTRY("Diseda") }, + { "DIT", _VENDOR_NAME_ENTRY("Dragon Information Technology") }, + { "DLK", _VENDOR_NAME_ENTRY("D-Link") }, + { "DLO", _VENDOR_NAME_ENTRY("Dlodlo Technologies Co., Ltd") }, + { "DMB", _VENDOR_NAME_ENTRY("Digicom Systems") }, + { "DMS", _VENDOR_NAME_ENTRY("DOME imaging systems") }, + { "DNV", _VENDOR_NAME_ENTRY("NexView") }, + { "DOM", _VENDOR_NAME_ENTRY("Dome Imaging Systems") }, + { "DON", _VENDOR_NAME_ENTRY("DENON, Ltd.") }, + { "DPC", _VENDOR_NAME_ENTRY("Delta") }, + { "DPI", _VENDOR_NAME_ENTRY("DocuPoint") }, + { "DPL", _VENDOR_NAME_ENTRY("Digital Projection Limited") }, + { "DPN", _VENDOR_NAME_ENTRY("Shanghai Lexiang Technology Limited") }, + { "DPT", _VENDOR_NAME_ENTRY("DPT") }, + { "DRT", _VENDOR_NAME_ENTRY("Digital Research") }, + { "DSJ", _VENDOR_NAME_ENTRY("VR Technology Holdings Limited") }, + { "DSM", 
_VENDOR_NAME_ENTRY("DSM Digial Services") }, + { "DTC", _VENDOR_NAME_ENTRY("Data Technology") }, + { "DTI", _VENDOR_NAME_ENTRY("Diversified Technology") }, + { "DTK", _VENDOR_NAME_ENTRY("DTK Computer") }, + { "DTX", _VENDOR_NAME_ENTRY("Data Translation") }, + { "DVC", _VENDOR_NAME_ENTRY("DecaView") }, + { "DWE", _VENDOR_NAME_ENTRY("Daewoo") }, + + { "ECS", _VENDOR_NAME_ENTRY("EliteGroup/ECS") }, + { "ENC", _VENDOR_NAME_ENTRY("Eizo") }, + { "EGO", _VENDOR_NAME_ENTRY("Ergo Electronics") }, + { "EKC", _VENDOR_NAME_ENTRY("Kodak") }, + { "EHJ", _VENDOR_NAME_ENTRY("Epson") }, + { "EIZ", _VENDOR_NAME_ENTRY("Eizo") }, + { "ELI", _VENDOR_NAME_ENTRY("Edsun") }, + { "ELS", _VENDOR_NAME_ENTRY("ELSA") }, + { "ELX", _VENDOR_NAME_ENTRY("Elonex") }, + { "EMC", _VENDOR_NAME_ENTRY("ProView/EMC") }, + { "ENC", _VENDOR_NAME_ENTRY("Eizo") }, + { "EPI", _VENDOR_NAME_ENTRY("Envision") }, + { "EQX", _VENDOR_NAME_ENTRY("Equinox") }, + { "ERG", _VENDOR_NAME_ENTRY("Ergo") }, + { "ERP", _VENDOR_NAME_ENTRY("EURAPLAN") }, + { "ESI", _VENDOR_NAME_ENTRY("Extended Systems") }, + { "ETT", _VENDOR_NAME_ENTRY("E-Tech Research") }, + { "EVX", _VENDOR_NAME_ENTRY("Everex") }, + { "EXP", _VENDOR_NAME_ENTRY("Data Export") }, + + { "FCB", _VENDOR_NAME_ENTRY("Furukawa Electric") }, + { "FCM", _VENDOR_NAME_ENTRY("Funai") }, + { "FCT", _VENDOR_NAME_ENTRY("Free Computer Technology") }, + { "FDC", _VENDOR_NAME_ENTRY("Future Domain") }, + { "FDX", _VENDOR_NAME_ENTRY("Findex, Inc. ") }, + { "FGL", _VENDOR_NAME_ENTRY("Fujitsu") }, + { "FIC", _VENDOR_NAME_ENTRY("First International") }, + { "FOR", _VENDOR_NAME_ENTRY("Formac") }, + { "FOV", _VENDOR_NAME_ENTRY("FOVE INC") }, + { "FRC", _VENDOR_NAME_ENTRY("FORCE Computers") }, + { "FRI", _VENDOR_NAME_ENTRY("Fibernet Research") }, + { "FTN", _VENDOR_NAME_ENTRY("Fountain Technologies") }, + { "FUJ", _VENDOR_NAME_ENTRY("Fujitsu") }, + + { "GAG", _VENDOR_NAME_ENTRY("Gage Applied Sciences") }, + { "GCI", _VENDOR_NAME_ENTRY("Gateway Communications") }, + { "GEN", _VENDOR_NAME_ENTRY("Genesys") }, + { "GMX", _VENDOR_NAME_ENTRY("GMX") }, + { "GRA", _VENDOR_NAME_ENTRY("Graphica") }, + { "GSM", _VENDOR_NAME_ENTRY("LG Electronics") }, + { "GVC", _VENDOR_NAME_ENTRY("GVC") }, + { "GWY", _VENDOR_NAME_ENTRY("Gateway") }, + + { "HCL", _VENDOR_NAME_ENTRY("HCL") }, + { "HCP", _VENDOR_NAME_ENTRY("Hitachi") }, + { "HCW", _VENDOR_NAME_ENTRY("Hauppauge") }, + { "HDL", _VENDOR_NAME_ENTRY("Headland") }, + { "HEC", _VENDOR_NAME_ENTRY("Hisense") }, + { "HEI", _VENDOR_NAME_ENTRY("Hyundai") }, + { "HIT", _VENDOR_NAME_ENTRY("Hitachi/HINT") }, + { "HMX", _VENDOR_NAME_ENTRY("HUMAX Co., Ltd.") }, + { "HSD", _VENDOR_NAME_ENTRY("HannStar Display Corp") }, + { "HSL", _VENDOR_NAME_ENTRY("Hansol") }, + { "HTC", _VENDOR_NAME_ENTRY("Hitachi") }, + { "HVR", _VENDOR_NAME_ENTRY("HTC Corporation") }, + { "HWD", _VENDOR_NAME_ENTRY("HighWater Designs") }, + { "HWP", _VENDOR_NAME_ENTRY("HP") }, + { "HYL", _VENDOR_NAME_ENTRY("Hypereal") }, + { "HYP", _VENDOR_NAME_ENTRY("Hyphen Limited") }, + { "HWV", _VENDOR_NAME_ENTRY("Huawei Technologies Co., Ltd") }, + + { "IBC", _VENDOR_NAME_ENTRY("IBS") }, + { "IBM", _VENDOR_NAME_ENTRY("IBM") }, + { "ICC", _VENDOR_NAME_ENTRY("BICC Data Networks") }, + { "ICL", _VENDOR_NAME_ENTRY("Fujitsu/ICL") }, + { "ICN", _VENDOR_NAME_ENTRY("Sanyo/Icon") }, + { "ICU", _VENDOR_NAME_ENTRY("Intel") }, + { "IDS", _VENDOR_NAME_ENTRY("Intellistor") }, + { "IFT", _VENDOR_NAME_ENTRY("Informtech") }, + { "IGM", _VENDOR_NAME_ENTRY("IGM Communications") }, + { "III", _VENDOR_NAME_ENTRY("Intelligent Instrumentation") }, + { 
"IIN", _VENDOR_NAME_ENTRY("Intel") }, + { "IMA", _VENDOR_NAME_ENTRY("Imagraph") }, + { "IMC", _VENDOR_NAME_ENTRY("IMC Networks") }, + { "IMP", _VENDOR_NAME_ENTRY("Impression") }, + { "INF", _VENDOR_NAME_ENTRY("Inframetrics") }, + { "INL", _VENDOR_NAME_ENTRY("InnoLux Display Corporation") }, + { "INP", _VENDOR_NAME_ENTRY("Interphase") }, + { "INS", _VENDOR_NAME_ENTRY("Ines") }, + { "INT", _VENDOR_NAME_ENTRY("Intel") }, + { "IOD", _VENDOR_NAME_ENTRY("IODATA") }, + { "ISA", _VENDOR_NAME_ENTRY("ISA") }, + { "ISI", _VENDOR_NAME_ENTRY("Interface Solutions") }, + { "ISL", _VENDOR_NAME_ENTRY("Isolation Systems") }, + { "ITA", _VENDOR_NAME_ENTRY("Itausa") }, + { "ITC", _VENDOR_NAME_ENTRY("ITK") }, + { "ITN", _VENDOR_NAME_ENTRY("NTI Group/ASUS") }, + { "ITK", _VENDOR_NAME_ENTRY("NTI Group") }, + { "IVK", _VENDOR_NAME_ENTRY("Iiyama") }, + { "IVM", _VENDOR_NAME_ENTRY("Idek Iiyama") }, + { "IVR", _VENDOR_NAME_ENTRY("Inlife-Handnet Co., Ltd.") }, + { "IWR", _VENDOR_NAME_ENTRY("Icuiti Corporation") }, + + { "JDI", _VENDOR_NAME_ENTRY("Japan Display Inc") }, + { "JEN", _VENDOR_NAME_ENTRY("Jean") }, + { "JKC", _VENDOR_NAME_ENTRY("JVC Kenwood Corporation") }, + { "JVC", _VENDOR_NAME_ENTRY("JVC") }, + + { "KDS", _VENDOR_NAME_ENTRY("Korea Data Systems") }, + { "KDK", _VENDOR_NAME_ENTRY("Kodiak") }, + { "KES", _VENDOR_NAME_ENTRY("Kesa Crop") }, + { "KFC", _VENDOR_NAME_ENTRY("KFC Computek") }, + { "KPC", _VENDOR_NAME_ENTRY("King Phoenix") }, + { "KSC", _VENDOR_NAME_ENTRY("Kinetic Systems") }, + { "KTC", _VENDOR_NAME_ENTRY("Kingston Technology") }, + { "KTG", _VENDOR_NAME_ENTRY("KayserThrede") }, + { "KTR", _VENDOR_NAME_ENTRY("IMRI") }, + { "KYC", _VENDOR_NAME_ENTRY("Kyocera") }, + + { "LAG", _VENDOR_NAME_ENTRY("Laguna Systems") }, + { "LCD", _VENDOR_NAME_ENTRY("Toshiba Matsushita Display Technology Co., Ltd")}, + { "LCS", _VENDOR_NAME_ENTRY("Longshine Electronics") }, + { "LEF", _VENDOR_NAME_ENTRY("Leaf Systems") }, + { "LEN", _VENDOR_NAME_ENTRY("Lenovo Group Limited") }, + { "LGE", _VENDOR_NAME_ENTRY("LG Electronics") }, + { "LKM", _VENDOR_NAME_ENTRY("Likom/LKM") }, + { "LNK", _VENDOR_NAME_ENTRY("Link Technologies") }, + { "LTI", _VENDOR_NAME_ENTRY("Longshine") }, + { "LTN", _VENDOR_NAME_ENTRY("Lite-On") }, + + { "MAG", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "MAX", _VENDOR_NAME_ENTRY("Maxdata/Belinea") }, + { "MAY", _VENDOR_NAME_ENTRY("Maynard Electronics") }, + { "MBC", _VENDOR_NAME_ENTRY("MBC") }, + { "MCC", _VENDOR_NAME_ENTRY("MCCI") }, + { "MCD", _VENDOR_NAME_ENTRY("McDATA") }, + { "MCI", _VENDOR_NAME_ENTRY("Micronics") }, + { "MCR", _VENDOR_NAME_ENTRY("Marina Communications") }, + { "MCS", _VENDOR_NAME_ENTRY("Micro Computer Systems") }, + { "MCT", _VENDOR_NAME_ENTRY("Microtec") }, + { "MDD", _VENDOR_NAME_ENTRY("Modis") }, + { "MDG", _VENDOR_NAME_ENTRY("Madge Networks") }, + { "MDS", _VENDOR_NAME_ENTRY("Micro Display Systems") }, + { "MDT", _VENDOR_NAME_ENTRY("Magus Data") }, + { "MED", _VENDOR_NAME_ENTRY("Medion") }, + { "MEI", _VENDOR_NAME_ENTRY("Panasonic") }, + { "MEL", _VENDOR_NAME_ENTRY("Mitsubishi") }, + { "MET", _VENDOR_NAME_ENTRY("Metheus") }, + { "MFG", _VENDOR_NAME_ENTRY("Microfield Graphics") }, + { "MGC", _VENDOR_NAME_ENTRY("CompuAdd") }, + { "MGT", _VENDOR_NAME_ENTRY("Megatech") }, + { "MIC", _VENDOR_NAME_ENTRY("Micronics") }, + { "MIR", _VENDOR_NAME_ENTRY("Miro") }, + { "MJI", _VENDOR_NAME_ENTRY("MARANTZ JAPAN, INC.") }, + { "MLX", _VENDOR_NAME_ENTRY("Mylex") }, + { "MMX", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "MOR", _VENDOR_NAME_ENTRY("Morse Technology") }, + { "MSI", 
_VENDOR_NAME_ENTRY("Microstep") }, + { "MSV", _VENDOR_NAME_ENTRY("Mosgi") }, + { "MTC", _VENDOR_NAME_ENTRY("Mitac") }, + { "MTI", _VENDOR_NAME_ENTRY("Morse Technology") }, + { "MTQ", _VENDOR_NAME_ENTRY("Mountain Computer") }, + { "MTS", _VENDOR_NAME_ENTRY("Multi-Tech Systems") }, + { "MTX", _VENDOR_NAME_ENTRY("Matrox") }, + { "MVD", _VENDOR_NAME_ENTRY("Microvitec PLC") }, + { "MVN", _VENDOR_NAME_ENTRY("META COMPANY") }, + { "MWY", _VENDOR_NAME_ENTRY("Microway") }, + { "MYA", _VENDOR_NAME_ENTRY("Monydata") }, + { "MYL", _VENDOR_NAME_ENTRY("Mylex") }, + { "MYX", _VENDOR_NAME_ENTRY("Micronyx") }, + { "MZI", _VENDOR_NAME_ENTRY("Mozo") }, + + { "NAN", _VENDOR_NAME_ENTRY("Nanao") }, + { "NCA", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "NCD", _VENDOR_NAME_ENTRY("NCD") }, + { "NCS", _VENDOR_NAME_ENTRY("Northgate") }, + { "NDC", _VENDOR_NAME_ENTRY("National DataComm") }, + { "NDS", _VENDOR_NAME_ENTRY("Nokia") }, + { "NEC", _VENDOR_NAME_ENTRY("NEC") }, + { "NIC", _VENDOR_NAME_ENTRY("National Instruments") }, + { "NIT", _VENDOR_NAME_ENTRY("Network Info Technology") }, + { "NOK", _VENDOR_NAME_ENTRY("Nokia") }, + { "NPI", _VENDOR_NAME_ENTRY("Network Peripherals") }, + { "NSC", _VENDOR_NAME_ENTRY("National Semiconductor") }, + { "NSS", _VENDOR_NAME_ENTRY("Newport Systems") }, + { "NTI", _VENDOR_NAME_ENTRY("New Tech") }, + { "NVD", _VENDOR_NAME_ENTRY("NVIDIA") }, + { "NVL", _VENDOR_NAME_ENTRY("Novell") }, + { "NXG", _VENDOR_NAME_ENTRY("Nexgen") }, + + { "OAS", _VENDOR_NAME_ENTRY("OAsys") }, + { "OCN", _VENDOR_NAME_ENTRY("Olfan") }, + { "OEC", _VENDOR_NAME_ENTRY("Daytek") }, + { "OLC", _VENDOR_NAME_ENTRY("Olicom") }, + { "OLI", _VENDOR_NAME_ENTRY("Olivetti") }, + { "OKI", _VENDOR_NAME_ENTRY("OKI Electric Industrial Company Ltd") }, + { "ONK", _VENDOR_NAME_ENTRY("ONKYO Corporation") }, + { "OPT", _VENDOR_NAME_ENTRY("OPTi") }, + { "OQI", _VENDOR_NAME_ENTRY("Optiquest") }, + { "OTI", _VENDOR_NAME_ENTRY("Orchid Technology") }, + { "OVR", _VENDOR_NAME_ENTRY("Oculus VR Inc.") }, + { "OZO", _VENDOR_NAME_ENTRY("Zoom Telephonics") }, + + { "PAR", _VENDOR_NAME_ENTRY("Parallan Comp Inc") }, + { "PBE", _VENDOR_NAME_ENTRY("Packard Bell") }, + { "PBI", _VENDOR_NAME_ENTRY("Pitney Bowes") }, + { "PBN", _VENDOR_NAME_ENTRY("Packard Bell") }, + { "PCI", _VENDOR_NAME_ENTRY("Pioneer Computer") }, + { "PCP", _VENDOR_NAME_ENTRY("Procomp") }, + { "PDR", _VENDOR_NAME_ENTRY("Pure Data") }, + { "PEA", _VENDOR_NAME_ENTRY("Peacock") }, + { "PGS", _VENDOR_NAME_ENTRY("Princeton Graphics") }, + { "PHI", _VENDOR_NAME_ENTRY("Phillips") }, + { "PHL", _VENDOR_NAME_ENTRY("Philips") }, + { "PIO", _VENDOR_NAME_ENTRY("Pioneer Electronic Corporation") }, + { "PI0", _VENDOR_NAME_ENTRY("Pioneer") }, + { "PIR", _VENDOR_NAME_ENTRY("Pico Technology Inc") }, + { "PJD", _VENDOR_NAME_ENTRY("Projectiondesign AS") }, + { "PLB", _VENDOR_NAME_ENTRY("PLB") }, + { "PLX", _VENDOR_NAME_ENTRY("Ocean Office Automation") }, + { "PMC", _VENDOR_NAME_ENTRY("PMC Consumer Electronics") }, + { "PMV", _VENDOR_NAME_ENTRY("MAG Technology") }, + { "PNR", _VENDOR_NAME_ENTRY("Planar Systems, Inc.") }, + { "PRO", _VENDOR_NAME_ENTRY("Proteon") }, + { "PSI", _VENDOR_NAME_ENTRY("PSI Perceptive Solutions") }, + { "PTS", _VENDOR_NAME_ENTRY("ProView/EMC/PTS") }, + { "PVR", _VENDOR_NAME_ENTRY("Pimax Tech Co., Ltd") }, + + { "QDI", _VENDOR_NAME_ENTRY("Quantum Data Incorporated") }, + { "QDM", _VENDOR_NAME_ENTRY("Quadram") }, + { "QTD", _VENDOR_NAME_ENTRY("Quantum 3D Inc") }, + { "QTM", _VENDOR_NAME_ENTRY("Quantum") }, + + { "RAC", _VENDOR_NAME_ENTRY("Racore Computer Products") }, + 
{ "RCE", _VENDOR_NAME_ENTRY("RCE") }, + { "RCI", _VENDOR_NAME_ENTRY("RC International") }, + { "REL", _VENDOR_NAME_ENTRY("Relisys") }, + { "REM", _VENDOR_NAME_ENTRY("REM") }, + { "RII", _VENDOR_NAME_ENTRY("Racal Interlan") }, + { "RMP", _VENDOR_NAME_ENTRY("Research Machines") }, + { "ROK", _VENDOR_NAME_ENTRY("Rockwell") }, + { "RTI", _VENDOR_NAME_ENTRY("Rancho Technology") }, + { "RUN", _VENDOR_NAME_ENTRY("RUNCO International") }, + + { "SAM", _VENDOR_NAME_ENTRY("Samsung") }, + { "SAN", _VENDOR_NAME_ENTRY("Sanyo Electric Co.,Ltd.") }, + { "SCC", _VENDOR_NAME_ENTRY("SORD") }, + { "SCD", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SDI", _VENDOR_NAME_ENTRY("Samtron/Sigma Designs") }, + { "SDT", _VENDOR_NAME_ENTRY("Siemens AG") }, + { "SEA", _VENDOR_NAME_ENTRY("Segate") }, + { "SEC", _VENDOR_NAME_ENTRY("Seiko/Epson") }, + { "SEN", _VENDOR_NAME_ENTRY("Sencore") }, + { "SGT", _VENDOR_NAME_ENTRY("Stargate Technology/AT&T") }, + { "SGX", _VENDOR_NAME_ENTRY("SGI") }, + { "SHP", _VENDOR_NAME_ENTRY("Sharp") }, + { "SIB", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SIE", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "SII", _VENDOR_NAME_ENTRY("Silicon Image, Inc.") }, + { "SIS", _VENDOR_NAME_ENTRY("SiS/Modula Tech") }, + { "SIT", _VENDOR_NAME_ENTRY("Sitintel") }, + { "SIX", _VENDOR_NAME_ENTRY("Zuniq Data") }, + { "SKD", _VENDOR_NAME_ENTRY("Schneider & Koch") }, + { "SKW", _VENDOR_NAME_ENTRY("Skyworth") }, + { "SKY", _VENDOR_NAME_ENTRY("SKYDATA S.P.A.") }, + { "SLB", _VENDOR_NAME_ENTRY("Shlumberger Ltd") }, + { "SLT", _VENDOR_NAME_ENTRY("Salt Internatioinal Corp.") }, + { "SLX", _VENDOR_NAME_ENTRY("Specialix") }, + { "SMC", _VENDOR_NAME_ENTRY("Standard Microsystems") }, + { "SMI", _VENDOR_NAME_ENTRY("Smile") }, + { "SML", _VENDOR_NAME_ENTRY("Smile") }, + { "SMS", _VENDOR_NAME_ENTRY("Silicon Multimedia Systems") }, + { "SNI", _VENDOR_NAME_ENTRY("Siemens Nixdorf") }, + { "SNY", _VENDOR_NAME_ENTRY("Sony") }, + { "SOB", _VENDOR_NAME_ENTRY("Sanyo") }, + { "SPE", _VENDOR_NAME_ENTRY("SPEA") }, + { "SPT", _VENDOR_NAME_ENTRY("Sceptre") }, + { "SRC", _VENDOR_NAME_ENTRY("Shamrock/SunRiver") }, + { "SSS", _VENDOR_NAME_ENTRY("S3") }, + { "STA", _VENDOR_NAME_ENTRY("Stesa") }, + { "STB", _VENDOR_NAME_ENTRY("STB Systems") }, + { "STC", _VENDOR_NAME_ENTRY("Sampo/STAC") }, + { "STP", _VENDOR_NAME_ENTRY("Sceptre") }, + { "STR", _VENDOR_NAME_ENTRY("Starlight Networks") }, + { "SUK", _VENDOR_NAME_ENTRY("Schneider & Koch") }, + { "SUP", _VENDOR_NAME_ENTRY("Supra/Diamond Media") }, + { "SUR", _VENDOR_NAME_ENTRY("Surenam") }, + { "SVR", _VENDOR_NAME_ENTRY("Sensics Inc.") }, + { "SYL", _VENDOR_NAME_ENTRY("Sylvania") }, + { "SYN", _VENDOR_NAME_ENTRY("Synaptics Inc") }, + + { "TAI", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TAT", _VENDOR_NAME_ENTRY("Tatung") }, + { "TAX", _VENDOR_NAME_ENTRY("Taxan") }, + { "TCC", _VENDOR_NAME_ENTRY("Tandon") }, + { "TCI", _VENDOR_NAME_ENTRY("Tulip") }, + { "TCL", _VENDOR_NAME_ENTRY("Tech Concepts") }, + { "TCM", _VENDOR_NAME_ENTRY("Techmedia/3Com") }, + { "TCO", _VENDOR_NAME_ENTRY("Thomas Conrad") }, + { "TCR", _VENDOR_NAME_ENTRY("Thomson Consumer Electronics") }, + { "TCS", _VENDOR_NAME_ENTRY("Tatung") }, + { "TDS", _VENDOR_NAME_ENTRY("Tri Data Systems") }, + { "TDT", _VENDOR_NAME_ENTRY("TDT") }, + { "TDY", _VENDOR_NAME_ENTRY("Tandy") }, + { "TEA", _VENDOR_NAME_ENTRY("Teac") }, + { "TEC", _VENDOR_NAME_ENTRY("Tecmar") }, + { "TEI", _VENDOR_NAME_ENTRY("TECO") }, + { "TGI", _VENDOR_NAME_ENTRY("TriGem") }, + { "TGS", _VENDOR_NAME_ENTRY("Torus") }, + { "TOS", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TRI", 
_VENDOR_NAME_ENTRY("Tricord") }, + { "TRM", _VENDOR_NAME_ENTRY("Tekram") }, + { "TRL", _VENDOR_NAME_ENTRY("Royal") }, + { "TRS", _VENDOR_NAME_ENTRY("Torus") }, + { "TRU", _VENDOR_NAME_ENTRY("Aashima/Truevision") }, + { "TSB", _VENDOR_NAME_ENTRY("Toshiba") }, + { "TSC", _VENDOR_NAME_ENTRY("Sanyo") }, + { "TSI", _VENDOR_NAME_ENTRY("TeleVideo") }, + { "TST", _VENDOR_NAME_ENTRY("Transtream Inc") }, + { "TTC", _VENDOR_NAME_ENTRY("Telecommunications Techniques") }, + { "TTK", _VENDOR_NAME_ENTRY("Totoku") }, + { "TTX", _VENDOR_NAME_ENTRY("TTX") }, + { "TVI", _VENDOR_NAME_ENTRY("TeleVideo/Truevision") }, + { "TVM", _VENDOR_NAME_ENTRY("TVM") }, + { "TWA", _VENDOR_NAME_ENTRY("Tidewater") }, + { "TWE", _VENDOR_NAME_ENTRY("Kontron") }, + { "TXN", _VENDOR_NAME_ENTRY("Texas Instruments") }, + { "TYN", _VENDOR_NAME_ENTRY("Tyan Computer") }, + + { "UBI", _VENDOR_NAME_ENTRY("Ungermann Bass") }, + { "UFO", _VENDOR_NAME_ENTRY("UFO Systems") }, + { "UNA", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNI", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNM", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNO", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNS", _VENDOR_NAME_ENTRY("Unisys") }, + { "UNT", _VENDOR_NAME_ENTRY("Unisys") }, + { "USC", _VENDOR_NAME_ENTRY("UltraStor") }, + { "USR", _VENDOR_NAME_ENTRY("US Robotics") }, + { "UTB", _VENDOR_NAME_ENTRY("Utobia") }, + + { "VES", _VENDOR_NAME_ENTRY("Vestel") }, + { "VIK", _VENDOR_NAME_ENTRY("Viking") }, + { "VLV", _VENDOR_NAME_ENTRY("Valve Corporation") }, + { "VMI", _VENDOR_NAME_ENTRY("Vermont MicroSystems") }, + { "VOB", _VENDOR_NAME_ENTRY("Vobis") }, + { "VRG", _VENDOR_NAME_ENTRY("VRgineers, Inc. ") }, + { "VRT", _VENDOR_NAME_ENTRY("Varjo Technologies") }, + { "VSC", _VENDOR_NAME_ENTRY("ViewSonic") }, + + { "WAC", _VENDOR_NAME_ENTRY("Wacom Tech") }, + { "WDC", _VENDOR_NAME_ENTRY("Western Digital") }, + { "WDE", _VENDOR_NAME_ENTRY("Westinghouse Digital Electronics") }, + { "WIL", _VENDOR_NAME_ENTRY("WIPRO") }, + { "WTC", _VENDOR_NAME_ENTRY("Wen Technology") }, + { "WYS", _VENDOR_NAME_ENTRY("Wyse Technology") }, + + { "YMH", _VENDOR_NAME_ENTRY("Yamaha Corporation") }, + { "YHQ", _VENDOR_NAME_ENTRY("Yokogawa") }, + + { "ZCM", _VENDOR_NAME_ENTRY("Zenith") }, + { "ZDS", _VENDOR_NAME_ENTRY("Zenith") }, + { "ZYT", _VENDOR_NAME_ENTRY("Zytex") }, +}; + +#endif /* __NV_PNP_VENDOR_IDS_H__ */ + diff --git a/src/common/inc/nvSha1.h b/src/common/inc/nvSha1.h new file mode 100644 index 000000000..6c9a010c6 --- /dev/null +++ b/src/common/inc/nvSha1.h @@ -0,0 +1,390 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2012 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Utility header file to generate a one-way hash from an arbitrary + * byte array, using the Secure Hashing Algorithm 1 (SHA-1) as defined + * in FIPS PUB 180-1 published April 17, 1995: + * + * http://www.itl.nist.gov/fipspubs/fip180-1.htm + * + * Some common test cases (see Appendices A and B of the above document): + * + * SHA1("abc") = + * A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D + * + * SHA1("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq") = + * 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1 + */ + +#ifndef __NV_SHA1_H__ +#define __NV_SHA1_H__ + +#include "nvtypes.h" + +/*! + * @brief Structure used by the SHA-1 functions to maintain the state of the + * calculations. + */ + +typedef struct +{ + NvU32 state[5]; + NvU32 count; + NvU8 buffer[128]; +} Sha1Context; + + +/*! + * @brief Pointer to a memory accessor function for use by the SHA-1 hash + * function. + * + * Due to memory constraints in some environments where this code is executed + * (e.g., the PMU/DPU), the data that needs to be processed by the SHA-1 hash + * function may not be readily available. This function is responsible for + * copying the data into a buffer to be used by the SHA-1 function. + * + * Besides, SHA1 library can be used by many different clients, so we need to + * provide the memory accessor functions which can work in client's environment. + * + * @param[out] pBuff The buffer to copy the new data to. + * @param[in] index The desired offset to begin copying from. + * @param[in] size The requested number of bytes to be copied. + * @param[in] info Pointer to the data passed into GenerateSha1 as pData. + * + * @return The actual number of bytes copied into the buffer. + */ + +typedef NvU32 Sha1CopyFunc(NvU8 *pBuff, NvU32 index, NvU32 size, void *pInfo); + + +/* + * The following values are defined by the SHA-1 algorithm for initial values. + */ +#define SHA1_INIT_H0 0x67452301 //!< Initial H0 value +#define SHA1_INIT_H1 0xEFCDAB89 //!< Initial H1 value +#define SHA1_INIT_H2 0x98BADCFE //!< Initial H2 value +#define SHA1_INIT_H3 0x10325476 //!< Initial H3 value +#define SHA1_INIT_H4 0xC3D2E1F0 //!< Initial H4 value + + +/*! + * @brief Reverses the byte order of a word; that is, switching the endianness + * of the word. + * + * @param[in] a A 32-bit word + * + * @returns The 32-bit word with its byte order reversed. + */ + +#define REVERSE_BYTE_ORDER(a) \ + (((a) >> 24) | ((a) << 24) | (((a) >> 8) & 0xFF00) | (((a) << 8) & 0xFF0000)) + + +/*! + * @brief Computation step as defined by SHA-1. + * + * Unlike the 64 byte buffer version outlined in the SHA-1 algorithm, this + * function uses a 128 byte buffer to minimize the calculation needed to + * index the data. + * + * @param[in,out] pState + * Pointer to State word array. + * + * @param[in] pBuffer + * Data to operate on. 128 bytes in length. No length checking is done, + * and is assumed to have been done by the calling function. 
+ */ + +static void +_sha1Transform +( + NvU32 *pState, + NvU8 *pBuffer +) +{ + NvU32 a = pState[0]; + NvU32 b = pState[1]; + NvU32 c = pState[2]; + NvU32 d = pState[3]; + NvU32 e = pState[4]; + NvU32 *pBuf = (NvU32 *)pBuffer; + NvU32 *p; + NvU32 i; + NvU32 j; + NvU32 k; + + for (i = 0; i < 80; i++) + { + p = &pBuf[i & 0xf]; + j = p[0]; + if (i < 16) + { + j = REVERSE_BYTE_ORDER(j); + } + else + { + j ^= p[2] ^ p[8] ^ p[13]; + j = (j << 1) + (j >> 31); + } + p[0] = p[16] = j; + if (i < 40) + { + if (i < 20) + { + k = 0x5a827999 + ((b & (c ^ d)) ^ d); + } + else + { + k = 0x6ed9eba1 + (b ^ c ^ d); + } + } + else + { + if (i < 60) + { + k = 0x8f1bbcdc + (((b | c) & d) | (b & c)); + } + else + { + k = 0xca62c1d6 + (b ^ c ^ d); + } + } + j += (a << 5) + (a >> 27) + e + k; + e = d; + d = c; + c = (b << 30) + (b >> 2); + b = a; + a = j; + } + pState[0] += a; + pState[1] += b; + pState[2] += c; + pState[3] += d; + pState[4] += e; +} + + +/*! + * Initializes the SHA-1 context. + * + * @param[out] pContext + * Pointer to the context to initialize. + */ + +static void +_sha1Initialize +( + Sha1Context *pContext +) +{ + pContext->count = 0; + pContext->state[0] = SHA1_INIT_H0; + pContext->state[1] = SHA1_INIT_H1; + pContext->state[2] = SHA1_INIT_H2; + pContext->state[3] = SHA1_INIT_H3; + pContext->state[4] = SHA1_INIT_H4; +} + + +/*! + * @brief Divides the input buffer into multiple 64-byte buffers and computes + * the message digest for each. + * + * @param[in] pContext + * Pointer to a Sha1Context. + * + * @param[in] pData + * Pointer to the data array to compute the message digest. + * + * @param[in] len + * Size of the data. + * + * @param[in] copyFunc + * Copy routine to use. + */ + +static void +_sha1Update +( + Sha1Context *pContext, + void *pData, + NvU32 len, + Sha1CopyFunc copyFunc +) +{ + NvU32 buffer_offset = (pContext->count & 63); + NvU32 copy_size; + NvU32 idx = 0; + + pContext->count += len; + while ((buffer_offset + len) > 63) + { + copy_size = 64 - buffer_offset; + copyFunc(&pContext->buffer[buffer_offset], idx, copy_size, pData); + _sha1Transform(pContext->state, pContext->buffer); + buffer_offset = 0; + idx += copy_size; + len -= copy_size; + } + if (len > 0) + { + copyFunc(&pContext->buffer[buffer_offset], idx, len, pData); + } +} + + +/*! + * @brief fill memory with zero; not all environments in which this + * code runs have memset(3). + * + * @param[out] pData + * The memory to be filled with zero + * + * @param[in] nBytes + * The number of bytes of memory to fill with zero + */ + +static NV_INLINE void +_sha1MemZero +( + NvU8 *pData, + NvU32 nBytes +) +{ + NvU32 i; + + for (i = 0; i < nBytes; i++) { + pData[i] = 0; + } +} + + +/*! + * @brief Pads the message as specified by the SHA-1 algorithm and computes + * the message digest on the final message chunk(s). + * + * @param[out] pDigest + * The SHA-1 hash values. + * + * @param[in] pContext + * Pointer to a Sha1Context. 
+ */ + +static void +_sha1Final +( + NvU8 *pDigest, + Sha1Context *pContext +) +{ + NvU32 i; + NvU32 bufferOffset = (pContext->count & 63); + NvU8 *pBuffer = (NvU8*)&pContext->buffer[bufferOffset]; + NvU32 *pCount; + NvU32 *pDig32; + + // append padding pattern to the end of input + *pBuffer++ = 0x80; + if (bufferOffset < 56) + { + _sha1MemZero(pBuffer, 59 - bufferOffset); + } + else + { + // need an extra sha1_transform + if (bufferOffset < 63) + { + _sha1MemZero(pBuffer, 63 - bufferOffset); + } + _sha1Transform(pContext->state, pContext->buffer); + _sha1MemZero(pContext->buffer, 60); + } + + // set final count (this is the number of *bits* not *bytes*) + pCount = (NvU32*)&pContext->buffer[15 << 2]; + *pCount = REVERSE_BYTE_ORDER(pContext->count << 3); + + _sha1Transform(pContext->state, pContext->buffer); + + // output hash with each dword in big endian + if (pDigest) + { + pDig32 = (NvU32*) pDigest; + for (i = 0; i < 5; i++) + { + pDig32[i] = REVERSE_BYTE_ORDER(pContext->state[i]); + } + } +} + + +/*! + * @brief Generates the SHA-1 hash value on the data provided. + * + * The function does not manipulate the source data directly, as it may not + * have direct access to it. Therefore, it relies upon the copy function to + * copy segments of the data into a local buffer before any manipulation takes + * place. + * + * @param[out] pHash + * Pointer to store the hash array. The buffer must be 20 bytes in + * length, and the result is stored in big endian format. + * + * @param[in] pData + * The source data array to transform. The actual values and make-up + * of this parameter are dependent on the copy function. + * + * @param[in] nBytes + * The size, in bytes, of the source data. + * + * @param[in] copyFunc + * The function responsible for copying data from the source + * for use by the sha1 function. It is possible for the data + * to exist outside the current execution environment (e.g., + * the PMU, and the data to hash are in system memory), so + * the function will never directly manipulate the source + * data. + */ + +#define NV_SHA1_BLOCK_LENGTH 64 +#define NV_SHA1_DIGEST_LENGTH 20 + +static void +sha1Generate +( + NvU8 pHash[NV_SHA1_DIGEST_LENGTH], + void *pData, + NvU32 nBytes, + Sha1CopyFunc copyFunc +) +{ + Sha1Context context; + + _sha1Initialize(&context); + _sha1Update(&context, pData, nBytes, copyFunc); + _sha1Final(pHash, &context); +} + + +#endif /* __NV_SHA1_H__ */ diff --git a/src/common/inc/nvSha256.h b/src/common/inc/nvSha256.h new file mode 100644 index 000000000..f5c12c180 --- /dev/null +++ b/src/common/inc/nvSha256.h @@ -0,0 +1,70 @@ +/* + * FIPS 180-2 SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
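/*
 * [Editor's note: illustrative sketch, not part of the original nvSha1.h diff.]
 * A minimal Sha1CopyFunc for data that already sits in a flat, directly
 * addressable buffer, plus one call to sha1Generate(). Only Sha1CopyFunc,
 * sha1Generate() and NV_SHA1_DIGEST_LENGTH come from the header above; the
 * helper and function names here are hypothetical.
 */
static NvU32 exampleSha1CopyFlat(NvU8 *pBuff, NvU32 index, NvU32 size, void *pInfo)
{
    const NvU8 *pSrc = (const NvU8 *)pInfo;
    NvU32 i;

    /* The source is ordinary system memory, so just copy 'size' bytes starting at 'index'. */
    for (i = 0; i < size; i++)
    {
        pBuff[i] = pSrc[index + i];
    }
    return size;
}

static void exampleSha1Usage(void)
{
    NvU8 msg[3] = { 'a', 'b', 'c' };
    NvU8 digest[NV_SHA1_DIGEST_LENGTH];

    /*
     * Per the FIPS 180-1 test vector quoted in the header comment, digest
     * should be A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D.
     */
    sha1Generate(digest, msg, sizeof(msg), exampleSha1CopyFlat);
}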
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/** \file SHA256.h + * \brief SHA256 definitions. + * \author Steven A. Fontana Sr. + * \date September 4 2009 +*/ +#ifndef NV_SHA2_H +#define NV_SHA2_H + +#include + +#define NV_SHA256_DIGEST_SIZE ( 256 / 8) + +#define NV_SHA256_BLOCK_SIZE ( 512 / 8) + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + NvU32 tot_len; + NvU32 len; + NvU8 block[2 * NV_SHA256_BLOCK_SIZE]; + NvU32 h[8]; +} nv_sha256_ctx; + +void nv_sha256_init(nv_sha256_ctx * ctx); +void nv_sha256_update(nv_sha256_ctx *ctx, const NvU8 *message, NvU32 len); +void nv_sha256_final(nv_sha256_ctx *ctx, NvU8 *digest); + +void nv_sha256_noPad(nv_sha256_ctx *ctx, NvU8 *digest); + +void nv_sha256(const NvU8 *message, NvU32 len, NvU8 *digest); + +#ifdef __cplusplus +} +#endif + +#endif /* !NV_SHA2_H */ diff --git a/src/common/inc/nvUnixVersion.h b/src/common/inc/nvUnixVersion.h new file mode 100644 index 000000000..46f1cf50c --- /dev/null +++ b/src/common/inc/nvUnixVersion.h @@ -0,0 +1,15 @@ +#ifndef __NV_UNIX_VERSION_H__ +#define __NV_UNIX_VERSION_H__ + +#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) + +#define NV_VERSION_STRING "515.43.04" + +#else + +#error "nvUnixVersion.h should only be included in UNIX builds" + +#endif + +#endif /* __NV_UNIX_VERSION_H__ */ diff --git a/src/common/inc/nvVer.h b/src/common/inc/nvVer.h new file mode 100644 index 000000000..135103ec2 --- /dev/null +++ b/src/common/inc/nvVer.h @@ -0,0 +1,17 @@ +// nvVer.h - Versions of NV drivers + +#define NV_COMPANY_NAME_STRING_SHORT "NVIDIA" +#define NV_COMPANY_NAME_STRING_FULL "NVIDIA Corporation" +#define NV_COMPANY_NAME_STRING NV_COMPANY_NAME_STRING_FULL +#define NV_COPYRIGHT_YEAR "2022" +#define NV_COPYRIGHT "(C) " NV_COPYRIGHT_YEAR " NVIDIA Corporation. All rights reserved." // Please do not use the non-ascii copyright symbol for (C). + +#if defined(NV_LINUX) || defined(NV_BSD) || defined(NV_SUNOS) || defined(NV_VMWARE) || defined(NV_QNX) || defined(NV_INTEGRITY) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && RMCFG_FEATURE_PLATFORM_GSP == 1) + +// All Version numbering for Unix builds has moved. (Source should be re-directed to directly include that header.) 
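/*
 * [Editor's note: illustrative sketch, not part of the original diff; it refers
 * back to the nv_sha256 API declared in nvSha256.h above, not to nvVer.h.]
 * The streaming init/update/final sequence and the one-shot nv_sha256() helper
 * are shown side by side; for a single contiguous message the two forms are
 * assumed to produce the same digest. The function name is hypothetical.
 */
static void exampleSha256Usage(const NvU8 *pMsg, NvU32 len)
{
    nv_sha256_ctx ctx;
    NvU8 digestStreamed[NV_SHA256_DIGEST_SIZE];
    NvU8 digestOneShot[NV_SHA256_DIGEST_SIZE];

    /* Streaming form: the message may be fed in any number of update calls. */
    nv_sha256_init(&ctx);
    nv_sha256_update(&ctx, pMsg, len);
    nv_sha256_final(&ctx, digestStreamed);

    /* One-shot convenience form over the same data. */
    nv_sha256(pMsg, len, digestOneShot);
}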
+#include "nvUnixVersion.h" + +#else + +#endif diff --git a/src/common/inc/nv_list.h b/src/common/inc/nv_list.h new file mode 100644 index 000000000..dbb5189e1 --- /dev/null +++ b/src/common/inc/nv_list.h @@ -0,0 +1,558 @@ +/* + * Copyright © 2010 Intel Corporation + * Copyright © 2010 Francisco Jerez + * Copyright © 2012 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +/* + * This file was copied from the X.Org X server source at commit + * 5884e7dedecdd82ddbb037360cf9c85143e094b5 and modified to match NVIDIA's X + * driver code style. + */ + +#ifndef _NV_LIST_H_ +#define _NV_LIST_H_ + +#ifdef __cplusplus +extern "C" { +#endif //__cplusplus + +#include "nvmisc.h" + + #define HAVE_TYPEOF 1 + +/** + * @file Classic doubly-link circular list implementation. + * For real usage examples of the linked list, see the file test/list.c + * + * Example: + * We need to keep a list of struct foo in the parent struct bar, i.e. what + * we want is something like this. + * + * struct bar { + * ... + * struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{} + * ... + * } + * + * We need one list head in bar and a list element in all list_of_foos (both are of + * data type 'NVListRec'). + * + * struct bar { + * ... + * NVListRec list_of_foos; + * ... + * } + * + * struct foo { + * ... + * NVListRec entry; + * ... + * } + * + * Now we initialize the list head: + * + * struct bar bar; + * ... + * nvListInit(&bar.list_of_foos); + * + * Then we create the first element and add it to this list: + * + * struct foo *foo = malloc(...); + * .... + * nvListAdd(&foo->entry, &bar.list_of_foos); + * + * Repeat the above for each element you want to add to the list. Deleting + * works with the element itself. + * nvListDel(&foo->entry); + * free(foo); + * + * Note: calling nvListDel(&bar.list_of_foos) will set bar.list_of_foos to an empty + * list again. + * + * Looping through the list requires a 'struct foo' as iterator and the + * name of the field the subnodes use. + * + * struct foo *iterator; + * nvListForEachEntry(iterator, &bar.list_of_foos, entry) { + * if (iterator->something == ...) + * ... + * } + * + * Note: You must not call nvListDel() on the iterator if you continue the + * loop. You need to run the safe for-each loop instead: + * + * struct foo *iterator, *next; + * nvListForEachEntry_safe(iterator, next, &bar.list_of_foos, entry) { + * if (...) 
+ * nvListDel(&iterator->entry); + * } + * + */ + +/** + * The linkage struct for list nodes. This struct must be part of your + * to-be-linked struct. NVListRec is required for both the head of the + * list and for each list node. + * + * Position and name of the NVListRec field is irrelevant. + * There are no requirements that elements of a list are of the same type. + * There are no requirements for a list head, any NVListRec can be a list + * head. + */ +typedef struct NVList { + struct NVList *next, *prev; +} NVListRec, *NVListPtr; + +/** + * Initialize the list as an empty list. + * + * Example: + * nvListInit(&bar->list_of_foos); + * + * @param The list to initialized. + */ +static NV_INLINE void +nvListInit(NVListPtr list) +{ + list->next = list->prev = list; +} + +/** + * Initialize the list as an empty list. + * + * This is functionally the same as nvListInit, but can be used for + * initialization of global variables. + * + * Example: + * static NVListRec list_of_foos = NV_LIST_INIT(&list_of_foos); + * + * @param The list to initialized. + */ +#define NV_LIST_INIT(head) { .prev = (head), .next = (head) } + +static NV_INLINE void +__nvListAdd(NVListPtr entry, NVListPtr prev, NVListPtr next) +{ + next->prev = entry; + entry->next = next; + entry->prev = prev; + prev->next = entry; +} + +/** + * Insert a new element after the given list head. The new element does not + * need to be initialised as empty list. + * The list changes from: + * head -> some element -> ... + * to + * head -> new element -> older element -> ... + * + * Example: + * struct foo *newfoo = malloc(...); + * nvListAdd(&newfoo->entry, &bar->list_of_foos); + * + * @param entry The new element to prepend to the list. + * @param head The existing list. + */ +static NV_INLINE void +nvListAdd(NVListPtr entry, NVListPtr head) +{ + __nvListAdd(entry, head, head->next); +} + +/** + * Append a new element to the end of the list given with this list head. + * + * The list changes from: + * head -> some element -> ... -> lastelement + * to + * head -> some element -> ... -> lastelement -> new element + * + * Example: + * struct foo *newfoo = malloc(...); + * nvListAppend(&newfoo->entry, &bar->list_of_foos); + * + * @param entry The new element to prepend to the list. + * @param head The existing list. + */ +static NV_INLINE void +nvListAppend(NVListPtr entry, NVListPtr head) +{ + __nvListAdd(entry, head->prev, head); +} + +static NV_INLINE void +__nvListDel(NVListPtr prev, NVListPtr next) +{ + next->prev = prev; + prev->next = next; +} + +/** + * Remove the element from the list it is in. Using this function will reset + * the pointers to/from this element so it is removed from the list. It does + * NOT free the element itself or manipulate it otherwise. + * + * Using nvListDel on a pure list head (like in the example at the top of + * this file) will NOT remove the first element from + * the list but rather reset the list as empty list. + * + * Example: + * nvListDel(&foo->entry); + * + * @param entry The element to remove. + */ +static NV_INLINE void +nvListDel(NVListPtr entry) +{ + __nvListDel(entry->prev, entry->next); + nvListInit(entry); +} + +/** + * Check if the list is empty. + * + * Example: + * nvListIsEmpty(&bar->list_of_foos); + * + * @return True if the list contains one or more elements or False otherwise. 
+ */ +static NV_INLINE NvBool +nvListIsEmpty(const NVListRec *head) +{ + return head->next == head; +} + +static NV_INLINE int +nvListCount(const NVListRec *head) +{ + NVListPtr next; + int count = 0; + + for (next = head->next; next != head; next = next->next) { + count++; + } + + return count; +} + +/** + * Check if entry is present in the list. + * + * Example: + * nvListPresent(&foo->entry, &bar->list_of_foos); + * + * @return 1 if the list contains the specified entry; otherwise, return 0. + */ +static NV_INLINE NvBool +nvListPresent(const NVListRec *entry, const NVListRec *head) +{ + const NVListRec *next; + + for (next = head->next; next != head; next = next->next) { + if (next == entry) { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/** + * Returns a pointer to the container of this list element. + * + * Example: + * struct foo* f; + * f = nv_container_of(&foo->entry, struct foo, entry); + * assert(f == foo); + * + * @param ptr Pointer to the NVListRec. + * @param type Data type of the list element. + * @param member Member name of the NVListRec field in the list element. + * @return A pointer to the data struct containing the list head. + */ +#ifndef nv_container_of +#define nv_container_of(ptr, type, member) \ + (type *)((char *)(ptr) - NV_OFFSETOF(type, member)) +#endif + +/** + * Alias of nv_container_of + */ +#define nvListEntry(ptr, type, member) \ + nv_container_of(ptr, type, member) + +/** + * Retrieve the first list entry for the given list pointer. + * + * Example: + * struct foo *first; + * first = nvListFirstEntry(&bar->list_of_foos, struct foo, list_of_foos); + * + * @param ptr The list head + * @param type Data type of the list element to retrieve + * @param member Member name of the NVListRec field in the list element. + * @return A pointer to the first list element. + */ +#define nvListFirstEntry(ptr, type, member) \ + nvListEntry((ptr)->next, type, member) + +/** + * Retrieve the last list entry for the given listpointer. + * + * Example: + * struct foo *first; + * first = nvListLastEntry(&bar->list_of_foos, struct foo, list_of_foos); + * + * @param ptr The list head + * @param type Data type of the list element to retrieve + * @param member Member name of the NVListRec field in the list element. + * @return A pointer to the last list element. + */ +#define nvListLastEntry(ptr, type, member) \ + nvListEntry((ptr)->prev, type, member) + +#ifdef HAVE_TYPEOF +#define __nv_container_of(ptr, sample, member) \ + nv_container_of(ptr, __typeof__(*sample), member) +#else +/* This implementation of __nv_container_of has undefined behavior according + * to the C standard, but it works in many cases. If your compiler doesn't + * support __typeof__() and fails with this implementation, please try a newer + * compiler. + */ +#define __nv_container_of(ptr, sample, member) \ + (void *)((char *)(ptr) \ + - ((char *)&(sample)->member - (char *)(sample))) +#endif + +/** + * Loop through the list given by head and set pos to struct in the list. + * + * Example: + * struct foo *iterator; + * nvListForEachEntry(iterator, &bar->list_of_foos, entry) { + * [modify iterator] + * } + * + * This macro is not safe for node deletion. Use nvListForEachEntry_safe + * instead. + * + * @param pos Iterator variable of the type of the list elements. + * @param head List head + * @param member Member name of the NVListRec in the list elements. 
+ * + */ +#ifdef HAVE_TYPEOF +#define __NV_LIST_SET(x, y) x = y +#else +static NV_INLINE void __nvListSet(void **x, void *y) +{ + *x = y; +} + +#define __NV_LIST_SET(x, y) __nvListSet((void **) &x, (void *) (y)) +#endif + +#define nvListForEachEntry(pos, head, member) \ + for (__NV_LIST_SET(pos, __nv_container_of((head)->next, pos, member)); \ + &pos->member != (head); \ + __NV_LIST_SET(pos, __nv_container_of(pos->member.next, pos, member))) + +/** + * Loop through the list, keeping a backup pointer to the element. This + * macro allows for the deletion of a list element while looping through the + * list. + * + * See nvListForEachEntry for more details. + */ +#define nvListForEachEntry_safe(pos, tmp, head, member) \ + for (__NV_LIST_SET(pos, __nv_container_of((head)->next, pos, member)), \ + __NV_LIST_SET(tmp, __nv_container_of(pos->member.next, pos, member)); \ + &pos->member != (head); \ + __NV_LIST_SET(pos, tmp), \ + __NV_LIST_SET(tmp, __nv_container_of(pos->member.next, tmp, member))) + +/* NULL-Terminated List Interface + * + * The interface below does _not_ use the NVListRec as described above. + * It is mainly for legacy structures that cannot easily be switched to + * NVListRec. + * + * This interface is for structs like + * struct foo { + * [...] + * struct foo *next; + * [...] + * }; + * + * The position and field name of "next" are arbitrary. + */ + +/** + * Init the element as null-terminated list. + * + * Example: + * struct foo *list = malloc(); + * nvNTListInit(list, next); + * + * @param list The list element that will be the start of the list + * @param member Member name of the field pointing to next struct + */ +#define nvNTListInit(_list, _member) \ + (_list)->_member = NULL + +/** + * Returns the next element in the list or NULL on termination. + * + * Example: + * struct foo *element = list; + * while ((element = nvNTListNext(element, next)) { } + * + * This macro is not safe for node deletion. Use nvListForEachEntry_safe + * instead. + * + * @param list The list or current element. + * @param member Member name of the field pointing to next struct. + */ +#define nvNTListNext(_list, _member) \ + (_list)->_member + +/** + * Iterate through each element in the list. + * + * Example: + * struct foo *iterator; + * nvNTListForEachEntry(iterator, list, next) { + * [modify iterator] + * } + * + * @param entry Assigned to the current list element + * @param list The list to iterate through. + * @param member Member name of the field pointing to next struct. + */ +#define nvNTListForEachEntry(_entry, _list, _member) \ + for (_entry = _list; _entry; _entry = (_entry)->_member) + +/** + * Iterate through each element in the list, keeping a backup pointer to the + * element. This macro allows for the deletion of a list element while + * looping through the list. + * + * See nvNTListForEachEntry for more details. + * + * @param entry Assigned to the current list element + * @param tmp The pointer to the next element + * @param list The list to iterate through. + * @param member Member name of the field pointing to next struct. + */ +#define nvNTListForEachEntrySafe(_entry, _tmp, _list, _member) \ + for (_entry = _list, _tmp = (_entry) ? (_entry)->_member : NULL;\ + _entry; \ + _entry = _tmp, _tmp = (_tmp) ? (_tmp)->_member: NULL) + +/** + * Append the element to the end of the list. This macro may be used to + * merge two lists. 
+ * + * Example: + * struct foo *elem = malloc(...); + * nvNTListInit(elem, next) + * nvNTListAppend(elem, list, struct foo, next); + * + * Resulting list order: + * list_item_0 -> list_item_1 -> ... -> elem_item_0 -> elem_item_1 ... + * + * @param entry An entry (or list) to append to the list + * @param list The list to append to. This list must be a valid list, not + * NULL. + * @param type The list type + * @param member Member name of the field pointing to next struct + */ +#define nvNTListAppend(_entry, _list, _type, _member) \ + do { \ + _type *__iterator = _list; \ + while (__iterator->_member) { __iterator = __iterator->_member;}\ + __iterator->_member = _entry; \ + } while (0) + +/** + * Insert the element at the next position in the list. This macro may be + * used to insert a list into a list. + * + * struct foo *elem = malloc(...); + * nvNTListInit(elem, next) + * nvNTListInsert(elem, list, struct foo, next); + * + * Resulting list order: + * list_item_0 -> elem_item_0 -> elem_item_1 ... -> list_item_1 -> ... + * + * @param entry An entry (or list) to append to the list + * @param list The list to insert to. This list must be a valid list, not + * NULL. + * @param type The list type + * @param member Member name of the field pointing to next struct + */ +#define nvNTListInsert(_entry, _list, _type, _member) \ + do { \ + nvNTListAppend((_list)->_member, _entry, _type, _member); \ + (_list)->_member = _entry; \ + } while (0) + +/** + * Delete the entry from the list by iterating through the list and + * removing any reference from the list to the entry. + * + * Example: + * struct foo *elem = + * nvNTListDel(elem, list, struct foo, next); + * + * @param entry The entry to delete from the list. entry is always + * re-initialized as a null-terminated list. + * @param list The list containing the entry, set to the new list without + * the removed entry. + * @param type The list type + * @param member Member name of the field pointing to the next entry + */ +#define nvNTListDel(_entry, _list, _type, _member) \ + do { \ + _type *__e = _entry; \ + if (__e == NULL || _list == NULL) break; \ + if ((_list) == __e) { \ + _list = __e->_member; \ + } else { \ + _type *__prev = _list; \ + while (__prev->_member && __prev->_member != __e) \ + __prev = nvNTListNext(__prev, _member); \ + if (__prev->_member) \ + __prev->_member = __e->_member; \ + } \ + nvNTListInit(__e, _member); \ + } while(0) + +#ifdef __cplusplus +} +#endif //__cplusplus + +#endif /* _NV_LIST_H_ */ diff --git a/src/common/inc/nv_speculation_barrier.h b/src/common/inc/nv_speculation_barrier.h new file mode 100644 index 000000000..70c96517a --- /dev/null +++ b/src/common/inc/nv_speculation_barrier.h @@ -0,0 +1,219 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
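/*
 * [Editor's note: illustrative sketch, not part of the original diff; it refers
 * back to the NULL-terminated list macros in nv_list.h above, not to
 * nv_speculation_barrier.h.] End-to-end use of the NT-list interface: build a
 * three-element list, walk it, then unlink the middle element. "struct foo"
 * follows the shape used in the header's own comments; the function name is
 * hypothetical.
 */
struct foo
{
    int value;
    struct foo *next;
};

static void exampleNTListUsage(struct foo *a, struct foo *b, struct foo *c)
{
    struct foo *list;
    struct foo *it;

    /* Each element starts out as its own single-entry list. */
    nvNTListInit(a, next);
    nvNTListInit(b, next);
    nvNTListInit(c, next);

    /* Build a -> b -> c. */
    list = a;
    nvNTListAppend(b, list, struct foo, next);
    nvNTListAppend(c, list, struct foo, next);

    /* Walk the list. */
    nvNTListForEachEntry(it, list, next)
    {
        it->value++;
    }

    /* Unlink b; 'list' is rewritten only if the head itself were removed. */
    nvNTListDel(b, list, struct foo, next);
}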
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * NVIDIA GPZ vulnerability mitigation definitions. + */ + +/* + * There are two copies of this file for legacy reasons: + * + * P4: <$NV_SOURCE/>drivers/common/inc/nv_speculation_barrier.h + * Git: include/nv_speculation_barrier.h + * + * Both files need to be kept in sync if any changes are required. + */ + +#ifndef _NV_SPECULATION_BARRIER_H_ +#define _NV_SPECULATION_BARRIER_H_ + +#define NV_SPECULATION_BARRIER_VERSION 2 + +/* + * GNU-C/MSC/clang - x86/x86_64 : x86_64, __i386, __i386__ + * GNU-C - THUMB mode : __GNUC__, __thumb__ + * GNU-C - ARM modes : __GNUC__, __arm__, __aarch64__ + * armclang - THUMB mode : __ARMCC_VERSION, __thumb__ + * armclang - ARM modes : __ARMCC_VERSION, __arm__, __aarch64__ + * GHS - THUMB mode : __ghs__, __THUMB__ + * GHS - ARM modes : __ghs__, __ARM__, __ARM64__ + */ + +#if defined(_M_IX86) || defined(__i386__) || defined(__i386) \ + || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) + /* All x86 */ + #define NV_SPECULATION_BARRIER_x86 + +#elif defined(macintosh) || defined(__APPLE__) \ + || defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) \ + || defined(__POWERPC__) || defined(__ppc) || defined(__ppc__) \ + || defined(__ppc64__) || defined(__PPC__) \ + || defined(__PPC64__) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) + /* All PowerPC */ + #define NV_SPECULATION_BARRIER_PPC + +#elif (defined(__GNUC__) && defined(__thumb__)) \ + || (defined(__ARMCC_VERSION) && defined(__thumb__)) \ + || (defined(__ghs__) && defined(__THUMB__)) + /* ARM-thumb mode(<=ARMv7)/T32 (ARMv8) */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst.w 0xf3af8014\n" + +#elif (defined(__GNUC__) && defined(__arm__)) \ + || (defined(__ARMCC_VERSION) && defined(__arm__)) \ + || (defined(__ghs__) && defined(__ARM__)) + /* aarch32(ARMv8) / arm(<=ARMv7) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB ".inst 0xe320f014\n" + +#elif (defined(__GNUC__) && defined(__aarch64__)) \ + || (defined(__ARMCC_VERSION) && defined(__aarch64__)) \ + || (defined(__ghs__) && defined(__ARM64__)) + /* aarch64(ARMv8) mode */ + #define NV_SPECULATION_BARRIER_ARM_COMMON + #define NV_SPEC_BARRIER_CSDB "HINT #20\n" +#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS +# define nv_speculation_barrier() +#else + #error "Unknown compiler/chip family" +#endif + +/* + * nv_speculation_barrier -- General-purpose speculation barrier + * + * This approach provides full protection against variant-1 vulnerability. + * However, the recommended approach is detailed below (See: + * nv_array_index_no_speculate) + * + * Semantics: + * Any memory read that is sequenced after a nv_speculation_barrier(), + * and contained directly within the scope of nv_speculation_barrier() or + * directly within a nested scope, will not speculatively execute until all + * conditions for entering that scope have been architecturally resolved. + * + * Example: + * if (untrusted_index_from_user < bound) { + * ... + * nv_speculation_barrier(); + * ... 
+ * x = array1[untrusted_index_from_user]; + * bit = x & 1; + * y = array2[0x100 * bit]; + * } + */ + +#if defined(NV_SPECULATION_BARRIER_x86) +// Delete after all references are changed to nv_speculation_barrier +#define speculation_barrier() nv_speculation_barrier() + +static inline void nv_speculation_barrier(void) +{ + +#if defined(__GNUC__) || defined(__clang__) + __asm__ __volatile__ ("lfence" : : : "memory"); +#endif + +} + +#elif defined(NV_SPECULATION_BARRIER_PPC) + +static inline void nv_speculation_barrier(void) +{ + asm volatile("ori 31,31,0"); +} + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + +/* Note: Cortex-A9 GNU-assembler seems to complain about DSB SY */ + #define nv_speculation_barrier() \ + asm volatile \ + ( \ + "DSB sy\n" \ + "ISB\n" \ + : : : "memory" \ + ) +#endif + +/* + * nv_array_index_no_speculate -- Recommended variant-1 mitigation approach + * + * The array-index-no-speculate approach "de-speculates" an array index that + * has already been bounds-checked. + * + * This approach is preferred over nv_speculation_barrier due to the following + * reasons: + * - It is just as effective as the general-purpose speculation barrier. + * - It clearly identifies what array index is being de-speculated and is thus + * self-commenting, whereas the general-purpose speculation barrier requires + * an explanation of what array index is being de-speculated. + * - It performs substantially better than the general-purpose speculation + * barrier on ARM Cortex-A cores (the difference is expected to be tens of + * cycles per invocation). Within tight loops, this difference may become + * noticeable. + * + * Semantics: + * Provided count is non-zero and the caller has already validated or otherwise + * established that index < count, any speculative use of the return value will + * use a speculative value that is less than count. + * + * Example: + * if (untrusted_index_from_user < bound) { + * untrusted_index_from_user = nv_array_index_no_speculate( + * untrusted_index_from_user, bound); + * ... + * x = array1[untrusted_index_from_user]; + * ... + * } + * + * The use of nv_array_index_no_speculate() in the above example ensures that + * subsequent uses of untrusted_index_from_user will not execute speculatively + * (they will wait for the bounds check to complete). + */ + +static inline unsigned long nv_array_index_no_speculate(unsigned long index, + unsigned long count) +{ +#if defined(NV_SPECULATION_BARRIER_x86) && (defined(__GNUC__) || defined(__clang__)) + unsigned long mask; + + __asm__ __volatile__ + ( + "CMP %2, %1 \n" + "SBB %0, %0 \n" + : "=r"(mask) : "r"(index), "r"(count) : "cc" + ); + + return (index & mask); + +#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON) + unsigned long mask; + + asm volatile + ( + "CMP %[ind], %[cnt] \n" + "SBC %[res], %[cnt], %[cnt] \n" + NV_SPEC_BARRIER_CSDB + : [res] "=r" (mask) : [ind] "r" (index), [cnt] "r" (count): "cc" + ); + + return (index & mask); + +/* Fallback to generic speculation barrier for unsupported platforms */ +#else + nv_speculation_barrier(); + + return index; +#endif +} + +#endif //_NV_SPECULATION_BARRIER_H_ diff --git a/src/common/inc/nvctassert.h b/src/common/inc/nvctassert.h new file mode 100644 index 000000000..ae3de5688 --- /dev/null +++ b/src/common/inc/nvctassert.h @@ -0,0 +1,189 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1997-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NV_CTASSERT_H +#define __NV_CTASSERT_H + +/*****************************************************************************/ + +/* Compile Time assert + * ------------------- + * Use ct_assert(b) instead of assert(b) whenever the condition 'b' is constant, + * i.e. when 'b' can be determined at compile time. + * + * e.g.: check array size: + * ct_assert(__GL_ARRAYSIZE(arrayName) == constArraySize); + * e.g.: check struct size alignment: + * ct_assert(sizeof(struct xy) % 64 == 0); + * + * When available, standard C or C++ language constructs are used: + * - ISO C++11 defines the static_assert keyword + * - ISO C11 defines the _Static_assert keyword + * + * Note that recent versions of Clang support _Static_assert in all compiler modes + * - not just C11 mode - so we test for that in addition to checking explicitly for + * C11 and C++11 support. + * + * Those new language standards aren't available on all supported platforms; an + * alternate method which involves array declarations is employed in that case, + * described below. + * + * In C, there is a restriction where ct_assert() can be placed: + * It can be placed wherever a variable declaration can be placed, i.e.: + * - either anywhere at file scope + * - or inside a function at the beginning of any {} block; it may be mixed + * with variable declarations. + * e.g.: + * void function() + * { + * ct_assert(...); <-- ok \ + * int a; | + * ct_assert(...); <-- ok | declaration section + * int b; | + * ct_assert(...); <-- ok / + * + * a = 0; -- first statement + * + * int c; <-- error + * ct_assert(...); <-- error + * + * {ct_assert(...);} <-- ok (uses its own block for ct_assert()) + * } + * + * In CPP, there is no such restriction, i.e. it can be placed at file scope + * or anywhere inside a function or namespace or class (i.e., wherever + * a variable declaration may be placed). + * + * For C code, the mechanism of this ct_assert() is to declare a prototype + * of a function (e.g. compile_time_assertion_failed_in_line_555, if current + * line number is 555), which gets an array as argument: + * (1) the size of this array is +1, if b != 0 (ok) + * (2) the size of this array is -1, if b == 0 (error) + * + * In case (2) the compiler throws an error. + * e.g. msvc compiler: + * error C2118: negative subscript or subscript is too large + * e.g. 
gcc 2.95.3: + * size of array `_compile_time_assertion_failed_in_line_555' is negative + * + * In case the condition 'b' is not constant, the msvc compiler throws + * an error: + * error C2057: expected constant expression + * In this case the run time assert() must be used. + * + * For C++ code, we use a different technique because the function prototype + * declaration can have function linkage conflicts. If a single compilation + * unit has ct_assert() statements on the same line number in two different + * files, we would have: + * + * compile_time_assertion_failed_in_line_777(...); from xxx.cpp + * compile_time_assertion_failed_in_line_777(...); from xxx.h + * + * That is valid C++. But if either declaration were in an extern "C" block, + * the same function would be declared with two different linkage types and an + * error would ensue. + * + * Instead, ct_assert() for C++ simply declares an array typedef. As in the C + * version, we will get a compilation error if a typedef with a negative size + * is specified. Line numbers are not needed because C++ allows redundant + * typedefs as long as they are all defined the same way. But we tack them on + * anyway in case the typedef name is reported in compiler errors. C does not + * permit redundant typedefs, so this version should not be used in true C + * code. It can be used in extern "C" blocks of C++ code, however. As with + * the C version, MSVC will throw a "negative subscript" or "expected constant + * expression" error if the expression asserted is false or non-constant. + * + * Notes: + * - This ct_assert() does *not* generate any code or variable. + * Therefore there is no need to define it away for RELEASE builds. + * - The integration of the current source file number (__LINE__) ... + * ... would be required in C++ to allow multiple use inside the same + * class/namespace (if we used the C-style expansion), because the id + * must be unique. + * ... is nice to have in C or C++ if the compiler's error message contains + * the id (this is not the case for msvc) + * - Using three nested macros instead of only one is necessary to get the id + * compile_time_assertion_failed_in_line_555 + * instead of + * compile_time_assertion_failed_in_line___LINE__ + */ + +#if defined(__clang__) +# ifndef __has_extension +# define __has_extension __has_feature // Compatibility with Clang pre-3.0 compilers. 
+# endif +# define CLANG_C_STATIC_ASSERT __has_extension(c_static_assert) +#else +# define CLANG_C_STATIC_ASSERT 0 +#endif + +// Adding this macro to fix MISRA 2012 rule 20.12 +#define NV_CTASSERT_STRINGIFY_MACRO(b) #b + +#if !defined(NVOC) && ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || CLANG_C_STATIC_ASSERT) + // ISO C11 defines the _Static_assert keyword +# define ct_assert(b) _Static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)) +# define ct_assert_i(b,line) _Static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)NV_CTASSERT_STRINGIFY_MACRO(line)) +#elif (defined(__cplusplus) && __cplusplus >= 201103L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) + // ISO C++11 defines the static_assert keyword +# define ct_assert(b) static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)) +# define ct_assert_i(b,line) static_assert((b), "Compile time assertion failed: " NV_CTASSERT_STRINGIFY_MACRO(b)NV_CTASSERT_STRINGIFY_MACRO(line)) +#else + // For compilers which don't support ISO C11 or C++11, we fall back to an + // array (type) declaration +# define ct_assert(b) ct_assert_i(b,__LINE__) +# define ct_assert_i(b,line) ct_assert_ii(b,line) +# ifdef __cplusplus +# define ct_assert_ii(b,line) typedef char compile_time_assertion_failed_in_line_##line[(b)?1:-1] +# else + /* + * The use of a function prototype "void compile_time_assertion_failed_in_line_##line(..) + * above violates MISRA-C 2012 Rule 8.6 since the rule disallows a function + * declaration without a definition. To fix the MISRA rule, the cplusplus style + * 'typdef char compile_time_assertion_failed_in_line_##line' + * is acceptable, but doesn't work for typical C code since there can be duplicate + * line numbers leading to duplicate typedefs which C doesn't allow. + * + * The following macro uses the predefined macro __COUNTER__ to create unique + * typedefs that fixes the MISRA violations. However, not all C compilers support + * that macro and even for compilers that support it, the underlying code makes + * use of variably modified identifiers in ct_assert that makes the use of this + * unviable. + * + * For now restrict the use of MACRO only on + * i) GCC 4.3.0 and above that supports __COUNTER__ macro + * ii) Specifically the Falcon port of the compiler since the use of variably + * modified identifiers have been removed on those projects + * + * TBD: Enable the macro on MSVC and CLANG pending + */ +# if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40300) && defined(GCC_FALCON) +# define ct_assert_ii(b,line) ct_assert_iii(b,line,__COUNTER__) +# define ct_assert_iii(b,line,cntr) ct_assert_cntr(b,line,cntr) +# define ct_assert_cntr(b,line,cntr) typedef char cnt##cntr##_compile_time_assertion_failed_in_line_##line[(b)?1:-1] __attribute__((unused)) +# else +# define ct_assert_ii(b,line) void compile_time_assertion_failed_in_line_##line(int _compile_time_assertion_failed_in_line_##line[(b) ? 1 : -1]) +# endif +# endif +#endif + +#endif // __NV_CTASSERT_H diff --git a/src/common/inc/nveGPUConfig.h b/src/common/inc/nveGPUConfig.h new file mode 100644 index 000000000..751e0736b --- /dev/null +++ b/src/common/inc/nveGPUConfig.h @@ -0,0 +1,183 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
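/*
 * [Editor's note: illustrative sketch, not part of the original diff; it
 * refers back to ct_assert() in nvctassert.h above.] What the pre-C11/C++11
 * C fallback expands to; the line number 123 is hypothetical.
 *
 * ct_assert(sizeof(NvU32) == 4); on line 123 of a C file becomes:
 *
 * void compile_time_assertion_failed_in_line_123(
 *     int _compile_time_assertion_failed_in_line_123[(sizeof(NvU32) == 4) ? 1 : -1]);
 *
 * If the condition were false, the parameter array would have size -1 and the
 * compiler rejects the declaration, e.g. MSVC's "negative subscript" error.
 */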
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVEGPUCONFIG_H_ +#define _NVEGPUCONFIG_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +#define EGPU_INLINE NV_FORCEINLINE +#else //!__cplusplus +#if defined(NV_UNIX) || defined(NVCPU_RISCV64) || defined(NV_MODS) +#define EGPU_INLINE static NV_INLINE +#else //NV_UNIX +#define EGPU_INLINE NV_INLINE +#endif //NV_UNIX +#endif //!__cplusplus + +// Surprise removal capable TB3 and TB2 BUS Device ID +#define BUS_DEVICE_ID_TB3_ALPINE_RIDGE_01 0x1578 +#define BUS_DEVICE_ID_TB3_02 0x1576 +#define BUS_DEVICE_ID_TB3_03 0x15C0 +#define BUS_DEVICE_ID_TB3_04 0x15D3 +#define BUS_DEVICE_ID_TB3_05 0x15DA +#define BUS_DEVICE_ID_TB3_06 0x15EA +#define BUS_DEVICE_ID_TB3_07 0x15E7 +#define BUS_DEVICE_ID_TB3_08 0x15EF +#define BUS_DEVICE_ID_TB3_09 0x1133 +#define BUS_DEVICE_ID_TB3_10 0x1136 + +// IceLake-U TB3 device ids. Below TB3 would be integrated to CPU. +#define BUS_DEVICE_ID_ICELAKE_TB3_01 0x8A1D +#define BUS_DEVICE_ID_ICELAKE_TB3_02 0x8A1F +#define BUS_DEVICE_ID_ICELAKE_TB3_03 0x8A21 +#define BUS_DEVICE_ID_ICELAKE_TB3_04 0x8A23 +#define BUS_DEVICE_ID_ICELAKE_TB3_05 0x8A0D +#define BUS_DEVICE_ID_ICELAKE_TB3_06 0x8A17 + +// TigerLake Thunderbolt device ids. +#define BUS_DEVICE_ID_TIGERLAKE_TB3_01 0x9A1B +#define BUS_DEVICE_ID_TIGERLAKE_TB3_02 0x9A1D +#define BUS_DEVICE_ID_TIGERLAKE_TB3_03 0x9A1F +#define BUS_DEVICE_ID_TIGERLAKE_TB3_04 0x9A21 +#define BUS_DEVICE_ID_TIGERLAKE_TB3_05 0x9A23 +#define BUS_DEVICE_ID_TIGERLAKE_TB3_06 0x9A25 +#define BUS_DEVICE_ID_TIGERLAKE_TB3_07 0x9A27 +#define BUS_DEVICE_ID_TIGERLAKE_TB3_08 0x9A29 +#define BUS_DEVICE_ID_TIGERLAKE_TB3_09 0x9A2B +#define BUS_DEVICE_ID_TIGERLAKE_TB3_10 0x9A2D + +//#define BUS_DEVICE_ID_TB2_FALCON_RIDGE_DSL5520_01 0X156C // obsolete +#define BUS_DEVICE_ID_TB2_FALCON_RIDGE_DSL5520_02 0X156D +#define BUS_DEVICE_ID_TB2_03 0x157E +#define BUS_DEVICE_ID_TB2_04 0x156B +#define BUS_DEVICE_ID_TB2_05 0x1567 +#define BUS_DEVICE_ID_TB2_06 0x1569 +//#define BUS_DEVICE_ID_TB2_07 0x1548 // obsolete +#define BUS_DEVICE_ID_TB2_08 0x151B +#define BUS_DEVICE_ID_TB2_09 0x1549 +#define BUS_DEVICE_ID_TB2_10 0x1513 + +//***************************************************************************** +// Function: isTB3DeviceID +// +// Routine Description: +// +// Function to match the specified Device ID with the known TB3 BUS's +// device IDs. 
+// +// Arguments: +// +// deviceID[IN]: Device ID to match with the TB3 Bus +// +// Return Value: +// +// true: When the passed Dev ID match with TB3's BUS Device ID +// false: When the passed Dev ID is not matching with known TB3's +// BUS Device ID +//***************************************************************************** +EGPU_INLINE NvBool isTB3DeviceID(NvU16 deviceID) +{ + NvU32 index; + NvU16 tb3DeviceIDList[]={ BUS_DEVICE_ID_TB3_ALPINE_RIDGE_01, + BUS_DEVICE_ID_TB3_02, + BUS_DEVICE_ID_TB3_03, + BUS_DEVICE_ID_TB3_04, + BUS_DEVICE_ID_TB3_05, + BUS_DEVICE_ID_TB3_06, + BUS_DEVICE_ID_TB3_07, + BUS_DEVICE_ID_TB3_08, + BUS_DEVICE_ID_TB3_09, + BUS_DEVICE_ID_TB3_10, + BUS_DEVICE_ID_ICELAKE_TB3_01, + BUS_DEVICE_ID_ICELAKE_TB3_02, + BUS_DEVICE_ID_ICELAKE_TB3_03, + BUS_DEVICE_ID_ICELAKE_TB3_04, + BUS_DEVICE_ID_ICELAKE_TB3_05, + BUS_DEVICE_ID_ICELAKE_TB3_06, + BUS_DEVICE_ID_TIGERLAKE_TB3_01, + BUS_DEVICE_ID_TIGERLAKE_TB3_02, + BUS_DEVICE_ID_TIGERLAKE_TB3_03, + BUS_DEVICE_ID_TIGERLAKE_TB3_04, + BUS_DEVICE_ID_TIGERLAKE_TB3_05, + BUS_DEVICE_ID_TIGERLAKE_TB3_06, + BUS_DEVICE_ID_TIGERLAKE_TB3_07, + BUS_DEVICE_ID_TIGERLAKE_TB3_08, + BUS_DEVICE_ID_TIGERLAKE_TB3_09, + BUS_DEVICE_ID_TIGERLAKE_TB3_10 + }; + for (index = 0; index < (sizeof(tb3DeviceIDList)/sizeof(NvU16)); index++) + { + if (deviceID == tb3DeviceIDList[index]) + { + return NV_TRUE; + } + } + return NV_FALSE; +} // isTB3DeviceID + +//***************************************************************************** +// Function: isTB2DeviceID +// +// Routine Description: +// +// Function to match the specified Device ID with the known TB2 BUS's +// device IDs. +// +// Arguments: +// +// deviceID[IN]: Device ID to match with the TB2 Bus +// +// Return Value: +// +// true: When the passed Dev ID match with TB2's BUS Device ID +// false: When the passed Dev ID is not matching with known TB2's +// BUS Device ID +//***************************************************************************** +EGPU_INLINE NvBool isTB2DeviceID(NvU16 deviceID) +{ + NvU32 index; + NvU16 tb2DeviceIDList[]={ BUS_DEVICE_ID_TB2_FALCON_RIDGE_DSL5520_02, + BUS_DEVICE_ID_TB2_03, BUS_DEVICE_ID_TB2_04, + BUS_DEVICE_ID_TB2_05, BUS_DEVICE_ID_TB2_06, + BUS_DEVICE_ID_TB2_08, BUS_DEVICE_ID_TB2_09, + BUS_DEVICE_ID_TB2_10 + }; + for (index = 0; index < (sizeof(tb2DeviceIDList)/sizeof(NvU16)); index++) + { + if (deviceID == tb2DeviceIDList[index]) + { + return NV_TRUE; + } + } + return NV_FALSE; +} // isTB2DeviceID + +#ifdef __cplusplus +} +#endif +#endif //_NVEGPUCONFIG_H_ diff --git a/src/common/inc/nvlog_defs.h b/src/common/inc/nvlog_defs.h new file mode 100644 index 000000000..e0b3b415f --- /dev/null +++ b/src/common/inc/nvlog_defs.h @@ -0,0 +1,529 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
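/*
 * [Editor's note: illustrative sketch, not part of the original diff; it
 * refers back to the helpers in nveGPUConfig.h above, not to nvlog_defs.h.]
 * A plausible way a caller might combine isTB3DeviceID() and isTB2DeviceID()
 * to decide whether a bus device ID belongs to a known surprise-removal
 * capable Thunderbolt controller. Whether the driver actually wires it up
 * this way is not shown in this diff; the function and parameter names are
 * hypothetical.
 */
EGPU_INLINE NvBool exampleIsSurpriseRemovalCapableBus(NvU16 busDeviceId)
{
    return isTB3DeviceID(busDeviceId) || isTB2DeviceID(busDeviceId);
}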
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVLOG_DEFS_H_ +#define _NVLOG_DEFS_H_ + +#include "nvtypes.h" +/******************* Common Debug & Trace Defines ***************************\ +* * +* Module: NVLOG_DEFS.H * +* * +\****************************************************************************/ + +#define NVLOG_MAX_DBG_MODULES 256 + +/********************************/ +/********* Structures *********/ +/********************************/ + +// Forward declaration, so it can be used in the function type definition. + +/** + * @brief Struct representing a buffer in NvLog + * + * All logging (Print, Regtrace, etc) use these buffers. + */ +typedef struct _NVLOG_BUFFER NVLOG_BUFFER; + + +/** + * @brief Type of the 'push' function for NvLog buffers + * + * Function called whenever pushing something to an NvLog buffer + */ +typedef NvBool (*NVLOG_BUFFER_PUSHFUNC) (NVLOG_BUFFER *, NvU8 *, NvU32); + + + +/** + * @brief Fields specific to ring buffers + */ +typedef struct _NVLOG_RING_BUFFER_EXTRA_FIELDS +{ + /** How many times the ring buffer has overflown */ + NvU32 overflow; +} NVLOG_RING_BUFFER_EXTRA_FIELDS; + + +/** + * @brief Struct representing a buffer in NvLog + * + * All logging (Print, Regtrace, etc) use these buffers. + */ +struct _NVLOG_BUFFER +{ + /** Function to call when writing to this buffer */ + union + { + NVLOG_BUFFER_PUSHFUNC fn; + + // Pad this union to prevent struct size from varying between 32/64 bit platforms + NvP64 padding; + } push; + + /** Size of the buffer data section */ + NvU32 size; + /** Buffer tag, for easier identification in a dump */ + NvU32 tag; + /** Flags of the buffer, following NVLOG_BUFFER_FLAGS_* DRF's */ + NvU32 flags; + /** Position of the next available byte in the buffer */ + NvU32 pos; + /** Number of threads currently writing to this buffer */ + volatile NvS32 threadCount; + /** Specific buffer types will define their fields here */ + union + { + NVLOG_RING_BUFFER_EXTRA_FIELDS ring; + } extra; + /** Buffer data. 
*/ + NvU8 data[1]; +}; + +#define NVLOG_MAX_BUFFERS_v11 16 +#define NVLOG_MAX_BUFFERS_v12 256 + +#if NVOS_IS_UNIX +#define NVLOG_MAX_BUFFERS NVLOG_MAX_BUFFERS_v12 +#define NVLOG_LOGGER_VERSION 12 // v1.2 +#else +#define NVLOG_MAX_BUFFERS NVLOG_MAX_BUFFERS_v11 +#define NVLOG_LOGGER_VERSION 11 // v1.1 +#endif // NVOS_IS_UNIX + + +// +// Due to this file's peculiar location, NvPort may or may not be includable +// This hack will go away when NvLog is moved into common/shared +// +#if NVOS_IS_MACINTOSH + +#if !PORT_IS_KERNEL_BUILD +typedef struct PORT_SPINLOCK PORT_SPINLOCK; +#else +#include "nvport/nvport.h" +#endif + +#elif !defined(PORT_IS_KERNEL_BUILD) +typedef struct PORT_SPINLOCK PORT_SPINLOCK; +#else +#include "nvport/nvport.h" +#endif + +/** + * @brief Information about the entire NvLog system + */ +typedef struct _NVLOG_LOGGER +{ + /** NvLog logger version */ + NvU32 version; + /** Logging buffers */ + NVLOG_BUFFER * pBuffers[NVLOG_MAX_BUFFERS]; + /** Index of the first unallocated buffer */ + NvU32 nextFree; + /** Total number of free buffer slots */ + NvU32 totalFree; + /** Lock for all buffer oprations */ + PORT_SPINLOCK* mainLock; +} NVLOG_LOGGER; +extern NVLOG_LOGGER NvLogLogger; + +// +// Buffer flags +// + +// Logging to this buffer is disabled +#define NVLOG_BUFFER_FLAGS_DISABLED 0:0 +#define NVLOG_BUFFER_FLAGS_DISABLED_NO 0 +#define NVLOG_BUFFER_FLAGS_DISABLED_YES 1 + +#define NVLOG_BUFFER_FLAGS_TYPE 2:1 +#define NVLOG_BUFFER_FLAGS_TYPE_RING 0 +#define NVLOG_BUFFER_FLAGS_TYPE_NOWRAP 1 +#define NVLOG_BUFFER_FLAGS_TYPE_SYSTEMLOG 2 + +// Expand buffer when full +#define NVLOG_BUFFER_FLAGS_EXPANDABLE 3:3 +#define NVLOG_BUFFER_FLAGS_EXPANDABLE_NO 0 +#define NVLOG_BUFFER_FLAGS_EXPANDABLE_YES 1 + +// Allocate buffer in non paged memory +#define NVLOG_BUFFER_FLAGS_NONPAGED 4:4 +#define NVLOG_BUFFER_FLAGS_NONPAGED_NO 0 +#define NVLOG_BUFFER_FLAGS_NONPAGED_YES 1 + +// +// Type of buffer locking to use +// NONE - No locking performed, for buffers that are inherently single threaded +// STATE - Lock only during state change, do memory copying unlocked +// Don't use with tiny buffers that overflow every write or two. 
+// FULL - Keep everything locked for the full duration of the write +// +#define NVLOG_BUFFER_FLAGS_LOCKING 6:5 +#define NVLOG_BUFFER_FLAGS_LOCKING_NONE 0 +#define NVLOG_BUFFER_FLAGS_LOCKING_STATE 1 +#define NVLOG_BUFFER_FLAGS_LOCKING_FULL 2 + +// Store this buffer in OCA minidumps +#define NVLOG_BUFFER_FLAGS_OCA 7:7 +#define NVLOG_BUFFER_FLAGS_OCA_NO 0 +#define NVLOG_BUFFER_FLAGS_OCA_YES 1 + +// Buffer format (not included in registry key) +#define NVLOG_BUFFER_FLAGS_FORMAT 10:8 +#define NVLOG_BUFFER_FLAGS_FORMAT_PRINTF 0 +#define NVLOG_BUFFER_FLAGS_FORMAT_LIBOS_LOG 1 +#define NVLOG_BUFFER_FLAGS_FORMAT_MEMTRACK 2 + +// Buffer GPU index +#define NVLOG_BUFFER_FLAGS_GPU_INSTANCE 31:24 + +typedef NvU32 NVLOG_BUFFER_HANDLE; + +// +// Utility macros +// +#define NVLOG_IS_RING_BUFFER(pBuffer) \ + FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _RING, pBuffer->flags) +#define NVLOG_IS_NOWRAP_BUFFER(pBuffer) \ + FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, pBuffer->flags) + +#define NVLOG_PRINT_BUFFER_SIZE(pBuffer) ((pBuffer)->size) +#define NVLOG_BUFFER_SIZE(pBuffer) \ + (NV_OFFSETOF(NVLOG_BUFFER, data) + NVLOG_PRINT_BUFFER_SIZE(pBuffer)) + +/********************************/ +/********* Filtering **********/ +/********************************/ +// TODO - Remove all this once tools are updated + +#define NVLOG_FILTER_INVALID (~0) + +#define NVLOG_FILTER_VALUE_SIMPLE_NO 0x0 +#define NVLOG_FILTER_VALUE_SIMPLE_YES 0x1 +#define NVLOG_FILTER_VALUE_EXPLICIT_NO 0x2 +#define NVLOG_FILTER_VALUE_EXPLICIT_YES 0x3 + +#define NVLOG_FILTER_PRINT_LEVEL_REGTRACE 1:0 +#define NVLOG_FILTER_PRINT_LEVEL_INFO 3:2 +#define NVLOG_FILTER_PRINT_LEVEL_NOTICE 5:4 +#define NVLOG_FILTER_PRINT_LEVEL_WARNINGS 7:6 +#define NVLOG_FILTER_PRINT_LEVEL_ERRORS 9:8 +#define NVLOG_FILTER_PRINT_LEVEL_HW_ERROR 11:10 +#define NVLOG_FILTER_PRINT_LEVEL_FATAL 13:12 + +#define NVLOG_FILTER_PRINT_BUFFER 18:14 +#define NVLOG_FILTER_REGTRACE_BUFFER 22:19 + +#define NVLOG_FILTER_REGTRACE_LOG_READ 25:23 +#define NVLOG_FILTER_REGTRACE_LOG_WRITE 27:26 +#define NVLOG_FILTER_REGTRACE_BREAK_READ 29:28 +#define NVLOG_FILTER_REGTRACE_BREAK_WRITE 31:30 + +#define NVLOG_FILTER_VALUE_IS_NO(val) ((val & 0x1) == 0) +#define NVLOG_FILTER_VALUE_IS_YES(val) (val & 0x1) +#define NVLOG_FILTER_PRINT_GET_VALUE(level, num) ((num >> (level*2)) & 0x3) + +/** + * @brief Type representing a value of a given 16bit range. + */ +typedef struct _NVLOG_RANGE_16 +{ + NvU16 low; + NvU16 high; + NvU32 value; +} NVLOG_RANGE_16; + + +/** + * @brief Type representing a value of a given 32bit range. + */ +typedef struct _NVLOG_RANGE_32 +{ + NvU32 low; + NvU32 high; + NvU32 value; +} NVLOG_RANGE_32; + +// +// Maximum number of files that have a filter assigned to them. +// +#define NVLOG_MAX_FILES 1 +// +// Maximum number of line rules (both single line and range) allowed per file +// +#define NVLOG_FILELINE_FILTER_MAX_RANGES 1 + +/** + * @brief Internal type for NVLOG_FILELINE_FILTER. + * + * Contains filtering info for a single file. + */ +typedef struct _NVLOG_FILELINE_FILTER_FILEHASH +{ + /** ID of the file (24bit MD5) */ + NvU32 fileId; + /** Number of elements in the array 'ranges' */ + NvU32 numElems; + /** Value to use if the given value isn't found in the range array */ + NvU32 defaultValue; + /** Array of ranges representing lines in the file */ + NVLOG_RANGE_16 ranges[NVLOG_FILELINE_FILTER_MAX_RANGES]; +} NVLOG_FILELINE_FILTER_FILEHASH; + +/** + * @brief Filter that contains rules that depend on the file and line number. 
+ */ +typedef struct _NVLOG_FILELINE_FILTER +{ + /** Number of elements in the fileHash array */ + NvU32 numFiles; + /** Value to use if a given file isn't found */ + NvU32 defaultValue; + /** Array of file entries, ordered as a hash table */ + NVLOG_FILELINE_FILTER_FILEHASH fileHash[NVLOG_MAX_FILES]; +} NVLOG_FILELINE_FILTER; + +/********************************/ +/********* Print Logger *********/ +/********************************/ + +#define NVLOG_PRINT_LOGGER_VERSION 11 // v1.1 +// Max buffers cannot be over 32. +#define NVLOG_PRINT_MAX_BUFFERS 8 + +#define NVLOG_PRINT_BUFFER_PRIMARY 1 +#define NVLOG_PRINT_BUFFER_SECONDARY 2 +#define NVLOG_PRINT_BUFFER_SYSTEMLOG 3 + +#define NVLOG_PRINT_DESC1_FILEID 23:0 +#define NVLOG_PRINT_DESC1_GPUID 28:24 // 2^5 = 32 possible +#define NVLOG_PRINT_DESC1_MAGIC 31:29 +#define NVLOG_PRINT_DESC1_MAGIC_VALUE 5 + +#define NVLOG_PRINT_DESC2_LINEID 15:0 +#define NVLOG_PRINT_DESC2_GROUPID 17:16 +#define NVLOG_PRINT_DESC2_GROUPID_RM 0 +#define NVLOG_PRINT_DESC2_GROUPID_PMU 1 +#define NVLOG_PRINT_DESC2_OPT_DATA_COUNT 24:18 // number of dwords +#define NVLOG_PRINT_DESC2_OPT_DATA_COUNT_MAX 0x7F +#define NVLOG_PRINT_DESC2_RESERVED 28:25 +#define NVLOG_PRINT_DESC2_MAGIC 31:29 +#define NVLOG_PRINT_DESC2_MAGIC_VALUE 6 + +#define NVLOG_UNKNOWN_GPU_INSTANCE 0x1f + +#define NVLOG_PRINT_MODULE_FILTER_VALUE 1:0 +#define NVLOG_PRINT_MODULE_FILTER_BUFFER 6:2 +#define NVLOG_PRINT_MODULE_FILTER_ENABLED 7:7 + +// +// Regkey fields - These are copied directly from nvRmReg.h +// A copy is necessary as these might be needed on systems that don't +// have nvRmReg.h, such as DVS builds for NvWatch +// +#ifndef NV_REG_STR_RM_NVLOG +#define NV_REG_STR_RM_NVLOG "RMNvLog" +#define NV_REG_STR_RM_NVLOG_BUFFER_FLAGS 7:0 +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE 23:8 +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE_DEFAULT ((NVOS_IS_WINDOWS||NVOS_IS_MACINTOSH)?8:250) +#define NV_REG_STR_RM_NVLOG_BUFFER_SIZE_DISABLE 0 +#define NV_REG_STR_RM_NVLOG_RUNTIME_LEVEL 28:25 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP 30:29 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_NONE 0 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_32 1 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_64 2 +#define NV_REG_STR_RM_NVLOG_TIMESTAMP_32_DIFF 3 +#define NV_REG_STR_RM_NVLOG_INITED 31:31 +#define NV_REG_STR_RM_NVLOG_INITED_NO 0 +#define NV_REG_STR_RM_NVLOG_INITED_YES 1 +#endif // NV_REG_STR_RM_NVLOG + + +// +// Arg types: +// 0: Special meaning. End of argument list. +// 1: d, u, x, X, i, o - Integer type +// 2: lld, llu, llx, llX, lli, llo - Long long integer type +// 3: s - string type (size is 0) +// 4: p - pointer type +// 5: c - char type +// 6: f, g, e, F, G, E - floating point type +// 7-14: Unused at the moment, default value is 0 +// 15: Special meaning. Error value - unsupported type. +// +#define NVLOG_PRINT_MAX_ARG_TYPES 0x10 +#define NVLOG_PRINT_ARG_TYPE_ARGLIST_END 0x0 +#define NVLOG_PRINT_ARG_TYPE_INT 0x1 +#define NVLOG_PRINT_ARG_TYPE_LONGLONG 0x2 +#define NVLOG_PRINT_ARG_TYPE_STRING 0x3 +#define NVLOG_PRINT_ARG_TYPE_POINTER 0x4 +#define NVLOG_PRINT_ARG_TYPE_CHAR 0x5 +#define NVLOG_PRINT_ARG_TYPE_FLOAT 0x6 +#define NVLOG_PRINT_ARG_TYPE_ERROR 0xf + + +/** + * @brief Signature of the database required to decode the print logs + * + * The sig1-sig3 values are generated randomly at compile time. 
+ */ +typedef struct _NVLOG_DB_SIGNATURE +{ + NvU32 timestamp; + NvU32 sig1; + NvU32 sig2; + NvU32 sig3; +} NVLOG_DB_SIGNATURE; + +/** + * @brief Filter that contains all rules used to filter DBG_PRINTF calls + */ +typedef struct _NVLOG_PRINT_FILTER +{ + /** Same file:line filter is shared with the Regtrace system */ + NVLOG_FILELINE_FILTER *pFileLineFilter; + /** Filter based on debug levels. Uses NVLOG_FILTER_PRINT_LEVEL_* DRF's */ + NvU32 runtimePrintLevelFilter; + /** Filter based on debug modules. Uses NVLOG_PRINT_MODULE_FILTER_* DRF's */ + NvU8 runtimePrintModuleFilter[NVLOG_MAX_DBG_MODULES]; +} NVLOG_PRINT_FILTER; + + +/** + * @brief Enum representing all possible argument types to DBG_PRINTF + */ +typedef enum _NVLOG_ARGTYPE +{ + NVLOG_ARGTYPE_NONE, + NVLOG_ARGTYPE_INT, + NVLOG_ARGTYPE_LONG_LONG_INT, + NVLOG_ARGTYPE_STRING, + NVLOG_ARGTYPE_POINTER, + NVLOG_ARGTYPE_FLOAT, + NVLOG_ARGTYPE__COUNT +} NVLOG_ARGTYPE; + +/** + * @brief General info about the NvLog Print system + */ +typedef struct _NVLOG_PRINT_LOGGER +{ + /** NvLog print logger version */ + NvU32 version; + /** Runtime argument sizes (16 different arglist values) */ + NvU8 runtimeSizes[NVLOG_PRINT_MAX_ARG_TYPES]; + /** Database signature for decoding */ + NVLOG_DB_SIGNATURE signature; + /** Filter buffer for print statements */ + NVLOG_PRINT_FILTER filter; + /** Flags for all NvLog print buffers */ + NvU32 flags; + /** Buffer indices for all nvlog buffers. buffers[1] is default. */ + NvU32 buffers[NVLOG_PRINT_MAX_BUFFERS]; + /** Initialized flag, set to true after nvlogPrintInit has executed */ + NvBool initialized; + /** Paused flag, set to true while print logging is paused */ + NvBool paused; +} NVLOG_PRINT_LOGGER; +extern NVLOG_PRINT_LOGGER NvLogPrintLogger; + +#define NVLOG_PRINT_BUFFER_TAG(_i) NvU32_BUILD('t','r','p','0' + (_i)) + +/********************************/ +/********** Regtrace **********/ +/********************************/ + +#define NVLOG_REGTRACE_LOGGER_VERSION 10 // v1.0 +#define NVLOG_REGTRACE_MAX_BUFFERS 4 + +#define NVLOG_REGTRACE_READ 0 +#define NVLOG_REGTRACE_WRITE 1 + +#define NVLOG_REGTRACE_DESC1_FILEID NVLOG_PRINT_DESC1_FILEID +#define NVLOG_REGTRACE_DESC1_GPUID NVLOG_PRINT_DESC1_GPUID +#define NVLOG_REGTRACE_DESC1_MAGIC NVLOG_PRINT_DESC1_MAGIC +#define NVLOG_REGTRACE_DESC1_MAGIC_VALUE (NVLOG_PRINT_DESC1_MAGIC_VALUE-1) + +#define NVLOG_REGTRACE_DESC2_LINEID 15:0 +#define NVLOG_REGTRACE_DESC2_READWRITE 16:16 +#define NVLOG_REGTRACE_DESC2_READWRITE_READ NVLOG_REGTRACE_READ +#define NVLOG_REGTRACE_DESC2_READWRITE_WRITE NVLOG_REGTRACE_WRITE +#define NVLOG_REGTRACE_DESC2_REGSIZE 18:17 +#define NVLOG_REGTRACE_DESC2_REGSIZE_8 0 +#define NVLOG_REGTRACE_DESC2_REGSIZE_16 1 +#define NVLOG_REGTRACE_DESC2_REGSIZE_32 2 +#define NVLOG_REGTRACE_DESC2_REGSIZE_64 3 +#define NVLOG_REGTRACE_DESC2_THREADID 28:19 +#define NVLOG_REGTRACE_DESC2_MAGIC 31:29 +#define NVLOG_REGTRACE_DESC2_MAGIC_VALUE 3 + +/** + * @brief Single entry in an NvLog Regtrace buffer.
+ */ +typedef struct _NVLOG_REGTRACE_RECORD +{ + /** Uses NVLOG_REGTRACE_DESC1_* DRF's */ + NvU32 desc1; + /** Uses NVLOG_REGTRACE_DESC2_* DRF's */ + NvU32 desc2; + /** Address of the register being accessed */ + NvU32 address; + /** Value that was read/written */ + NvU32 value; +} NVLOG_REGTRACE_RECORD; + + + +#define NVLOG_REGTRACE_FILTER_MAX_RANGES 256 + +// Regtrace shares the file:line filter with print + + +/** + * @brief Filter that contains all rules used to filter register access logging + */ +typedef struct _NVLOG_REGTRACE_FILTER +{ + /** Number of elements in the 'ranges' array */ + NvU32 numRanges; + /** File:line based filter. Shared with NvLog print system */ + NVLOG_FILELINE_FILTER *pFileLineFilter; + /** Range array for filtering based on register addresses */ + NVLOG_RANGE_32 ranges[NVLOG_REGTRACE_FILTER_MAX_RANGES]; +} NVLOG_REGTRACE_FILTER; + +/** + * @brief General info about the NvLog Regtrace system + */ +typedef struct _NVLOG_REGTRACE_LOGGER +{ + /** NvLog regtrace logger version */ + NvU32 version; + /** Filter buffer for regtrace statements */ + NVLOG_REGTRACE_FILTER filter; + /** Buffer indices for all NvLog buffers. First element is default buffer */ + NvU32 buffers[NVLOG_REGTRACE_MAX_BUFFERS]; +} NVLOG_REGTRACE_LOGGER; + +#endif // _NVLOG_DEFS_H_ diff --git a/src/common/inc/nvlog_inc.h b/src/common/inc/nvlog_inc.h new file mode 100644 index 000000000..c40c64fd2 --- /dev/null +++ b/src/common/inc/nvlog_inc.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2016 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +// +// This file must not have include guards, it is supposed to be included +// multiple times - Once in a precompiled header, once through noprecomp.h +// + +// WAR for a GCC precompiled headers problem +#if !defined(NV_RM_PRECOMPILED_HEADER) +#include "nvlog_inc2.h" + +// +// If noprecomp is not included, this will not expand and will result in an +// undefined identifier. Hopefully, the meaningful name will hint at the +// underlying problem.
+// +#define ___please_include_noprecomp_h___ + +#endif diff --git a/src/common/inc/nvlog_inc2.h b/src/common/inc/nvlog_inc2.h new file mode 100644 index 000000000..7f10150c2 --- /dev/null +++ b/src/common/inc/nvlog_inc2.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013,2016-2017,2020-2020 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVLOG_INC2_H_ +#define _NVLOG_INC2_H_ +// +// Include the auto-generated g_$(filename)-nvlog.h header. The file contains +// information about the trace statements that was pulled out by the NvLog preprocessor. +// NVLOG_INCLUDE is defined by make at compile time, for every source file. +// +// The four lines of macros is some trickiness needed to make it work. +// +#if (defined(NVLOG_ENABLED) || defined(NV_MODS)) && defined(NVLOG_INCLUDE) && !defined(NVLOG_PARSING) +#if NVLOG_ENABLED || defined(NV_MODS) + +#ifndef NVLOG_FILEID // Acts as an include guard +#define NVLOG_INCLUDE3(a) #a +#define NVLOG_INCLUDE2(a) NVLOG_INCLUDE3 a +#define NVLOG_INCLUDE1 NVLOG_INCLUDE2((NVLOG_INCLUDE)) +#include NVLOG_INCLUDE1 +#endif // NVLOG_FILEID + +#endif // NVLOG_ENABLED +#endif // defined(NVLOG_ENABLED) && defined(NVLOG_INCLUDE) + + +#endif // _NVLOG_INC2_H_ diff --git a/src/common/inc/prbrt.h b/src/common/inc/prbrt.h new file mode 100644 index 000000000..b7f61504f --- /dev/null +++ b/src/common/inc/prbrt.h @@ -0,0 +1,278 @@ +/* + * Lightweight protocol buffers. + * + * Based on code taken from + * https://code.google.com/archive/p/lwpb/source/default/source + * + * The code there is licensed as Apache 2.0. However, NVIDIA has received the + * code from the original author under MIT license terms. + * + * + * Copyright 2009 Simon Kallweit + * Copyright 2010-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains the definitions used by the code generated + * by the protobuf compiler. + */ + +#ifndef __PRBRT_H__ +#define __PRBRT_H__ + +// Maximum depth of message embedding +#ifndef PRB_MAX_DEPTH +#define PRB_MAX_DEPTH 8 +#endif + +// Maximum number of required fields in a message +#ifndef PRB_MAX_REQUIRED_FIELDS +#define PRB_MAX_REQUIRED_FIELDS 16 +#endif + +// Provide enum names as strings +#ifndef PRB_ENUM_NAMES +#define PRB_ENUM_NAMES 0 +#endif + +#if PRB_ENUM_NAMES +#define PRB_MAYBE_ENUM_NAME(n) n, +#else +#define PRB_MAYBE_ENUM_NAME(n) +#endif + +// Provide field names as strings +#ifndef PRB_FIELD_NAMES +#define PRB_FIELD_NAMES 0 +#endif + +#if PRB_FIELD_NAMES +#define PRB_MAYBE_FIELD_NAME(n) n, +#else +#define PRB_MAYBE_FIELD_NAME(n) +#endif + +// Provide field default values +#ifndef PRB_FIELD_DEFAULTS +#define PRB_FIELD_DEFAULTS 0 +#endif + +#if PRB_FIELD_DEFAULTS +#define PRB_MAYBE_FIELD_DEFAULT_DEF(n) n +#define PRB_MAYBE_FIELD_DEFAULT(n) n, +#else +#define PRB_MAYBE_FIELD_DEFAULT_DEF(n) +#define PRB_MAYBE_FIELD_DEFAULT(n) +#endif + +// Provide message names as strings +#ifndef PRB_MESSAGE_NAMES +#define PRB_MESSAGE_NAMES 0 +#endif + +#if PRB_MESSAGE_NAMES +#define PRB_MAYBE_MESSAGE_NAME(n) n, +#else +#define PRB_MAYBE_MESSAGE_NAME(n) +#endif + +// Provide method names as strings +#ifndef PRB_METHOD_NAMES +#define PRB_METHOD_NAMES 0 +#endif + +#if PRB_METHOD_NAMES +#define PRB_MAYBE_METHOD_NAME(n) n, +#else +#define PRB_MAYBE_METHOD_NAME(n) +#endif + +// Provide service names as strings +#ifndef PRB_SERVICE_NAMES +#define PRB_SERVICE_NAMES 0 +#endif + +#if PRB_SERVICE_NAMES +#define PRB_MAYBE_SERVICE_NAME(n) n, +#else +#define PRB_MAYBE_SERVICE_NAME(n) +#endif + +// Field labels +#define PRB_REQUIRED 0 +#define PRB_OPTIONAL 1 +#define PRB_REPEATED 2 + +// Field value types +#define PRB_DOUBLE 0 +#define PRB_FLOAT 1 +#define PRB_INT32 2 +#define PRB_INT64 3 +#define PRB_UINT32 4 +#define PRB_UINT64 5 +#define PRB_SINT32 6 +#define PRB_SINT64 7 +#define PRB_FIXED32 8 +#define PRB_FIXED64 9 +#define PRB_SFIXED32 10 +#define PRB_SFIXED64 11 +#define PRB_BOOL 12 +#define PRB_ENUM 13 +#define PRB_STRING 14 +#define PRB_BYTES 15 +#define PRB_MESSAGE 16 + +// Field flags +#define PRB_HAS_DEFAULT (1 << 0) +#define PRB_IS_PACKED (1 << 1) +#define PRB_IS_DEPRECATED (1 << 2) + +typedef struct +{ + unsigned int label : 2; + unsigned int typ : 6; + unsigned int flags : 8; +} PRB_FIELD_OPTS; + +// Protocol buffer wire types +typedef enum +{ + WT_VARINT = 0, + WT_64BIT = 1, + WT_STRING = 2, + WT_32BIT = 5 +} WIRE_TYPE; + +// Protocol buffer wire values +typedef union +{ + NvU64 varint; + NvU64 int64; + struct { + NvU64 len; + const void *data; + } string; + NvU32 int32; +} WIRE_VALUE; + +typedef struct +{ + char *str; + NvU32 len; +} PRB_VALUE_STRING; + +typedef struct +{ + NvU8 *data; + NvU32 len; +} PRB_VALUE_BYTES; + +typedef struct +{ + void *data; + NvU32 len; +} PRB_VALUE_MESSAGE; + +typedef union +{ + NvF64 double_; + NvF32 float_; + NvS32 int32; + NvS64 int64; + NvU32 uint32; + 
NvU64 uint64; + NvBool bool_; + PRB_VALUE_STRING string; + PRB_VALUE_BYTES bytes; + PRB_VALUE_MESSAGE message; + int enum_; + int null; +} PRB_VALUE; + +typedef struct +{ + int value; +#if PRB_ENUM_NAMES + const char *name; +#endif +} PRB_ENUM_MAPPING; + +typedef struct +{ + const PRB_ENUM_MAPPING *mappings; + NvU32 count; +#if PRB_ENUM_NAMES + const char *name; +#endif +} PRB_ENUM_DESC; + +struct PRB_MSG_DESC; + +//* Protocol buffer field descriptor +typedef struct PRB_FIELD_DESC +{ + NvU32 number; + PRB_FIELD_OPTS opts; + const struct PRB_MSG_DESC *msg_desc; + const PRB_ENUM_DESC *enum_desc; +#if PRB_FIELD_NAMES + const char *name; +#endif +#if PRB_FIELD_DEFAULTS + const PRB_VALUE *def; +#endif +} PRB_FIELD_DESC; + +//* Protocol buffer message descriptor +typedef struct PRB_MSG_DESC +{ + NvU32 num_fields; + const PRB_FIELD_DESC *fields; +#if PRB_MESSAGE_NAMES + const char *name; +#endif +} PRB_MSG_DESC; + +// Forward declaration +struct PRB_SERVICE_DESC; + +// Protocol buffer method descriptor +struct PRB_METHOD_DESC +{ + const struct PRB_SERVICE_DESC *service; + const PRB_MSG_DESC *req_desc; + const PRB_MSG_DESC *res_desc; +#if PRB_METHOD_NAMES + const char *name; +#endif +}; + +// Protocol buffer service descriptor +typedef struct PRB_SERVICE_DESC +{ + NvU32 num_methods; + const struct PRB_METHOD_DESC *methods; +#if PRB_SERVICE_NAMES + const char *name; +#endif +} PRB_SERVICE_DESC; + +#endif diff --git a/src/common/inc/rmosxfac.h b/src/common/inc/rmosxfac.h new file mode 100644 index 000000000..5ebfdaeeb --- /dev/null +++ b/src/common/inc/rmosxfac.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2003 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RMOSXFAC_H_ +#define _RMOSXFAC_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: RMOSXFAC.H * +* Declarations for common OS interface functions. 
* +* * +\***************************************************************************/ + +#ifdef __cplusplus +extern "C" { +#endif +extern NvS32 RmInitRm(void); +extern NvS32 RmDestroyRm(void); + +#ifdef __cplusplus +} +#endif + +#endif // _RMOSXFAC_H_ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_boot.h b/src/common/inc/swref/published/ampere/ga100/dev_boot.h new file mode 100644 index 000000000..5e1dfb690 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_boot.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_boot_h__ +#define __ga100_dev_boot_h__ +#define NV_PMC 0x00000fff:0x00000000 /* RW--D */ +#define NV_PMC_BOOT_0 0x00000000 /* R--4R */ +#define NV_PMC_ENABLE 0x00000200 /* RW-4R */ +#define NV_PMC_ENABLE_DEVICE(i) (i):(i) /* */ +#define NV_PMC_ENABLE_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_ENABLE_DEVICE_DISABLE 0x00000000 /* */ +#define NV_PMC_ENABLE_DEVICE_ENABLE 0x00000001 /* */ +#define NV_PMC_ENABLE_NVDEC 15:15 /* */ +#define NV_PMC_ENABLE_NVDEC_DISABLED 0x00000000 /* */ +#define NV_PMC_ENABLE_NVDEC_ENABLED 0x00000001 /* */ +#define NV_PMC_DEVICE_ENABLE(i) (0x000000600+(i)*4) /* RW-4A */ +#define NV_PMC_DEVICE_ENABLE__SIZE_1 1 /* */ +#define NV_PMC_DEVICE_ENABLE__PRIV_LEVEL_MASK 0x00000084 /* */ +#define NV_PMC_DEVICE_ENABLE_STATUS 31:0 /* RWIVF */ +#define NV_PMC_DEVICE_ENABLE_STATUS_DISABLE_ALL 0x00000000 /* RWI-V */ +#define NV_PMC_DEVICE_ENABLE_STATUS_BIT(i) (i):(i) /* */ +#define NV_PMC_DEVICE_ENABLE_STATUS_BIT__SIZE_1 32 /* */ +#define NV_PMC_DEVICE_ENABLE_STATUS_BIT_DISABLE 0x00000000 /* */ +#define NV_PMC_DEVICE_ENABLE_STATUS_BIT_ENABLE 0x00000001 /* */ +#endif // __ga100_dev_boot_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_bus.h b/src/common/inc/swref/published/ampere/ga100/dev_bus.h new file mode 100644 index 000000000..a1ee1c5db --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_bus.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, 
sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef ga100_dev_nv_bus_h +#define ga100_dev_nv_bus_h + +#define NV_PBUS_SW_SCRATCH(i) (0x00001400+(i)*4) /* RW-4A */ +#define NV_PBUS_SW_SCRATCH__SIZE_1 64 /* */ +#define NV_PBUS_SW_SCRATCH_FIELD 31:0 /* RWIVF */ +#define NV_PBUS_SW_SCRATCH_FIELD_INIT 0x00000000 /* RWI-V */ + +#endif // ga100_dev_nv_bus_h diff --git a/src/common/inc/swref/published/ampere/ga100/dev_bus_addendum.h b/src/common/inc/swref/published/ampere/ga100/dev_bus_addendum.h new file mode 100644 index 000000000..15244b9ca --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_bus_addendum.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef ga100_dev_nv_bus_addendum_h +#define ga100_dev_nv_bus_addendum_h + +#define NV_PBUS_SW_SCRATCH1_SMC_MODE 15:15 +#define NV_PBUS_SW_SCRATCH1_SMC_MODE_OFF 0x00000000 +#define NV_PBUS_SW_SCRATCH1_SMC_MODE_ON 0x00000001 + +#endif // ga100_dev_nv_bus_addendum_h diff --git a/src/common/inc/swref/published/ampere/ga100/dev_ce.h b/src/common/inc/swref/published/ampere/ga100/dev_ce.h new file mode 100644 index 000000000..cb752cc65 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_ce.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_ce_h__ +#define __ga100_dev_ce_h__ +#define NV_CE_PCE_MAP 0x00104028 /* R--4R */ +#define NV_CE_PCE2LCE_CONFIG__SIZE_1 18 /* */ +#define NV_CE_PCE2LCE_CONFIG_PCE_ASSIGNED_LCE_NONE 0x0000000f /* RW--V */ +#define NV_CE_GRCE_CONFIG__SIZE_1 2 /* */ +#define NV_CE_GRCE_CONFIG_SHARED_LCE 3:0 /* RWIVF */ +#define NV_CE_GRCE_CONFIG_SHARED_LCE_NONE 0xf /* RW--V */ +#define NV_CE_GRCE_CONFIG_SHARED 30:30 /* RWIVF */ +#endif diff --git a/src/common/inc/swref/published/ampere/ga100/dev_ctrl.h b/src/common/inc/swref/published/ampere/ga100/dev_ctrl.h new file mode 100644 index 000000000..d5ff9fb49 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_ctrl.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_ctrl_h__ +#define __ga100_dev_ctrl_h__ +#define NV_CTRL_VF_DOORBELL_VECTOR 11:0 /* -WXUF */ +#define NV_CTRL_VF_DOORBELL_RUNLIST_ID 22:16 /* -WXUF */ +#endif // __ga100_dev_ctrl_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_falcon_v4.h b/src/common/inc/swref/published/ampere/ga100/dev_falcon_v4.h new file mode 100644 index 000000000..af02ce8f5 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_falcon_v4.h @@ -0,0 +1,124 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __ga100_dev_falcon_v4_h__ +#define __ga100_dev_falcon_v4_h__ + +#define NV_PFALCON_FALCON_IRQSCLR 0x00000004 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQSCLR_HALT 4:4 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_HALT_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN0 6:6 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN0_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN1 7:7 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN1_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSTAT 0x00000008 /* R--4R */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT 4:4 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0 6:6 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1 7:7 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMSET 0x00000010 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQMCLR 0x00000014 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQMASK 0x00000018 /* R--4R */ +#define NV_PFALCON_FALCON_IRQDEST 0x0000001c /* RW-4R */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER(i) (0x000003e8+(i)*4) /* -W-4A */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER__SIZE_1 2 /* */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER_TRIGGER 0:0 /* -W-VF */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER_TRIGGER_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */ +#define NV_PFALCON_FALCON_OS 0x00000080 /* RW-4R */ +#define NV_PFALCON_FALCON_RM 0x00000084 /* RW-4R */ +#define NV_PFALCON_FALCON_DEBUGINFO 0x00000094 /* RW-4R */ +#define NV_PFALCON_FALCON_CPUCTL 0x00000100 /* RW-4R */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED 4:4 /* R-XVF */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN 6:6 /* RWIVF */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS 0x00000130 /* -W-4R */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_BOOTVEC 0x00000104 /* RW-4R */ +#define NV_PFALCON_FALCON_HWCFG 0x00000108 /* R--4R */ +#define NV_PFALCON_FALCON_HWCFG_IMEM_SIZE 8:0 /* R--VF */ +#define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* R--4R */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV 10:10 /* R--VF */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL 0x0000010c /* RW-4R */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX 0:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING 1:1 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING 2:2 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFBASE 0x00000110 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFBASE_BASE 
31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFMOFFS 0x00000114 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFMOFFS_OFFS 23:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFCMD 0x00000118 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFCMD_FULL 0:0 /* R-XVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_FULL_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFCMD_IDLE 1:1 /* R-XVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IDLE_FALSE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFCMD_SEC 3:2 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM 4:4 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE 5:5 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_SIZE 10:8 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SIZE_256B 0x00000006 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_CTXDMA 14:12 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SET_DMTAG 16:16 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SET_DMTAG_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFFBOFFS 0x0000011c /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFFBOFFS_OFFS 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFBASE1 0x00000128 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFBASE1_BASE 8:0 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC(i) (0x00000180+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMC_OFFS 7:2 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_BLK 23:8 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_AINCW 24:24 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_SECURE 28:28 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMD(i) (0x00000184+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMD_DATA 31:0 /* RW-VF */ +#define NV_PFALCON_FALCON_IMEMT(i) (0x00000188+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMT_TAG 15:0 /* RW-VF */ +#define NV_PFALCON_FALCON_DMEMC(i) (0x000001c0+(i)*8) /* RW-4A */ +#define NV_PFALCON_FALCON_DMEMC_OFFS 7:2 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_BLK 23:8 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_AINCW 24:24 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMD(i) (0x000001c4+(i)*8) /* RW-4A */ +#define NV_PFALCON_FALCON_DMEMD_DATA 31:0 /* RW-VF */ + +#endif // __ga100_dev_falcon_v4_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_fb.h b/src/common/inc/swref/published/ampere/ga100/dev_fb.h new file mode 100644 index 000000000..02879740e --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_fb.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_fb_h__ +#define __ga100_dev_fb_h__ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR 0x00100C10 /* RW-4R */ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT 8 /* */ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_ADR_39_08 31:0 /* RWIVF */ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_ADR_39_08_INIT 0x00000000 /* RWI-V */ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI 0x00100C40 /* RW-4R */ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI_MASK 0x7F /* */ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI_ADR_63_40 23:0 /* RWIVF */ +#define NV_PFB_FBHUB_POISON_INTR_VECTOR 0x00100A24 /* R--4R */ +#define NV_PFB_FBHUB_POISON_INTR_VECTOR_HW 7:0 /* R-IVF */ +#define NV_PFB_FBHUB_POISON_INTR_VECTOR_HW_INIT 135 /* R-I-V */ +#define NV_PFB_PRI_MMU_LOCK_CFG_PRIV_LEVEL_MASK 0x001FA7C8 /* RW-4R */ +#define NV_PFB_PRI_MMU_LOCK_CFG_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_PFB_PRI_MMU_LOCK_CFG_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_PFB_PRI_MMU_LOCK_CFG_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_PFB_PRI_MMU_LOCK_ADDR_LO 0x001FA82C /* RW-4R */ +#define NV_PFB_PRI_MMU_LOCK_ADDR_LO__PRIV_LEVEL_MASK 0x001FA7C8 /* */ +#define NV_PFB_PRI_MMU_LOCK_ADDR_LO_VAL 31:4 /* RWEVF */ +#define NV_PFB_PRI_MMU_LOCK_ADDR_LO_ALIGNMENT 0x0000000c /* */ +#define NV_PFB_PRI_MMU_LOCK_ADDR_HI 0x001FA830 /* RW-4R */ +#define NV_PFB_PRI_MMU_LOCK_ADDR_HI_VAL 31:4 /* RWEVF */ +#define NV_PFB_PRI_MMU_LOCK_ADDR_HI_ALIGNMENT 0x0000000c /* */ +#endif // __ga100_dev_fb_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_fbif_v4.h b/src/common/inc/swref/published/ampere/ga100/dev_fbif_v4.h new file mode 100644 index 000000000..bf9f60413 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_fbif_v4.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission 
notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_fbif_v4_h__ +#define __ga100_dev_fbif_v4_h__ + +#define NV_PFALCON_FBIF_TRANSCFG(i) (0x00000000+(i)*4) /* RW-4A */ +#define NV_PFALCON_FBIF_TRANSCFG__SIZE_1 8 /* */ +#define NV_PFALCON_FBIF_TRANSCFG_TARGET 1:0 /* RWIVF */ +#define NV_PFALCON_FBIF_TRANSCFG_TARGET_COHERENT_SYSMEM 0x00000001 /* R---V */ +#define NV_PFALCON_FBIF_TRANSCFG_MEM_TYPE 2:2 /* RWIVF */ +#define NV_PFALCON_FBIF_TRANSCFG_MEM_TYPE_PHYSICAL 0x00000001 /* R---V */ +#define NV_PFALCON_FBIF_CTL 0x00000024 /* RW-4R */ +#define NV_PFALCON_FBIF_CTL_ALLOW_PHYS_NO_CTX 7:7 /* RWIVF */ +#define NV_PFALCON_FBIF_CTL_ALLOW_PHYS_NO_CTX_ALLOW 0x00000001 /* RW--V */ + +#endif // __ga100_dev_fbif_v4_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_fuse.h b/src/common/inc/swref/published/ampere/ga100/dev_fuse.h new file mode 100644 index 000000000..6931a44c9 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_fuse.h @@ -0,0 +1,133 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __ga100_dev_fuse_h__ +#define __ga100_dev_fuse_h__ + +#define NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS 0x0082074C /* RW-4R */ +#define NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS_DATA 0:0 /* RWIVF */ +#define NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS_DATA_NO 0x00000000 /* RW--V */ +#define NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS_DATA_YES 0x00000001 /* RW--V */ + +#define NV_FUSE_OPT_NVDEC_DISABLE 0x00820378 /* RW-4R */ +#define NV_FUSE_OPT_NVDEC_DISABLE_DATA 4:0 /* RWIVF */ +#define NV_FUSE_OPT_NVDEC_DISABLE_DATA_INIT 0x00000000 /* RWI-V */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION 0x00824100 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE2_VERSION 0x00824104 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE2_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE3_VERSION 0x00824108 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE3_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE4_VERSION 0x0082410C /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE4_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE5_VERSION 0x00824110 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE5_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE6_VERSION 0x00824114 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE6_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE7_VERSION 0x00824118 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE7_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE8_VERSION 0x0082411C /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE8_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE9_VERSION 0x00824120 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE9_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE10_VERSION 0x00824124 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE10_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE11_VERSION 0x00824128 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE11_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE12_VERSION 0x0082412C /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE12_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE13_VERSION 0x00824130 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE13_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE14_VERSION 0x00824134 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE14_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE15_VERSION 0x00824138 /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE15_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE16_VERSION 0x0082413C /* RW-4R */ +#define NV_FUSE_OPT_FPF_NVDEC_UCODE16_VERSION_DATA 15:0 /* RWIVF */ + +#define NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION 0x00824140 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE2_VERSION 0x00824144 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE2_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE3_VERSION 0x00824148 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE3_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE4_VERSION 0x0082414C /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE4_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE5_VERSION 0x00824150 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE5_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE6_VERSION 0x00824154 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE6_VERSION_DATA 15:0 /* RWIVF */ +#define 
NV_FUSE_OPT_FPF_SEC2_UCODE7_VERSION 0x00824158 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE7_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE8_VERSION 0x0082415C /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE8_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE9_VERSION 0x00824160 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE9_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE10_VERSION 0x00824164 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE10_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE11_VERSION 0x00824168 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE11_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE12_VERSION 0x0082416C /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE12_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE13_VERSION 0x00824170 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE13_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE14_VERSION 0x00824174 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE14_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE15_VERSION 0x00824178 /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE15_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE16_VERSION 0x0082417C /* RW-4R */ +#define NV_FUSE_OPT_FPF_SEC2_UCODE16_VERSION_DATA 15:0 /* RWIVF */ + +#define NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION 0x008241C0 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE2_VERSION 0x008241C4 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE2_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE3_VERSION 0x008241C8 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE3_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE4_VERSION 0x008241CC /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE4_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE5_VERSION 0x008241D0 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE5_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE6_VERSION 0x008241D4 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE6_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE7_VERSION 0x008241D8 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE7_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE8_VERSION 0x008241DC /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE8_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE9_VERSION 0x008241E0 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE9_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE10_VERSION 0x008241E4 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE10_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE11_VERSION 0x008241E8 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE11_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE12_VERSION 0x008241EC /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE12_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE13_VERSION 0x008241F0 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE13_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE14_VERSION 0x008241F4 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE14_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE15_VERSION 0x008241F8 /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE15_VERSION_DATA 15:0 /* RWIVF */ +#define NV_FUSE_OPT_FPF_GSP_UCODE16_VERSION 0x008241FC /* RW-4R */ +#define NV_FUSE_OPT_FPF_GSP_UCODE16_VERSION_DATA 15:0 /* RWIVF */ + +#endif // __ga100_dev_fuse_h__ diff --git 
a/src/common/inc/swref/published/ampere/ga100/dev_mmu.h b/src/common/inc/swref/published/ampere/ga100/dev_mmu.h new file mode 100644 index 000000000..c7ee3e0d1 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_mmu.h @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_mmu_h__ +#define __ga100_dev_mmu_h__ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define 
NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+20+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x09 /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0x0F /* R---V */ +#endif // __ga100_dev_mmu_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_nv_xve.h b/src/common/inc/swref/published/ampere/ga100/dev_nv_xve.h new file mode 100644 index 000000000..181c5b834 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_nv_xve.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
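/*
 * Editor's note (illustrative, not part of the original header): a minimal
 * sketch of packing the two-word PTE described by the NV_MMU_PTE_* fields
 * above, using plain shifts rather than the driver's own field-access macros.
 * The function name and accessor layout are hypothetical, and a real mapping
 * would also program fields such as KIND and VOL; this only shows how the
 * VALID, ADDRESS_SYS (bits 31:4 of word 0) and APERTURE (bits 2:1 of word 1)
 * ranges compose, assuming the 4KB-aligned address fits the 28-bit field.
 */
#include <stdint.h>

static void pack_sysmem_pte_4k(uint64_t phys_addr, uint32_t pte[2])
{
    /* Word 0: VALID (bit 0) plus the address shifted into bits 31:4. */
    pte[0] = NV_MMU_PTE_VALID_TRUE |
             (uint32_t)((phys_addr >> NV_MMU_PTE_ADDRESS_SHIFT) << 4);

    /* Word 1: aperture bits 2:1 select coherent system memory. */
    pte[1] = NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY << 1;
}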
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_nv_xve_h__ +#define __ga100_dev_nv_xve_h__ +#define NV_PCFG 0x00088FFF:0x00088000 /* RW--D */ +#define NV_XVE_LINK_CONTROL_STATUS 0x00000088 /* RW-4R */ +#define NV_XVE_LINK_CONTROL_STATUS_LINK_SPEED 19:16 /* R--VF */ +#endif diff --git a/src/common/inc/swref/published/ampere/ga100/dev_nv_xve_addendum.h b/src/common/inc/swref/published/ampere/ga100/dev_nv_xve_addendum.h new file mode 100644 index 000000000..b49a496f2 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_nv_xve_addendum.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_nv_xve_addendum_h__ +#define __ga100_dev_nv_xve_addendum_h__ + +// +// Extra config bits that can be emulated by the hypervisor for passthrough. +// This offset is unused in HW and HW returns 0x0 on read. +// +#define NV_XVE_PASSTHROUGH_EMULATED_CONFIG 0xE8 + +// +// On GA100, we need to be able to detect the case where the GPU is running at +// gen4, but the root port is at gen3. On baremetal, we just check the root +// port directly, but for passthrough root port is commonly completely hidden +// or fake. To handle this case we support the hypervisor explicitly +// communicating the speed to us through emulated config space. The +// ROOT_PORT_SPEED field follows the usual link speed encoding with the +// numerical value matching the gen speed, i.e. gen3 is 0x3. +// See bug 2927491 for more details. 
+// +#define NV_XVE_PASSTHROUGH_EMULATED_CONFIG_ROOT_PORT_SPEED 3:0 + +#endif diff --git a/src/common/inc/swref/published/ampere/ga100/dev_nvdec_addendum.h b/src/common/inc/swref/published/ampere/ga100/dev_nvdec_addendum.h new file mode 100644 index 000000000..23702d27a --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_nvdec_addendum.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_nvdec_addendum_h__ +#define __ga100_dev_nvdec_addendum_h__ + +#define NV_PNVDEC_FBIF_BASE(dev) (0x00848600+(dev)*16384) + +#endif // __ga100_dev_nvdec_addendum_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_nvdec_pri.h b/src/common/inc/swref/published/ampere/ga100/dev_nvdec_pri.h new file mode 100644 index 000000000..c883b6632 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_nvdec_pri.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
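/*
 * Editor's note (illustrative, not part of the original header): a sketch of
 * how the hypervisor-emulated config field described above might be consumed.
 * pci_cfg_read32() is a hypothetical config-space accessor; since HW returns
 * 0x0 at this offset, a zero result means no root-port speed was provided.
 */
#include <stdint.h>

uint32_t pci_cfg_read32(uint32_t offset); /* hypothetical accessor */

static uint32_t emulated_root_port_gen(void)
{
    uint32_t cfg = pci_cfg_read32(NV_XVE_PASSTHROUGH_EMULATED_CONFIG);

    /* ROOT_PORT_SPEED occupies bits 3:0; e.g. 0x3 indicates a gen3 root port. */
    return cfg & 0xF;
}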
+ */ +#ifndef __ga100_dev_nvdec_pri_h__ +#define __ga100_dev_nvdec_pri_h__ + +#define NV_PNVDEC(dev) 0x0084bfff+(dev)*16384:0x00848000+(dev)*16384 /* RW--D */ + +#endif // __ga100_dev_nvdec_pri_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_ram.h b/src/common/inc/swref/published/ampere/ga100/dev_ram.h new file mode 100644 index 000000000..50f2d91d6 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_ram.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_ram_h__ +#define __ga100_dev_ram_h__ +#define NV_RAMIN_ALLOC_SIZE 4096 /* */ +#define NV_RAMRL_ENTRY_CHAN_USERD_PTR_LO (31+0*32):(8+0*32) /* RWXUF */ +#define NV_RAMRL_ENTRY_CHAN_USERD_PTR_HI_HW (7+1*32):(0+1*32) /* RWXUF */ +#define NV_RAMRL_ENTRY_BASE_SHIFT 12 /* */ +#define NV_RAMUSERD_PUT (16*32+31):(16*32+0) /* RW-UF */ +#define NV_RAMUSERD_GET (17*32+31):(17*32+0) /* RW-UF */ +#define NV_RAMUSERD_REF (18*32+31):(18*32+0) /* RW-UF */ +#define NV_RAMUSERD_PUT_HI (19*32+31):(19*32+0) /* RW-UF */ +#define NV_RAMUSERD_TOP_LEVEL_GET (22*32+31):(22*32+0) /* RW-UF */ +#define NV_RAMUSERD_TOP_LEVEL_GET_HI (23*32+31):(23*32+0) /* RW-UF */ +#define NV_RAMUSERD_GET_HI (24*32+31):(24*32+0) /* RW-UF */ +#define NV_RAMUSERD_GP_GET (34*32+31):(34*32+0) /* RW-UF */ +#define NV_RAMUSERD_GP_PUT (35*32+31):(35*32+0) /* RW-UF */ +#endif // __ga100_dev_ram_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_riscv_pri.h b/src/common/inc/swref/published/ampere/ga100/dev_riscv_pri.h new file mode 100644 index 000000000..77983dee7 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_riscv_pri.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the 
Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_riscv_pri_h__ +#define __ga100_dev_riscv_pri_h__ + +#define NV_FALCON2_GSP_BASE 0x00111000 +#define NV_PRISCV_RISCV_CORE_SWITCH_RISCV_STATUS 0x00000240 /* R-I4R */ +#define NV_PRISCV_RISCV_CORE_SWITCH_RISCV_STATUS_ACTIVE_STAT 0:0 /* R-IVF */ +#define NV_PRISCV_RISCV_CORE_SWITCH_RISCV_STATUS_ACTIVE_STAT_ACTIVE 0x00000001 /* R---V */ +#define NV_PRISCV_RISCV_IRQMASK 0x000002b4 /* R-I4R */ +#define NV_PRISCV_RISCV_IRQDEST 0x000002b8 /* RW-4R */ + +#endif // __ga100_dev_riscv_pri_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_runlist.h b/src/common/inc/swref/published/ampere/ga100/dev_runlist.h new file mode 100644 index 000000000..b5d4de302 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_runlist.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __ga100_dev_runlist_h__ +#define __ga100_dev_runlist_h__ +#define NV_CHRAM_CHANNEL(i) (0x000+(i)*4) /* RW-4A */ +#endif // __ga100_dev_runlist_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_sec_addendum.h b/src/common/inc/swref/published/ampere/ga100/dev_sec_addendum.h new file mode 100644 index 000000000..c7ecf9339 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_sec_addendum.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __ga100_dev_sec_addendum_h__ +#define __ga100_dev_sec_addendum_h__ + +#define NV_PSEC_FBIF_BASE 0x00840600 + +#endif // __ga100_dev_sec_addendum_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_sec_pri.h b/src/common/inc/swref/published/ampere/ga100/dev_sec_pri.h new file mode 100644 index 000000000..b1bf5421f --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_sec_pri.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __ga100_dev_sec_pri_h__ +#define __ga100_dev_sec_pri_h__ + +#define NV_PSEC 0x843fff:0x840000 /* RW--D */ +#define NV_PSEC_FALCON_ENGINE 0x008403c0 /* RW-4R */ +#define NV_PSEC_FALCON_ENGINE_RESET 0:0 /* RWIVF */ +#define NV_PSEC_FALCON_ENGINE_RESET_TRUE 0x00000001 /* RW--V */ +#define NV_PSEC_FALCON_ENGINE_RESET_FALSE 0x00000000 /* RWI-V */ + +#define NV_PSEC_MAILBOX(i) (0x00840804+(i)*4) /* RW-4A */ +#define NV_PSEC_MAILBOX__SIZE_1 4 /* */ +#define NV_PSEC_MAILBOX_DATA 31:0 /* RWIVF */ +#define NV_PSEC_MAILBOX_DATA_INIT 0x00000000 /* RWI-V */ + +#endif // __ga100_dev_sec_pri_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_timer.h b/src/common/inc/swref/published/ampere/ga100/dev_timer.h new file mode 100644 index 000000000..98cef396d --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_timer.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_timer_h__ +#define __ga100_dev_timer_h__ +#define NV_PTIMER_ALARM_INTR 0x00009140 /* RW-4R */ +#define NV_PTIMER_ALARM_INTR_MASK 0:0 /* RWIVF */ +#define NV_PTIMER_ALARM_INTR_MASK_DISABLED 0x00000000 /* RWI-V */ +#define NV_PTIMER_ALARM_INTR_MASK_ENABLED 0x00000001 /* RW--V */ +#endif // __ga100_dev_timer_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_vm.h b/src/common/inc/swref/published/ampere/ga100/dev_vm.h new file mode 100644 index 000000000..190bdd5a6 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_vm.h @@ -0,0 +1,131 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga100_dev_vm_h__ +#define __ga100_dev_vm_h__ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP(i) (0x1600+(i)*4) /* R--4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_VALUE 31:0 /* R--VF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE_INTR_PENDING 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE_INTR_NOT_PENDING 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(i) (0x1608+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_ENABLE 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_ENABLED 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_DISABLED 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR(i) (0x1610+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_DISABLE 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_ENABLED 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_DISABLED 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(i) (0x1000+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_VALUE 31:0 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_VALUE_INIT 0x00000000 /* R---V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(i) (0x1200+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR(i) (0x1400+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_TRIGGER 0x00001640 /* -W-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_TRIGGER_VECTOR 11:0 /* -W-VF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE 0x000030B0 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_VA 
0:0 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_VA_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_VA_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_PDB 1:1 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_PDB_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_PDB_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_HUBTLB_ONLY 2:2 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_HUBTLB_ONLY_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_HUBTLB_ONLY_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY 5:3 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_NONE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_START 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_SYS_MEMBAR 6:6 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_SYS_MEMBAR_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_SYS_MEMBAR_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ACK 8:7 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ACK_NONE_REQUIRED 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ACK_INTRANODE 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ACK_GLOBALLY 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_CLIENT_ID 14:9 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_GPC_ID 19:15 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_INVAL_SCOPE 16:15 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_INVAL_SCOPE_ALL_TLBS 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_INVAL_SCOPE_LINK_TLBS 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_INVAL_SCOPE_NON_LINK_TLBS 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_CLIENT_TYPE 20:20 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_CLIENT_TYPE_GPC 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_CLIENT_TYPE_HUB 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_PASID 21:21 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_PASID_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_PASID_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_SIZE 22:22 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_SIZE_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_SIZE_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PROP_FLUSH 23:23 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PROP_FLUSH_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PROP_FLUSH_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL 26:24 /* RWXVF */ +#define 
NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_ALL 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_PTE_ONLY 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE0 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE1 0x00000003 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE2 0x00000004 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE3 0x00000005 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE4 0x00000006 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE5 0x00000007 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_READ 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_WRITE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_ATOMIC_STRONG 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_RSVRVD 0x00000003 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_ATOMIC_WEAK 0x00000004 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_ATOMIC_ALL 0x00000005 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_WRITE_AND_ATOMIC 0x00000006 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_ALL 0x00000007 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_TRIGGER 31:31 /* -WEVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_TRIGGER_FALSE 0x00000000 /* -WE-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_TRIGGER_TRUE 0x00000001 /* -W--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_MAX_CACHELINE_SIZE 0x00000010 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_DOORBELL 0x2200 /* -W-4R */ +#define NV_VIRTUAL_FUNCTION_TIME_0 0x30080 /* R--4R */ +#define NV_VIRTUAL_FUNCTION_TIME_0_NSEC 31:5 /* R-XUF */ +#define NV_VIRTUAL_FUNCTION_TIME_1 0x30084 /* R--4R */ +#define NV_VIRTUAL_FUNCTION_TIME_1_NSEC 28:0 /* R-XUF */ +#endif // __ga100_dev_vm_h__ diff --git a/src/common/inc/swref/published/ampere/ga100/dev_vm_addendum.h b/src/common/inc/swref/published/ampere/ga100/dev_vm_addendum.h new file mode 100644 index 000000000..63fd61357 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga100/dev_vm_addendum.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
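/*
 * Editor's note (illustrative, not part of the original header): a minimal
 * sketch of composing a "flush everything" value for the MMU invalidate
 * register above, using plain shifts that mirror the listed bit ranges
 * (ALL_VA bit 0, ALL_PDB bit 1, TRIGGER bit 31). Leaving REPLAY and ACK at
 * zero corresponds to REPLAY_NONE / ACK_NONE_REQUIRED. vf_reg_write32() is a
 * hypothetical accessor for the virtual-function privileged register space.
 */
#include <stdint.h>

void vf_reg_write32(uint32_t offset, uint32_t value); /* hypothetical */

static void mmu_invalidate_all(void)
{
    uint32_t val = 0;

    val |= (uint32_t)NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_VA_TRUE  << 0;
    val |= (uint32_t)NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_PDB_TRUE << 1;
    val |= (uint32_t)NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_TRIGGER_TRUE << 31;

    vf_reg_write32(NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE, val);
}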
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef ga100_dev_vm_addendum_h +#define ga100_dev_vm_addendum_h + +// +// Compile time asserts in the source code files will ensure that +// these don't end up exceeding the range of the top level registers. +// + +// Subtrees at CPU_INTR top level for UVM owned interrupts +#define NV_CPU_INTR_UVM_SUBTREE_START NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(1) +#define NV_CPU_INTR_UVM_SUBTREE_LAST NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(1) + +#define NV_CPU_INTR_UVM_SHARED_SUBTREE_START NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(2) +#define NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(2) + +// +// Subtrees at CPU_INTR top level for all stall interrupts from host-driven +// engines +// +#define NV_CPU_INTR_STALL_SUBTREE_START NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(3) +#define NV_CPU_INTR_STALL_SUBTREE_LAST NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(3) + +#endif // ga100_dev_vm_addendum_h diff --git a/src/common/inc/swref/published/ampere/ga102/dev_boot.h b/src/common/inc/swref/published/ampere/ga102/dev_boot.h new file mode 100644 index 000000000..4925c38ff --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_boot.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
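/*
 * Editor's note (illustrative, not part of the original header): the
 * dev_vm_addendum.h comment above relies on compile-time asserts elsewhere in
 * the driver; a stand-alone equivalent using C11 _Static_assert might look
 * like this. Only the range check itself (subtree index staying below
 * NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE__SIZE_1) is taken from the
 * headers; the message and placement are hypothetical.
 */
_Static_assert(NV_CPU_INTR_STALL_SUBTREE_LAST <
               NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE__SIZE_1,
               "stall interrupt subtree exceeds the CPU_INTR_TOP range");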
+ */ + +#ifndef __ga102_dev_boot_h__ +#define __ga102_dev_boot_h__ +#define NV_PMC_ENABLE 0x00000200 /* RW-4R */ +#define NV_PMC_ENABLE_DEVICE(i) (i):(i) /* */ +#define NV_PMC_ENABLE_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_ENABLE_DEVICE_DISABLE 0x00000000 /* */ +#define NV_PMC_ENABLE_DEVICE_ENABLE 0x00000001 /* */ +#define NV_PMC_ENABLE_NVDEC 15:15 /* */ +#define NV_PMC_ENABLE_NVDEC_DISABLED 0x00000000 /* */ +#define NV_PMC_ENABLE_NVDEC_ENABLED 0x00000001 /* */ +#define NV_PMC_DEVICE_ENABLE(i) (0x000000600+(i)*4) /* RW-4A */ +#define NV_PMC_DEVICE_ENABLE__SIZE_1 1 /* */ +#define NV_PMC_DEVICE_ENABLE__PRIV_LEVEL_MASK 0x00000084 /* */ +#define NV_PMC_DEVICE_ENABLE_STATUS 31:0 /* RWIVF */ +#define NV_PMC_DEVICE_ENABLE_STATUS_DISABLE_ALL 0x00000000 /* RWI-V */ +#define NV_PMC_DEVICE_ENABLE_STATUS_BIT(i) (i):(i) /* */ +#define NV_PMC_DEVICE_ENABLE_STATUS_BIT__SIZE_1 26 /* */ +#define NV_PMC_DEVICE_ENABLE_STATUS_BIT_DISABLE 0x00000000 /* */ +#define NV_PMC_DEVICE_ENABLE_STATUS_BIT_ENABLE 0x00000001 /* */ +#endif // __ga102_dev_boot_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_ce.h b/src/common/inc/swref/published/ampere/ga102/dev_ce.h new file mode 100644 index 000000000..8287fef8f --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_ce.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
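/*
 * Editor's note (illustrative, not part of the original header): a
 * read-modify-write sketch for the per-device enable bits exposed through
 * NV_PMC_DEVICE_ENABLE above, where STATUS_BIT(i) is the single bit (i):(i)
 * within each 32-bit word. gpu_reg_read32()/gpu_reg_write32() and the
 * function name are hypothetical.
 */
#include <stdint.h>

uint32_t gpu_reg_read32(uint32_t offset);                  /* hypothetical */
void     gpu_reg_write32(uint32_t offset, uint32_t value); /* hypothetical */

static void pmc_device_enable(uint32_t device_bit)
{
    uint32_t reg = NV_PMC_DEVICE_ENABLE(device_bit / 32);
    uint32_t val = gpu_reg_read32(reg);

    val |= 1u << (device_bit % 32); /* set STATUS_BIT(device_bit % 32) */
    gpu_reg_write32(reg, val);
}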
+ */ + +#ifndef __ga102_dev_ce_h__ +#define __ga102_dev_ce_h__ +#define NV_CE_PCE_MAP 0x00104028 /* R--4R */ +#define NV_CE_PCE_MAP_VALUE 23:0 /* R-XVF */ +#define NV_CE_HSH_PCE_MASK 0x0010404c /* C--4R */ +#define NV_CE_HSH_PCE_MASK_VALUE 23:0 /* C--VF */ +#define NV_CE_PCE2LCE_CONFIG__SIZE_1 6 /* */ +#define NV_CE_PCE2LCE_CONFIG_PCE_ASSIGNED_LCE_NONE 0x0000000f /* RW--V */ +#define NV_CE_GRCE_CONFIG__SIZE_1 2 /* */ +#define NV_CE_GRCE_CONFIG_SHARED_LCE 3:0 /* RWIVF */ +#define NV_CE_GRCE_CONFIG_SHARED_LCE_NONE 0xf /* RW--V */ +#define NV_CE_GRCE_CONFIG_SHARED 30:30 /* RWIVF */ +#endif diff --git a/src/common/inc/swref/published/ampere/ga102/dev_falcon_second_pri.h b/src/common/inc/swref/published/ampere/ga102/dev_falcon_second_pri.h new file mode 100644 index 000000000..8dc9a9c00 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_falcon_second_pri.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __ga102_dev_falcon_second_pri_h__ +#define __ga102_dev_falcon_second_pri_h__ + +#define NV_FALCON2_GSP_BASE 0x00111000 +#define NV_FALCON2_NVDEC0_BASE 0x00849c00 +#define NV_FALCON2_SEC_BASE 0x00841000 +#define NV_PFALCON2_FALCON_MOD_SEL 0x00000180 /* RWI4R */ +#define NV_PFALCON2_FALCON_MOD_SEL_ALGO 7:0 /* RWIVF */ +#define NV_PFALCON2_FALCON_MOD_SEL_ALGO_RSA3K 0x00000001 /* RW--V */ +#define NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID 0x00000198 /* RWI4R */ +#define NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID_VAL 7:0 /* RWIVF */ +#define NV_PFALCON2_FALCON_BROM_ENGIDMASK 0x0000019c /* RWI4R */ +#define NV_PFALCON2_FALCON_BROM_PARAADDR(i) (0x00000210+(i)*4) /* RWI4A */ + +#endif // __ga102_dev_falcon_second_pri_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_falcon_v4.h b/src/common/inc/swref/published/ampere/ga102/dev_falcon_v4.h new file mode 100644 index 000000000..70e3aa252 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_falcon_v4.h @@ -0,0 +1,126 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __ga102_dev_falcon_v4_h__ +#define __ga102_dev_falcon_v4_h__ + +#define NV_PFALCON_FALCON_IRQSCLR 0x00000004 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQSCLR_HALT 4:4 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_HALT_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN0 6:6 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN0_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN1 7:7 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN1_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSTAT 0x00000008 /* R--4R */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT 4:4 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0 6:6 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1 7:7 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER(i) (0x000003e8+(i)*4) /* -W-4A */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER__SIZE_1 2 /* */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER_TRIGGER 0:0 /* -W-VF */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER_TRIGGER_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQMSET 0x00000010 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQMCLR 0x00000014 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQMASK 0x00000018 /* R--4R */ +#define NV_PFALCON_FALCON_IRQDEST 0x0000001c /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */ +#define NV_PFALCON_FALCON_DMACTL 0x0000010c /* RW-4R */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX 0:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING 1:1 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING 2:2 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFBASE 0x00000110 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFBASE_BASE 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFMOFFS 0x00000114 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFMOFFS_OFFS 23:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFCMD 0x00000118 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFCMD_FULL 0:0 /* R-XVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_FULL_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFCMD_IDLE 1:1 /* R-XVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IDLE_FALSE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFCMD_SEC 3:2 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM 4:4 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE 5:5 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_SIZE 10:8 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SIZE_256B 0x00000006 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_CTXDMA 14:12 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SET_DMTAG 16:16 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SET_DMTAG_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFFBOFFS 0x0000011c /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFFBOFFS_OFFS 31:0 /* RWIVF */ +#define 
NV_PFALCON_FALCON_DMATRFBASE1 0x00000128 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFBASE1_BASE 8:0 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC(i) (0x00000180+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMC_OFFS 7:2 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_BLK 23:8 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_AINCW 24:24 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_SECURE 28:28 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMD(i) (0x00000184+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMD_DATA 31:0 /* RWXVF */ +#define NV_PFALCON_FALCON_IMEMT(i) (0x00000188+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMT_TAG 15:0 /* RWXVF */ +#define NV_PFALCON_FALCON_DMEMC(i) (0x000001c0+(i)*8) /* RW-4A */ +#define NV_PFALCON_FALCON_DMEMC_OFFS 7:2 /* */ +#define NV_PFALCON_FALCON_DMEMC_BLK 23:8 /* */ +#define NV_PFALCON_FALCON_DMEMC_AINCW 24:24 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMD(i) (0x000001c4+(i)*8) /* RW-4A */ +#define NV_PFALCON_FALCON_DMEMD_DATA 31:0 /* RWXVF */ +#define NV_PFALCON_FALCON_HWCFG 0x00000108 /* R--4R */ +#define NV_PFALCON_FALCON_HWCFG_IMEM_SIZE 8:0 /* R--VF */ +#define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* R--4R */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV 10:10 /* R--VF */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG2_MEM_SCRUBBING 12:12 /* R--VF */ +#define NV_PFALCON_FALCON_HWCFG2_MEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_OS 0x00000080 /* RW-4R */ +#define NV_PFALCON_FALCON_RM 0x00000084 /* RW-4R */ +#define NV_PFALCON_FALCON_DEBUGINFO 0x00000094 /* RW-4R */ +#define NV_PFALCON_FALCON_CPUCTL 0x00000100 /* RW-4R */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED 4:4 /* R-XVF */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN 6:6 /* RWIVF */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS 0x00000130 /* -W-4R */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_BOOTVEC 0x00000104 /* RW-4R */ + +#endif // __ga102_dev_falcon_v4_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_falcon_v4_addendum.h b/src/common/inc/swref/published/ampere/ga102/dev_falcon_v4_addendum.h new file mode 100644 index 000000000..51f29b38b --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_falcon_v4_addendum.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, 
distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga102_dev_falcon_v4_addendum_h__ +#define __ga102_dev_falcon_v4_addendum_h__ + +#define NV_PFALCON_FALCON_HWCFG2_RESET_READY 31:31 +#define NV_PFALCON_FALCON_HWCFG2_RESET_READY_TRUE 0x00000001 +#define NV_PFALCON_FALCON_HWCFG2_RESET_READY_FALSE 0x00000000 + +#endif diff --git a/src/common/inc/swref/published/ampere/ga102/dev_fbif_v4.h b/src/common/inc/swref/published/ampere/ga102/dev_fbif_v4.h new file mode 100644 index 000000000..f937a44b4 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_fbif_v4.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
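/*
 * Editor's note (illustrative, not part of the original header): a sketch of
 * the classic Falcon IMEM load pattern suggested by the IMEMC/IMEMD/IMEMT
 * definitions above -- program the destination block and offset with
 * auto-increment-on-write, then stream 32-bit words through IMEMD, writing a
 * tag for every 256-byte block. falcon_reg_write32() and the parameter names
 * are hypothetical, and offsets are relative to the falcon's register base.
 */
#include <stddef.h>
#include <stdint.h>

void falcon_reg_write32(uint32_t offset, uint32_t value); /* hypothetical */

static void falcon_load_imem(const uint32_t *ucode, size_t num_words,
                             uint32_t dst_blk, uint32_t tag)
{
    size_t i;

    /* Port 0: start at block 'dst_blk' (bits 23:8), offset 0, AINCW set. */
    falcon_reg_write32(NV_PFALCON_FALCON_IMEMC(0),
                       (dst_blk << 8) |
                       ((uint32_t)NV_PFALCON_FALCON_IMEMC_AINCW_TRUE << 24));

    for (i = 0; i < num_words; i++) {
        /* A new 256-byte IMEM block begins every 64 words; tag it first. */
        if ((i % 64) == 0)
            falcon_reg_write32(NV_PFALCON_FALCON_IMEMT(0), tag++);
        falcon_reg_write32(NV_PFALCON_FALCON_IMEMD(0), ucode[i]);
    }
}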
+ */ + +#ifndef __ga102_dev_fbif_v4_h__ +#define __ga102_dev_fbif_v4_h__ + +#define NV_PFALCON_FBIF_TRANSCFG(i) (0x00000000+(i)*4) /* RW-4A */ +#define NV_PFALCON_FBIF_TRANSCFG__SIZE_1 8 /* */ +#define NV_PFALCON_FBIF_TRANSCFG_TARGET 1:0 /* RWIVF */ +#define NV_PFALCON_FBIF_TRANSCFG_TARGET_COHERENT_SYSMEM 0x00000001 /* R---V */ +#define NV_PFALCON_FBIF_TRANSCFG_MEM_TYPE 2:2 /* RWIVF */ +#define NV_PFALCON_FBIF_TRANSCFG_MEM_TYPE_PHYSICAL 0x00000001 /* R---V */ +#define NV_PFALCON_FBIF_CTL 0x00000024 /* RW-4R */ +#define NV_PFALCON_FBIF_CTL_ALLOW_PHYS_NO_CTX 7:7 /* RWIVF */ +#define NV_PFALCON_FBIF_CTL_ALLOW_PHYS_NO_CTX_ALLOW 0x00000001 /* RW--V */ + +#endif // __ga102_dev_fbif_v4_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_gc6_island.h b/src/common/inc/swref/published/ampere/ga102/dev_gc6_island.h new file mode 100644 index 000000000..c4f7805c6 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_gc6_island.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __ga102_dev_gc6_island_h__ +#define __ga102_dev_gc6_island_h__ + +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK 0x00118128 /* RW-4R */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION 3:0 /* RWIVF */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_03(i) (0x00118214+(i)*4) /* RW-4A */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05(i) (0x00118234+(i)*4) /* RW-4A */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_42 0x001183a4 /* RW-4R */ + +#endif // __ga102_dev_gc6_island_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_gc6_island_addendum.h b/src/common/inc/swref/published/ampere/ga102/dev_gc6_island_addendum.h new file mode 100644 index 000000000..ac2531921 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_gc6_island_addendum.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __ga102_dev_gc6_island_addendum_h__ +#define __ga102_dev_gc6_island_addendum_h__ + +#define NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0 NV_PGC6_AON_SECURE_SCRATCH_GROUP_03(0) +#define NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE 15:0 +#define NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K 0x100 +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT NV_PGC6_AON_SECURE_SCRATCH_GROUP_05(0) +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS 7:0 +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED 0x000000FF +#define NV_USABLE_FB_SIZE_IN_MB NV_PGC6_AON_SECURE_SCRATCH_GROUP_42 +#define NV_USABLE_FB_SIZE_IN_MB_VALUE 31:0 +#define NV_USABLE_FB_SIZE_IN_MB_VALUE_INIT 0 + +#endif // __ga102_dev_gc6_island_addendum_h__ + diff --git a/src/common/inc/swref/published/ampere/ga102/dev_gsp.h b/src/common/inc/swref/published/ampere/ga102/dev_gsp.h new file mode 100644 index 000000000..7d76b9b5f --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_gsp.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __ga102_dev_gsp_h__ +#define __ga102_dev_gsp_h__ + +#define NV_PGSP 0x113fff:0x110000 /* RW--D */ +#define NV_PGSP_FALCON_MAILBOX0 0x110040 /* RW-4R */ +#define NV_PGSP_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */ +#define NV_PGSP_FALCON_MAILBOX1 0x110044 /* RW-4R */ +#define NV_PGSP_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */ +#define NV_PGSP_FALCON_ENGINE 0x1103c0 /* RW-4R */ +#define NV_PGSP_FALCON_ENGINE_RESET 0:0 /* RWIVF */ +#define NV_PGSP_FALCON_ENGINE_RESET_TRUE 0x00000001 /* RW--V */ +#define NV_PGSP_FALCON_ENGINE_RESET_FALSE 0x00000000 /* RWI-V */ +#define NV_PGSP_MAILBOX(i) (0x110804+(i)*4) /* RW-4A */ +#define NV_PGSP_MAILBOX__SIZE_1 4 /* */ +#define NV_PGSP_MAILBOX_DATA 31:0 /* RWIVF */ +#define NV_PGSP_QUEUE_HEAD(i) (0x110c00+(i)*8) /* RW-4A */ +#define NV_PGSP_QUEUE_HEAD__SIZE_1 8 /* */ +#define NV_PGSP_QUEUE_HEAD_ADDRESS 31:0 /* RWIVF */ + +#endif // __ga102_dev_gsp_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_gsp_addendum.h b/src/common/inc/swref/published/ampere/ga102/dev_gsp_addendum.h new file mode 100644 index 000000000..7f9c428bc --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_gsp_addendum.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __ga102_dev_gsp_addendum_h__ +#define __ga102_dev_gsp_addendum_h__ + +#define NV_PGSP_FBIF_BASE 0x110600 + +#endif // __ga102_dev_gsp_addendum_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_nvdec_addendum.h b/src/common/inc/swref/published/ampere/ga102/dev_nvdec_addendum.h new file mode 100644 index 000000000..b59c25e0b --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_nvdec_addendum.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga102_dev_nvdec_addendum_h__ +#define __ga102_dev_nvdec_addendum_h__ + +#define NV_PNVDEC_FBIF_BASE(dev) (0x00848600+(dev)*16384) + +#endif // __ga102_dev_nvdec_addendum_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_nvdec_pri.h b/src/common/inc/swref/published/ampere/ga102/dev_nvdec_pri.h new file mode 100644 index 000000000..93bc3ccb8 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_nvdec_pri.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __ga102_dev_nvdec_pri_h__ +#define __ga102_dev_nvdec_pri_h__ + +#define NV_PNVDEC(dev) 0x0084bfff+(dev)*16384:0x00848000+(dev)*16384 /* RW--D */ + +#endif // __ga102_dev_nvdec_pri_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_ram.h b/src/common/inc/swref/published/ampere/ga102/dev_ram.h new file mode 100644 index 000000000..659166246 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_ram.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga102_dev_ram_h__ +#define __ga102_dev_ram_h__ +#define NV_RAMRL_ENTRY_BASE_SHIFT 10 /* */ +#endif // __ga102_dev_ram_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_riscv_pri.h b/src/common/inc/swref/published/ampere/ga102/dev_riscv_pri.h new file mode 100644 index 000000000..b6c41f08e --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_riscv_pri.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __ga102_dev_riscv_pri_h__ +#define __ga102_dev_riscv_pri_h__ + +#define NV_FALCON2_GSP_BASE 0x00111000 +#define NV_PRISCV_RISCV_IRQMASK 0x00000528 /* R-I4R */ +#define NV_PRISCV_RISCV_IRQDEST 0x0000052c /* RW-4R */ +#define NV_PRISCV_RISCV_CPUCTL 0x00000388 /* RWI4R */ +#define NV_PRISCV_RISCV_CPUCTL_ACTIVE_STAT 7:7 /* R-IVF */ +#define NV_PRISCV_RISCV_CPUCTL_ACTIVE_STAT_ACTIVE 0x00000001 /* R---V */ +#define NV_PRISCV_RISCV_BCR_CTRL 0x00000668 /* RWI4R */ +#define NV_PRISCV_RISCV_BCR_CTRL_VALID 0:0 /* R-IVF */ +#define NV_PRISCV_RISCV_BCR_CTRL_VALID_TRUE 0x00000001 /* R---V */ +#define NV_PRISCV_RISCV_BCR_CTRL_VALID_FALSE 0x00000000 /* R-I-V */ +#define NV_PRISCV_RISCV_BCR_CTRL_CORE_SELECT 4:4 /* RWIVF */ +#define NV_PRISCV_RISCV_BCR_CTRL_CORE_SELECT_FALCON 0x00000000 /* RWI-V */ +#define NV_PRISCV_RISCV_BCR_CTRL_CORE_SELECT_RISCV 0x00000001 /* RW--V */ +#define NV_PRISCV_RISCV_BCR_CTRL_BRFETCH 8:8 /* RWIVF */ +#define NV_PRISCV_RISCV_BCR_CTRL_BRFETCH_TRUE 0x00000001 /* RWI-V */ +#define NV_PRISCV_RISCV_BCR_CTRL_BRFETCH_FALSE 0x00000000 /* RW--V */ + +#endif // __ga102_dev_riscv_pri_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_sec_addendum.h b/src/common/inc/swref/published/ampere/ga102/dev_sec_addendum.h new file mode 100644 index 000000000..0435cc0d5 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_sec_addendum.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __ga102_dev_sec_addendum_h__ +#define __ga102_dev_sec_addendum_h__ + +#define NV_PSEC_FBIF_BASE 0x00840600 + +#endif // __ga102_dev_sec_addendum_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_sec_pri.h b/src/common/inc/swref/published/ampere/ga102/dev_sec_pri.h new file mode 100644 index 000000000..92e252e32 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_sec_pri.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga102_dev_sec_pri_h__ +#define __ga102_dev_sec_pri_h__ + +#define NV_PSEC 0x843fff:0x840000 /* RW--D */ +#define NV_PSEC_FALCON_ENGINE 0x008403c0 /* RW-4R */ +#define NV_PSEC_FALCON_ENGINE_RESET 0:0 /* RWIVF */ +#define NV_PSEC_FALCON_ENGINE_RESET_TRUE 0x00000001 /* RW--V */ +#define NV_PSEC_FALCON_ENGINE_RESET_FALSE 0x00000000 /* RWI-V */ + +#define NV_PSEC_MAILBOX(i) (0x00840804+(i)*4) /* RW-4A */ +#define NV_PSEC_MAILBOX__SIZE_1 4 /* */ +#define NV_PSEC_MAILBOX_DATA 31:0 /* RWIVF */ +#define NV_PSEC_MAILBOX_DATA_INIT 0x00000000 /* RWI-V */ + +#endif // __ga102_dev_sec_pri_h__ diff --git a/src/common/inc/swref/published/ampere/ga102/dev_vm.h b/src/common/inc/swref/published/ampere/ga102/dev_vm.h new file mode 100644 index 000000000..cf07e6f50 --- /dev/null +++ b/src/common/inc/swref/published/ampere/ga102/dev_vm.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ga102_dev_vm_h__ +#define __ga102_dev_vm_h__ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP(i) (0x1600+(i)*4) /* R--4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_VALUE 31:0 /* R--VF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE_INTR_PENDING 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE_INTR_NOT_PENDING 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(i) (0x1608+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_ENABLE 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_ENABLED 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_DISABLED 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR(i) (0x1610+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_DISABLE 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_ENABLED 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_DISABLED 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(i) (0x1000+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_VALUE 31:0 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_VALUE_INIT 0x00000000 /* R---V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(i) (0x1200+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR(i) (0x1400+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_TRIGGER 0x00001640 /* -W-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_TRIGGER_VECTOR 11:0 /* -WXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_DOORBELL 0x2200 /* -W-4R */ +#endif // __ga102_dev_vm_h__ diff --git a/src/common/inc/swref/published/br03/dev_br03_xvd.h b/src/common/inc/swref/published/br03/dev_br03_xvd.h new file mode 100644 index 000000000..bbad0fed8 --- /dev/null +++ b/src/common/inc/swref/published/br03/dev_br03_xvd.h @@ -0,0 +1,37 @@ 
+/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef DEV_BR03_XVD_H +#define DEV_BR03_XVD_H + +#define NV_BR03_XVD_LINK_CTRLSTAT 0x00000070 /* RWI4R */ +#define NV_BR03_XVD_XP_0 0x00000B00 /* RW-4R */ +#define NV_BR03_XVD_XP_0_UPDATE_FC_THRESHOLD 9:1 /* RWIVF */ +#define NV_BR03_XVD_XP_0_UPDATE_FC_THRESHOLD_INIT 0x00000000 /* RWI-V */ +#define NV_BR03_XVD_XP_0_OPPORTUNISTIC_ACK 29:29 /* RWIVF */ +#define NV_BR03_XVD_XP_0_OPPORTUNISTIC_ACK_INIT 0x00000000 /* RWI-V */ +#define NV_BR03_XVD_XP_0_OPPORTUNISTIC_UPDATE_FC 30:30 /* RWIVF */ +#define NV_BR03_XVD_XP_0_OPPORTUNISTIC_UPDATE_FC_INIT 0x00000000 /* RWI-V */ + +#endif /* DEV_BR03_XVD_H */ + diff --git a/src/common/inc/swref/published/br03/dev_br03_xvu.h b/src/common/inc/swref/published/br03/dev_br03_xvu.h new file mode 100644 index 000000000..718c7f6f7 --- /dev/null +++ b/src/common/inc/swref/published/br03/dev_br03_xvu.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef DEV_BR03_XVU_H +#define DEV_BR03_XVU_H + +#define NV_BR03_XVU 0x00000FFF:0x00000000 /* RW--D */ +#define NV_BR03_XVU_DEV_ID 0x00000000 /* R--4R */ +#define NV_BR03_XVU_DEV_ID_DEVICE_ID 31:16 /* C--VF */ +#define NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03 0x000001B3 /* C---V */ +#define NV_BR03_XVU_REV_CC 0x00000008 /* R--4R */ +#define NV_BR03_XVU_MCC_REG_ALIAS 0x00000600 /* RW-4R */ +#define NV_BR03_XVU_MCC_REG_ALIAS_ACCESS 0:0 /* RWIVF */ +#define NV_BR03_XVU_MCC_REG_ALIAS_ACCESS_INIT 0x00000000 /* RWI-V */ +#define NV_BR03_XVU_MCC_REG_ALIAS_ACCESS_DISABLED 0x00000000 /* RW--V */ +#define NV_BR03_XVU_MCC_REG_ALIAS_ACCESS_ENABLED 0x00000001 /* RW--V */ +#define NV_BR03_XVU_MCC_REG_ALIAS_BASE_ADDRESS 31:14 /* RWIUF */ +#define NV_BR03_XVU_MCC_REG_ALIAS_BASE_ADDRESS_INIT 0x00000000 /* RWI-V */ +#define NV_BR03_XVU_UP0_INT_BUFSIZE_CPL 0x00000300 /* R--4R */ +#define NV_BR03_XVU_UP0_INT_BUFSIZE_CPL_H 7:0 /* R--UF */ +#define NV_BR03_XVU_UP0_INT_BUFSIZE_CPL_D 27:16 /* R--UF */ +#define NV_BR03_XVU_INT_FLOW_CTL 0x00000340 /* RW-4R */ +#define NV_BR03_XVU_INT_FLOW_CTL_DP0_TO_UP0_CPL 0x00000350 /* RW-4R */ +#define NV_BR03_XVU_INT_FLOW_CTL_UP0_TO_MH0_PW 0x00000408 /* RW-4R */ +#define NV_BR03_XVU_ITX_ALLOCATION 0x00000500 /* RW-4R */ +#define NV_BR03_XVU_ITX_ALLOCATION_UP0 3:0 /* RWIUF */ +#define NV_BR03_XVU_ITX_ALLOCATION_UP0_INIT 0x00000001 /* RWI-V */ +#define NV_BR03_XVU_ITX_ALLOCATION_DP0 11:8 /* RWIUF */ +#define NV_BR03_XVU_ITX_ALLOCATION_DP0_INIT 0x00000001 /* RWI-V */ +#define NV_BR03_XVU_ITX_ALLOCATION_DP1 19:16 /* RWIUF */ +#define NV_BR03_XVU_ITX_ALLOCATION_DP1_INIT 0x00000001 /* RWI-V */ +#define NV_BR03_XVU_ITX_ALLOCATION_MH0 27:24 /* RWIUF */ +#define NV_BR03_XVU_ITX_ALLOCATION_MH0_INIT 0x00000001 /* RWI-V */ +#define NV_BR03_XVU_XP_0 0x00000B00 /* RW-4R */ +#define NV_BR03_XVU_XP_0_OPPORTUNISTIC_ACK 29:29 /* RWIVF */ +#define NV_BR03_XVU_XP_0_OPPORTUNISTIC_ACK_INIT 0x00000000 /* RWI-V */ +#define NV_BR03_XVU_XP_0_OPPORTUNISTIC_UPDATE_FC 30:30 /* RWIVF */ +#define NV_BR03_XVU_XP_0_OPPORTUNISTIC_UPDATE_FC_INIT 0x00000000 /* RWI-V */ + +#endif /* DEV_BR03_XVU_H */ + diff --git a/src/common/inc/swref/published/br04/br04_ref.h b/src/common/inc/swref/published/br04/br04_ref.h new file mode 100644 index 000000000..310e036e6 --- /dev/null +++ b/src/common/inc/swref/published/br04/br04_ref.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef BR04_REF_H
+#define BR04_REF_H
+
+#include "published/br04/dev_br04_xvd.h"
+#include "published/br04/dev_br04_xvu.h"
+
+// BR04 can be accessed through a window in GPU register space
+#define NV_BR04(i) (0x00018000 + (i * 0x00004000))
+// Config space access for downstream ports begins at 8Ki, with a port every 2Ki
+#define NV_BR04_XVD_OFFSET(i) ((1<<13) + (1<<11)*i)
+// Sizes of the total config space and of the upstream and downstream port config spaces
+#define NV_BR04_CONFIG_SIZE (1 << 14)
+#define NV_BR04_XVU_CONFIG_SIZE (1 << 12)
+#define NV_BR04_XVD_CONFIG_SIZE (1 << 11)
+
+// This enumeration is in the order of the _BUFSIZE_ registers; the FLOW_CTL
+// registers are in a different order.
+typedef enum {
+    BR04_PORT_UP0 = 0,
+    BR04_PORT_DP0,
+    BR04_PORT_DP1,
+    BR04_PORT_DP2,
+    BR04_PORT_DP3,
+    BR04_PORT_MH0,
+    NUM_BR04_PORTS
+} BR04_PORT;
+
+// Determine whether a device ID belongs to a BR04,
+// based on the assumption that XVD and XVU have the same device ID
+#define IS_DEVID_BR04(i) ((i >> 4) == (NV_BR04_XVU_DEV_ID_DEVICE_ID_BR04_0 >> 4))
+
+// Phantom address to use for HGPU P2P transfers
+#define HGPU_P2P_PHANTOM_BASE 0xf0f0f0f000000000LL
+
+#endif // BR04_REF_H diff --git a/src/common/inc/swref/published/br04/dev_br04_xvd.h b/src/common/inc/swref/published/br04/dev_br04_xvd.h new file mode 100644 index 000000000..2dd7cddf9 --- /dev/null +++ b/src/common/inc/swref/published/br04/dev_br04_xvd.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ */ + +#ifndef DEV_BR04_XVD_H +#define DEV_BR04_XVD_H + +#define NV_BR04_XVD_LINK_CTRLSTAT 0x00000070 /* RW-4R */ +#define NV_BR04_XVD_LINK_CTRLSTAT_ASPM_CTRL 1:0 /* RWIVF */ +#define NV_BR04_XVD_LINK_CTRLSTAT_ASPM_CTRL_DISABLED 0x00000000 /* RWI-V */ +#define NV_BR04_XVD_LINK_CTRLSTAT_ASPM_CTRL_L0S 0x00000001 /* RW--V */ +#define NV_BR04_XVD_LINK_CTRLSTAT_ASPM_CTRL_L1 0x00000002 /* RW--V */ +#define NV_BR04_XVD_LINK_CTRLSTAT_ASPM_CTRL_L0S_L1 0x00000003 /* RW--V */ +#define NV_BR04_XVD_LINK_CTRLSTAT2 0x00000090 /* RW-4R */ +#define NV_BR04_XVD_LINK_CTRLSTAT2_TARGET_LINK_SPEED 3:0 /* RWIVF */ +#define NV_BR04_XVD_LINK_CTRLSTAT2_TARGET_LINK_SPEED_2P5G 0x00000001 /* RW--V */ +#define NV_BR04_XVD_LINK_CTRLSTAT2_TARGET_LINK_SPEED_5P0G 0x00000002 /* RWI-V */ +#define NV_BR04_XVD_G2_PRIV_XP_LCTRL_2 0x0000046C /* RW-4R */ +#define NV_BR04_XVD_G2_PRIV_XP_LCTRL_2_ADVERTISED_RATE_CHANGE 1:1 /* CWIVF */ +#define NV_BR04_XVD_G2_PRIV_XP_LCTRL_2_ADVERTISED_RATE_CHANGE_ZERO 0x00000000 /* CWI-V */ +#define NV_BR04_XVD_BUS 0x00000018 /* RW-4R */ +#define NV_BR04_XVD_BUS_SEC_NUMBER 15:8 /* RWIUF */ +#define NV_BR04_XVD_BUS_SEC_NUMBER_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVD_BUS_SUB_NUMBER 23:16 /* RWIUF */ +#define NV_BR04_XVD_BUS_SUB_NUMBER_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVD_G2_PRIV_XP_CONFIG 0x00000494 /* RW-4R */ +#define NV_BR04_XVD_G2_PRIV_XP_CONFIG_GEN2_REPLAY_TIMER_LIMIT 11:2 /* RWIVF */ +#define NV_BR04_XVD_G2_PRIV_XP_CONFIG_GEN2_REPLAY_TIMER_LIMIT_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVD_LINK_CTRLSTAT_DLL_LINK_SM 29:29 /* R--VF */ +#define NV_BR04_XVD_LINK_CTRLSTAT_DLL_LINK_SM_NOT_ACTIVE 0x00000000 /* R---V */ +#define NV_BR04_XVD_LINK_CTRLSTAT_DLL_LINK_SM_ACTIVE 0x00000001 /* R---V */ +#define NV_BR04_XVD_G2_VEND_XP 0x00000400 /* RW-4R */ +#define NV_BR04_XVD_G2_VEND_XP_OPPORTUNISTIC_ACK 28:28 /* RWIVF */ +#define NV_BR04_XVD_G2_VEND_XP_OPPORTUNISTIC_ACK_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVD_G2_VEND_XP_OPPORTUNISTIC_UPDATEFC 29:29 /* RWIVF */ +#define NV_BR04_XVD_G2_VEND_XP_OPPORTUNISTIC_UPDATEFC_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVD_G2_VEND_XP1 0x00000404 /* RW-4R */ +#define NV_BR04_XVD_G2_VEND_XP1_REPLAY_TIMER_LIMIT 9:0 /* RWIVF */ +#define NV_BR04_XVD_G2_VEND_XP1_REPLAY_TIMER_LIMIT_INIT 0x00000000 /* RWI-V */ + +#endif // DEV_BR04_XVD_H diff --git a/src/common/inc/swref/published/br04/dev_br04_xvu.h b/src/common/inc/swref/published/br04/dev_br04_xvu.h new file mode 100644 index 000000000..ce18cb754 --- /dev/null +++ b/src/common/inc/swref/published/br04/dev_br04_xvu.h @@ -0,0 +1,143 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef DEV_BR04_XVU_H +#define DEV_BR04_XVU_H + +#define NV_BR04_XVU 0x00000DFF:0x00000000 /* RW--D */ +#define NV_BR04_XVU_DEV_ID 0x00000000 /* R--4R */ +#define NV_BR04_XVU_DEV_ID_VENDOR_ID 15:0 /* C--VF */ +#define NV_BR04_XVU_DEV_ID_VENDOR_ID_NVIDIA 0x000010DE /* C---V */ +#define NV_BR04_XVU_DEV_ID_DEVICE_ID 31:16 /* R-IVF */ +#define NV_BR04_XVU_DEV_ID_DEVICE_ID_BR04_0 0x000005B0 /* R---V */ +#define NV_BR04_XVU_DEV_ID_DEVICE_ID_BR04_15 0x000005BF /* R---V */ +#define NV_BR04_XVU_DEV_ID_DEVICE_ID_DEFAULT 0x000005BF /* R-I-V */ +#define NV_BR04_XVU_BUS 0x00000018 /* RW-4R */ +#define NV_BR04_XVU_BUS_PRI_NUMBER 7:0 /* RWIUF */ +#define NV_BR04_XVU_BUS_PRI_NUMBER_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_BUS_SEC_NUMBER 15:8 /* RWIUF */ +#define NV_BR04_XVU_BUS_SEC_NUMBER_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_BUS_SUB_NUMBER 23:16 /* RWIUF */ +#define NV_BR04_XVU_BUS_SUB_NUMBER_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_REV_CC 0x00000008 /* R--4R */ +#define NV_BR04_XVU_REV_CC_MINOR_REVISION_ID 3:0 /* R--VF */ +#define NV_BR04_XVU_REV_CC_MAJOR_REVISION_ID 7:4 /* R--VF */ +#define NV_BR04_XVU_INT_FLOW_CTL 0x00000360 /* RW-4R */ +#define NV_BR04_XVU_INT_FLOW_CTL_UP0_TOO_CPL(i) (0x000004F0+(i)*16) /* RW-4A */ +#define NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL(i) (0x00000370+(i)*16) /* RW-4A */ +#define NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL__SIZE_1 6 /* */ +#define NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL_H 6:0 /* RWIUF */ +#define NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL_H_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL_D 15:7 /* RWIUF */ +#define NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL_D_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_INT_FLOW_CTL_UP0_TOO_NP(i) (0x000004F4+(i)*16) /* RW-4A */ +#define NV_BR04_XVU_INT_FLOW_CTL_UP0_TOO_PW(i) (0x000004F8+(i)*16) /* RW-4A */ +#define NV_BR04_XVU_HGPU_CTRL 0x00000980 /* RW-4R */ +#define NV_BR04_XVU_HGPU_CTRL_EN 0:0 /* RWIVF */ +#define NV_BR04_XVU_HGPU_CTRL_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_HGPU_CTRL_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_BR04_XVU_HGPU_PEER_FB_LOWER_BASE 0x00000990 /* RW-4R */ +#define NV_BR04_XVU_HGPU_PEER_FB_UPPER_BASE 0x00000994 /* RW-4R */ +#define NV_BR04_XVU_ITX_ALLOCATION 0x000005B0 /* RW-4R */ +#define NV_BR04_XVU_ITX_ALLOCATION_DP0 3:0 /* RWIUF */ +#define NV_BR04_XVU_ITX_ALLOCATION_DP0_INIT 0x00000001 /* RWI-V */ +#define NV_BR04_XVU_ITX_ALLOCATION_DP1 7:4 /* RWIUF */ +#define NV_BR04_XVU_ITX_ALLOCATION_DP1_INIT 0x00000001 /* RWI-V */ +#define NV_BR04_XVU_ITX_ALLOCATION_DP2 11:8 /* RWIUF */ +#define NV_BR04_XVU_ITX_ALLOCATION_DP2_INIT 0x00000001 /* RWI-V */ +#define NV_BR04_XVU_ITX_ALLOCATION_DP3 15:12 /* RWIUF */ +#define NV_BR04_XVU_ITX_ALLOCATION_DP3_INIT 0x00000001 /* RWI-V */ +#define NV_BR04_XVU_ITX_ALLOCATION_UP0 19:16 /* RWIUF */ +#define NV_BR04_XVU_ITX_ALLOCATION_UP0_INIT 0x00000001 /* RWI-V */ +#define NV_BR04_XVU_ITX_ALLOCATION_MH0 23:20 /* RWIUF */ +#define NV_BR04_XVU_ITX_ALLOCATION_MH0_INIT 0x00000001 /* RWI-V */ +#define NV_BR04_XVU_MCC_REG_ALIAS 0x00000600 /* RW-4R */ +#define NV_BR04_XVU_MCC_REG_ALIAS_DONOR_BUS 7:0 /* RWIUF */ +#define NV_BR04_XVU_MCC_REG_ALIAS_DONOR_BUS_INIT 0x00000000 /* R-I-V */ +#define NV_BR04_XVU_MCC_REG_ALIAS_ACCESS 8:8 /* RWIVF */ 
+#define NV_BR04_XVU_MCC_REG_ALIAS_ACCESS_DISABLED 0x00000000 /* RW--V */ +#define NV_BR04_XVU_MCC_REG_ALIAS_ACCESS_ENABLED 0x00000001 /* RWI-V */ +#define NV_BR04_XVU_MCC_REG_ALIAS_ADDR_SELECT 9:9 /* RWIVF */ +#define NV_BR04_XVU_MCC_REG_ALIAS_ADDR_SELECT_AUTO 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_MCC_REG_ALIAS_ADDR_SELECT_MANUAL 0x00000001 /* RW--V */ +#define NV_BR04_XVU_MCC_REG_OFFSET 0x00000604 /* RW-4R */ +#define NV_BR04_XVU_BAR_0 0x00000010 /* RW-4R */ +#define NV_BR04_XVU_DEV_CTRLSTAT 0x00000068 /* RW-4R */ +#define NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN 0:0 /* RWIVF */ +#define NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_LINK_CTRLSTAT 0x00000070 /* RW-4R */ +#define NV_BR04_XVU_LINK_CTRLSTAT_ASPM_CTRL 1:0 /* RWIVF */ +#define NV_BR04_XVU_LINK_CTRLSTAT_ASPM_CTRL_DISABLED 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_LINK_CTRLSTAT_ASPM_CTRL_L0S 0x00000001 /* RW--V */ +#define NV_BR04_XVU_LINK_CTRLSTAT_ASPM_CTRL_L1 0x00000002 /* RW--V */ +#define NV_BR04_XVU_LINK_CTRLSTAT_ASPM_CTRL_L0S_L1 0x00000003 /* RW--V */ +#define NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED 19:16 /* R--VF */ +#define NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_2P5G 0x00000001 /* R---V */ +#define NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_5P0G 0x00000002 /* R---V */ +#define NV_BR04_XVU_LINK_CTRLSTAT2 0x00000090 /* RW-4R */ +#define NV_BR04_XVU_LINK_CTRLSTAT2_TARGET_LINK_SPEED 3:0 /* RWIVF */ +#define NV_BR04_XVU_LINK_CTRLSTAT2_TARGET_LINK_SPEED_2P5G 0x00000001 /* RW--V */ +#define NV_BR04_XVU_LINK_CTRLSTAT2_TARGET_LINK_SPEED_5P0G 0x00000002 /* RWI-V */ +#define NV_BR04_XVU_G2_PRIV_XP_0 0x00000C00 /* RW-4R */ +#define NV_BR04_XVU_G2_PRIV_XP_0_REPLAY_TIMER_LIMIT 28:19 /* RWIVF */ +#define NV_BR04_XVU_G2_PRIV_XP_0_REPLAY_TIMER_LIMIT_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_G2_PRIV_XP_0_OPPORTUNISTIC_ACK 29:29 /* RWIVF */ +#define NV_BR04_XVU_G2_PRIV_XP_0_OPPORTUNISTIC_ACK_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_G2_PRIV_XP_0_OPPORTUNISTIC_UPDATE_FC 30:30 /* RWIVF */ +#define NV_BR04_XVU_G2_PRIV_XP_0_OPPORTUNISTIC_UPDATE_FC_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 0x00000C44 /* RW-4R */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_SPEED_CHANGE 0:0 /* CWIVF */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_SPEED_CHANGE_ZERO 0x00000000 /* CWI-V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_CYA_DEEMPHASIS_OVERRIDE 2:2 /* RWIVF */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_CYA_DEEMPHASIS_OVERRIDE_DISABLED 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_CYA_DEEMPHASIS_OVERRIDE_ENABLED 0x00000001 /* RW--V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_TARGET_LINK_SPEED 7:4 /* RWIVF */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_TARGET_LINK_SPEED_2P5 0x00000001 /* RW--V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_TARGET_LINK_SPEED_5P0 0x00000002 /* RWI-V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED 11:8 /* RWIVF */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_2P5 0x00000001 /* RW--V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_5P0_2P5 0x00000002 /* RWI-V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_REMOTE 15:12 /* R-IVF */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_REMOTE_2P5 0x00000001 /* R-I-V */ +#define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_REMOTE_5P0_2P5 0x00000002 /* R---V */ +#define NV_BR04_XVU_BOOT_1 0x00000204 /* R--4R */ +#define NV_BR04_XVU_BOOT_1_LINK_SPEED 1:1 /* RWIVF */ +#define NV_BR04_XVU_BOOT_1_LINK_SPEED_2500 0x00000000 /* RWI-V */ +#define 
NV_BR04_XVU_BOOT_1_LINK_SPEED_5000 0x00000001 /* RW--V */ +#define NV_BR04_XVU_CYA_BIT0 0x00000AB0 /* RW-4R */ +#define NV_BR04_XVU_CYA_BIT0_RSVD_28 28:28 /* RWIVF */ +#define NV_BR04_XVU_CYA_BIT0_RSVD_28_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_CYA_BIT0_RSVD_29 29:29 /* RWIVF */ +#define NV_BR04_XVU_CYA_BIT0_RSVD_29_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_CYA_BIT0_RSVD_30 30:30 /* RWIVF */ +#define NV_BR04_XVU_CYA_BIT0_RSVD_30_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_CYA_BIT0_RSVD_31 31:31 /* RWIVF */ +#define NV_BR04_XVU_CYA_BIT0_RSVD_31_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_CYA_NIBBLE0 0x00000AB4 /* RW-4R */ +#define NV_BR04_XVU_CYA_NIBBLE0_RSVD_0 3:0 /* RWIVF */ +#define NV_BR04_XVU_CYA_NIBBLE0_RSVD_0_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_CYA_NIBBLE0_RSVD_4 19:16 /* RWIVF */ +#define NV_BR04_XVU_CYA_NIBBLE0_RSVD_4_INIT 0x00000000 /* RWI-V */ +#define NV_BR04_XVU_ROM_REVISION 0x00000B08 /* RW-4R */ + +#endif // DEV_BR04_XVU_H diff --git a/src/common/inc/swref/published/disp/v03_00/dev_disp.h b/src/common/inc/swref/published/disp/v03_00/dev_disp.h new file mode 100644 index 000000000..138efcbf3 --- /dev/null +++ b/src/common/inc/swref/published/disp/v03_00/dev_disp.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __v03_00_dev_disp_h__ +#define __v03_00_dev_disp_h__ +#define NV_PDISP_CHN_NUM_CORE 0 /* */ +#define NV_PDISP_CHN_NUM_WIN(i) (1+(i)) /* */ +#define NV_PDISP_CHN_NUM_WIN__SIZE_1 32 /* */ +#define NV_PDISP_CHN_NUM_WINIM(i) (33+(i)) /* */ +#define NV_PDISP_CHN_NUM_WINIM__SIZE_1 32 /* */ +#define NV_PDISP_CHN_NUM_CURS(i) (73+(i)) /* */ +#define NV_PDISP_CHN_NUM_CURS__SIZE_1 8 /* */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* R--VF */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* R---V */ +#define NV_PDISP_FE_SW 0x00640FFF:0x00640000 /* RW--D */ +#define NV_PDISP_SF_USER_0 0x006F03FF:0x006F0000 /* RW--D */ +#define NV_UDISP_HASH_BASE 0x00000000 /* */ +#define NV_UDISP_HASH_LIMIT 0x00001FFF /* */ +#define NV_UDISP_OBJ_MEM_BASE 0x00002000 /* */ +#define NV_UDISP_OBJ_MEM_LIMIT 0x0000FFFF /* */ +#define NV_UDISP_HASH_TBL_CLIENT_ID (1*32+13):(1*32+0) /* RWXVF */ +#define NV_UDISP_HASH_TBL_INSTANCE (1*32+24):(1*32+14) /* RWXUF */ +#define NV_UDISP_HASH_TBL_CHN (1*32+31):(1*32+25) /* RWXUF */ +#define NV_DMA_TARGET_NODE (0*32+1):(0*32+0) /* RWXVF */ +#define NV_DMA_TARGET_NODE_PHYSICAL_NVM 0x00000001 /* RW--V */ +#define NV_DMA_TARGET_NODE_PHYSICAL_PCI 0x00000002 /* RW--V */ +#define NV_DMA_TARGET_NODE_PHYSICAL_PCI_COHERENT 0x00000003 /* RW--V */ +#define NV_DMA_ACCESS (0*32+2):(0*32+2) /* RWXVF */ +#define NV_DMA_ACCESS_READ_ONLY 0x00000000 /* RW--V */ +#define NV_DMA_ACCESS_READ_AND_WRITE 0x00000001 /* RW--V */ +#define NV_DMA_KIND (0*32+20):(0*32+20) /* RWXVF */ +#define NV_DMA_KIND_PITCH 0x00000000 /* RW--V */ +#define NV_DMA_KIND_BLOCKLINEAR 0x00000001 /* RW--V */ +#define NV_DMA_ADDRESS_BASE_LO (1*32+31):(1*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_BASE_HI (2*32+6):(2*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_LIMIT_LO (3*32+31):(3*32+0) /* RWXUF */ +#define NV_DMA_ADDRESS_LIMIT_HI (4*32+6):(4*32+0) /* RWXUF */ +#define NV_DMA_SIZE 20 /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_CORE 0x00680000 /* */ +#define NV_UDISP_FE_CHN_ARMED_BASEADR_CORE (0x00680000+32768) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(i) ((0x00690000+(i)*4096)) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(i) ((0x00690000+((i+32)*4096))) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(i) (0x006D8000+(i)*4096) /* RW-4A */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR(i) ((i)>0?(((0x00690000+(i-1)*4096))):0x00680000) /* */ +#define NV_UDISP_FE_CHN_ASSY_BASEADR__SIZE_1 81 /* */ +#endif // __v03_00_dev_disp_h__ diff --git a/src/common/inc/swref/published/disp/v04_00/dev_disp.h b/src/common/inc/swref/published/disp/v04_00/dev_disp.h new file mode 100644 index 000000000..1b03f75c9 --- /dev/null +++ b/src/common/inc/swref/published/disp/v04_00/dev_disp.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __v04_00_dev_disp_h__ +#define __v04_00_dev_disp_h__ + +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING(i) (0x00611800+(i)*4) /* RW-4A */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA 1:1 /* RWIVF */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_FE_EVT_STAT_HEAD_TIMING_LAST_DATA_RESET 0x00000001 /* -W--V */ +#define NV_PDISP_FE_RM_INTR_DISPATCH 0x00611EC0 /* R--4R */ +#define NV_PDISP_FE_RM_INTR_DISPATCH_HEAD_TIMING(i) (0+(i)):(0+(i)) /* R--VF */ +#define NV_PDISP_FE_RM_INTR_DISPATCH_HEAD_TIMING_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PDISP_FE_RM_INTR_DISPATCH_HEAD_TIMING_PENDING 0x00000001 /* R---V */ +#define NV_PDISP_VGA_WORKSPACE_BASE 0x00625F04 /* RW-4R */ +#define NV_PDISP_VGA_WORKSPACE_BASE_STATUS 3:3 /* RWIVF */ +#define NV_PDISP_VGA_WORKSPACE_BASE_STATUS_VALID 0x00000001 /* RW--V */ +#define NV_PDISP_VGA_WORKSPACE_BASE_ADDR 31:8 /* RWIVF */ + +#endif // __v04_00_dev_disp_h__ diff --git a/src/common/inc/swref/published/kepler/gk104/dev_timer.h b/src/common/inc/swref/published/kepler/gk104/dev_timer.h new file mode 100644 index 000000000..b1ab3295b --- /dev/null +++ b/src/common/inc/swref/published/kepler/gk104/dev_timer.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gk104_dev_timer_h__ +#define __gk104_dev_timer_h__ +#define NV_PTIMER_INTR_0 0x00009100 /* RW-4R */ +#define NV_PTIMER_INTR_0_ALARM 0:0 /* RWXVF */ +#define NV_PTIMER_INTR_0_ALARM_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PTIMER_INTR_0_ALARM_PENDING 0x00000001 /* R---V */ +#define NV_PTIMER_INTR_0_ALARM_RESET 0x00000001 /* -W--V */ +#define NV_PTIMER_INTR_EN_0 0x00009140 /* RW-4R */ +#define NV_PTIMER_INTR_EN_0_ALARM 0:0 /* RWIVF */ +#define NV_PTIMER_INTR_EN_0_ALARM_DISABLED 0x00000000 /* RWI-V */ +#define NV_PTIMER_INTR_EN_0_ALARM_ENABLED 0x00000001 /* RW--V */ +#define NV_PTIMER_TIME_0 0x00009400 /* RW-4R */ +#define NV_PTIMER_TIME_0_NSEC 31:5 /* RWXUF */ +#define NV_PTIMER_TIME_1 0x00009410 /* RW-4R */ +#define NV_PTIMER_TIME_1_NSEC 28:0 /* RWXUF */ +#define NV_PTIMER_ALARM_0 0x00009420 /* RW-4R */ +#define NV_PTIMER_ALARM_0_NSEC 31:5 /* RWXUF */ +#endif // __gk104_dev_timer_h__ diff --git a/src/common/inc/swref/published/maxwell/gm107/dev_boot.h b/src/common/inc/swref/published/maxwell/gm107/dev_boot.h new file mode 100644 index 000000000..c5a275b9b --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_boot.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gm107_dev_boot_h__ +#define __gm107_dev_boot_h__ +#define NV_PMC 0x00000FFF:0x00000000 /* RW--D */ +#define NV_PMC_BOOT_0 0x00000000 /* R--4R */ +#define NV_PMC_INTR(i) (0x00000100+(i)*4) /* RW-4A */ +#define NV_PMC_INTR__SIZE_1 3 /* */ +#define NV_PMC_INTR_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_DEVICE__SIZE_1 31 /* */ +#define NV_PMC_INTR_DEVICE_NOT_PENDING 0x00000000 /* */ +#define NV_PMC_INTR_DEVICE_PENDING 0x00000001 /* */ +#define NV_PMC_INTR_EN(i) (0x00000140+(i)*4) /* RW-4A */ +#define NV_PMC_INTR_EN__SIZE_1 3 /* */ +#define NV_PMC_INTR_0 0x00000100 /* */ +#define NV_PMC_INTR_1 0x00000104 /* */ +#define NV_PMC_ENABLE 0x00000200 /* RW-4R */ +#define NV_PMC_ENABLE_DEVICE(i) (i):(i) /* */ +#define NV_PMC_ENABLE_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_ENABLE_DEVICE_DISABLE 0x00000000 /* */ +#define NV_PMC_ENABLE_DEVICE_ENABLE 0x00000001 /* */ +#endif // __gm107_dev_boot_h__ diff --git a/src/common/inc/swref/published/maxwell/gm107/dev_bus.h b/src/common/inc/swref/published/maxwell/gm107/dev_bus.h new file mode 100644 index 000000000..b9a94bd99 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_bus.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gm107_dev_bus_h__ +#define __gm107_dev_bus_h__ +#define NV_PBUS_INTR_0 0x00001100 /* RW-4R */ +#define NV_PBUS_INTR_0_PRI_SQUASH 1:1 /* RWIVF */ +#define NV_PBUS_INTR_0_PRI_SQUASH_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PBUS_INTR_0_PRI_SQUASH_PENDING 0x00000001 /* R---V */ +#define NV_PBUS_INTR_0_PRI_SQUASH_RESET 0x00000001 /* -W--C */ +#define NV_PBUS_INTR_0_PRI_FECSERR 2:2 /* RWIVF */ +#define NV_PBUS_INTR_0_PRI_FECSERR_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PBUS_INTR_0_PRI_FECSERR_PENDING 0x00000001 /* R---V */ +#define NV_PBUS_INTR_0_PRI_FECSERR_RESET 0x00000001 /* -W--C */ +#define NV_PBUS_INTR_0_PRI_TIMEOUT 3:3 /* RWIVF */ +#define NV_PBUS_INTR_0_PRI_TIMEOUT_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PBUS_INTR_0_PRI_TIMEOUT_PENDING 0x00000001 /* R---V */ +#define NV_PBUS_INTR_0_PRI_TIMEOUT_RESET 0x00000001 /* -W--C */ +#define NV_PBUS_INTR_0_FB_ACK_TIMEOUT 5:5 /* RWIVF */ +#define NV_PBUS_INTR_0_FB_ACK_TIMEOUT_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PBUS_INTR_0_FB_ACK_TIMEOUT_PENDING 0x00000001 /* R---V */ +#define NV_PBUS_INTR_0_FB_ACK_TIMEOUT_RESET 0x00000001 /* -W--C */ +#define NV_PBUS_BAR0_WINDOW 0x00001700 /* RW-4R */ +#define NV_PBUS_BAR0_WINDOW_BASE 23:0 /* RWIUF */ +#define NV_PBUS_BAR0_WINDOW_BASE_0 0x00000000 /* RWI-V */ +#define NV_PBUS_BAR0_WINDOW_TARGET 25:24 /* RWIUF */ +#define NV_PBUS_BAR0_WINDOW_TARGET_VID_MEM 0x00000000 /* RWI-V */ +#define NV_PBUS_BAR0_WINDOW_TARGET_SYS_MEM_COHERENT 0x00000002 /* RW--V */ +#define NV_PBUS_BAR0_WINDOW_TARGET_SYS_MEM_NONCOHERENT 0x00000003 /* RW--V */ +#define NV_PBUS_BAR0_WINDOW_BASE_SHIFT 16 /* */ +#define NV_PBUS_BAR2_BLOCK 0x00001714 /* RW-4R */ +#define NV_PBUS_BAR2_BLOCK_MAP 29:0 /* RWXUF */ +#define NV_PBUS_BAR2_BLOCK_PTR 27:0 /* RWIUF */ +#define NV_PBUS_BAR2_BLOCK_PTR_0 0x00000000 /* RWI-V */ +#define NV_PBUS_BAR2_BLOCK_TARGET 29:28 /* RWIUF */ +#define NV_PBUS_BAR2_BLOCK_TARGET_VID_MEM 0x00000000 /* RWI-V */ +#define NV_PBUS_BAR2_BLOCK_TARGET_SYS_MEM_COHERENT 0x00000002 /* RW--V */ +#define NV_PBUS_BAR2_BLOCK_TARGET_SYS_MEM_NONCOHERENT 0x00000003 /* RW--V */ +#define NV_PBUS_BAR2_BLOCK_DEBUG_CYA 30:30 /* RWIUF */ +#define NV_PBUS_BAR2_BLOCK_DEBUG_CYA_OFF 0x00000001 /* RW--V */ +#define NV_PBUS_BAR2_BLOCK_DEBUG_CYA_ON 0x00000000 /* RW--V */ +#define NV_PBUS_BAR2_BLOCK_DEBUG_CYA_INIT 0x00000001 /* RWI-V */ +#define NV_PBUS_BAR2_BLOCK_MODE 31:31 /* RWIUF */ +#define NV_PBUS_BAR2_BLOCK_MODE_PHYSICAL 0x00000000 /* RWI-V */ +#define NV_PBUS_BAR2_BLOCK_MODE_VIRTUAL 0x00000001 /* RW--V */ +#define NV_PBUS_BAR2_BLOCK_PTR_SHIFT 12 /* */ +#endif // __gm107_dev_bus_h__ diff --git a/src/common/inc/swref/published/maxwell/gm107/dev_fb.h b/src/common/inc/swref/published/maxwell/gm107/dev_fb.h new file mode 100644 index 000000000..2b07f9310 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_fb.h @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all 
copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gm107_dev_fb_h__ +#define __gm107_dev_fb_h__ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR 0x00100C10 /* RW-4R */ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT 8 /* */ +#define NV_PFB_NISO_FLUSH_SYSMEM_ADDR_ADR_39_08 31:0 /* RWIVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_PDB 0x00100CB8 /* RW-4R */ +#define NV_PFB_PRI_MMU_INVALIDATE_PDB_APERTURE 1:1 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_PDB_APERTURE_SYS_MEM 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_PDB_ADDR 31:4 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_PDB_ADDR_ALIGNMENT 0x0000000c /* */ +#define NV_PFB_PRI_MMU_INVALIDATE 0x00100CBC /* RW-4R */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_VA 0:0 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_VA_FALSE 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_VA_TRUE 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_PDB 1:1 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_PDB_FALSE 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_ALL_PDB_TRUE 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_HUBTLB_ONLY 2:2 /* RWXVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_HUBTLB_ONLY_FALSE 0x00000000 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_HUBTLB_ONLY_TRUE 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_INVALIDATE_TRIGGER 31:31 /* -WEVF */ +#define NV_PFB_PRI_MMU_INVALIDATE_TRIGGER_FALSE 0x00000000 /* -WE-V */ +#define NV_PFB_PRI_MMU_INVALIDATE_TRIGGER_TRUE 0x00000001 /* -W--T */ +#endif // __gm107_dev_fb_h__ diff --git a/src/common/inc/swref/published/maxwell/gm107/dev_flush.h b/src/common/inc/swref/published/maxwell/gm107/dev_flush.h new file mode 100644 index 000000000..19b8b8aeb --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_flush.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gm107_dev_flush_h__ +#define __gm107_dev_flush_h__ +#define NV_UFLUSH_L2_SYSMEM_INVALIDATE 0x00070004 /* RW-4R */ +#define NV_UFLUSH_L2_PEERMEM_INVALIDATE 0x00070008 /* RW-4R */ +#define NV_UFLUSH_L2_CLEAN_COMPTAGS 0x0007000c /* RW-4R */ +#define NV_UFLUSH_L2_FLUSH_DIRTY 0x00070010 /* RW-4R */ +#define NV_UFLUSH_FB_FLUSH 0x00070000 /* RW-4R */ +#endif // __gm107_dev_flush_h__ diff --git a/src/common/inc/swref/published/maxwell/gm107/dev_mmu.h b/src/common/inc/swref/published/maxwell/gm107/dev_mmu.h new file mode 100644 index 000000000..37f75258c --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_mmu.h @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gm107_dev_mmu_h__ +#define __gm107_dev_mmu_h__ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define 
NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+28):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_16BX2 0xfe /* R---V */ +#define NV_MMU_PTE_KIND_C32_2CRA 0xdb /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBR 0xe0 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2CBA 0xe1 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_2BRA 0xe3 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS8_MS16_2CRA 0xe5 /* R---V */ +#define NV_MMU_PTE_KIND_C64_2CRA 0xe9 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBR 0xee /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2CBA 0xef /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_2BRA 0xf1 /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS8_MS16_2CRA 0xf3 /* R---V */ +#define NV_MMU_PTE_KIND_C128_2CR 0xf5 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS2_2CR 0xf7 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS4_2CR 0xf9 /* R---V */ +#define NV_MMU_PTE_KIND_C128_MS8_MS16_2CR 0xfb /* R---V */ +#define NV_MMU_PTE_KIND_SMHOST_MESSAGE 0xcb /* R---V */ +#endif // __gm107_dev_mmu_h__ diff --git a/src/common/inc/swref/published/maxwell/gm107/dev_nv_xve.h b/src/common/inc/swref/published/maxwell/gm107/dev_nv_xve.h new file mode 100644 index 000000000..dc23abd45 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_nv_xve.h @@ -0,0 +1,150 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gm107_dev_nv_xve_h__ +#define __gm107_dev_nv_xve_h__ +#define NV_PCFG 0x00088FFF:0x00088000 /* RW--D */ +#define NV_XVE_ID 0x00000000 /* R--4R */ +#define NV_XVE_ID_VENDOR 15:0 /* C--VF */ +#define NV_XVE_ID_VENDOR_NVIDIA 0x000010DE /* C---V */ +#define NV_XVE_DEV_CTRL 0x00000004 /* RW-4R */ +#define NV_XVE_DEV_CTRL_CMD_IO_SPACE 0:0 /* RWIVF */ +#define NV_XVE_DEV_CTRL_CMD_IO_SPACE_DISABLED 0x00000000 /* RWI-V */ +#define NV_XVE_DEV_CTRL_CMD_IO_SPACE_ENABLED 0x00000001 /* RW--V */ +#define NV_XVE_REV_ID 0x00000008 /* R--4R */ +#define NV_XVE_REV_ID_CLASS_CODE 31:8 /* R-CVF */ +#define NV_XVE_REV_ID_CLASS_CODE_3D 0x00030200 /* ----V */ +#define NV_XVE_SUBSYSTEM 0x0000002C /* R--4R */ +#define NV_XVE_MSI_CTRL 0x00000068 /* RW-4R */ +#define NV_XVE_MSI_CTRL_MSI 16:16 /* RWIVF */ +#define NV_XVE_MSI_CTRL_MSI_DISABLE 0x00000000 /* RWI-V */ +#define NV_XVE_MSI_CTRL_MSI_ENABLE 0x00000001 /* RW--V */ +#define NV_XVE_DEVICE_CONTROL_STATUS 0x00000080 /* RWI4R */ +#define NV_XVE_DEVICE_CONTROL_STATUS_CORR_ERROR_REPORTING_ENABLE 0:0 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_CORR_ERROR_REPORTING_ENABLE_INIT 0x00000000 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_NON_FATAL_ERROR_REPORTING_ENABLE 1:1 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_NON_FATAL_ERROR_REPORTING_ENABLE_INIT 0x00000000 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_FATAL_ERROR_REPORTING_ENABLE 2:2 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_FATAL_ERROR_REPORTING_ENABLE_INIT 0x00000000 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_UNSUPP_REQ_REPORTING_ENABLE 3:3 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_UNSUPP_REQ_REPORTING_ENABLE_INIT 0x00000000 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_ENABLE_RELAXED_ORDERING 4:4 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_ENABLE_RELAXED_ORDERING_INIT 0x00000001 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_PAYLOAD_SIZE 7:5 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_PAYLOAD_SIZE_INIT 0x00000000 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_PAYLOAD_SIZE_128B 0x00000000 /* R---V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_PAYLOAD_SIZE_256B 0x00000001 /* R---V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_PAYLOAD_SIZE_512B 0x00000002 /* R---V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_PAYLOAD_SIZE_1024B 0x00000003 /* R---V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_PAYLOAD_SIZE_2048B 0x00000004 /* R---V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_PAYLOAD_SIZE_4096B 0x00000005 /* R---V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_EXTENDED_TAG_FIELD_ENABLE 8:8 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_EXTENDED_TAG_FIELD_ENABLE_INIT 0x00000001 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_EXTENDED_TAG_FIELD_ENABLE_ENABLED 0x00000001 /* RW--V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_PHANTOM_FUNCTIONS_ENABLE 9:9 /* R-IVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_PHANTOM_FUNCTIONS_ENABLE_INIT 0x00000000 /* R-I-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_AUXILLARY_POWER_PM_ENABLE 10:10 /* R-IVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_AUXILLARY_POWER_PM_ENABLE_INIT 0x00000000 /* R-I-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_ENABLE_NO_SNOOP 11:11 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_ENABLE_NO_SNOOP_INIT 0x00000001 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_READ_REQUEST_SIZE 14:12 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_MAX_READ_REQUEST_SIZE_INIT 0x00000002 /* RWI-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_RSVD 15:15 /* C--VF */ +#define 
NV_XVE_DEVICE_CONTROL_STATUS_RSVD_INIT 0x00000000 /* C---V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_CORR_ERROR_DETECTED 16:16 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_CORR_ERROR_DETECTED_INIT 0x00000000 /* R-I-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_CORR_ERROR_DETECTED_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_DEVICE_CONTROL_STATUS_NON_FATAL_ERROR_DETECTED 17:17 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_NON_FATAL_ERROR_DETECTED_INIT 0x00000000 /* R-I-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_NON_FATAL_ERROR_DETECTED_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_DEVICE_CONTROL_STATUS_FATAL_ERROR_DETECTED 18:18 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_FATAL_ERROR_DETECTED_INIT 0x00000000 /* R-I-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_FATAL_ERROR_DETECTED_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_DEVICE_CONTROL_STATUS_UNSUPP_REQUEST_DETECTED 19:19 /* RWIVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_UNSUPP_REQUEST_DETECTED_INIT 0x00000000 /* R-I-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_UNSUPP_REQUEST_DETECTED_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_DEVICE_CONTROL_STATUS_AUX_POWER_DETECTED 20:20 /* R-IVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_AUX_POWER_DETECTED_INIT 0x00000000 /* R-I-V */ +#define NV_XVE_DEVICE_CONTROL_STATUS_TRANSACTIONS_PENDING 21:21 /* R-IVF */ +#define NV_XVE_DEVICE_CONTROL_STATUS_TRANSACTIONS_PENDING_INIT 0x00000000 /* R-I-V */ +#define NV_XVE_LINK_CAPABILITIES 0x00000084 /* R--4R */ +#define NV_XVE_LINK_CONTROL_STATUS 0x00000088 /* RWI4R */ +#define NV_XVE_VCCAP_HDR 0x00000100 /* R--4R */ +#define NV_XVE_VCCAP_HDR_ID 15:0 /* C--VF */ +#define NV_XVE_VCCAP_HDR_ID_VC 0x00000002 /* C---V */ +#define NV_XVE_VCCAP_HDR_VER 19:16 /* C--VF */ +#define NV_XVE_VCCAP_HDR_VER_1 0x00000001 /* C---V */ +#define NV_XVE_VCCAP_CTRL0 0x00000114 /* RW-4R */ +#define NV_XVE_VCCAP_CTRL0_MAP 7:1 /* RWIVF */ +#define NV_XVE_AER_UNCORR_ERR 0x00000424 /* RWC4R */ +#define NV_XVE_AER_UNCORR_ERR_DLINK_PROTO_ERR 4:4 /* RWCVF */ +#define NV_XVE_AER_UNCORR_ERR_DLINK_PROTO_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_UNCORR_ERR_DLINK_PROTO_ERR_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_UNCORR_ERR_DLINK_PROTO_ERR_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_UNCORR_ERR_POISONED_TLP 12:12 /* RWCVF */ +#define NV_XVE_AER_UNCORR_ERR_POISONED_TLP_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_UNCORR_ERR_POISONED_TLP_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_UNCORR_ERR_POISONED_TLP_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_UNCORR_ERR_CPL_TIMEOUT 14:14 /* RWCVF */ +#define NV_XVE_AER_UNCORR_ERR_CPL_TIMEOUT_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_UNCORR_ERR_CPL_TIMEOUT_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_UNCORR_ERR_CPL_TIMEOUT_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_UNCORR_ERR_UNEXP_CPL 16:16 /* RWCVF */ +#define NV_XVE_AER_UNCORR_ERR_UNEXP_CPL_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_UNCORR_ERR_UNEXP_CPL_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_UNCORR_ERR_UNEXP_CPL_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_UNCORR_ERR_MALFORMED_TLP 18:18 /* RWCVF */ +#define NV_XVE_AER_UNCORR_ERR_MALFORMED_TLP_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_UNCORR_ERR_MALFORMED_TLP_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_UNCORR_ERR_MALFORMED_TLP_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_UNCORR_ERR_UNSUPPORTED_REQ 20:20 /* RWCVF */ +#define NV_XVE_AER_UNCORR_ERR_UNSUPPORTED_REQ_ACTIVE 0x00000001 /* R---V */ +#define 
NV_XVE_AER_UNCORR_ERR_UNSUPPORTED_REQ_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_UNCORR_ERR_UNSUPPORTED_REQ_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_CORR_ERR 0x00000430 /* RW-4R */ +#define NV_XVE_AER_CORR_ERR_RCV_ERR 0:0 /* RWCVF */ +#define NV_XVE_AER_CORR_ERR_RCV_ERR_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_CORR_ERR_RCV_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_CORR_ERR_RCV_ERR_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_CORR_ERR_BAD_TLP 6:6 /* RWCVF */ +#define NV_XVE_AER_CORR_ERR_BAD_TLP_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_CORR_ERR_BAD_TLP_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_CORR_ERR_BAD_TLP_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_CORR_ERR_BAD_DLLP 7:7 /* RWCVF */ +#define NV_XVE_AER_CORR_ERR_BAD_DLLP_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_CORR_ERR_BAD_DLLP_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_CORR_ERR_BAD_DLLP_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_CORR_ERR_RPLY_ROLLOVER 8:8 /* RWCVF */ +#define NV_XVE_AER_CORR_ERR_RPLY_ROLLOVER_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_CORR_ERR_RPLY_ROLLOVER_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_CORR_ERR_RPLY_ROLLOVER_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_CORR_ERR_RPLY_TIMEOUT 12:12 /* RWCVF */ +#define NV_XVE_AER_CORR_ERR_RPLY_TIMEOUT_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_CORR_ERR_RPLY_TIMEOUT_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_CORR_ERR_RPLY_TIMEOUT_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_AER_CORR_ERR_ADVISORY_NONFATAL 13:13 /* RWCVF */ +#define NV_XVE_AER_CORR_ERR_ADVISORY_NONFATAL_NOT_ACTIVE 0x00000000 /* R-C-V */ +#define NV_XVE_AER_CORR_ERR_ADVISORY_NONFATAL_ACTIVE 0x00000001 /* R---V */ +#define NV_XVE_AER_CORR_ERR_ADVISORY_NONFATAL_CLEAR 0x00000001 /* -W--C */ +#define NV_XVE_CYA_2 0x00000704 /* RW-4R */ +#endif // __gm107_dev_nv_xve_h__ diff --git a/src/common/inc/swref/published/maxwell/gm107/dev_nv_xve1.h b/src/common/inc/swref/published/maxwell/gm107/dev_nv_xve1.h new file mode 100644 index 000000000..1a135c0d7 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_nv_xve1.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gm107_dev_nv_xve1_h__ +#define __gm107_dev_nv_xve1_h__ +#define NV_PCFG1 0x0008AFFF:0x0008A000 /* RW--D */ +#endif // __gm107_dev_nv_xve1_h__ diff --git a/src/common/inc/swref/published/maxwell/gm107/dev_ram.h b/src/common/inc/swref/published/maxwell/gm107/dev_ram.h new file mode 100644 index 000000000..b12c44e08 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_ram.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gm107_dev_ram_h__ +#define __gm107_dev_ram_h__ +#define NV_PRAMIN 0x007FFFFF:0x00700000 /* RW--M */ +#define NV_PRAMIN_DATA032(i) (0x00700000+(i)*4) /* RW-4A */ +#define NV_PRAMIN_DATA032__SIZE_1 524288 /* */ +#define NV_PRAMIN_DATA032_VALUE 31:0 /* RWXUF */ +#define NV_PRAMIN_DATA016(i) (0x00700000+((i)/3)*4+((i)%3)) /* RW-2A */ +#define NV_PRAMIN_DATA016__SIZE_1 1572864 /* */ +#define NV_PRAMIN_DATA016_VALUE 15:0 /* RWXUF */ +#define NV_PRAMIN_DATA008(i) (0x00700000+(i)) /* RW-1A */ +#define NV_PRAMIN_DATA008__SIZE_1 2097152 /* */ +#define NV_PRAMIN_DATA008_VALUE 7:0 /* RWXUF */ +#define NV_RAMIN_BASE_SHIFT 12 /* */ +#define NV_RAMIN_ALLOC_SIZE 4096 /* */ +#define NV_RAMIN_RAMFC (127*32+31):(0*32+0) /* RWXUF */ +#define NV_RAMUSERD_PUT (16*32+31):(16*32+0) /* RWXUF */ +#define NV_RAMUSERD_GET (17*32+31):(17*32+0) /* RWXUF */ +#define NV_RAMUSERD_REF (18*32+31):(18*32+0) /* RWXUF */ +#define NV_RAMUSERD_PUT_HI (19*32+31):(19*32+0) /* RWXUF */ +#define NV_RAMUSERD_REF_THRESHOLD (20*32+31):(20*32+0) /* */ +#define NV_RAMUSERD_TOP_LEVEL_GET (22*32+31):(22*32+0) /* RWXUF */ +#define NV_RAMUSERD_TOP_LEVEL_GET_HI (23*32+31):(23*32+0) /* RWXUF */ +#define NV_RAMUSERD_GET_HI (24*32+31):(24*32+0) /* RWXUF */ +#define NV_RAMUSERD_GP_GET (34*32+31):(34*32+0) /* RWXUF */ +#define NV_RAMUSERD_GP_PUT (35*32+31):(35*32+0) /* RWXUF */ +#define NV_RAMUSERD_BASE_SHIFT 9 /* */ +#define NV_RAMUSERD_GP_TOP_LEVEL_GET (22*32+31):(22*32+0) /* RWXUF */ +#define NV_RAMUSERD_GP_TOP_LEVEL_GET_HI (23*32+31):(23*32+0) /* RWXUF */ +#define NV_RAMRL_BASE_SHIFT 12 /* */ +#define NV_RAMRL_ENTRY_TIMESLICE_SCALE_3 0x00000003 /* RWI-V */ +#define NV_RAMRL_ENTRY_TIMESLICE_TIMEOUT_128 0x00000080 /* RWI-V */ +#define NV_RAMRL_ENTRY_SIZE 8 /* */ +#define NV_RAMRL_ENTRY_TSG_LENGTH_MAX 0x00000020 /* RW--V */ +#endif // __gm107_dev_ram_h__ diff --git 
a/src/common/inc/swref/published/maxwell/gm107/dev_timer.h b/src/common/inc/swref/published/maxwell/gm107/dev_timer.h new file mode 100644 index 000000000..cdc096bc3 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm107/dev_timer.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gm107_dev_timer_h__ +#define __gm107_dev_timer_h__ +#define NV_PTIMER_TIME_0 0x00009400 /* RW-4R */ +#define NV_PTIMER_TIME_1 0x00009410 /* RW-4R */ +#define NV_PTIMER_TIMER_CFG0 0x00009300 /* RW-4R */ +#define NV_PTIMER_TIMER_CFG0_DEN 4:0 /* RWIUF */ +#define NV_PTIMER_TIMER_CFG0_DEN_108MHZ_REF 0x1b /* RWI-V */ +#define NV_PTIMER_TIMER_CFG0_NUM 18:16 /* RWIUF */ +#define NV_PTIMER_TIMER_CFG0_NUM_108MHZ_REF 0x7 /* RWI-V */ +#define NV_PTIMER_TIMER_CFG1 0x00009304 /* RW-4R */ +#define NV_PTIMER_TIMER_CFG1_INTEGER 5:0 /* RWIUF */ +#define NV_PTIMER_TIMER_CFG1_INTEGER_108MHZ_REF 0x000009 /* RWI-V */ +#define NV_PTIMER_GR_TICK_FREQ 0x00009480 /* RW-4R */ +#define NV_PTIMER_GR_TICK_FREQ_SELECT 2:0 /* RWIUF */ +#define NV_PTIMER_GR_TICK_FREQ_SELECT_MAX 0x00000000 /* RW--V */ +#define NV_PTIMER_GR_TICK_FREQ_SELECT_DEFAULT 0x00000005 /* RWI-V */ +#define NV_PTIMER_GR_TICK_FREQ_SELECT_MIN 0x00000007 /* RW--V */ +#define NV_PTIMER_TIME_0_NSEC 31:5 /* RWXUF */ +#define NV_PTIMER_TIME_1_NSEC 28:0 /* RWXUF */ + +#endif // __gm107_dev_timer_h__ diff --git a/src/common/inc/swref/published/maxwell/gm200/dev_flush.h b/src/common/inc/swref/published/maxwell/gm200/dev_flush.h new file mode 100644 index 000000000..2330597b1 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm200/dev_flush.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gm200_dev_flush_h__ +#define __gm200_dev_flush_h__ +#define NV_UFLUSH_L2_SYSMEM_INVALIDATE 0x00070004 /* RW-4R */ +#define NV_UFLUSH_L2_SYSMEM_INVALIDATE_PENDING 0:0 /* RWIUF */ +#define NV_UFLUSH_L2_SYSMEM_INVALIDATE_PENDING_EMPTY 0x00000000 /* R-I-V */ +#define NV_UFLUSH_L2_SYSMEM_INVALIDATE_PENDING_BUSY 0x00000001 /* R---V */ +#define NV_UFLUSH_L2_SYSMEM_INVALIDATE_OUTSTANDING 1:1 /* R-IUF */ +#define NV_UFLUSH_L2_SYSMEM_INVALIDATE_OUTSTANDING_FALSE 0x00000000 /* R-I-V */ +#define NV_UFLUSH_L2_SYSMEM_INVALIDATE_OUTSTANDING_TRUE 0x00000001 /* R---V */ +#define NV_UFLUSH_L2_PEERMEM_INVALIDATE 0x00070008 /* RW-4R */ +#define NV_UFLUSH_L2_PEERMEM_INVALIDATE_PENDING 0:0 /* RWIUF */ +#define NV_UFLUSH_L2_PEERMEM_INVALIDATE_PENDING_EMPTY 0x00000000 /* R-I-V */ +#define NV_UFLUSH_L2_PEERMEM_INVALIDATE_PENDING_BUSY 0x00000001 /* R---V */ +#define NV_UFLUSH_L2_PEERMEM_INVALIDATE_OUTSTANDING 1:1 /* R-IUF */ +#define NV_UFLUSH_L2_PEERMEM_INVALIDATE_OUTSTANDING_FALSE 0x00000000 /* R-I-V */ +#define NV_UFLUSH_L2_PEERMEM_INVALIDATE_OUTSTANDING_TRUE 0x00000001 /* R---V */ +#define NV_UFLUSH_L2_CLEAN_COMPTAGS 0x0007000c /* RW-4R */ +#define NV_UFLUSH_L2_CLEAN_COMPTAGS_PENDING 0:0 /* RWIUF */ +#define NV_UFLUSH_L2_CLEAN_COMPTAGS_PENDING_EMPTY 0x00000000 /* R-I-V */ +#define NV_UFLUSH_L2_CLEAN_COMPTAGS_PENDING_BUSY 0x00000001 /* R---V */ +#define NV_UFLUSH_L2_CLEAN_COMPTAGS_OUTSTANDING 1:1 /* R-IUF */ +#define NV_UFLUSH_L2_CLEAN_COMPTAGS_OUTSTANDING_FALSE 0x00000000 /* R-I-V */ +#define NV_UFLUSH_L2_CLEAN_COMPTAGS_OUTSTANDING_TRUE 0x00000001 /* R---V */ +#define NV_UFLUSH_L2_FLUSH_DIRTY 0x00070010 /* RW-4R */ +#define NV_UFLUSH_L2_FLUSH_DIRTY_PENDING 0:0 /* RWIUF */ +#define NV_UFLUSH_L2_FLUSH_DIRTY_PENDING_EMPTY 0x00000000 /* R-I-V */ +#define NV_UFLUSH_L2_FLUSH_DIRTY_PENDING_BUSY 0x00000001 /* R---V */ +#define NV_UFLUSH_L2_FLUSH_DIRTY_OUTSTANDING 1:1 /* R-IUF */ +#define NV_UFLUSH_L2_FLUSH_DIRTY_OUTSTANDING_FALSE 0x00000000 /* R-I-V */ +#define NV_UFLUSH_L2_FLUSH_DIRTY_OUTSTANDING_TRUE 0x00000001 /* R---V */ +#endif // __gm200_dev_flush_h__ diff --git a/src/common/inc/swref/published/maxwell/gm200/dev_nv_p2p.h b/src/common/inc/swref/published/maxwell/gm200/dev_nv_p2p.h new file mode 100644 index 000000000..09e188338 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm200/dev_nv_p2p.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gm200_dev_nv_p2p_h__ +#define __gm200_dev_nv_p2p_h__ +#define NV_P2P 0x0013AFFF:0x00139000 /* RW--D */ +#define NV_P2P_WMBOX_ADDR_ADDR 18:1 /* RWIUF */ +#define NV_P2P_WREQMB_L(i) (0x00139068+(i)*64) /* R--4A */ +#define NV_P2P_WREQMB_L__SIZE_1 8 /* */ +#define NV_P2P_WREQMB_L_PAGE_ADDR 20:0 /* R-IUF */ +#define NV_P2P_WREQMB_L_PAGE_ADDR_INIT 0x00000000 /* R-I-V */ +#define NV_P2P_WREQMB_H(i) (0x0013906c+(i)*64) /* R--4A */ +#define NV_P2P_WREQMB_H__SIZE_1 8 /* */ +#define NV_P2P_WREQMB_H_KIND 7:0 /* R-IUF */ +#define NV_P2P_WREQMB_H_KIND_INIT 0x00000000 /* R-I-V */ +#define NV_P2P_WREQMB_H_COMPTAGLINE 24:8 /* R-IUF */ +#define NV_P2P_WREQMB_H_COMPTAGLINE_INIT 0x00000000 /* R-I-V */ +#define NV_P2P_WREQMB_H_PAGE_SIZE 25:25 /* R-IUF */ +#define NV_P2P_WREQMB_H_PAGE_SIZE_INIT 0x00000000 /* R-I-V */ +#endif // __gm200_dev_nv_p2p_h__ diff --git a/src/common/inc/swref/published/maxwell/gm200/dev_timer.h b/src/common/inc/swref/published/maxwell/gm200/dev_timer.h new file mode 100644 index 000000000..6b773fb20 --- /dev/null +++ b/src/common/inc/swref/published/maxwell/gm200/dev_timer.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gm200_dev_timer_h__ +#define __gm200_dev_timer_h__ +#define NV_PTIMER_INTR_0 0x00009100 /* RW-4R */ +#define NV_PTIMER_INTR_0_TIMER 1:1 /* RWXVF */ +#define NV_PTIMER_INTR_0_TIMER_NOT_PENDING 0x00000000 /* R---V */ +#define NV_PTIMER_INTR_0_TIMER_PENDING 0x00000001 /* R---V */ +#define NV_PTIMER_INTR_0_TIMER_RESET 0x00000001 /* -W--C */ +#define NV_PTIMER_INTR_EN_0 0x00009140 /* RW-4R */ +#define NV_PTIMER_INTR_EN_0_TIMER 1:1 /* RWIVF */ +#define NV_PTIMER_INTR_EN_0_TIMER_DISABLED 0x00000000 /* RWI-V */ +#define NV_PTIMER_INTR_EN_0_TIMER_ENABLED 0x00000001 /* RW--V */ +#define NV_PTIMER_TIMER_0 0x00009428 /* RW-4R */ +#endif // __gm200_dev_timer_h__ diff --git a/src/common/inc/swref/published/nv_arch.h b/src/common/inc/swref/published/nv_arch.h new file mode 100644 index 000000000..99db0eee6 --- /dev/null +++ b/src/common/inc/swref/published/nv_arch.h @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_ARCH_PUBLISHED_H +#define NV_ARCH_PUBLISHED_H + +// high byte indicates GPU-SERIES, as defined in Gpus.pm. +#define NVGPU_ARCHITECTURE_SERIES 31:24 +#define NVGPU_ARCHITECTURE_SERIES_CLASSIC 0x00 +#define NVGPU_ARCHITECTURE_SERIES_SIMULATION 0x00 // XXX - really should be distinct from CLASSIC_GPUS +#define NVGPU_ARCHITECTURE_SERIES_TEGRA 0xE0 +#define NVGPU_ARCHITECTURE_ARCH 23:0 + +#define GPU_ARCHITECTURE(series, arch) (DRF_DEF(GPU, _ARCHITECTURE, _SERIES, series) | \ + DRF_NUM(GPU, _ARCHITECTURE, _ARCH, arch)) + +// +// Architecture constants. 
+// +#define GPU_ARCHITECTURE_MAXWELL GPU_ARCHITECTURE(_CLASSIC, 0x0110) +#define GPU_ARCHITECTURE_MAXWELL2 GPU_ARCHITECTURE(_CLASSIC, 0x0120) +#define GPU_ARCHITECTURE_PASCAL GPU_ARCHITECTURE(_CLASSIC, 0x0130) +#define GPU_ARCHITECTURE_VOLTA GPU_ARCHITECTURE(_CLASSIC, 0x0140) +#define GPU_ARCHITECTURE_VOLTA2 GPU_ARCHITECTURE(_CLASSIC, 0x0150) +#define GPU_ARCHITECTURE_TURING GPU_ARCHITECTURE(_CLASSIC, 0x0160) +#define GPU_ARCHITECTURE_AMPERE GPU_ARCHITECTURE(_CLASSIC, 0x0170) + +#define GPU_ARCHITECTURE_T12X GPU_ARCHITECTURE(_TEGRA, 0x0040) +#define GPU_ARCHITECTURE_T13X GPU_ARCHITECTURE(_TEGRA, 0x0013) +#define GPU_ARCHITECTURE_T21X GPU_ARCHITECTURE(_TEGRA, 0x0021) +#define GPU_ARCHITECTURE_T18X GPU_ARCHITECTURE(_TEGRA, 0x0018) +#define GPU_ARCHITECTURE_T19X GPU_ARCHITECTURE(_TEGRA, 0x0019) +#define GPU_ARCHITECTURE_T23X GPU_ARCHITECTURE(_TEGRA, 0x0023) + +#define GPU_ARCHITECTURE_SIMS GPU_ARCHITECTURE(_SIMULATION, 0x01f0) // eg: AMODEL + +// +// Implementation constants. +// These must be unique within a single architecture. +// + +#define GPU_IMPLEMENTATION_GM108 0x08 +#define GPU_IMPLEMENTATION_GM107 0x07 +#define GPU_IMPLEMENTATION_GM200 0x00 +#define GPU_IMPLEMENTATION_GM204 0x04 +#define GPU_IMPLEMENTATION_GM206 0x06 + +#define GPU_IMPLEMENTATION_GP100 0x00 +#define GPU_IMPLEMENTATION_GP102 0x02 +#define GPU_IMPLEMENTATION_GP104 0x04 +#define GPU_IMPLEMENTATION_GP106 0x06 +#define GPU_IMPLEMENTATION_GP107 0x07 +#define GPU_IMPLEMENTATION_GP108 0x08 + +#define GPU_IMPLEMENTATION_GV100 0x00 +#define GPU_IMPLEMENTATION_GV11B 0x0B + +#define GPU_IMPLEMENTATION_TU102 0x02 +#define GPU_IMPLEMENTATION_TU104 0x04 +#define GPU_IMPLEMENTATION_TU106 0x06 +#define GPU_IMPLEMENTATION_TU116 0x08 // TU116 has implementation ID 8 in HW +#define GPU_IMPLEMENTATION_TU117 0x07 + +#define GPU_IMPLEMENTATION_GA100 0x00 +#define GPU_IMPLEMENTATION_GA102 0x02 +#define GPU_IMPLEMENTATION_GA103 0x03 +#define GPU_IMPLEMENTATION_GA104 0x04 +#define GPU_IMPLEMENTATION_GA106 0x06 +#define GPU_IMPLEMENTATION_GA107 0x07 +#define GPU_IMPLEMENTATION_GA102F 0x0F + +#define GPU_IMPLEMENTATION_T124 0x00 +#define GPU_IMPLEMENTATION_T132 0x00 +#define GPU_IMPLEMENTATION_T210 0x00 +#define GPU_IMPLEMENTATION_T186 0x00 +#define GPU_IMPLEMENTATION_T194 0x00 +#define GPU_IMPLEMENTATION_T234 0x04 +#define GPU_IMPLEMENTATION_T234D 0x05 + +/* SIMS gpus */ +#define GPU_IMPLEMENTATION_AMODEL 0x00 + +#endif // NV_ARCH_PUBLISHED_H diff --git a/src/common/inc/swref/published/nv_ref.h b/src/common/inc/swref/published/nv_ref.h new file mode 100644 index 000000000..cf05badc3 --- /dev/null +++ b/src/common/inc/swref/published/nv_ref.h @@ -0,0 +1,154 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +/***************************************************************************\ +* * +* Hardware Reference Manual extracted defines. * +* - Defines in this file are approved by the HW team for publishing. * +* * +\***************************************************************************/ +#ifndef NV_REF_PUBLISHED_H +#define NV_REF_PUBLISHED_H + + + +// +// These registers can be accessed by chip-independent code as +// well as chip-dependent code. +// +// NOTE: DO NOT ADD TO THIS FILE. CREATE CHIP SPECIFIC HAL ROUTINES INSTEAD. +// + +/* + * Standard PCI config space header defines. + * The defines here cannot change across generations. + */ + +/* dev_nv_xve.ref */ +/* PBUS field defines converted to NV_CONFIG field defines */ +#define NV_CONFIG_PCI_NV_0 0x00000000 /* R--4R */ +#define NV_CONFIG_PCI_NV_0_VENDOR_ID 15:0 /* C--UF */ +#define NV_CONFIG_PCI_NV_0_VENDOR_ID_NVIDIA 0x000010DE /* C---V */ +#define NV_CONFIG_PCI_NV_0_DEVICE_ID 31:16 /* R--UF */ +#define NV_CONFIG_PCI_NV_1 0x00000004 /* RW-4R */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE 0:0 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_IO_SPACE_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE 1:1 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER 2:2 /* RWIVF */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER_DISABLED 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_1_BUS_MASTER_ENABLED 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_2 0x00000008 /* R--4R */ +#define NV_CONFIG_PCI_NV_2_REVISION_ID 7:0 /* C--UF */ +#define NV_CONFIG_PCI_NV_2_CLASS_CODE 31:8 /* C--VF */ +#define NV_CONFIG_PCI_NV_3 0x0000000C /* RW-4R */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER 15:11 /* RWIUF */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_0_CLOCKS 0x00000000 /* RWI-V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_8_CLOCKS 0x00000001 /* RW--V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_240_CLOCKS 0x0000001E /* RW--V */ +#define NV_CONFIG_PCI_NV_3_LATENCY_TIMER_248_CLOCKS 0x0000001F /* RW--V */ +#define NV_CONFIG_PCI_NV_4 0x00000010 /* RW-4R */ +#define NV_CONFIG_PCI_NV_5 0x00000014 /* RW-4R */ +#define NV_CONFIG_PCI_NV_5_ADDRESS_TYPE 2:1 /* C--VF */ +#define NV_CONFIG_PCI_NV_5_ADDRESS_TYPE_64_BIT 0x00000002 /* ----V */ +#define NV_CONFIG_PCI_NV_6 0x00000018 /* RW-4R */ +#define NV_CONFIG_PCI_NV_7(i) (0x0000001C+(i)*4) /* R--4A */ +#define NV_CONFIG_PCI_NV_11 0x0000002C /* R--4R */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_VENDOR_ID 15:0 /* R--UF */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_VENDOR_ID_NONE 0x00000000 /* R---V */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID 31:16 /* R--UF */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID_NONE 0x00000000 /* R---V */ +#define NV_CONFIG_PCI_NV_11_SUBSYSTEM_ID_TNT2PRO 0x0000001f +#define NV_CONFIG_PCI_NV_12 0x00000030 /* RW-4R */ +#define NV_CONFIG_PCI_NV_13 0x00000034 /* RW-4R */ +#define NV_CONFIG_PCI_NV_13_CAP_PTR 
7:0 /* C--VF */ +#define NV_CONFIG_PCI_NV_14 0x00000038 /* R--4R */ +#define NV_CONFIG_PCI_NV_15 0x0000003C /* RW-4R */ +#define NV_CONFIG_PCI_NV_15_INTR_LINE 7:0 /* RWIVF */ +/* + * These defines are the correct fields to be used to extract the + * NEXT_PTR and CAP_ID from any PCI capability structure, + * but they still have NV_24 in the name because they were from the + * first PCI capability structure in the capability list in older GPUs. + */ +#define NV_CONFIG_PCI_NV_24_NEXT_PTR 15:8 /* R--VF */ +#define NV_CONFIG_PCI_NV_24_CAP_ID 7:0 /* C--VF */ + +/* + * Standard registers present on NVIDIA chips used to ID the chip. + * Very stable across generations. + */ + +/* dev_master.ref */ +#define NV_PMC_BOOT_0 0x00000000 /* R--4R */ +#define NV_PMC_BOOT_0_MINOR_REVISION 3:0 /* R--VF */ +#define NV_PMC_BOOT_0_MAJOR_REVISION 7:4 /* R--VF */ +#define NV_PMC_BOOT_0_IMPLEMENTATION 23:20 /* R--VF */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_0 0x00000000 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_1 0x00000001 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_2 0x00000002 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_3 0x00000003 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_4 0x00000004 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_5 0x00000005 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_6 0x00000006 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_7 0x00000007 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_8 0x00000008 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_9 0x00000009 /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_A 0x0000000A /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_B 0x0000000B /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_C 0x0000000C /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_D 0x0000000D /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_E 0x0000000E /* R---V */ +#define NV_PMC_BOOT_0_IMPLEMENTATION_F 0x0000000F /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE 28:24 /* R--VF */ +#define NV_PMC_BOOT_0_ARCHITECTURE_TU100 0x00000016 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_TU110 0x00000016 /* R---V */ +#define NV_PMC_BOOT_0_ARCHITECTURE_GA100 0x00000017 /* R---V */ + +#define NV_PMC_BOOT_1 0x00000004 /* R--4R */ +#define NV_PMC_BOOT_1_VGPU8 8:8 /* R--VF */ +#define NV_PMC_BOOT_1_VGPU8_REAL 0x00000000 /* R-I-V */ +#define NV_PMC_BOOT_1_VGPU8_VIRTUAL 0x00000001 /* R---V */ +#define NV_PMC_BOOT_1_VGPU16 16:16 /* R--VF */ +#define NV_PMC_BOOT_1_VGPU16_REAL 0x00000000 /* R-I-V */ +#define NV_PMC_BOOT_1_VGPU16_VIRTUAL 0x00000001 /* R---V */ +#define NV_PMC_BOOT_1_VGPU 17:16 /* C--VF */ +#define NV_PMC_BOOT_1_VGPU_REAL 0x00000000 /* C---V */ +#define NV_PMC_BOOT_1_VGPU_PV 0x00000001 /* ----V */ +#define NV_PMC_BOOT_1_VGPU_VF 0x00000002 /* ----V */ +#define NV_PMC_BOOT_42 0x00000A00 /* R--4R */ +#define NV_PMC_BOOT_42_MINOR_EXTENDED_REVISION 11:8 /* R-XVF */ +#define NV_PMC_BOOT_42_MINOR_REVISION 15:12 /* R-XVF */ +#define NV_PMC_BOOT_42_MAJOR_REVISION 19:16 /* R-XVF */ +#define NV_PMC_BOOT_42_IMPLEMENTATION 23:20 /* */ +#define NV_PMC_BOOT_42_ARCHITECTURE 28:24 /* */ +#define NV_PMC_BOOT_42_CHIP_ID 28:20 /* R-XVF */ + +/* dev_arapb_misc.h */ +#define NV_PAPB_MISC_GP_HIDREV_CHIPID 15:8 /* ----F */ +#define NV_PAPB_MISC_GP_HIDREV_MAJORREV 7:4 /* ----F */ + +#endif // NV_REF_PUBLISHED_H diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_egress_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_egress_ip.h new file mode 100644 index 000000000..f64ad800c --- /dev/null +++ 
b/src/common/inc/swref/published/nvswitch/lr10/dev_egress_ip.h @@ -0,0 +1,688 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_egress_ip_h__ +#define __lr10_dev_egress_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_EGRESS_ERR_LOG_EN_0 0x00004404 /* RW-4R */ +#define NV_EGRESS_ERR_LOG_EN_0_EGRESSBUFERR 0:0 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_EGRESSBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_EGRESSBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_EGRESSBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_PKTROUTEERR 1:1 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_PKTROUTEERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_PKTROUTEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_PKTROUTEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_SEQIDERR 2:2 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_SEQIDERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_SEQIDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_SEQIDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_RAM_OUT_HDR_ECC_DBE_ERR 6:6 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_RAM_OUT_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* 
RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NCISOCCREDITOVFL 7:7 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_NCISOCCREDITOVFL__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NCISOCCREDITOVFL_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_NCISOCCREDITOVFL_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_REQTGTIDMISMATCHERR 8:8 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_REQTGTIDMISMATCHERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_REQTGTIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_REQTGTIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_RSPREQIDMISMATCHERR 9:9 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_RSPREQIDMISMATCHERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_RSPREQIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_RSPREQIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_URRSPERR 10:10 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_URRSPERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_URRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_URRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_PRIVRSPERR 11:11 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_PRIVRSPERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_PRIVRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_PRIVRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_HWRSPERR 12:12 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_HWRSPERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_HWRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_HWRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_PARITY_ERR 13:13 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_HDR_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NCISOC_CREDIT_PARITY_ERR 14:14 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_NCISOC_CREDIT_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NCISOC_CREDIT_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_NCISOC_CREDIT_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR 15:15 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_CREDIT_TIME_OUT_ERR 16:16 /* RWEVF */ +#define NV_EGRESS_ERR_LOG_EN_0_CREDIT_TIME_OUT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_LOG_EN_0_CREDIT_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_LOG_EN_0_CREDIT_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0 0x00004410 /* RW-4R */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_EGRESSBUFERR 0:0 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_EGRESSBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_EGRESSBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define 
NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_EGRESSBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_PKTROUTEERR 1:1 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_PKTROUTEERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_PKTROUTEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_PKTROUTEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_SEQIDERR 2:2 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_SEQIDERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_SEQIDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_SEQIDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR 6:6 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOCCREDITOVFL 7:7 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOCCREDITOVFL__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOCCREDITOVFL_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOCCREDITOVFL_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_REQTGTIDMISMATCHERR 8:8 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_REQTGTIDMISMATCHERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_REQTGTIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_REQTGTIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RSPREQIDMISMATCHERR 9:9 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RSPREQIDMISMATCHERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RSPREQIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_RSPREQIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_URRSPERR 10:10 /* RWEVF */ +#define 
NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_URRSPERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_URRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_URRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_PRIVRSPERR 11:11 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_PRIVRSPERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_PRIVRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_PRIVRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_HWRSPERR 12:12 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_HWRSPERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_HWRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_HWRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_PARITY_ERR 13:13 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_PARITY_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_HDR_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR 14:14 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR 15:15 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_CREDIT_TIME_OUT_ERR 16:16 /* RWEVF */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_CREDIT_TIME_OUT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_CREDIT_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CORRECTABLE_REPORT_EN_0_CREDIT_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_EGRESS_ERR_TIMESTAMP_LOG 0x00004450 /* R--4R */ +#define NV_EGRESS_ERR_TIMESTAMP_LOG_TIMESTAMP 23:0 /* R-IVF */ +#define NV_EGRESS_ERR_TIMESTAMP_LOG_TIMESTAMP_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_VALID 0x0000444c /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_VALID_HEADERVALID0 0:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_VALID_HEADERVALID0_INVALID 0x00000000 /* R-D-V */ +#define NV_EGRESS_ERR_HEADER_LOG_VALID_HEADERVALID0_VALID 0x00000001 /* R---V */ + +#define NV_EGRESS_ERR_MISC_LOG_0 0x00004454 /* R--4R */ +#define NV_EGRESS_ERR_MISC_LOG_0_SPORT 5:0 /* R-IVF */ +#define NV_EGRESS_ERR_MISC_LOG_0_SPORT_INIT 0x00000000 /* R-I-V */ +#define NV_EGRESS_ERR_MISC_LOG_0_ENCODEDVC 10:8 /* R-IVF */ +#define NV_EGRESS_ERR_MISC_LOG_0_ENCODEDVC_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_0 0x00004420 /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_0_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_0_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_1 0x00004424 /* R--4R */ +#define 
NV_EGRESS_ERR_HEADER_LOG_1_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_1_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_2 0x00004428 /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_2_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_2_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_3 0x0000442c /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_3_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_3_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_4 0x00004430 /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_4_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_4_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_5 0x00004434 /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_5_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_5_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_6 0x00004438 /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_6_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_6_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_7 0x0000443c /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_7_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_7_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_8 0x00004440 /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_8_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_8_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_9 0x00004444 /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_9_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_9_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_HEADER_LOG_10 0x00004448 /* R--4R */ +#define NV_EGRESS_ERR_HEADER_LOG_10_DW 31:0 /* R-DVF */ +#define NV_EGRESS_ERR_HEADER_LOG_10_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_ADDRESS 0x00004494 /* R--4R */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_ADDRESS_ERROR_ADDRESS 7:0 /* R-DVF */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* R-D-V */ + +#define NV_EGRESS_ERR_FIRST_0 0x0000441c /* RW-4R */ +#define NV_EGRESS_ERR_FIRST_0_EGRESSBUFERR 0:0 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_EGRESSBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_EGRESSBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_PKTROUTEERR 1:1 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_PKTROUTEERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_PKTROUTEERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_SEQIDERR 2:2 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_SEQIDERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_SEQIDERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_ECC_LIMIT_ERR 3:3 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_ECC_DBE_ERR 4:4 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_RAM_OUT_HDR_ECC_LIMIT_ERR 5:5 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_RAM_OUT_HDR_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_RAM_OUT_HDR_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_RAM_OUT_HDR_ECC_DBE_ERR 6:6 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_RAM_OUT_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define 
NV_EGRESS_ERR_FIRST_0_RAM_OUT_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_NCISOCCREDITOVFL 7:7 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_NCISOCCREDITOVFL_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_NCISOCCREDITOVFL_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_REQTGTIDMISMATCHERR 8:8 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_REQTGTIDMISMATCHERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_REQTGTIDMISMATCHERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_RSPREQIDMISMATCHERR 9:9 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_RSPREQIDMISMATCHERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_RSPREQIDMISMATCHERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_URRSPERR 10:10 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_URRSPERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_URRSPERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_PRIVRSPERR 11:11 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_PRIVRSPERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_PRIVRSPERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_HWRSPERR 12:12 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_HWRSPERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_HWRSPERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_PARITY_ERR 13:13 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_HDR_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_NCISOC_CREDIT_PARITY_ERR 14:14 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_NCISOC_CREDIT_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_NCISOC_CREDIT_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_FLITTYPE_MISMATCH_ERR 15:15 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_FLITTYPE_MISMATCH_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_NXBAR_FLITTYPE_MISMATCH_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FIRST_0_CREDIT_TIME_OUT_ERR 16:16 /* RWDVF */ +#define NV_EGRESS_ERR_FIRST_0_CREDIT_TIME_OUT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_FIRST_0_CREDIT_TIME_OUT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_EGRESS_ERR_CONTAIN_EN_0 0x00004414 /* RW-4R */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_EGRESSBUFERR 0:0 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_EGRESSBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_EGRESSBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_EGRESSBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_PKTROUTEERR 1:1 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_PKTROUTEERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_PKTROUTEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_PKTROUTEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_SEQIDERR 2:2 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_SEQIDERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_SEQIDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_SEQIDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define 
NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RAM_OUT_HDR_ECC_DBE_ERR 6:6 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RAM_OUT_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NCISOCCREDITOVFL 7:7 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NCISOCCREDITOVFL__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NCISOCCREDITOVFL_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NCISOCCREDITOVFL_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_REQTGTIDMISMATCHERR 8:8 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_REQTGTIDMISMATCHERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_REQTGTIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_REQTGTIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RSPREQIDMISMATCHERR 9:9 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RSPREQIDMISMATCHERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RSPREQIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_RSPREQIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_URRSPERR 10:10 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_URRSPERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_URRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_URRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_PRIVRSPERR 11:11 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_PRIVRSPERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_PRIVRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_PRIVRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_HWRSPERR 12:12 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_HWRSPERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_HWRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_HWRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_PARITY_ERR 13:13 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_HDR_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NCISOC_CREDIT_PARITY_ERR 14:14 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NCISOC_CREDIT_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NCISOC_CREDIT_PARITY_ERR_DISABLE 0x00000000 
/* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NCISOC_CREDIT_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR 15:15 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_CREDIT_TIME_OUT_ERR 16:16 /* RWEVF */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_CREDIT_TIME_OUT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_CREDIT_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_CONTAIN_EN_0_CREDIT_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_EGRESS_BUFFER_POINTERS0 0x00004310 /* R--4R */ +#define NV_EGRESS_BUFFER_POINTERS0_WRITE 7:0 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS0_WRITE_INIT 0x00000000 /* R-E-V */ +#define NV_EGRESS_BUFFER_POINTERS0_READ 23:16 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS0_READ_INIT 0x00000000 /* R-E-V */ + +#define NV_EGRESS_BUFFER_POINTERS1 0x00004314 /* R--4R */ +#define NV_EGRESS_BUFFER_POINTERS1_WRITE 7:0 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS1_WRITE_INIT 0x00000000 /* R-E-V */ +#define NV_EGRESS_BUFFER_POINTERS1_READ 23:16 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS1_READ_INIT 0x00000000 /* R-E-V */ + +#define NV_EGRESS_BUFFER_POINTERS2 0x00004318 /* R--4R */ +#define NV_EGRESS_BUFFER_POINTERS2_WRITE 7:0 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS2_WRITE_INIT 0x00000000 /* R-E-V */ +#define NV_EGRESS_BUFFER_POINTERS2_READ 23:16 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS2_READ_INIT 0x00000000 /* R-E-V */ + +#define NV_EGRESS_BUFFER_POINTERS3 0x0000431c /* R--4R */ +#define NV_EGRESS_BUFFER_POINTERS3_WRITE 7:0 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS3_WRITE_INIT 0x00000000 /* R-E-V */ +#define NV_EGRESS_BUFFER_POINTERS3_READ 23:16 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS3_READ_INIT 0x00000000 /* R-E-V */ + +#define NV_EGRESS_BUFFER_POINTERS4 0x00004320 /* R--4R */ +#define NV_EGRESS_BUFFER_POINTERS4_WRITE 7:0 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS4_WRITE_INIT 0x00000000 /* R-E-V */ +#define NV_EGRESS_BUFFER_POINTERS4_READ 23:16 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS4_READ_INIT 0x00000000 /* R-E-V */ + +#define NV_EGRESS_BUFFER_POINTERS5 0x00004324 /* R--4R */ +#define NV_EGRESS_BUFFER_POINTERS5_WRITE 7:0 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS5_WRITE_INIT 0x00000040 /* R-E-V */ +#define NV_EGRESS_BUFFER_POINTERS5_READ 23:16 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS5_READ_INIT 0x00000040 /* R-E-V */ + +#define NV_EGRESS_BUFFER_POINTERS6 0x00004328 /* R--4R */ +#define NV_EGRESS_BUFFER_POINTERS6_WRITE 7:0 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS6_WRITE_INIT 0x00000080 /* R-E-V */ +#define NV_EGRESS_BUFFER_POINTERS6_READ 23:16 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS6_READ_INIT 0x00000080 /* R-E-V */ + +#define NV_EGRESS_BUFFER_POINTERS7 0x0000432c /* R--4R */ +#define NV_EGRESS_BUFFER_POINTERS7_WRITE 7:0 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS7_WRITE_INIT 0x000000c0 /* R-E-V */ +#define NV_EGRESS_BUFFER_POINTERS7_READ 23:16 /* R-EVF */ +#define NV_EGRESS_BUFFER_POINTERS7_READ_INIT 0x000000c0 /* R-E-V */ + +#define NV_EGRESS_ERR_STATUS_0 0x00004400 /* RW-4R */ +#define NV_EGRESS_ERR_STATUS_0_EGRESSBUFERR 0:0 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_EGRESSBUFERR_NONE 0x00000000 /* RWD-V */ +#define 
NV_EGRESS_ERR_STATUS_0_EGRESSBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_PKTROUTEERR 1:1 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_PKTROUTEERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_PKTROUTEERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_SEQIDERR 2:2 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_SEQIDERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_SEQIDERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_ECC_LIMIT_ERR 3:3 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_ECC_DBE_ERR 4:4 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_RAM_OUT_HDR_ECC_LIMIT_ERR 5:5 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_RAM_OUT_HDR_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_RAM_OUT_HDR_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_RAM_OUT_HDR_ECC_DBE_ERR 6:6 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_RAM_OUT_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_RAM_OUT_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_NCISOCCREDITOVFL 7:7 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_NCISOCCREDITOVFL_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_NCISOCCREDITOVFL_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_REQTGTIDMISMATCHERR 8:8 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_REQTGTIDMISMATCHERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_REQTGTIDMISMATCHERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_RSPREQIDMISMATCHERR 9:9 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_RSPREQIDMISMATCHERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_RSPREQIDMISMATCHERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_URRSPERR 10:10 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_URRSPERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_URRSPERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_PRIVRSPERR 11:11 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_PRIVRSPERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_PRIVRSPERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_HWRSPERR 12:12 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_HWRSPERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_HWRSPERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_PARITY_ERR 13:13 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_HDR_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_NCISOC_CREDIT_PARITY_ERR 14:14 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_NCISOC_CREDIT_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_NCISOC_CREDIT_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_FLITTYPE_MISMATCH_ERR 15:15 /* RWDVF */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_FLITTYPE_MISMATCH_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_NXBAR_FLITTYPE_MISMATCH_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_STATUS_0_CREDIT_TIME_OUT_ERR 16:16 /* RWDVF */ +#define 
NV_EGRESS_ERR_STATUS_0_CREDIT_TIME_OUT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_EGRESS_ERR_STATUS_0_CREDIT_TIME_OUT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_EGRESS_NCISOC_CREDIT0 0x00004370 /* R--4R */ +#define NV_EGRESS_NCISOC_CREDIT0_NUM 9:0 /* R-IVF */ +#define NV_EGRESS_NCISOC_CREDIT0_NUM_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_NCISOC_CREDIT1 0x00004374 /* R--4R */ +#define NV_EGRESS_NCISOC_CREDIT1_NUM 9:0 /* R-IVF */ +#define NV_EGRESS_NCISOC_CREDIT1_NUM_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_NCISOC_CREDIT2 0x00004378 /* R--4R */ +#define NV_EGRESS_NCISOC_CREDIT2_NUM 9:0 /* R-IVF */ +#define NV_EGRESS_NCISOC_CREDIT2_NUM_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_NCISOC_CREDIT3 0x0000437c /* R--4R */ +#define NV_EGRESS_NCISOC_CREDIT3_NUM 9:0 /* R-IVF */ +#define NV_EGRESS_NCISOC_CREDIT3_NUM_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_NCISOC_CREDIT4 0x00004380 /* R--4R */ +#define NV_EGRESS_NCISOC_CREDIT4_NUM 9:0 /* R-IVF */ +#define NV_EGRESS_NCISOC_CREDIT4_NUM_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_NCISOC_CREDIT5 0x00004384 /* R--4R */ +#define NV_EGRESS_NCISOC_CREDIT5_NUM 9:0 /* R-IVF */ +#define NV_EGRESS_NCISOC_CREDIT5_NUM_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_NCISOC_CREDIT6 0x00004388 /* R--4R */ +#define NV_EGRESS_NCISOC_CREDIT6_NUM 9:0 /* R-IVF */ +#define NV_EGRESS_NCISOC_CREDIT6_NUM_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_NCISOC_CREDIT7 0x0000438c /* R--4R */ +#define NV_EGRESS_NCISOC_CREDIT7_NUM 9:0 /* R-IVF */ +#define NV_EGRESS_NCISOC_CREDIT7_NUM_INIT 0x00000000 /* R-I-V */ + +#define NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER 0x00004480 /* RW-4R */ +#define NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT 0x00004484 /* RW-4R */ +#define NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER 0x0000448c /* RW-4R */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT 0x00004490 /* RW-4R */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_EGRESS_CTRL 0x00004040 /* RW-4R */ +#define NV_EGRESS_CTRL_DESTINATIONIDCHECKENB 0:0 /* RWEVF */ +#define NV_EGRESS_CTRL_DESTINATIONIDCHECKENB_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_CTRL_DESTINATIONIDCHECKENB_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_CTRL_DESTINATIONIDCHECKENB__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_CTRL_ZERO_OUT_SWXATTR_DISABLE 1:1 /* RWEVF */ +#define NV_EGRESS_CTRL_ZERO_OUT_SWXATTR_DISABLE_OFF 0x00000000 /* RWE-V */ +#define NV_EGRESS_CTRL_ZERO_OUT_SWXATTR_DISABLE_SET 0x00000001 /* RW--V */ +#define NV_EGRESS_CTRL_CTO_ENB 9:9 /* RWEVF */ +#define NV_EGRESS_CTRL_CTO_ENB_ON 0x00000001 /* RW--V */ +#define NV_EGRESS_CTRL_CTO_ENB_OFF 0x00000000 /* RWE-V */ +#define NV_EGRESS_CTRL_CTO_ENB__PROD 0x00000001 /* RW--V */ + +#define 
NV_EGRESS_CTO_TIMER_LIMIT 0x00004048 /* RW-4R */ +#define NV_EGRESS_CTO_TIMER_LIMIT_LIMIT 19:0 /* RWEVF */ +#define NV_EGRESS_CTO_TIMER_LIMIT_LIMIT_INIT 0x000fffff /* RWE-V */ +#define NV_EGRESS_CTO_TIMER_LIMIT_LIMIT__PROD 0x00004000 /* RW--V */ + +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0 0x00004408 /* RW-4R */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_EGRESSBUFERR 0:0 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_EGRESSBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_EGRESSBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_EGRESSBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_PKTROUTEERR 1:1 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_PKTROUTEERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_PKTROUTEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_PKTROUTEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_SEQIDERR 2:2 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_SEQIDERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_SEQIDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_SEQIDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR 6:6 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NCISOCCREDITOVFL 7:7 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NCISOCCREDITOVFL__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NCISOCCREDITOVFL_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NCISOCCREDITOVFL_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_REQTGTIDMISMATCHERR 8:8 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_REQTGTIDMISMATCHERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_REQTGTIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_REQTGTIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RSPREQIDMISMATCHERR 9:9 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RSPREQIDMISMATCHERR__PROD 
0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RSPREQIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_RSPREQIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_URRSPERR 10:10 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_URRSPERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_URRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_URRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_PRIVRSPERR 11:11 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_PRIVRSPERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_PRIVRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_PRIVRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_HWRSPERR 12:12 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_HWRSPERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_HWRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_HWRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_PARITY_ERR 13:13 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_HDR_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR 14:14 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR 15:15 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_CREDIT_TIME_OUT_ERR 16:16 /* RWEVF */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_CREDIT_TIME_OUT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_CREDIT_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_FATAL_REPORT_EN_0_CREDIT_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0 0x0000440c /* RW-4R */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_EGRESSBUFERR 0:0 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_EGRESSBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_EGRESSBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_EGRESSBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_PKTROUTEERR 1:1 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_PKTROUTEERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_PKTROUTEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_PKTROUTEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_SEQIDERR 2:2 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_SEQIDERR__PROD 0x00000000 /* RW--V */ +#define 
NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_SEQIDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_SEQIDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR 6:6 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RAM_OUT_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOCCREDITOVFL 7:7 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOCCREDITOVFL__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOCCREDITOVFL_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOCCREDITOVFL_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_REQTGTIDMISMATCHERR 8:8 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_REQTGTIDMISMATCHERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_REQTGTIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_REQTGTIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RSPREQIDMISMATCHERR 9:9 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RSPREQIDMISMATCHERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RSPREQIDMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_RSPREQIDMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_URRSPERR 10:10 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_URRSPERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_URRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_URRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_PRIVRSPERR 11:11 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_PRIVRSPERR__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_PRIVRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_PRIVRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_HWRSPERR 12:12 /* RWEVF */ +#define 
NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_HWRSPERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_HWRSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_HWRSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_PARITY_ERR 13:13 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_PARITY_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_HDR_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR 14:14 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_CREDIT_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR 15:15 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_NXBAR_FLITTYPE_MISMATCH_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_CREDIT_TIME_OUT_ERR 16:16 /* RWEVF */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_CREDIT_TIME_OUT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_CREDIT_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_NON_FATAL_REPORT_EN_0_CREDIT_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_EGRESS_ERR_ECC_CTRL 0x00004470 /* RW-4R */ +#define NV_EGRESS_ERR_ECC_CTRL_NXBAR_ECC_ENABLE 0:0 /* RWEVF */ +#define NV_EGRESS_ERR_ECC_CTRL_NXBAR_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_NXBAR_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_ECC_CTRL_NXBAR_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_NXBAR_PARITY_ENABLE 1:1 /* RWEVF */ +#define NV_EGRESS_ERR_ECC_CTRL_NXBAR_PARITY_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_NXBAR_PARITY_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_ECC_CTRL_NXBAR_PARITY_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_RAM_OUT_ECC_ENABLE 8:8 /* RWEVF */ +#define NV_EGRESS_ERR_ECC_CTRL_RAM_OUT_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_RAM_OUT_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_ECC_CTRL_RAM_OUT_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_NCISOC_ECC_ENABLE 9:9 /* RWEVF */ +#define NV_EGRESS_ERR_ECC_CTRL_NCISOC_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_NCISOC_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_ECC_CTRL_NCISOC_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE 10:10 /* RWEVF */ +#define NV_EGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_EGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_EGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE__PROD 0x00000001 /* RW--V */ + +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_ADDRESS_VALID 0x00004498 /* R--4R */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define 
NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ +#endif // __lr10_dev_egress_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_ext_devices.h b/src/common/inc/swref/published/nvswitch/lr10/dev_ext_devices.h new file mode 100644 index 000000000..405c76f96 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_ext_devices.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_ext_devices_h__ +#define __lr10_dev_ext_devices_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PROM_DATA(i) (0x00300000+(i)) /* RW-1A */ +#define NV_PROM_DATA__SIZE_1 1048576 /* */ +#define NV_PROM_DATA_VALUE 7:0 /* RW-VF */ +#endif // __lr10_dev_ext_devices_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_falcon_v4.h b/src/common/inc/swref/published/nvswitch/lr10/dev_falcon_v4.h new file mode 100644 index 000000000..ef9a6e420 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_falcon_v4.h @@ -0,0 +1,461 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __lr10_dev_falcon_v4_h__ +#define __lr10_dev_falcon_v4_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER(i) (0x000003e8+(i)*4) /* -W-4A */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER__SIZE_1 2 /* */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER_TRIGGER 0:0 /* -W-VF */ +#define NV_PFALCON_FALCON_INTR_RETRIGGER_TRIGGER_TRUE 0x00000001 /* -W--V */ + +#define NV_PFALCON_FALCON_HWCFG1 0x0000012c /* R--4R */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV 3:0 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_INIT 0x00000006 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_1_0 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_2_0 0x00000002 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_3_0 0x00000003 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_4_0 0x00000004 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_5_0 0x00000005 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_6_0 0x00000006 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_7_0 0x00000007 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_SECURITY_MODEL 5:4 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_SECURITY_MODEL_INIT 0x00000003 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_SECURITY_MODEL_NONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_SECURITY_MODEL_LIGHT 0x00000002 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_SECURITY_MODEL_HEAVY 0x00000003 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_SUBVERSION 7:6 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_SUBVERSION_INIT 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_SUBVERSION_0 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_SUBVERSION_1 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_SUBVERSION_2 0x00000002 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CORE_REV_SUBVERSION_3 0x00000003 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_IMEM_PORTS 11:8 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_IMEM_PORTS_INIT 0x00000001 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_DMEM_PORTS 15:12 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_DMEM_PORTS_INIT 0x00000001 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_TAG_WIDTH 20:16 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_TAG_WIDTH_INIT 0x00000010 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_DMEM_TAG_WIDTH 25:21 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_DMEM_TAG_WIDTH_INIT 0x00000010 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_DBG_PRIV_BUS 27:27 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_DBG_PRIV_BUS_INIT 0x00000001 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_DBG_PRIV_BUS_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_DBG_PRIV_BUS_DISABLE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CSB_SIZE_16M 28:28 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_CSB_SIZE_16M_INIT 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_CSB_SIZE_16M_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_CSB_SIZE_16M_FALSE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_PRIV_DIRECT 29:29 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_PRIV_DIRECT_INIT 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_PRIV_DIRECT_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_PRIV_DIRECT_FALSE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_DMEM_APERTURES 30:30 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_DMEM_APERTURES_INIT 0x00000001 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_DMEM_APERTURES_ENABLE 0x00000001 
/* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_DMEM_APERTURES_DISABLE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_IMEM_AUTOFILL 31:31 /* R-IVF */ +#define NV_PFALCON_FALCON_HWCFG1_IMEM_AUTOFILL_INIT 0x00000001 /* R-I-V */ +#define NV_PFALCON_FALCON_HWCFG1_IMEM_AUTOFILL_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_HWCFG1_IMEM_AUTOFILL_DISABLE 0x00000000 /* R---V */ + +#define NV_PFALCON_FALCON_IRQSSET 0x00000000 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQSSET_GPTMR 0:0 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_GPTMR_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_WDTMR 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_WDTMR_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_MTHD 2:2 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_MTHD_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_CTXSW 3:3 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_CTXSW_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_HALT 4:4 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_HALT_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_EXTERR 5:5 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_EXTERR_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_SWGEN0 6:6 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_SWGEN0_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_SWGEN1 7:7 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_SWGEN1_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_EXT 15:8 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_DMA 16:16 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_DMA_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_SHA 17:17 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_SHA_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_MEMERR 18:18 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_MEMERR_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_FALCON 19:19 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_FALCON_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_RISCV 20:20 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_RISCV_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSSET_TRACE 21:21 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSSET_TRACE_SET 0x00000001 /* -W--V */ + +#define NV_PFALCON_FALCON_TRACEPC 0x0000014c /* R--4R */ +#define NV_PFALCON_FALCON_TRACEPC_PC 23:0 /* R--VF */ + +#define NV_PFALCON_FALCON_TRACEIDX 0x00000148 /* RW-4R */ +#define NV_PFALCON_FALCON_TRACEIDX_CNT 31:24 /* R-IVF */ +#define NV_PFALCON_FALCON_TRACEIDX_CNT_INIT 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_TRACEIDX_MAXIDX 23:16 /* R--VF */ +#define NV_PFALCON_FALCON_TRACEIDX_IDX 7:0 /* RWIVF */ +#define NV_PFALCON_FALCON_TRACEIDX_IDX_INIT 0x00000000 /* RWI-V */ + +#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_MAILBOX0_DATA_INIT 0x00000000 /* RWI-V */ + +#define NV_PFALCON_FALCON_IRQSCLR 0x00000004 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQSCLR_GPTMR 0:0 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_GPTMR_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_WDTMR 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_WDTMR_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_MTHD 2:2 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_MTHD_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_CTXSW 3:3 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_CTXSW_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_HALT 4:4 /* -WXVF 
*/ +#define NV_PFALCON_FALCON_IRQSCLR_HALT_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_EXTERR 5:5 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_EXTERR_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN0 6:6 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN0_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN1 7:7 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN1_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_EXT 15:8 /* */ +#define NV_PFALCON_FALCON_IRQSCLR_DMA 16:16 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_DMA_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SHA 17:17 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SHA_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_MEMERR 18:18 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_MEMERR_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_FALCON 19:19 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_FALCON_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_RISCV 20:20 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_RISCV_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_TRACE 21:21 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_TRACE_SET 0x00000001 /* -W--V */ + +#define NV_PFALCON_FALCON_IMEMT(i) (0x00000188+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMT__SIZE_1 4 /* */ +#define NV_PFALCON_FALCON_IMEMT_TAG 15:0 /* RW-VF */ + +#define NV_PFALCON_FALCON_IMEMD(i) (0x00000184+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMD__SIZE_1 4 /* */ +#define NV_PFALCON_FALCON_IMEMD_DATA 31:0 /* RW-VF */ + +#define NV_PFALCON_FALCON_IMEMC(i) (0x00000180+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMC__SIZE_1 4 /* */ +#define NV_PFALCON_FALCON_IMEMC_OFFS 7:2 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_OFFS_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IMEMC_BLK 23:8 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_BLK_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IMEMC_AINCW 24:24 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_AINCR 25:25 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_AINCR_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IMEMC_AINCR_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_AINCR_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_SECURE 28:28 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_SECURE_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IMEMC_SEC_ATOMIC 29:29 /* R-IVF */ +#define NV_PFALCON_FALCON_IMEMC_SEC_ATOMIC_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IMEMC_SEC_ATOMIC_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IMEMC_SEC_WR_VIO 30:30 /* R-IVF */ +#define NV_PFALCON_FALCON_IMEMC_SEC_WR_VIO_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IMEMC_SEC_WR_VIO_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IMEMC_SEC_LOCK 31:31 /* R-IVF */ +#define NV_PFALCON_FALCON_IMEMC_SEC_LOCK_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IMEMC_SEC_LOCK_FALSE 0x00000000 /* R-I-V */ + +#define NV_PFALCON_FALCON_DMEMC(i) (0x000001c0+(i)*8) /* RW-4A */ +#define NV_PFALCON_FALCON_DMEMC__SIZE_1 8 /* */ +#define NV_PFALCON_FALCON_DMEMC_ADDRESS 23:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_ADDRESS_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_DMEMC_OFFS 7:2 /* RWIVF */ +#define 
NV_PFALCON_FALCON_DMEMC_OFFS_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_DMEMC_BLK 23:8 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_BLK_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_DMEMC_AINCW 24:24 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_AINCR 25:25 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_AINCR_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_DMEMC_AINCR_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_AINCR_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_SETTAG 26:26 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_SETTAG_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_DMEMC_SETTAG_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_SETTAG_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_SETLVL 27:27 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_SETLVL_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_DMEMC_SETLVL_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_SETLVL_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_VA 28:28 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_VA_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_DMEMC_VA_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_VA_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_MISS 29:29 /* R-IVF */ +#define NV_PFALCON_FALCON_DMEMC_MISS_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMEMC_MISS_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_DMEMC_MULTIHIT 30:30 /* R-IVF */ +#define NV_PFALCON_FALCON_DMEMC_MULTIHIT_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMEMC_MULTIHIT_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_DMEMC_LVLERR 31:31 /* R-IVF */ +#define NV_PFALCON_FALCON_DMEMC_LVLERR_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMEMC_LVLERR_FALSE 0x00000000 /* R-I-V */ + +#define NV_PFALCON_FALCON_DMEMD(i) (0x000001c4+(i)*8) /* RW-4A */ +#define NV_PFALCON_FALCON_DMEMD__SIZE_1 8 /* */ +#define NV_PFALCON_FALCON_DMEMD_DATA 31:0 /* RW-VF */ + +#define NV_PFALCON_FALCON_DMACTL 0x0000010c /* RW-4R */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX 0:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX_INIT 0x00000001 /* RWI-V */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING 1:1 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING_PENDING 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING 2:2 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING_PENDING 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL_DMAQ_NUM 6:3 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_SECURE_STAT 7:7 /* R--VF */ + +#define NV_PFALCON_FALCON_BOOTVEC 0x00000104 /* RW-4R */ +#define NV_PFALCON_FALCON_BOOTVEC_VEC 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_BOOTVEC_VEC_INIT 0x00000000 /* RWI-V */ + +#define NV_PFALCON_FALCON_CPUCTL 0x00000100 /* RW-4R */ +#define NV_PFALCON_FALCON_CPUCTL_IINVAL 0:0 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_IINVAL_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_IINVAL_FALSE 
0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_SRESET 2:2 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_SRESET_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_SRESET_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_HRESET 3:3 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_HRESET_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_HRESET_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED 4:4 /* R-XVF */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED_FALSE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_CPUCTL_STOPPED 5:5 /* R-XVF */ +#define NV_PFALCON_FALCON_CPUCTL_STOPPED_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_CPUCTL_STOPPED_FALSE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN 6:6 /* RWIVF */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS 0x00000130 /* -W-4R */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU_FALSE 0x00000000 /* -W--V */ + +#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_MAILBOX1_DATA_INIT 0x00000000 /* RWI-V */ + +#define NV_PFALCON_FALCON_IRQSTAT 0x00000008 /* R--4R */ +#define NV_PFALCON_FALCON_IRQSTAT_GPTMR 0:0 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_GPTMR_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_GPTMR_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_WDTMR 1:1 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_WDTMR_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_WDTMR_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_MTHD 2:2 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_MTHD_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_MTHD_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_CTXSW 3:3 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_CTXSW_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_CTXSW_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT 4:4 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_EXTERR 5:5 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_EXTERR_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_EXTERR_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0 6:6 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1 7:7 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_EXT 15:8 /* */ +#define NV_PFALCON_FALCON_IRQSTAT_DMA 16:16 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_DMA_TRUE 0x00000001 /* 
R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_DMA_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_SHA 17:17 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SHA_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SHA_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_MEMERR 18:18 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_MEMERR_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_MEMERR_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_FALCON 19:19 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_FALCON_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_FALCON_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_RISCV 20:20 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_RISCV_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_RISCV_FALSE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQSTAT_TRACE 21:21 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQSTAT_TRACE_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_TRACE_FALSE 0x00000000 /* R-I-V */ + +#define NV_PFALCON_FALCON_IRQMASK 0x00000018 /* R--4R */ +#define NV_PFALCON_FALCON_IRQMASK_GPTMR 0:0 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_GPTMR_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_GPTMR_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_WDTMR 1:1 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_WDTMR_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_WDTMR_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_MTHD 2:2 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_MTHD_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_MTHD_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_CTXSW 3:3 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_CTXSW_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_CTXSW_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_HALT 4:4 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_HALT_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_HALT_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_EXTERR 5:5 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_EXTERR_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_EXTERR_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_SWGEN0 6:6 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_SWGEN0_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_SWGEN0_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_SWGEN1 7:7 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_SWGEN1_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_SWGEN1_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_EXT 15:8 /* */ +#define NV_PFALCON_FALCON_IRQMASK_DMA 16:16 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_DMA_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_DMA_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_SHA 17:17 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_SHA_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_SHA_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_MEMERR 18:18 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_MEMERR_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_MEMERR_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_FALCON 19:19 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_FALCON_ENABLE 0x00000001 /* R---V */ +#define 
NV_PFALCON_FALCON_IRQMASK_FALCON_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_RISCV 20:20 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_RISCV_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_RISCV_DISABLE 0x00000000 /* R-I-V */ +#define NV_PFALCON_FALCON_IRQMASK_TRACE 21:21 /* R-IVF */ +#define NV_PFALCON_FALCON_IRQMASK_TRACE_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMASK_TRACE_DISABLE 0x00000000 /* R-I-V */ + +#define NV_PFALCON_FALCON_IRQDEST 0x0000001c /* RW-4R */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_GPTMR 0:0 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_GPTMR_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_GPTMR_FALCON 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_GPTMR_HOST 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_WDTMR 1:1 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_WDTMR_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_WDTMR_FALCON 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_WDTMR_HOST 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_MTHD 2:2 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_MTHD_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_MTHD_FALCON 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_MTHD_HOST 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_CTXSW 3:3 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_CTXSW_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_CTXSW_FALCON 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_CTXSW_HOST 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_HALT 4:4 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_HALT_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_HALT_FALCON 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_HALT_HOST 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_EXTERR 5:5 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_EXTERR_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_EXTERR_FALCON 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_EXTERR_HOST 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_SWGEN0 6:6 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_SWGEN0_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_SWGEN0_FALCON 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_SWGEN0_HOST 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_SWGEN1 7:7 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_SWGEN1_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_SWGEN1_FALCON 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_SWGEN1_HOST 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_HOST_EXT 15:8 /* */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_GPTMR 16:16 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_GPTMR_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_GPTMR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_GPTMR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_GPTMR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_GPTMR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_WDTMR 17:17 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_WDTMR_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_WDTMR_FALCON_IRQ0 
0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_WDTMR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_WDTMR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_WDTMR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_MTHD 18:18 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_MTHD_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_MTHD_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_MTHD_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_MTHD_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_MTHD_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_CTXSW 19:19 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_CTXSW_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_CTXSW_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_CTXSW_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_CTXSW_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_CTXSW_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_HALT 20:20 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_HALT_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_HALT_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_HALT_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_HALT_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_HALT_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_EXTERR 21:21 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_EXTERR_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_EXTERR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_EXTERR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_EXTERR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_EXTERR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN0 22:22 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN0_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN0_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN0_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN0_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN0_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN1 23:23 /* RWIVF */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN1_INIT 0x00000000 /* RWI-V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN1_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN1_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN1_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_SWGEN1_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IRQDEST_TARGET_EXT 31:24 /* */ + +#define NV_PFALCON_FALCON_OS 0x00000080 /* RW-4R */ +#define NV_PFALCON_FALCON_OS_VERSION 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_OS_VERSION_INIT 0x00000000 /* RWI-V */ +#endif // __lr10_dev_falcon_v4_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_ingress_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_ingress_ip.h new file 
mode 100644 index 000000000..90626f031 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_ingress_ip.h @@ -0,0 +1,928 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_ingress_ip_h__ +#define __lr10_dev_ingress_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_INGRESS_ERR_LOG_EN_0 0x00001404 /* RW-4R */ +#define NV_INGRESS_ERR_LOG_EN_0_CMDDECODEERR 0:0 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_CMDDECODEERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_CMDDECODEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_CMDDECODEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_REQCONTEXTMISMATCHERR 2:2 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_REQCONTEXTMISMATCHERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_REQCONTEXTMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_REQCONTEXTMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_ACLFAIL 3:3 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_ACLFAIL__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_ACLFAIL_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_ACLFAIL_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_HDR_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_HDR_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_HDR_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_INVALIDVCSET 6:6 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_INVALIDVCSET__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_INVALIDVCSET_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_INVALIDVCSET_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_ADDRBOUNDSERR 7:7 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_ADDRBOUNDSERR__PROD 0x00000001 /* RW--V */ +#define 
NV_INGRESS_ERR_LOG_EN_0_ADDRBOUNDSERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_ADDRBOUNDSERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTABCFGERR 8:8 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTABCFGERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTABCFGERR 9:9 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTABCFGERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_REMAPTAB_ECC_DBE_ERR 10:10 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_REMAPTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_REMAPTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_REMAPTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTAB_ECC_DBE_ERR 11:11 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTAB_ECC_DBE_ERR 12:12 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_PARITY_ERR 13:13 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_NCISOC_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_REMAPTAB_ECC_LIMIT_ERR 14:14 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_REMAPTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_REMAPTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_REMAPTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTAB_ECC_LIMIT_ERR 15:15 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_RIDTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTAB_ECC_LIMIT_ERR 16:16 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_RLANTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_ADDRTYPEERR 17:17 /* RWEVF */ +#define NV_INGRESS_ERR_LOG_EN_0_ADDRTYPEERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_LOG_EN_0_ADDRTYPEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_LOG_EN_0_ADDRTYPEERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0 0x00001410 /* RW-4R */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_CMDDECODEERR 0:0 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_CMDDECODEERR__PROD 0x00000000 /* RW--V */ +#define 
NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_CMDDECODEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_CMDDECODEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REQCONTEXTMISMATCHERR 2:2 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REQCONTEXTMISMATCHERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REQCONTEXTMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REQCONTEXTMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ACLFAIL 3:3 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ACLFAIL__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ACLFAIL_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ACLFAIL_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_INVALIDVCSET 6:6 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_INVALIDVCSET__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_INVALIDVCSET_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_INVALIDVCSET_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ADDRBOUNDSERR 7:7 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ADDRBOUNDSERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ADDRBOUNDSERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ADDRBOUNDSERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTABCFGERR 8:8 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTABCFGERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTABCFGERR 9:9 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTABCFGERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR 10:10 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTAB_ECC_DBE_ERR 11:11 /* RWEVF */ 
+#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTAB_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTAB_ECC_DBE_ERR 12:12 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTAB_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_PARITY_ERR 13:13 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_PARITY_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_NCISOC_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR 14:14 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR 15:15 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR 16:16 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ADDRTYPEERR 17:17 /* RWEVF */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ADDRTYPEERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ADDRTYPEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CORRECTABLE_REPORT_EN_0_ADDRTYPEERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_INGRESS_ERR_TIMESTAMP_LOG 0x00001450 /* R--4R */ +#define NV_INGRESS_ERR_TIMESTAMP_LOG_TIMESTAMP 23:0 /* R-IVF */ +#define NV_INGRESS_ERR_TIMESTAMP_LOG_TIMESTAMP_INIT 0x00000000 /* R-I-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_VALID 0x0000144c /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_VALID_HEADERVALID0 0:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_VALID_HEADERVALID0_INVALID 0x00000000 /* R-D-V */ +#define NV_INGRESS_ERR_HEADER_LOG_VALID_HEADERVALID0_VALID 0x00000001 /* R---V */ + +#define NV_INGRESS_ERR_MISC_LOG_0 0x00001454 /* R--4R */ +#define NV_INGRESS_ERR_MISC_LOG_0_SPORT 5:0 /* R-IVF */ +#define NV_INGRESS_ERR_MISC_LOG_0_SPORT_INIT 0x00000000 /* R-I-V */ +#define NV_INGRESS_ERR_MISC_LOG_0_ENCODEDVC 10:8 /* R-IVF */ +#define NV_INGRESS_ERR_MISC_LOG_0_ENCODEDVC_CREQ0 0x00000000 /* R-I-V */ +#define NV_INGRESS_ERR_MISC_LOG_0_ENCODEDVC_RSP0 0x00000005 /* R---V */ +#define NV_INGRESS_ERR_MISC_LOG_0_ENCODEDVC_CREQ1 0x00000006 /* R---V */ +#define 
NV_INGRESS_ERR_MISC_LOG_0_ENCODEDVC_RSP1 0x00000007 /* R---V */ + +#define NV_INGRESS_ERR_HEADER_LOG_0 0x00001420 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_0_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_0_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_1 0x00001424 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_1_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_1_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_2 0x00001428 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_2_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_2_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_3 0x0000142c /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_3_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_3_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_4 0x00001430 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_4_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_4_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_5 0x00001434 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_5_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_5_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_6 0x00001438 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_6_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_6_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_7 0x0000143c /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_7_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_7_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_8 0x00001440 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_8_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_8_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_9 0x00001444 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_9_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_9_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_HEADER_LOG_10 0x00001448 /* R--4R */ +#define NV_INGRESS_ERR_HEADER_LOG_10_DW 31:0 /* R-DVF */ +#define NV_INGRESS_ERR_HEADER_LOG_10_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_CONTAIN_EN_0 0x00001414 /* RW-4R */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_CMDDECODEERR 0:0 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_CMDDECODEERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_CMDDECODEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_CMDDECODEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REQCONTEXTMISMATCHERR 2:2 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REQCONTEXTMISMATCHERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REQCONTEXTMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REQCONTEXTMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ACLFAIL 3:3 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ACLFAIL__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ACLFAIL_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ACLFAIL_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_HDR_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_HDR_ECC_DBE_ERR 5:5 /* RWEVF */ 
+#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_INVALIDVCSET 6:6 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_INVALIDVCSET__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_INVALIDVCSET_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_INVALIDVCSET_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ADDRBOUNDSERR 7:7 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ADDRBOUNDSERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ADDRBOUNDSERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ADDRBOUNDSERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTABCFGERR 8:8 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTABCFGERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTABCFGERR 9:9 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTABCFGERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REMAPTAB_ECC_DBE_ERR 10:10 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REMAPTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REMAPTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REMAPTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTAB_ECC_DBE_ERR 11:11 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTAB_ECC_DBE_ERR 12:12 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_PARITY_ERR 13:13 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_NCISOC_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REMAPTAB_ECC_LIMIT_ERR 14:14 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REMAPTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REMAPTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_REMAPTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTAB_ECC_LIMIT_ERR 15:15 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RIDTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define 
NV_INGRESS_ERR_CONTAIN_EN_0_RLANTAB_ECC_LIMIT_ERR 16:16 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_RLANTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ADDRTYPEERR 17:17 /* RWEVF */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ADDRTYPEERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ADDRTYPEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_CONTAIN_EN_0_ADDRTYPEERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER 0x00001480 /* RW-4R */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_ADDRESS 0x00001488 /* R--4R */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_ADDRESS_ERROR_ADDRESS 15:0 /* R-DVF */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_ADDRESS_VALID 0x0000148c /* R--4R */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ + +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER 0x00001490 /* RW-4R */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_ADDRESS 0x00001498 /* R--4R */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_ADDRESS_ERROR_ADDRESS 15:0 /* R-DVF */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_ADDRESS_VALID 0x0000149c /* R--4R */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ + +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER 0x000014a0 /* RW-4R */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_ADDRESS 0x000014a8 /* R--4R */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_ADDRESS_ERROR_ADDRESS 15:0 /* R-DVF */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* R-D-V */ + +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_ADDRESS_VALID 0x000014ac /* R--4R */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ + +#define NV_INGRESS_ERR_FIRST_0 0x0000141c /* RW-4R */ +#define NV_INGRESS_ERR_FIRST_0_CMDDECODEERR 0:0 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_CMDDECODEERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_CMDDECODEERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_REQCONTEXTMISMATCHERR 2:2 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_REQCONTEXTMISMATCHERR_NONE 0x00000000 /* 
RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_REQCONTEXTMISMATCHERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_ACLFAIL 3:3 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_ACLFAIL_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_ACLFAIL_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_HDR_ECC_LIMIT_ERR 4:4 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_HDR_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_HDR_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_HDR_ECC_DBE_ERR 5:5 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_INVALIDVCSET 6:6 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_INVALIDVCSET_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_INVALIDVCSET_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_ADDRBOUNDSERR 7:7 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_ADDRBOUNDSERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_ADDRBOUNDSERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_RIDTABCFGERR 8:8 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_RIDTABCFGERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_RIDTABCFGERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_RLANTABCFGERR 9:9 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_RLANTABCFGERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_RLANTABCFGERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_REMAPTAB_ECC_DBE_ERR 10:10 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_REMAPTAB_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_REMAPTAB_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_RIDTAB_ECC_DBE_ERR 11:11 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_RIDTAB_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_RIDTAB_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_RLANTAB_ECC_DBE_ERR 12:12 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_RLANTAB_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_RLANTAB_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_PARITY_ERR 13:13 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_NCISOC_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_REMAPTAB_ECC_LIMIT_ERR 14:14 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_REMAPTAB_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_REMAPTAB_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_RIDTAB_ECC_LIMIT_ERR 15:15 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_RIDTAB_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_RIDTAB_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_RLANTAB_ECC_LIMIT_ERR 16:16 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_RLANTAB_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_RLANTAB_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FIRST_0_ADDRTYPEERR 17:17 /* RWDVF */ +#define NV_INGRESS_ERR_FIRST_0_ADDRTYPEERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_FIRST_0_ADDRTYPEERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_INGRESS_ERR_STATUS_0 0x00001400 /* RW-4R */ +#define NV_INGRESS_ERR_STATUS_0_CMDDECODEERR 0:0 /* 
RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_CMDDECODEERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_CMDDECODEERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_REQCONTEXTMISMATCHERR 2:2 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_REQCONTEXTMISMATCHERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_REQCONTEXTMISMATCHERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_ACLFAIL 3:3 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_ACLFAIL_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_ACLFAIL_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_HDR_ECC_LIMIT_ERR 4:4 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_HDR_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_HDR_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_HDR_ECC_DBE_ERR 5:5 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_INVALIDVCSET 6:6 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_INVALIDVCSET_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_INVALIDVCSET_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_ADDRBOUNDSERR 7:7 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_ADDRBOUNDSERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_ADDRBOUNDSERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_RIDTABCFGERR 8:8 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_RIDTABCFGERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_RIDTABCFGERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_RLANTABCFGERR 9:9 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_RLANTABCFGERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_RLANTABCFGERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_REMAPTAB_ECC_DBE_ERR 10:10 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_REMAPTAB_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_REMAPTAB_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_RIDTAB_ECC_DBE_ERR 11:11 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_RIDTAB_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_RIDTAB_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_RLANTAB_ECC_DBE_ERR 12:12 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_RLANTAB_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_RLANTAB_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_PARITY_ERR 13:13 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_NCISOC_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_REMAPTAB_ECC_LIMIT_ERR 14:14 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_REMAPTAB_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_REMAPTAB_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_RIDTAB_ECC_LIMIT_ERR 15:15 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_RIDTAB_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_RIDTAB_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_RLANTAB_ECC_LIMIT_ERR 16:16 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_RLANTAB_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define 
NV_INGRESS_ERR_STATUS_0_RLANTAB_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_STATUS_0_ADDRTYPEERR 17:17 /* RWDVF */ +#define NV_INGRESS_ERR_STATUS_0_ADDRTYPEERR_NONE 0x00000000 /* RWD-V */ +#define NV_INGRESS_ERR_STATUS_0_ADDRTYPEERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_INGRESS_REMAPTABDATA0 0x00001090 /* RW-4R */ +#define NV_INGRESS_REMAPTABDATA0_RMAP_ADDR 10:0 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA0_RMAP_ADDR_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA0_IRL_SEL 16:15 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA0_IRL_SEL_SELECTNONE 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA0_IRL_SEL_SELECTIRL0 0x00000001 /* RW--V */ +#define NV_INGRESS_REMAPTABDATA0_IRL_SEL_SELECTIRL1 0x00000002 /* RW--V */ +#define NV_INGRESS_REMAPTABDATA0_IRL_SEL_ENABLEERRRSP 0x00000003 /* RW--V */ +#define NV_INGRESS_REMAPTABDATA0_ECC 30:22 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA0_ECC_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA0_ACLVALID 31:31 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA0_ACLVALID_INVALID 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA0_ACLVALID_VALID 0x00000001 /* RW--V */ + +#define NV_INGRESS_REMAPTABDATA1 0x00001094 /* RW-4R */ +#define NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK 15:0 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK 31:16 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_REMAPTABDATA2 0x00001098 /* RW-4R */ +#define NV_INGRESS_REMAPTABDATA2_REQCTXT_REP 15:0 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA2_REQCTXT_REP_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA2_ADR_OFFSET 31:16 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA2_ADR_OFFSET_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_REMAPTABDATA3 0x0000109c /* RW-4R */ +#define NV_INGRESS_REMAPTABDATA3_ADR_BASE 15:0 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA3_ADR_BASE_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA3_ADR_LIMIT 31:16 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA3_ADR_LIMIT_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_REMAPTABDATA4 0x000010a0 /* RW-4R */ +#define NV_INGRESS_REMAPTABDATA4_TGTID 10:0 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA4_TGTID_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA4_P2R_SWIZ 12:12 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA4_P2R_SWIZ_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA4_PLANE_SELECT 13:13 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA4_PLANE_SELECT_EVEN 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA4_PLANE_SELECT_ODD 0x00000001 /* RW--V */ +#define NV_INGRESS_REMAPTABDATA4_MULT2 14:14 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA4_MULT2_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA4_RFUNC 21:15 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA4_RFUNC_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA4_GPU_DIV 26:24 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA4_GPU_DIV_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REMAPTABDATA4_RSVD 31:27 /* RWEVF */ +#define NV_INGRESS_REMAPTABDATA4_RSVD_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_RIDTABDATA0 0x000010b0 /* RW-4R */ +#define NV_INGRESS_RIDTABDATA0_GSIZE 3:0 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_16X 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_1X 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_2X 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_3X 0x00000003 
/* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_4X 0x00000004 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_5X 0x00000005 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_6X 0x00000006 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_7X 0x00000007 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_8X 0x00000008 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_9X 0x00000009 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_10X 0x0000000a /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_11X 0x0000000b /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_12X 0x0000000c /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_13X 0x0000000d /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_14X 0x0000000e /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_GSIZE_15X 0x0000000f /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_PORT0 10:5 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA0_PORT0_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE0 13:12 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE0_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE0_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE0_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE0_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_PORT1 19:14 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA0_PORT1_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE1 22:21 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE1_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE1_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE1_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE1_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_PORT2 28:23 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA0_PORT2_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE2 31:30 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE2_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE2_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE2_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA0_VC_MODE2_ALWAYS1 0x00000003 /* RW--V */ + +#define NV_INGRESS_RIDTABDATA1 0x000010b4 /* RW-4R */ +#define NV_INGRESS_RIDTABDATA1_PORT3 5:0 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA1_PORT3_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE3 8:7 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE3_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE3_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE3_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE3_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA1_PORT4 14:9 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA1_PORT4_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE4 17:16 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE4_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE4_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE4_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE4_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA1_PORT5 23:18 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA1_PORT5_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE5 26:25 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE5_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE5_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA1_VC_MODE5_ALWAYS0 0x00000002 /* RW--V */ 
+#define NV_INGRESS_RIDTABDATA1_VC_MODE5_ALWAYS1 0x00000003 /* RW--V */ + +#define NV_INGRESS_RIDTABDATA2 0x000010b8 /* RW-4R */ +#define NV_INGRESS_RIDTABDATA2_PORT6 5:0 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA2_PORT6_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE6 8:7 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE6_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE6_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE6_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE6_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA2_PORT7 14:9 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA2_PORT7_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE7 17:16 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE7_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE7_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE7_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE7_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA2_PORT8 23:18 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA2_PORT8_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE8 26:25 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE8_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE8_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE8_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA2_VC_MODE8_ALWAYS1 0x00000003 /* RW--V */ + +#define NV_INGRESS_RIDTABDATA3 0x000010bc /* RW-4R */ +#define NV_INGRESS_RIDTABDATA3_PORT9 5:0 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA3_PORT9_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE9 8:7 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE9_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE9_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE9_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE9_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA3_PORT10 14:9 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA3_PORT10_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE10 17:16 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE10_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE10_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE10_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE10_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA3_PORT11 23:18 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA3_PORT11_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE11 26:25 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE11_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE11_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE11_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA3_VC_MODE11_ALWAYS1 0x00000003 /* RW--V */ + +#define NV_INGRESS_RIDTABDATA4 0x000010c0 /* RW-4R */ +#define NV_INGRESS_RIDTABDATA4_PORT12 5:0 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA4_PORT12_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE12 8:7 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE12_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE12_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE12_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE12_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA4_PORT13 14:9 /* 
RWEVF */ +#define NV_INGRESS_RIDTABDATA4_PORT13_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE13 17:16 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE13_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE13_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE13_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE13_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA4_PORT14 23:18 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA4_PORT14_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE14 26:25 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE14_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE14_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE14_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA4_VC_MODE14_ALWAYS1 0x00000003 /* RW--V */ + +#define NV_INGRESS_RIDTABDATA5 0x000010c4 /* RW-4R */ +#define NV_INGRESS_RIDTABDATA5_PORT15 5:0 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA5_PORT15_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA5_VC_MODE15 8:7 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA5_VC_MODE15_SAME 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA5_VC_MODE15_INVERT 0x00000001 /* RW--V */ +#define NV_INGRESS_RIDTABDATA5_VC_MODE15_ALWAYS0 0x00000002 /* RW--V */ +#define NV_INGRESS_RIDTABDATA5_VC_MODE15_ALWAYS1 0x00000003 /* RW--V */ +#define NV_INGRESS_RIDTABDATA5_RMOD 18:9 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA5_RMOD_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA5_ECC 30:22 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA5_ECC_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA5_ACLVALID 31:31 /* RWEVF */ +#define NV_INGRESS_RIDTABDATA5_ACLVALID_INVALID 0x00000000 /* RWE-V */ +#define NV_INGRESS_RIDTABDATA5_ACLVALID_VALID 0x00000001 /* RW--V */ + +#define NV_INGRESS_REQRSPMAPADDR 0x00001080 /* RW-4R */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS 13:0 /* RWEVF */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_REMAPTAB_DEPTH 0x000007ff /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RID_TAB_DEPTH 0x000001ff /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RLAN_TAB_DEPTH 0x000001ff /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL 18:16 /* RWEVF */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM 0x00000000 /* RWE-V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM 0x00000001 /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM 0x00000002 /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL_RSVD3 0x00000003 /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL_RSVD4 0x00000004 /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL_RSVD5 0x00000005 /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL_RSVD6 0x00000006 /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_RAM_SEL_RSVD7 0x00000007 /* RW--V */ +#define NV_INGRESS_REQRSPMAPADDR_AUTO_INCR 31:31 /* RWEVF */ +#define NV_INGRESS_REQRSPMAPADDR_AUTO_INCR_ENABLE 0x00000001 /* RWE-V */ +#define NV_INGRESS_REQRSPMAPADDR_AUTO_INCR_DISABLE 0x00000000 /* RW--V */ + +#define NV_INGRESS_RLANTABDATA0 0x000010d0 /* RW-4R */ +#define NV_INGRESS_RLANTABDATA0_GRP_SIZE_0 3:0 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA0_GRP_SIZE_0_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA0_GRP_SEL_0 8:5 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA0_GRP_SEL_0_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA0_GRP_SIZE_1 
13:10 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA0_GRP_SIZE_1_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA0_GRP_SEL_1 18:15 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA0_GRP_SEL_1_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA0_GRP_SIZE_2 23:20 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA0_GRP_SIZE_2_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA0_GRP_SEL_2 28:25 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA0_GRP_SEL_2_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_RLANTABDATA1 0x000010d4 /* RW-4R */ +#define NV_INGRESS_RLANTABDATA1_GRP_SIZE_3 3:0 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA1_GRP_SIZE_3_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA1_GRP_SEL_3 8:5 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA1_GRP_SEL_3_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA1_GRP_SIZE_4 13:10 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA1_GRP_SIZE_4_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA1_GRP_SEL_4 18:15 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA1_GRP_SEL_4_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA1_GRP_SIZE_5 23:20 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA1_GRP_SIZE_5_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA1_GRP_SEL_5 28:25 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA1_GRP_SEL_5_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_RLANTABDATA2 0x000010d8 /* RW-4R */ +#define NV_INGRESS_RLANTABDATA2_GRP_SIZE_6 3:0 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA2_GRP_SIZE_6_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA2_GRP_SEL_6 8:5 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA2_GRP_SEL_6_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA2_GRP_SIZE_7 13:10 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA2_GRP_SIZE_7_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA2_GRP_SEL_7 18:15 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA2_GRP_SEL_7_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA2_GRP_SIZE_8 23:20 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA2_GRP_SIZE_8_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA2_GRP_SEL_8 28:25 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA2_GRP_SEL_8_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_RLANTABDATA3 0x000010dc /* RW-4R */ +#define NV_INGRESS_RLANTABDATA3_GRP_SIZE_9 3:0 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA3_GRP_SIZE_9_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA3_GRP_SEL_9 8:5 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA3_GRP_SEL_9_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA3_GRP_SIZE_10 13:10 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA3_GRP_SIZE_10_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA3_GRP_SEL_10 18:15 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA3_GRP_SEL_10_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA3_GRP_SIZE_11 23:20 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA3_GRP_SIZE_11_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA3_GRP_SEL_11 28:25 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA3_GRP_SEL_11_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_RLANTABDATA4 0x000010e0 /* RW-4R */ +#define NV_INGRESS_RLANTABDATA4_GRP_SIZE_12 3:0 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA4_GRP_SIZE_12_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA4_GRP_SEL_12 8:5 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA4_GRP_SEL_12_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA4_GRP_SIZE_13 13:10 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA4_GRP_SIZE_13_INIT 0x00000000 /* RWE-V */ +#define 
NV_INGRESS_RLANTABDATA4_GRP_SEL_13 18:15 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA4_GRP_SEL_13_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA4_GRP_SIZE_14 23:20 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA4_GRP_SIZE_14_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA4_GRP_SEL_14 28:25 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA4_GRP_SEL_14_INIT 0x00000000 /* RWE-V */ + +#define NV_INGRESS_RLANTABDATA5 0x000010e4 /* RW-4R */ +#define NV_INGRESS_RLANTABDATA5_GRP_SIZE_15 3:0 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA5_GRP_SIZE_15_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA5_GRP_SEL_15 8:5 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA5_GRP_SEL_15_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA5_RSVD 21:20 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA5_RSVD_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA5_ECC 30:22 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA5_ECC_INIT 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA5_ACLVALID 31:31 /* RWEVF */ +#define NV_INGRESS_RLANTABDATA5_ACLVALID_INVALID 0x00000000 /* RWE-V */ +#define NV_INGRESS_RLANTABDATA5_ACLVALID_VALID 0x00000001 /* RW--V */ + +#define NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER 0x000014b0 /* RW-4R */ +#define NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT 0x000014b4 /* RW-4R */ +#define NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0 0x00001408 /* RW-4R */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_CMDDECODEERR 0:0 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_CMDDECODEERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_CMDDECODEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_CMDDECODEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REQCONTEXTMISMATCHERR 2:2 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REQCONTEXTMISMATCHERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REQCONTEXTMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REQCONTEXTMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ACLFAIL 3:3 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ACLFAIL__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ACLFAIL_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ACLFAIL_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define 
NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_INVALIDVCSET 6:6 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_INVALIDVCSET__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_INVALIDVCSET_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_INVALIDVCSET_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ADDRBOUNDSERR 7:7 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ADDRBOUNDSERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ADDRBOUNDSERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ADDRBOUNDSERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTABCFGERR 8:8 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTABCFGERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTABCFGERR 9:9 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTABCFGERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR 10:10 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTAB_ECC_DBE_ERR 11:11 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTAB_ECC_DBE_ERR 12:12 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTAB_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR 13:13 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR 14:14 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR 15:15 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define 
NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR 16:16 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ADDRTYPEERR 17:17 /* RWEVF */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ADDRTYPEERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ADDRTYPEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_FATAL_REPORT_EN_0_ADDRTYPEERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0 0x0000140c /* RW-4R */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_CMDDECODEERR 0:0 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_CMDDECODEERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_CMDDECODEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_CMDDECODEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REQCONTEXTMISMATCHERR 2:2 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REQCONTEXTMISMATCHERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REQCONTEXTMISMATCHERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REQCONTEXTMISMATCHERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ACLFAIL 3:3 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ACLFAIL__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ACLFAIL_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ACLFAIL_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_INVALIDVCSET 6:6 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_INVALIDVCSET__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_INVALIDVCSET_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_INVALIDVCSET_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ADDRBOUNDSERR 7:7 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ADDRBOUNDSERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ADDRBOUNDSERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ADDRBOUNDSERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTABCFGERR 8:8 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTABCFGERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define 
NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTABCFGERR 9:9 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTABCFGERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTABCFGERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTABCFGERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR 10:10 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REMAPTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTAB_ECC_DBE_ERR 11:11 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTAB_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTAB_ECC_DBE_ERR 12:12 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTAB_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTAB_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTAB_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR 13:13 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR 14:14 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_REMAPTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR 15:15 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RIDTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR 16:16 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_RLANTAB_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ADDRTYPEERR 17:17 /* RWEVF */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ADDRTYPEERR__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ADDRTYPEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_NON_FATAL_REPORT_EN_0_ADDRTYPEERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_INGRESS_ERR_ECC_CTRL 0x00001470 /* RW-4R */ +#define NV_INGRESS_ERR_ECC_CTRL_NCISOC_HDR_ECC_ENABLE 0:0 /* RWEVF */ +#define 
NV_INGRESS_ERR_ECC_CTRL_NCISOC_HDR_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_NCISOC_HDR_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_ECC_CTRL_NCISOC_HDR_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE 1:1 /* RWEVF */ +#define NV_INGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_REMAPTAB_ECC_ENABLE 8:8 /* RWEVF */ +#define NV_INGRESS_ERR_ECC_CTRL_REMAPTAB_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_REMAPTAB_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_ECC_CTRL_REMAPTAB_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_RIDTAB_ECC_ENABLE 9:9 /* RWEVF */ +#define NV_INGRESS_ERR_ECC_CTRL_RIDTAB_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_RIDTAB_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_ECC_CTRL_RIDTAB_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_RLANTAB_ECC_ENABLE 10:10 /* RWEVF */ +#define NV_INGRESS_ERR_ECC_CTRL_RLANTAB_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_INGRESS_ERR_ECC_CTRL_RLANTAB_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_INGRESS_ERR_ECC_CTRL_RLANTAB_ECC_ENABLE__PROD 0x00000001 /* RW--V */ + +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_LIMIT 0x00001484 /* RW-4R */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_LIMIT 0x00001494 /* RW-4R */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_LIMIT 0x000014a4 /* RW-4R */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ +#endif // __lr10_dev_ingress_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_minion_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_minion_ip.h new file mode 100644 index 000000000..c6f6a76e7 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_minion_ip.h @@ -0,0 +1,730 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_minion_ip_h__ +#define __lr10_dev_minion_ip_h__ +/* This file is autogenerated. Do not edit */ +// did not include the regs that weren't approved for Tegra +#define NV_CMINION_FALCON_CG2 0x00000134 /* RWI4R */ +#define NV_CMINION_FALCON_CG2_SLCG 17:1 /* */ +#define NV_CMINION_FALCON_CG2_SLCG_ENABLED 0 /* */ +#define NV_CMINION_FALCON_CG2_SLCG_DISABLED 0x1FFFF /* */ +#define NV_CMINION_FALCON_CG2_SLCG__PROD 0x10004 /* */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_DMA 1:1 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_DMA_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_DMA_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_GC6_SR_FSM 2:2 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_GC6_SR_FSM_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_GC6_SR_FSM_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_PIPE 3:3 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_PIPE_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_PIPE_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_DIV 4:4 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_DIV_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_DIV_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_ICD 5:5 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_ICD_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_ICD_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_CFG 6:6 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_CFG_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_CFG_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_CTXSW 7:7 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_CTXSW_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_CTXSW_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_PMB 8:8 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_PMB_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_PMB_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_RF 9:9 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_RF_ENABLED 0x00000000 /* RW--V */ +#define 
NV_CMINION_FALCON_CG2_SLCG_FALCON_RF_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_MUL 10:10 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_MUL_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_MUL_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_LDST 11:11 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_LDST_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_LDST_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_TSYNC 12:12 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_TSYNC_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_TSYNC_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_GPTMR 13:13 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_GPTMR_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_GPTMR_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_WDTMR 14:14 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_WDTMR_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_WDTMR_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_IRQSTAT 15:15 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_IRQSTAT_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_IRQSTAT_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_TOP 16:16 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_TOP_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FALCON_TOP_DISABLED 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_CG2_SLCG_FBIF 17:17 /* RWIVF */ +#define NV_CMINION_FALCON_CG2_SLCG_FBIF_ENABLED 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CG2_SLCG_FBIF_DISABLED 0x00000001 /* RWI-V */ + +#define NV_CMINION_FALCON_IRQMASK 0x00000018 /* R--4R */ +#define NV_CMINION_FALCON_IRQMASK_GPTMR 0:0 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_GPTMR_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_GPTMR_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_WDTMR 1:1 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_WDTMR_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_WDTMR_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_MTHD 2:2 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_MTHD_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_MTHD_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_CTXSW 3:3 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_CTXSW_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_CTXSW_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_HALT 4:4 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_HALT_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_HALT_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXTERR 5:5 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXTERR_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_EXTERR_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_SWGEN0 6:6 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_SWGEN0_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_SWGEN0_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_SWGEN1 7:7 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_SWGEN1_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_SWGEN1_ENABLE 0x00000001 /* R---V */ +#define 
NV_CMINION_FALCON_IRQMASK_EXT 15:8 /* */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ1 8:8 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ1_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ1_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ2 9:9 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ2_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ2_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ3 10:10 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ3_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ3_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ4 11:11 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ4_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ4_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ5 12:12 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ5_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ5_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ6 13:13 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ6_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ6_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ7 14:14 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ7_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ7_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ8 15:15 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ8_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_EXT_EXTIRQ8_DISABLE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQMASK_DMA 16:16 /* R-IVF */ +#define NV_CMINION_FALCON_IRQMASK_DMA_ENABLE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQMASK_DMA_DISABLE 0x00000000 /* R-I-V */ + +#define NV_CMINION_FALCON_IRQSCLR 0x00000004 /* -W-4R */ +#define NV_CMINION_FALCON_IRQSCLR_GPTMR 0:0 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_GPTMR_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_WDTMR 1:1 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_WDTMR_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_MTHD 2:2 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_MTHD_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_CTXSW 3:3 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_CTXSW_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_HALT 4:4 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_HALT_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_EXTERR 5:5 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXTERR_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_SWGEN0 6:6 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_SWGEN0_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_SWGEN1 7:7 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_SWGEN1_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_EXT 15:8 /* */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ1 8:8 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ1_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ2 9:9 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ2_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ3 10:10 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ3_SET 0x00000001 /* -W--V */ +#define 
NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ4 11:11 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ4_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ5 12:12 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ5_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ6 13:13 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ6_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ7 14:14 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ7_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ8 15:15 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_EXT_EXTIRQ8_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQSCLR_DMA 16:16 /* -WXVF */ +#define NV_CMINION_FALCON_IRQSCLR_DMA_SET 0x00000001 /* -W--V */ + +#define NV_CMINION_FALCON_IRQSTAT 0x00000008 /* R--4R */ +#define NV_CMINION_FALCON_IRQSTAT_GPTMR 0:0 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_GPTMR_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_GPTMR_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_WDTMR 1:1 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_WDTMR_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_WDTMR_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_MTHD 2:2 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_MTHD_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_MTHD_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_CTXSW 3:3 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_CTXSW_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_CTXSW_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_HALT 4:4 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_HALT_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_HALT_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXTERR 5:5 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXTERR_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_EXTERR_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_SWGEN0 6:6 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_SWGEN0_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_SWGEN0_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_SWGEN1 7:7 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_SWGEN1_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_SWGEN1_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT 15:8 /* */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ1 8:8 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ1_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ1_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ2 9:9 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ2_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ2_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ3 10:10 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ3_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ3_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ4 11:11 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ4_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ4_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ5 12:12 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ5_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ5_FALSE 0x00000000 
/* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ6 13:13 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ6_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ6_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ7 14:14 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ7_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ7_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ8 15:15 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ8_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_EXT_EXTIRQ8_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IRQSTAT_DMA 16:16 /* R-IVF */ +#define NV_CMINION_FALCON_IRQSTAT_DMA_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IRQSTAT_DMA_FALSE 0x00000000 /* R-I-V */ + +#define NV_CMINION_FALCON_IRQMSET 0x00000010 /* -W-4R */ +#define NV_CMINION_FALCON_IRQMSET_GPTMR 0:0 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_GPTMR_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_WDTMR 1:1 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_WDTMR_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_MTHD 2:2 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_MTHD_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_CTXSW 3:3 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_CTXSW_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_HALT 4:4 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_HALT_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXTERR 5:5 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXTERR_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_SWGEN0 6:6 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_SWGEN0_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_SWGEN1 7:7 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_SWGEN1_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXT 15:8 /* */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ1 8:8 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ1_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ2 9:9 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ2_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ3 10:10 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ3_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ4 11:11 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ4_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ5 12:12 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ5_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ6 13:13 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ6_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ7 14:14 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ7_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ8 15:15 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_EXT_EXTIRQ8_SET 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_IRQMSET_DMA 16:16 /* -WXVF */ +#define NV_CMINION_FALCON_IRQMSET_DMA_SET 0x00000001 /* -W--V */ + +#define NV_CMINION_FALCON_IRQDEST 0x0000001c /* RW-4R */ +#define NV_CMINION_FALCON_IRQDEST_HOST_GPTMR 0:0 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_GPTMR_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_GPTMR_FALCON 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_GPTMR_HOST 0x00000001 /* RW--V */ +#define 
NV_CMINION_FALCON_IRQDEST_HOST_WDTMR 1:1 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_WDTMR_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_WDTMR_FALCON 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_WDTMR_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_MTHD 2:2 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_MTHD_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_MTHD_FALCON 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_MTHD_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_CTXSW 3:3 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_CTXSW_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_CTXSW_FALCON 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_CTXSW_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_HALT 4:4 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_HALT_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_HALT_FALCON 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_HALT_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXTERR 5:5 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXTERR_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXTERR_FALCON 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXTERR_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_SWGEN0 6:6 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_SWGEN0_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_SWGEN0_FALCON 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_SWGEN0_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_SWGEN1 7:7 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_SWGEN1_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_SWGEN1_FALCON 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_SWGEN1_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT 15:8 /* */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ1 8:8 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ1_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ1_FALCON 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ2 9:9 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ2_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ2_FALCON 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ3 10:10 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ3_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ3_FALCON 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ4 11:11 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ4_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ4_FALCON 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ5 12:12 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ5_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ5_FALCON 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ6 13:13 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ6_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ6_FALCON 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ7 14:14 /* RWIVF */ +#define 
NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ7_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ7_FALCON 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ8 15:15 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ8_HOST 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_HOST_EXT_EXTIRQ8_FALCON 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_GPTMR 16:16 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_GPTMR_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_GPTMR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_GPTMR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_GPTMR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_GPTMR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_WDTMR 17:17 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_WDTMR_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_WDTMR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_WDTMR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_WDTMR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_WDTMR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_MTHD 18:18 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_MTHD_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_MTHD_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_MTHD_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_MTHD_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_MTHD_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_CTXSW 19:19 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_CTXSW_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_CTXSW_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_CTXSW_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_CTXSW_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_CTXSW_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_HALT 20:20 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_HALT_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_HALT_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_HALT_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_HALT_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_HALT_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXTERR 21:21 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXTERR_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXTERR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXTERR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXTERR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXTERR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN0 22:22 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN0_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN0_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN0_FALCON_IRQ1 0x00000001 /* RW--V */ 
+#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN0_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN0_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN1 23:23 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN1_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN1_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN1_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN1_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_SWGEN1_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT 31:24 /* */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1 24:24 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2 25:25 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3 26:26 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4 27:27 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5 28:28 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6 29:29 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_FALCON_IRQ0 0x00000000 /* RW--V */ +#define 
NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7 30:30 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8 31:31 /* RWIVF */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_HOST_NONSTALL 0x00000001 /* RW--V */ + +#define NV_CMINION_FALCON_DMACTL 0x0000010c /* RW-4R */ +#define NV_CMINION_FALCON_DMACTL_REQUIRE_CTX 0:0 /* RWIVF */ +#define NV_CMINION_FALCON_DMACTL_REQUIRE_CTX_INIT 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_DMACTL_REQUIRE_CTX_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_DMACTL_REQUIRE_CTX_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_DMACTL_DMEM_SCRUBBING 1:1 /* R--VF */ +#define NV_CMINION_FALCON_DMACTL_DMEM_SCRUBBING_PENDING 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_DMACTL_DMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_CMINION_FALCON_DMACTL_IMEM_SCRUBBING 2:2 /* R--VF */ +#define NV_CMINION_FALCON_DMACTL_IMEM_SCRUBBING_PENDING 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_DMACTL_IMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_CMINION_FALCON_DMACTL_DMAQ_NUM 6:3 /* R--VF */ +#define NV_CMINION_FALCON_DMACTL_SECURE_STAT 7:7 /* R--VF */ + +#define NV_CMINION_FALCON_IMEMC(i) (0x00000180+(i)*16) /* RW-4A */ +#define NV_CMINION_FALCON_IMEMC__SIZE_1 4 /* */ +#define NV_CMINION_FALCON_IMEMC_OFFS 7:2 /* RWIVF */ +#define NV_CMINION_FALCON_IMEMC_OFFS_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IMEMC_BLK 23:8 /* RWIVF */ +#define NV_CMINION_FALCON_IMEMC_BLK_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IMEMC_AINCW 24:24 /* RWIVF */ +#define NV_CMINION_FALCON_IMEMC_AINCW_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IMEMC_AINCR 25:25 /* RWIVF */ +#define NV_CMINION_FALCON_IMEMC_AINCR_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IMEMC_AINCR_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_IMEMC_AINCR_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_IMEMC_SECURE 28:28 /* RWIVF */ +#define NV_CMINION_FALCON_IMEMC_SECURE_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_IMEMC_SEC_ATOMIC 29:29 /* R-IVF */ +#define NV_CMINION_FALCON_IMEMC_SEC_ATOMIC_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IMEMC_SEC_ATOMIC_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IMEMC_SEC_WR_VIO 30:30 /* R-IVF */ +#define NV_CMINION_FALCON_IMEMC_SEC_WR_VIO_TRUE 0x00000001 /* R---V */ +#define 
NV_CMINION_FALCON_IMEMC_SEC_WR_VIO_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_IMEMC_SEC_LOCK 31:31 /* R-IVF */ +#define NV_CMINION_FALCON_IMEMC_SEC_LOCK_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_IMEMC_SEC_LOCK_FALSE 0x00000000 /* R-I-V */ + +#define NV_CMINION_FALCON_IMEMT(i) (0x00000188+(i)*16) /* RW-4A */ +#define NV_CMINION_FALCON_IMEMT__SIZE_1 4 /* */ +#define NV_CMINION_FALCON_IMEMT_TAG 15:0 /* RW-VF */ + +#define NV_CMINION_FALCON_IMEMD(i) (0x00000184+(i)*16) /* RW-4A */ +#define NV_CMINION_FALCON_IMEMD__SIZE_1 4 /* */ +#define NV_CMINION_FALCON_IMEMD_DATA 31:0 /* RW-VF */ + +#define NV_CMINION_FALCON_DMEMC(i) (0x000001c0+(i)*8) /* RW-4A */ +#define NV_CMINION_FALCON_DMEMC__SIZE_1 8 /* */ +#define NV_CMINION_FALCON_DMEMC_ADDRESS 23:0 /* RWIVF */ +#define NV_CMINION_FALCON_DMEMC_ADDRESS_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_DMEMC_OFFS 7:2 /* RWIVF */ +#define NV_CMINION_FALCON_DMEMC_OFFS_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_DMEMC_BLK 23:8 /* RWIVF */ +#define NV_CMINION_FALCON_DMEMC_BLK_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_DMEMC_AINCW 24:24 /* RWIVF */ +#define NV_CMINION_FALCON_DMEMC_AINCW_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_DMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_AINCR 25:25 /* RWIVF */ +#define NV_CMINION_FALCON_DMEMC_AINCR_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_DMEMC_AINCR_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_AINCR_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_SETTAG 26:26 /* RWIVF */ +#define NV_CMINION_FALCON_DMEMC_SETTAG_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_DMEMC_SETTAG_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_SETTAG_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_SETLVL 27:27 /* RWIVF */ +#define NV_CMINION_FALCON_DMEMC_SETLVL_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_DMEMC_SETLVL_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_SETLVL_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_VA 28:28 /* RWIVF */ +#define NV_CMINION_FALCON_DMEMC_VA_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_DMEMC_VA_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_VA_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_DMEMC_MISS 29:29 /* R-IVF */ +#define NV_CMINION_FALCON_DMEMC_MISS_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_DMEMC_MISS_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_DMEMC_MULTIHIT 30:30 /* R-IVF */ +#define NV_CMINION_FALCON_DMEMC_MULTIHIT_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_DMEMC_MULTIHIT_FALSE 0x00000000 /* R-I-V */ +#define NV_CMINION_FALCON_DMEMC_LVLERR 31:31 /* R-IVF */ +#define NV_CMINION_FALCON_DMEMC_LVLERR_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_DMEMC_LVLERR_FALSE 0x00000000 /* R-I-V */ + +#define NV_CMINION_FALCON_DMEMD(i) (0x000001c4+(i)*8) /* RW-4A */ +#define NV_CMINION_FALCON_DMEMD__SIZE_1 8 /* */ +#define NV_CMINION_FALCON_DMEMD_DATA 31:0 /* RW-VF */ + +#define NV_CMINION_FALCON_OS 0x00000080 /* RW-4R */ +#define NV_CMINION_FALCON_OS_VERSION 31:0 /* RWIVF */ +#define NV_CMINION_FALCON_OS_VERSION_INIT 0x00000000 /* RWI-V */ + +#define NV_CMINION_FALCON_MAILBOX1 0x00000044 /* RW-4R */ +#define NV_CMINION_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */ +#define NV_CMINION_FALCON_MAILBOX1_DATA_INIT 0x00000000 /* RWI-V */ + +#define NV_CMINION_FALCON_SCTL 0x00000240 
/* RW-4R */ +#define NV_CMINION_FALCON_SCTL_LSMODE 0:0 /* RWIVF */ +#define NV_CMINION_FALCON_SCTL_LSMODE_FALSE 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_SCTL_LSMODE_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_SCTL_HSMODE 1:1 /* R--VF */ +#define NV_CMINION_FALCON_SCTL_HSMODE_FALSE 0x00000000 /* R---V */ +#define NV_CMINION_FALCON_SCTL_HSMODE_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_SCTL_LSMODE_LEVEL 5:4 /* RWIVF */ +#define NV_CMINION_FALCON_SCTL_LSMODE_LEVEL_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_SCTL_UCODE_LEVEL 5:4 /* */ +#define NV_CMINION_FALCON_SCTL_UCODE_LEVEL_INIT 0 /* */ +#define NV_CMINION_FALCON_SCTL_DEBUG_PRIV_LEVEL 9:8 /* RWIVF */ +#define NV_CMINION_FALCON_SCTL_DEBUG_PRIV_LEVEL_INIT 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_SCTL_RESET_LVLM_EN 12:12 /* RWIVF */ +#define NV_CMINION_FALCON_SCTL_RESET_LVLM_EN_TRUE 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_SCTL_RESET_LVLM_EN_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_SCTL_STALLREQ_CLR_EN 13:13 /* RWIVF */ +#define NV_CMINION_FALCON_SCTL_STALLREQ_CLR_EN_TRUE 0x00000001 /* RWI-V */ +#define NV_CMINION_FALCON_SCTL_STALLREQ_CLR_EN_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_SCTL_AUTH_EN 14:14 /* RWIVF */ +#define NV_CMINION_FALCON_SCTL_AUTH_EN_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_SCTL_AUTH_EN_FALSE 0x00000000 /* RWI-V */ +#define NV_CMINION_FALCON_SCTL1 0x00000250 /* RW-4R */ +#define NV_CMINION_FALCON_SCTL1_CSBLVL_MASK 1:0 /* RWIVF */ +#define NV_CMINION_FALCON_SCTL1_CSBLVL_MASK_INIT 0x00000003 /* RWI-V */ +#define NV_CMINION_FALCON_SCTL1_EXTLVL_MASK 3:2 /* RWIVF */ +#define NV_CMINION_FALCON_SCTL1_EXTLVL_MASK_INIT 0x00000003 /* RWI-V */ + +#define NV_CMINION_FALCON_BOOTVEC 0x00000104 /* RW-4R */ +#define NV_CMINION_FALCON_BOOTVEC_VEC 31:0 /* RWIVF */ +#define NV_CMINION_FALCON_BOOTVEC_VEC_INIT 0x00000000 /* RWI-V */ + +#define NV_CMINION_FALCON_CPUCTL 0x00000100 /* RW-4R */ +#define NV_CMINION_FALCON_CPUCTL_IINVAL 0:0 /* -WXVF */ +#define NV_CMINION_FALCON_CPUCTL_IINVAL_TRUE 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_CPUCTL_IINVAL_FALSE 0x00000000 /* -W--V */ +#define NV_CMINION_FALCON_CPUCTL_STARTCPU 1:1 /* -WXVF */ +#define NV_CMINION_FALCON_CPUCTL_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_CPUCTL_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_CMINION_FALCON_CPUCTL_SRESET 2:2 /* -WXVF */ +#define NV_CMINION_FALCON_CPUCTL_SRESET_TRUE 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_CPUCTL_SRESET_FALSE 0x00000000 /* -W--V */ +#define NV_CMINION_FALCON_CPUCTL_HRESET 3:3 /* -WXVF */ +#define NV_CMINION_FALCON_CPUCTL_HRESET_TRUE 0x00000001 /* -W--V */ +#define NV_CMINION_FALCON_CPUCTL_HRESET_FALSE 0x00000000 /* -W--V */ +#define NV_CMINION_FALCON_CPUCTL_HALTED 4:4 /* R-XVF */ +#define NV_CMINION_FALCON_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_CPUCTL_HALTED_FALSE 0x00000000 /* R---V */ +#define NV_CMINION_FALCON_CPUCTL_STOPPED 5:5 /* R-XVF */ +#define NV_CMINION_FALCON_CPUCTL_STOPPED_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_FALCON_CPUCTL_STOPPED_FALSE 0x00000000 /* R---V */ +#define NV_CMINION_FALCON_CPUCTL_ALIAS_EN 6:6 /* RWIVF */ +#define NV_CMINION_FALCON_CPUCTL_ALIAS_EN_TRUE 0x00000001 /* RW--V */ +#define NV_CMINION_FALCON_CPUCTL_ALIAS_EN_FALSE 0x00000000 /* RW--V */ +#define NV_CMINION_FALCON_CPUCTL_ALIAS_EN_INIT 0x00000000 /* RWI-V */ + +#define NV_CMINION_SCP_CTL_STAT 0x00000608 /* R--4R */ +#define NV_CMINION_SCP_CTL_STAT_DEBUG_MODE 20:20 /* R--VF */ 
+#define NV_CMINION_SCP_CTL_STAT_DEBUG_MODE_DISABLED 0x00000000 /* R---V */ +#define NV_CMINION_SCP_CTL_STAT_AES_SCC_DIS 2:2 /* R--VF */ +#define NV_CMINION_SCP_CTL_STAT_AES_SCC_DIS_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_SCP_CTL_STAT_AES_SCC_DIS_FALSE 0x00000000 /* R---V */ +#define NV_CMINION_SCP_CTL_STAT_HSMODE 1:1 /* R--VF */ +#define NV_CMINION_SCP_CTL_STAT_HSMODE_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_SCP_CTL_STAT_HSMODE_FALSE 0x00000000 /* R---V */ +#define NV_CMINION_SCP_CTL_STAT_SBOOT 0:0 /* R--VF */ +#define NV_CMINION_SCP_CTL_STAT_SBOOT_TRUE 0x00000001 /* R---V */ +#define NV_CMINION_SCP_CTL_STAT_SBOOT_FALSE 0x00000000 /* R---V */ + +#define NV_MINION_NVLINK_DL_STAT(i) (0x00000980+(i)*0x4) /* RW-4A */ +#define NV_MINION_NVLINK_DL_STAT__SIZE_1 4 /* */ +#define NV_MINION_NVLINK_DL_STAT_ARGS 15:0 /* RWEVF */ +#define NV_MINION_NVLINK_DL_STAT_ARGS_INIT 0x00000000 /* RWE-V */ +#define NV_MINION_NVLINK_DL_STAT_STATUSIDX 23:16 /* RWEVF */ +#define NV_MINION_NVLINK_DL_STAT_STATUSIDX_INIT 0x00000000 /* RWE-V */ +#define NV_MINION_NVLINK_DL_STAT_READY 31:31 /* RWEVF */ +#define NV_MINION_NVLINK_DL_STAT_READY_TRUE 0x00000001 /* RW--V */ +#define NV_MINION_NVLINK_DL_STAT_READY_FALSE 0x00000000 /* RWE-V */ + +#define NV_MINION_NVLINK_DL_STATDATA(i) (0x000009c0+(i)*0x4) /* RW-4A */ +#define NV_MINION_NVLINK_DL_STATDATA__SIZE_1 4 /* */ +#define NV_MINION_NVLINK_DL_STATDATA_DATA 31:0 /* RWEVF */ +#define NV_MINION_NVLINK_DL_STATDATA_DATA_INIT 0x00000000 /* RWE-V */ + +#define NV_MINION_NVLINK_LINK_INTR(i) (0x00000a00+(i)*0x4) /* RW-4A */ +#define NV_MINION_NVLINK_LINK_INTR__SIZE_1 4 /* */ +#define NV_MINION_NVLINK_LINK_INTR_CODE 7:0 /* RWEVF */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_NA 0x00000000 /* RWE-V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_SWREQ 0x00000001 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_DLREQ 0x00000002 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_PMDISABLED 0x00000003 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_DLCMDFAULT 0x00000004 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_TLREQ 0x00000005 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_NOINIT 0x00000010 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_NOTIFY 0x00000017 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_LOCAL_CONFIG_ERR 0x00000018 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_NEGOTIATION_CONFIG_ERR 0x00000019 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_BADINIT 0x00000020 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_CODE_PMFAIL 0x00000021 /* RW--V */ +#define NV_MINION_NVLINK_LINK_INTR_SUBCODE 15:8 /* RWEVF */ +#define NV_MINION_NVLINK_LINK_INTR_SUBCODE_INIT 0x00000000 /* RWE-V */ +#define NV_MINION_NVLINK_LINK_INTR_STATE 31:31 /* RWEVF */ +#define NV_MINION_NVLINK_LINK_INTR_STATE_INIT 0x00000000 /* RWE-V */ + +#define NV_MINION_MINION_INTR 0x00000810 /* RW-4R */ +#define NV_MINION_MINION_INTR_FATAL 0:0 /* RWEVF */ +#define NV_MINION_MINION_INTR_FATAL_INIT 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_NONFATAL 1:1 /* RWEVF */ +#define NV_MINION_MINION_INTR_NONFATAL_INIT 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_FALCON_STALL 2:2 /* R-EVF */ +#define NV_MINION_MINION_INTR_FALCON_STALL_INIT 0x00000000 /* R-E-V */ +#define NV_MINION_MINION_INTR_FALCON_NOSTALL 3:3 /* R-EVF */ +#define NV_MINION_MINION_INTR_FALCON_NOSTALL_INIT 0x00000000 /* R-E-V */ +#define NV_MINION_MINION_INTR_LINK 31:16 /* R-EVF */ +#define NV_MINION_MINION_INTR_LINK_INIT 0x00000000 /* R-E-V */ + +#define NV_MINION_MINION_INTR_STALL_EN 
0x00000818 /* RW-4R */ +#define NV_MINION_MINION_INTR_STALL_EN_FATAL 0:0 /* RWEVF */ +#define NV_MINION_MINION_INTR_STALL_EN_FATAL_DISABLE 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_STALL_EN_FATAL_ENABLE 0x00000001 /* RW--V */ +#define NV_MINION_MINION_INTR_STALL_EN_NONFATAL 1:1 /* RWEVF */ +#define NV_MINION_MINION_INTR_STALL_EN_NONFATAL_DISABLE 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_STALL_EN_NONFATAL_ENABLE 0x00000001 /* RW--V */ +#define NV_MINION_MINION_INTR_STALL_EN_FALCON_STALL 2:2 /* RWEVF */ +#define NV_MINION_MINION_INTR_STALL_EN_FALCON_STALL_DISABLE 0x00000000 /* RW--V */ +#define NV_MINION_MINION_INTR_STALL_EN_FALCON_STALL_ENABLE 0x00000001 /* RWE-V */ +#define NV_MINION_MINION_INTR_STALL_EN_FALCON_NOSTALL 3:3 /* RWEVF */ +#define NV_MINION_MINION_INTR_STALL_EN_FALCON_NOSTALL_DISABLE 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_STALL_EN_FALCON_NOSTALL_ENABLE 0x00000001 /* RW--V */ +#define NV_MINION_MINION_INTR_STALL_EN_LINK 31:16 /* RWEVF */ +#define NV_MINION_MINION_INTR_STALL_EN_LINK_DISABLE_ALL 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_STALL_EN_LINK_ENABLE_ALL 0x0000ffff /* RW--V */ + +#define NV_MINION_MINION_INTR_NONSTALL_EN 0x0000081c /* RW-4R */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FATAL 0:0 /* RWEVF */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FATAL_DISABLE 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FATAL_ENABLE 0x00000001 /* RW--V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_NONFATAL 1:1 /* RWEVF */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_NONFATAL_DISABLE 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_NONFATAL_ENABLE 0x00000001 /* RW--V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FALCON_STALL 2:2 /* RWEVF */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FALCON_STALL_DISABLE 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FALCON_STALL_ENABLE 0x00000001 /* RW--V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FALCON_NOSTALL 3:3 /* RWEVF */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FALCON_NOSTALL_DISABLE 0x00000000 /* RW--V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_FALCON_NOSTALL_ENABLE 0x00000001 /* RWE-V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_LINK 31:16 /* RWEVF */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_LINK_DISABLE_ALL 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_INTR_NONSTALL_EN_LINK_ENABLE_ALL 0x0000ffff /* RW--V */ + +#define NV_MINION_MINION_STATUS 0x00000830 /* RW-4R */ +#define NV_MINION_MINION_STATUS_STATUS 7:0 /* RWEVF */ +#define NV_MINION_MINION_STATUS_STATUS_INIT 0x00000000 /* RWE-V */ +#define NV_MINION_MINION_STATUS_STATUS_BOOT 0x00000001 /* RW--V */ +#define NV_MINION_MINION_STATUS_INTR_CODE 31:8 /* RWEVF */ +#define NV_MINION_MINION_STATUS_INTR_CODE_INIT 0x00000000 /* RWE-V */ + +#define NV_MINION_NVLINK_DL_CMD(i) (0x00000900+(i)*0x4) /* RW-4A */ +#define NV_MINION_NVLINK_DL_CMD__SIZE_1 4 /* */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND 7:0 /* RWEVF */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_NOP 0x00000000 /* RWE-V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHY 0x00000001 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_SWINTR 0x00000002 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITLANEENABLE 0x00000003 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITDLPL 0x00000004 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITRXTERM 0x00000005 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITTL 0x00000006 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITPLL 0x00000007 /* RW--V */ 
+#define NV_MINION_NVLINK_DL_CMD_COMMAND_LANEDISABLE 0x00000008 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_LANESHUTDOWN 0x0000000c /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHASE1 0x0000000d /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITNEGOTIATE 0x0000000e /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_INITOPTIMIZE 0x0000000f /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_ENABLEPM 0x00000010 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_DISABLEPM 0x00000011 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_TXCLKSWITCH_PLL 0x00000014 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_TXCLKSWITCH_ALT 0x00000015 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_CLEARRESTORESTATE 0x00000017 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_SAVESTATE 0x00000018 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_READ_PHY_TRAINING_PARAMS 0x00000020 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_WRITE_PHY_TRAINING_PARAMS 0x00000021 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_CONFIGEOM 0x00000040 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_SETNEA 0x00000041 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_SETNEDR 0x00000042 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_SETNEDW 0x00000043 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_XAVIER_PLLOVERRIDE_ON 0x00000050 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_XAVIER_PLLOVERRIDE_OFF 0x00000051 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_XAVIER_CALIBRATEPLL 0x00000052 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_TURING_RXDET 0x00000058 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_DLSTAT_CLR_DLERRCNT 0x00000070 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_DLSTAT_CLR_DLLPCNT 0x00000071 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_DLSTAT_CLR_DLTHROUGHPUTCNT 0x00000072 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_DBG_PROTECTIONS_OFF 0x000000f0 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_ALWAYSFAULT 0x000000ff /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_FAULT 30:30 /* RWEVF */ +#define NV_MINION_NVLINK_DL_CMD_FAULT_FAULT_CLEAR 0x00000001 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_FAULT_NOFAULT_NOCLEAR 0x00000000 /* RWE-V */ +#define NV_MINION_NVLINK_DL_CMD_READY 31:31 /* RWEVF */ +#define NV_MINION_NVLINK_DL_CMD_READY_TRUE 0x00000001 /* RW--V */ +#define NV_MINION_NVLINK_DL_CMD_READY_FALSE 0x00000000 /* RWE-V */ + +#define NV_MINION_MISC_0 0x000008b0 /* RW-4R */ +#define NV_MINION_MISC_0_SCRATCH_SWRW_0 31:0 /* RWEVF */ +#define NV_MINION_MISC_0_SCRATCH_SWRW_0_INIT 0x00000000 /* RWE-V */ + +#define NV_MINION_NVLINK_DL_CMD_DATA(i) (0x00000920+(i)*0x4) /* RW-4A */ +#define NV_MINION_NVLINK_DL_CMD_DATA__SIZE_1 4 /* */ +#define NV_MINION_NVLINK_DL_CMD_DATA_DATA 31:0 /* RWEVF */ +#define NV_MINION_NVLINK_DL_CMD_DATA_DATA_INIT 0x00000000 /* RWE-V */ +#endif // __lr10_dev_minion_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_minion_ip_addendum.h b/src/common/inc/swref/published/nvswitch/lr10/dev_minion_ip_addendum.h new file mode 100644 index 000000000..62a015e70 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_minion_ip_addendum.h @@ -0,0 +1,323 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_minion_ip_addendum_h__ +#define __lr10_dev_minion_ip_addendum_h__ +#define NV_NVLSTAT 0x00000103:0x00000000 /* RW--D */ +#define NV_NVLSTAT_UC01 0x00000001 /* R--4R */ +#define NV_NVLSTAT_UC01_PM_STATE 31:31 /* R---F */ +#define NV_NVLSTAT_UC01_ACMODE_STATE 30:30 /* R---F */ +#define NV_NVLSTAT_UC01_LANES_ENABLED 29:29 /* R---F */ +#define NV_NVLSTAT_UC01_LANES_ENABLED_TRUE 0x1 /* R---V */ +#define NV_NVLSTAT_UC01_LANES_ENABLED_FALSE 0x0 /* R---V */ +#define NV_NVLSTAT_UC01_TRAINING_BUFFER_STATUS 23:20 /* R---F */ +#define NV_NVLSTAT_UC01_SEARCH_ERROR 19:19 /* R---F */ +#define NV_NVLSTAT_UC01_TRAINING_GOOD 18:18 /* R---F */ +#define NV_NVLSTAT_UC01_TRAINING_GOOD_SUCCESS 0x1 /* R---V */ +#define NV_NVLSTAT_UC01_TRAINING_GOOD_UNKNOWN 0x0 /* R---V */ +#define NV_NVLSTAT_UC01_CONFIG_GOOD 16:16 /* R---F */ +#define NV_NVLSTAT_UC01_CONFIG_GOOD_SUCCESS 0x1 /* R---V */ +#define NV_NVLSTAT_UC01_CONFIG_GOOD_UNKNOWN 0x0 /* R---V */ +#define NV_NVLSTAT_UC01_LINK_STATE 15:0 /* R---F */ +#define NV_NVLSTAT_LNK0 0x00000010 /* R--4R */ +#define NV_NVLSTAT_LNK0_INTR_TX_FAULT_RAM 4:4 /* R---F */ +#define NV_NVLSTAT_LNK0_INTR_TX_FAULT_INTERFACE 5:5 /* R---F */ +#define NV_NVLSTAT_LNK0_INTR_TX_FAULT_SUBLINK_CHANGE 8:8 /* R---F */ +#define NV_NVLSTAT_LNK0_INTR_RX_FAULT_SUBLINK_CHANGE 16:16 /* R---F */ +#define NV_NVLSTAT_LNK0_INTR_RX_FAULT_DL_PROTOCOL 20:20 /* R---F */ +#define NV_NVLSTAT_LNK0_INTR_RX_FAULT_SHORT_ERROR_RATE 21:21 /* R---F */ +#define NV_NVLSTAT_LNK0_INTR_RX_FAULT_LONG_ERROR_RATE 22:22 /* R---F */ +#define NV_NVLSTAT_LNK0_INTR_LTSSM_PROTOCOL 29:29 /* R---F */ +#define NV_NVLSTAT_LNK1 0x00000011 /* R--4R */ +#define NV_NVLSTAT_LNK1_ERROR_COUNT1_RECOVERY_EVENTS_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_LNK1_ERROR_COUNT1_RECOVERY_EVENTS_VALUE_SRCOVF 0x000003ff /* R---V */ +#define NV_NVLSTAT_LNK1_ERROR_COUNT1_RECOVERY_EVENTS_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_LNK1_ERROR_COUNT1_RECOVERY_EVENTS_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_LNK1_ERROR_COUNT1_RECOVERY_EVENTS_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_LNK2 0x00000012 /* R--4R */ +#define NV_NVLSTAT_LNK2_RXDET_LINK_STATUS 9:8 /* R---F */ +#define NV_NVLSTAT_LNK2_RXDET_LINK_STATUS_UNINITIALIZED 0x0 /* R---V */ +#define NV_NVLSTAT_LNK2_RXDET_LINK_STATUS_SEARCH 0x1 /* R---V */ +#define NV_NVLSTAT_LNK2_RXDET_LINK_STATUS_FOUND 0x2 /* R---V */ +#define NV_NVLSTAT_LNK2_RXDET_LINK_STATUS_TIMEOUT 0x3 /* 
R---V */ +#define NV_NVLSTAT_LNK2_RXDET_LANE_STATUS 7:0 /* R---F */ +#define NV_NVLSTAT_LNK2_RXDET_LANE_STATUS_FOUND 0x0f /* R---V */ +#define NV_NVLSTAT_LNK3 0x00000013 /* R--4R */ +#define NV_NVLSTAT_LNK3_LINERATE 23:0 /* R---F */ +#define NV_NVLSTAT_LNK4 0x00000014 /* R--4R */ +#define NV_NVLSTAT_LNK4_LINKCLOCK 15:0 /* R---F */ +#define NV_NVLSTAT_LNK5 0x00000015 /* R--4R */ +#define NV_NVLSTAT_LNK5_DATARATE 31:0 /* R---F */ +#define NV_NVLSTAT_TX00 0x00000020 /* R--4R */ +#define NV_NVLSTAT_TX00_COUNT_TX_STATE_EIGHTH_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX00_COUNT_TX_STATE_EIGHTH_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_TX00_COUNT_TX_STATE_EIGHTH_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX00_COUNT_TX_STATE_EIGHTH_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX00_COUNT_TX_STATE_EIGHTH_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX01 0x00000021 /* R--4R */ +#define NV_NVLSTAT_TX01_COUNT_TX_STATE_NVHS_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX01_COUNT_TX_STATE_NVHS_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_TX01_COUNT_TX_STATE_NVHS_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX01_COUNT_TX_STATE_NVHS_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX01_COUNT_TX_STATE_NVHS_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX02 0x00000022 /* R--4R */ +#define NV_NVLSTAT_TX02_COUNT_TX_STATE_OTHER_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX02_COUNT_TX_STATE_OTHER_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_TX02_COUNT_TX_STATE_OTHER_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX02_COUNT_TX_STATE_OTHER_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX02_COUNT_TX_STATE_OTHER_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX03 0x00000023 /* R--4R */ +#define NV_NVLSTAT_TX03_DELAY_TX_STATE_LP_ENTER_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX03_DELAY_TX_STATE_LP_ENTER_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_TX03_DELAY_TX_STATE_LP_ENTER_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX03_DELAY_TX_STATE_LP_ENTER_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX03_DELAY_TX_STATE_LP_ENTER_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX04 0x00000024 /* R--4R */ +#define NV_NVLSTAT_TX04_DELAY_TX_STATE_LP_EXIT_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX04_DELAY_TX_STATE_LP_EXIT_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_TX04_DELAY_TX_STATE_LP_EXIT_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX04_DELAY_TX_STATE_LP_EXIT_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX04_DELAY_TX_STATE_LP_EXIT_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX05 0x00000025 /* R--4R */ +#define NV_NVLSTAT_TX05_NUM_TX_STATE_LP_EXIT_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX05_NUM_TX_STATE_LP_EXIT_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_TX05_NUM_TX_STATE_LP_EXIT_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX05_NUM_TX_STATE_LP_EXIT_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX05_NUM_TX_STATE_LP_EXIT_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX06 0x00000026 /* R--4R */ +#define NV_NVLSTAT_TX06_NUM_TX_STATE_LP_ENTER_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX06_NUM_TX_STATE_LP_ENTER_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_TX06_NUM_TX_STATE_LP_ENTER_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX06_NUM_TX_STATE_LP_ENTER_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX06_NUM_TX_STATE_LP_ENTER_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX07 0x00000027 /* R--4R */ +#define NV_NVLSTAT_TX07_DELAY_TX_FB_EXIT_OTHER_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX07_DELAY_TX_FB_EXIT_OTHER_VALUE_SRCOVF 0x000000ff /* R---V */ 
+#define NV_NVLSTAT_TX07_DELAY_TX_FB_EXIT_OTHER_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX07_DELAY_TX_FB_EXIT_OTHER_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX07_DELAY_TX_FB_EXIT_OTHER_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX08 0x00000028 /* R--4R */ +#define NV_NVLSTAT_TX08_DELAY_TX_FB_ENTER_OTHER_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX08_DELAY_TX_FB_ENTER_OTHER_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_TX08_DELAY_TX_FB_ENTER_OTHER_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX08_DELAY_TX_FB_ENTER_OTHER_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX08_DELAY_TX_FB_ENTER_OTHER_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX09 0x00000029 /* R--4R */ +#define NV_NVLSTAT_TX09_REPLAY_EVENTS_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_TX09_REPLAY_EVENTS_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_TX09_REPLAY_EVENTS_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_TX09_REPLAY_EVENTS_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_TX09_REPLAY_EVENTS_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TX12 0x0000002c /* R--4R */ +#define NV_NVLSTAT_TX12_TX_LPOCC_HIST 31:0 /* R---F */ +#define NV_NVLSTAT_TX13 0x0000002d /* R--4R */ +#define NV_NVLSTAT_TX13_TX_LPOCC_HIST 31:0 /* R---F */ +#define NV_NVLSTAT_TX14 0x0000002e /* R--4R */ +#define NV_NVLSTAT_TX14_TX_LPEXIT_HIST 31:0 /* R---F */ +#define NV_NVLSTAT_TX15 0x0000002f /* R--4R */ +#define NV_NVLSTAT_TX15_TX_LPEXIT_HIST 31:0 /* R---F */ +#define NV_NVLSTAT_RX00 0x00000040 /* R--4R */ +#define NV_NVLSTAT_RX00_REPLAY_EVENTS_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX00_REPLAY_EVENTS_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_RX00_REPLAY_EVENTS_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX00_REPLAY_EVENTS_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX00_REPLAY_EVENTS_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX01 0x00000041 /* R--4R */ +#define NV_NVLSTAT_RX01_FLIT_CRC_ERRORS_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX01_FLIT_CRC_ERRORS_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_RX01_FLIT_CRC_ERRORS_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX01_FLIT_CRC_ERRORS_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX01_FLIT_CRC_ERRORS_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX03 0x00000043 /* R--4R */ +#define NV_NVLSTAT_RX03_DELAY_RX_LP_ENTER_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX03_DELAY_RX_LP_ENTER_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_RX03_DELAY_RX_LP_ENTER_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX03_DELAY_RX_LP_ENTER_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX03_DELAY_RX_LP_ENTER_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX04 0x00000044 /* R--4R */ +#define NV_NVLSTAT_RX04_DELAY_RX_LP_EXIT_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX04_DELAY_RX_LP_EXIT_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_RX04_DELAY_RX_LP_EXIT_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX04_DELAY_RX_LP_EXIT_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX04_DELAY_RX_LP_EXIT_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX05 0x00000045 /* R--4R */ +#define NV_NVLSTAT_RX05_DELAY_RX_FB_ENTER_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX05_DELAY_RX_FB_ENTER_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_RX05_DELAY_RX_FB_ENTER_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX05_DELAY_RX_FB_ENTER_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX05_DELAY_RX_FB_ENTER_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX06 0x00000046 /* R--4R */ +#define NV_NVLSTAT_RX06_DELAY_RX_FB_EXIT_VALUE 30:0 /* R---F */ +#define 
NV_NVLSTAT_RX06_DELAY_RX_FB_EXIT_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_RX06_DELAY_RX_FB_EXIT_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX06_DELAY_RX_FB_EXIT_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX06_DELAY_RX_FB_EXIT_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX08 0x00000048 /* R--4R */ +#define NV_NVLSTAT_RX08_ERRORLOG_ERR_CNT_MULTI 7:0 /* R---F */ +#define NV_NVLSTAT_RX09 0x00000049 /* R--4R */ +#define NV_NVLSTAT_RX09_ERRORLOG_ERR_CNT_7 31:24 /* R---F */ +#define NV_NVLSTAT_RX09_ERRORLOG_ERR_CNT_6 23:16 /* R---F */ +#define NV_NVLSTAT_RX09_ERRORLOG_ERR_CNT_5 15:8 /* R---F */ +#define NV_NVLSTAT_RX09_ERRORLOG_ERR_CNT_4 7:0 /* R---F */ +#define NV_NVLSTAT_RX10 0x0000004a /* R--4R */ +#define NV_NVLSTAT_RX10_ERRORLOG_ERR_CNT_3 31:24 /* R---F */ +#define NV_NVLSTAT_RX10_ERRORLOG_ERR_CNT_2 23:16 /* R---F */ +#define NV_NVLSTAT_RX10_ERRORLOG_ERR_CNT_1 15:8 /* R---F */ +#define NV_NVLSTAT_RX10_ERRORLOG_ERR_CNT_0 7:0 /* R---F */ +#define NV_NVLSTAT_RX12 0x0000004c /* R--4R */ +#define NV_NVLSTAT_RX12_ECC_CORRECTED_ERR_L0_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX12_ECC_CORRECTED_ERR_L0_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_RX12_ECC_CORRECTED_ERR_L0_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX12_ECC_CORRECTED_ERR_L0_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX12_ECC_CORRECTED_ERR_L0_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX13 0x0000004d /* R--4R */ +#define NV_NVLSTAT_RX13_ECC_CORRECTED_ERR_L1_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX13_ECC_CORRECTED_ERR_L1_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_RX13_ECC_CORRECTED_ERR_L1_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX13_ECC_CORRECTED_ERR_L1_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX13_ECC_CORRECTED_ERR_L1_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX14 0x0000004e /* R--4R */ +#define NV_NVLSTAT_RX14_ECC_CORRECTED_ERR_L2_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX14_ECC_CORRECTED_ERR_L2_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_RX14_ECC_CORRECTED_ERR_L2_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX14_ECC_CORRECTED_ERR_L2_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX14_ECC_CORRECTED_ERR_L2_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_RX15 0x0000004f /* R--4R */ +#define NV_NVLSTAT_RX15_ECC_CORRECTED_ERR_L3_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_RX15_ECC_CORRECTED_ERR_L3_VALUE_SRCOVF 0x0000ffff /* R---V */ +#define NV_NVLSTAT_RX15_ECC_CORRECTED_ERR_L3_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_RX15_ECC_CORRECTED_ERR_L3_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_RX15_ECC_CORRECTED_ERR_L3_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_TR00 0x00000090 /* R--4R */ +#define NV_NVLSTAT_TR00_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR01 0x00000091 /* R--4R */ +#define NV_NVLSTAT_TR01_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR02 0x00000092 /* R--4R */ +#define NV_NVLSTAT_TR02_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR03 0x00000093 /* R--4R */ +#define NV_NVLSTAT_TR03_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR04 0x00000094 /* R--4R */ +#define NV_NVLSTAT_TR04_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR05 0x00000095 /* R--4R */ +#define NV_NVLSTAT_TR05_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR06 0x00000096 /* R--4R */ +#define NV_NVLSTAT_TR06_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR07 0x00000097 /* R--4R */ +#define NV_NVLSTAT_TR07_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR08 0x00000098 /* R--4R */ +#define NV_NVLSTAT_TR08_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR09 0x00000099 /* R--4R */ +#define NV_NVLSTAT_TR09_DATA 31:0 
/* R---F */ +#define NV_NVLSTAT_TR10 0x0000009a /* R--4R */ +#define NV_NVLSTAT_TR10_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR11 0x0000009b /* R--4R */ +#define NV_NVLSTAT_TR11_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR12 0x0000009c /* R--4R */ +#define NV_NVLSTAT_TR12_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR13 0x0000009d /* R--4R */ +#define NV_NVLSTAT_TR13_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR14 0x0000009e /* R--4R */ +#define NV_NVLSTAT_TR14_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR15 0x0000009f /* R--4R */ +#define NV_NVLSTAT_TR15_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_TR16 0x000000a0 /* R--4R */ +#define NV_NVLSTAT_TR16_L0FOM 15:0 /* R---F */ +#define NV_NVLSTAT_TR16_L1FOM 31:16 /* R---F */ +#define NV_NVLSTAT_TR17 0x000000a1 /* R--4R */ +#define NV_NVLSTAT_TR17_L2FOM 15:0 /* R---F */ +#define NV_NVLSTAT_TR17_L3FOM 31:16 /* R---F */ +#define NV_NVLSTAT_DB00 0x00000080 /* R--4R */ +#define NV_NVLSTAT_DB00_ERRORS_INJECTED_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_DB00_ERRORS_INJECTED_VALUE_SRCOVF 0x7fffffff /* R---V */ +#define NV_NVLSTAT_DB00_ERRORS_INJECTED_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_DB00_ERRORS_INJECTED_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_DB00_ERRORS_INJECTED_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_DB01 0x00000081 /* R--4R */ +#define NV_NVLSTAT_DB01_ERROR_COUNT_ERR_LANECRC_L3 31:24 /* R---F */ +#define NV_NVLSTAT_DB01_ERROR_COUNT_ERR_LANECRC_L2 23:16 /* R---F */ +#define NV_NVLSTAT_DB01_ERROR_COUNT_ERR_LANECRC_L1 15:8 /* R---F */ +#define NV_NVLSTAT_DB01_ERROR_COUNT_ERR_LANECRC_L0 7:0 /* R---F */ +#define NV_NVLSTAT_DB02 0x00000082 /* R--4R */ +#define NV_NVLSTAT_DB02_ERROR_COUNT_ERR_LANECRC_L7 31:24 /* R---F */ +#define NV_NVLSTAT_DB02_ERROR_COUNT_ERR_LANECRC_L6 23:16 /* R---F */ +#define NV_NVLSTAT_DB02_ERROR_COUNT_ERR_LANECRC_L5 15:8 /* R---F */ +#define NV_NVLSTAT_DB02_ERROR_COUNT_ERR_LANECRC_L4 7:0 /* R---F */ +#define NV_NVLSTAT_DB03 0x00000083 /* R--4R */ +#define NV_NVLSTAT_DB03_RXSLSM_ERR_CNTL_CLK_SWITCH_ERR 31:31 /* R---F */ +#define NV_NVLSTAT_DB03_RXSLSM_ERR_CNTL_OFF2SAFE_LINK_DET_ERR 2:2 /* R---F */ +#define NV_NVLSTAT_DB03_RXSLSM_ERR_CNTL_CONST_DET_ERR 1:1 /* R---F */ +#define NV_NVLSTAT_DB04 0x00000084 /* R--4R */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_E2S_STROBE_NO_LD_ERR 23:23 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_H2S_STROBE_NO_LD_ERR 22:22 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_02S_STROBE_NO_LD_ERR 21:21 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_E2S_SD_NO_LD_ERR 20:20 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_H2S_SD_NO_LD_ERR 19:19 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_O2S_SD_NO_LD_ERR 18:18 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_RC_DEADLINE_ERR 15:15 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_RC_TXPWR_ERR 14:14 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_RC_RXPWR_ERR 13:13 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_E2SAFE_LD_ERR 12:12 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_SAFE_NO_LD_ERR 11:11 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_FENCE_ERR 10:10 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_HS2SAFE_LINK_DET_ERR 9:9 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_TRAIN2SAFE_LINK_DET_ERR 8:8 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_FIFO_SKEW_ERR 7:7 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_SYM_ALIGN_END_ERR 6:6 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_SYM_LOCK_ERR 5:5 /* R---F */ +#define 
NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_SCRAM_LOCK_ERR 4:4 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_SAFE2NO_LINK_DET_ERR 3:3 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_OFF2SAFE_LINK_DET_ERR 2:2 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_CONST_DET_ERR 1:1 /* R---F */ +#define NV_NVLSTAT_DB04_RXSLSM_ERR_CNTL_FIFO_DRAIN_ERR 0:0 /* R---F */ +#define NV_NVLSTAT_DB05 0x00000085 /* R--4R */ +#define NV_NVLSTAT_DB05_TIMEOUT_LOG_SYM_LOCK_LANE 31:24 /* R---F */ +#define NV_NVLSTAT_DB05_TIMEOUT_LOG_SCRAM_LOCK_LANE 23:16 /* R---F */ +#define NV_NVLSTAT_DB05_TIMEOUT_LOG_CONST_DET_LANE 15:8 /* R---F */ +#define NV_NVLSTAT_DB05_TIMEOUT_LOG_FIFO_DRAIN_LANE 7:0 /* R---F */ +#define NV_NVLSTAT_DB06 0x00000086 /* R--4R */ +#define NV_NVLSTAT_DB06_TIMEOUT_LOG_SYM_ALIGN_END_LANE 31:24 /* R---F */ +#define NV_NVLSTAT_DB06_TIMEOUT_LOG_FIFO_SKEW_LANE 23:16 /* R---F */ +#define NV_NVLSTAT_DB07 0x00000087 /* R--4R */ +#define NV_NVLSTAT_DB07_FIFO_STATUS_RX_0_ENTRIES_USED_3 29:24 /* R---F */ +#define NV_NVLSTAT_DB07_FIFO_STATUS_RX_0_ENTRIES_USED_2 21:16 /* R---F */ +#define NV_NVLSTAT_DB07_FIFO_STATUS_RX_0_ENTRIES_USED_1 13:8 /* R---F */ +#define NV_NVLSTAT_DB07_FIFO_STATUS_RX_0_ENTRIES_USED_0 5:0 /* R---F */ +#define NV_NVLSTAT_DB08 0x00000088 /* R--4R */ +#define NV_NVLSTAT_DB08_FIFO_STATUS_RX_0_ENTRIES_USED_7 29:24 /* R---F */ +#define NV_NVLSTAT_DB08_FIFO_STATUS_RX_0_ENTRIES_USED_6 21:16 /* R---F */ +#define NV_NVLSTAT_DB08_FIFO_STATUS_RX_0_ENTRIES_USED_5 13:8 /* R---F */ +#define NV_NVLSTAT_DB08_FIFO_STATUS_RX_0_ENTRIES_USED_4 5:0 /* R---F */ +#define NV_NVLSTAT_DB09 0x00000089 /* R--4R */ +#define NV_NVLSTAT_DB09_SLSM_STATUS_RX_SURPRISE_LD_CNT_VALUE 30:0 /* R---F */ +#define NV_NVLSTAT_DB09_SLSM_STATUS_RX_SURPRISE_LD_CNT_VALUE_SRCOVF 0x000000ff /* R---V */ +#define NV_NVLSTAT_DB09_SLSM_STATUS_RX_SURPRISE_LD_CNT_OVER 31:31 /* R---F */ +#define NV_NVLSTAT_DB09_SLSM_STATUS_RX_SURPRISE_LD_CNT_OVER_OVER 0x1 /* R---V */ +#define NV_NVLSTAT_DB09_SLSM_STATUS_RX_SURPRISE_LD_CNT_OVER_OKAY 0x0 /* R---V */ +#define NV_NVLSTAT_DB10 0x0000008a /* R--4R */ +#define NV_NVLSTAT_DB10_DATA 31:0 /* R---F */ +#define NV_NVLSTAT_MN00 0x000000ff /* R--4R */ +#define NV_NVLSTAT_MN00_LINK_INTR_SUBCODE 15:8 /* R---F */ +#define NV_NVLSTAT_MN00_LINK_INTR_CODE 7:0 /* R---F */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_FORCE_EQ_OVERRIDE_1 0x00000060 /* RWE-V */ +#define NV_MINION_NVLINK_DL_CMD_COMMAND_RELEASE_EQ_OVERRIDE_1 0x00000061 /* RWE-V */ +#endif // __lr10_dev_minion_ip_addendum_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_npg_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_npg_ip.h new file mode 100644 index 000000000..7441fa342 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_npg_ip.h @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_npg_ip_h__ +#define __lr10_dev_npg_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NPG_NPG_INTERRUPT_STATUS 0x00000400 /* R--4R */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV0_FNC_OR 0:0 /* R-EVF */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV0_FNC_OR_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV0_INT_STATUS 3:1 /* R-EVF */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV0_INT_STATUS_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV0_INT_STATUS_FATAL 0x00000001 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV0_INT_STATUS_NONFATAL 0x00000002 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV0_INT_STATUS_CORRECTABLE 0x00000004 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV1_FNC_OR 4:4 /* R-EVF */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV1_FNC_OR_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV1_INT_STATUS 7:5 /* R-EVF */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV1_INT_STATUS_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV1_INT_STATUS_FATAL 0x00000001 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV1_INT_STATUS_NONFATAL 0x00000002 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV1_INT_STATUS_CORRECTABLE 0x00000004 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV2_FNC_OR 8:8 /* R-EVF */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV2_FNC_OR_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV2_INT_STATUS 11:9 /* R-EVF */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV2_INT_STATUS_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV2_INT_STATUS_FATAL 0x00000001 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV2_INT_STATUS_NONFATAL 0x00000002 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV2_INT_STATUS_CORRECTABLE 0x00000004 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV3_FNC_OR 12:12 /* R-EVF */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV3_FNC_OR_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV3_INT_STATUS 15:13 /* R-EVF */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV3_INT_STATUS_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV3_INT_STATUS_FATAL 0x00000001 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV3_INT_STATUS_NONFATAL 0x00000002 /* R---V */ +#define NV_NPG_NPG_INTERRUPT_STATUS_DEV3_INT_STATUS_CORRECTABLE 0x00000004 /* R---V */ + +#define NV_NPG_WARMRESET 0x00000140 /* RW-4R */ +#define NV_NPG_WARMRESET_NPORTWARMRESET 11:8 /* RWEVF */ +#define NV_NPG_WARMRESET_NPORTWARMRESET_INIT 0x0000000f /* RWE-V */ +#define NV_NPG_WARMRESET_NPORTWARMRESET_ASSERT 0x00000000 /* RW--V */ +#define NV_NPG_WARMRESET_NPORTWARMRESET_DEASSERT 0x00000001 /* RW--V */ + +#define NV_NPG_DEBUG_CLEAR 0x00000144 /* RW-4R */ +#define NV_NPG_DEBUG_CLEAR_CLEAR 3:0 /* RWIVF */ +#define NV_NPG_DEBUG_CLEAR_CLEAR_ASSERT_NPORT_0 0x00000001 /* RW--V */ +#define NV_NPG_DEBUG_CLEAR_CLEAR_ASSERT_NPORT_1 0x00000002 /* RW--V */ +#define NV_NPG_DEBUG_CLEAR_CLEAR_ASSERT_NPORT_2 0x00000004 /* RW--V */ +#define NV_NPG_DEBUG_CLEAR_CLEAR_ASSERT_NPORT_3 
0x00000008 /* RW--V */ +#define NV_NPG_DEBUG_CLEAR_CLEAR_ASSERT_ALL 0x0000000f /* RW--V */ +#define NV_NPG_DEBUG_CLEAR_CLEAR_DEASSERT 0x00000000 /* RWI-V */ +#define NV_NPG_DEBUG_CLEAR_CMN_CLEAR 31:31 /* RWIVF */ +#define NV_NPG_DEBUG_CLEAR_CMN_CLEAR_ASSERT 0x00000001 /* RW--V */ +#define NV_NPG_DEBUG_CLEAR_CMN_CLEAR_DEASSERT 0x00000000 /* RWI-V */ + +#define NV_NPG_CTRL_PRI_MULTICAST 0x000000c0 /* RW-4R */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE 5:0 /* RWEVF */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE_NO_NPORT_ENABLED 0x00000000 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE_PORT0_NPORT_ENABLED 0x00000001 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE_PORT1_NPORT_ENABLED 0x00000002 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE_PORT01_NPORT_ENABLED 0x00000003 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE_PORT2_NPORT_ENABLED 0x00000004 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE_PORT02_NPORT_ENABLED 0x00000005 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE_PORT3_NPORT_ENABLED 0x00000008 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_NPORT_ENABLE_ALL_NPORT_ENABLED 0x0000000f /* RWE-V */ +#define NV_NPG_CTRL_PRI_MULTICAST_READ_MODE 7:6 /* RWEVF */ +#define NV_NPG_CTRL_PRI_MULTICAST_READ_MODE_LOW_SELECTED_BUS 0x00000000 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_READ_MODE_OR_ALL_BUSSES 0x00000001 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_READ_MODE_AND_ALL_BUSSES 0x00000002 /* RWE-V */ +#define NV_NPG_CTRL_PRI_MULTICAST_READ_MODE_ZEROS 0x00000003 /* RW--V */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED 13:8 /* R-EVF */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED_NO_MCAST_ENABLED 0x00000000 /* R---V */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED_PORT0_MCAST_ENABLED 0x00000001 /* R---V */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED_PORT1_MCAST_ENABLED 0x00000002 /* R---V */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED_PORT01_MCAST_ENABLED 0x00000003 /* R---V */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED_PORT2_MCAST_ENABLED 0x00000004 /* R---V */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED_PORT02_MCAST_ENABLED 0x00000005 /* R---V */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED_PORT3_MCAST_ENABLED 0x00000008 /* R---V */ +#define NV_NPG_CTRL_PRI_MULTICAST_MCAST_ENABLED_ALL_MCAST_ENABLED 0x0000000f /* R-E-V */ +#endif // __lr10_dev_npg_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_npgperf_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_npgperf_ip.h new file mode 100644 index 000000000..a4cb77b60 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_npgperf_ip.h @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_npgperf_ip_h__ +#define __lr10_dev_npgperf_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NPGPERF_CTRL_CLOCK_GATING 0x000000c8 /* RW-4R */ +#define NV_NPGPERF_CTRL_CLOCK_GATING_CG1_SLCG 3:0 /* RWEVF */ +#define NV_NPGPERF_CTRL_CLOCK_GATING_CG1_SLCG_INIT 0x0000000f /* RWE-V */ +#define NV_NPGPERF_CTRL_CLOCK_GATING_CG1_SLCG__PROD 0x00000000 /* RW--V */ + +#define NV_NPGPERF_PERF_CTRL_CLOCK_GATING 0x000000cc /* RW-4R */ +#define NV_NPGPERF_PERF_CTRL_CLOCK_GATING_CG1_SLCG 3:0 /* RWEVF */ +#define NV_NPGPERF_PERF_CTRL_CLOCK_GATING_CG1_SLCG_INIT 0x0000000f /* RWE-V */ +#define NV_NPGPERF_PERF_CTRL_CLOCK_GATING_CG1_SLCG__PROD 0x00000000 /* RW--V */ +#define NV_NPGPERF_PERF_CTRL_CLOCK_GATING_CONTEXT_FREEZE 7:4 /* RWEVF */ +#define NV_NPGPERF_PERF_CTRL_CLOCK_GATING_CONTEXT_FREEZE_DISABLED 0x00000000 /* RWE-V */ +#define NV_NPGPERF_PERF_CTRL_CLOCK_GATING_CONTEXT_FREEZE_ENABLED 0x00000001 /* RW--V */ +#define NV_NPGPERF_PERF_CTRL_CLOCK_GATING_CONTEXT_FREEZE__PROD 0x00000000 /* RW--V */ + +#define NV_NPGPERF_CTRL_PRI_MULTICAST 0x000000c0 /* RW-4R */ +#define NV_NPGPERF_CTRL_PRI_MULTICAST_NPORT_ENABLE 5:0 /* RWEVF */ +#define NV_NPGPERF_CTRL_PRI_MULTICAST_NPORT_ENABLE_INIT 0x0000000f /* RWE-V */ +#define NV_NPGPERF_CTRL_PRI_MULTICAST_READ_MODE 7:6 /* RWEVF */ +#define NV_NPGPERF_CTRL_PRI_MULTICAST_READ_MODE_LOW_SELECTED_BUS 0x00000000 /* RW--V */ +#define NV_NPGPERF_CTRL_PRI_MULTICAST_READ_MODE_OR_ALL_BUSSES 0x00000001 /* RW--V */ +#define NV_NPGPERF_CTRL_PRI_MULTICAST_READ_MODE_AND_ALL_BUSSES 0x00000002 /* RWE-V */ +#define NV_NPGPERF_CTRL_PRI_MULTICAST_READ_MODE_ZEROS 0x00000003 /* RW--V */ +#endif // __lr10_dev_npgperf_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nport_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nport_ip.h new file mode 100644 index 000000000..2c8e2f133 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nport_ip.h @@ -0,0 +1,342 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nport_ip_h__ +#define __lr10_dev_nport_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_0 0x0000010c /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_0_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_0_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_0_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_0 0x00000110 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_0_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_0_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_0_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_0 0x00000114 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_0_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_0_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_0_LIMIT__PROD 0x00000680 /* RW--V */ +#define NV_NPORT_CTRL_SLCG 0x00000050 /* RW-4R */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_INGRESS 0:0 /* RWEVF */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_INGRESS_ENABLE 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_INGRESS_DISABLE 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_INGRESS__PROD 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_ROUTE 1:1 /* RWEVF */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_ROUTE_ENABLE 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_ROUTE_DISABLE 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_ROUTE__PROD 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_EGRESS 2:2 /* RWEVF */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_EGRESS_ENABLE 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_EGRESS_DISABLE 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_EGRESS__PROD 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_STRACK 3:3 /* RWEVF */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_STRACK_ENABLE 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_STRACK_DISABLE 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_STRACK__PROD 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_TAGSTATE 4:4 /* RWEVF */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_TAGSTATE_ENABLE 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_TAGSTATE_DISABLE 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_TAGSTATE__PROD 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_TREX 5:5 /* RWEVF */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_TREX_ENABLE 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_TREX_DISABLE 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_SLCG_DIS_CG_TREX__PROD 0x00000000 /* RW--V */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_0_0 0x00000120 /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_0_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_0_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_0_1 0x00000124 /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_0_1_PACKETCOUNT 15:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_0_1_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_0_0 0x00000128 /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_0_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_0_0_PACKETCOUNT_INIT 
0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_0_1 0x0000012c /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_0_1_PACKETCOUNT 15:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_0_1_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_0_0 0x00000130 /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_0_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_0_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_0_1 0x00000134 /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_0_1_PACKETCOUNT 15:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_0_1_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_0_0 0x00000138 /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_0_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_0_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_0_1 0x0000013c /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_0_1_PACKETCOUNT 15:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_0_1_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_0_0 0x00000118 /* R--4R */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_0_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_0_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_0_1 0x0000011c /* R--4R */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_0_1_PACKETCOUNT 15:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_0_1_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_CONTROL 0x00000100 /* RW-4R */ +#define NV_NPORT_PORTSTAT_CONTROL_SWEEPMODE 1:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_CONTROL_SWEEPMODE_SINGLE 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_CONTROL_SWEEPMODE_CONTINUOUS 0x00000001 /* RW--V */ +#define NV_NPORT_PORTSTAT_CONTROL_SWEEPMODE_SWONDEMAND 0x00000002 /* RW--V */ +#define NV_NPORT_PORTSTAT_CONTROL_RANGESELECT 7:4 /* RWEVF */ +#define NV_NPORT_PORTSTAT_CONTROL_RANGESELECT_BITS13TO0 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_CONTROL_RANGESELECT_BITS15TO2 0x00000001 /* RW--V */ +#define NV_NPORT_PORTSTAT_CONTROL_RANGESELECT_BITS17TO4 0x00000002 /* RW--V */ +#define NV_NPORT_PORTSTAT_CONTROL_RANGESELECT_BITS19TO6 0x00000003 /* RW--V */ +#define NV_NPORT_PORTSTAT_CONTROL_RANGESELECT_BITS21TO8 0x00000004 /* RW--V */ +#define NV_NPORT_PORTSTAT_CONTROL_RANGESELECT_BITS23TO10 0x00000005 /* RW--V */ +#define NV_NPORT_PORTSTAT_SOURCE_FILTER_0 0x000002ac /* RW-4R */ +#define NV_NPORT_PORTSTAT_SOURCE_FILTER_0_SRCFILTERBIT 31:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_SOURCE_FILTER_0_SRCFILTERBIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_SOURCE_FILTER_1 0x000002b0 /* RW-4R */ +#define NV_NPORT_PORTSTAT_SOURCE_FILTER_1_SRCFILTERBIT 3:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_SOURCE_FILTER_1_SRCFILTERBIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_WINDOW_LIMIT 0x00000108 /* RW-4R */ +#define NV_NPORT_PORTSTAT_WINDOW_LIMIT_WINDOWLIMIT 31:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_WINDOW_LIMIT_WINDOWLIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_SNAP_CONTROL 0x00000104 /* RW-4R */ +#define NV_NPORT_PORTSTAT_SNAP_CONTROL_STARTCOUNTER 0:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_SNAP_CONTROL_STARTCOUNTER_ENABLE 0x00000001 /* RW--V */ +#define NV_NPORT_PORTSTAT_SNAP_CONTROL_STARTCOUNTER_DISABLE 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_SNAP_CONTROL_SNAPONDEMAND 4:4 /* RWEVF */ +#define NV_NPORT_PORTSTAT_SNAP_CONTROL_SNAPONDEMAND_ENABLE 
0x00000001 /* RW--V */ +#define NV_NPORT_PORTSTAT_SNAP_CONTROL_SNAPONDEMAND_DISABLE 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_BUFFER_READY 0x00000044 /* RW-4R */ +#define NV_NPORT_CTRL_BUFFER_READY_BUFFERRDY 0:0 /* RWEVF */ +#define NV_NPORT_CTRL_BUFFER_READY_BUFFERRDY_ENABLE 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_BUFFER_READY_BUFFERRDY_DISABLE 0x00000000 /* RWE-V */ +#define NV_NPORT_REQLINKID 0x00000054 /* RW-4R */ +#define NV_NPORT_REQLINKID_REQROUTINGID 8:0 /* RWEVF */ +#define NV_NPORT_REQLINKID_REQROUTINGID_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_REQLINKID_REQROUTINGLAN 18:15 /* RWEVF */ +#define NV_NPORT_REQLINKID_REQROUTINGLAN_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_REQLINKID_REQROUTINGIDREMAP 21:21 /* RWEVF */ +#define NV_NPORT_REQLINKID_REQROUTINGIDREMAP_ENABLE 0x00000001 /* RW--V */ +#define NV_NPORT_REQLINKID_REQROUTINGIDREMAP_DISABLE 0x00000000 /* RWE-V */ +#define NV_NPORT_REQLINKID_REQROUTINGLANREMAP 22:22 /* RWEVF */ +#define NV_NPORT_REQLINKID_REQROUTINGLANREMAP_ENABLE 0x00000001 /* RW--V */ +#define NV_NPORT_REQLINKID_REQROUTINGLANREMAP_DISABLE 0x00000000 /* RWE-V */ +#define NV_NPORT_REQLINKID_MASKS 0x00000058 /* RW-4R */ +#define NV_NPORT_REQLINKID_MASKS_REQROUTINGID_MASK 8:0 /* RWEVF */ +#define NV_NPORT_REQLINKID_MASKS_REQROUTINGID_MASK_INIT 0x000001ff /* RWE-V */ +#define NV_NPORT_REQLINKID_MASKS_REQROUTINGLAN_MASK 18:15 /* RWEVF */ +#define NV_NPORT_REQLINKID_MASKS_REQROUTINGLAN_MASK_INIT 0x0000000f /* RWE-V */ +#define NV_NPORT_CTRL 0x00000040 /* RW-4R */ +#define NV_NPORT_CTRL_TRUNKLINKENB 0:0 /* RWEVF */ +#define NV_NPORT_CTRL_TRUNKLINKENB_TRUNKLINK 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_TRUNKLINKENB_ACCESSLINK 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_EGDRAINENB 1:1 /* RWEVF */ +#define NV_NPORT_CTRL_EGDRAINENB_ENABLE 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_EGDRAINENB_DISABLE 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_RTDRAINENB 2:2 /* RWEVF */ +#define NV_NPORT_CTRL_RTDRAINENB_ENABLE 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_RTDRAINENB_DISABLE 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_ENDPOINT_COUNT 5:4 /* RWEVF */ +#define NV_NPORT_CTRL_ENDPOINT_COUNT_512 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_ENDPOINT_COUNT_1024 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_ENDPOINT_COUNT_2048 0x00000003 /* RW--V */ +#define NV_NPORT_CTRL_INHIBITRAMLOAD 8:8 /* RWEVF */ +#define NV_NPORT_CTRL_INHIBITRAMLOAD_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_SPARE 13:9 /* RWEVF */ +#define NV_NPORT_CTRL_SPARE_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_ENROUTEDBI 16:16 /* RWEVF */ +#define NV_NPORT_CTRL_ENROUTEDBI_ENABLE 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_ENROUTEDBI_DISABLE 0x00000000 /* RW--V */ +#define NV_NPORT_CTRL_ENEGRESSDBI 17:17 /* RWEVF */ +#define NV_NPORT_CTRL_ENEGRESSDBI_ENABLE 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_ENEGRESSDBI_DISABLE 0x00000000 /* RW--V */ +#define NV_NPORT_ERR_CONTROL_COMMON_NPORT 0x00000470 /* RW-4R */ +#define NV_NPORT_ERR_CONTROL_COMMON_NPORT_CORRECTABLEENABLE 0:0 /* RWEVF */ +#define NV_NPORT_ERR_CONTROL_COMMON_NPORT_CORRECTABLEENABLE_INIT 0x00000001 /* RWE-V */ +#define NV_NPORT_ERR_CONTROL_COMMON_NPORT_FATALENABLE 1:1 /* RWEVF */ +#define NV_NPORT_ERR_CONTROL_COMMON_NPORT_FATALENABLE_INIT 0x00000001 /* RWE-V */ +#define NV_NPORT_ERR_CONTROL_COMMON_NPORT_NONFATALENABLE 2:2 /* RWEVF */ +#define NV_NPORT_ERR_CONTROL_COMMON_NPORT_NONFATALENABLE_INIT 0x00000001 /* RWE-V */ +#define NV_NPORT_CTRL_STOP 0x00000048 /* RW-4R */ +#define NV_NPORT_CTRL_STOP_INGRESS_STOP 
0:0 /* RWEVF */ +#define NV_NPORT_CTRL_STOP_INGRESS_STOP_STOP 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_STOP_INGRESS_STOP_ALLOWTRAFFIC 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_STOP_EGRESS_STOP 8:8 /* RWEVF */ +#define NV_NPORT_CTRL_STOP_EGRESS_STOP_STOP 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_STOP_EGRESS_STOP_ALLOWTRAFFIC 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC 23:16 /* RWEVF */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_ALLOWTRAFFIC 0x00000000 /* RWE-V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_STOPVC0 0x00000001 /* RW--V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_STOPVC1 0x00000002 /* RW--V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_STOPVC2 0x00000004 /* RW--V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_STOPVC3 0x00000008 /* RW--V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_STOPVC4 0x00000010 /* RW--V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_STOPVC5 0x00000020 /* RW--V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_STOPVC6 0x00000040 /* RW--V */ +#define NV_NPORT_CTRL_STOP_ROUTE_STOP_VC_STOPVC7 0x00000080 /* RW--V */ +#define NV_NPORT_CONTAIN_AND_DRAIN 0x0000005c /* RW-4R */ +#define NV_NPORT_CONTAIN_AND_DRAIN_CLEAR 18:18 /* RWIVF */ +#define NV_NPORT_CONTAIN_AND_DRAIN_CLEAR_ENABLE 0x00000001 /* RW--V */ +#define NV_NPORT_CONTAIN_AND_DRAIN_CLEAR_DISABLE 0x00000000 /* RWI-V */ +#define NV_NPORT_INITIALIZATION 0x0000004c /* RW-4R */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_0 0:0 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_0_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_0_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_1 1:1 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_1_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_1_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_2 2:2 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_2_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_2_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_3 3:3 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_3_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_3_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_4 4:4 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_4_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_4_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_5 5:5 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_5_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_5_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_6 6:6 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_6_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_TAGPOOLINIT_6_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_LINKTABLEINIT 8:8 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_LINKTABLEINIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_LINKTABLEINIT_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_REMAPTABINIT 9:9 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_REMAPTABINIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_REMAPTABINIT_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_INITIALIZATION_RIDTABINIT 10:10 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_RIDTABINIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_RIDTABINIT_HWINIT 0x00000001 /* RW--V */ +#define 
NV_NPORT_INITIALIZATION_RLANTABINIT 11:11 /* RWEVF */ +#define NV_NPORT_INITIALIZATION_RLANTABINIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_INITIALIZATION_RLANTABINIT_HWINIT 0x00000001 /* RW--V */ +#define NV_NPORT_SCRATCH_WARM 0x00000fc0 /* RW-4R */ +#define NV_NPORT_SCRATCH_WARM_DATA 31:0 /* RWEVF */ +#define NV_NPORT_SCRATCH_WARM_DATA_INIT 0xdeadbaad /* RWE-V */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_1_0 0x00000154 /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_1_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_LOW_1_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_1_0 0x0000015c /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_1_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_MEDIUM_1_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_1_0 0x00000164 /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_1_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_HIGH_1_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_1_0 0x0000016c /* R--4R */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_1_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_COUNT_PANIC_1_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_1_0 0x0000014c /* R--4R */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_1_0_PACKETCOUNT 31:0 /* R-EVF */ +#define NV_NPORT_PORTSTAT_PACKET_COUNT_1_0_PACKETCOUNT_INIT 0x00000000 /* R-E-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_1 0x00000140 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_1_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_1_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_1_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1 0x00000144 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_1 0x00000148 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_1_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_1_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_1_LIMIT__PROD 0x00000680 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_1 0x00000140 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_1_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_1_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_1_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1 0x00000144 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_1 0x00000148 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_1_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_1_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_1_LIMIT__PROD 0x00000680 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_2 0x00000174 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_2_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_2_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_2_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_2 0x00000178 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_2_LIMIT 23:0 /* RWEVF */ +#define 
NV_NPORT_PORTSTAT_LIMIT_MEDIUM_2_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_2_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_2 0x0000017c /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_2_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_2_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_2_LIMIT__PROD 0x00000680 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_3 0x000001a8 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_3_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_3_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_3_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_3 0x000001ac /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_3_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_3_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_3_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_3 0x000001b0 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_3_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_3_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_3_LIMIT__PROD 0x00000680 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_4 0x000001dc /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_4_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_4_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_4_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_4 0x000001e0 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_4_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_4_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_4_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_4 0x000001e4 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_4_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_4_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_4_LIMIT__PROD 0x00000680 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_5 0x00000210 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_5_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_5_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_5_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_5 0x00000214 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_5_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_5_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_5_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_5 0x00000218 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_5_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_5_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_5_LIMIT__PROD 0x00000680 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_6 0x00000244 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_6_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_6_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_6_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_6 0x00000248 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_6_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_6_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_6_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_6 0x0000024c /* RW-4R */ +#define 
NV_NPORT_PORTSTAT_LIMIT_HIGH_6_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_6_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_6_LIMIT__PROD 0x00000680 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_7 0x00000278 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_7_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_7_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_LOW_7_LIMIT__PROD 0x000000c0 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_7 0x0000027c /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_7_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_7_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_MEDIUM_7_LIMIT__PROD 0x00000140 /* RW--V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_7 0x00000280 /* RW-4R */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_7_LIMIT 23:0 /* RWEVF */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_7_LIMIT_INIT 0x00000000 /* RWE-V */ +#define NV_NPORT_PORTSTAT_LIMIT_HIGH_7_LIMIT__PROD 0x00000680 /* RW--V */ +#endif // __lr10_dev_nport_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nport_ip_addendum.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nport_ip_addendum.h new file mode 100644 index 000000000..d53940182 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nport_ip_addendum.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nport_ip_addendum_h__ +#define __lr10_dev_nport_ip_addendum_h__ + +// VC mapping +// IAS section 8.2.4.5 Table 40. LimeRock VC Error Encoding + +#define NV_NPORT_VC_MAPPING_CREQ0 0x0 +#define NV_NPORT_VC_MAPPING_RSP0 0x5 +#define NV_NPORT_VC_MAPPING_CREQ1 0x6 +#define NV_NPORT_VC_MAPPING_RSP1 0x7 + +#define NV_NPORT_SCRATCH_WARM_PORT_RESET_REQUIRED 0:0 + +#endif // __lr10_dev_nport_ip_addendum_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nv_xp.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nv_xp.h new file mode 100644 index 000000000..7b7a2c331 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nv_xp.h @@ -0,0 +1,379 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nv_xp_h__ +#define __lr10_dev_nv_xp_h__ +/* This file is autogenerated. Do not edit */ +#define NV_XP_LANE_ERROR_STATUS 0x0008D400 /* RW-4R */ +#define NV_XP_LANE_ERROR_STATUS_SYNC_HDR_CODING_ERR 0:0 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_SYNC_HDR_CODING_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_SYNC_HDR_CODING_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_SYNC_HDR_ORDER_ERR 1:1 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_SYNC_HDR_ORDER_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_SYNC_HDR_ORDER_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_OS_DATA_SEQ_ERR 2:2 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_OS_DATA_SEQ_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_OS_DATA_SEQ_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_TSX_DATA_SEQ_ERR 3:3 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_TSX_DATA_SEQ_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_TSX_DATA_SEQ_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_SKPOS_LFSR_ERR 4:4 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_SKPOS_LFSR_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_SKPOS_LFSR_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_RX_CLK_FIFO_OVERFLOW 5:5 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_RX_CLK_FIFO_OVERFLOW_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_RX_CLK_FIFO_OVERFLOW_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_ELASTIC_FIFO_OVERFLOW 6:6 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_ELASTIC_FIFO_OVERFLOW_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_ELASTIC_FIFO_OVERFLOW_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_RCVD_LINK_NUM_ERR 7:7 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_RCVD_LINK_NUM_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_RCVD_LINK_NUM_ERR_ACTIVE 0x00000000 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_RCVD_LANE_NUM_ERR 8:8 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_RCVD_LANE_NUM_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_RCVD_LANE_NUM_ERR_ACTIVE 0x00000000 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_SKP_RCV_SYM_ERR 9:9 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_SKP_RCV_SYM_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ 
+#define NV_XP_LANE_ERROR_STATUS_SKP_RCV_SYM_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_SKPOS_PARITY_ERR 10:10 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_SKPOS_PARITY_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_SKPOS_PARITY_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_LOCAL_DATA_PARITY_ERR 11:11 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_LOCAL_DATA_PARITY_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_LOCAL_DATA_PARITY_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_FIRST_RETIMER_DATA_PARITY_ERR 12:12 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_FIRST_RETIMER_DATA_PARITY_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_FIRST_RETIMER_DATA_PARITY_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_SECOND_RETIMER_DATA_PARITY_ERR 13:13 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_SECOND_RETIMER_DATA_PARITY_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_SECOND_RETIMER_DATA_PARITY_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_MARGIN_CRC_ERR 14:14 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_MARGIN_CRC_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_MARGIN_CRC_ERR_ACTIVE 0x00000001 /* R---V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_MARGIN_PARITY_ERR 15:15 /* RWIVF */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_MARGIN_PARITY_ERR_NOT_ACTIVE 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERROR_STATUS_CTLSKPOS_MARGIN_PARITY_ERR_ACTIVE 0x00000001 /* R---V */ + +#define NV_XP_LANE_ERRORS_COUNT_0 0x0008D40C /* R--4R */ +#define NV_XP_LANE_ERRORS_COUNT_0_LANE_0_VALUE 7:0 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_0_LANE_0_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_0_LANE_1_VALUE 15:8 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_0_LANE_1_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_0_LANE_2_VALUE 23:16 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_0_LANE_2_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_0_LANE_3_VALUE 31:24 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_0_LANE_3_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_LANE_ERRORS_COUNT_1 0x0008D410 /* R--4R */ +#define NV_XP_LANE_ERRORS_COUNT_1_LANE_4_VALUE 7:0 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_1_LANE_4_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_1_LANE_5_VALUE 15:8 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_1_LANE_5_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_1_LANE_6_VALUE 23:16 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_1_LANE_6_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_1_LANE_7_VALUE 31:24 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_1_LANE_7_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_LANE_ERRORS_COUNT_2 0x0008D414 /* R--4R */ +#define NV_XP_LANE_ERRORS_COUNT_2_LANE_8_VALUE 7:0 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_2_LANE_8_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_2_LANE_9_VALUE 15:8 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_2_LANE_9_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_2_LANE_10_VALUE 23:16 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_2_LANE_10_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_2_LANE_11_VALUE 31:24 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_2_LANE_11_VALUE_INIT 
0x00000000 /* R-I-V */ + +#define NV_XP_LANE_ERRORS_COUNT_3 0x0008D418 /* R--4R */ +#define NV_XP_LANE_ERRORS_COUNT_3_LANE_12_VALUE 7:0 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_3_LANE_12_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_3_LANE_13_VALUE 15:8 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_3_LANE_13_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_3_LANE_14_VALUE 23:16 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_3_LANE_14_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_LANE_ERRORS_COUNT_3_LANE_15_VALUE 31:24 /* R-IVF */ +#define NV_XP_LANE_ERRORS_COUNT_3_LANE_15_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L1_1_ENTRY_COUNT(i) (0x0008D910+(i)*4) /* R--4A */ +#define NV_XP_L1_1_ENTRY_COUNT__SIZE_1 1 /* */ +#define NV_XP_L1_1_ENTRY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L1_1_ENTRY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L1_2_ENTRY_COUNT(i) (0x0008D950+(i)*4) /* R--4A */ +#define NV_XP_L1_2_ENTRY_COUNT__SIZE_1 1 /* */ +#define NV_XP_L1_2_ENTRY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L1_2_ENTRY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L1_2_ABORT_COUNT(i) (0x0008D990+(i)*4) /* R--4A */ +#define NV_XP_L1_2_ABORT_COUNT__SIZE_1 1 /* */ +#define NV_XP_L1_2_ABORT_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L1_2_ABORT_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L1_SUBSTATE_TO_DEEP_L1_TIMEOUT_COUNT(i) (0x0008D9D0+(i)*4) /* R--4A */ +#define NV_XP_L1_SUBSTATE_TO_DEEP_L1_TIMEOUT_COUNT__SIZE_1 1 /* */ +#define NV_XP_L1_SUBSTATE_TO_DEEP_L1_TIMEOUT_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L1_SUBSTATE_TO_DEEP_L1_TIMEOUT_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L1_SHORT_DURATION_COUNT(i) (0x0008E0C4+(i)*4) /* R--4A */ +#define NV_XP_L1_SHORT_DURATION_COUNT__SIZE_1 1 /* */ +#define NV_XP_L1_SHORT_DURATION_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L1_SHORT_DURATION_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_RECEIVER_ERRORS_COUNT(i) (0x0008D440+(i)*4) /* R--4A */ +#define NV_XP_RECEIVER_ERRORS_COUNT__SIZE_1 1 /* */ +#define NV_XP_RECEIVER_ERRORS_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_RECEIVER_ERRORS_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_REPLAY_ROLLOVER_COUNT(i) (0x0008D5C0+(i)*4) /* R--4A */ +#define NV_XP_REPLAY_ROLLOVER_COUNT__SIZE_1 1 /* */ +#define NV_XP_REPLAY_ROLLOVER_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_REPLAY_ROLLOVER_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_BAD_DLLP_COUNT(i) (0x0008D6C0+(i)*4) /* R--4A */ +#define NV_XP_BAD_DLLP_COUNT__SIZE_1 1 /* */ +#define NV_XP_BAD_DLLP_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_BAD_DLLP_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_BAD_TLP_COUNT(i) (0x0008D700+(i)*4) /* R--4A */ +#define NV_XP_BAD_TLP_COUNT__SIZE_1 1 /* */ +#define NV_XP_BAD_TLP_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_BAD_TLP_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP__8B10B_ERRORS_COUNT 0x0008D404 /* R--4R */ +#define NV_XP__8B10B_ERRORS_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP__8B10B_ERRORS_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_SYNC_HEADER_ERRORS_COUNT 0x0008D408 /* R--4R */ +#define NV_XP_SYNC_HEADER_ERRORS_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_SYNC_HEADER_ERRORS_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_LCRC_ERRORS_COUNT(i) (0x0008D480+(i)*4) /* R--4A */ +#define NV_XP_LCRC_ERRORS_COUNT__SIZE_1 1 /* */ +#define NV_XP_LCRC_ERRORS_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_LCRC_ERRORS_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + 
+#define NV_XP_FAILED_L0S_EXITS_COUNT(i) (0x0008D4C0+(i)*4) /* R--4A */ +#define NV_XP_FAILED_L0S_EXITS_COUNT__SIZE_1 1 /* */ +#define NV_XP_FAILED_L0S_EXITS_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_FAILED_L0S_EXITS_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_NAKS_SENT_COUNT(i) (0x0008D500+(i)*4) /* R--4A */ +#define NV_XP_NAKS_SENT_COUNT__SIZE_1 1 /* */ +#define NV_XP_NAKS_SENT_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_NAKS_SENT_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_NAKS_RCVD_COUNT(i) (0x0008D540+(i)*4) /* R--4A */ +#define NV_XP_NAKS_RCVD_COUNT__SIZE_1 1 /* */ +#define NV_XP_NAKS_RCVD_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_NAKS_RCVD_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_XP_NAKS_RCVD_COUNT_ILLOGICAL_VALUE 23:16 /* R-IVF */ +#define NV_XP_NAKS_RCVD_COUNT_ILLOGICAL_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L1_TO_RECOVERY_COUNT(i) (0x0008D600+(i)*4) /* R--4A */ +#define NV_XP_L1_TO_RECOVERY_COUNT__SIZE_1 1 /* */ +#define NV_XP_L1_TO_RECOVERY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L1_TO_RECOVERY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L0_TO_RECOVERY_COUNT(i) (0x0008D640+(i)*4) /* R--4A */ +#define NV_XP_L0_TO_RECOVERY_COUNT__SIZE_1 1 /* */ +#define NV_XP_L0_TO_RECOVERY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L0_TO_RECOVERY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_RECOVERY_COUNT(i) (0x0008D680+(i)*4) /* R--4A */ +#define NV_XP_RECOVERY_COUNT__SIZE_1 1 /* */ +#define NV_XP_RECOVERY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_RECOVERY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_CHIPSET_XMIT_L0S_ENTRY_COUNT(i) (0x0008D740+(i)*4) /* R--4A */ +#define NV_XP_CHIPSET_XMIT_L0S_ENTRY_COUNT__SIZE_1 1 /* */ +#define NV_XP_CHIPSET_XMIT_L0S_ENTRY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_CHIPSET_XMIT_L0S_ENTRY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_GPU_XMIT_L0S_ENTRY_COUNT(i) (0x0008D780+(i)*4) /* R--4A */ +#define NV_XP_GPU_XMIT_L0S_ENTRY_COUNT__SIZE_1 1 /* */ +#define NV_XP_GPU_XMIT_L0S_ENTRY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_GPU_XMIT_L0S_ENTRY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L1_ENTRY_COUNT(i) (0x0008D7C0+(i)*4) /* R--4A */ +#define NV_XP_L1_ENTRY_COUNT__SIZE_1 1 /* */ +#define NV_XP_L1_ENTRY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L1_ENTRY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_L1P_ENTRY_COUNT(i) (0x0008D800+(i)*4) /* R--4A */ +#define NV_XP_L1P_ENTRY_COUNT__SIZE_1 1 /* */ +#define NV_XP_L1P_ENTRY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_L1P_ENTRY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_DEEP_L1_ENTRY_COUNT(i) (0x0008D840+(i)*4) /* R--4A */ +#define NV_XP_DEEP_L1_ENTRY_COUNT__SIZE_1 1 /* */ +#define NV_XP_DEEP_L1_ENTRY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_DEEP_L1_ENTRY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_ASLM_COUNT(i) (0x0008D880+(i)*4) /* R--4A */ +#define NV_XP_ASLM_COUNT__SIZE_1 1 /* */ +#define NV_XP_ASLM_COUNT_VALUE 15:0 /* R-IVF */ +#define NV_XP_ASLM_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ + +#define NV_XP_ERROR_COUNTER_RESET 0x0008D900 /* RWI4R */ +#define NV_XP_ERROR_COUNTER_RESET_8B10B_ERRORS_COUNT 0:0 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_8B10B_ERRORS_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_8B10B_ERRORS_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_SYNC_HEADER_ERRORS_COUNT 1:1 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_SYNC_HEADER_ERRORS_COUNT_DONE 0x00000000 /* RWI-V */ +#define 
NV_XP_ERROR_COUNTER_RESET_SYNC_HEADER_ERRORS_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_LANE_ERRORS_COUNT 2:2 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_LANE_ERRORS_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_LANE_ERRORS_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_RECEIVER_ERRORS_COUNT 3:3 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_RECEIVER_ERRORS_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_RECEIVER_ERRORS_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_LCRC_ERRORS_COUNT 4:4 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_LCRC_ERRORS_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_LCRC_ERRORS_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_FAILED_L0S_EXITS_COUNT 5:5 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_FAILED_L0S_EXITS_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_FAILED_L0S_EXITS_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_NAKS_SENT_COUNT 6:6 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_NAKS_SENT_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_NAKS_SENT_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_NAKS_RCVD_COUNT 7:7 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_NAKS_RCVD_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_NAKS_RCVD_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_REPLAY_COUNT 8:8 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_REPLAY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_REPLAY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_REPLAY_ROLLOVER_COUNT 9:9 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_REPLAY_ROLLOVER_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_REPLAY_ROLLOVER_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L1_TO_RECOVERY_COUNT 10:10 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L1_TO_RECOVERY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L1_TO_RECOVERY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L0_TO_RECOVERY_COUNT 11:11 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L0_TO_RECOVERY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L0_TO_RECOVERY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_RECOVERY_COUNT 12:12 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_RECOVERY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_RECOVERY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_BAD_DLLP_COUNT 13:13 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_BAD_DLLP_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_BAD_DLLP_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_BAD_TLP_COUNT 14:14 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_BAD_TLP_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_BAD_TLP_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_CHIPSET_XMIT_L0S_ENTRY_COUNT 15:15 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_CHIPSET_XMIT_L0S_ENTRY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_CHIPSET_XMIT_L0S_ENTRY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_GPU_XMIT_L0S_ENTRY_COUNT 16:16 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_GPU_XMIT_L0S_ENTRY_COUNT_DONE 
0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_GPU_XMIT_L0S_ENTRY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L1_ENTRY_COUNT 17:17 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L1_ENTRY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L1_ENTRY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L1P_ENTRY_COUNT 18:18 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L1P_ENTRY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L1P_ENTRY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_DEEP_L1_ENTRY_COUNT 19:19 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_DEEP_L1_ENTRY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_DEEP_L1_ENTRY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_ASLM_COUNT 20:20 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_ASLM_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_ASLM_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_SKPOS_ERRORS_COUNT 21:21 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_SKPOS_ERRORS_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_SKPOS_ERRORS_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L1_1_ENTRY_COUNT 22:22 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L1_1_ENTRY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L1_1_ENTRY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L1_2_ENTRY_COUNT 23:23 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L1_2_ENTRY_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L1_2_ENTRY_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L1_2_ABORT_COUNT 24:24 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L1_2_ABORT_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L1_2_ABORT_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L1_SUBSTATE_TO_DEEP_L1_TIMEOUT_COUNT 25:25 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L1_SUBSTATE_TO_DEEP_L1_TIMEOUT_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L1_SUBSTATE_TO_DEEP_L1_TIMEOUT_COUNT_PENDING 0x00000001 /* -W--T */ +#define NV_XP_ERROR_COUNTER_RESET_L1_SHORT_DURATION_COUNT 26:26 /* RWIVF */ +#define NV_XP_ERROR_COUNTER_RESET_L1_SHORT_DURATION_COUNT_DONE 0x00000000 /* RWI-V */ +#define NV_XP_ERROR_COUNTER_RESET_L1_SHORT_DURATION_COUNT_PENDING 0x00000001 /* -W--T */ + +#define NV_XP_PRI_XP3G_CG 0x0008E000 /* RWI4R */ +#define NV_XP_PRI_XP3G_CG_IDLE_CG_DLY_CNT 5:0 /* RWIVF */ +#define NV_XP_PRI_XP3G_CG_IDLE_CG_DLY_CNT_HWINIT 0x00000000 /* RWI-V */ +#define NV_XP_PRI_XP3G_CG_IDLE_CG_DLY_CNT__PROD 0x0000000B /* RW--V */ +#define NV_XP_PRI_XP3G_CG_IDLE_CG_EN 6:6 /* RWIVF */ +#define NV_XP_PRI_XP3G_CG_IDLE_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_XP_PRI_XP3G_CG_IDLE_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_XP_PRI_XP3G_CG_IDLE_CG_EN__PROD 0x00000001 /* RW--V */ +#define NV_XP_PRI_XP3G_CG_STATE_CG_EN 7:7 /* */ +#define NV_XP_PRI_XP3G_CG_STATE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_XP_PRI_XP3G_CG_STATE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_STATE_CG_EN__PROD 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_STALL_CG_DLY_CNT 13:8 /* */ +#define NV_XP_PRI_XP3G_CG_STALL_CG_DLY_CNT_HWINIT 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_STALL_CG_DLY_CNT__PROD 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_STALL_CG_EN 14:14 /* RWIVF */ +#define 
NV_XP_PRI_XP3G_CG_STALL_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_XP_PRI_XP3G_CG_STALL_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_XP_PRI_XP3G_CG_STALL_CG_EN__PROD 0x00000000 /* RW--V */ +#define NV_XP_PRI_XP3G_CG_QUIESCENT_CG_EN 15:15 /* */ +#define NV_XP_PRI_XP3G_CG_QUIESCENT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_XP_PRI_XP3G_CG_QUIESCENT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_QUIESCENT_CG_EN__PROD 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_WAKEUP_DLY_CNT 19:16 /* RWIVF */ +#define NV_XP_PRI_XP3G_CG_WAKEUP_DLY_CNT_HWINIT 0x00000000 /* RWI-V */ +#define NV_XP_PRI_XP3G_CG_WAKEUP_DLY_CNT__PROD 0x00000000 /* RW--V */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_CNT 23:20 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_CNT_FULLSPEED 0x0000000f /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_CNT__PROD 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_DI_DT_SKEW_VAL 27:24 /* */ +#define NV_XP_PRI_XP3G_CG_DI_DT_SKEW_VAL_HWINIT 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_DI_DT_SKEW_VAL__PROD 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_EN 28:28 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_EN_ENABLED 0x00000001 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_EN_DISABLED 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_EN__PROD 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_SW_OVER 29:29 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_SW_OVER_EN 0x00000001 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_SW_OVER_DIS 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_THROT_CLK_SW_OVER__PROD 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_PAUSE_CG_EN 30:30 /* */ +#define NV_XP_PRI_XP3G_CG_PAUSE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_XP_PRI_XP3G_CG_PAUSE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_PAUSE_CG_EN__PROD 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_HALT_CG_EN 31:31 /* */ +#define NV_XP_PRI_XP3G_CG_HALT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_XP_PRI_XP3G_CG_HALT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_XP_PRI_XP3G_CG_HALT_CG_EN__PROD 0x00000000 /* */ + +#define NV_XP_PRI_XP3G_CG1 0x0008E004 /* RWI4R */ +#define NV_XP_PRI_XP3G_CG1_MONITOR_CG_EN 0:0 /* RWIVF */ +#define NV_XP_PRI_XP3G_CG1_MONITOR_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_XP_PRI_XP3G_CG1_MONITOR_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_XP_PRI_XP3G_CG1_MONITOR_CG_EN__PROD 0x00000000 /* RW--V */ + +#define NV_XP_REPLAY_COUNT(i) (0x0008D580+(i)*4) /* R--4A */ +#define NV_XP_REPLAY_COUNT__SIZE_1 1 /* */ +#define NV_XP_REPLAY_COUNT_VALUE 31:0 /* R-IVF */ +#define NV_XP_REPLAY_COUNT_VALUE_INIT 0x00000000 /* R-I-V */ +#endif // __lr10_dev_nv_xp_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nv_xve.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nv_xve.h new file mode 100644 index 000000000..c26581d30 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nv_xve.h @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nv_xve_h__ +#define __lr10_dev_nv_xve_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PCFG 0x00088FFF:0x00088000 /* RW--D */ +#define NV_XVE_ERROR_COUNTER1 0x00000854 /* R--4R */ +#define NV_XVE_ERROR_COUNTER1_CORR_ERROR_COUNT_VALUE 15:0 /* R-XVF */ + +#define NV_XVE_ERROR_COUNTER 0x000004AC /* R--4R */ +#define NV_XVE_ERROR_COUNTER_RSVD_CORR_ERROR_COUNT_VALUE 7:0 /* C--VF */ +#define NV_XVE_ERROR_COUNTER_RSVD_CORR_ERROR_COUNT_VALUE_INIT 0x00000000 /* C---V */ +#define NV_XVE_ERROR_COUNTER_NON_FATAL_ERROR_COUNT_VALUE 15:8 /* R-XVF */ +#define NV_XVE_ERROR_COUNTER_FATAL_ERROR_COUNT_VALUE 23:16 /* R-XVF */ +#define NV_XVE_ERROR_COUNTER_UNSUPP_REQ_COUNT_VALUE 31:24 /* R-XVF */ + +#define NV_XVE_PRI_XVE_CG 0x000004E8 /* RWI4R */ +#define NV_XVE_PRI_XVE_CG_IDLE_CG_DLY_CNT 5:0 /* RWIVF */ +#define NV_XVE_PRI_XVE_CG_IDLE_CG_DLY_CNT_HWINIT 0x00000000 /* RWI-V */ +#define NV_XVE_PRI_XVE_CG_IDLE_CG_DLY_CNT__PROD 0x00000004 /* RW--V */ +#define NV_XVE_PRI_XVE_CG_IDLE_CG_EN 6:6 /* RWIVF */ +#define NV_XVE_PRI_XVE_CG_IDLE_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_XVE_PRI_XVE_CG_IDLE_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_XVE_PRI_XVE_CG_IDLE_CG_EN__PROD 0x00000000 /* RW--V */ +#define NV_XVE_PRI_XVE_CG_STATE_CG_EN 7:7 /* */ +#define NV_XVE_PRI_XVE_CG_STATE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_XVE_PRI_XVE_CG_STATE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_STATE_CG_EN__PROD 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_STALL_CG_DLY_CNT 13:8 /* */ +#define NV_XVE_PRI_XVE_CG_STALL_CG_DLY_CNT_HWINIT 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_STALL_CG_DLY_CNT__PROD 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_STALL_CG_EN 14:14 /* RWIVF */ +#define NV_XVE_PRI_XVE_CG_STALL_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_XVE_PRI_XVE_CG_STALL_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_XVE_PRI_XVE_CG_STALL_CG_EN__PROD 0x00000000 /* RW--V */ +#define NV_XVE_PRI_XVE_CG_QUIESCENT_CG_EN 15:15 /* */ +#define NV_XVE_PRI_XVE_CG_QUIESCENT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_XVE_PRI_XVE_CG_QUIESCENT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_QUIESCENT_CG_EN__PROD 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_WAKEUP_DLY_CNT 19:16 /* RWIVF */ +#define NV_XVE_PRI_XVE_CG_WAKEUP_DLY_CNT_HWINIT 0x00000000 /* RWI-V */ +#define NV_XVE_PRI_XVE_CG_WAKEUP_DLY_CNT__PROD 0x00000000 /* RW--V */ +#define 
NV_XVE_PRI_XVE_CG_THROT_CLK_CNT 23:20 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_CNT_FULLSPEED 0x0000000f /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_CNT__PROD 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_DI_DT_SKEW_VAL 27:24 /* */ +#define NV_XVE_PRI_XVE_CG_DI_DT_SKEW_VAL_HWINIT 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_DI_DT_SKEW_VAL__PROD 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_EN 28:28 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_EN_ENABLED 0x00000001 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_EN_DISABLED 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_EN__PROD 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_SW_OVER 29:29 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_SW_OVER_EN 0x00000001 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_SW_OVER_DIS 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_THROT_CLK_SW_OVER__PROD 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_PAUSE_CG_EN 30:30 /* */ +#define NV_XVE_PRI_XVE_CG_PAUSE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_XVE_PRI_XVE_CG_PAUSE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_PAUSE_CG_EN__PROD 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_HALT_CG_EN 31:31 /* */ +#define NV_XVE_PRI_XVE_CG_HALT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_XVE_PRI_XVE_CG_HALT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_XVE_PRI_XVE_CG_HALT_CG_EN__PROD 0x00000000 /* */ + +#define NV_XVE_PRI_XVE_CG1 0x000004EC /* RWI4R */ +#define NV_XVE_PRI_XVE_CG1_MONITOR_CG_EN 0:0 /* RWIVF */ +#define NV_XVE_PRI_XVE_CG1_MONITOR_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_XVE_PRI_XVE_CG1_MONITOR_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_XVE_PRI_XVE_CG1_MONITOR_CG_EN__PROD 0x00000000 /* RW--V */ +#define NV_XVE_PRI_XVE_CG1_SLCG 17:1 /* RWIVF */ +#define NV_XVE_PRI_XVE_CG1_SLCG_ENABLED 0x00000000 /* RW--V */ +#define NV_XVE_PRI_XVE_CG1_SLCG_DISABLED 0x0001FFFF /* RWI-V */ +#define NV_XVE_PRI_XVE_CG1_SLCG__PROD 0x00000000 /* RW--V */ + +#define NV_XVE_CYA_2 0x00000704 /* RW-4R */ +#define NV_XVE_CYA_2_RSVD 31:0 /* RWCVF */ +#define NV_XVE_CYA_2_RSVD_INIT 0x00000000 /* RWC-V */ +#endif // __lr10_dev_nv_xve_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvlctrl_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlctrl_ip.h new file mode 100644 index 000000000..c606f448c --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlctrl_ip.h @@ -0,0 +1,113 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvlctrl_ip_h__ +#define __lr10_dev_nvlctrl_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS(i) (0x00000304+(i)*0x40) /* R--4A */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1 4 /* */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_FATAL 0:0 /* R-EVF */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_FATAL_INIT 0x00000000 /* R-E-V */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_NONFATAL 1:1 /* R-EVF */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_NONFATAL_INIT 0x00000000 /* R-E-V */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_CORRECTABLE 2:2 /* R-EVF */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_CORRECTABLE_INIT 0x00000000 /* R-E-V */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_INTR0 3:3 /* R-EVF */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_INTR0_INIT 0x00000000 /* R-E-V */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_INTR1 4:4 /* R-EVF */ +#define NV_NVLCTRL_LINK_INTR_0_STATUS_INTR1_INIT 0x00000000 /* R-E-V */ +#define NV_NVLCTRL_PLL_PRI_CLOCK_GATING 0x00000604 /* RW-4R */ +#define NV_NVLCTRL_PLL_PRI_CLOCK_GATING_CG1_SLCG 0:0 /* RWEVF */ +#define NV_NVLCTRL_PLL_PRI_CLOCK_GATING_CG1_SLCG_ENABLED 0x00000000 /* RW--V */ +#define NV_NVLCTRL_PLL_PRI_CLOCK_GATING_CG1_SLCG_DISABLED 0x00000001 /* RWE-V */ +#define NV_NVLCTRL_PLL_PRI_CLOCK_GATING_CG1_SLCG__PROD 0x00000000 /* RW--V */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK 0x00000220 /* RW-4R */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_FATAL 0:0 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_FATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_NONFATAL 1:1 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_NONFATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_CORRECTABLE 2:2 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_CORRECTABLE_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_INTR0 3:3 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_INTR0_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_INTR1 4:4 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_0_MASK_INTR1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK 0x00000228 /* RW-4R */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_FATAL 0:0 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_FATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_NONFATAL 1:1 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_NONFATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_CORRECTABLE 2:2 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_CORRECTABLE_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_INTR0 3:3 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_INTR0_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_INTR1 4:4 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_1_MASK_INTR1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK 0x00000230 /* RW-4R */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_FATAL 0:0 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_FATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_NONFATAL 1:1 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_NONFATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_CORRECTABLE 2:2 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_CORRECTABLE_INIT 
0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_INTR0 3:3 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_INTR0_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_INTR1 4:4 /* RWIVF */ +#define NV_NVLCTRL_COMMON_INTR_2_MASK_INTR1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_0_MASK(i) (0x00000300+(i)*0x40) /* RW-4A */ +#define NV_NVLCTRL_LINK_INTR_0_MASK__SIZE_1 4 /* */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_FATAL 0:0 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_FATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_NONFATAL 1:1 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_NONFATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_CORRECTABLE 2:2 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_CORRECTABLE_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_INTR0 3:3 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_INTR0_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_INTR1 4:4 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_0_MASK_INTR1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_1_MASK(i) (0x00000308+(i)*0x40) /* RW-4A */ +#define NV_NVLCTRL_LINK_INTR_1_MASK__SIZE_1 4 /* */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_FATAL 0:0 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_FATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_NONFATAL 1:1 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_NONFATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_CORRECTABLE 2:2 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_CORRECTABLE_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_INTR0 3:3 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_INTR0_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_INTR1 4:4 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_1_MASK_INTR1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_2_MASK(i) (0x00000310+(i)*0x40) /* RW-4A */ +#define NV_NVLCTRL_LINK_INTR_2_MASK__SIZE_1 4 /* */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_FATAL 0:0 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_FATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_NONFATAL 1:1 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_NONFATAL_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_CORRECTABLE 2:2 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_CORRECTABLE_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_INTR0 3:3 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_INTR0_INIT 0x00000000 /* RWI-V */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_INTR1 4:4 /* RWIVF */ +#define NV_NVLCTRL_LINK_INTR_2_MASK_INTR1_INIT 0x00000000 /* RWI-V */ +#endif // __lr10_dev_nvlctrl_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvldl_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvldl_ip.h new file mode 100644 index 000000000..c3c89452e --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvldl_ip.h @@ -0,0 +1,350 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvldl_ip_h__ +#define __lr10_dev_nvldl_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NVLDL_TOP_LINK_STATE 0x00000000 /* R--4R */ +#define NV_NVLDL_TOP_LINK_STATE_STATE 7:0 /* R-XVF */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_INIT 0x00000000 /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_HWCFG 0x00000001 /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_SWCFG 0x00000002 /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_ACTIVE 0x00000003 /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_FAULT 0x00000004 /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_SLEEP 0x00000005 /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_RCVY_AC 0x00000008 /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_RCVY_RX 0x0000000a /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_STATE_TRAIN 0x0000000b /* R---V */ +#define NV_NVLDL_TOP_LINK_STATE_AN0_BUSY 12:12 /* R-XVF */ +#define NV_NVLDL_TOP_LINK_STATE_TL_BUSY 13:13 /* R-XVF */ +#define NV_NVLDL_TOP_LINK_STATE_DBG_SUBSTATE 31:16 /* R-XVF */ +#define NV_NVLDL_TOP_LINK_CHANGE 0x00000040 /* RW-4R */ +#define NV_NVLDL_TOP_LINK_CHANGE_STATUS 1:0 /* R-XVF */ +#define NV_NVLDL_TOP_LINK_CHANGE_STATUS_DONE 0x00000000 /* R---V */ +#define NV_NVLDL_TOP_LINK_CHANGE_STATUS_BUSY 0x00000001 /* R---V */ +#define NV_NVLDL_TOP_LINK_CHANGE_STATUS_FAULT 0x00000002 /* R---V */ +#define NV_NVLDL_TOP_LINK_CHANGE_STATUS_ABORT 0x00000003 /* R---V */ +#define NV_NVLDL_TOP_LINK_CHANGE_ACTION 3:2 /* -WXVF */ +#define NV_NVLDL_TOP_LINK_CHANGE_ACTION_LTSSM_CHANGE 0x00000001 /* -W--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_ACTION_LTSSM_FORCE 0x00000003 /* -W--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE 7:4 /* RWEVF */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_INIT 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_HWCFG 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_SWCFG 0x00000002 /* RW--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_ACTIVE 0x00000003 /* RW--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_RCVY_AC 0x00000008 /* RW--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_FAULT 0x00000004 /* RW--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_RCVY_RX 0x0000000a /* RW--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_SLEEP 0x00000005 /* RW--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_NEWSTATE_TRAIN 0x0000000b /* RW--V */ +#define NV_NVLDL_TOP_LINK_CHANGE_OLDSTATE_MASK 19:16 /* RWEVF */ +#define 
NV_NVLDL_TOP_LINK_CHANGE_OLDSTATE_MASK_INIT 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_LINK_CHANGE_OLDSTATE_MASK_DONTCARE 0x0000000f /* RW--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE 0x00000044 /* RW-4R */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_STATUS 1:0 /* R-EVF */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_STATUS_DONE 0x00000000 /* R-E-V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_STATUS_BUSY 0x00000001 /* R---V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_STATUS_FAULT 0x00000002 /* R---V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_STATUS_ABORT 0x00000003 /* R---V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_ACTION 3:2 /* -WEVF */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_ACTION_INIT 0x00000000 /* -WE-V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_ACTION_SLSM_CHANGE 0x00000001 /* -W--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_ACTION_SLSM_FORCE 0x00000003 /* -W--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_NEWSTATE 7:4 /* RWEUF */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_NEWSTATE_HS 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_NEWSTATE_EIGHTH 0x00000004 /* RW--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_NEWSTATE_TRAIN 0x00000005 /* RW--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_NEWSTATE_SAFE 0x00000006 /* RW--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_NEWSTATE_OFF 0x00000007 /* RW--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_SUBLINK 15:12 /* RWEUF */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_SUBLINK_TX 0x00000000 /* RW--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_SUBLINK_RX 0x00000001 /* RWE-V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_OLDSTATE_MASK 19:16 /* RWEUF */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_OLDSTATE_MASK_INIT 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_OLDSTATE_MASK_DONTCARE 0x0000000f /* RW--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_COUNTDOWN 31:20 /* RWEUF */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_COUNTDOWN_MAXIMUM 0x0000007f /* RW--V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_COUNTDOWN_MINIMUM 0x00000008 /* RWE-V */ +#define NV_NVLDL_TOP_SUBLINK_CHANGE_COUNTDOWN_IMMEDIATE 0x00000000 /* RW--V */ +#define NV_NVLDL_TOP_LINK_TEST 0x00000048 /* RW-4R */ +#define NV_NVLDL_TOP_LINK_TEST_MODE 0:0 /* RWEVF */ +#define NV_NVLDL_TOP_LINK_TEST_MODE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_LINK_TEST_MODE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_LINK_TEST_AUTO_HWCFG 30:30 /* RWEVF */ +#define NV_NVLDL_TOP_LINK_TEST_AUTO_HWCFG_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_LINK_TEST_AUTO_HWCFG_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_LINK_TEST_AUTO_NVHS 31:31 /* RWEVF */ +#define NV_NVLDL_TOP_LINK_TEST_AUTO_NVHS_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_LINK_TEST_AUTO_NVHS_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR 0x00000050 /* RW-4R */ +#define NV_NVLDL_TOP_INTR_TX_REPLAY 0:0 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_TX_RECOVERY_SHORT 1:1 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_TX_FAULT_RAM 4:4 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_TX_FAULT_INTERFACE 5:5 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_TX_FAULT_SUBLINK_CHANGE 8:8 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_RX_FAULT_SUBLINK_CHANGE 16:16 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_RX_FAULT_DL_PROTOCOL 20:20 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_RX_SHORT_ERROR_RATE 21:21 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_RX_LONG_ERROR_RATE 22:22 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_RX_ILA_TRIGGER 23:23 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_RX_CRC_COUNTER 24:24 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_LTSSM_FAULT_DOWN 27:27 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_LTSSM_FAULT_UP 28:28 /* RWXVF */ +#define 
NV_NVLDL_TOP_INTR_LTSSM_PROTOCOL 29:29 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_MINION_REQUEST 30:30 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2 0x00000054 /* RW-4R */ +#define NV_NVLDL_TOP_INTR_SW2_TX_REPLAY 0:0 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_TX_RECOVERY_SHORT 1:1 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_TX_FAULT_RAM 4:4 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_TX_FAULT_INTERFACE 5:5 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_TX_FAULT_SUBLINK_CHANGE 8:8 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_RX_FAULT_SUBLINK_CHANGE 16:16 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_RX_FAULT_DL_PROTOCOL 20:20 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_RX_SHORT_ERROR_RATE 21:21 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_RX_LONG_ERROR_RATE 22:22 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_RX_ILA_TRIGGER 23:23 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_RX_CRC_COUNTER 24:24 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_LTSSM_FAULT_DOWN 27:27 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_LTSSM_FAULT_UP 28:28 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_LTSSM_PROTOCOL 29:29 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_SW2_MINION_REQUEST 30:30 /* RWXVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN 0x00000058 /* RW-4R */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_REPLAY 0:0 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_REPLAY_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_RECOVERY_SHORT 1:1 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_RECOVERY_SHORT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_RECOVERY_SHORT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_RAM 4:4 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_RAM_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_RAM_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_INTERFACE 5:5 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_INTERFACE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_INTERFACE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_SUBLINK_CHANGE 8:8 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_SUBLINK_CHANGE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_TX_FAULT_SUBLINK_CHANGE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_FAULT_SUBLINK_CHANGE 16:16 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_FAULT_SUBLINK_CHANGE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_FAULT_SUBLINK_CHANGE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_FAULT_DL_PROTOCOL 20:20 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_FAULT_DL_PROTOCOL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_FAULT_DL_PROTOCOL_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_SHORT_ERROR_RATE 21:21 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_SHORT_ERROR_RATE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_SHORT_ERROR_RATE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_LONG_ERROR_RATE 22:22 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_LONG_ERROR_RATE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_LONG_ERROR_RATE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_ILA_TRIGGER 23:23 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_ILA_TRIGGER_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_ILA_TRIGGER_ENABLE 0x00000001 /* RW--V */ +#define 
NV_NVLDL_TOP_INTR_STALL_EN_RX_CRC_COUNTER 24:24 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_CRC_COUNTER_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_RX_CRC_COUNTER_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_FAULT_DOWN 27:27 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_FAULT_DOWN_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_FAULT_DOWN_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_FAULT_UP 28:28 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_FAULT_UP_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_FAULT_UP_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_PROTOCOL 29:29 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_PROTOCOL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_LTSSM_PROTOCOL_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_MINION_REQUEST 30:30 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_STALL_EN_MINION_REQUEST_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_STALL_EN_MINION_REQUEST_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN 0x0000005c /* RW-4R */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_REPLAY 0:0 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_REPLAY_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_RECOVERY_SHORT 1:1 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_RECOVERY_SHORT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_RECOVERY_SHORT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_RAM 4:4 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_RAM_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_RAM_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_INTERFACE 5:5 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_INTERFACE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_INTERFACE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_SUBLINK_CHANGE 8:8 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_SUBLINK_CHANGE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_TX_FAULT_SUBLINK_CHANGE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_FAULT_SUBLINK_CHANGE 16:16 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_FAULT_SUBLINK_CHANGE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_FAULT_SUBLINK_CHANGE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_FAULT_DL_PROTOCOL 20:20 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_FAULT_DL_PROTOCOL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_FAULT_DL_PROTOCOL_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_SHORT_ERROR_RATE 21:21 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_SHORT_ERROR_RATE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_SHORT_ERROR_RATE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_LONG_ERROR_RATE 22:22 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_LONG_ERROR_RATE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_LONG_ERROR_RATE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_ILA_TRIGGER 23:23 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_ILA_TRIGGER_DISABLE 0x00000000 /* RWE-V */ 
+#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_ILA_TRIGGER_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_CRC_COUNTER 24:24 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_CRC_COUNTER_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_RX_CRC_COUNTER_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_FAULT_DOWN 27:27 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_FAULT_DOWN_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_FAULT_DOWN_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_FAULT_UP 28:28 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_FAULT_UP_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_FAULT_UP_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_PROTOCOL 29:29 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_PROTOCOL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_LTSSM_PROTOCOL_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_MINION_REQUEST 30:30 /* RWEVF */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_MINION_REQUEST_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLDL_TOP_INTR_NONSTALL_EN_MINION_REQUEST_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLDL_TOP_ERROR_COUNT_CTRL 0x00000080 /* -W-4R */ +#define NV_NVLDL_TOP_ERROR_COUNT_CTRL_CLEAR_RECOVERY 2:2 /* -WXVF */ +#define NV_NVLDL_TOP_ERROR_COUNT_CTRL_CLEAR_RECOVERY_CLEAR 0x00000001 /* -W--V */ +#define NV_NVLDL_TX_TRAIN0_TX 0x00002018 /* RW-4R */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_LEN_0 10:0 /* RWEVF */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_LEN_0_INIT 0x00000260 /* RWE-V */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_LEN_0_MINIMUM 0x00000001 /* RW--V */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_LEN_0__PROD_ISSC 0x00000648 /* RW--V */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_LEN_0_SCL 15:11 /* RWEVF */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_LEN_0_SCL_INIT 0x00000004 /* RWE-V */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_LEN_0_SCL_MINIMUM 0x00000000 /* RW--V */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_LEN_0_SCL__PROD_ISSC 0x00000005 /* RW--V */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_INFINITE 31:31 /* RWEVF */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_INFINITE_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_TX_TRAIN0_TX_PRBS_INFINITE_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_TX_SLSM_STATUS_TX 0x00002024 /* R--4R */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_SUBSTATE 3:0 /* R-XVF */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_SUBSTATE_STABLE 0x00000000 /* R---V */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE 7:4 /* R-XVF */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_HS 0x00000000 /* R---V */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_EIGHTH 0x00000004 /* R---V */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_TRAIN 0x00000005 /* R---V */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_SAFE 0x00000006 /* R---V */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_OFF 0x00000007 /* R---V */ +#define NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_UNKNOWN 0x0000000d /* R---V */ +#define NV_NVLDL_TX_ERROR_COUNT_CTRL 0x00002280 /* -W-4R */ +#define NV_NVLDL_TX_ERROR_COUNT_CTRL_CLEAR_REPLAY 8:8 /* -WXVF */ +#define NV_NVLDL_TX_ERROR_COUNT_CTRL_CLEAR_REPLAY_CLEAR 0x00000001 /* -W--V */ +#define NV_NVLDL_RX_CONFIG_RX 0x00003000 /* RW-4R */ +#define NV_NVLDL_RX_CONFIG_RX_HW_POLARITY_INVERT 3:0 /* R-EVF */ +#define NV_NVLDL_RX_CONFIG_RX_HW_POLARITY_INVERT_NONE 0x00000000 /* R-E-V */ +#define NV_NVLDL_RX_CONFIG_RX_HW_POLARITY_INVERT_ALL 0x0000000f /* R---V 
*/ +#define NV_NVLDL_RX_CONFIG_RX_FIFO_WR_REQ_DELAY 7:4 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_FIFO_WR_REQ_DELAY_INIT 0x0000000a /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_HW_LANE_REVERSE 8:8 /* R-EVF */ +#define NV_NVLDL_RX_CONFIG_RX_HW_LANE_REVERSE_OFF 0x00000000 /* R-E-V */ +#define NV_NVLDL_RX_CONFIG_RX_HW_LANE_REVERSE_ON 0x00000001 /* R---V */ +#define NV_NVLDL_RX_CONFIG_RX_POLARITY_INVERT 19:16 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_POLARITY_INVERT_NONE 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_POLARITY_INVERT_ALL 0x0000000f /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_ECC_MODE 23:22 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_ECC_MODE_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_ECC_MODE_ECC96 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_ECC_MODE_ECC88 0x00000002 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_LANE_REVERSE 24:24 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_LANE_REVERSE_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_LANE_REVERSE_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_IOBIST_EN 25:25 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_IOBIST_EN_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_IOBIST_EN_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_PHY_NO_ADJUST 26:26 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_PHY_NO_ADJUST_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_PHY_NO_ADJUST_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_PHY_ADJUST_OVERRIDE 27:27 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_PHY_ADJUST_OVERRIDE_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_PHY_ADJUST_OVERRIDE_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_POLARITY_OVERRIDE 28:28 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_POLARITY_OVERRIDE_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_POLARITY_OVERRIDE_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_REVERSAL_OVERRIDE 29:29 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_REVERSAL_OVERRIDE_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_REVERSAL_OVERRIDE_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_SKIP_TOGGLE_CONST 30:30 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_SKIP_TOGGLE_CONST_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_SKIP_TOGGLE_CONST_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_CONFIG_RX_DEBUG_ENABLE 31:31 /* RWEVF */ +#define NV_NVLDL_RX_CONFIG_RX_DEBUG_ENABLE_OFF 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_CONFIG_RX_DEBUG_ENABLE_ON 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX 0x00003014 /* R--4R */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_SUBSTATE 3:0 /* R-EVF */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_SUBSTATE_STABLE 0x00000000 /* R-E-V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE 7:4 /* R-XVF */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_HS 0x00000000 /* R---V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_EIGHTH 0x00000004 /* R---V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_TRAIN 0x00000005 /* R---V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_SAFE 0x00000006 /* R---V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_OFF 0x00000007 /* R---V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_UNKNOWN 0x0000000d /* R---V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_SURPRISE_LD_CNT 15:8 /* R-EVF */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_SURPRISE_LD_CNT_INIT 0x00000000 /* R-E-V */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_FENCE_STATUS 31:31 /* R-EVF */ +#define NV_NVLDL_RX_SLSM_STATUS_RX_FENCE_STATUS_OFF 0x00000000 /* R-E-V */ +#define 
NV_NVLDL_RX_SLSM_STATUS_RX_FENCE_STATUS_ON 0x00000001 /* R---V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL 0x00003280 /* RW-4R */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_FLIT_CRC 0:0 /* -WEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_FLIT_CRC_INIT 0x00000000 /* -WE-V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_FLIT_CRC_CLEAR 0x00000001 /* -W--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_LANE_CRC 1:1 /* -WEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_LANE_CRC_INIT 0x00000000 /* -WE-V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_LANE_CRC_CLEAR 0x00000001 /* -W--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_RATES 2:2 /* -WEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_RATES_INIT 0x00000000 /* -WE-V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_RATES_CLEAR 0x00000001 /* -W--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_REPLAY 3:3 /* -WEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_REPLAY_INIT 0x00000000 /* -WE-V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_REPLAY_CLEAR 0x00000001 /* -W--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_ECC_COUNTS 4:4 /* -WEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_ECC_COUNTS_INIT 0x00000000 /* -WE-V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_CLEAR_ECC_COUNTS_CLEAR 0x00000001 /* -W--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_SHORT_RATE 8:8 /* RWEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_SHORT_RATE_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_SHORT_RATE_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_LONG_RATE 9:9 /* RWEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_LONG_RATE_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_LONG_RATE_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_RATE_COUNT_MODE 10:10 /* RWEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_RATE_COUNT_MODE_FLIT 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_RATE_COUNT_MODE_SEQUENCE 0x00000001 /* RW--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_FLIT_COUNT_MODE 11:11 /* RWEVF */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_FLIT_COUNT_MODE_FLIT 0x00000000 /* RW--V */ +#define NV_NVLDL_RX_ERROR_COUNT_CTRL_FLIT_COUNT_MODE_SEQUENCE 0x00000001 /* RWE-V */ + +#define NV_NVLDL_RX_ERROR_RATE_CTRL 0x00003284 /* RW-4R */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_MAN 2:0 /* RWEUF */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_MAN_INIT 0x00000001 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_EXP 3:3 /* RWEUF */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_EXP_INIT 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_MAN 6:4 /* RWEUF */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_MAN_INIT 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_EXP 11:8 /* RWEUF */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_EXP_INIT 0x00000006 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN 18:16 /* RWEUF */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN_INIT 0x00000001 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_EXP 19:19 /* RWEUF */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_EXP_INIT 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_TIMESCALE_MAN 22:20 /* RWEUF */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_TIMESCALE_MAN_INIT 0x00000000 /* RWE-V */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_TIMESCALE_EXP 28:24 /* RWEUF */ +#define NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_TIMESCALE_EXP_INIT 0x00000006 /* RWE-V */ +#endif // 
__lr10_dev_nvldl_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvldl_ip_addendum.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvldl_ip_addendum.h new file mode 100644 index 000000000..bbcf7be22 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvldl_ip_addendum.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvldl_ip_addendum_h__ +#define __lr10_dev_nvldl_ip_addendum_h__ + +#define NV_NVLDL_TOP_SCRATCH_PRIVMASK1_INITPLL_LINK_STATE 0:0 +#define NV_NVLDL_TOP_SCRATCH_PRIVMASK1_INITPLL_LINK_STATE_DONE 0x1 + +#endif // __lr10_dev_nvldl_ip_addendum_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvlipt_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlipt_ip.h new file mode 100644 index 000000000..d64fa4ad8 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlipt_ip.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvlipt_ip_h__ +#define __lr10_dev_nvlipt_ip_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0 0x00000280 /* RW-4R */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_CLKCTL_ILLEGAL_REQUEST 0:0 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_CLKCTL_ILLEGAL_REQUEST_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_CLKCTL_ILLEGAL_REQUEST_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_RSTSEQ_PLL_TIMEOUT 1:1 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_RSTSEQ_PLL_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_RSTSEQ_PLL_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_RSTSEQ_PHYARB_TIMEOUT 2:2 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_RSTSEQ_PHYARB_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_STATUS_0_RSTSEQ_PHYARB_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0 0x00000288 /* RW-4R */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_CLKCTL_ILLEGAL_REQUEST 0:0 /* RWEVF */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_CLKCTL_ILLEGAL_REQUEST__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_CLKCTL_ILLEGAL_REQUEST_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_CLKCTL_ILLEGAL_REQUEST_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_RSTSEQ_PLL_TIMEOUT 1:1 /* RWEVF */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_RSTSEQ_PLL_TIMEOUT__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_RSTSEQ_PLL_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_RSTSEQ_PLL_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_RSTSEQ_PHYARB_TIMEOUT 2:2 /* RWEVF */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_RSTSEQ_PHYARB_TIMEOUT__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_RSTSEQ_PHYARB_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_COMMON_ERR_FATAL_REPORT_EN_0_RSTSEQ_PHYARB_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0 0x00000294 /* RW-4R */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_CLKCTL_ILLEGAL_REQUEST 0:0 /* RWEVF */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_CLKCTL_ILLEGAL_REQUEST__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_CLKCTL_ILLEGAL_REQUEST_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_CLKCTL_ILLEGAL_REQUEST_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_RSTSEQ_PLL_TIMEOUT 1:1 /* RWEVF */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_RSTSEQ_PLL_TIMEOUT__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_RSTSEQ_PLL_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_RSTSEQ_PLL_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_RSTSEQ_PHYARB_TIMEOUT 2:2 /* RWEVF */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_RSTSEQ_PHYARB_TIMEOUT__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_RSTSEQ_PHYARB_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_COMMON_ERR_CONTAIN_EN_0_RSTSEQ_PHYARB_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0 0x00000298 /* RW-4R */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_CLKCTL_ILLEGAL_REQUEST 0:0 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_CLKCTL_ILLEGAL_REQUEST_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_CLKCTL_ILLEGAL_REQUEST_INSERT 0x00000001 
/* RW--V */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_RSTSEQ_PLL_TIMEOUT 1:1 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_RSTSEQ_PLL_TIMEOUT_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_RSTSEQ_PLL_TIMEOUT_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_RSTSEQ_PHYARB_TIMEOUT 2:2 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_RSTSEQ_PHYARB_TIMEOUT_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_REPORT_INJECT_0_RSTSEQ_PHYARB_TIMEOUT_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0 0x0000029c /* RW-4R */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_CLKCTL_ILLEGAL_REQUEST 0:0 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_CLKCTL_ILLEGAL_REQUEST_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_CLKCTL_ILLEGAL_REQUEST_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_RSTSEQ_PLL_TIMEOUT 1:1 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_RSTSEQ_PLL_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_RSTSEQ_PLL_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_RSTSEQ_PHYARB_TIMEOUT 2:2 /* RWIVF */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_RSTSEQ_PHYARB_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_COMMON_ERR_FIRST_0_RSTSEQ_PHYARB_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_COMMON_INTR_CONTROL_COMMON 0x00000300 /* RW-4R */ +#define NV_NVLIPT_COMMON_INTR_CONTROL_COMMON_INT0_EN 0:0 /* RWEVF */ +#define NV_NVLIPT_COMMON_INTR_CONTROL_COMMON_INT0_EN_INIT 0x00000000 /* RWE-V */ +#define NV_NVLIPT_COMMON_INTR_CONTROL_COMMON_INT1_EN 1:1 /* RWEVF */ +#define NV_NVLIPT_COMMON_INTR_CONTROL_COMMON_INT1_EN_INIT 0x00000000 /* RWE-V */ +#define NV_NVLIPT_COMMON_TOPOLOGY_LOCAL_CHIP_SID_LO 0x00000108 /* R--4R */ +#define NV_NVLIPT_COMMON_TOPOLOGY_LOCAL_CHIP_SID_LO_SID_31_0 31:0 /* R-IVF */ +#define NV_NVLIPT_COMMON_TOPOLOGY_LOCAL_CHIP_SID_LO_SID_31_0_INIT 0x00000000 /* R-I-V */ +#define NV_NVLIPT_COMMON_TOPOLOGY_LOCAL_CHIP_SID_HI 0x0000010c /* R--4R */ +#define NV_NVLIPT_COMMON_TOPOLOGY_LOCAL_CHIP_SID_HI_SID_63_32 31:0 /* R-IVF */ +#define NV_NVLIPT_COMMON_TOPOLOGY_LOCAL_CHIP_SID_HI_SID_63_32_INIT 0x00000000 /* R-I-V */ +#endif // __lr10_dev_nvlipt_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvlipt_lnk_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlipt_lnk_ip.h new file mode 100644 index 000000000..8b4c31537 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlipt_lnk_ip.h @@ -0,0 +1,476 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvlipt_lnk_ip_h__ +#define __lr10_dev_nvlipt_lnk_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL 0x00000090 /* RW-4R */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_SEL 1:0 /* RWEVF */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_SEL_L0 0x00000000 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_SEL_L3 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_SEL_TX 0x00000002 /* RWE-V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_SEL_OFF 0x00000003 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_TXCLK_SEL 4:3 /* RWEVF */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_TXCLK_SEL_PLL_CLK 0x00000000 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_TXCLK_SEL_ALT_CLK 0x00000001 /* RWE-V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_TXCLK_SEL_OFF 0x00000003 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_STS 17:16 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_STS_L0 0x00000000 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_STS_L3 0x00000001 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_STS_TX 0x00000002 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_STS_OFF 0x00000003 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_TXCLK_STS 20:19 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_TXCLK_STS_PLL_CLK 0x00000000 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_TXCLK_STS_ALT_CLK 0x00000001 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_TXCLK_STS_OFF 0x00000003 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_PLL_PWR 24:24 /* RWEVF */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_PLL_PWR_OFF 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_PLL_PWR_ON 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_PLL_PWR_STS 25:25 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_PLL_PWR_STS_OFF 0x00000000 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_PLL_PWR_STS_ON 0x00000001 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_HW_DISABLE 31:31 /* RWEVF */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_HW_DISABLE_OFF 0x00000000 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_CLK_CTRL_RXCLK_HW_DISABLE_ON 0x00000001 /* RWE-V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE 0x00000104 /* RW-4R */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE 7:0 /* RWIVF */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE_NV2P1TUR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE_NV3P0AMP 0x00000002 /* RW--V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE_NV3P0LRK 0x00000003 /* RW--V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE_BB3P0P9P 0x00000004 /* RW--V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE_BB3P0P10 0x00000005 /* RW--V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_LINK_INFO 0x00000110 /* RW-4R */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_LINK_INFO_LINK_NUMBER 7:0 /* RWIVF */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_LINK_INFO_LINK_NUMBER_INIT 0x000000ff /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0 0x00000280 /* RW-4R */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_SLEEPWHILEACTIVELINK 0:0 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_SLEEPWHILEACTIVELINK_NONE 0x00000000 /* RWI-V */ +#define 
NV_NVLIPT_LNK_ERR_STATUS_0_SLEEPWHILEACTIVELINK_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_ILLEGALLINKSTATEREQUEST 1:1 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_ILLEGALLINKSTATEREQUEST_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_ILLEGALLINKSTATEREQUEST_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_FAILEDMINIONREQUEST 2:2 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_FAILEDMINIONREQUEST_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_FAILEDMINIONREQUEST_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RESERVEDREQUESTVALUE 3:3 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RESERVEDREQUESTVALUE_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RESERVEDREQUESTVALUE_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_LINKSTATEWRITEWHILEBUSY 4:4 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_LINKSTATEWRITEWHILEBUSY_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_LINKSTATEWRITEWHILEBUSY_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_LINK_STATE_REQUEST_TIMEOUT 5:5 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_LINK_STATE_REQUEST_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_LINK_STATE_REQUEST_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR 6:6 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RSTSEQ_PHYCTL_TIMEOUT 7:7 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RSTSEQ_PHYCTL_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RSTSEQ_PHYCTL_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RSTSEQ_CLKCTL_TIMEOUT 8:8 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RSTSEQ_CLKCTL_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_STATUS_0_RSTSEQ_CLKCTL_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0 0x00000288 /* RW-4R */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_SLEEPWHILEACTIVELINK 0:0 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_SLEEPWHILEACTIVELINK__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_SLEEPWHILEACTIVELINK_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_SLEEPWHILEACTIVELINK_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_ILLEGALLINKSTATEREQUEST 1:1 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_ILLEGALLINKSTATEREQUEST_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_ILLEGALLINKSTATEREQUEST_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_FAILEDMINIONREQUEST 2:2 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_FAILEDMINIONREQUEST_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_FAILEDMINIONREQUEST_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RESERVEDREQUESTVALUE 3:3 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RESERVEDREQUESTVALUE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RESERVEDREQUESTVALUE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_LINKSTATEWRITEWHILEBUSY 4:4 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_LINKSTATEWRITEWHILEBUSY_DISABLE 0x00000000 /* RWE-V */ 
+#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_LINKSTATEWRITEWHILEBUSY_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_LINK_STATE_REQUEST_TIMEOUT 5:5 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_LINK_STATE_REQUEST_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_LINK_STATE_REQUEST_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR 6:6 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RSTSEQ_PHYCTL_TIMEOUT 7:7 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RSTSEQ_PHYCTL_TIMEOUT__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RSTSEQ_PHYCTL_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RSTSEQ_PHYCTL_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RSTSEQ_CLKCTL_TIMEOUT 8:8 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RSTSEQ_CLKCTL_TIMEOUT__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RSTSEQ_CLKCTL_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_FATAL_REPORT_EN_0_RSTSEQ_CLKCTL_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0 0x0000028c /* RW-4R */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_SLEEPWHILEACTIVELINK 0:0 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_SLEEPWHILEACTIVELINK_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_SLEEPWHILEACTIVELINK_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_ILLEGALLINKSTATEREQUEST 1:1 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_ILLEGALLINKSTATEREQUEST__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_ILLEGALLINKSTATEREQUEST_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_ILLEGALLINKSTATEREQUEST_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_FAILEDMINIONREQUEST 2:2 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_FAILEDMINIONREQUEST__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_FAILEDMINIONREQUEST_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_FAILEDMINIONREQUEST_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RESERVEDREQUESTVALUE 3:3 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RESERVEDREQUESTVALUE__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RESERVEDREQUESTVALUE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RESERVEDREQUESTVALUE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_LINKSTATEWRITEWHILEBUSY 4:4 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_LINKSTATEWRITEWHILEBUSY__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_LINKSTATEWRITEWHILEBUSY_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_LINKSTATEWRITEWHILEBUSY_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_LINK_STATE_REQUEST_TIMEOUT 5:5 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_LINK_STATE_REQUEST_TIMEOUT__PROD 0x00000001 
/* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_LINK_STATE_REQUEST_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_LINK_STATE_REQUEST_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR 6:6 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR__PROD 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RSTSEQ_PHYCTL_TIMEOUT 7:7 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RSTSEQ_PHYCTL_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RSTSEQ_PHYCTL_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RSTSEQ_CLKCTL_TIMEOUT 8:8 /* RWEVF */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RSTSEQ_CLKCTL_TIMEOUT_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_ERR_NON_FATAL_REPORT_EN_0_RSTSEQ_CLKCTL_TIMEOUT_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0 0x00000298 /* RW-4R */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_SLEEPWHILEACTIVELINK 0:0 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_SLEEPWHILEACTIVELINK_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_SLEEPWHILEACTIVELINK_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_ILLEGALLINKSTATEREQUEST 1:1 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_ILLEGALLINKSTATEREQUEST_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_ILLEGALLINKSTATEREQUEST_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_FAILEDMINIONREQUEST 2:2 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_FAILEDMINIONREQUEST_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_FAILEDMINIONREQUEST_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RESERVEDREQUESTVALUE 3:3 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RESERVEDREQUESTVALUE_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RESERVEDREQUESTVALUE_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_LINKSTATEWRITEWHILEBUSY 4:4 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_LINKSTATEWRITEWHILEBUSY_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_LINKSTATEWRITEWHILEBUSY_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_LINK_STATE_REQUEST_TIMEOUT 5:5 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_LINK_STATE_REQUEST_TIMEOUT_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_LINK_STATE_REQUEST_TIMEOUT_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR 6:6 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RSTSEQ_PHYCTL_TIMEOUT 7:7 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RSTSEQ_PHYCTL_TIMEOUT_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RSTSEQ_PHYCTL_TIMEOUT_INSERT 0x00000001 /* RW--V */ +#define 
NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RSTSEQ_CLKCTL_TIMEOUT 8:8 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RSTSEQ_CLKCTL_TIMEOUT_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_REPORT_INJECT_0_RSTSEQ_CLKCTL_TIMEOUT_INSERT 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0 0x0000029c /* RW-4R */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_SLEEPWHILEACTIVELINK 0:0 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_SLEEPWHILEACTIVELINK_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_SLEEPWHILEACTIVELINK_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_ILLEGALLINKSTATEREQUEST 1:1 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_ILLEGALLINKSTATEREQUEST_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_ILLEGALLINKSTATEREQUEST_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_FAILEDMINIONREQUEST 2:2 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_FAILEDMINIONREQUEST_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_FAILEDMINIONREQUEST_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RESERVEDREQUESTVALUE 3:3 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RESERVEDREQUESTVALUE_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RESERVEDREQUESTVALUE_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_LINKSTATEWRITEWHILEBUSY 4:4 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_LINKSTATEWRITEWHILEBUSY_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_LINKSTATEWRITEWHILEBUSY_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_LINK_STATE_REQUEST_TIMEOUT 5:5 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_LINK_STATE_REQUEST_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_LINK_STATE_REQUEST_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR 6:6 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_WRITE_TO_LOCKED_SYSTEM_REG_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RSTSEQ_PHYCTL_TIMEOUT 7:7 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RSTSEQ_PHYCTL_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RSTSEQ_PHYCTL_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RSTSEQ_CLKCTL_TIMEOUT 8:8 /* RWIVF */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RSTSEQ_CLKCTL_TIMEOUT_NONE 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_ERR_FIRST_0_RSTSEQ_CLKCTL_TIMEOUT_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_INTR_CONTROL_LINK 0x00000300 /* RW-4R */ +#define NV_NVLIPT_LNK_INTR_CONTROL_LINK_INT0_EN 0:0 /* RWEVF */ +#define NV_NVLIPT_LNK_INTR_CONTROL_LINK_INT0_EN_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_INTR_CONTROL_LINK_INT0_EN_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_INTR_CONTROL_LINK_INT1_EN 1:1 /* RWEVF */ +#define NV_NVLIPT_LNK_INTR_CONTROL_LINK_INT1_EN_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_INTR_CONTROL_LINK_INT1_EN_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET 0x00000380 /* RW-4R */ +#define NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET 0:0 /* RWEVF */ +#define NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_DEASSERT 0x00000000 /* RW--V */ +#define NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_ASSERT 0x00000001 /* RWE-V */ +#define NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_STATUS 1:1 /* R-EVF */ +#define NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_STATUS_DEASSERTED 
0x00000000 /* R---V */ +#define NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_STATUS_ASSERTED 0x00000001 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST 0x00000480 /* RW-4R */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST 3:0 /* RWEVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_NOP 0x00000000 /* RWE-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_ACTIVE 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_L2 0x00000002 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_EMPTY 0x00000008 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET 0x00000009 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS 15:8 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_INIT 0x00000000 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_REQUEST_SUCCESSFUL 0x00000001 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_ILLEGAL_STATE_REQUEST 0x00000002 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_RESET_SEQ_TIMEOUT 0x00000003 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_MINION_REQUEST_NOT_ENABLED 0x00000004 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_REQUEST_TIMEOUT 0x00000005 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_STATUS_MINION_REQUEST_FAIL 0x00000080 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_ERR 30:30 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_ERR_NOERR 0x00000000 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_ERR_ERR 0x00000001 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_READY 31:31 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_READY_INIT 0x00000001 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS 0x00000484 /* R--4R */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE 3:0 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_ACTIVE 0x00000001 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_L2 0x00000002 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_EMPTY 0x00000008 /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_RESET 0x00000009 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_CONTAIN 0x0000000e /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_DISABLE 0x0000000f /* R---V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_TXTLBUFFEREMPTY 8:8 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_TXTLBUFFEREMPTY_INIT 0x00000000 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_TXREPLAYBUFFEREMPTY 9:9 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_TXREPLAYBUFFEREMPTY_INIT 0x00000000 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_RXTLBUFFEREMPTY 11:11 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_RXTLBUFFEREMPTY_INIT 0x00000000 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_RMTTXBUFFEREMPTY 13:13 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_RMTTXBUFFEREMPTY_INIT 0x00000000 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_RMTRXBUFFEREMPTY 14:14 /* R-EVF */ +#define NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_RMTRXBUFFEREMPTY_INIT 0x00000000 /* R-E-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL 0x00000600 /* RW-4R */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_LINK_DISABLE 0:0 /* RWIVF */ +#define 
NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_LINK_DISABLE_ENABLED 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_LINK_DISABLE_DISABLED 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_1 1:1 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_2 2:2 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_2_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_3 3:3 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_3_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_4 4:4 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_4_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_5 5:5 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_5_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_6 6:6 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_6_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_7 7:7 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_7_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_8 8:8 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_8_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_9 9:9 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_9_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_10 10:10 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_10_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_11 11:11 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_11_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_12 12:12 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_12_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_13 13:13 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_13_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_14 14:14 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_14_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_15 15:15 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_MODE_CTRL_RESERVED_15_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL 0x0000060c /* RW-4R */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_REFERENCE_CLOCK_MODE 1:0 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_REFERENCE_CLOCK_MODE_COMMON 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_REFERENCE_CLOCK_MODE_RESERVED 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_REFERENCE_CLOCK_MODE_NON_COMMON_NO_SS 0x00000002 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_REFERENCE_CLOCK_MODE_NON_COMMON_SS 0x00000003 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_REFERENCE_CLOCK_FREQUENCY 5:4 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_REFERENCE_CLOCK_FREQUENCY_156_25_MHZ 0x00000000 /* RWI-V */ +#define 
NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_REFERENCE_CLOCK_FREQUENCY_150_00_MHZ 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE 15:8 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_50_00000_GBPS 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_16_00000_GBPS 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_20_00000_GBPS 0x00000002 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_25_00000_GBPS 0x00000003 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_25_78125_GBPS 0x00000004 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_32_00000_GBPS 0x00000005 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_40_00000_GBPS 0x00000006 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_53_12500_GBPS 0x00000007 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_ILLEGAL_LINE_RATE 0x000000ff /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL 0x00000618 /* RW-4R */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_AC_DC_MODE 0:0 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_AC_DC_MODE_AC 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_AC_DC_MODE_DC 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_LINE_CODE_MODE 2:1 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_LINE_CODE_MODE_NRZ 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_LINE_CODE_MODE_NRZ_128B130 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_LINE_CODE_MODE_PAM4 0x00000003 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_RECEIVER_DETECT_ENABLE 3:3 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_RECEIVER_DETECT_ENABLE_ENABLE 0x00000001 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_RECEIVER_DETECT_ENABLE_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_BLOCK_CODE_MODE 7:6 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_BLOCK_CODE_MODE_OFF 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_BLOCK_CODE_MODE_ECC96_ENABLED 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_BLOCK_CODE_MODE_ECC88_ENABLED 0x00000002 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_FOM_FORMAT 10:8 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_FOM_FORMAT_INIT 0x00000005 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_FOM_FORMAT_FOMA 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_FOM_FORMAT_FOMB 0x00000002 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_FOM_FORMAT_FOMC 0x00000004 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM 18:11 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_INIT 0x00000017 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_A0 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_A1 0x00000002 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_A2 0x00000004 /* RW--V */ +#define 
NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_A3 0x00000008 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_A4 0x00000010 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_A5 0x00000020 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_A6 0x00000040 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_OPTIMIZATION_ALGORITHM_A7 0x00000080 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_ADJUSTMENT_ALGORITHM 23:19 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_ADJUSTMENT_ALGORITHM_INIT 0x00000003 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_ADJUSTMENT_ALGORITHM_B0 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_ADJUSTMENT_ALGORITHM_B1 0x00000002 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_ADJUSTMENT_ALGORITHM_B2 0x00000004 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_ADJUSTMENT_ALGORITHM_B3 0x00000008 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_MINIMUM_TRAIN_TIME_MANTISSA 27:24 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_MINIMUM_TRAIN_TIME_MANTISSA_INIT 0x00000002 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_MINIMUM_TRAIN_TIME_EXPONENT 31:28 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL_TXTRAIN_MINIMUM_TRAIN_TIME_EXPONENT_INIT 0x00000003 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2 0x00000624 /* RW-4R */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESTORE_PHY_TRAINING_PARAMS 0:0 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESTORE_PHY_TRAINING_PARAMS_ENABLE 0x00000001 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESTORE_PHY_TRAINING_PARAMS_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_1 1:1 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_2 2:2 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_2_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_3 3:3 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_3_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_4 4:4 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_4_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_5 5:5 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_5_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_6 6:6 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_6_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_7 7:7 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_7_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_8 8:8 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_8_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_9 9:9 /* RWIVF */ +#define 
NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_9_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_10 10:10 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_10_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_11 11:11 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_11_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_12 12:12 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_12_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_13 13:13 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_13_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_14 14:14 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_14_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_15 15:15 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_15_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_16 16:16 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_16_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_17 17:17 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_17_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_18 18:18 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_18_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_19 19:19 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_19_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_20 20:20 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_20_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_21 21:21 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_21_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_22 22:22 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_22_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_23 23:23 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_23_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_24 24:24 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_24_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_25 25:25 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_25_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_26 26:26 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_26_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_27 27:27 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_27_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_28 28:28 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_28_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_29 29:29 /* RWIVF */ 
+#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_29_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_30 30:30 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_30_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_31 31:31 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CHANNEL_CTRL2_RESERVED_31_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL 0x00000638 /* RW-4R */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_PWRM_SL_ENABLE 0:0 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_PWRM_SL_ENABLE_ENABLE 0x00000001 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_PWRM_SL_ENABLE_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_RESERVED_1 1:1 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_RESERVED_1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_PWRM_L2_ENABLE 2:2 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_PWRM_L2_ENABLE_ENABLE 0x00000001 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_PWRM_L2_ENABLE_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_RESERVED_3 3:3 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_AN1_CTRL_RESERVED_3_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_SID_LO 0x00000108 /* RW-4R */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_SID_LO_SID_31_0 31:0 /* RWIVF */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_SID_LO_SID_31_0_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_SID_HI 0x0000010c /* RW-4R */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_SID_HI_SID_63_32 31:0 /* RWIVF */ +#define NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_SID_HI_SID_63_32_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL 0x0000064c /* RW-4R */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_LINK_DISABLE 0:0 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_LINK_DISABLE_ENABLED 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_LINK_DISABLE_DISABLED 0x00000001 /* RW--V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_1 1:1 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_1_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_2 2:2 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_2_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_3 3:3 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_3_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_4 4:4 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_4_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_5 5:5 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_5_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_6 6:6 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_6_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_7 7:7 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_7_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_8 8:8 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_8_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_9 9:9 /* RWIVF */ +#define 
NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_9_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_10 10:10 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_10_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_11 11:11 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_11_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_12 12:12 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_12_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_13 13:13 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_13_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_14 14:14 /* RWIVF */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_14_INIT 0x00000000 /* RWI-V */ +#define NV_NVLIPT_LNK_CTRL_SW_LINK_MODE_CTRL_RESERVED_15 15:15 /* RWIVF */ +#endif // __lr10_dev_nvlipt_lnk_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvlperf_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlperf_ip.h new file mode 100644 index 000000000..5c7fda5c2 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlperf_ip.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvlperf_ip_h__ +#define __lr10_dev_nvlperf_ip_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_NVLPERF_CTRL_CLOCK_GATING 0x000000c8 /* RW-4R */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG 11:0 /* RWEVF */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG_INIT 0x00000fff /* RWE-V */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG_ENABLED 0x00000000 /* RW--V */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG_DISABLED 0x00000001 /* RW--V */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG__PROD 0x00000000 /* RW--V */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG_CTRL 12:12 /* RWEVF */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG_CTRL_ENABLED 0x00000000 /* RW--V */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG_CTRL_DISABLED 0x00000001 /* RWE-V */ +#define NV_NVLPERF_CTRL_CLOCK_GATING_CG1_SLCG_CTRL__PROD 0x00000000 /* RW--V */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING 0x000000cc /* RW-4R */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CG1_SLCG 11:0 /* RWEVF */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CG1_SLCG_INIT 0x00000fff /* RWE-V */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CG1_SLCG_ENABLED 0x00000000 /* RW--V */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CG1_SLCG_DISABLED 0x00000001 /* RW--V */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CG1_SLCG__PROD 0x00000000 /* RW--V */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CONTEXT_FREEZE 23:12 /* RWEVF */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CONTEXT_FREEZE_DISABLED 0x00000000 /* RWE-V */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CONTEXT_FREEZE_ENABLED 0x00000001 /* RW--V */ +#define NV_NVLPERF_PERF_CTRL_CLOCK_GATING_CONTEXT_FREEZE__PROD 0x00000000 /* RW--V */ +#endif // __lr10_dev_nvlperf_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvlphyctl_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlphyctl_ip.h new file mode 100644 index 000000000..899c5cc31 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlphyctl_ip.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvlphyctl_ip_h__ +#define __lr10_dev_nvlphyctl_ip_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6 0x0000281c /* RW-4R */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_RXCAL 0:0 /* RWEVF */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_RXCAL_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_RXCAL_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_INIT_TRAIN 3:3 /* RWEVF */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_INIT_TRAIN_NOT_COMPLETE 0x00000001 /* RWE-V */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_INIT_TRAIN_COMPLETE 0x00000000 /* RW--V */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_CDR_EN_DELAY 26:16 /* RWEVF */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_CDR_EN_DELAY_ZERO 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_CDR_EN_DELAY_SCL 31:27 /* RWEVF */ +#define NV_NVLPHYCTL_COMMON_CFG_CTL_6_CDR_EN_DELAY_SCL_ZERO 0x00000000 /* RWE-V */ + +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0 0x00002838 /* R--4R */ +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0_TX_IDDQ_DIS_STS 0:0 /* R--VF */ +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0_TX_SLEEP_DIS_STS 1:1 /* R--VF */ +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0_RXCAL_DONE 2:2 /* R--VF */ +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0_TX_DATA_READY_STS 3:3 /* R--VF */ +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0_TX_DATA_EN_STS 4:4 /* R--VF */ +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0_RX_IDDQ_DIS_STS 5:5 /* R--VF */ +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0_RX_SLEEP_DIS_STS 6:6 /* R--VF */ +#define NV_NVLPHYCTL_COMMON_CFG_STATUS_0_RX_DATA_EN_STS 7:7 /* R--VF */ + +#define NV_NVLPHYCTL_LANE_PAD_CTL_4(i) (0x0000284c+(i)*0x40) /* RW-4A */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4__SIZE_1 5 /* */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_CAL_EN 0:0 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_CAL_EN_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_CAL_EN_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_CAL_DONE 1:1 /* R--VF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EQ_TRAIN_EN 4:4 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EQ_TRAIN_EN_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EQ_TRAIN_EN_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EQ_TRAIN_DONE 5:5 /* R--VF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EOM_DONE 9:9 /* R--VF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_CAL_OVRD 12:12 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_CAL_OVRD_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_CAL_OVRD_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EQ_OVRD 13:13 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EQ_OVRD_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EQ_OVRD_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_PRECODE_INV_OVRD 14:14 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_PRECODE_INV_OVRD_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_PRECODE_INV_OVRD_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_PRECODE_INV_EN 15:15 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_PRECODE_INV_EN_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_PRECODE_INV_EN_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_4_RX_EOM_STATUS 31:16 /* R--VF */ + +#define NV_NVLPHYCTL_LANE_PAD_CTL_8(i) (0x00002864+(i)*0x40) /* RW-4A */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8__SIZE_1 5 /* */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EOM_OVRD 0:0 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EOM_OVRD_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EOM_OVRD_ON 
0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EOM_EN 1:1 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EOM_EN_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EOM_EN_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EQ_TRAIN_MODE 27:27 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EQ_TRAIN_MODE_INIT 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EQ_RESET_OVRD 28:28 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EQ_RESET_OVRD_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EQ_RESET_OVRD_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_CDR_RESET_OVRD 29:29 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_CDR_RESET_OVRD_OFF 0x00000000 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_CDR_RESET_OVRD_ON 0x00000001 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EQ_RESET 30:30 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EQ_RESET_OFF 0x00000000 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_RX_EQ_RESET_ON 0x00000001 /* RWE-V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_CDR_RESET 31:31 /* RWEVF */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_CDR_RESET_OFF 0x00000000 /* RW--V */ +#define NV_NVLPHYCTL_LANE_PAD_CTL_8_CDR_RESET_ON 0x00000001 /* RWE-V */ +#endif // __lr10_dev_nvlphyctl_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvlsaw_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlsaw_ip.h new file mode 100644 index 000000000..c558df3a7 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlsaw_ip.h @@ -0,0 +1,590 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvlsaw_ip_h__ +#define __lr10_dev_nvlsaw_ip_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_NVLSAW_SW_SCRATCH_3 0x000004ec /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_3_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_3_VALUE_INIT 0x00000000 /* RWE-V */ +#define NV_NVLSAW_SW_SCRATCH_13 0x00000514 /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_13_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_13_VALUE_INIT 0x00000000 /* RWE-V */ +#define NV_NVLSAW_SW_SCRATCH_15 0x0000051c /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_15_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_15_VALUE_INIT 0x00000000 /* RWE-V */ +#define NV_NVLSAW_SW_SCRATCH_2 0x000004e8 /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_2_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_2_VALUE_INIT 0x00000000 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY 0x00000864 /* -W-4R */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_PTIMER_0 20:20 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_PTIMER_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_PTIMER_1 21:21 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_PTIMER_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_PMGR_0 22:22 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_PMGR_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_PMGR_1 23:23 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_PMGR_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_SMBUS_MSGBOX 24:24 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_LEGACY_SMBUS_MSGBOX_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE 0x00000880 /* -W-4R */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_0 0:0 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_1 1:1 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_2 2:2 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_3 3:3 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_3_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_4 4:4 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_4_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_5 5:5 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_5_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_6 6:6 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_6_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_7 7:7 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_7_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_8 8:8 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NVLIPT_8_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_0 9:9 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_1 10:10 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_2 11:11 /* -WXVF */ +#define 
NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_3 12:12 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_3_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_4 13:13 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_4_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_5 14:14 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_5_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_6 15:15 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_6_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_7 16:16 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_7_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_8 17:17 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_NPG_8_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SAW_WRITE_LOCKED 25:25 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SAW_WRITE_LOCKED_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SOE_SHIM_FLUSH 26:26 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SOE_SHIM_FLUSH_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SOE_SHIM_ILLEGAL 27:27 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SOE_SHIM_ILLEGAL_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SMR_0 28:28 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SMR_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SMR_1 29:29 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_SMR_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_OVER_TEMP_ALERT 30:30 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_OVER_TEMP_ALERT_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_OVER_TEMP 31:31 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_CLR_CORRECTABLE_OVER_TEMP_ENABLE 0x00000001 /* -W--V */ + +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE 0x00000870 /* -W-4R */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_0 0:0 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_1 1:1 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_2 2:2 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_3 3:3 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_3_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_4 4:4 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_4_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_5 5:5 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_5_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_6 6:6 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_6_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_7 7:7 /* -WXVF */ +#define 
NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_7_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_8 8:8 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NVLIPT_8_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_0 9:9 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_1 10:10 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_2 11:11 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_3 12:12 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_3_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_4 13:13 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_4_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_5 14:14 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_5_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_6 15:15 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_6_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_7 16:16 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_7_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_8 17:17 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_NPG_8_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SAW_WRITE_LOCKED 25:25 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SAW_WRITE_LOCKED_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SOE_SHIM_FLUSH 26:26 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SOE_SHIM_FLUSH_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SOE_SHIM_ILLEGAL 27:27 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SOE_SHIM_ILLEGAL_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SMR_0 28:28 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SMR_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SMR_1 29:29 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_SMR_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_OVER_TEMP_ALERT 30:30 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_OVER_TEMP_ALERT_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_OVER_TEMP 31:31 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_CORRECTABLE_OVER_TEMP_ENABLE 0x00000001 /* -W--V */ + +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL 0x00000868 /* -W-4R */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_0 0:0 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_1 1:1 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_2 2:2 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_3 3:3 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_3_ENABLE 
0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_4 4:4 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_4_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_5 5:5 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_5_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_6 6:6 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_6_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_7 7:7 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_7_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_8 8:8 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NVLIPT_8_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_0 9:9 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_1 10:10 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_2 11:11 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_3 12:12 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_3_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_4 13:13 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_4_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_5 14:14 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_5_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_6 15:15 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_6_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_7 16:16 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_7_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_8 17:17 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NPG_8_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_SOE 18:18 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_SOE_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NXBAR_0 20:20 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NXBAR_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NXBAR_1 21:21 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NXBAR_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NXBAR_2 22:22 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NXBAR_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NXBAR_3 23:23 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_NXBAR_3_ENABLE 0x00000001 /* -W--V */ + +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL 0x0000086c /* -W-4R */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_0 0:0 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_1 1:1 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_2 2:2 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_3 3:3 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_3_ENABLE 
0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_4 4:4 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_4_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_5 5:5 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_5_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_6 6:6 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_6_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_7 7:7 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_7_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_8 8:8 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NVLIPT_8_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_0 9:9 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_0_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_1 10:10 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_1_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_2 11:11 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_2_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_3 12:12 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_3_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_4 13:13 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_4_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_5 14:14 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_5_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_6 15:15 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_6_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_7 16:16 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_7_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_8 17:17 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_NPG_8_ENABLE 0x00000001 /* -W--V */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_SOE 18:18 /* -WXVF */ +#define NV_NVLSAW_NVSPMC_INTR_EN_SET_NONFATAL_SOE_ENABLE 0x00000001 /* -W--V */ + +#define NV_NVLSAW_NVSPMC_INTR_LEGACY 0x00000840 /* R--4R */ +#define NV_NVLSAW_NVSPMC_INTR_LEGACY_PTIMER_0 20:20 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_LEGACY_PTIMER_1 21:21 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_LEGACY_PMGR_0 22:22 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_LEGACY_PMGR_1 23:23 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_LEGACY_SMBUS_MSGBOX 24:24 /* R--VF */ + +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL 0x00000848 /* R--4R */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_0 0:0 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_1 1:1 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_2 2:2 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_3 3:3 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_4 4:4 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_5 5:5 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_6 6:6 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_7 7:7 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NVLIPT_8 8:8 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_0 9:9 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_1 10:10 /* R--VF */ +#define 
NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_2 11:11 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_3 12:12 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_4 13:13 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_5 14:14 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_6 15:15 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_7 16:16 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_NPG_8 17:17 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_NONFATAL_SOE 18:18 /* R--VF */ + +#define NV_NVLSAW_NVSPMC_INTR_FATAL 0x00000844 /* R--4R */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_0 0:0 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_1 1:1 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_2 2:2 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_3 3:3 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_4 4:4 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_5 5:5 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_6 6:6 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_7 7:7 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NVLIPT_8 8:8 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_0 9:9 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_1 10:10 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_2 11:11 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_3 12:12 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_4 13:13 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_5 14:14 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_6 15:15 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_7 16:16 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NPG_8 17:17 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_SOE 18:18 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NXBAR_0 20:20 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NXBAR_1 21:21 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NXBAR_2 22:22 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_FATAL_NXBAR_3 23:23 /* R--VF */ + +#define NV_NVLSAW_NVSPMC_ENABLE 0x000008d0 /* RW-4R */ +#define NV_NVLSAW_NVSPMC_ENABLE_NXBAR 0:0 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NXBAR_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NXBAR_ENABLE 0x00000001 /* RWE-V */ + +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT 0x000008c8 /* RW-4R */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_0 0:0 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_0_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_0_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_1 1:1 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_1_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_1_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_2 2:2 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_2_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_2_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_3 3:3 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_3_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_3_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_4 4:4 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_4_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_4_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_5 5:5 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_5_DISABLE 0x00000000 /* 
RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_5_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_6 6:6 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_6_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_6_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_7 7:7 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_7_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_7_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_8 8:8 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_8_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NVLIPT_NVLIPT_8_ENABLE 0x00000001 /* RWE-V */ + +#define NV_NVLSAW_NVSPMC_ENABLE_NPG 0x000008cc /* RW-4R */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_0 0:0 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_0_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_0_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_1 1:1 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_1_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_1_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_2 2:2 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_2_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_2_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_3 3:3 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_3_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_3_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_4 4:4 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_4_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_4_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_5 5:5 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_5_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_5_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_6 6:6 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_6_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_6_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_7 7:7 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_7_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_7_ENABLE 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_8 8:8 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_8_DISABLE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_ENABLE_NPG_NPG_8_ENABLE 0x00000001 /* RWE-V */ + +#define NV_NVLSAW_GLBLLATENCYTIMERCTRL 0x00000040 /* RW-4R */ +#define NV_NVLSAW_GLBLLATENCYTIMERCTRL_ENABLE 0:0 /* RWEVF */ +#define NV_NVLSAW_GLBLLATENCYTIMERCTRL_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLSAW_GLBLLATENCYTIMERCTRL_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLSAW_GLBLLATENCYTIMERCTRL_ENABLE__PROD 0x00000001 /* RW--V */ + +#define NV_NVLSAW_SW_SCRATCH_6 0x000004f8 /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_6_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_6_VALUE_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLSAW_SW_SCRATCH_7 0x000004fc /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_7_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_7_VALUE_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLSAW_SW_SCRATCH_8 0x00000500 /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_8_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_8_VALUE_INIT 
0x00000000 /* RWE-V */ + +#define NV_NVLSAW_SW_SCRATCH_9 0x00000504 /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_9_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_9_VALUE_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLSAW_SW_SCRATCH_10 0x00000508 /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_10_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_10_VALUE_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLSAW_SW_SCRATCH_11 0x0000050c /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_11_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_11_VALUE_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLSAW_SCRATCH_COLD 0x000007c4 /* RW-4R */ +#define NV_NVLSAW_SCRATCH_COLD_DATA 31:0 /* RWIVF */ +#define NV_NVLSAW_SCRATCH_COLD_DATA_INIT 0xdeadbaad /* RWI-V */ + +#define NV_NVLSAW_SW_SCRATCH_12 0x00000510 /* RW-4R */ +#define NV_NVLSAW_SW_SCRATCH_12_VALUE 31:0 /* RWEVF */ +#define NV_NVLSAW_SW_SCRATCH_12_VALUE_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY 0x000008d4 /* RW-4R */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PTIMER_0 20:20 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PTIMER_0_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PTIMER_0_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PTIMER_1 21:21 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PTIMER_1_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PTIMER_1_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PMGR_0 22:22 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PMGR_0_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PMGR_0_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PMGR_1 23:23 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PMGR_1_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_PMGR_1_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_SMBUS_MSGBOX 24:24 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_SMBUS_MSGBOX_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY_SMBUS_MSGBOX_SOE 0x00000000 /* RW--V */ + +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE 0x000008e0 /* RW-4R */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_0 0:0 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_0_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_0_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_1 1:1 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_1_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_1_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_2 2:2 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_2_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_2_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_3 3:3 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_3_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_3_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_4 4:4 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_4_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_4_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_5 5:5 /* RWEVF */ +#define 
NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_5_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_5_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_6 6:6 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_6_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_6_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_7 7:7 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_7_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_7_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_8 8:8 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_8_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NVLIPT_8_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_0 9:9 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_0_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_0_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_1 10:10 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_1_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_1_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_2 11:11 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_2_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_2_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_3 12:12 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_3_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_3_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_4 13:13 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_4_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_4_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_5 14:14 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_5_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_5_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_6 15:15 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_6_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_6_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_7 16:16 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_7_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_7_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_8 17:17 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_8_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_NPG_8_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SAW_WRITE_LOCKED 25:25 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SAW_WRITE_LOCKED_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SAW_WRITE_LOCKED_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SOE_SHIM_FLUSH 26:26 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SOE_SHIM_FLUSH_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SOE_SHIM_FLUSH_SOE 
0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SOE_SHIM_ILLEGAL 27:27 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SOE_SHIM_ILLEGAL_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SOE_SHIM_ILLEGAL_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SMR_0 28:28 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SMR_0_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SMR_0_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SMR_1 29:29 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SMR_1_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_SMR_1_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_OVER_TEMP_ALERT 30:30 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_OVER_TEMP_ALERT_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_OVER_TEMP_ALERT_SOE 0x00000000 /* RW--V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_OVER_TEMP 31:31 /* RWEVF */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_OVER_TEMP_HOST 0x00000001 /* RWE-V */ +#define NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE_OVER_TEMP_SOE 0x00000000 /* RW--V */ + +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY 0x00000898 /* R--4R */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PTIMER_0 20:20 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PTIMER_0_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PTIMER_0_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PTIMER_1 21:21 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PTIMER_1_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PTIMER_1_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PMGR_0 22:22 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PMGR_0_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PMGR_0_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PMGR_1 23:23 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PMGR_1_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_PMGR_1_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_SMBUS_MSGBOX 24:24 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_SMBUS_MSGBOX_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY_SMBUS_MSGBOX_ENABLE 0x00000001 /* R---V */ + +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE 0x000008a4 /* R--4R */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_0 0:0 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_0_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_0_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_1 1:1 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_1_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_1_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_2 2:2 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_2_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_2_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_3 3:3 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_3_DISABLE 0x00000000 
/* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_3_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_4 4:4 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_4_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_4_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_5 5:5 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_5_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_5_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_6 6:6 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_6_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_6_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_7 7:7 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_7_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_7_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_8 8:8 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_8_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NVLIPT_8_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_0 9:9 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_0_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_0_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_1 10:10 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_1_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_1_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_2 11:11 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_2_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_2_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_3 12:12 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_3_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_3_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_4 13:13 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_4_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_4_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_5 14:14 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_5_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_5_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_6 15:15 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_6_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_6_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_7 16:16 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_7_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_7_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_8 17:17 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_8_DISABLE 0x00000000 /* R-E-V */ +#define 
NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_NPG_8_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SAW_WRITE_LOCKED 25:25 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SAW_WRITE_LOCKED_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SAW_WRITE_LOCKED_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SOE_SHIM_FLUSH 26:26 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SOE_SHIM_FLUSH_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SOE_SHIM_FLUSH_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SOE_SHIM_ILLEGAL 27:27 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SOE_SHIM_ILLEGAL_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SOE_SHIM_ILLEGAL_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SMR_0 28:28 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SMR_0_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SMR_0_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SMR_1 29:29 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SMR_1_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_SMR_1_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_OVER_TEMP_ALERT 30:30 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_OVER_TEMP_ALERT_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_OVER_TEMP_ALERT_ENABLE 0x00000001 /* R---V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_OVER_TEMP 31:31 /* R-EVF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_OVER_TEMP_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE_OVER_TEMP_ENABLE 0x00000001 /* R---V */ + +#define NV_NVLSAW_NVSPMC_INTR_SOE_LEGACY 0x00000884 /* R--4R */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_LEGACY_PTIMER_0 20:20 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_LEGACY_PTIMER_1 21:21 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_LEGACY_PMGR_0 22:22 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_LEGACY_PMGR_1 23:23 /* R--VF */ +#define NV_NVLSAW_NVSPMC_INTR_SOE_LEGACY_SMBUS_MSGBOX 24:24 /* R--VF */ +#endif // __lr10_dev_nvlsaw_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvlsaw_ip_addendum.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlsaw_ip_addendum.h new file mode 100644 index 000000000..c5c3e3442 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvlsaw_ip_addendum.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file dev_nvlsaw_ip_addendum.h + * @brief NVSwitch specific defines that are missing in the dev_nvlsaw_ip.h manual. + */ + + +#ifndef __lr10_dev_nvlsaw_ip_addendum_h__ +#define __lr10_dev_nvlsaw_ip_addendum_h__ + +#define NV_NVLSAW_SW_SCRATCH_0_ERASE_LEDGER_CARVEOUT_OFFSET 11:0 +#define NV_NVLSAW_SW_SCRATCH_0_ERASE_LEDGER_CARVEOUT_SIZE 23:12 +// VBIOS write protect mode for nvflash +#define NV_NVLSAW_SW_SCRATCH_0_WRITE_PROTECT_MODE 24:24 +#define NV_NVLSAW_SW_SCRATCH_0_WRITE_PROTECT_MODE_DISABLED 0x00000000 +#define NV_NVLSAW_SW_SCRATCH_0_WRITE_PROTECT_MODE_ENABLED 0x00000001 + +#define NV_NVLSAW_SW_SCRATCH_7_BIOS_OEM_VERSION 7:0 +#define NV_NVLSAW_SW_SCRATCH_7_RESERVED 31:8 + +#define NV_NVLSAW_SW_SCRATCH_2_INFOROM_WRITE_PROTECT_MODE 0:0 +#define NV_NVLSAW_SW_SCRATCH_2_INFOROM_WRITE_PROTECT_MODE_DISABLED 0x00000000 +#define NV_NVLSAW_SW_SCRATCH_2_INFOROM_WRITE_PROTECT_MODE_ENABLED 0x00000001 +#define NV_NVLSAW_SW_SCRATCH_2_INFOROM_CARVEOUT_OFFSET 19:8 +#define NV_NVLSAW_SW_SCRATCH_2_INFOROM_CARVEOUT_SIZE 31:20 + +#define NV_NVLSAW_SCRATCH_COLD_OOB_BLACKLIST_DEVICE_REQUESTED 0:0 +#define NV_NVLSAW_SCRATCH_COLD_OOB_BLACKLIST_DEVICE_REQUESTED_ENABLE 0x00000000 +#define NV_NVLSAW_SCRATCH_COLD_OOB_BLACKLIST_DEVICE_REQUESTED_DISABLE 0x00000001 + +// SCRATCH_12 is used to communicate fabric state to SOE. Bit fields: +#define NV_NVLSAW_SW_SCRATCH_12_DEVICE_RESET_REQUIRED 0:0 +#define NV_NVLSAW_SW_SCRATCH_12_DEVICE_BLACKLIST_REASON 5:1 +#define NV_NVLSAW_SW_SCRATCH_12_DEVICE_FABRIC_STATE 8:6 +#define NV_NVLSAW_SW_SCRATCH_12_DRIVER_FABRIC_STATE 11:9 +#define NV_NVLSAW_SW_SCRATCH_12_FABRIC_MANAGER_ERROR 18:12 +#define NV_NVLSAW_SW_SCRATCH_12_EVENT_MESSAGE_COUNT 26:19 + +#define NV_NVLSAW_SCRATCH_COLD_OOB_BLACKLIST_DEVICE_REQUESTED 0:0 + +#endif //__lr10_dev_nvlsaw_ip_addendum_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvltlc_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvltlc_ip.h new file mode 100644 index 000000000..2e4108440 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvltlc_ip.h @@ -0,0 +1,1169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvltlc_ip_h__ +#define __lr10_dev_nvltlc_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_LO(i) (0x0000154c+(i)*0x54) /* RW-4A */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_LO__SIZE_1 4 /* */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_LO_COUNT 31:0 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_LO_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_HI(i) (0x00001550+(i)*0x54) /* RW-4A */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_HI__SIZE_1 4 /* */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_HI_COUNT 30:0 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_HI_COUNT_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_HI_ROLLOVER 31:31 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_HI_ROLLOVER_INIT 0x00000000 /* RWD-V */ + +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_LO(i) (0x00001d4c+(i)*0x54) /* RW-4A */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_LO__SIZE_1 4 /* */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_LO_COUNT 31:0 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_LO_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_HI(i) (0x00001d50+(i)*0x54) /* RW-4A */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_HI__SIZE_1 4 /* */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_HI_COUNT 30:0 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_HI_COUNT_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_HI_ROLLOVER 31:31 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_HI_ROLLOVER_INIT 0x00000000 /* RWD-V */ + +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0 0x00001a8c /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLHDRPARITYERR 0:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLHDRPARITYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLHDRPARITYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLDATAPARITYERR 1:1 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLDATAPARITYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLDATAPARITYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLCTRLPARITYERR 2:2 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLCTRLPARITYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLCTRLPARITYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDAEERR 3:3 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDAEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDAEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDBEERR 4:4 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDBEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDBEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDADDRALIGNERR 5:5 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDADDRALIGNERR_DISABLE 0x00000000 /* RWE-V */ +#define 
NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXINVALIDADDRALIGNERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXPKTLENERR 6:6 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXPKTLENERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXPKTLENERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCMDENCERR 7:7 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCMDENCERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCMDENCERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVDATLENENCERR 8:8 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVDATLENENCERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVDATLENENCERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVPKTSTATUSERR 9:9 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVPKTSTATUSERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVPKTSTATUSERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCACHEATTRPROBEREQERR 10:10 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCACHEATTRPROBEREQERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCACHEATTRPROBEREQERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCACHEATTRPROBERSPERR 11:11 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCACHEATTRPROBERSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSVCACHEATTRPROBERSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_DATLENGTRMWREQMAXERR 12:12 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_DATLENGTRMWREQMAXERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_DATLENGTRMWREQMAXERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_DATLENLTATRRSPMINERR 13:13 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_DATLENLTATRRSPMINERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_DATLENLTATRRSPMINERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALIDCACHEATTRPOERR 14:14 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALIDCACHEATTRPOERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALIDCACHEATTRPOERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALIDCRERR 15:15 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALIDCRERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALIDCRERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_HW_ERR 16:16 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_HW_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_HW_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_UR_ERR 17:17 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_UR_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_UR_ERR_ENABLE 0x00000001 
/* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_PRIV_ERR 18:18 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_PRIV_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXRSPSTATUS_PRIV_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALID_COLLAPSED_RESPONSE_ERR 19:19 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALID_COLLAPSED_RESPONSE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_INVALID_COLLAPSED_RESPONSE_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0 0x0000128c /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_TXDLCREDITPARITYERR 17:17 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_TXDLCREDITPARITYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_TXDLCREDITPARITYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_HDR_ECC_DBE_ERR 18:18 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_DAT_ECC_DBE_ERR 19:19 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_ECC_LIMIT_ERR 20:20 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_CREQ_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_HDR_ECC_DBE_ERR 21:21 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_DAT_ECC_DBE_ERR 22:22 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_ECC_LIMIT_ERR 23:23 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_HDR_ECC_DBE_ERR 24:24 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_DAT_ECC_DBE_ERR 25:25 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_ECC_LIMIT_ERR 26:26 /* RWEVF */ 
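/*
 * Illustrative sketch (not part of the original commit): the H:L values in
 * the field defines above and below (for example 0:0, 17:17, 19:8) encode a
 * bit range rather than a plain number, and are meant to be consumed through
 * token-pasting bit-field macros in driver code.  The FIELD_* helpers here
 * are hypothetical stand-ins for the driver's own helpers, shown only to make
 * the H:L convention concrete; the register and field names are taken from
 * the dev_nvltlc_ip.h header added in this diff, and the include path is
 * assumed.
 */
#include <stdint.h>
#include <stdio.h>
#include "dev_nvltlc_ip.h"              /* assumed include path */

#define FIELD_HI(f)     (1 ? f)         /* "17:17" used in a ternary yields the high bit */
#define FIELD_LO(f)     (0 ? f)         /* ...and the low bit */
#define FIELD_MASK(f)   ((0xFFFFFFFFu >> (31 - FIELD_HI(f) + FIELD_LO(f))) << FIELD_LO(f))
#define FIELD_VAL(f, v) (((v) & FIELD_MASK(f)) >> FIELD_LO(f))
#define FIELD_NUM(f, n) (((uint32_t)(n) << FIELD_LO(f)) & FIELD_MASK(f))

int main(void)
{
    /* Stand-in for a value read from NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0 (0x1a8c). */
    uint32_t en0 = 0;

    /* Enable non-fatal reporting of RX DL header parity errors (field 0:0). */
    en0 &= ~FIELD_MASK(NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLHDRPARITYERR);
    en0 |=  FIELD_NUM(NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLHDRPARITYERR,
                      NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLHDRPARITYERR_ENABLE);

    printf("EN_0 = 0x%08x, RXDLHDRPARITYERR = %u\n", en0,
           FIELD_VAL(NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_0_RXDLHDRPARITYERR, en0));
    return 0;
}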
+#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_COM_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_HDR_ECC_DBE_ERR 27:27 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_DAT_ECC_DBE_ERR 28:28 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_ECC_LIMIT_ERR 29:29 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_0_RSP1_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1 0x00001aac /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXHDROVFERR 7:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXHDROVFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXHDROVFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXDATAOVFERR 15:8 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXDATAOVFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXDATAOVFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_STOMPDETERR 16:16 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_STOMPDETERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_STOMPDETERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXPOISONERR 17:17 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXPOISONERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_RXPOISONERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_HEARTBEAT_TIMEOUT_ERR 18:18 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_HEARTBEAT_TIMEOUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_HEARTBEAT_TIMEOUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1 0x000012a0 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC0 0:0 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC0_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC0_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC1 1:1 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC1_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC1_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC2 2:2 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC2_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC2_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC3 3:3 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC3_NONE 0x00000000 /* RWD-V */ +#define 
NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC3_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC4 4:4 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC4_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC4_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC5 5:5 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC5_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC5_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC6 6:6 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC6_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC6_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC7 7:7 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC7_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_1_AN1_TIMEOUT_VC7_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1 0x000012ac /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC0 0:0 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC0_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC0_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC1 1:1 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC1_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC1_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC2 2:2 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC2_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC2_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC3 3:3 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC3_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC3_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC4 4:4 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC4_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC4_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC5 5:5 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC5_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC5_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC6 6:6 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC6_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC6_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC7 7:7 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC7_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_NON_FATAL_REPORT_EN_1_AN1_TIMEOUT_VC7_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1 0x000012bc /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC0 0:0 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC0_NONE 0x00000000 
/* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC0_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC1 1:1 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC1_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC1_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC2 2:2 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC2_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC2_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC3 3:3 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC3_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC3_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC4 4:4 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC4_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC4_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC5 5:5 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC5_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC5_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC6 6:6 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC6_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC6_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC7 7:7 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC7_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_1_AN1_TIMEOUT_VC7_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1 0x000012b8 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC0 0:0 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC0_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC0_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC1 1:1 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC1_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC1_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC2 2:2 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC2_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC2_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC3 3:3 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC3_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC3_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC4 4:4 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC4_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC4_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC5 5:5 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC5_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC5_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC6 6:6 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC6_INIT 0x00000000 /* RWD-V */ +#define 
NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC6_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC7 7:7 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC7_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_1_AN1_TIMEOUT_VC7_INSERT 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0 0x00000288 /* RW-4R */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR 0:0 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_DAT_ECC_DBE_ERR 9:9 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_ECC_LIMIT_ERR 10:10 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXPOISONDET 23:23 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXPOISONDET_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXPOISONDET_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_HW_ERR 24:24 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_HW_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_HW_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_UR_ERR 25:25 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_UR_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_UR_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_PRIV_ERR 26:26 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_PRIV_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_SYS_ERR_FATAL_REPORT_EN_0_TXRSPSTATUS_PRIV_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0 0x0000029c /* RW-4R */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_PARITY_ERR 0:0 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_HDR_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_DAT_ECC_DBE_ERR 9:9 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_ECC_LIMIT_ERR 10:10 /* RWDVF */ 
+#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_NCISOC_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXPOISONDET 23:23 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXPOISONDET_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXPOISONDET_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_HW_ERR 24:24 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_HW_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_HW_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_UR_ERR 25:25 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_UR_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_UR_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_PRIV_ERR 26:26 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_PRIV_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_FIRST_0_TXRSPSTATUS_PRIV_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0 0x00000298 /* RW-4R */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_PARITY_ERR 0:0 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_PARITY_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_PARITY_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_HDR_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_HDR_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_HDR_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_DAT_ECC_DBE_ERR 9:9 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_DAT_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_DAT_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_ECC_LIMIT_ERR 10:10 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_ECC_LIMIT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_NCISOC_ECC_LIMIT_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXPOISONDET 23:23 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXPOISONDET_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXPOISONDET_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_HW_ERR 24:24 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_HW_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_HW_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_UR_ERR 25:25 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_UR_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_UR_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_PRIV_ERR 26:26 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_PRIV_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_REPORT_INJECT_0_TXRSPSTATUS_PRIV_ERR_INSERT 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0 0x00000280 /* RW-4R */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_PARITY_ERR 0:0 /* RWDVF */ +#define 
NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_HDR_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_DAT_ECC_DBE_ERR 9:9 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_ECC_LIMIT_ERR 10:10 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_NCISOC_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXPOISONDET 23:23 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXPOISONDET_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXPOISONDET_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_HW_ERR 24:24 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_HW_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_HW_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_UR_ERR 25:25 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_UR_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_UR_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_PRIV_ERR 26:26 /* RWDVF */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_PRIV_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_SYS_ERR_STATUS_0_TXRSPSTATUS_PRIV_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0 0x00000a80 /* RW-4R */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_NCISOC_PARITY_ERR 0:0 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_NCISOC_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_NCISOC_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_HDR_RAM_ECC_DBE_ERR 1:1 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_HDR_RAM_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_HDR_RAM_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_HDR_RAM_ECC_LIMIT_ERR 2:2 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_HDR_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_HDR_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT0_RAM_ECC_DBE_ERR 3:3 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT0_RAM_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT0_RAM_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT0_RAM_ECC_LIMIT_ERR 4:4 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT0_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT0_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT1_RAM_ECC_DBE_ERR 5:5 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT1_RAM_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT1_RAM_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define 
NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT1_RAM_ECC_LIMIT_ERR 6:6 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT1_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_STATUS_0_DAT1_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0 0x00000a88 /* RW-4R */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR 0:0 /* RWEVF */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_NCISOC_PARITY_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_HDR_RAM_ECC_DBE_ERR 1:1 /* RWEVF */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_HDR_RAM_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_HDR_RAM_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_HDR_RAM_ECC_LIMIT_ERR 2:2 /* RWEVF */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_HDR_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_HDR_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT0_RAM_ECC_DBE_ERR 3:3 /* RWEVF */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT0_RAM_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT0_RAM_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT0_RAM_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT0_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT0_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT1_RAM_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT1_RAM_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT1_RAM_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT1_RAM_ECC_LIMIT_ERR 6:6 /* RWEVF */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT1_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_SYS_ERR_FATAL_REPORT_EN_0_DAT1_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0 0x00000a9c /* RW-4R */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_NCISOC_PARITY_ERR 0:0 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_NCISOC_PARITY_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_NCISOC_PARITY_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_HDR_RAM_ECC_DBE_ERR 1:1 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_HDR_RAM_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_HDR_RAM_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_HDR_RAM_ECC_LIMIT_ERR 2:2 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_HDR_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_HDR_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT0_RAM_ECC_DBE_ERR 3:3 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT0_RAM_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT0_RAM_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT0_RAM_ECC_LIMIT_ERR 4:4 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT0_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define 
NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT0_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT1_RAM_ECC_DBE_ERR 5:5 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT1_RAM_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT1_RAM_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT1_RAM_ECC_LIMIT_ERR 6:6 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT1_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_FIRST_0_DAT1_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0 0x00000a98 /* RW-4R */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_NCISOC_PARITY_ERR 0:0 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_NCISOC_PARITY_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_NCISOC_PARITY_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_HDR_RAM_ECC_DBE_ERR 1:1 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_HDR_RAM_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_HDR_RAM_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_HDR_RAM_ECC_LIMIT_ERR 2:2 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_HDR_RAM_ECC_LIMIT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_HDR_RAM_ECC_LIMIT_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT0_RAM_ECC_DBE_ERR 3:3 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT0_RAM_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT0_RAM_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT0_RAM_ECC_LIMIT_ERR 4:4 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT0_RAM_ECC_LIMIT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT0_RAM_ECC_LIMIT_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT1_RAM_ECC_DBE_ERR 5:5 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT1_RAM_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT1_RAM_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT1_RAM_ECC_LIMIT_ERR 6:6 /* RWDVF */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT1_RAM_ECC_LIMIT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_SYS_ERR_REPORT_INJECT_0_DAT1_RAM_ECC_LIMIT_ERR_INSERT 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0 0x00001280 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_TXDLCREDITPARITYERR 17:17 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_TXDLCREDITPARITYERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_TXDLCREDITPARITYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_HDR_ECC_DBE_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_DAT_ECC_DBE_ERR 19:19 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_ECC_LIMIT_ERR 20:20 /* RWDVF */ +#define 
NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_CREQ_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_HDR_ECC_DBE_ERR 21:21 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_DAT_ECC_DBE_ERR 22:22 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_ECC_LIMIT_ERR 23:23 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_HDR_ECC_DBE_ERR 24:24 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_DAT_ECC_DBE_ERR 25:25 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_ECC_LIMIT_ERR 26:26 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_COM_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_HDR_ECC_DBE_ERR 27:27 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_DAT_ECC_DBE_ERR 28:28 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_ECC_LIMIT_ERR 29:29 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_STATUS_0_RSP1_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0 0x00001288 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_TXDLCREDITPARITYERR 17:17 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_TXDLCREDITPARITYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_TXDLCREDITPARITYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_HDR_ECC_DBE_ERR 18:18 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_DAT_ECC_DBE_ERR 19:19 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define 
NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_ECC_LIMIT_ERR 20:20 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_CREQ_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_HDR_ECC_DBE_ERR 21:21 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_DAT_ECC_DBE_ERR 22:22 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_ECC_LIMIT_ERR 23:23 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_HDR_ECC_DBE_ERR 24:24 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_DAT_ECC_DBE_ERR 25:25 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_ECC_LIMIT_ERR 26:26 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_COM_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_HDR_ECC_DBE_ERR 27:27 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_HDR_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_HDR_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_DAT_ECC_DBE_ERR 28:28 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_DAT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_DAT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_ECC_LIMIT_ERR 29:29 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_ERR_FATAL_REPORT_EN_0_RSP1_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0 0x0000129c /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_TXDLCREDITPARITYERR 17:17 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_TXDLCREDITPARITYERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_TXDLCREDITPARITYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_HDR_ECC_DBE_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define 
NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_DAT_ECC_DBE_ERR 19:19 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_ECC_LIMIT_ERR 20:20 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_CREQ_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_HDR_ECC_DBE_ERR 21:21 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_DAT_ECC_DBE_ERR 22:22 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_ECC_LIMIT_ERR 23:23 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_HDR_ECC_DBE_ERR 24:24 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_DAT_ECC_DBE_ERR 25:25 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_ECC_LIMIT_ERR 26:26 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_COM_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_HDR_ECC_DBE_ERR 27:27 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_HDR_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_HDR_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_DAT_ECC_DBE_ERR 28:28 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_DAT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_DAT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_ECC_LIMIT_ERR 29:29 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_FIRST_0_RSP1_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0 0x00001298 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_TXDLCREDITPARITYERR 17:17 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_TXDLCREDITPARITYERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_TXDLCREDITPARITYERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_HDR_ECC_DBE_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_HDR_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_HDR_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define 
NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_DAT_ECC_DBE_ERR 19:19 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_DAT_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_DAT_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_ECC_LIMIT_ERR 20:20 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_ECC_LIMIT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_CREQ_RAM_ECC_LIMIT_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_HDR_ECC_DBE_ERR 21:21 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_HDR_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_HDR_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_DAT_ECC_DBE_ERR 22:22 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_DAT_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_DAT_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_ECC_LIMIT_ERR 23:23 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_ECC_LIMIT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP_RAM_ECC_LIMIT_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_HDR_ECC_DBE_ERR 24:24 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_HDR_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_HDR_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_DAT_ECC_DBE_ERR 25:25 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_DAT_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_DAT_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_ECC_LIMIT_ERR 26:26 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_ECC_LIMIT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_COM_RAM_ECC_LIMIT_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_HDR_ECC_DBE_ERR 27:27 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_HDR_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_HDR_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_DAT_ECC_DBE_ERR 28:28 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_DAT_ECC_DBE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_DAT_ECC_DBE_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_ECC_LIMIT_ERR 29:29 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_ECC_LIMIT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_ERR_REPORT_INJECT_0_RSP1_RAM_ECC_LIMIT_ERR_INSERT 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0 0x00001a80 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLHDRPARITYERR 0:0 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLHDRPARITYERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLHDRPARITYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLDATAPARITYERR 1:1 /* RWDVF */ +#define 
NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLDATAPARITYERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLDATAPARITYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLCTRLPARITYERR 2:2 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLCTRLPARITYERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXDLCTRLPARITYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDAEERR 3:3 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDAEERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDAEERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDBEERR 4:4 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDBEERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDBEERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDADDRALIGNERR 5:5 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDADDRALIGNERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXINVALIDADDRALIGNERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXPKTLENERR 6:6 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXPKTLENERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXPKTLENERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCMDENCERR 7:7 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCMDENCERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCMDENCERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVDATLENENCERR 8:8 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVDATLENENCERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVDATLENENCERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVPKTSTATUSERR 9:9 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVPKTSTATUSERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVPKTSTATUSERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCACHEATTRPROBEREQERR 10:10 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCACHEATTRPROBEREQERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCACHEATTRPROBEREQERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCACHEATTRPROBERSPERR 11:11 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCACHEATTRPROBERSPERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RSVCACHEATTRPROBERSPERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_DATLENGTRMWREQMAXERR 12:12 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_DATLENGTRMWREQMAXERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_DATLENGTRMWREQMAXERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_DATLENLTATRRSPMINERR 13:13 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_DATLENLTATRRSPMINERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_DATLENLTATRRSPMINERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALIDCACHEATTRPOERR 14:14 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALIDCACHEATTRPOERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALIDCACHEATTRPOERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALIDCRERR 15:15 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALIDCRERR_NONE 0x00000000 /* RWD-V */ +#define 
NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALIDCRERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_HW_ERR 16:16 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_HW_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_HW_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_UR_ERR 17:17 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_UR_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_UR_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_PRIV_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_PRIV_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_RXRSPSTATUS_PRIV_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALID_COLLAPSED_RESPONSE_ERR 19:19 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALID_COLLAPSED_RESPONSE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_0_INVALID_COLLAPSED_RESPONSE_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0 0x00001a88 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLHDRPARITYERR 0:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLHDRPARITYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLHDRPARITYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLDATAPARITYERR 1:1 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLDATAPARITYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLDATAPARITYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLCTRLPARITYERR 2:2 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLCTRLPARITYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXDLCTRLPARITYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDAEERR 3:3 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDAEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDAEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDBEERR 4:4 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDBEERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDBEERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDADDRALIGNERR 5:5 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDADDRALIGNERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXINVALIDADDRALIGNERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXPKTLENERR 6:6 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXPKTLENERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXPKTLENERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCMDENCERR 7:7 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCMDENCERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCMDENCERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVDATLENENCERR 8:8 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVDATLENENCERR_DISABLE 0x00000000 /* RWE-V */ +#define 
NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVDATLENENCERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVPKTSTATUSERR 9:9 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVPKTSTATUSERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVPKTSTATUSERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCACHEATTRPROBEREQERR 10:10 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCACHEATTRPROBEREQERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCACHEATTRPROBEREQERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCACHEATTRPROBERSPERR 11:11 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCACHEATTRPROBERSPERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RSVCACHEATTRPROBERSPERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_DATLENGTRMWREQMAXERR 12:12 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_DATLENGTRMWREQMAXERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_DATLENGTRMWREQMAXERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_DATLENLTATRRSPMINERR 13:13 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_DATLENLTATRRSPMINERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_DATLENLTATRRSPMINERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALIDCACHEATTRPOERR 14:14 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALIDCACHEATTRPOERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALIDCACHEATTRPOERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALIDCRERR 15:15 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALIDCRERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALIDCRERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_HW_ERR 16:16 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_HW_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_HW_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_UR_ERR 17:17 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_UR_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_UR_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_PRIV_ERR 18:18 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_PRIV_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_RXRSPSTATUS_PRIV_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALID_COLLAPSED_RESPONSE_ERR 19:19 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALID_COLLAPSED_RESPONSE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_0_INVALID_COLLAPSED_RESPONSE_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0 0x00001a9c /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLHDRPARITYERR 0:0 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLHDRPARITYERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLHDRPARITYERR_CLEAR 0x00000001 /* RW--V */ +#define 
NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLDATAPARITYERR 1:1 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLDATAPARITYERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLDATAPARITYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLCTRLPARITYERR 2:2 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLCTRLPARITYERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXDLCTRLPARITYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDAEERR 3:3 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDAEERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDAEERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDBEERR 4:4 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDBEERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDBEERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDADDRALIGNERR 5:5 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDADDRALIGNERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXINVALIDADDRALIGNERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXPKTLENERR 6:6 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXPKTLENERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXPKTLENERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCMDENCERR 7:7 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCMDENCERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCMDENCERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVDATLENENCERR 8:8 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVDATLENENCERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVDATLENENCERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVPKTSTATUSERR 9:9 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVPKTSTATUSERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVPKTSTATUSERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCACHEATTRPROBEREQERR 10:10 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCACHEATTRPROBEREQERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCACHEATTRPROBEREQERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCACHEATTRPROBERSPERR 11:11 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCACHEATTRPROBERSPERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RSVCACHEATTRPROBERSPERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_DATLENGTRMWREQMAXERR 12:12 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_DATLENGTRMWREQMAXERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_DATLENGTRMWREQMAXERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_DATLENLTATRRSPMINERR 13:13 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_DATLENLTATRRSPMINERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_DATLENLTATRRSPMINERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALIDCACHEATTRPOERR 14:14 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALIDCACHEATTRPOERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALIDCACHEATTRPOERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALIDCRERR 15:15 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALIDCRERR_NONE 0x00000000 /* RWD-V */ +#define 
NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALIDCRERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_HW_ERR 16:16 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_HW_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_HW_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_UR_ERR 17:17 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_UR_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_UR_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_PRIV_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_PRIV_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_RXRSPSTATUS_PRIV_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALID_COLLAPSED_RESPONSE_ERR 19:19 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALID_COLLAPSED_RESPONSE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_0_INVALID_COLLAPSED_RESPONSE_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0 0x00001a98 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLHDRPARITYERR 0:0 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLHDRPARITYERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLHDRPARITYERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLDATAPARITYERR 1:1 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLDATAPARITYERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLDATAPARITYERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLCTRLPARITYERR 2:2 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLCTRLPARITYERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXDLCTRLPARITYERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDAEERR 3:3 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDAEERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDAEERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDBEERR 4:4 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDBEERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDBEERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDADDRALIGNERR 5:5 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDADDRALIGNERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXINVALIDADDRALIGNERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXPKTLENERR 6:6 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXPKTLENERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXPKTLENERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCMDENCERR 7:7 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCMDENCERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCMDENCERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVDATLENENCERR 8:8 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVDATLENENCERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVDATLENENCERR_INSERT 0x00000001 /* RW--V */ +#define 
NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVPKTSTATUSERR 9:9 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVPKTSTATUSERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVPKTSTATUSERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCACHEATTRPROBEREQERR 10:10 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCACHEATTRPROBEREQERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCACHEATTRPROBEREQERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCACHEATTRPROBERSPERR 11:11 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCACHEATTRPROBERSPERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RSVCACHEATTRPROBERSPERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_DATLENGTRMWREQMAXERR 12:12 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_DATLENGTRMWREQMAXERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_DATLENGTRMWREQMAXERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_DATLENLTATRRSPMINERR 13:13 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_DATLENLTATRRSPMINERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_DATLENLTATRRSPMINERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALIDCACHEATTRPOERR 14:14 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALIDCACHEATTRPOERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALIDCACHEATTRPOERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALIDCRERR 15:15 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALIDCRERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALIDCRERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_HW_ERR 16:16 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_HW_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_HW_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_UR_ERR 17:17 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_UR_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_UR_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_PRIV_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_PRIV_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_RXRSPSTATUS_PRIV_ERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALID_COLLAPSED_RESPONSE_ERR 19:19 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALID_COLLAPSED_RESPONSE_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_0_INVALID_COLLAPSED_RESPONSE_ERR_INSERT 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1 0x00001aa0 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXHDROVFERR 7:0 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXHDROVFERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXHDROVFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXDATAOVFERR 15:8 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXDATAOVFERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXDATAOVFERR_CLEAR 0x00000001 /* RW--V 
*/ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_STOMPDETERR 16:16 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_STOMPDETERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_STOMPDETERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXPOISONERR 17:17 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXPOISONERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_RXPOISONERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_AN1_HEARTBEAT_TIMEOUT_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_AN1_HEARTBEAT_TIMEOUT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_STATUS_1_AN1_HEARTBEAT_TIMEOUT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1 0x00001aa8 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXHDROVFERR 7:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXHDROVFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXHDROVFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXDATAOVFERR 15:8 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXDATAOVFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXDATAOVFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_STOMPDETERR 16:16 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_STOMPDETERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_STOMPDETERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXPOISONERR 17:17 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXPOISONERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_RXPOISONERR_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_AN1_HEARTBEAT_TIMEOUT_ERR 18:18 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_AN1_HEARTBEAT_TIMEOUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_ERR_FATAL_REPORT_EN_1_AN1_HEARTBEAT_TIMEOUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1 0x00001abc /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXHDROVFERR 7:0 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXHDROVFERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXHDROVFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXDATAOVFERR 15:8 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXDATAOVFERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXDATAOVFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_STOMPDETERR 16:16 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_STOMPDETERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_STOMPDETERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXPOISONERR 17:17 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXPOISONERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_RXPOISONERR_CLEAR 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_AN1_HEARTBEAT_TIMEOUT_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_AN1_HEARTBEAT_TIMEOUT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_FIRST_1_AN1_HEARTBEAT_TIMEOUT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1 0x00001ab8 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXHDROVFERR 7:0 /* RWDVF */ +#define 
NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXHDROVFERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXHDROVFERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXDATAOVFERR 15:8 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXDATAOVFERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXDATAOVFERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_STOMPDETERR 16:16 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_STOMPDETERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_STOMPDETERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXPOISONERR 17:17 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXPOISONERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_RXPOISONERR_INSERT 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_AN1_HEARTBEAT_TIMEOUT_ERR 18:18 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_AN1_HEARTBEAT_TIMEOUT_ERR_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_ERR_REPORT_INJECT_1_AN1_HEARTBEAT_TIMEOUT_ERR_INSERT 0x00000001 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_PWRM_IC_LIMIT 0x00001418 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_LIMIT_LIMIT 31:0 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_LIMIT_LIMIT_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_RX_LNK_PWRM_IC_LIMIT 0x00001c18 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_LIMIT_LIMIT 31:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_LIMIT_LIMIT_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_TX_LNK_PWRM_IC_INC 0x00001408 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_INC_FBINC 15:0 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_INC_FBINC_INIT 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_INC_LPINC 31:16 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_INC_LPINC_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_RX_LNK_PWRM_IC_INC 0x00001c08 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_INC_FBINC 15:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_INC_FBINC_INIT 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_INC_LPINC 31:16 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_INC_LPINC_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_TX_LNK_PWRM_IC_DEC 0x0000140c /* RW-4R */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_DEC_FBDEC 15:0 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_DEC_FBDEC_INIT 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_DEC_LPDEC 31:16 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_DEC_LPDEC_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_RX_LNK_PWRM_IC_DEC 0x00001c0c /* RW-4R */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_DEC_FBDEC 15:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_DEC_FBDEC_INIT 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_DEC_LPDEC 31:16 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_DEC_LPDEC_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_TX_LNK_PWRM_IC_LP_ENTER_THRESHOLD 0x00001410 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_LP_ENTER_THRESHOLD_THRESHOLD 31:0 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_LP_ENTER_THRESHOLD_THRESHOLD_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_RX_LNK_PWRM_IC_LP_ENTER_THRESHOLD 0x00001c10 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_LP_ENTER_THRESHOLD_THRESHOLD 31:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_LP_ENTER_THRESHOLD_THRESHOLD_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_TX_LNK_PWRM_IC_LP_EXIT_THRESHOLD 0x00001414 /* RW-4R */ +#define 
NV_NVLTLC_TX_LNK_PWRM_IC_LP_EXIT_THRESHOLD_THRESHOLD 31:0 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_LP_EXIT_THRESHOLD_THRESHOLD_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_RX_LNK_PWRM_IC_LP_EXIT_THRESHOLD 0x00001c14 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_LP_EXIT_THRESHOLD_THRESHOLD 31:0 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_LP_EXIT_THRESHOLD_THRESHOLD_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL 0x00001400 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_CURRENTSTATE 0:0 /* R-EVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_CURRENTSTATE_LP 0x00000001 /* R---V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_CURRENTSTATE_FB 0x00000000 /* R-E-V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_REMOTEDESIRED 1:1 /* R-EVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_REMOTEDESIRED_LP 0x00000001 /* R---V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_REMOTEDESIRED_FB 0x00000000 /* R-E-V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_SOFTWAREDESIRED 2:2 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_SOFTWAREDESIRED_LP 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_SOFTWAREDESIRED_FB 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_HARDWAREDISABLE 3:3 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_HARDWAREDISABLE_INIT 0x00000000 /* RWE-V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_HARDWAREDESIRED 4:4 /* R-EVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_HARDWAREDESIRED_LP 0x00000001 /* R---V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_HARDWAREDESIRED_FB 0x00000000 /* R-E-V */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_COUNTSTART 5:5 /* RWEVF */ +#define NV_NVLTLC_TX_LNK_PWRM_IC_SW_CTRL_COUNTSTART_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL 0x00001c00 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_CURRENTSTATE 0:0 /* R-EVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_CURRENTSTATE_LP 0x00000001 /* R---V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_CURRENTSTATE_FB 0x00000000 /* R-E-V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_REMOTEDESIRED 1:1 /* R-EVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_REMOTEDESIRED_LP 0x00000001 /* R---V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_REMOTEDESIRED_FB 0x00000000 /* R-E-V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_SOFTWAREDESIRED 2:2 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_SOFTWAREDESIRED_LP 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_SOFTWAREDESIRED_FB 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_HARDWAREDISABLE 3:3 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_HARDWAREDISABLE_INIT 0x00000000 /* RWE-V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_HARDWAREDESIRED 4:4 /* R-EVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_HARDWAREDESIRED_LP 0x00000001 /* R---V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_HARDWAREDESIRED_FB 0x00000000 /* R-E-V */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_COUNTSTART 5:5 /* RWEVF */ +#define NV_NVLTLC_RX_LNK_PWRM_IC_SW_CTRL_COUNTSTART_INIT 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0(i) (0x00001d0c+(i)*0x54) /* RW-4A */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0__SIZE_1 4 /* */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT 2:1 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT_CYCLES 0x00000000 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT_PACKETS 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT_FLITS 0x00000002 /* RW--V */ +#define 
NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT_BYTES 0x00000003 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER 7:3 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_HEAD 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_AE 0x00000002 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_BE 0x00000004 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_DATA 0x00000008 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_IDLE 0x00000010 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE 9:8 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE_INIT 0x00000003 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE_NONE 0x00000000 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE_VCSET0 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE_VCSET1 0x00000002 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE 19:17 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_ONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_TWO 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_FOUR 0x00000002 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_EIGHT 0x00000003 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_SIXTEEN 0x00000004 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_THIRTYTWO 0x00000005 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_SIXTYFOUR 0x00000006 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_ONETWENTYEIGHT 0x00000007 /* RW--V */ + +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0(i) (0x0000150c+(i)*0x54) /* RW-4A */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0__SIZE_1 4 /* */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT 2:1 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT_CYCLES 0x00000000 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT_PACKETS 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT_FLITS 0x00000002 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_UNIT_BYTES 0x00000003 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER 7:3 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_HEAD 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_AE 0x00000002 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_BE 0x00000004 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_DATA 0x00000008 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_FLITFILTER_IDLE 0x00000010 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE 9:8 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE_INIT 0x00000003 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE_NONE 0x00000000 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE_VCSET0 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_VCSETFILTERMODE_VCSET1 0x00000002 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE 19:17 /* RWDVF */ +#define 
NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_ONE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_TWO 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_FOUR 0x00000002 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_EIGHT 0x00000003 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_SIXTEEN 0x00000004 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_THIRTYTWO 0x00000005 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_SIXTYFOUR 0x00000006 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_0_PMSIZE_ONETWENTYEIGHT 0x00000007 /* RW--V */ + +#define NV_NVLTLC_RX_SYS_CTRL_BUFFER_READY 0x00000924 /* RW-4R */ +#define NV_NVLTLC_RX_SYS_CTRL_BUFFER_READY_BUFFERRDY 0:0 /* RWEVF */ +#define NV_NVLTLC_RX_SYS_CTRL_BUFFER_READY_BUFFERRDY_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_SYS_CTRL_BUFFER_READY_BUFFERRDY_DISABLE 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_TX_SYS_CTRL_BUFFER_READY 0x00000124 /* RW-4R */ +#define NV_NVLTLC_TX_SYS_CTRL_BUFFER_READY_BUFFERRDY 0:0 /* RWEVF */ +#define NV_NVLTLC_TX_SYS_CTRL_BUFFER_READY_BUFFERRDY_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_SYS_CTRL_BUFFER_READY_BUFFERRDY_DISABLE 0x00000000 /* RWE-V */ + +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL 0x00001508 /* RW-4R */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_ENTX0 0:0 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_ENTX0_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_ENTX1 1:1 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_ENTX1_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_ENTX2 2:2 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_ENTX2_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_ENTX3 3:3 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_ENTX3_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_RESETTX0 16:16 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_RESETTX0_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_RESETTX1 17:17 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_RESETTX1_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_RESETTX2 18:18 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_RESETTX2_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_RESETTX3 19:19 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_RESETTX3_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE0 24:24 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE0_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE1 25:25 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE1_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE2 26:26 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE2_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE3 27:27 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE3_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_FORCE_ILA_TRIGGER_ENABLE 29:29 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_FORCE_ILA_TRIGGER_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_FORCE_ILA_TRIGGER_ENABLE_DISABLE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_ENABLE 30:30 /* RWDVF */ +#define 
NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_ENABLE_DISABLE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_CAPTURE_ENABLE 31:31 /* RWDVF */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_CAPTURE_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_TX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_CAPTURE_ENABLE_DISABLE 0x00000000 /* RWD-V */ + +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL 0x00001d08 /* RW-4R */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_ENRX0 0:0 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_ENRX0_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_ENRX1 1:1 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_ENRX1_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_ENRX2 2:2 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_ENRX2_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_ENRX3 3:3 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_ENRX3_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_RESETRX0 16:16 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_RESETRX0_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_RESETRX1 17:17 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_RESETRX1_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_RESETRX2 18:18 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_RESETRX2_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_RESETRX3 19:19 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_RESETRX3_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE0 24:24 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE0_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE1 25:25 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE1_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE2 26:26 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE2_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE3 27:27 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_CAPTURE3_INIT 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_ENABLE 30:30 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_ENABLE_DISABLE 0x00000000 /* RWD-V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_CAPTURE_ENABLE 31:31 /* RWDVF */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_CAPTURE_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_NVLTLC_RX_LNK_DEBUG_TP_CNTR_CTRL_HW_TP_CAPTURE_ENABLE_DISABLE 0x00000000 /* RWD-V */ +#endif // __lr10_dev_nvltlc_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvs.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvs.h new file mode 100644 index 000000000..7575a2a70 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvs.h @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvs_h__ +#define __lr10_dev_nvs_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_1 0x00001988 /* RW-4R */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_1_DATA 31:0 /* RWXVF */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_1_DATA_WAS_READ 0x0 /* RW--V */ + +#define NV_PBUS_PRI_TIMEOUT_SAVE_3 0x00001990 /* R--4R */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_3_SUBID 3:0 /* R-XVF */ + +#define NV_PBUS_PRI_TIMEOUT_FECS_ERRCODE 0x0000198C /* RW-4R */ +#define NV_PBUS_PRI_TIMEOUT_FECS_ERRCODE_DATA 31:0 /* RWXVF */ + +#define NV_PBUS_PRI_TIMEOUT_SAVE_0 0x00001984 /* RW-4R */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_TO 0:0 /* RWXVF */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_TO_ERROR 0x1 /* RW--V */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_TO_CLEAR 0x0 /* -W--V */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_TO_NONE 0x0 /* RW--V */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_WRITE 1:1 /* RWXVF */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_WRITE_TRUE 0x1 /* RW--V */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_WRITE_FALSE 0x0 /* RW--V */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_ADDR 25:2 /* RWXVF */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_FECS_TGT 31:31 /* RWXVF */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_FECS_TGT_TRUE 0x1 /* RW--V */ +#define NV_PBUS_PRI_TIMEOUT_SAVE_0_FECS_TGT_FALSE 0x0 /* RW--V */ + +#define NV_PBUS_EXT_CG1 0x00001C04 /* RW-4R */ +#define NV_PBUS_EXT_CG1_SLCG 9:1 /* */ +#define NV_PBUS_EXT_CG1_SLCG_ENABLED 0x00000000 /* */ +#define NV_PBUS_EXT_CG1_SLCG_DISABLED 0x000001ff /* */ +#define NV_PBUS_EXT_CG1_SLCG__PROD 0x00000000 /* */ +#define NV_PBUS_EXT_CG1_SLCG_C11 2:2 /* RWIVF */ +#define NV_PBUS_EXT_CG1_SLCG_C11_ENABLED 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_C11_DISABLED 0x00000001 /* RWI-V */ +#define NV_PBUS_EXT_CG1_SLCG_C11__PROD 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_PRI 4:4 /* RWIVF */ +#define NV_PBUS_EXT_CG1_SLCG_PRI_ENABLED 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_PRI_DISABLED 0x00000001 /* RWI-V */ +#define NV_PBUS_EXT_CG1_SLCG_PRI__PROD 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_UNROLL 5:5 /* RWIVF */ +#define NV_PBUS_EXT_CG1_SLCG_UNROLL_ENABLED 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_UNROLL_DISABLED 0x00000001 /* RWI-V */ +#define NV_PBUS_EXT_CG1_SLCG_UNROLL__PROD 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_ROLL 7:7 /* RWIVF */ +#define NV_PBUS_EXT_CG1_SLCG_ROLL_ENABLED 0x00000000 /* RW--V */ +#define 
NV_PBUS_EXT_CG1_SLCG_ROLL_DISABLED 0x00000001 /* RWI-V */ +#define NV_PBUS_EXT_CG1_SLCG_ROLL__PROD 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_IFR 8:8 /* RWIVF */ +#define NV_PBUS_EXT_CG1_SLCG_IFR_ENABLED 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_IFR_DISABLED 0x00000001 /* RWI-V */ +#define NV_PBUS_EXT_CG1_SLCG_IFR__PROD 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_PMC 10:10 /* RWIVF */ +#define NV_PBUS_EXT_CG1_SLCG_PMC_ENABLED 0x00000000 /* RW--V */ +#define NV_PBUS_EXT_CG1_SLCG_PMC_DISABLED 0x00000001 /* RWI-V */ +#define NV_PBUS_EXT_CG1_SLCG_PMC__PROD 0x00000000 /* RW--V */ + +#define NV_PBUS_INTR_EN_0 0x00001140 /* RW-4R */ +#define NV_PBUS_INTR_EN_0_PRI_SQUASH 1:1 /* RWIVF */ +#define NV_PBUS_INTR_EN_0_PRI_SQUASH_DISABLED 0x00000000 /* RWI-V */ +#define NV_PBUS_INTR_EN_0_PRI_SQUASH_ENABLED 0x00000001 /* RW--V */ +#define NV_PBUS_INTR_EN_0_PRI_FECSERR 2:2 /* RWIVF */ +#define NV_PBUS_INTR_EN_0_PRI_FECSERR_DISABLED 0x00000000 /* RWI-V */ +#define NV_PBUS_INTR_EN_0_PRI_FECSERR_ENABLED 0x00000001 /* RW--V */ +#define NV_PBUS_INTR_EN_0_PRI_TIMEOUT 3:3 /* RWIVF */ +#define NV_PBUS_INTR_EN_0_PRI_TIMEOUT_DISABLED 0x00000000 /* RWI-V */ +#define NV_PBUS_INTR_EN_0_PRI_TIMEOUT_ENABLED 0x00000001 /* RW--V */ +#define NV_PBUS_INTR_EN_0_SW 26:26 /* RWIVF */ +#define NV_PBUS_INTR_EN_0_SW_DISABLED 0x00000000 /* RWI-V */ +#define NV_PBUS_INTR_EN_0_SW_ENABLED 0x00000001 /* RW--V */ + +#define NV_PBUS_INTR_0 0x00001100 /* RW-4R */ +#define NV_PBUS_INTR_0_PRI_SQUASH 1:1 /* RWIVF */ +#define NV_PBUS_INTR_0_PRI_SQUASH_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PBUS_INTR_0_PRI_SQUASH_PENDING 0x00000001 /* R---V */ +#define NV_PBUS_INTR_0_PRI_SQUASH_RESET 0x00000001 /* -W--C */ +#define NV_PBUS_INTR_0_PRI_FECSERR 2:2 /* RWIVF */ +#define NV_PBUS_INTR_0_PRI_FECSERR_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PBUS_INTR_0_PRI_FECSERR_PENDING 0x00000001 /* R---V */ +#define NV_PBUS_INTR_0_PRI_FECSERR_RESET 0x00000001 /* -W--C */ +#define NV_PBUS_INTR_0_PRI_TIMEOUT 3:3 /* RWIVF */ +#define NV_PBUS_INTR_0_PRI_TIMEOUT_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PBUS_INTR_0_PRI_TIMEOUT_PENDING 0x00000001 /* R---V */ +#define NV_PBUS_INTR_0_PRI_TIMEOUT_RESET 0x00000001 /* -W--C */ +#define NV_PBUS_INTR_0_SW 26:26 /* RWIVF */ +#define NV_PBUS_INTR_0_SW_NOT_PENDING 0x00000000 /* R-I-V */ +#define NV_PBUS_INTR_0_SW_PENDING 0x00000001 /* R---V */ +#define NV_PBUS_INTR_0_SW_RESET 0x00000001 /* -W--C */ +#endif // __lr10_dev_nvs_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvs_master.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvs_master.h new file mode 100644 index 000000000..4c628310b --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvs_master.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvs_master_h__ +#define __lr10_dev_nvs_master_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PSMC_BOOT_2 0x00000008 /* R--4R */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION 3:0 /* R-XVF */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_0 0x00000000 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_1 0x00000001 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_2 0x00000002 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_3 0x00000003 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_4 0x00000004 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_5 0x00000005 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_6 0x00000006 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_7 0x00000007 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_8 0x00000008 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_9 0x00000009 /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_A 0x0000000A /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_B 0x0000000B /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_C 0x0000000C /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_D 0x0000000D /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_E 0x0000000E /* R---V */ +#define NV_PSMC_BOOT_2_MINOR_EXTENDED_REVISION_F 0x0000000F /* R---V */ +#define NV_PSMC_BOOT_2_FMODEL 30:30 /* R---F */ +#define NV_PSMC_BOOT_2_FMODEL_NO 0x00000000 /* R---V */ +#define NV_PSMC_BOOT_2_FMODEL_YES 0x00000001 /* R---V */ +#define NV_PSMC_BOOT_2_EMULATION 31:31 /* R-XVF */ +#define NV_PSMC_BOOT_2_EMULATION_NO 0x00000000 /* R---V */ +#define NV_PSMC_BOOT_2_EMULATION_YES 0x00000001 /* R---V */ + +#define NV_PSMC_BOOT_42 0x00000A00 /* R--4R */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION 11:8 /* R-XVF */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_0 0x00000000 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_1 0x00000001 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_2 0x00000002 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_3 0x00000003 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_4 0x00000004 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_5 0x00000005 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_6 0x00000006 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_7 0x00000007 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_8 0x00000008 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_9 0x00000009 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_A 0x0000000A /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_B 0x0000000B /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_C 0x0000000C /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_D 0x0000000D /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_E 0x0000000E /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_EXTENDED_REVISION_F 0x0000000F /* R---V */ +#define 
NV_PSMC_BOOT_42_MINOR_REVISION 15:12 /* R-XVF */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_1 0x00000001 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_2 0x00000002 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_3 0x00000003 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_4 0x00000004 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_5 0x00000005 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_6 0x00000006 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_7 0x00000007 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_8 0x00000008 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_9 0x00000009 /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_10 0x0000000A /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_11 0x0000000B /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_12 0x0000000C /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_13 0x0000000D /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_14 0x0000000E /* R---V */ +#define NV_PSMC_BOOT_42_MINOR_REVISION_15 0x0000000F /* R---V */ +#define NV_PSMC_BOOT_42_MAJOR_REVISION 19:16 /* R-XVF */ +#define NV_PSMC_BOOT_42_MAJOR_REVISION_A 0x0000000A /* R---V */ +#define NV_PSMC_BOOT_42_MAJOR_REVISION_B 0x0000000B /* R---V */ +#define NV_PSMC_BOOT_42_MAJOR_REVISION_C 0x0000000C /* R---V */ +#define NV_PSMC_BOOT_42_MAJOR_REVISION_D 0x0000000D /* R---V */ +#define NV_PSMC_BOOT_42_MAJOR_REVISION_E 0x0000000E /* R---V */ +#define NV_PSMC_BOOT_42_MAJOR_REVISION_F 0x0000000F /* R---V */ +#define NV_PSMC_BOOT_42_ARCHITECTURE 28:24 /* */ +#define NV_PSMC_BOOT_42_ARCHITECTURE_SVNP01 0x00000000 /* */ +#define NV_PSMC_BOOT_42_ARCHITECTURE_LR10 0x00000000 /* */ +#define NV_PSMC_BOOT_42_CHIP_ID 28:20 /* R-XVF */ +#define NV_PSMC_BOOT_42_CHIP_ID_SVNP01 0x00000005 /* R---V */ +#define NV_PSMC_BOOT_42_CHIP_ID_LR10 0x00000006 /* R---V */ + +#define NV_PSMC_BOOT_0 0x00000000 /* R--4R */ +#define NV_PSMC_BOOT_0_ID 31:0 /* */ +#define NV_PSMC_BOOT_0_MINOR_REVISION 3:0 /* R--VF */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_1 0x00000001 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_2 0x00000002 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_3 0x00000003 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_4 0x00000004 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_5 0x00000005 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_6 0x00000006 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_7 0x00000007 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_8 0x00000008 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_9 0x00000009 /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_10 0x0000000A /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_11 0x0000000B /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_12 0x0000000C /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_13 0x0000000D /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_14 0x0000000E /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_15 0x0000000F /* R---V */ +#define NV_PSMC_BOOT_0_MINOR_REVISION_INIT 0x00000001 /* R---V */ +#define NV_PSMC_BOOT_0_MAJOR_REVISION 7:4 /* R--VF */ +#define NV_PSMC_BOOT_0_MAJOR_REVISION_A 0x0000000A /* R---V */ +#define NV_PSMC_BOOT_0_MAJOR_REVISION_B 0x0000000B /* R---V */ +#define NV_PSMC_BOOT_0_MAJOR_REVISION_C 0x0000000C /* R---V */ +#define NV_PSMC_BOOT_0_MAJOR_REVISION_D 0x0000000D /* R---V */ +#define NV_PSMC_BOOT_0_MAJOR_REVISION_E 0x0000000E /* R---V */ +#define NV_PSMC_BOOT_0_MAJOR_REVISION_F 0x0000000F /* R---V */ +#define NV_PSMC_BOOT_0_MAJOR_REVISION_INIT 0x00000000 /* R---V */ +#define NV_PSMC_BOOT_0_RESERVED_0 11:8 /* */ 
+#define NV_PSMC_BOOT_0_ARCHITECTURE 28:24 /* R--VF */ +#define NV_PSMC_BOOT_0_ARCHITECTURE_SVNP01 0x00000000 /* R---V */ +#define NV_PSMC_BOOT_0_ARCHITECTURE_LR10 0x00000000 /* R---V */ + +#define NV_PSMC_INTR_LEGACY 0x00000100 /* R--4R */ +#define NV_PSMC_INTR_LEGACY_PTIMER 20:20 /* R--VF */ +#define NV_PSMC_INTR_LEGACY_PMGR 21:21 /* R--VF */ +#define NV_PSMC_INTR_LEGACY_SAW 22:22 /* R--VF */ +#define NV_PSMC_INTR_LEGACY_DECODE_TRAP_PRIV_LEVEL_VIOLATION 24:24 /* R--VF */ +#define NV_PSMC_INTR_LEGACY_DECODE_TRAP_WRITE_DROPPED 25:25 /* R--VF */ +#define NV_PSMC_INTR_LEGACY_RING_MANAGE_SUCCESS 26:26 /* R--VF */ +#define NV_PSMC_INTR_LEGACY_PBUS 28:28 /* R--VF */ +#define NV_PSMC_INTR_LEGACY_XVE 29:29 /* R--VF */ +#define NV_PSMC_INTR_LEGACY_PRIV_RING 30:30 /* R--VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY 0x00000160 /* -W-4R */ +#define NV_PSMC_INTR_EN_SET_LEGACY_PTIMER 20:20 /* -W-VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY_PMGR 21:21 /* -W-VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY_SAW 22:22 /* -W-VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY_DECODE_TRAP_PRIV_LEVEL_VIOLATION 24:24 /* -W-VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY_DECODE_TRAP_WRITE_DROPPED 25:25 /* -W-VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY_RING_MANAGE_SUCCESS 26:26 /* -W-VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY_PBUS 28:28 /* -W-VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY_XVE 29:29 /* -W-VF */ +#define NV_PSMC_INTR_EN_SET_LEGACY_PRIV_RING 30:30 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY 0x00000180 /* -W-4R */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_PTIMER 20:20 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_PMGR 21:21 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_SAW 22:22 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_DECODE_TRAP_PRIV_LEVEL_VIOLATION 24:24 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_DECODE_TRAP_WRITE_DROPPED 25:25 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_RING_MANAGE_SUCCESS 26:26 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_PBUS 28:28 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_XVE 29:29 /* -W-VF */ +#define NV_PSMC_INTR_EN_CLR_LEGACY_PRIV_RING 30:30 /* -W-VF */ +#endif // __lr10_dev_nvs_master_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nvs_top.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nvs_top.h new file mode 100644 index 000000000..561398570 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nvs_top.h @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nvs_top_h__ +#define __lr10_dev_nvs_top_h__ +/* This file is autogenerated. Do not edit */ +#define NV_SWPTOP /* R--4P */ +#define NV_SWPTOP_TABLE_BASE_ADDRESS_OFFSET 0x0002c000 /* */ +#define NV_SWPTOP_ENTRY 1:0 /* R-EVF */ +#define NV_SWPTOP_ENTRY_INVALID 0x00000000 /* R-E-V */ +#define NV_SWPTOP_ENTRY_ENUM 0x00000001 /* R---V */ +#define NV_SWPTOP_ENTRY_DATA1 0x00000002 /* R---V */ +#define NV_SWPTOP_ENTRY_DATA2 0x00000003 /* R---V */ +#define NV_SWPTOP_CONTENTS 30:2 /* R-EVF */ +#define NV_SWPTOP_CONTENTS_INIT 0x00000000 /* R-E-V */ +#define NV_SWPTOP_CHAIN 31:31 /* R-EVF */ +#define NV_SWPTOP_CHAIN_DISABLE 0x00000000 /* R-E-V */ +#define NV_SWPTOP_CHAIN_ENABLE 0x00000001 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE 9:2 /* R--UF */ +#define NV_SWPTOP_ENUM_DEVICE_INVALID 0x0 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PTOP 0x1 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SIOCTRL 0x2 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SIOCTRL_BCAST 0x3 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_NPG 0x4 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_NPG_BCAST 0x5 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SWX 0x6 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SWX_BCAST 0x7 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_CLKS 0x8 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_FUSE 0x9 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_JTAG 0xa /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PMGR 0xb /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SAW 0xc /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_XP3G 0xd /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_XVE 0xe /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_ROM 0xf /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_EXTDEV 0x10 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PRIVMAIN 0x11 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PRIVLOC 0x12 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_NVLW 0x13 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_NVLW_BCAST 0x14 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_NXBAR 0x15 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_NXBAR_BCAST 0x16 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PXBAR 0x17 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PXBAR_BCAST 0x18 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PCIE 0x19 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PCIE_BCAST 0x1a /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_PTIMER 0x1b /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_TSENSE 0x1c /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SOE 0x1d /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SMR 0x1e /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_I2C 0x1f /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SMBPBI 0x20 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_SE 0x21 /* R---V */ +#define NV_SWPTOP_ENUM_DEVICE_THERM 0x22 /* R---V */ +#define NV_SWPTOP_ENUM_ID 17:10 /* R--UF */ +#define NV_SWPTOP_ENUM_RESERVED 19:18 /* R--UF */ +#define NV_SWPTOP_ENUM_VERSION 30:20 /* R--UF */ +#define NV_SWPTOP_DATA1_RESET 6:2 /* R--UF */ +#define NV_SWPTOP_DATA1_INTR 11:7 /* R--UF */ +#define NV_SWPTOP_DATA1_RESERVED2 11:2 /* R--UF */ +#define NV_SWPTOP_DATA1_CLUSTER_TYPE 16:12 /* R--UF */ +#define NV_SWPTOP_DATA1_CLUSTER_TYPE_INVALID 0x0 /* R---V */ +#define NV_SWPTOP_DATA1_CLUSTER_TYPE_SYS 0x1 /* R---V */ +#define NV_SWPTOP_DATA1_CLUSTER_TYPE_PRT 0x2 /* R---V */ +#define NV_SWPTOP_DATA1_CLUSTER_NUMBER 21:17 /* R--UF */ +#define 
NV_SWPTOP_DATA1_RESERVED 30:22 /* R--UF */ + #define NV_SWPTOP_DATA1_PTOP_LENGTH 30:2 /* R--UF */ +#define NV_SWPTOP_DATA2_TYPE 30:26 /* R--UF */ +#define NV_SWPTOP_DATA2_TYPE_INVALID 0x0 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_RESERVED 0x1 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_RESETREG 0x2 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_INTRREG 0x3 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_DISCOVERY 0x4 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_UNICAST 0x5 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_BROADCAST 0x6 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_MULTICAST0 0x7 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_MULTICAST1 0x8 /* R---V */ +#define NV_SWPTOP_DATA2_TYPE_MULTICAST2 0x9 /* R---V */ +#define NV_SWPTOP_DATA2_ADDR 25:2 /* R--UF */ +#endif // __lr10_dev_nvs_top_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nxbar_tc_global_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nxbar_tc_global_ip.h new file mode 100644 index 000000000..41b84c6e6 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nxbar_tc_global_ip.h @@ -0,0 +1,787 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_nxbar_tc_global_ip_h__ +#define __lr10_dev_nxbar_tc_global_ip_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST 0x00000240 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST 0x00000440 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define 
NV_NXBAR_TC_TILEOUT1_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST 0x00000640 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST 0x00000840 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define 
NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST 0x00000a40 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST 0x00000c40 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define 
NV_NXBAR_TC_TILEOUT5_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST 0x00000e40 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define 
NV_NXBAR_TC_TILEOUT7_ERR_FIRST 0x00001040 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST 0x00001240 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define 
NV_NXBAR_TC_TILEOUT8_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_LOG_DST 19:16 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_LOG_SRC 25:24 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_LOG_TILE_SRC 31:28 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FIRST_LOG_TILE_SRC_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN 0x0000023c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN 0x0000043c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define 
NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN 0x0000063c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define 
NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN 0x0000083c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN 0x00000a3c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define 
NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN 0x00000c3c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define 
NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN 0x00000e3c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN 0x0000103c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define 
NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN 0x0000123c /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define 
NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_FATAL_INTR_EN_EGRESS_CDT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS 0x00000234 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT0_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS 0x00000434 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define 
NV_NXBAR_TC_TILEOUT1_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS 0x00000634 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS 0x00000834 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS 0x00000a34 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define 
NV_NXBAR_TC_TILEOUT4_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS 0x00000c34 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS 0x00000e34 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define 
NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS 0x00001034 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS 0x00001234 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_EGRESS_CDT_PARITY_ERROR 7:7 /* RWIVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_STATUS_EGRESS_CDT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TC_TILEOUT0_ERR_CYA 0x00000230 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT0_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define 
NV_NXBAR_TC_TILEOUT0_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT0_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT1_ERR_CYA 0x00000430 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT1_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TC_TILEOUT1_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT1_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT2_ERR_CYA 0x00000630 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT2_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TC_TILEOUT2_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT2_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT3_ERR_CYA 0x00000830 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT3_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TC_TILEOUT3_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT3_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT4_ERR_CYA 0x00000a30 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT4_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TC_TILEOUT4_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT4_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT5_ERR_CYA 0x00000c30 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT5_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TC_TILEOUT5_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT5_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT6_ERR_CYA 0x00000e30 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT6_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TC_TILEOUT6_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT6_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT7_ERR_CYA 0x00001030 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT7_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TC_TILEOUT7_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT7_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_TILEOUT8_ERR_CYA 0x00001230 /* RW-4R */ +#define NV_NXBAR_TC_TILEOUT8_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TC_TILEOUT8_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ 
+#define NV_NXBAR_TC_TILEOUT8_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_TILEOUT8_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TC_ERROR_STATUS 0x00000090 /* R--4R */ +#define NV_NXBAR_TC_ERROR_STATUS_TILE0 0:0 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILE0_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILE1 1:1 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILE1_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILE2 2:2 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILE2_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILE3 3:3 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILE3_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT0 16:16 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT0_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT1 17:17 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT1_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT2 18:18 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT2_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT3 19:19 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT3_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT4 20:20 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT4_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT5 21:21 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT5_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT6 22:22 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT6_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT7 23:23 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT7_DEFAULT 0x00000000 /* R---V */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT8 24:24 /* R--VF */ +#define NV_NXBAR_TC_ERROR_STATUS_TILEOUT8_DEFAULT 0x00000000 /* R---V */ + +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG 0x00000048 /* RWE4R */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_IDLE_CG_DLY_CNT 5:0 /* RWEVF */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_IDLE_CG_DLY_CNT_HWINIT 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_IDLE_CG_DLY_CNT__PROD 0x00000004 /* RW--V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_IDLE_CG_EN 6:6 /* RWEVF */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_IDLE_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_IDLE_CG_EN_DISABLED 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_IDLE_CG_EN__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STATE_CG_EN 7:7 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STATE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STATE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STATE_CG_EN__PROD 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STALL_CG_DLY_CNT 13:8 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STALL_CG_DLY_CNT_HWINIT 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STALL_CG_DLY_CNT__PROD 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STALL_CG_EN 14:14 /* RWEVF */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STALL_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STALL_CG_EN_DISABLED 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_STALL_CG_EN__PROD 0x00000000 /* RW--V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_QUIESCENT_CG_EN 15:15 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_QUIESCENT_CG_EN_ENABLED 0x00000001 /* */ +#define 
NV_NXBAR_TC_PRI_NXBAR_TC_CG_QUIESCENT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_QUIESCENT_CG_EN__PROD 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_WAKEUP_DLY_CNT 19:16 /* RWEVF */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_WAKEUP_DLY_CNT_HWINIT 0x00000000 /* RWE-V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_WAKEUP_DLY_CNT__PROD 0x00000000 /* RW--V */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_CNT 23:20 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_CNT_FULLSPEED 0x0000000f /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_CNT__PROD 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_DI_DT_SKEW_VAL 27:24 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_DI_DT_SKEW_VAL_HWINIT 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_DI_DT_SKEW_VAL__PROD 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_EN 28:28 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_EN__PROD 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_SW_OVER 29:29 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_SW_OVER_EN 0x00000001 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_SW_OVER_DIS 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_THROT_CLK_SW_OVER__PROD 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_PAUSE_CG_EN 30:30 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_PAUSE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_PAUSE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_PAUSE_CG_EN__PROD 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_HALT_CG_EN 31:31 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_HALT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_HALT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TC_PRI_NXBAR_TC_CG_HALT_CG_EN__PROD 0x00000000 /* */ +#endif // __lr10_dev_nxbar_tc_global_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_nxbar_tile_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_nxbar_tile_ip.h new file mode 100644 index 000000000..c52ca1d32 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_nxbar_tile_ip.h @@ -0,0 +1,156 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __lr10_dev_nxbar_tile_ip_h__ +#define __lr10_dev_nxbar_tile_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NXBAR_TILE_ERR_STATUS 0x00000064 /* RW-4R */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_STATUS_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_PKT_INVALID_DST 7:7 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_PKT_INVALID_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_PKT_PARITY_ERROR 8:8 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_STATUS_INGRESS_PKT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN 0x0000006c /* RW-4R */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BUFFER_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BUFFER_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_EGRESS_CREDIT_OVERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_EGRESS_CREDIT_UNDERFLOW__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_NON_BURSTY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_NON_STICKY_PKT__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_BURST_GT_9_DATA_VC__PROD 
0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_PKT_INVALID_DST 7:7 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_PKT_INVALID_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_PKT_INVALID_DST__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_PKT_PARITY_ERROR 8:8 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_PKT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FATAL_INTR_EN_INGRESS_PKT_PARITY_ERROR__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TILE_ERR_FIRST 0x00000070 /* RW-4R */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_BUFFER_OVERFLOW 0:0 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_BUFFER_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW 1:1 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_BUFFER_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_EGRESS_CREDIT_OVERFLOW 2:2 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_EGRESS_CREDIT_OVERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW 3:3 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_EGRESS_CREDIT_UNDERFLOW_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_NON_BURSTY_PKT 4:4 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_NON_BURSTY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_NON_STICKY_PKT 5:5 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_NON_STICKY_PKT_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC 6:6 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_BURST_GT_9_DATA_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_PKT_INVALID_DST 7:7 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_PKT_INVALID_DST_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_PKT_PARITY_ERROR 8:8 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_INGRESS_PKT_PARITY_ERROR_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_LOG_VC 15:13 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_LOG_VC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_LOG_SRC 19:16 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_LOG_SRC_INIT 0x00000000 /* RWI-V */ +#define NV_NXBAR_TILE_ERR_FIRST_LOG_DST 27:24 /* RWIVF */ +#define NV_NXBAR_TILE_ERR_FIRST_LOG_DST_INIT 0x00000000 /* RWI-V */ + +#define NV_NXBAR_TILE_ERR_CYA 0x00000060 /* RW-4R */ +#define NV_NXBAR_TILE_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL 0:0 /* RWEVF */ +#define NV_NXBAR_TILE_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_DISABLE 0x00000000 /* RWE-V */ +#define NV_NXBAR_TILE_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL_ENABLE 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_ERR_CYA_SRCID_UPDATE_AT_EGRESS_CTRL__PROD 0x00000001 /* RW--V */ + +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG 0x00000048 /* RWE4R */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_IDLE_CG_DLY_CNT 5:0 /* RWEVF */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_IDLE_CG_DLY_CNT_HWINIT 0x00000000 /* RWE-V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_IDLE_CG_DLY_CNT__PROD 0x00000004 /* RW--V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_IDLE_CG_EN 6:6 /* RWEVF */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_IDLE_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_IDLE_CG_EN_DISABLED 0x00000000 /* RWE-V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_IDLE_CG_EN__PROD 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STATE_CG_EN 7:7 /* */ +#define 
NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STATE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STATE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STATE_CG_EN__PROD 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STALL_CG_DLY_CNT 13:8 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STALL_CG_DLY_CNT_HWINIT 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STALL_CG_DLY_CNT__PROD 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STALL_CG_EN 14:14 /* RWEVF */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STALL_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STALL_CG_EN_DISABLED 0x00000000 /* RWE-V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_STALL_CG_EN__PROD 0x00000000 /* RW--V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_QUIESCENT_CG_EN 15:15 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_QUIESCENT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_QUIESCENT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_QUIESCENT_CG_EN__PROD 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_WAKEUP_DLY_CNT 19:16 /* RWEVF */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_WAKEUP_DLY_CNT_HWINIT 0x00000000 /* RWE-V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_WAKEUP_DLY_CNT__PROD 0x00000000 /* RW--V */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_CNT 23:20 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_CNT_FULLSPEED 0x0000000f /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_CNT__PROD 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_DI_DT_SKEW_VAL 27:24 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_DI_DT_SKEW_VAL_HWINIT 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_DI_DT_SKEW_VAL__PROD 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_EN 28:28 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_EN__PROD 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_SW_OVER 29:29 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_SW_OVER_EN 0x00000001 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_SW_OVER_DIS 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_THROT_CLK_SW_OVER__PROD 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_PAUSE_CG_EN 30:30 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_PAUSE_CG_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_PAUSE_CG_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_PAUSE_CG_EN__PROD 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_HALT_CG_EN 31:31 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_HALT_CG_EN_ENABLED 0x00000001 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_HALT_CG_EN_DISABLED 0x00000000 /* */ +#define NV_NXBAR_TILE_PRI_NXBAR_TILE_CG_HALT_CG_EN__PROD 0x00000000 /* */ +#endif // __lr10_dev_nxbar_tile_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_pmgr.h b/src/common/inc/swref/published/nvswitch/lr10/dev_pmgr.h new file mode 100644 index 000000000..2a9fdcbc5 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_pmgr.h @@ -0,0 +1,176 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_pmgr_h__ +#define __lr10_dev_pmgr_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PMGR_GPIO_OUTPUT_CNTL(i) 0x0000D610 +(i) * (0x0000D614 - 0x0000D610) /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL__SIZE_1 31 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL 7:0 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_INIT 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_NORMAL 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_RASTER_SYNC_0 0x00000040 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_RASTER_SYNC_1 0x00000041 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_RASTER_SYNC_2 0x00000042 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_RASTER_SYNC_3 0x00000043 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_RASTER_SYNC(i) ((i) + 0x40) /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_RASTER_SYNC__SIZE_1 4 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_STEREO_0 0x00000048 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_STEREO_1 0x00000049 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_STEREO_2 0x0000004A /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_STEREO_3 0x0000004B /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_STEREO(i) ((i) + 0x48) /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_STEREO__SIZE_1 4 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SWAP_READY_OUT_0 0x00000050 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SWAP_READY_OUT_1 0x00000051 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SWAP_READY_OUT_2 0x00000052 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SWAP_READY_OUT_3 0x00000053 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SWAP_READY_OUT(i) ((i) + 0x50) /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SWAP_READY_OUT__SIZE_1 4 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_VID_PWM_3 0x00000055 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_VID_PWM_2 0x00000056 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_VID_PWM_1 0x00000057 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_FAN_ALERT 0x00000059 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_ADC_MUX_SEL 0x0000005A /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_BA_METER 0x0000005B /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_PWM_OUTPUT 0x0000005C /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_VID_PWM 0x0000005D /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_THERMAL_PWM 0x0000005E /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_THERMAL_SLOWDOWN 0x0000005F /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_0 0x00000060 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_1 
0x00000061 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_2 0x00000062 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_3 0x00000063 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_4 0x00000064 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_5 0x00000065 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_6 0x00000066 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_7 0x00000067 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_8 0x00000068 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_9 0x00000069 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_A 0x0000006A /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_B 0x0000006B /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_C 0x0000006C /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_D 0x0000006D /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_E 0x0000006E /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_DEBUG_PORT_F 0x0000006F /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SPI_CS_1 0x00000071 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SPI_CS_2 0x00000072 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SPI_CS_3 0x00000073 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SPI_CS(i) ((i)+0x70) /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SPI_CS__SIZE_1 4 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR0_TMDS_OUT_PWM 0x00000080 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR0_TMDS_OUT_PINA 0x00000081 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR0_TMDS_OUT_PINB 0x00000082 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR1_TMDS_OUT_PWM 0x00000084 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR1_TMDS_OUT_PINA 0x00000085 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR1_TMDS_OUT_PINB 0x00000086 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR2_TMDS_OUT_PWM 0x00000088 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR2_TMDS_OUT_PINA 0x00000089 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR2_TMDS_OUT_PINB 0x0000008A /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR3_TMDS_OUT_PWM 0x0000008C /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR3_TMDS_OUT_PINA 0x0000008D /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR3_TMDS_OUT_PINB 0x0000008E /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR_TMDS_OUT_PWM(i) (((i)*4) + 0x80)/* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR_TMDS_OUT_PWM__SIZE_1 8 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR_TMDS_OUT_PINA(i) (((i)*4) + 0x81)/* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR_TMDS_OUT_PINA__SIZE_1 8 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR_TMDS_OUT_PINB(i) (((i)*4) + 0x82)/* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_SEL_SOR_TMDS_OUT_PINB__SIZE_1 8 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUTPUT 12:12 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUTPUT_INIT 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUTPUT_0 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUTPUT_1 0x00000001 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUT_EN 13:13 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUT_EN_INIT 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUT_EN_NO 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUT_EN_YES 0x00000001 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_INPUT 14:14 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_INPUT_0 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_INPUT_1 0x00000001 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUT_INV 15:15 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUT_INV_INIT 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUT_INV_DISABLE 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_IO_OUT_INV_ENABLE 
0x00000001 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_PULLUD 17:16 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_PULLUD_INIT 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_PULLUD_NONE 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_PULLUD_UP 0x00000001 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_PULLUD_DOWN 0x00000002 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_PULLUD_RESERVED 0x00000003 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_OPEN_DRAIN 18:18 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_OPEN_DRAIN_INIT 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_OPEN_DRAIN_DISABLE 0x00000000 /* */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_OPEN_DRAIN_ENABLE 0x00000001 /* */ + +#define NV_PMGR_GPIO_OUTPUT_CNTL_TRIGGER 0x0000D604 /* RWI4R */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_TRIGGER_UPDATE 0:0 /* RWIVF */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_TRIGGER_UPDATE_INIT 0x00000000 /* R-I-V */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_TRIGGER_UPDATE_DONE 0x00000000 /* R---V */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_TRIGGER_UPDATE_TRIGGER 0x00000001 /* -W--T */ +#define NV_PMGR_GPIO_OUTPUT_CNTL_TRIGGER_UPDATE_PENDING 0x00000001 /* R---V */ + +#define NV_PMGR_ROM_WINDOW_XVE 0x0000E218 /* RW-4R */ +#define NV_PMGR_ROM_WINDOW_XVE_OFFSET 11:0 /* RWHVF */ +#define NV_PMGR_ROM_WINDOW_XVE_OFFSET_MIN 0x00000000 /* RWH-V */ +#define NV_PMGR_ROM_WINDOW_XVE_OFFSET_MAX 0x00000FFF /* RW--V */ +#define NV_PMGR_ROM_WINDOW_XVE_LIMIT 27:16 /* RWHVF */ +#define NV_PMGR_ROM_WINDOW_XVE_LIMIT_MIN 0x00000000 /* RW--V */ +#define NV_PMGR_ROM_WINDOW_XVE_LIMIT_MAX 0x00000FFF /* RWH-V */ + +#define NV_PMGR_GPIO_INPUT_CNTL_1 0x0000D740 /* RW-4R */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_PINNUM 7:0 /* RWIVF */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_PINNUM_INIT 0x00000001 /* RWI-V */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_INV 8:8 /* RWIVF */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_INV_NO 0x00000000 /* RWI-V */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_INV_YES 0x00000001 /* RW--V */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_READ 9:9 /* R--VF */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_READ_0 0x00000000 /* R---V */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_READ_1 0x00000001 /* R---V */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_BYPASS_FILTER 10:10 /* RWIVF */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_BYPASS_FILTER_INIT 0x00000000 /* RWI-V */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_BYPASS_FILTER_NO 0x00000000 /* RW--V */ +#define NV_PMGR_GPIO_INPUT_CNTL_1_BYPASS_FILTER_YES 0x00000001 /* RW--V */ + +#define NV_PMGR_I2C_TIMING(i) (0x0000D008 + (i) * 0x20) /* RW-4A */ +#define NV_PMGR_I2C_TIMING__SIZE_1 10 /* */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD 11:0 /* RWIVF */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_INIT 0x0000010E /* RWI-V */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_100KHZ 0x000003E8 /* RW--V */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_200KHZ 0x000001F4 /* RW--V */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_300KHZ 0x0000014E /* RW--V */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_400KHZ 0x00000103 /* RW--V */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_500KHZ 0x000000C8 /* RW--V */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_600KHZ 0x000000A7 /* RW--V */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_800KHZ 0x0000007D /* RW--V */ +#define NV_PMGR_I2C_TIMING_SCL_PERIOD_1000KHZ 0x00000064 /* RW--V */ +#define NV_PMGR_I2C_TIMING_IGNORE_ACK 15:15 /* RWIVF */ +#define NV_PMGR_I2C_TIMING_IGNORE_ACK_INIT 0x00000000 /* RWI-V */ +#define NV_PMGR_I2C_TIMING_IGNORE_ACK_DISABLE 0x00000000 /* RW--V */ +#define NV_PMGR_I2C_TIMING_IGNORE_ACK_ENABLE 0x00000001 /* RW--V */ +#define NV_PMGR_I2C_TIMING_TIMEOUT_CLK_CNT 23:16 /* RWIVF */ +#define NV_PMGR_I2C_TIMING_TIMEOUT_CLK_CNT_INIT 
0x00000001 /* R-I-V */ +#define NV_PMGR_I2C_TIMING_TIMEOUT_CHECK 24:24 /* RWIVF */ +#define NV_PMGR_I2C_TIMING_TIMEOUT_CHECK_INIT 0x00000000 /* RWI-V */ +#define NV_PMGR_I2C_TIMING_TIMEOUT_CHECK_DISABLE 0x00000000 /* RW--V */ +#define NV_PMGR_I2C_TIMING_TIMEOUT_CHECK_ENABLE 0x00000001 /* RW--V */ +#endif // __lr10_dev_pmgr_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringmaster.h b/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringmaster.h new file mode 100644 index 000000000..83a9f4a38 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringmaster.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_pri_ringmaster_h__ +#define __lr10_dev_pri_ringmaster_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_PPRIV_MASTER_CG1 0x001200a8 /* RW-4R */ +#define NV_PPRIV_MASTER_CG1_SLCG 0:0 /* RWBVF */ +#define NV_PPRIV_MASTER_CG1_SLCG__PROD 0x00000000 /* RW--V */ +#define NV_PPRIV_MASTER_CG1_SLCG_ENABLED 0x00000000 /* RW--V */ +#define NV_PPRIV_MASTER_CG1_SLCG_DISABLED 0x00000001 /* RWB-V */ + +#define NV_PPRIV_MASTER_RING_COMMAND 0x0012004c /* RW-4R */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD 5:0 /* RWBVF */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_NO_CMD 0x00000000 /* RWB-V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_START_RING 0x00000001 /* RW--T */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ACK_INTERRUPT 0x00000002 /* RW--T */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS 0x00000003 /* RW--T */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_AND_START_RING 0x00000004 /* RW--T */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_NO_TAG_ENUMERATE_AND_START_RING 0x00000005 /* RW--T */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP 9:6 /* RWBVF */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_ALL 0x00000000 /* RWB-V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_GPC 0x00000001 /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_FBP 0x00000002 /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_SYS 0x00000003 /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_ROP_L2 0x00000004 /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_NO_TAG_ALL 0x00000008 /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_NO_TAG_GPC 0x00000009 /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_NO_TAG_FBP 0x0000000a /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_NO_TAG_SYS 0x0000000b /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_CMD_ENUMERATE_STATIONS_BC_GRP_NO_TAG_ROP_L2 0x0000000c /* RW--V */ +#define NV_PPRIV_MASTER_RING_COMMAND_DATA 0x00120048 /* RW-4R */ +#define NV_PPRIV_MASTER_RING_COMMAND_DATA_START_RING_SEED 7:0 /* RWBVF */ +#define NV_PPRIV_MASTER_RING_COMMAND_DATA_START_RING_SEED_INIT 0x00000053 /* RWB-V */ + +#define NV_PPRIV_MASTER_RING_START_RESULTS 0x00120050 /* R--4R */ +#define NV_PPRIV_MASTER_RING_START_RESULTS_CONNECTIVITY 0:0 /* R-BVF */ +#define NV_PPRIV_MASTER_RING_START_RESULTS_CONNECTIVITY_PASS 0x00000001 /* R---V */ +#define NV_PPRIV_MASTER_RING_START_RESULTS_CONNECTIVITY_FAIL 0x00000000 /* R-B-V */ + +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0 0x00120058 /* R--4R */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_GBL_WRITE_ERROR_FBP 31:16 /* R-BVF */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_GBL_WRITE_ERROR_FBP_V 0x00000000 /* R-B-V */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_GBL_WRITE_ERROR_SYS 8:8 /* R-BVF */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_GBL_WRITE_ERROR_SYS_V 0x00000000 /* R-B-V */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_RING_START_CONN_FAULT 0:0 /* R-BVF */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_RING_START_CONN_FAULT_V 0x00000000 /* R-B-V */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_DISCONNECT_FAULT 1:1 /* R-BVF */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_DISCONNECT_FAULT_V 0x00000000 /* R-B-V */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_OVERFLOW_FAULT 2:2 /* R-BVF */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_OVERFLOW_FAULT_V 0x00000000 /* R-B-V */ +#define 
NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_RING_ENUMERATION_FAULT 3:3 /* R-BVF */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_RING_ENUMERATION_FAULT_V 0x00000000 /* R-B-V */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_GPC_RS_MAP_CONFIG_FAULT 4:4 /* R-BVF */ +#define NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0_GPC_RS_MAP_CONFIG_FAULT_V 0x00000000 /* R-B-V */ +#endif // __lr10_dev_pri_ringmaster_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringstation_prt.h b/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringstation_prt.h new file mode 100644 index 000000000..39db5d7cc --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringstation_prt.h @@ -0,0 +1,550 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_pri_ringstation_prt_h__ +#define __lr10_dev_pri_ringstation_prt_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_PPRIV_PRT_PRT0_CG1 0x00128250 /* RW-4R */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT0_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT1_CG1 0x00128a50 /* RW-4R */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ 
+#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT1_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT2_CG1 0x00129250 /* RW-4R */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define 
NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT2_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT3_CG1 0x00129a50 /* RW-4R */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define 
NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT3_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT4_CG1 0x0012a250 /* RW-4R */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define 
NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT4_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT5_CG1 0x0012aa50 /* RW-4R */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ 
+#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT5_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT6_CG1 0x0012b250 /* RW-4R */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define 
NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT6_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT7_CG1 0x0012ba50 /* RW-4R */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define 
NV_PPRIV_PRT_PRT7_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT7_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT8_CG1 0x0012c250 /* RW-4R */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_PRT8_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define 
NV_PPRIV_PRT_PRT8_CG1_SLCG_PM__PROD 0x0 /* RW--V */ + +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR 0x00128120 /* R--4R */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR_ADDRESS 25:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR_ADDRESS_I 0x0000000 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR_MOD 0x0012811c /* R--4R */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR_MOD_ADDRESS 25:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR_MOD_ADDRESS_I 0x0000000 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_ADR 0x00128920 /* R--4R */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_ADR_ADDRESS 25:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_ADR_ADDRESS_I 0x0000000 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_ADR_MOD 0x0012891c /* R--4R */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_ADR_MOD_ADDRESS 25:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_ADR_MOD_ADDRESS_I 0x0000000 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_WRDAT 0x00128124 /* R--4R */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_WRDAT_DATA 31:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_WRDAT_DATA_I 0x00000000 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_WRDAT 0x00128924 /* R--4R */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_WRDAT_DATA 31:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_WRDAT_DATA_I 0x00000000 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO 0x00128128 /* R--4R */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_SUBID 31:24 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_SUBID_I 0x00 /* R-B-V */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_LOCAL_ORDERING 22:22 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_LOCAL_ORDERING_I 0x0 /* R-B-V */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_PRIV_LEVEL 21:20 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_PRIV_LEVEL_I 0x0 /* R-B-V */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_SENDING_RS 17:12 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_SENDING_RS_I 0x00 /* R-B-V */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_PENDING 9:9 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_PENDING_I 0x0 /* R-B-V */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_ORPHAN 8:8 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_ORPHAN_I 0x0 /* R-B-V */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_PRIV_MASTER 7:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO_PRIV_MASTER_I 0x00 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO 0x00128928 /* R--4R */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_SUBID 31:24 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_SUBID_I 0x00 /* R-B-V */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_LOCAL_ORDERING 22:22 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_LOCAL_ORDERING_I 0x0 /* R-B-V */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_PRIV_LEVEL 21:20 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_PRIV_LEVEL_I 0x0 /* R-B-V */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_SENDING_RS 17:12 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_SENDING_RS_I 0x00 /* R-B-V */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_PENDING 9:9 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_PENDING_I 0x0 /* R-B-V */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_ORPHAN 8:8 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_ORPHAN_I 0x0 /* R-B-V */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_PRIV_MASTER 7:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO_PRIV_MASTER_I 0x00 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_CODE 0x0012812c /* R--4R */ +#define 
NV_PPRIV_PRT_PRT0_PRIV_ERROR_CODE_VALUE 31:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT0_PRIV_ERROR_CODE_VALUE_I 0x00000000 /* R-B-V */ + +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_CODE 0x0012892c /* R--4R */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_CODE_VALUE 31:0 /* R-BVF */ +#define NV_PPRIV_PRT_PRT1_PRIV_ERROR_CODE_VALUE_I 0x00000000 /* R-B-V */ + +#define NV_PPRIV_PRT_CG1 0x00000250 /* RW-4R */ +#define NV_PPRIV_PRT_CG1_SLCG_SLOWCLK 0:0 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_SLOWCLK_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_SLOWCLK_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_SLOWCLK__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_CONFIG_REGS 1:1 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_CONFIG_REGS_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_CONFIG_REGS_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_CONFIG_REGS__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_FUNNEL_DECODER 2:2 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_FUNNEL_DECODER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_FUNNEL_DECODER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_FUNNEL_DECODER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_FUNNEL_ARB 3:3 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_FUNNEL_ARB_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_FUNNEL_ARB_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_FUNNEL_ARB__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_HISTORY_BUFFER 4:4 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_HISTORY_BUFFER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_HISTORY_BUFFER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_HISTORY_BUFFER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_MASTER 5:5 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_MASTER_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_MASTER_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_MASTER__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_SLAVE 6:6 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_SLAVE_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_SLAVE_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_SLAVE__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_UCODE_TRAP 7:7 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_UCODE_TRAP_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_UCODE_TRAP_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_UCODE_TRAP__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV 8:8 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_LOC_PRIV 9:9 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_LOC_PRIV_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_LOC_PRIV_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_LOC_PRIV__PROD 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PM 10:10 /* RWBVF */ +#define NV_PPRIV_PRT_CG1_SLCG_PM_ENABLED 0x0 /* RW--V */ +#define NV_PPRIV_PRT_CG1_SLCG_PM_DISABLED 0x1 /* RWB-V */ +#define NV_PPRIV_PRT_CG1_SLCG_PM__PROD 0x0 /* RW--V */ +#endif // __lr10_dev_pri_ringstation_prt_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringstation_sys.h b/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringstation_sys.h new file mode 100644 index 000000000..b737a3fb6 --- /dev/null +++ 
b/src/common/inc/swref/published/nvswitch/lr10/dev_pri_ringstation_sys.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_pri_ringstation_sys_h__ +#define __lr10_dev_pri_ringstation_sys_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PPRIV_SYS_PRIV_ERROR_ADR 0x00122120 /* R--4R */ +#define NV_PPRIV_SYS_PRIV_ERROR_ADR_ADDRESS 25:0 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_ADR_ADDRESS_I 0x00000000 /* R-B-V */ +#define NV_PPRIV_SYS_PRIV_ERROR_ADR_MOD 0x0012211c /* R--4R */ +#define NV_PPRIV_SYS_PRIV_ERROR_ADR_MOD_ADDRESS 25:0 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_ADR_MOD_ADDRESS_I 0x00000000 /* R-B-V */ + +#define NV_PPRIV_SYS_PRIV_ERROR_WRDAT 0x00122124 /* R--4R */ +#define NV_PPRIV_SYS_PRIV_ERROR_WRDAT_DATA 31:0 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_WRDAT_DATA_I 0x00000000 /* R-B-V */ + +#define NV_PPRIV_SYS_PRIV_ERROR_INFO 0x00122128 /* R--4R */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_SUBID 31:24 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_SUBID_I 0x00000000 /* R-B-V */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_LOCAL_ORDERING 22:22 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_LOCAL_ORDERING_I 0x00000000 /* R-B-V */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_PRIV_LEVEL 21:20 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_PRIV_LEVEL_I 0x00000000 /* R-B-V */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_SENDING_RS 17:12 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_SENDING_RS_I 0x00000000 /* R-B-V */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_PENDING 9:9 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_PENDING_I 0x00000000 /* R-B-V */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_ORPHAN 8:8 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_ORPHAN_I 0x00000000 /* R-B-V */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_PRIV_MASTER (8-1):0 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_INFO_PRIV_MASTER_I 0x00000000 /* R-B-V */ + +#define NV_PPRIV_SYS_PRIV_ERROR_CODE 0x0012212c /* R--4R */ +#define NV_PPRIV_SYS_PRIV_ERROR_CODE_VALUE 31:0 /* R-BVF */ +#define NV_PPRIV_SYS_PRIV_ERROR_CODE_VALUE_I 0x00000000 /* R-B-V */ +#endif // __lr10_dev_pri_ringstation_sys_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_route_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_route_ip.h new file mode 100644 index 000000000..cf65f4c2b --- /dev/null +++ 
b/src/common/inc/swref/published/nvswitch/lr10/dev_route_ip.h @@ -0,0 +1,639 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_route_ip_h__ +#define __lr10_dev_route_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_ADDRESS 0x00005488 /* RW-4R */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_ADDRESS_ERROR_ADDRESS 8:0 /* RWIVF */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* RWI-V */ + +#define NV_ROUTE_ERR_GLT_ECC_ERROR_ADDRESS_VALID 0x0000548c /* R--4R */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ + +#define NV_ROUTE_ERR_LOG_EN_0 0x00005404 /* RW-4R */ +#define NV_ROUTE_ERR_LOG_EN_0_ROUTEBUFERR 0:0 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_ROUTEBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_ROUTEBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_ROUTEBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_NOPORTDEFINEDERR 1:1 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_NOPORTDEFINEDERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_NOPORTDEFINEDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_NOPORTDEFINEDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_INVALIDROUTEPOLICYERR 2:2 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_INVALIDROUTEPOLICYERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_INVALIDROUTEPOLICYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_INVALIDROUTEPOLICYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_GLT_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_GLT_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_GLT_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_GLT_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_GLT_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_GLT_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_GLT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_GLT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define 
NV_ROUTE_ERR_LOG_EN_0_TRANSDONERESVERR 5:5 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_TRANSDONERESVERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_TRANSDONERESVERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_TRANSDONERESVERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_PDCTRLPARERR 6:6 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_PDCTRLPARERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_PDCTRLPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_PDCTRLPARERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_NVS_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_NVS_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_NVS_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_NVS_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_NVS_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_NVS_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_NVS_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_NVS_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_CDTPARERR 9:9 /* RWEVF */ +#define NV_ROUTE_ERR_LOG_EN_0_CDTPARERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_LOG_EN_0_CDTPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_LOG_EN_0_CDTPARERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0 0x00005410 /* RW-4R */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_ROUTEBUFERR 0:0 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_ROUTEBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_ROUTEBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_ROUTEBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NOPORTDEFINEDERR 1:1 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NOPORTDEFINEDERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NOPORTDEFINEDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NOPORTDEFINEDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_INVALIDROUTEPOLICYERR 2:2 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_INVALIDROUTEPOLICYERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_INVALIDROUTEPOLICYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_INVALIDROUTEPOLICYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_GLT_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_GLT_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_GLT_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_GLT_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_GLT_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_GLT_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_GLT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_GLT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_TRANSDONERESVERR 5:5 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_TRANSDONERESVERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_TRANSDONERESVERR_DISABLE 0x00000000 /* 
RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_TRANSDONERESVERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_PDCTRLPARERR 6:6 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_PDCTRLPARERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_PDCTRLPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_PDCTRLPARERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NVS_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NVS_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NVS_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NVS_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NVS_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NVS_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NVS_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_NVS_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_CDTPARERR 9:9 /* RWEVF */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_CDTPARERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_CDTPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CORRECTABLE_REPORT_EN_0_CDTPARERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_ROUTE_ERR_TIMESTAMP_LOG 0x00005450 /* R--4R */ +#define NV_ROUTE_ERR_TIMESTAMP_LOG_TIMESTAMP 23:0 /* R-IVF */ +#define NV_ROUTE_ERR_TIMESTAMP_LOG_TIMESTAMP_INIT 0x00000000 /* R-I-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_VALID 0x0000544c /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_VALID_HEADERVALID0 0:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_VALID_HEADERVALID0_INVALID 0x00000000 /* R-D-V */ +#define NV_ROUTE_ERR_HEADER_LOG_VALID_HEADERVALID0_VALID 0x00000001 /* R---V */ + +#define NV_ROUTE_ERR_MISC_LOG_0 0x00005454 /* R--4R */ +#define NV_ROUTE_ERR_MISC_LOG_0_SPORT 5:0 /* R-IVF */ +#define NV_ROUTE_ERR_MISC_LOG_0_SPORT_INIT 0x00000000 /* R-I-V */ +#define NV_ROUTE_ERR_MISC_LOG_0_ENCODEDVC 10:8 /* R-IVF */ +#define NV_ROUTE_ERR_MISC_LOG_0_ENCODEDVC_INIT 0x00000000 /* R-I-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_0 0x00005420 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_0_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_0_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_1 0x00005424 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_1_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_1_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_2 0x00005428 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_2_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_2_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_3 0x0000542c /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_3_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_3_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_4 0x00005430 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_4_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_4_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_5 0x00005434 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_5_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_5_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_6 0x00005438 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_6_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_6_DW_INIT 0x00000000 /* 
R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_7 0x0000543c /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_7_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_7_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_8 0x00005440 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_8_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_8_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_9 0x00005444 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_9_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_9_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_HEADER_LOG_10 0x00005448 /* R--4R */ +#define NV_ROUTE_ERR_HEADER_LOG_10_DW 31:0 /* R-DVF */ +#define NV_ROUTE_ERR_HEADER_LOG_10_DW_INIT 0x00000000 /* R-D-V */ + +#define NV_ROUTE_ERR_STATUS_0 0x00005400 /* RW-4R */ +#define NV_ROUTE_ERR_STATUS_0_ROUTEBUFERR 0:0 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_ROUTEBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_ROUTEBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_NOPORTDEFINEDERR 1:1 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_NOPORTDEFINEDERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_NOPORTDEFINEDERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_INVALIDROUTEPOLICYERR 2:2 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_INVALIDROUTEPOLICYERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_INVALIDROUTEPOLICYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_GLT_ECC_LIMIT_ERR 3:3 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_GLT_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_GLT_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_GLT_ECC_DBE_ERR 4:4 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_GLT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_GLT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_TRANSDONERESVERR 5:5 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_TRANSDONERESVERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_TRANSDONERESVERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_PDCTRLPARERR 6:6 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_PDCTRLPARERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_PDCTRLPARERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_NVS_ECC_LIMIT_ERR 7:7 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_NVS_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_NVS_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_NVS_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_NVS_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_NVS_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_STATUS_0_CDTPARERR 9:9 /* RWDVF */ +#define NV_ROUTE_ERR_STATUS_0_CDTPARERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_STATUS_0_CDTPARERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_ROUTE_ERR_FIRST_0 0x0000541c /* RW-4R */ +#define NV_ROUTE_ERR_FIRST_0_ROUTEBUFERR 0:0 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_ROUTEBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_ROUTEBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_NOPORTDEFINEDERR 1:1 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_NOPORTDEFINEDERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_NOPORTDEFINEDERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_INVALIDROUTEPOLICYERR 2:2 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_INVALIDROUTEPOLICYERR_NONE 0x00000000 /* RWD-V */ +#define 
NV_ROUTE_ERR_FIRST_0_INVALIDROUTEPOLICYERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_GLT_ECC_LIMIT_ERR 3:3 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_GLT_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_GLT_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_GLT_ECC_DBE_ERR 4:4 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_GLT_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_GLT_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_TRANSDONERESVERR 5:5 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_TRANSDONERESVERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_TRANSDONERESVERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_PDCTRLPARERR 6:6 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_PDCTRLPARERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_PDCTRLPARERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_NVS_ECC_LIMIT_ERR 7:7 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_NVS_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_NVS_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_NVS_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_NVS_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_NVS_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FIRST_0_CDTPARERR 9:9 /* RWDVF */ +#define NV_ROUTE_ERR_FIRST_0_CDTPARERR_NONE 0x00000000 /* RWD-V */ +#define NV_ROUTE_ERR_FIRST_0_CDTPARERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_ROUTE_ERR_CONTAIN_EN_0 0x00005414 /* RW-4R */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_ROUTEBUFERR 0:0 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_ROUTEBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_ROUTEBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_ROUTEBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NOPORTDEFINEDERR 1:1 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NOPORTDEFINEDERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NOPORTDEFINEDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NOPORTDEFINEDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_INVALIDROUTEPOLICYERR 2:2 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_INVALIDROUTEPOLICYERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_INVALIDROUTEPOLICYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_INVALIDROUTEPOLICYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_GLT_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_GLT_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_GLT_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_GLT_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_GLT_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_GLT_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_GLT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_GLT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_TRANSDONERESVERR 5:5 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_TRANSDONERESVERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_TRANSDONERESVERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_TRANSDONERESVERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_PDCTRLPARERR 6:6 /* RWEVF */ +#define 
NV_ROUTE_ERR_CONTAIN_EN_0_PDCTRLPARERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_PDCTRLPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_PDCTRLPARERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NVS_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NVS_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NVS_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NVS_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NVS_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NVS_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NVS_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_NVS_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_CDTPARERR 9:9 /* RWEVF */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_CDTPARERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_CDTPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_CONTAIN_EN_0_CDTPARERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_ROUTE_REG_TABLE_ADDRESS 0x00005080 /* RW-4R */ +#define NV_ROUTE_REG_TABLE_ADDRESS_INDEX 8:0 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_ADDRESS_INDEX_MIN 0x00000000 /* RWE-V */ +#define NV_ROUTE_REG_TABLE_ADDRESS_INDEX_GLTAB_DEPTH 0x000001ff /* RW--V */ +#define NV_ROUTE_REG_TABLE_ADDRESS_AUTO_INCR 31:31 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_ADDRESS_AUTO_INCR_DISABLE 0x00000000 /* RW--V */ +#define NV_ROUTE_REG_TABLE_ADDRESS_AUTO_INCR_ENABLE 0x00000001 /* RWE-V */ + +#define NV_ROUTE_REG_TABLE_DATA0 0x00005090 /* RW-4R */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_0 3:0 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_0_INIT 0x00000000 /* RWE-V */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_1 7:4 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_1_INIT 0x00000000 /* RWE-V */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_2 11:8 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_2_INIT 0x00000000 /* RWE-V */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_3 15:12 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_3_INIT 0x00000000 /* RWE-V */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_4 19:16 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_4_INIT 0x00000000 /* RWE-V */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_5 23:20 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_5_INIT 0x00000000 /* RWE-V */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_6 27:24 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_6_INIT 0x00000000 /* RWE-V */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_7 31:28 /* RWEVF */ +#define NV_ROUTE_REG_TABLE_DATA0_GLX_7_INIT 0x00000000 /* RWE-V */ + +#define NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER 0x00005490 /* RW-4R */ +#define NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWIVF */ +#define NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWI-V */ + +#define NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_LIMIT 0x00005494 /* RW-4R */ +#define NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWEVF */ +#define NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWE-V */ +#define NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_ROUTE_ERR_ECC_CTRL 0x00005470 /* RW-4R */ +#define NV_ROUTE_ERR_ECC_CTRL_GLT_ECC_ENABLE 0:0 /* RWEVF */ +#define NV_ROUTE_ERR_ECC_CTRL_GLT_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_ECC_CTRL_GLT_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define 
NV_ROUTE_ERR_ECC_CTRL_GLT_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_ECC_CTRL_NVS_ECC_ENABLE 2:2 /* RWEVF */ +#define NV_ROUTE_ERR_ECC_CTRL_NVS_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_ECC_CTRL_NVS_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_ECC_CTRL_NVS_ECC_ENABLE__PROD 0x00000001 /* RW--V */ + +#define NV_ROUTE_CMD_ROUTE_TABLE0 0x000050a0 /* RW-4R */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN0 1:0 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN0_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN0_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN0_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN0_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN1 5:4 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN1_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN1_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN1_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN1_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN2 9:8 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN2_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN2_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN2_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN2_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN3 13:12 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN3_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN3_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN3_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN3_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN4 17:16 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN4_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN4_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN4_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN4_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN5 21:20 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN5_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN5_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN5_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN5_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN6 25:24 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN6_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN6_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN6_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN6_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN7 29:28 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN7_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN7_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN7_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE0_RFUN7_ALTERNATE 0x00000003 /* RW--V */ + +#define NV_ROUTE_CMD_ROUTE_TABLE1 0x000050a4 /* RW-4R */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN8 1:0 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN8_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN8_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN8_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN8_ALTERNATE 0x00000003 /* 
RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN9 5:4 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN9_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN9_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN9_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN9_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN10 9:8 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN10_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN10_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN10_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN10_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN11 13:12 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN11_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN11_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN11_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN11_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN12 17:16 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN12_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN12_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN12_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN12_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN13 21:20 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN13_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN13_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN13_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN13_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN14 25:24 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN14_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN14_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN14_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN14_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN15 29:28 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN15_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN15_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN15_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE1_RFUN15_ALTERNATE 0x00000003 /* RW--V */ + +#define NV_ROUTE_CMD_ROUTE_TABLE2 0x000050a8 /* RW-4R */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN16 1:0 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN16_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN16_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN16_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN16_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN17 5:4 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN17_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN17_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN17_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN17_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN18 9:8 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN18_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN18_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN18_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN18_ALTERNATE 0x00000003 /* RW--V */ +#define 
NV_ROUTE_CMD_ROUTE_TABLE2_RFUN19 13:12 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN19_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN19_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN19_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN19_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN20 17:16 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN20_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN20_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN20_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN20_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN21 21:20 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN21_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN21_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN21_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN21_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN22 25:24 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN22_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN22_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN22_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN22_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN23 29:28 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN23_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN23_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN23_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE2_RFUN23_ALTERNATE 0x00000003 /* RW--V */ + +#define NV_ROUTE_CMD_ROUTE_TABLE3 0x000050ac /* RW-4R */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN24 1:0 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN24_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN24_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN24_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN24_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN25 5:4 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN25_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN25_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN25_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN25_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN26 9:8 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN26_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN26_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN26_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN26_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN27 13:12 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN27_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN27_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN27_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN27_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN28 17:16 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN28_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN28_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN28_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN28_ALTERNATE 0x00000003 /* RW--V */ +#define 
NV_ROUTE_CMD_ROUTE_TABLE3_RFUN29 21:20 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN29_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN29_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN29_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN29_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN30 25:24 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN30_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN30_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN30_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN30_ALTERNATE 0x00000003 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN31 29:28 /* RWEVF */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN31_SPRAY 0x00000000 /* RWE-V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN31_FIXED 0x00000001 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN31_RANDOM 0x00000002 /* RW--V */ +#define NV_ROUTE_CMD_ROUTE_TABLE3_RFUN31_ALTERNATE 0x00000003 /* RW--V */ + +#define NV_ROUTE_ROUTE_CONTROL 0x00005040 /* RW-4R */ +#define NV_ROUTE_ROUTE_CONTROL_SWECCENB 0:0 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_SWECCENB_HWGEN 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_SWECCENB_SWGEN 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_ECCWRITEBACKENB 1:1 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_ECCWRITEBACKENB_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_ECCWRITEBACKENB_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPERRVCMAP 3:2 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPERRVCMAP_SAME 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPERRVCMAP_INVERT 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPERRVCMAP_ALWAYS0 0x00000002 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPERRVCMAP_ALWAYS1 0x00000003 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_DEBUGENB 4:4 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_DEBUGENB_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_DEBUGENB_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_REQPORTDBGMODE 6:5 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_REQPORTDBGMODE_ERROR 0x00000000 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_REQPORTDBGMODE_ERROR2 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_REQPORTDBGMODE_ADDRESS 0x00000002 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_REQPORTDBGMODE_DBGPORT 0x00000003 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_VCDBGMODE 8:7 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_VCDBGMODE_SAME 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_VCDBGMODE_INVERT 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_VCDBGMODE_ALWAYS0 0x00000002 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_VCDBGMODE_ALWAYS1 0x00000003 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_RSPPORTDBGMODE 10:9 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_RSPPORTDBGMODE_ERROR 0x00000000 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_RSPPORTDBGMODE_ERROR2 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_RSPPORTDBGMODE_ADDRESS 0x00000002 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_RSPPORTDBGMODE_DBGPORT 0x00000003 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPENB 11:11 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPENB_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPENB_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_URRESPENB__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_KILLURTD 12:12 /* RWEVF */ +#define 
NV_ROUTE_ROUTE_CONTROL_KILLURTD_DISABLE 0x00000000 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_KILLURTD_ENABLE 0x00000001 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_TRANSDONERESVOVERRIDE 13:13 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_TRANSDONERESVOVERRIDE_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_TRANSDONERESVOVERRIDE_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_TRANSDONERESVENABLE 14:14 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_TRANSDONERESVENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_TRANSDONERESVENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_PASSPING 15:15 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_PASSPING_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_PASSPING_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_HOPINNXBAR 19:19 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_HOPINNXBAR_DISABLE 0x00000000 /* RW--V */ +#define NV_ROUTE_ROUTE_CONTROL_HOPINNXBAR_ENABLE 0x00000001 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_STOREANDFORWARD 24:24 /* RWEVF */ +#define NV_ROUTE_ROUTE_CONTROL_STOREANDFORWARD_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ROUTE_CONTROL_STOREANDFORWARD_ENABLE 0x00000001 /* RW--V */ + +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0 0x00005408 /* RW-4R */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_ROUTEBUFERR 0:0 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_ROUTEBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_ROUTEBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_ROUTEBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NOPORTDEFINEDERR 1:1 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NOPORTDEFINEDERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NOPORTDEFINEDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NOPORTDEFINEDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_INVALIDROUTEPOLICYERR 2:2 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_INVALIDROUTEPOLICYERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_INVALIDROUTEPOLICYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_INVALIDROUTEPOLICYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_GLT_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_GLT_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_GLT_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_GLT_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_GLT_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_GLT_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_GLT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_GLT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_TRANSDONERESVERR 5:5 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_TRANSDONERESVERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_TRANSDONERESVERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_TRANSDONERESVERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_PDCTRLPARERR 6:6 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_PDCTRLPARERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_PDCTRLPARERR_DISABLE 0x00000000 /* RWE-V */ 
+#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_PDCTRLPARERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NVS_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NVS_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NVS_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NVS_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NVS_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NVS_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NVS_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_NVS_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_CDTPARERR 9:9 /* RWEVF */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_CDTPARERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_CDTPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_FATAL_REPORT_EN_0_CDTPARERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0 0x0000540c /* RW-4R */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_ROUTEBUFERR 0:0 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_ROUTEBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_ROUTEBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_ROUTEBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NOPORTDEFINEDERR 1:1 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NOPORTDEFINEDERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NOPORTDEFINEDERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NOPORTDEFINEDERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_INVALIDROUTEPOLICYERR 2:2 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_INVALIDROUTEPOLICYERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_INVALIDROUTEPOLICYERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_INVALIDROUTEPOLICYERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_GLT_ECC_LIMIT_ERR 3:3 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_GLT_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_GLT_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_GLT_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_GLT_ECC_DBE_ERR 4:4 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_GLT_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_GLT_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_GLT_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_TRANSDONERESVERR 5:5 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_TRANSDONERESVERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_TRANSDONERESVERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_TRANSDONERESVERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_PDCTRLPARERR 6:6 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_PDCTRLPARERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_PDCTRLPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_PDCTRLPARERR_ENABLE 0x00000001 /* 
RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NVS_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NVS_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NVS_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NVS_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NVS_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NVS_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NVS_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_NVS_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_CDTPARERR 9:9 /* RWEVF */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_CDTPARERR__PROD 0x00000000 /* RW--V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_CDTPARERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_ROUTE_ERR_NON_FATAL_REPORT_EN_0_CDTPARERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_ROUTE_ERR_GLT_ECC_ERROR_COUNTER_LIMIT 0x00005484 /* RW-4R */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWEVF */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWE-V */ +#define NV_ROUTE_ERR_GLT_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ +#endif // __lr10_dev_route_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_route_ip_addendum.h b/src/common/inc/swref/published/nvswitch/lr10/dev_route_ip_addendum.h new file mode 100644 index 000000000..45f85f050 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_route_ip_addendum.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_route_ip_addendum_h__ +#define __lr10_dev_route_ip_addendum_h__ + +// NV_ROUTE_REG_TABLE_DATA0 definition in the manuals have no indexing. 
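The indexed form defined just below, NV_ROUTE_REG_TABLE_DATA0_GLX(i), supplies that missing indexing: it names the bit range 4*(i)+3 : 4*(i)+0, i.e. the same nibble that the per-entry GLX_0..GLX_7 defines above describe one by one. As a minimal sketch of what that range means in ordinary C (illustrative only; route_table_data0_glx is a hypothetical helper, not part of the driver):

    #include <stdint.h>

    /* Illustrative only -- not part of the autogenerated header or the driver.
     * Extract GLX entry i from a NV_ROUTE_REG_TABLE_DATA0 word, mirroring the
     * 4*(i)+3 : 4*(i)+0 bit range that NV_ROUTE_REG_TABLE_DATA0_GLX(i) encodes. */
    static inline uint32_t route_table_data0_glx(uint32_t data0, unsigned int i)
    {
        return (data0 >> (4u * i)) & 0xFu;   /* 4-bit field at bits 4*i+3..4*i */
    }

For example, route_table_data0_glx(data0, 5) reads the same field that NV_ROUTE_REG_TABLE_DATA0_GLX_5 (23:20) describes above.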
+ +#define NV_ROUTE_REG_TABLE_DATA0_GLX(i) 4*(i)+3:4*(i)+0 + +#endif // __lr10_dev_route_ip_addendum_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_soe_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_soe_ip.h new file mode 100644 index 000000000..6bb1d563f --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_soe_ip.h @@ -0,0 +1,1117 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_soe_ip_h__ +#define __lr10_dev_soe_ip_h__ +/* This file is autogenerated. Do not edit */ +#define NV_SOE_FALCON_EXTERRADDR 0x0168 /* R--4R */ +#define NV_SOE_FALCON_EXTERRADDR_ADDR 31:0 /* R--VF */ +#define NV_SOE_FALCON_EXTERRSTAT 0x016c /* RW-4R */ +#define NV_SOE_FALCON_EXTERRSTAT_PC 23:0 /* R--VF */ +#define NV_SOE_FALCON_EXTERRSTAT_STAT 27:24 /* R--VF */ +#define NV_SOE_FALCON_EXTERRSTAT_STAT_ACK_POS 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_EXTERRSTAT_STAT_ACK_TOUT 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_EXTERRSTAT_VALID 31:31 /* RWIVF */ +#define NV_SOE_FALCON_EXTERRSTAT_VALID_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_EXTERRSTAT_VALID_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_EXTERRSTAT_VALID_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_HWCFG1 0x012c /* R--4R */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV 3:0 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_INIT 0x00000006 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_1_0 0x00000001 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_2_0 0x00000002 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_3_0 0x00000003 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_4_0 0x00000004 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_5_0 0x00000005 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_6_0 0x00000006 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_7_0 0x00000007 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_SECURITY_MODEL 5:4 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_SECURITY_MODEL_INIT 0x00000003 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_SECURITY_MODEL_NONE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_SECURITY_MODEL_LIGHT 0x00000002 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_SECURITY_MODEL_HEAVY 0x00000003 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_SUBVERSION 7:6 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_SUBVERSION_INIT 0x00000000 /* R-I-V */ +#define 
NV_SOE_FALCON_HWCFG1_CORE_REV_SUBVERSION_0 0x00000000 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_SUBVERSION_1 0x00000001 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_SUBVERSION_2 0x00000002 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CORE_REV_SUBVERSION_3 0x00000003 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_IMEM_PORTS 11:8 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_IMEM_PORTS_INIT 0x00000001 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_DMEM_PORTS 15:12 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_DMEM_PORTS_INIT 0x00000001 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_TAG_WIDTH 20:16 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_TAG_WIDTH_INIT 0x00000010 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_DMEM_TAG_WIDTH 25:21 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_DMEM_TAG_WIDTH_INIT 0x00000010 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_DBG_PRIV_BUS 27:27 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_DBG_PRIV_BUS_INIT 0x00000001 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_DBG_PRIV_BUS_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_DBG_PRIV_BUS_DISABLE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CSB_SIZE_16M 28:28 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_CSB_SIZE_16M_INIT 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_CSB_SIZE_16M_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_CSB_SIZE_16M_FALSE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_PRIV_DIRECT 29:29 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_PRIV_DIRECT_INIT 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_PRIV_DIRECT_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_PRIV_DIRECT_FALSE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_DMEM_APERTURES 30:30 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_DMEM_APERTURES_INIT 0x00000001 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_DMEM_APERTURES_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_DMEM_APERTURES_DISABLE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_IMEM_AUTOFILL 31:31 /* R-IVF */ +#define NV_SOE_FALCON_HWCFG1_IMEM_AUTOFILL_INIT 0x00000001 /* R-I-V */ +#define NV_SOE_FALCON_HWCFG1_IMEM_AUTOFILL_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_HWCFG1_IMEM_AUTOFILL_DISABLE 0x00000000 /* R---V */ +#define NV_SOE_HWCFG 0x0abc /* R--4R */ +#define NV_SOE_HWCFG_EMEM_SIZE 8:0 /* R--VF */ +#define NV_SOE_EMEMC(i) (0x0ac0+(i)*8) /* RW-4A */ +#define NV_SOE_EMEMC__SIZE_1 4 /* */ +#define NV_SOE_EMEMC_OFFS 7:2 /* RWIVF */ +#define NV_SOE_EMEMC_OFFS_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_EMEMC_BLK 15:8 /* RWIVF */ +#define NV_SOE_EMEMC_BLK_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_EMEMC_AINCW 24:24 /* RWIVF */ +#define NV_SOE_EMEMC_AINCW_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_EMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_EMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_EMEMC_AINCR 25:25 /* RWIVF */ +#define NV_SOE_EMEMC_AINCR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_EMEMC_AINCR_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_EMEMC_AINCR_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_EMEMD(i) (0x0ac4+(i)*8) /* RW-4A */ +#define NV_SOE_EMEMD__SIZE_1 4 /* */ +#define NV_SOE_EMEMD_DATA 31:0 /* RW-VF */ +#define NV_SOE_MSGQ_TAIL(i) (0x0c84+(i)*8) /* RW-4A */ +#define NV_SOE_MSGQ_TAIL__SIZE_1 8 /* */ +#define NV_SOE_MSGQ_TAIL_VAL 31:0 /* RWIUF */ +#define NV_SOE_MSGQ_TAIL_VAL_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_MSGQ_HEAD(i) (0x0c80+(i)*8) /* RW-4A */ +#define NV_SOE_MSGQ_HEAD__SIZE_1 8 /* */ +#define NV_SOE_MSGQ_HEAD_VAL 31:0 /* RWIUF */ +#define NV_SOE_MSGQ_HEAD_VAL_INIT 0x00000000 /* RWI-V */ 
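/*
 * Illustrative sketch, not from the manuals or the header text: how the
 * EMEMC/EMEMD port pair defined above is typically driven.  EMEMC selects a
 * block (bits 15:8, NV_SOE_EMEMC_BLK) and a 32-bit word offset (bits 7:2,
 * NV_SOE_EMEMC_OFFS); with AINCW (bit 24) set, each write to the matching
 * EMEMD port advances the offset automatically.  soe_reg_wr32() is a
 * hypothetical register accessor standing in for the driver's real SOE I/O
 * helper; only the NV_SOE_EMEMC()/NV_SOE_EMEMD() offsets and field layout
 * come from this header.
 */
static void soe_emem_write(unsigned int port, unsigned int blk,
                           unsigned int offs, const unsigned int *data,
                           unsigned int count)
{
    unsigned int ememc;

    ememc  = (offs & 0x3f) << 2;   /* NV_SOE_EMEMC_OFFS  7:2   */
    ememc |= (blk  & 0xff) << 8;   /* NV_SOE_EMEMC_BLK   15:8  */
    ememc |= 1u << 24;             /* NV_SOE_EMEMC_AINCW_TRUE  */
    soe_reg_wr32(NV_SOE_EMEMC(port), ememc);

    while (count-- != 0)           /* offset auto-increments on each write */
        soe_reg_wr32(NV_SOE_EMEMD(port), *data++);
}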
+#define NV_SOE_QUEUE_TAIL(i) (0x0c04+(i)*8) /* RW-4A */ +#define NV_SOE_QUEUE_TAIL__SIZE_1 8 /* */ +#define NV_SOE_QUEUE_TAIL_ADDRESS 31:0 /* RWIVF */ +#define NV_SOE_QUEUE_TAIL_ADDRESS_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_QUEUE_HEAD(i) (0x0c00+(i)*8) /* RW-4A */ +#define NV_SOE_QUEUE_HEAD__SIZE_1 8 /* */ +#define NV_SOE_QUEUE_HEAD_ADDRESS 31:0 /* RWIVF */ +#define NV_SOE_QUEUE_HEAD_ADDRESS_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG(i) (0x0600+(i)*4) /* RW-4A */ +#define NV_SOE_FBIF_TRANSCFG__SIZE_1 8 /* */ +#define NV_SOE_FBIF_TRANSCFG_TARGET 1:0 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_TARGET_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_TARGET_LOCAL_FB 0x00000000 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_TARGET_COHERENT_SYSMEM 0x00000001 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_TARGET_NONCOHERENT_SYSMEM 0x00000002 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_MEM_TYPE 2:2 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_MEM_TYPE_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_MEM_TYPE_VIRTUAL 0x00000000 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_MEM_TYPE_PHYSICAL 0x00000001 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_L2C_WR 5:4 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_L2C_WR_INIT 0x00000001 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_L2C_WR_L2_EVICT_FIRST 0x00000000 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_L2C_WR_L2_EVICT_NORMAL 0x00000001 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_L2C_WR_L2_EVICT_LAST 0x00000002 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_L2C_RD 9:8 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_L2C_RD_INIT 0x00000001 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_L2C_RD_L2_EVICT_FIRST 0x00000000 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_L2C_RD_L2_EVICT_NORMAL 0x00000001 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_L2C_RD_L2_EVICT_LAST 0x00000002 /* R---V */ +#define NV_SOE_FBIF_TRANSCFG_WACHK0 12:12 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_WACHK0_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_WACHK0_ENABLE 0x00000001 /* RW--V */ +#define NV_SOE_FBIF_TRANSCFG_WACHK0_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FBIF_TRANSCFG_WACHK1 13:13 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_WACHK1_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_WACHK1_ENABLE 0x00000001 /* RW--V */ +#define NV_SOE_FBIF_TRANSCFG_WACHK1_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FBIF_TRANSCFG_RACHK0 14:14 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_RACHK0_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_RACHK0_ENABLE 0x00000001 /* RW--V */ +#define NV_SOE_FBIF_TRANSCFG_RACHK0_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FBIF_TRANSCFG_RACHK1 15:15 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_RACHK1_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_RACHK1_ENABLE 0x00000001 /* RW--V */ +#define NV_SOE_FBIF_TRANSCFG_RACHK1_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FBIF_TRANSCFG_ENGINE_ID_FLAG 16:16 /* RWIVF */ +#define NV_SOE_FBIF_TRANSCFG_ENGINE_ID_FLAG_BAR2_FN0 0x00000000 /* RWI-V */ +#define NV_SOE_FBIF_TRANSCFG_ENGINE_ID_FLAG_OWN 0x00000001 /* RW--V */ +#define NV_SOE_FBIF_CG1 0x0674 /* RW-4R */ +#define NV_SOE_FBIF_CG1_SLCG 3:0 /* */ +#define NV_SOE_FBIF_CG1_SLCG_ENABLED 0 /* */ +#define NV_SOE_FBIF_CG1_SLCG_DISABLED 0xF /* */ +#define NV_SOE_FBIF_CG1_SLCG__PROD 0x0 /* */ +#define NV_SOE_FBIF_CG1_SLCG_MSD0 0:0 /* RWIVF */ +#define NV_SOE_FBIF_CG1_SLCG_MSD0_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FBIF_CG1_SLCG_MSD0_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FBIF_CG1_SLCG_MSD1 1:1 /* RWIVF */ +#define 
NV_SOE_FBIF_CG1_SLCG_MSD1_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FBIF_CG1_SLCG_MSD1_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FBIF_CG1_SLCG_FB0 2:2 /* RWIVF */ +#define NV_SOE_FBIF_CG1_SLCG_FB0_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FBIF_CG1_SLCG_FB0_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FBIF_CG1_SLCG_FB1 3:3 /* RWIVF */ +#define NV_SOE_FBIF_CG1_SLCG_FB1_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FBIF_CG1_SLCG_FB1_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2 0x0134 /* RWI4R */ +#define NV_SOE_FALCON_CG2_SLCG 17:1 /* */ +#define NV_SOE_FALCON_CG2_SLCG_ENABLED 0 /* */ +#define NV_SOE_FALCON_CG2_SLCG_DISABLED 0x1FFFF /* */ +#define NV_SOE_FALCON_CG2_SLCG__PROD 0x10004 /* */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_DMA 1:1 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_DMA_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_DMA_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_GC6_SR_FSM 2:2 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_GC6_SR_FSM_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_GC6_SR_FSM_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_PIPE 3:3 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_PIPE_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_PIPE_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_DIV 4:4 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_DIV_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_DIV_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_ICD 5:5 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_ICD_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_ICD_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_CFG 6:6 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_CFG_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_CFG_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_CTXSW 7:7 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_CTXSW_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_CTXSW_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_PMB 8:8 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_PMB_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_PMB_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_RF 9:9 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_RF_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_RF_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_MUL 10:10 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_MUL_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_MUL_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_LDST 11:11 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_LDST_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_LDST_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_TSYNC 12:12 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_TSYNC_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_TSYNC_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_GPTMR 13:13 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_GPTMR_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_GPTMR_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_WDTMR 14:14 /* RWIVF */ 
+#define NV_SOE_FALCON_CG2_SLCG_FALCON_WDTMR_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_WDTMR_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_IRQSTAT 15:15 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_IRQSTAT_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_IRQSTAT_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_TOP 16:16 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_TOP_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_TOP_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FBIF 17:17 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FBIF_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FBIF_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_SHA 18:18 /* RWIVF */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_SHA_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CG2_SLCG_FALCON_SHA_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_MISC_CG1 0x083c /* RW-4R */ +#define NV_SOE_MISC_CG1_SLCG 31:0 /* RWIVF */ +#define NV_SOE_MISC_CG1_SLCG_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_MISC_CG1_SLCG_DISABLED 0x800000ff /* RWI-V */ +#define NV_SOE_MISC_CG1_SLCG__PROD 0x000000fe /* RW--V */ +#define NV_SOE_MISC_CG1_SLCG_FALCON 0:0 /* RWIVF */ +#define NV_SOE_MISC_CG1_SLCG_FALCON_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_MISC_CG1_SLCG_FALCON_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_MISC_CG1_SLCG_SCP 2:2 /* RWIVF */ +#define NV_SOE_MISC_CG1_SLCG_SCP_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_MISC_CG1_SLCG_SCP_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_MISC_CG1_SLCG_CSBMASTER 3:3 /* RWIVF */ +#define NV_SOE_MISC_CG1_SLCG_CSBMASTER_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_MISC_CG1_SLCG_CSBMASTER_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_MISC_CG1_SLCG_BAR0 4:4 /* RWIVF */ +#define NV_SOE_MISC_CG1_SLCG_BAR0_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_MISC_CG1_SLCG_BAR0_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_MISC_CG1_SLCG_MISC 5:5 /* RWIVF */ +#define NV_SOE_MISC_CG1_SLCG_MISC_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_MISC_CG1_SLCG_MISC_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_MISC_CG1_SLCG_TOP 31:31 /* RWIVF */ +#define NV_SOE_MISC_CG1_SLCG_TOP_ENABLED 0x00000000 /* RW--V */ +#define NV_SOE_MISC_CG1_SLCG_TOP_DISABLED 0x00000001 /* RWI-V */ +#define NV_SOE_MISC_TOP_CG 0x0840 /* RW-4R */ +#define NV_SOE_MISC_TOP_CG_IDLE_CG_DLY_CNT 5:0 /* RWEVF */ +#define NV_SOE_MISC_TOP_CG_IDLE_CG_DLY_CNT_INIT 0x0000001f /* RWE-V */ +#define NV_SOE_MISC_TOP_CG_IDLE_CG_DLY_CNT__PROD 0x00000002 /* RW--V */ +#define NV_SOE_FALCON_ENGINE 0x03c0 /* RW-4R */ +#define NV_SOE_FALCON_ENGINE_RESET 0:0 /* RWIVF */ +#define NV_SOE_FALCON_ENGINE_RESET_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_ENGINE_RESET_FALSE 0x00000000 /* RWI-V */ +#define NV_SOE_SCP_CTL_STAT 0x0408 /* R--4R */ +#define NV_SOE_SCP_CTL_STAT_DEBUG_MODE 20:20 /* R--VF */ +#define NV_SOE_SCP_CTL_STAT_DEBUG_MODE_DISABLED 0x00000000 /* R---V */ +#define NV_SOE_SCP_CTL_STAT_AES_SCC_DIS 2:2 /* R--VF */ +#define NV_SOE_SCP_CTL_STAT_AES_SCC_DIS_TRUE 0x00000001 /* R---V */ +#define NV_SOE_SCP_CTL_STAT_AES_SCC_DIS_FALSE 0x00000000 /* R---V */ +#define NV_SOE_SCP_CTL_STAT_HSMODE 1:1 /* R--VF */ +#define NV_SOE_SCP_CTL_STAT_HSMODE_TRUE 0x00000001 /* R---V */ +#define NV_SOE_SCP_CTL_STAT_HSMODE_FALSE 0x00000000 /* R---V */ +#define NV_SOE_SCP_CTL_STAT_SBOOT 0:0 /* R--VF */ +#define NV_SOE_SCP_CTL_STAT_SBOOT_TRUE 0x00000001 /* R---V */ +#define 
NV_SOE_SCP_CTL_STAT_SBOOT_FALSE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_MAILBOX0 0x0040 /* RW-4R */ +#define NV_SOE_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */ +#define NV_SOE_FALCON_MAILBOX0_DATA_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_MAILBOX1 0x0044 /* RW-4R */ +#define NV_SOE_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */ +#define NV_SOE_FALCON_MAILBOX1_DATA_INIT 0x00000000 /* RWI-V */ +// check if all of these should be added +#define NV_SOE_FALCON_CPUCTL 0x0100 /* RW-4R */ +#define NV_SOE_FALCON_CPUCTL_IINVAL 0:0 /* -WXVF */ +#define NV_SOE_FALCON_CPUCTL_IINVAL_TRUE 0x00000001 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_IINVAL_FALSE 0x00000000 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_STARTCPU 1:1 /* -WXVF */ +#define NV_SOE_FALCON_CPUCTL_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_SRESET 2:2 /* -WXVF */ +#define NV_SOE_FALCON_CPUCTL_SRESET_TRUE 0x00000001 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_SRESET_FALSE 0x00000000 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_HRESET 3:3 /* -WXVF */ +#define NV_SOE_FALCON_CPUCTL_HRESET_TRUE 0x00000001 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_HRESET_FALSE 0x00000000 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_HALTED 4:4 /* R-XVF */ +#define NV_SOE_FALCON_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_CPUCTL_HALTED_FALSE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_CPUCTL_STOPPED 5:5 /* R-XVF */ +#define NV_SOE_FALCON_CPUCTL_STOPPED_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_CPUCTL_STOPPED_FALSE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_CPUCTL_ALIAS_EN 6:6 /* RWIVF */ +#define NV_SOE_FALCON_CPUCTL_ALIAS_EN_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_CPUCTL_ALIAS_EN_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_CPUCTL_ALIAS_EN_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_CPUCTL_ALIAS 0x0130 /* -W-4R */ +#define NV_SOE_FALCON_CPUCTL_ALIAS_STARTCPU 1:1 /* -WXVF */ +#define NV_SOE_FALCON_CPUCTL_ALIAS_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_SOE_FALCON_CPUCTL_ALIAS_STARTCPU_FALSE 0x00000000 /* -W--V */ + +#define NV_SOE_FALCON_DEBUGINFO 0x0094 /* RW-4R */ +#define NV_SOE_FALCON_DEBUGINFO_DATA 31:0 /* RWXVF */ +#define NV_SOE_FALCON_EXCI 0x00d0 /* R--4R */ +#define NV_SOE_FALCON_EXCI_EXPC 19:0 /* R-XVF */ +#define NV_SOE_FALCON_EXCI_EXCAUSE 24:20 /* R-XVF */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_TRAP0 0x00000000 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_TRAP1 0x00000001 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_TRAP2 0x00000002 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_TRAP3 0x00000003 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_ILL_INS 0x00000008 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_INV_INS 0x00000009 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_MISS_INS 0x0000000a /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_DHIT_INS 0x0000000b /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_SP_OVERFLOW 0x0000000d /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_BRKPT_INS 0x0000000f /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_DMEM_MISS_INS 0x00000010 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_DMEM_DHIT_INS 0x00000011 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_DMEM_PAFAULT_INS 0x00000012 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_DMEM_PERMISSION_INS 0x00000013 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_BROM_CALL_INS 0x00000015 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXCAUSE_KMEM_VIOLATION_INS 0x00000016 /* R---V */ +#define 
NV_SOE_FALCON_EXCI_EXCAUSE_BMEM_PERMISSION_INS 0x00000017 /* R---V */ +#define NV_SOE_FALCON_EXCI_EXPC_HIGH 31:28 /* R-XVF */ +#define NV_SOE_FALCON_SCTL 0x0240 /* RW-4R */ +#define NV_SOE_FALCON_SCTL_LSMODE 0:0 /* RWIVF */ +#define NV_SOE_FALCON_SCTL_LSMODE_FALSE 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_SCTL_LSMODE_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_SCTL_HSMODE 1:1 /* R--VF */ +#define NV_SOE_FALCON_SCTL_HSMODE_FALSE 0x00000000 /* R---V */ +#define NV_SOE_FALCON_SCTL_HSMODE_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_SCTL_LSMODE_LEVEL 5:4 /* RWIVF */ +#define NV_SOE_FALCON_SCTL_LSMODE_LEVEL_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_SCTL_UCODE_LEVEL 5:4 /* */ +#define NV_SOE_FALCON_SCTL_UCODE_LEVEL_INIT 0 /* */ +#define NV_SOE_FALCON_SCTL_DEBUG_PRIV_LEVEL 9:8 /* RWIVF */ +#define NV_SOE_FALCON_SCTL_DEBUG_PRIV_LEVEL_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_SCTL_RESET_LVLM_EN 12:12 /* RWIVF */ +#define NV_SOE_FALCON_SCTL_RESET_LVLM_EN_TRUE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_SCTL_RESET_LVLM_EN_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_SCTL_STALLREQ_CLR_EN 13:13 /* RWIVF */ +#define NV_SOE_FALCON_SCTL_STALLREQ_CLR_EN_TRUE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_SCTL_STALLREQ_CLR_EN_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_SCTL_AUTH_EN 14:14 /* RWIVF */ +#define NV_SOE_FALCON_SCTL_AUTH_EN_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_SCTL_AUTH_EN_FALSE 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_SCTL1 0x0250 /* RW-4R */ +#define NV_SOE_FALCON_SCTL1_CSBLVL_MASK 1:0 /* RWIVF */ +#define NV_SOE_FALCON_SCTL1_CSBLVL_MASK_INIT 0x00000003 /* RWI-V */ +#define NV_SOE_FALCON_SCTL1_EXTLVL_MASK 3:2 /* RWIVF */ +#define NV_SOE_FALCON_SCTL1_EXTLVL_MASK_INIT 0x00000003 /* RWI-V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT 0x0850 /* R-I4R */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HUBIRQ0 0:0 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HUBIRQ0_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HUBIRQ0_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HUBIRQ1 1:1 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HUBIRQ1_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HUBIRQ1_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HOST_STALLINTR 2:2 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HOST_STALLINTR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HOST_STALLINTR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HOST_NOSTALLINTR 3:3 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HOST_NOSTALLINTR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_HOST_NOSTALLINTR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_DISP_INTR 4:4 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_DISP_INTR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_DISP_INTR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_PMGR_INTR 5:5 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_PMGR_INTR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_PMGR_INTR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_RESERVED 31:6 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_RESERVED_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQSTAT_RESERVED_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQMASK 0x085c /* R-I4R */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HUBIRQ0 0:0 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HUBIRQ0_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HUBIRQ0_TRUE 
0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HUBIRQ1 1:1 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HUBIRQ1_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HUBIRQ1_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HOST_STALLINTR 2:2 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HOST_STALLINTR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HOST_STALLINTR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HOST_NOSTALLINTR 3:3 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HOST_NOSTALLINTR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_HOST_NOSTALLINTR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_DISP_INTR 4:4 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQMASK_DISP_INTR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_DISP_INTR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_PMGR_INTR 5:5 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQMASK_PMGR_INTR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_PMGR_INTR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_RESERVED 31:6 /* R-IVF */ +#define NV_SOE_MISC_EXTIO_IRQMASK_RESERVED_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_MISC_EXTIO_IRQMASK_RESERVED_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT 0x0008 /* R--4R */ +#define NV_SOE_FALCON_IRQSTAT_GPTMR 0:0 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_GPTMR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_GPTMR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_WDTMR 1:1 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_WDTMR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_WDTMR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_MTHD 2:2 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_MTHD_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_MTHD_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_CTXSW 3:3 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_CTXSW_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_CTXSW_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_HALT 4:4 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_HALT_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_HALT_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXTERR 5:5 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_EXTERR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_EXTERR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_SWGEN0 6:6 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_SWGEN0_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_SWGEN0_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_SWGEN1 7:7 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_SWGEN1_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_SWGEN1_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT 15:8 /* */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ1 8:8 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ1_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ1_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ2 9:9 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ2_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ2_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ3 10:10 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ3_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ3_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ4 11:11 /* R-IVF */ +#define 
NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ4_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ4_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ5 12:12 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ5_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ5_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ6 13:13 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ6_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ6_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ7 14:14 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ7_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ7_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ8 15:15 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ8_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_EXT_EXTIRQ8_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_DMA 16:16 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_DMA_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_DMA_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_SHA 17:17 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_SHA_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_SHA_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_MEMERR 18:18 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_MEMERR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_MEMERR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_FALCON 19:19 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_FALCON_TURE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_FALCON_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_RISCV 20:20 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_RISCV_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_RISCV_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_TRACE 21:21 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_TRACE_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_TRACE_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS 0x00fc /* R--4R */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_GPTMR 0:0 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_GPTMR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_GPTMR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_WDTMR 1:1 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_WDTMR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_WDTMR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_MTHD 2:2 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_MTHD_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_MTHD_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_CTXSW 3:3 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_CTXSW_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_CTXSW_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_HALT 4:4 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_HALT_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_HALT_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXTERR 5:5 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXTERR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXTERR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_SWGEN0 6:6 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_SWGEN0_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_SWGEN0_TRUE 0x00000001 /* R---V */ +#define 
NV_SOE_FALCON_IRQSTAT_ALIAS_SWGEN1 7:7 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_SWGEN1_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_SWGEN1_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT 15:8 /* */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ1 8:8 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ1_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ1_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ2 9:9 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ2_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ2_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ3 10:10 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ3_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ3_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ4 11:11 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ4_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ4_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ5 12:12 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ5_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ5_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ6 13:13 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ6_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ6_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ7 14:14 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ7_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ7_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ8 15:15 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ8_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_EXT_EXTIRQ8_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_DMA 16:16 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_DMA_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_DMA_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_SHA 17:17 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_SHA_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_SHA_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_MEMERR 18:18 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_MEMERR_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_MEMERR_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_FALCON 19:19 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_FALCON_TURE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_FALCON_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_RISCV 20:20 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_RISCV_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_RISCV_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_TRACE 21:21 /* R-IVF */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_TRACE_TRUE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQSTAT_ALIAS_TRACE_FALSE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMODE 0x000c /* RW-4R */ +#define NV_SOE_FALCON_IRQMODE_LVL_GPTMR 0:0 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_GPTMR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_GPTMR_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_GPTMR_TRUE 0x00000001 /* RW--V */ 
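/*
 * Illustrative sketch, not from the manuals or the header text: decoding a
 * value read back from NV_SOE_FALCON_IRQSTAT using the bit positions defined
 * above (HALT 4:4, EXTERR 5:5, SWGEN0 6:6).  soe_reg_rd32() and the
 * handle_*() routines are hypothetical placeholders, not driver functions;
 * only the register offset and field layout come from this header.
 */
static void soe_poll_irqstat(void)
{
    unsigned int stat = soe_reg_rd32(NV_SOE_FALCON_IRQSTAT);

    if ((stat >> 4) & 1u)   /* _HALT_TRUE:   the Falcon core has halted     */
        handle_soe_halt();
    if ((stat >> 5) & 1u)   /* _EXTERR_TRUE: external bus error reported    */
        handle_soe_exterr();
    if ((stat >> 6) & 1u)   /* _SWGEN0_TRUE: software-generated interrupt 0 */
        handle_soe_swgen0();
}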
+#define NV_SOE_FALCON_IRQMODE_LVL_WDTMR 1:1 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_WDTMR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_WDTMR_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_WDTMR_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_MTHD 2:2 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_MTHD_INIT 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_MTHD_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_MTHD_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_CTXSW 3:3 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_CTXSW_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_CTXSW_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_CTXSW_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_HALT 4:4 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_HALT_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_HALT_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_HALT_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXTERR 5:5 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXTERR_INIT 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXTERR_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXTERR_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SWGEN0 6:6 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_SWGEN0_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SWGEN0_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SWGEN0_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SWGEN1 7:7 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_SWGEN1_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SWGEN1_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SWGEN1_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT 15:8 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_INIT 0x000000fc /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ1 8:8 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ1_TRUE 1 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ1_FALSE 0 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ2 9:9 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ2_TRUE 1 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ2_FALSE 0 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ3 10:10 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ3_TRUE 1 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ3_FALSE 0 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ4 11:11 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ4_TRUE 1 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ4_FALSE 0 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ5 12:12 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ5_TRUE 1 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ5_FALSE 0 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ6 13:13 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ6_TRUE 1 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ6_FALSE 0 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ7 14:14 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ7_TRUE 1 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ7_FALSE 0 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ8 15:15 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ8_TRUE 1 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_EXT_EXTIRQ8_FALSE 0 /* */ +#define NV_SOE_FALCON_IRQMODE_LVL_DMA 16:16 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_DMA_INIT 0x00000000 /* RWI-V */ +#define 
NV_SOE_FALCON_IRQMODE_LVL_DMA_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_DMA_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SHA 17:17 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_SHA_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SHA_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_SHA_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_MEMERR 18:18 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_MEMERR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_MEMERR_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_MEMERR_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_FALCON 19:19 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_FALCON_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_FALCON_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_FALCON_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_RISCV 20:20 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_RISCV_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_RISCV_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_RISCV_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_TRACE 21:21 /* RWIVF */ +#define NV_SOE_FALCON_IRQMODE_LVL_TRACE_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQMODE_LVL_TRACE_TRUE 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQMODE_LVL_TRACE_FALSE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQMASK 0x0018 /* R--4R */ +#define NV_SOE_FALCON_IRQMASK_GPTMR 0:0 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_GPTMR_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_GPTMR_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_WDTMR 1:1 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_WDTMR_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_WDTMR_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_MTHD 2:2 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_MTHD_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_MTHD_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_CTXSW 3:3 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_CTXSW_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_CTXSW_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_HALT 4:4 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_HALT_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_HALT_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXTERR 5:5 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_EXTERR_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_EXTERR_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_SWGEN0 6:6 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_SWGEN0_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_SWGEN0_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_SWGEN1 7:7 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_SWGEN1_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_SWGEN1_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT 15:8 /* */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ1 8:8 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ1_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ1_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ2 9:9 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ2_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ2_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ3 10:10 /* R-IVF */ +#define 
NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ3_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ3_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ4 11:11 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ4_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ4_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ5 12:12 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ5_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ5_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ6 13:13 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ6_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ6_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ7 14:14 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ7_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ7_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ8 15:15 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ8_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_EXT_EXTIRQ8_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_DMA 16:16 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_DMA_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_DMA_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_SHA 17:17 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_SHA_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_SHA_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_MEMERR 18:18 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_MEMERR_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_MEMERR_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_FALCON 19:19 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_FALCON_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_FALCON_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_RISCV 20:20 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_RISCV_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_RISCV_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQMASK_TRACE 21:21 /* R-IVF */ +#define NV_SOE_FALCON_IRQMASK_TRACE_ENABLE 0x00000001 /* R---V */ +#define NV_SOE_FALCON_IRQMASK_TRACE_DISABLE 0x00000000 /* R-I-V */ +#define NV_SOE_FALCON_IRQDEST 0x001c /* RW-4R */ +#define NV_SOE_FALCON_IRQDEST_HOST_GPTMR 0:0 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_GPTMR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_GPTMR_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_GPTMR_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_WDTMR 1:1 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_WDTMR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_WDTMR_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_WDTMR_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_MTHD 2:2 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_MTHD_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_MTHD_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_MTHD_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_CTXSW 3:3 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_CTXSW_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_CTXSW_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_CTXSW_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_HALT 4:4 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_HALT_INIT 0x00000000 /* 
RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_HALT_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_HALT_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXTERR 5:5 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXTERR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXTERR_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXTERR_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_SWGEN0 6:6 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_SWGEN0_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_SWGEN0_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_SWGEN0_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_SWGEN1 7:7 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_SWGEN1_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_SWGEN1_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_SWGEN1_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT 15:8 /* */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ1 8:8 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ1_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ1_FALCON 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ2 9:9 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ2_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ2_FALCON 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ3 10:10 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ3_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ3_FALCON 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ4 11:11 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ4_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ4_FALCON 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ5 12:12 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ5_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ5_FALCON 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ6 13:13 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ6_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ6_FALCON 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ7 14:14 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ7_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ7_FALCON 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ8 15:15 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ8_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_HOST_EXT_EXTIRQ8_FALCON 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_GPTMR 16:16 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_GPTMR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_GPTMR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_GPTMR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_GPTMR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_GPTMR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_WDTMR 17:17 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_WDTMR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_WDTMR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_WDTMR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define 
NV_SOE_FALCON_IRQDEST_TARGET_WDTMR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_WDTMR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_MTHD 18:18 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_MTHD_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_MTHD_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_MTHD_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_MTHD_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_MTHD_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_CTXSW 19:19 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_CTXSW_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_CTXSW_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_CTXSW_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_CTXSW_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_CTXSW_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_HALT 20:20 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_HALT_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_HALT_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_HALT_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_HALT_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_HALT_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXTERR 21:21 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXTERR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXTERR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXTERR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXTERR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXTERR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN0 22:22 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN0_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN0_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN0_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN0_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN0_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN1 23:23 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN1_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN1_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN1_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN1_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_SWGEN1_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT 31:24 /* */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1 24:24 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ1_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2 25:25 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_INIT 0x00000000 /* RWI-V 
*/ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ2_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3 26:26 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ3_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4 27:27 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ4_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5 28:28 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ5_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6 29:29 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ6_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7 30:30 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ7_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8 31:31 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST_TARGET_EXT_EXTIRQ8_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2 0x003c /* RW-4R */ +#define NV_SOE_FALCON_IRQDEST2_HOST_DMA 0:0 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_HOST_DMA_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_DMA_FALCON 0x00000000 /* RW--V */ +#define 
NV_SOE_FALCON_IRQDEST2_HOST_DMA_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_SHA 1:1 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_HOST_SHA_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_SHA_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_SHA_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_MEMERR 2:2 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_HOST_MEMERR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_MEMERR_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_MEMERR_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_FALCON 3:3 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_HOST_FALCON_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_FALCON_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_FALCON_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_RISCV 4:4 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_HOST_RISCV_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_RISCV_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_RISCV_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_TRACE 5:5 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_HOST_TRACE_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_TRACE_FALCON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_HOST_TRACE_HOST 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_DMA 16:16 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_DMA_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_DMA_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_DMA_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_DMA_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_DMA_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_SHA 17:17 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_SHA_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_SHA_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_SHA_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_SHA_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_SHA_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_MEMERR 18:18 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_MEMERR_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_MEMERR_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_MEMERR_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_MEMERR_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_MEMERR_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_FALCON 19:19 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_FALCON_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_FALCON_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_FALCON_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_FALCON_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_FALCON_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_RISCV 20:20 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_RISCV_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_RISCV_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_RISCV_FALCON_IRQ1 0x00000001 /* RW--V */ +#define 
NV_SOE_FALCON_IRQDEST2_TARGET_RISCV_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_RISCV_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_TRACE 21:21 /* RWIVF */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_TRACE_INIT 0x00000000 /* RWI-V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_TRACE_FALCON_IRQ0 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_TRACE_FALCON_IRQ1 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_TRACE_HOST_NORMAL 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQDEST2_TARGET_TRACE_HOST_NONSTALL 0x00000001 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK 0x00e0 /* RW-4R */ +#define NV_SOE_FALCON_IRQSCMASK_GPTMR 0:0 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_GPTMR_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_GPTMR_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_WDTMR 1:1 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_WDTMR_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_WDTMR_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_MTHD 2:2 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_MTHD_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_MTHD_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_CTXSW 3:3 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_CTXSW_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_CTXSW_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_HALT 4:4 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_HALT_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_HALT_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_EXTERR 5:5 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_EXTERR_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_EXTERR_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_SWGEN0 6:6 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_SWGEN0_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_SWGEN0_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_SWGEN1 7:7 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_SWGEN1_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_SWGEN1_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_EXT 15:8 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_ENABLE 0x000000ff /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ1 8:8 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ1_ENABLE 1 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ1_DISABLE 0 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ2 9:9 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ2_ENABLE 1 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ2_DISABLE 0 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ3 10:10 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ3_ENABLE 1 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ3_DISABLE 0 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ4 11:11 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ4_ENABLE 1 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ4_DISABLE 0 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ5 12:12 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ5_ENABLE 1 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ5_DISABLE 0 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ6 13:13 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ6_ENABLE 1 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ6_DISABLE 0 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ7 14:14 /* */ +#define 
NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ7_ENABLE 1 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ7_DISABLE 0 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ8 15:15 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ8_ENABLE 1 /* */ +#define NV_SOE_FALCON_IRQSCMASK_EXT_EXTIRQ8_DISABLE 0 /* */ +#define NV_SOE_FALCON_IRQSCMASK_DMA 16:16 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_DMA_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_DMA_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_SHA 17:17 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_SHA_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_SHA_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_MEMERR 18:18 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_MEMERR_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_MEMERR_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_FALCON 19:19 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_FALCON_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_FALCON_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_RISCV 20:20 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_RISCV_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_RISCV_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQSCMASK_TRACE 21:21 /* RWIVF */ +#define NV_SOE_FALCON_IRQSCMASK_TRACE_ENABLE 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQSCMASK_TRACE_DISABLE 0x00000000 /* RW--V */ +#define NV_SOE_MAILBOX(i) (0x0804+(i)*4) /* RW-4A */ +#define NV_SOE_MAILBOX__SIZE_1 4 /* */ +#define NV_SOE_MAILBOX_DATA 31:0 /* RWIVF */ +#define NV_SOE_MAILBOX_DATA_INIT 0x00000000 /* RWI-V */ + +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK 0x0290 /* RWI4R */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION 2:0 /* RWIVF */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_ENABLED 0x00000007 /* RWI-V */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_DISABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1 1:1 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2 2:2 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_VIOLATION 3:3 /* RWIVF */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_READ_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION 6:4 /* RWIVF */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_ENABLED 0x00000007 /* RWI-V */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_DISABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0 4:4 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define 
NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1 5:5 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2 6:6 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_VIOLATION 7:7 /* RWIVF */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK_WRITE_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ + +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK 0x028c /* RWI4R */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION 2:0 /* RWIVF */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_ENABLED 0x00000007 /* RWI-V */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_DISABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1 1:1 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2 2:2 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_VIOLATION 3:3 /* RWIVF */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_READ_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION 6:4 /* RWIVF */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_ENABLED 0x00000007 /* RWI-V */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_DISABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0 4:4 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1 5:5 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2 6:6 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_VIOLATION 7:7 /* RWIVF */ +#define NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define 
NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK_WRITE_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ + +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK 0x03c4 /* RWI4R */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION 2:0 /* RWIVF */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_ENABLED 0x00000007 /* RWI-V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_ENABLED 0x00000004 /* RW--V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_DISABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1 1:1 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2 2:2 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_VIOLATION 3:3 /* RWIVF */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_READ_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION 6:4 /* RWIVF */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_ENABLED 0x00000007 /* RWI-V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_ENABLED 0x00000004 /* RW--V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_DISABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0 4:4 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1 5:5 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2 6:6 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_VIOLATION 7:7 /* RWIVF */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK_WRITE_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ + +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK 0x027c /* RWI4R */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION 2:0 /* RWIVF */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_ENABLED 0x00000007 /* RWI-V */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_ALL_LEVELS_DISABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 
0x00000001 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1 1:1 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2 2:2 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_VIOLATION 3:3 /* RWIVF */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_READ_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION 6:4 /* RWIVF */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_ENABLED 0x00000007 /* RWI-V */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_ALL_LEVELS_DISABLED 0x00000000 /* RW--V */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0 4:4 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1 5:5 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL1_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2 6:6 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_ENABLE 0x00000001 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL2_DISABLE 0x00000000 /* */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_VIOLATION 7:7 /* RWIVF */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_VIOLATION_REPORT_ERROR 0x00000001 /* RWI-V */ +#define NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK_WRITE_VIOLATION_SOLDIER_ON 0x00000000 /* RW--V */ +#endif // __lr10_dev_soe_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_soe_ip_addendum.h b/src/common/inc/swref/published/nvswitch/lr10/dev_soe_ip_addendum.h new file mode 100644 index 000000000..f0c6fe8fe --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_soe_ip_addendum.h @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __lr10_dev_soe_ip_addendum_h__ +#define __lr10_dev_soe_ip_addendum_h__ + +// +// The detail description about each of the mutex can be found in +// /drivers/resman/arch/nvalloc/common/inc/nv_mutex.h +// +// Enums in the following file will also need to be updated: +// /drivers/nvswitch/common/inc/soemutexreservation.h +// +#define NV_SOE_MUTEX_DEFINES \ + NV_MUTEX_ID_SOE_EMEM_ACCESS, \ + +#define NV_SOE_EMEM_ACCESS_PORT_NVSWITCH (0) +#define NV_SOE_EMEM_ACCESS_PORT_NVWATCH (1) +#define UNUSED_EMEM_ACCESS_PORT_2 (2) +#define UNUSED_EMEM_ACCESS_PORT_3 (3) + +#define NUM_SAW_ENGINE 1 +#define NUM_NVLINK_ENGINE 9 + +#define NUM_TLC_ENGINE 36 +#define NUM_NVLIPT_LNK_ENGINE 36 + +#define NV_PRIV_OFFSET_NVLSAW 0x00028000 + +#define NV_GFW_SOE_EXIT_AND_HALT (NV_PRIV_OFFSET_NVLSAW + NV_NVLSAW_SW_SCRATCH_15) +#define NV_GFW_SOE_EXIT_AND_HALT_REQUESTED 0:0 +#define NV_GFW_SOE_EXIT_AND_HALT_REQUESTED_YES 0x1 +#define NV_GFW_SOE_EXIT_AND_HALT_REQUESTED_NO 0x0 +#define NV_GFW_SOE_EXIT_AND_HALT_TIMEOUT 150000 //150ms + +#define NV_GFW_SOE_BOOT (NV_PRIV_OFFSET_NVLSAW + NV_NVLSAW_SW_SCRATCH_3) +#define NV_GFW_SOE_BOOT_PROGRESS 7:0 +#define NV_GFW_SOE_BOOT_PROGRESS_NOT_STARTED 0x00000000 +#define NV_GFW_SOE_BOOT_PROGRESS_STARTED 0x00000001 +#define NV_GFW_SOE_BOOT_PROGRESS_SECURE_DATA_VERIFY_DONE 0x00000002 +#define NV_GFW_SOE_BOOT_PROGRESS_SYSTEM_VALIDITY_DONE 0x00000003 +#define NV_GFW_SOE_BOOT_PROGRESS_PRELTSSM_OVERRIDES_DONE 0x00000004 +#define NV_GFW_SOE_BOOT_PROGRESS_REPORT_INFOROM_CARVEOUT_DONE 0x00000005 +#define NV_GFW_SOE_BOOT_PROGRESS_REPORT_ROMDIR_DONE 0x00000006 +#define NV_GFW_SOE_BOOT_PROGRESS_REPORT_ERASE_LEDGER_DONE 0x00000007 +#define NV_GFW_SOE_BOOT_PROGRESS_FW_SECURITY_INIT_DONE 0x00000008 +#define NV_GFW_SOE_BOOT_PROGRESS_POSTLTSSM_OVERRIDES_DONE 0x00000009 +#define NV_GFW_SOE_BOOT_PROGRESS_IMAGE_VERIFY_DONE 0x0000000A +#define NV_GFW_SOE_BOOT_PROGRESS_COMPLETED 0x000000FF + +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS 10:8 +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS_UNSUPPORTED 0x00000000 +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS_IN_PROGRESS 0x00000001 +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS_PASS_NO_TRUST 0x00000002 +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS_PASS_TRUSTED 0x00000003 +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS_FAIL 0x00000004 +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS_PASS_UNTRUSTED 0x00000005 +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS_WARN_NO_TRUST 0x00000006 +#define NV_GFW_SOE_BOOT_VALIDATION_STATUS_WARN_TRUSTED 0x00000007 + +#define NV_GFW_SOE_PROGRESS_CODE (NV_PRIV_OFFSET_NVLSAW + NV_NVLSAW_SW_SCRATCH_13) +#define NV_GFW_SOE_PROGRESS_CODE_VALUE 3:0 +#define NV_GFW_SOE_PROGRESS_CODE_VALUE_NOT_STARTED 0x00000000 +#define NV_GFW_SOE_PROGRESS_CODE_VALUE_STARTED 0x00000001 +#define NV_GFW_SOE_PROGRESS_CODE_VALUE_EXIT 0x00000002 +#define NV_GFW_SOE_PROGRESS_CODE_VALUE_EXIT_SECUREMODE 0x00000003 +#define NV_GFW_SOE_PROGRESS_CODE_VALUE_ABORTED 0x00000004 +#define NV_GFW_SOE_PROGRESS_CODE_VALUE_COMPLETED 0x00000005 + +#define NV_SOE_RESET_SEQUENCE (NV_PRIV_OFFSET_NVLSAW + 
NV_NVLSAW_SW_SCRATCH_15) +#define NV_SOE_RESET_SEQUENCE_REQUESTED 0:0 +#define NV_SOE_RESET_SEQUENCE_REQUESTED_YES 0x00000001 +#define NV_SOE_RESET_SEQUENCE_REQUESTED_NO 0x00000000 + +#define NUM_NPG_ENGINE 9 +#define NUM_NPG_BCAST_ENGINE 1 +#define NUM_NPORT_ENGINE 36 +#define NUM_NPORT_MULTICAST_BCAST_ENGINE 1 +#define NUM_NXBAR_ENGINE 4 +#define NUM_NXBAR_BCAST_ENGINE 1 +#define NUM_TILE_ENGINE 16 +#define NUM_TILE_MULTICAST_BCAST_ENGINE 1 +#define NUM_NVLW_ENGINE 9 +#define NUM_BUS_ENGINE 1 + +#define NUM_GIN_ENGINE 0 +#define NUM_SYS_PRI_HUB 0 +#define NUM_PRI_MASTER_RS 0 +#define NUM_MINION_ENGINE NUM_NVLW_ENGINE + + +#endif // __lr10_dev_soe_ip_addendum_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_sourcetrack_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_sourcetrack_ip.h new file mode 100644 index 000000000..0b49e5218 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_sourcetrack_ip.h @@ -0,0 +1,290 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_sourcetrack_ip_h__ +#define __lr10_dev_sourcetrack_ip_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS 0x000064a8 /* R--4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_ERROR_ADDRESS 9:0 /* R-DVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* R-D-V */ + +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID 0x000064ac /* R--4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ + +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS 0x00006488 /* R--4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_ERROR_ADDRESS 9:0 /* R-DVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* R-D-V */ + +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID 0x0000648c /* R--4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ + +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT 0x00006494 /* RW-4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_SOURCETRACK_ERR_LOG_EN_0 0x00006404 /* RW-4R */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR 0:0 /* RWEVF */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define 
NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR 13:13 /* RWEVF */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR 15:15 /* RWEVF */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_SOURCETRACK_TIME_OUT_ERR 28:28 /* RWEVF */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_SOURCETRACK_TIME_OUT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_SOURCETRACK_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_LOG_EN_0_SOURCETRACK_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0 0x00006410 /* RW-4R */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR 0:0 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR 13:13 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define 
NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR 15:15 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR 28:28 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CORRECTABLE_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0 0x00006414 /* RW-4R */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR 0:0 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR 13:13 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR 15:15 /* RWEVF */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_SOURCETRACK_TIME_OUT_ERR 28:28 /* RWEVF */ +#define 
NV_SOURCETRACK_ERR_CONTAIN_EN_0_SOURCETRACK_TIME_OUT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_SOURCETRACK_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_CONTAIN_EN_0_SOURCETRACK_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_SOURCETRACK_ERR_STATUS_0 0x00006400 /* RW-4R */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR 0:0 /* RWDVF */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR 5:5 /* RWDVF */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR 7:7 /* RWDVF */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR 13:13 /* RWDVF */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR 15:15 /* RWDVF */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_STATUS_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_STATUS_0_SOURCETRACK_TIME_OUT_ERR 28:28 /* RWDVF */ +#define NV_SOURCETRACK_ERR_STATUS_0_SOURCETRACK_TIME_OUT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_STATUS_0_SOURCETRACK_TIME_OUT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_SOURCETRACK_ERR_FIRST_0 0x0000641c /* RW-4R */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR 0:0 /* RWDVF */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR 5:5 /* RWDVF */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR 7:7 /* RWDVF */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_CLEAR 
0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR 13:13 /* RWDVF */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR 15:15 /* RWDVF */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_FIRST_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FIRST_0_SOURCETRACK_TIME_OUT_ERR 28:28 /* RWDVF */ +#define NV_SOURCETRACK_ERR_FIRST_0_SOURCETRACK_TIME_OUT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_SOURCETRACK_ERR_FIRST_0_SOURCETRACK_TIME_OUT_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER 0x00006480 /* RW-4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT 0x00006484 /* RW-4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER 0x000064a0 /* RW-4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT 0x000064a4 /* RW-4R */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_SOURCETRACK_ERR_ECC_CTRL 0x00006470 /* RW-4R */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE 0:0 /* RWEVF */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ENABLE 5:5 /* RWEVF */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN1_CRUMBSTORE_ECC_ENABLE 7:7 /* RWEVF */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN1_CRUMBSTORE_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN1_CRUMBSTORE_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_ECC_CTRL_CREQ_TCEN1_CRUMBSTORE_ECC_ENABLE__PROD 0x00000001 /* RW--V */ + 
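/*
 * A minimal sketch of how the HI:LO notation used throughout these headers
 * can be decoded: each field define gives MSB:LSB bit positions within a
 * 32-bit register, and the suffixed defines (_INIT, __PROD, _ENABLE, _CLEAR,
 * ...) are encodings for that field.  FIELD_MASK/FIELD_GET below are assumed
 * helper names used only for this illustration, not the DRF-style field
 * macros the driver itself applies to these defines.
 */
#include <stdint.h>

#define FIELD_MASK(hi, lo)   ((uint32_t)((0xFFFFFFFFu >> (31u - (hi))) & (0xFFFFFFFFu << (lo))))
#define FIELD_GET(hi, lo, v) (((v) & FIELD_MASK(hi, lo)) >> (lo))

/* Example: NV_SOURCETRACK_ERR_STATUS_0 (offset 0x6400), field
 * SOURCETRACK_TIME_OUT_ERR at bits 28:28 (_NONE = 0, _CLEAR = 1). */
static int sourcetrack_timeout_pending(uint32_t err_status_0)
{
    /* A nonzero field value means the timeout error is latched; the _CLEAR
     * encoding suggests it is cleared by writing 1 back to the same bit. */
    return FIELD_GET(28, 28, err_status_0) != 0;
}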
+#define NV_SOURCETRACK_CTRL 0x00006040 /* RW-4R */ +#define NV_SOURCETRACK_CTRL_COL_RSP_DIS 0:0 /* RWEVF */ +#define NV_SOURCETRACK_CTRL_COL_RSP_DIS_OFF 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_CTRL_COL_RSP_DIS_ON 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_CTRL_STO_ENB 9:9 /* RWEVF */ +#define NV_SOURCETRACK_CTRL_STO_ENB_ON 0x00000001 /* RWE-V */ +#define NV_SOURCETRACK_CTRL_STO_ENB_OFF 0x00000000 /* RW--V */ + +#define NV_SOURCETRACK_MULTISEC_TIMER0 0x00006044 /* RW-4R */ +#define NV_SOURCETRACK_MULTISEC_TIMER0_TIMERVAL0 19:0 /* RWEVF */ +#define NV_SOURCETRACK_MULTISEC_TIMER0_TIMERVAL0_INIT 0x00002710 /* RWE-V */ +#define NV_SOURCETRACK_MULTISEC_TIMER0_TIMERVAL0__PROD 0x00004000 /* RW--V */ + +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0 0x00006408 /* RW-4R */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR 0:0 /* RWEVF */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR 13:13 /* RWEVF */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR 15:15 /* RWEVF */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR 28:28 /* RWEVF */ +#define 
NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_FATAL_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0 0x0000640c /* RW-4R */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR 0:0 /* RWEVF */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR 5:5 /* RWEVF */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR 13:13 /* RWEVF */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR 15:15 /* RWEVF */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR 28:28 /* RWEVF */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_SOURCETRACK_ERR_NON_FATAL_REPORT_EN_0_SOURCETRACK_TIME_OUT_ERR_ENABLE 0x00000001 /* RW--V */ +#endif // __lr10_dev_sourcetrack_ip_h__ diff --git 
a/src/common/inc/swref/published/nvswitch/lr10/dev_therm.h b/src/common/inc/swref/published/nvswitch/lr10/dev_therm.h new file mode 100644 index 000000000..c543428a4 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_therm.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_therm_h__ +#define __lr10_dev_therm_h__ +/* This file is autogenerated. Do not edit */ +#define NV_THERM_TSENSE_THRESHOLD_TEMPERATURES 0x00066f44 /* RW-4R */ +#define NV_THERM_TSENSE_THRESHOLD_TEMPERATURES_WARNING_TEMPERATURE 13:0 /* RWIVF */ +#define NV_THERM_TSENSE_THRESHOLD_TEMPERATURES_WARNING_TEMPERATURE_INIT 0x00000e60 /* RWI-V */ +#define NV_THERM_TSENSE_THRESHOLD_TEMPERATURES_OVERTEMP_TEMPERATURE 29:16 /* RWIVF */ +#define NV_THERM_TSENSE_THRESHOLD_TEMPERATURES_OVERTEMP_TEMPERATURE_INIT 0x000012c0 /* RWI-V */ + +#define NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS 0x00066f28 /* RW-4R */ +#define NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS_TEMPERATURE_OFFSET 13:0 /* RWIVF */ +#define NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS_TEMPERATURE_OFFSET_INIT 0x00000000 /* RWI-V */ +#define NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS_TEMPERATURE_OVERRIDE 29:16 /* RWIVF */ +#define NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS_TEMPERATURE_OVERRIDE_INIT 0x00000000 /* RWI-V */ +#define NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS_TEMPERATURE_OVERRIDE_EN 31:31 /* RWIVF */ +#define NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS_TEMPERATURE_OVERRIDE_EN_INIT 0x00000000 /* RWI-V */ + +#define NV_THERM_TSENSE_MAXIMUM_TEMPERATURE 0x00066f40 /* R--4R */ +#define NV_THERM_TSENSE_MAXIMUM_TEMPERATURE_MAXIMUM_TEMPERATURE 13:0 /* R--VF */ +#define NV_THERM_TSENSE_MAXIMUM_TEMPERATURE_MAX_TEMP_SENSE_NUMBER 21:16 /* R--VF */ + +#define NV_THERM_MSGBOX_COMMAND 0x000660e0 /* RW-4R */ +#define NV_THERM_MSGBOX_COMMAND_DATA 30:0 /* RWIVF */ +#define NV_THERM_MSGBOX_COMMAND_DATA_INIT 0x00000000 /* RWI-V */ +#define NV_THERM_MSGBOX_COMMAND_INTR 31:31 /* RWIVF */ +#define NV_THERM_MSGBOX_COMMAND_INTR_NOT_PENDING 0x00000000 /* RWI-V */ +#define NV_THERM_MSGBOX_COMMAND_INTR_PENDING 0x00000001 /* RW--V */ +#endif // __lr10_dev_therm_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_timer.h b/src/common/inc/swref/published/nvswitch/lr10/dev_timer.h 
new file mode 100644 index 000000000..28f248951 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_timer.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_timer_h__ +#define __lr10_dev_timer_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PTIMER_PRI_TMR_CG1 0x00029600 /* RW-4R */ +#define NV_PTIMER_PRI_TMR_CG1_MONITOR_CG_EN 0:0 /* RWIVF */ +#define NV_PTIMER_PRI_TMR_CG1_MONITOR_CG_EN_ENABLED 0x00000001 /* RW--V */ +#define NV_PTIMER_PRI_TMR_CG1_MONITOR_CG_EN_DISABLED 0x00000000 /* RWI-V */ +#define NV_PTIMER_PRI_TMR_CG1_MONITOR_CG_EN__PROD 0x00000000 /* RW--V */ +#define NV_PTIMER_PRI_TMR_CG1_SLCG 1:1 /* RWIVF */ +#define NV_PTIMER_PRI_TMR_CG1_SLCG_ENABLED 0x00000000 /* RW--V */ +#define NV_PTIMER_PRI_TMR_CG1_SLCG_DISABLED 0x00000001 /* RWI-V */ +#define NV_PTIMER_PRI_TMR_CG1_SLCG__PROD 0x00000000 /* RW--V */ +#endif // __lr10_dev_timer_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_trim.h b/src/common/inc/swref/published/nvswitch/lr10/dev_trim.h new file mode 100644 index 000000000..1c09848aa --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_trim.h @@ -0,0 +1,140 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_trim_h__ +#define __lr10_dev_trim_h__ +/* This file is autogenerated. Do not edit */ +#define NV_PCLOCK_NVSW_CLK_DIST_MODE 0x0002402C /* RW-4R */ +#define NV_PCLOCK_NVSW_CLK_DIST_MODE_SWITCH2CLK_DIST_MODE 0:0 /* RWAUF */ +#define NV_PCLOCK_NVSW_CLK_DIST_MODE_SWITCH2CLK_DIST_MODE_1XCLK 0x00000000 /* RW--V */ +#define NV_PCLOCK_NVSW_CLK_DIST_MODE_SWITCH2CLK_DIST_MODE_2XCLK 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_CLK_DIST_MODE_SWITCH2CLK_DIST_MODE_INIT 0x00000000 /* RWA-V */ + +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF 0x00024004 /* RW-4R */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_MDIV 7:0 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_MDIV_INIT 0x00000003 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_MDIV_MAX 0x000000FF /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_MDIV_MIN 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_NDIV 15:8 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_NDIV_INIT 0x00000050 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_NDIV_MAX 0x000000FF /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_NDIV_MIN 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_PLDIV 21:16 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_PLDIV_INIT 0x00000002 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_PLDIV_MAX 0x0000001F /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_COEFF_PLDIV_MIN 0x00000001 /* RW--V */ + +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG 0x00024000 /* RW-4R */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_ENABLE 0:0 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_ENABLE_INIT 0x00000000 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_ENABLE_NO 0x00000000 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_ENABLE_YES 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_IDDQ 1:1 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_IDDQ_INIT 0x00000001 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_IDDQ_POWER_OFF 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_IDDQ_POWER_ON 0x00000000 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_SYNC_MODE 2:2 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_SYNC_MODE_DISABLE 0x00000000 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_SYNC_MODE_ENABLE 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_SYNC_MODE_INIT 0x00000001 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_LOCK 17:17 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_LOCK_FALSE 0x00000000 /* R---V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_LOCK_TRUE 0x00000001 /* R---V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_SSA 18:18 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_SSA_FALSE 0x00000000 /* R---V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_SSA_TRUE 0x00000001 /* R---V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_SSD 19:19 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_SSD_FALSE 0x00000000 /* R---V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_SSD_TRUE 0x00000001 /* R---V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_BYPASSPLL_CYA 21:21 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_BYPASSPLL_CYA_INIT 0x00000000 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_LOOP_CTRL 23:22 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_LOOP_CTRL_INIT 0x00000000 /* RWA-V */ +#define 
NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_FREQLOCK 24:24 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_FREQLOCK_NO 0x00000000 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_PLL_FREQLOCK_YES 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_RESETB 25:25 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_RESETB_INIT 0x00000000 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_SEL_TESTOUT 28:26 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHPLL_CFG_SEL_TESTOUT_INIT 0x00000000 /* RWA-V */ + +#define NV_PCLOCK_NVSW_SWITCHCLK 0x00024050 /* RW-4R */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX 7:0 /* RWAUF */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_INIT 0x00000001 /* RWA-V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_NVLINK_TXREFCLK 0x00000010 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_PCIE_TXREFCLK 0x00000004 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_PEX_REFCLK 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_PEX_REFCLK_FILLER1 0x00000020 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_PEX_REFCLK_FILLER2 0x00000040 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_PEX_REFCLK_FILLER3 0x00000080 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_SWITCHPLL 0x00000008 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_MUX_TESTCLK 0x00000002 /* RW--V */ +#define NV_PCLOCK_NVSW_SWITCHCLK_RDY_PEX_REFCLK 8:8 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHCLK_RDY_TESTCLK 9:9 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHCLK_RDY_PCIE_TXREFCLK 10:10 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHCLK_RDY_SWITCHPLL 11:11 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHCLK_RDY_NVLINK_TXREFCLK 12:12 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHCLK_RDY_PEX_REFCLK_FILLER1 13:13 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHCLK_RDY_PEX_REFCLK_FILLER2 14:14 /* R--UF */ +#define NV_PCLOCK_NVSW_SWITCHCLK_RDY_PEX_REFCLK_FILLER3 15:15 /* R--UF */ + +#define NV_PCLOCK_NVSW_SYSTEMCLK 0x00024080 /* RW-4R */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX 7:0 /* RWAUF */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_INIT 0x00000001 /* RWA-V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_NVLINK_TXREFCLK 0x00000010 /* RW--V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_PCIE_TXREFCLK 0x00000004 /* RW--V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_PEX_REFCLK 0x00000001 /* RW--V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_PEX_REFCLK_FILLER 0x00000002 /* RW--V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_PEX_REFCLK_FILLER1 0x00000020 /* RW--V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_PEX_REFCLK_FILLER2 0x00000040 /* RW--V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_PEX_REFCLK_FILLER3 0x00000080 /* RW--V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_MUX_SWITCHPLL 0x00000008 /* RW--V */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_SYSTEMCLK_RDY_PEX_REFCLK 8:8 /* R--UF */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_SYSTEMCLK_RDY_PEX_REFCLK_FILLER 9:9 /* R--UF */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_SYSTEMCLK_RDY_PCIE_TXREFCLK 10:10 /* R--UF */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_SYSTEMCLK_RDY_SWITCHPLL 11:11 /* R--UF */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_SYSTEMCLK_RDY_NVLINK_TXREFCLK 12:12 /* R--UF */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_SYSTEMCLK_RDY_PEX_REFCLK_FILLER1 13:13 /* R--UF */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_SYSTEMCLK_RDY_PEX_REFCLK_FILLER2 14:14 /* R--UF */ +#define NV_PCLOCK_NVSW_SYSTEMCLK_SYSTEMCLK_RDY_PEX_REFCLK_FILLER3 15:15 /* R--UF */ + +#define NV_PCLOCK_NVSW_JTAGINTFC 0x00024030 /* RW-4R */ +#define NV_PCLOCK_NVSW_JTAGINTFC_CLK_DIVSEL 2:0 /* RWAUF */ +#define NV_PCLOCK_NVSW_JTAGINTFC_CLK_DIVSEL_INIT 0x00000001 /* RWA-V */ +#define NV_PCLOCK_NVSW_JTAGINTFC_JTAGTM_INTFC_CLK_EN 3:3 /* RWAUF */ +#define 
NV_PCLOCK_NVSW_JTAGINTFC_JTAGTM_INTFC_CLK_EN_INIT 0x00000001 /* RWA-V */ +#define NV_PCLOCK_NVSW_JTAGINTFC_JTAGTM_INTFC_CLK_EN_OFF 0x00000000 /* RW--V */ +#define NV_PCLOCK_NVSW_JTAGINTFC_JTAGTM_INTFC_CLK_EN_ON 0x00000001 /* RW--V */ + +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK 0x00024090 /* RW-4R */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_DISABLE 0:0 /* RWAUF */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_DISABLE_INIT 0x00000000 /* RWA-V */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_DIV_SYNC_WAIT 3:1 /* RWAUF */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_DIV_SYNC_WAIT_INIT 0x00000001 /* RWA-V */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_DIV 13:4 /* RWAUF */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_DIV_INIT 0x0000000C /* RWA-V */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_REFCLK_BUF_EN_CYA 14:14 /* RWAUF */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_REFCLK_BUF_EN_CYA_INIT 0x00000000 /* RWA-V */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_REFCLK_BUF_EN_OVERRIDE 15:15 /* RWAUF */ +#define NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_REFCLK_BUF_EN_OVERRIDE_INIT 0x00000000 /* RWA-V */ +#endif // __lr10_dev_trim_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/dev_tstate_ip.h b/src/common/inc/swref/published/nvswitch/lr10/dev_tstate_ip.h new file mode 100644 index 000000000..91d1bd738 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/dev_tstate_ip.h @@ -0,0 +1,463 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_dev_tstate_ip_h__ +#define __lr10_dev_tstate_ip_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_ADDRESS 0x00003488 /* R--4R */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_ERROR_ADDRESS 9:0 /* R-DVF */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* R-D-V */ + +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID 0x0000348c /* R--4R */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ + +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_ADDRESS 0x00003498 /* R--4R */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_ADDRESS_ERROR_ADDRESS 9:0 /* R-DVF */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_ADDRESS_ERROR_ADDRESS_INIT 0x00000000 /* R-D-V */ + +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID 0x0000349c /* R--4R */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID_VALID 0:0 /* R-DVF */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID_VALID_INVALID 0x00000000 /* R-D-V */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID_VALID_VALID 0x00000001 /* R---V */ + +#define NV_TSTATE_ERR_TD_TID_RAM_ECC_ERROR_COUNTER_LIMIT 0x000034a4 /* RW-4R */ +#define NV_TSTATE_ERR_TD_TID_RAM_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_TSTATE_ERR_TD_TID_RAM_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_TSTATE_ERR_TD_TID_RAM_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_TSTATE_ERR_LOG_EN_0 0x00003404 /* RW-4R */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOLBUFERR 0:0 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOLBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOLBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOLBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOL_ECC_LIMIT_ERR 1:1 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOL_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOL_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOL_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOL_ECC_DBE_ERR 2:2 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOL_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOL_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_TAGPOOL_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTOREBUFERR 3:3 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTOREBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTOREBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTOREBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTORE_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTORE_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define 
NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAMBUFERR 6:6 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAMBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAMBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAMBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAM_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAM_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAM_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAM_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAM_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_TD_TID_RAM_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_ATO_ERR 9:9 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_ATO_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_ATO_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_ATO_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_CAMRSP_ERR 10:10 /* RWEVF */ +#define NV_TSTATE_ERR_LOG_EN_0_CAMRSP_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_LOG_EN_0_CAMRSP_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_LOG_EN_0_CAMRSP_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0 0x00003410 /* RW-4R */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOLBUFERR 0:0 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOLBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOLBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOLBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR 1:1 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR 2:2 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTOREBUFERR 3:3 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTOREBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTOREBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTOREBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR 5:5 /* RWEVF */ +#define 
NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAMBUFERR 6:6 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAMBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAMBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAMBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_ATO_ERR 9:9 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_ATO_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_ATO_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_ATO_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CAMRSP_ERR 10:10 /* RWEVF */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CAMRSP_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CAMRSP_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CORRECTABLE_REPORT_EN_0_CAMRSP_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_TSTATE_ERR_MISC_LOG_0 0x00003454 /* R--4R */ +#define NV_TSTATE_ERR_MISC_LOG_0_SPORT 5:0 /* R-IVF */ +#define NV_TSTATE_ERR_MISC_LOG_0_SPORT_INIT 0x00000000 /* R-I-V */ +#define NV_TSTATE_ERR_MISC_LOG_0_ENCODEDVC 10:8 /* R-IVF */ +#define NV_TSTATE_ERR_MISC_LOG_0_ENCODEDVC_CREQ 0x00000000 /* R-I-V */ +#define NV_TSTATE_ERR_MISC_LOG_0_ENCODEDVC_DGD 0x00000001 /* R---V */ +#define NV_TSTATE_ERR_MISC_LOG_0_ENCODEDVC_ATR 0x00000002 /* R---V */ +#define NV_TSTATE_ERR_MISC_LOG_0_ENCODEDVC_ATSD 0x00000003 /* R---V */ +#define NV_TSTATE_ERR_MISC_LOG_0_ENCODEDVC_PROBE 0x00000004 /* R---V */ +#define NV_TSTATE_ERR_MISC_LOG_0_ENCODEDVC_CREQ_TD 0x00000005 /* R---V */ +#define NV_TSTATE_ERR_MISC_LOG_0_ENCODEDVC_DGD_TD 0x00000006 /* R---V */ + +#define NV_TSTATE_ERR_CONTAIN_EN_0 0x00003414 /* RW-4R */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOLBUFERR 0:0 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOLBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOLBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOLBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOL_ECC_LIMIT_ERR 1:1 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOL_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOL_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOL_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V 
*/ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOL_ECC_DBE_ERR 2:2 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOL_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOL_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TAGPOOL_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTOREBUFERR 3:3 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTOREBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTOREBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTOREBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTORE_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTORE_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAMBUFERR 6:6 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAMBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAMBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAMBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAM_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAM_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAM_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAM_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAM_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_TD_TID_RAM_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_ATO_ERR 9:9 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_ATO_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_ATO_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_ATO_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CAMRSP_ERR 10:10 /* RWEVF */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CAMRSP_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CAMRSP_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_CONTAIN_EN_0_CAMRSP_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_TSTATE_ERR_STATUS_0 0x00003400 /* RW-4R */ +#define NV_TSTATE_ERR_STATUS_0_TAGPOOLBUFERR 0:0 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_TAGPOOLBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_TAGPOOLBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_TAGPOOL_ECC_LIMIT_ERR 1:1 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_TAGPOOL_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_TAGPOOL_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_TAGPOOL_ECC_DBE_ERR 2:2 /* RWDVF */ +#define 
NV_TSTATE_ERR_STATUS_0_TAGPOOL_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_TAGPOOL_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTOREBUFERR 3:3 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTOREBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTOREBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTORE_ECC_LIMIT_ERR 4:4 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTORE_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTORE_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTORE_ECC_DBE_ERR 5:5 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTORE_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_CRUMBSTORE_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAMBUFERR 6:6 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAMBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAMBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAM_ECC_LIMIT_ERR 7:7 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAM_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAM_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_TD_TID_RAM_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_ATO_ERR 9:9 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_ATO_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_ATO_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_STATUS_0_CAMRSP_ERR 10:10 /* RWDVF */ +#define NV_TSTATE_ERR_STATUS_0_CAMRSP_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_STATUS_0_CAMRSP_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_TSTATE_ERR_DEBUG 0x00003420 /* R--4R */ +#define NV_TSTATE_ERR_DEBUG_ATO_SOURCE 7:0 /* R-DVF */ +#define NV_TSTATE_ERR_DEBUG_ATO_SOURCE_INIT 0x00000000 /* R-D-V */ + +#define NV_TSTATE_ERR_FIRST_0 0x0000341c /* RW-4R */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOLBUFERR 0:0 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOLBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOLBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOL_ECC_LIMIT_ERR 1:1 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOL_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOL_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOL_ECC_DBE_ERR 2:2 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOL_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_TAGPOOL_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTOREBUFERR 3:3 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTOREBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTOREBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTORE_ECC_LIMIT_ERR 4:4 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTORE_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTORE_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTORE_ECC_DBE_ERR 5:5 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTORE_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_CRUMBSTORE_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define 
NV_TSTATE_ERR_FIRST_0_TD_TID_RAMBUFERR 6:6 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_TD_TID_RAMBUFERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_TD_TID_RAMBUFERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_TD_TID_RAM_ECC_LIMIT_ERR 7:7 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_TD_TID_RAM_ECC_LIMIT_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_TD_TID_RAM_ECC_LIMIT_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_TD_TID_RAM_ECC_DBE_ERR 8:8 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_TD_TID_RAM_ECC_DBE_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_TD_TID_RAM_ECC_DBE_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_ATO_ERR 9:9 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_ATO_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_ATO_ERR_CLEAR 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FIRST_0_CAMRSP_ERR 10:10 /* RWDVF */ +#define NV_TSTATE_ERR_FIRST_0_CAMRSP_ERR_NONE 0x00000000 /* RWD-V */ +#define NV_TSTATE_ERR_FIRST_0_CAMRSP_ERR_CLEAR 0x00000001 /* RW--V */ + +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER 0x00003480 /* RW-4R */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT 0x00003484 /* RW-4R */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER 0x00003490 /* RW-4R */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_ERROR_COUNT 23:0 /* RWDVF */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_ERROR_COUNT_INIT 0x00000000 /* RWD-V */ + +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT 0x00003494 /* RW-4R */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT 23:0 /* RWDVF */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT_INIT 0x00ffffff /* RWD-V */ +#define NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT_ERROR_LIMIT__PROD 0x007fffff /* RW--V */ + +#define NV_TSTATE_ERR_ECC_CTRL 0x00003470 /* RW-4R */ +#define NV_TSTATE_ERR_ECC_CTRL_CRUMBSTORE_ECC_ENABLE 0:0 /* RWEVF */ +#define NV_TSTATE_ERR_ECC_CTRL_CRUMBSTORE_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_ECC_CTRL_CRUMBSTORE_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_ECC_CTRL_CRUMBSTORE_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_ECC_CTRL_TAGPOOL_ECC_ENABLE 1:1 /* RWEVF */ +#define NV_TSTATE_ERR_ECC_CTRL_TAGPOOL_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_ECC_CTRL_TAGPOOL_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_ECC_CTRL_TAGPOOL_ECC_ENABLE__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_ECC_CTRL_TD_TID_ECC_ENABLE 2:2 /* RWEVF */ +#define NV_TSTATE_ERR_ECC_CTRL_TD_TID_ECC_ENABLE_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_ECC_CTRL_TD_TID_ECC_ENABLE_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_ECC_CTRL_TD_TID_ECC_ENABLE__PROD 0x00000001 /* RW--V */ + +#define NV_TSTATE_TAGSTATECONTROL 0x00003040 /* RW-4R */ +#define NV_TSTATE_TAGSTATECONTROL_SWECCENB 1:1 /* RWEVF */ +#define NV_TSTATE_TAGSTATECONTROL_SWECCENB_ON 0x00000001 /* RW--V */ +#define NV_TSTATE_TAGSTATECONTROL_SWECCENB_OFF 0x00000000 /* RWE-V */ +#define 
NV_TSTATE_TAGSTATECONTROL_ATO_ENB 9:9 /* RWEVF */ +#define NV_TSTATE_TAGSTATECONTROL_ATO_ENB_ON 0x00000001 /* RWE-V */ +#define NV_TSTATE_TAGSTATECONTROL_ATO_ENB_OFF 0x00000000 /* RW--V */ + +#define NV_TSTATE_ATO_TIMER_LIMIT 0x00003048 /* RW-4R */ +#define NV_TSTATE_ATO_TIMER_LIMIT_LIMIT 19:0 /* RWEVF */ +#define NV_TSTATE_ATO_TIMER_LIMIT_LIMIT_INIT 0x000fffff /* RWE-V */ +#define NV_TSTATE_ATO_TIMER_LIMIT_LIMIT__PROD 0x00005555 /* RW--V */ + +#define NV_TSTATE_CREQ_CAM_LOCK 0x00003050 /* RW-4R */ +#define NV_TSTATE_CREQ_CAM_LOCK_ON 31:0 /* RWEVF */ +#define NV_TSTATE_CREQ_CAM_LOCK_ON_INIT 0x00000000 /* RWE-V */ + +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0 0x00003408 /* RW-4R */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOLBUFERR 0:0 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOLBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOLBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOLBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR 1:1 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR 2:2 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTOREBUFERR 3:3 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTOREBUFERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTOREBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTOREBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAMBUFERR 6:6 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAMBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAMBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAMBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define 
NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_ATO_ERR 9:9 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_ATO_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_ATO_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_ATO_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CAMRSP_ERR 10:10 /* RWEVF */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CAMRSP_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CAMRSP_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_FATAL_REPORT_EN_0_CAMRSP_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0 0x0000340c /* RW-4R */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOLBUFERR 0:0 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOLBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOLBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOLBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR 1:1 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOL_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR 2:2 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TAGPOOL_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTOREBUFERR 3:3 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTOREBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTOREBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTOREBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR 4:4 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR__PROD 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR 5:5 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CRUMBSTORE_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAMBUFERR 6:6 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAMBUFERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAMBUFERR_DISABLE 0x00000000 /* RWE-V */ +#define 
NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAMBUFERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR 7:7 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_LIMIT_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR 8:8 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_TD_TID_RAM_ECC_DBE_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_ATO_ERR 9:9 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_ATO_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_ATO_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_ATO_ERR_ENABLE 0x00000001 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CAMRSP_ERR 10:10 /* RWEVF */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CAMRSP_ERR__PROD 0x00000000 /* RW--V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CAMRSP_ERR_DISABLE 0x00000000 /* RWE-V */ +#define NV_TSTATE_ERR_NON_FATAL_REPORT_EN_0_CAMRSP_ERR_ENABLE 0x00000001 /* RW--V */ + +#define NV_TSTATE_RAM_ADDRESS 0x00003080 /* RW-4R */ +#define NV_TSTATE_RAM_ADDRESS_ADDR 9:0 /* RWEVF */ +#define NV_TSTATE_RAM_ADDRESS_ADDR_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_ADDRESS_ADDR_TAGPOOL_CRUMBSTORE_TDTID_DEPTH 0x000003ff /* RW--V */ +#define NV_TSTATE_RAM_ADDRESS_ADDR_CREQ_CAM_DEPTH 0x0000001f /* RW--V */ +#define NV_TSTATE_RAM_ADDRESS_VC 14:12 /* RWEVF */ +#define NV_TSTATE_RAM_ADDRESS_VC_VC0_CREQ 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_ADDRESS_VC_VC5_TRANSDONE 0x00000005 /* RW--V */ +#define NV_TSTATE_RAM_ADDRESS_SELECT 17:16 /* RWEVF */ +#define NV_TSTATE_RAM_ADDRESS_SELECT_TAGPOOL_RAM 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_ADDRESS_SELECT_CRUMBSTORE_RAM 0x00000001 /* RW--V */ +#define NV_TSTATE_RAM_ADDRESS_SELECT_TD_TID_RAM 0x00000002 /* RW--V */ +#define NV_TSTATE_RAM_ADDRESS_SELECT_CREQ_CAM 0x00000003 /* RW--V */ +#define NV_TSTATE_RAM_ADDRESS_AUTO_INCR 31:31 /* RWEVF */ +#define NV_TSTATE_RAM_ADDRESS_AUTO_INCR_ENABLE 0x00000001 /* RWE-V */ +#define NV_TSTATE_RAM_ADDRESS_AUTO_INCR_DISABLE 0x00000000 /* RW--V */ + +#define NV_TSTATE_RAM_DATA1 0x00003094 /* RW-4R */ +#define NV_TSTATE_RAM_DATA1_EST 0:0 /* RWEVF */ +#define NV_TSTATE_RAM_DATA1_EST_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA1_IRL 2:1 /* RWEVF */ +#define NV_TSTATE_RAM_DATA1_IRL_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA1_BECN 3:3 /* RWEVF */ +#define NV_TSTATE_RAM_DATA1_BECN_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA1_RLAN 7:4 /* RWEVF */ +#define NV_TSTATE_RAM_DATA1_RLAN_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA1_RID 18:8 /* RWEVF */ +#define NV_TSTATE_RAM_DATA1_RID_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA1_TCEN 19:19 /* RWEVF */ +#define NV_TSTATE_RAM_DATA1_TCEN_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA1_AGE 21:20 /* RWEVF */ +#define NV_TSTATE_RAM_DATA1_AGE_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA1_VALID 31:31 /* RWEVF */ +#define NV_TSTATE_RAM_DATA1_VALID_INIT 0x00000000 /* RWE-V */ + +#define NV_TSTATE_RAM_DATA0 
0x00003090 /* RW-4R */ +#define NV_TSTATE_RAM_DATA0_TAG 9:0 /* RWEVF */ +#define NV_TSTATE_RAM_DATA0_TAG_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA0_ECC 30:24 /* RWEVF */ +#define NV_TSTATE_RAM_DATA0_ECC_INIT 0x00000000 /* RWE-V */ + +#define NV_TSTATE_RAM_DATA2 0x00003098 /* RW-4R */ +#define NV_TSTATE_RAM_DATA2_CLOSED 0:0 /* RWEVF */ +#define NV_TSTATE_RAM_DATA2_CLOSED_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA2_COLLAP_TAG_CNT 14:1 /* RWEVF */ +#define NV_TSTATE_RAM_DATA2_COLLAP_TAG_CNT_INIT 0x00000000 /* RWE-V */ +#define NV_TSTATE_RAM_DATA2_DNE 17:17 /* RWEVF */ +#define NV_TSTATE_RAM_DATA2_DNE_INIT 0x00000000 /* RWE-V */ +#endif // __lr10_dev_tstate_ip_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/npgip_discovery.h b/src/common/inc/swref/published/nvswitch/lr10/npgip_discovery.h new file mode 100644 index 000000000..9ca601f61 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/npgip_discovery.h @@ -0,0 +1,69 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_npgip_discovery_h__ +#define __lr10_npgip_discovery_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_NPG_DISCOVERY /* R--4P */ +#define NV_NPG_DISCOVERY_ENTRY 1:0 /* R-EVF */ +#define NV_NPG_DISCOVERY_ENTRY_INVALID 0x00000000 /* R-E-V */ +#define NV_NPG_DISCOVERY_ENTRY_ENUM 0x00000001 /* R---V */ +#define NV_NPG_DISCOVERY_ENTRY_DATA1 0x00000002 /* R---V */ +#define NV_NPG_DISCOVERY_ENTRY_DATA2 0x00000003 /* R---V */ +#define NV_NPG_DISCOVERY_CONTENTS 30:2 /* R-EVF */ +#define NV_NPG_DISCOVERY_CONTENTS_INIT 0x00000000 /* R-E-V */ +#define NV_NPG_DISCOVERY_CHAIN 31:31 /* R-EVF */ +#define NV_NPG_DISCOVERY_CHAIN_DISABLE 0x00000000 /* R-E-V */ +#define NV_NPG_DISCOVERY_CHAIN_ENABLE 0x00000001 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_DEVICE 7:2 /* R--UF */ +#define NV_NPG_DISCOVERY_ENUM_DEVICE_INVALID 0x0 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_DEVICE_NPG 0x1 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_DEVICE_NPORT 0x2 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_DEVICE_NPORT_MULTICAST 0x3 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_DEVICE_NPG_PERFMON 0x4 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_DEVICE_NPORT_PERFMON 0x5 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_DEVICE_NPORT_PERFMON_MULTICAST 0x6 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_ID 15:8 /* R--UF */ +#define NV_NPG_DISCOVERY_ENUM_RESERVED 19:16 /* R--UF */ +#define NV_NPG_DISCOVERY_ENUM_VERSION 30:20 /* R--UF */ +#define NV_NPG_DISCOVERY_ENUM_VERSION_1 0x1 /* R---V */ +#define NV_NPG_DISCOVERY_ENUM_VERSION_2 0x2 /* R---V */ +#define NV_NPG_DISCOVERY_DATA1_RESET 6:2 /* R--UF */ +#define NV_NPG_DISCOVERY_DATA1_INTR 11:7 /* R--UF */ +#define NV_NPG_DISCOVERY_DATA1_RESERVED2 11:2 /* R--UF */ +#define NV_NPG_DISCOVERY_DATA1_NPG_LENGTH 30:12 /* R--UF */ +#define NV_NPG_DISCOVERY_DATA1_RESERVED 30:12 /* R--UF */ +#define NV_NPG_DISCOVERY_DATA2_TYPE 30:26 /* R--UF */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_INVALID 0x0 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_RESERVED 0x1 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_RESETREG 0x2 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_INTRREG 0x3 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_DISCOVERY 0x4 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_UNICAST 0x5 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_BROADCAST 0x6 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_MULTICAST0 0x7 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_MULTICAST1 0x8 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_TYPE_MULTICAST2 0x9 /* R---V */ +#define NV_NPG_DISCOVERY_DATA2_ADDR 25:2 /* R--UF */ +#define NV_NPG_DISCOVERY_PRI_BASE_ALIGN 12 /* */ +#endif // __lr10_npgip_discovery_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/nvlinkip_discovery.h b/src/common/inc/swref/published/nvswitch/lr10/nvlinkip_discovery.h new file mode 100644 index 000000000..f92e39121 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/nvlinkip_discovery.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_nvlinkip_discovery_h__ +#define __lr10_nvlinkip_discovery_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NVLINKIP_DISCOVERY_COMMON /* R--4P */ +#define NV_NVLINKIP_DISCOVERY_COMMON_ENTRY 1:0 /* R-EVF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_ENTRY_INVALID 0x00000000 /* R-E-V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_ENTRY_ENUM 0x00000001 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_ENTRY_DATA1 0x00000002 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_ENTRY_DATA2 0x00000003 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_CONTENTS 30:2 /* R-EVF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_CONTENTS_INIT 0x00000000 /* R-E-V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_CHAIN 31:31 /* R-EVF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_CHAIN_DISABLE 0x00000000 /* R-E-V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_CHAIN_ENABLE 0x00000001 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE 7:2 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_INVALID 0x0 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_IOCTRL 0x1 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLTL 0x2 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLINK 0x3 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_MINION 0x4 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLIPT 0x5 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLTLC 0x6 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_IOCTRLMIF 0x7 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_DLPL_MULTICAST 0x8 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLTLC_MULTICAST 0x9 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_IOCTRLMIF_MULTICAST 0xA /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_DLPL 0xB /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_SIOCTRL 0xC /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_TIOCTRL 0xD /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_SIOCTRL_PERFMON 0xE /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLIPT_SYS_PERFMON 0xF /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_TX_PERFMON_MULTICAST 0x10 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_RX_PERFMON_MULTICAST 0x11 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_TX_PERFMON 0x12 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_RX_PERFMON 0x13 /* R---V */ +#define 
NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLW 0x14 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLW_PERFMON 0x15 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLDL 0x16 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLDL_MULTICAST 0x17 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_SYS_PERFMON 0x18 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_SYS_PERFMON_MULTICAST 0x19 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLIPT_LNK 0x1A /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLIPT_LNK_MULTICAST 0x1B /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_PLL 0x1C /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_ID 15:8 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_RESERVED 19:16 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_VERSION 30:20 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_VERSION_NVLINK10 0x1 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_VERSION_NVLINK20 0x2 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_VERSION_3 0x3 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_VERSION_NVLINK22 0x4 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_VERSION_NVLINK30 0x5 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_VERSION_NVLINK31 0x6 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_VERSION_NVLINK32 0x7 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA1_RESERVED 30:12 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA1_IOCTRL_LENGTH 30:12 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA1_RESERVED2 11:2 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE 30:26 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_INVALID 0x0 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_PLLCONTROL 0x1 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_RESETREG 0x2 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_INTRREG 0x3 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_DISCOVERY 0x4 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_UNICAST 0x5 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_BROADCAST 0x6 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_MULTICAST0 0x7 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_MULTICAST1 0x8 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_MULTICAST2 0x9 /* R---V */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_ADDR 25:2 /* R--UF */ +#define NV_NVLINKIP_DISCOVERY_COMMON_DATA2_ADDR_ALIGN 2 /* */ +#endif // __lr10_nvlinkip_discovery_h__ diff --git a/src/common/inc/swref/published/nvswitch/lr10/nxbar_discovery.h b/src/common/inc/swref/published/nvswitch/lr10/nxbar_discovery.h new file mode 100644 index 000000000..c64cb4a93 --- /dev/null +++ b/src/common/inc/swref/published/nvswitch/lr10/nxbar_discovery.h @@ -0,0 +1,69 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the Software), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __lr10_nxbar_discovery_h__ +#define __lr10_nxbar_discovery_h__ +/* This file is autogenerated. Do not edit */ +#define NV_NXBAR_DISCOVERY /* R--4P */ +#define NV_NXBAR_DISCOVERY_ENTRY 1:0 /* R-EVF */ +#define NV_NXBAR_DISCOVERY_ENTRY_INVALID 0x00000000 /* R-E-V */ +#define NV_NXBAR_DISCOVERY_ENTRY_ENUM 0x00000001 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENTRY_DATA1 0x00000002 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENTRY_DATA2 0x00000003 /* R---V */ +#define NV_NXBAR_DISCOVERY_CONTENTS 30:2 /* R-EVF */ +#define NV_NXBAR_DISCOVERY_CONTENTS_INIT 0x00000000 /* R-E-V */ +#define NV_NXBAR_DISCOVERY_CHAIN 31:31 /* R-EVF */ +#define NV_NXBAR_DISCOVERY_CHAIN_DISABLE 0x00000000 /* R-E-V */ +#define NV_NXBAR_DISCOVERY_CHAIN_ENABLE 0x00000001 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_DEVICE 7:2 /* R--UF */ +#define NV_NXBAR_DISCOVERY_ENUM_DEVICE_INVALID 0x0 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_DEVICE_NXBAR 0x1 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_DEVICE_TILE 0x2 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_DEVICE_TILE_MULTICAST 0x3 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_DEVICE_NXBAR_PERFMON 0x4 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_DEVICE_TILE_PERFMON 0x5 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_DEVICE_TILE_PERFMON_MULTICAST 0x6 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_ID 15:8 /* R--UF */ +#define NV_NXBAR_DISCOVERY_ENUM_RESERVED 19:16 /* R--UF */ +#define NV_NXBAR_DISCOVERY_ENUM_VERSION 30:20 /* R--UF */ +#define NV_NXBAR_DISCOVERY_ENUM_VERSION_1 0x1 /* R---V */ +#define NV_NXBAR_DISCOVERY_ENUM_VERSION_2 0x2 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA1_RESET 6:2 /* R--UF */ +#define NV_NXBAR_DISCOVERY_DATA1_INTR 11:7 /* R--UF */ +#define NV_NXBAR_DISCOVERY_DATA1_RESERVED2 11:2 /* R--UF */ +#define NV_NXBAR_DISCOVERY_DATA1_NXBAR_LENGTH 30:12 /* R--UF */ +#define NV_NXBAR_DISCOVERY_DATA1_RESERVED 30:12 /* R--UF */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE 30:26 /* R--UF */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_INVALID 0x0 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_RESERVED 0x1 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_RESETREG 0x2 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_INTRREG 0x3 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_DISCOVERY 0x4 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_UNICAST 0x5 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_BROADCAST 0x6 /* R---V */ +#define 
NV_NXBAR_DISCOVERY_DATA2_TYPE_MULTICAST0 0x7 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_MULTICAST1 0x8 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_TYPE_MULTICAST2 0x9 /* R---V */ +#define NV_NXBAR_DISCOVERY_DATA2_ADDR 25:2 /* R--UF */ +#define NV_NXBAR_DISCOVERY_PRI_BASE_ALIGN 12 /* */ +#endif // __lr10_nxbar_discovery_h__ diff --git a/src/common/inc/swref/published/pascal/gp100/dev_boot.h b/src/common/inc/swref/published/pascal/gp100/dev_boot.h new file mode 100644 index 000000000..4f032d55e --- /dev/null +++ b/src/common/inc/swref/published/pascal/gp100/dev_boot.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gp100_dev_boot_h__ +#define __gp100_dev_boot_h__ +#define NV_PMC_INTR(i) (0x00000100+(i)*4) /* R--4A */ +#define NV_PMC_INTR_EN(i) (0x00000140+(i)*4) /* R--4A */ +#define NV_PMC_INTR_EN__SIZE_1 2 /* */ +#define NV_PMC_INTR_EN_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_DEVICE_DISABLED 0x00000000 /* */ +#define NV_PMC_INTR_EN_DEVICE_ENABLED 0x00000001 /* */ +#define NV_PMC_INTR_EN_VALUE 31:0 /* R-IVF */ +#define NV_PMC_INTR_EN_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_PMC_INTR_EN_SET(i) (0x00000160+(i)*4) /* -W-4A */ +#define NV_PMC_INTR_EN_SET__SIZE_1 2 /* */ +#define NV_PMC_INTR_EN_SET_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_SET_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_SET_DEVICE_SET 0x00000001 /* */ +#define NV_PMC_INTR_EN_SET_VALUE 31:0 /* -W-VF */ +#define NV_PMC_INTR_EN_CLEAR(i) (0x00000180+(i)*4) /* -W-4A */ +#define NV_PMC_INTR_EN_CLEAR__SIZE_1 2 /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE_SET 0x00000001 /* */ +#define NV_PMC_INTR_EN_CLEAR_VALUE 31:0 /* -W-VF */ +#endif // __gp100_dev_boot_h__ diff --git a/src/common/inc/swref/published/pascal/gp100/dev_fault.h b/src/common/inc/swref/published/pascal/gp100/dev_fault.h new file mode 100644 index 000000000..280bb437a --- /dev/null +++ b/src/common/inc/swref/published/pascal/gp100/dev_fault.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gp100_dev_fault_h__ +#define __gp100_dev_fault_h__ +#define NV_PFAULT_FAULT_TYPE_PDE 0x00000000 /* */ +#define NV_PFAULT_FAULT_TYPE_PDE_SIZE 0x00000001 /* */ +#define NV_PFAULT_FAULT_TYPE_PTE 0x00000002 /* */ +#define NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION 0x00000003 /* */ +#define NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK 0x00000004 /* */ +#define NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION 0x00000005 /* */ +#define NV_PFAULT_FAULT_TYPE_RO_VIOLATION 0x00000006 /* */ +#define NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION 0x00000008 /* */ +#define NV_PFAULT_FAULT_TYPE_WORK_CREATION 0x00000009 /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE 0x0000000a /* */ +#define NV_PFAULT_FAULT_TYPE_COMPRESSION_FAILURE 0x0000000b /* */ +#define NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND 0x0000000c /* */ +#define NV_PFAULT_FAULT_TYPE_REGION_VIOLATION 0x0000000d /* */ +#define NV_PFAULT_FAULT_TYPE_POISONED 0x0000000e /* */ +#define NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION 0x0000000f /* */ +#endif // __gp100_dev_fault_h__ diff --git a/src/common/inc/swref/published/pascal/gp100/dev_mmu.h b/src/common/inc/swref/published/pascal/gp100/dev_mmu.h new file mode 100644 index 000000000..6b4610ad3 --- /dev/null +++ b/src/common/inc/swref/published/pascal/gp100/dev_mmu.h @@ -0,0 +1,158 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gp100_dev_mmu_h__ +#define __gp100_dev_mmu_h__ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define 
NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+18+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_VER2_PDE__SIZE 8 +#define NV_MMU_VER2_DUAL_PDE_APERTURE_BIG 2:1 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG 3:3 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID (35-3):(8-4) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SYS 53:(8-4) /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT 8 /* */ +#define NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL 66:65 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL 67:67 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID (99-3):72 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_SYS 117:72 /* RWXVF */ +#define NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_DUAL_PDE__SIZE 16 +#define NV_MMU_VER2_PDE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_PDE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER2_PDE_VOL 3:3 /* RWXVF */ +#define NV_MMU_VER2_PDE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PDE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PDE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_VER2_PTE_VALID 0:0 /* RWXVF */ +#define NV_MMU_VER2_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_APERTURE 2:1 /* RWXVF */ +#define NV_MMU_VER2_PTE_VOL 3:3 /* RWXVF */ +#define NV_MMU_VER2_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_ENCRYPTED 4:4 /* RWXVF */ +#define NV_MMU_VER2_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_VER2_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_VER2_PTE_PRIVILEGE 5:5 /* RWXVF */ +#define NV_MMU_VER2_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_READ_ONLY 6:6 /* RWXVF */ +#define NV_MMU_VER2_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE 7:7 /* RWXVF */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_VER2_PTE_ATOMIC_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_SYS 53:8 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER 35:(36-3) /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define 
NV_MMU_VER2_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_VER2_PTE_COMPTAGLINE (18+35):36 /* RWXVF */ +#define NV_MMU_VER2_PTE_KIND 63:56 /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0xff /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_C32_MS4_4CBRA 0x2c /* R---V */ +#define NV_MMU_PTE_KIND_C64_MS4_4CBRA 0x2d /* R---V */ +#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_VER2_PTE__SIZE 8 +#endif // __gp100_dev_mmu_h__ diff --git a/src/common/inc/swref/published/pascal/gp100/dev_ram.h b/src/common/inc/swref/published/pascal/gp100/dev_ram.h new file mode 100644 index 000000000..ee2bad5fc --- /dev/null +++ b/src/common/inc/swref/published/pascal/gp100/dev_ram.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gp100_dev_ram_h__ +#define __gp100_dev_ram_h__ +/* This file is autogenerated. 
Do not edit */ +#define NV_RAMIN /* ----G */ +#define NV_RAMIN_BASE_SHIFT 12 /* */ +#define NV_RAMIN_ALLOC_SIZE 4096 /* */ +#define NV_RAMIN_PAGE_DIR_BASE_TARGET (128*32+1):(128*32+0) /* RWXUF */ +#define NV_RAMIN_PAGE_DIR_BASE_TARGET_VID_MEM 0x00000000 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_TARGET_SYS_MEM_COHERENT 0x00000002 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_VOL (128*32+2):(128*32+2) /* RWXUF */ +#define NV_RAMIN_PAGE_DIR_BASE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_FAULT_REPLAY_TEX (128*32+4):(128*32+4) /* RWXUF */ +#define NV_RAMIN_PAGE_DIR_BASE_FAULT_REPLAY_TEX_DISABLED 0x00000000 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_FAULT_REPLAY_TEX_ENABLED 0x00000001 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_FAULT_REPLAY_GCC (128*32+5):(128*32+5) /* RWXUF */ +#define NV_RAMIN_PAGE_DIR_BASE_FAULT_REPLAY_GCC_DISABLED 0x00000000 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_FAULT_REPLAY_GCC_ENABLED 0x00000001 /* RW--V */ +#define NV_RAMIN_USE_NEW_PT_FORMAT (128*32+10):(128*32+10) /* RWXUF */ +#define NV_RAMIN_USE_NEW_PT_FORMAT_FALSE 0x00000000 /* RW--V */ +#define NV_RAMIN_USE_NEW_PT_FORMAT_TRUE 0x00000001 /* RW--V */ +#define NV_RAMIN_BIG_PAGE_SIZE (128*32+11):(128*32+11) /* RWXUF */ +#define NV_RAMIN_BIG_PAGE_SIZE_128KB 0x00000000 /* RW--V */ +#define NV_RAMIN_BIG_PAGE_SIZE_64KB 0x00000001 /* RW--V */ +#define NV_RAMIN_PAGE_DIR_BASE_LO (128*32+31):(128*32+12) /* RWXUF */ +#define NV_RAMIN_PAGE_DIR_BASE_HI (129*32+31):(129*32+0) /* RWXUF */ +#define NV_RAMIN_ADR_LIMIT_LO (130*32+31):(130*32+12) /* RWXUF */ +#define NV_RAMIN_ADR_LIMIT_HI (131*32+31):(131*32+0) /* RWXUF */ +#endif // __gp100_dev_ram_h__ diff --git a/src/common/inc/swref/published/pascal/gp102/dev_fb.h b/src/common/inc/swref/published/pascal/gp102/dev_fb.h new file mode 100644 index 000000000..d9445e9e0 --- /dev/null +++ b/src/common/inc/swref/published/pascal/gp102/dev_fb.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __gp102_dev_fb_h__ +#define __gp102_dev_fb_h__ +#define NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE 0x00100CE0 /* RW-4R */ +#define NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE_LOWER_SCALE 3:0 /* RWEVF */ +#define NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE_LOWER_MAG 9:4 /* RWEVF */ +#define NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE_ECC_MODE 30:30 /* RWEVF */ +#define NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE_ECC_MODE_DISABLED 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE_ECC_MODE_ENABLED 0x00000001 /* RW--V */ +#endif // __gp102_dev_fb_h__ diff --git a/src/common/inc/swref/published/pascal/gp102/dev_pbdma.h b/src/common/inc/swref/published/pascal/gp102/dev_pbdma.h new file mode 100644 index 000000000..af0c00595 --- /dev/null +++ b/src/common/inc/swref/published/pascal/gp102/dev_pbdma.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gp102_dev_pbdma_h__ +#define __gp102_dev_pbdma_h__ +#define NV_PPBDMA_SET_CHANNEL_INFO_SCG_TYPE_COMPUTE1 0x00000001 /* */ +#endif // __gp102_dev_pbdma_h__ diff --git a/src/common/inc/swref/published/pcie_switch/pcie_switch_ref.h b/src/common/inc/swref/published/pcie_switch/pcie_switch_ref.h new file mode 100644 index 000000000..064a0a529 --- /dev/null +++ b/src/common/inc/swref/published/pcie_switch/pcie_switch_ref.h @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef PCIE_SWITCH_REF_H +#define PCIE_SWITCH_REF_H +#include "published/br03/dev_br03_xvd.h" +#include "published/br03/dev_br03_xvu.h" +#include "published/br04/br04_ref.h" + +// +// This file has the vendor and device IDs of all supported PCIe switches +// and any new PCIe switch can be added to this allowlist. +// + +#define IS_SUPPORTED_PCIE_SWITCH(vendorId, deviceId) \ + (((vendorId == PCI_VENDOR_ID_NVIDIA) && IS_DEVID_SUPPORTED_NVIDIA(deviceId)) || \ + ((vendorId == PCI_VENDOR_ID_PLX) && IS_DEVID_SUPPORTED_PLX(deviceId)) || \ + ((vendorId == PCI_VENDOR_ID_PMC) && IS_DEVID_SUPPORTED_PMC(deviceId)) || \ + ((vendorId == PCI_VENDOR_ID_MELLANOX) && IS_DEVID_SUPPORTED_MELLANOX(deviceId))) + +// Accept NVIDIA devices BR03, BR04 +#define PCI_VENDOR_ID_NVIDIA 0x10DE +#define IS_DEVID_SUPPORTED_NVIDIA(devId) \ + (IS_DEVID_BR04(devId) || \ + (devId == NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03)) + +// +// Accept all 87xx and 97xx PLX bridges as supported - these are all +// currently Gen3 peer-to-peer-capable switches. +// +#define PCI_VENDOR_ID_PLX 0x10B5 +#define IS_DEVID_SUPPORTED_PLX(devId) \ + (((devId & 0xFF00) == 0x8700) || \ + ((devId & 0xFF00) == 0x9700)) + +// Accept all 85xx and 40xx PMC-Sierra bridges as supported +#define PCI_VENDOR_ID_PMC 0x11F8 +#define IS_DEVID_SUPPORTED_PMC(devId) \ + (((devId & 0xFF00) == 0x8500) || \ + ((devId & 0xFF00) == 0x4000)) + +// Accept Mellanox CX6 PCIe bridge 0x1976 as supported for A16 +#define IS_DEVID_SUPPORTED_MELLANOX(devId) (devId == 0x1976) + +#endif // PCIE_SWITCH_REF_H diff --git a/src/common/inc/swref/published/turing/tu102/dev_access_counter.h b/src/common/inc/swref/published/turing/tu102/dev_access_counter.h new file mode 100644 index 000000000..9dcc8ca49 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_access_counter.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_access_counter_h__ +#define __tu102_dev_access_counter_h__ +#define NV_ACCESS_COUNTER_NOTIFY_BUF_SIZE 32 /* */ +#endif // __tu102_dev_access_counter_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_boot.h b/src/common/inc/swref/published/turing/tu102/dev_boot.h new file mode 100644 index 000000000..24ef32414 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_boot.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_boot_h__ +#define __tu102_dev_boot_h__ +#define NV_PMC_INTR(i) (0x00000100+(i)*4) /* RW-4A */ +#define NV_PMC_INTR_EN(i) (0x00000140+(i)*4) /* R--4A */ +#define NV_PMC_INTR_EN__SIZE_1 2 /* */ +#define NV_PMC_INTR_EN_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_DEVICE_DISABLED 0x00000000 /* */ +#define NV_PMC_INTR_EN_DEVICE_ENABLED 0x00000001 /* */ +#define NV_PMC_INTR_EN_VALUE 31:0 /* R-IVF */ +#define NV_PMC_INTR_EN_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_PMC_INTR_EN_SET(i) (0x00000160+(i)*4) /* -W-4A */ +#define NV_PMC_INTR_EN_SET__SIZE_1 2 /* */ +#define NV_PMC_INTR_EN_SET_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_SET_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_SET_DEVICE_SET 0x00000001 /* */ +#define NV_PMC_INTR_EN_SET_VALUE 31:0 /* -W-VF */ +#define NV_PMC_INTR_EN_CLEAR(i) (0x00000180+(i)*4) /* -W-4A */ +#define NV_PMC_INTR_EN_CLEAR__SIZE_1 2 /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE_SET 0x00000001 /* */ +#define NV_PMC_INTR_EN_CLEAR_VALUE 31:0 /* -W-VF */ +#define NV_PMC_ENABLE 0x00000200 /* RW-4R */ +#define NV_PMC_ENABLE_DEVICE(i) (i):(i) /* */ +#define NV_PMC_ENABLE_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_ENABLE_DEVICE_DISABLE 0x00000000 /* */ +#define NV_PMC_ENABLE_DEVICE_ENABLE 0x00000001 /* */ +#define NV_PMC_ENABLE_NVDEC 15:15 /* */ +#define NV_PMC_ENABLE_NVDEC_DISABLED 0x00000000 /* */ +#define NV_PMC_ENABLE_NVDEC_ENABLED 0x00000001 /* */ +#endif // __tu102_dev_boot_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_bus.h b/src/common/inc/swref/published/turing/tu102/dev_bus.h new file mode 100644 index 000000000..a3371152f --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_bus.h @@ -0,0 +1,38 @@ +/* + * 
SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_bus_h__ +#define __tu102_dev_bus_h__ + +#define NV_PBUS_VBIOS_SCRATCH(i) (0x00001400+(i)*4) /* */ + +#define NV_PBUS_IFR_FMT_FIXED0 0x00000000 /* */ +#define NV_PBUS_IFR_FMT_FIXED0_SIGNATURE 31:0 /* */ +#define NV_PBUS_IFR_FMT_FIXED0_SIGNATURE_VALUE 0x4947564E /* */ +#define NV_PBUS_IFR_FMT_FIXED1 0x00000004 /* */ +#define NV_PBUS_IFR_FMT_FIXED1_VERSIONSW 15:8 /* */ +#define NV_PBUS_IFR_FMT_FIXED1_FIXED_DATA_SIZE 30:16 /* */ +#define NV_PBUS_IFR_FMT_FIXED2 0x00000008 /* */ +#define NV_PBUS_IFR_FMT_FIXED2_TOTAL_DATA_SIZE 19:0 /* */ + +#endif // __tu102_dev_bus_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_ce.h b/src/common/inc/swref/published/turing/tu102/dev_ce.h new file mode 100644 index 000000000..c9d6eda90 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_ce.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_ce_h__ +#define __tu102_dev_ce_h__ +#define NV_CE_PCE2LCE_CONFIG__SIZE_1 4 /* */ +#define NV_CE_PCE2LCE_CONFIG_PCE_ASSIGNED_LCE_NONE 0x0000000f /* RW--V */ +#define NV_CE_GRCE_CONFIG__SIZE_1 2 /* */ +#define NV_CE_GRCE_CONFIG_SHARED_LCE 3:0 /* RWIVF */ +#define NV_CE_GRCE_CONFIG_SHARED 30:30 /* RWIVF */ +#endif // __tu102_dev_ce_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_ctrl.h b/src/common/inc/swref/published/turing/tu102/dev_ctrl.h new file mode 100644 index 000000000..bca9910ec --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_ctrl.h @@ -0,0 +1,86 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_ctrl_h__ +#define __tu102_dev_ctrl_h__ +#define NV_CTRL_CPU_INTR_TOP(i) (0x00B73400+(i)*4) /* R--4A */ +#define NV_CTRL_CPU_INTR_TOP__SIZE_1 64 /* */ +#define NV_CTRL_CPU_INTR_TOP_VALUE 31:0 /* R--VF */ +#define NV_CTRL_CPU_INTR_TOP_EN_SET(i) (0x00B73800+(i)*4) /* RW-4A */ +#define NV_CTRL_CPU_INTR_TOP_EN_SET__SIZE_1 64 /* */ +#define NV_CTRL_CPU_INTR_TOP_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_CTRL_CPU_INTR_TOP_EN_SET_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_CTRL_CPU_INTR_TOP_EN_CLEAR(i) (0x00B73C00+(i)*4) /* RW-4A */ +#define NV_CTRL_CPU_INTR_TOP_EN_CLEAR__SIZE_1 64 /* */ +#define NV_CTRL_CPU_INTR_TOP_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_CTRL_CPU_INTR_TOP_EN_CLEAR_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_CTRL_CPU_INTR_LEAF(i) (0x00B74000+(i)*4) /* RW-4A */ +#define NV_CTRL_CPU_INTR_LEAF__SIZE_1 1024 /* */ +#define NV_CTRL_CPU_INTR_LEAF_VALUE 31:0 /* RWIVF */ +#define NV_CTRL_CPU_INTR_LEAF_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_CTRL_CPU_INTR_LEAF_ARRAY_SIZE_PER_FN 16 /* */ +#define NV_CTRL_CPU_INTR_LEAF_EN_SET(i) (0x00B78000+(i)*4) /* RW-4A */ +#define NV_CTRL_CPU_INTR_LEAF_EN_SET__SIZE_1 1024 /* */ +#define NV_CTRL_CPU_INTR_LEAF_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_CTRL_CPU_INTR_LEAF_EN_SET_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_CTRL_CPU_INTR_LEAF_EN_CLEAR(i) (0x00B7C000+(i)*4) /* RW-4A */ +#define NV_CTRL_CPU_INTR_LEAF_EN_CLEAR__SIZE_1 1024 /* */ +#define NV_CTRL_CPU_INTR_LEAF_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_CTRL_CPU_INTR_LEAF_EN_CLEAR_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_CTRL_LEGACY_ENGINE_STALL_INTR_BASE_VECTORID 0xB66880 /* C--4R */ +#define NV_CTRL_LEGACY_ENGINE_STALL_INTR_BASE_VECTORID_VECTOR 11:0 /* C--UF */ +#define NV_CTRL_LEGACY_ENGINE_STALL_INTR_BASE_VECTORID_VECTOR_INIT 192 /* C---V */ +#define NV_CTRL_LEGACY_ENGINE_NONSTALL_INTR_BASE_VECTORID 0xB66884 /* C--4R */ +#define NV_CTRL_LEGACY_ENGINE_NONSTALL_INTR_BASE_VECTORID_VECTOR 11:0 /* C--UF */ +#define NV_CTRL_LEGACY_ENGINE_NONSTALL_INTR_BASE_VECTORID_VECTOR_INIT 0 /* C---V */ +#define NV_CTRL_VIRTUAL_INTR_LEAF(i) (0x00B66800+(i)*4) /* RW-4A */ +#define NV_CTRL_VIRTUAL_INTR_LEAF__SIZE_1 2 /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_PENDING 31:0 /* RWIVF */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_PENDING_INIT 0 /* RWI-V */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_PENDING_INTR 1 /* R---V */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_PENDING_CLEAR 1 /* -W--V */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_SET(i) (0x00B66820+(i)*4) /* RW-4A */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_SET__SIZE_1 2 /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_SET_VALUE_INIT 0 /* RWI-V */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_SET_VECTOR(i) (i) /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_SET_VECTOR_ENABLE 1 /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_SET_VECTOR_ENABLED 1 /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_SET_VECTOR_DISABLED 0 /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_CLEAR(i) (0x00B66840+(i)*4) /* RW-4A */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_CLEAR__SIZE_1 2 /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_CLEAR_VALUE_INIT 0 /* RWI-V */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_CLEAR_VECTOR(i) (i) /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_CLEAR_VECTOR_DISABLE 1 /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_CLEAR_VECTOR_ENABLED 1 /* */ +#define NV_CTRL_VIRTUAL_INTR_LEAF_EN_CLEAR_VECTOR_DISABLED 0 /* */ +#define NV_CTRL_CPU_INTR_LEAF_TRIGGER(i) (0x00B66C00+(i)*4) 
/* -W-4A */ +#define NV_CTRL_CPU_INTR_LEAF_TRIGGER__SIZE_1 64 /* */ +#define NV_CTRL_CPU_INTR_LEAF_TRIGGER_VECTOR 11:0 /* -WXVF */ +#define NV_CTRL_CPU_DOORBELL_VECTORID 0x00B6687C /* C--4R */ +#define NV_CTRL_CPU_DOORBELL_VECTORID_VALUE 11:0 /* C--VF */ +#define NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT 129 /* C---V */ +#define NV_CTRL_VF_DOORBELL_VECTOR 11:0 /* -WXUF */ +#define NV_CTRL_VF_DOORBELL_RUNLIST_ID 22:16 /* -WXUF */ +#endif // __tu102_dev_ctrl_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_ext_devices.h b/src/common/inc/swref/published/turing/tu102/dev_ext_devices.h new file mode 100644 index 000000000..023de16c1 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_ext_devices.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_ext_devices_h__ +#define __tu102_dev_ext_devices_h__ + +#define NV_PROM_DATA(i) (0x00300000+(i)) /* RW-1A */ + +#endif // __tu102_dev_ext_devices_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_falcon_v4.h b/src/common/inc/swref/published/turing/tu102/dev_falcon_v4.h new file mode 100644 index 000000000..4543f22c8 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_falcon_v4.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __tu102_dev_falcon_v4_h__ +#define __tu102_dev_falcon_v4_h__ + +#define NV_PFALCON_FALCON_IRQSCLR 0x00000004 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQSCLR_HALT 4:4 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_HALT_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN0 6:6 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN0_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN1 7:7 /* -WXVF */ +#define NV_PFALCON_FALCON_IRQSCLR_SWGEN1_SET 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_IRQSTAT 0x00000008 /* R--4R */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT 4:4 /* R-XVF */ +#define NV_PFALCON_FALCON_IRQSTAT_HALT_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0 6:6 /* R-XVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN0_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1 7:7 /* R-XVF */ +#define NV_PFALCON_FALCON_IRQSTAT_SWGEN1_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_IRQMSET 0x00000010 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQMCLR 0x00000014 /* -W-4R */ +#define NV_PFALCON_FALCON_IRQMASK 0x00000018 /* R--4R */ +#define NV_PFALCON_FALCON_IRQDEST 0x0000001c /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX0 0x00000040 /* RW-4R */ +#define NV_PFALCON_FALCON_MAILBOX1 0x00000044 /* RW-4R */ +#define NV_PFALCON_FALCON_OS 0x00000080 /* RW-4R */ +#define NV_PFALCON_FALCON_RM 0x00000084 /* RW-4R */ +#define NV_PFALCON_FALCON_DEBUGINFO 0x00000094 /* RW-4R */ +#define NV_PFALCON_FALCON_CPUCTL 0x00000100 /* RW-4R */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED 4:4 /* R-XVF */ +#define NV_PFALCON_FALCON_CPUCTL_HALTED_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN 6:6 /* RWIVF */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_EN_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS 0x00000130 /* RW-4R */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU 1:1 /* -WXVF */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU_TRUE 0x00000001 /* -W--V */ +#define NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU_FALSE 0x00000000 /* -W--V */ +#define NV_PFALCON_FALCON_BOOTVEC 0x00000104 /* RW-4R */ +#define NV_PFALCON_FALCON_HWCFG 0x00000108 /* R--4R */ +#define NV_PFALCON_FALCON_HWCFG_IMEM_SIZE 8:0 /* R--VF */ +#define NV_PFALCON_FALCON_HWCFG2 0x000000f4 /* R--4R */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV 10:10 /* R--VF */ +#define NV_PFALCON_FALCON_HWCFG2_RISCV_ENABLE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL 0x0000010c /* RW-4R */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX 0:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMACTL_REQUIRE_CTX_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING 1:1 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_DMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING 2:2 /* R--VF */ +#define NV_PFALCON_FALCON_DMACTL_IMEM_SCRUBBING_DONE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFBASE 0x00000110 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFBASE_BASE 
31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFMOFFS 0x00000114 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFMOFFS_OFFS 15:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFCMD 0x00000118 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFCMD_FULL 0:0 /* R-XVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_FULL_TRUE 0x00000001 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFCMD_IDLE 1:1 /* R-XVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IDLE_FALSE 0x00000000 /* R---V */ +#define NV_PFALCON_FALCON_DMATRFCMD_SEC 3:2 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM 4:4 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_IMEM_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE 5:5 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_WRITE_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_SIZE 10:8 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SIZE_256B 0x00000006 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFCMD_CTXDMA 14:12 /* RWXVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SET_DMTAG 16:16 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFCMD_SET_DMTAG_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMATRFFBOFFS 0x0000011c /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFFBOFFS_OFFS 31:0 /* RWIVF */ +#define NV_PFALCON_FALCON_DMATRFBASE1 0x00000128 /* RW-4R */ +#define NV_PFALCON_FALCON_DMATRFBASE1_BASE 8:0 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC(i) (0x00000180+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMC_OFFS 7:2 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_BLK 15:8 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_AINCW 24:24 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_IMEMC_SECURE 28:28 /* RWIVF */ +#define NV_PFALCON_FALCON_IMEMD(i) (0x00000184+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMD_DATA 31:0 /* RW-VF */ +#define NV_PFALCON_FALCON_IMEMT(i) (0x00000188+(i)*16) /* RW-4A */ +#define NV_PFALCON_FALCON_IMEMT_TAG 15:0 /* RW-VF */ +#define NV_PFALCON_FALCON_DMEMC(i) (0x000001c0+(i)*8) /* RW-4A */ +#define NV_PFALCON_FALCON_DMEMC_OFFS 7:2 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_BLK 15:8 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_AINCW 24:24 /* RWIVF */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_TRUE 0x00000001 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMC_AINCW_FALSE 0x00000000 /* RW--V */ +#define NV_PFALCON_FALCON_DMEMD(i) (0x000001c4+(i)*8) /* RW-4A */ +#define NV_PFALCON_FALCON_DMEMD_DATA 31:0 /* RW-VF */ + +#endif // __tu102_dev_falcon_v4_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_fb.h b/src/common/inc/swref/published/turing/tu102/dev_fb.h new file mode 100644 index 000000000..4c9fb4632 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_fb.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above 
copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_fb_h__ +#define __tu102_dev_fb_h__ + +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO 0x00100A18 /* R--4R */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL 0:0 /* R-IVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL_FALSE 0x0 /* R-I-V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL_TRUE 0x1 /* R---V */ +#define NV_PFB_PRI_MMU_INT_VECTOR_FAULT_NOTIFY_REPLAYABLE 64 /* R---V */ +#define NV_PFB_PRI_MMU_INT_VECTOR_FAULT_NOTIFY_NON_REPLAYABLE 132 /* R---V */ + +#define NV_PFB_PRI_MMU_WPR2_ADDR_LO 0x001FA824 /* RW-4R */ +#define NV_PFB_PRI_MMU_WPR2_ADDR_LO_VAL 31:4 /* RWEVF */ +#define NV_PFB_PRI_MMU_WPR2_ADDR_LO_ALIGNMENT 0x0000000c /* */ +#define NV_PFB_PRI_MMU_WPR2_ADDR_HI 0x001FA828 /* RW-4R */ +#define NV_PFB_PRI_MMU_WPR2_ADDR_HI_VAL 31:4 /* RWEVF */ +#define NV_PFB_PRI_MMU_WPR2_ADDR_HI_ALIGNMENT 0x0000000c /* */ + +#endif // __tu102_dev_fb_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_fb_addendum.h b/src/common/inc/swref/published/turing/tu102/dev_fb_addendum.h new file mode 100644 index 000000000..25b5854b1 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_fb_addendum.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef tu102_dev_fb_addendum_h +#define tu102_dev_fb_addendum_h + +#define NV_MMU_PTE_COMPTAGLINE_BITS_FROM_SPA 35:16 + +#endif // tu102_dev_fb_addendum_h diff --git a/src/common/inc/swref/published/turing/tu102/dev_fbif_v4.h b/src/common/inc/swref/published/turing/tu102/dev_fbif_v4.h new file mode 100644 index 000000000..126c4a0eb --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_fbif_v4.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_fbif_v4_h__ +#define __tu102_dev_fbif_v4_h__ + +#define NV_PFALCON_FBIF_TRANSCFG(i) (0x00000000+(i)*4) /* RW-4A */ +#define NV_PFALCON_FBIF_TRANSCFG__SIZE_1 8 /* */ +#define NV_PFALCON_FBIF_TRANSCFG_TARGET 1:0 /* RWIVF */ +#define NV_PFALCON_FBIF_TRANSCFG_TARGET_COHERENT_SYSMEM 0x00000001 /* R---V */ +#define NV_PFALCON_FBIF_TRANSCFG_MEM_TYPE 2:2 /* RWIVF */ +#define NV_PFALCON_FBIF_TRANSCFG_MEM_TYPE_PHYSICAL 0x00000001 /* R---V */ +#define NV_PFALCON_FBIF_CTL 0x00000024 /* RW-4R */ +#define NV_PFALCON_FBIF_CTL_ALLOW_PHYS_NO_CTX 7:7 /* RWIVF */ +#define NV_PFALCON_FBIF_CTL_ALLOW_PHYS_NO_CTX_ALLOW 0x00000001 /* RW--V */ + +#endif // __tu102_dev_fbif_v4_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_fuse.h b/src/common/inc/swref/published/turing/tu102/dev_fuse.h new file mode 100644 index 000000000..027d605af --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_fuse.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __tu102_dev_fuse_h__ +#define __tu102_dev_fuse_h__ + +#define NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS 0x0002174C /* RW-4R */ +#define NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS_DATA 0:0 /* RWIVF */ +#define NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS_DATA_NO 0x00000000 /* RW--V */ +#define NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS_DATA_YES 0x00000001 /* RW--V */ + +#define NV_FUSE_OPT_NVDEC_DISABLE 0x00021378 /* RW-4R */ +#define NV_FUSE_OPT_NVDEC_DISABLE_DATA 2:0 /* RWIVF */ +#define NV_FUSE_OPT_NVDEC_DISABLE_DATA_INIT 0x00000000 /* RWI-V */ + +#endif // __tu102_dev_fuse_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_gc6_island.h b/src/common/inc/swref/published/turing/tu102/dev_gc6_island.h new file mode 100644 index 000000000..b241ac440 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_gc6_island.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_gc6_island_h__ +#define __tu102_dev_gc6_island_h__ + +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK 0x00118128 /* RW-4R */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION 3:0 /* RWIVF */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0 0:0 /* */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_ENABLE 0x00000001 /* */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK_READ_PROTECTION_LEVEL0_DISABLE 0x00000000 /* */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_03(i) (0x00118214+(i)*4) /* RW-4A */ +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05(i) (0x00118234+(i)*4) /* RW-4A */ + +#endif // __tu102_dev_gc6_island_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_gc6_island_addendum.h b/src/common/inc/swref/published/turing/tu102/dev_gc6_island_addendum.h new file mode 100644 index 000000000..6e3acb67b --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_gc6_island_addendum.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_gc6_island_addendum_h__ +#define __tu102_dev_gc6_island_addendum_h__ + +#define NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0 NV_PGC6_AON_SECURE_SCRATCH_GROUP_03(0) +#define NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE 15:0 +#define NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K 0x100 +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT NV_PGC6_AON_SECURE_SCRATCH_GROUP_05(0) +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS 7:0 +#define NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT_PROGRESS_COMPLETED 0x000000FF + +#endif // __tu102_dev_gc6_island_addendum_h__ + diff --git a/src/common/inc/swref/published/turing/tu102/dev_gsp.h b/src/common/inc/swref/published/turing/tu102/dev_gsp.h new file mode 100644 index 000000000..7a00e288d --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_gsp.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __tu102_dev_gsp_h__ +#define __tu102_dev_gsp_h__ + +#define NV_PGSP 0x113fff:0x110000 /* RW--D */ +#define NV_PGSP_FALCON_MAILBOX0 0x110040 /* RW-4R */ +#define NV_PGSP_FALCON_MAILBOX0_DATA 31:0 /* RWIVF */ +#define NV_PGSP_FALCON_MAILBOX1 0x110044 /* RW-4R */ +#define NV_PGSP_FALCON_MAILBOX1_DATA 31:0 /* RWIVF */ +#define NV_PGSP_FALCON_ENGINE 0x1103c0 /* RW-4R */ +#define NV_PGSP_FALCON_ENGINE_RESET 0:0 /* RWIVF */ +#define NV_PGSP_FALCON_ENGINE_RESET_TRUE 0x00000001 /* RW--V */ +#define NV_PGSP_FALCON_ENGINE_RESET_FALSE 0x00000000 /* RWI-V */ +#define NV_PGSP_MAILBOX(i) (0x110804+(i)*4) /* RW-4A */ +#define NV_PGSP_MAILBOX__SIZE_1 4 /* */ +#define NV_PGSP_MAILBOX_DATA 31:0 /* RWIVF */ +#define NV_PGSP_QUEUE_HEAD(i) (0x110c00+(i)*8) /* RW-4A */ +#define NV_PGSP_QUEUE_HEAD__SIZE_1 8 /* */ +#define NV_PGSP_QUEUE_HEAD_ADDRESS 31:0 /* RWIVF */ + +#endif // __tu102_dev_gsp_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_gsp_addendum.h b/src/common/inc/swref/published/turing/tu102/dev_gsp_addendum.h new file mode 100644 index 000000000..ea99e0893 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_gsp_addendum.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __tu102_dev_gsp_addendum_h__ +#define __tu102_dev_gsp_addendum_h__ + +#define NV_PGSP_FBIF_BASE 0x110600 + +#endif // __tu102_dev_gsp_addendum_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_mmu.h b/src/common/inc/swref/published/turing/tu102/dev_mmu.h new file mode 100644 index 000000000..0134b3dde --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_mmu.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_mmu_h__ +#define __tu102_dev_mmu_h__ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define 
NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+20+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_INVALID 0x07 /* R---V */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x06 /* R---V */ +#define NV_MMU_PTE_KIND_Z16 0x01 /* R---V */ +#define NV_MMU_PTE_KIND_S8 0x02 /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24 0x03 /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8 0x04 /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8 0x05 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE 0x08 /* R---V */ +#define NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC 0x09 /* R---V */ +#define NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC 0x0A /* R---V */ +#define NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC 0x0B /* R---V */ +#define NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC 0x0C /* R---V */ +#define NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC 0x0D /* R---V */ +#define NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC 0x0E /* R---V */ +#define NV_MMU_PTE_KIND_SMSKED_MESSAGE 0x0F /* R---V */ +#define NV_MMU_CLIENT_KIND_Z16 0x1 /* R---V */ +#define NV_MMU_CLIENT_KIND_Z24S8 0x5 /* R---V */ +#define NV_MMU_CLIENT_KIND_INVALID 0x7 /* R---V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PTE_COMPTAGLINE (20+35):36 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#endif // __tu102_dev_mmu_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_nv_xve.h b/src/common/inc/swref/published/turing/tu102/dev_nv_xve.h new file mode 100644 index 000000000..ac51201dd --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_nv_xve.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions 
of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_nv_xve_h__ +#define __tu102_dev_nv_xve_h__ +#define NV_PCFG 0x00088FFF:0x00088000 /* RW--D */ +#define NV_XVE_MSIX_CAP_HDR 0x000000C8 /* RW-4R */ +#define NV_XVE_MSIX_CAP_HDR_ENABLE 31:31 /* RWIVF */ +#define NV_XVE_MSIX_CAP_HDR_ENABLE_ENABLED 0x00000001 /* RW--V */ +#define NV_XVE_MSIX_CAP_HDR_ENABLE_DISABLED 0x00000000 /* RWI-V */ +#endif // __tu102_dev_nv_xve_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_nvdec_addendum.h b/src/common/inc/swref/published/turing/tu102/dev_nvdec_addendum.h new file mode 100644 index 000000000..dc9b34117 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_nvdec_addendum.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_nvdec_addendum_h__ +#define __tu102_dev_nvdec_addendum_h__ + +#define NV_PNVDEC_FBIF_BASE(dev) (0x00830600+(dev)*16384) + +#endif // __tu102_dev_nvdec_addendum_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_nvdec_pri.h b/src/common/inc/swref/published/turing/tu102/dev_nvdec_pri.h new file mode 100644 index 000000000..2c6c5a154 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_nvdec_pri.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_nvdec_pri_h__ +#define __tu102_dev_nvdec_pri_h__ + +#define NV_PNVDEC(dev) 0x00833fff+(dev)*16384:0x00830000+(dev)*16384 /* RW--D */ + +#endif // __tu102_dev_nvdec_pri_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_ram.h b/src/common/inc/swref/published/turing/tu102/dev_ram.h new file mode 100644 index 000000000..0f5f0e8b1 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_ram.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_ram_h__ +#define __tu102_dev_ram_h__ +#define NV_PRAMIN 0x007FFFFF:0x00700000 /* RW--M */ +#endif // __tu102_dev_ram_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_riscv_pri.h b/src/common/inc/swref/published/turing/tu102/dev_riscv_pri.h new file mode 100644 index 000000000..d5967ba2e --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_riscv_pri.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_riscv_pri_h__ +#define __tu102_dev_riscv_pri_h__ + +#define NV_FALCON2_GSP_BASE 0x00111000 +#define NV_PRISCV_RISCV_CORE_SWITCH_RISCV_STATUS 0x00000240 /* R-I4R */ +#define NV_PRISCV_RISCV_CORE_SWITCH_RISCV_STATUS_ACTIVE_STAT 0:0 /* R-IVF */ +#define NV_PRISCV_RISCV_CORE_SWITCH_RISCV_STATUS_ACTIVE_STAT_ACTIVE 0x00000001 /* R---V */ +#define NV_PRISCV_RISCV_IRQMASK 0x000002b4 /* R-I4R */ +#define NV_PRISCV_RISCV_IRQDEST 0x000002b8 /* RW-4R */ + +#endif // __tu102_dev_riscv_pri_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_sec_addendum.h b/src/common/inc/swref/published/turing/tu102/dev_sec_addendum.h new file mode 100644 index 000000000..e7353ce38 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_sec_addendum.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_sec_addendum_h__ +#define __tu102_dev_sec_addendum_h__ + +#define NV_PSEC_FBIF_BASE 0x00840600 + +#endif // __tu102_dev_sec_addendum_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_sec_pri.h b/src/common/inc/swref/published/turing/tu102/dev_sec_pri.h new file mode 100644 index 000000000..ab2889254 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_sec_pri.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_sec_pri_h__ +#define __tu102_dev_sec_pri_h__ + +#define NV_PSEC 0x843fff:0x840000 /* RW--D */ +#define NV_PSEC_FALCON_ENGINE 0x008403c0 /* RW-4R */ +#define NV_PSEC_FALCON_ENGINE_RESET 0:0 /* RWIVF */ +#define NV_PSEC_FALCON_ENGINE_RESET_TRUE 0x00000001 /* RW--V */ +#define NV_PSEC_FALCON_ENGINE_RESET_FALSE 0x00000000 /* RWI-V */ + +#define NV_PSEC_MAILBOX(i) (0x00840804+(i)*4) /* RW-4A */ +#define NV_PSEC_MAILBOX__SIZE_1 4 /* */ +#define NV_PSEC_MAILBOX_DATA 31:0 /* RWIVF */ +#define NV_PSEC_MAILBOX_DATA_INIT 0x00000000 /* RWI-V */ + +#endif // __tu102_dev_sec_pri_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_timer.h b/src/common/inc/swref/published/turing/tu102/dev_timer.h new file mode 100644 index 000000000..64659bcab --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_timer.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_dev_timer_h__ +#define __tu102_dev_timer_h__ +#define NV_PTIMER_INTR_0 0x00009100 /* RW-4R */ +#define NV_PTIMER_INTR_EN_0 0x00009140 /* RW-4R */ +#define NV_PTIMER_VF_TIMER(i) (0x00009800+(i)*4) /* RW-4A */ +#define NV_PTIMER_VF_TIMER_NSEC 31:0 /* */ +#endif // __tu102_dev_timer_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_vm.h b/src/common/inc/swref/published/turing/tu102/dev_vm.h new file mode 100644 index 000000000..bad783445 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_vm.h @@ -0,0 +1,213 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __tu102_dev_vm_h__ +#define __tu102_dev_vm_h__ +#define NV_VIRTUAL_FUNCTION_FULL_PHYS_OFFSET 0x00BBFFFF:0x00B80000 /* RW--D */ +#define NV_VIRTUAL_FUNCTION_PRIV_L2_SYSMEM_INVALIDATE 0x00000F00 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_L2_PEERMEM_INVALIDATE 0x00000F04 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP(i) (0x1600+(i)*4) /* R--4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_VALUE 31:0 /* R--VF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE_INTR_PENDING 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE_INTR_NOT_PENDING 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(i) (0x1608+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_ENABLE 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_ENABLED 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_DISABLED 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR(i) (0x1610+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR__SIZE_1 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE(i) (i) /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE__SIZE_1 64 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_DISABLE 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_ENABLED 1 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_DISABLED 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(i) (0x1000+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_VALUE 31:0 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_VALUE_INIT 0x00000000 /* R---V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(i) (0x1200+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR(i) (0x1400+(i)*4) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR__SIZE_1 8 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR_VALUE 31:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_TRIGGER 0x00001640 /* -W-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_TRIGGER_VECTOR 11:0 /* -W-VF */ +#define NV_VIRTUAL_FUNCTION_PRIV_TIMER 0x2300 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_TIMER_NSEC 31:0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_TIMER_USEC 31:10 /* RWIUF */ +#define NV_VIRTUAL_FUNCTION_PRIV_TIMER_USEC_INIT 0x0 /* RWI-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_NON_REPLAY_FAULT_BUFFER 0 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_REPLAY_FAULT_BUFFER 1 /* */ 
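/*
 * [Editorial sketch -- not part of this commit.]  The "high:low" values in the
 * defines above, e.g. NV_VIRTUAL_FUNCTION_PRIV_TIMER_USEC 31:10, describe a
 * register bit-field as a C conditional-expression pair: (1 ? 31:10) evaluates
 * to the high bit index and (0 ? 31:10) to the low bit index.  The SKETCH_*
 * helper names and the sample register values below are hypothetical, written
 * only to illustrate that convention; the driver itself consumes these manuals
 * through its own DRF-style accessor macros rather than these names.
 */
#include <stdint.h>
#include <stdio.h>

/* Bit ranges mirroring NV_VIRTUAL_FUNCTION_PRIV_TIMER_USEC (31:10) and
 * NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_TRIGGER_VECTOR (11:0) defined above. */
#define SKETCH_TIMER_USEC               31:10
#define SKETCH_INTR_LEAF_TRIGGER_VECTOR 11:0

#define SKETCH_HI(range)     (1 ? range)   /* high bit index of the field */
#define SKETCH_LO(range)     (0 ? range)   /* low bit index of the field  */
#define SKETCH_MASK(range)   ((0xFFFFFFFFu >> (31u - SKETCH_HI(range))) & \
                              (0xFFFFFFFFu << SKETCH_LO(range)))
#define SKETCH_GET(range, v) (((v) & SKETCH_MASK(range)) >> SKETCH_LO(range))

int main(void)
{
    /* Hypothetical 32-bit register reads, used only to exercise the macros. */
    uint32_t timer   = 0x12345678u;
    uint32_t trigger = 0x00000042u;

    printf("TIMER_USEC     = 0x%x\n",
           (unsigned)SKETCH_GET(SKETCH_TIMER_USEC, timer));
    printf("TRIGGER_VECTOR = 0x%x\n",
           (unsigned)SKETCH_GET(SKETCH_INTR_LEAF_TRIGGER_VECTOR, trigger));
    return 0;
}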
+#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_LO(i) (0x00003000+(i)*32) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_LO__SIZE_1 2 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_HI(i) (0x00003004+(i)*32) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_HI__SIZE_1 2 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET(i) (0x00003008+(i)*32) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET__SIZE_1 2 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_PTR 19:0 /* RWEVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_PTR_RESET 0x00000000 /* RWE-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED 30:30 /* RWEVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_NO 0x00000000 /* RWE-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_YES 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_CLEAR 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_OVERFLOW 31:31 /* RWEVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_OVERFLOW_NO 0x00000000 /* RWE-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_OVERFLOW_YES 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET_OVERFLOW_CLEAR 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT(i) (0x0000300C+(i)*32) /* R--4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT__SIZE_1 2 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT_PTR 19:0 /* R-EVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT_PTR_RESET 0x00000000 /* R-E-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED 30:30 /* R-EVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED_NO 0x00000000 /* R-E-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED_YES 0x00000001 /* R---V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT_OVERFLOW 31:31 /* R-EVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT_OVERFLOW_NO 0x00000000 /* R-E-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT_OVERFLOW_YES 0x00000001 /* R---V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_SIZE(i) (0x00003010+(i)*32) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_SIZE__SIZE_1 2 /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_PAGE_FAULT_CTRL 0x00003070 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_STATUS 0x00003094 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB 0x000030A0 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_APERTURE 1:1 /* RWEVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_APERTURE_INIT 0x00000000 /* RWE-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_APERTURE_SYS_MEM 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_ADDR 31:4 /* RWEVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_ADDR_INIT 0x00000000 /* RWE-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_ADDR_ALIGNMENT 0x0000000c /* */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_UPPER_PDB 0x000030A4 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_UPPER_PDB_ADDR 19:0 /* RWEVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_UPPER_PDB_ADDR_INIT 0x00000000 /* RWE-V */ +#define 
NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE 0x000030B0 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_VA 0:0 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_VA_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_VA_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_PDB 1:1 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_PDB_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ALL_PDB_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_HUBTLB_ONLY 2:2 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_HUBTLB_ONLY_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_HUBTLB_ONLY_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY 5:3 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_NONE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_START 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_SYS_MEMBAR 6:6 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_SYS_MEMBAR_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_SYS_MEMBAR_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ACK 8:7 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ACK_NONE_REQUIRED 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ACK_INTRANODE 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_ACK_GLOBALLY 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_CLIENT_ID 14:9 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_GPC_ID 19:15 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_CLIENT_TYPE 20:20 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_CLIENT_TYPE_GPC 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CANCEL_CLIENT_TYPE_HUB 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_PASID 21:21 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_PASID_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_PASID_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_SIZE 22:22 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_SIZE_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_USE_SIZE_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PROP_FLUSH 23:23 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PROP_FLUSH_FALSE 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PROP_FLUSH_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL 26:24 /* RWXVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_ALL 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_PTE_ONLY 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE0 0x00000002 /* RW--V 
*/ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE1 0x00000003 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE2 0x00000004 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE3 0x00000005 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE4 0x00000006 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_UP_TO_PDE5 0x00000007 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_READ 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_WRITE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_ATOMIC_STRONG 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_RSVRVD 0x00000003 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_ATOMIC_WEAK 0x00000004 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_ATOMIC_ALL 0x00000005 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_WRITE_AND_ATOMIC 0x00000006 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_CACHE_LEVEL_CANCEL_ALL 0x00000007 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_TRIGGER 31:31 /* -WEVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_TRIGGER_FALSE 0x00000000 /* -WE-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_TRIGGER_TRUE 0x00000001 /* -W--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG 0x00003100 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_THRESHOLD 15:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_THRESHOLD_INIT 0x00000080 /* RWI-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY 17:16 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_INIT 0x00000000 /* RWI-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_64K 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_2M 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_16M 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_16G 0x00000003 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY 19:18 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_INIT 0x00000000 /* RWI-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_64K 0x00000000 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_2M 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_16M 0x00000002 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_16G 0x00000003 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO 0x00003108 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO_EN 0:0 /* RWIVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO_EN_FALSE 0x00000000 /* RWI-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO_EN_TRUE 0x00000001 /* RW--V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_HI 0x0000310C /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_SIZE 0x00003110 /* R--4R */ +#define 
NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_GET 0x00003114 /* RW-4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_PUT 0x00003118 /* R--4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO 0x0000311C /* R--4R */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL 0:0 /* R-IVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL_FALSE 0x0 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL_TRUE 0x1 /* R---V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_PUSHED 1:1 /* R-IVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_PUSHED_FALSE 0x0 /* R---V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_PUSHED_TRUE 0x1 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_WRITE_NACK 24:24 /* R-IVF */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_WRITE_NACK_FALSE 0x0 /* R-I-V */ +#define NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_WRITE_NACK_TRUE 0x1 /* R---V */ +#define NV_VIRTUAL_FUNCTION_PRIV_MSIX_TABLE_ADDR_LO(i) (0x00010000+(i)*16) /* RW-4A */ +#define NV_VIRTUAL_FUNCTION_PRIV_MSIX_TABLE_ADDR_LO__SIZE_1 6 /* */ +#define NV_VIRTUAL_FUNCTION_TIME_0 0x30080 /* R--4R */ +#define NV_VIRTUAL_FUNCTION_TIME_0_NSEC 31:5 /* R-XUF */ +#define NV_VIRTUAL_FUNCTION_TIME_1 0x30084 /* R--4R */ +#define NV_VIRTUAL_FUNCTION_TIME_1_NSEC 28:0 /* R-XUF */ +#define NV_VIRTUAL_FUNCTION_PRIV_DOORBELL 0x2200 /* -W-4R */ +#define NV_VIRTUAL_FUNCTION_DOORBELL 0x30090 /* -W-4R */ +#define NV_VIRTUAL_FUNCTION_ERR_CONT 0x30094 /* R--4R */ +#endif // __tu102_dev_vm_h__ diff --git a/src/common/inc/swref/published/turing/tu102/dev_vm_addendum.h b/src/common/inc/swref/published/turing/tu102/dev_vm_addendum.h new file mode 100644 index 000000000..6123cd61f --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/dev_vm_addendum.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef tu102_dev_vm_addendum_h +#define tu102_dev_vm_addendum_h + +// +// Compile time asserts in the source code files will ensure that +// these don't end up exceeding the range of the top level registers. 
+// + +// Subtrees at CPU_INTR top level for UVM owned interrupts +#define NV_CPU_INTR_UVM_SUBTREE_START NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(1) +#define NV_CPU_INTR_UVM_SUBTREE_LAST NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(1) + +#define NV_CPU_INTR_UVM_SHARED_SUBTREE_START NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(2) +#define NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(2) + +// +// Subtrees at CPU_INTR top level for all stall interrupts from host-driven +// engines +// +#define NV_CPU_INTR_STALL_SUBTREE_START NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(3) +#define NV_CPU_INTR_STALL_SUBTREE_LAST NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(3) + +#endif // tu102_dev_vm_addendum_h diff --git a/src/common/inc/swref/published/turing/tu102/hwproject.h b/src/common/inc/swref/published/turing/tu102/hwproject.h new file mode 100644 index 000000000..26bb4a548 --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/hwproject.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __tu102_hwproject_h__ +#define __tu102_hwproject_h__ + +#define NV_CHIP_EXTENDED_SYSTEM_PHYSICAL_ADDRESS_BITS 47 + +#endif // __tu102_hwproject_h__ diff --git a/src/common/inc/swref/published/turing/tu102/kind_macros.h b/src/common/inc/swref/published/turing/tu102/kind_macros.h new file mode 100644 index 000000000..4243730df --- /dev/null +++ b/src/common/inc/swref/published/turing/tu102/kind_macros.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define _kind_macros_orig_H_ + +#define KIND_INVALID(k) ( ((k) ==NV_MMU_CLIENT_KIND_INVALID)) +#define PTEKIND_COMPRESSIBLE(k) ( ((k) >=NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE && (k) <= NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC)) +#define PTEKIND_DISALLOWS_PLC(k) ( !((k) ==NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE)) +#define PTEKIND_SUPPORTED(k) ( ((k) ==NV_MMU_PTE_KIND_INVALID)|| ((k) ==NV_MMU_PTE_KIND_PITCH)|| ((k) ==NV_MMU_PTE_KIND_GENERIC_MEMORY)|| ((k) >=NV_MMU_PTE_KIND_Z16 && (k) <= NV_MMU_PTE_KIND_Z24S8)|| ((k) >=NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE && (k) <= NV_MMU_PTE_KIND_SMSKED_MESSAGE)) +#define KIND_Z(k) ( ((k) >=NV_MMU_CLIENT_KIND_Z16 && (k) <= NV_MMU_CLIENT_KIND_Z24S8)) + diff --git a/src/common/inc/swref/published/turing/tu104/dev_timer.h b/src/common/inc/swref/published/turing/tu104/dev_timer.h new file mode 100644 index 000000000..af89e57df --- /dev/null +++ b/src/common/inc/swref/published/turing/tu104/dev_timer.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __tu104_dev_timer_h__ +#define __tu104_dev_timer_h__ + +#define NV_PTIMER 0x00009fff:0x00009000 /* RW--D */ + +#endif // __tu104_dev_timer_h__ + diff --git a/src/common/inc/swref/published/volta/gv100/dev_boot.h b/src/common/inc/swref/published/volta/gv100/dev_boot.h new file mode 100644 index 000000000..425fe1d86 --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_boot.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gv100_dev_boot_h__ +#define __gv100_dev_boot_h__ +#define NV_PMC_INTR(i) (0x00000100+(i)*4) /* R--4A */ +#define NV_PMC_INTR_EN(i) (0x00000140+(i)*4) /* R--4A */ +#define NV_PMC_INTR_EN__SIZE_1 4 /* */ +#define NV_PMC_INTR_EN_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_DEVICE_DISABLED 0x00000000 /* */ +#define NV_PMC_INTR_EN_DEVICE_ENABLED 0x00000001 /* */ +#define NV_PMC_INTR_EN_VALUE 31:0 /* R-IVF */ +#define NV_PMC_INTR_EN_VALUE_INIT 0x00000000 /* R-I-V */ +#define NV_PMC_INTR_EN_SET(i) (0x00000160+(i)*4) /* -W-4A */ +#define NV_PMC_INTR_EN_SET__SIZE_1 4 /* */ +#define NV_PMC_INTR_EN_SET_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_SET_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_SET_DEVICE_SET 0x00000001 /* */ +#define NV_PMC_INTR_EN_SET_VALUE 31:0 /* -W-VF */ +#define NV_PMC_INTR_EN_CLEAR(i) (0x00000180+(i)*4) /* -W-4A */ +#define NV_PMC_INTR_EN_CLEAR__SIZE_1 4 /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE(i) (i):(i) /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE__SIZE_1 32 /* */ +#define NV_PMC_INTR_EN_CLEAR_DEVICE_SET 0x00000001 /* */ +#define NV_PMC_INTR_EN_CLEAR_VALUE 31:0 /* -W-VF */ +#endif // __gv100_dev_boot_h__ diff --git a/src/common/inc/swref/published/volta/gv100/dev_fault.h b/src/common/inc/swref/published/volta/gv100/dev_fault.h new file mode 100644 index 000000000..923d21c59 --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_fault.h @@ -0,0 +1,27 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * 
and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gv100_dev_fault_h__ +#define __gv100_dev_fault_h__ +#define NV_PFAULT_MMU_ENG_ID_GRAPHICS 64 /* */ +#endif // __gv100_dev_fault_h__ diff --git a/src/common/inc/swref/published/volta/gv100/dev_fb.h b/src/common/inc/swref/published/volta/gv100/dev_fb.h new file mode 100644 index 000000000..781fef5e2 --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_fb.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef __gv100_dev_fb_h__ +#define __gv100_dev_fb_h__ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG 0x00100A00 /* RW-4R */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_THRESHOLD 15:0 /* RWIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY 17:16 /* RWIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_64K 0x00000000 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_2M 0x00000001 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_16M 0x00000002 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_GRANULARITY_16G 0x00000003 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY 19:18 /* RWIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_64K 0x00000000 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_2M 0x00000001 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_16M 0x00000002 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_GRANULARITY_16G 0x00000003 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_USE_LIMIT 29:28 /* RWIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_USE_LIMIT_NONE 0x00000000 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_USE_LIMIT_QTR 0x00000001 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_USE_LIMIT_HALF 0x00000002 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MIMC_USE_LIMIT_FULL 0x00000003 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_USE_LIMIT 31:30 /* RWIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_USE_LIMIT_NONE 0x00000000 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_USE_LIMIT_QTR 0x00000001 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_USE_LIMIT_HALF 0x00000002 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_CONFIG_MOMC_USE_LIMIT_FULL 0x00000003 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_LO 0x00100A04 /* RW-4R */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_LO_EN 0:0 /* RWIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_LO_EN_FALSE 0x00000000 /* RWI-V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_LO_EN_TRUE 0x00000001 /* RW--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO 0x00100A18 /* R--4R */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL 0:0 /* R-IVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL_FALSE 0x0 /* R-I-V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL_TRUE 0x1 /* R---V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_PUSHED 1:1 /* R-IVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_PUSHED_FALSE 0x0 /* R---V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_PUSHED_TRUE 0x1 /* R-I-V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_WRITE_NACK 24:24 /* R-IVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_WRITE_NACK_FALSE 0x0 /* R-I-V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_WRITE_NACK_TRUE 0x1 /* R---V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR 0x00100A1C /* -W-4R */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_MIMC 0:0 /* -WIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_MIMC_INIT 0x0 /* -WI-V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_MIMC_CLR 0x1 /* -W--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_MOMC 1:1 /* -WIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_MOMC_INIT 0x0 /* -WI-V */ +#define 
NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_MOMC_CLR 0x1 /* -W--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_ALL_COUNTERS 2:2 /* -WIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_ALL_COUNTERS_INIT 0x0 /* -WI-V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_ALL_COUNTERS_CLR 0x1 /* -W--V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_WRITE_NACK 31:31 /* -WIVF */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_WRITE_NACK_INIT 0x0 /* -WI-V */ +#define NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_CLR_WRITE_NACK_CLR 0x1 /* -W--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET(i) (0x00100E2C+(i)*20) /* RW-4A */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET__SIZE_1 2 /* */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_PTR 19:0 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_PTR_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED 30:30 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_NO 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_YES 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_GETPTR_CORRUPTED_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_OVERFLOW 31:31 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_OVERFLOW_NO 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_OVERFLOW_YES 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_GET_OVERFLOW_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT(i) (0x00100E30+(i)*20) /* R--4A */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT__SIZE_1 2 /* */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_PTR 19:0 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_PTR_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED 30:30 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED_NO 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_GETPTR_CORRUPTED_YES 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_OVERFLOW 31:31 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_OVERFLOW_NO 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_BUFFER_PUT_OVERFLOW_YES 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS 0x00100E60 /* RW-4R */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR1_PHYS 0:0 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR1_PHYS_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR1_PHYS_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR1_PHYS_SET 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR1_VIRT 1:1 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR1_VIRT_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR1_VIRT_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR1_VIRT_SET 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR2_PHYS 2:2 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR2_PHYS_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR2_PHYS_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR2_PHYS_SET 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR2_VIRT 3:3 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR2_VIRT_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR2_VIRT_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_BAR2_VIRT_SET 0x00000001 /* 
RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_IFB_PHYS 4:4 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_IFB_PHYS_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_IFB_PHYS_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_IFB_PHYS_SET 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_IFB_VIRT 5:5 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_IFB_VIRT_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_IFB_VIRT_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_IFB_VIRT_SET 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_OTHER_PHYS 6:6 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_OTHER_PHYS_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_OTHER_PHYS_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_OTHER_PHYS_SET 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_OTHER_VIRT 7:7 /* RWEVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_OTHER_VIRT_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_OTHER_VIRT_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_DROPPED_OTHER_VIRT_SET 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE 8:8 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_SET 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE 9:9 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_SET 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_ERROR 10:10 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_ERROR_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_ERROR_SET 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_ERROR 11:11 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_ERROR_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_ERROR_SET 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_OVERFLOW 12:12 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_OVERFLOW_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_OVERFLOW_SET 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_OVERFLOW 13:13 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_OVERFLOW_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_OVERFLOW_SET 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_GETPTR_CORRUPTED 14:14 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_GETPTR_CORRUPTED_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_REPLAYABLE_GETPTR_CORRUPTED_SET 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_GETPTR_CORRUPTED 15:15 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_GETPTR_CORRUPTED_RESET 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_NON_REPLAYABLE_GETPTR_CORRUPTED_SET 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_BUSY 30:30 /* R-EVF */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_BUSY_FALSE 0x00000000 /* R-E-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_BUSY_TRUE 0x00000001 /* R---V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_VALID 31:31 /* RWEVF */ +#define 
NV_PFB_PRI_MMU_FAULT_STATUS_VALID_RESET 0x00000000 /* RWE-V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_VALID_CLEAR 0x00000001 /* RW--V */ +#define NV_PFB_PRI_MMU_FAULT_STATUS_VALID_SET 0x00000001 /* RW--V */ +#endif // __gv100_dev_fb_h__ diff --git a/src/common/inc/swref/published/volta/gv100/dev_fb_addendum.h b/src/common/inc/swref/published/volta/gv100/dev_fb_addendum.h new file mode 100644 index 000000000..2e5373e43 --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_fb_addendum.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef gv100_dev_fb_addendum_h +#define gv100_dev_fb_addendum_h + +#define NV_PFB_NISO_INTR_EN_SET_PERF_ALIAS_HUB_ACCESS_COUNTER_NOTIFY 0:0 /* -WIVF */ +#define NV_PFB_NISO_INTR_EN_SET_PERF_ALIAS_HUB_ACCESS_COUNTER_NOTIFY_INIT 0x0 /* -WI-V */ +#define NV_PFB_NISO_INTR_EN_SET_PERF_ALIAS_HUB_ACCESS_COUNTER_NOTIFY_SET 0x1 /* -W--V */ +#define NV_PFB_NISO_INTR_EN_SET_PERF_ALIAS_HUB_ACCESS_COUNTER_ERROR 1:1 /* -WIVF */ +#define NV_PFB_NISO_INTR_EN_SET_PERF_ALIAS_HUB_ACCESS_COUNTER_ERROR_INIT 0x0 /* -WI-V */ +#define NV_PFB_NISO_INTR_EN_SET_PERF_ALIAS_HUB_ACCESS_COUNTER_ERROR_SET 0x1 /* -W--V */ + +#endif // gv100_dev_fb_addendum_h diff --git a/src/common/inc/swref/published/volta/gv100/dev_mmu.h b/src/common/inc/swref/published/volta/gv100/dev_mmu.h new file mode 100644 index 000000000..d91346549 --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_mmu.h @@ -0,0 +1,101 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
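/*
 * Editor's note -- illustrative sketch only, not part of this change.  The
 * NV_PFB_PRI_MMU_FAULT_BUFFER_GET/PUT registers defined above are per-buffer
 * ring pointers; a consumer typically compares the two PTR fields to decide
 * whether fault entries are pending.  gpuReadReg32() is a hypothetical MMIO
 * accessor, and which index corresponds to the replayable vs. non-replayable
 * buffer is not spelled out in this header.  DRF_VAL() is the usual field
 * extractor from nvmisc.h.
 */
extern NvU32 gpuReadReg32(NvU32 offset);   /* placeholder register read */

static NvBool faultBufferHasEntries(NvU32 i)
{
    NvU32 get = gpuReadReg32(NV_PFB_PRI_MMU_FAULT_BUFFER_GET(i));
    NvU32 put = gpuReadReg32(NV_PFB_PRI_MMU_FAULT_BUFFER_PUT(i));

    /* Entries are pending whenever the read pointer lags the write pointer. */
    return (DRF_VAL(_PFB_PRI_MMU, _FAULT_BUFFER_GET, _PTR, get) !=
            DRF_VAL(_PFB_PRI_MMU, _FAULT_BUFFER_PUT, _PTR, put)) ? NV_TRUE : NV_FALSE;
}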
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gv100_dev_mmu_h__ +#define __gv100_dev_mmu_h__ +#define NV_MMU_PDE_APERTURE_BIG (0*32+1):(0*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_BIG_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_SIZE (0*32+3):(0*32+2) /* RWXVF */ +#define NV_MMU_PDE_SIZE_FULL 0x00000000 /* RW--V */ +#define NV_MMU_PDE_SIZE_HALF 0x00000001 /* RW--V */ +#define NV_MMU_PDE_SIZE_QUARTER 0x00000002 /* RW--V */ +#define NV_MMU_PDE_SIZE_EIGHTH 0x00000003 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_BIG_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_BIG_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL (1*32+1):(1*32+0) /* RWXVF */ +#define NV_MMU_PDE_APERTURE_SMALL_INVALID 0x00000000 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_VIDEO_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PDE_APERTURE_SMALL_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL (1*32+2):(1*32+2) /* RWXVF */ +#define NV_MMU_PDE_VOL_SMALL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_SMALL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PDE_VOL_BIG_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PDE_VOL_BIG_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PDE_ADDRESS_SMALL_SYS (1*32+31):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SMALL_VID (1*32+31-3):(1*32+4) /* RWXVF */ +#define NV_MMU_PDE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PDE__SIZE 8 +#define NV_MMU_PTE_VALID (0*32+0):(0*32+0) /* RWXVF */ +#define NV_MMU_PTE_VALID_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_VALID_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE (0*32+1):(0*32+1) /* RWXVF */ +#define NV_MMU_PTE_PRIVILEGE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_PRIVILEGE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY (0*32+2):(0*32+2) /* RWXVF */ +#define NV_MMU_PTE_READ_ONLY_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_ONLY_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ENCRYPTED (0*32+3):(0*32+3) /* RWXVF */ +#define NV_MMU_PTE_ENCRYPTED_TRUE 0x00000001 /* R---V */ +#define NV_MMU_PTE_ENCRYPTED_FALSE 0x00000000 /* R---V */ +#define NV_MMU_PTE_ADDRESS_SYS (0*32+31):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID (0*32+31-3):(0*32+4) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER (0*32+31):(0*32+32-3) /* RWXVF */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_0 0x00000000 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_1 0x00000001 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_2 0x00000002 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_3 0x00000003 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_4 0x00000004 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_5 0x00000005 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_VID_PEER_6 0x00000006 /* RW--V */ +#define 
NV_MMU_PTE_ADDRESS_VID_PEER_7 0x00000007 /* RW--V */ +#define NV_MMU_PTE_VOL (1*32+0):(1*32+0) /* RWXVF */ +#define NV_MMU_PTE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_MMU_PTE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE (1*32+2):(1*32+1) /* RWXVF */ +#define NV_MMU_PTE_APERTURE_VIDEO_MEMORY 0x00000000 /* RW--V */ +#define NV_MMU_PTE_APERTURE_PEER_MEMORY 0x00000001 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 /* RW--V */ +#define NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 /* RW--V */ +#define NV_MMU_PTE_LOCK (1*32+3):(1*32+3) /* RWXVF */ +#define NV_MMU_PTE_LOCK_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_LOCK_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_COMPTAGLINE (1*32+18+11):(1*32+12) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE (1*32+30):(1*32+30) /* RWXVF */ +#define NV_MMU_PTE_READ_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_READ_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE (1*32+31):(1*32+31) /* RWXVF */ +#define NV_MMU_PTE_WRITE_DISABLE_TRUE 0x1 /* RW--V */ +#define NV_MMU_PTE_WRITE_DISABLE_FALSE 0x0 /* RW--V */ +#define NV_MMU_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#define NV_MMU_PTE__SIZE 8 +#define NV_MMU_PTE_KIND (1*32+11):(1*32+4) /* RWXVF */ +#define NV_MMU_PTE_KIND_PITCH 0x00 /* R---V */ +#define NV_MMU_VER2_PTE_ADDRESS_VID (35-3):8 /* RWXVF */ +#define NV_MMU_VER2_PTE_COMPTAGLINE (18+35):36 /* RWXVF */ +#define NV_MMU_VER2_PTE_ADDRESS_SHIFT 0x0000000c /* */ +#endif // __gv100_dev_mmu_h__ diff --git a/src/common/inc/swref/published/volta/gv100/dev_pbdma.h b/src/common/inc/swref/published/volta/gv100/dev_pbdma.h new file mode 100644 index 000000000..a0f1249f2 --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_pbdma.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
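/*
 * Editor's note -- illustrative sketch only, not part of this change.  The
 * NV_MMU_PDE/PTE field ranges above use (dword*32 + bit) positions inside an
 * 8-byte entry (NV_MMU_PTE__SIZE == 8).  Treating the two dwords as one
 * little-endian NvU64, a minimal "valid, vidmem" PTE for a 4 KiB page could be
 * assembled as below; DRF_NUM64() is assumed to be available from nvmisc.h.
 */
static NvU64 buildSmallVidmemPte(NvU64 physAddr4kAligned)
{
    NvU64 pte = 0;

    pte |= DRF_NUM64(_MMU, _PTE, _VALID,    NV_MMU_PTE_VALID_TRUE);
    pte |= DRF_NUM64(_MMU, _PTE, _APERTURE, NV_MMU_PTE_APERTURE_VIDEO_MEMORY);
    /* The address field carries the page-aligned PA shifted right by 12. */
    pte |= DRF_NUM64(_MMU, _PTE, _ADDRESS_VID,
                     physAddr4kAligned >> NV_MMU_PTE_ADDRESS_SHIFT);
    return pte;
}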
+ */ + +#ifndef __gv100_dev_pbdma_h__ +#define __gv100_dev_pbdma_h__ +#define NV_PPBDMA_USERD_ADDR 31:9 /* RW-UF */ +#define NV_PPBDMA_USERD_HI_ADDR 7:0 /* RW-UF */ +#endif // __gv100_dev_pbdma_h__ diff --git a/src/common/inc/swref/published/volta/gv100/dev_ram.h b/src/common/inc/swref/published/volta/gv100/dev_ram.h new file mode 100644 index 000000000..16aaf4ca2 --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_ram.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gv100_dev_ram_h__ +#define __gv100_dev_ram_h__ +#define NV_RAMIN_BASE_SHIFT 12 /* */ +#define NV_RAMIN_ALLOC_SIZE 4096 /* */ +#define NV_RAMIN_ENABLE_ATS (135*32+31):(135*32+31) /* RWXUF */ +#define NV_RAMIN_ENABLE_ATS_TRUE 0x00000001 /* RW--V */ +#define NV_RAMIN_ENABLE_ATS_FALSE 0x00000000 /* RW--V */ +#define NV_RAMIN_PASID (135*32+(20-1)):(135*32+0) /* RWXUF */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_TARGET(i) ((168+(i)*4)*32+1):((168+(i)*4)*32+0) /* RWXUF */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_TARGET__SIZE_1 64 /* */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_TARGET_VID_MEM 0x00000000 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_TARGET_INVALID 0x00000001 /* RW--V */ // Note: INVALID should match PEER +#define NV_RAMIN_SC_PAGE_DIR_BASE_TARGET_SYS_MEM_COHERENT 0x00000002 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_VOL(i) ((168+(i)*4)*32+2):((168+(i)*4)*32+2) /* RWXUF */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_VOL__SIZE_1 64 /* */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_VOL_TRUE 0x00000001 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_VOL_FALSE 0x00000000 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_FAULT_REPLAY_TEX(i) ((168+(i)*4)*32+4):((168+(i)*4)*32+4) /* RWXUF */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_FAULT_REPLAY_TEX__SIZE_1 64 /* */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_FAULT_REPLAY_TEX_DISABLED 0x00000000 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_FAULT_REPLAY_TEX_ENABLED 0x00000001 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_FAULT_REPLAY_GCC(i) ((168+(i)*4)*32+5):((168+(i)*4)*32+5) /* RWXUF */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_FAULT_REPLAY_GCC__SIZE_1 64 /* */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_FAULT_REPLAY_GCC_DISABLED 0x00000000 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_FAULT_REPLAY_GCC_ENABLED 0x00000001 /* RW--V */ +#define 
NV_RAMIN_SC_USE_VER2_PT_FORMAT(i) ((168+(i)*4)*32+10):((168+(i)*4)*32+10) /* RWXUF */ +#define NV_RAMIN_SC_USE_VER2_PT_FORMAT__SIZE_1 64 /* */ +#define NV_RAMIN_SC_USE_VER2_PT_FORMAT_FALSE 0x00000000 /* RW--V */ +#define NV_RAMIN_SC_USE_VER2_PT_FORMAT_TRUE 0x00000001 /* RW--V */ +#define NV_RAMIN_SC_BIG_PAGE_SIZE(i) ((168+(i)*4)*32+11):((168+(i)*4)*32+11) /* RWXUF */ +#define NV_RAMIN_SC_BIG_PAGE_SIZE__SIZE_1 64 /* */ +#define NV_RAMIN_SC_BIG_PAGE_SIZE_64KB 0x00000001 /* RW--V */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_LO(i) ((168+(i)*4)*32+31):((168+(i)*4)*32+12) /* RWXUF */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_LO__SIZE_1 64 /* */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_HI(i) ((169+(i)*4)*32+31):((169+(i)*4)*32+0) /* RWXUF */ +#define NV_RAMIN_SC_PAGE_DIR_BASE_HI__SIZE_1 64 /* */ +#define NV_RAMIN_SC_ENABLE_ATS(i) ((170+(i)*4)*32+31):((170+(i)*4)*32+31) /* RWXUF */ +#define NV_RAMIN_SC_PASID(i) ((170+(i)*4)*32+(20-1)):((170+(i)*4)*32+0) /* RWXUF */ +#define NV_RAMRL_ENTRY_TSG_TIMESLICE_SCALE_3 0x00000003 /* RWI-V */ +#define NV_RAMRL_ENTRY_TSG_TIMESLICE_TIMEOUT_128 0x00000080 /* RWI-V */ +#define NV_RAMRL_ENTRY_TSG_LENGTH_MAX 0x00000080 /* RW--V */ +#define NV_RAMRL_ENTRY_SIZE 16 /* */ +#endif // __gv100_dev_ram_h__ diff --git a/src/common/inc/swref/published/volta/gv100/dev_timer.h b/src/common/inc/swref/published/volta/gv100/dev_timer.h new file mode 100644 index 000000000..3ea02305c --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_timer.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
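/*
 * Editor's note -- illustrative sketch only, not part of this change.  The
 * NV_RAMIN_SC_* fields above encode their location as (dword*32 + bit) inside
 * the 4 KiB instance block (NV_RAMIN_ALLOC_SIZE).  For sub-context i, the low
 * and high page-directory-base words therefore sit at dwords (168 + 4*i) and
 * (169 + 4*i); the helper below only derives the corresponding byte offsets --
 * how the instance block is mapped and written is outside this header's scope.
 */
static void raminScPdbByteOffsets(NvU32 subctxId, NvU32 *pLoOffset, NvU32 *pHiOffset)
{
    *pLoOffset = (168 + subctxId * 4) * 4;   /* NV_RAMIN_SC_PAGE_DIR_BASE_LO(i) */
    *pHiOffset = (169 + subctxId * 4) * 4;   /* NV_RAMIN_SC_PAGE_DIR_BASE_HI(i) */
}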
+ */ +#ifndef __gv100_dev_timer_h__ +#define __gv100_dev_timer_h__ + +#define NV_PTIMER_TIME_0 0x00009400 /* RW-4R */ +#define NV_PTIMER_TIME_1 0x00009410 /* RW-4R */ +#define NV_PTIMER_TIME_PRIV_LEVEL_MASK 0x00009430 /* RW-4R */ +#define NV_PTIMER_TIME_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0 4:4 +#define NV_PTIMER_TIME_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_ENABLE 0x00000001 +#define NV_PTIMER_TIME_PRIV_LEVEL_MASK_WRITE_PROTECTION_LEVEL0_DISABLE 0x00000000 + +#endif // __gv100_dev_timer_h__ diff --git a/src/common/inc/swref/published/volta/gv100/dev_usermode.h b/src/common/inc/swref/published/volta/gv100/dev_usermode.h new file mode 100644 index 000000000..8222ba80c --- /dev/null +++ b/src/common/inc/swref/published/volta/gv100/dev_usermode.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __gv100_dev_usermode_h__ +#define __gv100_dev_usermode_h__ +#define NV_USERMODE_TIME_0 0x00810080 /* R--4R */ +#define NV_USERMODE_TIME_0_NSEC 31:5 /* R-XUF */ +#define NV_USERMODE_TIME_1 0x00810084 /* R--4R */ +#define NV_USERMODE_TIME_1_NSEC 28:0 /* R-XUF */ +#endif // __gv100_dev_usermode_h__ diff --git a/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h b/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h new file mode 100644 index 000000000..ed6bba9a4 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmi_frlInterface.h @@ -0,0 +1,268 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
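/*
 * Editor's note -- illustrative sketch only, not part of this change.
 * NV_PTIMER_TIME_0 holds the low 32 bits and NV_PTIMER_TIME_1 the high bits of
 * the 64-bit nanosecond timestamp, so a coherent read needs the usual
 * high/low/high sequence.  gpuReadReg32() is a hypothetical MMIO accessor.
 */
extern NvU32 gpuReadReg32(NvU32 offset);   /* placeholder register read */

static NvU64 readPtimerNs(void)
{
    NvU32 hi, lo, hi2;

    do
    {
        hi  = gpuReadReg32(NV_PTIMER_TIME_1);
        lo  = gpuReadReg32(NV_PTIMER_TIME_0);
        hi2 = gpuReadReg32(NV_PTIMER_TIME_1);
    } while (hi != hi2);   /* retry if the high word rolled over mid-read */

    return ((NvU64)hi << 32) | lo;
}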
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** @file nvhdmi_frlInterface.h + * @brief This file provides FRL related interfaces between client and HDMI lib + */ + + +#ifndef _NVHDMI_FRLINTERFACE_H_ +#define _NVHDMI_FRLINTERFACE_H_ + +#include "nvhdmipkt.h" +#include "nvHdmiFrlCommon.h" + +#include "../timing/nvtiming.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +// DSC encoder color format bitmasks (these match DSC lib & RM ctrl 0073 fields) +typedef enum tagHDMI_DSC_ENCODER_COLOR_FORMAT +{ + HDMI_DSC_ENCODER_COLOR_FORMAT_RGB = 1, + HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCR444 = 2, + HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCRNATIVE422 = 4, + HDMI_DSC_ENCODER_COLOR_FORMAT_YCBCRNATIVE420 = 8 +} HDMI_DSC_ENCODER_COLOR_FORMAT; + +// Options for QueryFRLConfig interface +typedef enum tagHDMI_QUERY_FRL_OPTION +{ + HDMI_QUERY_FRL_ANY_CONFIG = 0, // any FRL config that supports mode + HDMI_QUERY_FRL_OPTIMUM_CONFIG, // find best fit config for this mode + HDMI_QUERY_FRL_LOWEST_BANDWIDTH, // min bw + HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY, // trade off bandwidth for pixel quality + HDMI_QUERY_FRL_HIGHEST_BANDWIDTH +} HDMI_QUERY_FRL_OPTION; + +/************************************************************************************************* +* HDMI_VIDEO_TRANSPORT_INFO: * +* Video transport format - a combination of timing, bpc, packing represents what goes on the link* +* client passes this in, lib uses this for bandwidth calculations to decide required FRL rate * +**************************************************************************************************/ +typedef struct tagHDMI_VIDEO_TRANSPORT_INFO +{ + const NVT_TIMING *pTiming; // backend timing + HDMI_BPC bpc; + HDMI_PIXEL_PACKING packing; + NvBool bDualHeadMode; // 2H1OR +} HDMI_VIDEO_TRANSPORT_INFO; + +/************************************************************************************************ +* HDMI_QUERY_FRL_CLIENT_CONTROL: * +* Allow client to force request DSC/FRL configurations. For testing purpose or otherwise * +* eg, client could query for any fitting FRL config instead of most optimum. It could trade off * +* bandwidth for pixel quality. * +*************************************************************************************************/ +typedef struct tagHDMI_QUERY_FRL_CLIENT_CONTROL +{ + HDMI_QUERY_FRL_OPTION option; + + NvU32 forceFRLRate : 1; + NvU32 forceAudio2Ch48KHz : 1; + NvU32 enableDSC : 1; + NvU32 forceSliceCount : 1; + NvU32 forceSliceWidth : 1; + NvU32 forceBppx16 : 1; + NvU32 skipGeneratePPS : 1; + NvU32 reserved : 25; + + // client can set below params if respective force flag is set + NvU32 sliceCount; + NvU32 sliceWidth; + NvU32 bitsPerPixelX16; + HDMI_FRL_DATA_RATE frlRate; + +} HDMI_QUERY_FRL_CLIENT_CONTROL; + +/************************************************************************************************ +* HDMI_SRC_CAPS: * +* Input to HDMI lib. 
* +* * +* Client gives info about GPU capabilities - DSC related caps * +*************************************************************************************************/ +typedef struct tagHDMI_SRC_CAPS +{ + struct + { + NvU32 dscCapable : 1; + NvU32 bppPrecision : 8; + NvU32 encoderColorFormatMask : 8; + NvU32 lineBufferSizeKB : 8; + NvU32 rateBufferSizeKB : 8; + NvU32 maxNumHztSlices : 8; + NvU32 lineBufferBitDepth : 8; + NvU32 dualHeadBppTargetMaxX16 : 16; + NvU32 maxWidthPerSlice; + } dscCaps; + + HDMI_FRL_DATA_RATE linkMaxFRLRate; +} HDMI_SRC_CAPS; + +/************************************************************************************************ +* HDMI_SINK_CAPS: * +* Input to HDMI lib. * +* * +* Client gives info from EDID, HDMI lib uses DSC related info to call DSC lib to generate PPS * +* Audio information from CEA861 block is used for bandwidth calculations * +* linkMaxFRLRate and linkMaxFRLRateDSC are max link rates determined from physical link * +* training. * +*************************************************************************************************/ +typedef struct tagHDMI_SINK_CAPS +{ + const NVT_HDMI_FORUM_INFO *pHdmiForumInfo; + NvU32 audioType; + NvU32 maxAudioChannels; + NvU32 maxAudioFreqKHz; + NvBool bHBRAudio; + HDMI_FRL_DATA_RATE linkMaxFRLRate; + HDMI_FRL_DATA_RATE linkMaxFRLRateDSC; +} HDMI_SINK_CAPS; + +/************************************************************************************************ +* HDMI_FRL_CONFIG: * +* Output from HDMI lib. Client uses this info for modeset * +* * +* maxSupportedAudioCh, maxSupportedAudioFreqKHz - max possible audio settings at the chosen * +* FRL rate, though the sink caps may have reported higher caps * +* * +* dscInfo - if current timing requires DSC, lib returns PPS information here * +* * +* bitsPerPixelx16 - optimum bpp value calculated per spec * +* dscHActiveBytes - in compressed video transport mode, number of bytes in 1 line * +* dscHActiveTriBytes - in compressed video transport mode, number of tri-bytes in 1 line * +* dscHBlankTriBytes - in compressed video transport mode, number of tri-bytes to be sent * +* to represent horizontal blanking * +* * +* pps[32] - PPS data. HDMI lib calls DSC lib to fill it in * +*************************************************************************************************/ +#define HDMI_DSC_MAX_PPS_SIZE_DWORD 32 +typedef struct tagHDMI_FRL_CONFIG +{ + HDMI_FRL_DATA_RATE frlRate; + NvU32 maxSupportedAudioCh; + NvU32 maxSupportedAudioFreqKHz; + + // DSC info client will use for core channel modeset + struct + { + NvU32 bEnableDSC : 1; + NvU32 reserved : 31; + + NvU32 bitsPerPixelX16; + NvU32 sliceCount; + NvU32 sliceWidth; + NvU32 pps[HDMI_DSC_MAX_PPS_SIZE_DWORD]; + NvU32 dscHActiveBytes; + NvU32 dscHActiveTriBytes; + NvU32 dscHBlankTriBytes; + NvU32 dscTBlankToTTotalRatioX1k; + } dscInfo; + +} HDMI_FRL_CONFIG; + +/************************************************************************************************ +* NvHdmi_AssessLinkCapabilities: * +* * +* Input parameters: * +* subDevice - Sub Device ID. * +* displayId - Display ID. * +* pSinkEdid - EDID of sink * +* * +* Output parameters: * +* pSrcCaps - src capabilities - DSC caps * +* pSinkCaps - sink capabilities - actual caps calculated from link training * +* * +* Calls RM to get DSC related src side caps. 
Performs physical link training to determine if * +* sink reported max FRL rate can actually be supported on the physical link * +*************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_AssessLinkCapabilities(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); + +/************************************************************************************************ +* NvHdmi_QueryFRLConfig: * +* * +* Input parameters: * +* libHandle - Hdmi library handle, provided on initializing the library. * +* pVidTransInfo - information about timing, bpc and packing * +* pClientCtrl - settings client wants to see set. HDMI lib tries to honor these * +* pSinkCaps - sink capabilities * +* * +* Output parameters: * +* pFRLConfig - chosen FRL rate and DSC configuration * +* * +*************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_QueryFRLConfig(NvHdmiPkt_Handle libHandle, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig); + +/************************************************************************************************ +* NvHdmi_SetFRLConfig: * +* * +* Input parameters: * +* libHandle - Hdmi library handle, provided on initializing the library. * +* subDevice - Sub Device ID. * +* displayId - Display ID. * +* bFakeLt - Indicates that the GPU's link configuration should be forced and that * +* configuration of the sink device should be skipped. * +* pFRLConfig - Link configuration to set. * +* * +************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_SetFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig); + +/************************************************************************************************ +* NvHdmi_ClearFRLConfig: * +* * +* Input parameters: * +* libHandle - Hdmi library handle, provided on initializing the library. * +* subDevice - Sub Device ID. * +* displayId - Display ID to change the settings on. * +* * +************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmi_ClearFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId); + +#ifdef __cplusplus +} +#endif + +#endif // _NVHDMI_FRLINTERFACE_H_ diff --git a/src/common/modeset/hdmipacket/nvhdmipkt.c b/src/common/modeset/hdmipacket/nvhdmipkt.c new file mode 100644 index 000000000..0a843bbde --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt.c @@ -0,0 +1,616 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
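/*
 * Editor's note -- illustrative sketch only, not part of this change.  Typical
 * call order for the FRL interfaces declared in nvhdmi_frlInterface.h above:
 * assess link capabilities, query an FRL/DSC configuration for the desired
 * timing, then program it.  The timing, bpc, packing and EDID arguments are
 * placeholders the real modeset client would supply.
 */
static NVHDMIPKT_RESULT
sketchEnableFrl(NvHdmiPkt_Handle lib, NvU32 subDevice, NvU32 displayId,
                const NVT_EDID_INFO *pEdid, const NVT_TIMING *pTiming,
                HDMI_BPC bpc, HDMI_PIXEL_PACKING packing)
{
    HDMI_SRC_CAPS                 srcCaps;
    HDMI_SINK_CAPS                sinkCaps;
    HDMI_FRL_CONFIG               frlConfig;
    HDMI_VIDEO_TRANSPORT_INFO     vti  = { pTiming, bpc, packing, NV_FALSE };
    HDMI_QUERY_FRL_CLIENT_CONTROL ctrl = { HDMI_QUERY_FRL_OPTIMUM_CONFIG };
    NVHDMIPKT_RESULT              r;

    r = NvHdmi_AssessLinkCapabilities(lib, subDevice, displayId,
                                      pEdid, &srcCaps, &sinkCaps);
    if (r != NVHDMIPKT_SUCCESS)
        return r;

    r = NvHdmi_QueryFRLConfig(lib, &vti, &ctrl, &srcCaps, &sinkCaps, &frlConfig);
    if (r != NVHDMIPKT_SUCCESS)
        return r;

    /* bFakeLt = NV_FALSE: train the link and configure the sink normally. */
    return NvHdmi_SetFRLConfig(lib, subDevice, displayId, NV_FALSE, &frlConfig);
}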
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt.c + * + * Purpose: Provide initialization functions for HDMI library + */ + +#include "nvlimits.h" +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" +#include "nvhdmipkt_internal.h" + +#include "../timing/nvt_dsc_pps.h" + +#include "class/cl9170.h" +#include "class/cl917d.h" +#include "class/cl9270.h" +#include "class/cl927d.h" +#include "class/cl9470.h" +#include "class/cl947d.h" +#include "class/cl9570.h" +#include "class/cl957d.h" +#include "class/clc370.h" +#include "class/clc37d.h" +#include "class/clc570.h" +#include "class/clc57d.h" +#include "class/clc670.h" +#include "class/clc67d.h" + +// Class hierarchy structure +typedef struct tagNVHDMIPKT_CLASS_HIERARCHY +{ + NVHDMIPKT_CLASS_ID classId; + NVHDMIPKT_CLASS_ID parentClassId; + NvBool isRootClass; + void (*initInterface)(NVHDMIPKT_CLASS*); + NvBool (*constructor) (NVHDMIPKT_CLASS*); + void (*destructor) (NVHDMIPKT_CLASS*); + NvU32 displayClass; + NvU32 coreDmaClass; +} NVHDMIPKT_CLASS_HIERARCHY; + +/************************************************************************************************* + * hierarchy structure establishes the relationship between classes. * + * If isRootClass=NV_TRUE, it is a root class, else it is a child of a class. classId * + * also acts as an index, and hence the order of the structure below should be maintanied. * + * * + * ASSUMPTION: There are two huge assumptions while creating the class relationship and * + * while traversing it. 1. That of the Class ID definitaion (NVHDMIPKT_CLASS_ID), which has * + * to be strictly indexed, that is 0, 1, 2... and so on. And 2. that the structure * + * CLASS_HIERARCHY (above) follow that indexing. That is NVHDMIPKT_0073_CLASS is value 0 and * + * the first entry in CLASS_HIERARCHY, NVHDMIPKT_9171_CLASS is value 1 and hence the second * + * entry in CLASS_HIERARCHY, so on and so forth. * + * * + * HOW TO ADD A NEW CLASS? * + * 1. Add an ID in NVHDMIPKT_CLASS_ID. * + * 2. Add a source file nvhdmipkt_XXXX.c, and include it into makefiles. Makefiles of * + * Mods, Windows, and Linux. * + * 3. Provide initializeHdmiPktInterfaceXXXX, hdmiConstructorXXXX, and, hdmiDestructorXXXX. * + * 4. Add functions that needs to be overridden in NVHDMIPKT_CLASS. * + * 5. Add a relationship in hierarchy[] array. The new class can be a subclass or a root. In * + * case of a root all the interfaces needs to be overridden in NVHDMIPKT_CLASS. 
* + ************************************************************************************************/ +static const NVHDMIPKT_CLASS_HIERARCHY hierarchy[] = +{ + {// Index 0==NVHDMIPKT_0073_CLASS + NVHDMIPKT_0073_CLASS, // classId + NVHDMIPKT_0073_CLASS, // parentClassId + NV_TRUE, // isRootClass + initializeHdmiPktInterface0073, // initInterface + hdmiConstructor0073, // constructor + hdmiDestructor0073, // destructor + 0, // displayClass + 0 // coreDmaClass + }, + {// Index 1==NVHDMIPKT_9171_CLASS + NVHDMIPKT_9171_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_TRUE, // isRootClass + initializeHdmiPktInterface9171, // initInterface + hdmiConstructor9171, // constructor + hdmiDestructor9171, // destructor + NV9170_DISPLAY, // displayClass + NV917D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 2==NVHDMIPKT_9271_CLASS + NVHDMIPKT_9271_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterface9271, // initInterface + hdmiConstructor9271, // constructor + hdmiDestructor9271, // destructor + NV9270_DISPLAY, // displayClass + NV927D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 3==NVHDMIPKT_9471_CLASS + NVHDMIPKT_9471_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterface9471, // initInterface + hdmiConstructor9471, // constructor + hdmiDestructor9471, // destructor + NV9470_DISPLAY, // displayClass + NV947D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 4==NVHDMIPKT_9571_CLASS + NVHDMIPKT_9571_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterface9571, // initInterface + hdmiConstructor9571, // constructor + hdmiDestructor9571, // destructor + NV9570_DISPLAY, // displayClass + NV957D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 5==NVHDMIPKT_C371_CLASS + NVHDMIPKT_C371_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC371, // initInterface + hdmiConstructorC371, // constructor + hdmiDestructorC371, // destructor + NVC370_DISPLAY, // displayClass + NVC37D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 6==NVHDMIPKT_C571_CLASS + // Note that Turing (C57x) has a distinct displayClass and coreDmaClass, + // but it inherits the _DISP_SF_USER class from Volta (C37x). We call this + // NVHDMIPKT_C571_CLASS, but reuse initInterface()/constructor()/destructor() + // from C371. 
+ NVHDMIPKT_C571_CLASS, + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC371, // initInterface + hdmiConstructorC371, // constructor + hdmiDestructorC371, // destructor + NVC570_DISPLAY, // displayClass + NVC57D_CORE_CHANNEL_DMA // coreDmaClass + }, + {// Index 7==NVHDMIPKT_C671_CLASS + NVHDMIPKT_C671_CLASS, // classId + NVHDMIPKT_9171_CLASS, // parentClassId + NV_FALSE, // isRootClass + initializeHdmiPktInterfaceC671, // initInterface + hdmiConstructorC671, // constructor + hdmiDestructorC671, // destructor + NVC670_DISPLAY, // displayClass + NVC67D_CORE_CHANNEL_DMA // coreDmaClass + }, +}; + +#if defined(DSC_CALLBACK_MODIFIED) +// Callbacks for DSC PPS library +void *hdmipktMallocCb(const void *clientHandle, NvLength size); +void hdmipktFreeCb(const void *clientHandle, void *pMemPtr); + +void *hdmipktMallocCb(const void *clientHandle, NvLength size) +{ + const NVHDMIPKT_CLASS *pClass = (const NVHDMIPKT_CLASS*)(clientHandle); + return pClass->callback.malloc(pClass->cbHandle, size); +} + +void hdmipktFreeCb(const void *clientHandle, void *pMemPtr) +{ + const NVHDMIPKT_CLASS *pClass = (const NVHDMIPKT_CLASS*)(clientHandle); + pClass->callback.free(pClass->cbHandle, pMemPtr); +} +#endif // DSC_CALLBACK_MODIFIED + +/********************************** HDMI Library interfaces *************************************/ +/* + * NvHdmiPkt_PacketCtrl + */ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketCtrl(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + return pClass->hdmiPacketCtrl(pClass, + subDevice, + displayId, + head, + packetType, + transmitControl); +} + +/* + * NvHdmiPkt_PacketWrite + */ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketWrite(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + return pClass->hdmiPacketWrite(pClass, + subDevice, + displayId, + head, + packetType, + transmitControl, + packetLen, + pPacket); +} + +NVHDMIPKT_RESULT +NvHdmi_AssessLinkCapabilities(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + if (!pSinkEdid || + !pSrcCaps || + !pSinkCaps) + { + return NVHDMIPKT_INVALID_ARG; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiAssessLinkCapabilities(pClass, + subDevice, + displayId, + pSinkEdid, + pSrcCaps, + pSinkCaps); +} +/* + * NvHdmi_QueryFRLConfig + */ +NVHDMIPKT_RESULT +NvHdmi_QueryFRLConfig(NvHdmiPkt_Handle libHandle, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + if (!pVidTransInfo || + !pClientCtrl || + !pSrcCaps || + !pSinkCaps || + !pFRLConfig) + { + return NVHDMIPKT_INVALID_ARG; + 
} + + // if there is no FRL capability reported fail this call + if (pSinkCaps->linkMaxFRLRate == HDMI_FRL_DATA_RATE_NONE) + { + return NVHDMIPKT_FAIL; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiQueryFRLConfig(pClass, + pVidTransInfo, + pClientCtrl, + pSrcCaps, + pSinkCaps, + pFRLConfig); +} + +/* + * NvHdmi_SetFRLConfig + */ +NVHDMIPKT_RESULT +NvHdmi_SetFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + if (!pFRLConfig) + { + return NVHDMIPKT_INVALID_ARG; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiSetFRLConfig(pClass, + subDevice, + displayId, + bFakeLt, + pFRLConfig); + +} + +/* + * NvHdmi_ClearFRLConfig + */ +NVHDMIPKT_RESULT +NvHdmi_ClearFRLConfig(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId) +{ + if (libHandle == NVHDMIPKT_INVALID_HANDLE) + { + return NVHDMIPKT_LIBRARY_INIT_FAIL; + } + + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + return pClass->hdmiClearFRLConfig(pClass, + subDevice, + displayId); +} + +/*************************** HDMI Library internal helper functions *****************************/ +/* + * NvHdmiPkt_HwClass2HdmiClass + * internal function; translates display/display-dma class to hdmi class + */ +static NVHDMIPKT_CLASS_ID +NvHdmiPkt_HwClass2HdmiClass(NvU32 const hwClass) +{ + NVHDMIPKT_CLASS_ID hdmiClassId = NVHDMIPKT_9571_CLASS; + NvU32 i = 0; + + for (i = 0; i < NVHDMIPKT_INVALID_CLASS; i++) + { + if ((hierarchy[i].displayClass == hwClass) || + (hierarchy[i].coreDmaClass == hwClass)) + { + hdmiClassId = hierarchy[i].classId; + break; + } + } + + // Assign default class 73 to pre-Kepler families + if (hwClass < NV9170_DISPLAY) + { + hdmiClassId = NVHDMIPKT_0073_CLASS; + } + + return hdmiClassId; +} + +/* + * NvHdmiPkt_InitInterfaces + * internal function; calls class init interface functions + */ +static void +NvHdmiPkt_InitInterfaces(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Recurse to the root first, and then call each initInterface() method + // from root to child. + if (!hierarchy[thisClassId].isRootClass) + { + NvHdmiPkt_InitInterfaces(hierarchy[thisClassId].parentClassId, pClass); + } + hierarchy[thisClassId].initInterface(pClass); +} + +static void +NvHdmiPkt_CallDestructors(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Destructor calls are made from this to root class. + hierarchy[thisClassId].destructor(pClass); + if (!hierarchy[thisClassId].isRootClass) + { + NvHdmiPkt_CallDestructors(hierarchy[thisClassId].parentClassId, pClass); + } +} + +/* + * NvHdmiPkt_CallConstructors + * internal function; calls class constructors and returns boolean success/failure + */ +static NvBool +NvHdmiPkt_CallConstructors(NVHDMIPKT_CLASS_ID const thisClassId, + NVHDMIPKT_CLASS* const pClass) +{ + // Recurse to the root first, and then call each constructor + // from root to child. 
+ if (!hierarchy[thisClassId].isRootClass) + { + if (!NvHdmiPkt_CallConstructors(hierarchy[thisClassId].parentClassId, pClass)) + { + return NV_FALSE; + } + } + + if (!hierarchy[thisClassId].constructor(pClass)) + { + if (!hierarchy[thisClassId].isRootClass) + { + // Backtrack on constructor failure + NvHdmiPkt_CallDestructors(hierarchy[thisClassId].parentClassId, pClass); + } + + return NV_FALSE; + } + + return NV_TRUE; +} + +/******************************** HDMI Library Init functions ***********************************/ +/* + * NvHdmiPkt_InitializeLibrary + */ +NvHdmiPkt_Handle +NvHdmiPkt_InitializeLibrary(NvU32 const hwClass, + NvU32 const numSubDevices, + NvHdmiPkt_CBHandle const cbHandle, + const NVHDMIPKT_CALLBACK* const pCallbacks, + NvU32 const sfUserHandle, + const NVHDMIPKT_RM_CLIENT_HANDLES* const pClientHandles) +{ + NVHDMIPKT_CLASS* pClass = 0; + NvU32 i = 0; + NvBool result = NV_FALSE; + NVHDMIPKT_CLASS_ID thisClassId = NVHDMIPKT_INVALID_CLASS; + + // Argument validations + if (pCallbacks == 0 || numSubDevices == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // Validating RM handles/callbacks +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (sfUserHandle == 0 || pClientHandles == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + if (pCallbacks->rmGetMemoryMap == 0 || + pCallbacks->rmFreeMemoryMap == 0 || + pCallbacks->rmDispControl2 == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + // Mandatory mutex callbacks. + if (pCallbacks->acquireMutex == 0 || pCallbacks->releaseMutex == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // Mandatory memory allocation callbacks. + if (pCallbacks->malloc == 0 || pCallbacks->free == 0) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + pClass = pCallbacks->malloc(cbHandle, sizeof(NVHDMIPKT_CLASS)); + if (!pClass) + { + goto NvHdmiPkt_InitializeLibrary_exit; + } + + // 0. 
Get the hdmi class ID + thisClassId = NvHdmiPkt_HwClass2HdmiClass(hwClass); + + // Init data + NVMISC_MEMSET(pClass, 0, sizeof(NVHDMIPKT_CLASS)); + + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + pClass->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + } + + pClass->numSubDevices = numSubDevices; + pClass->cbHandle = cbHandle; + pClass->thisId = thisClassId; + + // RM handles/callbacks +#if NVHDMIPKT_RM_CALLS_INTERNAL + pClass->isRMCallInternal = NV_TRUE; + pClass->sfUserHandle = sfUserHandle; + pClass->clientHandles.hClient = pClientHandles->hClient; + pClass->clientHandles.hDevice = pClientHandles->hDevice; + pClass->clientHandles.hDisplay = pClientHandles->hDisplay; + + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + pClass->clientHandles.hSubDevices[i] = pClientHandles->hSubDevices[i]; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + pClass->isRMCallInternal = NV_FALSE; + pClass->callback.rmGetMemoryMap = pCallbacks->rmGetMemoryMap; + pClass->callback.rmFreeMemoryMap = pCallbacks->rmFreeMemoryMap; + pClass->callback.rmDispControl2 = pCallbacks->rmDispControl2; +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + pClass->callback.acquireMutex = pCallbacks->acquireMutex; + pClass->callback.releaseMutex = pCallbacks->releaseMutex; + + pClass->callback.malloc = pCallbacks->malloc; + pClass->callback.free = pCallbacks->free; + +#if !defined (NVHDMIPKT_DONT_USE_TIMER) + pClass->callback.setTimeout = pCallbacks->setTimeout; + pClass->callback.checkTimeout = pCallbacks->checkTimeout; +#endif + +#if defined (DEBUG) + pClass->callback.print = pCallbacks->print; + pClass->callback.assert = pCallbacks->assert; +#endif + + // 1. Init interfaces + NvHdmiPkt_InitInterfaces(thisClassId, pClass); + + // 2. Constructor calls + result = NvHdmiPkt_CallConstructors(thisClassId, pClass); + +#if defined(DSC_CALLBACK_MODIFIED) + DSC_CALLBACK callbacks; + NVMISC_MEMSET(&callbacks, 0, sizeof(DSC_CALLBACK)); + callbacks.clientHandle = pClass; + callbacks.dscMalloc = hdmipktMallocCb; + callbacks.dscFree = hdmipktFreeCb; + DSC_InitializeCallback(callbacks); +#endif // DSC_CALLBACK_MODIFIED + +NvHdmiPkt_InitializeLibrary_exit: + if (result) + { + NvHdmiPkt_Print(pClass, "Initialize Success."); + } + else + { + if (pClass) + { + NvHdmiPkt_Print(pClass, "Initialize Failed."); + } + if (pCallbacks && pCallbacks->free) + { + pCallbacks->free(cbHandle, pClass); + } + } + + return (result == NV_TRUE) ? toHdmiPktHandle(pClass) : NVHDMIPKT_INVALID_HANDLE; +} + +/* + * NvHdmiPkt_DestroyLibrary + */ +void +NvHdmiPkt_DestroyLibrary(NvHdmiPkt_Handle libHandle) +{ + NVHDMIPKT_CLASS* pClass = fromHdmiPktHandle(libHandle); + NVHDMIPKT_CLASS_ID currClassId = NVHDMIPKT_0073_CLASS; + + if (pClass != 0) + { + NvHdmiPkt_Print(pClass, "Destroy."); + NvHdmiPkt_CBHandle cbHandle = pClass->cbHandle; + void (*freeCb) (NvHdmiPkt_CBHandle handle, + void *pMem) = pClass->callback.free; + + currClassId = pClass->thisId; + NvHdmiPkt_CallDestructors(currClassId, pClass); + + freeCb(cbHandle, pClass); + } +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt.h b/src/common/modeset/hdmipacket/nvhdmipkt.h new file mode 100644 index 000000000..35b2fbdc3 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt.h @@ -0,0 +1,317 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
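/*
 * Editor's note -- illustrative sketch only, not part of this change.  The
 * minimum a client wires up before NvHdmiPkt_InitializeLibrary() can succeed:
 * malloc/free and the mutex callbacks are mandatory in every configuration,
 * while RM access comes either from sfUserHandle/pClientHandles (the
 * NVHDMIPKT_RM_CALLS_INTERNAL path shown here) or from the rmGetMemoryMap/
 * rmFreeMemoryMap/rmDispControl2 callbacks.  The my*() helpers, the display
 * class and the handle values are placeholders, not APIs from this change.
 */
extern void *myAlloc (NvHdmiPkt_CBHandle h, NvLength numBytes);
extern void  myFree  (NvHdmiPkt_CBHandle h, void *pMem);
extern void  myLock  (NvHdmiPkt_CBHandle h);
extern void  myUnlock(NvHdmiPkt_CBHandle h);

static NvHdmiPkt_Handle
sketchInitHdmiLib(NvU32 sfUserHandle, const NVHDMIPKT_RM_CLIENT_HANDLES *pRmHandles)
{
    NVHDMIPKT_CALLBACK cb = { 0 };

    cb.malloc       = myAlloc;    /* heap allocations made on the client's behalf */
    cb.free         = myFree;
    cb.acquireMutex = myLock;     /* keeps packet writes atomic */
    cb.releaseMutex = myUnlock;

    /* One subdevice; Ampere-era display class used purely as an example. */
    return NvHdmiPkt_InitializeLibrary(NVC670_DISPLAY, 1,
                                       (NvHdmiPkt_CBHandle)0, &cb,
                                       sfUserHandle, pRmHandles);
}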
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt.h + * + * Purpose: This file is a common header for all HDMI Library Clients + */ + +#ifndef _NVHDMIPKT_H_ +#define _NVHDMIPKT_H_ + +#include + +#include "nvmisc.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +/**************************** HDMI Library defines, enums and structs ***************************/ +/************************************************************************************************ + * NOTE: NVHDMIPKT_RM_CALLS_INTERNAL define tells this library to make RM calls (allocate, free * + * control, etc.) internally and not through callbacks into the client. * + ************************************************************************************************/ +#if !defined(NVHDMIPKT_RM_CALLS_INTERNAL) +# define NVHDMIPKT_RM_CALLS_INTERNAL 1 +#endif + +// NVHDMIPKT_RESULT: HDMI library return result enums +typedef enum +{ + NVHDMIPKT_SUCCESS = 0, + NVHDMIPKT_FAIL = 1, + NVHDMIPKT_LIBRARY_INIT_FAIL = 2, + NVHDMIPKT_INVALID_ARG = 3, + NVHDMIPKT_TIMEOUT = 4, + NVHDMIPKT_ERR_GENERAL = 5, + NVHDMIPKT_INSUFFICIENT_BANDWIDTH = 6, + NVHDMIPKT_RETRY = 7 +} NVHDMIPKT_RESULT; + +// NVHDMIPKT_TYPE: HDMI Packet Enums +typedef enum _NVHDMIPKT_TYPE +{ + NVHDMIPKT_TYPE_UNDEFINED = 0, // Undefined Packet Type + NVHDMIPKT_TYPE_GENERIC = 1, // Generic packet, any Generic Packet + // (e.g Gamut Metadata packet) + NVHDMIPKT_TYPE_AVI_INFOFRAME = 2, // Avi infoframe + NVHDMIPKT_TYPE_GENERAL_CONTROL = 3, // GCP + NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME = 4, // VSI + NVHDMIPKT_TYPE_AUDIO_INFOFRAME = 5, // Audio InfoFrame + NVHDMIPKT_TYPE_EXTENDED_METADATA_PACKET = 6, // Extended Metadata Packet (HDMI 2.1) + NVHDMIPKT_INVALID_PKT_TYPE = 13 +} NVHDMIPKT_TYPE; + +// Hdmi packet TransmitControl defines. These definitions reflect the +// defines from ctrl and class defines for info frames. 
+#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE 0:0 +#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE_DIS 0x00000000 +#define NV_HDMI_PKT_TRANSMIT_CTRL_ENABLE_EN 0x00000001 + +#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER 1:1 +#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER_DIS 0x00000000 +#define NV_HDMI_PKT_TRANSMIT_CTRL_OTHER_EN 0x00000001 + +#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE 2:2 +#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE_DIS 0x00000000 +#define NV_HDMI_PKT_TRANSMIT_CTRL_SINGLE_EN 0x00000001 + +#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW 3:3 +#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_DIS 0x00000000 +#define NV_HDMI_PKT_TRANSMIT_CTRL_CHKSUM_HW_EN 0x00000001 + +#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK 4:4 +#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK_DIS 0x00000000 +#define NV_HDMI_PKT_TRANSMIT_CTRL_HBLANK_EN 0x00000001 + +#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT 5:5 +#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT_SW_CTRL 0x00000000 +#define NV_HDMI_PKT_TRANSMIT_CTRL_VIDEO_FMT_HW_CTRL 0x00000001 + +// NVHDMIPKT_TC: HDMI Packet Transmit Control +// NOTE: Client should use these defines below for transmit control, and avoid using the ones +// above. Use only if client knows and wants fine control. And in that case the value +// passed has to be explicitly typecasted to NVHDMIPKT_TC by the client. +typedef enum _NVHDMIPKT_TC +{ + NVHDMIPKT_TRANSMIT_CONTROL_DISABLE = + (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _DIS)), + + NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME = + (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)), + + NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_SINGLE_FRAME = + (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)), + + NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_OTHER_FRAME = + (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _EN) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)), + + NVHDMIPKT_TRANSMIT_CONTROL_VIDEO_FMT_HW_CTRL = + (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL)), + +} NVHDMIPKT_TC; + +// RM client handles. Used when client chooses that hdmi library make RM calls. +// NOTE: NVHDMIPKT_RM_CALLS_INTERNAL macro should be define to use it. +typedef struct tagNVHDMIPKT_RM_CLIENT_HANDLES +{ + NvU32 hClient; + NvU32 hDevice; + NvU32 hSubDevices[NV_MAX_SUBDEVICES]; + NvU32 hDisplay; +} NVHDMIPKT_RM_CLIENT_HANDLES; + +/****************************** HDMI Library callbacks into client ******************************/ +typedef void* NvHdmiPkt_CBHandle; + +/************************************************************************************************ + * [rmGetMemoryMap, rmFreeMemoryMap, rmDispControl,] acquireMutex and releaseMutex are mandatory* + * callbacks, to be implemented by the client. Callbacks in [] above are mandatory only for * + * Windows. * + * Linux need not implement those, if they plan to use NVHDMIPKT_RM_CALLS_INTERNAL define. * + * * + * rmGetMemoryMap and rmFreeMemoryMap are RM calls to allocate the DISP_SF_USER class. * + * And mutex callbacks keep hemi packet operations atomic. 
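/*
 * Editor's note -- illustrative sketch only, not part of this change.  Clients
 * normally use the canned NVHDMIPKT_TRANSMIT_CONTROL_* values above; when finer
 * control is genuinely needed, the header asks that the composed value be cast
 * explicitly to NVHDMIPKT_TC, e.g. a one-shot packet without HW checksum:
 */
static const NVHDMIPKT_TC kOneShotNoHwChecksum = (NVHDMIPKT_TC)
    (DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE,    _EN) |
     DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE,    _EN) |
     DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _DIS));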
* + ************************************************************************************************/ +typedef struct _tagNVHDMIPKT_CALLBACK +{ + // MANDATORY callbacks. + NvBool + (*rmGetMemoryMap) (NvHdmiPkt_CBHandle handle, + NvU32 dispSfUserClassId, + NvU32 dispSfUserSize, + NvU32 subDevice, + NvU32* pMemHandle, + void** ppBaseMem); + + void + (*rmFreeMemoryMap) (NvHdmiPkt_CBHandle handle, + NvU32 subDevice, + NvU32 memHandle, + void* pMem); + + NvBool + (*rmDispControl2) (NvHdmiPkt_CBHandle handle, + NvU32 subDevice, + NvU32 cmd, + void* pParams, + NvU32 paramSize); + + + void + (*acquireMutex) (NvHdmiPkt_CBHandle handle); + + void + (*releaseMutex) (NvHdmiPkt_CBHandle handle); + + // OPTIONAL callbacks + /* time in microseconds (us) */ + NvBool + (*setTimeout) (NvHdmiPkt_CBHandle handle, + NvU32 us_timeout); + + /* ChecTimeout returns true when timer times out */ + NvBool + (*checkTimeout) (NvHdmiPkt_CBHandle handle); + + // callbacks to allocate memory on heap to reduce stack usage + void* + (*malloc) (NvHdmiPkt_CBHandle handle, + NvLength numBytes); + + void + (*free) (NvHdmiPkt_CBHandle handle, + void *pMem); + + void + (*print) (NvHdmiPkt_CBHandle handle, + const char* fmtstring, + ...) +#if defined(__GNUC__) + __attribute__ ((format (printf, 2, 3))) +#endif + ; + + void + (*assert) (NvHdmiPkt_CBHandle handle, + NvBool expression); +} NVHDMIPKT_CALLBACK; + +/*********************** HDMI Library interface to write hdmi ctrl/packet ***********************/ +typedef void* NvHdmiPkt_Handle; +#define NVHDMIPKT_INVALID_HANDLE ((NvHdmiPkt_Handle)0) + +/************************************************************************************************ + * NvHdmiPkt_PacketCtrl - Returns HDMI NVHDMIPKT_RESULT. * + * * + * Parameters: * + * libHandle - Hdmi library handle, provided on initializing the library. * + * subDevice - Sub Device ID. * + * displayId - Display ID. * + * head - Head number. * + * packetType - One of the NVHDMIPKT_TYPE types. * + * transmitControl - Packet transmit control setting. * + ************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketCtrl (NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + +/************************************************************************************************ + * NvHdmiPkt_PacketWrite - Returns HDMI NVHDMIPKT_RESULT. * + * * + * Parameters: * + * libHandle - Hdmi library handle, provided on initializing the library. * + * subDevice - Sub Device ID. * + * displayId - Display ID. * + * head - Head number. * + * packetType - One of the NVHDMIPKT_TYPE types. * + * transmitControl - Packet transmit control setting. * + * packetLen - Length of the packet in bytes to be transmitted. * + * pPacket - Pointer to packet data. * + ************************************************************************************************/ +NVHDMIPKT_RESULT +NvHdmiPkt_PacketWrite(NvHdmiPkt_Handle libHandle, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +/***************************** Interface to initialize HDMI Library *****************************/ + +/************************************************************************************************ + * NvHdmiPkt_InitializeLibrary - Returns NvHdmiPkt_Handle. This handle is used to call * + * library interfaces. 
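/*
 * Editor's note -- illustrative sketch only, not part of this change.  Sending
 * a pre-built AVI InfoFrame with the interfaces documented above.  The packet
 * buffer and its length come from elsewhere (e.g. an infoframe builder), and
 * subDevice/displayId/head are whatever the modeset path already knows.
 */
static NVHDMIPKT_RESULT
sketchSendAviInfoframe(NvHdmiPkt_Handle lib, NvU32 subDevice, NvU32 displayId,
                       NvU32 head, NvU8 const *pPacket, NvU32 packetLen)
{
    return NvHdmiPkt_PacketWrite(lib, subDevice, displayId, head,
                                 NVHDMIPKT_TYPE_AVI_INFOFRAME,
                                 NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME,
                                 packetLen, pPacket);
}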
If handle returned is invalid - * + * NVHDMIPKT_INVALID_HANDLE -, there was a problem in * + * initialization and the library won't work. * + * * + * Parameters: * + * hwClass - Depending on HW, apply display class or display dma class. Either will do.* + * Eg. for GK104- NV9170_DISPLAY or NV917D_CORE_CHANNEL_DMA. * + * numSubDevices - Number of sub devices. * + * * + * cbHandle - Callback handle. Client cookie for callbacks made to client. * + * pCallback - Callbacks. Struct NVHDMIPKT_CALLBACK. * + * * + * Below mentioned sfUserHandle and clientHandles parameters are used only when not providing * + * rmGetMemoryMap, rmFreeMemoryMap and rmDispControl callbacks. This is meant for Linux. * + * And is controlled by NVHDMIPKT_RM_CALLS_INTERNAL macro. * + * NOTE: And Clients not using NVHDMIPKT_RM_CALLS_INTERNAL, need to set both sfUserHandle and * + * clientHandles to 0. * + * * + * sfUserHandle - SF_USER handle; this is the base handle. Subsequent subdevice handles are * + * derived incrementally from this handle. * + * pClientHandles - RM handles for client, device, subdevices and displayCommon. * + * * + ************************************************************************************************/ +NvHdmiPkt_Handle +NvHdmiPkt_InitializeLibrary(NvU32 const hwClass, + NvU32 const numSubDevices, + NvHdmiPkt_CBHandle const cbHandle, + const NVHDMIPKT_CALLBACK* const pCallback, + NvU32 const sfUserHandle, + const NVHDMIPKT_RM_CLIENT_HANDLES* const pClientHandles); + +/************************************************************************************************ + * NvHdmiPkt_DestroyLibrary * + * * + * When done with the HDMI Library call NvHdmiPkt_DestroyLibrary. It is like a destructor. * + * This destructor frees up resources acquired during initialize. * + * * + ************************************************************************************************/ +void +NvHdmiPkt_DestroyLibrary(NvHdmiPkt_Handle libHandle); + +#ifdef __cplusplus +} +#endif +#endif // _NVHDMIPKT_H_ diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_0073.c b/src/common/modeset/hdmipacket/nvhdmipkt_0073.c new file mode 100644 index 000000000..0e1bae8b4 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_0073.c @@ -0,0 +1,385 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * File: nvhdmipkt_0073.c + * + * Purpose: Provides infoframe write functions for HDMI library for Pre-KEPLER chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "hdmi_spec.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" + +NVHDMIPKT_RESULT +hdmiPacketCtrl0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + +NVHDMIPKT_RESULT +hdmiPacketWrite0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +/* + * hdmiPacketCtrl0073 + */ +NVHDMIPKT_RESULT +hdmiPacketCtrl0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS params = {0}; + + NVMISC_MEMSET(¶ms, 0, sizeof(params)); + + params.subDeviceInstance = subDevice; + params.displayId = displayId; + params.type = pThis->translatePacketType(pThis, packetType); + params.transmitControl = pThis->translateTransmitControl(pThis, transmitControl); + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (NvRmControl(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) + +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL, + ¶ms, sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "ERROR - RM call to hdmiPacketCtrl failed."); + NvHdmiPkt_Assert(0); + result = NVHDMIPKT_FAIL; + } + + return result; +} + +/* + * hdmiPacketWrite0073 + */ +NVHDMIPKT_RESULT +hdmiPacketWrite0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = {0}; + + NVMISC_MEMSET(¶ms, 0, sizeof(params)); + + params.subDeviceInstance = subDevice; + params.displayId = displayId; + params.packetSize = packetLen; + params.transmitControl = pThis->translateTransmitControl(pThis, transmitControl); + + // init the infoframe packet + NVMISC_MEMSET(params.aPacket, 0, NV0073_CTRL_SET_OD_MAX_PACKET_SIZE); + + // copy the payload + NVMISC_MEMCPY(params.aPacket, pPacket, packetLen); + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (NvRmControl(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) + +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, + ¶ms, + sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "ERROR - RM call to hdmiPacketWrite failed."); + NvHdmiPkt_Assert(0); + result = NVHDMIPKT_FAIL; + } + + return result; +} + +/* + * translatePacketType0073 + */ +static NvU32 +translatePacketType0073(NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TYPE packetType) +{ + NvU32 type0073 = 0; + + switch (packetType) + { + case 
NVHDMIPKT_TYPE_AVI_INFOFRAME: + type0073 = pktType_AviInfoFrame; + break; + + case NVHDMIPKT_TYPE_GENERIC: + type0073 = pktType_GamutMetadata; + break; + + case NVHDMIPKT_TYPE_GENERAL_CONTROL: + type0073 = pktType_GeneralControl; + break; + + case NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME: + type0073 = pktType_VendorSpecInfoFrame; + break; + + case NVHDMIPKT_TYPE_AUDIO_INFOFRAME: + type0073 = pktType_AudioInfoFrame; + break; + + default: + NvHdmiPkt_Print(pThis, "ERROR - translatePacketType wrong packet type: %0x", + packetType); + NvHdmiPkt_Assert(0); + break; + } + + return type0073; +} + +/* + * translateTransmitControl0073 + */ +static NvU32 +translateTransmitControl0073(NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TC transmitControl) +{ + NvU32 tc = 0; + + // TODO: tc validation + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _ENABLE, _YES, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _EN, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _OTHER_FRAME, _ENABLE, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _SINGLE_FRAME, _ENABLE, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _HBLANK, _EN, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _ON_HBLANK, _ENABLE, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL, transmitControl)) + { + tc = FLD_SET_DRF(0073, _CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, + _VIDEO_FMT, _HW_CONTROLLED, tc); + } + + return tc; +} + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor0073 + */ +NvBool +hdmiConstructor0073(NVHDMIPKT_CLASS* pThis) +{ + return NV_TRUE; +} + +/* + * hdmiUnDestructor0073 + */ +void +hdmiDestructor0073(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +// Below are dummy functions for the HW functions not needed for a display class +/* + * hdmiWriteDummyPacket + */ +void +hdmiWriteDummyPacket(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiWriteDummyPacket called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return; +} + +/* + * hdmiReadDummyPacketStatus + */ +static NvBool +hdmiReadDummyPacketStatus(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType0073) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiReadDummyPacketStatus called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NV_TRUE; +} + +/* + * hdmiWriteDummyPacketCtrl + */ +static NVHDMIPKT_RESULT +hdmiWriteDummyPacketCtrl(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType0073, + NvU32 transmitControl, + NvBool bDisable) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiWriteDummyPacketCtrl called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiAssessLinkCapabilitiesDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiAssessLinkCapabilitiesDummy called. 
" + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiQueryFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiQueryFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiSetFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiSetFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +NVHDMIPKT_RESULT +hdmiClearFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId) +{ + NvHdmiPkt_Print(pThis, "ERROR - Dummy function hdmiClearFRLConfigDummy called. " + "Should never be called."); + NvHdmiPkt_Assert(0); + return NVHDMIPKT_SUCCESS; +} + +/* + * initializeHdmiPktInterface0073 + */ +void +initializeHdmiPktInterface0073(NVHDMIPKT_CLASS* pClass) +{ + pClass->hdmiPacketCtrl = hdmiPacketCtrl0073; + pClass->hdmiPacketWrite = hdmiPacketWrite0073; + pClass->translatePacketType = translatePacketType0073; + pClass->translateTransmitControl = translateTransmitControl0073; + + // Functions below are mapped to dummy functions, as not needed for HW before GK104 + pClass->hdmiReadPacketStatus = hdmiReadDummyPacketStatus; + pClass->hdmiWritePacketCtrl = hdmiWriteDummyPacketCtrl; + pClass->hdmiWriteAviPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteAudioPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGenericPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGeneralCtrlPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteVendorPacket = hdmiWriteDummyPacket; + + // Update SF_USER data + pClass->dispSfUserClassId = 0; + pClass->dispSfUserSize = 0; + + // Functions below are used by HDMI FRL and will be available for Ampere+. + pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesDummy; + pClass->hdmiQueryFRLConfig = hdmiQueryFRLConfigDummy; + pClass->hdmiSetFRLConfig = hdmiSetFRLConfigDummy; + pClass->hdmiClearFRLConfig = hdmiClearFRLConfigDummy; +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_9171.c b/src/common/modeset/hdmipacket/nvhdmipkt_9171.c new file mode 100644 index 000000000..eb7d399b2 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_9171.c @@ -0,0 +1,804 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9171.c + * + * Purpose: Provides packet write functions for HDMI library for KEPLER + chips + */ + +#include "nvlimits.h" +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "hdmi_spec.h" +#include "class/cl9171.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" + +#define NVHDMIPKT_9171_INVALID_PKT_TYPE ((NV9171_SF_HDMI_INFO_IDX_VSI) + 1) +NVHDMIPKT_RESULT +hdmiPacketWrite9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +NVHDMIPKT_RESULT +hdmiPacketCtrl9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); +/* + * hdmiReadPacketStatus9171 + */ +static NvBool +hdmiReadPacketStatus9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType9171) +{ + NvBool bResult = NV_FALSE; + NvU32 regOffset = 0; + NvU32 status = 0; + + if (pBaseReg == 0 || head >= NV9171_SF_HDMI_INFO_STATUS__SIZE_1) + { + return bResult; + } + + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GCP: + case NV9171_SF_HDMI_INFO_IDX_VSI: + regOffset = NV9171_SF_HDMI_INFO_STATUS(head, pktType9171); + status = REG_RD32(pBaseReg, regOffset); + bResult = FLD_TEST_DRF(9171, _SF_HDMI_INFO_STATUS, _SENT, _DONE, status); + break; + + default: + break; + } + + return bResult; +} + +/* + * hdmiWritePacketCtrl9171 + */ +static NVHDMIPKT_RESULT +hdmiWritePacketCtrl9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType9171, + NvU32 transmitControl, + NvBool bDisable) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_INVALID_ARG; + NvU32 regOffset = 0; + NvU32 hdmiCtrl = 0; + + if (pBaseReg == 0 || head >= NV9171_SF_HDMI_INFO_CTRL__SIZE_1) + { + return result; + } + + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GCP: + case NV9171_SF_HDMI_INFO_IDX_VSI: + regOffset = NV9171_SF_HDMI_INFO_CTRL(head, pktType9171); + hdmiCtrl = REG_RD32(pBaseReg, regOffset); + hdmiCtrl = (bDisable == NV_TRUE) ? 
+ (FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _ENABLE, _DIS, hdmiCtrl)) : + (transmitControl); + REG_WR32(pBaseReg, regOffset, hdmiCtrl); + + result = NVHDMIPKT_SUCCESS; + break; + + default: + break; + } + + return result; +} + +/* + * hdmiWriteAviPacket9171 + */ +static void +hdmiWriteAviPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(head), data); + + return; +} + +/* + * hdmiWriteGenericPacket9171 + */ +static void +hdmiWriteGenericPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = 
FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB14, pPacket[17], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB15, pPacket[18], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB16, pPacket[19], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_LOW, _PB17, pPacket[20], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB18, pPacket[21], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB19, pPacket[22], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK2_HIGH, _PB20, pPacket[23], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB21, pPacket[24], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB22, pPacket[25], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB23, pPacket[26], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_LOW, _PB24, pPacket[27], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB25, pPacket[28], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB26, pPacket[29], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GENERIC_SUBPACK3_HIGH, _PB27, pPacket[30], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(head), data); + + return; +} + +/* + * hdmiWriteGeneralCtrlPacket9171 + */ +static 
void +hdmiWriteGeneralCtrlPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NvU32 data = 0; + + // orIndexer info is ignored. + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GCP_SUBPACK(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_GCP_SUBPACK, _SB2, pPacket[5], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_GCP_SUBPACK(head), data); + + return; +} + +/* + * hdmiWriteVendorPacket9171 + */ +static void +hdmiWriteVendorPacket9171(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacketIn) +{ + NvU32 data = 0; + NvU8 pPacket[31] = {0}; + + NVMISC_MEMCPY(pPacket, pPacketIn, packetLen); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_HEADER(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB0, pPacket[0], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB1, pPacket[1], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_HEADER, _HB2, pPacket[2], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_HEADER(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB0, pPacket[3], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB1, pPacket[4], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB2, pPacket[5], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_LOW, _PB3, pPacket[6], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB4, pPacket[7], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB5, pPacket[8], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK0_HIGH, _PB6, pPacket[9], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB7, pPacket[10], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB8, pPacket[11], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB9, pPacket[12], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_LOW, _PB10, pPacket[13], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB11, pPacket[14], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB12, pPacket[15], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK1_HIGH, _PB13, pPacket[16], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB14, pPacket[17], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB15, pPacket[18], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB16, pPacket[19], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_LOW, _PB17, pPacket[20], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, 
_PB18, pPacket[21], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, _PB19, pPacket[22], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK2_HIGH, _PB20, pPacket[23], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB21, pPacket[24], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB22, pPacket[25], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB23, pPacket[26], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_LOW, _PB24, pPacket[27], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_LOW(head), data); + + data = REG_RD32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(head)); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB25, pPacket[28], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB26, pPacket[29], data); + data = FLD_SET_DRF_NUM(9171, _SF_HDMI_VSI_SUBPACK3_HIGH, _PB27, pPacket[30], data); + REG_WR32(pBaseReg, NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(head), data); + + return; +} + +/* + * translatePacketType9171 + */ +static NvU32 +translatePacketType9171(NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TYPE packetType) +{ + NvU32 type9171 = NVHDMIPKT_9171_INVALID_PKT_TYPE; + + switch (packetType) + { + case NVHDMIPKT_TYPE_AVI_INFOFRAME: + type9171 = NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME; + break; + + case NVHDMIPKT_TYPE_GENERIC: + type9171 = NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME; + break; + + case NVHDMIPKT_TYPE_GENERAL_CONTROL: + type9171 = NV9171_SF_HDMI_INFO_IDX_GCP; + break; + + case NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME: + type9171 = NV9171_SF_HDMI_INFO_IDX_VSI; + break; + + case NVHDMIPKT_TYPE_AUDIO_INFOFRAME: + default: + NvHdmiPkt_Print(pThis, "ERROR - translatePacketType wrong packet type: %0x.", + packetType); + NvHdmiPkt_Assert(0); + break; + } + + return type9171; +} + +/* + * translateTransmitControl9171 + */ +static NvU32 +translateTransmitControl9171(NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TC transmitControl) +{ + NvU32 tc = 0; + + // TODO: tc validation + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN, transmitControl)) + { + tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _ENABLE, _EN, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _EN, transmitControl)) + { + tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _OTHER, _EN, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN, transmitControl)) + { + tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _SINGLE, _EN, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN, transmitControl)) + { + tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _CHKSUM_HW, _EN, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _HBLANK, _EN, transmitControl)) + { + tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _HBLANK, _EN, tc); + } + + if (FLD_TEST_DRF(_HDMI_PKT, _TRANSMIT_CTRL, _VIDEO_FMT, _HW_CTRL, transmitControl)) + { + tc = FLD_SET_DRF(9171, _SF_HDMI_INFO_CTRL, _VIDEO_FMT, _HW_CONTROLLED, tc); + } + + return tc; +} + +/* + * hdmiPacketCtrl9171 + */ +NVHDMIPKT_RESULT +hdmiPacketCtrl9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl) +{ + NvU32* pBaseReg = (NvU32*)pThis->memMap[subDevice].pMemBase; + NvU32 pktType9171 = pThis->translatePacketType(pThis, packetType); + NvU32 tc = pThis->translateTransmitControl(pThis, transmitControl); + + if (pBaseReg == 0 || 
head >= NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 || + pktType9171 == NVHDMIPKT_9171_INVALID_PKT_TYPE) + { + return NVHDMIPKT_INVALID_ARG; + } + + return pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_FALSE); +} + +/* + * internal utility function + * checkPacketStatus + */ +static NVHDMIPKT_RESULT +checkPacketStatus(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktType9171) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NvBool bCheckPacketStatus = NV_TRUE; + NvU32 regOffset = 0; + NvU32 status = 0; + + // check to see if timer callbacks are provided + if (pThis->callback.setTimeout == 0 || pThis->callback.checkTimeout == 0) + { + goto checkPacketStatus_exit; + } + + // Mark packets that don't need status check + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + case NV9171_SF_HDMI_INFO_IDX_GCP: + regOffset = NV9171_SF_HDMI_INFO_STATUS(head, pktType9171); + status = REG_RD32(pBaseReg, regOffset); + bCheckPacketStatus = FLD_TEST_DRF(9171, _SF_HDMI_INFO_CTRL, _SINGLE, _EN, status); + break; + + default: + bCheckPacketStatus = NV_FALSE; + break; + } + + if (bCheckPacketStatus == NV_TRUE) + { + if (pThis->callback.setTimeout(pThis->cbHandle, NVHDMIPKT_STATUS_READ_TIMEOUT_IN_us) + == NV_FALSE) + { + // Timer set failed + goto checkPacketStatus_exit; + } + + while(pThis->hdmiReadPacketStatus(pThis, pBaseReg, head, pktType9171) == NV_FALSE) + { + if (pThis->callback.checkTimeout(pThis->cbHandle) == NV_TRUE) + { + // status check operation timed out + result = NVHDMIPKT_TIMEOUT; + goto checkPacketStatus_exit; + } + } + } + +checkPacketStatus_exit: + return result; +} + +/* + * hdmiPacketWrite9171 + */ +NVHDMIPKT_RESULT +hdmiPacketWrite9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket) +{ + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + NvU32* pBaseReg = (NvU32*)pThis->memMap[subDevice].pMemBase; + NvU32 pktType9171 = pThis->translatePacketType(pThis, packetType); + NvU32 tc = pThis->translateTransmitControl(pThis, transmitControl); + NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS params = {0}; + + if (pBaseReg == 0 || head >= NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 || + packetLen == 0 || pPacket == 0 || pktType9171 == NVHDMIPKT_9171_INVALID_PKT_TYPE) + { + result = NVHDMIPKT_INVALID_ARG; + goto hdmiPacketWrite9171_exit; + } + + // acquire mutex + pThis->callback.acquireMutex(pThis->cbHandle); + + // Check status if last infoframe was sent out or not + + if ((result = checkPacketStatus(pThis, pBaseReg, head, pktType9171)) == + NVHDMIPKT_TIMEOUT) + { + NvHdmiPkt_Print(pThis, "ERROR - Packet status check timed out."); + NvHdmiPkt_Assert(0); + goto hdmiPacketWrite9171_release_mutex_exit; + } + + // Disable this packet type. + pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_TRUE); + + // write the packet + switch (pktType9171) + { + case NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME: + pThis->hdmiWriteAviPacket(pThis, pBaseReg, head, packetLen, pPacket); + break; + + case NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME: + pThis->hdmiWriteGenericPacket(pThis, pBaseReg, head, packetLen, pPacket); + break; + + case NV9171_SF_HDMI_INFO_IDX_GCP: + // Check whether the GCP packet is AVMute DISABLE or AvMute ENABLE + // Enable HDMI only on GCP unmute i.e. AVMUTE DISABLE + if (pPacket[HDMI_PKT_HDR_SIZE] == HDMI_GENCTRL_PACKET_MUTE_DISABLE) + { + // Enable HDMI. 
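+                // AVMUTE DISABLE means the stream is being unmuted: issue
+                // NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI (directly or through the client's
+                // rmDispControl2 callback) so RM enables HDMI on this display before the
+                // GCP subpacket below is programmed.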
+ NVMISC_MEMSET(¶ms, 0, sizeof(params)); + params.subDeviceInstance = (NvU8)subDevice; + params.displayId = displayId; + params.bEnable = NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) + +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI, + ¶ms, + sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NvHdmiPkt_Print(pThis, "ERROR - RM call to enable hdmi ctrl failed."); + NvHdmiPkt_Assert(0); + result = NVHDMIPKT_FAIL; + } + } + pThis->hdmiWriteGeneralCtrlPacket(pThis, pBaseReg, head, packetLen, pPacket); + break; + + case NV9171_SF_HDMI_INFO_IDX_VSI: + pThis->hdmiWriteVendorPacket(pThis, pBaseReg, head, packetLen, pPacket); + break; + + default: + result = NVHDMIPKT_INVALID_ARG; + break; + } + + // Enable this infoframe. + pThis->hdmiWritePacketCtrl(pThis, pBaseReg, head, pktType9171, tc, NV_FALSE); + +hdmiPacketWrite9171_release_mutex_exit: + // release mutex + pThis->callback.releaseMutex(pThis->cbHandle); +hdmiPacketWrite9171_exit: + return result; +} + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9171 + */ +NvBool +hdmiConstructor9171(NVHDMIPKT_CLASS* pThis) +{ + NvU32 i = 0; + NvBool result = NV_TRUE; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < pThis->numSubDevices; i++) + { + if (CALL_DISP_RM(NvRmAlloc)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->sfUserHandle + i, + pThis->dispSfUserClassId, + (void*)0) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. " + "Failed to alloc SF_USER handle"); + NvHdmiPkt_Assert(0); + break; + } + + pThis->memMap[i].memHandle = pThis->sfUserHandle + i; + + if (CALL_DISP_RM(NvRmMapMemory)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->memMap[i].memHandle, + 0, + pThis->dispSfUserSize, + &pThis->memMap[i].pMemBase, + 0) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. " + "Failed to map SF_USER memory."); + NvHdmiPkt_Assert(0); + break; + } + + if (pThis->memMap[i].pMemBase == 0) + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. " + "SF_USER memory returned is NULL."); + NvHdmiPkt_Assert(0); + break; + } + + pThis->memMap[i].subDevice = i; + } + + // coudln't complete the loop above + if (i < pThis->numSubDevices) + { + result = NV_FALSE; + goto hdmiConstructor9171_exit; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < pThis->numSubDevices; i++) + { + result = pThis->callback.rmGetMemoryMap(pThis->cbHandle, + pThis->dispSfUserClassId, + pThis->dispSfUserSize, + i, + &pThis->memMap[i].memHandle, + &pThis->memMap[i].pMemBase); + if (result == NV_TRUE) + { + pThis->memMap[i].subDevice = i; + } + else + { + NvHdmiPkt_Print(pThis, "ERROR - Init failed. 
" + "Failed to map SF_USER memory."); + NvHdmiPkt_Assert(0); + result = NV_FALSE; + goto hdmiConstructor9171_exit; + } + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + +hdmiConstructor9171_exit: + return result; +} + +/* + * hdmiDestructor9171 + */ +void +hdmiDestructor9171(NVHDMIPKT_CLASS* pThis) + +{ + NvU32 i = 0; + +#if NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + // free memory + if (pThis->memMap[i].pMemBase) + { + if (CALL_DISP_RM(NvRmUnmapMemory)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->memMap[i].memHandle, + pThis->memMap[i].pMemBase, + 0) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - unInit failed. " + "SF_USER memory unMap failed."); + NvHdmiPkt_Assert(0); + } + } + + // free handle + if (pThis->memMap[i].memHandle) + { + if (CALL_DISP_RM(NvRmFree)(pThis->clientHandles.hClient, + pThis->clientHandles.hSubDevices[i], + pThis->memMap[i].memHandle) != NVOS_STATUS_SUCCESS) + { + NvHdmiPkt_Print(pThis, "ERROR - unInit failed. " + "Freeing SF_USER memory handle failed."); + NvHdmiPkt_Assert(0); + } + } + + pThis->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + pThis->memMap[i].memHandle = 0; + pThis->memMap[i].pMemBase = 0; + } +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + for (i = 0; i < NV_MAX_SUBDEVICES; i++) + { + if (pThis->memMap[i].memHandle) + { + pThis->callback.rmFreeMemoryMap(pThis->cbHandle, + i, + pThis->memMap[i].memHandle, + pThis->memMap[i].pMemBase); + + pThis->memMap[i].subDevice = NVHDMIPKT_INVALID_SUBDEV; + pThis->memMap[i].memHandle = 0; + pThis->memMap[i].pMemBase = 0; + } + } +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + + return; +} + +/* + * initializeHdmiPktInterface9171 + */ +void +initializeHdmiPktInterface9171(NVHDMIPKT_CLASS* pClass) +{ + pClass->hdmiPacketCtrl = hdmiPacketCtrl9171; + pClass->hdmiPacketWrite = hdmiPacketWrite9171; + pClass->translatePacketType = translatePacketType9171; + pClass->translateTransmitControl = translateTransmitControl9171; + + // HW register write functions + pClass->hdmiReadPacketStatus = hdmiReadPacketStatus9171; + pClass->hdmiWritePacketCtrl = hdmiWritePacketCtrl9171; + pClass->hdmiWriteAviPacket = hdmiWriteAviPacket9171; + pClass->hdmiWriteAudioPacket = hdmiWriteDummyPacket; + pClass->hdmiWriteGenericPacket = hdmiWriteGenericPacket9171; + pClass->hdmiWriteGeneralCtrlPacket = hdmiWriteGeneralCtrlPacket9171; + pClass->hdmiWriteVendorPacket = hdmiWriteVendorPacket9171; + + // Update SF_USER data + pClass->dispSfUserClassId = NV9171_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9171DispSfUserMap); + + // Functions below are used by HDMI FRL and will be available for Ampere+. + pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesDummy; + pClass->hdmiQueryFRLConfig = hdmiQueryFRLConfigDummy; + pClass->hdmiSetFRLConfig = hdmiSetFRLConfigDummy; + pClass->hdmiClearFRLConfig = hdmiClearFRLConfigDummy; +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_9271.c b/src/common/modeset/hdmipacket/nvhdmipkt_9271.c new file mode 100644 index 000000000..eaf65e57e --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_9271.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9271.c + * + * Purpose: Provides packet write functions for HDMI library for KEPLER + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9271.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9271. * +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9271 + */ +NvBool +hdmiConstructor9271(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9271 + */ +void +hdmiDestructor9271(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9271 + */ +void +initializeHdmiPktInterface9271(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9271_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9271DispSfUserMap); +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_9471.c b/src/common/modeset/hdmipacket/nvhdmipkt_9471.c new file mode 100644 index 000000000..d863c9ff0 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_9471.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9471.c + * + * Purpose: Provides packet write functions for HDMI library for Maxwell + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9471.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9471. * +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9471 + */ +NvBool +hdmiConstructor9471(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9471 + */ +void +hdmiDestructor9471(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9471 + */ +void +initializeHdmiPktInterface9471(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9471_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9471DispSfUserMap); +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_9571.c b/src/common/modeset/hdmipacket/nvhdmipkt_9571.c new file mode 100644 index 000000000..85e6b131d --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_9571.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_9571.c + * + * Purpose: Provides packet write functions for HDMI library for Maxwell + chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/cl9571.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to 9571. 
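+*                                                                                               *
+* A class that did need chip-specific behavior would simply override the relevant function      *
+* pointers in its initialize routine, for example (hdmiWriteAviPacket9571 is hypothetical):     *
+*     pClass->hdmiWriteAviPacket = hdmiWriteAviPacket9571;                                       *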
* +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructor9571 + */ +NvBool +hdmiConstructor9571(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructor9571 + */ +void +hdmiDestructor9571(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterface9571 + */ +void +initializeHdmiPktInterface9571(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NV9571_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(Nv9571DispSfUserMap); +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_C371.c b/src/common/modeset/hdmipacket/nvhdmipkt_C371.c new file mode 100644 index 000000000..fd89eac6b --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_C371.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_C371.c + * + * Purpose: Provides packet write functions for HDMI library for Volta+ chips + */ + +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" + +#include "class/clc371.h" + +/******************************************** NOTE *********************************************** +* This file serves as an example on how to add a new HW SF USER CLASS. Notice that this * +* Class didn't override any functions, as 9171 is identical to C371. 
* +*************************************************************************************************/ + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructorC371 + */ +NvBool +hdmiConstructorC371(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructorC371 + */ +void +hdmiDestructorC371(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterfaceC371 + */ +void +initializeHdmiPktInterfaceC371(NVHDMIPKT_CLASS* pClass) +{ + // Update SF_USER data + pClass->dispSfUserClassId = NVC371_DISP_SF_USER; + pClass->dispSfUserSize = sizeof(NvC371DispSfUserMap); +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_C671.c b/src/common/modeset/hdmipacket/nvhdmipkt_C671.c new file mode 100644 index 000000000..838556591 --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_C671.c @@ -0,0 +1,1389 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * File: nvhdmipkt_C671.c + * + * Purpose: Provides packet write functions for HDMI library for Ampere+ chips + */ + +#include +#include "nvhdmipkt_common.h" +#include "nvhdmipkt_class.h" + +#include "nvhdmipkt_internal.h" +#include "nvHdmiFrlCommon.h" + +#include "../timing/nvt_dsc_pps.h" +#include "ctrl/ctrl0073/ctrl0073system.h" + +#include "class/clc671.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" + +#define MULTIPLIER_1G 1000000000 +#define PCLK_VARIANCE_10MHZ 1000 + +// In HDMI case, for PPS set, HDMI2.1 spec expects source to set this field to 13, decoder capability is assumed +// Note, in DP case, DSC decoder is allowed to report line buffer depth capability through DPCD registers +#define HDMI_DSC_DECODER_LINE_BUFFER_BIT_DEPTH_CAP 13 +#define NVHDMIPKT_C671_INVALID_PKT_TYPE ((NVC671_SF_HDMI_INFO_IDX_VSI) + 1) + +extern NVHDMIPKT_RESULT hdmiPacketWrite0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +extern NVHDMIPKT_RESULT hdmiPacketCtrl0073(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + +extern NVHDMIPKT_RESULT hdmiPacketWrite9171(NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + +static NVHDMIPKT_RESULT hdmiClearFRLConfigC671(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId); + + +// translate FRL rate to RM control param +static NvU32 translateFRLRateToNv0073SetHdmiFrlConfig(HDMI_FRL_DATA_RATE frlRate) +{ + switch(frlRate) + { + case HDMI_FRL_DATA_RATE_NONE : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE; + case HDMI_FRL_DATA_RATE_3LANES_3GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G; + case HDMI_FRL_DATA_RATE_3LANES_6GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G; + case HDMI_FRL_DATA_RATE_4LANES_6GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G; + case HDMI_FRL_DATA_RATE_4LANES_8GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G; + case HDMI_FRL_DATA_RATE_4LANES_10GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G; + case HDMI_FRL_DATA_RATE_4LANES_12GBPS : return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G; + default: + break; + } + return NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE; +} + +/* + * Both DSC_Max_FRL_Rate and Max_FRL_Rate have same translation + * + */ +static HDMI_FRL_DATA_RATE translateFRLCapToFRLDataRate(NvU32 sinkFRLcap) +{ + switch(sinkFRLcap) + { + case 6: return HDMI_FRL_DATA_RATE_4LANES_12GBPS; + case 5: return HDMI_FRL_DATA_RATE_4LANES_10GBPS; + case 4: return HDMI_FRL_DATA_RATE_4LANES_8GBPS; + case 3: return HDMI_FRL_DATA_RATE_4LANES_6GBPS; + case 2: return HDMI_FRL_DATA_RATE_3LANES_6GBPS; + case 1: return HDMI_FRL_DATA_RATE_3LANES_3GBPS; + case 0: // fall through + default: break; + } + + if (sinkFRLcap > 6 && sinkFRLcap <= 15) + { + return HDMI_FRL_DATA_RATE_4LANES_12GBPS; + } + + return HDMI_FRL_DATA_RATE_NONE; +} + +// If we want to force 2ch48KHz fill it in as default, if not, +// Lookup sink short audio descriptor blocks to see max supported audio +static void populateAudioCaps(NVT_EDID_CEA861_INFO const * const p861ExtBlock, + HDMI_SINK_CAPS * pSinkCaps) +{ + NvU32 i; + + for (i = 0; i < p861ExtBlock->total_sad; i++) + { + NvU32 data = 
p861ExtBlock->audio[i].byte1; + data = (data & NVT_CEA861_AUDIO_FORMAT_MASK) >> NVT_CEA861_AUDIO_FORMAT_SHIFT; + + // unsupported + if ((data == NVT_CEA861_AUDIO_FORMAT_RSVD) || + (data == NVT_CEA861_AUDIO_FORMAT_RSVD15)) + { + continue; + } + + // check for HBR audio support. We don't support any other packet types + if ((data == NVT_CEA861_AUDIO_FORMAT_DTS_HD) || + (data == NVT_CEA861_AUDIO_FORMAT_MAT)) + { + pSinkCaps->bHBRAudio = NV_TRUE; + } + + // num of channels for this audio format + data = p861ExtBlock->audio[i].byte1; + NvU32 numChannels = ((data & NVT_CEA861_AUDIO_MAX_CHANNEL_MASK) >> NVT_CEA861_AUDIO_MAX_CHANNEL_SHIFT) + 1; + if (pSinkCaps->maxAudioChannels < numChannels) + { + pSinkCaps->maxAudioChannels = numChannels; + } + + // get max sampling frequency + data = p861ExtBlock->audio[i].byte2; + NvU32 sampleFreq = (data & NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ) ? 192 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ) ? 176 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ) ? 96 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ) ? 88 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ) ? 48 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ) ? 44 : + (data & NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ) ? 32 : 0; + if (pSinkCaps->maxAudioFreqKHz < sampleFreq) + { + pSinkCaps->maxAudioFreqKHz = sampleFreq; + } + } +} + +/* + * hdmiAssessLinkCapabilities + * + * 1. Try physical link training to determine max link capacity + * 2. Calculate max audio capabilities + * 3. Limit connector max to what the source can support + * AssesssLinkCapabilities is expected to be called at hotplug time. Ideally, srcCaps need to be calculated one time, + * but for now, no incentive to do so. In future move it out to better place as need arises + */ +static NVHDMIPKT_RESULT +hdmiAssessLinkCapabilitiesC671(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps) +{ + + // Read DSC caps from RM - gpu caps for DSC are same across DP and HDMI FRL (HDMI 2.1+) + // Hence use same RM control as DP case for reading this cap + NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS params; + params.subDeviceInstance = 0; + params.sorIndex = 0; // Passing SOR index as 0 since all SORs have the same capability. + +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_DP_GET_CAPS, + ¶ms, + sizeof(params)) != NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + params.subDeviceInstance, + NV0073_CTRL_CMD_DP_GET_CAPS, + ¶ms, + sizeof(params)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + return NVHDMIPKT_FAIL; + } + + pSrcCaps->dscCaps.dscCapable = params.DSC.bDscSupported; + pSrcCaps->dscCaps.encoderColorFormatMask = params.DSC.encoderColorFormatMask; + pSrcCaps->dscCaps.dualHeadBppTargetMaxX16 = 256; // Tu10x/GA10x HW DSC module allow max 16bpp in 2H1OR mode. 
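+
+    // Next, query the GPU-side HDMI FRL capability. On failure the params are left zeroed, and
+    // translateFRLCapToFRLDataRate() maps a zero cap to HDMI_FRL_DATA_RATE_NONE, i.e. the
+    // source is treated as not FRL capable.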
+ + NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS hdmiGpuCapsParams; + NVMISC_MEMSET(&hdmiGpuCapsParams, 0, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS)); +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS, + &hdmiGpuCapsParams, + sizeof(hdmiGpuCapsParams)) != NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + hdmiGpuCapsParams.subDeviceInstance, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS, + &hdmiGpuCapsParams, + sizeof(hdmiGpuCapsParams)); + if (bSuccess == NV_FALSE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + NVMISC_MEMSET(&hdmiGpuCapsParams, 0, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS)); + } + + pSrcCaps->linkMaxFRLRate = translateFRLCapToFRLDataRate(hdmiGpuCapsParams.caps); + + switch(params.DSC.bitsPerPixelPrecision) + { + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_8; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_4; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_2; break; + case NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1: + pSrcCaps->dscCaps.bppPrecision = DSC_BITS_PER_PIXEL_PRECISION_1; break; + default: break; + } + + pSrcCaps->dscCaps.lineBufferSizeKB = params.DSC.lineBufferSizeKB; + pSrcCaps->dscCaps.rateBufferSizeKB = params.DSC.rateBufferSizeKB; + pSrcCaps->dscCaps.maxNumHztSlices = params.DSC.maxNumHztSlices; + pSrcCaps->dscCaps.lineBufferBitDepth = params.DSC.lineBufferBitDepth; + pSrcCaps->dscCaps.maxWidthPerSlice = 5120; // Max DSC buffer width per head is 5120, this can be chunks of 1/2/4 slices, so keep 5120 as the very max. + + pSinkCaps->pHdmiForumInfo = &pSinkEdid->hdmiForumInfo; + populateAudioCaps(&pSinkEdid->ext861, pSinkCaps); + populateAudioCaps(&pSinkEdid->ext861_2, pSinkCaps); + + NvU32 setFRLRate = pSinkEdid->hdmiForumInfo.max_FRL_Rate; + + pSinkCaps->linkMaxFRLRate = translateFRLCapToFRLDataRate(setFRLRate); + pSinkCaps->linkMaxFRLRateDSC = (pSrcCaps->dscCaps.dscCapable && + (pSinkEdid->hdmiForumInfo.dsc_Max_FRL_Rate > setFRLRate)) ? + pSinkCaps->linkMaxFRLRate : + translateFRLCapToFRLDataRate(pSinkEdid->hdmiForumInfo.dsc_Max_FRL_Rate); + + return NVHDMIPKT_SUCCESS; +} + +// Fill in basic params from Timing info etc +static void populateBaseFRLParams(HDMI_VIDEO_TRANSPORT_INFO const *pVidTransInfo, + HDMI_SINK_CAPS const *pSinkCaps, + NvBool bForce2Ch48KHz, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams) +{ + pFRLParams->pclk10KHz = pVidTransInfo->pTiming->pclk; + pFRLParams->hTotal = pVidTransInfo->pTiming->HTotal; + pFRLParams->hActive = pVidTransInfo->pTiming->HVisible; + pFRLParams->bpc = pVidTransInfo->bpc; + pFRLParams->pixelPacking = pVidTransInfo->packing; + + pFRLParams->numAudioChannels = bForce2Ch48KHz ? 2 : pSinkCaps->maxAudioChannels; + pFRLParams->audioFreqKHz = bForce2Ch48KHz ? 48 : pSinkCaps->maxAudioFreqKHz; + pFRLParams->audioType = pSinkCaps->bHBRAudio ? 
AUDIO_PKTTYPE_HBR_AUDIO : + AUDIO_PKTTYPE_LPCM_SAMPLE; + + pFRLParams->compressionInfo.dscTotalChunkKBytes = 1024 * (pSinkCaps->pHdmiForumInfo->dsc_totalChunkKBytes); +} + + +// Get next higher link rate +static HDMI_FRL_DATA_RATE getNextHigherLinkRate(HDMI_FRL_DATA_RATE frlRate) +{ + return (frlRate == HDMI_FRL_DATA_RATE_4LANES_12GBPS) ? HDMI_FRL_DATA_RATE_NONE : (frlRate + 1); +} + +// Fill in GPU and Monitor caps for DSC PPS calculations +static void populateDscCaps(HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + DSC_INFO * pDscInfo) +{ + // populate src caps + pDscInfo->gpuCaps.encoderColorFormatMask = pSrcCaps->dscCaps.encoderColorFormatMask; + pDscInfo->gpuCaps.lineBufferSize = pSrcCaps->dscCaps.lineBufferSizeKB; + pDscInfo->gpuCaps.bitsPerPixelPrecision = pSrcCaps->dscCaps.bppPrecision; + pDscInfo->gpuCaps.maxNumHztSlices = pSrcCaps->dscCaps.maxNumHztSlices; + pDscInfo->gpuCaps.lineBufferBitDepth = pSrcCaps->dscCaps.lineBufferBitDepth; + + // populate sink caps + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_RGB; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422; + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422; + + pDscInfo->sinkCaps.bitsPerPixelPrecision = DSC_BITS_PER_PIXEL_PRECISION_1_16; + if (pSinkCaps->pHdmiForumInfo->dsc_Native_420) + { + pDscInfo->sinkCaps.decoderColorFormatMask |= DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420; + } + + // limited by spec + pDscInfo->sinkCaps.maxSliceWidth = 2720; + + NvU32 sliceCountMask = 0; + NvU32 maxNumHztSlices = pSinkCaps->pHdmiForumInfo->dsc_MaxSlices; + NvU32 peakThroughput = (pSinkCaps->pHdmiForumInfo->dsc_MaxPclkPerSliceMHz == 400) ? + DSC_DECODER_PEAK_THROUGHPUT_MODE0_400 : + DSC_DECODER_PEAK_THROUGHPUT_MODE0_340; + + switch(pSinkCaps->pHdmiForumInfo->dsc_MaxSlices) + { + case 16: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_16; // fall-through + case 12: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_12; // fall-through + case 8: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_8; // fall-through + case 4: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_4; // fall-through + case 2: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_2; // fall-through + case 1: sliceCountMask |= DSC_DECODER_SLICES_PER_SINK_1; break; + default: break; + } + + pDscInfo->sinkCaps.sliceCountSupportedMask = sliceCountMask; + pDscInfo->sinkCaps.maxNumHztSlices = maxNumHztSlices; + pDscInfo->sinkCaps.lineBufferBitDepth = HDMI_DSC_DECODER_LINE_BUFFER_BIT_DEPTH_CAP; + + // Color depth supported by DSC decoder of panel + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_16bpc ? DSC_DECODER_COLOR_DEPTH_CAPS_16_BITS : 0; + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_12bpc ? DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS : 0; + pDscInfo->sinkCaps.decoderColorDepthMask |= pSinkCaps->pHdmiForumInfo->dsc_10bpc ? 
DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS : 0; + pDscInfo->sinkCaps.decoderColorDepthMask |= DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS; + + pDscInfo->sinkCaps.bBlockPrediction = 1; + pDscInfo->sinkCaps.algorithmRevision.versionMajor = 1; + pDscInfo->sinkCaps.algorithmRevision.versionMinor = 2; + pDscInfo->sinkCaps.peakThroughputMode0 = peakThroughput; + pDscInfo->sinkCaps.peakThroughputMode1 = peakThroughput * 2; +} + +// Fill in mode related info for DSC lib +static void populateDscModesetInfo(HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + MODESET_INFO * pDscModesetInfo) +{ + pDscModesetInfo->pixelClockHz = pVidTransInfo->pTiming->pclk * 10000; // Requested pixel clock for the mode + pDscModesetInfo->activeWidth = pVidTransInfo->pTiming->HVisible; // Active Width + pDscModesetInfo->activeHeight = pVidTransInfo->pTiming->VVisible; // Active Height + pDscModesetInfo->bitsPerComponent = (NvU32)pVidTransInfo->bpc; // BPC value to be used + pDscModesetInfo->colorFormat = (pVidTransInfo->packing == HDMI_PIXEL_PACKING_RGB) ? NVT_COLOR_FORMAT_RGB : + (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr444) ? NVT_COLOR_FORMAT_YCbCr444 : + (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr422) ? NVT_COLOR_FORMAT_YCbCr422 : + (pVidTransInfo->packing == HDMI_PIXEL_PACKING_YCbCr420) ? NVT_COLOR_FORMAT_YCbCr420 : 0; + pDscModesetInfo->bDualMode = pVidTransInfo->bDualHeadMode; + pDscModesetInfo->bDropMode = NV_FALSE; +} + +// Checks against source and sink caps whether DSC is possible +// Tries to determine slice width and slice count accounting for 2Head1Or, populates this info into FRL calculation structure +// if this calculation fails DSC cannot be enabled +static NvBool evaluateIsDSCPossible(NVHDMIPKT_CLASS *pThis, + HDMI_SRC_CAPS const *pSrcCaps, + HDMI_SINK_CAPS const *pSinkCaps, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams) +{ + const NvU32 numHeadsDrivingSink = pVidTransInfo->bDualHeadMode ? 2 : 1; + + if (!pSrcCaps->dscCaps.dscCapable) + { + return NV_FALSE; + } + + if (!pSinkCaps->pHdmiForumInfo->dsc_1p2 || + !pSinkCaps->linkMaxFRLRateDSC || + (!pSinkCaps->pHdmiForumInfo->dsc_16bpc && (pFRLParams->bpc == HDMI_BPC16)) || + (!pSinkCaps->pHdmiForumInfo->dsc_12bpc && (pFRLParams->bpc == HDMI_BPC12)) || + (!pSinkCaps->pHdmiForumInfo->dsc_10bpc && (pFRLParams->bpc == HDMI_BPC10))) + { + return NV_FALSE; + } + + // Disallow DSC if the source or sink don't support DSC with this mode's colorformat/packing. 
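+    // (4:2:2 and 4:2:0 packing are checked against the encoder's native 4:2:2/4:2:0 support;
+    // 4:2:0 additionally requires the sink's dsc_Native_420 capability.)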
+    switch (pVidTransInfo->packing)
+    {
+    case HDMI_PIXEL_PACKING_RGB:
+        if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_RGB))
+        {
+            return NV_FALSE;
+        }
+        break;
+    case HDMI_PIXEL_PACKING_YCbCr444:
+        if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444))
+        {
+            return NV_FALSE;
+        }
+        break;
+    case HDMI_PIXEL_PACKING_YCbCr422:
+        if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422))
+        {
+            return NV_FALSE;
+        }
+        break;
+    case HDMI_PIXEL_PACKING_YCbCr420:
+        if (!(pSrcCaps->dscCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) ||
+            !pSinkCaps->pHdmiForumInfo->dsc_Native_420)
+        {
+            return NV_FALSE;
+        }
+        break;
+    }
+
+    NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL;
+    pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS));
+    if (pGetHdmiFrlCapacityComputationParams)
+    {
+        NvBool bIsDSCPossible = NV_FALSE;
+        NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams));
+        pGetHdmiFrlCapacityComputationParams->input = *pFRLParams;
+        pGetHdmiFrlCapacityComputationParams->dsc.maxSliceCount = NV_MIN(pSrcCaps->dscCaps.maxNumHztSlices * numHeadsDrivingSink, pSinkCaps->pHdmiForumInfo->dsc_MaxSlices);
+        pGetHdmiFrlCapacityComputationParams->dsc.maxSliceWidth = pSrcCaps->dscCaps.maxWidthPerSlice;
+        pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_IS_FRL_DSC_POSSIBLE;
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+        if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                      pThis->clientHandles.hDisplay,
+                                      NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION,
+                                      pGetHdmiFrlCapacityComputationParams,
+                                      sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS)
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+        NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                         0,
+                                                         NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION,
+                                                         pGetHdmiFrlCapacityComputationParams,
+                                                         sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS));
+        if (bSuccess == NV_TRUE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+        {
+            bIsDSCPossible = pGetHdmiFrlCapacityComputationParams->dsc.bIsDSCPossible;
+        }
+
+        pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams);
+        return bIsDSCPossible;
+    }
+    return NV_FALSE;
+}
+
+static void translateBitRate(HDMI_FRL_DATA_RATE frlRate, NvU32 *pFrlBitRateGbps, NvU32 *pNumLanes)
+{
+    switch(frlRate)
+    {
+    case HDMI_FRL_DATA_RATE_4LANES_12GBPS : { *pFrlBitRateGbps = 12; *pNumLanes = 4; break; }
+    case HDMI_FRL_DATA_RATE_4LANES_10GBPS : { *pFrlBitRateGbps = 10; *pNumLanes = 4; break; }
+    case HDMI_FRL_DATA_RATE_4LANES_8GBPS  : { *pFrlBitRateGbps =  8; *pNumLanes = 4; break; }
+    case HDMI_FRL_DATA_RATE_4LANES_6GBPS  : { *pFrlBitRateGbps =  6; *pNumLanes = 4; break; }
+    case HDMI_FRL_DATA_RATE_3LANES_6GBPS  : { *pFrlBitRateGbps =  6; *pNumLanes = 3; break; }
+    case HDMI_FRL_DATA_RATE_3LANES_3GBPS  : // fall through
+    default                               : { *pFrlBitRateGbps =  3; *pNumLanes = 3; break; }
+    }
+}
+
+// Determine if video transport is possible at any FRL rate in the specified range
+// Iterate from min rate to max rate
+static NVHDMIPKT_RESULT
+determineUncompressedFRLConfig(NVHDMIPKT_CLASS *pThis,
+                               NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams,
+                               HDMI_FRL_DATA_RATE 
minFRLRate, + HDMI_FRL_DATA_RATE maxFRLRate, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT *pResults) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + HDMI_FRL_DATA_RATE frlRate = minFRLRate; + NVHDMIPKT_RESULT status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + while (frlRate != HDMI_FRL_DATA_RATE_NONE) + { + translateBitRate(frlRate, &pFRLParams->frlBitRateGbps, &pFRLParams->numLanes); + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_UNCOMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto uncompressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if ((status == NVHDMIPKT_SUCCESS) || + (frlRate == maxFRLRate)) + { + break; + } + + // try again at next link rate + frlRate = getNextHigherLinkRate(frlRate); + } + + pResults->frlRate = frlRate; + +uncompressedQuery_exit: + if (pGetHdmiFrlCapacityComputationParams) + { + pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams); + } + return status; +} + +// Determines the absolute min n max Bpp settings we can use with DSC. This is irrespective of FRL rate +static void calcBppMinMax(HDMI_SRC_CAPS const *pSrcCaps, + HDMI_SINK_CAPS const *pSinkCaps, + HDMI_VIDEO_TRANSPORT_INFO const *pVidTransInfo, + NvU32 *pBppMinX16, + NvU32 *pBppMaxX16) +{ + + NvU32 bppMinX16 = 0; + NvU32 bppMaxX16 = 0; + + switch(pVidTransInfo->packing) + { + case HDMI_PIXEL_PACKING_YCbCr420: { bppMinX16 = 6 * 16; bppMaxX16 = (3 * pVidTransInfo->bpc * 8 - 1); break; } + case HDMI_PIXEL_PACKING_YCbCr422: { bppMinX16 = 7 * 16; bppMaxX16 = (2 * pVidTransInfo->bpc * 16 - 1); break; } + case HDMI_PIXEL_PACKING_RGB: + case HDMI_PIXEL_PACKING_YCbCr444: + default: { bppMinX16 = 8 * 16; bppMaxX16 = (3 * pVidTransInfo->bpc * 16 - 1); break; } + } + + // cap to 12 if DSC_All_Bpp is not set + if (!pSinkCaps->pHdmiForumInfo->dsc_All_bpp) + { + bppMaxX16 = (bppMaxX16 > 12*16) ? 
12*16 : bppMaxX16; + } + + if (pVidTransInfo->bDualHeadMode && (bppMaxX16 > pSrcCaps->dscCaps.dualHeadBppTargetMaxX16)) + { + bppMaxX16 = pSrcCaps->dscCaps.dualHeadBppTargetMaxX16; + } + + *pBppMinX16 = bppMinX16; + *pBppMaxX16 = bppMaxX16; +} + + +// Determine minimum FRL rate at which Video Transport is possible at given min bpp +// Once FRL rate is found, determine the max bpp possible at this FRL rate +// To determine Primary Compressed Format using this function caller must pass in the full range of min, max FRL and min, max Bpp +// For any optimizations on top of the Primary Compressed Format, caller must adjust the range of these + +static NVHDMIPKT_RESULT +determineCompressedFRLConfig(NVHDMIPKT_CLASS *pThis, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS *pFRLParams, + HDMI_FRL_DATA_RATE minFRLRate, + HDMI_FRL_DATA_RATE maxFRLRate, + NvU32 bppMinX16, + NvU32 bppMaxX16, + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT *pResults) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + HDMI_FRL_DATA_RATE frlRate = minFRLRate; + NvU32 bppTargetX16 = bppMinX16; + NVHDMIPKT_RESULT status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + // Set bppTarget to min and iterate over FRL rates + pFRLParams->compressionInfo.bppTargetx16 = bppMinX16; + while (frlRate != HDMI_FRL_DATA_RATE_NONE) + { + translateBitRate(frlRate, &pFRLParams->frlBitRateGbps, &pFRLParams->numLanes); + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto compressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if ((status == NVHDMIPKT_SUCCESS) || + (frlRate == maxFRLRate)) + { + break; + } + + frlRate = getNextHigherLinkRate(frlRate); + } + + if (status != NVHDMIPKT_SUCCESS) + { + goto compressedQuery_exit; + } + + // We now have the base FRL rate. 
Iterate over bppTarget to find the max supported bpp + status = NVHDMIPKT_INSUFFICIENT_BANDWIDTH; + bppTargetX16 = bppMaxX16; + NvU32 stepSize = 16; + + while (status != NVHDMIPKT_SUCCESS) + { + pFRLParams->compressionInfo.bppTargetx16 = bppTargetX16; + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->input = *pFRLParams; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + *pResults = pGetHdmiFrlCapacityComputationParams->result; + } + } + else + { + status = NVHDMIPKT_FAIL; + goto compressedQuery_exit; + } + + status = (pResults->isVideoTransportSupported && pResults->isAudioSupported) ? NVHDMIPKT_SUCCESS : status; + + if (status == NVHDMIPKT_SUCCESS) + { + // If this is the maxBpp nothing else to try + if (bppTargetX16 == bppMaxX16) + { + break; + } + + // If we detected a successful bppTarget value, go up a step size, + // and iterate by decrementing bppTarget by 1/16 to reach a finer tuned bpp value + if (stepSize == 16) + { + status = NVHDMIPKT_RETRY; + bppTargetX16 = bppTargetX16 + stepSize - 1; + stepSize = 1; + } + } + else + { + bppTargetX16 = bppTargetX16 - stepSize; + // bppTargetX16 is guaranteed to be >= bppMinX16 + } + } + + pResults->frlRate = frlRate; + pResults->bppTargetx16 = bppTargetX16; + +compressedQuery_exit: + if (pGetHdmiFrlCapacityComputationParams) + { + pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams); + } + + return status; +} + +/* + * hdmiQueryFRLConfigC671 + * + * This function uses below logic: + * Verify if force params from client are in expected range + * If client is not asking for optimum config or force enable DSC, try uncompressed first + * For DSC enabled, honor all choices client has made for slice count/width. Determine the primary compressed format (PCF) first. + * For any other items client wants to control do this as optimization on top of the PCF + * Call DSC library for PPS generation unless specified otherwise. 
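+ * On success, pFRLConfig carries the selected FRL rate, the audio configuration that was assumed,
+ * and, when compression is engaged, the slice layout, target bpp and the computed HCActive/HCBlank values.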
+ */ +static NVHDMIPKT_RESULT +hdmiQueryFRLConfigC671(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig) +{ + NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pGetHdmiFrlCapacityComputationParams = NULL; + NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS; + + NVMISC_MEMSET(pFRLConfig, 0, sizeof(HDMI_FRL_CONFIG)); + + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS frlParams; + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT frlComputeResult; + NvU32 bppMinX16, bppMaxX16; + + NVMISC_MEMSET(&frlParams, 0, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS)); + NVMISC_MEMSET(&frlComputeResult, 0, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT)); + + NvU32 vic = NVT_GET_CEA_FORMAT(pVidTransInfo->pTiming->etc.status); + NvBool bTryUncompressedMode, bCanUseDSC; + + populateBaseFRLParams(pVidTransInfo, + pSinkCaps, + pClientCtrl->forceAudio2Ch48KHz ? NV_TRUE : NV_FALSE, + &frlParams); + + calcBppMinMax(pSrcCaps, pSinkCaps, pVidTransInfo, &bppMinX16, &bppMaxX16); + bCanUseDSC = evaluateIsDSCPossible(pThis, pSrcCaps, pSinkCaps, pVidTransInfo, &frlParams); + + // Input validation + if ((pClientCtrl->forceFRLRate && (pClientCtrl->frlRate > pSinkCaps->linkMaxFRLRate)) || + (pClientCtrl->enableDSC && !bCanUseDSC) || + (pClientCtrl->forceSliceCount && (pClientCtrl->sliceCount > (NvU32)(NV_MIN(pSrcCaps->dscCaps.maxNumHztSlices, pSinkCaps->pHdmiForumInfo->dsc_MaxSlices)))) || + (pClientCtrl->forceSliceWidth && (pClientCtrl->sliceWidth > NV_MIN(pSrcCaps->dscCaps.maxWidthPerSlice, MAX_RECONSTRUCTED_HACTIVE_PIXELS))) || + (pClientCtrl->forceBppx16 && ((pClientCtrl->bitsPerPixelX16 < bppMinX16) || (pClientCtrl->bitsPerPixelX16 > bppMaxX16))) || + (pClientCtrl->forceBppx16 && !pSinkCaps->pHdmiForumInfo->dsc_All_bpp)) + { + return NVHDMIPKT_FAIL; + } + + bTryUncompressedMode = (bCanUseDSC && (pClientCtrl->enableDSC || + (pClientCtrl->option == HDMI_QUERY_FRL_LOWEST_BANDWIDTH))) ? 
+ NV_FALSE : NV_TRUE; + + HDMI_FRL_DATA_RATE maxRate = NV_MIN(pSinkCaps->linkMaxFRLRate, pSrcCaps->linkMaxFRLRate); + + pGetHdmiFrlCapacityComputationParams = pThis->callback.malloc(pThis->cbHandle, sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + + if (bTryUncompressedMode) + { + HDMI_FRL_DATA_RATE minFRLRate = HDMI_FRL_DATA_RATE_NONE, maxFRLRate = HDMI_FRL_DATA_RATE_NONE; + NvBool bHasPreCalcFRLData = NV_FALSE; + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->preCalc.vic = vic; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + bHasPreCalcFRLData = pGetHdmiFrlCapacityComputationParams->preCalc.bHasPreCalcFRLData; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + // We iterate over a range of FRL rates to see if timing is supported in uncompressed manner + // adjust the min and max range here according to what we aim for: if client wants to force a single FRL rate, + // min n max point to just this one rate. If client just wants any config, just try max supported rate. 
+ // For everything else, iterate from lowest to highest FRL rate + if (pClientCtrl->forceFRLRate) + { + minFRLRate = pClientCtrl->frlRate; + maxFRLRate = pClientCtrl->frlRate; + } + else if (pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_BANDWIDTH) + { + minFRLRate = maxRate; + maxFRLRate = maxRate; + } + else if (bHasPreCalcFRLData) + { + HDMI_FRL_DATA_RATE preCalcFrlRate; + pGetHdmiFrlCapacityComputationParams->preCalc.packing = pVidTransInfo->packing; + pGetHdmiFrlCapacityComputationParams->preCalc.bpc = pVidTransInfo->bpc; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_UNCOMPRESSED_FRL_CONFIG; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + preCalcFrlRate = pGetHdmiFrlCapacityComputationParams->preCalc.frlRate; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + if (preCalcFrlRate <= maxRate) + { + minFRLRate = preCalcFrlRate; + maxFRLRate = preCalcFrlRate; + } + else if (!bCanUseDSC) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else if (pClientCtrl->option == HDMI_QUERY_FRL_ANY_CONFIG) + { + minFRLRate = maxRate; + maxFRLRate = maxRate; + } + else // HDMI_QUERY_FRL_OPTIMUM_CONFIG or HDMI_QUERY_FRL_LOWEST_BANDWIDTH + { + minFRLRate = HDMI_FRL_DATA_RATE_3LANES_3GBPS; + maxFRLRate = maxRate; + } + + result = determineUncompressedFRLConfig(pThis, &frlParams, minFRLRate, maxFRLRate, &frlComputeResult); + if (result == NVHDMIPKT_SUCCESS) + { + goto frlQuery_Success; + } + // If we could not find a FRL rate and DSC is not allowed, try using min audio see if it gets us a pass result + else if (!bCanUseDSC) + { + frlParams.numAudioChannels = 2; + frlParams.audioFreqKHz = 48; + frlParams.audioType = AUDIO_PKTTYPE_LPCM_SAMPLE; + result = determineUncompressedFRLConfig(pThis, &frlParams, minFRLRate, maxFRLRate, &frlComputeResult); + // If still not found return failure. 
Nothing more to try + if (result != NVHDMIPKT_SUCCESS) + { + goto frlQuery_fail; + } + } + } + + if (bCanUseDSC) + { + HDMI_FRL_DATA_RATE minFRLRateItr, maxFRLRateItr; + HDMI_FRL_DATA_RATE dscMaxFRLRate = NV_MIN(pSinkCaps->linkMaxFRLRateDSC, pSrcCaps->linkMaxFRLRate); + NvU32 bppMinX16Itr, bppMaxX16Itr; + NvBool bHasPreCalcFRLData = NV_FALSE; + + // DSC_All_bpp = 1: + // Lower the compression ratio better the pixel quality, hence a high bppTarget value will be ideal + // DSC_All_bpp = 1 allows us the flexibility to use a bppTarget setting different from the primary compressed format + // DSC_All_bpp = 0: + // Per spec, this supports only the bppTarget from primary compressed format - {minimum FRL rate, bpp, HCactive, HCblank} + + minFRLRateItr = HDMI_FRL_DATA_RATE_3LANES_3GBPS; + maxFRLRateItr = dscMaxFRLRate; + bppMinX16Itr = bppMinX16; + bppMaxX16Itr = bppMaxX16; + + if (pGetHdmiFrlCapacityComputationParams) + { + NVMISC_MEMSET(pGetHdmiFrlCapacityComputationParams, 0, sizeof(*pGetHdmiFrlCapacityComputationParams)); + pGetHdmiFrlCapacityComputationParams->preCalc.vic = vic; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + bHasPreCalcFRLData = pGetHdmiFrlCapacityComputationParams->preCalc.bHasPreCalcFRLData; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + if (bHasPreCalcFRLData) + { + HDMI_FRL_DATA_RATE preCalcFrlRate; + NvU32 preCalcBppx16; + + if (pGetHdmiFrlCapacityComputationParams) + { + pGetHdmiFrlCapacityComputationParams->preCalc.packing = pVidTransInfo->packing; + pGetHdmiFrlCapacityComputationParams->cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_COMPRESSED_FRL_CONFIG; +#if NVHDMIPKT_RM_CALLS_INTERNAL + if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient, + pThis->clientHandles.hDisplay, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)) == NVOS_STATUS_SUCCESS) +#else // !NVHDMIPKT_RM_CALLS_INTERNAL + NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle, + 0, + NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION, + pGetHdmiFrlCapacityComputationParams, + sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS)); + if (bSuccess == NV_TRUE) +#endif // NVHDMIPKT_RM_CALLS_INTERNAL + { + preCalcFrlRate = pGetHdmiFrlCapacityComputationParams->preCalc.frlRate; + preCalcBppx16 = pGetHdmiFrlCapacityComputationParams->preCalc.bppX16; + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + } + else + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + + if (preCalcFrlRate != HDMI_FRL_DATA_RATE_UNSPECIFIED) + { + if (preCalcFrlRate > 
dscMaxFRLRate) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + maxFRLRateItr = minFRLRateItr = preCalcFrlRate; + bppMaxX16Itr = bppMinX16Itr = preCalcBppx16; + } + } + + // force SliceWidth and count if requested + if (pClientCtrl->forceSliceCount) + { + frlParams.compressionInfo.hSlices = pClientCtrl->sliceCount; + frlParams.compressionInfo.sliceWidth = NV_UNSIGNED_DIV_CEIL(pVidTransInfo->pTiming->HVisible, pClientCtrl->sliceCount); + } + else if (pClientCtrl->forceSliceWidth) + { + frlParams.compressionInfo.sliceWidth = pClientCtrl->sliceWidth; + frlParams.compressionInfo.hSlices = NV_UNSIGNED_DIV_CEIL(pVidTransInfo->pTiming->HVisible, pClientCtrl->sliceWidth); + } + + if (pClientCtrl->forceFRLRate) + { + if (pClientCtrl->frlRate > dscMaxFRLRate) + { + result = NVHDMIPKT_FAIL; + goto frlQuery_fail; + } + + minFRLRateItr = pClientCtrl->frlRate; + maxFRLRateItr = pClientCtrl->frlRate; + } + + if (pClientCtrl->forceBppx16) + { + bppMinX16Itr = pClientCtrl->bitsPerPixelX16; + bppMaxX16Itr = pClientCtrl->bitsPerPixelX16; + } + + // Determine Primary Compressed Format + // First determine the FRL rate at which video transport is possible even at bppMin + // Then iterate over bppTarget - start at max n decrement until we hit bppMin. The max bpp for which + // video transport is possible together with the FRL rate is the primary compressed format + + result = determineCompressedFRLConfig(pThis, &frlParams, + minFRLRateItr, maxFRLRateItr, + bppMinX16Itr, bppMaxX16Itr, + &frlComputeResult); + + + // there are no FRL rates at which video transport is possible even at min bpp + // Could not even determine PCF. Cannot support this mode + if (result != NVHDMIPKT_SUCCESS) + { + goto frlQuery_fail; + } + + // Any other optimizations we want to do over the Primary Compressed Format? + { + NvBool bRedoDSCCalc = NV_FALSE; + + if (pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_BANDWIDTH) + { + NvBool bHasPreCalcFRLData = NV_TRUE; + + if (bHasPreCalcFRLData) + { + frlComputeResult.frlRate = dscMaxFRLRate; + } + else + { + // Keep bppTgt calculated as Primary Compressed Format and use FRL rate the highest availableLinkBw + // redo DSC calculations to recalculate TBlanktoTTotal ratio and HCblank/active to suit the new rate + // The hw method setting matters and may cause blank screen if not recalculated - see Bug 3458295 #9 + minFRLRateItr = maxFRLRateItr = dscMaxFRLRate; + bppMinX16Itr = bppMaxX16Itr = frlComputeResult.bppTargetx16; + bRedoDSCCalc = NV_TRUE; + } + } + + if (pSinkCaps->pHdmiForumInfo->dsc_All_bpp) + { + if ((pClientCtrl->option == HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY) && + (frlComputeResult.frlRate < (NvU32)dscMaxFRLRate)) + { + // Increase FRL rate if possible and iterate over primary compressed format bppTarget to max Bpp + minFRLRateItr = getNextHigherLinkRate(frlComputeResult.frlRate); + bppMinX16Itr = frlComputeResult.bppTargetx16; + bppMaxX16Itr = bppMaxX16; + bRedoDSCCalc = NV_TRUE; + } + + if (pClientCtrl->option == HDMI_QUERY_FRL_LOWEST_BANDWIDTH) + { + // Keep FRL rate as the primary compressed format rate and force Bpp to Min + minFRLRateItr = maxFRLRateItr = frlComputeResult.frlRate; + bppMinX16Itr = bppMaxX16Itr = bppMinX16; + bRedoDSCCalc = NV_TRUE; + } + } + + if (bRedoDSCCalc) + { + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS optQueryParams; + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT optQueryResult; + NVMISC_MEMCPY(&optQueryParams, &frlParams, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS)); + + // If optimization is successful copy over new results. 
If not, no need to fail, keep Primary Compressed Format
+                if (determineCompressedFRLConfig(pThis, &optQueryParams, minFRLRateItr, maxFRLRateItr,
+                                                 bppMinX16Itr, bppMaxX16Itr,
+                                                 &optQueryResult) == NVHDMIPKT_SUCCESS)
+                {
+                    NVMISC_MEMCPY(&frlParams, &optQueryParams, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS));
+                    NVMISC_MEMCPY(&frlComputeResult, &optQueryResult, sizeof(NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT));
+                }
+            }
+        }
+    }
+
+frlQuery_Success:
+    pFRLConfig->maxSupportedAudioCh      = frlParams.numAudioChannels;
+    pFRLConfig->maxSupportedAudioFreqKHz = frlParams.audioFreqKHz;
+    pFRLConfig->dscInfo.sliceCount       = frlParams.compressionInfo.hSlices;
+    pFRLConfig->dscInfo.sliceWidth       = frlParams.compressionInfo.sliceWidth;
+
+    pFRLConfig->frlRate                           = frlComputeResult.frlRate;
+    pFRLConfig->dscInfo.bEnableDSC                = frlComputeResult.engageCompression;
+    pFRLConfig->dscInfo.bitsPerPixelX16           = frlComputeResult.bppTargetx16;
+    pFRLConfig->dscInfo.dscHActiveBytes           = frlComputeResult.hcActiveBytes;
+    pFRLConfig->dscInfo.dscHActiveTriBytes        = frlComputeResult.hcActiveTriBytes;
+    pFRLConfig->dscInfo.dscHBlankTriBytes         = frlComputeResult.hcBlankTriBytes;
+    pFRLConfig->dscInfo.dscTBlankToTTotalRatioX1k = frlComputeResult.tBlankToTTotalX1k;
+
+    if (pFRLConfig->dscInfo.bEnableDSC && !pClientCtrl->skipGeneratePPS)
+    {
+        DSC_INFO     dscInfo;
+        MODESET_INFO dscModesetInfo;
+        WAR_DATA     warData;
+
+        NVMISC_MEMSET(&dscInfo,        0, sizeof(DSC_INFO));
+        NVMISC_MEMSET(&dscModesetInfo, 0, sizeof(MODESET_INFO));
+        NVMISC_MEMSET(&warData,        0, sizeof(WAR_DATA));
+
+        populateDscCaps(pSrcCaps, pSinkCaps, &dscInfo);
+        populateDscModesetInfo(pVidTransInfo, &dscModesetInfo);
+
+        dscInfo.forcedDscParams.sliceWidth = pFRLConfig->dscInfo.sliceWidth;
+        dscInfo.forcedDscParams.dscRevision.versionMajor = 1;
+        dscInfo.forcedDscParams.dscRevision.versionMinor = 2;
+
+        NvU32 bitsPerPixelX16 = pFRLConfig->dscInfo.bitsPerPixelX16;
+        NvU32 frlBitRateGbps = 0, numLanes = 0;
+        translateBitRate(pFRLConfig->frlRate, &frlBitRateGbps, &numLanes);
+        NvU64 availableLinkBw = (NvU64)(frlBitRateGbps) * (NvU64)(numLanes) * MULTIPLIER_1G;
+        warData.connectorType = DSC_HDMI;
+
+        if ((DSC_GeneratePPS(&dscInfo,
+                             &dscModesetInfo,
+                             &warData,
+                             availableLinkBw,
+                             pFRLConfig->dscInfo.pps,
+                             &bitsPerPixelX16)) != NVT_STATUS_SUCCESS)
+        {
+            NvHdmiPkt_Print(pThis, "ERROR - DSC PPS calculation failed.");
+            NvHdmiPkt_Assert(0);
+            result = NVHDMIPKT_FAIL;
+        }
+
+        // DSC lib should honor the bpp setting passed from client, assert here just in case
+        NvHdmiPkt_Assert(bitsPerPixelX16 == pFRLConfig->dscInfo.bitsPerPixelX16);
+    }
+
+frlQuery_fail:
+    if (pGetHdmiFrlCapacityComputationParams)
+    {
+        pThis->callback.free(pThis->cbHandle, pGetHdmiFrlCapacityComputationParams);
+    }
+
+    return result;
+}
+
+/*
+ * hdmiSetFRLConfigC671
+ */
+static NVHDMIPKT_RESULT
+hdmiSetFRLConfigC671(NVHDMIPKT_CLASS *pThis,
+                     NvU32            subDevice,
+                     NvU32            displayId,
+                     NvBool           bFakeLt,
+                     HDMI_FRL_CONFIG *pFRLConfig)
+{
+    NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS params = {0};
+    NVMISC_MEMSET(&params, 0, sizeof(params));
+    params.subDeviceInstance = subDevice;
+    params.displayId         = displayId;
+    params.data              = translateFRLRateToNv0073SetHdmiFrlConfig(pFRLConfig->frlRate);
+    params.bFakeLt           = bFakeLt;
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+    if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                  pThis->clientHandles.hDisplay,
+                                  NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG,
+                                  &params,
+                                  sizeof(params)) != NVOS_STATUS_SUCCESS)
+
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+    NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                     params.subDeviceInstance,
+                                                     NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG,
+                                                     &params,
+                                                     sizeof(params));
+    if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+    {
+        NvHdmiPkt_Print(pThis, "ERROR - RM call to set HDMI FRL failed.");
+        NvHdmiPkt_Assert(0);
+
+        return NVHDMIPKT_FAIL;
+    }
+
+    return NVHDMIPKT_SUCCESS;
+}
+
+/*
+ * hdmiClearFRLConfigC671
+ */
+static NVHDMIPKT_RESULT
+hdmiClearFRLConfigC671(NVHDMIPKT_CLASS *pThis,
+                       NvU32            subDevice,
+                       NvU32            displayId)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+
+    NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS params = {0};
+    NVMISC_MEMSET(&params, 0, sizeof(params));
+    params.subDeviceInstance = subDevice;
+    params.displayId         = displayId;
+    params.data              = NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE;
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+    if (CALL_DISP_RM(NvRmControl)(pThis->clientHandles.hClient,
+                                  pThis->clientHandles.hDisplay,
+                                  NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG,
+                                  &params,
+                                  sizeof(params)) != NVOS_STATUS_SUCCESS)
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+    NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                     params.subDeviceInstance,
+                                                     NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG,
+                                                     &params,
+                                                     sizeof(params));
+
+    if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+    {
+        NvHdmiPkt_Print(pThis, "WARNING - RM call to reset HDMI FRL failed.");
+        result = NVHDMIPKT_FAIL;
+    }
+    return result;
+}
+
+static NVHDMIPKT_RESULT
+hdmiPacketWriteC671(NVHDMIPKT_CLASS* pThis,
+                    NvU32            subDevice,
+                    NvU32            displayId,
+                    NvU32            head,
+                    NVHDMIPKT_TYPE   packetType,
+                    NVHDMIPKT_TC     transmitControl,
+                    NvU32            packetLen,
+                    NvU8 const *const pPacket)
+{
+    NVHDMIPKT_RESULT result = NVHDMIPKT_SUCCESS;
+    NvU32 pktTypeC671 = pThis->translatePacketType(pThis, packetType);
+
+    if (head >= NVC671_SF_HDMI_INFO_CTRL__SIZE_1 ||
+        packetLen == 0                           ||
+        pPacket == 0                             ||
+        pktTypeC671 == NVHDMIPKT_C671_INVALID_PKT_TYPE)
+    {
+        result = NVHDMIPKT_INVALID_ARG;
+        goto hdmiPacketWriteC671_exit;
+    }
+
+    if (pktTypeC671 == NVC671_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME)
+    {
+        // In GA10X, we use Generic infoframe for ACR WAR. This RM ctrl is used to control if the WAR is enabled/not.
+        NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS dispCapsParams;
+
+        NVMISC_MEMSET(&dispCapsParams, 0, sizeof(dispCapsParams));
+
+#if NVHDMIPKT_RM_CALLS_INTERNAL
+        if (NvRmControl(pThis->clientHandles.hClient,
+                        pThis->clientHandles.hDisplay,
+                        NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2,
+                        &dispCapsParams,
+                        sizeof(dispCapsParams)) != NVOS_STATUS_SUCCESS)
+#else // !NVHDMIPKT_RM_CALLS_INTERNAL
+        NvBool bSuccess = pThis->callback.rmDispControl2(pThis->cbHandle,
+                                                         subDevice,
+                                                         NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2,
+                                                         &dispCapsParams, sizeof(dispCapsParams));
+        if (bSuccess == NV_FALSE)
+#endif // NVHDMIPKT_RM_CALLS_INTERNAL
+        {
+            NvHdmiPkt_Print(pThis, "ERROR - RM call to get caps failed.");
+            NvHdmiPkt_Assert(0);
+            result = NVHDMIPKT_FAIL;
+            goto hdmiPacketWriteC671_exit;
+        }
+
+        NvBool bSwAcr = (NV0073_CTRL_SYSTEM_GET_CAP(dispCapsParams.capsTbl, NV0073_CTRL_SYSTEM_CAPS_HDMI21_SW_ACR_BUG_3275257)) ?
NV_TRUE: NV_FALSE; + + if (bSwAcr) + { + // acquire mutex + pThis->callback.acquireMutex(pThis->cbHandle); + + result = hdmiPacketWrite0073(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket); + + if (result == NVHDMIPKT_SUCCESS) + { + result = hdmiPacketCtrl0073(pThis, subDevice, displayId, head, packetType, transmitControl); + } + + // release mutex + pThis->callback.releaseMutex(pThis->cbHandle); + } + else + { + result = hdmiPacketWrite9171(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket); + } + } + else + { + result = hdmiPacketWrite9171(pThis, subDevice, displayId, head, packetType, transmitControl, packetLen, pPacket); + } + +hdmiPacketWriteC671_exit: + return result; +} + +// non-HW - class utility/maintenance functions +/* + * hdmiConstructorC671 + */ +NvBool +hdmiConstructorC671(NVHDMIPKT_CLASS* pThis) +{ + NvBool result = NV_TRUE; + + return result; +} + +/* + * hdmiDestructorC671 + */ +void +hdmiDestructorC671(NVHDMIPKT_CLASS* pThis) + +{ + return; +} + +/* + * initializeHdmiPktInterfaceC671 + */ +void +initializeHdmiPktInterfaceC671(NVHDMIPKT_CLASS* pClass) +{ + pClass->dispSfUserClassId = NVC671_DISP_SF_USER; + pClass->hdmiAssessLinkCapabilities = hdmiAssessLinkCapabilitiesC671; + pClass->hdmiQueryFRLConfig = hdmiQueryFRLConfigC671; + pClass->hdmiSetFRLConfig = hdmiSetFRLConfigC671; + pClass->hdmiClearFRLConfig = hdmiClearFRLConfigC671; + pClass->hdmiPacketWrite = hdmiPacketWriteC671; +} diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_class.h b/src/common/modeset/hdmipacket/nvhdmipkt_class.h new file mode 100644 index 000000000..2889753ef --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_class.h @@ -0,0 +1,179 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_class.h + * + * Purpose: This file contains hdmipkt class definition. Which defines class interfaces. + */ + +#ifndef _NVHDMIPKT_CLASS_H_ +#define _NVHDMIPKT_CLASS_H_ + +#include "nvlimits.h" +#include "nvhdmi_frlInterface.h" + +/************************************************************************************************* + * NOTE * This header file to be used only inside this (Hdmi Packet) library. 
* + ************************************************************************************************/ +// NVHDMIPKT_CLASS_ID: HDMI packet class version +// NOTE: Anytime a new class comes with upgrades, it needs to be added here. +// Consult resman\kernel\inc\classhal.h, before adding a class. +typedef enum +{ + NVHDMIPKT_0073_CLASS = 0, // pre GK104 + NVHDMIPKT_9171_CLASS = 1, // GK104 + NVHDMIPKT_9271_CLASS = 2, // GK110 + NVHDMIPKT_9471_CLASS = 3, // GM10X + NVHDMIPKT_9571_CLASS = 4, // GM20X + NVHDMIPKT_C371_CLASS = 5, // GV100 + NVHDMIPKT_C571_CLASS = 6, // TU102 + NVHDMIPKT_C671_CLASS = 7, // GA102, T234D + NVHDMIPKT_INVALID_CLASS // Not to be used by client, and always the last entry here. +} NVHDMIPKT_CLASS_ID; + +// Hdmi packet class +struct tagNVHDMIPKT_CLASS +{ + // data + NvU32 dispSfUserClassId; // Id from nvidia/class definition + NvU32 dispSfUserSize; + NvU32 numSubDevices; + NvU32 sfUserHandle; + NVHDMIPKT_RM_CLIENT_HANDLES clientHandles; + NVHDMIPKT_MEM_MAP memMap[NV_MAX_SUBDEVICES]; + NvHdmiPkt_CBHandle cbHandle; + NVHDMIPKT_CALLBACK callback; + NVHDMIPKT_CLASS_ID thisId; + NvBool isRMCallInternal; + + // functions + NVHDMIPKT_RESULT + (*hdmiPacketCtrl) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl); + + NVHDMIPKT_RESULT + (*hdmiPacketWrite) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId, + NvU32 head, + NVHDMIPKT_TYPE packetType, + NVHDMIPKT_TC transmitControl, + NvU32 packetLen, + NvU8 const *const pPacket); + + // HW functions - that read/write registers + NvBool + (*hdmiReadPacketStatus) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktTypeNative); + + NVHDMIPKT_RESULT + (*hdmiWritePacketCtrl) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 pktTypeNative, + NvU32 transmitControl, + NvBool bDisable); + + void + (*hdmiWriteAviPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteAudioPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteGenericPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteGeneralCtrlPacket)(NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + void + (*hdmiWriteVendorPacket) (NVHDMIPKT_CLASS* pThis, + NvU32* pBaseReg, + NvU32 head, + NvU32 packetLen, + NvU8 const *const pPacket); + + // utility functions to translate the generic packet type and transmit control + // to corresponding rm ctrl or hw define types. + NvU32 + (*translatePacketType) (NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TYPE packetType); + + NvU32 + (*translateTransmitControl) (NVHDMIPKT_CLASS* pThis, + NVHDMIPKT_TC transmitControl); + + // + // HDMI FRL functions to enable/disable HDMI FRL and calculate the bandwidth + // capacity required for target timing. 
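+    // hdmiAssessLinkCapabilities is expected to be called at hotplug time; the query/set/clear
+    // entry points below size and then program (or tear down) the FRL link for a given timing.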
+ // + NVHDMIPKT_RESULT + (*hdmiAssessLinkCapabilities) (NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); + NVHDMIPKT_RESULT + (*hdmiQueryFRLConfig) (NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig); + + NVHDMIPKT_RESULT + (*hdmiSetFRLConfig) (NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig); + + NVHDMIPKT_RESULT + (*hdmiClearFRLConfig) (NVHDMIPKT_CLASS* pThis, + NvU32 subDevice, + NvU32 displayId); +}; + +#endif //_NVHDMIPKT_CLASS_H_ diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_common.h b/src/common/modeset/hdmipacket/nvhdmipkt_common.h new file mode 100644 index 000000000..771e9e7fe --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_common.h @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_common.h + * + * Purpose: This file contains defines and structures used across hdmipkt library. All the + * common stuff goes here. + */ + +#ifndef _NVHDMIPKT_COMMON_H_ +#define _NVHDMIPKT_COMMON_H_ + +/************************************************************************************************* + * NOTE * This header file to be used only inside this (Hdmi Packet) library. 
* + ************************************************************************************************/ + +#include "nvhdmipkt.h" +#include "nvhdmi_frlInterface.h" +#if NVHDMIPKT_RM_CALLS_INTERNAL +#include "nvRmApi.h" +#define CALL_DISP_RM(x) x + +#endif + +/**************************** HDMI Library defines, enums and structs ***************************/ +// typedefs +typedef struct tagNVHDMIPKT_CLASS NVHDMIPKT_CLASS; +typedef struct tagNVHDMIPKT_MEM_MAP NVHDMIPKT_MEM_MAP; + +// Register read/write defines +#define REG_RD32(reg, offset) (*(((volatile NvU32*)(reg)) + ((offset)/4))) +#define REG_WR32(reg, offset, data) ((*(((volatile NvU32*)(reg)) + ((offset)/4))) = (data)) + +#define NVHDMIPKT_INVALID_SUBDEV (0xFFFFFFFF) +#define NVHDMIPKT_DONT_USE_TIMER +#define NVHDMIPKT_STATUS_READ_TIMEOUT_IN_us (1*1000*1000) /* us - micro second */ + +// Disp SF User memory map and handle structure +struct tagNVHDMIPKT_MEM_MAP +{ + NvU32 subDevice; + NvU32 memHandle; + void* pMemBase; +}; + +// HDMIPKT print define +#if defined (DEBUG) + #define NvHdmiPkt_Print(_p, ...) \ + do { \ + if ((_p)->callback.print) \ + { \ + (_p)->callback.print((_p)->cbHandle, "HdmiPacketLibrary: " __VA_ARGS__); \ + } \ + } while(0) +#else + #define NvHdmiPkt_Print(_p, ...) /* nothing */ +#endif + + +// HDMIPKT assert define +#if defined (DEBUG) + #define NvHdmiPkt_AssertP(p, expr) ((p)->callback.assert ? \ + (p)->callback.assert((p)->cbHandle, !!(expr)) : 0) + #define NvHdmiPkt_Assert(expr) NvHdmiPkt_AssertP(pThis, expr) +#else + #define NvHdmiPkt_AssertP(p, expr) + #define NvHdmiPkt_Assert(expr) +#endif + + +// Prototypes for common functions shared across implementations. +extern void hdmiWriteDummyPacket(NVHDMIPKT_CLASS*, NvU32*, NvU32, NvU32, NvU8 const *const); +extern NVHDMIPKT_RESULT hdmiAssessLinkCapabilitiesDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NVT_EDID_INFO const * const pSinkEdid, + HDMI_SRC_CAPS *pSrcCaps, + HDMI_SINK_CAPS *pSinkCaps); +extern NVHDMIPKT_RESULT hdmiQueryFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + HDMI_VIDEO_TRANSPORT_INFO const * const pVidTransInfo, + HDMI_QUERY_FRL_CLIENT_CONTROL const * const pClientCtrl, + HDMI_SRC_CAPS const * const pSrcCaps, + HDMI_SINK_CAPS const * const pSinkCaps, + HDMI_FRL_CONFIG *pFRLConfig); +extern NVHDMIPKT_RESULT hdmiSetFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId, + NvBool bFakeLt, + HDMI_FRL_CONFIG *pFRLConfig); +extern NVHDMIPKT_RESULT hdmiClearFRLConfigDummy(NVHDMIPKT_CLASS *pThis, + NvU32 subDevice, + NvU32 displayId); + + +#endif //_NVHDMIPKT_COMMON_H_ diff --git a/src/common/modeset/hdmipacket/nvhdmipkt_internal.h b/src/common/modeset/hdmipacket/nvhdmipkt_internal.h new file mode 100644 index 000000000..42487f0af --- /dev/null +++ b/src/common/modeset/hdmipacket/nvhdmipkt_internal.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * File: nvhdmipkt_internal.h + * + * Purpose: This files contains defines to be used by nvhdmipkt.c + */ + +#ifndef _NVHDMIPKT_INTERNAL_H_ +#define _NVHDMIPKT_INTERNAL_H_ + +/************************************************************************************************* + * NOTE * This header file to be used only inside this (Hdmi Packet) library. * + ************************************************************************************************/ +#define toHdmiPktHandle(p) ((NvHdmiPkt_Handle)(p)) +#define fromHdmiPktHandle(h) ((NVHDMIPKT_CLASS*)(h)) + +extern void initializeHdmiPktInterface0073(NVHDMIPKT_CLASS*); +extern void initializeHdmiPktInterface9171(NVHDMIPKT_CLASS*); +extern void initializeHdmiPktInterface9271(NVHDMIPKT_CLASS*); +extern void initializeHdmiPktInterface9471(NVHDMIPKT_CLASS*); +extern void initializeHdmiPktInterface9571(NVHDMIPKT_CLASS*); +extern void initializeHdmiPktInterfaceC371(NVHDMIPKT_CLASS*); +extern void initializeHdmiPktInterfaceC671(NVHDMIPKT_CLASS*); + +extern NvBool hdmiConstructor0073(NVHDMIPKT_CLASS*); +extern void hdmiDestructor0073 (NVHDMIPKT_CLASS*); +extern NvBool hdmiConstructor9171(NVHDMIPKT_CLASS*); +extern void hdmiDestructor9171 (NVHDMIPKT_CLASS*); +extern NvBool hdmiConstructor9271(NVHDMIPKT_CLASS*); +extern void hdmiDestructor9271 (NVHDMIPKT_CLASS*); +extern NvBool hdmiConstructor9471(NVHDMIPKT_CLASS*); +extern void hdmiDestructor9471 (NVHDMIPKT_CLASS*); +extern NvBool hdmiConstructor9571(NVHDMIPKT_CLASS*); +extern void hdmiDestructor9571 (NVHDMIPKT_CLASS*); +extern NvBool hdmiConstructorC371(NVHDMIPKT_CLASS*); +extern void hdmiDestructorC371 (NVHDMIPKT_CLASS*); +extern NvBool hdmiConstructorC671(NVHDMIPKT_CLASS*); +extern void hdmiDestructorC671 (NVHDMIPKT_CLASS*); + +#endif //_NVHDMIPKT_INTERNAL_H_ diff --git a/src/common/modeset/timing/displayid.h b/src/common/modeset/timing/displayid.h new file mode 100644 index 000000000..987c6feef --- /dev/null +++ b/src/common/modeset/timing/displayid.h @@ -0,0 +1,776 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: displayid.h +// +// Purpose: the template for DisplayID parsing (future replacement for EDID) +// +//***************************************************************************** + + +#ifndef __DISPLAYID_H_ +#define __DISPLAYID_H_ + +#include "nvtiming.h" + +// The structures below must be tightly packed, in order to correctly +// overlay on the EDID DisplayID extension block bytes. Both MSVC and +// gcc support the pack() pragma for this. + +#if defined(__GNUC__) || defined(_MSC_VER) +# define __SUPPORTS_PACK_PRAGMA 1 +#else +# error "unrecognized compiler: displayid structures must be tightly packed" +#endif + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack(1) +#endif + +typedef struct _tagDISPLAYID_SECTION +{ + NvU8 version; // displayid version + NvU8 section_bytes; // length of this displayID section excluding mandatory bytes [0, 251] + + NvU8 product_type; // NVT_DISPLAYID_PROD_X + NvU8 extension_count; + + NvU8 data[NVT_DISPLAYID_SECTION_MAX_SIZE]; // data blocks. Note, the length of this structure may + // exceed valid memory, as DisplayID has variable length + +} DISPLAYID_SECTION; + +#define NVT_DISPLAYID_VER_1_1 0x101 + +#define NVT_DISPLAYID_PROD_EXTENSION 0 // Extension (product type not declared) +#define NVT_DISPLAYID_PROD_TEST 1 // Test Structure/Test Equipment +#define NVT_DISPLAYID_PROD_DISPLAY_PANEL 2 // Display Panel, LCD, or PDP module, etc. 
+#define NVT_DISPLAYID_PROD_STANDALONE_MONITOR 3 // Standalone display device, desktop monitor, TV monitor +#define NVT_DISPLAYID_PROD_RECEIVER 4 // Television receiver or display product capable of RF signals +#define NVT_DISPLAYID_PROD_REPEATER 5 // Repeater/translator that is not intended as display device +#define NVT_DISPLAYID_PROD_DIRECT_DRIVE 6 // Direct Drive monitor +#define NVT_DISPLAYID_PROD_MAX_NUMBER 6 // max product number + + +typedef struct _tagDISPLAYID_DATA_BLOCK_HEADER +{ + NvU8 type; // identification + NvU8 revision; + NvU8 data_bytes; // number of payload bytes [0, 248] + +} DISPLAYID_DATA_BLOCK_HEADER; + +#define NVT_DISPLAYID_BLOCK_TYPE_PRODUCT_IDENTITY 0 // Product Identification block +#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_PARAM 1 // Display Parameters block +#define NVT_DISPLAYID_BLOCK_TYPE_COLOR_CHAR 2 // Color Characteristics block +#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_1 3 // Type 1 Detailed Timing block +#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_2 4 // Type 2 Detailed Timing block +#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_3 5 // Type 3 Short Timing block +#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_4 6 // Type 4 DMT ID Timing block +#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_VESA 7 // VESA Standard Timing block +#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_CEA 8 // CEA Standard Timing block +#define NVT_DISPLAYID_BLOCK_TYPE_RANGE_LIMITS 9 // Video Timing Range Limits block +#define NVT_DISPLAYID_BLOCK_TYPE_SERIAL_NUMBER 10 // Product Serial Number block +#define NVT_DISPLAYID_BLOCK_TYPE_ASCII_STRING 11 // General Purpose ASCII String block +#define NVT_DISPLAYID_BLOCK_TYPE_DEVICE_DATA 12 // Display Device Data block +#define NVT_DISPLAYID_BLOCK_TYPE_INTERFACE_POWER 13 // Interface Power Sequencing block +#define NVT_DISPLAYID_BLOCK_TYPE_TRANSFER_CHAR 14 // Transfer Characteristics block +#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE 15 // Display Interface Data Block +#define NVT_DISPLAYID_BLOCK_TYPE_STEREO 16 // Stereo Data Block +#define NVT_DISPLAYID_BLOCK_TYPE_TIMING_5 17 // Type V Timing Short Descriptor +#define NVT_DISPLAYID_BLOCK_TYPE_TILEDDISPLAY 18 // Tiled Display Data Block +#define NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE_FEATURES 0X26 // DisplayID2.0 Display Interface Features Data Block // +#define NVT_DISPLAYID_BLOCK_TYPE_CTA_DATA 0x81 // DIsplay ID data block +#define NVT_DISPLAYID_BLOCK_TYPE_VENDOR_SPEC 0x7F // Vendor Specific Data Block + +#define NVT_DISPLAYID_PRODUCT_IDENTITY_MIN_LEN 12 +#define NVT_DISPLAYID_PRODUCT_IDENTITY_MAX_STRING_LEN 0xE9 + +typedef struct _tagDISPLAYID_PROD_IDENTIFICATION_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + + NvU8 vendor[3]; + NvU16 product_code; + NvU32 serial_number; + NvU8 model_tag; + NvU8 model_year; + NvU8 productid_string_size; + + NvU8 productid_string[NVT_DISPLAYID_PRODUCT_IDENTITY_MAX_STRING_LEN]; +} DISPLAYID_PROD_IDENTIFICATION_BLOCK; + +typedef struct _tagDISPLAYID_DISPLAY_PARAM_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU16 horizontal_image_size; + NvU16 vertical_image_size; + NvU16 horizontal_pixel_count; + NvU16 vertical_pixel_count; + + NvU8 feature; + + NvU8 transfer_char_gamma; + NvU8 aspect_ratio; + NvU8 color_bit_depth; +} DISPLAYID_DISPLAY_PARAM_BLOCK; + +#define NVT_DISPLAYID_DISPLAY_PARAM_BLOCK_LEN 0x0C + +#define NVT_DISPLAYID_DISPLAY_PARAM_SUPPORT_AUDIO 7:7 +#define NVT_DISPLAYID_DISPLAY_PARAM_SEPARATE_AUDIO 6:6 +#define NVT_DISPLAYID_DISPLAY_PARAM_AUDIO_INPUT_OVERRIDE 5:5 +#define NVT_DISPLAYID_DISPLAY_PARAM_POWER_MANAGEMENT 4:4 +#define 
NVT_DISPLAYID_DISPLAY_PARAM_FIXED_TIMING 3:3 +#define NVT_DISPLAYID_DISPLAY_PARAM_FIXED_PIXEL_FORMAT 2:2 +#define NVT_DISPLAYID_DISPLAY_PARAM_DEINTERLACING 0:0 + +#define NVT_DISPLAYID_DISPLAY_PARAM_DEPTH_OVERALL 7:4 +#define NVT_DISPLAYID_DISPLAY_PARAM_DEPTH_NATIVE 3:0 + +typedef struct _tagDISPLAYID_COLOR_POINT +{ + NvU8 color_x_bits_low; + NvU8 color_bits_mid; + NvU8 color_y_bits_high; +} DISPLAYID_COLOR_POINT; + +#define NVT_DISPLAYID_COLOR_POINT_Y 7:4 +#define NVT_DISPLAYID_COLOR_POINT_X 3:0 + +#define NVT_DISPLAYID_COLOR_MAX_POINTS 22 + +typedef struct _tagDISPLAYID_COLOR_CHAR_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + + // Color Characteristics Information + NvU8 point_info; + + DISPLAYID_COLOR_POINT points[NVT_DISPLAYID_COLOR_MAX_POINTS]; +} DISPLAYID_COLOR_CHAR_BLOCK; + +#define NVT_DISPLAYID_COLOR_PRIMARIES 6:4 +#define NVT_DISPLAYID_COLOR_WHITE_POINTS 3:0 +#define NVT_DISPLAYID_COLOR_TEMPORAL 7:7 + +// the following fields apply to Timing Descriptors 1-3 (Not all of them are +// used per descriptor, but the format is the same +#define NVT_DISPLAYID_TIMING_PREFERRED 7:7 +#define NVT_DISPLAYID_TIMING_3D_STEREO 6:5 +#define NVT_DISPLAYID_TIMING_3D_STEREO_MONO 0 +#define NVT_DISPLAYID_TIMING_3D_STEREO_STEREO 1 +#define NVT_DISPLAYID_TIMING_3D_STEREO_EITHER 2 +#define NVT_DISPLAYID_TIMING_INTERLACE 4:4 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO 2:0 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_1_1 0 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_5_4 1 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_4_3 2 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_15_9 3 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_9 4 +#define NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_10 5 + +typedef struct _tag_DISPLAYID_TIMING_1_DESCRIPTOR +{ + NvU8 pixel_clock_low_minus_0_01MHz; + NvU8 pixel_clock_mid; + NvU8 pixel_clock_high; + + struct + { + NvU8 aspect_ratio : 3; + NvU8 rsvd : 1; + NvU8 interface_frame_scanning_type : 1; + NvU8 stereo_support : 2; + NvU8 is_preferred_detailed_timing : 1; + }options; + + struct + { + NvU8 active_image_pixels_low_minus_1; + NvU8 active_image_pixels_high; + NvU8 blank_pixels_low_minus_1; + NvU8 blank_pixels_high; + NvU8 front_porch_low_minus_1; + NvU8 front_porch_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_low_minus_1; + NvU8 sync_width_high; + }horizontal; + + struct + { + NvU8 active_image_lines_low_minus_1; + NvU8 active_image_lines_high; + NvU8 blank_lines_low_minus_1; + NvU8 blank_lines_high; + NvU8 front_porch_lines_low_minus_1; + NvU8 front_porch_lines_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_lines_low_minus_1; + NvU8 sync_width_lines_high; + }vertical; + +} DISPLAYID_TIMING_1_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_1_MAX_DESCRIPTORS 12 + +typedef struct _tagDISPLAYID_TIMING_1_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_1_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_1_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_1_BLOCK; + +#define NVT_DISPLAYID_TIMING_1_POLARITY_SHIFT 15 +#define NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS 8 + +typedef struct _tag_DISPLAYID_TIMING_2_DESCRIPTOR +{ + NvU8 pixel_clock_low_minus_0_01MHz; + NvU8 pixel_clock_mid; + NvU8 pixel_clock_high; + + struct + { + NvU8 rsvd : 2; + NvU8 vsync_polarity : 1; + NvU8 hsync_polarity : 1; + NvU8 interface_frame_scanning_type : 1; + NvU8 stereo_support : 2; + NvU8 is_preferred_detailed_timing : 1; + }options; + + struct + { + NvU8 active_image_in_char_minus_1; + NvU8 active_image_in_char_high : 1; + NvU8 blank_in_char_minus_1 : 7; + NvU8 sync_width_in_char_minus_1 : 4; + NvU8 
front_porch_in_char_minus_1 : 4; + }horizontal; + + struct + { + NvU8 active_image_lines_low_minus_1; + NvU8 active_image_lines_high : 4; + NvU8 reserved : 4; + NvU8 blank_lines_minus_1; + NvU8 sync_width_lines_minus_1 : 4; + NvU8 front_porch_lines_minus_1 : 4; + }vertical; + +} DISPLAYID_TIMING_2_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_2_HORIZ_BLANK_PIXEL 7:1 +#define NVT_DISPLAYID_TIMING_2_HORIZ_ACTIVE_PIXEL_HIGH 0:0 +#define NVT_DISPLAYID_TIMING_2_HORIZ_OFFSET 7:4 +#define NVT_DISPLAYID_TIMING_2_HORIZ_SYNC 3:0 +#define NVT_DISPLAYID_TIMING_2_VERT_ACTIVE_PIXEL_HIGH 3:0 +#define NVT_DISPLAYID_TIMING_2_VERT_OFFSET 7:4 +#define NVT_DISPLAYID_TIMING_2_VERT_SYNC 3:0 + +#define NVT_DISPLAYID_TIMING_2_MAX_DESCRIPTORS 22 + +typedef struct _tagDISPLAYID_TIMING_2_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_2_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_2_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_2_BLOCK; + +typedef struct _TAG_DISPLAYID_TIMING_3_DESCRIPTOR +{ + NvU8 optns; + NvU8 horizontal_active_pixels; + NvU8 transfer; +} DISPLAYID_TIMING_3_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_3_FORMULA 6:4 +#define NVT_DISPLAYID_TIMING_3_FORMULA_STANDARD 0 +#define NVT_DISPLAYID_TIMING_3_FORMULA_REDUCED_BLANKING 1 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO 3:0 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_1_1 0 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_5_4 1 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_4_3 2 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_15_9 3 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_9 4 +#define NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_10 5 +#define NVT_DISPLAYID_TIMING_3_INTERLACE 7:7 +#define NVT_DISPLAYID_TIMING_3_REFRESH_RATE 6:0 + +#define NVT_DISPLAYID_TIMING_3_MAX_DESCRIPTORS 82 + +typedef struct _tagDISPLAYID_TIMING_3_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_3_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_3_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_3_BLOCK; + +#define NVT_DISPLAYID_TIMING_4_MAX_CODES NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN + +typedef struct _tagDISPLAYID_TIMING_4_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 timing_codes[NVT_DISPLAYID_TIMING_4_MAX_CODES]; +} DISPLAYID_TIMING_4_BLOCK; + +#define NVT_DISPLAYID_TIMING_5_STEREO_SUPPORT_MASK 0x60 +#define NVT_DISPLAYID_TIMING_5_FRACTIONAL_RR_SUPPORT_MASK 0x10 +#define NVT_DISPLAYID_TIMING_5_FORMULA_SUPPORT_MASK 3 + +typedef struct _TAG_DISPLAYID_TIMING_5_DESCRIPTOR +{ + NvU8 optns; + NvU8 rsvd; + NvU8 horizontal_active_pixels_low; + NvU8 horizontal_active_pixels_high; + NvU8 vertical_active_pixels_low; + NvU8 vertical_active_pixels_high; + NvU8 refresh_rate; +} DISPLAYID_TIMING_5_DESCRIPTOR; + +#define NVT_DISPLAYID_TIMING_5_MAX_DESCRIPTORS 53 + +typedef struct _tagDISPLAYID_TIMING_5_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + DISPLAYID_TIMING_5_DESCRIPTOR descriptors[NVT_DISPLAYID_TIMING_5_MAX_DESCRIPTORS]; +} DISPLAYID_TIMING_5_BLOCK; + +#define DISPLAYID_TIMING_VESA_BLOCK_SIZE 0x0A +#define DISPLAYID_TIMING_CEA_BLOCK_SIZE 0x08 + +typedef struct _tagDISPLAYID_TIMING_MODE_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 timing_modes[DISPLAYID_TIMING_VESA_BLOCK_SIZE]; +} DISPLAYID_TIMING_MODE_BLOCK; + + +typedef struct _tagDISPLAYID_RANGE_LIMITS_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 pixel_clock_min[3]; + NvU8 pixel_clock_max[3]; + NvU8 horizontal_frequency_min; + NvU8 horizontal_frequency_max; + NvU16 horizontal_blanking_min; + NvU8 vertical_refresh_rate_min; + NvU8 vertical_refresh_rate_max; + NvU16 vertical_blanking_min; + + NvU8 
optns; +} DISPLAYID_RANGE_LIMITS_BLOCK; + +#define DISPLAYID_RANGE_LIMITS_BLOCK_LEN 0xF + +#define NVT_DISPLAYID_RANGE_LIMITS_INTERLACE 7:7 +#define NVT_DISPLAYID_RANGE_LIMITS_CVT_STANDARD 6:6 +#define NVT_DISPLAYID_RANGE_LIMITS_CVT_REDUCED 5:5 +#define NVT_DISPLAYID_RANGE_LIMITS_DFD 4:4 + +typedef struct _tagDISPLAYID_ASCII_STRING_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 data[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN]; +} DISPLAYID_ASCII_STRING_BLOCK; + +typedef struct _tagDISPLAYID_DEVICE_DATA_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + + NvU8 technology; + NvU8 operating_mode; + NvU16 horizontal_pixel_count; + NvU16 vertical_pixel_count; + NvU8 aspect_ratio; + NvU8 orientation; + + NvU8 subpixel_info; + NvU8 horizontal_pitch; + NvU8 vertical_pitch; + + NvU8 color_bit_depth; + NvU8 response_time; + +} DISPLAYID_DEVICE_DATA_BLOCK; + +#define DISPLAYID_DEVICE_DATA_BLOCK_LEN 0xD + +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_MONOCHROME 0x00 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_STANDARD 0x01 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_CRT_OTHER 0x02 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_TN 0x10 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_CHOL_LC 0x11 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_FERRO_LC 0x12 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_PASSIVE_MATRIX_OTHER 0x13 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_TN 0x14 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_IPS 0x15 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_VA 0x16 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_OCB 0x17 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_ACTIVE_MATRIX_FERRO 0x18 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_LCD_OTHER 0x1F +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_PLASMA_DC 0x20 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_PLASMA_AC 0x21 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROLUM 0x30 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_INORGANIC_LED 0x40 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ORGANIC_LED 0x50 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_FED 0x60 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROPHORETIC 0x70 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROCHROMIC 0x80 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROMECHANICAL 0x90 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_ELECTROWETTING 0xA0 +#define NVT_DISPLAYID_DEVICE_TECHNOLOGY_OTHER 0xF0 + +// Display Device operating mode info +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE 7:4 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_NO_ILLUM 0x0 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_ILLUM 0x1 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_REFLECTIVE_ILLUM_DEF 0x2 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_NO_ILLUM 0x3 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_ILLUM 0x4 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSMISSIVE_ILLUM_DEF 0x5 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_EMISSIVE 0x6 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSFLECTIVE_REF 0x7 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSFLECTIVE_TRANS 0x8 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSPARENT_AMB 0x9 +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_TRANSPARENT_EMIS 0xA +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_REF 0xB +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_TRANS 0xC +#define NVT_DISPLAYID_DEVICE_OPERATING_MODE_PROJECTION_EMIS 0xD +#define NVT_DISPLAYID_DEVICE_BACKLIGHT 3:3 +#define NVT_DISPLAYID_DEVICE_INTENSITY 2:2 + +// Display Device aspect 
ratio/orientation info +#define NVT_DISPLAYID_DEVICE_ORIENTATION 7:6 +#define NVT_DISPLAYID_DEVICE_ORIENTATION_LANDSCAPE 0 +#define NVT_DISPLAYID_DEVICE_ORIENTATION_PORTRAIT 1 +#define NVT_DISPLAYID_DEVICE_ORIENTATION_NOT_FIXED 2 +#define NVT_DISPLAYID_DEVICE_ORIENTATION_UNDEFINED 3 +#define NVT_DISPLAYID_DEVICE_ROTATION 5:4 +#define NVT_DISPLAYID_DEVICE_ROTATION_NONE 0 +#define NVT_DISPLAYID_DEVICE_ROTATION_CLOCKWISE 1 +#define NVT_DISPLAYID_DEVICE_ROTATION_COUNTERCLOCKWISE 2 +#define NVT_DISPLAYID_DEVICE_ROTATION_BOTH 3 +#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL 3:2 +#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_UPPER_LEFT 0 +#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_UPPER_RIGHT 1 +#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_LOWER_LEFT 2 +#define NVT_DISPLAYID_DEVICE_ZERO_PIXEL_LOWER RIGHT 3 +#define NVT_DISPLAYID_DEVICE_SCAN 1:0 +#define NVT_DISPLAYID_DEVICE_SCAN_UNDEFINED 0 +#define NVT_DISPLAYID_DEVICE_SCAN_FAST_LONG 1 +#define NVT_DISPLAYID_DEVICE_SCAN_FAST_SHORT 2 + +// Display Device Color Depth information +#define NVT_DISPLAYID_DEVICE_COLOR_DEPTH 3:0 + +// Display Device Response Time information +#define NVT_DISPLAYID_DEVICE_WHITE_BLACK 7:7 +#define NVT_DISPLAYID_DEVICE_RESPONSE_TIME 6:0 + +#define NVT_DISPLAYID_SUBPIXEL_UNDEFINED 0 +#define NVT_DISPLAYID_SUBPIXEL_RGB_VERTICAL 1 +#define NVT_DISPLAYID_SUBPIXEL_RGB_HORIZONTAL 2 +#define NVT_DISPLAYID_SUBPIXEL_VERTICAL_STR 3 +#define NVT_DISPLAYID_SUBPIXEL_HORIZONTAL_STR 4 +#define NVT_DISPLAYID_SUBPIXEL_QUAD_RED_TOP_LEFT 5 +#define NVT_DISPLAYID_SUBPIXEL_QUAD_RED_BOTTOM_LEFT 6 +#define NVT_DISPLAYID_SUBPIXEL_DELTA_RGB 7 +#define NVT_DISPLAYID_SUBPIXEL_MOSAIC 8 +#define NVT_DISPLAYID_SUBPIXEL_QUAD_INC_WHITE 9 +#define NVT_DISPLAYID_SUBPIXEL_FIVE 10 +#define NVT_DISPLAYID_SUBPIXEL_SIX 11 +#define NVT_DISPLAYID_SUBPIXEL_PENTILE 12 + +typedef struct _tagDISPLAYID_INTERFACE_POWER_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 power_sequence_T1; + NvU8 power_sequence_T2; + NvU8 power_sequence_T3; + NvU8 power_sequence_T4_min; + NvU8 power_sequence_T5_min; + NvU8 power_sequence_T6_min; +} DISPLAYID_INTERFACE_POWER_BLOCK; + +#define DISPLAYID_INTERFACE_POWER_BLOCK_LEN 0x6 + +#define NVT_DISPLAYID_POWER_T1_MIN 7:4 +#define NVT_DISPLAYID_POWER_T1_MAX 3:0 +#define NVT_DISPLAYID_POWER_T2 5:0 +#define NVT_DISPLAYID_POWER_T3 5:0 +#define NVT_DISPLAYID_POWER_T4_MIN 6:0 +#define NVT_DISPLAYID_POWER_T5_MIN 5:0 +#define NVT_DISPLAYID_POWER_T6_MIN 5:0 + +typedef struct _tagDISPLAYID_TRANSFER_CHAR_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 info; + NvU8 samples; + NvU8 curve_data[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN - 2]; +} DISPLAYID_TRANSFER_CHAR_BLOCK; + +typedef struct _tagDISPLAYID_INTERFACE_DATA_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 info; + + NvU8 version; + NvU8 color_depth_rgb; + NvU8 color_depth_ycbcr444; + NvU8 color_depth_ycbcr422; + NvU8 content_protection; + NvU8 content_protection_version; + + NvU8 spread; + + NvU8 interface_attribute_1; + NvU8 interface_attribute_2; +} DISPLAYID_INTERFACE_DATA_BLOCK; + +#define DISPLAYID_INTERFACE_DATA_BLOCK_LEN 0xA + +#define NVT_DISPLAYID_INTERFACE_TYPE 7:4 + +// Interface Codes (note exception for Analog Interface) +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG 0 +#define NVT_DISPLAYID_INTERFACE_TYPE_LVDS 1 +#define NVT_DISPLAYID_INTERFACE_TYPE_TMDS 2 +#define NVT_DISPLAYID_INTERFACE_TYPE_RSDS 3 +#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_D 4 +#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_I_ANALOG 5 +#define NVT_DISPLAYID_INTERFACE_TYPE_DVI_I_DIGITAL 6 +#define 
NVT_DISPLAYID_INTERFACE_TYPE_HDMI_A 7 +#define NVT_DISPLAYID_INTERFACE_TYPE_HDMI_B 8 +#define NVT_DISPLAYID_INTERFACE_TYPE_MDDI 9 +#define NVT_DISPLAYID_INTERFACE_TYPE_DISPLAYPORT 10 +#define NVT_DISPLAYID_INTERFACE_TYPE_PROPRIETARY 11 + +// Analog Interface Subtype codes +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VGA 0 +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VESA_NAVI_V 1 +#define NVT_DISPLAYID_INTERFACE_TYPE_ANALOG_VESA_NAVI_D 2 + +#define NVT_DISPLAYID_INTERFACE_NUMLINKS 3:0 +#define NVT_DISPLAYID_INTERFACE_CONTENT 2:0 +#define NVT_DISPLAYID_INTERFACE_CONTENT_NONE 0 +#define NVT_DISPLAYID_INTERFACE_CONTENT_HDCP 1 +#define NVT_DISPLAYID_INTERFACE_CONTENT_DTCP 2 +#define NVT_DISPLAYID_INTERFACE_CONTENT_DPCP 3 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE 7:6 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_NONE 0 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_DOWN 1 +#define NVT_DISPLAYID_INTERFACE_SPREAD_TYPE_CENTER 2 +#define NVT_DISPLAYID_INTERFACE_SPREAD_PER 3:0 + +#define NVT_DISPLAYID_INTERFACE_RGB16 5:5 +#define NVT_DISPLAYID_INTERFACE_RGB14 4:4 +#define NVT_DISPLAYID_INTERFACE_RGB12 3:3 +#define NVT_DISPLAYID_INTERFACE_RGB10 2:2 +#define NVT_DISPLAYID_INTERFACE_RGB8 1:1 +#define NVT_DISPLAYID_INTERFACE_RGB6 0:0 + +#define NVT_DISPLAYID_INTERFACE_YCBCR444_16 5:5 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_14 4:4 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_12 3:3 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_10 2:2 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_8 1:1 +#define NVT_DISPLAYID_INTERFACE_YCBCR444_6 0:0 + +#define NVT_DISPLAYID_INTERFACE_YCBCR422_16 4:4 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_14 3:3 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_12 2:2 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_10 1:1 +#define NVT_DISPLAYID_INTERFACE_YCBCR422_8 0:0 + +// LVDS specific settings +#define NVT_DISPLAYID_LVDS_COLOR 4:4 +#define NVT_DISPLAYID_LVDS_2_8 3:3 +#define NVT_DISPLAYID_LVDS_12 2:2 +#define NVT_DISPLAYID_LVDS_5 1:1 +#define NVT_DISPLAYID_LVDS_3_3 0:0 + +#define NVT_DISPLAYID_INTERFACE_DE 2:2 +#define NVT_DISPLAYID_INTERFACE_POLARITY 1:1 +#define NVT_DISPLAYID_INTERFACE_STROBE 0:0 + +typedef struct _tagDISPLAYID_STEREO_INTERFACE_METHOD_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 stereo_bytes; + NvU8 stereo_code; + NvU8 timing_sub_block[NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN]; +} DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK; + +#define NVT_DISPLAYID_STEREO_FIELD_SEQUENTIAL 0x0 +#define NVT_DISPLAYID_STEREO_SIDE_BY_SIDE 0x1 +#define NVT_DISPLAYID_STEREO_PIXEL_INTERLEAVED 0x2 +#define NVT_DISPLAYID_STEREO_DUAL_INTERFACE 0x3 +#define NVT_DISPLAYID_STEREO_MULTIVIEW 0x4 +#define NVT_DISPLAYID_STEREO_PROPRIETARY 0xFF + +#define NVT_DISPLAYID_STEREO_MIRRORING 2:1 +#define NVT_DISPLAYID_STEREO_POLARITY 0:0 + +typedef struct _tagDISPLAYID_TILED_DISPLAY_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + struct + { + NvU8 single_tile_behavior:3; // 0x03 + NvU8 multi_tile_behavior:2; // 0x03 + NvU8 rsvd :1; // 0x03 + NvU8 has_bezel_info :1; // 0x03 + NvU8 single_enclosure :1; // 0x03 + } capability; + struct + { + NvU8 row :4; // 0x04 + NvU8 col :4; // 0x04 + } topology_low; + struct + { + NvU8 y :4; // 0x05 + NvU8 x :4; // 0x05 + } location_low; + struct + { + NvU8 y :1; // 0x06 + NvU8 reserved1 :1; // 0x06 + NvU8 x :1; // 0x06 + NvU8 reserved2 :1; // 0x06 + NvU8 row :1; // 0x06 + NvU8 reserved3 :1; // 0x06 + NvU8 col :1; // 0x06 + NvU8 reserved4 :1; // 0x06 + } topo_loc_high; + struct + { + NvU8 width_low; // 0x07 + NvU8 width_high; // 0x08 + NvU8 height_low; // 0x09 + NvU8 height_high; // 0X0A 
+ } native_resolution; + struct + { + NvU8 pixel_density; // 0x0B + NvU8 top; // 0x0C + NvU8 bottom; // 0x0D + NvU8 right; // 0x0E + NvU8 left; // 0x0F + } bezel_info; + struct + { + NvU8 vendor_id[3]; // 0x10 ~ 0x12 + NvU8 product_id[2]; // 0x13 ~ 0x14 + NvU8 serial_number[4]; // 0x15 ~ 0x18 + } topology_id; +} DISPLAYID_TILED_DISPLAY_BLOCK; + +typedef struct _tagDISPLAYID_INTERFACE_FEATURES_DATA_BLOCK +{ + DISPLAYID_DATA_BLOCK_HEADER header; + NvU8 supported_color_depth_rgb; + NvU8 supported_color_depth_ycbcr444; + NvU8 supported_color_depth_ycbcr422; + NvU8 supported_color_depth_ycbcr420; + NvU8 minimum_pixel_rate_ycbcr420; + NvU8 supported_audio_capability; + NvU8 supported_colorspace_eotf_combination_1; + NvU8 supported_colorspace_eotf_combination_2; + NvU8 additional_supported_colorspace_eotf_total; + NvU8 additional_supported_colorspace_eotf[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF]; +} DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK; + +#define DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK_MAX_LEN sizeof(DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK) + +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB16 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB14 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB12 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB10 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB8 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_RGB6 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_16 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_14 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_12 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_10 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_8 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR444_6 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_16 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_14 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_12 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_10 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR422_8 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_16 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_14 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_12 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_10 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_YCBCR420_8 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_32KHZ 7:7 +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_44_1KHZ 6:6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_AUDIO_SUPPORTED_48KHZ 5:5 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT2020_EOTF_SMPTE_ST2084 6:6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT2020_EOTF_BT2020 5:5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_DCI_P3_EOTF_DCI_P3 4:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_ADOBE_RGB_EOTF_ADOBE_RGB 3:3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT709_EOTF_BT1886 2:2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_BT601_EOTF_BT601 1:1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_COLORSPACE_SRGB_EOTF_SRGB 0:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF_TOTAL 2:0 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE 7:4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_NOT_DEFINED 0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_SRGB 1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT601 2 +#define 
NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT709 3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_ADOBE_RGB 4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_DCI_P3 5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_BT2020 6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_COLORSPACE_CUSTOM 7 + +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF 3:0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_NOT_DEFINED 0 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_SRGB 1 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT601 2 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT709 3 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_ADOBE_RGB 4 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_DCI_P3 5 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_BT2020 6 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_GAMMA 7 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_SMPTE_ST2084 8 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_HYBRID_LOG 9 +#define NVT_DISPLAYID_INTERFACE_FEATURES_ADDITIONAL_SUPPORTED_EOTF_CUSTOM 10 + + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack() +#endif + +#endif // __DISPLAYID_H_ diff --git a/src/common/modeset/timing/displayid20.h b/src/common/modeset/timing/displayid20.h new file mode 100644 index 000000000..006d279be --- /dev/null +++ b/src/common/modeset/timing/displayid20.h @@ -0,0 +1,701 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: displayid20.h +// +// Purpose: the template for DisplayID 2.0 parsing (future replacement for EDID) +// +//***************************************************************************** + + +#ifndef __DISPLAYID20_H_ +#define __DISPLAYID20_H_ + +#include "nvtiming.h" + +// The structures below must be tightly packed, in order to correctly +// overlay on the DisplayID 2.0 block bytes. Both MSVC and +// gcc support the pack() pragma for this. 
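As a side note: because these declarations overlay raw DisplayID 2.0 bytes, a build-time size check is a cheap way to confirm that pragma pack(1) really took effect. A minimal sketch, assuming a C11 compiler with _Static_assert and the displayid20.h include path used elsewhere in this tree; the expected sizes follow directly from the field layouts declared below.

    /* Illustrative sanity checks only: the packed headers defined in this file
     * must be byte-exact so they can overlay the raw DisplayID 2.0 section bytes. */
    #include "displayid20.h"   /* assumed include path */

    _Static_assert(sizeof(DISPLAYID_2_0_SECTION_HEADER) == 4,
                   "DisplayID 2.0 section header must be exactly 4 bytes");
    _Static_assert(sizeof(DISPLAYID_2_0_DATA_BLOCK_HEADER) == 3,
                   "DisplayID 2.0 data block header must be exactly 3 bytes");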
+ +#if defined(__GNUC__) || defined(_MSC_VER) +# define __SUPPORTS_PACK_PRAGMA 1 +#else +# error "unrecognized compiler: displayid structures must be tightly packed" +#endif + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack(1) +#endif + +#define DISPLAYID_2_0_SECTION_SIZE_TOTAL(_pSectionHeader_) ((_pSectionHeader_).section_bytes + \ + sizeof(DISPLAYID_2_0_SECTION_HEADER) + \ + sizeof(NvU8)) +#define DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(_pBlockHeader_) ((_pBlockHeader_)->data_bytes + \ + sizeof(DISPLAYID_2_0_DATA_BLOCK_HEADER)) +#define DISPLAYID_2_0_SECTION_SIZE_MAX 256 +#define DISPLAYID_2_0_SECTION_DATA_SIZE_MAX (DISPLAYID_2_0_SECTION_SIZE_MAX - \ + sizeof(DISPLAYID_2_0_SECTION_HEADER) + +typedef struct _tagDISPLAYID_2_0_SECTION_HEADER +{ + NvU8 revision:4; // displayID revision + NvU8 version:4; // displayID version + NvU8 section_bytes; // length of this displayID section excluding mandatory bytes [0, 251] + + NvU8 product_type:4; // Display Product Primary Use Case + NvU8 reserved:4; // RESERVED + NvU8 extension_count; // Total extension count. +} DISPLAYID_2_0_SECTION_HEADER; + +typedef struct _tagDISPLAYID_2_0_SECTION +{ + DISPLAYID_2_0_SECTION_HEADER header; + + NvU8 data[DISPLAYID_2_0_SECTION_SIZE_MAX]; // data blocks. Note, DisplayID has variable length +} DISPLAYID_2_0_SECTION; + +#define DISPLAYID_2_0_VERSION 2 +#define DISPLAYID_2_0_REVISION 0 + +#define DISPLAYID_2_0_PROD_EXTENSION 0 // Extension (same primary use case as base section) +#define DISPLAYID_2_0_PROD_TEST 1 // Test Structure/Test Equipment +#define DISPLAYID_2_0_PROD_GENERIC_DISPLAY 2 // None of the listed primary use cases; generic display +#define DISPLAYID_2_0_PROD_TELEVISION 3 // Television (TV) display +#define DISPLAYID_2_0_PROD_DESKTOP_PRODUCTIVITY_DISPLAY 4 // Desktop productivity display +#define DISPLAYID_2_0_PROD_DESKTOP_GAMING_DISPLAY 5 // Desktop gaming display +#define DISPLAYID_2_0_PROD_PRESENTATION_DISPLAY 6 // Presentation display +#define DISPLAYID_2_0_PROD_HMD_VR 7 // Head mounted Virtual Reality display +#define DISPLAYID_2_0_PROD_HMD_AR 8 // Head mounted Augmented Reality display + +typedef struct _tagDISPLAYID_2_0_DATA_BLOCK_HEADER +{ + NvU8 type; // Data block tag + NvU8 revision:3; // block revision + NvU8 reserved:5; + NvU8 data_bytes; // number of payload bytes in Block [ 0, 248] +} DISPLAYID_2_0_DATA_BLOCK_HEADER; + +#define DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY 0x20 +#define DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM 0x21 +#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_7 0x22 +#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_8 0x23 +#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_9 0x24 +#define DISPLAYID_2_0_BLOCK_TYPE_TIMING_10 0x2A +#define DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS 0x25 +#define DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES 0x26 +#define DISPLAYID_2_0_BLOCK_TYPE_STEREO 0x27 +#define DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY 0x28 +#define DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID 0x29 +#define DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC 0x7E +#define DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA 0x81 + +#define DISPLAYID_2_0_PRODUCT_NAME_STRING_MAX_LEN ((0xFB - 0xF) + 1) + +typedef struct _tagDISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK +{ + // Product Identification Data Block (0x20) + // Number of payload bytes 12(0xC) - 248(0xF8) + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + + NvU8 vendor[3]; + NvU8 product_code[2]; + NvU8 serial_number[4]; + NvU8 model_tag; + NvU8 model_year; + NvU8 product_name_string_size; + NvU8 product_name_string[DISPLAYID_2_0_PRODUCT_NAME_STRING_MAX_LEN]; +} DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK; + 
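A minimal sketch of how the section and data block size macros above might be used to walk the blocks packed into one DisplayID 2.0 section, handing each block to parseDisplayId20DataBlock() (declared at the end of this header). NVT_STATUS, NVT_DISPLAYID_2_0_INFO and the NVT_STATUS_SUCCESS value are assumed to come from nvtiming.h, and the zero-type check that treats trailing bytes as padding is also an assumption, not something this header specifies.

    /* Sketch: iterate the data blocks inside one packed DisplayID 2.0 section
     * and feed each one to the block parser. */
    #include "displayid20.h"                  /* assumed include path */

    static NVT_STATUS
    walkDisplayId20Section(const DISPLAYID_2_0_SECTION *pSection,
                           NVT_DISPLAYID_2_0_INFO *pInfo)
    {
        NvU32      offset = 0;
        NVT_STATUS status;

        while (offset < pSection->header.section_bytes)
        {
            const DISPLAYID_2_0_DATA_BLOCK_HEADER *pBlock =
                (const DISPLAYID_2_0_DATA_BLOCK_HEADER *)&pSection->data[offset];

            /* Assumed convention: a zero type/length pair marks trailing padding. */
            if (pBlock->type == 0 && pBlock->data_bytes == 0)
                break;

            status = parseDisplayId20DataBlock(pBlock, pInfo);
            if (status != NVT_STATUS_SUCCESS)  /* NVT_STATUS_SUCCESS assumed from nvtiming.h */
                return status;

            offset += DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(pBlock);
        }

        return NVT_STATUS_SUCCESS;
    }

In practice, computeDisplayId20SectionCheckSum(), also declared in this header, would typically be applied to the full section bytes before walking the blocks.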
+typedef struct _tagDISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER +{ + NvU8 type; // Display Parameters Data Block (0x21) + NvU8 revision:3; + NvU8 reserved:4; + NvU8 image_size_multiplier:1; + NvU8 data_bytes; // number of payload bytes 29(0x1D) +} DISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_COLOR_CHROMATICITY +{ + NvU8 color_x_bits_low; + struct { + NvU8 color_x_bits_high:4; + NvU8 color_y_bits_low:4; + } color_bits_mid; + NvU8 color_y_bits_high; +} DISPLAYID_2_0_COLOR_CHROMATICITY; + +typedef enum _tagDISPLAYID_2_0_NATIVE_COLOR_DEPTH +{ + NATIVE_COLOR_NOT_DEFINED = 0, + NATIVE_COLOR_BPC_6 = 1, + NATIVE_COLOR_BPC_8 = 2, + NATIVE_COLOR_BPC_10 = 3, + NATIVE_COLOR_BPC_12 = 4, + NATIVE_COLOR_BPC_16 = 5, +} DISPLAYID_2_0_NATIVE_COLOR_DEPTH; + +#define DISPLAYID_2_0_DISPLAY_PARAM_BLOCK_PAYLOAD_LENGTH 29 +typedef struct _tagDISPLAYID_2_0_DISPLAY_PARAM_BLOCK +{ + DISPLAY_2_0_DISPLAY_PARAM_BLOCK_HEADER header; + + NvU8 horizontal_image_size[2]; + NvU8 vertical_image_size[2]; + NvU8 horizontal_pixel_count[2]; + NvU8 vertical_pixel_count[2]; + + struct { + NvU8 scan_orientation :3; + NvU8 luminance_information :2; + NvU8 reserved :1; + NvU8 color_information :1; + NvU8 audio_speaker_information :1; + } feature; + + DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_1_chromaticity; + DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_2_chromaticity; + DISPLAYID_2_0_COLOR_CHROMATICITY primary_color_3_chromaticity; + DISPLAYID_2_0_COLOR_CHROMATICITY white_point_chromaticity; + NvU8 max_luminance_full_coverage[2]; + NvU8 max_luminance_1_percent_rectangular_coverage[2]; + NvU8 min_luminance[2]; + + struct { + NvU8 color_depth :3; + NvU8 reserved0 :1; + NvU8 device_technology :3; + NvU8 device_theme_preference :1; + } color_depth_and_device_technology; + + NvU8 gamma_EOTF; +} DISPLAYID_2_0_DISPLAY_PARAM_BLOCK; + +#define DISPLAYID_2_0_SCAN_ORIENTATION_LRTB 0 // Left to right, top to bottom +#define DISPLAYID_2_0_SCAN_ORIENTATION_RLTB 1 // Right to left, top to bottom +#define DISPLAYID_2_0_SCAN_ORIENTATION_TBRL 2 // Top to bottom, right to left +#define DISPLAYID_2_0_SCAN_ORIENTATION_BTRL 3 // Bottom to top, right to left +#define DISPLAYID_2_0_SCAN_ORIENTATION_RLBT 4 // Right to left, bottom to top +#define DISPLAYID_2_0_SCAN_ORIENTATION_LRBT 5 // Left to right, bottom to top +#define DISPLAYID_2_0_SCAN_ORIENTATION_BTLR 6 // Bottom to top, left to right +#define DISPLAYID_2_0_SCAN_ORIENTATION_TBLR 7 // Top to bottom, left to right + +#define DISPLAYID_2_0_COLOR_INFORMATION_1931_CIE 0 +#define DISPLAYID_2_0_color_INFORMATION_1976_CIE 1 + +#define DISPLAYID_2_0_AUDIO_SPEAKER_INTEGRATED 0 +#define DISPLAYID_2_0_AUDIO_SPEAKER_NOT_INTEGRATED 1 + +#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_UNSPECIFIED 0 +#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_LCD 1 +#define DISPLAYID_2_0_DEVICE_TECHNOLOGY_OLED 2 + +#define DISPLAYID_2_0_TYPE7_DSC_PASSTHRU_REVISION 1 +#define DISPLAYID_2_0_TYPE7_YCC420_SUPPORT_REVISION 2 + +// DisplayID_v2.0 E5 - DSC Pass-Through timing +// DisplayID_v2.0 E7 - YCC420 and > 20 bytes per descriptor supported +typedef struct _tagDISPLAYID_2_0_TIMING_7_BLOCK_HEADER +{ + NvU8 type; // Type VII Timing (0x22) + NvU8 revision :3; + NvU8 dsc_passthrough :1; + NvU8 payload_bytes_len :3; + NvU8 reserved :1; + NvU8 data_bytes; // Values range from 1(0x01) to 248(0xF8) +} DISPLAYID_2_0_TIMING_7_BLOCK_HEADER; + +typedef struct _tag_DISPLAYID_2_0_TIMING_7_DESCRIPTOR +{ + // Range is defined as 0.001 through 16,777.216 MP/s + NvU8 pixel_clock[3]; + + struct + { + NvU8 aspect_ratio : 4; + NvU8 
interface_frame_scanning_type : 1; + NvU8 stereo_support : 2; + NvU8 is_preferred_or_ycc420 : 1; + }options; + + struct + { + NvU8 active_image_pixels[2]; + NvU8 blank_pixels[2]; + NvU8 front_porch_pixels_low; + NvU8 front_porch_pixels_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_pixels[2]; + }horizontal; + + struct + { + NvU8 active_image_lines[2]; + NvU8 blank_lines[2]; + NvU8 front_porch_lines_low; + NvU8 front_porch_lines_high : 7; + NvU8 sync_polarity : 1; + NvU8 sync_width_lines[2]; + }vertical; + +} DISPLAYID_2_0_TIMING_7_DESCRIPTOR; + +#define DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS 12 + +typedef struct _tagDISPLAYID_2_0_TIMING_7_BLOCK +{ + DISPLAYID_2_0_TIMING_7_BLOCK_HEADER header; + DISPLAYID_2_0_TIMING_7_DESCRIPTOR descriptors[DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS]; +} DISPLAYID_2_0_TIMING_7_BLOCK; + +#define DISPLAYID_2_0_TIMING_DSC_PASSTHRU_TIMING 1 + +// the following fields apply to Timing Descriptors 7 (Not all of them are +// used per descriptor, but the format is the same +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_1_1 0 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_5_4 1 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_4_3 2 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_15_9 3 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_9 4 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_10 5 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_64_27 6 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_256_135 7 +#define DISPLAYID_2_0_TIMING_ASPECT_RATIO_CALCULATE 8 // calculate using Horizontal and Vertical Active Image Pixels + +#define DISPLAYID_2_0_TIMING_PROGRESSIVE_SCAN 0 +#define DISPLAYID_2_0_TIMING_INTERLACED_SCAN 1 + +#define DISPLAYID_2_0_TIMING_3D_STEREO_MONO 0 +#define DISPLAYID_2_0_TIMING_3D_STEREO_STEREO 1 +#define DISPLAYID_2_0_TIMING_3D_STEREO_EITHER 2 + +#define DISPLAYID_2_0_TIMING_SYNC_POLARITY_NEGATIVE 0 +#define DISPLAYID_2_0_TIMING_SYNC_POLARITY_POSITIVE 1 + +typedef struct _tagDISPLAYID_2_0_TIMING_8_BLOCK_HEADER +{ + NvU8 type; // Type VIII Timing (0x23) + NvU8 revision :3; + NvU8 timing_code_size :1; + NvU8 reserved :1; + NvU8 is_support_yuv420 :1; + NvU8 timing_code_type :2; + NvU8 data_bytes; // Values range from 1(0x01) to 248(0xF8) +} DISPLAYID_2_0_TIMING_8_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE +{ + NvU8 timing_code; +} DISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE; + +typedef struct _tagDISPLAYID_2_0_TIMING_8_TWO_BYTE_CODE +{ + NvU8 timing_code[2]; +} DISPLAYID_2_0_TIMING_8_TWO_BYTE_CODE; + +#define DISPLAYID_2_0_TIMING_8_MAX_CODES 248 + +typedef struct _tagDISPLAYID_2_0_TIMING_8_BLOCK +{ + DISPLAYID_2_0_TIMING_8_BLOCK_HEADER header; + + union + { + DISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE timing_code_1[DISPLAYID_2_0_TIMING_8_MAX_CODES]; + DISPLAYID_2_0_TIMING_8_TWO_BYTE_CODE timing_code_2[DISPLAYID_2_0_TIMING_8_MAX_CODES / 2]; + }; +} DISPLAYID_2_0_TIMING_8_BLOCK; + +#define DISPLAYID_2_0_TIMING_CODE_DMT 0 +#define DISPLAYID_2_0_TIMING_CODE_CTA_VIC 1 +#define DISPLAYID_2_0_TIMING_CODE_HDMI_VIC 2 +#define DISPLAYID_2_0_TIMING_CODE_RSERVED 3 +#define DISPLAYID_2_0_TIMING_CODE_SIZE_1_BYTE 0 +#define DISPLAYID_2_0_TIMING_CODE_SIZE_2_BYTE 1 + +typedef struct _TAG_DISPLAYID_2_0_TIMING_9_DESCRIPTOR +{ + struct { + NvU8 timing_formula:3; + NvU8 reserved0:1; + NvU8 fractional_refresh_rate_support:1; + NvU8 stereo_support:2; + NvU8 reserved1:1; + } options; + + NvU8 horizontal_active_pixels[2]; + NvU8 vertical_active_lines[2]; + NvU8 refresh_rate; // 1 Hz to 256 Hz +} DISPLAYID_2_0_TIMING_9_DESCRIPTOR; + +#define DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD 0 
+#define DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1 1 +#define DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_2 2 +#define DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_3 3 + +#define DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS 41 + +typedef struct _tagDISPLAYID_2_0_TIMING_9_BLOCK +{ + // Type IX Timing (0x24) + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + DISPLAYID_2_0_TIMING_9_DESCRIPTOR descriptors[DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS]; +} DISPLAYID_2_0_TIMING_9_BLOCK; + +#define DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_6 0 +#define DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7 1 + +typedef struct _tagDISPLAYID_2_0_TIMING_10_BLOCK_HEADER +{ + NvU8 type; // Type X Timing (0x2A) + NvU8 revision :3; + NvU8 reserved0 :1; + NvU8 payload_bytes_len :3; + NvU8 reserved1 :1; + NvU8 payload_bytes; +} DISPLAYID_2_0_TIMING_10_BLOCK_HEADER; + +typedef struct _DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR +{ + struct { + NvU8 timing_formula :3; + NvU8 reserved0 :1; + NvU8 vrr_or_hblank :1; + NvU8 stereo_support :2; + NvU8 ycc420_support :1; + } options; + + NvU8 horizontal_active_pixels[2]; + NvU8 vertical_active_lines[2]; + NvU8 refresh_rate; // 1 Hz to 256 Hz +} DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR; + +typedef struct _DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR +{ + DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR descriptor_6_bytes; + NvU8 refresh_rate_high :2; + NvU8 delta_hblank :3; + NvU8 additional_vblank_timing :3; +} DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR; + +#define DISPLAYID_2_0_TIMING_10_MAX_6BYTES_DESCRIPTORS 41 +#define DISPLAYID_2_0_TIMING_10_MAX_7BYTES_DESCRIPTORS 35 + +typedef struct _DISPLAYID_2_0_TIMING_10_BLOCK +{ + DISPLAYID_2_0_TIMING_10_BLOCK_HEADER header; + NvU8 descriptors[120]; +} DISPLAYID_2_0_TIMING_10_BLOCK; + +#define DISPLAYID_2_0_RANGE_LIMITS_BLOCK_PAYLOAD_LENGTH 9 +typedef struct _tagDISPLAYID_2_0_RANGE_LIMITS_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + + NvU8 pixel_clock_min[3]; + NvU8 pixel_clock_max[3]; + NvU8 vertical_frequency_min; + NvU8 vertical_frequency_max_7_0; + struct { + NvU8 vertical_frequency_max_9_8:2; + NvU8 reserved:5; + NvU8 seamless_dynamic_video_timing_change:1; + } dynamic_video_timing_range_support; +} DISPLAYID_2_0_RANGE_LIMITS_BLOCK; + +#define DISPLAYID_2_0_SEAMLESS_DYNAMIC_VIDEO_TIMING_CHANGE_NOT_SUPPORTED 0 +#define DISPLAYID_2_0_SEAMLESS_DYNAMIC_VIDEO_TIMING_CHANGE_SUPPORTED 1 + +#define DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK_PAYLOAD_LENGTH_MIN 9 +#define DISPLAYID_2_0_MAX_COLOR_SPACE_AND_EOTF 7 +typedef struct _tagDISPLAYID_2_0_INTERFACE_FEATURES_BLOCK +{ + // Display Interface Features Data Block (0x26) + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + + struct { + NvU8 bit_per_primary_6:1; + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:2; + } interface_color_depth_rgb; + + struct { + NvU8 bit_per_primary_6:1; + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:2; + } interface_color_depth_ycbcr444; + + struct { + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:3; + } interface_color_depth_ycbcr422; + + struct { + NvU8 bit_per_primary_8:1; + NvU8 bit_per_primary_10:1; + NvU8 bit_per_primary_12:1; + NvU8 bit_per_primary_14:1; + NvU8 bit_per_primary_16:1; + NvU8 reserved:3; + } 
interface_color_depth_ycbcr420; + + NvU8 min_pixel_rate_ycbcr420; // x 74.25MP/s + + struct { + NvU8 reserved:5; + NvU8 sample_rate_48_khz:1; + NvU8 sample_rate_44_1_khz:1; + NvU8 sample_rate_32_khz:1; + } audio_capability; + + struct { + NvU8 color_space_srgb_eotf_srgb:1; + NvU8 color_space_bt601_eotf_bt601:1; + NvU8 color_space_bt709_eotf_bt1886:1; + NvU8 color_space_adobe_rgb_eotf_adobe_rgb:1; + NvU8 color_space_dci_p3_eotf_dci_p3:1; + NvU8 color_space_bt2020_eotf_bt2020:1; + NvU8 color_space_bt2020_eotf_smpte_st2084:1; + NvU8 reserved:1; + } color_space_and_eotf_1; + + struct { + NvU8 reserved; + } color_space_and_eotf_2; + + struct { + NvU8 count:3; + NvU8 reserved:5; + } additional_color_space_and_eotf_count; + + struct { + NvU8 eotf:4; + NvU8 color_space:4; + } additional_color_space_and_eotf[DISPLAYID_2_0_MAX_COLOR_SPACE_AND_EOTF]; +} DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK; + +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_NOT_DEFINED 0 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_SRGB 1 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT601 2 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT709 3 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_ADOBE_RGB 4 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_DCI_P3 5 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_BT2020 6 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_COLORSPACE_CUSTOM 7 + +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_NOT_DEFINED 0 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_SRGB 1 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT601 2 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT709 3 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_ADOBE_RGB 4 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_DCI_P3 5 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_BT2020 6 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_GAMMA 7 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_SMPTE_ST2084 8 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_HYBRID_LOG 9 +#define DISPLAYID_2_0_INTERFACE_FEATURES_SUPPORTED_EOTF_CUSTOM 10 + +typedef struct _tagDISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER +{ + NvU8 type; + NvU8 revision:3; + NvU8 reserved:3; + NvU8 stereo_timing_support:2; +} DISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER; + +typedef struct _tagDISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR +{ + NvU8 supported_timing_code_count:5; + NvU8 reserved:1; + NvU8 timing_code_type:2; + NvU8 timing_code[0x1F]; +} DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR +{ + NvU8 polarity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR +{ + NvU8 view_identity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR +{ + NvU8 interleaved_pattern_descriptor[8]; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR +{ + NvU8 left_and_right_polarity_descriptor:1; + NvU8 mirroring_descriptor:2; + NvU8 
reserved:5; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR +{ + NvU8 views_descriptors_count; + NvU8 view_interleaving_method_code_descriptor; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR +{ + NvU8 view_identity_descriptor:1; + NvU8 reserved:7; + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR +{ + DISPLAYID_2_0_STEREO_TIMING_DESCRIPTOR timing_descriptor; +} DISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR; + +typedef struct _tagDISPLAYID_2_0_STEREO_INTERFACE_METHOD_BLOCK +{ + DISPLAYID_2_0_STEREO_INTERFACE_BLOCK_HEADER header; + + NvU8 stereo_bytes; + NvU8 stereo_code; + union { + DISPLAYID_2_0_STEREO_FIELD_SEQUENTIAL_INTERFACE_DESCRIPTOR field_sequential; + DISPLAYID_2_0_STEREO_SIDE_BY_SIDE_INTERFACE_DESCRIPTOR side_by_side; + DISPLAYID_2_0_STEREO_PIXEL_INTERLEAVED_DESCRIPTOR pixel_interleaved; + DISPLAYID_2_0_STEREO_DUAL_INTERFACE_LEFT_AND_RIGHT_SEPARATE_DESCRIPTOR dual_interface; + DISPLAYID_2_0_STEREO_MULTI_VIEW_DESCRIPTOR multi_view; + DISPLAYID_2_0_STEREO_STACKED_FRAME_DESCRIPTOR stacked_frame; + DISPLAYID_2_0_STEREO_PROPRIETARY_DESCRIPTOR proprietary; + }; +} DISPLAYID_2_0_STEREO_INTERFACE_METHOD_BLOCK; + +#define DISPLAYID_2_0_STEREO_CODE_FIELD_SEQUENTIAL 0x0 +#define DISPLAYID_2_0_STEREO_CODE_SIDE_BY_SIDE 0x1 +#define DISPLAYID_2_0_STEREO_CODE_PIXEL_INTERLEAVED 0x2 +#define DISPLAYID_2_0_STEREO_CODE_DUAL_INTERFACE 0x3 +#define DISPLAYID_2_0_STEREO_CODE_MULTIVIEW 0x4 +#define DISPLAYID_2_0_STEREO_CODE_STACKED_FRAME 0x5 +#define DISPLAYID_2_0_STEREO_CODE_PROPRIETARY 0xFF + +#define DISPLAYID_STEREO_MIRRORING 2:1 +#define DISPLAYID_STEREO_POLARITY 0:0 + +#define DISPLAYID_2_0_TILED_DISPLAY_BLOCK_PAYLOAD_LENGTH 22 +typedef struct _tagDISPLAYID_2_0_TILED_DISPLAY_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + struct + { + NvU8 single_tile_behavior:3; // 0x03 + NvU8 multi_tile_behavior:2; // 0x03 + NvU8 rsvd :1; // 0x03 + NvU8 has_bezel_info :1; // 0x03 + NvU8 single_enclosure :1; // 0x03 + } capability; + struct + { + NvU8 row :4; // 0x04 + NvU8 col :4; // 0x04 + } topo_low; + struct + { + NvU8 y :4; // 0x05 + NvU8 x :4; // 0x05 + } loc_low; + struct + { + NvU8 y :2; // 0x06 + NvU8 x :2; // 0x06 + NvU8 row :2; // 0x06 + NvU8 col :2; // 0x06 + } topo_loc_high; + struct + { + NvU8 width_low; // 0x07 + NvU8 width_high; // 0x08 + NvU8 height_low; // 0x09 + NvU8 height_high; // 0X0A + } native_resolution; + struct + { + NvU8 pixel_density; // 0x0B + NvU8 top; // 0x0C + NvU8 bottom; // 0x0D + NvU8 right; // 0x0E + NvU8 left; // 0x0F + } bezel_info; + struct + { + NvU8 vendor_id[3]; // 0x10 ~ 0x12 + NvU8 product_id[2]; // 0x13 ~ 0x14 + NvU8 serial_number[4]; // 0x15 ~ 0x18 + } topo_id; +} DISPLAYID_2_0_TILED_DISPLAY_BLOCK; + +#define DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH 16 +typedef struct _tagDISPLAYID_2_0_CONTAINERID_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 container_id[DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH]; +} DISPLAYID_2_0_CONTAINERID_BLOCK; + +typedef struct _tagDISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 vendor_id[3]; + NvU8 vendor_specific_data[245]; +} DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK; + +typedef 
struct _tagDISPLAYID_2_0_CTA_BLOCK +{ + DISPLAYID_2_0_DATA_BLOCK_HEADER header; + NvU8 cta_data[248]; +} DISPLAYID_2_0_CTA_BLOCK; + +#ifdef __SUPPORTS_PACK_PRAGMA +#pragma pack() +#endif + +// Entry point functions both used in DID20 and DID20ext +NVT_STATUS parseDisplayId20DataBlock(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +NvU8 computeDisplayId20SectionCheckSum(const NvU8 *pSectionBytes, NvU32 length); + +#endif // __DISPLAYID20_H_ diff --git a/src/common/modeset/timing/dpsdp.h b/src/common/modeset/timing/dpsdp.h new file mode 100644 index 000000000..43754d6c5 --- /dev/null +++ b/src/common/modeset/timing/dpsdp.h @@ -0,0 +1,373 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* +=============================================================================== + + dp_sdp.cpp + + Provide definition needed for display port secondary data packet. + +================================================================================ +*/ + +#ifndef __DPSDP_H__ +#define __DPSDP_H__ + +#include "nvtypes.h" + +#define DP_SDP_HEADER_SIZE 4 +#define DP_SDP_DATA_SIZE 28 + +// TODO: needs to wait for RM to provide the enum. 
Therefore, hardcoded to 7, which is the packet type for VSC SDP +typedef enum tagSDP_PACKET_TYPE +{ + SDP_PACKET_TYPE_VSC = 7, +} SDP_PACKET_TYPE; + +typedef enum tagSDP_VSC_REVNUM +{ + SDP_VSC_REVNUM_STEREO = 1, + SDP_VSC_REVNUM_STEREO_PSR, + SDP_VSC_REVNUM_STEREO_PSR2, + SDP_VSC_REVNUM_PSR2_EXTN, + SDP_VSC_REVNUM_STEREO_PSR2_COLOR, + SDP_VSC_REVNUM_STEREO_PR, + SDP_VSC_REVNUM_STEREO_PR_COLOR, +} SDP_VSC_REVNUM; + +typedef enum tagSDP_VSC_VALID_DATA_BYTES +{ + SDP_VSC_VALID_DATA_BYTES_STEREO = 1, + SDP_VSC_VALID_DATA_BYTES_STEREO_PSR = 8, + SDP_VSC_VALID_DATA_BYTES_PSR2 = 12, + SDP_VSC_VALID_DATA_BYTES_PSR2_COLOR = 19, + SDP_VSC_VALID_DATA_BYTES_PR = 16, + SDP_VSC_VALID_DATA_BYTES_PR_COLOR = 19, +} SDP_VSC_VALID_DATA_BYTES; + +typedef enum tagSDP_VSC_DYNAMIC_RANGE +{ + SDP_VSC_DYNAMIC_RANGE_VESA, + SDP_VSC_DYNAMIC_RANGE_CEA, +} SDP_VSC_DYNAMIC_RANGE; + +typedef enum tagSDP_VSC_PIX_ENC +{ + SDP_VSC_PIX_ENC_RGB, + SDP_VSC_PIX_ENC_YCBCR444, + SDP_VSC_PIX_ENC_YCBCR422, + SDP_VSC_PIX_ENC_YCBCR420, + SDP_VSC_PIX_ENC_Y, + SDP_VSC_PIX_ENC_RAW, +} SDP_VSC_PIX_ENC; + +typedef enum tagSDP_VSC_BIT_DEPTH_RGB +{ + SDP_VSC_BIT_DEPTH_RGB_6BPC = 0, + SDP_VSC_BIT_DEPTH_RGB_8BPC, + SDP_VSC_BIT_DEPTH_RGB_10BPC, + SDP_VSC_BIT_DEPTH_RGB_12BPC, + SDP_VSC_BIT_DEPTH_RGB_16BPC, + +} SDP_VSC_BIT_DEPTH_RGB; + +typedef enum tagSDP_VSC_BIT_DEPTH_YCBCR +{ + SDP_VSC_BIT_DEPTH_YCBCR_8BPC = 1, + SDP_VSC_BIT_DEPTH_YCBCR_10BPC, + SDP_VSC_BIT_DEPTH_YCBCR_12BPC, + SDP_VSC_BIT_DEPTH_YCBCR_16BPC, + +} SDP_VSC_BIT_DEPTH_YCBCR; + +typedef enum tagSDP_VSC_BIT_DEPTH_RAW +{ + SDP_VSC_BIT_DEPTH_RAW_6BPC = 1, + SDP_VSC_BIT_DEPTH_RAW_7BPC, + SDP_VSC_BIT_DEPTH_RAW_8BPC, + SDP_VSC_BIT_DEPTH_RAW_10BPC, + SDP_VSC_BIT_DEPTH_RAW_12BPC, + SDP_VSC_BIT_DEPTH_RAW_14PC, + SDP_VSC_BIT_DEPTH_RAW_16PC, + +} SDP_VSC_BIT_DEPTH_RAW; + +typedef enum tagSDP_VSC_CONTENT_TYPE +{ + SDP_VSC_CONTENT_TYPE_UNDEFINED = 0, + SDP_VSC_CONTENT_TYPE_GRAPHICS, + SDP_VSC_CONTENT_TYPE_PHOTO, + SDP_VSC_CONTENT_TYPE_VIDEO, + SDP_VSC_CONTENT_TYPE_GAMES, + +} SDP_VSC_CONTENT_TYPE; + +typedef enum tagSDP_VSC_COLOR_FMT_RGB_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_SRGB = 0, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_RGB_WIDE_GAMUT_FIXED, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_RGB_SCRGB, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_ADOBERGB, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_DCI_P3, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_CUSTOM, + SDP_VSC_COLOR_FMT_RGB_COLORIMETRY_ITU_R_BT2020_RGB, +} SDP_VSC_COLOR_FMT_RGB_COLORIMETRY; + +typedef enum tagSDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT601 = 0, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT709, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_XVYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_XVYCC709, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_SYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ADOBEYCC601, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT2020_YCCBCCRC, + SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT2020_YCBCR, +} SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY; + +typedef enum tagSDP_VSC_COLOR_FMT_RAW_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_RAW_COLORIMETRY_CUSTOM_COLOR_PROFILE = 0, +} SDP_VSC_COLOR_FMT_RAW; + +typedef enum tagSDP_VSC_COLOR_FMT_Y_COLORIMETRY +{ + SDP_VSC_COLOR_FMT_Y_COLORIMETRY_DICOM = 0, +} SDP_VSC_COLOR_FMT_Y; + +// The struct element field hb and db fields are arranged to match the HW registers +// NV_PDISP_SF_DP_GENERIC_INFOFRAME_HEADER* and NV_PDISP_SF_DP_GENERIC_INFOFRAME_SUBPACK0_DB* +typedef struct tagDPSDP_DP_VSC_SDP_DESCRIPTOR +{ + NvU8 dataSize; // the db data size + + // header + struct + { + NvU8 
hb0; // DP1.3 spec, the value = 0 + NvU8 hb1; // DP1.3 spec, value = 7 + NvU8 revisionNumber : 5; + NvU8 hb2Reserved : 3; + NvU8 numValidDataBytes : 5; // number of valid data bytes + NvU8 hb3Reserved : 3; + } hb; + + // data content + struct + { + // Stereo field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 stereoInterface; // DB0 + // PSR Field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 psrState : 1; //DB1 + NvU8 psrUpdateRfb : 1; + NvU8 psrCrcValid : 1; + NvU8 psrSuValid : 1; + NvU8 psrSuFirstScanLine : 1; + NvU8 psrSuLastScanLine : 1; + NvU8 psrYCoordinateValid : 1; + NvU8 psrReserved : 1; + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + // DB8 - DB15 are undefined in DP 1.3 spec. + NvU8 db8; + NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + + // Colorimetry Infoframe Secondary Data Package following DP1.3 spec + NvU8 colorimetryFormat : 4; // DB16 infoframe per DP1.3 spec + NvU8 pixEncoding : 4; // DB16 infoframe per DP1.3 spec + + NvU8 bitDepth : 7; // DB17 infoframe per DP1.3 spec + NvU8 dynamicRange : 1; // DB17 infoframe per DP1.3 spec + + NvU8 contentType : 3; // DB18 infoframe per DP1.3 spec + NvU8 db18Reserved : 5; + + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + } db; + +} DPSDP_DP_VSC_SDP_DESCRIPTOR; + +typedef struct tagDPSDP_DP_PR_VSC_SDP_DESCRIPTOR +{ + NvU8 dataSize; // the db data size + + // header + struct + { + NvU8 hb0; // DP1.3 spec, the value = 0 + NvU8 hb1; // DP1.3 spec, value = 7 + NvU8 revisionNumber : 5; + NvU8 hb2Reserved : 3; + NvU8 numValidDataBytes : 5; // number of valid data bytes + NvU8 hb3Reserved : 3; + } hb; + + // data content + struct + { + // Stereo field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 stereoInterface; // DB0 + // PSR Field. Note: Needs to be expanded when needed. Refer to DP1.3 spec. + NvU8 prState : 1; // DB1 + NvU8 prReserved : 1; // Always ZERO + NvU8 prCrcValid : 1; + NvU8 prSuValid : 1; + NvU8 prReservedEx : 4; + + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + // DB8 - DB15 are undefined in DP 1.3 spec. 
+        NvU8 db8;
+ NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + + // Colorimetry Infoframe Secondary Data Package following DP1.3 spec + NvU8 colorimetryFormat : 4; // DB16 infoframe per DP1.3 spec + NvU8 pixEncoding : 4; // DB16 infoframe per DP1.3 spec + + NvU8 bitDepth : 7; // DB17 infoframe per DP1.3 spec + NvU8 dynamicRange : 1; // DB17 infoframe per DP1.3 spec + + NvU8 contentType : 3; // DB18 infoframe per DP1.3 spec + NvU8 db18Reserved : 5; + + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + } db; + +} DPSDP_DP_PR_VSC_SDP_DESCRIPTOR; + +typedef struct tagDPSDP_DESCRIPTOR +{ + NvU8 dataSize; + + // header byte + struct + { + NvU8 hb0; + NvU8 hb1; + NvU8 hb2; + NvU8 hb3; + } hb; + + // content byte + struct + { + NvU8 db0; + NvU8 db1; + NvU8 db2; + NvU8 db3; + NvU8 db4; + NvU8 db5; + NvU8 db6; + NvU8 db7; + NvU8 db8; + NvU8 db9; + NvU8 db10; + NvU8 db11; + NvU8 db12; + NvU8 db13; + NvU8 db14; + NvU8 db15; + NvU8 db16; + NvU8 db17; + NvU8 db18; + NvU8 db19; + NvU8 db20; + NvU8 db21; + NvU8 db22; + NvU8 db23; + NvU8 db24; + NvU8 db25; + NvU8 db26; + NvU8 db27; + NvU8 db28; + NvU8 db29; + NvU8 db30; + NvU8 db31; + } db; + +} DPSDP_DESCRIPTOR; + +// The following #defines are for RGB only +#define DP_VSC_SDP_BIT_DEPTH_RGB_6BPC 0 +#define DP_VSC_SDP_BIT_DEPTH_RGB_8BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_RGB_10BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_RGB_12BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_RGB_16BPC 4 + +// The following #defines are for YUV only +#define DP_VSC_SDP_BIT_DEPTH_YUV_8BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_YUV_10BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_YUV_12BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_YUV_16BPC 4 + +// The following #defines are for RAW only +#define DP_VSC_SDP_BIT_DEPTH_RAW_6BPC 1 +#define DP_VSC_SDP_BIT_DEPTH_RAW_7BPC 2 +#define DP_VSC_SDP_BIT_DEPTH_RAW_8BPC 3 +#define DP_VSC_SDP_BIT_DEPTH_RAW_10BPC 4 +#define DP_VSC_SDP_BIT_DEPTH_RAW_12BPC 5 +#define DP_VSC_SDP_BIT_DEPTH_RAW_14BPC 6 +#define DP_VSC_SDP_BIT_DEPTH_RAW_16BPC 7 + +#define DP_INFOFRAME_SDP_V1_3_VERSION 0x13 +#define DP_INFOFRAME_SDP_V1_3_HB3_VERSION_MASK 0xFC +#define DP_INFOFRAME_SDP_V1_3_HB3_VERSION_SHIFT 2 +#define DP_INFOFRAME_SDP_V1_3_HB3_MSB_MASK 0x3 +#define DP_INFOFRAME_SDP_V1_3_NON_AUDIO_SIZE 30 +#endif // __DPSDP_H_ diff --git a/src/common/modeset/timing/edid.h b/src/common/modeset/timing/edid.h new file mode 100644 index 000000000..fec2ff732 --- /dev/null +++ b/src/common/modeset/timing/edid.h @@ -0,0 +1,341 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: edid.h +// +// Purpose: the template for EDID parse +// +//***************************************************************************** + +#ifndef __EDID_H_ +#define __EDID_H_ + +#include "nvtiming.h" +#include "nvtiming_pvt.h" + +// EDID 1.x detailed timing template + + +#define NVT_PVT_EDID_LDD_PAYLOAD_SIZE 13 + +typedef struct _tagEDID_LONG_DISPLAY_DESCRIPTOR +{ + // the header + NvU8 prefix[2]; // 0x00 ~ 0x01 + NvU8 rsvd; // 0x02 + NvU8 tag; // 0x03 + NvU8 rsvd2; // 0x04 + + // the payload + NvU8 data[NVT_PVT_EDID_LDD_PAYLOAD_SIZE]; // 0x05~0x11 +}EDID_LONG_DISPLAY_DESCRIPTOR; +typedef struct _tagEDID_MONITOR_RANGE_GTF2 +{ + NvU8 reserved; // byte 0x0B: reserved as 00 + NvU8 startFreq; // byte 0x0C: start frequency for secondary curve, hot. freq./2[kHz] + NvU8 C; // byte 0x0D: C*2 0 <= 127 + NvU8 M_LSB; // byte 0x0E-0x0F: M (LSB) 0 <= M <= 65535 + NvU8 M_MSB; + NvU8 K; // byte 0x10: K 0 <= K <= 255 + NvU8 J; // byte 0x11: J*2 0 <= J <= 127 +}EDID_MONITOR_RANGE_GTF2; + +typedef struct _tagEDID_MONITOR_RANGE_CVT +{ + NvU8 version; // byte 0x0B: cvt version + NvU8 pixel_clock; // byte 0x0C: [bits 7:2]pixel clock precision + // [bits 1:0]max active MSB + NvU8 max_active; // byte 0x0D: with byte 12 [bits 1:0], max active pixels per line + NvU8 aspect_supported; // byte 0x0E: supported aspect ratios + NvU8 aspect_preferred_blanking; // byte 0x0F: preferred aspect ratio / blanking style support + NvU8 scaling_support; // byte 0x10: display scaling support + NvU8 preferred_refresh_rate; // byte 0x11: preferred vertical refresh rate +}EDID_MONITOR_RANGE_CVT; + +// cvt support in display range limit block +#define NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK 0xFC +#define NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT 2 +#define NVT_PVT_EDID_CVT_ACTIVE_MSB_MASK 0x03 +#define NVT_PVT_EDID_CVT_ACTIVE_MSB_SHIFT 8 + +#define NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_MASK 0xF8 +#define NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_SHIFT 3 +#define NVT_PVT_EDID_CVT_RESERVED0_MASK 0x07 +#define NVT_PVT_EDID_CVT_RESERVED0_SHIFT 0 + +#define NVT_PVT_EDID_CVT_ASPECT_PREFERRED_MASK 0xE0 +#define NVT_PVT_EDID_CVT_ASPECT_PREFERRED_SHIFT 5 +#define NVT_PVT_EDID_CVT_BLANKING_MASK 0x18 +#define NVT_PVT_EDID_CVT_BLANKING_SHIFT 3 +#define NVT_PVT_EDID_CVT_RESERVED1_MASK 0x07 +#define NVT_PVT_EDID_CVT_RESERVED1_SHIFT 0 + +#define NVT_PVT_EDID_CVT_SCALING_MASK 0xF0 +#define NVT_PVT_EDID_CVT_SCALING_SHIFT 4 +#define NVT_PVT_EDID_CVT_RESERVED2_MASK 0x0F +#define NVT_PVT_EDID_CVT_RESERVED2_SHIFT 0 + +typedef struct _tagEDID_MONITOR_RANGE_LIMIT +{ + // the header in monitor descriptor data + NvU8 minVRate; // byte 0x05: min vertical rate + NvU8 maxVRate; // byte 0x06: max vertical rate + NvU8 minHRate; // byte 0x07: min horizontal rate + NvU8 maxHRate; // byte 0x08: max horizontal rate + NvU8 maxPClock10M; // byte 0x09: max pixel clock in 10M + NvU8 timing_support; // byte 0x0A: 2nd GTF / CVT timing formula support + union + { + EDID_MONITOR_RANGE_GTF2 gtf2; // bytes 0x0B-0x11 + EDID_MONITOR_RANGE_CVT cvt; // ... 
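+        // Which member applies is indicated by the timing_support byte (0x0A):
+        // secondary GTF curve data vs. CVT support data.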
+ }u; +} EDID_MONITOR_RANGE_LIMIT; + +// timing_support +#define NVT_PVT_EDID_RANGE_OFFSET_VER_MIN 0x01 +#define NVT_PVT_EDID_RANGE_OFFSET_VER_MAX 0x02 +#define NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN 0x04 +#define NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX 0x08 +#define NVT_PVT_EDID_RANGE_OFFSET_AMOUNT 255 + +typedef struct _tagEDID_CVT_3BYTE_BLOCK +{ + NvU8 addressable_lines; // byte 0: 8 lsb of addressable lines + NvU8 lines_ratio; // byte 1 : [bits7:4] 4 msb of addressable lines [bits3:2] aspect ratio + NvU8 refresh_rates; // byte 2 : supported/preferred refresh rates +}EDID_CVT_3BYTE_BLOCK; + +typedef struct _tagEDID_CVT_3BYTE +{ + // the header in monitor descriptor data. + NvU8 version; // byte 0x05 : version code (0x01) + EDID_CVT_3BYTE_BLOCK block[NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR]; // bytes 0x06-0x11 +}EDID_CVT_3BYTE; + +// CVT 3byte +#define NVT_PVT_EDID_CVT3_LINES_MSB_MASK 0xF0 +#define NVT_PVT_EDID_CVT3_LINES_MSB_SHIFT 4 +#define NVT_PVT_EDID_CVT3_ASPECT_MASK 0x0C +#define NVT_PVT_EDID_CVT3_ASPECT_SHIFT 2 + +#define NVT_PVT_EDID_CVT3_PREFERRED_RATE_MASK 0x60 +#define NVT_PVT_EDID_CVT3_PREFERRED_RATE_SHIFT 5 +#define NVT_PVT_EDID_CVT3_SUPPORTED_RATE_MASK 0x1F +#define NVT_PVT_EDID_CVT3_SUPPORTED_RATE_SHIFT 0 + +typedef struct _tagEDID_COLOR_POINT_DATA +{ + NvU8 wp1_index; // 0x05: white point index number + NvU8 wp1_x_y; // 0x06: [bits3:2] lsb of wp1_x [bits1:0] lsb of wp1_y + NvU8 wp1_x; // 0x07: msb of wp1_x + NvU8 wp1_y; // 0x08: msb of wp1_y + NvU8 wp1_gamma; // 0x09: (gamma x 100) - 100 + NvU8 wp2_index; // 0x0A: ... + NvU8 wp2_x_y; // 0x0B: ... + NvU8 wp2_x; // 0x0C: ... + NvU8 wp2_y; // 0x0D: ... + NvU8 wp2_gamma; // 0x0E: ... + NvU8 line_feed; // 0x0F: reserved for line feed (0x0A) + NvU16 reserved0; // 0x10-0x11: reserved for space (0x2020) +}EDID_COLOR_POINT_DATA; + +#define NVT_PVT_EDID_CPD_WP_X_MASK 0x0C +#define NVT_PVT_EDID_CPD_WP_X_SHIFT 2 +#define NVT_PVT_EDID_CPD_WP_Y_MASK 0x03 +#define NVT_PVT_EDID_CPD_WP_Y_SHIFT 0 + +typedef struct _tagEDID_STANDARD_TIMING_ID +{ + NvU16 std_timing[NVT_EDID_DD_STI_NUM]; //0x05-0x10: 6 standard timings + NvU8 line_feed; //0x11: reserved for line feed (0x0A) +}EDID_STANDARD_TIMING_ID; + +typedef struct _tagEDID_COLOR_MANAGEMENT_DATA +{ + NvU8 version; //0x05: version (0x03) + NvU8 red_a3_lsb; //0x06: Red a3 LSB + NvU8 red_a3_msb; //0x07: Red a3 MSB + NvU8 red_a2_lsb; //0x08 + NvU8 red_a2_msb; //0x09 + NvU8 green_a3_lsb; //0x0A + NvU8 green_a3_msb; //0x0B + NvU8 green_a2_lsb; //0x0C + NvU8 green_a2_msb; //0x0D + NvU8 blue_a3_lsb; //0x0E + NvU8 blue_a3_msb; //0x0F + NvU8 blue_a2_lsb; //0x10 + NvU8 blue_a2_msb; //0x11 +}EDID_COLOR_MANAGEMENT_DATA; + +typedef struct _tagEDID_EST_TIMINGS_III +{ + NvU8 revision; //0x05: revision (0x0A) + NvU8 timing_byte[12]; //0x05-0x11: established timings III +}EDID_EST_TIMINGS_III; + +typedef struct _tagDETAILEDTIMINGDESCRIPTOR +{ + NvU16 wDTPixelClock; // 0x00 + NvU8 bDTHorizontalActive; // 0x02 + NvU8 bDTHorizontalBlanking; // 0x03 + NvU8 bDTHorizActiveBlank; // 0x04 + NvU8 bDTVerticalActive; // 0x05 + NvU8 bDTVerticalBlanking; // 0x06 + NvU8 bDTVertActiveBlank; // 0x07 + NvU8 bDTHorizontalSync; // 0x08 + NvU8 bDTHorizontalSyncWidth; // 0x09 + NvU8 bDTVerticalSync; // 0x0A + NvU8 bDTHorizVertSyncOverFlow; // 0x0B + NvU8 bDTHorizontalImage; // 0x0C + NvU8 bDTVerticalImage; // 0x0D + NvU8 bDTHorizVertImage; // 0x0E + NvU8 bDTHorizontalBorder; // 0x0F + NvU8 bDTVerticalBorder; // 0x10 + NvU8 bDTFlags; // 0x11 +}DETAILEDTIMINGDESCRIPTOR; + +// EDID 1.x basic block template +typedef struct _tagEDIDV1STRUC +{ + 
NvU8 bHeader[8]; // 0x00-0x07 + NvU16 wIDManufName; // 0x08 + NvU16 wIDProductCode; // 0x0A + NvU32 dwIDSerialNumber; // 0x0C + NvU8 bWeekManuf; // 0x10 + NvU8 bYearManuf; // 0x11 + NvU8 bVersionNumber; // 0x12 + NvU8 bRevisionNumber; // 0x13 + NvU8 bVideoInputDef; // 0x14 + NvU8 bMaxHorizImageSize; // 0x15 + NvU8 bMaxVertImageSize; // 0x16 + NvU8 bDisplayXferChar; // 0x17 + NvU8 bFeatureSupport; // 0x18 + NvU8 Chromaticity[10]; // 0x19-0x22 + NvU8 bEstablishedTimings1; // 0x23 + NvU8 bEstablishedTimings2; // 0x24 + NvU8 bManufReservedTimings; // 0x25 + NvU16 wStandardTimingID[8]; // 0x26 + DETAILEDTIMINGDESCRIPTOR DetailedTimingDesc[4]; // 0x36 + NvU8 bExtensionFlag; // 0x7E + NvU8 bChecksum; // 0x7F +}EDIDV1STRUC; + +// EDID 2.x basic block template +typedef struct _tagEDIDV2STRUC +{ + NvU8 bHeader; // 0x00 + NvU16 wIDManufName; // 0x01 + NvU16 wIDProductCode; // 0x03 + NvU8 bWeekManuf; // 0x05 + NvU16 wYearManuf; // 0x06 + NvU8 bProductIDString[32]; // 0x08 + NvU8 bSerialNumber[16]; // 0x28 + NvU8 bReserved1[8]; // 0x38 + NvU8 bPhysicalInterfaceType; // 0x40 + NvU8 bVideoInterfaceType; // 0x41 + NvU8 bInterfaceDataFormat[8]; // 0x42 + NvU8 bInterfaceColor[5]; // 0x4A + NvU8 bDisplayTechType; // 0x4F + NvU8 bMajorDisplayChar; // 0x50 + NvU8 bFeaturesSupported[3]; // 0x51 + NvU16 wDisplayResponseTime; // 0x54 + NvU32 dwDisplayXferChar; // 0x56 + NvU32 dwMaxLuminance; // 0x5A + NvU8 bColorimetry[20]; // 0x5E + NvU16 wMaxHorizImageSize; // 0x72 + NvU16 wMaxVertImageSize; // 0x74 + NvU16 wMaxHorizAddressibility; // 0x76 + NvU16 wMaxVertAddressibility; // 0x78 + NvU8 bHorizPixelPitch; // 0x7A + NvU8 bVertPixelPitch; // 0x7B + NvU8 bReserved2; // 0x7C + NvU8 bGTFSupportInfo; // 0x7D + NvU16 wTimingInfoMap; // 0x7E + NvU8 bTableDescriptors[127]; // 0x80 + NvU8 bChecksum; // 0xFF +}EDIDV2STRUC; + +// EDID CEA/EIA-861 extension block template +typedef struct _tagEIA861EXTENSION +{ + NvU8 tag; // 0x00 + NvU8 revision; // 0x01 + NvU8 offset; // 0x02 + NvU8 misc; // 0x03 + NvU8 data[NVT_CEA861_MAX_PAYLOAD]; // 0x04 - 0x7E + NvU8 checksum; // 0x7F +}EIA861EXTENSION; + + +typedef struct _tagVTBEXTENSION +{ + NvU8 tag; // 0x00 + NvU8 revision; // 0x01 + NvU8 num_detailed; // 0x02 + NvU8 num_cvt; // 0x03 + NvU8 num_standard; // 0x04 + NvU8 data[NVT_VTB_MAX_PAYLOAD]; // 0x05 - 0x7E + NvU8 checksum; +}VTBEXTENSION; + +// video signal interface mask +#define NVT_PVT_EDID_INPUT_ISDIGITAL_MASK 0x80 // 0==analog +#define NVT_PVT_EDID_INPUT_ISDIGITAL_SHIFT 7 +#define NVT_PVT_EDID_INPUT_ANALOG_ETC_MASK 0x7F +#define NVT_PVT_EDID_INPUT_ANALOG_ETC_SHIFT 0 + +#define NVT_PVT_EDID_INPUT_INTERFACE_MASK 0x0F +#define NVT_PVT_EDID_INPUT_INTERFACE_SHIFT 0 + +#define NVT_PVT_EDID_INPUT_BPC_MASK 0x70 +#define NVT_PVT_EDID_INPUT_BPC_SHIFT 4 +#define NVT_PVT_EDID_INPUT_BPC_UNDEF 0x00 +#define NVT_PVT_EDID_INPUT_BPC_6 0x01 +#define NVT_PVT_EDID_INPUT_BPC_8 0x02 +#define NVT_PVT_EDID_INPUT_BPC_10 0x03 +#define NVT_PVT_EDID_INPUT_BPC_12 0x04 +#define NVT_PVT_EDID_INPUT_BPC_14 0x05 +#define NVT_PVT_EDID_INPUT_BPC_16 0x06 + +// color characteristic +#define NVT_PVT_EDID_CC_RED_X1_X0_MASK 0xC0 +#define NVT_PVT_EDID_CC_RED_X1_X0_SHIFT 6 +#define NVT_PVT_EDID_CC_RED_Y1_Y0_MASK 0x30 +#define NVT_PVT_EDID_CC_RED_Y1_Y0_SHIFT 4 + +#define NVT_PVT_EDID_CC_GREEN_X1_X0_MASK 0x0C +#define NVT_PVT_EDID_CC_GREEN_X1_X0_SHIFT 2 +#define NVT_PVT_EDID_CC_GREEN_Y1_Y0_MASK 0x03 +#define NVT_PVT_EDID_CC_GREEN_Y1_Y0_SHIFT 0 + +#define NVT_PVT_EDID_CC_BLUE_X1_X0_MASK 0xC0 +#define NVT_PVT_EDID_CC_BLUE_X1_X0_SHIFT 6 +#define 
NVT_PVT_EDID_CC_BLUE_Y1_Y0_MASK 0x30 +#define NVT_PVT_EDID_CC_BLUE_Y1_Y0_SHIFT 4 + +#define NVT_PVT_EDID_CC_WHITE_X1_X0_MASK 0x0C +#define NVT_PVT_EDID_CC_WHITE_X1_X0_SHIFT 2 +#define NVT_PVT_EDID_CC_WHITE_Y1_Y0_MASK 0x03 +#define NVT_PVT_EDID_CC_WHITE_Y1_Y0_SHIFT 0 + +#endif // __EDID_H_ diff --git a/src/common/modeset/timing/nvt_cvt.c b/src/common/modeset/timing/nvt_cvt.c new file mode 100644 index 000000000..0222ceb7a --- /dev/null +++ b/src/common/modeset/timing/nvt_cvt.c @@ -0,0 +1,431 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_cvt.c +// +// Purpose: calculate CVT/CVT-RB timing +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +const NvU32 NVT_MAX_NVU32= (NvU32)(-1); + +const NvU32 NVT_CVT_CELL_GRAN=8; +const NvU32 NVT_CVT_MIN_VSYNCBP=11; // in 550us (!!) [1000000:550 = 20000:11] +const NvU32 NVT_CVT_V_PORCH=3; // in pixels +const NvU32 NVT_CVT_C_PRIME=30; // value of (C' * 10) +const NvU32 NVT_CVT_M_PRIME_D_20=15; // value of (M' / 100) +const NvU32 NVT_CVT_CLOCK_STEP=25; // Pclk step, in 10kHz +const NvU32 NVT_CVT_H_SYNC_PER=8; // HSYNC percentage (8%) + +const NvU32 NVT_CVT_RB_HBLANK_CELLS=20; // 160 fixed hblank for RB +const NvU32 NVT_CVT_RB_HFPORCH_CELLS=6; // 48 fixed hfporch for RB +const NvU32 NVT_CVT_RB_HSYNCW_CELLS=4; // 32 fixed hsyncwidth for RB +const NvU32 NVT_CVT_RB_MIN_VBLANK=23; // 460 lines (or 460 us?) 
[1000000:460 = 50000:23] +const NvU32 NVT_CVT_MIN_V_BPORCH=6; + + +// VESA CVT spec ver1.2: +// +// Page 24 : Table 5-4 : Delta between Original Reduced Blank Timing and Reduced Blanking Timing V2 +#define NVT_CVT_RB2_CLOCK_STEP_KHZ 1 +#define NVT_CVT_RB2_H_BLANK_PIXELS 80 +#define NVT_CVT_RB2_H_SYNC_PIXELS 32 +#define NVT_CVT_RB2_MIN_VBLANK_MICROSEC 460 +#define NVT_CVT_RB2_V_FPORCH_MIN 1 +#define NVT_CVT_RB2_V_BPORCH 6 +// Page 16 : Table 3-2 : Vertical Sync Duration +#define NVT_CVT_RB2_V_SYNC_WIDTH 8 +// Page 22: RB_MIN_VBI = RB_V_FPORCH + V_SYNC_RND + MIN_V_BPORCH +#define NVT_CVT_RB2_MIN_VBI NVT_CVT_RB2_V_SYNC_WIDTH + NVT_CVT_RB2_V_FPORCH_MIN + NVT_CVT_RB2_V_BPORCH + +// Page 15 : The Horizontal Sync Pulse duration will in all cases be 32 pixel clocks in duration, with the position +// set so that the trailing edge of the Horizontal Sync Pulse is located in the center of the Horizontal +// Blanking period.This implies that for a fixed blank of 80 pixel clocks, the Horizontal Back Porch is +// fixed to(80 / 2) 40 pixel clocks and the Horizontal Front Porch is fixed to(80 - 40 - 32) = 8 clock cycles. +#define NVT_CVT_RB2_H_FPORCH 8 +#define NVT_CVT_RB2_H_BPORCH 40 + +CODE_SEGMENT(PAGE_DD_CODE) +static NvU16 getCVTVSync(NvU32 XRes, NvU32 YRes) +{ + // 4:3 modes + if(XRes * 3 == YRes * 4) + return 4; + + // 16:9 modes + //if((XRes * 9 == YRes * 16) || + // (XRes == 848 && YRes == 480) || // 53:30 = 1.76666 + // (XRes == 1064 && YRes == 600) || // 133:75 = 1.77333 + // (XRes == 1360 && YRes == 768) || // 85:48 = 1.77083 + // (XRes == 1704 && YRes == 960) || // 71:40 = 1.775 + // (XRes == 1864 && YRes == 1050) || // 832:525 = 1.77523809 + // (XRes == 2128 && YRes == 1200) || // 133:75 + // (XRes == 2728 && YRes == 1536) || // 341:192 = 1.7760416 + // (XRes == 3408 && YRes == 1920) || // 71:40 + // (XRes == 4264 && YRes == 2400)) // 533:300 = 1.77666 + // return 5; + // NOTE: Because 16:9 modes are really a collection of mode of + // aspect ratio between 16:9 and 53:30, we will include + // all generic mode within this aspect ration range + if((XRes * 9 <= YRes * 16) && (XRes * 30 >= YRes * 53)) + return 5; + + // 16:10 modes + if((XRes * 5 == YRes * 8) || + (XRes == 1224 && YRes == 768) || + (XRes == 2456 && YRes == 1536)) + return 6; + + // Special 1280 modes + if((XRes == 1280 && YRes == 1024) || + (XRes == 1280 && YRes == 768)) + return 7; + + // Failure value, for identification + return 10; +} + + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NvU32 dwXCells, dwVSyncBP, dwHBlankCells, dwPClk, dwHSyncCells, dwVSyncWidth; + + NvU32 dwHPeriodEstimate_NUM, dwHPeroidEstimate_DEN; + NvU32 dwIdealDutyCycle_NUM, dwIdealDutyCycle_DEN; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if (width < 300 || height < 200 || rr < 10) + return NVT_STATUS_ERR;//return NVT_STATUS_ERR_BACKOFF | NVT_STATUS_ERR_OUTOFRANGE; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + + pT->etc.status = NVT_STATUS_CVT; + if ((width % NVT_CVT_CELL_GRAN)!=0) + { + width = (width + NVT_CVT_CELL_GRAN / 2) / NVT_CVT_CELL_GRAN * NVT_CVT_CELL_GRAN; + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_ALIGNMENT); + } + + // Calculate timing + dwXCells = width / NVT_CVT_CELL_GRAN; // Convert to number of cells + dwVSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + dwHPeriodEstimate_NUM 
= 20000 - NVT_CVT_MIN_VSYNCBP * rr; + dwHPeroidEstimate_DEN = rr * (height + NVT_CVT_V_PORCH); + + dwVSyncBP = NVT_CVT_MIN_VSYNCBP * dwHPeroidEstimate_DEN / dwHPeriodEstimate_NUM +1; + if(dwVSyncBP < dwVSyncWidth + NVT_CVT_MIN_V_BPORCH) + dwVSyncBP = dwVSyncWidth + NVT_CVT_MIN_V_BPORCH; + + // Check for overflow + //DBG_ASSERT(NVT_MAX_NVU32 / NVT_CVT_C_PRIME > dwHPeroidEstimate_DEN); + + dwIdealDutyCycle_DEN = dwHPeroidEstimate_DEN; + dwIdealDutyCycle_NUM = NVT_CVT_C_PRIME * dwHPeroidEstimate_DEN - NVT_CVT_M_PRIME_D_20 * dwHPeriodEstimate_NUM; + + if (dwIdealDutyCycle_NUM < dwIdealDutyCycle_DEN * 20) + { + dwIdealDutyCycle_NUM=20; + dwIdealDutyCycle_DEN=1; + } + + // Check for overflow + if (NVT_MAX_NVU32 / dwXCells <= dwIdealDutyCycle_NUM) + { + dwIdealDutyCycle_NUM /= 10; + dwIdealDutyCycle_DEN /= 10; + } + + dwHBlankCells = ((dwXCells * dwIdealDutyCycle_NUM)/(200*dwIdealDutyCycle_DEN - 2*dwIdealDutyCycle_NUM))*2; + + // Check for overflow + //DBG_ASSERT(MAX_NVU32 / dwHPeroidEstimate_DEN > (dwXCells + dwHBlankCells)*CVT_CELL_GRAN); + dwPClk = ((dwXCells + dwHBlankCells) * NVT_CVT_CELL_GRAN * dwHPeroidEstimate_DEN * 2 / dwHPeriodEstimate_NUM / NVT_CVT_CLOCK_STEP) * NVT_CVT_CLOCK_STEP; + + dwHSyncCells = (dwXCells + dwHBlankCells) * NVT_CVT_H_SYNC_PER / 100; + + + pT->HVisible = (NvU16)(dwXCells * NVT_CVT_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)((dwXCells + dwHBlankCells) * NVT_CVT_CELL_GRAN); + pT->HFrontPorch = (NvU16)((dwHBlankCells/2 - dwHSyncCells) * NVT_CVT_CELL_GRAN); + pT->HSyncWidth = (NvU16)(dwHSyncCells * NVT_CVT_CELL_GRAN); + + pT->VTotal = (NvU16)(height + dwVSyncBP + NVT_CVT_V_PORCH); + pT->VFrontPorch = (NvU16)(NVT_CVT_V_PORCH); + pT->VSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + pT->pclk = dwPClk; + + pT->HSyncPol = NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + + // Clear unused fields + pT->HBorder = pT->VBorder = 0; + pT->interlaced = NVT_PROGRESSIVE; + + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->VTotal >>= 1; + pT->VVisible = (pT->VVisible + 1) / 2; + } + pT->etc.rgb444.bpc.bpc8 = 1; + + return NVT_STATUS_SUCCESS; +} + +// CVT-RB timing calculation +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NvU32 dwXCells, dwPClk, dwVBILines, dwVSyncWidth; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if (width < 300 || height < 200 || rr < 10) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_BACKOFF | NVT_STATUS_ERR_OUTOFRANGE; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + + pT->etc.status = NVT_STATUS_CVT_RB; + if ((width % NVT_CVT_CELL_GRAN)!=0) + { + width = (width + NVT_CVT_CELL_GRAN / 2) / NVT_CVT_CELL_GRAN * NVT_CVT_CELL_GRAN; + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_ALIGNMENT); + } + + // Calculate timing + dwXCells = width / NVT_CVT_CELL_GRAN; // Convert to 
number of cells + dwVSyncWidth = getCVTVSync(dwXCells * NVT_CVT_CELL_GRAN, height); + + dwVBILines = (NVT_CVT_RB_MIN_VBLANK * height * rr) / (50000 - NVT_CVT_RB_MIN_VBLANK * rr) + 1; + + if(dwVBILines < NVT_CVT_V_PORCH + dwVSyncWidth + NVT_CVT_MIN_V_BPORCH) + dwVBILines = NVT_CVT_V_PORCH + dwVSyncWidth + NVT_CVT_MIN_V_BPORCH; + + dwPClk = rr * (height + dwVBILines) * (dwXCells + NVT_CVT_RB_HBLANK_CELLS) / (10000 / NVT_CVT_CELL_GRAN) / NVT_CVT_CLOCK_STEP; + dwPClk *= NVT_CVT_CLOCK_STEP; + + pT->HVisible = (NvU16)(dwXCells * NVT_CVT_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)((dwXCells + NVT_CVT_RB_HBLANK_CELLS) * NVT_CVT_CELL_GRAN); + pT->HFrontPorch = (NvU16)(NVT_CVT_RB_HFPORCH_CELLS * NVT_CVT_CELL_GRAN); + pT->HSyncWidth = (NvU16)(NVT_CVT_RB_HSYNCW_CELLS * NVT_CVT_CELL_GRAN); + + pT->VTotal = (NvU16)(height + dwVBILines); + pT->VFrontPorch = (NvU16)(NVT_CVT_V_PORCH); + pT->VSyncWidth = (NvU16)dwVSyncWidth; + + pT->pclk = dwPClk; + + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_NEGATIVE; + + // Clear unused fields + pT->HBorder = pT->VBorder = 0; + pT->interlaced = 0; + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT-RB:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->VTotal >>= 1; + pT->VVisible = (pT->VVisible + 1) / 2; + } + + return NVT_STATUS_SUCCESS; +} + +// CVT-RB2 timing calculation +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCVT_RB2(NvU32 width, NvU32 height, NvU32 rr, NvBool is1000div1001, NVT_TIMING *pT) +{ + NvU32 vbi, act_vbi_lines, total_v_lines, total_pixels, act_pixel_freq_khz; + + // parameter check + if (pT == NULL || width == 0 || height == 0 || rr == 0) + return NVT_STATUS_ERR; + + // Check for valid input parameter + if (width < 300 || height < 200 || rr < 10) + return NVT_STATUS_ERR; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + pT->etc.status = NVT_STATUS_CVT_RB_2; + + // CVT spec1.2 - page 21 : 5.4 Computation of Reduced Blanking Timing Parameters + // 8. Estimate the Horizontal Period (kHz): + // H_PERIOD_EST = ((1000000 / (V_FIELD_RATE_RQD)) - RB_MIN_V_BLANK) / (V_LINES_RND + + // TOP_MARGIN + BOT_MARGIN) + // h_period_est = (1000000 / rr - NVT_CVT_RB2_MIN_VBLANK) / height; + + // 9. Determine the number of lines in the vertical blanking interval : + // VBI_LINES = ROUNDDOWN(RB_MIN_V_BLANK / H_PERIOD_EST, 0) + 1 + // vbi = NVT_CVT_RB2_MIN_VBLANK / h_period_est + 1; + + // combining step 8, 9, + vbi = height * NVT_CVT_RB2_MIN_VBLANK_MICROSEC * rr / (1000000 - NVT_CVT_RB2_MIN_VBLANK_MICROSEC * rr) + 1; + + // 10. Check Vertical Blanking is Sufficient : + // RB_MIN_VBI = RB_V_FPORCH + V_SYNC_RND + MIN_V_BPORCH + // ACT_VBI_LINES = IF(VBI_LINES < RB_MIN_VBI, RB_MIN_VBI, VBI_LINES) + act_vbi_lines = MAX(vbi, NVT_CVT_RB2_MIN_VBI); + + // 11. Find total number of vertical lines : + // TOTAL_V_LINES = ACT_VBI_LINES + V_LINES_RND + TOP_MARGIN + BOT_MARGIN + // + INTERLACE + total_v_lines = act_vbi_lines + height; //+0.5 if interlaced + + // 12. 
Find total number of pixel clocks per line : + // TOTAL_PIXELS = RB_H_BLANK + TOTAL_ACTIVE_PIXELS + total_pixels = NVT_CVT_RB2_H_BLANK_PIXELS + width; + + // sanity check just in case of bad edid where the timing value could exceed the limit of NVT_TIMING structure which unfortunately is defined in NvU16 + if (total_pixels > (NvU16)-1 || total_v_lines > (NvU16)-1) + return NVT_STATUS_INVALID_PARAMETER; + + // 13. Calculate Pixel Clock Frequency to nearest CLOCK_STEP MHz : + // ACT_PIXEL_FREQ = CLOCK_STEP * ROUNDDOWN((V_FIELD_RATE_RQD * TOTAL_V_LINES * + // TOTAL_PIXELS / 1000000 * REFRESH_MULTIPLIER) / CLOCK_STEP, 0) + if (is1000div1001) + act_pixel_freq_khz = NVT_CVT_RB2_CLOCK_STEP_KHZ * (rr * total_v_lines * total_pixels / 1001 / NVT_CVT_RB2_CLOCK_STEP_KHZ); + else + act_pixel_freq_khz = NVT_CVT_RB2_CLOCK_STEP_KHZ * (rr * total_v_lines * total_pixels / 1000 / NVT_CVT_RB2_CLOCK_STEP_KHZ); + + // 14. Find actual Horizontal Frequency(kHz) : + // ACT_H_FREQ = 1000 * ACT_PIXEL_FREQ / TOTAL_PIXELS + // 15. Find Actual Field Rate(Hz) : + // ACT_FIELD_RATE = 1000 * ACT_H_FREQ / TOTAL_V_LINES + // 16. Find actual Vertical Refresh Rate(Hz) : + // ACT_FRAME_RATE = IF(INT_RQD ? = "y", ACT_FIELD_RATE / 2, ACT_FI + + // fill in the essential timing info for output + pT->HVisible = (NvU16)width; + pT->HTotal = (NvU16)(total_pixels); + pT->HFrontPorch = NVT_CVT_RB2_H_FPORCH; + pT->HSyncWidth = NVT_CVT_RB2_H_SYNC_PIXELS; + pT->VVisible = (NvU16)height; + pT->VTotal = (NvU16)total_v_lines; + pT->VSyncWidth = NVT_CVT_RB2_V_SYNC_WIDTH; + pT->VFrontPorch = (NvU16)(act_vbi_lines - NVT_CVT_RB2_V_SYNC_WIDTH - NVT_CVT_RB2_V_BPORCH); + pT->pclk = (act_pixel_freq_khz + 5) / 10; //convert to 10Khz + pT->HSyncPol = NVT_H_SYNC_POSITIVE; + pT->VSyncPol = NVT_V_SYNC_NEGATIVE; + pT->HBorder = pT->VBorder = 0; // not supported + pT->interlaced = 0; // not supported yet + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000 * (NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + NVT_SNPRINTF((char *)pT->etc.name, 40, "CVT-RB2:%dx%dx%dHz", width, height, rr); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool NvTiming_IsTimingCVTRB(const NVT_TIMING *pTiming) +{ + // Check from the Timing Type + NvU32 reducedType = 0; + reducedType = NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status); + + if (reducedType == NVT_TYPE_CVT_RB || reducedType == NVT_TYPE_CVT_RB_2) + { + return NV_TRUE; + } + + // Manually Check for RB 1 and 2 + // RB1 - HBlank = 160, and HSync = 32, HFrontPorch = 48, HBackPorch = 80 + if (((pTiming->HTotal - pTiming->HVisible) == 160) && (pTiming->HSyncWidth == 32) && (pTiming->HFrontPorch == 48)) + { + return NV_TRUE; + } + + // RB2 - HBlank = 80, HSync = 32, HFrontPorch = 8, HBackPorch = 40 + if (((pTiming->HTotal - pTiming->HVisible) == 80) && (pTiming->HSyncWidth == 32) && (pTiming->HFrontPorch == 8)) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_displayid20.c b/src/common/modeset/timing/nvt_displayid20.c new file mode 100644 index 000000000..234ca64fb --- /dev/null +++ b/src/common/modeset/timing/nvt_displayid20.c @@ -0,0 +1,1747 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_displayid20.c +// +// Purpose: the provide displayID 2.0 related services +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming.h" +#include "nvtiming_pvt.h" +#include "displayid20.h" + +PUSH_SEGMENTS + +// DisplayID20 Entry point functions +static NVT_STATUS parseDisplayId20BaseSection(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +static NVT_STATUS parseDisplayId20SectionDataBlocks(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +static NVT_STATUS parseDisplayId20ExtensionSection(const DISPLAYID_2_0_SECTION *pSection, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); + +// DisplayID20 Data Block Tag Alloction +static NVT_STATUS parseDisplayId20ProductIdentity(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_PRODUCT_IDENTITY *pProductIdentity); // 0x20 Product Identificaton Block Tag +static NVT_STATUS parseDisplayId20DisplayParam(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_DISPLAY_PARAMETERS *pDisplayParam); // 0x21 Display Parameters +static NVT_STATUS parseDisplayId20Timing7(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x22 Type VII Timing - Detailed Timing +static NVT_STATUS parseDisplayId20Timing8(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x23 Type VIII Timing - Enumerated Timing +static NVT_STATUS parseDisplayId20Timing9(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x24 Type IX Timing - Formula-based +static NVT_STATUS parseDisplayId20Timing10(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x24 Type X Timing - Formula-based RR up to 1024Hz +static NVT_STATUS parseDisplayId20RangeLimit(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_RANGE_LIMITS *pRangeLimits); // 0x25 Dynamic Video Timing Range Limits +static NVT_STATUS parseDisplayId20DisplayInterfaceFeatures(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_INTERFACE_FEATURES *pInterfaceFeatures); // 0x26 Display Interface Features +static NVT_STATUS parseDisplayId20Stereo(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x27 Stereo Display 
Interface +static NVT_STATUS parseDisplayId20TiledDisplay(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY *pTileTopo); // 0x28 Tiled Display Topology +static NVT_STATUS parseDisplayId20ContainerId(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_CONTAINERID *pContainerId); // 0x29 ContainerID +static NVT_STATUS parseDisplayId20VendorSpecific(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_VENDOR_SPECIFIC *pVendorSpecific); // 0x7E Vendor-specific +static NVT_STATUS parseDisplayId20CtaData(const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); // 0x81 CTA DisplayID + +// Helper function +static NVT_STATUS getPrimaryUseCase(NvU8 product_type, NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE *primary_use_case); +static NVT_STATUS parseDisplayId20Timing7Descriptor(const DISPLAYID_2_0_TIMING_7_DESCRIPTOR *pDescriptor, NVT_TIMING *pTiming, NvU8 revision, NvU8 count); +static NVT_STATUS parseDisplayId20Timing9Descriptor(const DISPLAYID_2_0_TIMING_9_DESCRIPTOR *pDescriptor, NVT_TIMING *pTiming, NvU8 count); +static NVT_STATUS parseDisplayId20Timing10Descriptor(const void *pDescriptor, NVT_TIMING *pTiming, NvU8 payloadbytes, NvU8 count); +static NvU32 greatestCommonDenominator(NvU32 x, NvU32 y); +static NvU8 getExistedTimingSeqNumber(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, enum NVT_TIMING_TYPE); + +/* + * The Second-generation version of VESA DisplayID Standard + * DisplayID v2.0 + * + * @brief Parses a displayID20 section + * + * @param pDisplayId The DisplayId20 Section Block () + * @param length Size of the displayId section Block + * @param pDisplayIdInfo Need to parse the raw data to store as NV structure + * + */ +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NV_STDCALL +NvTiming_parseDisplayId20Info( + const NvU8 *pDisplayId, + NvU32 length, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_SECTION *pSection = NULL; + NvU32 offset = 0; + NvU32 extensionIndex = 0; + NvU32 idx = 0; + + // parameter check + if ((pDisplayId == NULL) || + (pDisplayIdInfo == NULL)) + { + return NVT_STATUS_ERR; + } + + pSection = (const DISPLAYID_2_0_SECTION *)pDisplayId; + + if ((pSection->header.version < DISPLAYID_2_0_VERSION) || + (DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header) > length)) + { + return NVT_STATUS_ERR; + } + + NVMISC_MEMSET(pDisplayIdInfo, 0, sizeof(NVT_DISPLAYID_2_0_INFO)); + + status = parseDisplayId20BaseSection(pSection, pDisplayIdInfo); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + pDisplayIdInfo->extension_count = pSection->header.extension_count; + for (extensionIndex = 0; extensionIndex < pDisplayIdInfo->extension_count; extensionIndex++) + { + // Get offset to the next section. 
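+        // DISPLAYID_2_0_SECTION_SIZE_TOTAL() spans the section header, its
+        // section_bytes of payload, and the trailing checksum byte, so this
+        // lands on the first byte of the next extension section.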
+ offset += DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header); + + // validate the next section buffer is valid + pSection = (const DISPLAYID_2_0_SECTION *)(pDisplayId + offset); + if ((offset + DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) > length) + { + return NVT_STATUS_ERR; + } + + // process the section + status = parseDisplayId20ExtensionSection(pSection, pDisplayIdInfo); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + } + + for (idx = 0; idx < pDisplayIdInfo->total_timings; idx++) + { + updateColorFormatForDisplayId20Timings(pDisplayIdInfo, idx); + } + + return status; +} + +NvU32 NvTiming_DisplayID2ValidationMask( + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info, + NvBool bIsStrongValidation) +{ + NvU32 j; + NvU32 ret = 0; + + // check the DisplayId2 version and signature + if (pDisplayId20Info->version != 0x2) + { + ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_VERSION); + } + + if (!pDisplayId20Info->valid_data_blocks.product_id_present) + { + ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_PRODUCT_ID); + } + + if (pDisplayId20Info->primary_use_case >= PRODUCT_PRIMARY_USE_GENERIC_DISPLAY && + pDisplayId20Info->primary_use_case <= PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY) + { + if (!(pDisplayId20Info->valid_data_blocks.parameters_present && + pDisplayId20Info->valid_data_blocks.interface_feature_present && + pDisplayId20Info->valid_data_blocks.type7Timing_present && + pDisplayId20Info->total_timings)) + { + ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK); + } + } + + // Strong validation to follow + if (bIsStrongValidation == NV_TRUE) + { + // TODO : For each of the Data Block limitation + // Type 7 Timings data block + for (j = 0; j <= pDisplayId20Info->total_timings; j++) + { + if ( NVT_PREFERRED_TIMING_IS_DISPLAYID(pDisplayId20Info->timing[j].etc.flag) && + (pDisplayId20Info->display_param.h_pixels != 0) && + (pDisplayId20Info->display_param.v_pixels != 0)) + { + if ( pDisplayId20Info->timing[j].HVisible != pDisplayId20Info->display_param.h_pixels || + pDisplayId20Info->timing[j].VVisible != pDisplayId20Info->display_param.v_pixels ) + { + ret |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK); + break; + } + } + } + // TODO : go on the next data block validation if it existed. 
+ // TODO : validate extension blocks + } + + return ret; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +NvTiming_DisplayID2ValidationDataBlocks( + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + NvBool bIsStrongValidation) +{ + if (NvTiming_DisplayID2ValidationMask(pDisplayIdInfo, bIsStrongValidation) != 0) + { + return NVT_STATUS_ERR; + } + else + { + return NVT_STATUS_SUCCESS; + } +} + +// DisplayID20 Entry point functions +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20BaseSection( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + // validate for section checksum before processing the data block + if (computeDisplayId20SectionCheckSum((const NvU8 *)pSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) != 0) + { + status |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_CHECKSUM); + return status; + } + + pDisplayIdInfo->revision = pSection->header.revision; + pDisplayIdInfo->version = pSection->header.version; + + status = getPrimaryUseCase(pSection->header.product_type, + &pDisplayIdInfo->primary_use_case); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + status = parseDisplayId20SectionDataBlocks(pSection, pDisplayIdInfo); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20ExtensionSection( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + // validate for section checksum before processing the data block + if (computeDisplayId20SectionCheckSum((const NvU8 *)pSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(pSection->header)) != 0) + { + status |= NVT_DID2_VALIDATION_ERR_MASK(NVT_DID2_VALIDATION_ERR_CHECKSUM); + return status; + } + + nvt_assert(pSection->header.version >= DISPLAYID_2_0_VERSION); + nvt_assert(pSection->header.extension_count == 0); + nvt_assert(pSection->header.product_type == DISPLAYID_2_0_PROD_EXTENSION); + + status = parseDisplayId20SectionDataBlocks(pSection, pDisplayIdInfo); + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20SectionDataBlocks( + const DISPLAYID_2_0_SECTION *pSection, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NvU32 i = 0; + NvU32 offset = 0; + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock = NULL; + NVT_STATUS status = NVT_STATUS_SUCCESS; + + while (offset < pSection->header.section_bytes) + { + // Get current block + pDataBlock = (const DISPLAYID_2_0_DATA_BLOCK_HEADER *)(pSection->data + offset); + + // detected zero padding + if (pDataBlock->type == 0) + { + for (i = offset; i < pSection->header.section_bytes; i++) + { + // validate that all paddings are zeros + nvt_assert(pSection->data[i] == 0); + } + break; + } + + // check data block is valid. 
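+        // A data block occupies its header plus data_bytes of payload; reject
+        // any block that would extend past the section payload declared in
+        // section_bytes.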
+ if ((offset + DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(pDataBlock)) > pSection->header.section_bytes) + { + return NVT_STATUS_ERR; + } + + // parse the data block + status = parseDisplayId20DataBlock(pDataBlock, pDisplayIdInfo); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + switch (pDataBlock->type) + { + case DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY: + pDisplayIdInfo->valid_data_blocks.product_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM: + pDisplayIdInfo->valid_data_blocks.parameters_present = NV_TRUE; + if (pDisplayIdInfo->display_param.audio_speakers_integrated == AUDIO_SPEAKER_INTEGRATED_SUPPORTED) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_7: + pDisplayIdInfo->valid_data_blocks.type7Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_8: + pDisplayIdInfo->valid_data_blocks.type8Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_9: + pDisplayIdInfo->valid_data_blocks.type9Timing_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS: + pDisplayIdInfo->valid_data_blocks.dynamic_range_limit_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES: + pDisplayIdInfo->valid_data_blocks.interface_feature_present = NV_TRUE; + + // Supported - Color depth is supported for all supported timings. Supported timing includes all Display-ID exposed timings + // (that is timing exposed using DisplayID timing types and CTA VICs) + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayIdInfo->interface_features.yuv444.bpcs)) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_444; + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayIdInfo->interface_features.yuv422.bpcs)) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_422; + } + + if (pDisplayIdInfo->interface_features.audio_capability.support_48khz || + pDisplayIdInfo->interface_features.audio_capability.support_44_1khz || + pDisplayIdInfo->interface_features.audio_capability.support_32khz) + { + pDisplayIdInfo->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + break; + case DISPLAYID_2_0_BLOCK_TYPE_STEREO: + pDisplayIdInfo->valid_data_blocks.stereo_interface_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY: + pDisplayIdInfo->valid_data_blocks.tiled_display_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID: + pDisplayIdInfo->valid_data_blocks.container_id_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC: + pDisplayIdInfo->valid_data_blocks.vendor_specific_present = NV_TRUE; + break; + case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA: + pDisplayIdInfo->valid_data_blocks.cta_data_present = NV_TRUE; + break; + default: + status = NVT_STATUS_ERR; + } + + // advance to the next block + offset += DISPLAYID_2_0_DATA_BLOCK_SIZE_TOTAL(pDataBlock); + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20DataBlock( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + switch (pDataBlock->type) + { + case DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY: + status = parseDisplayId20ProductIdentity(pDataBlock, &pDisplayIdInfo->product_identity); + break; + case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM: + status = parseDisplayId20DisplayParam(pDataBlock, &pDisplayIdInfo->display_param); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_7: + status = 
parseDisplayId20Timing7(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_8: + status = parseDisplayId20Timing8(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_9: + status = parseDisplayId20Timing9(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TIMING_10: + status = parseDisplayId20Timing10(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS: + status = parseDisplayId20RangeLimit(pDataBlock, &pDisplayIdInfo->range_limits); + break; + case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES: + status = parseDisplayId20DisplayInterfaceFeatures(pDataBlock, &pDisplayIdInfo->interface_features); + break; + case DISPLAYID_2_0_BLOCK_TYPE_STEREO: + status = parseDisplayId20Stereo(pDataBlock, pDisplayIdInfo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY: + status = parseDisplayId20TiledDisplay(pDataBlock, &pDisplayIdInfo->tile_topo); + break; + case DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID: + status = parseDisplayId20ContainerId(pDataBlock, &pDisplayIdInfo->container_id); + break; + case DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC: + status = parseDisplayId20VendorSpecific(pDataBlock, &pDisplayIdInfo->vendor_specific); + break; + case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA: + status = parseDisplayId20CtaData(pDataBlock, pDisplayIdInfo); + break; + default: + status = NVT_STATUS_ERR; + } + return status; +} + +// All Data Blocks Parsing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS +parseDisplayId20ProductIdentity( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_PRODUCT_IDENTITY *pProductIdentity) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK *pProductIdBlock = NULL; + + if (pDataBlock->type == DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY) + { + pProductIdBlock = (const DISPLAYID_2_0_PROD_IDENTIFICATION_BLOCK *)pDataBlock; + + pProductIdentity->vendor_id = (pProductIdBlock->vendor[0] << 16) | + (pProductIdBlock->vendor[1] << 8) | + (pProductIdBlock->vendor[2]); + pProductIdentity->product_id = (pProductIdBlock->product_code[0]) | + (pProductIdBlock->product_code[1] << 8); + pProductIdentity->serial_number = (pProductIdBlock->serial_number[0]) | + (pProductIdBlock->serial_number[1] << 8) | + (pProductIdBlock->serial_number[2] << 16) | + (pProductIdBlock->serial_number[3] << 24); + pProductIdentity->week = (pProductIdBlock->model_tag >= 1 && pProductIdBlock->model_tag <= 52) ? + pProductIdBlock->model_tag : 0; + pProductIdentity->year = (pProductIdBlock->model_tag == 0xFF) ? 
+ pProductIdBlock->model_year : + pProductIdBlock->model_year + 2000; + + if (pProductIdBlock->product_name_string_size != 0) + { + NVMISC_STRNCPY((char *)pProductIdentity->product_string, + (const char *)pProductIdBlock->product_name_string, + pProductIdBlock->product_name_string_size); + } + pProductIdentity->product_string[pProductIdBlock->product_name_string_size] = '\0'; + } + else + { + return NVT_STATUS_ERR; + } + + return status; +} + + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20DisplayParam( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_DISPLAY_PARAMETERS *pDisplayParam) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_DISPLAY_PARAM_BLOCK *pDisplayParamBlock = NULL; + + if ((pDataBlock->type == DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM) && + (pDataBlock->data_bytes == DISPLAYID_2_0_DISPLAY_PARAM_BLOCK_PAYLOAD_LENGTH)) + { + pDisplayParamBlock = (const DISPLAYID_2_0_DISPLAY_PARAM_BLOCK *)pDataBlock; + + pDisplayParam->revision = pDisplayParamBlock->header.revision; + pDisplayParam->h_image_size_micro_meter = (pDisplayParamBlock->horizontal_image_size[1] << 8 | + pDisplayParamBlock->horizontal_image_size[0]) * + (pDisplayParamBlock->header.image_size_multiplier ? 1000 : 100); + pDisplayParam->v_image_size_micro_meter = (pDisplayParamBlock->vertical_image_size[1] << 8 | + pDisplayParamBlock->vertical_image_size[0]) * + (pDisplayParamBlock->header.image_size_multiplier ? 1000 : 100); + pDisplayParam->h_pixels = pDisplayParamBlock->horizontal_pixel_count[1] << 8 | + pDisplayParamBlock->horizontal_pixel_count[0]; + pDisplayParam->v_pixels = pDisplayParamBlock->vertical_pixel_count[1] << 8 | + pDisplayParamBlock->vertical_pixel_count[0]; + + pDisplayParam->scan_orientation = pDisplayParamBlock->feature.scan_orientation; + pDisplayParam->audio_speakers_integrated = pDisplayParamBlock->feature.audio_speaker_information ? AUDIO_SPEAKER_INTEGRATED_NOT_SUPPORTED : AUDIO_SPEAKER_INTEGRATED_SUPPORTED; + pDisplayParam->color_map_standard = pDisplayParamBlock->feature.color_information ? 
COLOR_MAP_CIE_1976 : COLOR_MAP_CIE_1931; + + // 12 bits Binary Fraction Representations + pDisplayParam->primaries[0].x = pDisplayParamBlock->primary_color_1_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_1_chromaticity.color_x_bits_low; + pDisplayParam->primaries[0].y = pDisplayParamBlock->primary_color_1_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_1_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->primaries[1].x = pDisplayParamBlock->primary_color_2_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_2_chromaticity.color_x_bits_low; + pDisplayParam->primaries[1].y = pDisplayParamBlock->primary_color_2_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_2_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->primaries[2].x = pDisplayParamBlock->primary_color_3_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->primary_color_3_chromaticity.color_x_bits_low; + pDisplayParam->primaries[2].y = pDisplayParamBlock->primary_color_3_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->primary_color_3_chromaticity.color_bits_mid.color_y_bits_low; + pDisplayParam->white.x = pDisplayParamBlock->white_point_chromaticity.color_bits_mid.color_x_bits_high << 8 | + pDisplayParamBlock->white_point_chromaticity.color_x_bits_low; + pDisplayParam->white.y = pDisplayParamBlock->white_point_chromaticity.color_y_bits_high << 4 | + pDisplayParamBlock->white_point_chromaticity.color_bits_mid.color_y_bits_low; + + // IEEE 754 half-precision binary floating-point format + pDisplayParam->native_max_luminance_full_coverage = pDisplayParamBlock->max_luminance_full_coverage[1] << 8 | + pDisplayParamBlock->max_luminance_full_coverage[0]; + pDisplayParam->native_max_luminance_1_percent_rect_coverage = pDisplayParamBlock->max_luminance_1_percent_rectangular_coverage[1] << 8 | + pDisplayParamBlock->max_luminance_1_percent_rectangular_coverage[0]; + pDisplayParam->native_min_luminance = pDisplayParamBlock->min_luminance[1] << 8 | + pDisplayParamBlock->min_luminance[0]; + + if (pDisplayParamBlock->feature.luminance_information == 0) + { + pDisplayParam->native_luminance_info = NATIVE_LUMINANCE_INFO_MIN_GURANTEE_VALUE; + } + else if (pDisplayParamBlock->feature.luminance_information == 1) + { + pDisplayParam->native_luminance_info = NATIVE_LUMINANCE_INFO_SOURCE_DEVICE_GUIDANCE; + } + else + { + return NVT_STATUS_ERR; + } + + UPDATE_BPC_FOR_COLORFORMAT(pDisplayParam->native_color_depth, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_6, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_8, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_10, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_12, + 0, + pDisplayParamBlock->color_depth_and_device_technology.color_depth == NATIVE_COLOR_BPC_16); + + pDisplayParam->device_technology = pDisplayParamBlock->color_depth_and_device_technology.device_technology; + if (pDisplayParam->revision == 1) + { + pDisplayParam->device_theme_Preference = pDisplayParamBlock->color_depth_and_device_technology.device_theme_preference; + } + pDisplayParam->gamma_x100 = (pDisplayParamBlock->gamma_EOTF + 100); + } + else + { + return NVT_STATUS_ERR; + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing7( + const 
DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_7_BLOCK *pTiming7Block = NULL; + NvU32 descriptorCount = 0; + NvU8 revision = 0; + NvU8 i = 0; + NvU8 startSeqNumber = 0; + + NVT_TIMING newTiming; + + if (pDataBlock->type != DISPLAYID_2_0_BLOCK_TYPE_TIMING_7) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + pTiming7Block = (const DISPLAYID_2_0_TIMING_7_BLOCK *)pDataBlock; + + // Based on the DisplayID_2_0_E7 spec: + // the Future descriptor can be defined with more than 20 Byte per descriptor without creating a new timing type + if (pTiming7Block->header.payload_bytes_len == 0) + { + if (pDataBlock->data_bytes % sizeof(DISPLAYID_2_0_TIMING_7_DESCRIPTOR) != 0) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + descriptorCount = pDataBlock->data_bytes / (sizeof(DISPLAYID_2_0_TIMING_7_DESCRIPTOR) + pTiming7Block->header.payload_bytes_len); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_7_MAX_DESCRIPTORS) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_7); + + for (i = 0; i < descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + if (parseDisplayId20Timing7Descriptor(&pTiming7Block->descriptors[i], &newTiming, revision, startSeqNumber+i) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing8( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_8_BLOCK *pTiming8Block = NULL; + NVT_TIMING newTiming; + NvU8 codeType = DISPLAYID_2_0_TIMING_CODE_RSERVED; + NvU8 codeCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i; + + + if (pDataBlock->type != DISPLAYID_2_0_BLOCK_TYPE_TIMING_8) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + pTiming8Block = (const DISPLAYID_2_0_TIMING_8_BLOCK *)pDataBlock; + + // 1-byte descriptor timing code + if (pTiming8Block->header.timing_code_size == DISPLAYID_2_0_TIMING_CODE_SIZE_1_BYTE) + { + if (pDataBlock->data_bytes % sizeof(DISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE) != 0) + { + return NVT_STATUS_ERR; + } + + codeCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_8_ONE_BYTE_CODE); + if (codeCount < 1 || codeCount > DISPLAYID_2_0_TIMING_8_MAX_CODES) + { + return NVT_STATUS_ERR; + } + + codeType = pTiming8Block->header.timing_code_type; + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_8); + + for (i = 0; i < codeCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (codeType == DISPLAYID_2_0_TIMING_CODE_DMT) + { + if (NvTiming_EnumDMT((NvU32)(pTiming8Block->timing_code_1[i].timing_code), + &newTiming) != NVT_STATUS_SUCCESS) + { + break; + } + } + else if (codeType == DISPLAYID_2_0_TIMING_CODE_CTA_VIC) + { + if (NvTiming_EnumCEA861bTiming((NvU32)(pTiming8Block->timing_code_1[i].timing_code), + &newTiming) != NVT_STATUS_SUCCESS) + { + break; + } + } + else if (codeType == DISPLAYID_2_0_TIMING_CODE_HDMI_VIC) + { + if (NvTiming_EnumHdmiVsdbExtendedTiming((NvU32)(pTiming8Block->timing_code_1[i].timing_code), + &newTiming) != NVT_STATUS_SUCCESS) + { + break; + } + } + else + { + // RESERVED + break; + } + + newTiming.etc.flag |= ((pTiming8Block->header.revision >= 1) && 
pTiming8Block->header.is_support_yuv420) ? NVT_FLAG_DISPLAYID_2_0_EXPLICT_YUV420 : 0; + newTiming.etc.status = NVT_STATUS_DISPLAYID_8N(++startSeqNumber); + + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), "DID20-Type8:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status), + (int)newTiming.HVisible, (int)newTiming.VVisible, + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name) - 1] = '\0'; + + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + } + else + { + nvt_assert(0); + // TODO : 2-byte descriptor timing code did not define yet in DID20. + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing9( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_9_BLOCK *pTiming9Block = NULL; + NVT_TIMING newTiming; + NvU32 descriptorCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i = 0; + + if (pDataBlock->type != DISPLAYID_2_0_BLOCK_TYPE_TIMING_9) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_9_DESCRIPTOR); + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_9_MAX_DESCRIPTORS) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + pTiming9Block = (const DISPLAYID_2_0_TIMING_9_BLOCK *)pDataBlock; + + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_9); + + for (i = 0; i < descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayId20Timing9Descriptor(&pTiming9Block->descriptors[i], &newTiming, startSeqNumber+i) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing10( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_10_BLOCK *pTiming10Block = NULL; + NvU32 descriptorCount = 0; + NvU8 startSeqNumber = 0; + NvU8 i = 0; + + NVT_TIMING newTiming; + + pTiming10Block = (const DISPLAYID_2_0_TIMING_10_BLOCK *)pDataBlock; + + if (pTiming10Block->header.type != DISPLAYID_2_0_BLOCK_TYPE_TIMING_10) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + + if (pTiming10Block->header.payload_bytes_len == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_6) + { + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_10_MAX_6BYTES_DESCRIPTORS) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + } + else if (pTiming10Block->header.payload_bytes_len == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7) + { + descriptorCount = pDataBlock->data_bytes / sizeof(DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR); + + if (descriptorCount < 1 || descriptorCount > DISPLAYID_2_0_TIMING_10_MAX_7BYTES_DESCRIPTORS) + { + nvt_assert(0); + return NVT_STATUS_ERR; + } + } + + startSeqNumber = getExistedTimingSeqNumber(pDisplayIdInfo, NVT_TYPE_DISPLAYID_10); + + for (i = 0; i < descriptorCount; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NVT_STATUS_SUCCESS == parseDisplayId20Timing10Descriptor(&pTiming10Block->descriptors[i], &newTiming, 
pTiming10Block->header.payload_bytes_len, startSeqNumber+i)) + { + if (!assignNextAvailableDisplayId20Timing(pDisplayIdInfo, &newTiming)) + { + break; + } + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20RangeLimit( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_RANGE_LIMITS *pRangeLimits) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_RANGE_LIMITS_BLOCK *pRangeLimitsBlock = NULL; + + if ((pDataBlock->type == DISPLAYID_2_0_BLOCK_TYPE_RANGE_LIMITS) && + (pDataBlock->data_bytes == DISPLAYID_2_0_RANGE_LIMITS_BLOCK_PAYLOAD_LENGTH)) + { + pRangeLimitsBlock = (const DISPLAYID_2_0_RANGE_LIMITS_BLOCK *)pDataBlock; + + pRangeLimits->revision = pDataBlock->revision; + + pRangeLimits->pclk_min = (pRangeLimitsBlock->pixel_clock_min[2] << 16 | + pRangeLimitsBlock->pixel_clock_min[1] << 8 | + pRangeLimitsBlock->pixel_clock_min[0]) + 1; + pRangeLimits->pclk_max = (pRangeLimitsBlock->pixel_clock_max[2] << 16 | + pRangeLimitsBlock->pixel_clock_max[1] << 8 | + pRangeLimitsBlock->pixel_clock_max[0]) + 1; + pRangeLimits->vfreq_min = pRangeLimitsBlock->vertical_frequency_min; + if (pRangeLimits->revision == 1) + { + pRangeLimits->vfreq_max = pRangeLimitsBlock->dynamic_video_timing_range_support.vertical_frequency_max_9_8 << 8 | pRangeLimitsBlock->vertical_frequency_max_7_0; + } + else + { + pRangeLimits->vfreq_max = pRangeLimitsBlock->vertical_frequency_max_7_0; + } + + pRangeLimits->seamless_dynamic_video_timing_change = pRangeLimitsBlock->dynamic_video_timing_range_support.seamless_dynamic_video_timing_change; + } + else + { + return NVT_STATUS_ERR; + } + + return status; +} + +#define ADD_COLOR_SPACE_EOTF_COMBINATION(_pInterfaceFeatures, _color_space, _eotf) do { \ + (_pInterfaceFeatures)->colorspace_eotf_combination[(_pInterfaceFeatures)->combination_count].color_space = (_color_space); \ + (_pInterfaceFeatures)->colorspace_eotf_combination[(_pInterfaceFeatures)->combination_count].eotf = (_eotf); \ + (_pInterfaceFeatures)->combination_count++; \ + } while(0) + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20DisplayInterfaceFeatures( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_INTERFACE_FEATURES *pInterfaceFeatures) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 i = 0; + const DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK *pInterfaceFeaturesBlock = NULL; + + if (pDataBlock->type == DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES && + pDataBlock->data_bytes >= DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK_PAYLOAD_LENGTH_MIN) + { + pInterfaceFeaturesBlock = (const DISPLAYID_2_0_INTERFACE_FEATURES_BLOCK *)pDataBlock; + pInterfaceFeatures->revision = pDataBlock->revision; + + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->rgb444, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_6, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_rgb.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv444, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_6, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_10, + 
pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr444.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv422, + 0, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr422.bit_per_primary_16); + UPDATE_BPC_FOR_COLORFORMAT(pInterfaceFeatures->yuv420, + 0, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_8, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_10, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_12, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_14, + pInterfaceFeaturesBlock->interface_color_depth_ycbcr420.bit_per_primary_16); + + // * 74.25MP/s + pInterfaceFeatures->yuv420_min_pclk = pInterfaceFeaturesBlock->min_pixel_rate_ycbcr420 * + 7425; + + pInterfaceFeatures->audio_capability.support_48khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_48_khz; + pInterfaceFeatures->audio_capability.support_44_1khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_44_1_khz; + pInterfaceFeatures->audio_capability.support_32khz = + pInterfaceFeaturesBlock->audio_capability.sample_rate_32_khz; + + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_srgb_eotf_srgb) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_SRGB, + INTERFACE_EOTF_SRGB); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt601_eotf_bt601) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT601, + INTERFACE_EOTF_BT601); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt709_eotf_bt1886) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT709, + INTERFACE_EOTF_BT1886); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_adobe_rgb_eotf_adobe_rgb) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_ADOBE_RGB, + INTERFACE_EOTF_ADOBE_RGB); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_dci_p3_eotf_dci_p3) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_DCI_P3, + INTERFACE_EOTF_DCI_P3); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt2020_eotf_bt2020) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT2020, + INTERFACE_EOTF_BT2020); + } + if (pInterfaceFeaturesBlock->color_space_and_eotf_1.color_space_bt2020_eotf_smpte_st2084) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + INTERFACE_COLOR_SPACE_BT2020, + INTERFACE_EOTF_SMPTE_ST2084); + } + + for (i = 0; i < pInterfaceFeaturesBlock->additional_color_space_and_eotf_count.count; i++) + { + ADD_COLOR_SPACE_EOTF_COMBINATION(pInterfaceFeatures, + pInterfaceFeaturesBlock->additional_color_space_and_eotf[i].color_space, + pInterfaceFeaturesBlock->additional_color_space_and_eotf[i].eotf); + } + } + else + { + return NVT_STATUS_ERR; + } + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Stereo( + const 
DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + // TODO: Implement the parsing here. + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20TiledDisplay( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY *pTileTopo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TILED_DISPLAY_BLOCK *pTiledDisplayBlock = NULL; + + if ((pDataBlock->type == DISPLAYID_2_0_BLOCK_TYPE_TILED_DISPLAY) && + (pDataBlock->data_bytes == DISPLAYID_2_0_TILED_DISPLAY_BLOCK_PAYLOAD_LENGTH)) + { + pTiledDisplayBlock = (const DISPLAYID_2_0_TILED_DISPLAY_BLOCK *)pDataBlock; + + pTileTopo->revision = pDataBlock->revision; + + pTileTopo->capability.bSingleEnclosure = pTiledDisplayBlock->capability.single_enclosure; + pTileTopo->capability.bHasBezelInfo = pTiledDisplayBlock->capability.has_bezel_info; + pTileTopo->capability.multi_tile_behavior = pTiledDisplayBlock->capability.multi_tile_behavior; + pTileTopo->capability.single_tile_behavior = pTiledDisplayBlock->capability.single_tile_behavior; + + pTileTopo->topology.row = ((pTiledDisplayBlock->topo_loc_high.row << 5) | + (pTiledDisplayBlock->topo_low.row)) + 1; + pTileTopo->topology.col = ((pTiledDisplayBlock->topo_loc_high.col << 5) | + (pTiledDisplayBlock->topo_low.col)) + 1; + pTileTopo->location.x = ((pTiledDisplayBlock->topo_loc_high.x << 5) | + (pTiledDisplayBlock->loc_low.x)); + pTileTopo->location.y = ((pTiledDisplayBlock->topo_loc_high.y << 5) | + (pTiledDisplayBlock->loc_low.y)); + + pTileTopo->native_resolution.width = ((pTiledDisplayBlock->native_resolution.width_high << 8) | + pTiledDisplayBlock->native_resolution.width_low) + 1; + pTileTopo->native_resolution.height = ((pTiledDisplayBlock->native_resolution.height_high << 8) | + pTiledDisplayBlock->native_resolution.height_low) + 1; + + pTileTopo->bezel_info.top = (pTiledDisplayBlock->bezel_info.top * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.bottom = (pTiledDisplayBlock->bezel_info.bottom * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.right = (pTiledDisplayBlock->bezel_info.right * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + pTileTopo->bezel_info.left = (pTiledDisplayBlock->bezel_info.left * + pTiledDisplayBlock->bezel_info.pixel_density) / 10; + + pTileTopo->tile_topology_id.vendor_id = pTiledDisplayBlock->topo_id.vendor_id[0] << 16 | + pTiledDisplayBlock->topo_id.vendor_id[1] << 8 | + pTiledDisplayBlock->topo_id.vendor_id[2]; + pTileTopo->tile_topology_id.product_id = pTiledDisplayBlock->topo_id.product_id[1] << 8 | + pTiledDisplayBlock->topo_id.product_id[0]; + pTileTopo->tile_topology_id.serial_number = pTiledDisplayBlock->topo_id.serial_number[3] << 24 | + pTiledDisplayBlock->topo_id.serial_number[2] << 16 | + pTiledDisplayBlock->topo_id.serial_number[1] << 8 | + pTiledDisplayBlock->topo_id.serial_number[0]; + } + else + { + return NVT_STATUS_ERR; + } + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20ContainerId( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_CONTAINERID *pContainerId) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_CONTAINERID_BLOCK *pContainerIdBlock = NULL; + + if ((pDataBlock->type == DISPLAYID_2_0_BLOCK_TYPE_CONTAINER_ID) && + (pDataBlock->data_bytes == DISPLAYID_2_0_CONTAINERID_BLOCK_PAYLOAD_LENGTH)) + { + pContainerIdBlock = (const 
DISPLAYID_2_0_CONTAINERID_BLOCK *)pDataBlock; + + pContainerId->revision = pDataBlock->revision; + pContainerId->data1 = pContainerIdBlock->container_id[0] << 24 | + pContainerIdBlock->container_id[1] << 16 | + pContainerIdBlock->container_id[2] << 8 | + pContainerIdBlock->container_id[3]; + pContainerId->data2 = pContainerIdBlock->container_id[4] << 8 | + pContainerIdBlock->container_id[5]; + pContainerId->data3 = pContainerIdBlock->container_id[6] << 8 | + pContainerIdBlock->container_id[7]; + pContainerId->data4 = pContainerIdBlock->container_id[8] << 8 | + pContainerIdBlock->container_id[9]; + pContainerId->data5[0] = pContainerIdBlock->container_id[10]; + pContainerId->data5[1] = pContainerIdBlock->container_id[11]; + pContainerId->data5[2] = pContainerIdBlock->container_id[12]; + pContainerId->data5[3] = pContainerIdBlock->container_id[13]; + pContainerId->data5[4] = pContainerIdBlock->container_id[14]; + pContainerId->data5[5] = pContainerIdBlock->container_id[15]; + } + else + { + status = NVT_STATUS_ERR; + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20VendorSpecific( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_VENDOR_SPECIFIC *pVendorSpecific) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK *block = NULL; + NvU32 ieee_oui = 0; + + if (pDataBlock->type == DISPLAYID_2_0_BLOCK_TYPE_VENDOR_SPEC) + { + block = (const DISPLAYID_2_0_VENDOR_SPECIFIC_BLOCK*)pDataBlock; + ieee_oui = (NvU32)((block->vendor_id[0] << 16) | + (block->vendor_id[1] << 8) | + (block->vendor_id[2])); + + switch (ieee_oui) + { + case NVT_VESA_VENDOR_SPECIFIC_IEEE_ID: + // TODO: below parser shall be updated if DID21 changed in the future + if (pDataBlock->data_bytes == NVT_VESA_VENDOR_SPECIFIC_LENGTH) + { + pVendorSpecific->vesaVsdb.data_struct_type.type = block->vendor_specific_data[3] & NVT_VESA_ORG_VSDB_DATA_TYPE_MASK; + pVendorSpecific->vesaVsdb.data_struct_type.color_space_and_eotf = (block->vendor_specific_data[3] & NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_MASK) >> NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_SHIFT; + + pVendorSpecific->vesaVsdb.overlapping.pixels_overlapping_count = block->vendor_specific_data[4] & NVT_VESA_ORG_VSDB_PIXELS_OVERLAPPING_MASK; + pVendorSpecific->vesaVsdb.overlapping.multi_sst = (block->vendor_specific_data[4] & NVT_VESA_ORG_VSDB_MULTI_SST_MODE_MASK) >> NVT_VESA_ORG_VSDB_MULTI_SST_MODE_SHIFT; + + pVendorSpecific->vesaVsdb.pass_through_integer.pass_through_integer_dsc = block->vendor_specific_data[5] & NVT_VESA_ORG_VSDB_PASS_THROUGH_INTEGER_MASK; + pVendorSpecific->vesaVsdb.pass_through_fractional.pass_through_fraction_dsc = block->vendor_specific_data[6] & NVT_VESA_ORG_VSDB_PASS_THROUGH_FRACTIOINAL_MASK; + } + else + { + status = NVT_STATUS_ERR; + } + break; + + default: + break; + } + } + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20CtaData( + const DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + const DISPLAYID_2_0_CTA_BLOCK * ctaBlock = NULL; + NVT_EDID_CEA861_INFO *p861Info = NULL; + NvU8 *pcta_data = NULL; + + if (pDataBlock->type == DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA) + { + ctaBlock = (const DISPLAYID_2_0_CTA_BLOCK *)pDataBlock; + + // WAR here to add a (size_t) cast for casting member from const to non-const in order to avoid Linux old compiler failed in DVS. 
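+        // (The intermediate (size_t) cast launders the const qualifier: a direct (NvU8 *) cast
+        // from the const cta_data member is what the older Linux compilers used in DVS reject.)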
+ pcta_data = (NvU8 *)(size_t)ctaBlock->cta_data; + + status = parseCta861DataBlockInfo(pcta_data, pDataBlock->data_bytes, &pDisplayIdInfo->cta.cta861_info); + if (status != NVT_STATUS_SUCCESS) + { + return status; + } + + p861Info = &pDisplayIdInfo->cta.cta861_info; + + parseCta861VsdbBlocks(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + + parseCta861HfScdb(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + + // This CTA 861 function to parse 861 part + parse861bShortTiming(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + + // yuv420-only video + parse861bShortYuv420Timing(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + + parseCea861HdrStaticMetadataDataBlock(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + + // CEA861-F at 7.5.12 section about VFPDB block. + if (p861Info->total_vfpdb != 0) + { + parse861bShortPreferredTiming(p861Info, pDisplayIdInfo, FROM_DISPLAYID_20_DATA_BLOCK); + } + + return status; + + } + else + { + return NVT_STATUS_ERR; + } +} + +// Helper function +CODE_SEGMENT(PAGE_DD_CODE) +static NvU32 +greatestCommonDenominator( + NvU32 x, + NvU32 y) +{ + NvU32 g = 0; + + while (x > 0) + { + g = x; + x = y % x; + y = g; + } + return g; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +getPrimaryUseCase( + NvU8 product_type, + NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE *primary_use_case) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + + switch (product_type) + { + case DISPLAYID_2_0_PROD_TEST: + *primary_use_case = PRODUCT_PRIMARY_USE_TEST_EQUIPMENT; + break; + case DISPLAYID_2_0_PROD_GENERIC_DISPLAY: + *primary_use_case = PRODUCT_PRIMARY_USE_GENERIC_DISPLAY; + break; + case DISPLAYID_2_0_PROD_TELEVISION: + *primary_use_case = PRODUCT_PRIMARY_USE_TELEVISION; + break; + case DISPLAYID_2_0_PROD_DESKTOP_PRODUCTIVITY_DISPLAY: + *primary_use_case = PRODUCT_PRIMARY_USE_DESKTOP_PRODUCTIVITY; + break; + case DISPLAYID_2_0_PROD_DESKTOP_GAMING_DISPLAY: + *primary_use_case = PRODUCT_PRIMARY_USE_DESKTOP_GAMING; + break; + case DISPLAYID_2_0_PROD_PRESENTATION_DISPLAY: + *primary_use_case = PRODUCT_PRIMARY_USE_PRESENTATION; + break; + case DISPLAYID_2_0_PROD_HMD_VR: + *primary_use_case = PRODUCT_PRIMARY_USE_HEAD_MOUNT_VIRTUAL_REALITY; + break; + case DISPLAYID_2_0_PROD_HMD_AR: + *primary_use_case = PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY; + break; + case DISPLAYID_2_0_PROD_EXTENSION: + status = NVT_STATUS_ERR; + break; + default: + status = NVT_STATUS_ERR; + } + + return status; +} + +// used in DID20 and DID20ext +CODE_SEGMENT(PAGE_DD_CODE) +NvU8 +computeDisplayId20SectionCheckSum( + const NvU8 *pSectionBytes, + NvU32 length) +{ + + NvU32 i = 0; + NvU32 checkSum = 0; + + // Each DisplayID section composed of five mandatory bytes: + // DisplayID Structure Version and Revision + // Section Size + // Product Primary Use Case + // Extension Count + // Checksum + for (i = 0, checkSum = 0; i < length; i++) + { + checkSum += pSectionBytes[i]; + } + + return (checkSum & 0xFF); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool +assignNextAvailableDisplayId20Timing( + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + const NVT_TIMING *pTiming) +{ + if (pDisplayIdInfo->total_timings >= COUNT(pDisplayIdInfo->timing)) + { + return NV_FALSE; + } + + pDisplayIdInfo->timing[pDisplayIdInfo->total_timings] = *pTiming; + pDisplayIdInfo->total_timings++; + + return NV_TRUE; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing7Descriptor( + const DISPLAYID_2_0_TIMING_7_DESCRIPTOR *pDescriptor, + NVT_TIMING *pTiming, + NvU8 revision, + NvU8 count) 
+{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 gdc = 0; + + // pclk is in 10Khz + // pixel_clock is in kHz + pTiming->pclk = ((pDescriptor->pixel_clock[2] << 16 | + pDescriptor->pixel_clock[1] << 8 | + pDescriptor->pixel_clock[0]) + 1) / 10; + + pTiming->HBorder = 0; + pTiming->VBorder = 0; + + pTiming->HVisible = ((pDescriptor->horizontal.active_image_pixels[1] << 8) | + (pDescriptor->horizontal.active_image_pixels[0])) + 1; + pTiming->VVisible = ((pDescriptor->vertical.active_image_lines[1] << 8) | + (pDescriptor->vertical.active_image_lines[0])) + 1; + + pTiming->HTotal = (((pDescriptor->horizontal.blank_pixels[1] << 8) | + (pDescriptor->horizontal.blank_pixels[0])) + 1) + + pTiming->HVisible; + pTiming->VTotal = (((pDescriptor->vertical.blank_lines[1] << 8) | + (pDescriptor->vertical.blank_lines[0])) + 1) + + pTiming->VVisible; + + pTiming->HFrontPorch = ((pDescriptor->horizontal.front_porch_pixels_high << 8) | + (pDescriptor->horizontal.front_porch_pixels_low)) + 1; + pTiming->VFrontPorch = ((pDescriptor->vertical.front_porch_lines_high << 8) | + (pDescriptor->vertical.front_porch_lines_low)) + 1; + + pTiming->HSyncWidth = ((pDescriptor->horizontal.sync_width_pixels[1] << 8) | + (pDescriptor->horizontal.sync_width_pixels[0])) + 1; + pTiming->VSyncWidth = ((pDescriptor->vertical.sync_width_lines[1] << 8) | + (pDescriptor->vertical.sync_width_lines[0])) + 1; + + pTiming->HSyncPol = pDescriptor->horizontal.sync_polarity ? NVT_H_SYNC_POSITIVE : + NVT_H_SYNC_NEGATIVE; + pTiming->VSyncPol = pDescriptor->vertical.sync_polarity ? NVT_V_SYNC_POSITIVE : + NVT_V_SYNC_NEGATIVE; + + // EDID used in DP1.4 Compliance test had incorrect HBlank listed, leading to wrong raster sizes being set by driver (bug 2714607) + // Filter incorrect timings here. HTotal must cover sufficient blanking time + if (pTiming->HTotal < (pTiming->HVisible + pTiming->HFrontPorch + pTiming->HSyncWidth)) + { + return NVT_STATUS_ERR; + } + + pTiming->interlaced = pDescriptor->options.interface_frame_scanning_type; + + switch (pDescriptor->options.aspect_ratio) + { + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_1_1: + pTiming->etc.aspect = (1 << 16) | 1; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_5_4: + pTiming->etc.aspect = (5 << 16) | 4; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_4_3: + pTiming->etc.aspect = (4 << 16) | 3; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_15_9: + pTiming->etc.aspect = (15 << 16) | 9; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_9: + pTiming->etc.aspect = (16 << 16) | 9; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_16_10: + pTiming->etc.aspect = (16 << 16) | 10; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_64_27: + pTiming->etc.aspect = (64 << 16) | 27; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_256_135: + pTiming->etc.aspect = (256 << 16) | 135; + break; + case DISPLAYID_2_0_TIMING_ASPECT_RATIO_CALCULATE: + gdc = greatestCommonDenominator(pTiming->HVisible, pTiming->VVisible); + if (gdc != 0) + { + pTiming->etc.aspect = ((pTiming->HVisible / gdc) << 16) | + (pTiming->VVisible / gdc); + } + else + { + pTiming->etc.aspect = 0; + } + break; + default: + pTiming->etc.aspect = 0; + } + + pTiming->etc.rr = NvTiming_CalcRR(pTiming->pclk, + pTiming->interlaced, + pTiming->HTotal, + pTiming->VTotal); + pTiming->etc.rrx1k = NvTiming_CalcRRx1k(pTiming->pclk, + pTiming->interlaced, + pTiming->HTotal, + pTiming->VTotal); + + pTiming->etc.flag |= (revision >= DISPLAYID_2_0_TYPE7_DSC_PASSTHRU_REVISION ) ? 
NVT_FLAG_DISPLAYID_7_DSC_PASSTHRU : 0; + if (revision >= DISPLAYID_2_0_TYPE7_YCC420_SUPPORT_REVISION) + { + pTiming->etc.flag |= pDescriptor->options.is_preferred_or_ycc420 ? NVT_FLAG_DISPLAYID_2_0_EXPLICT_YUV420 : 0; + + if (pDescriptor->options.is_preferred_or_ycc420) // YCC 420 support + { + UPDATE_BPC_FOR_COLORFORMAT(pTiming->etc.yuv420, 0, 1, 1, 1, 0, 1); + } + } + else + { + pTiming->etc.flag |= pDescriptor->options.is_preferred_or_ycc420 ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0; + } + + pTiming->etc.status = NVT_STATUS_DISPLAYID_7N(++count); + + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type7:#%2d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)((pTiming->interlaced ? 2 : 1)*pTiming->VVisible), + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + pTiming->etc.name[sizeof(pTiming->etc.name) - 1] = '\0'; + + pTiming->etc.rep = 0x1; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing9Descriptor( + const DISPLAYID_2_0_TIMING_9_DESCRIPTOR *pDescriptor, + NVT_TIMING *pTiming, + NvU8 count) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NvU32 width = 0; + NvU32 height = 0; + NvU32 rr = 0; + + width = (pDescriptor->horizontal_active_pixels[1] << 8 | pDescriptor->horizontal_active_pixels[0]) + 1; + height = (pDescriptor->vertical_active_lines[1] << 8 | pDescriptor->vertical_active_lines[0]) + 1; + rr = pDescriptor->refresh_rate + 1; + + switch (pDescriptor->options.timing_formula) + { + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD: + status = NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1: + status = NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_2: + status = NvTiming_CalcCVT_RB2(width, height, rr, pDescriptor->options.fractional_refresh_rate_support, pTiming); + break; + default: + status = NVT_STATUS_ERR; + break; + } + + if (status == NVT_STATUS_SUCCESS) + { + NVMISC_MEMSET(pTiming->etc.name, 0, sizeof(pTiming->etc.name)); + pTiming->etc.status = NVT_STATUS_DISPLAYID_9N(++count); + + if ( pDescriptor->options.timing_formula== DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type9:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + else if (pDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type9-RB1:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? 
"I":"P")); + } + else if (pDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_2) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type9-RB2:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + } + + pTiming->etc.name[sizeof(pTiming->etc.name) - 1] = '\0'; + + return status; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20Timing10Descriptor( + const void *pDescriptor, + NVT_TIMING *pTiming, + NvU8 payloadbytes, + NvU8 count) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR* p6bytesDescriptor = NULL; + const DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR* p7bytesDescriptor = NULL; + NvU32 width = 0; + NvU32 height = 0; + NvU32 rr = 0; + + p6bytesDescriptor = (const DISPLAYID_2_0_TIMING_10_6BYTES_DESCRIPTOR *)pDescriptor; + + width = (p6bytesDescriptor->horizontal_active_pixels[1] << 8 | p6bytesDescriptor->horizontal_active_pixels[0]) + 1; + height = (p6bytesDescriptor->vertical_active_lines[1] << 8 | p6bytesDescriptor->vertical_active_lines[0]) + 1; + rr = p6bytesDescriptor->refresh_rate + 1; + + if (payloadbytes == DISPLAYID_2_0_TIMING_10_PAYLOAD_BYTES_7) + { + p7bytesDescriptor = (const DISPLAYID_2_0_TIMING_10_7BYTES_DESCRIPTOR *)pDescriptor; + rr = (p7bytesDescriptor->descriptor_6_bytes.refresh_rate | p7bytesDescriptor->refresh_rate_high << 8) + 1; + } + + switch (p6bytesDescriptor->options.timing_formula) + { + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD: + status = NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1: + status = NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_2: + status = NvTiming_CalcCVT_RB2(width, height, rr, p6bytesDescriptor->options.vrr_or_hblank, pTiming); + break; + case DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_3: + { + /* Will uncomment this if we used the new RB3 interface. 
+ NvU32 hBlankValue = 0; + + if (p6bytesDescriptor->options.vrr_or_hblank == 0) // Horizontal Blank in Pixels = [Field Value] * 8 + 80 + { + hBlankValue = p7bytesDescriptor->delta_hblank * 8 + 80; + } + else if (p6bytesDescriptor->options.vrr_or_hblank == 1) + { + if (p7bytesDescriptor->delta_hblank <= 5) + hBlankValue = p7bytesDescriptor->delta_hblank * 8 + 160; + else // if 5 < Field Value <=7 + hBlankValue = 160 - (p7bytesDescriptor->delta_hblank * 8); + } + */ + + // TODO : Need to handle 7:5 bit at 6 byte -Additional Vertical Blank Time in % of frame time for the defined Refresh Rate + //status = NvTiming_CalcCVT_RB3(width, height, rr, hBlankValue, pTiming); + break; + } + } + + if ( status == NVT_STATUS_SUCCESS ) + { + NVMISC_MEMSET(pTiming->etc.name, 0, sizeof(pTiming->etc.name)); + pTiming->etc.status = NVT_STATUS_DISPLAYID_10N(++count); + + if (p6bytesDescriptor->options.ycc420_support) + { + // YCC 420 support + UPDATE_BPC_FOR_COLORFORMAT(pTiming->etc.yuv420, 0, 1, 1, 1, 0, 1); + } + + if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_STANDARD) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type10:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + + } + else if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_1) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type10RB1:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + else if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_2) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type10RB2:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? "I":"P")); + } + else if (p6bytesDescriptor->options.timing_formula == DISPLAYID_2_0_TIMING_FORMULA_CVT_1_2_REDUCED_BLANKING_3) + { + NVT_SNPRINTF((char *)pTiming->etc.name, sizeof(pTiming->etc.name), "DID20-Type10RB3:#%3d:%dx%dx%3d.%03dHz/%s", + (int)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status), + (int)pTiming->HVisible, + (int)pTiming->VVisible, + (int)pTiming->etc.rrx1k/1000, + (int)pTiming->etc.rrx1k%1000, + (pTiming->interlaced ? 
"I":"P")); + } + } + + pTiming->etc.name[sizeof(pTiming->etc.name) - 1] = '\0'; + + return status; +} + +// get the existed stored timing sequence number +CODE_SEGMENT(PAGE_DD_CODE) +static NvU8 +getExistedTimingSeqNumber( + NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, + enum NVT_TIMING_TYPE timingType) +{ + NvU8 count = 0; + NvU8 i = 0; + + switch (timingType) + { + case NVT_TYPE_DISPLAYID_7: + case NVT_TYPE_DISPLAYID_8: + case NVT_TYPE_DISPLAYID_9: + case NVT_TYPE_DISPLAYID_10: + break; + default: + return count; + } + + for (i = 0; i< pDisplayIdInfo->total_timings; i++) + { + if (NVT_GET_TIMING_STATUS_TYPE(pDisplayIdInfo->timing[i].etc.status) == timingType) + { + ++count; + } + } + + return count; +} + +// get the version +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 getDID2Version(NvU8 *pData, NvU32 *pVer) +{ + const DISPLAYID_2_0_SECTION *pSection = (const DISPLAYID_2_0_SECTION*)pData; + + *pVer = 0; + if (pSection->header.version == 0x2) + { + *pVer = (((NvU32)pSection->header.version) << 8) + ((NvU32)pSection->header.revision); + } + else + return NVT_STATUS_ERR; // un-recongnized DisplayID20 version + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void +updateColorFormatForDisplayId20Timings( + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info, + NvU32 timingIdx) +{ + // pDisplayId20Info parsed displayID20 info + NVT_TIMING *pT= &pDisplayId20Info->timing[timingIdx]; + + nvt_assert(timingIdx <= COUNT(pDisplayId20Info->timing)); + + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayId20Info->interface_features.rgb444.bpc.bpc10, + pDisplayId20Info->interface_features.rgb444.bpc.bpc12, + pDisplayId20Info->interface_features.rgb444.bpc.bpc14, + pDisplayId20Info->interface_features.rgb444.bpc.bpc16); + + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, // yuv444 does not support 6bpc + pDisplayId20Info->interface_features.yuv444.bpc.bpc8, + pDisplayId20Info->interface_features.yuv444.bpc.bpc10, + pDisplayId20Info->interface_features.yuv444.bpc.bpc12, + pDisplayId20Info->interface_features.yuv444.bpc.bpc14, + pDisplayId20Info->interface_features.yuv444.bpc.bpc16); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, // yuv422 does not support 6bpc + pDisplayId20Info->interface_features.yuv422.bpc.bpc8, + pDisplayId20Info->interface_features.yuv422.bpc.bpc10, + pDisplayId20Info->interface_features.yuv422.bpc.bpc12, + pDisplayId20Info->interface_features.yuv422.bpc.bpc14, + pDisplayId20Info->interface_features.yuv422.bpc.bpc16); + + if (!NVT_DID20_TIMING_IS_CTA861(pT->etc.flag, pT->etc.status)) + { + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, // yuv420 does not support 6bpc + pDisplayId20Info->interface_features.yuv420.bpc.bpc8, + pDisplayId20Info->interface_features.yuv420.bpc.bpc10, + pDisplayId20Info->interface_features.yuv420.bpc.bpc12, + pDisplayId20Info->interface_features.yuv420.bpc.bpc14, + pDisplayId20Info->interface_features.yuv420.bpc.bpc16); + } +} +POP_SEGMENTS + diff --git a/src/common/modeset/timing/nvt_dmt.c b/src/common/modeset/timing/nvt_dmt.c new file mode 100644 index 000000000..d644d37eb --- /dev/null +++ b/src/common/modeset/timing/nvt_dmt.c @@ -0,0 +1,272 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_dmt.c +// +// Purpose: calculate DMT/DMT-RB timing +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +// DMT table +// Macro to declare a TIMING initializer for given parameters without border +#define DMT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT,id),"VESA DMT"}} + +#define DMTRB_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB,id),"VESA DMT/RB"}} + +#define DMTRB_2_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,id) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,{0,rr,set_rrx1k(pclk,ht,vt),0,0x1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB_2,id),"VESA DMT/RB2"}} + +DATA_SEGMENT(PAGE_DATA) + +static NVT_TIMING DMT[] = +{ + // VESA standard entries (ordered according to VESA DMT ID). 
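+    // Column legend: hv/vv = active pixels/lines, hfp/vfp = front porch, hsw/vsw = sync width,
+    // ht/vt = total pixels/lines, hsp/vsp = sync polarity ('-' = negative), rr = refresh rate in Hz,
+    // pclk = pixel clock in 10 kHz units, id = VESA DMT ID.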
+ // hv,hfp,hsw, ht,hsp, vv,vfp,vsw, vt,vsp, rr,pclk , id + DMT_TIMING ( 640, 32, 64, 832,'+', 350, 32, 3, 445,'-', 85, 3150, 0x01), + DMT_TIMING ( 640, 32, 64, 832,'-', 400, 1, 3, 445,'+', 85, 3150, 0x02), + DMT_TIMING ( 720, 36, 72, 936,'-', 400, 1, 3, 446,'+', 85, 3550, 0x03), + DMT_TIMING ( 640, 8, 96, 800,'-', 480, 2, 2, 525,'-', 60, 2518, 0x04), + // 640x480x72Hz (VESA) - this entry have borders + {640,8,16,40,832,NVT_H_SYNC_NEGATIVE,480,8,1,3,520,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,3150,{0,72,72000,0,1,{0},{0},{0},{0},NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT,5),"VESA DMT"}}, + DMT_TIMING ( 640, 16, 64, 840,'-', 480, 1, 3, 500,'-', 75, 3150, 0x06), + DMT_TIMING ( 640, 56, 56, 832,'-', 480, 1, 3, 509,'-', 85, 3600, 0x07), + DMT_TIMING ( 800, 24, 72,1024,'+', 600, 1, 2, 625,'+', 56, 3600, 0x08), + DMT_TIMING ( 800, 40,128,1056,'+', 600, 1, 4, 628,'+', 60, 4000, 0x09), + DMT_TIMING ( 800, 56,120,1040,'+', 600, 37, 6, 666,'+', 72, 5000, 0x0A), + DMT_TIMING ( 800, 16, 80,1056,'+', 600, 1, 3, 625,'+', 75, 4950, 0x0B), + DMT_TIMING ( 800, 32, 64,1048,'+', 600, 1, 3, 631,'+', 85, 5625, 0x0C), + DMTRB_TIMING( 800, 48, 32, 960,'+', 600, 3, 4, 636,'-',120, 7325, 0x0D), + DMT_TIMING ( 848, 16,112,1088,'+', 480, 6, 8, 517,'+', 60, 3375, 0x0E), + DMT_TIMING (1024, 8,176,1264,'+', 768, 0, 4, 817,'+', 43, 4490, 0x0F), + DMT_TIMING (1024, 24,136,1344,'-', 768, 3, 6, 806,'-', 60, 6500, 0x10), + DMT_TIMING (1024, 24,136,1328,'-', 768, 3, 6, 806,'-', 70, 7500, 0x11), + DMT_TIMING (1024, 16, 96,1312,'+', 768, 1, 3, 800,'+', 75, 7875, 0x12), + DMT_TIMING (1024, 48, 96,1376,'+', 768, 1, 3, 808,'+', 85, 9450, 0x13), + DMTRB_TIMING(1024, 48, 32,1184,'+', 768, 3, 4, 813,'-',120,11550, 0x14), + DMT_TIMING (1152, 64,128,1600,'+', 864, 1, 3, 900,'+', 75,10800, 0x15), + DMTRB_TIMING(1280, 48, 32,1440,'+', 768, 3, 7, 790,'-', 60, 6825, 0x16), + DMT_TIMING (1280, 64,128,1664,'-', 768, 3, 7, 798,'+', 60, 7950, 0x17), + DMT_TIMING (1280, 80,128,1696,'-', 768, 3, 7, 805,'+', 75,10225, 0x18), + DMT_TIMING (1280, 80,136,1712,'-', 768, 3, 7, 809,'+', 85,11750, 0x19), + DMTRB_TIMING(1280, 48, 32,1440,'+', 768, 3, 7, 813,'-',120,14025, 0x1A), + DMTRB_TIMING(1280, 48, 32,1440,'+', 800, 3, 6, 823,'-', 60, 7100, 0x1B), + DMT_TIMING (1280, 72,128,1680,'-', 800, 3, 6, 831,'+', 60, 8350, 0x1C), + DMT_TIMING (1280, 80,128,1696,'-', 800, 3, 6, 838,'+', 75,10650, 0x1D), + DMT_TIMING (1280, 80,136,1712,'-', 800, 3, 6, 843,'+', 85,12250, 0x1E), + DMTRB_TIMING(1280, 48, 32,1440,'+', 800, 3, 6, 847,'-',120,14625, 0x1F), + DMT_TIMING (1280, 96,112,1800,'+', 960, 1, 3,1000,'+', 60,10800, 0x20), + DMT_TIMING (1280, 64,160,1728,'+', 960, 1, 3,1011,'+', 85,14850, 0x21), + DMTRB_TIMING(1280, 48, 32,1440,'+', 960, 3, 4,1017,'-',120,17550, 0x22), + DMT_TIMING (1280, 48,112,1688,'+',1024, 1, 3,1066,'+', 60,10800, 0x23), + DMT_TIMING (1280, 16,144,1688,'+',1024, 1, 3,1066,'+', 75,13500, 0x24), + DMT_TIMING (1280, 64,160,1728,'+',1024, 1, 3,1072,'+', 85,15750, 0x25), + DMTRB_TIMING(1280, 48, 32,1440,'+',1024, 3, 7,1084,'-',120,18725, 0x26), + DMT_TIMING (1360, 64,112,1792,'+', 768, 3, 6, 795,'+', 60, 8550, 0x27), + DMTRB_TIMING(1360, 48, 32,1520,'+', 768, 3, 5, 813,'-',120,14825, 0x28), + DMTRB_TIMING(1400, 48, 32,1560,'+',1050, 3, 4,1080,'-', 60,10100, 0x29), + DMT_TIMING (1400, 88,144,1864,'-',1050, 3, 4,1089,'+', 60,12175, 0x2A), + DMT_TIMING (1400,104,144,1896,'-',1050, 3, 4,1099,'+', 75,15600, 0x2B), + DMT_TIMING (1400,104,152,1912,'-',1050, 3, 4,1105,'+', 85,17950, 0x2C), + DMTRB_TIMING(1400, 48, 32,1560,'+',1050, 3, 4,1050,'-',120,20800, 0x2D), 
+ DMTRB_TIMING(1440, 48, 32,1600,'+', 900, 3, 6, 926,'-', 60, 8875, 0x2E), + DMT_TIMING (1440, 80,152,1904,'-', 900, 3, 6, 934,'+', 60,10650, 0x2F), + DMT_TIMING (1440, 96,152,1936,'-', 900, 3, 6, 942,'+', 75,13675, 0x30), + DMT_TIMING (1440,104,152,1952,'-', 900, 3, 6, 948,'+', 85,15700, 0x31), + DMTRB_TIMING(1440, 48, 32,1600,'+', 900, 3, 6, 953,'-',120,18275, 0x32), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 60,16200, 0x33), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 65,17550, 0x34), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 70,18900, 0x35), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 75,20250, 0x36), + DMT_TIMING (1600, 64,192,2160,'+',1200, 1, 3,1250,'+', 85,22950, 0x37), + DMTRB_TIMING(1600, 48, 32,1760,'+',1200, 3, 4,1271,'-',120,26825, 0x38), + DMTRB_TIMING(1680, 48, 32,1840,'+',1050, 3, 6,1080,'-', 60,11900, 0x39), + DMT_TIMING (1680,104,176,2240,'-',1050, 3, 6,1089,'+', 60,14625, 0x3A), + DMT_TIMING (1680,120,176,2272,'-',1050, 3, 6,1099,'+', 75,18700, 0x3B), + DMT_TIMING (1680,128,176,2288,'-',1050, 3, 6,1105,'+', 85,21475, 0x3C), + DMTRB_TIMING(1680, 48, 32,1840,'+',1050, 3, 6,1112,'-',120,24550, 0x3D), + DMT_TIMING (1792,128,200,2448,'-',1344, 1, 3,1394,'+', 60,20475, 0x3E), + DMT_TIMING (1792, 96,216,2456,'-',1344, 1, 3,1417,'+', 75,26100, 0x3F), + DMTRB_TIMING(1792, 48, 32,1952,'+',1344, 3, 4,1423,'-',120,33325, 0x40), + DMT_TIMING (1856, 96,224,2528,'-',1392, 1, 3,1439,'+', 60,21825, 0x41), + DMT_TIMING (1856,128,224,2560,'-',1392, 1, 3,1500,'+', 75,28800, 0x42), + DMTRB_TIMING(1856, 48, 32,2016,'+',1392, 3, 4,1474,'-',120,35650, 0x43), + DMTRB_TIMING(1920, 48, 32,2080,'+',1200, 3, 6,1235,'-', 60,15400, 0x44), + DMT_TIMING (1920,136,200,2592,'-',1200, 3, 6,1245,'+', 60,19325, 0x45), + DMT_TIMING (1920,136,208,2608,'-',1200, 3, 6,1255,'+', 75,24525, 0x46), + DMT_TIMING (1920,144,208,2624,'-',1200, 3, 6,1262,'+', 85,28125, 0x47), + DMTRB_TIMING(1920, 48, 32,2080,'+',1200, 3, 6,1271,'-',120,31700, 0x48), + DMT_TIMING (1920,128,208,2600,'-',1440, 1, 3,1500,'+', 60,23400, 0x49), + DMT_TIMING (1920,144,224,2640,'-',1440, 1, 3,1500,'+', 75,29700, 0x4A), + DMTRB_TIMING(1920, 48, 32,2080,'+',1440, 3, 4,1525,'-',120,38050, 0x4B), + DMTRB_TIMING(2560, 48, 32,2720,'+',1600, 3, 6,1646,'-', 60,26850, 0x4C), + DMT_TIMING (2560,192,280,3504,'-',1600, 3, 6,1658,'+', 60,34850, 0x4D), + DMT_TIMING (2560,208,280,3536,'-',1600, 3, 6,1672,'+', 75,44325, 0x4E), + DMT_TIMING (2560,208,280,3536,'-',1600, 3, 6,1682,'+', 85,50525, 0x4F), + DMTRB_TIMING(2560, 48, 32,2720,'+',1600, 3, 6,1694,'-',120,55275, 0x50), + DMT_TIMING (1366, 70,143,1792,'+',768 , 3, 3, 798,'+', 60, 8550, 0x51),//1366 x 768 @60 (non-interlaced) DMT ID: 51h + DMT_TIMING (1920, 88, 44,2200,'+',1080, 4, 5,1125,'+', 60,14850, 0x52),//1920 x 1080 @60 (non-interlaced) DMT ID: 52h + DMTRB_TIMING(1600, 24, 80,1800,'+', 900, 1, 3,1000,'+', 60,10800, 0x53),//1600 x 900 @60 (non-interlaced) DMT ID: 53h + DMTRB_TIMING(2048, 26, 80,2250,'+',1152, 1, 3,1200,'+', 60,16200, 0x54),//2048 x 1152 @60 (non-interlaced) DMT ID: 54h + DMT_TIMING (1280,110, 40,1650,'+', 720, 5, 5, 750,'+', 60, 7425, 0x55),//1280 x 720 @60 (non-interlaced) DMT ID: 55h + DMTRB_TIMING(1366, 14, 56,1500,'+', 768, 1, 3, 800,'+', 60, 7200, 0x56),//1366 x 768 @60 (non-interlaced) DMT ID: 56h + + // Added timing definitions in DMT 1.3 Version 1.0, Rev. 
13 + DMTRB_2_TIMING(4096, 8, 56,4176,'+', 2160, 48, 8, 2222,'-', 60,55674, 0x57),//4096 x 2160 @60 (non-interlaced) DMT ID: 57h + DMTRB_2_TIMING(4096, 8, 56,4176,'+', 2160, 48, 8, 2222,'-', 59,55619, 0x58),//4096 x 2160 @60 (non-interlaced) DMT ID: 58h + + // ******************************** + // Additional non-standard entries. + // ******************************** + + // Settings for 640x400 + // GTF timing for 640x400x60Hz has too low HFreq, this is a + // Specially constructed timing from 640x480, with extra blanking + // on top and bottom of the screen + + DMT_TIMING(640,16,96,800,'-',400,50,2,525,'-',60,2518,0), + DMT_TIMING(640,16,96,800,'+',400,12,2,449,'-',70,2518,0), + + // the end of table + NVT_TIMING_SENTINEL +}; +static NvU32 MAX_DMT_FORMAT = sizeof(DMT)/sizeof(DMT[0]) - 1; + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumDMT(NvU32 dmtId, NVT_TIMING *pT) +{ + if ((pT == NULL) || (dmtId == 0)) + { + return NVT_STATUS_ERR; + } + + // The last entry is not used. + if (dmtId > MAX_DMT_FORMAT) + { + return NVT_STATUS_ERR; + } + + // Make sure that the DMT ID matches according to the array index. + if (NVT_GET_TIMING_STATUS_SEQ(DMT[dmtId - 1].etc.status) == dmtId) + { + *pT = DMT[dmtId - 1]; + + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, + (NvU32)10000*(NvU32)1000, + (NvU32)pT->HTotal*(NvU32)pT->VTotal); + NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT:#%d:%dx%dx%dHz", + dmtId, pT->HVisible, pT->VVisible, pT->etc.rr); + ((char *)pT->etc.name)[39] = '\0'; + + return NVT_STATUS_SUCCESS; + } + + return NVT_STATUS_ERR; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcDMT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NVT_TIMING *p = (NVT_TIMING *)DMT; + + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + // no interlaced DMT timing + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + return NVT_STATUS_ERR; + + while (p->HVisible != 0 && p->VVisible != 0) + { + if (NVT_GET_TIMING_STATUS_TYPE(p->etc.status) == NVT_TYPE_DMT) + { + if ((NvU32)p->HVisible == width && + (NvU32)p->VVisible == height && + (NvU32)p->etc.rr == rr) + { + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + *pT = *p; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + pT->etc.rgb444.bpc.bpc8 = 1; + return NVT_STATUS_SUCCESS; + } + } + p ++; + } + + // if we couldn't find a DMT with regular blanking, try the DMT with reduced blanking next + return NvTiming_CalcDMT_RB(width, height, rr, flag, pT); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcDMT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NVT_TIMING *p = (NVT_TIMING *)DMT; + + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + // no interlaced DMT timing + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + return NVT_STATUS_ERR; + + while (p->HVisible != 0 && p->VVisible != 0) + { + // select only reduced-bandwidth timing. 
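+        // (NVT_TYPE_DMT_RB entries only; NVT_TYPE_DMT_RB_2 entries, e.g. the 4096x2160 timings,
+        // are not matched by this helper.)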
+ if (NVT_GET_TIMING_STATUS_TYPE(p->etc.status) == NVT_TYPE_DMT_RB) + { + if ((NvU32)p->HVisible == width && + (NvU32)p->VVisible == height && + (NvU32)p->etc.rr == rr) + { + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + *pT = *p; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + NVT_SNPRINTF((char *)pT->etc.name, 40, "DMT-RB:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + pT->etc.rgb444.bpc.bpc8 = 1; + return NVT_STATUS_SUCCESS; + } + } + p ++; + } + return NVT_STATUS_ERR; +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_dsc_pps.c b/src/common/modeset/timing/nvt_dsc_pps.c new file mode 100644 index 000000000..cc2f3fd16 --- /dev/null +++ b/src/common/modeset/timing/nvt_dsc_pps.c @@ -0,0 +1,2030 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +//============================================================================= +// +// Provide function to calculate PPS(Picture Parameter Set) +// +// +//============================================================================== + +/* ------------------------ Includes --------------------------------------- */ +#include "nvt_dsc_pps.h" +#include "nvmisc.h" +#include "displayport/displayport.h" +#include + +/* ------------------------ Macros ----------------------------------------- */ + +#if defined (DEBUG) +#define DSC_Print(...) \ + do { \ + if (callbacks.dscPrint) { \ + callbacks.dscPrint("DSC: " __VA_ARGS__); \ + } \ + } while(0) +#else +#define DSC_Print(...) 
do { } while(0) +#endif + +#define MIN_CHECK(s,a,b) { if((a)<(b)) { DSC_Print("%s (=%u) needs to be larger than %u",s,a,b); return (NVT_STATUS_ERR);} } +#define RANGE_CHECK(s,a,b,c) { if((((NvS32)(a))<(NvS32)(b))||(((NvS32)(a))>(NvS32)(c))) { DSC_Print("%s (=%u) needs to be between %u and %u",s,a,b,c); return (NVT_STATUS_ERR);} } +#define ENUM2_CHECK(s,a,b,c) { if(((a)!=(b))&&((a)!=(c))) { DSC_Print("%s (=%u) needs to be %u or %u",s,a,b,c); return (NVT_STATUS_ERR);} } +#define ENUM3_CHECK(s,a,b,c,d) { if(((a)!=(b))&&((a)!=(c))&&((a)!=(d))) { DSC_Print("%s (=%u) needs to be %u, %u or %u",s,a,b,c,d); return (NVT_STATUS_ERR);} } +#define MAX(a,b) (((a)>=(b) || (b == 0xffffffff))?(a):(b)) +#define MIN(a,b) ((a)>=(b)?(b):(a)) +#define CLAMP(a,b,c) ((a)<=(b)?(b):((a)>(c)?(c):(a))) +#define ADJUST_SLICE_NUM(n) ((n)>4?8:((n)>2?4:(n))) +#define MSB(a) (((a)>>8)&0xFF) +#define LSB(a) ((a)&0xFF) + +#define NUM_BUF_RANGES 15 +#define BPP_UNIT 16 +#define OFFSET_FRACTIONAL_BITS 11 +#define PIXELS_PER_GROUP 3 + +//The max pclk frequency(in Mhz) per slice +//DP1.4 spec defines the number of slices needed per display line, +//based on the pixel rate. it's about 340Mhz per slice. +#define MAX_PCLK_PER_SLICE_KHZ 340000 +//The max slice_width used in slice_width calculation +//this is not HW limitation(which is 5120 per head), just a recommendation +#define MAX_WIDTH_PER_SLICE 5120 +//RC algorithm will get better performance if slice size is bigger. +//This requires slice size be much greater than rc_model_size(8K bits) +//but bigger slice will increase the error rate of DSC slices. +//256KB is a moderate value (about 1280x200 @8bpp) +#define MIN_SLICE_SIZE (256*1024) +// Per DP 1.4 spec, sink should support slice width of up to at least 2560 (it is allowed to support more). +#define SINK_MAX_SLICE_WIDTH_DEFAULT 2560 +// Min bits per pixel supported +#define MIN_BITS_PER_PIXEL 8 +// Max bits per pixel supported +#define MAX_BITS_PER_PIXEL 32 +// Max HBlank pixel count +#define MAX_HBLANK_PIXELS 7680 + +/* ------------------------ Datatypes -------------------------------------- */ + +//input parameters to the pps calculation +typedef struct +{ + NvU32 dsc_version_minor; // DSC minor version (1-DSC1.1, 2-DSC 1.2) + NvU32 bits_per_component; // bits per component of input pixels (8,10,12) + NvU32 linebuf_depth; // bits per component of reconstructed line buffer (8 ~ 13) + NvU32 block_pred_enable; // block prediction enable (0, 1) + NvU32 convert_rgb; // input pixel format (0 YCbCr, 1 RGB) + NvU32 bits_per_pixel; // bits per pixel*BPP_UNIT (8.0*BPP_UNIT ~ 32.0*BPP_UNIT) + NvU32 pic_height; // picture height (8 ~ 8192) + NvU32 pic_width; // picture width (single mode: 32 ~ 5120, dual mode: 64 ~ 8192) + NvU32 slice_height; // 0 - auto, others (8 ~ 8192) - must be (pic_height % slice_height == 0) + NvU32 slice_width; // maximum slice_width, 0-- default: 1280. + NvU32 slice_num; // 0 - auto, others: 1,2,4,8 + NvU32 slice_count_mask; // no of slices supported by sink + NvU32 max_slice_num; // slice number cap determined from GPU and sink caps + NvU32 max_slice_width; // slice width cap determined from GPU and sink caps + NvU32 pixel_clkMHz; // pixel clock frequency in MHz, used for slice_width calculation. + NvU32 dual_mode; // 0 - single mode, 1 - dual mode, only for checking pic_width + NvU32 simple_422; // 4:2:2 simple mode + NvU32 native_420; // 420 native mode + NvU32 native_422; // 422 native mode + NvU32 drop_mode; // 0 - normal mode, 1 - drop mode. 
+ NvU32 peak_throughput_mode0; // peak throughput supported by the sink for 444 and simple 422 modes. + NvU32 peak_throughput_mode1; // peak throughput supported by the sink for native 422 and 420 modes. +} DSC_INPUT_PARAMS; + +//output pps parameters after calculation +typedef struct +{ + NvU32 dsc_version_major; // DSC major version, always 1 + NvU32 dsc_version_minor; // DSC minor version + NvU32 pps_identifier; // Application-specific identifier, always 0 + NvU32 bits_per_component; // bits per component for input pixels + NvU32 linebuf_depth; // line buffer bit depth + NvU32 block_pred_enable; // enable/disable block prediction + NvU32 convert_rgb; // color space for input pixels + NvU32 simple_422; // 4:2:2 simple mode + NvU32 vbr_enable; // enable VBR mode + NvU32 bits_per_pixel; // (bits per pixel * BPP_UNIT) after compression + NvU32 pic_height; // picture height + NvU32 pic_width; // picture width + NvU32 slice_height; // slice height + NvU32 slice_width; // slice width + NvU32 chunk_size; // the size in bytes of the slice chunks + NvU32 initial_xmit_delay; // initial transmission delay + NvU32 initial_dec_delay; // initial decoding delay + NvU32 initial_scale_value; // initial xcXformScale factor value + NvU32 scale_increment_interval; // number of group times between incrementing the rcXformScale factor + NvU32 scale_decrement_interval; // number of group times between decrementing the rcXformScale factor + NvU32 first_line_bpg_offset; // number of additional bits allocated for each group on the first line in a slice + NvU32 nfl_bpg_offset; // number of bits de-allocated for each group after the first line in a slice + NvU32 slice_bpg_offset; // number of bits de-allocated for each group to enforce the slice constrain + NvU32 initial_offset; // initial value for rcXformOffset + NvU32 final_offset; // maximum end-of-slice value for rcXformOffset + NvU32 flatness_min_qp; // minimum flatness QP + NvU32 flatness_max_qp; // maximum flatness QP + //rc_parameter_set + NvU32 rc_model_size; // number of bits within the "RC model" + NvU32 rc_edge_factor; // edge detection factor + NvU32 rc_quant_incr_limit0; // QP threshold for short-term RC + NvU32 rc_quant_incr_limit1; // QP threshold for short-term RC + NvU32 rc_tgt_offset_hi; // upper end of the target bpg range for short-term RC + NvU32 rc_tgt_offset_lo; // lower end of the target bpg range for short-term RC + NvU32 rc_buf_thresh[NUM_BUF_RANGES-1]; // thresholds in "RC model" + //rc_range_parameters + NvU32 range_min_qp[NUM_BUF_RANGES]; // minimum QP for each of the RC ranges + NvU32 range_max_qp[NUM_BUF_RANGES]; // maximum QP for each of the RC ranges + NvU32 range_bpg_offset[NUM_BUF_RANGES]; // bpg adjustment for each of the RC ranges + //420,422 + NvU32 native_420; // 420 native mode + NvU32 native_422; // 422 native mode + NvU32 second_line_bpg_offset; // 2nd line bpg offset to use, native 420 only + NvU32 nsl_bpg_offset; // non-2nd line bpg offset to use, native 420 only + NvU32 second_line_offset_adj; // adjustment to 2nd line bpg offset, native 420 only + + //additional params not in PPS + NvU32 slice_num; + NvU32 groups_per_line; + NvU32 num_extra_mux_bits; + NvU32 flatness_det_thresh; +} DSC_OUTPUT_PARAMS; + +/* ------------------------ Global Variables ------------------------------- */ + +DSC_CALLBACK callbacks; + +static const NvU8 minqp444_8b[15][37]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0} + ,{ 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0} + ,{ 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{ 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{ 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3} +}; + +static const NvU8 maxqp444_8b[15][37]={ + { 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 6, 6, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0} + ,{ 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1} + ,{10,10, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1} + ,{11,11,10,10, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1} + ,{12,11,11,10,10,10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1} + ,{12,12,11,11,10,10,10,10,10,10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1} + ,{12,12,12,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{12,12,12,12,11,11,11,11,11,10,10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4} +}; + +static const NvU8 minqp444_10b[15][49]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + 
,{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 7, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 7, 7, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0} + ,{ 9, 9, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0} + ,{10, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0} + ,{10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1} + ,{10,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{10,10,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{12,12,12,12,12,12,12,12,12,12,11,11,11,11,11,11,11,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 1} + ,{18,18,17,17,16,16,16,16,15,15,14,14,14,14,13,13,13,12,12,12,11,11,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3} +}; + +static const NvU8 maxqp444_10b[15][49]={ + { 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{10,10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{12,11,11,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0} + ,{12,12,11,11,10,10,10,10,10,10,10,10, 9, 9, 9, 8, 7, 7, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0} + ,{13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 8, 8, 7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{13,12,12,12,11,11,11,11,11,11,11,11,10,10,10, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1} + ,{14,14,13,13,12,12,12,12,12,12,12,12,12,11,11,10, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1} + ,{15,15,14,14,13,13,13,13,13,13,12,12,12,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1} + 
,{16,15,15,14,14,14,13,13,13,13,13,13,13,12,12,11,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1} + ,{16,16,15,15,14,14,14,14,14,14,13,13,13,12,12,11,11,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2} + ,{16,16,16,15,15,15,14,14,14,14,13,13,13,13,12,12,12,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{16,16,16,16,15,15,15,15,15,14,14,13,13,13,12,12,12,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2} + ,{17,17,17,17,16,16,15,15,15,15,14,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2} + ,{19,19,18,18,17,17,17,17,16,16,15,15,15,15,14,14,14,13,13,13,12,12,12,12,11,11,10,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4} +}; + +static const NvU8 minqp444_12b[15][61]={ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{ 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{11,10,10, 9, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{11,11,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,12,12,11,11,11,11,11,11,11,11,11,10,10, 9, 9, 9, 8, 7, 7, 7, 7, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,12,12,12,11,11,11,11,11,11,11,11,11,11,11,10, 9, 9, 8, 8, 8, 8, 6, 6, 6, 6, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0} + ,{13,13,12,12,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10, 9, 9, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0} + ,{14,13,13,12,12,12,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 0} + ,{14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,12,12,12,11,11,11,11,11,11,10,10, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1} + ,{14,14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,11,11,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{14,14,14,14,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,12,12,12,12,12,12,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 1, 1, 1} + ,{17,17,17,17,16,16,15,15,15,15,15,15,15,15,15,15,15,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 5, 
5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 1} + ,{22,22,21,21,20,20,20,20,19,19,18,18,18,18,17,17,17,16,16,16,15,15,15,15,14,14,13,13,13,13,13,12,12,11,11,11,11,11,10,10, 9, 9, 9, 9, 9, 8, 8, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 3} +}; + +static const NvU8 maxqp444_12b[15][61]={ + {12,12,12,12,12,12,11,11,11,10, 9, 9, 6, 6, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{14,14,13,13,12,12,12,12,12,12,11,11, 9, 9, 9, 8, 8, 7, 7, 7, 7, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{16,15,15,14,13,13,13,13,13,13,13,13,12,12,12,11,10,10, 9, 9, 9, 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{16,16,15,15,14,14,14,14,14,14,14,14,13,13,13,12,11,11,10,10,10, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ,{17,16,16,15,15,15,15,15,15,15,15,15,14,14,13,12,12,11,10,10,10,10, 8, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0} + ,{17,16,16,16,15,15,15,15,15,15,15,15,14,14,14,13,12,12,11,11,11,11, 9, 9, 9, 9, 8, 8, 8, 8, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0} + ,{17,17,16,16,15,15,15,15,15,15,15,15,15,14,14,13,12,12,11,11,11,11,11,10,10,10, 9, 9, 9, 8, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0} + ,{18,18,17,17,16,16,16,16,16,16,16,16,16,15,15,14,13,13,12,12,12,12,11,11,11,11,10,10,10, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1} + ,{19,19,18,18,17,17,17,17,17,17,16,16,16,15,15,14,14,13,13,13,13,13,12,12,12,12,11,11,10, 9, 8, 8, 8, 8, 7, 7, 7, 7, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 1, 1} + ,{20,19,19,18,18,18,17,17,17,17,17,17,17,16,16,15,14,14,13,13,13,13,12,12,12,12,11,11,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 1} + ,{20,20,19,19,18,18,18,18,18,18,17,17,17,16,16,15,15,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10,10,10,10,10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3, 3, 2, 2, 2} + ,{20,20,20,19,19,19,18,18,18,18,17,17,17,17,16,16,16,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{20,20,20,20,19,19,19,19,19,18,18,17,17,17,16,16,16,15,15,15,14,14,13,13,13,13,12,12,11,11,11,11,10,10,10,10, 9, 9, 9, 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 2, 2, 2} + ,{21,21,21,21,20,20,19,19,19,19,18,18,18,18,17,17,16,16,16,16,15,15,14,14,14,14,13,13,12,12,12,12,11,11,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 4, 4, 3, 3, 2} + ,{23,23,22,22,21,21,21,21,20,20,19,19,19,19,18,18,18,17,17,17,16,16,16,16,15,15,14,14,14,14,14,13,13,12,12,12,12,12,11,11,10,10,10,10,10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4} +}; + +static const NvU32 rcBufThresh[] = { 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616, 7744, 7872, 8000, 8064 }; + +/* ------------------------ Static Variables ------------------------------- */ +/* ------------------------ Private Functions Prototype--------------------- */ + +static void * DSC_Malloc(NvLength size); +static void DSC_Free(void * ptr); +static NvU32 +DSC_GetHigherSliceCount +( + NvU32 common_slice_count_mask, + NvU32 
desired_slice_num, + NvU32 dual_mode, + NvU32 *new_slice_num +); +static NvU32 DSC_AlignDownForBppPrecision(NvU32 bitsPerPixelX16, NvU32 bitsPerPixelPrecision); + +static NvU32 +DSC_GetPeakThroughputMps(NvU32 peak_throughput); + +static NvU32 +DSC_SliceCountMaskforSliceNum (NvU32 slice_num); + +/* ------------------------ Private Functions ------------------------------ */ + +/* + * @brief Calculate Bits Per Pixel aligned down as per bitsPerPixelPrecision supported + * by Sink + * + * @param[in] bitsPerPixelX16 Bits Per Pixel + * @param[in] bitsPerPixelPrecision Bits Per Pixel Precision Supported by Panel + * + * @returns Aligned down Bits Per Pixel value + */ +static NvU32 +DSC_AlignDownForBppPrecision +( + NvU32 bitsPerPixelX16, + NvU32 bitsPerPixelPrecision +) +{ + NvU32 allignDownForBppPrecision; + + switch (bitsPerPixelPrecision) + { + case DSC_BITS_PER_PIXEL_PRECISION_1_16: + allignDownForBppPrecision = 1; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_8: + allignDownForBppPrecision = 2; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_4: + allignDownForBppPrecision = 4; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1_2: + allignDownForBppPrecision = 8; + break; + + case DSC_BITS_PER_PIXEL_PRECISION_1: + allignDownForBppPrecision = 16; + break; + + default: + allignDownForBppPrecision = 16; + } + + return (bitsPerPixelX16 & ~(allignDownForBppPrecision - 1)); +} + +/* + * @brief Calculate chunk size, num_extra_mux_bits + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsCalcExtraBits +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 numSsps = 3; + NvU32 sliceBits; + NvU32 extra_bits; + NvU32 bitsPerComponent = out->bits_per_component; + NvU32 muxWordSize; + + muxWordSize = (bitsPerComponent >= 12) ? 64 : 48; + if (out->convert_rgb) + { + extra_bits = (numSsps * (muxWordSize + (4 * bitsPerComponent + 4) - 2)); + } + else if (!out->native_422) // YCbCr + { + extra_bits = (numSsps * muxWordSize + (4 * bitsPerComponent + 4) + 2 * (4 * bitsPerComponent) - 2); + } + else + { + extra_bits = (numSsps * muxWordSize + (4 * bitsPerComponent + 4) + 3 * (4 * bitsPerComponent) - 2); + } + + sliceBits = 8 * out->chunk_size * out->slice_height; + //while ((extra_bits>0) && ((sliceBits - extra_bits) % muxWordSize)) + // extra_bits--; + sliceBits = (sliceBits - extra_bits) % muxWordSize; + if (sliceBits != 0) + { + extra_bits -= MIN(extra_bits, muxWordSize - sliceBits); + } + + out->num_extra_mux_bits = extra_bits; + return NVT_STATUS_SUCCESS; +} + +/* + * @brief Calculate RC initial value. 
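+ *
+ *        Worked example (illustrative only, assuming BPP_UNIT == 16): at 8 bpp 4:4:4 the code below gives
+ *        initial_offset = 6144, initial_scale_value = 8 * 8192 / (8192 - 6144) = 32 (before the
+ *        groups_per_line clamp), and initial_xmit_delay = (4096 * 16 + 64) / 128 = 512.
+ *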
+ * Require: groups_per_line in Dsc_PpsCalcWidth() + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsCalcRcInitValue +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 bitsPerPixel = out->bits_per_pixel; + out->rc_model_size = 8192; + + if (out->native_422) + { + // =IF(CompressBpp >= 8, 2048, IF(CompressBpp <= 7, 5632, 5632 - ROUND((CompressBpp - 7) * (3584), 0))) + if (bitsPerPixel >= 16 * BPP_UNIT) + out->initial_offset = 2048; + else if (bitsPerPixel >= 14 * BPP_UNIT) + out->initial_offset = 5632 - ((bitsPerPixel - 14 * BPP_UNIT) * 1792 + BPP_UNIT / 2) / BPP_UNIT; + else if (bitsPerPixel >= 12 * BPP_UNIT) + out->initial_offset = 5632; + } + else + { + if (bitsPerPixel >= 12 * BPP_UNIT) + out->initial_offset = 2048; + else if (bitsPerPixel >= 10 * BPP_UNIT) + out->initial_offset = 5632 - ((bitsPerPixel - 10 * BPP_UNIT) * 1792 + BPP_UNIT / 2) / BPP_UNIT; + else if (bitsPerPixel >= 8 * BPP_UNIT) + out->initial_offset = 6144 - ((bitsPerPixel - 8 * BPP_UNIT) * 256 + BPP_UNIT / 2) / BPP_UNIT; + else + out->initial_offset = 6144; + } + RANGE_CHECK("initial_offset", out->initial_offset, 0, out->rc_model_size); + + out->initial_scale_value = 8 * out->rc_model_size / (out->rc_model_size - out->initial_offset); + if (out->groups_per_line < out->initial_scale_value - 8) + { + out->initial_scale_value = out->groups_per_line + 8; + } + RANGE_CHECK("initial_scale_value", out->initial_scale_value, 0, 63); + + out->initial_xmit_delay = (4096*BPP_UNIT + bitsPerPixel / 2) / bitsPerPixel; + //RANGE_CHECK("initial_xmit_delay", out->initial_xmit_delay, 0, 1023); + + return NVT_STATUS_SUCCESS; +} + +/* + * @brief Calculate bpg value except slice_bpg_offset + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static +NvU32 DSC_PpsCalcBpg +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 uncompressedBpgRate; + NvU32 ub_BpgOfs; + NvU32 firstLineBpgOfs; + NvU32 secondLineBpgOfs; + NvU32 bitsPerPixel; + NvU32 rbsMin; + NvU32 hrdDelay; + + if (out->native_422) + uncompressedBpgRate = PIXELS_PER_GROUP * out->bits_per_component * 4; + else + uncompressedBpgRate = (3 * out->bits_per_component + (out->convert_rgb ? 2 : 0)) * PIXELS_PER_GROUP; + + ub_BpgOfs = (uncompressedBpgRate*BPP_UNIT - PIXELS_PER_GROUP * out->bits_per_pixel) / BPP_UNIT; + + if (out->slice_height >= 8) + firstLineBpgOfs = 12 + MIN(34, out->slice_height - 8) * 9 / 100; + else + firstLineBpgOfs = 2 * (out->slice_height - 1); + + firstLineBpgOfs = CLAMP(firstLineBpgOfs, 0, ub_BpgOfs); + out->first_line_bpg_offset = firstLineBpgOfs; + RANGE_CHECK("first_line_bpg_offset", out->first_line_bpg_offset, 0, 31); + + if (out->slice_height > 1) + out->nfl_bpg_offset = ((out->first_line_bpg_offset << OFFSET_FRACTIONAL_BITS) + out->slice_height - 2) / (out->slice_height - 1); + else + out->nfl_bpg_offset = 0; + + RANGE_CHECK("nfl_bpg_offset", out->nfl_bpg_offset, 0, 65535); + + secondLineBpgOfs = out->native_420 ? 
12 : 0;
+    secondLineBpgOfs = CLAMP(secondLineBpgOfs, 0, ub_BpgOfs);
+    out->second_line_bpg_offset = secondLineBpgOfs;
+    RANGE_CHECK("second_line_bpg_offset", out->second_line_bpg_offset, 0, 31);
+
+    if (out->slice_height > 2)
+        out->nsl_bpg_offset = ((out->second_line_bpg_offset << OFFSET_FRACTIONAL_BITS) + out->slice_height - 2) / (out->slice_height - 1);
+    else
+        out->nsl_bpg_offset = 0;
+    RANGE_CHECK("nsl_bpg_offset", out->nsl_bpg_offset, 0, 65535);
+
+    out->second_line_offset_adj = out->native_420 ? 512 : 0;
+
+    bitsPerPixel = out->bits_per_pixel;
+    rbsMin = out->rc_model_size - out->initial_offset +
+             (out->initial_xmit_delay * bitsPerPixel + BPP_UNIT - 1) / BPP_UNIT +
+             out->groups_per_line * out->first_line_bpg_offset;
+    hrdDelay = (rbsMin * BPP_UNIT + bitsPerPixel - 1) / bitsPerPixel;
+    out->initial_dec_delay = hrdDelay - out->initial_xmit_delay;
+    RANGE_CHECK("initial_dec_delay", out->initial_dec_delay, 0, 65535);
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate slice_bpg_offset, final_offset and scale_increment_interval,
+ *        scale_decrement_interval
+ *
+ * @param[in/out]   out   DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NvU32
+DSC_PpsCalcScaleInterval
+(
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    NvU32 final_scale;
+    NvU32 groups_total;
+    NvU32 bitsPerPixel = out->bits_per_pixel;
+
+    groups_total = out->groups_per_line * out->slice_height;
+    out->slice_bpg_offset = (((out->rc_model_size - out->initial_offset + out->num_extra_mux_bits) << OFFSET_FRACTIONAL_BITS) +
+                             groups_total - 1) / groups_total;
+    RANGE_CHECK("slice_bpg_offset", out->slice_bpg_offset, 0, 65535);
+
+    if ((PIXELS_PER_GROUP * bitsPerPixel << OFFSET_FRACTIONAL_BITS) - (out->slice_bpg_offset + out->nfl_bpg_offset) * BPP_UNIT
+          < (1 + 5 * PIXELS_PER_GROUP) * BPP_UNIT << OFFSET_FRACTIONAL_BITS)
+    {
+        DSC_Print("The bits/pixel allocation for non-first lines is too low (<5.33bpp).");
+        DSC_Print("Consider decreasing FIRST_LINE_BPG_OFFSET.");
+    }
+
+    out->final_offset = out->rc_model_size - (out->initial_xmit_delay * bitsPerPixel + 8)/BPP_UNIT + out->num_extra_mux_bits;
+    RANGE_CHECK("final_offset", out->final_offset, 0, out->rc_model_size-1); // try increasing initial_xmit_delay
+
+    final_scale = 8 * out->rc_model_size / (out->rc_model_size - out->final_offset);
+    RANGE_CHECK("final_scale", final_scale, 0, 63); // try increasing initial_xmit_delay
+
+    // BEGIN scale_increment_interval fix
+    if(final_scale > 9)
+    {
+        //
+        // Note: the following calculation assumes that the rcXformOffset crosses 0 at some point. If the zero-crossing
+        // doesn't occur in a configuration, we recommend reconfiguring the rc_model_size and thresholds to be smaller
+        // for that configuration.
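+        //
+        // Illustrative numbers only (assumed, and taking OFFSET_FRACTIONAL_BITS as 11): with final_offset = 4096
+        // (so final_scale = 8 * 8192 / 4096 = 16) and the three bpg offsets below summing to 1000, this gives
+        // scale_increment_interval = (4096 << 11) / ((16 - 9) * 1000) = 1198 group times.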
+ // + out->scale_increment_interval = (out->final_offset << OFFSET_FRACTIONAL_BITS) / + ((final_scale - 9) * (out->nfl_bpg_offset + out->slice_bpg_offset + out->nsl_bpg_offset)); + RANGE_CHECK("scale_increment_interval", out->scale_increment_interval, 0, 65535); + } + else + { + out->scale_increment_interval = 0; + } + + // END scale_increment_interval fix + if (out->initial_scale_value > 8) + out->scale_decrement_interval = out->groups_per_line / (out->initial_scale_value - 8); + else + out->scale_decrement_interval = 4095; + RANGE_CHECK("scale_decrement_interval", out->scale_decrement_interval, 1, 4095); + return NVT_STATUS_SUCCESS; +} + +/* + * @brief Calculate RC parameters + * + * @param[in/out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NvU32 +DSC_PpsCalcRcParam +( + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 i, idx; + NvU32 bitsPerPixel = out->bits_per_pixel; + NvU32 bpcm8 = out->bits_per_component - 8; + NvU32 yuv_modifier = out->convert_rgb == 0 && out->dsc_version_minor == 1; + NvU32 qp_bpc_modifier = bpcm8 * 2 - yuv_modifier; + + out->flatness_min_qp = 3 + qp_bpc_modifier; + out->flatness_max_qp = 12 + qp_bpc_modifier; + out->flatness_det_thresh = 2 << bpcm8; + out->rc_edge_factor = 6; + out->rc_quant_incr_limit0 = 11 + qp_bpc_modifier; + out->rc_quant_incr_limit1 = 11 + qp_bpc_modifier; + out->rc_tgt_offset_hi = 3; + out->rc_tgt_offset_lo = 3; + + for (i = 0; i < NUM_BUF_RANGES - 1; i++) + out->rc_buf_thresh[i] = rcBufThresh[i] & (0xFF << 6); + + //if (out->native_420) + // idx = bitsPerPixel/BPP_UNIT - 8; + //else if(out->native_422) + // idx = bitsPerPixel/BPP_UNIT - 12; + //else + idx = (2 * (bitsPerPixel - 6 * BPP_UNIT) ) / BPP_UNIT; + + if (bpcm8 == 0) + { + for (i = 0; i < NUM_BUF_RANGES; i++) + { + const NvU32 min = minqp444_8b[i][idx]; + const NvU32 max = maxqp444_8b[i][idx]; + + out->range_min_qp[i] = MAX(0, min - yuv_modifier); + out->range_max_qp[i] = MAX(0, max - yuv_modifier); + } + } + else if (bpcm8 == 2) + { + for (i = 0; i < NUM_BUF_RANGES; i++) + { + const NvU32 min = minqp444_10b[i][idx]; + const NvU32 max = maxqp444_10b[i][idx]; + + out->range_min_qp[i] = MAX(0, min - yuv_modifier); + out->range_max_qp[i] = MAX(0, max - yuv_modifier); + } + } + else + { + for (i = 0; i < NUM_BUF_RANGES; i++) + { + const NvU32 min = minqp444_12b[i][idx]; + const NvU32 max = maxqp444_12b[i][idx]; + + out->range_min_qp[i] = MAX(0, min - yuv_modifier); + out->range_max_qp[i] = MAX(0, max - yuv_modifier); + } + } + + for (i = 0; i < NUM_BUF_RANGES; ++i) + { + //if (out->native_420) + //{ + // NvU32 ofs_und4[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + // NvU32 ofs_und5[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und6[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und8[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + // out->range_min_qp[i] = minqp_420[bpcm8 / 2][i][idx]; + // out->range_max_qp[i] = maxqp_420[bpcm8 / 2][i][idx]; + // if (bitsPerPixel <= 8*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und4[i]; + // else if (bitsPerPixel <= 10*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und4[i] + (NvU32)(0.5 * (bitsPerPixel - 8.0) * (ofs_und5[i] - ofs_und4[i]) + 0.5); + // else if (bitsPerPixel <= 12*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und5[i] + (NvU32)(0.5 * (bitsPerPixel - 10.0) * (ofs_und6[i] - ofs_und5[i]) + 0.5); + // else if (bitsPerPixel <= 16*BPP_UNIT) + // 
out->range_bpg_offset[i] = ofs_und6[i] + (NvU32)(0.25 * (bitsPerPixel - 12.0) * (ofs_und8[i] - ofs_und6[i]) + 0.5); + // else + // out->range_bpg_offset[i] = ofs_und8[i]; + //} + //else if (out->native_422) + //{ + // NvU32 ofs_und6[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + // NvU32 ofs_und7[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + // NvU32 ofs_und10[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + // out->range_min_qp[i] = minqp_422[bpcm8 / 2][i][idx]; + // out->range_max_qp[i] = maxqp_422[bpcm8 / 2][i][idx]; + // if (bitsPerPixel <= 12*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und6[i]; + // else if(bitsPerPixel <= 14*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und6[i] + (NvU32)((bitsPerPixel - 12.0) * (ofs_und7[i] - ofs_und6[i]) / 2.0 + 0.5); + // else if(bitsPerPixel <= 16*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und7[i]; + // else if(bitsPerPixel <= 20*BPP_UNIT) + // out->range_bpg_offset[i] = ofs_und7[i] + (NvU32)((bitsPerPixel - 16.0) * (ofs_und10[i] - ofs_und7[i]) / 4.0 + 0.5); + // else + // out->range_bpg_offset[i] = ofs_und10[i]; + //} + //else + { + const NvU32 ofs_und6[] = { 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; + const NvU32 ofs_und8[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + const NvU32 ofs_und12[] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -10, -12, -12, -12 }; + const NvU32 ofs_und15[] = { 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 }; + if (bitsPerPixel <= 6 * BPP_UNIT) + out->range_bpg_offset[i] = ofs_und6[i]; + else if (bitsPerPixel <= 8 * BPP_UNIT) + out->range_bpg_offset[i] = ofs_und6[i] + ((bitsPerPixel - 6 * BPP_UNIT) * (ofs_und8[i] - ofs_und6[i]) + BPP_UNIT) / (2 * BPP_UNIT); + else if (bitsPerPixel <= 12 * BPP_UNIT) + out->range_bpg_offset[i] = ofs_und8[i]; + else if (bitsPerPixel <= 15 * BPP_UNIT) + out->range_bpg_offset[i] = ofs_und12[i] + ((bitsPerPixel - 12 * BPP_UNIT) * (ofs_und15[i] - ofs_und12[i]) + 3 * BPP_UNIT / 2) / (3 * BPP_UNIT); + else + out->range_bpg_offset[i] = ofs_und15[i]; + } + } + return NVT_STATUS_SUCCESS; +} + +/* + * @brief Initialize with basic PPS values based on passed down input params + * + * @param[in] in DSC input parameter + * @param[out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NvU32 +DSC_PpsCalcBase +( + const DSC_INPUT_PARAMS *in, + DSC_OUTPUT_PARAMS *out +) +{ + out->dsc_version_major = 1; + ENUM2_CHECK("dsc_version_minor", in->dsc_version_minor, 1, 2); + out->dsc_version_minor = in->dsc_version_minor == 1 ? 1 : 2; + out->pps_identifier = 0; + ENUM3_CHECK("bits_per_component", in->bits_per_component, 8, 10, 12); + out->bits_per_component = in->bits_per_component; + RANGE_CHECK("bits_per_pixelx16", in->bits_per_pixel, 8 * BPP_UNIT, (out->bits_per_component * 3) * BPP_UNIT); + out->bits_per_pixel = in->bits_per_pixel; + RANGE_CHECK("linebuf_depth", in->linebuf_depth, DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MIN, DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MAX); + out->linebuf_depth = in->linebuf_depth; + ENUM2_CHECK("block_pred_enable", in->block_pred_enable, 0, 1); + out->block_pred_enable = in->block_pred_enable ? 1 : 0; + ENUM2_CHECK("convert_rgb", in->convert_rgb, 0, 1); + out->convert_rgb = in->convert_rgb ? 
1 : 0; + RANGE_CHECK("pic_height", in->pic_height, 8, 8192); + out->pic_height = in->pic_height; + + if (in->dual_mode) + { + RANGE_CHECK("pic_width", in->pic_width, 64, 8192); + } + else + { + RANGE_CHECK("pic_width", in->pic_width, 32, 5120); + } + out->pic_width = in->pic_width; + + out->simple_422 = in->simple_422; + out->vbr_enable = 0; + out->native_420 = in->native_420; + out->native_422 = in->native_422; + out->slice_num = in->slice_num; + out->slice_width= in->slice_width; + out->slice_height= in->slice_height; + + return NVT_STATUS_SUCCESS; +} + +/* + * @brief Generate 32bit data array from DSC_OUTPUT_PARAMS. + * + * @param[in] in DSC input parameter + * @param[out] out DSC output parameter + * NvU32[32] to return the pps data. + * The data can be send to SetDscPpsData* methods directly. + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static void +DSC_PpsConstruct +( + const DSC_OUTPUT_PARAMS *in, + NvU32 data[DSC_MAX_PPS_SIZE_DWORD] +) +{ + NvU32 i; + NvU32 pps[96]; + + pps[0] = (in->dsc_version_major << 4) | (in->dsc_version_minor & 0xF); + pps[1] = in->pps_identifier; + pps[2] = 0; + pps[3] = (in->bits_per_component << 4) | (in->linebuf_depth & 0xF); + pps[4] = (in->block_pred_enable << 5) | (in->convert_rgb << 4) | + (in->simple_422 << 3) | (in->vbr_enable << 2) | + MSB(in->bits_per_pixel & 0x3FF); + pps[5] = LSB(in->bits_per_pixel); + pps[6] = MSB(in->pic_height); + pps[7] = LSB(in->pic_height); + pps[8] = MSB(in->pic_width); + pps[9] = LSB(in->pic_width); + pps[10] = MSB(in->slice_height); + pps[11] = LSB(in->slice_height); + pps[12] = MSB(in->slice_width); + pps[13] = LSB(in->slice_width); + pps[14] = MSB(in->chunk_size); + pps[15] = LSB(in->chunk_size); + pps[16] = MSB(in->initial_xmit_delay & 0x3FF); + pps[17] = LSB(in->initial_xmit_delay); + pps[18] = MSB(in->initial_dec_delay); + pps[19] = LSB(in->initial_dec_delay); + pps[20] = 0; + pps[21] = in->initial_scale_value & 0x3F; + pps[22] = MSB(in->scale_increment_interval); + pps[23] = LSB(in->scale_increment_interval); + pps[24] = MSB(in->scale_decrement_interval & 0xFFF); + pps[25] = LSB(in->scale_decrement_interval); + pps[26] = 0; + pps[27] = in->first_line_bpg_offset & 0x1F; + pps[28] = MSB(in->nfl_bpg_offset); + pps[29] = LSB(in->nfl_bpg_offset); + pps[30] = MSB(in->slice_bpg_offset); + pps[31] = LSB(in->slice_bpg_offset); + pps[32] = MSB(in->initial_offset); + pps[33] = LSB(in->initial_offset); + pps[34] = MSB(in->final_offset); + pps[35] = LSB(in->final_offset); + pps[36] = in->flatness_min_qp & 0x1F; + pps[37] = in->flatness_max_qp & 0x1F; + + pps[38] = MSB(in->rc_model_size); + pps[39] = LSB(in->rc_model_size); + pps[40] = in->rc_edge_factor & 0xF; + pps[41] = in->rc_quant_incr_limit0 & 0x1F; + pps[42] = in->rc_quant_incr_limit1 & 0x1F; + pps[43] = (in->rc_tgt_offset_hi << 4) | (in->rc_tgt_offset_lo & 0xF); + for (i = 0; i < NUM_BUF_RANGES - 1; i++) + pps[44 + i] = in->rc_buf_thresh[i] >> 6; + + for (i = 0; i < NUM_BUF_RANGES; i++) + { + NvU32 x = ((in->range_min_qp[i] & 0x1F) << 11) | + ((in->range_max_qp[i] & 0x1F) << 6) | + ((in->range_bpg_offset[i] & 0x3F)) ; + pps[58 + i * 2] = MSB(x); + pps[59 + i * 2] = LSB(x); + } + + pps[88] = (in->native_420 << 1) | (in->native_422); + pps[89] = in->second_line_bpg_offset & 0x1F; + pps[90] = MSB(in->nsl_bpg_offset); + pps[91] = LSB(in->nsl_bpg_offset); + pps[92] = MSB(in->second_line_offset_adj); + pps[93] = LSB(in->second_line_offset_adj); + pps[94] = 0; + pps[95] = 0; + + for (i = 0; i < 24; i++) + { + data[i] = ((pps[i * 
4 + 0] << 0) | + (pps[i * 4 + 1] << 8) | + (pps[i * 4 + 2] << 16) | + (pps[i * 4 + 3] << 24)); + } + + for(; i < 32; i++) + data[i] = 0; +} + +/* + * @brief Generate slice count supported mask with given slice num. + * + * @param[in] slice_num slice num for which mask needs to be generated + * + * @returns out_slice_count_mask if successful + * 0 if not successful + */ +static NvU32 +DSC_SliceCountMaskforSliceNum (NvU32 slice_num) +{ + switch (slice_num) + { + case 1: + return DSC_DECODER_SLICES_PER_SINK_1; + case 2: + return DSC_DECODER_SLICES_PER_SINK_2; + case 4: + return DSC_DECODER_SLICES_PER_SINK_4; + case 6: + return DSC_DECODER_SLICES_PER_SINK_6; + case 8: + return DSC_DECODER_SLICES_PER_SINK_8; + case 10: + return DSC_DECODER_SLICES_PER_SINK_10; + case 12: + return DSC_DECODER_SLICES_PER_SINK_12; + case 16: + return DSC_DECODER_SLICES_PER_SINK_16; + case 20: + return DSC_DECODER_SLICES_PER_SINK_20; + case 24: + return DSC_DECODER_SLICES_PER_SINK_24; + default: + return DSC_DECODER_SLICES_PER_SINK_INVALID; + } +} + +/* + * @brief Convert peak throughput placeholders into numeric values. + * + * @param[in] peak_throughput_mode0 peak throughput sink cap placeholder. + * + * @returns peak_throughput_mps actual throughput in MegaPixels/second. + */ +static NvU32 +DSC_GetPeakThroughputMps(NvU32 peak_throughput) +{ + NvU32 peak_throughput_mps; + switch(peak_throughput) + { + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_340: + peak_throughput_mps = 340; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_400: + peak_throughput_mps = 400; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_450: + peak_throughput_mps = 450; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_500: + peak_throughput_mps = 500; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_550: + peak_throughput_mps = 550; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_600: + peak_throughput_mps = 600; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_650: + peak_throughput_mps = 650; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_700: + peak_throughput_mps = 700; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_750: + peak_throughput_mps = 750; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_800: + peak_throughput_mps = 800; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_850: + peak_throughput_mps = 850; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_900: + peak_throughput_mps = 900; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_950: + peak_throughput_mps = 950; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_1000: + peak_throughput_mps = 1000; + break; + case DSC_DECODER_PEAK_THROUGHPUT_MODE0_170: + peak_throughput_mps = 170; + break; + default: + peak_throughput_mps = 0; + } + return peak_throughput_mps; +} + +/* + * @brief Get the next higher valid slice count. + * + * @param[in] common_slice_count_mask Includes slice counts supported by both. + * @param[in] desired_slice_num desired slice count + * @param[in] dual_mode if dual mode or not + * @param[in] new_slice_num new slice count if one was found. + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NvU32 +DSC_GetHigherSliceCount +( + NvU32 common_slice_count_mask, + NvU32 desired_slice_num, + NvU32 dual_mode, + NvU32 *new_slice_num +) +{ + // + // slice num = 6 won't exist in common_slice_count_mask, but + // still keeping it to align mask bits and valid_slice_num index. 
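+    //
+    // Example (illustrative only): if the common mask allows 1, 2 and 4 slices and desired_slice_num
+    // is 3, the scan below returns 4, the next higher supported slice count.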
+ // + NvU32 valid_slice_num[6] = {1,2,0,4,6,8}; + NvU32 i = 0; + NvU32 slice_mask = common_slice_count_mask; + NvU32 max_slice_num_index = dual_mode ? 5 : 3; + + while (slice_mask && i <= max_slice_num_index) + { + if (slice_mask & 0x1) + { + if (valid_slice_num[i] > desired_slice_num) + { + *new_slice_num = valid_slice_num[i]; + return NVT_STATUS_SUCCESS; + } + } + slice_mask = slice_mask >> 1; + i++; + } + + return NVT_STATUS_ERR; +} + +/* + * @brief Function validates and calculates, if required, the slice parameters like + * slice_width, slice_num for the DSC mode requested. + * + * If slice width, slice num is not forced, fn calculates them by trying to minimize + * slice num used. + * + * If slice width/slice num is forced, it validates the forced parameter and calculates + * corresponding parameter and makes sure it can be supported. + * + * If both slice num and slice width are forced, it validates both. + * + * @param[in] pixel_clkMHz Pixel clock + * @param[in] dual_mode Specify if Dual Mode is enabled or not + * @param[in] max_slice_num max slice number supported by sink + * @param[in] max_slice_width max slice width supported by sink + * @param[in] slice_count_mask Mask of slice counts supported by sink + * @param[in] peak_throughput Peak throughput supported by DSC sink + * decoder in Mega Pixels Per Second + * @param[out] out DSC output parameter + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NvU32 +DSC_PpsCalcSliceParams +( + NvU32 pixel_clkMHz, + NvU32 dual_mode, + NvU32 max_slice_num, + NvU32 max_slice_width, + NvU32 slice_count_mask, + NvU32 peak_throughput, + DSC_OUTPUT_PARAMS *out +) +{ + NvU32 min_slice_num; + NvU32 slicew; + NvU32 peak_throughput_mps; + // + // Bits 0,1,3,4,5 represents slice counts 1,2,4,6,8. + // Bit 2 is reserved and Slice count = 6 is not supported + // by GPU, so that is not required to be set. + // + NvU32 gpu_slice_count_mask = DSC_DECODER_SLICES_PER_SINK_1 | + DSC_DECODER_SLICES_PER_SINK_2 | + DSC_DECODER_SLICES_PER_SINK_4; + + NvU32 gpu_slice_count_mask_dual = DSC_DECODER_SLICES_PER_SINK_2 | + DSC_DECODER_SLICES_PER_SINK_4 | + DSC_DECODER_SLICES_PER_SINK_8; + + NvU32 common_slice_count_mask = dual_mode? 
gpu_slice_count_mask_dual & slice_count_mask : + gpu_slice_count_mask & slice_count_mask; + + if (!common_slice_count_mask) + { + DSC_Print("DSC cannot be supported since no common supported slice count\n"); + return NVT_STATUS_ERR; + } + + peak_throughput_mps = DSC_GetPeakThroughputMps(peak_throughput); + if (!peak_throughput_mps) + { + DSC_Print("Peak throughput cannot be zero.\n"); + return NVT_STATUS_ERR; + } + + if (max_slice_width > MAX_WIDTH_PER_SLICE) + { + DSC_Print("GPU can support only a max of 5120 pixels across all slices\n"); + max_slice_width = MAX_WIDTH_PER_SLICE; + } + + if (out->slice_num == 0 && out->slice_width == 0) + { + NvU32 new_slice_num = 0; + NvU32 min_slice_num_1 = (out->pic_width + max_slice_width - 1) / max_slice_width; + NvU32 min_slice_num_2 = (pixel_clkMHz + peak_throughput_mps - 1) / peak_throughput_mps; + min_slice_num = MAX(min_slice_num_1, min_slice_num_2); + + if (max_slice_num < min_slice_num) + { + DSC_Print("Requested mode cannot be supported with DSC\n"); + return NVT_STATUS_ERR; + } + + if (!(DSC_SliceCountMaskforSliceNum(min_slice_num) & common_slice_count_mask)) + { + if (DSC_GetHigherSliceCount(common_slice_count_mask, min_slice_num, dual_mode, &new_slice_num) == NVT_STATUS_ERR) + { + DSC_Print("DSC cannot be enabled for this mode\n"); + return NVT_STATUS_ERR; + } + else + { + out->slice_num = new_slice_num; + } + } + else + { + out->slice_num = min_slice_num; + } + + out->slice_width = (out->pic_width + out->slice_num - 1) / out->slice_num; + } + else if (out->slice_num == 0) + { + if (out->slice_width > max_slice_width) + { + DSC_Print("Error! Max Supported Slice Width = %u\n", max_slice_width); + return NVT_STATUS_ERR; + } + + out->slice_num = (out->pic_width + out->slice_width - 1) / out->slice_width; + if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask)) + { + DSC_Print("Slice count corresponding to requested slice_width is not supported\n"); + return NVT_STATUS_ERR; + } + } + else if (out->slice_width == 0) + { + if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask)) + { + DSC_Print("Slice count requested is not supported\n"); + return NVT_STATUS_ERR; + } + + out->slice_width = (out->pic_width + out->slice_num - 1) / out->slice_num; + + if (out->slice_width > max_slice_width) + { + DSC_Print("Slice width corresponding to the requested slice count is not supported\n"); + return NVT_STATUS_ERR; + } + } + else + { + if (!(DSC_SliceCountMaskforSliceNum(out->slice_num) & common_slice_count_mask)) + { + DSC_Print("Requested slice count is not supported\n"); + return NVT_STATUS_ERR; + } + + if (out->slice_width > max_slice_width) + { + DSC_Print("Requested slice width cannot be supported\n"); + return NVT_STATUS_ERR; + } + + if (out->slice_width != (out->pic_width + out->slice_num - 1) / out->slice_num) + { + DSC_Print("slice_width must equal CEIL(pic_width/slice_num) \n"); + return NVT_STATUS_ERR; + } + } + + if((pixel_clkMHz / out->slice_num) > peak_throughput_mps) + { + DSC_Print("Sink DSC decoder does not support minimum throughout required for this DSC config \n"); + return NVT_STATUS_ERR; + } + + if (max_slice_width < SINK_MAX_SLICE_WIDTH_DEFAULT) + { + DSC_Print("Sink has to support a max slice width of at least 2560 as per DP1.4 spec. 
Ignoring for now.");
+    }
+
+    if (out->slice_width < 32)
+    {
+        DSC_Print("slice_width must be >= 32\n");
+        return NVT_STATUS_ERR;
+    }
+
+    slicew = out->slice_width >> (out->native_420 || out->native_422); // /2 in native 4:2:0 and 4:2:2 modes
+    out->groups_per_line = (slicew + PIXELS_PER_GROUP - 1) / PIXELS_PER_GROUP;
+    out->chunk_size = (slicew * out->bits_per_pixel + 8 * BPP_UNIT - 1) / (8 * BPP_UNIT); // Number of bytes per chunk
+
+    //
+    // Below is not a constraint of the DSC module; it is an RG limitation.
+    // Check that the total data packets per line from DSC to RG are not larger than pic_width.
+    //
+    if ((out->chunk_size + 3) / 4 * out->slice_num > out->pic_width)
+    {
+        DSC_Print("Error! bpp too high, RG will overflow, normally, this error is also caused by padding (pic_width %% slice_num != 0)");
+        return NVT_STATUS_ERR;
+    }
+
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate or validate slice height for the requested DSC mode.
+ *
+ * @param[in/out]   out   DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NVT_STATUS
+Dsc_PpsCalcHeight
+(
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    if (out->slice_height == 0)
+    {
+        NvU32 i;
+        for (i = 1 ; i <= 16; i++)
+        {
+            out->slice_height = out->pic_height / i;
+            if (out->pic_height != out->slice_height * i )
+                continue;
+
+            if (DSC_PpsCheckSliceHeight(out) == NVT_STATUS_SUCCESS)
+                return NVT_STATUS_SUCCESS;
+        }
+        DSC_Print("Error! can't find valid slice_height");
+        return NVT_STATUS_ERR;
+    }
+
+    RANGE_CHECK("slice_height", out->slice_height, 8, out->pic_height);
+
+    if (out->pic_height % out->slice_height != 0)
+    {
+        DSC_Print("Error! pic_height %% slice_height must be 0");
+        return NVT_STATUS_ERR;
+    }
+
+    if(DSC_PpsCheckSliceHeight(out) != NVT_STATUS_SUCCESS)
+    {
+        DSC_Print("Error! slice_height not valid");
+        return NVT_STATUS_ERR;
+    }
+    return NVT_STATUS_SUCCESS;
+}
+
+/*
+ * @brief Calculate DSC_OUTPUT_PARAMS from DSC_INPUT_PARAMS.
+ *
+ * @param[in]   in    DSC input parameter
+ * @param[out]  out   DSC output parameter
+ *
+ * @returns NVT_STATUS_SUCCESS if successful;
+ *          NVT_STATUS_ERR if unsuccessful;
+ */
+static NVT_STATUS
+DSC_PpsCalc
+(
+    const DSC_INPUT_PARAMS *in,
+    DSC_OUTPUT_PARAMS *out
+)
+{
+    NVT_STATUS ret;
+    NvU32 peak_throughput = 0;
+
+    ret = DSC_PpsCalcBase(in, out);
+    if (ret != NVT_STATUS_SUCCESS)
+        return ret;
+
+    if (in->drop_mode)
+    {
+        // in drop mode, HW requires these params to simplify the design
+        out->bits_per_pixel = 16 * BPP_UNIT;
+        out->slice_num = 2;
+    }
+
+    if (out->native_420 || out->native_422)
+    {
+        peak_throughput = in->peak_throughput_mode1;
+    }
+    else
+    {
+        peak_throughput = in->peak_throughput_mode0;
+    }
+
+    ret = DSC_PpsCalcSliceParams(in->pixel_clkMHz, in->dual_mode,
+                                 in->max_slice_num, in->max_slice_width, in->slice_count_mask,
+                                 peak_throughput, out);
+    if (ret != NVT_STATUS_SUCCESS) return ret;
+    ret = DSC_PpsCalcRcInitValue(out);
+    if (ret != NVT_STATUS_SUCCESS) return ret;
+    ret = Dsc_PpsCalcHeight(out);
+    if (ret != NVT_STATUS_SUCCESS) return ret;
+    ret = DSC_PpsCalcRcParam(out);
+    return ret;
+}
+
+/*
+ * @brief Calculate DSC_OUTPUT_PARAMS from DSC_INPUT_PARAMS internally,
+ *        then pack pps parameters into 32bit data array.
+ *
+ * @param[in]   in    DSC input parameter
+ * @param[out]  out   DSC output parameter
+ *                    NvU32[32] to return the pps data.
+ *                    The data can be sent to SetDscPpsData* methods directly.
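+ *                    (As packed by DSC_PpsConstruct() above: the 96 PPS bytes go little-endian into the
+ *                    first 24 DWORDs, and the remaining 8 DWORDs are zeroed.)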
+ * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +DSC_PpsDataGen +( + const DSC_INPUT_PARAMS *in, + NvU32 out[DSC_MAX_PPS_SIZE_DWORD] +) +{ + NVT_STATUS ret; + DSC_OUTPUT_PARAMS *pPpsOut; + + pPpsOut = (DSC_OUTPUT_PARAMS *)DSC_Malloc(sizeof(DSC_OUTPUT_PARAMS)); + if (pPpsOut == NULL) + { + DSC_Print("ERROR - Memory allocation error."); + ret = NVT_STATUS_NO_MEMORY; + goto done; + } + + NVMISC_MEMSET(pPpsOut, 0, sizeof(DSC_OUTPUT_PARAMS)); + ret = DSC_PpsCalc(in, pPpsOut); + if (ret != NVT_STATUS_SUCCESS) + { + DSC_Print("ERROR - Invalid parameter."); + goto done; + } + + DSC_PpsConstruct(pPpsOut, out); + + /* fall through */ +done: + DSC_Free(pPpsOut); + + return ret; +} + +/* + * @brief Allocates memory for requested size + * + * @param[in] size Size to be allocated + * + * @returns Pointer to allocated memory + */ +static void * +DSC_Malloc(NvLength size) +{ +#if defined(DSC_CALLBACK_MODIFIED) + return (callbacks.dscMalloc)(callbacks.clientHandle, size); +#else + return (callbacks.dscMalloc)(size); +#endif // DSC_CALLBACK_MODIFIED +} + +/* + * @brief Frees dynamically allocated memory + * + * @param[in] ptr Pointer to a memory to be deallocated + * + */ +static void +DSC_Free(void * ptr) +{ +#if defined(DSC_CALLBACK_MODIFIED) + (callbacks.dscFree)(callbacks.clientHandle, ptr); +#else + (callbacks.dscFree)(ptr); +#endif // DSC_CALLBACK_MODIFIED +} + +/* + * @brief Validate input parameter we got from caller of this function + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +static NVT_STATUS +_validateInput +( + const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond +) +{ + // Validate DSC Info + if (pDscInfo->sinkCaps.decoderColorFormatMask == 0) + { + DSC_Print("ERROR - At least one of the color format decoding needs to be supported by Sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (!ONEBITSET(pDscInfo->sinkCaps.bitsPerPixelPrecision)) + { + DSC_Print("ERROR - Only one of Bits Per Pixel Precision should be set"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->sinkCaps.bitsPerPixelPrecision != 1) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 2) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 4) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 8) && + (pDscInfo->sinkCaps.bitsPerPixelPrecision != 16)) + { + DSC_Print("ERROR - Bits Per Pixel Precision should be 1/16, 1/8, 1/4, 1/2 or 1 bpp."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.maxSliceWidth == 0) + { + DSC_Print("ERROR - Invalid max slice width supported by sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.maxNumHztSlices == 0) + { + DSC_Print("ERROR - Invalid max number of horizontal slices supported by sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.lineBufferBitDepth == 0) + { + DSC_Print("ERROR - Invalid line buffer bit depth supported by sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.algorithmRevision.versionMinor == 0) + { + DSC_Print("ERROR - Invalid DSC algorithm revision 
supported by sink."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.encoderColorFormatMask == 0) + { + DSC_Print("ERROR - At least one of the color format encoding needs to be supported by GPU."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.lineBufferSize == 0) + { + DSC_Print("ERROR - Invalid Line buffer size supported by GPU."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.maxNumHztSlices == 0) + { + DSC_Print("ERROR - Invalid max number of horizontal slices supported by GPU."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->gpuCaps.lineBufferBitDepth == 0) + { + DSC_Print("ERROR - Invalid line buffer bit depth supported by GPU."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceCount > pDscInfo->sinkCaps.maxNumHztSlices) + { + DSC_Print("ERROR - Client can't specify forced slice count greater than what sink supports."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->forcedDscParams.sliceCount / (pModesetInfo->bDualMode ? 2 : 1)) > pDscInfo->gpuCaps.maxNumHztSlices) + { + DSC_Print("ERROR - Client can't specify forced slice count greater than what GPU supports."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceWidth > pDscInfo->sinkCaps.maxSliceWidth) + { + DSC_Print("ERROR - Client can't specify forced slice width greater than what sink supports."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->forcedDscParams.sliceCount > 0) && + (pDscInfo->forcedDscParams.sliceWidth != 0)) + { + DSC_Print("ERROR - Client can't specify both forced slice count and slice width."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pDscInfo->forcedDscParams.sliceCount != 0) && + (pDscInfo->forcedDscParams.sliceCount != 1) && + (pDscInfo->forcedDscParams.sliceCount != 2) && + (pDscInfo->forcedDscParams.sliceCount != 4) && + (pDscInfo->forcedDscParams.sliceCount != 8)) + { + DSC_Print("ERROR - Forced Slice Count has to be 1/2/4/8."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceWidth > pModesetInfo->activeWidth) + { + DSC_Print("ERROR - Forced Slice Width can't be more than Active Width."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.sliceHeight > pModesetInfo->activeHeight) + { + DSC_Print("ERROR - Forced Slice Height can't be more than Active Height."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.dscRevision.versionMinor > + pDscInfo->sinkCaps.algorithmRevision.versionMinor) + { + DSC_Print("ERROR - Forced DSC Algorithm Revision is greater than Sink Supported value."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->forcedDscParams.dscRevision.versionMinor > 2) + { + DSC_Print("ERROR - Forced DSC Algorithm Revision is greater than 1.2"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->pixelClockHz == 0) + { + DSC_Print("ERROR - Invalid pixel Clock for mode."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->activeWidth == 0) + { + DSC_Print("ERROR - Invalid active width for mode."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->activeHeight == 0) + { + DSC_Print("ERROR - Invalid active height for mode."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->bitsPerComponent == 0) + { + DSC_Print("ERROR - Invalid bits per component for mode."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (availableBandwidthBitsPerSecond == 0) + { + 
DSC_Print("ERROR - Invalid available bandwidth in Bits Per Second."); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr422) + { + // + // For using YCbCr422 with DSC, either of the following has to be true + // 1> Sink supports Simple422 + // 2> GPU and Sink supports Native 422 + // + if ((!(pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422)) && + (!((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)))) + { + DSC_Print("ERROR - Can't enable YCbCr422 with current GPU and Sink DSC config."); + return NVT_STATUS_INVALID_PARAMETER; + } + } + + if (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr420) + { + // + // For using YCbCr420 with DSC, GPU and Sink has to support Native 420 + // + if (!((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420))) + { + DSC_Print("ERROR - Can't enable YCbCr420 with current GPU and Sink DSC config."); + return NVT_STATUS_INVALID_PARAMETER; + } + } + + if ((pDscInfo->sinkCaps.algorithmRevision.versionMajor == 1) && + (pDscInfo->sinkCaps.algorithmRevision.versionMinor == 1) && + (pModesetInfo->colorFormat == NVT_COLOR_FORMAT_YCbCr420)) + { + DSC_Print("WARNING: DSC v1.2 or higher is recommended for using YUV444"); + DSC_Print("Current version is 1.1"); + } + + if (pDscInfo->sinkCaps.maxBitsPerPixelX16 > 1024U) + { + DSC_Print("ERROR - Max bits per pixel can't be greater than 1024"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pDscInfo->sinkCaps.decoderColorDepthMask) + { + switch (pModesetInfo->bitsPerComponent) + { + case 12: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS)) + { + DSC_Print("ERROR - Sink DSC Decoder does not support 12 bpc"); + return NVT_STATUS_INVALID_PARAMETER; + } + break; + case 10: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS)) + { + DSC_Print("ERROR - Sink DSC Decoder does not support 10 bpc"); + return NVT_STATUS_INVALID_PARAMETER; + } + break; + case 8: + if (!(pDscInfo->sinkCaps.decoderColorDepthMask & DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS)) + { + DSC_Print("ERROR - Sink DSC Decoder does not support 8 bpc"); + return NVT_STATUS_INVALID_PARAMETER; + } + break; + + default: + DSC_Print("ERROR - Invalid bits per component specified"); + return NVT_STATUS_INVALID_PARAMETER; + } + } + else + { + DSC_Print("WARNING - Decoder Color Depth Mask was not provided. 
Assuming that decoder supports all depths."); + } + + // Validate WAR data + if (pWARData) + { + if ((pWARData->connectorType != DSC_DP) && (pWARData->connectorType != DSC_HDMI)) + { + DSC_Print("WARNING - Incorrect connector info sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pWARData->connectorType == DSC_DP) + { + if (!IS_VALID_LANECOUNT(pWARData->dpData.laneCount)) + { + DSC_Print("ERROR - Incorrect DP Lane count info sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (!IS_VALID_LINKBW(pWARData->dpData.linkRateHz / DP_LINK_BW_FREQ_MULTI_MBPS)) + { + DSC_Print("ERROR - Incorrect DP Link rate info sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if (pWARData->dpData.hBlank > MAX_HBLANK_PIXELS) + { + DSC_Print("ERROR - Incorrect DP HBlank info sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + + if ((pWARData->dpData.dpMode != DSC_DP_SST) && (pWARData->dpData.dpMode != DSC_DP_MST)) + { + DSC_Print("ERROR - Incorrect DP Stream mode sent with WAR data"); + return NVT_STATUS_INVALID_PARAMETER; + } + } + } + + return NVT_STATUS_SUCCESS; +} + +/* ------------------------ Public Functions ------------------------------- */ + +/* + * @brief Calculate PPS parameters based on passed down Sink, + * GPU capability and modeset info + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * @param[out] pps Calculated PPS parameter. + * The data can be send to SetDscPpsData* methods directly. + * @param[out] pBitsPerPixelX16 Bits per pixel multiplied by 16 + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + * In case this returns failure consider that PPS is not possible. 
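+ *
+ * Typical call sequence (illustrative sketch only; the caller-side names dscInfo, modesetInfo and
+ * availableBandwidthBps are assumptions, not part of this file):
+ *
+ *     NvU32 pps[DSC_MAX_PPS_SIZE_DWORD];
+ *     NvU32 bitsPerPixelX16 = 0;   // 0 lets DSC_GeneratePPS pick the bits per pixel value
+ *
+ *     if (DSC_GeneratePPS(&dscInfo, &modesetInfo, NULL, availableBandwidthBps,
+ *                         pps, &bitsPerPixelX16) == NVT_STATUS_SUCCESS)
+ *     {
+ *         // program pps[] into the DSC encoder and use bitsPerPixelX16 for bandwidth accounting
+ *     }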
+ */ +NVT_STATUS +DSC_GeneratePPS +( + const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond, + NvU32 pps[DSC_MAX_PPS_SIZE_DWORD], + NvU32 *pBitsPerPixelX16 +) +{ + DSC_INPUT_PARAMS *in = NULL; + NVT_STATUS ret = NVT_STATUS_ERR; + + if ((!pDscInfo) || (!pModesetInfo) || (!pBitsPerPixelX16)) + { + DSC_Print("ERROR - Invalid parameter."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + ret = _validateInput(pDscInfo, pModesetInfo, pWARData, availableBandwidthBitsPerSecond); + if (ret != NVT_STATUS_SUCCESS) + { + DSC_Print("ERROR - Invalid parameter."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + in = (DSC_INPUT_PARAMS *)DSC_Malloc(sizeof(DSC_INPUT_PARAMS)); + if (in == NULL) + { + DSC_Print("ERROR - Memory allocation error."); + ret = NVT_STATUS_NO_MEMORY; + goto done; + } + + NVMISC_MEMSET(in, 0, sizeof(DSC_INPUT_PARAMS)); + + in->bits_per_component = pModesetInfo->bitsPerComponent; + in->linebuf_depth = MIN((pDscInfo->sinkCaps.lineBufferBitDepth), (pDscInfo->gpuCaps.lineBufferBitDepth)); + in->block_pred_enable = pDscInfo->sinkCaps.bBlockPrediction; + + switch (pModesetInfo->colorFormat) + { + case NVT_COLOR_FORMAT_RGB: + in->convert_rgb = 1; + break; + + case NVT_COLOR_FORMAT_YCbCr444: + in->convert_rgb = 0; + break; + case NVT_COLOR_FORMAT_YCbCr422: + in->convert_rgb = 0; + + if ((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)) + { + in->native_422 = 1; + } + else if (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422) + { + in->simple_422 = 1; + } + else + { + DSC_Print("ERROR - YCbCr422 is not possible with current config."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + break; + case NVT_COLOR_FORMAT_YCbCr420: + in->convert_rgb = 0; + + if ((pDscInfo->gpuCaps.encoderColorFormatMask & DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422) && + (pDscInfo->sinkCaps.decoderColorFormatMask & DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422)) + { + in->native_420 = 1; + } + else + { + DSC_Print("ERROR - YCbCr420 is not possible with current config."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + break; + + default: + DSC_Print("ERROR - Invalid color Format specified."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + // calculate max possible bits per pixel allowed by the available bandwidth + in->bits_per_pixel = (NvU32)((availableBandwidthBitsPerSecond * BPP_UNIT) / pModesetInfo->pixelClockHz); + + if (pWARData && (pWARData->connectorType == DSC_DP)) + { + // + // In DP case, being too close to the available bandwidth caused HW to hang. + // 2 is subtracted based on issues seen in DP CTS testing. Refer to bug 200406501, comment 76 + // This limitation is only on DP, not needed for HDMI DSC HW + // + in->bits_per_pixel = (NvU32)((availableBandwidthBitsPerSecond * BPP_UNIT) / pModesetInfo->pixelClockHz) - (BPP_UNIT/8); + + if (pWARData->dpData.laneCount == 1U) + { + // + // SOR lane fifo might get overflown when DP 1 lane, FEC enabled and pclk*bpp > 96%*linkclk*8 i.e. + // DSC stream is consuming more than 96% of the total bandwidth. Use lower bits per pixel. Refer Bug 200561864. 
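                //
                // Editorial worked example (not part of the original source; the numbers
                // are hypothetical): with ~4.0 Gbps of usable link bandwidth and a
                // 250 MHz pixel clock, the expression below evaluates to
                //     (96 * 4.0e9 * 16) / (100 * 250e6) - 2  =  245 - 2  =  243
                // i.e. roughly 15.2 bits per pixel, before the later clamps and the
                // bits-per-pixel precision alignment are applied.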
+ // + in->bits_per_pixel = (NvU32)((96U * availableBandwidthBitsPerSecond * BPP_UNIT) / (100U * pModesetInfo->pixelClockHz)) - + (BPP_UNIT / 8U); + } + + if ((pWARData->dpData.dpMode == DSC_DP_SST) && (pWARData->dpData.hBlank < 100U)) + { + // + // For short HBlank timing, using bits per pixel value which may have to add DSC padding for each chunk + // may not be possible so use bits per pixel value which won't require DSC padding. Bug 200628516 + // + + NvU32 protocolOverhead; + NvU32 dscOverhead; + NvU32 minSliceCount = (NvU32)NV_CEIL(pModesetInfo->pixelClockHz, (MAX_PCLK_PER_SLICE_KHZ * 1000U)); + NvU32 sliceWidth; + NvU32 i; + + if ((minSliceCount > 2U) &&(minSliceCount < 4U)) + { + minSliceCount = 4U; + } + else if (minSliceCount > 4U) + { + minSliceCount = 8U; + } + + sliceWidth = (NvU32)NV_CEIL(pModesetInfo->activeWidth, minSliceCount); + + if (pWARData->dpData.laneCount == 1U) + { + protocolOverhead = 42U; + } + else if (pWARData->dpData.laneCount == 2U) + { + protocolOverhead = 24U; + } + else + { + protocolOverhead = 21U; + } + + dscOverhead = minSliceCount * 2U; + + if ((pWARData->dpData.hBlank * pWARData->dpData.linkRateHz / pModesetInfo->pixelClockHz) < + (protocolOverhead + dscOverhead + 3U)) + { + // + // For very short HBlank timing, find out bits per pixel value which will not require additional + // DSC padding. 128 will be used as the lowest bits per pixel value. + // + for (i = in->bits_per_pixel; i >= MIN_BITS_PER_PIXEL * BPP_UNIT; i--) + { + if (((i * sliceWidth) % ( 8U * minSliceCount * pWARData->dpData.laneCount * 16U)) == 0U) + { + break; + } + } + in->bits_per_pixel = i; + } + } + } + + // + // bits per pixel upper limit is minimum of 3 times bits per component or 32 + // + if (in->bits_per_pixel > MIN((3 * in->bits_per_component * BPP_UNIT), (MAX_BITS_PER_PIXEL * BPP_UNIT))) + { + in->bits_per_pixel = MIN((3 * in->bits_per_component * BPP_UNIT), (MAX_BITS_PER_PIXEL * BPP_UNIT)); + } + + in->bits_per_pixel = DSC_AlignDownForBppPrecision(in->bits_per_pixel, pDscInfo->sinkCaps.bitsPerPixelPrecision); + + // If user specified bits_per_pixel value to be used check if it is valid one + if (*pBitsPerPixelX16 != 0) + { + *pBitsPerPixelX16 = DSC_AlignDownForBppPrecision(*pBitsPerPixelX16, pDscInfo->sinkCaps.bitsPerPixelPrecision); + if (*pBitsPerPixelX16 > in->bits_per_pixel) + { + DSC_Print("ERROR - Invalid bits per pixel value specified."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + else + { + in->bits_per_pixel = *pBitsPerPixelX16; + } + + // For DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16. + if (pModesetInfo->bDualMode && (in->bits_per_pixel > 256 /*bits_per_pixel = 16*/)) + { + DSC_Print("ERROR - DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + + if ((pDscInfo->sinkCaps.maxBitsPerPixelX16 != 0) && (*pBitsPerPixelX16 > pDscInfo->sinkCaps.maxBitsPerPixelX16)) + { + DSC_Print("ERROR - bits per pixel value specified by user is greater than what DSC decompressor can support."); + ret = NVT_STATUS_INVALID_PARAMETER; + goto done; + } + } + else + { + // + // For DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16. + // Forcing it to 16. 
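        //
        // Editorial note (not part of the original source): bits_per_pixel here is a
        // fixed-point value in 1/16th-bpp units (BPP_UNIT is 16 in this file), so the
        // limit of 256 below corresponds to 16.0 bpp, and the 126/127 -> 128
        // adjustment that follows corresponds to rounding 7.875/7.9375 bpp up to
        // exactly 8.0 bpp.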
+ // + if (pModesetInfo->bDualMode && (in->bits_per_pixel > 256 /*bits_per_pixel = 16*/)) + { + DSC_Print("ERROR - DSC Dual Mode, because of architectural limitation we can't use bits_per_pixel more than 16."); + DSC_Print("ERROR - Forcing it to 16."); + in->bits_per_pixel = 256; + } + + // If calculated bits_per_pixel is 126 or 127, we need to use 128 value. Bug 2686078 + if ((in->bits_per_pixel == 126) || (in->bits_per_pixel == 127)) + { + DSC_Print("WARNING: bits_per_pixel is forced to 128 because calculated value was 126 or 127"); + in->bits_per_pixel = 128; + } + + if ((pDscInfo->sinkCaps.maxBitsPerPixelX16 != 0) && (in->bits_per_pixel > pDscInfo->sinkCaps.maxBitsPerPixelX16)) + { + DSC_Print("WARNING - Optimal bits per pixel value calculated is greater than what DSC decompressor can support. Forcing it to max that decompressor can support"); + in->bits_per_pixel = pDscInfo->sinkCaps.maxBitsPerPixelX16; + } + } + + in->dsc_version_minor = pDscInfo->forcedDscParams.dscRevision.versionMinor ? pDscInfo->forcedDscParams.dscRevision.versionMinor : + pDscInfo->sinkCaps.algorithmRevision.versionMinor; + in->pic_width = pModesetInfo->activeWidth; + in->pic_height = pModesetInfo->activeHeight; + in->slice_height = pDscInfo->forcedDscParams.sliceHeight; + in->slice_width = pDscInfo->forcedDscParams.sliceWidth; + in->slice_num = pDscInfo->forcedDscParams.sliceCount; + in->max_slice_num = MIN(pDscInfo->sinkCaps.maxNumHztSlices, + pModesetInfo->bDualMode ? pDscInfo->gpuCaps.maxNumHztSlices * 2 : pDscInfo->gpuCaps.maxNumHztSlices); + in->max_slice_width = pDscInfo->sinkCaps.maxSliceWidth; + in->pixel_clkMHz = (NvU32)(pModesetInfo->pixelClockHz / 1000000L); + in->dual_mode = pModesetInfo->bDualMode; + in->drop_mode = pModesetInfo->bDropMode; + in->slice_count_mask = pDscInfo->sinkCaps.sliceCountSupportedMask; + in->peak_throughput_mode0 = pDscInfo->sinkCaps.peakThroughputMode0; + in->peak_throughput_mode1 = pDscInfo->sinkCaps.peakThroughputMode1; + + ret = DSC_PpsDataGen(in, pps); + + *pBitsPerPixelX16 = in->bits_per_pixel; + + /* fall through */ +done: + DSC_Free(in); + + return ret; +} + +/* + * @brief Initializes callbacks for print and assert + * + * @param[in] callback DSC callbacks + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +NVT_STATUS DSC_InitializeCallback(DSC_CALLBACK callback) +{ + // if callbacks are initialized already, return nothing to do + if (callbacks.dscMalloc && callbacks.dscFree) + { + return NVT_STATUS_SUCCESS; + } + +#if defined(DSC_CALLBACK_MODIFIED) + callbacks.clientHandle = callback.clientHandle; +#endif // DSC_CALLBACK_MODIFIED + callbacks.dscPrint = NULL; + callbacks.dscMalloc = callback.dscMalloc; + callbacks.dscFree = callback.dscFree; +#if defined (DEBUG) + callbacks.dscPrint = callback.dscPrint; +#endif + + return NVT_STATUS_SUCCESS; +} diff --git a/src/common/modeset/timing/nvt_dsc_pps.h b/src/common/modeset/timing/nvt_dsc_pps.h new file mode 100644 index 000000000..651d420c2 --- /dev/null +++ b/src/common/modeset/timing/nvt_dsc_pps.h @@ -0,0 +1,324 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* +=============================================================================== + + dsc_pps.h + + Provide definition needed for DSC(Display Stream Compression) PPS(Picture Parameter Set) + +================================================================================ +*/ + +#ifndef __DSCPPS_H__ +#define __DSCPPS_H__ + +/* ------------------------ Includes --------------------------------------- */ +#include "nvtypes.h" +#include "nvtiming.h" + +/* ------------------------ Macros ----------------------------------------- */ +#define DSC_MAX_PPS_SIZE_DWORD 32 + +/* ------------------------ Datatypes -------------------------------------- */ + +#define DSC_CALLBACK_MODIFIED 1 + +#if defined(DSC_CALLBACK_MODIFIED) +typedef struct +{ + // DSC - Callbacks + const void* clientHandle; // ClientHandle is only used when calling into HDMI lib's mallocCb/freeCb + void (*dscPrint) (const char* fmtstring, ...); + void *(*dscMalloc)(const void *clientHandle, NvLength size); + void (*dscFree) (const void *clientHandle, void * ptr); +} DSC_CALLBACK; +#else +typedef struct +{ + // DSC - Callbacks + void (*dscPrint) (const char* fmtstring, ...); + void *(*dscMalloc)(NvLength size); + void (*dscFree) (void * ptr); +} DSC_CALLBACK; +#endif // DSC_CALLBACK_MODIFIED + +typedef struct +{ + NvU32 versionMajor; + NvU32 versionMinor; +} DSC_ALGORITHM_REV; + +typedef struct +{ + NvU64 pixelClockHz; // Requested pixel clock for the mode + NvU32 activeWidth; // Active Width + NvU32 activeHeight; // Active Height + NvU32 bitsPerComponent; // BPC value to be used + NVT_COLOR_FORMAT colorFormat; // Color format to be used for this modeset + + // + // Whether to enable Dual mode for DSC. + // Dual mode specifies that 2 heads would be generating + // pixels for complete stream. + // + NvBool bDualMode; + + // + // Whether to enable DROP mode for DSC. + // DROP mode specifies that instead of compressing the pixels, OR will drop + // the pixels of the right half frame to reduce the data rate by half. 
+ // This mode is added for testing 2head1OR solution without a DSC panel + // + NvBool bDropMode; +} MODESET_INFO; + +typedef struct +{ + struct SINK_DSC_CAPS + { + // Mask of all color formats for which decoding supported by panel + NvU32 decoderColorFormatMask; +#define DSC_DECODER_COLOR_FORMAT_RGB (0x00000001) +#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002) +#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_SIMPLE_422 (0x00000004) +#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000008) +#define DSC_DECODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000010) + + // e.g. 1/16, 1/8, 1/4, 1/2, 1bpp + NvU32 bitsPerPixelPrecision; +#define DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001) +#define DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002) +#define DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000004) +#define DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000008) +#define DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000010) + + // Maximum slice width supported by panel + NvU32 maxSliceWidth; + + // Maximum number of horizontal slices supported + NvU32 maxNumHztSlices; + + // Slice counts supported by the sink + NvU32 sliceCountSupportedMask; +#define DSC_DECODER_SLICES_PER_SINK_INVALID (0x00000000) +#define DSC_DECODER_SLICES_PER_SINK_1 (0x00000001) +#define DSC_DECODER_SLICES_PER_SINK_2 (0x00000002) +#define DSC_DECODER_SLICES_PER_SINK_4 (0x00000008) +#define DSC_DECODER_SLICES_PER_SINK_6 (0x00000010) +#define DSC_DECODER_SLICES_PER_SINK_8 (0x00000020) +#define DSC_DECODER_SLICES_PER_SINK_10 (0x00000040) +#define DSC_DECODER_SLICES_PER_SINK_12 (0x00000080) +#define DSC_DECODER_SLICES_PER_SINK_16 (0x00000100) +#define DSC_DECODER_SLICES_PER_SINK_20 (0x00000200) +#define DSC_DECODER_SLICES_PER_SINK_24 (0x00000400) + + // + // Bit depth used by the Sink device to store the + // reconstructed pixels within the line buffer + // + NvU32 lineBufferBitDepth; +#define DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MIN (0x00000008) +#define DSC_DECODER_LINE_BUFFER_BIT_DEPTH_MAX (0x0000000D) + + NvU32 decoderColorDepthCaps; // Color depth supported by DSC decoder of panel +#define DSC_DECODER_COLOR_DEPTH_CAPS_8_BITS (0x00000001) +#define DSC_DECODER_COLOR_DEPTH_CAPS_10_BITS (0x00000002) +#define DSC_DECODER_COLOR_DEPTH_CAPS_12_BITS (0x00000004) +#define DSC_DECODER_COLOR_DEPTH_CAPS_16_BITS (0x00000008) + + NvU32 decoderColorDepthMask; + + DSC_ALGORITHM_REV algorithmRevision; // DSC algorithm revision that sink supports + + NvBool bBlockPrediction; // Whether block prediction is supported or not. 
+ + // Peak throughput supported for 444 and simple 422 modes + NvU32 peakThroughputMode0; +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_INVALID (0x00000000) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_340 (0x00000001) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_400 (0x00000002) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_450 (0x00000003) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_500 (0x00000004) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_550 (0x00000005) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_600 (0x00000006) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_650 (0x00000007) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_700 (0x00000008) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_750 (0x00000009) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_800 (0x0000000A) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_850 (0x0000000B) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_900 (0x0000000C) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_950 (0x0000000D) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_1000 (0x0000000E) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE0_170 (0x0000000F) + + // Peak throughput supported for native 422 and 420 modes + NvU32 peakThroughputMode1; +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_INVALID (0x00000000) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_340 (0x00000001) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_400 (0x00000002) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_450 (0x00000003) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_500 (0x00000004) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_550 (0x00000005) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_600 (0x00000006) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_650 (0x00000007) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_700 (0x00000008) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_750 (0x00000009) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_800 (0x0000000A) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_850 (0x0000000B) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_900 (0x0000000C) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_950 (0x0000000D) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_1000 (0x0000000E) +#define DSC_DECODER_PEAK_THROUGHPUT_MODE1_170 (0x0000000F) + + // Maximum bits_per_pixel supported by the DSC decompressor multiplied by 16 + NvU32 maxBitsPerPixelX16; + }sinkCaps; + + struct GPU_DSC_CAPS + { + // Mask of all color formats for which encoding supported by GPU + NvU32 encoderColorFormatMask; +#define DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001) +#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002) +#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004) +#define DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008) + + // + // Size of line buffer inside DSC. Should be in number of pixels. + // this should be greater than or equal to active width + // + NvU32 lineBufferSize; + + // e.g. 
1/16, 1/8, 1/4, 1/2, 1bpp + NvU32 bitsPerPixelPrecision; + + // Maximum number of horizontal slices supported + NvU32 maxNumHztSlices; + + // + // Bit depth used by the GPU to store the + // reconstructed pixels within the line buffer + // + NvU32 lineBufferBitDepth; + }gpuCaps; + + struct FORCED_DSC_PARAMS + { + // Forced Slice count + NvU32 sliceCount; + + // Forced Slice width + NvU32 sliceWidth; + + // Forced Slice height + NvU32 sliceHeight; + + // Forced DSC Algorithm Revision + DSC_ALGORITHM_REV dscRevision; + }forcedDscParams; +} DSC_INFO; + +typedef struct +{ + NvU32 manufacturerID; + NvU32 productID; + NvU32 yearWeek; +} EDID_INFO; + +typedef enum +{ + DSC_DP, + DSC_HDMI +} DSC_CONNECTOR_TYPE; + +typedef enum +{ + DSC_DP_SST, + DSC_DP_MST +} DSC_DP_MODE; + +typedef struct +{ + DSC_CONNECTOR_TYPE connectorType; + struct DP_DATA + { + NvU64 linkRateHz; + NvU32 laneCount; + DSC_DP_MODE dpMode; + NvU32 hBlank; + }dpData; +} WAR_DATA; + +/* + * Windows testbed compiles are done with warnings as errors + * with the maximum warning level. Here we turn off some + * of the problematic warnings. + */ + +/* ------------------------ Global Variables ------------------------------- */ +/* ------------------------ Static Variables ------------------------------- */ +/* ------------------------ Private Functions ------------------------------ */ +/* ------------------------ Public Functions ------------------------------- */ + +#ifdef __cplusplus +extern "C" { +#endif +/* + * @brief Initializes callbacks for print and assert + * + * @param[in] callback DSC callbacks + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +NVT_STATUS DSC_InitializeCallback(DSC_CALLBACK callback); + +/* + * @brief Calculate PPS parameters based on passed down Sink, + * GPU capability and modeset info + * + * @param[in] pDscInfo Includes Sink and GPU DSC capabilities + * @param[in] pModesetInfo Modeset related information + * @param[in] pWARData Data required for providing WAR for issues + * @param[in] availableBandwidthBitsPerSecond Available bandwidth for video + * transmission(After FEC/Downspread overhead consideration) + * @param[out] pps Calculated PPS parameter. + * The data can be send to SetDscPpsData* methods directly. + * @param[out] pBitsPerPixelX16 Bits per pixel multiplied by 16 + * + * @returns NVT_STATUS_SUCCESS if successful; + * NVT_STATUS_ERR if unsuccessful; + */ +NVT_STATUS DSC_GeneratePPS(const DSC_INFO *pDscInfo, + const MODESET_INFO *pModesetInfo, + const WAR_DATA *pWARData, + NvU64 availableBandwidthBitsPerSecond, + NvU32 pps[DSC_MAX_PPS_SIZE_DWORD], + NvU32 *pBitsPerPixelX16); + +#ifdef __cplusplus +} +#endif +#endif // __DSCPPS_H__ diff --git a/src/common/modeset/timing/nvt_edid.c b/src/common/modeset/timing/nvt_edid.c new file mode 100644 index 000000000..6fe3fb577 --- /dev/null +++ b/src/common/modeset/timing/nvt_edid.c @@ -0,0 +1,2662 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_edid.c +// +// Purpose: the provide edid related services +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "edid.h" + +PUSH_SEGMENTS + +// Macro to declare a TIMING initializer for given parameters without border +#define EST_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rr,pclk,format) \ +{hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',NVT_PROGRESSIVE,pclk,{0,rr,set_rrx1k(pclk,ht,vt),0,1,{0},{0},{0},{0},format,"VESA Established"}} + +DATA_SEGMENT(PAGE_DATA) +#if !defined(NV_WSA) +CONS_SEGMENT(PAGE_CONS) +#endif // wsa + +// There is a large table of strings that translate 3-character PNP vendor IDs to a more user-friendly name in the following header. +// Mark this constant table as pageable. 
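/*
 * Editorial aside (not part of the original source): the EST_TIMING() macro above
 * packs visible size, front porch, sync width, total, sync polarity, nominal refresh
 * rate and pixel clock (in 10 kHz units) into an NVT_TIMING initializer.  As a worked
 * example, the 800x600@60 entry that appears further below,
 *     EST_TIMING( 800,40,128,1056,'+', 600, 1,4, 628,'+',60, 4000, ...)
 * describes a 40.00 MHz pixel clock and a 1056 x 628 total raster, i.e. an exact
 * refresh of 40e6 / (1056 * 628) = ~60.32 Hz, which is what the set_rrx1k() field of
 * the initializer derives from pclk, ht and vt.
 */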
+#include "nvPNPVendorIds.h" + +static const NVT_TIMING EDID_EST[] = +{ + EST_TIMING( 720, 0, 0, 720,'-', 400, 0,0, 400,'-',70, 0,NVT_STATUS_EDID_EST), // 720x400x70Hz (IBM, VGA) + EST_TIMING( 720, 0, 0, 720,'-', 400, 0,0, 400,'-',88, 0,NVT_STATUS_EDID_EST), // 720x400x88Hz (IBM, XGA2) + {640,0,16,96,800,NVT_H_SYNC_NEGATIVE,480,0,10,2,525,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,2518,{0,60,60000,0,1,{0},{0},{0},{0},NVT_STATUS_EDID_EST,"EDID_Established"}}, + + EST_TIMING( 640, 0, 0, 640,'-', 480, 0,0, 480,'-',67, 0,NVT_STATUS_EDID_EST), // 640x480x67Hz (Apple, Mac II) + + // 640x480x72Hz (VESA) - this entry have borders + {640,8,16,40,832,NVT_H_SYNC_NEGATIVE,480,8,1,3,520,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,3150,{0,72,72000,0,1,{0},{0},{0},{0},NVT_STATUS_EDID_EST,"EDID_Established"}}, + EST_TIMING( 640,16, 64, 840,'-', 480, 1,3, 500,'-',75, 3150,NVT_STATUS_EDID_EST), // 640x480x75Hz (VESA) + EST_TIMING( 800,24, 72,1024,'+', 600, 1,2, 625,'+',56, 3600,NVT_STATUS_EDID_EST), // 800x600x56Hz (VESA) + EST_TIMING( 800,40,128,1056,'+', 600, 1,4, 628,'+',60, 4000,NVT_STATUS_EDID_EST), // 800x600x60Hz (VESA) + + EST_TIMING( 800,56,120,1040,'+', 600,37,6, 666,'+',72, 5000,NVT_STATUS_EDID_EST), // 800x600x72Hz (VESA) + EST_TIMING( 800,16, 80,1056,'+', 600, 1,3, 625,'+',75, 4950,NVT_STATUS_EDID_EST), // 800x600x75Hz (VESA) + EST_TIMING( 832, 0, 0, 832,'-', 624, 0,0, 624,'-',75, 0,NVT_STATUS_EDID_EST), // 832x624x75Hz (Apple, Mac II) + EST_TIMING(1024, 0, 0,1024,'-', 768, 0,0, 768,'-',87, 0,NVT_STATUS_EDID_EST), // 1024x768x87Hz (IBM, Interlaced) + + EST_TIMING(1024,24,136,1344,'-', 768, 3,6, 806,'-',60, 6500,NVT_STATUS_EDID_EST), // 1024x768x60Hz (VESA) + EST_TIMING(1024,24,136,1328,'-', 768, 3,6, 806,'-',70, 7500,NVT_STATUS_EDID_EST), // 1024x768x70Hz (VESA) + EST_TIMING(1024,16, 96,1312,'+', 768, 1,3, 800,'+',75, 7875,NVT_STATUS_EDID_EST), // 1024x768x75Hz (VESA) + EST_TIMING(1280,16,144,1688,'+',1024, 1,3,1066,'+',75,13500,NVT_STATUS_EDID_EST), // 1280x1024x75Hz (VESA) + + // the end + NVT_TIMING_SENTINEL +}; + +static NvU32 MAX_EST_FORMAT = sizeof(EDID_EST)/sizeof(EDID_EST[0]) - 1; + +static const NVT_TIMING EDID_ESTIII[] = +{ + EST_TIMING( 640, 32, 64, 832,'+', 350,32,3, 445,'-',85, 3150,NVT_STATUS_EDID_EST), // 640x350x85Hz + EST_TIMING( 640, 32, 64, 832,'-', 400, 1,3, 445,'+',85, 3150,NVT_STATUS_EDID_EST), // 640x400x85Hz + EST_TIMING( 720, 36, 72, 936,'-', 400, 1,3, 446,'+',85, 3550,NVT_STATUS_EDID_EST), // 720x400x85Hz + EST_TIMING( 640, 56, 56, 832,'-', 480, 1,3, 509,'-',85, 3600,NVT_STATUS_EDID_EST), // 640x480x85Hz + EST_TIMING( 848, 16,112,1088,'+', 480, 6,8, 517,'+',60, 3375,NVT_STATUS_EDID_EST), // 848x480x60HZ + EST_TIMING( 800, 32, 64,1048,'+', 600, 1,3, 631,'+',85, 5625,NVT_STATUS_EDID_EST), // 800x600x85Hz + EST_TIMING(1024, 48, 96,1376,'+', 768, 1,3, 808,'+',85, 9450,NVT_STATUS_EDID_EST), // 1024x768x85Hz + EST_TIMING(1152, 64,128,1600,'+', 864, 1,3, 900,'+',75,10800,NVT_STATUS_EDID_EST), // 1152x864x75Hz + + EST_TIMING(1280, 48, 32,1440,'+', 768, 3,7, 790,'-',60, 6825,NVT_STATUS_EDID_EST), // 1280x768x60Hz (RB) + EST_TIMING(1280, 64,128,1664,'-', 768, 3,7, 798,'+',60, 7950,NVT_STATUS_EDID_EST), // 1280x768x60Hz + EST_TIMING(1280, 80,128,1696,'-', 768, 3,7, 805,'+',75,10225,NVT_STATUS_EDID_EST), // 1280x768x75Hz + EST_TIMING(1280, 80,136,1712,'-', 768, 3,7, 809,'+',85,11750,NVT_STATUS_EDID_EST), // 1280x768x85Hz + EST_TIMING(1280, 96,112,1800,'+', 960, 1,3,1000,'+',60,10800,NVT_STATUS_EDID_EST), // 1280x960x60Hz + EST_TIMING(1280, 64,160,1728,'+', 960, 
1,3,1011,'+',85,14850,NVT_STATUS_EDID_EST), // 1280x960x85Hz + EST_TIMING(1280, 48,112,1688,'+',1024, 1,3,1066,'+',60,10800,NVT_STATUS_EDID_EST), // 1280x1024x60Hz + EST_TIMING(1280, 64,160,1728,'+',1024, 1,3,1072,'+',85,15750,NVT_STATUS_EDID_EST), // 1280x1024x85Hz + + EST_TIMING(1360, 64,112,1792,'+', 768, 3,6, 795,'+',60, 8550,NVT_STATUS_EDID_EST), // 1360x768x60Hz + EST_TIMING(1440, 48, 32,1600,'+', 900, 3,6, 926,'-',60, 8875,NVT_STATUS_EDID_EST), // 1440x900x60Hz (RB) + EST_TIMING(1440, 80,152,1904,'-', 900, 3,6, 934,'+',60,10650,NVT_STATUS_EDID_EST), // 1440x900x60Hz + EST_TIMING(1440, 96,152,1936,'-', 900, 3,6, 942,'+',75,13675,NVT_STATUS_EDID_EST), // 1440x900x75Hz + EST_TIMING(1440,104,152,1952,'-', 900, 3,6, 948,'+',85,15700,NVT_STATUS_EDID_EST), // 1440x900x85Hz + EST_TIMING(1400, 48, 32,1560,'+',1050, 3,4,1080,'-',60,10100,NVT_STATUS_EDID_EST), // 1440x1050x60Hz (RB) + EST_TIMING(1400, 88,144,1864,'-',1050, 3,4,1089,'+',60,12175,NVT_STATUS_EDID_EST), // 1440x1050x60Hz + EST_TIMING(1400,104,144,1896,'-',1050, 3,4,1099,'+',75,15600,NVT_STATUS_EDID_EST), // 1440x1050x75Hz + + EST_TIMING(1400,104,152,1912,'-',1050, 3,4,1105,'+',85,17950,NVT_STATUS_EDID_EST), // 1440x1050x85Hz + EST_TIMING(1680, 48, 32,1840,'+',1050, 3,6,1080,'-',60,11900,NVT_STATUS_EDID_EST), // 1680x1050x60Hz (RB) + EST_TIMING(1680,104,176,2240,'-',1050, 3,6,1089,'+',60,14625,NVT_STATUS_EDID_EST), // 1680x1050x60Hz + EST_TIMING(1680,120,176,2272,'-',1050, 3,6,1099,'+',75,18700,NVT_STATUS_EDID_EST), // 1680x1050x75Hz + EST_TIMING(1680,128,176,2288,'-',1050, 3,6,1105,'+',85,21475,NVT_STATUS_EDID_EST), // 1680x1050x85Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',60,16200,NVT_STATUS_EDID_EST), // 1600x1200x60Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',65,17550,NVT_STATUS_EDID_EST), // 1600x1200x65Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',70,18900,NVT_STATUS_EDID_EST), // 1600x1200x70Hz + + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',75,20250,NVT_STATUS_EDID_EST), // 1600x1200x75Hz + EST_TIMING(1600, 64,192,2160,'+',1200, 1,3,1250,'+',85,22950,NVT_STATUS_EDID_EST), // 1600x1200x85Hz + EST_TIMING(1792,128,200,2448,'-',1344, 1,3,1394,'+',60,20475,NVT_STATUS_EDID_EST), // 1792x1344x60Hz + EST_TIMING(1792, 96,216,2456,'-',1344, 1,3,1417,'+',75,26100,NVT_STATUS_EDID_EST), // 1792x1344x75Hz + EST_TIMING(1856, 96,224,2528,'-',1392, 1,3,1439,'+',60,21825,NVT_STATUS_EDID_EST), // 1856x1392x60Hz + EST_TIMING(1856,128,224,2560,'-',1392, 1,3,1500,'+',75,28800,NVT_STATUS_EDID_EST), // 1856x1392x75Hz + EST_TIMING(1920, 48, 32,2080,'+',1200, 3,6,1235,'-',60,15400,NVT_STATUS_EDID_EST), // 1920x1200x60Hz (RB) + EST_TIMING(1920,136,200,2592,'-',1200, 3,6,1245,'+',60,19325,NVT_STATUS_EDID_EST), // 1920x1200x60Hz + + EST_TIMING(1920,136,208,2608,'-',1200, 3,6,1255,'+',75,24525,NVT_STATUS_EDID_EST), // 1920x1200x75Hz + EST_TIMING(1920,144,208,2624,'-',1200, 3,6,1262,'+',85,28125,NVT_STATUS_EDID_EST), // 1920x1200x85Hz + EST_TIMING(1920,128,208,2600,'-',1440, 1,3,1500,'+',60,23400,NVT_STATUS_EDID_EST), // 1920x1440x60Hz + EST_TIMING(1920,144,224,2640,'-',1440, 1,3,1500,'+',75,29700,NVT_STATUS_EDID_EST), // 1920x1440x75Hz + + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL, + NVT_TIMING_SENTINEL +}; + +static NvU32 MAX_ESTIII_FORMAT = sizeof(EDID_ESTIII)/sizeof(EDID_ESTIII[0]) - 1; + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumEST(NvU32 index, NVT_TIMING *pT) +{ + if ((pT == NULL) || (index > MAX_EST_FORMAT)) + { + return NVT_STATUS_ERR; + } + + *pT = EDID_EST[index]; 
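    //
    // Editorial worked example (not part of the original source): for the
    // 1024x768@75 entry (pclk = 7875, i.e. 78.75 MHz, with a 1312 x 800 total raster),
    // the rrx1k computation below yields 7875 * 10,000,000 / (1312 * 800) = 75028,
    // i.e. a refresh rate of 75.028 Hz expressed in units of 0.001 Hz.
    //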
+ + if (pT->HTotal == 0 || pT->VTotal == 0) + { + return NVT_STATUS_ERR; + } + + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, + (NvU32)10000*(NvU32)1000, + (NvU32)pT->HTotal*(NvU32)pT->VTotal); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumESTIII(NvU32 index, NVT_TIMING *pT) +{ + if ((pT == NULL) || (index > MAX_ESTIII_FORMAT)) + { + return NVT_STATUS_ERR; + } + + *pT = EDID_ESTIII[index]; + + if (pT->HTotal == 0 || pT->VTotal == 0) + { + return NVT_STATUS_ERR; + } + + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, + (NvU32)10000*(NvU32)1000, + (NvU32)pT->HTotal*(NvU32)pT->VTotal); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 isHdmi3DStereoType(NvU8 StereoStructureType) +{ + return ((NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK == StereoStructureType) || + (NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT == StereoStructureType) || + (NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT == StereoStructureType) || + (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL == StereoStructureType) || + (NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH == StereoStructureType) || + (NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX == StereoStructureType) || + (NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM == StereoStructureType) || + (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == StereoStructureType)); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_GetVESADisplayDescriptorVersion(NvU8 *rawData, NvU32 *pVer) +{ + return getEdidVersion(rawData, pVer); +} + +// get the EDID version +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 getEdidVersion(NvU8 *pEdid, NvU32 *pVer) +{ + EDIDV1STRUC *p = (EDIDV1STRUC *) pEdid; + + if (pEdid[0] == 0x00) + { + // For Version 1.x, first 8 bytes of EDID must be 00h, FFh, FFh, FFh, FFh, FFh, FFh, 00h. + // Beware of Endian-ness and signed-ness. + if (p->bHeader[1] != 0xFF || p->bHeader[2] != 0xFF || p->bHeader[3] != 0xFF || + p->bHeader[4] != 0xFF || p->bHeader[5] != 0xFF || p->bHeader[6] != 0xFF || + p->bHeader[7] != 0x00) + return NVT_STATUS_ERR; + + *pVer = (((NvU32) p->bVersionNumber) << 8) + ((NvU32) p->bRevisionNumber); + } + else if ((pEdid[0] & 0xF0) == 0x20 && (pEdid[0] & 0x0F) >=0) + *pVer = (((NvU32) (pEdid[0] & 0XF0) << 4) + (NvU32) (pEdid[0] & 0X0F)) ; // DisplayID version 2.x + else + return NVT_STATUS_ERR; // un-recongnized EDID version + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidCvt3ByteDescriptor(NvU8 *p, NVT_EDID_INFO *pInfo, NvU32 *vtbCount) +{ + NvU32 k; + NvU32 width, height, aspect, rr = 0; + NVT_EDID_DD_CVT_3BYTE_BLOCK *pTiming = (NVT_EDID_DD_CVT_3BYTE_BLOCK *)p; + NVT_TIMING newTiming; + NVT_STATUS status; + + + if (pTiming->addressable_lines == 0) + return; + + height = pTiming->addressable_lines; + aspect = pTiming->aspect_ratio; + + if (aspect == NVT_EDID_CVT3_ASPECT_4X3) + width = height * 4 / 3; + else if (aspect == NVT_EDID_CVT3_ASPECT_16X9) + width = height * 16 / 9; + else if (aspect == NVT_EDID_CVT3_ASPECT_16X10) + width = height * 16 / 10; + else //15:9 + width = height * 15 / 9; + + width &= 0xFFFFFFF8; // round down to nearest 8 + + // loop through bits4:0 of supported_vert_rate so we can add a timing + // for each supported rate + for (k=1; k<=0x10; k<<=1) + { + // skip if this bit indicate no support for the rate; + if ( (pTiming->supported_vert_rates & (k)) == 0) + continue; + + // find the correct refresh rate for this bit + switch (k) + { + case NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING : + case NVT_EDID_CVT3_SUPPORTED_RATE_60HZ : + rr = 60; + break; + case NVT_EDID_CVT3_SUPPORTED_RATE_85HZ : + rr = 85; + break; + 
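            // Editorial note (not part of the original source): a descriptor that sets
            // both the fixed 60 Hz bit and the 60 Hz reduced-blanking bit takes this
            // loop through two iterations for the same resolution, producing one
            // standard-blanking CVT timing and one reduced-blanking CVT timing below.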
case NVT_EDID_CVT3_SUPPORTED_RATE_75HZ : + rr = 75; + break; + case NVT_EDID_CVT3_SUPPORTED_RATE_50HZ : + rr = 50; + break; + } + + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if ( (k) != NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING) // standard blanking + { + status = NvTiming_CalcCVT(width, height, rr, + NVT_PROGRESSIVE, + &newTiming); + } + else // reduced blanking + { + status = NvTiming_CalcCVT_RB(width, height, rr, + NVT_PROGRESSIVE, + &newTiming); + } + + if (status == NVT_STATUS_SUCCESS) + { + // For VTB timings, add additional information + if (vtbCount) + { + (*vtbCount)++; + newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_CVTn(*vtbCount); + newTiming.etc.name[39] = '\0'; + } + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + } // for (k=1; k<=0x10; k<<=1) + +} + +// parse the EDID 1.x based cvt timing info +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidCvtTiming(NVT_EDID_INFO *pInfo) +{ + NvU32 i, j; + + // find display range limit with cvt, or cvt 3-byte LDDs + for (i=0; ildd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_CVT ) + { + NVT_EDID_DD_CVT_3BYTE *pCVT = (NVT_EDID_DD_CVT_3BYTE *)&pInfo->ldd[i].u.cvt; + + // loop through cvt 3-byte blocks + for (j=0; jblock + j), + pInfo, NULL); + } // for(j=0; jestablished_timings_1_2) * 8 - 1), j = 0; i != 0; i >>= 1, j ++) + { + if ((pInfo->established_timings_1_2 & i) != 0 && EDID_EST[j].pclk != 0) + { + // count the timing + newTiming = EDID_EST[j]; + newTiming.etc.status = NVT_STATUS_EDID_ESTn(++count); + NVT_SNPRINTF((char *)newTiming.etc.name, 40, + "EDID-EST(VESA):%dx%dx%dHz", + (int)newTiming.HVisible, + (int)newTiming.VVisible, + (int)newTiming.etc.rr); + newTiming.etc.name[39] = '\0'; + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + } + + // ESTIII block in ldd only supported in EDID1.4 and above + if (pInfo->version < NVT_EDID_VER_1_4) + return; + + for (i=0; ildd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII ) + { + NVT_EDID_DD_EST_TIMING3* pEST = &pInfo->ldd[i].u.est3; + + for (j=0; jdata[j] & (1<> 8) & 0x3F) + 60; // bits 5->0 + + // get the height + aspect = ((timing >> 8) & 0xC0); // aspect ratio at bit 7:6 + if (aspect == 0x00) + height = (pInfo->version < 0x103) ? width : (width * 5 / 8); //16:10 per EDID1.3 and 1:1 with earlier EDID + else if (aspect == 0x40) + height = width * 3 / 4; //4:3 + else if (aspect == 0x80) + height = width * 4 / 5; //5:4 + else + height = width * 9 / 16; //16:9 + + // try to get the timing from DMT first + if (NvTiming_CalcDMT(width, height, rr, 0, pT) == NVT_STATUS_SUCCESS) + { + pT->etc.status = NVT_STATUS_EDID_STDn(count); + NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(DMT):%dx%dx%dHz", (int)width, (int)height, (int)rr); + pT->etc.name[39] = '\0'; + } + else if (pInfo->version >= NVT_EDID_VER_1_4) + { + // EDID1.4 and above defaults to CVT, instead of GTF. GTF is deprecated as of 1.4. 
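            // Editorial worked example (not part of the original source): an EDID
            // standard-timing byte pair of 0xD1, 0xC0 decodes earlier in this function
            // to width = (0xD1 + 31) * 8 = 1920, aspect bits 11b -> 16:9 so
            // height = 1080, and rr = (0xC0 & 0x3F) + 60 = 60 Hz, which is what the
            // DMT/CVT/GTF calculations in this function receive.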
+ NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, pT); + pT->etc.status = NVT_STATUS_EDID_STDn(count); + NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(CVT):%dx%dx%dHz", (int)width, (int)height, (int)rr); + pT->etc.name[39] = '\0'; + } + else + { + // if the mode is not found in DMT, use GTF timing + NvTiming_CalcGTF(width, height, rr, NVT_PROGRESSIVE, pT); + pT->etc.status = NVT_STATUS_EDID_STDn(count); + NVT_SNPRINTF((char *)pT->etc.name, 40, "EDID-STD(GTF):%dx%dx%dHz", (int)width, (int)height, (int)rr); + pT->etc.name[39] = '\0'; + } +} + +// parse the EDID 1.x based standard timing info +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidStandardTiming(NVT_EDID_INFO *pInfo) +{ + NvU32 i, j; + NVT_TIMING newTiming; + NvU32 count = 0; + + // now check for standard timings + for (i=0; istandard_timings[i] & 0x0FF) != 0x1) && //proper indication of unused field + (pInfo->standard_timings[i] != 0x0)) //improper indication (bad edid) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + parseEdidStandardTimingDescriptor(pInfo->standard_timings[i], + pInfo, count, &newTiming); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + count++; + }//if ((pInfo->standard_timings[i] & 0x0FF) != 0x1) + } //for (i=0; iversion < NVT_EDID_VER_1_4) + return; + + // now check for standard timings in long display descriptors + for (i=0; ildd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_STI ) + { + NVT_EDID_DD_STD_TIMING* pSTI = &pInfo->ldd[i].u.std_timing; + for (j=0; jdescriptor[j] & 0x0FF) != 0x00) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + parseEdidStandardTimingDescriptor(pSTI->descriptor[j], + pInfo, count, &newTiming); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + count++; + } // if ((pSTI->std_timing[i] & 0x0FF) != 0x1) + } //for (j=0; jDetailedTimingDesc[i]).tag = NVT_EDID_DISPLAY_DESCRIPTOR_STI ) + } //for (i=0; iwDTPixelClock !=0 || pDTD->bDTHorizontalActive !=0) + && (pDTD->wDTPixelClock != 0x0101 || pDTD->bDTHorizontalActive != 1 || + pDTD->bDTHorizontalBlanking != 1 || pDTD->bDTHorizActiveBlank != 1)) + { + // Note that hvisible and vvisible here correspond to the "Addressable Video" portion of the + // "Active Video" defined in the EDID spec (see section 3.12: Note Regarding Borders) + hvisible = (pDTD->bDTHorizontalActive + ((pDTD->bDTHorizActiveBlank & 0xF0) << 4)) - 2 * pDTD->bDTHorizontalBorder; + vvisible = (pDTD->bDTVerticalActive + ((pDTD->bDTVertActiveBlank & 0xF0) << 4)) - 2 * pDTD->bDTVerticalBorder; + + // Sanity check since we are getting values from the monitor + if (hvisible <= 0 || vvisible <= 0 || pDTD->wDTPixelClock == 0) + { + if (pT) + pT->HVisible = 0; + return NVT_STATUS_ERR; + } + + // if the output timing buffer is not provide, simply return here to indicate a legal descriptor + if (pT == NULL) + return NVT_STATUS_SUCCESS; + + // horizontal timing parameters + pT->HVisible = (NvU16)hvisible; + pT->HBorder = (NvU16)pDTD->bDTHorizontalBorder; + pT->HTotal = (NvU16)hvisible + (NvU16)(pDTD->bDTHorizontalBlanking + ((pDTD->bDTHorizActiveBlank & 0x0F) << 8)) + pT->HBorder * 2; + pT->HFrontPorch = (NvU16)(pDTD->bDTHorizontalSync + ((pDTD->bDTHorizVertSyncOverFlow & 0xC0) << 2)); + pT->HSyncWidth = (NvU16)(pDTD->bDTHorizontalSyncWidth + ((pDTD->bDTHorizVertSyncOverFlow & 0x30) << 4)); + + // vertical timing parameters + pT->VVisible = (NvU16)vvisible; + pT->VBorder = (NvU16)pDTD->bDTVerticalBorder; + pT->VTotal = (NvU16)vvisible + (NvU16)(pDTD->bDTVerticalBlanking + ((pDTD->bDTVertActiveBlank & 0x0F) << 8)) + 
pT->VBorder * 2; + pT->VFrontPorch = (NvU16)(((pDTD->bDTVerticalSync & 0xF0) >> 4) + ((pDTD->bDTHorizVertSyncOverFlow & 0x0C) << 2)); + pT->VSyncWidth = (NvU16)((pDTD->bDTVerticalSync & 0x0F) + ((pDTD->bDTHorizVertSyncOverFlow & 0x03) << 4)); + + // pixel clock + pT->pclk = (NvU32)pDTD->wDTPixelClock; + + // sync polarities + if ((pDTD->bDTFlags & 0x18) == 0x18) + { + pT->HSyncPol = ((pDTD->bDTFlags & 0x2) != 0) ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = ((pDTD->bDTFlags & 0x4) != 0) ? NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE; + } + else if ((pDTD->bDTFlags & 0x18) == 0x10) + { + pT->HSyncPol = ((pDTD->bDTFlags & 0x2) != 0) ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + } + else + { + pT->HSyncPol = NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + } + + // interlaced + if ((pDTD->bDTFlags & 0x80) == 0x80) + pT->interlaced = 1; + else + pT->interlaced = 0; + + // Eizo split EDID case, using 0th bit to indicate split display capability + if (((pDTD->bDTFlags & 1) == 1) && !(((pDTD->bDTFlags & 0x20) == 0x20) || ((pDTD->bDTFlags & 0x40) == 0x40))) + { + pT->etc.flag |= NVT_FLAG_EDID_DTD_EIZO_SPLIT; + } + if (pT->interlaced) + { + // Adjust for one extra blank line in every other frame. + dwTotalPixels = (((NvU32)pT->HTotal * pT->VTotal) + + ((NvU32)pT->HTotal * (pT->VTotal + 1))) / 2; + } + else + { + dwTotalPixels = (NvU32)pT->HTotal * pT->VTotal; + } + + pT->etc.rr = (NvU16)(((NvU32)pDTD->wDTPixelClock*10000+dwTotalPixels/2)/dwTotalPixels); + // Using utility call to multiply and divide to take care of overflow and truncation of large values + // How did we arrive at 10000000? It comes from the fact that Pixel clock mentioned in EDID is in mulitples of 10KHz = 10000 + // and the refresh rate is mentioned in 0.001Hz, that is 60Hz will be represented as 60000, which brings in the factor of 1000. + // And hence 10000 * 1000 = 10000000 + pT->etc.rrx1k = axb_div_c(pDTD->wDTPixelClock, 10000000, dwTotalPixels); + pT->etc.status = NVT_STATUS_EDID_DTD; + NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "EDID-Detailed:%dx%dx%d.%03dHz%s", (int)pT->HVisible, (int)(pT->interlaced ? 2 : 1)*pT->VVisible , (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? "/i" : "")); + pT->etc.name[sizeof(pT->etc.name) - 1] = '\0'; + + // aspect ratio + pT->etc.aspect = (pDTD->bDTHorizVertImage & 0xF0) << 20 | pDTD->bDTHorizontalImage << 16 | + (pDTD->bDTHorizVertImage & 0x0F) << 8 | pDTD->bDTVerticalImage; + + pT->etc.rep = 0x1; // Bit mask for no pixel repetition. 
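        //
        // Editorial note (not part of the original source): the 12-bit timing fields
        // above are reassembled from a low byte plus a high nibble.  For example, for
        // 1920 (0x780) active pixels the descriptor stores 0x80 in bDTHorizontalActive
        // and 0x7 in the upper nibble of bDTHorizActiveBlank, so the high nibble
        // contributes 0x700 and hvisible = 0x80 + 0x700 = 1920 (before the border
        // pixels are subtracted).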
+ + return NVT_STATUS_SUCCESS; + } + + return NVT_STATUS_ERR; +} + +// parse the EDID 1.x based standard timing info +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidDetailedTiming(NvU8 *pEdid, NVT_EDID_INFO *pInfo) +{ + EDIDV1STRUC *p = (EDIDV1STRUC *) pEdid; + NVT_TIMING newTiming; + NvU32 i; + NvBool found = NV_FALSE; + + for (i = 0; i < 4; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseEdidDetailedTimingDescriptor((NvU8 *)&p->DetailedTimingDesc[i], + &newTiming) == NVT_STATUS_SUCCESS) + { + newTiming.etc.status = NVT_STATUS_EDID_DTDn(i+1); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + found = NV_TRUE; + } + } + + if (found) + { + // if edid_ver 1.3, PTM flag should be set + //nvt_assert(pInfo->version > 0x103 || (pInfo->u.feature & + // NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE)); + + if (pInfo->u.feature & NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE) + { + pInfo->timing[0].etc.flag |= NVT_FLAG_DTD1_PREFERRED_TIMING; + } + } +} + + +// parse the EDID 1.x 18-byte long display descriptor +CODE_SEGMENT(PAGE_DD_CODE) +static void parseEdidLongDisplayDescriptor(EDID_LONG_DISPLAY_DESCRIPTOR *descriptor, NVT_EDID_18BYTE_DESCRIPTOR *p, NvU32 version) +{ + NvU32 i; + + // bypass the input pointer check in this private function + + // return if it's a detailed timing descriptor + if (descriptor->prefix[0] != 0 || descriptor->prefix[1] != 0) + return; + + // other sanity check for the input data + if (descriptor->rsvd != 0) + return; + + p->tag = descriptor->tag; + + // now translate the descriptor + switch (descriptor->tag) + { + case NVT_EDID_DISPLAY_DESCRIPTOR_DPSN: // display product serial number + case NVT_EDID_DISPLAY_DESCRITPOR_DPN: // display product name + case NVT_EDID_DISPLAY_DESCRIPTOR_ADS: // alphanumeric data string (ASCII) + + // copy the 13 characters payload from the 18-byte descriptor + for (i = 0; i < NVT_PVT_EDID_LDD_PAYLOAD_SIZE; i++) + { + if (descriptor->data[i] == 0x0A) + p->u.serial_number.str[i] = '\0'; + else + p->u.serial_number.str[i] = descriptor->data[i]; + } + break; + + case NVT_EDID_DISPLAY_DESCRIPTOR_DRL: // display range limit + { + EDID_MONITOR_RANGE_LIMIT *pRangeLimit = (EDID_MONITOR_RANGE_LIMIT *)&descriptor->data[0]; + + p->u.range_limit.min_v_rate = pRangeLimit->minVRate; + p->u.range_limit.max_v_rate = pRangeLimit->maxVRate; + p->u.range_limit.min_h_rate = pRangeLimit->minHRate; + p->u.range_limit.max_h_rate = pRangeLimit->maxHRate; + p->u.range_limit.max_pclk_MHz = pRangeLimit->maxPClock10M * 10; + p->u.range_limit.timing_support = pRangeLimit->timing_support; + + // add 255Hz offsets if needed, use descriptor->rsvd2 + // to offset the min values their max MUST be offset as well + if (version >= NVT_EDID_VER_1_4) + { + if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MAX) + { + p->u.range_limit.max_v_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT; + if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MIN) + { + p->u.range_limit.min_v_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT; + } + } + if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX) + { + p->u.range_limit.max_h_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT; + if (descriptor->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN) + { + p->u.range_limit.min_h_rate += NVT_PVT_EDID_RANGE_OFFSET_AMOUNT; + } + } + } + + if (p->u.range_limit.timing_support == NVT_EDID_RANGE_SUPPORT_GTF2) + { + // descriptor->data[7] + // Start frequency for secondary curve, hor freq./2[kHz] + p->u.range_limit.u.gtf2.C = pRangeLimit->u.gtf2.C / 2; // 0 <= C <= 127 + 
p->u.range_limit.u.gtf2.K = pRangeLimit->u.gtf2.K; // 0 <= K <= 255 + p->u.range_limit.u.gtf2.J = pRangeLimit->u.gtf2.J / 2; // 0 <= J <= 127 + p->u.range_limit.u.gtf2.M = (pRangeLimit->u.gtf2.M_MSB << 8) + + pRangeLimit->u.gtf2.M_LSB; // 0 <= M <= 65535 + } + else if (p->u.range_limit.timing_support == NVT_EDID_RANGE_SUPPORT_CVT) + { + // the pixel clock adjustment is in cvt.pixel_clock @ bits7:2 + // that number is in 0.25MHz, ie actual max clock is max_pclk_MHz - (0.25 x cvt_pixel_clock) + // subtract the whole number part from max_pclk_MHz, save the remainder + p->u.range_limit.max_pclk_MHz -= (pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK) >> NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT >> 2; // ie divide by 4 to get whole number + p->u.range_limit.u.cvt.pixel_clock_adjustment = ((pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_PIXEL_CLOCK_MASK) >> NVT_PVT_EDID_CVT_PIXEL_CLOCK_SHIFT) & 0x03; // ie modulus 4 + + p->u.range_limit.u.cvt.max_active_pixels_per_line = (pRangeLimit->u.cvt.pixel_clock & NVT_PVT_EDID_CVT_ACTIVE_MSB_MASK) << NVT_PVT_EDID_CVT_ACTIVE_MSB_SHIFT; + p->u.range_limit.u.cvt.max_active_pixels_per_line |= pRangeLimit->u.cvt.max_active; + p->u.range_limit.u.cvt.max_active_pixels_per_line <<= 3; // ie multiply 8 + + p->u.range_limit.u.cvt.aspect_supported = (pRangeLimit->u.cvt.aspect_supported & NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_MASK) >> NVT_PVT_EDID_CVT_ASPECT_SUPPORTED_SHIFT; + + p->u.range_limit.u.cvt.aspect_preferred = ( pRangeLimit->u.cvt.aspect_preferred_blanking & NVT_PVT_EDID_CVT_ASPECT_PREFERRED_MASK) >> NVT_PVT_EDID_CVT_ASPECT_PREFERRED_SHIFT; + p->u.range_limit.u.cvt.blanking_support = ( pRangeLimit->u.cvt.aspect_preferred_blanking & NVT_PVT_EDID_CVT_BLANKING_MASK) >> NVT_PVT_EDID_CVT_BLANKING_SHIFT; + + p->u.range_limit.u.cvt.scaling_support = (pRangeLimit->u.cvt.scaling_support & NVT_PVT_EDID_CVT_SCALING_MASK) >> NVT_PVT_EDID_CVT_SCALING_SHIFT; + + p->u.range_limit.u.cvt.preferred_refresh_rate = pRangeLimit->u.cvt.preferred_refresh_rate; + } + } + + break; + + case NVT_EDID_DISPLAY_DESCRIPTOR_CPD: // color point data + { + EDID_COLOR_POINT_DATA *pColorPoint = (EDID_COLOR_POINT_DATA *)&descriptor->data[0]; + + p->u.color_point.wp1_index = pColorPoint->wp1_index; + p->u.color_point.wp1_x = pColorPoint->wp1_x << 2; + p->u.color_point.wp1_x |= (pColorPoint->wp1_x_y & NVT_PVT_EDID_CPD_WP_X_MASK) >> NVT_PVT_EDID_CPD_WP_X_SHIFT; + p->u.color_point.wp1_y = pColorPoint->wp1_y << 2; + p->u.color_point.wp1_y |= (pColorPoint->wp1_x_y & NVT_PVT_EDID_CPD_WP_Y_MASK) >> NVT_PVT_EDID_CPD_WP_Y_SHIFT; + p->u.color_point.wp1_gamma = pColorPoint->wp1_gamma + 100; + + p->u.color_point.wp2_index = pColorPoint->wp2_index; + p->u.color_point.wp2_x = pColorPoint->wp2_x << 2; + p->u.color_point.wp2_x |= (pColorPoint->wp2_x_y & NVT_PVT_EDID_CPD_WP_X_MASK) >> NVT_PVT_EDID_CPD_WP_X_SHIFT; + p->u.color_point.wp2_y = pColorPoint->wp2_y << 2; + p->u.color_point.wp2_y |= (pColorPoint->wp2_x_y & NVT_PVT_EDID_CPD_WP_Y_MASK) >> NVT_PVT_EDID_CPD_WP_Y_SHIFT; + p->u.color_point.wp2_gamma = pColorPoint->wp2_gamma + 100; + } + break; + + case NVT_EDID_DISPLAY_DESCRIPTOR_STI: // standard timing identification + { + EDID_STANDARD_TIMING_ID *pStdTiming = (EDID_STANDARD_TIMING_ID *)&descriptor->data[0]; + + for(i=0; iu.std_timing.descriptor[i] = pStdTiming->std_timing[i]; + } + } + break; + + case NVT_EDID_DISPLAY_DESCRIPTOR_DCM: // display color management + { + EDID_COLOR_MANAGEMENT_DATA *pColorMan = (EDID_COLOR_MANAGEMENT_DATA *)&descriptor->data[0]; + + p->u.color_man.red_a3 = 
pColorMan->red_a3_lsb | (pColorMan->red_a3_msb << 8); + p->u.color_man.red_a2 = pColorMan->red_a2_lsb | (pColorMan->red_a2_msb << 8); + + p->u.color_man.green_a3 = pColorMan->green_a3_lsb | (pColorMan->green_a3_msb << 8); + p->u.color_man.green_a2 = pColorMan->green_a2_lsb | (pColorMan->green_a2_msb << 8); + + p->u.color_man.blue_a3 = pColorMan->blue_a3_lsb | (pColorMan->blue_a3_msb << 8); + p->u.color_man.blue_a2 = pColorMan->blue_a2_lsb | (pColorMan->blue_a2_msb << 8); + } + break; + + case NVT_EDID_DISPLAY_DESCRIPTOR_CVT: // CVT 3-byte timing code + { + EDID_CVT_3BYTE *pCVT_3byte = (EDID_CVT_3BYTE *)&descriptor->data[0]; + + for (i=0; iblock[i].addressable_lines != 0) + { + p->u.cvt.block[i].addressable_lines = pCVT_3byte->block[i].addressable_lines; + p->u.cvt.block[i].addressable_lines |= (pCVT_3byte->block[i].lines_ratio & NVT_PVT_EDID_CVT3_LINES_MSB_MASK) << NVT_PVT_EDID_CVT3_LINES_MSB_SHIFT; + p->u.cvt.block[i].addressable_lines +=1; + p->u.cvt.block[i].addressable_lines <<= 1; + + p->u.cvt.block[i].aspect_ratio = (pCVT_3byte->block[i].lines_ratio & NVT_PVT_EDID_CVT3_ASPECT_MASK) >> NVT_PVT_EDID_CVT3_ASPECT_SHIFT; + + p->u.cvt.block[i].preferred_vert_rates = (pCVT_3byte->block[i].refresh_rates & NVT_PVT_EDID_CVT3_PREFERRED_RATE_MASK) >> NVT_PVT_EDID_CVT3_PREFERRED_RATE_SHIFT; + p->u.cvt.block[i].supported_vert_rates = (pCVT_3byte->block[i].refresh_rates & NVT_PVT_EDID_CVT3_SUPPORTED_RATE_MASK) >> NVT_PVT_EDID_CVT3_SUPPORTED_RATE_SHIFT; + } + } + } + break; + + case NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII: // establishied timing III + { + EDID_EST_TIMINGS_III *pEstTiming = (EDID_EST_TIMINGS_III *)&descriptor->data[0]; + + for(i=0; iu.est3.data[i] = pEstTiming->timing_byte[i]; + } + } + break; + + case NVT_EDID_DISPLAY_DESCRIPTOR_DUMMY: // dummy descriptor + default: + // unresolved descriptor yet + for (i = 0; i < NVT_PVT_EDID_LDD_PAYLOAD_SIZE; i++) + { + p->u.dummy.data[i] = descriptor->data[i]; + } + break; + } + +} + +// get generic EDID info +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NV_STDCALL NvTiming_ParseEDIDInfo(NvU8 *pEdid, NvU32 length, NVT_EDID_INFO *pInfo) +{ + NvU32 i, j, k, data; + EDIDV1STRUC *p; + NvU8 *pExt; + NVT_EDID_CEA861_INFO *p861Info; + + // parameter check + if (pEdid == NULL || length < 128 || pInfo == NULL) + { + return NVT_STATUS_ERR; + } + + NVMISC_MEMSET(pInfo, 0, sizeof(NVT_EDID_INFO)); + + // get the EDID version + if (getEdidVersion(pEdid, &pInfo->version) == NVT_STATUS_ERR) + { + return NVT_STATUS_ERR; + } + + p = (EDIDV1STRUC *) pEdid; + + // get the IDs + pInfo->manuf_id = p->wIDManufName; + pInfo->product_id = p->wIDProductCode; + + // translate the ID into manufacturer's name + pInfo->manuf_name[0] = 'A' + (NvU8)((pInfo->manuf_id & 0x007c) >> 2) - 1; + pInfo->manuf_name[1] = 'A' + (NvU8)((pInfo->manuf_id & 0x0003) << 3 | (pInfo->manuf_id & 0xe000) >> 13) - 1; + pInfo->manuf_name[2] = 'A' + (NvU8)((pInfo->manuf_id & 0x1f00) >> 8) - 1; + pInfo->manuf_name[3] = '\0'; + + // get serial number + pInfo->serial_number = p->dwIDSerialNumber; + + // get the week and year + pInfo->week = p->bWeekManuf; + pInfo->year = p->bYearManuf + 1990; + + // get the interface info + pInfo->input.isDigital = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_ISDIGITAL_MASK) >> NVT_PVT_EDID_INPUT_ISDIGITAL_SHIFT; + + if (pInfo->input.isDigital && pInfo->version > 0x103) // must be at least EDID1.4 to support the following fields + { + switch ( (p->bVideoInputDef & NVT_PVT_EDID_INPUT_BPC_MASK) >> NVT_PVT_EDID_INPUT_BPC_SHIFT) + { + case NVT_PVT_EDID_INPUT_BPC_6 : + 
pInfo->input.u.digital.bpc = 6; + break; + case NVT_PVT_EDID_INPUT_BPC_8 : + pInfo->input.u.digital.bpc = 8; + break; + case NVT_PVT_EDID_INPUT_BPC_10 : + pInfo->input.u.digital.bpc = 10; + break; + case NVT_PVT_EDID_INPUT_BPC_12 : + pInfo->input.u.digital.bpc = 12; + break; + case NVT_PVT_EDID_INPUT_BPC_14 : + pInfo->input.u.digital.bpc = 14; + break; + case NVT_PVT_EDID_INPUT_BPC_16 : + pInfo->input.u.digital.bpc = 16; + break; + default : + pInfo->input.u.digital.bpc = 0; + break; + } + pInfo->input.u.digital.video_interface = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_INTERFACE_MASK) >> NVT_PVT_EDID_INPUT_INTERFACE_SHIFT; + } + else if (!pInfo->input.isDigital) + { + pInfo->input.u.analog_data = (p->bVideoInputDef & NVT_PVT_EDID_INPUT_ANALOG_ETC_MASK) >> NVT_PVT_EDID_INPUT_ANALOG_ETC_SHIFT; + } + + // get the max image size and aspect ratio + if (p->bMaxHorizImageSize != 0 && p->bMaxVertImageSize != 0) + { + pInfo->screen_size_x = p->bMaxHorizImageSize; + pInfo->screen_size_y = p->bMaxVertImageSize; + pInfo->screen_aspect_x = 0; + pInfo->screen_aspect_y = 0; + } + else if (p->bMaxHorizImageSize != 0 && p->bMaxVertImageSize == 0) + { + pInfo->screen_size_x = 0; + pInfo->screen_size_y = 0; + pInfo->screen_aspect_x = 99 + p->bMaxHorizImageSize; + pInfo->screen_aspect_y = 100; + } + else if (p->bMaxHorizImageSize == 0 && p->bMaxVertImageSize != 0) + { + pInfo->screen_size_x = 0; + pInfo->screen_size_y = 0; + pInfo->screen_aspect_x = 100; + pInfo->screen_aspect_y = 99 + p->bMaxVertImageSize; + } + + // get the gamma + pInfo->gamma = p->bDisplayXferChar + 100; + + // get the features + pInfo->u.feature = p->bFeatureSupport; + + // get chromaticity coordinates + pInfo->cc_red_x = p->Chromaticity[2] << 2; + pInfo->cc_red_x |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_RED_X1_X0_MASK) >> NVT_PVT_EDID_CC_RED_X1_X0_SHIFT; + pInfo->cc_red_y = p->Chromaticity[3] << 2; + pInfo->cc_red_y |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_RED_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_RED_Y1_Y0_SHIFT; + + pInfo->cc_green_x = p->Chromaticity[4] << 2; + pInfo->cc_green_x |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_GREEN_X1_X0_MASK) >> NVT_PVT_EDID_CC_GREEN_X1_X0_SHIFT; + pInfo->cc_green_y = p->Chromaticity[5] << 2; + pInfo->cc_green_y |= (p->Chromaticity[0] & NVT_PVT_EDID_CC_GREEN_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_GREEN_Y1_Y0_SHIFT; + + pInfo->cc_blue_x = p->Chromaticity[6] << 2; + pInfo->cc_blue_x |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_BLUE_X1_X0_MASK) >> NVT_PVT_EDID_CC_BLUE_X1_X0_SHIFT; + pInfo->cc_blue_y = p->Chromaticity[7] << 2; + pInfo->cc_blue_y |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_BLUE_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_BLUE_Y1_Y0_SHIFT; + + pInfo->cc_white_x = p->Chromaticity[8] << 2; + pInfo->cc_white_x |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_WHITE_X1_X0_MASK) >> NVT_PVT_EDID_CC_WHITE_X1_X0_SHIFT; + pInfo->cc_white_y = p->Chromaticity[9] << 2; + pInfo->cc_white_y |= (p->Chromaticity[1] & NVT_PVT_EDID_CC_WHITE_Y1_Y0_MASK) >> NVT_PVT_EDID_CC_WHITE_Y1_Y0_SHIFT; + + // copy established timings + pInfo->established_timings_1_2 = (NvU16)p->bEstablishedTimings1 << 8; + pInfo->established_timings_1_2 |= (NvU16)p->bEstablishedTimings2; + + // copy manuf reserved timings + pInfo->manufReservedTimings = p->bManufReservedTimings; + + // copy standard timings + for (i = 0; i < NVT_EDID_MAX_STANDARD_TIMINGS; i++) + { + pInfo->standard_timings[i] = p->wStandardTimingID[i]; + } + + // get the number of extensions + pInfo->total_extensions = p->bExtensionFlag; + + // check_sum + for (i = 0, data = 0; i < length; i++) + { + data += pEdid[i]; 
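        // Editorial note (not part of the original source): each 128-byte EDID block
        // is defined so that all of its bytes sum to 0 modulo 256; checksum_ok below
        // simply tests the low 8 bits of this running sum.  For example, if the first
        // 127 bytes of a block sum to 0x1F37, a valid block stores 0xC9
        // (0x100 - 0x37) in its checksum byte.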
+ } + pInfo->checksum_ok = !(data & 0xFF); + pInfo->checksum = p->bChecksum; + + + // now find out the total number of all of the timings in the EDID + pInfo->total_timings = 0; + + // now find out the detailed timings + parseEdidDetailedTiming(pEdid, pInfo); + + // now parse all 18-byte long display descriptors (not detailed timing) + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + parseEdidLongDisplayDescriptor((EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i], &pInfo->ldd[i], pInfo->version); + } + + // now check the number of timings in the extension + for (k = 0, j = 1; j <= pInfo->total_extensions && (j + 1) * sizeof(EDIDV1STRUC) <= length; j++) + { + pExt = pEdid + sizeof(EDIDV1STRUC) * j; + + // check for 861 extension first + switch (*pExt) + { + case NVT_EDID_EXTENSION_CTA: + p861Info = (k == 0) ? &pInfo->ext861 : &pInfo->ext861_2; + + get861ExtInfo(pExt, sizeof(EDIDV1STRUC), p861Info); + + // HF EEODB is present in edid v1.3 and v1.4 does not need this.Also, it is always present in the 1st CTA extension block. + if (j == 1 && pInfo->version == NVT_EDID_VER_1_3) + { + parseCta861HfEeodb(p861Info, &pInfo->total_extensions); + } + + // update pInfo with basic hdmi info + // assumes each edid will only have one such block across multiple cta861 blocks (otherwise may create declaration conflict) + // In case of multiple such blocks, the last one takes precedence, except for SCDB + + // parseCta861VsdbBlocks() uses hfScdb info so need to be parsed first + parseCta861HfScdb(p861Info, pInfo, FROM_CTA861_EXTENSION); + parseCta861VsdbBlocks(p861Info, pInfo, FROM_CTA861_EXTENSION); + + // parse HDR related information from the HDR static metadata data block + parseCea861HdrStaticMetadataDataBlock(p861Info, pInfo, FROM_CTA861_EXTENSION); + + // parse Dolby Vision related information from the DV vendor specific video data block + parseCea861DvStaticMetadataDataBlock(p861Info, &pInfo->dv_static_metadata_info); + + // Timings are listed (or shall) be listed in priority order + // So read SVD, yuv420 SVDs first before reading detailed timings + + // add the 861B short video timing descriptor + if (p861Info->revision >= NVT_CEA861_REV_B) + { + // base video + parse861bShortTiming(p861Info, pInfo, FROM_CTA861_EXTENSION); + + // yuv420-only video + parse861bShortYuv420Timing(p861Info, pInfo, FROM_CTA861_EXTENSION); + } + + // add the detailed timings in 18-byte long display descriptor + parse861ExtDetailedTiming(pExt, p861Info->basic_caps, pInfo); + + // CEA861-F at 7.5.12 section about VFPDB block. + if (p861Info->revision >= NVT_CEA861_REV_F && p861Info->total_vfpdb != 0) + { + parse861bShortPreferredTiming(p861Info, pInfo, FROM_CTA861_EXTENSION); + } + + k++; + break; + + case NVT_EDID_EXTENSION_VTB: + parseVTBExtension(pExt, pInfo); + break; + + case NVT_EDID_EXTENSION_DISPLAYID: + if ((pExt[1] & 0xF0) == 0x20) // displayID2.x as EDID extension + { + if(getDisplayId20EDIDExtInfo(pExt, sizeof(EDIDV1STRUC), + pInfo) == NVT_STATUS_SUCCESS) + { + if (pInfo->ext861.total_y420vdb != 0 || pInfo->ext861.total_y420cmdb != 0) + { + pInfo->ext_displayid20.interface_features.yuv420_min_pclk = 0; + } + + if (!pInfo->ext861.basic_caps) + { + pInfo->ext861.basic_caps = pInfo->ext_displayid20.basic_caps; + } + } + } + else // displayID13 as EDID extension + { + //do not fail function based on return value of getDisplayIdEDIDExtInfo refer bug 3247180 where some rogue monitors don't provide correct DID13 raw data. 
+ if (getDisplayIdEDIDExtInfo(pExt, sizeof(EDIDV1STRUC), + pInfo) == NVT_STATUS_SUCCESS) + { + // Check if YCbCr is supported in base block + // since it is mandatory if YCbCr is supported on any other display interface as per 5.1.1.1 Video Colorimetry + if(pInfo->u.feature_ver_1_4_digital.support_ycrcb_444) + { + if (!pInfo->ext_displayid.supported_displayId2_0) + { + pInfo->ext_displayid.u4.display_interface.ycbcr444_depth.support_8b = 1; + } + else + { + pInfo->ext_displayid.u4.display_interface_features.ycbcr444_depth.support_8b = 1; + } + } + + if(pInfo->u.feature_ver_1_4_digital.support_ycrcb_422) + { + if (!pInfo->ext_displayid.supported_displayId2_0) + { + pInfo->ext_displayid.u4.display_interface.ycbcr422_depth.support_8b = 1; + } + else + { + pInfo->ext_displayid.u4.display_interface_features.ycbcr422_depth.support_8b = 1; + } + } + } + } + break; + + default: + break; + } + } + + // Copy all the timings(could include type 7/8/9/10) from displayid20->timings[] to pEdidInfo->timings[] + for (i = 0; i < pInfo->ext_displayid20.total_timings; i++) + { + if (!assignNextAvailableTiming(pInfo, &(pInfo->ext_displayid20.timing[i]))) + { + return NVT_STATUS_ERR; + } + } + + // check for cvt timings - in display range limits or cvt 3-byte LDD, only for EDID1.4 and above + if (pInfo->version > 0x0103) + { + parseEdidCvtTiming(pInfo); + } + + // now check for standard timings - base EDID and then the LDDs + parseEdidStandardTiming(pInfo); + + // find out the total established timings - base EDID and then the LDDs + parseEdidEstablishedTiming(pInfo); + + getEdidHDM1_4bVsdbTiming(pInfo); + + // Assert if no timings were found (due to a bad EDID) or if we mistakenly + // assigned more timings than we allocated space for (due to bad logic above) + nvt_assert(pInfo->total_timings && + (pInfo->total_timings <= COUNT(pInfo->timing))); + + // go through all timings and update supported color formats + // consider the supported bpc per color format from parsed EDID / CTA861 / DisplayId + updateColorFormatAndBpcTiming(pInfo); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void updateColorFormatAndBpcTiming(NVT_EDID_INFO *pInfo) +{ + NvU32 i, j, data; + + for (i = 0; i < pInfo->total_timings; i++) + { + data = NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status); + switch (data) + { + case NVT_TYPE_HDMI_STEREO: + case NVT_TYPE_HDMI_EXT: + // VTB timing use the base EDID (block 0) to determine the color format support + case NVT_TYPE_EDID_VTB_EXT: + case NVT_TYPE_EDID_VTB_EXT_STD: + case NVT_TYPE_EDID_VTB_EXT_DTD: + case NVT_TYPE_EDID_VTB_EXT_CVT: + // pInfo->u.feature_ver_1_3.color_type provides mono, rgb, rgy, undefined + // assume RGB 8-bpc support only (VTB is pretty old edid standard) + pInfo->timing[i].etc.rgb444.bpc.bpc8 = 1; + break; + // These are from the CTA block, and relies on + // Since there could be multiple CEA blocks, these are adjusted when the blocks are parsed + case NVT_TYPE_EDID_861ST: + case NVT_TYPE_EDID_EXT_DTD: + if (pInfo->ext_displayid20.as_edid_extension && + pInfo->ext_displayid20.valid_data_blocks.cta_data_present) + { + updateColorFormatForDisplayId20ExtnTimings(pInfo, i); + } + updateBpcForTiming(pInfo, i); + break; + default: + // * the displayID_v1.3/v2.0 EDID extension need to follow the EDID bpc definition. 
+ // * all other default to base edid + updateBpcForTiming(pInfo, i); + } + + // The timings[i] entries need to update the bpc values where are based on the different color format again + // if displayId extension existed it's interface feature data block + if (pInfo->ext_displayid.version == 0x12 || pInfo->ext_displayid.version == 0x13) + { + updateColorFormatForDisplayIdExtnTimings(pInfo, i); + } + else if (pInfo->ext_displayid20.valid_data_blocks.interface_feature_present) + { + // DisplayId2.0 spec has its own way of determining color format support which includes bpc + color format + updateColorFormatForDisplayId20ExtnTimings(pInfo, i); + } + } + + // Go through all the timings and set CTA format accordingly. If a timing is a CTA 861b timing, store the + // index of this CTA 861b standard in NVT_TIMING.etc.status field. + // However parser needs to exclude the DTD timing in EDID base block where is shared same detailed timing in VIC/DTD_ext in CTA861 + for (i = 0; i < pInfo->total_timings; i++) + { + data = NvTiming_GetCEA861TimingIndex(&pInfo->timing[i]); + // DisplayID block did not belong to CTA timing and it owned the deep color block itself + if (data && !((NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_1) || + (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_2) || + (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_7) || + (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_8) || + (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_9) || + (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_DISPLAYID_10))) + { + // CEA timings may be enumerated outside of SVD blocks -- the formats of these timings don't have CEA FORMAT (vic) set + // before marking them CEA, make sure their color formats are updated too + if (NVT_GET_CEA_FORMAT(pInfo->timing[i].etc.status) == 0 && + (!NVT_IS_DTD(pInfo->timing[i].etc.status) || + isMatchedCTA861Timing(pInfo, &pInfo->timing[i]))) + { + for (j = 0; j < pInfo->total_timings; j++) + { + // It is assumed CTA timings that are repeated by the CTA block or different CTA blocks will + // announce the same color format for the same CTA timings + if (NVT_GET_CEA_FORMAT(pInfo->timing[j].etc.status) == data) + { + // There could be anomalies between EDID 1.4 base block color format vs CEA861 basic caps + // In this case we assume the union is supported + pInfo->timing[i].etc.rgb444.bpcs |= pInfo->timing[j].etc.rgb444.bpcs; + pInfo->timing[i].etc.yuv444.bpcs |= pInfo->timing[j].etc.yuv444.bpcs; + pInfo->timing[i].etc.yuv422.bpcs |= pInfo->timing[j].etc.yuv422.bpcs; + pInfo->timing[i].etc.yuv420.bpcs |= pInfo->timing[j].etc.yuv420.bpcs; + break; + } + } + + // now update the VIC of this timing + NVT_SET_CEA_FORMAT(pInfo->timing[i].etc.status, data); + } + // see the aspect ratio info if needed + if (pInfo->timing[i].etc.aspect == 0) + { + pInfo->timing[i].etc.aspect = getCEA861TimingAspectRatio(data); + } + } + } + +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool isMatchedCTA861Timing(NVT_EDID_INFO *pInfo, NVT_TIMING *pT) +{ + NvU32 j; + + for (j = 0; j < pInfo->total_timings; j++) + { + if (NVT_GET_CEA_FORMAT(pInfo->timing[j].etc.status) && NvTiming_IsTimingExactEqual(&pInfo->timing[j], pT)) + { + return NV_TRUE; + } + } + return NV_FALSE; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void updateBpcForTiming(NVT_EDID_INFO *pInfo, NvU32 index) +{ + NVT_EDID_CEA861_INFO *p861Info; + + // assume/prefer data from 1st CEA 
block if multiple exist + p861Info = &pInfo->ext861; + + pInfo->timing[index].etc.rgb444.bpc.bpc8 = 1; + + if (pInfo->version >= NVT_EDID_VER_1_4 && pInfo->input.isDigital) + { + if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_444) + { + pInfo->timing[index].etc.yuv444.bpc.bpc8 = 1; + } + if (pInfo->u.feature_ver_1_4_digital.support_ycrcb_422) + { + pInfo->timing[index].etc.yuv422.bpc.bpc8 = 1; + } + if (pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DISPLAYPORT_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED) + { + pInfo->timing[index].etc.rgb444.bpc.bpc6 = 1; + + // trust bpc claim in edid base block for DP only + if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_10) + { + pInfo->timing[index].etc.rgb444.bpc.bpc10 = 1; + pInfo->timing[index].etc.yuv444.bpc.bpc10 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444); + pInfo->timing[index].etc.yuv422.bpc.bpc10 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422); + } + if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_12) + { + pInfo->timing[index].etc.rgb444.bpc.bpc12 = 1; + pInfo->timing[index].etc.yuv444.bpc.bpc12 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444); + pInfo->timing[index].etc.yuv422.bpc.bpc12 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422); + } + if (pInfo->input.u.digital.bpc >= NVT_EDID_VIDEOSIGNAL_BPC_16) + { + pInfo->timing[index].etc.rgb444.bpc.bpc16 = 1; + pInfo->timing[index].etc.yuv444.bpc.bpc16 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_444 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444); + pInfo->timing[index].etc.yuv422.bpc.bpc16 = pInfo->u.feature_ver_1_4_digital.support_ycrcb_422 || (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422); + } + } + else if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED) && + p861Info->revision >= NVT_CEA861_REV_A) + { + updateHDMILLCDeepColorForTiming(pInfo, index); + } + } + else if (p861Info->revision >= NVT_CEA861_REV_A) + { + updateHDMILLCDeepColorForTiming(pInfo, index); + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_Get18ByteLongDescriptorIndex(NVT_EDID_INFO *pEdidInfo, NvU8 tag, NvU32 *pDtdIndex) +{ + NvU32 dtdIndex; + + if (!pEdidInfo || !pDtdIndex) + { + return NVT_STATUS_ERR; + } + + for (dtdIndex = *pDtdIndex; dtdIndex < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; dtdIndex++) + { + if (pEdidInfo->ldd[dtdIndex].tag == tag) + { + *pDtdIndex = dtdIndex; + return NVT_STATUS_SUCCESS; + } + } + + return NVT_STATUS_ERR; +} + +// get the edid timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetEdidTimingEx(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT, NvU32 rrx1k) +{ + NvU32 i, j; + NvU32 preferred_cea, preferred_displayid_dtd, preferred_dtd1, dtd1, map0, map1, map2, map3, map4, ceaIndex, max, cvt; + NVT_TIMING *pEdidTiming; + NVT_EDID_DD_RANGE_CVT *pCVT = NULL; + NVT_TIMING cvtTiming; + + // input check + if (pEdidInfo == NULL || pEdidInfo->total_timings == 0 || pT == 0) + return NVT_STATUS_ERR; + + if (width == 0 || 
height == 0 || rr == 0) // rrx1k is optional, can be 0. + return NVT_STATUS_ERR; + + pEdidTiming = pEdidInfo->timing; + + // the timing mapping index : + // + // preferred_cea - the "prefer SVD" in CEA-861-F (i.e. A Sink that prefers a Video Format that is not listed as an SVD in Video Data Block, but instead listed in YCBCR 4:2:0 VDB) + // preferred_displayid_dtd - the "prefer detailed timing of DispalyID" extension + // preferred_dtd1 - the first deatiled timing and PTM flag is enable + // dtd1 - the first detailed timing + // map0 - the "perfect" match (the timing's H/V-visible and pixel clock(refresh rate) are the same as the asking "width", "height" and "rr". + // map1 - the "closest" match with the honor of the interlaced flag + // map2 - the "closest" match without the honor of the interlaced flag + // map3 - the "closest" match to the panel's native timing (i.e. the first DTD timing or the short 861B/C/D timings with "native" flag). + // map4 - the "closest" match with the same refresh rate + // max - the timing with the max visible area + preferred_cea = preferred_displayid_dtd = preferred_dtd1 = dtd1 = map0 = map1 = map2 = map3 = map4 = ceaIndex = pEdidInfo->total_timings; + max = cvt = 0; + for (i = 0; i < pEdidInfo->total_timings; i++) + { + // if the client prefers _NATIVE timing, then don't select custom timing + if ((flag & (NVT_FLAG_NATIVE_TIMING | NVT_FLAG_EDID_TIMING)) != 0 && NVT_IS_CUST_ENTRY(pEdidTiming[i].etc.status) != 0) + { + continue; + } + + // find the perfect match is possible + if ((flag & NVT_FLAG_MAX_EDID_TIMING) == 0 && + width == pEdidTiming[i].HVisible && + height == frame_height(pEdidTiming[i]) && + rr == pEdidTiming[i].etc.rr && + ((rrx1k == 0) || (rrx1k == pEdidTiming[i].etc.rrx1k)) && + !!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced) + { + if (map0 >= pEdidInfo->total_timings) + { + // make sure we take the priority as "detailed>standard>established". (The array timing[] always have the detailed timings in the front and then the standard and established.) + map0 = i; + } + + if ( (NVT_PREFERRED_TIMING_IS_CEA(pEdidTiming[i].etc.flag)) || + ((0 == (flag & NVT_FLAG_EDID_861_ST)) && NVT_PREFERRED_TIMING_IS_DTD1(pEdidTiming[i].etc.flag, pEdidTiming[i].etc.status)) || + (NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag)) || + (NVT_IS_NATIVE_TIMING(pEdidTiming[i].etc.status))) + { + *pT = pEdidTiming[i]; + return NVT_STATUS_SUCCESS; + } + + if (NVT_GET_TIMING_STATUS_TYPE(pEdidTiming[i].etc.status) == NVT_TYPE_EDID_861ST) + { + if (ceaIndex == pEdidInfo->total_timings) + { + // Save the first entry found. + ceaIndex = i; + } + else + { + if (((flag & NVT_FLAG_CEA_4X3_TIMING) && (pEdidTiming[i].etc.aspect == 0x40003)) || + ((flag & NVT_FLAG_CEA_16X9_TIMING) && (pEdidTiming[i].etc.aspect == 0x160009))) + { + // Use preferred aspect ratio if specified. 
+ ceaIndex = i; + } + } + } + } // if ((flag & NVT_FLAG_MAX_EDID_TIMING) == 0 && + + // bypass the custom timing to be select for the mismatch case + if (NVT_GET_TIMING_STATUS_TYPE(pEdidTiming[i].etc.status) == NVT_TYPE_CUST || + NVT_IS_CUST_ENTRY(pEdidTiming[i].etc.status) != 0) + { + if (width != pEdidTiming[i].HVisible || height != frame_height(pEdidTiming[i]) || rr != pEdidTiming[i].etc.rr) + { + continue; + } + } + + // find out the preferred timing just in case of cea_vfpdb is existed + if (preferred_cea == pEdidInfo->total_timings && + NVT_PREFERRED_TIMING_IS_CEA(pEdidTiming[i].etc.flag)) + { + preferred_cea = i; + } + + // find out the preferred timing just in case + // Caller we will force rr value as 1 to select the DisplayID prefer timing in pEdidTiming if it existed + // however, we can't assign the correct refresh rate we want if we had two and above rr values which shared the same timing. + if (rr != 1) + { + if (pEdidTiming[i].etc.rr == rr && NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag)) + { + preferred_displayid_dtd = i; + } + } + else if (preferred_displayid_dtd == pEdidInfo->total_timings && + NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidTiming[i].etc.flag)) + { + preferred_displayid_dtd = i; + } + + if (NVT_PREFERRED_TIMING_IS_DTD1(pEdidTiming[i].etc.flag, pEdidTiming[i].etc.status)) + { + preferred_dtd1 = i; + } + + if (NVT_IS_DTD1(pEdidTiming[i].etc.status)) + { + dtd1 = i; + } + + // find out the max mode just in case + if (pEdidTiming[i].HVisible * pEdidTiming[i].VVisible > pEdidTiming[max].HVisible * pEdidTiming[max].VVisible) + max = i; + + // if the requested timing is not in the EDID, try to find out the EDID entry with the same progressive/interlaced setting + if (map1 >= pEdidInfo->total_timings) + { + if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced && + width <= pEdidTiming[i].HVisible && + height <= frame_height(pEdidTiming[i])) + { + map1 = i; + } + } + else + { + if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!pEdidTiming[i].interlaced && + width <= pEdidTiming[i].HVisible && + height <= frame_height(pEdidTiming[i]) && + abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map1].HVisible, width) && + abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map1]), height)) + { + // if there're 2 timings with the same visible size, choose the one with closer refresh rate + if (pEdidTiming[i].HVisible == pEdidTiming[map1].HVisible && + frame_height(pEdidTiming[i]) == frame_height(pEdidTiming[map1])) + { + if (abs_delta(pEdidTiming[i].etc.rr, rr) < abs_delta(pEdidTiming[map1].etc.rr, rr)) + { + map1 = i; + } + } + else + { + map1 = i; + } + } + } + + // if the requested timing is not in the EDID, try to find out the EDID entry without the progressive/interlaced setting + if (map2 >= pEdidInfo->total_timings) + { + if (width <= pEdidTiming[i].HVisible && + height <= frame_height(pEdidTiming[i])) + { + map2 = i; + } + } + else + { + if (width <= pEdidTiming[i].HVisible && + height <= frame_height(pEdidTiming[i]) && + abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map2].HVisible, width) && + abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map2]), height)) + { + // if there're 2 timings with the same visible size, choose the one with closer refresh rate + if (pEdidTiming[i].HVisible == pEdidTiming[map2].HVisible && + frame_height(pEdidTiming[i]) == frame_height(pEdidTiming[map2])) + { + if (abs_delta(pEdidTiming[i].etc.rr, rr) < 
abs_delta(pEdidTiming[map2].etc.rr, rr)) + { + map2 = i; + } + } + else + { + map2 = i; + } + } + } + + // find out the native timing + if (NVT_IS_NATIVE_TIMING(pEdidTiming[i].etc.status) || NVT_IS_DTD1(pEdidTiming[i].etc.status)) + { + if (map3 >= pEdidInfo->total_timings) + { + if (width <= pEdidTiming[i].HVisible && + height <= frame_height(pEdidTiming[i])) + { + map3 = i; + } + } + else if(abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map3].HVisible, width) && + abs_delta(frame_height(pEdidTiming[i]), height) <= abs_delta(frame_height(pEdidTiming[map3]), height) && + width <= pEdidTiming[i].HVisible && + height <= frame_height(pEdidTiming[i])) + { + map3 = i; + } + } + + // find the edid timing with refresh rate matching + if (map4 >= pEdidInfo->total_timings) + { + if (width <= pEdidTiming[i].HVisible && + height <= pEdidTiming[i].VVisible && + rr == pEdidTiming[i].etc.rr) + { + map4 = i; + } + } + else + { + if (width <= pEdidTiming[i].HVisible && + height <= pEdidTiming[i].HVisible && + rr == pEdidTiming[i].etc.rr && + abs_delta(pEdidTiming[i].HVisible, width) <= abs_delta(pEdidTiming[map4].HVisible, width) && + abs_delta(pEdidTiming[i].VVisible, height) <= abs_delta(pEdidTiming[map4].VVisible, height)) + { + map4 = i; + } + } + + }//for (i = 0; i < pEdidInfo->total_timings; i++) + + if ( (preferred_displayid_dtd == preferred_dtd1) && (preferred_dtd1 == dtd1) && + (dtd1 == map0) && + (map0 == map1) && + (map1 == map2) && + (map2 == map3) && + (map3 == map4) && + (map4 == pEdidInfo->total_timings) && + pEdidInfo->version >= NVT_EDID_VER_1_4 && + pEdidInfo->u.feature_ver_1_4_digital.continuous_frequency && + !(flag & NVT_PVT_INTERLACED_MASK)) + { + // try to find CVT timing that fits + NvU32 maxHeight, minHeight, tempHeight; + + minHeight = ~0; + maxHeight = tempHeight= 0; + + // looping through long display descriptors + for (i=0; ildd[i].tag != NVT_EDID_DISPLAY_DESCRIPTOR_DRL || pEdidInfo->ldd[i].u.range_limit.timing_support != NVT_EDID_RANGE_SUPPORT_CVT) + { + continue; + } + + pCVT = &pEdidInfo->ldd[i].u.range_limit.u.cvt; + + if (width <= pCVT->max_active_pixels_per_line || (pCVT->scaling_support & NVT_EDID_CVT_SCALING_HOR_SHRINK)) + { + for (j=0; jaspect_supported & (1< tempHeight) + { + minHeight = tempHeight; + } + if (maxHeight < tempHeight) + { + maxHeight = tempHeight; + } + + }//for (j=0; j<5; j++) + }//if (width <= pCVT->max_active_pixels_per_line || (pCVT->scaling_support & NVT_EDID_CVT_SCALING_HOR_STRETCH)) + + if ( ((minHeight < height) && (pCVT->scaling_support & NVT_EDID_CVT_SCALING_VER_SHRINK)) || + ((maxHeight > height) && (pCVT->scaling_support & NVT_EDID_CVT_SCALING_VER_STRETCH)) ) + { + cvt = 1; + } + + if (cvt) + { + break; + } + }//for (i=0; iblanking_support & NVT_EDID_CVT_BLANKING_REDUCED && NvTiming_CalcCVT_RB(width, height, rr, NVT_PROGRESSIVE, &cvtTiming) == NVT_STATUS_SUCCESS) + { + if ( cvtTiming.pclk > (NvU32)((pEdidInfo->ldd[i].u.range_limit.max_pclk_MHz * 100) - (pCVT->pixel_clock_adjustment * 25)) ) + { + cvt = 0; + } + } + else if (pCVT->blanking_support & NVT_EDID_CVT_BLANKING_STANDARD && NvTiming_CalcCVT(width, height, rr, NVT_PROGRESSIVE, &cvtTiming) == NVT_STATUS_SUCCESS) + { + if ( cvtTiming.pclk > (NvU32)((pEdidInfo->ldd[i].u.range_limit.max_pclk_MHz * 100) - (pCVT->pixel_clock_adjustment * 25)) ) + { + cvt = 0; + } + } + else + { + cvt = 0; + } + + } + }//(dtd1 == map0 == map1 == map2 == map3 == pEdidInfo->total_timings) && pEdidInfo->version >= NVT_EDID_VER_1_4 && + // 
pEdidInfo->feature_ver_1_4_digital.continuous_frequency && !(flag & NVT_PVT_INTERLACED_MASK)) + + // now return the mismatched EDID timing + if (flag & NVT_FLAG_NV_PREFERRED_TIMING) + { + *pT = (preferred_displayid_dtd != pEdidInfo->total_timings) ? pEdidTiming[preferred_displayid_dtd] : + (preferred_cea != pEdidInfo->total_timings) ? pEdidTiming[preferred_cea] : + (preferred_dtd1 != pEdidInfo->total_timings) ? pEdidTiming[preferred_dtd1] : + pEdidTiming[dtd1]; + // what if DTD1 itself is filtered out, in such case dtd1 index points to an invalid timing[]? + // (dtd1 != pEdidInfo->total_timings) ? pEdidTiming[dtd1] : pEdidTiming[0]; + } + else if (flag & NVT_FLAG_DTD1_TIMING) + { + *pT = pEdidTiming[dtd1]; + } + else if ((flag & NVT_FLAG_MAX_EDID_TIMING) && (0 == (flag & NVT_FLAG_EDID_861_ST))) + { + *pT = pEdidTiming[max]; + } + else if ((flag & (NVT_FLAG_CEA_4X3_TIMING | NVT_FLAG_CEA_16X9_TIMING | NVT_FLAG_EDID_861_ST)) && ceaIndex < (pEdidInfo->total_timings)) + { + *pT = pEdidTiming[ceaIndex]; + } + else if ((flag & NVT_FLAG_NATIVE_TIMING) != 0 && map3 < pEdidInfo->total_timings) + { + // Allow closest refresh rate match when EDID has detailed timing for different RR on native resolution. + if (map0 < pEdidInfo->total_timings && + pEdidTiming[map0].HVisible == pEdidTiming[map3].HVisible && + pEdidTiming[map0].VVisible == pEdidTiming[map3].VVisible) + { + *pT = pEdidTiming[map0]; + } + else + { + *pT = pEdidTiming[map3]; + } + } + else if (map0 < pEdidInfo->total_timings) + { + // use the exact mapped timing if possible + *pT = pEdidTiming[map0]; + } + else if ((flag & NVT_FLAG_EDID_TIMING_RR_MATCH) && map4 < pEdidInfo->total_timings) + { + *pT = pEdidTiming[map4]; + } + else if (map1 < pEdidInfo->total_timings) + { + // use the mapped timing if possible + *pT = pEdidTiming[map1]; + } + else if (map2 < pEdidInfo->total_timings) + { + // use the 2nd mapped timing if possible + *pT = pEdidTiming[map2]; + } + else if (dtd1 < pEdidInfo->total_timings && width <= pEdidTiming[dtd1].HVisible && height <= pEdidTiming[dtd1].VVisible) + { + // use the 1st detailed timing if possible + *pT = pEdidTiming[dtd1]; + } + else if (cvt) + { + // use the cvt timing + *pT = cvtTiming; + } + else + { + // use the max timing for all other cases + *pT = pEdidTiming[max]; + } + + // set the mismatch status + if (pT->HVisible != width || frame_height(*pT) != height) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_SIZE); + } + if (!NvTiming_IsRoundedRREqual(pT->etc.rr, pT->etc.rrx1k, (NvU16)rr)) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_RR); + } + if (!!pT->interlaced != !!(flag & NVT_PVT_INTERLACED_MASK)) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_FORMAT); + } + + return NVT_STATUS_SUCCESS; +} + +// get the edid timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetEdidTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT) +{ + return NvTiming_GetEdidTimingEx(width, height, rr, flag, pEdidInfo, pT, 0); +} +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetHDMIStereoExtTimingFromEDID(NvU32 width, NvU32 height, NvU32 rr, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_EXT_TIMING *pT) +{ + NVT_STATUS status = NVT_STATUS_ERR; + NvU8 Vic; + NvU32 i; + NVT_TIMING Timing; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_EXT_TIMING)); + + // adjust the flags -- + // need EDID timing with RR match, + // not max timing, + flag = flag 
| NVT_FLAG_EDID_TIMING | NVT_FLAG_EDID_TIMING_RR_MATCH | NVT_FLAG_EDID_861_ST; + flag = flag & ~(NVT_FLAG_MAX_EDID_TIMING); + + status = NvTiming_GetEdidTiming(width, height, rr, flag, pEdidInfo, &Timing); + if (NVT_STATUS_SUCCESS == status) + { + status = NVT_STATUS_ERR; + + // is this an exact match? + if (0 == NVT_GET_TIMING_STATUS_MATCH(Timing.etc.status)) + { + if (NVT_TYPE_EDID_861ST == NVT_GET_TIMING_STATUS_TYPE(Timing.etc.status)) + { + // lookup the vic for this timing in the support map. + Vic = (NvU8) NVT_GET_CEA_FORMAT(Timing.etc.status); + for (i = 0; i < pEdidInfo->Hdmi3Dsupport.total; ++i) + { + if (Vic == pEdidInfo->Hdmi3Dsupport.map[i].Vic) + { + break; + } + } + if (i < pEdidInfo->Hdmi3Dsupport.total) + { + // does this vic support the requested structure type? + if (0 != (NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(StereoStructureType) & pEdidInfo->Hdmi3Dsupport.map[i].StereoStructureMask)) + { + // if this is side-by-side(half) the detail needs to match also. + if ((NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF != StereoStructureType) || (SideBySideHalfDetail == pEdidInfo->Hdmi3Dsupport.map[i].SideBySideHalfDetail)) + { + // convert the 2D timing to 3D. + NvTiming_GetHDMIStereoTimingFrom2DTiming(&Timing, StereoStructureType, SideBySideHalfDetail, pT); + status = NVT_STATUS_SUCCESS; + } + } + } + } + } + } + return status; +} + +// EDID based AspectRatio Timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetEDIDBasedASPRTiming( NvU16 width, NvU16 height, NvU16 rr, NVT_EDID_INFO *pEI, NVT_TIMING *pT) +{ + NvU32 i, dwStatus; + NvU32 dwNativeIndex; + NvU32 flag; + NvU32 ret; + + // sanity check + if( pEI == NULL || pEI->total_timings == 0 || pT == NULL ) + { + return NVT_STATUS_ERR; + } + if( width == 0 || height == 0 ) + { + return NVT_STATUS_ERR; + } + + // get an EDID timing. Return err if it fails as we don't have any timing to tweak. + flag = 0; + ret = NvTiming_GetEdidTiming(width, height, rr, flag, pEI, pT); + if( NVT_STATUS_SUCCESS != ret ) + { + return NVT_STATUS_ERR; + } + // in case we have an exact match from EDID (in terms of Size), we return Success. + else if ((NVT_GET_TIMING_STATUS_MATCH(pT->etc.status) & NVT_STATUS_TIMING_MISMATCH_SIZE) == 0) + { + return NVT_STATUS_SUCCESS; + } + + // find the Native timing + for (i = 0, dwNativeIndex = pEI->total_timings + 1; i < pEI->total_timings; i++) + { + dwStatus = pEI->timing[i].etc.status; + + if ((NVT_IS_NATIVE_TIMING(dwStatus)) || NVT_IS_DTD1(dwStatus)) + { + dwNativeIndex = i; + break; + } + } + + // we don't want to apply LogicScaling(Letterboxing) to Wide Mode on Wide Panel (or non-Wide Mode on non-Wide Panel) + if( nvt_is_wideaspect(width, height) == nvt_is_wideaspect(pEI->timing[dwNativeIndex].HVisible, pEI->timing[dwNativeIndex].VVisible) ) + { + return NVT_STATUS_ERR; + } + + // Letterbox mode enabled by regkey LogicScalingMode + // When we try to set modes not supported in EDID (eg. DFP over DSub) the display may not fit the screen. + // If Logic Scaling is enabled (ie why we are here), we need to tweak the timing (for CRT) provided: + // 1) the aspect ratio of native mode and requested mode differ + // eg. Native AR = 5:4, 1280x1024 + // Requested AR = 16:10, 1280x800 + // 2) Both Width and Height do not mismatch together; If they do we shall go in for DMT/GTF timing + // by failing this call. 
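+    // Worked example of the tweak below (assumed values, continuing the 5:4 vs 16:10
+    // case from the comment above): native DTD 1280x1024, requested 1280x800. The
+    // widths match and the aspect ratios differ, so the branch below recenters the
+    // raster: VFrontPorch += (1024 - 800) / 2 = 112; VVisible = 800; HVisible stays 1280.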
+ if( pT->interlaced == 0 && + dwNativeIndex < pEI->total_timings && + (pEI->timing[dwNativeIndex].HVisible*height != pEI->timing[dwNativeIndex].VVisible*width) && + (width == pT->HVisible || height == pT->VVisible)) + { + pT->HFrontPorch += (pT->HVisible - width) / 2; + pT->VFrontPorch += (pT->VVisible - height) / 2; + pT->HVisible = width; + pT->VVisible = height; + if(rr != pT->etc.rr) + { + pT->etc.rrx1k = rr * 1000; + pT->pclk = RRx1kToPclk (pT); + } + + pT->etc.status = NVT_STATUS_ASPR; + return NVT_STATUS_SUCCESS; + } + + return NVT_STATUS_ERR; +} + +// check whether EDID is valid +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_EDIDValidationMask(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation) +{ + NvU32 i, j, version, checkSum; + EDIDV1STRUC *p = (EDIDV1STRUC *)pEdid; + EDID_LONG_DISPLAY_DESCRIPTOR *pLdd; + NvU8 *pExt; + DETAILEDTIMINGDESCRIPTOR *pDTD; + NvU32 ret = 0; + + // check the EDID base size to avoid accessing beyond the EDID buffer, do not proceed with + // further validation. + if (length < sizeof(EDIDV1STRUC)) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE); + return ret; + } + + // check the EDID version and signature + if (getEdidVersion(pEdid, &version) != NVT_STATUS_SUCCESS) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION); + return ret; + } + + // check block 0 checksum value + if (!isChecksumValid(pEdid)) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + return ret; + } + + // Strong validation to follow + if (bIsStrongValidation == NV_TRUE) + { + // range limit check + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + pLdd = (EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i]; + if (pLdd->tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL && (version == 0x103 || (version == 0x104 && (p->bFeatureSupport & 1)))) + { + EDID_MONITOR_RANGE_LIMIT *pRangeLimit = (EDID_MONITOR_RANGE_LIMIT *)pLdd->data; + NvU8 max_v_rate_offset, min_v_rate_offset, max_h_rate_offset, min_h_rate_offset; + + // add 255Hz offsets as needed before doing the check, use descriptor->rsvd2 + nvt_assert(!(pLdd->rsvd2 & 0xF0)); + + max_v_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MAX ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + min_v_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_VER_MIN ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + max_h_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MAX ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + min_h_rate_offset = pLdd->rsvd2 & NVT_PVT_EDID_RANGE_OFFSET_HOR_MIN ? NVT_PVT_EDID_RANGE_OFFSET_AMOUNT : 0; + + if ((pRangeLimit->minVRate + min_v_rate_offset) > (pRangeLimit->maxVRate + max_v_rate_offset) || + (pRangeLimit->minHRate + min_h_rate_offset) > (pRangeLimit->maxHRate + max_h_rate_offset) || + pRangeLimit->maxVRate == 0 || + pRangeLimit->maxHRate == 0) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT); + } + break; + } + } + + // extension and size check + if ((NvU32)(p->bExtensionFlag + 1) * sizeof(EDIDV1STRUC) > length) + { + // Do not proceed with further validation if the size is invalid. + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE); + return ret; + } + + // validate Detailed Timing Descriptors, 4 blocks + for (i = 0; i < 4; i++) + { + if (*((NvU16 *)&p->DetailedTimingDesc[i]) != 0) + { + // This block is not a Display Descriptor. 
+ // It must be a valid timing definition + // validate the block by passing NULL as the NVTIMING parameter to parseEdidDetailedTimingDescriptor + if (parseEdidDetailedTimingDescriptor((NvU8 *)&p->DetailedTimingDesc[i], NULL) != NVT_STATUS_SUCCESS) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD); + } + } + else + { + // This block is a display descriptor, validate + if (((EDID_LONG_DISPLAY_DESCRIPTOR *)&p->DetailedTimingDesc[i])->rsvd != 0) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD); + } + } + } + + // validate extension blocks + for (j = 1; j <= p->bExtensionFlag; j++) + { + pExt = pEdid + sizeof(EDIDV1STRUC) * j; + + // check for 861 extension + switch (*pExt) + { + case NVT_EDID_EXTENSION_CTA: + // first sanity check on the extension block + if (get861ExtInfo(pExt, sizeof(EIA861EXTENSION), NULL) != NVT_STATUS_SUCCESS) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT); + } + + // check sum on CEA extension block + for (i = 0, checkSum = 0; i < sizeof(EIA861EXTENSION); i ++) + { + checkSum += pExt[i]; + } + + if ((checkSum & 0xFF) != 0) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + } + + // 0 indicates no DTD in this block + if (((EIA861EXTENSION*)pExt)->offset == 0) + { + continue; + } + + // validate DTD blocks + pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pExt[((EIA861EXTENSION *)pExt)->offset]; + while (pDTD->wDTPixelClock != 0 && + (NvU8 *)pDTD - pExt < (int)sizeof(EIA861EXTENSION)) + { + if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, NULL) != NVT_STATUS_SUCCESS) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD); + } + pDTD++; + } + break; + case NVT_EDID_EXTENSION_VTB: + // perform a checksum on the VTB block + for (i = 0, checkSum = 0; i < sizeof(VTBEXTENSION); i++) + { + checkSum += pExt[i]; + } + if ((checkSum & 0xFF) != 0) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + } + break; + case NVT_EDID_EXTENSION_DISPLAYID: + // perform a checksum on the VTB block + for (i = 0, checkSum = 0; i < sizeof(EIA861EXTENSION); i++) + { + checkSum += pExt[i]; + } + if ((checkSum & 0xFF) != 0) + { + ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + } + break; + default: + break; + } + } + + + } + + return ret; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EDIDValidation (NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation) +{ + if (NvTiming_EDIDValidationMask(pEdid, length, bIsStrongValidation) != 0) { + return NVT_STATUS_ERR; + } else { + return NVT_STATUS_SUCCESS; + } +} + +// Function Description: Get the first Detailed Timing Descriptor +// +// Parameters: +// pEdidInfo: IN - pointer to parsed EDID +// pT: OUT - pointer to where the DTD1 timing will be stored +// +// Return: +// NVT_STATUS_SUCCESS: DTD1 was found in parsed EDID, pT is a valid result +// NVT_STATUS_INVALID_PARAMETER: one or more parameter was invalid +// NVT_STATUS_ERR: DTD1 was not found in parsed EDID, pT is invalid +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetDTD1Timing (NVT_EDID_INFO * pEdidInfo, NVT_TIMING * pT) +{ + NvU32 j; + + // check param + if (pEdidInfo == NULL || pT == NULL) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // find the PTM mode + for (j = 0; j < pEdidInfo->total_timings; j++) + { + if (NVT_PREFERRED_TIMING_IS_DTD1(pEdidInfo->timing[j].etc.flag, pEdidInfo->timing[j].etc.status)) + { + *pT = pEdidInfo->timing[j]; + return NVT_STATUS_SUCCESS; + } + } + + // find DisplayID preferred + for (j = 1; j < 
pEdidInfo->total_timings; j++) + { + if (NVT_PREFERRED_TIMING_IS_DISPLAYID(pEdidInfo->timing[j].etc.flag)) + { + *pT = pEdidInfo->timing[j]; + return NVT_STATUS_SUCCESS; + } + } + + // DTD1 should exist, but if it doesn't, return not found + for (j = 0; j < pEdidInfo->total_timings; j++) + { + NvU32 data = pEdidInfo->timing[j].etc.status; + if (NVT_IS_DTD1(data)) + { + *pT = pEdidInfo->timing[j]; + return NVT_STATUS_SUCCESS; + } + } + + // DTD1 should exist, but if it doesn't, return not found + return NVT_STATUS_ERR; +} + +// Description: Parses a VTB extension block into its associated timings +// +// Parameters: +// pEdidExt: IN - pointer to the beginning of the extension block +// pInfo: IN - The original block information, including the +// array of timings. +// +// NOTE: this function *really* should be in its own separate file, but a certain DVS test +// uses cross build makefiles which do not allow the specification of a new file. +CODE_SEGMENT(PAGE_DD_CODE) +void parseVTBExtension(NvU8 *pEdidExt, NVT_EDID_INFO *pInfo) +{ + NvU32 i; + VTBEXTENSION *pExt = (VTBEXTENSION *)pEdidExt; + NvU32 count; + NvU32 bytes; + NVT_TIMING newTiming; + + // Null = bad idea + if (pEdidExt == NULL) + { + return; + } + + // Sanity check for VTB extension block + if (pExt->tag != NVT_EDID_EXTENSION_VTB || + pExt->revision == NVT_VTB_REV_NONE) + { + return; + } + + // Sanity check - ensure that the # of descriptor does not exceed + // byte size + count = (NvU32)sizeof(EDID_LONG_DISPLAY_DESCRIPTOR) * pExt->num_detailed + + (NvU32)sizeof(EDID_CVT_3BYTE_BLOCK) * pExt->num_cvt + + (NvU32)sizeof(NvU16) * pExt->num_standard; + if (count > NVT_VTB_MAX_PAYLOAD) + { + return; + } + + count = 0; + bytes = 0; + + // Process Detailed Timings + for (i = 0; i < pExt->num_detailed; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseEdidDetailedTimingDescriptor((NvU8 *)(pExt->data + bytes), + &newTiming) == NVT_STATUS_SUCCESS) + { + newTiming.etc.name[39] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_DTDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + bytes += (NvU32)(sizeof(EDID_LONG_DISPLAY_DESCRIPTOR)); + } + } + + // Process CVT Timings + for (i = 0; i < pExt->num_cvt; i++) + { + parseEdidCvt3ByteDescriptor((NvU8 *)(pExt->data + bytes), pInfo, &count); + + bytes += (NvU32)sizeof(EDID_CVT_3BYTE_BLOCK); + } + + // Process Standard Timings + for (i = 0; i < pExt->num_standard; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + parseEdidStandardTimingDescriptor(*(NvU16 *)(pExt->data + bytes), + pInfo, count, &newTiming); + newTiming.etc.name[39] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_VTB_EXT_STDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + + bytes += (NvU32)sizeof(NvU16); + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +static int IsPrintable(NvU8 c) +{ + return ((c >= ' ') && (c <= '~')); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static int IsWhiteSpace(NvU8 c) +{ + // consider anything unprintable or single space (ASCII 32) + // to be whitespace + return (!IsPrintable(c) || (c == ' ')); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static void RemoveTrailingWhiteSpace(NvU8 *str, int len) +{ + int i; + + for (i = len; (i >= 0) && IsWhiteSpace(str[i]); i--) + { + str[i] = '\0'; + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +static void RemoveNonPrintableCharacters(NvU8 *str) +{ + int i; + + // Check that all characters are printable. + // If not, replace them with '?' 
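+    // e.g. an (assumed) name string "ACME\x01Monitor" would become "ACME?Monitor":
+    // any byte outside the printable ASCII range ' ' (0x20) .. '~' (0x7E) is replaced.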
+ for (i = 0; str[i] != '\0'; i++) + { + if (!IsPrintable(str[i])) + { + str[i] = '?'; + } + } +} + +/** + * @brief Assigns this timing to the next available slot in pInfo->timing[] if + * possible. + * @param pInfo EDID struct containing the parsed timings + * @param pTiming New timing to be copied into pInfo->timing[] + */ +CODE_SEGMENT(PAGE_DD_CODE) +NvBool assignNextAvailableTiming(NVT_EDID_INFO *pInfo, + const NVT_TIMING *pTiming) +{ + // Don't write past the end of + // pInfo->timing[NVT_EDID_MAX_TOTAL_TIMING] + if (pInfo->total_timings >= COUNT(pInfo->timing)) { + return NV_FALSE; + } + + pInfo->timing[pInfo->total_timings++] = *pTiming; + return NV_TRUE; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetProductName(const NVT_EDID_INFO *pEdidInfo, + NvU8 *pProductName, + const NvU32 productNameLength) +{ + NvU32 i = 0, m = 0, n = 0; + + if( pEdidInfo == NULL || pProductName == NULL ) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + for ( i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + if (pEdidInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRITPOR_DPN) + { + for(n = 0; n < NVT_EDID_LDD_PAYLOAD_SIZE && pEdidInfo->ldd[i].u.product_name.str[n] != 0x0; n++) + { + pProductName[m++] = pEdidInfo->ldd[i].u.product_name.str[n]; + if ((m + 1) >= productNameLength) + { + goto done; + } + } + } + } +done: + pProductName[m] = '\0'; //Ensure a null termination at the end. + + RemoveTrailingWhiteSpace(pProductName, m); + RemoveNonPrintableCharacters(pProductName); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalculateEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidsize) +{ + return calculateCRC32(pEDIDBuffer, edidsize); +} + +//Calculates EDID's CRC after purging 'Week of Manufacture', 'Year of Manufacture', +//'Product ID String' & 'Serial Number' from EDID +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalculateCommonEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidVersion) +{ + NvU32 commonEDIDBufferSize = 0; + NvU8 CommonEDIDBuffer[256]; + NvU32 edidBufferIndex = 0; + + if(pEDIDBuffer==NULL) + { + return 0; + } + + // Transfer over the original EDID buffer + NVMISC_MEMCPY(CommonEDIDBuffer, pEDIDBuffer, 256); + + // Wipe out the Serial Number, Week of Manufacture, and Year of Manufacture or Model Year + NVMISC_MEMSET(CommonEDIDBuffer + 0x0C, 0, 6); + + // Wipe out the checksums + CommonEDIDBuffer[0x7F] = 0; + CommonEDIDBuffer[0xFF] = 0; + + // We also need to zero out any "EDID Other Monitor Descriptors" (http://en.wikipedia.org/wiki/Extended_display_identification_data) + for (edidBufferIndex = 54; edidBufferIndex <= 108; edidBufferIndex += 18) + { + if (CommonEDIDBuffer[edidBufferIndex] == 0 && CommonEDIDBuffer[edidBufferIndex+1] == 0) + { + // Wipe this block out. It contains OEM-specific details that contain things like serial numbers + NVMISC_MEMSET(CommonEDIDBuffer + edidBufferIndex, 0, 18); + } + } + + // Check what size we should do the compare against + if ( edidVersion > NVT_EDID_VER_1_4 ) + { + commonEDIDBufferSize = 256; + } + else + { + commonEDIDBufferSize = 128; + } + + return NvTiming_CalculateEDIDCRC32(CommonEDIDBuffer, commonEDIDBufferSize); +} // NvTiming_CalculateCommonEDIDCRC32 + +// Calculate the minimum and maximum v_rate and h_rate, as well as +// maximum pclk; initialize with the range of values in the EDID mode +// list, but override with what is in the range limit descriptor section. 
+// +// based on drivers/modeset.nxt/CODE/edid.c:EdidGetMonitorLimits() and +// EdidBuildRangeLimits() +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalculateEDIDLimits(NVT_EDID_INFO *pEdidInfo, NVT_EDID_RANGE_LIMIT *pLimit) +{ + NvU32 i; + + NVMISC_MEMSET(pLimit, 0, sizeof(NVT_EDID_RANGE_LIMIT)); + + // the below currently only supports 1.x EDIDs + if ((pEdidInfo->version & 0xFF00) != 0x100) + { + return NVT_STATUS_ERR; + } + + pLimit->min_v_rate_hzx1k = ~0; + pLimit->max_v_rate_hzx1k = 0; + pLimit->min_h_rate_hz = ~0; + pLimit->max_h_rate_hz = 0; + pLimit->max_pclk_10khz = 0; + + // find the ranges in the EDID mode list + for (i = 0; i < pEdidInfo->total_timings; i++) + { + NVT_TIMING *pTiming = &pEdidInfo->timing[i]; + NvU32 h_rate_hz; + + if (pLimit->min_v_rate_hzx1k > pTiming->etc.rrx1k) + { + pLimit->min_v_rate_hzx1k = pTiming->etc.rrx1k; + } + if (pLimit->max_v_rate_hzx1k < pTiming->etc.rrx1k) + { + pLimit->max_v_rate_hzx1k = pTiming->etc.rrx1k; + } + + h_rate_hz = axb_div_c(pTiming->pclk, 10000, (NvU32)pTiming->HTotal); + + if (pLimit->min_h_rate_hz > h_rate_hz) + { + pLimit->min_h_rate_hz = h_rate_hz; + } + if (pLimit->max_h_rate_hz < h_rate_hz) + { + pLimit->max_h_rate_hz = h_rate_hz; + } + + if (pLimit->max_pclk_10khz < pTiming->pclk) + { + pLimit->max_pclk_10khz = pTiming->pclk; + } + } + + // use the range limit display descriptor, if available: these + // override anything we found in the EDID mode list + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) + { + if (pEdidInfo->ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_DRL) + { + NVT_EDID_DD_RANGE_LIMIT *pRangeLimit = &pEdidInfo->ldd[i].u.range_limit; + NvU32 max_pclk_10khz; + + // {min,max}_v_rate is in hz + if (pRangeLimit->min_v_rate != 0) { + pLimit->min_v_rate_hzx1k = pRangeLimit->min_v_rate * 1000; + } + if (pRangeLimit->max_v_rate != 0) { + pLimit->max_v_rate_hzx1k = pRangeLimit->max_v_rate * 1000; + } + + // {min,max}_h_rate is in khz + if (pRangeLimit->min_h_rate != 0) { + pLimit->min_h_rate_hz = pRangeLimit->min_h_rate * 1000; + } + if (pRangeLimit->max_h_rate != 0) { + pLimit->max_h_rate_hz = pRangeLimit->max_h_rate * 1000; + } + + // EdidGetMonitorLimits() honored the pclk from the + // modelist over what it found in the range limit + // descriptor, so do the same here + max_pclk_10khz = pRangeLimit->max_pclk_MHz * 100; + if (pLimit->max_pclk_10khz < max_pclk_10khz) { + pLimit->max_pclk_10khz = max_pclk_10khz; + } + + break; + } + } + + return NVT_STATUS_SUCCESS; +} + +// Build a user-friendly name: +// +// * get the vendor name: +// * use the 3 character PNP ID from the EDID's manufacturer ID field +// * expand, if possible, the PNP ID using the PNPVendorIds[] table +// * get the product name from the descriptor block(s) +// * prepend the vendor name and the product name, unless the product +// name already contains the vendor name +// * if any characters in the string are outside the printable ASCII +// range, replace them with '?' + +#define tolower(c) (((c) >= 'A' && (c) <= 'Z') ? 
(c) + ('a'-'A') : (c)) + +CODE_SEGMENT(PAGE_DD_CODE) +void NvTiming_GetMonitorName(NVT_EDID_INFO *pEdidInfo, + NvU8 monitor_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]) +{ + NvU8 product_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]; + const NvU8 *vendor_name; + NVT_STATUS status; + NvU32 i, j; + NvBool prepend_vendor; + + NVMISC_MEMSET(monitor_name, 0, NVT_EDID_MONITOR_NAME_STRING_LENGTH); + + // get vendor_name: it is either the manufacturer ID or the PNP vendor name + vendor_name = pEdidInfo->manuf_name; + + for (i = 0; i < (sizeof(PNPVendorIds)/sizeof(PNPVendorIds[0])); i++) + { + if ((vendor_name[0] == PNPVendorIds[i].vendorId[0]) && + (vendor_name[1] == PNPVendorIds[i].vendorId[1]) && + (vendor_name[2] == PNPVendorIds[i].vendorId[2])) + { + vendor_name = (const NvU8 *) PNPVendorIds[i].vendorName; + break; + } + } + + // get the product name from the descriptor blocks + status = NvTiming_GetProductName(pEdidInfo, product_name, sizeof(product_name)); + + if (status != NVT_STATUS_SUCCESS) + { + product_name[0] = '\0'; + } + + // determine if the product name already includes the vendor name; + // if so, do not prepend the vendor name to the monitor name + prepend_vendor = NV_TRUE; + + for (i = 0; i < NVT_EDID_MONITOR_NAME_STRING_LENGTH; i++) + { + if (vendor_name[i] == '\0') + { + prepend_vendor = NV_FALSE; + break; + } + + if (tolower(product_name[i]) != tolower(vendor_name[i])) + { + break; + } + } + + j = 0; + + // prepend the vendor name to the monitor name + if (prepend_vendor) + { + for (i = 0; (i < NVT_EDID_MONITOR_NAME_STRING_LENGTH) && (vendor_name[i] != '\0'); i++) + { + monitor_name[j++] = vendor_name[i]; + } + } + + // if we added the vendor name above, add a space between the + // vendor name and the product name + if ((j > 0) && (j < (NVT_EDID_MONITOR_NAME_STRING_LENGTH - 1))) + { + monitor_name[j++] = ' '; + } + + // append the product name to the monitor string + for (i = 0; (i < NVT_EDID_MONITOR_NAME_STRING_LENGTH) && (product_name[i] != '\0'); i++) + { + if (j >= (NVT_EDID_MONITOR_NAME_STRING_LENGTH - 1)) + { + break; + } + monitor_name[j++] = product_name[i]; + } + monitor_name[j] = '\0'; + + RemoveTrailingWhiteSpace(monitor_name, j); + RemoveNonPrintableCharacters(monitor_name); +} + +CODE_SEGMENT(PAGE_DD_CODE) +void updateHDMILLCDeepColorForTiming(NVT_EDID_INFO *pInfo, NvU32 index) +{ + NVT_EDID_CEA861_INFO *p861Info = &pInfo->ext861; + // NOTE: EDID and CEA861 does not have clear statement regarding this. + // To be backward compatible with current Nvidia implementation, if not edid >= 1.4 and CEA block exists, follow color format declaration from CEA block. 
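+    // Sketch of the mapping applied below, assuming the HDMI LLC VSDB deep-color caps
+    // have been parsed into hdmiLlcInfo: DC_30bit -> 10 bpc, DC_36bit -> 12 bpc,
+    // DC_48bit -> 16 bpc; 8 bpc RGB is always reported as supported.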
+ // update supported color space within each bpc + // rgb 8bpc always supported + + UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.rgb444, 0, 1, + pInfo->hdmiLlcInfo.dc_30_bit, + pInfo->hdmiLlcInfo.dc_36_bit, + 0, pInfo->hdmiLlcInfo.dc_48_bit); + + if (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_444) + { + // pHdmiLlc->dc_y444 assumed basic cap is set; when base cap is set, 8bpc yuv444 always supported + UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.yuv444, 0, 1, + pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_30_bit, + pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_36_bit, + 0, pInfo->hdmiLlcInfo.dc_y444 && pInfo->hdmiLlcInfo.dc_48_bit); + } + if (p861Info->basic_caps & NVT_CEA861_CAP_YCbCr_422) + { + // pHdmiLlc->dc_y444 assumed basic cap is set; when base cap is set, 8bpc yuv422 always supported + // newer CEA861/HDMI specs suggest the base cap should support both or neither (Nvidia puts no limitations here) + // HDMI1.4b spec Section 6.2.4 Color Depth Requirements states that YCbCr 4:2:2 format is 36-bit mode, which means 8, 10 and 12bpc output is supported as soon as there is enough bandwidth + UPDATE_BPC_FOR_COLORFORMAT(pInfo->timing[index].etc.yuv422, 0, 1, 1, 1, 0, 0); + } +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_edidext_861.c b/src/common/modeset/timing/nvt_edidext_861.c new file mode 100644 index 000000000..67df6eaf1 --- /dev/null +++ b/src/common/modeset/timing/nvt_edidext_861.c @@ -0,0 +1,2941 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_edidext_861.c +// +// Purpose: the provide edid 861 extension related services +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "edid.h" + +PUSH_SEGMENTS + +#define EIA_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format) \ + {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ? NVT_INTERLACED:NVT_PROGRESSIVE,\ + 0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_STATUS_EDID_861STn(format),"CEA-861B:#"#format""}} + + +#define NVT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format,name) \ + {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ? 
NVT_INTERLACED:NVT_PROGRESSIVE,\ + 0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_TYPE_NV_PREDEFINEDn(format),name}} + +#define HDMI_EXT_TIMING(hv,hfp,hsw,ht,hsp,vv,vfp,vsw,vt,vsp,rrx1k,ip,aspect,rep,format,name) \ + {hv,0,hfp,hsw,ht,(hsp)=='-',vv,0,vfp,vsw,vt,(vsp)=='-',(ip)=='i' ? NVT_INTERLACED:NVT_PROGRESSIVE,\ + 0,{0,((rrx1k)+500)/1000,rrx1k,((1?aspect)<<16)|(0?aspect),rep,{0},{0},{0},{0},NVT_STATUS_HDMI_EXTn(format),name}} + +DATA_SEGMENT(PAGE_DATA) +CONS_SEGMENT(PAGE_CONS) + +static const NVT_TIMING EIA861B[]= +{ + // all 64 EIA/CEA-861E timings + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 1),//640 x 480p @59.94/60 (Format 1) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x1, 2),//720 x 480p @59.94/60 (Format 2) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x1, 3),//720 x 480p @59.94/60 (Format 3) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5,5, 750,'+', 59940,'p',16:9, 0x1, 4),//1280 x 720p @59.94/60 (Format 4) + EIA_TIMING(1920, 88, 44,2200,'+', 540, 2,5, 562,'+', 59940,'i',16:9, 0x1, 5),//1920 x 1080i @59.94/60 (Format 5) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-', 59940,'i', 4:3, 0x2, 6),//720(1440) x 480i @59.94/60 (Format 6) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-', 59940,'i',16:9, 0x2, 7),//720(1440) x 480i @59.94/60 (Format 7) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 263,'-', 59940,'p', 4:3, 0x2, 8),//720(1440) x 240p @59.94/60 (Format 8) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 263,'-', 59940,'p',16:9, 0x2, 9),//720(1440) x 240p @59.94/60 (Format 9) + EIA_TIMING(2880, 76,248,3432,'-', 240, 4,3, 262,'-', 59940,'i', 4:3, 0x3ff,10),//(2880) x 480i @59.94/60 (Format 10) + EIA_TIMING(2880, 76,248,3432,'-', 240, 4,3, 262,'-', 59940,'i',16:9, 0x3ff,11),//(2880) x 480i @59.94/60 (Format 11) + EIA_TIMING(2880, 76,248,3432,'-', 240, 5,3, 263,'-', 59940,'p', 4:3, 0x3ff,12),//(2880) x 480p @59.94/60 (Format 12) + EIA_TIMING(2880, 76,248,3432,'-', 240, 5,3, 263,'-', 59940,'p',16:9, 0x3ff,13),//(2880) x 480p @59.94/60 (Format 13) + EIA_TIMING(1440, 32,124,1716,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x3,14),//1440 x 480p @59.94/60 (Format 14) + EIA_TIMING(1440, 32,124,1716,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x3,15),//1440 x 480p @59.94/60 (Format 15) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+', 59940,'p',16:9, 0x1,16),//1920 x 1080p @59.94/60 (Format 16) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-', 50000,'p', 4:3, 0x1,17),//720 x 576p @50 (Format 17) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-', 50000,'p',16:9, 0x1,18),//720 x 576p @50 (Format 18) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5,5, 750,'+', 50000,'p',16:9, 0x1,19),//1280 x 720p @50 (Format 19) + EIA_TIMING(1920, 528, 44,2640,'+', 540, 2,5, 562,'+', 50000,'i',16:9, 0x1,20),//1920 x 1080i @50 (Format 20) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'i', 4:3, 0x2,21),//720(1440) x 576i @50 (Format 21) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'i',16:9, 0x2,22),//720(1440) x 576i @50 (Format 22) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'p', 4:3, 0x2,23),//720(1440) x 288p @50 (Format 23) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-', 50000,'p',16:9, 0x2,24),//720(1440) x 288p @50 (Format 24) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'i', 4:3, 0x3ff,25),//(2880) x 576i @50 (Format 25) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'i',16:9, 
0x3ff,26),//(2880) x 576i @50 (Format 26) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'p', 4:3, 0x3ff,27),//(2880) x 288p @50 (Format 27) + EIA_TIMING(2880, 48,252,3456,'-', 288, 2,3, 312,'-', 50000,'p',16:9, 0x3ff,28),//(2880) x 288p @50 (Format 28) + EIA_TIMING(1440, 24,128,1728,'-', 576, 5,5, 625,'_', 50000,'p', 4:3, 0x3,29),//1440 x 576p @50 (Format 29) + EIA_TIMING(1440, 24,128,1728,'-', 576, 5,5, 625,'_', 50000,'p',16:9, 0x3,30),//1440 x 576p @50 (Format 30) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+', 50000,'p',16:9, 0x1,31),//1920 x 1080p @50 (Format 31) + EIA_TIMING(1920, 638, 44,2750,'+',1080, 4,5,1125,'+', 23976,'p',16:9, 0x1,32),//1920 x 1080p @23.97/24 (Format 32) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+', 25000,'p',16:9, 0x1,33),//1920 x 1080p @25 (Format 33) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+', 29970,'p',16:9, 0x1,34),//1920 x 1080p @29.97/30 (Format 34) + EIA_TIMING(2880, 64,248,3432,'-', 480, 9,6, 525,'-', 59940,'p', 4:3, 0x7,35),//(2880) x 480p @59.94/60 (Format 35) + EIA_TIMING(2880, 64,248,3432,'-', 480, 9,6, 525,'-', 59940,'p',16:9, 0x7,36),//(2880) x 480p @59.94/60 (Format 36) + EIA_TIMING(2880, 48,256,3456,'-', 576, 5,5, 625,'-', 50000,'p', 4:3, 0x7,37),//(2880) x 576p @50 (Format 37) + EIA_TIMING(2880, 48,256,3456,'-', 576, 5,5, 625,'-', 50000,'p',16:9, 0x7,38),//(2880) x 576p @50 (Format 38) + EIA_TIMING(1920, 32,168,2304,'+', 540,23,5, 625,'-', 50000,'i',16:9, 0x1,39),//1920 x 1080i @50 (Format 39) + EIA_TIMING(1920, 528, 44,2640,'+', 540, 2,5, 562,'+',100000,'i',16:9, 0x1,40),//1920 x 1080i @100 (Format 40) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5,5, 750,'+',100000,'p',16:9, 0x1,41),//1280 x 720p @100 (Format 41) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',100000,'p', 4:3, 0x1,42),//720 x 576p @100 (Format 42) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',100000,'p',16:9, 0x1,43),//720 x 576p @100 (Format 43) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',100000,'i', 4:3, 0x2,44),//720(1440) x 576i @100 (Format 44) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',100000,'i',16:9, 0x2,45),//720(1440) x 576i @100 (Format 45) + EIA_TIMING(1920, 88, 44,2200,'+', 540, 2,5, 562,'+',119880,'i',16:9, 0x1,46),//1920 x 1080i @119.88/120 (Format 46) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5,5, 750,'+',119880,'p',16:9, 0x1,47),//1280 x 720p @119.88/120 (Format 47) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',119880,'p', 4:3, 0x1,48),//720 x 480p @119.88/120 (Format 48) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',119880,'p',16:9, 0x1,49),//720 x 480p @119.88/120 (Format 49) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',119880,'i', 4:3, 0x2,50),//720(1440) x 480i @119.88/120 (Format 50) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',119880,'i',16:9, 0x2,51),//720(1440) x 480i @119.88/120 (Format 51) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',200000,'p', 4:3, 0x1,52),//720 x 576p @200 (Format 52) + EIA_TIMING( 720, 12, 64, 864,'-', 576, 5,5, 625,'-',200000,'p',16:9, 0x1,53),//720 x 576p @200 (Format 53) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',200000,'i', 4:3, 0x2,54),//720(1440) x 576i @200 (Format 54) + EIA_TIMING(1440, 24,126,1728,'-', 288, 2,3, 312,'-',200000,'i',16:9, 0x2,55),//720(1440) x 576i @200 (Format 55) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',239760,'p', 4:3, 0x1,56),//720 x 480p @239.76/240 (Format 56) + EIA_TIMING( 720, 16, 62, 858,'-', 480, 9,6, 525,'-',239760,'p',16:9, 0x1,57),//720 x 480p 
@239.76/240 (Format 57) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',239760,'i', 4:3, 0x2,58),//720(1440) x 480i @239.76/240 (Format 58) + EIA_TIMING(1440, 38,124,1716,'-', 240, 4,3, 262,'-',239760,'i',16:9, 0x2,59),//720(1440) x 480i @239.76/240 (Format 59) + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5,5, 750,'+',23976, 'p',16:9, 0x1,60),//1280 x 720p @23.97/24 (Format 60) + EIA_TIMING(1280,2420, 40,3960,'+', 720, 5,5, 750,'+',25000, 'p',16:9, 0x1,61),//1280 x 720p @25 (Format 61) + EIA_TIMING(1280,1760, 40,3300,'-', 720, 5,5, 750,'+',29970, 'p',16:9, 0x1,62),//1280 x 720p @29.97/30 (Format 62) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4,5,1125,'+',119880,'p',16:9, 0x1,63),//1920 x 1080p @119.88/120 (Format 63) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4,5,1125,'+',100000,'p',16:9, 0x1,64),//1920 x 1080p @100 (Format 64) + // Following modes are from CEA-861F + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5, 5, 750,'+', 23976,'p', 64:27, 0x1, 65),//1280 x 720p @23.98/24 (Format 65) + EIA_TIMING(1280,2420, 40,3960,'+', 720, 5, 5, 750,'+', 25000,'p', 64:27, 0x1, 66),//1280 x 720p @25 (Format 66) + EIA_TIMING(1280,1760, 40,3300,'+', 720, 5, 5, 750,'+', 29970,'p', 64:27, 0x1, 67),//1280 x 720p @29.97/30 (Format 67) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5, 5, 750,'+', 50000,'p', 64:27, 0x1, 68),//1280 x 720p @50 (Format 68) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5, 5, 750,'+', 59940,'p', 64:27, 0x1, 69),//1280 x 720p @59.94/60 (Format 69) + EIA_TIMING(1280, 440, 40,1980,'+', 720, 5, 5, 750,'+',100000,'p', 64:27, 0x1, 70),//1280 x 720p @100 (Format 70) + EIA_TIMING(1280, 110, 40,1650,'+', 720, 5, 5, 750,'+',119880,'p', 64:27, 0x1, 71),//1280 x 720p @119.88/120 (Format 71) + EIA_TIMING(1920, 638, 44,2750,'+',1080, 4, 5,1125,'+', 23976,'p', 64:27, 0x1, 72),//1920 x1080p @23.98/24 (Format 72) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+', 25000,'p', 64:27, 0x1, 73),//1920 x1080p @25 (Format 73) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+', 29970,'p', 64:27, 0x1, 74),//1920 x1080p @29.97/30 (Format 74) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+', 50000,'p', 64:27, 0x1, 75),//1920 x1080p @50 (Format 75) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+', 59940,'p', 64:27, 0x1, 76),//1920 x1080p @59.94/60 (Format 76) + EIA_TIMING(1920, 528, 44,2640,'+',1080, 4, 5,1125,'+',100000,'p', 64:27, 0x1, 77),//1920 x1080p @100 (Format 77) + EIA_TIMING(1920, 88, 44,2200,'+',1080, 4, 5,1125,'+',119880,'p', 64:27, 0x1, 78),//1920 x1080p @119.88/120 (Format 78) + EIA_TIMING(1680,1360, 40,3300,'+', 720, 5, 5, 750,'+', 23976,'p', 64:27, 0x1, 79),//1680 x 720p @23.98/24 (Format 79) + EIA_TIMING(1680,1228, 40,3168,'+', 720, 5, 5, 750,'+', 25000,'p', 64:27, 0x1, 80),//1680 x 720p @25 (Format 80) + EIA_TIMING(1680, 700, 40,2640,'+', 720, 5, 5, 750,'+', 29970,'p', 64:27, 0x1, 81),//1680 x 720p @29.97/30 (Format 81) + EIA_TIMING(1680, 260, 40,2200,'+', 720, 5, 5, 750,'+', 50000,'p', 64:27, 0x1, 82),//1680 x 720p @50 (Format 82) + EIA_TIMING(1680, 260, 40,2200,'+', 720, 5, 5, 750,'+', 59940,'p', 64:27, 0x1, 83),//1680 x 720p @59.94/60 (Format 83) + EIA_TIMING(1680, 60, 40,2000,'+', 720, 5, 5, 825,'+',100000,'p', 64:27, 0x1, 84),//1680 x 720p @100 (Format 84) + EIA_TIMING(1680, 60, 40,2000,'+', 720, 5, 5, 825,'+',119880,'p', 64:27, 0x1, 85),//1680 x 720p @119.88/120 (Format 85) + EIA_TIMING(2560, 998, 44,3750,'+',1080, 4, 5,1100,'+', 23976,'p', 64:27, 0x1, 86),//2560 x1080p @23.98/24 (Format 86) + EIA_TIMING(2560, 448, 44,3200,'+',1080, 4, 5,1125,'+', 25000,'p', 64:27, 0x1, 
87),//2560 x1080p @25 (Format 87) + EIA_TIMING(2560, 768, 44,3520,'+',1080, 4, 5,1125,'+', 29970,'p', 64:27, 0x1, 88),//2560 x1080p @29.97/30 (Format 88) + EIA_TIMING(2560, 548, 44,3300,'+',1080, 4, 5,1125,'+', 50000,'p', 64:27, 0x1, 89),//2560 x1080p @50 (Format 89) + EIA_TIMING(2560, 248, 44,3000,'+',1080, 4, 5,1100,'+', 59940,'p', 64:27, 0x1, 90),//2560 x1080p @59.94/60 (Format 90) + EIA_TIMING(2560, 218, 44,2970,'+',1080, 4, 5,1250,'+',100000,'p', 64:27, 0x1, 91),//2560 x1080p @100 (Format 91) + EIA_TIMING(2560, 548, 44,3300,'+',1080, 4, 5,1250,'+',119880,'p', 64:27, 0x1, 92),//2560 x1080p @119.88/120 (Format 92) + EIA_TIMING(3840,1276, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p', 16:9, 0x1, 93),//3840 x2160p @23.98/24 (Format 93) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p', 16:9, 0x1, 94),//3840 x2160p @25 (Format 94) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p', 16:9, 0x1, 95),//3840 x2160p @29.97/30 (Format 95) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p', 16:9, 0x1, 96),//3840 x2160p @50 (Format 96) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p', 16:9, 0x1, 97),//3840 x2160p @59.94/60 (Format 97) + EIA_TIMING(4096,1020, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p',256:135, 0x1, 98),//4096 x2160p @23.98/24 (Format 98) + EIA_TIMING(4096, 968, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p',256:135, 0x1, 99),//4096 x2160p @25 (Format 99) + EIA_TIMING(4096, 88, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p',256:135, 0x1,100),//4096 x2160p @29.97/30 (Format 100) + EIA_TIMING(4096, 968, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p',256:135, 0x1,101),//4096 x2160p @50 (Format 101) + EIA_TIMING(4096, 88, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p',256:135, 0x1,102),//4096 x2160p @59.94/60 (Format 102) + EIA_TIMING(3840,1276, 88,5500,'+',2160, 8,10,2250,'+', 23976,'p', 64:27, 0x1,103),//3840 x2160p @23.98/24 (Format 103) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 25000,'p', 64:27, 0x1,104),//3840 x2160p @25 (Format 104) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 29970,'p', 64:27, 0x1,105),//3840 x2160p @29.97/30 (Format 105) + EIA_TIMING(3840,1056, 88,5280,'+',2160, 8,10,2250,'+', 50000,'p', 64:27, 0x1,106),//3840 x2160p @50 (Format 106) + EIA_TIMING(3840, 176, 88,4400,'+',2160, 8,10,2250,'+', 59940,'p', 64:27, 0x1,107),//3840 x2160p @59.94/60 (Format 107) + // VIC 108-127 timings are from CTA-861-G_FINAL_revised_2018_Errata_2.pdf + EIA_TIMING(1280, 960, 40, 2500,'+', 720, 5, 5, 750,'+', 47950,'p', 16:9, 0x1,108),//1280 x 720p @47.95/48 (Format 108) + EIA_TIMING(1280, 960, 40, 2500,'+', 720, 5, 5, 750,'+', 47950,'p', 64:27, 0x1,109),//1280 x 720p @47.95/48 (Format 109) + EIA_TIMING(1680, 810, 40, 2750,'+', 720, 5, 5, 750,'+', 47950,'p', 64:27, 0x1,110),//1680 x 720p @47.95/48 (Format 110) + EIA_TIMING(1920, 638, 44, 2750,'+',1080, 4, 5,1125,'+', 47950,'p', 16:9, 0x1,111),//1920 x 1080p @47.95/48 (Format 111) + EIA_TIMING(1920, 638, 44, 2750,'+',1080, 4, 5,1125,'+', 47950,'p', 64:27, 0x1,112),//1920 x 1080p @47.95/48 (Format 112) + EIA_TIMING(2560, 998, 44, 3750,'+',1080, 4, 5,1100,'+', 47950,'p', 64:27, 0x1,113),//2560 x 1080p @47.95/48 (Format 113) + EIA_TIMING(3840,1276, 88, 5500,'+',2160, 8,10,2250,'+', 47950,'p', 16:9, 0x1,114),//3840 x 2160p @47.95/48 (Format 114) + EIA_TIMING(4096,1020, 88, 5500,'+',2160, 8,10,2250,'+', 47950,'p',256:135, 0x1,115),//4096 x 2160p @47.95/48 (Format 115) + EIA_TIMING(3840,1276, 88, 5500,'+',2160, 8,10,2250,'+', 47950,'p', 64:27, 0x1,116),//3840 x 2160p 
@47.95/48 (Format 116) + EIA_TIMING(3840,1056, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p', 16:9, 0x1,117),//3840 x 2160p @100 (Format 117) + EIA_TIMING(3840, 176, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p', 16:9, 0x1,118),//3840 x 2160p @119.88/120 (Format 118) + EIA_TIMING(3840,1056, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p', 64:27, 0x1,119),//3840 x 2160p @100 (Format 119) + EIA_TIMING(3840, 176, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p', 64:27, 0x1,120),//3840 x 2160p @119.88/120 (Format 120) + EIA_TIMING(5120,1996, 88, 7500,'+',2160, 8,10,2200,'+', 23976,'p', 64:27, 0x1,121),//5120 x 2160p @23.98/24 (Format 121) + EIA_TIMING(5120,1696, 88, 7200,'+',2160, 8,10,2200,'+', 25000,'p', 64:27, 0x1,122),//5120 x 2160p @25 (Format 122) + EIA_TIMING(5120, 664, 88, 6000,'+',2160, 8,10,2200,'+', 29970,'p', 64:27, 0x1,123),//5120 x 2160p @29.97/30 (Format 123) + EIA_TIMING(5120, 746, 88, 6250,'+',2160, 8,10,2475,'+', 47950,'p', 64:27, 0x1,124),//5120 x 2160p @47.95/48 (Format 124) + EIA_TIMING(5120,1096, 88, 6600,'+',2160, 8,10,2250,'+', 50000,'p', 64:27, 0x1,125),//5120 x 2160p @50 (Format 125) + EIA_TIMING(5120, 164, 88, 5500,'+',2160, 8,10,2250,'+', 59940,'p', 64:27, 0x1,126),//5120 x 2160p @59.94/60 (Format 126) + EIA_TIMING(5120,1096, 88, 6600,'+',2160, 8,10,2250,'+',100000,'p', 64:27, 0x1,127),//5120 x 2160p @100 (Format 127) + // VIC 128-192 are Forbidden and should be never used. But to simplify the SVD access, put a default timing here. + // We can remove these after adding a function to access CEA Timings. + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 128) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 129) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 130) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 131) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 132) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 133) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 134) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 135) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 136) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 137) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 138) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 139) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 140) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 141) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 142) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 
//Forbidden (Format 143) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 144) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 145) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 146) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 147) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 148) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 149) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 150) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 151) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 152) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 153) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 154) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 155) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 156) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 157) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 158) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 159) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 160) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 161) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 162) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 163) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 164) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 165) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 166) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 167) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 168) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 169) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 170) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p 
@59.94/60 //Forbidden (Format 171) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 172) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 173) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 174) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 175) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 176) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 177) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 178) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 179) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 180) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 181) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 182) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 183) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 184) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 185) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 186) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 187) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 188) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 189) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 190) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 191) + EIA_TIMING( 640, 16, 96, 800,'-', 480,10,2, 525,'-', 59940,'p', 4:3, 0x1, 0),//640 x 480p @59.94/60 //Forbidden (Format 192) + // VIC 193-219 timings are from CTA-861-G_FINAL_revised_2018_Errata_2.pdf + EIA_TIMING( 5120, 164, 88, 5500,'+',2160, 8,10,2250,'+',120000,'p', 64:27,0x1,193),// 5120 x 2160p @119.88/120 (Format 193) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 23976,'p', 16:9,0x1,194),// 7680 x 4320p @23.98/24 (Format 194) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 25000,'p', 16:9,0x1,195),// 7680 x 4320p @25 (Format 195) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 29970,'p', 16:9,0x1,196),// 7680 x 4320p @29.97/30 (Format 196) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 47950,'p', 16:9,0x1,197),// 7680 x 4320p @47.95/48 (Format 197) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 50000,'p', 16:9,0x1,198),// 7680 x 4320p @50 (Format 198) + EIA_TIMING( 7680, 552,176, 
9000,'+',4320,16,20,4400,'+', 59940,'p', 16:9,0x1,199),// 7680 x 4320p @59.94/60 (Format 199) + EIA_TIMING( 7680,2112,176,10560,'+',4320,16,20,4500,'+',100000,'p', 16:9,0x1,200),// 7680 x 4320p @100 (Format 200) + EIA_TIMING( 7680, 352,176, 8000,'+',4320,16,20,4500,'+',119880,'p', 16:9,0x1,201),// 7680 x 4320p @119.88/120 (Format 201) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 23976,'p', 64:27,0x1,202),// 7680 x 4320p @23.98/24 (Format 202) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 25000,'p', 64:27,0x1,203),// 7680 x 4320p @25 (Format 203) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 29970,'p', 64:27,0x1,204),// 7680 x 4320p @29.97/30 (Format 204) + EIA_TIMING( 7680,2552,176,11000,'+',4320,16,20,4500,'+', 47950,'p', 64:27,0x1,205),// 7680 x 4320p @47.95/48 (Format 205) + EIA_TIMING( 7680,2352,176,10800,'+',4320,16,20,4400,'+', 50000,'p', 64:27,0x1,206),// 7680 x 4320p @50 (Format 206) + EIA_TIMING( 7680, 552,176, 9000,'+',4320,16,20,4400,'+', 59940,'p', 64:27,0x1,207),// 7680 x 4320p @59.94/60 (Format 207) + EIA_TIMING( 7680,2112,176,10560,'+',4320,16,20,4500,'+',100000,'p', 64:27,0x1,208),// 7680 x 4320p @100 (Format 208) + EIA_TIMING( 7680, 352,176, 8800,'+',4500,16,20,4950,'+',119880,'p', 64:27,0x1,209),// 7680 x 4320p @119.88/120 (Format 209) + EIA_TIMING(10240,1492,176,12500,'+',4320,16,20,4950,'+', 23976,'p', 64:27,0x1,210),//10240 x 4320p @23.98/24 (Format 210) + EIA_TIMING(10240,2492,176,13500,'+',4320,16,20,4400,'+', 25000,'p', 64:27,0x1,211),//10240 x 4320p @25 (Format 211) + EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+', 29970,'p', 64:27,0x1,212),//10240 x 4320p @29.97/30 (Format 212) + EIA_TIMING(10240,1492,176,12500,'+',4320,16,20,4950,'+', 47950,'p', 64:27,0x1,213),//10240 x 4320p @47.95/48 (Format 213) + EIA_TIMING(10240,2492,176,13500,'+',4320,16,20,4400,'+', 44000,'p', 64:27,0x1,214),//10240 x 4320p @50 (Format 214) + EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+', 59940,'p', 64:27,0x1,215),//10240 x 4320p @59.94/60 (Format 215) + EIA_TIMING(10240,2192,176,13200,'+',4320,16,20,4500,'+',100000,'p', 64:27,0x1,216),//10240 x 4320p @100 (Format 216) + EIA_TIMING(10240, 288,176,11000,'+',4320,16,20,4500,'+',119880,'p', 64:27,0x1,217),//10240 x 4320p @119.88/120 (Format 217) + EIA_TIMING( 4096, 800, 88, 5280,'+',2160, 8,10,2250,'+',100000,'p',256:135,0x1,218),// 4096 x 2160p @100 (Format 218) + EIA_TIMING( 4096, 88, 88, 4400,'+',2160, 8,10,2250,'+',119880,'p',256:135,0x1,219),// 4096 x 2160p @119.88/120 (Format 219) + // 220-255 Reserved for the Future + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_CEA861B_FORMAT = sizeof(EIA861B)/sizeof(EIA861B[0]) - 1; + +static const NvU32 EIA861B_DUAL_ASPECT_VICS[][2] = +{ + { 2, 3 }, // 720x480p 59.94Hz/60Hz + { 4, 69 }, // 1280x720p 59.94Hz/60Hz + { 6, 7 }, // 720(1440)x480i 59.94Hz/60Hz + { 8, 9 }, // 720(1440)x240p 59.94Hz/60Hz + + { 10, 11 }, // 2880x480i 59.94Hz/60Hz + { 12, 13 }, // 2880x240p 59.94Hz/60Hz + { 14, 15 }, // 1440x480p 59.94Hz/60Hz + { 16, 76 }, // 1920x1080p 59.94Hz/60Hz + { 17, 18 }, // 720x576p 50Hz + { 19, 68 }, // 1280x720p 50Hz + + { 21, 22 }, // 720(1440)x576i 50Hz + { 23, 24 }, // 720(1440)x288p 50Hz + { 25, 26 }, // 2880x576i 50Hz + { 27, 28 }, // 2880x288p 50Hz + { 29, 30 }, // 1440x576p 50Hz + + { 31, 75 }, // 1920x1080p 50Hz + { 32, 72 }, // 1920x1080p 23.98Hz/24Hz + { 33, 73 }, // 1920x1080p 25Hz + { 34, 74 }, // 1920x1080p 29.97Hz/30Hz + { 35, 36 }, // 2880x480p 59.94Hz/60Hz + { 37, 38 }, // 2880x576p 
50Hz + + { 41, 70 }, // 1280x720p 100Hz + { 42, 43 }, // 720x576p 100Hz + { 44, 45 }, // 720(1440)x576i 100Hz + { 47, 71 }, // 1280x720p 119.88/120Hz + { 48, 49 }, // 720x480p 119.88/120Hz + + { 50, 51 }, // 720(1440)x480i 119.88/120Hz + { 52, 53 }, // 720x576p 200Hz + { 54, 55 }, // 720(1440)x576i 200Hz + { 56, 57 }, // 720x480p 239.76/240Hz + { 58, 59 }, // 720(1440)x480i 239.76/240Hz + + { 60, 65 }, // 1280x720p 23.98Hz/24Hz + { 61, 66 }, // 1280x720p 25Hz + { 62, 67 }, // 1280x720p 29.97Hz/30Hz + { 63, 78 }, // 1920x1080p 119.88/120Hz + { 64, 77 }, // 1920x1080p 100Hz + + { 93, 103 }, // 3840x2160p 23.98Hz/24Hz + { 94, 104 }, // 3840x2160p 25Hz + { 95, 105 }, // 3840x2160p 29.97Hz/30Hz + { 96, 106 }, // 3840x2160p 50Hz + { 97, 107 }, // 3840x2160p 59.94Hz/60Hz +}; +static NvU32 MAX_EIA861B_DUAL_ASPECT_VICS = sizeof(EIA861B_DUAL_ASPECT_VICS) / sizeof(EIA861B_DUAL_ASPECT_VICS[0]); + +static const NVT_TIMING PSF_TIMING[]= +{ + NVT_TIMING( 1920,600, 88,2750,'+', 540, 2,5,562,'+',47952,'i',16:9, 0x1, 1, "ITU-R BT.709-5:1080i/24Psf"),//1920x1080i @47.952Hz | 24/PsF | ITU-R BT.709-5 + NVT_TIMING( 1920,488, 88,2640,'+', 540, 2,5,562,'+',49950,'i',16:9, 0x1, 2, "ITU-R BT.709-5:1080i/25Psf"),//1920x1080i @49.950Hz | 25/PsF | ITU-R BT.709-5 + + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_PSF_FORMAT = sizeof(PSF_TIMING)/sizeof(PSF_TIMING[0]) - 1; + +static const NVT_TIMING HDMI_EXT_4Kx2K_TIMING[]= +{ + HDMI_EXT_TIMING( 3840, 176, 88,4400,'+', 2160, 8,10,2250,'+',29970,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx30Hz, "HDMI EXT: 3840x2160x29.97/30hz"),//3840x2160 @29.97/30Hz VIC: 0x01 + HDMI_EXT_TIMING( 3840,1056, 88,5280,'+', 2160, 8,10,2250,'+',25000,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx25Hz, "HDMI EXT: 3840x2160x25hz"), //3840x2160 @25Hz VIC: 0x02 + HDMI_EXT_TIMING( 3840,1276, 88,5500,'+', 2160, 8,10,2250,'+',23976,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz, "HDMI EXT: 3840x2160x23.98/24hz"),//3840x2160 @23.98/24Hz VIC: 0x03 + HDMI_EXT_TIMING( 4096,1020, 88,5500,'+', 2160, 8,10,2250,'+',24000,'p',16:9, 0x1, NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz_SMPTE, "HDMI EXT: 4096x2160x24hzSmpte"), //4096x2160 @24Hz VIC: 0x04 + + // the end + EIA_TIMING(0,0,0,0,'-',0,0,0,0,'-',0,'p',4:3,0,0) +}; +static NvU32 MAX_HDMI_EXT_4Kx2K_FORMAT = sizeof(HDMI_EXT_4Kx2K_TIMING)/sizeof(HDMI_EXT_4Kx2K_TIMING[0]) - 1; + +// HDMI 1.4a mandatory 3D video formats. +// From HDMI 1.4a specification page 147 of 201, table 8-15. And HDMI 1.4a Complaince test specification page 190. 
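+// Note on the table below: each entry appears to pair a CTA VIC with the
+// bitmask of 3D structures that HDMI 1.4a makes mandatory for that format,
+// plus the extra 3D_Detail value needed by the side-by-side (half) entries.
+// (Descriptive note only; the HDMI3DDETAILS layout itself is defined elsewhere.)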
+static const HDMI3DDETAILS HDMI_MANDATORY_3D_FORMATS[] = +{ + {32, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1920 x 1080p @ 24 Hz + { 4, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1280 x 720p @ 60 Hz + {19, NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK, 0}, // 1280 x 720p @ 50 Hz + { 5, NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH}, // 1920 x 1080i @ 60 Hz + {20, NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH} // 1920 x 1080i @ 50 Hz +}; +static NvU32 MAX_HDMI_MANDATORY_3D_FORMAT = sizeof(HDMI_MANDATORY_3D_FORMATS) / sizeof(HDMI_MANDATORY_3D_FORMATS[0]); + +static const NVT_VIDEO_INFOFRAME DEFAULT_VIDEO_INFOFRAME = {/*header*/2,2,13, /*byte1*/0, /*byte2*/0x8, /*byte3*/0, /*byte4*/0, /*byte5*/0, /*byte6~13*/0,0,0,0,0,0,0,0}; +static const NVT_AUDIO_INFOFRAME DEFAULT_AUDIO_INFOFRAME = {/*header*/4,1,10, /*byte1*/0, /*byte2*/0, /*byte3*/0, /*byte*/0, /*byte5*/0, /*byte6~10*/0,0,0,0,0}; +static const NVT_VENDOR_SPECIFIC_INFOFRAME DEFAULT_VENDOR_SPECIFIC_INFOFRAME = {/*header*/{0x01,1,6}, {/*byte1*/3, /*byte2*/0x0c, /*byte3*/0, /*byte4*/0, /*byte5*/0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}}; + +// parse the 861 detailed timing info +CODE_SEGMENT(PAGE_DD_CODE) +void parse861ExtDetailedTiming(NvU8 *pEdidExt, + NvU8 basicCaps, + NVT_EDID_INFO *pInfo) +{ + NvU32 count = 0; + EIA861EXTENSION *pEIA861 = (EIA861EXTENSION *) pEdidExt; + DETAILEDTIMINGDESCRIPTOR *pDTD; + NVT_TIMING newTiming; + + // sanity check for CEA ext block + if ((pEIA861->tag != 0x2) || (0 == pEIA861->offset) || (NVT_CEA861_REV_NONE == pEIA861->revision)) + { + // no CEA ext block, return + return; + } + + // Get all detailed timings in CEA ext block + pDTD = (DETAILEDTIMINGDESCRIPTOR *)&pEdidExt[pEIA861->offset]; + + while((NvU8 *)pDTD < (pEdidExt + sizeof(EDIDV1STRUC)) && // Check that we're not going beyond this extension block. + pDTD->wDTPixelClock != 0) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseEdidDetailedTimingDescriptor((NvU8 *)pDTD, + &newTiming) == NVT_STATUS_SUCCESS) + { + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861Long:%5dx%4dx%3d.%03dHz/%s", + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1) * newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, + (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + newTiming.etc.status = NVT_STATUS_EDID_EXT_DTDn(++count); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + pDTD ++; + } +} + +// parse the 861B short timing descriptor +CODE_SEGMENT(PAGE_DD_CODE) +void parse861bShortTiming(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 i; + NvU32 vic, bytePos, bitPos; + NVT_TIMING newTiming; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + + NvU8 *pVic = pExt861->video; + NvU32 total_svd = pExt861->total_svd; + NvU8 *pYuv420Map = pExt861->valid.y420cmdb ? 
pExt861->map_y420cmdb : NULL; + NvU8 yuv420MapCount = pExt861->total_y420cmdb; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHfvs = &pInfo->hdmiForumInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + } + else + { + return; + } + + for (i = 0; i < total_svd; i++) + { + vic = NVT_GET_CTA_8BIT_VIC(pVic[i]); + + if (vic == 0 || vic > MAX_CEA861B_FORMAT) + continue; + + // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + newTiming = EIA861B[vic-1]; + newTiming.etc.status = NVT_STATUS_EDID_861STn(vic); + + // set CEA format to location of _CEA_FORMAT. _CEA_FORMAT isn't set in pre-defined CE timing from + // EIA861B table + if (NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status) != + NVT_CEA861_640X480P_59940HZ_4X3) + { + // Although IT 640x480 video timing has a CE id, it is not a CE timing. See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + NVT_SET_CEA_FORMAT(newTiming.etc.status, + NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + } + + // calculate the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + + if ((vic <= 64) && (pVic[i] & NVT_CTA861_VIDEO_NATIVE_MASK)) + { + NVT_SET_NATIVE_TIMING_FLAG(newTiming.etc.status); + } + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861G:#%3d:%5dx%4dx%3d.%03dHz/%s", (int)vic, + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1)*newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? "I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + + // if yuv420 is supported in the video SVDs, it is indicated by CMDB bitmap + bytePos = i / (8 * sizeof(NvU8)); + if (bytePos < yuv420MapCount) + { + bitPos = 1 << (i % (8 * sizeof(NvU8))); + if (pYuv420Map[bytePos] & bitPos) + { + // pHfvs->dcXXX are only for YCbCr420; when bitPos is set, 8bpc yuv420 always supported + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + } + } + + // Y420CMDB with L == 1, implies yuv420MapCount == 0 but all SVDs support 420 + if (pYuv420Map && yuv420MapCount == 0) + { + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + } + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + newTiming.etc.flag |= NVT_FLAG_DISPLAYID_2_0_TIMING; + + if (!assignNextAvailableDisplayId20Timing(pDisplayID20, &newTiming)) + { + break; + } + } + } +} + +// parse the 861B short Yuv420 timing descriptor +CODE_SEGMENT(PAGE_DD_CODE) +void parse861bShortYuv420Timing(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 i; + NvU8 vic; + NVT_TIMING newTiming; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NvU8 *pYuv420Vic = pExt861->svd_y420vdb; + NvU32 total_y420vdb = pExt861->total_y420vdb; + NvU8 *pVdb = pExt861->video; + NvU32 total_svd = pExt861->total_svd; + NvU32 total_timings = 0; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHfvs = &pInfo->hdmiForumInfo; + 
total_timings = pInfo->total_timings; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + total_timings = pDisplayID20->total_timings; + } + else + { + return; + } + + if (total_timings == 0) + { + return; + } + + + for (i = 0; i < total_y420vdb; i++) + { + vic = NVT_GET_CTA_8BIT_VIC(pYuv420Vic[i]); + + if (vic == 0 || vic > MAX_CEA861B_FORMAT) + continue; + + // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + newTiming = EIA861B[vic-1]; + + // if yuv420 is supported in the video SVDs, it is indicated by yuv420vdb + if(total_svd > 0) + { + NvU8 idx, j; + NvBool bFound = NV_FALSE; + for (idx=0; idx < total_svd; idx++) + { + if (pVdb[idx] == vic) + { + for (j=0; j < total_timings; j++) + { + NVT_TIMING *timing = NULL; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + timing = &pInfo->timing[j]; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + timing = &pDisplayID20->timing[j]; + } + + if (NvTiming_IsTimingExactEqual(timing, &newTiming)) + { + bFound = NV_TRUE; + // we found one in pExt861->video[]. pHfvs->dcXXX are only for YCbCr420, so we can support: + // 1. 8bpc yuv420 always supported. + // 2. only add yuv420 and its deep colour caps into Video Data Block + UPDATE_BPC_FOR_COLORFORMAT(timing->etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + break; + } + } + } + } + if (bFound) continue; + } + + newTiming.etc.status = NVT_STATUS_EDID_861STn(vic); + + // set CEA format to location of _CEA_FORMAT. _CEA_FORMAT isn't set in pre-defined CE timing from + // EIA861B table + if (NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status) != + NVT_CEA861_640X480P_59940HZ_4X3) + { + // Although IT 640x480 video timing has a CE id, it is not a CE timing. See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + NVT_SET_CEA_FORMAT(newTiming.etc.status, + NVT_GET_TIMING_STATUS_SEQ(newTiming.etc.status)); + } + + // calculate the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + + // From CTA-861-F: By default, Y420VDB SVDs, when present in the EDID, shall be less preferred than all regular Video Data Block SVDs. + // So it should use normal VIC code without native flag. + //if ((vic <= 64) && (pVic[i] & NVT_CTA861_VIDEO_NATIVE_MASK)) + //{ + // NVT_SET_NATIVE_TIMING_FLAG(newTiming.etc.status); + //} + NVT_SNPRINTF((char *)newTiming.etc.name, sizeof(newTiming.etc.name), + "CTA-861G:#%3d:%5dx%4dx%3d.%03dHz/%s", (int)vic, + (int)newTiming.HVisible, + (int)((newTiming.interlaced ? 2 : 1)*newTiming.VVisible), + (int)newTiming.etc.rrx1k/1000, (int)newTiming.etc.rrx1k%1000, + (newTiming.interlaced ? 
"I":"P")); + newTiming.etc.name[sizeof(newTiming.etc.name)-1] = '\0'; + + // update supported color space; any VICs enumerated in the Y420VDB are yuv420 only modes + // update 8bpc supported color space; other bpc updated once VSDB is parsed + + // pHfvs->dcXXX are only for YCbCr420; when Vic enumerated here, 8bpc yuv420 always supported + UPDATE_BPC_FOR_COLORFORMAT(newTiming.etc.yuv420, 0, 1, + pHfvs->dc_30bit_420, + pHfvs->dc_36bit_420, 0, + pHfvs->dc_48bit_420); + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + newTiming.etc.flag |= NVT_FLAG_DISPLAYID_2_0_TIMING; + + if (assignNextAvailableDisplayId20Timing(pDisplayID20, &newTiming)) + { + + break; + } + } + } +} + +// Currently, we only focus on the particular application in CEA861-F spec described +// "One particular application is a Sink that prefers a Video Format that is not listed as an SVD in a VDB +// but instead listed in a YCBCR 4:2:0 Video Data Block" +CODE_SEGMENT(PAGE_DD_CODE) +void parse861bShortPreferredTiming(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NvU32 isMatch,i,j = 0; + + NVT_TIMING preferTiming; + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NvU8 *pSvr = pExt861->svr_vfpdb; + NvU8 totalSvr = pExt861->total_vfpdb; + NvU8 kth = 0; + NvU8 extKth = 0; + NvU8 DTDCount = 0; + NvU8 extDTDCount = 0; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + } + else + { + return; + } + + // finding all the DTD + if (flag == FROM_CTA861_EXTENSION) + { + for (j = 0; j < pInfo->total_timings; j++) + { + if (NVT_IS_DTD(pInfo->timing[j].etc.status)) + { + DTDCount++; + } + else if (NVT_IS_EXT_DTD(pInfo->timing[j].etc.status)) + { + extDTDCount++; + } + } + } + + // TODO : this only handle single SVR right now + for (i = 0; i < totalSvr; i++) + { + NvU8 svr = pSvr[i]; + NvU8 vic = 0; + + if (svr == 0 || svr == 128 || (svr >= 161 && svr <= 192) || svr == 255) + continue; + + // Kth 18bytes DTD in the EDID + if (svr >= 129 && svr <= 144) + { + kth = svr - 128; + // only base EDID and CTA861 can support 18bytes + if (flag == FROM_CTA861_EXTENSION) + { + for (j = 0; j < pInfo->total_timings; j++) + { + if (kth <= DTDCount) + { + if (NVT_IS_DTDn(pInfo->timing[j].etc.status, kth)) + { + pInfo->timing[j].etc.flag |= NVT_FLAG_CEA_PREFERRED_TIMING; + break; + } + } + else + { + extKth = kth - DTDCount; + if (NVT_IS_EXT_DTDn(pInfo->timing[j].etc.status, extKth)) + { + pInfo->timing[j].etc.flag |= NVT_FLAG_CEA_PREFERRED_TIMING; + break; + } + } + } + } + } + else if (svr >= 145 && svr <= 160) + { + // TODO : Interpret as the Nth 20-byte DTD or 6- or 7-byte CVT-based descriptor, + // where N = SVR – 144 (for N = 1 to 16) + break; + } + else if (svr == 254) + { + // TODO : Interpret as the timing format indicated by the first code of the first T8VTDB + break; + } + else // assign corresponding CEA format's timing from pre-defined CE timing table, EIA861B + { + // ( SVR >= 1 and SVR <= 127) and (SVR >= 193 and SVR <= 253) + vic = NVT_GET_CTA_8BIT_VIC(svr); + preferTiming = EIA861B[vic-1]; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + for (j = 0; j < pInfo->total_timings; j++) + { + isMatch = 
NvTiming_IsTimingExactEqual(&pInfo->timing[j], &preferTiming); + if (isMatch && (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[j].etc.status) == NVT_TYPE_EDID_861ST)) + { + pInfo->timing[j].etc.flag |= NVT_FLAG_CEA_PREFERRED_TIMING; + break; + } + } + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + for (j = 0; j < pDisplayID20->total_timings; j++) + { + isMatch = NvTiming_IsTimingExactEqual(&pDisplayID20->timing[j], &preferTiming); + if (isMatch && (NVT_GET_TIMING_STATUS_TYPE(pDisplayID20->timing[j].etc.status) == NVT_TYPE_EDID_861ST)) + { + pDisplayID20->timing[j].etc.flag |= NVT_FLAG_CEA_PREFERRED_TIMING | NVT_FLAG_DISPLAYID_2_0_TIMING; + break; + } + } + } + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCea861HdrStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NVT_HDR_STATIC_METADATA *pHdrInfo = NULL; + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHdrInfo = &pInfo->hdr_static_metadata_info; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHdrInfo = &pDisplayID20->cta.hdrInfo; + } + else + { + return; + } + + if (pExt861 == NULL || pHdrInfo == NULL) + { + return; + } + + // Parse the EOTF capability information. It's possible to have multiple EOTF + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_GAMMA_SDR) + { + pHdrInfo->supported_eotf.trad_gamma_sdr_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_GAMMA_HDR) + { + pHdrInfo->supported_eotf.trad_gamma_hdr_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_SMPTE_ST2084) + { + pHdrInfo->supported_eotf.smpte_st_2084_eotf = 1; + } + if (pExt861->hdr_static_metadata.byte1 & NVT_CEA861_EOTF_FUTURE) + { + pHdrInfo->supported_eotf.future_eotf = 1; + } + + // Parse the static metadata descriptor + if (pExt861->hdr_static_metadata.byte2) + { + pHdrInfo->static_metadata_type = 1; + } + else + { + pHdrInfo->static_metadata_type = 0; + } + + pHdrInfo->max_cll = pExt861->hdr_static_metadata.byte3 & NVT_CEA861_MAX_CLL_MASK; + pHdrInfo->max_fall = pExt861->hdr_static_metadata.byte4 & NVT_CEA861_MAX_FALL_MASK; + pHdrInfo->min_cll = pExt861->hdr_static_metadata.byte5 & NVT_CEA861_MIN_CLL_MASK; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCea861DvStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, NVT_DV_STATIC_METADATA *pDvInfo) +{ + NvU32 vsvdbVersion = 0; + NVT_DV_STATIC_METADATA_TYPE0 *pDvType0 = NULL; + NVT_DV_STATIC_METADATA_TYPE1 *pDvType1 = NULL; + NVT_DV_STATIC_METADATA_TYPE1_1 *pvDvType1_1 = NULL; + NVT_DV_STATIC_METADATA_TYPE2 *pDvType2 = NULL; + + if (pExt861 == NULL || pDvInfo == NULL) + { + return; + } + + if(pExt861->vsvdb.ieee_id != NVT_CEA861_DV_IEEE_ID) + { + return; + } + + //init + NVMISC_MEMSET(pDvInfo, 0, sizeof(NVT_DV_STATIC_METADATA)); + + // copy ieee id + pDvInfo->ieee_id = pExt861->vsvdb.ieee_id; + + vsvdbVersion = (pExt861->vsvdb.vendor_data[0] & NVT_CEA861_VSVDB_VERSION_MASK) >> NVT_CEA861_VSVDB_VERSION_MASK_SHIFT; + + switch (vsvdbVersion) + { + case 0: + if (pExt861->vsvdb.vendor_data_size < sizeof(NVT_DV_STATIC_METADATA_TYPE0)) + { + return; + } + pDvType0 = (NVT_DV_STATIC_METADATA_TYPE0 *)(&pExt861->vsvdb.vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType0->VSVDB_version; + pDvInfo->supports_2160p60hz = pDvType0->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = 
pDvType0->supports_YUV422_12bit; + pDvInfo->supports_global_dimming = pDvType0->supports_global_dimming; + pDvInfo->colorimetry = 0; // this field does not exist in type0 + pDvInfo->dm_version = (pDvType0->dm_version_major << 4) | pDvType0->dm_version_minor; + pDvInfo->target_min_luminance = (pDvType0->target_min_pq_11_4 << 4) | pDvType0->target_min_pq_3_0; + pDvInfo->target_max_luminance = (pDvType0->target_max_pq_11_4 << 4) | pDvType0->target_max_pq_3_0; + pDvInfo->cc_red_x = (pDvType0->cc_red_x_11_4 << 4) | pDvType0->cc_red_x_3_0; + pDvInfo->cc_red_y = (pDvType0->cc_red_y_11_4 << 4) | pDvType0->cc_red_y_3_0; + pDvInfo->cc_green_x = (pDvType0->cc_green_x_11_4 << 4) | pDvType0->cc_green_x_3_0; + pDvInfo->cc_green_y = (pDvType0->cc_green_y_11_4 << 4) | pDvType0->cc_green_y_3_0; + pDvInfo->cc_blue_x = (pDvType0->cc_blue_x_11_4 << 4) | pDvType0->cc_blue_x_3_0; + pDvInfo->cc_blue_y = (pDvType0->cc_blue_y_11_4 << 4) | pDvType0->cc_blue_y_3_0; + pDvInfo->cc_white_x = (pDvType0->cc_white_x_11_4 << 4) | pDvType0->cc_white_x_3_0; + pDvInfo->cc_white_y = (pDvType0->cc_white_y_11_4 << 4) | pDvType0->cc_white_y_3_0; + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = 0; + pDvInfo->supports_10b_12b_444 = 0; + break; + case 1: + if (pExt861->vsvdb.vendor_data_size == sizeof(NVT_DV_STATIC_METADATA_TYPE1)) + { + pDvType1 = (NVT_DV_STATIC_METADATA_TYPE1 *)(&pExt861->vsvdb.vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType1->VSVDB_version; + pDvInfo->supports_2160p60hz = pDvType1->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = pDvType1->supports_YUV422_12bit; + pDvInfo->dm_version = pDvType1->dm_version; + pDvInfo->supports_global_dimming = pDvType1->supports_global_dimming; + pDvInfo->colorimetry = pDvType1->colorimetry; + pDvInfo->target_min_luminance = pDvType1->target_min_luminance; + pDvInfo->target_max_luminance = pDvType1->target_max_luminance; + pDvInfo->cc_red_x = pDvType1->cc_red_x; + pDvInfo->cc_red_y = pDvType1->cc_red_y; + pDvInfo->cc_green_x = pDvType1->cc_green_x; + pDvInfo->cc_green_y = pDvType1->cc_green_y; + pDvInfo->cc_blue_x = pDvType1->cc_blue_x; + pDvInfo->cc_blue_y = pDvType1->cc_blue_y; + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = 0; + pDvInfo->supports_10b_12b_444 = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + } + else if (pExt861->vsvdb.vendor_data_size == sizeof(NVT_DV_STATIC_METADATA_TYPE1_1)) + { + pvDvType1_1 = (NVT_DV_STATIC_METADATA_TYPE1_1 *)(&pExt861->vsvdb.vendor_data); + // copy the data + pDvInfo->VSVDB_version = pvDvType1_1->VSVDB_version; + pDvInfo->supports_2160p60hz = pvDvType1_1->supports_2160p60hz; + pDvInfo->supports_YUV422_12bit = pvDvType1_1->supports_YUV422_12bit; + pDvInfo->dm_version = pvDvType1_1->dm_version; + pDvInfo->supports_global_dimming = pvDvType1_1->supports_global_dimming; + pDvInfo->colorimetry = pvDvType1_1->colorimetry; + pDvInfo->target_min_luminance = pvDvType1_1->target_min_luminance; + pDvInfo->target_max_luminance = pvDvType1_1->target_max_luminance; + pDvInfo->cc_green_x = NVT_DOLBY_CHROMATICITY_MSB_GX | pvDvType1_1->unique_Gx; + pDvInfo->cc_green_y = NVT_DOLBY_CHROMATICITY_MSB_GY | pvDvType1_1->unique_Gy; + pDvInfo->cc_blue_x = NVT_DOLBY_CHROMATICITY_MSB_BX | pvDvType1_1->unique_Bx; + pDvInfo->cc_blue_y = NVT_DOLBY_CHROMATICITY_MSB_BY | pvDvType1_1->unique_By; + pDvInfo->cc_red_x = NVT_DOLBY_CHROMATICITY_MSB_RX | pvDvType1_1->unique_Rx; + pDvInfo->cc_red_y = 
NVT_DOLBY_CHROMATICITY_MSB_RY | (pvDvType1_1->unique_Ry_bit_0 | (pvDvType1_1->unique_Ry_bit_1 <<1) | (pvDvType1_1->unique_Ry_bit_2_to_4 << 2)); + pDvInfo->supports_backlight_control = 0; + pDvInfo->backlt_min_luma = 0; + pDvInfo->interface_supported_by_sink = pvDvType1_1->interface_supported_by_sink; + pDvInfo->supports_10b_12b_444 = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + } + else + { + return; + } + + break; + case 2: + if (pExt861->vsvdb.vendor_data_size < sizeof(NVT_DV_STATIC_METADATA_TYPE2)) + { + return; + } + pDvType2 = (NVT_DV_STATIC_METADATA_TYPE2 *)(&pExt861->vsvdb.vendor_data); + // copy the data + pDvInfo->VSVDB_version = pDvType2->VSVDB_version; + pDvInfo->supports_backlight_control = pDvType2->supports_backlight_control; + pDvInfo->supports_YUV422_12bit = pDvType2->supports_YUV422_12bit; + pDvInfo->dm_version = pDvType2->dm_version; + pDvInfo->backlt_min_luma = pDvType2->backlt_min_luma; + pDvInfo->supports_global_dimming = pDvType2->supports_global_dimming; + pDvInfo->target_min_luminance = pDvType2->target_min_luminance; + pDvInfo->interface_supported_by_sink = pDvType2->interface_supported_by_sink; + pDvInfo->target_max_luminance = pDvType2->target_max_luminance; + pDvInfo->cc_green_x = NVT_DOLBY_CHROMATICITY_MSB_GX | pDvType2->unique_Gx; + pDvInfo->cc_green_y = NVT_DOLBY_CHROMATICITY_MSB_GY | pDvType2->unique_Gy; + pDvInfo->cc_blue_x = NVT_DOLBY_CHROMATICITY_MSB_BX | pDvType2->unique_Bx; + pDvInfo->cc_blue_y = NVT_DOLBY_CHROMATICITY_MSB_BY | pDvType2->unique_By; + pDvInfo->cc_red_x = NVT_DOLBY_CHROMATICITY_MSB_RX | pDvType2->unique_Rx; + pDvInfo->cc_red_y = NVT_DOLBY_CHROMATICITY_MSB_RY | pDvType2->unique_Ry; + pDvInfo->supports_10b_12b_444 = pDvType2->supports_10b_12b_444_bit0 | (pDvType2->supports_10b_12b_444_bit1 << 1); + pDvInfo->colorimetry = 0; + pDvInfo->supports_2160p60hz = 0; + pDvInfo->cc_white_x = 0; + pDvInfo->cc_white_y = 0; + break; + default: + break; + } +} + +// find both hdmi llc and hdmi forum vendor specific data block and return basic hdmi information +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861VsdbBlocks(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag + ) +{ + NvU32 i; + + NVT_EDID_INFO *pInfo = NULL; + NVT_DISPLAYID_2_0_INFO *pDisplayID20 = NULL; + NVT_HDMI_LLC_INFO *pHdmiLlc = NULL; + NVT_HDMI_FORUM_INFO *pHfvs = NULL; + NVDA_VSDB_PARSED_INFO *pNvVsdb = NULL; + MSFT_VSDB_PARSED_INFO *pMsftVsdb = NULL; + + if (pExt861 == NULL || pRawInfo == NULL) + { + return; + } + + if (flag == FROM_CTA861_EXTENSION || flag == FROM_DISPLAYID_13_DATA_BLOCK) + { + pInfo = (NVT_EDID_INFO *)pRawInfo; + pHdmiLlc = &pInfo->hdmiLlcInfo; + pHfvs = &pInfo->hdmiForumInfo; + pNvVsdb = &pInfo->nvdaVsdbInfo; + pMsftVsdb = &pInfo->msftVsdbInfo; + } + else if (flag == FROM_DISPLAYID_20_DATA_BLOCK) + { + pDisplayID20 = (NVT_DISPLAYID_2_0_INFO *)pRawInfo; + pHdmiLlc = &pDisplayID20->vendor_specific.hdmiLlc; + pHfvs = &pDisplayID20->vendor_specific.hfvs; + pNvVsdb = &pDisplayID20->vendor_specific.nvVsdb; + pMsftVsdb = &pDisplayID20->vendor_specific.msftVsdb; + } + else + { + return; + } + + if (pHdmiLlc == NULL || pHfvs == NULL || pNvVsdb == NULL || pMsftVsdb == NULL || (pExt861->total_vsdb == 0)) + { + return; + } + + for (i = 0; i < pExt861->total_vsdb; i++) + { + // Assumes each vsdb is unique for this CEA block, e.g., no two HDMI_IEEE_ID + switch (pExt861->vsdb[i].ieee_id) + { + case NVT_CEA861_HDMI_IEEE_ID: + // set any 3D timings and HDMI extended timing specified in the VSDB + parseEdidHdmiLlcBasicInfo((VSDB_DATA 
*)(&pExt861->vsdb[i]), pHdmiLlc); + pExt861->valid.H14B_VSDB = 1; + break; + + case NVT_CEA861_HDMI_FORUM_IEEE_ID: + parseEdidHdmiForumVSDB((VSDB_DATA *)(&pExt861->vsdb[i]), pHfvs); + pExt861->valid.H20_HF_VSDB = 1; + break; + + case NVT_CEA861_NVDA_IEEE_ID: + parseEdidNvidiaVSDBBlock((VSDB_DATA *)(&pExt861->vsdb[i]), pNvVsdb); + break; + + case NVT_CEA861_MSFT_IEEE_ID: + parseEdidMsftVsdbBlock((VSDB_DATA *)(&pExt861->vsdb[i]), pMsftVsdb); + break; + + } + } + + // H20_HF_VSDB shall be listed only if H14B_VSDB is also listed + // H20_HF_VSDB should not specify > 600MHz + nvt_assert(!pExt861->valid.H20_HF_VSDB || (pExt861->valid.H14B_VSDB && (pHfvs->max_TMDS_char_rate <= 0x78))); + + // Done with reading CEA VSDB blocks, sanitize them now + if (pExt861->valid.SCDB) + { + pHdmiLlc->effective_tmds_clock = pExt861->hfscdb[1]; + } + else if (pExt861->valid.H14B_VSDB) + { + // HDMI 2.0 Spec - section 10.3.2 + // The maximum Rate = Max_TMDS_Character_Rate * 5 MHz. + // If the Sink does not support TMDS Character Rates > 340 Mcsc, then the Sink shall set this field to 0. + // If the Sink supports TMDS Character Rates > 340 Mcsc, the Sink shall set Max_TMDS_Character_Rate appropriately and non - zero. + + // Pick updated TMDS clock rate + pHdmiLlc->effective_tmds_clock = (pExt861->valid.H20_HF_VSDB) ? + MAX(pHdmiLlc->max_tmds_clock, pHfvs->max_TMDS_char_rate) : + MIN(pHdmiLlc->max_tmds_clock, 0x44); + } + +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861HfEeodb(NVT_EDID_CEA861_INFO *pExt861, + NvU32 *pTotalEdidExtensions) +{ + // *pTotalEdidExtensions set by the edid extension flag should be >= 1 for HFEEODB to be valid. + if (pTotalEdidExtensions == NULL || pExt861 == NULL || !pExt861->valid.HF_EEODB || *pTotalEdidExtensions == 0) + { + return; + } + + // HDMI 2.1 AmendmentA1 specifies that if EEODB is present sources shall ignore the Extension flag. + // This effectively overrides the extension count from extension flag. + *pTotalEdidExtensions = pExt861->hfeeodb; +} + + +CODE_SEGMENT(PAGE_DD_CODE) +void parseCta861HfScdb(NVT_EDID_CEA861_INFO *pExt861, + void *pRawInfo, + NVT_CTA861_ORIGIN flag) +{ + NVT_EDID_INFO *pInfo = (NVT_EDID_INFO *)pRawInfo; + VSDB_DATA vsdbData; + + if (pExt861 == NULL || pRawInfo == NULL) + { + return; + } + + if (!pExt861->valid.SCDB || pExt861->valid.H20_HF_VSDB) + { + return; + } + NVMISC_MEMSET(&vsdbData, 0, sizeof(vsdbData)); + NVMISC_MEMCPY(&vsdbData.vendor_data, pExt861->hfscdb, sizeof(vsdbData.vendor_data)); + + vsdbData.vendor_data_size = pExt861->hfscdbSize; + + parseEdidHdmiForumVSDB(&vsdbData, &pInfo->hdmiForumInfo); +} + + +CODE_SEGMENT(PAGE_DD_CODE) +void getEdidHDM1_4bVsdbTiming(NVT_EDID_INFO *pInfo) +{ + NvU32 i = 0, j = 0; + + for (i = 0; i < 2; ++i) + { + NVT_EDID_CEA861_INFO *pExt861 = (0 == i) ? 
&pInfo->ext861 : &pInfo->ext861_2; + + for (j = 0; j < pExt861->total_vsdb; ++j) + { + switch (pExt861->vsdb[j].ieee_id) + { + case NVT_CEA861_HDMI_IEEE_ID: + { + NvU32 count = 0; + // set any 3D timings and HDMI extended timing specified in the VSDB + parseEdidHDMILLCTiming(pInfo, (VSDB_DATA *)(&pExt861->vsdb[j]), &count, &(pInfo->Hdmi3Dsupport)); + pInfo->HDMI3DSupported = 0 < count; + break; + } + + default: + break; + } + } + } +} + +// get the full EDID 861 extension info +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS get861ExtInfo(NvU8 *p, NvU32 size, NVT_EDID_CEA861_INFO *p861info) +{ + + NvU32 dtd_offset; + // sanity check + if (p == NULL || size < sizeof(EDIDV1STRUC)) + { + return NVT_STATUS_ERR; + } + + // make sure we have 861 extension + if (p[0] != 0x2 || p[1] < NVT_CEA861_REV_ORIGINAL) + { + return NVT_STATUS_ERR; + } + + + // don't do anything further if p is NULL + if (p861info == NULL) + { + return NVT_STATUS_SUCCESS; + } + + // init + NVMISC_MEMSET(p861info, 0, sizeof(NVT_EDID_CEA861_INFO)); + + // get the revision number + p861info->revision = p[1]; + + // no extra info for 861-original, returning from here + if (p861info->revision == NVT_CEA861_REV_ORIGINAL) + { + return NVT_STATUS_SUCCESS; + } + + p861info->basic_caps = p[3]; + + // no extra info for 861-A, returning from here + if (p861info->revision == NVT_CEA861_REV_A) + { + return NVT_STATUS_SUCCESS; + } + + dtd_offset = (NvU32)p[2]; + if (dtd_offset == 0 || dtd_offset == 4) + { + return NVT_STATUS_SUCCESS; + } + + // resolve all short descriptors in the reserved block + // reserved block starts from offset 04 to dtd_offset-1 + return parseCta861DataBlockInfo(&p[4], dtd_offset - 4, p861info); +} + +// get the 861 extension tags info +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS parseCta861DataBlockInfo(NvU8 *p, + NvU32 size, + NVT_EDID_CEA861_INFO *p861info) +{ + NvU32 i, j; + NvU32 video_index = 0; + NvU32 audio_index = 0; + NvU32 speaker_index = 0; + NvU32 vendor_index = 0; + NvU32 yuv420vdb_index = 0; + NvU32 yuv420cmdb_index = 0; + NvU8 svr_index = 0; + NvU32 tag, ext_tag, payload; + i= 0; + + while (i < size) + { + // get the descriptor's tag and payload size + tag = NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(p[i]); + payload = NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(p[i]); + + // move the pointer to the payload section + i++; + + // loop through all descriptors + if (tag == NVT_CEA861_TAG_VIDEO) + { + // short video descriptor + for (j = 0; j < payload; j ++, i ++, video_index ++) + { + if (video_index < NVT_CEA861_VIDEO_MAX_DESCRIPTOR) + { + p861info->video[video_index] = p[i]; + } + else + { + break; + } + } + p861info->total_svd = (NvU8)video_index; + } + else if (tag == NVT_CEA861_TAG_AUDIO) + { + // short audio descriptor + for (j = 0; j < payload / 3; j ++, i += 3, audio_index ++) + { + if (audio_index < NVT_CEA861_AUDIO_MAX_DESCRIPTOR) + { + p861info->audio[audio_index].byte1 = p[i]; + p861info->audio[audio_index].byte2 = p[i+1]; + p861info->audio[audio_index].byte3 = p[i+2]; + } + else + { + break; + } + } + p861info->total_sad = (NvU8)audio_index; + } + else if (tag == NVT_CEA861_TAG_SPEAKER_ALLOC) + { + // speaker allocation descriptor + for (j = 0; j < payload / 3; j ++, i += 3, speaker_index ++) + { + if (speaker_index < NVT_CEA861_SPEAKER_MAX_DESCRIPTOR) + { + p861info->speaker[speaker_index].byte1 = p[i]; + p861info->speaker[speaker_index].byte2 = p[i+1]; + p861info->speaker[speaker_index].byte3 = p[i+2]; + } + else + { + break; + } + } + p861info->total_ssd = (NvU8)speaker_index; + } + else if (tag == 
NVT_CEA861_TAG_VENDOR) + { + if (vendor_index < NVT_CEA861_VSDB_MAX_BLOCKS) + { + if (payload < 3) + { + // This malformed payload will cause a hang below. + return NVT_STATUS_ERR; + } + + p861info->vsdb[vendor_index].ieee_id = p[i]; //IEEE ID low byte + p861info->vsdb[vendor_index].ieee_id |= (p[i+1]) << 8; //IEEE ID middle byte + p861info->vsdb[vendor_index].ieee_id |= (p[i+2]) << 16; //IEEE ID high byte + + p861info->vsdb[vendor_index].vendor_data_size = payload - 3; + + // move the pointer to the payload + i += 3; + + // get the other vendor specific data + for (j = 0; j < payload - 3; j ++, i ++) + { + if (j < NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH) + { + p861info->vsdb[vendor_index].vendor_data[j] = p[i]; + } + } + vendor_index++; + } + } + else if (tag == NVT_CEA861_TAG_EXTENDED_FLAG) + { + if (payload >= 1) + { + ext_tag = p[i]; + if (ext_tag == NVT_CEA861_EXT_TAG_VIDEO_CAP && payload >= 2) + { + p861info->video_capability = p[i + 1] & NVT_CEA861_VIDEO_CAPABILITY_MASK; + p861info->valid.VCDB = 1; + i += 2; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_COLORIMETRY && payload >= 3) + { + p861info->colorimetry.byte1 = p[i + 1] & NVT_CEA861_COLORIMETRY_MASK; + p861info->colorimetry.byte2 = p[i + 2] & NVT_CEA861_GAMUT_METADATA_MASK; + p861info->valid.colorimetry = 1; + i += 3; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_VIDEO_FORMAT_PREFERENCE && payload >= 2) + { + // when present, indicates the order of preference for selected Video Formats listed as DTDs and/or SVDs throughout Block 0 and the CTA Extensions of the + // order of SVD preferred modes shall take precedence over preferred modes defined elsewhere in the EDID/CEA861 blocks + + // exclude the extended tag + i++; payload--; + + for (j = 0; (j < payload) && (svr_index < NVT_CEA861_VFPDB_MAX_DESCRIPTOR); j++, i++, svr_index++) + { + p861info->svr_vfpdb[svr_index] = p[i]; + } + p861info->total_vfpdb = svr_index; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_YCBCR420_VIDEO && payload >= 2) + { + // when present, list SVDs that are only supported in YCbCr 4:2:0 + + // exclude the extended tag + i++; payload--; + + for (j = 0; (j < payload) && (yuv420vdb_index < NVT_CEA861_Y420VDB_MAX_DESCRIPTOR); j++, i++, yuv420vdb_index++) + { + p861info->svd_y420vdb[yuv420vdb_index] = p[i]; + } + p861info->total_y420vdb = (NvU8)yuv420vdb_index; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_YCBCR420_CAP && payload >= 1) + { + // when present, provides bitmap to video SVDs that also support YCbCr 4:2:0 in addition to RGB, YCbCr 4:4:4, and/or YCbCr 4: 2:0 + + // exclude the extended tag + i++; payload--; + + for (j = 0; (j < payload) && (yuv420cmdb_index < NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR); j++, i++, yuv420cmdb_index++) + { + p861info->map_y420cmdb[yuv420cmdb_index] = p[i]; + } + p861info->total_y420cmdb = (NvU8)yuv420cmdb_index; + + p861info->valid.y420cmdb = 1; // total_y420cmdb is not enough as this could be 0. See CEA861-F 7.5.11 + } + else if(ext_tag == NVT_CEA861_EXT_TAG_HDR_STATIC_METADATA && payload >= 3) + { + p861info->hdr_static_metadata.byte1 = p[i + 1] & NVT_CEA861_EOTF_MASK; // This byte has bits which identify which EOTF supported by the sink. + p861info->hdr_static_metadata.byte2 = p[i + 2] & NVT_CEA861_STATIC_METADATA_DESCRIPTOR_MASK; // This byte has bits which identify which Static Metadata descriptors are supported by the sink. 
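+                    // Bytes 3..5 of this data block are optional; per CTA-861.3 they carry the
+                    // Desired Content Max Luminance, Max Frame-average Luminance and Min Luminance
+                    // codes, and are read below only when the payload is long enough.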
+ + i += 3; + + if (payload > 3) + { + p861info->hdr_static_metadata.byte3 = p[i]; + i++; + } + + if (payload > 4) + { + p861info->hdr_static_metadata.byte4 = p[i]; + i++; + } + + if (payload > 5) + { + p861info->hdr_static_metadata.byte5 = p[i]; + i++; + } + + p861info->valid.hdr_static_metadata = 1; + } + else if(ext_tag == NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_VIDEO && ((payload >= 14) || (payload >= 11))) //version 2 of VSDB has 11 bytes of data and version 1 has 14 + { + + // exclude the extended tag + i++; payload--; + + p861info->vsvdb.ieee_id = p[i]; //IEEE ID low byte + p861info->vsvdb.ieee_id |= (p[i + 1]) << 8; //IEEE ID middle byte + p861info->vsvdb.ieee_id |= (p[i + 2]) << 16; //IEEE ID high byte + + p861info->vsvdb.vendor_data_size = payload - 3; + + // move the pointer to the payload + i += 3; + + // get the other vendor specific video data + for (j = 0; j < payload - 3; j++, i++) + { + if (j < NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH) + { + p861info->vsvdb.vendor_data[j] = p[i]; + } + } + p861info->valid.dv_static_metadata = 1; + } + else if(ext_tag == NVT_CTA861_EXT_TAG_SCDB && payload >= 7) // sizeof(HDMI Forum Sink Capability Data Block) ranges between 7 to 31 bytes + { + // As per HDMI2.1 A1 amendment Sink Capability Data Structure(SCDS) can alternatively be included in HDMI Forum Sink Capability Data Block(HF-SCDB), + // instead of HF-VSDB, to indicate HDMI2.1 capability. + // Sinks will expose HF-SCDB if they do not expose HF-VSDB. + + // move pointer to SCDS + i += 3; + + // Copy SCDS over to p861info->vsdb[vendor_index]. Parsing will later be handled in parseEdidHdmiForumVSDB(). + for (j = 0; (j < payload - 3) && (j < NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH); j ++, i ++) + { + p861info->hfscdb[j] = p[i]; + } + p861info->hfscdbSize = MIN(payload - 3, NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH); + p861info->valid.SCDB = 1; + } + else if (ext_tag == NVT_CEA861_EXT_TAG_HF_EEODB && payload == 2) + { + // Skip over extended tag + i++; payload--; + + p861info->hfeeodb = p[i]; + p861info->valid.HF_EEODB = 1; + i += payload; + } + else + { + // skip the unrecognized extended block + i += payload; + } + } + } + else + { + // reserved block, just skip here + i += payload; + } + } + + p861info->total_vsdb = (NvU8)vendor_index; + + return NVT_STATUS_SUCCESS; +} + +// enum the EIA/CEA 861B predefined timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumCEA861bTiming(NvU32 ceaFormat, NVT_TIMING *pT) +{ + if (pT == NULL || ceaFormat == 0 || ceaFormat > MAX_CEA861B_FORMAT) + { + return NVT_STATUS_ERR; + } + + ceaFormat = NVT_GET_CTA_8BIT_VIC(ceaFormat); + + *pT = EIA861B[ceaFormat - 1]; + + // calculate the pixel clock + pT->pclk = RRx1kToPclk (pT); + NVT_SET_CEA_FORMAT(pT->etc.status, ceaFormat); + + NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "CTA-861G:#%3d:%dx%dx%3d.%03dHz/%s", (int)ceaFormat, (int)pT->HVisible, (int)((pT->interlaced ? 2 : 1)*pT->VVisible), (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? "I":"P")); + pT->etc.name[sizeof(pT->etc.name) - 1] = '\0'; + + return NVT_STATUS_SUCCESS; +} + + +// Check whether the given timing is a CEA 861 timing. 
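+// Some VICs share identical timing and differ only in picture aspect ratio
+// (e.g. VIC 2 vs VIC 3 for 720x480p at 4:3 vs 16:9); when the caller supplies an
+// aspect ratio, the dual-aspect table below is used to pick the closer match.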
+CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_GetCEA861TimingIndex (NVT_TIMING *pT) +{ + NvU32 i = 0, j = 0; + NvU32 ceaIndex = 0; + NvU32 aspect_x; + NvU32 aspect_y; + + if (pT == NULL) + { + return ceaIndex; + } + + if (NVT_GET_CEA_FORMAT(pT->etc.status) != 0) + { + // CEA format has been set, done + return NVT_GET_CEA_FORMAT(pT->etc.status); + } + + aspect_x = nvt_aspect_x(pT->etc.aspect); + aspect_y = nvt_aspect_y(pT->etc.aspect); + + // loop through the pre-defined CEA 861 table + // Skip VIC1 - Although IT 640x480 video timing has a CE id, it is not a CE timing. See 3.1 + // "General Video Format Requirements" section in CEA-861-E spec + for (i = 1; i < MAX_CEA861B_FORMAT; i++) + { + if (NvTiming_IsTimingRelaxedEqual(pT, &EIA861B[i])) + { + // The timing matches with a CEA 861 timing. Set CEA format to NVT_TIMING.etc.status. + ceaIndex = NVT_GET_TIMING_STATUS_SEQ(EIA861B[i].etc.status); + + if (!aspect_x || !aspect_y) + { + return ceaIndex; + } + + // for the dual-aspect ratio timings we should further check the aspect ratio matching(16:9 or 4:3) based on the integer rounding error + for (j = 0; j < MAX_EIA861B_DUAL_ASPECT_VICS; j++) + { + if (ceaIndex == EIA861B_DUAL_ASPECT_VICS[j][0]) + { + NvU32 ceaIndex1 = EIA861B_DUAL_ASPECT_VICS[j][1]; + + NvU32 format1 = axb_div_c(aspect_x, nvt_aspect_y(EIA861B[ceaIndex - 1].etc.aspect), aspect_y); + NvU32 format2 = axb_div_c(aspect_x, nvt_aspect_y(EIA861B[ceaIndex1 - 1].etc.aspect), aspect_y); + + NvU32 format_1_diff = abs_delta(format1, nvt_aspect_x(EIA861B[ceaIndex - 1].etc.aspect)); + NvU32 format_2_diff = abs_delta(format2, nvt_aspect_x(EIA861B[ceaIndex1 - 1].etc.aspect)); + + if (format_2_diff < format_1_diff) + { + ceaIndex = ceaIndex1; + } + break; + } + else if (ceaIndex < EIA861B_DUAL_ASPECT_VICS[j][0]) // not a dual-dspect ratio timing + { + break; + } + } + break; + } + } + return ceaIndex; +} + +// calculate 861B based timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcCEA861bTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 pixelRepeatCount, NVT_TIMING *pT) + +{ + NvU32 i = 0; + NvU16 pixelRepeatMask = 1 << (pixelRepeatCount - 1); + + nvt_assert(pixelRepeatCount > 0 && pixelRepeatCount <= 10); + + if (pT == NULL) + { + return NVT_STATUS_ERR; + } + + // loop through the table + for (i = 0; i < MAX_CEA861B_FORMAT; i ++) + { + + if ((EIA861B[i].etc.rep & pixelRepeatMask) == 0) + { + continue; + } + + if (width == (NvU32)NvTiming_MaxFrameWidth(EIA861B[i].HVisible, pixelRepeatMask) && + height == frame_height(EIA861B[i])&& + rr == EIA861B[i].etc.rr && + (!!(flag & NVT_PVT_INTERLACED_MASK)) == (!!EIA861B[i].interlaced)) + { + *pT = EIA861B[i]; + + // calculate the pixel clock + pT->pclk = RRx1kToPclk (pT); + + NVT_SET_CEA_FORMAT(pT->etc.status, NVT_GET_TIMING_STATUS_SEQ(pT->etc.status)); + + NVT_SNPRINTF((char *)pT->etc.name, sizeof(pT->etc.name), "CTA-861G:#%3d:%dx%dx%3d.%03dHz/%s", (int)NVT_GET_TIMING_STATUS_SEQ(pT->etc.status), (int)pT->HVisible, (int)((pT->interlaced ? 2 : 1)*pT->VVisible), (int)pT->etc.rrx1k/1000, (int)pT->etc.rrx1k%1000, (pT->interlaced ? 
"I":"P")); + pT->etc.name[sizeof(pT->etc.name) - 1] = '\0'; + + return NVT_STATUS_SUCCESS; + } + } + + return NVT_STATUS_ERR; + +} + +// Assign fields in NVT_VIDEO_INFOFRAME_CTRL, using NVT_TIMING +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructVideoInfoframeCtrl(const NVT_TIMING *pTiming, NVT_VIDEO_INFOFRAME_CTRL *pCtrl) +{ + // setup VIC code it is not specified + if (pCtrl->video_format_id == NVT_INFOFRAME_CTRL_DONTCARE || + pCtrl->video_format_id == 0 || + pCtrl->video_format_id > NVT_CEA861_1920X1080P_29970HZ_16X9) + { + // setup video format ID + pCtrl->video_format_id = (NvU8)NVT_GET_CEA_FORMAT(pTiming->etc.status); + if (pCtrl->video_format_id < NVT_CEA861_640X480P_59940HZ_4X3 || + pCtrl->video_format_id > NVT_CTA861_4096x2160p_119880HZ_256X135) + { + // Prior RFE 543088 + if (pCtrl->video_format_id == 0 && + NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == NVT_TYPE_EDID_861ST) + { + pCtrl->video_format_id = (NvU8)NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status); + } + if (pCtrl->video_format_id == 0 && + pTiming->HVisible == 640 && + pTiming->VVisible == 480 && + pTiming->interlaced == 0 && + pTiming->etc.rr == 60) + { + pCtrl->video_format_id = NVT_CEA861_640X480P_59940HZ_4X3; + } + } + } + + // for HDMI_EXT timing, AVI VIC should be 0. + if (NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == NVT_TYPE_HDMI_EXT) + { + pCtrl->video_format_id = 0; + } + + // setup aspect ratio it is not specified + if (pCtrl->pic_aspect_ratio == NVT_INFOFRAME_CTRL_DONTCARE || + pCtrl->pic_aspect_ratio == NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA || + pCtrl->pic_aspect_ratio > NVT_VIDEO_INFOFRAME_BYTE2_M1M0_FUTURE) + { + // extract the screen measurements from the DTD aspect ratio. + // (we pack the height & width in a DWORD to form the aspect ratio) + + NvU32 x,y; + x = (pTiming->etc.aspect & 0x0fff); + y = ((pTiming->etc.aspect >> 16) & 0x0fff); + + if (axb_div_c(y,3,x) == 4) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3; + } + else if (axb_div_c(y,9,x) == 16) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_16X9; + } + else if (pCtrl->video_format_id == NVT_CEA861_640X480P_59940HZ_4X3) + { + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3; + } + else + { + // default to no data, to cover other non-cea modes + pCtrl->pic_aspect_ratio = NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA; + } + } + + if (pCtrl->it_content == NVT_INFOFRAME_CTRL_DONTCARE) + { + // Initialize ITC flag to NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT + pCtrl->it_content = NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT; + pCtrl->it_content_type = NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GRAPHICS; + } + + if (pCtrl->pixel_repeat == NVT_INFOFRAME_CTRL_DONTCARE) + { + // Initialize pixel repetitions + NvU32 pixelRepeat = pTiming->etc.rep; + LOWESTBITIDX_32(pixelRepeat); + pCtrl->pixel_repeat = (NvU8)pixelRepeat; + } + + return NVT_STATUS_SUCCESS; +} + + +// construct AVI video infoframe based on the user control and the current context state +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructVideoInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VIDEO_INFOFRAME_CTRL *pCtrl, NVT_VIDEO_INFOFRAME *pContext, NVT_VIDEO_INFOFRAME *pInfoFrame) +{ + // parameter check + if (pEdidInfo == NULL || pInfoFrame == NULL) + { + return NVT_STATUS_ERR; + } + + // infoframe is only supported on 861A and later + if (pEdidInfo->ext861.revision < NVT_CEA861_REV_A) + { + return NVT_STATUS_ERR; + } + + // if context state is provided, use it to initialize the infoframe buffer + if (pContext != NULL) + { + *pInfoFrame = 
*pContext; + } + else + { + *pInfoFrame = DEFAULT_VIDEO_INFOFRAME; + } + + // init the header + pInfoFrame->type = NVT_INFOFRAME_TYPE_VIDEO; + + // TODO : This is just to check the version, we still need to change lots of structure + // "NVT_VIDEO_INFOFRAME" / "VIDEO_INFOFRAME" / "DEFAULT_VIDEO_INFOFRAME" / "NVM_DISP_STATE" etc.. + // to accept the new ACE0-3 bits supported in the future. Right now no any sink to support this. + // + // Based on the latest CTA-861-G-Errata.pdf file, we need to do following logic to get the correct CTA861 version + // When Y2 = 0, the following algorithm shall be used for AVI InfoFrame version selection: + // if (C=3 and EC=7) + // Sources shall use AVI InfoFrame Version 4. + // Else if (VIC>=128) + // Sources shall use AVI InfoFrame Version 3. + // Else + // Sources shall use AVI InfoFrame Version 2. + // End if + // + if (pCtrl) + { + if (nvt_get_bits(pInfoFrame->byte1, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT) <= NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_FUTURE) // this shall be as 0 always. + { + if ((nvt_get_bits(pInfoFrame->byte2, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT) == NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY) && + //EC2-0 is based on the 7.5.5 at CTA861-G which DCI-P3 bit defined or notat byte4 + (nvt_get_bits(pInfoFrame->byte3, NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT) == NVT_VIDEO_INFOFRAME_BYTE3_EC_AdditionalColorExt)) + { + pInfoFrame->version = NVT_VIDEO_INFOFRAME_VERSION_4; // just put the logic to get the correct version 4, but it shall not be used at currently stage. + } + else + { + pInfoFrame->version = (((pCtrl->video_format_id & NVT_VIDEO_INFOFRAME_BYTE4_VIC7) == NVT_VIDEO_INFOFRAME_BYTE4_VIC7) ? NVT_VIDEO_INFOFRAME_VERSION_3 : + ((pEdidInfo->ext861.revision >= NVT_CEA861_REV_B) ? NVT_VIDEO_INFOFRAME_VERSION_2 : NVT_VIDEO_INFOFRAME_VERSION_1)); + } + } + } + else + { + pInfoFrame->version = (pEdidInfo->ext861.revision >= NVT_CEA861_REV_B) ? 
NVT_VIDEO_INFOFRAME_VERSION_2 : NVT_VIDEO_INFOFRAME_VERSION_1; + } + pInfoFrame->length = sizeof(NVT_VIDEO_INFOFRAME) - sizeof(NVT_INFOFRAME_HEADER); + + if (pInfoFrame->version < NVT_VIDEO_INFOFRAME_VERSION_3) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, 0, NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_MASK, NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_SHIFT); + } + + if (pInfoFrame->version == NVT_VIDEO_INFOFRAME_VERSION_2) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, 0, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_MASK, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_SHIFT); + } + else if (pInfoFrame->version == NVT_VIDEO_INFOFRAME_VERSION_1) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, 0, NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte4, 0, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte5, 0, NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_MASK, NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_SHIFT); + } + + // construct the desired infoframe contents based on the control + if (pCtrl) + { + // byte 1 + if (pCtrl->color_space != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->color_space, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT); + } + + if (pCtrl->active_format_info_present != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->active_format_info_present, NVT_VIDEO_INFOFRAME_BYTE1_A0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_A0_SHIFT); + } + + if (pCtrl->bar_info != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->bar_info, NVT_VIDEO_INFOFRAME_BYTE1_B1B0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_B1B0_SHIFT); + } + + if (pCtrl->scan_info != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, pCtrl->scan_info, NVT_VIDEO_INFOFRAME_BYTE1_S1S0_MASK, NVT_VIDEO_INFOFRAME_BYTE1_S1S0_SHIFT); + } + + // byte 2 + if (pCtrl->colorimetry != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->colorimetry, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT); + } + + if (pCtrl->pic_aspect_ratio != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->pic_aspect_ratio, NVT_VIDEO_INFOFRAME_BYTE2_M1M0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_M1M0_SHIFT); + } + + if (pCtrl->active_format_aspect_ratio != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, pCtrl->active_format_aspect_ratio, NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_MASK, NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SHIFT); + } + + // byte 3 + if (pCtrl->it_content != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->it_content, NVT_VIDEO_INFOFRAME_BYTE3_ITC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_ITC_SHIFT); + } + + if (pCtrl->extended_colorimetry != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->extended_colorimetry, NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT); + } + + if (pCtrl->rgb_quantization_range != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->rgb_quantization_range, NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_MASK, NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_SHIFT); + } + + if (pCtrl->nonuniform_scaling != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte3, pCtrl->nonuniform_scaling, NVT_VIDEO_INFOFRAME_BYTE3_SC_MASK, NVT_VIDEO_INFOFRAME_BYTE3_SC_SHIFT); + } + + // byte 4 and byte 5 only supported on InfoFrame 2.0 + if (pInfoFrame->version >= 
NVT_VIDEO_INFOFRAME_VERSION_2) + { + // byte 4 + if (pCtrl->video_format_id != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, pCtrl->video_format_id, NVT_VIDEO_INFOFRAME_BYTE4_VIC_MASK, NVT_VIDEO_INFOFRAME_BYTE4_VIC_SHIFT); + } + + // byte 5 + if (pCtrl->pixel_repeat != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, pCtrl->pixel_repeat, NVT_VIDEO_INFOFRAME_BYTE5_PR_MASK, NVT_VIDEO_INFOFRAME_BYTE5_PR_SHIFT); + } + + // byte5 + if (pCtrl->it_content_type != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, pCtrl->it_content_type, NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_MASK, NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_SHIFT); + } + } + + // byte 6~13, bar info + if (pCtrl->top_bar != 0xFFFF) + { + pInfoFrame->top_bar_low = (NvU8)(pCtrl->top_bar % 0x100); + pInfoFrame->top_bar_high = (NvU8)(pCtrl->top_bar / 0x100); + } + if (pCtrl->bottom_bar != 0xFFFF) + { + pInfoFrame->bottom_bar_low = (NvU8)(pCtrl->bottom_bar % 0x100); + pInfoFrame->bottom_bar_high = (NvU8)(pCtrl->bottom_bar / 0x100); + } + if (pCtrl->left_bar != 0xFFFF) + { + pInfoFrame->left_bar_low = (NvU8)(pCtrl->left_bar % 0x100); + pInfoFrame->left_bar_high = (NvU8)(pCtrl->left_bar / 0x100); + } + if (pCtrl->right_bar != 0xFFFF) + { + pInfoFrame->right_bar_low = (NvU8)(pCtrl->right_bar % 0x100); + pInfoFrame->right_bar_high = (NvU8)(pCtrl->right_bar / 0x100); + } + } + + return NVT_STATUS_SUCCESS; +} + +// construct AVI audio infoframe based on the user control and the current context state +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructAudioInfoframe(NVT_AUDIO_INFOFRAME_CTRL *pUserCtrl, NVT_AUDIO_INFOFRAME *pContext, NVT_AUDIO_INFOFRAME *pInfoFrame) +{ + NVT_AUDIO_INFOFRAME_CTRL ctrl; + + // parameter check + if (pInfoFrame == NULL) + { + return NVT_STATUS_ERR; + } + + // use the user provided control if possible + if (pUserCtrl) + { + ctrl = *pUserCtrl; + } + else + { + // otherwise use the default control + NVMISC_MEMSET(&ctrl, NVT_INFOFRAME_CTRL_DONTCARE, sizeof(ctrl)); + } + + // if context state is provided, use it to initialize the infoframe buffer + if (pContext != NULL) + { + *pInfoFrame = *pContext; + } + else + { + *pInfoFrame = DEFAULT_AUDIO_INFOFRAME; + + // if the context state is not provide, we should user EDID info to build a default ctrl + //buildDefaultAudioInfoframeCtrl(pEdidInfo, &ctrl); + } + + // init the header + pInfoFrame->type = NVT_INFOFRAME_TYPE_AUDIO; + pInfoFrame->version = NVT_VIDEO_INFOFRAME_VERSION_1; + pInfoFrame->length = sizeof(NVT_AUDIO_INFOFRAME) - sizeof(NVT_INFOFRAME_HEADER); + + // init the reserved fields + nvt_nvu8_set_bits(pInfoFrame->byte1, 0, NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte2, 0, NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->byte5, 0, NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_MASK, NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_SHIFT); + pInfoFrame->rsvd_byte6 = 0; + pInfoFrame->rsvd_byte7 = 0; + pInfoFrame->rsvd_byte8 = 0; + pInfoFrame->rsvd_byte9 = 0; + pInfoFrame->rsvd_byte10 = 0; + + // byte 1 + if (ctrl.channel_count != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, ctrl.channel_count, NVT_AUDIO_INFOFRAME_BYTE1_CC_MASK, NVT_AUDIO_INFOFRAME_BYTE1_CC_SHIFT); + } + + if (ctrl.coding_type != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte1, ctrl.coding_type, NVT_AUDIO_INFOFRAME_BYTE1_CT_MASK, NVT_AUDIO_INFOFRAME_BYTE1_CT_SHIFT); + } + + // 
byte 2 + if (ctrl.sample_depth != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, ctrl.sample_depth, NVT_AUDIO_INFOFRAME_BYTE2_SS_MASK, NVT_AUDIO_INFOFRAME_BYTE2_SS_SHIFT); + } + + if (ctrl.sample_rate != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte2, ctrl.sample_rate, NVT_AUDIO_INFOFRAME_BYTE2_SF_MASK, NVT_AUDIO_INFOFRAME_BYTE2_SF_SHIFT); + } + + // byte 3 + pInfoFrame->byte3 = 0; + + // byte 4 + if (ctrl.speaker_placement != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte4, ctrl.speaker_placement, NVT_AUDIO_INFOFRAME_BYTE4_CA_MASK, NVT_AUDIO_INFOFRAME_BYTE4_CA_SHIFT); + } + + // byte 5 + if (ctrl.level_shift != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, ctrl.level_shift, NVT_AUDIO_INFOFRAME_BYTE5_LSV_MASK, NVT_AUDIO_INFOFRAME_BYTE5_LSV_SHIFT); + } + + if (ctrl.down_mix_inhibit != NVT_INFOFRAME_CTRL_DONTCARE) + { + nvt_nvu8_set_bits(pInfoFrame->byte5, ctrl.down_mix_inhibit, NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_MASK, NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_SHIFT); + } + + + return NVT_STATUS_SUCCESS; + +} + +// Construct Vendor Specific Infoframe +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructVendorSpecificInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL *pCtrl, NVT_VENDOR_SPECIFIC_INFOFRAME *pInfoFrame) +{ + NVT_STATUS RetCode = NVT_STATUS_SUCCESS; + NvU8 optIdx = 0; + NvU8 HDMIFormat; + + // parameter check + if (pEdidInfo == NULL || pInfoFrame == NULL) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // infoframe is only supported on 861A and later + if (pEdidInfo->ext861.revision < NVT_CEA861_REV_A) + { + return NVT_STATUS_ERR; + } + + + // initialize the infoframe buffer + *pInfoFrame = DEFAULT_VENDOR_SPECIFIC_INFOFRAME; + + // init the header (mostly done in default Infoframe) + pInfoFrame->Header.length = offsetof(NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD, optionalBytes); + + // construct the desired infoframe contents based on the control + if (pCtrl) + { + // clear all static reserved fields + nvt_nvu8_set_bits(pInfoFrame->Data.byte4, 0, NVT_HDMI_VS_BYTE4_RSVD_MASK, NVT_HDMI_VS_BYTE4_RSVD_SHIFT); + + // setup the parameters + nvt_nvu8_set_bits(pInfoFrame->Data.byte4, pCtrl->HDMIFormat, NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_MASK, NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_SHIFT); + + // determine what the format is -- if disabled, force the format to NONE. + if (pCtrl->Enable) + { + HDMIFormat = pCtrl->HDMIFormat; + } + else + { + HDMIFormat = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE; + } + + switch(HDMIFormat) + { + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE: + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 0, NVT_HDMI_VS_BYTENv_RSVD_MASK, NVT_HDMI_VS_BYTENv_RSVD_SHIFT); + break; + } + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_EXT: + { + // Note: extended resolution frames are not yet fully supported + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, pCtrl->HDMI_VIC, NVT_HDMI_VS_BYTE5_HDMI_VIC_MASK, NVT_HDMI_VS_BYTE5_HDMI_VIC_SHIFT); + break; + } + case NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D: + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 0, NVT_HDMI_VS_BYTE5_HDMI_RSVD_MASK, NVT_HDMI_VS_BYTE5_HDMI_RSVD_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, pCtrl->ThreeDStruc, NVT_HDMI_VS_BYTE5_HDMI_3DS_MASK, NVT_HDMI_VS_BYTE5_HDMI_3DS_SHIFT); + + // side by side half requires additional format data in the infoframe. 
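+            // Per HDMI 1.4a, 3D_Structure values of side-by-side (half) and above are followed
+            // by an extra 3D_Ext_Data field describing the sub-sampling positions; that is what
+            // the first optional byte written below carries.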
+ if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == pCtrl->ThreeDStruc) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], pCtrl->ThreeDDetail, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_MASK, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SHIFT); + optIdx++; + } + if (pCtrl->MetadataPresent) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_PRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + + switch(pCtrl->MetadataType) + { + case NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX: + { + if (sizeof(pCtrl->Metadata) >= NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX && + sizeof(pInfoFrame->Data.optionalBytes) - (optIdx + 1) >= NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_MASK, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_SHIFT); + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_MASK, NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_SHIFT); + ++optIdx; + + NVMISC_MEMCPY(pCtrl->Metadata, &pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX); + optIdx += NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX; + } + else + { + // not enough data in the control struct or not enough room in the infoframe -- BOTH compile time issues!! + // ignore metadata. + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + } + break; + } + default: + { + // unrecognised metadata, recover the best we can. + // note -- can not copy whatever is there because type implies length. + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + RetCode = NVT_STATUS_ERR; + } + } + + } + else + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK, NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT); + } + break; + } + } + // clear last byte of infoframe (reserved per spec). 
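+        // Header.length was initialized above to the fixed payload size (up to optionalBytes);
+        // add the optional 3D bytes actually used plus one trailing reserved byte, then fill
+        // the remaining optional bytes with the reserved value.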
+ pInfoFrame->Header.length += optIdx + 1; + for (; optIdx < sizeof(pInfoFrame->Data.optionalBytes); ++optIdx) + { + nvt_nvu8_set_bits(pInfoFrame->Data.optionalBytes[optIdx], NVT_HDMI_VS_BYTENv_RSVD, NVT_HDMI_VS_BYTENv_RSVD_MASK, NVT_HDMI_VS_BYTENv_RSVD_SHIFT); + } + } + return RetCode; +} + +// Construct Extended Metadata Packet Infoframe +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ConstructExtendedMetadataPacketInfoframe( + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL *pCtrl, + NVT_EXTENDED_METADATA_PACKET_INFOFRAME *pInfoFrame) +{ + NVT_STATUS RetCode = NVT_STATUS_SUCCESS; + if (!pCtrl || !pInfoFrame) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // Initialize the infoframe + NVMISC_MEMSET(pInfoFrame, 0, sizeof(*pInfoFrame)); + + // Construct an infoframe to enable or disable HDMI 2.1 VRR + pInfoFrame->Header.type = NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET; + pInfoFrame->Header.firstLast = NVT_EMP_HEADER_FIRST_LAST; + pInfoFrame->Header.sequenceIndex = 0x00; + + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_VFR_ENABLE, + NVT_HDMI_EMP_BYTE1_VFR_MASK, + NVT_HDMI_EMP_BYTE1_VFR_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_NEW_ENABLE, + NVT_HDMI_EMP_BYTE1_NEW_MASK, + NVT_HDMI_EMP_BYTE1_NEW_SHIFT); + + if (!pCtrl->EnableVRR) + { + nvt_nvu8_set_bits(pInfoFrame->Data.byte1, NVT_HDMI_EMP_BYTE1_END_ENABLE, + NVT_HDMI_EMP_BYTE1_END_MASK, + NVT_HDMI_EMP_BYTE1_END_SHIFT); + } + + nvt_nvu8_set_bits(pInfoFrame->Data.byte3, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SPEC_DEFINED, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_MASK, + NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte5, 1, + NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_MASK, + NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_SHIFT); + + nvt_nvu8_set_bits(pInfoFrame->Data.byte7, (pCtrl->EnableVRR ? 4 : 0), + NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_MASK, + NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_SHIFT); + + if (pCtrl->EnableVRR) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[0], + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_ENABLE, + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_MASK, + NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_SHIFT); + } + + if (pCtrl->ITTiming) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[1], + pCtrl->BaseVFP, + NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_MASK, + NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_SHIFT); + + // In HDMI2.1, MD2 bit 2 is set when RB timing is used. 
+ // In HDMI2.1A, MD2 bit 2 is RSVD as 0 + if (pCtrl->version == NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER_HDMI21) + { + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[2], + pCtrl->ReducedBlanking, + NVT_HDMI_EMP_BYTE8_MD2_RB_MASK, + NVT_HDMI_EMP_BYTE8_MD2_RB_SHIFT); + } + + // MSB for Base Refresh Rate + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[2], + pCtrl->BaseRefreshRate >> 8, + NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_MASK, + NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_SHIFT); + + // LSB for Base Refresh Rate + nvt_nvu8_set_bits(pInfoFrame->Data.metadataBytes[3], + pCtrl->BaseRefreshRate, + NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_MASK, + NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_SHIFT); + } + + return RetCode; +} + +// Enumerate Psf Timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumNvPsfTiming(NvU32 nvPsfFormat, NVT_TIMING *pT) +{ + if (pT == NULL || nvPsfFormat == 0 || nvPsfFormat > MAX_PSF_FORMAT) + { + return NVT_STATUS_ERR; + } + + *pT = PSF_TIMING[nvPsfFormat - 1]; + + // calculate the pixel clock + pT->pclk = RRx1kToPclk (pT); + + return NVT_STATUS_SUCCESS; +} + +// Set ActiveSpace for HDMI 3D stereo timing +CODE_SEGMENT(PAGE_DD_CODE) +void SetActiveSpaceForHDMI3DStereo(const NVT_TIMING *pTiming, NVT_EXT_TIMING *pExtTiming) +{ + // Note -- this assumes that the Timng is the 2D instance. + NvU16 VBlank; + + // assume no active space to start. + pExtTiming->HDMI3D.VActiveSpace[0] = 0; + pExtTiming->HDMI3D.VActiveSpace[1] = 0; + + if (NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK == pExtTiming->HDMI3D.StereoStructureType) + { + VBlank = pTiming->VTotal - pTiming->VVisible; + if (pTiming->interlaced) + { + //++++ This need to be revisited, not sure when active space 1 & 2 should be different. + // (fortunately, we are not supporting any interlaced packed frame modes yet). + pExtTiming->HDMI3D.VActiveSpace[0] = VBlank + 1; + pExtTiming->HDMI3D.VActiveSpace[1] = VBlank - 1; + } + else + { + pExtTiming->HDMI3D.VActiveSpace[0] = VBlank; + } + } + return; +} + +// Generate HDMI stereo timing from 2D timing +CODE_SEGMENT(PAGE_DD_CODE) +void NvTiming_GetHDMIStereoTimingFrom2DTiming(const NVT_TIMING *pTiming, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NVT_EXT_TIMING *pExtTiming) +{ + NvU16 VBlank; + NvU16 HBlank; + + if ((NULL == pTiming) || (NULL == pExtTiming) || (!isHdmi3DStereoType(StereoStructureType))) + { + return; + } + // init the extended timing + NVMISC_MEMSET(pExtTiming, 0, sizeof(NVT_EXT_TIMING)); + + // copy the 2D timing to the 3D timing. + pExtTiming->timing = *pTiming; + + // init the extension w/in the 3D timing + pExtTiming->HDMI3D.StereoStructureType = StereoStructureType; + pExtTiming->HDMI3D.SideBySideHalfDetail = SideBySideHalfDetail; + + + switch(StereoStructureType) + { + case NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK: + { + // calculate VBlank + VBlank = pTiming->VTotal - pTiming->VVisible; + + // Use the 2D timing to calculate the Active Space + SetActiveSpaceForHDMI3DStereo(pTiming, pExtTiming); + + // Calculate the 3D VVisible size based on the 2D VVisible and the active space. + if (pTiming->interlaced) + { + pExtTiming->timing.VVisible = ((pTiming->VVisible * 4) + (pExtTiming->HDMI3D.VActiveSpace[0]) * 2) + pExtTiming->HDMI3D.VActiveSpace[1]; + } + else + { + pExtTiming->timing.VVisible = (pTiming->VVisible * 2) + pExtTiming->HDMI3D.VActiveSpace[0]; + } + // Calculate the 3D VTotal from the 3D VVisible & the VBlank. 
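+            // e.g. for progressive 1920x1080 (VVisible 1080, VTotal 1125, VBlank 45):
+            // frame-packed VVisible = 2*1080 + 45 = 2205 and VTotal = 2205 + 45 = 2250,
+            // matching the HDMI 1.4a frame packing layout.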
+ pExtTiming->timing.VTotal = pExtTiming->timing.VVisible + VBlank; + + pExtTiming->timing.etc.status = NVT_SET_TIMING_STATUS_TYPE(pExtTiming->timing.etc.status, NVT_TYPE_HDMI_STEREO); + + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL: + { + // calculate HBlank before calculating new HVisible + HBlank = pTiming->HTotal - pTiming->HVisible; + + pExtTiming->timing.HVisible = pTiming->HVisible * 2; + + pExtTiming->timing.HTotal = pExtTiming->timing.HVisible + HBlank; + + pExtTiming->timing.etc.status = NVT_SET_TIMING_STATUS_TYPE(pExtTiming->timing.etc.status, NVT_TYPE_HDMI_STEREO); + + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF: // valid formats with no timing changes. + case NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM: + { + break; + } + case NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT: // formats we are not supporting. + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT: + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH: + case NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX: + { + break; + } + } + // calculate the pixel clock + pExtTiming->timing.pclk = RRx1kToPclk (&(pExtTiming->timing)); + return; +} + +// Add mode to 3D stereo support map +CODE_SEGMENT(PAGE_DD_CODE) +void AddModeToSupportMap(HDMI3DSUPPORTMAP * pMap, NvU8 Vic, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail) +{ + NvU32 i; + + if (0 < Vic) + { + // first check if the vic is already listed. + for (i = 0; i < pMap->total; ++i) + { + if (pMap->map[i].Vic == Vic) + { + break; + } + } + if (i == pMap->total) + { + // vic is not in the map. + // add it. + // note that we can't add the VIC to one of the 1st 16 entries. + // 1st 16 entries in the map are reserved for the vics from the EDID. + // if we add this VIC to the 1st 16, & there are any optional modes listed, + // the optional mode(s) will be improperly applied to this VIC as well + i = MAX(MAX_EDID_ADDRESSABLE_3D_VICS, pMap->total); + if (i < MAX_3D_VICS_SUPPORTED) + { + pMap->map[i].Vic = Vic; + pMap->total = i + 1; + } + } + nvt_assert(pMap->total <= MAX_3D_VICS_SUPPORTED); + if (i < pMap->total) + { + pMap->map[i].StereoStructureMask = pMap->map[i].StereoStructureMask | NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(StereoStructureType); + if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF == StereoStructureType) + { + pMap->map[i].SideBySideHalfDetail = SideBySideHalfDetail; + } + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidHdmiLlcBasicInfo(VSDB_DATA *pVsdb, NVT_HDMI_LLC_INFO *pHdmiLlc) +{ + NVT_HDMI_LLC_VSDB_PAYLOAD *p; + if (pVsdb == NULL || pHdmiLlc == NULL) + { + return; + } + + p = (NVT_HDMI_LLC_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + + // Minimum vendor_data_size is 2 + pHdmiLlc->addrA = p->A; + pHdmiLlc->addrB = p->B; + pHdmiLlc->addrC = p->C; + pHdmiLlc->addrD = p->D; + + // If more data is provided, we read it as well each field at a time up to video latency + if (pVsdb->vendor_data_size >= 3) + { + pHdmiLlc->supports_AI = p->Supports_AI; + pHdmiLlc->dc_48_bit = p->DC_48bit; + pHdmiLlc->dc_36_bit = p->DC_36bit; + pHdmiLlc->dc_30_bit = p->DC_30bit; + pHdmiLlc->dc_y444 = p->DC_Y444; + pHdmiLlc->dual_dvi = p->DVI_Dual; + + if (pVsdb->vendor_data_size >= 4) + { + pHdmiLlc->max_tmds_clock = p->Max_TMDS_Clock; + + if (pVsdb->vendor_data_size >= 5) + { + pHdmiLlc->latency_field_present = p->Latency_Fields_Present; + pHdmiLlc->i_latency_field_present = p->I_Latency_Fields_Present; + pHdmiLlc->hdmi_video_present = p->HDMI_Video_present; + pHdmiLlc->cnc3 = p->CNC3; + pHdmiLlc->cnc2 = p->CNC2; + pHdmiLlc->cnc1 = p->CNC1; + pHdmiLlc->cnc0 = p->CNC0; + } + } + } + +} + +// get HDMI 1.4 
specific timing (3D stereo timings and extended mode timings) +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidHDMILLCTiming(NVT_EDID_INFO *pInfo, VSDB_DATA *pVsdb, NvU32 *pMapSz, HDMI3DSUPPORTMAP * pM) +{ + NVT_HDMI_LLC_VSDB_PAYLOAD *pHdmiLLC; + NVT_HDMI_VIDEO *pHDMIVideo; + NvU32 DataCnt = 0; + NvU32 DataSz; + NvU16 i, j, k; + NvU16 Supports50Hz; + NvU16 Supports60Hz; + NvU32 vendorDataSize; + + if ((NULL == pInfo) || (NULL == pVsdb) || (NULL == pM)) + { + return; + } + + // init the support map + NVMISC_MEMSET(pM, 0, sizeof(HDMI3DSUPPORTMAP)); + Supports50Hz = 0; + Supports60Hz = 0; + + nvt_assert(pInfo->total_timings <= COUNT(pInfo->timing)); + + for (i = 0; i < pInfo->total_timings; ++i) + { + if (NVT_GET_TIMING_STATUS_TYPE(pInfo->timing[i].etc.status) == NVT_TYPE_EDID_861ST) + { + if (MAX_EDID_ADDRESSABLE_3D_VICS > pM->total) + { + // fill in the VICs from the EDID (up to the 1st 16). These are used for applying any 3D optional modes listed in the LLC + // -- the optional modes are addressed based on their relative location within the EDID. + pM->map[pM->total].Vic = (NvU8) NVT_GET_TIMING_STATUS_SEQ(pInfo->timing[i].etc.status); + ++pM->total; + } + + // since we are spinning through the timing array anyway, + // check to see which refresh rates are supported. + if (50 == pInfo->timing[i].etc.rr) + { + Supports50Hz = 1; + } + else if (60 == pInfo->timing[i].etc.rr) + { + Supports60Hz = 1; + } + } + } + + if (0 == pM->total) + { + if (NULL != pMapSz) + { + *pMapSz = 0; + } + } + + vendorDataSize = pVsdb->vendor_data_size; + if ((NVT_CEA861_HDMI_IEEE_ID == pVsdb->ieee_id) && + (offsetof(NVT_HDMI_LLC_VSDB_PAYLOAD, Data) < vendorDataSize)) + { + pHdmiLLC = (NVT_HDMI_LLC_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + DataSz = (NvU32) MIN(vendorDataSize - offsetof(NVT_HDMI_LLC_VSDB_PAYLOAD, Data), sizeof(pHdmiLLC->Data)); + + if (5 <= vendorDataSize) + { + if (pHdmiLLC->Latency_Fields_Present) + { + DataCnt += (NvU32) sizeof(NVT_CEA861_LATENCY); + + if (pHdmiLLC->I_Latency_Fields_Present) + { + DataCnt += (NvU32) sizeof(NVT_CEA861_LATENCY); + } + } + + if ((pHdmiLLC->HDMI_Video_present) && + (DataSz > DataCnt) && + (DataSz - DataCnt >= sizeof(NVT_HDMI_VIDEO))) + { + pHDMIVideo = (NVT_HDMI_VIDEO *) &pHdmiLLC->Data[DataCnt]; + DataCnt += (NvU32) sizeof(NVT_HDMI_VIDEO); + + // If 3D is present, then add the basic 3D modes 1st. + if (pHDMIVideo->ThreeD_Present) + { + if ((0 != Supports50Hz) || (0 != Supports60Hz)) + { + // 50 and / or 60 Hz is supported, add 1920 x 1080 @ 24Hz 3D modes. 
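+                        // These are the HDMI 1.4a mandatory 3D formats: 1080p24 (frame packing,
+                        // top-and-bottom), 720p50/60 (frame packing, top-and-bottom) and
+                        // 1080i50/60 (side-by-side half).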
+ AddModeToSupportMap(pM, 32, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1920 x 1080p @ 24 Hz + AddModeToSupportMap(pM, 32, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1920 x 1080p @ 24 Hz + + if (0 != Supports50Hz) + { + // add the mandatory modes for 50 Hz + AddModeToSupportMap(pM, 19, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1280 x 720p @ 50 Hz + AddModeToSupportMap(pM, 19, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1280 x 720p @ 50 Hz + // 1920 x 1080i @ 50 Hz + AddModeToSupportMap(pM, 20, NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH); + } + + if (0 != Supports60Hz) + { + // add the mandatory modes for 60 Hz + AddModeToSupportMap(pM, 4, NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, 0); // 1280 x 720p @ 60 Hz + AddModeToSupportMap(pM, 4, NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM, 0); // 1280 x 720p @ 60 Hz + // 1920 x 1080i @ 60 Hz + AddModeToSupportMap(pM, 5, NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF, NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH); + } + } + } + + if ((DataSz > DataCnt) && + (DataSz - DataCnt >= pHDMIVideo->HDMI_VIC_Len)) + { + // handle HDMI VIC entries to add HDMI 1.4a 4kx2k extended modes + NVT_HDMI_VIC_LIST * pVicList = (NVT_HDMI_VIC_LIST *) &pHdmiLLC->Data[DataCnt]; + + for ( k = 0; k < pHDMIVideo->HDMI_VIC_Len; ++k) + { + NVT_TIMING newTiming; + + // extended mode VIC code from 1 - 4. + if ((0 < pVicList->HDMI_VIC[k]) && (pVicList->HDMI_VIC[k] <= MAX_HDMI_EXT_4Kx2K_FORMAT)) + { + NVMISC_MEMCPY(&newTiming, + &HDMI_EXT_4Kx2K_TIMING[pVicList->HDMI_VIC[k] - 1], + sizeof(newTiming)); + + // Fill in the pixel clock + newTiming.pclk = RRx1kToPclk(&newTiming); + + if (!assignNextAvailableTiming(pInfo, &newTiming)) + { + break; + } + } + } + + DataCnt += pHDMIVideo->HDMI_VIC_Len; + } + + // the following code implements parsing the HDMI 3D additional modes (all modes bitmap & additional vic modes) + // Kepler and above support 3D secondary modes + if ((pHDMIVideo->ThreeD_Present) && + ((1 == pHDMIVideo->ThreeD_Multi_Present) || (2 == pHDMIVideo->ThreeD_Multi_Present)) && + (0 < pHDMIVideo->HDMI_3D_Len) && + (DataSz > (DataCnt + 1)) && //make sure pHdmiLLC->Data[DataCnt + 1] is valid + (DataSz - DataCnt >= pHDMIVideo->HDMI_3D_Len)) + { + NvU16 AllVicStructMask; + NvU16 AllVicIdxMask; + NvU8 AllVicDetail; + + // determine which modes to apply to all VICs. + AllVicStructMask = (pHdmiLLC->Data[DataCnt] << 8) | pHdmiLLC->Data[DataCnt + 1]; + AllVicStructMask = AllVicStructMask & NVT_ALL_HDMI_3D_STRUCT_SUPPORTED_MASK; + DataCnt += 2; + + if ((2 == pHDMIVideo->ThreeD_Multi_Present) && (DataSz > (DataCnt+1))) //make sure pHdmiLLC->Data[DataCnt + 1] is valid + { + AllVicIdxMask = pHdmiLLC->Data[DataCnt] << 8 | pHdmiLLC->Data[DataCnt + 1]; + DataCnt += 2; + } + else + { + AllVicIdxMask = 0xffff; + } + + // determine what the detail should be. + AllVicDetail = 0 != (AllVicStructMask & NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK) ? NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH : 0; + + // add the modes to the Support map for all the listed VICs. + for (k = 0; k < MIN(MAX_EDID_ADDRESSABLE_3D_VICS, pM->total); ++k) + { + if ((0 != (AllVicIdxMask & (1 << k))) && (0 != pM->map[k].Vic)) + { + pM->map[k].StereoStructureMask = pM->map[k].StereoStructureMask | AllVicStructMask; + pM->map[k].SideBySideHalfDetail = AllVicDetail; + } + } + } + + // handle any additional per vic modes listed in the EDID + while (DataSz > DataCnt) + { + // get a pointer to the entry. 
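+                    // Each entry holds 2D_VIC_order (an index into the first 16 SVDs collected
+                    // above) and 3D_Structure; a 3D_Detail byte follows only when the structure
+                    // is side-by-side half or greater, which is why the stride below is 2 or 1.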
+ NVT_3D_MULTI_LIST * pMultiListEntry = (NVT_3D_MULTI_LIST *) &pHdmiLLC->Data[DataCnt]; + + // apply the specified structure to the Support Map + pM->map[pMultiListEntry->TwoD_VIC_order].StereoStructureMask = + pM->map[pMultiListEntry->TwoD_VIC_order].StereoStructureMask | NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(pMultiListEntry->ThreeD_Structure); + + // increment the Data count by 2 if this is side by side half, + // or 1 if it is any other structure. + if (NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF <= pMultiListEntry->ThreeD_Structure) + { + pM->map[pMultiListEntry->TwoD_VIC_order].SideBySideHalfDetail = pMultiListEntry->ThreeD_Detail; + DataCnt += 2; + } + else + { + pM->map[pMultiListEntry->TwoD_VIC_order].SideBySideHalfDetail = 0; + DataCnt += 1; + } + } + } + } + } + + + // compress out entries where there is no 3D support. + for (i = 0, j = 0; i < pM->total; ++i) + { + if (0 != pM->map[i].StereoStructureMask) + { + pM->map[j] = pM->map[i]; + ++j; + } + } + + pM->total = j; + + if (NULL != pMapSz) + { + *pMapSz = pM->total; + } +} + +// get HDMI 1.4 3D mandatory stereo format datail base on the input vic. +// If the vic is not in the mandatory format list, return error. +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetHDMIStereoMandatoryFormatDetail(const NvU8 vic, NvU16 *pStereoStructureMask, NvU8 *pSideBySideHalfDetail) +{ + NvU32 i; + + if ((vic < 1) || (vic > MAX_CEA861B_FORMAT)) + { + return NVT_STATUS_ERR; + } + + for (i = 0; i < MAX_HDMI_MANDATORY_3D_FORMAT; i++) + { + if (vic == HDMI_MANDATORY_3D_FORMATS[i].Vic) + { + if (pStereoStructureMask != NULL) + { + *pStereoStructureMask = HDMI_MANDATORY_3D_FORMATS[i].StereoStructureMask; + } + + if (pSideBySideHalfDetail != NULL) + { + *pSideBySideHalfDetail = HDMI_MANDATORY_3D_FORMATS[i].SideBySideHalfDetail; + } + + return NVT_STATUS_SUCCESS; + } + } + + return NVT_STATUS_ERR; +} +// return the aspect ratio of a given CEA/EIA 861 timing +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 getCEA861TimingAspectRatio(NvU32 vic) +{ + return (vic > 0 && vic < MAX_CEA861B_FORMAT + 1) ? 
EIA861B[vic-1].etc.aspect : 0; +} + +// expose the HDMI extended video timing defined by the HDMI LLC VSDB +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_EnumHdmiVsdbExtendedTiming(NvU32 hdmi_vic, NVT_TIMING *pT) +{ + if (hdmi_vic > MAX_HDMI_EXT_4Kx2K_FORMAT || hdmi_vic == 0 || pT == NULL) + { + return NVT_STATUS_ERR; + } + *pT = HDMI_EXT_4Kx2K_TIMING[hdmi_vic - 1]; + pT->pclk = RRx1kToPclk(pT); + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidNvidiaVSDBBlock(VSDB_DATA *pVsdb, NVDA_VSDB_PARSED_INFO *vsdbInfo) +{ + NVT_NVDA_VSDB_PAYLOAD *pNvda; + + if ((pVsdb == NULL) || (vsdbInfo == NULL)) + { + return; + } + + if ((NVT_CEA861_NVDA_IEEE_ID == pVsdb->ieee_id) && + (pVsdb->vendor_data_size >= sizeof(NVT_NVDA_VSDB_PAYLOAD))) + { + pNvda = (NVT_NVDA_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + + // only version 0x1 is supported + if (pNvda->opcode == 0x1) + { + vsdbInfo->vsdbVersion = pNvda->opcode; + } + + switch (vsdbInfo->vsdbVersion) + { + case 1: + vsdbInfo->valid = NV_TRUE; + vsdbInfo->vrrData.v1.supportsVrr = NV_TRUE; + vsdbInfo->vrrData.v1.minRefreshRate = pNvda->vrrMinRefreshRate; + break; + + default: + break; + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidMsftVsdbBlock(VSDB_DATA *pVsdb, MSFT_VSDB_PARSED_INFO *pVsdbInfo) +{ + if ((pVsdb == NULL) || (pVsdbInfo == NULL)) + { + return; + } + + NVMISC_MEMSET(pVsdbInfo, 0, sizeof(MSFT_VSDB_PARSED_INFO)); + + if ((NVT_CEA861_MSFT_IEEE_ID == pVsdb->ieee_id) && + (pVsdb->vendor_data_size >= sizeof(NVT_MSFT_VSDB_PAYLOAD))) + { + NvU32 i = 0; + NVT_MSFT_VSDB_PAYLOAD *pMsftVsdbPayload = (NVT_MSFT_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + + pVsdbInfo->version = pMsftVsdbPayload->version; + + if (pVsdbInfo->version >= 1) + { + for (i = 0; i < MSFT_VSDB_CONTAINER_ID_SIZE; i++) + { + pVsdbInfo->containerId[i] = pMsftVsdbPayload->containerId[i]; + } + + pVsdbInfo->desktopUsage = pMsftVsdbPayload->desktopUsage; + pVsdbInfo->thirdPartyUsage = pMsftVsdbPayload->thirdPartyUsage; + pVsdbInfo->valid = NV_TRUE; + } + // Version 3 is the latest version of MSFT VSDB at the time of writing this code + // Any update from newer version will be ignored and be parsed as Version 3, till + // we have an explicit handling for newer version here. + if (pVsdbInfo->version >= 3) + { + // Primary Use case is valid from Version 3 and is ignored on previous versions. + pVsdbInfo->primaryUseCase = pMsftVsdbPayload->primaryUseCase; + } + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +void parseEdidHdmiForumVSDB(VSDB_DATA *pVsdb, NVT_HDMI_FORUM_INFO *pHdmiInfo) +{ + NVT_HDMI_FORUM_VSDB_PAYLOAD *pHdmiForum; + NvU32 remainingSize; + + if ((pVsdb == NULL) || pHdmiInfo == NULL) + { + return; + } + + pHdmiForum = (NVT_HDMI_FORUM_VSDB_PAYLOAD *)(&pVsdb->vendor_data); + switch(pHdmiForum->Version) + { + case 1: + // From HDMI spec the payload data size is from 7 to 31 + // In parseCta861DataBlockInfo(), the payload size recorded in pHdmiForum is + // subtracted by 3. Thus the expected range here is 4 - 28. + // Assert if the the vendor_data_size < 4. 
+ nvt_assert(pVsdb->vendor_data_size >= 4); + + remainingSize = pVsdb->vendor_data_size; + + // second byte + pHdmiInfo->max_TMDS_char_rate = pHdmiForum->Max_TMDS_Character_Rate; + + // third byte + pHdmiInfo->threeD_Osd_Disparity = pHdmiForum->ThreeD_Osd_Disparity; + pHdmiInfo->dual_view = pHdmiForum->Dual_View; + pHdmiInfo->independent_View = pHdmiForum->Independent_View; + pHdmiInfo->lte_340Mcsc_scramble = pHdmiForum->Lte_340mcsc_Scramble; + pHdmiInfo->ccbpci = pHdmiForum->CCBPCI; + pHdmiInfo->cable_status = pHdmiForum->CABLE_STATUS; + pHdmiInfo->rr_capable = pHdmiForum->RR_Capable; + pHdmiInfo->scdc_present = pHdmiForum->SCDC_Present; + + // fourth byte + pHdmiInfo->dc_30bit_420 = pHdmiForum->DC_30bit_420; + pHdmiInfo->dc_36bit_420 = pHdmiForum->DC_36bit_420; + pHdmiInfo->dc_48bit_420 = pHdmiForum->DC_48bit_420; + pHdmiInfo->uhd_vic = pHdmiForum->UHD_VIC; + pHdmiInfo->max_FRL_Rate = pHdmiForum->Max_FRL_Rate; + + remainingSize -= 4; + + // fifth byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->fapa_start_location = pHdmiForum->FAPA_start_location; + pHdmiInfo->allm = pHdmiForum->ALLM; + pHdmiInfo->fva = pHdmiForum->FVA; + pHdmiInfo->cnmvrr = pHdmiForum->CNMVRR; + pHdmiInfo->cinemaVrr = pHdmiForum->CinemaVRR; + pHdmiInfo->m_delta = pHdmiForum->M_delta; + + // sixth byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->vrr_min = pHdmiForum->VRR_min; + pHdmiInfo->vrr_max = ((NvU16)pHdmiForum->VRR_max_high) << 8; + + // seventh byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->vrr_max |= (pHdmiForum->VRR_max_low); + + // eighth byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->dsc_10bpc = pHdmiForum->DSC_10bpc; + pHdmiInfo->dsc_12bpc = pHdmiForum->DSC_12bpc; + pHdmiInfo->dsc_16bpc = pHdmiForum->DSC_16bpc; + pHdmiInfo->dsc_All_bpp = pHdmiForum->DSC_All_bpp; + pHdmiInfo->dsc_Native_420 = pHdmiForum->DSC_Native_420; + pHdmiInfo->dsc_1p2 = pHdmiForum->DSC_1p2; + + // ninth byte + if (!remainingSize--) + { + break; + } + pHdmiInfo->dsc_MaxSlices = 0; + pHdmiInfo->dsc_MaxPclkPerSliceMHz = 0; + switch(pHdmiForum->DSC_MaxSlices) + { + case 7: pHdmiInfo->dsc_MaxSlices = 16; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break; + case 6: pHdmiInfo->dsc_MaxSlices = 12; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break; + case 5: pHdmiInfo->dsc_MaxSlices = 8; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 400; break; + case 4: pHdmiInfo->dsc_MaxSlices = 8; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break; + case 3: pHdmiInfo->dsc_MaxSlices = 4; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break; + case 2: pHdmiInfo->dsc_MaxSlices = 2; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break; + case 1: pHdmiInfo->dsc_MaxSlices = 1; pHdmiInfo->dsc_MaxPclkPerSliceMHz = 340; break; + default: break; + } + + pHdmiInfo->dsc_Max_FRL_Rate = pHdmiForum->DSC_Max_FRL_Rate; + + // tenth byte + if (!remainingSize--) + { + break; + } + + // Per spec, number of bytes has to be computed as 1024 x (1 + DSC_TotalChunkKBytes). + // For driver parser purposes, add 1 here so that the field means max num of KBytes in a link of chunks + pHdmiInfo->dsc_totalChunkKBytes = (pHdmiForum->DSC_totalChunkKBytes == 0) ? 
0 : pHdmiForum->DSC_totalChunkKBytes + 1; + break; + + default: + break; + + } + +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_edidext_displayid.c b/src/common/modeset/timing/nvt_edidext_displayid.c new file mode 100644 index 000000000..e0b3a17a2 --- /dev/null +++ b/src/common/modeset/timing/nvt_edidext_displayid.c @@ -0,0 +1,1346 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_edidext_displayid.c +// +// Purpose: the provide edid related services +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "displayid.h" +#include "edid.h" + +PUSH_SEGMENTS + +static NVT_STATUS parseDisplayIdSection(DISPLAYID_SECTION * section, + NvU32 max_length, + NVT_EDID_INFO *pEdidInfo); +static NVT_STATUS parseDisplayIdBlock(NvU8 * block, + NvU8 max_length, + NvU8 * pLength, + NVT_EDID_INFO *pEdidInfo); + +// Specific blocks that can be parsed based on DisplayID +static NVT_STATUS parseDisplayIdProdIdentityBlock(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdParam(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdColorChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdTiming1(NvU8 * block, NVT_EDID_INFO *pEdidInfo); +static NVT_STATUS parseDisplayIdTiming2(NvU8 * block, NVT_EDID_INFO *pEdidInfo); +static NVT_STATUS parseDisplayIdTiming3(NvU8 * block, NVT_EDID_INFO *pEdidInfo); +static NVT_STATUS parseDisplayIdTiming4(NvU8 * block, NVT_EDID_INFO *pEdidInfo); +static NVT_STATUS parseDisplayIdTiming5(NvU8 * block, NVT_EDID_INFO *pEdidInfo, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdTimingVesa(NvU8 * block, NVT_EDID_INFO *pEdidInfo); +static NVT_STATUS parseDisplayIdTimingEIA(NvU8 * block, NVT_EDID_INFO *pEdidInfo); +static NVT_STATUS parseDisplayIdRangeLimits(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdSerialNumber(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdAsciiString(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdDeviceData(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdInterfacePower(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); 
+static NVT_STATUS parseDisplayIdTransferChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdDisplayInterface(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdStereo(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdTiledDisplay(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); +static NVT_STATUS parseDisplayIdCtaData(NvU8 * block, NVT_EDID_INFO *pInfo, NVT_DISPLAYID_INFO *pDisplayIdInfo); +static NVT_STATUS parseDisplayIdDisplayInterfaceFeatures(NvU8 * block, NVT_DISPLAYID_INFO *pInfo); + +static NVT_STATUS parseDisplayIdTiming1Descriptor(DISPLAYID_TIMING_1_DESCRIPTOR * desc, NVT_TIMING *pT); +static NVT_STATUS parseDisplayIdTiming2Descriptor(DISPLAYID_TIMING_2_DESCRIPTOR * desc, NVT_TIMING *pT); +static NVT_STATUS parseDisplayIdTiming3Descriptor(DISPLAYID_TIMING_3_DESCRIPTOR * desc, NVT_TIMING *pT); +static NVT_STATUS parseDisplayIdTiming5Descriptor(DISPLAYID_TIMING_5_DESCRIPTOR * desc, NVT_TIMING *pT); + +/** + * @brief Parses a displayID Extension block, with timings stored in pT and + * other info stored in pInfo + * @param p The EDID Extension Block (With a DisplayID in it) + * @param size Size of the displayID Extension Block + * @param pEdidInfo EDID struct containing DisplayID information and + * the timings + */ +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS getDisplayIdEDIDExtInfo(NvU8 *p, NvU32 size, + NVT_EDID_INFO *pEdidInfo) +{ + DISPLAYID_SECTION * section; + + if (p == NULL || size < sizeof(EDIDV1STRUC)) + return NVT_STATUS_ERR; + if (p[0] != NVT_EDID_EXTENSION_DISPLAYID) + return NVT_STATUS_ERR; + + section = (DISPLAYID_SECTION *)(p + 1); + pEdidInfo->ext_displayid.version = section->version; + if (section->product_type > NVT_DISPLAYID_PROD_MAX_NUMBER) + return NVT_STATUS_ERR; + + return parseDisplayIdSection(section, sizeof(EDIDV1STRUC) - 1, pEdidInfo); +} + +/** + * @brief updates the color format for each bpc for each timing + * @param pInfo EDID struct containing DisplayID information and + * the timings + * @param timingIdx Index of the first display ID timing in the + * pInfo->timing[] timing array. 
+ */ +CODE_SEGMENT(PAGE_DD_CODE) +void updateColorFormatForDisplayIdExtnTimings(NVT_EDID_INFO *pInfo, + NvU32 timingIdx) +{ + // pDisplayIdInfo is the parsed display ID info + NVT_DISPLAYID_INFO *pDisplayIdInfo = &pInfo->ext_displayid; + NVT_TIMING *pT = &pInfo->timing[timingIdx]; + + nvt_assert((timingIdx) <= COUNT(pInfo->timing)); + + if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED || + pInfo->ext861.valid.H14B_VSDB || pInfo->ext861.valid.H20_HF_VSDB) && pInfo->ext861.revision >= NVT_CEA861_REV_A) + { + if (!pInfo->ext_displayid.supported_displayId2_0) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_16b); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_16b); + } + } + else // DP + { + if (!pInfo->ext_displayid.supported_displayId2_0) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface.rgb_depth.support_16b); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.rgb_depth.support_16b); + } + } + + if (!pInfo->ext_displayid.supported_displayId2_0) + { + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_8b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_10b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_12b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_14b, + pDisplayIdInfo->u4.display_interface.ycbcr444_depth.support_16b); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_8b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_10b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_12b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_14b, + pDisplayIdInfo->u4.display_interface.ycbcr422_depth.support_16b); + } + else + { + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_12b, + 
pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr444_depth.support_16b); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr422_depth.support_16b); + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, /* yuv420 does not support 6bpc */ + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_8b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_10b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_12b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_14b, + pDisplayIdInfo->u4.display_interface_features.ycbcr420_depth.support_16b); + } +} + +/** + * @brief Parses a displayID Section + * @param section The DisplayID Section to parse + * @param max_length The indicated total length of the displayID as given (or + * sizeof(EDIDV1STRUCT) for an extension block) + * @param pEdidInfo EDID struct containing DisplayID information and + * the timings + */ +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdSection(DISPLAYID_SECTION * section, + NvU32 max_length, + NVT_EDID_INFO *pEdidInfo) +{ + NvU8 block_location = 0; + NvU8 section_length; + NvU8 remaining_length; + + if (section == NULL || max_length <= NVT_DISPLAYID_SECTION_HEADER_LEN) + return NVT_STATUS_ERR; + if (section->section_bytes > max_length - NVT_DISPLAYID_SECTION_HEADER_LEN) + return NVT_STATUS_ERR; + + remaining_length = section->section_bytes; + + while (block_location < section->section_bytes) + { + DISPLAYID_DATA_BLOCK_HEADER * hdr = (DISPLAYID_DATA_BLOCK_HEADER *) (section->data + block_location); + NvU8 is_prod_id = remaining_length > 3 && block_location == 0 && hdr->type == 0 && hdr->data_bytes > 0; + NvU8 i; + + // Check the padding. + if (hdr->type == 0 && !is_prod_id) + { + for (i = 1 ; i < remaining_length; i++) + { + // All remaining bytes must all be 0. 
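+ // Per the DisplayID section layout, any bytes after the final data block inside section_bytes are zero padding; a nonzero byte here means the section is malformed and the whole section is rejected.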
+ if (section->data[block_location + i] != 0)
+ {
+ return NVT_STATUS_ERR;
+ }
+ }
+
+ section_length = remaining_length;
+ }
+ else
+ {
+ if (parseDisplayIdBlock((NvU8 *)(section->data + block_location),
+ section->section_bytes - block_location,
+ &section_length,
+ pEdidInfo) != NVT_STATUS_SUCCESS)
+ return NVT_STATUS_ERR;
+ }
+
+ block_location += section_length;
+ remaining_length -= section_length;
+ }
+
+ return NVT_STATUS_SUCCESS;
+}
+
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdBlock(NvU8 * block,
+ NvU8 max_length,
+ NvU8 * pLength,
+ NVT_EDID_INFO *pEdidInfo)
+{
+ DISPLAYID_DATA_BLOCK_HEADER * hdr = (DISPLAYID_DATA_BLOCK_HEADER *) block;
+ NVT_DISPLAYID_INFO *pInfo = &pEdidInfo->ext_displayid;
+
+ if (block == NULL || max_length <= NVT_DISPLAYID_DATABLOCK_HEADER_LEN)
+ return NVT_STATUS_ERR;
+ if (hdr->data_bytes > max_length - NVT_DISPLAYID_DATABLOCK_HEADER_LEN)
+ return NVT_STATUS_ERR;
+
+ *pLength = hdr->data_bytes + NVT_DISPLAYID_DATABLOCK_HEADER_LEN;
+
+ switch (hdr->type)
+ {
+ case NVT_DISPLAYID_BLOCK_TYPE_PRODUCT_IDENTITY:
+ parseDisplayIdProdIdentityBlock(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_PARAM:
+ parseDisplayIdParam(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_COLOR_CHAR:
+ parseDisplayIdColorChar(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_1:
+ parseDisplayIdTiming1(block, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_2:
+ parseDisplayIdTiming2(block, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_3:
+ parseDisplayIdTiming3(block, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_4:
+ parseDisplayIdTiming4(block, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_5:
+ parseDisplayIdTiming5(block, pEdidInfo, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_VESA:
+ parseDisplayIdTimingVesa(block, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TIMING_CEA:
+ parseDisplayIdTimingEIA(block, pEdidInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_RANGE_LIMITS:
+ parseDisplayIdRangeLimits(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_SERIAL_NUMBER:
+ parseDisplayIdSerialNumber(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_ASCII_STRING:
+ parseDisplayIdAsciiString(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_DEVICE_DATA:
+ parseDisplayIdDeviceData(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_INTERFACE_POWER:
+ parseDisplayIdInterfacePower(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TRANSFER_CHAR:
+ parseDisplayIdTransferChar(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE:
+ parseDisplayIdDisplayInterface(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_STEREO:
+ parseDisplayIdStereo(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_TILEDDISPLAY:
+ parseDisplayIdTiledDisplay(block, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_CTA_DATA:
+ parseDisplayIdCtaData(block, pEdidInfo, pInfo);
+ break;
+ case NVT_DISPLAYID_BLOCK_TYPE_DISPLAY_INTERFACE_FEATURES:
+ parseDisplayIdDisplayInterfaceFeatures(block, pInfo);
+ break;
+ default:
+ break;
+ }
+ return NVT_STATUS_SUCCESS;
+}
+CODE_SEGMENT(PAGE_DD_CODE)
+static NVT_STATUS parseDisplayIdColorChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo)
+{
+ NvU32 i, j;
+ NvU16 x_p, y_p;
+ DISPLAYID_COLOR_CHAR_BLOCK * blk = (DISPLAYID_COLOR_CHAR_BLOCK *)block;
+
+ /** unused flag - uncomment if you wish to use it in the future
+ NvU8 isTemp = DRF_VAL(T_DISPLAYID, _COLOR, _TEMPORAL, blk->point_info);
+ */
+ NvU8 wp_num =
DRF_VAL(T_DISPLAYID, _COLOR, _WHITE_POINTS, blk->point_info); + NvU8 prim_num = DRF_VAL(T_DISPLAYID, _COLOR, _PRIMARIES, blk->point_info); + + if ((prim_num + wp_num) * sizeof(DISPLAYID_COLOR_POINT) + 1 != blk->header.data_bytes) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i < prim_num; i++) + { + x_p = (blk->points)[i].color_x_bits_low + + (DRF_VAL(T_DISPLAYID, _COLOR, _POINT_X, (blk->points)[i].color_bits_mid) << 8); + y_p = DRF_VAL(T_DISPLAYID, _COLOR, _POINT_Y, (blk->points)[i].color_bits_mid) + + ((blk->points)[i].color_y_bits_high << 4); + pInfo->primaries[i].x = x_p; + pInfo->primaries[i].y = y_p; + } + + for (j = 0; j < wp_num; j++) + { + x_p = (blk->points)[i].color_x_bits_low + + (DRF_VAL(T_DISPLAYID, _COLOR, _POINT_X, (blk->points)[i].color_bits_mid) << 8); + y_p = DRF_VAL(T_DISPLAYID, _COLOR, _POINT_Y, (blk->points)[i].color_bits_mid) + + ((blk->points)[i].color_y_bits_high << 4); + pInfo->white_points[pInfo->total_primaries + j].x = x_p; + pInfo->white_points[pInfo->total_primaries + j].y = y_p; + + i++; + } + pInfo->total_primaries = prim_num; + pInfo->total_white_points += wp_num; + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdProdIdentityBlock(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_PROD_IDENTIFICATION_BLOCK * blk = (DISPLAYID_PROD_IDENTIFICATION_BLOCK *)block; + if (blk->header.data_bytes - blk->productid_string_size != NVT_DISPLAYID_PRODUCT_IDENTITY_MIN_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + pInfo->vendor_id = (blk->vendor)[2] | ((blk->vendor)[1] << 8) | ((blk->vendor)[0] << 16); + pInfo->product_id = blk->product_code; + pInfo->serial_number = blk->serial_number; + pInfo->week = blk->model_tag; + pInfo->year = blk->model_year; + + if (blk->productid_string_size != 0) + NVMISC_STRNCPY((char *)pInfo->product_string, (const char *)blk->productid_string, blk->productid_string_size); + pInfo->product_string[blk->productid_string_size] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdParam(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_DISPLAY_PARAM_BLOCK * blk = (DISPLAYID_DISPLAY_PARAM_BLOCK *)block; + if (blk->header.data_bytes != NVT_DISPLAYID_DISPLAY_PARAM_BLOCK_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + pInfo->horiz_size = blk->horizontal_image_size; + pInfo->vert_size = blk->vertical_image_size; + pInfo->horiz_pixels = blk->horizontal_pixel_count; + pInfo->vert_pixels = blk->vertical_pixel_count; + + pInfo->support_audio = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _SUPPORT_AUDIO, blk->feature); + pInfo->separate_audio = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _SEPARATE_AUDIO, blk->feature); + pInfo->audio_override = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _AUDIO_INPUT_OVERRIDE, blk->feature); + pInfo->power_management = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _POWER_MANAGEMENT, blk->feature); + pInfo->fixed_timing = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _FIXED_TIMING, blk->feature); + pInfo->fixed_pixel_format = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _FIXED_PIXEL_FORMAT, blk->feature); + pInfo->deinterlace = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEINTERLACING, blk->feature); + + pInfo->gamma = (NvU16)(blk->transfer_char_gamma - 1) * 100; + pInfo->aspect_ratio = blk->aspect_ratio; + + pInfo->depth_overall = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEPTH_OVERALL, blk->color_bit_depth); + 
pInfo->depth_native = DRF_VAL(T_DISPLAYID, _DISPLAY_PARAM, _DEPTH_NATIVE, blk->color_bit_depth); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming1(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_1_BLOCK * blk = (DISPLAYID_TIMING_1_BLOCK *)block; + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_1_DESCRIPTOR) != 0) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_1_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming1Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + } + return NVT_STATUS_SUCCESS; +} +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming1Descriptor(DISPLAYID_TIMING_1_DESCRIPTOR * type1, NVT_TIMING *pT) +{ + NvU32 totalPixels_in_2_fields; + if (type1 == NULL || pT == NULL) + return NVT_STATUS_ERR; + + // the pixel clock + pT->pclk = (NvU32)((type1->pixel_clock_high << 16) + (type1->pixel_clock_mid << 8) + type1->pixel_clock_low_minus_0_01MHz + 1); + + // the DisplayID spec does not support border + pT->HBorder = pT->VBorder = 0; + + // get horizontal timing parameters + pT->HVisible = (NvU16)((type1->horizontal.active_image_pixels_high << 8) + type1->horizontal.active_image_pixels_low_minus_1 + 1); + pT->HTotal = (NvU16)((type1->horizontal.blank_pixels_high << 8) + type1->horizontal.blank_pixels_low_minus_1 + 1) + pT->HVisible; + pT->HFrontPorch = (NvU16)((type1->horizontal.front_porch_high << 8) + type1->horizontal.front_porch_low_minus_1 + 1); + pT->HSyncWidth = (NvU16)((type1->horizontal.sync_width_high << 8) + type1->horizontal.sync_width_low_minus_1 + 1); + pT->HSyncPol = type1->horizontal.sync_polarity ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE; + + // get vertical timings + pT->VVisible = (NvU16)((type1->vertical.active_image_lines_high << 8) + type1->vertical.active_image_lines_low_minus_1 + 1); + pT->VTotal = (NvU16)((type1->vertical.blank_lines_high << 8) + type1->vertical.blank_lines_low_minus_1 + 1) + pT->VVisible; + pT->VFrontPorch = (NvU16)((type1->vertical.front_porch_lines_high << 8) + type1->vertical.front_porch_lines_low_minus_1 + 1); + pT->VSyncWidth = (NvU16)((type1->vertical.sync_width_lines_high << 8) + type1->vertical.sync_width_lines_low_minus_1 + 1); + pT->VSyncPol = type1->vertical.sync_polarity ? NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE; + + // EDID used in DP1.4 Compliance test had incorrect HBlank listed, leading to wrong raster sizes being set by driver (bug 2714607) + // Filter incorrect timings here. 
HTotal must cover sufficient blanking time + if (pT->HTotal < (pT->HVisible + pT->HFrontPorch + pT->HSyncWidth)) + { + return NVT_STATUS_ERR; + } + + // the frame scanning type + pT->interlaced = type1->options.interface_frame_scanning_type; + + // the aspect ratio + switch (type1->options.aspect_ratio) + { + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_1_1: + pT->etc.aspect = (1 << 16) | 1; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_5_4: + pT->etc.aspect = (5 << 16) | 4; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_4_3: + pT->etc.aspect = (4 << 16) | 3; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_15_9: + pT->etc.aspect = (15 << 16) | 9; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_9: + pT->etc.aspect = (16 << 16) | 9; + break; + case NVT_DISPLAYID_TIMING_ASPECT_RATIO_16_10: + pT->etc.aspect = (16 << 16) | 10; + break; + default: + pT->etc.aspect = 0; + break; + } + + // the refresh rate + if (pT->interlaced) + { + // in interlaced mode, adjust for one extra line in every other frame. pT->VTotal is field based here + totalPixels_in_2_fields = (NvU32)pT->HTotal * ((NvU32)pT->VTotal * 2 + 1); + // calculate the field rate in interlaced mode + pT->etc.rr = (NvU16)axb_div_c(pT->pclk * 2, 10000, totalPixels_in_2_fields); + pT->etc.rrx1k = axb_div_c(pT->pclk * 2, 10000000, totalPixels_in_2_fields); + } + else + { + // calculate frame rate in progressive mode + // in progressive mode filed = frame + pT->etc.rr = (NvU16)axb_div_c(pT->pclk, 10000, (NvU32)pT->HTotal * (NvU32)pT->VTotal); + pT->etc.rrx1k = axb_div_c(pT->pclk, 10000000, (NvU32)pT->HTotal * (NvU32)pT->VTotal); + } + pT->etc.name[39] = '\0'; + pT->etc.rep = 0x1; // bit mask for no pixel repetition + + pT->etc.status = NVT_STATUS_DISPLAYID_1; + // Unlike the PTM in EDID base block, DisplayID type I/II preferred timing does not have dependency on sequence + // so we'll just update the preferred flag, not sequence them + //pT->etc.status = NVT_STATUS_DISPLAYID_1N(1); + pT->etc.flag |= type1->options.is_preferred_detailed_timing ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0; + + /* Fields currently not used. 
Uncomment them for future use + type1->options.stereo_support; + */ + + // the DisplayID spec covers the timing parameter(Visible/FrontPorch/SyncWidth/Total) range from 1~65536 while our NVT_TIMING structure which is mostly based on NvU16 only covers 0~65535 + nvt_assert(pT->HVisible != 0); + nvt_assert(pT->HFrontPorch != 0); + nvt_assert(pT->HSyncWidth != 0); + nvt_assert(pT->VVisible != 0); + nvt_assert(pT->VFrontPorch != 0); + nvt_assert(pT->VSyncWidth != 0); + + // cover the possible overflow + nvt_assert(pT->HTotal > pT->HVisible); + nvt_assert(pT->VTotal > pT->VVisible); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming2(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + DISPLAYID_TIMING_2_BLOCK * blk = (DISPLAYID_TIMING_2_BLOCK *)block; + NVT_TIMING newTiming; + + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_2_DESCRIPTOR) != 0) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_2_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming2Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming2Descriptor(DISPLAYID_TIMING_2_DESCRIPTOR * type2, NVT_TIMING *pT) +{ + NvU32 totalPixels_in_2_fields; + if (type2 == NULL || pT == NULL) + return NVT_STATUS_ERR; + + // the pixel clock + pT->pclk = (NvU32)((type2->pixel_clock_high << 16) + (type2->pixel_clock_mid << 8) + type2->pixel_clock_low_minus_0_01MHz + 1); + + // the DisplayID spec does not support border + pT->HBorder = pT->VBorder = 0; + + // get horizontal timing parameters + pT->HVisible = (NvU16)((type2->horizontal.active_image_in_char_high << 8) + type2->horizontal.active_image_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS; + pT->HTotal = (NvU16)(type2->horizontal.blank_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS + pT->HVisible; + pT->HFrontPorch = (NvU16)(type2->horizontal.front_porch_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS; + pT->HSyncWidth = (NvU16)(type2->horizontal.sync_width_in_char_minus_1 + 1) * NVT_DISPLAYID_CHAR_WIDTH_IN_PIXELS; + pT->HSyncPol = type2->options.hsync_polarity ? NVT_H_SYNC_POSITIVE : NVT_H_SYNC_NEGATIVE; + + // get vertical timing parameters + pT->VVisible = (NvU16)((type2->vertical.active_image_lines_high << 8) + type2->vertical.active_image_lines_low_minus_1 + 1); + pT->VTotal = (NvU16)(type2->vertical.blank_lines_minus_1 + 1) + pT->VVisible; + pT->VFrontPorch = (NvU16)(type2->vertical.front_porch_lines_minus_1 + 1); + pT->VSyncWidth = (NvU16)(type2->vertical.sync_width_lines_minus_1 + 1); + pT->VSyncPol = type2->options.vsync_polarity ? NVT_V_SYNC_POSITIVE : NVT_V_SYNC_NEGATIVE; + + // the frame scanning type + pT->interlaced = type2->options.interface_frame_scanning_type; + + // the refresh rate + if (pT->interlaced) + { + // in interlaced mode, adjust for one extra line in every other frame. 
pT->VTotal is field based here + totalPixels_in_2_fields = (NvU32)pT->HTotal * ((NvU32)pT->VTotal * 2 + 1); + // calculate the field rate in interlaced mode + pT->etc.rr = (NvU16)axb_div_c(pT->pclk * 2, 10000, totalPixels_in_2_fields); + pT->etc.rrx1k = axb_div_c(pT->pclk * 2, 10000000, totalPixels_in_2_fields); + } + else + { + // calculate frame rate in progressive mode + // in progressive mode filed = frame + pT->etc.rr = (NvU16)axb_div_c(pT->pclk, 10000, (NvU32)pT->HTotal * (NvU32)pT->VTotal); + pT->etc.rrx1k = axb_div_c(pT->pclk, 10000000, (NvU32)pT->HTotal * (NvU32)pT->VTotal); + } + + pT->etc.aspect = 0; + pT->etc.name[39] = '\0'; + pT->etc.rep = 0x1; // Bit mask for no pixel repetition + + pT->etc.status = NVT_STATUS_DISPLAYID_2; + // Unlike the PTM in EDID base block, DisplayID type I/II preferred timing does not have dependency on sequence + // so we'll just update the preferred flag, not sequence them + //pT->etc.status = NVT_STATUS_DISPLAYID_1N(1); + pT->etc.flag |= type2->options.is_preferred_detailed_timing ? NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING : 0; + + /* Fields currently not used. Uncomment them for future use + type1->options.stereo_support; + */ + + // the DisplayID spec covers the timing parameter(Visible/FrontPorch/SyncWidth/Total) range from 1~65536 while our NVT_TIMING structure which is mostly based on NvU16 only covers 0~65535 + nvt_assert(pT->HVisible != 0); + nvt_assert(pT->HFrontPorch != 0); + nvt_assert(pT->HSyncWidth != 0); + nvt_assert(pT->VVisible != 0); + nvt_assert(pT->VFrontPorch != 0); + nvt_assert(pT->VSyncWidth != 0); + + // cover the possible overflow + nvt_assert(pT->HTotal > pT->HVisible); + nvt_assert(pT->VTotal > pT->VVisible); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming3Descriptor(DISPLAYID_TIMING_3_DESCRIPTOR * desc, + NVT_TIMING *pT) +{ + NvU8 formula, aspect; + NvU32 horiz, vert, rr; + NvU32 interlace; + if (desc == NULL || pT == NULL) + return NVT_STATUS_ERR; + + formula = DRF_VAL(T_DISPLAYID, _TIMING_3, _FORMULA, desc->optns); + /* Fields currently not used, uncomment for use + preferred = DRF_VAL(T_DISPLAYID, _TIMING, _PREFERRED, desc->optns); + */ + aspect = DRF_VAL(T_DISPLAYID, _TIMING_3, _ASPECT_RATIO, desc->optns); + interlace = DRF_VAL(T_DISPLAYID, _TIMING_3, _INTERLACE, desc->transfer) ? 
NVT_INTERLACED : NVT_PROGRESSIVE; + rr = (NvU32)(DRF_VAL(T_DISPLAYID, _TIMING_3, _REFRESH_RATE, desc->transfer) + 1); + + horiz = (NvU32)((desc->horizontal_active_pixels + 1) << 3); + + switch (aspect) + { + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_1_1: + vert = horiz; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_5_4: + vert = horiz * 4 / 5; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_4_3: + vert = horiz * 3 / 4; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_15_9: + vert = horiz * 9 / 15; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_9: + vert = horiz * 9 / 16; + break; + case NVT_DISPLAYID_TIMING_3_ASPECT_RATIO_16_10: + vert = horiz * 10 / 16; + break; + default: + return NVT_STATUS_ERR; + } + + switch (formula) + { + case NVT_DISPLAYID_TIMING_3_FORMULA_STANDARD: + if (NvTiming_CalcCVT(horiz, vert, rr, interlace, pT) != NVT_STATUS_SUCCESS) + return NVT_STATUS_ERR; + break; + case NVT_DISPLAYID_TIMING_3_FORMULA_REDUCED_BLANKING: + if (NvTiming_CalcCVT_RB(horiz, vert, rr, interlace, pT) != NVT_STATUS_SUCCESS) + return NVT_STATUS_ERR; + break; + default: + return NVT_STATUS_ERR; + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming3(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + DISPLAYID_TIMING_3_BLOCK * blk = (DISPLAYID_TIMING_3_BLOCK *)block; + NVT_TIMING newTiming; + + if (blk->header.data_bytes % sizeof(DISPLAYID_TIMING_3_DESCRIPTOR) != 0) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i * sizeof(DISPLAYID_TIMING_3_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming3Descriptor(blk->descriptors + i, + &newTiming) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming4(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_4_BLOCK * blk = (DISPLAYID_TIMING_4_BLOCK *)block; + if (blk->header.data_bytes < 1 || blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumDMT((NvU32)(blk->timing_codes[i]), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTiming5Descriptor(DISPLAYID_TIMING_5_DESCRIPTOR * desc, NVT_TIMING *pT) +{ + NvU32 width, height, rr; + NvBool is1000div1001 = NV_FALSE; + + // we don't handle stereo type nor custom reduced blanking yet + //NvU8 stereoType, formula; + //stereoType = (desc->optns & NVT_DISPLAYID_TIMING_5_STEREO_SUPPORT_MASK); + //formula = desc->optns & NVT_DISPLAYID_TIMING_5_FORMULA_SUPPORT_MASK; + + if (desc->optns & NVT_DISPLAYID_TIMING_5_FRACTIONAL_RR_SUPPORT_MASK) + { + is1000div1001 = NV_TRUE; + } + width = ((desc->horizontal_active_pixels_high << 8) | desc->horizontal_active_pixels_low) + 1; + height = ((desc->vertical_active_pixels_high << 8) | desc->vertical_active_pixels_low) + 1; + rr = desc->refresh_rate + 1; + return NvTiming_CalcCVT_RB2(width, height, rr, is1000div1001, pT); +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS 
parseDisplayIdTiming5(NvU8 * block, NVT_EDID_INFO *pEdidInfo, NVT_DISPLAYID_INFO *pInfo) +{ + NvU16 i; + NVT_TIMING newTiming; + DISPLAYID_TIMING_5_BLOCK * blk = (DISPLAYID_TIMING_5_BLOCK *)block; + if (blk->header.data_bytes < 1 || blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + for (i = 0; i * sizeof(DISPLAYID_TIMING_5_DESCRIPTOR) < blk->header.data_bytes; i++) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (parseDisplayIdTiming5Descriptor(blk->descriptors + i, &newTiming) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTimingVesa(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU8 i, j; + NVT_TIMING newTiming; + DISPLAYID_TIMING_MODE_BLOCK * blk = (DISPLAYID_TIMING_MODE_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_TIMING_VESA_BLOCK_SIZE) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i < DISPLAYID_TIMING_VESA_BLOCK_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (blk->timing_modes[i] & (1 << j)) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumDMT((NvU32)(i * 8 + j + 1), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTimingEIA(NvU8 * block, NVT_EDID_INFO *pEdidInfo) +{ + NvU8 i, j; + NVT_TIMING newTiming; + DISPLAYID_TIMING_MODE_BLOCK * blk = (DISPLAYID_TIMING_MODE_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_TIMING_CEA_BLOCK_SIZE) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + for (i = 0; i < DISPLAYID_TIMING_CEA_BLOCK_SIZE; i++) + { + for (j = 0; j < 8; j++) + { + if (blk->timing_modes[i] & (1 << j)) + { + NVMISC_MEMSET(&newTiming, 0, sizeof(newTiming)); + + if (NvTiming_EnumCEA861bTiming((NvU32)(i * 8 + j + 1), + &newTiming) == NVT_STATUS_SUCCESS) + { + if (!assignNextAvailableTiming(pEdidInfo, &newTiming)) + { + break; + } + } + } + } + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdRangeLimits(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NVT_DISPLAYID_RANGE_LIMITS * rl; + DISPLAYID_RANGE_LIMITS_BLOCK * blk = (DISPLAYID_RANGE_LIMITS_BLOCK *)block; + if ((blk->header.data_bytes != DISPLAYID_RANGE_LIMITS_BLOCK_LEN) || + (pInfo->rl_num >= NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT)) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + rl = pInfo->range_limits + pInfo->rl_num; + (pInfo->rl_num)++; + + rl->pclk_min = blk->pixel_clock_min[0] | (blk->pixel_clock_min[1] << 8) | (blk->pixel_clock_min[2] << 16); + rl->pclk_max = blk->pixel_clock_max[0] | (blk->pixel_clock_max[1] << 8) | (blk->pixel_clock_max[2] << 16); + + rl->interlaced = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _INTERLACE, blk->optns); + rl->cvt = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _CVT_STANDARD, blk->optns); + rl->cvt_reduced = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _CVT_REDUCED, blk->optns); + rl->dfd = DRF_VAL(T_DISPLAYID, _RANGE_LIMITS, _DFD, blk->optns); + + rl->hfreq_min = blk->horizontal_frequency_min; + rl->hfreq_max = blk->horizontal_frequency_max; + rl->hblank_min = blk->horizontal_blanking_min; + rl->vfreq_min = 
blk->vertical_refresh_rate_min; + rl->vfreq_max = blk->vertical_refresh_rate_max; + rl->vblank_min = blk->vertical_blanking_min; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdSerialNumber(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_ASCII_STRING_BLOCK * blk = (DISPLAYID_ASCII_STRING_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + // Nothing is currently done to store any ASCII Serial Number, if it is + // required. Code here may need to be modified sometime in the future, along + // with NVT_DISPLAYID_INFO struct + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdAsciiString(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_ASCII_STRING_BLOCK * blk = (DISPLAYID_ASCII_STRING_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + // Nothing is currently done to store any ASCII String Data, if it is + // required. Code here may need to be modified sometime in the future, along + // with NVT_DISPLAYID_INFO struct + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdDeviceData(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_DEVICE_DATA_BLOCK * blk = (DISPLAYID_DEVICE_DATA_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_DEVICE_DATA_BLOCK_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + pInfo->tech_type = blk->technology; + + pInfo->device_op_mode = DRF_VAL(T_DISPLAYID, _DEVICE, _OPERATING_MODE, blk->operating_mode); + pInfo->support_backlight = DRF_VAL(T_DISPLAYID, _DEVICE, _BACKLIGHT, blk->operating_mode); + pInfo->support_intensity = DRF_VAL(T_DISPLAYID, _DEVICE, _INTENSITY, blk->operating_mode); + + pInfo->horiz_pixel_count = blk->horizontal_pixel_count; + pInfo->vert_pixel_count = blk->vertical_pixel_count; + + pInfo->orientation = DRF_VAL(T_DISPLAYID, _DEVICE, _ORIENTATION, blk->orientation); + pInfo->rotation = DRF_VAL(T_DISPLAYID, _DEVICE, _ROTATION, blk->orientation); + pInfo->zero_pixel = DRF_VAL(T_DISPLAYID, _DEVICE, _ZERO_PIXEL, blk->orientation); + pInfo->scan_direction = DRF_VAL(T_DISPLAYID, _DEVICE, _SCAN, blk->orientation); + + pInfo->subpixel_info = blk->subpixel_info; + pInfo->horiz_pitch = blk->horizontal_pitch; + pInfo->vert_pitch = blk->vertical_pitch; + + pInfo->color_bit_depth = DRF_VAL(T_DISPLAYID, _DEVICE, _COLOR_DEPTH, blk->color_bit_depth); + pInfo->white_to_black = DRF_VAL(T_DISPLAYID, _DEVICE, _WHITE_BLACK, blk->response_time); + pInfo->response_time = DRF_VAL(T_DISPLAYID, _DEVICE, _RESPONSE_TIME, blk->response_time); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdInterfacePower(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_INTERFACE_POWER_BLOCK * blk = (DISPLAYID_INTERFACE_POWER_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_INTERFACE_POWER_BLOCK_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + // Note specifically that the data inside T1/T2 variables are the exact + // interface power data. the millisecond increments are dependent on the + // DisplayID specification. 
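+ // T1 through T6 below are the power-sequencing intervals reported by the Interface Power data block; each DRF_VAL() isolates the corresponding bit-field from the raw payload byte, with the unit/increment of each value defined by the DisplayID specification as noted above.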
+ pInfo->t1_min = DRF_VAL(T_DISPLAYID, _POWER, _T1_MIN, blk->power_sequence_T1); + pInfo->t1_max = DRF_VAL(T_DISPLAYID, _POWER, _T1_MAX, blk->power_sequence_T1); + pInfo->t2_max = DRF_VAL(T_DISPLAYID, _POWER, _T2, blk->power_sequence_T2); + pInfo->t3_max = DRF_VAL(T_DISPLAYID, _POWER, _T3, blk->power_sequence_T3); + pInfo->t4_min = DRF_VAL(T_DISPLAYID, _POWER, _T4_MIN, blk->power_sequence_T4_min); + pInfo->t5_min = DRF_VAL(T_DISPLAYID, _POWER, _T5_MIN, blk->power_sequence_T5_min); + pInfo->t6_min = DRF_VAL(T_DISPLAYID, _POWER, _T6_MIN, blk->power_sequence_T6_min); + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdTransferChar(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + // Transfer Characteristics are currently not supported, but parsing of the + // block should be added in the future when more specifications on monitors + // that require this information is located here. + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdDisplayInterface(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_INTERFACE_DATA_BLOCK * blk = (DISPLAYID_INTERFACE_DATA_BLOCK *)block; + if (blk->header.data_bytes != DISPLAYID_INTERFACE_DATA_BLOCK_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + pInfo->supported_displayId2_0 = 0; + + // Type/Link Info + pInfo->u4.display_interface.interface_type = DRF_VAL(T_DISPLAYID, _INTERFACE, _TYPE, blk->info); + pInfo->u4.display_interface.u1.digital_num_links = DRF_VAL(T_DISPLAYID, _INTERFACE, _NUMLINKS, blk->info); + pInfo->u4.display_interface.interface_version = blk->version; + + // Color Depths + pInfo->u4.display_interface.rgb_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB16, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB14, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB12, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB10, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB8, blk->color_depth_rgb); + pInfo->u4.display_interface.rgb_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE, _RGB6, blk->color_depth_rgb); + pInfo->u4.display_interface.ycbcr444_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_16, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_14, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_12, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_10, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_8, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr444_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR444_6, blk->color_depth_ycbcr444); + pInfo->u4.display_interface.ycbcr422_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_16, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_14, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE, 
_YCBCR422_12, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_10, blk->color_depth_ycbcr422); + pInfo->u4.display_interface.ycbcr422_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE, _YCBCR422_8, blk->color_depth_ycbcr422); + + // Content Protection + pInfo->u4.display_interface.content_protection = DRF_VAL(T_DISPLAYID, _INTERFACE, _CONTENT, blk->content_protection); + pInfo->u4.display_interface.content_protection_version = blk->content_protection_version; + + // Spread + pInfo->u4.display_interface.spread_spectrum = DRF_VAL(T_DISPLAYID, _INTERFACE, _SPREAD_TYPE, blk->spread); + pInfo->u4.display_interface.spread_percent = DRF_VAL(T_DISPLAYID, _INTERFACE, _SPREAD_PER, blk->spread); + + // Proprietary Information + switch (pInfo->u4.display_interface.interface_type) + { + case NVT_DISPLAYID_INTERFACE_TYPE_LVDS: + pInfo->u2.lvds.color_map = DRF_VAL(T_DISPLAYID, _LVDS, _COLOR, blk->interface_attribute_1); + pInfo->u2.lvds.support_2_8v = DRF_VAL(T_DISPLAYID, _LVDS, _2_8, blk->interface_attribute_1); + pInfo->u2.lvds.support_12v = DRF_VAL(T_DISPLAYID, _LVDS, _12, blk->interface_attribute_1); + pInfo->u2.lvds.support_5v = DRF_VAL(T_DISPLAYID, _LVDS, _5, blk->interface_attribute_1); + pInfo->u2.lvds.support_3_3v = DRF_VAL(T_DISPLAYID, _LVDS, _3_3, blk->interface_attribute_1); + pInfo->u2.lvds.DE_mode = DRF_VAL(T_DISPLAYID, _INTERFACE, _DE, blk->interface_attribute_2); + pInfo->u2.lvds.polarity = DRF_VAL(T_DISPLAYID, _INTERFACE, _POLARITY, blk->interface_attribute_2); + pInfo->u2.lvds.data_strobe = DRF_VAL(T_DISPLAYID, _INTERFACE, _STROBE, blk->interface_attribute_2); + break; + case NVT_DISPLAYID_INTERFACE_TYPE_PROPRIETARY: + pInfo->u2.proprietary.DE_mode = DRF_VAL(T_DISPLAYID, _INTERFACE, _DE, blk->interface_attribute_1); + pInfo->u2.proprietary.polarity = DRF_VAL(T_DISPLAYID, _INTERFACE, _POLARITY, blk->interface_attribute_1); + pInfo->u2.proprietary.data_strobe = DRF_VAL(T_DISPLAYID, _INTERFACE, _STROBE, blk->interface_attribute_1); + break; + default: + break; + } + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdStereo(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NvU8 * sub; + + DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK * blk = (DISPLAYID_STEREO_INTERFACE_METHOD_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + sub = blk->timing_sub_block; + + pInfo->stereo_code = blk->stereo_code; + switch (blk->stereo_code) + { + case NVT_DISPLAYID_STEREO_FIELD_SEQUENTIAL: + pInfo->u3.field_sequential.stereo_polarity = sub[0]; + break; + case NVT_DISPLAYID_STEREO_SIDE_BY_SIDE: + pInfo->u3.side_by_side.view_identity = sub[0]; + break; + case NVT_DISPLAYID_STEREO_PIXEL_INTERLEAVED: + NVMISC_MEMCPY(pInfo->u3.pixel_interleaved.interleave_pattern, sub, 8); + break; + case NVT_DISPLAYID_STEREO_DUAL_INTERFACE: + pInfo->u3.left_right_separate.mirroring = DRF_VAL(T_DISPLAYID, _STEREO, _MIRRORING, sub[0]); + pInfo->u3.left_right_separate.polarity = DRF_VAL(T_DISPLAYID, _STEREO, _POLARITY, sub[0]); + break; + case NVT_DISPLAYID_STEREO_MULTIVIEW: + pInfo->u3.multiview.num_views = sub[0]; + pInfo->u3.multiview.code = sub[1]; + break; + case NVT_DISPLAYID_STEREO_PROPRIETARY: + break; + default: + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS 
parseDisplayIdTiledDisplay(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + DISPLAYID_TILED_DISPLAY_BLOCK * blk = (DISPLAYID_TILED_DISPLAY_BLOCK *)block; + if (blk->header.data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + + // For revision 0, we only allow one tiled display data block. + if (!blk->header.revision && pInfo->tile_topology_id.vendor_id) + return NVT_STATUS_SUCCESS; + + pInfo->tiled_display_revision = blk->header.revision; + + pInfo->tile_capability.bSingleEnclosure = blk->capability.single_enclosure; + pInfo->tile_capability.bHasBezelInfo = blk->capability.has_bezel_info; + pInfo->tile_capability.multi_tile_behavior = blk->capability.multi_tile_behavior; + pInfo->tile_capability.single_tile_behavior = blk->capability.single_tile_behavior; + + pInfo->tile_topology.row = ((blk->topo_loc_high.row << 5) | blk->topology_low.row) + 1; + pInfo->tile_topology.col = ((blk->topo_loc_high.col << 5) | blk->topology_low.col) + 1; + + pInfo->tile_location.x = (blk->topo_loc_high.x << 5) | blk->location_low.x; + pInfo->tile_location.y = (blk->topo_loc_high.y << 5) | blk->location_low.y; + + pInfo->native_resolution.width = ((blk->native_resolution.width_high<<8)|blk->native_resolution.width_low) + 1; + pInfo->native_resolution.height = ((blk->native_resolution.height_high<<8)|blk->native_resolution.height_low) + 1; + + pInfo->bezel_info.pixel_density = blk->bezel_info.pixel_density; + pInfo->bezel_info.top = (blk->bezel_info.top * blk->bezel_info.pixel_density) / 10; + pInfo->bezel_info.bottom = (blk->bezel_info.bottom * blk->bezel_info.pixel_density) / 10; + pInfo->bezel_info.right = (blk->bezel_info.right * blk->bezel_info.pixel_density) / 10; + pInfo->bezel_info.left = (blk->bezel_info.left * blk->bezel_info.pixel_density) / 10; + + pInfo->tile_topology_id.vendor_id = (blk->topology_id.vendor_id[2] << 16) | + (blk->topology_id.vendor_id[1] << 8 ) | + blk->topology_id.vendor_id[0]; + + pInfo->tile_topology_id.product_id = (blk->topology_id.product_id[1] << 8) | blk->topology_id.product_id[0]; + + pInfo->tile_topology_id.serial_number = (blk->topology_id.serial_number[3] << 24) | + (blk->topology_id.serial_number[2] << 16) | + (blk->topology_id.serial_number[1] << 8 ) | + blk->topology_id.serial_number[0]; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdCtaData(NvU8 * block, NVT_EDID_INFO *pInfo, NVT_DISPLAYID_INFO *pDisplayIdInfo) +{ + DISPLAYID_DATA_BLOCK_HEADER * blk = (DISPLAYID_DATA_BLOCK_HEADER*)block; + NVT_EDID_CEA861_INFO *p861info = &pInfo->ext861; + if (blk->data_bytes > NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + pDisplayIdInfo->cea_data_block_present = 1; + p861info->revision = blk->revision; + + //parse CEA tags which starts at 3rd byte from block + parseCta861DataBlockInfo(&block[3], blk->data_bytes, p861info); + + // update pInfo with basic hdmi info + // assumes each edid will only have one such block across multiple cta861 blocks (otherwise may create declaration conflict) + // in case of multiple such blocks, the last one takes precedence + parseCta861VsdbBlocks(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + + parseCta861HfScdb(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + + //parse HDR related information from the HDR static metadata data block + parseCea861HdrStaticMetadataDataBlock(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + + 
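+ // The video timing payload below is handled by the same CTA-861 short-descriptor parsers used for a native CTA extension block; the FROM_DISPLAYID_13_DATA_BLOCK argument tags the data as originating from a DisplayID data block rather than a standalone extension.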
// base video + parse861bShortTiming(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + // yuv420-only video + parse861bShortYuv420Timing(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + // CEA861-F at 7.5.12 section about VFPDB block. + if (p861info->total_vfpdb != 0) + { + parse861bShortPreferredTiming(p861info, pInfo, FROM_DISPLAYID_13_DATA_BLOCK); + } + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS parseDisplayIdDisplayInterfaceFeatures(NvU8 * block, NVT_DISPLAYID_INFO *pInfo) +{ + NvU8 i; + DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK * blk = (DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK *)block; + if (blk->header.data_bytes > DISPLAYID_INTERFACE_FEATURES_DATA_BLOCK_MAX_LEN) + { + // Assert since this error is ignored + nvt_assert(0); + return NVT_STATUS_ERR; + } + pInfo->supported_displayId2_0 = 1; + + // Color Depths + pInfo->u4.display_interface_features.rgb_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB16, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB14, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB12, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB10, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB8, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.rgb_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _RGB6, blk->supported_color_depth_rgb); + pInfo->u4.display_interface_features.ycbcr444_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_16, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_14, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_12, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_10, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_8, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr444_depth.support_6b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR444_6, blk->supported_color_depth_ycbcr444); + pInfo->u4.display_interface_features.ycbcr422_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_16, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_14, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_12, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_10, blk->supported_color_depth_ycbcr422); + pInfo->u4.display_interface_features.ycbcr422_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR422_8, blk->supported_color_depth_ycbcr422); + 
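+ // YCbCr 4:2:0 bit depths are only reported via this Interface Features block; the legacy Display Interface data block parsed above has no 4:2:0 fields.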
pInfo->u4.display_interface_features.ycbcr420_depth.support_16b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_16, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_14b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_14, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_12b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_12, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_10b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_10, blk->supported_color_depth_ycbcr420); + pInfo->u4.display_interface_features.ycbcr420_depth.support_8b = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _YCBCR420_8, blk->supported_color_depth_ycbcr420); + + // Minimum Pixel Rate at Which YCbCr 4:2:0 Encoding Is Supported + pInfo->u4.display_interface_features.minimum_pixel_rate_ycbcr420 = blk->minimum_pixel_rate_ycbcr420; + + // Audio capability + pInfo->u4.display_interface_features.audio_capability.support_32khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_32KHZ, blk->supported_audio_capability); + pInfo->u4.display_interface_features.audio_capability.support_44_1khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_44_1KHZ, blk->supported_audio_capability); + pInfo->u4.display_interface_features.audio_capability.support_48khz = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _AUDIO_SUPPORTED_48KHZ, blk->supported_audio_capability); + + // Colorspace and EOTF combination + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt2020_eotf_smpte_st2084 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT2020_EOTF_SMPTE_ST2084, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt2020_eotf_bt2020 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT2020_EOTF_BT2020, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_dci_p3_eotf_dci_p3 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_DCI_P3_EOTF_DCI_P3, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_adobe_rgb_eotf_adobe_rgb = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_ADOBE_RGB_EOTF_ADOBE_RGB, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt709_eotf_bt1886 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT709_EOTF_BT1886, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_bt601_eotf_bt601 = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_BT601_EOTF_BT601, blk->supported_colorspace_eotf_combination_1); + pInfo->u4.display_interface_features.colorspace_eotf_combination_1.support_colorspace_srgb_eotf_srgb = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _COLORSPACE_SRGB_EOTF_SRGB, blk->supported_colorspace_eotf_combination_1); + + // Additional support Colorspace and EOTF + pInfo->u4.display_interface_features.total_additional_colorspace_eotf.total = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_COLORSPACE_EOTF_TOTAL, blk->additional_supported_colorspace_eotf_total); + + for (i = 0; i < pInfo->u4.display_interface_features.total_additional_colorspace_eotf.total; i++) + { + 
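+ // Each additional_supported_colorspace_eotf[] entry packs one colorspace field and one EOTF field; they are extracted separately below.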
pInfo->u4.display_interface_features.additional_colorspace_eotf[i].support_colorspace = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_COLORSPACE, blk->additional_supported_colorspace_eotf[i]); + pInfo->u4.display_interface_features.additional_colorspace_eotf[i].support_eotf = DRF_VAL(T_DISPLAYID, _INTERFACE_FEATURES, _ADDITIONAL_SUPPORTED_EOTF, blk->additional_supported_colorspace_eotf[i]); + + } + return NVT_STATUS_SUCCESS; +} + + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_edidext_displayid20.c b/src/common/modeset/timing/nvt_edidext_displayid20.c new file mode 100644 index 000000000..88bf0e110 --- /dev/null +++ b/src/common/modeset/timing/nvt_edidext_displayid20.c @@ -0,0 +1,346 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+//
+// File: nvt_edidext_displayid20.c
+//
+// Purpose: to provide DisplayID 2.0 related services
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+#include "nvmisc.h"
+
+#include "displayid20.h"
+#include "edid.h"
+
+PUSH_SEGMENTS
+
+// DisplayId2 as EDID extension entry point functions
+static NVT_STATUS parseDisplayId20EDIDExtSection(DISPLAYID_2_0_SECTION *section, NVT_EDID_INFO *pEdidInfo);
+static NVT_STATUS parseDisplayId20EDIDExtDataBlocks(DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, NvU8 remainSectionLength, NvU8 *pCurrentDBLength, NVT_EDID_INFO *pEdidInfo);
+
+/**
+ *
+ * @brief Parses a displayId20 EDID Extension block, with timings stored in p and
+ * other info stored in pInfo
+ * @param p The EDID Extension Block (With a DisplayID in it)
+ * @param size Size of the displayId Extension Block
+ * @param pEdidInfo EDID struct containing DisplayID information and
+ * the timings
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS
+getDisplayId20EDIDExtInfo(
+ NvU8 *p,
+ NvU32 size,
+ NVT_EDID_INFO *pEdidInfo)
+{
+ DISPLAYID_2_0_SECTION *extSection = NULL;
+
+ if (p == NULL ||
+ size < sizeof(EDIDV1STRUC) ||
+ size > sizeof(EDIDV1STRUC) ||
+ p[0] != NVT_EDID_EXTENSION_DISPLAYID ||
+ pEdidInfo == NULL)
+ {
+ return NVT_STATUS_INVALID_PARAMETER;
+ }
+
+ // Calculate the All DisplayID20 Extension checksum
+ // The function name
+ if (computeDisplayId20SectionCheckSum(p, sizeof(EDIDV1STRUC)) != 0)
+ {
+ return NVT_STATUS_ERR;
+ // ret |= NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECK_SUM);
+ }
+
+ extSection = (DISPLAYID_2_0_SECTION *)(p + 1);
+
+ return parseDisplayId20EDIDExtSection(extSection, pEdidInfo);
+}
+
+/*
+ * @brief DisplayId20 as EDID extension block's "Section" entry point functions
+ */
+CODE_SEGMENT(PAGE_DD_CODE)
+NVT_STATUS
+parseDisplayId20EDIDExtSection(
+ DISPLAYID_2_0_SECTION * extSection,
+ NVT_EDID_INFO *pEdidInfo)
+{
+ NvU8 datablock_location = 0;
+ NvU8 datablock_length;
+ NvU8 remaining_length;
+
+ if ((extSection == NULL) ||
+ (extSection->header.section_bytes != 121))
+ {
+ return NVT_STATUS_ERR;
+ }
+
+ // It is based on the DisplayID v2.0 Errata E7
+ // First DisplayID2.0 section as EDID extension shall populate "Display Product Primary Use Case" byte with a value from 1h-8h based on the intended primary use case of the sink.
+ // Any subsequent DisplayID2.0 section EDID extension shall set the "Display Product Primary Use Case" byte to 0h.
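+ // For example, an EDID with two DisplayID 2.0 extensions is expected to report a use case of 1h-8h (and an extension_count of 0) in its first section and 0h in every subsequent section; the checks below assert when an EDID violates that rule.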
+ pEdidInfo->total_did2_extensions++; + + if (extSection->header.version == DISPLAYID_2_0_VERSION) + { + if (((pEdidInfo->total_did2_extensions == 1) && (extSection->header.product_type == 0 || + extSection->header.product_type > DISPLAYID_2_0_PROD_HMD_AR || + extSection->header.extension_count != 0)) || + (pEdidInfo->total_did2_extensions > 1 && extSection->header.product_type != 0)) + { + nvt_assert(0); // product_type value set incorrect in Display Product Primary Use Case field + } + + pEdidInfo->ext_displayid20.version = extSection->header.version; + pEdidInfo->ext_displayid20.revision = extSection->header.revision; + pEdidInfo->ext_displayid20.as_edid_extension = NV_TRUE; + } + else + { + return NVT_STATUS_INVALID_PARAMETER; + } + + // validate for section checksum before processing the data block + if (computeDisplayId20SectionCheckSum((const NvU8*)extSection, DISPLAYID_2_0_SECTION_SIZE_TOTAL(extSection->header)) != 0) + { + return NVT_STATUS_ERR; + } + + remaining_length = extSection->header.section_bytes; + + while (datablock_location < extSection->header.section_bytes) + { + DISPLAYID_2_0_DATA_BLOCK_HEADER * dbHeader = (DISPLAYID_2_0_DATA_BLOCK_HEADER *) (extSection->data + datablock_location); + NvU8 is_reserve = remaining_length > 3 && datablock_location == 0 && dbHeader->type == 0 && dbHeader->data_bytes > 0; + NvU8 i; + + // Check the padding. + if (dbHeader->type == 0 && !is_reserve) + { + for (i = 1 ; i < remaining_length; i++) + { + // All remaining bytes must all be 0. + if (extSection->data[datablock_location + i] != 0) + { + return NVT_STATUS_ERR; + } + } + + datablock_length = remaining_length; + } + else + { + if (parseDisplayId20EDIDExtDataBlocks( + dbHeader, + extSection->header.section_bytes - datablock_location, + &datablock_length, + pEdidInfo) != NVT_STATUS_SUCCESS) + return NVT_STATUS_ERR; + } + + datablock_location += datablock_length; + remaining_length -= datablock_length; + } + + return NVT_STATUS_SUCCESS; +} + +/* + * @brief DisplayId20 as EDID extension block's "Data Block" entry point functions + */ +CODE_SEGMENT(PAGE_DD_CODE) +static NVT_STATUS +parseDisplayId20EDIDExtDataBlocks( + DISPLAYID_2_0_DATA_BLOCK_HEADER *pDataBlock, + NvU8 RemainSectionLength, + NvU8 *pCurrentDBLength, + NVT_EDID_INFO *pEdidInfo) +{ + NVT_STATUS status = NVT_STATUS_SUCCESS; + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info = NULL; + + // size sanity checking + if ((pDataBlock == NULL || RemainSectionLength <= NVT_DISPLAYID_DATABLOCK_HEADER_LEN) || + (pDataBlock->data_bytes > RemainSectionLength - NVT_DISPLAYID_DATABLOCK_HEADER_LEN)) + return NVT_STATUS_ERR; + + if (pDataBlock->type < DISPLAYID_2_0_BLOCK_TYPE_PRODUCT_IDENTITY) + { + return NVT_STATUS_INVALID_PARAMETER; + } + + pDisplayId20Info = &pEdidInfo->ext_displayid20; + + *pCurrentDBLength = pDataBlock->data_bytes + NVT_DISPLAYID_DATABLOCK_HEADER_LEN; + + status = parseDisplayId20DataBlock(pDataBlock, pDisplayId20Info); + + // TODO : All the data blocks shall sync the data from the datablock in DisplayID2_0 to pEdidInfo + if (status == NVT_STATUS_SUCCESS && pDisplayId20Info->as_edid_extension == NV_TRUE) + { + switch (pDataBlock->type) + { + case DISPLAYID_2_0_BLOCK_TYPE_INTERFACE_FEATURES: + pDisplayId20Info->valid_data_blocks.interface_feature_present = NV_TRUE; + + // Supported - Color depth is supported for all supported timings. 
Supported timings include all DisplayID-exposed timings
+            // (that is, timings exposed using DisplayID timing types and CTA VICs)
+            if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayId20Info->interface_features.yuv444.bpcs))
+            {
+                pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_444;
+            }
+
+            if (IS_BPC_SUPPORTED_COLORFORMAT(pDisplayId20Info->interface_features.yuv422.bpcs))
+            {
+                pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_YCbCr_422;
+            }
+
+            if (pDisplayId20Info->interface_features.audio_capability.support_48khz ||
+                pDisplayId20Info->interface_features.audio_capability.support_44_1khz ||
+                pDisplayId20Info->interface_features.audio_capability.support_32khz)
+            {
+                pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO;
+            }
+
+            break;
+
+        // Defined by DisplayID v2.0 Errata E5:
+        // if the embedded CTA block contains a YCbCr 4:2:0 VDB/CMDB, then we follow those two blocks only:
+        // * support for 420 pixel encoding is limited to the timings exposed in the restricted set exposed in the CTA data block.
+        // * the "Min Pixel Rate at YCbCr420" field shall be set to 00h
+        case DISPLAYID_2_0_BLOCK_TYPE_CTA_DATA:
+            pDisplayId20Info->valid_data_blocks.cta_data_present = NV_TRUE;
+
+            // copy all the vendor specific data blocks from DisplayId20 to pEdidInfo
+            // TODO: mixed CTA extension block and DID2.0 extension block is not handled
+            NVMISC_MEMCPY(&pEdidInfo->hdmiLlcInfo,              &pDisplayId20Info->vendor_specific.hdmiLlc,  sizeof(NVT_HDMI_LLC_INFO));
+            NVMISC_MEMCPY(&pEdidInfo->hdmiForumInfo,            &pDisplayId20Info->vendor_specific.hfvs,     sizeof(NVT_HDMI_FORUM_INFO));
+            NVMISC_MEMCPY(&pEdidInfo->nvdaVsdbInfo,             &pDisplayId20Info->vendor_specific.nvVsdb,   sizeof(NVDA_VSDB_PARSED_INFO));
+            NVMISC_MEMCPY(&pEdidInfo->msftVsdbInfo,             &pDisplayId20Info->vendor_specific.msftVsdb, sizeof(MSFT_VSDB_PARSED_INFO));
+            NVMISC_MEMCPY(&pEdidInfo->hdr_static_metadata_info, &pDisplayId20Info->cta.hdrInfo,              sizeof(NVT_HDR_STATIC_METADATA));
+            NVMISC_MEMCPY(&pEdidInfo->dv_static_metadata_info,  &pDisplayId20Info->cta.dvInfo,               sizeof(NVT_DV_STATIC_METADATA));
+
+            // If a CTA861 extension already existed, transfer its revision/basic_caps to the CTA block embedded in DID20.
+            if (pEdidInfo->ext861.revision >= NVT_CEA861_REV_B)
+            {
+                pDisplayId20Info->cta.cta861_info.revision   = pEdidInfo->ext861.revision;
+                pDisplayId20Info->cta.cta861_info.basic_caps = pEdidInfo->ext861.basic_caps;
+                pDisplayId20Info->basic_caps                 = pEdidInfo->ext861.basic_caps;
+            }
+
+            // this is the DisplayID20 extension, so copy the raw CTA data carried in DID20 into the EDID's CTA block
+            NVMISC_MEMCPY(&pEdidInfo->ext861, &pDisplayId20Info->cta.cta861_info, sizeof(NVT_EDID_CEA861_INFO));
+            break;
+
+        case DISPLAYID_2_0_BLOCK_TYPE_DISPLAY_PARAM:
+            pDisplayId20Info->valid_data_blocks.parameters_present = NV_TRUE;
+
+            // EDID only supports 10-bit chromaticity, matching the OS D3DKMDT_2DOFFSET 10-bit format, so we don't need to transfer it here.
+ + pEdidInfo->input.u.digital.bpc = NVT_COLORDEPTH_HIGHEST_BPC(pDisplayId20Info->display_param.native_color_depth); + pEdidInfo->gamma = pDisplayId20Info->display_param.gamma_x100; + + if (pDisplayId20Info->display_param.audio_speakers_integrated == AUDIO_SPEAKER_INTEGRATED_SUPPORTED) + { + pDisplayId20Info->basic_caps |= NVT_DISPLAY_2_0_CAP_BASIC_AUDIO; + } + + break; + + default: + break; + } + } + + return status; +} + +/* @brief Update the correct color format / attribute of timings from interface feature data block + */ +CODE_SEGMENT(PAGE_DD_CODE) +void +updateColorFormatForDisplayId20ExtnTimings( + NVT_EDID_INFO *pInfo, + NvU32 timingIdx) +{ + // pDisplayId20Info parsed displayID20 info + NVT_DISPLAYID_2_0_INFO *pDisplayId20Info = &pInfo->ext_displayid20; + NVT_TIMING *pT= &pInfo->timing[timingIdx]; + + nvt_assert(timingIdx <= COUNT(pInfo->timing)); + + if (pDisplayId20Info->as_edid_extension) + { + if ((pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED || + pInfo->input.u.digital.video_interface == NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED || + pInfo->ext861.valid.H14B_VSDB || pInfo->ext861.valid.H20_HF_VSDB) && pInfo->ext861.revision >= NVT_CEA861_REV_A) + { + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 0, + 1, + pDisplayId20Info->interface_features.rgb444.bpc.bpc10, + pDisplayId20Info->interface_features.rgb444.bpc.bpc12, + pDisplayId20Info->interface_features.rgb444.bpc.bpc14, + pDisplayId20Info->interface_features.rgb444.bpc.bpc16); + } + else + { + // rgb444 (always support 6bpc and 8bpc as per DP spec 5.1.1.1.1 RGB Colorimetry) + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.rgb444, 1, + 1, + pDisplayId20Info->interface_features.rgb444.bpc.bpc10, + pDisplayId20Info->interface_features.rgb444.bpc.bpc12, + pDisplayId20Info->interface_features.rgb444.bpc.bpc14, + pDisplayId20Info->interface_features.rgb444.bpc.bpc16); + } + + // yuv444 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv444, 0, /* yuv444 does not support 6bpc */ + pDisplayId20Info->interface_features.yuv444.bpc.bpc8, + pDisplayId20Info->interface_features.yuv444.bpc.bpc10, + pDisplayId20Info->interface_features.yuv444.bpc.bpc12, + pDisplayId20Info->interface_features.yuv444.bpc.bpc14, + pDisplayId20Info->interface_features.yuv444.bpc.bpc16); + // yuv422 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv422, 0, /* yuv422 does not support 6bpc */ + pDisplayId20Info->interface_features.yuv422.bpc.bpc8, + pDisplayId20Info->interface_features.yuv422.bpc.bpc10, + pDisplayId20Info->interface_features.yuv422.bpc.bpc12, + pDisplayId20Info->interface_features.yuv422.bpc.bpc14, + pDisplayId20Info->interface_features.yuv422.bpc.bpc16); + + if (!NVT_DID20_TIMING_IS_CTA861(pInfo->timing[timingIdx].etc.flag, pInfo->timing[timingIdx].etc.status)) + { + // yuv420 + UPDATE_BPC_FOR_COLORFORMAT(pT->etc.yuv420, 0, /* yuv420 does not support 6bpc */ + pDisplayId20Info->interface_features.yuv420.bpc.bpc8, + pDisplayId20Info->interface_features.yuv420.bpc.bpc10, + pDisplayId20Info->interface_features.yuv420.bpc.bpc12, + pDisplayId20Info->interface_features.yuv420.bpc.bpc14, + pDisplayId20Info->interface_features.yuv420.bpc.bpc16); + } + } +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_gtf.c b/src/common/modeset/timing/nvt_gtf.c new file mode 100644 index 000000000..405a16ce4 --- /dev/null +++ b/src/common/modeset/timing/nvt_gtf.c @@ -0,0 +1,138 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvt_gtf.c +// +// Purpose: calculate gtf timing +// +//***************************************************************************** + +#include "nvBinSegment.h" +#include "nvmisc.h" + +#include "nvtiming_pvt.h" + +// calculate GTF timing + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +const NvU32 NVT_GTF_CELL_GRAN=8; +const NvU32 NVT_GTF_MIN_VSYNCBP=11; // in 550us (!!) [1000000:550 = 20000:11] +const NvU32 NVT_GTF_MIN_VPORCH=1; + +const NvU32 NVT_GTF_C_PRIME=30; // (gtf_C-gtf_J)*gtf_K/256+gtf_J; +const NvU32 NVT_GTF_M_PRIME=300; // NVT_GTFK/256*gtf_M; +const NvU32 NVT_GTF_VSYNC_RQD=3; +const NvU32 NVT_GTF_HSYNC_PERCENTAGE=8; // 8% HSync for GTF + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_CalcGTF(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT) +{ + NvU32 dwXCells, dwVSyncBP, dwVTotal, dwIdN, dwIdD, dwHBlank, dwHTCells, dwHSync, dwHFrontPorch, dwRefreshRate; + + // parameter check + if (pT == NULL) + return NVT_STATUS_ERR; + + if (width == 0 || height == 0 || rr == 0 ) + return NVT_STATUS_ERR; + + dwRefreshRate = rr; + dwXCells = a_div_b(width, NVT_GTF_CELL_GRAN); + + if(dwRefreshRate * NVT_GTF_MIN_VSYNCBP >= 20000) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // H period estimate less than 0 + + dwVSyncBP = a_div_b((height + NVT_GTF_MIN_VPORCH) * NVT_GTF_MIN_VSYNCBP * dwRefreshRate, + (20000 - NVT_GTF_MIN_VSYNCBP * dwRefreshRate)); + dwVTotal = dwVSyncBP + height + NVT_GTF_MIN_VPORCH; + + // Calculate the numerator and denominator of Ideal Duty Cycle + // NOTE: here dwIdN/dwIdN = IdealDutyCycle/GTF_C_Prime + dwIdD = dwVTotal * dwRefreshRate; + + if(dwIdD <= NVT_GTF_M_PRIME * 1000 / NVT_GTF_C_PRIME) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // Ideal duty cycle less than 0 + + dwIdN = dwIdD - NVT_GTF_M_PRIME * 1000 / NVT_GTF_C_PRIME; + + // A proper way to calculate dwXCells*dwIdN/(100*dwIdD/GTF_C_PRIME-dwIdN) + dwHBlank = axb_div_c(dwIdN*3, dwXCells, 2*(300*dwIdD/NVT_GTF_C_PRIME - dwIdN*3)); + dwHBlank = ( dwHBlank ) * 2 * NVT_GTF_CELL_GRAN; + dwHTCells = dwXCells + dwHBlank / NVT_GTF_CELL_GRAN; + dwHSync = a_div_b(dwHTCells * NVT_GTF_HSYNC_PERCENTAGE, 100) * NVT_GTF_CELL_GRAN; + if((dwHSync == 0) || (dwHSync*2 > dwHBlank)) + return NVT_STATUS_ERR;//NVT_STATUS_ERR_OUTOFRANGE; // HSync too small or too big. 
+ + dwHFrontPorch = dwHBlank/2-dwHSync; + + NVMISC_MEMSET(pT, 0, sizeof(NVT_TIMING)); + + pT->HVisible = (NvU16)(dwXCells*NVT_GTF_CELL_GRAN); + pT->VVisible = (NvU16)height; + + pT->HTotal = (NvU16)(dwHTCells*NVT_GTF_CELL_GRAN); + pT->HFrontPorch = (NvU16)dwHFrontPorch; + pT->HSyncWidth = (NvU16)dwHSync; + + pT->VTotal = (NvU16)dwVTotal; + pT->VFrontPorch = (NvU16)NVT_GTF_MIN_VPORCH; + pT->VSyncWidth = (NvU16)NVT_GTF_VSYNC_RQD; + + // A proper way to calculate fixed HTotal*VTotal*Rr/10000 + pT->pclk = axb_div_c(dwHTCells*dwVTotal, dwRefreshRate, 10000/NVT_GTF_CELL_GRAN); + + pT->HSyncPol = NVT_H_SYNC_NEGATIVE; + pT->VSyncPol = NVT_V_SYNC_POSITIVE; + pT->interlaced = 0; + + // fill in the extra timing info + pT->etc.flag = 0; + pT->etc.rr = (NvU16)rr; + pT->etc.rrx1k = axb_div_c((NvU32)pT->pclk, (NvU32)10000*(NvU32)1000, (NvU32)pT->HTotal*(NvU32)pT->VTotal); + pT->etc.aspect = 0; + pT->etc.rep = 0x1; + pT->etc.status = NVT_STATUS_GTF; + NVT_SNPRINTF((char *)pT->etc.name, 40, "GTF:%dx%dx%dHz",width, height, rr); + pT->etc.name[39] = '\0'; + pT->etc.rgb444.bpc.bpc8 = 1; + + // interlaced adjustment + if ((flag & NVT_PVT_INTERLACED_MASK) != 0) + { + if ((pT->VTotal & 0x1) != 0) + pT->interlaced = NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2; + else + pT->interlaced = NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2; + + pT->pclk >>= 1; + pT->VTotal >>= 1; + pT->VVisible = (pT->VVisible + 1) / 2; + } + + return NVT_STATUS_SUCCESS; +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvt_tv.c b/src/common/modeset/timing/nvt_tv.c new file mode 100644 index 000000000..7ff4a6ebd --- /dev/null +++ b/src/common/modeset/timing/nvt_tv.c @@ -0,0 +1,192 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+// +// File: nvt_tv.c +// +// Purpose: calculate tv based timing timing +// +//***************************************************************************** + +#include "nvBinSegment.h" + +#include "nvtiming_pvt.h" + +PUSH_SEGMENTS + +CONS_SEGMENT(PAGE_CONS) + +static const NVT_TIMING TV_TIMING[] = +{ + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10,6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_NTSC_M, "SDTV:NTSC_M"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10,6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_NTSC_J, "SDTV:NTSC_J"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_M, "SDTV:PAL_M"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_A, "SDTV:PAL_A"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_N, "SDTV:PAL_N"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_SDTV_PAL_NC, "SDTV:PAL_NC"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,240, 0,10,6,262, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1407, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_480I, "HDTV(analog):480i"}}, + {720, 0,15,8, 858, NVT_H_SYNC_NEGATIVE,480, 0,10,4,525, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,2700, {0,60,59940,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_480P, "HDTV(analog):480p"}}, + {720, 0,21,66,894, NVT_H_SYNC_POSITIVE,288, 0,10,8,312, NVT_V_SYNC_POSITIVE,NVT_INTERLACED, 1397, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_576I, "HDTV(analog):576i"}}, + {720, 0,10,8, 864, NVT_H_SYNC_NEGATIVE,576, 0,5, 4,625, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,2700, {0,50,50000,0x0403,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_576P, "HDTV(analog):576p"}}, + {1280,0,70,80, 1650,NVT_H_SYNC_NEGATIVE,720,0,5, 5,750, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,7418, {0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_720P, "HDTV(analog):720p"}}, + {1920,0,44,88,2200,NVT_H_SYNC_NEGATIVE,540, 0,2, 5,562, NVT_V_SYNC_NEGATIVE,NVT_INTERLACED, 7418, {0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080I, "HDTV(analog):1080i"}}, + {1920,0,44,88,2200,NVT_H_SYNC_NEGATIVE,1080,0,4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,14835,{0,60,59940,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P, "HDTV(analog):1080p"}}, + {1280,0,400,80,1980,NVT_H_SYNC_NEGATIVE,720,0,5, 5,750, NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,7425, {0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_720P50, "HDTV(analog):720p50"}}, + {1920,0,594,88,2750,NVT_H_SYNC_NEGATIVE,1080,0,4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,7425,{0,24,24000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P24,"HDTV(analog):1080p24"}}, + {1920,0,484,88,2640,NVT_H_SYNC_NEGATIVE,540, 0,4, 5,562, NVT_V_SYNC_NEGATIVE,NVT_INTERLACED, 7425,{0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080I50,"HDTV(analog):1080i50"}}, + {1920,0,484,88,2640,NVT_H_SYNC_NEGATIVE,1080,0,4, 5,1125,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,14850,{0,50,50000,0x1009,0x1,{0},{0},{0},{0},NVT_STATUS_HDTV_1080P50,"HDTV(analog):1080p50"}}, + {0,0,0,0,0,NVT_H_SYNC_NEGATIVE,0,0,0,0,0,NVT_V_SYNC_NEGATIVE,NVT_PROGRESSIVE,0,{0,0,0,0,0,{0},{0},{0},{0},0,""}} +}; + 
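+// Reading the first (NTSC_M) entry above against the NVT_TIMING field order
+// declared in nvtiming.h (a worked example; the mapping is simply the positional
+// initializer order):
+//
+//   HVisible=720, HBorder=0, HFrontPorch=21, HSyncWidth=66, HTotal=894, HSyncPol=positive,
+//   VVisible=240, VBorder=0, VFrontPorch=10, VSyncWidth=6,  VTotal=262, VSyncPol=positive,
+//   interlaced,   pclk=1407 (14.07 MHz, in 10 kHz units),
+//   etc = { flag=0, rr=60, rrx1k=59940, aspect=0x0403 (4:3), rep=0x1, ...,
+//           NVT_STATUS_SDTV_NTSC_M, "SDTV:NTSC_M" }
+//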
+//*********************************************** +//** Wrapper Structure to store Fake EDID data ** +//*********************************************** +typedef struct tagFAKE_TV_EDID +{ + NvU32 EdidType; + NvU32 EdidSize; + const NvU8* FakeEdid; +} FAKE_TV_EDID; + +// calculate the backend TV timing +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_GetTvTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 tvFormat, NVT_TIMING *pT) +{ + NvU32 i, j, k; + + // input check + if (pT == NULL) + return NVT_STATUS_ERR; + + if ((width == 0 || height == 0 || rr == 0) && tvFormat >= NVT_MAX_TV_FORMAT) + return NVT_STATUS_ERR; + + // handle double scan + if (height <= NVT_PVT_DOUBLE_SCAN_HEIGHT) + { + width <<= 1; + height <<= 1; + } + + // try the exact match first + if (tvFormat != NVT_AUTO_HDTV_FORMAT) + { + i = 0; + while (TV_TIMING[i].HVisible != 0) + { + if (NVT_GET_TIMING_STATUS_SEQ(TV_TIMING[i].etc.status) == tvFormat) + { + // find the match + *pT = TV_TIMING[i]; + return NVT_STATUS_SUCCESS; + } + + // move to the next entry + i++; + } + + // unknown TV format, return failure here + *pT = TV_TIMING[0]; + return NVT_STATUS_ERR; + } + + // we are doing auto HDTV format binding here + i = 0; + j = k = sizeof(TV_TIMING)/sizeof(TV_TIMING[0]) - 1; + while (TV_TIMING[i].HVisible != 0) + { + // #1: try the exact resolution/refreshrate/interlaced match + if (width == TV_TIMING[i].HVisible && + height == frame_height(TV_TIMING[i])&& + rr == TV_TIMING[i].etc.rr && + !!(flag & NVT_PVT_INTERLACED_MASK) == !!TV_TIMING[i].interlaced && + NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV) + { + // exact match, return from here + *pT = TV_TIMING[i]; + return NVT_STATUS_SUCCESS; + } + + // #2: try to closest match with interlaced check ON + if (!!(flag & NVT_PVT_INTERLACED_MASK) == !!TV_TIMING[i].interlaced && + NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV) + { + if (abs_delta(width, TV_TIMING[i].HVisible) <= abs_delta(width, TV_TIMING[j].HVisible) && + abs_delta(height, frame_height(TV_TIMING[i])) <= abs_delta(height, frame_height(TV_TIMING[j])) && + abs_delta(rr, TV_TIMING[i].etc.rr) <= abs_delta(rr, TV_TIMING[j].etc.rr) && + width <= TV_TIMING[i].HVisible && + height <= frame_height(TV_TIMING[i])) + { + j = i; + } + } + + // #3: try to closest match with interlaced check OFF + if (NVT_GET_TIMING_STATUS_TYPE(TV_TIMING[i].etc.status) == NVT_TYPE_HDTV) + { + if (abs_delta(width, TV_TIMING[i].HVisible) <= abs_delta(width, TV_TIMING[k].HVisible) && + abs_delta(height, frame_height(TV_TIMING[i])) <= abs_delta(height, frame_height(TV_TIMING[k])) && + abs_delta(rr, TV_TIMING[i].etc.rr) <= abs_delta(rr, TV_TIMING[j].etc.rr) && + width <= TV_TIMING[i].HVisible && + height <= frame_height(TV_TIMING[i])) + { + k = i; + } + } + + // move to the next entry + i++; + } + + // return the closest matched timing here + if (TV_TIMING[j].HVisible != 0) + { + *pT = TV_TIMING[j]; + } + else if (TV_TIMING[k].HVisible != 0) + { + *pT = TV_TIMING[k]; + } + else + { + *pT = TV_TIMING[0]; + } + + // set the mismatch status + if (pT->HVisible != width || frame_height(*pT) != height) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_SIZE); + } + if (pT->etc.rr != rr) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_RR); + } + if (!!pT->interlaced != !!(flag & NVT_PVT_INTERLACED_MASK)) + { + NVT_SET_TIMING_STATUS_MISMATCH(pT->etc.status, NVT_STATUS_TIMING_MISMATCH_FORMAT); + } + + return NVT_STATUS_SUCCESS; + +} + 
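+// Usage sketch for NvTiming_GetTvTiming (illustrative only; the request values
+// below are made up, and NVT_GET_TIMING_STATUS_MISMATCH comes from nvtiming.h):
+// ask for the closest HDTV match to a 1280x700@60 progressive mode and inspect
+// the mismatch bits to see how close the match was.
+//
+//   NVT_TIMING t;
+//   if (NvTiming_GetTvTiming(1280, 700, 60, 0, NVT_AUTO_HDTV_FORMAT, &t) == NVT_STATUS_SUCCESS)
+//   {
+//       // The nearest HDTV entry is 1280x720p@60; since the visible height
+//       // differs from the request, NVT_STATUS_TIMING_MISMATCH_SIZE is set.
+//       if (NVT_GET_TIMING_STATUS_MISMATCH(t.etc.status) & NVT_STATUS_TIMING_MISMATCH_SIZE)
+//       {
+//           // the caller can scale or letterbox the 1280x700 surface into 1280x720
+//       }
+//   }
+//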
+POP_SEGMENTS
diff --git a/src/common/modeset/timing/nvt_util.c b/src/common/modeset/timing/nvt_util.c
new file mode 100644
index 000000000..0c181e37d
--- /dev/null
+++ b/src/common/modeset/timing/nvt_util.c
@@ -0,0 +1,370 @@
+//*****************************************************************************
+//
+// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the "Software"),
+// to deal in the Software without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+//
+// File: nvt_util.c
+//
+// Purpose: provide the utility functions for timing library
+//
+//*****************************************************************************
+
+#include "nvBinSegment.h"
+
+#include "nvtiming_pvt.h"
+
+PUSH_SEGMENTS
+
+CONS_SEGMENT(PAGE_CONS)
+
+// The following table was generated w/ this program:
+/*
+#include <stdio.h>
+
+#define CRC32_POLYNOMIAL 0xEDB88320
+
+void main()
+{
+    unsigned int crc = 0, i = 0, j = 0;
+    unsigned int CRCTable[256];
+
+    for (i = 0; i < 256 ; i++)
+    {
+        crc = i;
+        for (j = 8; j > 0; j--)
+        {
+            if (crc & 1)
+                crc = (crc >> 1) ^ CRC32_POLYNOMIAL;
+            else
+                crc >>= 1;
+        }
+        CRCTable[i] = crc;
+    }
+
+    printf("static const NvU32 s_CRCTable[256] = {");
+    for (i = 0; i < 256; i++)
+    {
+        printf("%s0x%08X%s",
+               ((i % 10 == 0) ? "\n " : ""),
+               CRCTable[i],
+               ((i != 255) ?
", " : " ")); + } + printf("};\n"); +} +*/ +static const NvU32 s_CRCTable[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, + 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, + 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, + 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, + 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, + 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, + 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, + 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, + 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, + 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, + 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, + 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, + 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, + 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 a_div_b(NvU32 a, NvU32 b) +{ + if (b == 0) + return 0xFFFFFFFF; + + return (a + b/2)/b; +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 axb_div_c(NvU32 a, NvU32 b, NvU32 c) +{ + NvU32 AhxBl, AlxBh; + NvU32 AxB_high, AxB_low; + NvU32 AxB_div_C_low; + + if (c==0) + return 0xFFFFFFFF; + + // calculate a*b + 
AhxBl = (a>>16)*(b&0xFFFF); + AlxBh = (a&0xFFFF)*(b>>16); + + AxB_high = (a>>16) * (b>>16); + AxB_low = (a&0xFFFF) * (b&0xFFFF); + + AxB_high += AlxBh >> 16; + AxB_high += AhxBl >> 16; + + if ((AxB_low + (AlxBh<<16))< AxB_low) + AxB_high ++; + AxB_low += AlxBh << 16; + + if ((AxB_low + (AhxBl<<16)) < AxB_low) + AxB_high ++; + AxB_low += AhxBl << 16; + + AxB_div_C_low = AxB_low/c; + AxB_div_C_low += 0xFFFFFFFF / c * (AxB_high % c); + AxB_div_C_low += ((0xFFFFFFFF % c + 1) * (AxB_high % c) + (AxB_low % c) + c/2) / c; + + + return AxB_div_C_low; +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c) +{ + // NvU64 arithmetic to keep precision and avoid floats + // a*b/c = (a/c)*b + ((a%c)*b + c/2)/c + return ((a/c)*b + ((a%c)*b + c/2)/c); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 calculateCRC32(NvU8* pBuf, NvU32 bufsize) +{ + NvU32 crc32 = 0xFFFFFFFF, temp1, temp2, count = bufsize; + + if (bufsize == 0 || pBuf == NULL) + { + return 0; + } + + while (count-- != 0) + { + temp1 = (crc32 >> 8) & 0x00FFFFFF; + temp2 = s_CRCTable[(crc32 ^ *pBuf++) & 0xFF]; + crc32 = temp1 ^ temp2; + } + crc32 ^= 0xFFFFFFFF; + + return crc32; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvBool isChecksumValid(NvU8 *pBuf) +{ + NvU8 i; + NvU8 checksum = 0; + + for (i= 0; i < NVT_EDID_BLOCK_SIZE; i++) + { + checksum += pBuf[i]; + } + + if ((checksum & 0xFF) == 0) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +CODE_SEGMENT(PAGE_DD_CODE) +void patchChecksum(NvU8 *pBuf) +{ + NvU8 i; + NvU8 chksum = 0; + + for (i = 0; i < NVT_EDID_BLOCK_SIZE; i++) + { + chksum += pBuf[i]; + } + chksum &= 0xFF; + + // The 1-byte sum of all 128 bytes in this EDID block shall equal zero + // The Checksum Byte (at address 7Fh) shall contain a value such that a checksum of the entire + // 128-byte BASE EDID equals 00h. + if (chksum) + { + pBuf[127] = 0xFF & (pBuf[127] + (0x100 - chksum)); + } +} + +CODE_SEGMENT(PAGE_DD_CODE) +NVT_STATUS NvTiming_ComposeCustTimingString(NVT_TIMING *pT) +{ + if (pT == NULL) + return NVT_STATUS_ERR; + + NVT_SNPRINTF((char *)pT->etc.name, 40, "CUST:%dx%dx%d.%03dHz%s",pT->HVisible, (pT->interlaced ? 2 : 1)*pT->VVisible , pT->etc.rrx1k/1000, pT->etc.rrx1k%1000, (pT->interlaced ? 
"/i" : "")); + pT->etc.name[39] = '\0'; + + return NVT_STATUS_SUCCESS; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU16 NvTiming_CalcRR(NvU32 pclk, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal) +{ + NvU16 rr = 0; + + if (interlaced) + { + NvU32 totalPixelsIn2Fields = (NvU32)HTotal * ((NvU32)VTotal * 2 + 1); + + if (totalPixelsIn2Fields != 0) + { + rr = (NvU16)axb_div_c(pclk * 2, 10000, totalPixelsIn2Fields); + } + } + else + { + NvU32 totalPixels = (NvU32)HTotal * VTotal; + + if (totalPixels != 0) + { + rr = (NvU16)axb_div_c(pclk, 10000, totalPixels); + } + } + return rr; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_CalcRRx1k(NvU32 pclk, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal) +{ + NvU32 rrx1k = 0; + + if (interlaced) + { + NvU32 totalPixelsIn2Fields = (NvU32)HTotal * ((NvU32)VTotal * 2 + 1); + + if (totalPixelsIn2Fields != 0) + { + rrx1k = (NvU32)axb_div_c(pclk * 2, 10000000, totalPixelsIn2Fields); + } + } + else + { + NvU32 totalPixels = (NvU32)HTotal * VTotal; + + if (totalPixels != 0) + { + rrx1k = (NvU32)axb_div_c(pclk, 10000000, totalPixels); + } + } + + return rrx1k; +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU32 NvTiming_IsRoundedRREqual(NvU16 rr1, NvU32 rr1x1k, NvU16 rr2) +{ + return ((rr1 >= (rr1x1k/1000)) && (rr1 <= (rr1x1k + 500) / 1000) && + (rr2 >= (rr1x1k/1000)) && (rr2 <= (rr1x1k + 500) / 1000)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingExactEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + if ((pT1 == NULL) || (pT2 == NULL)) + { + return 0; + } + + return (( pT1->HVisible == pT2->HVisible) && + ( pT1->HBorder == pT2->HBorder) && + ( pT1->HFrontPorch == pT2->HFrontPorch) && + ( pT1->HSyncWidth == pT2->HSyncWidth) && + ( pT1->HSyncPol == pT2->HSyncPol) && + ( pT1->HTotal == pT2->HTotal) && + ( pT1->VVisible == pT2->VVisible) && + ( pT1->VBorder == pT2->VBorder) && + ( pT1->VFrontPorch == pT2->VFrontPorch) && + ( pT1->VSyncWidth == pT2->VSyncWidth) && + ( pT1->VSyncPol == pT2->VSyncPol) && + ( pT1->VTotal == pT2->VTotal) && + ( pT1->etc.rr == pT2->etc.rr) && + (!!pT1->interlaced == !!pT2->interlaced)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingExactEqualEx(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + NvU32 bIsTimingExactEqual = NvTiming_IsTimingExactEqual(pT1, pT2); + return (bIsTimingExactEqual && (pT1->etc.rrx1k == pT2->etc.rrx1k)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 NvTiming_IsTimingRelaxedEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2) +{ + if ((pT1 == NULL) || (pT2 == NULL)) + { + return 0; + } + + return (( pT1->HVisible == pT2->HVisible) && + ( pT1->HBorder == pT2->HBorder) && + ( pT1->HFrontPorch == pT2->HFrontPorch) && + ( pT1->HSyncWidth == pT2->HSyncWidth) && + //( pT1->HSyncPol == pT2->HSyncPol) && // skip the polarity check to tolerate mismatch h/v sync polarities in 18-byte DTD + ( pT1->HTotal == pT2->HTotal) && + ( pT1->VVisible == pT2->VVisible) && + ( pT1->VBorder == pT2->VBorder) && + ( pT1->VFrontPorch == pT2->VFrontPorch) && + ( pT1->VSyncWidth == pT2->VSyncWidth) && + //( pT1->VSyncPol == pT2->VSyncPol) && // skip the polarity check to tolerate mismatch h/v sync polarities in 18-byte DTD + ( pT1->VTotal == pT2->VTotal) && + ( pT1->etc.rr == pT2->etc.rr) && + (!!pT1->interlaced == !!pT2->interlaced)); +} + +CODE_SEGMENT(NONPAGE_DD_CODE) +NvU32 RRx1kToPclk (NVT_TIMING *pT) +{ + return axb_div_c(pT->HTotal * (pT->VTotal + ((pT->interlaced != 0) ? (pT->VTotal + 1) : 0)), + pT->etc.rrx1k, + 1000 * ((pT->interlaced != 0) ? 
20000 : 10000)); +} + +CODE_SEGMENT(PAGE_DD_CODE) +NvU16 NvTiming_MaxFrameWidth(NvU16 HVisible, NvU16 repMask) +{ + NvU16 minPixelRepeat; + + if (repMask == 0) + { + return HVisible; + } + + minPixelRepeat = 1; + while ((repMask & 1) == 0) + { + repMask >>= 1; + minPixelRepeat++; + } + + return (HVisible / minPixelRepeat); +} + +POP_SEGMENTS diff --git a/src/common/modeset/timing/nvtiming.h b/src/common/modeset/timing/nvtiming.h new file mode 100644 index 000000000..70ee491cf --- /dev/null +++ b/src/common/modeset/timing/nvtiming.h @@ -0,0 +1,5342 @@ +//**************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvtiming.h +// +// Purpose: This file is the common header all nv timing library clients. +// +//***************************************************************************** + +#ifndef __NVTIMING_H__ +#define __NVTIMING_H__ + +#include "nvtypes.h" + + +#define abs_delta(a,b) ((a)>(b)?((a)-(b)):((b)-(a))) + +//*********************** +// The Timing Structure +//*********************** +// +// Nvidia specific timing extras +typedef struct tagNVT_HDMIEXT +{ + // in the case of stereo, the NVT_TIMING structure will hold the 2D + // instance of the timing parameters, and the stereo extension will + // contain the variants required to produce the stereo frame. 
+ NvU8 StereoStructureType; + NvU8 SideBySideHalfDetail; + NvU16 VActiveSpace[2]; +} NVT_HDMIEXT; +#define NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(x) (1 << (x)) +#define NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK) +#define NVT_HDMI_3D_SUPPORTED_FIELD_ALT_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT) +#define NVT_HDMI_3D_SUPPORTED_LINE_ALT_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT) +#define NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEFULL_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL) +#define NVT_HDMI_3D_SUPPORTED_LDEPTH_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH) +#define NVT_HDMI_3D_SUPPORTED_LDEPTHGFX_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX) +#define NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM) +#define NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK NVT_HDMI_3D_SUPPORTED_STRUCT_MASK(NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF) +#define NVT_ALL_HDMI_3D_STRUCT_SUPPORTED_MASK (NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK | NVT_HDMI_3D_SUPPORTED_TOPBOTTOM_MASK | NVT_HDMI_3D_SUPPORTED_SIDEBYSIDEHALF_MASK) + +typedef union tagNVT_COLORDEPTH +{ + NvU8 bpcs; + struct + { + NvU8 bpc6 : 1; + NvU8 bpc8 : 1; + NvU8 bpc10 : 1; + NvU8 bpc12 : 1; + NvU8 bpc14 : 1; + NvU8 bpc16 : 1; + NvU8 rsrvd1 : 1; // must be 0 + NvU8 rsrvd2 : 1; // must be 0 + } bpc; +} NVT_COLORDEPTH; + +#define IS_BPC_SUPPORTED_COLORFORMAT(colorDepth) (!!((NvU8)(colorDepth))) +#define UPDATE_BPC_FOR_COLORFORMAT(colorFormat, b6bpc, b8bpc, b10bpc, b12bpc, b14bpc, b16bpc) \ + if ((b6bpc)) ((colorFormat).bpc.bpc6 = 1); \ + if ((b8bpc)) ((colorFormat).bpc.bpc8 = 1); \ + if ((b10bpc)) ((colorFormat).bpc.bpc10 = 1); \ + if ((b12bpc)) ((colorFormat).bpc.bpc12 = 1); \ + if ((b14bpc)) ((colorFormat).bpc.bpc14 = 1); \ + if ((b16bpc)) ((colorFormat).bpc.bpc16 = 1); + +#define SET_BPC_FOR_COLORFORMAT(_colorFormat, _bpc) \ + if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_6) \ + ((_colorFormat).bpc.bpc6 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_8) \ + ((_colorFormat).bpc.bpc8 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_10) \ + ((_colorFormat).bpc.bpc10 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_12) \ + ((_colorFormat).bpc.bpc12 = 1); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_16) ((_colorFormat).bpc.bpc16 = 1); + +#define CLEAR_BPC_FOR_COLORFORMAT(_colorFormat, _bpc) \ + if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_6) \ + ((_colorFormat).bpc.bpc6 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_8) \ + ((_colorFormat).bpc.bpc8 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_10) \ + ((_colorFormat).bpc.bpc10 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_12) \ + ((_colorFormat).bpc.bpc12 = 0); \ + else if ((_bpc) == NVT_EDID_VIDEOSIGNAL_BPC_16) ((_colorFormat).bpc.bpc16 = 0); + +#define NVT_COLORDEPTH_HIGHEST_BPC(_colorFormat) \ + (_colorFormat).bpc.bpc16 ? NVT_EDID_VIDEOSIGNAL_BPC_16 : \ + (_colorFormat).bpc.bpc12 ? NVT_EDID_VIDEOSIGNAL_BPC_12 : \ + (_colorFormat).bpc.bpc10 ? NVT_EDID_VIDEOSIGNAL_BPC_10 : \ + (_colorFormat).bpc.bpc8 ? NVT_EDID_VIDEOSIGNAL_BPC_8 : \ + (_colorFormat).bpc.bpc6 ? NVT_EDID_VIDEOSIGNAL_BPC_6 : NVT_EDID_VIDEOSIGNAL_BPC_NOT_DEFINED + +typedef struct tagNVT_TIMINGEXT +{ + NvU32 flag; // reserve for NV h/w based enhancement like double-scan. 
+ NvU16 rr; // the logical refresh rate to present + NvU32 rrx1k; // the physical vertical refresh rate in 0.001Hz + NvU32 aspect; // the display aspect ratio Hi(aspect):horizontal-aspect, Low(aspect):vertical-aspect + // + // Bitmask of one-hot encoded possible pixel repetitions: + // 0x1: no pixel repetition (i.e., display each pixel once) + // 0x2: each pixel is displayed twice horizontally; + // 0x3: use either no pixel repetition or display each pixel twice + // ... + // + NvU16 rep; + NVT_COLORDEPTH rgb444; // each bit within is set if rgb444 supported on that bpc + NVT_COLORDEPTH yuv444; // each bit within is set if yuv444 supported on that bpc + NVT_COLORDEPTH yuv422; // each bit within is set if yuv422 supported on that bpc + NVT_COLORDEPTH yuv420; // each bit within is set if yuv420 supported on that bpc + NvU32 status; // the timing standard being used + NvU8 name[51]; // the name of the timing +}NVT_TIMINGEXT; +// +// +//The very basic timing structure based on the VESA standard: +// +// |<----------------------------htotal--------------------------->| +// ---------"active" video-------->|<-------blanking------>|<----- +// |<-------hvisible-------->|<-hb->|<-hfp->|<-hsw->|<-hbp->|<-hb->| +// ----------+-------------------------+ | | | | | +// A A | | | | | | | +// : : | | | | | | | +// : : | | | | | | | +// :verical| addressable video | | | | | | +// :visible| | | | | | | +// : : | | | | | | | +// : : | | | | | | | +// verical V | | | | | | | +// total --+-------------------------+ | | | | | +// : vb border | | | | | +// : -----------------------------------+ | | | | +// : vfp front porch | | | | +// : -------------------------------------------+ | | | +// : vsw sync width | | | +// : ---------------------------------------------------+ | | +// : vbp back porch | | +// : -----------------------------------------------------------+ | +// V vb border | +// --------------------------------------------------------------------------+ +// +typedef struct tagNVT_TIMING +{ + // VESA scan out timing parameters: + NvU16 HVisible; //horizontal visible + NvU16 HBorder; //horizontal border + NvU16 HFrontPorch; //horizontal front porch + NvU16 HSyncWidth; //horizontal sync width + NvU16 HTotal; //horizontal total + NvU8 HSyncPol; //horizontal sync polarity: 1-negative, 0-positive + + NvU16 VVisible; //vertical visible + NvU16 VBorder; //vertical border + NvU16 VFrontPorch; //vertical front porch + NvU16 VSyncWidth; //vertical sync width + NvU16 VTotal; //vertical total + NvU8 VSyncPol; //vertical sync polarity: 1-negative, 0-positive + + NvU16 interlaced; //1-interlaced, 0-progressive + NvU32 pclk; //pixel clock in 10KHz + + //other timing related extras + NVT_TIMINGEXT etc; +}NVT_TIMING; + +#define NVT_MAX_TOTAL_TIMING 128 + +// +// The below VSync/HSync Polarity definition have been inverted to match +// HW Display Class definition. +// timing related constants: +#define NVT_H_SYNC_POSITIVE 0 +#define NVT_H_SYNC_NEGATIVE 1 +#define NVT_H_SYNC_DEFAULT NVT_H_SYNC_NEGATIVE +// +#define NVT_V_SYNC_POSITIVE 0 +#define NVT_V_SYNC_NEGATIVE 1 +#define NVT_V_SYNC_DEFAULT NVT_V_SYNC_POSITIVE +// +#define NVT_PROGRESSIVE 0 +#define NVT_INTERLACED 1 +#define NVT_INTERLACED_EXTRA_VBLANK_ON_FIELD2 1 +#define NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2 2 + +// timing related macros: +#define NVT_FRAME_HEIGHT(_vvisible_, _interlaced_) ((_vvisible_) * ((_interlaced_ != 0) ? 
2 : 1))
+
+//*************************************
+// The Timing Status encoded in
+// NVT_TIMING::NVT_TIMINGEXT::status
+//*************************************
+//
+// TIMING_STATUS has the following kinds of info:
+//
+// NVT_TIMING::NVT_TIMINGEXT::status
+//
+// +----+----+---------+----+----+------------------------------+---+---------------+---+----------------+
+// bit31 bit30 bit29  bit22 bit21 bit20  bit16 bit15  bit8 bit7  bit0
+// |native|cust|<-cta format->|Dual|<--------mismatch status-------->|<---timing type--->|<---timing seq#--->|
+//
+// 1. the monitor preferred timing flag and cust EDID entry flag
+//
+#define NVT_STATUS_TIMING_NATIVE_FLAG_MASK    0x80000000
+#define NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT   31
+#define NVT_IS_NATIVE_TIMING(n)               (((n)&NVT_STATUS_TIMING_NATIVE_FLAG_MASK)>>NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT)
+#define NVT_SET_NATIVE_TIMING_FLAG(n)         ((n)|=1U<<NVT_STATUS_TIMING_NATIVE_FLAG_SHIFT)
+#define NVT_PREFERRED_TIMING_MODE_MASK        0x2
+//
+#define NVT_STATUS_TIMING_CUST_ENTRY_MASK     0x40000000
+#define NVT_STATUS_TIMING_CUST_ENTRY_SHIFT    30
+#define NVT_IS_CUST_ENTRY(n)                  (((n)&NVT_STATUS_TIMING_CUST_ENTRY_MASK)>>NVT_STATUS_TIMING_CUST_ENTRY_SHIFT)
+#define NVT_SET_CUST_ENTRY_FLAG(n)            ((n)|=1<<NVT_STATUS_TIMING_CUST_ENTRY_SHIFT)
+//
+// 2. the CTA (CEA-861) format index and the CTA/DMT dual-standard flag
+//    (bits 29:22 and bit 21 in the layout above)
+#define NVT_STATUS_TIMING_CEA_FORMAT_MASK     0x3FC00000
+#define NVT_STATUS_TIMING_CEA_FORMAT_SHIFT    22
+#define NVT_GET_CEA_FORMAT(n)                 (((n)&NVT_STATUS_TIMING_CEA_FORMAT_MASK)>>NVT_STATUS_TIMING_CEA_FORMAT_SHIFT)
+#define NVT_SET_CEA_FORMAT(n,index)           {(n)&=~NVT_STATUS_TIMING_CEA_FORMAT_MASK;(n)|=(index)<<NVT_STATUS_TIMING_CEA_FORMAT_SHIFT;}
+#define NVT_STATUS_TIMING_CEA_DMT_MASK        0x00200000
+#define NVT_STATUS_TIMING_CEA_DMT_SHIFT       21
+#define NVT_IS_CEA_DMT_DUAL_STANDARD(n)       (((n)&NVT_STATUS_TIMING_CEA_DMT_MASK)>>NVT_STATUS_TIMING_CEA_DMT_SHIFT)
+#define NVT_SET_CEA_DMT_DUAL_STANDARD_FLAG(n) ((n)|=NVT_STATUS_TIMING_CEA_DMT_MASK)
+//
+//
+// 3. the mismatch status
+#define NVT_STATUS_TIMING_MISMATCH_MASK       0x001F0000
+#define NVT_STATUS_TIMING_MISMATCH_SHIFT      16
+#define NVT_STATUS_TIMING_MISMATCH_SIZE       0x1  //visible width and height don't match the requested width/height
+#define NVT_STATUS_TIMING_MISMATCH_RR         0x2  //the refresh rate doesn't match the requested one
+#define NVT_STATUS_TIMING_MISMATCH_FORMAT     0x4  //other timing info doesn't match (i.e. progressive/interlaced, double, reduced-blanking etc...)
+#define NVT_STATUS_TIMING_MISMATCH_ALIGNMENT  0x8  //the requested alignment doesn't match the spec
+//
+// macros to set/get the timing mismatch status
+#define NVT_SET_TIMING_STATUS_MISMATCH(m,n)   ((m)|=(((n)<<NVT_STATUS_TIMING_MISMATCH_SHIFT)&NVT_STATUS_TIMING_MISMATCH_MASK))
+#define NVT_GET_TIMING_STATUS_MISMATCH(n)     (((n)&NVT_STATUS_TIMING_MISMATCH_MASK)>>NVT_STATUS_TIMING_MISMATCH_SHIFT)
+//
+//
+// 4. the timing type
+//
+#define NVT_STATUS_TIMING_TYPE_MASK           0x0000FF00
+#define NVT_STATUS_TIMING_TYPE_SHIFT          8
+//
+typedef enum NVT_TIMING_TYPE
+{
+    NVT_TYPE_DMT = 1,             // DMT
+    NVT_TYPE_GTF,                 // GTF
+    NVT_TYPE_ASPR,                // wide aspect ratio timing, for legacy support only
+    NVT_TYPE_NTSC_TV,             // NTSC TV timing, for legacy support only
+    NVT_TYPE_PAL_TV,              // PAL TV timing, for legacy support only
+    NVT_TYPE_CVT,                 // CVT timing
+    NVT_TYPE_CVT_RB,              // CVT timing with reduced blanking
+    NVT_TYPE_CUST,                // Customized timing
+    NVT_TYPE_EDID_DTD,            // EDID detailed timing
+    NVT_TYPE_EDID_STD,            // EDID standard timing
+    NVT_TYPE_EDID_EST,            // EDID established timing
+    NVT_TYPE_EDID_CVT,            // EDID defined CVT timing (EDID 1.4)
+    NVT_TYPE_EDID_861ST,          // EDID defined CEA/EIA 861 timing (in the EDID 861 extension)
+    NVT_TYPE_NV_PREDEFINED,       // NV pre-defined timings (PsF timings)
+    NVT_TYPE_DMT_RB,              // DMT timing with reduced blanking
+    NVT_TYPE_EDID_EXT_DTD,        // EDID detailed timing in the extension
+    NVT_TYPE_SDTV,                // SDTV timing (including NTSC, PAL etc)
+    NVT_TYPE_HDTV,                // HDTV timing (480p,480i,720p, 1080i etc)
+    NVT_TYPE_SMPTE,               // deprecated ? still used by drivers\unix\nvkms\src\nvkms-dpy.c
+    NVT_TYPE_EDID_VTB_EXT,        // EDID defined VTB extension timing
+    NVT_TYPE_EDID_VTB_EXT_STD,    // EDID defined VTB extension standard timing
+    NVT_TYPE_EDID_VTB_EXT_DTD,    // EDID defined VTB extension detailed timing
+    NVT_TYPE_EDID_VTB_EXT_CVT,    // EDID defined VTB extension cvt timing
+    NVT_TYPE_HDMI_STEREO,         // EDID defined HDMI stereo timing
+    NVT_TYPE_DISPLAYID_1,         // DisplayID Type 1 timing
+    NVT_TYPE_DISPLAYID_2,         // DisplayID Type 2 timing
+    NVT_TYPE_HDMI_EXT,            // EDID defined HDMI extended resolution timing (UHDTV - 4k, 8k etc.)
+    NVT_TYPE_CUST_AUTO,           // Customized timing generated automatically by NVCPL
+    NVT_TYPE_CUST_MANUAL,         // Customized timing entered manually by user
+    NVT_TYPE_CVT_RB_2,            // CVT timing with reduced blanking V2
+    NVT_TYPE_DMT_RB_2,            // DMT timing with reduced blanking V2
+    NVT_TYPE_DISPLAYID_7,         // DisplayID 2.0 detailed timing - Type VII
+    NVT_TYPE_DISPLAYID_8,         // DisplayID 2.0 enumerated timing - Type VIII
+    NVT_TYPE_DISPLAYID_9,         // DisplayID 2.0 formula-based timing - Type IX
+    NVT_TYPE_DISPLAYID_10,        // DisplayID 2.0 formula-based timing - Type X
+}NVT_TIMING_TYPE;
+//
+// 5. the timing sequence number, e.g. the TV format or the EIA/CEA-861 predefined timing format
+//    **the numbers are chosen to match the NV h/w format**
+//
+#define NVT_STATUS_TIMING_SEQ_MASK            0x000000FF
+//
+typedef enum NVT_TV_FORMAT
+{
+    NVT_NTSC   = 0,
+    NVT_NTSC_M = 0,
+    NVT_NTSC_J = 1,
+    NVT_PAL    = 2,
+    NVT_PAL_M  = 2,
+    NVT_PAL_A  = 3,
+    NVT_PAL_N  = 4,
+    NVT_PAL_NC = 5,
+    NVT_HD576I = 8,
+    NVT_HD480I,
+    NVT_HD480P,
+    NVT_HD576P,
+    NVT_HD720P,
+    NVT_HD1080I,
+    NVT_HD1080P,
+    NVT_HD720P50,
+    NVT_HD1080P24,
+    NVT_HD1080I50,
+    NVT_HD1080P50,
+    NVT_MAX_TV_FORMAT,
+    NVT_AUTO_SDTV_FORMAT = (NvU32)(-2),   // Not supported in NvTiming_GetTvTiming
+    NVT_AUTO_HDTV_FORMAT = (NvU32)(-1),
+}NVT_TV_FORMAT;
+
+#define NVT_DEFAULT_HDTV_FMT                  NVT_HD1080I
+//
+// macros to set/get the timing type and seq number
+//
+#define NVT_DEF_TIMING_STATUS(type, seq)      ((((type)<<NVT_STATUS_TIMING_TYPE_SHIFT)&NVT_STATUS_TIMING_TYPE_MASK)|((seq)&NVT_STATUS_TIMING_SEQ_MASK))
+#define NVT_GET_TIMING_STATUS_TYPE(n)         (((n)&NVT_STATUS_TIMING_TYPE_MASK)>>NVT_STATUS_TIMING_TYPE_SHIFT)
+#define NVT_GET_TIMING_STATUS_SEQ(n)          ((n)&NVT_STATUS_TIMING_SEQ_MASK)
+//
+//
+//
+// the timing type definitions
+#define NVT_STATUS_DMT                        NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT, 0)       // DMT
+#define NVT_STATUS_GTF                        NVT_DEF_TIMING_STATUS(NVT_TYPE_GTF, 0)       // GTF
+#define NVT_STATUS_ASPR                       NVT_DEF_TIMING_STATUS(NVT_TYPE_ASPR, 0)      // ASPR
+#define NVT_STATUS_NTSC_TV                    NVT_DEF_TIMING_STATUS(NVT_TYPE_NTSC_TV, 0)   // TVN
+#define NVT_STATUS_PAL_TV                     NVT_DEF_TIMING_STATUS(NVT_TYPE_PAL_TV, 0)    // TVP
+#define NVT_STATUS_CVT                        NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT, 0)       // CVT timing with regular blanking
+#define NVT_STATUS_CVT_RB                     NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT_RB, 0)    // CVT_RB timing
+#define NVT_STATUS_CVT_RB_2                   NVT_DEF_TIMING_STATUS(NVT_TYPE_CVT_RB_2, 0)  // CVT_RB timing V2
+#define NVT_STATUS_CUST                       NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST, 0)      // Customized timing
+#define NVT_STATUS_EDID_DTD                   NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, 0)
+#define NVT_STATUS_EDID_STD                   NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_STD, 0)
+#define NVT_STATUS_EDID_EST                   NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EST, 0)
+#define NVT_STATUS_EDID_CVT                   NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_CVT, 0)
+#define NVT_STATUS_EDID_861ST                 NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_861ST, 0)
+#define NVT_STATUS_DMT_RB                     NVT_DEF_TIMING_STATUS(NVT_TYPE_DMT_RB, 0)
+#define NVT_STATUS_EDID_EXT_DTD               NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EXT_DTD, 0)
+#define NVT_STATUS_SDTV_NTSC                  NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_NTSC)
+#define NVT_STATUS_SDTV_NTSC_M                NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV,
NVT_NTSC) +#define NVT_STATUS_SDTV_NTSC_J NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_NTSC_J) +#define NVT_STATUS_SDTV_PAL NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL) +#define NVT_STATUS_SDTV_PAL_M NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL) +#define NVT_STATUS_SDTV_PAL_A NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_A) +#define NVT_STATUS_SDTV_PAL_N NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_N) +#define NVT_STATUS_SDTV_PAL_NC NVT_DEF_TIMING_STATUS(NVT_TYPE_SDTV, NVT_PAL_NC) +#define NVT_STATUS_HDTV_480I NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD480I) +#define NVT_STATUS_HDTV_480P NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD480P) +#define NVT_STATUS_HDTV_576I NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD576I) +#define NVT_STATUS_HDTV_576P NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD576P) +#define NVT_STATUS_HDTV_720P NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD720P) +#define NVT_STATUS_HDTV_1080I NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080I) +#define NVT_STATUS_HDTV_1080P NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P) +#define NVT_STATUS_HDTV_720P50 NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD720P50) +#define NVT_STATUS_HDTV_1080P24 NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P24) +#define NVT_STATUS_HDTV_1080I50 NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080I50) +#define NVT_STATUS_HDTV_1080P50 NVT_DEF_TIMING_STATUS(NVT_TYPE_HDTV, NVT_HD1080P50) +#define NVT_STATUS_EDID_VTB_EXT NVT_DEF_TIMING_STATUS(NVT_TYPE_VTB_EXT, 0) +#define NVT_STATUS_EDID_VTB_EXT_DTD NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_DTD, 0) +#define NVT_STATUS_EDID_VTB_EXT_CVT NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_CVT, 0) +#define NVT_STATUS_EDID_VTB_EXT_STD NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_STD, 0) +#define NVT_STATUS_HDMI_STEREO NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_STEREO, 0) +#define NVT_STATUS_DISPLAYID_1 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_1, 0) +#define NVT_STATUS_DISPLAYID_2 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_2, 0) +#define NVT_STATUS_DISPLAYID_7 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_7, 0) +#define NVT_STATUS_DISPLAYID_8 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_8, 0) +#define NVT_STATUS_DISPLAYID_9 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_9, 0) +#define NVT_STATUS_DISPLAYID_10 NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_10, 0) +#define NVT_STATUS_HDMI_EXT NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_EXT, 0) +#define NVT_STATUS_CUST_AUTO NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST_AUTO, 0) +#define NVT_STATUS_CUST_MANUAL NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST_MANUAL, 0) + +// +// adding the timing sequence (from the EDID) to the modeset status +#define NVT_STATUS_DTD1 NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, 1) +#define NVT_STATUS_EDID_DTDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_DTD, n) +#define NVT_STATUS_EDID_STDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_STD, n) +#define NVT_STATUS_EDID_ESTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EST, n) +#define NVT_STATUS_EDID_CVTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_CVT, n) +#define NVT_STATUS_EDID_861STn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_861ST, n) +#define NVT_STATUS_EDID_EXT_DTDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_EXT_DTD, n) +#define NVT_STATUS_CUSTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_CUST, n) +#define NVT_TYPE_NV_PREDEFINEDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_NV_PREDEFINED, n) +#define NVT_STATUS_EDID_VTB_EXTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT, n) +#define NVT_STATUS_EDID_VTB_EXT_DTDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_DTD, n) +#define NVT_STATUS_EDID_VTB_EXT_STDn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_STD, n) +#define 
NVT_STATUS_EDID_VTB_EXT_CVTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_EDID_VTB_EXT_CVT, n) +#define NVT_STATUS_HDMI_STEREO_REQn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_STEREO_REQ, n) +#define NVT_STATUS_DISPLAYID_1N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_1, n) +#define NVT_STATUS_DISPLAYID_2N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_2, n) +#define NVT_STATUS_DISPLAYID_7N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_7, n) +#define NVT_STATUS_DISPLAYID_8N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_8, n) +#define NVT_STATUS_DISPLAYID_9N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_9, n) +#define NVT_STATUS_DISPLAYID_10N(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_DISPLAYID_10, n) +#define NVT_STATUS_HDMI_EXTn(n) NVT_DEF_TIMING_STATUS(NVT_TYPE_HDMI_EXT, n) + + +//******************************** +// CEA/EIA 861 related EDID info +//******************************** +#define NVT_CEA861_REV_NONE 0 +#define NVT_CEA861_REV_ORIGINAL 1 +#define NVT_CEA861_REV_A 2 +#define NVT_CEA861_REV_B 3 +#define NVT_CEA861_REV_C 3 +#define NVT_CEA861_REV_D 3 +#define NVT_CEA861_REV_E 3 +#define NVT_CEA861_REV_F 3 +#define NVT_CTA861_REV_G 3 +// +// max data after misc/basic_caps in EIA861EXTENSION +#define NVT_CEA861_MAX_PAYLOAD 123 +// +// the basic info encoded in byte[3] +#define NVT_CEA861_CAP_UNDERSCAN 0x80 // DTV monitor supports underscan +#define NVT_CEA861_CAP_BASIC_AUDIO 0x40 // DTV monitor supports basic audio +#define NVT_CEA861_CAP_YCbCr_444 0x20 // DTV monitor supports YCbCr4:4:4 +#define NVT_CEA861_CAP_YCbCr_422 0x10 // DTV monitor supports YCbCr4:2:2 +// +#define NVT_CEA861_TOTAL_LT_MASK 0x0F //the max number of 18-byte detailed timing descriptor +// +// +#define NVT_CEA861_SHORT_DESCRIPTOR_SIZE_MASK 0x1F +#define NVT_CEA861_SHORT_DESCRIPTOR_TAG_MASK 0xE0 +#define NVT_CEA861_SHORT_DESCRIPTOR_TAG_SHIFT 5 +// +// the descriptor type tags +#define NVT_CEA861_TAG_RSVD 0 // reserved block +#define NVT_CEA861_TAG_NONE 0 // reserved block +#define NVT_CEA861_TAG_AUDIO 1 // Audio Data Block +#define NVT_CEA861_TAG_VIDEO 2 // Video Data Block +#define NVT_CEA861_TAG_VENDOR 3 // Vendor Specific Data Block +#define NVT_CEA861_TAG_SPEAKER_ALLOC 4 // Speaker Allocation Data Block +#define NVT_CEA861_TAG_VESA_DTC 5 // VESA DTC data block +#define NVT_CEA861_TAG_RSVD1 6 // reserved block +#define NVT_CEA861_TAG_EXTENDED_FLAG 7 // use Extended Tag +// +// the extended tag codes when NVT_CEA861_TAG_EXTENDED_FLAG +#define NVT_CEA861_EXT_TAG_VIDEO_CAP 0 // Video Capability Data Block +#define NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_VIDEO 1 // Vendor-Specific Video Data Block +#define NVT_CEA861_EXT_TAG_VESA_VIDEO_DISPLAY_DEVICE 2 // Reserved for VESA Video Display Device Information Data Block +#define NVT_CEA861_EXT_TAG_VESA_VIDEO 3 // Reserved for VESA Video Data BLock +#define NVT_CEA861_EXT_TAG_HDMI_VIDEO 4 // Reserved for HDMI Video Data Block +#define NVT_CEA861_EXT_TAG_COLORIMETRY 5 // Colorimetry Data Block +#define NVT_CEA861_EXT_TAG_HDR_STATIC_METADATA 6 // HDR Static Metadata Data Block CEA861.3 HDR extension for HDMI 2.0a +#define NVT_CEA861_EXT_TAG_VIDEO_RSVD_MIN 7 // 7...12 : Reserved for video-related blocks +#define NVT_CEA861_EXT_TAG_VIDEO_RSVD_MAX 12 +#define NVT_CEA861_EXT_TAG_VIDEO_FORMAT_PREFERENCE 13 // CEA861F Video Format Preference Data Block +#define NVT_CEA861_EXT_TAG_YCBCR420_VIDEO 14 // CEA861F YCBCR 4:2:0 Video Data Block +#define NVT_CEA861_EXT_TAG_YCBCR420_CAP 15 // CEA861F YCBCR 4:2:0 Capability Map Data Block +#define NVT_CEA861_EXT_TAG_MISC_AUDIO 16 // CEA Miscellaneous Audio Fields +#define 
NVT_CEA861_EXT_TAG_VENDOR_SPECIFIC_AUDIO 17 // Vendor-Specific Audio Data Block +#define NVT_CEA861_EXT_TAG_HDMI_AUDIO 18 // Reserved for HDMI Audio Data Block +#define NVT_CEA861_EXT_TAG_AUDIO_RSVD_MIN 19 // 19...31 : Reserved for audio-related blocks +#define NVT_CEA861_EXT_TAG_AUDIO_RSVD_MAX 31 +#define NVT_CEA861_EXT_TAG_INFOFRAME 32 // Infoframe Data Block +#define NVT_CEA861_EXT_TAG_RSVD_MIN_1 33 // 33...120 : Reserved for general +#define NVT_CEA861_EXT_TAG_RSVD_MAX_1 119 +#define NVT_CEA861_EXT_TAG_HF_EEODB 120 // HDMI Forum Edid Extension Override Data Block +#define NVT_CTA861_EXT_TAG_SCDB 121 // 0x79 == Tag for Sink Capability Data Block +#define NVT_CEA861_EXT_TAG_RSVD_MIN_2 122 // 122...255 : Reserved for general +#define NVT_CEA861_EXT_TAG_RSVD_MAX_2 255 +// +//the extended tag payload size; the size includes the extended tag code +#define NVT_CEA861_EXT_VIDEO_CAP_SD_SIZE 2 +#define NVT_CEA861_EXT_COLORIMETRY_SD_SIZE 3 +#define NVT_CTA861_EXT_HDR_STATIC_METADATA_SIZE 6 +#define NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH +// +// +#define NVT_CEA861_GET_SHORT_DESCRIPTOR_TAG(a) (((a)&NVT_CEA861_SHORT_DESCRIPTOR_TAG_MASK)>>NVT_CEA861_SHORT_DESCRIPTOR_TAG_SHIFT) +#define NVT_CEA861_GET_SHORT_DESCRIPTOR_SIZE(a) ((NvU32)((a)&NVT_CEA861_SHORT_DESCRIPTOR_SIZE_MASK)) + + +//******************************** +// VTB Extension related info +//******************************** + +#define NVT_VTB_REV_NONE 0 +#define NVT_VTB_REV_A 1 + +#define NVT_VTB_MAX_PAYLOAD 122 + +//************************* +// short descriptor +//************************* +#define NVT_CEA861_SD_HEADER_SIZE 1 +#define NVT_CEA861_SD_PAYLOAD_SIZE 31 +#define NVT_CEA861_SD_TOTAL_SIZE (NVT_CEA861_SD_HEADER_SIZE + NVT_CEA861_SD_PAYLOAD_SIZE) + +//************************* +// short video descriptor +//************************* +#define NVT_CEA861_VIDEO_SD_SIZE 1 +// the max total short video descriptors possible; See CEA-861-E, section 7.5, +// "It is also possible to have more than one of a specific type of data block if necessary +// to include all of the descriptors needed to describe the sinks capabilities." +#define NVT_CEA861_VIDEO_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_VIDEO_SD_SIZE) + \ + (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_VIDEO_SD_SIZE) +#define NVT_CTA861_VIDEO_VIC_MASK 0xFF //the VIC mask of the short video descriptor +#define NVT_CTA861_7BIT_VIDEO_VIC_MASK 0x7F //the 7 bits VIC mask of the short video descriptor +#define NVT_CTA861_VIDEO_NATIVE_MASK 0x80 //the Native mask of the short video descriptor +#define NVT_HDMI_YUV_420_PCLK_SUPPORTED_MIN 59000 //the vale shall equal or larger than 590MHz to support YCbCr in HDMI2.1 + +// CTA-861G supports more SVDs which is over 0x7F index +// All value below 192 will be treated as 7 bit VIC. Value 128~192 shall be forbidden. +#define NVT_GET_CTA_8BIT_VIC(vic) (((vic) <= NVT_CTA861_7BIT_8BIT_SEPARATE_VALUE) ? ((vic) & NVT_CTA861_7BIT_VIDEO_VIC_MASK) : ((vic) & NVT_CTA861_VIDEO_VIC_MASK)) +// + +// According to CEA-861-E Spec. +// Note 3. A video timing with a vertical frequency that is an integer multiple +// of 6.00 Hz (i.e. 24.00, 30.00, 60.00, 120.00 or 240.00 Hz) is considered to +// be the same as a video timing with the equivalent detailed timing +// information but where the vertical frequency is adjusted by a factor of +// 1000/1001 (i.e., 24/1.001, 30/1.001, 60/1.001, 120/1.001 or 240/1.001). 
+// Excluding ceaIndex 1 640x480 which is a PC Mode. +#define NVT_CEA861_TIMING_FRR(_VID_, _RR_) ((_VID_) > 1 && ((_RR_) % 6) == 0) +#define NVT_CEA861_640X480P_59940HZ_4X3 1 // Video Identification Code: format 1 +#define NVT_CEA861_720X480P_59940HZ_4X3 2 // Video Identification Code: format 2 +#define NVT_CEA861_720X480P_59940HZ_16X9 3 // Video Identification Code: format 3 +#define NVT_CEA861_1280X720P_59940HZ_16X9 4 // ... +#define NVT_CEA861_1920X1080I_59940HZ_16X9 5 // ... +#define NVT_CEA861_1440X480I_59940HZ_4X3 6 // ... +#define NVT_CEA861_1440X480I_59940HZ_16X9 7 // ... +#define NVT_CEA861_1440X240P_59940HZ_4X3 8 // ... +#define NVT_CEA861_1440X240P_59940HZ_16X9 9 // ... +#define NVT_CEA861_2880X480I_59940HZ_4X3 10 // ... +#define NVT_CEA861_2880X480I_59940HZ_16X9 11 // ... +#define NVT_CEA861_2880X240P_59940HZ_4X3 12 // ... +#define NVT_CEA861_2880X240P_59940HZ_16X9 13 // ... +#define NVT_CEA861_1440X480P_59940HZ_4X3 14 // ... +#define NVT_CEA861_1440X480P_59940HZ_16X9 15 // ... +#define NVT_CEA861_1920X1080P_59940HZ_16X9 16 // ... +#define NVT_CEA861_720X576P_50000HZ_4X3 17 // ... +#define NVT_CEA861_720X576P_50000HZ_16X9 18 // ... +#define NVT_CEA861_1280X720P_50000HZ_16X9 19 // ... +#define NVT_CEA861_1920X1080I_50000HZ_16X9 20 // ... +#define NVT_CEA861_1440X576I_50000HZ_4X3 21 // ... +#define NVT_CEA861_1440X576I_50000HZ_16X9 22 // ... +#define NVT_CEA861_1440X288P_50000HZ_4X3 23 // ... +#define NVT_CEA861_1440X288P_50000HZ_16X9 24 // ... +#define NVT_CEA861_2880X576I_50000HZ_4X3 25 // ... +#define NVT_CEA861_2880X576I_50000HZ_16X9 26 // ... +#define NVT_CEA861_2880X288P_50000HZ_4X3 27 // ... +#define NVT_CEA861_2880X288P_50000HZ_16X9 28 // ... +#define NVT_CEA861_1440X576P_50000HZ_4X3 29 // ... +#define NVT_CEA861_1440X576P_50000HZ_16X9 30 // ... +#define NVT_CEA861_1920X1080P_50000HZ_16X9 31 // ... +#define NVT_CEA861_1920X1080P_23976HZ_16X9 32 // ... +#define NVT_CEA861_1920X1080P_25000HZ_16X9 33 // ... +#define NVT_CEA861_1920X1080P_29970HZ_16X9 34 // ... +#define NVT_CEA861_2880X480P_59940HZ_4X3 35 // ... +#define NVT_CEA861_2880X480P_59940HZ_16X9 36 // ... +#define NVT_CEA861_2880X576P_50000HZ_4X3 37 // ... +#define NVT_CEA861_2880X576P_50000HZ_16X9 38 // ... +#define NVT_CEA861_1920X1250I_50000HZ_16X9 39 // ... +#define NVT_CEA861_1920X1080I_100000HZ_16X9 40 // ... +#define NVT_CEA861_1280X720P_100000HZ_16X9 41 // ... +#define NVT_CEA861_720X576P_100000HZ_4X3 42 // ... +#define NVT_CEA861_720X576P_100000HZ_16X9 43 // ... +#define NVT_CEA861_1440X576I_100000HZ_4X3 44 // ... +#define NVT_CEA861_1440X576I_100000HZ_16X9 45 // ... +#define NVT_CEA861_1920X1080I_119880HZ_16X9 46 // ... +#define NVT_CEA861_1280X720P_119880HZ_16X9 47 // ... +#define NVT_CEA861_720X480P_119880HZ_4X3 48 // ... +#define NVT_CEA861_720X480P_119880HZ_16X9 49 // ... +#define NVT_CEA861_1440X480I_119880HZ_4X3 50 // ... +#define NVT_CEA861_1440X480I_119880HZ_16X9 51 // ... +#define NVT_CEA861_720X576P_200000HZ_4X3 52 // ... +#define NVT_CEA861_720X576P_200000HZ_16X9 53 // ... +#define NVT_CEA861_1440X576I_200000HZ_4X3 54 // ... +#define NVT_CEA861_1440X576I_200000HZ_16X9 55 // ... +#define NVT_CEA861_720X480P_239760HZ_4X3 56 // ... +#define NVT_CEA861_720X480P_239760HZ_16X9 57 // ... +#define NVT_CEA861_1440X480I_239760HZ_4X3 58 // Video Identification Code: format 58 +#define NVT_CEA861_1440X480I_239760HZ_16X9 59 // Video Identification Code: format 59 +#define NVT_CEA861_1280X720P_23976HZ_16X9 60 // ... +#define NVT_CEA861_1280X720P_25000HZ_16X9 61 // ... 
+#define NVT_CEA861_1280X720P_29970HZ_16X9 62 // ... +#define NVT_CEA861_1920X1080P_119880HZ_16X9 63 // ... +#define NVT_CEA861_1920X1080P_100000HZ_16X9 64 // ... + +// Following modes are from CEA-861F +#define NVT_CEA861_1280X720P_23980HZ_64X27 65 // Video Identification Code: format 65 +#define NVT_CEA861_1280X720P_25000HZ_64X27 66 // Video Identification Code: format 66 +#define NVT_CEA861_1280X720P_29970HZ_64X27 67 // Video Identification Code: format 67 +#define NVT_CEA861_1280X720P_50000HZ_64X27 68 +#define NVT_CEA861_1280X720P_59940HZ_64X27 69 +#define NVT_CEA861_1280X720P_100000HZ_64X27 70 +#define NVT_CEA861_1280X720P_119880HZ_64X27 71 +#define NVT_CEA861_1920X1080P_23980HZ_64X27 72 +#define NVT_CEA861_1920X1080P_25000HZ_64X27 73 +#define NVT_CEA861_1920X1080P_29970HZ_64X27 74 +#define NVT_CEA861_1920X1080P_50000HZ_64X27 75 +#define NVT_CEA861_1920X1080P_59940HZ_64X27 76 +#define NVT_CEA861_1920X1080P_100000HZ_64X27 77 +#define NVT_CEA861_1920X1080P_119880HZ_64X27 78 +#define NVT_CEA861_1680X720P_23980HZ_64X27 79 +#define NVT_CEA861_1680X720P_25000HZ_64X27 80 +#define NVT_CEA861_1680X720P_29970HZ_64X27 81 +#define NVT_CEA861_1680X720P_50000HZ_64X27 82 +#define NVT_CEA861_1680X720P_59940HZ_64X27 83 +#define NVT_CEA861_1680X720P_100000HZ_64X27 84 +#define NVT_CEA861_1680X720P_119880HZ_64X27 85 +#define NVT_CEA861_2560X1080P_23980HZ_64X27 86 +#define NVT_CEA861_2560X1080P_25000HZ_64X27 87 +#define NVT_CEA861_2560X1080P_29970HZ_64X27 88 +#define NVT_CEA861_2560X1080P_50000HZ_64X27 89 +#define NVT_CEA861_2560X1080P_59940HZ_64X27 90 +#define NVT_CEA861_2560X1080P_100000HZ_64X27 91 +#define NVT_CEA861_2560X1080P_119880HZ_64X27 92 +#define NVT_CEA861_3840X2160P_23980HZ_16X9 93 +#define NVT_CEA861_3840X2160P_25000HZ_16X9 94 +#define NVT_CEA861_3840X2160P_29970HZ_16X9 95 +#define NVT_CEA861_3840X2160P_50000HZ_16X9 96 +#define NVT_CEA861_3840X2160P_59940HZ_16X9 97 +#define NVT_CEA861_4096X2160P_23980HZ_256X135 98 +#define NVT_CEA861_4096X2160P_25000HZ_256X135 99 +#define NVT_CEA861_4096X2160P_29970HZ_256X135 100 +#define NVT_CEA861_4096X2160P_50000HZ_256X135 101 +#define NVT_CEA861_4096X2160P_59940HZ_256X135 102 +#define NVT_CEA861_4096X2160P_23980HZ_64X27 103 +#define NVT_CEA861_4096X2160P_25000HZ_64X27 104 +#define NVT_CEA861_4096X2160P_29970HZ_64X27 105 +#define NVT_CEA861_4096X2160P_50000HZ_64X27 106 +#define NVT_CEA861_4096X2160P_59940HZ_64X27 107 + +// Following modes are from CTA-861G +#define NVT_CTA861_1280X720P_47950HZ_16X9 108 +#define NVT_CTA861_1280X720P_47950HZ_64x27 109 +#define NVT_CTA861_1680X720P_47950HZ_64x27 110 +#define NVT_CTA861_1920X1080P_47950HZ_16X9 111 +#define NVT_CTA861_1920X1080P_47950HZ_64x27 112 +#define NVT_CTA861_2560X1080P_47950HZ_64x27 113 +#define NVT_CTA861_3840X2160P_47950HZ_16X9 114 +#define NVT_CTA861_4096x2160p_47950HZ_256X135 115 +#define NVT_CTA861_3840x2160p_47950HZ_64x276 116 +#define NVT_CTA861_3840x2160p_100000HZ_16X9 117 +#define NVT_CTA861_3840x2160p_119880HZ_16X9 118 +#define NVT_CTA861_3840x2160p_100000HZ_64X276 119 +#define NVT_CTA861_3840x2160p_119880HZ_64X276 120 +#define NVT_CTA861_5120x2160p_23980HZ_64X276 121 +#define NVT_CTA861_5120x2160p_25000HZ_64X276 122 +#define NVT_CTA861_5120x2160p_29970HZ_64X276 123 +#define NVT_CTA861_5120x2160p_47950Hz_64X276 124 +#define NVT_CTA861_5120x2160p_50000HZ_64X276 125 +#define NVT_CTA861_5120x2160p_59940HZ_64X276 126 +#define NVT_CTA861_5120x2160p_100000HZ_64X276 127 + +#define NVT_CTA861_7BIT_8BIT_SEPARATE_VALUE 192 + +#define NVT_CTA861_5120x2160p_119880HZ_64X276 193 +#define 
NVT_CTA861_7680x4320p_23980HZ_16X9 194 +#define NVT_CTA861_7680x4320p_25000HZ_16X9 195 +#define NVT_CTA861_7680x4320p_29970HZ_16X9 196 +#define NVT_CTA861_7680x4320p_47950HZ_16X9 197 +#define NVT_CTA861_7680x4320p_50000HZ_16X9 198 +#define NVT_CTA861_7680x4320p_59940HZ_16X9 199 +#define NVT_CTA861_7680x4320p_100000HZ_16X9 200 +#define NVT_CTA861_7680x4320p_119880HZ_16X9 201 +#define NVT_CTA861_7680x4320p_23980HZ_64X276 202 +#define NVT_CTA861_7680x4320p_25000HZ_64X276 203 +#define NVT_CTA861_7680x4320p_29970HZ_64X276 204 +#define NVT_CTA861_7680x4320p_47950HZ_64X276 205 +#define NVT_CTA861_7680x4320p_50000HZ_64X276 206 +#define NVT_CTA861_7680x4320p_59940HZ_64X276 207 +#define NVT_CTA861_7680x4320p_100000HZ_64X276 208 +#define NVT_CTA861_7680x4320p_119880HZ_64X276 209 +#define NVT_CTA861_10240x4320p_23980HZ_64X276 210 +#define NVT_CTA861_10240x4320p_25000HZ_64X276 211 +#define NVT_CTA861_10240x4320p_29970HZ_64X276 212 +#define NVT_CTA861_10240x4320p_47950HZ_64X276 213 +#define NVT_CTA861_10240x4320p_50000HZ_64X276 214 +#define NVT_CTA861_10240x4320p_59940HZ_64X276 215 +#define NVT_CTA861_10240x4320p_100000HZ_64X276 216 +#define NVT_CTA861_10240x4320p_119880HZ_64X276 217 +#define NVT_CTA861_4096x2160p_100000HZ_256X135 218 +#define NVT_CTA861_4096x2160p_119880HZ_256X135 219 + +// When defining new CEA861 format: +// Search code base to update array of certain category of CEA formats, such as 720p, 1080i, etc... +// Ideally, it's better to define these groups in one module. However, they should not reside +// in this .h file, thus updating these groups in other file is still needed. +// example of the group: 720p: NVT_CEA861_1280X720P_59940HZ_16X9, +// NVT_CEA861_1280X720P_100000HZ_16X9, +// NVT_CEA861_1280X720P_119880HZ_16X9 + +//************************* +// short audio descriptor +//************************* +#define NVT_CEA861_AUDIO_SD_SIZE sizeof(NVT_3BYTES) +// the max total short audio descriptors possible; See CEA-861-E, section 7.5 on repeated types +#define NVT_CEA861_AUDIO_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_AUDIO_SD_SIZE) + \ + (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_AUDIO_SD_SIZE) +// +// short audio descriptor - byte 1 +#define NVT_CEA861_AUDIO_FORMAT_MASK 0x78 //the audio format mask of the CEA short +#define NVT_CEA861_AUDIO_FORMAT_SHIFT 3 //the audio format data shift +// +#define NVT_CEA861_AUDIO_FORMAT_RSVD 0 // short audio descriptor format - reserved +#define NVT_CEA861_AUDIO_FORMAT_LINEAR_PCM 1 // short audio descriptor format - Linear PCM (uncompressed) +#define NVT_CEA861_AUDIO_FORMAT_AC3 2 // short audio descriptor format - AC3 +#define NVT_CEA861_AUDIO_FORMAT_MPEG1 3 // short audio descriptor format - MPEG1(layer 1&2) +#define NVT_CEA861_AUDIO_FORMAT_MP3 4 // short audio descriptor format - MP3(MPEG1 layer 3) +#define NVT_CEA861_AUDIO_FORMAT_MPEG2 5 // short audio descriptor format - MPEG2 (multichannel) +#define NVT_CEA861_AUDIO_FORMAT_AAC 6 // short audio descriptor format - AAC +#define NVT_CEA861_AUDIO_FORMAT_DTS 7 // short audio descriptor format - DTS +#define NVT_CEA861_AUDIO_FORMAT_ATRAC 8 // short audio descriptor format - ATRAC +#define NVT_CEA861_AUDIO_FORMAT_ONE_BIT 9 // short audio descriptor format - one bit audio +#define NVT_CEA861_AUDIO_FORMAT_DDP 10 // short audio descriptor format - dolby digital + +#define NVT_CEA861_AUDIO_FORMAT_DTS_HD 11 // short audio descriptor format - DTS_HD +#define NVT_CEA861_AUDIO_FORMAT_MAT 12 
// short audio descriptor format - MAT(MLP) +#define NVT_CEA861_AUDIO_FORMAT_DST 13 // short audio descriptor format - DST +#define NVT_CEA861_AUDIO_FORMAT_WMA_PRO 14 // short audio descriptor format - WMA Pro +#define NVT_CEA861_AUDIO_FORMAT_RSVD15 15 // short audio descriptor format - reserved +// +#define NVT_CEA861_AUDIO_MAX_CHANNEL_MASK 7 // short audio descriptor format - Max Number of channels - 1 +#define NVT_CEA861_AUDIO_MAX_CHANNEL_SHIFT 0 // short audio descriptor format shift +// +// short audio descriptor - byte 2 +#define NVT_CEA861_AUDIO_SAMPLE_RATE_MASK 0x7F //the sample rate mask +#define NVT_CEA861_AUDIO_SAMPLE_RATE_SHIFT 0 //the sample rate shift +// +#define NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ 0x01 // short audio descriptor - sample rate : 32KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ 0x02 // short audio descriptor - sample rate : 44KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ 0x04 // short audio descriptor - sample rate : 48KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ 0x08 // short audio descriptor - sample rate : 88KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ 0x10 // short audio descriptor - sample rate : 96KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ 0x20 // short audio descriptor - sample rate : 176KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ 0x40 // short audio descriptor - sample rate : 192KHz +#define NVT_CEA861_AUDIO_SAMPLE_RATE_RSVD 0x80 // short audio descriptor - sample rate : reserved +// +// short audio descriptor - byte 3 +#define NVT_CEA861_AUDIO_SAMPLE_DEPTH_MASK 0x07 // the uncompressed audio resolution mask +#define NVT_CEA861_AUDIO_SAMPLE_DEPTH_SHIFT 0 // the uncompressed audio resolution shift +// +#define NVT_CEA861_AUDIO_SAMPLE_SIZE_16BIT 0x01 // uncompressed (Linear PCM) audio A/D resolution - 16bit +#define NVT_CEA861_AUDIO_SAMPLE_SIZE_20BIT 0x02 // uncompressed (Linear PCM) audio A/D resolution - 20bit +#define NVT_CEA861_AUDIO_SAMPLE_SIZE_24BIT 0x04 // uncompressed (Linear PCM) audio A/D resolution - 24bit + +//************************** +// speaker allocation data +//************************** +#define NVT_CEA861_SPEAKER_SD_SIZE sizeof(NVT_3BYTES) +// the max total short speaker descriptors possible; See CEA-861-E, section 7.5 on repeated types +#define NVT_CEA861_SPEAKER_MAX_DESCRIPTOR ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_SPEAKER_SD_SIZE) + \ + (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE) / NVT_CEA861_SPEAKER_SD_SIZE) +#define NVT_CEA861_SPEAKER_ALLOC_MASK 0x7F // the speaker allocation mask +#define NVT_CEA861_SPEAKER_ALLOC_SHIFT 0 // the speaker allocation mask shift +// +#define NVT_CEA861_SPEAKER_ALLOC_FL_FR 0x01 // speaker allocation : Front Left + Front Right +#define NVT_CEA861_SPEAKER_ALLOC_LFE 0x02 // speaker allocation : Low Frequency Effect +#define NVT_CEA861_SPEAKER_ALLOC_FC 0x04 // speaker allocation : Front Center +#define NVT_CEA861_SPEAKER_ALLOC_RL_RR 0x08 // speaker allocation : Rear Left + Rear Right +#define NVT_CEA861_SPEAKER_ALLOC_RC 0x10 // speaker allocation : Rear Center +#define NVT_CEA861_SPEAKER_ALLOC_FLC_FRC 0x20 // speaker allocation : Front Left Center + Front Right Center +#define NVT_CEA861_SPEAKER_ALLOC_RLC_RRC 0x40 // speaker allocation : Rear Left Center + Rear Right Center + +//*********************** +// vendor specific data +//*********************** +#define NVT_CEA861_VSDB_HEADER_SIZE 4 +#define NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH 28 // max allowed vendor specific data block payload (in 
byte) +#define NVT_CEA861_HDMI_IEEE_ID 0x0C03 +#define NVT_CEA861_HDMI_LLC_IEEE_ID NVT_CEA861_HDMI_IEEE_ID +#define NVT_CEA861_NVDA_IEEE_ID 0x44B +#define NVT_CEA861_HDMI_FORUM_IEEE_ID 0xC45DD8 +#define NVT_CEA861_MSFT_IEEE_ID 0xCA125C + +#define NVT_CEA861_VSDB_MAX_BLOCKS 4 // NOTE: The maximum number of VSDB blocks should be: + // (NVT_CEA861_MAX_PAYLOAD / (NVT_CEA861_VSDB_HEADER_SIZE + 1)) (assume at least 1 byte of payload) + // As of Sept 2013, there are 3 different VSDBs defined in the spec. Hence allocating space for all 24 + // is overkill. As a tradeoff, we define this limit as 4 for now. If required, this should be increased later. + +typedef struct VSDB_DATA +{ + NvU32 ieee_id; + NvU32 vendor_data_size; // size of data copied to vendor_data (excludes ieee_id from frame) + NvU8 vendor_data[NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH]; +} VSDB_DATA; + +//******************************* +// vendor specific video data +//******************************* +#define NVT_CEA861_DV_IEEE_ID 0x00D046 +#define NVT_CEA861_VSVDB_PAYLOAD_MAX_LENGTH 25 // max allowed vendor specific video data block payload (in byte) +#define NVT_CEA861_VSVDB_VERSION_MASK 0xE0 // vsdb version mask +#define NVT_CEA861_VSVDB_VERSION_MASK_SHIFT 5 // vsdb version shift mask + +typedef struct VSVDB_DATA +{ + NvU32 ieee_id; + NvU32 vendor_data_size; // size of data copied to vendor_data (excludes ieee_id from frame) + NvU8 vendor_data[NVT_CEA861_VSVDB_PAYLOAD_MAX_LENGTH]; +} VSVDB_DATA; + +#pragma pack(1) +typedef struct tagNVT_DV_STATIC_METADATA_TYPE0 +{ + // first byte + NvU8 supports_YUV422_12bit : 1; + NvU8 supports_2160p60hz : 1; + NvU8 supports_global_dimming : 1; + NvU8 reserved_1 : 2; + NvU8 VSVDB_version : 3; + + // second- fourth byte + NvU8 cc_red_y_3_0 : 4; + NvU8 cc_red_x_3_0 : 4; + NvU8 cc_red_x_11_4 : 8; + NvU8 cc_red_y_11_4 : 8; + + NvU8 cc_green_y_3_0 : 4; + NvU8 cc_green_x_3_0 : 4; + NvU8 cc_green_x_11_4 : 8; + NvU8 cc_green_y_11_4 : 8; + + NvU8 cc_blue_y_3_0 : 4; + NvU8 cc_blue_x_3_0 : 4; + NvU8 cc_blue_x_11_4 : 8; + NvU8 cc_blue_y_11_4 : 8; + + NvU8 cc_white_y_3_0 : 4; + NvU8 cc_white_x_3_0 : 4; + NvU8 cc_white_x_11_4 : 8; + NvU8 cc_white_y_11_4 : 8; + + NvU8 target_max_pq_3_0 : 4; + NvU8 target_min_pq_3_0 : 4; + NvU8 target_min_pq_11_4 : 8; + NvU8 target_max_pq_11_4 : 8; + + NvU8 dm_version_minor : 4; + NvU8 dm_version_major : 4; + + NvU8 reserved_2 : 8; + NvU8 reserved_3 : 8; + NvU8 reserved_4 : 8; + NvU8 reserved_5 : 8; +} NVT_DV_STATIC_METADATA_TYPE0; + +typedef struct tagNVT_DV_STATIC_METADATA_TYPE1 +{ + // first byte + NvU8 supports_YUV422_12bit : 1; + NvU8 supports_2160p60hz : 1; + NvU8 dm_version : 3; + NvU8 VSVDB_version : 3; + + // second byte + NvU8 supports_global_dimming : 1; + NvU8 target_max_luminance : 7; + + // third byte + NvU8 colorimetry : 1; + NvU8 target_min_luminance : 7; + + //fourth byte + NvU8 reserved : 8; + //fith to tenth byte + NvU8 cc_red_x : 8; + NvU8 cc_red_y : 8; + NvU8 cc_green_x : 8; + NvU8 cc_green_y : 8; + NvU8 cc_blue_x : 8; + NvU8 cc_blue_y : 8; +} NVT_DV_STATIC_METADATA_TYPE1; + +typedef struct tagNVT_DV_STATIC_METADATA_TYPE1_1 +{ + // first byte + NvU8 supports_YUV422_12bit : 1; + NvU8 supports_2160p60hz : 1; + NvU8 dm_version : 3; + NvU8 VSVDB_version : 3; + + // second byte + NvU8 supports_global_dimming : 1; + NvU8 target_max_luminance : 7; + + // third byte + NvU8 colorimetry : 1; + NvU8 target_min_luminance : 7; + + //fourth byte + NvU8 interface_supported_by_sink : 2; + NvU8 unique_By : 3; + NvU8 unique_Bx : 3; + + //fifth byte + NvU8 unique_Ry_bit_0 : 1; + 
NvU8 unique_Gx : 7; + + //sixth byte + NvU8 unique_Ry_bit_1 : 1; + NvU8 unique_Gy : 7; + + //seventh byte + NvU8 unique_Rx : 5; + NvU8 unique_Ry_bit_2_to_4 : 3; + +} NVT_DV_STATIC_METADATA_TYPE1_1; + +typedef struct tagNVT_DV_STATIC_METADATA_TYPE2 +{ + // first byte + NvU8 supports_YUV422_12bit : 1; + NvU8 supports_backlight_control : 1; + NvU8 dm_version : 3; + NvU8 VSVDB_version : 3; + + // second byte + NvU8 backlt_min_luma : 2; + NvU8 supports_global_dimming : 1; + NvU8 target_min_luminance : 5; + + // third byte + NvU8 interface_supported_by_sink : 2; + NvU8 reserved : 1; + NvU8 target_max_luminance : 5; + + //fourth byte + NvU8 supports_10b_12b_444_bit1 : 1; + NvU8 unique_Gx : 7; + + //fifth byte + NvU8 supports_10b_12b_444_bit0 : 1; + NvU8 unique_Gy : 7; + + //sixth byte + NvU8 unique_Bx : 3; + NvU8 unique_Rx : 5; + + //seventh byte + NvU8 unique_By : 3; + NvU8 unique_Ry : 5; + +} NVT_DV_STATIC_METADATA_TYPE2; +#pragma pack() + +//*************************** +// colorimetry data block +//*************************** +// +// Colorimetry capabilities - byte 3 +#define NVT_CEA861_COLORIMETRY_MASK 0xFF // the colorimetry cap mask +#define NVT_CEA861_COLORIMETRY_SHIFT 0 // the colorimetry cap shift + +#define NVT_CEA861_COLORIMETRY_NO_DATA 0x00 +#define NVT_CEA861_COLORIMETRY_xvYCC_601 0x01 // xvYCC601 capable +#define NVT_CEA861_COLORIMETRY_xvYCC_709 0x02 // xvYCC709 capable +#define NVT_CEA861_COLORIMETRY_sYCC_601 0x04 // sYCC601 capable +#define NVT_CEA861_COLORIMETRY_AdobeYCC_601 0x08 // AdobeYCC601 capable +#define NVT_CEA861_COLORIMETRY_AdobeRGB 0x10 // AdobeRGB capable +#define NVT_CEA861_COLORIMETRY_BT2020cYCC 0x20 // BT2020 cYCbCr (constant luminance) capable +#define NVT_CEA861_COLORIMETRY_BT2020YCC 0x40 // BT2020 Y'CbCr capable +#define NVT_CEA861_COLORIMETRY_BT2020RGB 0x80 // BT2020 RGB capable +// Colorimetry capabilities - byte 4 +#define NVT_CEA861_COLORIMETRY_DCI_P3 0x80 // DCI-P3 + +// +// gamut-related metadata capabilities - byte 4 +#define NVT_CEA861_GAMUT_METADATA_MASK 0x8F // the colorimetry or gamut-related metadata block mask +#define NVT_CEA861_GAMUT_METADATA_SHIFT 0 // the metadata block shift +// +#define NVT_CEA861_GAMUT_METADATA_MD0 0x01 // MD0 +#define NVT_CEA861_GAMUT_METADATA_MD1 0x02 // MD1 +#define NVT_CEA861_GAMUT_METADATA_MD2 0x04 // MD2 +#define NVT_CEA861_GAMUT_METADATA_MD3 0x08 // MD2 + +//*************************** +// HDR static metadata data block +//*************************** +// +typedef struct tagNVT_5BYTES +{ + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; +} NVT_5BYTES; + +// Supported Electro-Optical Transfer Function - byte 3 +#define NVT_CEA861_EOTF_MASK 0x3F // the EOTF cap mask +#define NVT_CEA861_EOTF_SHIFT 0 // the EOTF cap shift +// +#define NVT_CEA861_EOTF_GAMMA_SDR 0x01 // ET_0 Traditional gamma - SDR Luminance Range +#define NVT_CEA861_EOTF_GAMMA_HDR 0x02 // ET_1 Traditional gamma - HDR Luminance Range +#define NVT_CEA861_EOTF_SMPTE_ST2084 0x04 // ET_2 SMPTE ST2084 EOTF (a.k.a PQ - Perceptual Quantizer EOTF) +#define NVT_CEA861_EOTF_FUTURE 0x08 // ET_3 Future EOTF + +// +// Supported Static Metadata Descriptor - byte 4 +#define NVT_CEA861_STATIC_METADATA_DESCRIPTOR_MASK 0x01 // the supported static metadata descriptor block mask +#define NVT_CEA861_STATIC_METADATA_SHIFT 0 // the metadata block shift +// +#define NVT_CEA861_STATIC_METADATA_SM0 0x00 // Static Metadata Type 1 + +// +// Desired Content Max Luminance data - byte 5 +#define NVT_CEA861_MAX_CLL_MASK 0xFF // the desired content max luminance level 
(MaxCLL) data block mask +#define NVT_CEA861_MAX_CLL_SHIFT 0 // the metadata block shift + +// Desired Content Max Frame-Average Luminance data - byte 6 +#define NVT_CEA861_MAX_FALL_MASK 0xFF // the desired content max frame-average luminance (MaxFALL) data block mask +#define NVT_CEA861_MAX_FALL_SHIFT 0 // the metadata block shift + +// Desired Content Min Luminance data - byte 7 +#define NVT_CEA861_MIN_CLL_MASK 0xFF // the desired content min luminance level (MinCLL) data block mask +#define NVT_CEA861_MIN_CLL_SHIFT 0 // the metadata block shift + +//*************************** +// video capability data block +//*************************** +// +#define NVT_CEA861_VIDEO_CAPABILITY_MASK 0x7F // the video capability data block mask +#define NVT_CEA861_VIDEO_CAPABILITY_SHIFT 0 // the video capability data block shift +// +#define NVT_CEA861_VIDEO_CAPABILITY_S_CE0 0x01 // S_CE0 +#define NVT_CEA861_VIDEO_CAPABILITY_S_CE1 0x02 // S_CE1 +#define NVT_CEA861_VIDEO_CAPABILITY_S_IT0 0x04 // S_IT0 +#define NVT_CEA861_VIDEO_CAPABILITY_S_IT1 0x08 // S_IT1 +#define NVT_CEA861_VIDEO_CAPABILITY_S_PT0 0x10 // S_PT0 +#define NVT_CEA861_VIDEO_CAPABILITY_S_PT1 0x20 // S_PT1 +#define NVT_CEA861_VIDEO_CAPABILITY_S_QS 0x40 // S_QS + +//************************** +// EDID 861 Extension Info +//************************** +typedef struct tagNVT_3BYTES +{ + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; +} NVT_3BYTES; + +//*********************** +// VCDB specific data +//*********************** +#define NVT_CEA861_VCDB_QS_MASK 0x40 // quantization range selectable mask +#define NVT_CEA861_VCDB_QS_SHIFT 6 // quantization range selectable shift + +#define NVT_CEA861_VCDB_S_PT_MASK 0x30 // PT over/underscan behavior mask +#define NVT_CEA861_VCDB_S_PT_SHIFT 4 // PT over/underscan behavior shift +#define NVT_CEA861_VCDB_S_PT_NO_DATA 0x00 +#define NVT_CEA861_VCDB_S_PT_ALWAYS_OVERSCAN 0x01 +#define NVT_CEA861_VCDB_S_PT_ALWAYS_UNDERSCAN 0x02 +#define NVT_CEA861_VCDB_S_PT_OVER_OR_UNDERSCAN 0x03 + +#define NVT_CEA861_VCDB_S_IT_MASK 0x0C // IT over/underscan behavior mask +#define NVT_CEA861_VCDB_S_IT_SHIFT 2 // IT over/underscan behavior shift +#define NVT_CEA861_VCDB_S_IT_NOT_SUPPORTED 0x00 +#define NVT_CEA861_VCDB_S_IT_ALWAYS_OVERSCAN 0x01 +#define NVT_CEA861_VCDB_S_IT_ALWAYS_UNDERSCAN 0x02 +#define NVT_CEA861_VCDB_S_IT_OVER_OR_UNDERSCAN 0x03 + +#define NVT_CEA861_VCDB_S_CE_MASK 0x03 // CE over/underscan behavior mask +#define NVT_CEA861_VCDB_S_CE_SHIFT 0 // CE over/underscan behavior shift +#define NVT_CEA861_VCDB_S_CE_NOT_SUPPORTED 0x00 +#define NVT_CEA861_VCDB_S_CE_ALWAYS_OVERSCAN 0x01 +#define NVT_CEA861_VCDB_S_CE_ALWAYS_UNDERSCAN 0x02 +#define NVT_CEA861_VCDB_S_CE_OVER_OR_UNDERSCAN 0x03 + +// +typedef struct tagNVT_2BYTES +{ + NvU8 byte1; + NvU8 byte2; +} NVT_2BYTES; +// +// See CEA-861E, Table 42, 43 Extended Tags; indicates that the corresponding CEA extended data block value is valid, e.g. if colorimetry is set, then NVT_EDID_CEA861_INFO::colorimetry is valid +typedef struct tagNVT_VALID_EXTENDED_BLOCKS +{ + NvU32 VCDB : 1; + NvU32 VSVD : 1; + NvU32 colorimetry : 1; + NvU32 H14B_VSDB : 1; + NvU32 H20_HF_VSDB : 1; + NvU32 y420cmdb : 1; + NvU32 hdr_static_metadata : 1; + NvU32 dv_static_metadata : 1; + NvU32 SCDB : 1; + NvU32 HF_EEODB : 1; +} NVT_VALID_EXTENDED_BLOCKS; + + +//************************* +// extended data blocks +//************************* +#define NVT_CEA861_SD_EXT_HEADER_SIZE 1 + +#define NVT_CEA861_Y420VDB_SD_SIZE 1 + +// Max number of YUV420 VDBs for each VDB block is 30 per CTA-861-G spec sec. 
7.5.10 +// Accomodate 2 blocks +#define NVT_CEA861_Y420VDB_MAX_DESCRIPTOR 60 + +#define NVT_CEA861_Y420CMDB_SD_SIZE 1 + +// Max number of YUV420 SVDs for each VDB block is 30 per CTA-861-G spec sec. 7.5.11 +// Accomodate 2 blocks +#define NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR 60 +#define NVT_CEA861_VFPDB_SD_SIZE 1 +#define NVT_CEA861_VFPDB_MAX_DESCRIPTOR 16 // NOTE: Limiting to 16 to not allocate too much space. The maximum descriptor should be: + // ((NVT_CEA861_MAX_PAYLOAD / NVT_CEA861_SD_TOTAL_SIZE) * (NVT_CEA861_SD_PAYLOAD_SIZE / NVT_CEA861_VFPDB_SD_SIZE) + + // (NVT_CEA861_MAX_PAYLOAD % NVT_CEA861_SD_TOTAL_SIZE - NVT_CEA861_SD_HEADER_SIZE - NVT_CEA861_SD_EXT_HEADER_SIZE) / NVT_CEA861_VFPDB_SD_SIZE) + +typedef enum tagNVT_CTA861_ORIGIN +{ + FROM_CTA861_EXTENSION, + FROM_DISPLAYID_13_DATA_BLOCK, + FROM_DISPLAYID_20_DATA_BLOCK, +} NVT_CTA861_ORIGIN; + +// +typedef struct tagEDID_CEA861_INFO +{ + NvU8 revision; + NvU8 basic_caps; + + // short video descriptor + NvU8 total_svd; + NvU8 video[NVT_CEA861_VIDEO_MAX_DESCRIPTOR]; + + // short audio descriptor + NvU8 total_sad; + NVT_3BYTES audio[NVT_CEA861_AUDIO_MAX_DESCRIPTOR]; + + // speaker allocation data + NvU8 total_ssd; + NVT_3BYTES speaker[NVT_CEA861_SPEAKER_MAX_DESCRIPTOR]; + + // vendor specific data + NvU8 total_vsdb; + VSDB_DATA vsdb[NVT_CEA861_VSDB_MAX_BLOCKS]; + + // vendor specific video data + VSVDB_DATA vsvdb; + + // indicates which of the extended data blocks below contain valid data excluding extended blocks with total count + NVT_VALID_EXTENDED_BLOCKS valid; + // extended data blocks + NVT_2BYTES colorimetry; // Colorimetry Data Block + NvU8 video_capability; // Video Capability Block + + // HDR Static Metadata Data Block. See CEA-861.3 HDR Static Metadata Extensions, Section 4.2 + NVT_5BYTES hdr_static_metadata; + + // VFPDB extended block. See CEA861-H, Section 7.5.12 Video Format Preference Data Block + NvU8 total_vfpdb; + NvU8 svr_vfpdb[NVT_CEA861_VFPDB_MAX_DESCRIPTOR]; // svr of preferred video formats + + // Y420VDB extended block. See CEA861-F, Section 7.5.10 YCBCR 4:2:0 Video Data Block + NvU8 total_y420vdb; + NvU8 svd_y420vdb[NVT_CEA861_Y420VDB_MAX_DESCRIPTOR]; // svd of video formats that only support YCbCr 4:2:0 + + // Y420CMDB extended block. See CEA861-F, Section 7.5.11 YCBCR 4:2:0 Capability Map Data Block + NvU8 total_y420cmdb; + NvU8 map_y420cmdb[NVT_CEA861_Y420CMDB_MAX_DESCRIPTOR]; // bit map to svd in video[] that also supports YCbCr 4:2:0 + + // NVT_EDID_CEA861_INFO::vsvdb.SCDB = 1 in case hfscdb is exposed by sink. + NvU32 hfscdbSize; + NvU8 hfscdb[NVT_CTA861_EXT_SCDB_PAYLOAD_MAX_LENGTH]; + + NvU8 hfeeodb; // HDMI Forum Edid Extension Override Data Block. 
+} NVT_EDID_CEA861_INFO; + + +//******************* +// Parsed DisplayID Information +//******************* +#define NVT_DISPLAYID_SECTION_MAX_SIZE 251 +#define NVT_DISPLAYID_SECTION_HEADER_LEN 5 +#define NVT_DISPLAYID_DATABLOCK_MAX_PAYLOAD_LEN 248 +#define NVT_DISPLAYID_DATABLOCK_HEADER_LEN 3 + +#define NVT_DISPLAYID_PRODUCT_STRING_MAX_LEN 233 +#define NVT_DISPLAYID_COLOR_MAX_WHITEPOINTS 5 +#define NVT_DISPLAYID_COLOR_MAX_PRIMARIES 3 +#define NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT 2 +#define NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF 7 + +typedef enum tagNVT_SINGLE_TILE_BEHAVIOR +{ + NVT_SINGLE_TILE_BEHAVIOR_OTHER = 0, + NVT_SINGLE_TILE_BEHAVIOR_SOURCE_DRIVEN, + NVT_SINGLE_TILE_BEHAVIOR_SCALE, + NVT_SINGLE_TILE_BEHAVIOR_CLONE +} NVT_SINGLE_TILE_BEHAVIOR; + +typedef enum tagNVT_MULTI_TILE_BEHAVIOR +{ + NVT_MULTI_TILE_BEHAVIOR_OTHER = 0, + NVT_MULTI_TILE_BEHAVIOR_SOURCE_DRIVEN +} NVT_MULTI_TILE_BEHAVIOR; + +typedef struct _tagNVT_TILEDDISPLAY_TOPOLOGY_ID +{ + NvU32 vendor_id; + NvU16 product_id; + NvU32 serial_number; +} NVT_TILEDDISPLAY_TOPOLOGY_ID; + +typedef struct _tagNVT_COLOR_POINT +{ + NvU16 x; + NvU16 y; +} NVT_COLOR_POINT; + +typedef struct _tagNVT_DISPLAYID_RANGE_LIMITS +{ + NvU32 revision; + NvU32 pclk_min; + NvU32 pclk_max; + NvU8 hfreq_min; + NvU8 hfreq_max; + NvU16 hblank_min; + NvU8 vfreq_min; + NvU16 vfreq_max; + NvU16 vblank_min; + NvU8 interlaced : 1; + NvU8 cvt : 1; + NvU8 cvt_reduced : 1; + NvU8 dfd : 1; + NvU8 seamless_dynamic_video_timing_change : 1; +} NVT_DISPLAYID_RANGE_LIMITS; + +#define NVT_DID_MAX_EXT_PAYLOAD 122 + +typedef struct _tagNVT_DISPLAYID_INFO +{ + // Top Level Header Information + NvU8 version; + NvU8 product_type; + + // Product Identification (0 or 1 Blocks Allowed) + NvU32 vendor_id; + NvU16 product_id; + NvU32 serial_number; + NvU8 week; + NvU8 year; + NvU8 product_string[NVT_DISPLAYID_PRODUCT_STRING_MAX_LEN + 1]; + + // Display Parameters + NvU16 horiz_size; + NvU16 vert_size; + NvU16 horiz_pixels; + NvU16 vert_pixels; + NvU8 support_audio : 1; + NvU8 separate_audio : 1; + NvU8 audio_override : 1; + NvU8 power_management : 1; + NvU8 fixed_timing : 1; + NvU8 fixed_pixel_format : 1; + NvU8 rsvd4 : 1; + NvU8 deinterlace : 1; + NvU16 gamma; + NvU8 aspect_ratio; + NvU8 depth_overall : 4; + NvU8 depth_native : 4; + + // Color Characteristics + NvU8 total_white_points; + NvU8 total_primaries : 3; + NvU8 temporal : 1; + NVT_COLOR_POINT white_points[NVT_DISPLAYID_COLOR_MAX_WHITEPOINTS]; + NVT_COLOR_POINT primaries[NVT_DISPLAYID_COLOR_MAX_PRIMARIES]; + + // Range Limits + NvU8 rl_num; + NVT_DISPLAYID_RANGE_LIMITS range_limits[NVT_DISPLAYID_RANGE_LIMITS_MAX_COUNT]; + + // Display Data + NvU8 tech_type; + NvU8 device_op_mode : 4; + NvU8 support_backlight : 1; + NvU8 support_intensity : 1; + NvU8 rsvd1 : 2; + NvU16 horiz_pixel_count; + NvU16 vert_pixel_count; + NvU8 orientation : 2; + NvU8 rotation : 2; + NvU8 zero_pixel : 2; + NvU8 scan_direction : 2; + NvU8 subpixel_info; + NvU8 horiz_pitch; + NvU8 vert_pitch; + NvU8 rsvd2 : 4; + NvU8 color_bit_depth : 4; + NvU8 white_to_black : 1; + NvU8 response_time : 7; + + // Power Settings + NvU8 t1_min : 4; + NvU8 t1_max : 4; + NvU8 t2_max; + NvU8 t3_max; + NvU8 t4_min; + NvU8 t5_min; + NvU8 t6_min; + + union + { + struct + { + NvU8 rsvd : 3; + NvU8 color_map : 1; + NvU8 support_2_8v : 1; + NvU8 support_12v : 1; + NvU8 support_5v : 1; + NvU8 support_3_3v : 1; + NvU8 rsvd2 : 5; + NvU8 DE_mode : 1; + NvU8 polarity : 1; + NvU8 data_strobe : 1; + } lvds; + + struct + { + NvU8 rsvd : 5; 
+ NvU8 DE_mode : 1; + NvU8 polarity : 1; + NvU8 data_strobe : 1; + } proprietary; + } u2; + + // Stereo Interface + NvU8 stereo_code; + union + { + struct + { + NvU8 stereo_polarity; + } field_sequential; + + struct + { + NvU8 view_identity; + } side_by_side; + + struct + { + NvU8 interleave_pattern[8]; + } pixel_interleaved; + + struct + { + NvU8 rsvd : 5; + NvU8 mirroring : 2; + NvU8 polarity : 1; + } left_right_separate; + + struct + { + NvU8 num_views; + NvU8 code; + } multiview; + } u3; + + NvU32 tiled_display_revision; + struct + { + NvBool bSingleEnclosure; + NvBool bHasBezelInfo; + NVT_SINGLE_TILE_BEHAVIOR single_tile_behavior; + NVT_MULTI_TILE_BEHAVIOR multi_tile_behavior; + } tile_capability; + + struct + { + NvU32 row; + NvU32 col; + } tile_topology; + + struct + { + NvU32 x; + NvU32 y; + } tile_location; + + struct + { + NvU32 width; + NvU32 height; + } native_resolution; + + struct + { + NvU32 pixel_density; + NvU32 top; + NvU32 bottom; + NvU32 right; + NvU32 left; + } bezel_info; + + NVT_TILEDDISPLAY_TOPOLOGY_ID tile_topology_id; + NvU8 cea_data_block_present; + + NvU8 supported_displayId2_0; + union + { + // Display Interface + struct + { + NvU8 interface_type : 4; + union + { + NvU8 analog_subtype : 4; + NvU8 digital_num_links : 4; + } u1; + + NvU8 interface_version; + + struct + { + NvU8 rsvd : 2; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + NvU8 support_6b : 1; + } rgb_depth; + + struct + { + NvU8 rsvd : 2; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + NvU8 support_6b : 1; + } ycbcr444_depth; + + struct + { + NvU8 rsvd : 3; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + } ycbcr422_depth; + + NvU8 content_protection; + NvU8 content_protection_version; + NvU8 spread_spectrum : 2; + NvU8 rsvd3 : 2; + NvU8 spread_percent : 4; + + } display_interface; + + //display interface features for DID2.0 + struct + { + struct + { + NvU8 rsvd : 2; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + NvU8 support_6b : 1; + } rgb_depth; + + struct + { + NvU8 rsvd : 2; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + NvU8 support_6b : 1; + } ycbcr444_depth; + + struct + { + NvU8 rsvd : 3; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + } ycbcr422_depth; + + struct + { + NvU8 rsvd : 3; + NvU8 support_16b : 1; + NvU8 support_14b : 1; + NvU8 support_12b : 1; + NvU8 support_10b : 1; + NvU8 support_8b : 1; + } ycbcr420_depth; + + // based on the DID2.0 spec. 
minimum pixel rate at which the Sink device shall support YCbCr 4:2:0 encoding + NvU8 minimum_pixel_rate_ycbcr420; + + struct + { + NvU8 support_32khz : 1; + NvU8 support_44_1khz : 1; + NvU8 support_48khz : 1; + NvU8 rsvd : 5; + } audio_capability; + + struct + { + NvU8 rsvd : 1; + NvU8 support_colorspace_bt2020_eotf_smpte_st2084: 1; + NvU8 support_colorspace_bt2020_eotf_bt2020 : 1; + NvU8 support_colorspace_dci_p3_eotf_dci_p3 : 1; + NvU8 support_colorspace_adobe_rgb_eotf_adobe_rgb: 1; + NvU8 support_colorspace_bt709_eotf_bt1886 : 1; + NvU8 support_colorspace_bt601_eotf_bt601 : 1; + NvU8 support_colorspace_srgb_eotf_srgb : 1; + } colorspace_eotf_combination_1; + + struct + { + NvU8 rsvd : 8; + } colorspace_eotf_combination_2; + + struct + { + NvU8 rsvd : 5; + NvU8 total : 3; + } total_additional_colorspace_eotf; + + struct + { + NvU8 support_colorspace : 4; + NvU8 support_eotf : 4; + } additional_colorspace_eotf[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF]; + } display_interface_features; + } u4; + +} NVT_DISPLAYID_INFO; + +//*********************************** +// EDID 18-byte display descriptors +//*********************************** +// +// +//*** (Tag = 0xFF) ***/ +// Display Product Serial Number +#define NVT_EDID_LDD_PAYLOAD_SIZE 13 +typedef struct tagNVT_EDID_DD_SERIAL_NUMBER +{ + NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE]; + NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE]; +} NVT_EDID_DD_SERIAL_NUMBER; +// +// +// +//*** (Tag = 0xFE) ***/ +// Alphanumeric Data String (ASCII) +typedef struct tagNVT_EDID_DD_DATA_STRING +{ + NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE]; + NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE]; +} NVT_EDID_DD_DATA_STRING; +// +// +// +//*** (Tag = 0xFD) ***/ +// Display Range Limit +// +typedef struct tagNVT_EDID_DD_RANGE_GTF2 +{ + NvU8 C; + NvU8 K; + NvU8 J; + NvU16 M; +} NVT_EDID_DD_RANGE_GTF2; + +typedef struct tagNVT_EDID_DD_RANGE_CVT +{ + NvU16 max_active_pixels_per_line; + + NvU8 pixel_clock_adjustment : 2; // this is in 0.25Hz, subtract from max_pixel_clock + // the whole number part (if existing) gets subtracted + // from max_pclk_MHz right away + NvU8 aspect_supported : 5; + + NvU8 aspect_preferred : 3; + NvU8 blanking_support : 2; + NvU8 reserved1 : 3; + + NvU8 scaling_support : 4; + NvU8 reserved2 : 4; + + NvU8 preferred_refresh_rate; +} NVT_EDID_DD_RANGE_CVT; + +typedef struct tagNVT_EDID_DD_RANGE_LIMIT +{ + NvU16 min_v_rate; + NvU16 max_v_rate; + NvU16 min_h_rate; + NvU16 max_h_rate; + NvU16 max_pclk_MHz; + NvU8 timing_support; // indicates 2nd GTF / CVT support + union + { + // if timing_support = 0x02 + NVT_EDID_DD_RANGE_GTF2 gtf2; + + // if timing_support = 0x04 + NVT_EDID_DD_RANGE_CVT cvt; + }u; +} NVT_EDID_DD_RANGE_LIMIT; + +typedef struct tagNVT_EDID_RANGE_LIMIT +{ + NvU32 min_v_rate_hzx1k; + NvU32 max_v_rate_hzx1k; + NvU32 min_h_rate_hz; + NvU32 max_h_rate_hz; + NvU32 max_pclk_10khz; +} NVT_EDID_RANGE_LIMIT; + +// timing support +#define NVT_EDID_RANGE_SUPPORT_GTF2 0x02 +#define NVT_EDID_RANGE_SUPPORT_CVT 0x04 + +// supported aspect ratios +#define NVT_EDID_CVT_ASPECT_SUPPORT_MAX 5 + +#define NVT_EDID_CVT_ASPECT_SUPPORT_4X3 0x10 +#define NVT_EDID_CVT_ASPECT_SUPPORT_16X9 0x08 +#define NVT_EDID_CVT_ASPECT_SUPPORT_16X10 0x04 +#define NVT_EDID_CVT_ASPECT_SUPPORT_5X4 0x02 +#define NVT_EDID_CVT_ASPECT_SUPPORT_15X9 0x01 + +// preferred aspect ratios +#define NVT_EDID_CVT_ASPECT_PREFER_4X3 0x00 +#define NVT_EDID_CVT_ASPECT_PREFER_16X9 0x01 +#define NVT_EDID_CVT_ASPECT_PREFER_16X10 0x02 +#define NVT_EDID_CVT_ASPECT_PREFER_5X4 0x03 
+#define NVT_EDID_CVT_ASPECT_PREFER_15X9 0x04 + +// cvt blanking support +#define NVT_EDID_CVT_BLANKING_STANDARD 0x01 +#define NVT_EDID_CVT_BLANKING_REDUCED 0x02 + +// scaling support +#define NVT_EDID_CVT_SCALING_HOR_SHRINK 0x08 +#define NVT_EDID_CVT_SCALING_HOR_STRETCH 0x04 +#define NVT_EDID_CVT_SCALING_VER_SHRINK 0x02 +#define NVT_EDID_CVT_SCALING_VER_STRETCH 0x01 + +// +// +// +//*** (Tag = 0xFC) ***/ +// Display Product Name +typedef struct tagNVT_EDID_DD_PRODUCT_NAME +{ + NvU8 str[NVT_EDID_LDD_PAYLOAD_SIZE]; + NvU8 padding[16 - NVT_EDID_LDD_PAYLOAD_SIZE]; +} NVT_EDID_DD_PRODUCT_NAME; +// +// +// +//*** (Tag = 0xFB) ***/ +// the 18-byte display descriptors +// Display Color Point Data +typedef struct tagNVT_EDID_DD_COLOR_POINT +{ + NvU8 wp1_index; + NvU16 wp1_x; + NvU16 wp1_y; + NvU16 wp1_gamma; + NvU8 wp2_index; + NvU16 wp2_x; + NvU16 wp2_y; + NvU16 wp2_gamma; +} NVT_EDID_DD_COLOR_POINT; +// +// +// +//*** (Tag = 0xFA) ***/ +// Standard Timing Identifications +#define NVT_EDID_DD_STI_NUM 6 + +typedef struct tagNVT_EDID_DD_STD_TIMING +{ + NvU16 descriptor[NVT_EDID_DD_STI_NUM]; +} NVT_EDID_DD_STD_TIMING; +// +// +// +//*** (Tag = 0xF9) ***/ +// Display Color Management Data (DCM) +typedef struct tagNVT_EDID_DD_COLOR_MANAGEMENT_DATA +{ + NvU16 red_a3; + NvU16 red_a2; + NvU16 green_a3; + NvU16 green_a2; + NvU16 blue_a3; + NvU16 blue_a2; +} NVT_EDID_DD_COLOR_MANAGEMENT_DATA; +// +// +// +//*** (Tag = 0xF8) ***/ +// CVT 3 Byte Timing Code +#define NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR 4 + +typedef struct tagEDID_DD_CVT_3BYTE_BLOCK +{ + NvU16 addressable_lines : 14; + NvU8 aspect_ratio : 2; + NvU8 reserved0 : 1; + NvU8 preferred_vert_rates : 2; + NvU8 supported_vert_rates : 5; + +} NVT_EDID_DD_CVT_3BYTE_BLOCK; + +typedef struct tagNVT_EDID_DD_CVT_3BYTE +{ + NVT_EDID_DD_CVT_3BYTE_BLOCK block[NVT_EDID_DD_MAX_CVT3_PER_DESCRITPOR]; +} NVT_EDID_DD_CVT_3BYTE; + +#define NVT_EDID_CVT3_ASPECT_4X3 0x00 +#define NVT_EDID_CVT3_ASPECT_16X9 0x01 +#define NVT_EDID_CVT3_ASPECT_16X10 0x02 +#define NVT_EDID_CVT3_ASPECT_15X9 0x03 + +#define NVT_EDID_CVT3_PREFFERED_RATE_50HZ 0x00 +#define NVT_EDID_CVT3_PREFFERED_RATE_60HZ 0x01 +#define NVT_EDID_CVT3_PREFFERED_RATE_75HZ 0x02 +#define NVT_EDID_CVT3_PREFFERED_RATE_85HZ 0x03 + +#define NVT_EDID_CVT3_SUPPORTED_RATE_50HZ 0x10 +#define NVT_EDID_CVT3_SUPPORTED_RATE_60HZ 0x08 +#define NVT_EDID_CVT3_SUPPORTED_RATE_75HZ 0x04 +#define NVT_EDID_CVT3_SUPPORTED_RATE_85HZ 0x02 +#define NVT_EDID_CVT3_SUPPORTED_RATE_60HZ_REDUCED_BLANKING 0x01 +// +// +// +//*** (Tag = 0xF7) ***/ +// Established Timings III +// +#define NVT_EDID_DD_EST_TIMING3_NUM 6 + +typedef struct tagNVT_EDID_DD_EST_TIMING3 +{ + NvU8 revision; + NvU8 data[NVT_EDID_DD_EST_TIMING3_NUM]; +} NVT_EDID_DD_EST_TIMING3; +// +// +// +//*** (Tag = 0x10) ***/ +// Dummy Descriptor Definition +typedef struct tagNVT_EDID_DD_DUMMY_DESCRIPTOR +{ + NvU8 data[13]; +} NVT_EDID_DD_DUMMY_DESCRIPTOR; +// +// +// +//*** (Tag = 0x0F) ***/ +// Manufacturer Special Data +typedef struct tagNVT_EDID_DD_MANUF_DATA +{ + NvU8 data[13]; +} NVT_EDID_DD_MANUF_DATA; +// +// +// +// the translated generic 18-byte long descriptor +typedef struct tagNVT_EDID_18BYTE_DESCRIPTOR +{ + NvU8 tag; + union + { + NVT_EDID_DD_SERIAL_NUMBER serial_number; + NVT_EDID_DD_DATA_STRING data_str; + NVT_EDID_DD_RANGE_LIMIT range_limit; + NVT_EDID_DD_PRODUCT_NAME product_name; + NVT_EDID_DD_COLOR_POINT color_point; + NVT_EDID_DD_STD_TIMING std_timing; + NVT_EDID_DD_COLOR_MANAGEMENT_DATA color_man; + NVT_EDID_DD_CVT_3BYTE cvt; + NVT_EDID_DD_EST_TIMING3 est3; + 
NVT_EDID_DD_DUMMY_DESCRIPTOR dummy; + NVT_EDID_DD_MANUF_DATA manuf_data; + } u; +} NVT_EDID_18BYTE_DESCRIPTOR; +// +// +// Display Descriptor Tags +#define NVT_EDID_DISPLAY_DESCRIPTOR_DPSN 0xFF // display product serial number +#define NVT_EDID_DISPLAY_DESCRIPTOR_ADS 0xFE // alphanumeric data string (ASCII) +#define NVT_EDID_DISPLAY_DESCRIPTOR_DRL 0xFD // display range limit +#define NVT_EDID_DISPLAY_DESCRITPOR_DPN 0xFC // display product name +#define NVT_EDID_DISPLAY_DESCRIPTOR_CPD 0xFB // color point data +#define NVT_EDID_DISPLAY_DESCRIPTOR_STI 0xFA // standard timing identification +#define NVT_EDID_DISPLAY_DESCRIPTOR_DCM 0xF9 // display color management +#define NVT_EDID_DISPLAY_DESCRIPTOR_CVT 0xF8 // CVT 3-byte timing code +#define NVT_EDID_DISPLAY_DESCRIPTOR_ESTIII 0xF7 // establishied timing III +#define NVT_EDID_DISPLAY_DESCRIPTOR_DUMMY 0x10 // dummy descriptor + +//******************* +// Raw EDID offsets and info +//******************* +// +// Byte 14, video input definition +// +#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_MASK 0x0F // dvi/hdmi/dp +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_MASK 0x70 // bpc support +#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL_MASK 0x80 // digital/analog +// +#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_SHIFT 0 +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_SHIFT 4 +#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL_SHIFT 7 +// +#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_UNDEFINED 0 +#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DVI_SUPPORTED 1 +#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_A_SUPPORTED 2 +#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_HDMI_B_SUPPORTED 3 +#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_MDDI_SUPPORTED 4 +#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_DISPLAYPORT_SUPPORTED 5 +//#define NVT_EDID_DIGITAL_VIDEO_INTERFACE_STANDARD_RESERVED 6 - 15 +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_UNDEFINED 0 +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_6BPC 1 +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_8BPC 2 +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_10BPC 3 +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_12BPC 4 +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_14BPC 5 +#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_16BPC 6 +//#define NVT_EDID_VIDEO_COLOR_BIT_DEPTH_RESERVED 7 +#define NVT_EDID_VIDEO_INPUT_DEFINITION_DIGITAL 0x01 +// +// Byte 18, feature support +// +#define NVT_EDID_OTHER_FEATURES_MASK 0x07 // sRGB space, preferred timing, continuous freq. 
+#define NVT_EDID_DISPLAY_COLOR_TYPE_MASK 0x18 // for analog, see byte 14, bit 7 +#define NVT_EDID_DISPLAY_COLOR_ENCODING_MASK 0x18 // for digital +#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_MASK 0xE0 // standby/suspend/active off +// +#define NVT_EDID_OTHER_FEATURES_SHIFT 0 +#define NVT_EDID_DISPLAY_COLOR_TYPE_SHIFT 3 +#define NVT_EDID_DISPLAY_COLOR_ENCODING_SHIFT 3 +#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SHIFT 5 +// +#define NVT_EDID_OTHER_FEATURES_USES_CONTINUOUS_FREQ (1 << 0) +#define NVT_EDID_OTHER_FEATURES_PTM_INCLUDE_NATIVE (1 << 1) +#define NVT_EDID_OTHER_FEATURES_SRGB_DEFAULT_COLORSPACE (1 << 2) +// +#define NVT_EDID_DISPLAY_COLOR_TYPE_MONOCHROME 0 +#define NVT_EDID_DISPLAY_COLOR_TYPE_RGB 1 +#define NVT_EDID_DISPLAY_COLOR_TYPE_NON_RGB 2 +#define NVT_EDID_DISPLAY_COLOR_TYPE_UNDEFINED 3 +// +#define NVT_EDID_DISPLAY_COLOR_ENCODING_YCBCR_444 (1 << 0) // RGB is always supported +#define NVT_EDID_DISPLAY_COLOR_ENCODING_YCBCR_422 (1 << 1) // RGB is always supported +// +#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_ACTIVE_OFF (1 << 0) +#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_SUSPENDED_MODE (1 << 1) +#define NVT_EDID_DISPLAY_POWER_MANAGEMENT_SUPPORTS_STANDBY_MODE (1 << 2) +// +// edid offsets +// +#define NVT_EDID_VIDEO_INPUT_DEFINITION 0x14 +#define NVT_EDID_FEATURE_SUPPORT 0x18 + + +//******************* +// Parsed EDID info +//******************* +// +#define NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR 4 +#define NVT_EDID_MAX_STANDARD_TIMINGS 8 +#define NVT_EDID_MAX_TOTAL_TIMING NVT_MAX_TOTAL_TIMING +#define NVT_EDID_VER_1_1 0x101 +#define NVT_EDID_VER_1_2 0x102 +#define NVT_EDID_VER_1_3 0x103 +#define NVT_EDID_VER_1_4 0x104 +// +// byte 0x14, Digital +// bits 0-3 +#define NVT_EDID_VIDEOSIGNAL_INTERFACE_NOT_DEFINED 0x0 +#define NVT_EDID_VIDEOSIGNAL_INTERFACE_DVI 0x1 +#define NVT_EDID_VIDEOSIGNAL_INTERFACE_HDMI_A 0x2 +#define NVT_EDID_VIDEOSIGNAL_INTERFACE_HDMI_B 0x3 +#define NVT_EDID_VIDEOSIGNAL_INTERFACE_MDDI 0x4 +#define NVT_EDID_VIDEOSIGNAL_INTERFACE_DP 0x5 +// bits 4-6; these are translated values. See NvTiming_ParseEDIDInfo() +#define NVT_EDID_VIDEOSIGNAL_BPC_NOT_DEFINED 0 +#define NVT_EDID_VIDEOSIGNAL_BPC_6 6 +#define NVT_EDID_VIDEOSIGNAL_BPC_8 8 +#define NVT_EDID_VIDEOSIGNAL_BPC_10 10 +#define NVT_EDID_VIDEOSIGNAL_BPC_12 12 +#define NVT_EDID_VIDEOSIGNAL_BPC_14 14 +#define NVT_EDID_VIDEOSIGNAL_BPC_16 16 +// +// byte 0x18, edid 1.3 +// bits 3-4 +#define NVT_EDID_FEATURESUPPORT_COLOR_MONOCHROME 0x0 /* Monochrome/grayscale display */ +#define NVT_EDID_FEATURESUPPORT_COLOR_RGB 0x1 /* R/G/B color display */ +#define NVT_EDID_FEATURESUPPORT_COLOR_MULTICOLOR 0x2 /* non R/G/B multicolor displays e.g. R/G/Y */ +#define NVT_EDID_FEATURESUPPORT_COLOR_UNDEFINED 0x3 /* Undefined */ +// +// byte 0x18, edid 1.4 +// bits 3-4 +#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_RBG 0x0 /* RGB always supported */ +#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB444 0x1 /* RGB + 444 */ +#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB422 0x2 /* RGB + 422 */ +#define NVT_EDID_FEATURESUPPORT_COLOR_ENCODING_YCRCB 0x3 /* RGB + 444 + 422 supported */ +// +// +// structure used internally to map support for HDMI 3D modes. 
+#define MAX_EDID_ADDRESSABLE_3D_VICS 16 +#define MAX_3D_VICS_RESERVED_FOR_MANDATORY 8 +#define MAX_3D_VICS_SUPPORTED (MAX_EDID_ADDRESSABLE_3D_VICS + MAX_3D_VICS_RESERVED_FOR_MANDATORY) + +//Constants given by Dolby to be appended for chromaticity information +#define NVT_DOLBY_CHROMATICITY_MSB_BX 0x20 +#define NVT_DOLBY_CHROMATICITY_MSB_BY 0x08 +#define NVT_DOLBY_CHROMATICITY_MSB_GX 0x00 +#define NVT_DOLBY_CHROMATICITY_MSB_GY 0x80 +#define NVT_DOLBY_CHROMATICITY_MSB_RX 0xA0 +#define NVT_DOLBY_CHROMATICITY_MSB_RY 0x40 + +typedef struct _HDMI3DDetails +{ + NvU8 Vic; + NvU16 StereoStructureMask; + NvU8 SideBySideHalfDetail; +} HDMI3DDETAILS; + +typedef struct _SupportMap +{ + HDMI3DDETAILS map[MAX_3D_VICS_SUPPORTED]; + NvU32 total; +} HDMI3DSUPPORTMAP; + +typedef struct tagNVT_EXT_TIMING +{ + NVT_TIMING timing; + NVT_HDMIEXT HDMI3D; +} NVT_EXT_TIMING; + +typedef struct _NVDA_VSDB_PARSED_INFO +{ + NvBool valid; + NvU8 vsdbVersion; + + // these fields are specified in version 1 of the NVDA VSDB + union + { + struct + { + NvBool supportsVrr; + NvU8 minRefreshRate; + } v1; + } vrrData; + +} NVDA_VSDB_PARSED_INFO; + +typedef enum _MSFT_VSDB_DESKTOP_USAGE +{ + MSFT_VSDB_NOT_USABLE_BY_DESKTOP = 0, + MSFT_VSDB_USABLE_BY_DESKTOP = 1 +} MSFT_VSDB_DESKTOP_USAGE; + +typedef enum _MSFT_VSDB_THIRD_PARTY_USAGE +{ + MSFT_VSDB_NOT_USABLE_BY_THIRD_PARTY = 0, + MSFT_VSDB_USABLE_BY_THIRD_PARTY = 1 +} MSFT_VSDB_THIRD_PARTY_USAGE; + +typedef enum _MSFT_VSDB_PRIMARY_USE_CASE +{ + MSFT_VSDB_FOR_UNDEFINED = 0, + MSFT_VSDB_FOR_TEST_EQUIPMENT = 0x1, + MSFT_VSDB_FOR_GENERIC_DISPLAY = 0x2, + MSFT_VSDB_FOR_TELEVISION_DISPLAY = 0x3, + MSFT_VSDB_FOR_DESKTOP_PRODUCTIVITY_DISPLAY = 0x4, + MSFT_VSDB_FOR_DESKTOP_GAMING_DISPLAY = 0x5, + MSFT_VSDB_FOR_PRESENTATION_DISPLAY = 0x6, + MSFT_VSDB_FOR_VIRTUAL_REALITY_HEADSETS = 0x7, + MSFT_VSDB_FOR_AUGMENTED_REALITY = 0x8, + MSFT_VSDB_FOR_VIDEO_WALL_DISPLAY = 0x10, + MSFT_VSDB_FOR_MEDICAL_IMAGING_DISPLAY = 0x11, + MSFT_VSDB_FOR_DEDICATED_GAMING_DISPLAY = 0x12, + MSFT_VSDB_FOR_DEDICATED_VIDEO_MONITOR_DISPLAY = 0x13, + MSFT_VSDB_FOR_ACCESSORY_DISPLAY = 0X14 +} MSFT_VSDB_PRIMARY_USE_CASE; + +#define MSFT_VSDB_CONTAINER_ID_SIZE (16) +#define MSFT_VSDB_MAX_VERSION_SUPPORT (3) + +typedef struct _MSFT_VSDB_PARSED_INFO +{ + NvBool valid; + NvU8 version; + + MSFT_VSDB_DESKTOP_USAGE desktopUsage; + MSFT_VSDB_THIRD_PARTY_USAGE thirdPartyUsage; + MSFT_VSDB_PRIMARY_USE_CASE primaryUseCase; + NvU8 containerId[MSFT_VSDB_CONTAINER_ID_SIZE]; + +} MSFT_VSDB_PARSED_INFO; + +typedef struct tagNVT_HDMI_LLC_INFO +{ + // A.B.C.D address + NvU8 addrA; + NvU8 addrB; + NvU8 addrC; + NvU8 addrD; + + NvU8 supports_AI : 1; + NvU8 dc_48_bit : 1; + NvU8 dc_36_bit : 1; + NvU8 dc_30_bit : 1; + NvU8 dc_y444 : 1; + NvU8 dual_dvi : 1; + NvU8 max_tmds_clock; + NvU8 effective_tmds_clock; + NvU8 latency_field_present : 1; + NvU8 i_latency_field_present : 1; + NvU8 hdmi_video_present : 1; + NvU8 cnc3 : 1; + NvU8 cnc2 : 1; + NvU8 cnc1 : 1; + NvU8 cnc0 : 1; + NvU8 video_latency; + NvU8 audio_latency; + NvU8 interlaced_video_latency; + NvU8 interlaced_audio_latency; + NvU8 threeD_present : 1; + NvU8 threeD_multi_present : 2; + NvU8 image_size : 2; + NvU8 hdmi_vic_len : 3; + NvU8 hdmi_3d_len : 5; + // for now ignoring the other extensions + // .... 
+} NVT_HDMI_LLC_INFO; + +typedef struct tagNVT_HDMI_FORUM_INFO +{ + NvU8 max_TMDS_char_rate; + NvU8 threeD_Osd_Disparity : 1; + NvU8 dual_view : 1; + NvU8 independent_View : 1; + NvU8 lte_340Mcsc_scramble : 1; + NvU8 ccbpci : 1; + NvU8 cable_status : 1; + NvU8 rr_capable : 1; + NvU8 scdc_present : 1; + NvU8 dc_30bit_420 : 1; + NvU8 dc_36bit_420 : 1; + NvU8 dc_48bit_420 : 1; + NvU8 uhd_vic : 1; + NvU8 max_FRL_Rate : 4; + NvU8 fapa_start_location : 1; + NvU8 allm : 1; + NvU8 fva : 1; + NvU8 cnmvrr : 1; + NvU8 cinemaVrr : 1; + NvU8 m_delta : 1; + NvU8 vrr_min : 6; + NvU16 vrr_max : 10; + NvU8 dsc_10bpc : 1; + NvU8 dsc_12bpc : 1; + NvU8 dsc_16bpc : 1; + NvU8 dsc_All_bpp : 1; + NvU8 dsc_Native_420 : 1; + NvU8 dsc_1p2 : 1; + NvU8 dsc_MaxSlices : 6; + NvU16 dsc_MaxPclkPerSliceMHz : 10; + NvU8 dsc_Max_FRL_Rate : 4; + NvU8 dsc_totalChunkKBytes : 7; // = 1 + EDID reported DSC_TotalChunkKBytes + +} NVT_HDMI_FORUM_INFO; + +typedef struct tagNVT_HDR_STATIC_METADATA +{ + struct + { + NvU8 trad_gamma_sdr_eotf : 1; + NvU8 trad_gamma_hdr_eotf : 1; + NvU8 smpte_st_2084_eotf : 1; + NvU8 future_eotf : 1; + } supported_eotf; + + NvU8 static_metadata_type; // set to 1 if the sink support for static meta data type 1 + NvU8 max_cll; // maximum luminance level value + NvU8 max_fall; // maximum fram-average luminance + NvU8 min_cll; // minimum luminance level value + +}NVT_HDR_STATIC_METADATA; + +typedef struct tagNVT_DV_STATIC_METADATA +{ + NvU32 ieee_id : 24; + NvU32 VSVDB_version : 3; + NvU32 dm_version : 8; + NvU32 supports_2160p60hz : 1; + NvU32 supports_YUV422_12bit : 1; + NvU32 supports_global_dimming : 1; + NvU32 colorimetry : 1; + NvU32 target_min_luminance : 12; + NvU32 target_max_luminance : 12; + NvU32 cc_red_x : 12; + NvU32 cc_red_y : 12; + NvU32 cc_green_x : 12; + NvU32 cc_green_y : 12; + NvU32 cc_blue_x : 12; + NvU32 cc_blue_y : 12; + NvU32 cc_white_x : 12; + NvU32 cc_white_y : 12; + NvU32 supports_backlight_control : 2; + NvU32 backlt_min_luma : 2; + NvU32 interface_supported_by_sink : 2; + NvU32 supports_10b_12b_444 : 2; +}NVT_DV_STATIC_METADATA; + +//*********************************** +// parsed DisplayID 2.0 definitions +//*********************************** +#define NVT_DISPLAYID_2_0_PRODUCT_STRING_MAX_LEN 236 + +// the basic info encoded in byte[3] +#define NVT_DISPLAY_2_0_CAP_BASIC_AUDIO 0x40 // DTV monitor supports basic audio +#define NVT_DISPLAY_2_0_CAP_YCbCr_444 0x20 // DTV monitor supports YCbCr4:4:4 +#define NVT_DISPLAY_2_0_CAP_YCbCr_422 0x10 // DTV monitor supports YCbCr4:2:2 + +// vendor specific + +#define NVT_VESA_VENDOR_SPECIFIC_IEEE_ID 0x3A0292 +#define NVT_VESA_VENDOR_SPECIFIC_LENGTH 7 + +#define NVT_VESA_ORG_VSDB_DATA_TYPE_MASK 0x07 +#define NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_MASK 0x80 +#define NVT_VESA_ORG_VSDB_COLOR_SPACE_AND_EOTF_SHIFT 7 +#define NVT_VESA_ORG_VSDB_PIXELS_OVERLAPPING_MASK 0x0F +#define NVT_VESA_ORG_VSDB_MULTI_SST_MODE_MASK 0x60 +#define NVT_VESA_ORG_VSDB_MULTI_SST_MODE_SHIFT 5 +#define NVT_VESA_ORG_VSDB_PASS_THROUGH_INTEGER_MASK 0x3F +#define NVT_VESA_ORG_VSDB_PASS_THROUGH_FRACTIOINAL_MASK 0x0F + +typedef enum _tagNVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE +{ + PRODUCT_PRIMARY_USE_TEST_EQUIPMENT = 1, + PRODUCT_PRIMARY_USE_GENERIC_DISPLAY = 2, + PRODUCT_PRIMARY_USE_TELEVISION = 3, + PRODUCT_PRIMARY_USE_DESKTOP_PRODUCTIVITY = 4, + PRODUCT_PRIMARY_USE_DESKTOP_GAMING = 5, + PRODUCT_PRIMARY_USE_PRESENTATION = 6, + PRODUCT_PRIMARY_USE_HEAD_MOUNT_VIRTUAL_REALITY = 7, + PRODUCT_PRIMARY_USE_HEAD_MOUNT_AUGMENTED_REALITY = 8, +} NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE; + 
+typedef enum _tagNVT_DISPLAYID_SCAN_ORIENTATION +{ + SCAN_ORIENTATION_LRTB = 0, + SCAN_ORIENTATION_RLTB = 1, + SCAN_ORIENTATION_TBRL = 2, + SCAN_ORIENTATION_BTRL = 3, + SCAN_ORIENTATION_RLBT = 4, + SCAN_ORIENTATION_LRBT = 5, + SCAN_ORIENTATION_BTLR = 6, + SCAN_ORIENTATION_TBLR = 7, +} NVT_DISPLAYID_SCAN_ORIENTATION; + +typedef enum _tagNVT_DISPLAYID_INTERFACE_EOTF +{ + INTERFACE_EOTF_NOT_DEFINED = 0x0, + INTERFACE_EOTF_SRGB = 0x1, + INTERFACE_EOTF_BT601 = 0x2, + INTERFACE_EOTF_BT1886 = 0x3, + INTERFACE_EOTF_ADOBE_RGB = 0x4, + INTERFACE_EOTF_DCI_P3 = 0x5, + INTERFACE_EOTF_BT2020 = 0x6, + INTERFACE_EOTF_NATIVE_GAMMA = 0x7, + INTERFACE_EOTF_SMPTE_ST2084 = 0x8, + INTERFACE_EOTF_HYBRID_LOG = 0x9, + INTERFACE_EOTF_CUSTOM = 0x10, +} NVT_DISPLAYID_INTERFACE_EOTF; + +typedef enum _tagNVT_DISPLAYID_INTERFACE_COLOR_SPACE +{ + INTERFACE_COLOR_SPACE_NOT_DEFINED = 0x0, + INTERFACE_COLOR_SPACE_SRGB = 0x1, + INTERFACE_COLOR_SPACE_BT601 = 0x2, + INTERFACE_COLOR_SPACE_BT709 = 0x3, + INTERFACE_COLOR_SPACE_ADOBE_RGB = 0x4, + INTERFACE_COLOR_SPACE_DCI_P3 = 0x5, + INTERFACE_COLOR_SPACE_BT2020 = 0x6, + INTERFACE_COLOR_SPACE_CUSTOM = 0x7, +} NVT_DISPLAYID_INTERFACE_COLOR_SPACE; + +typedef enum _tagNVT_DISPLAYID_DEVICE_TECHNOLOGY +{ + DEVICE_TECHNOLOGY_NOT_SPECIFIED, + DEVICE_TECHNOLOGY_LCD, + DEVICE_TECHNOLOGY_OLED, +} NVT_DISPLAYID_DEVICE_TECHNOLOGY; + +typedef struct _tagNVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY +{ + NvU32 revision; + + struct + { + NvBool bSingleEnclosure; + NvBool bHasBezelInfo; + NVT_SINGLE_TILE_BEHAVIOR single_tile_behavior; + NVT_MULTI_TILE_BEHAVIOR multi_tile_behavior; + } capability; + + struct + { + NvU32 row; + NvU32 col; + } topology; + + struct + { + NvU32 x; + NvU32 y; + } location; + + struct + { + NvU32 width; + NvU32 height; + } native_resolution; + + struct + { + NvU32 top; // Top bezel in pixels + NvU32 bottom; // Bottom bezel in pixels + NvU32 right; // Right bezel in pixels + NvU32 left; // Left bezel in pixels + } bezel_info; + + NVT_TILEDDISPLAY_TOPOLOGY_ID tile_topology_id; +} NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY; + +typedef struct _tagNVT_DISPLAYID_CONTAINERID +{ + NvU32 revision; + NvU32 data1; + NvU16 data2; + NvU16 data3; + NvU16 data4; + NvU8 data5[6]; +} NVT_DISPLAYID_CONTAINERID; + +typedef struct _tagNVT_DISPLAYID_INTERFACE_FEATURES +{ + NvU32 revision; + + NVT_COLORDEPTH rgb444; // each bit within is set if rgb444 supported on that bpc + NVT_COLORDEPTH yuv444; // each bit within is set if yuv444 supported on that bpc + NVT_COLORDEPTH yuv422; // each bit within is set if yuv422 supported on that bpc + NVT_COLORDEPTH yuv420; // each bit within is set if yuv420 supported on that bpc + + NvU32 yuv420_min_pclk; + + struct + { + NvU8 support_32khz : 1; + NvU8 support_44_1khz : 1; + NvU8 support_48khz : 1; + NvU8 rsvd : 5; + } audio_capability; + + NvU32 combination_count; + struct + { + NVT_DISPLAYID_INTERFACE_EOTF eotf; + NVT_DISPLAYID_INTERFACE_COLOR_SPACE color_space; + } colorspace_eotf_combination[NVT_DISPLAYID_DISPLAY_INTERFACE_FEATURES_MAX_ADDITIONAL_SUPPORTED_COLORSPACE_EOTF + 1]; + +} NVT_DISPLAYID_INTERFACE_FEATURES; + +typedef struct _tagNVT_DISPLAYID_PRODUCT_IDENTITY +{ + NvU32 revision; + NvU32 vendor_id; + NvU16 product_id; + NvU32 serial_number; + NvU16 week; + NvU16 year; + NvU8 product_string[NVT_DISPLAYID_2_0_PRODUCT_STRING_MAX_LEN + 1]; +} NVT_DISPLAYID_PRODUCT_IDENTITY; + +typedef enum _tagNVT_COLOR_MAP_STANDARD +{ + COLOR_MAP_CIE_1931, + COLOR_MAP_CIE_1976, +} NVT_COLOR_MAP_STANDARD; + +typedef enum _tagNVT_AUDIO_SPEAKER_INTEGRATED +{ + 
AUDIO_SPEAKER_INTEGRATED_SUPPORTED = 0, + AUDIO_SPEAKER_INTEGRATED_NOT_SUPPORTED = 1, +} NVT_AUDIO_SPEAKER_INTEGRATED; + +typedef enum _tagNVT_NATIVE_LUMINANCE_INFO +{ + NATIVE_LUMINANCE_INFO_MIN_GURANTEE_VALUE = 0, + NATIVE_LUMINANCE_INFO_SOURCE_DEVICE_GUIDANCE = 1, +} NVT_NATIVE_LUMINANCE_INFO; + +typedef struct _tagNVT_DISPLAYID_DISPLAY_PARAMETERS +{ + NvU32 revision; + NvU32 h_image_size_micro_meter; + NvU32 v_image_size_micro_meter; + NvU16 h_pixels; + NvU16 v_pixels; + NVT_DISPLAYID_SCAN_ORIENTATION scan_orientation; + NVT_COLOR_MAP_STANDARD color_map_standard; + NVT_COLOR_POINT primaries[3]; + NVT_COLOR_POINT white; + NVT_NATIVE_LUMINANCE_INFO native_luminance_info; + NvU16 native_max_luminance_full_coverage; + NvU16 native_max_luminance_1_percent_rect_coverage; + NvU16 native_min_luminance; + NVT_COLORDEPTH native_color_depth; + NvU16 gamma_x100; + NVT_DISPLAYID_DEVICE_TECHNOLOGY device_technology; + NvBool device_theme_Preference; + NvBool audio_speakers_integrated; +} NVT_DISPLAYID_DISPLAY_PARAMETERS; + +typedef struct _tagVESA_VSDB_PARSED_INFO +{ + struct + { + NvU8 type : 3; + NvU8 reserved : 4; + NvU8 color_space_and_eotf : 1; + } data_struct_type; + + struct + { + NvU8 pixels_overlapping_count : 4; + NvU8 reserved_0 : 1; + NvU8 multi_sst : 2; + NvU8 reserved_1 : 1; + } overlapping; + + struct + { + NvU8 pass_through_integer_dsc : 6; + NvU8 reserved : 2; + } pass_through_integer; + + struct + { + NvU8 pass_through_fraction_dsc : 4; + NvU8 reserved : 4; + } pass_through_fractional; + +} VESA_VSDB_PARSED_INFO; + +typedef struct _tagNVT_DISPLAYID_VENDOR_SPECIFIC +{ + NVT_HDMI_LLC_INFO hdmiLlc; + NVT_HDMI_FORUM_INFO hfvs; + NVDA_VSDB_PARSED_INFO nvVsdb; + MSFT_VSDB_PARSED_INFO msftVsdb; + VESA_VSDB_PARSED_INFO vesaVsdb; +} NVT_DISPLAYID_VENDOR_SPECIFIC; + +typedef struct _tagNVT_DISPLAYID_CTA +{ + NVT_EDID_CEA861_INFO cta861_info; + NVT_HDR_STATIC_METADATA hdrInfo; + NVT_DV_STATIC_METADATA dvInfo; +} NVT_DISPLAYID_CTA; + +typedef struct _tagNVT_VALID_DATA_BLOCKS +{ + NvBool product_id_present; + NvBool parameters_present; + NvBool type7Timing_present; + NvBool type8Timing_present; + NvBool type9Timing_present; + NvBool dynamic_range_limit_present; + NvBool interface_feature_present; + NvBool stereo_interface_present; + NvBool tiled_display_present; + NvBool container_id_present; + NvBool vendor_specific_present; + NvBool cta_data_present; +} NVT_VALID_DATA_BLOCKS; + +#define NVT_DISPLAYID_MAX_TOTAL_TIMING NVT_MAX_TOTAL_TIMING +typedef struct _tagNVT_DISPLAYID_2_0_INFO +{ + NvU8 revision; + NvU8 version; + + // support audio/yuv444/yuv422 color for CTA861 compatible + NvU8 basic_caps; + + // the all extensions that may appear following the base section + NvU32 extension_count; + + // this displayID20 is EDID extension or not + NvBool as_edid_extension; + + // data blocks present or not + NVT_VALID_DATA_BLOCKS valid_data_blocks; + + NVT_DISPLAYID_PRODUCT_PRIMARY_USE_CASE primary_use_case; + + // Product Identification Data Block (Mandatory) + NVT_DISPLAYID_PRODUCT_IDENTITY product_identity; + + // Display Parameter Data Block (Mandatory for Display Use) + NVT_DISPLAYID_DISPLAY_PARAMETERS display_param; + + // Detailed Timing Data Block (Mandatory for Display Use) + NvU32 total_timings; + NVT_TIMING timing[NVT_DISPLAYID_MAX_TOTAL_TIMING]; + + // Enumerated Timing Code Data Block (Not Mandatory) + + // Formula-based Timing Data Block (Not Mandatory) + + // Dynamic Video Timing Range Limits Data Block (Not Mandatory) + NVT_DISPLAYID_RANGE_LIMITS range_limits; + + // Display Interface 
Features Data Block (Mandatory) + NVT_DISPLAYID_INTERFACE_FEATURES interface_features; + + // Stereo Display Interface Data Block (Not Mandatory) + + // Tiled Display Topology Data Block (Not Mandatory) + NVT_DISPLAYID_TILED_DISPLAY_TOPOLOGY tile_topo; + + // ContainerID Data Block (Mandatory for Multi-function Device) + NVT_DISPLAYID_CONTAINERID container_id; + + // Vendor-specific Data Block (Not Mandatory) + NVT_DISPLAYID_VENDOR_SPECIFIC vendor_specific; + + // CTA DisplayID Data Block (Not Mandatory) + NVT_DISPLAYID_CTA cta; +} NVT_DISPLAYID_2_0_INFO; + +#define NVT_EDID_PRIMARY_COLOR_FP2INT_FACTOR 1024 // Per EDID 1.4, 10bit color primary is encoded in floating point as (bit9/2 + bit8/4 + bi7/8 + ... + bit0) +typedef struct tagNVT_EDID_INFO +{ + // generic edid info + NvU32 version; + NvU16 manuf_id; + NvU16 manuf_id_hi; + NvU8 manuf_name[4]; + NvU16 product_id; + NvU32 serial_number; + NvU8 week; + NvU16 year; + + // the interface info + struct + { + union + { + struct + { + NvU8 serrations : 1; + NvU8 sync_type : 3; + NvU8 video_setup : 1; + NvU8 vp_p : 2; + } analog; + struct + { + NvU8 video_interface : 4; + NvU8 bpc : 5; + } digital; + NvU8 analog_data : 7; + } u; + NvU8 isDigital : 1; + } input; + + // the screen size info + NvU8 screen_size_x; // horizontal screen size in cm + NvU8 screen_size_y; // verical screen size in cm + NvU16 screen_aspect_x; // aspect ratio + NvU16 screen_aspect_y; // aspect ratio + + // display transfer characteristics + NvU16 gamma; + + // features support + union + { + NvU8 feature; + struct + { + NvU8 support_gtf : 1; + NvU8 preferred_timing_is_native : 1; // should be "Preferred_timing_is_dtd1". To be exact, "Native" is referenced as the native HDTV timing by CEA861 extension block + NvU8 default_colorspace_srgb : 1; + NvU8 color_type : 2; + NvU8 support_active_off : 1; + NvU8 support_suspend : 1; + NvU8 support_standby : 1; + + } feature_ver_1_3; + struct + { + NvU8 continuous_frequency : 1; + NvU8 preferred_timing_is_native : 1; // should be "Preferred_timing_is_dtd1". To be exact, "Native" is referenced as the native HDTV timing by CEA861 extension block + NvU8 default_colorspace_srgb : 1; + NvU8 color_type : 2; + NvU8 support_active_off : 1; + NvU8 support_suspend : 1; + NvU8 support_standby : 1; + } feature_ver_1_4_analog; + struct + { + NvU8 continuous_frequency : 1; + NvU8 preferred_timing_is_native : 1; // should be "Preferred_timing_is_dtd1". To be exact, "Native" is referenced as the native HDTV timing by CEA861 extension block + NvU8 default_colorspace_srgb : 1; + NvU8 support_ycrcb_444 : 1; + NvU8 support_ycrcb_422 : 1; + NvU8 support_active_off : 1; + NvU8 support_suspend : 1; + NvU8 support_standby : 1; + } feature_ver_1_4_digital; + }u; + + // chromaticity coordinates + NvU16 cc_red_x; + NvU16 cc_red_y; + NvU16 cc_green_x; + NvU16 cc_green_y; + NvU16 cc_blue_x; + NvU16 cc_blue_y; + NvU16 cc_white_x; + NvU16 cc_white_y; + + // established timings 1 and 2 + NvU16 established_timings_1_2; + + // Manufacturer reserved timings + NvU16 manufReservedTimings; + + // standard timings + NvU16 standard_timings[NVT_EDID_MAX_STANDARD_TIMINGS]; + + // 18-bytes display descriptor info + NVT_EDID_18BYTE_DESCRIPTOR ldd[NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR]; + + // the parse timing + NVT_TIMING timing[NVT_EDID_MAX_TOTAL_TIMING]; + + // Note: This contains the timing after validation. + NvU32 total_timings; + + // This contains the count timing that were invalidated because they don't meet + // some policies (PClk, etc). 
+ NvU32 total_invalidTimings; + + // indicates support for HDMI 1.4+ 3D stereo modes are present + NvU32 HDMI3DSupported; + + HDMI3DSUPPORTMAP Hdmi3Dsupport; + + // Data parsed from NVDA VSDB - Variable Refresh Rate Monitor capabilities + NVDA_VSDB_PARSED_INFO nvdaVsdbInfo; + + // Data parsed from MSFT VSDB - HMD and Specialized (Direct display) Monitor capabilities + MSFT_VSDB_PARSED_INFO msftVsdbInfo; + + // HDR capability information from the HDR Metadata Data Block + NVT_HDR_STATIC_METADATA hdr_static_metadata_info; + + // DV capability information from the DV Metadata Data Block + NVT_DV_STATIC_METADATA dv_static_metadata_info; + + // HDMI LLC info + NVT_HDMI_LLC_INFO hdmiLlcInfo; + + // HDMI 2.0 information + NVT_HDMI_FORUM_INFO hdmiForumInfo; + // deprecating the following, please use hdmiForumInfo; + struct + { + NvU8 max_TMDS_char_rate; + NvU8 lte_340Mcsc_scramble :1; + NvU8 rr_capable :1; + NvU8 SCDC_present :1; + } hdmi_2_0_info; + + // the total edid extension(s) attached to the basic block + NvU32 total_extensions; + // the total displayid2 extension(s) attached to the basic block. + NvU32 total_did2_extensions; + + NvU8 checksum; + NvU8 checksum_ok; + + // extension info + NVT_EDID_CEA861_INFO ext861; + + // for the 2nd CEA/EIA861 extension + // note: "ext861" should really be an array but since it requires massive name change and it's hard + // to find more than one 861 extension in the real world, I made a trade off like this for now. + NVT_EDID_CEA861_INFO ext861_2; + + NVT_DISPLAYID_INFO ext_displayid; + NVT_DISPLAYID_2_0_INFO ext_displayid20; +} NVT_EDID_INFO; + +typedef enum +{ + NVT_PROTOCOL_UNKNOWN = 0, + NVT_PROTOCOL_DP = 1, + NVT_PROTOCOL_HDMI = 2, +} NVT_PROTOCOL; + +// the display interface/connector claimed by the EDID +#define NVT_EDID_INPUT_DIGITAL_UNDEFINED 0x00 // undefined digital interface +#define NVT_EDID_INPUT_DVI 0x01 +#define NVT_EDID_INPUT_HDMI_TYPE_A 0x02 +#define NVT_EDID_INPUT_HDMI_TYPE_B 0x03 +#define NVT_EDID_INPUT_MDDI 0x04 +#define NVT_EDID_INPUT_DISPLAY_PORT 0x05 + + +// the EDID extension TAG +#define NVT_EDID_EXTENSION_CTA 0x02 // CTA 861 series extensions +#define NVT_EDID_EXTENSION_VTB 0x10 // video timing block extension +#define NVT_EDID_EXTENSION_DI 0x40 // display information extension +#define NVT_EDID_EXTENSION_LS 0x50 // localized string extension +#define NVT_EDID_EXTENSION_DPVL 0x60 // digital packet video link extension +#define NVT_EDID_EXTENSION_DISPLAYID 0x70 // display id +#define NVT_EDID_EXTENSION_BM 0xF0 // extension block map +#define NVT_EDID_EXTENSION_OEM 0xFF // extension defined by the display manufacturer + +//************************************ +// Audio and Video Infoframe Control +//************************************ +// +// the control info for generating infoframe data +#define NVT_INFOFRAME_CTRL_DONTCARE 0xFF +// +typedef struct tagNVT_VIDEO_INFOFRAME_CTRL +{ + NvU8 color_space; + NvU8 active_format_info_present; + NvU8 bar_info; + NvU8 scan_info; + NvU8 colorimetry; + NvU8 pic_aspect_ratio; + NvU8 active_format_aspect_ratio; + NvU8 it_content; + NvU8 it_content_type; + NvU8 extended_colorimetry; + NvU8 rgb_quantization_range; + NvU8 nonuniform_scaling; + NvU8 video_format_id; + NvU8 pixel_repeat; + NvU16 top_bar; + NvU16 bottom_bar; + NvU16 left_bar; + NvU16 right_bar; +}NVT_VIDEO_INFOFRAME_CTRL; +// +typedef struct tagNVT_AUDIO_INFOFRAME_CTRL +{ + NvU8 coding_type; + NvU8 channel_count; + NvU8 sample_rate; + NvU8 sample_depth; + NvU8 speaker_placement; + NvU8 level_shift; + NvU8 down_mix_inhibit; 
+}NVT_AUDIO_INFOFRAME_CTRL; + +typedef struct tagNVT_VENDOR_SPECIFIC_INFOFRAME_CTRL +{ + NvU32 Enable; + NvU8 HDMIFormat; + NvU8 HDMI_VIC; + NvU8 ThreeDStruc; + NvU8 ThreeDDetail; + NvU8 MetadataPresent; + NvU8 MetadataType; + NvU8 Metadata[8]; // type determines length + +} NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL; +#define NVT_3D_METADTATA_TYPE_PARALAX 0x00 +#define NVT_3D_METADTATA_PARALAX_LEN 0x08 + +#define NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER_HDMI21 0x0 +#define NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER_HDMI21A 0x1 +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL +{ + NvU32 version; // See #define NVT_EXTENDED_METADATA_PACKET_INFOFRAME_VER + NvU32 EnableVRR; + NvU32 ITTiming; + NvU32 BaseVFP; + NvU32 ReducedBlanking; + NvU32 BaseRefreshRate; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL; + +//*********************************** +// the actual Auido/Video Infoframe +//*********************************** +// +// info frame type code +#define NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC 1 +#define NVT_INFOFRAME_TYPE_VIDEO 2 +#define NVT_INFOFRAME_TYPE_SOURCE_PRODUCT_DESCRIPTION 3 +#define NVT_INFOFRAME_TYPE_AUDIO 4 +#define NVT_INFOFRAME_TYPE_MPEG_SOURCE 5 +#define NVT_INFOFRAME_TYPE_SELF_REFRESH 6 +#define NVT_INFOFRAME_TYPE_DYNAMIC_RANGE_MASTERING 7 +#define NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET 8 +// +// +typedef struct tagNVT_INFOFRAME_HEADER +{ + NvU8 type; + NvU8 version; + NvU8 length; +}NVT_INFOFRAME_HEADER; + +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER +{ + NvU8 type; + NvU8 firstLast; + NvU8 sequenceIndex; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER; + +#define NVT_EMP_HEADER_FIRST 0x80 +#define NVT_EMP_HEADER_LAST 0x40 +#define NVT_EMP_HEADER_FIRST_LAST 0xC0 + +// SPD Infoframe +typedef struct tagNVT_SPD_INFOFRAME_PAYLOAD +{ + NvU8 vendorBytes[8]; + NvU8 productBytes[16]; + + NvU8 sourceInformation; + + // Since HDMI Library doesn't clear the rest of the bytes and checksum is calculated for all the 32 bytes : Temporary WAR + NvU8 paddingBytes[3]; + + +} NVT_SPD_INFOFRAME_PAYLOAD; + +typedef struct tagNVT_SPD_INFOFRAME +{ + NVT_INFOFRAME_HEADER Header; + NVT_SPD_INFOFRAME_PAYLOAD Data; +} NVT_SPD_INFOFRAME; + +// the video infoframe version 1-3 structure +typedef struct tagNVT_VIDEO_INFOFRAME +{ + NvU8 type; + NvU8 version; + NvU8 length; + + // byte 1~5 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + + // byte 6~13 + NvU8 top_bar_low; + NvU8 top_bar_high; + NvU8 bottom_bar_low; + NvU8 bottom_bar_high; + NvU8 left_bar_low; + NvU8 left_bar_high; + NvU8 right_bar_low; + NvU8 right_bar_high; + +}NVT_VIDEO_INFOFRAME; +// +#define NVT_VIDEO_INFOFRAME_VERSION_1 1 +#define NVT_VIDEO_INFOFRAME_VERSION_2 2 +#define NVT_VIDEO_INFOFRAME_VERSION_3 3 +#define NVT_VIDEO_INFOFRAME_VERSION_4 4 +// +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_MASK 0x03 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_NO_DATA 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_OVERSCANNED 1 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_UNDERSCANNED 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_S1S0_FUTURE 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_MASK 0x0C +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_SHIFT 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_NOT_VALID 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_VERT_VALID 1 +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_HORIZ_VALID 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_B1B0_H_V_VALID 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE1_A0_MASK 0x10 // active format info present +#define NVT_VIDEO_INFOFRAME_BYTE1_A0_SHIFT 4 
// active format info present +#define NVT_VIDEO_INFOFRAME_BYTE1_A0_NO_DATA 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_A0_VALID 1 +// +// CTA-861G new requirement - DD changed this policy +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2_MASK 8 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_MASK 0xE0 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_SHIFT 0x5 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_RGB 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr422 1 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr444 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_YCbCr420 3 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y2Y1Y0_FUTURE 3 // nvlEscape still uses this lline 4266 +// CEA-861-F - Unix still used this one +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_MASK 0x60 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_SHIFT 0x5 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_RGB 0 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422 1 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444 2 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr420 3 +#define NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_FUTURE 3 // nvlEscape still uses this lline 4266 +// +#define NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_MASK 0x80 // for Inforframe V1 / V2 +#define NVT_VIDEO_INFOFRAME_BYTE1_RESERVED_SHIFT 7 +// +#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_MASK 0x0F // active format aspect ratio +#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_SAME_AS_M1M0 8 +#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_4X3_CENTER 9 +#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_16X9_CENTER 10 +#define NVT_VIDEO_INFOFRAME_BYTE2_R3R2R1R0_14x9_CENTER 11 +// +#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_MASK 0x30 // picture aspect ratio +#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_SHIFT 4 // picture aspect ratio +#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_NO_DATA 0 +#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_4X3 1 +#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_16X9 2 +#define NVT_VIDEO_INFOFRAME_BYTE2_M1M0_FUTURE 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_MASK 0xC0 // colorimetry +#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SHIFT 6 +#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_NO_DATA 0 +#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_SMPTE170M_ITU601 1 +#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_ITU709 2 +#define NVT_VIDEO_INFOFRAME_BYTE2_C1C0_EXT_COLORIMETRY 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE3_SC_MASK 0x03 // non-uniform scaling +#define NVT_VIDEO_INFOFRAME_BYTE3_SC_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE3_SC_NONE 0 +#define NVT_VIDEO_INFOFRAME_BYTE3_SC_HORIZ_SCALED 1 +#define NVT_VIDEO_INFOFRAME_BYTE3_SC_VERT_SCALED 2 +#define NVT_VIDEO_INFOFRAME_BYTE3_SC_H_V_SCALED 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_MASK 0x0C // quantization +#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_SHIFT 2 +#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_DEFAULT 0 +#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE 1 +#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_FULL_RANGE 2 +#define NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_RESERVED 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_MASK 0x70 // extended colorimetry +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_SHIFT 4 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_xvYCC_601 0 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_xvYCC_709 1 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_sYCC_601 2 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdobeYCC_601 3 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdobeRGB 4 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020cYCC 5 // CEA-861-F define it as "ITU-R BT.2020 YcCbcCrc" at Table 12 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_BT2020RGBYCC 6 // CEA-861-F define it as "ITU-R BT.2020 YcCbCr" at Table 12 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_RESERVED7 7 // 
CEA-861-F define it as "Reserved" at Table 12 +#define NVT_VIDEO_INFOFRAME_BYTE3_EC_AdditionalColorExt 7 // CTA-861-G define it as "Additional Colorimtry Ext Info Valid" at Table_13 +// +#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_MASK 0x80 // IT content +#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_SHIFT 7 +#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_NO_DATA 0 +#define NVT_VIDEO_INFOFRAME_BYTE3_ITC_IT_CONTENT 1 +// +#define NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_MASK 0x60 // reserved +#define NVT_VIDEO_INFOFRAME_BYTE3_RESERVED_V1_SHIFT 5 +// +#define NVT_VIDEO_INFOFRAME_BYTE4_VIC_MASK 0xFF // video identification code +#define NVT_VIDEO_INFOFRAME_BYTE4_VIC_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE4_VIC7 0x80 +// +#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V3_MASK 0x00 +#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V3_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_MASK 0x80 +#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V2_SHIFT 7 +#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_MASK 0xFF +#define NVT_VIDEO_INFOFRAME_BYTE4_RESERVED_V1_SHIFT 0 +// +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_MASK 0x0F // pixel repetitions +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_NO_PEP 0 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_2X 1 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_3X 2 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_4X 3 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_5X 4 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_6X 5 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_7X 6 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_8X 7 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_9X 8 +#define NVT_VIDEO_INFOFRAME_BYTE5_PR_10X 9 +// +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_MASK 0x30 // Content Information +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_SHIFT 4 +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_NODATA 0 // ITC = 0 +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GRAPHICS 0 // ITC = 1 +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_PHOTO 1 // ITC = don't care +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_CINEMA 2 // ITC = don't care +#define NVT_VIDEO_INFOFRAME_BYTE5_CN1CN0_GAME 3 // ITC = don't care + +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_MASK 0xC0 // YCC quantization +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_SHIFT 6 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_LIMITED_RANGE 1 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_FULL_RANGE 2 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_RESERVED3 3 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ1YQ0_RESERVED4 4 +// +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_MASK 0xc0 // content type +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_SHIFT 6 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_LIMITED 0 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_FULL 1 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_RSVD1 2 +#define NVT_VIDEO_INFOFRAME_BYTE5_YQ_RSVD2 3 +// +#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V2_MASK 0x00 +#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V2_SHIFT 0 +#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_MASK 0xFF +#define NVT_VIDEO_INFOFRAME_BYTE5_RESERVED_V1_SHIFT 0 +// +#define NVT_VIDEO_INFOFRAME_BYTE14_RESERVED_V4_MASK 0xF0 +#define NVT_VIDEO_INFOFRAME_BYTE14_RESERVED_V4_SHIFT 4 +#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_0 0 +#define NVT_VIDEO_INFOFRAME_BYTE14_ACE0_1 1 +// +#define NVT_VIDEO_INFOFRAME_BYTE14_RESERVED_MASK 0x0F +#define NVT_VIDEO_INFOFRAME_BYTE14_RESERVED_SHIFT 0 +// +#define NVT_VIDEO_INFOFRAME_CONTENT_VIDEO 0 +#define NVT_VIDEO_INFOFRAME_CONTENT_GRAPHICS 1 +#define NVT_VIDEO_INFOFRAME_CONTENT_PHOTO 2 +#define NVT_VIDEO_INFOFRAME_CONTENT_CINEMA 3 +#define NVT_VIDEO_INFOFRAME_CONTENT_GAME 4 +#define NVT_VIDEO_INFOFRAME_CONTENT_LAST 4 + +#pragma pack(1) +typedef 
struct +{ + // byte 1 + struct + { + NvU8 scanInfo : 2; + NvU8 barInfo : 2; + NvU8 activeFormatInfoPresent : 1; + NvU8 colorSpace : 2; + NvU8 rsvd_bits_byte1 : 1; + } byte1; + + // byte 2 + struct + { + NvU8 activeFormatAspectRatio : 4; + NvU8 picAspectRatio : 2; + NvU8 colorimetry : 2; + } byte2; + + // byte 3 + struct + { + NvU8 nonuniformScaling : 2; + NvU8 rgbQuantizationRange : 2; + NvU8 extendedColorimetry : 3; + NvU8 itContent : 1; + } byte3; + + // byte 4 + struct + { + NvU8 vic : 7; + NvU8 rsvd_bits_byte4 : 1; + } byte4; + + // byte 5 + struct + { + NvU8 pixelRepeat : 4; + NvU8 contentTypes : 2; + NvU8 yccQuantizationRange : 2; + } byte5; + + NvU16 topBar; + NvU16 bottomBar; + NvU16 leftBar; + NvU16 rightBar; +} NVT_VIDEO_INFOFRAME_OVERRIDE; +#pragma pack() + +typedef struct +{ + NvU32 vic : 8; + NvU32 pixelRepeat : 5; + NvU32 colorSpace : 3; + NvU32 colorimetry : 3; + NvU32 extendedColorimetry : 4; + NvU32 rgbQuantizationRange : 3; + NvU32 yccQuantizationRange : 3; + NvU32 itContent : 2; + NvU32 contentTypes : 3; + NvU32 scanInfo : 3; + NvU32 activeFormatInfoPresent : 2; + NvU32 activeFormatAspectRatio : 5; + NvU32 picAspectRatio : 3; + NvU32 nonuniformScaling : 3; + NvU32 barInfo : 3; + NvU32 top_bar : 17; + NvU32 bottom_bar : 17; + NvU32 left_bar : 17; + NvU32 right_bar : 17; + NvU32 Future17 : 2; + NvU32 Future47 : 2; +} NVT_INFOFRAME_VIDEO; + + +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_S1S0_MASK 0x3 +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_B1B0_MASK 0x3 +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_A0_MASK 0x1 // active format info present +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_Y1Y0_MASK 0x3 +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE1_Y2Y1Y0_MASK 0x7 +// +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_R3R2R1R0_MASK 0xF // active format aspect ratio +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_M1M0_MASK 0x3 // picture aspect ratio +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE2_C1C0_MASK 0x3 // colorimetry +// +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_SC_MASK 0x3 // non-uniform scaling +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_Q1Q0_MASK 0x3 // quantization +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_EC_MASK 0x7 // extended colorimetry +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE3_ITC_MASK 0x1 // IT content +// +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE4_VIC_MASK 0x7F // video identification code +// +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_PR_MASK 0xF // pixel repetitions +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_CN1CN0_MASK 0x3 +#define NVT_VIDEO_INFOFRAME_OVERRIDE_BYTE5_YQ1YQ0_MASK 0x3 // YCC quantization + +// audio infoframe structure +typedef struct tagNVT_AUDIO_INFOFRAME +{ + NvU8 type; + NvU8 version; + NvU8 length; + + // byte 1~5 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + + // byte 6~10 + NvU8 rsvd_byte6; + NvU8 rsvd_byte7; + NvU8 rsvd_byte8; + NvU8 rsvd_byte9; + NvU8 rsvd_byte10; + +}NVT_AUDIO_INFOFRAME; + +// self refresh infoframe structure. See SR spec. 
+typedef struct tagNVT_SR_INFOFRAME +{ + NvU8 type; + NvU8 version; + NvU8 length; + + NvU8 data; + +}NVT_SR_INFOFRAME; + +// +#define NVT_AUDIO_INFOFRAME_VERSION_1 1 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_MASK 0x07 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_2CH 1 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_DO_NOT_USE 2 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_4CH 3 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_5CH 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_6CH 5 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_7CH 6 +#define NVT_AUDIO_INFOFRAME_BYTE1_CC_8CH 7 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_MASK 0x08 +#define NVT_AUDIO_INFOFRAME_BYTE1_RESERVED_SHIFT 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MASK 0xF0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_SHIFT 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_PCM 1 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_DO_NOT_USE 2 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MPEG1 3 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MP3 4 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_MPEG2 5 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_AAC 6 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_DTS 7 +#define NVT_AUDIO_INFOFRAME_BYTE1_CT_USE_CODING_EXTENSION_TYPE 15 +// +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_MASK 0x3 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_REF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_16BIT 1 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_20BIT 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SS_24BIT 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_MASK 0x1C +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_SHIFT 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_HEADER 0 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_32KHz 1 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_44KHz 2 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_48KHz 3 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_88KHz 4 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_96KHz 5 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_176KHz 6 +#define NVT_AUDIO_INFOFRAME_BYTE2_SF_192KHz 7 +// +#define NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_MASK 0xE0 +#define NVT_AUDIO_INFOFRAME_BYTE2_RESERVED_SHIFT 5 +// +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_MASK 0x1F +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE3_CXT_RESERVE31 31 +// +#define NVT_AUDIO_INFOFRAME_BYTE3_RESERVED_MASK 0xE0 +#define NVT_AUDIO_INFOFRAME_BYTE3_RESERVED_SHIFT 5 +// +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE4_CA_FRW_FLW_RR_RL_FC_LFE_FR_FL 49 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_SHIFT 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_NO_DATA 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_0DB 1 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_PLUS10DB 2 +#define NVT_AUDIO_INFOFRAME_BYTE5_LFEPBL_RESERVED03 3 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_MASK 0x4 +#define NVT_AUDIO_INFOFRAME_BYTE5_RESERVED_SHIFT 2 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_MASK 0x78 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_SHIFT 3 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_0dB 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_1dB 1 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_2dB 2 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_3dB 3 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_4dB 4 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_5dB 5 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_6dB 6 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_7dB 7 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_8dB 8 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_9dB 9 +#define 
NVT_AUDIO_INFOFRAME_BYTE5_LSV_10dB 10 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_11dB 11 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_12dB 12 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_13dB 13 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_14dB 14 +#define NVT_AUDIO_INFOFRAME_BYTE5_LSV_15dB 15 +// +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_MASK 0x80 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_SHIFT 7 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_PERMITTED 0 +#define NVT_AUDIO_INFOFRAME_BYTE5_DM_INH_PROHIBITED 1 +// +#define NVT_AUDIO_INFOFRAME_BYTE6_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE6_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE7_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE7_RESERVED_SHIFT 0 +// +/// +#define NVT_AUDIO_INFOFRAME_BYTE8_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE8_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE9_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE9_RESERVED_SHIFT 0 +// +// +#define NVT_AUDIO_INFOFRAME_BYTE10_RESERVED_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_BYTE10_RESERVED_SHIFT 0 +// + +typedef struct +{ + // byte 1 + struct + { + NvU8 channelCount : 3; + NvU8 rsvd_bits_byte1 : 1; + NvU8 codingType : 4; + } byte1; + + // byte 2 + struct + { + NvU8 sampleSize : 2; + NvU8 sampleRate : 3; + NvU8 rsvd_bits_byte2 : 3; + } byte2; + + + // byte 3 + struct + { + NvU8 codingExtensionType : 5; + NvU8 rsvd_bits_byte3 : 3; + } byte3; + + // byte 4 + NvU8 speakerPlacement; + + // byte 5 + struct + { + NvU8 lfePlaybackLevel : 2; + NvU8 rsvd_bits_byte5 : 1; + NvU8 levelShift : 4; + NvU8 downmixInhibit : 1; + } byte5; + + // byte 6~10 + NvU8 rsvd_byte6; + NvU8 rsvd_byte7; + NvU8 rsvd_byte8; + NvU8 rsvd_byte9; + NvU8 rsvd_byte10; +} NVT_AUDIO_INFOFRAME_OVERRIDE; + +typedef struct +{ + NvU32 codingType : 5; + NvU32 codingExtensionType : 6; + NvU32 sampleSize : 3; + NvU32 sampleRate : 4; + NvU32 channelCount : 4; + NvU32 speakerPlacement : 9; + NvU32 downmixInhibit : 2; + NvU32 lfePlaybackLevel : 3; + NvU32 levelShift : 5; + NvU32 Future12 : 2; + NvU32 Future2x : 4; + NvU32 Future3x : 4; + NvU32 Future52 : 2; + NvU32 Future6 : 9; + NvU32 Future7 : 9; + NvU32 Future8 : 9; + NvU32 Future9 : 9; + NvU32 Future10 : 9; +} NVT_INFOFRAME_AUDIO; + +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE1_CC_MASK 0x07 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE1_CT_MASK 0x0F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE2_SS_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE2_SF_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE3_CXT_MASK 0x1F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE4_CA_MASK 0xFF +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_LFEPBL_MASK 0x03 +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_LSV_MASK 0x0F +#define NVT_AUDIO_INFOFRAME_OVERRIDE_BYTE5_DM_INH_MASK 0x01 + +// +// HDMI 1.3a GCP, ColorDepth +// +#define NVT_HDMI_COLOR_DEPTH_DEFAULT 0x0 +#define NVT_HDMI_COLOR_DEPTH_RSVD1 0x1 +#define NVT_HDMI_COLOR_DEPTH_RSVD2 0x2 +#define NVT_HDMI_COLOR_DEPTH_RSVD3 0x3 +#define NVT_HDMI_COLOR_DEPTH_24 0x4 +#define NVT_HDMI_COLOR_DEPTH_30 0x5 +#define NVT_HDMI_COLOR_DEPTH_36 0x6 +#define NVT_HDMI_COLOR_DEPTH_48 0x7 +#define NVT_HDMI_COLOR_DEPTH_RSVD8 0x8 +#define NVT_HDMI_COLOR_DEPTH_RSVD9 0x9 +#define NVT_HDMI_COLOR_DEPTH_RSVD10 0xA +#define NVT_HDMI_COLOR_DEPTH_RSVD11 0xB +#define NVT_HDMI_COLOR_DEPTH_RSVD12 0xC +#define NVT_HDMI_COLOR_DEPTH_RSVD13 0xD +#define NVT_HDMI_COLOR_DEPTH_RSVD14 0xE +#define NVT_HDMI_COLOR_DEPTH_RSVD15 0xF + +// HDMI 1.3a GCP, PixelPacking Phase +#define NVT_HDMI_PIXELPACKING_PHASE4 0x0 +#define NVT_HDMI_PIXELPACKING_PHASE1 0x1 +#define 
NVT_HDMI_PIXELPACKING_PHASE2 0x2 +#define NVT_HDMI_PIXELPACKING_PHASE3 0x3 +#define NVT_HDMI_PIXELPACKING_RSVD4 0x4 +#define NVT_HDMI_PIXELPACKING_RSVD5 0x5 +#define NVT_HDMI_PIXELPACKING_RSVD6 0x6 +#define NVT_HDMI_PIXELPACKING_RSVD7 0x7 +#define NVT_HDMI_PIXELPACKING_RSVD8 0x8 +#define NVT_HDMI_PIXELPACKING_RSVD9 0x9 +#define NVT_HDMI_PIXELPACKING_RSVD10 0xA +#define NVT_HDMI_PIXELPACKING_RSVD11 0xB +#define NVT_HDMI_PIXELPACKING_RSVD12 0xC +#define NVT_HDMI_PIXELPACKING_RSVD13 0xD +#define NVT_HDMI_PIXELPACKING_RSVD14 0xE +#define NVT_HDMI_PIXELPACKING_RSVD15 0xF + +#define NVT_HDMI_RESET_DEFAULT_PIXELPACKING_PHASE 0x0 +#define NVT_HDMI_SET_DEFAULT_PIXELPACKING_PHASE 0x1 + +#define NVT_HDMI_GCP_SB1_CD_SHIFT 0 +#define NVT_HDMI_GCP_SB1_PP_SHIFT 4 + + +// Vendor specific info frame (HDMI 1.4 specific) +typedef struct tagNVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD +{ + // byte 1~5 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + NvU8 optionalBytes[22]; +}NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD; +typedef struct tagNVT_VENDOR_SPECIFIC_INFOFRAME +{ + NVT_INFOFRAME_HEADER Header; + NVT_VENDOR_SPECIFIC_INFOFRAME_PAYLOAD Data; +} NVT_VENDOR_SPECIFIC_INFOFRAME; +// +#define NVT_HDMI_VS_INFOFRAME_VERSION_1 1 + +// +#define NVT_HDMI_VS_BYTE4_RSVD_MASK 0x1f +#define NVT_HDMI_VS_BYTE4_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_MASK 0xe0 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_SHIFT 0x05 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_NONE 0x00 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_EXT 0x01 +#define NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D 0x02 +// 0x03-0x07 reserved +// +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_MASK 0xff // HDMI_VID_FMT = HDMI_VID_FMT_EXT +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_NA 0xfe +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_RSVD 0x00 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx30Hz 0x01 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx25Hz 0x02 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz 0x03 +#define NVT_HDMI_VS_BYTE5_HDMI_VIC_4Kx2Kx24Hz_SMPTE 0x04 +// 0x05-0xff reserved +// +#define NVT_HDMI_VS_BYTE5_HDMI_RSVD_MASK 0x07 // HDMI_VID_FMT = HDMI_VID_FMT_3D +#define NVT_HDMI_VS_BYTE5_HDMI_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE5_3D_META_PRESENT_MASK 0x01 +#define NVT_HDMI_VS_BYTE5_3D_META_PRESENT_SHIFT 0x03 +#define NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_NOTPRES 0x00 // HDMI Metadata is not present +#define NVT_HDMI_VS_BYTE5_HDMI_META_PRESENT_PRES 0x01 // HDMI Metadata is present +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_MASK 0xf0 +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SHIFT 0x04 +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_NA 0xfe +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK 0x00 +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_FIELD_ALT 0x01 +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LINE_ALT 0x02 +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEFULL 0x03 +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTH 0x04 +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_LDEPTHGFX 0x05 +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_TOPBOTTOM 0x06 +//0x06-0x07 reserved +#define NVT_HDMI_VS_BYTE5_HDMI_3DS_SIDEBYSIDEHALF 0x08 +//0x09-0x0f reserved +// +// bytes 6-21 are optional depending on the 3D mode & the presence/abcense of metadata +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_RSVD_MASK 0x0f // HDMI_VID_FMT = HDMI_VID_FMT_3D +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_MASK 0xf0 +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SHIFT 0x04 +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_NA 0xfe // Extended data is not applicable +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH 0x01 // Horizontal 
subsampling 1.4a defines a single subsampling vs 1.4s 4. +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_OL_OR 0x00 // Horizontal subsampling Odd Left Odd Right +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_OL_ER 0x01 // Horizontal subsampling Odd Left Even Right +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_EL_OR 0x02 // Horizontal subsampling Even Left Odd Right +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_H_EL_ER 0x03 // Horizontal subsampling Even Left Even Right +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_OL_OR 0x04 // Quincunx matrix Odd Left Odd Right +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_OL_ER 0x05 // Quincunx matrix Odd Left Even Right +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_EL_OR 0x06 // Quincunx matrix Even Left Odd Right +#define NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_SSH_Q_EL_ER 0x07 // Quincunx matrix Even Left Even Right +//0x08-0x0f reserved +#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_MASK 0xf0 // HDMI_VID_FMT = HDMI_VID_FMT_3D; HDMI_3D_META_PRESENT = 1 +#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_SHIFT 0x04 // +#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_NONE 0x00 // length of no metadata +#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_LEN_PARALLAX 0x08 // length of paralax data + +#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_MASK 0x0f // HDMI_VID_FMT = HDMI_VID_FMT_3D +#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_SHIFT 0x00 +#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_PARALLAX 0x00 // parallax metadata in the frame +#define NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_NA 0xfe // no metadata in the frame + +#define NVT_HDMI_VS_BYTENv_RSVD_MASK 0xff // if last byte of infoframe, will move depending on HDMI_VID_FMT, 3D metadata present, 3D_Metadata type. +#define NVT_HDMI_VS_BYTENv_RSVD_SHIFT 0x00 +#define NVT_HDMI_VS_BYTENv_RSVD 0x00 + + +// Extended Metadata Packet (HDMI 2.1 specific) +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD +{ + // byte 1~7 + NvU8 byte1; + NvU8 byte2; + NvU8 byte3; + NvU8 byte4; + NvU8 byte5; + NvU8 byte6; + NvU8 byte7; + + NvU8 metadataBytes[21]; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD; + +typedef struct tagNVT_EXTENDED_METADATA_PACKET_INFOFRAME +{ + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER Header; + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_PAYLOAD Data; +} NVT_EXTENDED_METADATA_PACKET_INFOFRAME; + +#define NVT_HDMI_EMP_BYTE1_RSVD_MASK 0x01 +#define NVT_HDMI_EMP_BYTE1_RSVD_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE1_SYNC_MASK 0x02 +#define NVT_HDMI_EMP_BYTE1_SYNC_SHIFT 1 +#define NVT_HDMI_EMP_BYTE1_SYNC_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_SYNC_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_VFR_MASK 0x04 +#define NVT_HDMI_EMP_BYTE1_VFR_SHIFT 2 +#define NVT_HDMI_EMP_BYTE1_VFR_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_VFR_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_AFR_MASK 0x08 +#define NVT_HDMI_EMP_BYTE1_AFR_SHIFT 3 +#define NVT_HDMI_EMP_BYTE1_AFR_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_AFR_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_MASK 0x30 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_SHIFT 4 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_PERIODIC_PSEUDO_STATIC 0 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_PERIODIC_DYNAMIC 1 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_UNIQUE 2 +#define NVT_HDMI_EMP_BYTE1_DS_TYPE_RSVD 3 + +#define NVT_HDMI_EMP_BYTE1_END_MASK 0x40 +#define NVT_HDMI_EMP_BYTE1_END_SHIFT 6 +#define NVT_HDMI_EMP_BYTE1_END_DISABLE 0 +#define NVT_HDMI_EMP_BYTE1_END_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE1_NEW_MASK 0x80 +#define NVT_HDMI_EMP_BYTE1_NEW_SHIFT 7 +#define NVT_HDMI_EMP_BYTE1_NEW_DISABLE 0 +#define 
NVT_HDMI_EMP_BYTE1_NEW_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE2_RSVD_MASK 0xff +#define NVT_HDMI_EMP_BYTE2_RSVD_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_MASK 0xff +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SHIFT 0 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_VENDOR_SPECIFIC 0 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_SPEC_DEFINED 1 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_CTA_DEFINED 2 +#define NVT_HDMI_EMP_BYTE3_ORGANIZATION_ID_VESA_DEFINED 3 + +#define NVT_HDMI_EMP_BYTE4_DATA_SET_TAG_MSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE4_DATA_SET_TAG_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE5_DATA_SET_TAG_LSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE6_DATA_SET_LENGTH_MSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE6_DATA_SET_LENGTH_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE7_DATA_SET_LENGTH_LSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_MASK 0x01 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_SHIFT 0 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_DISABLE 0 +#define NVT_HDMI_EMP_BYTE8_MD0_VRR_EN_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_MASK 0xff +#define NVT_HDMI_EMP_BYTE8_MD1_BASE_VFRONT_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD2_RB_MASK 0x04 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_SHIFT 2 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_DISABLE 0 +#define NVT_HDMI_EMP_BYTE8_MD2_RB_ENABLE 1 + +#define NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_MASK 0x03 +#define NVT_HDMI_EMP_BYTE8_MD2_BASE_RR_MSB_SHIFT 0 + +#define NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_MASK 0xff +#define NVT_HDMI_EMP_BYTE8_MD3_BASE_RR_LSB_SHIFT 0 + + + +// the Vendor-Specific-Data-Block header +typedef struct tagNVT_CEA861_VSDB_HEADER +{ + // byte 0 + NvU32 length : 5; + NvU32 vendorSpecificTag : 3; + // byte 1-3 + NvU32 ieee_id : 24; + +} NVT_CEA861_VSDB_HEADER; + +// HDMI LLC Vendor-Specific data block +// from HDMI 1.4 spec (superset of VSDB from HDMI 1.3a spec) +typedef struct tagNVT_CEA861_LATENCY +{ + NvU8 Video_Latency; + NvU8 Audio_Latency; + +} NVT_CEA861_LATENCY; + +typedef struct tagNVT_HDMI_VIDEO +{ + NvU8 Rsvd_1 : 3; + NvU8 ImageSize : 2; + NvU8 ThreeD_Multi_Present : 2; + NvU8 ThreeD_Present : 1; + NvU8 HDMI_3D_Len : 5; + NvU8 HDMI_VIC_Len : 3; +} NVT_HDMI_VIDEO; + +typedef struct tagNVT_HDMI_VIC_LIST +{ + NvU8 HDMI_VIC[1]; // note: list length is actually specified in HDMI_VIC_Len +} NVT_HDMI_VIC_LIST; + +typedef struct tagNVT_3D_STRUCT_ALL +{ + NvU8 ThreeDStructALL0_FramePacking : 1; + NvU8 ThreeDStructALL1_FieldAlt : 1; + NvU8 ThreeDStructALL2_LineAlt : 1; + NvU8 ThreeDStructALL3_SSFull : 1; + NvU8 ThreeDStructALL4_LDepth : 1; + NvU8 ThreeDStructALL5_LDepthGFX : 1; + NvU8 ThreeDStructALL6_TopBottom : 1; + NvU8 ThreeDStructALL7 : 1; + NvU8 ThreeDStructALL8_SSHalf : 1; + NvU8 Rsvd_1 : 7; +} NVT_3D_STRUCT_ALL; + +typedef struct tagNVT_3D_MULTI_LIST +{ + NvU8 ThreeD_Structure : 4; + NvU8 TwoD_VIC_order : 4; + NvU8 Rsvd_2 : 4; + NvU8 ThreeD_Detail : 4; +} NVT_3D_MULTI_LIST; + +#define NVT_3D_DETAILS_ALL 0x00 +#define NVT_3D_DETAILS_ALL_HORIZONTAL 0x01 +#define NVT_3D_DETAILS_HORIZONTAL_ODD_LEFT_ODD_RIGHT 0x02 +#define NVT_3D_DETAILS_HORIZONTAL_ODD_LEFT_EVEN_RIGHT 0x03 +#define NVT_3D_DETAILS_HORIZONTAL_EVEN_LEFT_ODD_RIGHT 0x04 +#define NVT_3D_DETAILS_HORIZONTAL_EVEN_LEFT_EVEN_RIGHT 0x05 +#define NVT_3D_DETAILS_ALL_QUINCUNX 0x06 +#define NVT_3D_DETAILS_QUINCUNX_ODD_LEFT_ODD_RIGHT 0x07 +#define NVT_3D_DETAILS_QUINCUNX_ODD_LEFT_EVEN_RIGHT 0x08 +#define NVT_3D_DETAILS_QUINCUNX_EVEN_LEFT_ODD_RIGHT 0x09 +#define 
NVT_3D_DETAILS_QUINCUNX_EVEN_LEFT_EVEN_RIGHT 0x0a + +typedef struct tagNVT_HDMI_LLC_VSDB_PAYLOAD +{ + // 1st byte + NvU8 B : 4; + NvU8 A : 4; + // 2nd byte + NvU8 D : 4; + NvU8 C : 4; + // 3rd byte + NvU8 DVI_Dual : 1; + NvU8 Rsvd_3 : 2; + NvU8 DC_Y444 : 1; + NvU8 DC_30bit : 1; + NvU8 DC_36bit : 1; + NvU8 DC_48bit : 1; + NvU8 Supports_AI : 1; + // 4th byte + NvU8 Max_TMDS_Clock; + // 5th byte + NvU8 CNC0 : 1; + NvU8 CNC1 : 1; + NvU8 CNC2 : 1; + NvU8 CNC3 : 1; + NvU8 Rsvd_5 : 1; + NvU8 HDMI_Video_present : 1; + NvU8 I_Latency_Fields_Present : 1; + NvU8 Latency_Fields_Present : 1; + + // the rest of the frame may contain optional data as defined + // in the NVT_CEA861_LATENCY, HDMI_VIDEO, HDMI_VIC, NVT_3D_STRUCT_ALL & 3D_MULTI_LIST structures + // and as specified by the corresponding control bits + NvU8 Data[NVT_CEA861_VSDB_PAYLOAD_MAX_LENGTH - 5]; + +} NVT_HDMI_LLC_VSDB_PAYLOAD; + +// HDMI LLC Vendor Specific Data Block +typedef struct tagNVT_HDMI_LLC_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_HDMI_LLC_VSDB_PAYLOAD payload; +} NVT_HDMI_LLC_DATA; + +typedef struct tagNVT_NVDA_VSDB_PAYLOAD +{ + NvU8 opcode; // Nvidia specific opcode - please refer to VRR monitor spec v5 + NvU8 vrrMinRefreshRate; // Minimum refresh rate supported by this monitor +} NVT_NVDA_VSDB_PAYLOAD; + +// NVIDIA Vendor Specific Data Block +typedef struct tagNVT_NVDA_VSDB_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_NVDA_VSDB_PAYLOAD payload; +} NVT_NVDA_VSDB_DATA; + +typedef struct _NVT_MSFT_VSDB_PAYLOAD +{ + NvU8 version; + NvU8 primaryUseCase : 5; + NvU8 thirdPartyUsage : 1; + NvU8 desktopUsage : 1; + NvU8 reserved : 1; + NvU8 containerId[MSFT_VSDB_CONTAINER_ID_SIZE]; +} NVT_MSFT_VSDB_PAYLOAD; + +typedef struct _NVT_MSFT_VSDB_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_MSFT_VSDB_PAYLOAD payload; +} NVT_MSFT_VSDB_DATA; + +#define NVT_MSFT_VSDB_BLOCK_SIZE (sizeof(NVT_MSFT_VSDB_DATA)) + +typedef struct tagNVT_HDMI_FORUM_VSDB_PAYLOAD +{ + // first byte + NvU8 Version; + // second byte + NvU8 Max_TMDS_Character_Rate; + // third byte + NvU8 ThreeD_Osd_Disparity : 1; + NvU8 Dual_View : 1; + NvU8 Independent_View : 1; + NvU8 Lte_340mcsc_Scramble : 1; + NvU8 CCBPCI : 1; + NvU8 CABLE_STATUS : 1; + NvU8 RR_Capable : 1; + NvU8 SCDC_Present : 1; + // fourth byte + NvU8 DC_30bit_420 : 1; + NvU8 DC_36bit_420 : 1; + NvU8 DC_48bit_420 : 1; + NvU8 UHD_VIC : 1; + NvU8 Max_FRL_Rate : 4; + // fifth byte + NvU8 FAPA_start_location : 1; + NvU8 ALLM : 1; + NvU8 FVA : 1; + NvU8 CNMVRR : 1; + NvU8 CinemaVRR : 1; + NvU8 M_delta : 1; + NvU8 Rsvd_2 : 2; + // sixth byte + NvU8 VRR_min : 6; + NvU8 VRR_max_high : 2; + // seventh byte + NvU8 VRR_max_low : 8; + // eighth byte + NvU8 DSC_10bpc : 1; + NvU8 DSC_12bpc : 1; + NvU8 DSC_16bpc : 1; + NvU8 DSC_All_bpp : 1; + NvU8 Rsvd_3 : 2; + NvU8 DSC_Native_420 : 1; + NvU8 DSC_1p2 : 1; + // ninth byte + NvU8 DSC_MaxSlices : 4; + NvU8 DSC_Max_FRL_Rate : 4; + // tenth byte + NvU8 DSC_totalChunkKBytes : 6; + NvU8 Rsvd_4 : 2; +} NVT_HDMI_FORUM_VSDB_PAYLOAD; + +// HDMI Forum Vendor Specific Data Block +typedef struct tagNVT_HDMI_FORUM_DATA +{ + NVT_CEA861_VSDB_HEADER header; + NVT_HDMI_FORUM_VSDB_PAYLOAD payload; +} NVT_HDMI_FORUM_DATA; + +// +// +// Video Capability Data Block (VCDB) +typedef struct _NV_ESC_MONITOR_CAPS_VCDB +{ + NvU8 quantizationRangeYcc : 1; + NvU8 quantizationRangeRgb : 1; + NvU8 scanInfoPreferredVideoFormat : 2; + NvU8 scanInfoITVideoFormats : 2; + NvU8 scanInfoCEVideoFormats : 2; +} NVT_HDMI_VCDB_DATA; + +// +// +//*********************************************************** +// 
Dynamic Range and Mastering Infoframe (HDR) +//*********************************************************** +// +typedef struct tagNVT_HDR_INFOFRAME_MASTERING_DATA +{ + NvU16 displayPrimary_x0; //!< x coordinate of color primary 0 (e.g. Red) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayPrimary_y0; //!< y coordinate of color primary 0 (e.g. Red) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + + NvU16 displayPrimary_x1; //!< x coordinate of color primary 1 (e.g. Green) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayPrimary_y1; //!< y coordinate of color primary 1 (e.g. Green) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + + NvU16 displayPrimary_x2; //!< x coordinate of color primary 2 (e.g. Blue) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayPrimary_y2; //!< y coordinate of color primary 2 (e.g. Blue) of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + + NvU16 displayWhitePoint_x; //!< x coordinate of white point of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + NvU16 displayWhitePoint_y; //!< y coordinate of white point of mastering display ([0x0000-0xC350] = [0.0 - 1.0]) + + NvU16 max_display_mastering_luminance; //!< Maximum display mastering luminance ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2) + NvU16 min_display_mastering_luminance; //!< Minimum display mastering luminance ([0x0001-0xFFFF] = [1.0 - 6.55350] cd/m^2) + + NvU16 max_content_light_level; //!< Maximum Content Light level (MaxCLL) ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2) + NvU16 max_frame_average_light_level; //!< Maximum Frame-Average Light Level (MaxFALL) ([0x0001-0xFFFF] = [1.0 - 65535.0] cd/m^2) +} NVT_HDR_INFOFRAME_MASTERING_DATA; + +#define NVT_CEA861_HDR_INFOFRAME_EOTF_SDR_GAMMA 0 //SDR Luminance Range +#define NVT_CEA861_HDR_INFOFRAME_EOTF_HDR_GAMMA 1 //HDR Luminance Range +#define NVT_CEA861_HDR_INFOFRAME_EOTF_ST2084 2 +#define NVT_CEA861_HDR_INFOFRAME_EOTF_Future 3 +#define NVT_CEA861_STATIC_METADATA_TYPE1_PRIMARY_COLOR_NORMALIZE_FACTOR 0xC350 // Per CEA-861.3 spec + +typedef struct tagNVT_HDR_INFOFRAME_PAYLOAD +{ + //byte 1 + NvU8 eotf : 3; + NvU8 f13_17 : 5; // These bits are reserved for future use + //byte 2 + NvU8 static_metadata_desc_id : 3; + NvU8 f23_27 : 5; // These bits are reserved for future use + + NVT_HDR_INFOFRAME_MASTERING_DATA type1; +} NVT_HDR_INFOFRAME_PAYLOAD; + +#pragma pack(1) +typedef struct tagNVT_HDR_INFOFRAME +{ + NVT_INFOFRAME_HEADER header; + NVT_HDR_INFOFRAME_PAYLOAD payload; +} NVT_HDR_INFOFRAME; +#pragma pack() + +// +// +//*********************************************************** +// Gamut Metadata Range and Vertices structures +//*********************************************************** +// +// GBD structure formats +// +#define NVT_GAMUT_FORMAT_VERTICES 0 +#define NVT_GAMUT_FORMAT_RANGE 1 + +typedef struct tagNVT_GAMUT_HEADER +{ + NvU8 type:8; + + // byte 1 + NvU8 AGSNum:4; + NvU8 GBD_profile:3; + NvU8 Next_Field:1; + + // byte 2 + NvU8 CGSNum:4; + NvU8 Packet_Seq:2; + NvU8 Rsvd:1; + NvU8 No_Cmt_GBD:1; + +} NVT_GAMUT_HEADER; + +typedef struct tagNVT_GAMUT_METADATA_RANGE_8BIT{ + + // Header + NvU8 GBD_Color_Space:3; + NvU8 GBD_Color_Precision:2; + NvU8 Rsvd:2; + NvU8 Format_Flag:1; + + // Packaged data + NvU8 Min_Red_Data:8; + NvU8 Max_Red_Data:8; + NvU8 Min_Green_Data:8; + NvU8 Max_Green_Data:8; + NvU8 Min_Blue_Data:8; + NvU8 Max_Blue_Data:8; +} NVT_GAMUT_METADATA_RANGE_8BIT; + +typedef struct tagNVT_GAMUT_METADATA_RANGE_10BIT{ + + // Header + NvU8 GBD_Color_Space:3; + NvU8 
GBD_Color_Precision:2; + NvU8 Rsvd:2; + NvU8 Format_Flag:1; + + // Packaged data + NvU8 Min_Red_Data_HI:8; + + NvU8 Max_Red_Data_HI:6; + NvU8 Min_Red_Data_LO:2; + + NvU8 Min_Green_Data_HI:4; + NvU8 Max_Red_Data_LO:4; + + NvU8 Max_Green_Data_HI:2; + NvU8 Min_Green_Data_LO:6; + + NvU8 Max_Green_Data_LO:8; + + NvU8 Min_Blue_Data_HI:8; + + NvU8 Max_Blue_Data_HI:6; + NvU8 Min_Blue_Data_LO:2; + + NvU8 Data_Rsvd:4; + NvU8 Max_Blue_Data_LO:4; + +} NVT_GAMUT_METADATA_RANGE_10BIT; + +typedef struct tagNVT_GAMUT_METADATA_RANGE_12BIT{ + + // Header + NvU8 GBD_Color_Space:3; + NvU8 GBD_Color_Precision:2; + NvU8 Rsvd:2; + NvU8 Format_Flag:1; + + // Packaged data + NvU8 Min_Red_Data_HI:8; + + NvU8 Max_Red_Data_HI:4; + NvU8 Min_Red_Data_LO:4; + + NvU8 Max_Red_Data_LO:8; + + NvU8 Min_Green_Data_HI:8; + + NvU8 Max_Green_Data_HI:4; + NvU8 Min_Green_Data_LO:4; + + NvU8 Max_Green_Data_LO:8; + + NvU8 Min_Blue_Data_HI:8; + + NvU8 Max_Blue_Data_HI:4; + NvU8 Min_Blue_Data_LO:4; + + NvU8 Max_Blue_Data_LO:8; + +} NVT_GAMUT_METADATA_RANGE_12BIT; + +typedef struct tagNVT_GAMUT_METADATA_VERTICES_8BIT +{ + // Header + NvU8 GBD_Color_Space:3; + NvU8 GBD_Color_Precision:2; + NvU8 Rsvd:1; + NvU8 Facet_Mode:1; // Must be set to 0 + NvU8 Format_Flag:1; // Must be set to 0 + NvU8 Number_Vertices_H:8; // Must be set to 0 + NvU8 Number_Vertices_L:8; // Must be set to 4 + + // Packaged data + NvU8 Black_Y_R; + NvU8 Black_Cb_G; + NvU8 Black_Cr_B; + NvU8 Red_Y_R; + NvU8 Red_Cb_G; + NvU8 Red_Cr_B; + NvU8 Green_Y_R; + NvU8 Green_Cb_G; + NvU8 Green_Cr_B; + NvU8 Blue_Y_R; + NvU8 Blue_Cb_G; + NvU8 Blue_Cr_B; +} NVT_GAMUT_METADATA_VERTICES_8BIT; + +typedef struct tagNVT_GAMUT_METADATA_VERTICES_10BIT +{ + // Header + NvU8 GBD_Color_Space:3; + NvU8 GBD_Color_Precision:2; + NvU8 Rsvd:1; + NvU8 Facet_Mode:1; // Must be set to 0 + NvU8 Format_Flag:1; // Must be set to 0 + NvU8 Number_Vertices_H:8; // Must be set to 0 + NvU8 Number_Vertices_L:8; // Must be set to 4 + + // Packaged data + NvU8 Black_Y_R_HI; + + NvU8 Black_Cb_G_HI:6; + NvU8 Black_Y_R_LO:2; + + NvU8 Black_Cr_B_HI:4; + NvU8 Black_Cb_G_LO:4; + + NvU8 Red_Y_R_HI:2; + NvU8 Black_Cr_B_LO:6; + + NvU8 Red_Y_R_LO; + + NvU8 Red_Cb_G_HI; + + NvU8 Red_Cr_B_HI:6; + NvU8 Red_Cb_G_LO:2; + + NvU8 Green_Y_R_HI:4; + NvU8 Red_Cr_B_LO:4; + + NvU8 Green_Cb_G_HI:2; + NvU8 Green_Y_R_LO:6; + + NvU8 Green_Cb_G_LO; + + NvU8 Green_Cr_B_HI; + + NvU8 Blue_Y_R_HI:6; + NvU8 Green_Cr_B_LO:2; + + NvU8 Blue_Cb_G_HI:4; + NvU8 Blue_Y_R_LO:4; + + NvU8 Blue_Cr_B_HI:2; + NvU8 Blue_Cb_G_LO:6; + + NvU8 Blue_Cr_B_LO; +} NVT_GAMUT_METADATA_VERTICES_10BIT; + +typedef struct tagNVT_GAMUT_METADATA_VERTICES_12BIT +{ + // Header + NvU8 GBD_Color_Space:3; + NvU8 GBD_Color_Precision:2; + NvU8 Rsvd:1; + NvU8 Facet_Mode:1; // Must be set to 0 + NvU8 Format_Flag:1; // Must be set to 0 + NvU8 Number_Vertices_H:8; // Must be set to 0 + NvU8 Number_Vertices_L:8; // Must be set to 4 + + // Packaged data + NvU8 Black_Y_R_HI; + + NvU8 Black_Cb_G_HI:4; + NvU8 Black_Y_R_LO:4; + + NvU8 Black_Cb_G_LO; + + NvU8 Black_Cr_B_HI; + + NvU8 Red_Y_R_HI:4; + NvU8 Black_Cr_B_LO:4; + + NvU8 Red_Y_R_LO; + + NvU8 Red_Cb_G_HI; + + NvU8 Red_Cr_B_HI:4; + NvU8 Red_Cb_G_LO:4; + + NvU8 Red_Cr_B_LO; + + NvU8 Green_Y_R_HI; + + NvU8 Green_Cb_G_HI:4; + NvU8 Green_Y_R_LO:4; + + NvU8 Green_Cb_G_LO; + + NvU8 Green_Cr_B_HI; + + NvU8 Blue_Y_R_HI:4; + NvU8 Green_Cr_B_LO:4; + + NvU8 Blue_Y_R_LO; + + NvU8 Blue_Cb_G_HI; + + NvU8 Blue_Cr_B_HI:4; + NvU8 Blue_Cb_G_LO:4; + + NvU8 Blue_Cr_B_LO; +} NVT_GAMUT_METADATA_VERTICES_12BIT; + +typedef struct tagNVT_GAMUT_METADATA +{ + 
NVT_GAMUT_HEADER header; + + union + { + NVT_GAMUT_METADATA_RANGE_8BIT range8Bit; + NVT_GAMUT_METADATA_RANGE_10BIT range10Bit; + NVT_GAMUT_METADATA_RANGE_12BIT range12Bit; + NVT_GAMUT_METADATA_VERTICES_8BIT vertices8bit; + NVT_GAMUT_METADATA_VERTICES_10BIT vertices10bit; + NVT_GAMUT_METADATA_VERTICES_12BIT vertices12bit; + }payload; + +}NVT_GAMUT_METADATA; +// +//*********************************** +// Display Port Configuration Data +//*********************************** +// +// DPCD field offset +#define NVT_DPCD_ADDRESS_RECEIVER_CAPABILITY_FIELD 0x00000 +#define NVT_DPCD_ADDRESS_LINK_CONFIG_FIELD 0x00100 +#define NVT_DPCD_ADDRESS_MSTM_CTRL_FIELD 0x00111 //DPMST Control MST <-> ST +#define NVT_DPCD_ADDRESS_MSTM_BRANCH_DEVICE 0x001A1 +#define NVT_DPCD_ADDRESS_LINK_SINK_STATUS_FIELD 0x00200 +#define NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_SOURCE_DEVICE 0x00300 +#define NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_SINK_DEVICE 0x00400 +#define NVT_DPCD_ADDRESS_VENDOR_SPECIFIC_BRANCH_DEVICE 0x00500 +#define NVT_DPCD_ADDRESS_SINK_CTRL_FIELD 0x00600 +#define NVT_DPCD_ADDRESS_DOWN_REQ_BUFFER_FIELD 0x01000 +#define NVT_DPCD_ADDRESS_UP_REP_BUFFER_FIELD 0x01200 +#define NVT_DPCD_ADDRESS_DOWN_REP_BUFFER_FIELD 0x01400 +#define NVT_DPCD_ADDRESS_UP_REQ_BUFFER_FIELD 0x01600 +#define NVT_DPCD_ADDRESS_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x02003 +#define NVT_DPCD_ADDRESS_DP_TUNNELING_CAPS_SUPPORT_FIELD 0xE000D +#define NVT_DPCD_ADDRESS_DP_IN_ADAPTER_INFO_FIELD 0xE000E +#define NVT_DPCD_ADDRESS_USB4_DRIVER_ID_FIELD 0xE000F +#define NVT_DPCD_ADDRESS_USB4_ROUTER_TOPOLOGY_ID_FIELD 0xE001B + +// +// Raw DPCD data format - Receiver Capability Field // 00000h - 000FFh +typedef struct tagNVT_DPCD_RECEIVER_CAP +{ + NvU8 rev; // 00000h + NvU8 max_link_rate; // 00001h + NvU8 max_lane_count; // 00002h + NvU8 max_downspread; // 00003h + NvU8 norp; // 00004h + NvU8 downstream_port_present; // 00005h + NvU8 main_link_ch_coding; // 00006h + NvU8 down_stream_port_count; // 00007h + NvU8 receive_port0_cap_0; // 00008h + NvU8 receive_port0_cap_1; // 00009h + NvU8 receive_port1_cap_0; // 0000Ah + NvU8 receive_port1_cap_1; // 0000Bh + NvU8 reserved_0[0x7F - 0xC + 1]; // 0000Ch - 0007Fh + NvU8 down_strm_port0_cap[0x8F - 0x80 + 1]; // 00080h - 0008Fh + //NvU8 reserved_1[0xFF - 0x90 + 1]; // 00090h - 000FFh +}NVT_DPCD_RECEIVER_CAP; + +// +// Raw DPCD data format - Link Configuration Field // 00100h - 001FFh +typedef struct tagNVT_DPCD_LINK_CFG +{ + NvU8 link_bw_set; // 00100h + NvU8 lane_count_set; // 00101h + NvU8 training_pattern_set; // 00102h + NvU8 training_lane0_set; // 00103h + NvU8 training_lane1_set; // 00104h + NvU8 training_lane2_set; // 00105h + NvU8 training_lane3_set; // 00106h + NvU8 downspread_ctrl; // 00107h + NvU8 main_link_ch_coding_set; // 00108h + NvU8 reserved_0[0x110 - 0x109 + 1]; // 00110h - 00109h + NvU8 mstm_ctrl; // 00111h + // NvU8 reserved_0[0x1FF - 0x111 + 1]; +}NVT_DPCD_LINK_CFG; +// +// Raw DPCD data format - Link/Sink Status Field // 00200h - 002FFh +typedef struct tagNVT_DPCD_LINK_SINK_STATUS +{ + NvU8 sink_count; // 00200h + NvU8 device_service_irq_vector; // 00201h + NvU8 lane0_1_status; // 00202h + NvU8 lane2_3_status; // 00203h + NvU8 lane_align_status_update; // 00204h + NvU8 sink_status; // 00205h + NvU8 adjust_req_lane0_1; // 00206h + NvU8 adjust_req_lane2_3; // 00207h + NvU8 training_score_lane0; // 00208h + NvU8 training_score_lane1; // 00209h + NvU8 training_score_lane2; // 0020Ah + NvU8 training_score_lane3; // 0020Bh + NvU8 reserved_0[0x20F - 0x20C + 1]; // 0020Fh - 0020Ch + NvU16 sym_err_count_lane0; // 
00210h - 00211h + NvU16 sym_err_count_lane1; // 00212h - 00213h + NvU16 sym_err_count_lane2; // 00214h - 00215h + NvU16 sym_err_count_lane3; // 00217h - 00216h + NvU8 test_req; // 00218h + NvU8 test_link_rate; // 00219h + NvU8 reserved_1[0x21F - 0x21A + 1]; // 0021Fh - 0021Ah + NvU8 test_lane_count; // 00220h + NvU8 test_pattern; // 00221h + NvU16 test_h_total; // 00222h - 00223h + NvU16 test_v_total; // 00224h - 00225h + NvU16 test_h_start; // 00226h - 00227h + NvU16 test_v_start; // 00228h - 00229h + NvU16 test_hsync; // 0022Ah - 0022Bh + NvU16 test_vsync; // 0022Ch - 0022Dh + NvU16 test_h_width; // 0022Eh - 0022Fh + NvU16 test_v_height; // 00230h - 00231h + NvU16 test_misc; // 00232h - 00233h + NvU8 test_refresh_rate_numerator; // 00234h + NvU8 reserved_2[0x23F - 0x235 + 1]; // 00235h - 0023Fh + NvU16 test_crc_R_Cr; // 00240h - 00241h + NvU16 test_crc_G_Y; // 00242h - 00243h + NvU16 test_crc_B_Cb; // 00244h - 00245h + NvU8 test_sink_misc; // 00246h + NvU8 reserved_3[0x25F - 0x247 + 1]; // 00247h - 0025fh + NvU8 test_response; // 00260h + NvU8 test_edid_checksum; // 00261h + NvU8 reserved_4[0x26F - 0x262 + 1]; // 00262h - 0026Fh + NvU8 test_sink; // 00270h + //NvU8 reserved_5[0x27F - 0x271 + 1]; // 00271h - 0027Fh + //NvU8 reserved_6[0x2FF - 0x280 + 1]; // 00280h - 002FFh +}NVT_DPCD_LINK_SINK_STATUS; + +#define NV_DPCD_DONGLE_NVIDIA_OUI 0x00044B + +// +// Raw DPCD data format - Vendor-Specific Field for Source Device // 00300h - 003FFh +// Raw DPCD data format - Vendor-Specific Field for Sink Device // 00400h - 004FFh +// Raw DPCD data format - Vendor-Specific Field for Branch Device // 00500h - 005FFh +typedef struct tagNVT_DPCD_VENDOR_SPECIFIC_FIELD +{ + NvU8 ieee_oui7_0; // 00300h + NvU8 ieee_oui15_8; // 00301h + NvU8 ieee_oui23_16; // 00302h + //NvU8 reserved[0x3FF - 0x303 + 1]; // 003FFh - 00303h +}NVT_DPCD_VENDOR_SPECIFIC_FIELD; +// +// Raw DPCD data format - Dongle Specific Field +typedef struct tagNVT_DPCD_DONGLE_SPECIFIC_FIELD +{ + NvU8 vendor_b0; // 00300h + NvU8 vendor_b1; // 00301h + NvU8 vendor_b2; // 00302h + NvU8 model[6]; // 00303h - 00308h + NvU8 chipIDVersion; // 00309h + //NvU8 reserved[0x3FF - 0x30A + 1]; // 0030Ah - 005FFh +}NVT_DPCD_DONGLE_SPECIFIC_FIELD; +// +// Raw DPCD data format - DualDP Specific Field +typedef struct tagNVT_DPCD_DUALDP_SPECIFIC_FIELD +{ + NvU8 vendor_b0; // 00300h + NvU8 vendor_b1; // 00301h + NvU8 vendor_b2; // 00302h + NvU8 model[6]; // 00303h - 00308h + NvU8 chipd_id_version; // 00309h + NvU8 reserved_1[0x3AF - 0x30A + 1]; // 0030Ah - 003AFh + NvU8 dual_dp_cap; // 003B0h + NvU8 dual_dp_base_addr[3]; // 003B1h - 003B3h + //NvU8 reserved_2[0x3FF - 0x3B4 + 1]; // 003B4h - 003FFh +}NVT_DPCD_DUALDP_SPECIFIC_FIELD; + +// +// Raw DPCD data format - Sink Control Field // 00600h - 006FFh +typedef struct tagNVT_DPCD_SINK_CTRL_FIELD +{ + NvU8 set_power; // 00600h + //NvU8 reserved[0x6FF - 0x601 + 1]; // 00601h - 006FFh +}NVT_DPCD_SINK_CTRL_FIELD; +// +// The entire DPCD data block +typedef struct tagNVT_DPCD +{ + NVT_DPCD_RECEIVER_CAP receiver_cap; + NVT_DPCD_LINK_CFG link_cfg; + NVT_DPCD_LINK_SINK_STATUS link_status; + NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_source_device; + NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_sink_device; + NVT_DPCD_VENDOR_SPECIFIC_FIELD vsp_branch_device; + NVT_DPCD_SINK_CTRL_FIELD sink_ctrl; +}NVT_DPCD; +// +// +// Parsed DPCD info +// +// +#define NVT_DPCD_REV_10 NVT_DPCD_DPCD_REV_10 // DPCD revision 1.0 +#define NVT_DPCD_REV_11 NVT_DPCD_DPCD_REV_11 // DPCD revision 1.1 +#define NVT_DPCD_REV_12 NVT_DPCD_DPCD_REV_12 // DPCD revision 1.2 
+#define NVT_DPCD_RECEIVER_MAX_DOWNSTREAM_PORT 16 // the max downstream port possible per device +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DP NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT // Display Port +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_VGA NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_VGA // analog VGA or analog video over DVI-I +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DVI NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DVI // DVI +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_HDMI NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_HDMI // HDMI +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_OTHERS NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_NO_EDID // the downstream port type will have no EDID in sink device such as Composite/SVideo. +#define NVT_DPCD_RECEIVER_DOWNSTREAM_PORT_TYPE_DP_PP NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT_PP // Display Port++ +#define NVT_DPCD_LINK_RATE_1_62_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_1_62GPBS_PER_LANE // 1.62Gbps per lane +#define NVT_DPCD_LINK_RATE_2_70_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_2_70GPBS_PER_LANE // 2.70Gbps per lane +#define NVT_DPCD_LINK_RATE_5_40_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_5_40GPBS_PER_LANE // 5.40Gbps per lane +#define NVT_DPCD_LINK_RATE_8_10_GBPS NVT_DPCD_LINK_BW_SET_LINK_BW_SET_8_10GPBS_PER_LANE // 8.10Gbps per lane +#define NVT_DPCD_LINK_RATE_FACTOR_IN_10KHZ_MBPS 2700 // e.g. NVT_DPCD_LINK_RATE_1_62_GBPS * 0.27Gbps per lane (in 10KHz) +#define NVT_DPCD_LANE_COUNT_1 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_1_LANE +#define NVT_DPCD_LANE_COUNT_2 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_2_LANES +#define NVT_DPCD_LANE_COUNT_4 NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_4_LANES +#define NVT_DPCD_LANE_COUNT_8 8 + +// note: the values of NVT_COLOR_FORMAT_* are fixed in order to match the equivalent NV classes +typedef enum +{ + NVT_COLOR_FORMAT_RGB = 0, + NVT_COLOR_FORMAT_YCbCr422 = 1, + NVT_COLOR_FORMAT_YCbCr444 = 2, + NVT_COLOR_FORMAT_YCbCr420 = 3, + NVT_COLOR_FORMAT_Y = 4, + NVT_COLOR_FORMAT_RAW = 5, + NVT_COLOR_FORMAT_INVALID = 0xFF +} NVT_COLOR_FORMAT; + +typedef enum +{ + NVT_COLOR_RANGE_FULL = 0, + NVT_COLOR_RANGE_LIMITED = 1 +} NVT_COLOR_RANGE; + +// note: the values of NVT_COLORIMETRY_* are fixed in order to match the equivalent NV classes +typedef enum +{ + NVT_COLORIMETRY_RGB = 0, + NVT_COLORIMETRY_YUV_601 = 1, + NVT_COLORIMETRY_YUV_709 = 2, + NVT_COLORIMETRY_EXTENDED = 3, + NVT_COLORIMETRY_XVYCC_601 = 4, + NVT_COLORIMETRY_XVYCC_709 = 5, + NVT_COLORIMETRY_ADOBERGB = 6, + NVT_COLORIMETRY_BT2020cYCC = 7, + NVT_COLORIMETRY_BT2020YCC = 8, + NVT_COLORIMETRY_BT2020RGB = 9, + NVT_COLORIMETRY_INVALID = 0xFF +} NVT_COLORIMETRY; + +#define NVT_DPCD_BPC_DEFAULT 0x00 +#define NVT_DPCD_BPC_6 0x01 +#define NVT_DPCD_BPC_8 0x02 +#define NVT_DPCD_BPC_10 0x03 +#define NVT_DPCD_BPC_12 0x04 +#define NVT_DPCD_BPC_16 0x05 + +#define NVT_DPCD_AUTOMATED_TEST 0x02 +#define NVT_DPCD_CP_IRQ 0x04 + +#define NVT_DPCD_LANES_2_3_TRAINED 0x77 +#define NVT_DPCD_LANE_1_TRAINED 0x07 +#define NVT_DPCD_LANE_0_TRAINED 0x07 +#define NVT_DPCD_INTERLANE_ALIGN_DONE 0x1 + +#define NVT_DPCD_LANE_1_STATUS 7:4 +#define NVT_DPCD_LANE_0_STATUS 3:0 +#define NVT_DPCD_ADDRESS_LANE_STATUS 0x00202 + +#define NVT_DPCD_TEST_REQ_LINK_TRAINING 0x01 +#define NVT_DPCD_TEST_REQ_TEST_PATTERN 0x02 +#define NVT_DPCD_TEST_REQ_EDID_READ 0x04 +#define NVT_DPCD_TEST_REQ_PHY_TEST_PATTERN 0x08 + +#define NVT_DPCD_TEST_ACK 0x01 +#define NVT_DPCD_TEST_NAK 0x02 +#define NVT_DPCD_TEST_EDID_CHECKSUM_WRITE 0x04 + +#define 
NVT_DPCD_TEST_MISC_COLOR_FORMAT 2:1 +#define NVT_DPCD_TEST_MISC_DYNAMIC_RANGE 3:3 +#define NVT_DPCD_TEST_MISC_YCbCr_COEFFICIENT 4:4 +#define NVT_DPCD_TEST_MISC_BIT_DEPTH 7:5 + +#define NVT_DPCD_TEST_EDID_CHECKSUM_ADDRESS 0x261 +#define NVT_DPCD_TEST_RESPONSE_ADDRESS 0x260 +#define NVT_EDID_CHECKSUM_BYTE 127 + +#define NVT_DPCD_POWER_STATE_NORMAL 0x01 +#define NVT_DPCD_POWER_STATE_POWER_DOWN 0x02 + +// ******************* +// ** DPCD 1.1 Spec ** +// ******************* + +// 0x000h DPCD_REV +#define NVT_DPCD_DPCD_REV 0x000 +#define NVT_DPCD_DPCD_REV_MINOR_VER 3:0 +#define NVT_DPCD_DPCD_REV_MAJOR_VER 7:4 +#define NVT_DPCD_DPCD_REV_10 0x10 +#define NVT_DPCD_DPCD_REV_11 0x11 +#define NVT_DPCD_DPCD_REV_12 0x12 + +// 0x001h MAX_LINK_RATE +#define NVT_DPCD_MAX_LINK_RATE 0x001 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE 7:0 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_1_62GPS_PER_LANE 0x06 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_2_70GPS_PER_LANE 0x0A +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_5_40GPS_PER_LANE 0x14 +#define NVT_DPCD_MAX_LINK_RATE_MAX_LINK_RATE_8_10GPS_PER_LANE 0x1E + +// 0x002h - MAX_LANE_COUNT +#define NVT_DPCD_MAX_LANE_COUNT 0x002 +#define NVT_DPCD_MAX_LANE_COUNT_MAX_LANE_COUNT 4:0 +#define NVT_DPCD_MAX_LANE_COUNT_RSVD 6:5 +#define NVT_DPCD_MAX_LANE_COUNT_ENHANCED_FRAME_CAP 7:7 + +// 0x003h - MAX_DOWNSPREAD +#define NVT_DPCD_MAX_DOWNSPREAD 0x003 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD 0:0 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD_NO 0 +#define NVT_DPCD_MAX_DOWNSPREAD_MAX_DOWNSPREAD_YES 1 +#define NVT_DPCD_MAX_DOWNSPREAD_RSVD 5:1 +#define NVT_DPCD_MAX_DOWNSPREAD_NO_AUX_HANDSHAKE_LINK_TRAINING 6:6 +#define NVT_DPCD_MAX_DOWNSPREAD_RSVD_2 7:7 + +// 0x004h - NORP +#define NVT_DPCD_NORP 0x004 +#define NVT_DPCD_NORP_NUMBER_OF_RECEIVER_PORT_SUBTRACT_ONE 0:0 +#define NVT_DPCD_NORP_RSVD 7:1 + +// 0x005 - DOWNSTREAMPORT_PRESENT +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT 0x005 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_PRESENT 0:0 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE 2:1 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_DISPLAYPORT 0 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_VGA 1 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_DVI_HDMI 2 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_DWN_STRM_PORT_TYPE_OTHERS 3 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_FORMAT_CONVERSION 3:3 +#define NVT_DPCD_DOWNSTREAMPORT_PRESENT_RSVD 7:4 + +// 0x006 - MAIN_LINK_CHANNEL_CODING +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING 0x006 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_ANSI8B_10B 0:0 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_RSVD 7:1 + +// 0x007 - DOWN_STREAM_PORT_COUNT +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT 0x007 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_DWN_STRM_PORT_COUNT 3:0 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_RSVD 6:4 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT 7:7 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT_YES 1 +#define NVT_DPCD_DOWN_STREAM_PORT_COUNT_OUI_SUPPORT_NO 0 + +// 0x008h - RECEIVE_PORT0_CAP_0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0 0x008 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_RSVD 0:0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT 1:1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT_YES 1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_LOCAL_EDID_PRESENT_NO 0 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT 2:2 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_YES 1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_NO 0 
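Throughout these register definitions a value written as high:low (for example 7:4) names a bit range inside the given DPCD byte rather than a literal constant. As an illustration of that convention, here is a hand-rolled decode of the DPCD_REV byte at 0x000 defined a few lines above, under the straightforward shift-and-mask reading; the helper is not part of this header:

/* Illustration only: decode DPCD_REV (offset 0x000) consistent with the
 * NVT_DPCD_DPCD_REV_MAJOR_VER 7:4 and NVT_DPCD_DPCD_REV_MINOR_VER 3:0 ranges. */
static void DecodeDpcdRev(NvU8 rawRev, NvU8 *pMajor, NvU8 *pMinor)
{
    *pMajor = (NvU8)((rawRev >> 4) & 0x0F);   /* bits 7:4 */
    *pMinor = (NvU8)(rawRev & 0x0F);          /* bits 3:0 */
}
/* e.g. a raw value of NVT_DPCD_DPCD_REV_12 (0x12) decodes to major 1, minor 2. */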
+#define NVT_DPCD_RECEIVE_PORT0_CAP_0_RSVD_2 7:3 + +// 0x009h - RECEIVE_PORT0_CAP_1 +#define NVT_DPCD_RECEIVE_PORT0_CAP_1 0x009 +#define NVT_DPCD_RECEIVE_PORT0_CAP_1_BUFFER_SIZE 7:0 + +// 0x00Ah - RECEIVE_PORT1_CAP_0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0 0x00A +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_RSVD 0:0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT 1:1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT_YES 1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_LOCAL_EDID_PRESENT_NO 0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT 2:2 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_YES 1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_ASSOCIATED_TO_PRECEDING_PORT_NO 0 +#define NVT_DPCD_RECEIVE_PORT1_CAP_0_RSVD_2 7:3 + +// 0x00Bh - RECEIVE_PORT1_CAP_1 +#define NVT_DPCD_RECEIVE_PORT1_CAP_1 0x00B +#define NVT_DPCD_RECEIVE_PORT1_CAP_1_BUFFER_SIZE 7:0 + +// 0x021h - MST_CAP +#define NVT_DPCD_MSTM_CAP 0x021 +#define NVT_DPCD_MSTM_CAP_MST_CAP 0:0 +#define NVT_DPCD_MSTM_CAP_MST_CAP_NO 0 +#define NVT_DPCD_MSTM_CAP_MST_CAP_YES 1 + +// 0x080h ~ 0x08Fh - DWN_STRM_PORT0_CAP +#define NVT_DPCD_DWN_STRM_PORT0_CAP 0x080 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE 2:0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT 0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_VGA 1 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DVI 2 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_HDMI 3 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_NO_EDID 4 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_TYPE_DISPLAYPORT_PP 5 //Defined in Post DP 1.2 draft +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD 3:3 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD_AWARE_YES 1 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_DWN_STRM_PORT0_HPD_AWARE_NO 0 +#define NVT_DPCD_DWN_STRM_PORT0_CAP_RSVD 7:4 + +// 0x100h - LINK_BW_SET +#define NVT_DPCD_LINK_BW_SET 0x100 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET 7:0 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_1_62GPBS_PER_LANE 0x06 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_2_70GPBS_PER_LANE 0x0A +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_5_40GPBS_PER_LANE 0x14 +#define NVT_DPCD_LINK_BW_SET_LINK_BW_SET_8_10GPBS_PER_LANE 0x1E + +// 0x101h - LANE_COUNT_SET +#define NVT_DPCD_LANE_COUNT_SET 0x101 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET 4:0 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_1_LANE 1 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_2_LANES 2 +#define NVT_DPCD_LANE_COUNT_SET_LANE_COUNT_SET_4_LANES 4 +#define NVT_DPCD_LANE_COUNT_SET_RSVD 6:5 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN 7:7 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN_YES 1 +#define NVT_DPCD_LANE_COUNT_SET_ENHANCED_FRAME_EN_NO 0 + +// 0x102h - TRAINING_PATTERN_SET +#define NVT_DPCD_TRAINING_PATTERN_SET 0x102 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET 1:0 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_NOT_IN_PROGRESS 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_PATTERN_1 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_PATTERN_2 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_TRAINING_PATTERN_SET_RSVD 3 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET 3:2 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_NOT_TRANSMITTED 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_D10_2 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_SERMPT 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_LINK_QUAL_PATTERN_SET_PRBS7 3 
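The LINK_BW_SET codes above (0x06, 0x0A, 0x14, 0x1E) are multiples of 0.27 Gbps per lane, which is what NVT_DPCD_LINK_RATE_FACTOR_IN_10KHZ_MBPS (2700) captures. A small sketch of converting such a code into a per-lane link clock in 10 kHz units, under that assumption; the helper name is hypothetical:

/* Sketch: per-lane 8b/10b link clock in 10 kHz units from a LINK_BW_SET /
 * MAX_LINK_RATE code.
 *   0x06 (1.62 Gbps) ->  6 * 2700 = 16200, i.e. a 162 MHz link clock
 *   0x1E (8.10 Gbps) -> 30 * 2700 = 81000, i.e. an 810 MHz link clock */
static NvU32 DpcdLinkBwCodeTo10kHz(NvU8 linkBwCode)
{
    return (NvU32)linkBwCode * NVT_DPCD_LINK_RATE_FACTOR_IN_10KHZ_MBPS;
}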
+#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN 4:4 +#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_NO 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_RECOVERED_CLOCK_OUT_EN_YES 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE 5:5 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE_NO 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_SCRAMBLING_DISABLE_YES 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL 7:6 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_DIS_ERROR 0 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_D_ERROR 1 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_IS_ERROR 2 +#define NVT_DPCD_TRAINING_PATTERN_SET_SYMBOL_ERROR_COUNT_SEL_RSVD 3 + +// 0x103h ~ 0x106h - TRAINING_LANE?_SET +#define NVT_DPCD_TRAINING_LANE0_SET 0x103 +#define NVT_DPCD_TRAINING_LANE1_SET 0x104 +#define NVT_DPCD_TRAINING_LANE2_SET 0x105 +#define NVT_DPCD_TRAINING_LANE3_SET 0x106 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET 1:0 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L0 0 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L1 1 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L2 2 +#define NVT_DPCD_TRAINING_LANE0_SET_VOLTAGE_SWING_SET_TP1_VS_L3 3 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED 2:2 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED_NO 0 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_SWING_REACHED_YES 1 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET 4:3 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_NONE 0 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L1 1 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L2 2 +#define NVT_DPCD_TRAINING_LANE0_SET_PRE_EMPHASIS_SET_TP2_PE_L3 3 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED 5:5 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED_NO 0 +#define NVT_DPCD_TRAINING_LANE0_SET_MAX_PRE_EMPHASIS_REACHED_YES 1 +#define NVT_DPCD_TRAINING_LANE0_SET_RSVD 7:6 + +// 0x107h - DOWNSPREAD_CTRL +#define NVT_DPCD_DOWNSPREAD_CTRL 0x107 +#define NVT_DPCD_DOWNSPREAD_CTRL_RSVD 3:0 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP 4:4 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_NO 0 +#define NVT_DPCD_DOWNSPREAD_CTRL_SPREAD_AMP_YES 1 +#define NVT_DPCD_DOWNSPREAD_CTRL_RSVD_2 7:5 + +// 0x108h - MAIN_LINK_CHANNEL_CODING_SET +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET 0x108 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET_SET_ANSI8B10B 0:0 +#define NVT_DPCD_MAIN_LINK_CHANNEL_CODING_SET_RSVD 7:1 + +// 0x111h - MSTM_CTRL +#define NVT_DPCD_MSTM_CTRL 0x111 +#define NVT_DPCD_MSTM_CTRL_MST_EN 0:0 +#define NVT_DPCD_MSTM_CTRL_MST_EN_NO 0 +#define NVT_DPCD_MSTM_CTRL_MST_EN_YES 1 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN 1:1 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN_NO 0 +#define NVT_DPCD_MSTM_CTRL_UP_REQ_EN_YES 1 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC 2:2 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_NO 0 +#define NVT_DPCD_MSTM_CTRL_UPSTREAM_IS_SRC_YES 1 +#define NVT_DPCD_MSTM_CTRL_MST_RSVD 7:3 + +// 0x1A1h - BRANCH_DEVICE_CTRL +#define NVT_DPCD_BRANCH_DEVICE_CTRL 0x1A1 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE 0:0 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE_LONG_HPD_PULSE 0 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_HPD_NOTIF_TYPE_SHORT_IRQ_PULSE 1 +#define NVT_DPCD_BRANCH_DEVICE_CTRL_RSVD 7:1 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_SET 0x1C0 +#define NVT_DPCD_PAYLOAD_ALLOCATE_SET_VC_ID 6:0 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1C1 +#define 
NVT_DPCD_PAYLOAD_ALLOCATE_START_TIME_SLOT_FIELD 5:0 + +#define NVT_DPCD_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1C2 +#define NVT_DPCD_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT_FIELD 5:0 + +// 0x200h - SINK_COUNT +#define NVT_DPCD_SINK_COUNT 0x200 +#define NVT_DPCD_SINK_COUNT_SINK_COUNT 5:0 +#define NVT_DPCD_SINK_COUNT_CP_READY 6:6 +#define NVT_DPCD_SINK_COUNT_RSVD 7:7 + +// 0x201h - DEVICE_SERVICE_IRQ_VECTOR +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR 0x201 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_RSVD_REMOTE_CTRL_CMD_PENDING 0:0 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_AUTOMATED_TEST_REQUEST 1:1 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_CP_IRQ 2:2 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_MCCS_IRQ 3:3 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_DOWN_REP_MSG_READY 4:4 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_UP_REQ_MSG_READY 5:5 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_SINK_SPECIFIC_IRQ 6:6 +#define NVT_DPCD_DEVICE_SERVICE_IRQ_VECTOR_RSVD_2 7:7 + +// 0x202h ~ 0x203h - LANE0_1_STATUS +#define NVT_DPCD_LANE0_1_STATUS 0x202 +#define NVT_DPCD_LANE2_3_STATUS 0x203 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_CR_DONE 0:0 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_CHANNEL_EQ_DONE 1:1 +#define NVT_DPCD_LANE0_1_STATUS_LANE0_SYMBOL_LOCKED 2:2 +#define NVT_DPCD_LANE0_1_STATUS_RSVD 3:3 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_CR_DONE 4:4 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_CHANNEL_EQ_DONE 5:5 +#define NVT_DPCD_LANE0_1_STATUS_LANE1_SYMBOL_LOCKED 6:6 +#define NVT_DPCD_LANE0_1_STATUS_RSVD_2 7:7 + +// 0x204h - LANE_ALIGN_STATUS_UPDATED +// Temporary until Linux/Apple change their code. +#define NVT_DPCD_LANE_ALIGN_STAUTS_UPDATED NVT_DPCD_LANE_ALIGN_STATUS_UPDATED +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED 0x204 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_INTERLANE_ALIGN_DONE 0:0 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_RSVD 5:1 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_DOWNSTREAM_PORT_STATUS_CHANGED 6:6 +#define NVT_DPCD_LANE_ALIGN_STATUS_UPDATED_LINK_STATUS_UPDATED 7:7 + +// 0x205 - SINK_STATUS +#define NVT_DPCD_SINK_STATUS 0x205 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS 0:0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_OUT_OF_SYNC 0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_0_STATUS_IN_SYNC 1 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS 1:1 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_OUT_OF_SYNC 0 +#define NVT_DPCD_SINK_STATUS_RECEIVE_PORT_1_STATUS_IN_SYNC 1 +#define NVT_DPCD_SINK_STATUS_RSVD 7:2 + +// 0x206h ~ 0x207h - ADJUST_REQUEST_LANE0_1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1 0x206 +#define NVT_DPCD_ADJUST_REQUEST_LANE2_3 0x207 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0 1:0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE0_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0 3:2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE0_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1 5:4 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_1 1 +#define 
NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_VOLTAGE_SWING_LANE1_LEVEL_3 3 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1 7:6 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_0 0 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_1 1 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_2 2 +#define NVT_DPCD_ADJUST_REQUEST_LANE0_1_PRE_EMPHASIS_LANE1_LEVEL_3 3 + +// 0x208h ~ 0x20Bh TRAINING_SCORE_LANE0~3 +#define NVT_DPCD_TRAINING_SCORE_LANE0 0x208 +#define NVT_DPCD_TRAINING_SCORE_LANE1 0x209 +#define NVT_DPCD_TRAINING_SCORE_LANE2 0x20A +#define NVT_DPCD_TRAINING_SCORE_LANE3 0x20B + +// 0x210h ~ 0x217h SYMBOL_ERROR_COUNT_LANE0 (16bit) +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_LO 0x210 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_HI 0x211 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE1_LO 0x212 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE1_HI 0x213 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE2_LO 0x214 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE2_HI 0x215 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE3_LO 0x216 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE3_HI 0x217 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_LO 7:0 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_HI 6:0 +#define NVT_DPCD_SYMBOL_ERROR_COUNT_LANE0_ERROR_COUNT_VALID 7:7 + +// 0x218h TEST_REQUEST +#define NVT_DPCD_TEST_REQUEST 0x218 +#define NVT_DPCD_TEST_REQUEST_TEST_LINK_TRAINING 0:0 +#define NVT_DPCD_TEST_REQUEST_TEST_PATTERN 1:1 +#define NVT_DPCD_TEST_REQUEST_TEST_EDID_READ 2:2 +#define NVT_DPCD_TEST_REQUEST_PHY_TEST_PATTERN 3:3 +#define NVT_DPCD_TEST_REQUEST_RSVD 7:4 + +// 0x219h TEST_LINK_RATE +#define NVT_DPCD_TEST_LINK_RATE 0x219 + +// 0x220h TEST_LANE_COUNT +#define NVT_DPCD_TEST_LANE_COUNT 0x220 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT 4:0 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_ONE_LANE 1 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_TWO_LANES 2 +#define NVT_DPCD_TEST_LANE_COUNT_TEST_LANE_COUNT_FOUR_LANES 4 +#define NVT_DPCD_TEST_LANE_COUNT_RSVD 7:5 + +// 0x221h TEST_PATTERN +#define NVT_DPCD_TEST_PATTERN 0x221 +#define NVT_DPCD_TEST_PATTERN_NO_TEST_PATTERN_TRANSMITTED 0 +#define NVT_DPCD_TEST_PATTERN_COLOR_RAMPS 1 +#define NVT_DPCD_TEST_PATTERN_BLACK_AND_WHITE_VERTICAL_LINES 2 +#define NVT_DPCD_TEST_PATTERN_COLOR_SQUARE 3 + +// 0x222h ~ 0x223h TEST_H_TOTAL +#define NVT_DPCD_TEST_H_TOTAL_HI 0x222 +#define NVT_DPCD_TEST_H_TOTAL_LO 0x223 + +// 0x224h ~ 0x225h TEST_V_TOTAL +#define NVT_DPCD_TEST_V_TOTAL_HI 0x224 +#define NVT_DPCD_TEST_V_TOTAL_LO 0x225 + +// 0x226h ~ 0x227h TEST_H_START +#define NVT_DPCD_TEST_H_START_HI 0x226 +#define NVT_DPCD_TEST_H_START_LO 0x227 + +// 0x228h ~ 0x229h TEST_V_START +#define NVT_DPCD_TEST_V_START_HI 0x228 +#define NVT_DPCD_TEST_V_START_LO 0x229 + +// 0x22Ah ~ 0x22Bh TEST_HSYNC +#define NVT_DPCD_TEST_HSYNC_HI 0x22A +#define NVT_DPCD_TEST_HSYNC_LO 0x22B +#define NVT_DPCD_TEST_HSYNC_HI_TEST_HSYNC_WIDTH_14_8 6:0 +#define NVT_DPCD_TEST_HSYNC_HI_TEST_HSYNC_POLARITY 7:7 + +// 0x22Ch ~ 0x22Dh TEST_VSYNC +#define NVT_DPCD_TEST_VSYNC_HI 0x22C +#define NVT_DPCD_TEST_VSYNC_LO 0x22D +#define NVT_DPCD_TEST_VSYNC_HI_TEST_VSYNC_WIDTH_14_8 6:0 +#define NVT_DPCD_TEST_VSYNC_HI_TEST_VSYNC_POLARITY 7:7 + +// 0x22Eh ~ 0x22Fh TEST_H_WIDTH +#define NVT_DPCD_TEST_H_WIDTH_HI 0x22E +#define NVT_DPCD_TEST_H_WIDTH_LO 0x22F + +// 0x230h ~ 0x231h TEST_V_WIDTH +#define NVT_DPCD_TEST_V_HEIGHT_HI 0x230 +#define NVT_DPCD_TEST_V_HEIGHT_LO 0x231 + +// 0x232h ~ 0x233h TEST_MISC +#define 
NVT_DPCD_TEST_MISC_LO 0x232 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK 0:0 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK_ASYNC 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_SYNCHRONOUS_CLOCK_SYNC 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT 2:1 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_RGB 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_YCbCr422 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_YCbCr444 2 +#define NVT_DPCD_TEST_MISC_LO_TEST_COLOR_FORMAT_RSVD 3 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE 3:3 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE_VESA 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_DYNAMIC_RANGE_CEA 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS 4:4 +#define NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS_ITU601 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_YCBCR_COEFFICIENTS_ITU709 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH 7:5 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_6BPC 0 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_8BPC 1 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_10BPC 2 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_12BPC 3 +#define NVT_DPCD_TEST_MISC_LO_TEST_BIT_DEPTH_16BPC 4 +#define NVT_DPCD_TEST_MISC_HI 0x233 +#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR 0:0 +#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR_1 0 +#define NVT_DPCD_TEST_MISC_HI_TEST_REFRESH_DENOMINATOR_1001 1 +#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED 1:1 +#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_NO 0 +#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_YES 1 +#define NVT_DPCD_TEST_MISC_HI_TEST_INTERLACED_RSVD 7:2 + +// 0x234h TEST_REFRESH_RATE_NUMERATOR +#define NVT_DPCD_TEST_REFRESH_RATE_NUMERATOR 0x234 + +// 0x240h ~ 0x241h TEST_CRC_R_Cr +#define NVT_DPCD_TEST_CRC_R_Cr_LO 0x240 +#define NVT_DPCD_TEST_CRC_R_Cr_HI 0x241 + +// 0x242h ~ 0x243h TEST_CRC_G_Y +#define NVT_DPCD_TEST_CRC_G_Y_LO 0x242 +#define NVT_DPCD_TEST_CRC_G_Y_HI 0x243 + +// 0x244h ~ 0x245h TEST_CRC_B_Cb +#define NVT_DPCD_TEST_CRC_B_Cb_LO 0x244 +#define NVT_DPCD_TEST_CRC_B_Cb_HI 0x245 + +// 0x246h TEST_SINC_MISC +#define NVT_DPCD_TEST_SINK_MISC 0x246 +#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_COUNT 3:0 +#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED 5:5 +#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED_NO 0 +#define NVT_DPCD_TEST_SINK_MISC_TEST_CRC_SUPPORTED_YES 1 +#define NVT_DPCD_TEST_SINK_MISC_RSVD 7:6 + +// 0x248h PHY_TEST_PATTERN +#define NVT_DPCD_PHY_TEST_PATTERN 0x248 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL 1:0 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_NO_TEST_PATTERN 0 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_D10_2 1 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_SEMC 2 +#define NVT_DPCD_PHY_TEST_PATTERN_PHY_TEST_PATTERN_SEL_PRBS7 3 +#define NVT_DPCD_PHY_TEST_PATTERN_RSVD 7:2 + +// 0x260h TEST_RESPONSE +#define NVT_DPCD_TEST_RESPONSE 0x260 +#define NVT_DPCD_TEST_RESPONSE_TEST_ACK 0:0 +#define NVT_DPCD_TEST_RESPONSE_TEST_ACK_KEEP_TEST_REQ 0 +#define NVT_DPCD_TEST_RESPONSE_TEST_ACK_CLEAR_TEST_REQ 1 +#define NVT_DPCD_TEST_RESPONSE_TEST_NAK 1:1 +#define NVT_DPCD_TEST_RESPONSE_TEST_NACK_KEEP_TEST_REQ 0 +#define NVT_DPCD_TEST_RESPONSE_TEST_NACK_CLEAR_TEST_REQ 1 +#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE 2:2 +#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE_NO 0 +#define NVT_DPCD_TEST_RESPONSE_TEST_EDID_CHECKSUM_WRITE_YES 1 +#define NVT_DPCD_TEST_RESPONSE_RSVD 7:3 + +// 0x261h TEST_EDID_CHECKSUM +#define NVT_DPCD_TEST_EDID_CHECKSUM 0x261 + +// 0x270 TEST_SINK +#define NVT_DPCD_TEST_SINK 
0x270 +#define NVT_DPCD_TEST_SINK_TEST_SINK_START 0:0 +#define NVT_DPCD_TEST_SINK_TEST_SINK_START_STOP_CALC_CRC 0 +#define NVT_DPCD_TEST_SINK_TEST_SINK_START_START_CALC_CRC 1 +#define NVT_DPCD_TEST_SINK_RSVD 7:1 + +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS 0x2C0 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED 0:0 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED_NO 0 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_TABLE_UPDATED_YES 1 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED 1:1 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_NO 0 +#define NVT_DPCD_PAYLOAD_TABLE_UPDATE_STATUS_ACT_HANDLED_YES 1 + +// 0x300h ~ 0x302h SOURCE_IEEE_OUT +#define NVT_DPCD_SOURCE_IEEE_OUT_7_0 0x300 +#define NVT_DPCD_SOURCE_IEEE_OUT_15_8 0x301 +#define NVT_DPCD_SOURCE_IEEE_OUT_23_16 0x302 + +// 0x400h ~ 0x402h SINK_IEEE_OUT +#define NVT_DPCD_SINK_IEEE_OUT_7_0 0x400 +#define NVT_DPCD_SINK_IEEE_OUT_15_8 0x401 +#define NVT_DPCD_SINK_IEEE_OUT_23_16 0x402 + +// 0x500h ~ 0x502h BRANCH_IEEE_OUT +#define NVT_DPCD_BRANCH_IEEE_OUT_7_0 0x500 +#define NVT_DPCD_BRANCH_IEEE_OUT_15_8 0x501 +#define NVT_DPCD_BRANCH_IEEE_OUT_23_16 0x502 + +// 0x600 SET_POWER +#define NVT_DPCD_SET_POWER 0x600 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE 1:0 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE_RSVD 0 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE_D0 1 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE_D3 2 +#define NVT_DPCD_SET_POWER_SET_POWER_STATE_RSVD_2 3 +#define NVT_DPCD_SET_POWER_RSVD 7:2 + +//************************************* +// DP 1.2 Main Stream Attribute Fiedls +//************************************* + +#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_MASK 0x01 // MISC0 bit 0 Synchronous Clock +#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_SHIFT 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_ASYNC 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC0_SYNC_CLOCK_INSYNC 0x1 + +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_MASK 0xe0 // MISC0 bits 7:5 number of bits per color +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_SHIFT 0x5 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_6 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_8 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_10 0x2 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_12 0x3 +#define NVT_DP_INFOFRAME_MSA_MISC0_BITS_PER_COLOR_16 0x4 + +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_MASK 0x1e // MISC0 bits 4:1 Color Encoding Format and Content Color Gamut +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_SHIFT 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_LEGACY 0x0 // RGB unspecified color space (legacy RGB mode) +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_CEA_RGB 0x4 // CEA RGB (sRGB primaries) +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_WIDE_GAMUT_FIXED_POINT 0x3 // RGB wide gamut fixed point (XR8,XR10, XR12) +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_WIDE_GAMUT_FLOAT_POINT 0xb // RGB wide gamut floating point(scRGB) +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_422_ITU601 0x5 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_422_ITU709 0xd +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_444_ITU601 0x6 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_YCBCR_444_ITU709 0xe +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_422_ITU601 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_422_ITU709 0x9 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_444_ITU601 0x2 +#define NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_XVYCC_444_ITU709 0xa +#define 
NVT_DP_INFOFRAME_MSA_MISC0_COLOR_FORMAT_ADOBE_RGB 0xc + +#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_MASK 0x01 // MISC1 bit 0 Interlaced Vertical Total +#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_SHIFT 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_ODD 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_INTERLACED_V_TOTAL_EVEN 0x1 + +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_MASK 0x06 // MISC1 bits 2:1 stereo video attribute +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_SHIFT 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_NONE 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_RIGHT_LEFT 0x1 +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_LEFT_RIGHT 0x3 +#define NVT_DP_INFOFRAME_MSA_MISC1_STEREO_RESERVED 0x2 + +#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_MASK 0x38 // MISC1 bits 5:3 reserved (DP1.3). Note: DP1.2 MISC 6:3 is reserved and undefined. +#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_SHIFT 0x3 +#define NVT_DP_INFOFRAME_MSA_MISC1_RESERVED_DEFAULT 0x0 + +#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_MASK 0x40 // MISC1 bit Using VSC SDP, and sink to ignore MISC1 bit 7 and MISC0 7:1. +#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_SHIFT 0x6 +#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_DISABLE 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_VSC_SDP_ENABLE 0x1 + +#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR_OR_LUMINANCE_MASK 0x80 // MISC1 bit 7 Y-Only Video +#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR_OR_LUMINANCE_SHIFT 0x7 +#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_COLOR 0x0 +#define NVT_DP_INFOFRAME_MSA_MISC1_BITS_PER_LUMINANCE 0x1 + +// ************************ +// ** HDCP DPCD 1.0 Spec ** +// ************************ + +// 0x68029 BSTATUS +#define NVT_DPCD_HDCP_BSTATUS 0x68029 +#define NVT_DPCD_HDCP_BSTATUS_LINK_INTEGRITY_FAILURE 0x04 +#define NVT_DPCD_HDCP_BSTATUS_REAUTHENTICATION_REQUEST 0x08 + +#define NVT_DPCD_HDCP_BCAPS_OFFSET 0x00068028 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE 0:0 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_NO 0x00000000 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_CAPABLE_YES 0x00000001 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER 1:1 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_NO 0x00000000 +#define NVT_DPCD_HDCP_BCAPS_OFFSET_HDCP_REPEATER_YES 0x00000001 + +#define NVT_DPCD_HDCP_BKSV_OFFSET 0x00068000 +#define HDCP_KSV_SIZE 5 + + +// ********************************************* +// ** Vendor DPCD for Apple's mDP->VGA dongle ** +// ********************************************* + +// 0x30F DP2VGA_I2C_SPEED_CONTROL +#define NVT_DPCD_DP2VGA_I2C_SPEED_CONTROL 0x30F + +// 0x50C DP2VGA_GENERAL_STATUS +#define NVT_DPCD_DP2VGA_GENERAL_STATUS 0x50C + +// 0x50D DP2VGA_I2C_SPEED_CAP +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP 0x50D +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_SLOWEST 0xFF +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_1KBPS 0x01 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_3KBPS 0x02 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_10KBPS 0x04 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_100KBPS 0x08 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_400KBPS 0x10 +#define NVT_DPCD_DP2VGA_I2C_SPEED_CAP_1MBPS 0x20 + + +// +// HDMI/DP common definitions + +#define NVT_DYNAMIC_RANGE_VESA 0x00 +#define NVT_DYNAMIC_RANGE_CEA 0x01 +#define NVT_DYNAMIC_RANGE_AUTO 0xFF + + +typedef struct tagNVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT +{ + NvU8 type : 3; // the downstream port type + NvU8 isHpdAware : 1; // if it's HPD aware + NvU8 reserved : 4; +}NVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT; +// +typedef struct tagNVT_DPCD_PARSED_RECEIVER_INFO +{ + // receiver info + NvU32 rev; // DPCD version number + 
NvU32 maxLinkRate; // the max link rate of main link lanes in 10KHz + NvU32 maxLaneCount; // the max number of lanes + NvU32 numOfPorts; // the number of receiver ports + NvU32 p0BufferSizePerLane; // the buffer size per lane (in BYTE) + NvU32 p1BufferSizePerLane; // the buffer size per lane (in BYTE) + + // downstream port info + NvU32 downstreamPortCount; // the total number of down stream ports + NvU32 downstreamPort0Type; // type of downstream port 0 + NVT_PARSED_DPCD_INFO_DOWNSTREAM_PORT downstreamPort[NVT_DPCD_RECEIVER_MAX_DOWNSTREAM_PORT]; + + // other misc info + NvU32 cap_support0_005DownSpread : 1; + NvU32 cap_supportEnhancedFrame : 1; + NvU32 cap_noAuxHandshakeLinkTraining : 1; + NvU32 cap_downstreamPortHasFormatConvBlk : 1; + NvU32 cap_mainLinkChSupportANSI8B10B : 1; + NvU32 cap_downstreamPortSupportOUI : 1; + NvU32 cap_p0HasEDID : 1; + NvU32 cap_p0AssociatedToPrecedingPort : 1; + NvU32 cap_p1HasEDID : 1; + NvU32 cap_p1AssociatedToPrecedingPort : 1; + + // DP 1.2 fields + NvU32 cap_mstm : 1; + NvU32 cap_reserved : 21; +}NVT_DPCD_PARSED_RECEIVER_INFO; + +#define NVT_DPCD_NUM_TRAINING_LANES 4 + +typedef struct tagNVT_TRAINING_LANE_SETTING +{ + NvU8 voltageSwing; + NvU8 maxSwingReached; + NvU8 preEmphasis; + NvU8 maxPreEmphasisReached; +}NVT_TRAINING_LANE_SETTING; + +// 00100h LINK CONFIGURATION FIELD +typedef struct tagNVT_DPCD_PARSED_LINK_CONFIG +{ + NvU8 linkRate; + NvU8 laneCount; + + NVT_TRAINING_LANE_SETTING trainLaneSetting[NVT_DPCD_NUM_TRAINING_LANES]; + + NvU32 enhancedFrameEnabled : 1; + NvU32 trainingPatternSetting : 2; + NvU32 linkQualityPatternSetting : 2; + NvU32 recoveredClockOutputEnabled : 1; + NvU32 scramblingDisable : 1; + NvU32 symbolErrorCount : 2; + NvU32 spreadAmp : 1; + NvU32 mainLinkCoding8b10b : 1; + NvU32 multiStreamEnabled : 1; + NvU32 reserved : 19; +}NVT_DPCD_PARSED_LINK_CONFIG; + +typedef struct tagNVT_DPCD_INFO +{ + NVT_DPCD_PARSED_RECEIVER_INFO receiver; + NVT_DPCD_PARSED_LINK_CONFIG linkConfig; + NvU32 sourceOUI; + NvU32 sinkOUI; + NvU32 branchOUI; +}NVT_DPCD_INFO; + +typedef struct tagNVT_DPCD_CONFIG +{ + NvU32 dpInfoFlags; +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED 0:0 +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED_FALSE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENABLED_TRUE (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE 7:4 +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_NONE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2DVI (0x00000001) // B2: dp2dvi-singlelink +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2HDMI (0x00000002) // dp2hdmi +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2DVI2 (0x00000003) // B3: dp2dvi-duallink +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2VGA (0x00000004) // B4: dp2vga +#define NV_DISPLAYPORT_INFO_FLAGS_DONGLE_TYPE_DP2TV (0x00000005) // Composite/SVideo +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT 10:8 // Maximum supported laneCount +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_1_LANE (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_2_LANE (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LANECOUNT_4_LANE (0x00000002) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE 13:11 // Maximum supported linkRate +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_1_62GBPS (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_2_70GBPS (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_5_40GBPS (0x00000002) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MAX_CAP_LINKRATE_8_10GBPS (0x00000003) 
+#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM 16:16 // Bit to check MST/SST +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING 17:17 // Bit to check enhanced framing support +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_ENHANCED_FRAMING_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD 18:18 // Bit to check downspread support +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_DOWNSPREAD_ENABLED (0x00000001) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING 19:19 // Bit to check scrambling +#define NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING_DISABLED (0x00000000) +#define NV_DISPLAYPORT_INFO_FLAGS_DP_SCRAMBLING_ENABLED (0x00000001) + NvU32 linkRate; + NvU32 laneCount; + NvU32 colorFormat; + NvU32 dynamicRange; + NvU32 colorimetry; + NvU32 bpc; + NvU32 bpp; + + // pre-emphasis and drive current level (EFI might need this information) + NvU8 laneData[4]; + // DP max pixelClock supported based on DP max laneCount/linkRate + NvU32 dpMaxPixelClk; + NvU8 maxCapLinkRate; + NvU8 maxCapLaneCount; + + // B4 (DP2VGA) Vendor Specific I2C Speed Control + NvU8 dp2vga_i2cCap; + NvU8 dp2vga_i2cCtrl; + + NvU8 bDpOffline; +}NVT_DPCD_CONFIG; + +typedef struct tagNVT_DPCD_DP_TUNNELING_CAPS +{ + NvU8 dpTunnelingSupport : 1; // DP Tunneling through USB4 Support + NvU8 reserved : 5; // Reserved. + NvU8 dpPanelReplayTunnelingOptSupport : 1; // Panel Replay Tunneling Optimization Support + NvU8 dpInBwAllocationModeSupport : 1; // DP IN Bandwidth Allocation Mode Support +}NVT_DPCD_DP_TUNNELING_CAPS; + +typedef struct tagNVT_DPCD_DP_IN_ADAPTER_INFO +{ + NvU8 dpInAdapterNumber : 6; // DP IN Adapter Number + NvU8 reserved : 2; +}NVT_DPCD_DP_IN_ADAPTER_INFO; + +typedef struct tagNVT_DPCD_USB4_DRIVER_ID +{ + NvU8 usb4DriverId : 4; // USB4 Driver ID + NvU8 reserved : 4; +}NVT_DPCD_USB4_DRIVER_ID; + +//****************************** +// Intel EDID Like Data (ELD) +//****************************** +#define NVT_ELD_VER_1 0x1 // ELD version 1, which is an obsolete ELD structure. Treated as reserved +#define NVT_ELD_VER_2 0x2 // ELD version 2, which supports CEA version 861-D or below. Max baseline ELD size of 80 bytes (15 short audio descriptors) +#define NVT_ELD_VER_VIDEO_DRIVER_UNLOAD 0x1F // Indicates an ELD that has been partially populated through implementation specific mean of default programming before an external + // graphics driver is load, Only the fields that is called out as "canned" fields will be populated, and audio driver should + // ignore the non "canned" fields. 
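Looking back at NVT_DPCD_CONFIG above, dpInfoFlags packs the NV_DISPLAYPORT_INFO_FLAGS_* fields into a single NvU32, with each high:low define naming the bit range it occupies. A minimal sketch of testing one of those ranges, here the MST bit at 16:16; the helper is illustrative and not part of the header:

/* Illustrative only: check the multistream bit (16:16) inside
 * NVT_DPCD_CONFIG::dpInfoFlags. */
static NvBool DpInfoFlagsIsMultistream(NvU32 dpInfoFlags)
{
    return (NvBool)(((dpInfoFlags >> 16) & 0x1) ==
                    NV_DISPLAYPORT_INFO_FLAGS_DP_MULTISTREAM_ENABLED);
}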
+#define NVT_ELD_CONN_TYPE_HDMI 0x0 // indicates an HDMI connection type +#define NVT_ELD_CONN_TYPE_DP 0x1 // indicates a DP connection type + + +//****************************** +// Audio +//****************************** +#define NVT_AUDIO_768KHZ 768000 // HBR Audio +#define NVT_AUDIO_384KHZ 384000 // HBR Audio +#define NVT_AUDIO_192KHZ 192000 +#define NVT_AUDIO_176KHZ 176000 +#define NVT_AUDIO_96KHZ 96000 +#define NVT_AUDIO_88KHZ 88000 +#define NVT_AUDIO_48KHZ 48000 +#define NVT_AUDIO_44KHZ 44000 +#define NVT_AUDIO_32KHZ 32000 + +//Default format for HDTV is NVT_DEFAULT_HDTV_FMT i.e 1080i +#define NVT_DEFAULT_HDTV_PREFERRED_TIMING(x, y, z, p) \ + if(((x) == 1920) && ((y) == 1080) && ((z) != D3DDDI_VSSLO_PROGRESSIVE )) p = 1; + +//Default format for non-DDC displays is 10x7 +#define NVT_DEFAULT_NONDCC_PREFERRED_TIMING(x, y, z, p) \ + if(((x) == 1024) && ((y) == 768) && ((z) == 60 )) p = 1; + + +// Length of user-friendly monitor name, derived from the EDID's +// Display Product Name descriptor block, plus the EDID manufacturer PNP +// ID. The Display Product can be distributed across four 13-byte +// descriptor blocks, and the PNP ID currently decodes to at most 40 +// characters: 4*13 + 40 = 92 +#define NVT_EDID_MONITOR_NAME_STRING_LENGTH 96 + +// Compute the actual size of an EDID with a pointer to an NVT_EDID_INFO. +static NV_INLINE NvU32 NVT_EDID_ACTUAL_SIZE(const NVT_EDID_INFO *pInfo) +{ + return (pInfo->total_extensions + 1) * 128; +} + +//****************************** +//****************************** +//** the export functions ** +//****************************** +//****************************** + +// the common timing function return values +typedef enum +{ + NVT_STATUS_SUCCESS = 0, // Success (no status) + NVT_STATUS_ERR = 0x80000000, // generic get timing error + NVT_STATUS_INVALID_PARAMETER, // passed an invalid parameter + NVT_STATUS_NO_MEMORY, // memory allocation failed +} NVT_STATUS; + +//************************************* +// The EDID validation Mask +//************************************* +#define NVT_EDID_VALIDATION_MASK 0xFFFFFFFF +#define NVT_IS_EDID_VALIDATION_FLAGS(x, n) ((((x)&NVT_EDID_VALIDATION_MASK)) & NVBIT32(n)) +#define NVT_CLEAR_EDID_VALIDATION_FLAGS(x, n) ((x)&=(~NVBIT32(n))) + +typedef enum +{ + // errors returned as a bitmask by NvTiming_EDIDValidationMask() + NVT_EDID_VALIDATION_ERR_EXT = 0, + NVT_EDID_VALIDATION_ERR_VERSION, + NVT_EDID_VALIDATION_ERR_SIZE, + NVT_EDID_VALIDATION_ERR_CHECKSUM, + NVT_EDID_VALIDATION_ERR_RANGE_LIMIT, + NVT_EDID_VALIDATION_ERR_DTD, + NVT_EDID_VALIDATION_ERR_EXT_DTD, +} NVT_EDID_VALIDATION_ERR_STATUS; +#define NVT_EDID_VALIDATION_ERR_MASK(x) NVBIT32(x) + +typedef enum +{ + // errors returned as a bitmask by NvTiming_DisplayID2ValidationMask() + NVT_DID2_VALIDATION_ERR_EXT = 0, + NVT_DID2_VALIDATION_ERR_VERSION, + NVT_DID2_VALIDATION_ERR_SIZE, + NVT_DID2_VALIDATION_ERR_CHECKSUM, + NVT_DID2_VALIDATION_ERR_PRODUCT_ID, + NVT_DID2_VALIDATION_ERR_NO_DATA_BLOCK, + NVT_DID2_VALIDATION_ERR_RANGE_LIMIT, + NVT_DID2_VALIDATION_ERR_NATIVE_DTD, +} NVT_DID2_VALIDATION_ERR_STATUS; +#define NVT_DID2_VALIDATION_ERR_MASK(x) NVBIT32(x) + +// timing calculation flags: +#define NVT_FLAG_PROGRESSIVE_TIMING 0x00000000 +#define NVT_FLAG_INTERLACED_TIMING NVT_INTERLACED +#define NVT_FLAG_INTERLACED_TIMING2 NVT_INTERLACED_NO_EXTRA_VBLANK_ON_FIELD2 //without extra vblank on field 2 +#define NVT_FLAG_DOUBLE_SCAN_TIMING 0x00000010 +#define NVT_FLAG_REDUCED_BLANKING_TIMING 0x00000020 +#define NVT_FLAG_MAX_EDID_TIMING 0x00000040 +#define 
NVT_FLAG_NV_DOUBLE_SCAN_TIMING 0x00000080 +#define NVT_FLAG_NATIVE_TIMING 0x00000100 +#define NVT_FLAG_EDID_TIMING 0x00000200 +#define NVT_FLAG_CEA_4X3_TIMING 0x00000400 +#define NVT_FLAG_CEA_16X9_TIMING 0x00000800 +#define NVT_FLAG_OS_ADDED_TIMING 0x00001000 +#define NVT_FLAG_SPECTRUM_SPREAD 0x00002000 +#define NVT_FLAG_EDID_TIMING_RR_MATCH 0x00004000 +#define NVT_FLAG_EDID_861_ST 0x00008000 +#define NVT_FLAG_EDID_DTD_EIZO_SPLIT 0x00010000 +#define NVT_FLAG_DTD1_TIMING 0x00020000 +#define NVT_FLAG_NV_PREFERRED_TIMING 0x00040000 +#define NVT_FLAG_DTD1_PREFERRED_TIMING 0x00080000 +#define NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING 0x00100000 +#define NVT_FLAG_CEA_PREFERRED_TIMING 0x00200000 +#define NVT_FLAG_DISPLAYID_7_DSC_PASSTHRU 0x00400000 +#define NVT_FLAG_DISPLAYID_2_0_TIMING 0x00800000 // this one for the CTA861 embedded in DID20 +#define NVT_FLAG_DISPLAYID_2_0_EXPLICT_YUV420 0x01000000 // DID2 E7 spec. supported yuv420 indicated + +#define NVT_FLAG_INTERLACED_MASK (NVT_FLAG_INTERLACED_TIMING | NVT_FLAG_INTERLACED_TIMING2) + +#ifdef __cplusplus +extern "C" { +#endif + +// Generic timing parameter calculation +NvU16 NvTiming_CalcRR(NvU32 pclk, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal); +NvU32 NvTiming_CalcRRx1k(NvU32 pclk, NvU16 interlaced, NvU16 HTotal, NvU16 VTotal); + +NvU32 NvTiming_IsRoundedRREqual(NvU16 rr1, NvU32 rr1x1k, NvU16 rr2); +NvU32 NvTiming_IsTimingExactEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU32 NvTiming_IsTimingExactEqualEx(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU32 NvTiming_IsTimingRelaxedEqual(const NVT_TIMING *pT1, const NVT_TIMING *pT2); +NvU16 NvTiming_MaxFrameWidth(NvU16 HVisible, NvU16 rep); + +// Establish timing enumeration +NVT_STATUS NvTiming_EnumEST(NvU32 index, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumESTIII(NvU32 index, NVT_TIMING *pT); + +// GTF timing calculation +NVT_STATUS NvTiming_CalcGTF(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); + +// DMT timing calculation +NVT_STATUS NvTiming_EnumDMT(NvU32 dmtId, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcDMT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcDMT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); + +// CVT timing calculation +NVT_STATUS NvTiming_CalcCVT(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcCVT_RB(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_TIMING *pT); +NVT_STATUS NvTiming_CalcCVT_RB2(NvU32 width, NvU32 height, NvU32 rr, NvBool is1000div1001, NVT_TIMING *pT); +NvBool NvTiming_IsTimingCVTRB(const NVT_TIMING *pTiming); + +// CEA/EIA/Psf timing +NVT_STATUS NvTiming_CalcCEA861bTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 pixelRepeatCount, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumCEA861bTiming(NvU32 ceaFormat, NVT_TIMING *pT); +NVT_STATUS NvTiming_EnumNvPsfTiming(NvU32 nvPsfFormat, NVT_TIMING *pT); +NvU32 NvTiming_GetCEA861TimingIndex(NVT_TIMING *pT); + +//expose the HDMI extended video timing defined by the HDMI LLC VSDB +NVT_STATUS NvTiming_EnumHdmiVsdbExtendedTiming(NvU32 hdmi_vic, NVT_TIMING *pT); + +// TV(analog) based timing +NVT_STATUS NvTiming_GetTvTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NvU32 tvFormat, NVT_TIMING *pT); + +// Get EDID timing +NVT_STATUS NvTiming_GetEdidTimingEx(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_TIMING *pT, NvU32 rrx1k); +NVT_STATUS NvTiming_GetEdidTiming(NvU32 width, NvU32 height, NvU32 rr, NvU32 flag, NVT_EDID_INFO *pEdidInfo, 
NVT_TIMING *pT); + +// Get EDID based HDMI Stereo timing +NVT_STATUS NvTiming_GetHDMIStereoExtTimingFromEDID(NvU32 width, NvU32 height, NvU32 rr, NvU8 structure, NvU8 detail, NvU32 flag, NVT_EDID_INFO *pEdidInfo, NVT_EXT_TIMING *pT); +void NvTiming_GetHDMIStereoTimingFrom2DTiming(const NVT_TIMING *pTiming, NvU8 StereoStructureType, NvU8 SideBySideHalfDetail, NVT_EXT_TIMING *pExtTiming); +NVT_STATUS NvTiming_GetHDMIStereoMandatoryFormatDetail(const NvU8 vic, NvU16 *pStereoStructureMask, NvU8 *pSideBySideHalfDetail); + +// EDID based AspectRatio Timing +NVT_STATUS NvTiming_GetEDIDBasedASPRTiming(NvU16 width, NvU16 height, NvU16 rr, NVT_EDID_INFO *pEI, NVT_TIMING *ft); + + +// EDID or DISPLAYID2 version +NvU32 NvTiming_GetVESADisplayDescriptorVersion(NvU8 *rawData, NvU32 *pVer); + +// EDID entry parse +NVT_STATUS NV_STDCALL NvTiming_ParseEDIDInfo(NvU8 *pEdid, NvU32 length, NVT_EDID_INFO *pEdidInfo); +NvU32 NvTiming_EDIDValidationMask(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation); +NVT_STATUS NvTiming_EDIDValidation(NvU8 *pEdid, NvU32 length, NvBool bIsStrongValidation); + +// DisplayID20 standalone entry parse +NVT_STATUS NV_STDCALL NvTiming_parseDisplayId20Info(const NvU8 *pDisplayId, NvU32 length, NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo); +NvU32 NvTiming_DisplayID2ValidationMask(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, NvBool bIsStrongValidation); +NVT_STATUS NvTiming_DisplayID2ValidationDataBlocks(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, NvBool bIsStrongValidation); + +NVT_STATUS NvTiming_Get18ByteLongDescriptorIndex(NVT_EDID_INFO *pEdidInfo, NvU8 tag, NvU32 *dtdIndex); +NVT_STATUS NvTiming_GetProductName(const NVT_EDID_INFO *pEdidInfo, + NvU8 *pProductName, + const NvU32 productNameLength); +NvU32 NvTiming_CalculateEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidsize); +NvU32 NvTiming_CalculateCommonEDIDCRC32(NvU8* pEDIDBuffer, NvU32 edidVersion); +NVT_STATUS NvTiming_CalculateEDIDLimits(NVT_EDID_INFO *pEdidInfo, NVT_EDID_RANGE_LIMIT *pLimit); +void NvTiming_GetMonitorName(NVT_EDID_INFO *pEdidInfo, NvU8 monitor_name[NVT_EDID_MONITOR_NAME_STRING_LENGTH]); + +// utility routines +NvU64 axb_div_c_64(NvU64 a, NvU64 b, NvU64 c); +NvU32 axb_div_c(NvU32 a, NvU32 b, NvU32 c); +NvU32 a_div_b(NvU32 a, NvU32 b); +NvU32 calculateCRC32(NvU8* pBuf, NvU32 bufsize); +void patchChecksum(NvU8* pBuf); +NvBool isChecksumValid(NvU8* pBuf); + +NvU32 RRx1kToPclk (NVT_TIMING *pT); + +NVT_STATUS NvTiming_ComposeCustTimingString(NVT_TIMING *pT); + +// Infoframe composer +NVT_STATUS NvTiming_ConstructVideoInfoframeCtrl(const NVT_TIMING *pTiming, NVT_VIDEO_INFOFRAME_CTRL *pCtrl); +NVT_STATUS NvTiming_ConstructVideoInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VIDEO_INFOFRAME_CTRL *pCtrl, NVT_VIDEO_INFOFRAME *pContext, NVT_VIDEO_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructAudioInfoframe(NVT_AUDIO_INFOFRAME_CTRL *pCtrl, NVT_AUDIO_INFOFRAME *pContext, NVT_AUDIO_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructVendorSpecificInfoframe(NVT_EDID_INFO *pEdidInfo, NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL *pCtrl, NVT_VENDOR_SPECIFIC_INFOFRAME *p); +NVT_STATUS NvTiming_ConstructExtendedMetadataPacketInfoframe(NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL *pCtrl, NVT_EXTENDED_METADATA_PACKET_INFOFRAME *p); + +// Get specific timing from parsed EDID +NVT_STATUS NvTiming_GetDTD1Timing (NVT_EDID_INFO * pEdidInfo, NVT_TIMING * pT); + +#define NVT_IS_DTD(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_DTD) +#define NVT_IS_EXT_DTD(d) (NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_EXT_DTD) +#define NVT_IS_CTA861(d) 
(NVT_GET_TIMING_STATUS_TYPE((d)) == NVT_TYPE_EDID_861ST) + +#define NVT_IS_DTD1(d) ((NVT_IS_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == 1)) +#define NVT_IS_DTDn(d, n) ((NVT_IS_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == n)) +#define NVT_IS_EXT_DTDn(d, n) ((NVT_IS_EXT_DTD((d))) && (NVT_GET_TIMING_STATUS_SEQ((d)) == n)) + +#define NVT_DID20_TIMING_IS_CTA861(flag, status) ((NVT_IS_CTA861((status))) && (0 != (NVT_FLAG_DISPLAYID_2_0_TIMING & (flag)))) +#define NVT_PREFERRED_TIMING_IS_DTD1(flag, status) ((NVT_IS_DTD1((status))) && (0 != (NVT_FLAG_DTD1_PREFERRED_TIMING & (flag)))) +#define NVT_PREFERRED_TIMING_IS_DISPLAYID(flag) (0 != (NVT_FLAG_DISPLAYID_DTD_PREFERRED_TIMING & flag)) +#define NVT_PREFERRED_TIMING_IS_CEA(flag) (0 != (NVT_FLAG_CEA_PREFERRED_TIMING & flag)) + +#ifdef __cplusplus +} +#endif + +#endif //__NVTIMING_H__ diff --git a/src/common/modeset/timing/nvtiming_pvt.h b/src/common/modeset/timing/nvtiming_pvt.h new file mode 100644 index 000000000..d96846ec6 --- /dev/null +++ b/src/common/modeset/timing/nvtiming_pvt.h @@ -0,0 +1,142 @@ +//***************************************************************************** +// +// SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: MIT +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// File: nvtiming_pvt.h +// +// Purpose: the private functions/structures which are only used inside +// the nv timing library. 
+// +//***************************************************************************** + +#ifndef __NVTIMING_PVT_H_ +#define __NVTIMING_PVT_H_ + +#include "nvtiming.h" + +#if defined(NVT_USE_NVKMS) + #include "nvidia-modeset-os-interface.h" + #define NVT_SNPRINTF nvkms_snprintf +#else + #include + #include + #define NVT_SNPRINTF snprintf +#endif + +#define nvt_assert(p) ((void)0) + +#include // NULL + +// EDID related private functions +NvU32 getEdidVersion(NvU8 *pData, NvU32 *pVer); +NvBool assignNextAvailableTiming(NVT_EDID_INFO *pInfo, const NVT_TIMING *pTiming); +void parseEdidCvtTiming(NVT_EDID_INFO *pInfo); +void parseEdidEstablishedTiming(NVT_EDID_INFO *pInfo); +void parseEdidStandardTiming(NVT_EDID_INFO *pInfo); +void parseEdidDetailedTiming(NvU8 *pEdid, NVT_EDID_INFO *pInfo); +NVT_STATUS parseEdidDetailedTimingDescriptor(NvU8 *pDTD, NVT_TIMING *pT); +void parseEdidCvt3ByteDescriptor(NvU8 *p, NVT_EDID_INFO *pInfo, NvU32 *vtbCount); +void parseEdidStandardTimingDescriptor(NvU16 timing, NVT_EDID_INFO *pInfo, NvU32 count, NVT_TIMING * pT); +void parseVTBExtension(NvU8 *pEdidExt, NVT_EDID_INFO *pInfo); +void updateHDMILLCDeepColorForTiming(NVT_EDID_INFO *pInfo, NvU32 index); +void updateBpcForTiming(NVT_EDID_INFO *pInfo, NvU32 index); +void updateColorFormatAndBpcTiming(NVT_EDID_INFO *pInfo); +// End EDID + +// CTA861 related private functions +NVT_STATUS get861ExtInfo(NvU8 *pEdid, NvU32 edidSize, NVT_EDID_CEA861_INFO *p); +NVT_STATUS parseCta861DataBlockInfo(NvU8 *pEdid, NvU32 size, NVT_EDID_CEA861_INFO *p); +void parse861ExtDetailedTiming(NvU8 *pEdidExt, NvU8 basicCaps, NVT_EDID_INFO *pInfo); +void parse861bShortTiming(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parse861bShortYuv420Timing(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parse861bShortPreferredTiming(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861VsdbBlocks(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861HfScdb(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN flag); +void parseCta861HfEeodb(NVT_EDID_CEA861_INFO *pExt861, NvU32 *pTotalEdidExtensions); +void parseEdidMsftVsdbBlock(VSDB_DATA *pVsdb, MSFT_VSDB_PARSED_INFO *vsdbInfo); +void parseEdidHdmiLlcBasicInfo(VSDB_DATA *pVsdb, NVT_HDMI_LLC_INFO *pHdmiLlc); +void parseEdidHdmiForumVSDB(VSDB_DATA *pVsdb, NVT_HDMI_FORUM_INFO *pHdmiInfo); +void getEdidHDM1_4bVsdbTiming(NVT_EDID_INFO *pInfo); +void parseEdidHDMILLCTiming(NVT_EDID_INFO *pInfo, VSDB_DATA *pVsdb, NvU32 *pSupported, HDMI3DSUPPORTMAP * pM); +void parseEdidNvidiaVSDBBlock(VSDB_DATA *pVsdb, NVDA_VSDB_PARSED_INFO *vsdbInfo); +void parseCea861HdrStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, void *pRawInfo, NVT_CTA861_ORIGIN); +void parseCea861DvStaticMetadataDataBlock(NVT_EDID_CEA861_INFO *pExt861, NVT_DV_STATIC_METADATA *pDvInfo); +NvBool isMatchedCTA861Timing(NVT_EDID_INFO *pInfo, NVT_TIMING *pT); +NvU32 isHdmi3DStereoType(NvU8 StereoStructureType); +NvU32 getCEA861TimingAspectRatio(NvU32 vic); +void SetActiveSpaceForHDMI3DStereo(const NVT_TIMING *pTiming, NVT_EXT_TIMING *pExtTiming); +void AddModeToSupportMap(HDMI3DSUPPORTMAP * pMap, NvU8 vic, NvU8 structure, NvU8 Detail); +void getMonitorDescriptorString(NvU8 *pEdid, NvU8 tag, char *str, int onceOnly); +// End CTA861 + +// DispalyID base / extension related functions +NvU32 getDID2Version(NvU8 *pData, NvU32 *pVer); +NVT_STATUS getDisplayIdEDIDExtInfo(NvU8* pEdid, NvU32 edidSize, NVT_EDID_INFO* 
pEdidInfo); +NVT_STATUS getDisplayId20EDIDExtInfo(NvU8* pDisplayid, NvU32 edidSize, NVT_EDID_INFO* pEdidInfo); +void updateColorFormatForDisplayIdExtnTimings(NVT_EDID_INFO* pInfo, NvU32 timingIdx); +void updateColorFormatForDisplayId20ExtnTimings(NVT_EDID_INFO* pInfo, NvU32 timingIdx); +NvBool assignNextAvailableDisplayId20Timing(NVT_DISPLAYID_2_0_INFO *pDisplayIdInfo, const NVT_TIMING *pTiming); +void updateColorFormatForDisplayId20Timings(NVT_DISPLAYID_2_0_INFO* pDisplayId2Info, NvU32 timingIdx); +// End DisplayID + +NvU32 axb_div_c_old(NvU32 a, NvU32 b, NvU32 c); + +#define NVT_EDID_BLOCK_SIZE 128 + +#define NVT_PVT_INTERLACED_MASK 0xF +#define NVT_PVT_DOUBLESCAN_MASK 0x10 +#define NVT_PVT_RB_MASK 0x20 + +#define NVT_PVT_DOUBLE_SCAN_HEIGHT 384 +#define NVT_PVT_DOUBLE_SCAN_HEIGHT_VGA 600 +#define NVT_PVT_DOUBLE_SCAN_PCLK_MIN 1200 //in 10KHz + +#define abs(a) ((a)>0?(a):-(a)) +#define set_rrx1k(a,b,c) ((b)*(c)==0?(0):(NvU32)(((NvU64)(a)*10000*1000+(b)*(c)/2)/((b)*(c)))) +#define frame_height(a) ((NvU32)((a).VVisible * ((a).interlaced!=0?2:1))) +#define nvt_is_wideaspect(width,height) ((width)*5 >= (height)*8) + +#ifndef MIN +#define MIN(x, y) ((x)>(y) ? (y) : (x)) +#endif +#ifndef MAX +#define MAX(x,y) ((x) > (y) ? (x) : (y)) +#endif + + +#ifndef COUNT +#define COUNT(a) (sizeof(a)/sizeof(a[0])) +#endif +#ifndef offsetof +#define offsetof(st, m) ((size_t) ( (char *)&((st *)(0))->m - (char *)0 )) +#endif +#define nvt_nvu8_set_bits(d, s, m, shift) {(d)&=(NvU8)((NvU8)(m)^0xFFU);(d)|=((s)<<(shift))&(m);} +#define nvt_get_bits(d, m, shift) (((d)&(m))>>shift) +#define nvt_lowest_bit(n) ((n)&(~((n)-1))) +#define nvt_aspect_x(n) ((n)>>16) +#define nvt_aspect_y(n) ((n)&0xFFFF) + +// Sentinel values for NVT_TIMING +#define NVT_TIMINGEXT_SENTINEL {0,0,0,0,0,{0},{0},{0},{0},0,""} +#define NVT_TIMING_SENTINEL {0,0,0,0,0,0,0,0,0,0,0,0,0,0,NVT_TIMINGEXT_SENTINEL} + +#endif //__NVTIMING_PVT_H_ diff --git a/src/common/nvlink/interface/nvlink.h b/src/common/nvlink/interface/nvlink.h new file mode 100644 index 000000000..5244f936b --- /dev/null +++ b/src/common/nvlink/interface/nvlink.h @@ -0,0 +1,566 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +// +// nvlink.h +// + +#ifndef _NVLINK_H_ +#define _NVLINK_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "nvlink_common.h" +#include "nvlink_lib_ctrl.h" +#include "nv_list.h" +#include "nvlink_errors.h" +#include "nvCpuUuid.h" + +// Debug Prints +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + #define NVLINK_PRINT_ENABLED 1 + #define NVLINK_PRINT(format_and_stuff) nvlink_print format_and_stuff + + #define DBG_MODULE_NVLINK_CORE __FILE__, __LINE__, __FUNCTION__ + #define DBG_MODULE_IBMNPU DBG_MODULE_NVLINK_CORE + #define DBG_MODULE_TEGRASHIM DBG_MODULE_NVLINK_CORE + #define DBG_MODULE_EBRIDGE DBG_MODULE_NVLINK_CORE + #define DBG_MODULE_NVSWITCH DBG_MODULE_NVLINK_CORE +#else + #define NVLINK_PRINT(format_and_stuff) ((void)(0)) +#endif + +// Devices that support NVLINK +#define NVLINK_DEVICE_TYPE_EBRIDGE 0x0 +#define NVLINK_DEVICE_TYPE_IBMNPU 0x1 +#define NVLINK_DEVICE_TYPE_GPU 0x2 +#define NVLINK_DEVICE_TYPE_NVSWITCH 0x3 +#define NVLINK_DEVICE_TYPE_TEGRASHIM 0x4 + +// NVLink versions +#define NVLINK_DEVICE_VERSION_10 0x00000001 +#define NVLINK_DEVICE_VERSION_20 0x00000002 +#define NVLINK_DEVICE_VERSION_22 0x00000004 +#define NVLINK_DEVICE_VERSION_30 0x00000005 +#define NVLINK_DEVICE_VERSION_31 0x00000006 +#define NVLINK_DEVICE_VERSION_40 0x00000007 + +// Link Transition Timeouts in miliseconds +#define NVLINK_TRANSITION_OFF_TIMEOUT 1 +#define NVLINK_TRANSITION_SAFE_TIMEOUT 300 +#define NVLINK_TRANSITION_HS_TIMEOUT 8000 +#define NVLINK_TRANSITION_POST_HS_TIMEOUT 70 + +// Link training seed values +#define NVLINK_MAX_SEED_NUM 6 +#define NVLINK_MAX_SEED_BUFFER_SIZE NVLINK_MAX_SEED_NUM + 1 + +#define NVLINK_MAX_SYSTEM_LINK_NUM 624 + +// Forwards +struct nvlink_device; +struct nvlink_device_handle; +struct nvlink_link; +struct nvlink_link_handlers; + +// nvlink device state +struct nvlink_device +{ + NVListRec node; + + // List of links associated with this device + NVListRec link_list; + + // Uniquely identifies a device in the core + NvU64 deviceId; + + // Client supplied names and ids + char *driverName; + char *deviceName; + NvU8 *uuid; + + // PCI Information + struct nvlink_pci_info pciInfo; + + // Device type and status + NvU64 type; + NvBool initialized; + + // fabric node id + NvU16 nodeId; + + // Client private information + void *pDevInfo; +}; + +// nvlink link change type +enum nvlink_link_change_type +{ + nvlink_retrain_from_off, + nvlink_retrain_from_safe, + +}; + +// nvlink link_change parameters +struct nvlink_link_change +{ + struct nvlink_link *master; + struct nvlink_link *slave; + + enum nvlink_link_change_type change_type; +}; + +// nvlink link state +struct nvlink_link +{ + NVListRec node; + + // Device the link is associated with + struct nvlink_device *dev; + + // Lock for per link structure + void *linkLock; + + // Uniquely identifies a link in the core + NvU64 linkId; + + // If this link is the master of its connection + NvBool master; + + // Client supplied link name and number + char *linkName; + NvU32 linkNumber; + + NvU64 token; + + // Link state + NvU32 state; + NvBool inSWCFG; + + // Sublink states + NvU32 tx_sublink_state; + NvU32 rx_sublink_state; + + // Has rceiver detect passed + NvBool bRxDetected; + + // Link failed when sending InitPll to minion + NvBool bTxCommonModeFail; + + // Link failed when transitioning to SWCFG + NvBool bSafeTransitionFail; + + // Link failed when sending INITPHASE5 to minion + NvBool bInitphase5Fails; + + // IP version + NvU32 version; + + // Has state been saved + NvBool bStateSaved; + + 
// Number of retries to put link to safe + NvU32 safe_retries; + + // Set if LINK is ac coupled + NvBool ac_coupled; + + // Number of retries to discover the other end of the link + NvU32 packet_injection_retries; + + // Local Sid of the link. + NvU64 localSid; + + // Remote Sid of the link. + NvU64 remoteSid; + + // Remote LinkId to which the current link is connected. + NvU32 remoteLinkId; + + NvU32 remoteDeviceType; + + // Has INITNEGOTIATE received CONFIG_GOOD (NVL3.0+) + NvBool bInitnegotiateConfigGood; + + // Power state transition status + enum + { + nvlink_power_state_in_L0, + nvlink_power_state_entering_L2, + nvlink_power_state_in_L2, + nvlink_power_state_exiting_L2 + } powerStateTransitionStatus; + + // Link handlers + const struct nvlink_link_handlers *link_handlers; + + // Client private information + void *link_info; + + // Outstanding link change request information + struct nvlink_link_change link_change; + + //seed data for given nvlink + NvU32 seedData[NVLINK_MAX_SEED_BUFFER_SIZE]; +}; + +// nvlink link handler ops +struct nvlink_link_handlers +{ + NV_API_CALL NvlStatus (*add) (struct nvlink_link *link); + NV_API_CALL NvlStatus (*remove) (struct nvlink_link *link); + NV_API_CALL NvlStatus (*lock) (struct nvlink_link *link); + NV_API_CALL void (*unlock) (struct nvlink_link *link); + NV_API_CALL NvlStatus (*queue_link_change) (struct nvlink_link_change *link_change); + NV_API_CALL NvlStatus (*set_dl_link_mode) (struct nvlink_link *link, NvU64 mode, NvU32 flags); + NV_API_CALL NvlStatus (*get_dl_link_mode) (struct nvlink_link *link, NvU64 *mode); + NV_API_CALL NvlStatus (*set_tl_link_mode) (struct nvlink_link *link, NvU64 mode, NvU32 flags); + NV_API_CALL NvlStatus (*get_tl_link_mode) (struct nvlink_link *link, NvU64 *mode); + NV_API_CALL NvlStatus (*set_tx_mode) (struct nvlink_link *link, NvU64 mode, NvU32 flags); + NV_API_CALL NvlStatus (*get_tx_mode) (struct nvlink_link *link, NvU64 *mode, NvU32 *subMode); + NV_API_CALL NvlStatus (*set_rx_mode) (struct nvlink_link *link, NvU64 mode, NvU32 flags); + NV_API_CALL NvlStatus (*get_rx_mode) (struct nvlink_link *link, NvU64 *mode, NvU32 *subMode); + NV_API_CALL NvlStatus (*set_rx_detect) (struct nvlink_link *link, NvU32 flags); + NV_API_CALL NvlStatus (*get_rx_detect) (struct nvlink_link *link); + NV_API_CALL NvlStatus (*write_discovery_token) (struct nvlink_link *link, NvU64 token); + NV_API_CALL NvlStatus (*read_discovery_token) (struct nvlink_link *link, NvU64 *token); + NV_API_CALL void (*training_complete) (struct nvlink_link *link); + NV_API_CALL void (*get_uphy_load) (struct nvlink_link *link, NvBool* bUnlocked); +}; + +// +// Represents an intranode connections in single/multi-node system. +// Both endpoints of the connection is visible from same node. +// +struct nvlink_intranode_conn +{ + NVListRec node; + struct nvlink_link *end0; + struct nvlink_link *end1; +}; + +// +// Represents internode connections in a multi-node system. +// One of the endpoint of the connection must be a local link. 
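[Editor's note] Both connection flavors are simply pairings of endpoints: nvlink_intranode_conn (above) holds two locally registered nvlink_link pointers, while nvlink_internode_conn (declared just below) pairs a local link with remote-endpoint data learned from another node. A minimal, hypothetical sketch of filling them in, assuming link0/link1 are links already registered on this node and remoteInfo was obtained from the peer node:

    #include "nvlink.h"   /* nvlink_intranode_conn, nvlink_internode_conn */

    /* Illustrative only: the core library normally allocates and tracks
     * these records itself once connections are discovered. */
    static void sketchBuildConnections(struct nvlink_link *link0,
                                       struct nvlink_link *link1,
                                       const nvlink_remote_endpoint_info *remoteInfo)
    {
        struct nvlink_intranode_conn intra;
        struct nvlink_internode_conn inter;

        /* Intranode: both ends of the connection are visible on this node. */
        intra.end0 = link0;
        intra.end1 = link1;

        /* Internode: only the local end is visible; the remote end is
         * described by data exchanged with the peer node. */
        inter.local_end  = link0;
        inter.remote_end = *remoteInfo;

        (void)intra;
        (void)inter;
    }

For the internode case, the CTRL_NVLINK_ADD_INTERNODE_CONN control defined later in this commit is the documented way a user-mode module registers such a connection with the driver.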
+// +struct nvlink_internode_conn +{ + NVListRec node; + struct nvlink_link *local_end; + nvlink_remote_endpoint_info remote_end; +}; + + +// Typedefs +typedef struct nvlink_device nvlink_device; +typedef struct nvlink_device_handle nvlink_device_handle; +typedef struct nvlink_link nvlink_link; +typedef struct nvlink_link_change nvlink_link_change; +typedef struct nvlink_device_handlers nvlink_device_handlers; +typedef struct nvlink_link_handlers nvlink_link_handlers; +typedef struct nvlink_intranode_conn nvlink_intranode_conn; +typedef struct nvlink_internode_conn nvlink_internode_conn; +typedef enum nvlink_link_change_type nvlink_link_change_type; +typedef struct nvlink_inband_data nvlink_inband_data; + + +#define NVLINK_MAX_NUM_SAFE_RETRIES 7 +#define NVLINK_MAX_NUM_PACKET_INJECTION_RETRIES 4 + + +// NVLINK LINK states +#define NVLINK_LINKSTATE_OFF 0x00 // OFF +#define NVLINK_LINKSTATE_HS 0x01 // High Speed +#define NVLINK_LINKSTATE_SAFE 0x02 // Safe/Discovery State +#define NVLINK_LINKSTATE_FAULT 0x03 // Faulty +#define NVLINK_LINKSTATE_RECOVERY 0x04 // Recovery +#define NVLINK_LINKSTATE_FAIL 0x05 // Unconnected/Fail +#define NVLINK_LINKSTATE_DETECT 0x06 // Detect mode +#define NVLINK_LINKSTATE_RESET 0x07 // Reset +#define NVLINK_LINKSTATE_ENABLE_PM 0x08 // Enable Link Power Management +#define NVLINK_LINKSTATE_DISABLE_PM 0x09 // Disable Link Power Management +#define NVLINK_LINKSTATE_SLEEP 0x0A // Sleep (L2) +#define NVLINK_LINKSTATE_SAVE_STATE 0x0B // Save state while entering L2 +#define NVLINK_LINKSTATE_RESTORE_STATE 0x0C // Restore state while exiting L2 +#define NVLINK_LINKSTATE_PRE_HS 0x0E // Settings before moving to High Speed +#define NVLINK_LINKSTATE_DISABLE_ERR_DETECT 0x0F // Disable Error detection (interrupt) +#define NVLINK_LINKSTATE_LANE_DISABLE 0x10 // Disable Lanes +#define NVLINK_LINKSTATE_LANE_SHUTDOWN 0x11 // Shutdown Lanes in PHY +#define NVLINK_LINKSTATE_TRAFFIC_SETUP 0x12 // Setup traffic flow after ACTIVE +#define NVLINK_LINKSTATE_INITPHASE1 0x13 // INITPHASE1 +#define NVLINK_LINKSTATE_INITNEGOTIATE 0x14 // Initialize the negotiation (Ampere And Later) +#define NVLINK_LINKSTATE_POST_INITNEGOTIATE 0x15 // Sends DL stat +#define NVLINK_LINKSTATE_INITOPTIMIZE 0x16 // INITOPTIMIZE +#define NVLINK_LINKSTATE_POST_INITOPTIMIZE 0x17 // POST INITOPTIMIZE DL stat check +#define NVLINK_LINKSTATE_DISABLE_HEARTBEAT 0x18 // Disables the heartbeat errors +#define NVLINK_LINKSTATE_CONTAIN 0x19 // TL is in contain mode +#define NVLINK_LINKSTATE_INITTL 0x1A // INITTL +#define NVLINK_LINKSTATE_INVALID 0xFF // Invalid state + +// NVLINK TX SUBLINK states +#define NVLINK_SUBLINK_STATE_TX_HS 0x0 // TX High Speed +#define NVLINK_SUBLINK_STATE_TX_SINGLE_LANE 0x4 // TX Single Lane (1/8th or 1/4th) Mode (Deprecated) +#define NVLINK_SUBLINK_STATE_TX_TRAIN 0x5 // TX training +#define NVLINK_SUBLINK_STATE_TX_SAFE 0x6 // TX Safe Mode +#define NVLINK_SUBLINK_STATE_TX_OFF 0x7 // TX OFF +#define NVLINK_SUBLINK_STATE_TX_COMMON_MODE 0x8 // TX common mode enable +#define NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE 0x9 // TX common mode disable +#define NVLINK_SUBLINK_STATE_TX_DATA_READY 0xA // Do Data Ready and Data Enable +#define NVLINK_SUBLINK_STATE_TX_EQ 0xB // TX equalization +#define NVLINK_SUBLINK_STATE_TX_PRBS_EN 0xC // TX IOBIST PRBS generator enable +#define NVLINK_SUBLINK_STATE_TX_POST_HS 0xD // TX Post High Speed settings + +// NVLINK RX SUBLINK states +#define NVLINK_SUBLINK_STATE_RX_HS 0x0 // RX High Speed +#define NVLINK_SUBLINK_STATE_RX_SINGLE_LANE 0x4 // RX Single Lane (1/8th or 
1/4th) Mode (Deprecated) +#define NVLINK_SUBLINK_STATE_RX_TRAIN 0x5 // RX training +#define NVLINK_SUBLINK_STATE_RX_SAFE 0x6 // RX Safe Mode +#define NVLINK_SUBLINK_STATE_RX_OFF 0x7 // RX OFF +#define NVLINK_SUBLINK_STATE_RX_RXCAL 0x8 // RX in calibration +#define NVLINK_SUBLINK_STATE_RX_INIT_TERM 0x9 // Enable RX termination + +// NVLINK TX SUBLINK sub-states +#define NVLINK_SUBLINK_SUBSTATE_TX_STABLE 0x0 // TX Stable + +// NVLINK RX SUBLINK sub-states +#define NVLINK_SUBLINK_SUBSTATE_RX_STABLE 0x0 // RX Stable + +// State change flags +#define NVLINK_STATE_CHANGE_ASYNC 0x0 // Don't wait for the state change to complete +#define NVLINK_STATE_CHANGE_SYNC 0x1 // Wait for the state change to complete + + +/************************************************************************************************/ +/***************************** NVLink library management functions ******************************/ +/************************************************************************************************/ + +/* + * Check if the nvlink core library is initialized + */ +NvBool nvlink_lib_is_initialized(void); + +/* + * Check if there are no devices registered + */ +NvBool nvlink_lib_is_device_list_empty(void); + + +/************************************************************************************************/ +/************************** NVLink library driver-side interface ********************************/ +/***************** Manages device and link registration and un-registration *********************/ +/************************************************************************************************/ + +/* + * Associates device in the NVLink Core + * During the call, the calling driver must support callbacks into the driver from Core + */ +NvlStatus nvlink_lib_register_device(nvlink_device *dev); + +/* + * Unassociates device in the NVLink Core + * Includes removing any links related to the device if still registered + * During the call, the calling driver must support callbacks into the driver from Core + */ +NvlStatus nvlink_lib_unregister_device(nvlink_device *dev); + + +/* + * Associates link with a device in the NVLink Core + * During the call, the calling driver must support callbacks into the driver from Core + */ +NvlStatus nvlink_lib_register_link(nvlink_device *dev, nvlink_link *link); + +/* + * Unassociates link from a device in the NVLink Core + * During the call, the calling driver must support callbacks into the driver from Core + */ +NvlStatus nvlink_lib_unregister_link(nvlink_link *link); + + +/************************************************************************************************/ +/******************************* NVLink link management functions *******************************/ +/************************************************************************************************/ + +/* + * Check if the device has no links registered + */ +NvBool nvlink_lib_is_link_list_empty(nvlink_device *dev); + +/* + * Get the link associated with the given device's link number + */ +NvlStatus nvlink_lib_get_link(nvlink_device *device, + NvU32 link_id, + nvlink_link **link); + +/* + * Set the link endpoint as the link master + */ +NvlStatus nvlink_lib_set_link_master(nvlink_link *link); + +/* + * Get the link master associated with this endpoint + */ +NvlStatus nvlink_lib_get_link_master(nvlink_link *link, nvlink_link **master); + +/************************************************************************************************/ +/*************************** NVLink topology discovery 
functions ********************************/ +/************************************************************************************************/ + +/* + * Get the connected remote endpoint information + * For a given link, return the other endpoint details it is connected + * to. If there is no connection associated with the given link, then + * conn_info.connected member will be NV_FALSE. + * + * Note: This routine will not initiate any link initialization or topology + * discovery. + */ +NvlStatus nvlink_lib_get_remote_conn_info(nvlink_link *link, nvlink_conn_info *conn_info); + +/* + * Get the connected remote endpoint information + * For a given end of a link, returns the device and link information + * for the remote end along with a boolean variable that specifies if + * the topology detection was complete + */ +NvlStatus nvlink_lib_discover_and_get_remote_conn_info(nvlink_link *end, + nvlink_conn_info *conn_info, + NvU32 flags); + + +/************************************************************************************************/ +/****************************** NVLink initialization functions *********************************/ +/************************************************************************************************/ + +/* + * Re-init a given link from OFF to SWCFG + */ +NvlStatus nvlink_lib_reinit_link_from_off_to_swcfg(nvlink_link *link, + NvU32 flags); + +/************************************************************************************************/ +/********************************** NVLink training functions ***********************************/ +/************************************************************************************************/ + +/* + * Train a given set of links from SWCFG to ACTIVE state + * a. For low training latency - caller passes all links as an array + * b. 
For high training latency - caller passes link one by one + */ +NvlStatus nvlink_lib_train_links_from_swcfg_to_active(nvlink_link **links, + NvU32 linkCount, + NvU32 flags); + +/* + * Train a given set of links of a device from L2 to ACTIVE state + */ +NvlStatus nvlink_lib_train_links_from_L2_to_active(nvlink_device *dev, + NvU32 linkMask, + NvU32 flags); + +/* + * Retrain a given link from SWCFG to ACTIVE + */ +NvlStatus nvlink_lib_retrain_link_from_swcfg_to_active(nvlink_link *link, + NvU32 flags); + +/* + * Save the seed Data passed in from an endpoint driver +*/ +NvlStatus nvlink_lib_save_training_seeds(nvlink_link * link, + NvU32 * seedData); +NvlStatus nvlink_lib_copy_training_seeds(nvlink_link * link, + NvU32 * seedDataCopy); + +/* + * Send the endpoint driver back the seeds we have stored +*/ +void nvlink_lib_restore_training_seeds(nvlink_link * link, + NvU32 * seedData); + +/* + * Check that the requested links have trained to active +*/ +NvlStatus nvlink_lib_check_training_complete(nvlink_link **links, + NvU32 linkCount); + +/************************************************************************************************/ +/********************************** NVLink shutdown functions ***********************************/ +/************************************************************************************************/ + +/* + * [CLEAN SHUTDOWN] + * Shutdown given links of a device from active to L2 state + */ +NvlStatus nvlink_lib_powerdown_links_from_active_to_L2(nvlink_device *dev, + NvU32 linkMask, + NvU32 flags); + +/* + * [PSEUDO-CLEAN SHUTDOWN] + * Shutdown the given array of links from ACTIVE to OFF state + */ +NvlStatus nvlink_lib_powerdown_links_from_active_to_off(nvlink_link **links, + NvU32 numLinks, + NvU32 flags); + +/* + * Power down the given array of links from ACTIVE to SWCFG state + */ +NvlStatus nvlink_lib_powerdown_links_from_active_to_swcfg(nvlink_link **links, + NvU32 numLinks, + NvU32 flags); + +/* + * Reset the given array of links + */ +NvlStatus nvlink_lib_reset_links(nvlink_link **links, + NvU32 numLinks, + NvU32 flags); + + +/* + * Nvlink core library structure iterators + */ + +#define FOR_EACH_DEVICE_REGISTERED(dev, head, node) \ + nvListForEachEntry(dev, &head.node, node) + +#define FOR_EACH_LINK_REGISTERED(link, dev, node) \ + nvListForEachEntry(link, &dev->link_list, node) + +#define FOR_EACH_LINK_REGISTERED_SAFE(link, next, dev, node) \ + nvListForEachEntry_safe(link, next, &dev->link_list, node) + +#define FOR_EACH_CONNECTION(conn, head, node) \ + nvListForEachEntry(conn, &head.node, node) + +#ifdef __cplusplus +} +#endif + +#endif // _NVLINK_H_ diff --git a/src/common/nvlink/interface/nvlink_common.h b/src/common/nvlink/interface/nvlink_common.h new file mode 100644 index 000000000..5a671768c --- /dev/null +++ b/src/common/nvlink/interface/nvlink_common.h @@ -0,0 +1,173 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVLINK_COMMON_H_ +#define _NVLINK_COMMON_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvCpuUuid.h" +#include "nvlink_errors.h" + + +#ifndef NULL +#define NULL ((void *)0) +#endif + +// nvlink pci bar information +struct nvlink_pci_bar_info +{ + NvU64 busAddress; + NvU64 baseAddr; + NvU64 barSize; + NvU32 offset; + void *pBar; +}; + +#define MAX_NVLINK_BARS 2 + +// nvlink pci information +struct nvlink_pci_info +{ + NvU32 domain; + NvU8 bus; + NvU8 device; + NvU8 function; + NvU32 pciDeviceId; + NvU32 irq; + NvBool intHooked; + struct nvlink_pci_bar_info bars[MAX_NVLINK_BARS]; +}; + +// nvlink detailed device information +struct nvlink_detailed_device_info +{ + char *deviceName; + NvU64 deviceType; + NvU8 *devUuid; + NvBool bInitialized; + void *dev_info; // Endpoint driver device info opaque + // to core lib. Passed from end point + // driver to core + + struct nvlink_pci_info *pciInfo; +}; + +// nvlink device registration parameters +struct nvlink_device_register_params +{ + // + // Core lib device info opaque to endpoint driver + // Passed from core lib to endpoint driver + // + void **deviceHandle; + char *driverName; + + struct nvlink_detailed_device_info *device_params; +}; + +// nvlink detailed link information +struct nvlink_detailed_link_info +{ + void *deviceHandle; // Core library device handle passed + // to endpoint driver during device + // registration + + void *link_info; // End point driver link info opaque + // to core lib. 
Passed from end point + // driver to core + + char *linkName; + NvU32 linkNumber; + NvU32 version; + NvBool bAcCoupled; + const void *link_handlers; +}; + +// nvlink link registration parameters +struct nvlink_link_register_params +{ + // + // Core lib link info opaque to endpoint driver + // Passed from core lib to endpoint driver + // + void **linkHandle; + + struct nvlink_detailed_link_info *link_params; +}; + +// nvlink client device handle +struct nvlink_device_handle +{ + NvU32 linkMask; + struct nvlink_pci_info pciInfo; +}; + +#define NVLINK_PCI_DEV_FMT "%04x:%02x:%02x.%x" +#define NVLINK_PCI_DEV_FMT_ARGS(info) (info)->domain, \ + (info)->bus, \ + (info)->device, \ + (info)->function + +// nvlink connection information +struct nvlink_conn_info +{ + NvU32 domain; + NvU16 bus; + NvU16 device; + NvU16 function; + NvU32 pciDeviceId; + NvU8 devUuid[NV_UUID_LEN]; + NvU64 deviceType; + NvU32 linkNumber; + NvBool bConnected; + NvU64 chipSid; +}; + +// nvlink ioctrl params +struct nvlink_ioctrl_params +{ + void *osPrivate; + NvU32 cmd; + void *buf; + NvU32 size; +}; + +// Typedefs +typedef struct nvlink_pci_bar_info nvlink_pci_bar_info; +typedef struct nvlink_pci_info nvlink_pci_info; +typedef struct nvlink_detailed_device_info nvlink_detailed_device_info; +typedef struct nvlink_detailed_link_info nvlink_detailed_link_info; +typedef struct nvlink_device_register_params nvlink_device_register_params; +typedef struct nvlink_link_register_params nvlink_link_register_params; +typedef struct nvlink_conn_info nvlink_conn_info; +typedef struct nvlink_ioctrl_params nvlink_ioctrl_params; + +#ifdef __cplusplus +} +#endif + +#endif //_NVLINK_COMMON_H_ diff --git a/src/common/nvlink/interface/nvlink_errors.h b/src/common/nvlink/interface/nvlink_errors.h new file mode 100644 index 000000000..d6784a39f --- /dev/null +++ b/src/common/nvlink/interface/nvlink_errors.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVLINK_ERRORS_H_ +#define _NVLINK_ERRORS_H_ + +typedef int NvlStatus; + +#define NVL_SUCCESS (NvlStatus) 0 +#define NVL_BAD_ARGS (NvlStatus) 1 +#define NVL_NO_MEM (NvlStatus) 2 +#define NVL_NOT_FOUND (NvlStatus) 3 +#define NVL_INITIALIZATION_PARTIAL_FAILURE (NvlStatus) 4 +#define NVL_INITIALIZATION_TOTAL_FAILURE (NvlStatus) 5 +#define NVL_PCI_ERROR (NvlStatus) 6 +#define NVL_ERR_GENERIC (NvlStatus) 7 +#define NVL_ERR_INVALID_STATE (NvlStatus) 8 +#define NVL_UNBOUND_DEVICE (NvlStatus) 9 +#define NVL_MORE_PROCESSING_REQUIRED (NvlStatus)10 +#define NVL_IO_ERROR (NvlStatus)11 +#define NVL_ERR_STATE_IN_USE (NvlStatus)12 +#define NVL_ERR_NOT_SUPPORTED (NvlStatus)13 +#define NVL_ERR_NOT_IMPLEMENTED (NvlStatus)14 +#define NVL_ERR_INSUFFICIENT_PERMISSIONS (NvlStatus)15 +#define NVL_ERR_OPERATING_SYSTEM (NvlStatus)16 + +#endif // _NVLINK_ERRORS_H_ diff --git a/src/common/nvlink/interface/nvlink_export.h b/src/common/nvlink/interface/nvlink_export.h new file mode 100644 index 000000000..471ec8380 --- /dev/null +++ b/src/common/nvlink/interface/nvlink_export.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVLINK_EXPORT_H_ +#define _NVLINK_EXPORT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvlink_common.h" + +/* + * Initializes core lib and does all that is needed + * to access NVLINK functionality on the current platform. + */ +NvlStatus nvlink_lib_initialize(void); + +/* + * Frees any related resources and then unloads core lib. + */ +NvlStatus nvlink_lib_unload(void); + +/* +* Entry point for nvlink ioctl calls. +*/ +NvlStatus nvlink_lib_ioctl_ctrl(nvlink_ioctrl_params *ctrl_params); + +#ifdef __cplusplus +} +#endif + +#endif //_NVLINK_EXPORT_H_ diff --git a/src/common/nvlink/interface/nvlink_lib_ctrl.h b/src/common/nvlink/interface/nvlink_lib_ctrl.h new file mode 100644 index 000000000..e81aed4a8 --- /dev/null +++ b/src/common/nvlink/interface/nvlink_lib_ctrl.h @@ -0,0 +1,1157 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVLINK_LIB_CTRL_H_ +#define _NVLINK_LIB_CTRL_H_ + +#include "nvtypes.h" +#include "nvlink_errors.h" + +/* List of supported capability type */ +#define NVLINK_CAP_FABRIC_MANAGEMENT 0 + +/* + * Max supported capabilities count + * + */ +#define NVLINK_CAP_COUNT 1 + +#define NVLINK_UUID_LEN 16 + +/* + * Total number of nvlinks connections a device could have. + */ +#define NVLINK_MAX_DEVICE_CONN 64 + +/* + * Total number of links to train in parallel + */ +#define NVLINK_MAX_PARALLEL_CONNS_TRAIN_COUNT 288 + +/* + * Best effort to copy the driver supplied device name. Name more than + * this size (if any) will be truncated. + */ +#define NVLINK_DEVICE_NAME_LEN_MAX 30 + +/* + * Total number of nvlink devices supported. Derived based on current supported + * limits. OBJ_MAX_GPUS = 32 and NVSWITCH_DEVICE_INSTANCE_MAX = 64 + */ +#define NVLINK_DEVICE_INSTANCE_MAX 96 + +/* + * Total number of nvlink endpoints core library can have + * This is mapped to NVLINK_MAX_SYSTEM_LINK_NUM in drivers/nvlink/interface/nvlink.h + */ +#define NVLINK_MAX_NVLINK_ENDPOINTS 312 + +#define NVLINK_VERSION_STRING_LENGTH 64 + +/* + * NVLink version consists of, + * major - no compatibility. + * minor - only backwards compatible. 
+ */ +typedef struct +{ + char version[NVLINK_VERSION_STRING_LENGTH]; +} nvlink_version; + +typedef struct +{ + NvU16 domain; + NvU8 bus; + NvU8 device; + NvU8 function; +} nvlink_pci_dev_info; + +/* uniquely identify an nvlink endpoint */ +typedef struct +{ + NvU16 nodeId; + NvU32 linkIndex; + nvlink_pci_dev_info pciInfo; +} nvlink_endpoint; + +/* uniquely identify an nvlink device */ +typedef struct +{ + NvU16 nodeId; + nvlink_pci_dev_info pciInfo; +} nvlink_device_info; + +/* detailed information about an nvlink device */ +typedef struct +{ + nvlink_pci_dev_info pciInfo; + char deviceName[NVLINK_DEVICE_NAME_LEN_MAX]; + NvU8 devUuid[NVLINK_UUID_LEN]; + NvU16 numLinks; + NvU32 devType; + NV_DECLARE_ALIGNED(NvU64 enabledLinkMask, 8); +} nvlink_detailed_dev_info; + +/* detailed information about a remote nvlink connection endpoint */ +typedef struct +{ + NvU16 nodeId; + NvU32 linkIndex; + nvlink_pci_dev_info pciInfo; + NvU8 devUuid[NVLINK_UUID_LEN]; + NvU32 devType; +} nvlink_remote_endpoint_info; + +/* uniquely identify an nvlink connection */ +typedef struct +{ + nvlink_endpoint srcEndPoint; + nvlink_endpoint dstEndPoint; +} nvlink_connection_info; + +/* link device types */ +typedef enum +{ + nvlink_device_type_unknown = 0, + nvlink_device_type_gpu, + nvlink_device_type_ibmnpu, + nvlink_device_type_nvswitch +} nvlink_device_type; + +/* link modes */ +typedef enum +{ + nvlink_link_mode_unknown = 0, + nvlink_link_mode_off, + nvlink_link_mode_active, + nvlink_link_mode_swcfg, + nvlink_link_mode_fault, + nvlink_link_mode_recovery, + nvlink_link_mode_fail, + nvlink_link_mode_detect, + nvlink_link_mode_reset, + nvlink_link_mode_enable_pm, + nvlink_link_mode_disable_pm, + nvlink_link_mode_traffic_setup, + nvlink_link_mode_contain +} nvlink_link_mode; + +/* sublink tx modes */ +typedef enum +{ + nvlink_tx_sublink_mode_unknown = 0, + nvlink_tx_sublink_mode_hs, + nvlink_tx_sublink_mode_single_lane, + nvlink_tx_sublink_mode_train, + nvlink_tx_sublink_mode_safe, + nvlink_tx_sublink_mode_off, + nvlink_tx_sublink_mode_common_mode, + nvlink_tx_sublink_mode_common_mode_disable, + nvlink_tx_sublink_mode_data_ready, + nvlink_tx_sublink_mode_tx_eq, + nvlink_tx_sublink_mode_pbrs_en, + nvlink_tx_sublink_mode_post_hs +} nvlink_tx_sublink_mode; + +/* sublink rx modes */ +typedef enum +{ + nvlink_rx_sublink_mode_unknown = 0, + nvlink_rx_sublink_mode_hs, + nvlink_rx_sublink_mode_single_lane, + nvlink_rx_sublink_mode_train, + nvlink_rx_sublink_mode_safe, + nvlink_rx_sublink_mode_off, + nvlink_rx_sublink_mode_rxcal +} nvlink_rx_sublink_mode; + +/* link and sublink state of an nvlink endpoint */ +typedef struct +{ + NvU32 linkMode; + NvU32 txSubLinkMode; + NvU32 rxSubLinkMode; +} nvlink_link_state; + +/* + * CTRL_NVLINK_CHECK_VERSION + * + * The interface will check if the client's version is supported by the driver. + * + * Parameters: + * user [IN] + * version of the interface that the client is compiled with. + * + * kernel [OUT] + * version of the interface that the kernel driver is compiled with. This + * information will be filled even if the CTRL call returns + * NVL_ERR_NOT_SUPPORTED due to version mismatch. + * + * status [OUT] + * NVL_SUCCESS if the client is using compatible interface. + * NVL_ERR_NOT_SUPPORTED if the client is using incompatible interface. + * Or, Other NVL_XXX status value. 
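[Editor's note] As a usage illustration for the nvlink_check_version structure defined just below: a minimal, hypothetical sketch of issuing this control through the in-kernel entry point nvlink_lib_ioctl_ctrl() from nvlink_export.h (user space would reach the same path through the platform's ioctl plumbing). The version string is purely illustrative:

    #include "nvlink_export.h"     /* nvlink_lib_ioctl_ctrl()            */
    #include "nvlink_lib_ctrl.h"   /* nvlink_check_version, CTRL_* codes */
    #include "nvlink_os.h"         /* nvlink_memset, nvlink_strcpy       */

    static NvlStatus checkCoreLibVersion(void)
    {
        nvlink_check_version params;
        nvlink_ioctrl_params ctrl;

        nvlink_memset(&params, 0, sizeof(params));
        nvlink_strcpy(params.user.version, "illustrative-client-version");

        nvlink_memset(&ctrl, 0, sizeof(ctrl));
        ctrl.cmd  = CTRL_NVLINK_CHECK_VERSION;
        ctrl.buf  = &params;
        ctrl.size = sizeof(params);

        (void)nvlink_lib_ioctl_ctrl(&ctrl);

        /* Per the description above, params.kernel is filled even on a
         * version mismatch, so both versions can be reported. */
        return params.status;
    }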
+ */ +typedef struct +{ + /* input parameters */ + nvlink_version user; + + /* output parameters */ + nvlink_version kernel; + NvlStatus status; +} nvlink_check_version; + +/* + * CTRL_NVLINK_SET_NODE_ID + * + * Assign the nvlink devices with a fabric node id. + * This interface will enumerate all the existing registered devices (if any) + * and update its node id. Any subsequent nvlink device registration will use + * this node id. + * + * Parameters: + * nodeId [IN] + * fabric node id of the node. + * + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + NvU16 nodeId; + + /* output parameters */ + NvlStatus status; +} nvlink_set_node_id; + +/* + * CTRL_NVLINK_SET_TX_COMMON_MODE + * + * Set common mode for all device links. + * A user mode module, which has no validated and stored nvlink connection should + * do a device discovery. This interface allows all the links to enable or disable + * common mode to facilitate such device discovery. + * + * Parameters: + * commMode [IN] + * Specify the desired common mode, True to enable and False to disable. + * + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + NvBool commMode; + + /* output parameters */ + NvlStatus status; +} nvlink_set_tx_common_mode; + +/* + * CTRL_NVLINK_CALIBRATE + * + * Do Rx Calibration for all the links. + * A user mode module, which has no validated and stored nvlink connection + * should do a device discovery. This interface allows all the links to + * initiate Rx calibration as part of this discovery process. + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_calibrate; + +/* + * CTRL_NVLINK_ENABLE_DATA + * + * Enable TX data for all the registered links in the node. + * A user mode module, which has no validated and stored nvlink connection should + * do a device discovery. This interface enable data mode for all the links as part + * of this discovery process. + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_enable_data; + +/* + * CTRL_NVLINK_LINK_INIT_ASYNC + * + * Initialize all the registered links in the node. + * Initializes all the registered links in the node and start SWCFG state + * transition for the links. However, this interface will not wait/poll + * for the links to finish the initialization. The caller must use + * CTRL_NVLINK_DEVICE_LINK_INIT_STATUS to query the link status which will + * wait for the initialization to complete and report how many links are + * transitioned to SWCFG state. + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_link_init_async; + +/* + * CTRL_NVLINK_DEVICE_LINK_INIT_STATUS + * + * Query link initialization status of the specified device. + * This interface poll/wait for the link initialization to complete and report + * per link initialization status. The caller can initiate a link initialization + * request using the CTRL_NVLINK_LINK_INIT_ASYNC interface. + * + * Parameters: + * devInfo [OUT] + * nvlink device identification information. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * initStatus [OUT] + * Per link init state information. 
Link will be in SWCFG mode on True. + */ +typedef struct +{ + NvU32 linkIndex; + NvBool initStatus; +} nvlink_link_init_status; + +typedef struct +{ + /* input parameters */ + nvlink_device_info devInfo; + + /* output parameters */ + NvlStatus status; + nvlink_link_init_status linkStatus[NVLINK_MAX_DEVICE_CONN]; +} nvlink_device_link_init_status; + +/* + * CTRL_NVLINK_DISCOVER_INTRANODE_CONNS + * + * Initiate an nvlink connection discovery. + * This interface allows the node to initiate an nvlink connection discovery + * process by writing and reading specific discovery tokens. The discovered + * connections are then registered in the nvlink driver context as intranode + * connections. + * + * Note: + * 1) Link has to be in SWCFG/HS mode to participate in the discovery process. + * 2) This interface will discover only intranode connections. + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_discover_intranode_conns; + +/* + * CTRL_NVLINK_DEVICE_GET_INTRANODE_CONNS + * + * Returns all the nvlink intranode connections for a device. + * This interface allows a user mode module to retrive endpoint details + * of all the intranode connections for the specified device. + * + * Note: + * 1) Issue CTRL_NVLINK_DISCOVER_INTRANODE_CONNS to discover connections first. + * + * Parameters: + * devInfo [OUT] + * nvlink device identification information. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * numConnections [OUT] + * The number of nvlink connections discovered for this device. + * + * conn [OUT] + * Detailed device information of each connection to this device. + */ +typedef struct +{ + /* input parameters */ + nvlink_device_info devInfo; + + /* output parameters */ + NvlStatus status; + NvU32 numConnections; + nvlink_connection_info conn[NVLINK_MAX_DEVICE_CONN]; +} nvlink_device_get_intranode_conns; + +/* + * CTRL_NVLINK_ADD_INTERNODE_CONN + * + * Register an internode nvlink connection with driver. + * This interface allows a user mode module to populate internode nvlink + * connections which are discovered through an nvlink device discovery + * process. This is applicable only for multi-node systems where only one + * endpoint of the connection is visible/accessible from a given node. + * + * Parameters: + * localEndPoint [IN] + * Local endpoint information of the internode connection. + * + * remoteEndPoint [IN] + * Remote endpoint and device information of the internode connection. + * + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + nvlink_endpoint localEndPoint; + nvlink_remote_endpoint_info remoteEndPoint; + + /* output parameters */ + NvlStatus status; +} nvlink_add_internode_conn; + +/* + * CTRL_NVLINK_REMOVE_INTERNODE_CONN + * + * Remove a previously added internode nvlink connection from the driver. + * This interface allows a user mode module to remove an internode nvlink + * connection. This is applicable only for multi-node systems where only one + * endpoint of the connection is visible/accessible from a given node. + * + * Parameters: + * localEndPoint [IN] + * Local endpoint information of the internode connection. + * + * status [OUT] + * One of the NVL_XXX status value. 
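[Editor's note] To make the internode add/remove controls described here concrete: a minimal, hypothetical sketch of registering an internode connection from a user-mode fabric management module. All identifier values (node ids, link numbers, PCI address, device type) are made up for illustration; in practice they come from the cross-node discovery-token exchange.

    #include "nvlink_export.h"
    #include "nvlink_lib_ctrl.h"
    #include "nvlink_os.h"

    static NvlStatus addRemoteConnection(void)
    {
        nvlink_add_internode_conn params;
        nvlink_ioctrl_params ctrl;

        nvlink_memset(&params, 0, sizeof(params));

        /* Local end: a link on this node (hypothetical values). */
        params.localEndPoint.nodeId           = 0;
        params.localEndPoint.linkIndex        = 3;
        params.localEndPoint.pciInfo.domain   = 0;
        params.localEndPoint.pciInfo.bus      = 0x3b;
        params.localEndPoint.pciInfo.device   = 0;
        params.localEndPoint.pciInfo.function = 0;

        /* Remote end: data reported by the peer node (hypothetical values). */
        params.remoteEndPoint.nodeId    = 1;
        params.remoteEndPoint.linkIndex = 7;
        params.remoteEndPoint.devType   = nvlink_device_type_nvswitch;
        /* remoteEndPoint.pciInfo / devUuid would be copied from the peer's records. */

        nvlink_memset(&ctrl, 0, sizeof(ctrl));
        ctrl.cmd  = CTRL_NVLINK_ADD_INTERNODE_CONN;
        ctrl.buf  = &params;
        ctrl.size = sizeof(params);

        (void)nvlink_lib_ioctl_ctrl(&ctrl);
        return params.status;
    }

Tearing the connection down is the mirror image: fill nvlink_remove_internode_conn.localEndPoint with the same local endpoint and issue CTRL_NVLINK_REMOVE_INTERNODE_CONN.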
+ */ +typedef struct +{ + /* input parameters */ + nvlink_endpoint localEndPoint; + + /* output parameters */ + NvlStatus status; +} nvlink_remove_internode_conn; + +/* + * CTRL_NVLINK_DEVICE_WRITE_DISCOVERY_TOKENS + * + * Write discovery token on links which are not part of any connection. + * This interface allows a user mode module, which should discover all the + * internode connections to write a unique discovery token on all the links + * of the specified device. + * + * Parameters: + * devInfo [OUT] + * nvlink device identification information. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * numTokens [OUT] + * Number of nvlink tokens written. + * + * tokenInfo [OUT] + * link index, and value of each token written. + */ +typedef struct +{ + NvU32 linkIndex; + NV_DECLARE_ALIGNED(NvU64 tokenValue, 8); +} nvlink_token_info; + +typedef struct +{ + /* input parameters */ + nvlink_device_info devInfo; + + /* output parameters */ + NvlStatus status; + NvU32 numTokens; + nvlink_token_info tokenInfo[NVLINK_MAX_DEVICE_CONN]; +} nvlink_device_write_discovery_tokens; + +/* + * CTRL_NVLINK_DEVICE_READ_DISCOVERY_TOKENS + * + * Read discovery token on links which are not part of any connection. + * This interface allows a user mode module to read discovery token from all the + * links of the specified device. To discover internode connections, a module + * will first initiate a write discovery token operation, then read back tokens + * from all other nodes and check for matching tokens. + * + * Parameters: + * devInfo [OUT] + * nvlink device identification information. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * numTokens [OUT] + * Number of nvlink tokens read. + * + * tokenInfo [OUT] + * link index, and value of each token read. + */ +typedef struct +{ + /* input parameters */ + nvlink_device_info devInfo; + + /* output parameters */ + NvlStatus status; + NvU32 numTokens; + nvlink_token_info tokenInfo[NVLINK_MAX_DEVICE_CONN]; +} nvlink_device_read_discovery_tokens; + +/* + * CTRL_NVLINK_TRAIN_INTRANODE_CONN + * + * Train an intranode connection. + * This interface allows a user mode module to train an nvlink connection + * to the desired state. + * + * Note: + * 1) Source endpoint is treated as active endpoint. + * + * Parameters: + * trainTo [IN] + * Desired connection state as defined in nvlink_conn_train_type. + * + * srcEndPoint [IN] + * Specify source endpoint details of the nvlink connection. + * + * dstEndPoint [IN] + * Specify other endpoint details of the nvlink connection. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * srcEndState [OUT] + * Link and sublink state of the source endpoint. + * + * dstEndState [OUT] + * Link and sublink state of the other endpoint. + */ +typedef enum +{ + nvlink_train_conn_off_to_swcfg = 0, + nvlink_train_conn_swcfg_to_active, + nvlink_train_conn_to_off, + nvlink_train_conn_active_to_swcfg, + nvlink_train_conn_swcfg_to_off, +} nvlink_conn_train_type; + +typedef struct +{ + /* input parameters */ + NvU32 trainTo; + nvlink_endpoint srcEndPoint; + nvlink_endpoint dstEndPoint; + + /* output parameters */ + NvlStatus status; + nvlink_link_state srcEndState; + nvlink_link_state dstEndState; +} nvlink_train_intranode_conn; + +/* + * CTRL_NVLINK_TRAIN_INTERNODE_CONN_LINK + * + * Train link of an internode connection. + * This interface allows a user mode module to train the local endpoint + * link of an nvlink internode connection to the desired state. 
This is + * applicable only for multi-node systems where only one endpoint of the + * connection is visible/accessible from a given node. + * + * Parameters: + * trainTo [IN] + * Desired link state as defined in nvlink_link_train_type. + * + * isMasterEnd [IN] + * True if the endpoint is the master endpoint of the connection. + * + * endPoint [IN] + * Specify endpoint details of the nvlink connection. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * endState [OUT] + * Link and sublink state of the endpoint. + */ +typedef enum +{ + nvlink_train_link_off_to_swcfg = 0, + nvlink_train_link_swcfg_to_active, + nvlink_train_link_to_off, + nvlink_train_link_active_to_swcfg, + nvlink_train_link_swcfg_to_off, +} nvlink_link_train_type; + +typedef struct +{ + /* input parameters */ + NvU32 trainTo; + NvU32 isMasterEnd; + nvlink_endpoint localEndPoint; + + /* output parameters */ + NvlStatus status; + nvlink_link_state localEndState; +} nvlink_train_internode_conn_link; + +/* + * CTRL_NVLINK_TRAIN_INTERNODE_CONN_SUBLINK + * + * Train sublink of an internode connection. + * This interface allows a user mode module to train the local endpoint + * sublink of an nvlink internode connection to the desired state. This is + * applicable only for multi-node systems where only one endpoint of the + * connection is visible/accessible from a given node. + * + * Parameters: + * trainTo [IN] + * Desired sublink state as defined in nvlink_sublink_train_type. + * + * isMasterEnd [IN] + * True if the endpoint is master endpoint of the connection. + * + * endPoint [IN] + * Specify endpoint details of the nvlink connection. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * endState [OUT] + * Link and sublink state of the endpoint. + */ +typedef enum +{ + nvlink_train_sublink_off_to_safe = 0, + nvlink_train_sublink_safe_to_hs, + nvlink_train_sublink_to_off, + nvlink_train_sublink_hs_to_safe, + nvlink_train_sublink_safe_to_off, +} nvlink_sublink_train_type; + +typedef struct +{ + /* input parameters */ + nvlink_sublink_train_type trainTo; + NvU32 isMasterEnd; + nvlink_endpoint localEndPoint; + + /* output parameters */ + NvlStatus status; + nvlink_link_state localEndState; +} nvlink_train_internode_conn_sublink; + +/* + * CTRL_NVLINK_GET_DEVICES_INFO + * + * Return registered device information. + * This interface allows a user mode module to query and retrieve detailed + * information about all the registered devices in the nvlink core. This + * device information can be later used to uniquely identify each device + * present in the node. + * + * Parameters: + * + * status [OUT] + * One of the NVL_XXX status value. + * + * numDevice [OUT] + * Total number of devices registered. + * + * devInfo [OUT] + * Detailed information of each device. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; + NvU32 numDevice; + nvlink_detailed_dev_info devInfo[NVLINK_DEVICE_INSTANCE_MAX]; +} nvlink_get_devices_info; + +/* + * CTRL_NVLINK_INITPHASE1 + * + * This interface allows a user mode module to send INITPHASE request to minion. + * NOTE: This IOCTRL is supported from GA100+ + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_initphase1; + +/* + * CTRL_NVLINK_INITNEGOTIATE + * + * This interface allows a user mode module to send INITNEGOTIATE request to minion. 
+ * NOTE: This IOCTRL is supported from GA100+ + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_initnegotiate; + +/* + * CTRL_NVLINK_RX_INIT_TERM + * + * This interface allows a user mode module to send RECEIVER TERMINATION on the endpoint. + * NOTE: This IOCTRL is supported from GA100+ + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_rx_init_term; + +/* + * CTRL_NVLINK_DEVICE_READ_SIDS + * + * NVLink 3.0 onwards, connection detection is handled by Minion. After INITNEGOTIATE + * completed, this interface needs to be queried to retrieve the local/remote SIDs + * and the local/remote link number of all links associated with a device + * + * Parameters: + * devInfo [IN] + * nvlink device identification information. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * numEntries [OUT] + * Number of links for which SIDs were read + * + * sidInfo [OUT] + * local/remote link number and local/remote sid + */ +typedef struct +{ + NvU32 localLinkNum; + NvU32 remoteLinkNum; + + /* Added as part of NvLink 3.0 */ + NV_DECLARE_ALIGNED(NvU64 localLinkSid, 8); + NV_DECLARE_ALIGNED(NvU64 remoteLinkSid, 8); +} nvlink_sid_info; + +typedef struct +{ + /* input parameters */ + nvlink_device_info devInfo; + + /* output parameters */ + NvlStatus status; + NvU32 numEntries; + nvlink_sid_info sidInfo[NVLINK_MAX_DEVICE_CONN]; +} nvlink_device_read_sids; + +/* + * CTRL_NVLINK_SET_RX_DETECT + * + * This interface allows a user mode module to send RECEIVER DETECT on the endpoint. + * NOTE: This IOCTRL is supported from GA100+ + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_set_rx_detect; + +/* + * CTRL_NVLINK_GET_RX_DETECT + * + * This interface allows a user mode module to Poll for output of receiver + * detect on all the endpoints. + * NOTE: This IOCTRL is supported from GA100+ + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_get_rx_detect; + +/* + * CTRL_NVLINK_ACQUIRE_CAPABILITY + * + * Upon success, user mode would acquire the requested capability + * to perform privilege operations. This IOCTL will acquire one + * capability at a time. + * + * Parameters: + * capDescriptor [IN] + * The OS file descriptor or handle representing the capability. + * cap [IN] + * The requested capability. One of the NVLINK_CAP_*. + * + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8); + NvU32 cap; + + /* output parameters */ + NvlStatus status; +} nvlink_acquire_capability; + +/* + * CTRL_NVLINK_GET_LINK_STATE + * + * Returns link state. This is needed for trunk links + * which has post HS steps to get links to active. + * + * Parameters: + * endPointPairs [IN] + * Specify the endpoints on which the command is to be sent + * + * endPointPairsCount [IN] + * Specify count of endpoints passed in + * + * status [OUT] + * One of the NVL_XXX status value. 
+ * + * endpointPairsStates [OUT] + * Link and sublink state of the endpoint + */ +typedef struct +{ + /* input parameters */ + nvlink_endpoint endPoints[NVLINK_MAX_NVLINK_ENDPOINTS]; + NvU32 endPointCount; + + /* output parameters */ + NvlStatus status; + nvlink_link_state endState[NVLINK_MAX_NVLINK_ENDPOINTS]; +} nvlink_get_link_state; + +/* + * CTRL_NVLINK_TRAIN_INTRANODE_CONNS_PARALLEL + * + * Train a set of intranode connections in parallel. + * This interface allows a user mode module to train a set of nvlink + * connections to the desired state. + * + * Note: + * 1) Source endpoint of every pair is treated as active endpoint. + * + * Parameters: + * trainTo [IN] + * Desired connection state as defined in nvlink_conn_train_type. + * + * endPointPairs [IN] + * Specify endpoint pair (source and other endpoint) details of the + * nvlink connections + * + * endPointPairsCount [IN] + * Specify count of intranode connnection passed in + * + * status [OUT] + * One of the NVL_XXX status value. + * + * endpointPairsStates [OUT] + * Link and sublink state of the endpoint pairs + */ + +typedef struct +{ + nvlink_endpoint src; + nvlink_endpoint dst; +} nvlink_endpoint_pair; + +typedef struct +{ + nvlink_link_state srcEnd; + nvlink_link_state dstEnd; +} nvlink_link_state_pair; + +typedef struct +{ + /* input parameters */ + NvU32 trainTo; + nvlink_endpoint_pair endPointPairs[NVLINK_MAX_PARALLEL_CONNS_TRAIN_COUNT]; + NvU32 endPointPairsCount; + + /* output parameters */ + NvlStatus status; + nvlink_link_state_pair endpointPairsStates[NVLINK_MAX_PARALLEL_CONNS_TRAIN_COUNT]; +} nvlink_train_intranode_conns_parallel; + +/* + * CTRL_NVLINK_TRAIN_INTERNODE_LINKS_INITOPTIMIZE + * + * This interface allows a user mode module to send INITOPTIMIZE request to minion + * and poll on the training good status which implies sublinks are trained + * NOTE: This IOCTRL is supported from GA100+ + * + * Parameters: + * endPoints [IN] + * Specify the endpoints on which INITOPTIMIZE is to be sent + * + * endPointCount [IN] + * Specify count of endpoints passed in + * + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + nvlink_endpoint endPoints[NVLINK_MAX_NVLINK_ENDPOINTS]; + NvU32 endPointCount; + + /* output parameters */ + NvlStatus status; +} nvlink_train_internode_links_initoptimize; + +/* + * CTRL_NVLINK_TRAIN_INTERNODE_LINKS_POST_INITOPTIMIZE + * + * This interface allows a user mode module to send POST_INITOPTIMIZE request to minion + * + * NOTE: This IOCTRL is supported from GA100+ + * + * Parameters: + * endPoints [IN] + * Specify the endpoints on which POST_INITOPTIMIZE is to be sent + * + * endPointCount [IN] + * Specify count of endpoints passed in + * + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + nvlink_endpoint endPoints[NVLINK_MAX_NVLINK_ENDPOINTS]; + NvU32 endPointCount; + + /* output parameters */ + NvlStatus status; +} nvlink_train_internode_links_post_initoptimize; + +/* + * CTRL_NVLINK_TRAIN_INTERNODE_CONNS_PARALLEL + * + * Train link of an internode connection. + * This interface allows a user mode module to train the local endpoint + * link of an nvlink internode connection to the desired state. This is + * applicable only for multi-node systems where only one endpoint of the + * connection is visible/accessible from a given node. + * + * Parameters: + * trainTo [IN] + * Desired link state as defined in nvlink_link_train_type. 
+ * + * isMasterEnd [IN] + * True if the endpoint is the master endpoint of the connection. + * + * endPoints [IN] + * Specify endpoint details of the nvlink connections. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * endStates [OUT] + * Link and sublink state of the endpoints. + */ + +typedef struct +{ + /* input parameters */ + NvU32 trainTo; + NvU32 isMasterEnd[NVLINK_MAX_PARALLEL_CONNS_TRAIN_COUNT]; + nvlink_endpoint localEndPoints[NVLINK_MAX_PARALLEL_CONNS_TRAIN_COUNT]; + NvU32 localEndPointCount; + + /* output parameters */ + NvlStatus status; + nvlink_link_state localEndStates[NVLINK_MAX_PARALLEL_CONNS_TRAIN_COUNT]; +} nvlink_train_internode_conns_parallel; + +/* + * CTRL_NVLINK_INITPHASE5 + * + * This interface allows a user mode module to send INITPHASE request to minion. + * NOTE: This IOCTRL is supported from GA100+ + * + * Parameters: + * status [OUT] + * One of the NVL_XXX status value. + */ +typedef struct +{ + /* input parameters */ + + /* output parameters */ + NvlStatus status; +} nvlink_initphase5; + +/* + * CTRL_NVLINK_GET_DEVICE_LINKS_STATE + * + * Returns the link state of all enabled links on a given device. + * + * Parameters: + * devInfo [IN] + * nvlink device identification information. + * + * status [OUT] + * One of the NVL_XXX status value. + * + * endStates [OUT] + * Link and sublink state of links. The array + * is continuous (i.e. it will have all links of the device + * even if the link is disabled) + * For links that are not enabled, the index in the array + * will show the states as INVALID. + * + * endStatesCount [OUT] + * count of valid entries into the endStates array + * + */ +typedef struct +{ + /* input parameters */ + nvlink_device_info devInfo; + + + /* output parameters */ + NvlStatus status; + nvlink_link_state endStates[NVLINK_MAX_NVLINK_ENDPOINTS]; + NvU32 endStatesCount; +} nvlink_get_device_link_states; + +#define CTRL_NVLINK_CHECK_VERSION 0x01 +#define CTRL_NVLINK_SET_NODE_ID 0x02 +#define CTRL_NVLINK_SET_TX_COMMON_MODE 0x03 +#define CTRL_NVLINK_CALIBRATE 0x04 +#define CTRL_NVLINK_ENABLE_DATA 0x05 +#define CTRL_NVLINK_LINK_INIT_ASYNC 0x06 +#define CTRL_NVLINK_DEVICE_LINK_INIT_STATUS 0x07 +#define CTRL_NVLINK_DISCOVER_INTRANODE_CONNS 0x08 +#define CTRL_NVLINK_DEVICE_GET_INTRANODE_CONNS 0x09 +#define CTRL_NVLINK_ADD_INTERNODE_CONN 0x0A +#define CTRL_NVLINK_REMOVE_INTERNODE_CONN 0x0B +#define CTRL_NVLINK_DEVICE_WRITE_DISCOVERY_TOKENS 0x0C +#define CTRL_NVLINK_DEVICE_READ_DISCOVERY_TOKENS 0x0D +#define CTRL_NVLINK_TRAIN_INTRANODE_CONN 0x0E +#define CTRL_NVLINK_TRAIN_INTERNODE_CONN_LINK 0x0F +#define CTRL_NVLINK_TRAIN_INTERNODE_CONN_SUBLINK 0x10 +#define CTRL_NVLINK_GET_DEVICES_INFO 0x11 +#define CTRL_NVLINK_INITPHASE1 0x12 +#define CTRL_NVLINK_INITNEGOTIATE 0x13 +#define CTRL_NVLINK_RX_INIT_TERM 0x14 +#define CTRL_NVLINK_SET_RX_DETECT 0x15 +#define CTRL_NVLINK_GET_RX_DETECT 0x16 +#define CTRL_NVLINK_ACQUIRE_CAPABILITY 0x17 +#define CTRL_NVLINK_TRAIN_INTRANODE_CONNS_PARALLEL 0x18 +#define CTRL_NVLINK_DEVICE_READ_SIDS 0x19 +#define CTRL_NVLINK_TRAIN_INTERNODE_LINKS_INITOPTIMIZE 0x1A +#define CTRL_NVLINK_TRAIN_INTERNODE_LINKS_POST_INITOPTIMIZE 0x1B +#define CTRL_NVLINK_TRAIN_INTERNODE_CONNS_PARALLEL 0x1C +#define CTRL_NVLINK_INITPHASE5 0x1D +#define CTRL_NVLINK_GET_DEVICE_LINK_STATES 0x1E +#define CTRL_NVLINK_GET_LINK_STATE 0x1F +#define CTRL_NVLINK_RESERVED_0 0x20 +#define CTRL_NVLINK_RESERVED_1 0x21 +#define CTRL_NVLINK_RESERVED_2 0x22 +#define CTRL_NVLINK_RESERVED_3 0x23 +#define CTRL_NVLINK_RESERVED_4 0x24 +#define 
CTRL_NVLINK_RESERVED_5 0x25 +#define CTRL_NVLINK_RESERVED_6 0x26 +#define CTRL_NVLINK_RESERVED_7 0x27 +#define CTRL_NVLINK_RESERVED_8 0x28 +#define CTRL_NVLINK_RESERVED_9 0x29 +#define CTRL_NVLINK_RESERVED_10 0x2A +#define CTRL_NVLINK_RESERVED_11 0x2B +/* Do not add code after this line */ + +#endif // _NVLINK_LIB_CTRL_H_ diff --git a/src/common/nvlink/interface/nvlink_lock.h b/src/common/nvlink/interface/nvlink_lock.h new file mode 100644 index 000000000..df707ddaa --- /dev/null +++ b/src/common/nvlink/interface/nvlink_lock.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NVLINK_LOCK_H_ +#define _NVLINK_LOCK_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvlink_common.h" + +/* + * Allocate top level lock. Return NVL_SUCCESS if + * the lock was allocated else return NVL_ERR_GENERIC. + */ +NvlStatus nvlink_lib_top_lock_alloc(void); + +/* + * Free top level lock. Return NVL_SUCCESS if + * the lock was freed else return NVL_ERR_GENERIC. + */ +NvlStatus nvlink_lib_top_lock_free(void); + +/* + * Allocate per-link lock. Return NVL_SUCCESS if + * the lock was allocated else return NVL_ERR_GENERIC. + */ +NvlStatus nvlink_lib_link_lock_alloc(nvlink_link *link); + +/* + * Free per-link lock. Return NVL_SUCCESS if + * the lock was freed else return NVL_ERR_GENERIC. + */ +NvlStatus nvlink_lib_link_lock_free(nvlink_link *link); + +/* + * Acquire top level lock. Return NVL_SUCCESS if + * the lock was acquired else return NVL_ERR_STATE_IN_USE. + */ +NvlStatus nvlink_lib_top_lock_acquire(void); + +/* + * Release top level lock. Return NVL_SUCCESS if + * the lock was released else return NVL_ERR_GENERIC. + */ +NvlStatus nvlink_lib_top_lock_release(void); + +/* + * Sort the array of links in order of (DBDF, link#) - + * lowest to highest and acquire link locks. + * Return NVL_SUCCESS if all the link locks were acquired. + * Else if any link lock failed to be acquired, release + * all acquired link locks and return NVL_ERR_STATE_IN_USE. + */ +NvlStatus nvlink_lib_link_locks_acquire(nvlink_link **links, int numLinks); + +/* + * Loop over all the links and call nvlink_releaseLock(links[i]->linkLock). + * Return NVL_SUCCESS if all the link locks were released. + * Else if any link lock failed to be released return NVL_ERR_GENERIC. 
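+ *
+ * A minimal usage sketch (illustrative only, not taken from this header;
+ * actual callers may release the top-level lock before operating on the
+ * links) showing the intended pairing and ordering of these helpers:
+ *
+ *     if (nvlink_lib_top_lock_acquire() == NVL_SUCCESS)
+ *     {
+ *         if (nvlink_lib_link_locks_acquire(links, numLinks) == NVL_SUCCESS)
+ *         {
+ *             // ... operate on the locked links ...
+ *             nvlink_lib_link_locks_release(links, numLinks);
+ *         }
+ *         nvlink_lib_top_lock_release();
+ *     }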
+ */ +NvlStatus nvlink_lib_link_locks_release(nvlink_link **links, int numLinks); + +#ifdef __cplusplus +} +#endif + +#endif // _NVLINK_LOCK_H_ diff --git a/src/common/nvlink/interface/nvlink_os.h b/src/common/nvlink/interface/nvlink_os.h new file mode 100644 index 000000000..4130bf98d --- /dev/null +++ b/src/common/nvlink/interface/nvlink_os.h @@ -0,0 +1,86 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVLINK_OS_H_ +#define _NVLINK_OS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvlink_common.h" + +#define NVLINK_FREE(x) nvlink_free((void *)x) + +// Memory management functions +void * nvlink_malloc(NvLength); +void nvlink_free(void *); +void * nvlink_memset(void *, int, NvLength); +void * nvlink_memcpy(void *, const void *, NvLength); +int nvlink_memcmp(const void *, const void *, NvLength); +NvU32 nvlink_memRd32(const volatile void *); +void nvlink_memWr32(volatile void *, NvU32); +NvU64 nvlink_memRd64(const volatile void *); +void nvlink_memWr64(volatile void *, NvU64); + +// String management functions +char * nvlink_strcpy(char *, const char *); +NvLength nvlink_strlen(const char *); +int nvlink_strcmp(const char *, const char *); +int nvlink_snprintf(char *, NvLength, const char *, ...); + +// Locking support functions +void * nvlink_allocLock(void); +void nvlink_acquireLock(void *); +NvBool nvlink_isLockOwner(void *); +void nvlink_releaseLock(void *); +void nvlink_freeLock(void *); + +// Miscellaneous functions +void nvlink_assert(int expression); +void nvlink_sleep(unsigned int ms); +void nvlink_print(const char *, int, const char *, int, const char *, ...); +int nvlink_is_admin(void); + +// Capability functions +NvlStatus nvlink_acquire_fabric_mgmt_cap(void *osPrivate, NvU64 capDescriptor); +int nvlink_is_fabric_manager(void *osPrivate); + +#define NVLINK_DBG_LEVEL_INFO 0x0 +#define NVLINK_DBG_LEVEL_SETUP 0x1 +#define NVLINK_DBG_LEVEL_USERERRORS 0x2 +#define NVLINK_DBG_LEVEL_WARNINGS 0x3 +#define NVLINK_DBG_LEVEL_ERRORS 0x4 + +#define NVLINK_DBG_WHERE __FILE__, __LINE__, __FUNCTION__ +#define NVLINK_DBG_INFO NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_INFO +#define NVLINK_DBG_SETUP NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_SETUP +#define NVLINK_DBG_USERERRORS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_USERERRORS +#define NVLINK_DBG_WARNINGS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_WARNINGS +#define 
NVLINK_DBG_ERRORS NVLINK_DBG_WHERE, NVLINK_DBG_LEVEL_ERRORS + +#ifdef __cplusplus +} +#endif + +#endif //_NVLINK_OS_H_ diff --git a/src/common/nvlink/kernel/nvlink/core/nvlink_conn_mgmt.c b/src/common/nvlink/kernel/nvlink/core/nvlink_conn_mgmt.c new file mode 100644 index 000000000..ee483fb2a --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/core/nvlink_conn_mgmt.c @@ -0,0 +1,527 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +/** + * For a given link, return the associated intranode connection + * + * @param[in] endpoint NVLink Link pointer + * @param[out] conn Connection associated with the link + */ +void +nvlink_core_get_intranode_conn +( + nvlink_link *endpoint, + nvlink_intranode_conn **conn +) +{ + nvlink_intranode_conn *tmpConn = NULL; + + FOR_EACH_CONNECTION(tmpConn, nvlinkLibCtx.nv_intraconn_head, node) + { + if (tmpConn->end0 == endpoint || tmpConn->end1 == endpoint) + { + *conn = tmpConn; + break; + } + } +} + +/** + * For a given local link, return the associated internode connection + * + * @param[in] localLink NVLink Link pointer + * @param[out] conn Connection associated with the link + */ +void +nvlink_core_get_internode_conn +( + nvlink_link *localLink, + nvlink_internode_conn **conn +) +{ + nvlink_internode_conn *tmpConn = NULL; + + FOR_EACH_CONNECTION(tmpConn, nvlinkLibCtx.nv_interconn_head, node) + { + if (tmpConn->local_end == localLink) + { + *conn = tmpConn; + break; + } + } +} + +/** + * Add a new intranode connection to the list of connections + * + * @param[in] end0 NVLink Link pointer for end0 + * @param[in] end1 NVLink Link pointer for end1 + * + * return NVL_SUCCESS if the conn was added successfully + */ +NvlStatus +nvlink_core_add_intranode_conn +( + nvlink_link *end0, + nvlink_link *end1 +) +{ + nvlink_intranode_conn *conn = NULL; + + // don't do anything if we have an intranode connecction + nvlink_core_get_intranode_conn(end0, &conn); + + if (conn != NULL) + { + // Verify that the other end of the connection is indeed end1 + conn->end0 == end0 ? 
+ nvlink_assert(conn->end1 == end1) : + nvlink_assert(conn->end0 == end1); + return NVL_SUCCESS; + } + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "Adding new NVLink intranode connection between %s:%s and %s:%s\n", + end0->dev->deviceName, end0->linkName, + end1->dev->deviceName, end1->linkName)); + + // create a new intranode connection object + conn = (nvlink_intranode_conn*)nvlink_malloc(sizeof(nvlink_intranode_conn)); + if (conn == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "Adding NVLink intranode connection failed " + "due to memory allocation error\n")); + return NVL_NO_MEM; + } + + nvlink_memset(conn, 0, sizeof(nvlink_intranode_conn)); + + // Initialize the node for the connection + nvListInit(&conn->node); + + // Initialize the connection endpoints + conn->end0 = end0; + conn->end1 = end1; + + // Add the connection to the list of connections + nvListAppend(&conn->node, &nvlinkLibCtx.nv_intraconn_head.node); + + // + // Update the count of connected endpoints + // Loopback link, increment by 1 + // Non loopback link, increment by 2 + // + nvlinkLibCtx.connectedEndpoints = ( end0 == end1 ? + nvlinkLibCtx.connectedEndpoints + 1: + nvlinkLibCtx.connectedEndpoints + 2 ); + + return NVL_SUCCESS; +} + +/** + * Add a new internode connection to the list of internode connections + * + * Note: As of now, no stats/count for internode connections. + * + * @param[in] localLink NVLink Link pointer for one end + * @param[in] remoteEndPoint Remote endpoint + * + * return NVL_SUCCESS if the conn was added successfully + */ +NvlStatus +nvlink_core_add_internode_conn +( + nvlink_link *localLink, + nvlink_remote_endpoint_info *remoteEndPoint +) +{ + nvlink_internode_conn *conn = NULL; + + // Don't do anything if we have an internode connection for local link + nvlink_core_get_internode_conn(localLink, &conn); + if (conn != NULL) + { + return NVL_SUCCESS; + } + + // create a new connection + conn = (nvlink_internode_conn *)nvlink_malloc(sizeof(nvlink_internode_conn)); + if (conn == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "Adding nvlink internode connection failed" + " due to memory allocation error\n")); + return NVL_NO_MEM; + } + + nvlink_memset(conn, 0, sizeof(nvlink_internode_conn)); + + // initialize the node for the connection list + nvListInit(&conn->node); + + // copy/assign the connection endpoints information + conn->local_end = localLink; + nvlink_memcpy(&conn->remote_end, + remoteEndPoint, + sizeof(nvlink_remote_endpoint_info)); + + // add the connection to the list of internode connections + nvListAppend(&conn->node, &nvlinkLibCtx.nv_interconn_head.node); + + return NVL_SUCCESS; +} + +/** + * Remove the connection from the list of intranode connections + * + * @param[in] conn NVLink connection pointer + */ +void +nvlink_core_remove_intranode_conn +( + nvlink_intranode_conn *conn +) +{ + // Remove the connection from the list of connections + nvListDel(&conn->node); + + // + // Update the count of connected endpoints + // Loopback link, decrement by 1 + // Non loopback link, decrement by 2 + // + nvlinkLibCtx.connectedEndpoints = ( conn->end0 == conn->end1 ? + nvlinkLibCtx.connectedEndpoints - 1: + nvlinkLibCtx.connectedEndpoints - 2 ); + + // + // Update the count of notConnected endpoints + // Loopback link, do nothing + // Non-loopback link, increment by 1 + // + nvlinkLibCtx.notConnectedEndpoints = ( conn->end0 != conn->end1 ?
+ nvlinkLibCtx.notConnectedEndpoints + 1: + nvlinkLibCtx.notConnectedEndpoints ); + + nvlink_free((void *)conn); +} + +/** + * Remove the connection from the list of internode connections + * + * @param[in] localLink NVLink link pointer + */ +void +nvlink_core_remove_internode_conn +( + nvlink_link *localLink +) +{ + nvlink_internode_conn *conn = NULL; + + nvlink_core_get_internode_conn(localLink, &conn); + + if (conn != NULL) + { + nvListDel(&conn->node); + nvlink_free((void *)conn); + } +} + +/** + * Check if the given intranode connection is in the specified mode + * + * @param[in] conn NVLink Connection pointer + * @param[in] linkMode Link mode + * + * return NVL_SUCCESS if the conn is in the given state + */ +NvlStatus +nvlink_core_check_intranode_conn_state +( + nvlink_intranode_conn *conn, + NvU64 linkMode +) +{ + switch (linkMode) + { + case NVLINK_LINKSTATE_OFF: + { + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_OFF)) && + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_OFF))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link already in OFF state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_SUCCESS; + } + + // Check if only one end of connection is OFF + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_OFF)) || + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_OFF))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link is in bad state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_INVALID_STATE; + } + + return NVL_ERR_GENERIC; + } + + case NVLINK_LINKSTATE_RESET: + { + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_RESET)) && + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_RESET))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link already in RESET state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_SUCCESS; + } + + // Check if only one end of connection is RESET + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_RESET)) || + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_RESET))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link is in bad state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_INVALID_STATE; + } + + return NVL_ERR_GENERIC; + } + + case NVLINK_LINKSTATE_SAFE: + { + // Check if both ends and their sublinks are already in SAFE mode + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_SAFE)) && + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_SAFE))) + { + if ((nvlink_core_check_tx_sublink_state(conn->end0, + NVLINK_SUBLINK_STATE_TX_OFF)) && + (nvlink_core_check_tx_sublink_state(conn->end1, + NVLINK_SUBLINK_STATE_TX_OFF)) && + (nvlink_core_check_rx_sublink_state(conn->end0, + NVLINK_SUBLINK_STATE_RX_OFF)) && + (nvlink_core_check_rx_sublink_state(conn->end1, + NVLINK_SUBLINK_STATE_RX_OFF))) + { + // + // If links are in safe, check if sublinks are in off + // if so, we had performed pseudo-clean shutdown + // + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link is not in SAFE mode. 
", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_GENERIC; + } + else if (!((nvlink_core_check_tx_sublink_state(conn->end0, + NVLINK_SUBLINK_STATE_TX_SAFE)) && + (nvlink_core_check_tx_sublink_state(conn->end1, + NVLINK_SUBLINK_STATE_TX_SAFE)) && + (nvlink_core_check_rx_sublink_state(conn->end0, + NVLINK_SUBLINK_STATE_RX_SAFE)) && + (nvlink_core_check_rx_sublink_state(conn->end1, + NVLINK_SUBLINK_STATE_RX_SAFE)))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Sublinks are in bad state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_INVALID_STATE; + } + + return NVL_SUCCESS; + } + + // Check if only one end of connection is in SAFE mode + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_SAFE)) || + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_SAFE))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link is in bad state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_INVALID_STATE; + } + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link is not in SAFE mode. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_GENERIC; + } + + case NVLINK_LINKSTATE_HS: + { + // Check if both ends and their sublinks are already in HS mode + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_HS)) && + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_HS))) + { + if (!((nvlink_core_check_tx_sublink_state(conn->end0, + NVLINK_SUBLINK_STATE_TX_HS)) && + (nvlink_core_check_tx_sublink_state(conn->end1, + NVLINK_SUBLINK_STATE_TX_HS)) && + (nvlink_core_check_rx_sublink_state(conn->end0, + NVLINK_SUBLINK_STATE_RX_HS)) && + (nvlink_core_check_rx_sublink_state(conn->end1, + NVLINK_SUBLINK_STATE_RX_HS)))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Sublinks are in bad state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_INVALID_STATE; + } + + return NVL_SUCCESS; + } + + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_HS)) || + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_HS))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link is in bad state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_INVALID_STATE; + } + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link is not in HIGH SPEED mode. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_GENERIC; + } + + case NVLINK_LINKSTATE_SLEEP: + { + // Check if both ends of connection are already in SLEEP mode + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_SLEEP)) && + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_SLEEP))) + { + return NVL_SUCCESS; + } + + // Check if only one end of connection is in SLEEP mode + if ((nvlink_core_check_link_state(conn->end0, NVLINK_LINKSTATE_SLEEP)) || + (nvlink_core_check_link_state(conn->end1, NVLINK_LINKSTATE_SLEEP))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link is in bad state. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + return NVL_ERR_INVALID_STATE; + } + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link is not in SLEEP mode. 
", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conn); + + return NVL_ERR_GENERIC; + } + } + + return NVL_SUCCESS; +} + +/** + * Copy the intranode connection's remote endpoint information into + * the nvlink_conn_info structure passed in + * + * @param[in] remote_end NVLink Link pointer + * @param[in] conn_info Details of remote endpoint + */ +void +nvlink_core_copy_intranode_conn_info +( + nvlink_link *remote_end, + nvlink_conn_info *conn_info +) +{ + // copy the remote device pci information + conn_info->domain = remote_end->dev->pciInfo.domain; + conn_info->bus = remote_end->dev->pciInfo.bus; + conn_info->device = remote_end->dev->pciInfo.device; + conn_info->function = remote_end->dev->pciInfo.function; + conn_info->pciDeviceId = remote_end->dev->pciInfo.pciDeviceId; + conn_info->chipSid = remote_end->localSid; + + // copy the device type + conn_info->deviceType = remote_end->dev->type; + + // copy the remote device uuid + if (remote_end->dev->uuid != NULL) + { + nvlink_memcpy(conn_info->devUuid, remote_end->dev->uuid, NV_UUID_LEN); + } + + // copy the remote link number + conn_info->linkNumber = remote_end->linkNumber; +} + +/** + * Copy the internode connection's remote endpoint information into + * the nvlink_conn_info structure passed in + * + * @param[in] remote_end NVLink Link pointer + * @param[in] conn_info Details of remote endpoint + */ +void +nvlink_core_copy_internode_conn_info +( + nvlink_remote_endpoint_info *remote_end, + nvlink_conn_info *conn_info +) +{ + // copy the remote device pci information + conn_info->domain = remote_end->pciInfo.domain; + conn_info->bus = remote_end->pciInfo.bus; + conn_info->device = remote_end->pciInfo.device; + conn_info->function = remote_end->pciInfo.function; + conn_info->pciDeviceId = 0; + + // copy the device type + conn_info->deviceType = remote_end->devType; + + // copy the remote device uuid + nvlink_memcpy(conn_info->devUuid, remote_end->devUuid, NV_UUID_LEN); + + // copy the remote link number + conn_info->linkNumber = remote_end->linkIndex; +} diff --git a/src/common/nvlink/kernel/nvlink/core/nvlink_discovery.c b/src/common/nvlink/kernel/nvlink/core/nvlink_discovery.c new file mode 100644 index 000000000..d39264a78 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/core/nvlink_discovery.c @@ -0,0 +1,383 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +static NvBool _nvlink_core_all_links_initialized(void); +static void _nvlink_core_discover_topology(void); + +/** + * Get the remote end of the link + * + * For a given end of a link, returns the other end its connected to. + * + * Note: This function shouldn't be called when external fabric management is + * enabled in the endpoint drivers. Unfortunately, there is no graceful + * way to know that in the NVLink driver beforehand (during module load). + * + * @param[in] end NVLink Link pointer + * @param[out] remote_end Remote endpoint of the connection + * @param[in] flags Flags + */ +void +nvlink_core_discover_and_get_remote_end +( + nvlink_link *end, + nvlink_link **remote_end, + NvU32 flags +) +{ + nvlink_intranode_conn *conn = NULL; + nvlink_device *dev = NULL; + nvlink_link *link = NULL; + NvU32 linkCount = 0; + nvlink_link **pLinks = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (pLinks == NULL) + { + return; + } + + if (nvlinkLibCtx.bNewEndpoints) + { + if (!_nvlink_core_all_links_initialized()) + { + // Initialize the links to SWCFG mode + FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (linkCount >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: linkCount >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + nvlink_free((void *)pLinks); + return; + } + + pLinks[linkCount++] = link; + } + } + { + nvlink_core_init_links_from_off_to_swcfg(pLinks, linkCount, flags); + } + } + + // Re-discover the nvlink topology + _nvlink_core_discover_topology(); + } + + // Get the connection for the endpoint + nvlink_core_get_intranode_conn(end, &conn); + + if (conn != NULL) + { + *remote_end = (conn->end0 == end ? 
conn->end1 : conn->end0); + } + + if (pLinks != NULL) + { + nvlink_free((void *) pLinks); + } +} + +/** + * Discovery process to determine topology + * + * Involves sending and reading back AN0 packets/SID values + */ +static void +_nvlink_core_discover_topology(void) +{ + nvlink_device *dev0 = NULL; + nvlink_device *dev1 = NULL; + nvlink_link *end0 = NULL; + nvlink_link *end1 = NULL; + nvlink_intranode_conn *conn = NULL; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + NvBool isTokenFound = NV_FALSE; + NvU64 token = 0; + + nvlinkLibCtx.notConnectedEndpoints = 0; + + FOR_EACH_DEVICE_REGISTERED(dev0, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(end0, dev0, node) + { + // + // If receiver detect failed for the link or if clocks could not be set + // up for the link, then move to next link + // + if (!end0->bRxDetected || end0->bTxCommonModeFail) + continue; + + conn = NULL; + nvlink_core_get_intranode_conn(end0, &conn); + if (conn != NULL) + { + continue; + } + + if (end0->packet_injection_retries > NVLINK_MAX_NUM_PACKET_INJECTION_RETRIES) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Packet injection retries reached for %s:%s.\n", + __FUNCTION__, end0->dev->deviceName, end0->linkName)); + nvlinkLibCtx.notConnectedEndpoints++; + continue; + } + + end0->link_handlers->get_dl_link_mode(end0, &linkMode); + + // Packet injection can only happen on links that are in SAFE or ACTIVE + if (!((linkMode == NVLINK_LINKSTATE_SAFE) || (linkMode == NVLINK_LINKSTATE_HS))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Packet injection only works for links in SAFE or HS %s:%s.\n", + __FUNCTION__, end0->dev->deviceName, end0->linkName)); + nvlinkLibCtx.notConnectedEndpoints++; + continue; + } + + // + // Send the AN0 packet + // For NVLink3.0, the token mechanism is handled by Minion. + // SW gets the SID values, so write_discovery_token is stubbed for NVLink3.0. + // We use the return value of write_discovery_token to switch between + // NVLink2.0 and NVLink3.0 + // + if ((end0->version < NVLINK_DEVICE_VERSION_30) || + ((end0->localSid == 0) || (end0->remoteSid == 0))) + { + end0->link_handlers->write_discovery_token(end0, end0->token); + } + end0->packet_injection_retries++; + isTokenFound = NV_FALSE; + + FOR_EACH_DEVICE_REGISTERED(dev1, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(end1, dev1, node) + { + // + // If receiver detect failed for the link or if clocks could not be + // set up for the link, then move to next link + // + if (!end1->bRxDetected || end1->bTxCommonModeFail) + continue; + + token = 0; + + if ((end0->version >= NVLINK_DEVICE_VERSION_30) && + (end0->localSid != 0) && (end0->remoteSid != 0)) + { + if ((end0->remoteSid == end1->localSid) && + (end0->remoteLinkId == end1->linkNumber)) + { + // Make sure the below token check passes. + token = end0->token; + } + } + else + { + // Read the RX sublink for the AN0 packet + end1->link_handlers->read_discovery_token(end1, (NvU64 *) &token); + } + + // If token matches, establish the connection + if (token == end0->token) + { + isTokenFound = NV_TRUE; + + // + // If R4 tokens were used for NVLink3.0+, then mark initnegotiate + // passed, since ALT training won't get kicked off without it.
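+ // (This is the case where the SIDs reported by Minion were zero, so the
+ // legacy write/read discovery-token path above was used even on a
+ // NVLink3.0+ link.)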
+ // + if ((end0->version >= NVLINK_DEVICE_VERSION_30) && + ((end0->localSid == 0) || (end0->remoteSid == 0))) + { + end0->bInitnegotiateConfigGood = NV_TRUE; + end1->bInitnegotiateConfigGood = NV_TRUE; + } + + // Add to the connections list + nvlink_core_add_intranode_conn(end0, end1); + break; + } + } + + if (isTokenFound) break; + } + + if (nvlinkLibCtx.connectedEndpoints == + (nvlinkLibCtx.registeredEndpoints - nvlinkLibCtx.notConnectedEndpoints)) + { + break; + } + } + + if (nvlinkLibCtx.connectedEndpoints == + (nvlinkLibCtx.registeredEndpoints - nvlinkLibCtx.notConnectedEndpoints)) + { + break; + } + } +} + +/** + * Are all links trained or is there a need to re-attempt training ? + * + * Returns true if all links trained and no need to re-attempt training + * Returns false otherwise + */ +static NvBool +_nvlink_core_all_links_initialized(void) +{ + nvlink_device *dev = NULL; + nvlink_link *link = NULL; + NvU64 linkMode, txMode, rxMode; + NvU32 txSubMode, rxSubMode; + + if (nvlinkLibCtx.registeredEndpoints == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: No links registered with nvlink core! Sleeping.\n", + __FUNCTION__)); + return NV_TRUE; + } + + nvlinkLibCtx.endpointsInFail = 0; + nvlinkLibCtx.endpointsInSafe = 0; + nvlinkLibCtx.endpointsInActive = 0; + + // + // Get the current state of all endpoints. This determines + // if some of the endpoints are still not trained to SAFE + // + FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (nvlinkLibCtx.bNewEndpoints) + { + link->safe_retries = 0; + link->packet_injection_retries = 0; + } + + if (link->state == NVLINK_LINKSTATE_FAIL) + { + if (nvlinkLibCtx.bNewEndpoints) + { + // + // New endpoints were detected. There may be a chance that + // endpoints that failed previously may transition to safe + // + link->state = NVLINK_LINKSTATE_OFF; + } + else + { + nvlinkLibCtx.endpointsInFail++; + } + continue; + } + + linkMode = NVLINK_LINKSTATE_OFF; + + if (link->link_handlers->get_dl_link_mode(link, &linkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s", + __FUNCTION__, link->dev->deviceName, link->linkName)); + continue; + } + + if (linkMode == NVLINK_LINKSTATE_SAFE) + { + + // + // Link is only truly in SAFE mode if link state and sublink state + // is in SAFE/SWCFG. 
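+ // (Both sublink directions are checked below via get_tx_mode() and
+ // get_rx_mode().)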
+ // After pseudo-clean shutdown, sublinks are in OFF, so they + // need to be retrained to SAFE + // + if (link->link_handlers->get_tx_mode(link, &txMode, &txSubMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get tx sublink mode for %s:%s", + __FUNCTION__, link->dev->deviceName, link->linkName)); + } + if (link->link_handlers->get_rx_mode(link, &rxMode, &rxSubMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get rx sublink mode for %s:%s", + __FUNCTION__, link->dev->deviceName, link->linkName)); + } + + if ((txMode == NVLINK_SUBLINK_STATE_TX_OFF) || + (rxMode == NVLINK_SUBLINK_STATE_RX_OFF)) + { + continue; + } + + link->bRxDetected = NV_TRUE; + nvlinkLibCtx.endpointsInSafe++; + continue; + } + + if (linkMode == NVLINK_LINKSTATE_HS) + { + link->bRxDetected = NV_TRUE; + nvlinkLibCtx.endpointsInActive++; + continue; + } + } + } + + // New endpoints have been considered + nvlinkLibCtx.bNewEndpoints = NV_FALSE; + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Registered Links = %d, nvlinkLibCtx.endpointsInSafe = %d, " + " nvlinkLibCtx.endpointsInFail = %d, nvlinkLibCtx.endpointsInActive = %d\n", + __FUNCTION__, + nvlinkLibCtx.registeredEndpoints, nvlinkLibCtx.endpointsInSafe, + nvlinkLibCtx.endpointsInFail, nvlinkLibCtx.endpointsInActive)); + + // Determine if all links are currently trained + if ((nvlinkLibCtx.registeredEndpoints - nvlinkLibCtx.endpointsInFail - + nvlinkLibCtx.endpointsInSafe - nvlinkLibCtx.endpointsInActive) == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: All connected links are in trained\n", + __FUNCTION__)); + return NV_TRUE; + } + + return NV_FALSE; +} diff --git a/src/common/nvlink/kernel/nvlink/core/nvlink_initialize.c b/src/common/nvlink/kernel/nvlink/core/nvlink_initialize.c new file mode 100644 index 000000000..7d47bf59a --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/core/nvlink_initialize.c @@ -0,0 +1,1402 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +/** + * Initialize all the endpoints from OFF to SWCFG state + * + * @param[in] links Array of link endpoints to initialize + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags to determine whether init is sync/async + */ +void +nvlink_core_init_links_from_off_to_swcfg +( + nvlink_link **pLinks, + NvU32 numLinks, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode; + NvU32 i; + + // Sanity check the links array + nvlink_assert(pLinks != NULL); + + // Return early if there are no links to initialize + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No links to initialize\n", + __FUNCTION__)); + return; + } + + // Step 1: Perform INITPHASE1 on all endpoints + nvlink_core_initphase1(pLinks, numLinks, flags); + + // Get state on all links. This ensures NVLINK_LINKSTATE_INITPHASE1 completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < numLinks; i++) + { + status = pLinks[i]->link_handlers->get_dl_link_mode(pLinks[i], &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is in bad state\n", + __FUNCTION__, pLinks[i]->dev->deviceName, pLinks[i]->linkName)); + } + } + } + + // Step 2.0: RECEIVER DETECT: Enable RX termination on all the endpoints + nvlink_core_rx_init_term(pLinks, numLinks, flags); + + // Get state on all links. This ensures NVLINK_SUBLINK_STATE_RX_INIT_TERM completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < numLinks; i++) + { + // If receiver detect has passed for the link, move to next link + if (pLinks[i]->bRxDetected) + continue; + + status = pLinks[i]->link_handlers->get_dl_link_mode(pLinks[i], &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is in bad state\n", + __FUNCTION__, pLinks[i]->dev->deviceName, pLinks[i]->linkName)); + } + } + } + + // Step 2.1 RECEIVER DETECT :Perform receiver detect on all the endpoints + nvlink_core_set_rx_detect(pLinks, numLinks, flags); + + // Get state on all links. 
This ensures receiver detect command completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < numLinks; i++) + { + // In NVLink3.0 and 3.1, RXDET must be called serially - done above (Bug 2546220) + if (!((pLinks[i]->version == NVLINK_DEVICE_VERSION_30) || + (pLinks[i]->version == NVLINK_DEVICE_VERSION_31))) + { + // If receiver detect has passed for the link, move to next link + if (pLinks[i]->bRxDetected) + continue; + + status = pLinks[i]->link_handlers->get_dl_link_mode(pLinks[i], &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is in bad state\n", + __FUNCTION__, pLinks[i]->dev->deviceName, pLinks[i]->linkName)); + } + } + } + } + + // Step 2.2 RECEIVER DETECT :Poll for output of receiver detect on all the endpoints + nvlink_core_get_rx_detect(pLinks, numLinks, flags); + + + /***************** Receiver Detect is completed at this point ****************/ + /***************** Proceed with the link initialization steps ****************/ + + + // Enable Common mode on all Tx's + nvlink_core_enable_common_mode(pLinks, numLinks, flags); + + // Get state on all links. This ensures NVLINK_SUBLINK_STATE_TX_COMMON_MODE completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < numLinks; i++) + { + // If receiver detect failed for the link, move to next link + if (!pLinks[i]->bRxDetected || pLinks[i]->bTxCommonModeFail) + continue; + + status = pLinks[i]->link_handlers->get_dl_link_mode(pLinks[i], &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is in bad state\n", + __FUNCTION__, pLinks[i]->dev->deviceName, pLinks[i]->linkName)); + } + } + } + + // Put all Rx's in RXCAL + nvlink_core_calibrate_links(pLinks, numLinks, flags); + + // Disable Tx common mode + nvlink_core_disable_common_mode(pLinks, numLinks, flags); + + // Set Data Ready and Enable + nvlink_core_enable_data(pLinks, numLinks, flags); + + // Get state on all links. 
This ensures NVLINK_SUBLINK_STATE_TX_DATA_READY completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < numLinks; i++) + { + // If receiver detect failed for the link, move to next link + if (!pLinks[i]->bRxDetected || pLinks[i]->bTxCommonModeFail) + continue; + + status = pLinks[i]->link_handlers->get_dl_link_mode(pLinks[i], &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is in bad state\n", + __FUNCTION__, pLinks[i]->dev->deviceName, pLinks[i]->linkName)); + } + } + } + + // Put the links in SAFE mode + for (i = 0; i < numLinks; i++) + { + // If receiver detect failed for the link, move to next link + if (!pLinks[i]->bRxDetected || pLinks[i]->bTxCommonModeFail) + continue; + + linkMode = 0; + if (pLinks[i]->link_handlers->get_dl_link_mode(pLinks[i], &linkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s", + __FUNCTION__, pLinks[i]->dev->deviceName, pLinks[i]->linkName)); + } + + if ((linkMode != NVLINK_LINKSTATE_SAFE) && (linkMode != NVLINK_LINKSTATE_HS)) + { + // Check if the link has reached failed state + if (pLinks[i]->state == NVLINK_LINKSTATE_FAIL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s:%s marked as failed.\n", + pLinks[i]->dev->deviceName, pLinks[i]->linkName)); + continue; + } + + // + // Check if number of attempts to put the link into + // safe state has already exceeded the maximum number + // of retries. If yes, mark the link as failed + // + // On NVLink3.0, we don't support retraining in the driver. + // However MODS test 252 (on NVL3+ specifically) will train + // HS->OFF->HS many times. This check causes RM to stop + // training after NVLINK_MAX_NUM_SAFE_RETRIES times + // + if ((pLinks[i]->safe_retries > NVLINK_MAX_NUM_SAFE_RETRIES) && + (pLinks[i]->version < NVLINK_DEVICE_VERSION_30)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "Max safe mode retries reached for %s:%s. 
Marking it as failed.\n", + + pLinks[i]->dev->deviceName, pLinks[i]->linkName)); + pLinks[i]->state = NVLINK_LINKSTATE_FAIL; + continue; + } + + // Put the link in safe state and increment the retry count + pLinks[i]->link_handlers->set_dl_link_mode(pLinks[i], NVLINK_LINKSTATE_SAFE, flags); + pLinks[i]->safe_retries++; + } + } + + // Poll for links to enter SAFE mode + for (i = 0; i < numLinks; i++) + { + status = nvlink_core_wait_for_link_init(pLinks[i]); + if (status == NVL_SUCCESS) + { + pLinks[i]->powerStateTransitionStatus = nvlink_power_state_in_L0; + } + } + + // Send INITNEGOTIATE to all the links + nvlink_core_initnegotiate(pLinks, numLinks, flags); +} + +/** + * Kick-off INITPHASE1 on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags - Async/Sync + * + * return NvlStatus + */ +NvlStatus +nvlink_core_initphase1 +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvU32 i; + + // Sanity check the links array + nvlink_assert(links != NULL); + + // Return early if link array is empty + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link array is empty\n", + __FUNCTION__)); + return NVL_SUCCESS; + } + + for (i = 0; i < numLinks; i++) + { + NvlStatus status = NVL_SUCCESS; + NvU64 tlLinkMode = 0; + NvU64 dlLinkMode = 0; + NvU64 txMode = 0; + NvU32 txSubMode = 0; + NvU64 rxMode = 0; + NvU32 rxSubMode = 0; + + // INITPHASE1 is supported only for NVLINK version >= 3.0 + if (links[i]->version < NVLINK_DEVICE_VERSION_30) + continue; + + { + if (links[i]->link_handlers->get_tl_link_mode(links[i], &tlLinkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get TL link mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + + if (links[i]->link_handlers->get_dl_link_mode(links[i], &dlLinkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + + if (dlLinkMode != NVLINK_LINKSTATE_RESET) + { + if (links[i]->link_handlers->get_tx_mode(links[i], &txMode, &txSubMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get tx sublink mode for %s:%s", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + + if (links[i]->link_handlers->get_rx_mode(links[i], &rxMode, &rxSubMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get rx sublink mode for %s:%s", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + } + } + + // + // After pseudo-clean shutdown, sublink states are in OFF while + // link states stay in SWCFG. So, use sublink state here to determine + // if we should perform INITPHASE1 to cover both cold boot and + // pseudo-clean shutdown cases + // We also check the tl link state to see if the link is L2. 
Exiting + // from L2 also requires INITPHASE1 to be re-run + // + if ((tlLinkMode == NVLINK_LINKSTATE_SLEEP) || + (dlLinkMode == NVLINK_LINKSTATE_RESET) || + ((txMode == NVLINK_SUBLINK_STATE_TX_OFF) && + (rxMode == NVLINK_SUBLINK_STATE_RX_OFF))) + { + // Check if the link has reached failed state + if (links[i]->state == NVLINK_LINKSTATE_FAIL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s marked as failed.\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + status = links[i]->link_handlers->set_dl_link_mode(links[i], + NVLINK_LINKSTATE_INITPHASE1, + flags); + + // Although it fails we need to continue with the next link + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Initphase failed on Device:Link %s:%s", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + } + } + + // + // We could have links which are faulty and cannot be initialized. But proceeding + // the initialization sequence allows us to use other non-faulty links. Therefore + // return success always. + // + return NVL_SUCCESS; +} + +/** + * Kick-off INITRXTERM on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags - Async/Sync + * + * return NvlStatus + */ +NvlStatus +nvlink_core_rx_init_term +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvU32 i; + + // Sanity check the links array + nvlink_assert(links != NULL); + + // Return early if link array is empty + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link array is empty\n", + __FUNCTION__)); + return NVL_SUCCESS; + } + + for (i = 0; i < numLinks; i++) + { + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + if (links[i]->version < NVLINK_DEVICE_VERSION_22) + continue; + + // If receiver detect has passed for the link, move to next link + if (links[i]->bRxDetected) + continue; + + if (links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + + switch (linkMode) + { + case NVLINK_LINKSTATE_SAFE: + case NVLINK_LINKSTATE_HS: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s is already trained to SAFE or HIGH SPEED " + " RX Termination should have been enabled on the link\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + case NVLINK_LINKSTATE_FAULT: + case NVLINK_LINKSTATE_RECOVERY: + case NVLINK_LINKSTATE_FAIL: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link in bad state. 
Cannot enable RX termination " + "from current state for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + default: + { + switch (links[i]->rx_sublink_state) + { + case NVLINK_SUBLINK_STATE_RX_RXCAL: + continue; + + default: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Enabling RX Termination on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + if ((links[i]->link_handlers->set_rx_mode(links[i], + NVLINK_SUBLINK_STATE_RX_INIT_TERM, + flags)) != 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to enable RX termination for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + break; + } + } + break; + } + } + } + + // + // We could have links which are faulty and cannot be initialized. But proceeding + // the initialization sequence allows us to use other non-faulty links. Therefore + // return success always. + // + return NVL_SUCCESS; +} + +/** + * Kick-off receiver detect on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags - Async/Sync + * + * return NvlStatus + */ +NvlStatus +nvlink_core_set_rx_detect +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvU32 i; + + // Sanity check the links array + nvlink_assert(links != NULL); + + // Return early if link array is empty + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link array is empty\n", + __FUNCTION__)); + return NVL_SUCCESS; + } + + for (i = 0; i < numLinks; i++) + { + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + if (links[i]->version < NVLINK_DEVICE_VERSION_22) + continue; + + // If receiver detect has passed for the link, move to next link + if (links[i]->bRxDetected) + continue; + + if (links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + + switch (linkMode) + { + case NVLINK_LINKSTATE_SAFE: + case NVLINK_LINKSTATE_HS: + { + links[i]->bRxDetected = NV_TRUE; + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s is already trained to SAFE or HIGH SPEED\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + case NVLINK_LINKSTATE_FAULT: + case NVLINK_LINKSTATE_RECOVERY: + case NVLINK_LINKSTATE_FAIL: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link in bad state. 
Cannot perform RXDET from current state for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + default: + { + switch (links[i]->rx_sublink_state) + { + case NVLINK_SUBLINK_STATE_RX_RXCAL: + continue; + + default: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Performing RXDET on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + if ((links[i]->link_handlers->set_rx_detect(links[i], flags)) != 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to perform receiver detect for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + + // In NVLink3.0 and 3.1, RXDET must be called serially (Bug 2546220) + if ((links[i]->version == NVLINK_DEVICE_VERSION_30) || + (links[i]->version == NVLINK_DEVICE_VERSION_31)) + { + // Get state on all links. This ensures receiver detect command completes + status = links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is in bad state\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Checking RXDET status on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + if ((links[i]->link_handlers->get_rx_detect(links[i])) != 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Receiver detect failed for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + else + { + links[i]->bRxDetected = NV_TRUE; + } + } + break; + } + } + break; + } + } + } + + // + // We could have links which are faulty and cannot be initialized. But proceeding + // the initialization sequence allows us to use other non-faulty links. Therefore + // return success always. + // + return NVL_SUCCESS; +} + +/** + * Get receiver detect status on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags - Async/Sync + * + * return NvlStatus + */ +NvlStatus +nvlink_core_get_rx_detect +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvU32 i; + + // Sanity check the links array + nvlink_assert(links != NULL); + + // Return early if link array is empty + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link array is empty\n", + __FUNCTION__)); + return NVL_SUCCESS; + } + + for (i = 0; i < numLinks; i++) + { + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + // If receiver detect has passed for the link, move to next link + if (links[i]->bRxDetected) + continue; + + // + // In NVLink3.0 and 3.1, RXDET must be called serially (Bug 2546220). 
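+ // (nvlink_core_set_rx_detect() polls the RXDET status inline for those
+ // versions.)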
+ // So this would have been already addressed + // + if ((links[i]->version == NVLINK_DEVICE_VERSION_30) || + (links[i]->version == NVLINK_DEVICE_VERSION_31)) + continue; + + if (links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + + switch (linkMode) + { + case NVLINK_LINKSTATE_SAFE: + case NVLINK_LINKSTATE_HS: + { + links[i]->bRxDetected = NV_TRUE; + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s is already trained to SAFE or HIGH SPEED. " + "RXDET should have passed on the link. Set bRxDetected to TRUE\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + case NVLINK_LINKSTATE_FAULT: + case NVLINK_LINKSTATE_RECOVERY: + case NVLINK_LINKSTATE_FAIL: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link in bad state. Cannot poll RXDET from current state for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + default: + { + switch (links[i]->rx_sublink_state) + { + case NVLINK_SUBLINK_STATE_RX_RXCAL: + continue; + + default: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Checking RXDET status on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + if ((links[i]->link_handlers->get_rx_detect(links[i])) != 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Receiver detect failed for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + else + { + links[i]->bRxDetected = NV_TRUE; + } + break; + } + } + break; + } + } + } + + // + // We could have links which are faulty and cannot be initialized. But proceeding + // the initialization sequence allows us to use other non-faulty links. Therefore + // return success always. 
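+ // (Links whose bRxDetected flag is still NV_FALSE after this step are
+ // skipped by the subsequent bring-up steps.)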
+ // + return NVL_SUCCESS; +} + +/** + * Get Enable TX common mode on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags - Async/Sync + * + * return NvlStatus + */ +NvlStatus +nvlink_core_enable_common_mode +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvU32 i; + + // Sanity check the links array + nvlink_assert(links != NULL); + + // Return early if link array is empty + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link array is empty\n", + __FUNCTION__)); + return NVL_SUCCESS; + } + + for (i = 0; i < numLinks; i++) + { + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + if (!links[i]->bRxDetected) + { + // link did not pass RXDET, don't do anything + continue; + } + + if (links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + switch (linkMode) + { + case NVLINK_LINKSTATE_SAFE: + case NVLINK_LINKSTATE_HS: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s is already trained to SAFE or HIGH SPEED\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + case NVLINK_LINKSTATE_FAULT: + case NVLINK_LINKSTATE_RECOVERY: + case NVLINK_LINKSTATE_FAIL: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Cannot put Tx in common mode from current state for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + default: + { + switch (links[i]->tx_sublink_state) + { + case NVLINK_SUBLINK_STATE_TX_COMMON_MODE: + case NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE: + case NVLINK_SUBLINK_STATE_TX_DATA_READY: + continue; + + default: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Setting common mode on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + if ((links[i]->link_handlers->set_tx_mode(links[i], + NVLINK_SUBLINK_STATE_TX_COMMON_MODE, + flags)) != 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to put Tx in common mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + links[i]->bTxCommonModeFail = NV_TRUE; + } + break; + } + } + break; + } + } + } + + // + // We could have links which are faulty and cannot be initialized. But proceeding + // the initialization sequence allows us to use other non-faulty links. Therefore + // return success always. 
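+ // (A failure to enter common mode sets bTxCommonModeFail above, which causes
+ // the later calibration, data-ready and SAFE-training steps to skip the link.)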
+ // + return NVL_SUCCESS; +} + +/** + * Perform RX calibration on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags - Async/Sync + * + * return NvlStatus + */ +NvlStatus +nvlink_core_calibrate_links +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvU32 i; + + // Sanity check the links array + nvlink_assert(links != NULL); + + // Return early if link array is empty + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link array is empty\n", + __FUNCTION__)); + return NVL_SUCCESS; + } + + for (i = 0; i < numLinks; i++) + { + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + // If receiver detect failed for the link, move to next link + if (!links[i]->bRxDetected || links[i]->bTxCommonModeFail) + continue; + + status = links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + switch (linkMode) + { + case NVLINK_LINKSTATE_SAFE: + case NVLINK_LINKSTATE_HS: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s is already trained to SAFE or HIGH SPEED\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + case NVLINK_LINKSTATE_FAULT: + case NVLINK_LINKSTATE_RECOVERY: + case NVLINK_LINKSTATE_FAIL: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Cannot put Rx in RXCAL mode from current state for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + default: + { + if (links[i]->rx_sublink_state == NVLINK_SUBLINK_STATE_RX_RXCAL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Sublink already in RXCAL on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Setting RXCAL on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + if ((links[i]->link_handlers->set_rx_mode(links[i], + NVLINK_SUBLINK_STATE_RX_RXCAL, + flags)) != 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to put Rx in RXCAL mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + break; + } + } + } + + // + // We could have links which are faulty and cannot be initialized. But proceeding + // the initialization sequence allows us to use other non-faulty links. Therefore + // return success always. 
+    //
+    return NVL_SUCCESS;
+}
+
+/**
+ * Disable TX common mode on the given array of links
+ *
+ * @param[in]  links     Array of nvlink_link pointers
+ * @param[in]  numLinks  Number of links in the array
+ * @param[in]  flags     Flags - Async/Sync
+ *
+ * return NvlStatus
+ */
+NvlStatus
+nvlink_core_disable_common_mode
+(
+    nvlink_link **links,
+    NvU32         numLinks,
+    NvU32         flags
+)
+{
+    NvU32 i;
+
+    // Sanity check the links array
+    nvlink_assert(links != NULL);
+
+    // Return early if link array is empty
+    if (numLinks == 0)
+    {
+        NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO,
+            "%s: Link array is empty\n",
+            __FUNCTION__));
+        return NVL_SUCCESS;
+    }
+
+    for (i = 0; i < numLinks; i++)
+    {
+        NvlStatus status   = NVL_SUCCESS;
+        NvU64     linkMode = NVLINK_LINKSTATE_OFF;
+
+        if (!links[i]->bRxDetected || links[i]->bTxCommonModeFail)
+        {
+            // link did not pass RXDET or failed in common mode, don't do anything
+            continue;
+        }
+
+        status = links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode);
+        if (status != NVL_SUCCESS)
+        {
+            NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
+                "%s: Unable to get link mode for %s:%s\n",
+                __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName));
+            continue;
+        }
+
+        switch (linkMode)
+        {
+            case NVLINK_LINKSTATE_SAFE:
+            case NVLINK_LINKSTATE_HS:
+            {
+                NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO,
+                    "%s: %s:%s is already trained to SAFE or HIGH SPEED\n",
+                    __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName));
+                continue;
+            }
+            case NVLINK_LINKSTATE_FAULT:
+            case NVLINK_LINKSTATE_RECOVERY:
+            case NVLINK_LINKSTATE_FAIL:
+            {
+                NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
+                    "%s: Cannot disable Tx common mode from current state for %s:%s\n",
+                    __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName));
+                continue;
+            }
+            default:
+            {
+                switch (links[i]->tx_sublink_state)
+                {
+                    case NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE:
+                    case NVLINK_SUBLINK_STATE_TX_DATA_READY:
+                        continue;
+
+                    default:
+                    {
+                        NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO,
+                            "%s: Disabling common mode on %s:%s\n",
+                            __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName));
+
+                        if ((links[i]->link_handlers->set_tx_mode(links[i],
+                                                                  NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE,
+                                                                  flags)) != 0)
+                        {
+                            NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
+                                "%s: Unable to disable Tx common mode for %s:%s\n",
+                                __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName));
+                        }
+                        break;
+                    }
+                }
+                break;
+            }
+        }
+    }
+
+    //
+    // We could have links which are faulty and cannot be initialized. But proceeding
+    // with the initialization sequence allows us to use other non-faulty links. Therefore
+    // return success always.
+ // + return NVL_SUCCESS; +} + +/** + * Enable data on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags - Async/Sync + * + * return NvlStatus + */ +NvlStatus +nvlink_core_enable_data +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvU32 i; + + // Sanity check the links array + nvlink_assert(links != NULL); + + // Return early if link array is empty + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link array is empty\n", + __FUNCTION__)); + return NVL_SUCCESS; + } + + for (i = 0; i < numLinks; i++) + { + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + // If receiver detect failed for the link, move to next link + if (!links[i]->bRxDetected || links[i]->bTxCommonModeFail) + continue; + + status = links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + switch (linkMode) + { + case NVLINK_LINKSTATE_SAFE: + case NVLINK_LINKSTATE_HS: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s is already trained to SAFE or HIGH SPEED\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + case NVLINK_LINKSTATE_FAULT: + case NVLINK_LINKSTATE_RECOVERY: + case NVLINK_LINKSTATE_FAIL: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Cannot put Data Ready and Enable from current state for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + default: + { + switch (links[i]->tx_sublink_state) + { + case NVLINK_SUBLINK_STATE_TX_DATA_READY: + continue; + + default: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Setting Data Ready on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + if ((links[i]->link_handlers->set_tx_mode(links[i], + NVLINK_SUBLINK_STATE_TX_DATA_READY, + flags)) != 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set Data Ready and Data Enable %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + break; + } + } + break; + } + } + } + + // + // We could have links which are faulty and cannot be initialized. But proceeding + // the initialization sequence allows us to use other non-faulty links. Therefore + // return success always. 
+ // + return NVL_SUCCESS; +} + +/** + * Send INITNEGOTIATE command on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * @param[in] flags Flags - Async/Sync + * + * return NvlStatus + */ +NvlStatus +nvlink_core_initnegotiate +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvU32 i; + + // Sanity check the links array + nvlink_assert(links != NULL); + + // Return early if link array is empty + if (numLinks == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link array is empty\n", + __FUNCTION__)); + return NVL_SUCCESS; + } + + for (i = 0; i < numLinks; i++) + { + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + // If receiver detect failed for the link, move to next link + if (!links[i]->bRxDetected || links[i]->bTxCommonModeFail || + links[i]->bSafeTransitionFail || links[i]->bInitphase5Fails) + { + continue; + } + + if (links[i]->version < NVLINK_DEVICE_VERSION_30) + continue; + + // Packet injection can only happen on links that are in SAFE or ACTIVE + status = links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + if (linkMode == NVLINK_LINKSTATE_HS) + { + continue; + } + else if (linkMode == NVLINK_LINKSTATE_SAFE) + { + // Check if the link has reached failed state + if (links[i]->state == NVLINK_LINKSTATE_FAIL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s:%s marked as failed.\n", + links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + if ((links[i]->link_handlers->set_dl_link_mode(links[i], + NVLINK_LINKSTATE_INITNEGOTIATE, + flags)) != 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: INITNEGOTIATE Failed on device:link %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + } + } + + // Bug 2398907 mentioned that a link pair can take upto 125us for DL stat to have CONFIG_GOOD. 
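+    //
+    // Note: assuming nvlink_sleep() takes a millisecond count (the OS-layer
+    // sleep shim), the 1 ms wait below comfortably covers that ~125 us window
+    // before the DL stat CONFIG_GOOD poll in the second loop.
+    //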
+ nvlink_sleep(1); + + for (i = 0; i < numLinks; i++) + { + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + // If receiver detect failed for the link, move to next link + if (!links[i]->bRxDetected || links[i]->bTxCommonModeFail || + links[i]->bSafeTransitionFail) + { + continue; + } + + if (links[i]->version < NVLINK_DEVICE_VERSION_30) + continue; + + status = links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + if ((linkMode == NVLINK_LINKSTATE_SAFE) || (linkMode == NVLINK_LINKSTATE_HS)) + { + // Check if the link has reached failed state + if (links[i]->state == NVLINK_LINKSTATE_FAIL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s:%s marked as failed.\n", + links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + if (!(links[i]->bInitnegotiateConfigGood)) + { + if (!((links[i]->link_handlers->set_dl_link_mode(links[i], + NVLINK_LINKSTATE_POST_INITNEGOTIATE, + flags)) == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: DL stat CONFIG GOOD failed on %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + } + } + } + + return NVL_SUCCESS; +} + +/** + * Poll on SAFE/SWCFG on the given link + * + * @param[in] link nvlink_link pointer + * + * return NvlStatus + */ +NvlStatus +nvlink_core_wait_for_link_init +( + nvlink_link *link +) +{ + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + // + // Check for SW fail flags to exit early + // + // Note: We don't check for !bRxDetected here since driver unload/reload + // clears our SW state leading incorrectly skipping links (bug 3164375). + // For RXDET status, the linkstate checks below are sufficient + // + if (link->bTxCommonModeFail || link->bInitphase5Fails) + { + return NVL_ERR_INVALID_STATE; + } + + status = link->link_handlers->get_dl_link_mode(link, &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + return status; + } + + // skip polling if link become faulty + if ((linkMode == NVLINK_LINKSTATE_FAULT) || + (linkMode == NVLINK_LINKSTATE_RECOVERY) || + (linkMode == NVLINK_LINKSTATE_FAIL)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to put link in SAFE %s:%s\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + // link is in bad state. 
don't do anything + return NVL_ERR_INVALID_STATE; + } + + // skip polling if link already transitioned or has not begun training + if ((linkMode == NVLINK_LINKSTATE_SAFE) || + (linkMode == NVLINK_LINKSTATE_HS)) + { + return NVL_SUCCESS; + } + else if ((linkMode == NVLINK_LINKSTATE_OFF) || + (linkMode == NVLINK_LINKSTATE_RESET)) + { + // Failing status is expected for non-initialized links + return NVL_ERR_GENERIC; + } + + // reset state since we're trying again + link->bSafeTransitionFail = NV_FALSE; + + // poll for link state + status = nvlink_core_poll_link_state(link, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to put link in SAFE %s:%s\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + link->bSafeTransitionFail = NV_TRUE; + return status; + } + + // poll sublink state as well. + status = nvlink_core_poll_sublink_state(link, + NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + link, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to put sublink in SAFE %s:%s\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + link->bSafeTransitionFail = NV_TRUE; + return status; + } + + // link is in SAFE state, initialization is success. + return NVL_SUCCESS; +} + diff --git a/src/common/nvlink/kernel/nvlink/core/nvlink_ioctl.c b/src/common/nvlink/kernel/nvlink/core/nvlink_ioctl.c new file mode 100644 index 000000000..a0c0880b7 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/core/nvlink_ioctl.c @@ -0,0 +1,692 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvVer.h" +#include "nvlink_os.h" +#include "nvlink_lib_ctrl.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +static nvlink_device_type +_nvlink_core_map_device_type +( + NvU64 type +) +{ + nvlink_device_type devType; + + switch (type) + { + case NVLINK_DEVICE_TYPE_IBMNPU: + devType = nvlink_device_type_ibmnpu; + break; + case NVLINK_DEVICE_TYPE_GPU: + devType = nvlink_device_type_gpu; + break; + case NVLINK_DEVICE_TYPE_NVSWITCH: + devType = nvlink_device_type_nvswitch; + break; + default: + devType = nvlink_device_type_unknown; + break; + } + + return devType; +} + +static nvlink_link_mode +_nvlink_core_map_link_state +( + NvU64 dlState, + NvU64 tlState +) +{ + nvlink_link_mode mode; + + // + // If TL has entered contain, return contain. + // Otherwise, return DL state + // + if (tlState == NVLINK_LINKSTATE_CONTAIN) + { + mode = nvlink_link_mode_contain; + return mode; + } + + switch (dlState) + { + case NVLINK_LINKSTATE_OFF: + mode = nvlink_link_mode_off; + break; + case NVLINK_LINKSTATE_HS: + mode = nvlink_link_mode_active; + break; + case NVLINK_LINKSTATE_SAFE: + mode = nvlink_link_mode_swcfg; + break; + case NVLINK_LINKSTATE_FAULT: + mode = nvlink_link_mode_fault; + break; + case NVLINK_LINKSTATE_RECOVERY: + mode = nvlink_link_mode_recovery; + break; + case NVLINK_LINKSTATE_FAIL: + mode = nvlink_link_mode_fail; + break; + case NVLINK_LINKSTATE_DETECT: + mode = nvlink_link_mode_detect; + break; + case NVLINK_LINKSTATE_RESET: + mode = nvlink_link_mode_reset; + break; + case NVLINK_LINKSTATE_ENABLE_PM: + mode = nvlink_link_mode_enable_pm; + break; + case NVLINK_LINKSTATE_DISABLE_PM: + mode = nvlink_link_mode_disable_pm; + break; + case NVLINK_LINKSTATE_TRAFFIC_SETUP: + mode = nvlink_link_mode_traffic_setup; + break; + default: + mode = nvlink_link_mode_unknown; + break; + } + + return mode; +} + +static nvlink_tx_sublink_mode +_nvlink_core_map_tx_sublink_state +( + NvU64 state +) +{ + nvlink_tx_sublink_mode mode; + + switch (state) + { + case NVLINK_SUBLINK_STATE_TX_HS: + mode = nvlink_tx_sublink_mode_hs; + break; + case NVLINK_SUBLINK_STATE_TX_SINGLE_LANE: + mode = nvlink_tx_sublink_mode_single_lane; + break; + case NVLINK_SUBLINK_STATE_TX_TRAIN: + mode = nvlink_tx_sublink_mode_train; + break; + case NVLINK_SUBLINK_STATE_TX_SAFE: + mode = nvlink_tx_sublink_mode_safe; + break; + case NVLINK_SUBLINK_STATE_TX_OFF: + mode = nvlink_tx_sublink_mode_off; + break; + case NVLINK_SUBLINK_STATE_TX_COMMON_MODE: + mode = nvlink_tx_sublink_mode_common_mode; + break; + case NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE: + mode = nvlink_tx_sublink_mode_common_mode_disable; + break; + case NVLINK_SUBLINK_STATE_TX_DATA_READY: + mode = nvlink_tx_sublink_mode_data_ready; + break; + case NVLINK_SUBLINK_STATE_TX_EQ: + mode = nvlink_tx_sublink_mode_tx_eq; + break; + case NVLINK_SUBLINK_STATE_TX_PRBS_EN: + mode = nvlink_tx_sublink_mode_pbrs_en; + break; + case NVLINK_SUBLINK_STATE_TX_POST_HS: + mode = nvlink_tx_sublink_mode_post_hs; + break; + default: + mode = nvlink_tx_sublink_mode_unknown; + break; + } + + return mode; +} + +static nvlink_rx_sublink_mode +_nvlink_core_map_rx_sublink_state +( + NvU64 state +) +{ + nvlink_rx_sublink_mode mode; + + switch (state) + { + case NVLINK_SUBLINK_STATE_RX_HS: + mode = nvlink_rx_sublink_mode_hs; + break; + case NVLINK_SUBLINK_STATE_RX_SINGLE_LANE: + mode = nvlink_rx_sublink_mode_single_lane; + break; + case NVLINK_SUBLINK_STATE_RX_TRAIN: + mode = nvlink_rx_sublink_mode_train; + break; + case 
NVLINK_SUBLINK_STATE_RX_SAFE: + mode = nvlink_rx_sublink_mode_safe; + break; + case NVLINK_SUBLINK_STATE_RX_OFF: + mode = nvlink_rx_sublink_mode_off; + break; + case NVLINK_SUBLINK_STATE_RX_RXCAL: + mode = nvlink_rx_sublink_mode_rxcal; + break; + default: + mode = nvlink_rx_sublink_mode_unknown; + break; + } + + return mode; +} + +/** + * Check if the link is already initialized + * + * Note: A link is initialized if it is in SWCFG or ACTIVE state + * + * @param[in] linkMode Link state + * + * NvBool + */ +static NvBool +_nvlink_core_is_link_initialized +( + NvU64 linkMode +) +{ + if ((linkMode == NVLINK_LINKSTATE_SAFE) || + (linkMode == NVLINK_LINKSTATE_HS)) + { + return NV_TRUE; + } + else + { + return NV_FALSE; + } +} + +/** + * Get the mask of enabled links for the device + * + * @param[in] dev nvlink_device pointer + * + * NvU64 + */ +static NvU64 +_nvlink_core_get_enabled_link_mask +( + nvlink_device *dev +) +{ + NvU64 linkMask = 0x0; + nvlink_link *link = NULL; + + nvListForEachEntry(link, &dev->link_list, node) + { + linkMask |= NVBIT64(link->linkNumber); + } + + return linkMask; +} + +/** + * Check if the device type is supported + * + * @param[in] devType Device type + * + * NvBool + */ +NvBool +nvlink_core_is_supported_device_type +( + NvU32 devType +) +{ + if ((devType == nvlink_device_type_ibmnpu) || + (devType == nvlink_device_type_gpu) || + (devType == nvlink_device_type_nvswitch)) + { + return NV_TRUE; + } + else + { + return NV_FALSE; + } +} + +/** + * Get the link and sublink states for the endpoint + * + * @param[in] link nvlink_device * + * @param[out] linkState nvlink_link_state * + */ +void +nvlink_core_get_endpoint_state +( + nvlink_link *link, + nvlink_link_state *linkState +) +{ + NvlStatus status = NVL_SUCCESS; + NvU32 txSubLinkSubstate = NVLINK_SUBLINK_SUBSTATE_TX_STABLE; + NvU32 rxSubLinkSubState = NVLINK_SUBLINK_SUBSTATE_RX_STABLE; + NvU64 state = NVLINK_LINKSTATE_INVALID; + NvU64 dlState = NVLINK_LINKSTATE_INVALID; + NvU64 tlState = NVLINK_LINKSTATE_INVALID; + + // + // This is a best case effort to return the current state of the link + // to user as part of the ioctl call. Typically, this call should succeed + // unless the corresponding HAL/Callbacks are not registered, which can + // happen during early development cycle. Adding an assert to catch that + // in debug builds. 
+ // + + status = link->link_handlers->get_dl_link_mode(link, &dlState); + nvlink_assert(status == NVL_SUCCESS); + + link->link_handlers->get_tl_link_mode(link, &tlState); + + linkState->linkMode = _nvlink_core_map_link_state(dlState, tlState); + + status = link->link_handlers->get_tx_mode(link, + &state, + &txSubLinkSubstate); + nvlink_assert(status == NVL_SUCCESS); + + linkState->txSubLinkMode = _nvlink_core_map_tx_sublink_state(state); + + status = link->link_handlers->get_rx_mode(link, + &state, + &rxSubLinkSubState); + nvlink_assert(status == NVL_SUCCESS); + + linkState->rxSubLinkMode = _nvlink_core_map_rx_sublink_state(state); +} + +/** + * Get the nvlink_device * from the PCI DBDF + * + * @param[in] devInfo PCI Information + * @param[out] dev nvlink_device * + */ +void +nvlink_core_get_device_by_devinfo +( + nvlink_device_info *devInfo, + nvlink_device **dev +) +{ + nvlink_device *tmpDev = NULL; + + FOR_EACH_DEVICE_REGISTERED(tmpDev, nvlinkLibCtx.nv_devicelist_head, node) + { + if ( (tmpDev->nodeId == devInfo->nodeId) && + (tmpDev->pciInfo.domain == devInfo->pciInfo.domain) && + (tmpDev->pciInfo.bus == devInfo->pciInfo.bus) && + (tmpDev->pciInfo.device == devInfo->pciInfo.device) && + (tmpDev->pciInfo.function == devInfo->pciInfo.function)) + { + *dev = tmpDev; + return; + } + } + + // not found any matching device + *dev = NULL; +} + +/** + * Get the nvlink_link * from the PCI DBDF and link# + * + * @param[in] endPoint PCI Information and link# + * @param[out] link nvlink_link * + */ +void +nvlink_core_get_link_by_endpoint +( + nvlink_endpoint *endPoint, + nvlink_link **link +) +{ + nvlink_device *tmpDev = NULL; + nvlink_link *tmpLink = NULL; + + FOR_EACH_DEVICE_REGISTERED(tmpDev, nvlinkLibCtx.nv_devicelist_head, node) + { + if ((tmpDev->nodeId == endPoint->nodeId) && + (tmpDev->pciInfo.domain == endPoint->pciInfo.domain) && + (tmpDev->pciInfo.bus == endPoint->pciInfo.bus) && + (tmpDev->pciInfo.device == endPoint->pciInfo.device) && + (tmpDev->pciInfo.function == endPoint->pciInfo.function)) + { + FOR_EACH_LINK_REGISTERED(tmpLink, tmpDev, node) + { + if (tmpLink->linkNumber == endPoint->linkIndex) + { + *link = tmpLink; + return; + } + } + } + } + + // not found any matching link + *link = NULL; +} + +/** + * Given the nvlink_link ptr, copy the endpoint details for the link + * + * @param[in] connLink nvlink_link * + * @param[out] endPointInfo Endpoint details for the link + */ +void +nvlink_core_copy_endpoint_info +( + nvlink_link *connLink, + nvlink_endpoint *endPointInfo +) +{ + nvlink_device *dev = connLink->dev; + + endPointInfo->pciInfo.domain = dev->pciInfo.domain; + endPointInfo->pciInfo.bus = dev->pciInfo.bus; + endPointInfo->pciInfo.device = dev->pciInfo.device; + endPointInfo->pciInfo.function = dev->pciInfo.function; + endPointInfo->nodeId = dev->nodeId; + endPointInfo->linkIndex = connLink->linkNumber; +} + +/** + * Given the nvlink_device ptr, copy the device details + * + * @param[in] tmpDev nvlink_device * + * @param[out] devInfo Device details + */ +void +nvlink_core_copy_device_info +( + nvlink_device *tmpDev, + nvlink_detailed_dev_info *devInfo +) +{ + devInfo->pciInfo.domain = tmpDev->pciInfo.domain; + devInfo->pciInfo.bus = tmpDev->pciInfo.bus; + devInfo->pciInfo.device = tmpDev->pciInfo.device; + devInfo->pciInfo.function = tmpDev->pciInfo.function; + devInfo->numLinks = nvListCount(&tmpDev->link_list); + devInfo->devType = _nvlink_core_map_device_type(tmpDev->type); + devInfo->enabledLinkMask = _nvlink_core_get_enabled_link_mask(tmpDev); + // copy device uuid 
information if available + if (tmpDev->uuid != NULL) + { + nvlink_memcpy(devInfo->devUuid, tmpDev->uuid, NVLINK_UUID_LEN); + } + + // copy device name information if available + if (tmpDev->deviceName != NULL) + { + int nameLen = nvlink_strlen(tmpDev->deviceName); + int copyLen = 0; + copyLen = (nameLen > NVLINK_DEVICE_NAME_LEN_MAX) ? NVLINK_DEVICE_NAME_LEN_MAX : nameLen; + nvlink_memcpy(devInfo->deviceName, tmpDev->deviceName, copyLen); + } +} + +/** + * Transition to SWCFG on the given array of links + * + * @param[in] links Array of nvlink_link pointers + * @param[in] numLinks Number of links in the array + * + * return NvlStatus + */ +NvlStatus +nvlink_core_link_init_async +( + nvlink_link **links, + NvU32 numLinks +) +{ + NvU32 i; + + // Sanity check the links array for non-zero links + nvlink_assert((links != NULL) && (numLinks > 0)); + + for (i = 0; i < numLinks; i++) + { + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + if (!links[i]->bRxDetected || links[i]->bTxCommonModeFail) + { + // link did not pass RXDET or failed in common mode, don't do anything + continue; + } + + status = links[i]->link_handlers->get_dl_link_mode(links[i], &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + // TODO : Handle fault checking + if (_nvlink_core_is_link_initialized(linkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s is already trained to SAFE or HIGH SPEED\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + + // link already in higher state. don't do anything + continue; + } + + // Put the link in SAFE state + links[i]->link_handlers->set_dl_link_mode(links[i], + NVLINK_LINKSTATE_SAFE, + NVLINK_STATE_CHANGE_SYNC); + } + + // + // We could have links which are faulty and cannot be initialized. But proceeding + // the initialization sequence allows us to use other non-faulty links. Therefore + // return success always. + // + return NVL_SUCCESS; +} + +/** + * Generate a discovery token for the given link + * + * @param[in] link nvlink_link pointer + * + * return NvU64 + */ +NvU64 +nvlink_core_get_link_discovery_token +( + nvlink_link *link +) +{ + NvU64 token = 0; + + // + // generate a unique token value for discovering connections. + // link->token is the memory address of the allocated link object, + // which is unique within a node. Adding fabric node id + // to make it unique across different nodes. 
+ // + + token = link->token & ~((NvU64)NVLINK_FABRIC_NODE_ID_MASK << NVLINK_FABRIC_NODE_ID_POS); + token = token | ((NvU64)link->dev->nodeId << NVLINK_FABRIC_NODE_ID_POS); + return token; +} + +/** + * Write the dicovery token for the given link + * + * @param[in] link nvlink_link pointer + * @param[in] token Discovery token to write + * + * return NvlStatus + */ +NvlStatus +nvlink_core_write_link_discovery_token +( + nvlink_link *link, + NvU64 token +) +{ + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + // Packet injection can only happen if link is in SWCFG/ACTIVE + status = link->link_handlers->get_dl_link_mode(link, &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + return status; + } + + if (!_nvlink_core_is_link_initialized(linkMode)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Packet injection only works for links in SAFE or HS %s:%s.\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + return NVL_ERR_INVALID_STATE; + } + + // Send the token (AN0 packet) + link->link_handlers->write_discovery_token(link, token); + + return NVL_SUCCESS; +} + +/** + * Read the dicovery token for the given link + * + * @param[in] link nvlink_link pointer + * + * return NvU64 + */ +NvU64 +nvlink_core_read_link_discovery_token +( + nvlink_link *link +) +{ + NvU64 token = 0; + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + + status = link->link_handlers->get_dl_link_mode(link, &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + return 0; + } + + if (!_nvlink_core_is_link_initialized(linkMode)) + { + return 0; + } + + // query discovery token from the link + link->link_handlers->read_discovery_token(link, (NvU64 *) &token); + + return token; +} + +/** + * Detect the connection by correlating the tokens + * + * @param[in] srcLink nvlink_link pointer + * @param[in] writeToken write token + * @param[in] skipReadToken token vs sid for connection detection + * + */ +void +nvlink_core_correlate_conn_by_token +( + nvlink_link *srcLink, + NvU64 writeToken, + NvBool skipReadToken +) +{ + nvlink_device *dev = NULL; + nvlink_link *dstLink = NULL; + NvU64 readToken = 0; + + FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(dstLink, dev, node) + { + nvlink_intranode_conn *conn = NULL; + + nvlink_core_get_intranode_conn(dstLink, &conn); + if (conn != NULL) + { + // skip token read if we already have a connection for the dstLink + continue; + } + + if (skipReadToken) + { + if ((srcLink->remoteSid == dstLink->localSid) && + (srcLink->remoteLinkId == dstLink->linkNumber)) + { + readToken = writeToken; + } + } + else + { + // Read the RX sublink for the AN0 packet + readToken = nvlink_core_read_link_discovery_token(dstLink); + } + + // If token matches, establish the connection + if (writeToken == readToken) + { + // Add to the connections list + nvlink_core_add_intranode_conn(srcLink, dstLink); + return; + } + } + } +} diff --git a/src/common/nvlink/kernel/nvlink/core/nvlink_link_mgmt.c b/src/common/nvlink/kernel/nvlink/core/nvlink_link_mgmt.c new file mode 100644 index 000000000..717cd3eb1 --- /dev/null +++ 
b/src/common/nvlink/kernel/nvlink/core/nvlink_link_mgmt.c @@ -0,0 +1,446 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +/** + * For the given link, check whether the link state is at the requested state. + * + * @param[in] link NVLink link pointer + * @param[in] linkState Target Link State + * + * return NV_TRUE if the link is in the given state + */ +NvBool +nvlink_core_check_link_state +( + nvlink_link *link, + NvU64 linkState +) +{ + NvU64 crntDlLinkMode = NVLINK_LINKSTATE_OFF; + NvU64 crntTlLinkMode = NVLINK_LINKSTATE_OFF; + NvlStatus status = NVL_SUCCESS; + + switch (linkState) + { + case NVLINK_LINKSTATE_OFF: + case NVLINK_LINKSTATE_RESET: + case NVLINK_LINKSTATE_SAFE: + case NVLINK_LINKSTATE_HS: + { + status = link->link_handlers->get_dl_link_mode(link, &crntDlLinkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get DL link mode for %s:%s\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + return NV_FALSE; + } + + if (crntDlLinkMode == linkState) + { + return NV_TRUE; + } + break; + } + case NVLINK_LINKSTATE_SLEEP: + { + status = link->link_handlers->get_tl_link_mode(link, &crntTlLinkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get TL link mode for %s:%s\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + return NV_FALSE; + } + + if (crntTlLinkMode == linkState) + { + return NV_TRUE; + } + break; + } + + } + + // return false for default case or the states are not matching + return NV_FALSE; +} + +/** + * For the given link, check whether the tx sublink state is at the + * requested state. 
+ *
+ * @param[in]  link            NVLink link pointer
+ * @param[in]  txSublinkState  Target Tx Sublink State
+ *
+ * return NV_TRUE if the tx sublink is in the given state
+ */
+NvBool
+nvlink_core_check_tx_sublink_state
+(
+    nvlink_link *link,
+    NvU64        txSublinkState
+)
+{
+    NvlStatus status = NVL_SUCCESS;
+
+    NvU64 crntTxSublinkMode    = NVLINK_SUBLINK_STATE_TX_OFF;
+    NvU32 crntTxSublinkSubMode = NVLINK_SUBLINK_SUBSTATE_TX_STABLE;
+
+    status = link->link_handlers->get_tx_mode(link,
+                                              &crntTxSublinkMode,
+                                              &crntTxSublinkSubMode);
+    if (status != NVL_SUCCESS)
+    {
+        NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
+            "%s: Unable to get TX sublink mode for %s:%s\n",
+            __FUNCTION__, link->dev->deviceName, link->linkName));
+        return NV_FALSE;
+    }
+
+    switch (txSublinkState)
+    {
+        case NVLINK_SUBLINK_STATE_TX_OFF:
+        {
+            if (crntTxSublinkMode == NVLINK_SUBLINK_STATE_TX_OFF)
+            {
+                return NV_TRUE;
+            }
+            break;
+        }
+        case NVLINK_SUBLINK_STATE_TX_SAFE:
+        {
+            if (crntTxSublinkMode == NVLINK_SUBLINK_STATE_TX_SAFE)
+            {
+                return NV_TRUE;
+            }
+            break;
+        }
+        case NVLINK_SUBLINK_STATE_TX_HS:
+        {
+            if ((crntTxSublinkMode == NVLINK_SUBLINK_STATE_TX_HS) ||
+                (crntTxSublinkMode == NVLINK_SUBLINK_STATE_TX_SINGLE_LANE))
+            {
+                return NV_TRUE;
+            }
+            break;
+        }
+    }
+
+    // return false for default case or the states are not matching
+    return NV_FALSE;
+}
+
+/**
+ * For the given link, check whether the rx sublink state is at the
+ * requested state.
+ *
+ * @param[in]  link            NVLink link pointer
+ * @param[in]  rxSublinkState  Target Rx Sublink State
+ *
+ * return NV_TRUE if the rx sublink is in the given state
+ */
+NvBool
+nvlink_core_check_rx_sublink_state
+(
+    nvlink_link *link,
+    NvU64        rxSublinkState
+)
+{
+    NvlStatus status = NVL_SUCCESS;
+
+    NvU64 crntRxSublinkMode    = NVLINK_SUBLINK_STATE_RX_OFF;
+    NvU32 crntRxSublinkSubMode = NVLINK_SUBLINK_SUBSTATE_RX_STABLE;
+
+    status = link->link_handlers->get_rx_mode(link,
+                                              &crntRxSublinkMode,
+                                              &crntRxSublinkSubMode);
+    if (status != NVL_SUCCESS)
+    {
+        NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
+            "%s: Unable to get RX sublink mode for %s:%s\n",
+            __FUNCTION__, link->dev->deviceName, link->linkName));
+        return NV_FALSE;
+    }
+
+    switch (rxSublinkState)
+    {
+        case NVLINK_SUBLINK_STATE_RX_OFF:
+        {
+            if (crntRxSublinkMode == NVLINK_SUBLINK_STATE_RX_OFF)
+            {
+                return NV_TRUE;
+            }
+            break;
+        }
+        case NVLINK_SUBLINK_STATE_RX_SAFE:
+        {
+            if (crntRxSublinkMode == NVLINK_SUBLINK_STATE_RX_SAFE)
+            {
+                return NV_TRUE;
+            }
+            break;
+        }
+        case NVLINK_SUBLINK_STATE_RX_HS:
+        {
+            if ((crntRxSublinkMode == NVLINK_SUBLINK_STATE_RX_HS) ||
+                (crntRxSublinkMode == NVLINK_SUBLINK_STATE_RX_SINGLE_LANE))
+            {
+                return NV_TRUE;
+            }
+            break;
+        }
+    }
+
+    // return false for default case or the states are not matching
+    return NV_FALSE;
+}
+
+/**
+ * Poll the link to reach the specified state up to the given timeout.
+ * Link state transition is considered failed once timeout occurs.
+ * + * @param[in] link NVLink link pointer + * @param[in] linkState Target Link state + * @param[in] timeout Timeout + * + * return NVL_SUCCESS if the link transitioned to the target state + */ +NvlStatus +nvlink_core_poll_link_state +( + nvlink_link *link, + NvU64 linkState, + NvU32 timeout +) +{ + NvU64 currentLinkState = ~0; + + link->link_handlers->get_dl_link_mode(link, ¤tLinkState); + + while (currentLinkState != linkState) + { + nvlink_sleep(1); + + timeout--; + + if (!timeout) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Timeout occured while polling on link.\n", + __FUNCTION__)); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link info: device: %s link: %s link state " + "expected: 0x%08llx actual: 0x%08llx.\n", + __FUNCTION__, link->dev->deviceName, link->linkName, + linkState, currentLinkState)); + + return NVL_ERR_INVALID_STATE; + } + + link->link_handlers->get_dl_link_mode(link, ¤tLinkState); + } + + return NVL_SUCCESS; +} + +/** + * Poll for a given timeout period for the sublink to reach a given + * state. Sublink state transition is considered as failed once the + * timeout occurs + * + * @param[in] localTxSubLink Local NVLink pointer + * @param[in] localTxSubLinkState Local Tx Sublink State + * @param[in] localTxSubLinkSubState Local Tx Sublink Substate + * @param[in] remoteRxSubLink Remote NVLink pointer + * @param[in] remoteRxSubLinkState Remote Rx Sublink State + * @param[in] remoteRxSubLinkSubState Remote Rx Sublink Substate + * @param[in] timeout Timeout + * + * return NVL_SUCCESS is the sublink transitioned to the given state + */ +NvlStatus +nvlink_core_poll_sublink_state +( + nvlink_link *localTxSubLink, + NvU64 localTxSubLinkState, + NvU32 localTxSubLinkSubState, + nvlink_link *remoteRxSubLink, + NvU64 remoteRxSubLinkState, + NvU32 remoteRxSubLinkSubState, + NvU32 timeout +) +{ + NvlStatus status = NVL_SUCCESS; + + // check for tx sublink if a valid link is specified + if (localTxSubLink) + { + status = nvlink_core_poll_tx_sublink_state(localTxSubLink, + localTxSubLinkState, + localTxSubLinkSubState, + timeout); + if (status != NVL_SUCCESS) + { + // polling on tx sublink failed. skip any rx polling + return status; + } + } + + // + // check for rx sublink if a valid link is specified and no + // timeout on tx sublink (if it was specified) + // + if (remoteRxSubLink) + { + status = nvlink_core_poll_rx_sublink_state(remoteRxSubLink, + remoteRxSubLinkState, + remoteRxSubLinkSubState, + timeout); + } + + return status; +} + +/** + * Poll for the tx sublink to reach the specified state upto the given + * timeout. Sublink state transition is considered failed once timeout + * occurs. 
+ * + * @param[in] link NVLink pointer + * @param[in] txSublinkState Tx Sublink State + * @param[in] txSublinkSubState Tx Sublink Substate + * @param[in] timeout Timeout + * + * return NVL_SUCCESS if the tx sublink transitioned to the target state + */ +NvlStatus +nvlink_core_poll_tx_sublink_state +( + nvlink_link *link, + NvU64 txSublinkState, + NvU32 txSublinkSubState, + NvU32 timeout +) +{ + NvU64 currentTxSublinkState = ~0; + NvU32 currentTxSublinkSubState = ~0; + + link->link_handlers->get_tx_mode(link, + ¤tTxSublinkState, + ¤tTxSublinkSubState); + + while (!((currentTxSublinkState == txSublinkState) && + (currentTxSublinkSubState == txSublinkSubState))) + { + nvlink_sleep(1); + + timeout--; + + if (!timeout) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Timeout occured while polling on link.\n", + __FUNCTION__)); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link info: device: %s link: %s txsublink state" + " expected: 0x%08llx actual: 0x%08llx.\n", + __FUNCTION__, link->dev->deviceName, link->linkName, + txSublinkState, currentTxSublinkState)); + + return NVL_ERR_INVALID_STATE; + } + + link->link_handlers->get_tx_mode(link, + ¤tTxSublinkState, + ¤tTxSublinkSubState); + } + + return NVL_SUCCESS; +} + +/** + * Poll for the rx sublink to reach the specified state upto the given + * timeout. Sublink state transition is considered failed once timeout + * occurs. + * + * @param[in] link NVLink pointer + * @param[in] rxSublinkState Rx Sublink State + * @param[in] rxSublinkSubState Rx Sublink Substate + * @param[in] timeout Timeout + * + * return NVL_SUCCESS if the rx sublink transitioned to the target state + */ +NvlStatus +nvlink_core_poll_rx_sublink_state +( + nvlink_link *link, + NvU64 rxSublinkState, + NvU32 rxSublinkSubState, + NvU32 timeout +) +{ + NvU64 currentRxSublinkState = ~0; + NvU32 currentRxSublinkSubState = ~0; + + link->link_handlers->get_rx_mode(link, + ¤tRxSublinkState, + ¤tRxSublinkSubState); + + while (!((currentRxSublinkState == rxSublinkState) && + (currentRxSublinkSubState == rxSublinkSubState))) + { + nvlink_sleep(1); + + timeout--; + + if (!timeout) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Timeout occured while polling on link.\n", + __FUNCTION__)); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link info: device: %s link: %s rxsublink state " + "expected: 0x%08llx actual: 0x%08llx.\n", + __FUNCTION__, link->dev->deviceName, link->linkName, + rxSublinkState, currentRxSublinkState)); + + return NVL_ERR_INVALID_STATE; + } + + link->link_handlers->get_rx_mode(link, + ¤tRxSublinkState, + ¤tRxSublinkSubState); + } + + return NVL_SUCCESS; +} diff --git a/src/common/nvlink/kernel/nvlink/core/nvlink_logger.c b/src/common/nvlink/kernel/nvlink/core/nvlink_logger.c new file mode 100644 index 000000000..f087c9656 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/core/nvlink_logger.c @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +static void _nvlink_core_print_link(nvlink_link *); + +/** + * Print link state of a single nvlink_link + * + * @param[in] link NVLink Link pointer + */ +void +nvlink_core_print_link_state +( + nvlink_link *link +) +{ + NvU64 linkMode = 0; + NvU64 txSublinkMode = 0; + NvU64 rxSublinkMode = 0; + NvU32 txSublinkSubMode = 0; + NvU32 rxSublinkSubMode = 0; + + link->link_handlers->get_dl_link_mode(link, &linkMode); + link->link_handlers->get_tx_mode(link, &txSublinkMode, &txSublinkSubMode); + link->link_handlers->get_rx_mode(link, &rxSublinkMode, &rxSublinkSubMode); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s:%s linkMode: %lld txSublinkMode: %lld rxSublinkMode: %lld\n", + __FUNCTION__, + link->dev->deviceName, link->linkName, + linkMode, txSublinkMode, rxSublinkMode)); +} + +/** + * Print information of a single nvlink intranode connection + * + * @param[in] conn NVLink connection pointer + */ +void +nvlink_core_print_intranode_conn +( + nvlink_intranode_conn *conn +) +{ + if (NULL == conn) return; + + _nvlink_core_print_link(conn->end0); + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, " <---> ")); + _nvlink_core_print_link(conn->end1); + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, "\n")); +} + +/** + * Print link type and device information of a single nvlink_link + * + * @param[in] link NVLink Link pointer + */ +static void +_nvlink_core_print_link +( + nvlink_link *link +) +{ + switch (link->dev->type) + { + case NVLINK_DEVICE_TYPE_GPU: + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, "NVGPU")); + break; + case NVLINK_DEVICE_TYPE_IBMNPU: + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, "IBMNPU")); + break; + case NVLINK_DEVICE_TYPE_NVSWITCH: + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, "NVSWITCH")); + break; + case NVLINK_DEVICE_TYPE_EBRIDGE: + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, "EBRIDGE")); + break; + } + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "(%x): %04x:%02x:%02x.%x %s", + link->dev->pciInfo.device, + link->dev->pciInfo.domain, + link->dev->pciInfo.bus, + link->dev->pciInfo.device, + link->dev->pciInfo.function, + link->linkName)); +} diff --git 
a/src/common/nvlink/kernel/nvlink/core/nvlink_shutdown.c b/src/common/nvlink/kernel/nvlink/core/nvlink_shutdown.c new file mode 100644 index 000000000..ca601a5b0 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/core/nvlink_shutdown.c @@ -0,0 +1,845 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +static void _nvlink_core_clear_link_state(nvlink_link *); + +/** + * [CLEAN SHUTDOWN] + * + * Shutdown given intranode connections from active to L2 state + * + * @param[in] conns Array of connections to transition to L2 + * @param[in] connCount Number of connections in the array + * @param[in] flags Flags to track if training is sync/async + * + * return NVL_SUCCESS if the connections transitioned to L2 successfully + */ +NvlStatus +nvlink_core_powerdown_intranode_conns_from_active_to_L2 +( + nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + NvU32 version; + NvU32 i; + + if ((conns == NULL) || (connCount == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connections to exit L2\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // Set the version. 
Currently, only one version is supported on a chip + version = conns[0]->end0->version; + + /**************** Start the L2 entry sequence for the connections ***************/ + + // NVLink 3.0 and beyond, link needs to be ACTIVE before it can be transitioned to L2 + if ((version >= NVLINK_DEVICE_VERSION_30) && (connCount > 0)) + { + for (i = 0; i < connCount; i++) + { + status = nvlink_core_check_intranode_conn_state(conns[i], NVLINK_LINKSTATE_HS); + if ((status == NVL_SUCCESS) || (status == NVL_ERR_INVALID_STATE)) + { + continue; + } + + // We can train connections to HS only when they are already in SAFE + status = nvlink_core_check_intranode_conn_state(conns[i], NVLINK_LINKSTATE_SAFE); + if (status != NVL_SUCCESS) + { + continue; + } + + if (nvlink_core_train_intranode_conns_from_swcfg_to_active_ALT(&conns[i], 1, flags)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Failed to train connection to ACTIVE.\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + } + } + + // STEP 0: Disable HeartBeat on the endpoints of all connections + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_DISABLE_HEARTBEAT, + flags); + + // Only send if not in loopback + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_DISABLE_HEARTBEAT, + flags); + } + } + + // STEP 1: Disable PM on the endpoints of all connections + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_DISABLE_PM, + flags); + + // Only send if not in loopback + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_DISABLE_PM, + flags); + } + } + + // Get link state on all endpoints. 
This ensures that NVLINK_LINKSTATE_DISABLE_PM completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < connCount; i++) + { + status = conns[i]->end0->link_handlers->get_dl_link_mode(conns[i]->end0, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is not in good state after sending DISABLE PM\n", + __FUNCTION__, conns[i]->end0->dev->deviceName, conns[i]->end0->linkName)); + } + + status = conns[i]->end1->link_handlers->get_dl_link_mode(conns[i]->end1, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is not in good state after sending DISABLE PM\n", + __FUNCTION__, conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + } + } + + // Check for each connection, if both the ends and their sublinks are in HS mode + for (i = 0; i < connCount; i++) + { + status = nvlink_core_check_intranode_conn_state(conns[i], NVLINK_LINKSTATE_HS); + if (status == NVL_ERR_INVALID_STATE) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s - Link %s:%s is not in good state\n", + __FUNCTION__, + conns[i]->end0->dev->deviceName, conns[i]->end0->linkName, + conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + else if (status == NVL_SUCCESS) + { + // STEP 2: Change link state from ACTIVE to SWCFG on all endpoints + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_SAFE, + flags); + // Only send if not in loopback + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_SAFE, + flags); + } + } + } + + // + // All the endpoints should now either be in SWCFG or transitioning to SWCFG. Poll for all + // endpoints to reach SWCFG. 
If any endpoint does not transition to SWCFG, return error + // + for (i = 0; i < connCount; i++) + { + // Wait for the end0 to go to SWCFG + status = nvlink_core_poll_link_state(conns[i]->end0, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set endpoint %s:%s in SWCFG\n", + __FUNCTION__, conns[i]->end0->dev->deviceName, conns[i]->end0->linkName)); + } + + // Wait for the end1 to go to SWCFG + status = nvlink_core_poll_link_state(conns[i]->end1, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set endpoint %s:%s in SWCFG\n", + __FUNCTION__, conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + } + + // STEP 3: Change sub-link state to SAFE on all endpoints + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_SAFE, + flags); + + // Only send if not in loopback + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_SAFE, + flags); + } + } + + // Poll for all endpoints sub-link state to reach SAFE + for (i = 0; i < connCount; i++) + { + // Wait for sublinks to go to SAFE + status = nvlink_core_poll_sublink_state(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end1, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublinks to SAFE\n", + __FUNCTION__)); + } + + status = nvlink_core_poll_sublink_state(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end0, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublinks to SAFE\n", + __FUNCTION__)); + } + } + + // STEP 4: Save link state on all the endpoints + for (i = 0; i < connCount; i++) + { + if (!conns[i]->end0->bStateSaved) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_SAVE_STATE, + flags); + } + + if (!conns[i]->end1->bStateSaved) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_SAVE_STATE, + flags); + } + } + + // Get link state on all endpoints. 
This ensures that NVLINK_LINKSTATE_SAVE_STATE completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < connCount; i++) + { + status = conns[i]->end0->link_handlers->get_dl_link_mode(conns[i]->end0, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is not in good state after sending SAVESTATE command\n", + __FUNCTION__, conns[i]->end0->dev->deviceName, conns[i]->end0->linkName)); + } + + status = conns[i]->end1->link_handlers->get_dl_link_mode(conns[i]->end1, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s is not in good state after sending SAVESTATE command\n", + __FUNCTION__, conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + } + } + + // STEP 5: Trigger the sleep request on all the endpoints + for (i = 0; i < connCount; i++) + { + // + // Send SLEEP request on one end of connection if not in loopback. + // Don' poll, since transition will happen when both ends get the request + // + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end0->link_handlers->set_tl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_SLEEP, + NVLINK_STATE_CHANGE_ASYNC); + } + + // Send SLEEP request on both ends and poll for completion + conns[i]->end1->link_handlers->set_tl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_SLEEP, + NVLINK_STATE_CHANGE_SYNC); + conns[i]->end0->link_handlers->set_tl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_SLEEP, + NVLINK_STATE_CHANGE_SYNC); + } + + // Finally check the connection states + for (i = 0; i < connCount; i++) + { + status = nvlink_core_check_intranode_conn_state(conns[i], NVLINK_LINKSTATE_SLEEP); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link %s:%s - Link %s:%s is not in good state after sending SLEEP request\n", + __FUNCTION__, + conns[i]->end0->dev->deviceName, conns[i]->end0->linkName, + conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + + // Update the link and sublink states in the core library + conns[i]->end0->state = NVLINK_LINKSTATE_SLEEP; + conns[i]->end1->state = NVLINK_LINKSTATE_SLEEP; + conns[i]->end0->tx_sublink_state = NVLINK_SUBLINK_STATE_TX_OFF; + conns[i]->end1->tx_sublink_state = NVLINK_SUBLINK_STATE_TX_OFF; + conns[i]->end0->rx_sublink_state = NVLINK_SUBLINK_STATE_RX_OFF; + conns[i]->end1->rx_sublink_state = NVLINK_SUBLINK_STATE_RX_OFF; + + // Update power state transition status for the connection + conns[i]->end0->powerStateTransitionStatus = nvlink_power_state_in_L2; + conns[i]->end1->powerStateTransitionStatus = nvlink_power_state_in_L2; + } + + /***************** End of L2 entry sequence for the connections ****************/ + + // + // Note that status is squashed, since the expectation is that we soldier on if any link fails + // during the transition to L2 state + // + return NVL_SUCCESS; +} + +/** + * [PSEUDO-CLEAN SHUTDOWN] + * + * Shutdown the given array of intranode connections from ACTIVE to OFF state + * + * @param[in] conns Array of connections to shutdown + * @param[in] connCount Number of connections in the array + * @param[in] flags Flags to track if shutdown is sync/async + * + * return NVL_SUCCESS if the connections shutdown successfully + */ +NvlStatus 
+nvlink_core_powerdown_intranode_conns_from_active_to_off +( + nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + NvU32 i; + + if ((conns == NULL) || (connCount == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connections to shutdown\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + for (i = 0; i < connCount; i++) + { + // Disable Power Management before moving link out of Active + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_DISABLE_PM, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_DISABLE_PM, + flags); + } + + // Move both ends to SWCFG + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_SAFE, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_SAFE, + flags); + } + } + + // Poll for links to reach SWCFG & initiate sublinks to SAFE state + for (i = 0; i < connCount; i++) + { + // Wait for the end0 to go to SWCFG + status = nvlink_core_poll_link_state(conns[i]->end0, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set endpoint %s:%s in SWCFG", + __FUNCTION__, conns[i]->end0->dev->deviceName, conns[i]->end0->linkName)); + + // to track Failure + conns[i]->end0->inSWCFG = NV_FALSE; + } + else + { + conns[i]->end0->inSWCFG = NV_TRUE; + } + + // Wait for the end1 to go to SWCFG + status = nvlink_core_poll_link_state(conns[i]->end1, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set endpoint %s:%s in SWCFG\n", + __FUNCTION__, conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + + // to track Failure + conns[i]->end1->inSWCFG = NV_FALSE; + } + else + { + conns[i]->end1->inSWCFG = NV_TRUE; + } + + // Change each sublink state to SAFE + if(conns[i]->end0->inSWCFG == NV_TRUE) + { + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_SAFE, + flags); + } + + if (conns[i]->end0 != conns[i]->end1 && conns[i]->end1->inSWCFG == NV_TRUE) + { + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_SAFE, + flags); + } + } + + // Poll for sublinks to reach SAFE state + for (i = 0; i < connCount; i++) + { + // Wait for sublinks to go to SAFE + if(conns[i]->end0->inSWCFG == NV_TRUE) + { + status = nvlink_core_poll_sublink_state(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end1, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + } + if (status != NVL_SUCCESS || conns[i]->end0->inSWCFG == NV_FALSE) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublinks to SAFE", + __FUNCTION__)); + } + + if(conns[i]->end1->inSWCFG == NV_TRUE) + { + status = nvlink_core_poll_sublink_state(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end0, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + } + if (status != NVL_SUCCESS || conns[i]->end1->inSWCFG == NV_FALSE) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, 
NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublinks to SAFE", + __FUNCTION__)); + } + + // + // Disable error detect on both sides of the link + // + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_DISABLE_ERR_DETECT, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_DISABLE_ERR_DETECT, + flags); + } + + // + // Disable Lanes on both sides of the link + // + status = conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_LANE_DISABLE, + flags); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to disable lanes for link %s:%s\n", + __FUNCTION__, conns[i]->end0->dev->deviceName, conns[i]->end0->linkName)); + } + + if (conns[i]->end0 != conns[i]->end1) + { + status = conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_LANE_DISABLE, + flags); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to disable lanes for link %s:%s\n", + __FUNCTION__, conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + } + + // + // Shutdown Lanes on both sides of the link + // + status = conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_LANE_SHUTDOWN, + flags); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to shutdown lanes for link %s:%s\n", + __FUNCTION__, conns[i]->end0->dev->deviceName, conns[i]->end0->linkName)); + } + + if (conns[i]->end0 != conns[i]->end1) + { + status = conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_LANE_SHUTDOWN, + flags); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to shutdown lanes for link %s:%s\n", + __FUNCTION__, conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + } + + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, NVLINK_LINKSTATE_OFF, flags); + + // Link becomes in-accessible after its turned off. Check if this is a loopback connection + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, NVLINK_LINKSTATE_OFF, flags); + } + + _nvlink_core_clear_link_state(conns[i]->end0); + _nvlink_core_clear_link_state(conns[i]->end1); + } + + // + // Squash status. 
If any side of link doesn not respond the link is + // shutdown unilaterally + // + return NVL_SUCCESS; +} + +/** + * Power down the given array of intranode connections from ACTIVE to SWCFG state + * + * @param[in] conns Array of connections to shutdown + * @param[in] connCount Number of connections in the array + * @param[in] flags Flags to track if shutdown is sync/async + * + * return NVL_SUCCESS if the connections shutdown successfully + */ +NvlStatus +nvlink_core_powerdown_intranode_conns_from_active_to_swcfg +( + nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + NvU32 i; + + if ((conns == NULL) || (connCount == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connections to shutdown\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + for (i = 0; i < connCount; i++) + { + // Disable Power Management before moving link out of Active + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_DISABLE_PM, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_DISABLE_PM, + flags); + } + + // Move both ends to SWCFG + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_SAFE, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_SAFE, + flags); + } + } + + // + // Poll _SAFE state for connections and set corresponding sublinks to _SAFE + // + for (i = 0; i < connCount; i++) + { + // Wait for the end0 to go to SWCFG + status = nvlink_core_poll_link_state(conns[i]->end0, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set endpoint %s:%s in SWCFG", + __FUNCTION__, conns[i]->end0->dev->deviceName, conns[i]->end0->linkName)); + } + else + { + // Change each sublink state to SAFE + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_SAFE, + flags); + } + + if (conns[i]->end0 != conns[i]->end1) + { + // Wait for the end1 to go to SWCFG + status = nvlink_core_poll_link_state(conns[i]->end1, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set endpoint %s:%s in SWCFG", + __FUNCTION__, conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + else + { + // Change each sublink state to SAFE + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_SAFE, + flags); + } + } + } + + // Wait for sublinks to go to SAFE + for (i = 0; i < connCount; i++) + { + status = nvlink_core_poll_sublink_state(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end1, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublinks to SAFE (TX:RX)", + __FUNCTION__)); + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: sublinks (%s:%s) (%s:%s)", + __FUNCTION__, + conns[i]->end0->dev->deviceName, conns[i]->end0->linkName, + conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + + status = nvlink_core_poll_sublink_state(conns[i]->end1, + 
NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end0, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublinks to SAFE (RX:TX)", + __FUNCTION__)); + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: sublinks (%s:%s) (%s:%s)", + __FUNCTION__, + conns[i]->end0->dev->deviceName, conns[i]->end0->linkName, + conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + } + + // Update tracking info + for (i = 0; i < connCount; i++) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Connection is in SAFE mode. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conns[i]); + } + + // + // Squash status. If any side of link doesn not respond the link is + // shutdown unilaterally + // + return NVL_SUCCESS; +} + +/** + * Reset the given array of intranode connections + * + * @param[in] conns Array of connections to reset + * @param[in] connCount Number of connections in the array + * @param[in] flags Flags + * + * return NVL_SUCCESS if the connections reset successfully + */ +NvlStatus +nvlink_core_reset_intranode_conns +( + nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags +) +{ + NvU32 i; + + if ((conns == NULL) || (connCount == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connections to shutdown\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + for (i = 0; i < connCount; i++) + { + // + // Reset both ends of this connection. + // This path should enable/init those link endpoints as well. + // + // NVLink3.0 + uses the TL link reset + // + if (conns[i]->end0->version >= NVLINK_DEVICE_VERSION_30) + { + conns[i]->end0->link_handlers->set_tl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_RESET, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_tl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_RESET, + flags); + } + } + else + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_RESET, + flags); + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_RESET, + flags); + } + + _nvlink_core_clear_link_state(conns[i]->end0); + _nvlink_core_clear_link_state(conns[i]->end1); + } + + return NVL_SUCCESS; +} + +/** + * Clears Core Library State + * + * @param[in] link NVLink Link pointer + */ +static void +_nvlink_core_clear_link_state +( + nvlink_link *link +) +{ + // Receiver Detect needs to happen again + link->bRxDetected = NV_FALSE; + + // INITNEGOTIATE needs to happen again + link->bInitnegotiateConfigGood = NV_FALSE; + + // TxCommonMode needs to happen again + link->bTxCommonModeFail = NV_FALSE; + + // SAFE transition needs to happen again + link->bSafeTransitionFail = NV_FALSE; + + // Reset the SW state tracking the link and sublink states + link->state = NVLINK_LINKSTATE_OFF; + link->tx_sublink_state = NVLINK_SUBLINK_STATE_TX_OFF; + link->rx_sublink_state = NVLINK_SUBLINK_STATE_RX_OFF; +} diff --git a/src/common/nvlink/kernel/nvlink/core/nvlink_training.c b/src/common/nvlink/kernel/nvlink/core/nvlink_training.c new file mode 100644 index 000000000..6bf16f70f --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/core/nvlink_training.c @@ -0,0 +1,1078 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +static void _nvlink_core_set_sublink_pre_hs_settings(nvlink_link *, NvU32); +static void _nvlink_core_set_link_pre_active_settings(nvlink_link *, NvU32); +static void _nvlink_core_set_link_post_active_settings(nvlink_link *, NvU32); + +/** + * Link training + * Train the internode connection link from SWCFG to ACTIVE + * + * @param[in] conn NVLink connection pointer + * @param[in] isMasterEnd Is this the master end of the connection + * @param[in] flags Flags to track if training is sync/async + * + * return NVL_SUCCESS if the link trains successfully + */ +NvlStatus +nvlink_core_train_internode_conns_from_swcfg_to_active +( + nvlink_internode_conn **conns, + NvU32 connCount, + NvU32 *isMasterEnd, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + NvU32 i; + NvBool skipConn[NVLINK_MAX_SYSTEM_LINK_NUM] = {0}; + + if ((conns == NULL) || (connCount == 0) || (isMasterEnd == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connections to train to ACTIVE\n", + __FUNCTION__)); + + return NVL_BAD_ARGS; + } + + for (i = 0; i < connCount; i++) + { + // Don't do anything if the link is already at HS. + if ((nvlink_core_check_link_state(conns[i]->local_end, NVLINK_LINKSTATE_HS)) && + (nvlink_core_check_tx_sublink_state(conns[i]->local_end, + NVLINK_SUBLINK_STATE_TX_HS)) && + (nvlink_core_check_rx_sublink_state(conns[i]->local_end, + NVLINK_SUBLINK_STATE_RX_HS))) + { + // + // Note: On NVLink version < 3.0, bufferready is set prior to link state + // change to ACTIVE. So, return early. For NVLink version >= 3.0, + // bufferready is only set after link is ACTIVE. 
Hence, proceed to + // the subsequent code + // + if (conns[i]->local_end->version < NVLINK_DEVICE_VERSION_30) + { + skipConn[i] = NV_TRUE; + } + } + + // + // For NVLink version < 3.0, we can train link to ACTIVE only when link is at + // SWCFG and sublink are at HS + // + if (conns[i]->local_end->version < NVLINK_DEVICE_VERSION_30) + { + if (!(nvlink_core_check_link_state(conns[i]->local_end, NVLINK_LINKSTATE_SAFE)) || + !(nvlink_core_check_tx_sublink_state(conns[i]->local_end, + NVLINK_SUBLINK_STATE_TX_HS)) || + !(nvlink_core_check_rx_sublink_state(conns[i]->local_end, + NVLINK_SUBLINK_STATE_RX_HS))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Invalid link/sublink mode while training link to HIGH SPEED" + " %s:%s \n", + __FUNCTION__, + conns[i]->local_end->dev->deviceName, conns[i]->local_end->linkName)); + nvlink_core_print_link_state(conns[i]->local_end); + skipConn[i] = NV_TRUE; + } + } + } + + for (i = 0; i < connCount; i++) + { + if (skipConn[i]) + { + continue; + } + + _nvlink_core_set_link_pre_active_settings(conns[i]->local_end, flags); + + // Change mode for master link. The other link end should transition to active. + if (isMasterEnd[i] == NV_TRUE) + { + conns[i]->local_end->link_handlers->set_dl_link_mode(conns[i]->local_end, + NVLINK_LINKSTATE_HS, + flags); + } + } + + for (i = 0; i < connCount; i++) + { + + // Wait for the link state to change. + status = nvlink_core_poll_link_state(conns[i]->local_end, + NVLINK_LINKSTATE_HS, + NVLINK_TRANSITION_HS_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set link state to ACTIVE for link" + " %s:%s \n", + __FUNCTION__, + conns[i]->local_end->dev->deviceName, conns[i]->local_end->linkName)); + } + else + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_SETUP, + "%s: Successfully able to set link state to ACTIVE for link" + " %s:%s \n", + __FUNCTION__, + conns[i]->local_end->dev->deviceName, conns[i]->local_end->linkName)); + } + + // Do all the miscellaneous settings once the link is trained to ACTIVE. + _nvlink_core_set_link_post_active_settings(conns[i]->local_end, flags); + } + + // + // Always return success to FM on training failures + // FM will read link states to determine sucessfull training + // + return NVL_SUCCESS; +} + +/** + * Link training + * Train the internode connection sublink to enter high speed + * + * @param[in] conn NVLink connection pointer + * @param[in] flags Flags to track if the training is sync/async + * + * return NVL_SUCCESS if the sublink trained successfully + */ +NvlStatus +nvlink_core_train_internode_conn_sublink_from_safe_to_hs +( + nvlink_internode_conn *conn, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + + // NVLink 3.0 onwards this is handled through INITOPTIMIZE, return error + if (conn->local_end->version >= NVLINK_DEVICE_VERSION_30) + { + return NVL_ERR_NOT_SUPPORTED; + } + + _nvlink_core_set_sublink_pre_hs_settings(conn->local_end, flags); + + // don't do anything if the link is already at HS. + if ((nvlink_core_check_link_state(conn->local_end, NVLINK_LINKSTATE_HS)) && + (nvlink_core_check_tx_sublink_state(conn->local_end, + NVLINK_SUBLINK_STATE_TX_HS)) && + (nvlink_core_check_rx_sublink_state(conn->local_end, + NVLINK_SUBLINK_STATE_RX_HS))) + { + // both link and sublinks are at HS. don't do anything. + return NVL_SUCCESS; + } + + // we can train sublink to HS only when link is at SWCFG. 
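+ //
+ // Note: the checks that follow encode the ordering this legacy
+ // (pre-NVLink 3.0) sequence relies on: the DL link must already be in
+ // SWCFG, the local TX sublink must be in SAFE, and the RX sublink may be
+ // in SAFE or already in HS if the remote end raised its TX first. Any
+ // other combination is reported as NVL_ERR_INVALID_STATE rather than
+ // retried here.
+ //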
+ if (!nvlink_core_check_link_state(conn->local_end, NVLINK_LINKSTATE_SAFE)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Invalid link mode while training sublink to HIGH SPEED" + " %s:%s \n", + __FUNCTION__, + conn->local_end->dev->deviceName, conn->local_end->linkName)); + nvlink_core_print_link_state(conn->local_end); + return NVL_ERR_INVALID_STATE; + } + + // tx sublink state must be in SAFE as well. + if (!nvlink_core_check_tx_sublink_state(conn->local_end, + NVLINK_SUBLINK_STATE_TX_SAFE)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Invalid tx sublink mode while training sublink to HIGH SPEED" + " %s:%s \n", + __FUNCTION__, + conn->local_end->dev->deviceName, conn->local_end->linkName)); + nvlink_core_print_link_state(conn->local_end); + return NVL_ERR_INVALID_STATE; + } + + // + // rx sublink state may be in SAFE mode or in HS, if the other end of the + // connection already toggled tx sublink mode to HS. + // + if (!((nvlink_core_check_rx_sublink_state(conn->local_end, + NVLINK_SUBLINK_STATE_RX_SAFE)) || + (nvlink_core_check_rx_sublink_state(conn->local_end, + NVLINK_SUBLINK_STATE_RX_HS)))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Invalid rx sublink mode while training sublink to HIGH SPEED" + " %s:%s \n", + __FUNCTION__, + conn->local_end->dev->deviceName, conn->local_end->linkName)); + nvlink_core_print_link_state(conn->local_end); + return NVL_ERR_INVALID_STATE; + } + + // Put TX sublink in HS + conn->local_end->link_handlers->set_tx_mode(conn->local_end, + NVLINK_SUBLINK_STATE_TX_HS, + flags); + + // Wait for sublink to go in HS. + status = nvlink_core_poll_tx_sublink_state(conn->local_end, + NVLINK_SUBLINK_STATE_TX_HS, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + NVLINK_TRANSITION_HS_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublink state to HIGH SPEED for link" + " %s:%s \n", + __FUNCTION__, + conn->local_end->dev->deviceName, conn->local_end->linkName)); + return status; + } + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_SETUP, + "%s:Successfully able to set sublink state to HIGH SPEED for link" + " %s:%s \n", + __FUNCTION__, + conn->local_end->dev->deviceName, conn->local_end->linkName)); + + return status; +} + +/** + * Train a given set of intranode connections from L2 to ACTIVE state + * + * @param[in] conns Array of connections to train + * @param[in] connCount Number of connections in the array + * @param[in] flags Flags to track if training is sync/async + * + * return NVL_SUCCESS if the connections train successfully + */ +NvlStatus +nvlink_core_train_intranode_conns_from_from_L2_to_active +( + nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + NvU32 i; + + if ((conns == NULL) || (connCount == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connections to exit L2\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + /**************** Start the L2 exit sequence for the connections ***************/ + + // STEP 1: Reset all endpoints of the links. 
This clears any link state + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_RESET, + flags); + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_RESET, + flags); + } + + // STEP 2: NVLink 3 and beyond, we also need to perform INITPHASE1 + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_INITPHASE1, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_INITPHASE1, + flags); + } + } + + // Get link state on all endpoints. This ensures that NVLINK_LINKSTATE_INITPHASE1 completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < connCount; i++) + { + status = conns[i]->end0->link_handlers->get_dl_link_mode(conns[i]->end0, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + + status = conns[i]->end1->link_handlers->get_dl_link_mode(conns[i]->end1, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + } + } + + // Verify that all the endpoints are now in INIT state + for (i = 0; i < connCount; i++) + { + status = nvlink_core_check_intranode_conn_state(conns[i], NVLINK_LINKSTATE_OFF); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Connection did not transition to INIT. ", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conns[i]); + + return status; + } + } + + // STEP 3: Restore all end point state saved while entering SLEEP state + for (i = 0; i < connCount; i++) + { + if (conns[i]->end0->bStateSaved) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_RESTORE_STATE, + flags); + } + + if (conns[i]->end1->bStateSaved) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_RESTORE_STATE, + flags); + } + } + + // Get link state on all endpoints. This ensures that NVLINK_LINKSTATE_RESTORE_STATE completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < connCount; i++) + { + status = conns[i]->end0->link_handlers->get_dl_link_mode(conns[i]->end0, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + + status = conns[i]->end1->link_handlers->get_dl_link_mode(conns[i]->end1, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + } + } + + // STEP 4: Initialize RX Termination on all end points + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_rx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_RX_INIT_TERM, + flags); + conns[i]->end1->link_handlers->set_rx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_RX_INIT_TERM, + flags); + } + + // Get link state on all endpoints. 
This ensures that NVLINK_SUBLINK_STATE_RX_INIT_TERM completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < connCount; i++) + { + status = conns[i]->end0->link_handlers->get_dl_link_mode(conns[i]->end0, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + + status = conns[i]->end1->link_handlers->get_dl_link_mode(conns[i]->end1, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + } + } + + // STEP 5: Enable Common mode on Tx's of all endpoints + for (i = 0; i < connCount; i++) + { + if (!((conns[i]->end0->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_COMMON_MODE) || + (conns[i]->end0->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE) || + (conns[i]->end0->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_DATA_READY))) + { + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_COMMON_MODE, + flags); + } + if (!((conns[i]->end1->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_COMMON_MODE) || + (conns[i]->end1->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE) || + (conns[i]->end1->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_DATA_READY))) + { + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_COMMON_MODE, + flags); + } + } + + // Get link state on all endpoints. This ensures that NVLINK_SUBLINK_STATE_TX_COMMON_MODE completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < connCount; i++) + { + status = conns[i]->end0->link_handlers->get_dl_link_mode(conns[i]->end0, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + + status = conns[i]->end1->link_handlers->get_dl_link_mode(conns[i]->end1, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + } + } + + // STEP 6: Put all Rx's in RXCAL + for (i = 0; i < connCount; i++) + { + if (conns[i]->end0->rx_sublink_state != NVLINK_SUBLINK_STATE_RX_RXCAL) + { + conns[i]->end0->link_handlers->set_rx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_RX_RXCAL, + flags); + } + if (conns[i]->end1->rx_sublink_state != NVLINK_SUBLINK_STATE_RX_RXCAL) + { + conns[i]->end1->link_handlers->set_rx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_RX_RXCAL, + flags); + } + } + + // STEP 7: Disable Tx common mode + for (i = 0; i < connCount; i++) + { + if (!((conns[i]->end0->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE) || + (conns[i]->end0->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_DATA_READY))) + { + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE, + flags); + } + if (!((conns[i]->end1->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE) || + (conns[i]->end1->tx_sublink_state == NVLINK_SUBLINK_STATE_TX_DATA_READY))) + { + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE, + flags); + } + } + + // STEP 8: Set Data Ready and Enable + for (i = 0; i < connCount; i++) + { + if (conns[i]->end0->tx_sublink_state != NVLINK_SUBLINK_STATE_TX_DATA_READY) + { + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_DATA_READY, + flags); + } + if (conns[i]->end1->tx_sublink_state != 
NVLINK_SUBLINK_STATE_TX_DATA_READY) + { + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_DATA_READY, + flags); + } + } + + // Get link state on all endpoints. This ensures that NVLINK_SUBLINK_STATE_TX_DATA_READY completes + if (flags == NVLINK_STATE_CHANGE_ASYNC) + { + for (i = 0; i < connCount; i++) + { + status = conns[i]->end0->link_handlers->get_dl_link_mode(conns[i]->end0, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + + status = conns[i]->end1->link_handlers->get_dl_link_mode(conns[i]->end1, &linkMode); + if ((status != NVL_SUCCESS) || + (linkMode == NVLINK_LINKSTATE_FAIL) || (linkMode == NVLINK_LINKSTATE_FAULT)) + { + return status; + } + } + } + + // STEP 9: Set link mode to SAFE + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_SAFE, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_SAFE, + flags); + } + } + + // Verify all the endpoints link state now reflect SAFE state + for (i = 0; i < connCount; i++) + { + status = nvlink_core_poll_link_state(conns[i]->end0, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + return status; + } + + status = nvlink_core_poll_link_state(conns[i]->end1, + NVLINK_LINKSTATE_SAFE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + return status; + } + + status = nvlink_core_poll_sublink_state(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end1, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + return status; + } + + status = nvlink_core_poll_sublink_state(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_SAFE, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end0, + NVLINK_SUBLINK_STATE_RX_SAFE, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_SAFE_TIMEOUT); + if (status != NVL_SUCCESS) + { + return status; + } + } + + // STEP 9: Set INITNEOGOTIATE + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_INITNEGOTIATE, + flags); + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_INITNEGOTIATE, + flags); + } + } + + // Bug 2398907 mentioned that a link pair can take upto 125us for DL stat to have CONFIG_GOOD. 
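+ // The single nvlink_sleep() call below is deliberately coarse; it is
+ // expected to cover that ~125us worst case across all link pairs before
+ // the POST_INITNEGOTIATE step below checks for CONFIG_GOOD.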
+ nvlink_sleep(1); + + // STEP 8: Set POST_INITNEGOTIATE + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_POST_INITNEGOTIATE, + flags); + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_POST_INITNEGOTIATE, + flags); + } + + if (connCount != 0) + { + // STEP 11: Train the sublinks to HS and links to ACTIVE + if (conns[0]->end0->version >= NVLINK_DEVICE_VERSION_30) + { + // NVLink 3.0+ ALT training + status = nvlink_core_train_intranode_conns_from_swcfg_to_active_ALT(conns, + connCount, + flags); + } + else + { + // Legacy training + status = nvlink_core_train_intranode_conns_from_swcfg_to_active_legacy(conns, + connCount, + flags); + } + + if (status != NVL_SUCCESS) + { + return status; + } + + for (i = 0; i < connCount; i++) + { + // Update the power state transition status of the link + conns[i]->end0->powerStateTransitionStatus = nvlink_power_state_in_L0; + conns[i]->end1->powerStateTransitionStatus = nvlink_power_state_in_L0; + } + } + + /***************** End of L2 exit sequence for the connections *****************/ + + return status; +} + +/** + * Train intranode connections associated with a list of links to HS + * using ALT sequence + * + * @param[in] conns Array of connections to train + * @param[in] connCount Number of connections in the array + * @param[in] flags Flags to track if training is sync/async + * + * return NVL_SUCCESS if the connections train successfully + */ +NvlStatus +nvlink_core_train_intranode_conns_from_swcfg_to_active_ALT +( + nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + NvlStatus pollStatus = NVL_SUCCESS; + NvU64 linkMode = NVLINK_LINKSTATE_OFF; + NvU32 i; + NvBool skipConn[NVLINK_MAX_SYSTEM_LINK_NUM] = {0}; + + if ((conns == NULL) || (connCount == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connections to train to ACTIVE\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + for (i = 0; i < connCount; i++) + { + status = conns[i]->end0->link_handlers->get_dl_link_mode(conns[i]->end0, &linkMode); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to get link mode for %s:%s\n", + __FUNCTION__, conns[i]->end0->dev->deviceName, conns[i]->end0->linkName)); + continue; + } + + // + // Skip training if links are in HS + // Only checking end0 here because HS implies both sides are HS + // + if (linkMode == NVLINK_LINKSTATE_HS) + { + skipConn[i] = NV_TRUE; + } + } + + // Trigger INITOPTIMIZE on both ends of the connection + for (i = 0; i < connCount; i++) + { + if (skipConn[i]) + { + continue; + } + + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_INITOPTIMIZE, + flags); + + // On loopback, only send INITOPTIMIZE to one side. + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_INITOPTIMIZE, + flags); + } + } + + // Trigger POST_INITOPTIMIZE (Checks INITOPTIMIZE was successful) on both ends of the connection + for (i = 0; i < connCount; i++) + { + if (skipConn[i]) + { + continue; + } + + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_POST_INITOPTIMIZE, + flags); + + // On loopback, only send POST_INITOPTIMIZE to one side. 
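+ // (A loopback connection is one where end0 and end1 refer to the same
+ // link object, so the request only needs to be issued once.)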
+ if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_POST_INITOPTIMIZE, + flags); + } + } + + // Set link modes to ACTIVE + for (i = 0; i < connCount; i++) + { + if (skipConn[i]) + { + continue; + } + + // Some settings required before moving to ACTIVE + _nvlink_core_set_link_pre_active_settings(conns[i]->end0, flags); + _nvlink_core_set_link_pre_active_settings(conns[i]->end1, flags); + + // + // Put only end0 in ACTIVE mode. The other end should automatically go to Active. + // If it does not go to ACTIVE then we need to do fault handling. + // + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_HS, + flags); + } + + // Verify link mode HS on the endpoints + for (i = 0; i < connCount; i++) + { + if (skipConn[i]) + { + continue; + } + + pollStatus = nvlink_core_poll_link_state(conns[i]->end1, + NVLINK_LINKSTATE_HS, + NVLINK_TRANSITION_HS_TIMEOUT); + if (pollStatus != NVL_SUCCESS) + { + status = pollStatus; + } + else + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Successfully able to set linkstate to ACTIVE for links" + " %s:%s<->%s:%s\n", + __FUNCTION__, + conns[i]->end0->dev->deviceName, conns[i]->end0->linkName, + conns[i]->end1->dev->deviceName, conns[i]->end1->linkName)); + } + + conns[i]->end0->link_handlers->training_complete(conns[i]->end0); + + // On loopback, only send once + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->training_complete(conns[i]->end1); + } + + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_POST_HS, + flags); + // On loopback, only send once + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_POST_HS, + flags); + } + + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_TRAFFIC_SETUP, + flags); + // On loopback, only send once + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_TRAFFIC_SETUP, + flags); + } + + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_ENABLE_PM, + flags); + // On loopback, only send once + if (conns[i]->end0 != conns[i]->end1) + { + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_ENABLE_PM, + flags); + } + } + + return status; +} + +/** + * Train a single intranode connection associated with a list of links to HS + * using legacy pre-Ampere sequence + * + * @param[in] conns Array of connections to train + * @param[in] connCount Number of connections in the array + * @param[in] flags Flags to track if training is sync/async + * + * return NVL_SUCCESS if the connections train successfully + */ +NvlStatus +nvlink_core_train_intranode_conns_from_swcfg_to_active_legacy +( + nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + NvlStatus pollStatus = NVL_SUCCESS; + NvU32 i; + + if ((conns == NULL) || (connCount == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connections to train to ACTIVE\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // Enable PRBS generator on both ends of the link + for (i = 0; i < connCount; i++) + { + _nvlink_core_set_sublink_pre_hs_settings(conns[i]->end0, flags); + _nvlink_core_set_sublink_pre_hs_settings(conns[i]->end1, flags); + } + + // Put TX sublink on both ends in 
High Speed + for (i = 0; i < connCount; i++) + { + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_HS, + flags); + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_HS, + flags); + } + + // Wait for sublinks to go in High Speed. + for (i = 0; i < connCount; i++) + { + pollStatus = nvlink_core_poll_sublink_state(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_HS, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end1, + NVLINK_SUBLINK_STATE_RX_HS, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_HS_TIMEOUT); + if (pollStatus != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublinks in High Speed mode", + __FUNCTION__)); + + status = pollStatus; + } + + pollStatus = nvlink_core_poll_sublink_state(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_HS, + NVLINK_SUBLINK_SUBSTATE_TX_STABLE, + conns[i]->end0, + NVLINK_SUBLINK_STATE_RX_HS, + NVLINK_SUBLINK_SUBSTATE_RX_STABLE, + NVLINK_TRANSITION_HS_TIMEOUT); + if (pollStatus != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set sublinks in High Speed mode", + __FUNCTION__)); + + status = pollStatus; + } + } + + // Some settings required before moving to ACTIVE + for (i = 0; i < connCount; i++) + { + _nvlink_core_set_link_pre_active_settings(conns[i]->end0, flags); + _nvlink_core_set_link_pre_active_settings(conns[i]->end1, flags); + + // + // Put only end0 in ACTIVE mode. The other end should automatically go to Active. + // If it does not go to ACTIVE then we need to do fault handling. + // + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_HS, + flags); + } + + // Verify link mode HS on the endpoints + for (i = 0; i < connCount; i++) + { + pollStatus = nvlink_core_poll_link_state(conns[i]->end1, + NVLINK_LINKSTATE_HS, + NVLINK_TRANSITION_HS_TIMEOUT); + if (pollStatus == NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Successfully able to train an intranode connection to Active", + __FUNCTION__)); + nvlink_core_print_intranode_conn(conns[i]); + } + else + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to train an intranode connection to Active", + __FUNCTION__)); + + nvlink_core_print_intranode_conn(conns[i]); + status = NVL_ERR_GENERIC; + } + + conns[i]->end0->link_handlers->training_complete(conns[i]->end0); + conns[i]->end1->link_handlers->training_complete(conns[i]->end1); + + conns[i]->end0->link_handlers->set_tx_mode(conns[i]->end0, + NVLINK_SUBLINK_STATE_TX_POST_HS, + flags); + conns[i]->end1->link_handlers->set_tx_mode(conns[i]->end1, + NVLINK_SUBLINK_STATE_TX_POST_HS, + flags); + + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_TRAFFIC_SETUP, + flags); + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_TRAFFIC_SETUP, + flags); + + conns[i]->end0->link_handlers->set_dl_link_mode(conns[i]->end0, + NVLINK_LINKSTATE_ENABLE_PM, + flags); + conns[i]->end1->link_handlers->set_dl_link_mode(conns[i]->end1, + NVLINK_LINKSTATE_ENABLE_PM, + flags); + } + + return status; +} + +/** + * Miscellaneous pre High Speed settings. 
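+ * (Currently this is limited to enabling the PRBS generator; see the note
+ * in the function body for why this matters on pre-Volta devices.)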
+ * Do all the sublink specific settings before it is trained to HS mode + * + * @param[in] link NVLink Link pointer + * @param[in] flags Flags to track if the step is sync/async + */ +static void +_nvlink_core_set_sublink_pre_hs_settings +( + nvlink_link *link, + NvU32 flags +) +{ + // + // Before training the sublinks to HS, the PROD values must be loaded. + // On Volta/NVSwitch, the PROD values get loaded by UCODE during DLPL Init. + // So, this PRBS setting is not a prerequisite for High Speed transition. + // However, for GP100, these values and several other RX end parameters get + // loaded as part of PRBS enable. If these values are not initialized, then + // the RX end of the link won't transition to High Speed. + // + + // Enable PRBS generator + link->link_handlers->set_tx_mode(link, NVLINK_SUBLINK_STATE_TX_PRBS_EN, flags); +} + +/** + * Miscellaneous pre Active mode settings + * Do all the link specific settings before it is trained to Active mode. + * + * @param[in] link NVLink Link pointer + * @param[in] flags Flags to track if the step is sync/async + */ +static void +_nvlink_core_set_link_pre_active_settings +( + nvlink_link *link, + NvU32 flags +) +{ + // Some settings required before moving to ACTIVE + link->link_handlers->set_dl_link_mode(link, NVLINK_LINKSTATE_PRE_HS, flags); +} + +/** + * Miscellaneous post Active mode settings + * Do all the link specific settings once it is trained to Active mode. + * + * @param[in] link NVLink Link pointer + * @param[in] flags Flags to track if the step is sync/async + */ +static void +_nvlink_core_set_link_post_active_settings +( + nvlink_link *link, + NvU32 flags +) +{ + link->link_handlers->training_complete(link); + + link->link_handlers->set_tx_mode(link, NVLINK_SUBLINK_STATE_TX_POST_HS, flags); + + link->link_handlers->set_dl_link_mode(link, NVLINK_LINKSTATE_TRAFFIC_SETUP, flags); + + link->link_handlers->set_dl_link_mode(link, NVLINK_LINKSTATE_ENABLE_PM, flags); +} diff --git a/src/common/nvlink/kernel/nvlink/interface/nvlink_ioctl_entry.c b/src/common/nvlink/kernel/nvlink/interface/nvlink_ioctl_entry.c new file mode 100644 index 000000000..19f76b634 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/interface/nvlink_ioctl_entry.c @@ -0,0 +1,3557 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvVer.h" +#include "nvlink_os.h" +#include "nvlink_lib_ctrl.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" +#include "nvlink_lock.h" + +#define NVLINK_IOC_GET_BUF(ctrlParams, type) (ctrlParams)->size >= sizeof(type) ? (type *) (ctrlParams)->buf : NULL + +/** + * List of static functions + */ +static NvlStatus nvlink_lib_ioctl_ctrl_helper(nvlink_ioctrl_params *); +static NvlStatus nvlink_lib_ctrl_prologue(nvlink_ioctrl_params *); +static NvlStatus nvlink_lib_ctrl_check_version(nvlink_check_version *); +static NvlStatus nvlink_lib_ctrl_set_node_id(nvlink_set_node_id *); +static NvlStatus nvlink_lib_ctrl_all_links(nvlink_ioctrl_params *); +static NvlStatus nvlink_lib_ctrl_device_link_init_status(nvlink_device_link_init_status *); +static NvlStatus nvlink_lib_ctrl_device_write_discovery_tokens(nvlink_device_write_discovery_tokens *); +static NvlStatus nvlink_lib_ctrl_device_read_discovery_tokens(nvlink_device_read_discovery_tokens *); +static NvlStatus nvlink_lib_ctrl_device_read_sids(nvlink_device_read_sids *); +static NvlStatus nvlink_lib_ctrl_discover_intranode_conns(nvlink_discover_intranode_conns *); +static NvlStatus nvlink_lib_ctrl_device_get_intranode_conns(nvlink_device_get_intranode_conns *); +static NvlStatus nvlink_lib_ctrl_add_internode_conn(nvlink_add_internode_conn *); +static NvlStatus nvlink_lib_ctrl_remove_internode_conn(nvlink_remove_internode_conn *); +static NvlStatus nvlink_lib_ctrl_train_intranode_conn(nvlink_train_intranode_conn *); +static NvlStatus nvlink_lib_ctrl_train_intranode_conns_parallel(nvlink_train_intranode_conns_parallel *); +static NvlStatus nvlink_lib_ctrl_train_internode_conn_link(nvlink_train_internode_conn_link *); +static NvlStatus nvlink_lib_ctrl_train_internode_conn_sublink(nvlink_train_internode_conn_sublink *); +static NvlStatus nvlink_lib_ctrl_train_internode_links_initoptimize(nvlink_train_internode_links_initoptimize *); +static NvlStatus nvlink_lib_ctrl_train_internode_links_post_initoptimize(nvlink_train_internode_links_post_initoptimize *); +static NvlStatus nvlink_lib_ctrl_train_internode_conns_parallel(nvlink_train_internode_conns_parallel *); +static NvlStatus nvlink_lib_ctrl_get_devices_info(nvlink_get_devices_info *); +static NvlStatus nvlink_lib_ctrl_acquire_capability(nvlink_ioctrl_params *, nvlink_acquire_capability *); +static NvlStatus nvlink_lib_ctrl_get_link_state(nvlink_get_link_state *); +static NvlStatus nvlink_lib_ctrl_get_device_link_states(nvlink_get_device_link_states *); + +/** + * Entry point for IOCTLs into the NVLink core library + * + * @param[in] ctrlParams IOCTL params + * + * return NvlStatus + */ +NvlStatus +nvlink_lib_ioctl_ctrl +( + nvlink_ioctrl_params *ctrlParams +) +{ + NvlStatus status = NVL_SUCCESS; + + status = nvlink_lib_ioctl_ctrl_helper(ctrlParams); + + return status; +} + +/** + * Helper function for routing the IOCTL to the respective handlers + * + * Note: The handlers acquire the required core library locks before + * calling the core library code + * + * @param[in] ctrlParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ioctl_ctrl_helper +( + nvlink_ioctrl_params *ctrlParams +) +{ + NvlStatus status; + + status = nvlink_lib_ctrl_prologue(ctrlParams); + if (status != NVL_SUCCESS) + { + return status; + } + + switch (ctrlParams->cmd) + { + case CTRL_NVLINK_CHECK_VERSION: + { + nvlink_check_version *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_check_version); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + 
+ iocReq->status = nvlink_lib_ctrl_check_version(iocReq); + break; + } + + case CTRL_NVLINK_SET_NODE_ID: + { + nvlink_set_node_id *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_set_node_id); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_set_node_id(iocReq); + break; + } + + // + // The following commands operate on all the links registered in the + // core library. Hence, clubbing them into a group so, we don't have + // to duplicate the lock acquire/release for each of them + // + case CTRL_NVLINK_INITPHASE1: + case CTRL_NVLINK_RX_INIT_TERM: + case CTRL_NVLINK_SET_RX_DETECT: + case CTRL_NVLINK_GET_RX_DETECT: + case CTRL_NVLINK_SET_TX_COMMON_MODE: + case CTRL_NVLINK_CALIBRATE: + case CTRL_NVLINK_ENABLE_DATA: + case CTRL_NVLINK_LINK_INIT_ASYNC: + case CTRL_NVLINK_INITNEGOTIATE: + case CTRL_NVLINK_INITPHASE5: + { + nvlink_lib_ctrl_all_links(ctrlParams); + break; + } + + case CTRL_NVLINK_DEVICE_LINK_INIT_STATUS: + { + nvlink_device_link_init_status *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_device_link_init_status); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_device_link_init_status(iocReq); + break; + } + + case CTRL_NVLINK_DEVICE_WRITE_DISCOVERY_TOKENS: + { + nvlink_device_write_discovery_tokens *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_device_write_discovery_tokens); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_device_write_discovery_tokens(iocReq); + break; + } + + case CTRL_NVLINK_DEVICE_READ_DISCOVERY_TOKENS: + { + nvlink_device_read_discovery_tokens *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_device_read_discovery_tokens); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_device_read_discovery_tokens(iocReq); + break; + } + + case CTRL_NVLINK_DEVICE_READ_SIDS: + { + nvlink_device_read_sids *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_device_read_sids); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_device_read_sids(iocReq); + break; + } + + case CTRL_NVLINK_DISCOVER_INTRANODE_CONNS: + { + nvlink_discover_intranode_conns *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_discover_intranode_conns); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_discover_intranode_conns(iocReq); + break; + } + + case CTRL_NVLINK_DEVICE_GET_INTRANODE_CONNS: + { + nvlink_device_get_intranode_conns *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_device_get_intranode_conns); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_device_get_intranode_conns(iocReq); + break; + } + + case CTRL_NVLINK_ADD_INTERNODE_CONN: + { + nvlink_add_internode_conn *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_add_internode_conn); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_add_internode_conn(iocReq); + break; + } + + case CTRL_NVLINK_REMOVE_INTERNODE_CONN: + { + nvlink_remove_internode_conn *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_remove_internode_conn); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_remove_internode_conn(iocReq); + break; + } + + case CTRL_NVLINK_TRAIN_INTRANODE_CONN: + { + nvlink_train_intranode_conn *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_train_intranode_conn); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = 
nvlink_lib_ctrl_train_intranode_conn(iocReq); + break; + } + + case CTRL_NVLINK_TRAIN_INTRANODE_CONNS_PARALLEL: + { + nvlink_train_intranode_conns_parallel *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_train_intranode_conns_parallel); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_train_intranode_conns_parallel(iocReq); + break; + } + + case CTRL_NVLINK_TRAIN_INTERNODE_CONN_LINK: + { + nvlink_train_internode_conn_link *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_train_internode_conn_link); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_train_internode_conn_link(iocReq); + break; + } + + case CTRL_NVLINK_TRAIN_INTERNODE_CONN_SUBLINK: + { + nvlink_train_internode_conn_sublink *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_train_internode_conn_sublink); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_train_internode_conn_sublink(iocReq); + break; + } + + case CTRL_NVLINK_TRAIN_INTERNODE_LINKS_INITOPTIMIZE: + { + nvlink_train_internode_links_initoptimize *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_train_internode_links_initoptimize); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + iocReq->status = nvlink_lib_ctrl_train_internode_links_initoptimize(iocReq); + break; + } + + case CTRL_NVLINK_TRAIN_INTERNODE_LINKS_POST_INITOPTIMIZE: + { + nvlink_train_internode_links_post_initoptimize *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_train_internode_links_post_initoptimize); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + iocReq->status = nvlink_lib_ctrl_train_internode_links_post_initoptimize(iocReq); + break; + } + + case CTRL_NVLINK_TRAIN_INTERNODE_CONNS_PARALLEL: + { + nvlink_train_internode_conns_parallel *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_train_internode_conns_parallel); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_train_internode_conns_parallel(iocReq); + break; + } + + case CTRL_NVLINK_GET_DEVICES_INFO: + { + nvlink_get_devices_info *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_get_devices_info); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_get_devices_info(iocReq); + break; + } + + case CTRL_NVLINK_ACQUIRE_CAPABILITY: + { + nvlink_acquire_capability *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_acquire_capability); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_acquire_capability(ctrlParams, iocReq); + break; + } + + case CTRL_NVLINK_GET_LINK_STATE: + { + nvlink_get_link_state *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_get_link_state); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_get_link_state(iocReq); + break; + } + case CTRL_NVLINK_GET_DEVICE_LINK_STATES: + { + nvlink_get_device_link_states *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_get_device_link_states); + if (!iocReq) + { + return NVL_BAD_ARGS; + } + + iocReq->status = nvlink_lib_ctrl_get_device_link_states(iocReq); + break; + } + + case CTRL_NVLINK_RESERVED_0: + case CTRL_NVLINK_RESERVED_1: + case CTRL_NVLINK_RESERVED_2: + case CTRL_NVLINK_RESERVED_3: + case CTRL_NVLINK_RESERVED_4: + case CTRL_NVLINK_RESERVED_5: + case CTRL_NVLINK_RESERVED_6: + case CTRL_NVLINK_RESERVED_7: + case CTRL_NVLINK_RESERVED_8: + case CTRL_NVLINK_RESERVED_9: + case CTRL_NVLINK_RESERVED_10: + case CTRL_NVLINK_RESERVED_11: + { + return NVL_SUCCESS; + 
break; + } + + default: + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: unknown ioctl command 0x%08X specified.\n", + __FUNCTION__, ctrlParams->cmd)); + return NVL_BAD_ARGS; + } + } + + // + // the IOCTL call is success. However, status of the individual IOCTL is + // indicated in their corresponding embedded status field. + // + return NVL_SUCCESS; +} + +/** + * Preliminary check before passing the IOCTL to the respective handler + * + * @param[in] ctrlParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_prologue +( + nvlink_ioctrl_params *ctrlParams +) +{ + NvlStatus status = NVL_SUCCESS; + + if (ctrlParams == NULL) + { + return NVL_BAD_ARGS; + } + + switch (ctrlParams->cmd) + { + // + // These control calls are aren't privileged. So, skip the capability + // check. + // + case CTRL_NVLINK_CHECK_VERSION: + case CTRL_NVLINK_ACQUIRE_CAPABILITY: + { + break; + } + default: + { + if (!nvlink_is_admin() && + !nvlink_is_fabric_manager(ctrlParams->osPrivate)) + { + status = NVL_ERR_INSUFFICIENT_PERMISSIONS; + } + break; + } + } + + return status; +} + +/** + * Check if the user and kernel versions mismatch + * + * @param[in] versionParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_check_version +( + nvlink_check_version *versionParams +) +{ + const NvU32 NV_VERSION_LENGTH = nvlink_strlen(NV_VERSION_STRING); + + if (NV_VERSION_LENGTH > NVLINK_VERSION_STRING_LENGTH) + { + return NVL_NO_MEM; + } + + versionParams->user.version[NVLINK_VERSION_STRING_LENGTH - 1] = '\0'; + + nvlink_memset(versionParams->kernel.version, 0x0, sizeof(versionParams->kernel.version)); + nvlink_strcpy(versionParams->kernel.version, NV_VERSION_STRING); + + versionParams->kernel.version[NVLINK_VERSION_STRING_LENGTH - 1] = '\0'; + + if (nvlink_strcmp(versionParams->user.version, versionParams->kernel.version)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: version mismatch, kernel version %s user version %s\n", + __FUNCTION__, + versionParams->kernel.version, versionParams->user.version)); + + return NVL_ERR_NOT_SUPPORTED; + } + + return NVL_SUCCESS; +} + +/** + * Assign node ID to all the registered devices + * + * @param[in] idParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_set_node_id +( + nvlink_set_node_id *idParams +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_device *dev = NULL; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // Top-level lock is now acquired + + // Return success, if an attempt is made to re-assign the same node-id. + if (nvlinkLibCtx.nodeId == idParams->nodeId) + { + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_SUCCESS; + } + + if (nvlinkLibCtx.nodeId != NV_U16_MAX) + { + // Don't allow to change fabric node id once it is set. + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Can't change fabric node id once it is set. " + "Current node id is %u\n", + __FUNCTION__, nvlinkLibCtx.nodeId)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_ERR_INVALID_STATE; + } + + // Change already registered device's fabric node id. 
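+ // Devices that register after this call pick the node id up from
+ // nvlinkLibCtx.nodeId (stored below), so existing and future registrations
+ // end up with the same fabric node id.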
+ FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + dev->nodeId = idParams->nodeId; + } + + // Store fabric node id for any future device registration. + nvlinkLibCtx.nodeId = idParams->nodeId; + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_SUCCESS; +} + +/** + * Kick off the desired operation on registered links of all devices + * + * Note: This operation will acquire the per-link locks of all the + * registered links of all devices in the core library + * + * @param[in] ctrlParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_all_links +( + nvlink_ioctrl_params *ctrlParams +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_device *dev = NULL; + nvlink_link *link = NULL; + NvU32 numLinks = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists + // + + FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + links[numLinks] = link; + numLinks++; + } + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + nvlink_assert((links != NULL) && (numLinks > 0)); + + // Kick off the desired operation on all the registered links + switch (ctrlParams->cmd) + { + case CTRL_NVLINK_INITPHASE1: + { + nvlink_initphase1 *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_initphase1); + if (!iocReq) + { + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_all_links_end; + } + + // default initialize status to NVL_SUCCESS + iocReq->status = NVL_SUCCESS; + + iocReq->status = nvlink_core_initphase1(links, numLinks, + NVLINK_STATE_CHANGE_SYNC); + break; + } + + case CTRL_NVLINK_RX_INIT_TERM: + { + nvlink_rx_init_term *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_rx_init_term); + if (!iocReq) + { + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_all_links_end; + } + + // default initialize status to NVL_SUCCESS + iocReq->status = NVL_SUCCESS; + + iocReq->status = nvlink_core_rx_init_term(links, numLinks, + NVLINK_STATE_CHANGE_ASYNC); + break; + } + + case CTRL_NVLINK_SET_RX_DETECT: + { + nvlink_set_rx_detect *iocReq; + + iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_set_rx_detect); + if (!iocReq) + { + status = NVL_BAD_ARGS; + goto 
nvlink_lib_ctrl_all_links_end;
+            }
+
+            // default initialize status to NVL_SUCCESS
+            iocReq->status = NVL_SUCCESS;
+
+            iocReq->status = nvlink_core_set_rx_detect(links, numLinks,
+                                                       NVLINK_STATE_CHANGE_ASYNC);
+            break;
+        }
+
+        case CTRL_NVLINK_GET_RX_DETECT:
+        {
+            nvlink_get_rx_detect *iocReq;
+
+            iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_get_rx_detect);
+            if (!iocReq)
+            {
+                status = NVL_BAD_ARGS;
+                goto nvlink_lib_ctrl_all_links_end;
+            }
+
+            // default initialize status to NVL_SUCCESS
+            iocReq->status = NVL_SUCCESS;
+
+            iocReq->status = nvlink_core_get_rx_detect(links, numLinks,
+                                                       NVLINK_STATE_CHANGE_ASYNC);
+            break;
+        }
+
+        case CTRL_NVLINK_SET_TX_COMMON_MODE:
+        {
+            nvlink_set_tx_common_mode *iocReq;
+
+            iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_set_tx_common_mode);
+            if (!iocReq)
+            {
+                status = NVL_BAD_ARGS;
+                goto nvlink_lib_ctrl_all_links_end;
+            }
+
+            // default initialize status to NVL_SUCCESS
+            iocReq->status = NVL_SUCCESS;
+
+            if (iocReq->commMode)
+            {
+                iocReq->status = nvlink_core_enable_common_mode(links, numLinks,
+                                                                NVLINK_STATE_CHANGE_SYNC);
+            }
+            else if (links[0]->version <= NVLINK_DEVICE_VERSION_30)
+            {
+                iocReq->status = nvlink_core_disable_common_mode(links, numLinks,
+                                                                 NVLINK_STATE_CHANGE_SYNC);
+            }
+
+            //
+            // If the current nvlink device does not support disabling common mode,
+            // skip the command and return success so that FM can continue on.
+            //
+            break;
+        }
+
+        case CTRL_NVLINK_CALIBRATE:
+        {
+            nvlink_calibrate *iocReq;
+            iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_calibrate);
+
+            if (!iocReq)
+            {
+                status = NVL_BAD_ARGS;
+                goto nvlink_lib_ctrl_all_links_end;
+            }
+
+            // default initialize status to NVL_SUCCESS
+            iocReq->status = NVL_SUCCESS;
+
+            iocReq->status = nvlink_core_calibrate_links(links, numLinks,
+                                                         NVLINK_STATE_CHANGE_SYNC);
+            break;
+        }
+
+        case CTRL_NVLINK_ENABLE_DATA:
+        {
+            nvlink_enable_data *iocReq;
+
+            iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_enable_data);
+            if (!iocReq)
+            {
+                status = NVL_BAD_ARGS;
+                goto nvlink_lib_ctrl_all_links_end;
+            }
+
+            // default initialize status to NVL_SUCCESS
+            iocReq->status = NVL_SUCCESS;
+
+            iocReq->status = nvlink_core_enable_data(links, numLinks,
+                                                     NVLINK_STATE_CHANGE_SYNC);
+            break;
+        }
+
+        case CTRL_NVLINK_LINK_INIT_ASYNC:
+        {
+            nvlink_link_init_async *iocReq;
+
+            iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_link_init_async);
+            if (!iocReq)
+            {
+                status = NVL_BAD_ARGS;
+                goto nvlink_lib_ctrl_all_links_end;
+            }
+
+            // default initialize status to NVL_SUCCESS
+            iocReq->status = NVL_SUCCESS;
+
+            iocReq->status = nvlink_core_link_init_async(links, numLinks);
+            break;
+        }
+
+        case CTRL_NVLINK_INITNEGOTIATE:
+        {
+            nvlink_initnegotiate *iocReq;
+
+            iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_initnegotiate);
+            if (!iocReq)
+            {
+                status = NVL_BAD_ARGS;
+                goto nvlink_lib_ctrl_all_links_end;
+            }
+
+            // default initialize status to NVL_SUCCESS
+            iocReq->status = NVL_SUCCESS;
+
+            iocReq->status = nvlink_core_initnegotiate(links, numLinks,
+                                                       NVLINK_STATE_CHANGE_ASYNC);
+            break;
+        }
+        case CTRL_NVLINK_INITPHASE5:
+        {
+            nvlink_initphase5 *iocReq;
+
+            iocReq = NVLINK_IOC_GET_BUF(ctrlParams, nvlink_initphase5);
+            if (!iocReq)
+            {
+                status = NVL_BAD_ARGS;
+                goto nvlink_lib_ctrl_all_links_end;
+            }
+
+            // default initialize status to NVL_SUCCESS
+            iocReq->status = NVL_SUCCESS;
+
+            break;
+        }
+
+        default:
+        {
+            NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
+                "%s: unknown ioctl command specified.\n",
+                __FUNCTION__));
+
+            status = NVL_BAD_ARGS;
+            goto nvlink_lib_ctrl_all_links_end;
+        }
+
+    }
+
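+    //
+    // Common exit path: release the per-link locks acquired above and free
+    // the scratch links array before returning the dispatch status.
+    //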
+nvlink_lib_ctrl_all_links_end: + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + if (links != NULL) + { + nvlink_free((void *)links); + } + + return status; +} + +/** + * Get the link init status on all queried links + * + * @param[in] statusParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_device_link_init_status +( + nvlink_device_link_init_status *statusParams +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_device *dev = NULL; + nvlink_link *link = NULL; + NvU32 numLinks = 0; + NvU32 i = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists + // + + // look-up user requested nvlink device object + nvlink_core_get_device_by_devinfo(&statusParams->devInfo, &dev); + if (dev == NULL) + { + // + // Couldn't find the device ptr in the core library. Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + links[numLinks] = link; + numLinks++; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // Poll for links to reach SAFE/SWCFG and capture the status + for (i = 0; i < numLinks; i++) + { + // status index should be within NVLINK_MAX_DEVICE_CONN + if (i >= NVLINK_MAX_DEVICE_CONN) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: number of links for the device >= NVLINK_MAX_DEVICE_CONN", + __FUNCTION__)); + + nvlink_assert(0); + + nvlink_lib_link_locks_release(links, numLinks); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + status = nvlink_core_wait_for_link_init(links[i]); + + // indicate link init state to user + statusParams->linkStatus[i].linkIndex = links[i]->linkNumber; + + if (status == NVL_SUCCESS) + { + statusParams->linkStatus[i].initStatus = NV_TRUE; + } + else + { + statusParams->linkStatus[i].initStatus = NV_FALSE; + } + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + if (links != NULL) + { + nvlink_free((void *)links); + } + return NVL_SUCCESS; +} + +/** + * Send discovery tokens on all the links for a given device + * + * @param[in] writeParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus 
+nvlink_lib_ctrl_device_write_discovery_tokens +( + nvlink_device_write_discovery_tokens *writeParams +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_device *dev = NULL; + nvlink_link *link = NULL; + NvU32 numLinks = 0; + NvU32 i = 0; + NvU32 numTokens = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + // Initialize number of tokens written to 0 + writeParams->numTokens = 0; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists + // + + // look-up user requested nvlink device object + nvlink_core_get_device_by_devinfo(&writeParams->devInfo, &dev); + if (dev == NULL) + { + // + // Couldn't find the device ptr in the core library. Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + nvlink_intranode_conn *conn = NULL; + + nvlink_core_get_intranode_conn(link, &conn); + if (conn != NULL) + { + // skip token write if we already have a connection for the link + continue; + } + + if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = link; + numLinks++; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + for (i = 0; i < numLinks; i++) + { + NvU64 writeToken = 0; + + writeToken = nvlink_core_get_link_discovery_token(links[i]); + status = nvlink_core_write_link_discovery_token(links[i], writeToken); + + if (status == NVL_SUCCESS) + { + // + // wrote a token. copy the token and link information to user + // which can be used for comparing tokens across nodes. 
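+            // (e.g. the fabric manager can match these against tokens read on the
+            // peer node to pair up internode connections)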
+ // + + // total number of tokens should be within NVLINK_MAX_DEVICE_CONN + if (numTokens >= NVLINK_MAX_DEVICE_CONN) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Number of tokens >= NVLINK_MAX_DEVICE_CONN\n", + __FUNCTION__)); + + nvlink_assert(0); + + nvlink_lib_link_locks_release(links, numLinks); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + writeParams->tokenInfo[numTokens].linkIndex = links[i]->linkNumber; + writeParams->tokenInfo[numTokens].tokenValue = writeToken; + numTokens++; + } + } + + // update total number of tokens written + writeParams->numTokens = numTokens; + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + if (links != NULL) + { + nvlink_free((void *)links); + } + return NVL_SUCCESS; +} + +/** + * Read discovery tokens on all the links for a given device + * + * @param[in] readParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_device_read_discovery_tokens +( + nvlink_device_read_discovery_tokens *readParams +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_device *dev = NULL; + nvlink_link *link = NULL; + NvU32 numLinks = 0; + NvU32 i = 0; + NvU32 numTokens = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + // Initialize number of tokens read to 0 + readParams->numTokens = 0; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists + // + + // look-up user requested nvlink device object + nvlink_core_get_device_by_devinfo(&readParams->devInfo, &dev); + if (dev == NULL) + { + // + // Couldn't find the device ptr in the core library. Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + nvlink_intranode_conn *conn = NULL; + + nvlink_core_get_intranode_conn(link, &conn); + if (conn != NULL) + { + // skip token write if we already have a connection for the link + continue; + } + + if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = link; + numLinks++; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + for (i = 0; i < numLinks; i++) + { + NvU64 readToken = 0; + + // query discovery token from the link + readToken = nvlink_core_read_link_discovery_token(links[i]); + + // take non-zero tokens. 
token will be zero if read_discovery failed as well.
+        if (readToken)
+        {
+            //
+            // received a valid token. copy the token and link information to user
+            // which can be used for comparing tokens across nodes.
+            //
+
+            // total number of tokens should be within NVLINK_MAX_DEVICE_CONN
+            if (numTokens >= NVLINK_MAX_DEVICE_CONN)
+            {
+                NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
+                    "%s: Number of tokens >= NVLINK_MAX_DEVICE_CONN\n",
+                    __FUNCTION__));
+
+                nvlink_assert(0);
+
+                nvlink_lib_link_locks_release(links, numLinks);
+                nvlink_free((void *)links);
+                return NVL_ERR_INVALID_STATE;
+            }
+
+            readParams->tokenInfo[numTokens].linkIndex = links[i]->linkNumber;
+            readParams->tokenInfo[numTokens].tokenValue = readToken;
+            numTokens++;
+        }
+    }
+
+    // update total number of tokens read
+    readParams->numTokens = numTokens;
+
+    // Release the per-link locks
+    nvlink_lib_link_locks_release(links, numLinks);
+
+    if (links != NULL)
+    {
+        nvlink_free((void *)links);
+    }
+    return NVL_SUCCESS;
+}
+
+/**
+ * Read the SIDs for the local and remote device
+ *
+ * @param[in] readParams IOCTL params
+ *
+ * return NvlStatus
+ */
+static NvlStatus
+nvlink_lib_ctrl_device_read_sids
+(
+    nvlink_device_read_sids *readParams
+)
+{
+    NvlStatus status = NVL_SUCCESS;
+    nvlink_device *dev = NULL;
+    nvlink_link *link = NULL;
+    NvU32 numLinks = 0;
+    NvU32 i = 0;
+    NvU32 numEntries = 0;
+
+    nvlink_link **links = (nvlink_link **)nvlink_malloc(
+                        sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM);
+    if (links == NULL)
+    {
+        return NVL_NO_MEM;
+    }
+
+    // Initialize number of SIDs read to 0
+    readParams->numEntries = 0;
+
+    // Acquire the top-level lock
+    status = nvlink_lib_top_lock_acquire();
+    if (status != NVL_SUCCESS)
+    {
+        NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS,
+            "%s: Failed to acquire top-level lock\n",
+            __FUNCTION__));
+
+        nvlink_free((void *)links);
+        return status;
+    }
+
+    //
+    // Top-level lock is now acquired. Proceed to traversing the device
+    // and link lists
+    //
+
+    // look-up user requested nvlink device object
+    nvlink_core_get_device_by_devinfo(&readParams->devInfo, &dev);
+    if (dev == NULL)
+    {
+        //
+        // Couldn't find the device ptr in the core library. 
Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + nvlink_intranode_conn *conn = NULL; + + nvlink_core_get_intranode_conn(link, &conn); + if (conn != NULL) + { + // skip token write if we already have a connection for the link + continue; + } + + if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = link; + numLinks++; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + for (i = 0; i < numLinks; i++) + { + // Fill-up the local/remote link numbers and SIDs + readParams->sidInfo[numEntries].localLinkSid = links[i]->localSid; + readParams->sidInfo[numEntries].remoteLinkSid = links[i]->remoteSid; + readParams->sidInfo[numEntries].localLinkNum = links[i]->linkNumber; + readParams->sidInfo[numEntries].remoteLinkNum = links[i]->remoteLinkId; + numEntries++; + } + + // update total number of entries read + readParams->numEntries = numEntries; + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + if (links != NULL) + { + nvlink_free((void *)links); + } + return NVL_SUCCESS; +} + +/** + * Discover all the intranode connections from the core library + * + * @param[in] connParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_discover_intranode_conns +( + nvlink_discover_intranode_conns *connParams +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_device *dev = NULL; + nvlink_link *link = NULL; + NvU32 numLinks = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. 
Proceed to traversing the device + // and link lists + // + + FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = link; + numLinks++; + } + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Note: We will still need to hold the top-level lock, because we might have + // to add connections to the intranode connections list if any case new + // intranode connection is discovered + // + + FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + NvU64 writeToken = 0; + nvlink_intranode_conn *conn = NULL; + + nvlink_core_get_intranode_conn(link, &conn); + if (conn != NULL) + { + // skip token write if we already have a connection for the link + continue; + } + + if (!link->bRxDetected) + { + // If receiver detect has failed, then there is no connection + continue; + } + + writeToken = nvlink_core_get_link_discovery_token(link); + + if ((link->version < NVLINK_DEVICE_VERSION_30) || + ((link->localSid == 0) || (link->remoteSid == 0))) + { + nvlink_core_write_link_discovery_token(link, writeToken); + + // wrote a token. read back tokens from all links and create connection + nvlink_core_correlate_conn_by_token(link, writeToken, NV_FALSE); + } + else + { + // From 3.0 we rely on Sid values. So send skiptoken as true. + nvlink_core_correlate_conn_by_token(link, writeToken, NV_TRUE); + } + } + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + if (links != NULL) + { + nvlink_free((void *)links); + } + return NVL_SUCCESS; +} + +/** + * Get the intranode connections from the core library + * + * @param[in] getParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_device_get_intranode_conns +( + nvlink_device_get_intranode_conns *getParams +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_device *dev = NULL; + NvU32 numConns = 0; + nvlink_intranode_conn *conn = NULL; + + // Initialize number of connections to 0 + getParams->numConnections = 0; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists + // + + // look-up user requested nvlink device object + nvlink_core_get_device_by_devinfo(&getParams->devInfo, &dev); + if (dev == NULL) + { + // + // Couldn't find the device ptr in the core library. 
Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + FOR_EACH_CONNECTION(conn, nvlinkLibCtx.nv_intraconn_head, node) + { + // + // copy connection information if source or destination device of + // this connection belong to the nvlink device specified by user + // + if ((conn->end0->dev == dev) || (conn->end1->dev == dev)) + { + // total number of connections should be within NVLINK_MAX_DEVICE_CONN + if (numConns >= NVLINK_MAX_DEVICE_CONN) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numConns >= NVLINK_MAX_DEVICE_CONN\n", + __FUNCTION__)); + + nvlink_assert(0); + + nvlink_lib_top_lock_release(); + return NVL_ERR_INVALID_STATE; + } + + // copy source endpoint information + nvlink_core_copy_endpoint_info(conn->end0, &getParams->conn[numConns].srcEndPoint); + + // copy destination endpoint information + nvlink_core_copy_endpoint_info(conn->end1, &getParams->conn[numConns].dstEndPoint); + + numConns++; + } + } + + getParams->numConnections = numConns; + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_SUCCESS; +} + +/** + * Add a discovered internode connection + * + * @param[in] addParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_add_internode_conn +( + nvlink_add_internode_conn *addParams +) +{ + nvlink_link *localLink = NULL; + nvlink_intranode_conn *intraConn = NULL; + NvlStatus status = NVL_SUCCESS; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists + // + + // make sure that this connection is multi-node + if (addParams->localEndPoint.nodeId == addParams->remoteEndPoint.nodeId) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Internode connection add with same node id for local and remote endpoint\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // validate the remote endpoint device type information + if (!nvlink_core_is_supported_device_type(addParams->remoteEndPoint.devType)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Internode connection add with invalid remote device type\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // + // look-up the nvlink link objects. Look-up will fail if there is a + // fabric node id mismatch. So an explicit check against self + // node id is not required. + // + nvlink_core_get_link_by_endpoint(&addParams->localEndPoint, &localLink); + if (localLink == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Internode connection add with no matching local endpoint\n", + __FUNCTION__)); + + // + // Couldn't find the endpoint registered in the core library. 
Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // can't add internode connection if we have an intranode connection + nvlink_core_get_intranode_conn(localLink, &intraConn); + if (intraConn != NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Found an intranode connection while adding internode connection\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // all the sanity check passed, add this internode connection in our context + status = nvlink_core_add_internode_conn(localLink, &addParams->remoteEndPoint); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; +} + +/** + * Remove an internode connection from the list + * + * @param[in] removeParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_remove_internode_conn +( + nvlink_remove_internode_conn *removeParams +) +{ + nvlink_link *localLink = NULL; + NvlStatus status = NVL_SUCCESS; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists + // + + // + // look-up the nvlink link objects. Look-up will fail if there is a + // fabric node id mismatch. So an explicit check against self + // node id is not required. + // + nvlink_core_get_link_by_endpoint(&removeParams->localEndPoint, &localLink); + if (localLink == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Internode connection remove with no matching local endpoint\n", + __FUNCTION__)); + + // + // Couldn't find the endpoint registered in the core library. 
Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // Acquire the per-link lock + status = nvlink_lib_link_locks_acquire(&localLink, 1); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link lock\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; + } + + // all the sanity check passed, remove this internode connection from our context + nvlink_core_remove_internode_conn(localLink); + + // Release the per-link lock + nvlink_lib_link_locks_release(&localLink, 1); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_SUCCESS; +} + +/** + * Train the intranode connection to the desired target state + * + * @param[in] trainParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_train_intranode_conn +( + nvlink_train_intranode_conn *trainParams +) +{ + nvlink_link *srcLink = NULL; + nvlink_link *dstLink = NULL; + nvlink_link *initLinks[2] = {0}; + nvlink_intranode_conn *conn = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 count; + + // make sure that this call is for single node systems + if (trainParams->srcEndPoint.nodeId != trainParams->dstEndPoint.nodeId) + { + return NVL_BAD_ARGS; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists + // + + // + // look-up the nvlink link objects. Look-up will fail if there is a + // fabric node id mismatch. So an explicit check against self + // node id is not required. + // + nvlink_core_get_link_by_endpoint(&trainParams->srcEndPoint, &srcLink); + nvlink_core_get_link_by_endpoint(&trainParams->dstEndPoint, &dstLink); + + // we can't train if both ends are not found + if ((srcLink == NULL) || (dstLink == NULL)) + { + // + // Couldn't find the endpoints registered in the core library. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // look-up the nvlink connection object by source link + nvlink_core_get_intranode_conn(srcLink, &conn); + if (conn == NULL) + { + // + // Couldn't find an associated connection for the 2 endpoints. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // + // we found the connection by the source link. Make sure that dest link is + // indeed, the user specified one as well + // + if ((conn->end0 != dstLink) && (conn->end1 != dstLink)) + { + // + // The dest endpoint is not the remote end for the src endpoint. 
Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + initLinks[0] = conn->end0; + initLinks[1] = conn->end1; + + // If loopback then only pass in 1 link + if (conn->end0 != conn->end1) + { + count = 2; + } + else + { + count = 1; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(initLinks, 2); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // the connection looks sane, initiate the training + switch (trainParams->trainTo) + { + case nvlink_train_conn_off_to_swcfg: + { + { + // ALT training for NVLink3.0+ + nvlink_core_init_links_from_off_to_swcfg( + initLinks, count, NVLINK_STATE_CHANGE_SYNC); + } + break; + } + case nvlink_train_conn_swcfg_to_active: + { + if (srcLink->version >= NVLINK_DEVICE_VERSION_30) + + { + // ALT training for NVLink3.0+ + status = nvlink_core_train_intranode_conns_from_swcfg_to_active_ALT( + &conn, 1, NVLINK_STATE_CHANGE_SYNC); + } + else + { + // Legacy training for pre-NVLink3.0 + status = nvlink_core_train_intranode_conns_from_swcfg_to_active_legacy( + &conn, 1, NVLINK_STATE_CHANGE_SYNC); + } + break; + } + case nvlink_train_conn_active_to_swcfg: + { + status = nvlink_core_powerdown_intranode_conns_from_active_to_swcfg( + &conn, 1, NVLINK_STATE_CHANGE_SYNC); + break; + } + case nvlink_train_conn_to_off: + case nvlink_train_conn_swcfg_to_off: + { + status = nvlink_core_powerdown_intranode_conns_from_active_to_off( + &conn, 1, NVLINK_STATE_CHANGE_SYNC); + if (status == NVL_SUCCESS) + { + nvlink_core_reset_intranode_conns(&conn, 1, NVLINK_STATE_CHANGE_SYNC); + } + break; + } + default: + { + status = NVL_BAD_ARGS; + break; + } + } + + // + // always get the latest link state values so that + // user has additional information other than just the return value. 
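+    // The states are reported even if the requested training step failed.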
+ // + nvlink_core_get_endpoint_state(conn->end0, &trainParams->srcEndState); + nvlink_core_get_endpoint_state(conn->end1, &trainParams->dstEndState); + + // Release the per-link locks + nvlink_lib_link_locks_release(initLinks, 2); + + return status; +} + +/** + * Train the intranode connections in parallel to the desired target state + * + * @param[in] trainParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_train_intranode_conns_parallel +( + nvlink_train_intranode_conns_parallel *trainParams +) +{ + nvlink_link *srcLink = NULL; + nvlink_link *dstLink = NULL; + nvlink_link **trainLinks = NULL; + nvlink_link **initLinks = NULL; + nvlink_intranode_conn **conns = NULL; + NvU32 numConns = 0; + NvlStatus status = NVL_SUCCESS; + NvU32 i; + NvU32 count = 0; + + // + // sanity check the input parms + // make sure that this call is for single node systems + // + numConns = trainParams->endPointPairsCount; + for (i = 0; i < numConns; i++) + { + if (trainParams->endPointPairs[i].src.nodeId != + trainParams->endPointPairs[i].dst.nodeId) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Node index 0x%x with mis-match ids (src:0x%x dst:0x%x).\n", + __FUNCTION__ , i, + trainParams->endPointPairs[i].src.nodeId, + trainParams->endPointPairs[i].dst.nodeId)); + + return NVL_BAD_ARGS; + } + if ((trainParams->endPointPairs[i].src.pciInfo.bus == trainParams->endPointPairs[i].dst.pciInfo.bus) && + (trainParams->endPointPairs[i].src.pciInfo.device == trainParams->endPointPairs[i].dst.pciInfo.device) && + (trainParams->endPointPairs[i].src.pciInfo.function == trainParams->endPointPairs[i].dst.pciInfo.function) && + (trainParams->endPointPairs[i].src.linkIndex == trainParams->endPointPairs[i].dst.linkIndex)) + { + count++; + } + else + { + count = count + 2; + } + } + + // Allocate space for the connection list + conns = (nvlink_intranode_conn **)nvlink_malloc( + sizeof(nvlink_intranode_conn *) * numConns); + if (conns == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for connections list\n", + __FUNCTION__)); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + + // Allocate space for the links list for link initialization + initLinks = (nvlink_link **)nvlink_malloc(sizeof(nvlink_link *) * count); + if (initLinks == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for links list for link initialization\n", + __FUNCTION__)); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + + // Allocate space for the links list for link training + trainLinks = (nvlink_link **)nvlink_malloc(sizeof(nvlink_link *) * numConns); + if (trainLinks == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for links list for link training\n", + __FUNCTION__)); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + + nvlink_memset(conns, 0, sizeof(nvlink_intranode_conn *) * numConns); + nvlink_memset(initLinks, 0, sizeof(nvlink_link *) * count); + nvlink_memset(trainLinks, 0, sizeof(nvlink_link *) * numConns); + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + goto 
nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device and + // link lists and connections list + // + count = 0; + // Get all the connections associated with the list of links + for (i = 0; i < numConns; i++) + { + // + // look-up the nvlink link objects. Look-up will fail if there is a + // fabric node id mismatch. So an explicit check against self + // node id is not required. + // + srcLink = NULL; + dstLink = NULL; + + nvlink_core_get_link_by_endpoint(&trainParams->endPointPairs[i].src, &srcLink); + nvlink_core_get_link_by_endpoint(&trainParams->endPointPairs[i].dst, &dstLink); + + // we can't train if both ends of a pair not found + if ((srcLink == NULL) || (dstLink == NULL)) + { + // + // Couldn't find the endpoints registered in the core library. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + + // look-up the nvlink connection object by source link + nvlink_core_get_intranode_conn(srcLink, &conns[i]); + if (conns[i] == NULL) + { + // + // Couldn't find an associated connection for the 2 endpoints. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + + // + // we found the connection by source link. Make sure that dest link is + // indeed, the user specified one as well + // + if ((conns[i]->end0 != dstLink) && (conns[i]->end1 != dstLink)) + { + // + // The dest endpoint is not the remote end for the src endpoint. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + if (srcLink == dstLink) + { + initLinks[count] = srcLink; + count++; + } + else + { + initLinks[count] = srcLink; + initLinks[count + 1] = dstLink; + count = count + 2; + } + trainLinks[i] = srcLink; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(initLinks, count); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + goto nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // Check all the links captured have version >= 3.0 + for (i = 0; i < numConns; i++) + { + // Parallel training allowed NvLink 3.0 & above + if ((conns[i]->end0->version < NVLINK_DEVICE_VERSION_30) || + (conns[i]->end1->version < NVLINK_DEVICE_VERSION_30)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Parallel training not allowed with nvlink version 0x%x indexed 0x%x\n", + __FUNCTION__ , + conns[i]->end0->version, i)); + + // + // Parallel training is allowed for only NVLink 3.0 and above. 
Release + // the per link locks and return + // + nvlink_lib_link_locks_release(initLinks, count); + + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_train_intranode_conns_parallel_end; + } + } + + // the connection looks sane, initiate the training + switch (trainParams->trainTo) + { + case nvlink_train_conn_off_to_swcfg: + { + { + // ALT training for NVLink3.0+ + nvlink_core_init_links_from_off_to_swcfg( + initLinks, count, NVLINK_STATE_CHANGE_SYNC); + } + break; + } + case nvlink_train_conn_swcfg_to_active: + { + { + // ALT training for NVLink3.0+ + status = nvlink_core_train_intranode_conns_from_swcfg_to_active_ALT( + conns, numConns, NVLINK_STATE_CHANGE_SYNC); + } + break; + } + case nvlink_train_conn_active_to_swcfg: + { + status = nvlink_core_powerdown_intranode_conns_from_active_to_swcfg( + conns, numConns, NVLINK_STATE_CHANGE_SYNC); + break; + } + case nvlink_train_conn_to_off: + case nvlink_train_conn_swcfg_to_off: + { + status = nvlink_core_powerdown_intranode_conns_from_active_to_off( + conns, numConns, NVLINK_STATE_CHANGE_SYNC); + if (status == NVL_SUCCESS) + { + nvlink_core_reset_intranode_conns(conns, numConns, NVLINK_STATE_CHANGE_SYNC); + } + break; + } + default: + { + status = NVL_BAD_ARGS; + break; + } + } + + // + // always get the latest link state values when the args are verified + // so that user has additional information other than just the return value. + // + for (i = 0; i < numConns; i++) + { + nvlink_core_get_endpoint_state(conns[i]->end0, &trainParams->endpointPairsStates[i].srcEnd); + nvlink_core_get_endpoint_state(conns[i]->end1, &trainParams->endpointPairsStates[i].dstEnd); + } + + // Release the per-link locks + nvlink_lib_link_locks_release(initLinks, count); + +nvlink_lib_ctrl_train_intranode_conns_parallel_end: + + if (conns != NULL) + { + nvlink_free((void *)conns); + } + + if (initLinks != NULL) + { + nvlink_free((void *)initLinks); + } + + if (trainLinks != NULL) + { + nvlink_free((void *)trainLinks); + } + + return status; +} + +/** + * Train the internode connection link to the target state + * + * @param[in] linkParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_train_internode_conn_link +( + nvlink_train_internode_conn_link *linkParams +) +{ + nvlink_link *localLink = NULL; + NvlStatus status = NVL_SUCCESS; + nvlink_internode_conn *interConn = NULL; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device and + // link lists and connections list + // + + // + // look-up the nvlink link objects. Look-up will fail if there is a + // fabric node id mismatch. So an explicit check against self + // node id is not required. + // + nvlink_core_get_link_by_endpoint(&linkParams->localEndPoint, &localLink); + + // user specified link is not available + if (localLink == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Internode connection link train request with no matching local endpoint\n", + __FUNCTION__)); + + // + // Couldn't find the endpoint registered in the core library. 
Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + nvlink_core_get_internode_conn(localLink, &interConn); + if (interConn == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No Internode connection exists for local endpoint %s: %s.\n", + __FUNCTION__, localLink->dev->deviceName, localLink->linkName)); + + // + // Couldn't find an associated connection for the endpoint. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // Acquire the per-link lock + status = nvlink_lib_link_locks_acquire(&localLink, 1); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + + switch (linkParams->trainTo) + { + case nvlink_train_link_off_to_swcfg: + { + // OFF to SAFE is part of initialization sequence as of now. + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_link_swcfg_to_active: + { + status = nvlink_core_train_internode_conns_from_swcfg_to_active( + &interConn, 1, &linkParams->isMasterEnd, NVLINK_STATE_CHANGE_SYNC); + break; + } + case nvlink_train_link_to_off: + { + // OFF state transitions are not supported/tested + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_link_active_to_swcfg: + { + // not implemented/supported now + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_link_swcfg_to_off: + { + // OFF state transitions are not supported/tested + status = NVL_BAD_ARGS; + break; + } + default: + { + status = NVL_BAD_ARGS; + break; + } + } + + // + // always get the latest link state values so that + // user has additional information other than just the return value. + // + nvlink_core_get_endpoint_state(localLink, &linkParams->localEndState); + + // Release the per-link lock + nvlink_lib_link_locks_release(&localLink, 1); + + return status; +} + +/* + * Train the internode connection sublink to the target state + * + * @param[in] subLinkParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_train_internode_conn_sublink +( + nvlink_train_internode_conn_sublink *subLinkParams +) +{ + nvlink_link *localLink = NULL; + NvlStatus status = NVL_SUCCESS; + nvlink_internode_conn *interConn = NULL; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device and + // link lists and connections list + // + + // + // look-up the nvlink link objects. Look-up will fail if there is a + // fabric node id mismatch. So an explicit check against self + // node id is not required. + // + nvlink_core_get_link_by_endpoint(&subLinkParams->localEndPoint, &localLink); + + // user specified link is not available + if (localLink == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Internode connection sublink train request with no matching local endpoint\n", + __FUNCTION__)); + + // + // Couldn't find the endpoint registered in the core library. 
Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + nvlink_core_get_internode_conn(localLink, &interConn); + if (interConn == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No Internode connection exists for local endpoint %s: %s.\n", + __FUNCTION__, localLink->dev->deviceName, localLink->linkName)); + + // + // Couldn't find an associated connection for the endpoint. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_BAD_ARGS; + } + + // Acquire the per-link lock + status = nvlink_lib_link_locks_acquire(&localLink, 1); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + switch (subLinkParams->trainTo) + { + case nvlink_train_sublink_off_to_safe: + { + // OFF to SAFE is part of initialization sequence as of now. + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_sublink_safe_to_hs: + { + // NVLink 3.0 onwards this is handled through INITOPTIMIZE + if (localLink->version >= NVLINK_DEVICE_VERSION_30) + { + return NVL_ERR_NOT_SUPPORTED; + } + status = nvlink_core_train_internode_conn_sublink_from_safe_to_hs( + interConn, NVLINK_STATE_CHANGE_SYNC); + break; + } + case nvlink_train_sublink_to_off: + { + // OFF state transitions are not supported/tested + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_sublink_hs_to_safe: + { + // not implemented/supported now + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_sublink_safe_to_off: + { + // OFF state transitions are not supported/tested + status = NVL_BAD_ARGS; + break; + } + default: + { + status = NVL_BAD_ARGS; + break; + } + } + + // + // always get the latest link state values so that + // user has additional information other than just the return value. + // + nvlink_core_get_endpoint_state(localLink, &subLinkParams->localEndState); + + // Release the per-link lock + nvlink_lib_link_locks_release(&localLink, 1); + + return status; +} + +/** + * Send INITOPTIMIZE on the given internode links + * + * @param[in] initoptimizeParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_train_internode_links_initoptimize +( + nvlink_train_internode_links_initoptimize *initoptimizeParams +) +{ + nvlink_link *endpoint = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 numLinks = 0; + NvU32 i = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + if (initoptimizeParams->endPointCount > NVLINK_MAX_NVLINK_ENDPOINTS) + { + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. 
Proceed to traversing the device and + // link lists and connections list + // + + for (i = 0; i < initoptimizeParams->endPointCount; i++) + { + endpoint = NULL; + nvlink_core_get_link_by_endpoint(&initoptimizeParams->endPoints[i], &endpoint); + + // we can't send INITOPTIMIZE if the endpoint is not found + if (endpoint == NULL) + { + // + // Couldn't find the endpoint registered in the core library. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + else if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = endpoint; + numLinks++; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + for (i = 0; i < numLinks; i++) + { + // INITOPTIMIZE is not supported before NVLink 3.0 + if (links[i]->version < NVLINK_DEVICE_VERSION_30) + continue; + + // Continue if the link is already active, nothing to do + if ((nvlink_core_check_link_state(links[i], NVLINK_LINKSTATE_HS)) && + (nvlink_core_check_tx_sublink_state(links[i], NVLINK_SUBLINK_STATE_TX_HS)) && + (nvlink_core_check_rx_sublink_state(links[i], NVLINK_SUBLINK_STATE_RX_HS))) + { + continue; + } + + // + // For INITOPTIMIZE, link should be in SWCFG, else flag error and continue + // to next link + // + if (!((nvlink_core_check_link_state(links[i], NVLINK_LINKSTATE_SAFE)) && + (nvlink_core_check_tx_sublink_state(links[i], NVLINK_SUBLINK_STATE_TX_SAFE)) && + (nvlink_core_check_rx_sublink_state(links[i], NVLINK_SUBLINK_STATE_RX_SAFE)))) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: INITOPTIMIZE only works for links in SAFE %s:%s.\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + continue; + } + + status = links[i]->link_handlers->set_dl_link_mode(links[i], + NVLINK_LINKSTATE_INITOPTIMIZE, + NVLINK_STATE_CHANGE_ASYNC); + + // Although it failed we need to continue on other links. 
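+        // The ioctl as a whole still returns NVL_SUCCESS in that case.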
+ if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: INITOPTIMIZE failed on Device:Link %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + if (links != NULL) + { + nvlink_free((void *)links); + } + return NVL_SUCCESS; +} + +/** + * Send POSTINITOPTIMIZE on the given internode links + * + * @param[in] initoptimizeParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_train_internode_links_post_initoptimize +( + nvlink_train_internode_links_post_initoptimize *postinitoptimizeParams +) +{ + nvlink_link *endpoint = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 numLinks = 0; + NvU32 i = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + if (postinitoptimizeParams->endPointCount > NVLINK_MAX_NVLINK_ENDPOINTS) + { + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device and + // link lists and connections list + // + + for (i = 0; i < postinitoptimizeParams->endPointCount; i++) + { + endpoint = NULL; + nvlink_core_get_link_by_endpoint(&postinitoptimizeParams->endPoints[i], &endpoint); + + // we can't send INITOPTIMIZE if the endpoint is not found + if (endpoint == NULL) + { + // + // Couldn't find the endpoint registered in the core library. 
Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + else if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = endpoint; + numLinks++; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + for (i = 0; i < numLinks; i++) + { + // POST_INITOPTIMIZE is not supported before NVLink 3.0 + if (links[i]->version < NVLINK_DEVICE_VERSION_30) + continue; + + // Continue if the link is already active, nothing to do + if ((nvlink_core_check_link_state(links[i], NVLINK_LINKSTATE_HS)) && + (nvlink_core_check_tx_sublink_state(links[i], NVLINK_SUBLINK_STATE_TX_HS)) && + (nvlink_core_check_rx_sublink_state(links[i], NVLINK_SUBLINK_STATE_RX_HS))) + { + continue; + } + + status = links[i]->link_handlers->set_dl_link_mode(links[i], + NVLINK_LINKSTATE_POST_INITOPTIMIZE, + NVLINK_STATE_CHANGE_ASYNC); + + // Although it failed we need to continue on other links. + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: POST_INITOPTIMIZE failed on Device:Link %s:%s\n", + __FUNCTION__, links[i]->dev->deviceName, links[i]->linkName)); + } + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + if (links != NULL) + { + nvlink_free((void *)links); + } + return NVL_SUCCESS; +} + +/** + * Train the internode connection links to the target state + * + * @param[in] linkParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_train_internode_conns_parallel +( + nvlink_train_internode_conns_parallel *linkParams +) +{ + nvlink_link *localLink = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 numLinks = 0; + NvU32 i = 0; + nvlink_link **links = NULL; + nvlink_internode_conn **interConns = NULL; + + links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + status = NVL_NO_MEM; + goto nvlink_lib_ctrl_train_internode_conns_parallel_end; + } + + interConns = (nvlink_internode_conn **)nvlink_malloc( + sizeof(nvlink_internode_conn *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (interConns == NULL) + { + status = NVL_NO_MEM; + goto nvlink_lib_ctrl_train_internode_conns_parallel_end; + } + + if (linkParams->localEndPointCount > NVLINK_MAX_PARALLEL_CONNS_TRAIN_COUNT) + { + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_train_internode_conns_parallel_end; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + goto nvlink_lib_ctrl_train_internode_conns_parallel_end; + } + + // + // Top-level lock is now acquired. 
Proceed to traversing the device and + // link lists and connections list + // + for (i = 0; i < linkParams->localEndPointCount; i++) + { + // + // look-up the nvlink link objects. Look-up will fail if there is a + // fabric node id mismatch. So an explicit check against self + // node id is not required. + // + nvlink_core_get_link_by_endpoint(&linkParams->localEndPoints[i], &localLink); + + // user specified link is not available + if (localLink == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Internode connection link train request with no matching local endpoint\n", + __FUNCTION__)); + + // + // Couldn't find the endpoint registered in the core library. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_train_internode_conns_parallel_end; + } + + nvlink_core_get_internode_conn(localLink, &(interConns[i])); + if (interConns[i] == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No Internode connection exists for local endpoint %s: %s.\n", + __FUNCTION__, localLink->dev->deviceName, localLink->linkName)); + + // + // Couldn't find an associated connection for the endpoint. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + status = NVL_BAD_ARGS; + goto nvlink_lib_ctrl_train_internode_conns_parallel_end; + } + + links[numLinks] = localLink; + numLinks++; + } + + // Acquire the per-link lock + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + goto nvlink_lib_ctrl_train_internode_conns_parallel_end; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + switch (linkParams->trainTo) + { + case nvlink_train_link_off_to_swcfg: + { + // OFF to SAFE is part of initialization sequence as of now. + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_link_swcfg_to_active: + { + status = nvlink_core_train_internode_conns_from_swcfg_to_active( + interConns, numLinks, linkParams->isMasterEnd, NVLINK_STATE_CHANGE_SYNC); + break; + } + case nvlink_train_link_to_off: + { + // OFF state transitions are not supported/tested + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_link_active_to_swcfg: + { + // not implemented/supported now + status = NVL_BAD_ARGS; + break; + } + case nvlink_train_link_swcfg_to_off: + { + // OFF state transitions are not supported/tested + status = NVL_BAD_ARGS; + break; + } + default: + { + status = NVL_BAD_ARGS; + break; + } + } + + for (i = 0; i < numLinks; i++) + { + + // + // always get the latest link state values so that + // user has additional information other than just the return value. 
+ // + nvlink_core_get_endpoint_state(links[i], &linkParams->localEndStates[i]); + } + + // Release the per-link lock + nvlink_lib_link_locks_release(links, numLinks); + +nvlink_lib_ctrl_train_internode_conns_parallel_end: + + if (links != NULL) + { + nvlink_free((void *)links); + } + if (interConns != NULL) + { + nvlink_free((void *)interConns); + } + + return status; +} + +/** + * Get the device information for all registered devices + * + * @param[in] infoParams IOCTL params + * + * return NvlStatus + */ +static NvlStatus +nvlink_lib_ctrl_get_devices_info +( + nvlink_get_devices_info *infoParams +) +{ + nvlink_device *dev = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 numDevices = 0; + + // Initialize number of devices to 0 + infoParams->numDevice = 0; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device and + // link lists and connections list + // + + FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + // total number of devices should be within NVLINK_DEVICE_INSTANCE_MAX + if (numDevices >= NVLINK_DEVICE_INSTANCE_MAX) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numDevices >= NVLINK_DEVICE_INSTANCE_MAX", + __FUNCTION__)); + + nvlink_assert(0); + nvlink_lib_top_lock_release(); + return NVL_ERR_INVALID_STATE; + } + + // copy device information + nvlink_core_copy_device_info(dev, &infoParams->devInfo[numDevices]); + numDevices++; + } + + infoParams->numDevice = numDevices; + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; +} + +static NvlStatus +nvlink_lib_ctrl_acquire_capability +( + nvlink_ioctrl_params *ctrlParams, + nvlink_acquire_capability *capParams +) +{ + NvlStatus status; + + if (capParams == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad ioctl capability ctrl params specified.\n", + __FUNCTION__)); + return NVL_BAD_ARGS; + } + + status = nvlink_acquire_fabric_mgmt_cap(ctrlParams->osPrivate, + capParams->capDescriptor); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire fabric mgmt capability.\n", + __FUNCTION__)); + return status; + } + + return NVL_SUCCESS; +} + +static NvlStatus nvlink_lib_ctrl_get_link_state +( + nvlink_get_link_state *linkParams +) +{ + nvlink_link *endpoint = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 numLinks = 0; + NvU32 i = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + if (linkParams->endPointCount > NVLINK_MAX_NVLINK_ENDPOINTS) + { + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. 
Proceed to traversing the device and + // link lists and connections list + // + + for (i = 0; i < linkParams->endPointCount; i++) + { + endpoint = NULL; + nvlink_core_get_link_by_endpoint(&linkParams->endPoints[i], &endpoint); + + // we can't send this command if the endpoint is not found + if (endpoint == NULL) + { + // + // Couldn't find the endpoint registered in the core library. Release + // the top-level lock and return + // + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + else if (numLinks >= NVLINK_MAX_NVLINK_ENDPOINTS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = endpoint; + numLinks++; + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + for (i = 0; i < numLinks; i++) + { + // Wait for the link state to change. + status = nvlink_core_poll_link_state(links[i], + NVLINK_LINKSTATE_HS, + NVLINK_TRANSITION_POST_HS_TIMEOUT); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Unable to set link state to ACTIVE for link" + " %s:%s \n", + __FUNCTION__, + links[i]->dev->deviceName, links[i]->linkName)); + } + else + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_SETUP, + "%s: Successfully able to set link state to ACTIVE for link" + " %s:%s \n", + __FUNCTION__, + links[i]->dev->deviceName, links[i]->linkName)); + } + + nvlink_core_get_endpoint_state(links[i], &linkParams->endState[i]); + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + if (links != NULL) + { + nvlink_free((void *)links); + } + + return NVL_SUCCESS; +} + +static NvlStatus +nvlink_lib_ctrl_get_device_link_states +( + nvlink_get_device_link_states *params +) +{ + nvlink_link *endpoint = NULL; + nvlink_device *dev = NULL; + nvlink_device *devIter = NULL; + nvlink_link *remoteLink = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 numLinks = 0; + NvU32 i = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_NVLINK_ENDPOINTS); + if (links == NULL) + { + return NVL_NO_MEM; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // look-up user requested nvlink device object + nvlink_core_get_device_by_devinfo(¶ms->devInfo, &dev); + if (dev == NULL) + { + // + // Couldn't find the device ptr in the core library. Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return NVL_BAD_ARGS; + } + + // + // Top-level lock is now acquired. 
Proceed to traversing the list + // of devices and list of links to lock all links + // + FOR_EACH_DEVICE_REGISTERED(devIter, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(endpoint, devIter, node) + { + if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + links[numLinks] = endpoint; + numLinks++; + } + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return status; + } + + // + // All the required per-link locks are now successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + nvlink_assert((links != NULL) && (numLinks > 0)); + + for (i = 0; i < numLinks; ++i) + { + // Check RxDet status of the link and repopulate as necessary + nvlink_core_get_rx_detect(&links[i], 1, 0); + + // Get the endpoint states of the link + nvlink_core_get_endpoint_state(links[i], &(params->endStates[i])); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: link 0x%x -- rxDet status 0x%x, linkMode 0x%x,\n", + __FUNCTION__, i, links[i]->bRxDetected, params->endStates[i].linkMode)); + + // + // If the link succeeds rxDet then go through and find its peer link. What is important + // is not actually finding the link, but making sure the corelib goes through the discovery + // process and has endpoints cache the remote information in the corelib such that + // FM or endpoints can query the corelib for the topology of the system. + // + if (links[i]->bRxDetected) + { + remoteLink = NULL; + nvlink_core_discover_and_get_remote_end(links[i], &remoteLink, 0); + + if (remoteLink == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: link 0x%x: couldn't find link pair\n", + __FUNCTION__, i)); + continue; + } + + // If the link is in active, issue postActive settings + if (params->endStates[i].linkMode == nvlink_link_mode_active) + { + links[i]->link_handlers->training_complete(links[i]); + } + } + } + + params->endStatesCount = numLinks; + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + if (links != NULL) + { + nvlink_free((void *)links); + } + + return status; +} diff --git a/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_discovery_entry.c b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_discovery_entry.c new file mode 100644 index 000000000..afbcb1717 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_discovery_entry.c @@ -0,0 +1,281 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" +#include "nvlink_lock.h" + +/** + * Get the connected remote endpoint information + * + * For a given link, return the remote endpoint details it is connected to. + * If there is no connection associated with the specified link, then, the + * conn_info.connected member will be NV_FALSE. + * + * Note: This routine will not initiate any link initialization or topology + * discovery. + * + * @param[in] link NVLink Link pointer + * @param[out] conn_info Details of remote endpoint + */ +NvlStatus +nvlink_lib_get_remote_conn_info +( + nvlink_link *link, + nvlink_conn_info *conn_info +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_link *remoteEnd = NULL; + nvlink_intranode_conn *intraConn = NULL; + nvlink_internode_conn *interConn = NULL; + NvU32 numLinks = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + // Initialize connected state to false + conn_info->bConnected = NV_FALSE; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the + // connnection list + // + + // Find the associated intranode connection with this link + nvlink_core_get_intranode_conn(link, &intraConn); + if (intraConn != NULL) + { + // Get the required remote endpoint of the connection + remoteEnd = (intraConn->end0 == link ? 
+ intraConn->end1 : intraConn->end0); + + // Mark the connected state + conn_info->bConnected = NV_TRUE; + + if ((numLinks+1) >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = link; + numLinks++; + + links[numLinks] = remoteEnd; + numLinks++; + } + + // + // On multi-node systems, check the internode connection + // list as well to return the connection information + // + nvlink_core_get_internode_conn(link, &interConn); + if (interConn != NULL) + { + // Mark the connected state + conn_info->bConnected = NV_TRUE; + + links[numLinks] = link; + numLinks++; + } + + // Acquire per-link lock + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return status; + } + + if (intraConn != NULL) + { + nvlink_core_copy_intranode_conn_info(remoteEnd, conn_info); + } + else + { + if (interConn != NULL) + { + nvlink_core_copy_internode_conn_info(&interConn->remote_end, + conn_info); + } + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + // Release top-level lock + nvlink_lib_top_lock_release(); + + if (links != NULL) + { + nvlink_free((void *)links); + } + return NVL_SUCCESS; +} + +/** + * Get the connected remote endpoint information + * + * For a given link, return the remote endpoint it is connected to. + * + * Note: This routine triggers topology discovery on the set of + * links registered in the core library + * + * @param[in] end NVLink Link pointer + * @param[out] conn_info Details of remote endpoint + * @param[in] flags Flags + */ +NvlStatus +nvlink_lib_discover_and_get_remote_conn_info +( + nvlink_link *end, + nvlink_conn_info *conn_info, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_link *link = NULL; + nvlink_link *remote_end = NULL; + nvlink_device *dev = NULL; + NvU32 numLinks = 0; + + nvlink_link **links = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (links == NULL) + { + return NVL_NO_MEM; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + nvlink_free((void *)links); + return status; + } + + // + // Top-level lock is now acquired. 
Proceed to traversing the device + // and link lists + // + + FOR_EACH_DEVICE_REGISTERED(dev, nvlinkLibCtx.nv_devicelist_head, node) + { + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock and free links + nvlink_lib_top_lock_release(); + nvlink_free((void *)links); + return NVL_ERR_INVALID_STATE; + } + + links[numLinks] = link; + numLinks++; + } + } + + // Acquire the per-link locks + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + nvlink_free((void *)links); + return status; + } + + // Initialize connected state to false + conn_info->bConnected = NV_FALSE; + + // Get the remote_end of the link + nvlink_core_discover_and_get_remote_end(end, &remote_end, flags); + + if (remote_end) + { + // mark the connected state + conn_info->bConnected = NV_TRUE; + nvlink_core_copy_intranode_conn_info(remote_end, conn_info); + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + if (links != NULL) + { + nvlink_free((void *)links); + } + return NVL_SUCCESS; +} diff --git a/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_initialize_entry.c b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_initialize_entry.c new file mode 100644 index 000000000..63474fcb5 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_initialize_entry.c @@ -0,0 +1,118 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" +#include "nvlink_lock.h" + +/** + * Re-Initialize a given link from OFF to SWCFG + * + * @param[in] link NVLink Link pointer + * @param[in] flags Flag to track if the initialization is aync/async + * + * return NVL_SUCCESS if the initialization was successful + */ +NvlStatus +nvlink_lib_reinit_link_from_off_to_swcfg +( + nvlink_link *link, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_intranode_conn *conn = NULL; + nvlink_link *links[2] = {0}; + + if (!link) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad link pointer specified.\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the + // connection list + // + + nvlink_core_get_intranode_conn(link, &conn); + if (!conn) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connection was found for this link.\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return NVL_ERR_GENERIC; + } + + links[0] = conn->end0; + links[1] = conn->end1; + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(links, 2); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + { + nvlink_core_init_links_from_off_to_swcfg(links, 2, flags); + } + // Release the per-link locks + nvlink_lib_link_locks_release(links, 2); + + return NVL_SUCCESS; +} diff --git a/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_link_mgmt_entry.c b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_link_mgmt_entry.c new file mode 100644 index 000000000..1dc7733ec --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_link_mgmt_entry.c @@ -0,0 +1,323 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" +#include "nvlink_lock.h" + +/** + * TODO: Rework this function to acquire locks and update callers + * + * Check if the device has no links registered + * + * @param[in] dev NVLink Device pointer + * + * return NV_TRUE if the device has no links registered + */ +NvBool +nvlink_lib_is_link_list_empty +( + nvlink_device *dev +) +{ + NvBool isEmpty = NV_TRUE; + + isEmpty = nvListIsEmpty(&dev->link_list); + + return isEmpty; +} + +/** + * Get the link associated with the given link id. + * + * @param[in] device NVLink Device Pointer + * @param[in] link_id Link Id of the given link + * @param[out] link NVLink Link pointer + * + * return NVL_SUCCESS on success + */ +NvlStatus +nvlink_lib_get_link +( + nvlink_device *device, + NvU32 link_id, + nvlink_link **link +) +{ + nvlink_link *cur = NULL; + NvlStatus status = -NVL_NOT_FOUND; + + if (device == NULL || link == NULL) + { + return -NVL_BAD_ARGS; + } + + *link = NULL; + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the + // link list for the device + // + + // Reset status to -NVL_NOT_FOUND + status = -NVL_NOT_FOUND; + + FOR_EACH_LINK_REGISTERED(cur, device, node) + { + if (cur->linkNumber == link_id) + { + *link = cur; + status = NVL_SUCCESS; + break; + } + } + + // Release the top level-lock + nvlink_lib_top_lock_release(); + + return status; +} + +/** + * Set the given link as the link master. + * This requires that the remote end of the link is known, and that it + * hasn't set itself to be the master. + * + * Note: This function is used by RM to set master attribute to a link + * in order to handle GPU lock inversion problem while servicing + * link interrupts(re-training). With external fabric management + * enabled, we don't have the issue. Also we don't have to worry + * about the inter-node connections which are managed by FM. + * + * @param[in] link NVLink Link pointer + * + * return NVL_SUCCESS if the master was set + */ +NvlStatus +nvlink_lib_set_link_master +( + nvlink_link *link +) +{ + nvlink_link *remote_end = NULL; + NvlStatus status = NVL_SUCCESS; + nvlink_intranode_conn *conn = NULL; + nvlink_link *links[2] = {0}; + NvU32 numLinks = 0; + + if (link == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad link pointer specified.\n", + __FUNCTION__)); + return NVL_ERR_GENERIC; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the + // connection list + // + + links[numLinks] = link; + numLinks++; + + nvlink_core_get_intranode_conn(link, &conn); + if (conn != NULL) + { + remote_end = (conn->end0 == link ? 
conn->end1 : conn->end0); + links[numLinks] = remote_end; + numLinks++; + } + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // Early return if we've already done this + if (link->master) + { + status = NVL_SUCCESS; + } + else + { + // Make sure the remote end exists and hasn't claimed the master yet + if (remote_end == NULL || remote_end->master) + { + status = NVL_ERR_INVALID_STATE; + } + else + { + link->master = NV_TRUE; + } + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + return status; +} + +/** + * Get the link master associated with the given link. + * This may be the given link, or it may be the remote end. In the case + * when no master is assigned or the remote end is not known, this will + * return an error. + * + * @param[in] link NVLink Link pointer + * @param[out] master Master endpoint for the link + * + * return NVL_SUCCESS if the master was found + */ +NvlStatus +nvlink_lib_get_link_master +( + nvlink_link *link, + nvlink_link **master +) +{ + nvlink_link *remote_end = NULL; + nvlink_intranode_conn *conn = NULL; + NvlStatus status = NVL_SUCCESS; + nvlink_link *links[2] = {0}; + NvU32 numLinks = 0; + + if (link == NULL || master == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad link pointer specified.\n", + __FUNCTION__)); + return NVL_ERR_GENERIC; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the + // connection list + // + + links[numLinks] = link; + numLinks++; + + nvlink_core_get_intranode_conn(link, &conn); + if (conn != NULL) + { + remote_end = (conn->end0 == link ? 
conn->end1 : conn->end0); + links[numLinks] = remote_end; + numLinks++; + } + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(links, numLinks); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + if (link->master) + { + *master = link; + } + else + { + // Make sure the remote end exists and hasn't claimed the master yet + if (remote_end == NULL) + { + status = NVL_ERR_INVALID_STATE; + } + + *master = remote_end; + } + + // Release the per-link locks + nvlink_lib_link_locks_release(links, numLinks); + + return status; +} + + diff --git a/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_registration_entry.c b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_registration_entry.c new file mode 100644 index 000000000..29345c796 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_registration_entry.c @@ -0,0 +1,508 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" + +#include "nvlink_lock.h" + +static NvBool _nvlink_lib_is_device_registered(nvlink_device *); +static NvBool _nvlink_lib_is_link_registered(nvlink_device *, nvlink_link *); + +/** + * Associates device with the NVLink Core Library + * + * @param[in] dev NVLink Device pointer + * + * return NVL_SUCCESS if the device is registered successfully + */ +NvlStatus +nvlink_lib_register_device +( + nvlink_device *dev +) +{ + NvlStatus lock_status = NVL_SUCCESS; + NvlStatus result = NVL_SUCCESS; + + if (dev == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad device pointer\n", + __FUNCTION__)); + return NVL_ERR_GENERIC; + } + + // Acquire top-level lock + lock_status = nvlink_lib_top_lock_acquire(); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return lock_status; + } + + // Top-level lock is now acquired + + // Assign the deviceId for the device + dev->deviceId = (NvU64)(NvUPtr)dev; + + // Assign fabric node id to the device object + dev->nodeId = nvlinkLibCtx.nodeId; + + // Register the device if not yet registered + if (!_nvlink_lib_is_device_registered(dev)) + { + // Initialize the node and link list for the device + nvListInit(&dev->link_list); + nvListInit(&dev->node); + + // Add the device to the list of devices + nvListAppend(&dev->node, &nvlinkLibCtx.nv_devicelist_head.node); + + result = NVL_SUCCESS; + } + else + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s is already registered in nvlink core\n", + __FUNCTION__, dev->deviceName)); + + result = NVL_ERR_GENERIC; + } + + // Release top-level lock + nvlink_lib_top_lock_release(); + + return result; +} + +/** + * Unassociates device from the NVLink Core + * Includes removing any links related to the device if still registered + * + * @param[in] dev NVLink Device pointer + * + * return NVL_SUCCESS if the device is un-registered successfully + */ +NvlStatus +nvlink_lib_unregister_device +( + nvlink_device *dev +) +{ + NvBool bConnected = NV_FALSE; + nvlink_intranode_conn *intra_conn = NULL; + nvlink_internode_conn *inter_conn = NULL; + NvlStatus lock_status = NVL_SUCCESS; + NvU32 numLinks = 0; + nvlink_link *curLink = NULL; + nvlink_link *nextLink = NULL; + + if (dev == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad device pointer\n", + __FUNCTION__)); + return NVL_ERR_GENERIC; + } + + // Acquire top-level lock + lock_status = nvlink_lib_top_lock_acquire(); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return lock_status; + } + + // Top-level lock is now acquired + + // Loop to unregister each link from the device + FOR_EACH_LINK_REGISTERED_SAFE(curLink, nextLink, dev, node) + { + // Reset the variables specific to each link + bConnected = NV_FALSE; + intra_conn = NULL; + inter_conn = NULL; + numLinks = 0; + + // We will use at most 2 links in this function - the link and it's partner + nvlink_link *links[2] = {0}; + + links[numLinks] = curLink; + numLinks++; + + // Check if there's an intranode connection present + nvlink_core_get_intranode_conn(curLink, &intra_conn); + if (intra_conn != NULL) + { + // Mark the endpoint as connected + bConnected = NV_TRUE; + + 
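+            //
+            // Note: when an intranode connection exists, the remote endpoint
+            // is captured below alongside curLink so that both ends of the
+            // connection are covered by per-link locks before the connection
+            // is torn down. This keeps the locking pattern consistent with
+            // the rest of the library: top-level lock first, then every
+            // affected per-link lock.
+            //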
if (intra_conn->end0 == curLink) + { + links[numLinks] = intra_conn->end1; + } + else + { + links[numLinks] = intra_conn->end0; + } + numLinks++; + } + + // + // Check if there's an internode connection present + // Only the local end required for internode connection + // (which is registered above) so just detect this for now + // + nvlink_core_get_internode_conn(curLink, &inter_conn); + + // Acquire per-link lock + lock_status = nvlink_lib_link_locks_acquire(links, numLinks); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + nvlink_lib_top_lock_release(); + return lock_status; + } + + if (intra_conn != NULL) + { + // Remove the associated intranode connection with this link from the list + nvlink_core_remove_intranode_conn(intra_conn); + } + + if (inter_conn != NULL) + { + // Remove the associated internode connection with this link from the list + nvlink_core_remove_internode_conn(curLink); + } + + // Remove the link from the link list for the device + nvListDel(&curLink->node); + + // Release and free the link locks + nvlink_lib_link_locks_release(links, numLinks); + nvlink_lib_link_lock_free(curLink); + + curLink->link_handlers->remove(curLink); + + // If the endpoint was not connected + nvlinkLibCtx.notConnectedEndpoints = ( bConnected ? + nvlinkLibCtx.notConnectedEndpoints : + nvlinkLibCtx.notConnectedEndpoints - 1 ); + + // Update count of registered endpoints + nvlinkLibCtx.registeredEndpoints--; + } + + nvListDel(&dev->node); + + // Release top-level lock + nvlink_lib_top_lock_release(); + + return NVL_SUCCESS; +} + +/** + * Associates link with a device in the NVLink Core library + * + * @param[in] dev NVLink Device pointer + * @param[in] link NVLink Link pointer + * + * return NVL_SUCCESS if the link is registered successfully + */ +NvlStatus +nvlink_lib_register_link +( + nvlink_device *dev, + nvlink_link *link +) +{ + NvlStatus lock_status = NVL_SUCCESS; + NvlStatus result = NVL_SUCCESS; + + if (dev == NULL || link == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad device or link pointer\n", + __FUNCTION__)); + return NVL_ERR_GENERIC; + } + + // Allocate per-link lock for the link to be registered + lock_status = nvlink_lib_link_lock_alloc(link); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to alloc per-link lock\n", + __FUNCTION__)); + + return lock_status; + } + + // Acquire top-level lock + lock_status = nvlink_lib_top_lock_acquire(); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + // + // Since the per-link lock will be allocated when this function + // is run again. Free the unused allocated lock. 
+ // + nvlink_lib_link_lock_free(link); + + return lock_status; + } + + // Top-level lock is now acquired + + // Assign the linkId for the device + link->linkId = (NvU64)(NvUPtr) link; + + // Register the link if not yet registered + if (!_nvlink_lib_is_link_registered(dev, link)) + { + // Initialize the node for the link + nvListInit(&link->node); + + // Generate token for this link + link->token = (NvU64)(NvUPtr) link; + + // Add the link to the list of links for the device + nvListAppend(&link->node, &dev->link_list); + link->link_handlers->add(link); + + // Initialize training parameters + link->safe_retries = 0; + link->packet_injection_retries = 0; + + // Update count of registered endpoints + nvlinkLibCtx.registeredEndpoints++; + + // Indicate that a new endpoint is registered + nvlinkLibCtx.bNewEndpoints = NV_TRUE; + + result = NVL_SUCCESS; + } + else + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: %s: %s is already registered in nvlink core\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + result = NVL_ERR_GENERIC; + + // Free per-link lock since we don't have a new link + nvlink_lib_link_lock_free(link); + } + + // Release top-level lock + nvlink_lib_top_lock_release(); + + return result; +} + +/** + * Unassociates link from a device in the NVLink Core library + * + * @param[in] link NVLink Link pointer + * + * return NVL_SUCCESS if the link is un-registered successfully + */ +NvlStatus +nvlink_lib_unregister_link +( + nvlink_link *link +) +{ + NvBool bConnected = NV_FALSE; + nvlink_intranode_conn *intra_conn = NULL; + nvlink_internode_conn *inter_conn = NULL; + NvlStatus lock_status = NVL_SUCCESS; + NvU32 numLinks = 0; + + // We will use at most 2 links in this function - the link and it's partner + nvlink_link *links[2] = {0}; + + if (link == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad link pointer\n", + __FUNCTION__)); + return NVL_ERR_GENERIC; + } + + // Acquire top-level lock + lock_status = nvlink_lib_top_lock_acquire(); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return lock_status; + } + + // Top-level lock is now acquired + + links[numLinks] = link; + numLinks++; + + // Check if there's an intranode connection present + nvlink_core_get_intranode_conn(link, &intra_conn); + if (intra_conn != NULL) + { + // Mark the endpoint as connected + bConnected = NV_TRUE; + + if (intra_conn->end0 == link) + { + links[numLinks] = intra_conn->end1; + } + else + { + links[numLinks] = intra_conn->end0; + } + numLinks++; + } + + // + // Check if there's an internode connection present + // Only the local end required for internode connection + // (which is registered above) so just detect this for now + // + nvlink_core_get_internode_conn(link, &inter_conn); + + // Acquire per-link lock + lock_status = nvlink_lib_link_locks_acquire(links, numLinks); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + nvlink_lib_top_lock_release(); + return lock_status; + } + + if (intra_conn != NULL) + { + // Remove the associated intranode connection with this link from the list + nvlink_core_remove_intranode_conn(intra_conn); + } + + if (inter_conn != NULL) + { + // Remove the associated internode connection with this link from the list + nvlink_core_remove_internode_conn(link); + } + + 
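+    //
+    // Teardown ordering: the intranode/internode connection entries are
+    // removed first (above) while the per-link locks for both endpoints and
+    // the top-level lock are still held. The link is then unlinked from its
+    // device, its per-link lock is released and freed, and only after that
+    // is the driver's remove() handler invoked and the endpoint counts
+    // updated.
+    //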
// Remove the link from the link list for the device + nvListDel(&link->node); + + // Release and free the locks + nvlink_lib_link_locks_release(links, numLinks); + nvlink_lib_link_lock_free(link); + + link->link_handlers->remove(link); + + // If the endpoint was not connected + nvlinkLibCtx.notConnectedEndpoints = ( bConnected ? + nvlinkLibCtx.notConnectedEndpoints : + nvlinkLibCtx.notConnectedEndpoints - 1 ); + + // Update count of registered endpoints + nvlinkLibCtx.registeredEndpoints--; + + // Release top-level lock + nvlink_lib_top_lock_release(); + + return NVL_SUCCESS; +} + +/** + * Check if the nvlink device is already registered in the core library + * + * @param[in] dev NVLink Device pointer + * + * return NV_TRUE if the device is already registered + */ +static NvBool +_nvlink_lib_is_device_registered +( + nvlink_device *dev +) +{ + nvlink_device *tmpDev = NULL; + + FOR_EACH_DEVICE_REGISTERED(tmpDev, nvlinkLibCtx.nv_devicelist_head, node) + { + if (dev->deviceId == tmpDev->deviceId) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/** + * Check if the nvlink link is already registered in the core library + * + * @param[in] dev NVLink Device pointer + * @param[in] link NVLink Link pointer + * + * return NV_TRUE if the link is already registered for the device + */ +static NvBool +_nvlink_lib_is_link_registered +( + nvlink_device *dev, + nvlink_link *link +) +{ + nvlink_link *tmpLink = NULL; + + FOR_EACH_LINK_REGISTERED(tmpLink, dev, node) + { + if (link->linkId == tmpLink->linkId) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} diff --git a/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_shutdown_entry.c b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_shutdown_entry.c new file mode 100644 index 000000000..760e5c917 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_shutdown_entry.c @@ -0,0 +1,777 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" +#include "nvlink_lock.h" + +/** + * [CLEAN SHUTDOWN] + * + * Shutdown given links of a device from active to L2 state + * + * param[in] dev NVLink Device pointer + * param[in] linkMask Mask of links to be shutdown + * param[in] flags Flags to track if the transition is sync/async + * + * return NVL_SUCCESS if the links transition to L2 + */ +NvlStatus +nvlink_lib_powerdown_links_from_active_to_L2 +( + nvlink_device *dev, + NvU32 linkMask, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_link *link = NULL; + nvlink_intranode_conn **conns = NULL; + nvlink_intranode_conn *conn = NULL; + NvU32 numLinks = 0; + NvU32 numConns = 0; + NvU32 connCount = 0; + NvU32 i; + NvU32 lockLinkCount = 0; + nvlink_link **lockLinks = NULL; + + + if (dev == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad device pointer specified.\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + lockLinks = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (lockLinks == NULL) + { + return NVL_NO_MEM; + } + + // Allocate space for the connection list + conns = (nvlink_intranode_conn **)nvlink_malloc( + sizeof(nvlink_intranode_conn *) * NVLINK_MAX_SYSTEM_LINK_NUM); + + if (conns == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for connections list\n", + __FUNCTION__)); + + nvlink_free((void *)lockLinks); + return NVL_ERR_GENERIC; + } + + // Initialize the list of links + nvlink_memset(conns, 0, sizeof(nvlink_intranode_conn *) * 32); + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + goto nvlink_lib_powerdown_links_from_active_to_L2_end; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists and connection lists + // + + // Get the array of link endpoints whose lock needs to be acquired + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (!(linkMask & (1 << link->linkNumber))) + { + continue; + } + + // Get the connection associated with the link + conn = NULL; + nvlink_core_get_intranode_conn(link, &conn); + + if (conn == NULL) + { + // + // Could not find the connection for the link. 
Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_powerdown_links_from_active_to_L2_end; + } + else if ((numLinks + 1) >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: numLinks >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + status = NVL_ERR_INVALID_STATE; + goto nvlink_lib_powerdown_links_from_active_to_L2_end; + } + + lockLinks[lockLinkCount] = conn->end0; + lockLinkCount++; + + lockLinks[lockLinkCount] = conn->end1; + lockLinkCount++; + } + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(lockLinks, lockLinkCount); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_powerdown_links_from_active_to_L2_end; + } + + // Filter the connections which are already in SLEEP + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (!(linkMask & (1 << link->linkNumber))) + { + continue; + } + + // If the link received a L2 exit request, but never exited L2 + if (link->powerStateTransitionStatus == nvlink_power_state_exiting_L2) + { + // Update the power state transition status + link->powerStateTransitionStatus = nvlink_power_state_in_L2; + continue; + } + + // Get the connection associated with the link + conn = NULL; + nvlink_core_get_intranode_conn(link, &conn); + + // Check the connection state to verify if the link is already in SLEEP + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_SLEEP); + if (status == NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link is already in sleep %s: %s.\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + continue; + } + + // Link is not in SLEEP. 
Update power state transition status for the link + link->powerStateTransitionStatus = nvlink_power_state_entering_L2; + } + + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (!(linkMask & (1 << link->linkNumber))) + { + continue; + } + + // Check if the link desires to enter SLEEP + if (link->powerStateTransitionStatus == nvlink_power_state_entering_L2) + { + // Get the connection associated with the link + conn = NULL; + nvlink_core_get_intranode_conn(link, &conn); + + // The connection will enter SLEEP only when both its endpoints desire to enter SLEEP + if ((conn->end0->powerStateTransitionStatus == nvlink_power_state_entering_L2) && + (conn->end1->powerStateTransitionStatus == nvlink_power_state_entering_L2)) + { + // Increment the #connections considered for entering L2 + numConns++; + + // Check if the the connection is already included in the list + for (i = 0; i < connCount; i++) + { + if (conns[i] == conn) + break; + } + + // If this is a new connection, add it to the list + if (i == connCount) + { + conns[connCount] = conn; + connCount++; + } + } + numLinks++; + } + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // Clear the status variable + status = NVL_SUCCESS; + + if (connCount > 0) + { + status = nvlink_core_powerdown_intranode_conns_from_active_to_L2(conns, connCount, flags); + } + + if (status == NVL_SUCCESS) + { + // + // If some links are waiting on the remote end to request sleep, + // update status to NVL_MORE_PROCESSING_REQUIRED + // + status = (numLinks != numConns ? NVL_MORE_PROCESSING_REQUIRED : NVL_SUCCESS); + } + + // Release the per-link locks + nvlink_lib_link_locks_release(lockLinks, lockLinkCount); + +nvlink_lib_powerdown_links_from_active_to_L2_end: + + if (conns != NULL) + { + nvlink_free((void *)conns); + } + + if (lockLinks != NULL) + { + nvlink_free((void *)lockLinks); + } + return status; +} + +/** + * [PSEUDO-CLEAN SHUTDOWN] + * + * Shutdown the given array of links from ACTIVE to OFF state + * + * param[in] links Array of links to shutdown + * param[in] numLinks Number of links to be shutdown + * param[in] flags Flags to track if the transition is sync/async + * + * return NVL_SUCCESS if the pseudo-clean shutdown is successful + */ +NvlStatus +nvlink_lib_powerdown_links_from_active_to_off +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_intranode_conn **conns = NULL; + nvlink_intranode_conn *conn = NULL; + NvU32 numConns = 0; + NvU32 i; + NvU32 lockLinkCount = 0; + nvlink_link **lockLinks = NULL; + + + if ((links == NULL) || (numLinks == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No links to shutdown\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + lockLinks = (nvlink_link **)nvlink_malloc( sizeof(nvlink_link *) * (2 * numLinks)); + if (lockLinks == NULL) + { + return NVL_NO_MEM; + } + + // Allocate space for the connection list + conns = (nvlink_intranode_conn **)nvlink_malloc( + sizeof(nvlink_intranode_conn *) * numLinks); + + if (conns == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for connections list\n", + __FUNCTION__)); + + nvlink_free((void *)lockLinks); + return NVL_ERR_GENERIC; + } + + nvlink_memset(conns, 0, sizeof(nvlink_intranode_conn *) * numLinks); + + // Acquire the top-level lock + status = 
nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + goto nvlink_lib_powerdown_links_from_active_to_off_end; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists and connection lists + // + + // + // Get the array of both local and remote endpoints whose lock needs + // to be acquired + // + for (i = 0; i < numLinks; i++) + { + conn = NULL; + nvlink_core_get_intranode_conn(links[i], &conn); + + if (conn == NULL) + { + continue; + } + + // Capture both the link and its end-point + lockLinks[lockLinkCount] = conn->end0; + lockLinkCount++; + + lockLinks[lockLinkCount] = conn->end1; + lockLinkCount++; + } + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(lockLinks, lockLinkCount); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + goto nvlink_lib_powerdown_links_from_active_to_off_end; + } + + // Sanity checking if the link is already in OFF/RESET state + for (i = 0; i < numLinks; i++) + { + conn = NULL; + nvlink_core_get_intranode_conn(links[i], &conn); + + if (conn == NULL) + { + continue; + } + + // Check if both ends of the connection are in L2 + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_SLEEP); + if (status == NVL_SUCCESS) + { + continue; + } + + // Check if both ends and their sublinks are in OFF mode + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_OFF); + if (status == NVL_SUCCESS) + { + continue; + } + + // Check if both ends are in RESET + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_RESET); + if (status == NVL_SUCCESS) + { + continue; + } + + conns[numConns] = conn; + numConns++; + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // Reset status to NVL_SUCCESS + status = NVL_SUCCESS; + + if (numConns > 0) + { + status = nvlink_core_powerdown_intranode_conns_from_active_to_off(conns, + numConns, + flags); + } + + // Release the per-link locks + nvlink_lib_link_locks_release(lockLinks, lockLinkCount); + +nvlink_lib_powerdown_links_from_active_to_off_end: + + if (conns != NULL) + { + nvlink_free((void *)conns); + } + + if (lockLinks != NULL) + { + nvlink_free((void *)lockLinks); + } + // + // Squash status. 
If any side of link doesn not respond the link is + // shutdown unilaterally + // + return NVL_SUCCESS; +} + +/** + * Power down the given array of links from ACTIVE to SWCFG state + * + * param[in] links Array of links to shutdown + * param[in] numLinks Number of links to be shutdown + * param[in] flags Flags to track if the transition is sync/async + * + * return NVL_SUCCESS if the transitions were successful + */ +NvlStatus +nvlink_lib_powerdown_links_from_active_to_swcfg +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_intranode_conn **conns = NULL; + nvlink_intranode_conn *conn = NULL; + NvU32 numConns = 0; + NvU32 i; + NvU32 lockLinkCount = 0; + nvlink_link **lockLinks = NULL; + + + if ((links == NULL) || (numLinks == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No links to shutdown\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // Allocate the link locks + lockLinks = (nvlink_link **)nvlink_malloc(sizeof(nvlink_link *) * (2 * numLinks)); + if (lockLinks == NULL) + { + return NVL_NO_MEM; + } + + // Allocate space for the connection list + conns = (nvlink_intranode_conn **)nvlink_malloc( + sizeof(nvlink_intranode_conn *) * numLinks); + + if (conns == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for connections list\n", + __FUNCTION__)); + + nvlink_free((void *)lockLinks); + return NVL_ERR_GENERIC; + } + + nvlink_memset(conns, 0, sizeof(nvlink_intranode_conn *) * numLinks); + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + goto nvlink_lib_powerdown_links_from_active_to_swcfg_end; + } + + // + // Top-level lock is now acquired. 
Proceed to traversing the device + // and link lists and connection lists + // + + // + // Get the array of both local and remote endpoints whose lock needs + // to be acquired + // + for (i = 0; i < numLinks; i++) + { + conn = NULL; + nvlink_core_get_intranode_conn(links[i], &conn); + + if (conn == NULL) + { + continue; + } + + // Capture both the link and its end-point + lockLinks[lockLinkCount] = conn->end0; + lockLinkCount++; + + lockLinks[lockLinkCount] = conn->end1; + lockLinkCount++; + } + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(lockLinks, lockLinkCount); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + goto nvlink_lib_powerdown_links_from_active_to_swcfg_end; + } + + // Sanity checking of links; if already in swfg state, skip it + for (i = 0; i < numLinks; i++) + { + nvlink_intranode_conn *conn = NULL; + + nvlink_core_get_intranode_conn(links[i], &conn); + if (conn == NULL) + { + continue; + } + + // Check if both ends and their sublinks are in SAFE mode + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_SAFE); + if (status == NVL_SUCCESS) + { + continue; + } + + conns[numConns] = conn; + numConns++; + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // Reset status to NVL_SUCCESS + status = NVL_SUCCESS; + + if (numConns > 0) + { + status = nvlink_core_powerdown_intranode_conns_from_active_to_swcfg(conns, + numConns, + flags); + } + + // Release the per-link locks + nvlink_lib_link_locks_release(lockLinks, lockLinkCount); + +nvlink_lib_powerdown_links_from_active_to_swcfg_end: + + if (conns != NULL) + { + nvlink_free((void *)conns); + } + + if (lockLinks != NULL) + { + nvlink_free((void *)lockLinks); + } + // + // Squash status. 
If any side of link doesn not respond the link is + // shutdown unilaterally + // + return NVL_SUCCESS; +} + +/** + * Reset the given array of links + * + * param[in] links Array of links to be reset + * param[in] numLinks Number of links to be shutdown + * param[in] flags Flags to track if the transition is sync/async + * + * return NVL_SUCCESS if the links were reset successfully + */ +NvlStatus +nvlink_lib_reset_links +( + nvlink_link **links, + NvU32 numLinks, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_intranode_conn **conns = NULL; + nvlink_intranode_conn *conn = NULL; + NvU32 numConns = 0; + NvU32 i; + NvU32 lockLinkCount = 0; + nvlink_link **lockLinks = NULL; + + + if ((links == NULL) || (numLinks == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No links to reset\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // Allocate space for the link locks + lockLinks = (nvlink_link **)nvlink_malloc( sizeof(nvlink_link *) * (2 * numLinks)); + if (lockLinks == NULL) + { + return NVL_NO_MEM; + } + + // Allocate space for the connection list + conns = (nvlink_intranode_conn **)nvlink_malloc( + sizeof(nvlink_intranode_conn *) * numLinks); + + if (conns == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for connections list\n", + __FUNCTION__)); + + nvlink_free((void *)lockLinks); + return NVL_ERR_GENERIC; + } + + nvlink_memset(conns, 0, sizeof(nvlink_intranode_conn *) * numLinks); + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + goto nvlink_lib_reset_links_end; + } + + // + // Top-level lock is now acquired. 
Proceed to traversing the device + // and link lists and connection lists + // + + // Sanity checking if the link is already in OFF/RESET state + for (i = 0; i < numLinks; i++) + { + conn = NULL; + + nvlink_core_get_intranode_conn(links[i], &conn); + if (conn == NULL) + { + continue; + } + + // Capture both the link and its end-point + lockLinks[lockLinkCount] = conn->end0; + lockLinkCount++; + + lockLinks[lockLinkCount] = conn->end1; + lockLinkCount++; + + conns[numConns] = conn; + numConns++; + } + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(lockLinks, lockLinkCount); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + goto nvlink_lib_reset_links_end; + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + if (numConns > 0) + { + status = nvlink_core_reset_intranode_conns(conns, numConns, flags); + } + + // Release the per-link locks + nvlink_lib_link_locks_release(lockLinks, lockLinkCount); + +nvlink_lib_reset_links_end: + + if (conns != NULL) + { + nvlink_free((void *)conns); + } + + if (lockLinks != NULL) + { + nvlink_free((void *)lockLinks); + } + + return status; +} diff --git a/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_training_entry.c b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_training_entry.c new file mode 100644 index 000000000..201860a1d --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/interface/nvlink_kern_training_entry.c @@ -0,0 +1,826 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "../nvlink_ctx.h" +#include "../nvlink_helper.h" +#include "nvlink_lock.h" + + +/** + * Check whether a group of links have completed training + * + * @param[in] links List of NVLink Link pointers + * @param[in] linkCount Count of #links + * + * return NL_SUCCESS if all links transitioned to Active + */ +NvlStatus +nvlink_lib_check_training_complete +( + nvlink_link **links, + NvU32 linkCount +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_link **lockLinks = NULL; + NvU32 lockLinkCount = 0; + NvU32 i; + + if (links == NULL || linkCount == 0) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad link pointer or linkCount!\n", + __FUNCTION__)); + return NVL_ERR_GENERIC; + } + + lockLinks = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (lockLinks == NULL) + { + return NVL_NO_MEM; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + goto nvlink_lib_check_training_complete_end; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists and connection lists + // + // + // Get the array of both local and remote endpoints whose lock needs + // to be acquired + // + for (i = 0; i < linkCount; i++) + { + if ((lockLinkCount + 1) >= NVLINK_MAX_SYSTEM_LINK_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: lockLinkCount >= NVLINK_MAX_SYSTEM_LINK_NUM", + __FUNCTION__)); + + nvlink_assert(0); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + status = NVL_ERR_INVALID_STATE; + goto nvlink_lib_check_training_complete_end; + } + + // Capture both the link and its end-point + lockLinks[lockLinkCount] = links[i]; + lockLinkCount++; + } + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(lockLinks, lockLinkCount); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + // Release the top-level lock + nvlink_lib_top_lock_release(); + goto nvlink_lib_check_training_complete_end; + } + + nvlink_lib_top_lock_release(); + + // Release the per-link locks + nvlink_lib_link_locks_release(lockLinks, lockLinkCount); +nvlink_lib_check_training_complete_end: + if (lockLinks != NULL) + { + nvlink_free((void *)lockLinks); + } + return status; +} + + +/** + * Train a given set of links from SWCFG to ACTIVE state + * + * Note: For training the links one by one - its the responsibility of + * the caller to call this function every time for each link + * + * @param[in] links List of NVLink Link pointers + * @param[in] linkCount Count of #links + * @param[in] flags Flag to track whether training is sync/async + * + * return NL_SUCCESS if the link state transition was a success + */ +NvlStatus +nvlink_lib_train_links_from_swcfg_to_active +( + nvlink_link **links, + NvU32 linkCount, + NvU32 flags +) +{ + nvlink_intranode_conn **conns = NULL; + nvlink_intranode_conn *conn = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 connCount = 0; + NvU32 i, j; + nvlink_link **lockLinks = NULL; + NvU32 lockLinkCount = 0; + + if (links == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad link pointer\n", + __FUNCTION__)); + + 
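+        // Nothing to train; fail early, before any allocations are made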
return NVL_ERR_GENERIC; + } + + lockLinks = (nvlink_link **)nvlink_malloc( sizeof(nvlink_link *) * (2 * linkCount)); + if (lockLinks == NULL) + { + return NVL_NO_MEM; + } + + // Allocate space for the connection list + conns = (nvlink_intranode_conn **)nvlink_malloc( + sizeof(nvlink_intranode_conn *) * linkCount); + + if (conns == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for connections list\n", + __FUNCTION__)); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_train_links_from_swcfg_to_active_end; + } + + nvlink_memset(conns, 0, sizeof(nvlink_intranode_conn *) * linkCount); + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + goto nvlink_lib_train_links_from_swcfg_to_active_end; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists and connection lists + // + + // + // Get the array of both local and remote endpoints whose lock needs + // to be acquired + // + for (i = 0; i < linkCount; i++) + { + conn = NULL; + nvlink_core_get_intranode_conn(links[i], &conn); + + if (!conn) + { + // + // Could not find the connection for the link. Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_train_links_from_swcfg_to_active_end; + } + + // Capture both the link and its end-point + lockLinks[lockLinkCount] = conn->end0; + lockLinkCount++; + + lockLinks[lockLinkCount] = conn->end1; + lockLinkCount++; + } + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(lockLinks, lockLinkCount); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + goto nvlink_lib_train_links_from_swcfg_to_active_end; + } + + // Check all the connections which need to be trained + for (i = 0; i < linkCount; i++) + { + conn = NULL; + nvlink_core_get_intranode_conn(links[i], &conn); + + // Don't train links that didn't receive CONFIG_GOOD (NVLINK3+) + if (((conn->end0->version >= NVLINK_DEVICE_VERSION_30) || + (conn->end1->version >= NVLINK_DEVICE_VERSION_30)) && + (!links[i]->bInitnegotiateConfigGood)) + { + continue; + } + + // Check if the link is already in ACTIVE + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_HS); + if ((status == NVL_SUCCESS) || (status == NVL_ERR_INVALID_STATE)) + { + continue; + } + + // We can train connections to HS only when they are already in SAFE + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_SAFE); + if (status != NVL_SUCCESS) + { + continue; + } + + // Check if the connection is not already considered + for (j = 0; j < connCount; j++) + { + if (conns[j] == conn) + break; + } + + // If this is a new connection, add it to the list + if (j == connCount) + { + conns[connCount] = conn; + connCount++; + } + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + if (connCount > 0) + { + if ((conn->end0->version >= NVLINK_DEVICE_VERSION_30) || + (conn->end1->version >= NVLINK_DEVICE_VERSION_30)) + { + status = 
nvlink_core_train_intranode_conns_from_swcfg_to_active_ALT(conns, + connCount, + flags); + } + else + { + status = nvlink_core_train_intranode_conns_from_swcfg_to_active_legacy(conns, + connCount, + flags); + } + } + + // Release the per-link locks + nvlink_lib_link_locks_release(lockLinks, lockLinkCount); + +nvlink_lib_train_links_from_swcfg_to_active_end: + + if (conns != NULL) + { + nvlink_free((void *)conns); + } + + if (lockLinks != NULL) + { + nvlink_free((void *)lockLinks); + } + + return status; +} + +/** + * Train a given set of links of a device from L2 to ACTIVE state + * + * param[in] dev NVLink Device pointer + * param[in] linkMask Mask of links to be trained + * param[in] flags Flags to track if the transition is sync/async + * + * return NVL_SUCCESS if the links train to ACTIVE + */ +NvlStatus +nvlink_lib_train_links_from_L2_to_active +( + nvlink_device *dev, + NvU32 linkMask, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + nvlink_link *link = NULL; + nvlink_intranode_conn **conns = NULL; + nvlink_intranode_conn *conn = NULL; + NvU32 numLinks = 0; + NvU32 numConns = 0; + NvU32 connCount = 0; + NvU32 i; + + nvlink_link **lockLinks = NULL; + NvU32 lockLinkCount = 0; + + + if (dev == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad device pointer specified.\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + lockLinks = (nvlink_link **)nvlink_malloc( + sizeof(nvlink_link *) * NVLINK_MAX_SYSTEM_LINK_NUM); + if (lockLinks == NULL) + { + return NVL_NO_MEM; + } + + // Allocate space for the connection list + conns = (nvlink_intranode_conn **)nvlink_malloc( + sizeof(nvlink_intranode_conn *) * NVLINK_MAX_SYSTEM_LINK_NUM); + + if (conns == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate space for connections list\n", + __FUNCTION__)); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_train_links_from_L2_to_active_end; + } + + // Initialize the list of links + nvlink_memset(conns, 0, sizeof(nvlink_intranode_conn *) * 32); + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + goto nvlink_lib_train_links_from_L2_to_active_end; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists and connection lists + // + + // Get the array of link endpoints whose lock needs to be acquired + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (!(linkMask & (1 << link->linkNumber))) + { + continue; + } + + // Get the connection associated with the link + conn = NULL; + nvlink_core_get_intranode_conn(link, &conn); + + if (conn == NULL) + { + // + // Could not find the connection for the link. 
Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + status = NVL_ERR_GENERIC; + goto nvlink_lib_train_links_from_L2_to_active_end; + } + + lockLinks[lockLinkCount] = conn->end0; + lockLinkCount++; + + lockLinks[lockLinkCount] = conn->end1; + lockLinkCount++; + } + + // Acquire the per-link locks for all links captured + status = nvlink_lib_link_locks_acquire(lockLinks, lockLinkCount); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + goto nvlink_lib_train_links_from_L2_to_active_end; + } + + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (!(linkMask & (1 << link->linkNumber))) + { + continue; + } + + // If the link received a L2 entry request, but never entered L2 + if (link->powerStateTransitionStatus == nvlink_power_state_entering_L2) + { + // Update the power state transition status + link->powerStateTransitionStatus = nvlink_power_state_in_L0; + continue; + } + + // Get the connection associated with the link + conn = NULL; + nvlink_core_get_intranode_conn(link, &conn); + + // Check the connection state to verify if the link is already in HS + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_HS); + if (status == NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Link is not in sleep %s: %s.\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + continue; + } + + // Check the connection state to verify if the link is already in SAFE + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_SAFE); + if (status == NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link is not in sleep %s: %s.\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + continue; + } + + // Mark the power state transition for the link + link->powerStateTransitionStatus = nvlink_power_state_exiting_L2; + } + + FOR_EACH_LINK_REGISTERED(link, dev, node) + { + if (!(linkMask & (1 << link->linkNumber))) + { + continue; + } + + if (link->powerStateTransitionStatus == nvlink_power_state_exiting_L2) + { + // Get the connection associated with the link + conn = NULL; + nvlink_core_get_intranode_conn(link, &conn); + + // Verify if both the endpoints desire to exit SLEEP + if ((conn->end0->powerStateTransitionStatus == nvlink_power_state_exiting_L2) && + (conn->end1->powerStateTransitionStatus == nvlink_power_state_exiting_L2)) + { + // Increment the #connections considered for exiting L2 + numConns++; + + // Check if the the connection is already included in the list + for (i = 0; i < connCount; i++) + { + if (conns[i] == conn) + break; + } + + // If this is a new connection, add it to the list + if (i == connCount) + { + conns[connCount] = conn; + connCount++; + } + } + + // Increment the #links considered for exiting L2 + numLinks++; + } + } + + // + // All the required per-link locks are successfully acquired + // The connection list traversal is also complete now + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // Clear the status variable + status = NVL_SUCCESS; + + if (connCount > 0) + { + status = nvlink_core_train_intranode_conns_from_from_L2_to_active(conns, connCount, flags); + } + + if (status == NVL_SUCCESS) + { + // + // If some links are waiting on the remote end to exit sleep, + // update status to NVL_MORE_PROCESSING_REQUIRED + // + status = 
(numLinks != numConns ? NVL_MORE_PROCESSING_REQUIRED : NVL_SUCCESS); + } + + // Release the per-link locks + nvlink_lib_link_locks_release(lockLinks, lockLinkCount); + +nvlink_lib_train_links_from_L2_to_active_end: + + if (conns != NULL) + { + nvlink_free((void *)conns); + } + + if (lockLinks != NULL) + { + nvlink_free((void *)lockLinks); + } + + return status; +} + +/** + * Retrain a given link from SWCFG to ACTIVE + * + * @param[in] link NVLink Link pointer + * @param[in] flags Flag to track if the training is aync/async + * + * return NVL_SUCCESS if the training was successful + */ +NvlStatus +nvlink_lib_retrain_link_from_swcfg_to_active +( + nvlink_link *link, + NvU32 flags +) +{ + nvlink_intranode_conn *conn = NULL; + NvlStatus status = NVL_SUCCESS; + + if (!link) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad link pointer specified.\n", + __FUNCTION__)); + return NVL_ERR_GENERIC; + } + + // Acquire the top-level lock + status = nvlink_lib_top_lock_acquire(); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return status; + } + + // + // Top-level lock is now acquired. Proceed to traversing the device + // and link lists and connection lists + // + + // Get the connection associated with the link + nvlink_core_get_intranode_conn(link, &conn); + + if (!conn) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No connection was found for %s: %s.\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + + // + // Could not find the connection for the link. Release the + // top-level lock and return + // + nvlink_lib_top_lock_release(); + + return NVL_ERR_GENERIC; + } + + // create array of one conn and two link endpoints + nvlink_intranode_conn *conns[1] = {conn}; + nvlink_link *links[2] = {0}; + + links[0] = conn->end0; + links[1] = conn->end1; + + // Acquire the per-link locks for the links + status = nvlink_lib_link_locks_acquire(links, 2); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link locks\n", + __FUNCTION__)); + + // Release the top-level lock + nvlink_lib_top_lock_release(); + + return status; + } + + // + // All the required per-link locks are successfully acquired + // Release the top level-lock + // + nvlink_lib_top_lock_release(); + + // Check if the link is already in ACTIVE + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_HS); + if ((status == NVL_SUCCESS) || (status == NVL_ERR_INVALID_STATE)) + { + // Release the per-link locks + nvlink_lib_link_locks_release(links, 2); + + return status; + } + + // We can train connections to HS only when they are already in SAFE + status = nvlink_core_check_intranode_conn_state(conn, NVLINK_LINKSTATE_SAFE); + if (status != NVL_SUCCESS) + { + // Release the per-link locks + nvlink_lib_link_locks_release(links, 2); + + return status; + } + if ((conn->end0->version >= NVLINK_DEVICE_VERSION_30) || + (conn->end1->version >= NVLINK_DEVICE_VERSION_30)) + + { + if (!conn->end0->bInitnegotiateConfigGood || + !conn->end1->bInitnegotiateConfigGood) + { + status = NVL_ERR_GENERIC; + } + else + { + // ALT training for NVLink3.0+ + status = nvlink_core_train_intranode_conns_from_swcfg_to_active_ALT(conns, 0x1, flags); + } + } + else + { + // Legacy training for pre-NVLink3.0 + status = nvlink_core_train_intranode_conns_from_swcfg_to_active_legacy(conns, 0x1, flags); + } 
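+    //
+    // Note (summary of the dispatch above): NVLink 3.0+ endpoints are only
+    // retrained with the ALT sequence once both ends have reported
+    // CONFIG_GOOD from INITNEGOTIATE; otherwise the request fails with
+    // NVL_ERR_GENERIC. Pre-3.0 endpoints always use the legacy sequence.
+    // In every case 'status' is returned after the per-link locks below
+    // are released.
+    //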
+ + // Release the per-link locks + nvlink_lib_link_locks_release(links, 2); + + return status; +} + +/** + * Save training seeds into the link structure + * + * @param[in] link NVLink Link pointer + * @param[in] seedData Training seed information + * + * return NVL_SUCCESS if the seed saving was successful + */ +NvlStatus +nvlink_lib_save_training_seeds +( + nvlink_link *link, + NvU32 *seedData +) +{ + NvlStatus status = NVL_SUCCESS; + + // Check to make sure we are given a buffer of data + if (seedData == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No seed data given to store %s: %s.\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + return NVL_ERR_GENERIC; + } + + NvU32 size = seedData[0]; + + // check to make sure the size is not out of bounds + if (size > NVLINK_MAX_SEED_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad data, size of %d out of bounds %s: %s.\n", + __FUNCTION__, size, link->dev->deviceName, link->linkName)); + return NVL_ERR_GENERIC; + } + + // Acquire the per-link lock + status = nvlink_lib_link_locks_acquire(&link, 1); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link lock\n", + __FUNCTION__)); + + return status; + } + + //always using corelib defined structures for size + nvlink_memcpy(link->seedData, seedData, sizeof(link->seedData)); + + // Release the per-link locks + nvlink_lib_link_locks_release(&link, 1); + + return NVL_SUCCESS; +} + +/** + * Copy training seeds from the link structure + * + * @param[in] link NVLink Link pointer + * @param[in] seedData Training seed information + * + * return NVL_SUCCESS if the seed copy was successful + */ +NvlStatus +nvlink_lib_copy_training_seeds +( + nvlink_link *link, + NvU32 *seedDataCopy +) +{ + NvlStatus status = NVL_SUCCESS; + + // Check to make sure we are given a buffer to copy into + if (seedDataCopy == NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: No seed data structure given to store into %s: %s.\n", + __FUNCTION__, link->dev->deviceName, link->linkName)); + return NVL_ERR_GENERIC; + } + + NvU32 size = link->seedData[0]; + + // check to make sure the size is not out of bounds + if (size > NVLINK_MAX_SEED_NUM) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Bad data, size of %d out of bounds %s: %s.\n", + __FUNCTION__, size, link->dev->deviceName, link->linkName)); + return NVL_ERR_GENERIC; + } + + // Acquire the per-link lock + status = nvlink_lib_link_locks_acquire(&link, 1); + if (status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire per-link lock\n", + __FUNCTION__)); + + return status; + } + + nvlink_memcpy(seedDataCopy, link->seedData, sizeof(link->seedData)); + + // Release the per-link locks + nvlink_lib_link_locks_release(&link, 1); + + return NVL_SUCCESS; +} diff --git a/src/common/nvlink/kernel/nvlink/nvlink_ctx.h b/src/common/nvlink/kernel/nvlink/nvlink_ctx.h new file mode 100644 index 000000000..39554daac --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/nvlink_ctx.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVLINK_CTX_H_ +#define _NVLINK_CTX_H_ + +// +// Link transition times in ms. +// TODO: Review with HW for optimal transition times; +// +#define LINK_TRANSITION_TIME_OFF 1 +#define LINK_TRANSITION_TIME_SAFE 5 +#define LINK_TRANSITION_TIME_HS 500 +#define LINK_TRANSITION_TIMEOUT_IN_MS 2000 + +typedef struct +{ + /* + * Lock for all core lib structures except nvlink_link structures + */ + void *topLevelLock; + + /* + * Head of the device-list + */ + nvlink_device nv_devicelist_head; + + /* + * Head of the established intranode nvlink connections list + */ + nvlink_intranode_conn nv_intraconn_head; + + /* + * Head of the added internode nvlink connections list + */ + nvlink_internode_conn nv_interconn_head; + + /* + * Topology information + * registeredEndpoints : #Endpoints registered in the core library + * connectedEndpoints : #Endpoints whose remote has been determined + * notConnectedEndpoints: #Endpoints whose remote has not been determined + */ + NvU32 registeredEndpoints; + NvU32 connectedEndpoints; + NvU32 notConnectedEndpoints; + NvBool bNewEndpoints; + + /* + * Endpoint count in different link states + * endpointsInSafe : #Endpoints in SAFE state + * endpointsInFail : #Endpoints that failed to transition to ACTIVE + * endpointsInActive: #Endpoints in ACTIVE + */ + NvU32 endpointsInSafe; + NvU32 endpointsInFail; + NvU32 endpointsInActive; + + /* + * Fabric node id set by ioctl interface. This id will be assigned to each + * nvlink device during registration and matched for endpoint look-up on + * ioctls, which operate on endpoints. + */ + NvU16 nodeId; +}nvlink_lib_context; + +extern nvlink_lib_context nvlinkLibCtx; + +#endif //_NVLINK_CTX_H_ + diff --git a/src/common/nvlink/kernel/nvlink/nvlink_helper.h b/src/common/nvlink/kernel/nvlink/nvlink_helper.h new file mode 100644 index 000000000..40f6b810d --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/nvlink_helper.h @@ -0,0 +1,365 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVLINK_HELPER_H_ +#define _NVLINK_HELPER_H_ + + +// +// fabric node id will be used as MSB 16 bits of the link token value to +// generate a unique token for discovering connections +// +#define NVLINK_FABRIC_NODE_ID_MASK 0xFFFF +#define NVLINK_FABRIC_NODE_ID_POS 48 + +/** + * Check if the device type is supported + */ +NvBool nvlink_core_is_supported_device_type(NvU32 devType); + +/** + * Get the link and sublink states for the endpoint + */ +void nvlink_core_get_endpoint_state(nvlink_link *link, nvlink_link_state *linkState); + +/** + * Get the nvlink_device * from the PCI DBDF + */ +void nvlink_core_get_device_by_devinfo(nvlink_device_info *devInfo, nvlink_device **dev); + +/** + * Get the nvlink_link * from the PCI DBDF and link# + */ +void nvlink_core_get_link_by_endpoint(nvlink_endpoint *endPoint, nvlink_link **link); + +/** + * Given the nvlink_link ptr, copy the endpoint details for the link + */ +void nvlink_core_copy_endpoint_info(nvlink_link *connLink, nvlink_endpoint *endPointInfo); + +/** + * Given the nvlink_device ptr, copy the device details + */ +void nvlink_core_copy_device_info(nvlink_device *tmpDev, nvlink_detailed_dev_info *devInfo); + + +/************************************************************************************************/ +/****************************** NVLink initialization functions *********************************/ +/************************************************************************************************/ + +/** + * Kick-off INITPHASE1 on the given array of links + */ +NvlStatus nvlink_core_initphase1(nvlink_link **links, NvU32 numLinks, NvU32 flags); + +/** + * Kick-off INITRXTERM on the given array of links + */ +NvlStatus nvlink_core_rx_init_term(nvlink_link **links, NvU32 numLinks, NvU32 flags); + +/** + * Kick-off receiver detect on the given array of links + */ +NvlStatus nvlink_core_set_rx_detect(nvlink_link **links, NvU32 numLinks, NvU32 flags); + +/** + * Get receiver detect status on the given array of links + */ +NvlStatus nvlink_core_get_rx_detect(nvlink_link **links, NvU32 numLinks, NvU32 flags); + +/** + * Get Enable TX common mode on the given array of links + */ +NvlStatus nvlink_core_enable_common_mode(nvlink_link **links, NvU32 numLinks, NvU32 flags); + +/** + * Get Disable TX common mode on the given array of links + */ +NvlStatus nvlink_core_disable_common_mode(nvlink_link **links, NvU32 numLinks, 
NvU32 flags); + +/** + * Perform RX calibration on the given array of links + */ +NvlStatus nvlink_core_calibrate_links(nvlink_link **links, NvU32 numLinks, NvU32 flags); + +/** + * Enable data on the given array of links + */ +NvlStatus nvlink_core_enable_data(nvlink_link **links, NvU32 numLinks, NvU32 flags); + +/** + * Transition to SWCFG on the given array of links + */ +NvlStatus nvlink_core_link_init_async(nvlink_link **links, NvU32 numLinks); + +/** + * Poll on SAFE/SWCFG on the given link + */ +NvlStatus nvlink_core_wait_for_link_init(nvlink_link *link); + +/** + * Initialize all the endpoints from OFF to SWCFG state + */ +void nvlink_core_init_links_from_off_to_swcfg(nvlink_link **pLinks, + NvU32 numLinks, + NvU32 flags); + +/** + * Send INITNEGOTIATE command on the given array of links + */ +NvlStatus nvlink_core_initnegotiate(nvlink_link **links, NvU32 numLinks, NvU32 flags); + + +/************************************************************************************************/ +/*************************** NVLink topology discovery functions ********************************/ +/************************************************************************************************/ + +/** + * Generate a discovery token for the given link + */ +NvU64 nvlink_core_get_link_discovery_token(nvlink_link *link); + +/** + * Write the dicovery token for the given link + */ +NvlStatus nvlink_core_write_link_discovery_token(nvlink_link *link, NvU64 token); + +/** + * Read the dicovery token for the given link + */ +NvU64 nvlink_core_read_link_discovery_token(nvlink_link *link); + +/** + * Detect the connection by correlating the tokens + */ +void nvlink_core_correlate_conn_by_token(nvlink_link *srcLink, NvU64 writeToken, NvBool skipReadToken); + +/** + * For a given end of a link, returns the other end its connected to. 
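+ * The peer endpoint, if one is found, is returned through the remote_end
+ * out-parameter.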
+ */ +void nvlink_core_discover_and_get_remote_end(nvlink_link *end, + nvlink_link **remote_end, + NvU32 flags); + + +/************************************************************************************************/ +/********************************** NVLink training functions ***********************************/ +/************************************************************************************************/ + +/** + * Train the internode connection link from SWCFG to ACTIVE + */ +NvlStatus nvlink_core_train_internode_conns_from_swcfg_to_active(nvlink_internode_conn **conns, + NvU32 connCount, + NvU32 *isMasterEnd, + NvU32 flags); + +/** + * Train the internode connection sublink to enter high speed + */ +NvlStatus nvlink_core_train_internode_conn_sublink_from_safe_to_hs(nvlink_internode_conn *conn, + NvU32 flags); + +/** + * Train a given set of intranode connections from L2 to ACTIVE state + */ +NvlStatus nvlink_core_train_intranode_conns_from_from_L2_to_active(nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags); + +/** + * Train intranode connections associated with a list of links to HS using ALT sequence + */ +NvlStatus nvlink_core_train_intranode_conns_from_swcfg_to_active_ALT(nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags); + + +/** + * Train a single intranode connection associated with a list of links to HS using legacy + * pre-Ampere sequence + */ +NvlStatus nvlink_core_train_intranode_conns_from_swcfg_to_active_legacy(nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags); + +/************************************************************************************************/ +/********************************** NVLink shutdown functions ***********************************/ +/************************************************************************************************/ + +/** + * [CLEAN SHUTDOWN] + * + * Shutdown given intranode connections from active to L2 state + */ +NvlStatus nvlink_core_powerdown_intranode_conns_from_active_to_L2(nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags); + +/** + * [PSEUDO-CLEAN SHUTDOWN] + * + * Shutdown the given array of intranode connections from ACTIVE to OFF state + */ +NvlStatus nvlink_core_powerdown_intranode_conns_from_active_to_off(nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags); + +/** + * Power down the given array of intranode connections from ACTIVE to SWCFG state + */ +NvlStatus nvlink_core_powerdown_intranode_conns_from_active_to_swcfg(nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags); + +/** + * Reset the given array of intranode connections + */ +NvlStatus nvlink_core_reset_intranode_conns(nvlink_intranode_conn **conns, + NvU32 connCount, + NvU32 flags); + + +/************************************************************************************************/ +/**************************** NVLink connection management functions ****************************/ +/************************************************************************************************/ + +/** + * For a given link, return the associated internode connection + */ +void nvlink_core_get_internode_conn(nvlink_link *localLink, + nvlink_internode_conn **conn); + +/** + * Add a new internode connection to the list of connections + */ +NvlStatus nvlink_core_add_internode_conn(nvlink_link *localLink, + nvlink_remote_endpoint_info *remoteEndPoint); + +/** + * For a given link, delete the associated internode connection + */ +void nvlink_core_remove_internode_conn(nvlink_link 
*localLink); + +/** + * For a given link, return the associated intranode connection + */ +void nvlink_core_get_intranode_conn(nvlink_link *endpoint, + nvlink_intranode_conn **conn); + +/** + * Add a new intranode connection to the list of intranode connections + */ +NvlStatus nvlink_core_add_intranode_conn(nvlink_link *end0, nvlink_link *end1); + +/** + * Remove the connection from the list of intranode connections + */ +void nvlink_core_remove_intranode_conn(nvlink_intranode_conn *conn); + +/** + * Check if the given intranode connection is in the specified mode + */ +NvlStatus nvlink_core_check_intranode_conn_state(nvlink_intranode_conn *conn, + NvU64 linkMode); + +/** + * Copy the intranode connection's remote endpoint information into the nvlink_conn_info + * structure passed in + */ +void nvlink_core_copy_intranode_conn_info(nvlink_link *remote_end, + nvlink_conn_info *conn_info); + +/** + * Copy the internode connection's remote endpoint information into the nvlink_conn_info + * structure passed in + */ +void nvlink_core_copy_internode_conn_info(nvlink_remote_endpoint_info *remote_end, + nvlink_conn_info *conn_info); + + +/************************************************************************************************/ +/******************************* NVLink link management functions *******************************/ +/************************************************************************************************/ + +/** + * For the given link, check whether the link state is at the requested state + */ +NvBool nvlink_core_check_link_state(nvlink_link *link, NvU64 linkState); + +/** + * For the given link, check whether the tx sublink state is at the requested state + */ +NvBool nvlink_core_check_tx_sublink_state(nvlink_link *link, NvU64 txSublinkState); + +/** + * For the given link, check whether the rx sublink state is at the requested state + */ +NvBool nvlink_core_check_rx_sublink_state(nvlink_link *link, NvU64 rxSublinkState); + +/** + * Poll for the link to reach the particular state upto the given timeout. The link + * state transition is considered failed once timeout occurs + */ +NvlStatus nvlink_core_poll_link_state(nvlink_link *link, + NvU64 linkState, + NvU32 timeout); +/** + * Poll for a given timeout period for a sublink to reach the particular state. The + * sublink state transition is considered failed once timeout occurs + */ +NvlStatus nvlink_core_poll_sublink_state(nvlink_link *localTxSubLink, + NvU64 localTxSubLinkState, + NvU32 localTxSubLinkSubtate, + nvlink_link *remoteRxSubLink, + NvU64 remoteRxSubLinkState, + NvU32 remoteRxSubLinkSubstate, + NvU32 timeout); + +/** + * Poll for the tx sublink to reach the specified state upto the given timeout. The + * sublink state transition is considered failed once timeout occurs + */ +NvlStatus nvlink_core_poll_tx_sublink_state(nvlink_link *link, + NvU64 txSublinkState, + NvU32 txSublinkSubState, + NvU32 timeout); + +/** + * Poll for the rx sublink to reach the specified state upto the given timeout. 
The + * sublink state transition is considered failed once timeout occurs + */ +NvlStatus nvlink_core_poll_rx_sublink_state(nvlink_link *link, + NvU64 rxSublinkState, + NvU32 rxSublinkSubState, + NvU32 timeout); + +/************************************************************************************************/ +/****************** Nvlink print functions for devices/links/connections ************************/ +/************************************************************************************************/ + +void nvlink_core_print_link_state(nvlink_link *link); +void nvlink_core_print_intranode_conn(nvlink_intranode_conn *conn); + + +#endif //_NVLINK_HELPER_H_ diff --git a/src/common/nvlink/kernel/nvlink/nvlink_lib_mgmt.c b/src/common/nvlink/kernel/nvlink/nvlink_lib_mgmt.c new file mode 100644 index 000000000..988fe6f32 --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/nvlink_lib_mgmt.c @@ -0,0 +1,163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "nvlink_ctx.h" +#include "nvlink_helper.h" + +#include "nvlink_lock.h" + +nvlink_lib_context nvlinkLibCtx = {0}; + +/* + * Initialize the nvlink core library + * + * return NVL_SUCCESS if the library is initialized successfully + */ +NvlStatus +nvlink_lib_initialize(void) +{ + NvlStatus lock_status = NVL_SUCCESS; + + if (nvlinkLibCtx.nv_devicelist_head.initialized == 0) + { + // Allocate top-level lock + lock_status = nvlink_lib_top_lock_alloc(); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate top-level lock\n", + __FUNCTION__)); + + return lock_status; + } + + // Acquire top-level lock + lock_status = nvlink_lib_top_lock_acquire(); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return lock_status; + } + + // Top-level lock is now acquired + + // Initialize the device list head + nvListInit(&nvlinkLibCtx.nv_devicelist_head.link_list); + nvListInit(&nvlinkLibCtx.nv_devicelist_head.node); + nvlinkLibCtx.nv_devicelist_head.initialized = 1; + + // Initialize the intranode connection list head + nvListInit(&nvlinkLibCtx.nv_intraconn_head.node); + + // Initialize the internode connection list head + nvListInit(&nvlinkLibCtx.nv_interconn_head.node); + + // Initialize registered and connected links to 0 + nvlinkLibCtx.registeredEndpoints = 0; + nvlinkLibCtx.connectedEndpoints = 0; + nvlinkLibCtx.notConnectedEndpoints = 0; + + // + // Initialize fabric node id to max value until set + // by ioctl interface + // + nvlinkLibCtx.nodeId = NV_U16_MAX ; + + // Release top-level lock + nvlink_lib_top_lock_release(); + } + + return NVL_SUCCESS; +} + +/* + * Unload the nvlink core library + * + * return NVL_SUCCESS if the library is unloaded successfully + */ +NvlStatus +nvlink_lib_unload(void) +{ + NvlStatus lock_status = NVL_SUCCESS; + + if (nvlink_lib_is_initialized()) + { + // Acquire top-level lock + lock_status = nvlink_lib_top_lock_acquire(); + if (lock_status != NVL_SUCCESS) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to acquire top-level lock\n", + __FUNCTION__)); + + return lock_status; + } + + // Top-level lock is now acquired + + // Check if there are no devices registered + if (nvlink_lib_is_device_list_empty()) + { + nvlinkLibCtx.nv_devicelist_head.initialized = 0; + } + + // Release and free top-level lock + nvlink_lib_top_lock_release(); + nvlink_lib_top_lock_free(); + } + + return NVL_SUCCESS; +} + +/* + * Check if the nvlink core library is initialized + * + * return NV_TRUE if the core library is already initialized + */ +NvBool +nvlink_lib_is_initialized(void) +{ + return nvlinkLibCtx.nv_devicelist_head.initialized; +} + +/* + * Check if there are any devices registered + * + * return NV_TRUE if there are devices registered in the core library + */ +NvBool +nvlink_lib_is_device_list_empty(void) +{ + NvBool isEmpty = NV_TRUE; + + isEmpty = nvListIsEmpty(&nvlinkLibCtx.nv_devicelist_head.node); + + return isEmpty; +} + diff --git a/src/common/nvlink/kernel/nvlink/nvlink_lock.c b/src/common/nvlink/kernel/nvlink/nvlink_lock.c new file mode 100644 index 000000000..c16a8df9a --- /dev/null +++ b/src/common/nvlink/kernel/nvlink/nvlink_lock.c @@ -0,0 +1,488 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink.h" +#include "nvtypes.h" +#include "nvlink_export.h" +#include "nvlink_os.h" +#include "nvlink_ctx.h" +#include "nvlink_helper.h" +#include "nvlink_lock.h" + +// +// Only enabling locking for testing purposes at the moment. +// Disabled at all other times. +// +#define LOCKING_DISABLED 1 + +static void _sort_links(nvlink_link **, NvU32, NvBool (*)(void *, void *)); +static NvBool _compare(void *, void *); + +/* + * Allocate top level lock. Return NVL_SUCCESS if + * the lock was allocated else return NVL_ERR_GENERIC. + */ +NvlStatus +nvlink_lib_top_lock_alloc(void) +{ + if (LOCKING_DISABLED) + { + return NVL_SUCCESS; + } + + void *top_lock = NULL; + + // Check if top level lock is already allocated + if (nvlinkLibCtx.topLevelLock != NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Top-level lock already allocated\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + top_lock = nvlink_allocLock(); + if (NULL == top_lock) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate top-level lock\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + nvlinkLibCtx.topLevelLock = top_lock; + + // Top-level lock allocated + + return NVL_SUCCESS; +} + +/* + * Free top level lock. Return NVL_SUCCESS if + * the lock was freed else return NVL_ERR_GENERIC. + */ +NvlStatus +nvlink_lib_top_lock_free(void) +{ + if (LOCKING_DISABLED) + { + return NVL_SUCCESS; + } + + // Check if already freed + if (NULL == nvlinkLibCtx.topLevelLock) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Top-level lock not allocated/already freed\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + nvlink_freeLock(nvlinkLibCtx.topLevelLock); + nvlinkLibCtx.topLevelLock = NULL; + + // Top-level lock freed + + return NVL_SUCCESS; +} + +/* + * Allocate per-link lock. Return NVL_SUCCESS if + * the lock was allocated else return NVL_ERR_GENERIC. 
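+ * Note that when LOCKING_DISABLED is set, this is a no-op that reports
+ * NVL_SUCCESS.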
+ */ +NvlStatus +nvlink_lib_link_lock_alloc +( + nvlink_link *link +) +{ + if (LOCKING_DISABLED) + { + return NVL_SUCCESS; + } + + void *link_lock = NULL; + + // Check if already allocated + if (link->linkLock != NULL) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link lock already allocated on this link\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + link_lock = nvlink_allocLock(); + if (NULL == link_lock) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Failed to allocate link lock\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // Link lock allocated + link->linkLock = link_lock; + + return NVL_SUCCESS; +} + +/* + * Free per-link lock. Return NVL_SUCCESS if + * the lock was freed else return NVL_ERR_GENERIC. + */ +NvlStatus +nvlink_lib_link_lock_free +( + nvlink_link *link +) +{ + if (LOCKING_DISABLED) + { + return NVL_SUCCESS; + } + + // Check if already freed + if (NULL == link->linkLock) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Link lock not allocated/already freed\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + nvlink_freeLock(link->linkLock); + link->linkLock = NULL; + + // Link lock freed + + return NVL_SUCCESS; +} + +/* + * Acquire top level lock. Return NVL_SUCCESS if + * the lock was acquired else return NVL_ERR_STATE_IN_USE. + */ +NvlStatus +nvlink_lib_top_lock_acquire(void) +{ + if (LOCKING_DISABLED) + { + return NVL_SUCCESS; + } + + // Check if top-level lock is allocated before attempting to acquire + if (NULL == nvlinkLibCtx.topLevelLock) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Top-level lock is not allocated\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // + // ToDo: Check if the lock was acquired succesfully + // Currently the nvlink_acquireLock function doesn't report failures + // + nvlink_acquireLock(nvlinkLibCtx.topLevelLock); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Acquired top-level lock\n", + __FUNCTION__)); + + return NVL_SUCCESS; +} + +/* + * Release top level lock. Return NVL_SUCCESS if + * the lock was released else return NVL_ERR_GENERIC. + */ +NvlStatus +nvlink_lib_top_lock_release(void) +{ + if (LOCKING_DISABLED) + { + return NVL_SUCCESS; + } + + // Check if top-level lock is allocated before attempting to release + if (NULL == nvlinkLibCtx.topLevelLock) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Top-level lock is not allocated\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // + // ToDo: Check if the lock was released succesfully + // Currently the nvlink_releaseLock function doesn't report failures + // + nvlink_releaseLock(nvlinkLibCtx.topLevelLock); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Released top-level lock\n", + __FUNCTION__)); + + return NVL_SUCCESS; +} + +/* + * Sort the array of links in order of (DBDF, link#) - + * lowest to highest and acquire link locks. + * Return NVL_SUCCESS if all the link locks were acquired. + * Else if any link lock failed to be acquired, release + * all acquired link locks and return NVL_ERR_STATE_IN_USE. + */ +NvlStatus +nvlink_lib_link_locks_acquire +( + nvlink_link **links, + int numLinks +) +{ + if (LOCKING_DISABLED) + { + return NVL_SUCCESS; + } + + int i; + + nvlink_link *link_prev = NULL; + + // Check if array of links is already empty before attempting to release. 
+ if ((NULL == links) || (numLinks == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Could not release the link locks. Link array is empty !\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // Sort the link array in increasing order of (DBDF, link#) + _sort_links(links, numLinks, _compare); + + for (i = 0; i < numLinks; i++) + { + // + // Don't acquire locks on loop back links twice since the current link is + // the same as the previous one + // + if (links[i] != link_prev) + { + nvlink_acquireLock(links[i]->linkLock); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Acquire link lock for dom:%d bus:%d dev:%d fun:%d link:%d\n", + __FUNCTION__, + + links[i]->dev->pciInfo.domain, links[i]->dev->pciInfo.bus, + links[i]->dev->pciInfo.device, links[i]->dev->pciInfo.function, + links[i]->linkNumber)); + } + + link_prev = links[i]; + } + + // + // ToDo: Check if the lock was acquired succesfully + // Currently the nvlink_acquireLock function doesn't report failures + // + return NVL_SUCCESS; +} + +/* + * Loop over all the links and call nvlink_releaseLock(links[i]->linkLock). + * Return NVL_SUCCESS if all the link locks were released. + * Else if any link lock failed to be released return NVL_ERR_GENERIC. + */ +NvlStatus +nvlink_lib_link_locks_release +( + nvlink_link **links, + int numLinks +) +{ + int i; + + if (LOCKING_DISABLED) + { + return NVL_SUCCESS; + } + + nvlink_link *link_prev = NULL; + + // Check if array of links is already empty before attempting to release. + if ((NULL == links) || (numLinks == 0)) + { + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_ERRORS, + "%s: Could not release the link locks. Link array is empty !\n", + __FUNCTION__)); + + return NVL_ERR_GENERIC; + } + + // Sort the link array in increasing order of (DBDF, link#) + _sort_links(links, numLinks, _compare); + + for (i = 0; i < numLinks; i++) + { + // + // Don't release locks on loop back links twice since the current link is + // the same as the previous one + // + if (links[i] != link_prev) + { + nvlink_releaseLock(links[i]->linkLock); + + NVLINK_PRINT((DBG_MODULE_NVLINK_CORE, NVLINK_DBG_LEVEL_INFO, + "%s: Release link lock for dom:%d bus:%d dev:%d fun:%d link:%d\n", + __FUNCTION__, + + links[i]->dev->pciInfo.domain, links[i]->dev->pciInfo.bus, + links[i]->dev->pciInfo.device, links[i]->dev->pciInfo.function, + links[i]->linkNumber)); + } + + link_prev = links[i]; + } + + // + // ToDo: Check if the lock was released succesfully + // Currently the nvlink_releaseLock function doesn't report failures + // + return NVL_SUCCESS; +} + +/* + * Sorts the links in the increasing order of DBDF, link# + */ +static void +_sort_links +( + nvlink_link **links, + NvU32 numLinks, + NvBool (*compare)(void *, void *) +) +{ + nvlink_link *temp = NULL; + NvU32 i, j; + + for (i = 0; i < numLinks; i++) + { + for (j = i + 1; j < numLinks; j++) + { + if (_compare(links[j], links[i])) + { + temp = links[i]; + links[i] = links[j]; + links[j] = temp; + } + } + } +} + +/* + * Compare function for _nvlink_sort - compares DBDF, link# + */ +static NvBool +_compare +( + void *link1, + void *link2 +) +{ + nvlink_link *l1 = (nvlink_link *) link1; + nvlink_link *l2 = (nvlink_link *) link2; + + // Compare link domains + if (l1->dev->pciInfo.domain < l2->dev->pciInfo.domain) + { + return NV_TRUE; + } + + if (l1->dev->pciInfo.domain > l2->dev->pciInfo.domain) + { + return NV_FALSE; + } + + // Domain is same for devices of links. 
Compare bus next + + // Compare link buses + if (l1->dev->pciInfo.bus < l2->dev->pciInfo.bus) + { + return NV_TRUE; + } + + if (l1->dev->pciInfo.bus > l2->dev->pciInfo.bus) + { + return NV_FALSE; + } + + // Bus is same for devices of links. Compare device next + + // Compare link devices + if (l1->dev->pciInfo.device < l2->dev->pciInfo.device) + { + return NV_TRUE; + } + + if (l1->dev->pciInfo.device > l2->dev->pciInfo.device) + { + return NV_FALSE; + } + + // Device is same for devices of links. Compare function next + + // Compare link functions + if (l1->dev->pciInfo.function < l2->dev->pciInfo.function) + { + return NV_TRUE; + } + + if (l1->dev->pciInfo.function > l2->dev->pciInfo.function) + { + return NV_FALSE; + } + + // DBDF is same for both the links. Check the link# + + // Compare link numbers + if (l1->linkNumber < l2->linkNumber) + { + return NV_TRUE; + } + else + { + return NV_FALSE; + } +} diff --git a/src/common/nvswitch/common/inc/rmflcncmdif_nvswitch.h b/src/common/nvswitch/common/inc/rmflcncmdif_nvswitch.h new file mode 100644 index 000000000..dae47a4c7 --- /dev/null +++ b/src/common/nvswitch/common/inc/rmflcncmdif_nvswitch.h @@ -0,0 +1,56 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMFLCNCMDIF_NVSWITCH_H_ +#define _RMFLCNCMDIF_NVSWITCH_H_ + +/*! + * @file rmflcncmdif_nvswitch.h + * @brief Top-level header-file that defines the generic command/message + * interfaces that may be used to communicate with the falcon (e.g. SOE) + */ + +#include "flcnifcmn.h" +#include "rmsoecmdif.h" + +/*! + * Generic command struct which can be used in generic falcon code + */ +typedef union RM_FLCN_CMD +{ + RM_FLCN_CMD_GEN cmdGen; + + RM_FLCN_CMD_SOE cmdSoe; +} RM_FLCN_CMD, *PRM_FLCN_CMD; + +/*! + * Falcon Message structure + */ +typedef union RM_FLCN_MSG +{ + RM_FLCN_MSG_GEN msgGen; // Generic Message + + RM_FLCN_MSG_SOE msgSoe; // SOE message +} RM_FLCN_MSG, *PRM_FLCN_MSG; + +#endif // _RMFLCNCMDIF_NVSWITCH_H_ diff --git a/src/common/nvswitch/common/inc/rmsoecmdif.h b/src/common/nvswitch/common/inc/rmsoecmdif.h new file mode 100644 index 000000000..12e6871d6 --- /dev/null +++ b/src/common/nvswitch/common/inc/rmsoecmdif.h @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMSOECMDIF_H_ +#define _RMSOECMDIF_H_ + +/*! + * @file rmsoecmdif.h + * @brief Top-level header-file that defines the command/message interfaces + * that may be used to communicate with SOE. + * + * This header does not directly define specific attributes, structure, or + * other properties for specific commands or messages. Instead, it includes all + * specific unit- header-files and then defines the top-level command and + * message structures, RM_SOE_COMMAND and RM_SOE_MESSAGE (respectively). + */ + +#include "flcnifcmn.h" + +#include "soe/soebif.h" +#include "soe/soeifcmn.h" +#include "soe/soeifsoe.h" +#include "soe/soeiftest.h" +#include "soe/soeiftherm.h" +#include "soe/soeifspi.h" +#include "soe/soeififr.h" +#include "soe/soeifsmbpbi.h" +#include "soe/soeifcore.h" +#include "soe/soeifchnmgmt.h" + +/*! + * Defines the structure that must be populated to send any arbitrary command + * to SOE's command queue. Each command packet will contain a command + * header ('hdr') describing various command attributes as well as the command + * data itself ('cmd'). + */ +typedef struct +{ + RM_FLCN_QUEUE_HDR hdr; + union + { + RM_SOE_TEST_CMD soeTest; + RM_SOE_THERM_CMD therm; + RM_SOE_SPI_CMD spi; + RM_SOE_IFR_CMD ifr; + RM_SOE_SMBPBI_CMD smbpbiCmd; + RM_SOE_BIF_CMD bif; + RM_SOE_CORE_CMD core; + RM_SOE_CHNMGMT_CMD chnmgmt; + } cmd; +} RM_FLCN_CMD_SOE, +*PRM_FLCN_CMD_SOE; + +typedef RM_FLCN_CMD_SOE RM_SOE_COMMAND; +typedef PRM_FLCN_CMD_SOE PRM_SOE_COMMAND; + +/*! + * Defines the structure that represents all messages sent from SOE to + * the RM. Similar to the command structure, the message structure also + * contains a message header ('hdr') that describes the message as well as + * the actual message data ('msg'). + */ +typedef struct +{ + RM_FLCN_QUEUE_HDR hdr; + union + { + RM_SOE_TEST_MSG soeTest; + RM_SOE_THERM_MSG soeTherm; + RM_FLCN_MSG_SOE_INIT init; + RM_SOE_CHNMGMT_MSG chnmgmt; + } msg; +} RM_FLCN_MSG_SOE, +*PRM_FLCN_MSG_SOE; + +/*! + * Production task-identifiers: + * + * @note TASK_ID__IDLE must remain zero. + * @note TASK_ID__END must mark the end of the valid IDs. + * + * @note Do NOT compact following defines! + * @note Do NOT recycle deleted/unused IDs! 
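+ *
+ * @note New task IDs are expected to be added just before TASK_ID__END (see
+ *       the "Add new task ID here..." marker below), with TASK_ID__END
+ *       updated to the value of the last valid ID.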
+ */ +#define RM_SOE_TASK_ID__IDLE 0x00 +#define RM_SOE_TASK_ID_CMDMGMT 0x01 +#define RM_SOE_TASK_ID_WKRTHD 0x02 +#define RM_SOE_TASK_ID_THERM 0x03 +#define RM_SOE_TASK_ID_RESERVED 0x04 +#define RM_SOE_TASK_ID_SPI 0x05 +#define RM_SOE_TASK_ID_RSVD6 0x06 +#define RM_SOE_TASK_ID_SMBPBI 0x07 +#define RM_SOE_TASK_ID_BIF 0x08 +#define RM_SOE_TASK_ID_CORE 0x09 +#define RM_SOE_TASK_ID_IFR 0x0A +#define RM_SOE_TASK_ID_CHNMGMT 0x0B +#define RM_SOE_TASK_ID_RMMSG 0x0C +// Add new task ID here... +#define RM_SOE_TASK_ID__END 0x0C + +/*! + * Unit-identifiers: + * + * Notes: + * UNIT_END must mark the end of the valid IDs. + */ +#define RM_SOE_UNIT_REWIND RM_FLCN_UNIT_ID_REWIND +#define RM_SOE_UNIT_INIT (0x01) +#define RM_SOE_UNIT_NULL (0x02) +#define RM_SOE_UNIT_TEST (0x03) +#define RM_SOE_UNIT_UNLOAD (0x04) +#define RM_SOE_UNIT_THERM (0x05) +#define RM_SOE_UNIT_RESERVED (0x06) +#define RM_SOE_UNIT_SPI (0x07) +#define RM_SOE_UNIT_RSVD8 (0x08) +#define RM_SOE_UNIT_SMBPBI (0x09) +#define RM_SOE_UNIT_BIF (0x0A) +#define RM_SOE_UNIT_CORE (0x0B) +#define RM_SOE_UNIT_IFR (0x0C) +#define RM_SOE_UNIT_CHNMGMT (0x0D) +// Add new unit ID here... +#define RM_SOE_UNIT_END (0x0D) + + +#endif // _RMSOECMDIF_H_ + diff --git a/src/common/nvswitch/common/inc/smbpbi_shared_nvswitch.h b/src/common/nvswitch/common/inc/smbpbi_shared_nvswitch.h new file mode 100644 index 000000000..d45ae5a27 --- /dev/null +++ b/src/common/nvswitch/common/inc/smbpbi_shared_nvswitch.h @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SMBPBI_SHARED_NVSWITCH_H_ +#define _SMBPBI_SHARED_NVSWITCH_H_ + +#include "inforom/types.h" +#include "inforom/ifrdem.h" + +/*! 
+ * + * Shared surface between nvswitch and SOE that includes + * data from the InfoROM needed for OOB queries + * + */ +typedef struct +{ + struct { + NvBool bValid; + NvU8 boardPartNum[24]; + NvU8 serialNum[16]; + NvU8 marketingName[24]; + NvU32 buildDate; + } OBD; + + struct { + NvBool bValid; + NvU8 oemInfo[32]; + } OEM; + + struct { + NvBool bValid; + NvU8 inforomVer[16]; + } IMG; + + struct { + NvBool bValid; + NvU64_ALIGN32 uncorrectedTotal; + NvU64_ALIGN32 correctedTotal; + } ECC; + + struct _def_inforomdata_dem_object { + NvBool bValid; + NvBool bPresent; // in the InfoROM image + + union { + INFOROM_OBJECT_HEADER_V1_00 header; + INFOROM_DEM_OBJECT_V1_00 v1; + } object; + } DEM; +} RM_SOE_SMBPBI_INFOROM_DATA, *PRM_SOE_SMBPBI_INFOROM_DATA; + +typedef struct +{ + RM_SOE_SMBPBI_INFOROM_DATA inforomObjects; +} SOE_SMBPBI_SHARED_SURFACE, *PSOE_SMBPBI_SHARED_SURFACE; + +/*! + * Macros to evaluate offsets into the shared surface + */ + +#define SOE_SMBPBI_SHARED_OFFSET_INFOROM(obj, member) \ + NV_OFFSETOF(SOE_SMBPBI_SHARED_SURFACE, inforomObjects.obj.member) + +#endif // _SMBPBI_SHARED_NVSWITCH_H_ diff --git a/src/common/nvswitch/common/inc/soe/soebif.h b/src/common/nvswitch/common/inc/soe/soebif.h new file mode 100644 index 000000000..5c79c99de --- /dev/null +++ b/src/common/nvswitch/common/inc/soe/soebif.h @@ -0,0 +1,147 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SOEBIF_H_ +#define _SOEBIF_H_ + +/*! + * @file soebif.h + * @brief SOE BIF Command Queue + * + * The BIF unit ID will be used for sending and recieving + * Command Messages between driver and BIF unit of SOE + */ + +/*! + * Commands offered by the SOE Bus Interface. + */ +enum +{ + /*! + * Update the UPHY EOM(Eye Opening Measurement) parameters. + */ + RM_SOE_BIF_CMD_UPDATE_EOM, + + /*! + * This command sends UPHY register's address and lane from the client + * to the SOE and gets the register value. + */ + RM_SOE_BIF_CMD_GET_UPHY_DLN_CFG_SPACE, + + /*! + * Set PCIE link speed + */ + RM_SOE_BIF_CMD_SET_PCIE_LINK_SPEED, + + /*! + * Get UPHY EOM(Eye Opening Measurement) status. + */ + RM_SOE_BIF_CMD_GET_EOM_STATUS, + + /*! + * Signal Lane Margining + */ + RM_SOE_BIF_CMD_SIGNAL_LANE_MARGINING, + + /*! + * Handle Margining interrupt + */ + RM_SOE_BIF_CMD_SERVICE_MARGINING_INTERRUPTS, +}; + +/*! 
+ * BIF queue command payload + */ +typedef struct +{ + NvU8 cmdType; + NvU8 mode; + NvU8 nerrs; + NvU8 nblks; + NvU8 berEyeSel; +} RM_SOE_BIF_CMD_EOM; + +typedef struct +{ + NvU8 cmdType; + NvU8 mode; + NvU8 nerrs; + NvU8 nblks; + NvU8 berEyeSel; + NvU32 laneMask; + RM_FLCN_U64 dmaHandle; +} RM_SOE_BIF_CMD_EOM_STATUS; + +typedef struct +{ + NvU8 cmdType; + NvU32 regAddress; + NvU32 laneSelectMask; +} RM_SOE_BIF_CMD_UPHY_DLN_CFG_SPACE; + +typedef struct +{ + NvU8 cmdType; + NvU8 laneNum; +} RM_SOE_BIF_CMD_LANE_MARGINING; + +#define RM_SOE_BIF_LINK_SPEED_INVALID (0x00) +#define RM_SOE_BIF_LINK_SPEED_GEN1PCIE (0x01) +#define RM_SOE_BIF_LINK_SPEED_GEN2PCIE (0x02) +#define RM_SOE_BIF_LINK_SPEED_GEN3PCIE (0x03) +#define RM_SOE_BIF_LINK_SPEED_GEN4PCIE (0x04) +#define RM_SOE_BIF_LINK_SPEED_GEN5PCIE (0x05) + +#define RM_SOE_BIF_LINK_WIDTH_INVALID (0x00) +#define RM_SOE_BIF_LINK_WIDTH_X1 (0x01) +#define RM_SOE_BIF_LINK_WIDTH_X2 (0x02) +#define RM_SOE_BIF_LINK_WIDTH_X4 (0x03) +#define RM_SOE_BIF_LINK_WIDTH_X8 (0x04) +#define RM_SOE_BIF_LINK_WIDTH_X16 (0x05) + +// Maximum time to wait for LTSSM to go idle, in ns +#define BIF_LTSSM_IDLE_TIMEOUT_NS (200 * SOE_INTERVAL_1USEC_IN_NS) +// Maximum time to wait for LTSSM to declare link ready, in ns +#define BIF_LTSSM_LINK_READY_TIMEOUT_NS (20 * SOE_INTERVAL_1MSEC_IN_NS) +// Maximum time to keep trying to change link speed, in ns +#define BIF_LINK_CHANGE_TIMEOUT_NS (10 * SOE_INTERVAL_5MSEC_IN_NS) +// Maximum PCIe lanes supported per link is 16 as of PCIe spec 4.0r1.0 +#define BIF_MAX_PCIE_LANES 16U + +typedef struct +{ + NvU8 cmdType; + NvU32 linkSpeed; +} RM_SOE_BIF_CMD_PCIE_LINK_SPEED; + +typedef union +{ + NvU8 cmdType; + RM_SOE_BIF_CMD_EOM eomctl; + RM_SOE_BIF_CMD_UPHY_DLN_CFG_SPACE cfgctl; + RM_SOE_BIF_CMD_PCIE_LINK_SPEED speedctl; + RM_SOE_BIF_CMD_EOM_STATUS eomStatus; + RM_SOE_BIF_CMD_LANE_MARGINING laneMargining; +} RM_SOE_BIF_CMD; + +#endif // _SOEBIF_H_ diff --git a/src/common/nvswitch/common/inc/soe/soeifchnmgmt.h b/src/common/nvswitch/common/inc/soe/soeifchnmgmt.h new file mode 100644 index 000000000..ede9e7b28 --- /dev/null +++ b/src/common/nvswitch/common/inc/soe/soeifchnmgmt.h @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SOEIFCHNMGMT_H_ +#define _SOEIFCHNMGMT_H_ + +/*! 
+ * @file soeifchnmgmt.h + * @brief SOE Command/Message Interfaces - SOE chnmgmt + */ + +/*! + * @brief Defines for CHNMGMT commands + */ +enum +{ + RM_SOE_CHNMGMT_CMD_ID_ENGINE_RC_RECOVERY, + RM_SOE_CHNMGMT_CMD_ID_FINISH_RC_RECOVERY, +}; + +/*! + * @brief CHNMGMT engine RC command + */ +typedef struct +{ + NvU8 cmdType; //= 0) +#define NV_TSENSE_FXP_9_5_TO_CELSIUS(fxp) \ + (NvU32) (fxp /(1 << DRF_SIZE(NV_TSENSE_FXP_9_5_FRACTIONAL))) + +// Convert FXP 9.5 to NvTemp +#define NV_TSENSE_FXP_9_5_SIGN(fxp) \ + DRF_VAL(_TYPES, _SFXP, _INTEGER_SIGN(9,5), fxp) + +#define NV_TSENSE_FXP_9_5_TO_24_8(fxp) \ + (NvTemp) ((NV_TSENSE_FXP_9_5_SIGN(fxp) == \ + NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE ? \ + DRF_SHIFTMASK(31:17) : 0x0) | (fxp << 3)) + +/*! + * Macros for NvType <-> Celsius temperature conversion. + */ +#define RM_SOE_CELSIUS_TO_NV_TEMP(cel) \ + NV_TYPES_S32_TO_SFXP_X_Y(24,8,(cel)) +#define RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(nvt) \ + NV_TYPES_SFXP_X_Y_TO_S32(24,8,(nvt)) +#define RM_SOE_NV_TEMP_TO_CELSIUS_ROUNDED(nvt) \ + NV_TYPES_SFXP_X_Y_TO_S32_ROUNDED(24,8,(nvt)) + +/*! + * Thermal Message IDs + */ +enum +{ + RM_SOE_THERM_MSG_ID_SLOWDOWN_STATUS, + RM_SOE_THERM_MSG_ID_SHUTDOWN_STATUS, +}; + +/*! + * @brief message for thermal shutdown + */ +typedef struct +{ + NvU8 msgType; + NvTemp maxTemperature; + NvTemp overtThreshold; + + struct + { + NvBool bTsense; + NvBool bPmgr; + }source; +} RM_SOE_THERM_MSG_SHUTDOWN_STATUS; + +/*! + * @brief message for thermal slowdown + */ +typedef struct +{ + NvU8 msgType; + NvBool bSlowdown; + NvTemp maxTemperature; + NvTemp warnThreshold; + + struct + { + NvBool bTsense; + NvBool bPmgr; + }source; +} RM_SOE_THERM_MSG_SLOWDOWN_STATUS; + +/*! + * A simple union of all the Thermal messages. + * Use the 'msgType' variable to determine the actual type of the message. + */ +typedef union +{ + NvU8 msgType; + // The following structs are expected to include cmdType as the first member + RM_SOE_THERM_MSG_SLOWDOWN_STATUS slowdown; + RM_SOE_THERM_MSG_SHUTDOWN_STATUS shutdown; +}RM_SOE_THERM_MSG; + +/*! + * Thermal Command types + */ +enum +{ + RM_SOE_THERM_FORCE_SLOWDOWN, + RM_SOE_THERM_SEND_MSG_TO_DRIVER, +}; + +/*! + * @brief Force Thermal slowdown + */ +typedef struct +{ + NvU8 cmdType; + NvBool slowdown; + NvU32 periodUs; +} RM_SOE_THERM_CMD_FORCE_SLOWDOWN; + +/*! + * @brief Send aysncronous message about thermal events. + */ +typedef struct +{ + NvU8 cmdType; + union + { + NvU8 msgType; + RM_SOE_THERM_MSG_SLOWDOWN_STATUS slowdown; + RM_SOE_THERM_MSG_SHUTDOWN_STATUS shutdown; + } status; +} RM_SOE_THERM_CMD_SEND_ASYNC_MSG; + +/*! + * A simple union of all the therm commands. Use the 'cmdType' variable to + * determine the actual type of the command. + */ +typedef union +{ + NvU8 cmdType; + // The following structs are expected to include cmdType as the first member + RM_SOE_THERM_CMD_FORCE_SLOWDOWN slowdown; + RM_SOE_THERM_CMD_SEND_ASYNC_MSG msg; +}RM_SOE_THERM_CMD; + +#endif // _SOEIFTHERM_H_ + diff --git a/src/common/nvswitch/interface/ctrl_dev_nvswitch.h b/src/common/nvswitch/interface/ctrl_dev_nvswitch.h new file mode 100644 index 000000000..a962bb485 --- /dev/null +++ b/src/common/nvswitch/interface/ctrl_dev_nvswitch.h @@ -0,0 +1,3121 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file defines CTRL calls that are device specifics. + * + * This is a platform agnostic file and lists the CTRL calls used by all the + * clients, Fabric Manager, MODS or NVSwitch GTEST etc. + * + * As Fabric Manager relies on driver ABI compatibility the CTRL calls listed in + * this file contribute to the driver ABI version. + * + * Note: ctrl_dev_nvswitch.h and ctrl_dev_internal_nvswitch.h do not share any + * data. This helps to keep the driver ABI stable. + */ + +#ifndef _CTRL_DEVICE_NVSWITCH_H_ +#define _CTRL_DEVICE_NVSWITCH_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + + +#include "nvtypes.h" +#include "nvfixedtypes.h" +#include "nvmisc.h" +#include "ioctl_common_nvswitch.h" + +/* + * CTRL_NVSWITCH_GET_INFO + * + * Control for querying miscellaneous device information. + * + * This provides a single API to query for multiple pieces of miscellaneous + * information via a single call. + * + * Parameters: + * count [IN] + * Count of queries. Max supported queries per-call are + * NVSWITCH_GET_INFO_COUNT_MAX + * index [IN] + * One of the NVSWITCH_GET_INFO_INDEX type value. + * + * info [OUT] + * Data pertaining to the provided NVSWITCH_GET_INFO_INDEX type value. 
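+ *
+ * Illustrative example: to query the device architecture and the number of
+ * ports in a single call, set count = 2,
+ * index[0] = NVSWITCH_GET_INFO_INDEX_ARCH and
+ * index[1] = NVSWITCH_GET_INFO_INDEX_NUM_PORTS; the corresponding results are
+ * then returned in info[0] and info[1].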
+ */ + +#define NVSWITCH_GET_INFO_COUNT_MAX 32 + +typedef enum nvswitch_get_info_index +{ + NVSWITCH_GET_INFO_INDEX_ARCH = 0x0, + NVSWITCH_GET_INFO_INDEX_IMPL, + NVSWITCH_GET_INFO_INDEX_CHIPID, + NVSWITCH_GET_INFO_INDEX_REVISION_MAJOR, + NVSWITCH_GET_INFO_INDEX_REVISION_MINOR, + NVSWITCH_GET_INFO_INDEX_REVISION_MINOR_EXT, + NVSWITCH_GET_INFO_INDEX_PLATFORM, + NVSWITCH_GET_INFO_INDEX_DEVICE_ID, + + NVSWITCH_GET_INFO_INDEX_NUM_PORTS = 0x100, + NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_31_0, + NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_63_32, + NVSWITCH_GET_INFO_INDEX_NUM_VCS, + NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_TABLE_SIZE, + NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTA_TABLE_SIZE, + NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTB_TABLE_SIZE, + NVSWITCH_GET_INFO_INDEX_ROUTING_ID_TABLE_SIZE, + NVSWITCH_GET_INFO_INDEX_ROUTING_LAN_TABLE_SIZE, + + NVSWITCH_GET_INFO_INDEX_FREQ_KHZ = 0x200, + NVSWITCH_GET_INFO_INDEX_VCOFREQ_KHZ, + NVSWITCH_GET_INFO_INDEX_VOLTAGE_MVOLT, + NVSWITCH_GET_INFO_INDEX_PHYSICAL_ID, + + NVSWITCH_GET_INFO_INDEX_PCI_DOMAIN = 0x300, + NVSWITCH_GET_INFO_INDEX_PCI_BUS, + NVSWITCH_GET_INFO_INDEX_PCI_DEVICE, + NVSWITCH_GET_INFO_INDEX_PCI_FUNCTION + /* See enum modification guidelines at the top of this file */ +} NVSWITCH_GET_INFO_INDEX; + +#define NVSWITCH_GET_INFO_INDEX_ARCH_LR10 0x02 +#define NVSWITCH_GET_INFO_INDEX_IMPL_LR10 0x01 + +#define NVSWITCH_GET_INFO_INDEX_PLATFORM_UNKNOWN 0x00 +#define NVSWITCH_GET_INFO_INDEX_PLATFORM_RTLSIM 0x01 +#define NVSWITCH_GET_INFO_INDEX_PLATFORM_FMODEL 0x02 +#define NVSWITCH_GET_INFO_INDEX_PLATFORM_EMULATION 0x03 +#define NVSWITCH_GET_INFO_INDEX_PLATFORM_SILICON 0x04 + +typedef struct nvswitch_get_info +{ + NvU32 count; + NvU32 index[NVSWITCH_GET_INFO_COUNT_MAX]; + NvU32 info[NVSWITCH_GET_INFO_COUNT_MAX]; + +} NVSWITCH_GET_INFO; + +/* + * CTRL_NVSWITCH_SET_INGRESS_REQUEST_TABLE + * + * Control for programming ingress request tables. + * This interface is only supported on SV10 architecture. All others will + * return an error. Architecture can be queried using _GET_INFO_INDEX_ARCH. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the ingress request table from which table entries + * should be programmed. + * numEntries [IN] + * Number of entries to be programmed. Currently, the call supports + * programming NVSWITCH_INGRESS_REQUEST_ENTRIES_MAX at a time. + * entries [IN] + * The entries (entry format is architecture dependent). + */ + +#define NVSWITCH_INGRESS_REQUEST_ENTRIES_MAX 256 + +/* TODO: document the entry format in detail */ +typedef struct nvswitch_ingress_request_entry +{ + NvU32 vcModeValid7_0; + NvU32 vcModeValid15_8; + NvU32 vcModeValid17_16; + NvU32 mappedAddress; + NvU32 routePolicy; + NvBool entryValid; + +} NVSWITCH_INGRESS_REQUEST_ENTRY; + +typedef struct nvswitch_set_ingress_request_table +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NVSWITCH_INGRESS_REQUEST_ENTRY entries[NVSWITCH_INGRESS_REQUEST_ENTRIES_MAX]; + +} NVSWITCH_SET_INGRESS_REQUEST_TABLE; + +/* + * CTRL_NVSWITCH_GET_INGRESS_REQUEST_TABLE + * + * Control for reading ingress request tables. A sparse list of nonzero entries + * and their table indices is returned. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the ingress request table from which table entries + * should be read. 
+ * nextIndex [OUT] + * The table index of the next entry to read. Set to INGRESS_MAP_TABLE_SIZE + * when the end of the table has been reached. + * numEntries [OUT] + * Number of entries returned. Currently, the call supports returning up to + * NVSWITCH_INGRESS_REQUEST_ENTRIES_MAX entries at a time. + * entries [OUT] + * Ingress request entries along with their table indices. + * Entry format is architecture dependent. + */ + +typedef struct nvswitch_ingress_request_idx_entry +{ + NvU32 idx; + NVSWITCH_INGRESS_REQUEST_ENTRY entry; + +} NVSWITCH_INGRESS_REQUEST_IDX_ENTRY; + +typedef struct nvswitch_get_ingress_request_table_params +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 nextIndex; + NvU32 numEntries; + NVSWITCH_INGRESS_REQUEST_IDX_ENTRY entries[NVSWITCH_INGRESS_REQUEST_ENTRIES_MAX]; + +} NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS; + +/* + * CTRL_NVSWITCH_SET_INGRESS_REQUEST_VALID + * + * Control for toggling the existing ingress request table entries' validity. + * This interface is only supported on SV10 architecture. All others will + * return an error. Architecture can be queried using _GET_INFO_INDEX_ARCH. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the ingress request table from which table entries + * should be programmed. + * numEntries [IN] + * Number of entries to be programmed. Currently, the call supports + * programming NVSWITCH_INGRESS_REQUEST_ENTRIES_MAX at a time. + * entryValid [IN] + * If true, an existing entry is marked valid, else will be marked invalid. + */ + +typedef struct nvswitch_set_ingress_request_valid +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NvBool entryValid[NVSWITCH_INGRESS_REQUEST_ENTRIES_MAX]; + +} NVSWITCH_SET_INGRESS_REQUEST_VALID; + +/* + * CTRL_NVSWITCH_SET_INGRESS_RESPONSE_TABLE + * + * Control for programming ingress response tables. + * This interface is only supported on SV10 architecture. All others will + * return an error. Architecture can be queried using _GET_INFO_INDEX_ARCH. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the ingress request table from which table entries + * should be programmed. + * numEntries [IN] + * Number of entries to be programmed. Currently, the call supports + * programming NVSWITCH_INGRESS_REQUEST_ENTRIES_MAX at a time. + * entries [IN] + * The entries (entry format is architecture dependent). + */ + +#define NVSWITCH_INGRESS_RESPONSE_ENTRIES_MAX 256 + +/* TODO: document the entry format in detail */ +typedef struct nvswitch_ingress_response_entry +{ + NvU32 vcModeValid7_0; + NvU32 vcModeValid15_8; + NvU32 vcModeValid17_16; + NvU32 routePolicy; + NvBool entryValid; + +} NVSWITCH_INGRESS_RESPONSE_ENTRY; + +typedef struct nvswitch_set_ingress_response_table +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NVSWITCH_INGRESS_RESPONSE_ENTRY entries[NVSWITCH_INGRESS_RESPONSE_ENTRIES_MAX]; + +} NVSWITCH_SET_INGRESS_RESPONSE_TABLE; + +/* + * CTRL_NVSWITCH_SET_REMAP_POLICY + * + * Control to load remap policy table + * This interface is not supported on SV10 architecture. SV10 will return an + * error. Architecture can be queried using _GET_INFO_INDEX_ARCH. 
+ * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * tableSelect [IN] + * Remap table selector + * firstIndex [IN] + * A starting index of the remap table from which table entries + * should be programmed. Valid range should be queried using + * NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_TABLE_SIZE. + * numEntries [IN] + * Number of entries to be programmed. Currently, the call supports + * programming NVSWITCH_REMAP_POLICY_ENTRIES_MAX at a time. + * remapPolicy [IN] + * The entries (see NVSWITCH_REMAP_POLICY_ENTRY). + */ + +#define NVSWITCH_REMAP_POLICY_ENTRIES_MAX 64 + +#define NVSWITCH_REMAP_POLICY_FLAGS_REMAP_ADDR NVBIT(0) +#define NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_CHECK NVBIT(1) +#define NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_REPLACE NVBIT(2) +#define NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE NVBIT(4) +#define NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET NVBIT(5) /* Apply address offset */ +#define NVSWITCH_REMAP_POLICY_FLAGS_REFLECTIVE NVBIT(30) /* Reflective mapping */ +#define NVSWITCH_REMAP_POLICY_FLAGS_ADDR_TYPE NVBIT(31) /* Enforce address type checking */ + +typedef struct nvswitch_remap_policy_entry +{ + NvBool entryValid; + NvU32 targetId; /* Unique endpoint ID */ + + NvU32 irlSelect; /* Injection rate limiter (0=none/1=IRL1/2=IRL2) */ + + NvU32 flags; /* See NVSWITCH_REMAP_POLICY_FLAGS_* */ + + NV_DECLARE_ALIGNED(NvU64 address, 8); /* 47-bit remap address. Bits 46:36 are used. */ + + /* reqContext fields are used when */ + /* routing function _REQCTXT_CHECK or _REPLACE */ + /* is set. */ + NvU32 reqCtxMask; /* Used to mask packet request ctxt before */ + /* checking. */ + + NvU32 reqCtxChk; /* Post-mask packet request ctxt check value. */ + /* Packets that fail compare are converted to */ + /* UR response and looped back. */ + + NvU32 reqCtxRep; /* Replaces packet request context when */ + /* _REQCTXT_REPLACE is set. */ + + NV_DECLARE_ALIGNED(NvU64 addressOffset, 8); /* offset - base is added to packet address if */ + /* routing function _ADR_OFFSET & _ADR_BASE are */ + /* set. 64GB offset 1MB aligned on LR10. */ + + NV_DECLARE_ALIGNED(NvU64 addressBase, 8); /* If routing function _ADR_BASE is set, limits */ + NV_DECLARE_ALIGNED(NvU64 addressLimit, 8); /* application of _ADR_OFFSET to packet */ + /* addresses that pass base/limit bounds check. */ + /* Maximum 64GB size 1MB aligned on LR10. */ + + +} NVSWITCH_REMAP_POLICY_ENTRY; + +typedef enum nvswitch_table_select_remap +{ + NVSWITCH_TABLE_SELECT_REMAP_PRIMARY = 0, + NVSWITCH_TABLE_SELECT_REMAP_EXTA, + NVSWITCH_TABLE_SELECT_REMAP_EXTB, + NVSWITCH_TABLE_SELECT_REMAP_MULTICAST +} NVSWITCH_TABLE_SELECT_REMAP; + +typedef struct nvswitch_set_remap_policy +{ + NvU32 portNum; + NVSWITCH_TABLE_SELECT_REMAP tableSelect; + NvU32 firstIndex; + NvU32 numEntries; + NVSWITCH_REMAP_POLICY_ENTRY remapPolicy[NVSWITCH_REMAP_POLICY_ENTRIES_MAX]; + +} NVSWITCH_SET_REMAP_POLICY; + +/* + * CTRL_NVSWITCH_GET_REMAP_POLICY + * + * Control to get remap policy table + * This interface is not supported on SV10 architecture. SV10 will return unsupported + * error. Architecture can be queried using _GET_INFO_INDEX_ARCH. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * tableSelect [IN] + * Remap table selector + * firstIndex [IN] + * A starting index of the remap policy table from which table entries + * should be read. + * numEntries [OUT] + * Number of entries returned. 
This call returns up to
+ *      NVSWITCH_REMAP_POLICY_ENTRIES_MAX entries at a time.
+ *   nextIndex [OUT]
+ *      The table index of the next entry to read. Set to INGRESS_REMAPTAB_SIZE
+ *      when the end of the table has been reached.
+ *   entries [OUT]
+ *      The entries (see NVSWITCH_REMAP_POLICY_ENTRY).
+ */
+
+
+typedef struct nvswitch_get_remap_policy_params
+{
+    NvU32                               portNum;
+    NVSWITCH_TABLE_SELECT_REMAP         tableSelect;
+    NvU32                               firstIndex;
+    NvU32                               numEntries;
+    NvU32                               nextIndex;
+    NVSWITCH_REMAP_POLICY_ENTRY         entry[NVSWITCH_REMAP_POLICY_ENTRIES_MAX];
+
+} NVSWITCH_GET_REMAP_POLICY_PARAMS;
+
+/*
+ * CTRL_NVSWITCH_SET_REMAP_POLICY_VALID
+ *
+ * Control to set remap policy tables valid/invalid
+ * This interface is not supported on SV10 architecture. SV10 will return unsupported
+ * error. Architecture can be queried using _GET_INFO_INDEX_ARCH.
+ *
+ * Parameters:
+ *   portNum [IN]
+ *      A valid port number present in the port masks returned by
+ *      NVSWITCH_GET_INFO
+ *   tableSelect [IN]
+ *      Remap table selector
+ *   firstIndex [IN]
+ *      A starting index of the remap policy table from which table entries
+ *      should be programmed.
+ *   numEntries [IN]
+ *      Number of entries to be programmed. The call supports programming
+ *      a maximum of NVSWITCH_REMAP_POLICY_ENTRIES_MAX entries at a time.
+ *   entryValid [IN]
+ *      If true, an existing entry is marked valid, else will be marked invalid.
+ */
+
+typedef struct nvswitch_set_remap_policy_valid
+{
+    NvU32                       portNum;
+    NVSWITCH_TABLE_SELECT_REMAP tableSelect;
+    NvU32                       firstIndex;
+    NvU32                       numEntries;
+    NvBool                      entryValid[NVSWITCH_REMAP_POLICY_ENTRIES_MAX];
+
+} NVSWITCH_SET_REMAP_POLICY_VALID;
+
+/*
+ * CTRL_NVSWITCH_SET_ROUTING_ID
+ *
+ * Control to load Routing ID table
+ * The routing ID table configures the VC and routing policy as well as the
+ * valid set of ganged link routes.
+ * This interface is not supported on SV10 architecture. SV10 will return an
+ * error. Architecture can be queried using _GET_INFO_INDEX_ARCH.
+ *
+ * Parameters:
+ *   portNum [IN]
+ *      A valid port number present in the port masks returned by
+ *      NVSWITCH_GET_INFO
+ *   firstIndex [IN]
+ *      A starting index of the routing ID table from which table entries
+ *      should be programmed. Valid range should be queried using
+ *      NVSWITCH_GET_INFO_INDEX_ROUTING_ID_TABLE_SIZE.
+ *   numEntries [IN]
+ *      Number of entries to be programmed. Currently, the call supports programming
+ *      a maximum of NVSWITCH_ROUTING_ID_ENTRIES_MAX entries at a time.
+ *   routingId [IN]
+ *      The entries (see NVSWITCH_ROUTING_ID_ENTRY). 
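+ *
+ * Illustrative example: to program a single routing ID entry at table index 0
+ * on port 4, set portNum = 4, firstIndex = 0, numEntries = 1, and populate
+ * routingId[0] with entryValid = NV_TRUE and a portList[] describing the
+ * destination port(s) (destPortNum) and the per-entry VC mapping (vcMap).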
+ */ + +#define NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX 16 +#define NVSWITCH_ROUTING_ID_VC_MODE_MAX 4 +#define NVSWITCH_ROUTING_ID_ENTRIES_MAX 64 + +typedef enum nvswitch_routing_id_vcmap +{ + NVSWITCH_ROUTING_ID_VCMAP_SAME = 0x0, + NVSWITCH_ROUTING_ID_VCMAP_INVERT, + NVSWITCH_ROUTING_ID_VCMAP_ZERO, + NVSWITCH_ROUTING_ID_VCMAP_ONE + /* See enum modification guidelines at the top of this file */ +} NVSWITCH_ROUTING_ID_VCMAP; + +typedef struct nvswitch_routing_id_dest_port_list +{ + NvU32 vcMap; /* NVSWITCH_ROUTING_ID_VCMAP_* */ + NvU32 destPortNum; + +} NVSWITCH_ROUTING_ID_DEST_PORT_LIST; + +typedef struct nvswitch_routing_id_entry +{ + NvBool entryValid; + NvBool useRoutingLan; + NvBool enableIrlErrResponse; + NvU32 numEntries; + NVSWITCH_ROUTING_ID_DEST_PORT_LIST portList[NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX]; + +} NVSWITCH_ROUTING_ID_ENTRY; + +typedef struct nvswitch_set_routing_id +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NVSWITCH_ROUTING_ID_ENTRY routingId[NVSWITCH_ROUTING_ID_ENTRIES_MAX]; + +} NVSWITCH_SET_ROUTING_ID; + +/* + * CTRL_NVSWITCH_GET_ROUTING_ID + * + * Control to get routing ID table + * This interface is not supported on SV10 architecture. SV10 will return unsupported + * error. Architecture can be queried using _GET_INFO_INDEX_ARCH. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the routing id table from which table entries + * should be read. + * numEntries [OUT] + * Number of entries returned. The call returns only + * NVSWITCH_ROUTING_ID_ENTRIES_MAX entries at a time. + * nextIndex [OUT] + * The table index of the next entry to read. Set to INGRESS_RIDTAB_SIZE + * when the end of the table has been reached. + * entries [OUT] + * The entries (see NVSWITCH_ROUTING_ID_IDX_ENTRY). + */ + +typedef struct nvswitch_routing_id_idx_entry +{ + NvU32 idx; + NVSWITCH_ROUTING_ID_ENTRY entry; + +} NVSWITCH_ROUTING_ID_IDX_ENTRY; + +typedef struct nvswitch_get_routing_id_params +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NvU32 nextIndex; + NVSWITCH_ROUTING_ID_IDX_ENTRY entries[NVSWITCH_ROUTING_ID_ENTRIES_MAX]; + +} NVSWITCH_GET_ROUTING_ID_PARAMS; + +/* + * CTRL_NVSWITCH_SET_ROUTING_ID_VALID + * + * Control to set routing ID tables valid/invalid + * This interface is not supported on SV10 architecture. SV10 will return unsupported + * error. Architecture can be queried using _GET_INFO_INDEX_ARCH. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the routing lan table from which table entries + * should be programmed. + * numEntries [IN] + * Number of entries to be programmed. This call supports programming + * maximum entries of NVSWITCH_ROUTING_ID_ENTRIES_MAX at a time. + * entryValid [IN] + * If true, an existing entry is marked valid, else will be marked invalid. + */ + +typedef struct nvswitch_set_routing_id_valid +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NvBool entryValid[NVSWITCH_ROUTING_ID_ENTRIES_MAX]; + +} NVSWITCH_SET_ROUTING_ID_VALID; + +/* + * CTRL_NVSWITCH_SET_ROUTING_LAN + * + * Control to load routing LAN table + * This interface is not supported on SV10 architecture. SV10 will return an + * error. Architecture can be queried using _GET_INFO_INDEX_ARCH. 
+ * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the ingress request table from which table entries + * should be programmed. Valid range should be queried using + * NVSWITCH_GET_INFO_INDEX_ROUTING_LAN_TABLE_SIZE. + * numEntries [IN] + * Number of entries to be programmed. Currently, the call supports + * programming NVSWITCH_ROUTING_LAN_ENTRIES_MAX at a time. + * routingLan [IN] + * The entries (see NVSWITCH_ROUTING_LAN_ENTRY). + */ + +#define NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX 16 +#define NVSWITCH_ROUTING_LAN_GROUP_SIZE_MAX 16 +#define NVSWITCH_ROUTING_LAN_ENTRIES_MAX 64 + +typedef struct nvswitch_routing_lan_port_select +{ + NvU32 groupSelect; /* Port list group selector */ + NvU32 groupSize; /* Valid range: 1..16 */ + +} NVSWITCH_ROUTING_LAN_PORT_SELECT; + +typedef struct nvswitch_routing_lan_entry +{ + NvBool entryValid; + NvU32 numEntries; + NVSWITCH_ROUTING_LAN_PORT_SELECT portList[NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX]; + +} NVSWITCH_ROUTING_LAN_ENTRY; + +typedef struct nvswitch_set_routing_lan +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NVSWITCH_ROUTING_LAN_ENTRY routingLan[NVSWITCH_ROUTING_LAN_ENTRIES_MAX]; + +} NVSWITCH_SET_ROUTING_LAN; + +/* + * CTRL_NVSWITCH_GET_ROUTING_LAN + * + * Control to get routing LAN table + * This interface is not supported on SV10 architecture. SV10 will return unsupported + * error. Architecture can be queried using _GET_INFO_INDEX_ARCH. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the routing lan table from which table entries + * should be read. + * numEntries [OUT] + * Number of entries returned. Currently, the call supports + * NVSWITCH_ROUTING_LAN_ENTRIES_MAX at a time. + * nextIndex [OUT] + * The table index of the next entry to read. Set to INGRESS_RLANTAB_SIZE + * when the end of the table has been reached. + * entries [OUT] + * The entries (see NVSWITCH_ROUTING_LAN_IDX_ENTRY). + */ + +typedef struct nvswitch_routing_lan_idx_entry +{ + NvU32 idx; + NVSWITCH_ROUTING_LAN_ENTRY entry; + +} NVSWITCH_ROUTING_LAN_IDX_ENTRY; + +typedef struct nvswitch_get_routing_lan_params +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NvU32 nextIndex; + NVSWITCH_ROUTING_LAN_IDX_ENTRY entries[NVSWITCH_ROUTING_LAN_ENTRIES_MAX]; + +} NVSWITCH_GET_ROUTING_LAN_PARAMS; + +/* + * CTRL_NVSWITCH_SET_ROUTING_LAN_VALID + * + * Control to set routing LAN tables valid/invalid + * This interface is not supported on SV10 architecture. SV10 will return unsupported + * error. Architecture can be queried using _GET_INFO_INDEX_ARCH. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * firstIndex [IN] + * A starting index of the routing lan table from which table entries + * should be programmed. + * numEntries [IN] + * Number of entries to be programmed. Currently, the call supports + * programming NVSWITCH_ROUTING_LAN_ENTRIES_MAX at a time. + * entryValid [IN] + * If true, an existing entry is marked valid, else will be marked invalid. + */ + +typedef struct nvswitch_set_routing_lan_valid +{ + NvU32 portNum; + NvU32 firstIndex; + NvU32 numEntries; + NvBool entryValid[NVSWITCH_ROUTING_LAN_ENTRIES_MAX]; + +} NVSWITCH_SET_ROUTING_LAN_VALID; + +/* + * CTRL_NVSWITCH_GET_INGRESS_RESPONSE_TABLE + * + * Control for reading ingress response tables. 
A sparse list of nonzero entries
+ * and their table indices is returned.
+ *
+ * Parameters:
+ *   portNum [IN]
+ *      A valid port number present in the port masks returned by
+ *      NVSWITCH_GET_INFO
+ *   firstIndex [IN]
+ *      A starting index of the ingress response table from which table entries
+ *      should be read.
+ *   nextIndex [OUT]
+ *      The table index of the next entry to read. Set to INGRESS_MAP_TABLE_SIZE
+ *      when the end of the table has been reached.
+ *   numEntries [OUT]
+ *      Number of entries returned. Currently, the call supports returning up to
+ *      NVSWITCH_INGRESS_RESPONSE_ENTRIES_MAX entries at a time.
+ *   entries [OUT]
+ *      Ingress response entries along with their table indices.
+ *      Entry format is architecture dependent.
+ */
+
+typedef struct nvswitch_ingress_response_idx_entry
+{
+    NvU32                               idx;
+    NVSWITCH_INGRESS_RESPONSE_ENTRY     entry;
+
+} NVSWITCH_INGRESS_RESPONSE_IDX_ENTRY;
+
+typedef struct nvswitch_get_ingress_response_table_params
+{
+    NvU32                                  portNum;
+    NvU32                                  firstIndex;
+    NvU32                                  nextIndex;
+    NvU32                                  numEntries;
+    NVSWITCH_INGRESS_RESPONSE_IDX_ENTRY    entries[NVSWITCH_INGRESS_RESPONSE_ENTRIES_MAX];
+
+} NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS;
+
+/*
+ * CTRL_NVSWITCH_GET_ERRORS
+ *
+ * Control to query error information.
+ *
+ * Parameters:
+ *   errorType [IN]
+ *      Allows querying a specific class of errors. See NVSWITCH_ERROR_SEVERITY_xxx.
+ *
+ *   errorIndex [IN/OUT]
+ *      On input: The index of the first error of the specified 'errorType' at which to start
+ *      reading out of the driver.
+ *
+ *      On output: The index of the first error that wasn't reported through the 'error' array
+ *      in this call to CTRL_NVSWITCH_GET_ERRORS. Specific to the specified 'errorType'.
+ *
+ *   nextErrorIndex[OUT]
+ *      The index that will be assigned to the next error to occur for the specified 'errorType'.
+ *      Users of the GET_ERRORS control call may set 'errorIndex' to this field on initialization
+ *      to bypass errors that have already occurred without making multiple control calls.
+ *
+ *   errorCount [OUT]
+ *      Number of errors returned by the call. Currently, errorCount is limited
+ *      by NVSWITCH_ERROR_COUNT_SIZE. In order to query all the errors, a
+ *      client needs to keep calling the control until errorCount is zero.
+ *   error [OUT]
+ *      The error entries.
+ */
+
+typedef enum nvswitch_error_severity_type
+{
+    NVSWITCH_ERROR_SEVERITY_NONFATAL = 0,
+    NVSWITCH_ERROR_SEVERITY_FATAL,
+    NVSWITCH_ERROR_SEVERITY_MAX
+    /* See enum modification guidelines at the top of this file */
+} NVSWITCH_ERROR_SEVERITY_TYPE;
+
+typedef enum nvswitch_error_src_type
+{
+    NVSWITCH_ERROR_SRC_NONE = 0,
+    NVSWITCH_ERROR_SRC_HW
+    /* See enum modification guidelines at the top of this file */
+} NVSWITCH_ERROR_SRC_TYPE;
+
+typedef enum nvswitch_err_type
+{
+    NVSWITCH_ERR_NO_ERROR = 0x0,
+
+    /*
+     * These error enumerations are derived from the error bits defined in each
+     * hardware manual.
+     *
+     * NVSwitch error values should start from 10000 (decimal) to be
+     * distinguishable from GPU errors. 
+ */ + + /* HOST */ + NVSWITCH_ERR_HW_HOST = 10000, + NVSWITCH_ERR_HW_HOST_PRIV_ERROR = 10001, + NVSWITCH_ERR_HW_HOST_PRIV_TIMEOUT = 10002, + NVSWITCH_ERR_HW_HOST_UNHANDLED_INTERRUPT = 10003, + NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START = 10004, + NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END = 10005, + NVSWITCH_ERR_HW_HOST_THERMAL_SHUTDOWN = 10006, + NVSWITCH_ERR_HW_HOST_IO_FAILURE = 10007, + NVSWITCH_ERR_HW_HOST_LAST, + + + /* NPORT: Ingress errors */ + NVSWITCH_ERR_HW_NPORT_INGRESS = 11000, + NVSWITCH_ERR_HW_NPORT_INGRESS_CMDDECODEERR = 11001, + NVSWITCH_ERR_HW_NPORT_INGRESS_BDFMISMATCHERR = 11002, + NVSWITCH_ERR_HW_NPORT_INGRESS_BUBBLEDETECT = 11003, + NVSWITCH_ERR_HW_NPORT_INGRESS_ACLFAIL = 11004, + NVSWITCH_ERR_HW_NPORT_INGRESS_PKTPOISONSET = 11005, + NVSWITCH_ERR_HW_NPORT_INGRESS_ECCSOFTLIMITERR = 11006, + NVSWITCH_ERR_HW_NPORT_INGRESS_ECCHDRDOUBLEBITERR = 11007, + NVSWITCH_ERR_HW_NPORT_INGRESS_INVALIDCMD = 11008, + NVSWITCH_ERR_HW_NPORT_INGRESS_INVALIDVCSET = 11009, + NVSWITCH_ERR_HW_NPORT_INGRESS_ERRORINFO = 11010, + NVSWITCH_ERR_HW_NPORT_INGRESS_REQCONTEXTMISMATCHERR = 11011, + NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR = 11012, + NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_HDR_ECC_DBE_ERR = 11013, + NVSWITCH_ERR_HW_NPORT_INGRESS_ADDRBOUNDSERR = 11014, + NVSWITCH_ERR_HW_NPORT_INGRESS_RIDTABCFGERR = 11015, + NVSWITCH_ERR_HW_NPORT_INGRESS_RLANTABCFGERR = 11016, + NVSWITCH_ERR_HW_NPORT_INGRESS_REMAPTAB_ECC_DBE_ERR = 11017, + NVSWITCH_ERR_HW_NPORT_INGRESS_RIDTAB_ECC_DBE_ERR = 11018, + NVSWITCH_ERR_HW_NPORT_INGRESS_RLANTAB_ECC_DBE_ERR = 11019, + NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_PARITY_ERR = 11020, + NVSWITCH_ERR_HW_NPORT_INGRESS_REMAPTAB_ECC_LIMIT_ERR = 11021, + NVSWITCH_ERR_HW_NPORT_INGRESS_RIDTAB_ECC_LIMIT_ERR = 11022, + NVSWITCH_ERR_HW_NPORT_INGRESS_RLANTAB_ECC_LIMIT_ERR = 11023, + NVSWITCH_ERR_HW_NPORT_INGRESS_ADDRTYPEERR = 11024, + NVSWITCH_ERR_HW_NPORT_INGRESS_LAST, /* NOTE: Must be last */ + + /* NPORT: Egress errors */ + NVSWITCH_ERR_HW_NPORT_EGRESS = 12000, + NVSWITCH_ERR_HW_NPORT_EGRESS_EGRESSBUFERR = 12001, + NVSWITCH_ERR_HW_NPORT_EGRESS_PKTROUTEERR = 12002, + NVSWITCH_ERR_HW_NPORT_EGRESS_ECCSINGLEBITLIMITERR0 = 12003, + NVSWITCH_ERR_HW_NPORT_EGRESS_ECCHDRDOUBLEBITERR0 = 12004, + NVSWITCH_ERR_HW_NPORT_EGRESS_ECCDATADOUBLEBITERR0 = 12005, + NVSWITCH_ERR_HW_NPORT_EGRESS_ECCSINGLEBITLIMITERR1 = 12006, + NVSWITCH_ERR_HW_NPORT_EGRESS_ECCHDRDOUBLEBITERR1 = 12007, + NVSWITCH_ERR_HW_NPORT_EGRESS_ECCDATADOUBLEBITERR1 = 12008, + NVSWITCH_ERR_HW_NPORT_EGRESS_NCISOCHDRCREDITOVFL = 12009, + NVSWITCH_ERR_HW_NPORT_EGRESS_NCISOCDATACREDITOVFL = 12010, + NVSWITCH_ERR_HW_NPORT_EGRESS_ADDRMATCHERR = 12011, + NVSWITCH_ERR_HW_NPORT_EGRESS_TAGCOUNTERR = 12012, + NVSWITCH_ERR_HW_NPORT_EGRESS_FLUSHRSPERR = 12013, + NVSWITCH_ERR_HW_NPORT_EGRESS_DROPNPURRSPERR = 12014, + NVSWITCH_ERR_HW_NPORT_EGRESS_POISONERR = 12015, + NVSWITCH_ERR_HW_NPORT_EGRESS_PACKET_HEADER = 12016, + NVSWITCH_ERR_HW_NPORT_EGRESS_BUFFER_DATA = 12017, + NVSWITCH_ERR_HW_NPORT_EGRESS_NCISOC_CREDITS = 12018, + NVSWITCH_ERR_HW_NPORT_EGRESS_TAG_DATA = 12019, + NVSWITCH_ERR_HW_NPORT_EGRESS_SEQIDERR = 12020, + NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR = 12021, + NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_ECC_DBE_ERR = 12022, + NVSWITCH_ERR_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR = 12023, + NVSWITCH_ERR_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_DBE_ERR = 12024, + NVSWITCH_ERR_HW_NPORT_EGRESS_NCISOCCREDITOVFL = 12025, + NVSWITCH_ERR_HW_NPORT_EGRESS_REQTGTIDMISMATCHERR = 12026, + NVSWITCH_ERR_HW_NPORT_EGRESS_RSPREQIDMISMATCHERR = 12027, + 
NVSWITCH_ERR_HW_NPORT_EGRESS_PRIVRSPERR = 12028, + NVSWITCH_ERR_HW_NPORT_EGRESS_HWRSPERR = 12029, + NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_PARITY_ERR = 12030, + NVSWITCH_ERR_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR = 12031, + NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_FLITTYPE_MISMATCH_ERR = 12032, + NVSWITCH_ERR_HW_NPORT_EGRESS_CREDIT_TIME_OUT_ERR = 12033, + NVSWITCH_ERR_HW_NPORT_EGRESS_TIMESTAMP_LOG = 12034, + NVSWITCH_ERR_HW_NPORT_EGRESS_MISC_LOG = 12035, + NVSWITCH_ERR_HW_NPORT_EGRESS_HEADER_LOG = 12036, + NVSWITCH_ERR_HW_NPORT_EGRESS_LAST, /* NOTE: Must be last */ + + /* NPORT: Fstate errors */ + NVSWITCH_ERR_HW_NPORT_FSTATE = 13000, + NVSWITCH_ERR_HW_NPORT_FSTATE_TAGPOOLBUFERR = 13001, + NVSWITCH_ERR_HW_NPORT_FSTATE_CRUMBSTOREBUFERR = 13002, + NVSWITCH_ERR_HW_NPORT_FSTATE_SINGLEBITECCLIMITERR_CRUMBSTORE = 13003, + NVSWITCH_ERR_HW_NPORT_FSTATE_UNCORRECTABLEECCERR_CRUMBSTORE = 13004, + NVSWITCH_ERR_HW_NPORT_FSTATE_SINGLEBITECCLIMITERR_TAGSTORE = 13005, + NVSWITCH_ERR_HW_NPORT_FSTATE_UNCORRECTABLEECCERR_TAGSTORE = 13006, + NVSWITCH_ERR_HW_NPORT_FSTATE_SINGLEBITECCLIMITERR_FLUSHREQSTORE = 13007, + NVSWITCH_ERR_HW_NPORT_FSTATE_UNCORRECTABLEECCERR_FLUSHREQSTORE = 13008, + NVSWITCH_ERR_HW_NPORT_FSTATE_LAST, /* NOTE: Must be last */ + + /* NPORT: Tstate errors */ + NVSWITCH_ERR_HW_NPORT_TSTATE = 14000, + NVSWITCH_ERR_HW_NPORT_TSTATE_TAGPOOLBUFERR = 14001, + NVSWITCH_ERR_HW_NPORT_TSTATE_CRUMBSTOREBUFERR = 14002, + NVSWITCH_ERR_HW_NPORT_TSTATE_SINGLEBITECCLIMITERR_CRUMBSTORE = 14003, + NVSWITCH_ERR_HW_NPORT_TSTATE_UNCORRECTABLEECCERR_CRUMBSTORE = 14004, + NVSWITCH_ERR_HW_NPORT_TSTATE_SINGLEBITECCLIMITERR_TAGSTORE = 14005, + NVSWITCH_ERR_HW_NPORT_TSTATE_UNCORRECTABLEECCERR_TAGSTORE = 14006, + NVSWITCH_ERR_HW_NPORT_TSTATE_TAGPOOL_ECC_LIMIT_ERR = 14007, + NVSWITCH_ERR_HW_NPORT_TSTATE_TAGPOOL_ECC_DBE_ERR = 14008, + NVSWITCH_ERR_HW_NPORT_TSTATE_CRUMBSTORE_ECC_LIMIT_ERR = 14009, + NVSWITCH_ERR_HW_NPORT_TSTATE_CRUMBSTORE_ECC_DBE_ERR = 14010, + NVSWITCH_ERR_HW_NPORT_TSTATE_COL_CRUMBSTOREBUFERR = 14011, + NVSWITCH_ERR_HW_NPORT_TSTATE_COL_CRUMBSTORE_ECC_LIMIT_ERR = 14012, + NVSWITCH_ERR_HW_NPORT_TSTATE_COL_CRUMBSTORE_ECC_DBE_ERR = 14013, + NVSWITCH_ERR_HW_NPORT_TSTATE_TD_TID_RAMBUFERR = 14014, + NVSWITCH_ERR_HW_NPORT_TSTATE_TD_TID_RAM_ECC_LIMIT_ERR = 14015, + NVSWITCH_ERR_HW_NPORT_TSTATE_TD_TID_RAM_ECC_DBE_ERR = 14016, + NVSWITCH_ERR_HW_NPORT_TSTATE_ATO_ERR = 14017, + NVSWITCH_ERR_HW_NPORT_TSTATE_CAMRSP_ERR = 14018, + NVSWITCH_ERR_HW_NPORT_TSTATE_LAST, /* NOTE: Must be last */ + + /* NPORT: Route errors */ + NVSWITCH_ERR_HW_NPORT_ROUTE = 15000, + NVSWITCH_ERR_HW_NPORT_ROUTE_ROUTEBUFERR = 15001, + NVSWITCH_ERR_HW_NPORT_ROUTE_NOPORTDEFINEDERR = 15002, + NVSWITCH_ERR_HW_NPORT_ROUTE_INVALIDROUTEPOLICYERR = 15003, + NVSWITCH_ERR_HW_NPORT_ROUTE_ECCLIMITERR = 15004, + NVSWITCH_ERR_HW_NPORT_ROUTE_UNCORRECTABLEECCERR = 15005, + NVSWITCH_ERR_HW_NPORT_ROUTE_TRANSDONERESVERR = 15006, + NVSWITCH_ERR_HW_NPORT_ROUTE_PACKET_HEADER = 15007, + NVSWITCH_ERR_HW_NPORT_ROUTE_GLT_ECC_LIMIT_ERR = 15008, + NVSWITCH_ERR_HW_NPORT_ROUTE_GLT_ECC_DBE_ERR = 15009, + NVSWITCH_ERR_HW_NPORT_ROUTE_PDCTRLPARERR = 15010, + NVSWITCH_ERR_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR = 15011, + NVSWITCH_ERR_HW_NPORT_ROUTE_NVS_ECC_DBE_ERR = 15012, + NVSWITCH_ERR_HW_NPORT_ROUTE_CDTPARERR = 15013, + NVSWITCH_ERR_HW_NPORT_ROUTE_LAST, /* NOTE: Must be last */ + + /* NPORT: Nport errors */ + NVSWITCH_ERR_HW_NPORT = 16000, + NVSWITCH_ERR_HW_NPORT_DATAPOISONED = 16001, + NVSWITCH_ERR_HW_NPORT_UCINTERNAL = 16002, + NVSWITCH_ERR_HW_NPORT_CINTERNAL = 16003, + 
NVSWITCH_ERR_HW_NPORT_LAST, /* NOTE: Must be last */ + + /* NVLCTRL: NVCTRL errors */ + NVSWITCH_ERR_HW_NVLCTRL = 17000, + NVSWITCH_ERR_HW_NVLCTRL_INGRESSECCSOFTLIMITERR = 17001, + NVSWITCH_ERR_HW_NVLCTRL_INGRESSECCHDRDOUBLEBITERR = 17002, + NVSWITCH_ERR_HW_NVLCTRL_INGRESSECCDATADOUBLEBITERR = 17003, + NVSWITCH_ERR_HW_NVLCTRL_INGRESSBUFFERERR = 17004, + NVSWITCH_ERR_HW_NVLCTRL_EGRESSECCSOFTLIMITERR = 17005, + NVSWITCH_ERR_HW_NVLCTRL_EGRESSECCHDRDOUBLEBITERR = 17006, + NVSWITCH_ERR_HW_NVLCTRL_EGRESSECCDATADOUBLEBITERR = 17007, + NVSWITCH_ERR_HW_NVLCTRL_EGRESSBUFFERERR = 17008, + NVSWITCH_ERR_HW_NVLCTRL_LAST, /* NOTE: Must be last */ + + /* Nport: Nvlipt errors */ + NVSWITCH_ERR_HW_NVLIPT = 18000, + NVSWITCH_ERR_HW_NVLIPT_DLPROTOCOL = 18001, + NVSWITCH_ERR_HW_NVLIPT_DATAPOISONED = 18002, + NVSWITCH_ERR_HW_NVLIPT_FLOWCONTROL = 18003, + NVSWITCH_ERR_HW_NVLIPT_RESPONSETIMEOUT = 18004, + NVSWITCH_ERR_HW_NVLIPT_TARGETERROR = 18005, + NVSWITCH_ERR_HW_NVLIPT_UNEXPECTEDRESPONSE = 18006, + NVSWITCH_ERR_HW_NVLIPT_RECEIVEROVERFLOW = 18007, + NVSWITCH_ERR_HW_NVLIPT_MALFORMEDPACKET = 18008, + NVSWITCH_ERR_HW_NVLIPT_STOMPEDPACKETRECEIVED = 18009, + NVSWITCH_ERR_HW_NVLIPT_UNSUPPORTEDREQUEST = 18010, + NVSWITCH_ERR_HW_NVLIPT_UCINTERNAL = 18011, + NVSWITCH_ERR_HW_NVLIPT_PHYRECEIVER = 18012, + NVSWITCH_ERR_HW_NVLIPT_BADAN0PKT = 18013, + NVSWITCH_ERR_HW_NVLIPT_REPLAYTIMEOUT = 18014, + NVSWITCH_ERR_HW_NVLIPT_ADVISORYERROR = 18015, + NVSWITCH_ERR_HW_NVLIPT_CINTERNAL = 18016, + NVSWITCH_ERR_HW_NVLIPT_HEADEROVERFLOW = 18017, + NVSWITCH_ERR_HW_NVLIPT_RSTSEQ_PHYARB_TIMEOUT = 18018, + NVSWITCH_ERR_HW_NVLIPT_RSTSEQ_PLL_TIMEOUT = 18019, + NVSWITCH_ERR_HW_NVLIPT_CLKCTL_ILLEGAL_REQUEST = 18020, + NVSWITCH_ERR_HW_NVLIPT_LAST, /* NOTE: Must be last */ + + /* Nport: Nvltlc TX/RX errors */ + NVSWITCH_ERR_HW_NVLTLC = 19000, + NVSWITCH_ERR_HW_NVLTLC_TXHDRCREDITOVFERR = 19001, + NVSWITCH_ERR_HW_NVLTLC_TXDATACREDITOVFERR = 19002, + NVSWITCH_ERR_HW_NVLTLC_TXDLCREDITOVFERR = 19003, + NVSWITCH_ERR_HW_NVLTLC_TXDLCREDITPARITYERR = 19004, + NVSWITCH_ERR_HW_NVLTLC_TXRAMHDRPARITYERR = 19005, + NVSWITCH_ERR_HW_NVLTLC_TXRAMDATAPARITYERR = 19006, + NVSWITCH_ERR_HW_NVLTLC_TXUNSUPVCOVFERR = 19007, + NVSWITCH_ERR_HW_NVLTLC_TXSTOMPDET = 19008, + NVSWITCH_ERR_HW_NVLTLC_TXPOISONDET = 19009, + NVSWITCH_ERR_HW_NVLTLC_TARGETERR = 19010, + NVSWITCH_ERR_HW_NVLTLC_TX_PACKET_HEADER = 19011, + NVSWITCH_ERR_HW_NVLTLC_UNSUPPORTEDREQUESTERR = 19012, + NVSWITCH_ERR_HW_NVLTLC_RXDLHDRPARITYERR = 19013, + NVSWITCH_ERR_HW_NVLTLC_RXDLDATAPARITYERR = 19014, + NVSWITCH_ERR_HW_NVLTLC_RXDLCTRLPARITYERR = 19015, + NVSWITCH_ERR_HW_NVLTLC_RXRAMDATAPARITYERR = 19016, + NVSWITCH_ERR_HW_NVLTLC_RXRAMHDRPARITYERR = 19017, + NVSWITCH_ERR_HW_NVLTLC_RXINVALIDAEERR = 19018, + NVSWITCH_ERR_HW_NVLTLC_RXINVALIDBEERR = 19019, + NVSWITCH_ERR_HW_NVLTLC_RXINVALIDADDRALIGNERR = 19020, + NVSWITCH_ERR_HW_NVLTLC_RXPKTLENERR = 19021, + NVSWITCH_ERR_HW_NVLTLC_RSVCMDENCERR = 19022, + NVSWITCH_ERR_HW_NVLTLC_RSVDATLENENCERR = 19023, + NVSWITCH_ERR_HW_NVLTLC_RSVADDRTYPEERR = 19024, + NVSWITCH_ERR_HW_NVLTLC_RSVRSPSTATUSERR = 19025, + NVSWITCH_ERR_HW_NVLTLC_RSVPKTSTATUSERR = 19026, + NVSWITCH_ERR_HW_NVLTLC_RSVCACHEATTRPROBEREQERR = 19027, + NVSWITCH_ERR_HW_NVLTLC_RSVCACHEATTRPROBERSPERR = 19028, + NVSWITCH_ERR_HW_NVLTLC_DATLENGTATOMICREQMAXERR = 19029, + NVSWITCH_ERR_HW_NVLTLC_DATLENGTRMWREQMAXERR = 19030, + NVSWITCH_ERR_HW_NVLTLC_DATLENLTATRRSPMINERR = 19031, + NVSWITCH_ERR_HW_NVLTLC_INVALIDCACHEATTRPOERR = 19032, + NVSWITCH_ERR_HW_NVLTLC_INVALIDCRERR = 19033, + 
NVSWITCH_ERR_HW_NVLTLC_RXRESPSTATUSTARGETERR = 19034, + NVSWITCH_ERR_HW_NVLTLC_RXRESPSTATUSUNSUPPORTEDREQUESTERR = 19035, + NVSWITCH_ERR_HW_NVLTLC_RXHDROVFERR = 19036, + NVSWITCH_ERR_HW_NVLTLC_RXDATAOVFERR = 19037, + NVSWITCH_ERR_HW_NVLTLC_STOMPDETERR = 19038, + NVSWITCH_ERR_HW_NVLTLC_RXPOISONERR = 19039, + NVSWITCH_ERR_HW_NVLTLC_CORRECTABLEINTERNALERR = 19040, + NVSWITCH_ERR_HW_NVLTLC_RXUNSUPVCOVFERR = 19041, + NVSWITCH_ERR_HW_NVLTLC_RXUNSUPNVLINKCREDITRELERR = 19042, + NVSWITCH_ERR_HW_NVLTLC_RXUNSUPNCISOCCREDITRELERR = 19043, + NVSWITCH_ERR_HW_NVLTLC_RX_PACKET_HEADER = 19044, + NVSWITCH_ERR_HW_NVLTLC_RX_ERR_HEADER = 19045, + NVSWITCH_ERR_HW_NVLTLC_TX_SYS_NCISOC_PARITY_ERR = 19046, + NVSWITCH_ERR_HW_NVLTLC_TX_SYS_NCISOC_HDR_ECC_DBE_ERR = 19047, + NVSWITCH_ERR_HW_NVLTLC_TX_SYS_NCISOC_DAT_ECC_DBE_ERR = 19048, + NVSWITCH_ERR_HW_NVLTLC_TX_SYS_NCISOC_ECC_LIMIT_ERR = 19049, + NVSWITCH_ERR_HW_NVLTLC_TX_SYS_TXRSPSTATUS_HW_ERR = 19050, + NVSWITCH_ERR_HW_NVLTLC_TX_SYS_TXRSPSTATUS_UR_ERR = 19051, + NVSWITCH_ERR_HW_NVLTLC_TX_SYS_TXRSPSTATUS_PRIV_ERR = 19052, + NVSWITCH_ERR_HW_NVLTLC_RX_SYS_NCISOC_PARITY_ERR = 19053, + NVSWITCH_ERR_HW_NVLTLC_RX_SYS_HDR_RAM_ECC_DBE_ERR = 19054, + NVSWITCH_ERR_HW_NVLTLC_RX_SYS_HDR_RAM_ECC_LIMIT_ERR = 19055, + NVSWITCH_ERR_HW_NVLTLC_RX_SYS_DAT0_RAM_ECC_DBE_ERR = 19056, + NVSWITCH_ERR_HW_NVLTLC_RX_SYS_DAT0_RAM_ECC_LIMIT_ERR = 19057, + NVSWITCH_ERR_HW_NVLTLC_RX_SYS_DAT1_RAM_ECC_DBE_ERR = 19058, + NVSWITCH_ERR_HW_NVLTLC_RX_SYS_DAT1_RAM_ECC_LIMIT_ERR = 19059, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_CREQ_RAM_HDR_ECC_DBE_ERR = 19060, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_CREQ_RAM_DAT_ECC_DBE_ERR = 19061, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_CREQ_RAM_ECC_LIMIT_ERR = 19062, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_RSP_RAM_HDR_ECC_DBE_ERR = 19063, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_RSP_RAM_DAT_ECC_DBE_ERR = 19064, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_RSP_RAM_ECC_LIMIT_ERR = 19065, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_COM_RAM_HDR_ECC_DBE_ERR = 19066, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_COM_RAM_DAT_ECC_DBE_ERR = 19067, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_COM_RAM_ECC_LIMIT_ERR = 19068, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_RSP1_RAM_HDR_ECC_DBE_ERR = 19069, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_RSP1_RAM_DAT_ECC_DBE_ERR = 19070, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_RSP1_RAM_ECC_LIMIT_ERR = 19071, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC0 = 19072, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC1 = 19073, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC2 = 19074, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC3 = 19075, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC4 = 19076, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC5 = 19077, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC6 = 19078, + NVSWITCH_ERR_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC7 = 19079, + NVSWITCH_ERR_HW_NVLTLC_RX_LNK_RXRSPSTATUS_HW_ERR = 19080, + NVSWITCH_ERR_HW_NVLTLC_RX_LNK_RXRSPSTATUS_UR_ERR = 19081, + NVSWITCH_ERR_HW_NVLTLC_RX_LNK_RXRSPSTATUS_PRIV_ERR = 19082, + NVSWITCH_ERR_HW_NVLTLC_RX_LNK_INVALID_COLLAPSED_RESPONSE_ERR = 19083, + NVSWITCH_ERR_HW_NVLTLC_RX_LNK_AN1_HEARTBEAT_TIMEOUT_ERR = 19084, + NVSWITCH_ERR_HW_NVLTLC_LAST, /* NOTE: Must be last */ + + /* DLPL: errors ( SL1 errors too) */ + NVSWITCH_ERR_HW_DLPL = 20000, + NVSWITCH_ERR_HW_DLPL_TX_REPLAY = 20001, + NVSWITCH_ERR_HW_DLPL_TX_RECOVERY_SHORT = 20002, + NVSWITCH_ERR_HW_DLPL_TX_RECOVERY_LONG = 20003, + NVSWITCH_ERR_HW_DLPL_TX_FAULT_RAM = 20004, + NVSWITCH_ERR_HW_DLPL_TX_FAULT_INTERFACE = 20005, + NVSWITCH_ERR_HW_DLPL_TX_FAULT_SUBLINK_CHANGE = 20006, + NVSWITCH_ERR_HW_DLPL_RX_FAULT_SUBLINK_CHANGE = 20007, + 
NVSWITCH_ERR_HW_DLPL_RX_FAULT_DL_PROTOCOL = 20008, + NVSWITCH_ERR_HW_DLPL_RX_SHORT_ERROR_RATE = 20009, + NVSWITCH_ERR_HW_DLPL_RX_LONG_ERROR_RATE = 20010, + NVSWITCH_ERR_HW_DLPL_RX_ILA_TRIGGER = 20011, + NVSWITCH_ERR_HW_DLPL_RX_CRC_COUNTER = 20012, + NVSWITCH_ERR_HW_DLPL_LTSSM_FAULT = 20013, + NVSWITCH_ERR_HW_DLPL_LTSSM_PROTOCOL = 20014, + NVSWITCH_ERR_HW_DLPL_MINION_REQUEST = 20015, + NVSWITCH_ERR_HW_DLPL_FIFO_DRAIN_ERR = 20016, + NVSWITCH_ERR_HW_DLPL_CONST_DET_ERR = 20017, + NVSWITCH_ERR_HW_DLPL_OFF2SAFE_LINK_DET_ERR = 20018, + NVSWITCH_ERR_HW_DLPL_SAFE2NO_LINK_DET_ERR = 20019, + NVSWITCH_ERR_HW_DLPL_SCRAM_LOCK_ERR = 20020, + NVSWITCH_ERR_HW_DLPL_SYM_LOCK_ERR = 20021, + NVSWITCH_ERR_HW_DLPL_SYM_ALIGN_END_ERR = 20022, + NVSWITCH_ERR_HW_DLPL_FIFO_SKEW_ERR = 20023, + NVSWITCH_ERR_HW_DLPL_TRAIN2SAFE_LINK_DET_ERR = 20024, + NVSWITCH_ERR_HW_DLPL_HS2SAFE_LINK_DET_ERR = 20025, + NVSWITCH_ERR_HW_DLPL_FENCE_ERR = 20026, + NVSWITCH_ERR_HW_DLPL_SAFE_NO_LD_ERR = 20027, + NVSWITCH_ERR_HW_DLPL_E2SAFE_LD_ERR = 20028, + NVSWITCH_ERR_HW_DLPL_RC_RXPWR_ERR = 20029, + NVSWITCH_ERR_HW_DLPL_RC_TXPWR_ERR = 20030, + NVSWITCH_ERR_HW_DLPL_RC_DEADLINE_ERR = 20031, + NVSWITCH_ERR_HW_DLPL_TX_HS2LP_ERR = 20032, + NVSWITCH_ERR_HW_DLPL_RX_HS2LP_ERR = 20033, + NVSWITCH_ERR_HW_DLPL_LTSSM_FAULT_UP = 20034, + NVSWITCH_ERR_HW_DLPL_LTSSM_FAULT_DOWN = 20035, + NVSWITCH_ERR_HW_DLPL_PHY_A = 20036, + NVSWITCH_ERR_HW_DLPL_TX_PL_ERROR = 20037, + NVSWITCH_ERR_HW_DLPL_RX_PL_ERROR = 20038, + NVSWITCH_ERR_HW_DLPL_LAST, /* NOTE: Must be last */ + + /* AFS: errors */ + NVSWITCH_ERR_HW_AFS = 21000, + NVSWITCH_ERR_HW_AFS_UC_INGRESS_CREDIT_OVERFLOW = 21001, + NVSWITCH_ERR_HW_AFS_UC_INGRESS_CREDIT_UNDERFLOW = 21002, + NVSWITCH_ERR_HW_AFS_UC_EGRESS_CREDIT_OVERFLOW = 21003, + NVSWITCH_ERR_HW_AFS_UC_EGRESS_CREDIT_UNDERFLOW = 21004, + NVSWITCH_ERR_HW_AFS_UC_INGRESS_NON_BURSTY_PKT_DETECTED = 21005, + NVSWITCH_ERR_HW_AFS_UC_INGRESS_NON_STICKY_PKT_DETECTED = 21006, + NVSWITCH_ERR_HW_AFS_UC_INGRESS_BURST_GT_17_DATA_VC_DETECTED = 21007, + NVSWITCH_ERR_HW_AFS_UC_INGRESS_BURST_GT_1_NONDATA_VC_DETECTED = 21008, + NVSWITCH_ERR_HW_AFS_UC_INVALID_DST = 21009, + NVSWITCH_ERR_HW_AFS_UC_PKT_MISROUTE = 21010, + NVSWITCH_ERR_HW_AFS_LAST, /* NOTE: Must be last */ + + /* MINION: errors */ + NVSWITCH_ERR_HW_MINION = 22000, + NVSWITCH_ERR_HW_MINION_UCODE_IMEM = 22001, + NVSWITCH_ERR_HW_MINION_UCODE_DMEM = 22002, + NVSWITCH_ERR_HW_MINION_HALT = 22003, + NVSWITCH_ERR_HW_MINION_BOOT_ERROR = 22004, + NVSWITCH_ERR_HW_MINION_TIMEOUT = 22005, + NVSWITCH_ERR_HW_MINION_DLCMD_FAULT = 22006, + NVSWITCH_ERR_HW_MINION_DLCMD_TIMEOUT = 22007, + NVSWITCH_ERR_HW_MINION_DLCMD_FAIL = 22008, + NVSWITCH_ERR_HW_MINION_FATAL_INTR = 22009, + NVSWITCH_ERR_HW_MINION_WATCHDOG = 22010, + NVSWITCH_ERR_HW_MINION_EXTERR = 22011, + NVSWITCH_ERR_HW_MINION_FATAL_LINK_INTR = 22012, + NVSWITCH_ERR_HW_MINION_NONFATAL = 22013, + NVSWITCH_ERR_HW_MINION_LAST, /* NOTE: Must be last */ + + /* NXBAR errors */ + NVSWITCH_ERR_HW_NXBAR = 23000, + NVSWITCH_ERR_HW_NXBAR_TILE_INGRESS_BUFFER_OVERFLOW = 23001, + NVSWITCH_ERR_HW_NXBAR_TILE_INGRESS_BUFFER_UNDERFLOW = 23002, + NVSWITCH_ERR_HW_NXBAR_TILE_EGRESS_CREDIT_OVERFLOW = 23003, + NVSWITCH_ERR_HW_NXBAR_TILE_EGRESS_CREDIT_UNDERFLOW = 23004, + NVSWITCH_ERR_HW_NXBAR_TILE_INGRESS_NON_BURSTY_PKT = 23005, + NVSWITCH_ERR_HW_NXBAR_TILE_INGRESS_NON_STICKY_PKT = 23006, + NVSWITCH_ERR_HW_NXBAR_TILE_INGRESS_BURST_GT_9_DATA_VC = 23007, + NVSWITCH_ERR_HW_NXBAR_TILE_INGRESS_PKT_INVALID_DST = 23008, + NVSWITCH_ERR_HW_NXBAR_TILE_INGRESS_PKT_PARITY_ERROR = 23009, + 
NVSWITCH_ERR_HW_NXBAR_TILEOUT_INGRESS_BUFFER_OVERFLOW = 23010, + NVSWITCH_ERR_HW_NXBAR_TILEOUT_INGRESS_BUFFER_UNDERFLOW = 23011, + NVSWITCH_ERR_HW_NXBAR_TILEOUT_EGRESS_CREDIT_OVERFLOW = 23012, + NVSWITCH_ERR_HW_NXBAR_TILEOUT_EGRESS_CREDIT_UNDERFLOW = 23013, + NVSWITCH_ERR_HW_NXBAR_TILEOUT_INGRESS_NON_BURSTY_PKT = 23014, + NVSWITCH_ERR_HW_NXBAR_TILEOUT_INGRESS_NON_STICKY_PKT = 23015, + NVSWITCH_ERR_HW_NXBAR_TILEOUT_INGRESS_BURST_GT_9_DATA_VC = 23016, + NVSWITCH_ERR_HW_NXBAR_TILEOUT_EGRESS_CDT_PARITY_ERROR = 23017, + NVSWITCH_ERR_HW_NXBAR_LAST, /* NOTE: Must be last */ + + /* NPORT: SOURCETRACK errors */ + NVSWITCH_ERR_HW_NPORT_SOURCETRACK = 24000, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR = 24001, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR = 24002, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR = 24003, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR = 24004, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR = 24005, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR = 24006, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_SOURCETRACK_TIME_OUT_ERR = 24007, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_LAST, /* NOTE: Must be last */ + + /* NVLIPT_LNK errors */ + NVSWITCH_ERR_HW_NVLIPT_LNK = 25000, + NVSWITCH_ERR_HW_NVLIPT_LNK_ILLEGALLINKSTATEREQUEST = 25001, + NVSWITCH_ERR_HW_NVLIPT_LNK_FAILEDMINIONREQUEST = 25002, + NVSWITCH_ERR_HW_NVLIPT_LNK_RESERVEDREQUESTVALUE = 25003, + NVSWITCH_ERR_HW_NVLIPT_LNK_LINKSTATEWRITEWHILEBUSY = 25004, + NVSWITCH_ERR_HW_NVLIPT_LNK_LINK_STATE_REQUEST_TIMEOUT = 25005, + NVSWITCH_ERR_HW_NVLIPT_LNK_WRITE_TO_LOCKED_SYSTEM_REG_ERR = 25006, + NVSWITCH_ERR_HW_NVLIPT_LNK_SLEEPWHILEACTIVELINK = 25007, + NVSWITCH_ERR_HW_NVLIPT_LNK_RSTSEQ_PHYCTL_TIMEOUT = 25008, + NVSWITCH_ERR_HW_NVLIPT_LNK_RSTSEQ_CLKCTL_TIMEOUT = 25009, + NVSWITCH_ERR_HW_NVLIPT_LNK_LAST, /* Note: Must be last */ + + /* SOE errors */ + NVSWITCH_ERR_HW_SOE = 26000, + NVSWITCH_ERR_HW_SOE_RESET = 26001, + NVSWITCH_ERR_HW_SOE_BOOTSTRAP = 26002, + NVSWITCH_ERR_HW_SOE_COMMAND_QUEUE = 26003, + NVSWITCH_ERR_HW_SOE_TIMEOUT = 26004, + NVSWITCH_ERR_HW_SOE_SHUTDOWN = 26005, + NVSWITCH_ERR_HW_SOE_HALT = 26006, + NVSWITCH_ERR_HW_SOE_EXTERR = 26007, + NVSWITCH_ERR_HW_SOE_WATCHDOG = 26008, + NVSWITCH_ERR_HW_SOE_LAST, /* Note: Must be last */ + + /* Please update nvswitch_translate_hw_errors with a newly added error class. */ + NVSWITCH_ERR_LAST + /* See enum modification guidelines at the top of this file */ +} NVSWITCH_ERR_TYPE; + +typedef enum nvswitch_pri_error_instance +{ + NVSWITCH_PBUS_PRI_SQUASH = 0, + NVSWITCH_PBUS_PRI_FECSERR, + NVSWITCH_PBUS_PRI_TIMEOUT, + NVSWITCH_PPRIV_WRITE_SYS, + NVSWITCH_PPRIV_WRITE_PRT +} NVSWITCH_PRI_ERROR_INSTANCE; + +typedef struct nvswitch_error +{ + NvU32 error_value; /* NVSWITCH_ERR_* */ + NvU32 error_src; /* NVSWITCH_ERROR_SRC_* */ + NvU32 instance; /* Used for link# or subengine instance */ + NvU32 subinstance; /* Used for lane# or similar */ + NV_DECLARE_ALIGNED(NvU64 time, 8); /* Platform time (nsec) */ + NvBool error_resolved; /* If an error is correctable, set to true. */ +} NVSWITCH_ERROR; + +#define NVSWITCH_ERROR_COUNT_SIZE 64 + +typedef struct nvswitch_get_errors +{ + NvU32 errorType; + NvU64 errorIndex; + NvU64 nextErrorIndex; + NvU32 errorCount; + NVSWITCH_ERROR error[NVSWITCH_ERROR_COUNT_SIZE]; +} NVSWITCH_GET_ERRORS_PARAMS; + +/* + * CTRL_NVSWITCH_GET_INTERNAL_LATENCY + * + * Control for querying latency bins. 
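+ *
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * interface documentation). It assumes <sys/ioctl.h>, an fd opened on the
+ * nvswitch device node, and an IOCTL_NVSWITCH_GET_INTERNAL_LATENCY request
+ * macro from the companion ioctl header:
+ *
+ *     NVSWITCH_GET_INTERNAL_LATENCY p = { 0 };
+ *     p.vc_selector = 0;   // a VC reported as supported by NVSWITCH_GET_INFO
+ *     if (ioctl(fd, IOCTL_NVSWITCH_GET_INTERNAL_LATENCY, &p) == 0)
+ *     {
+ *         // p.egressHistogram[port].low/.medium/.high/.panic hold the per-port
+ *         // latency bin counters accumulated over p.elapsed_time_msec.
+ *     }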
+ * + * Parameters: + * vc_selector [IN] + * A valid VC number returned by NVSWITCH_GET_INFO. + * + * elapsed_time_msec [OUT] + * Elapsed time since the latency bins were queried. + * egressHistogram [OUT] + * Latency bin data/histogram format. The data will be available for the + * enabled/supported ports returned by NVSWITCH_GET_INFO. + */ + +#define NVSWITCH_MAX_PORTS 64 + +/* TODO: describe the format */ +typedef struct nvswitch_internal_latency_bins +{ + NV_DECLARE_ALIGNED(NvU64 low, 8); + NV_DECLARE_ALIGNED(NvU64 medium, 8); + NV_DECLARE_ALIGNED(NvU64 high, 8); + NV_DECLARE_ALIGNED(NvU64 panic, 8); + NV_DECLARE_ALIGNED(NvU64 count, 8); +} NVSWITCH_INTERNAL_LATENCY_BINS; + +typedef struct nvswitch_get_internal_latency +{ + NvU32 vc_selector; + NV_DECLARE_ALIGNED(NvU64 elapsed_time_msec, 8); + NVSWITCH_INTERNAL_LATENCY_BINS egressHistogram[NVSWITCH_MAX_PORTS]; +} NVSWITCH_GET_INTERNAL_LATENCY; + +/* + * CTRL_NVSWITCH_SET_LATENCY_BINS + * + * Control for setting latency bins. + * + * Parameters: + * NVSWITCH_LATENCY_BIN [IN] + * Latency bin thresholds. The thresholds would be only applied to the + * enabled ports and the supported VCs by those ports. + * NVSWITCH_GET_INFO can be used to query enabled ports and supported VCs. + */ + +#define NVSWITCH_MAX_VCS 8 + +/* TODO: describe the format */ +typedef struct nvswitch_latency_bin +{ + NvU32 lowThreshold; /* in nsec */ + NvU32 medThreshold; /* in nsec */ + NvU32 hiThreshold; /* in nsec */ + +} NVSWITCH_LATENCY_BIN; + +typedef struct nvswitch_set_latency_bins +{ + NVSWITCH_LATENCY_BIN bin[NVSWITCH_MAX_VCS]; + +} NVSWITCH_SET_LATENCY_BINS; + +/* + * CTRL_NVSWITCH_SET_SWITCH_PORT_CONFIG + * + * Control for setting device port configurations. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO. + * type [IN] + * A connection type. See NVSWITCH_CONNECTION_TYPE. + * requesterLinkID [IN] + * An unique port ID in the fabric. + * requesterLan [IN] + * A Lan Id. + * count [IN] + * Endpoint Count + * acCoupled [IN] + * Set true, if the port is AC coupled. + * enableVC1 [IN] + * Set true, if VC1 should be enabled for the port. + */ + +typedef enum nvswitch_connection_type +{ + CONNECT_ACCESS_GPU = 0, + CONNECT_ACCESS_CPU, + CONNECT_TRUNK_SWITCH, + CONNECT_ACCESS_SWITCH + /* See enum modification guidelines at the top of this file */ +} NVSWITCH_CONNECTION_TYPE; + +typedef enum nvswitch_connection_count +{ + CONNECT_COUNT_512 = 0, + CONNECT_COUNT_1024, + CONNECT_COUNT_2048 + /* See enum modification guidelines at the top of this file */ +} NVSWITCH_CONNECTION_COUNT; + +typedef struct nvswitch_set_switch_port_config +{ + NvU32 portNum; + NvU32 type; + NvU32 requesterLinkID; + NvU32 requesterLanID; + NvU32 count; + NvBool acCoupled; + NvBool enableVC1; + +} NVSWITCH_SET_SWITCH_PORT_CONFIG; + +/* + * CTRL_NVSWITCH_SET_GANGED_LINK_TABLE + * + * Control for setting ganged link tables. + * This interface is only supported on architectures that report + * _GET_INFO_INDEX_ARCH == SV10. All others will return an error. + * + * Parameters: + * linkMask [IN] + * A valid link/port mask returned by the port masks returned by + * NVSWITCH_GET_INFO. + * entries [IN] + * The Ganged link entires. 
(TODO: Describe format) + */ + +#define NVSWITCH_GANGED_LINK_TABLE_ENTRIES_MAX 256 + +typedef struct nvswitch_set_ganged_link_table +{ + NvU32 link_mask; + NvU32 entries[NVSWITCH_GANGED_LINK_TABLE_ENTRIES_MAX]; + +} NVSWITCH_SET_GANGED_LINK_TABLE; + +/* + * CTRL_NVSWITCH_GET_NVLIPT_COUNTER + * + * Control for querying NVLIPT counters. + * + * Parameters: + * liptCounter [OUT] + * Port's TX/RX traffic data. The data will be available for the + * enabled/supported ports returned by NVSWITCH_GET_INFO. + */ + +typedef struct nvswitch_nvlipt_counter +{ + NV_DECLARE_ALIGNED(NvU64 txCounter0, 8); + NV_DECLARE_ALIGNED(NvU64 txCounter1, 8); + NV_DECLARE_ALIGNED(NvU64 rxCounter0, 8); + NV_DECLARE_ALIGNED(NvU64 rxCounter1, 8); + +} NVSWITCH_NVLIPT_COUNTER; + +typedef struct nvswitch_get_nvlipt_counters +{ + NVSWITCH_NVLIPT_COUNTER liptCounter[NVSWITCH_MAX_PORTS]; + +} NVSWITCH_GET_NVLIPT_COUNTERS; + +/* + * CTRL_NVSWITCH_SET_NVLIPT_COUNTER_CONFIG + * + * Control to set NVLIPT counter configuration. + * + * Parameters: + * linkMask [IN] + * A valid link/port mask returned by the port masks returned by + * NVSWITCH_GET_INFO. + * tx0/tx1/rx0/rx1 [IN] + * TX/RX link configurations. + */ + +/* TODO: describe format */ +typedef struct nvlipt_counter_config +{ + NvU32 ctrl_0; + NvU32 ctrl_1; + NvU32 req_filter; + NvU32 rsp_filter; + NvU32 misc_filter; + NV_DECLARE_ALIGNED(NvU64 addr_filter, 8); + NV_DECLARE_ALIGNED(NvU64 addr_mask, 8); + +} NVLIPT_COUNTER_CONFIG; + +typedef struct nvswitch_set_nvlipt_counter_config +{ + NV_DECLARE_ALIGNED(NvU64 link_mask, 8); + NVLIPT_COUNTER_CONFIG tx0; + NVLIPT_COUNTER_CONFIG tx1; + NVLIPT_COUNTER_CONFIG rx0; + NVLIPT_COUNTER_CONFIG rx1; + +} NVSWITCH_SET_NVLIPT_COUNTER_CONFIG; + +/* + * CTRL_NVSWITCH_GET_NVLIPT_COUNTER_CONFIG + * + * Control to query NVLIPT counter configuration. + * + * Parameters: + * link [IN] + * A valid link/port returned by the port masks returned by + * NVSWITCH_GET_INFO. + * + * tx0/tx1/rx0/rx1 [OUT] + * TX/RX link configurations for the provide port. + */ + +typedef struct nvswitch_get_nvlipt_counter_config +{ + NvU32 link; + NVLIPT_COUNTER_CONFIG tx0; + NVLIPT_COUNTER_CONFIG tx1; + NVLIPT_COUNTER_CONFIG rx0; + NVLIPT_COUNTER_CONFIG rx1; + +} NVSWITCH_GET_NVLIPT_COUNTER_CONFIG; + +/* + * CTRL_NVSWITCH_GET_INGRESS_REQLINKID + * + * Control to query the ingress requestor link id. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + * + * requesterLinkID [OUT] + * Ingress requestor link id for the provided port. + */ + +typedef struct nvswitch_get_ingress_reqlinkid_params +{ + NvU32 portNum; + NvU32 requesterLinkID; + +} NVSWITCH_GET_INGRESS_REQLINKID_PARAMS; + +/* + * CTRL_NVSWITCH_UNREGISTER_LINK + * + * Control to unregister the request link (port). This ensures that the black- + * listed link will not be initialized or trained by the driver. + * + * Parameters: + * portNum [IN] + * A valid port number present in the port masks returned by + * NVSWITCH_GET_INFO + */ + +typedef struct nvswitch_unregister_link_params +{ + NvU32 portNum; + +} NVSWITCH_UNREGISTER_LINK_PARAMS; + +/* + * CTRL_RESET_AND_DRAIN_LINKS + * + * Control to reset and drain the links. Resets NVLinks and ensures to drain + * backed up traffic. + * + * Parameters: + * linkMask [IN] + * A mask of link(s) to be reset. + * For SV10, the linkMask must contain at least a link-pair (even-odd links). + * + * Returns: + * NVL_SUCCESS if there were no errors + * -NVL_BAD_PARAMS if input parameters are wrong. 
+ * -NVL_ERR_INVALID_STATE if other errors are present and a full-chip reset is required. + * -NVL_INITIALIZATION_TOTAL_FAILURE if NPORT initialization failed and a retry is required. + */ + +typedef struct nvswitch_reset_and_drain_links_params +{ + NV_DECLARE_ALIGNED(NvU64 linkMask, 8); + +} NVSWITCH_RESET_AND_DRAIN_LINKS_PARAMS; + +/* + * CTRL_NVSWITCH_GET_NVLINK_STATUS + * + * enabledLinkMask + * This field specifies the mask of available links on this subdevice. + * linkInfo + * This structure stores the per-link status of different NVLink + * parameters. The link is identified using an index. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +/* + * NVSWITCH_NVLINK_DEVICE_INFO + * + * This structure stores information about the device to which this link is + * associated + * + * deviceIdFlags + * Bitmask that specifies which IDs are valid for the device + * Refer NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_* for possible values + * If NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_PCI is set, PCI + * information is valid + * If NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_UUID is set, UUID is + * valid + * domain, bus, device, function, pciDeviceId + * PCI information for the device + * deviceType + * Type of the device + * See NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_* for possible values + * deviceUUID + * This field specifies the device UUID of the device. Useful for + * identifying the device (or version) + */ + +typedef struct +{ + // ID Flags + NvU32 deviceIdFlags; + + // PCI Information + NvU32 domain; + NvU16 bus; + NvU16 device; + NvU16 function; + NvU32 pciDeviceId; + + // Device Type + NV_DECLARE_ALIGNED(NvU64 deviceType, 8); + + // Device UUID + NvU8 deviceUUID[16]; +} NVSWITCH_NVLINK_DEVICE_INFO; + +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS 31:0 +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_NONE (0x00000000) +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_PCI (0x00000001) +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_UUID (0x00000002) + +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_EBRIDGE (0x00000000) +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_NPU (0x00000001) +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU (0x00000002) +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH (0x00000003) +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_TEGRA (0x00000004) +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE (0x000000FF) + +#define NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_UUID_INVALID (0xFFFFFFFF) + +/* + * NVSWITCH_NVLINK_CAPS_* + * + * SUPPORTED + * Set if NVLink is present and supported on this subdevice, NV_FALSE + * otherwise. This field is used for *global* caps only and NOT for + * per-link caps + * P2P_SUPPORTED + * Set if P2P over NVLink is supported on this subdevice, NV_FALSE + * otherwise. + * SYSMEM_ACCESS + * Set if sysmem can be accessed over NVLink on this subdevice, NV_FALSE + * otherwise. + * PEER_ATOMICS + * Set if P2P atomics are supported over NVLink on this subdevice, NV_FALSE + * otherwise. + * SYSMEM_ATOMICS + * Set if sysmem atomic transcations are supported over NVLink on this + * subdevice, NV_FALSE otherwise. + * PEX_TUNNELING + * Set if PEX tunneling over NVLink is supported on this subdevice, + * NV_FALSE otherwise. + * SLI_BRIDGE + * GLOBAL: Set if SLI over NVLink is supported on this subdevice, NV_FALSE + * otherwise. + * LINK: Set if SLI over NVLink is supported on a link, NV_FALSE + * otherwise. 
+ * SLI_BRIDGE_SENSABLE + * GLOBAL: Set if the subdevice is capable of sensing SLI bridges, NV_FALSE + * otherwise. + * LINK: Set if the link is capable of sensing an SLI bridge, NV_FALSE + * otherwise. + * POWER_STATE_L0 + * Set if L0 is a supported power state on this subdevice/link, NV_FALSE + * otherwise. + * POWER_STATE_L1 + * Set if L1 is a supported power state on this subdevice/link, NV_FALSE + * otherwise. + * POWER_STATE_L2 + * Set if L2 is a supported power state on this subdevice/link, NV_FALSE + * otherwise. + * POWER_STATE_L3 + * Set if L3 is a supported power state on this subdevice/link, NV_FALSE + * otherwise. + * VALID + * Set if this link is supported on this subdevice, NV_FALSE otherwise. + * This field is used for *per-link* caps only and NOT for global caps. + * + */ + +/* caps format is byte_index:bit_mask */ +#define NVSWITCH_NVLINK_CAPS_SUPPORTED 0:0x01 +#define NVSWITCH_NVLINK_CAPS_P2P_SUPPORTED 0:0x02 +#define NVSWITCH_NVLINK_CAPS_SYSMEM_ACCESS 0:0x04 +#define NVSWITCH_NVLINK_CAPS_P2P_ATOMICS 0:0x08 +#define NVSWITCH_NVLINK_CAPS_SYSMEM_ATOMICS 0:0x10 +#define NVSWITCH_NVLINK_CAPS_PEX_TUNNELING 0:0x20 +#define NVSWITCH_NVLINK_CAPS_SLI_BRIDGE 0:0x40 +#define NVSWITCH_NVLINK_CAPS_SLI_BRIDGE_SENSABLE 0:0x80 +#define NVSWITCH_NVLINK_CAPS_POWER_STATE_L0 1:0x01 +#define NVSWITCH_NVLINK_CAPS_POWER_STATE_L1 1:0x02 +#define NVSWITCH_NVLINK_CAPS_POWER_STATE_L2 1:0x04 +#define NVSWITCH_NVLINK_CAPS_POWER_STATE_L3 1:0x08 +#define NVSWITCH_NVLINK_CAPS_VALID 1:0x10 + +/* + * Size in bytes of nvlink caps table. This value should be one greater + * than the largest byte_index value above. + */ +#define NVSWITCH_NVLINK_CAPS_TBL_SIZE 2 + +#define NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_INVALID (0x00000000) +#define NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_1_0 (0x00000001) +#define NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_2_0 (0x00000002) +#define NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_2_2 (0x00000004) +#define NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0 (0x00000005) +#define NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_1 (0x00000006) + +#define NVSWITCH_NVLINK_CAPS_NCI_VERSION_INVALID (0x00000000) +#define NVSWITCH_NVLINK_CAPS_NCI_VERSION_1_0 (0x00000001) +#define NVSWITCH_NVLINK_CAPS_NCI_VERSION_2_0 (0x00000002) +#define NVSWITCH_NVLINK_CAPS_NCI_VERSION_2_2 (0x00000004) +#define NVSWITCH_NVLINK_CAPS_NCI_VERSION_3_0 (0x00000005) +#define NVSWITCH_NVLINK_CAPS_NCI_VERSION_3_1 (0x00000006) + + +/* + * NVSWITCH_NVLINK_LINK_STATUS_INFO + * + * This structure stores the per-link status of different NVLink parameters. + * + * capsTbl + * This is bit field for getting different global caps. The individual + * bitfields are specified by NVSWITCH_NVLINK_CAPS_* + * phyType + * This field specifies the type of PHY (NVHS or GRS) being used for this + * link. + * subLinkWidth + * This field specifies the no. of lanes per sublink. + * linkState + * This field specifies the current state of the link. See + * NVSWITCH_GET_NVLINK_STATUS_LINK_STATE_* for possible values. + * linkPowerState + * This field specifies the current power state of the link. See + * NVSWITCH_NVLINK_STATUS_LINK_POWER_STATE_* for possible values. + * rxSublinkStatus + * This field specifies the current state of RX sublink. See + * NVSWITCH_GET_NVLINK_STATUS_SUBLINK_RX_STATE_* for possible values. + * txSublinkStatus + * This field specifies the current state of TX sublink. See + * NVSWITCH_GET_NVLINK_STATUS_SUBLINK_TX_STATE_* for possible values. + * nvlinkVersion + * This field specifies the NVLink version supported by the link. 
+ * nciVersion + * This field specifies the NCI version supported by the link. + * phyVersion + * This field specifies the version of PHY being used by the link. + * nvlinkCommonClockSpeed + * This field gives the value of nvlink common clock. + * nvlinkRefClkSpeed + * This field gives the value of nvlink refclk clock. + * nvlinkRefClkType + * This field specifies whether refclk is taken from NVHS reflck or PEX + * refclk for the current GPU. See NVSWITCH_NVLINK_REFCLK_TYPE_INVALID* + * for possible values. + * nvlinkLinkClock + * This field gives the actual clock/speed at which links is running. + * connected + * This field specifies if any device is connected on the other end of the + * link + * loopProperty + * This field specifies if the link is a loopback/loopout link. See + * NVSWITCH_NVLINK_STATUS_LOOP_PROPERTY_* for possible values. + * laneRxdetStatusMask + * This field reports the per-lane RX Detect status provided by MINION. + * remoteDeviceLinkNumber + * This field specifies the link number on the remote end of the link + * remoteDeviceInfo + * This field stores the device information for the remote end of the link + * + */ + +typedef struct +{ + // Top level capablilites + NvU32 capsTbl; + + NvU8 phyType; + NvU8 subLinkWidth; + + // Link and sublink states + NvU32 linkState; + NvU32 linkPowerState; + NvU8 rxSublinkStatus; + NvU8 txSublinkStatus; + + // Indicates that lane reveral is in effect on this link. + NvBool bLaneReversal; + + NvU8 nvlinkVersion; + NvU8 nciVersion; + NvU8 phyVersion; + + // Clock information + + // These are being deprecated, please use HW Consistent terminology below + NvU32 nvlinkLinkClockKHz; + NvU32 nvlinkCommonClockSpeedKHz; + NvU32 nvlinkRefClkSpeedKHz; + NvU32 nvlinkCommonClockSpeedMhz; + + // HW consistent terminology + NvU32 nvlinkLineRateMbps; + NvU32 nvlinkLinkDataRateKiBps; + NvU32 nvlinkLinkClockMhz; + NvU32 nvlinkRefClkSpeedMhz; + NvU8 nvlinkRefClkType; + + // Connection information + NvBool connected; + NvU8 loopProperty; + NvU8 remoteDeviceLinkNumber; + NvU8 localDeviceLinkNumber; + + // + // Added as part of NvLink 3.0 + // Note: SID has link info appended to it when provided by minion + // + NV_DECLARE_ALIGNED(NvU64 remoteLinkSid, 8); + NV_DECLARE_ALIGNED(NvU64 localLinkSid, 8); + + // LR10+ only + NvU32 laneRxdetStatusMask; + + NVSWITCH_NVLINK_DEVICE_INFO remoteDeviceInfo; + NVSWITCH_NVLINK_DEVICE_INFO localDeviceInfo; +} NVSWITCH_NVLINK_LINK_STATUS_INFO; + +/* NVLink link states */ +#define NVSWITCH_NVLINK_STATUS_LINK_STATE_INIT (0x00000000) +#define NVSWITCH_NVLINK_STATUS_LINK_STATE_HWCFG (0x00000001) +#define NVSWITCH_NVLINK_STATUS_LINK_STATE_SWCFG (0x00000002) +#define NVSWITCH_NVLINK_STATUS_LINK_STATE_ACTIVE (0x00000003) +#define NVSWITCH_NVLINK_STATUS_LINK_STATE_FAULT (0x00000004) +#define NVSWITCH_NVLINK_STATUS_LINK_STATE_RECOVERY (0x00000006) +#define NVSWITCH_NVLINK_STATUS_LINK_STATE_INVALID (0xFFFFFFFF) + +/* NVLink link power states */ +#define NVSWITCH_NVLINK_STATUS_LINK_POWER_STATE_L0 (0x00000000) +#define NVSWITCH_NVLINK_STATUS_LINK_POWER_STATE_L1 (0x00000001) +#define NVSWITCH_NVLINK_STATUS_LINK_POWER_STATE_INVALID (0xFFFFFFFF) + +/* NVLink Tx sublink states */ +#define NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_HIGH_SPEED_1 (0x00000000) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_SINGLE_LANE (0x00000004) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_TRAINING (0x00000005) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_SAFE_MODE (0x00000006) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_OFF (0x00000007) 
+#define NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_INVALID (0x000000FF) + +/* NVLink Rx sublink states */ +#define NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_HIGH_SPEED_1 (0x00000000) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_SINGLE_LANE (0x00000004) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_TRAINING (0x00000005) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_SAFE_MODE (0x00000006) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_OFF (0x00000007) +#define NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_INVALID (0x000000FF) + +#define NVSWITCH_NVLINK_STATUS_PHY_NVHS (0x00000001) +#define NVSWITCH_NVLINK_STATUS_PHY_GRS (0x00000002) +#define NVSWITCH_NVLINK_STATUS_PHY_INVALID (0x000000FF) + +/* Version information */ +#define NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_1_0 (0x00000001) +#define NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_2_0 (0x00000002) +#define NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_2_2 (0x00000004) +#define NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0 (0x00000005) +#define NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_1 (0x00000006) +#define NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_4_0 (0x00000007) +#define NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_INVALID (0x000000FF) + +#define NVSWITCH_NVLINK_STATUS_NCI_VERSION_1_0 (0x00000001) +#define NVSWITCH_NVLINK_STATUS_NCI_VERSION_2_0 (0x00000002) +#define NVSWITCH_NVLINK_STATUS_NCI_VERSION_2_2 (0x00000004) +#define NVSWITCH_NVLINK_STATUS_NCI_VERSION_3_0 (0x00000005) +#define NVSWITCH_NVLINK_STATUS_NCI_VERSION_3_1 (0x00000006) +#define NVSWITCH_NVLINK_STATUS_NCI_VERSION_4_0 (0x00000007) +#define NVSWITCH_NVLINK_STATUS_NCI_VERSION_INVALID (0x000000FF) + +#define NVSWITCH_NVLINK_STATUS_NVHS_VERSION_1_0 (0x00000001) +#define NVSWITCH_NVLINK_STATUS_NVHS_VERSION_INVALID (0x000000FF) + +#define NVSWITCH_NVLINK_STATUS_GRS_VERSION_1_0 (0x00000001) +#define NVSWITCH_NVLINK_STATUS_GRS_VERSION_INVALID (0x000000FF) + +/* Connection properties */ +#define NVSWITCH_NVLINK_STATUS_CONNECTED_TRUE (0x00000001) +#define NVSWITCH_NVLINK_STATUS_CONNECTED_FALSE (0x00000000) + +#define NVSWITCH_NVLINK_STATUS_LOOP_PROPERTY_LOOPBACK (0x00000001) +#define NVSWITCH_NVLINK_STATUS_LOOP_PROPERTY_LOOPOUT (0x00000002) +#define NVSWITCH_NVLINK_STATUS_LOOP_PROPERTY_NONE (0x00000000) + +#define NVSWITCH_NVLINK_STATUS_REMOTE_LINK_NUMBER_INVALID (0x000000FF) + +#define NVSWITCH_NVLINK_MAX_LINKS 64 + +/* NVLink REFCLK types */ +#define NVSWITCH_NVLINK_REFCLK_TYPE_INVALID (0x00) +#define NVSWITCH_NVLINK_REFCLK_TYPE_NVHS (0x01) +#define NVSWITCH_NVLINK_REFCLK_TYPE_PEX (0x02) + +typedef struct +{ + NV_DECLARE_ALIGNED(NvU64 enabledLinkMask, 8); + NVSWITCH_NVLINK_LINK_STATUS_INFO linkInfo[NVSWITCH_NVLINK_MAX_LINKS]; +} NVSWITCH_GET_NVLINK_STATUS_PARAMS; + +/* List of supported capability type */ +#define NVSWITCH_CAP_FABRIC_MANAGEMENT 0 + +/* + * Max supported capabilities count. + */ +#define NVSWITCH_CAP_COUNT 1 + +/* + * CTRL_NVSWITCH_ACQUIRE_CAPABILITY + * + * Upon success, user mode would acquire the requested capability + * to perform privilege operations. This IOCTL will acquire one + * capability at a time. + * + * Parameters: + * capDescriptor [IN] + * The OS file descriptor or handle representing the capability. + * cap [IN] + * The requested capability. One of the NVSWITCH_CAP_*. + */ +typedef struct +{ + /* input parameters */ + NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8); + NvU32 cap; + + +} NVSWITCH_ACQUIRE_CAPABILITY_PARAMS; + +/* + * CTRL_NVSWITCH_GET_TEMPERATURE + * + * Control to query temperature of Nvswitch sensors. 
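+ *
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * interface documentation; the IOCTL_NVSWITCH_GET_TEMPERATURE request macro
+ * and the open device fd are assumptions):
+ *
+ *     NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS p = { 0 };
+ *     p.channelMask = 1 << NVSWITCH_THERM_CHANNEL_LR10_TSENSE_MAX;  // channel define appears below
+ *     if (ioctl(fd, IOCTL_NVSWITCH_GET_TEMPERATURE, &p) == 0)
+ *     {
+ *         // Check p.status[] for the queried channel, then read the
+ *         // corresponding p.temperature[] entry.
+ *     }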
+ * + * The Temperatures are returned in FXP 24.8(NvTemp) format. + * + * Parameters: + * channelMask [IN] + * Mask of all the thermal channels queried. + * temperature [OUT] + * Temperature of the channel. + * status [OUT] + * Return status of the channel. + */ + +#define NVSWITCH_NUM_MAX_CHANNELS 16 + +typedef struct +{ + NvU32 channelMask; + NvTemp temperature[NVSWITCH_NUM_MAX_CHANNELS]; + NvS32 status[NVSWITCH_NUM_MAX_CHANNELS]; +} NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS; + +#define NVSWITCH_CTRL_THERMAL_EVENT_ID_WARN 0 +#define NVSWITCH_CTRL_THERMAL_EVENT_ID_OVERT 1 + +typedef struct +{ + NvU32 thermalEventId; + NvTemp temperatureLimit; +} NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS; + +/* + * Limerock thermal channels + */ +#define NVSWITCH_THERM_CHANNEL_LR10_TSENSE_MAX 0x00 +#define NVSWITCH_THERM_CHANNEL_LR10_TSENSE_OFFSET_MAX 0x01 +#define NVSWITCH_THERM_CHANNEL_LR10_TDIODE 0x02 +#define NVSWITCH_THERM_CHANNEL_LR10_TDIODE_OFFSET 0x03 +#define NVSWITCH_NUM_CHANNELS_LR10 4 + +/* + * CTRL_NVSWITCH_GET_THROUGHPUT_COUNTERS + * + * Control for querying NVLINK throughput counters. + * + * Parameters: + * counterMask [IN] + * A mask of counter types. + * One of the NVSWITCH_THROUGHPUT_COUNTERS_TYPE_* macros + * linkMask [IN] + * A mask of desired link(s) + * counters [OUT] + * Fetched counter values + */ + +/* NVLink throughput counter types */ + +/* Nvlink throughput counters reading data flits in TX */ +#define NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX (0x00000001) + +/* Nvlink throughput counters reading data flits in RX */ +#define NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX (0x00000002) + +/* Nvlink throughput counters reading all flits in TX */ +#define NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_TX (0x00000004) + +/* Nvlink throughput counters reading all flits in RX */ +#define NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_RX (0x00000008) + +#define NVSWITCH_THROUGHPUT_COUNTERS_TYPE_MAX 4 + +typedef struct nvswitch_throughput_values +{ + NvU64 values[NVSWITCH_THROUGHPUT_COUNTERS_TYPE_MAX]; + +} NVSWITCH_THROUGHPUT_COUNTER_VALUES; + +typedef struct nvswitch_get_throughput_counters +{ + NvU16 counterMask; + NV_DECLARE_ALIGNED(NvU64 linkMask, 8); + NVSWITCH_THROUGHPUT_COUNTER_VALUES counters[NVSWITCH_MAX_PORTS]; + +} NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS; + +/* + * CTRL_NVSWITCH_GET_BIOS_INFO + * + * Control call to get VBIOS information. + * + * Parameters: + * version [OUT] + * Vbios version in hex value. + */ +typedef struct nvswitch_get_bios_info +{ + NvU64 version; +} NVSWITCH_GET_BIOS_INFO_PARAMS; + +/* + * CTRL_NVSWITCH_BLACKLIST_DEVICE + * + * Control to Blacklist a device. A blacklisted device will have + * interrupts disabled, and opens/ioctls will fail. If a device is + * blacklisted OOB then the setting is persistent. If a device is + * blacklisted by the OS (such as module parameter) then the setting + * persists for the OS until the config file is changed and the driver + * reloaded. If a device is blacklisted by ioctl then the setting does + * not persist across driver unload/reload. + * + * See BLACKLIST_REASON enum definition in interface/ioctl_common_nvswitch.h + * + * Parameters: + * deviceReason [IN] + * The reason the device is blacklisted + */ +typedef struct nvswitch_blacklist_device +{ + NVSWITCH_DEVICE_BLACKLIST_REASON deviceReason; +} NVSWITCH_BLACKLIST_DEVICE_PARAMS; + +/* + * CTRL_NVSWITCH_SET_FM_DRIVER_STATE + * + * Control to set the FM driver state for a device (heartbeat). + * + * Driver Fabric State is intended to reflect the state of the driver and + * fabric manager. 
Once FM sets the Driver State to CONFIGURED, it is + * expected the FM will send heartbeat updates. If the heartbeat is not + * received before the session timeout, then the driver reports status + * as MANAGER_TIMEOUT. See also control device ioctl CTRL_NVSWITCH_GET_DEVICES_V2. + * + * See DRIVER_FABRIC_STATE enum definition in interface/ioctl_common_nvswitch.h + * + * Parameters: + * driverState [IN] + * The driver state for the device + */ +typedef struct nvswitch_set_fm_driver_state +{ + NVSWITCH_DRIVER_FABRIC_STATE driverState; +} NVSWITCH_SET_FM_DRIVER_STATE_PARAMS; + +/* + * CTRL_NVSWITCH_SET_DEVICE_FABRIC_STATE + * + * Control to set the device fabric state + * + * Device Fabric State reflects the fabric state of the nvswitch device. + * FM sets the Device Fabric State to CONFIGURED once FM is managing the + * device. + * + * See DEVICE_FABRIC_STATE enum definition in interface/ioctl_common_nvswitch.h + * + * Parameters: + * deviceState [IN] + * The device fabric state + */ +typedef struct nvswitch_set_device_fabric_state +{ + NVSWITCH_DEVICE_FABRIC_STATE deviceState; +} NVSWITCH_SET_DEVICE_FABRIC_STATE_PARAMS; + +/* + * CTRL_NVSWITCH_SET_FM_HEARTBEAT_TIMEOUT + * + * Control to set the FM session heartbeat timeout for a device + * + * If a device is managed by FM, and if a heartbeat is not received + * by the FM_HEARTBEAT_TIMEOUT, then the driver reports Driver + * Fabric State as MANAGER_TIMEOUT. + * + * NVSWITCH_DEFAULT_FM_HEARTBEAT_TIMEOUT_MSEC is the default timeout + * + * Parameters: + * fmTimeout [IN] + * The FM timeout value for the device, in milliseconds + */ +typedef struct nvswitch_set_fm_heartbeat_timeout +{ + NvU32 fmTimeout; +} NVSWITCH_SET_FM_HEARTBEAT_TIMEOUT_PARAMS; +#define NVSWITCH_DEFAULT_FM_HEARTBEAT_TIMEOUT_MSEC (10*1000) + +/* + * CTRL_NVSWITCH_SET_LINK_ERROR_STATE_INFO + * + * Control to set bitmask info of the + * link training error + * + * Parameters: + * attemptedTrainingMask0 [IN] + * Bitmask of links that have been + * attempted to train. + * trainingErrorMask0 [IN] + * Bitmaks of links that have an error + * during training. + */ +typedef struct nvswitch_set_training_error_info +{ + NvU64 attemptedTrainingMask0; + NvU64 trainingErrorMask0; +} NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS; + +#define NVSWITCH_DEVICE_EVENT_FATAL 0 +#define NVSWITCH_DEVICE_EVENT_NONFATAL 1 +#define NVSWITCH_DEVICE_EVENT_PORT_UP 2 +#define NVSWITCH_DEVICE_EVENT_PORT_DOWN 3 +#define NVSWITCH_DEVICE_EVENT_COUNT 4 +#define NVSWITCH_REGISTER_EVENTS_MAX_EVENT_IDS (500) + +/* + * CTRL_NVSWITCH_REGISTER_EVENTS + * + * Control to register event IDs with an OS descriptor + * + * This control allows for clients to register one or more event IDs + * with an OS descriptor. After registering event IDs, clients may poll + * the OS descriptor for the registered event. + * + * Subsequent calls to register_event will overwrite currently registered + * event IDs. This allows the client to switch event polling as and when required. + * Explicit unregister_events control call isn't necessary when the + * client wishes to change the event types currently being monitored. + * + * On Linux, only a single event ID can be registered to each + * OS descriptor at a time. Calling this control with + * numEvents > 1 on Linux will cause an error to be returned. + * + * On Windows, the osDescriptor field should be a valid + * Windows EVENT handle. + * + * osDescriptor is unused on other operating systems. 
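+ *
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * interface documentation). The IOCTL_NVSWITCH_REGISTER_EVENTS request macro
+ * and the use of poll(2) on the same device fd are assumptions based on the
+ * Linux single-event behavior described above:
+ *
+ *     NVSWITCH_REGISTER_EVENTS_PARAMS p = { 0 };
+ *     p.eventIds[0]  = NVSWITCH_DEVICE_EVENT_FATAL;
+ *     p.numEvents    = 1;      // Linux accepts only one event ID per descriptor
+ *     p.osDescriptor = NULL;   // only used on Windows
+ *     ioctl(fd, IOCTL_NVSWITCH_REGISTER_EVENTS, &p);
+ *     // The client may then poll() the descriptor to wait for the event.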
+ * + * Parameters: + * eventIds [IN] + * A buffer of event IDs to register for + * numEvents [IN] + * Number of valid elements in eventIds + * osDescriptor [IN] + * OS event handle (Windows only) + */ +typedef struct nvswitch_register_events +{ + NvU32 eventIds[NVSWITCH_REGISTER_EVENTS_MAX_EVENT_IDS]; + NvU32 numEvents; + void *osDescriptor; +} NVSWITCH_REGISTER_EVENTS_PARAMS; + +/* + * CTRL_NVSWITCH_UNREGISTER_EVENTS + * + * Control to unregister all event IDs from an OS descriptor + * + * This control unregisters all registered event IDs associated + * with an OS descriptor. + * + * On Windows, the osDescriptor field should be a valid + * Windows EVENT handle. + * + * osDescriptor is unused on other operating systems. + * + * Parameters: + * osDescriptor [IN] + * OS event handle (Windows only) + */ +typedef struct nvswitch_unregister_events +{ + void *osDescriptor; +} NVSWITCH_UNREGISTER_EVENTS_PARAMS; + +/* + * CTRL_NVSWITCH_GET_FATAL_ERROR_SCOPE + * + * Control to query if a fatal error is occurred on a port or device + * + * Parameters: + * device [OUT] + * Set to NV_TRUE if the nvswitch device encountered a fatal error + * port [OUT] + * An array of booleans indicating which ports + * encountered a fatal error + */ +typedef struct nvswitch_get_fatal_error_scope_params +{ + NvBool device; + NvBool port[NVSWITCH_MAX_PORTS]; +} NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS; + +#define NVSWITCH_I2C_SMBUS_CMD_QUICK 0 +#define NVSWITCH_I2C_SMBUS_CMD_BYTE 1 +#define NVSWITCH_I2C_SMBUS_CMD_BYTE_DATA 2 +#define NVSWITCH_I2C_SMBUS_CMD_WORD_DATA 3 + +/* + * NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW + * + * This structure provides data for the SMBUS Byte command. + * + * message [IN/OUT] + * 8 Bit data message to read or write. + */ +typedef struct +{ + NvU8 message; +} NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW; + +/* + * NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_BYTE_DATA_RW + * + * This structure provides data for the SMBUS Byte Data command. + * + * cmd [IN] + * SMBUS input command. + * message [IN/OUT] + * 8 Bit data message to read or write. + */ +typedef struct +{ + NvU8 cmd; + NvU8 message; +} NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_BYTE_DATA_RW; + +/* + * NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_WORD_DATA_RW + * + * This structure provides data for the SMBUS Word Data command. + * + * cmd [IN] + * SMBUS input command. + * message [IN/OUT] + * 16 Bit data message to read or write. + */ +typedef struct +{ + NvU8 cmd; + NvU16 message; +} NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_WORD_DATA_RW; + +typedef union +{ + NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW smbusByte; + NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_BYTE_DATA_RW smbusByteData; + NVSWITCH_I2C_TRANSACTION_DATA_SMBUS_WORD_DATA_RW smbusWordData; +} NVSWITCH_I2C_TRANSACTION_DATA; + +/* + * CTRL_NVSWITCH_I2C_SMBUS_COMMAND + * + * Control to issue SMBUS I2C transaction to an I2C device + * + * Parameters: + * deviceAddr [IN] + * The I2C Slave address to issue a transaction to. This is the unshifted, + * normal 7-bit address. For example, the input would be address 0x50 for + * device 0xA0. + * port [IN] + * The logical port/bus in which the I2C transaction is requested. + * cmdType [IN] + * The desired SMBUS command type. See NVSWITCH_I2C_SMBUS_CMD_*. + * bRead [IN] + * This field must be specified to indicate whether the + * command is a write (FALSE) or a read (TRUE). + * transactionData [IN/OUT] + * The NVSWITCH_I2C_TRANSACTION_DATA union to be filled out/read back + * depending on the SMBUS command type. 
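+ *
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * interface documentation; the IOCTL_NVSWITCH_I2C_SMBUS_COMMAND request macro
+ * and the slave address/register values are placeholder assumptions):
+ *
+ *     NVSWITCH_I2C_SMBUS_COMMAND_PARAMS p = { 0 };
+ *     p.deviceAddr = 0x50;                        // unshifted 7-bit address
+ *     p.port       = 0;                           // logical I2C port
+ *     p.cmdType    = NVSWITCH_I2C_SMBUS_CMD_BYTE_DATA;
+ *     p.bRead      = NV_TRUE;
+ *     p.transactionData.smbusByteData.cmd = 0x00; // register offset to read
+ *     if (ioctl(fd, IOCTL_NVSWITCH_I2C_SMBUS_COMMAND, &p) == 0)
+ *     {
+ *         // p.transactionData.smbusByteData.message now holds the byte read.
+ *     }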
+ */ +typedef struct nvswitch_i2c_smbus_command_params +{ + NvU16 deviceAddr; + NvU32 port; + NvU8 cmdType; + NvBool bRead; + NVSWITCH_I2C_TRANSACTION_DATA transactionData; +} NVSWITCH_I2C_SMBUS_COMMAND_PARAMS; + +/* + * APIs for getting NVLink counters + */ + +// These are the bitmask definitions for different counter types + +#define NVSWITCH_NVLINK_COUNTER_INVALID 0x00000000 + +#define NVSWITCH_NVLINK_COUNTER_TL_TX0 0x00000001 +#define NVSWITCH_NVLINK_COUNTER_TL_TX1 0x00000002 +#define NVSWITCH_NVLINK_COUNTER_TL_RX0 0x00000004 +#define NVSWITCH_NVLINK_COUNTER_TL_RX1 0x00000008 + +#define NVSWITCH_NVLINK_LP_COUNTERS_DL 0x00000010 + +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT 0x00010000 + +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(i) (1 << (i + 17)) +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE__SIZE 8 +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 0x00020000 +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 0x00040000 +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 0x00080000 +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 0x00100000 +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 0x00200000 +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 0x00400000 +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 0x00800000 +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 0x01000000 + +#define NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY 0x02000000 +#define NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY 0x04000000 + +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY 0x08000000 + +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_MASKED 0x10000000 + +#define NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_ECC_COUNTS 0x20000000 + +#define NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS 0x40000000 +#define NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL 0x80000000 + +/* + * Note that COUNTER_MAX_TYPES will need to be updated each time adding + * a new counter type exceeds the existing value. + * + */ +#define NVSWITCH_NVLINK_COUNTER_MAX_TYPES 32 + +/* + * CTRL_NVSWITCH_GET_COUNTERS + * This command gets the counts for different counter types. + * + * [in] linkId + * This parameter specifies the TL link id/no for which we want to get + * counters for. + * + * [in] counterMask + * This parameter specifies the input mask for desired counter types. + * + * [out] bTx0TlCounterOverflow + * This boolean is set to NV_TRUE if TX Counter 0 has rolled over. + * + * [out] bTx1TlCounterOverflow + * This boolean is set to NV_TRUE if TX Counter 1 has rolled over. + * + * [out] bRx0TlCounterOverflow + * This boolean is set to NV_TRUE if RX Counter 0 has rolled over. + * + * [out] bRx1TlCounterOverflow + * This boolean is set to NV_TRUE if RX Counter 1 has rolled over. + * + * [out] nvlinkCounters + * This array contains the error counts for each error type as requested from + * the counterMask. The array indexes correspond to the mask bits one-to-one. + */ + +typedef struct +{ + NvU8 linkId; + NvU32 counterMask; + NvBool bTx0TlCounterOverflow; + NvBool bTx1TlCounterOverflow; + NvBool bRx0TlCounterOverflow; + NvBool bRx1TlCounterOverflow; + NV_DECLARE_ALIGNED(NvU64 nvlinkCounters[NVSWITCH_NVLINK_COUNTER_MAX_TYPES], 8); +} NVSWITCH_NVLINK_GET_COUNTERS_PARAMS; + +/* + * Structure to store the ECC error data. + * valid + * Is the lane valid or not + * eccErrorValue + * Value of the Error. 
+ * overflowed + * If the error overflowed or not + */ +typedef struct +{ + NvBool valid; + NvU32 eccErrorValue; + NvBool overflowed; +} NVSWITCH_LANE_ERROR; + +/* + * Structure to store ECC error data for Links + * errorLane array index corresponds to the lane number. + * + * errorLane[] + * Stores the ECC error data per lane. + */ +typedef struct +{ + NVSWITCH_LANE_ERROR errorLane[NVSWITCH_NVLINK_MAX_LANES]; + NvU32 eccDecFailed; + NvBool eccDecFailedOverflowed; +} NVSWITCH_LINK_ECC_ERROR; + +/* + * CTRL_GET_NVLINK_ECC_ERRORS + * + * Control to get the values of ECC ERRORS + * + * Parameters: + * linkMask [IN] + * Links on which the ECC error data requested + * A valid link/port mask returned by the port masks returned by + * NVSWITCH_GET_INFO + * errorLink[] [OUT] + * Stores the ECC error related information for each link. + * errorLink array index corresponds to the link Number. + */ + +typedef struct nvswitch_get_nvlink_ecc_errors +{ + NV_DECLARE_ALIGNED(NvU64 linkMask, 8); + NVSWITCH_LINK_ECC_ERROR errorLink[NVSWITCH_NVLINK_MAX_LINKS]; +} NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS; + +#define NVSWITCH_NVLINK_MAX_CORRECTABLE_ERROR_DAYS 5 +#define NVSWITCH_NVLINK_MAX_CORRECTABLE_ERROR_MONTHS 3 + +typedef struct +{ + NvU32 lastUpdated; + NvU32 flitCrcErrorsPerMinute; + NvU32 laneCrcErrorsPerMinute[NVSWITCH_NVLINK_MAX_LANES]; +} NVSWITCH_NVLINK_CORRECTABLE_ERROR_RATES; + +#define CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_COUNT_TX_NVHS 0 +#define CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_COUNT_TX_RESERVED 1 +#define CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_COUNT_TX_OTHER 2 +#define CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_NUM_TX_LP_ENTER 3 +#define CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_NUM_TX_LP_EXIT 4 +#define CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_COUNT_TX_SLEEP 5 +#define CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_MAX_COUNTERS 6 +/* + * CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS + * + * Reads NVLINK low power counters for given linkId + * + * Parameters: + * linkId [IN] + * ID of the link to be queried + * counterValidMask [IN,OUT] + * Mask of valid counters + * counterValues [OUT] + * Low power counter values returned + */ +typedef struct nvswitch_get_nvlink_lp_counters_params +{ + NvU32 linkId; + NvU32 counterValidMask; + NvU32 counterValues[CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS_MAX_COUNTERS]; +} NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS; + +/* + * CTRL_NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES + * + * This command queries recent correctable error rates for the given link. + * + * The error rates specify the maximum number of errors per minute recorded + * for the given link within a 24-hour period for daily maximums or a 30-day + * period for monthly maximums. 
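+ *
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * interface documentation; the IOCTL_NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES
+ * request macro is an assumption):
+ *
+ *     NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS p = { 0 };
+ *     p.linkId = 0;
+ *     ioctl(fd, IOCTL_NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES, &p);
+ *     // Each dailyMaxCorrectableErrorRates[] / monthlyMaxCorrectableErrorRates[]
+ *     // entry reports the worst per-minute flit/lane CRC error rates recorded
+ *     // for one of the covered days or months.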
+ * + * Parameters: + * linkId [in] + * NVLink link ID + * dailyMaxCorrectableErrorRates[] [OUT] + * NVLink daily max correctable error rate array + * monthlyMaxCorrectableErrorRates[] [OUT] + * NVLink monthly max correctable error rate array + */ + +typedef struct +{ + NvU8 linkId; + NVSWITCH_NVLINK_CORRECTABLE_ERROR_RATES dailyMaxCorrectableErrorRates[NVSWITCH_NVLINK_MAX_CORRECTABLE_ERROR_DAYS]; + NVSWITCH_NVLINK_CORRECTABLE_ERROR_RATES monthlyMaxCorrectableErrorRates[NVSWITCH_NVLINK_MAX_CORRECTABLE_ERROR_MONTHS]; +} NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS; + +#define NVSWITCH_NVLINK_ERROR_READ_SIZE 128 //could not read the maximum size (721) of entries in one call + +typedef enum +{ + NVSWITCH_NVLINK_NO_ERROR = 0, + + //DL RX Fatal Counts + NVSWITCH_NVLINK_ERR_DL_RX_FAULT_DL_PROTOCOL_FATAL = 1000, + NVSWITCH_NVLINK_ERR_DL_RX_FAULT_SUBLINK_CHANGE_FATAL, + + //DL RX Correctable Accumulated Counts + NVSWITCH_NVLINK_ERR_DL_RX_FLIT_CRC_CORR, + NVSWITCH_NVLINK_ERR_DL_RX_LANE0_CRC_CORR, + NVSWITCH_NVLINK_ERR_DL_RX_LANE1_CRC_CORR, + NVSWITCH_NVLINK_ERR_DL_RX_LANE2_CRC_CORR, + NVSWITCH_NVLINK_ERR_DL_RX_LANE3_CRC_CORR, + NVSWITCH_NVLINK_ERR_DL_RX_LINK_REPLAY_EVENTS_CORR, + + //DL TX Fatal Counts + NVSWITCH_NVLINK_ERR_DL_TX_FAULT_RAM_FATAL, + NVSWITCH_NVLINK_ERR_DL_TX_FAULT_INTERFACE_FATAL, + NVSWITCH_NVLINK_ERR_DL_TX_FAULT_SUBLINK_CHANGE_FATAL, + + //DL TX Correctable Accumulated Counts + NVSWITCH_NVLINK_ERR_DL_TX_LINK_REPLAY_EVENTS_CORR, + + //DL NA Fatal Counts + NVSWITCH_NVLINK_ERR_DL_LTSSM_FAULT_UP_FATAL, + NVSWITCH_NVLINK_ERR_DL_LTSSM_FAULT_DOWN_FATAL, + + //DL NA Correctable Accumulated Counts + NVSWITCH_NVLINK_ERR_DL_LINK_RECOVERY_EVENTS_CORR, + + //TLC RX Fatal Counts + NVSWITCH_NVLINK_ERR_TLC_RX_DL_HDR_PARITY_ERR_FATAL = 2000, + NVSWITCH_NVLINK_ERR_TLC_RX_DL_DATA_PARITY_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_DL_CTRL_PARITY_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_INVALID_AE_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_INVALID_BE_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_INVALID_ADDR_ALIGN_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_PKTLEN_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_RSVD_PACKET_STATUS_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_RSVD_CACHE_ATTR_PROBE_REQ_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_RSVD_CACHE_ATTR_PROBE_RSP_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_DATLEN_GT_RMW_REQ_MAX_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_DATLEN_LT_ATR_RSP_MIN_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_INVALID_CR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_INVALID_COLLAPSED_RESPONSE_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_HDR_OVERFLOW_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_DATA_OVERFLOW_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_STOMP_DETECTED_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_RSVD_CMD_ENC_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_RSVD_DAT_LEN_ENC_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_INVALID_PO_FOR_CACHE_ATTR_FATAL, + + //TLC RX Non-Fatal Counts + NVSWITCH_NVLINK_ERR_TLC_RX_RSP_STATUS_HW_ERR_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_RSP_STATUS_UR_ERR_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_RSP_STATUS_PRIV_ERR_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_POISON_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_AN1_HEARTBEAT_TIMEOUT_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_ILLEGAL_PRI_WRITE_NONFATAL, + + //TLC RX Fatal Counts addendum + NVSWITCH_NVLINK_ERR_TLC_RX_HDR_RAM_ECC_DBE_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_DAT0_RAM_ECC_DBE_FATAL, + NVSWITCH_NVLINK_ERR_TLC_RX_DAT1_RAM_ECC_DBE_FATAL, + + //TLC TX Fatal Counts + NVSWITCH_NVLINK_ERR_TLC_TX_DL_CREDIT_PARITY_ERR_FATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_NCISOC_HDR_ECC_DBE_FATAL, + 
NVSWITCH_NVLINK_ERR_TLC_TX_NCISOC_PARITY_ERR_FATAL, + + //TLC TX Non-Fatal Counts + NVSWITCH_NVLINK_ERR_TLC_TX_ILLEGAL_PRI_WRITE_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_AN1_TIMEOUT_VC0_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_AN1_TIMEOUT_VC1_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_AN1_TIMEOUT_VC2_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_AN1_TIMEOUT_VC3_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_AN1_TIMEOUT_VC4_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_AN1_TIMEOUT_VC5_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_AN1_TIMEOUT_VC6_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_AN1_TIMEOUT_VC7_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_POISON_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_RSP_STATUS_HW_ERR_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_RSP_STATUS_UR_ERR_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_RSP_STATUS_PRIV_ERR_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_CREQ_DAT_RAM_ECC_DBE_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_RSP_DAT_RAM_ECC_DBE_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_COM_DAT_RAM_ECC_DBE_NONFATAL, + NVSWITCH_NVLINK_ERR_TLC_TX_RSP1_DAT_RAM_ECC_DBE_FATAL, + + //NVLIPT Fatal Counts + NVSWITCH_NVLINK_ERR_NVLIPT_SLEEP_WHILE_ACTIVE_LINK_FATAL = 3000, + NVSWITCH_NVLINK_ERR_NVLIPT_RSTSEQ_PHYCTL_TIMEOUT_FATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_RSTSEQ_CLKCTL_TIMEOUT_FATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_CLKCTL_ILLEGAL_REQUEST_FATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_RSTSEQ_PLL_TIMEOUT_FATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_RSTSEQ_PHYARB_TIMEOUT_FATAL, + + //NVLIPT Non-Fatal Counts + NVSWITCH_NVLINK_ERR_NVLIPT_ILLEGAL_LINK_STATE_REQUEST_NONFATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_FAILED_MINION_REQUEST_NONFATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_RESERVED_REQUEST_VALUE_NONFATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_LINK_STATE_WRITE_WHILE_BUSY_NONFATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_WRITE_TO_LOCKED_SYSTEM_REG_NONFATAL, + NVSWITCH_NVLINK_ERR_NVLIPT_LINK_STATE_REQUEST_TIMEOUT_NONFATAL, +} NVSWITCH_NVLINK_ERROR_TYPE; + +typedef struct +{ + NvU8 instance; + NvU32 error; //NVSWITCH_NVLINK_ERROR_TYPE + NvU32 timeStamp; + NvU64 count; +} NVSWITCH_NVLINK_ERROR_ENTRY; + +/* + * CTRL_NVSWITCH_GET_NVLINK_ERROR_COUNTS + * + * Control to get the NVLINK errors from inforom cache + * + * Parameters: + * errorIndex [IN/OUT] + * On input: The index of the first NVLink error to retrieve from inforom cache + * On output: The index of the first error to retrieve after the previous call. + * errorCount [OUT] + * Number of errors returned by the call. Currently, errorCount is limited + * by NVSWITCH_NVLINK_ERROR_READ_SIZE. In order to query all the errors, a + * client needs to keep calling the control till errorCount is zero. 
+ * errorLog[] [OUT] + * NVLINK error array + */ + +typedef struct +{ + NvU32 errorIndex; + NvU32 errorCount; + NVSWITCH_NVLINK_ERROR_ENTRY errorLog[NVSWITCH_NVLINK_ERROR_READ_SIZE]; +} NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS; + +#define NVSWITCH_ECC_ERRORS_MAX_READ_COUNT 128 + +typedef struct +{ + NvU32 sxid; + NvU8 linkId; + NvU32 lastErrorTimestamp; + NvBool bAddressValid; + NvU32 address; + NvU32 correctedCount; + NvU32 uncorrectedCount; +} NVSWITCH_ECC_ERROR_ENTRY; + +/* + * CTRL_NVSWITCH_GET_ECC_ERROR_COUNTS + * + * Control to get the ECC error counts and logs from inforom + * + * Parameters: + * uncorrectedTotal [out] + * uncorrected ECC errors count + * correctedTotal [out] + * corrected ECC errors count + * errorCount [out] + * recorded error log count in the array + * errorLog[] [OUT] + * ECC errors array + */ + +typedef struct +{ + NvU64 uncorrectedTotal; + NvU64 correctedTotal; + NvU32 errorCount; + NVSWITCH_ECC_ERROR_ENTRY errorLog[NVSWITCH_ECC_ERRORS_MAX_READ_COUNT]; +} NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS; + +#define NVSWITCH_SXID_ENTRIES_NUM 10 + +typedef struct +{ + NvU32 sxid; + NvU32 timestamp; +} NVSWITCH_SXID_ENTRY; + +/* + * CTRL_NVSWITCH_GET_SXIDS + * + * Control to get the NVSwitch SXID errors from inforom cache + * + * Parameters: + * sxidCount [OUT] + * The total SXID error number + * sxidFirst [OUT] + * The array of the first NVSWITCH_SXID_ENTRIES_NUM (10) SXIDs + * sxidLast [OUT] + * The array of the last NVSWITCH_SXID_ENTRIES_NUM (10) SXIDs + */ + +typedef struct +{ + NvU32 sxidCount; + NVSWITCH_SXID_ENTRY sxidFirst[NVSWITCH_SXID_ENTRIES_NUM]; + NVSWITCH_SXID_ENTRY sxidLast[NVSWITCH_SXID_ENTRIES_NUM]; +} NVSWITCH_GET_SXIDS_PARAMS; + +/* + * CTRL_NVSWITCH_GET_FOM_VALUES + * This command gives the FOM values to MODS + * + * [in] linkId + * Link number on which the FOM values are requested + * [out] numLanes + * This field specifies the no. of lanes per link + * [out] figureOfMetritValues + * This field contains the FOM values per lane + */ + +typedef struct nvswitch_get_fom_values_params +{ + NvU32 linkId; + NvU8 numLanes; + NvU16 figureOfMeritValues[NVSWITCH_NVLINK_MAX_LANES]; +} NVSWITCH_GET_FOM_VALUES_PARAMS; + +/* + * CTRL_NVSWITCH_SET_RESIDENCY_BINS + * + * Control for setting residency bins. + * + * Parameters: + * [in] table_select + * Which table to return. + * [in] NVSWITCH_RESIDENCY_BIN + * Residency thresholds. The thresholds would be only applied to the + * enabled ports. + * NVSWITCH_GET_INFO can be used to query enabled ports. + */ + +typedef struct nvswitch_residency_bin +{ + NvU32 lowThreshold; /* in nsec */ + NvU32 hiThreshold; /* in nsec */ + +} NVSWITCH_RESIDENCY_THRESHOLDS; + +#define NVSWITCH_TABLE_SELECT_MULTICAST 0 +#define NVSWITCH_TABLE_SELECT_REDUCTION 1 + +typedef struct nvswitch_set_residency_bins +{ + NvU32 table_select; // NVSWITCH_TABLE_SELECT_MULTICAST/_REDUCTION + NVSWITCH_RESIDENCY_THRESHOLDS bin; + +} NVSWITCH_SET_RESIDENCY_BINS; + +/* + * CTRL_NVSWITCH_GET_RESIDENCY_BINS + * + * Control for querying multicast & reduction residency histogram. + * + * Parameters: + * [in] linkId + * Link number on which the residency histogram is requested + * [in] table_select + * Which table to return. + * + * [in] bin + * Residency thresholds. + * [out] residency + * Residency data/histogram format. The data will be available for the + * enabled/supported ports returned by NVSWITCH_GET_INFO. 
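+ *
+ * Illustrative usage sketch (editor's addition, not part of the original
+ * interface documentation; the IOCTL_NVSWITCH_GET_RESIDENCY_BINS request macro
+ * is an assumption):
+ *
+ *     NVSWITCH_GET_RESIDENCY_BINS p = { 0 };
+ *     p.link         = 0;
+ *     p.table_select = NVSWITCH_TABLE_SELECT_MULTICAST;
+ *     if (ioctl(fd, IOCTL_NVSWITCH_GET_RESIDENCY_BINS, &p) == 0)
+ *     {
+ *         // p.residency[] returns low/medium/high residency counters for the
+ *         // selected link and table, relative to the configured thresholds
+ *         // (see CTRL_NVSWITCH_SET_RESIDENCY_BINS).
+ *     }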
+ */ + +typedef struct nvswitch_residency_bins +{ + NV_DECLARE_ALIGNED(NvU64 low, 8); + NV_DECLARE_ALIGNED(NvU64 medium, 8); + NV_DECLARE_ALIGNED(NvU64 high, 8); +} NVSWITCH_RESIDENCY_BINS; + +#define NVSWITCH_RESIDENCY_SIZE 128 + +typedef struct nvswitch_get_residency_bins +{ + NvU32 link; + NvU32 table_select; // NVSWITCH_TABLE_SELECT_MULTICAST/_REDUCTION + NVSWITCH_RESIDENCY_THRESHOLDS bin; + NVSWITCH_RESIDENCY_BINS residency[NVSWITCH_RESIDENCY_SIZE]; +} NVSWITCH_GET_RESIDENCY_BINS; + +/* + * CTRL_NVSWITCH_GET_RB_STALL_BUSY + * + * Control for querying reduction buffer stall/busy counters. + * + * Parameters: + * [in] linkId + * Link number on which the stall/busy counters are requested + * [in] table_select + * Which table to return. + * + * [out] stall_busy + * Reduction buffer stall/busy counters. The data will be available for the + * enabled/supported ports returned by NVSWITCH_GET_INFO. + */ + +typedef struct nvswitch_stall_busy +{ + NV_DECLARE_ALIGNED(NvU64 time, 8); // in ns + NV_DECLARE_ALIGNED(NvU64 stall, 8); + NV_DECLARE_ALIGNED(NvU64 busy, 8); +} NVSWITCH_STALL_BUSY; + +typedef struct nvswitch_get_rd_stall_busy +{ + NvU32 link; + NvU32 table_select; // NVSWITCH_TABLE_SELECT_MULTICAST/_REDUCTION + NVSWITCH_STALL_BUSY vc0; + NVSWITCH_STALL_BUSY vc1; +} NVSWITCH_GET_RB_STALL_BUSY; + +#define NVSWITCH_GET_SW_INFO_COUNT_MAX 32 + +typedef enum nvswitch_get_sw_info_index +{ + NVSWITCH_GET_SW_INFO_INDEX_INFOROM_NVL_SUPPORTED = 0x0, + NVSWITCH_GET_SW_INFO_INDEX_INFOROM_BBX_SUPPORTED +} NVSWITCH_GET_SW_INFO_INDEX; + +typedef struct nvswitch_get_sw_info_params +{ + NvU32 count; + NvU32 index[NVSWITCH_GET_SW_INFO_COUNT_MAX]; + NvU32 info[NVSWITCH_GET_SW_INFO_COUNT_MAX]; +} NVSWITCH_GET_SW_INFO_PARAMS; + +/* + * NVSWITCH_CTRL_I2C_DEVICE_INFO + * + * This structure describes the basic I2C Device information. + * + * type + * This field return the type of device NVSWITCH_I2C_DEVICE_ + * i2cAddress + * This field contains the 7 bit/10 bit address of the I2C device. + * i2cLogicalPort + * This field contains the Logical port of the I2C device. + */ + +typedef enum +{ + NVSWITCH_I2C_PORT_I2CA = 0, + NVSWITCH_I2C_PORT_I2CB, + NVSWITCH_I2C_PORT_I2CC, + NVSWITCH_I2C_PORT_I2CD +} NVSWITCH_I2C_PORT_TYPE; + +typedef enum +{ + NVSWITCH_I2C_DEVICE_UNKNOWN = 0, + + NVSWITCH_I2C_DEVICE_SKIP = 0xFF + +} NVSWITCH_I2C_DEVICE_TYPE; + +typedef struct +{ + NVSWITCH_I2C_DEVICE_TYPE type; + NvU32 i2cAddress; + NVSWITCH_I2C_PORT_TYPE i2cPortLogical; +} NVSWITCH_CTRL_I2C_DEVICE_INFO; + +/* Maximum number of I2C devices in DCB */ +#define NVSWITCH_CTRL_I2C_MAX_DEVICES 32 + +/* + * CTRL_NVSWITCH_I2C_TABLE_GET_DEV_INFO + * + * RM Control to get I2C device info from the DCB I2C Devices Table. + * + * i2cDevCount + * The value of this parameter will give the number of valid + * I2C devices returned in structure. + * + * i2cDevInfo[] + * For each device the control call will report the device info + * + */ +typedef struct +{ + NvU8 i2cDevCount; + NVSWITCH_CTRL_I2C_DEVICE_INFO i2cDevInfo[NVSWITCH_CTRL_I2C_MAX_DEVICES]; +} NVSWITCH_CTRL_I2C_GET_DEV_INFO_PARAMS; + +//! Maximum size of index. +#define NVSWITCH_CTRL_I2C_INDEX_LENGTH_MAX 4 + +/*! Set if the command should begin with a START. For a transactional + * interface (highly recommended), this should always be _SEND. + */ +#define NVSWITCH_CTRL_I2C_FLAGS_START 0:0 +#define NVSWITCH_CTRL_I2C_FLAGS_START_NONE 0 +#define NVSWITCH_CTRL_I2C_FLAGS_START_SEND 1 + +/*! + * Indicate whether to send a repeated start between the index and + * message phrases. 
+ * + * This flag will send a restart between each index and message. This should + * be set for reads, but rarely (if ever) for writes. + * + * A RESTART is required when switching directions; this is called a combined + * format. These are typically used in indexed read commands, where an index + * is written to the device to indicate what register(s) to read, and then + * the register is read. Almost always, indexed writes do not require a + * restart, though some devices will accept them. However, this flag should + * be used for writes in the rare case where a restart should be sent between + * the last index and the message. + */ +#define NVSWITCH_CTRL_I2C_FLAGS_RESTART 1:1 +#define NVSWITCH_CTRL_I2C_FLAGS_RESTART_NONE 0 +#define NVSWITCH_CTRL_I2C_FLAGS_RESTART_SEND 1 + +/*! Set if the command should conclude with a STOP. For a transactional + * interface (highly recommended), this should always be _SEND. + */ +#define NVSWITCH_CTRL_I2C_FLAGS_STOP 2:2 +#define NVSWITCH_CTRL_I2C_FLAGS_STOP_NONE 0 +#define NVSWITCH_CTRL_I2C_FLAGS_STOP_SEND 1 + +/*! The slave addressing mode: 7-bit (most common) or 10-bit. It is possible + * but not recommended to send no address at all using _NONE. + */ +#define NVSWITCH_CTRL_I2C_FLAGS_ADDRESS_MODE 4:3 +#define NVSWITCH_CTRL_I2C_FLAGS_ADDRESS_MODE_NO_ADDRESS 0 +#define NVSWITCH_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT 1 +#define NVSWITCH_CTRL_I2C_FLAGS_ADDRESS_MODE_10BIT 2 + +//! The length of the index. If length is 0, no index will be sent. +#define NVSWITCH_CTRL_I2C_FLAGS_INDEX_LENGTH 7:5 +#define NVSWITCH_CTRL_I2C_FLAGS_INDEX_LENGTH_ZERO 0 +#define NVSWITCH_CTRL_I2C_FLAGS_INDEX_LENGTH_ONE 1 +#define NVSWITCH_CTRL_I2C_FLAGS_INDEX_LENGTH_TWO 2 +#define NVSWITCH_CTRL_I2C_FLAGS_INDEX_LENGTH_THREE 3 +#define NVSWITCH_CTRL_I2C_FLAGS_INDEX_LENGTH_MAXIMUM NVSWITCH_CTRL_I2C_INDEX_LENGTH_MAX + +/*! The flavor to use: software bit-bang or hardware controller. The hardware + * controller is faster, but is not necessarily available or capable. + */ +#define NVSWITCH_CTRL_I2C_FLAGS_FLAVOR 8:8 +#define NVSWITCH_CTRL_I2C_FLAGS_FLAVOR_HW 0 +#define NVSWITCH_CTRL_I2C_FLAGS_FLAVOR_SW 1 + +/*! The target speed at which to drive the transaction at. + * + * Note: The lib reserves the right to lower the speed mode if the I2C master + * implementation cannot handle the speed given. + */ +#define NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE 11:9 +#define NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_DEFAULT 0x00000000 +#define NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_100KHZ 0x00000003 +#define NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_200KHZ 0x00000004 +#define NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_300KHZ 0x00000005 +#define NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_400KHZ 0x00000006 +#define NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_1000KHZ 0x00000007 + +/* + * NVSWITCH_CTRL_I2C_FLAGS_TRANSACTION_MODE + * A client uses this field to specify a transaction mode. + * Possible values are: + * NVSWITCH_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL + * The default, this value indicates to use the normal I2C transaction + * mode which will involve read/write operations depending on client's + * needs. + * NVSWITCH_CTRL_I2C_FLAGS_TRANSACTION_MODE_PING + * This value specifies that the device only needs to be pinged. No need + * of performing a complete read/write transaction. This will address + * the device to be pinged but not send any data. On receiving an ACK, + * we will get a confirmation on the device's availability. 
+ * PING requires that: + * _START = _SEND + * _RESTART = _NONE + * _STOP = _SEND + * _ADDRESS_MODE != _NO_ADDRESS + * _INDEX_LENGTH = _ZERO + * messageLength = 0 + */ +#define NVSWITCH_CTRL_I2C_FLAGS_TRANSACTION_MODE 12:12 +#define NVSWITCH_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL (0x00000000) +#define NVSWITCH_CTRL_I2C_FLAGS_TRANSACTION_MODE_PING (0x00000001) + +/*! + * Block Reads/Writes: There are two different protocols for reading/writing >2 + * byte sets of data to/from a slave device. The SMBus specification section + * 5.5.7 defines "Block Reads/Writes" in which the first byte of the payload + * specifies the size of the data to be read/written s.t. payload_size = + * data_size + 1. However, many other devices depend on the master to already + * know the size of the data being accessed (i.e. SW written with knowledge of + * the device's I2C register spec) and skip this overhead. This second behavior + * is actually the default behavior of all the lib's I2C interfaces. + * + * Setting this bit will enable the block protocol for reads and writes for size + * >2. + */ +#define NVSWITCH_CTRL_I2C_FLAGS_BLOCK_PROTOCOL 17:17 +#define NVSWITCH_CTRL_I2C_FLAGS_BLOCK_PROTOCOL_DISABLED 0x00000000 +#define NVSWITCH_CTRL_I2C_FLAGS_BLOCK_PROTOCOL_ENABLED 0x00000001 + +/*! + * NVSWITCH_CTRL_I2C_FLAGS_RESERVED + * A client must leave this field as 0, as it is reserved for future use. + */ +#define NVSWITCH_CTRL_I2C_FLAGS_RESERVED 31:18 + +#define NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX 256 + +/* + * CTRL_NVSWITCH_I2C_INDEXED + * + * Perform a basic I2C transaction synchronously. + * + * portId + * This field must be specified by the client to indicate the logical + * port/bus for which the transaction is requested. + * + * bIsRead + * This field must be specified by the client to indicate whether the + * command is a write (FALSE) or a read (TRUE). + * + * flags + * This parameter specifies optional flags used to control certain modal + * features such as target speed and addressing mode. The currently + * defined fields are described previously; see NVSWITCH_CTRL_I2C_FLAGS_*. + * + * acquirer + * The ID of the client that is trying to take control of the I2C module. + * + * address + * The address of the I2C slave. The address should be shifted left by + * one. For example, the I2C address 0x50, often used for reading EDIDs, + * would be stored here as 0xA0. This matches the position within the + * byte sent by the master, as the last bit is reserved to specify the + * read or write direction. + * + * index + * This parameter, required of the client if index is one or more, + * specifies the index to be written. The buffer should be arranged such + * that index[0] will be the first byte sent. + * + * messageLength + * This parameter, required of the client, specifies the number of bytes to + * read or write from the slave after the index is written. + * + * message + * This parameter, required of the client, specifies the data to be written + * to the slave. The buffer should be arranged such that message[0] will + * be the first byte read or written. If the transaction is a read, then + * it will follow the combined format described in the I2C specification. + * If the transaction is a write, the message will immediately follow the + * index without a restart. 
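+ *
+ * An illustrative fill of the parameter structure (not part of the original
+ * description; the port and register values are examples only) for a one-byte
+ * indexed read of register 0x05 from a 7-bit device at address 0x50, with the
+ * flags composed by shifting values into the bit ranges defined above:
+ *
+ *     NVSWITCH_CTRL_I2C_INDEXED_PARAMS p = { 0 };
+ *     p.port          = 0;              // logical port, e.g. I2CA
+ *     p.bIsRead       = NV_TRUE;
+ *     p.address       = 0x50 << 1;      // stored shifted left by one, i.e. 0xA0
+ *     p.flags         = (1 << 0)        // _START_SEND
+ *                     | (1 << 1)        // _RESTART_SEND (combined-format read)
+ *                     | (1 << 2)        // _STOP_SEND
+ *                     | (1 << 3)        // _ADDRESS_MODE_7BIT
+ *                     | (1 << 5);       // _INDEX_LENGTH_ONE
+ *     p.index[0]      = 0x05;           // register to read
+ *     p.messageLength = 1;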
+ * + */ +typedef struct +{ + NvU8 port; + NvU8 bIsRead; + NvU16 address; + NvU32 flags; + NvU32 acquirer; + + NvU8 index[NVSWITCH_CTRL_I2C_INDEX_LENGTH_MAX]; + + NvU32 messageLength; + NvU8 message[NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX]; +} NVSWITCH_CTRL_I2C_INDEXED_PARAMS; + + +/* + * CTRL call command list. + * + * Linux driver supports only 8-bit commands. + * + * See struct control call command modification guidelines at the top + * of this file. + */ +#define CTRL_NVSWITCH_GET_INFO 0x01 +#define CTRL_NVSWITCH_SET_SWITCH_PORT_CONFIG 0x02 +#define CTRL_NVSWITCH_SET_INGRESS_REQUEST_TABLE 0x03 +#define CTRL_NVSWITCH_SET_INGRESS_REQUEST_VALID 0x04 +#define CTRL_NVSWITCH_SET_INGRESS_RESPONSE_TABLE 0x05 +#define CTRL_NVSWITCH_SET_GANGED_LINK_TABLE 0x06 +#define CTRL_NVSWITCH_GET_INTERNAL_LATENCY 0x07 +#define CTRL_NVSWITCH_SET_LATENCY_BINS 0x08 +#define CTRL_NVSWITCH_GET_NVLIPT_COUNTERS 0x09 +#define CTRL_NVSWITCH_SET_NVLIPT_COUNTER_CONFIG 0x0A +#define CTRL_NVSWITCH_GET_NVLIPT_COUNTER_CONFIG 0x0B +#define CTRL_NVSWITCH_GET_ERRORS 0x0C +#define CTRL_NVSWITCH_SET_REMAP_POLICY 0x0D +#define CTRL_NVSWITCH_SET_ROUTING_ID 0x0E +#define CTRL_NVSWITCH_SET_ROUTING_LAN 0x0F +#define CTRL_NVSWITCH_GET_INGRESS_REQUEST_TABLE 0x10 +#define CTRL_NVSWITCH_GET_INGRESS_RESPONSE_TABLE 0x11 +#define CTRL_NVSWITCH_GET_INGRESS_REQLINKID 0x12 +#define CTRL_NVSWITCH_UNREGISTER_LINK 0x13 +#define CTRL_NVSWITCH_RESET_AND_DRAIN_LINKS 0x14 +#define CTRL_NVSWITCH_GET_ROUTING_LAN 0x15 +#define CTRL_NVSWITCH_SET_ROUTING_LAN_VALID 0x16 +#define CTRL_NVSWITCH_GET_NVLINK_STATUS 0x17 +#define CTRL_NVSWITCH_ACQUIRE_CAPABILITY 0x18 +#define CTRL_NVSWITCH_GET_ROUTING_ID 0x19 +#define CTRL_NVSWITCH_SET_ROUTING_ID_VALID 0x1A +#define CTRL_NVSWITCH_GET_TEMPERATURE 0x1B +#define CTRL_NVSWITCH_GET_REMAP_POLICY 0x1C +#define CTRL_NVSWITCH_SET_REMAP_POLICY_VALID 0x1D +#define CTRL_NVSWITCH_GET_THROUGHPUT_COUNTERS 0x1E +#define CTRL_NVSWITCH_GET_BIOS_INFO 0x1F +#define CTRL_NVSWITCH_BLACKLIST_DEVICE 0x20 +#define CTRL_NVSWITCH_SET_FM_DRIVER_STATE 0x21 +#define CTRL_NVSWITCH_SET_DEVICE_FABRIC_STATE 0x22 +#define CTRL_NVSWITCH_SET_FM_HEARTBEAT_TIMEOUT 0x23 +#define CTRL_NVSWITCH_REGISTER_EVENTS 0x24 +#define CTRL_NVSWITCH_UNREGISTER_EVENTS 0x25 +#define CTRL_NVSWITCH_SET_TRAINING_ERROR_INFO 0x26 +#define CTRL_NVSWITCH_GET_FATAL_ERROR_SCOPE 0x27 +#define CTRL_NVSWITCH_GET_COUNTERS 0x2A +#define CTRL_NVSWITCH_GET_NVLINK_ECC_ERRORS 0x2B +#define CTRL_NVSWITCH_I2C_SMBUS_COMMAND 0x2C +#define CTRL_NVSWITCH_GET_TEMPERATURE_LIMIT 0x2D +#define CTRL_NVSWITCH_GET_NVLINK_MAX_ERROR_RATES 0x2E +#define CTRL_NVSWITCH_GET_NVLINK_ERROR_COUNTS 0x2F +#define CTRL_NVSWITCH_GET_ECC_ERROR_COUNTS 0x30 +#define CTRL_NVSWITCH_GET_SXIDS 0x31 +#define CTRL_NVSWITCH_GET_FOM_VALUES 0x32 +#define CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS 0x33 +#define CTRL_NVSWITCH_SET_RESIDENCY_BINS 0x34 +#define CTRL_NVSWITCH_GET_RESIDENCY_BINS 0x35 +#define CTRL_NVSWITCH_GET_RB_STALL_BUSY 0x36 +#define CTRL_NVSWITCH_RESERVED_0 0x37 +#define CTRL_NVSWITCH_RESERVED_1 0x38 +#define CTRL_NVSWITCH_RESERVED_2 0x39 +#define CTRL_NVSWITCH_RESERVED_3 0x3A +#define CTRL_NVSWITCH_RESERVED_4 0x3B +#define CTRL_NVSWITCH_RESERVED_5 0x3C +#define CTRL_NVSWITCH_GET_SW_INFO 0x47 +#define CTRL_NVSWITCH_RESERVED_6 0x48 +#define CTRL_NVSWITCH_RESERVED_7 0x49 +/* + * DO NOT ADD CODE AFTER THIS LINE. + * If the command hits 0xA0, see ctrl_dev_internal_nvswitch.h to adjust the internal range. 
+ */ + +#ifdef __cplusplus +} +#endif + +#endif // _CTRL_DEVICE_NVSWITCH_H_ diff --git a/src/common/nvswitch/interface/export_nvswitch.h b/src/common/nvswitch/interface/export_nvswitch.h new file mode 100644 index 000000000..77d41bd51 --- /dev/null +++ b/src/common/nvswitch/interface/export_nvswitch.h @@ -0,0 +1,979 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVSWITCH_EXPORT_H_ +#define _NVSWITCH_EXPORT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nv_stdarg.h" +#include "nvlink_common.h" +#include "ioctl_common_nvswitch.h" + +#define NVSWITCH_DRIVER_NAME "nvidia-nvswitch" + +#define NVSWITCH_MAX_BARS 1 + +#define NVSWITCH_DEVICE_INSTANCE_MAX 64 + +#define PCI_CLASS_BRIDGE_NVSWITCH 0x0680 + +#ifndef PCI_VENDOR_ID_NVIDIA +#define PCI_VENDOR_ID_NVIDIA 0x10DE +#endif + +#define PCI_ADDR_OFFSET_VENDOR 0 +#define PCI_ADDR_OFFSET_DEVID 2 + +#define NVSWITCH_NSEC_PER_SEC 1000000000ULL + +#define NVSWITCH_DBG_LEVEL_MMIO 0x0 +#define NVSWITCH_DBG_LEVEL_INFO 0x1 +#define NVSWITCH_DBG_LEVEL_SETUP 0x2 +#define NVSWITCH_DBG_LEVEL_WARN 0x3 +#define NVSWITCH_DBG_LEVEL_ERROR 0x4 + +#define NVSWITCH_LOG_BUFFER_SIZE 512 + +#define NVSWITCH_DMA_DIR_TO_SYSMEM 0 +#define NVSWITCH_DMA_DIR_FROM_SYSMEM 1 +#define NVSWITCH_DMA_DIR_BIDIRECTIONAL 2 + +#define NVSWITCH_I2C_CMD_READ 0 +#define NVSWITCH_I2C_CMD_WRITE 1 +#define NVSWITCH_I2C_CMD_SMBUS_READ 2 +#define NVSWITCH_I2C_CMD_SMBUS_WRITE 3 +#define NVSWITCH_I2C_CMD_SMBUS_QUICK_READ 4 +#define NVSWITCH_I2C_CMD_SMBUS_QUICK_WRITE 5 + +typedef struct nvswitch_device nvswitch_device; +typedef struct NVSWITCH_CLIENT_EVENT NVSWITCH_CLIENT_EVENT; + +/* + * @Brief : The interface will check if the client's version is supported by the + * driver. + * + * @param[in] user_version Version of the interface that the client is + * compiled with. + * @param[out] kernel_version Version of the interface that the kernel driver + * is compiled with. This information will be + * filled even if the CTRL call returns + * -NVL_ERR_NOT_SUPPORTED due to version mismatch. + * @param[in] length Version string buffer length + * + * @returns NVL_SUCCESS if the client is using compatible + * interface. + * -NVL_ERR_NOT_SUPPORTED if the client is using + * incompatible interface. + * Or, Other NVL_XXX status value. 
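+ *
+ * A minimal usage sketch (illustrative only; "client_version" stands for the
+ * interface version string the client was compiled with):
+ *
+ *     char kernel_version[64];
+ *     NvlStatus status = nvswitch_lib_check_api_version(client_version,
+ *                                                       kernel_version,
+ *                                                       sizeof(kernel_version));
+ *     if (status == -NVL_ERR_NOT_SUPPORTED)
+ *     {
+ *         // kernel_version still holds the driver's version for reporting
+ *     }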
+ */ +NvlStatus +nvswitch_lib_check_api_version +( + const char *user_version, + char *kernel_version, + NvU32 length +); + +/* + * @Brief : Allocate a new nvswitch lib device instance. + * + * @Description : Creates and registers a new nvswitch device and registers + * with the nvlink library. This only initializes software state, + * it does not initialize the hardware state. + * + * @param[in] pci_domain pci domain of the device + * @param[in] pci_bus pci bus of the device + * @param[in] pci_device pci device of the device + * @param[in] pci_func pci function of the device + * @param[in] device_id pci device ID of the device + * @param[in] os_handle Device handle used to interact with OS layer + * @param[in] os_instance instance number of this device + * @param[out] device return device handle for interfacing with library + * + * @returns NVL_SUCCESS if the action succeeded + * an NVL error code otherwise + */ +NvlStatus +nvswitch_lib_register_device +( + NvU16 pci_domain, + NvU8 pci_bus, + NvU8 pci_device, + NvU8 pci_func, + NvU16 device_id, + void *os_handle, + NvU32 os_instance, + nvswitch_device **device +); + +/* + * @Brief : Clean-up the software state for a nvswitch device. + * + * @Description : + * + * @param[in] device device handle to destroy + * + * @returns none + */ +void +nvswitch_lib_unregister_device +( + nvswitch_device *device +); + +/* + * @Brief : Initialize the hardware for a nvswitch device. + * + * @Description : + * + * @param[in] device a reference to the device to initialize + * + * @returns NVL_SUCCESS if the action succeeded + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if bar info unable to be retrieved + */ +NvlStatus +nvswitch_lib_initialize_device +( + nvswitch_device *device +); + +/* + * @Brief : Shutdown the hardware for a nvswitch device. + * + * @Description : + * + * @param[in] device a reference to the device to initialize + * + * @returns NVL_SUCCESS if the action succeeded + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if bar info unable to be retrieved + */ +NvlStatus +nvswitch_lib_shutdown_device +( + nvswitch_device *device +); + +/* + * @Brief Control call (ioctl) interface. + * + * @param[in] device device to operate on + * @param[in] cmd Enumerated command to execute. + * @param[in] params Params structure to pass to the command. + * @param[in] params_size Size of the parameter structure. + * @param[in] osPrivate The private data structure for OS. + * + * @return NVL_SUCCESS on a successful command + * -NVL_NOT_FOUND if target device unable to be found + * -NVL_BAD_ARGS if an invalid cmd is provided + * -NVL_BAD_ARGS if a null arg is provided + * -NVL_ERR_GENERIC otherwise + */ +NvlStatus nvswitch_lib_ctrl +( + nvswitch_device *device, + NvU32 cmd, + void *params, + NvU64 size, + void *osPrivate +); + +/* + * @Brief: Retrieve PCI information for a switch based from device instance + * + * @Description : + * + * @param[in] lib_handle device to query + * @param[out] pciInfo return pointer to nvswitch lib copy of device info + */ +void nvswitch_lib_get_device_info +( + nvswitch_device *lib_handle, + struct nvlink_pci_info **pciInfo +); + +/* + * @Brief: Retrieve BIOS version for an nvswitch device + * + * @Description: For devices with a BIOS, this retrieves the BIOS version. 
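+ *
+ *               A call sketch (illustrative only; the encoding of the returned
+ *               value is not described here):
+ *
+ *                   NvU64 bios_version = 0;
+ *                   if (nvswitch_lib_get_bios_version(device, &bios_version) == NVL_SUCCESS)
+ *                   {
+ *                       // report bios_version
+ *                   }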
+ * + * @param[in] device device to query + * @param[out] version BIOS version is stored here + * + * @returns NVL_SUCCESS BIOS version was retrieved successfully + * -NVL_BAD_ARGS an invalid device is provided + * -NVL_ERR_INVALID_STATE an error occurred reading BIOS info + * -NVL_ERR_NOT_SUPPORTED device doesn't support this feature + */ + +NvlStatus +nvswitch_lib_get_bios_version +( + nvswitch_device *device, + NvU64 *version +); + + +/* + * @Brief: Retrieve whether the device supports PCI pin interrupts + * + * @Description: Returns whether the device can use PCI pin IRQs + * + * + * @returns NV_TRUE device can use PCI pin IRQs + * NV_FALSE device cannot use PCI pin IRQs + */ + +NvlStatus +nvswitch_lib_use_pin_irq +( + nvswitch_device *device +); + + +/* + * @Brief: Load platform information (emulation, simulation etc.). + * + * @param[in] lib_handle device + * + * @return NVL_SUCCESS on a successful command + * -NVL_BAD_ARGS if an invalid device is provided + */ +NvlStatus nvswitch_lib_load_platform_info +( + nvswitch_device *lib_handle +); + +/* + * @Brief : Enable interrupts for this device + * + * @Description : + * + * @param[in] device device to enable + * + * @returns NVL_SUCCESS + * -NVL_PCI_ERROR if there was a register access error + */ +void +nvswitch_lib_enable_interrupts +( + nvswitch_device *device +); + +/* + * @Brief : Disable interrupts for this device + * + * @Description : + * + * @param[in] device device to enable + * + * @returns NVL_SUCCESS + * -NVL_PCI_ERROR if there was a register access error + */ +void +nvswitch_lib_disable_interrupts +( + nvswitch_device *device +); + +/* + * @Brief : Check if interrupts are pending on this device + * + * @Description : + * + * @param[in] device device to check + * + * @returns NVL_SUCCESS if there were no errors and interrupts were handled + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if there was a register access error + * -NVL_MORE_PROCESSING_REQUIRED no interrupts were found for this device + */ +NvlStatus +nvswitch_lib_check_interrupts +( + nvswitch_device *device +); + +/* + * @Brief : Services interrupts for this device + * + * @Description : + * + * @param[in] device device to service + * + * @returns NVL_SUCCESS if there were no errors and interrupts were handled + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if there was a register access error + * -NVL_MORE_PROCESSING_REQUIRED no interrupts were found for this device + */ +NvlStatus +nvswitch_lib_service_interrupts +( + nvswitch_device *device +); + +/* + * @Brief : Get depth of error logs + * + * @Description : + * + * @param[in] device device to check + * + * @param[out] fatal Count of fatal errors + * @param[out] nonfatal Count of non-fatal errors + * + * @returns NVL_SUCCESS if there were no errors and interrupts were handled + * -NVL_NOT_FOUND if bad arguments provided + */ +NvlStatus +nvswitch_lib_get_log_count +( + nvswitch_device *device, + NvU32 *fatal, NvU32 *nonfatal +); + +/* + * @Brief : Periodic thread-based dispatcher for kernel functions + * + * @Description : Its purpose is to do any background subtasks (data collection, thermal + * monitoring, etc. These subtasks may need to run at varying intervals, and + * may even wish to adjust their execution period based on other factors. + * Each subtask's entry notes the last time it was executed and its desired + * execution period. This function returns back to the dispatcher the desired + * time interval before it should be called again. 
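+ *
+ *                A minimal sketch of the intended OS-layer usage (illustrative
+ *                only; the sleep primitive is whatever the OS layer provides):
+ *
+ *                    for (;;)
+ *                    {
+ *                        NvU64 wait_ns = nvswitch_lib_deferred_task_dispatcher(device);
+ *                        // sleep for wait_ns nanoseconds, then dispatch again
+ *                    }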
+ * + * @param[in] device The device to run background tasks on + * + * @returns nsec interval to wait before the next call. + */ +NvU64 +nvswitch_lib_deferred_task_dispatcher +( + nvswitch_device *device +); + +/* + * @Brief : Perform post init tasks + * + * @Description : Any device initialization/tests which need the device to be + * initialized to a sane state go here. + * + * @param[in] device The device to run the post-init on + * + * @returns returns NvlStatus code, see nvlink_errors.h + */ +NvlStatus +nvswitch_lib_post_init_device +( + nvswitch_device *device +); + +/* + * @Brief : Perform post init tasks for a blacklisted device + * + * @Description : Any initialization tasks that should be run after a + * blacklisted item should go here. + * + * @param[in] device The device to run the post-init-blacklist on + * + * @returns void + */ +void +nvswitch_lib_post_init_blacklist_device +( + nvswitch_device *device +); + +/* + * @Brief : Get the UUID of the device + * + * @Description : Copies out the device's UUID into the uuid field + * + * @param[in] device The device to get the UUID from + * + * @param[out] uuid A pointer to a uuid struct in which the UUID is written to + * + * @returns void + */ +void +nvswitch_lib_get_uuid +( + nvswitch_device *device, + NvUuid *uuid +); + +/* + * @Brief : Get the Physical ID of the device + * + * @Description : Copies out the device's Physical ID into the phys_id field + * + * @param[in] device The device to get the UUID from + * + * @param[out] phys_id A pointer to a NvU32 which the physical ID is written to + * + * @returns NVL_SUCCESS if successful + * -NVL_BAD_ARGS if bad arguments provided + */ +NvlStatus +nvswitch_lib_get_physid +( + nvswitch_device *device, + NvU32 *phys_id +); + +/* + * @Brief : Read the Fabric State for a nvswitch device. 
+ * + * @Description : Returns the Fabric State for the device + * + * @param[in] device a reference to the device + * @param[in] *ptrs references to the fabric state + * + * @returns NVL_SUCCESS if the action succeeded + * -NVL_BAD_ARGS if bad arguments provided + */ +NvlStatus +nvswitch_lib_read_fabric_state +( + nvswitch_device *device, + NVSWITCH_DEVICE_FABRIC_STATE *device_fabric_state, + NVSWITCH_DEVICE_BLACKLIST_REASON *device_blacklist_reason, + NVSWITCH_DRIVER_FABRIC_STATE *driver_fabric_state +); + +/* + * @Brief : Validates PCI device id + * + * @Description : Validates PCI device id + * + * @param[in] device The device id to be validated + * + * @returns True if device id is valid + */ +NvBool +nvswitch_lib_validate_device_id +( + NvU32 device_id +); + +/* + * @Brief : Gets an event if it exists in the Event list + * + * @Description : Gets an event if it is in the Device's Client + * Event list + * + * @param[in] device Device to operate on + * @param[in] osPrivate The private data structure for the OS + * @param[out] ppClientEvent Double pointer to client event + * + * @returns NVL_SUCCESS if client event found + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_NOT_FOUND if no client event found + */ +NvlStatus +nvswitch_lib_get_client_event +( + nvswitch_device *device, + void *osPrivate, + NVSWITCH_CLIENT_EVENT **ppClientEvent +); + +/* + * @Brief : Adds a single entry into the Event list + * + * @Description : Adds an entry into the front of the Device's + * Client Event List + * + * @param[in] device Device to operate on + * @param[in] osPrivate The private data structure for OS + * @param[in] pParams The parameters for the client event + * + * @returns NVL_SUCCESS if event added + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_NO_MEM if allocation fails + */ +NvlStatus +nvswitch_lib_add_client_event +( + nvswitch_device *device, + void *osPrivate, + NvU32 eventId +); + +/* + * @Brief : Removes entries from the Event list + * + * @Description : Removes the entries associated with osPrivate + * from the Device's Client Event List + * + * @param[in] device Device to operate on + * @param[in] osPrivate The private data structure for OS + * + * @returns NVL_SUCCESS if event removed + */ +NvlStatus +nvswitch_lib_remove_client_events +( + nvswitch_device *device, + void *osPrivate +); + +/* + * @Brief : Notifies all events with a matching event Id in the Client Event list + * + * @Description : Notifies all events with a matching event Id in the Client Event list + * + * @param[in] device Device to operate on + * @param[in] eventId The event ID to notify + * + * @returns NVL_SUCCESS if arguments are valid + * -NVL_BAD_ARGS if bad arguments provided + */ +NvlStatus +nvswitch_lib_notify_client_events +( + nvswitch_device *device, + NvU32 eventId +); + +/* + * @Brief : Gets a mask of valid I2C ports for the device + * + * @Description : Gets a mask of valid I2C ports for the device + * + * @param[in] device Device to operate on + * @param[out] validPortsMask A pointer to a mask of valid ports + * + * @returns NVL_SUCCESS if successfuly + * -NVL_BAD_ARGS if bad arguments provided + */ +NvlStatus +nvswitch_lib_get_valid_ports_mask +( + nvswitch_device *device, + NvU32 *validPortsMask +); + +/* + * @Brief : Returns a boolean if the I2C interface is supported for the device + * + * @Description : Returns a boolean if the I2C interface is supported for the device + * + * @param[in] device Device to operate on + * + * @returns NV_TRUE device can use the I2C interface + * NV_FALSE 
device cannot use the I2C interface + */ +NvBool +nvswitch_lib_is_i2c_supported +( + nvswitch_device *device +); + +/* + * @Brief : Performs an I2C transaction + * + * @Description : Performs an I2C transaction + * + * @param[in] device Device to operate on + * @param[in] port Port to issue I2C transaction + * @param[in] type Type of I2C transaction + * @param[in] addr Device address to perform I2C transaction on + * @param[in] command I2C command to perform on + * @param[in] len Length of the I2C transaction message + * @param[in/out] pData A pointer to the buffer containing the input/output data + * + * @returns NVL_SUCCESS if I2C transaction completes + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_ERR_INVALID_STATE if something internal went wrong + */ +NvlStatus +nvswitch_lib_i2c_transfer +( + nvswitch_device *device, + NvU32 port, + NvU8 type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +); + +/* + * Returns count of registered NvSwitch devices. + */ +NvU32 +nvswitch_os_get_device_count +( + void +); + +/* + * Get current time in nanoseconds + * The time is since epoch time (midnight UTC of January 1, 1970) + */ +NvU64 +nvswitch_os_get_platform_time +( + void +); + +#define NVSWITCH_PRINT_ATTRIB(str, arg1) \ + __attribute__ ((format (printf, (str), (arg1)))) + +/* + * printf wrapper + */ +void +NVSWITCH_PRINT_ATTRIB(2, 3) +nvswitch_os_print +( + int log_level, + const char *pFormat, + ... +); + +/* + * "Registry" interface for dword + */ +NvlStatus +nvswitch_os_read_registry_dword +( + void *os_handle, + const char *name, + NvU32 *data +); + +/* + * "Registry" interface for binary data + */ +NvlStatus +nvswitch_os_read_registery_binary +( + void *os_handle, + const char *name, + NvU8 *data, + NvU32 length +); + +NvBool +nvswitch_os_is_uuid_in_blacklist +( + NvUuid *uuid +); + + +/* + * Override platform/simulation settings for cases + */ +void +nvswitch_os_override_platform +( + void *os_handle, + NvBool *rtlsim +); + +/* + * Memory management interface + */ +NvlStatus +nvswitch_os_alloc_contig_memory +( + void *os_handle, + void **virt_addr, + NvU32 size, + NvBool force_dma32 +); + +void +nvswitch_os_free_contig_memory +( + void *os_handle, + void *virt_addr, + NvU32 size +); + +NvlStatus +nvswitch_os_map_dma_region +( + void *os_handle, + void *cpu_addr, + NvU64 *dma_handle, + NvU32 size, + NvU32 direction +); + +NvlStatus +nvswitch_os_unmap_dma_region +( + void *os_handle, + void *cpu_addr, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +); + +NvlStatus +nvswitch_os_set_dma_mask +( + void *os_handle, + NvU32 dma_addr_width +); + +NvlStatus +nvswitch_os_sync_dma_region_for_cpu +( + void *os_handle, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +); + +NvlStatus +nvswitch_os_sync_dma_region_for_device +( + void *os_handle, + NvU64 dma_handle, + NvU32 size, + NvU32 direction +); + +void * +nvswitch_os_malloc_trace +( + NvLength size, + const char *file, + NvU32 line +); + +void +nvswitch_os_free +( + void *pMem +); + +NvLength +nvswitch_os_strlen +( + const char *str +); + +char* +nvswitch_os_strncpy +( + char *pDest, + const char *pSrc, + NvLength length +); + +int +nvswitch_os_strncmp +( + const char *s1, + const char *s2, + NvLength length +); + +void * +nvswitch_os_memset +( + void *pDest, + int value, + NvLength size +); + +void * +nvswitch_os_memcpy +( + void *pDest, + const void *pSrc, + NvLength size +); + +int +nvswitch_os_memcmp +( + const void *s1, + const void *s2, + NvLength size +); + +/* + * Memory read / write interface + */ +NvU32 
+nvswitch_os_mem_read32 +( + const volatile void * pAddress +); + +void +nvswitch_os_mem_write32 +( + volatile void *pAddress, + NvU32 data +); + +NvU64 +nvswitch_os_mem_read64 +( + const volatile void *pAddress +); + +void +nvswitch_os_mem_write64 +( + volatile void *pAddress, + NvU64 data +); + +/* + * Interface to write formatted output to sized buffer + */ +int +nvswitch_os_snprintf +( + char *pString, + NvLength size, + const char *pFormat, + ... +); + +/* + * Interface to write formatted output to sized buffer + */ +int +nvswitch_os_vsnprintf +( + char *buf, + NvLength size, + const char *fmt, + va_list arglist +); + +/* + * Debug assert and log interface + */ +void +nvswitch_os_assert_log +( + int cond, + const char *pFormat, + ... +); + +/* + * Interface to sleep for specified milliseconds. Yields the CPU to scheduler. + */ +void +nvswitch_os_sleep +( + unsigned int ms +); + +NvlStatus +nvswitch_os_acquire_fabric_mgmt_cap +( + void *osPrivate, + NvU64 capDescriptor +); + +int +nvswitch_os_is_fabric_manager +( + void *osPrivate +); + +int +nvswitch_os_is_admin +( + void +); + +NvlStatus +nvswitch_os_get_os_version +( + NvU32 *pMajorVer, + NvU32 *pMinorVer, + NvU32 *pBuildNum +); + +void +nvswitch_lib_smbpbi_log_sxid +( + nvswitch_device *device, + NvU32 sxid, + const char *pFormat, + ... +); + +/*! + * @brief: OS Specific handling to add an event. + */ +NvlStatus +nvswitch_os_add_client_event +( + void *osHandle, + void *osPrivate, + NvU32 eventId +); + +/*! + * @brief: OS specific handling to remove all events corresponding to osPrivate. + */ +NvlStatus +nvswitch_os_remove_client_event +( + void *osHandle, + void *osPrivate +); + +/*! + * @brief: OS specific handling to notify an event. + */ +NvlStatus +nvswitch_os_notify_client_event +( + void *osHandle, + void *osPrivate, + NvU32 eventId +); + +/*! + * @brief: Gets OS specific support for the REGISTER_EVENTS ioctl + */ +NvlStatus +nvswitch_os_get_supported_register_events_params +( + NvBool *bSupportsManyEvents, + NvBool *bUserSuppliesOsData +); + +#ifdef __cplusplus +} +#endif +#endif //_NVSWITCH_EXPORT_H_ diff --git a/src/common/nvswitch/interface/ioctl_common_nvswitch.h b/src/common/nvswitch/interface/ioctl_common_nvswitch.h new file mode 100644 index 000000000..deef94316 --- /dev/null +++ b/src/common/nvswitch/interface/ioctl_common_nvswitch.h @@ -0,0 +1,127 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _IOCTL_COMMON_NVSWITCH_H_
+#define _IOCTL_COMMON_NVSWITCH_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define NVSWITCH_DEV_IO_TYPE 'd'
+#define NVSWITCH_CTL_IO_TYPE 'c'
+
+/*
+ * Defines for IOCTL Hints
+ *
+ * NVSWITCH_IO_READ_ONLY :
+ *    Only reads parameters from the kernel and does not pass any to it
+ *
+ * NVSWITCH_IO_WRITE_ONLY :
+ *    Only writes parameters to the kernel, but does not want anything back.
+ *
+ * NVSWITCH_IO_WRITE_READ :
+ *    Writes data to the kernel and wants information back
+ *
+ * NVSWITCH_IO_DEFAULT :
+ *    Don't copy anything into the kernel, nor copy anything back.
+ */
+#define NVSWITCH_IO_READ_ONLY   0x0
+#define NVSWITCH_IO_WRITE_ONLY  0x1
+#define NVSWITCH_IO_WRITE_READ  0x2
+#define NVSWITCH_IO_DEFAULT     0x3
+
+/*
+ * Macro for defining new IOCTLs in a platform independent way.
+ *
+ * Select Linux specific IOCTL defining macro (_IO, _IOR, _IOW, _IOWR)
+ * based on IOCTL direction.
+ */
+#define NVSWITCH_IOCTL_CODE(ioType, ctrl, paramType, direction)              \
+    ((direction == NVSWITCH_IO_READ_ONLY) ? _IOR(ioType, ctrl, paramType) :  \
+    (direction == NVSWITCH_IO_WRITE_ONLY) ? _IOW(ioType, ctrl, paramType) :  \
+    (direction == NVSWITCH_IO_WRITE_READ) ? _IOWR(ioType, ctrl, paramType) : \
+    _IO(ioType, ctrl))
+
+/*
+ * NVSWITCH_NVLINK_MAX_LANES is used by both internal and external IOCTLs.
+ */
+#define NVSWITCH_NVLINK_MAX_LANES 4
+
+/*
+ * Common Fabric State enums
+ *
+ * Definitions:
+ * Driver Fabric State is intended to reflect the state of the driver and
+ * fabric manager. Once FM sets the Driver State to CONFIGURED, it is
+ * expected the FM will send heartbeat updates. If the heartbeat is not
+ * received before the session timeout, then the driver reports status
+ * as MANAGER_TIMEOUT.
+ *
+ * Device Fabric State reflects the state of the nvswitch device.
+ * FM sets the Device Fabric State to CONFIGURED once FM is managing the
+ * device. If the Device Fabric State is BLACKLISTED then the device is
+ * not available for use; opens fail for a blacklisted device, and interrupts
+ * are disabled.
+ *
+ * Blacklist Reason provides additional detail of why a device is blacklisted.
+ */ +typedef enum nvswitch_driver_fabric_state +{ + NVSWITCH_DRIVER_FABRIC_STATE_OFFLINE = 0, // offline (No driver loaded) + NVSWITCH_DRIVER_FABRIC_STATE_STANDBY, // driver up, no FM + NVSWITCH_DRIVER_FABRIC_STATE_CONFIGURED, // driver up, FM up + NVSWITCH_DRIVER_FABRIC_STATE_MANAGER_TIMEOUT, // driver up, FM timed out + NVSWITCH_DRIVER_FABRIC_STATE_MANAGER_ERROR, // driver up, FM in error state + NVSWITCH_DRIVER_FABRIC_STATE_COUNT +} NVSWITCH_DRIVER_FABRIC_STATE; + +typedef enum nvswitch_device_fabric_state +{ + NVSWITCH_DEVICE_FABRIC_STATE_OFFLINE = 0, // offline: No driver, no FM + NVSWITCH_DEVICE_FABRIC_STATE_STANDBY, // driver up, no FM, not blacklisted + NVSWITCH_DEVICE_FABRIC_STATE_CONFIGURED, // driver up, FM up, not blacklisted + NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED, // device is blacklisted + NVSWITCH_DEVICE_FABRIC_STATE_COUNT +} NVSWITCH_DEVICE_FABRIC_STATE; + +typedef enum nvswitch_device_blacklist_mode +{ + NVSWITCH_DEVICE_BLACKLIST_REASON_NONE = 0, // device is not blacklisted + NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_OUT_OF_BAND, // manually blacklisted by out-of-band client + NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_IN_BAND, // manually blacklisted by in-band OS config + NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_PEER, // FM indicates blacklisted due to peer manual blacklisted + NVSWITCH_DEVICE_BLACKLIST_REASON_TRUNK_LINK_FAILURE, // FM indicates blacklisted due to trunk link failure + NVSWITCH_DEVICE_BLACKLIST_REASON_TRUNK_LINK_FAILURE_PEER, // FM indicates blacklisted due to trunk link failure of peer + NVSWITCH_DEVICE_BLACKLIST_REASON_ACCESS_LINK_FAILURE, // FM indicates blacklisted due to access link failure + NVSWITCH_DEVICE_BLACKLIST_REASON_ACCESS_LINK_FAILURE_PEER, // FM indicates blacklisted due to access link failure of peer + NVSWITCH_DEVICE_BLACKLIST_REASON_UNSPEC_DEVICE_FAILURE, // FM indicates blacklisted due to unspecified device failure + NVSWITCH_DEVICE_BLACKLIST_REASON_UNSPEC_DEVICE_FAILURE_PEER // FM indicates blacklisted due to unspec device failure of peer +} NVSWITCH_DEVICE_BLACKLIST_REASON; + +#ifdef __cplusplus +} +#endif // __cplusplus + +#endif //_IOCTL_COMMON_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/bios_nvswitch.c b/src/common/nvswitch/kernel/bios_nvswitch.c new file mode 100644 index 000000000..f6a752495 --- /dev/null +++ b/src/common/nvswitch/kernel/bios_nvswitch.c @@ -0,0 +1,243 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "bios_nvswitch.h" +#include "error_nvswitch.h" +#include "rmsoecmdif.h" +#include "nvswitch/lr10/dev_ext_devices.h" + +#include "flcn/flcn_nvswitch.h" + +#include "rmflcncmdif_nvswitch.h" + +static NvlStatus +_nvswitch_core_bios_read +( + nvswitch_device *device, + NvU8 readType, + NvU32 reqSize, + NvU8 *pData +) +{ +#define MAX_READ_SIZE 0x2000 + RM_FLCN_CMD_SOE cmd; + NVSWITCH_TIMEOUT timeout; + NvU32 cmdSeqDesc = 0; + NV_STATUS status; + FLCN *pFlcn = NULL; + RM_SOE_CORE_CMD_BIOS *pParams = &cmd.cmd.core.bios; + NvU64 dmaHandle = 0; + NvU8 *pReadBuffer = NULL; + NvU32 spiReadCnt = 0; + NvU32 offset = 0; + NvU32 bufferSize = (reqSize < SOE_DMA_MAX_SIZE) ? SOE_DMA_MAX_SIZE : MAX_READ_SIZE; + + // Create DMA mapping for SOE CORE transactions + status = nvswitch_os_alloc_contig_memory(device->os_handle, + (void**)&pReadBuffer, bufferSize, (device->dma_addr_width == 32)); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to allocate contig memory\n"); + return status; + } + + status = nvswitch_os_map_dma_region(device->os_handle, + pReadBuffer, + &dmaHandle, + bufferSize, + NVSWITCH_DMA_DIR_TO_SYSMEM); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to map dma region to sysmem\n"); + nvswitch_os_free_contig_memory(device->os_handle, pReadBuffer, bufferSize); + return status; + } + + pFlcn = device->pSoe->pFlcn; + + while (offset < reqSize) + { + nvswitch_os_memset(pReadBuffer, 0, bufferSize); + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.unitId = RM_SOE_UNIT_CORE; + cmd.hdr.size = sizeof(cmd); + cmd.cmd.core.bios.cmdType = readType; + RM_FLCN_U64_PACK(&pParams->dmaHandle, &dmaHandle); + pParams->offset = offset; + pParams->sizeInBytes = NV_MIN((reqSize - offset), MAX_READ_SIZE); + cmdSeqDesc = 0; + + status = nvswitch_os_sync_dma_region_for_device(device->os_handle, dmaHandle, + bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM); + if (status != NV_OK) + { + nvswitch_os_unmap_dma_region(device->os_handle, pReadBuffer, dmaHandle, + bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM); + nvswitch_os_free_contig_memory(device->os_handle, pReadBuffer, bufferSize); + NVSWITCH_PRINT(device, ERROR, "Failed to yield to DMA controller\n"); + return status; + } + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 4, &timeout); + + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg - not used for now + NULL, // pPayload - not used for now + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: CORE read failed. 
rc:%d\n", + __FUNCTION__, status); + break; + } + + status = nvswitch_os_sync_dma_region_for_cpu(device->os_handle, dmaHandle, + bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "DMA controller failed to yield back\n"); + break; + } + + if (readType == RM_SOE_CORE_CMD_READ_BIOS) + { + nvswitch_os_memcpy(((NvU8*)&pData[offset]), pReadBuffer, pParams->sizeInBytes); + } + else if (readType == RM_SOE_CORE_CMD_READ_BIOS_SIZE) + { + nvswitch_os_memcpy(((NvU8*)pData), pReadBuffer, reqSize); + break; + } + + offset += pParams->sizeInBytes; + spiReadCnt++; + } + + nvswitch_os_unmap_dma_region(device->os_handle, pReadBuffer, dmaHandle, + bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM); + + nvswitch_os_free_contig_memory(device->os_handle, pReadBuffer, bufferSize); + return status; +} + +NvlStatus +nvswitch_bios_read +( + nvswitch_device *device, + NvU32 size, + void *pData +) +{ + return _nvswitch_core_bios_read(device, RM_SOE_CORE_CMD_READ_BIOS, size, (NvU8*)pData); +} + +NvlStatus +nvswitch_bios_read_size +( + nvswitch_device *device, + NvU32 *pSize +) +{ + if (pSize == NULL) + { + return -NVL_BAD_ARGS; + } + + return _nvswitch_core_bios_read(device, RM_SOE_CORE_CMD_READ_BIOS_SIZE, sizeof(NvU32), (NvU8*)pSize); +} + +/*! + * @brief Retrieves BIOS Image via SOE's CORE task + * This function needs SOE to be initialized for the Util task to respond. + * Upon success the BIOS Image will be place in device.biosImage + * @param[in/out] device - pointer to the nvswitch device. + */ +NvlStatus +nvswitch_bios_get_image +( + nvswitch_device *device +) +{ + NvU8 *pBiosRawBuffer = NULL; + NvlStatus status = NVL_SUCCESS; + NvU32 biosSize = 0; + + if (device->biosImage.pImage != NULL) + { + NVSWITCH_PRINT(device, ERROR, + "NVRM: %s: bios already available, skip reading" + "\n", __FUNCTION__); + + return NVL_SUCCESS; + } + + if (!device->pSoe) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SOE not initialized yet. \n", + __FUNCTION__); + return NVL_SUCCESS; + } + + status = nvswitch_bios_read_size(device, &biosSize); + if (status != NVL_SUCCESS || biosSize == 0) + { + NVSWITCH_PRINT(device, ERROR, + "NVRM: %s: bios read size failed" + "\n", __FUNCTION__); + return status; + } + + NVSWITCH_PRINT(device, SETUP, + "NVRM: %s: BIOS Size = 0x%x" + "\n", __FUNCTION__, biosSize); + + pBiosRawBuffer = (NvU8*) nvswitch_os_malloc(biosSize); + if (pBiosRawBuffer == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s : failed memory allocation" + "\n", __FUNCTION__); + + return -NVL_NO_MEM; + } + + nvswitch_os_memset(pBiosRawBuffer, 0, biosSize); + + status = nvswitch_bios_read(device, biosSize, pBiosRawBuffer); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, " Failed to retrieve BIOS image, Code 0x%x\n", status); + nvswitch_os_free(pBiosRawBuffer); + return status; + } + + device->biosImage.pImage = pBiosRawBuffer; + device->biosImage.size = biosSize; + + return NVL_SUCCESS; +} diff --git a/src/common/nvswitch/kernel/error_nvswitch.c b/src/common/nvswitch/kernel/error_nvswitch.c new file mode 100644 index 000000000..ee1676710 --- /dev/null +++ b/src/common/nvswitch/kernel/error_nvswitch.c @@ -0,0 +1,544 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "error_nvswitch.h" + +#define NVSWITCH_DATE_LEN 64 + +// +// Error logging +// +static void +_nvswitch_dump_error_entry +( + nvswitch_device *device, + NvU32 error_count, + NVSWITCH_ERROR_TYPE *error_entry +) +{ + if ((error_entry != NULL) && + (error_entry->error_src == NVSWITCH_ERROR_SRC_HW)) + { + NVSWITCH_PRINT_SXID(device, error_entry->error_type, + "Severity %d Engine instance %02d Sub-engine instance %02d\n", + error_entry->severity, error_entry->instance, error_entry->subinstance); + + NVSWITCH_PRINT_SXID(device, error_entry->error_type, + "Data {0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x}\n", + error_entry->data.raw.data[0], error_entry->data.raw.data[1], + error_entry->data.raw.data[2], error_entry->data.raw.data[3], + error_entry->data.raw.data[4], error_entry->data.raw.data[5], + error_entry->data.raw.data[6], error_entry->data.raw.data[7]); + + if ((error_entry->data.raw.data[ 8] != 0) || + (error_entry->data.raw.data[ 9] != 0) || + (error_entry->data.raw.data[10] != 0) || + (error_entry->data.raw.data[11] != 0) || + (error_entry->data.raw.data[12] != 0) || + (error_entry->data.raw.data[13] != 0) || + (error_entry->data.raw.data[14] != 0) || + (error_entry->data.raw.data[15] != 0)) + + { + NVSWITCH_PRINT_SXID(device, error_entry->error_type, + "Data {0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x, 0x%08x}\n", + error_entry->data.raw.data[ 8], error_entry->data.raw.data[ 9], + error_entry->data.raw.data[10], error_entry->data.raw.data[11], + error_entry->data.raw.data[12], error_entry->data.raw.data[13], + error_entry->data.raw.data[14], error_entry->data.raw.data[15]); + } + } +} + +// +// Construct an error log +// +// If error_log_size > 0 a circular buffer is created to record errors +// +NvlStatus +nvswitch_construct_error_log +( + NVSWITCH_ERROR_LOG_TYPE *errors, + NvU32 error_log_size, + NvBool overwritable +) +{ + NvlStatus retval = NVL_SUCCESS; + + NVSWITCH_ASSERT(errors != NULL); + + errors->error_start = 0; + errors->error_count = 0; + errors->error_total = 0; + errors->error_log_size = 0; + errors->error_log = NULL; + errors->overwritable = overwritable; + + if (error_log_size > 0) + { + errors->error_log = nvswitch_os_malloc(error_log_size * sizeof(NVSWITCH_ERROR_TYPE)); + } + + if (errors->error_log != NULL) + { + errors->error_log_size = error_log_size; + 
nvswitch_os_memset(errors->error_log, 0, errors->error_log_size * sizeof(NVSWITCH_ERROR_TYPE)); + } + + if (error_log_size != errors->error_log_size) + { + retval = -NVL_NO_MEM; + } + + return retval; +} + +// +// Destroy an error log +// +void +nvswitch_destroy_error_log +( + nvswitch_device *device, + NVSWITCH_ERROR_LOG_TYPE *errors +) +{ + if (errors == NULL) + return; + + errors->error_start = 0; + errors->error_count = 0; + //errors->error_total = 0; // Don't reset total count of errors logged + errors->error_log_size = 0; + + if (errors->error_log != NULL) + { + nvswitch_os_free(errors->error_log); + errors->error_log = NULL; + } +} + +void +nvswitch_record_error +( + nvswitch_device *device, + NVSWITCH_ERROR_LOG_TYPE *errors, + NvU32 error_type, // NVSWITCH_ERR_* + NvU32 instance, + NvU32 subinstance, + NVSWITCH_ERROR_SRC_TYPE error_src, // NVSWITCH_ERROR_SRC_* + NVSWITCH_ERROR_SEVERITY_TYPE severity, // NVSWITCH_ERROR_SEVERITY_* + NvBool error_resolved, + void *data, + NvU32 data_size, + NvU32 line +) +{ + NvU32 idx_error; + + NVSWITCH_ASSERT(errors != NULL); + NVSWITCH_ASSERT(data_size <= sizeof(errors->error_log[idx_error].data)); + + // If no error log has been created, don't log it. + if ((errors->error_log_size != 0) && (errors->error_log != NULL)) + { + idx_error = (errors->error_start + errors->error_count) % errors->error_log_size; + + if (errors->error_count == errors->error_log_size) + { + // Error ring buffer already full. + if (errors->overwritable) + { + errors->error_start = (errors->error_start + 1) % errors->error_log_size; + } + else + { + // Return: ring buffer full + return; + } + } + else + { + errors->error_count++; + } + + // Log error info + errors->error_log[idx_error].error_type = error_type; + errors->error_log[idx_error].instance = instance; + errors->error_log[idx_error].subinstance = subinstance; + errors->error_log[idx_error].error_src = error_src; + errors->error_log[idx_error].severity = severity; + errors->error_log[idx_error].error_resolved = error_resolved; + errors->error_log[idx_error].line = line; + + // Log tracking info + errors->error_log[idx_error].timer_count = nvswitch_hw_counter_read_counter(device); + errors->error_log[idx_error].time = nvswitch_os_get_platform_time(); + errors->error_log[idx_error].local_error_num = errors->error_total; + errors->error_log[idx_error].global_error_num = device->error_total; + + // Copy ancillary data blob + nvswitch_os_memset(&errors->error_log[idx_error].data, 0, sizeof(errors->error_log[idx_error].data)); + if ((data != NULL) && (data_size > 0)) + { + nvswitch_os_memcpy(&errors->error_log[idx_error].data, data, data_size); + } + + _nvswitch_dump_error_entry(device, idx_error, &errors->error_log[idx_error]); + } + errors->error_total++; + device->error_total++; +} + +// +// Discard N errors from the specified log +// + +void +nvswitch_discard_errors +( + NVSWITCH_ERROR_LOG_TYPE *errors, + NvU32 error_discard_count +) +{ + error_discard_count = NV_MIN(error_discard_count, errors->error_count); + errors->error_start = (errors->error_start+error_discard_count) % errors->error_log_size; + errors->error_count -= error_discard_count; +} + +// +// Retrieve an error entry by index. 
+// 0 = oldest error +// Out-of-range index does not return an error, but does return an error of type "NO_ERROR" +// error_count returns how many errors in the error log +// + +void +nvswitch_get_error +( + nvswitch_device *device, + NVSWITCH_ERROR_LOG_TYPE *errors, + NVSWITCH_ERROR_TYPE *error_entry, + NvU32 error_idx, + NvU32 *error_count +) +{ + NVSWITCH_ASSERT(errors != NULL); + + if (error_entry != NULL) + { + if (error_idx >= errors->error_count) + { + // Index out-of-range + nvswitch_os_memset(error_entry, 0, sizeof(*error_entry)); + error_entry->error_type = 0; + error_entry->instance = 0; + error_entry->subinstance = 0; + error_entry->local_error_num = errors->error_total; + error_entry->global_error_num = ((device == NULL) ? 0 : device->error_total); + error_entry->error_src = NVSWITCH_ERROR_SRC_NONE; + error_entry->severity = NVSWITCH_ERROR_SEVERITY_NONFATAL; + error_entry->error_resolved = NV_TRUE; + error_entry->line = 0; + error_entry->timer_count = + ((device == NULL) ? 0 : nvswitch_hw_counter_read_counter(device)); + error_entry->time = nvswitch_os_get_platform_time(); + } + else + { + *error_entry = errors->error_log[(errors->error_start + error_idx) % errors->error_log_size]; + } + } + + if (error_count) + { + *error_count = errors->error_count; + } +} + + +// +// Retrieve the oldest logged error entry. +// Optionally remove the error entry after reading +// error_count returns how many remaining errors in the error log +// + +void +nvswitch_get_next_error +( + nvswitch_device *device, + NVSWITCH_ERROR_LOG_TYPE *errors, + NVSWITCH_ERROR_TYPE *error_entry, + NvU32 *error_count, + NvBool remove_from_list +) +{ + nvswitch_get_error(device, errors, error_entry, 0, error_count); + + // Optionally remove the error from the log + if (remove_from_list) + { + nvswitch_discard_errors(errors, 1); + } +} + +NVSWITCH_NVLINK_HW_ERROR +nvswitch_translate_hw_error +( + NVSWITCH_ERR_TYPE type +) +{ + if ((type >= NVSWITCH_ERR_HW_NPORT_INGRESS) && + (type < NVSWITCH_ERR_HW_NPORT_INGRESS_LAST)) + { + return NVSWITCH_NVLINK_HW_INGRESS; + } + else if ((type >= NVSWITCH_ERR_HW_NPORT_EGRESS) && + (type < NVSWITCH_ERR_HW_NPORT_EGRESS_LAST)) + { + return NVSWITCH_NVLINK_HW_EGRESS; + } + else if ((type >= NVSWITCH_ERR_HW_NPORT_FSTATE) && + (type < NVSWITCH_ERR_HW_NPORT_FSTATE_LAST)) + { + return NVSWITCH_NVLINK_HW_FSTATE; + } + else if ((type >= NVSWITCH_ERR_HW_NPORT_TSTATE) && + (type < NVSWITCH_ERR_HW_NPORT_TSTATE_LAST)) + { + return NVSWITCH_NVLINK_HW_TSTATE; + } + else if ((type >= NVSWITCH_ERR_HW_NPORT_ROUTE) && + (type < NVSWITCH_ERR_HW_NPORT_ROUTE_LAST)) + { + return NVSWITCH_NVLINK_HW_ROUTE; + } + else if ((type >= NVSWITCH_ERR_HW_NPORT) && + (type < NVSWITCH_ERR_HW_NPORT_LAST)) + { + return NVSWITCH_NVLINK_HW_NPORT; + } + else if ((type >= NVSWITCH_ERR_HW_NVLCTRL) && + (type < NVSWITCH_ERR_HW_NVLCTRL_LAST)) + { + return NVSWITCH_NVLINK_HW_NVLCTRL; + } + else if ((type >= NVSWITCH_ERR_HW_NVLIPT) && + (type < NVSWITCH_ERR_HW_NVLIPT_LAST)) + { + return NVSWITCH_NVLINK_HW_NVLIPT; + } + else if ((type >= NVSWITCH_ERR_HW_NVLTLC) && + (type < NVSWITCH_ERR_HW_NVLTLC_LAST)) + { + return NVSWITCH_NVLINK_HW_NVLTLC; + } + else if ((type >= NVSWITCH_ERR_HW_DLPL) && + (type < NVSWITCH_ERR_HW_DLPL_LAST)) + { + return NVSWITCH_NVLINK_HW_DLPL; + } + else if ((type >= NVSWITCH_ERR_HW_AFS) && + (type < NVSWITCH_ERR_HW_AFS_LAST)) + { + return NVSWITCH_NVLINK_HW_AFS; + } + else if ((type >= NVSWITCH_ERR_HW_HOST) && + (type < NVSWITCH_ERR_HW_HOST_LAST)) + { + return NVSWITCH_NVLINK_HW_HOST; + } + else if 
((type >= NVSWITCH_ERR_HW_MINION) && + (type < NVSWITCH_ERR_HW_MINION_LAST)) + { + return NVSWITCH_NVLINK_HW_MINION; + } + else if ((type >= NVSWITCH_ERR_HW_NXBAR) && + (type < NVSWITCH_ERR_HW_NXBAR_LAST)) + { + return NVSWITCH_NVLINK_HW_NXBAR; + } + else if ((type >= NVSWITCH_ERR_HW_NPORT_SOURCETRACK) && + (type < NVSWITCH_ERR_HW_NPORT_SOURCETRACK_LAST)) + { + return NVSWITCH_NVLINK_HW_SOURCETRACK; + } + else if ((type >= NVSWITCH_ERR_HW_NVLIPT_LNK) && + (type < NVSWITCH_ERR_HW_NVLIPT_LNK_LAST)) + { + return NVSWITCH_ERR_HW_NVLIPT_LNK; + } + else if ((type >= NVSWITCH_ERR_HW_SOE) && + (type < NVSWITCH_ERR_HW_SOE_LAST)) + { + return NVSWITCH_ERR_HW_SOE; + } + else + { + // Update this assert after adding a new translation entry above + ct_assert(NVSWITCH_ERR_HW_SOE_LAST == (NVSWITCH_ERR_LAST - 1)); + + NVSWITCH_PRINT(NULL, ERROR, + "%s: Undefined error type\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + return NVSWITCH_NVLINK_HW_GENERIC; + } +} + +static NVSWITCH_NVLINK_ARCH_ERROR +_nvswitch_translate_arch_error +( + NVSWITCH_ERROR_TYPE *error_entry +) +{ + if (error_entry->severity == NVSWITCH_ERROR_SEVERITY_FATAL) + { + return NVSWITCH_NVLINK_ARCH_ERROR_HW_FATAL; + } + else if (error_entry->severity == NVSWITCH_ERROR_SEVERITY_NONFATAL) + { + if (error_entry->error_resolved) + { + return NVSWITCH_NVLINK_ARCH_ERROR_HW_CORRECTABLE; + } + else + { + return NVSWITCH_NVLINK_ARCH_ERROR_HW_UNCORRECTABLE; + } + } + + return NVSWITCH_NVLINK_ARCH_ERROR_GENERIC; +} + +void +nvswitch_translate_error +( + NVSWITCH_ERROR_TYPE *error_entry, + NVSWITCH_NVLINK_ARCH_ERROR *arch_error, + NVSWITCH_NVLINK_HW_ERROR *hw_error +) +{ + NVSWITCH_ASSERT(error_entry != NULL); + + if (arch_error) + { + *arch_error = NVSWITCH_NVLINK_ARCH_ERROR_NONE; + } + + if (hw_error) + { + *hw_error = NVSWITCH_NVLINK_HW_ERROR_NONE; + } + + if (error_entry->error_src == NVSWITCH_ERROR_SRC_HW) + { + if (arch_error) + { + *arch_error = _nvswitch_translate_arch_error(error_entry); + } + + if (hw_error) + { + *hw_error = nvswitch_translate_hw_error(error_entry->error_type); + } + } + else + { + NVSWITCH_PRINT(NULL, ERROR, + "%s: Undefined error source\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + } +} + +NvlStatus +nvswitch_ctrl_get_errors +( + nvswitch_device *device, + NVSWITCH_GET_ERRORS_PARAMS *p +) +{ + NvU32 index = 0; + NvU32 count = 0; + NVSWITCH_ERROR_LOG_TYPE *error_log; + NVSWITCH_ERROR_TYPE error; + + switch (p->errorType) + { + case NVSWITCH_ERROR_SEVERITY_FATAL: + error_log = &device->log_FATAL_ERRORS; + break; + case NVSWITCH_ERROR_SEVERITY_NONFATAL: + error_log = &device->log_NONFATAL_ERRORS; + break; + default: + return -NVL_BAD_ARGS; + } + + nvswitch_os_memset(p->error, 0, sizeof(NVSWITCH_ERROR) * + NVSWITCH_ERROR_COUNT_SIZE); + p->nextErrorIndex = NVSWITCH_ERROR_NEXT_LOCAL_NUMBER(error_log); + p->errorCount = 0; + + // If there is nothing to do, return. + nvswitch_get_error(device, error_log, &error, index, &count); + if (count == 0) + { + return NVL_SUCCESS; + } + + // + // If the error's local_error_num is smaller than the errorIndex + // passed in by the client, fast-forward index by the difference. + // This will skip over errors that were previously read by the client. + // + if (error.local_error_num < p->errorIndex) + { + index = (NvU32) (p->errorIndex - error.local_error_num); + } + + // If there is nothing to do after fast-forwarding, return. 
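+    // (Example: if the oldest entry's local_error_num is 10 and the client's
+    // errorIndex is 15, index was advanced to 5 above; with 5 or fewer entries
+    // logged there is nothing new to report.)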
+ if (index >= count) + { + return NVL_SUCCESS; + } + + while ((p->errorCount < NVSWITCH_ERROR_COUNT_SIZE) && (index < count)) + { + // Get the next error to consider from the log + nvswitch_get_error(device, error_log, &error, index, NULL); + + p->error[p->errorCount].error_value = error.error_type; + p->error[p->errorCount].error_src = error.error_src; + p->error[p->errorCount].instance = error.instance; + p->error[p->errorCount].subinstance = error.subinstance; + p->error[p->errorCount].time = error.time; + p->error[p->errorCount].error_resolved = error.error_resolved; + p->errorCount++; + index++; + } + + p->errorIndex = error.local_error_num + 1; + + return NVL_SUCCESS; +} diff --git a/src/common/nvswitch/kernel/flcn/flcn_call_hal_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcn_call_hal_nvswitch.c new file mode 100644 index 000000000..f4f77f61e --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/flcn_call_hal_nvswitch.c @@ -0,0 +1,714 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "flcn/haldefs_flcn_nvswitch.h" +#include "flcn/flcn_nvswitch.h" + +#include "flcnifcmn.h" + +typedef union RM_FLCN_CMD RM_FLCN_CMD, *PRM_FLCN_CMD; +typedef union RM_FLCN_MSG RM_FLCN_MSG, *PRM_FLCN_MSG; + +// OBJECT Interfaces +NV_STATUS +flcnQueueReadData +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 queueId, + void *pData, + NvBool bMsg +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueReadData != (void *)0); + return pFlcn->pHal->queueReadData(device, pFlcn, queueId, pData, bMsg); +} + +NV_STATUS +flcnQueueCmdWrite +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 queueId, + RM_FLCN_CMD *pCmd, + NVSWITCH_TIMEOUT *pTimeout +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueCmdWrite != (void *)0); + return pFlcn->pHal->queueCmdWrite(device, pFlcn, queueId, pCmd, pTimeout); +} + +NV_STATUS +flcnQueueCmdCancel +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 seqDesc +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueCmdCancel != (void *)0); + return pFlcn->pHal->queueCmdCancel(device, pFlcn, seqDesc); +} + +NV_STATUS +flcnQueueCmdPostNonBlocking +( + nvswitch_device *device, + PFLCN pFlcn, + PRM_FLCN_CMD pCmd, + PRM_FLCN_MSG pMsg, + void *pPayload, + NvU32 queueIdLogical, + FlcnQMgrClientCallback pCallback, + void *pCallbackParams, + NvU32 *pSeqDesc, + NVSWITCH_TIMEOUT *pTimeout +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueCmdPostNonBlocking != (void *)0); + return pFlcn->pHal->queueCmdPostNonBlocking(device, pFlcn, pCmd, pMsg, pPayload, queueIdLogical, pCallback, pCallbackParams, pSeqDesc, pTimeout); +} + +NV_STATUS +flcnQueueCmdPostBlocking +( + nvswitch_device *device, + PFLCN pFlcn, + PRM_FLCN_CMD pCmd, + PRM_FLCN_MSG pMsg, + void *pPayload, + NvU32 queueIdLogical, + NvU32 *pSeqDesc, + NVSWITCH_TIMEOUT *pTimeout +) +{ + NV_STATUS status; + + status = flcnQueueCmdPostNonBlocking(device, pFlcn, pCmd, pMsg, pPayload, + queueIdLogical, NULL, NULL, pSeqDesc, pTimeout); + if (status != NV_OK) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_COMMAND_QUEUE, + "Failed to post command to SOE\n"); + return status; + } + + status = flcnQueueCmdWait(device, pFlcn, *pSeqDesc, pTimeout); + if (status == NV_ERR_TIMEOUT) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_TIMEOUT, + "Timed out while waiting for SOE command completion\n"); + flcnQueueCmdCancel(device, pFlcn, *pSeqDesc); + } + + return status; +} + +NV_STATUS +flcnQueueCmdWait +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 seqDesc, + NVSWITCH_TIMEOUT *pTimeout +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueCmdWait != (void *)0); + + return pFlcn->pHal->queueCmdWait(device, pFlcn, seqDesc, pTimeout); +} + +NvU8 +flcnCoreRevisionGet +( + struct nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->coreRevisionGet != (void *)0); + return pFlcn->pHal->coreRevisionGet(device, pFlcn); +} + +void +flcnMarkNotReady +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->markNotReady != (void *)0); + pFlcn->pHal->markNotReady(device, pFlcn); +} + +NV_STATUS +flcnCmdQueueHeadGet +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 *pHead +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->cmdQueueHeadGet != (void *)0); + return pFlcn->pHal->cmdQueueHeadGet(device, pFlcn, pQueue, pHead); +} + +NV_STATUS +flcnMsgQueueHeadGet +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 *pHead +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->msgQueueHeadGet != (void *)0); + return pFlcn->pHal->msgQueueHeadGet(device, pFlcn, pQueue, pHead); +} + +NV_STATUS +flcnCmdQueueTailGet +( + 
nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 *pTail +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->cmdQueueTailGet != (void *)0); + return pFlcn->pHal->cmdQueueTailGet(device, pFlcn, pQueue, pTail); +} + +NV_STATUS +flcnMsgQueueTailGet +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 *pTail +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->msgQueueTailGet != (void *)0); + return pFlcn->pHal->msgQueueTailGet(device, pFlcn, pQueue, pTail); +} + +NV_STATUS +flcnCmdQueueHeadSet +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 head +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->cmdQueueHeadSet != (void *)0); + return pFlcn->pHal->cmdQueueHeadSet(device, pFlcn, pQueue, head); +} + +NV_STATUS +flcnMsgQueueHeadSet +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 head +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->msgQueueHeadSet != (void *)0); + return pFlcn->pHal->msgQueueHeadSet(device, pFlcn, pQueue, head); +} + +NV_STATUS +flcnCmdQueueTailSet +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 tail +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->cmdQueueTailSet != (void *)0); + return pFlcn->pHal->cmdQueueTailSet(device, pFlcn, pQueue, tail); +} + +NV_STATUS +flcnMsgQueueTailSet +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 tail +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->msgQueueTailSet != (void *)0); + return pFlcn->pHal->msgQueueTailSet(device, pFlcn, pQueue, tail); +} + +PFLCN_QMGR_SEQ_INFO +flcnQueueSeqInfoFind +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 seqDesc +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoFind != (void *)0); + return pFlcn->pHal->queueSeqInfoFind(device, pFlcn, seqDesc); +} + +PFLCN_QMGR_SEQ_INFO +flcnQueueSeqInfoAcq +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoAcq != (void *)0); + return pFlcn->pHal->queueSeqInfoAcq(device, pFlcn); +} + +void +flcnQueueSeqInfoRel +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoRel != (void *)0); + pFlcn->pHal->queueSeqInfoRel(device, pFlcn, pSeqInfo); +} + +void +flcnQueueSeqInfoStateInit +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoStateInit != (void *)0); + pFlcn->pHal->queueSeqInfoStateInit(device, pFlcn); +} + +void +flcnQueueSeqInfoCancelAll +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoCancelAll != (void *)0); + pFlcn->pHal->queueSeqInfoCancelAll(device, pFlcn); +} + +NV_STATUS +flcnQueueSeqInfoFree +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueSeqInfoFree != (void *)0); + return pFlcn->pHal->queueSeqInfoFree(device, pFlcn, pSeqInfo); +} + +NV_STATUS +flcnQueueEventRegister +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 unitId, + NvU8 *pMsg, + FlcnQMgrClientCallback pCallback, + void *pParams, + NvU32 *pEvtDesc +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueEventRegister != (void *)0); + return pFlcn->pHal->queueEventRegister(device, pFlcn, unitId, pMsg, pCallback, pParams, pEvtDesc); +} + +NV_STATUS +flcnQueueEventUnregister +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 evtDesc +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueEventUnregister != (void *)0); + return pFlcn->pHal->queueEventUnregister(device, pFlcn, evtDesc); +} + +NV_STATUS +flcnQueueEventHandle +( + nvswitch_device *device, + PFLCN pFlcn, + RM_FLCN_MSG *pMsg, + NV_STATUS 
evtStatus +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueEventHandle != (void *)0); + return pFlcn->pHal->queueEventHandle(device, pFlcn, pMsg, evtStatus); +} + +NV_STATUS +flcnQueueResponseHandle +( + nvswitch_device *device, + PFLCN pFlcn, + RM_FLCN_MSG *pMsg +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueResponseHandle != (void *)0); + return pFlcn->pHal->queueResponseHandle(device, pFlcn, pMsg); +} + +NvU32 +flcnQueueCmdStatus +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 seqDesc +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->queueCmdStatus != (void *)0); + return pFlcn->pHal->queueCmdStatus(device, pFlcn, seqDesc); +} + +NV_STATUS +flcnDmemCopyFrom +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 src, + NvU8 *pDst, + NvU32 sizeBytes, + NvU8 port +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->dmemCopyFrom != (void *)0); + return pFlcn->pHal->dmemCopyFrom(device, pFlcn, src, pDst, sizeBytes, port); +} + +NV_STATUS +flcnDmemCopyTo +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 dst, + NvU8 *pSrc, + NvU32 sizeBytes, + NvU8 port +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->dmemCopyTo != (void *)0); + return pFlcn->pHal->dmemCopyTo(device, pFlcn, dst, pSrc, sizeBytes, port); +} + +void +flcnPostDiscoveryInit +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->postDiscoveryInit != (void *)0); + pFlcn->pHal->postDiscoveryInit(device, pFlcn); +} + +void +flcnDbgInfoDmemOffsetSet +( + nvswitch_device *device, + PFLCN pFlcn, + NvU16 debugInfoDmemOffset +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->dbgInfoDmemOffsetSet != (void *)0); + pFlcn->pHal->dbgInfoDmemOffsetSet(device, pFlcn, debugInfoDmemOffset); +} + + + +// HAL Interfaces +NV_STATUS +flcnConstruct_HAL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->construct != (void *)0); + return pFlcn->pHal->construct(device, pFlcn); +} + +void +flcnDestruct_HAL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->destruct != (void *)0); + pFlcn->pHal->destruct(device, pFlcn); +} + +NvU32 +flcnRegRead_HAL +( + struct nvswitch_device *device, + PFLCN pFlcn, + NvU32 offset +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->regRead != (void *)0); + return pFlcn->pHal->regRead(device, pFlcn, offset); +} + +void +flcnRegWrite_HAL +( + struct nvswitch_device *device, + PFLCN pFlcn, + NvU32 offset, + NvU32 data +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->regWrite != (void *)0); + pFlcn->pHal->regWrite(device, pFlcn, offset, data); +} + +const char * +flcnGetName_HAL +( + struct nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->getName != (void *)0); + return pFlcn->pHal->getName(device, pFlcn); +} + +NvU8 +flcnReadCoreRev_HAL +( + struct nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->readCoreRev != (void *)0); + return pFlcn->pHal->readCoreRev(device, pFlcn); +} + +void +flcnGetCoreInfo_HAL +( + struct nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->getCoreInfo != (void *)0); + pFlcn->pHal->getCoreInfo(device, pFlcn); +} + +NV_STATUS +flcnDmemTransfer_HAL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 src, + NvU8 *pDst, + NvU32 sizeBytes, + NvU8 port, + NvBool bCopyFrom +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->dmemTransfer != (void *)0); + return pFlcn->pHal->dmemTransfer(device, pFlcn, src, pDst, sizeBytes, port, bCopyFrom); +} + +void +flcnIntrRetrigger_HAL +( + nvswitch_device *device, + FLCN *pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->intrRetrigger != (void *)0); + pFlcn->pHal->intrRetrigger(device, pFlcn); +} + +NvBool 
+flcnAreEngDescsInitialized_HAL +( + nvswitch_device *device, + FLCN *pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->areEngDescsInitialized != (void *)0); + return pFlcn->pHal->areEngDescsInitialized(device, pFlcn); +} + +NV_STATUS +flcnWaitForResetToFinish_HAL +( + nvswitch_device *device, + FLCN *pFlcn +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->waitForResetToFinish != (void *)0); + return pFlcn->pHal->waitForResetToFinish(device, pFlcn); +} + +void +flcnDbgInfoCapturePcTrace_HAL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + if (pFlcn->pHal->dbgInfoCapturePcTrace == (void *)0) + { + NVSWITCH_ASSERT(0); + return; + } + + pFlcn->pHal->dbgInfoCapturePcTrace(device, pFlcn); +} + +void +flcnDbgInfoCaptureRiscvPcTrace_HAL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + if (pFlcn->pHal->dbgInfoCaptureRiscvPcTrace == (void *)0) + { + NVSWITCH_ASSERT(0); + return; + } + + pFlcn->pHal->dbgInfoCaptureRiscvPcTrace(device, pFlcn); +} + + +NvU32 +flcnDmemSize_HAL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + if (pFlcn->pHal->dmemSize == (void *)0) + { + NVSWITCH_ASSERT(0); + return 0; + } + + return pFlcn->pHal->dmemSize(device, pFlcn); +} + +NvU32 +flcnSetImemAddr_HAL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 dst +) +{ + if (pFlcn->pHal->setImemAddr == (void *)0) + { + NVSWITCH_ASSERT(0); + return 0; + } + + return pFlcn->pHal->setImemAddr(device, pFlcn, dst); +} + +void +flcnImemCopyTo_HAL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 dst, + NvU8 *pSrc, + NvU32 sizeBytes, + NvBool bSecure, + NvU32 tag, + NvU8 port +) +{ + if (pFlcn->pHal->imemCopyTo == (void *)0) + { + NVSWITCH_ASSERT(0); + return; + } + + pFlcn->pHal->imemCopyTo(device, pFlcn, dst, pSrc, sizeBytes, bSecure, tag, port); +} + +NvU32 +flcnSetDmemAddr_HAL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 dst +) +{ + if (pFlcn->pHal->setDmemAddr == (void *)0) + { + NVSWITCH_ASSERT(0); + return 0; + } + + return pFlcn->pHal->setDmemAddr(device, pFlcn, dst); +} + +NvU32 +flcnRiscvRegRead_HAL +( + struct nvswitch_device *device, + PFLCN pFlcn, + NvU32 offset +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->riscvRegRead != (void *)0); + return pFlcn->pHal->riscvRegRead(device, pFlcn, offset); +} + +void +flcnRiscvRegWrite_HAL +( + struct nvswitch_device *device, + PFLCN pFlcn, + NvU32 offset, + NvU32 data +) +{ + NVSWITCH_ASSERT(pFlcn->pHal->riscvRegWrite != (void *)0); + pFlcn->pHal->riscvRegWrite(device, pFlcn, offset, data); +} diff --git a/src/common/nvswitch/kernel/flcn/flcn_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcn_nvswitch.c new file mode 100644 index 000000000..5a6a1abd6 --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/flcn_nvswitch.c @@ -0,0 +1,542 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" + +#include "flcn/flcn_nvswitch.h" +#include "flcn/flcnable_nvswitch.h" + +static void flcnSetupIpHal(nvswitch_device *device, PFLCN pFlcn); + +/*! + * @brief Get the falcon core revision and subversion. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * + * @return the falcon core revision in the format of NV_FLCN_CORE_REV_X_Y. + */ +static NvU8 +_flcnCoreRevisionGet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + if (pFlcn->coreRev == 0x00) + { + // Falcon core revision has not yet been set. Set it now. + flcnGetCoreInfo_HAL(device, pFlcn); + } + + return pFlcn->coreRev; +} + +/*! + * @brief Mark the falcon as not ready and inaccessible from RM. + * osHandleGpuSurpriseRemoval will use this routine to prevent access to the + * Falcon, which could crash due to absense of GPU, during driver cleanup. + * + * @param[in] device nvswitch_device pointer + * @param[in] pFlcn FLCN pointer + * + * @returns nothing + */ +static void +_flcnMarkNotReady_IMPL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + pFlcn->bOSReady = NV_FALSE; +} + +/*! + * Retrieves the current head pointer for given physical command queue index. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[out] pHead Pointer to write with the queue's head pointer + * + * @return 'NV_OK' if head value was successfully retrieved. + */ +static NV_STATUS +_flcnCmdQueueHeadGet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 *pHead +) +{ + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->cmdQHeadSize); + NVSWITCH_ASSERT(pHead != NULL); + + *pHead = flcnRegRead_HAL(device, pFlcn, + (pQueueInfo->cmdQHeadBaseAddress + + (pQueue->queuePhyId * pQueueInfo->cmdQHeadStride))); + return NV_OK; +} + +/*! + * Sets the head pointer for the given physical command queue index. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[in] head The desired head value for the queue + * + * @return 'NV_OK' if the head value was successfully set. + */ +static NV_STATUS +_flcnCmdQueueHeadSet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 head +) +{ + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->cmdQHeadSize); + + flcnRegWrite_HAL(device, pFlcn, + (pQueueInfo->cmdQHeadBaseAddress + + (pQueue->queuePhyId * pQueueInfo->cmdQHeadStride)), + head); + return NV_OK; +} + +/*! + * Retrieves the current tail pointer for given physical command queue index. 
+ * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[out] pTail Pointer to write with the queue's tail value + * + * @return 'NV_OK' if the tail value was successfully retrieved. + */ +static NV_STATUS +_flcnCmdQueueTailGet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 *pTail +) +{ + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->cmdQTailSize); + NVSWITCH_ASSERT(pTail != NULL); + + *pTail = flcnRegRead_HAL(device, pFlcn, + (pQueueInfo->cmdQTailBaseAddress + + (pQueue->queuePhyId * pQueueInfo->cmdQTailStride))); + return NV_OK; +} + +/*! + * Set the Command Queue tail pointer. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[in] tail The desired tail value + * + * @return 'NV_OK' if the tail value was successfully set. + */ +static NV_STATUS +_flcnCmdQueueTailSet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 tail +) +{ + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->cmdQTailSize); + + flcnRegWrite_HAL(device, pFlcn, + (pQueueInfo->cmdQTailBaseAddress + + (pQueue->queuePhyId * pQueueInfo->cmdQTailStride)), + tail); + return NV_OK; +} + +/*! + * Retrieve the current Message Queue Head pointer. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[in] pHead Pointer to write with the queue's head value + * + * @return 'NV_OK' if the queue's head value was successfully retrieved. + */ +static NV_STATUS +_flcnMsgQueueHeadGet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 *pHead +) +{ + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->msgQHeadSize); + NVSWITCH_ASSERT(pHead != NULL); + + *pHead = flcnRegRead_HAL(device, pFlcn, + (pQueueInfo->msgQHeadBaseAddress + + (pQueue->queuePhyId * pQueueInfo->msgQHeadStride))); + return NV_OK; +} + +/*! + * Set the Message Queue Head pointer. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[in] head The desired head value + * + * @return 'NV_OK' if the head value was successfully set. + */ +static NV_STATUS +_flcnMsgQueueHeadSet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 head +) +{ + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->msgQHeadSize); + + flcnRegWrite_HAL(device, pFlcn, + (pQueueInfo->msgQHeadBaseAddress + + (pQueue->queuePhyId * pQueueInfo->msgQHeadStride)), + head); + return NV_OK; +} + +/*! + * Retrieve the current Message Queue Tail pointer. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[out] pTail Pointer to write with the message queue's tail value + * + * @return 'NV_OK' if the tail value was successfully retrieved. 
+ */ +static NV_STATUS +_flcnMsgQueueTailGet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 *pTail +) +{ + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->msgQTailSize); + NVSWITCH_ASSERT(pTail != NULL); + + *pTail = flcnRegRead_HAL(device, pFlcn, + (pQueueInfo->msgQTailBaseAddress + + (pQueue->queuePhyId * pQueueInfo->msgQTailStride))); + return NV_OK; +} + +/*! + * Set the Message Queue Tail pointer. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[in] tail The desired tail value for the message queue + * + * @return 'NV_OK' if the tail value was successfully set. + */ +static NV_STATUS +_flcnMsgQueueTailSet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + NvU32 tail +) +{ + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueue->queuePhyId < pQueueInfo->msgQTailSize); + + flcnRegWrite_HAL(device, pFlcn, + (pQueueInfo->msgQTailBaseAddress + + (pQueue->queuePhyId * pQueueInfo->msgQTailStride)), + tail); + return NV_OK; +} + +/*! + * Copies 'sizeBytes' from DMEM offset 'src' to 'pDst' using DMEM access + * port 'port'. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN pointer + * @param[in] src The DMEM offset for the source of the copy + * @param[out] pDst Pointer to write with copied data from DMEM + * @param[in] sizeBytes The number of bytes to copy from DMEM + * @param[in] port The DMEM port index to use when accessing the DMEM + */ +static NV_STATUS +_flcnDmemCopyFrom_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 src, + NvU8 *pDst, + NvU32 sizeBytes, + NvU8 port +) +{ + return flcnDmemTransfer_HAL(device, pFlcn, + src, pDst, sizeBytes, port, + NV_TRUE); // bCopyFrom +} + +/*! + * Copies 'sizeBytes' from 'pDst' to DMEM offset 'dst' using DMEM access port + * 'port'. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN pointer + * @param[in] dst The destination DMEM offset for the copy + * @param[in] pSrc The pointer to the buffer containing the data to copy + * @param[in] sizeBytes The number of bytes to copy into DMEM + * @param[in] port The DMEM port index to use when accessing the DMEM + */ +static NV_STATUS +_flcnDmemCopyTo_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 dst, + NvU8 *pSrc, + NvU32 sizeBytes, + NvU8 port +) +{ + return flcnDmemTransfer_HAL(device, pFlcn, + dst, pSrc, sizeBytes, port, + NV_FALSE); // bCopyFrom +} + +static void +_flcnPostDiscoveryInit_IMPL +( + nvswitch_device *device, + FLCN *pFlcn +) +{ + flcnableFetchEngines_HAL(device, pFlcn->pFlcnable, &pFlcn->engDescUc, &pFlcn->engDescBc); + + flcnSetupIpHal(device, pFlcn); +} + +/* -------------------- Object construction/initialization ------------------- */ + +/** + * @brief set hal object-interface function pointers to flcn implementations + * + * this function has to be at the end of the file so that all the + * other functions are already defined. 
+ * + * @param[in] pFlcn The flcn for which to set hals + */ +static void +flcnSetupHal +( + PFLCN pFlcn, + NvU32 pci_device_id +) +{ + flcn_hal *pHal = NULL; + if (nvswitch_is_lr10_device_id(pci_device_id)) + { + flcnSetupHal_LR10(pFlcn); + goto _flcnSetupHal_success; + } + + NVSWITCH_PRINT(NULL, ERROR, + "Flcn hal can't be setup due to unknown device id\n"); + NVSWITCH_ASSERT(0); + +_flcnSetupHal_success: + //init hal OBJ Interfaces + pHal = pFlcn->pHal; + + pHal->coreRevisionGet = _flcnCoreRevisionGet_IMPL; + pHal->markNotReady = _flcnMarkNotReady_IMPL; + pHal->cmdQueueHeadGet = _flcnCmdQueueHeadGet_IMPL; + pHal->msgQueueHeadGet = _flcnMsgQueueHeadGet_IMPL; + pHal->cmdQueueTailGet = _flcnCmdQueueTailGet_IMPL; + pHal->msgQueueTailGet = _flcnMsgQueueTailGet_IMPL; + pHal->cmdQueueHeadSet = _flcnCmdQueueHeadSet_IMPL; + pHal->msgQueueHeadSet = _flcnMsgQueueHeadSet_IMPL; + pHal->cmdQueueTailSet = _flcnCmdQueueTailSet_IMPL; + pHal->msgQueueTailSet = _flcnMsgQueueTailSet_IMPL; + + pHal->dmemCopyFrom = _flcnDmemCopyFrom_IMPL; + pHal->dmemCopyTo = _flcnDmemCopyTo_IMPL; + pHal->postDiscoveryInit = _flcnPostDiscoveryInit_IMPL; + + flcnQueueSetupHal(pFlcn); + flcnRtosSetupHal(pFlcn); + flcnQueueRdSetupHal(pFlcn); +} + +static void +flcnSetupIpHal +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NvU8 coreRev = flcnableReadCoreRev(device, pFlcn->pFlcnable); + + switch (coreRev) { + case NV_FLCN_CORE_REV_3_0: + { + flcnSetupHal_v03_00(pFlcn); + break; + } + case NV_FLCN_CORE_REV_4_0: + case NV_FLCN_CORE_REV_4_1: + { + flcnSetupHal_v04_00(pFlcn); + break; + } + case NV_FLCN_CORE_REV_5_0: + case NV_FLCN_CORE_REV_5_1: + { + flcnSetupHal_v05_01(pFlcn); + break; + } + case NV_FLCN_CORE_REV_6_0: + { + flcnSetupHal_v06_00(pFlcn); + break; + } + default: + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unsupported falcon core revision: %hhu!\n", + __FUNCTION__, coreRev); + NVSWITCH_ASSERT(0); + break; + } + } +} + +FLCN * +flcnAllocNew(void) +{ + FLCN *pFlcn = nvswitch_os_malloc(sizeof(*pFlcn)); + if (pFlcn != NULL) + { + nvswitch_os_memset(pFlcn, 0, sizeof(*pFlcn)); + } + + return pFlcn; +} + +NvlStatus +flcnInit +( + nvswitch_device *device, + FLCN *pFlcn, + NvU32 pci_device_id +) +{ + NvlStatus retval = NVL_SUCCESS; + + // allocate hal if a child class hasn't already + if (pFlcn->pHal == NULL) + { + flcn_hal *pHal = pFlcn->pHal = nvswitch_os_malloc(sizeof(*pHal)); + if (pHal == NULL) + { + NVSWITCH_PRINT(device, ERROR, "Flcn allocation failed!\n"); + retval = -NVL_NO_MEM; + goto flcn_init_fail; + } + nvswitch_os_memset(pHal, 0, sizeof(*pHal)); + } + + //don't have a parent class to init, go straight to setupHal + flcnSetupHal(pFlcn, pci_device_id); + + return retval; + +flcn_init_fail: + flcnDestroy(device, pFlcn); + return retval; +} + +// reverse of flcnInit() +void +flcnDestroy +( + nvswitch_device *device, + FLCN *pFlcn +) +{ + if (pFlcn->pHal != NULL) + { + nvswitch_os_free(pFlcn->pHal); + pFlcn->pHal = NULL; + } +} diff --git a/src/common/nvswitch/kernel/flcn/flcnable_call_hal_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcnable_call_hal_nvswitch.c new file mode 100644 index 000000000..0a73334f3 --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/flcnable_call_hal_nvswitch.c @@ -0,0 +1,219 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "flcn/haldefs_flcnable_nvswitch.h" +#include "flcn/flcnable_nvswitch.h" + +#include "flcnifcmn.h" + +#include "export_nvswitch.h" +#include "common_nvswitch.h" + +typedef struct FALCON_EXTERNAL_CONFIG FALCON_EXTERNAL_CONFIG, *PFALCON_EXTERNAL_CONFIG; +typedef struct FLCN_QMGR_SEQ_INFO FLCN_QMGR_SEQ_INFO, *PFLCN_QMGR_SEQ_INFO; +typedef union RM_FLCN_CMD RM_FLCN_CMD, *PRM_FLCN_CMD; +typedef union RM_FLCN_MSG RM_FLCN_MSG, *PRM_FLCN_MSG; +typedef struct ENGINE_DESCRIPTOR_TYPE ENGINE_DESCRIPTOR_TYPE, *PENGINE_DESCRIPTOR_TYPE; + + +// OBJECT Interfaces +NvU8 +flcnableReadCoreRev +( + nvswitch_device *device, + PFLCNABLE pFlcnable +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->readCoreRev != (void *)0); + return pFlcnable->pHal->readCoreRev(device, pFlcnable); +} + +void +flcnableGetExternalConfig +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + PFALCON_EXTERNAL_CONFIG pConfig +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->getExternalConfig != (void *)0); + pFlcnable->pHal->getExternalConfig(device, pFlcnable, pConfig); +} + +void +flcnableEmemCopyFrom +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + NvU32 src, + NvU8 *pDst, + NvU32 sizeBytes, + NvU8 port +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->ememCopyFrom != (void *)0); + pFlcnable->pHal->ememCopyFrom(device, pFlcnable, src, pDst, sizeBytes, port); +} + +void +flcnableEmemCopyTo +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + NvU32 dst, + NvU8 *pSrc, + NvU32 sizeBytes, + NvU8 port +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->ememCopyTo != (void *)0); + pFlcnable->pHal->ememCopyTo(device, pFlcnable, dst, pSrc, sizeBytes, port); +} + +NV_STATUS +flcnableHandleInitEvent +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + RM_FLCN_MSG *pGenMsg +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->handleInitEvent != (void *)0); + return pFlcnable->pHal->handleInitEvent(device, pFlcnable, pGenMsg); +} + +PFLCN_QMGR_SEQ_INFO +flcnableQueueSeqInfoGet +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + NvU32 seqIndex +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->queueSeqInfoGet != (void *)0); + return pFlcnable->pHal->queueSeqInfoGet(device, pFlcnable, seqIndex); +} + +void +flcnableQueueSeqInfoClear +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->queueSeqInfoClear != (void *)0); + pFlcnable->pHal->queueSeqInfoClear(device, pFlcnable, pSeqInfo); +} + +void 
+flcnableQueueSeqInfoFree +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->queueSeqInfoFree != (void *)0); + pFlcnable->pHal->queueSeqInfoFree(device, pFlcnable, pSeqInfo); +} + +NvBool +flcnableQueueCmdValidate +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + RM_FLCN_CMD *pCmd, + RM_FLCN_MSG *pMsg, + void *pPayload, + NvU32 queueIdLogical +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->queueCmdValidate != (void *)0); + return pFlcnable->pHal->queueCmdValidate(device, pFlcnable, pCmd, pMsg, pPayload, queueIdLogical); +} + +NV_STATUS +flcnableQueueCmdPostExtension +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + RM_FLCN_CMD *pCmd, + RM_FLCN_MSG *pMsg, + void *pPayload, + NVSWITCH_TIMEOUT *pTimeout, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->queueCmdPostExtension != (void *)0); + return pFlcnable->pHal->queueCmdPostExtension(device, pFlcnable, pCmd, pMsg, pPayload, pTimeout, pSeqInfo); +} + +void +flcnablePostDiscoveryInit +( + nvswitch_device *device, + FLCNABLE *pFlcnable +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->postDiscoveryInit != (void *)0); + pFlcnable->pHal->postDiscoveryInit(device, pFlcnable); +} + + + +// HAL Interfaces +NV_STATUS +flcnableConstruct_HAL +( + nvswitch_device *device, + FLCNABLE *pFlcnable +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->construct != (void *)0); + return pFlcnable->pHal->construct(device, pFlcnable); +} + +void +flcnableDestruct_HAL +( + nvswitch_device *device, + FLCNABLE *pFlcnable +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->destruct != (void *)0); + pFlcnable->pHal->destruct(device, pFlcnable); +} + +void +flcnableFetchEngines_HAL +( + nvswitch_device *device, + FLCNABLE *pFlcnable, + ENGINE_DESCRIPTOR_TYPE *pEngDescUc, + ENGINE_DESCRIPTOR_TYPE *pEngDescBc +) +{ + NVSWITCH_ASSERT(pFlcnable->pHal->fetchEngines != (void *)0); + pFlcnable->pHal->fetchEngines(device, pFlcnable, pEngDescUc, pEngDescBc); +} diff --git a/src/common/nvswitch/kernel/flcn/flcnable_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcnable_nvswitch.c new file mode 100644 index 000000000..699c0587e --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/flcnable_nvswitch.c @@ -0,0 +1,359 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "flcn/flcnable_nvswitch.h" +#include "flcn/flcn_nvswitch.h" +#include "rmflcncmdif_nvswitch.h" + +#include "common_nvswitch.h" +#include "nvstatus.h" + +/*! + * @brief Read the falcon core revision and subversion. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcnable FLCNABLE object pointer + * + * @return @ref NV_FLCN_CORE_REV_X_Y. + */ +static NvU8 +_flcnableReadCoreRev_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable +) +{ + return flcnReadCoreRev_HAL(device, pFlcnable->pFlcn); +} + +/*! + * @brief Get external config + */ +static void +_flcnableGetExternalConfig_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + PFALCON_EXTERNAL_CONFIG pConfig +) +{ + pConfig->bResetInPmc = NV_FALSE; + pConfig->blkcgBase = 0xffffffff; + pConfig->fbifBase = 0xffffffff; +} + +/*! + * @brief Retrieve content from falcon's EMEM. + */ +static void +_flcnableEmemCopyFrom_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + NvU32 src, + NvU8 *pDst, + NvU32 sizeBytes, + NvU8 port +) +{ + NVSWITCH_PRINT(device, ERROR, + "%s: FLCNABLE interface not implemented on this falcon!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); +} + +/*! + * @brief Write content to falcon's EMEM. + */ +static void +_flcnableEmemCopyTo_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + NvU32 dst, + NvU8 *pSrc, + NvU32 sizeBytes, + NvU8 port +) +{ + NVSWITCH_PRINT(device, ERROR, + "%s: FLCNABLE interface not implemented on this falcon!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); +} + +/* + * @brief Handle INIT Event + */ +static NV_STATUS +_flcnableHandleInitEvent_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + RM_FLCN_MSG *pGenMsg +) +{ + return NV_OK; +} + +/*! + * @brief Retrieves a pointer to the engine specific SEQ_INFO structure. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcnable FLCNABLE object pointer + * @param[in] seqIndex Index of the structure to retrieve + * + * @return Pointer to the SEQ_INFO structure or NULL on invalid index. + */ +static PFLCN_QMGR_SEQ_INFO +_flcnableQueueSeqInfoGet_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + NvU32 seqIndex +) +{ + NVSWITCH_PRINT(device, ERROR, + "%s: FLCNABLE interface not implemented on this falcon!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + return NULL; +} + +/*! + * @brief Clear out the engine specific portion of the SEQ_INFO structure. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcnable FLCNABLE object pointer + * @param[in] pSeqInfo SEQ_INFO structure pointer + */ +static void +_flcnableQueueSeqInfoClear_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ +} + +/*! + * @brief Free up all the engine specific sequence allocations. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcnable FLCNABLE object pointer + * @param[in] pSeqInfo SEQ_INFO structure pointer + */ +static void +_flcnableQueueSeqInfoFree_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ +} + +/*! + * @brief Validate that the given CMD and related params are properly formed. + * + * @copydoc flcnQueueCmdPostNonBlocking_IMPL + * + * @return Boolean if command was properly formed. 
+ */ +static NvBool +_flcnableQueueCmdValidate_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + PRM_FLCN_CMD pCmd, + PRM_FLCN_MSG pMsg, + void *pPayload, + NvU32 queueIdLogical +) +{ + NVSWITCH_PRINT(device, ERROR, + "%s: FLCNABLE interface not implemented on this falcon!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + return NV_FALSE; +} + +/*! + * @brief Engine specific command post actions. + * + * @copydoc flcnQueueCmdPostNonBlocking_IMPL + * + * @return NV_OK on success + * Failure specific error codes + */ +static NV_STATUS +_flcnableQueueCmdPostExtension_IMPL +( + nvswitch_device *device, + PFLCNABLE pFlcnable, + PRM_FLCN_CMD pCmd, + PRM_FLCN_MSG pMsg, + void *pPayload, + NVSWITCH_TIMEOUT *pTimeout, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ + return NV_OK; +} + +static void +_flcnablePostDiscoveryInit_IMPL +( + nvswitch_device *device, + FLCNABLE *pSoe +) +{ + flcnPostDiscoveryInit(device, pSoe->pFlcn); +} + +/** + * @brief sets pEngDescUc and pEngDescBc to the discovered + * engine that matches this flcnable instance + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * @param[out] pEngDescUc pointer to the UniCast Engine + * Descriptor Pointer + * @param[out] pEngDescBc pointer to the BroadCast Engine + * Descriptor Pointer + */ +static void +_flcnableFetchEngines_IMPL +( + nvswitch_device *device, + FLCNABLE *pSoe, + ENGINE_DESCRIPTOR_TYPE *pEngDescUc, + ENGINE_DESCRIPTOR_TYPE *pEngDescBc +) +{ + // Every falcon REALLY needs to implement this. If they don't flcnRegRead and flcnRegWrite won't work + NVSWITCH_PRINT(device, ERROR, + "%s: FLCNABLE interface not implemented on this falcon!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); +} + + +/* -------------------- Object construction/initialization ------------------- */ +static void +flcnableSetupHal +( + FLCNABLE *pFlcnable, + NvU32 pci_device_id +) +{ + flcnable_hal *pHal = pFlcnable->pHal; + + //init hal Interfaces + pHal->readCoreRev = _flcnableReadCoreRev_IMPL; + pHal->getExternalConfig = _flcnableGetExternalConfig_IMPL; + pHal->ememCopyFrom = _flcnableEmemCopyFrom_IMPL; + pHal->ememCopyTo = _flcnableEmemCopyTo_IMPL; + pHal->handleInitEvent = _flcnableHandleInitEvent_IMPL; + pHal->queueSeqInfoGet = _flcnableQueueSeqInfoGet_IMPL; + pHal->queueSeqInfoClear = _flcnableQueueSeqInfoClear_IMPL; + pHal->queueSeqInfoFree = _flcnableQueueSeqInfoFree_IMPL; + pHal->queueCmdValidate = _flcnableQueueCmdValidate_IMPL; + pHal->queueCmdPostExtension = _flcnableQueueCmdPostExtension_IMPL; + pHal->postDiscoveryInit = _flcnablePostDiscoveryInit_IMPL; + pHal->fetchEngines = _flcnableFetchEngines_IMPL; +} + +NvlStatus +flcnableInit +( + nvswitch_device *device, + FLCNABLE *pFlcnable, + NvU32 pci_device_id +) +{ + NvlStatus retval; + FLCN *pFlcn = NULL; + + // allocate hal if a child class hasn't already + if (pFlcnable->pHal == NULL) + { + flcnable_hal *pHal = pFlcnable->pHal = nvswitch_os_malloc(sizeof(*pHal)); + if (pHal == NULL) + { + NVSWITCH_PRINT(device, ERROR, "Flcn allocation failed!\n"); + retval = -NVL_NO_MEM; + goto flcnable_init_fail; + } + nvswitch_os_memset(pHal, 0, sizeof(*pHal)); + } + + // init flcn - a little out of place here, since we're really only + // supposed to be initializing hals. 
However, we need pci_device_id + // to initialize flcn's hals and flcn is _very_ closely tied to + // flcnable so it kind of makes some sense to allocate it here + pFlcn = pFlcnable->pFlcn = flcnAllocNew(); + if (pFlcn == NULL) + { + NVSWITCH_PRINT(device, ERROR, "Flcn allocation failed!\n"); + retval = -NVL_NO_MEM; + goto flcnable_init_fail; + } + retval = flcnInit(device, pFlcn, pci_device_id); + if (retval != NVL_SUCCESS) + { + goto flcnable_init_fail; + } + + //don't have a parent class to init, go straight to setupHal + flcnableSetupHal(pFlcnable, pci_device_id); + + return retval; + +flcnable_init_fail: + flcnableDestroy(device, pFlcnable); + return retval; +} + +// reverse of flcnableInit() +void +flcnableDestroy +( + nvswitch_device *device, + FLCNABLE *pFlcnable +) +{ + if (pFlcnable->pFlcn != NULL) + { + flcnDestroy(device, pFlcnable->pFlcn); + nvswitch_os_free(pFlcnable->pFlcn); + pFlcnable->pFlcn = NULL; + } + + if (pFlcnable->pHal != NULL) + { + nvswitch_os_free(pFlcnable->pHal); + pFlcnable->pHal = NULL; + } +} diff --git a/src/common/nvswitch/kernel/flcn/flcndmem_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcndmem_nvswitch.c new file mode 100644 index 000000000..219a514a3 --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/flcndmem_nvswitch.c @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file flcndmem_nvswitch.c + * @brief FLCN Data-Memory Manager + * + * This module is intended to serve as the primary interface between all upper- + * level Falcon-object layers and the HAL-layer. It provides APIs for accessing + * the Falcon DMEM (read and write) as well as managing all allocations in the + * RM-managed region of the Falcon DMEM. + * + * DMEM allocations are satisfied out of a carved-out portion of the Falcon + * DMEM. The location of this region is determined when the Falcon image is + * built and is communicated to the RM from the Falcon via the INIT message + * that the Falcon sends upon initialization. Therefore, allocations cannot be + * satisfied until this message arrives (occurs immediately after STATE_LOAD). 
+ */ + +/* ------------------------ Includes --------------------------------------- */ +#include "flcn/flcn_nvswitch.h" +#include "common_nvswitch.h" + +/* ------------------------ Static Function Prototypes --------------------- */ + +/* ------------------------ Globals ---------------------------------------- */ +/* ------------------------ Public Functions ------------------------------ */ +/* ------------------------ Private Static Functions ----------------------- */ diff --git a/src/common/nvswitch/kernel/flcn/flcnqueue_dmem_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcnqueue_dmem_nvswitch.c new file mode 100644 index 000000000..b31be1ecd --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/flcnqueue_dmem_nvswitch.c @@ -0,0 +1,730 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvmisc.h" +#include "common_nvswitch.h" + +#include "flcn/flcnable_nvswitch.h" +#include "flcn/flcn_nvswitch.h" +#include "flcn/flcnqueue_nvswitch.h" + +#include "rmflcncmdif_nvswitch.h" + +/*! + * @file flcnqueue_dmem_nvswitch.c + * @brief Provides all the fundamental logic for reading/writing queues. + * + * Queues are the primary communication mechanism between the RM and various + * falcon-based engines such as the PMU and Display Falcon. The RM requests + * actions by inserting a data packet (command) into a command queue. This + * generates an interrupt to the falcon which allows it to wake-up and service + * the request. Upon completion of the command, the falcon can optionally + * write an acknowledgment packet (message) into a separate queue designated + * for RM-bound messages. + * + * There are currently two types of queues used: + * 1) DMEM queues. The original queue type. This file contains the routines + * specific to DMEM queues. + * 2) FB Queues For infomation specific to FB Queues, see the HDR of + * flcnqueue_fb.c. + * File flcnqueue.c has all routines common to both types of queues. + * + * Each queue has distinct "head" and "tail" pointers. The "head" pointer marks + * the position in the queue where the next write operation will take place; + * the "tail" marks the position of the next read. When the head and tail + * pointers are equal, the queue is empty. When non-equal, data exists in the + * queue that needs processed. Queues are always allocated contiguously in the + * falcon's DMEM. 
It may never be assumed that the queue's head pointer will + * always be greater than the tail pointer. Such a condition is legal and + * occurs when the head pointer approaches the end of the queue and data must + * be written at the beginning of the queue to fit. This is known as a + * "rewind" condition. For simplicity, wrapping is not supported. That is, a + * single packet cannot wrap around the boundaries of the queue. The writer of + * the queue must ensure that this never occurs. When the writer determines + * that a packet won't fit in the end of the queue, it must write a "rewind" + * command telling the reader to ignore the rest of the queue and look at the + * beginning of the queue for the next packet. When the reader finds the + * rewind packet, it must look to the beginning of the queue to find the packet + * to read. The writer is responsible for ensuring that sufficient space will + * always exist at the end of the queue for the rewind packet. The writer is + * also responsible for ensuring that sufficient space exists at the beginning + * of the queue for the real packet before writing the rewind command. + * Finally, upon a rewind condition, the writer is also responsible for + * ensuring that the head pointer never intercepts the tail pointer. Such a + * condition indicates that the queue is full, but is completely + * indistinguishable from the empty condition (in both cases head and tail are + * equivalent). + * + * The general queue insertion algorithm is as follows: + * @code + * if head >= tail + * if packet_size <= (queue_size - head - rewind_cmd_size) + * write packet + * else + * if packet_size <= (tail - queue_start - 1) + * write rewind command + * write packet + * else + * abort + * else + * if packet_size <= (tail - head - 1) + * write packet + * else + * abort + * @endcode + * + * This module provides a basic queue library to support this mechanism. For + * simplicity, this module makes minimal distinction between command queues and + * message queues. It simply provides APIs for opening a queue and performing + * basic read/write operations. The only complexity handled here is the + * rewind operation that is required as the end of a queue is reached during a + * write operation. This module handles that case by requiring the write size + * as a parameter to the "open for write" function. For the specifics, see + * @ref flcnQueueOpenWrite. + * + * The following diagrams may be used for reference in several of the space + * calculations performed by this module. The two most interesting queue states + * exist when the head pointer is greater than the tail and vice versa. Head + * equal to tail is just a sub-case of head greater than tail. + * + *
+ *           (head > tail)                     (tail > head)
+ *          .-+-+-+-+-+-+-. <-- qSize         .-+-+-+-+-+-+-. <-- qSize
+ *          |             |                   |             |
+ *          |    free     |                   |    used     |
+ *          |             |                   |             |
+ *          +-------------+ <-- head          +-------------+ <-- tail
+ *          |             |                   |             |
+ *          |             |                   |             |
+ *          |    used     |                   |    free     |
+ *          |             |                   |             |
+ *          |             |                   |             |
+ *          +-------------+ <-- tail          +-------------+ <-- head
+ *          |             |                   |             |
+ *          |    free     |                   |    used     |
+ *          |             |                   |             |
+ *          `-+-+-+-+-+-+-' <-- qOffset       `-+-+-+-+-+-+-' <-- qOffset
+ *
+ * To be read bottom-to-top (low-address to high-address)
+ */
+
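+/*
+ * As a deliberately literal sketch, the insertion algorithm above could be
+ * expressed in C roughly as follows.  The helper name and its parameter list
+ * are illustrative only and do not correspond to a function in this module;
+ * head, tail, queueStart and queueSize follow the diagram above:
+ *
+ * @code
+ *     static NvBool
+ *     exampleQueueHasSpace(NvU32 head, NvU32 tail, NvU32 queueStart,
+ *                          NvU32 queueSize, NvU32 rewindCmdSize,
+ *                          NvU32 packetSize, NvBool *pBRewind)
+ *     {
+ *         *pBRewind = NV_FALSE;
+ *         if (head >= tail)
+ *         {
+ *             // Enough room left at the end of the queue (keeping space for
+ *             // a rewind command)?
+ *             if (packetSize <= (queueSize - head - rewindCmdSize))
+ *                 return NV_TRUE;
+ *
+ *             // Otherwise, does the packet fit at the beginning once a
+ *             // rewind command has been written?
+ *             if (packetSize <= (tail - queueStart - 1))
+ *             {
+ *                 *pBRewind = NV_TRUE;
+ *                 return NV_TRUE;
+ *             }
+ *             return NV_FALSE;
+ *         }
+ *
+ *         // tail > head: the only free space is the gap between them.
+ *         return (packetSize <= (tail - head - 1));
+ *     }
+ * @endcode
+ */
+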
+static NV_STATUS _flcnQueueOpenWrite_dmem   (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue, NvU32 writeSize);
+static NV_STATUS _flcnQueuePop_dmem         (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue, void *pData, NvU32 size, NvU32 *pBytesRead);
+static void      _flcnQueueRewind_dmem      (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue);
+static void      _flcnQueuePush_dmem        (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue, void *pData, NvU32 size);
+static NV_STATUS _flcnQueueTailGet_dmem     (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 *pTail);
+static NV_STATUS _flcnQueueTailSet_dmem     (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32  tail );
+static void      _flcnQueueRead_dmem        (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32  offset, NvU8 *pDst, NvU32 sizeBytes);
+static void      _flcnQueueWrite_dmem       (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32  offset, NvU8 *pSrc, NvU32 sizeBytes);
+static NV_STATUS _flcnQueueHasRoom_dmem     (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 writeSize, NvBool *pBRewind);
+
+/*!
+ * @brief Construct a Falcon Queue object for a DMEM queue.
+ *
+ * This is a constructor/initialization function for Falcon Queue objects.
+ * Callers can choose to either provide a pre-allocated Falcon Queue object or
+ * allow this function to perform the allocation.  The former is preferable
+ * when a collection of queues must be allocated or when static allocation is
+ * desired.
+ *
+ * @param[in]      device    nvswitch device pointer
+ * @param[in]      pFlcn     FLCN object pointer
+ * @param[in,out]  ppQueue     Pointer to write with the constructed queue
+ *                             (allocated here when not provided by the caller)
+ * @param[in]      queueLogId  Logical ID of the queue
+ * @param[in]      queuePhyId  Physical ID of the queue
+ * @param[in]      offset      Offset of the queue
+ * @param[in]      queueSize   Size of the queue
+ * @param[in]      cmdHdrSize  Size of the command header
+ *
+ * @return 'NV_OK' upon successful construction/initialization.
+ * @return 'NV_ERR_INVALID_POINTER' when the constructed queue (*ppQueue) is NULL.
+ */
+NV_STATUS
+flcnQueueConstruct_dmem_nvswitch
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE      *ppQueue,
+    NvU32            queueLogId,
+    NvU32            queuePhyId,
+    NvU32            offset,
+    NvU32            queueSize,
+    NvU32            cmdHdrSize
+)
+{
+    PFLCNQUEUE  pQueue;
+    NV_STATUS   status = NV_OK;
+
+    status = flcnQueueConstruct_common_nvswitch(device, pFlcn,
+        ppQueue,        // ppQueue
+        queueLogId,     // Logical ID of the queue
+        queuePhyId,     // Physical ID of the queue
+        offset,         // offset
+        queueSize,      // size
+        cmdHdrSize);    // cmdHdrSize
+
+    if (status != NV_OK)
+    {
+        NVSWITCH_ASSERT(status == NV_OK);
+        return status;
+    }
+    if (*ppQueue == NULL)
+    {
+        NVSWITCH_ASSERT(*ppQueue != NULL);
+        return NV_ERR_INVALID_POINTER;
+    }
+
+    pQueue = *ppQueue;
+
+    pQueue->openWrite    = _flcnQueueOpenWrite_dmem;
+    pQueue->rewind       = _flcnQueueRewind_dmem;
+    pQueue->pop          = _flcnQueuePop_dmem;
+    pQueue->push         = _flcnQueuePush_dmem;
+    pQueue->tailGet      = _flcnQueueTailGet_dmem;
+    pQueue->tailSet      = _flcnQueueTailSet_dmem;
+    pQueue->read         = _flcnQueueRead_dmem;
+    pQueue->write        = _flcnQueueWrite_dmem;
+    pQueue->hasRoom      = _flcnQueueHasRoom_dmem;
+
+    //
+    // Command size cannot be larger than queue size / 2.  Otherwise, it is
+    // impossible to send two commands back to back if we start from the
+    // beginning of the queue.
+    //
+    pQueue->maxCmdSize   = queueSize / 2;
+
+    return status;
+}
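
As a usage illustration (not part of this change), the sketch below shows how a caller might construct a DMEM queue through this interface. It is a minimal sketch: the queue geometry values are hypothetical and the code assumes the nvswitch driver build environment (flcn_nvswitch.h / flcnqueue_nvswitch.h).

    /* Illustrative sketch only -- hypothetical geometry, assumes the nvswitch
     * driver environment. */
    static NV_STATUS
    exampleConstructDmemQueue
    (
        nvswitch_device *device,
        PFLCN            pFlcn,
        PFLCNQUEUE      *ppQueue   // *ppQueue == NULL lets the constructor allocate
    )
    {
        // Hypothetical values: logical ID 0, physical ID 0, queue placed at
        // DMEM offset 0x800 with a size of 0x100 bytes and an 8-byte header.
        NV_STATUS status = flcnQueueConstruct_dmem_nvswitch(device, pFlcn, ppQueue,
            0 /* queueLogId */, 0 /* queuePhyId */, 0x800 /* offset */,
            0x100 /* queueSize */, 8 /* cmdHdrSize */);

        // With a 0x100-byte queue, maxCmdSize is 0x80 so that two back-to-back
        // commands can always fit when writing from the start of the queue.
        return status;
    }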
+
+/*!
+ * @brief Opens a queue for writing.
+ *
+ * Opens the given command queue for write operations.  Any number of write
+ * operations may be performed between a call to 'open' and the subsequent call
+ * to 'close'.  However, the full write-size of the entire transaction must be
+ * specified when the queue is opened to ensure that the transaction may be
+ * written into a contiguous portion of the queue (the falcon ucode does not
+ * support wrapping within a single transaction).  This function handles all
+ * wrapping/rewinding of the queue as it becomes necessary to find space.
+ *
+ * @param[in]  device     nvswitch device pointer
+ * @param[in]  pFlcn      FLCN object pointer
+ * @param[in]  pQueue     The queue to open
+ * @param[in]  writeSize  The size (in bytes) of the entire transaction
+ *
+ * @return 'NV_OK' if the queue is successfully opened.
+ * @return 'NV_ERR_INSUFFICIENT_RESOURCES' if there is insufficient queue space
+ * @return 'NV_ERR_GENERIC' otherwise.
+ * @see    flcnQueuePush
+ * @see    flcnQueueRewind
+ */
+static NV_STATUS
+_flcnQueueOpenWrite_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue,
+    NvU32            writeSize
+)
+{
+    NvBool    bRewind = NV_FALSE;
+    NV_STATUS status;
+    //
+    // Verify that the queue is not already opened.  This is not expected to
+    // occur.
+    //
+    if (pQueue->bOpened)
+    {
+        NVSWITCH_PRINT(device, ERROR,
+            "%s: unable to open queue (already opened, queueLogId=0x%x).\n",
+            __FUNCTION__, pQueue->queueLogId);
+        NVSWITCH_ASSERT(0);
+        return NV_ERR_GENERIC;
+    }
+
+    //
+    // Look at the queue's head and tail pointers and determine if enough space
+    // exists in the queue for the write.
+    //
+    status = _flcnQueueHasRoom_dmem(device, pFlcn, pQueue, writeSize, &bRewind);
+    if (NV_OK != status)
+    {
+        if (NV_ERR_INSUFFICIENT_RESOURCES == status)
+        {
+            NVSWITCH_PRINT(device, INFO,
+                "%s: queue is too full to write data (write-size=0x%x).\n",
+                __FUNCTION__, writeSize);
+        }
+
+        return status;
+    }
+
+    //
+    // Reaching this point indicates that sufficient space exists to write the
+    // desired data and the queue can be opened.  Simply set the queue's write
+    // position, set the oflag, and mark the queue as "opened".
+    //
+    (void)pQueue->headGet(device, pFlcn, pQueue, &pQueue->position);
+    pQueue->oflag    = FLCNQUEUE_OFLAG_WRITE;
+    pQueue->bOpened  = NV_TRUE;
+
+    // finally, rewind the queue if necessary
+    if (bRewind)
+    {
+        pQueue->rewind(device, pFlcn, pQueue);
+    }
+    return NV_OK;
+}
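
To show how the open-for-write / push / close transaction model described above fits together, here is a minimal caller sketch using the queue's function-pointer interface. It assumes the nvswitch driver environment and a queue that has already been constructed; error handling is reduced to the essentials and the example is not part of this change.

    /* Illustrative sketch only -- assumes a constructed FLCNQUEUE and the
     * nvswitch driver environment. */
    static NV_STATUS
    exampleWriteOneCommand
    (
        nvswitch_device *device,
        PFLCN            pFlcn,
        PFLCNQUEUE       pQueue,
        RM_FLCN_CMD     *pCmd
    )
    {
        NV_STATUS status;

        // Reserve contiguous space for the whole transaction up front; the
        // open call rewinds the queue internally if that is what it takes to
        // find room.
        status = pQueue->openWrite(device, pFlcn, pQueue, pCmd->cmdGen.hdr.size);
        if (status != NV_OK)
        {
            return status;   // e.g. NV_ERR_INSUFFICIENT_RESOURCES when full
        }

        // Any number of pushes may follow; here a single command is written.
        pQueue->push(device, pFlcn, pQueue, pCmd, pCmd->cmdGen.hdr.size);

        // Committing the close publishes the new head pointer to the falcon.
        return pQueue->close(device, pFlcn, pQueue, NV_TRUE);
    }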
+
+/*!
+ * @brief Reads a buffer of data from the given queue.
+ *
+ * Read a buffer of data from the given queue.  This function does not
+ * interpret the data read in any way.  Consequently, it cannot feasibly
+ * detect each and every rewind condition that is possible.  For this
+ * reason, it is up to the caller to interpret the returned data and
+ * rewind the queue as necessary. This function keeps track of the current
+ * read position in the queue (set when the queue is opened). To maintain the
+ * required DMEM alignment, the queue position is updated with aligned-read
+ * size (size rounded-up to the next DMEM alignment).
+ *
+ * @param[in]   device      nvswitch device pointer
+ * @param[in]   pFlcn       FLCN object pointer
+ * @param[in]   pQueue      The queue to read from
+ * @param[in]   pData       The buffer to write the read data to
+ * @param[in]   size        The number of bytes to read
+ * @param[out]  pBytesRead  The number of bytes read from the queue
+ *
+ * @return 'NV_OK' if the read operation is successful. 'NV_ERR_GENERIC' upon
+ *         error.  Reading zero bytes from an empty queue is not considered an
+ *         error condition.
+ *
+ * @see flcnQueueRewind
+ */
+static NV_STATUS
+_flcnQueuePop_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue,
+    void            *pData,
+    NvU32            size,
+    NvU32           *pBytesRead
+)
+{
+    NvU32   head;
+    NvU32   tail;
+    NvU32   used;
+
+    // set the bytes read to zero in case an error occurs
+    *pBytesRead = 0;
+
+    // ensure the queue is currently opened for read
+    if (!QUEUE_OPENED_FOR_READ(pQueue))
+    {
+        NVSWITCH_PRINT(device, ERROR,
+             "%s: queue not opened for read (queueLogId=0x%x).\n",
+             __FUNCTION__, pQueue->queueLogId);
+        NVSWITCH_ASSERT(0);
+        return NV_ERR_GENERIC;
+    }
+
+    //
+    // The calculations performed in this function are best described using
+    // head and tail terminology. The current head pointer values are always
+    // used whereas the cached queue position is used for the tail value. This
+    // allows read-operations to be transacted without writing the tail pointer
+    // for each read.
+    //
+    (void)pQueue->headGet(device, pFlcn, pQueue, &head);
+    tail = pQueue->position;
+
+    // there is no data in the queue when the head and tail are equal
+    if (head == tail)
+    {
+        return NV_OK;
+    }
+
+    //
+    // Calculate the used space in the queue (this limits how much can be read).
+    // Two cases:
+    //     1. When the head is greater than the tail the amount of data in the
+    //        queue is defined by the difference between the head and tail
+    //        pointers.
+    //
+    //     2. When the head is less than the tail, a potential rewind condition
+    //        exists. In that case, the amount of data that can be read
+    //        (without wrapping) is defined as the difference between the
+    //        queue's size and the current tail pointer. Note that 'tail' is
+    //        absolute so we need to factor-in the starting-offset of the queue.
+    //
+    if (head > tail)
+    {
+        used = head - tail;
+    }
+    else
+    {
+        used = pQueue->queueOffset + pQueue->queueSize - tail;
+    }
+
+    // ensure we only read what is available and no more
+    if (size > used)
+    {
+        NVSWITCH_PRINT(device, ERROR,
+            "%s: suspicious read op - read size > used size. "
+            "(queueLogId=0x%x, read size=0x%x, used size=0x%x).\n",
+            __FUNCTION__, pQueue->queueLogId, size, used);
+        NVSWITCH_ASSERT(0);
+
+        // best thing we can do is cap the read size
+        size = used;
+    }
+
+    //
+    // Copy the data into the output buffer, update the queue's current
+    // position, and return the number of bytes that have been read.
+    //
+    pQueue->read(device, pFlcn, pQueue, tail, pData, size);
+    pQueue->position += NV_ALIGN_UP(size, QUEUE_ALIGNMENT);
+    *pBytesRead = size;
+    return NV_OK;
+}
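
The head/tail arithmetic used by the pop path above can be modelled outside the driver. The standalone sketch below (plain C; the offsets and sizes are hypothetical) reproduces the two cases: head ahead of tail, and head wrapped behind tail, where only the bytes up to the end of the queue are readable without a rewind.

    #include <stdio.h>

    /* Standalone model of the read-side "used bytes" calculation; offsets and
     * sizes are hypothetical, not taken from real hardware. */
    static unsigned
    used_bytes(unsigned head, unsigned tail, unsigned queue_offset, unsigned queue_size)
    {
        if (head == tail)
            return 0;                                /* queue is empty       */
        if (head > tail)
            return head - tail;                      /* contiguous data      */
        return queue_offset + queue_size - tail;     /* data up to queue end */
    }

    int main(void)
    {
        const unsigned offset = 0x800, size = 0x100;

        /* head ahead of tail: 0x30 bytes of data */
        printf("%#x\n", used_bytes(0x850, 0x820, offset, size));    /* 0x30 */

        /* head wrapped behind tail: only the 0x40 bytes up to the end of the
         * queue (offset + size = 0x900) can be read before a rewind         */
        printf("%#x\n", used_bytes(0x810, 0x8c0, offset, size));    /* 0x40 */
        return 0;
    }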
+
+/*!
+ * @brief Rewinds a queue back to its starting offset in DMEM.
+ *
+ * When the queue is opened for "write", this function writes the rewind
+ * command to the current queue position and updates the queue position to the
+ * beginning of the queue. When opened for "read", only the queue position
+ * is updated.
+ *
+ * @param[in]  device  nvswitch device pointer
+ * @param[in]  pFlcn   FLCN object pointer
+ * @param[in]  pQueue  The queue to rewind.
+ *
+ * @pre  The queue must be opened prior to calling this function
+ * @see  flcnQueueOpenRead
+ * @see  flcnQueueOpenWrite
+ */
+static void
+_flcnQueueRewind_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue
+)
+{
+    RM_FLCN_CMD  rewindCmd;
+    NvU32        size = 0;
+
+    //
+    // Ensure that the queue is opened before continuing. Failure here
+    // is never expected.
+    //
+    if (!pQueue->bOpened)
+    {
+        NVSWITCH_PRINT(device, ERROR,
+             "%s: queue not opened (queueLogId=0x%x).\n",
+             __FUNCTION__, pQueue->queueLogId);
+        NVSWITCH_ASSERT(0);
+        return;
+    }
+
+    // write the rewind command when the queue is opened for "write"
+    if (QUEUE_OPENED_FOR_WRITE(pQueue))
+    {
+        // populate the rewind command
+        size = pQueue->populateRewindCmd(device, pFlcn, pQueue, &rewindCmd);
+
+        // write out the rewind command
+        pQueue->push(device, pFlcn, pQueue, &rewindCmd, size);
+    }
+
+    // manually set the queue position back to the beginning of the queue
+    pQueue->position = pQueue->queueOffset;
+    return;
+}
+
+/*!
+ * @brief Writes a buffer of data to a queue.
+ *
+ * Writes a buffer of data to the given command queue.  This function
+ * cannot fail since space checks are performed during the call to open to
+ * ensure that sufficient space exists in the queue for the data.
+ *
+ * @param[in]  device  nvswitch device pointer
+ * @param[in]  pFlcn   FLCN object pointer
+ * @param[in]  pQueue  The queue to write to
+ * @param[in]  pData   The buffer of data to write
+ * @param[in]  size    The number of bytes to write from the buffer
+ */
+static void
+_flcnQueuePush_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue,
+    void            *pData,
+    NvU32            size
+)
+{
+    // ensure the queue is currently opened for write
+    if (!QUEUE_OPENED_FOR_WRITE(pQueue))
+    {
+        NVSWITCH_PRINT(device, ERROR,
+            "%s: queue not opened for write (queueLogId=0x%x).\n",
+            __FUNCTION__, pQueue->queueLogId);
+        NVSWITCH_ASSERT(0);
+        return;
+    }
+
+    // write the data out to the falcon's DMEM (or EMEM)
+    pQueue->write(device, pFlcn, pQueue, pQueue->position, pData, size);
+    pQueue->position += NV_ALIGN_UP(size, QUEUE_ALIGNMENT);
+    return;
+}
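
Both the push and pop paths advance the cached position by the aligned size rather than the raw size. Below is a small standalone model of that rounding; the 4-byte alignment is a hypothetical stand-in for the driver's QUEUE_ALIGNMENT constant.

    #include <stdio.h>

    /* Standalone model of NV_ALIGN_UP-style rounding; 4 is a hypothetical
     * alignment, standing in for the driver's QUEUE_ALIGNMENT. */
    #define ALIGN_UP(v, a)   (((v) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
        unsigned position = 0x800;      /* hypothetical starting write position */
        unsigned size     = 13;         /* raw payload size in bytes            */

        position += ALIGN_UP(size, 4);  /* advances by 16, not 13               */
        printf("new position: %#x\n", position);   /* prints 0x810 */
        return 0;
    }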
+
+/*!
+ * Checks a queue to see if it has room for writing data of a specific size.
+ *
+ * @param[in]   device     nvswitch device pointer
+ * @param[in]   pFlcn      FLCN object pointer
+ * @param[in]   pQueue     The queue to check for space
+ * @param[in]   writeSize  The amount of space to check for
+ * @param[out]  pBRewind
+ *     Set to 'NV_TRUE' when space may be found if the queue is rewound. This
+ *     parameter is optional (may be NULL) for callers not interested in
+ *     rewind information.
+ *
+ * @return 'NV_OK' if the queue contains space (has room) for the write.
+ *         'NV_ERR_INSUFFICIENT_RESOURCES' if queue is full.
+ */
+static NV_STATUS
+_flcnQueueHasRoom_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue,
+    NvU32            writeSize,
+    NvBool          *pBRewind
+)
+{
+    NvU32  head;
+    NvU32  tail;
+    NvU32  free = 0;
+    NvBool bRewind = NV_FALSE;
+
+    //
+    // Align the writeSize up to the size the buffer will actually take in
+    // the queue.
+    //
+    writeSize = NV_ALIGN_UP(writeSize, QUEUE_ALIGNMENT);
+
+    // retrieve the current queue's head and tail pointers.
+    (void)pQueue->headGet(device, pFlcn, pQueue, &head);
+    (void)pQueue->tailGet(device, pFlcn, pQueue, &tail);
+
+    //
+    // In the case where the head pointer is greater than the tail pointer,
+    // calculate the amount of space in the command queue that may be used
+    // before a REWIND command must be written.  Be sure to account for the
+    // size of the REWIND command to ensure it can ALWAYS be written.
+    //
+    if (head >= tail)
+    {
+        free  = pQueue->queueOffset + pQueue->queueSize - head;
+        free -= pQueue->cmdHdrSize;
+
+        //
+        // Set the rewind flag to check if space would exist if the queue
+        // were rewound.
+        //
+        if (writeSize > free)
+        {
+            bRewind = NV_TRUE;
+            head    = pQueue->queueOffset;
+        }
+    }
+
+    //
+    // In the event that the head pointer has wrapped around the queue and
+    // the tail has not yet caught up, calculate the amount of space in the
+    // command queue that may be used before the head pointer reaches the tail
+    // pointer (this can never be allowed to happen).  This condition is also
+    // met if a rewind condition is detected above.
+    //
+    if (head < tail)
+    {
+        //
+        // Subtract off one byte from the free space to guarantee that the tail
+        // is never allowed to be equal to the head pointer unless the queue is
+        // truly empty.
+        //
+        free = tail - head - 1;
+    }
+
+    // return the rewind flag
+    if (pBRewind != NULL)
+    {
+        *pBRewind = bRewind;
+    }
+
+    return (writeSize <= free) ? NV_OK : NV_ERR_INSUFFICIENT_RESOURCES;
+}
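
The write-side space check above encodes two rules worth calling out: contiguous free space at the head is reduced by one command header so a REWIND command can always be written, and when the head sits behind the tail one byte is held back so the head never catches up to the tail on a full queue. The standalone sketch below models that logic with hypothetical numbers.

    #include <stdbool.h>
    #include <stdio.h>

    /* Standalone model of the write-side free-space check; geometry values are
     * hypothetical.  Returns true when write_size fits, and sets *rewind when
     * the space is only available after rewinding to the start of the queue. */
    static bool
    has_room(unsigned head, unsigned tail, unsigned queue_offset,
             unsigned queue_size, unsigned hdr_size, unsigned write_size,
             bool *rewind)
    {
        unsigned free_bytes = 0;

        *rewind = false;
        if (head >= tail)
        {
            /* Space up to the end of the queue, minus room for a REWIND command. */
            free_bytes = queue_offset + queue_size - head - hdr_size;
            if (write_size > free_bytes)
            {
                *rewind = true;         /* retry the check from the queue start */
                head = queue_offset;
            }
        }
        if (head < tail)
        {
            /* Hold one byte back so head never equals tail on a full queue. */
            free_bytes = tail - head - 1;
        }
        return write_size <= free_bytes;
    }

    int main(void)
    {
        bool rewind;

        /* A 0x20-byte write with head at 0x8f0 of a queue ending at 0x900 only
         * fits after rewinding to offset 0x800 (tail at 0x840 leaves 0x3f).  */
        bool ok = has_room(0x8f0, 0x840, 0x800, 0x100, 8, 0x20, &rewind);
        printf("fits=%d rewind=%d\n", ok, rewind);   /* fits=1 rewind=1 */
        return 0;
    }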
+
+/*!
+ * Retrieve the current tail pointer for given FLCN queue.
+ *
+ * @param[in]   device  nvswitch device pointer
+ * @param[in]   pFlcn   FLCN object pointer
+ * @param[in]   pQueue  Pointer to the queue
+ * @param[out]  pTail   Pointer to write with the queue's tail value
+ *
+ * @return 'NV_OK' if the tail value was successfully retrieved.
+ * @return 'NV_ERR_GENERIC' otherwise
+ */
+static NV_STATUS
+_flcnQueueTailGet_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue,
+    NvU32           *pTail
+)
+{
+    NVSWITCH_ASSERT(pFlcn->pQueueInfo != NULL);
+    if (RM_FLCN_QUEUEID_IS_COMMAND_QUEUE(pFlcn->pQueueInfo, pQueue->queueLogId))
+    {
+        return flcnCmdQueueTailGet(device, pFlcn, pQueue, pTail);
+    }
+    else
+    {
+        return flcnMsgQueueTailGet(device, pFlcn, pQueue, pTail);
+    }
+}
+
+/*!
+ * Set the tail pointer for the given FLCN queue.
+ *
+ * @param[in]  device  nvswitch device pointer
+ * @param[in]  pFlcn   FLCN object pointer
+ * @param[in]  pQueue  Pointer to the queue
+ * @param[in]  tail    The desired tail value
+ *
+ * @return 'NV_OK' if the tail value was successfully set.
+ * @return 'NV_ERR_GENERIC' otherwise
+ */
+static NV_STATUS
+_flcnQueueTailSet_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue,
+    NvU32            tail
+)
+{
+    NVSWITCH_ASSERT(pFlcn->pQueueInfo != NULL);
+    if (RM_FLCN_QUEUEID_IS_COMMAND_QUEUE(pFlcn->pQueueInfo, pQueue->queueLogId))
+    {
+        return flcnCmdQueueTailSet(device, pFlcn, pQueue, tail);
+    }
+    else
+    {
+        return flcnMsgQueueTailSet(device, pFlcn, pQueue, tail);
+    }
+}
+
+/*!
+ * Read a buffer of data from FLCN queue.
+ *
+ * @param[in]   device      nvswitch device pointer
+ * @param[in]   pFlcn       FLCN object pointer
+ * @param[in]   pQueue      The queue to read from
+ * @param[in]   offset      Offset (from the start of DMEM) to start the read
+ * @param[out]  pDst        Buffer to store the read-data
+ * @param[in]   sizeBytes   The number of bytes to read
+ *
+ * @return void
+ */
+static void
+_flcnQueueRead_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue,
+    NvU32            offset,
+    NvU8            *pDst,
+    NvU32            sizeBytes
+)
+{
+    if (pFlcn->bEmemEnabled)
+    {
+        flcnableEmemCopyFrom(device, pFlcn->pFlcnable,
+                             offset, pDst, sizeBytes, 0);
+    }
+    else
+    {
+        if (flcnDmemCopyFrom(device, pFlcn, offset, pDst, sizeBytes, 0)
+            != NV_OK)
+        {
+            NVSWITCH_PRINT(device, ERROR,
+                "%s: Failed to copy from flcn DMEM\n",
+                __FUNCTION__);
+            NVSWITCH_ASSERT(0);
+        }
+    }
+}
+
+/*!
+ * Write a buffer of data to a FLCN queue.
+ *
+ * @param[in]  device     nvswitch device pointer
+ * @param[in]  pFlcn      FLCN object pointer
+ * @param[in]  pQueue     The queue to write to
+ * @param[in]  offset     Offset (from the start of DMEM) to start the write
+ * @param[in]  pSrc       Buffer containing the write-data
+ * @param[in]  sizeBytes  The number of bytes to write
+ */
+static void
+_flcnQueueWrite_dmem
+(
+    nvswitch_device *device,
+    PFLCN            pFlcn,
+    PFLCNQUEUE       pQueue,
+    NvU32            offset,
+    NvU8            *pSrc,
+    NvU32            sizeBytes
+)
+{
+    if (pFlcn->bEmemEnabled)
+    {
+        flcnableEmemCopyTo(device, pFlcn->pFlcnable,
+                           offset, pSrc, sizeBytes, 0);
+    }
+    else
+    {
+        if (flcnDmemCopyTo(device, pFlcn, offset, pSrc, sizeBytes, 0)
+            != NV_OK)
+        {
+            NVSWITCH_PRINT(device, ERROR,
+                "%s: Failed to copy to flcn DMEM\n",
+                __FUNCTION__);
+            NVSWITCH_ASSERT(0);
+        }
+    }
+}
diff --git a/src/common/nvswitch/kernel/flcn/flcnqueue_fb_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcnqueue_fb_nvswitch.c
new file mode 100644
index 000000000..35106de70
--- /dev/null
+++ b/src/common/nvswitch/kernel/flcn/flcnqueue_fb_nvswitch.c
@@ -0,0 +1,56 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "common_nvswitch.h"
+
+#include "flcn/flcnable_nvswitch.h"
+#include "flcn/flcn_nvswitch.h"
+#include "flcn/flcnqueue_nvswitch.h"
+
+/*!
+ * @file   flcnqueue_fb_nvswitch.c
+ * @brief  Provides all functions specific to FB Queue (non-DMEM queues).
+ *
+ * Queues are the primary communication mechanism between the RM and various
+ * falcon-based engines such as the PMU and Display Falcon.  The RM requests
+ * actions by inserting a data packet (command) into a command queue. This
+ * generates an interrupt to the falcon which allows it to wake-up and service
+ * the request.  Upon completion of the command, the falcon can optionally
+ * write an acknowledgment packet (message) into a separate queue designated
+ * for RM-bound messages.  Commands sent via an FB CMD queue must receive a
+ * response, as the response is required to clear that CMD queue element's
+ * "in use" bit and free the DMEM allocation associated with it.
+ *
+ * For more information on FB Queues see:
+ *     PMU FB Queue (RID-70296)
+ * For general queue information, see the HDR of flcnqueue_nvswitch.c.
+ * For information specific to DMEM queues, see the HDR of flcnqueue_dmem_nvswitch.c.
+ *
+ * Each queue has distinct "head" and "tail" pointers. The "head" pointer is the
+ * index of the queue Element where the next write operation will take place;
+ * the "tail" marks the index of the queue Element for the next read.  When the
+ * head and tail pointers are equal, the queue is empty.  When non-equal, data
+ * exists in the queue that needs to be processed.  Queues are always allocated
+ * in the Super Surface in FB.
+ */
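
Since this file so far carries only the FB Queue description, the conceptual sketch below (standalone C, not the driver's FB-queue implementation) models the element-indexed head/tail behaviour and the "in use" bit that is cleared only when the falcon's response for that element arrives; the element count is hypothetical.

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_ELEMENTS 8          /* hypothetical queue depth */

    /* Conceptual model only: head/tail are element indices, the queue is empty
     * when they are equal, and an element stays "in use" until its response
     * arrives and clears the bit (mirroring the description above).          */
    struct fb_queue_model
    {
        unsigned head;                      /* next element to be written */
        unsigned tail;                      /* next element to be read    */
        bool     in_use[NUM_ELEMENTS];
    };

    static bool fbq_is_empty(const struct fb_queue_model *q)
    {
        return q->head == q->tail;
    }

    static void fbq_submit(struct fb_queue_model *q)
    {
        q->in_use[q->head] = true;                  /* element now owned       */
        q->head = (q->head + 1) % NUM_ELEMENTS;
    }

    static void fbq_response(struct fb_queue_model *q, unsigned element)
    {
        q->in_use[element] = false;                 /* response frees element  */
    }

    int main(void)
    {
        struct fb_queue_model q = { 0 };

        fbq_submit(&q);                             /* one command queued      */
        printf("empty=%d in_use[0]=%d\n", fbq_is_empty(&q), q.in_use[0]); /* 0 1 */

        fbq_response(&q, 0);                        /* response clears it      */
        printf("in_use[0]=%d\n", q.in_use[0]);                           /* 0   */
        return 0;
    }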
+
diff --git a/src/common/nvswitch/kernel/flcn/flcnqueue_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcnqueue_nvswitch.c
new file mode 100644
index 000000000..73aeee313
--- /dev/null
+++ b/src/common/nvswitch/kernel/flcn/flcnqueue_nvswitch.c
@@ -0,0 +1,1646 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "flcn/flcnqueue_nvswitch.h"
+#include "flcn/flcn_nvswitch.h"
+#include "soe/soe_nvswitch.h"
+
+#include "rmflcncmdif_nvswitch.h"
+#include "common_nvswitch.h"
+
+/*!
+ * @file   flcnqueue_nvswitch.c
+ * @brief  Provides all the fundamental logic for reading/writing queues.
+ *
+ * Queues are the primary communication mechanism between the RM and various
+ * falcon-based engines such as the PMU and Display Falcon.  The RM requests
+ * actions by inserting a data packet (command) into a command queue. This
+ * generates an interrupt to the falcon which allows it to wake-up and service
+ * the request.  Upon completion of the command, the falcon can optionally
+ * write an acknowledgment packet (message) into a separate queue designated
+ * for RM-bound messages.
+ *
+ * There are currently two types of queues supported:
+ *      1) DMEM queues.  The original queue type.  For information specific to
+ *          DMEM queues, see the HDR of flcnqueue_dmem_nvswitch.c.
+ *      2) FB Queues.  For information specific to FB Queues, see the HDR of
+ *          flcnqueue_fb_nvswitch.c.
+ * This file contains routines common to both queue types.
+ *
+ *
+ * + * To use the read/write APIs, the caller must first "open" the queue for the + * appropriate action (read/write). A queue may only be opened once at any + * given time. Subsequent attempts to open an already "opened" queue will + * result in a failure. This module will keep track of all "opened" queues. + * When a queue is opened for "read", a copy of the queue's current tail + * pointer will be cached. All reads that occur from the time the queue is + * opened to the time it is closed will be based on this cached value. The + * value will be updated for each individual read operation that is requested. + * When the queue is "closed", this value will be written out to its + * corresponding PRIV register thus making it visible to the falcon ucode. The + * same scheme is also applied when opening a queue for "writing" except that + * instead of caching the tail pointer, the header pointer is cached. + * Additionally, when a "command queue" is closed and the new head pointer is + * written, an interrupt will be generated for the falcon to allow the command + * to be processed. + */ + +static NvBool _flcnQueueCmdValidate (nvswitch_device *device, PFLCN pFlcn, PRM_FLCN_CMD pCmd, PRM_FLCN_MSG pMsg, void *pPayload, NvU32 queueIdLogical); +static NvU32 _flcnQueuePopulateRewindCmd(nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, RM_FLCN_CMD *pFlcnCmd); +static NV_STATUS _flcnQueueClose (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue, NvBool bCommit); +static NvBool _flcnQueueIsEmpty (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue); +static NV_STATUS _flcnQueueOpenRead (nvswitch_device *device, PFLCN, PFLCNQUEUE pQueue); +static NV_STATUS _flcnQueueHeadGet (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 *pHead); +static NV_STATUS _flcnQueueHeadSet (nvswitch_device *device, PFLCN pFlcn, PFLCNQUEUE pQueue, NvU32 head); + +/*! + * @brief Construct a Falcon Queue object + * + * This is a constructor/initialization function for Falcon Queue objects. + * Callers can choose to either provide a pre-allocated Falcon Queue object or + * allow this function to perform the allocation. The former case is more + * ideal cases where a collection of queues must be allocated or when static + * allocation is desired. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in,out] ppQueue + * Pointer to the queue to construct and optionally allocate. When pointing + * to a non-NULL queue pointer, the queue is simply initialized. When NULL, + * a queue will be allocated and then initialized. + * + * @param[in] queueLogId Logical-identifier for the queue + * @param[in] queuePhyId Physical-index/identifier for the queue + * @param[in] offset Starting location of queue in memory (DMEM) + * @param[in] queueSize Size (in bytes) of the queue + * @param[in] cmdHdrSize Size (in bytes) of the command header + * + * @return 'NV_OK' upon successful construction/initialization. + * @return 'NV_ERR_NO_MEMORY' when unable to allocate queue. 
+ */ +NV_STATUS +flcnQueueConstruct_common_nvswitch +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCNQUEUE *ppQueue, + NvU32 queueLogId, + NvU32 queuePhyId, + NvU32 offset, + NvU32 queueSize, + NvU32 cmdHdrSize +) +{ + PFLCNQUEUE pQueue; + NV_STATUS status = NV_OK; + + if (ppQueue == NULL) + { + NVSWITCH_ASSERT(0); + return NV_ERR_INVALID_POINTER; + } + + if (*ppQueue == NULL) + { + *ppQueue = nvswitch_os_malloc(sizeof(FLCNQUEUE)); + if (*ppQueue == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to allocate FLCNQUEUE (queueLogId=0x%x).\n", + __FUNCTION__, queueLogId); + return NV_ERR_NO_MEMORY; + } + nvswitch_os_memset(*ppQueue, 0, sizeof(FLCNQUEUE)); + } + + pQueue = *ppQueue; + + pQueue->queueLogId = queueLogId; + pQueue->queuePhyId = queuePhyId; + pQueue->queueOffset = offset; + pQueue->position = offset; + pQueue->queueSize = queueSize; + pQueue->cmdHdrSize = cmdHdrSize; + pQueue->oflag = 0; + pQueue->bOpened = NV_FALSE; + pQueue->bLocked = NV_FALSE; + pQueue->close = _flcnQueueClose; + pQueue->isEmpty = _flcnQueueIsEmpty; + pQueue->openRead = _flcnQueueOpenRead; + pQueue->headGet = _flcnQueueHeadGet; + pQueue->headSet = _flcnQueueHeadSet; + pQueue->populateRewindCmd = _flcnQueuePopulateRewindCmd; + + return status; +} + +/*! + * @brief Closes a queue + * + * Closes the given command queue. The 'bCommit' flag is used to commit the + * changes performed on the queue since it was opened. When the queue is + * opened for writing, the commit operation involves writing the queue's head + * pointer with the queue's current write position value. When committing a + * queue that has been opened for reading, the tail pointer is updated instead. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue The queue to close + * @param[in] bCommit 'NV_TRUE' to commit the operations performed on the + * queue since opened. 'NV_FALSE' to leave the queue in + * its current HW state. + * + * @return 'NV_OK' if the close operation is successful + * @pre The queue must be opened prior to calling this function + * @see flcnQueueOpenRead + * @see flcnQueueOpenWrite + */ +static NV_STATUS +_flcnQueueClose +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCNQUEUE pQueue, + NvBool bCommit +) +{ + // ensure that the queue is opened for before continuing. + if (!pQueue->bOpened) + { + NVSWITCH_PRINT(device, ERROR, + "%s: queue not opened (queueLogId=0x%x).\n", + __FUNCTION__, pQueue->queueLogId); + return NV_OK; + } + + // + // If committing a queue opened for read, close the queue by updating the + // queue's tail pointer. If committing when opened for write, update the + // head pointer. + // + if (bCommit) + { + if (QUEUE_OPENED_FOR_READ(pQueue)) + { + (void)pQueue->tailSet(device, pFlcn, pQueue, pQueue->position); + } + else + { + (void)pQueue->headSet(device, pFlcn, pQueue, pQueue->position); + } + } + + // mark the queue as "not open" + pQueue->bOpened = NV_FALSE; + return NV_OK; +} + +/*! + * @brief Checks to see if a queue contains any data that may be read + * + * Compares the queue's head and tail pointers to see if any data is available + * for reading. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue The queue to check + * + * @return 'NV_TRUE' if the queue is empty; 'NV_FALSE' otherwise. 
+ * @see flcnQueuePop + */ +static NvBool +_flcnQueueIsEmpty +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCNQUEUE pQueue +) +{ + NvU32 head; + NvU32 tail; + + // + // Reading always occurs from the tail to the head. If the file is + // already opened for read, use the queue position as the current tail + // value (otherwise, get the tail value from hardware). + // + (void)pQueue->headGet(device, pFlcn, pQueue, &head); + if (QUEUE_OPENED_FOR_READ(pQueue)) + { + tail = pQueue->position; + } + else + { + (void)pQueue->tailGet(device, pFlcn, pQueue, &tail); + } + return head == tail; +} + +/*! + * @brief Opens a queue for reading. + * + * Opens the given command queue for read operations. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue The queue to open + * + * @return 'NV_OK' if the queue is successfully opened. 'NV_ERR_GENERIC' otherwise. + * @see flcnQueuePop + */ +static NV_STATUS +_flcnQueueOpenRead +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCNQUEUE pQueue +) +{ + // + // Verify that the queue is not already opened. This is not expected to + // occur. + // + if (pQueue->bOpened) + { + NVSWITCH_PRINT(device, ERROR, + "%s: unable to open queue (already opened, queueLogId=0x%x).\n", + __FUNCTION__, pQueue->queueLogId); + NVSWITCH_ASSERT(0); + return NV_ERR_GENERIC; + } + + // + // Update the queue position to specify where the first read will occur + // from, set the open flag, and mark the queue as "opened". + // + (void)pQueue->tailGet(device, pFlcn, pQueue, &pQueue->position); + pQueue->oflag = FLCNQUEUE_OFLAG_READ; + pQueue->bOpened = NV_TRUE; + + return NV_OK; +} + +/*! + * Retrieve the current head pointer for given FLCN queue. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[out] pHead Pointer to write with the queue's head pointer + * + * @return 'NV_OK' if head value was successfully retrieved. + * @return 'NV_ERR_GENERIC' otherwise + */ +static NV_STATUS +_flcnQueueHeadGet +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCNQUEUE pQueue, + NvU32 *pHead +) +{ + NVSWITCH_ASSERT(pFlcn->pQueueInfo != NULL); + if (RM_FLCN_QUEUEID_IS_COMMAND_QUEUE(pFlcn->pQueueInfo, pQueue->queueLogId)) + { + return flcnCmdQueueHeadGet(device, pFlcn, pQueue, pHead); + } + else + { + return flcnMsgQueueHeadGet(device, pFlcn, pQueue, pHead); + } +} + +/*! + * Set the head pointer for the given FLCN queue. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue Pointer to the queue + * @param[in] head The desired head value for the queue + * + * @return 'NV_OK' if the head value was successfully set. + * @return 'NV_ERR_GENERIC' otherwise + */ +static NV_STATUS +_flcnQueueHeadSet +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCNQUEUE pQueue, + NvU32 head +) +{ + NVSWITCH_ASSERT(pFlcn->pQueueInfo != NULL); + if (RM_FLCN_QUEUEID_IS_COMMAND_QUEUE(pFlcn->pQueueInfo, pQueue->queueLogId)) + { + return flcnCmdQueueHeadSet(device, pFlcn, pQueue, head); + } + else + { + return flcnMsgQueueHeadSet(device, pFlcn, pQueue, head); + } +} + +/*! 
+ * Populate the FLCN queue rewind command + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pQueue The queue where we will push the command to + * @param[in] pFlcnCmd Pointer storing the content of the rewind command + * + * @return The size of the populated data + */ +static NvU32 +_flcnQueuePopulateRewindCmd +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCNQUEUE pQueue, + RM_FLCN_CMD *pFlcnCmd +) +{ + pFlcnCmd->cmdGen.hdr.unitId = RM_FLCN_UNIT_ID_REWIND; + pFlcnCmd->cmdGen.hdr.size = (NvU8)pQueue->cmdHdrSize; + return pFlcnCmd->cmdGen.hdr.size; +} + +/*! + * Write a command to the specified command queue. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object Pointer + * @param[in] queueLogId Logical ID of the queue + * @param[in] pCmd The command buffer to submit + * + * @param[in] pTimeout + * An optional pointer (may be NULL) to a pre-configured timeout structure + * that when non-NULL is used to indicate that blocking behavior is + * allowed (within the bounds of the timeout) for operations that have the + * potential to fail on transient conditions and can be retried (mutex + * acquirement, queue insertion, etc ...). When NULL, this function does + * NOT retry such operations when they fail (the function becomes non- + * blocking) and returns back control to the caller. Example scenarios + * include when the command queue mutex can not be obtained and if the + * queue does not have enough free space to fit the command. + * + * @return 'NV_OK' + * If the command is successfully written to the command queue. + * + * @return 'NV_ERR_INSUFFICIENT_RESOURCES' + * If the command could not be queued as a result of the target command + * queue having insufficient space to fit the command. Could be after + * the initial queue attempt if the non-blocking behavior has been + * requested or after successive retries if the timeout expired before + * enough space was free'd in the queue. + * + * @return 'NV_ERR_FLCN_ERROR' + * If the command could not be queued due to a failure such as a HALTed + * SOE. This is considered a fatal error. + * + * @return NV_ERR_TIMEOUT + * A timeout occurred before the command write completed. + */ +static NV_STATUS +_flcnQueueCmdWrite_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 queueLogId, + RM_FLCN_CMD *pCmd, + NVSWITCH_TIMEOUT *pTimeout +) +{ + NV_STATUS status; + PFLCNQUEUE pQueue; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + NvBool bKeepPolling; + + NVSWITCH_ASSERT(pTimeout != NULL); + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueueInfo->pQueues != NULL); + pQueue = &pQueueInfo->pQueues[queueLogId]; + + // + // Open the command queue for writing. It is guaranteed that the queue will + // have sufficient space for the command if successfully opened. Upon + // failure, retries will be conducted until either space free's up in the + // queue for the command or until a timeout occurs, assuming the SOE is + // operating normally (not HALTed). + // + do + { + bKeepPolling = (nvswitch_timeout_check(pTimeout)) ? 
NV_FALSE : NV_TRUE; + + status = pQueue->openWrite(device, pFlcn, pQueue, pCmd->cmdGen.hdr.size); + if (status == NV_ERR_INSUFFICIENT_RESOURCES) + { + if (soeIsCpuHalted_HAL(device, ((PSOE)pFlcn->pFlcnable))) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SOE Halt detected (queueLogId=0x%x).\n", + __FUNCTION__, pQueue->queueLogId); + return NV_ERR_FLCN_ERROR; + } + } + else + { + // Anything else is likely not transient + break; + } + } while (bKeepPolling); + + if (status == NV_ERR_INSUFFICIENT_RESOURCES) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout while waiting for space (queueLogId=0x%x).\n", + __FUNCTION__, pQueue->queueLogId); + return NV_ERR_TIMEOUT; + } + + // + // if failed to write Command due to no space, + // dump the queue contents if debug flag is on + // +#if defined(DEBUG) + if (status == NV_ERR_INSUFFICIENT_RESOURCES) + { + RM_FLCN_CMD FlcnCmd; + NV_STATUS dumpstatus; + + dumpstatus = flcnRtosDumpCmdQueue_nvswitch(device, pFlcn, queueLogId, &FlcnCmd); + NVSWITCH_PRINT(device, ERROR, + "%s: Dumping Falcon Command queue completed with status =0x%x \n", + __FUNCTION__ , dumpstatus ); + } +#endif + + if (status != NV_OK) + { + NVSWITCH_PRINT(device, WARN, + "%s: error while opening queue (queueLogId=0x%x, status=0x%x).\n", + __FUNCTION__, pQueue->queueLogId, status); + return status; + } + + // write the command to the queue. + pQueue->push(device, pFlcn, pQueue, pCmd, pCmd->cmdGen.hdr.size); + + // + // Close the command queue to flush out the new head pointer. A failure + // to properly close the queue indicates that the head pointer is unchanged. + // In that case, nothing new has been enqueued from the FLCN's perspective. + // + status = pQueue->close(device, pFlcn, pQueue, NV_TRUE); + if (status == NV_OK) + { + NVSWITCH_PRINT(device, INFO, + "%s: command queued (unit-id=0x%x).\n", + __FUNCTION__, pCmd->cmdGen.hdr.unitId); + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: error while closing queue (queueLogId=0x%x, status=0x%x).\n", + __FUNCTION__, pQueue->queueLogId, status); + } + + return status; +} + +/*! + * Lookup/find the info structure for a sequence given a sequence descriptor. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] seqDesc Descriptor for the sequencer to find + * + * @return SEQ_INFO structure pointer or NULL if no free entry was found. + */ +static PFLCN_QMGR_SEQ_INFO +_flcnQueueSeqInfoFind_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 seqDesc +) +{ + PFLCN_QMGR_SEQ_INFO pSeqInfo; + NvU32 seqIndex; + + for (seqIndex = 0; seqIndex < pFlcn->numSequences; seqIndex++) + { + // (seqIndex < numSequences) so no need to check pointer for NULL. + pSeqInfo = flcnableQueueSeqInfoGet(device, pFlcn->pFlcnable, seqIndex); + + if (pSeqInfo->seqDesc == seqDesc) + { + return pSeqInfo; + } + } + return NULL; +} + +/*! + * Find a free sequence info structure and reserve it so that it may not be + * taken by another client. We always search the free seq starting from the + * next to the latest used seq since it is the most possible free sequence if + * we consume the sequence in serial. 
+ * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * + * @return SEQ_INFO structure pointer or NULL if no free entry was fount + */ +static PFLCN_QMGR_SEQ_INFO +_flcnQueueSeqInfoAcq_IMPL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + PFLCN_QMGR_SEQ_INFO pSeqInfo; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + NvU32 seqIndex; + + seqIndex = pQueueInfo->latestUsedSeqNum; + + for (++seqIndex, seqIndex %= pFlcn->numSequences; seqIndex != pQueueInfo->latestUsedSeqNum; + ++seqIndex, seqIndex %= pFlcn->numSequences) + { + // (seqIndex < numSequences) so no need to check pointer for NULL. + pSeqInfo = flcnableQueueSeqInfoGet(device, pFlcn->pFlcnable, seqIndex); + + if (pSeqInfo->seqState == FLCN_QMGR_SEQ_STATE_FREE) + { + pSeqInfo->seqState = FLCN_QMGR_SEQ_STATE_PENDING; + pQueueInfo->latestUsedSeqNum = seqIndex; + return pSeqInfo; + } + + } + + NVSWITCH_PRINT(device, ERROR, "%s: No free sequence numbers.\n", __FUNCTION__); + return NULL; +} + +/*! + * @brief Mark the sequence info structure as available (and clear it out). + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in,out] pSeqInfo Pointer to the sequence info struct to release + */ +static void +_flcnQueueSeqInfoRel_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ + // Leave @ref seqNum untouched. + pSeqInfo->seqDesc = FLCN_INVALID_SEQ_DESC; + pSeqInfo->seqState = FLCN_QMGR_SEQ_STATE_FREE; + pSeqInfo->pCallback = NULL; + pSeqInfo->pCallbackParams = NULL; + + // Clear any engine specific SEQ_INFO structure extension. + flcnableQueueSeqInfoClear(device, pFlcn->pFlcnable, pSeqInfo); +} + +/*! + * Initializes all global sequence tracking data -- releases all sequence info + * elements, initializes sequence numbers, etc. + * + * @param[in] device nvswitch device pointer + * @param[in,out] pFlcn FLCN object pointer + */ +static void +_flcnQueueSeqInfoStateInit_IMPL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + PFLCN_QMGR_SEQ_INFO pSeqInfo; + NvU32 seqIndex; + + // initialize all elements of the sequence info table + for (seqIndex = 0; seqIndex < pFlcn->numSequences; seqIndex++) + { + // (seqIndex < numSequences) so no need to check pointer for NULL. + pSeqInfo = flcnableQueueSeqInfoGet(device, pFlcn->pFlcnable, seqIndex); + + pSeqInfo->seqNum = (NvU8)seqIndex; + flcnQueueSeqInfoRel(device, pFlcn, pSeqInfo); + } +} + +/*! + * @brief Cancel all sequences that are not currently "free". + * + * Invokes the callback function for any commands that are currently running + * to inform the client that the command has been cancelled/failed. This could + * happen as a result of restarting the FLCN or tearing down the driver. + * + * All sequences that are currently running will be marked as "cancelled". The + * sequence itself will not be released for reuse. This is to allow the status + * to persist so that the data is not stale when/if the client queries for + * status. It also prevents multiple commands containing the same sequence + * number from being sent to the FLCN. Only call this function prior to event + * which will reset the FLCN (or shut it down completely) to avoid leaking + * sequences. 
+ * + * To ease the teardown process, this function MAY be called when: + * - No outstanding sequences exist that need to be cancelled + * - When the sequence-info table is not allocated + * + * @param[in] device nvswitch device pointer + * @param[in,out] pFlcn FLCN object pointer + */ +static void +_flcnQueueSeqInfoCancelAll_IMPL +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + PFLCN_QMGR_SEQ_INFO pSeqInfo; + NvU32 seqIndex; + + // find the sequence info date for the given sequence + for (seqIndex = 0; seqIndex < pFlcn->numSequences; seqIndex++) + { + // (seqIndex < numSequences) so no need to check pointer for NULL. + pSeqInfo = flcnableQueueSeqInfoGet(device, pFlcn->pFlcnable, seqIndex); + + if (pSeqInfo->seqState != FLCN_QMGR_SEQ_STATE_FREE) + { + if (pSeqInfo->seqState != FLCN_QMGR_SEQ_STATE_CANCELED) + { + // + // Cancel the sequence and report an error to the client that + // issued to the command to allow them to perform any necessary + // cleanup (if applicable). + // + pSeqInfo->seqState = FLCN_QMGR_SEQ_STATE_CANCELED; + if (pSeqInfo->pCallback != NULL) + { + pSeqInfo->pCallback(device, NULL, + pSeqInfo->pCallbackParams, + pSeqInfo->seqDesc, + NV_ERR_INVALID_STATE); + } + } + + // Free resources associated with all outstanding sequences. + (void)flcnQueueSeqInfoFree(device, pFlcn, pSeqInfo); + } + + // Re-initialize SEQ_INFO structures. + flcnQueueSeqInfoRel(device, pFlcn, pSeqInfo); + } +} + +/*! + * @brief Free up all the engine specific sequence allocations. + * + * @param[in] device nvswitch device pointer + * @param[in] pPmu PMU object pointer + * @param[in] pSeqInfo SEQ_INFO structure pointer + * + * @return 'NV_OK' if FB Queue Element in use bit cleared. Otherwise error code + * from elementUseStateClr(). + */ +static NV_STATUS +_flcnQueueSeqInfoFree_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + PFLCN_QMGR_SEQ_INFO pSeqInfo +) +{ + + NV_STATUS status = NV_OK; + + flcnableQueueSeqInfoFree(device, pFlcn->pFlcnable, pSeqInfo); + + return status; +} + +/*! + * @brief Finds a free event descriptor + * + * Searches through the FLCN Event Info list and finds an available event + * descriptor. Note that this function does not guarantee that the returned + * will remain reserved. It simply returns an event descriptor that is + * currently unassigned. + * + * @param device nvswitch device pointer + * @param pFlcn FLCN object pointer + * @param pEvtDesc Pointer to write with the assigned event descriptor + * + * @return 'NV_OK' if a free event descriptor was found and assigned. + */ +static NV_STATUS +_flcnQueueAssignEventDesc +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 *pEvtDesc +) +{ + PFLCN_EVENT_INFO pEventInfo; + NvU32 nextDesc; + NV_STATUS status = NV_OK; + NvBool bAvailable = NV_FALSE; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + + // + // Search through the event info list to see if the current event + // descriptor is available. When not available, move on to the next + // descriptor (allow wrapping) and check the list again. + // + nextDesc = pQueueInfo->nextEvtDesc; + while (!bAvailable) + { + pEventInfo = pQueueInfo->pEventInfo; + bAvailable = NV_TRUE; + while (pEventInfo != NULL) + { + // check if already assigned + if (pEventInfo->evtDesc == nextDesc) + { + bAvailable = NV_FALSE; + break; + } + pEventInfo = pEventInfo->pNext; + } + + // + // Move on to the next descriptor if the current descriptor is not + // available, verify that we did not wrap around back to where we + // started. 
+ // + ++nextDesc; + if ((!bAvailable) && (nextDesc == pQueueInfo->nextEvtDesc)) + { + // + // Hitting this point is bad. It indicates that all 2^32 event + // descriptors are currently assigned! + // + NVSWITCH_ASSERT(0); + break; + } + } + + // + // If an available descriptor was found set the returned event descriptor + // value and update the next event descriptor value. + // + if (bAvailable) + { + *pEvtDesc = nextDesc - 1; + pQueueInfo->nextEvtDesc = nextDesc; + } + else + { + status = NV_ERR_GENERIC; + } + return status; +} + +/*! + * @brief Register for event notification. + * + * Registers the given client callback for notification of 'unitId' events. + * Returns a unique handle from which clients must use to identify themselves + * later when un-registering for events. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] unitId + * The identifier which describes the type of event to register for. + * See 'nvswitch/common/inc/rmflcncmdif_nvswitch.h' for a list of all + * available unit identifiers. + * + * @param[in,out] pMsg + * A buffer to contain the data associated with the event. It is the + * caller's responsibility to initialize this buffer. This parameter + * is optional (may be NULL) for callers interested in the occurrence + * of an event, but not interested in the data passed in the event. + * + * @param[in] pCallback + * The callback function pointer to be called when the event fires. + * + * @param[in] pCallbackParams + * Additional optional (may be NULL) parameters that will be blindly + * passed to the callback function. This API will not use these + * parameters whatsoever. + * + * @param[out] pEvtDesc + * Represents and identifier that will be assigned for this particular + * registration. This will be required for properly un-registering + * for the event. + * + * @return 'NV_OK' if a successful registration attempt was made. + */ +static NV_STATUS +_flcnQueueEventRegister_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 unitId, + NvU8 *pMsg, + FlcnQMgrClientCallback pCallback, + void *pCallbackParams, + NvU32 *pEvtDesc +) +{ + PFLCN_EVENT_INFO pEventInfo; + NvU32 evtDesc; + NV_STATUS status; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + + if ((pCallback == NULL) || (pEvtDesc == NULL)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Callback and event descriptor may not be NULL", + __FUNCTION__); + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Validate the UNIT ID (all other pointers are either optional or are + // expected to be checked in the higher-level event registration API). + // + if (!RM_FLCN_UNIT_ID_IS_VALID(pQueueInfo, unitId)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Invalid unit-id (0x%x)", + __FUNCTION__, unitId); + return NV_ERR_INVALID_ARGUMENT; + } + + // find an available event descriptor to assign to this event registration + status = _flcnQueueAssignEventDesc(device, pFlcn, &evtDesc); + if (status != NV_OK) + { + // + // Hitting this point means there is a serious descriptor leak + // somewhere (2^32 descriptors assigned). This is not expected to + // happen. + // + NVSWITCH_PRINT(device, ERROR, + "%s: Error assigning a FLCN event descriptor. 
No " \ + "more descriptors?\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + return status; + } + + // allocate an event-info structure + pEventInfo = nvswitch_os_malloc(sizeof(FLCN_EVENT_INFO)); + if (pEventInfo == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: could not allocate memory for event info " \ + "structure.\n", + __FUNCTION__); + return NV_ERR_NO_MEMORY; + } + + // fill the newly created event info structure + pEventInfo->unitId = unitId; + pEventInfo->pMsg = (RM_FLCN_MSG *)pMsg; + pEventInfo->pCallback = pCallback; + pEventInfo->pCallbackParams = pCallbackParams; + pEventInfo->pNext = pQueueInfo->pEventInfo; + pEventInfo->evtDesc = evtDesc; + + // prepend the new event-info to the front of the event-list + pQueueInfo->pEventInfo = pEventInfo; + + // return a descriptor for the event-info data to the client + *pEvtDesc = evtDesc; + return NV_OK; +} + +/*! + * @brief Unregister for event notification. + * + * Un-registers the given client for event notification. Clients must + * identify themselves using the handle provided by the Registration + * function. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] evtDesc The descriptor that was assigned when the event was + * first registered for. + * + * @return 'NV_OK' if the un-registration was successful. + */ +static NV_STATUS +_flcnQueueEventUnregister_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 evtDesc +) +{ + PFLCN_EVENT_INFO pEventInfo; + PFLCN_EVENT_INFO pEventInfoPrev = NULL; + NV_STATUS status = NV_OK; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + if (pQueueInfo == NULL) + { + NVSWITCH_ASSERT(pQueueInfo != NULL); + return NV_ERR_INVALID_POINTER; + } + + // get the event-info list + pEventInfo = pQueueInfo->pEventInfo; + + // + // Search through the event-info list to find the event-info for the + // specified client. + // + while (pEventInfo != NULL && evtDesc != pEventInfo->evtDesc) + { + // move on to the next event-info structure + pEventInfoPrev = pEventInfo; + pEventInfo = pEventInfo->pNext; + } + + // + // If the event-info data was found, unlink it from the list and free the + // memory. Return an error otherwise. + // + if (pEventInfo != NULL) + { + if (pEventInfoPrev != NULL) + { + pEventInfoPrev->pNext = pEventInfo->pNext; + } + else + { + pQueueInfo->pEventInfo = pEventInfo->pNext; + } + pEventInfo->pNext = NULL; + nvswitch_os_free(pEventInfo); + pEventInfo = NULL; + } + else + { + status = NV_ERR_OBJECT_NOT_FOUND; + } + return status; +} + +/*! + * High-level event dispatcher for all event messages posted to the Message + * Queue. This function will perform the following operations on the received + * message: + * + * # Inspect the message to determine if the message represents an event + * that must be generically handled by the FLCN object. + * + * # Relay the message to the FLCN HAL layer in case any chip-specific + * processing of the message is required. + * + * # Notify all clients that have register for notification on this type + * of FLCN message. + * + * All three of the actions are ALWAYS performed. The only exceptions are if/ + * when error occur at the object- or HAL-layer processing. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] pMsg Pointer to event's message data + * @param[in] evtStatus + * status to be passed to the event listener. If this is not NV_OK, the + * event handler can take appropriate actions upon errors. e.g. event + * cancelled, etc. 
+ * + * @return 'NV_OK' if the event was successfully handled. Otherwise, an + * error occurred at the object- or HAL-layer processing. Such errors + * will be reflected in the returned status value. + */ +static NV_STATUS +_flcnQueueEventHandle_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + RM_FLCN_MSG *pMsg, + NV_STATUS evtStatus +) +{ + PFLCN_EVENT_INFO pEventInfo; + PFLCN_EVENT_INFO pEventInfoNext; + NV_STATUS status = NV_OK; + RM_FLCN_MSG_GEN *pMsgGen = (RM_FLCN_MSG_GEN *)pMsg; + PFLCNABLE pFlcnable = pFlcn->pFlcnable; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + + // get the event-info list + pEventInfo = pQueueInfo->pEventInfo; + + // + // Inspect the message and determine if the message needs handled at the + // object-layer. + // + if (pMsgGen->hdr.unitId == pQueueInfo->initEventUnitId) + { + status = flcnableHandleInitEvent(device, pFlcnable, pMsg); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error processing FLCN message at object-layer " \ + "(unitId=0x%02x, seqNum=0x%02x).\n", + __FUNCTION__, pMsgGen->hdr.unitId, pMsgGen->hdr.seqNumId); + NVSWITCH_ASSERT(0); + return status; + } + } + + // + // Search through the event listener list and signal all clients listening + // for this type of event. + // + for (pEventInfo = pQueueInfo->pEventInfo; pEventInfo != NULL; pEventInfo = pEventInfoNext) + { + // + // Get a pointer to the next event info structure now just in case + // the client decides to unregister from within the callback. + // + pEventInfoNext = pEventInfo->pNext; + + // + // When a client is found listening for this type of event, copy the + // message into the buffer given by the client when they registered + // for the event notification. Call the client's callback function + // when after the copy completes. + // + if (pEventInfo->unitId == pMsgGen->hdr.unitId) + { + if (pEventInfo->pMsg != NULL) + { + nvswitch_os_memcpy(pEventInfo->pMsg, pMsg, pMsgGen->hdr.size); + } + + // callback function cannot be NULL + NVSWITCH_ASSERT(pEventInfo->pCallback != NULL); + pEventInfo->pCallback(device, + pMsg, + pEventInfo->pCallbackParams, + pEventInfo->evtDesc, + evtStatus); + } + } + + return status; +} + +/*! + * Handle non-event messages. This functions checks whether a message is a + * valid reply to the commands sent previously, and if it's valid, it calls + * registered callback functions. + * + * @param[in] device nvswitch device pointer + * @param[in pFlcn FLCN object pointer + * @param[in] pMsg Pointer to event's message data + */ +static NV_STATUS +_flcnQueueResponseHandle_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + RM_FLCN_MSG *pMsg +) +{ + PFLCN_QMGR_SEQ_INFO pSeqInfo; + NV_STATUS status = NV_OK; + RM_FLCN_MSG_GEN *pMsgGen = (RM_FLCN_MSG_GEN *)pMsg; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + + // get the sequence info data associated with this message + pSeqInfo = flcnableQueueSeqInfoGet(device, pFlcn->pFlcnable, pMsgGen->hdr.seqNumId); + if ((pSeqInfo == NULL) || + (pSeqInfo->seqState != FLCN_QMGR_SEQ_STATE_USED)) + { + // + // Hitting this case indicates that the FLCN has reported a non-event + // message for a command that is not known to be queued. + // + NVSWITCH_PRINT(device, ERROR, + "%s: message received for an unknown sequence number = %d\n", + __FUNCTION__, pMsgGen->hdr.seqNumId); + return NV_ERR_GENERIC; + } + + // + // Make a client callback to notify the client that the command has + // completed. 
Also provide the private client data, the sequence + // descriptor of the completed command, and the error status. + // + if (pSeqInfo->pCallback != NULL) + { + pSeqInfo->pCallback(device, pMsg, + pSeqInfo->pCallbackParams, + pSeqInfo->seqDesc, + status); + } + + status = flcnQueueSeqInfoFree(device, pFlcn, pSeqInfo); + // We do not check status since we want to continue with clean-up items. + + // release the sequence so that it may be used for other commands + flcnQueueSeqInfoRel(device, pFlcn, pSeqInfo); + + return status; +} + +/*! + * FLCN interface for retrieving status on a currently queued command + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] seqDesc The identifier that was assigned to the command when it + * was submitted. + * + * @return Command's current status, see FLCN_CMD_STATE_ + */ +static NvU32 +_flcnQueueCmdStatus_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 seqDesc +) +{ + PFLCN_QMGR_SEQ_INFO pSeqInfo = NULL; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + NvU32 seqStatus = FLCN_CMD_STATE_NONE; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + + // attempt to find the sequence-info data for the given sequence + pSeqInfo = flcnQueueSeqInfoFind(device, pFlcn, seqDesc); + if (pSeqInfo == NULL) + { + // sequence completed/done + if (seqDesc < pQueueInfo->nextSeqDesc) + { + seqStatus = FLCN_CMD_STATE_DONE; + } + // sequence never submitted + else + { + seqStatus = FLCN_CMD_STATE_NONE; + } + } + else + { + switch (pSeqInfo->seqState) + { + case FLCN_QMGR_SEQ_STATE_FREE: + case FLCN_QMGR_SEQ_STATE_CANCELED: + { + seqStatus = FLCN_CMD_STATE_DONE; + break; + } + case FLCN_QMGR_SEQ_STATE_PENDING: + case FLCN_QMGR_SEQ_STATE_USED: + { + seqStatus= FLCN_CMD_STATE_RUNNING; + break; + } + } + } + + return seqStatus; +} + +/*! + * FLCN interface for cancelling previously submitted/posted command + * + * Cancelling a command is the way for a caller to indicate that they no longer + * desire to be notified when a command they previously submitted completes + * and to request that no processing occur on the data returned by the falcon for + * that command. This function does not affect processing of the command on + * the falcon. The falcon will continue to operate on the command in the same manner + * as it would if the command had not been cancelled. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] seqDesc The identifier that was assigned to the command + * when it was submitted. + * + * @return NV_OK + * falcon command cancelled + * + * @return NV_ERR_OBJECT_NOT_FOUND + * The provided command-descriptor does not correspond to any in-flight + * commands. + */ +static NV_STATUS +_flcnQueueCmdCancel_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 seqDesc +) +{ + PFLCN_QMGR_SEQ_INFO pSeqInfo = NULL; + + // attempt to find the sequence-info data for the given sequence + pSeqInfo = flcnQueueSeqInfoFind(device, pFlcn, seqDesc); + if (pSeqInfo == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + // + // Mark the sequence as 'canceled' so that we do not attempt to process + // command response when its received. + // + pSeqInfo->seqState = FLCN_QMGR_SEQ_STATE_CANCELED; + return NV_OK; +} + +/*! + * @brief Post a non-blocking command to the FLCN CMD queue(s) for processing. + * + * The FLCN interface for submitting a (non-blocking) command to the FLCN HW. 
+ *
+ * This interface is also used for setting the callback that will occur when
+ * the command has completed.  The client must specify a callback function,
+ * any private arguments that should be passed to the callback, as well as all
+ * necessary pre-allocated buffers for storing the command message and message
+ * payload (if applicable).  For commands requiring input payloads or output
+ * payloads (or both), the client must provide the command offsets to the
+ * allocation structures that contain all needed information describing the
+ * allocation (size and location).
+ *
+ * @param[in]      device  nvswitch device pointer
+ * @param[in,out]  pFlcn   FLCN object pointer
+ * @param[in]      pCmd    Buffer containing raw command data
+ * @param[in,out]  pMsg
+ *     Buffer (may be NULL) that will be filled in response to execution of
+ *     the command (i.e. the command's output, initialized by the caller).
+ * @param[in]      pPayload
+ *     Optional pointer (may be NULL) to an engine-specific structure that
+ *     describes any input and output payloads that may be associated with
+ *     the command being submitted.
+ * @param[in]      queueIdLogical
+ *     The logical identifier for the command queue this command is destined
+ *     for.  Note that only RM command queues are allowed.
+ * @param[in]      pCallback
+ *     Specifies the optional (may be NULL) callback function that will be
+ *     called when the command completes.
+ * @param[in]      pCallbackParams
+ *     Additional optional (may be NULL) parameters that will be blindly
+ *     passed to the callback function.  This API will not use these
+ *     parameters at all.
+ * @param[out]     pSeqDesc
+ *     All commands submitted to the FLCN will be assigned a unique identifier
+ *     before being queued.  This identifier will be stored by this pointer
+ *     and may later be used to query the command's status.
+ * @param[in]      pTimeout
+ *     An optional pointer (may be NULL) to a pre-configured timeout structure
+ *     that, when non-NULL, is used to indicate that blocking behavior is
+ *     allowed (within the bounds of the timeout) for operations that have the
+ *     potential to fail on transient conditions and can be retried (mutex
+ *     acquisition, queue insertion, etc ...).  When NULL, this function does
+ *     NOT retry such operations when they fail (the function becomes non-
+ *     blocking) and returns control to the caller.  Example scenarios include
+ *     when the command queue mutex cannot be obtained or when the queue does
+ *     not have enough free space to fit the command.
+ *
+ * @return 'NV_OK'
+ *     If the command is successfully written to the command queue.
+ *
+ * @return 'NV_ERR_INSUFFICIENT_RESOURCES'
+ *     If the command could not be queued as a result of the target command
+ *     queue having insufficient space to fit the command.  Could be after
+ *     the initial queue attempt if the non-blocking behavior has been
+ *     requested, or after successive retries if the timeout expired before
+ *     enough space was freed in the queue.
+ *
+ * @return 'NV_ERR_STATE_IN_USE'
+ *     If the command couldn't be queued due to a failure to successfully
+ *     acquire the mutex which protects access to the target command queue.
+ *     Same comments as above on blocking and non-blocking behavior apply.
+ */ +static NV_STATUS +_flcnQueueCmdPostNonBlocking_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + PRM_FLCN_CMD pCmd, + PRM_FLCN_MSG pMsg, + void *pPayload, + NvU32 queueIdLogical, + FlcnQMgrClientCallback pCallback, + void *pCallbackParams, + NvU32 *pSeqDesc, + NVSWITCH_TIMEOUT *pTimeout +) +{ + PFALCON_QUEUE_INFO pQueueInfo; + PFLCN_QMGR_SEQ_INFO pSeqInfo = NULL; + PFLCNQUEUE pQueue; + NV_STATUS status; + + // Sanity check the object pointers. + if (pFlcn == NULL) + { + NVSWITCH_ASSERT(pFlcn != NULL); + return NV_ERR_INVALID_STATE; + } + + pQueueInfo = pFlcn->pQueueInfo; + if (pQueueInfo == NULL) + { + NVSWITCH_ASSERT(pQueueInfo != NULL); + return NV_ERR_INVALID_STATE; + } + + pQueue = &pQueueInfo->pQueues[queueIdLogical]; + + // Sequence descriptor pointer may never be NULL. + if (pSeqDesc == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Falcon must be in a ready state before commands may be submitted. + if (!pFlcn->bOSReady) + { + if (pFlcn->engineTag != ENG_TAG_SOE) { + NVSWITCH_PRINT(device, ERROR, + "%s: FLCN not ready for command processing\n", + __FUNCTION__); + return NV_ERR_INVALID_STATE; + } + else + { + SOE *pSoe = (PSOE)pFlcn->pFlcnable; + + status = soeWaitForInitAck(device, pSoe); + + if (status != NV_OK || !pFlcn->bOSReady) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SOE not ready for command processing\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + return status; + } + } + } + + // Sanity check the command input. + if (!_flcnQueueCmdValidate(device, pFlcn, pCmd, pMsg, pPayload, queueIdLogical)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: illformed command request. Skipping.\n", + __FUNCTION__); + status = NV_ERR_INVALID_ARGUMENT; + goto flcnQueueCmdPostNonBlocking_exit; + } + + // Attempt to reserve a sequence for this command. + pSeqInfo = flcnQueueSeqInfoAcq(device, pFlcn); + if (pSeqInfo == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: could not generate a sequence ID for the command\n", + __FUNCTION__); + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto flcnQueueCmdPostNonBlocking_exit; + } + + // Set the sequence number in the command header. + pCmd->cmdGen.hdr.seqNumId = pSeqInfo->seqNum; + + // + // Set the control flags in the command header so that we get a status + // message in the Message Queue and an interrupt when the command completes. + // + pCmd->cmdGen.hdr.ctrlFlags = RM_FLCN_QUEUE_HDR_FLAGS_STATUS; + + // Save the Queue in the Seq structure. + pSeqInfo->pCmdQueue = pQueue; + + // + // Perform all necessary bookkeeping work before enqueuing the command. + // This must be done first to protect against the case where the FLCN + // processes the command before we have the chance to save-off the client + // callback information. + // + pSeqInfo->pCallback = pCallback; + pSeqInfo->pCallbackParams = pCallbackParams; + pSeqInfo->seqDesc = pQueueInfo->nextSeqDesc++; + + // Set the sequence descriptor return value. + *pSeqDesc = pSeqInfo->seqDesc; + + // Engine specific command post handling. + status = flcnableQueueCmdPostExtension(device, pFlcn->pFlcnable, pCmd, + pMsg, pPayload, pTimeout, pSeqInfo); + if (NV_OK != status) + { + flcnQueueSeqInfoRel(device, pFlcn, pSeqInfo); + goto flcnQueueCmdPostNonBlocking_exit; + } + + // Enqueue the command in the target FLCN command queue. + status = flcnQueueCmdWrite(device, pFlcn, queueIdLogical, pCmd, pTimeout); + if (status == NV_OK) + { + pSeqInfo->seqState = FLCN_QMGR_SEQ_STATE_USED; + } + else + { + // On failure cleanup any allocations and release the sequence number. 
+ (void)flcnQueueSeqInfoFree(device, pFlcn, pSeqInfo); + // Already have not OK status from flcnQueueCmdWrite + + flcnQueueSeqInfoRel(device, pFlcn, pSeqInfo); + } + +flcnQueueCmdPostNonBlocking_exit: + if (NV_OK != status) + { + if (NULL != pSeqInfo) + { + flcnQueueSeqInfoRel(device, pFlcn, pSeqInfo); + } + } + return status; +} + +/*! + * @brief Validate that the basic CMD params are properly formed. + * + * @return Boolean if command was properly formed. + */ +static NvBool +_flcnQueueCmdValidate +( + nvswitch_device *device, + PFLCN pFlcn, + PRM_FLCN_CMD pCmd, + PRM_FLCN_MSG pMsg, + void *pPayload, + NvU32 queueIdLogical +) +{ + // Command pointer may never be NULL. + if (pCmd == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: command pointer is NULL\n", + __FUNCTION__); + return NV_FALSE; + } + + // Each command must contain a header (at minimum). + if (pCmd->cmdGen.hdr.size < RM_FLCN_QUEUE_HDR_SIZE) + { + NVSWITCH_PRINT(device, ERROR, + "%s: invalid command (illegal size = 0x%x)\n", + __FUNCTION__, pCmd->cmdGen.hdr.size); + return NV_FALSE; + } + + // Call engine specific command input validation. + return flcnableQueueCmdValidate(device, pFlcn->pFlcnable, pCmd, pMsg, + pPayload, queueIdLogical); +} + +/*! + * @brief Wait for a command to complete on the falcon. + * + * Continuously processes messages posted to the message-queue waiting for the + * falcon to post the response to the command associated with 'seqDesc' or until + * a timeout occurs. This function does not return the data the falcon responded + * with. + * + * @note: Use this function sparingly. It should generally only be used when a + * synchronous response from the falcon is absolutely required. Common use-cases + * are processing of RmCtrl-s where call cannot return until a command completes + * or when executing some linear process where the next step the sequence cannot + * commence until a previously issued command completes. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * @param[in] seqDesc Command identifier issued when it was submitted + * @param[in,out] pTimeout Timeout struct. used while waiting for completion + * + * @return NV_OK + * Falcon command completion received. + * + * @return NV_ERR_INVALID_REQUEST + * The command-descriptor doesn't correspond to any in-flight commands. + * + * @return NV_ERR_TIMEOUT + * A timeout occurred before the command completed. + */ +NV_STATUS +_flcnQueueCmdWait_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 seqDesc, + NVSWITCH_TIMEOUT *pTimeout +) +{ + NvBool bKeepPolling; + + if (_flcnQueueCmdStatus_IMPL(device, pFlcn, seqDesc) == FLCN_CMD_STATE_NONE) + { + return NV_ERR_INVALID_REQUEST; + } + + do + { + bKeepPolling = (nvswitch_timeout_check(pTimeout)) ? NV_FALSE : NV_TRUE; + + // + // Directly invoke interrupt handler to process FLCN response. + // This is needed for the following scenarios: + // 1. When interrupts are disabled (init path) + // 2. When interrupts are enabled (ioctl, background task), we hold + // the device_mutex which nvswitch_isr_thread also tries to acquire. 
+ // + soeService_HAL(device, (PSOE)pFlcn->pFlcnable); + + if (_flcnQueueCmdStatus_IMPL(device, pFlcn, seqDesc) == FLCN_CMD_STATE_DONE) + { + return NV_OK; + } + } while (bKeepPolling); + + return NV_ERR_TIMEOUT; +} + +void +flcnQueueSetupHal +( + FLCN *pFlcn +) +{ + flcn_hal *pHal = pFlcn->pHal; + + pHal->queueCmdWrite = _flcnQueueCmdWrite_IMPL; + + pHal->queueSeqInfoFind = _flcnQueueSeqInfoFind_IMPL; + pHal->queueSeqInfoAcq = _flcnQueueSeqInfoAcq_IMPL; + pHal->queueSeqInfoRel = _flcnQueueSeqInfoRel_IMPL; + pHal->queueSeqInfoStateInit = _flcnQueueSeqInfoStateInit_IMPL; + pHal->queueSeqInfoCancelAll = _flcnQueueSeqInfoCancelAll_IMPL; + pHal->queueSeqInfoFree = _flcnQueueSeqInfoFree_IMPL; + + pHal->queueEventRegister = _flcnQueueEventRegister_IMPL; + pHal->queueEventUnregister = _flcnQueueEventUnregister_IMPL; + pHal->queueEventHandle = _flcnQueueEventHandle_IMPL; + pHal->queueResponseHandle = _flcnQueueResponseHandle_IMPL; + + pHal->queueCmdStatus = _flcnQueueCmdStatus_IMPL; + pHal->queueCmdCancel = _flcnQueueCmdCancel_IMPL; + pHal->queueCmdPostNonBlocking = _flcnQueueCmdPostNonBlocking_IMPL; + pHal->queueCmdWait = _flcnQueueCmdWait_IMPL; +} + diff --git a/src/common/nvswitch/kernel/flcn/flcnqueuerd_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcnqueuerd_nvswitch.c new file mode 100644 index 000000000..5a06856b6 --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/flcnqueuerd_nvswitch.c @@ -0,0 +1,441 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" + +#include "flcn/flcn_nvswitch.h" +#include "flcn/flcnqueue_nvswitch.h" + +#include "rmflcncmdif_nvswitch.h" + +/*! 
+ * @file flcnqueuerd_nvswitch.c + */ + +static NV_STATUS _flcnQueueReaderGetNextHeader(nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, void *pData, NvBool bMsg); +static NV_STATUS _flcnQueueReaderReadHeader (nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, void *pData, NvBool bMsg); +static NV_STATUS _flcnQueueReaderReadBody (nvswitch_device *, PFLCN, FLCNQUEUE *pQueue, void *pData, NvBool bMsg); + +#define NVSWITCH_PRINT_QUEUE_READER_ERR_CLOSING(id, status) \ + NVSWITCH_PRINT(device, ERROR, \ + "%s: error while closing queue (id=0x%x, status=" \ + "0x%x).\n", __FUNCTION__, (id), (status)) + +#define NVSWITCH_PRINT_QUEUE_READER_INVALID_UNITID(id, unitId) \ + NVSWITCH_PRINT(device, ERROR, \ + "%s: invalid unit-id read from queue (qid=0x%x, " \ + "uid=0x%x).\n", __FUNCTION__, (id), (unitId)) + +#define NVSWITCH_PRINT_QUEUE_READER_ERR_OPENING(id, status) \ + NVSWITCH_PRINT(device, WARN, \ + "%s: error while opening queue (id=0x%x, status=" \ + "0x%x).\n", __FUNCTION__, (id), (status)) + +#define NVSWITCH_PRINT_QUEUE_READER_ERR_READING(id, status) \ + NVSWITCH_PRINT(device, ERROR, \ + "%s: error while reading from queue (id=0x%x, " \ + "status=0x%x).\n", __FUNCTION__, (id), (status)) + +#define NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGBODY(id, status) \ + NVSWITCH_PRINT(device, ERROR, \ + "%s: error reading body from queue (id=0x%x, " \ + "status=0x%x).\n", __FUNCTION__, (id), (status)) + +#define NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGHDR(id, status) \ + NVSWITCH_PRINT(device, ERROR, \ + "%s: error reading header from queue (id=" \ + "0x%x, status=0x%x).\n", __FUNCTION__, (id), (status)) + +#define NVSWITCH_PRINT_QUEUE_READER_ERR_READING_UNKNOWN_DATA(id, status) \ + NVSWITCH_PRINT(device, ERROR, \ + "%s: unrecognizable data read from queue (id=0x%x, " \ + "status=0x%x).\n", __FUNCTION__, (id), (status)) + +#define NVSWITCH_PRINT_QUEUE_READER_PRINT_HDR_READ_INFO(offset) \ + NVSWITCH_PRINT(device, INFO, \ + "%s: Reading a header from DMEM @ 0x%x.\n", \ + __FUNCTION__, (offset)) + + +/*! + * Reads the queue and retrieves the next unread message/command. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn Falcon object pointer + * @param[in] queueLogId Logical ID of the queue + * @param[in,out] pData The buffer to fill with the queue data + * @param[in] bMsg Message/Command + * + * @return NV_OK when the read operation is successful. + * NV_ERR_NOT_READY Queue is Empty + * NV_ERR_GENERIC otherwise. + */ +static NV_STATUS +_flcnQueueReadData_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 queueLogId, + void *pData, + NvBool bMsg +) +{ + NV_STATUS status = NV_OK; + NV_STATUS retStatus = NV_OK; + FLCNQUEUE *pQueue; + PFALCON_QUEUE_INFO pQueueInfo; + RM_FLCN_QUEUE_HDR bufferGenHdr; + + NVSWITCH_ASSERT(pFlcn != NULL); + + pQueueInfo = pFlcn->pQueueInfo; + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueueInfo->pQueues != NULL); + + pQueue = &pQueueInfo->pQueues[queueLogId]; + + // + // If the queue is empty, simply return NV_ERR_NOT_READY to indicate that a message is + // not available. 
+ // + if (pQueue->isEmpty(device, pFlcn, pQueue)) + { + return NV_ERR_NOT_READY; + } + + status = pQueue->openRead(device, pFlcn, pQueue); + if (status != NV_OK) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_OPENING(pQueue->queueLogId, status); + return status; + } + + status = _flcnQueueReaderGetNextHeader(device, pFlcn, pQueue, pData, bMsg); + if (status != NV_OK) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGHDR(pQueue->queueLogId, status); + retStatus = status; + } + + else + { + bufferGenHdr = bMsg ? ((RM_FLCN_MSG *)pData)->msgGen.hdr : + ((RM_FLCN_CMD *)pData)->cmdGen.hdr; + // + // If the size of the message in the header is greater than the size of + // the structure which will hold the message, then log a breakpoint. + // Copying data more than the structure can hold can lead to buffer overrun + // on the stack and lead to fatal errors. Logging a breakpoint here will + // make sure that we can catch this condition in release drivers by looking + // at the RmJournal. + // + // Note: When this happens, we are essentially not purging the message queue + // so the TAIL pointer will still point to the start of this message. + // The next time RM gets a new message from Falcon, it will try to purge this + // message and will keep on looping trying to purge. It will eventually + // bugcheck, but at least the breakpoint in the logs will point to this bug + // + if ((bufferGenHdr.size > pQueueInfo->maxMsgSize) && (bMsg)) + { + retStatus = NV_ERR_GENERIC; + NVSWITCH_ASSERT(0); + } + // + // Check the message header to see if the message has a body. If it does, + // read it. It is not considered an error for a message to contain only + // a header. + // + else if (bufferGenHdr.size > RM_FLCN_QUEUE_HDR_SIZE) + { + status = _flcnQueueReaderReadBody(device, pFlcn, pQueue, pData, bMsg); + if (status != NV_OK) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGBODY(pQueue->queueLogId, status); + retStatus = status; + } + } + } + + // + // Queue needs to be closed even if there is error in + // reading header/message above + // + status = pQueue->close(device, pFlcn, pQueue, NV_TRUE); + if (status != NV_OK) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_CLOSING(pQueue->queueLogId, status); + + // + // Update the retStatus only if there was no error reading + // header/message earlier. + // + if (NV_OK == retStatus) + { + retStatus = status; + } + } + + return retStatus; +} + +/*! + * @brief Retrieves the next valid header from the queue. + * + * This function attempts to read a message header from the message queue. Upon + * a successful read, the header is be validated and a check is made to see if + * the header read is the rewind header. If found, the queue is rewound and + * another attempt is be made to read a valid header. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn Falcon object pointer + * @param[in] pQueue The queue to read from + * @param[in] pData The buffer to fill-in + * @param[in] bMsg Msg/Cmd + * + * @return 'NV_OK' If a VALID message is read from the message queue. + * @return 'NV_ERR_GENERIC' Otherwise. 
+ */ +static NV_STATUS +_flcnQueueReaderGetNextHeader +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + void *pData, + NvBool bMsg +) +{ + NV_STATUS status; + RM_FLCN_QUEUE_HDR bufferGenHdr; + + // attempt to read a message header from the message queue + status = _flcnQueueReaderReadHeader(device, pFlcn, pQueue, pData, bMsg); + if (status != NV_OK) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGHDR(pQueue->queueLogId, status); + return NV_ERR_GENERIC; + } + + bufferGenHdr = bMsg ? ((RM_FLCN_MSG *)pData)->msgGen.hdr : + ((RM_FLCN_CMD *)pData)->cmdGen.hdr; + + // + // If the rewind header is received, rewind the message queue and re- + // attempt to read a message header. + // + if (bufferGenHdr.unitId == RM_FLCN_UNIT_ID_REWIND) + { + pQueue->rewind(device, pFlcn, pQueue); + status = _flcnQueueReaderReadHeader(device, pFlcn, pQueue, pData, bMsg); + if (status != NV_OK) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_READING_MSGHDR(pQueue->queueLogId, status); + return NV_ERR_GENERIC; + } + } + + bufferGenHdr = bMsg ? ((RM_FLCN_MSG *)pData)->msgGen.hdr : + ((RM_FLCN_CMD *)pData)->cmdGen.hdr; + // + // Validate the header's unit identifier. This step is performed AFTER the + // rewind check as an optimization in the event that we did read a rewind + // message. In the event of receiving an invalid unit-id, the rewind check + // would also have failed. + // + if (!RM_FLCN_UNIT_ID_IS_VALID(pFlcn->pQueueInfo, bufferGenHdr.unitId)) + { + NVSWITCH_PRINT_QUEUE_READER_INVALID_UNITID(pQueue->queueLogId, bufferGenHdr.unitId); + return NV_ERR_GENERIC; + } + return NV_OK; +} + +/*! + * @brief Reads the body of a message/command into the buffer. + * + * Simply performs a read operation on a previously opened queue in attempt to + * read a message body. This function does not make any attempts to interpret + * the body's data. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn Falcon object pointer + * @param[in] pQueue The queue to read from + * @param[in] pData The buffer to fill-in + * @param[in] bMsg Msg/Cmd + * + * @return 'NV_OK' If a message is read from the message queue. + * @return 'NV_ERR_GENERIC' Otherwise. + */ +static NV_STATUS +_flcnQueueReaderReadBody +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + void *pData, + NvBool bMsg +) +{ + NvU32 bytesRead; + NvU32 readSize; + NV_STATUS status; + RM_FLCN_QUEUE_HDR bufferGenHdr; + + NVSWITCH_ASSERT(!pQueue->isEmpty(device, pFlcn, pQueue)); + + bufferGenHdr = bMsg ? ((RM_FLCN_MSG *)pData)->msgGen.hdr : + ((RM_FLCN_CMD *)pData)->cmdGen.hdr; + + // + // The header contains the size to read for the message/command body. Note that + // size in the header accounts for the size of the header itself. + // + readSize = bufferGenHdr.size - RM_FLCN_QUEUE_HDR_SIZE; + + if(bMsg) + { + status = pQueue->pop(device, pFlcn, pQueue, &((RM_FLCN_MSG *)pData)->msgGen.msg, + readSize, &bytesRead); + } + else + { + status = pQueue->pop(device, pFlcn, pQueue, &((RM_FLCN_CMD *)pData)->cmdGen.cmd, + readSize, &bytesRead); + } + + if (status != NV_OK) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_READING(pQueue->queueLogId, status); + return status; + } + + // + // The number of bytes should always be greater than zero in virtue of the + // fact the queue is known to be non-empty at this point. + // + NVSWITCH_ASSERT(bytesRead != 0); + + // + // Verify that enough data is read to constitute a full message body. 
+ // Anything less is considered a logic error as it indicates that we are + // out of sync with the data that's in the queue (ie. we cannot recognize + // it). This is not expected to occur. + // + if (bytesRead != readSize) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_READING_UNKNOWN_DATA(pQueue->queueLogId, status); + NVSWITCH_ASSERT(0); + return NV_ERR_GENERIC; + } + return NV_OK; +} + +/*! + * @brief Read a message/command header from the given queue. + * + * Simply performs a read operation on a previously opened queue in attempt to + * read a message header. This function does not make any attempts to + * interpret or validate the message header + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn Falcon object pointer + * @param[in] pQueue The queue to read from + * @param[in] pData The buffer to fill-in + * @param[in] bMsg Msg/Cmd + * + * @return 'NV_OK' If a message is read from the message queue. + * @return 'NV_ERR_INVALID_STATE' If queue is empty. + * @return 'NV_ERR_GENERIC' Otherwise. + */ +static NV_STATUS +_flcnQueueReaderReadHeader +( + nvswitch_device *device, + PFLCN pFlcn, + FLCNQUEUE *pQueue, + void *pData, + NvBool bMsg +) +{ + NvU32 bytesRead; + NV_STATUS status; + if (pQueue->isEmpty(device, pFlcn, pQueue)) + { + NVSWITCH_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + NVSWITCH_PRINT_QUEUE_READER_PRINT_HDR_READ_INFO(pQueue->position); + + if(bMsg) + { + // read a header's worth of data from the queue + status = pQueue->pop( + device, pFlcn, pQueue, &((RM_FLCN_MSG *)pData)->msgGen.hdr, + RM_FLCN_QUEUE_HDR_SIZE, &bytesRead); + } + else + { + status = pQueue->pop( + device, pFlcn, pQueue, &((RM_FLCN_CMD *)pData)->cmdGen.hdr, + RM_FLCN_QUEUE_HDR_SIZE, &bytesRead); + } + + if (status != NV_OK) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_READING(pQueue->queueLogId, status); + return status; + } + + // + // The number of bytes should always be greater than zero in virtue of the + // fact the queue is known to be non-empty at this point. + // + NVSWITCH_ASSERT(bytesRead != 0); + + // + // Verify that enough data is read to constitute a full header. Anything + // less is considered a logic error as it indicates that we are out of sync + // with the data that's in the queue (ie. we cannot recognize it). This is + // not expected to occur. + // + if (bytesRead != RM_FLCN_QUEUE_HDR_SIZE) + { + NVSWITCH_PRINT_QUEUE_READER_ERR_READING_UNKNOWN_DATA(pQueue->queueLogId, status); + NVSWITCH_ASSERT(0); + return NV_ERR_GENERIC; + } + return NV_OK; +} + +void +flcnQueueRdSetupHal +( + FLCN *pFlcn +) +{ + flcn_hal *pHal = pFlcn->pHal; + + pHal->queueReadData = _flcnQueueReadData_IMPL; +} + diff --git a/src/common/nvswitch/kernel/flcn/flcnrtosdebug_nvswitch.c b/src/common/nvswitch/kernel/flcn/flcnrtosdebug_nvswitch.c new file mode 100644 index 000000000..f9292b789 --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/flcnrtosdebug_nvswitch.c @@ -0,0 +1,129 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file flcnrtosdebug_nvswitch.c + * @brief Provides support for capturing RTOS's state in case of Falcon + * related failures. + */ + +/* ------------------------- Includes --------------------------------------- */ + +#include "common_nvswitch.h" + +#include "flcn/flcn_nvswitch.h" +#include "flcn/flcnable_nvswitch.h" +#include "rmflcncmdif_nvswitch.h" + +#include "flcn/flcnrtosdebug_nvswitch.h" + +/*! + * Dump the complete stack by iterating from tail to head pointer + * + * @param[in] device nvswitch_device pointer + * @param[in] pFlcn FLCN pointer + * @param[in] queueLogId Logical ID of the queue + * @param[in] pFlcnCmd Pointer to the command buffer to read + * + */ +NV_STATUS +flcnRtosDumpCmdQueue_nvswitch +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 queueLogId, + RM_FLCN_CMD *pFlcnCmd +) +{ + FLCNQUEUE *pQueue; + NvU32 head; + NvU32 tail; + NvU32 tailcache; + + NV_STATUS status = NV_OK; + PFALCON_QUEUE_INFO pQueueInfo = pFlcn->pQueueInfo; + + pQueue = &pQueueInfo->pQueues[queueLogId]; + (void)pQueue->tailGet(device, pFlcn, pQueue, &tail); + (void)pQueue->headGet(device, pFlcn, pQueue, &head); + + // caching the current tail pointer + (void)pQueue->tailGet(device, pFlcn, pQueue, &tailcache); + + if (head == tail) + { + return status; + } + + while (tail != head) + { + status = flcnQueueReadData(device,pFlcn, + queueLogId, + pFlcnCmd, NV_FALSE); + NVSWITCH_PRINT(device, ERROR, "%s:" \ + "Cmd_Dump UnitId %d size %d sq %d ctl %d cmd %d\n", + __FUNCTION__, + pFlcnCmd->cmdGen.hdr.unitId, + pFlcnCmd->cmdGen.hdr.size, + pFlcnCmd->cmdGen.hdr.seqNumId, + pFlcnCmd->cmdGen.hdr.ctrlFlags, + pFlcnCmd->cmdGen.cmd); + + (void)pQueue->tailGet(device, pFlcn, pQueue, &tail); + } + + // restoring the cached tail pointer + (void)pQueue->tailSet(device, pFlcn, pQueue, tailcache); + + return status; +} + +/*! 
+ * @brief Populates falcon DMEM pointer in its internal debug info structure + * + * @param[in] device GPU object pointer + * @param[in] pFlcn FLCN pointer + * @param[in] debugInfoDmemOffset DMEM offset of the falcon debug info + */ +static void +_flcnDbgInfoDmemOffsetSet_IMPL +( + nvswitch_device *device, + PFLCN pFlcn, + NvU16 debugInfoDmemOffset +) +{ + pFlcn->debug.debugInfoDmemOffset = debugInfoDmemOffset; +} + +void +flcnRtosSetupHal +( + FLCN *pFlcn +) +{ + flcn_hal *pHal = pFlcn->pHal; + + pHal->dbgInfoDmemOffsetSet = _flcnDbgInfoDmemOffsetSet_IMPL; +} + diff --git a/src/common/nvswitch/kernel/flcn/v03/flcn0300_nvswitch.c b/src/common/nvswitch/kernel/flcn/v03/flcn0300_nvswitch.c new file mode 100644 index 000000000..362e241cd --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/v03/flcn0300_nvswitch.c @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file flcn0300_nvswitch.c + * @brief Provides the implementation for all falcon 3.0 HAL interfaces. + */ + +#include "nvmisc.h" +#include "common_nvswitch.h" + +#include "flcn/flcnable_nvswitch.h" +#include "flcn/flcn_nvswitch.h" + +#include "nvswitch/lr10/dev_falcon_v4.h" + +/*! + * @brief Get information about the falcon core + * + * @param[in] device nvswitch_device pointer + * @param[in] pFlcn FLCN pointer + * + * @returns nothing + */ +static void +_flcnGetCoreInfo_v03_00 +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NvU32 hwcfg1 = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_HWCFG1); + + if (FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG1, _SECURITY_MODEL, _HEAVY, hwcfg1)) + { + NVSWITCH_PRINT(device, INFO, + "%s: Engine '%s' is using the heavy security model\n", + __FUNCTION__, flcnGetName_HAL(device, pFlcn)); + } + + // Save off the security model. + pFlcn->securityModel = DRF_VAL(_PFALCON, _FALCON_HWCFG1, _SECURITY_MODEL, hwcfg1); + + // Combine Falcon core revision and subversion for easy version comparison. + pFlcn->coreRev = flcnableReadCoreRev(device, pFlcn->pFlcnable); + + pFlcn->supportsDmemApertures = FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG1, _DMEM_APERTURES, _ENABLE, hwcfg1); +} + +/** + * @brief set hal function pointers for functions defined in v03_00 (i.e. this file) + * + * this function has to be at the end of the file so that all the + * other functions are already defined. 
+ * + * @param[in] pFlcn The flcn for which to set hals + */ +void +flcnSetupHal_v03_00 +( + PFLCN pFlcn +) +{ + flcn_hal *pHal = pFlcn->pHal; + + pHal->getCoreInfo = _flcnGetCoreInfo_v03_00; +} + diff --git a/src/common/nvswitch/kernel/flcn/v04/flcn0400_nvswitch.c b/src/common/nvswitch/kernel/flcn/v04/flcn0400_nvswitch.c new file mode 100644 index 000000000..6ab910f20 --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/v04/flcn0400_nvswitch.c @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file flcn0400_nvswitch.c + * @brief Provides the implementation for all falcon 04.00 HAL interfaces. + */ + +#include "flcn/flcn_nvswitch.h" + +/** + * @brief set hal function pointers for functions defined in v04_00 (i.e. this file) + * + * this function has to be at the end of the file so that all the + * other functions are already defined. + * + * @param[in] pFlcn The flcn for which to set hals + */ +void +flcnSetupHal_v04_00 +( + PFLCN pFlcn +) +{ + // default to using definitions from v03_00 + flcnSetupHal_v03_00(pFlcn); +} diff --git a/src/common/nvswitch/kernel/flcn/v05/flcn0501_nvswitch.c b/src/common/nvswitch/kernel/flcn/v05/flcn0501_nvswitch.c new file mode 100644 index 000000000..4ccef22a3 --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/v05/flcn0501_nvswitch.c @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file flcn0501_nvswitch.c + * @brief Provides the implementation for all falcon 5.1 HAL interfaces. + */ + +#include "flcn/flcn_nvswitch.h" + + +/** + * @brief set hal function pointers for functions defined in v05_01 (i.e. this file) + * + * this function has to be at the end of the file so that all the + * other functions are already defined. + * + * @param[in] pFlcn The flcn for which to set hals + */ +void +flcnSetupHal_v05_01 +( + PFLCN pFlcn +) +{ + // default to using definitions from v04_00 + flcnSetupHal_v04_00(pFlcn); +} diff --git a/src/common/nvswitch/kernel/flcn/v06/flcn0600_nvswitch.c b/src/common/nvswitch/kernel/flcn/v06/flcn0600_nvswitch.c new file mode 100644 index 000000000..3e047a354 --- /dev/null +++ b/src/common/nvswitch/kernel/flcn/v06/flcn0600_nvswitch.c @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file flcn0600_nvswitch.c + * @brief Provides the implementation for all falcon 06.00 HAL interfaces. + */ + +#include "flcn/flcn_nvswitch.h" + + +/** + * @brief set hal function pointers for functions defined in v06_00 (i.e. this file) + * + * this function has to be at the end of the file so that all the + * other functions are already defined. + * + * @param[in] pFlcn The flcn for which to set hals + */ +void +flcnSetupHal_v06_00 +( + PFLCN pFlcn +) +{ + // default to using definitions from v05_01 + flcnSetupHal_v05_01(pFlcn); +} diff --git a/src/common/nvswitch/kernel/inc/bios_nvswitch.h b/src/common/nvswitch/kernel/inc/bios_nvswitch.h new file mode 100644 index 000000000..2ba9166e9 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/bios_nvswitch.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _BIOS_NVSWITCH_H_ +#define _BIOS_NVSWITCH_H_ + +#include "common_nvswitch.h" + +NvlStatus nvswitch_bios_read(nvswitch_device *, NvU32, void *); +NvlStatus nvswitch_bios_read_size(nvswitch_device *, NvU32 *); +NvlStatus nvswitch_bios_get_image(nvswitch_device *device); + +#endif //_BIOS_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/common_nvswitch.h b/src/common/nvswitch/kernel/inc/common_nvswitch.h new file mode 100644 index 000000000..c5d53322c --- /dev/null +++ b/src/common/nvswitch/kernel/inc/common_nvswitch.h @@ -0,0 +1,537 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _COMMON_NVSWITCH_H_ +#define _COMMON_NVSWITCH_H_ + +#ifdef INCLUDE_NVLINK_LIB +#include "nvlink.h" +#endif + +#include "export_nvswitch.h" +#include "error_nvswitch.h" +#include "io_nvswitch.h" +#include "rom_nvswitch.h" +#include "haldef_nvswitch.h" +#include "nvctassert.h" +#include "flcn/flcnable_nvswitch.h" +#include "inforom/inforom_nvswitch.h" +#include "spi_nvswitch.h" +#include "smbpbi_nvswitch.h" +#include "nvCpuUuid.h" + +#define NVSWITCH_GET_BIT(v, p) (((v) >> (p)) & 1) +#define NVSWITCH_SET_BIT(v, p) ((v) | NVBIT(p)) +#define NVSWITCH_CLEAR_BIT(v, p) ((v) & ~NVBIT(p)) +#define NVSWITCH_MASK_BITS(n) (~(0xFFFFFFFF << (n))) + +static NV_INLINE NvBool nvswitch_test_flags(NvU32 val, NvU32 flags) +{ + return !!(val & flags); +} + +static NV_INLINE void nvswitch_set_flags(NvU32 *val, NvU32 flags) +{ + *val |= flags; +} + +static NV_INLINE void nvswitch_clear_flags(NvU32 *val, NvU32 flags) +{ + *val &= ~flags; +} + +// Destructive operation to reverse bits in a mask +#define NVSWITCH_REVERSE_BITMASK_32(numBits, mask) \ +{ \ + NvU32 i, reverse = 0; \ + FOR_EACH_INDEX_IN_MASK(32, i, mask) \ + { \ + reverse |= NVBIT((numBits - 1) - i); \ + } \ + FOR_EACH_INDEX_IN_MASK_END; \ + \ + mask = reverse; \ +} + +#define NVSWITCH_CHECK_STATUS(_d, _status) \ + if (_status != NVL_SUCCESS) \ + { \ + NVSWITCH_PRINT(_d, MMIO, "%s(%d): status=%d\n", \ + __FUNCTION__, __LINE__, \ + _status); \ + } + +#define IS_RTLSIM(device) (device->is_rtlsim) +#define IS_FMODEL(device) (device->is_fmodel) +#define IS_EMULATION(device) (device->is_emulation) + +#define NVSWITCH_DEVICE_NAME "nvswitch" +#define NVSWITCH_LINK_NAME "link" + +// Max size of sprintf("%d", valid_instance) compile time check +#if NVSWITCH_DEVICE_INSTANCE_MAX < 100 +#define NVSWITCH_INSTANCE_LEN 2 +#endif + +#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0]))) + +#define NVSWITCH_DBG_LEVEL NVSWITCH_DBG_LEVEL_INFO + +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) +#define NVSWITCH_PRINT(_d, _lvl, _fmt, ...) \ + ((NVSWITCH_DBG_LEVEL <= NVSWITCH_DBG_LEVEL_ ## _lvl) ? \ + nvswitch_os_print(NVSWITCH_DBG_LEVEL_ ## _lvl, \ + "%s[%-5s]: " _fmt, \ + ((_d == NULL) ? \ + "nvswitchx" : \ + ((nvswitch_device *)_d)->name), \ + #_lvl, \ + ## __VA_ARGS__) : \ + ((void)(0)) \ + ) +#else + #define NVSWITCH_PRINT(_d, _lvl, _fmt, ...) ((void)0) +#endif + +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) +#define nvswitch_os_malloc(_size) \ + nvswitch_os_malloc_trace(_size, __FILE__, __LINE__) +#else +#define nvswitch_os_malloc(_size) \ + nvswitch_os_malloc_trace(_size, NULL, 0) +#endif + +// +// This macro should be used to check assertion statements and print Error messages. +// +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) +#define NVSWITCH_ASSERT(_cond) \ + nvswitch_os_assert_log((_cond), "NVSwitch: Assertion failed in %s() at %s:%d\n", \ + __FUNCTION__ , __FILE__, __LINE__) +#else +#define NVSWITCH_ASSERT(_cond) \ + nvswitch_os_assert_log((_cond), "NVSwitch: Assertion failed \n") +#endif + +#define NVSWITCH_ASSERT_ERROR_INFO(errorCategory, errorInfo) NVSWITCH_ASSERT(0x0) +#define NVSWITCH_ASSERT_INFO(errCode, errLinkMask, errSubcode) NVSWITCH_ASSERT(0x0) + +// +// This macro should be used cautiously as it prints information in the release +// drivers. +// +#define NVSWITCH_PRINT_SXID(_d, _sxid, _fmt, ...) 
\ + do \ + { \ + NVSWITCH_ASSERT(nvswitch_translate_hw_error(_sxid) != NVSWITCH_NVLINK_HW_GENERIC); \ + nvswitch_os_print(NVSWITCH_DBG_LEVEL_ERROR, \ + "nvidia-%s: SXid (PCI:" NVLINK_PCI_DEV_FMT "): %05d, " _fmt, \ + (_d)->name, NVLINK_PCI_DEV_FMT_ARGS(&(_d)->nvlink_device->pciInfo), _sxid, \ + ##__VA_ARGS__); \ + nvswitch_lib_smbpbi_log_sxid(_d, _sxid, _fmt, ##__VA_ARGS__); \ + nvswitch_inforom_bbx_add_sxid(_d, _sxid, 0, 0, 0); \ + } while(0) + +#define NVSWITCH_DEV_CMD_DISPATCH_WITH_PRIVATE_DATA(cmd, function, type, private)\ + case cmd: \ + { \ + if (sizeof(type) == size) \ + { \ + retval = function(device, params, private); \ + } \ + else \ + { \ + retval = -NVL_BAD_ARGS; \ + } \ + break; \ + } + +#define NVSWITCH_DEV_CMD_DISPATCH_HELPER(cmd, supported, function, type) \ + case cmd: \ + { \ + if (!supported) \ + { \ + retval = -NVL_ERR_NOT_SUPPORTED; \ + } \ + else if (sizeof(type) == size) \ + { \ + retval = function(device, params); \ + } \ + else \ + { \ + retval = -NVL_BAD_ARGS; \ + } \ + break; \ + } + +#define NVSWITCH_DEV_CMD_DISPATCH(cmd, function, type) \ + NVSWITCH_DEV_CMD_DISPATCH_HELPER(cmd, NV_TRUE, function, type) + +#define NVSWITCH_MODS_CMDS_SUPPORTED NV_FALSE + +#if defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS) +#define NVSWITCH_TEST_CMDS_SUPPORTED NV_TRUE +#else +#define NVSWITCH_TEST_CMDS_SUPPORTED NV_FALSE +#endif + +#define NVSWITCH_DEV_CMD_DISPATCH_MODS(cmd, function, type) \ + NVSWITCH_DEV_CMD_DISPATCH_HELPER(cmd, NVSWITCH_MODS_CMDS_SUPPORTED, function, type) + +#define NVSWITCH_DEV_CMD_DISPATCH_TEST(cmd, function, type) \ + NVSWITCH_DEV_CMD_DISPATCH_HELPER(cmd, NVSWITCH_TEST_CMDS_SUPPORTED, function, type) + +#define NVSWITCH_MAX_NUM_LINKS 100 +#if NVSWITCH_MAX_NUM_LINKS <= 100 +#define NVSWITCH_LINK_INSTANCE_LEN 2 +#endif + +extern const nvlink_link_handlers nvswitch_link_handlers; + +// +// link_info is used to store private link information +// +typedef struct +{ + char name[sizeof(NVSWITCH_LINK_NAME) + NVSWITCH_LINK_INSTANCE_LEN]; +} LINK_INFO; + +typedef struct +{ + NvU32 external_fabric_mgmt; + NvU32 txtrain_control; + NvU32 crossbar_DBI; + NvU32 link_DBI; + NvU32 ac_coupled_mask; + NvU32 ac_coupled_mask2; + NvU32 swap_clk; + NvU32 link_enable_mask; + NvU32 link_enable_mask2; + NvU32 bandwidth_shaper; + NvU32 ssg_control; + NvU32 skip_buffer_ready; + NvU32 enable_pm; + NvU32 chiplib_forced_config_link_mask; + NvU32 chiplib_forced_config_link_mask2; + NvU32 soe_dma_self_test; + NvU32 soe_disable; + NvU32 soe_enable; + NvU32 soe_boot_core; + NvU32 latency_counter; + NvU32 nvlink_speed_control; + NvU32 inforom_bbx_periodic_flush; + NvU32 inforom_bbx_write_periodicity; + NvU32 inforom_bbx_write_min_duration; + NvU32 ato_control; + NvU32 sto_control; + NvU32 minion_disable; + NvU32 set_ucode_target; + NvU32 set_simmode; + NvU32 set_smf_settings; + NvU32 select_uphy_tables; + NvU32 link_training_mode; + NvU32 i2c_access_control; + NvU32 link_recal_settings; + NvU32 crc_bit_error_rate_short; + NvU32 crc_bit_error_rate_long; +} NVSWITCH_REGKEY_TYPE; + +// +// Background tasks +// +typedef struct NVSWITCH_TASK +{ + struct NVSWITCH_TASK *next; + void (*task_fn)(nvswitch_device *); + NvU64 period_nsec; + NvU64 last_run_nsec; + NvU32 flags; +} NVSWITCH_TASK_TYPE; + +#define NVSWITCH_TASK_TYPE_FLAGS_ALWAYS_RUN 0x1 // Run even the if not initialized + +// +// PLL +// +typedef struct +{ + NvU32 src_freq_khz; + NvU32 M; + NvU32 N; + NvU32 PL; + NvU32 dist_mode; + NvU32 refclk_div; + NvU32 vco_freq_khz; + NvU32 freq_khz; +} NVSWITCH_PLL_INFO; + +// Per-unit 
interrupt masks +typedef struct +{ + NvU32 fatal; + NvU32 nonfatal; + NvU32 correctable; +} NVSWITCH_INTERRUPT_MASK; + +// BIOS Image +typedef struct +{ + // Size of the image. + NvU32 size; + + // pointer to the BIOS image. + NvU8* pImage; +} NVSWITCH_BIOS_IMAGE; + +struct NVSWITCH_CLIENT_EVENT +{ + NVListRec entry; + NvU32 eventId; + void *private_driver_data; +}; + +// +// common device information +// +struct nvswitch_device +{ +#ifdef INCLUDE_NVLINK_LIB + nvlink_device *nvlink_device; +#endif + + char name[sizeof(NVSWITCH_DEVICE_NAME) + NVSWITCH_INSTANCE_LEN]; + + void *os_handle; + NvU32 os_instance; + + NvBool is_emulation; + NvBool is_rtlsim; + NvBool is_fmodel; + + NVSWITCH_REGKEY_TYPE regkeys; + + // Tasks + NVSWITCH_TASK_TYPE *tasks; + + // Errors + NvU64 error_total; // Total errors recorded across all error logs + NVSWITCH_ERROR_LOG_TYPE log_FATAL_ERRORS; + NVSWITCH_ERROR_LOG_TYPE log_NONFATAL_ERRORS; + + NVSWITCH_FIRMWARE firmware; + + // HAL connectivity + nvswitch_hal hal; + + // SOE + FLCNABLE *pSoe; + + // DMA + NvU32 dma_addr_width; + + // InfoROM + struct inforom *pInforom; + + // I2C + struct NVSWITCH_OBJI2C *pI2c; + + // SMBPBI + struct smbpbi *pSmbpbi; + + // NVSWITCH_LINK_TYPE + NVSWITCH_LINK_TYPE link[NVSWITCH_MAX_LINK_COUNT]; + + // PLL + NVSWITCH_PLL_INFO switch_pll; + + // Device specific information + NvU32 chip_arch; // NVSWITCH_GET_INFO_INDEX_ARCH_* + NvU32 chip_impl; // NVSWITCH_GET_INFO_INDEX_IMPL_* + // + NvU32 chip_id; // NV_PSMC/PMC_BOOT_42_CHIP_ID_* + void * chip_device; + + // UUID in big-endian format + NvUuid uuid; + + // Fabric Manager timeout value for the heartbeat + NvU32 fm_timeout; + + // Fabric State + NVSWITCH_DRIVER_FABRIC_STATE driver_fabric_state; + NVSWITCH_DEVICE_FABRIC_STATE device_fabric_state; + NVSWITCH_DEVICE_BLACKLIST_REASON device_blacklist_reason; + NvU64 fabric_state_timestamp; + NvU32 fabric_state_sequence_number; + + // Full BIOS image + NVSWITCH_BIOS_IMAGE biosImage; + + // List of client events + NVListRec client_events_list; +}; + +#define NVSWITCH_IS_DEVICE_VALID(device) \ + ((device != NULL) && \ + (device->nvlink_device->type == NVLINK_DEVICE_TYPE_NVSWITCH)) + +#define NVSWITCH_IS_DEVICE_ACCESSIBLE(device) \ + (NVSWITCH_IS_DEVICE_VALID(device) && \ + (device->nvlink_device->pciInfo.bars[0].pBar != NULL)) + +#define NVSWITCH_IS_DEVICE_INITIALIZED(device) \ + (NVSWITCH_IS_DEVICE_ACCESSIBLE(device) && \ + (device->nvlink_device->initialized)) + +// +// Error Function defines +// + +NvlStatus +nvswitch_construct_error_log +( + NVSWITCH_ERROR_LOG_TYPE *errors, + NvU32 error_log_size, + NvBool overwritable +); + +void +nvswitch_destroy_error_log +( + nvswitch_device *device, + NVSWITCH_ERROR_LOG_TYPE *errors +); + +void +nvswitch_record_error +( + nvswitch_device *device, + NVSWITCH_ERROR_LOG_TYPE *errors, + NvU32 error_type, // NVSWITCH_ERR_* + NvU32 instance, + NvU32 subinstance, + NVSWITCH_ERROR_SRC_TYPE error_src, // NVSWITCH_ERROR_SRC_* + NVSWITCH_ERROR_SEVERITY_TYPE severity, // NVSWITCH_ERROR_SEVERITY_* + NvBool error_resolved, + void *data, + NvU32 data_size, + NvU32 line +); + +void +nvswitch_discard_errors +( + NVSWITCH_ERROR_LOG_TYPE *errors, + NvU32 error_discard_count +); + +void +nvswitch_get_error +( + nvswitch_device *device, + NVSWITCH_ERROR_LOG_TYPE *errors, + NVSWITCH_ERROR_TYPE *error_entry, + NvU32 error_idx, + NvU32 *error_count +); + +void +nvswitch_get_next_error +( + nvswitch_device *device, + NVSWITCH_ERROR_LOG_TYPE *errors, + NVSWITCH_ERROR_TYPE *error_entry, + NvU32 *error_count, + NvBool 
remove_from_list +); + +void +nvswitch_get_link_handlers +( + nvlink_link_handlers *nvswitch_link_handlers +); + +// +// Timeout checking +// + +typedef struct NVSWITCH_TIMEOUT +{ + NvU64 timeout_ns; +} NVSWITCH_TIMEOUT; + +#define NVSWITCH_INTERVAL_1USEC_IN_NS 1000LL +#define NVSWITCH_INTERVAL_50USEC_IN_NS 50000LL +#define NVSWITCH_INTERVAL_1MSEC_IN_NS 1000000LL +#define NVSWITCH_INTERVAL_5MSEC_IN_NS 5000000LL +#define NVSWITCH_INTERVAL_1SEC_IN_NS 1000000000LL + +#define NVSWITCH_HEARTBEAT_INTERVAL_NS NVSWITCH_INTERVAL_1SEC_IN_NS + +// This should only be used for short delays +#define NVSWITCH_NSEC_DELAY(nsec_delay) \ +do \ +{ \ + if (!IS_FMODEL(device)) \ + { \ + NVSWITCH_TIMEOUT timeout; \ + nvswitch_timeout_create(nsec_delay, &timeout); \ + do { } \ + while (!nvswitch_timeout_check(&timeout)); \ + } \ +} while(0) + +#define NVSWITCH_GET_CAP(tbl,cap,field) (((NvU8)tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)) +#define NVSWITCH_SET_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) |= (0?cap##field)) + +NvBool nvswitch_is_lr10_device_id(NvU32 device_id); + +NvU32 nvswitch_reg_read_32(nvswitch_device *device, NvU32 offset); +void nvswitch_reg_write_32(nvswitch_device *device, NvU32 offset, NvU32 data); +NvU64 nvswitch_read_64bit_counter(nvswitch_device *device, NvU32 lo_offset, NvU32 hi_offset); +void nvswitch_timeout_create(NvU64 timeout_ns, NVSWITCH_TIMEOUT *time); +NvBool nvswitch_timeout_check(NVSWITCH_TIMEOUT *time); +void nvswitch_task_create(nvswitch_device *device, +void (*task_fn)(nvswitch_device *device), NvU64 period_nsec, NvU32 flags); +void nvswitch_tasks_destroy(nvswitch_device *device); + +void nvswitch_free_chipdevice(nvswitch_device *device); +NvlStatus nvswitch_create_link(nvswitch_device *device, NvU32 link_number, nvlink_link **link); +nvlink_link* nvswitch_get_link(nvswitch_device *device, NvU8 link_id); +NvU64 nvswitch_get_enabled_link_mask(nvswitch_device *device); +void nvswitch_destroy_link(nvlink_link *link); +NvlStatus nvswitch_validate_pll_config(nvswitch_device *device, + NVSWITCH_PLL_INFO *switch_pll, + NVSWITCH_PLL_LIMITS default_pll_limits); + +NvlStatus nvswitch_poll_sublink_state(nvswitch_device *device, nvlink_link *link); +void nvswitch_setup_link_loopback_mode(nvswitch_device *device, NvU32 linkNumber); +void nvswitch_reset_persistent_link_hw_state(nvswitch_device *device, NvU32 linkNumber); +void nvswitch_store_topology_information(nvswitch_device *device, nvlink_link *link); + +NvlStatus nvswitch_set_training_mode(nvswitch_device *device); +NvBool nvswitch_is_link_in_reset(nvswitch_device *device, nvlink_link *link); +void nvswitch_apply_recal_settings(nvswitch_device *device, nvlink_link *link); +void nvswitch_init_buffer_ready(nvswitch_device *device, nvlink_link *link, NvBool bNportBufferReady); + +#endif //_COMMON_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/error_nvswitch.h b/src/common/nvswitch/kernel/inc/error_nvswitch.h new file mode 100644 index 000000000..fec751bcc --- /dev/null +++ b/src/common/nvswitch/kernel/inc/error_nvswitch.h @@ -0,0 +1,223 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _ERROR_NVSWITCH_H_ +#define _ERROR_NVSWITCH_H_ + +#include "nvtypes.h" + +#include "ctrl_dev_nvswitch.h" + +// +// Error logging +// + +typedef struct +{ + NvU32 addr; + NvU32 data; + NvU32 info; + NvU32 code; +} NVSWITCH_PRI_ERROR_LOG_TYPE; + +typedef struct +{ + NvU32 addr; + NvU32 data; + NvU32 write; + NvU32 dest; + NvU32 subId; + NvU32 errCode; + NvU32 raw_data[4]; +} NVSWITCH_PRI_TIMEOUT_ERROR_LOG_TYPE; + +typedef struct +{ + NvU32 raw_pending; // raw pending interrupt status + NvU32 mask; // localized mask for current handler + NvU32 raw_first; // raw first register + NvU32 raw_enable; // raw mask/enable register + NvU32 data[4]; // record of interrupt specific data +} NVSWITCH_INTERRUPT_LOG_TYPE; + +typedef struct +{ + NvU32 data[16]; +} NVSWITCH_RAW_ERROR_LOG_TYPE; + +#define NVSWITCH_ERROR_NEXT_LOCAL_NUMBER(log) (log->error_total) + +typedef struct +{ + NvU32 error_type; // NVSWITCH_ERR_* + NvU64 local_error_num; // Count of preceding errors (local error log) + NvU64 global_error_num; // Count of preceding errors (globally) + NVSWITCH_ERROR_SRC_TYPE error_src; // NVSWITCH_ERROR_SRC_* + NVSWITCH_ERROR_SEVERITY_TYPE severity; // NVSWITCH_ERROR_SEVERITY_* + NvU32 instance; // Used for link# or subengine instance + NvU32 subinstance; // Used for lane# or similar + NvBool error_resolved; + NvU64 timer_count; // NvSwitch timer count + NvU64 time; // Platform time, in ns + NvU32 line; + + union + { + NvU64 address; + NVSWITCH_PRI_ERROR_LOG_TYPE pri_error; + NVSWITCH_PRI_TIMEOUT_ERROR_LOG_TYPE pri_timeout; + NVSWITCH_INTERRUPT_LOG_TYPE intr; + NVSWITCH_RAW_ERROR_LOG_TYPE raw; + } data; +} NVSWITCH_ERROR_TYPE; + +typedef struct +{ + NvU32 error_start; // Start index within CB + NvU32 error_count; // Count of current errors in CB + NvU64 error_total; // Count of total errors logged + NvU32 error_log_size; // CB size + NVSWITCH_ERROR_TYPE *error_log; + NvBool overwritable; // Old CB entries can be overwritten + +} NVSWITCH_ERROR_LOG_TYPE; + +// +// Helpful error logging wrappers +// + +#define NVSWITCH_LOG_FATAL(_device, _errsrc, _errtype, _instance, _subinstance, _errresolved)\ + nvswitch_record_error( \ + _device, \ + &(_device->log_FATAL_ERRORS), \ + NVSWITCH_ERR ## _errtype, \ + _instance, _subinstance, \ + NVSWITCH_ERROR_SRC ## _errsrc, \ + NVSWITCH_ERROR_SEVERITY_FATAL, \ + _errresolved, \ + NULL, 0, \ + __LINE__) + +#define NVSWITCH_LOG_FATAL_DATA(_device, 
_errsrc, _errtype, _instance, _subinstance, _errresolved, _errdata) \ + nvswitch_record_error( \ + _device, \ + &(_device->log_FATAL_ERRORS), \ + NVSWITCH_ERR ## _errtype, \ + _instance, _subinstance, \ + NVSWITCH_ERROR_SRC ## _errsrc, \ + NVSWITCH_ERROR_SEVERITY_FATAL, \ + _errresolved, \ + _errdata, sizeof(*_errdata), \ + __LINE__) + + +#define NVSWITCH_LOG_NONFATAL(_device, _errsrc, _errtype, _instance, _subinstance, _errresolved) \ + nvswitch_record_error( \ + _device, \ + &(_device->log_NONFATAL_ERRORS), \ + NVSWITCH_ERR ## _errtype, \ + _instance, _subinstance, \ + NVSWITCH_ERROR_SRC ## _errsrc, \ + NVSWITCH_ERROR_SEVERITY_NONFATAL, \ + _errresolved, \ + NULL, 0, \ + __LINE__) + +#define NVSWITCH_LOG_NONFATAL_DATA(_device, _errsrc, _errtype, _instance, _subinstance, _errresolved, _errdata) \ + nvswitch_record_error( \ + _device, \ + &(_device->log_NONFATAL_ERRORS), \ + NVSWITCH_ERR ## _errtype, \ + _instance, _subinstance, \ + NVSWITCH_ERROR_SRC ## _errsrc, \ + NVSWITCH_ERROR_SEVERITY_NONFATAL, \ + _errresolved, \ + _errdata, sizeof(*_errdata), \ + __LINE__) + +#define NVSWITCH_NVLINK_ARCH_ERROR_NONE 0 +#define NVSWITCH_NVLINK_ARCH_ERROR_GENERIC 1 +#define NVSWITCH_NVLINK_ARCH_ERROR_HW_FATAL 2 +#define NVSWITCH_NVLINK_ARCH_ERROR_HW_CORRECTABLE 3 +#define NVSWITCH_NVLINK_ARCH_ERROR_HW_UNCORRECTABLE 4 + +#define NVSWITCH_NVLINK_HW_ERROR_NONE 0x0 +#define NVSWITCH_NVLINK_HW_GENERIC 0x1 +#define NVSWITCH_NVLINK_HW_INGRESS 0x2 +#define NVSWITCH_NVLINK_HW_EGRESS 0x3 +#define NVSWITCH_NVLINK_HW_FSTATE 0x4 +#define NVSWITCH_NVLINK_HW_TSTATE 0x5 +#define NVSWITCH_NVLINK_HW_ROUTE 0x6 +#define NVSWITCH_NVLINK_HW_NPORT 0x7 +#define NVSWITCH_NVLINK_HW_NVLCTRL 0x8 +#define NVSWITCH_NVLINK_HW_NVLIPT 0x9 +#define NVSWITCH_NVLINK_HW_NVLTLC 0xA +#define NVSWITCH_NVLINK_HW_DLPL 0xB +#define NVSWITCH_NVLINK_HW_AFS 0xC +#define NVSWITCH_NVLINK_HW_MINION 0xD +#define NVSWITCH_NVLINK_HW_HOST 0xE +#define NVSWITCH_NVLINK_HW_NXBAR 0XF +#define NVSWITCH_NVLINK_HW_SOURCETRACK 0x10 + +typedef NvU32 NVSWITCH_NVLINK_ARCH_ERROR; +typedef NvU32 NVSWITCH_NVLINK_HW_ERROR; + +NVSWITCH_NVLINK_HW_ERROR nvswitch_translate_hw_error(NVSWITCH_ERR_TYPE type); +void nvswitch_translate_error(NVSWITCH_ERROR_TYPE *error_entry, + NVSWITCH_NVLINK_ARCH_ERROR *arch_error, + NVSWITCH_NVLINK_HW_ERROR *hw_error); +NvlStatus nvswitch_ctrl_get_errors(nvswitch_device *device, + NVSWITCH_GET_ERRORS_PARAMS *p); + +// Log correctable per-device error with data +#define NVSWITCH_REPORT_CORRECTABLE_DEVICE_DATA(_device, _logenum, _data, _fmt, ...) \ + do \ + { \ + NVSWITCH_PRINT_SXID(_device, NVSWITCH_ERR ## _logenum, \ + "Correctable, " _fmt "\n", ## __VA_ARGS__ ); \ + NVSWITCH_LOG_NONFATAL_DATA(_device, _HW, _logenum, \ + 0, 0, NV_TRUE, _data); \ + } while(0) + +// Log correctable per-link error with data +#define NVSWITCH_REPORT_CORRECTABLE_LINK_DATA(_device, _link, _logenum, _data, _fmt, ...) \ + do \ + { \ + NVSWITCH_PRINT_SXID(_device, NVSWITCH_ERR ## _logenum, \ + "Correctable, Link %02d " _fmt "\n", _link, ## __VA_ARGS__ ); \ + NVSWITCH_LOG_NONFATAL_DATA(_device, _HW, _logenum, \ + _link, 0, NV_TRUE, _data); \ + } while(0) + +// Log nonfatal per-link error +#define NVSWITCH_REPORT_NONFATAL_LINK(_device, _link, _logenum, _fmt, ...) 
\ + do \ + { \ + NVSWITCH_PRINT_SXID(_device, NVSWITCH_ERR ## _logenum, \ + "Non-fatal, Link %02d " _fmt "\n", _link, ## __VA_ARGS__ ); \ + NVSWITCH_LOG_NONFATAL(_device, _HW, _logenum, \ + _link, 0, NV_FALSE); \ + } while(0) + +#endif //_ERROR_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/flcn/flcn_nvswitch.h b/src/common/nvswitch/kernel/inc/flcn/flcn_nvswitch.h new file mode 100644 index 000000000..1a788e455 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/flcn/flcn_nvswitch.h @@ -0,0 +1,436 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _FLCN_NVSWITCH_H_ +#define _FLCN_NVSWITCH_H_ + +#include "flcn/flcnrtosdebug_nvswitch.h" // +#include "flcnifcmn.h" +#include "flcn/flcnqueue_nvswitch.h" + +#include "flcn/haldefs_flcn_nvswitch.h" +#include "common_nvswitch.h" + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: FLCN_NVSWITCH.H * +* Defines and structures used for the Falcon Object. The Falcon * +* object is the base object for all Falcon-derived engines. * +\***************************************************************************/ + +/*! + * Compares an unit id against the values in the unit_id enumeration and + * verifies that the id is valid. It is expected that the id is specified + * as an unsigned integer. + */ +#define RM_FLCN_UNIT_ID_IS_VALID(pQeueInfo, id) \ + ((id) < (pQeueInfo)->maxUnitId) + + +/*! + * Verifies that the given queue identifier is a valid command queue id. It + * is expected that the id is specified as an unsigned integer. + */ +#define RM_FLCN_QUEUEID_IS_COMMAND_QUEUE(pQeueInfo, id) \ + ((id) <= (pQeueInfo)->maxCmdQueueIndex) + +/*! + * Define a sequence descriptor that may be used during initialization that + * represents an invalid sequence descriptor (one in which will never be + * assigned when a sequence/command is submitted). + */ +#define FLCN_INVALID_SEQ_DESC NV_U32_MAX + +/*! + * Define a event descriptor that may be used during initialization that + * represents an invalid event descriptor (one in which will never be assigned + * when a event is registered). + */ +#define FLCN_INVALID_EVT_DESC NV_U32_MAX + +/*! + * Defines the alignment/granularity of falcon memory blocks + */ +#define FLCN_BLK_ALIGNMENT (256) + +/*! 
+ * Defines the required address/offset alignment for all DMEM accesses + */ +#define FLCN_DMEM_ACCESS_ALIGNMENT (4) + +typedef struct FLCN_EVENT_INFO FLCN_EVENT_INFO, *PFLCN_EVENT_INFO; + +/*! + * @brief Tracks all information for each client that has registered for a + * specific type of event-notification. + */ +struct FLCN_EVENT_INFO +{ + /*! + * A unique identifier given to each event info instance to provide a + * fast way to identify and track an event registration. + */ + NvU32 evtDesc; + + /*! + * An identifier that describes the type of event the client wants + * notification of. + */ + NvU32 unitId; + + /*! + * The client's pre-allocated message buffer. This is the buffer that + * the message data will be written to when extracted from the Message + * Queue. This buffer must be sufficiently sized to hold the largest + * possible event for type 'unitId'. + */ + union RM_FLCN_MSG *pMsg; + + /*! The client function to be called when the event triggers. */ + FlcnQMgrClientCallback pCallback; + + /*! + * Any client-specified private parameters that must be provided in the + * callback function. + */ + void *pCallbackParams; + + /*! + * Client's are tracked as a linked list. This is a pointer to the next + * client in the list. The ordering of this list implies no association + * between the clients. + */ + FLCN_EVENT_INFO *pNext; +}; + +/*! + * @brief Enumeration for each discrete command state. + */ +typedef enum FLCN_CMD_STATE +{ + /*! + * Indicates the the command does not have a state. Commands/sequences + * that have never been submitted while possess this state. + */ + FLCN_CMD_STATE_NONE = 0, + + /*! Indicates that the command is being processed by the FLCN. */ + FLCN_CMD_STATE_RUNNING, + + /*! Indicates that the command has finished execution on the FLCN. */ + FLCN_CMD_STATE_DONE +} FLCN_CMD_STATE; + + +typedef struct FALCON_EXTERNAL_CONFIG +{ + NvBool bResetInPmc; // If TRUE, Reset Falcon using PMC Enable + NvU32 riscvRegisterBase; // Falcon's RISCV base offset. + NvU32 fbifBase; // Falcon's FB Interface base. + NvU32 blkcgBase; // Falcon's BLKCG base. +} FALCON_EXTERNAL_CONFIG, *PFALCON_EXTERNAL_CONFIG; + +typedef struct +{ + NvU8 maxUnitId; //> 4) & 0xf)) | \ + DRF_NUM(_PFALCON, _IP_VER, _MINOR, (coreVer & 0xf))) + +#define NV_PFALCON_IP_VER_MINOR 23:16 +#define NV_PFALCON_IP_VER_MAJOR 31:24 + +// Some mailbox defines (should be shared with MSDEC OS) +#define NV_FALCON_MAILBOX0_MSDECOS_STATUS 11:0 +#define NV_FALCON_MAILBOX0_MSDECOS_INVALID_METHOD_MTHDCNT 19:12 +#define NV_FALCON_MAILBOX0_MSDECOS_INVALID_METHOD_MTHDID 31:20 +#define NV_FALCON_MAILBOX1_MSDECOS_INVALID_METHOD_MTHDDATA 31:0 + +PFLCN flcnAllocNew(void); +NvlStatus flcnInit(nvswitch_device *device, PFLCN pFlcn, NvU32 pci_device_id); +void flcnDestroy(nvswitch_device *device, FLCN *pFlcn); + +/*! + * The HW arch (e.g. FALCON or FALCON + RISCV) that can be actively enabled and + * running on an uproc engine. + */ +#define NV_UPROC_ENGINE_ARCH_DEFAULT (0x0) +#define NV_UPROC_ENGINE_ARCH_FALCON (0x1) +#define NV_UPROC_ENGINE_ARCH_FALCON_RISCV (0x2) + +/*! + * Hepler macro to check what HW arch is enabled and running on an uproc engine. 
+ */ +#define UPROC_ENG_ARCH_FALCON(pFlcn) (pFlcn->engArch == NV_UPROC_ENGINE_ARCH_FALCON) +#define UPROC_ENG_ARCH_FALCON_RISCV(pFlcn) (pFlcn->engArch == NV_UPROC_ENGINE_ARCH_FALCON_RISCV) + +// Falcon Register index +#define NV_FALCON_REG_R0 (0) +#define NV_FALCON_REG_R1 (1) +#define NV_FALCON_REG_R2 (2) +#define NV_FALCON_REG_R3 (3) +#define NV_FALCON_REG_R4 (4) +#define NV_FALCON_REG_R5 (5) +#define NV_FALCON_REG_R6 (6) +#define NV_FALCON_REG_R7 (7) +#define NV_FALCON_REG_R8 (8) +#define NV_FALCON_REG_R9 (9) +#define NV_FALCON_REG_R10 (10) +#define NV_FALCON_REG_R11 (11) +#define NV_FALCON_REG_R12 (12) +#define NV_FALCON_REG_R13 (13) +#define NV_FALCON_REG_R14 (14) +#define NV_FALCON_REG_R15 (15) +#define NV_FALCON_REG_IV0 (16) +#define NV_FALCON_REG_IV1 (17) +#define NV_FALCON_REG_UNDEFINED (18) +#define NV_FALCON_REG_EV (19) +#define NV_FALCON_REG_SP (20) +#define NV_FALCON_REG_PC (21) +#define NV_FALCON_REG_IMB (22) +#define NV_FALCON_REG_DMB (23) +#define NV_FALCON_REG_CSW (24) +#define NV_FALCON_REG_CCR (25) +#define NV_FALCON_REG_SEC (26) +#define NV_FALCON_REG_CTX (27) +#define NV_FALCON_REG_EXCI (28) +#define NV_FALCON_REG_RSVD0 (29) +#define NV_FALCON_REG_RSVD1 (30) +#define NV_FALCON_REG_RSVD2 (31) + +#define NV_FALCON_REG_SIZE (32) + +#define FALC_REG(x) NV_FALCON_REG_##x + + +#endif // _FLCN_NVSWITCH_H_ + +/*! + * Defines the Falcon IMEM block-size (as a power-of-2). + */ +#define FALCON_IMEM_BLKSIZE2 (8) + +/*! + * Defines the Falcon DMEM block-size (as a power-of-2). + */ +#define FALCON_DMEM_BLKSIZE2 (8) + diff --git a/src/common/nvswitch/kernel/inc/flcn/flcnable_nvswitch.h b/src/common/nvswitch/kernel/inc/flcn/flcnable_nvswitch.h new file mode 100644 index 000000000..c5345d8e4 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/flcn/flcnable_nvswitch.h @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _FLCNABLE_NVSWITCH_H_ +#define _FLCNABLE_NVSWITCH_H_ + +/*! + * @file flcnable_nvswitch.h + * @brief Provides definitions for all FLCNABLE data structures and interfaces. + */ + +#include "flcn/haldefs_flcnable_nvswitch.h" + +#include "flcnifcmn.h" + +#include "nvlink_errors.h" + +struct nvswitch_device; +struct FLCN; +struct FALCON_EXTERNAL_CONFIG; +struct FLCN_QMGR_SEQ_INFO; +union RM_FLCN_MSG; +union RM_FLCN_CMD; +struct ENGINE_DESCRIPTOR_TYPE; + +/*! 
+ * Defines the structure used to contain all generic information related to + * the FLCNABLE. + */ +typedef struct FLCNABLE +{ + // pointer to our function table - should always be the first thing in any object + flcnable_hal *pHal; + // we don't have a parent class, so we go straight to our members + + /* Pointer to FLCN object for the object represented by this FLCNABLE */ + struct FLCN *pFlcn; + +} FLCNABLE, *PFLCNABLE; + +NvlStatus flcnableInit(struct nvswitch_device *device, PFLCNABLE pFlcnable, NvU32 pci_device_id); +void flcnableDestroy(struct nvswitch_device *device, PFLCNABLE pFlcnable); + +/*! + * Utility to get the FLCN object for the engine + */ +#define ENG_GET_FLCN(pObj) (((PFLCNABLE)pObj)->pFlcn) + +/*! + * Safe (from NULL parent) version of utility to get the FLCN object for the engine + */ +#define ENG_GET_FLCN_IFF(pObj) ((NULL!=(pObj))?ENG_GET_FLCN(pObj):NULL) + +// hal functions +NvU8 flcnableReadCoreRev (struct nvswitch_device *device, PFLCNABLE); +void flcnableGetExternalConfig (struct nvswitch_device *device, PFLCNABLE, struct FALCON_EXTERNAL_CONFIG *); +void flcnableEmemCopyFrom (struct nvswitch_device *device, PFLCNABLE, NvU32, NvU8 *, NvU32, NvU8); +void flcnableEmemCopyTo (struct nvswitch_device *device, PFLCNABLE, NvU32, NvU8 *, NvU32, NvU8); +NV_STATUS flcnableHandleInitEvent (struct nvswitch_device *device, PFLCNABLE, union RM_FLCN_MSG *); +struct FLCN_QMGR_SEQ_INFO * flcnableQueueSeqInfoGet (struct nvswitch_device *device, PFLCNABLE, NvU32); +void flcnableQueueSeqInfoClear (struct nvswitch_device *device, PFLCNABLE, struct FLCN_QMGR_SEQ_INFO *); +void flcnableQueueSeqInfoFree (struct nvswitch_device *device, PFLCNABLE, struct FLCN_QMGR_SEQ_INFO *); +NvBool flcnableQueueCmdValidate (struct nvswitch_device *device, PFLCNABLE, union RM_FLCN_CMD *, union RM_FLCN_MSG *, void *, NvU32); +NV_STATUS flcnableQueueCmdPostExtension (struct nvswitch_device *device, PFLCNABLE, union RM_FLCN_CMD *, union RM_FLCN_MSG *, void *, struct NVSWITCH_TIMEOUT *, struct FLCN_QMGR_SEQ_INFO *); +void flcnablePostDiscoveryInit (struct nvswitch_device *device, PFLCNABLE); + +NV_STATUS flcnableConstruct_HAL (struct nvswitch_device *device, PFLCNABLE); +void flcnableDestruct_HAL (struct nvswitch_device *device, PFLCNABLE); + +void flcnableFetchEngines_HAL (struct nvswitch_device *device, PFLCNABLE, struct ENGINE_DESCRIPTOR_TYPE *, struct ENGINE_DESCRIPTOR_TYPE *); + + +#endif // _FLCNABLE_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/flcn/flcnqueue_nvswitch.h b/src/common/nvswitch/kernel/inc/flcn/flcnqueue_nvswitch.h new file mode 100644 index 000000000..7a59f1c92 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/flcn/flcnqueue_nvswitch.h @@ -0,0 +1,263 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _FLCNQUEUE_NVSWITCH_H_ +#define _FLCNQUEUE_NVSWITCH_H_ + +/*! + * @file flcnqueue_nvswitch.h + * @copydoc flcnqueue_nvswitch.c + */ + +#include "nvstatus.h" + +struct nvswitch_device; +struct NVSWITCH_TIMEOUT; +struct FLCN; +struct FLCNQUEUE; +union RM_FLCN_MSG; +union RM_FLCN_CMD; + +/*! + * Define the signature of the callback function that FLCN clients must + * register when sending a FLCN command or registering for FLCN event + * notification. Upon completion of the command or upon intercepting an event + * of a specific type, the callback will be invoked passing the completed + * sequence or event descriptor to the client along with status to indicate if + * the message buffer was properly populated. + * + * @param[in] device nvswitch_device pointer + * @param[in] pMsg Pointer to the received message + * @param[in] pParams Pointer to the parameters + * @param[in] seqDesc Sequencer descriptor number + * @param[in] status Status for command execution result + */ +typedef void (*FlcnQMgrClientCallback)(struct nvswitch_device *, union RM_FLCN_MSG *pMsg, void *pParams, NvU32 seqDesc, NV_STATUS status); + +typedef NV_STATUS (*FlcnQueueClose )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvBool); +typedef NvBool (*FlcnQueueIsEmpty )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue); +typedef NV_STATUS (*FlcnQueueOpenRead )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue); +typedef NV_STATUS (*FlcnQueueOpenWrite)(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32); +typedef NV_STATUS (*FlcnQueuePop )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, void*, NvU32, NvU32 *); +typedef void (*FlcnQueuePush )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, void*, NvU32); +typedef void (*FlcnQueueRewind )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue); + +typedef NV_STATUS (*FlcnQueueHeadGet )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 *pHead); +typedef NV_STATUS (*FlcnQueueHeadSet )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 head ); +typedef NV_STATUS (*FlcnQueueTailGet )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 *pTail); +typedef NV_STATUS (*FlcnQueueTailSet )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 tail ); +typedef void (*FlcnQueueRead )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 offset, NvU8 *pDst, NvU32 sizeBytes); +typedef void (*FlcnQueueWrite )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 offset, NvU8 *pSrc, NvU32 sizeBytes); +typedef NV_STATUS (*FlcnQueueHasRoom )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 writeSize, NvBool *pBRewind); +typedef NV_STATUS (*FlcnQueueLock )(struct nvswitch_device 
*device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, struct NVSWITCH_TIMEOUT *pTimeout); +typedef NV_STATUS (*FlcnQueueUnlock )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue); +typedef NvU32 (*FlcnQueuePopulateRewindCmd )(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, union RM_FLCN_CMD *pFlcnCmd); +typedef NV_STATUS (*FlcnQueueElementUseStateClr)(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE *pQueue, NvU32 queuePos); + +/*! + * This structure defines the various flags that may be passed to the queue + * "open" API. Read-operations are allowed on queues opened for 'READ'; + * write-operations are allowed when opened for "WRITE". The specific flag + * used when a queue is opened defines behavior of the "close" operation. + */ +typedef enum +{ + FLCNQUEUE_OFLAG_READ = 0, + FLCNQUEUE_OFLAG_WRITE +} FLCNQUEUE_OFLAG; + +/*! + * Contains all fields, attributes, and functions pertaining to Falcon Queues. + */ +typedef struct FLCNQUEUE +{ + FlcnQueueClose close; + FlcnQueueIsEmpty isEmpty; + FlcnQueueOpenRead openRead; + FlcnQueueOpenWrite openWrite; + FlcnQueuePop pop; + FlcnQueuePush push; + FlcnQueueRewind rewind; + + FlcnQueueHeadGet headGet; + FlcnQueueHeadSet headSet; + FlcnQueueTailGet tailGet; + FlcnQueueTailSet tailSet; + FlcnQueueRead read; + FlcnQueueWrite write; + FlcnQueueHasRoom hasRoom; + FlcnQueuePopulateRewindCmd populateRewindCmd; + FlcnQueueElementUseStateClr elementUseStateClr; + + /*! + * When the queue is currently opened for writing, this value stores the + * current write position. This allows multiple writes to be streamed into + * the queue without updating the head pointer for each individual write. + */ + NvU32 position; + + /*! The physical DMEM offset where this queue resides/begins. */ + NvU32 queueOffset; + + /*! + * The logical queue identifier for the queue which we use to index into + * the queue structures inside RM. + */ + NvU32 queueLogId; + + /*! + * The physical queue index indicates the index of the queue pertaining to + * its type. We can use it to index into the head and tail registers of + * a particular type(CMD or MSG) of queue. + * For e.g., consider we have 3 command queues and 2 message queues allocated + * for a particular falcon, their queueLogId and queuePhyId values will be as: + * + * CMDQ0 queuePhyId = 0, queueLogId = 0 + * CMDQ1 queuePhyId = 1, queueLogId = 1 + * CMDQ2 queuePhyId = 2, queueLogId = 2 + * + * MSGQ0 queuePhyId = 0, queueLogId = 3 + * MSGQ1 queuePhyId = 1, queueLogId = 4 + */ + NvU32 queuePhyId; + + /*! The size of the queue in bytes for DMEM queue, number of entries for FB queue */ + NvU32 queueSize; + + /*! The size of the command header in bytes. */ + NvU32 cmdHdrSize; + + /*! + * Maximum size for each command. + */ + NvU32 maxCmdSize; + + /*! The open-flag that was specified when the queue was opened. */ + FLCNQUEUE_OFLAG oflag; + + /*! + * 'NV_TRUE' when data is currently being written info the queue (only + * pertains to command queues). + */ + NvBool bOpened; + + /*! + * 'NV_TRUE' when locked granting exclusive access the the lock owner. + */ + NvBool bLocked; + +} FLCNQUEUE, *PFLCNQUEUE; + +/*! + * @brief Enumeration to represent each discrete sequence state + * + * Each sequence stored in the Sequence Table must have a state associated + * with it to keep track of used vs. available sequences. + */ +typedef enum +{ + /*! Indicates the sequence is not be used and is available */ + FLCN_QMGR_SEQ_STATE_FREE = 0, + + /*! 
+ * Indicates the sequence has been reserved for a command, but command has + * not yet been queued in a command queue. + */ + FLCN_QMGR_SEQ_STATE_PENDING, + + /*! + * Indicates the sequence has been reserved for a command and has been + * queued. + */ + FLCN_QMGR_SEQ_STATE_USED, + + /*! + * Indicates that an event has occurred (shutdown/reset/...) that caused + * the sequence to be canceled. + */ + FLCN_QMGR_SEQ_STATE_CANCELED +} FLCN_QMGR_SEQ_STATE; + +/*! + * @brief Common SEQ_INFO used by all falcons. + */ +typedef struct FLCN_QMGR_SEQ_INFO +{ + /*! + * The unique identifier used by the FLCN ucode to distinguish sequences. + * The ID is unique to all sequences currently in-flight but may be reused + * as sequences are completed by the FLCN. + */ + NvU8 seqNum; + /*! + * Similar to 'seqNum' but unique for all sequences ever submitted (i.e. + * never reused). + */ + NvU32 seqDesc; + /*! + * The state of the sequence (@ref FLCN_QMGR_SEQ_STATE). + */ + FLCN_QMGR_SEQ_STATE seqState; + /*! + * The client function to be called when the sequence completes. + */ + FlcnQMgrClientCallback pCallback; + /*! + * Client-specified params that must be provided to the callback function. + */ + void *pCallbackParams; + + /*! + * CMD Queue associated with this Seq. + */ + struct FLCNQUEUE *pCmdQueue; + +} FLCN_QMGR_SEQ_INFO, *PFLCN_QMGR_SEQ_INFO; + +NV_STATUS flcnQueueConstruct_common_nvswitch(struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE **ppQueue, NvU32 queueId, NvU32 queuePhyId, NvU32 offset, NvU32 queueSize, NvU32 cmdHdrSize); +NV_STATUS flcnQueueConstruct_dmem_nvswitch (struct nvswitch_device *device, struct FLCN *pFlcn, struct FLCNQUEUE **ppQueue, NvU32 queueId, NvU32 queuePhyId, NvU32 offset, NvU32 queueSize, NvU32 cmdHdrSize); + + +// Dumping queues for debugging purpose +NV_STATUS flcnRtosDumpCmdQueue_nvswitch(struct nvswitch_device *device, struct FLCN *pFlcn, NvU32 queueLogId, union RM_FLCN_CMD *FlcnCmd); + +/*! + * Alignment to use for all head/tail pointer updates. Pointers are always + * rouned up to the nearest multiple of this value. + */ +#define QUEUE_ALIGNMENT (4) + +/*! + * Checks if the given queue is currently opened for read. + */ +#define QUEUE_OPENED_FOR_READ(pQueue) \ + (((pQueue)->bOpened) && ((pQueue)->oflag == FLCNQUEUE_OFLAG_READ)) + +/*! + * Checks if the given queue is currently opened for write. + */ +#define QUEUE_OPENED_FOR_WRITE(pQueue) \ + (((pQueue)->bOpened) && ((pQueue)->oflag == FLCNQUEUE_OFLAG_WRITE)) + +#endif // _FLCNQUEUE_NVSWITCH_H_ + diff --git a/src/common/nvswitch/kernel/inc/flcn/flcnrtosdebug_nvswitch.h b/src/common/nvswitch/kernel/inc/flcn/flcnrtosdebug_nvswitch.h new file mode 100644 index 000000000..bf8d55731 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/flcn/flcnrtosdebug_nvswitch.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _FLCN_RTOS_DEBUG_NVSWITCH_H_ +#define _FLCN_RTOS_DEBUG_NVSWITCH_H_ + +/*! + * @file flcnrtosdebug_nvswitch.h + * @copydoc flcnrtosdebug_nvswitch.c + */ + +/* ------------------------- Includes --------------------------------------- */ +/* ------------------------- Macros ----------------------------------------- */ +/* ------------------------- Datatypes -------------------------------------- */ +/* ------------------------- Function Prototypes ---------------------------- */ + +#endif // _FLCN_RTOS_DEBUG_NVSWITCH_H_ + diff --git a/src/common/nvswitch/kernel/inc/flcn/haldefs_flcn_nvswitch.h b/src/common/nvswitch/kernel/inc/flcn/haldefs_flcn_nvswitch.h new file mode 100644 index 000000000..b1a81de8b --- /dev/null +++ b/src/common/nvswitch/kernel/inc/flcn/haldefs_flcn_nvswitch.h @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _HALDEFS_FLCN_NVSWITCH_H_ +#define _HALDEFS_FLCN_NVSWITCH_H_ + + +#include "nvstatus.h" +#include "flcn/flcnqueue_nvswitch.h" +#include "flcnifcmn.h" + +struct nvswitch_device; +struct NVSWITCH_TIMEOUT; +struct FLCN; +union RM_FLCN_MSG; +union RM_FLCN_CMD; +struct FLCNQUEUE; +struct FLCN_QMGR_SEQ_INFO; + +typedef struct { + // OBJECT Interfaces + NV_STATUS (*queueReadData) (struct nvswitch_device *, struct FLCN *, NvU32 queueId, void *pData, NvBool bMsg); + NV_STATUS (*queueCmdWrite) (struct nvswitch_device *, struct FLCN *, NvU32 queueId, union RM_FLCN_CMD *pCmd, struct NVSWITCH_TIMEOUT *pTimeout); + NV_STATUS (*queueCmdCancel) (struct nvswitch_device *, struct FLCN *, NvU32 seqDesc); + NV_STATUS (*queueCmdPostNonBlocking) (struct nvswitch_device *, struct FLCN *, union RM_FLCN_CMD *pCmd, union RM_FLCN_MSG *pMsg, void *pPayload, NvU32 queueIdLogical, FlcnQMgrClientCallback pCallback, void *pCallbackParams, NvU32 *pSeqDesc, struct NVSWITCH_TIMEOUT *pTimeout); + NV_STATUS (*queueCmdWait) (struct nvswitch_device *, struct FLCN *, NvU32 seqDesc, struct NVSWITCH_TIMEOUT *pTimeout); + NvU8 (*coreRevisionGet) (struct nvswitch_device *, struct FLCN *); + void (*markNotReady) (struct nvswitch_device *, struct FLCN *); + NV_STATUS (*cmdQueueHeadGet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 *pHead); + NV_STATUS (*msgQueueHeadGet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 *pHead); + NV_STATUS (*cmdQueueTailGet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 *pTail); + NV_STATUS (*msgQueueTailGet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 *pTail); + NV_STATUS (*cmdQueueHeadSet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 head); + NV_STATUS (*msgQueueHeadSet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 head); + NV_STATUS (*cmdQueueTailSet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 tail); + NV_STATUS (*msgQueueTailSet) (struct nvswitch_device *, struct FLCN *, struct FLCNQUEUE *pQueue, NvU32 tail); + struct FLCN_QMGR_SEQ_INFO *(*queueSeqInfoFind) (struct nvswitch_device *, struct FLCN *, NvU32 seqDesc); + struct FLCN_QMGR_SEQ_INFO *(*queueSeqInfoAcq) (struct nvswitch_device *, struct FLCN *); + void (*queueSeqInfoRel) (struct nvswitch_device *, struct FLCN *, struct FLCN_QMGR_SEQ_INFO *pSeqInfo); + void (*queueSeqInfoStateInit) (struct nvswitch_device *, struct FLCN *); + void (*queueSeqInfoCancelAll) (struct nvswitch_device *, struct FLCN *); + NV_STATUS (*queueSeqInfoFree) (struct nvswitch_device *, struct FLCN *, struct FLCN_QMGR_SEQ_INFO *); + NV_STATUS (*queueEventRegister) (struct nvswitch_device *, struct FLCN *, NvU32 unitId, NvU8 *pMsg, FlcnQMgrClientCallback pCallback, void *pParams, NvU32 *pEvtDesc); + NV_STATUS (*queueEventUnregister) (struct nvswitch_device *, struct FLCN *, NvU32 evtDesc); + NV_STATUS (*queueEventHandle) (struct nvswitch_device *, struct FLCN *, union RM_FLCN_MSG *pMsg, NV_STATUS evtStatus); + NV_STATUS (*queueResponseHandle) (struct nvswitch_device *, struct FLCN *, union RM_FLCN_MSG *pMsg); + NvU32 (*queueCmdStatus) (struct nvswitch_device *, struct FLCN *, NvU32 seqDesc); + NV_STATUS (*dmemCopyFrom) (struct nvswitch_device *, struct FLCN *, NvU32 src, NvU8 *pDst, NvU32 sizeBytes, NvU8 port); + NV_STATUS (*dmemCopyTo) (struct nvswitch_device *, struct FLCN *, NvU32 dst, NvU8 *pSrc, NvU32 sizeBytes, NvU8 port); + void (*postDiscoveryInit) 
(struct nvswitch_device *, struct FLCN *); + void (*dbgInfoDmemOffsetSet) (struct nvswitch_device *, struct FLCN *, NvU16 debugInfoDmemOffset); + + + //HAL Interfaces + NV_STATUS (*construct) (struct nvswitch_device *, struct FLCN *); + void (*destruct) (struct nvswitch_device *, struct FLCN *); + NvU32 (*regRead) (struct nvswitch_device *, struct FLCN *, NvU32 offset); + void (*regWrite) (struct nvswitch_device *, struct FLCN *, NvU32 offset, NvU32 data); + const char *(*getName) (struct nvswitch_device *, struct FLCN *); + NvU8 (*readCoreRev) (struct nvswitch_device *, struct FLCN *); + void (*getCoreInfo) (struct nvswitch_device *, struct FLCN *); + NV_STATUS (*dmemTransfer) (struct nvswitch_device *, struct FLCN *, NvU32 src, NvU8 *pDst, NvU32 sizeBytes, NvU8 port, NvBool bCopyFrom); + void (*intrRetrigger) (struct nvswitch_device *, struct FLCN *); + NvBool (*areEngDescsInitialized) (struct nvswitch_device *, struct FLCN *); + NV_STATUS (*waitForResetToFinish) (struct nvswitch_device *, struct FLCN *); + void (*dbgInfoCapturePcTrace) (struct nvswitch_device *, struct FLCN *); + void (*dbgInfoCaptureRiscvPcTrace) (struct nvswitch_device *, struct FLCN *); + NvU32 (*dmemSize) (struct nvswitch_device *, struct FLCN *); + NvU32 (*setImemAddr) (struct nvswitch_device *, struct FLCN *, NvU32 dst); + void (*imemCopyTo) (struct nvswitch_device *, struct FLCN *, NvU32 dst, NvU8 *pSrc, NvU32 sizeBytes, NvBool bSecure, NvU32 tag, NvU8 port); + NvU32 (*setDmemAddr) (struct nvswitch_device *, struct FLCN *, NvU32 dst); + NvU32 (*riscvRegRead) (struct nvswitch_device *, struct FLCN *, NvU32 offset); + void (*riscvRegWrite) (struct nvswitch_device *, struct FLCN *, NvU32 offset, NvU32 data); +} flcn_hal; + +void flcnQueueSetupHal(struct FLCN *pFlcn); +void flcnRtosSetupHal(struct FLCN *pFlcn); +void flcnQueueRdSetupHal(struct FLCN *pFlcn); + +void flcnSetupHal_LR10(struct FLCN *pFlcn); + +void flcnSetupHal_v03_00(struct FLCN *pFlcn); +void flcnSetupHal_v04_00(struct FLCN *pFlcn); +void flcnSetupHal_v05_01(struct FLCN *pFlcn); +void flcnSetupHal_v06_00(struct FLCN *pFlcn); + +#endif //_HALDEFS_FLCN_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/flcn/haldefs_flcnable_nvswitch.h b/src/common/nvswitch/kernel/inc/flcn/haldefs_flcnable_nvswitch.h new file mode 100644 index 000000000..635bab770 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/flcn/haldefs_flcnable_nvswitch.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _HALDEFS_FLCNABLE_NVSWITCH_H_ +#define _HALDEFS_FLCNABLE_NVSWITCH_H_ + +#include "nvstatus.h" +#include "flcnifcmn.h" + +struct nvswitch_device; +struct NVSWITCH_TIMEOUT; +struct FLCNABLE; +struct FALCON_EXTERNAL_CONFIG; +struct FLCN_QMGR_SEQ_INFO; +union RM_FLCN_MSG; +union RM_FLCN_CMD; +struct ENGINE_DESCRIPTOR_TYPE; + +typedef struct { + NvU8 (*readCoreRev)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable); + void (*getExternalConfig)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + struct FALCON_EXTERNAL_CONFIG *pConfig); + void (*ememCopyFrom)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + NvU32 src, + NvU8 *pDst, + NvU32 sizeBytes, + NvU8 port); + void (*ememCopyTo)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + NvU32 dst, + NvU8 *pSrc, + NvU32 sizeBytes, + NvU8 port); + NV_STATUS (*handleInitEvent)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + union RM_FLCN_MSG *pGenMsg); + struct FLCN_QMGR_SEQ_INFO* (*queueSeqInfoGet)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + NvU32 seqIndex); + void (*queueSeqInfoClear)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + struct FLCN_QMGR_SEQ_INFO *pSeqInfo); + void (*queueSeqInfoFree)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + struct FLCN_QMGR_SEQ_INFO *pSeqInfo); + NvBool (*queueCmdValidate)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + union RM_FLCN_CMD *pCmd, + union RM_FLCN_MSG *pMsg, + void *pPayload, + NvU32 queueIdLogical); + NV_STATUS (*queueCmdPostExtension)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + union RM_FLCN_CMD *pCmd, + union RM_FLCN_MSG *pMsg, + void *pPayload, + struct NVSWITCH_TIMEOUT *pTimeout, + struct FLCN_QMGR_SEQ_INFO *pSeqInfo); + void (*postDiscoveryInit)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable); + + + + NV_STATUS (*construct)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable); + void (*destruct)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable); + void (*fetchEngines)( + struct nvswitch_device *device, + struct FLCNABLE *pFlcnable, + struct ENGINE_DESCRIPTOR_TYPE *pEngDescUc, + struct ENGINE_DESCRIPTOR_TYPE *pEngDescBc); + +} flcnable_hal; + +#endif //_HALDEFS_FLCNABLE_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/haldef_nvswitch.h b/src/common/nvswitch/kernel/inc/haldef_nvswitch.h new file mode 100644 index 000000000..d7cf072aa --- /dev/null +++ b/src/common/nvswitch/kernel/inc/haldef_nvswitch.h @@ -0,0 +1,262 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _HALDEF_NVSWITCH_H_ +#define _HALDEF_NVSWITCH_H_ + +#include "ctrl_dev_nvswitch.h" + +#include "inforom/ifrstruct.h" +#include "inforom/omsdef.h" + +// +// List of functions halified in the NVSwitch Driver +// +// Note: All hal fns must be implemented for each chip. +// There is no automatic stubbing here. +// +// This 'xmacro' list is fed into generator macros which then use the +// _FUNCTION_LIST to generate HAL declarations, function prototypes, and HAL +// construction. Xmacros are a useful way to maintain consistency between +// parallel lists. +// The components of the _FUNCTION_LIST are similar to a function prototype +// declaration, with the addition of an '_arch' parameter suffixed on to it +// which is used on some _FUNCTION_LIST expansions to generate arch-specific +// information. 
+// The format of each line is: +// _op(return type, function name, (parameter list), _arch) +// + +#define NVSWITCH_HAL_FUNCTION_LIST(_op, _arch) \ + _op(NvlStatus, nvswitch_initialize_device_state, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_destroy_device_state, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_determine_platform, (nvswitch_device *device), _arch) \ + _op(NvU32, nvswitch_get_num_links, (nvswitch_device *device), _arch) \ + _op(NvBool, nvswitch_is_link_valid, (nvswitch_device *device, NvU32 link_id), _arch) \ + _op(void, nvswitch_set_fatal_error, (nvswitch_device *device, NvBool device_fatal, NvU32 link_id), _arch) \ + _op(NvU32, nvswitch_get_swap_clk_default, (nvswitch_device *device), _arch) \ + _op(NvU32, nvswitch_get_latency_sample_interval_msec, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_internal_latency_bin_log,(nvswitch_device *device), _arch) \ + _op(void, nvswitch_ecc_writeback_task, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_monitor_thermal_alert, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_hw_counter_shutdown, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_get_rom_info, (nvswitch_device *device, NVSWITCH_EEPROM_TYPE *eeprom), _arch) \ + _op(void, nvswitch_lib_enable_interrupts, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_lib_disable_interrupts, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_lib_check_interrupts, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_lib_service_interrupts, (nvswitch_device *device), _arch) \ + _op(NvU64, nvswitch_hw_counter_read_counter, (nvswitch_device *device), _arch) \ + _op(NvBool, nvswitch_is_link_in_use, (nvswitch_device *device, NvU32 link_id), _arch) \ + _op(NvlStatus, nvswitch_reset_and_drain_links, (nvswitch_device *device, NvU64 link_mask), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_info, (nvswitch_device *device, NVSWITCH_GET_INFO *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_nvlink_status, (nvswitch_device *device, NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_counters, (nvswitch_device *device, NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_switch_port_config, (nvswitch_device *device, NVSWITCH_SET_SWITCH_PORT_CONFIG *p), _arch) \ + _op(NvlStatus, nvswitch_set_nport_port_config, (nvswitch_device *device, NVSWITCH_SET_SWITCH_PORT_CONFIG *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_ingress_request_table, (nvswitch_device *device, NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS *params), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_ingress_request_table, (nvswitch_device *device, NVSWITCH_SET_INGRESS_REQUEST_TABLE *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_ingress_request_valid, (nvswitch_device *device, NVSWITCH_SET_INGRESS_REQUEST_VALID *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_ingress_response_table, (nvswitch_device *device, NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS *params), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_ingress_response_table, (nvswitch_device *device, NVSWITCH_SET_INGRESS_RESPONSE_TABLE *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_ganged_link_table, (nvswitch_device *device, NVSWITCH_SET_GANGED_LINK_TABLE *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_remap_policy, (nvswitch_device *device, NVSWITCH_SET_REMAP_POLICY *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_remap_policy, (nvswitch_device *device, NVSWITCH_GET_REMAP_POLICY_PARAMS *params), _arch) \ + _op(NvlStatus, 
nvswitch_ctrl_set_remap_policy_valid, (nvswitch_device *device, NVSWITCH_SET_REMAP_POLICY_VALID *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_routing_id, (nvswitch_device *device, NVSWITCH_SET_ROUTING_ID *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_routing_id, (nvswitch_device *device, NVSWITCH_GET_ROUTING_ID_PARAMS *params), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_routing_id_valid, (nvswitch_device *device, NVSWITCH_SET_ROUTING_ID_VALID *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_routing_lan, (nvswitch_device *device, NVSWITCH_SET_ROUTING_LAN *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_routing_lan, (nvswitch_device *device, NVSWITCH_GET_ROUTING_LAN_PARAMS *params), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_routing_lan_valid, (nvswitch_device *device, NVSWITCH_SET_ROUTING_LAN_VALID *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_internal_latency, (nvswitch_device *device, NVSWITCH_GET_INTERNAL_LATENCY *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_latency_bins, (nvswitch_device *device, NVSWITCH_SET_LATENCY_BINS *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_ingress_reqlinkid, (nvswitch_device *device, NVSWITCH_GET_INGRESS_REQLINKID_PARAMS *params), _arch) \ + _op(NvU32, nvswitch_i2c_get_port_info, (nvswitch_device *device, NvU32 port), _arch) \ + _op(NvlStatus, nvswitch_ctrl_i2c_indexed, (nvswitch_device *device, NVSWITCH_CTRL_I2C_INDEXED_PARAMS *pParams), _arch) \ + _op(NvlStatus, nvswitch_ctrl_therm_read_temperature, (nvswitch_device *device, NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info), _arch) \ + _op(NvlStatus, nvswitch_ctrl_therm_get_temperature_limit, (nvswitch_device *device, NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS *info), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_throughput_counters, (nvswitch_device *device, NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p), _arch) \ + _op(NvlStatus, nvswitch_corelib_add_link, (nvlink_link *link), _arch) \ + _op(NvlStatus, nvswitch_corelib_remove_link, (nvlink_link *link), _arch) \ + _op(NvlStatus, nvswitch_corelib_set_dl_link_mode, (nvlink_link *link, NvU64 mode, NvU32 flags), _arch) \ + _op(NvlStatus, nvswitch_corelib_get_dl_link_mode, (nvlink_link *link, NvU64 *mode), _arch) \ + _op(NvlStatus, nvswitch_corelib_set_tl_link_mode, (nvlink_link *link, NvU64 mode, NvU32 flags), _arch) \ + _op(NvlStatus, nvswitch_corelib_get_tl_link_mode, (nvlink_link *link, NvU64 *mode), _arch) \ + _op(NvlStatus, nvswitch_corelib_set_tx_mode, (nvlink_link *link, NvU64 mode, NvU32 flags), _arch) \ + _op(NvlStatus, nvswitch_corelib_get_tx_mode, (nvlink_link *link, NvU64 *mode, NvU32 *subMode), _arch) \ + _op(NvlStatus, nvswitch_corelib_set_rx_mode, (nvlink_link *link, NvU64 mode, NvU32 flags), _arch) \ + _op(NvlStatus, nvswitch_corelib_get_rx_mode, (nvlink_link *link, NvU64 *mode, NvU32 *subMode), _arch) \ + _op(NvlStatus, nvswitch_corelib_set_rx_detect, (nvlink_link *link, NvU32 flags), _arch) \ + _op(NvlStatus, nvswitch_corelib_get_rx_detect, (nvlink_link *link), _arch) \ + _op(void, nvswitch_corelib_training_complete, (nvlink_link *link), _arch) \ + _op(NvU32, nvswitch_get_device_dma_width, (nvswitch_device *device), _arch) \ + _op(NvU32, nvswitch_get_link_ip_version, (nvswitch_device *device, NvU32 link_id), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_fom_values, (nvswitch_device *device, NVSWITCH_GET_FOM_VALUES_PARAMS *p), _arch) \ + _op(NvlStatus, nvswitch_deassert_link_reset, (nvswitch_device *device, nvlink_link *link), _arch) \ + _op(NvBool, nvswitch_is_soe_supported, (nvswitch_device *device), _arch) \ + _op(NvlStatus, 
nvswitch_soe_set_ucode_core, (nvswitch_device *device, NvBool bFalcon), _arch) \ + _op(NvlStatus, nvswitch_init_soe, (nvswitch_device *device), _arch) \ + _op(NvBool, nvswitch_is_inforom_supported, (nvswitch_device *device), _arch) \ + _op(NvBool, nvswitch_is_spi_supported, (nvswitch_device *device), _arch) \ + _op(NvBool, nvswitch_is_smbpbi_supported, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_soe_prepare_for_reset, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_post_init_device_setup, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_post_init_blacklist_device_setup, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_setup_link_system_registers, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_get_nvlink_ecc_errors, (nvswitch_device *device, NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS *p), _arch) \ + _op(NvlStatus, nvswitch_inforom_ecc_log_error_event, (nvswitch_device *device, INFOROM_ECC_OBJECT *pEccGeneric, INFOROM_NVS_ECC_ERROR_EVENT *error_event), _arch) \ + _op(void, nvswitch_oms_set_device_disable, (INFOROM_OMS_STATE *pOmsState, NvBool bForceDeviceDisable), _arch) \ + _op(NvBool, nvswitch_oms_get_device_disable, (INFOROM_OMS_STATE *pOmsState), _arch) \ + _op(NvlStatus, nvswitch_inforom_nvl_log_error_event, (nvswitch_device *device, void *pNvlGeneric, void *error_event, NvBool *bDirty), _arch) \ + _op(NvlStatus, nvswitch_inforom_nvl_update_link_correctable_error_info, (nvswitch_device *device, void *pNvlGeneric, void *pData, NvU8 linkId, NvU8 nvliptInstance, NvU8 localLinkIdx, void *pErrorCounts, NvBool *bDirty), _arch) \ + _op(NvlStatus, nvswitch_inforom_nvl_get_max_correctable_error_rate, (nvswitch_device *device, NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *p), _arch) \ + _op(NvlStatus, nvswitch_inforom_nvl_get_errors, (nvswitch_device *device, NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *p), _arch) \ + _op(NvlStatus, nvswitch_inforom_ecc_get_errors, (nvswitch_device *device, NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *p), _arch) \ + _op(void, nvswitch_load_uuid, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_i2c_set_hw_speed_mode, (nvswitch_device *device, NvU32 port, NvU32 speedMode), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_bios_info, (nvswitch_device *device, NVSWITCH_GET_BIOS_INFO_PARAMS *p), _arch) \ + _op(NvlStatus, nvswitch_read_oob_blacklist_state, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_write_fabric_state, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_initialize_oms_state, (nvswitch_device *device, INFOROM_OMS_STATE *pOmsState), _arch) \ + _op(NvlStatus, nvswitch_oms_inforom_flush, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_inforom_ecc_get_total_errors, (nvswitch_device *device, INFOROM_ECC_OBJECT *pEccGeneric, NvU64 *corCount, NvU64 *uncCount), _arch) \ + _op(NvlStatus, nvswitch_bbx_setup_prologue, (nvswitch_device *device, void *pInforomBbxState), _arch) \ + _op(NvlStatus, nvswitch_bbx_setup_epilogue, (nvswitch_device *device, void *pInforomBbxState), _arch) \ + _op(NvlStatus, nvswitch_bbx_add_data_time, (nvswitch_device *device, void *pInforomBbxState, void *pInforomBbxData), _arch) \ + _op(NvlStatus, nvswitch_bbx_add_sxid, (nvswitch_device *device, void *pInforomBbxState, void *pInforomBbxData), _arch) \ + _op(NvlStatus, nvswitch_bbx_add_temperature, (nvswitch_device *device, void *pInforomBbxState, void *pInforomBbxData), _arch) \ + _op(void, nvswitch_bbx_set_initial_temperature, (nvswitch_device *device, void *pInforomBbxState, void 
*pInforomBbxData), _arch) \ + _op(NvlStatus, nvswitch_inforom_bbx_get_sxid, (nvswitch_device *device, NVSWITCH_GET_SXIDS_PARAMS *p), _arch) \ + _op(NvlStatus, nvswitch_smbpbi_get_dem_num_messages, (nvswitch_device *device, NvU8 *pMsgCount), _arch) \ + _op(NvlStatus, nvswitch_set_minion_initialized, (nvswitch_device *device, NvU32 idx_minion, NvBool initialized), _arch) \ + _op(NvBool, nvswitch_is_minion_initialized, (nvswitch_device *device, NvU32 idx_minion), _arch) \ + _op(NvlStatus, nvswitch_get_link_public_id, (nvswitch_device *device, NvU32 linkId, NvU32 *publicId), _arch) \ + _op(NvlStatus, nvswitch_get_link_local_idx, (nvswitch_device *device, NvU32 linkId, NvU32 *localLinkIdx), _arch) \ + _op(NvlStatus, nvswitch_set_training_error_info, (nvswitch_device *device, NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *pLinkTrainingErrorInfoParams), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_fatal_error_scope, (nvswitch_device *device, NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams), _arch) \ + _op(void, nvswitch_init_scratch, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_device_discovery, (nvswitch_device *device, NvU32 discovery_offset), _arch) \ + _op(void, nvswitch_filter_discovery, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_process_discovery, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_init_minion, (nvswitch_device *device), _arch) \ + _op(NvU32, nvswitch_get_eng_base, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance), _arch) \ + _op(NvU32, nvswitch_get_eng_count, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast), _arch) \ + _op(NvU32, nvswitch_eng_rd, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 offset), _arch) \ + _op(void, nvswitch_eng_wr, (nvswitch_device *device, NVSWITCH_ENGINE_ID eng_id, NvU32 eng_bcast, NvU32 eng_instance, NvU32 offset, NvU32 data), _arch) \ + _op(NvU32, nvswitch_get_link_eng_inst, (nvswitch_device *device, NvU32 link_id, NVSWITCH_ENGINE_ID eng_id), _arch) \ + _op(void *, nvswitch_alloc_chipdevice, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_init_thermal, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_init_pll_config, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_init_pll, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_init_clock_gating, (nvswitch_device *device), _arch) \ + _op(NvU32, nvswitch_read_physical_id, (nvswitch_device *device), _arch) \ + _op(NvU32, nvswitch_get_caps_nvlink_version, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_initialize_interrupt_tree, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_init_dlpl_interrupts, (nvlink_link *link), _arch) \ + _op(NvlStatus, nvswitch_initialize_pmgr, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_initialize_ip_wrappers, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_initialize_route, (nvswitch_device *device), _arch) \ + _op(void, nvswitch_soe_unregister_events, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_soe_register_event_callbacks, (nvswitch_device *device), _arch) \ + _op(NVSWITCH_BIOS_NVLINK_CONFIG *, nvswitch_get_bios_nvlink_config, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_minion_send_command, (nvswitch_device *device, NvU32 linkNumber, NvU32 command, NvU32 scratch0), _arch) \ + _op(NvlStatus, nvswitch_init_nport, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_init_nxbar, 
(nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_clear_nport_rams, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_pri_ring_init, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_get_soe_ucode_binaries, (nvswitch_device *device, const NvU32 **soe_ucode_data, const NvU32 **soe_ucode_header), _arch) \ + _op(NvlStatus, nvswitch_get_remap_table_selector, (nvswitch_device *device, NVSWITCH_TABLE_SELECT_REMAP table_selector, NvU32 *remap_ram_sel), _arch) \ + _op(NvU32, nvswitch_get_ingress_ram_size, (nvswitch_device *device, NvU32 ingress_ram_selector), _arch) \ + _op(NvlStatus, nvswitch_minion_get_dl_status, (nvswitch_device *device, NvU32 linkId, NvU32 statusIdx, NvU32 statusArgs, NvU32 *statusData), _arch) \ + _op(void, nvswitch_corelib_get_uphy_load, (nvlink_link *link, NvBool *bUnlocked), _arch) \ + _op(NvBool, nvswitch_is_i2c_supported, (nvswitch_device *device), _arch) \ + _op(NvlStatus, nvswitch_poll_sublink_state, (nvswitch_device *device, nvlink_link *link), _arch)\ + _op(void, nvswitch_setup_link_loopback_mode, (nvswitch_device *device, NvU32 linkNumber), _arch)\ + _op(void, nvswitch_reset_persistent_link_hw_state, (nvswitch_device *device, NvU32 linkNumber), _arch)\ + _op(void, nvswitch_store_topology_information, (nvswitch_device *device, nvlink_link *link), _arch) \ + _op(void, nvswitch_init_lpwr_regs, (nvlink_link *link), _arch) \ + _op(NvlStatus, nvswitch_set_training_mode, (nvswitch_device *device), _arch) \ + _op(NvU32, nvswitch_get_sublink_width, (nvswitch_device *device, NvU32 linkNumber), _arch) \ + _op(NvBool, nvswitch_i2c_is_device_access_allowed, (nvswitch_device *device, NvU32 port, NvU8 addr, NvBool bIsRead), _arch) \ + _op(NvlStatus, nvswitch_parse_bios_image, (nvswitch_device *device), _arch) \ + _op(NvBool, nvswitch_is_link_in_reset, (nvswitch_device *device, nvlink_link *link), _arch) \ + _op(void, nvswitch_init_buffer_ready, (nvswitch_device *device, nvlink_link * link, NvBool bNportBufferReady), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_nvlink_lp_counters, (nvswitch_device *device, NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_set_residency_bins, (nvswitch_device *device, NVSWITCH_SET_RESIDENCY_BINS *p), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_residency_bins, (nvswitch_device *device, NVSWITCH_GET_RESIDENCY_BINS *p), _arch) \ + _op(void, nvswitch_apply_recal_settings, (nvswitch_device *device, nvlink_link *), _arch) \ + _op(NvlStatus, nvswitch_service_nvldl_fatal_link, (nvswitch_device *device, NvU32 nvliptInstance, NvU32 link), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_rb_stall_busy, (nvswitch_device *device, NVSWITCH_GET_RB_STALL_BUSY *p), _arch) \ + _op(NvlStatus, nvswitch_service_minion_link, (nvswitch_device *device, NvU32 link_id), _arch) \ + _op(NvlStatus, nvswitch_ctrl_get_sw_info, (nvswitch_device *device, NVSWITCH_GET_SW_INFO_PARAMS *p), _arch) + +// +// Declare HAL function pointer table +// +// This macro takes the xmacro _FUNCTION_LIST and uses some components in it to +// automatically generate the HAL structure declaration in a form: +// NvU32 (*function_foo1)(nvswitch_device device); +// void (*function_foo2)(nvswitch_device device, NvU32 parameter1); +// NvlStatus (*function_foo3)(nvswitch_device device, NvU32 parameter1, void *parameter2); +// + +#define DECLARE_HAL_FUNCTIONS(_return, _function, _params, _arch) \ + _return (*_function)_params; + +typedef struct nvswitch_hal_functions +{ + NVSWITCH_HAL_FUNCTION_LIST(DECLARE_HAL_FUNCTIONS, HAL) + +} nvswitch_hal; 
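
The xmacro pattern above (and the fill-in and dispatcher macros that follow) can be hard to read in the abstract. Below is a minimal, self-contained sketch of the same technique with a two-entry list; every name in it (MINI_FUNCTION_LIST, mini_hal, mini_device, the *_lr10 implementations, the link count it returns) is invented for illustration and is not part of this driver.

/* Illustrative sketch only -- hypothetical names, not from the NVSwitch headers. */
#include <stdio.h>

typedef struct mini_device mini_device;

/* The single source of truth: _op(return type, function name, (parameter list), _arch) */
#define MINI_FUNCTION_LIST(_op, _arch)                              \
    _op(int,  mini_get_num_links, (mini_device *device), _arch)    \
    _op(void, mini_reset,         (mini_device *device), _arch)

/* Expansion 1: a struct of function pointers, one member per list entry. */
#define DECLARE_MINI_FUNCTIONS(_return, _function, _params, _arch) \
    _return (*_function)_params;

typedef struct
{
    MINI_FUNCTION_LIST(DECLARE_MINI_FUNCTIONS, HAL)
} mini_hal;

struct mini_device
{
    mini_hal hal;
};

/* Hypothetical per-arch implementations, named <function>_<arch>. */
static int  mini_get_num_links_lr10(mini_device *device) { (void)device; return 36; }
static void mini_reset_lr10        (mini_device *device) { (void)device; }

/* Expansion 2: fill in the pointer table for one arch via token pasting. */
#define CREATE_MINI_FUNCTIONS(_return, _function, _params, _arch)  \
    device->hal._function = _function##_##_arch;

#define MINI_INIT_HAL(device, arch) \
    MINI_FUNCTION_LIST(CREATE_MINI_FUNCTIONS, arch)

int main(void)
{
    mini_device dev;
    mini_device *device = &dev;

    MINI_INIT_HAL(device, lr10);   /* binds every entry point to its _lr10 version */
    printf("links = %d\n", device->hal.mini_get_num_links(device));
    return 0;
}

Because the list is the only thing that ever changes, a new entry added to it automatically appears in the struct declaration, the per-arch fill-in, and (in the real header, via DECLARE_HAL_DISPATCHERS) the dispatcher prototypes, which is exactly the consistency between parallel lists that the comment above describes.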
+ +// +// Fill in HAL function pointer table +// +// This macro takes the xmacro _FUNCTION_LIST and uses some components in it to +// automatically generate all the HAL function fill-in assignments for a given +// architecture. +// + +#define CREATE_HAL_FUNCTIONS(_return, _function, _params, _arch) \ + device->hal._function = _function##_##_arch; \ + +#define NVSWITCH_INIT_HAL(device, arch) \ + NVSWITCH_HAL_FUNCTION_LIST(CREATE_HAL_FUNCTIONS, arch) \ + +// +// Declare HAL function dispatch functions +// +// This macro takes the xmacro _FUNCTION_LIST and uses some components in it to +// automatically generate the function prototypes for the dispatcher functions +// that dereference the correct arch HAL function. +// + +#define DECLARE_HAL_DISPATCHERS(_return, _function, _params, _arch) \ + _return _function _params; + +NVSWITCH_HAL_FUNCTION_LIST(DECLARE_HAL_DISPATCHERS, unused_argument) + +// HAL functions +void nvswitch_setup_hal_lr10(nvswitch_device *device); + +#endif //_HALDEF_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/inforom/inforom_nvswitch.h b/src/common/nvswitch/kernel/inc/inforom/inforom_nvswitch.h new file mode 100644 index 000000000..98f43d2ba --- /dev/null +++ b/src/common/nvswitch/kernel/inc/inforom/inforom_nvswitch.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _INFOROM_NVSWITCH_H_ +#define _INFOROM_NVSWITCH_H_ + +#include "inforom/ifrstruct.h" +#include "inforom/omsdef.h" +#include "nv_list.h" +#include "smbpbi_shared_nvswitch.h" + +#define INFOROM_MAX_PACKED_SIZE (32*1024) + +#define INFOROM_FS_FILE_SIZE(pPackedFile) \ + (((pPackedFile)[INFOROM_OBJECT_HEADER_V1_00_SIZE_OFFSET]) | \ + ((pPackedFile)[INFOROM_OBJECT_HEADER_V1_00_SIZE_OFFSET + 1] << 8)) +#define INFOROM_FS_FILE_NAMES_MATCH(fileName1, fileName2) \ + ((((NvU8)((fileName1)[0])) == ((NvU8)((fileName2)[0]))) && \ + (((NvU8)((fileName1)[1])) == ((NvU8)((fileName2)[1]))) && \ + (((NvU8)((fileName1)[2])) == ((NvU8)((fileName2)[2])))) + +#define INFOROM_FS_COPY_FILE_NAME(destName, srcName) \ +{ \ + (destName)[0] = (srcName)[0]; \ + (destName)[1] = (srcName)[1]; \ + (destName)[2] = (srcName)[2]; \ +} + +struct INFOROM_OBJECT_CACHE_ENTRY +{ + INFOROM_OBJECT_HEADER_V1_00 header; + struct INFOROM_OBJECT_CACHE_ENTRY *pNext; +}; + +struct inforom +{ + // InfoROM Objects + // RO objects - statically allocated as the InfoROM should always contain + // these objects. + struct + { + NvBool bValid; + NvU8 packedObject[INFOROM_OBD_OBJECT_V1_XX_PACKED_SIZE]; + INFOROM_OBD_OBJECT_V1_XX object; + } OBD; + + struct + { + NvBool bValid; + NvU8 packedObject[INFOROM_OEM_OBJECT_V1_00_PACKED_SIZE]; + INFOROM_OEM_OBJECT_V1_00 object; + } OEM; + + struct + { + NvBool bValid; + NvU8 packedObject[INFOROM_IMG_OBJECT_V1_00_PACKED_SIZE]; + INFOROM_IMG_OBJECT_V1_00 object; + } IMG; + + INFOROM_ECC_STATE *pEccState; + INFOROM_OMS_STATE *pOmsState; + + // + // descriptor cache for all the inforom objects. This is to handle inforom objects in a generic way. + // + struct INFOROM_OBJECT_CACHE_ENTRY *pObjectCache; +}; + +// Generic InfoROM APIs +NvlStatus nvswitch_initialize_inforom(nvswitch_device *device); +NvlStatus nvswitch_inforom_read_object(nvswitch_device* device, + const char *objectName, const char *pObjectFormat, + NvU8 *pPackedObject, void *pObject); +NvlStatus nvswitch_inforom_write_object(nvswitch_device* device, + const char *objectName, const char *pObjectFormat, + void *pObject, NvU8 *pOldPackedObject); +void nvswitch_destroy_inforom(nvswitch_device *device); +NvlStatus nvswitch_inforom_add_object(struct inforom *pInforom, + INFOROM_OBJECT_HEADER_V1_00 *pHeader); +NvlStatus nvswitch_inforom_get_object_version_info(nvswitch_device *device, + const char *objectName, NvU8 *pVersion, NvU8 *pSubVersion); +void *nvswitch_add_halinfo_node(NVListPtr head, int type, int size); +void *nvswitch_get_halinfo_node(NVListPtr head, int type); +void nvswitch_inforom_post_init(nvswitch_device *device); +NvlStatus nvswitch_initialize_inforom_objects(nvswitch_device *device); +void nvswitch_destroy_inforom_objects(nvswitch_device *device); +NvlStatus nvswitch_inforom_load_object(nvswitch_device* device, + struct inforom *pInforom, const char *objectName, + const char *pObjectFormat, NvU8 *pPackedObject, void *pObject); +void nvswitch_inforom_read_static_data(nvswitch_device *device, + struct inforom *pInforom, RM_SOE_SMBPBI_INFOROM_DATA *pData); + +// InfoROM RO APIs +NvlStatus nvswitch_inforom_read_only_objects_load(nvswitch_device *device); + +// InfoROM NVL APIs +NvlStatus nvswitch_inforom_nvlink_load(nvswitch_device *device); +void nvswitch_inforom_nvlink_unload(nvswitch_device *device); +NvlStatus nvswitch_inforom_nvlink_flush(nvswitch_device *device); +NvlStatus nvswitch_inforom_nvlink_get_minion_data(nvswitch_device *device, + NvU8 linkId, NvU32 *seedData); +NvlStatus 
nvswitch_inforom_nvlink_set_minion_data(nvswitch_device *device, + NvU8 linkId, NvU32 *seedData, NvU32 size); +NvlStatus nvswitch_inforom_nvlink_log_error_event(nvswitch_device *device, void *error_event); +NvlStatus nvswitch_inforom_nvlink_get_max_correctable_error_rate(nvswitch_device *device, + NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params); +NvlStatus nvswitch_inforom_nvlink_get_errors(nvswitch_device *device, + NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params); + +// InfoROM ECC APIs +NvlStatus nvswitch_inforom_ecc_load(nvswitch_device *device); +void nvswitch_inforom_ecc_unload(nvswitch_device *device); +NvlStatus nvswitch_inforom_ecc_flush(nvswitch_device *device); +NvlStatus nvswitch_inforom_ecc_log_err_event(nvswitch_device *device, + INFOROM_NVS_ECC_ERROR_EVENT *err_event); +NvlStatus nvswitch_inforom_ecc_get_errors(nvswitch_device *device, + NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params); + +// InfoROM OMS APIs +NvlStatus nvswitch_inforom_oms_load(nvswitch_device *device); +void nvswitch_inforom_oms_unload(nvswitch_device *device); +NvlStatus nvswitch_inforom_oms_set_device_disable(nvswitch_device *device, + NvBool bDisable); +NvlStatus nvswitch_inforom_oms_get_device_disable(nvswitch_device *device, + NvBool *pBDisabled); + +// InfoROM BBX APIs +NvlStatus nvswitch_inforom_bbx_load(nvswitch_device *device); +void nvswitch_inforom_bbx_unload(nvswitch_device * device); +NvlStatus nvswitch_inforom_bbx_add_sxid(nvswitch_device *device, + NvU32 exceptionType, NvU32 data0, + NvU32 data1, NvU32 data2); +void nvswitch_bbx_collect_current_time(nvswitch_device *device, + void *pBbxState); +NvlStatus nvswitch_inforom_bbx_get_sxid(nvswitch_device *device, + NVSWITCH_GET_SXIDS_PARAMS *params); + +// InfoROM DEM APIs +NvlStatus nvswitch_inforom_dem_load(nvswitch_device *device); +void nvswitch_inforom_dem_unload(nvswitch_device * device); +#endif // _INFOROM_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/intr_nvswitch.h b/src/common/nvswitch/kernel/inc/intr_nvswitch.h new file mode 100644 index 000000000..c6331735a --- /dev/null +++ b/src/common/nvswitch/kernel/inc/intr_nvswitch.h @@ -0,0 +1,211 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _INTR_NVSWITCH_H_ +#define _INTR_NVSWITCH_H_ + +#include "error_nvswitch.h" + +// +// Wrapper to track interrupt servicing +// +#define NVSWITCH_UNHANDLED_INIT(val) (unhandled = (val)) +#define NVSWITCH_HANDLED(mask) (unhandled &= ~(mask)) + +#define NVSWITCH_UNHANDLED_CHECK(_device, _unhandled) \ + do \ + { \ + if (_unhandled) \ + { \ + NVSWITCH_PRINT(_device, ERROR, \ + "%s:%d unhandled interrupt! %x\n", \ + __FUNCTION__, __LINE__, _unhandled); \ + NVSWITCH_PRINT_SXID(_device, \ + NVSWITCH_ERR_HW_HOST_UNHANDLED_INTERRUPT, \ + "Fatal, unhandled interrupt\n"); \ + NVSWITCH_LOG_FATAL_DATA(_device, _HW, \ + _HW_HOST_UNHANDLED_INTERRUPT, 0, 0, NV_FALSE, &_unhandled);\ + } \ + } while(0) + +// +// Wrappers for basic leaf interrupt handling +// +#define NVSWITCH_PENDING(_bit) ((bit = (_bit)) && (pending & (_bit))) +#define NVSWITCH_FIRST() (bit & report.raw_first) ? " (First)" : "" + +// +// Report/log error interrupt helper. +// + +// +// Print an intermediate point (non-leaf) in the interrupt tree. +// +#define NVSWITCH_REPORT_TREE(_logenum) \ + do \ + { \ + NVSWITCH_PRINT(device, ERROR, "Intermediate, Link %02d \n", link); \ + } while(0) + +// Log correctable errors +#define NVSWITCH_REPORT_CORRECTABLE(_logenum, _str) \ + do \ + { \ + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \ + "Correctable, Link %02d %s%s\n", link, _str, NVSWITCH_FIRST()); \ + NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, \ + link, 0, NV_TRUE, &report); \ + if (nvswitch_lib_notify_client_events(device, \ + NVSWITCH_DEVICE_EVENT_NONFATAL) != NVL_SUCCESS) \ + { \ + NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n", \ + __FUNCTION__); \ + } \ + } while(0) + +// Log uncorrectable error that is not fatal to the fabric +#define NVSWITCH_REPORT_NONFATAL(_logenum, _str) \ + do \ + { \ + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \ + "Non-fatal, Link %02d %s%s\n", link, _str, NVSWITCH_FIRST()); \ + NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, \ + link, 0, NV_FALSE, &report); \ + if (nvswitch_lib_notify_client_events(device, \ + NVSWITCH_DEVICE_EVENT_NONFATAL) != NVL_SUCCESS) \ + { \ + NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n", \ + __FUNCTION__); \ + } \ + } while(0) + +// Log uncorrectable error that is fatal to the fabric +#define NVSWITCH_REPORT_FATAL(_logenum, _str, device_fatal) \ + do \ + { \ + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \ + "Fatal, Link %02d %s%s\n", link, _str, NVSWITCH_FIRST()); \ + NVSWITCH_LOG_FATAL_DATA(device, _HW, _logenum, \ + link, 0, NV_FALSE, &report); \ + nvswitch_set_fatal_error(device, device_fatal, link); \ + if (nvswitch_lib_notify_client_events(device, \ + NVSWITCH_DEVICE_EVENT_FATAL) != NVL_SUCCESS) \ + { \ + NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n",\ + __FUNCTION__); \ + } \ + } while(0) + +#define NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_logenum, _str, instance, chiplet, err_data) \ + do \ + { \ + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR ## _logenum, \ + "Non-fatal, %s, instance=%d, chiplet=%d\n", _str, instance, chiplet); \ + NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, \ + instance, chiplet, NV_FALSE, &err_data); \ + if (nvswitch_lib_notify_client_events(device, \ + NVSWITCH_DEVICE_EVENT_NONFATAL) != NVL_SUCCESS) \ + { \ + NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n", \ + __FUNCTION__); \ + } \ + } while(0) + +#define NVSWITCH_REPORT_PRI_ERROR_FATAL(_logenum, _str, device_fatal, instance, chiplet, err_data) \ + do \ + { \ + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR 
## _logenum, \ + "Fatal, %s, instance=%d, chiplet=%d\n", _str, instance, chiplet); \ + NVSWITCH_LOG_FATAL_DATA(device, _HW, _logenum, \ + instance, chiplet, NV_FALSE, &err_data); \ + nvswitch_set_fatal_error(device, device_fatal, 0); \ + if (nvswitch_lib_notify_client_events(device, \ + NVSWITCH_DEVICE_EVENT_FATAL) != NVL_SUCCESS) \ + { \ + NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify event\n", \ + __FUNCTION__); \ + } \ + } while(0) + +/* + * Automatically determine if error is fatal to the fabric based on + * if it is contained and will lock the port. + */ +#define NVSWITCH_REPORT_CONTAIN(_logenum, _str, device_fatal) \ + do \ + { \ + if (bit & contain) \ + { \ + NVSWITCH_REPORT_FATAL(_logenum, _str, device_fatal); \ + } \ + else \ + { \ + NVSWITCH_REPORT_NONFATAL(_logenum, _str); \ + } \ + } while (0) + +/* + * REPORT_*_DATA macros - optionally log data record for additional HW state. This + * is typically a captured packet, but there are a few other cases. + * + * Most interrupt controllers only latch additional data for errors tagged as first. + * For those cases use _FIRST to only log the data record when it is accurate. If + * two errors are detected in the same cycle, they will both be set in first. + */ +#define NVSWITCH_REPORT_DATA(_logenum, _data) \ + NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, link, 0, NV_TRUE, &_data) + +#define NVSWITCH_REPORT_DATA_FIRST(_logenum, _data) \ + do \ + { \ + if (report.raw_first & bit) \ + { \ + NVSWITCH_REPORT_DATA(_logenum, _data); \ + } \ + } while(0) + +#define NVSWITCH_REPORT_CONTAIN_DATA(_logenum, _data) \ + do \ + { \ + if (bit & contain) \ + { \ + NVSWITCH_LOG_FATAL_DATA(device, _HW, _logenum, link, \ + 0, NV_FALSE, &_data); \ + } \ + else \ + { \ + NVSWITCH_LOG_NONFATAL_DATA(device, _HW, _logenum, link, \ + 0, NV_FALSE, &data); \ + } \ + } while(0) + +#define NVSWITCH_REPORT_CONTAIN_DATA_FIRST(_logenum, _data) \ + do \ + { \ + if (bit & report.raw_first) \ + { \ + NVSWITCH_REPORT_CONTAIN_DATA(_logenum, _data); \ + } \ + } while(0) + +#endif //_INTR_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/io_nvswitch.h b/src/common/nvswitch/kernel/inc/io_nvswitch.h new file mode 100644 index 000000000..7cfc04f9a --- /dev/null +++ b/src/common/nvswitch/kernel/inc/io_nvswitch.h @@ -0,0 +1,437 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _IO_NVSWITCH_H_ +#define _IO_NVSWITCH_H_ + +// NVSWITCH_REG_* MMIO wrappers are to be used for absolute symbolic BAR0 offset +// register references like SMC, CLOCK, BUS, and PRIV_MASTER. +// + +#define NVSWITCH_REG_RD32(_d, _dev, _reg) \ + ( \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: MEM_RD: %s, %s (+%04x)\n", \ + __FUNCTION__, \ + #_dev, #_reg, NV ## _dev ## _reg) \ + , \ + nvswitch_reg_read_32(_d, NV##_dev##_reg) \ + ); \ + ((void)(_d)) + +#define NVSWITCH_REG_WR32(_d, _dev, _reg, _data) \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: MEM_WR: %s, %s (+%04x) 0x%08x\n", \ + __FUNCTION__, \ + #_dev, #_reg, NV ## _dev ## _reg, _data); \ + nvswitch_reg_write_32(_d, NV##_dev##_reg, _data); \ + ((void)(_d)) + +// +// NVSWITCH_OFF_* MMIO wrappers are used to access a fully formed BAR0 offset. +// + +#define NVSWITCH_OFF_RD32(_d, _off) \ + nvswitch_reg_read_32(_d, _off); \ + ((void)(_d)) + +#define NVSWITCH_OFF_WR32(_d, _off, _data) \ + nvswitch_reg_write_32(_d, _off, _data); \ + ((void)(_d)) + +#define NVSWITCH_ENGINE_DESCRIPTOR_UC_SIZE 64 +#define NVSWITCH_ENGINE_DESCRIPTOR_MC_SIZE 3 + +#define NVSWITCH_ENGINE_INSTANCE_INVALID ((NvU32) (~0)) + +typedef struct engine_descriptor +{ + const char *eng_name; + NvU32 eng_id; // REGISTER_RW_ENGINE_* + NvU32 eng_count; + NvU32 uc_addr[NVSWITCH_ENGINE_DESCRIPTOR_UC_SIZE]; + NvU32 bc_addr; + NvU32 mc_addr[NVSWITCH_ENGINE_DESCRIPTOR_MC_SIZE]; + NvU32 mc_addr_count; +} NVSWITCH_ENGINE_DESCRIPTOR_TYPE; + +#define NVSWITCH_DECLARE_IO_DESCRIPTOR(_engine, _bcast) \ + NVSWITCH_ENGINE_DESCRIPTOR_TYPE _engine; + +#define NVSWITCH_BASE_ADDR_INVALID ((NvU32) (~0)) + +// +// All IP-based (0-based register manuals) engines that ever existed on *ANY* +// architecture(s) must be listed here in order to use the common IO wrappers. +// New engines need to be added here as well as in the chip-specific lists in +// their respective headers that generate chip-specific handlers. +// Absolute BAR0 offset-based units are legacy units in which the unit's offset +// in BAR0 is included in the register definition in the manuals. For these +// legacy units the discovered base is not used since it is already part of the +// register. Legacy units (e.g. PSMC, CLOCK, BUS, and PRIV_MASTER) should use +// NVSWITCH_REG_RD/WR IO wrappers. +// + +#define NVSWITCH_LIST_ALL_ENGINES(_op) \ + _op(XVE) \ + _op(SAW) \ + _op(SOE) \ + _op(SMR) \ + _op(GIN) \ + _op(XAL) \ + _op(XAL_FUNC) \ + _op(XPL) \ + _op(XTL) \ + _op(XTL_CONFIG) \ + _op(UXL) \ + _op(GPU_PTOP) \ + _op(PMC) \ + _op(PBUS) \ + _op(ROM2) \ + _op(GPIO) \ + _op(FSP) \ + _op(SYSCTRL) \ + _op(CLKS_SYS) \ + _op(CLKS_SYSB) \ + _op(CLKS_P0) \ + _op(SAW_PM) \ + _op(PCIE_PM) \ + _op(PRT_PRI_HUB) \ + _op(PRT_PRI_RS_CTRL) \ + _op(SYS_PRI_HUB) \ + _op(SYS_PRI_RS_CTRL) \ + _op(SYSB_PRI_HUB) \ + _op(SYSB_PRI_RS_CTRL) \ + _op(PRI_MASTER_RS) \ + _op(PTIMER) \ + \ + _op(NPG) \ + _op(NPORT) \ + \ + _op(NVLW) \ + _op(MINION) \ + _op(NVLIPT) \ + _op(NVLIPT_LNK) \ + _op(NVLTLC) \ + _op(NVLDL) \ + _op(CPR) \ + \ + _op(NXBAR) \ + _op(TILE) \ + _op(TILEOUT) \ + \ + _op(NPG_PERFMON) \ + _op(NPORT_PERFMON) \ + \ + _op(NVLW_PERFMON) \ + _op(RX_PERFMON) \ + _op(TX_PERFMON) \ + \ + _op(NXBAR_PERFMON) \ + _op(TILE_PERFMON) \ + _op(TILEOUT_PERFMON) \ + +#define ENGINE_ID_LIST(_eng) \ + NVSWITCH_ENGINE_ID_##_eng, + +// +// ENGINE_IDs are the complete list of all engines that are supported on +// *ANY* architecture(s) that may support them. Any one architecture may or +// may not understand how to operate on any one specific engine. 
+// Architectures that share a common ENGINE_ID are not guaranteed to have +// compatible manuals. +// +typedef enum nvswitch_engine_id +{ + NVSWITCH_LIST_ALL_ENGINES(ENGINE_ID_LIST) + NVSWITCH_ENGINE_ID_SIZE, +} NVSWITCH_ENGINE_ID; + +// +// NVSWITCH_ENG_* MMIO wrappers are to be used for top level discovered +// devices like SAW, FUSE, PMGR, XVE, etc. +// + +#define NVSWITCH_GET_ENG_DESC_TYPE 0 +#define NVSWITCH_GET_ENG_DESC_TYPE_UNICAST NVSWITCH_GET_ENG_DESC_TYPE +#define NVSWITCH_GET_ENG_DESC_TYPE_BCAST 1 +#define NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST 2 + +#define NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx) \ + ((_d)->hal.nvswitch_get_eng_base( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \ + _engidx)) + +#define NVSWITCH_ENG_COUNT(_d, _eng, _bcast) \ + ((_d)->hal.nvswitch_get_eng_count( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE##_bcast)) + +#define NVSWITCH_ENG_IS_VALID(_d, _eng, _engidx) \ + ( \ + NVSWITCH_GET_ENG(_d, _eng, , _engidx) != NVSWITCH_BASE_ADDR_INVALID \ + ) + +#define NVSWITCH_ENG_WR32(_d, _eng, _bcast, _engidx, _dev, _reg, _data) \ + { \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: MEM_WR %s[%d]: %s, %s (%06x+%04x) 0x%08x\n", \ + __FUNCTION__, \ + #_eng#_bcast, _engidx, \ + #_dev, #_reg, \ + NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \ + NV ## _dev ## _reg, _data); \ + \ + ((_d)->hal.nvswitch_eng_wr( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \ + _engidx, \ + NV ## _dev ## _reg, _data)); \ + } + +#define NVSWITCH_ENG_RD32(_d, _eng, _bcast, _engidx, _dev, _reg) \ + ( \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: MEM_RD %s[%d]: %s, %s (%06x+%04x)\n", \ + __FUNCTION__, \ + #_eng#_bcast, _engidx, \ + #_dev, #_reg, \ + NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \ + NV ## _dev ## _reg) \ + , \ + ((_d)->hal.nvswitch_eng_rd( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \ + _engidx, \ + NV ## _dev ## _reg)) \ + ); \ + ((void)(_d)) + +#define NVSWITCH_ENG_WR32_IDX(_d, _eng, _bcast, _engidx, _dev, _reg, _idx, _data) \ + { \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: MEM_WR %s[%d]: %s, %s(%d) (%06x+%04x) 0x%08x\n", \ + __FUNCTION__, \ + #_eng#_bcast, _engidx, \ + #_dev, #_reg, _idx, \ + NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \ + NV ## _dev ## _reg(_idx), _data); \ + \ + ((_d)->hal.nvswitch_eng_wr( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \ + _engidx, \ + NV ## _dev ## _reg(_idx), _data)); \ + } + +#define NVSWITCH_ENG_RD32_IDX(_d, _eng, _bcast, _engidx, _dev, _reg, _idx) \ + ( \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: MEM_RD %s[%d]: %s, %s(%d) (%06x+%04x)\n", \ + __FUNCTION__, \ + #_eng#_bcast, _engidx, \ + #_dev, #_reg, _idx, \ + NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \ + NV ## _dev ## _reg(_idx)) \ + , \ + ((_d)->hal.nvswitch_eng_rd( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \ + _engidx, \ + NV ## _dev ## _reg(_idx))) \ + ); \ + ((void)(_d)) + +#define NVSWITCH_ENG_OFF_WR32(_d, _eng, _bcast, _engidx, _offset, _data) \ + { \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: MEM_WR %s[%d]: 0x%x (%06x+%04x) 0x%08x\n", \ + __FUNCTION__, \ + #_eng#_bcast, _engidx, \ + _offset, \ + NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \ + _offset, _data); \ + ((_d)->hal.nvswitch_eng_wr( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \ + _engidx, \ + _offset, _data)); \ + } + +#define NVSWITCH_ENG_OFF_RD32(_d, _eng, _bcast, _engidx, _offset) \ + ( \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: 
MEM_RD %s[%d]: 0x%x (%06x+%04x)\n", \ + __FUNCTION__, \ + #_eng#_bcast, _engidx, \ + _offset, \ + NVSWITCH_GET_ENG(_d, _eng, _bcast, _engidx), \ + _offset) \ + , \ + ((_d)->hal.nvswitch_eng_rd( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE##_bcast, \ + _engidx, \ + _offset)) \ + ) + +// +// Per-link information +// + +#define NVSWITCH_MAX_LINK_COUNT 64 + +#define NVSWITCH_MAX_SEED_BUFFER_SIZE NVSWITCH_MAX_SEED_NUM + 1 + +#define NVSWITCH_MAX_INBAND_BUFFER_SIZE 256*8 +#define NVSWITCH_MAX_INBAND_BITS_SENT_AT_ONCE 32 +#define NVSWITCH_MAX_INBAND_BUFFER_ENTRIES NVSWITCH_MAX_INBAND_BUFFER_SIZE/NVSWITCH_MAX_INBAND_BITS_SENT_AT_ONCE + +// +// Inband data structure +// +struct nvswitch_inband_data +{ + // Inband bufer at sender Minion + NvU32 sendBuffer[NVSWITCH_MAX_INBAND_BUFFER_ENTRIES]; + + // Inband buffer at receiver Minion + NvU32 receiveBuffer[NVSWITCH_MAX_INBAND_BUFFER_ENTRIES]; + + // Is the current Minion a sender or receiver of Inband Data? + NvBool bIsSenderMinion; + + // Bool to say fail or not + NvBool bTransferFail; + + // # of transmisions done - count + // NvU32 txCount; +}; + +typedef struct +{ + NvBool valid; + NvU32 link_clock_khz; + + NvBool fatal_error_occurred; + NvBool ingress_packet_latched; + NvBool egress_packet_latched; + + NvBool nea; // Near end analog + NvBool ned; // Near end digital + + NvU32 lane_rxdet_status_mask; + + NvBool bIsRepeaterMode; + + // Minion Inband Data structure + struct nvswitch_inband_data inBandData; + +} NVSWITCH_LINK_TYPE; + +// +// Per link register access routines +// LINK_* MMIO wrappers are used to reference per-link engine instances +// + +#define NVSWITCH_LINK_COUNT(_d) \ + (nvswitch_get_num_links(_d)) + +#define NVSWITCH_GET_LINK_ENG_INST(_d, _linknum, _eng) \ + nvswitch_get_link_eng_inst(_d, _linknum, NVSWITCH_ENGINE_ID_##_eng) + +#define NVSWITCH_IS_LINK_ENG_VALID(_d, _linknum, _eng) \ + ( \ + (NVSWITCH_GET_ENG(_d, _eng, , \ + NVSWITCH_GET_LINK_ENG_INST(_d, _linknum, _eng)) \ + != NVSWITCH_BASE_ADDR_INVALID) && \ + nvswitch_is_link_valid(_d, _linknum) \ + ) + +#define NVSWITCH_LINK_OFFSET(_d, _physlinknum, _eng, _dev, _reg) \ + ( \ + NVSWITCH_ASSERT(NVSWITCH_IS_LINK_ENG_VALID(_d, _physlinknum, _eng)) \ + , \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: LINK_OFFSET link[%d] %s: %s,%s (+%04x)\n", \ + __FUNCTION__, \ + _physlinknum, \ + #_eng, #_dev, #_reg, NV ## _dev ## _reg) \ + , \ + NVSWITCH_GET_ENG(_d, _eng, , \ + NVSWITCH_GET_LINK_ENG_INST(_d, _physlinknum, _eng)) + \ + NV##_dev##_reg \ + ) + +#define NVSWITCH_LINK_WR32(_d, _physlinknum, _eng, _dev, _reg, _data) \ + NVSWITCH_ASSERT(NVSWITCH_IS_LINK_ENG_VALID(_d, _physlinknum, _eng)); \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: LINK_WR link[%d] %s: %s,%s (+%04x) 0x%08x\n", \ + __FUNCTION__, \ + _physlinknum, \ + #_eng, #_dev, #_reg, NV ## _dev ## _reg, _data); \ + ((_d)->hal.nvswitch_eng_wr( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE_UNICAST, \ + NVSWITCH_GET_LINK_ENG_INST(_d, _physlinknum, _eng), \ + NV ## _dev ## _reg, _data)); \ + ((void)(_d)) + +#define NVSWITCH_LINK_RD32(_d, _physlinknum, _eng, _dev, _reg) \ + ( \ + NVSWITCH_ASSERT(NVSWITCH_IS_LINK_ENG_VALID(_d, _physlinknum, _eng)) \ + , \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: LINK_RD link[%d] %s: %s,%s (+%04x)\n", \ + __FUNCTION__, \ + _physlinknum, \ + #_eng, #_dev, #_reg, NV ## _dev ## _reg) \ + , \ + ((_d)->hal.nvswitch_eng_rd( \ + _d, \ + NVSWITCH_ENGINE_ID_##_eng, \ + NVSWITCH_GET_ENG_DESC_TYPE_UNICAST, \ + NVSWITCH_GET_LINK_ENG_INST(_d, _physlinknum, _eng), \ + NV ## _dev ## _reg)) \ + 
); \ + ((void)(_d)) + +#define NVSWITCH_LINK_WR32_IDX(_d, _physlinknum, _eng, _dev, _reg, _idx, _data) \ + NVSWITCH_LINK_WR32(_d, _physlinknum, _eng, _dev, _reg(_idx), _data); \ + ((void)(_d)) + +#define NVSWITCH_LINK_RD32_IDX(_d, _physlinknum, _eng, _dev, _reg, _idx) \ + NVSWITCH_LINK_RD32(_d, _physlinknum, _eng, _dev, _reg(_idx)); \ + ((void)(_d)) + +#endif //_IO_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/ipmi/fru_nvswitch.h b/src/common/nvswitch/kernel/inc/ipmi/fru_nvswitch.h new file mode 100644 index 000000000..b76bbe748 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/ipmi/fru_nvswitch.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _FRU_NVSWITCH_H_ +#define _FRU_NVSWITCH_H_ + +#include "common_nvswitch.h" + +// +// FRU EEPROM board data +// Defined according to +// ipmi-platform-mgt-fru-infostorage-def-v1-0-rev-1-3-spec +// +#define NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE 7:6 +#define NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_LENGTH 5:0 +#define NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE_ASCII_6BIT (0x2) +#define NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE_ASCII_8BIT (0x3) +#define NVSWITCH_IPMI_FRU_SENTINEL (0xC1) + +// this includes null term +#define NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN 64 + +// mfgDateTime is in minutes from 0:00 hrs 1/1/1996 +typedef struct +{ + NvU32 mfgDateTime; + char mfg[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN]; + char productName[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN]; + char serialNum[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN]; + char partNum[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN]; + char fileId[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN]; + char customMfgInfo[NVSWITCH_IPMI_FRU_BOARD_INFO_FIELD_MAX_LEN]; +} NVSWITCH_IPMI_FRU_BOARD_INFO; + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER, 1) +{ + NvU8 version; + NvU8 internalUseOffset; + NvU8 chassisInfoOffset; + NvU8 boardInfoOffset; + NvU8 productInfoOffset; + NvU8 multirecordOffset; + NvU8 padding; + NvU8 checksum; +} NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +// +// Board Info area will be (size * 8) bytes. 
The last byte is a checksum byte +// +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_IPMI_FRU_EEPROM_BOARD_INFO, 1) +{ + NvU8 version; + NvU8 size; + NvU8 languageCode; + NVSWITCH_IPMI_FRU_BOARD_INFO boardInfo; // True size in rom could be smaller, layout will be different +} NVSWITCH_IPMI_FRU_EEPROM_BOARD_INFO; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +NvlStatus nvswitch_read_partition_fru_board_info(nvswitch_device *device, + NVSWITCH_IPMI_FRU_BOARD_INFO *pBoardInfo, + NvU8 *pRomImage); + +#endif //_FRU_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/clock_lr10.h b/src/common/nvswitch/kernel/inc/lr10/clock_lr10.h new file mode 100644 index 000000000..4a38ff3cc --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/clock_lr10.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _CLOCK_LR10_H_ +#define _CLOCK_LR10_H_ + +NvlStatus +nvswitch_init_pll_config_lr10 +( + nvswitch_device *device +); + +NvlStatus +nvswitch_init_pll_lr10 +( + nvswitch_device *device +); + +void +nvswitch_init_hw_counter_lr10 +( + nvswitch_device *device +); + +void +nvswitch_hw_counter_shutdown_lr10 +( + nvswitch_device *device +); + +NvU64 +nvswitch_hw_counter_read_counter_lr10 +( + nvswitch_device *device +); + +void +nvswitch_init_clock_gating_lr10 +( + nvswitch_device *device +); + +#endif //_CLOCK_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/inforom_lr10.h b/src/common/nvswitch/kernel/inc/lr10/inforom_lr10.h new file mode 100644 index 000000000..1196feb2e --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/inforom_lr10.h @@ -0,0 +1,163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _INFOROM_LR10_H_ +#define _INFOROM_LR10_H_ + +NvlStatus nvswitch_inforom_nvl_log_error_event_lr10 +( + nvswitch_device *device, + void *pNvlGeneric, + void *pNvlErrorEvent, + NvBool *bDirty +); + +NvlStatus nvswitch_inforom_nvl_update_link_correctable_error_info_lr10 +( + nvswitch_device *device, + void *pNvlGeneric, + void *pData, + NvU8 linkId, + NvU8 nvliptInstance, + NvU8 localLinkIdx, + void *pNvlErrorCounts, + NvBool *bDirty +); + +NvlStatus +nvswitch_inforom_nvl_get_max_correctable_error_rate_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params +); + +NvlStatus +nvswitch_inforom_nvl_get_errors_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params +); + +NvlStatus +nvswitch_inforom_ecc_log_error_event_lr10 +( + nvswitch_device *device, + INFOROM_ECC_OBJECT *pEccGeneric, + INFOROM_NVS_ECC_ERROR_EVENT *err_event +); + +void +nvswitch_inforom_ecc_get_total_errors_lr10 +( + nvswitch_device *device, + INFOROM_ECC_OBJECT *pEccGeneric, + NvU64 *pCorrectedTotal, + NvU64 *pUncorrectedTotal +); + +NvlStatus +nvswitch_inforom_ecc_get_errors_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params +); + +void +nvswitch_initialize_oms_state_lr10 +( + nvswitch_device *device, + INFOROM_OMS_STATE *pOmsState +); + +NvBool +nvswitch_oms_get_device_disable_lr10 +( + INFOROM_OMS_STATE *pOmsState +); + +void +nvswitch_oms_set_device_disable_lr10 +( + INFOROM_OMS_STATE *pOmsState, + NvBool bForceDeviceDisable +); + +NvlStatus +nvswitch_oms_inforom_flush_lr10 +( + struct nvswitch_device *device +); + +NvlStatus +nvswitch_bbx_setup_prologue_lr10 +( + nvswitch_device *device, + void *pInforomBbxState +); + +NvlStatus +nvswitch_bbx_setup_epilogue_lr10 +( + nvswitch_device *device, + void *pInforomBbxState +); + +NvlStatus +nvswitch_bbx_add_data_time_lr10 +( + nvswitch_device *device, + void *pInforomBbxState, + void *pInforomBbxData +); + +NvlStatus +nvswitch_bbx_add_sxid_lr10 +( + nvswitch_device *device, + void *pInforomBbxState, + void *pInforomBbxData +); + +NvlStatus +nvswitch_bbx_add_temperature_lr10 +( + nvswitch_device *device, + void *pInforomBbxState, + void *pInforomBbxData +); + +void +nvswitch_bbx_set_initial_temperature_lr10 +( + nvswitch_device *device, + void *pInforomBbxState, + void *pInforomBbxData +); + +NvlStatus +nvswitch_inforom_bbx_get_sxid_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_SXIDS_PARAMS *params +); +#endif //_INFOROM_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/lr10.h b/src/common/nvswitch/kernel/inc/lr10/lr10.h new file mode 100644 index 000000000..e0421f308 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/lr10.h @@ -0,0 +1,653 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _LR10_H_ +#define _LR10_H_ + +#include "nvlink.h" +#include "nvCpuUuid.h" + +#include "export_nvswitch.h" +#include "common_nvswitch.h" +#include "pmgr_nvswitch.h" +#include "rom_nvswitch.h" +#include "error_nvswitch.h" + +#include "ctrl_dev_nvswitch.h" + +#include "nvswitch/lr10/dev_nvs_master.h" + +// +// Re-direction to use new common link access wrappers +// + +#define NVSWITCH_IS_LINK_ENG_VALID_LR10(_d, _eng, _linknum) \ + NVSWITCH_IS_LINK_ENG_VALID(_d, _linknum, _eng) + +#define NVSWITCH_LINK_OFFSET_LR10(_d, _physlinknum, _eng, _dev, _reg) \ + NVSWITCH_LINK_OFFSET(_d, _physlinknum, _eng, _dev, _reg) + +#define NVSWITCH_LINK_WR32_LR10(_d, _physlinknum, _eng, _dev, _reg, _data) \ + NVSWITCH_LINK_WR32(_d, _physlinknum, _eng, _dev, _reg, _data) + +#define NVSWITCH_LINK_RD32_LR10(_d, _physlinknum, _eng, _dev, _reg) \ + NVSWITCH_LINK_RD32(_d, _physlinknum, _eng, _dev, _reg) + +#define NVSWITCH_LINK_WR32_IDX_LR10(_d, _physlinknum, _eng, _dev, _reg, _idx, _data) \ + NVSWITCH_LINK_WR32_IDX(_d, _physlinknum, _eng, _dev, _reg, _idx, _data) + +#define NVSWITCH_LINK_RD32_IDX_LR10(_d, _physlinknum, _eng, _dev, _reg, _idx) \ + NVSWITCH_LINK_RD32_IDX(_d, _physlinknum, _eng, _dev, _reg, _idx) + +// +// NVSWITCH_ENG_* MMIO wrappers are to be used for top level discovered +// devices like SAW, FUSE, PMGR, XVE, etc. 
+// + +#define NVSWITCH_ENG_WR32_LR10(_d, _eng, _bcast, _engidx, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32(_d, _eng, _bcast, _engidx, _dev, _reg, _data) + +#define NVSWITCH_ENG_RD32_LR10(_d, _eng, _engidx, _dev, _reg) \ + NVSWITCH_ENG_RD32(_d, _eng, , _engidx, _dev, _reg) + +#define NVSWITCH_ENG_WR32_IDX_LR10(_d, _eng, _bcast, _engidx, _dev, _reg, _idx, _data) \ + NVSWITCH_ENG_WR32_IDX(_d, _eng, _bcast, _engidx, _dev, _reg, _idx, _data) + +#define NVSWITCH_BCAST_WR32_LR10(_d, _eng, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, _eng, _BCAST, 0, _dev, _reg, _data) + +#define NVSWITCH_BCAST_RD32_LR10(_d, _eng, _dev, _reg) \ + NVSWITCH_ENG_RD32(_d, _eng, _BCAST, 0, bc, _dev, _reg) + +#define NVSWITCH_CLK_NVLINK_RD32_LR10(_d, _reg, _idx) \ + NVSWITCH_REG_RD32(_d, _PCLOCK, _NVSW_NVLINK##_reg(_idx)) + +#define NVSWITCH_CLK_NVLINK_WR32_LR10(_d, _reg, _idx, _data) \ + if (IS_RTLSIM(_d) || IS_FMODEL(_d)) \ + { \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: Skip write NV_PCLOCK_NVSW_NVLINK%d %s (0x%06x) on FSF\n", \ + __FUNCTION__, \ + _idx, #_reg, \ + NV_PCLOCK_NVSW_NVLINK##_reg(_idx)); \ + } \ + else \ + { \ + NVSWITCH_REG_WR32(_d, _PCLOCK, _NVSW_NVLINK##_reg(_idx), _data); \ + } + +#define NVSWITCH_ENG_VALID_LR10(_d, _eng, _engidx) \ + ( \ + ((_engidx < NUM_##_eng##_ENGINE_LR10) && \ + (NVSWITCH_GET_CHIP_DEVICE_LR10(_d)->eng##_eng[_engidx].valid)) ? \ + NV_TRUE : NV_FALSE \ + ) + +#define NVSWITCH_SAW_RD32_LR10(_d, _dev, _reg) \ + NVSWITCH_ENG_RD32_LR10(_d, SAW, 0, _dev, _reg) + +#define NVSWITCH_SAW_WR32_LR10(_d, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, SAW, , 0, _dev, _reg, _data) + +#define NVSWITCH_NPG_RD32_LR10(_d, _engidx, _dev, _reg) \ + NVSWITCH_ENG_RD32_LR10(_d, NPG, _engidx, _dev, _reg) + +#define NVSWITCH_NPG_WR32_LR10(_d, _engidx, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, NPG, , _engidx, _dev, _reg, _data) + +#define NVSWITCH_NPGPERF_WR32_LR10(_d, _engidx, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, NPG_PERFMON, , _engidx, _dev, _reg, _data) + +#define NVSWITCH_NPORT_RD32_LR10(_d, _engidx, _dev, _reg) \ + NVSWITCH_ENG_RD32_LR10(_d, NPORT, _engidx, _dev, _reg) + +#define NVSWITCH_NPORT_WR32_LR10(_d, _engidx, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, NPORT, , _engidx, _dev, _reg, _data) + +#define NVSWITCH_NPORT_MC_BCAST_WR32_LR10(_d, _dev, _reg, _data) \ + NVSWITCH_BCAST_WR32_LR10(_d, NPORT, _dev, _reg, _data) + +#define NVSWITCH_NVLIPT_RD32_LR10(_d, _engidx, _dev, _reg) \ + NVSWITCH_ENG_RD32_LR10(_d, NVLIPT, _engidx, _dev, _reg) + +#define NVSWITCH_NVLIPT_WR32_LR10(_d, _engidx, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, NVLIPT, , _engidx, _dev, _reg, _data) + +typedef struct +{ + NvBool valid; + NvU32 initialized; + NvU32 version; + NvU32 disc_type; + union + { + struct + { + NvU32 cluster; + NvU32 cluster_id; + NvU32 discovery; // Used for top level only + } top; + struct + { + NvU32 uc_addr; + } uc; + struct + { + NvU32 bc_addr; + NvU32 mc_addr[3]; + } bc; + } info; +} ENGINE_DESCRIPTOR_TYPE_LR10; + +#define NUM_PTOP_ENGINE_LR10 1 +#define NUM_CLKS_ENGINE_LR10 1 +#define NUM_FUSE_ENGINE_LR10 1 +#define NUM_JTAG_ENGINE_LR10 1 +#define NUM_PMGR_ENGINE_LR10 1 +#define NUM_SAW_ENGINE_LR10 1 +#define NUM_XP3G_ENGINE_LR10 1 +#define NUM_XVE_ENGINE_LR10 1 +#define NUM_ROM_ENGINE_LR10 1 +#define NUM_EXTDEV_ENGINE_LR10 1 +#define NUM_PRIVMAIN_ENGINE_LR10 1 +#define NUM_PRIVLOC_ENGINE_LR10 10 +#define NUM_PTIMER_ENGINE_LR10 1 +#define NUM_SOE_ENGINE_LR10 1 +#define NUM_SMR_ENGINE_LR10 2 +#define NUM_I2C_ENGINE_LR10 1 +#define NUM_SE_ENGINE_LR10 1 +#define 
NUM_THERM_ENGINE_LR10 1 + +#define NUM_NPG_ENGINE_LR10 9 +#define NUM_NPG_BCAST_ENGINE_LR10 1 +#define NUM_NPG_PERFMON_ENGINE_LR10 9 +#define NUM_NPG_PERFMON_BCAST_ENGINE_LR10 1 +#define NUM_NPORT_ENGINE_LR10 36 +#define NUM_NPORT_BCAST_ENGINE_LR10 4 +#define NUM_NPORT_MULTICAST_ENGINE_LR10 9 +#define NUM_NPORT_MULTICAST_BCAST_ENGINE_LR10 1 +#define NUM_NPORT_PERFMON_ENGINE_LR10 36 +#define NUM_NPORT_PERFMON_BCAST_ENGINE_LR10 4 +#define NUM_NPORT_PERFMON_MULTICAST_ENGINE_LR10 9 +#define NUM_NPORT_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1 + +#define NUM_NXBAR_ENGINE_LR10 4 +#define NUM_NXBAR_BCAST_ENGINE_LR10 1 +#define NUM_NXBAR_PERFMON_ENGINE_LR10 4 +#define NUM_NXBAR_PERFMON_BCAST_ENGINE_LR10 1 +#define NUM_TILE_ENGINE_LR10 16 +#define NUM_TILE_BCAST_ENGINE_LR10 4 +#define NUM_TILE_MULTICAST_ENGINE_LR10 4 +#define NUM_TILE_MULTICAST_BCAST_ENGINE_LR10 1 +#define NUM_TILE_PERFMON_ENGINE_LR10 16 +#define NUM_TILE_PERFMON_BCAST_ENGINE_LR10 4 +#define NUM_TILE_PERFMON_MULTICAST_ENGINE_LR10 4 +#define NUM_TILE_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1 + +// +// Tile Column consists of 4 Tile blocks and 9 Tileout blocks. +// There are 4 Tile Columns, one per each NXBAR. + +#define NUM_NXBAR_TCS_LR10 NUM_NXBAR_ENGINE_LR10 +#define NUM_NXBAR_TILEOUTS_PER_TC_LR10 9 +#define NUM_NXBAR_TILES_PER_TC_LR10 4 + +#define TILE_TO_LINK(_device, _tc, _tile) \ + ( \ + NVSWITCH_ASSERT((_tc < NUM_NXBAR_TCS_LR10)) \ + , \ + NVSWITCH_ASSERT((_tile < NUM_NXBAR_TILES_PER_TC_LR10)) \ + , \ + ((_tc) * NUM_NXBAR_TILES_PER_TC_LR10 + (_tile)) \ + ) + +#define NV_NXBAR_TC_TILEOUT_ERR_FATAL_INTR_EN(i) (NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN + \ + i * (NV_NXBAR_TC_TILEOUT1_ERR_FATAL_INTR_EN - NV_NXBAR_TC_TILEOUT0_ERR_FATAL_INTR_EN)) + +#define NV_NXBAR_TC_TILEOUT_ERR_STATUS(i) (NV_NXBAR_TC_TILEOUT0_ERR_STATUS + \ + i * (NV_NXBAR_TC_TILEOUT1_ERR_STATUS - NV_NXBAR_TC_TILEOUT0_ERR_STATUS)) + +#define NV_NXBAR_TC_TILEOUT_ERR_FIRST(i) (NV_NXBAR_TC_TILEOUT0_ERR_FIRST + \ + i * (NV_NXBAR_TC_TILEOUT1_ERR_FIRST - NV_NXBAR_TC_TILEOUT0_ERR_FIRST)) + +#define NV_NXBAR_TC_TILEOUT_ERR_CYA(i) (NV_NXBAR_TC_TILEOUT0_ERR_CYA + \ + i * (NV_NXBAR_TC_TILEOUT1_ERR_CYA - NV_NXBAR_TC_TILEOUT0_ERR_CYA)) + +#define NVSWITCH_NXBAR_RD32_LR10(_d, _engidx, _dev, _reg) \ + NVSWITCH_ENG_RD32_LR10(_d, NXBAR, _engidx, _dev, _reg) + +#define NVSWITCH_NXBAR_WR32_LR10(_d, _engidx, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, NXBAR, , _engidx, _dev, _reg, _data) + +#define NVSWITCH_TILE_RD32_LR10(_d, _engidx, _dev, _reg) \ + NVSWITCH_ENG_RD32_LR10(_d, TILE, _engidx, _dev, _reg) + +#define NVSWITCH_TILE_WR32_LR10(_d, _engidx, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, TILE, , _engidx, _dev, _reg, _data) + + +#define NV_PPRIV_PRT_PRT_PRIV_ERROR_ADR(i) (NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR + \ + i * (NV_PPRIV_PRT_PRT1_PRIV_ERROR_ADR - NV_PPRIV_PRT_PRT0_PRIV_ERROR_ADR)) + +#define NV_PPRIV_PRT_PRT_PRIV_ERROR_WRDAT(i) (NV_PPRIV_PRT_PRT0_PRIV_ERROR_WRDAT + \ + i * (NV_PPRIV_PRT_PRT1_PRIV_ERROR_WRDAT - NV_PPRIV_PRT_PRT0_PRIV_ERROR_WRDAT)) + +#define NV_PPRIV_PRT_PRT_PRIV_ERROR_INFO(i) (NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO + \ + i * (NV_PPRIV_PRT_PRT1_PRIV_ERROR_INFO - NV_PPRIV_PRT_PRT0_PRIV_ERROR_INFO)) + +#define NV_PPRIV_PRT_PRT_PRIV_ERROR_CODE(i) (NV_PPRIV_PRT_PRT0_PRIV_ERROR_CODE + \ + i * (NV_PPRIV_PRT_PRT1_PRIV_ERROR_CODE - NV_PPRIV_PRT_PRT0_PRIV_ERROR_CODE)) + +#define NUM_NVLW_ENGINE_LR10 9 +#define NUM_NVLW_BCAST_ENGINE_LR10 1 +#define NUM_NVLW_PERFMON_ENGINE_LR10 9 +#define NUM_NVLW_PERFMON_BCAST_ENGINE_LR10 1 +#define NUM_MINION_ENGINE_LR10 9 +#define 
NUM_MINION_BCAST_ENGINE_LR10 1 +#define NUM_NVLIPT_ENGINE_LR10 9 +#define NUM_NVLIPT_BCAST_ENGINE_LR10 1 +#define NUM_NVLIPT_SYS_PERFMON_ENGINE_LR10 9 +#define NUM_NVLIPT_SYS_PERFMON_BCAST_ENGINE_LR10 1 +#define NUM_NVLTLC_ENGINE_LR10 36 +#define NUM_NVLTLC_BCAST_ENGINE_LR10 4 +#define NUM_NVLTLC_MULTICAST_ENGINE_LR10 9 +#define NUM_NVLTLC_MULTICAST_BCAST_ENGINE_LR10 1 +#define NUM_TX_PERFMON_ENGINE_LR10 36 +#define NUM_TX_PERFMON_BCAST_ENGINE_LR10 4 +#define NUM_TX_PERFMON_MULTICAST_ENGINE_LR10 9 +#define NUM_TX_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1 +#define NUM_RX_PERFMON_ENGINE_LR10 36 +#define NUM_RX_PERFMON_BCAST_ENGINE_LR10 4 +#define NUM_RX_PERFMON_MULTICAST_ENGINE_LR10 9 +#define NUM_RX_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1 +#define NUM_PLL_ENGINE_LR10 9 +#define NUM_PLL_BCAST_ENGINE_LR10 1 +#define NUM_NVLDL_ENGINE_LR10 36 +#define NUM_NVLDL_BCAST_ENGINE_LR10 4 +#define NUM_NVLDL_MULTICAST_ENGINE_LR10 9 +#define NUM_NVLDL_MULTICAST_BCAST_ENGINE_LR10 1 +#define NUM_NVLIPT_LNK_ENGINE_LR10 36 +#define NUM_NVLIPT_LNK_BCAST_ENGINE_LR10 4 +#define NUM_NVLIPT_LNK_MULTICAST_ENGINE_LR10 9 +#define NUM_NVLIPT_LNK_MULTICAST_BCAST_ENGINE_LR10 1 +#define NUM_SYS_PERFMON_ENGINE_LR10 36 +#define NUM_SYS_PERFMON_BCAST_ENGINE_LR10 4 +#define NUM_SYS_PERFMON_MULTICAST_ENGINE_LR10 9 +#define NUM_SYS_PERFMON_MULTICAST_BCAST_ENGINE_LR10 1 +#define NVSWITCH_NUM_PRIV_PRT_LR10 9 + + +#define NVSWITCH_NPORT_PER_NPG (NUM_NPORT_ENGINE_LR10/NUM_NPG_ENGINE_LR10) +#define NPORT_TO_LINK(_device, _npg, _nport) \ + ( \ + NVSWITCH_ASSERT((_npg < NUM_NPG_ENGINE_LR10)) \ + , \ + NVSWITCH_ASSERT((_nport < NVSWITCH_NPORT_PER_NPG))\ + , \ + ((_npg) * NVSWITCH_NPORT_PER_NPG + (_nport)) \ + ) +#define NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(_nvlipt_idx) \ + (NVBIT64(NVSWITCH_LINKS_PER_NVLIPT) - 1) << (_nvlipt_idx * NVSWITCH_LINKS_PER_NVLIPT); + +#define NVSWITCH_NUM_LINKS_LR10 (NUM_NPORT_ENGINE_LR10) +#define NVSWITCH_NUM_LANES_LR10 4 + +#define NVSWITCH_LINKS_PER_NVLW (NVSWITCH_NUM_LINKS_LR10/NUM_NVLW_ENGINE_LR10) +#define NVSWITCH_LINKS_PER_MINION (NVSWITCH_NUM_LINKS_LR10/NUM_MINION_ENGINE_LR10) +#define NVSWITCH_LINKS_PER_NVLIPT (NVSWITCH_NUM_LINKS_LR10/NUM_NVLIPT_ENGINE_LR10) +#define NVSWITCH_LINKS_PER_NPG (NVSWITCH_NUM_LINKS_LR10/NUM_NPG_ENGINE_LR10) + +#define NVSWITCH_DECLARE_ENGINE_UC_LR10(_engine) \ + ENGINE_DESCRIPTOR_TYPE_LR10 eng##_engine[NUM_##_engine##_ENGINE_LR10]; + +#define NVSWITCH_DECLARE_ENGINE_LR10(_engine) \ + ENGINE_DESCRIPTOR_TYPE_LR10 eng##_engine[NUM_##_engine##_ENGINE_LR10]; \ + ENGINE_DESCRIPTOR_TYPE_LR10 eng##_engine##_BCAST[NUM_##_engine##_BCAST_ENGINE_LR10]; + +#define NVSWITCH_NVLIPT_GET_PUBLIC_ID_LR10(_physlinknum) \ + ((_physlinknum)/NVSWITCH_LINKS_PER_NVLIPT) + +#define NVSWITCH_NVLIPT_GET_LOCAL_LINK_ID_LR10(_physlinknum) \ + ((_physlinknum)%NVSWITCH_LINKS_PER_NVLIPT) + +#define DISCOVERY_TYPE_UNDEFINED 0 +#define DISCOVERY_TYPE_DISCOVERY 1 +#define DISCOVERY_TYPE_UNICAST 2 +#define DISCOVERY_TYPE_BROADCAST 3 + +// +// These field #defines describe which physical fabric address bits are +// relevant to the specific remap table address check/remap operation. +// +#define NV_INGRESS_REMAP_ADDR_PHYS_LR10 46:36 + +#define NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10 35:20 +#define NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10 35:20 +#define NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10 35:20 + +typedef NVSWITCH_LINK_TYPE NVSWITCH_LINK_TYPE_LR10; + +// +// NPORT Portstat information +// + +// +// LR10 supports CREQ0(0), DNGRD(1), ATR(2), ATSD(3), PROBE(4), RSP0(5), CREQ1(6), and RSP1(7) VCs. 
+// But DNGRD(1), ATR(2), ATSD(3), and PROBE(4) will be never used as PowerPC ATS support is not a POR for LR10 HW. +// +#define NVSWITCH_NUM_VCS_LR10 8 + +typedef struct +{ + NvU32 count; + NvU32 low; + NvU32 medium; + NvU32 high; + NvU32 panic; +} +NVSWITCH_LATENCY_BINS_LR10; + +typedef struct +{ + NvU32 count; + NvU64 start_time_nsec; + NvU64 last_read_time_nsec; + NVSWITCH_LATENCY_BINS_LR10 accum_latency[NVSWITCH_NUM_LINKS_LR10]; +} +NVSWITCH_LATENCY_VC_LR10; + +typedef struct +{ + NvU32 sample_interval_msec; + NvU64 last_visited_time_nsec; + NVSWITCH_LATENCY_VC_LR10 latency[NVSWITCH_NUM_VCS_LR10]; +} NVSWITCH_LATENCY_STATS_LR10; + +#define NV_NPORT_PORTSTAT_LR10(_block, _reg, _vc, _index) (NV_NPORT_PORTSTAT ## _block ## _reg ## _0 ## _index + \ + _vc * (NV_NPORT_PORTSTAT ## _block ## _reg ## _1 ## _index - NV_NPORT_PORTSTAT ## _block ## _reg ## _0 ## _index)) + +#define NVSWITCH_NPORT_PORTSTAT_RD32_LR10(_d, _engidx, _block, _reg, _vc) \ + ( \ + NVSWITCH_ASSERT(NVSWITCH_IS_LINK_ENG_VALID_LR10(_d, NPORT, _engidx)) \ + , \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: MEM_RD NPORT_PORTSTAT[%d]: %s,%s (%06x+%04x)\n", \ + __FUNCTION__, \ + _engidx, \ + #_block, #_reg, \ + NVSWITCH_GET_ENG(_d, NPORT, , _engidx), \ + NV_NPORT_PORTSTAT_LR10(_block, _reg, _vc, _0)) \ + , \ + nvswitch_reg_read_32(_d, \ + NVSWITCH_GET_ENG(_d, NPORT, , _engidx) + \ + NV_NPORT_PORTSTAT_LR10(_block, _reg, _vc, _0)) \ + ); \ + ((void)(_d)) + +#define NVSWITCH_PORTSTAT_BCAST_WR32_LR10(_d, _block, _reg, _idx, _data) \ + { \ + NVSWITCH_PRINT(_d, MMIO, \ + "%s: BCAST_WR NPORT_PORTSTAT: %s,%s (%06x+%04x) 0x%08x\n", \ + __FUNCTION__, \ + #_block, #_reg, \ + NVSWITCH_GET_ENG(_d, NPORT, _BCAST, 0), \ + NV_NPORT_PORTSTAT_LR10(_block, _reg, _idx, ), _data); \ + NVSWITCH_OFF_WR32(_d, \ + NVSWITCH_GET_ENG(_d, NPORT, _BCAST, 0) + \ + NV_NPORT_PORTSTAT_LR10(_block, _reg, _idx, ), _data); \ + } + +// +// Per-chip device information +// + +// +// The chip-specific engine list is used to generate the code to collect +// discovered unit information and coalesce it into the data structures used by +// the common IO library (see io_nvswitch.h). +// +// The PTOP discovery table presents the information on wrappers and sub-units +// in a hierarchical manner. The top level discovery contains information +// about top level UNICAST units and IP wrappers like NPG, NVLW, and NXBAR. +// Individual units within an IP wrapper are described in discovery sub-tables. +// Each IP wrapper may have MULTICAST descriptors to allow addressing sub-units +// within a wrapper and a cluster of IP wrappers will also have a BCAST +// discovery tables, which have MULTICAST descriptors within them. +// In order to collect all the useful unit information into a single container, +// we need to pick where to find each piece within the parsed discovery table. +// Top level IP wrappers like NPG have a BCAST range to broadcast reads/writes, +// but IP sub-units like NPORT have a MULTICAST range within the BCAST IP +// wrapper to broadcast to all the sub-units in all the IP wrappers. +// So in the lists below top level IP wrappers (NPG, NVLW, and NXBAR) point +// to the _BCAST IP wrapper, but sub-unit point to the _MULTICAST range inside +// the BCAST unit (_MULTICAST_BCAST). +// +// All IP-based (0-based register manuals) engines need to be listed here to +// generate chip-specific handlers as well as in the global common list of all +// engines that have ever existed on *ANY* architecture(s) in order for them +// use common IO wrappers. 
+// + +#define NVSWITCH_LIST_LR10_ENGINES(_op) \ + _op(XVE, ) \ + _op(SAW, ) \ + _op(SOE, ) \ + _op(SMR, ) \ + _op(NPG, _BCAST) \ + _op(NPORT, _MULTICAST_BCAST) \ + \ + _op(NVLW, _BCAST) \ + _op(MINION, _BCAST) \ + _op(NVLIPT, _BCAST) \ + _op(NVLIPT_LNK, _MULTICAST_BCAST) \ + _op(NVLTLC, _MULTICAST_BCAST) \ + _op(NVLDL, _MULTICAST_BCAST) \ + \ + _op(NXBAR, _BCAST) \ + _op(TILE, _MULTICAST_BCAST) \ + \ + _op(NPG_PERFMON, _BCAST) \ + _op(NPORT_PERFMON, _MULTICAST_BCAST) \ + \ + _op(NVLW_PERFMON, _BCAST) \ + _op(RX_PERFMON, _MULTICAST_BCAST) \ + _op(TX_PERFMON, _MULTICAST_BCAST) \ + \ + _op(NXBAR_PERFMON, _BCAST) \ + _op(TILE_PERFMON, _MULTICAST_BCAST) \ + +typedef struct +{ + struct + { + NVSWITCH_ENGINE_DESCRIPTOR_TYPE common[NVSWITCH_ENGINE_ID_SIZE]; + } io; + + NVSWITCH_DECLARE_ENGINE_UC_LR10(PTOP) + NVSWITCH_DECLARE_ENGINE_UC_LR10(CLKS) + NVSWITCH_DECLARE_ENGINE_UC_LR10(FUSE) + NVSWITCH_DECLARE_ENGINE_UC_LR10(JTAG) + NVSWITCH_DECLARE_ENGINE_UC_LR10(PMGR) + NVSWITCH_DECLARE_ENGINE_UC_LR10(SAW) + NVSWITCH_DECLARE_ENGINE_UC_LR10(XP3G) + NVSWITCH_DECLARE_ENGINE_UC_LR10(XVE) + NVSWITCH_DECLARE_ENGINE_UC_LR10(ROM) + NVSWITCH_DECLARE_ENGINE_UC_LR10(EXTDEV) + NVSWITCH_DECLARE_ENGINE_UC_LR10(PRIVMAIN) + NVSWITCH_DECLARE_ENGINE_UC_LR10(PRIVLOC) + NVSWITCH_DECLARE_ENGINE_UC_LR10(PTIMER) + NVSWITCH_DECLARE_ENGINE_UC_LR10(SOE) + NVSWITCH_DECLARE_ENGINE_UC_LR10(SMR) + NVSWITCH_DECLARE_ENGINE_UC_LR10(I2C) + NVSWITCH_DECLARE_ENGINE_UC_LR10(SE) + NVSWITCH_DECLARE_ENGINE_UC_LR10(THERM) + + NVSWITCH_DECLARE_ENGINE_LR10(NVLW) + NVSWITCH_DECLARE_ENGINE_LR10(NXBAR) + NVSWITCH_DECLARE_ENGINE_LR10(NPG) + + NVSWITCH_DECLARE_ENGINE_LR10(MINION) + NVSWITCH_DECLARE_ENGINE_LR10(NVLIPT) + NVSWITCH_DECLARE_ENGINE_LR10(NVLTLC) + NVSWITCH_DECLARE_ENGINE_LR10(NVLTLC_MULTICAST) + NVSWITCH_DECLARE_ENGINE_LR10(NVLIPT_SYS_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(TX_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(RX_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(TX_PERFMON_MULTICAST) + NVSWITCH_DECLARE_ENGINE_LR10(RX_PERFMON_MULTICAST) + NVSWITCH_DECLARE_ENGINE_LR10(PLL) + NVSWITCH_DECLARE_ENGINE_LR10(NVLW_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(NVLDL) + NVSWITCH_DECLARE_ENGINE_LR10(NVLDL_MULTICAST) + NVSWITCH_DECLARE_ENGINE_LR10(NVLIPT_LNK) + NVSWITCH_DECLARE_ENGINE_LR10(NVLIPT_LNK_MULTICAST) + NVSWITCH_DECLARE_ENGINE_LR10(SYS_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(SYS_PERFMON_MULTICAST) + + NVSWITCH_DECLARE_ENGINE_LR10(NPG_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(NPORT) + NVSWITCH_DECLARE_ENGINE_LR10(NPORT_MULTICAST) + NVSWITCH_DECLARE_ENGINE_LR10(NPORT_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(NPORT_PERFMON_MULTICAST) + + NVSWITCH_DECLARE_ENGINE_LR10(NXBAR_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(TILE) + NVSWITCH_DECLARE_ENGINE_LR10(TILE_MULTICAST) + NVSWITCH_DECLARE_ENGINE_LR10(TILE_PERFMON) + NVSWITCH_DECLARE_ENGINE_LR10(TILE_PERFMON_MULTICAST) + + // VBIOS configuration Data + NVSWITCH_BIOS_NVLINK_CONFIG bios_config; + + // GPIO + const NVSWITCH_GPIO_INFO *gpio_pin; + NvU32 gpio_pin_size; + + // Interrupts + NvU32 intr_enable_legacy; + NvU32 intr_enable_corr; + NvU32 intr_enable_fatal; + NvU32 intr_enable_nonfatal; + NvU32 intr_minion_dest; + + // + // Book-keep interrupt masks to restore them after reset. + // Note: There is no need to book-keep interrupt masks for NVLink units like + // DL, MINION, TLC etc. because NVLink init routines would setup them. 
+ // + struct + { + NVSWITCH_INTERRUPT_MASK route; + NVSWITCH_INTERRUPT_MASK ingress; + NVSWITCH_INTERRUPT_MASK egress; + NVSWITCH_INTERRUPT_MASK tstate; + NVSWITCH_INTERRUPT_MASK sourcetrack; + NVSWITCH_INTERRUPT_MASK tile; + NVSWITCH_INTERRUPT_MASK tileout; + } intr_mask; + + // Latency statistics + NVSWITCH_LATENCY_STATS_LR10 *latency_stats; + + // External TDIODE info + NVSWITCH_TDIODE_INFO_TYPE tdiode; + + // Ganged Link table + NvU64 *ganged_link_table; +} lr10_device; + +#define NVSWITCH_GET_CHIP_DEVICE_LR10(_device) \ + ( \ + ((_device)->chip_id == NV_PSMC_BOOT_42_CHIP_ID_LR10) ? \ + ((lr10_device *) _device->chip_device) : \ + NULL \ + ) + +// +// Internal function declarations +// +NvlStatus nvswitch_device_discovery_lr10(nvswitch_device *device, NvU32 discovery_offset); +void nvswitch_filter_discovery_lr10(nvswitch_device *device); +NvlStatus nvswitch_process_discovery_lr10(nvswitch_device *device); +nvswitch_device *nvswitch_get_device_by_pci_info_lr10(nvlink_pci_info *info); +NvlStatus nvswitch_ring_master_cmd_lr10(nvswitch_device *device, NvU32 cmd); +void nvswitch_initialize_interrupt_tree_lr10(nvswitch_device *device); +void nvswitch_lib_enable_interrupts_lr10(nvswitch_device *device); +void nvswitch_lib_disable_interrupts_lr10(nvswitch_device *device); +NvlStatus nvswitch_lib_service_interrupts_lr10(nvswitch_device *device); +NvlStatus nvswitch_lib_check_interrupts_lr10(nvswitch_device *device); +void nvswitch_set_ganged_link_table_lr10(nvswitch_device *device, NvU32 firstIndex, NvU64 *ganged_link_table, NvU32 numEntries); +NvlStatus nvswitch_pmgr_init_config_lr10(nvswitch_device *device); +NvlStatus nvswitch_minion_service_falcon_interrupts_lr10(nvswitch_device *device, NvU32 instance); +NvlStatus nvswitch_ctrl_i2c_indexed_lr10(nvswitch_device *device, + NVSWITCH_CTRL_I2C_INDEXED_PARAMS *pParams); +NvU32 nvswitch_i2c_get_port_info_lr10(nvswitch_device *device, NvU32 port); +void nvswitch_translate_error_lr10(NVSWITCH_ERROR_TYPE *error_entry, + NVSWITCH_NVLINK_ARCH_ERROR *arch_error, + NVSWITCH_NVLINK_HW_ERROR *hw_error); +NvlStatus nvswitch_corelib_add_link_lr10(nvlink_link *link); +NvlStatus nvswitch_corelib_remove_link_lr10(nvlink_link *link); +NvlStatus nvswitch_corelib_set_dl_link_mode_lr10(nvlink_link *link, NvU64 mode, NvU32 flags); +NvlStatus nvswitch_corelib_get_dl_link_mode_lr10(nvlink_link *link, NvU64 *mode); +NvlStatus nvswitch_corelib_set_tl_link_mode_lr10(nvlink_link *link, NvU64 mode, NvU32 flags); +NvlStatus nvswitch_corelib_get_tl_link_mode_lr10(nvlink_link *link, NvU64 *mode); +NvlStatus nvswitch_corelib_set_tx_mode_lr10(nvlink_link *link, NvU64 mode, NvU32 flags); +NvlStatus nvswitch_corelib_get_tx_mode_lr10(nvlink_link *link, NvU64 *mode, NvU32 *subMode); +NvlStatus nvswitch_corelib_set_rx_mode_lr10(nvlink_link *link, NvU64 mode, NvU32 flags); +NvlStatus nvswitch_corelib_get_rx_mode_lr10(nvlink_link *link, NvU64 *mode, NvU32 *subMode); +NvlStatus nvswitch_corelib_set_rx_detect_lr10(nvlink_link *link, NvU32 flags); +NvlStatus nvswitch_corelib_get_rx_detect_lr10(nvlink_link *link); +void nvswitch_corelib_training_complete_lr10(nvlink_link *link); +NvBool nvswitch_link_lane_reversed_lr10(nvswitch_device *device, NvU32 linkId); +NvBool nvswitch_is_link_in_reset_lr10(nvswitch_device *device, nvlink_link *link); +NvlStatus nvswitch_wait_for_tl_request_ready_lr10(nvlink_link *link); +NvlStatus nvswitch_request_tl_link_state_lr10(nvlink_link *link, NvU32 tlLinkState, NvBool bSync); +void nvswitch_execute_unilateral_link_shutdown_lr10(nvlink_link *link); 
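NVSWITCH_GET_CHIP_DEVICE_LR10 evaluates to NULL whenever the device is not an LR10 part, so chip-specific routines are expected to fetch and NULL-check that pointer before touching any lr10_device state. Below is a minimal standalone sketch of that guard pattern; the fake_* types, FAKE_CHIP_ID_LR10, and example_lr10_entry are simplified stand-ins invented for this example, not the driver's real definitions:

#include <stddef.h>

/* Simplified stand-ins for types defined elsewhere in the driver. */
typedef int NvlStatus;
#define NVL_SUCCESS    0
#define NVL_BAD_ARGS (-1)

typedef struct { int dummy; } lr10_device;

typedef struct
{
    unsigned int chip_id;
    void        *chip_device;
} fake_nvswitch_device;

#define FAKE_CHIP_ID_LR10 0xA0   /* placeholder, not the real NV_PSMC_BOOT_42 value */

/* Mirrors NVSWITCH_GET_CHIP_DEVICE_LR10: only hand back the pointer for LR10. */
static lr10_device *get_chip_device_lr10(fake_nvswitch_device *device)
{
    return (device->chip_id == FAKE_CHIP_ID_LR10)
               ? (lr10_device *)device->chip_device
               : NULL;
}

/* Typical guard at the top of an LR10-specific entry point. */
static NvlStatus example_lr10_entry(fake_nvswitch_device *device)
{
    lr10_device *chip_device = get_chip_device_lr10(device);

    if (chip_device == NULL)
        return NVL_BAD_ARGS;

    /* ... operate on chip-specific state (intr_mask, latency_stats, ...) ... */
    return NVL_SUCCESS;
}

int main(void)
{
    lr10_device chip = { 0 };
    fake_nvswitch_device dev = { FAKE_CHIP_ID_LR10, &chip };
    return (example_lr10_entry(&dev) == NVL_SUCCESS) ? 0 : 1;
}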
+NvlStatus nvswitch_get_link_public_id_lr10(nvswitch_device *device, NvU32 linkId, NvU32 *publicId); +NvlStatus nvswitch_get_link_local_idx_lr10(nvswitch_device *device, NvU32 linkId, NvU32 *localLinkIdx); +NvlStatus nvswitch_set_training_error_info_lr10(nvswitch_device *device, + NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *pLinkTrainingErrorInfoParams); +NvlStatus nvswitch_ctrl_get_fatal_error_scope_lr10(nvswitch_device *device, NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams); +void nvswitch_init_scratch_lr10(nvswitch_device *device); +void nvswitch_init_dlpl_interrupts_lr10(nvlink_link *link); +NvlStatus nvswitch_init_nport_lr10(nvswitch_device *device); +NvlStatus nvswitch_get_soe_ucode_binaries_lr10(nvswitch_device *device, const NvU32 **soe_ucode_data, const NvU32 **soe_ucode_header); +NvlStatus nvswitch_poll_sublink_state_lr10(nvswitch_device *device, nvlink_link *link); +void nvswitch_setup_link_loopback_mode_lr10(nvswitch_device *device, NvU32 linkNumber); +void nvswitch_reset_persistent_link_hw_state_lr10(nvswitch_device *device, NvU32 linkNumber); +void nvswitch_store_topology_information_lr10(nvswitch_device *device, nvlink_link *link); +void nvswitch_init_lpwr_regs_lr10(nvlink_link *link); +NvlStatus nvswitch_set_training_mode_lr10(nvswitch_device *device); +NvBool nvswitch_i2c_is_device_access_allowed_lr10(nvswitch_device *device, NvU32 port, NvU8 addr, NvBool bIsRead); +NvU32 nvswitch_get_sublink_width_lr10(nvswitch_device *device,NvU32 linkNumber); +NvlStatus nvswitch_parse_bios_image_lr10(nvswitch_device *device); +NvlStatus nvswitch_ctrl_get_throughput_counters_lr10(nvswitch_device *device, NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p); +void nvswitch_corelib_get_uphy_load_lr10(nvlink_link *link, NvBool *bUnlocked); +void nvswitch_init_buffer_ready_lr10(nvswitch_device *device, nvlink_link *link, NvBool bNportBufferReady); +NvlStatus nvswitch_ctrl_get_nvlink_lp_counters_lr10(nvswitch_device *device, NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *params); +NvlStatus nvswitch_service_nvldl_fatal_link_lr10(nvswitch_device *device, NvU32 nvliptInstance, NvU32 link); +NvlStatus nvswitch_service_minion_link_lr10(nvswitch_device *device, NvU32 nvliptInstance); +void nvswitch_apply_recal_settings_lr10(nvswitch_device *device, nvlink_link *link); +NvlStatus nvswitch_ctrl_get_sw_info_lr10(nvswitch_device *device, NVSWITCH_GET_SW_INFO_PARAMS *p); + +#endif //_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/minion_lr10.h b/src/common/nvswitch/kernel/inc/lr10/minion_lr10.h new file mode 100644 index 000000000..11fd79b41 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/minion_lr10.h @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _MINION_LR10_H_ +#define _MINION_LR10_H_ + +#include "lr10.h" + +// TODO modify these for LR10 +#define FALCON_IMEM_BLK_SIZE_BYTES_LR10 256 + +#define FALCON_CODE_HDR_OS_CODE_OFFSET_LR10 0 +#define FALCON_CODE_HDR_OS_CODE_SIZE_LR10 1 +#define FALCON_CODE_HDR_OS_DATA_OFFSET_LR10 2 +#define FALCON_CODE_HDR_OS_DATA_SIZE_LR10 3 +#define FALCON_CODE_HDR_NUM_APPS_LR10 4 +#define FALCON_CODE_HDR_APP_CODE_START_LR10 5 +#define FALCON_CODE_HDR_APP_DATA_START_LR10 ( FALCON_CODE_HDR_APP_CODE_START_LR10 + (FALCON_CODE_HDR_NUM_APPS_LR10 * 2)) +#define FALCON_CODE_HDR_CODE_OFFSET_LR10 0 +#define FALCON_CODE_HDR_CODE_SIZE_LR10 1 +#define FALCON_CODE_HDR_DATA_OFFSET_LR10 0 +#define FALCON_CODE_HDR_DATA_SIZE_LR10 1 + +#define NV_MINION_NVLINK_DL_STAT_ARGS_LANEID 15:12 +#define NV_MINION_NVLINK_DL_STAT_ARGS_ADDRS 11:0 + +typedef const struct +{ + NvU32 osCodeOffset; + NvU32 osCodeSize; + NvU32 osDataOffset; + NvU32 osDataSize; + NvU32 numApps; + NvU32 appCodeStart; + NvU32 appDataStart; + NvU32 codeOffset; + NvU32 codeSize; + NvU32 dataOffset; + NvU32 dataSize; +} FALCON_UCODE_HDR_INFO_LR10, *PFALCON_UCODE_HDR_INFO_LR10; + +#define NVSWITCH_MINION_LINK_RD32_LR10(_d, _physlinknum, _dev, _reg) \ + NVSWITCH_LINK_RD32_LR10(_d, _physlinknum, MINION, _dev, _reg) + +#define NVSWITCH_MINION_LINK_WR32_LR10(_d, _physlinknum, _dev, _reg, _data) \ + NVSWITCH_LINK_WR32_LR10(_d, _physlinknum, MINION, _dev, _reg, _data) + +#define NVSWITCH_MINION_WR32_LR10(_d, _instance, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, MINION, , _instance, _dev, _reg, _data) + +#define NVSWITCH_MINION_RD32_LR10(_d, _instance, _dev, _reg) \ + NVSWITCH_ENG_RD32_LR10(_d, MINION, _instance, _dev, _reg) + +#define NVSWITCH_MINION_WR32_BCAST_LR10(_d, _dev, _reg, _data) \ + NVSWITCH_BCAST_WR32_LR10(_d, MINION, _dev, _reg, _data) + +#define NVSWITCH_MINION_GET_LOCAL_LINK_ID(_physlinknum) \ + (_physlinknum%NVSWITCH_LINKS_PER_MINION) + +// +// Internal function declarations +// +NvlStatus nvswitch_init_minion_lr10(nvswitch_device *device); +NvlStatus nvswitch_minion_send_command_lr10(nvswitch_device *device, NvU32 linkNumber, NvU32 command, NvU32 scratch0); +NvlStatus nvswitch_minion_get_dl_status_lr10(nvswitch_device *device, NvU32 linkId, NvU32 statusIdx, NvU32 statusArgs, NvU32 *statusData); +NvlStatus nvswitch_minion_get_initoptimize_status_lr10(nvswitch_device *device, NvU32 linkId); +NvlStatus nvswitch_minion_get_initnegotiate_status_lr10(nvswitch_device *device, NvU32 linkId); +NvlStatus nvswitch_minion_get_rxdet_status_lr10(nvswitch_device *device, NvU32 linkId); +NvlStatus nvswitch_minion_set_rx_term_lr10(nvswitch_device *device, NvU32 linkId); +NvU32 nvswitch_minion_get_line_rate_Mbps_lr10(nvswitch_device *device, NvU32 linkId); +NvU32 nvswitch_minion_get_data_rate_KiBps_lr10(nvswitch_device *device, NvU32 linkId); +NvlStatus nvswitch_set_minion_initialized_lr10(nvswitch_device *device, NvU32 idx_minion, NvBool initialized); +NvBool nvswitch_is_minion_initialized_lr10(nvswitch_device *device, NvU32 idx_minion); +NvlStatus 
nvswitch_minion_clear_dl_error_counters_lr10(nvswitch_device *device, NvU32 linkId); +#endif //_MINION_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/minion_production_ucode_lr10_dbg.h b/src/common/nvswitch/kernel/inc/lr10/minion_production_ucode_lr10_dbg.h new file mode 100644 index 000000000..505838c17 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/minion_production_ucode_lr10_dbg.h @@ -0,0 +1,1593 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + +/* + + DO NOT EDIT - THIS FILE WAS AUTOMATICALLY GENERATED + +*/ + +#ifndef _MINION_UCODE_LR10_DBG_H_ +#define _MINION_UCODE_LR10_DBG_H_ + +const NvU32 minion_ucode_data_lr10_dbg[] = { + 0x004000d0, 0x0004fe00, 0xc47ea4bd, 0x02f8006b, 0x4908a4b6, 0x9afa0830, 0x00b6b000, 0x090bfcf0, + 0x029fbb02, 0xfa08104f, 0xb4b300f9, 0x02f80600, 0x010a00f8, 0x107eb4bd, 0x00f80000, 0xb4bd020a, + 0x0000107e, 0x060a00f8, 0x107eb4bd, 0x00f80000, 0x000000df, 0x08bc4940, 0x89009ffa, 0xb6010060, + 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x0000d905, 0xa9fa4000, 0x40004f01, 0xfa08bc49, 0x00f8009f, + 0x000000df, 0x08bc4980, 0x89009ffa, 0xb6010060, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0xffffd905, + 0xb9fdbfff, 0x01abfa04, 0x0080008f, 0xfa08bc49, 0x00f8009f, 0x000000df, 0x08bc4904, 0x89009ffa, + 0xb6017304, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0xfa020905, 0x004f01a9, 0x08bc4904, 0xf8009ffa, + 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd3000, 0x08504905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, + 0x0000d904, 0xa9fd5000, 0x08584905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd6000, + 0x085c4905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd4000, 0x08644905, 0xf8009afa, + 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd2000, 0x08544905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, + 0x0000d904, 0xa9fd1000, 0x08604905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd7000, + 0x08684905, 0xf8009afa, 0x00328900, 0x0090fe00, 0x00003c89, 0x890091fe, 0xbd000028, 0xde9cbff4, + 0x80000000, 0x3e090049, 0xfa0001b4, 0xff90009e, 0x04999001, 0x08f4fca6, 0x4fe4bdf5, 0x00dd0980, + 0x3e800000, 0xfa0001d8, 0xf9b800fd, 0xfa000100, 0xee90009d, 0x04ff9001, 0x08f4eca6, 0xf994bded, + 0xf9120999, 0x8900f899, 0xb60100a0, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00aace05, 0xf8ffa4f0, + 0x74a08900, 0x0fa4b601, 0xd9a0a9bc, 0x00800000, 0xce05a9fd, 0xa4f000aa, 0x4900f8ff, 0x99cf0884, + 0x00288f00, 0x7099c700, 
0x00f8f9a0, 0x3049010f, 0x009ffa08, 0xc0ffeedf, 0xfa400900, 0x00f8009f, + 0xf403a6b0, 0x94bd090d, 0x00025d3e, 0x00002889, 0xa9a699bf, 0xf0089cf0, 0x96f00196, 0xf89a3201, + 0x0000d900, 0xb4f08000, 0x80aab8ff, 0xb9fd0002, 0x08c4b605, 0xf102a4b6, 0xfdffffc4, 0xacfa05cb, + 0x3d00f800, 0x02913e94, 0x92a92000, 0xaa9001bb, 0x00b4b301, 0xbd00f8f8, 0x02a93e94, 0xf8b93c00, + 0x3c01cc92, 0x999099af, 0x00c4b301, 0x4900f8f4, 0x99cf0608, 0x00008f00, 0x049ffd10, 0x8b2d1bf4, + 0x0c000030, 0x27108a10, 0x02977e00, 0x00408b00, 0x8a100c00, 0x7e002720, 0x8b000297, 0x0c000050, + 0x27308a10, 0x030f3e00, 0x00608b00, 0x8a100c00, 0x7e002710, 0x8b000297, 0x0c000070, 0x27208a10, + 0x02977e00, 0x27308a00, 0x00808b00, 0x7e100c00, 0x8a000297, 0x0b000050, 0x02837e10, 0x00808a00, + 0x7e100b00, 0x8a000283, 0x0b000040, 0x02837e10, 0x00708a00, 0x7e100b00, 0xf4000283, 0x10891f3c, + 0x93f00027, 0x0699fa06, 0x3cf403f8, 0x77008f00, 0x08f5b600, 0x00090089, 0xfd1094b6, 0x008f059f, + 0x9ffd0200, 0x009afe05, 0x00036c3e, 0x0077007e, 0x0003743e, 0x0003783e, 0x0000308a, 0x837e100b, + 0x608a0002, 0x100b0000, 0x0002837e, 0x844f00f8, 0x00ffcf08, 0x9abb0109, 0xff94f104, 0x049ffdff, + 0xf00bacf0, 0x00f801a6, 0x800000d9, 0x02aa9000, 0xfd0fa4b6, 0xaace05a9, 0xffa4f000, 0x148900f8, + 0xa4b60130, 0xa0a9bc0f, 0x800000d9, 0x05a9fd00, 0xc700aace, 0x00f864aa, 0x01202489, 0xbc0fa4b6, + 0x00d9a0a9, 0xfd008000, 0xaace05a9, 0x64aac700, 0x248900f8, 0xa4b60120, 0xa0a9bc0f, 0x800000d9, + 0x05a9fd00, 0xc700afce, 0x90b364f9, 0xa43d0804, 0xf9c400f8, 0x0b9cf00f, 0x00f89a32, 0x0003d87e, + 0xf000a6b0, 0x00f80bac, 0x0003d87e, 0xf006a6b0, 0x00f80bac, 0x0003be7e, 0xf006a6b0, 0x00f80bac, + 0x0003a87e, 0xf002a6b0, 0x00f80bac, 0xe04f94bd, 0x045b3e0f, 0x00fecf00, 0xa6019990, 0xf808f49a, + 0x288900f8, 0xd4bd0000, 0x888e9cbf, 0x00da0176, 0x0b008000, 0x048d3efb, 0xf5eaff00, 0xfd00f9ce, + 0xf9fa049b, 0x01dd9001, 0x8000eeb8, 0xf4dca600, 0x00f8ea08, 0x01315489, 0xbc0fa4b6, 0x00d9a0a9, + 0xff008000, 0x9cfa95a9, 0x0000d901, 0xaa920080, 0x95a9ff04, 0xd9019dfa, 0x00800000, 0xf010aa92, + 0xa9fd7fb4, 0x08b4b605, 0x000002d9, 0x05b9fd01, 0x0f01abfa, 0xbde4bd01, 0x3e050dc4, 0xce0004f3, + 0x010f00a9, 0x92189cc7, 0x9ec701dd, 0x060bf403, 0x3301cfc6, 0xb30800f0, 0xb3e800d4, 0xb30c00e0, + 0xbd0800c0, 0x0a00f8a4, 0x8900f829, 0xb6013140, 0xa9bc0fa4, 0x0000d9a0, 0xb4f00080, 0x05a9fd7f, + 0xd910b4b6, 0x01000001, 0xfa05b9fd, 0x010f01ab, 0xc4bde4bd, 0x4e3e050d, 0xa9ce0005, 0xc7010f00, + 0xdd92189c, 0x039ec701, 0xc6060bf4, 0xf03301cf, 0xd4b30800, 0xe0b3e800, 0xc0b30c00, 0xa4bd0800, + 0x2a0a00f8, 0x588900f8, 0xa4b60131, 0xa0a9bc0f, 0x800000d9, 0x05a9fd00, 0xf800aace, 0x315c8900, + 0x0fa4b601, 0xd9a0a9bc, 0x00800000, 0xce05a9fd, 0x00f800aa, 0x01214089, 0xbc0fa4b6, 0x00d9a0a9, + 0xf0008000, 0xa9fd7fb4, 0x10b4b605, 0x000001d9, 0x05b9fd01, 0xbd01abfa, 0x3ef4bd94, 0xce0005d2, + 0x9fc700a9, 0x01cc9218, 0xf40399c7, 0xf4b3070b, 0xc4b31400, 0x90b3ed00, 0xf4b30800, 0x2c0a0800, + 0xa4bd00f8, 0x548900f8, 0xa4b60121, 0xa0a9bc0f, 0x800000d9, 0x95a9ff00, 0xd9019cfa, 0x00800000, + 0xff04aa92, 0x9dfa95a9, 0x0000d901, 0xaa920080, 0x7fb4f010, 0xb605a9fd, 0x02d908b4, 0xfd010000, + 0xabfa05b9, 0xbd94bd01, 0x06403ef4, 0x00a9ce00, 0x92189fc7, 0x99c701ee, 0x070bf403, 0x1400f4b3, + 0xed00e4b3, 0x080090b3, 0x0800f4b3, 0x00f82b0a, 0x00f8a4bd, 0x01215889, 0xbc0fa4b6, 0x00d9a0a9, + 0xfd008000, 0xaace05a9, 0x8900f800, 0xb601215c, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00aace05, + 0x6c8900f8, 0x9abc0002, 0x8900f8a8, 0xb200029c, 0xa89abcaf, 0x0002ac89, 0xf0989fbc, 0xa4b603a4, + 0xff94f008, 0xf805a9fd, 0x51248900, 0x0000df01, 
0xa4b60080, 0xa0a9bc0f, 0xce95afff, 0xaab80099, + 0xfd000800, 0xaace05af, 0x0194f000, 0xbd071bf4, 0xf000f8a4, 0x00f801a4, 0x09f0babc, 0x00f6b001, + 0xa6081ef4, 0x0b9cf0bc, 0xda1f94b6, 0x7fffffff, 0xffa4faff, 0x00f8a59a, 0xff05bafd, 0x00f8a4cb, + 0xbab2a9b2, 0x18f4b9a6, 0xf89ab205, 0x0f348900, 0xbce4bd00, 0x4489a99e, 0x9ebc000f, 0x0f5489a9, + 0xa99ebc00, 0x000f6489, 0x89a99ebc, 0xbc000f74, 0x8489a99e, 0x9ebc000f, 0x0f9489a9, 0x10f48d00, + 0xa99ebc00, 0x89f8dabc, 0xbc000fa4, 0xb489a99e, 0x9ebc000f, 0x0fc489a9, 0xa99ebc00, 0x000fd489, + 0xbc01f5f0, 0xe489a99e, 0xdfbc000f, 0xa99ebca9, 0x0000b08f, 0x000ff489, 0x94a99ebc, 0x9fbc06a9, + 0xa0f4bd90, 0x01ff909e, 0xb3049990, 0x89f810f4, 0x8f0001b0, 0xbc0001c4, 0xc089a99e, 0x9e3c0001, + 0x05a994a9, 0x9fbce4bd, 0xa0f4bd90, 0x01ff909e, 0xb3049990, 0x89f808f4, 0xbc000244, 0x5489a99e, + 0x9e3c0002, 0x8900f8a9, 0x94010190, 0xf9bc0faf, 0x0000d9f0, 0xf9fd0080, 0x00f9ce05, 0xfa0195f0, + 0x0d7e01f9, 0x00f80007, 0x000e8489, 0x9ebce4bd, 0x0e7489a9, 0xa99ebc00, 0x000e5489, 0x89a99ebc, + 0xbc000e94, 0xa489a99e, 0x9ebc000e, 0x0eb489a9, 0xa99ebc00, 0x000ec489, 0x0010e48d, 0xbca99ebc, + 0xe489f8da, 0x9ebc000e, 0x0ef489a9, 0xa99ebc00, 0x000f0489, 0x89a99ebc, 0xf0000f14, 0x9ebc17f5, + 0xa9dfbca9, 0x000ed489, 0xf8a99ebc, 0x0fae9400, 0x01008089, 0x800000df, 0xe0e9bc00, 0xcef5efff, + 0x95f000f9, 0x01f9fa04, 0x800000df, 0x00eeb800, 0xefff0022, 0x00f9cef5, 0x010095f1, 0xd901f9fa, + 0x00800000, 0x1000eeb8, 0x05e9fd00, 0xf000e9ce, 0xe9fa1795, 0x07e87e01, 0x8900f800, 0x3c0001c0, + 0x9433989a, 0x988f5900, 0xa9940101, 0x909fbc0f, 0x800000df, 0x059ffd00, 0x8f0099ce, 0xbc0001b0, + 0x95b6e8fa, 0xa9f9bc10, 0xb6029ebb, 0x99ec0494, 0x96b03fff, 0x050df40f, 0xb08e0f09, 0xaf940000, + 0xf0f9bc04, 0x9098efbc, 0xe9bc0199, 0xff9eb3f9, 0x010f0e00, 0x0001c089, 0xf8a99f3c, 0x02548900, + 0x989a3c00, 0x56009433, 0x0101a08f, 0xbc0fa994, 0x00df909f, 0xfd008000, 0x99ce059f, 0x02448f00, + 0xe8fabc00, 0xbce899c7, 0x9ebba9f9, 0x6199e702, 0x0796b003, 0x09050df4, 0x01c48e07, 0x03af9400, + 0xbcf0f9bc, 0x999098ef, 0xf9e9bc01, 0x00ff9eb3, 0x89010f0e, 0x3c000254, 0x00f8a99f, 0x00026c89, + 0xb3989abc, 0xbd080290, 0x7e00f8a4, 0xec001acb, 0xf803e8aa, 0xb202f900, 0x1acb7ea0, 0x03e84c00, + 0xd4bdabb2, 0xe712a4b6, 0x7e01eebb, 0x89007272, 0xbc000500, 0x94b39890, 0xa0890c01, 0xc53e0000, + 0x10890009, 0x90bc0005, 0x0294b398, 0x0090890c, 0x09c53e00, 0x0190b300, 0xb2bfb20c, 0x09da3eae, + 0x00988900, 0x989cbf00, 0x677e019d, 0xb9940071, 0x10a5b610, 0xff10bf95, 0xa889a59a, 0x9cbf0000, + 0xb2019998, 0x10c5b6fb, 0xb6109d95, 0x9cff1094, 0x71677ec5, 0x10a99500, 0xfd10ba94, 0x01fb05a9, + 0x0002dc89, 0xf9989abc, 0xb3a0b202, 0x7e380194, 0x330006a9, 0xb23000a0, 0x04287e0a, 0x00a43300, + 0x7e0ab226, 0x33000434, 0xb21c00a4, 0x09757e0a, 0x027c8900, 0x9890bc00, 0x0d0294b3, 0x3e02a5b6, + 0xbd000a45, 0x8901fba4, 0xf9002784, 0xf89a3c22, 0x0002dc89, 0xb2989abc, 0x01f630a0, 0xb00b1cf0, + 0x2cf00196, 0x17537e0b, 0x025c8900, 0xc890bc00, 0x0003c489, 0x89f890bc, 0xbc000a34, 0x2489e890, + 0x90bc000a, 0x0a1489d8, 0x9890bc00, 0xb61e14b6, 0xa4b61d24, 0xffc4f11f, 0x0ff4f0ff, 0xf005acfd, + 0xf4b601e4, 0x13e4b614, 0xf005affd, 0xaefd01d4, 0x0194f005, 0xb612d4b6, 0xadfd1094, 0x05a9fd05, + 0xfd05a1fd, 0x21fb05a2, 0x0002dc89, 0xf9989abc, 0xb2cfb252, 0x0190b3a4, 0x3e94bd0a, 0x89000af2, + 0x3c000324, 0x9630989a, 0x0b9cf000, 0xf00196f0, 0xb1920196, 0x0f16b090, 0xc42f0cf4, 0xfec703f9, + 0xf099bc22, 0xbcf0f9bc, 0x94bc9044, 0x0294b690, 0x89f0f9bc, 0xbc002950, 0xf4b6f0fe, 0xf0f1bc04, + 0x3ea89fbc, 0xb30012f7, 0x036741b9, 0x0041b6b1, 0x00a20cf5, 0x3722b9b3, 
0x22b6b002, 0xb34a0cf4, + 0x01c713b9, 0xf413b6b0, 0xb9b3250c, 0xb0014b10, 0x0cf410b6, 0x01bdb30c, 0x8b3e012d, 0xb9b3000c, + 0xb3017211, 0x011f12bd, 0x000cfc3e, 0xb115b9b3, 0x15b6b001, 0x019c08f5, 0xaf20b9b3, 0x21bdb301, + 0x573e0105, 0xb9b3000d, 0xb0025227, 0x0cf427b6, 0x24b9b31d, 0xb6b00205, 0xe608f524, 0x25b9b301, + 0xbdb3020d, 0x3e00e026, 0xb3000dc1, 0x02912cb9, 0xf42cb6b0, 0xb9b3110c, 0xb3024a28, 0x00c729bd, + 0x000e283e, 0x942eb9b3, 0x40bdb302, 0x773e00b9, 0xbbb3000e, 0x03fb0081, 0x0081b6b1, 0xb34c0cf4, + 0x03554cb9, 0x004cb6b1, 0xb3250cf4, 0x02e244b9, 0x0044b6b1, 0xb30c0cf4, 0x008b43bd, 0x000eb63e, + 0xe445b9b3, 0x46b4b302, 0x0f0c3e7d, 0x4eb9b300, 0xb6b10365, 0x08f5004e, 0xb9b30332, 0xb3036f4f, + 0x630080be, 0x000fa03e, 0x0087bbb3, 0xb6b1058e, 0x0cf40087, 0x84bbb328, 0xb1045500, 0xf40084b6, + 0xbeb30c0c, 0x3e400083, 0xb3001073, 0x640085bb, 0x86beb304, 0x463e3100, 0xbbb30011, 0x064d00a0, + 0x00a0b6b1, 0xb3120cf4, 0x010089bb, 0x8abeb306, 0x963e1500, 0xbbb30012, 0x065400a1, 0x00ffbbb3, + 0xdfda0674, 0x3ebadfba, 0xb20012f7, 0x0a477e4a, 0x12f73e00, 0x10048e00, 0x01943300, 0x00508f28, + 0x0f499401, 0xdf909fbc, 0x00800000, 0xce059ffd, 0xe4bc0099, 0x059ffdf8, 0x710100df, 0x0cc93e20, + 0xf8e4bc00, 0x710100d9, 0xa4f9ff20, 0x0012f73e, 0x000e5480, 0xbb019d33, 0x008c8905, 0x0f4b9401, + 0xd9b0b9bc, 0x00800000, 0xce05b9fd, 0xff4c00bb, 0xa804bc03, 0x03ffb4f1, 0x000d4f3e, 0x8b7e4ab2, + 0xf73e0006, 0x4ab20012, 0x00095c7e, 0x0012f73e, 0x5c7e4ab2, 0xa5b60009, 0x12f73e05, 0x7e4ab200, + 0x3e000a00, 0x800012f7, 0x33000f34, 0x0564019d, 0x8e0f4b94, 0xbc010198, 0x00d9b0be, 0xfd008000, + 0xbbce05b9, 0xa804bc00, 0x8c10b5b6, 0x7e00ffff, 0x3e0006d8, 0x800012f7, 0x33000f44, 0x0534019d, + 0x8f0f4b94, 0x3e010198, 0x80000e87, 0x33000f54, 0x0520019d, 0x890f4b94, 0x3e01019c, 0x80000e9e, + 0x33000f74, 0x050c019d, 0x8e0f4b94, 0xbc0101a0, 0x133eb0be, 0x8480000e, 0x9d33000f, 0x9404f501, + 0xa08f0f4b, 0xe83e0101, 0xa480000d, 0x9d33000f, 0x9404e101, 0xa4890f4b, 0xb9bc0101, 0x0d3a3eb0, + 0x0fb48000, 0x019d3300, 0x4b9404ca, 0x01a48e0f, 0xb0bebc01, 0x000ea13e, 0x000fc480, 0xb3019d33, + 0x0f4b9404, 0x0101a88f, 0xd9b0bfbc, 0x00800000, 0xce05b9fd, 0x04bc00bb, 0xe8bbc7a8, 0x0012883e, + 0x000fd480, 0x8b019d33, 0x0f4b9404, 0x0101a889, 0xd9b0b9bc, 0x00800000, 0xce05b9fd, 0x04bc00bb, + 0xffb4f0a8, 0x0012883e, 0x000e7480, 0x63019d33, 0x0f4b9404, 0x0122888e, 0x000d373e, 0xf40ff6b0, + 0xc4bd090d, 0x000e4a3e, 0x4994fcb2, 0x00b08f04, 0x909cbc00, 0x000e703e, 0xf407f6b0, 0xc4bd090d, + 0x000e663e, 0x4994fcb2, 0x01c48f03, 0x909cbc00, 0x3ea8f9bc, 0x800012f7, 0x33000ed4, 0x0414019d, + 0x8f0f4b94, 0xbc013294, 0xa13eb0bf, 0x8480000e, 0x9d33000e, 0x9403fd01, 0x88890f4b, 0xb9bc0132, + 0x0000d9b0, 0xb9fd0080, 0x00bbce05, 0xf0a804bc, 0x4b3e00b3, 0x9480000d, 0x9d33000f, 0x9403d501, + 0xa08e0f4b, 0xbebc0101, 0x0ef73eb0, 0x0f648000, 0x019d3300, 0x4b9403be, 0x01a08f0f, 0xb0bfbc01, + 0x000f1f3e, 0x000fe480, 0xa7019d33, 0x0f4b9403, 0x0101a889, 0xd9b0b9bc, 0x00800000, 0xce05b9fd, + 0x04bc00bb, 0xf0bbc7a8, 0x0012883e, 0x000ff480, 0x7f019d33, 0x0f4b9403, 0x0101a88e, 0xd9b0bebc, + 0x00800000, 0xce05b9fd, 0x04bc00bb, 0x18b5b6a8, 0x0012883e, 0x000ee480, 0x57019d33, 0x0f4b9403, + 0x0133988f, 0x000f993e, 0x000ef480, 0x43019d33, 0x0f4b9403, 0x01339c89, 0xd9b0b9bc, 0x00800000, + 0xce05b9fd, 0xffdc00bb, 0xbc00ffff, 0xc63ea804, 0x0480000f, 0x9d33000f, 0x94031901, 0xa08e0f4b, + 0xbebc0133, 0x0f5b3eb0, 0x0f148000, 0x019d3300, 0x4b940302, 0x33a48f0f, 0xb0bfbc01, 0x000f5b3e, + 0x000e6480, 0xeb019d33, 0x207c8902, 0x0f4b9401, 0xd9b0b9bc, 0x00800000, 0xce05b9fd, 0x04bc00bb, + 
0xffffdca8, 0xbcfd7fff, 0x0d4f3e04, 0x0ec48f00, 0x0e948000, 0x0ea48200, 0x0eb48500, 0x01943300, + 0x0f41946a, 0x01328c8e, 0x800000d9, 0x101ebc00, 0xce0519fd, 0xf4bc0011, 0xff1bc4a8, 0x7e00ff4c, + 0xc4005fd5, 0x04bcffa3, 0xe81bc7a8, 0x7e00ff4c, 0xb2005fd5, 0xa824bca0, 0x4cf01bc7, 0x04b600ff, + 0x5fd57e08, 0xbca2b200, 0x1b95a854, 0x00ff4c18, 0x7e0503fd, 0xb6005fd5, 0xa4b61024, 0x0502fd18, + 0x3ea50aff, 0xbc0012f7, 0x04bc98f4, 0xf854bcd8, 0xf0e824bc, 0xd4b6ff94, 0x18f4b608, 0xfdffe4f0, + 0xd4f1059f, 0xe4b6ffff, 0x059efd10, 0x3ea59dff, 0x800012f7, 0x33001014, 0x0218019d, 0x8f0f4994, + 0xbc012028, 0x9d3e909f, 0x24800010, 0x9d330010, 0x94020101, 0x288e0f49, 0x9ebc0130, 0x0000df90, + 0x9ffd0080, 0x0099ce05, 0x3ef804bc, 0x330012e9, 0x8f720194, 0x94013040, 0x00de0f49, 0xbc008000, + 0x9eff909f, 0x00fdcef5, 0xfd189992, 0x9fce059e, 0x02f9c400, 0xe40b0bf4, 0x3e0f00dc, 0x890010e9, + 0xbc001074, 0x9c949894, 0x10f9c408, 0xc70a0bf4, 0xfd3e70d9, 0x94890010, 0x94bc0010, 0x109e9498, + 0xf420f9c4, 0xd9c70a0b, 0x11143e78, 0x10848900, 0x9894bc00, 0xff189094, 0x90ff95ec, 0x12f73ea5, + 0x10948900, 0xe894bc00, 0x00107489, 0x89f894bc, 0xbc001084, 0xe4b69894, 0x08f4b610, 0xb605effd, + 0xc33e1894, 0x94330012, 0x448e5801, 0x49940130, 0x909ebc0f, 0x800000de, 0xf59eff00, 0x9200ffce, + 0x9efd1c99, 0x009dce05, 0x3380d9c4, 0xc70b0090, 0x7d3e70f9, 0xa4890011, 0x94bc0010, 0x10919498, + 0xf440d9c4, 0xf9c70a0b, 0x11943e78, 0x10b48900, 0x9894bc00, 0xff189094, 0xf73ea501, 0xa4890012, + 0x94bc0010, 0x10b489f8, 0x9894bc00, 0xb610f4b6, 0xe93e1894, 0x348e0012, 0x94330010, 0x688f7901, + 0x49940130, 0x909fbc0f, 0x800000df, 0x059ffd00, 0xbc0092ce, 0x2fc498e4, 0xf4f9a63f, 0x9fb20518, + 0x00106489, 0xc49894bc, 0x2fc7fffd, 0xf4f9a6b8, 0x9fb20518, 0x00104489, 0x94e894bc, 0x9dff18f9, + 0xa82dc7f5, 0x18f4dea6, 0x89edb205, 0xbc001054, 0xd9940894, 0xb02dc708, 0xffff94f1, 0xa615f9ff, + 0x0518f4d0, 0xd9c40db2, 0x1094b6ff, 0x3ea519ff, 0x890012f7, 0xbc001064, 0x5489d894, 0xe4bc0010, + 0xe894bcf8, 0x00104489, 0xf09894bc, 0xd4b6fff4, 0xffe4f018, 0xb605fdfd, 0xe4b60894, 0xff94f110, + 0x05fefdff, 0x0012e93e, 0x000f2480, 0x23019433, 0x01301489, 0xbc0f4b94, 0x00d9b0b9, 0xfd008000, + 0xbbce05b9, 0xa804bc00, 0x3e00ff4c, 0xbc000d4f, 0xf73ea804, 0x4ab20012, 0xe46cfcc7, 0x7e0ffffb, + 0x3e0012f9, 0x8e0012f7, 0x94003910, 0x9ebc0449, 0x39108f90, 0x949ebf00, 0x99900249, 0x98f9bc01, + 0xff1094b6, 0xf73ea59e, 0x188f0012, 0x44bc0039, 0x0394b690, 0x8e909fbc, 0xbf003910, 0x0449949f, + 0x98909ebc, 0x94b60399, 0xa59fff10, 0x0012f73e, 0x0002ec89, 0xfba894bc, 0x28548951, 0x09a4b601, + 0x000000df, 0xc0cabca0, 0x03ffb4f1, 0xb606c4b6, 0xc9bc10b4, 0x05bffdc0, 0x800000d9, 0x95c9ff00, + 0xd9019bfa, 0x00800000, 0xfd04cc90, 0xccce05c9, 0xf8cab200, 0x28548900, 0x09a4b601, 0xb6d0dabc, + 0xd9bc06d4, 0x0000d9d0, 0xc3f06000, 0xffb4f100, 0x05c9fd03, 0xd910b4b6, 0x00800000, 0xfd05cbfd, + 0xdcfa05d9, 0xd900f801, 0x60000000, 0xf100b3f0, 0xfd03ffa4, 0xa4b605b9, 0x380cd910, 0xbafd0080, + 0x019bfa05, 0x00d900f8, 0xf1a00000, 0xb603ffa4, 0xa9fd10a4, 0x380cd905, 0x9afa0080, 0x3810da01, + 0xaace0080, 0x8900f800, 0xb6017090, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00afce05, 0xf9fde709, + 0x03c9c404, 0xfd0394b6, 0xaffa05f9, 0x01b43301, 0x01944f1d, 0xc700a9ce, 0x9ca63399, 0x920e0bf4, + 0xf4b301ff, 0x180af200, 0xa4bd00f8, 0x908900f8, 0xa4b60170, 0xa0a9bc0f, 0x800000d9, 0x05a9fd00, + 0xdf00a9ce, 0x80000000, 0xfa059ffd, 0xfc0f01a9, 0xc4049ffd, 0x9ffd03cf, 0x01a9fa05, 0x1d01b433, + 0xce01944f, 0x99c700a9, 0xf49ca630, 0xff920e0b, 0x00f4b301, 0xf8190af2, 0xf8a4bd00, 0x70908900, + 0x0fa4b601, 0xd9a0a9bc, 
0x00800000, 0xce05a9fd, 0xffdf00a9, 0xfd7fffff, 0xa9fa049f, 0x8f00f801, + 0xf9017090, 0x0fa99412, 0x9fbca1b2, 0x0000df90, 0x9ffd0080, 0x0099ce05, 0x000000df, 0xfd1a0002, + 0x0bf4049f, 0xbd010b20, 0x13a77ec4, 0xb3a0b200, 0xb21300a4, 0x143d7e1a, 0x89020f00, 0xbc00026c, + 0x0ab2199f, 0x02f911fb, 0x020c010b, 0xee7ea0b2, 0x0ab20013, 0x010c010b, 0x0013a77e, 0x0d00a4b3, + 0x6c89010f, 0x9fbc0002, 0x8f01fb09, 0x94017090, 0x9fbc0fa9, 0x0000df90, 0x9fff0080, 0x00d9ced5, + 0xffffffdf, 0x049ffdfe, 0x8e01d9fa, 0xbf00030c, 0xbb010fe9, 0xf9fd04fa, 0x08844905, 0x99cfefa0, + 0xff94f100, 0x0099b9ff, 0x0f059ffd, 0xf49fa6ff, 0x248f221b, 0xd9ce00f4, 0x1999c700, 0xb301ff92, + 0xb30c00f0, 0x3ef30190, 0x0a001531, 0x0190b317, 0xf8a4bd06, 0x030c8900, 0x099fbf00, 0xe49abc01, + 0xf494efff, 0xfefd060b, 0x030c8906, 0xf89fa000, 0x030c8e00, 0x09efbf00, 0x049abb01, 0xa0059ffd, + 0xf900f8e9, 0x03b4f012, 0x940fa4b6, 0x44890cbd, 0xb4b60128, 0xa0a9bc04, 0x00d1e4bd, 0x00008000, + 0xcfff4ccf, 0xcef5a1ff, 0x90fd00f9, 0x059bfd04, 0xfd049cfd, 0xf9fa059d, 0x01ee9001, 0xb340aa90, + 0xfbe504e4, 0x8912f911, 0x3c000c0c, 0xa0b2989a, 0x9933380a, 0x8900cc01, 0x3c0002fc, 0x9c89f890, + 0x90bc0002, 0x300e0ae8, 0x9cf001f6, 0x02e6b00b, 0xf00bfcf0, 0x9ffd01f6, 0xa51bf504, 0x03288900, + 0x98903c00, 0x9933120a, 0x94009801, 0x1c890f0d, 0x00dc0100, 0xbc008000, 0xdcffd0d9, 0x00cecec5, + 0x0002cc89, 0xdf9890bc, 0x8fffffff, 0xf004effd, 0x94b60794, 0x05e9fd1c, 0xdf01cefa, 0x00800000, + 0x2804ddb8, 0xf5dfff00, 0xf100f9ce, 0xfa081895, 0x00d901f9, 0x90008000, 0x894e18dd, 0x05d9fd00, + 0x9200dfce, 0xf9c701ee, 0x0194b303, 0x04f9c712, 0x0b0194b3, 0xb307f1c7, 0xb30c0110, 0x3e2100e0, + 0xb3001640, 0xb21900e0, 0x7e020b0a, 0x89001563, 0xbd0002dc, 0x0991bca4, 0x00167e3e, 0x11fb200a, + 0x01004489, 0xbc0fa4b6, 0x00d9a0a9, 0xb6008f00, 0xc4b60cb4, 0xffb4f104, 0xffc4f0ff, 0xf005b9fd, + 0xbcfd03d4, 0x02d4b605, 0x800000d9, 0x05bdfd00, 0xfa05a9fd, 0xa9ce01ab, 0x01ee9200, 0x0800e4b3, + 0x00f8040a, 0xb30394f0, 0x0aef0190, 0x0094b305, 0xf8a4bd06, 0x20948900, 0x0fa4b601, 0xbc0fb4f0, + 0x00d9a0a9, 0xfd008000, 0xabfa05a9, 0x04834f01, 0xb600a9ce, 0x0bf41f95, 0xf8a4bd07, 0x01ff9200, + 0xf000f4b3, 0x00f82d0a, 0x01004089, 0xbc0fa4b6, 0x0089a0a9, 0xb4b60f00, 0x03c4f004, 0xb6ffb4f0, + 0xb9fd02c4, 0x0000d905, 0xbcfd0080, 0x05a9fd05, 0x0901abfa, 0x17453e01, 0x00a9ce00, 0xf40394f0, + 0xdd92100b, 0x00d4b301, 0xb3240af4, 0xbd060094, 0x8900f8a4, 0xbc00028c, 0x96b0989a, 0x0bacf003, + 0x408900f8, 0xa4b60100, 0xa0a9bc0f, 0x800000d9, 0x05a9fd00, 0xf000aace, 0xacf003a4, 0xd900f80b, + 0x00800000, 0xb602aa90, 0xa9fd0fa4, 0x00aace05, 0xf8ffa4f0, 0x08b4b600, 0x0002ec89, 0xffffb4f1, + 0xbc04b5f0, 0x00f8a99b, 0x01284089, 0x800000df, 0x0fa4b600, 0xffa0a9bc, 0x99ce95af, 0x00aab800, + 0xaffd0001, 0xffff8f05, 0x049ffdfe, 0x0400008f, 0x8f059ffd, 0xf0f7ffff, 0x9ffd01b4, 0x13b4b604, + 0x0201008f, 0xfd05bffd, 0xabfa05b9, 0x8900f801, 0xdf012840, 0x00800000, 0xbc0fa4b6, 0xafffa0a9, + 0x0099ce95, 0x0100aab8, 0x05affd00, 0x9ffdfe0f, 0x0295f004, 0xf801a9fa, 0xbd02f900, 0x7ea0b2b4, + 0xb20017a8, 0x17ef7e0a, 0x8901fb00, 0xbc00029c, 0xb4b3a99b, 0x0f0f0f02, 0x0002ac89, 0xf8a99fbc, + 0x02ac8e00, 0x03b4b300, 0x2b8c8f20, 0x0fa99401, 0xdf909fbc, 0x00800000, 0xce059ffd, 0x99c70099, + 0x18673e68, 0xbc94bd00, 0x00f8a9e9, 0x00025889, 0x00f89a20, 0x00025889, 0x00f89a3f, 0x0002fc89, + 0x30989a3c, 0xacf00196, 0x0f00f80b, 0x03008901, 0xa99f3c00, 0x2089f43d, 0x9f3c0005, 0x0f00f8a9, + 0x03048901, 0xa99f3c00, 0x010f00f8, 0x00030889, 0xf8a99f3c, 0x03148900, 0xa99bbc00, 0x148900f8, + 0x9abc0003, 0x4767d9f8, 0xf9a68932, 0xf80bacf0, 
0x89f43d00, 0x3c000324, 0xf4bda99f, 0x0002bc89, + 0xf8a99fbc, 0x03248900, 0xa89a3c00, 0x048900f8, 0xa4b60169, 0x0000df0f, 0xa9bc0080, 0xf5afffa0, + 0xde00f9ce, 0x7fffffff, 0xfa049efd, 0x00d901f9, 0xb8008000, 0x0207e0aa, 0xce05a9fd, 0xffdf00a9, + 0xfd7fffff, 0xa9fa049f, 0x8900f801, 0xb6016124, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00a9ce05, + 0xffffffdf, 0x049ffd7f, 0xf801a9fa, 0x21088900, 0x0fa4b601, 0x800000df, 0xa0a9bc00, 0xcef5afff, + 0x95f100f9, 0xf9fa03ff, 0x0000d901, 0xaa900080, 0x05a9fd40, 0x0f00a9ce, 0x049ffdc0, 0xf0ff3f4f, + 0x9ffd1295, 0x4095f004, 0xf801a9fa, 0x08044900, 0x8d0099ce, 0xde017600, 0x00800000, 0xbc0fa4b6, + 0x008da0ad, 0xaefffe8a, 0xa0adbcf5, 0x3300ffce, 0xe44b0394, 0x738000f9, 0x8f430090, 0xbc01284c, + 0xaeffa0af, 0x0099ce95, 0xb8dfff4f, 0x000100aa, 0xff049ffd, 0xe9fae5ae, 0x0000de01, 0xaa920080, + 0x95aeffe8, 0xb80099ce, 0x000100af, 0xde05fefd, 0xefffffff, 0xfa049efd, 0x00f801f9, 0x044942f9, + 0x0099ce08, 0x0176008d, 0xde0fa394, 0x00800000, 0x8d303dbc, 0xfffe8a00, 0x3dbcf53e, 0x00ffce30, + 0xa9039d33, 0x00f9e400, 0x00997380, 0x4c8f00a0, 0x3fbc0128, 0x254eff40, 0x890020ce, 0xbc01294c, + 0xef093039, 0xfd153eff, 0x10fa0409, 0x02704a01, 0x200005f1, 0x00044c7e, 0xd90110fa, 0x00800000, + 0xffe83392, 0x00ce0539, 0x0031b800, 0x19fd0001, 0xffffd905, 0x09fdbfff, 0x0000d904, 0x09fd1000, + 0x0110fa05, 0x4c7e0f0a, 0x00d90004, 0xfd400000, 0x10fa0509, 0x7e0f0a01, 0xd900044c, 0xbfffffff, + 0xfa0409fd, 0x0f0a0110, 0x00044c7e, 0xbd0022ce, 0x1025f0f4, 0x800000de, 0x954eff00, 0x900192fa, + 0x449001ff, 0x04f4b340, 0x7e41fbf4, 0xb3003afd, 0xb31004a0, 0xb31a07a0, 0x3e0f00a0, 0xda001af4, + 0x01896402, 0x80da00f8, 0xf802faf0, 0x9f88da00, 0x00f8032a, 0xadbeefda, 0xf900f8de, 0x7ea1b222, + 0x33000440, 0x00c001ad, 0x287e1ab2, 0xad330004, 0xb200b501, 0x04347e1a, 0x01ad3300, 0x8c8900aa, + 0x10940130, 0x0009bc0f, 0x800000d9, 0x009b4f00, 0x89e509ff, 0xbcfecf74, 0xe9ce0009, 0x01ff9200, + 0x0f00f4b3, 0x01023209, 0x3e0391b0, 0xb6001b58, 0x90b31f95, 0x24bde801, 0x01005089, 0x09bc1ab2, + 0x5b517e00, 0x7e1ab200, 0xb2006472, 0x5fe37e1a, 0x7e1ab200, 0xb20062f3, 0x63477e1a, 0x7e1ab200, + 0xd90063ae, 0x00800000, 0xce9509ff, 0x94f00099, 0x0c0bf430, 0x2889010f, 0x9f3c0003, 0x4767df19, + 0x14898932, 0x9fbc0003, 0x0024b319, 0x3ea4bd0a, 0xb4001bb6, 0x010f03a0, 0x00032489, 0x3e199f3c, + 0x0a001bc5, 0x8921fb13, 0xbc00028c, 0x22f9989a, 0x90b3a0b2, 0xa4bd0a03, 0x001c873e, 0xd57eb4bd, + 0x0ab20016, 0xc4bd060b, 0x020202dd, 0x05dc4e02, 0x0005e67e, 0x9300adb3, 0x05dc4100, 0x020202d2, + 0xb2070b02, 0x050b7e0a, 0x01119200, 0x667e0ab2, 0xa2a60005, 0xb2751bf4, 0x057d7e0a, 0x00a4b300, + 0xbd0ab26c, 0x0d040cb4, 0x07d04e01, 0x0016807e, 0x5700a4b3, 0x060b0ab2, 0x02ddc4bd, 0x4e010101, + 0xe67e05dc, 0xa4b30005, 0xdc414100, 0x0102d205, 0x070b0101, 0x0b7e0ab2, 0x11920005, 0x7e0ab201, + 0xa6000566, 0x2d1bf4a2, 0x7d7e0ab2, 0xa4b30005, 0x0ab22400, 0xd57e0e0b, 0xa4b30016, 0x020f0d00, + 0x00027c89, 0xfb099fbc, 0x001db321, 0x213eff78, 0x14b3001c, 0x2f0ac000, 0x001c873e, 0xb4bd32f9, + 0xd57ea1b2, 0x1ab20016, 0xc4bd060b, 0x020202dd, 0x09c44e02, 0x0005e67e, 0xa4b3a2b2, 0xc4402c00, + 0x0202d309, 0x070b0202, 0x0b7e1ab2, 0x00920005, 0x7e1ab201, 0xa6000566, 0x111bf4a3, 0x7d7e1ab2, + 0xa4b30005, 0x2ab20800, 0x04b331fb, 0x2e02dc00, 0x001ce63e, 0x00028c89, 0xf9989abc, 0xb3a1b222, + 0xbd0a0390, 0x1d7f3e04, 0x7eb4bd00, 0xb20016d5, 0x00a4b3a0, 0x0b1ab26e, 0xddc4bd06, 0x02020202, + 0x7e09c44e, 0xb20005e6, 0x00a4b3a0, 0x09c44056, 0x020202d2, 0xb2070b02, 0x050b7e1a, 0x01009200, + 0x667e1ab2, 0xa2a60005, 0xb23b1bf4, 0x057d7e1a, 0x00a4b300, 0xbd1ab232, 
0x0dc4bdb4, 0x07d04e01, + 0x0016807e, 0xa4b3a0b2, 0x010f1900, 0x00027c89, 0x9fbc1ab2, 0x089b7e19, 0x7e1ab200, 0xb20008fd, + 0xb321fb0a, 0x00b20004, 0x1d7f3e2e, 0x001c8900, 0x9432f901, 0x09bc0fa0, 0x0000d900, 0xa1b20080, + 0x893509ff, 0xbcfeffe4, 0x32ce0009, 0x02cc8900, 0x5c2fc700, 0x7ea99fbc, 0x33003ab8, 0x890f00a4, + 0xbc0002bc, 0x90b39891, 0xf4bd1900, 0x0002bc89, 0x9fbc1ab2, 0x7e020b19, 0x3e00182b, 0x8e001e71, + 0x3f000258, 0x33300ae9, 0x008c009d, 0x0002dc89, 0x09f891bc, 0x0ae92001, 0x01f0b30c, 0x89010f7a, + 0xbc00029c, 0x00df199f, 0xd9400000, 0x8fffffff, 0xfd9429ff, 0x39fa059f, 0x00688901, 0x0000df01, + 0x09bc0080, 0xf50fff00, 0xde00f9ce, 0x40000000, 0xfa059efd, 0x00df01f9, 0x90008000, 0x0fff4400, + 0x00f9cef5, 0xfdcfff4e, 0xf9fa049e, 0x0000d901, 0x0eb80080, 0xfd0022e8, 0xe9ce05e9, 0xffffdf00, + 0x9ffd7fff, 0x01e9fa04, 0xfd01ff90, 0xe9fa059f, 0xfba4bd01, 0x028c8931, 0xbcf4bd00, 0x9c89a99f, + 0x9fbc0002, 0x02bc89a9, 0xa99fbc00, 0x00025c89, 0x89a99fbc, 0xbc0002dc, 0x7c89a99f, 0x9fbc0002, + 0x026c89a9, 0xbc010e00, 0xac89a99e, 0x9fbc0002, 0x02ec89a9, 0xbc02f900, 0xfc89a99f, 0x9e3c0002, + 0x030089a9, 0xa99f3c00, 0x00030489, 0x89a99f3c, 0x3c000308, 0x2489a99f, 0x9f3c0003, 0x032889a9, + 0x3ca0b200, 0x357ea99f, 0x0ab20015, 0x0039c27e, 0x2d7e0ab2, 0x01fb0029, 0x0002dc89, 0xf9989abc, + 0x01a2b222, 0x029db325, 0xbc8901f4, 0xa0940166, 0xbc050f0f, 0x00d90009, 0xff008000, 0x9ffa9509, + 0x0000d901, 0x00900080, 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, + 0x00900080, 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, 0x00900080, + 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, 0x00900080, 0x9509ff04, + 0xd9019ffa, 0x00800000, 0x096800b8, 0x9509ff02, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, + 0x0000d901, 0x00900080, 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, + 0x00900080, 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, 0x00900080, + 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d101, 0x00b80080, 0x8f00177c, + 0xfffe8af8, 0x0fbc9501, 0x0099ce00, 0xf40194f0, 0x04895f1b, 0x09bc0175, 0x5b127e00, 0x7e2ab200, + 0xb2006630, 0x1e757e2a, 0x7e2ab200, 0xb200070d, 0x07e87e2a, 0xf501ff00, 0xf000f9ce, 0xf9fa0195, + 0x0000df01, 0x00b80080, 0xff021540, 0x99ce950f, 0x0000b800, 0x0fff0010, 0x0099ce95, 0xfe983c89, + 0x080000b8, 0xf50fff02, 0xce0009bc, 0x2ab200f9, 0x0014a67e, 0xadb3a1b2, 0xb2008300, 0x14cb7e2a, + 0xb3a1b200, 0xb27600a4, 0x7e010b2a, 0xb20032ac, 0x00a4b3a1, 0x75048f68, 0x8afc8901, 0x000fbcfe, + 0x800000de, 0xe50eff00, 0xce0009bc, 0xfe0f00e9, 0xfa049ffd, 0x2ab201e9, 0x003b2d7e, 0x1f00a433, + 0x0176088f, 0x8f000fbc, 0xd9fe89f8, 0x00800000, 0xbc9509ff, 0x010f000f, 0xb2019ffa, 0x3b367e2a, + 0x00a43300, 0x76548918, 0xbc010f01, 0x00d90009, 0xff008000, 0x9ffa9509, 0xfb1ab201, 0x7ea4bd21, + 0xbd001e75, 0x3b057ea4, 0x7e010a00, 0x0a001e75, 0x3b057e01, 0x7e020a00, 0x0a001e75, 0x3b057e02, + 0x7e030a00, 0x0a001e75, 0x3b057e03, 0x89f43d00, 0x20000310, 0xb200f89f, 0x01cfc4ad, 0xfa08c049, + 0xbe4f009f, 0x01c0b373, 0x4e030f06, 0xe9cf08c0, 0x0899c700, 0x0bf49ca6, 0x01ff9227, 0xf200f4b3, + 0xf001c6b0, 0x1d090bfc, 0x33a29fbc, 0xb21401b4, 0xb2040bac, 0x02617eda, 0xf8a4bd00, 0xf8a4bd00, + 0x02dc8900, 0x989abc00, 0x110f02f9, 0x94b3a0b2, 0x010b6802, 0x377e010c, 0x0d940021, 0x2820890f, + 0x0000df01, 0xd9bc0080, 0xf5dfffd0, 0x4e00f9ce, 0x9efdfdfe, 0x01f9fa04, 0x800000d9, 0x18dd9000, + 0xfd50b74e, 0xd9ce05d9, 0x209fc600, 0xc70194f0, 0x90b305ff, 0xf4bd0600, 0xb301ee92, 0xb30800e0, + 
0xb2e700f0, 0xbd010b0a, 0x21377ec4, 0x89f4bd00, 0xbc0002bc, 0xfab2099f, 0x22f901fb, 0xc1b2b2b2, + 0x947ea0b2, 0x2209003a, 0x8a00ad33, 0x0314b300, 0x028c8f3c, 0x0124b300, 0x0018892b, 0x09f1bc01, + 0xbc0f0f94, 0x00d9f0f9, 0xfd008000, 0xf9ce05f9, 0xffffde00, 0x9efddfff, 0x01f9fa04, 0x0022593e, + 0xf9bc0209, 0x22593e09, 0x028c8900, 0xbc010f00, 0x0209099f, 0x0bf42fa6, 0x00ac893c, 0x0f0d9401, + 0xd9d0d9bc, 0x00800000, 0xce05d9fd, 0x26b000d9, 0x0bfcf000, 0x9401f4f0, 0xf4b602fc, 0xfdf70e03, + 0x9ffd049e, 0xfdfb0f05, 0x9cfd049f, 0x01d9fa05, 0x9ab294bd, 0x8c8921fb, 0x9abc0002, 0xb202f998, + 0x019992a0, 0xf40196b0, 0xdc893e0d, 0x9abc0002, 0x0194b398, 0x03f27e34, 0x01a43300, 0x7e0ab20e, + 0x3e001cf4, 0xb20022cd, 0x1c9c7e0a, 0x00a4b300, 0x7e0ab21a, 0xbd00177f, 0xb2acb2b4, 0x21fa7e0a, + 0x22e73e00, 0xfba4bd00, 0x0c32f901, 0xb2a2b207, 0x4e030db3, 0x807e07d0, 0xa1b20016, 0x3600a4b3, + 0x9207d040, 0x34b30100, 0x2ab20e00, 0x0003d87e, 0x00231a3e, 0xbe7e2ab2, 0xa6b00003, 0x0b9cf007, + 0x0c0000b3, 0xdf009033, 0x0023323e, 0x06009433, 0x1ab20601, 0xdc8931fb, 0x9abc0002, 0xb212f998, + 0x0299b3a1, 0xa87e016b, 0xa4f00003, 0x04a0b3ff, 0x02a0b314, 0x0b1ab210, 0x0d030c04, 0x17087e6e, + 0xbd1ab200, 0x22e97eb4, 0x0b1ab200, 0x22e97e01, 0x284c8900, 0x0f109401, 0x800000de, 0x0009bc00, + 0xce950eff, 0xef0f0099, 0x010000b8, 0x049ffd00, 0xf1e50eff, 0xfa200095, 0x00de01e9, 0x92008000, + 0x0effe800, 0x0099ce95, 0x00b8fd0f, 0xfd000100, 0x0eff049f, 0x0195f0e5, 0x0b01e9fa, 0xb2020c01, + 0x13ee7e1a, 0x0000d900, 0x00b80080, 0x4e000728, 0x09ff0106, 0x0000dab5, 0x00b81000, 0xff020ff4, + 0x6889c509, 0x09bcfedf, 0x00b9ce00, 0xc700cdce, 0x9afd749f, 0x0e0bf404, 0x0b00f4b3, 0x3e1fd995, + 0xbd002405, 0x01ee9294, 0x0800e0b3, 0xdd009033, 0x7e01384a, 0xb200044c, 0x7e030b1a, 0x89001563, + 0xb2012820, 0x0009bc1a, 0x010c010b, 0x0021377e, 0x800000df, 0xf50fff00, 0x4e00f9ce, 0x9efdf3e5, + 0x01f9fa04, 0x800000d9, 0x180f9000, 0xff50b74c, 0xedcee5f9, 0x18d9c400, 0xc6101bf4, 0x9bc740d9, + 0x82d9c406, 0x06009033, 0xcc92b4bd, 0x00c0b301, 0x00b03308, 0xb2b4bde1, 0x17a87e1a, 0xbd010b00, + 0x7e1ab2c4, 0xb2002137, 0x39ac7e1a, 0x7e1ab200, 0x890039b7, 0x0f0002dc, 0x199fbc02, 0x00027c89, + 0x9ebce4bd, 0x026c8919, 0x199ebc00, 0x6c8f11fb, 0x02f90100, 0xb20fa994, 0x909fbca0, 0x00dfe4bd, + 0xff008000, 0xfefaf59f, 0x0000df01, 0x99920080, 0x059ffd04, 0x9ffaf4bd, 0x0c040b01, 0x7e370d01, + 0xb2001708, 0x23367e0a, 0x8f01fb00, 0xf901006c, 0x0fa99402, 0x9fbca0b2, 0xdfe4bd90, 0x00800000, + 0xfaf59fff, 0x00df01fe, 0x92008000, 0x9ffd0499, 0xfaf4bd05, 0xee7e019f, 0x0ab20018, 0x0022967e, + 0x367e0ab2, 0x01fb0023, 0xa1b242f9, 0x0069307e, 0x00395089, 0x3398913c, 0x94380194, 0x70890f1e, + 0xd4bd0128, 0xdae0e9bc, 0x00800000, 0xffffffdb, 0x0000dcc0, 0xeaff1c00, 0x00f9cef5, 0xfd049bfd, + 0xf9fa059c, 0x01dd9001, 0xb340ee90, 0x89eb04d4, 0x94012840, 0x09bc0f10, 0x0000d900, 0x09ff0080, + 0x004fce45, 0x010000b8, 0x3509ff00, 0xfed6c089, 0xd90009bc, 0x3fffffff, 0xfa04f9fd, 0x1ab2013f, + 0x003ad07e, 0x1ab2abb2, 0x005cc37e, 0xadb3a2b2, 0x8900a300, 0x3c000324, 0x94339891, 0x1ab20a01, + 0x005ba67e, 0x01282089, 0xd90009bc, 0x00800000, 0xced509ff, 0x95f100d9, 0xd9fa0201, 0x180f9001, + 0x800000d9, 0xc5f9ff00, 0xce00c84f, 0xff9200ce, 0x01e9c401, 0x0b0194b3, 0xb305e9c7, 0xb30c0190, + 0x3e4a00f0, 0xb30025eb, 0xce4200f0, 0xff8f0049, 0x9ffdf9fe, 0x0139fa04, 0xf100d9ce, 0xfa040295, + 0xc84f01d9, 0x00cece00, 0xc701ff92, 0x94b301e9, 0xe9c70b01, 0x0190b306, 0x00f0b30c, 0x26253e16, + 0x00f4b300, 0x264f3e10, 0x3e370200, 0x02002651, 0xfb2ab221, 0xb252f941, 0xfc30f4a1, 0x003afd7e, + 0x1ab2a4b2, 0x003ad97e, 
0x1ab2a3b2, 0x003b127e, 0x00031089, 0xa2b2993f, 0x92019933, 0x3800de00, + 0xe9ce0080, 0xffffdf00, 0x9ffd3fff, 0x01e9fa04, 0x217e1ab2, 0x00d50068, 0x94008000, 0x14890f10, + 0x09bc0171, 0x0505fd00, 0xb20000ce, 0x3aeb7e1a, 0x08884900, 0xb60099cf, 0x0bc41d95, 0x0091b0ff, + 0x3db24cb2, 0xe17e2eb2, 0xa4b3005e, 0x0c896800, 0x913c000c, 0x33390a98, 0x895b0190, 0x3c000c10, + 0x94339891, 0x04de2001, 0xce008038, 0xff8f00e9, 0x95fde0ff, 0x049ffd05, 0x0a00008f, 0xfa059ffd, + 0x010f01e9, 0x00031089, 0x90899f20, 0x1f940170, 0xf0f9bc0f, 0x800000d9, 0x05f9fd00, 0xde00f9ce, + 0x01000000, 0xfa059efd, 0x1ab201f9, 0x0015357e, 0x30f4a4bd, 0xf951fb04, 0x73108942, 0x0fa19401, + 0x19bca0b2, 0x0000df10, 0x1fff0080, 0x00f9cef5, 0xfa0295f0, 0x588901f9, 0x11b8fe8b, 0xdf000198, + 0x00800000, 0xbcf51fff, 0xf9ce1019, 0xe495f000, 0x7e01f9fa, 0xb20067ab, 0x69047e0a, 0x7e0ab200, + 0xb3003aeb, 0xb22e02a4, 0x69e27e0a, 0x7e0ab200, 0xb3003af4, 0xb20a02a4, 0x698a7e0a, 0x7e0ab200, + 0xb3003af4, 0xb23e03a4, 0x683e7e0a, 0x27e13e00, 0x7e0ab200, 0xb3003aeb, 0xb22a03a4, 0x686a7e0a, + 0x7e0ab200, 0xb3003af4, 0xb20a03a4, 0x68c87e0a, 0x7e0ab200, 0xb3003af4, 0xb20a02a4, 0x68807e0a, + 0x7e0ab200, 0xb3003af4, 0xb20e04a0, 0x3af47e0a, 0x05a4b300, 0x7e0ab20a, 0x8900689c, 0xbc016900, + 0x00d91019, 0xff008000, 0x42ce4519, 0x001fb800, 0xf9ff0208, 0x0031ce35, 0x9d7e0ab2, 0xa033003a, + 0xf7091000, 0xfd1015f0, 0x363e0429, 0xef090028, 0xfd0825f0, 0x0ab20419, 0x003aa67e, 0x1000a033, + 0x15f0ef09, 0x0429fd01, 0x0028543e, 0x25f0fe09, 0x0419fd10, 0xaf7e0ab2, 0xa033003a, 0xfd091000, + 0xff021ec5, 0x723ef429, 0xfd090028, 0xff022fc5, 0x4ffae419, 0x013efa01, 0x2489f43d, 0x9f3c0003, + 0xf941fb09, 0x7ea1b222, 0xb2006a03, 0x3ae27e1a, 0x03a0b300, 0x3874890f, 0x98913c00, 0x0a019433, + 0x7f7e1ab2, 0x1ab20067, 0x003b1b7e, 0x0a00a033, 0x4e7e1ab2, 0x74890069, 0x913c0038, 0x00903398, + 0x7e1ab20a, 0x8900194d, 0x94010068, 0x00df0f10, 0xbc400000, 0x00d90009, 0xff008000, 0x9ffa9509, + 0xff948f01, 0x040090fe, 0x800000d9, 0x9509ff00, 0xdf000fbc, 0x11000000, 0x89019ffa, 0x3c000324, + 0x24332891, 0x50892901, 0x1ab20100, 0x7e0009bc, 0xd9006502, 0x00800000, 0xce9509ff, 0x94f00099, + 0x0a0bf430, 0x00032889, 0xfb19923c, 0x02af9421, 0xbb05a994, 0x2c8f029f, 0xe4bd0003, 0xbd909fbc, + 0x909fa0f4, 0x999001ee, 0x07e4b304, 0x039c89f8, 0xa99fbc00, 0x0003ac89, 0x89a99fbc, 0x3c0003bc, + 0xc489a99f, 0x9fbc0003, 0x8900f8a9, 0x3c0003bc, 0x260f989a, 0x4f009033, 0x0003ac8e, 0xb0f8eabc, + 0x0cf406f6, 0x01f9901d, 0x94a9e9bc, 0x9abb03a9, 0x909fbc02, 0x00032c8f, 0x3ef8f9bc, 0x940029b6, + 0xaf9402a9, 0x02f9bb05, 0x00032c89, 0xbff0f9bc, 0xbc0109ff, 0xa9b8a9e9, 0xb6000248, 0x9ffa0294, + 0xb2f4bd01, 0xb800f8fa, 0x000248a9, 0xce0294b6, 0x9c8f009d, 0xfabc0003, 0x06e6b0e8, 0x90360cf4, + 0xf9bc01e9, 0x03af94a9, 0xbc02fabb, 0x2c8ef0fe, 0xedbc0003, 0x03c08ff9, 0x0794b300, 0x3c01090b, + 0x00f8a9f9, 0xf93c943d, 0x89f4bda9, 0xbc0003c4, 0x00f8a99f, 0x0003bc89, 0xf8a99b3c, 0x02a99400, + 0xaf9422f9, 0x12f9bc05, 0x00032c89, 0x981019bc, 0xa2b20510, 0x003b517e, 0x0bf40aa6, 0x3ea43d09, + 0x98002a57, 0x2ab20610, 0x003b5a7e, 0x9cf00aa6, 0x0196f00b, 0xfb019ac6, 0x02a99421, 0xaf9422f9, + 0x12f9bc05, 0x00032c89, 0x981019bc, 0xa2b20310, 0x003b3f7e, 0x0bf40aa6, 0x3ea43d09, 0x98002a93, + 0x2ab20410, 0x003b487e, 0x9cf00aa6, 0x0196f00b, 0xfb019ac6, 0xb212f921, 0x2a597ea0, 0xb2a13200, + 0x2a1d7e0a, 0x02099400, 0xbb050f94, 0x2c8902f9, 0xf9bc0003, 0x0ef9bff0, 0x0694b309, 0x00163028, + 0x300bfcf0, 0x9cf000a6, 0x00f0330b, 0x33030e0a, 0x0e120094, 0x00f43301, 0x33020e0c, 0x0e060094, + 0x89eab20f, 0xbc0003c4, 0x11fb099e, 0x9402af94, 
0x9fbb05a9, 0x032c8f02, 0x909fbc00, 0x9fa0060f, + 0x01287089, 0xac9432f9, 0xbca1b20f, 0xb4bdc0c9, 0x0d03a994, 0x229abc01, 0x800000d3, 0x032c8000, + 0x95c3ff00, 0xc70099ce, 0x9fc7509e, 0x0ae4b668, 0xb63f94f0, 0x2dbc06f4, 0x05f9fda0, 0xfd01b9c4, + 0xfe9405fe, 0x0094b310, 0xa90fbc0b, 0x002b5c3e, 0x90980abc, 0xe9ff01dd, 0xa909bc95, 0x9001bb90, + 0xb4b340cc, 0x1ab2bf04, 0x003b3f7e, 0x8f021994, 0x9400032c, 0x09bb0510, 0x000fbc02, 0xb2030ab5, + 0x3b487e1a, 0x040ab500, 0x517e1ab2, 0x0ab5003b, 0x7e1ab205, 0x89003b5a, 0xbd00039c, 0x199fbcf4, + 0x0003ac89, 0xbc060ab5, 0x010f199f, 0x0003bc89, 0xfb199f3c, 0x27888931, 0xb299bf00, 0x0094b3af, + 0x3b688925, 0xb399bf00, 0xa6100194, 0x089cf0ab, 0x3e0196f0, 0x3d002bfb, 0xf4fba6a4, 0x010a2b0d, + 0x90b300f8, 0xa43d0801, 0x688900f8, 0x99bf003b, 0x0f0194b3, 0xfba6a43d, 0x320e0cf4, 0xa600f89a, + 0x089cf0ab, 0x00f89a32, 0x0038d089, 0xa4b6e4bd, 0xa0abbc02, 0x89a99ebc, 0xbc002910, 0x3089a99e, + 0x010d003a, 0x89a99dbc, 0x8f003b80, 0xbc00ffff, 0x6c89a99f, 0x9d3c003b, 0x3bd089a9, 0xa99e3c00, + 0x00369089, 0x89a99ebc, 0xbc0037e8, 0x3089a99e, 0x9ebc0038, 0xf900f8a9, 0x02b99402, 0xae94d0b2, + 0x04bd9404, 0xb006af94, 0x1df400c6, 0xf0efbc1d, 0x8e909dbc, 0xbc003550, 0xcfc4909f, 0x909ebc1f, + 0x3e20f5f0, 0xbc002c9d, 0x9dbcf0ef, 0x35508e90, 0x909fbc00, 0xbc01cfb9, 0xf4f0909e, 0x019fb51f, + 0x9404b994, 0xbf9406ad, 0xf0f9bc02, 0xbc04a994, 0xf9bc909d, 0x355089f0, 0x010eb900, 0xf0f0f9bc, + 0x94bd0fe4, 0xb502feb5, 0x01fb03f9, 0xb1b232f9, 0x087ea0b2, 0x2889002c, 0x050d003a, 0x94899da0, + 0x0ab20039, 0x04b6140f, 0x0001bc02, 0x89099fbc, 0xa000382c, 0x3760899d, 0xbcf4bd00, 0xd889099f, + 0xf10e0039, 0x89099ebc, 0x8200278c, 0xbc003710, 0x1bb2099e, 0x003b2883, 0xbc092ebc, 0xecb2093e, + 0x577eedb2, 0x20bc002c, 0x3830bcf8, 0x00387889, 0x890993bc, 0xbc003954, 0x31fb099f, 0x8e20a9c4, + 0xf4003a2c, 0xa9c40a0b, 0x2d503e1f, 0x01a9b900, 0xbfb9e9a0, 0x3c208901, 0xf89fa000, 0x9832f900, + 0xc19802c2, 0xb2b3b201, 0x2c087ea0, 0xb21ab200, 0x2d3c7e2b, 0x3c208900, 0x3a2c8f00, 0xbf9ebf00, + 0x387889ff, 0x0204b600, 0xbc0003bc, 0x5489099f, 0x9ebc0039, 0x9431fb09, 0x9bbc02a9, 0x37a489c0, + 0xf89cbc00, 0x0036d089, 0x89e89cbc, 0x0d003a28, 0x899da001, 0xf4003994, 0x22f9ec30, 0xfa90a1b2, + 0xc99abc02, 0x00371089, 0x9202e090, 0x9ebc02ee, 0x382c89c9, 0x92b2b200, 0x288b02ff, 0x9da0003b, + 0x89c9bfbc, 0xbc003760, 0x1609c990, 0xf416f6b0, 0xeb090c1c, 0x1ff4f9a6, 0xbcea0908, 0xe6b0c9b9, + 0x0f1ef400, 0xbd021994, 0x9092bce4, 0x002e1f3e, 0xe9a6f209, 0x94121ff4, 0xf10e0219, 0x8f9092bc, + 0xbc003710, 0xa6b099fe, 0x0f1df416, 0x0e021994, 0x9092bc16, 0x002e473e, 0xa9a6eb09, 0x94121ff4, + 0xea0e0219, 0x8f9092bc, 0xbc003994, 0x06b099fe, 0x0f1ef400, 0xbd021994, 0x9092bce4, 0x002e6f3e, + 0x09a6f209, 0x94121ff4, 0xf10e0219, 0x8f9092bc, 0xbc003760, 0x1f9499fe, 0x3b288902, 0xf0f2bc00, + 0x89e89fbc, 0xbc003710, 0xd889089f, 0x9ebc0039, 0x278c89f9, 0xbc1ab200, 0xecb2f990, 0x0db22bb2, + 0x002c577e, 0x94042994, 0x2f94061e, 0xf0f9bc02, 0xbc041994, 0xf9bc909e, 0x355089f0, 0xf0f9bc00, + 0x4dfef9bf, 0x0cdd9001, 0xd9a01ab2, 0xb201f998, 0xb5dcb22b, 0xf99801d9, 0x02d9b502, 0xb503f998, + 0xff9803d9, 0x04dfb504, 0x002d5d7e, 0x891425fb, 0xb6003878, 0xabbc02a4, 0xa99cbca0, 0x00395489, + 0xf8a99dbc, 0x02a99400, 0x9bbc42f9, 0x387889f0, 0x289fbc00, 0x0036908e, 0xf40026b0, 0x29c40d1d, + 0x2095f01f, 0x002f2e3e, 0xf00129b9, 0xe9bc1f94, 0x02a994f9, 0x89e09bbc, 0xbc003954, 0x3089489e, + 0xe88f0038, 0x9cbc0037, 0x0149b9e9, 0xbc0f94f0, 0xff89e9f9, 0xc9a600ff, 0x89650bf4, 0xbc003b6c, + 0xd93fd0e9, 0x8fffc0c4, 0x33003b80, 0x3d0f0194, 0xe9f0bc94, 0x853ed920, 
0xfebc002f, 0xf409a698, + 0xf0bc060d, 0x02a994e9, 0x003a3083, 0xbc109bbc, 0x0ab2b831, 0x002bb57e, 0x2500a033, 0x2ab20bb2, + 0x0076ce7e, 0x0038d089, 0x9abc0bb2, 0x7e4ab219, 0x890076ce, 0xbc002910, 0x9abc1930, 0x8941fb19, + 0x94003690, 0xfbbc02af, 0xa89fbcf0, 0x0037e889, 0x89e89fbc, 0xbc003830, 0xa4f0989f, 0x0fe4f03f, + 0xb618a4b6, 0x94f110e4, 0xaefdffff, 0x05a9fd05, 0x808900f8, 0xa4b6003b, 0xa0abbc02, 0xf1a89abc, + 0xf8ef00a5, 0x00a6b000, 0x7e091df4, 0xf800768d, 0x08a4b600, 0x00768d7e, 0xf808a7b6, 0x00a6b000, + 0xb6121df4, 0x8d7e08a4, 0xaa900076, 0x08a7b6ff, 0x8d7e00f8, 0x00f80076, 0x7e08a4b6, 0x9000768d, + 0xa7b67faa, 0x8900f808, 0xbf0037a0, 0xb342f999, 0x94230090, 0x308f02a0, 0xd08e003a, 0xa4820038, + 0x10830037, 0xd0840029, 0x94b30036, 0xab3e6002, 0x30890030, 0xa094003a, 0x000bbc02, 0x891890bc, + 0xbc0038d0, 0x1bb2a890, 0x0030057e, 0x0037a489, 0x89099abc, 0xbc002910, 0x1bb2a890, 0x0030057e, + 0x0036d089, 0x3e099abc, 0xbc0030e8, 0xf0bc000b, 0xa8e0bc18, 0x1d7e1bb2, 0x2abc0030, 0xa830bc09, + 0x1d7e1bb2, 0xe53e0030, 0x0bbc0030, 0x18f0bc00, 0xb2a8e0bc, 0x30387e1b, 0x092abc00, 0xb2a830bc, + 0x30387e1b, 0x094abc00, 0x42f941fb, 0xb202e398, 0xb4b2b2a1, 0x40340690, 0x01e0981c, 0x18f4cda6, + 0x7e9cb237, 0xb2002f05, 0x7e3bb20a, 0x89002d3c, 0x8e003c20, 0xbf003a2c, 0x89eebf9d, 0x94003878, + 0xa43d021f, 0xbcf0f2bc, 0x5489f99e, 0x9dbc0039, 0x31683ef9, 0x7e9cb200, 0xb2002f05, 0x7e2bb21a, + 0x0a003047, 0xf44a2601, 0xa48f211b, 0x19940037, 0x9092bc02, 0x8fc8f9bc, 0xbc0036d0, 0x1ab2d8f9, + 0x577e2bb2, 0x4a32002c, 0xa99441fb, 0x8f42f902, 0xbc003bd0, 0x0fbc009b, 0xb2993f90, 0xb2b4b2a3, + 0x009d33c1, 0xd88d00c3, 0x94890039, 0xd0bc0039, 0x9890bce8, 0x1ff4e9a6, 0x278c8926, 0xf890bc00, + 0x00376089, 0xa69890bc, 0x131cf4f9, 0x003a2889, 0xe9bc99bf, 0x09d9bc90, 0x0031e33e, 0x00278c8b, + 0x00382c89, 0xbf023e94, 0xe0e4bc9c, 0x003b288d, 0xbcf8bebc, 0xd889d8de, 0xfcbc0039, 0xe99dbcf0, + 0xb2e9bfbc, 0xb23ab21c, 0x2f057e4b, 0x02399400, 0x0039d880, 0x812094bc, 0xbc00278c, 0x12bcc802, + 0xb23ab2d8, 0x2c577e4b, 0xe802bc00, 0x89d812bc, 0xbc003994, 0x7889f892, 0x9ebc0038, 0x39548929, + 0x299dbc00, 0x1ef4efa6, 0x37608946, 0x9892bc00, 0x1ef4d9a6, 0x3bd0893a, 0x2029bc00, 0x29200109, + 0x00326c3e, 0x002f057e, 0x4bb23ab2, 0x0030477e, 0x0037a489, 0x89c890bc, 0xbc0036d0, 0x3ab2d890, + 0x577e4bb2, 0x010a002c, 0x00326e3e, 0x41fba43d, 0xb604bf94, 0xbfbc02b4, 0x35508fb0, 0x06ae9400, + 0xbc04a4b6, 0xbabca0ae, 0xb0bfbcb0, 0x9ab2bfbf, 0xbf989fa0, 0x019fb501, 0xb502bf98, 0xbf98029f, + 0x039fb503, 0xb504bb98, 0x00f8049b, 0x01738089, 0xbc0fa4b6, 0x00d9a0a9, 0xfd008000, 0xa9ce05a9, + 0xfdfe0f00, 0xbfc4049f, 0x059ffd01, 0x4f01a9fa, 0xa9ce50b7, 0x0199c700, 0xa601ff92, 0x0b9cf09b, + 0x0c00f0b3, 0xee009033, 0x0032f23e, 0x90331b0a, 0xa4bd0600, 0xf08900f8, 0x9abc0004, 0xb3aeb2f8, + 0x0a2b00f0, 0xf4faa601, 0x2089431b, 0x9ebc0004, 0xf0020a98, 0x99920394, 0x0196b002, 0x892e0cf4, + 0xbd002784, 0xe99f3ca4, 0x208900f8, 0x9abc0004, 0x0394f098, 0x0b0190b3, 0x9aa6030a, 0x3d0e1bf4, + 0x278489f4, 0x3ca4bd00, 0x00f8e99f, 0x00042089, 0x89f89abc, 0xbc000500, 0xaeb2d89a, 0xb302f995, + 0xb03001d0, 0x08f401d6, 0xb3010a0d, 0x3e5c03d4, 0xf00033ab, 0x040a0194, 0x8f4f0bf4, 0x3d003950, + 0xe9f93c94, 0x00290c8f, 0x3ee9f93c, 0xf00033a7, 0x050a0294, 0x3d330bf4, 0x39508f94, 0xe9f93c00, + 0x00290c89, 0xbde99d3c, 0xf000f8a4, 0x060a0894, 0x0f170bf4, 0x39508901, 0x3ca4bd00, 0xf43de99f, + 0x00290c89, 0xf8e99f3c, 0x04208900, 0xf89abc00, 0x00051089, 0xb2e89abc, 0x07f995ad, 0x1901e0b3, + 0xf401e6b0, 0x010a2608, 0x2c02e4b3, 0x0a0494f0, 0x33fa3e07, 0x0294f000, 0x90b3080a, 0x50891a00, + 
0xa4bd0037, 0xf8d99ebc, 0x89f4bd00, 0xbc003750, 0xa4bda99f, 0x208e00f8, 0xea3c0005, 0x33afb298, + 0xbd080094, 0x8900f8a4, 0xbc000420, 0x0e0a989a, 0xf44094f0, 0x01090a0b, 0xe93ca4bd, 0x8900f8f9, + 0xbc000420, 0x2489e89a, 0x9abc0005, 0xc7090df8, 0xefff4aee, 0xf49fa694, 0x94890c1b, 0xd4bd000a, + 0xb2a99ebc, 0x8f00f8da, 0xbc000554, 0x6489f8fa, 0x9abc0005, 0xb6aeb298, 0xa4bd0cf4, 0xfd1094b6, + 0x848f059f, 0x95f0000a, 0xe9f9bc12, 0x208900f8, 0x9abc0004, 0x053489e8, 0xf89abc00, 0xeec70a0d, + 0x94efffef, 0x1bf49fa6, 0x0aa4890c, 0xbcd4bd00, 0xdab2a99e, 0x208900f8, 0x9abc0004, 0x054489e8, + 0xf89abc00, 0xeec70b0c, 0x94efff97, 0x1bf49fa6, 0x0aa48d1d, 0x98dabc00, 0xdf10e4b6, 0x32000000, + 0x9ffdc4bd, 0x059efd05, 0xb2a9d9bc, 0x8900f8ca, 0xbc0005a4, 0x3089f89a, 0x9abc0004, 0xff0c0d98, + 0x9ec494f9, 0xf4efa601, 0x748f121b, 0xfabc000a, 0xffd4bd98, 0xf9bc95e9, 0xf8dab2a9, 0x05b48900, + 0xe89abc00, 0x00043089, 0x0d989abc, 0x01efc40d, 0xff0295b6, 0x9ea694f9, 0x8e151bf4, 0xbc000a74, + 0x94b6f8ea, 0xfdd4bd02, 0xe9bc059f, 0xf8dab2a9, 0x0a748d00, 0x04308900, 0xe8dabc00, 0x8f989abc, + 0xc7000574, 0xf9bc0899, 0x0894b6a9, 0xbc059efd, 0x00f8a9d9, 0x000a748d, 0x00043089, 0xbce8dabc, + 0x848f989a, 0x99c70005, 0xa9f9bc09, 0xfd0994b6, 0xd9bc059e, 0x8d00f8a9, 0x89000a74, 0xbc000430, + 0x9abce8da, 0x05948f98, 0x0a99c700, 0xb6a9f9bc, 0x9efd0a94, 0xa9d9bc05, 0x148900f8, 0x9abc0006, + 0x8f00f8a8, 0x94017100, 0x648e0fa9, 0x9fbc0009, 0xe8eabc90, 0x800000df, 0xf59fff00, 0xdf01fefa, + 0x00800000, 0x8e049990, 0xff000934, 0xeabcf59f, 0x01fefae8, 0x800000df, 0x0c999000, 0x0009448e, + 0xbcf59fff, 0xfefae8ea, 0x0000df01, 0x99920080, 0x09948e04, 0xf59fff00, 0xfae8eabc, 0x00df01fe, + 0x92008000, 0x9ffd0499, 0x09848f05, 0xf8fabc00, 0xf8019ffa, 0x06048900, 0xf89abc00, 0x0005f489, + 0x89c89abc, 0xbc0005c4, 0xd489e89a, 0x9abc0005, 0x05e489d8, 0x989abc00, 0xf001c4f0, 0xe4f001f4, + 0x0194f001, 0xb60a94b6, 0xf4b608e4, 0x01d4f002, 0xb605fcfd, 0xfefd09d4, 0x05fdfd05, 0x8905f9fd, + 0xb601768c, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x01affa05, 0x248f00f8, 0xfabc0006, 0x06548fb8, + 0xe8fabc00, 0x0008748f, 0x8fd8fabc, 0xbc0008c4, 0xd48fc8fa, 0xfabc0008, 0x064489f8, 0x989abc00, + 0xf003b4f0, 0xf4b61ff4, 0x0f94f017, 0xb607e4f0, 0xe4b60294, 0x059bfd07, 0xfd07d4f0, 0xd4b6059e, + 0xffc4f00a, 0xb6059dfd, 0x9cfd0fc4, 0x059ffd05, 0x0176848f, 0xbc0fa4b6, 0x00dfa0af, 0xfd008000, + 0xa9fa05af, 0x8900f801, 0xb6010004, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00aace05, 0xc7080049, + 0xa0b35caa, 0xa6b04202, 0x140cf402, 0xa0b32009, 0x00493600, 0x01a4b301, 0x37643e2d, 0x04a0b300, + 0x40004915, 0xf404a6b0, 0xa4b31d08, 0x5a3e1805, 0x00890037, 0x643e0200, 0x00890037, 0x643e1000, + 0x94bd0037, 0x00f89ab2, 0x1489010f, 0x9fbc000a, 0xf8a4bda9, 0xb662f900, 0x60890fa4, 0xc2320128, + 0xbd00a9bc, 0x109990c4, 0x800000d6, 0xa0a9bc00, 0xff44c005, 0xffff83f0, 0x01bf98f8, 0x9802bd98, + 0x243303b1, 0xa6ff0b00, 0x37b03ee5, 0xe506ff00, 0xf000e9ce, 0x95fd3ff4, 0x059ffd04, 0xb60fdfc4, + 0x94fd08f4, 0x059ffd04, 0xfd071fc4, 0xf4b60493, 0x059ffd10, 0x9001e9fa, 0xaa9001cc, 0x14bb9040, + 0xb3400090, 0xfbb604c4, 0x02af9461, 0x003a988e, 0xbcf0fbbc, 0x52f9d8ef, 0x001a588e, 0xdf94a2b2, + 0x899ab204, 0xb2003950, 0x58923cb4, 0xd99434bd, 0xbc14bd02, 0x04bd909f, 0xbdf09ebc, 0x17d88eb4, + 0xbc030c00, 0x5b3e909e, 0x54330038, 0x93981600, 0x03919804, 0x98029098, 0x9cbf019b, 0x00384e3e, + 0xf398fcbf, 0x03f19804, 0x9802f098, 0xdd9001fb, 0x14999001, 0xb314ff90, 0x0e1100c0, 0x00503320, + 0xa6010e06, 0xc508f4de, 0x8f022994, 0xbc003a98, 0xfdbc9094, 0x04a3b599, 0xb503a1b5, 0xabb502a0, + 0xfbaca001, 0x00188f51, 
0x0fa99401, 0xdf909fbc, 0x00800000, 0xcee59fff, 0x848900ef, 0x9a3c0027, + 0x01943398, 0x0000d910, 0xf9ff4000, 0x38b93e95, 0xffffd900, 0xf9ffbfff, 0x01e9fa94, 0x02f900f8, + 0x800000df, 0x0fa99400, 0x0100188e, 0xff909ebc, 0x0cce059f, 0xf899b800, 0x9fff002d, 0x00beceb5, + 0x0ffc99b8, 0xd59fff00, 0x8900dfce, 0x3c003950, 0x9433989a, 0x00d91601, 0xf0100000, 0xf5f010e5, + 0x95c9ff10, 0x0039183e, 0xf9fdef09, 0x04e9fd04, 0xffffffd9, 0x94c9ffef, 0xfa0109fa, 0xdffa01be, + 0x8f01fb01, 0x94010018, 0x9fbc0fa9, 0x0000df90, 0x9fff0080, 0x00efcee5, 0x00290c89, 0x33989a3c, + 0x09100194, 0x94f9ffe0, 0x3e0895f0, 0x09003954, 0x94f9ffe0, 0xf801e9fa, 0x0fae9400, 0x01001889, + 0x800000dd, 0xe0e9bc00, 0xced5edff, 0x508900df, 0x9abc0037, 0xffffd9c8, 0xf9fdf3ff, 0x03c4f004, + 0xfd1ac994, 0xdffa05f9, 0x0000d901, 0xeeb80080, 0xfd002fe8, 0xe9ce05e9, 0xffffdf00, 0xc4b6ff3f, + 0x049ffd16, 0xfa059cfd, 0x00f801e9, 0x1489f4bd, 0x9fbc000a, 0xbd00f8a9, 0x0a2489f4, 0xa99fbc00, + 0xd88900f8, 0x22f9003a, 0xbc02ac94, 0xbc8920c9, 0xe0810038, 0x030f003b, 0x68899fa0, 0xae94003b, + 0xbc010d04, 0x9da000e1, 0x003a9881, 0x00278889, 0xbdb0e1bc, 0x391081f4, 0x899fa000, 0xa00037a0, + 0x3ae8899d, 0xd0e9bc00, 0x81e0e1bc, 0xbd003a88, 0xc0c1bc94, 0xbfa0ff01, 0xefa0dfa0, 0x3c99cf3c, + 0x01a0992f, 0x90019990, 0xdd9004bb, 0x04ee9004, 0xb3040090, 0x89e30494, 0x3c003a80, 0x8489a99f, + 0x9f3c0027, 0x03d489a9, 0xa99f3c00, 0x0003d889, 0x89a99f3c, 0x3c0003dc, 0xe089a99f, 0x9f3c0003, + 0x03e489a9, 0xa99f3c00, 0x000a1489, 0x89a99fbc, 0xbc000a24, 0x3489a99f, 0x9fbc000a, 0x38b889a9, + 0xa99f3c00, 0x0037e489, 0x89a99f3c, 0x3c0039d4, 0x21fba99f, 0x0003d489, 0xf8a89a3c, 0x03dc8900, + 0xa89a3c00, 0xe08900f8, 0x9a3c0003, 0x8900f8a8, 0x3c0003e4, 0x00f8a89a, 0x00052089, 0x30989a3c, + 0xacf00096, 0x8900f80b, 0x3c002784, 0x00f8a89a, 0x0004e089, 0xf8a89abc, 0x04d08900, 0xa89abc00, + 0xc08900f8, 0x9abc0004, 0x8900f8a8, 0xbc000480, 0x00f8a89a, 0x00093489, 0xf8a89abc, 0x03ec8900, + 0xf89abf00, 0xbeefdf00, 0xec89dead, 0x9fa00003, 0x008900f8, 0x9abc0005, 0x8900f8a8, 0xbc003750, + 0x96b0989a, 0x0bacf000, 0xf801a6f0, 0x0ba48900, 0xa89a3c00, 0xa88900f8, 0x9a3c000b, 0x8900f8a8, + 0xbc000440, 0x00f8a89a, 0x00045089, 0xf8a89abc, 0x04608900, 0xa89abc00, 0x708900f8, 0x9abc0004, + 0x8900f8a8, 0xbc000614, 0xb4b3a99b, 0xbdb21109, 0x020bc4bd, 0x3e0dac4e, 0xb2003b84, 0x0bc4bdbd, + 0x09c44e02, 0x0005e67e, 0xafb200f8, 0x5933b0b3, 0xf433b6b0, 0xb0b3130c, 0xb0b32430, 0xb4b31831, + 0xc93e6e0c, 0xb0b3003b, 0xb4b35634, 0xd83e6236, 0x4489003b, 0xdc3e000a, 0x4089003b, 0x9abc0004, + 0x045089c8, 0x3bf03e00, 0x0a648900, 0xc89abc00, 0x000a5489, 0x003bf03e, 0x000ae489, 0xbdd89fbc, + 0x3bf33ec4, 0x0ab48900, 0xc89abc00, 0x000ac489, 0x4ed89fbc, 0xe67e09c4, 0x00f80005, 0x000ad489, + 0xbdd89abc, 0x09c44ec4, 0x0005e67e, 0xe48900f8, 0x9abc0006, 0xb262f9f8, 0x00f0b3a3, 0xa6010a10, + 0x8b1bf5fa, 0x3c503e00, 0x39508900, 0x989a3c00, 0x14019033, 0x001acb7e, 0x7fd9010f, 0xa60206cc, + 0x050cf4a9, 0x0802f4bd, 0x1d00f0b3, 0x003c633e, 0x00395089, 0x3398933c, 0x020c0194, 0x3ef4bd08, + 0x02003c65, 0x01f9c404, 0x940f3094, 0x14bd1794, 0x01287089, 0x800000d6, 0x0009bc00, 0x7fffffd5, + 0xf506ffff, 0xfd00f9ce, 0x94fd0495, 0x01f9fa05, 0x21bc060b, 0x400090d4, 0xb2081190, 0x4ec4bd3a, + 0xe67e09c4, 0x14b30005, 0xa4bddb20, 0x30f461fb, 0xb282f9c4, 0xf830f4a2, 0xa701b9b3, 0x01b6b000, + 0xb31108f4, 0x02f002b9, 0xcd04bdb3, 0x3d5f3e03, 0x38b88900, 0x3c010f00, 0xa994a99f, 0x04ae9406, + 0x8930e9bc, 0xb20027cc, 0x3039bce5, 0x0039208f, 0x00391089, 0xd0b2c6b2, 0xbc505fbc, 0x14bd40e9, + 0xe8870708, 0x508e0003, 0xe23c0039, 0x01943398, 
0xb501a016, 0x08b50101, 0x0301b502, 0x3e0401b5, + 0x0f003d35, 0xb5010904, 0x0fb50101, 0x0301b502, 0xa00409b5, 0x98723c01, 0x0e019433, 0x3bb20ab2, + 0xe87e140c, 0x6ebf0076, 0x90143390, 0x00900466, 0x904ea014, 0x45a60444, 0x3eae1bf4, 0x890040a3, + 0x3c003950, 0x200f9892, 0x330ef1b0, 0x09090090, 0x0e91b001, 0x8f042e94, 0xbc003a98, 0xc089f0ef, + 0xf1b00038, 0x022f9413, 0xb090f9bc, 0x22bc1191, 0x9092bc90, 0xb00694b6, 0x88890d91, 0xf9bc003a, + 0x3b808980, 0x90e9bc00, 0x890f91b0, 0xbc003910, 0x44fe90e9, 0x1091b001, 0xd889d1b2, 0xc1b0003a, + 0x70f9bc12, 0xe88f34bd, 0x4490003a, 0x60efbc54, 0x9d33893f, 0xb2009500, 0xb22ab249, 0x37e97e3b, + 0xb449bf00, 0x3bb20ed0, 0x19a02ab2, 0xb5014e98, 0x4f98011e, 0xb54eb202, 0x4998021f, 0x0319b503, + 0xbf044f98, 0x041fb519, 0xf0b449a0, 0x01199813, 0x49b5fcbf, 0x021f9801, 0x98024fb5, 0x49b50319, + 0x1290b403, 0xb5041f98, 0x99bf044f, 0x003a808f, 0x3c0091b0, 0xf130f8f2, 0x30ea7e04, 0xbf3bb200, + 0xb28a2060, 0x2fbf7e2a, 0x0de0b400, 0x8f010990, 0xa0002950, 0x9033bc69, 0xb69093bc, 0xe9bc0494, + 0x9090bc90, 0x3f99fabc, 0x015d3385, 0x80890105, 0x923c003a, 0x000d3308, 0xe0b400be, 0x33e93f11, + 0xb23b0194, 0x7e3bb22a, 0xb4002d97, 0x49b211f0, 0x3bb22ab2, 0x707ef020, 0x49bf0032, 0x4e9819a0, + 0x011eb501, 0xb5024f98, 0x4998021f, 0x0319b503, 0xb5044e98, 0x1d3e041e, 0x793f003f, 0x61009433, + 0xb212f0b4, 0xbf2ab23b, 0x316a7efc, 0x2049b200, 0xb23bb27a, 0x32707e2a, 0xb249bf00, 0xa03bb22a, + 0x014e9819, 0x98011eb5, 0x1fb5024f, 0x03499802, 0x980319b5, 0x1eb5044e, 0x7e60bf04, 0xbc002fbf, + 0x508e9033, 0x0f900029, 0xb46fa001, 0x93bc0df0, 0x0494b690, 0xbc90f9bc, 0xeabc9090, 0x337f3f99, + 0xb44f01f4, 0xe9bf0fe0, 0x0038b88e, 0x3e29ef3c, 0xb2003f65, 0xb22ab249, 0x32707e3b, 0xa049bf00, + 0x014e9819, 0x98011eb5, 0x1fb5024f, 0x03499802, 0xb50ff0b4, 0x4e980319, 0x041eb504, 0xb88ef9bf, + 0xe53c0038, 0x10f0b429, 0xef0095f1, 0x90b4f9a0, 0x10e0b40f, 0x9011f0b4, 0x99900133, 0x04ee9004, + 0xb00f91b0, 0x90b410e1, 0x13e0b412, 0xb001ff90, 0x999011f1, 0x04ee9004, 0xb01291b0, 0x889013e1, + 0x14119001, 0x90017790, 0x3db30466, 0x3efe2604, 0x8f0040a3, 0x94003910, 0xae9404a9, 0x809fbc02, + 0x003a888f, 0xb0f0efbc, 0xaabc0bf1, 0xf0fabcf0, 0xb006f4b6, 0xd88f0cf1, 0x47fe003a, 0x50efbc01, + 0x8e14c1b0, 0x8f003ae8, 0xb2003b80, 0x409ebcd3, 0xbd609fbc, 0x54779014, 0xb21490b4, 0xbf2ab21b, + 0x0499909c, 0x7e1491b0, 0xb200316a, 0xb25a201b, 0x7e2ab279, 0xbf003270, 0xb21bb27e, 0x983ea02a, + 0x3fb5017f, 0x02799801, 0x980239b5, 0x3eb5037e, 0x047f9803, 0xbf043fb5, 0x2fbf7e40, 0xbc5e3f00, + 0x0f909011, 0xb44fa001, 0x91bc0cf0, 0x0494b690, 0xbc143390, 0x508f90f9, 0x90bc0029, 0x99fabc90, + 0x1d01e433, 0x3c0bf0b4, 0x943398f1, 0x69bf1300, 0x0038b88f, 0xf129fe3c, 0xa0ef0095, 0x01119089, + 0x90015590, 0x66900444, 0x04889004, 0x6c041db3, 0x40a33eff, 0x0c190b00, 0x02617e0f, 0x3e0f0a00, + 0xbd0040a5, 0x0830f4a4, 0xf93c85fb, 0xb2b0b252, 0xb2d2b2c1, 0x7ea5b2e3, 0xb2003707, 0x00a4b3a4, + 0x0b5ab214, 0x7e150c19, 0x0a000261, 0x41173e15, 0xb21db200, 0xb23bb20c, 0x71677e2a, 0x03e84c00, + 0x727ed4bd, 0x4c920072, 0x7ed4bd00, 0x92007272, 0xbf9301ae, 0x00f4b300, 0xfde6b10b, 0x130df407, + 0x190b5ab2, 0x617e140c, 0x140a0002, 0x0041173e, 0xa00790b4, 0xfba4bd9a, 0xb2efb251, 0xbd22f9de, + 0xb2b0b2d4, 0x4ca2b2c1, 0xeab203e8, 0x677efbb2, 0xe0b40071, 0xb21db204, 0xbbf4bd0c, 0xbfbb02ae, + 0x71677e03, 0x42408c00, 0x7ed4bd0f, 0xb4007272, 0xf4bd05e0, 0xbb02aebb, 0xae9203bf, 0x00bf9301, + 0x1500f4b3, 0xe9a6fe09, 0xb20d0cf4, 0x0daeb2bf, 0x41883e01, 0x0b2ab200, 0x7e130c19, 0x0a000261, + 0x41aa3e13, 0x01dd9000, 0x9d01ff95, 0xefff01ee, 0xf41bf495, 0xb406c0b4, 
0xdcbc0790, 0x7e9ca0c2, + 0xb400721e, 0x9aa00890, 0x21fba4bd, 0xa99402f9, 0xbcd4bd03, 0x010ec29a, 0x00032c80, 0xc4f0cebc, + 0x1bf401d9, 0xf80fbc0a, 0x0041d53e, 0x90980fbc, 0x9f9501ee, 0x4af9c710, 0xc403b9b5, 0xb9b53ff9, + 0x66f9c701, 0x9002b9b5, 0xbb9001dd, 0x04d4b314, 0x7eb43dcf, 0xfb002a14, 0xec30f401, 0xc0b282f9, + 0xb9b3a3b2, 0xb000a401, 0x08f401b6, 0x02b9b311, 0xbdb3013d, 0x3e01a804, 0x890042a6, 0x3c0003e8, + 0x9433989a, 0xcbb20a01, 0x0041ac7e, 0x90063994, 0x3e94013f, 0x06fd9404, 0xb620e9bc, 0xcc8904f4, + 0xfdbc0027, 0x2029bc40, 0x884049bc, 0xbd003950, 0x06040714, 0x8e070501, 0x3c0003e8, 0x943398e3, + 0x2ab21201, 0x140c0bb2, 0x0076e87e, 0x0042973e, 0x3398833c, 0xa0160194, 0x0101b501, 0xb50205b5, + 0x01b50301, 0x42973e04, 0xb501a000, 0x07b50101, 0x0301b502, 0x900406b5, 0x22901400, 0xf424a614, + 0xc83eb81b, 0xc0890043, 0xe88e0038, 0x3f94003a, 0x80f9bc02, 0xbc043994, 0x988e509e, 0x42fe003a, + 0x409ebc01, 0x888914bd, 0xd88e003a, 0xf9bc003a, 0x60febc70, 0x3d242290, 0x0e94bdf4, 0x197f3c01, + 0xa0196f3c, 0x3c59a049, 0x1bb2198e, 0x3ab229b2, 0x0037e97e, 0x1bb22fbf, 0x2cb23ab2, 0x29980fa0, + 0x04449001, 0x90045590, 0x09b50111, 0x022e9801, 0x98020eb5, 0x0ebf032f, 0x98030fb5, 0x09b50429, + 0x982ea004, 0x2fb5010f, 0x02099801, 0x980229b5, 0x2eb5030e, 0x040f9803, 0xb5140090, 0x5d7e042f, + 0x14b3002d, 0xc83e9504, 0xd88e0043, 0xa994003a, 0x04a29402, 0x8f609ebc, 0x8e003ae8, 0xfe003a88, + 0xc1b20144, 0xbd202fbc, 0x509ebc04, 0x84bd743d, 0xb2244490, 0x09573c0b, 0xa009673c, 0x7e3ab228, + 0xb2002ccc, 0xb20bb249, 0x32707e3a, 0x904fbf00, 0x22900100, 0x981fa004, 0x19b50149, 0x024e9801, + 0x98021eb5, 0x1fb5034f, 0x04499803, 0x900419b5, 0x04b31411, 0xc83ec104, 0x190b0043, 0x617e0f0c, + 0x0f0a0002, 0x0043ca3e, 0x85fba4bd, 0x3d02f914, 0x37e48f94, 0x0fab9400, 0xb2a9f93c, 0x39d48fa0, + 0x3cd4bd00, 0xffdca9f9, 0x89f7ffff, 0xda012864, 0x00800000, 0xffe0b9bc, 0xf9cef5ea, 0x049cfd00, + 0x9001f9fa, 0xee9001dd, 0x04d4b340, 0x309c89ee, 0x0000df01, 0xb9bc0080, 0xf5bfffb0, 0xde00f9ce, + 0xbfffffff, 0xfa049efd, 0x00df01f9, 0xb8008000, 0x023098bb, 0xcef5bfff, 0x004e00f9, 0x049efdf8, + 0xdc01f9fa, 0x00800000, 0x30a4bbb8, 0xc5bcff00, 0x8f00c9ce, 0xbc000bac, 0xbc8fe8f0, 0xf0bc000b, + 0x1fff8fd8, 0x1fe4f0fc, 0xb6049ffd, 0x034f0de4, 0x059efde0, 0x07ffd4f1, 0xb6049ffd, 0x9dfd02d4, + 0x01c9fa05, 0x800000d9, 0x88beb800, 0xe9fd0210, 0x00e9ce05, 0xffffffdf, 0x049ffd07, 0x000000df, + 0x059ffd40, 0xb201e9fa, 0x2aec7e0a, 0x3bc08f00, 0xf8f0bc00, 0x000a2489, 0x170b0ab2, 0x0c099fbc, + 0x02617e01, 0xfba4bd00, 0xb222f901, 0xb2a1b2d2, 0x09c44cc0, 0x0005947e, 0x547e1ab2, 0x0aa00006, + 0x6b7e1ab2, 0x2aa00006, 0x22f921fb, 0xd2b2c0b2, 0x0b7ea1b2, 0x1ab20005, 0x0005667e, 0x20b30aa0, + 0x1ab20c00, 0x00057d7e, 0x21fb2aa0, 0xa0b202f9, 0x0a31b9b3, 0x31b6b002, 0xb33d0cf4, 0x00fc24b9, + 0xf424b6b0, 0xb0b3240c, 0xb6b05b21, 0x110cf421, 0x8b0cb9b3, 0x20bdb301, 0x683e02ea, 0xbdb30045, + 0x3e02e123, 0xb30045c1, 0x011126b9, 0xd330bdb3, 0x46a23e02, 0x34b9b300, 0xb9b3025b, 0xb3029a36, + 0x02c033bd, 0x00475a3e, 0x00046089, 0xbc02ac94, 0x7089d0c9, 0xc9bc0004, 0x44c97ec0, 0x481f3e00, + 0x09248900, 0x02ac9400, 0xc9bcd4bd, 0x44c97ec0, 0x09248900, 0xf890bc00, 0x00062489, 0xbc03fec4, + 0x3489099e, 0xfec70006, 0x099ebc64, 0x00064489, 0xc750fdc7, 0x9fbc68ff, 0x06548909, 0x47ed3e00, + 0x07048900, 0x02ac9400, 0x89d0c9bc, 0xbc0006f4, 0xc97ec0c9, 0xf4890044, 0x90bc0006, 0x070489f8, + 0xd890bc00, 0x00071489, 0xbc0ffec4, 0x2489099e, 0xfec70007, 0x099ebc64, 0x00085489, 0xc770fcc7, + 0x9fbc6cff, 0x08648909, 0xffd4f000, 0x89099cbc, 0x3e000874, 0x890047ed, 0x940008a4, 0xd4bd02ac, + 
0x7ec0c9bc, 0x890044c9, 0xbc0008a4, 0xc489f890, 0xfee40008, 0x9ebcffff, 0x08d48909, 0xf0fec700, + 0x89099ebc, 0x950008e4, 0xffc71cfd, 0x099fbc78, 0x0008f489, 0x0047ed3e, 0x0008b489, 0xbd02ac94, + 0xc0c9bcd4, 0x0044c97e, 0x0008b489, 0x89f890bc, 0xc70005c4, 0x9ebc08fe, 0x05d48909, 0x09fec700, + 0x89099ebc, 0xc70005e4, 0x9ebc0afe, 0x05f48909, 0x02fdc700, 0xbc01f4f0, 0x0489099f, 0xed3e0006, + 0x94890047, 0xac940009, 0xd0c9bc02, 0x00098489, 0x7ec0c9bc, 0x3e0044ea, 0x8900481f, 0x94000914, + 0xc9bc02ac, 0x090489d0, 0xc0c9bc00, 0x0044ea7e, 0x00090489, 0x89f890bc, 0xbc000914, 0x3489d890, + 0xfec40009, 0x099ebcff, 0x00094489, 0xbce8fec7, 0x5489099e, 0xfc950009, 0xf0ffc718, 0x89099fbc, + 0x95000964, 0xd4b61adb, 0x05dcfd08, 0x89099dbc, 0xbc000974, 0x1f3e099b, 0xc4890048, 0xac940009, + 0xbcd4bd02, 0xea7ec0c9, 0xc4890044, 0x90bc0009, 0x066489f8, 0x01fec400, 0x89099ebc, 0xc7000674, + 0x9ebc24fe, 0x06848909, 0x70fdc700, 0xbc68ffc7, 0x9489099f, 0xed3e0006, 0xb4890047, 0xac940009, + 0xd0c9bc02, 0x0009a489, 0x7ec0c9bc, 0x890044ea, 0xbc0009a4, 0xb489f890, 0x90bc0009, 0x073489d8, + 0x0ffec400, 0x89099ebc, 0xc7000744, 0x9ebc64fe, 0x07548909, 0x70fcc700, 0xbc6cffc7, 0x6489099f, + 0xd4f00007, 0x099cbc07, 0x00088489, 0x0047ed3e, 0x0009d489, 0xbd02ac94, 0xc0c9bcd4, 0x0044ea7e, + 0x0009d489, 0x89f890bc, 0xe40006a4, 0xbcfffffe, 0xb489099e, 0xfec70006, 0x099ebc50, 0x00077489, + 0xc71cfd95, 0x9fbc78ff, 0x07848909, 0x099dbc00, 0x00481f3e, 0x0009e489, 0xbd02ac94, 0xc0c9bcd4, + 0x0044ea7e, 0x0009e489, 0x89f890bc, 0xc7000a04, 0xf4f002fe, 0x099ebc01, 0x0009f489, 0xfb099fbc, + 0xac30f401, 0xa1b232f9, 0x0018e57e, 0x0037e48f, 0x3398f13c, 0x00bf009d, 0xf93c0109, 0xf5a92619, + 0xb200861b, 0x65ef7e1a, 0x0f109400, 0x01287089, 0x09bc24bd, 0x0000d300, 0x03ff0080, 0x0099ce95, + 0x93fd040d, 0x40009004, 0x060094b3, 0xd2bb080d, 0x901ab204, 0x060b0822, 0xc44ec4bd, 0x05e67e09, + 0x2024b300, 0xbd020bd9, 0xfe1ab2d4, 0x00900140, 0x7e0cb260, 0x4b0044ea, 0x1ab2030a, 0x003b637e, + 0x010f0bbf, 0x0038b889, 0x0f199f3c, 0x3bc08901, 0x199fbc00, 0x030abeb3, 0xbd1ab23e, 0x48f77ec4, + 0x48f43e00, 0x06d48900, 0xb891bc00, 0xb20140fe, 0x1000901a, 0xf97e0cb2, 0x1ab20041, 0xc43d0bb2, + 0x0037757e, 0x0b0b1ab2, 0x248d010c, 0x087e00f4, 0xa4bd0017, 0xb35435fb, 0x5b010aba, 0x010ab6b1, + 0xb3140cf4, 0xb3490ab0, 0x280109ba, 0x1709b4b3, 0x0049273e, 0x0309bab3, 0x0abab32b, 0xbab35803, + 0x0a190209, 0xb200f801, 0x52847ecb, 0xb200f800, 0x4eb37ecb, 0xb200f800, 0x49b67ecb, 0x0f00f800, + 0x0a148901, 0xa99fbc00, 0x00495d3e, 0x0048217e, 0xd48900f8, 0x9a3c0039, 0x00903398, 0xf8a4bd08, + 0x3be08900, 0x04ab9400, 0x7eb0b9bc, 0xf8005419, 0x43cd7e00, 0xf400f800, 0x22f9fc30, 0xc1b2b0b2, + 0xcbb2a2b2, 0x003b637e, 0x290000b3, 0x40fe020b, 0x902ab201, 0xd4bd0c00, 0xea7e0cb2, 0x0bbf0044, + 0x1bf4b1a6, 0xbd2ab20f, 0x48f77ec4, 0x49b33e00, 0xfba4bd00, 0x300b0425, 0xf9e830f4, 0xf4a0b252, + 0x0c7eec30, 0x0ab20045, 0x0c7e0c0b, 0x0ab20045, 0x0035c37e, 0x00093489, 0x929890bc, 0x96b00499, + 0x090cf401, 0xa67e0ab2, 0x0ab20069, 0x0c7e310b, 0x74890045, 0x90bc0006, 0x04c089f8, 0x9890bc00, + 0x0bf4f9a6, 0x0b0ab213, 0x7e080c19, 0x0a000261, 0x4ead3e08, 0x06848900, 0xe890bc00, 0x00050089, + 0xa69890bc, 0x130bf4e9, 0x190b0ab2, 0x617e090c, 0x090a0002, 0x004ead3e, 0x00069489, 0x89f890bc, + 0xbc000510, 0xf9a69890, 0xb2130bf4, 0x0c190b0a, 0x02617e0a, 0x3e0a0a00, 0xb3004ead, 0xb31800f0, + 0xb21401e4, 0x0c190b0a, 0x02617e0b, 0x3e0b0a00, 0xb2004ead, 0x7e360b0a, 0x8900450c, 0xbc0009f4, + 0x94b39890, 0x010f0d00, 0x0003d489, 0x89099f3c, 0xbc000a04, 0x94b39890, 0x010f0d00, 0x0003d889, + 0xb2099f3c, 0x36357e0a, 
0x05c48900, 0x05748f00, 0x9890bc00, 0xfdf8f0bc, 0x1bf4049f, 0x89010f0c, + 0x3c0003dc, 0xd489099f, 0x848f0005, 0x90bc0005, 0xf8f0bc98, 0xf4049ffd, 0x010f0c1b, 0x0003e089, + 0x89099f3c, 0x8f0005e4, 0xbc000594, 0xf0bc9890, 0x049ffdf8, 0x0f0c1bf4, 0x03e48901, 0x099f3c00, + 0x330b0ab2, 0x00450c7e, 0x000a9489, 0x89f890bc, 0xbc000884, 0x0109e890, 0xbbfff4f0, 0xf9fd049e, + 0x131bf404, 0x190b0ab2, 0x617e0d0c, 0x0d0a0002, 0x004ead3e, 0x01309c89, 0x8f0f0194, 0xbcfecf64, + 0x00d91019, 0xff008000, 0x1fbc4519, 0x004fce10, 0x000af489, 0xd9e890bc, 0x7fffffff, 0xb004f9fd, + 0x9cf000e6, 0x1f94b60b, 0xfa05f9fd, 0x3489014f, 0x90bc0007, 0x074489e8, 0xf890bc00, 0x00079489, + 0x90d890bc, 0xffbc01ee, 0x909fbc90, 0xbb039992, 0x348904e9, 0x9ebc0008, 0xf4eda609, 0x0ab21318, + 0x100c190b, 0x0002617e, 0xad3e100a, 0x5489004e, 0x90bc0007, 0x076489e8, 0xf890bc00, 0x0007c489, + 0x90d890bc, 0xff9001ee, 0x08448201, 0x90ffbc00, 0xbb909fbc, 0x2ebc04e9, 0xf4eda609, 0x0ab21318, + 0x110c190b, 0x0002617e, 0xad3e110a, 0x0ab2004e, 0x001acb7e, 0xbdd820bc, 0x0191b094, 0x090091b0, + 0x05a5950b, 0xfe0143fe, 0x33900142, 0x34229038, 0xb00291b0, 0x21b00331, 0xb20ab204, 0xbdc4bd5b, + 0x41197ee4, 0x00adb300, 0x34890288, 0xcc8f0120, 0x19bcfedf, 0x0000dc10, 0x1cff0080, 0x101fbcc5, + 0xbf00c9ce, 0xb82dbf3e, 0x002833ff, 0xb6049ffd, 0xd4f10be4, 0xe4f107ff, 0x004fffff, 0x059efdf8, + 0xfd049ffd, 0xc9fa059d, 0x08448901, 0xd890bc00, 0x91b094bd, 0x00c84901, 0x090091b0, 0x0143fe0b, + 0x900142fe, 0x22903033, 0x0291b02c, 0xb00331b0, 0x0ab20421, 0xc4bd5bb2, 0x197ee4bd, 0xadb30041, + 0xce020f00, 0x3ebf0049, 0xff8f2dbf, 0x9ffdff07, 0x0be4b604, 0x07ffd4f1, 0xffffe4f1, 0xfdf8004f, + 0x9ffd059e, 0x059dfd04, 0x890149fa, 0xd40130ac, 0x00800000, 0x8f1019bc, 0xfffecf54, 0x1fbc9514, + 0x0099ce10, 0x0008448f, 0xc7d8f0bc, 0x99e78d9f, 0x9fbb0142, 0x0191b004, 0xb000c849, 0x0b090091, + 0xfe0143fe, 0x33900142, 0x3c229040, 0xb00291b0, 0x21b00331, 0xb20ab204, 0xbdc4bd5b, 0x41197ee4, + 0x00adb300, 0xa889018c, 0x588f0130, 0x19bcfecf, 0xa514ff10, 0xce101fbc, 0x3ebf00af, 0xac892dbf, + 0xfcc7000b, 0x099cbc8d, 0x000bbc89, 0x0142fbe7, 0x89099bbc, 0xf0fc1fff, 0xf9fd1fe4, 0x0de4b604, + 0xfde00349, 0xd4f105fe, 0xf9fd07ff, 0x02d4b604, 0xfa05fdfd, 0x340b01af, 0x0c7e0ab2, 0x74890045, + 0x90bc0007, 0x078489e8, 0x9890bc00, 0x0007f48f, 0x90f8f0bc, 0x999001ee, 0x0294b601, 0x89d4e9bc, + 0xbc000814, 0xdfa6099d, 0xb21318f4, 0x0c190b0a, 0x02617e12, 0x3e120a00, 0x94004ead, 0x248f0202, + 0x2fbc0008, 0xb25bb290, 0x0091b00a, 0xe4bdc4bd, 0x0040ab7e, 0xd900adb3, 0x00048900, 0x1019bc01, + 0x800000d9, 0xa519ff00, 0x8900aece, 0xbc000824, 0xf0b3f890, 0xf4f11500, 0x004907ff, 0x94e9fff8, + 0x3e059ffd, 0xd9004e0f, 0x80000000, 0xfa95e9ff, 0xa48901a9, 0x90bc0006, 0x0aa489e8, 0xf890bc00, + 0x9ebb0109, 0xfff4f104, 0x04f9fdff, 0xb2131bf4, 0x0c190b0a, 0x02617e06, 0x3e060a00, 0x89004ead, + 0xbc0006d4, 0xe4b3099e, 0x80890b01, 0x9e3c003a, 0x06b48909, 0xd890bc00, 0x000aa489, 0x8f9890bc, + 0xde010000, 0x00ff0000, 0xfd04fdbb, 0x9ffd049e, 0x131bf404, 0x190b0ab2, 0x617e070c, 0x070a0002, + 0x004ead3e, 0x0006e489, 0x9dbc0ab2, 0x36967e09, 0x06c48f00, 0xbc0ab200, 0x030bc02f, 0xea7ed4bd, + 0x0ab20044, 0x094c010b, 0x49777e03, 0x1430f400, 0x891855fb, 0xbc0004c0, 0xf089f89a, 0x9abc0004, + 0x050089d8, 0xe89abc00, 0x00051089, 0xf0989abc, 0xf4f001d4, 0x0fe4f003, 0xb60f94f0, 0xe4b61094, + 0x04f4b608, 0xfd05fdfd, 0xf9fd05fe, 0x0a448905, 0xbc22f900, 0xa0b2a99f, 0x310bb2b2, 0x003b8a7e, + 0x300b0ab2, 0x003b8a7e, 0x00049089, 0x89d890bc, 0xbc0004b0, 0xa089e890, 0x90bc0004, 0x048089f8, + 0x9890bc00, 0xb618dc94, 0xf4f108f4, 0x94f0ffff, 
0x05f9fdff, 0x000a6489, 0xb608d5b6, 0xedfd1ae4, + 0x099ebc05, 0x2e000089, 0x8905f9fd, 0xfd000a54, 0x0ab205fc, 0x0b099fbc, 0x3b8a7e0c, 0x05f48900, + 0xf890bc00, 0x0005a489, 0x89d890bc, 0xbc000604, 0xb489e890, 0x90bc0005, 0x04fdfd98, 0xb201fdc4, + 0x04e9fd0a, 0x000b8489, 0x8f099fbc, 0xc4000b94, 0xfebc01e9, 0x0294b609, 0x000ae48f, 0x0b059dfd, + 0x09f9bc36, 0x003b8a7e, 0x00087489, 0x89f890bc, 0xbc000524, 0xf9fd9890, 0x131bf404, 0x190b0ab2, + 0x617e030c, 0x030a0002, 0x0052823e, 0xf404f9c4, 0x020f090b, 0x004fe03e, 0xf402fec4, 0x010f100b, + 0x000af489, 0x3e099fbc, 0xc4004ff8, 0x0bf401f9, 0x0af4890a, 0x099ebc00, 0x00055489, 0x000af48f, + 0xbc1890bc, 0x6489f8f0, 0x90bc0005, 0x085489a8, 0xb890bc00, 0x00086489, 0x89c890bc, 0xbc000ab4, + 0xaf90099f, 0x90ffbc01, 0x90909fbc, 0xd9bb011d, 0x07c48904, 0x01ce9000, 0xbc099dbc, 0x9ebc90ee, + 0x01bf9090, 0x8904f9bb, 0xbc0007d4, 0x548e099f, 0x4489000b, 0xfda6000b, 0xbc0d0df4, 0xecbc099b, + 0x506b3e09, 0x0991bc00, 0x8909eabc, 0xbc000714, 0x2489b890, 0x90bc0007, 0x079489e8, 0xbc030f00, + 0xeebc099f, 0x909ebc90, 0x90039992, 0xf9bb01bf, 0x07a48904, 0x099fbc00, 0x000b348d, 0x000b248c, + 0xf403f6b0, 0xdebc0d0d, 0x09cbbc09, 0x0050ba3e, 0xd9bc0109, 0xbc020909, 0x448f09c9, 0xf0bc000b, + 0x0b248fe8, 0xd8f0bc00, 0x000b348f, 0x89f8f0bc, 0xbc000b54, 0xe4b69890, 0x04f4b60c, 0x94b60ab2, + 0x059efd10, 0xfd059dfd, 0xc48f059f, 0x330b000a, 0x7e09f9bc, 0x89003b8a, 0xbc0008c4, 0x3489f890, + 0x90bc0005, 0x04f9fd98, 0xb2131bf4, 0x0c190b0a, 0x02617e04, 0x3e040a00, 0xc4005282, 0x0bf410f9, + 0x3e040f09, 0xc400512f, 0x0bf402fe, 0x89010f10, 0xbc000b04, 0x633e099f, 0xf9c40051, 0x0e0bf401, + 0x000b0489, 0x3e099ebc, 0xc4005163, 0x020f04f9, 0xb2df1bf4, 0x0c190b0a, 0x02617e0f, 0x3e0f0a00, + 0x89005282, 0x3c0003e8, 0x94339890, 0xf4bd0d01, 0x000b0489, 0x89099fbc, 0xbc0008d4, 0x4489f890, + 0x90bc0005, 0x94f9ff98, 0xb2131bf4, 0x0c190b0a, 0x02617e05, 0x3e050a00, 0xc4005282, 0x0bf4029f, + 0x3e010f09, 0xf00051ad, 0x0bf40194, 0x0b148947, 0x099fbc00, 0x0008e489, 0x89b890bc, 0xbc0008f4, + 0xf489c890, 0x008e0007, 0x9ebc0300, 0x01c99009, 0x900294b6, 0xf9bb01bf, 0x08048904, 0x099fbc00, + 0x000b748d, 0x000b648a, 0x0df4fea6, 0x52013e21, 0x0b0ab200, 0x7e0e0c19, 0x0a000261, 0x52823e0e, + 0x09dcbc00, 0x3e09abbc, 0x09005215, 0x09d9bc03, 0xa9bc0209, 0x0b648f09, 0xe8f0bc00, 0x000b048f, + 0x8fd8f0bc, 0xbc000b14, 0x7489f8f0, 0x90bc000b, 0x18e4b698, 0xb210f4b6, 0x1c94b60a, 0xfd059efd, + 0x9ffd059d, 0x0ad48f05, 0xbc340b00, 0x8a7e09f9, 0x0089003b, 0x0d940130, 0xd0d9bc0f, 0x800000d9, + 0x05d9fd00, 0xbd00ddce, 0xc7030bc4, 0xc44e08dd, 0x7e0ab209, 0xb20005e6, 0x4c2bb20a, 0x777e0209, + 0x21fb0049, 0xa0b212f9, 0x200bb1b2, 0x00450c7e, 0x957e0ab2, 0xa0b3002a, 0xf43d0d0f, 0x0003e889, + 0xb2099f3c, 0x7e210b0a, 0xb200450c, 0x7e230b0a, 0xb200450c, 0x7e240b0a, 0xb200450c, 0x7e260b0a, + 0xb200450c, 0x4c1bb20a, 0x777e0109, 0x11fb0049, 0x000a1489, 0xf4989abc, 0x12f9fc30, 0x9db3a1b2, + 0x8900c901, 0xbc000614, 0x9fb3989a, 0x00bd0309, 0x0100ac89, 0xbc0fad94, 0x00d9d0d9, 0xff008000, + 0x5489e5d9, 0xd9bcfeff, 0x00efced0, 0x04000089, 0xf494f9ff, 0xff890d0b, 0xf9fffbff, 0x01e9fa94, + 0x01309c89, 0xd9bcf43d, 0x38b889d0, 0x199f3c00, 0x800000df, 0xf5dfff00, 0xde00f9ce, 0x40000000, + 0xfa059efd, 0xe4bd01f9, 0x0838ddb8, 0x0000db02, 0x00dc0080, 0xff080000, 0xf9cef5db, 0x059cfd00, + 0x9001f9fa, 0xdd9001ee, 0x04e4b340, 0xbd020bee, 0xfe1ab2d4, 0x00900140, 0x7e0cb208, 0xb20044ea, + 0x7e0a0b1a, 0xbf003b63, 0x0a04b300, 0x7e1ab224, 0xb2000050, 0xbd0bb21a, 0x48f77ec4, 0xb3030900, + 0x3e0e00a0, 0x090053af, 0x53af3e1e, 0xb294bd00, 0x0415fb9a, 0x0100ac8f, 
0xf40fa994, 0x9fbcfc30, + 0xdf12f990, 0x00800000, 0x9fffa1b2, 0x00efcee5, 0x02000089, 0xf494f9ff, 0xff890d0b, 0xf9fffdff, + 0x01e9fa94, 0xd4bd020b, 0x40fe1ab2, 0x08009001, 0xea7e0cb2, 0x090b0044, 0x637e1ab2, 0x0bbf003b, + 0x1209b4b3, 0xc4bd1ab2, 0x0048f77e, 0xa4b30309, 0x94bd0600, 0x15fb9ab2, 0x38b88904, 0xac30f400, + 0x9a3c32f9, 0x89a1b2f8, 0xb2000614, 0x089abcbc, 0x1300f433, 0x0f0a04b3, 0x7e010a4b, 0x3e003b63, + 0xb3005490, 0x4d010a0e, 0x0006d489, 0x0fb891bc, 0x39d48901, 0x0143fe00, 0x90199f3c, 0x1ab21033, + 0xae7e3db2, 0xa2b2003c, 0x0200adb3, 0x38b88901, 0x98913c00, 0x1c009433, 0xc43d3bb2, 0x757e1ab2, + 0x1ab20037, 0x010c0b0b, 0x00f4248d, 0x0017087e, 0x0038b889, 0x3398913c, 0x00d10099, 0x030a0bb3, + 0x40fe00cc, 0x901ab201, 0x0bb21000, 0x003c0e7e, 0xadb3a2b2, 0xb200b800, 0x3d1ab20b, 0x37757ec4, + 0x00048900, 0x0f1f9401, 0xd9f0f9bc, 0x00800000, 0xce05f9fd, 0x004e00f9, 0x049efdf8, 0x0901f9fa, + 0x3bc08f01, 0xbc9ab200, 0x108919f9, 0xf0b20039, 0x94041e94, 0xff8b041f, 0xf9bc00ff, 0x0a348cf0, + 0x39208900, 0xbcd4bd00, 0xf9bfe0e9, 0xa604ff90, 0x091bf49b, 0xbc19cabc, 0xfea6190d, 0x89ee1bf4, + 0xbc003bc0, 0x90b39891, 0x4bfe1200, 0x901ab201, 0x010c10bb, 0x0037757e, 0xd4bd020b, 0x40fe1ab2, + 0x60009001, 0xea7e0cb2, 0x0a4b0044, 0x7e1ab203, 0xbf003b63, 0x0abeb30b, 0x1ab21503, 0xf77ec4bd, + 0xa2b20048, 0x00556a3e, 0x2ab224bd, 0x0b5435fb, 0xf4d4bd02, 0x22f9fc30, 0x40fea1b2, 0x0c009001, + 0xea7e0cb2, 0x1ab20044, 0x507e02bf, 0x14890000, 0x00bf0006, 0xa6b891bc, 0x0d1bf40b, 0x0bb21ab2, + 0xce3ec4bd, 0x2dc40055, 0xffbec4ff, 0x1bf4dea6, 0xe82fc70e, 0xa6e8b9c7, 0x110cf4f9, 0xa601e990, + 0x090bf4d9, 0xd23ea4bd, 0x1ab20055, 0xf77e010c, 0x25fb0048, 0xf4100b04, 0x22f9e830, 0x41fea2b2, + 0x0140fe01, 0x901c1190, 0x1cb22000, 0xea7e0db2, 0x0fbf0044, 0xc4891ebf, 0x030a0006, 0xfffffde4, + 0xffffece4, 0xb610f5b6, 0x92bc10e5, 0x0149feb8, 0xb50c9990, 0x9ca0029d, 0xb5039fb5, 0x9db2019e, + 0xe08cf4bd, 0x2e94003b, 0x00b4b302, 0x3ef9b20a, 0xbc005636, 0xd1bf92af, 0x9090e9bc, 0xdd9001ff, + 0x99c1bc04, 0xe504f4b3, 0x507e2ab2, 0xe0890000, 0x2ab2003b, 0xbc042b94, 0x197eb0b9, 0x25fb0054, + 0xdc02f918, 0x0080210c, 0xcccea0b2, 0x04408900, 0xa99cbc00, 0x802108d9, 0x0099ce00, 0x0004508f, + 0xf9bc9db2, 0x7e200ba9, 0xd9000494, 0x00802104, 0x8f0099ce, 0xbc000480, 0x00d909f9, 0xce008021, + 0x908f0099, 0xf9bc0004, 0x71148f09, 0x0f099401, 0xdf909fbc, 0x00800000, 0xce059ffd, 0xa08f0099, + 0xb4bd0004, 0xb209f9bc, 0x050b7e0a, 0x7e0ab200, 0x8900057d, 0xb60004b0, 0x9abc1aa5, 0xbd01fb09, + 0x76388f94, 0xde22f901, 0x00800000, 0x000a7482, 0x29bca0b2, 0x0fa994a9, 0xff909fbc, 0xffcef59e, + 0x50999000, 0x8e059efd, 0xbc000410, 0x99cea9ef, 0x04308e00, 0xa9e9bc00, 0x0005a489, 0xf002fdc7, + 0x9fbc01f4, 0x05b489a9, 0xa99dbc00, 0x0034ef7e, 0xa4b3a1b2, 0x0ab21000, 0x00351d7e, 0xa0b3a1b2, + 0x0ab21200, 0x1cb2180b, 0x0002617e, 0x00576f3e, 0x517e0ab2, 0x0ab20035, 0x0035747e, 0x977e0ab2, + 0x20bc0035, 0x0b0ab2d8, 0x7ec4bd26, 0xb2000494, 0x8921fb1a, 0xf9017604, 0x89fc8f72, 0x0fa594fe, + 0x59bca4b2, 0x0000d950, 0x59ff0080, 0x505fbce5, 0x8d00e9ce, 0xc4000ba4, 0x1bf4019f, 0xa9df3c10, + 0xfa0195f0, 0xaf3e01e9, 0x01090057, 0x89a9d93c, 0x8f017600, 0xbcfe8a00, 0x00d95059, 0xff008000, + 0x5fbc9559, 0x009fce50, 0xce080449, 0x90330099, 0xf4bd0a03, 0x0057db3e, 0x890fffc7, 0xbc017650, + 0x74895059, 0x9f3c0038, 0x0000d949, 0xb08f0080, 0x59fffe89, 0x505fbce5, 0x8d00e9ce, 0xc4000ba8, + 0x1bf4019f, 0x49df3c10, 0xfa0195f0, 0x173e01e9, 0x01090058, 0xb249d93c, 0x18e57e4a, 0x01a43300, + 0x064ab219, 0x18be7e16, 0x00a93300, 0x4ab202a9, 0xb57eb4bd, 0x0cd90018, 0xce008020, 0xdc8f0099, + 
0x9e95000b, 0x1a9dc70c, 0xc749febc, 0xfc89769f, 0x9fbc000b, 0x0bec8949, 0x499dbc00, 0x000c0c8e, + 0x1600d4b3, 0x8e49ed3c, 0xb3000c10, 0x3c1400f0, 0x843e49ed, 0x943d0058, 0x000c108f, 0x0949f93c, + 0x49e93c01, 0x010b4ab2, 0x0032ac7e, 0xadb3a6b2, 0x89024400, 0xbc017380, 0x00d95059, 0xff008000, + 0x8089f559, 0x59bcfe8c, 0xce94bd50, 0x999000fe, 0x3294b301, 0xbd4ab2fa, 0x32ac7eb4, 0xb3a6b200, + 0x021300ad, 0x0174a88f, 0xdf505fbc, 0x00800000, 0xcef55fff, 0x95f100f9, 0xf9fa0100, 0x89f48f01, + 0x6455b8fe, 0x00d90001, 0xff008000, 0x5fbc9559, 0x0099ce50, 0x0003ec8b, 0x008fbdbf, 0x9cc70004, + 0x49f9bce8, 0x8f039ec4, 0xc70004c0, 0xfebc2499, 0x04d08f49, 0x49f9bc00, 0x0004e089, 0xd9499cbc, + 0xdeadbeef, 0x1bf4d9a6, 0x89bca005, 0xbf0003ec, 0xbeefd99f, 0xf9a6dead, 0x89190bf4, 0xbc0004e0, + 0x9fa69894, 0xb20d0bf4, 0x0c180b4a, 0x59eb3e0f, 0x76188900, 0x0000d701, 0x59bc0080, 0xf557ff50, + 0x8900ffce, 0x900003f0, 0x9fbc6855, 0xe557ff49, 0xfe898089, 0xce5059bc, 0x208900ee, 0x9ebc0004, + 0x04f08949, 0x01fdc400, 0x89499dbc, 0xc7000500, 0x9cbc21fc, 0x05108949, 0x26fbc700, 0x89499bbc, + 0xc7000520, 0x9a3c03fa, 0x05248949, 0x48f0c700, 0x894990bc, 0xc7000534, 0x91bcebf1, 0x05448949, + 0x93f2c700, 0x894992bc, 0x95000554, 0xffc71cf3, 0x499fbc78, 0x00056489, 0x93bc4ab2, 0x32f67e49, + 0xb3acb200, 0xb21200a0, 0x7e180b4a, 0x06000261, 0x5ad23e1f, 0x7e4ab200, 0xb200334c, 0x00a4b3ac, + 0x7e4ab2ea, 0xb20033c9, 0x00a4b3ac, 0x7e4ab2de, 0xb2003416, 0x00a4b3ac, 0x7e4ab2d2, 0xb2003885, + 0x38be7e4a, 0x7e4ab200, 0xb2003923, 0x39597e4a, 0x7e4ab200, 0xb200343f, 0x00a4b3ac, 0x7e4ab2ae, + 0xb2003467, 0x00a4b3ac, 0x0a9489a2, 0xc894bc00, 0x000a8489, 0x0bd894bc, 0x7e4ab223, 0xb2000494, + 0x348e7e4a, 0xb3acb200, 0xb28000a4, 0x34b67e4a, 0xb3acb200, 0xff7400ad, 0x000aa489, 0xbdd894bc, + 0xb2240bc4, 0x04947e4a, 0x7e4ab200, 0xb20056df, 0x00adb3ac, 0x248fff56, 0x5fbc0176, 0x9557ff50, + 0x8f0099ce, 0xbc000bcc, 0x94f049f9, 0x1f0bf401, 0x0003c089, 0x3308943c, 0xb2150104, 0x18e57e4a, + 0x00a43300, 0x03e8890b, 0x49903c00, 0x617e4ab2, 0x6ab20056, 0xdb8f71fb, 0x440902e7, 0xf8009ffa, + 0x02b5b600, 0x0c02a0b3, 0x1803a0b3, 0x00f8a4bd, 0x09f0bbbc, 0xf0fbbc40, 0xc4079fbb, 0x00f8079a, + 0x89f0bbbc, 0xbc492492, 0x9fbbf0fb, 0x079ac407, 0xa99400f8, 0xb6e4bd07, 0xd4bd04a4, 0x3ea0a9bc, + 0xa0005b36, 0x01ff909d, 0xb3049990, 0x90f809f4, 0xe0b301ee, 0xef941d04, 0x02e99405, 0x8f909fbc, + 0xbc000c14, 0x9fbc909a, 0x3ef4bd90, 0xf8005b23, 0x9452f900, 0xa99404af, 0xbda3b207, 0x40f9bc24, + 0x001ffc85, 0x005b893e, 0x3ab20b7f, 0x00902cb2, 0x00b3f006, 0x0012f97e, 0x11901aa0, 0xf405a604, + 0x2290e91b, 0x0420b301, 0x0229941f, 0xbc052f94, 0xc680909f, 0x94bc001f, 0x0c148910, 0x1019bc00, + 0x005b683e, 0x62f951fb, 0x9404af94, 0xa4b207a9, 0xf9bc14bd, 0x1ffe8650, 0x5bfd3e00, 0xb2207f00, + 0xe44ab21c, 0x7effff0b, 0x580012f9, 0x3cbf0129, 0xe4062290, 0xf0ffff0b, 0x9cff0093, 0x0099b9c4, + 0x9004a9fd, 0xacff0433, 0xb21db2c5, 0x13357e4a, 0xf426a600, 0x1190ca1b, 0x0410b301, 0x0219941f, + 0xbc051f94, 0xc882909f, 0x95bc001f, 0x0c148930, 0x3039bc00, 0x005bbd3e, 0x82f961fb, 0xc464bfc7, + 0xb9c70fb2, 0xc7a1b232, 0xf79474b8, 0x0496940c, 0x9404f594, 0x04bd0824, 0xb248b3c7, 0x02294b0c, + 0xf97e1ab2, 0xf8090012, 0x09c4a9ff, 0x4b0db2cf, 0x1ab20229, 0xfdc53cff, 0xc6fd04c9, 0x13357e05, + 0x4b0cb200, 0x1ab2022a, 0x0012f97e, 0xa9fff009, 0xff0f49c4, 0xfdc52cff, 0xff4904c9, 0xc55cfff0, + 0x8904c9fd, 0xb2ff0fff, 0x022a4b0d, 0xc4fd1ab2, 0x04c9fd05, 0x7e05c7fd, 0xb2001335, 0x022b4b0c, + 0xf97e1ab2, 0xf0090012, 0xa9ff0db2, 0x010090c4, 0xb2c58cff, 0x022b4b1a, 0x0013357e, 0x7f040db3, + 0xf981fbff, 0xb3a2b242, 
0xb31304b0, 0x0a5d07b0, 0x00bdb323, 0x013e020e, 0x5680005d, 0x1e81001d, + 0x0b7f001e, 0xb2010c58, 0x0400902a, 0xf000b3f0, 0x040d00c3, 0x0013357e, 0x1bf401a6, 0x5d4b3ee8, + 0x1c5a8000, 0x1d568100, 0x580b7f00, 0x2ab2010c, 0xf0040090, 0xc3f000b3, 0x7e040d00, 0xa6001335, + 0xe81bf401, 0x005d4b3e, 0x001e1e80, 0x001eea81, 0x0c580b7f, 0x902ab201, 0xb3f00400, 0x00c3f000, + 0x357e040d, 0x01a60013, 0x89e81bf4, 0x3c002784, 0x94339892, 0x04bd2901, 0x0cb2fe01, 0x2ab2130b, + 0x0012f97e, 0xa1ff0db2, 0x010090c4, 0xb201c5f0, 0x7e130b2a, 0xb3001335, 0xb2e30404, 0x3b1b7e2a, + 0x01a43300, 0xbd240b62, 0x7e2ab2c4, 0x000012f9, 0xffd4bdcf, 0x240bc4a0, 0x357e2ab2, 0x240b0013, + 0x2ab2010c, 0x0012f97e, 0xa0ff010d, 0xb2240bc4, 0x13357e2a, 0x0c240b00, 0x7e2ab202, 0x0d0012f9, + 0xc4a0ff02, 0x2ab2240b, 0x0013357e, 0x030c240b, 0xf97e2ab2, 0x240b0012, 0x0dc4a0ff, 0x7e2ab203, + 0xb2001335, 0x3ae27e2a, 0x03a4b300, 0x4b2ab212, 0xcb4c00b9, 0x7e040d00, 0x89001335, 0x3c000300, + 0x94339892, 0xb4837701, 0x347f001f, 0x1cb214bd, 0xffff4be4, 0xf97e2ab2, 0x3c580012, 0x013f5802, + 0x4be41db2, 0xc9e4ffff, 0x99b9ffff, 0x04a9fd00, 0xf004cffd, 0xacff00c3, 0x7e2ab2c5, 0x58001335, + 0x1cb20330, 0x0be42ab2, 0xf97effff, 0x39580012, 0x043c5805, 0x0be41db2, 0x1190ffff, 0xc49cff01, + 0xb90093f0, 0xa9fd0099, 0x00c3f004, 0xb2c5acff, 0x13357e2a, 0x0414b300, 0x03048999, 0x98923c00, + 0x2b019433, 0xce0104bd, 0x644b0cb2, 0x7e2ab202, 0xb20012f9, 0xc4a1ff0d, 0xf0010090, 0x2ab201c5, + 0x7e02644b, 0xb3001335, 0x89e10404, 0x3c000308, 0x94339892, 0x04bd2b01, 0x0cb2ce01, 0xb202644b, + 0x12f97e2a, 0xff0db200, 0x0090c4a1, 0x11c5f001, 0x644b2ab2, 0x13357e02, 0x0404b300, 0xfba4bde1, + 0xb232f941, 0x0ab3b2a2, 0x01d9b331, 0xc99400da, 0x90e9bc08, 0x040196b1, 0xb1140cf4, 0xf4040096, + 0x96b05e18, 0x0c0cf401, 0x005f3a3e, 0x07039ab3, 0x3e230a0b, 0x80005fc3, 0x81001ba0, 0x7f001bf4, + 0x010b580a, 0xf0040090, 0xb3f000a3, 0x13677e00, 0xf401a600, 0x7c3eec1b, 0xa880005f, 0xfc81001a, + 0x0a7f001a, 0x90010b58, 0xa3f00400, 0x00b3f000, 0x0013677e, 0x1bf401a6, 0x5f7c3eec, 0x1afc8000, + 0x1b508100, 0x580a7f00, 0x0090010b, 0x00a3f004, 0x7e00b3f0, 0xa6001367, 0xec1bf401, 0x4a05b0b4, + 0xb4f000f7, 0x10b5f007, 0x0013677e, 0x3bb22ab2, 0x005ae17e, 0x1b01a0b3, 0x2902a4b3, 0x867e090a, + 0x3f490013, 0xb4a9ffff, 0x3e80b5f0, 0x0a005fbb, 0x13867e09, 0xff3f4900, 0x0ab4a9ff, 0x13677e09, + 0xfba4bd00, 0xf0bca631, 0xbabc0b9c, 0x1f94b6b0, 0xf8a59bff, 0xb0babc00, 0xcba6cab2, 0xb2050df4, + 0xf900f8ba, 0x008c8952, 0x0000d001, 0xa5940080, 0xbca4b20f, 0x50ff5059, 0x00bbceb5, 0x000e5481, + 0xf1a81abc, 0x4c03ffb4, 0x55b803ff, 0x7e001ff0, 0xff005fc5, 0x1abcb550, 0x00bbce49, 0x000e6481, + 0xdca814bc, 0x7fffffff, 0xb804bcfd, 0x00020c55, 0x005fc57e, 0xbcb550ff, 0xbbce491a, 0x0e748100, + 0xa814bc00, 0x8c10b5b6, 0xb800ffff, 0x00100055, 0x005fc57e, 0xbcb550ff, 0xbbce491a, 0x0e848100, + 0xa814bc00, 0x8c00b3f0, 0x9000ffff, 0xc57e0455, 0x50ff005f, 0xcd748905, 0x491abcfe, 0xce5059bc, + 0x9489000d, 0x94bc000e, 0xe8d9c7f8, 0xb1e09fbc, 0xf400fee6, 0xff4e060d, 0x0ea48900, 0xf894bc00, + 0x000e9489, 0xc7499ebc, 0x9fbcf0d9, 0xfee6b1e0, 0x060df400, 0x8900ff4e, 0xbc000eb4, 0xa489f894, + 0x9ebc000e, 0x18d99549, 0xb1e09fbc, 0xf400fee6, 0xff4e060d, 0x0ec48900, 0xf894bc00, 0x000eb489, + 0xc4499ebc, 0x9fbcffd9, 0xfef6b1f0, 0x060df400, 0x8900ff4f, 0xd3013294, 0x00800000, 0x895059bc, + 0xff000ec4, 0x9fbcb553, 0x00bbce49, 0x000ed480, 0xf0a804bc, 0xff8c00b3, 0x55b800ff, 0x7e000104, + 0xff005fc5, 0x0abcb553, 0x00bbce49, 0x000ee481, 0xd0a814bc, 0x00ffffff, 0xb204b0fd, 0x5fc57e0c, + 0x04559000, 0xff491abc, 0xbbceb553, 0x0ef48100, 
0xa814bc00, 0xb204b0fd, 0x0455900c, 0x005fc57e, + 0xbcb553ff, 0xbbce491a, 0x0f048100, 0xa814bc00, 0xb204b0fd, 0x0455900c, 0x005fc57e, 0xbcb553ff, + 0xbbce491a, 0x0f148100, 0xa814bc00, 0xb204b0fd, 0x9055b80c, 0xc57e0203, 0x53ff005f, 0x491abcb5, + 0x8000bbce, 0xbc000f24, 0xbbc7a804, 0x7c55b8e8, 0xff4c022e, 0x5fc57e00, 0x490abc00, 0xce0553ff, + 0x34810000, 0x14bc000f, 0x100b95a8, 0x00ffff8c, 0x000f4482, 0x005fc57e, 0xbc491abc, 0x0be4a824, + 0xff8cffff, 0x559000ff, 0x5fc57e04, 0xb553ff00, 0xce492abc, 0x548000bb, 0x04bc000f, 0x00b3f0a8, + 0x8c045590, 0x7e00ffff, 0xbc005fc5, 0x53ff490a, 0x0000ce05, 0x000f6481, 0x95a814bc, 0xff4c180b, + 0x0f748200, 0x5fc57e00, 0x491abc00, 0xc4a824bc, 0xff4cff0b, 0x0f848100, 0x5fc57e00, 0x492abc00, + 0xc7a814bc, 0xff4ce80b, 0x0f948200, 0x5fc57e00, 0x491abc00, 0xc7a824bc, 0x5590f00b, 0x00ff4c04, + 0x7e0553ff, 0xbc005fc5, 0x00ce492a, 0x0fa48100, 0xa814bc00, 0x8c100b95, 0x8200ffff, 0x7e000fb4, + 0xbc005fc5, 0x24bc491a, 0xff0be4a8, 0xffff8cff, 0x04509000, 0x005fc57e, 0xbc0503fd, 0x00ce492a, + 0x0fc48100, 0xa814bc00, 0x4ce80bc7, 0xd48200ff, 0xc57e000f, 0x1abc005f, 0xa824bc49, 0x4cff0bc4, + 0xe48300ff, 0xc57e000f, 0x2abc005f, 0xa834bc49, 0x4cf00bc7, 0xf48100ff, 0xc57e000f, 0x3abc005f, + 0xa814bc49, 0x4c180b95, 0xc57e00ff, 0x1abc005f, 0x8f51fb49, 0xdb010050, 0x00800000, 0xbc0fa994, + 0x9bff909f, 0x00ffcef5, 0x0010048c, 0xb8d8cabc, 0x001fd899, 0xfde59bff, 0xcfbc05fd, 0x00eecea9, + 0x0010148d, 0xb8f8dabc, 0x00100099, 0xfd059bfd, 0xdebc05ef, 0x0099cea9, 0x0010248e, 0xfdf8eabc, + 0xe9bc059f, 0x8f00f8a9, 0x94013068, 0x9fbc0fa9, 0x0000df90, 0x9ffd0080, 0x009ece05, 0x0010348d, + 0xc4f8dabc, 0x9fa63fe9, 0xb20518f4, 0x10448cf9, 0xf8cabc00, 0xc7a9d9bc, 0x9fa6a8e9, 0xb20518f4, + 0x10548bf9, 0xf8babc00, 0xc7a9c9bc, 0x9fa6b0e9, 0xb20518f4, 0x10648df9, 0xf8dabc00, 0xc7a9b9bc, + 0x9fa6b8e9, 0xb20518f4, 0xa9d9bcf9, 0x408f00f8, 0x00d90130, 0x94008000, 0xefbc0fae, 0xf5e9ffe0, + 0x9200fcce, 0xe9ff18ee, 0xcfd889b5, 0xe0e9bcfe, 0xc400bdce, 0x0bf402d9, 0x68cfc70a, 0x0063e73e, + 0x00107489, 0x89f89abc, 0xbc001074, 0xd9c4a99f, 0x0a0bf420, 0x3e78cfc7, 0x89006402, 0xbc001084, + 0x8489f89a, 0x9fbc0010, 0x10d9c4a9, 0xc70a0bf4, 0x1d3e70cf, 0x94890064, 0x9abc0010, 0x304489f8, + 0xe0e9bc01, 0x00109489, 0xd9a99fbc, 0x00800000, 0xce95e9ff, 0xbdce009f, 0x80d9c400, 0x0b009033, + 0x3e70fec7, 0x8900644e, 0xbc0010a4, 0xa489e89a, 0x9ebc0010, 0x40d9c4a9, 0xc70a0bf4, 0x693e78ff, + 0xb4890064, 0x9abc0010, 0x10b489f8, 0xa99fbc00, 0x12f900f8, 0xb2015c4b, 0x76ce7ea0, 0x11048900, + 0x0f0d9400, 0x8fa0a9bc, 0x89001674, 0xd1010000, 0x00800000, 0x8be0d9bc, 0xbf0017d0, 0x0000dcf9, + 0xe9bc0080, 0x0591fd90, 0xa00099ce, 0x04ff90a9, 0xa604aa90, 0xe61bf4fb, 0x01300089, 0xffd0d9bc, + 0x99ce95dc, 0x10c48f00, 0x30ddb800, 0xf9bc0002, 0x95dcff09, 0x8f0099ce, 0x900010d4, 0xf9bc50dd, + 0xe5dcff09, 0x8f00eece, 0xb80010e4, 0x0230f0d9, 0xfd09febc, 0x99ce059c, 0x10f48f00, 0x09f9bc00, + 0x12f911fb, 0xb2015c4b, 0x76ce7ea0, 0x11048900, 0x0f0c9400, 0x8ea0a9bc, 0x89001674, 0xd1010000, + 0x00800000, 0x8bd0c9bc, 0xbf0017ac, 0xbcafbfe9, 0x91fd90d9, 0x019ffa05, 0x9004ee90, 0xeba604aa, + 0x89eb1bf4, 0xdd013000, 0x00800000, 0xffc0c9bc, 0xdeced5cd, 0x10c48900, 0xf890bc00, 0x000000d9, + 0x04e9fd42, 0xffffffd9, 0x04f9fdbd, 0xfa05effd, 0x00dd01de, 0xb8008000, 0x000230cc, 0xced5cdff, + 0xd48900de, 0x90bc0010, 0xffff89f8, 0x04e9fde4, 0x1b000089, 0xfd04f9fd, 0xdefa05ef, 0x0000dd01, + 0xcc900080, 0xd5cdff50, 0x8900dece, 0xbc0010e4, 0xf00f9890, 0xfd0fe4f0, 0xe9fd049f, 0x01defa05, + 0x800000d9, 0xf0cdb800, 0xd9fd0230, 0x00dece05, 0x0010f489, 0xd9f890bc, 
0x0fffffff, 0xd904e9fd, + 0xf0000000, 0xfd04f9fd, 0xdefa05ef, 0x8911fb01, 0xf9010000, 0x015c4b02, 0xbc0fa094, 0xce7e0009, + 0x3c890076, 0xac8e0012, 0xa9bc0017, 0x0000dca0, 0xd08d0080, 0xe9bf0017, 0x09bcafbf, 0x059cfd90, + 0x90019ffa, 0xaa9004ee, 0xf4eda604, 0x01fbeb1b, 0x000e548e, 0xe9bc94bd, 0x0e648ea9, 0xa9e9bc00, + 0x000e748e, 0x8ea9e9bc, 0xbc000e84, 0x948ea9e9, 0xe9bc000e, 0x0ea48ea9, 0xa9e9bc00, 0x000eb48e, + 0x8ea9e9bc, 0xbc000ec4, 0xd48ea9e9, 0xe9bc000e, 0x0ee48ea9, 0xa9e9bc00, 0x000ef48e, 0x8ea9e9bc, + 0xbc000f04, 0x148ea9e9, 0xe9bc000f, 0x0f248ea9, 0xa9e9bc00, 0x000f348e, 0x8ea9e9bc, 0xbc000f44, + 0x548ea9e9, 0xe9bc000f, 0x0f648ea9, 0xa9e9bc00, 0x000f748e, 0x8ea9e9bc, 0xbc000f84, 0x948ea9e9, + 0xe9bc000f, 0x0fa48ea9, 0xa9e9bc00, 0x000fb48e, 0x8ea9e9bc, 0xbc000fc4, 0xd48ea9e9, 0xe9bc000f, + 0x0fe48ea9, 0xa9e9bc00, 0x000ff48e, 0x8ea9e9bc, 0xbc001004, 0x148ea9e9, 0xe9bc0010, 0x10248ea9, + 0xa9e9bc00, 0x0010348e, 0x8ea9e9bc, 0xbc001044, 0x548ea9e9, 0xe9bc0010, 0x10648ea9, 0xa9e9bc00, + 0x0010748e, 0x8ea9e9bc, 0xbc001084, 0x948ea9e9, 0xe9bc0010, 0x10a48ea9, 0xa9e9bc00, 0x0010b48e, + 0x8ea9e9bc, 0xbc0010c4, 0xd48ea9e9, 0xe9bc0010, 0x10e48ea9, 0xa9e9bc00, 0x0010f48e, 0xbc015c4b, + 0xce7ea9e9, 0x04890076, 0xf4bd0011, 0xbda0a9bc, 0x90afa094, 0xaa900199, 0x5794b304, 0xb600f8f8, + 0x348c0fa4, 0xb88b0021, 0xcfbf0021, 0xcef0afbc, 0xce9800f9, 0x01cd9802, 0xfd049efd, 0xf9fa059d, + 0x0ccc9001, 0x1bf4cba6, 0x8f00f8e4, 0xbf0021d0, 0x00d9cefd, 0x9801fe98, 0x9ffd02ff, 0x059efd04, + 0x8f01d9fa, 0x980021d0, 0xd9ce03fd, 0x04fe9800, 0xfd05ff98, 0x9efd049f, 0x01d9fa05, 0x0021d08f, + 0xce06fd98, 0xfe9800d9, 0x08ff9807, 0xfd049ffd, 0xd9fa059e, 0x21f48c01, 0x0fa4b600, 0x0023688b, + 0xafbccfbf, 0x00f9cef0, 0x9802ce98, 0x9efd01cd, 0x059dfd04, 0x9001f9fa, 0xcba60ccc, 0xf8e41bf4, + 0x3814de00, 0xe9ce0080, 0xffffdf00, 0x9ffd00ff, 0x0000df04, 0x9ffd0600, 0x01e9fa05, 0xa4b600f8, + 0x23f88c0f, 0x24288b00, 0xbccfbf00, 0xf9cef0af, 0x02ce9800, 0xfd01cd98, 0x9dfd049e, 0x01f9fa05, + 0xa60ccc90, 0xe41bf4cb, 0x88d900f8, 0xce008020, 0x004e009f, 0x04fefdff, 0xfa59f5f0, 0x00f8019f, + 0x816128d9, 0x0fa4b600, 0xcea0a9bc, 0x004f00a9, 0x049ffdf0, 0xfa6495f0, 0x00f801a9, 0x8c0fa4b6, + 0x8b002434, 0xbf002464, 0xf0afbccf, 0x9800f9ce, 0xcd9802ce, 0x049efd01, 0xfa059dfd, 0xcc9001f9, + 0xf4cba60c, 0x00f8e41b, 0x0024648f, 0xa4b6febf, 0xe0aebc0f, 0x9800e9ce, 0xff9801fd, 0x049ffd02, + 0xfa059dfd, 0x648f01e9, 0xf9980024, 0xa0a9bc03, 0x9800a9ce, 0xff9804fe, 0x049ffd05, 0xfa059efd, + 0x00f801a9, 0x8c0fa4b6, 0x8b002368, 0xbf0023e0, 0xf0afbccf, 0x9800f9ce, 0xcd9802ce, 0x049efd01, + 0xfa059dfd, 0xcc9001f9, 0xf4cba60c, 0x00f8e41b, 0x812828d9, 0x0fa4b600, 0xcea0a9bc, 0xff8f00a9, + 0x9ffdff00, 0x0095f104, 0x01a9fa68, 0x048f00f8, 0xfebf0021, 0xbc0fa4b6, 0xe9cee0ae, 0x01fd9800, + 0xfd02ff98, 0x9dfd049f, 0x01e9fa05, 0x0021048f, 0xbc03f998, 0xa9cea0a9, 0x04fe9800, 0xfd05ff98, + 0x9efd049f, 0x01a9fa05, 0x28d900f8, 0xb6008161, 0xa9bc0fa4, 0x00a9cea0, 0xfdf0004f, 0x95f0049f, + 0x01a9fa6c, 0xb88f00f8, 0xfebf0021, 0xbc0fa4b6, 0xe9cee0ae, 0x01fd9800, 0xfd02ff98, 0x9dfd049f, + 0x01e9fa05, 0x0021b88f, 0xbc03f998, 0xa9cea0a9, 0x04fe9800, 0xfd05ff98, 0x9efd049f, 0x01a9fa05, + 0x8cd900f8, 0xb600816a, 0xa9bc0fa4, 0x00a9cea0, 0xf8ffff8f, 0x8f049ffd, 0xfd070000, 0xa9fa059f, + 0xb600f801, 0xfc8c0fa4, 0x048b001f, 0xcfbf0021, 0xcef0afbc, 0xce9800f9, 0x01cd9802, 0xfd049efd, + 0xf9fa059d, 0x0ccc9001, 0x1bf4cba6, 0x8f00f8e4, 0xbf0017d0, 0x019990f9, 0x00f8f9a0, 0xa0f990f9, + 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006cfb7e, 0xe8fee0fc, 0xfef0fc00, + 
0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, + 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006ddf7e, 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, + 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, + 0xe0f9018e, 0x00708c7e, 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, + 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006d8e7e, + 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, + 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006cad7e, 0xe8fee0fc, 0xfef0fc00, + 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, + 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006c5f7e, 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, + 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, + 0xe0f9018e, 0x006a2f7e, 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, + 0x00f801f8, 0x0017d489, 0x9433993f, 0x0a0a0c01, 0x107eb4bd, 0x46890000, 0x93fe0000, 0x492e0f00, + 0x9ffa0080, 0x5ad67e00, 0x08004900, 0x8f0099cf, 0xfd010000, 0x0bf4049f, 0x02af7e0e, 0x00468900, + 0x0093fe00, 0x00021b7e, 0x0020fd7e, 0x006a748a, 0x0000e17e, 0x006aac8a, 0x0000f97e, 0x006ae48a, + 0x0001117e, 0x006a3c8a, 0x0001297e, 0x006b1c8a, 0x0001417e, 0x006b548a, 0x0001597e, 0x006b8c8a, + 0x0001717e, 0x0001897e, 0x0004627e, 0x00022c7e, 0xd489010f, 0x9f200017, 0x3e0028f4, 0xf9006c58, + 0x08404912, 0xc70099cf, 0x1ab27491, 0x0002407e, 0x1000a433, 0x010b090a, 0x0000107e, 0x006cab3e, + 0x02a010b8, 0x0204b600, 0xe4000bce, 0xb2ffffbc, 0xf0bbc71a, 0x000ac87e, 0x02b019b8, 0x0294b600, + 0xd9009afa, 0x80000000, 0xfb0109fa, 0x4912f911, 0x99cf0840, 0x7091c700, 0x407e1ab2, 0xa4330002, + 0x080a1000, 0x107e010b, 0xf93e0000, 0x10b8006c, 0xb6000260, 0x0bce0204, 0xffbce400, 0xc71ab2ff, + 0xc87ef0bb, 0x19b8000a, 0xb6000270, 0x9afa0294, 0x0000d900, 0x09fa8000, 0xf911fb01, 0x08404902, + 0xc70099cf, 0x0ab26c90, 0x0002407e, 0x1000a433, 0x010b070a, 0x0000107e, 0x006d8c3e, 0x017e0ab2, + 0xa0b30002, 0xa6b03909, 0x0f0cf409, 0x4000a0b3, 0x1108a4b3, 0x006d473e, 0x190ea0b3, 0x0080aab3, + 0x3e010a30, 0xb2006d65, 0x18ee7e0a, 0x6d6c3e00, 0x7e0ab200, 0x3e00192b, 0xb2006d6c, 0x1ef87e0a, + 0x00a0b300, 0x80a9c50b, 0x006d6e3e, 0xa48e0109, 0x0f940174, 0xff94f00f, 0xdef0febc, 0x00800000, + 0xfa05fefd, 0x0ab201f9, 0x0000b47e, 0x12f901fb, 0xcf084049, 0x90c70099, 0x7e0ab268, 0x33000240, + 0x0a1000a4, 0x7e010b05, 0x3e000010, 0x8f006ddd, 0x94010060, 0x9fbc0f09, 0x0000df90, 0x9ffd0080, + 0x0091ce05, 0x000000d9, 0x9419ff11, 0xb2090bf4, 0x24ae7e0a, 0xb20ab200, 0x00807e1b, 0xf911fb00, + 0x08404922, 0xc40099cf, 0x1ab20f91, 0x0002407e, 0x1200a433, 0x010b040a, 0x107e24bd, 0x613e0000, + 0x19b80070, 0xb6000240, 0x92cf0294, 0xff20c400, 0xfd0209b3, 0x7e1ab200, 0x0b00038e, 0x00a9330b, + 0x09b30238, 0xb0017111, 0x0cf41106, 0x0609b367, 0x06b0010c, 0x2c0cf406, 0xd50209b3, 0x0206b000, + 0xb3110cf4, 0x021e0009, 0x3e010db3, 0x6f033e02, 0x0409b300, 0x06b000d4, 0xd60cf504, 0x6f1b3e00, + 0x0d09b300, 0x06b00100, 0x180cf40d, 0xe10809b3, 0x0806b000, 0x00cf08f5, 0x0e0c0db3, 0x6f573e02, + 0x0f09b300, 0x06b000f4, 0xf60cf50f, 0x6f6b3e00, 0x4109b300, 0x06b10170, 0x0cf40041, 0x1809b334, + 0x06b0012f, 0x160cf418, 0xff1509b3, 0x1709b300, 0x0db30115, 0x3e01d414, 0xb3006f9d, 0x012c2109, + 0x314009b3, 0x200db301, 0xd63e01c1, 0x09b3006f, 0xb1015b60, 0xf4006006, 0x09b3190c, 0xb1013b43, + 0xf5004306, 0xb3012808, 
0x019f580d, 0x00701f3e, 0x4d7009b3, 0x7109b301, 0x0db30152, 0x3e018c61, + 0xb2007033, 0x25287e1a, 0x6fdc3e00, 0x0b1ab200, 0x7ec4bd01, 0x3e000261, 0xb2007061, 0x15a57e1a, + 0x6fdc3e00, 0x7e1ab200, 0x3e002883, 0xb2007061, 0x18197e1a, 0x70613e00, 0x7e1ab200, 0x3e002737, + 0xb2007061, 0x26557e1a, 0x6fdc3e00, 0x7e1ab200, 0x3e0024eb, 0xb2007061, 0x21817e1a, 0x6fdc3e00, + 0x7e1ab200, 0x3e005773, 0xb2006fdc, 0x53b47e1a, 0x6fdc3e00, 0x7e1ab200, 0x3e0052d0, 0xb2006fdc, + 0x177f7e1a, 0xb2010b00, 0x7e1ab2ac, 0x3e0021fa, 0xb2006fdc, 0x22967e1a, 0x6fdc3e00, 0x7e1ab200, + 0x3e00145f, 0x0b006fdc, 0x0c1ab201, 0x13a77e01, 0xb3abb200, 0x009e00ad, 0xcb7e1ab2, 0xdc3e0014, + 0x1ab2006f, 0x0018d17e, 0x0070613e, 0xfb7e1ab2, 0xdc3e001a, 0x1ab2006f, 0x00296b7e, 0xa9b3abb2, + 0x3e008300, 0xb2007051, 0x29c77e1a, 0x70613e00, 0x08b04b00, 0xb200bbcf, 0x5c1a7e1a, 0x70613e00, + 0x7e1ab200, 0x3e00188b, 0xb2007061, 0x189f7e1a, 0x70613e00, 0x7e1ab200, 0x3e0018aa, 0xb2007061, + 0x1d8d7e1a, 0x6fdc3e00, 0x7e1ab200, 0x3e0019fc, 0xb2007061, 0x198d7e1a, 0x70613e00, 0x7e1ab200, + 0x3e00084d, 0xb2007061, 0x07c77e1a, 0x70613e00, 0x04b0b300, 0x7e1ab210, 0x0e001795, 0x70633e01, + 0xb8e4bd00, 0x0002401d, 0xffffffdf, 0x1ee4b6bf, 0xd902d4b6, 0x80000000, 0xfd9529ff, 0xe9fd049f, + 0x00defa05, 0x100b21fb, 0x0070553e, 0x404922f9, 0x0099cf08, 0xb26491c7, 0x02407e1a, 0x00a43300, + 0x0b030a10, 0x00107e01, 0x71653e00, 0x7e1ab200, 0xb20001e7, 0x0ca0b3a0, 0x0ca6b062, 0xb3130cf4, + 0xb32002a0, 0xb33803a0, 0x3e5e00a0, 0xb3007155, 0xb36811a0, 0xb36e12a0, 0x3e7e0da4, 0xb200712f, + 0x1bc77e1a, 0xb2a2b200, 0x00507e1a, 0x0020b300, 0xb21ab278, 0x3e2cb20b, 0xb2007161, 0x1cf47e1a, + 0xb2a0b200, 0x00507e1a, 0x0000b300, 0xb21ab25c, 0x3e020b0c, 0xb2007161, 0x7e020b1a, 0x3d00182b, + 0x186c7ea4, 0x7e1ab200, 0x3e000050, 0xb2007165, 0x3e030b1a, 0xb200711b, 0x556f7e1a, 0x71473e00, + 0x7e1ab200, 0xb20055d5, 0x00a0b3ac, 0x0b1ab21c, 0x71613e19, 0x7e1ab200, 0xb2000050, 0x0c020b1a, + 0x02617e15, 0xf421fb00, 0x82f9e030, 0xffffc1e4, 0xb210c295, 0xffa0e4c5, 0x10a395ff, 0x48fea4b2, + 0x0147fe01, 0x903c8890, 0x8bb53477, 0x017db501, 0x0bb27ca0, 0x1ab28aa0, 0x0076ce7e, 0xa6b20bb2, + 0xce7e2ab2, 0x3bb20076, 0x1ab2a0b2, 0x0076ce7e, 0xa1b23bb2, 0xce7e2ab2, 0x10bc0076, 0x10699500, + 0x09bcadb2, 0xf410a600, 0x00890a0d, 0xd9bc0100, 0x017b98d0, 0xffff69e4, 0xfe100f94, 0xf9bc014e, + 0x24ee90f0, 0xa0100995, 0x90d9bcef, 0xbf01e9b5, 0x01ed98ec, 0x41fe4ab2, 0x2c119001, 0xa0011db5, + 0x76ce7e1c, 0x018b9800, 0x5ab2a0b2, 0x0076ce7e, 0xbc011b98, 0x1abf000a, 0xfbb00bbc, 0x30f42085, + 0x00c0b3f0, 0x014ffe33, 0xb508ff90, 0xfaa001fb, 0x9cbb2009, 0x0096b002, 0xbc211df4, 0xb9bcf5ac, + 0x014afe94, 0xbc059ffd, 0xa9a0f5bc, 0x9801afb5, 0xaabf01ab, 0xf81030f4, 0x014afe00, 0xbd019fb9, + 0xf5bfbc94, 0xa001a9b5, 0x01ab98af, 0x543eaabf, 0x30f40072, 0xfe82f9e4, 0x49fe014f, 0x30ff9001, + 0xb5389990, 0xfca001fd, 0xa0019bb5, 0xb2c2b29a, 0x00ddb3a6, 0xcba600bb, 0x00f70df5, 0x00ffff89, + 0x0cf5c9a6, 0xc6b1037a, 0x0df500ff, 0x080903af, 0x29bc180d, 0x24888fe5, 0x98fe3c00, 0xbbff94f0, + 0xd0b302d9, 0xbdbc1800, 0xbb2009f4, 0x9dbb042d, 0x9569bc02, 0xff046dbb, 0x2195b59f, 0xff25e410, + 0x4cb1ffff, 0xb20db1ff, 0x7e5bb24a, 0xb60076ce, 0x69951004, 0x0509fd10, 0x0df4a0a6, 0x01449214, + 0xa60002bc, 0x090cf420, 0x0cf5a0a6, 0x0abb036f, 0xff5bb202, 0x3ab23c01, 0x0076ce7e, 0xe40d01ff, + 0xb6ffff69, 0x09fd1004, 0xf4a0a605, 0x3392160d, 0x0002bc01, 0x0cf420a6, 0xf4a0a60b, 0x3392060d, + 0x10499401, 0x39ffb4bd, 0x1c85fba5, 0x0cf5dba6, 0xff8900ec, 0xd9a600ff, 0x00e90cf5, 0x00ffd6b1, + 0x02f10df5, 0x180c0809, 0x8fe5d9bc, 0x3c002488, 
0x94f098fe, 0x72c9bcff, 0xed007db3, 0xf4dba600, + 0x26a60908, 0x00b60cf5, 0xb4bd010a, 0xb31c85fb, 0x090900c4, 0x0092cc01, 0x00ffff89, 0x0cf529a6, + 0x26b101c7, 0x0df500ff, 0x080902bb, 0x29bc180d, 0x24888fe5, 0x98fe3c00, 0xbcff94f0, 0x4db342d9, + 0xbc01cb00, 0x239502b2, 0xff25e410, 0xff0107ff, 0x5bb24c03, 0xce7e4ab2, 0x03ff0076, 0x1069950d, + 0xfd1004b6, 0xa0a60509, 0x92140df4, 0x02bc0144, 0xf420a600, 0xa0a6090c, 0x028b0cf5, 0xb2020abb, + 0x1c03ff5b, 0xce7e1ab2, 0x03ff0076, 0xff69e40d, 0x1004b6ff, 0xa60509fd, 0x160df4a0, 0xbc011192, + 0x20a60002, 0xa60b0cf4, 0x060df4a0, 0x94011192, 0x7bb21049, 0xfba519ff, 0xa4bd1c85, 0x85fbb4bd, + 0xffffd91c, 0xd9a600ff, 0x02010df5, 0x888f1809, 0xd9bc0024, 0x98fe3ce5, 0x94f0080c, 0x72c9bcff, + 0x1d0079b3, 0x9427bcff, 0xbbe4b7bc, 0x91b004d7, 0xbb200909, 0xb9bc0297, 0xf569bc05, 0xff9529bc, + 0x9dff85fe, 0x10439545, 0xffff41e4, 0xb25c03ff, 0x7e5ab21b, 0xff0076ce, 0x89950d03, 0x1004b610, + 0xa60509fd, 0x190df4a0, 0xbc015592, 0x40a60004, 0xa60e0cf4, 0x090df4a0, 0xbc015592, 0x0abb0004, + 0xff1bb202, 0x2ab22c03, 0x0076ce7e, 0xe40d03ff, 0xb6ffff89, 0x09fd1004, 0xf4a0a605, 0x2292190d, + 0x0004bc01, 0x0cf440a6, 0xf4a0a60e, 0x04bc090d, 0x01229200, 0xb40910b4, 0x59940930, 0x820abc10, + 0xf14529ff, 0xe4ffff14, 0xb2ffff40, 0x7e0ab21b, 0xb60076ce, 0xa5b21035, 0x0ab23bb2, 0x0076ce7e, + 0x42951bb2, 0xb2a0b210, 0x76ce7e2a, 0xb23bb200, 0x7e2ab2a1, 0xbc0076ce, 0x59950010, 0x0009bc10, + 0x0df410a6, 0x0000890a, 0xa0a9bc01, 0xbc100995, 0x8aa6a0a9, 0x00ed08f5, 0x0bf58aa6, 0x4ab200d4, + 0x85fbb4bd, 0xffffd91c, 0x29a600ff, 0x00fd0df5, 0x888f1809, 0x29bc0024, 0x98fe3ce5, 0x94f0080d, + 0x42d9bcff, 0x3f0049b3, 0xf4b4bcfe, 0x04bb2000, 0x9560bc02, 0xbc0424bb, 0x239505b0, 0x859fff10, + 0xe47c03ff, 0xb2ffff25, 0x7e5bb27a, 0xff0076ce, 0x89950d03, 0x1004b610, 0xa60590ff, 0x190df4a0, + 0xbc017792, 0x20a60002, 0xa60e0cf4, 0x090df4a0, 0xbc017792, 0x0abb0002, 0xff5bb202, 0x1ab21c03, + 0x0076ce7e, 0xe40d03ff, 0xb6ffff89, 0x09fd1004, 0xf4a0a605, 0x1192190d, 0x0002bc01, 0x0cf420a6, + 0xf4a0a60e, 0x1192090d, 0x0002bc01, 0xbb107994, 0x0abb0464, 0x7519ff02, 0x0073cf3e, 0xffffffd9, + 0xf4c9a600, 0x18094e0d, 0xb23e080d, 0x0f940072, 0xff5ee410, 0x9467bcff, 0xa6f0febc, 0x2118f59f, + 0x014a92ff, 0x85fbb4bd, 0xb210091c, 0x73683e9c, 0x0c94bd00, 0x73683e20, 0x0d94bd00, 0x72b23e20, + 0x0d94bd00, 0x73ae3e20, 0xb2100900, 0x73ae3e9d, 0xb2100900, 0x72b23e9d, 0x01449200, 0x3e0002bc, + 0x9200730e, 0x02bc0144, 0x73fc3e00, 0xb2a9b200, 0x00a6b0af, 0xb22b1ef4, 0x00b6b0ba, 0xff131ef4, + 0xb9ffacfa, 0x051ef496, 0xaab900f8, 0xb900f801, 0xfaff01ba, 0x96b9ffac, 0x3ef01ff4, 0xb90076aa, + 0xbab201af, 0xf400b6b0, 0xaf3ed81f, 0xaf950076, 0x10b99510, 0xff91a9ff, 0xbffda0ba, 0xb0b9bc01, + 0xbc10b4b6, 0x00f8a0ba, 0x1400c0b3, 0xb93c94bd, 0x99af3cf8, 0xa6019990, 0xf51bf49c, 0x02f800f8, + 0xf2e8911a, 0xefac17e9, 0xd5510687, 0xd8570622, 0xb47df343, 0x56dd5387, 0xd8db62f9, 0xff7f6fe1, + 0xa6d47a0f, 0xc564fcac, 0xa3fdd2f7, 0x2a44d4c6, 0xff16ae95, 0x77c19e61, 0x70c69a2a, 0xb18bf04b, + 0x3db93bbe, 0xce16b53b, 0xea324a27, 0xccb6435e, 0x5e27377e, 0xfbb2b5bc, 0x2a467f22, 0xbde9aa37, + 0x0c25e1a3, 0x5bc689a1, 0xd1f52c9d, 0x9541814d, 0x4a37b399, 0x2cc57e9f, 0x49f7c018, 0x4fd681c7, + 0x1b9828be, 0xc4d75244, 0xe6cfb742, 0xbd2de519, 0xc93cc6cb, 0x067e35d1, 0x07e3dfb7, 0x70fb061c, + 0x81fe8774, 0x9ba0ac41, 0x679832c8, 0xee8e5ab2, 0xe246ebfe, 0x5d4c48e1, 0xa5c1fef2, 0x656e1673, + 0xe89abb48, 0x75104d58, 0x4a9a7263, 0x8fec8d15, 0xafe66a20, 0xe27af99d, 0x72ba1e11, 0x0b554b07, + 0x14adbaef, 0x23437cd4, 0xb05da023, 0x23f3564c, 0xe9d5975c, 0xd9ca09cd, 
0x8882ad5b, 0x4d1839db, + 0xb0e51902, 0xa79f84cf, 0xa7c51ffc, 0x58c36cbc, 0xd73d0595, 0x1a6c2200, 0x453a5f53, 0x7c4cddf3, + 0x3e124292, 0xa7dc8073, 0x349d3e1e, 0x79a4d051, 0x12bafc05, 0x9b85cd7e, 0xcc0c6807, 0x62ec6029, + 0x1d133c18, 0xde8a349a, 0x641c6a00, 0xf98631ab, 0xa8495295, 0xeb0f011d, 0x0a741942, 0xbb4e2d50, + 0x0e555c43, 0xbfd98150, 0xab5e3679, 0x8396ebeb, 0x7c38b13c, 0x65df1b01, 0xdf76015c, 0x589b2f5a, + 0x1d3d7b5d, 0xe86ab67e, 0xa97d1b5d, 0x2eaa9515, 0x4e2fc8a2, 0x10c382b2, 0xb06995ef, 0x04165c28, + 0x3788ebc0, 0xb42b10fc, 0x18eb1d0d, 0xb522183f, 0x89b1c90f, 0x44a7fea7, 0xb25c3e2e, 0x47b42dd5, + 0xeb90345a, 0x73474b29, 0x3b9fa34f, 0xd1f9bdb4, 0x177e97d0, 0xc7998ebe, 0x289dc743, 0x168d4b32, + 0x86ed9efb, 0xb95b5220, 0xe490edbc, 0x71356806, 0xea22c07a, 0x67843c07, 0x3c8a6fd0, 0xac6eb54f, + 0x03896fb1, 0x2d57a62a, 0x94da8b6a, 0x6d3a8fe4, 0x91788dd7, 0x72df6b5b, 0xe670bc22, 0x1c0fc6c9, + 0x0009af69, 0xa4fd0e35, 0xcd63eaaa, 0xdf80b721, 0xfe16c746, 0x97f305ec, 0xe790b754, 0xb44f8b88, + 0x53425d95, 0xe19dae7d, 0xc44b1ee5, 0xa9da60f4, 0x06a3950d, 0x143113bf, 0x758ba439, 0x134e5cdf, + 0xb4deab12, 0x9c74ea7b, 0xbe0d661a, 0x95c744ab, 0xbd4fbcc2, 0x9614c908, 0x48fa90f9, 0x76659130, + 0x66ddd9b7, 0x22d72ea6, 0xc7555efc, 0xd4d83df5, 0x2df5b168, 0x624cbf07, 0x4dedec50, 0x8f2e294a, + 0x2093ad43, 0x0b5bdabf, 0x134e4f46, 0xf55c5713, 0x7c4946d8, 0x93c67a13, 0x89f82c5b, 0x097b0778, + 0x4b1142e9, 0x2dec1435, 0xf13b6e21, 0x955d0cd6, 0x4abab028, 0x19db2dbb, 0x569ea44d, 0x1286c817, + 0xc54936f5, 0x8b73e573, 0x59d53662, 0xa4b5660d, 0x18d1df63, 0xc568ea0d, 0xdade3591, 0xbcf2434f, + 0xd9459e0c, 0x230ba4a2, 0xec017b6c, 0xa80cbfc7, 0xf8392e60, 0x083b7630, 0xdf29f426, 0x2e043734, + 0xcf2dcf6c, 0x796591aa, 0x03e1c95e, 0x6f0558c6, 0x690d83af, 0x36e7553a, 0xde8d14da, 0x7dbe3c3e, + 0x7952dda0, 0x440d78bd, 0x222ab3f5, 0x94e27084, 0x8ff9e3dc, 0xd0812543, 0xb1a546ea, 0x159ae778, + 0x2410a2fb, 0xf4d2ddb9, 0x0d1bbe42, 0x83bba5e0, 0x4e97a09b, 0x6a0c3ade, 0x590fb1a5, 0xac181285, + 0x79224f9a, 0x7b17f35a, 0xf9b94b74, 0xb670b309, 0x513dde33, 0xe49d8d0d, 0x87f17513, 0x9074cc08, + 0x760e2618, 0x3e060bcb, 0x69403849, 0x9ee491e3, 0x5cadd1a7, 0x52e9c279, 0x484022c1, 0xc58bef3e, + 0xa3e63650, 0xb640dbcb, 0xc7db7656, 0xd806bb4a, 0x85c5359c, 0x5c54fd8f, 0xc6cd3ba2, 0x4482600e, + 0xf42e24a3, 0x56556dde, 0x353ea8cb, 0x83e800ba, 0x65f2e861, 0x5dc67a34, 0x234b4625, 0xdb5b5f42, + 0xd1e44950, 0x33ad7724, 0x547a2634, 0x52f63b53, 0x264d008c, 0x21155a1d, 0xc1adff31, 0xb2f3ffed, + 0x851a663d, 0xa83aacd5, 0x7d27a8b0, 0xbcf3f356, 0x233de13a, 0xe8a83bc1, 0x8ae9d6f2, 0x16706e37, + 0x9f23a5d3, 0x04115a7b, 0x47a5008e, 0x469871d6, 0x4320776b, 0xeb0e4b88, 0x8d03ff31, 0x43a0d5b6, + 0x16490554, 0x23616857, 0x80f6f0fd, 0xeb0c9bc1, 0xf0bd8b10, 0xbdfe3cdc, 0xbf35f509, 0x9abd20f0, + 0xec8180dc, 0x9e7136c1, 0x5bf32074, 0x01c38d18, 0x3b98d636, 0xf08b0f8a, 0x8d6c3935, 0x347a8870, + 0x1306752b, 0x849a97ac, 0xe145d656, 0x376d752f, 0x05f40450, 0x013d1448, 0x05213a51, 0x9c9650a9, + 0xddb9cbeb, 0x04a4e02a, 0xe032b727, 0x200fbb95, 0xf28c2e5f, 0x354a10da, 0xf150a30a, 0x7035c230, + 0x96e12d2e, 0xe2a16e4c, 0x302f2c87, 0xbca7eebb, 0x8329fcd0, 0x86a00199, 0x13fd68a7, 0x6ee6ae12, + 0x9fde6d1d, 0x014d1bc5, 0x4578f43a, 0x11cb68b7, 0x0cf12b98, 0xa295bca0, 0xdf509264, 0x9b353475, + 0x172b15c5, 0x0223f14d, 0xd7c57249, 0xb356694b, 0x35709f50, 0x36f16222, 0x986ded4e, 0x6642d92d, + 0x8667f7d4, 0x238e0129, 0x1c392e68, 0x8dc12f83, 0xb74a8daa, 0xad18c0b5, 0x7156ad88, 0x2a1186c7, + 0xfdbef6c2, 0x3d365f9d, 0xa54b4dc5, 0x963f0bc5, 0xe57cfd08, 0x1d062bde, 0x012c4be8, 0xb6e01243, + 
0x41380d7b, 0x20d90066, 0xbc241e4f, 0x4eb7147a, 0xb8499fe6, 0x62c05670, 0xdcc8fdbb, 0x56bc66b4, + 0x93e7113c, 0x4441c5fe, 0x53f34c23, 0x52c9cfa6, 0x8eee9671, 0x3dc0ba32, 0x3064480d, 0x2bf6dcdc, + 0xf7565b54, 0x3c8db6b4, 0xeffd3f32, 0x52b1612d, 0x5fb45931, 0x54421577, 0x8c311918, 0xb63c55fb, + 0x0e3e43aa, 0xadaf789d, 0x4a7bc5e1, 0x37167755, 0xc2d22ed5, 0xf9058dc1, 0xcb820179, 0xa150a785, + 0x0cf12b98, 0xa295bca0, 0xdf509264, 0x9b353475, 0x54f9de39, 0x8f154e77, 0x5c0c8a30, 0x9d932ab6, + 0xcceb6957, 0xf301315f, 0xe7acad99, 0x748042e2, 0x51917529, 0xbcfbd1b9, 0xd0f2790b, 0x0223e1aa, + 0xfbc13262, 0x721915f0, 0x7280cce8, 0x56be66a8, 0x6873e90b, 0x9e3fe4bd, 0x92841514, 0xa96bc696, + 0x1864a566, 0x1edeceff, 0xeec3db6f, 0x4c8aa448, 0x7cac3111, 0xcb622eef, 0x42fe4741, 0xd8ce151d, + 0x9dd89b16, 0x448d9cda, 0x1b770076, 0xbf34b9b7, 0x4b0b43fa, 0x81e8e6f0, 0x94ef92b4, 0xba1668a4, + 0xc753ced8, 0x4644003e, 0xaa421a63, 0x6ac06e28, 0x37c0e636, 0xc6a33f51, 0x02e0cf0b, 0x7c65a44d, + 0x10f04e15, 0xf1b4e7e1, 0xedf3660c, 0x9835c931, 0xd2c312ec, 0x00352d58, 0xe6956ce2, 0xf96310a5, + 0x1a4a51b9, 0x9207d2cb, 0x9456509a, 0x5900cc71, 0x4daee30e, 0x43ffd2ed, 0x0c923354, 0xb3b41c85, + 0xcc8066c4, 0xb3c3efb2, 0x0cef9d53, 0x180404cf, 0xb8e3e162, 0xd24ce069, 0x7a7a96a9, 0x24a6492e, + 0x70cbe609, 0xf0579178, 0x07e49d32, 0xf47b10cd, 0xca037594, 0x65862cf0, 0xb81127df, 0x18b05f0f, + 0xa326c579, 0x0a8c9e44, 0xe44dbabb, 0x8910e80f, 0xcab4e681, 0x8cb2c3ca, 0xf04e40c7, 0xa2bb680b, + 0xbcf12e4a, 0xb7c74daa, 0x58c167e0, 0xdba411c5, 0x8575f8ec, 0x97d1f818, 0x81440a3e, 0x9428ff37, + 0x84a8ae08, 0x3fccfa2a, 0xf2cf0ffc, 0x3db20114, 0xbb9f2b4e, 0xb2cf8b9a, 0x4774c9a9, 0xcdacf172, + 0x43171f3a, 0x1debd0d1, 0x5a881e5c, 0x67bcefef, 0x64a450a9, 0xd1a03b8d, 0x3d2b6f44, 0xffbebda4, + 0x43c399c5, 0xadd84c4e, 0x8dd45746, 0x0ae9d41b, 0xe2622296, 0x7b15d91b, 0xf33979f0, 0x5f2fc33f, + 0x910b7706, 0x5fe9f692, 0x04d2aa9a, 0x6c00b3f0, 0xa1892819, 0x863ff31c, 0x9629dfa0, 0x6c74074c, + 0xcd9e78b7, 0xe5f11702, 0x8ef618c4, 0x13b54a54, 0x6b74a73a, 0x45c4ffe5, 0x770ee3a3, 0x9b483286, + 0x28674aa6, 0x7c2c6836, 0x410c8c39, 0xe8980aa9, 0x8b42a99c, 0x866c443a, 0x2000a8bd, 0x4cd4799e, + 0x5379f0f3, 0x0144fc45, 0xed0acfdd, 0x6774430b, 0x94980065, 0x8f1c2b6a, 0x6d88eeeb, 0xc837e854, + 0xdf63a241, 0x6f08a80c, 0xf39a3b3c, 0xcd722471, 0xcc0bf79a, 0xdc2be884, 0x53b46a1f, 0xdfd60169, + 0x470bb491, 0xcc1920d2, 0xfd3ac52e, 0x5683fa01, 0xdf63a241, 0x6f08a80c, 0xf39a3b3c, 0xcd722471, + 0xcc0bf79a, 0xdc2be884, 0x53b46a1f, 0xdfd60169, 0x470bb491, 0xcc1920d2, 0xfd3ac52e, 0x5683fa01, + 0xdf63a241, 0x6f08a80c, 0xf39a3b3c, 0xcd722471, 0xcc0bf79a, 0xdc2be884, 0x53b46a1f, 0xdfd60169, + 0x470bb491, 0xcc1920d2, 0xfd3ac52e, 0x5683fa01, 0xdf63a241, 0x6f08a80c, 0xf39a3b3c, 0xcd722471, + 0x00000002, 0x0000002e, 0x00000000, 0x00000000, 0x206e614a, 0x32203031, 0x20323230, 0x333a3930, + 0x32323a36, 0x00580020, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x3fd26956, 0x6674ff70, 0xcc87316f, 0x440b2953, 0x50c0ef2d, 0x3f8fcfb3, 0xc5fef1a0, 0x7404d2c2, + 0x620d246e, 0xac97f8b8, 0xfbe1a191, 0x508703bc, 0x0000e38e, 0x00000000, 0x0000f5c2, 0x00000000, + 0x0000fc0f, 0x00000000, 0x007a0000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000005c, 0x00000058, 0x00001000, + 0x00000180, 0x00002060, 0x00002064, 0x00002068, 0x0000206c, 0x00002070, 0x00002074, 0x00002078, + 0x00002400, 0x00002404, 0x00002418, 0x0000240c, 0x00002414, 0x00002408, 0x00002410, 0x0000242c, + 0x00002420, 0x00002428, 0x0000241c, 0x00002424, 0x00002440, 0x00002434, 0x0000243c, 0x00002430, + 0x00002438, 0x00002450, 0x00002454, 0x0000245c, 0x00002458, 0x00002468, 0x00002464, 0x00002460, + 0x00002098, 0x00002008, 0x00002014, 0x00002100, 0x00002018, 0x0000201c, 0x0000221c, 0x00003064, + 0x00003024, 0x00003284, 0x00003400, 0x00003404, 0x00003420, 0x0000340c, 0x00003414, 0x0000341c, + 0x00003408, 0x00003410, 0x00003418, 0x0000343c, 0x00003428, 0x00003430, 0x00003438, 0x00003424, + 0x0000342c, 0x00003434, 0x00003450, 0x00003454, 0x0000345c, 0x00003458, 0x00003478, 0x0000346c, + 0x00003474, 0x00003468, 0x00003470, 0x00003340, 0x0000321c, 0x00003030, 0x00003034, 0x00003038, + 0x0000303c, 0x00003074, 0x00003390, 0x00002860, 
0x000028a0, 0x000028e0, 0x00002920, 0x00002870, + 0x000028b0, 0x000028f0, 0x00002930, 0x0000281c, 0x00000000, 0x00000000, 0x00000000, 0x00000008, + 0x00000002, 0x00000000, 0x00000001, 0x00000000, 0x00000008, 0x00000008, 0x00000000, 0x00000001, + 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000001, 0x00000003, 0x00000003, 0x00000002, + 0x00000000, 0x00000001, 0x00000000, 0x00000003, 0x00000008, 0x00000000, 0x00000001, 0x00000000, + 0x00000003, 0x0000000d, 0x00000000, 0x00000001, 0x00000000, 0x00000022, 0x00000007, 0x00000000, + 0x00000001, 0x00000000, 0x00000022, 0x0000000c, 0x00000000, 0x00000001, 0x00000000, 0x00000027, + 0x00000007, 0x00000000, 0x00000001, 0x00000000, 0x00000027, 0x0000000c, 0x00000000, 0x00000001, + 0x00000000, 0x0000002c, 0x00000007, 0x00000000, 0x00000001, 0x00000000, 0x0000002c, 0x0000000c, + 0x00000000, 0x00000001, 0x00000000, 0x00000031, 0x00000007, 0x00000000, 0x00000001, 0x00000003, + 0x00000029, 0x00000008, 0x00000000, 0x00000001, 0x00000003, 0x00000026, 0x00000002, 0x00000000, + 0x00000001, 0x00000003, 0x00000029, 0x00000006, 0x00000000, 0x00000001, 0x00000003, 0x00000004, + 0x0000000c, 0x00000000, 0x00000001, 0x00000003, 0x00000002, 0x00000002, 0x00000000, 0x00000001, + 0x00000003, 0x00000002, 0x00000006, 0x00000000, 0x00000001, 0x00000003, 0x00000002, 0x0000000a, + 0x00000000, 0x00000001, 0x00000003, 0x00000002, 0x0000000e, 0x00000000, 0x00000001, 0x00000003, + 0x00000002, 0x00000001, 0x00000000, 0x00000001, 0x00000003, 0x00000000, 0x00000004, 0x00000000, + 0x00000001, 0x00000003, 0x00000000, 0x00000008, 0x00000000, 0x00000001, 0x00000003, 0x00000000, + 0x0000000c, 0x00000000, 0x00000001, 0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000003, 0x00000030, 0x00000004, 0x00000000, 0x00000001, 0x00000003, 0x00000032, 0x00000002, + 0x00000000, 0x00000001, 0x00000003, 0x00000032, 0x00000006, 0x00000000, 0x00000001, 0x00000003, + 0x00000034, 0x00000004, 0x00000000, 0x00000001, 0x00000003, 0x00000036, 0x00000002, 0x00000000, + 0x00000001, 0x00000003, 0x00000038, 0x00000004, 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000007, + 0x00000000, 0x00000000, 0x00010002, 0x70200005, 0x01100008, 0x1021000a, 0x01040020, 0x0ced0028, + 0x2a2b0038, 0x2c290039, 0x2a2b003a, 0x2829003b, 0x2527003c, 0x2224003d, 0x1f21003e, 0x1c1e003f, + 0x181a0040, 0x14160041, 0x10120042, 0x000d0043, 0x0208004e, 0x00980051, 0x00ec0067, 0x00010002, + 0xb0210005, 0x01100008, 0x1021000a, 0x01040020, 0x0ced0028, 0x2a2b0038, 0x2c290039, 0x2a2b003a, + 0x2829003b, 0x2527003c, 0x2224003d, 0x1f21003e, 0x1c1e003f, 0x181a0040, 0x14160041, 0x10120042, + 0x000d0043, 0x0208004e, 0x00980051, 0x00ec0067, 0x00010002, 0x69200005, 0x01100008, 0x1021000a, + 0x0eee0028, 0x2a2b0038, 0x2c290039, 0x2a2b003a, 0x2829003b, 0x2527003c, 0x2224003d, 0x1f21003e, + 0x1c1e003f, 0x181a0040, 0x14160041, 0x10120042, 0x000d0043, 0x0208004e, 0x00980051, 0x00ec0067, + 0x00010002, 0x70220005, 0x01100008, 0x1021000a, 0x01040020, 0x0ced0028, 0x2a2b0038, 0x2c290039, + 0x2a2b003a, 0x2829003b, 0x2527003c, 0x2224003d, 0x1f21003e, 0x1c1e003f, 0x181a0040, 0x14160041, + 0x10120042, 0x000d0043, 0x0208004e, 0x00980051, 0x00ec0067, 0x00010002, 0x8d1a0005, 0x01100008, + 0x1021000a, 0x01040020, 0x09e90028, 0x2a2b0038, 0x2c290039, 0x2a2b003a, 0x2829003b, 0x2527003c, + 0x2224003d, 0x1f21003e, 0x1c1e003f, 0x181a0040, 0x14160041, 0x10120042, 
0x000d0043, 0x0208004e, + 0x00980051, 0x00ec0067, 0x00800009, 0x000900c0, 0x00c00000, 0x00280067, 0x00120038, 0x00130240, + 0x00140030, 0x00190220, 0x001e0164, 0x003af8ff, 0x003f6110, 0x00450100, 0x004b0513, 0x00500088, + 0x00552222, 0x005a0013, 0x00632270, 0x006d0c01, 0x007b0233, 0x0080001e, 0x00aa3145, 0x00af0ca5, + 0x00b40010, 0x00b9070e, 0x00c30098, 0x00c80002, 0x00cd0a60, 0x00d70733, 0x00fa0017, 0x01060001, + 0x010d0057, 0x011b1007, 0x01220023, 0x01540483, 0x015e0077, 0x016360d3, 0x01681053, 0x016d0a0a, + 0x01723400, 0x0177050f, 0x017c0005, 0x01810d0d, 0x01bb000b, 0x01c00120, 0x01c53fff, 0x01ca3fff, + 0x01cf003f, 0x01d20262, 0x01d50271, 0x01d80271, 0x01db0271, 0x01e10271, 0x01e40003, 0x01e70014, + 0x0223695f, 0x022a00f2, 0x022b8484, 0x022d900f, 0x022e8484, 0x0230901f, 0x02314343, 0x0233901f, + 0x02348484, 0x0237901f, 0x0238901f, 0x02820800, 0x02830310, 0x00120196, 0x00130240, 0x00140030, + 0x00190220, 0x001e0023, 0x003af0ff, 0x003f2131, 0x00450111, 0x00550513, 0x00630013, 0x006d0c40, + 0x007b0233, 0x0080001e, 0x00902145, 0x00aa0040, 0x00af0ca5, 0x00b40010, 0x00b90711, 0x00c30098, + 0x00c80008, 0x00cd0a60, 0x00d70733, 0x00fa0017, 0x01060001, 0x010d0057, 0x01221007, 0x01540483, + 0x015e0077, 0x01636192, 0x01681053, 0x016d0055, 0x01811500, 0x01bb0008, 0x01c00120, 0x01c53fff, + 0x01cf300f, 0x01d20273, 0x01d50271, 0x01d80271, 0x01db0271, 0x01e10271, 0x01e40003, 0x01e70014, + 0x022a695f, 0x022d8282, 0x02308282, 0x02334343, 0x02388282, 0x02820800, 0x02830310, 0x00120196, + 0x00130240, 0x00140030, 0x00190228, 0x001e00a3, 0x0023a4fc, 0x00240009, 0x003a0021, 0x003f6110, + 0x00450100, 0x00500513, 0x00552222, 0x00630013, 0x006d0c40, 0x007b0233, 0x0080001f, 0x00aa3146, + 0x00af0ca5, 0x00b40010, 0x00b90713, 0x00c30098, 0x00c80006, 0x00cd0a60, 0x00d70733, 0x00fa0007, + 0x01060001, 0x010d0057, 0x01221007, 0x01540483, 0x015e0077, 0x0163a0d5, 0x01681053, 0x01814322, + 0x01bb0008, 0x01c00120, 0x01c53fff, 0x01cf300f, 0x01d20273, 0x01d50271, 0x01d80271, 0x01db0271, + 0x01e10271, 0x01e40003, 0x01e70014, 0x022a695f, 0x022d4343, 0x02304343, 0x02334343, 0x02384343, + 0x02820800, 0x02830310, 0x00120196, 0x00130240, 0x00140030, 0x00190220, 0x001e0164, 0x003aa03c, + 0x003f2130, 0x00450100, 0x00550513, 0x006d0013, 0x007b0233, 0x0080001e, 0x00b43145, 0x00b90709, + 0x00c30098, 0x00c80002, 0x00cd0a60, 0x00d70733, 0x00fa0017, 0x01060001, 0x010d0057, 0x01221007, + 0x01540483, 0x015e0077, 0x016360d5, 0x01811053, 0x01bb0008, 0x01c00120, 0x01c53fff, 0x01cf300f, + 0x01d20273, 0x01d50271, 0x01d80271, 0x01db0271, 0x01e10271, 0x01e40003, 0x01e70014, 0x022a695f, + 0x022d8484, 0x02308484, 0x02334343, 0x02828484, 0x02830310, 0x00240196, 0x00300000, 0x00010013, + 0x00b90001, 0x00ff00cb, 0x00010264, 0x02640031, 0x00310011, 0x0030006c, 0x02640030, 0x0f020102, + 0x00060080, 0x0220000f, 0x011101ff, 0x0154020b, 0x020effff, 0xffff0159, 0x01630211, 0x02140007, + 0xffff0168, 0x016d0217, 0x021affff, 0xffff0172, 0x0181021d, 0x00ee000f, 0x000f0090, 0x00813384, + 0x00002000, 0xffffdfff, 0x00813084, 0x9e5107eb, 0x0000f800, 0x00813030, 0x02ef0000, 0xf800ffff, + 0x00813034, 0x0600bd27, 0x00000000, 0x00813038, 0x0000eb87, 0xffff0000, 0x0081303c, 0x03c602ef, + 0xf800f800, 0x00813074, 0x9e320000, 0x0000ffff, 0x008133ac, 0x317fda00, 0x80000000, 0x00813304, + 0x00000000, 0xfffffffe, 0x008130a8, 0x00028c60, 0xfffc0003, 0x00810284, 0x00000000, 0xfffffffe, + 0x0081028c, 0x00000000, 0xfffffffe, 0x00810068, 0x40000000, 0xbfffffff, 0x00810020, 0x00001500, + 0xffff00ff, 0x00810288, 0x00000000, 0xffff00ff, 0x00812080, 0x00000058, 0xfffff800, 0x00812088, + 
0x000042a8, 0xffff0000, 0x00812148, 0x00000009, 0xffffffc0, 0x00812030, 0x00000058, 0xc0000000, + 0x00812020, 0x32a80000, 0x0000ffff, 0x0081201c, 0x0000a318, 0xffff0000, 0x00812234, 0x00000000, + 0xfffffffe, 0x0081004c, 0x02129000, 0xfc0c0fff, 0x00810044, 0x02000000, 0x000fffff, 0x00812828, + 0x00006800, 0xffff00ff, 0x00803814, 0x06000000, 0x00ffffff, 0x00813080, 0x00002e20, 0xffff0000, + 0x00813030, 0x03ce0000, 0xf800ffff, 0x00813034, 0x0000c4c1, 0x00000000, 0x00813038, 0x0000f230, + 0xffff0000, 0x0081303c, 0x03ce03ce, 0xf800f800, 0x008133ac, 0x317d5133, 0x80000000, 0x00812084, + 0x00002e24, 0xffff0000, 0x00812088, 0x000002af, 0xfffff800, 0x00812020, 0x42af0000, 0x0000ffff, + 0x00812018, 0x00002e48, 0xffff0000, 0x0081201c, 0x2e240000, 0x0000ffff, 0x00813084, 0x07af0000, + 0xf800ffff, 0x00810020, 0x00000d00, 0xffff00ff, 0x00802294, 0x00000007, 0xfffffff8, 0x00802288, + 0x00000007, 0xfffffff8, 0x00802284, 0x00000007, 0xfffffff8, 0x0081708c, 0x00000000, 0xfffffefe, + 0x00817294, 0x000001a1, 0xfffffe5e, 0x00817288, 0x00000181, 0xfffffe7e, 0x00817284, 0x000001fe, + 0xfffffe01, 0x0081728c, 0x0000007e, 0xffffff81, 0x00816904, 0x80000000, 0x7fffffff, 0x00816a94, + 0x0008ffff, 0xfff70000, 0x00816ab4, 0x00010101, 0xfffe0000, 0x00816a88, 0x0008ffff, 0xfff70000, + 0x00816aa8, 0x00010101, 0xfffe0000, 0x00816a84, 0x000fffff, 0xfff00000, 0x00816aa4, 0x00070101, + 0xfff80000, 0x00816aac, 0x00040000, 0xfffbffff, 0x00815884, 0x00000000, 0xfffffffe, 0x00815a94, + 0x00000003, 0xfffffffc, 0x00815a88, 0x00000003, 0xfffffffc, 0x00815a84, 0x0000002b, 0xffffffd4, + 0x00815a8c, 0x00000028, 0xffffffd7, 0x00815804, 0x000000a0, 0xffffff0f, 0x00816124, 0x800003e8, + 0x7f000000, 0x00816294, 0x19260000, 0xe6d9ffff, 0x00816200, 0x00000008, 0xfffffff7, 0x00816288, + 0x19260000, 0xe6d9ffff, 0x00816284, 0x27fe0000, 0xd801ffff, 0x0081628c, 0x26d80000, 0xd927ffff, + 0x00815084, 0x00000000, 0xfffffffe, 0x00815294, 0x00000101, 0xfffffefe, 0x00815288, 0x00000101, + 0xfffffefe, 0x00815284, 0x07800701, 0xf87ff8fe, 0x0081528c, 0x00000600, 0xfffff9ff, 0x00815004, + 0x000000a0, 0xffffff0f, 0x00816c0c, 0x00640001, 0x00000000, 0x00816c08, 0x00010001, 0x00000000, + 0x00816c18, 0x00013a84, 0x00000000, 0x00816c10, 0x00013a84, 0x00000000, 0x00816c00, 0x00000024, + 0xffffffdb, 0x0081640c, 0x00640001, 0x00000000, 0x00816408, 0x00010001, 0x00000000, 0x00816418, + 0x00013a84, 0x00000000, 0x00816410, 0x00013a84, 0x00000000, 0x00816400, 0x00000024, 0xffffffdb, + 0x00816a8c, 0x00070000, 0xfff8ffff, 0x00816128, 0x0000006c, 0xfffff000, 0x00816ab4, 0x00040000, + 0xfffbffff, 0x00816aa8, 0x00040000, 0xfffbffff, 0x00816aac, 0x00000000, 0xfffbffff, 0x00816128, + 0x00000064, 0xfffff000, 0x00802088, 0x00000059, 0xffffff00, 0x00816904, 0x00000000, 0x7fffffff, + 0x00816ab4, 0x00020000, 0xfffdffff, 0x00816aa8, 0x00020000, 0xfffdffff, 0x00816124, 0x00000000, + 0x7fffffff, 0x0081612c, 0x00030000, 0xfff0ffff, 0x00816100, 0x00000400, 0xfffff0ff, 0x00816128, + 0x00000064, 0xfffff000, 0x02020100, 0x03030303, 0x04040404, 0x04040404, 0x05050505, 0x05050505, + 0x05050505, 0x05050505, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, + 0x06060606, 0x06060606, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, + 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, + 0x07070707, 0x07070707, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 
0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000010, 0x00000000, 0x7c010001, 0x04100c13, 0x00000293, 0x0000002c, 0x00000018, 0x00007272, + 0x0000041b, 0x00000304, 0x04200e00, 0x00000002, 0x0980440e, 0x0b820a81, 0x0d840c83, 0x0f860e85, + 0x11881087, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +// array of code and data overlay offsets and sizes +// Defined as follows: +// OS Code Offset +// OS Code Size +// OS Data Offset +// OS Data Size +// NumApps (N) +// App 0 Code Offset +// App 0 Code Size +// App 1 Code Offset +// App 1 Code Size +// . . . . +// . . . . +// App N-1 Code Offset +// App N-1 Code Size +// App 0 Data Offset +// App 0 Data Size +// App 1 Data Offset +// App 1 Data Size +// . . . . +// . . . . +// App N-1 Data Offset +// App N-1 Data Size +// OS Ovl Offset +// OS Ovl Size +// +// +const NvU32 minion_ucode_header_lr10_dbg[] = { + 0, + 30464, + 32768, + 9728, + 1, + 30464, + 2304, + 32768, + 0, + 32768, + 0, +}; + +const NvU32 minion_ucode_data_size_lr10_dbg = 12096; + +#endif // _MINION_UCODE_LR10_DBG_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/minion_production_ucode_lr10_prod.h b/src/common/nvswitch/kernel/inc/lr10/minion_production_ucode_lr10_prod.h new file mode 100644 index 000000000..7126039b2 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/minion_production_ucode_lr10_prod.h @@ -0,0 +1,1593 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + + +/* + + DO NOT EDIT - THIS FILE WAS AUTOMATICALLY GENERATED + +*/ + +#ifndef _MINION_UCODE_LR10_PROD_H_ +#define _MINION_UCODE_LR10_PROD_H_ + +const NvU32 minion_ucode_data_lr10_prod[] = { + 0x004000d0, 0x0004fe00, 0xc47ea4bd, 0x02f8006b, 0x4908a4b6, 0x9afa0830, 0x00b6b000, 0x090bfcf0, + 0x029fbb02, 0xfa08104f, 0xb4b300f9, 0x02f80600, 0x010a00f8, 0x107eb4bd, 0x00f80000, 0xb4bd020a, + 0x0000107e, 0x060a00f8, 0x107eb4bd, 0x00f80000, 0x000000df, 0x08bc4940, 0x89009ffa, 0xb6010060, + 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x0000d905, 0xa9fa4000, 0x40004f01, 0xfa08bc49, 0x00f8009f, + 0x000000df, 0x08bc4980, 0x89009ffa, 0xb6010060, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0xffffd905, + 0xb9fdbfff, 0x01abfa04, 0x0080008f, 0xfa08bc49, 0x00f8009f, 0x000000df, 0x08bc4904, 0x89009ffa, + 0xb6017304, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0xfa020905, 0x004f01a9, 0x08bc4904, 0xf8009ffa, + 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd3000, 0x08504905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, + 0x0000d904, 0xa9fd5000, 0x08584905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd6000, + 0x085c4905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd4000, 0x08644905, 0xf8009afa, + 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd2000, 0x08544905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, + 0x0000d904, 0xa9fd1000, 0x08604905, 0xf8009afa, 0xffffd900, 0xa9fd00ff, 0x0000d904, 0xa9fd7000, + 0x08684905, 0xf8009afa, 0x00328900, 0x0090fe00, 0x00003c89, 0x890091fe, 0xbd000028, 0xde9cbff4, + 0x80000000, 0x3e090049, 0xfa0001b4, 0xff90009e, 0x04999001, 0x08f4fca6, 0x4fe4bdf5, 0x00dd0980, + 0x3e800000, 0xfa0001d8, 0xf9b800fd, 0xfa000100, 0xee90009d, 0x04ff9001, 0x08f4eca6, 0xf994bded, + 0xf9120999, 0x8900f899, 0xb60100a0, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00aace05, 0xf8ffa4f0, + 0x74a08900, 0x0fa4b601, 0xd9a0a9bc, 0x00800000, 0xce05a9fd, 0xa4f000aa, 0x4900f8ff, 0x99cf0884, + 0x00288f00, 0x7099c700, 0x00f8f9a0, 0x3049010f, 0x009ffa08, 0xc0ffeedf, 0xfa400900, 0x00f8009f, + 0xf403a6b0, 0x94bd090d, 0x00025d3e, 0x00002889, 0xa9a699bf, 0xf0089cf0, 0x96f00196, 0xf89a3201, + 0x0000d900, 0xb4f08000, 0x80aab8ff, 0xb9fd0002, 0x08c4b605, 0xf102a4b6, 0xfdffffc4, 0xacfa05cb, + 0x3d00f800, 0x02913e94, 0x92a92000, 0xaa9001bb, 0x00b4b301, 0xbd00f8f8, 0x02a93e94, 0xf8b93c00, + 0x3c01cc92, 0x999099af, 0x00c4b301, 0x4900f8f4, 0x99cf0608, 0x00008f00, 0x049ffd10, 0x8b2d1bf4, + 0x0c000030, 0x27108a10, 0x02977e00, 0x00408b00, 0x8a100c00, 0x7e002720, 0x8b000297, 0x0c000050, + 0x27308a10, 0x030f3e00, 0x00608b00, 0x8a100c00, 0x7e002710, 0x8b000297, 0x0c000070, 0x27208a10, + 0x02977e00, 0x27308a00, 0x00808b00, 0x7e100c00, 0x8a000297, 0x0b000050, 0x02837e10, 0x00808a00, + 0x7e100b00, 0x8a000283, 0x0b000040, 0x02837e10, 0x00708a00, 0x7e100b00, 0xf4000283, 0x10891f3c, + 0x93f00027, 0x0699fa06, 0x3cf403f8, 0x77008f00, 0x08f5b600, 0x00090089, 0xfd1094b6, 0x008f059f, + 0x9ffd0200, 0x009afe05, 0x00036c3e, 0x0077007e, 0x0003743e, 0x0003783e, 0x0000308a, 0x837e100b, + 0x608a0002, 0x100b0000, 0x0002837e, 0x844f00f8, 0x00ffcf08, 0x9abb0109, 0xff94f104, 0x049ffdff, + 0xf00bacf0, 0x00f801a6, 0x800000d9, 0x02aa9000, 0xfd0fa4b6, 0xaace05a9, 0xffa4f000, 0x148900f8, + 0xa4b60130, 0xa0a9bc0f, 0x800000d9, 0x05a9fd00, 0xc700aace, 0x00f864aa, 0x01202489, 0xbc0fa4b6, + 0x00d9a0a9, 0xfd008000, 0xaace05a9, 0x64aac700, 0x248900f8, 0xa4b60120, 0xa0a9bc0f, 0x800000d9, + 0x05a9fd00, 0xc700afce, 0x90b364f9, 0xa43d0804, 0xf9c400f8, 0x0b9cf00f, 0x00f89a32, 0x0003d87e, + 0xf000a6b0, 0x00f80bac, 0x0003d87e, 0xf006a6b0, 0x00f80bac, 0x0003be7e, 0xf006a6b0, 0x00f80bac, + 0x0003a87e, 
0xf002a6b0, 0x00f80bac, 0xe04f94bd, 0x045b3e0f, 0x00fecf00, 0xa6019990, 0xf808f49a, + 0x288900f8, 0xd4bd0000, 0x888e9cbf, 0x00da0176, 0x0b008000, 0x048d3efb, 0xf5eaff00, 0xfd00f9ce, + 0xf9fa049b, 0x01dd9001, 0x8000eeb8, 0xf4dca600, 0x00f8ea08, 0x01315489, 0xbc0fa4b6, 0x00d9a0a9, + 0xff008000, 0x9cfa95a9, 0x0000d901, 0xaa920080, 0x95a9ff04, 0xd9019dfa, 0x00800000, 0xf010aa92, + 0xa9fd7fb4, 0x08b4b605, 0x000002d9, 0x05b9fd01, 0x0f01abfa, 0xbde4bd01, 0x3e050dc4, 0xce0004f3, + 0x010f00a9, 0x92189cc7, 0x9ec701dd, 0x060bf403, 0x3301cfc6, 0xb30800f0, 0xb3e800d4, 0xb30c00e0, + 0xbd0800c0, 0x0a00f8a4, 0x8900f829, 0xb6013140, 0xa9bc0fa4, 0x0000d9a0, 0xb4f00080, 0x05a9fd7f, + 0xd910b4b6, 0x01000001, 0xfa05b9fd, 0x010f01ab, 0xc4bde4bd, 0x4e3e050d, 0xa9ce0005, 0xc7010f00, + 0xdd92189c, 0x039ec701, 0xc6060bf4, 0xf03301cf, 0xd4b30800, 0xe0b3e800, 0xc0b30c00, 0xa4bd0800, + 0x2a0a00f8, 0x588900f8, 0xa4b60131, 0xa0a9bc0f, 0x800000d9, 0x05a9fd00, 0xf800aace, 0x315c8900, + 0x0fa4b601, 0xd9a0a9bc, 0x00800000, 0xce05a9fd, 0x00f800aa, 0x01214089, 0xbc0fa4b6, 0x00d9a0a9, + 0xf0008000, 0xa9fd7fb4, 0x10b4b605, 0x000001d9, 0x05b9fd01, 0xbd01abfa, 0x3ef4bd94, 0xce0005d2, + 0x9fc700a9, 0x01cc9218, 0xf40399c7, 0xf4b3070b, 0xc4b31400, 0x90b3ed00, 0xf4b30800, 0x2c0a0800, + 0xa4bd00f8, 0x548900f8, 0xa4b60121, 0xa0a9bc0f, 0x800000d9, 0x95a9ff00, 0xd9019cfa, 0x00800000, + 0xff04aa92, 0x9dfa95a9, 0x0000d901, 0xaa920080, 0x7fb4f010, 0xb605a9fd, 0x02d908b4, 0xfd010000, + 0xabfa05b9, 0xbd94bd01, 0x06403ef4, 0x00a9ce00, 0x92189fc7, 0x99c701ee, 0x070bf403, 0x1400f4b3, + 0xed00e4b3, 0x080090b3, 0x0800f4b3, 0x00f82b0a, 0x00f8a4bd, 0x01215889, 0xbc0fa4b6, 0x00d9a0a9, + 0xfd008000, 0xaace05a9, 0x8900f800, 0xb601215c, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00aace05, + 0x6c8900f8, 0x9abc0002, 0x8900f8a8, 0xb200029c, 0xa89abcaf, 0x0002ac89, 0xf0989fbc, 0xa4b603a4, + 0xff94f008, 0xf805a9fd, 0x51248900, 0x0000df01, 0xa4b60080, 0xa0a9bc0f, 0xce95afff, 0xaab80099, + 0xfd000800, 0xaace05af, 0x0194f000, 0xbd071bf4, 0xf000f8a4, 0x00f801a4, 0x09f0babc, 0x00f6b001, + 0xa6081ef4, 0x0b9cf0bc, 0xda1f94b6, 0x7fffffff, 0xffa4faff, 0x00f8a59a, 0xff05bafd, 0x00f8a4cb, + 0xbab2a9b2, 0x18f4b9a6, 0xf89ab205, 0x0f348900, 0xbce4bd00, 0x4489a99e, 0x9ebc000f, 0x0f5489a9, + 0xa99ebc00, 0x000f6489, 0x89a99ebc, 0xbc000f74, 0x8489a99e, 0x9ebc000f, 0x0f9489a9, 0x10f48d00, + 0xa99ebc00, 0x89f8dabc, 0xbc000fa4, 0xb489a99e, 0x9ebc000f, 0x0fc489a9, 0xa99ebc00, 0x000fd489, + 0xbc01f5f0, 0xe489a99e, 0xdfbc000f, 0xa99ebca9, 0x0000b08f, 0x000ff489, 0x94a99ebc, 0x9fbc06a9, + 0xa0f4bd90, 0x01ff909e, 0xb3049990, 0x89f810f4, 0x8f0001b0, 0xbc0001c4, 0xc089a99e, 0x9e3c0001, + 0x05a994a9, 0x9fbce4bd, 0xa0f4bd90, 0x01ff909e, 0xb3049990, 0x89f808f4, 0xbc000244, 0x5489a99e, + 0x9e3c0002, 0x8900f8a9, 0x94010190, 0xf9bc0faf, 0x0000d9f0, 0xf9fd0080, 0x00f9ce05, 0xfa0195f0, + 0x0d7e01f9, 0x00f80007, 0x000e8489, 0x9ebce4bd, 0x0e7489a9, 0xa99ebc00, 0x000e5489, 0x89a99ebc, + 0xbc000e94, 0xa489a99e, 0x9ebc000e, 0x0eb489a9, 0xa99ebc00, 0x000ec489, 0x0010e48d, 0xbca99ebc, + 0xe489f8da, 0x9ebc000e, 0x0ef489a9, 0xa99ebc00, 0x000f0489, 0x89a99ebc, 0xf0000f14, 0x9ebc17f5, + 0xa9dfbca9, 0x000ed489, 0xf8a99ebc, 0x0fae9400, 0x01008089, 0x800000df, 0xe0e9bc00, 0xcef5efff, + 0x95f000f9, 0x01f9fa04, 0x800000df, 0x00eeb800, 0xefff0022, 0x00f9cef5, 0x010095f1, 0xd901f9fa, + 0x00800000, 0x1000eeb8, 0x05e9fd00, 0xf000e9ce, 0xe9fa1795, 0x07e87e01, 0x8900f800, 0x3c0001c0, + 0x9433989a, 0x988f5900, 0xa9940101, 0x909fbc0f, 0x800000df, 0x059ffd00, 0x8f0099ce, 0xbc0001b0, + 0x95b6e8fa, 0xa9f9bc10, 0xb6029ebb, 
0x99ec0494, 0x96b03fff, 0x050df40f, 0xb08e0f09, 0xaf940000, + 0xf0f9bc04, 0x9098efbc, 0xe9bc0199, 0xff9eb3f9, 0x010f0e00, 0x0001c089, 0xf8a99f3c, 0x02548900, + 0x989a3c00, 0x56009433, 0x0101a08f, 0xbc0fa994, 0x00df909f, 0xfd008000, 0x99ce059f, 0x02448f00, + 0xe8fabc00, 0xbce899c7, 0x9ebba9f9, 0x6199e702, 0x0796b003, 0x09050df4, 0x01c48e07, 0x03af9400, + 0xbcf0f9bc, 0x999098ef, 0xf9e9bc01, 0x00ff9eb3, 0x89010f0e, 0x3c000254, 0x00f8a99f, 0x00026c89, + 0xb3989abc, 0xbd080290, 0x7e00f8a4, 0xec001acb, 0xf803e8aa, 0xb202f900, 0x1acb7ea0, 0x03e84c00, + 0xd4bdabb2, 0xe712a4b6, 0x7e01eebb, 0x89007272, 0xbc000500, 0x94b39890, 0xa0890c01, 0xc53e0000, + 0x10890009, 0x90bc0005, 0x0294b398, 0x0090890c, 0x09c53e00, 0x0190b300, 0xb2bfb20c, 0x09da3eae, + 0x00988900, 0x989cbf00, 0x677e019d, 0xb9940071, 0x10a5b610, 0xff10bf95, 0xa889a59a, 0x9cbf0000, + 0xb2019998, 0x10c5b6fb, 0xb6109d95, 0x9cff1094, 0x71677ec5, 0x10a99500, 0xfd10ba94, 0x01fb05a9, + 0x0002dc89, 0xf9989abc, 0xb3a0b202, 0x7e380194, 0x330006a9, 0xb23000a0, 0x04287e0a, 0x00a43300, + 0x7e0ab226, 0x33000434, 0xb21c00a4, 0x09757e0a, 0x027c8900, 0x9890bc00, 0x0d0294b3, 0x3e02a5b6, + 0xbd000a45, 0x8901fba4, 0xf9002784, 0xf89a3c22, 0x0002dc89, 0xb2989abc, 0x01f630a0, 0xb00b1cf0, + 0x2cf00196, 0x17537e0b, 0x025c8900, 0xc890bc00, 0x0003c489, 0x89f890bc, 0xbc000a34, 0x2489e890, + 0x90bc000a, 0x0a1489d8, 0x9890bc00, 0xb61e14b6, 0xa4b61d24, 0xffc4f11f, 0x0ff4f0ff, 0xf005acfd, + 0xf4b601e4, 0x13e4b614, 0xf005affd, 0xaefd01d4, 0x0194f005, 0xb612d4b6, 0xadfd1094, 0x05a9fd05, + 0xfd05a1fd, 0x21fb05a2, 0x0002dc89, 0xf9989abc, 0xb2cfb252, 0x0190b3a4, 0x3e94bd0a, 0x89000af2, + 0x3c000324, 0x9630989a, 0x0b9cf000, 0xf00196f0, 0xb1920196, 0x0f16b090, 0xc42f0cf4, 0xfec703f9, + 0xf099bc22, 0xbcf0f9bc, 0x94bc9044, 0x0294b690, 0x89f0f9bc, 0xbc002950, 0xf4b6f0fe, 0xf0f1bc04, + 0x3ea89fbc, 0xb30012f7, 0x036741b9, 0x0041b6b1, 0x00a20cf5, 0x3722b9b3, 0x22b6b002, 0xb34a0cf4, + 0x01c713b9, 0xf413b6b0, 0xb9b3250c, 0xb0014b10, 0x0cf410b6, 0x01bdb30c, 0x8b3e012d, 0xb9b3000c, + 0xb3017211, 0x011f12bd, 0x000cfc3e, 0xb115b9b3, 0x15b6b001, 0x019c08f5, 0xaf20b9b3, 0x21bdb301, + 0x573e0105, 0xb9b3000d, 0xb0025227, 0x0cf427b6, 0x24b9b31d, 0xb6b00205, 0xe608f524, 0x25b9b301, + 0xbdb3020d, 0x3e00e026, 0xb3000dc1, 0x02912cb9, 0xf42cb6b0, 0xb9b3110c, 0xb3024a28, 0x00c729bd, + 0x000e283e, 0x942eb9b3, 0x40bdb302, 0x773e00b9, 0xbbb3000e, 0x03fb0081, 0x0081b6b1, 0xb34c0cf4, + 0x03554cb9, 0x004cb6b1, 0xb3250cf4, 0x02e244b9, 0x0044b6b1, 0xb30c0cf4, 0x008b43bd, 0x000eb63e, + 0xe445b9b3, 0x46b4b302, 0x0f0c3e7d, 0x4eb9b300, 0xb6b10365, 0x08f5004e, 0xb9b30332, 0xb3036f4f, + 0x630080be, 0x000fa03e, 0x0087bbb3, 0xb6b1058e, 0x0cf40087, 0x84bbb328, 0xb1045500, 0xf40084b6, + 0xbeb30c0c, 0x3e400083, 0xb3001073, 0x640085bb, 0x86beb304, 0x463e3100, 0xbbb30011, 0x064d00a0, + 0x00a0b6b1, 0xb3120cf4, 0x010089bb, 0x8abeb306, 0x963e1500, 0xbbb30012, 0x065400a1, 0x00ffbbb3, + 0xdfda0674, 0x3ebadfba, 0xb20012f7, 0x0a477e4a, 0x12f73e00, 0x10048e00, 0x01943300, 0x00508f28, + 0x0f499401, 0xdf909fbc, 0x00800000, 0xce059ffd, 0xe4bc0099, 0x059ffdf8, 0x710100df, 0x0cc93e20, + 0xf8e4bc00, 0x710100d9, 0xa4f9ff20, 0x0012f73e, 0x000e5480, 0xbb019d33, 0x008c8905, 0x0f4b9401, + 0xd9b0b9bc, 0x00800000, 0xce05b9fd, 0xff4c00bb, 0xa804bc03, 0x03ffb4f1, 0x000d4f3e, 0x8b7e4ab2, + 0xf73e0006, 0x4ab20012, 0x00095c7e, 0x0012f73e, 0x5c7e4ab2, 0xa5b60009, 0x12f73e05, 0x7e4ab200, + 0x3e000a00, 0x800012f7, 0x33000f34, 0x0564019d, 0x8e0f4b94, 0xbc010198, 0x00d9b0be, 0xfd008000, + 0xbbce05b9, 0xa804bc00, 0x8c10b5b6, 0x7e00ffff, 0x3e0006d8, 
0x800012f7, 0x33000f44, 0x0534019d, + 0x8f0f4b94, 0x3e010198, 0x80000e87, 0x33000f54, 0x0520019d, 0x890f4b94, 0x3e01019c, 0x80000e9e, + 0x33000f74, 0x050c019d, 0x8e0f4b94, 0xbc0101a0, 0x133eb0be, 0x8480000e, 0x9d33000f, 0x9404f501, + 0xa08f0f4b, 0xe83e0101, 0xa480000d, 0x9d33000f, 0x9404e101, 0xa4890f4b, 0xb9bc0101, 0x0d3a3eb0, + 0x0fb48000, 0x019d3300, 0x4b9404ca, 0x01a48e0f, 0xb0bebc01, 0x000ea13e, 0x000fc480, 0xb3019d33, + 0x0f4b9404, 0x0101a88f, 0xd9b0bfbc, 0x00800000, 0xce05b9fd, 0x04bc00bb, 0xe8bbc7a8, 0x0012883e, + 0x000fd480, 0x8b019d33, 0x0f4b9404, 0x0101a889, 0xd9b0b9bc, 0x00800000, 0xce05b9fd, 0x04bc00bb, + 0xffb4f0a8, 0x0012883e, 0x000e7480, 0x63019d33, 0x0f4b9404, 0x0122888e, 0x000d373e, 0xf40ff6b0, + 0xc4bd090d, 0x000e4a3e, 0x4994fcb2, 0x00b08f04, 0x909cbc00, 0x000e703e, 0xf407f6b0, 0xc4bd090d, + 0x000e663e, 0x4994fcb2, 0x01c48f03, 0x909cbc00, 0x3ea8f9bc, 0x800012f7, 0x33000ed4, 0x0414019d, + 0x8f0f4b94, 0xbc013294, 0xa13eb0bf, 0x8480000e, 0x9d33000e, 0x9403fd01, 0x88890f4b, 0xb9bc0132, + 0x0000d9b0, 0xb9fd0080, 0x00bbce05, 0xf0a804bc, 0x4b3e00b3, 0x9480000d, 0x9d33000f, 0x9403d501, + 0xa08e0f4b, 0xbebc0101, 0x0ef73eb0, 0x0f648000, 0x019d3300, 0x4b9403be, 0x01a08f0f, 0xb0bfbc01, + 0x000f1f3e, 0x000fe480, 0xa7019d33, 0x0f4b9403, 0x0101a889, 0xd9b0b9bc, 0x00800000, 0xce05b9fd, + 0x04bc00bb, 0xf0bbc7a8, 0x0012883e, 0x000ff480, 0x7f019d33, 0x0f4b9403, 0x0101a88e, 0xd9b0bebc, + 0x00800000, 0xce05b9fd, 0x04bc00bb, 0x18b5b6a8, 0x0012883e, 0x000ee480, 0x57019d33, 0x0f4b9403, + 0x0133988f, 0x000f993e, 0x000ef480, 0x43019d33, 0x0f4b9403, 0x01339c89, 0xd9b0b9bc, 0x00800000, + 0xce05b9fd, 0xffdc00bb, 0xbc00ffff, 0xc63ea804, 0x0480000f, 0x9d33000f, 0x94031901, 0xa08e0f4b, + 0xbebc0133, 0x0f5b3eb0, 0x0f148000, 0x019d3300, 0x4b940302, 0x33a48f0f, 0xb0bfbc01, 0x000f5b3e, + 0x000e6480, 0xeb019d33, 0x207c8902, 0x0f4b9401, 0xd9b0b9bc, 0x00800000, 0xce05b9fd, 0x04bc00bb, + 0xffffdca8, 0xbcfd7fff, 0x0d4f3e04, 0x0ec48f00, 0x0e948000, 0x0ea48200, 0x0eb48500, 0x01943300, + 0x0f41946a, 0x01328c8e, 0x800000d9, 0x101ebc00, 0xce0519fd, 0xf4bc0011, 0xff1bc4a8, 0x7e00ff4c, + 0xc4005fd5, 0x04bcffa3, 0xe81bc7a8, 0x7e00ff4c, 0xb2005fd5, 0xa824bca0, 0x4cf01bc7, 0x04b600ff, + 0x5fd57e08, 0xbca2b200, 0x1b95a854, 0x00ff4c18, 0x7e0503fd, 0xb6005fd5, 0xa4b61024, 0x0502fd18, + 0x3ea50aff, 0xbc0012f7, 0x04bc98f4, 0xf854bcd8, 0xf0e824bc, 0xd4b6ff94, 0x18f4b608, 0xfdffe4f0, + 0xd4f1059f, 0xe4b6ffff, 0x059efd10, 0x3ea59dff, 0x800012f7, 0x33001014, 0x0218019d, 0x8f0f4994, + 0xbc012028, 0x9d3e909f, 0x24800010, 0x9d330010, 0x94020101, 0x288e0f49, 0x9ebc0130, 0x0000df90, + 0x9ffd0080, 0x0099ce05, 0x3ef804bc, 0x330012e9, 0x8f720194, 0x94013040, 0x00de0f49, 0xbc008000, + 0x9eff909f, 0x00fdcef5, 0xfd189992, 0x9fce059e, 0x02f9c400, 0xe40b0bf4, 0x3e0f00dc, 0x890010e9, + 0xbc001074, 0x9c949894, 0x10f9c408, 0xc70a0bf4, 0xfd3e70d9, 0x94890010, 0x94bc0010, 0x109e9498, + 0xf420f9c4, 0xd9c70a0b, 0x11143e78, 0x10848900, 0x9894bc00, 0xff189094, 0x90ff95ec, 0x12f73ea5, + 0x10948900, 0xe894bc00, 0x00107489, 0x89f894bc, 0xbc001084, 0xe4b69894, 0x08f4b610, 0xb605effd, + 0xc33e1894, 0x94330012, 0x448e5801, 0x49940130, 0x909ebc0f, 0x800000de, 0xf59eff00, 0x9200ffce, + 0x9efd1c99, 0x009dce05, 0x3380d9c4, 0xc70b0090, 0x7d3e70f9, 0xa4890011, 0x94bc0010, 0x10919498, + 0xf440d9c4, 0xf9c70a0b, 0x11943e78, 0x10b48900, 0x9894bc00, 0xff189094, 0xf73ea501, 0xa4890012, + 0x94bc0010, 0x10b489f8, 0x9894bc00, 0xb610f4b6, 0xe93e1894, 0x348e0012, 0x94330010, 0x688f7901, + 0x49940130, 0x909fbc0f, 0x800000df, 0x059ffd00, 0xbc0092ce, 0x2fc498e4, 0xf4f9a63f, 
0x9fb20518, + 0x00106489, 0xc49894bc, 0x2fc7fffd, 0xf4f9a6b8, 0x9fb20518, 0x00104489, 0x94e894bc, 0x9dff18f9, + 0xa82dc7f5, 0x18f4dea6, 0x89edb205, 0xbc001054, 0xd9940894, 0xb02dc708, 0xffff94f1, 0xa615f9ff, + 0x0518f4d0, 0xd9c40db2, 0x1094b6ff, 0x3ea519ff, 0x890012f7, 0xbc001064, 0x5489d894, 0xe4bc0010, + 0xe894bcf8, 0x00104489, 0xf09894bc, 0xd4b6fff4, 0xffe4f018, 0xb605fdfd, 0xe4b60894, 0xff94f110, + 0x05fefdff, 0x0012e93e, 0x000f2480, 0x23019433, 0x01301489, 0xbc0f4b94, 0x00d9b0b9, 0xfd008000, + 0xbbce05b9, 0xa804bc00, 0x3e00ff4c, 0xbc000d4f, 0xf73ea804, 0x4ab20012, 0xe46cfcc7, 0x7e0ffffb, + 0x3e0012f9, 0x8e0012f7, 0x94003910, 0x9ebc0449, 0x39108f90, 0x949ebf00, 0x99900249, 0x98f9bc01, + 0xff1094b6, 0xf73ea59e, 0x188f0012, 0x44bc0039, 0x0394b690, 0x8e909fbc, 0xbf003910, 0x0449949f, + 0x98909ebc, 0x94b60399, 0xa59fff10, 0x0012f73e, 0x0002ec89, 0xfba894bc, 0x28548951, 0x09a4b601, + 0x000000df, 0xc0cabca0, 0x03ffb4f1, 0xb606c4b6, 0xc9bc10b4, 0x05bffdc0, 0x800000d9, 0x95c9ff00, + 0xd9019bfa, 0x00800000, 0xfd04cc90, 0xccce05c9, 0xf8cab200, 0x28548900, 0x09a4b601, 0xb6d0dabc, + 0xd9bc06d4, 0x0000d9d0, 0xc3f06000, 0xffb4f100, 0x05c9fd03, 0xd910b4b6, 0x00800000, 0xfd05cbfd, + 0xdcfa05d9, 0xd900f801, 0x60000000, 0xf100b3f0, 0xfd03ffa4, 0xa4b605b9, 0x380cd910, 0xbafd0080, + 0x019bfa05, 0x00d900f8, 0xf1a00000, 0xb603ffa4, 0xa9fd10a4, 0x380cd905, 0x9afa0080, 0x3810da01, + 0xaace0080, 0x8900f800, 0xb6017090, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00afce05, 0xf9fde709, + 0x03c9c404, 0xfd0394b6, 0xaffa05f9, 0x01b43301, 0x01944f1d, 0xc700a9ce, 0x9ca63399, 0x920e0bf4, + 0xf4b301ff, 0x180af200, 0xa4bd00f8, 0x908900f8, 0xa4b60170, 0xa0a9bc0f, 0x800000d9, 0x05a9fd00, + 0xdf00a9ce, 0x80000000, 0xfa059ffd, 0xfc0f01a9, 0xc4049ffd, 0x9ffd03cf, 0x01a9fa05, 0x1d01b433, + 0xce01944f, 0x99c700a9, 0xf49ca630, 0xff920e0b, 0x00f4b301, 0xf8190af2, 0xf8a4bd00, 0x70908900, + 0x0fa4b601, 0xd9a0a9bc, 0x00800000, 0xce05a9fd, 0xffdf00a9, 0xfd7fffff, 0xa9fa049f, 0x8f00f801, + 0xf9017090, 0x0fa99412, 0x9fbca1b2, 0x0000df90, 0x9ffd0080, 0x0099ce05, 0x000000df, 0xfd1a0002, + 0x0bf4049f, 0xbd010b20, 0x13a77ec4, 0xb3a0b200, 0xb21300a4, 0x143d7e1a, 0x89020f00, 0xbc00026c, + 0x0ab2199f, 0x02f911fb, 0x020c010b, 0xee7ea0b2, 0x0ab20013, 0x010c010b, 0x0013a77e, 0x0d00a4b3, + 0x6c89010f, 0x9fbc0002, 0x8f01fb09, 0x94017090, 0x9fbc0fa9, 0x0000df90, 0x9fff0080, 0x00d9ced5, + 0xffffffdf, 0x049ffdfe, 0x8e01d9fa, 0xbf00030c, 0xbb010fe9, 0xf9fd04fa, 0x08844905, 0x99cfefa0, + 0xff94f100, 0x0099b9ff, 0x0f059ffd, 0xf49fa6ff, 0x248f221b, 0xd9ce00f4, 0x1999c700, 0xb301ff92, + 0xb30c00f0, 0x3ef30190, 0x0a001531, 0x0190b317, 0xf8a4bd06, 0x030c8900, 0x099fbf00, 0xe49abc01, + 0xf494efff, 0xfefd060b, 0x030c8906, 0xf89fa000, 0x030c8e00, 0x09efbf00, 0x049abb01, 0xa0059ffd, + 0xf900f8e9, 0x03b4f012, 0x940fa4b6, 0x44890cbd, 0xb4b60128, 0xa0a9bc04, 0x00d1e4bd, 0x00008000, + 0xcfff4ccf, 0xcef5a1ff, 0x90fd00f9, 0x059bfd04, 0xfd049cfd, 0xf9fa059d, 0x01ee9001, 0xb340aa90, + 0xfbe504e4, 0x8912f911, 0x3c000c0c, 0xa0b2989a, 0x9933380a, 0x8900cc01, 0x3c0002fc, 0x9c89f890, + 0x90bc0002, 0x300e0ae8, 0x9cf001f6, 0x02e6b00b, 0xf00bfcf0, 0x9ffd01f6, 0xa51bf504, 0x03288900, + 0x98903c00, 0x9933120a, 0x94009801, 0x1c890f0d, 0x00dc0100, 0xbc008000, 0xdcffd0d9, 0x00cecec5, + 0x0002cc89, 0xdf9890bc, 0x8fffffff, 0xf004effd, 0x94b60794, 0x05e9fd1c, 0xdf01cefa, 0x00800000, + 0x2804ddb8, 0xf5dfff00, 0xf100f9ce, 0xfa081895, 0x00d901f9, 0x90008000, 0x894e18dd, 0x05d9fd00, + 0x9200dfce, 0xf9c701ee, 0x0194b303, 0x04f9c712, 0x0b0194b3, 0xb307f1c7, 0xb30c0110, 0x3e2100e0, + 0xb3001640, 
0xb21900e0, 0x7e020b0a, 0x89001563, 0xbd0002dc, 0x0991bca4, 0x00167e3e, 0x11fb200a, + 0x01004489, 0xbc0fa4b6, 0x00d9a0a9, 0xb6008f00, 0xc4b60cb4, 0xffb4f104, 0xffc4f0ff, 0xf005b9fd, + 0xbcfd03d4, 0x02d4b605, 0x800000d9, 0x05bdfd00, 0xfa05a9fd, 0xa9ce01ab, 0x01ee9200, 0x0800e4b3, + 0x00f8040a, 0xb30394f0, 0x0aef0190, 0x0094b305, 0xf8a4bd06, 0x20948900, 0x0fa4b601, 0xbc0fb4f0, + 0x00d9a0a9, 0xfd008000, 0xabfa05a9, 0x04834f01, 0xb600a9ce, 0x0bf41f95, 0xf8a4bd07, 0x01ff9200, + 0xf000f4b3, 0x00f82d0a, 0x01004089, 0xbc0fa4b6, 0x0089a0a9, 0xb4b60f00, 0x03c4f004, 0xb6ffb4f0, + 0xb9fd02c4, 0x0000d905, 0xbcfd0080, 0x05a9fd05, 0x0901abfa, 0x17453e01, 0x00a9ce00, 0xf40394f0, + 0xdd92100b, 0x00d4b301, 0xb3240af4, 0xbd060094, 0x8900f8a4, 0xbc00028c, 0x96b0989a, 0x0bacf003, + 0x408900f8, 0xa4b60100, 0xa0a9bc0f, 0x800000d9, 0x05a9fd00, 0xf000aace, 0xacf003a4, 0xd900f80b, + 0x00800000, 0xb602aa90, 0xa9fd0fa4, 0x00aace05, 0xf8ffa4f0, 0x08b4b600, 0x0002ec89, 0xffffb4f1, + 0xbc04b5f0, 0x00f8a99b, 0x01284089, 0x800000df, 0x0fa4b600, 0xffa0a9bc, 0x99ce95af, 0x00aab800, + 0xaffd0001, 0xffff8f05, 0x049ffdfe, 0x0400008f, 0x8f059ffd, 0xf0f7ffff, 0x9ffd01b4, 0x13b4b604, + 0x0201008f, 0xfd05bffd, 0xabfa05b9, 0x8900f801, 0xdf012840, 0x00800000, 0xbc0fa4b6, 0xafffa0a9, + 0x0099ce95, 0x0100aab8, 0x05affd00, 0x9ffdfe0f, 0x0295f004, 0xf801a9fa, 0xbd02f900, 0x7ea0b2b4, + 0xb20017a8, 0x17ef7e0a, 0x8901fb00, 0xbc00029c, 0xb4b3a99b, 0x0f0f0f02, 0x0002ac89, 0xf8a99fbc, + 0x02ac8e00, 0x03b4b300, 0x2b8c8f20, 0x0fa99401, 0xdf909fbc, 0x00800000, 0xce059ffd, 0x99c70099, + 0x18673e68, 0xbc94bd00, 0x00f8a9e9, 0x00025889, 0x00f89a20, 0x00025889, 0x00f89a3f, 0x0002fc89, + 0x30989a3c, 0xacf00196, 0x0f00f80b, 0x03008901, 0xa99f3c00, 0x2089f43d, 0x9f3c0005, 0x0f00f8a9, + 0x03048901, 0xa99f3c00, 0x010f00f8, 0x00030889, 0xf8a99f3c, 0x03148900, 0xa99bbc00, 0x148900f8, + 0x9abc0003, 0x4767d9f8, 0xf9a68932, 0xf80bacf0, 0x89f43d00, 0x3c000324, 0xf4bda99f, 0x0002bc89, + 0xf8a99fbc, 0x03248900, 0xa89a3c00, 0x048900f8, 0xa4b60169, 0x0000df0f, 0xa9bc0080, 0xf5afffa0, + 0xde00f9ce, 0x7fffffff, 0xfa049efd, 0x00d901f9, 0xb8008000, 0x0207e0aa, 0xce05a9fd, 0xffdf00a9, + 0xfd7fffff, 0xa9fa049f, 0x8900f801, 0xb6016124, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00a9ce05, + 0xffffffdf, 0x049ffd7f, 0xf801a9fa, 0x21088900, 0x0fa4b601, 0x800000df, 0xa0a9bc00, 0xcef5afff, + 0x95f100f9, 0xf9fa03ff, 0x0000d901, 0xaa900080, 0x05a9fd40, 0x0f00a9ce, 0x049ffdc0, 0xf0ff3f4f, + 0x9ffd1295, 0x4095f004, 0xf801a9fa, 0x08044900, 0x8d0099ce, 0xde017600, 0x00800000, 0xbc0fa4b6, + 0x008da0ad, 0xaefffe8a, 0xa0adbcf5, 0x3300ffce, 0xe44b0394, 0x738000f9, 0x8f430090, 0xbc01284c, + 0xaeffa0af, 0x0099ce95, 0xb8dfff4f, 0x000100aa, 0xff049ffd, 0xe9fae5ae, 0x0000de01, 0xaa920080, + 0x95aeffe8, 0xb80099ce, 0x000100af, 0xde05fefd, 0xefffffff, 0xfa049efd, 0x00f801f9, 0x044942f9, + 0x0099ce08, 0x0176008d, 0xde0fa394, 0x00800000, 0x8d303dbc, 0xfffe8a00, 0x3dbcf53e, 0x00ffce30, + 0xa9039d33, 0x00f9e400, 0x00997380, 0x4c8f00a0, 0x3fbc0128, 0x254eff40, 0x890020ce, 0xbc01294c, + 0xef093039, 0xfd153eff, 0x10fa0409, 0x02704a01, 0x200005f1, 0x00044c7e, 0xd90110fa, 0x00800000, + 0xffe83392, 0x00ce0539, 0x0031b800, 0x19fd0001, 0xffffd905, 0x09fdbfff, 0x0000d904, 0x09fd1000, + 0x0110fa05, 0x4c7e0f0a, 0x00d90004, 0xfd400000, 0x10fa0509, 0x7e0f0a01, 0xd900044c, 0xbfffffff, + 0xfa0409fd, 0x0f0a0110, 0x00044c7e, 0xbd0022ce, 0x1025f0f4, 0x800000de, 0x954eff00, 0x900192fa, + 0x449001ff, 0x04f4b340, 0x7e41fbf4, 0xb3003afd, 0xb31004a0, 0xb31a07a0, 0x3e0f00a0, 0xda001af4, + 0x01896402, 0x80da00f8, 0xf802faf0, 
0x9f88da00, 0x00f8032a, 0xadbeefda, 0xf900f8de, 0x7ea1b222, + 0x33000440, 0x00c001ad, 0x287e1ab2, 0xad330004, 0xb200b501, 0x04347e1a, 0x01ad3300, 0x8c8900aa, + 0x10940130, 0x0009bc0f, 0x800000d9, 0x009b4f00, 0x89e509ff, 0xbcfecf74, 0xe9ce0009, 0x01ff9200, + 0x0f00f4b3, 0x01023209, 0x3e0391b0, 0xb6001b58, 0x90b31f95, 0x24bde801, 0x01005089, 0x09bc1ab2, + 0x5b517e00, 0x7e1ab200, 0xb2006472, 0x5fe37e1a, 0x7e1ab200, 0xb20062f3, 0x63477e1a, 0x7e1ab200, + 0xd90063ae, 0x00800000, 0xce9509ff, 0x94f00099, 0x0c0bf430, 0x2889010f, 0x9f3c0003, 0x4767df19, + 0x14898932, 0x9fbc0003, 0x0024b319, 0x3ea4bd0a, 0xb4001bb6, 0x010f03a0, 0x00032489, 0x3e199f3c, + 0x0a001bc5, 0x8921fb13, 0xbc00028c, 0x22f9989a, 0x90b3a0b2, 0xa4bd0a03, 0x001c873e, 0xd57eb4bd, + 0x0ab20016, 0xc4bd060b, 0x020202dd, 0x05dc4e02, 0x0005e67e, 0x9300adb3, 0x05dc4100, 0x020202d2, + 0xb2070b02, 0x050b7e0a, 0x01119200, 0x667e0ab2, 0xa2a60005, 0xb2751bf4, 0x057d7e0a, 0x00a4b300, + 0xbd0ab26c, 0x0d040cb4, 0x07d04e01, 0x0016807e, 0x5700a4b3, 0x060b0ab2, 0x02ddc4bd, 0x4e010101, + 0xe67e05dc, 0xa4b30005, 0xdc414100, 0x0102d205, 0x070b0101, 0x0b7e0ab2, 0x11920005, 0x7e0ab201, + 0xa6000566, 0x2d1bf4a2, 0x7d7e0ab2, 0xa4b30005, 0x0ab22400, 0xd57e0e0b, 0xa4b30016, 0x020f0d00, + 0x00027c89, 0xfb099fbc, 0x001db321, 0x213eff78, 0x14b3001c, 0x2f0ac000, 0x001c873e, 0xb4bd32f9, + 0xd57ea1b2, 0x1ab20016, 0xc4bd060b, 0x020202dd, 0x09c44e02, 0x0005e67e, 0xa4b3a2b2, 0xc4402c00, + 0x0202d309, 0x070b0202, 0x0b7e1ab2, 0x00920005, 0x7e1ab201, 0xa6000566, 0x111bf4a3, 0x7d7e1ab2, + 0xa4b30005, 0x2ab20800, 0x04b331fb, 0x2e02dc00, 0x001ce63e, 0x00028c89, 0xf9989abc, 0xb3a1b222, + 0xbd0a0390, 0x1d7f3e04, 0x7eb4bd00, 0xb20016d5, 0x00a4b3a0, 0x0b1ab26e, 0xddc4bd06, 0x02020202, + 0x7e09c44e, 0xb20005e6, 0x00a4b3a0, 0x09c44056, 0x020202d2, 0xb2070b02, 0x050b7e1a, 0x01009200, + 0x667e1ab2, 0xa2a60005, 0xb23b1bf4, 0x057d7e1a, 0x00a4b300, 0xbd1ab232, 0x0dc4bdb4, 0x07d04e01, + 0x0016807e, 0xa4b3a0b2, 0x010f1900, 0x00027c89, 0x9fbc1ab2, 0x089b7e19, 0x7e1ab200, 0xb20008fd, + 0xb321fb0a, 0x00b20004, 0x1d7f3e2e, 0x001c8900, 0x9432f901, 0x09bc0fa0, 0x0000d900, 0xa1b20080, + 0x893509ff, 0xbcfeffe4, 0x32ce0009, 0x02cc8900, 0x5c2fc700, 0x7ea99fbc, 0x33003ab8, 0x890f00a4, + 0xbc0002bc, 0x90b39891, 0xf4bd1900, 0x0002bc89, 0x9fbc1ab2, 0x7e020b19, 0x3e00182b, 0x8e001e71, + 0x3f000258, 0x33300ae9, 0x008c009d, 0x0002dc89, 0x09f891bc, 0x0ae92001, 0x01f0b30c, 0x89010f7a, + 0xbc00029c, 0x00df199f, 0xd9400000, 0x8fffffff, 0xfd9429ff, 0x39fa059f, 0x00688901, 0x0000df01, + 0x09bc0080, 0xf50fff00, 0xde00f9ce, 0x40000000, 0xfa059efd, 0x00df01f9, 0x90008000, 0x0fff4400, + 0x00f9cef5, 0xfdcfff4e, 0xf9fa049e, 0x0000d901, 0x0eb80080, 0xfd0022e8, 0xe9ce05e9, 0xffffdf00, + 0x9ffd7fff, 0x01e9fa04, 0xfd01ff90, 0xe9fa059f, 0xfba4bd01, 0x028c8931, 0xbcf4bd00, 0x9c89a99f, + 0x9fbc0002, 0x02bc89a9, 0xa99fbc00, 0x00025c89, 0x89a99fbc, 0xbc0002dc, 0x7c89a99f, 0x9fbc0002, + 0x026c89a9, 0xbc010e00, 0xac89a99e, 0x9fbc0002, 0x02ec89a9, 0xbc02f900, 0xfc89a99f, 0x9e3c0002, + 0x030089a9, 0xa99f3c00, 0x00030489, 0x89a99f3c, 0x3c000308, 0x2489a99f, 0x9f3c0003, 0x032889a9, + 0x3ca0b200, 0x357ea99f, 0x0ab20015, 0x0039c27e, 0x2d7e0ab2, 0x01fb0029, 0x0002dc89, 0xf9989abc, + 0x01a2b222, 0x029db325, 0xbc8901f4, 0xa0940166, 0xbc050f0f, 0x00d90009, 0xff008000, 0x9ffa9509, + 0x0000d901, 0x00900080, 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, + 0x00900080, 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, 0x00900080, + 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 
0x0000d901, 0x00900080, 0x9509ff04, + 0xd9019ffa, 0x00800000, 0x096800b8, 0x9509ff02, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, + 0x0000d901, 0x00900080, 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, + 0x00900080, 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d901, 0x00900080, + 0x9509ff04, 0xd9019ffa, 0x00800000, 0xff040090, 0x9ffa9509, 0x0000d101, 0x00b80080, 0x8f00177c, + 0xfffe8af8, 0x0fbc9501, 0x0099ce00, 0xf40194f0, 0x04895f1b, 0x09bc0175, 0x5b127e00, 0x7e2ab200, + 0xb2006630, 0x1e757e2a, 0x7e2ab200, 0xb200070d, 0x07e87e2a, 0xf501ff00, 0xf000f9ce, 0xf9fa0195, + 0x0000df01, 0x00b80080, 0xff021540, 0x99ce950f, 0x0000b800, 0x0fff0010, 0x0099ce95, 0xfe983c89, + 0x080000b8, 0xf50fff02, 0xce0009bc, 0x2ab200f9, 0x0014a67e, 0xadb3a1b2, 0xb2008300, 0x14cb7e2a, + 0xb3a1b200, 0xb27600a4, 0x7e010b2a, 0xb20032ac, 0x00a4b3a1, 0x75048f68, 0x8afc8901, 0x000fbcfe, + 0x800000de, 0xe50eff00, 0xce0009bc, 0xfe0f00e9, 0xfa049ffd, 0x2ab201e9, 0x003b2d7e, 0x1f00a433, + 0x0176088f, 0x8f000fbc, 0xd9fe89f8, 0x00800000, 0xbc9509ff, 0x010f000f, 0xb2019ffa, 0x3b367e2a, + 0x00a43300, 0x76548918, 0xbc010f01, 0x00d90009, 0xff008000, 0x9ffa9509, 0xfb1ab201, 0x7ea4bd21, + 0xbd001e75, 0x3b057ea4, 0x7e010a00, 0x0a001e75, 0x3b057e01, 0x7e020a00, 0x0a001e75, 0x3b057e02, + 0x7e030a00, 0x0a001e75, 0x3b057e03, 0x89f43d00, 0x20000310, 0xb200f89f, 0x01cfc4ad, 0xfa08c049, + 0xbe4f009f, 0x01c0b373, 0x4e030f06, 0xe9cf08c0, 0x0899c700, 0x0bf49ca6, 0x01ff9227, 0xf200f4b3, + 0xf001c6b0, 0x1d090bfc, 0x33a29fbc, 0xb21401b4, 0xb2040bac, 0x02617eda, 0xf8a4bd00, 0xf8a4bd00, + 0x02dc8900, 0x989abc00, 0x110f02f9, 0x94b3a0b2, 0x010b6802, 0x377e010c, 0x0d940021, 0x2820890f, + 0x0000df01, 0xd9bc0080, 0xf5dfffd0, 0x4e00f9ce, 0x9efdfdfe, 0x01f9fa04, 0x800000d9, 0x18dd9000, + 0xfd50b74e, 0xd9ce05d9, 0x209fc600, 0xc70194f0, 0x90b305ff, 0xf4bd0600, 0xb301ee92, 0xb30800e0, + 0xb2e700f0, 0xbd010b0a, 0x21377ec4, 0x89f4bd00, 0xbc0002bc, 0xfab2099f, 0x22f901fb, 0xc1b2b2b2, + 0x947ea0b2, 0x2209003a, 0x8a00ad33, 0x0314b300, 0x028c8f3c, 0x0124b300, 0x0018892b, 0x09f1bc01, + 0xbc0f0f94, 0x00d9f0f9, 0xfd008000, 0xf9ce05f9, 0xffffde00, 0x9efddfff, 0x01f9fa04, 0x0022593e, + 0xf9bc0209, 0x22593e09, 0x028c8900, 0xbc010f00, 0x0209099f, 0x0bf42fa6, 0x00ac893c, 0x0f0d9401, + 0xd9d0d9bc, 0x00800000, 0xce05d9fd, 0x26b000d9, 0x0bfcf000, 0x9401f4f0, 0xf4b602fc, 0xfdf70e03, + 0x9ffd049e, 0xfdfb0f05, 0x9cfd049f, 0x01d9fa05, 0x9ab294bd, 0x8c8921fb, 0x9abc0002, 0xb202f998, + 0x019992a0, 0xf40196b0, 0xdc893e0d, 0x9abc0002, 0x0194b398, 0x03f27e34, 0x01a43300, 0x7e0ab20e, + 0x3e001cf4, 0xb20022cd, 0x1c9c7e0a, 0x00a4b300, 0x7e0ab21a, 0xbd00177f, 0xb2acb2b4, 0x21fa7e0a, + 0x22e73e00, 0xfba4bd00, 0x0c32f901, 0xb2a2b207, 0x4e030db3, 0x807e07d0, 0xa1b20016, 0x3600a4b3, + 0x9207d040, 0x34b30100, 0x2ab20e00, 0x0003d87e, 0x00231a3e, 0xbe7e2ab2, 0xa6b00003, 0x0b9cf007, + 0x0c0000b3, 0xdf009033, 0x0023323e, 0x06009433, 0x1ab20601, 0xdc8931fb, 0x9abc0002, 0xb212f998, + 0x0299b3a1, 0xa87e016b, 0xa4f00003, 0x04a0b3ff, 0x02a0b314, 0x0b1ab210, 0x0d030c04, 0x17087e6e, + 0xbd1ab200, 0x22e97eb4, 0x0b1ab200, 0x22e97e01, 0x284c8900, 0x0f109401, 0x800000de, 0x0009bc00, + 0xce950eff, 0xef0f0099, 0x010000b8, 0x049ffd00, 0xf1e50eff, 0xfa200095, 0x00de01e9, 0x92008000, + 0x0effe800, 0x0099ce95, 0x00b8fd0f, 0xfd000100, 0x0eff049f, 0x0195f0e5, 0x0b01e9fa, 0xb2020c01, + 0x13ee7e1a, 0x0000d900, 0x00b80080, 0x4e000728, 0x09ff0106, 0x0000dab5, 0x00b81000, 0xff020ff4, + 0x6889c509, 0x09bcfedf, 0x00b9ce00, 0xc700cdce, 0x9afd749f, 0x0e0bf404, 0x0b00f4b3, 
0x3e1fd995, + 0xbd002405, 0x01ee9294, 0x0800e0b3, 0xdd009033, 0x7e01384a, 0xb200044c, 0x7e030b1a, 0x89001563, + 0xb2012820, 0x0009bc1a, 0x010c010b, 0x0021377e, 0x800000df, 0xf50fff00, 0x4e00f9ce, 0x9efdf3e5, + 0x01f9fa04, 0x800000d9, 0x180f9000, 0xff50b74c, 0xedcee5f9, 0x18d9c400, 0xc6101bf4, 0x9bc740d9, + 0x82d9c406, 0x06009033, 0xcc92b4bd, 0x00c0b301, 0x00b03308, 0xb2b4bde1, 0x17a87e1a, 0xbd010b00, + 0x7e1ab2c4, 0xb2002137, 0x39ac7e1a, 0x7e1ab200, 0x890039b7, 0x0f0002dc, 0x199fbc02, 0x00027c89, + 0x9ebce4bd, 0x026c8919, 0x199ebc00, 0x6c8f11fb, 0x02f90100, 0xb20fa994, 0x909fbca0, 0x00dfe4bd, + 0xff008000, 0xfefaf59f, 0x0000df01, 0x99920080, 0x059ffd04, 0x9ffaf4bd, 0x0c040b01, 0x7e370d01, + 0xb2001708, 0x23367e0a, 0x8f01fb00, 0xf901006c, 0x0fa99402, 0x9fbca0b2, 0xdfe4bd90, 0x00800000, + 0xfaf59fff, 0x00df01fe, 0x92008000, 0x9ffd0499, 0xfaf4bd05, 0xee7e019f, 0x0ab20018, 0x0022967e, + 0x367e0ab2, 0x01fb0023, 0xa1b242f9, 0x0069307e, 0x00395089, 0x3398913c, 0x94380194, 0x70890f1e, + 0xd4bd0128, 0xdae0e9bc, 0x00800000, 0xffffffdb, 0x0000dcc0, 0xeaff1c00, 0x00f9cef5, 0xfd049bfd, + 0xf9fa059c, 0x01dd9001, 0xb340ee90, 0x89eb04d4, 0x94012840, 0x09bc0f10, 0x0000d900, 0x09ff0080, + 0x004fce45, 0x010000b8, 0x3509ff00, 0xfed6c089, 0xd90009bc, 0x3fffffff, 0xfa04f9fd, 0x1ab2013f, + 0x003ad07e, 0x1ab2abb2, 0x005cc37e, 0xadb3a2b2, 0x8900a300, 0x3c000324, 0x94339891, 0x1ab20a01, + 0x005ba67e, 0x01282089, 0xd90009bc, 0x00800000, 0xced509ff, 0x95f100d9, 0xd9fa0201, 0x180f9001, + 0x800000d9, 0xc5f9ff00, 0xce00c84f, 0xff9200ce, 0x01e9c401, 0x0b0194b3, 0xb305e9c7, 0xb30c0190, + 0x3e4a00f0, 0xb30025eb, 0xce4200f0, 0xff8f0049, 0x9ffdf9fe, 0x0139fa04, 0xf100d9ce, 0xfa040295, + 0xc84f01d9, 0x00cece00, 0xc701ff92, 0x94b301e9, 0xe9c70b01, 0x0190b306, 0x00f0b30c, 0x26253e16, + 0x00f4b300, 0x264f3e10, 0x3e370200, 0x02002651, 0xfb2ab221, 0xb252f941, 0xfc30f4a1, 0x003afd7e, + 0x1ab2a4b2, 0x003ad97e, 0x1ab2a3b2, 0x003b127e, 0x00031089, 0xa2b2993f, 0x92019933, 0x3800de00, + 0xe9ce0080, 0xffffdf00, 0x9ffd3fff, 0x01e9fa04, 0x217e1ab2, 0x00d50068, 0x94008000, 0x14890f10, + 0x09bc0171, 0x0505fd00, 0xb20000ce, 0x3aeb7e1a, 0x08884900, 0xb60099cf, 0x0bc41d95, 0x0091b0ff, + 0x3db24cb2, 0xe17e2eb2, 0xa4b3005e, 0x0c896800, 0x913c000c, 0x33390a98, 0x895b0190, 0x3c000c10, + 0x94339891, 0x04de2001, 0xce008038, 0xff8f00e9, 0x95fde0ff, 0x049ffd05, 0x0a00008f, 0xfa059ffd, + 0x010f01e9, 0x00031089, 0x90899f20, 0x1f940170, 0xf0f9bc0f, 0x800000d9, 0x05f9fd00, 0xde00f9ce, + 0x01000000, 0xfa059efd, 0x1ab201f9, 0x0015357e, 0x30f4a4bd, 0xf951fb04, 0x73108942, 0x0fa19401, + 0x19bca0b2, 0x0000df10, 0x1fff0080, 0x00f9cef5, 0xfa0295f0, 0x588901f9, 0x11b8fe8b, 0xdf000198, + 0x00800000, 0xbcf51fff, 0xf9ce1019, 0xe495f000, 0x7e01f9fa, 0xb20067ab, 0x69047e0a, 0x7e0ab200, + 0xb3003aeb, 0xb22e02a4, 0x69e27e0a, 0x7e0ab200, 0xb3003af4, 0xb20a02a4, 0x698a7e0a, 0x7e0ab200, + 0xb3003af4, 0xb23e03a4, 0x683e7e0a, 0x27e13e00, 0x7e0ab200, 0xb3003aeb, 0xb22a03a4, 0x686a7e0a, + 0x7e0ab200, 0xb3003af4, 0xb20a03a4, 0x68c87e0a, 0x7e0ab200, 0xb3003af4, 0xb20a02a4, 0x68807e0a, + 0x7e0ab200, 0xb3003af4, 0xb20e04a0, 0x3af47e0a, 0x05a4b300, 0x7e0ab20a, 0x8900689c, 0xbc016900, + 0x00d91019, 0xff008000, 0x42ce4519, 0x001fb800, 0xf9ff0208, 0x0031ce35, 0x9d7e0ab2, 0xa033003a, + 0xf7091000, 0xfd1015f0, 0x363e0429, 0xef090028, 0xfd0825f0, 0x0ab20419, 0x003aa67e, 0x1000a033, + 0x15f0ef09, 0x0429fd01, 0x0028543e, 0x25f0fe09, 0x0419fd10, 0xaf7e0ab2, 0xa033003a, 0xfd091000, + 0xff021ec5, 0x723ef429, 0xfd090028, 0xff022fc5, 0x4ffae419, 0x013efa01, 0x2489f43d, 0x9f3c0003, + 0xf941fb09, 
0x7ea1b222, 0xb2006a03, 0x3ae27e1a, 0x03a0b300, 0x3874890f, 0x98913c00, 0x0a019433, + 0x7f7e1ab2, 0x1ab20067, 0x003b1b7e, 0x0a00a033, 0x4e7e1ab2, 0x74890069, 0x913c0038, 0x00903398, + 0x7e1ab20a, 0x8900194d, 0x94010068, 0x00df0f10, 0xbc400000, 0x00d90009, 0xff008000, 0x9ffa9509, + 0xff948f01, 0x040090fe, 0x800000d9, 0x9509ff00, 0xdf000fbc, 0x11000000, 0x89019ffa, 0x3c000324, + 0x24332891, 0x50892901, 0x1ab20100, 0x7e0009bc, 0xd9006502, 0x00800000, 0xce9509ff, 0x94f00099, + 0x0a0bf430, 0x00032889, 0xfb19923c, 0x02af9421, 0xbb05a994, 0x2c8f029f, 0xe4bd0003, 0xbd909fbc, + 0x909fa0f4, 0x999001ee, 0x07e4b304, 0x039c89f8, 0xa99fbc00, 0x0003ac89, 0x89a99fbc, 0x3c0003bc, + 0xc489a99f, 0x9fbc0003, 0x8900f8a9, 0x3c0003bc, 0x260f989a, 0x4f009033, 0x0003ac8e, 0xb0f8eabc, + 0x0cf406f6, 0x01f9901d, 0x94a9e9bc, 0x9abb03a9, 0x909fbc02, 0x00032c8f, 0x3ef8f9bc, 0x940029b6, + 0xaf9402a9, 0x02f9bb05, 0x00032c89, 0xbff0f9bc, 0xbc0109ff, 0xa9b8a9e9, 0xb6000248, 0x9ffa0294, + 0xb2f4bd01, 0xb800f8fa, 0x000248a9, 0xce0294b6, 0x9c8f009d, 0xfabc0003, 0x06e6b0e8, 0x90360cf4, + 0xf9bc01e9, 0x03af94a9, 0xbc02fabb, 0x2c8ef0fe, 0xedbc0003, 0x03c08ff9, 0x0794b300, 0x3c01090b, + 0x00f8a9f9, 0xf93c943d, 0x89f4bda9, 0xbc0003c4, 0x00f8a99f, 0x0003bc89, 0xf8a99b3c, 0x02a99400, + 0xaf9422f9, 0x12f9bc05, 0x00032c89, 0x981019bc, 0xa2b20510, 0x003b517e, 0x0bf40aa6, 0x3ea43d09, + 0x98002a57, 0x2ab20610, 0x003b5a7e, 0x9cf00aa6, 0x0196f00b, 0xfb019ac6, 0x02a99421, 0xaf9422f9, + 0x12f9bc05, 0x00032c89, 0x981019bc, 0xa2b20310, 0x003b3f7e, 0x0bf40aa6, 0x3ea43d09, 0x98002a93, + 0x2ab20410, 0x003b487e, 0x9cf00aa6, 0x0196f00b, 0xfb019ac6, 0xb212f921, 0x2a597ea0, 0xb2a13200, + 0x2a1d7e0a, 0x02099400, 0xbb050f94, 0x2c8902f9, 0xf9bc0003, 0x0ef9bff0, 0x0694b309, 0x00163028, + 0x300bfcf0, 0x9cf000a6, 0x00f0330b, 0x33030e0a, 0x0e120094, 0x00f43301, 0x33020e0c, 0x0e060094, + 0x89eab20f, 0xbc0003c4, 0x11fb099e, 0x9402af94, 0x9fbb05a9, 0x032c8f02, 0x909fbc00, 0x9fa0060f, + 0x01287089, 0xac9432f9, 0xbca1b20f, 0xb4bdc0c9, 0x0d03a994, 0x229abc01, 0x800000d3, 0x032c8000, + 0x95c3ff00, 0xc70099ce, 0x9fc7509e, 0x0ae4b668, 0xb63f94f0, 0x2dbc06f4, 0x05f9fda0, 0xfd01b9c4, + 0xfe9405fe, 0x0094b310, 0xa90fbc0b, 0x002b5c3e, 0x90980abc, 0xe9ff01dd, 0xa909bc95, 0x9001bb90, + 0xb4b340cc, 0x1ab2bf04, 0x003b3f7e, 0x8f021994, 0x9400032c, 0x09bb0510, 0x000fbc02, 0xb2030ab5, + 0x3b487e1a, 0x040ab500, 0x517e1ab2, 0x0ab5003b, 0x7e1ab205, 0x89003b5a, 0xbd00039c, 0x199fbcf4, + 0x0003ac89, 0xbc060ab5, 0x010f199f, 0x0003bc89, 0xfb199f3c, 0x27888931, 0xb299bf00, 0x0094b3af, + 0x3b688925, 0xb399bf00, 0xa6100194, 0x089cf0ab, 0x3e0196f0, 0x3d002bfb, 0xf4fba6a4, 0x010a2b0d, + 0x90b300f8, 0xa43d0801, 0x688900f8, 0x99bf003b, 0x0f0194b3, 0xfba6a43d, 0x320e0cf4, 0xa600f89a, + 0x089cf0ab, 0x00f89a32, 0x0038d089, 0xa4b6e4bd, 0xa0abbc02, 0x89a99ebc, 0xbc002910, 0x3089a99e, + 0x010d003a, 0x89a99dbc, 0x8f003b80, 0xbc00ffff, 0x6c89a99f, 0x9d3c003b, 0x3bd089a9, 0xa99e3c00, + 0x00369089, 0x89a99ebc, 0xbc0037e8, 0x3089a99e, 0x9ebc0038, 0xf900f8a9, 0x02b99402, 0xae94d0b2, + 0x04bd9404, 0xb006af94, 0x1df400c6, 0xf0efbc1d, 0x8e909dbc, 0xbc003550, 0xcfc4909f, 0x909ebc1f, + 0x3e20f5f0, 0xbc002c9d, 0x9dbcf0ef, 0x35508e90, 0x909fbc00, 0xbc01cfb9, 0xf4f0909e, 0x019fb51f, + 0x9404b994, 0xbf9406ad, 0xf0f9bc02, 0xbc04a994, 0xf9bc909d, 0x355089f0, 0x010eb900, 0xf0f0f9bc, + 0x94bd0fe4, 0xb502feb5, 0x01fb03f9, 0xb1b232f9, 0x087ea0b2, 0x2889002c, 0x050d003a, 0x94899da0, + 0x0ab20039, 0x04b6140f, 0x0001bc02, 0x89099fbc, 0xa000382c, 0x3760899d, 0xbcf4bd00, 0xd889099f, + 0xf10e0039, 0x89099ebc, 0x8200278c, 
0xbc003710, 0x1bb2099e, 0x003b2883, 0xbc092ebc, 0xecb2093e, + 0x577eedb2, 0x20bc002c, 0x3830bcf8, 0x00387889, 0x890993bc, 0xbc003954, 0x31fb099f, 0x8e20a9c4, + 0xf4003a2c, 0xa9c40a0b, 0x2d503e1f, 0x01a9b900, 0xbfb9e9a0, 0x3c208901, 0xf89fa000, 0x9832f900, + 0xc19802c2, 0xb2b3b201, 0x2c087ea0, 0xb21ab200, 0x2d3c7e2b, 0x3c208900, 0x3a2c8f00, 0xbf9ebf00, + 0x387889ff, 0x0204b600, 0xbc0003bc, 0x5489099f, 0x9ebc0039, 0x9431fb09, 0x9bbc02a9, 0x37a489c0, + 0xf89cbc00, 0x0036d089, 0x89e89cbc, 0x0d003a28, 0x899da001, 0xf4003994, 0x22f9ec30, 0xfa90a1b2, + 0xc99abc02, 0x00371089, 0x9202e090, 0x9ebc02ee, 0x382c89c9, 0x92b2b200, 0x288b02ff, 0x9da0003b, + 0x89c9bfbc, 0xbc003760, 0x1609c990, 0xf416f6b0, 0xeb090c1c, 0x1ff4f9a6, 0xbcea0908, 0xe6b0c9b9, + 0x0f1ef400, 0xbd021994, 0x9092bce4, 0x002e1f3e, 0xe9a6f209, 0x94121ff4, 0xf10e0219, 0x8f9092bc, + 0xbc003710, 0xa6b099fe, 0x0f1df416, 0x0e021994, 0x9092bc16, 0x002e473e, 0xa9a6eb09, 0x94121ff4, + 0xea0e0219, 0x8f9092bc, 0xbc003994, 0x06b099fe, 0x0f1ef400, 0xbd021994, 0x9092bce4, 0x002e6f3e, + 0x09a6f209, 0x94121ff4, 0xf10e0219, 0x8f9092bc, 0xbc003760, 0x1f9499fe, 0x3b288902, 0xf0f2bc00, + 0x89e89fbc, 0xbc003710, 0xd889089f, 0x9ebc0039, 0x278c89f9, 0xbc1ab200, 0xecb2f990, 0x0db22bb2, + 0x002c577e, 0x94042994, 0x2f94061e, 0xf0f9bc02, 0xbc041994, 0xf9bc909e, 0x355089f0, 0xf0f9bc00, + 0x4dfef9bf, 0x0cdd9001, 0xd9a01ab2, 0xb201f998, 0xb5dcb22b, 0xf99801d9, 0x02d9b502, 0xb503f998, + 0xff9803d9, 0x04dfb504, 0x002d5d7e, 0x891425fb, 0xb6003878, 0xabbc02a4, 0xa99cbca0, 0x00395489, + 0xf8a99dbc, 0x02a99400, 0x9bbc42f9, 0x387889f0, 0x289fbc00, 0x0036908e, 0xf40026b0, 0x29c40d1d, + 0x2095f01f, 0x002f2e3e, 0xf00129b9, 0xe9bc1f94, 0x02a994f9, 0x89e09bbc, 0xbc003954, 0x3089489e, + 0xe88f0038, 0x9cbc0037, 0x0149b9e9, 0xbc0f94f0, 0xff89e9f9, 0xc9a600ff, 0x89650bf4, 0xbc003b6c, + 0xd93fd0e9, 0x8fffc0c4, 0x33003b80, 0x3d0f0194, 0xe9f0bc94, 0x853ed920, 0xfebc002f, 0xf409a698, + 0xf0bc060d, 0x02a994e9, 0x003a3083, 0xbc109bbc, 0x0ab2b831, 0x002bb57e, 0x2500a033, 0x2ab20bb2, + 0x0076ce7e, 0x0038d089, 0x9abc0bb2, 0x7e4ab219, 0x890076ce, 0xbc002910, 0x9abc1930, 0x8941fb19, + 0x94003690, 0xfbbc02af, 0xa89fbcf0, 0x0037e889, 0x89e89fbc, 0xbc003830, 0xa4f0989f, 0x0fe4f03f, + 0xb618a4b6, 0x94f110e4, 0xaefdffff, 0x05a9fd05, 0x808900f8, 0xa4b6003b, 0xa0abbc02, 0xf1a89abc, + 0xf8ef00a5, 0x00a6b000, 0x7e091df4, 0xf800768d, 0x08a4b600, 0x00768d7e, 0xf808a7b6, 0x00a6b000, + 0xb6121df4, 0x8d7e08a4, 0xaa900076, 0x08a7b6ff, 0x8d7e00f8, 0x00f80076, 0x7e08a4b6, 0x9000768d, + 0xa7b67faa, 0x8900f808, 0xbf0037a0, 0xb342f999, 0x94230090, 0x308f02a0, 0xd08e003a, 0xa4820038, + 0x10830037, 0xd0840029, 0x94b30036, 0xab3e6002, 0x30890030, 0xa094003a, 0x000bbc02, 0x891890bc, + 0xbc0038d0, 0x1bb2a890, 0x0030057e, 0x0037a489, 0x89099abc, 0xbc002910, 0x1bb2a890, 0x0030057e, + 0x0036d089, 0x3e099abc, 0xbc0030e8, 0xf0bc000b, 0xa8e0bc18, 0x1d7e1bb2, 0x2abc0030, 0xa830bc09, + 0x1d7e1bb2, 0xe53e0030, 0x0bbc0030, 0x18f0bc00, 0xb2a8e0bc, 0x30387e1b, 0x092abc00, 0xb2a830bc, + 0x30387e1b, 0x094abc00, 0x42f941fb, 0xb202e398, 0xb4b2b2a1, 0x40340690, 0x01e0981c, 0x18f4cda6, + 0x7e9cb237, 0xb2002f05, 0x7e3bb20a, 0x89002d3c, 0x8e003c20, 0xbf003a2c, 0x89eebf9d, 0x94003878, + 0xa43d021f, 0xbcf0f2bc, 0x5489f99e, 0x9dbc0039, 0x31683ef9, 0x7e9cb200, 0xb2002f05, 0x7e2bb21a, + 0x0a003047, 0xf44a2601, 0xa48f211b, 0x19940037, 0x9092bc02, 0x8fc8f9bc, 0xbc0036d0, 0x1ab2d8f9, + 0x577e2bb2, 0x4a32002c, 0xa99441fb, 0x8f42f902, 0xbc003bd0, 0x0fbc009b, 0xb2993f90, 0xb2b4b2a3, + 0x009d33c1, 0xd88d00c3, 0x94890039, 0xd0bc0039, 0x9890bce8, 
0x1ff4e9a6, 0x278c8926, 0xf890bc00, + 0x00376089, 0xa69890bc, 0x131cf4f9, 0x003a2889, 0xe9bc99bf, 0x09d9bc90, 0x0031e33e, 0x00278c8b, + 0x00382c89, 0xbf023e94, 0xe0e4bc9c, 0x003b288d, 0xbcf8bebc, 0xd889d8de, 0xfcbc0039, 0xe99dbcf0, + 0xb2e9bfbc, 0xb23ab21c, 0x2f057e4b, 0x02399400, 0x0039d880, 0x812094bc, 0xbc00278c, 0x12bcc802, + 0xb23ab2d8, 0x2c577e4b, 0xe802bc00, 0x89d812bc, 0xbc003994, 0x7889f892, 0x9ebc0038, 0x39548929, + 0x299dbc00, 0x1ef4efa6, 0x37608946, 0x9892bc00, 0x1ef4d9a6, 0x3bd0893a, 0x2029bc00, 0x29200109, + 0x00326c3e, 0x002f057e, 0x4bb23ab2, 0x0030477e, 0x0037a489, 0x89c890bc, 0xbc0036d0, 0x3ab2d890, + 0x577e4bb2, 0x010a002c, 0x00326e3e, 0x41fba43d, 0xb604bf94, 0xbfbc02b4, 0x35508fb0, 0x06ae9400, + 0xbc04a4b6, 0xbabca0ae, 0xb0bfbcb0, 0x9ab2bfbf, 0xbf989fa0, 0x019fb501, 0xb502bf98, 0xbf98029f, + 0x039fb503, 0xb504bb98, 0x00f8049b, 0x01738089, 0xbc0fa4b6, 0x00d9a0a9, 0xfd008000, 0xa9ce05a9, + 0xfdfe0f00, 0xbfc4049f, 0x059ffd01, 0x4f01a9fa, 0xa9ce50b7, 0x0199c700, 0xa601ff92, 0x0b9cf09b, + 0x0c00f0b3, 0xee009033, 0x0032f23e, 0x90331b0a, 0xa4bd0600, 0xf08900f8, 0x9abc0004, 0xb3aeb2f8, + 0x0a2b00f0, 0xf4faa601, 0x2089431b, 0x9ebc0004, 0xf0020a98, 0x99920394, 0x0196b002, 0x892e0cf4, + 0xbd002784, 0xe99f3ca4, 0x208900f8, 0x9abc0004, 0x0394f098, 0x0b0190b3, 0x9aa6030a, 0x3d0e1bf4, + 0x278489f4, 0x3ca4bd00, 0x00f8e99f, 0x00042089, 0x89f89abc, 0xbc000500, 0xaeb2d89a, 0xb302f995, + 0xb03001d0, 0x08f401d6, 0xb3010a0d, 0x3e5c03d4, 0xf00033ab, 0x040a0194, 0x8f4f0bf4, 0x3d003950, + 0xe9f93c94, 0x00290c8f, 0x3ee9f93c, 0xf00033a7, 0x050a0294, 0x3d330bf4, 0x39508f94, 0xe9f93c00, + 0x00290c89, 0xbde99d3c, 0xf000f8a4, 0x060a0894, 0x0f170bf4, 0x39508901, 0x3ca4bd00, 0xf43de99f, + 0x00290c89, 0xf8e99f3c, 0x04208900, 0xf89abc00, 0x00051089, 0xb2e89abc, 0x07f995ad, 0x1901e0b3, + 0xf401e6b0, 0x010a2608, 0x2c02e4b3, 0x0a0494f0, 0x33fa3e07, 0x0294f000, 0x90b3080a, 0x50891a00, + 0xa4bd0037, 0xf8d99ebc, 0x89f4bd00, 0xbc003750, 0xa4bda99f, 0x208e00f8, 0xea3c0005, 0x33afb298, + 0xbd080094, 0x8900f8a4, 0xbc000420, 0x0e0a989a, 0xf44094f0, 0x01090a0b, 0xe93ca4bd, 0x8900f8f9, + 0xbc000420, 0x2489e89a, 0x9abc0005, 0xc7090df8, 0xefff4aee, 0xf49fa694, 0x94890c1b, 0xd4bd000a, + 0xb2a99ebc, 0x8f00f8da, 0xbc000554, 0x6489f8fa, 0x9abc0005, 0xb6aeb298, 0xa4bd0cf4, 0xfd1094b6, + 0x848f059f, 0x95f0000a, 0xe9f9bc12, 0x208900f8, 0x9abc0004, 0x053489e8, 0xf89abc00, 0xeec70a0d, + 0x94efffef, 0x1bf49fa6, 0x0aa4890c, 0xbcd4bd00, 0xdab2a99e, 0x208900f8, 0x9abc0004, 0x054489e8, + 0xf89abc00, 0xeec70b0c, 0x94efff97, 0x1bf49fa6, 0x0aa48d1d, 0x98dabc00, 0xdf10e4b6, 0x32000000, + 0x9ffdc4bd, 0x059efd05, 0xb2a9d9bc, 0x8900f8ca, 0xbc0005a4, 0x3089f89a, 0x9abc0004, 0xff0c0d98, + 0x9ec494f9, 0xf4efa601, 0x748f121b, 0xfabc000a, 0xffd4bd98, 0xf9bc95e9, 0xf8dab2a9, 0x05b48900, + 0xe89abc00, 0x00043089, 0x0d989abc, 0x01efc40d, 0xff0295b6, 0x9ea694f9, 0x8e151bf4, 0xbc000a74, + 0x94b6f8ea, 0xfdd4bd02, 0xe9bc059f, 0xf8dab2a9, 0x0a748d00, 0x04308900, 0xe8dabc00, 0x8f989abc, + 0xc7000574, 0xf9bc0899, 0x0894b6a9, 0xbc059efd, 0x00f8a9d9, 0x000a748d, 0x00043089, 0xbce8dabc, + 0x848f989a, 0x99c70005, 0xa9f9bc09, 0xfd0994b6, 0xd9bc059e, 0x8d00f8a9, 0x89000a74, 0xbc000430, + 0x9abce8da, 0x05948f98, 0x0a99c700, 0xb6a9f9bc, 0x9efd0a94, 0xa9d9bc05, 0x148900f8, 0x9abc0006, + 0x8f00f8a8, 0x94017100, 0x648e0fa9, 0x9fbc0009, 0xe8eabc90, 0x800000df, 0xf59fff00, 0xdf01fefa, + 0x00800000, 0x8e049990, 0xff000934, 0xeabcf59f, 0x01fefae8, 0x800000df, 0x0c999000, 0x0009448e, + 0xbcf59fff, 0xfefae8ea, 0x0000df01, 0x99920080, 0x09948e04, 0xf59fff00, 0xfae8eabc, 
0x00df01fe, + 0x92008000, 0x9ffd0499, 0x09848f05, 0xf8fabc00, 0xf8019ffa, 0x06048900, 0xf89abc00, 0x0005f489, + 0x89c89abc, 0xbc0005c4, 0xd489e89a, 0x9abc0005, 0x05e489d8, 0x989abc00, 0xf001c4f0, 0xe4f001f4, + 0x0194f001, 0xb60a94b6, 0xf4b608e4, 0x01d4f002, 0xb605fcfd, 0xfefd09d4, 0x05fdfd05, 0x8905f9fd, + 0xb601768c, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x01affa05, 0x248f00f8, 0xfabc0006, 0x06548fb8, + 0xe8fabc00, 0x0008748f, 0x8fd8fabc, 0xbc0008c4, 0xd48fc8fa, 0xfabc0008, 0x064489f8, 0x989abc00, + 0xf003b4f0, 0xf4b61ff4, 0x0f94f017, 0xb607e4f0, 0xe4b60294, 0x059bfd07, 0xfd07d4f0, 0xd4b6059e, + 0xffc4f00a, 0xb6059dfd, 0x9cfd0fc4, 0x059ffd05, 0x0176848f, 0xbc0fa4b6, 0x00dfa0af, 0xfd008000, + 0xa9fa05af, 0x8900f801, 0xb6010004, 0xa9bc0fa4, 0x0000d9a0, 0xa9fd0080, 0x00aace05, 0xc7080049, + 0xa0b35caa, 0xa6b04202, 0x140cf402, 0xa0b32009, 0x00493600, 0x01a4b301, 0x37643e2d, 0x04a0b300, + 0x40004915, 0xf404a6b0, 0xa4b31d08, 0x5a3e1805, 0x00890037, 0x643e0200, 0x00890037, 0x643e1000, + 0x94bd0037, 0x00f89ab2, 0x1489010f, 0x9fbc000a, 0xf8a4bda9, 0xb662f900, 0x60890fa4, 0xc2320128, + 0xbd00a9bc, 0x109990c4, 0x800000d6, 0xa0a9bc00, 0xff44c005, 0xffff83f0, 0x01bf98f8, 0x9802bd98, + 0x243303b1, 0xa6ff0b00, 0x37b03ee5, 0xe506ff00, 0xf000e9ce, 0x95fd3ff4, 0x059ffd04, 0xb60fdfc4, + 0x94fd08f4, 0x059ffd04, 0xfd071fc4, 0xf4b60493, 0x059ffd10, 0x9001e9fa, 0xaa9001cc, 0x14bb9040, + 0xb3400090, 0xfbb604c4, 0x02af9461, 0x003a988e, 0xbcf0fbbc, 0x52f9d8ef, 0x001a588e, 0xdf94a2b2, + 0x899ab204, 0xb2003950, 0x58923cb4, 0xd99434bd, 0xbc14bd02, 0x04bd909f, 0xbdf09ebc, 0x17d88eb4, + 0xbc030c00, 0x5b3e909e, 0x54330038, 0x93981600, 0x03919804, 0x98029098, 0x9cbf019b, 0x00384e3e, + 0xf398fcbf, 0x03f19804, 0x9802f098, 0xdd9001fb, 0x14999001, 0xb314ff90, 0x0e1100c0, 0x00503320, + 0xa6010e06, 0xc508f4de, 0x8f022994, 0xbc003a98, 0xfdbc9094, 0x04a3b599, 0xb503a1b5, 0xabb502a0, + 0xfbaca001, 0x00188f51, 0x0fa99401, 0xdf909fbc, 0x00800000, 0xcee59fff, 0x848900ef, 0x9a3c0027, + 0x01943398, 0x0000d910, 0xf9ff4000, 0x38b93e95, 0xffffd900, 0xf9ffbfff, 0x01e9fa94, 0x02f900f8, + 0x800000df, 0x0fa99400, 0x0100188e, 0xff909ebc, 0x0cce059f, 0xf899b800, 0x9fff002d, 0x00beceb5, + 0x0ffc99b8, 0xd59fff00, 0x8900dfce, 0x3c003950, 0x9433989a, 0x00d91601, 0xf0100000, 0xf5f010e5, + 0x95c9ff10, 0x0039183e, 0xf9fdef09, 0x04e9fd04, 0xffffffd9, 0x94c9ffef, 0xfa0109fa, 0xdffa01be, + 0x8f01fb01, 0x94010018, 0x9fbc0fa9, 0x0000df90, 0x9fff0080, 0x00efcee5, 0x00290c89, 0x33989a3c, + 0x09100194, 0x94f9ffe0, 0x3e0895f0, 0x09003954, 0x94f9ffe0, 0xf801e9fa, 0x0fae9400, 0x01001889, + 0x800000dd, 0xe0e9bc00, 0xced5edff, 0x508900df, 0x9abc0037, 0xffffd9c8, 0xf9fdf3ff, 0x03c4f004, + 0xfd1ac994, 0xdffa05f9, 0x0000d901, 0xeeb80080, 0xfd002fe8, 0xe9ce05e9, 0xffffdf00, 0xc4b6ff3f, + 0x049ffd16, 0xfa059cfd, 0x00f801e9, 0x1489f4bd, 0x9fbc000a, 0xbd00f8a9, 0x0a2489f4, 0xa99fbc00, + 0xd88900f8, 0x22f9003a, 0xbc02ac94, 0xbc8920c9, 0xe0810038, 0x030f003b, 0x68899fa0, 0xae94003b, + 0xbc010d04, 0x9da000e1, 0x003a9881, 0x00278889, 0xbdb0e1bc, 0x391081f4, 0x899fa000, 0xa00037a0, + 0x3ae8899d, 0xd0e9bc00, 0x81e0e1bc, 0xbd003a88, 0xc0c1bc94, 0xbfa0ff01, 0xefa0dfa0, 0x3c99cf3c, + 0x01a0992f, 0x90019990, 0xdd9004bb, 0x04ee9004, 0xb3040090, 0x89e30494, 0x3c003a80, 0x8489a99f, + 0x9f3c0027, 0x03d489a9, 0xa99f3c00, 0x0003d889, 0x89a99f3c, 0x3c0003dc, 0xe089a99f, 0x9f3c0003, + 0x03e489a9, 0xa99f3c00, 0x000a1489, 0x89a99fbc, 0xbc000a24, 0x3489a99f, 0x9fbc000a, 0x38b889a9, + 0xa99f3c00, 0x0037e489, 0x89a99f3c, 0x3c0039d4, 0x21fba99f, 0x0003d489, 0xf8a89a3c, 0x03dc8900, + 0xa89a3c00, 
0xe08900f8, 0x9a3c0003, 0x8900f8a8, 0x3c0003e4, 0x00f8a89a, 0x00052089, 0x30989a3c, + 0xacf00096, 0x8900f80b, 0x3c002784, 0x00f8a89a, 0x0004e089, 0xf8a89abc, 0x04d08900, 0xa89abc00, + 0xc08900f8, 0x9abc0004, 0x8900f8a8, 0xbc000480, 0x00f8a89a, 0x00093489, 0xf8a89abc, 0x03ec8900, + 0xf89abf00, 0xbeefdf00, 0xec89dead, 0x9fa00003, 0x008900f8, 0x9abc0005, 0x8900f8a8, 0xbc003750, + 0x96b0989a, 0x0bacf000, 0xf801a6f0, 0x0ba48900, 0xa89a3c00, 0xa88900f8, 0x9a3c000b, 0x8900f8a8, + 0xbc000440, 0x00f8a89a, 0x00045089, 0xf8a89abc, 0x04608900, 0xa89abc00, 0x708900f8, 0x9abc0004, + 0x8900f8a8, 0xbc000614, 0xb4b3a99b, 0xbdb21109, 0x020bc4bd, 0x3e0dac4e, 0xb2003b84, 0x0bc4bdbd, + 0x09c44e02, 0x0005e67e, 0xafb200f8, 0x5933b0b3, 0xf433b6b0, 0xb0b3130c, 0xb0b32430, 0xb4b31831, + 0xc93e6e0c, 0xb0b3003b, 0xb4b35634, 0xd83e6236, 0x4489003b, 0xdc3e000a, 0x4089003b, 0x9abc0004, + 0x045089c8, 0x3bf03e00, 0x0a648900, 0xc89abc00, 0x000a5489, 0x003bf03e, 0x000ae489, 0xbdd89fbc, + 0x3bf33ec4, 0x0ab48900, 0xc89abc00, 0x000ac489, 0x4ed89fbc, 0xe67e09c4, 0x00f80005, 0x000ad489, + 0xbdd89abc, 0x09c44ec4, 0x0005e67e, 0xe48900f8, 0x9abc0006, 0xb262f9f8, 0x00f0b3a3, 0xa6010a10, + 0x8b1bf5fa, 0x3c503e00, 0x39508900, 0x989a3c00, 0x14019033, 0x001acb7e, 0x7fd9010f, 0xa60206cc, + 0x050cf4a9, 0x0802f4bd, 0x1d00f0b3, 0x003c633e, 0x00395089, 0x3398933c, 0x020c0194, 0x3ef4bd08, + 0x02003c65, 0x01f9c404, 0x940f3094, 0x14bd1794, 0x01287089, 0x800000d6, 0x0009bc00, 0x7fffffd5, + 0xf506ffff, 0xfd00f9ce, 0x94fd0495, 0x01f9fa05, 0x21bc060b, 0x400090d4, 0xb2081190, 0x4ec4bd3a, + 0xe67e09c4, 0x14b30005, 0xa4bddb20, 0x30f461fb, 0xb282f9c4, 0xf830f4a2, 0xa701b9b3, 0x01b6b000, + 0xb31108f4, 0x02f002b9, 0xcd04bdb3, 0x3d5f3e03, 0x38b88900, 0x3c010f00, 0xa994a99f, 0x04ae9406, + 0x8930e9bc, 0xb20027cc, 0x3039bce5, 0x0039208f, 0x00391089, 0xd0b2c6b2, 0xbc505fbc, 0x14bd40e9, + 0xe8870708, 0x508e0003, 0xe23c0039, 0x01943398, 0xb501a016, 0x08b50101, 0x0301b502, 0x3e0401b5, + 0x0f003d35, 0xb5010904, 0x0fb50101, 0x0301b502, 0xa00409b5, 0x98723c01, 0x0e019433, 0x3bb20ab2, + 0xe87e140c, 0x6ebf0076, 0x90143390, 0x00900466, 0x904ea014, 0x45a60444, 0x3eae1bf4, 0x890040a3, + 0x3c003950, 0x200f9892, 0x330ef1b0, 0x09090090, 0x0e91b001, 0x8f042e94, 0xbc003a98, 0xc089f0ef, + 0xf1b00038, 0x022f9413, 0xb090f9bc, 0x22bc1191, 0x9092bc90, 0xb00694b6, 0x88890d91, 0xf9bc003a, + 0x3b808980, 0x90e9bc00, 0x890f91b0, 0xbc003910, 0x44fe90e9, 0x1091b001, 0xd889d1b2, 0xc1b0003a, + 0x70f9bc12, 0xe88f34bd, 0x4490003a, 0x60efbc54, 0x9d33893f, 0xb2009500, 0xb22ab249, 0x37e97e3b, + 0xb449bf00, 0x3bb20ed0, 0x19a02ab2, 0xb5014e98, 0x4f98011e, 0xb54eb202, 0x4998021f, 0x0319b503, + 0xbf044f98, 0x041fb519, 0xf0b449a0, 0x01199813, 0x49b5fcbf, 0x021f9801, 0x98024fb5, 0x49b50319, + 0x1290b403, 0xb5041f98, 0x99bf044f, 0x003a808f, 0x3c0091b0, 0xf130f8f2, 0x30ea7e04, 0xbf3bb200, + 0xb28a2060, 0x2fbf7e2a, 0x0de0b400, 0x8f010990, 0xa0002950, 0x9033bc69, 0xb69093bc, 0xe9bc0494, + 0x9090bc90, 0x3f99fabc, 0x015d3385, 0x80890105, 0x923c003a, 0x000d3308, 0xe0b400be, 0x33e93f11, + 0xb23b0194, 0x7e3bb22a, 0xb4002d97, 0x49b211f0, 0x3bb22ab2, 0x707ef020, 0x49bf0032, 0x4e9819a0, + 0x011eb501, 0xb5024f98, 0x4998021f, 0x0319b503, 0xb5044e98, 0x1d3e041e, 0x793f003f, 0x61009433, + 0xb212f0b4, 0xbf2ab23b, 0x316a7efc, 0x2049b200, 0xb23bb27a, 0x32707e2a, 0xb249bf00, 0xa03bb22a, + 0x014e9819, 0x98011eb5, 0x1fb5024f, 0x03499802, 0x980319b5, 0x1eb5044e, 0x7e60bf04, 0xbc002fbf, + 0x508e9033, 0x0f900029, 0xb46fa001, 0x93bc0df0, 0x0494b690, 0xbc90f9bc, 0xeabc9090, 0x337f3f99, + 0xb44f01f4, 0xe9bf0fe0, 0x0038b88e, 
0x3e29ef3c, 0xb2003f65, 0xb22ab249, 0x32707e3b, 0xa049bf00, + 0x014e9819, 0x98011eb5, 0x1fb5024f, 0x03499802, 0xb50ff0b4, 0x4e980319, 0x041eb504, 0xb88ef9bf, + 0xe53c0038, 0x10f0b429, 0xef0095f1, 0x90b4f9a0, 0x10e0b40f, 0x9011f0b4, 0x99900133, 0x04ee9004, + 0xb00f91b0, 0x90b410e1, 0x13e0b412, 0xb001ff90, 0x999011f1, 0x04ee9004, 0xb01291b0, 0x889013e1, + 0x14119001, 0x90017790, 0x3db30466, 0x3efe2604, 0x8f0040a3, 0x94003910, 0xae9404a9, 0x809fbc02, + 0x003a888f, 0xb0f0efbc, 0xaabc0bf1, 0xf0fabcf0, 0xb006f4b6, 0xd88f0cf1, 0x47fe003a, 0x50efbc01, + 0x8e14c1b0, 0x8f003ae8, 0xb2003b80, 0x409ebcd3, 0xbd609fbc, 0x54779014, 0xb21490b4, 0xbf2ab21b, + 0x0499909c, 0x7e1491b0, 0xb200316a, 0xb25a201b, 0x7e2ab279, 0xbf003270, 0xb21bb27e, 0x983ea02a, + 0x3fb5017f, 0x02799801, 0x980239b5, 0x3eb5037e, 0x047f9803, 0xbf043fb5, 0x2fbf7e40, 0xbc5e3f00, + 0x0f909011, 0xb44fa001, 0x91bc0cf0, 0x0494b690, 0xbc143390, 0x508f90f9, 0x90bc0029, 0x99fabc90, + 0x1d01e433, 0x3c0bf0b4, 0x943398f1, 0x69bf1300, 0x0038b88f, 0xf129fe3c, 0xa0ef0095, 0x01119089, + 0x90015590, 0x66900444, 0x04889004, 0x6c041db3, 0x40a33eff, 0x0c190b00, 0x02617e0f, 0x3e0f0a00, + 0xbd0040a5, 0x0830f4a4, 0xf93c85fb, 0xb2b0b252, 0xb2d2b2c1, 0x7ea5b2e3, 0xb2003707, 0x00a4b3a4, + 0x0b5ab214, 0x7e150c19, 0x0a000261, 0x41173e15, 0xb21db200, 0xb23bb20c, 0x71677e2a, 0x03e84c00, + 0x727ed4bd, 0x4c920072, 0x7ed4bd00, 0x92007272, 0xbf9301ae, 0x00f4b300, 0xfde6b10b, 0x130df407, + 0x190b5ab2, 0x617e140c, 0x140a0002, 0x0041173e, 0xa00790b4, 0xfba4bd9a, 0xb2efb251, 0xbd22f9de, + 0xb2b0b2d4, 0x4ca2b2c1, 0xeab203e8, 0x677efbb2, 0xe0b40071, 0xb21db204, 0xbbf4bd0c, 0xbfbb02ae, + 0x71677e03, 0x42408c00, 0x7ed4bd0f, 0xb4007272, 0xf4bd05e0, 0xbb02aebb, 0xae9203bf, 0x00bf9301, + 0x1500f4b3, 0xe9a6fe09, 0xb20d0cf4, 0x0daeb2bf, 0x41883e01, 0x0b2ab200, 0x7e130c19, 0x0a000261, + 0x41aa3e13, 0x01dd9000, 0x9d01ff95, 0xefff01ee, 0xf41bf495, 0xb406c0b4, 0xdcbc0790, 0x7e9ca0c2, + 0xb400721e, 0x9aa00890, 0x21fba4bd, 0xa99402f9, 0xbcd4bd03, 0x010ec29a, 0x00032c80, 0xc4f0cebc, + 0x1bf401d9, 0xf80fbc0a, 0x0041d53e, 0x90980fbc, 0x9f9501ee, 0x4af9c710, 0xc403b9b5, 0xb9b53ff9, + 0x66f9c701, 0x9002b9b5, 0xbb9001dd, 0x04d4b314, 0x7eb43dcf, 0xfb002a14, 0xec30f401, 0xc0b282f9, + 0xb9b3a3b2, 0xb000a401, 0x08f401b6, 0x02b9b311, 0xbdb3013d, 0x3e01a804, 0x890042a6, 0x3c0003e8, + 0x9433989a, 0xcbb20a01, 0x0041ac7e, 0x90063994, 0x3e94013f, 0x06fd9404, 0xb620e9bc, 0xcc8904f4, + 0xfdbc0027, 0x2029bc40, 0x884049bc, 0xbd003950, 0x06040714, 0x8e070501, 0x3c0003e8, 0x943398e3, + 0x2ab21201, 0x140c0bb2, 0x0076e87e, 0x0042973e, 0x3398833c, 0xa0160194, 0x0101b501, 0xb50205b5, + 0x01b50301, 0x42973e04, 0xb501a000, 0x07b50101, 0x0301b502, 0x900406b5, 0x22901400, 0xf424a614, + 0xc83eb81b, 0xc0890043, 0xe88e0038, 0x3f94003a, 0x80f9bc02, 0xbc043994, 0x988e509e, 0x42fe003a, + 0x409ebc01, 0x888914bd, 0xd88e003a, 0xf9bc003a, 0x60febc70, 0x3d242290, 0x0e94bdf4, 0x197f3c01, + 0xa0196f3c, 0x3c59a049, 0x1bb2198e, 0x3ab229b2, 0x0037e97e, 0x1bb22fbf, 0x2cb23ab2, 0x29980fa0, + 0x04449001, 0x90045590, 0x09b50111, 0x022e9801, 0x98020eb5, 0x0ebf032f, 0x98030fb5, 0x09b50429, + 0x982ea004, 0x2fb5010f, 0x02099801, 0x980229b5, 0x2eb5030e, 0x040f9803, 0xb5140090, 0x5d7e042f, + 0x14b3002d, 0xc83e9504, 0xd88e0043, 0xa994003a, 0x04a29402, 0x8f609ebc, 0x8e003ae8, 0xfe003a88, + 0xc1b20144, 0xbd202fbc, 0x509ebc04, 0x84bd743d, 0xb2244490, 0x09573c0b, 0xa009673c, 0x7e3ab228, + 0xb2002ccc, 0xb20bb249, 0x32707e3a, 0x904fbf00, 0x22900100, 0x981fa004, 0x19b50149, 0x024e9801, + 0x98021eb5, 0x1fb5034f, 0x04499803, 0x900419b5, 0x04b31411, 
0xc83ec104, 0x190b0043, 0x617e0f0c, + 0x0f0a0002, 0x0043ca3e, 0x85fba4bd, 0x3d02f914, 0x37e48f94, 0x0fab9400, 0xb2a9f93c, 0x39d48fa0, + 0x3cd4bd00, 0xffdca9f9, 0x89f7ffff, 0xda012864, 0x00800000, 0xffe0b9bc, 0xf9cef5ea, 0x049cfd00, + 0x9001f9fa, 0xee9001dd, 0x04d4b340, 0x309c89ee, 0x0000df01, 0xb9bc0080, 0xf5bfffb0, 0xde00f9ce, + 0xbfffffff, 0xfa049efd, 0x00df01f9, 0xb8008000, 0x023098bb, 0xcef5bfff, 0x004e00f9, 0x049efdf8, + 0xdc01f9fa, 0x00800000, 0x30a4bbb8, 0xc5bcff00, 0x8f00c9ce, 0xbc000bac, 0xbc8fe8f0, 0xf0bc000b, + 0x1fff8fd8, 0x1fe4f0fc, 0xb6049ffd, 0x034f0de4, 0x059efde0, 0x07ffd4f1, 0xb6049ffd, 0x9dfd02d4, + 0x01c9fa05, 0x800000d9, 0x88beb800, 0xe9fd0210, 0x00e9ce05, 0xffffffdf, 0x049ffd07, 0x000000df, + 0x059ffd40, 0xb201e9fa, 0x2aec7e0a, 0x3bc08f00, 0xf8f0bc00, 0x000a2489, 0x170b0ab2, 0x0c099fbc, + 0x02617e01, 0xfba4bd00, 0xb222f901, 0xb2a1b2d2, 0x09c44cc0, 0x0005947e, 0x547e1ab2, 0x0aa00006, + 0x6b7e1ab2, 0x2aa00006, 0x22f921fb, 0xd2b2c0b2, 0x0b7ea1b2, 0x1ab20005, 0x0005667e, 0x20b30aa0, + 0x1ab20c00, 0x00057d7e, 0x21fb2aa0, 0xa0b202f9, 0x0a31b9b3, 0x31b6b002, 0xb33d0cf4, 0x00fc24b9, + 0xf424b6b0, 0xb0b3240c, 0xb6b05b21, 0x110cf421, 0x8b0cb9b3, 0x20bdb301, 0x683e02ea, 0xbdb30045, + 0x3e02e123, 0xb30045c1, 0x011126b9, 0xd330bdb3, 0x46a23e02, 0x34b9b300, 0xb9b3025b, 0xb3029a36, + 0x02c033bd, 0x00475a3e, 0x00046089, 0xbc02ac94, 0x7089d0c9, 0xc9bc0004, 0x44c97ec0, 0x481f3e00, + 0x09248900, 0x02ac9400, 0xc9bcd4bd, 0x44c97ec0, 0x09248900, 0xf890bc00, 0x00062489, 0xbc03fec4, + 0x3489099e, 0xfec70006, 0x099ebc64, 0x00064489, 0xc750fdc7, 0x9fbc68ff, 0x06548909, 0x47ed3e00, + 0x07048900, 0x02ac9400, 0x89d0c9bc, 0xbc0006f4, 0xc97ec0c9, 0xf4890044, 0x90bc0006, 0x070489f8, + 0xd890bc00, 0x00071489, 0xbc0ffec4, 0x2489099e, 0xfec70007, 0x099ebc64, 0x00085489, 0xc770fcc7, + 0x9fbc6cff, 0x08648909, 0xffd4f000, 0x89099cbc, 0x3e000874, 0x890047ed, 0x940008a4, 0xd4bd02ac, + 0x7ec0c9bc, 0x890044c9, 0xbc0008a4, 0xc489f890, 0xfee40008, 0x9ebcffff, 0x08d48909, 0xf0fec700, + 0x89099ebc, 0x950008e4, 0xffc71cfd, 0x099fbc78, 0x0008f489, 0x0047ed3e, 0x0008b489, 0xbd02ac94, + 0xc0c9bcd4, 0x0044c97e, 0x0008b489, 0x89f890bc, 0xc70005c4, 0x9ebc08fe, 0x05d48909, 0x09fec700, + 0x89099ebc, 0xc70005e4, 0x9ebc0afe, 0x05f48909, 0x02fdc700, 0xbc01f4f0, 0x0489099f, 0xed3e0006, + 0x94890047, 0xac940009, 0xd0c9bc02, 0x00098489, 0x7ec0c9bc, 0x3e0044ea, 0x8900481f, 0x94000914, + 0xc9bc02ac, 0x090489d0, 0xc0c9bc00, 0x0044ea7e, 0x00090489, 0x89f890bc, 0xbc000914, 0x3489d890, + 0xfec40009, 0x099ebcff, 0x00094489, 0xbce8fec7, 0x5489099e, 0xfc950009, 0xf0ffc718, 0x89099fbc, + 0x95000964, 0xd4b61adb, 0x05dcfd08, 0x89099dbc, 0xbc000974, 0x1f3e099b, 0xc4890048, 0xac940009, + 0xbcd4bd02, 0xea7ec0c9, 0xc4890044, 0x90bc0009, 0x066489f8, 0x01fec400, 0x89099ebc, 0xc7000674, + 0x9ebc24fe, 0x06848909, 0x70fdc700, 0xbc68ffc7, 0x9489099f, 0xed3e0006, 0xb4890047, 0xac940009, + 0xd0c9bc02, 0x0009a489, 0x7ec0c9bc, 0x890044ea, 0xbc0009a4, 0xb489f890, 0x90bc0009, 0x073489d8, + 0x0ffec400, 0x89099ebc, 0xc7000744, 0x9ebc64fe, 0x07548909, 0x70fcc700, 0xbc6cffc7, 0x6489099f, + 0xd4f00007, 0x099cbc07, 0x00088489, 0x0047ed3e, 0x0009d489, 0xbd02ac94, 0xc0c9bcd4, 0x0044ea7e, + 0x0009d489, 0x89f890bc, 0xe40006a4, 0xbcfffffe, 0xb489099e, 0xfec70006, 0x099ebc50, 0x00077489, + 0xc71cfd95, 0x9fbc78ff, 0x07848909, 0x099dbc00, 0x00481f3e, 0x0009e489, 0xbd02ac94, 0xc0c9bcd4, + 0x0044ea7e, 0x0009e489, 0x89f890bc, 0xc7000a04, 0xf4f002fe, 0x099ebc01, 0x0009f489, 0xfb099fbc, + 0xac30f401, 0xa1b232f9, 0x0018e57e, 0x0037e48f, 0x3398f13c, 0x00bf009d, 0xf93c0109, 
0xf5a92619, + 0xb200861b, 0x65ef7e1a, 0x0f109400, 0x01287089, 0x09bc24bd, 0x0000d300, 0x03ff0080, 0x0099ce95, + 0x93fd040d, 0x40009004, 0x060094b3, 0xd2bb080d, 0x901ab204, 0x060b0822, 0xc44ec4bd, 0x05e67e09, + 0x2024b300, 0xbd020bd9, 0xfe1ab2d4, 0x00900140, 0x7e0cb260, 0x4b0044ea, 0x1ab2030a, 0x003b637e, + 0x010f0bbf, 0x0038b889, 0x0f199f3c, 0x3bc08901, 0x199fbc00, 0x030abeb3, 0xbd1ab23e, 0x48f77ec4, + 0x48f43e00, 0x06d48900, 0xb891bc00, 0xb20140fe, 0x1000901a, 0xf97e0cb2, 0x1ab20041, 0xc43d0bb2, + 0x0037757e, 0x0b0b1ab2, 0x248d010c, 0x087e00f4, 0xa4bd0017, 0xb35435fb, 0x5b010aba, 0x010ab6b1, + 0xb3140cf4, 0xb3490ab0, 0x280109ba, 0x1709b4b3, 0x0049273e, 0x0309bab3, 0x0abab32b, 0xbab35803, + 0x0a190209, 0xb200f801, 0x52847ecb, 0xb200f800, 0x4eb37ecb, 0xb200f800, 0x49b67ecb, 0x0f00f800, + 0x0a148901, 0xa99fbc00, 0x00495d3e, 0x0048217e, 0xd48900f8, 0x9a3c0039, 0x00903398, 0xf8a4bd08, + 0x3be08900, 0x04ab9400, 0x7eb0b9bc, 0xf8005419, 0x43cd7e00, 0xf400f800, 0x22f9fc30, 0xc1b2b0b2, + 0xcbb2a2b2, 0x003b637e, 0x290000b3, 0x40fe020b, 0x902ab201, 0xd4bd0c00, 0xea7e0cb2, 0x0bbf0044, + 0x1bf4b1a6, 0xbd2ab20f, 0x48f77ec4, 0x49b33e00, 0xfba4bd00, 0x300b0425, 0xf9e830f4, 0xf4a0b252, + 0x0c7eec30, 0x0ab20045, 0x0c7e0c0b, 0x0ab20045, 0x0035c37e, 0x00093489, 0x929890bc, 0x96b00499, + 0x090cf401, 0xa67e0ab2, 0x0ab20069, 0x0c7e310b, 0x74890045, 0x90bc0006, 0x04c089f8, 0x9890bc00, + 0x0bf4f9a6, 0x0b0ab213, 0x7e080c19, 0x0a000261, 0x4ead3e08, 0x06848900, 0xe890bc00, 0x00050089, + 0xa69890bc, 0x130bf4e9, 0x190b0ab2, 0x617e090c, 0x090a0002, 0x004ead3e, 0x00069489, 0x89f890bc, + 0xbc000510, 0xf9a69890, 0xb2130bf4, 0x0c190b0a, 0x02617e0a, 0x3e0a0a00, 0xb3004ead, 0xb31800f0, + 0xb21401e4, 0x0c190b0a, 0x02617e0b, 0x3e0b0a00, 0xb2004ead, 0x7e360b0a, 0x8900450c, 0xbc0009f4, + 0x94b39890, 0x010f0d00, 0x0003d489, 0x89099f3c, 0xbc000a04, 0x94b39890, 0x010f0d00, 0x0003d889, + 0xb2099f3c, 0x36357e0a, 0x05c48900, 0x05748f00, 0x9890bc00, 0xfdf8f0bc, 0x1bf4049f, 0x89010f0c, + 0x3c0003dc, 0xd489099f, 0x848f0005, 0x90bc0005, 0xf8f0bc98, 0xf4049ffd, 0x010f0c1b, 0x0003e089, + 0x89099f3c, 0x8f0005e4, 0xbc000594, 0xf0bc9890, 0x049ffdf8, 0x0f0c1bf4, 0x03e48901, 0x099f3c00, + 0x330b0ab2, 0x00450c7e, 0x000a9489, 0x89f890bc, 0xbc000884, 0x0109e890, 0xbbfff4f0, 0xf9fd049e, + 0x131bf404, 0x190b0ab2, 0x617e0d0c, 0x0d0a0002, 0x004ead3e, 0x01309c89, 0x8f0f0194, 0xbcfecf64, + 0x00d91019, 0xff008000, 0x1fbc4519, 0x004fce10, 0x000af489, 0xd9e890bc, 0x7fffffff, 0xb004f9fd, + 0x9cf000e6, 0x1f94b60b, 0xfa05f9fd, 0x3489014f, 0x90bc0007, 0x074489e8, 0xf890bc00, 0x00079489, + 0x90d890bc, 0xffbc01ee, 0x909fbc90, 0xbb039992, 0x348904e9, 0x9ebc0008, 0xf4eda609, 0x0ab21318, + 0x100c190b, 0x0002617e, 0xad3e100a, 0x5489004e, 0x90bc0007, 0x076489e8, 0xf890bc00, 0x0007c489, + 0x90d890bc, 0xff9001ee, 0x08448201, 0x90ffbc00, 0xbb909fbc, 0x2ebc04e9, 0xf4eda609, 0x0ab21318, + 0x110c190b, 0x0002617e, 0xad3e110a, 0x0ab2004e, 0x001acb7e, 0xbdd820bc, 0x0191b094, 0x090091b0, + 0x05a5950b, 0xfe0143fe, 0x33900142, 0x34229038, 0xb00291b0, 0x21b00331, 0xb20ab204, 0xbdc4bd5b, + 0x41197ee4, 0x00adb300, 0x34890288, 0xcc8f0120, 0x19bcfedf, 0x0000dc10, 0x1cff0080, 0x101fbcc5, + 0xbf00c9ce, 0xb82dbf3e, 0x002833ff, 0xb6049ffd, 0xd4f10be4, 0xe4f107ff, 0x004fffff, 0x059efdf8, + 0xfd049ffd, 0xc9fa059d, 0x08448901, 0xd890bc00, 0x91b094bd, 0x00c84901, 0x090091b0, 0x0143fe0b, + 0x900142fe, 0x22903033, 0x0291b02c, 0xb00331b0, 0x0ab20421, 0xc4bd5bb2, 0x197ee4bd, 0xadb30041, + 0xce020f00, 0x3ebf0049, 0xff8f2dbf, 0x9ffdff07, 0x0be4b604, 0x07ffd4f1, 0xffffe4f1, 0xfdf8004f, + 0x9ffd059e, 
0x059dfd04, 0x890149fa, 0xd40130ac, 0x00800000, 0x8f1019bc, 0xfffecf54, 0x1fbc9514, + 0x0099ce10, 0x0008448f, 0xc7d8f0bc, 0x99e78d9f, 0x9fbb0142, 0x0191b004, 0xb000c849, 0x0b090091, + 0xfe0143fe, 0x33900142, 0x3c229040, 0xb00291b0, 0x21b00331, 0xb20ab204, 0xbdc4bd5b, 0x41197ee4, + 0x00adb300, 0xa889018c, 0x588f0130, 0x19bcfecf, 0xa514ff10, 0xce101fbc, 0x3ebf00af, 0xac892dbf, + 0xfcc7000b, 0x099cbc8d, 0x000bbc89, 0x0142fbe7, 0x89099bbc, 0xf0fc1fff, 0xf9fd1fe4, 0x0de4b604, + 0xfde00349, 0xd4f105fe, 0xf9fd07ff, 0x02d4b604, 0xfa05fdfd, 0x340b01af, 0x0c7e0ab2, 0x74890045, + 0x90bc0007, 0x078489e8, 0x9890bc00, 0x0007f48f, 0x90f8f0bc, 0x999001ee, 0x0294b601, 0x89d4e9bc, + 0xbc000814, 0xdfa6099d, 0xb21318f4, 0x0c190b0a, 0x02617e12, 0x3e120a00, 0x94004ead, 0x248f0202, + 0x2fbc0008, 0xb25bb290, 0x0091b00a, 0xe4bdc4bd, 0x0040ab7e, 0xd900adb3, 0x00048900, 0x1019bc01, + 0x800000d9, 0xa519ff00, 0x8900aece, 0xbc000824, 0xf0b3f890, 0xf4f11500, 0x004907ff, 0x94e9fff8, + 0x3e059ffd, 0xd9004e0f, 0x80000000, 0xfa95e9ff, 0xa48901a9, 0x90bc0006, 0x0aa489e8, 0xf890bc00, + 0x9ebb0109, 0xfff4f104, 0x04f9fdff, 0xb2131bf4, 0x0c190b0a, 0x02617e06, 0x3e060a00, 0x89004ead, + 0xbc0006d4, 0xe4b3099e, 0x80890b01, 0x9e3c003a, 0x06b48909, 0xd890bc00, 0x000aa489, 0x8f9890bc, + 0xde010000, 0x00ff0000, 0xfd04fdbb, 0x9ffd049e, 0x131bf404, 0x190b0ab2, 0x617e070c, 0x070a0002, + 0x004ead3e, 0x0006e489, 0x9dbc0ab2, 0x36967e09, 0x06c48f00, 0xbc0ab200, 0x030bc02f, 0xea7ed4bd, + 0x0ab20044, 0x094c010b, 0x49777e03, 0x1430f400, 0x891855fb, 0xbc0004c0, 0xf089f89a, 0x9abc0004, + 0x050089d8, 0xe89abc00, 0x00051089, 0xf0989abc, 0xf4f001d4, 0x0fe4f003, 0xb60f94f0, 0xe4b61094, + 0x04f4b608, 0xfd05fdfd, 0xf9fd05fe, 0x0a448905, 0xbc22f900, 0xa0b2a99f, 0x310bb2b2, 0x003b8a7e, + 0x300b0ab2, 0x003b8a7e, 0x00049089, 0x89d890bc, 0xbc0004b0, 0xa089e890, 0x90bc0004, 0x048089f8, + 0x9890bc00, 0xb618dc94, 0xf4f108f4, 0x94f0ffff, 0x05f9fdff, 0x000a6489, 0xb608d5b6, 0xedfd1ae4, + 0x099ebc05, 0x2e000089, 0x8905f9fd, 0xfd000a54, 0x0ab205fc, 0x0b099fbc, 0x3b8a7e0c, 0x05f48900, + 0xf890bc00, 0x0005a489, 0x89d890bc, 0xbc000604, 0xb489e890, 0x90bc0005, 0x04fdfd98, 0xb201fdc4, + 0x04e9fd0a, 0x000b8489, 0x8f099fbc, 0xc4000b94, 0xfebc01e9, 0x0294b609, 0x000ae48f, 0x0b059dfd, + 0x09f9bc36, 0x003b8a7e, 0x00087489, 0x89f890bc, 0xbc000524, 0xf9fd9890, 0x131bf404, 0x190b0ab2, + 0x617e030c, 0x030a0002, 0x0052823e, 0xf404f9c4, 0x020f090b, 0x004fe03e, 0xf402fec4, 0x010f100b, + 0x000af489, 0x3e099fbc, 0xc4004ff8, 0x0bf401f9, 0x0af4890a, 0x099ebc00, 0x00055489, 0x000af48f, + 0xbc1890bc, 0x6489f8f0, 0x90bc0005, 0x085489a8, 0xb890bc00, 0x00086489, 0x89c890bc, 0xbc000ab4, + 0xaf90099f, 0x90ffbc01, 0x90909fbc, 0xd9bb011d, 0x07c48904, 0x01ce9000, 0xbc099dbc, 0x9ebc90ee, + 0x01bf9090, 0x8904f9bb, 0xbc0007d4, 0x548e099f, 0x4489000b, 0xfda6000b, 0xbc0d0df4, 0xecbc099b, + 0x506b3e09, 0x0991bc00, 0x8909eabc, 0xbc000714, 0x2489b890, 0x90bc0007, 0x079489e8, 0xbc030f00, + 0xeebc099f, 0x909ebc90, 0x90039992, 0xf9bb01bf, 0x07a48904, 0x099fbc00, 0x000b348d, 0x000b248c, + 0xf403f6b0, 0xdebc0d0d, 0x09cbbc09, 0x0050ba3e, 0xd9bc0109, 0xbc020909, 0x448f09c9, 0xf0bc000b, + 0x0b248fe8, 0xd8f0bc00, 0x000b348f, 0x89f8f0bc, 0xbc000b54, 0xe4b69890, 0x04f4b60c, 0x94b60ab2, + 0x059efd10, 0xfd059dfd, 0xc48f059f, 0x330b000a, 0x7e09f9bc, 0x89003b8a, 0xbc0008c4, 0x3489f890, + 0x90bc0005, 0x04f9fd98, 0xb2131bf4, 0x0c190b0a, 0x02617e04, 0x3e040a00, 0xc4005282, 0x0bf410f9, + 0x3e040f09, 0xc400512f, 0x0bf402fe, 0x89010f10, 0xbc000b04, 0x633e099f, 0xf9c40051, 0x0e0bf401, + 0x000b0489, 0x3e099ebc, 0xc4005163, 
0x020f04f9, 0xb2df1bf4, 0x0c190b0a, 0x02617e0f, 0x3e0f0a00, + 0x89005282, 0x3c0003e8, 0x94339890, 0xf4bd0d01, 0x000b0489, 0x89099fbc, 0xbc0008d4, 0x4489f890, + 0x90bc0005, 0x94f9ff98, 0xb2131bf4, 0x0c190b0a, 0x02617e05, 0x3e050a00, 0xc4005282, 0x0bf4029f, + 0x3e010f09, 0xf00051ad, 0x0bf40194, 0x0b148947, 0x099fbc00, 0x0008e489, 0x89b890bc, 0xbc0008f4, + 0xf489c890, 0x008e0007, 0x9ebc0300, 0x01c99009, 0x900294b6, 0xf9bb01bf, 0x08048904, 0x099fbc00, + 0x000b748d, 0x000b648a, 0x0df4fea6, 0x52013e21, 0x0b0ab200, 0x7e0e0c19, 0x0a000261, 0x52823e0e, + 0x09dcbc00, 0x3e09abbc, 0x09005215, 0x09d9bc03, 0xa9bc0209, 0x0b648f09, 0xe8f0bc00, 0x000b048f, + 0x8fd8f0bc, 0xbc000b14, 0x7489f8f0, 0x90bc000b, 0x18e4b698, 0xb210f4b6, 0x1c94b60a, 0xfd059efd, + 0x9ffd059d, 0x0ad48f05, 0xbc340b00, 0x8a7e09f9, 0x0089003b, 0x0d940130, 0xd0d9bc0f, 0x800000d9, + 0x05d9fd00, 0xbd00ddce, 0xc7030bc4, 0xc44e08dd, 0x7e0ab209, 0xb20005e6, 0x4c2bb20a, 0x777e0209, + 0x21fb0049, 0xa0b212f9, 0x200bb1b2, 0x00450c7e, 0x957e0ab2, 0xa0b3002a, 0xf43d0d0f, 0x0003e889, + 0xb2099f3c, 0x7e210b0a, 0xb200450c, 0x7e230b0a, 0xb200450c, 0x7e240b0a, 0xb200450c, 0x7e260b0a, + 0xb200450c, 0x4c1bb20a, 0x777e0109, 0x11fb0049, 0x000a1489, 0xf4989abc, 0x12f9fc30, 0x9db3a1b2, + 0x8900c901, 0xbc000614, 0x9fb3989a, 0x00bd0309, 0x0100ac89, 0xbc0fad94, 0x00d9d0d9, 0xff008000, + 0x5489e5d9, 0xd9bcfeff, 0x00efced0, 0x04000089, 0xf494f9ff, 0xff890d0b, 0xf9fffbff, 0x01e9fa94, + 0x01309c89, 0xd9bcf43d, 0x38b889d0, 0x199f3c00, 0x800000df, 0xf5dfff00, 0xde00f9ce, 0x40000000, + 0xfa059efd, 0xe4bd01f9, 0x0838ddb8, 0x0000db02, 0x00dc0080, 0xff080000, 0xf9cef5db, 0x059cfd00, + 0x9001f9fa, 0xdd9001ee, 0x04e4b340, 0xbd020bee, 0xfe1ab2d4, 0x00900140, 0x7e0cb208, 0xb20044ea, + 0x7e0a0b1a, 0xbf003b63, 0x0a04b300, 0x7e1ab224, 0xb2000050, 0xbd0bb21a, 0x48f77ec4, 0xb3030900, + 0x3e0e00a0, 0x090053af, 0x53af3e1e, 0xb294bd00, 0x0415fb9a, 0x0100ac8f, 0xf40fa994, 0x9fbcfc30, + 0xdf12f990, 0x00800000, 0x9fffa1b2, 0x00efcee5, 0x02000089, 0xf494f9ff, 0xff890d0b, 0xf9fffdff, + 0x01e9fa94, 0xd4bd020b, 0x40fe1ab2, 0x08009001, 0xea7e0cb2, 0x090b0044, 0x637e1ab2, 0x0bbf003b, + 0x1209b4b3, 0xc4bd1ab2, 0x0048f77e, 0xa4b30309, 0x94bd0600, 0x15fb9ab2, 0x38b88904, 0xac30f400, + 0x9a3c32f9, 0x89a1b2f8, 0xb2000614, 0x089abcbc, 0x1300f433, 0x0f0a04b3, 0x7e010a4b, 0x3e003b63, + 0xb3005490, 0x4d010a0e, 0x0006d489, 0x0fb891bc, 0x39d48901, 0x0143fe00, 0x90199f3c, 0x1ab21033, + 0xae7e3db2, 0xa2b2003c, 0x0200adb3, 0x38b88901, 0x98913c00, 0x1c009433, 0xc43d3bb2, 0x757e1ab2, + 0x1ab20037, 0x010c0b0b, 0x00f4248d, 0x0017087e, 0x0038b889, 0x3398913c, 0x00d10099, 0x030a0bb3, + 0x40fe00cc, 0x901ab201, 0x0bb21000, 0x003c0e7e, 0xadb3a2b2, 0xb200b800, 0x3d1ab20b, 0x37757ec4, + 0x00048900, 0x0f1f9401, 0xd9f0f9bc, 0x00800000, 0xce05f9fd, 0x004e00f9, 0x049efdf8, 0x0901f9fa, + 0x3bc08f01, 0xbc9ab200, 0x108919f9, 0xf0b20039, 0x94041e94, 0xff8b041f, 0xf9bc00ff, 0x0a348cf0, + 0x39208900, 0xbcd4bd00, 0xf9bfe0e9, 0xa604ff90, 0x091bf49b, 0xbc19cabc, 0xfea6190d, 0x89ee1bf4, + 0xbc003bc0, 0x90b39891, 0x4bfe1200, 0x901ab201, 0x010c10bb, 0x0037757e, 0xd4bd020b, 0x40fe1ab2, + 0x60009001, 0xea7e0cb2, 0x0a4b0044, 0x7e1ab203, 0xbf003b63, 0x0abeb30b, 0x1ab21503, 0xf77ec4bd, + 0xa2b20048, 0x00556a3e, 0x2ab224bd, 0x0b5435fb, 0xf4d4bd02, 0x22f9fc30, 0x40fea1b2, 0x0c009001, + 0xea7e0cb2, 0x1ab20044, 0x507e02bf, 0x14890000, 0x00bf0006, 0xa6b891bc, 0x0d1bf40b, 0x0bb21ab2, + 0xce3ec4bd, 0x2dc40055, 0xffbec4ff, 0x1bf4dea6, 0xe82fc70e, 0xa6e8b9c7, 0x110cf4f9, 0xa601e990, + 0x090bf4d9, 0xd23ea4bd, 0x1ab20055, 0xf77e010c, 0x25fb0048, 
0xf4100b04, 0x22f9e830, 0x41fea2b2, + 0x0140fe01, 0x901c1190, 0x1cb22000, 0xea7e0db2, 0x0fbf0044, 0xc4891ebf, 0x030a0006, 0xfffffde4, + 0xffffece4, 0xb610f5b6, 0x92bc10e5, 0x0149feb8, 0xb50c9990, 0x9ca0029d, 0xb5039fb5, 0x9db2019e, + 0xe08cf4bd, 0x2e94003b, 0x00b4b302, 0x3ef9b20a, 0xbc005636, 0xd1bf92af, 0x9090e9bc, 0xdd9001ff, + 0x99c1bc04, 0xe504f4b3, 0x507e2ab2, 0xe0890000, 0x2ab2003b, 0xbc042b94, 0x197eb0b9, 0x25fb0054, + 0xdc02f918, 0x0080210c, 0xcccea0b2, 0x04408900, 0xa99cbc00, 0x802108d9, 0x0099ce00, 0x0004508f, + 0xf9bc9db2, 0x7e200ba9, 0xd9000494, 0x00802104, 0x8f0099ce, 0xbc000480, 0x00d909f9, 0xce008021, + 0x908f0099, 0xf9bc0004, 0x71148f09, 0x0f099401, 0xdf909fbc, 0x00800000, 0xce059ffd, 0xa08f0099, + 0xb4bd0004, 0xb209f9bc, 0x050b7e0a, 0x7e0ab200, 0x8900057d, 0xb60004b0, 0x9abc1aa5, 0xbd01fb09, + 0x76388f94, 0xde22f901, 0x00800000, 0x000a7482, 0x29bca0b2, 0x0fa994a9, 0xff909fbc, 0xffcef59e, + 0x50999000, 0x8e059efd, 0xbc000410, 0x99cea9ef, 0x04308e00, 0xa9e9bc00, 0x0005a489, 0xf002fdc7, + 0x9fbc01f4, 0x05b489a9, 0xa99dbc00, 0x0034ef7e, 0xa4b3a1b2, 0x0ab21000, 0x00351d7e, 0xa0b3a1b2, + 0x0ab21200, 0x1cb2180b, 0x0002617e, 0x00576f3e, 0x517e0ab2, 0x0ab20035, 0x0035747e, 0x977e0ab2, + 0x20bc0035, 0x0b0ab2d8, 0x7ec4bd26, 0xb2000494, 0x8921fb1a, 0xf9017604, 0x89fc8f72, 0x0fa594fe, + 0x59bca4b2, 0x0000d950, 0x59ff0080, 0x505fbce5, 0x8d00e9ce, 0xc4000ba4, 0x1bf4019f, 0xa9df3c10, + 0xfa0195f0, 0xaf3e01e9, 0x01090057, 0x89a9d93c, 0x8f017600, 0xbcfe8a00, 0x00d95059, 0xff008000, + 0x5fbc9559, 0x009fce50, 0xce080449, 0x90330099, 0xf4bd0a03, 0x0057db3e, 0x890fffc7, 0xbc017650, + 0x74895059, 0x9f3c0038, 0x0000d949, 0xb08f0080, 0x59fffe89, 0x505fbce5, 0x8d00e9ce, 0xc4000ba8, + 0x1bf4019f, 0x49df3c10, 0xfa0195f0, 0x173e01e9, 0x01090058, 0xb249d93c, 0x18e57e4a, 0x01a43300, + 0x064ab219, 0x18be7e16, 0x00a93300, 0x4ab202a9, 0xb57eb4bd, 0x0cd90018, 0xce008020, 0xdc8f0099, + 0x9e95000b, 0x1a9dc70c, 0xc749febc, 0xfc89769f, 0x9fbc000b, 0x0bec8949, 0x499dbc00, 0x000c0c8e, + 0x1600d4b3, 0x8e49ed3c, 0xb3000c10, 0x3c1400f0, 0x843e49ed, 0x943d0058, 0x000c108f, 0x0949f93c, + 0x49e93c01, 0x010b4ab2, 0x0032ac7e, 0xadb3a6b2, 0x89024400, 0xbc017380, 0x00d95059, 0xff008000, + 0x8089f559, 0x59bcfe8c, 0xce94bd50, 0x999000fe, 0x3294b301, 0xbd4ab2fa, 0x32ac7eb4, 0xb3a6b200, + 0x021300ad, 0x0174a88f, 0xdf505fbc, 0x00800000, 0xcef55fff, 0x95f100f9, 0xf9fa0100, 0x89f48f01, + 0x6455b8fe, 0x00d90001, 0xff008000, 0x5fbc9559, 0x0099ce50, 0x0003ec8b, 0x008fbdbf, 0x9cc70004, + 0x49f9bce8, 0x8f039ec4, 0xc70004c0, 0xfebc2499, 0x04d08f49, 0x49f9bc00, 0x0004e089, 0xd9499cbc, + 0xdeadbeef, 0x1bf4d9a6, 0x89bca005, 0xbf0003ec, 0xbeefd99f, 0xf9a6dead, 0x89190bf4, 0xbc0004e0, + 0x9fa69894, 0xb20d0bf4, 0x0c180b4a, 0x59eb3e0f, 0x76188900, 0x0000d701, 0x59bc0080, 0xf557ff50, + 0x8900ffce, 0x900003f0, 0x9fbc6855, 0xe557ff49, 0xfe898089, 0xce5059bc, 0x208900ee, 0x9ebc0004, + 0x04f08949, 0x01fdc400, 0x89499dbc, 0xc7000500, 0x9cbc21fc, 0x05108949, 0x26fbc700, 0x89499bbc, + 0xc7000520, 0x9a3c03fa, 0x05248949, 0x48f0c700, 0x894990bc, 0xc7000534, 0x91bcebf1, 0x05448949, + 0x93f2c700, 0x894992bc, 0x95000554, 0xffc71cf3, 0x499fbc78, 0x00056489, 0x93bc4ab2, 0x32f67e49, + 0xb3acb200, 0xb21200a0, 0x7e180b4a, 0x06000261, 0x5ad23e1f, 0x7e4ab200, 0xb200334c, 0x00a4b3ac, + 0x7e4ab2ea, 0xb20033c9, 0x00a4b3ac, 0x7e4ab2de, 0xb2003416, 0x00a4b3ac, 0x7e4ab2d2, 0xb2003885, + 0x38be7e4a, 0x7e4ab200, 0xb2003923, 0x39597e4a, 0x7e4ab200, 0xb200343f, 0x00a4b3ac, 0x7e4ab2ae, + 0xb2003467, 0x00a4b3ac, 0x0a9489a2, 0xc894bc00, 0x000a8489, 0x0bd894bc, 0x7e4ab223, 
0xb2000494, + 0x348e7e4a, 0xb3acb200, 0xb28000a4, 0x34b67e4a, 0xb3acb200, 0xff7400ad, 0x000aa489, 0xbdd894bc, + 0xb2240bc4, 0x04947e4a, 0x7e4ab200, 0xb20056df, 0x00adb3ac, 0x248fff56, 0x5fbc0176, 0x9557ff50, + 0x8f0099ce, 0xbc000bcc, 0x94f049f9, 0x1f0bf401, 0x0003c089, 0x3308943c, 0xb2150104, 0x18e57e4a, + 0x00a43300, 0x03e8890b, 0x49903c00, 0x617e4ab2, 0x6ab20056, 0xdb8f71fb, 0x440902e7, 0xf8009ffa, + 0x02b5b600, 0x0c02a0b3, 0x1803a0b3, 0x00f8a4bd, 0x09f0bbbc, 0xf0fbbc40, 0xc4079fbb, 0x00f8079a, + 0x89f0bbbc, 0xbc492492, 0x9fbbf0fb, 0x079ac407, 0xa99400f8, 0xb6e4bd07, 0xd4bd04a4, 0x3ea0a9bc, + 0xa0005b36, 0x01ff909d, 0xb3049990, 0x90f809f4, 0xe0b301ee, 0xef941d04, 0x02e99405, 0x8f909fbc, + 0xbc000c14, 0x9fbc909a, 0x3ef4bd90, 0xf8005b23, 0x9452f900, 0xa99404af, 0xbda3b207, 0x40f9bc24, + 0x001ffc85, 0x005b893e, 0x3ab20b7f, 0x00902cb2, 0x00b3f006, 0x0012f97e, 0x11901aa0, 0xf405a604, + 0x2290e91b, 0x0420b301, 0x0229941f, 0xbc052f94, 0xc680909f, 0x94bc001f, 0x0c148910, 0x1019bc00, + 0x005b683e, 0x62f951fb, 0x9404af94, 0xa4b207a9, 0xf9bc14bd, 0x1ffe8650, 0x5bfd3e00, 0xb2207f00, + 0xe44ab21c, 0x7effff0b, 0x580012f9, 0x3cbf0129, 0xe4062290, 0xf0ffff0b, 0x9cff0093, 0x0099b9c4, + 0x9004a9fd, 0xacff0433, 0xb21db2c5, 0x13357e4a, 0xf426a600, 0x1190ca1b, 0x0410b301, 0x0219941f, + 0xbc051f94, 0xc882909f, 0x95bc001f, 0x0c148930, 0x3039bc00, 0x005bbd3e, 0x82f961fb, 0xc464bfc7, + 0xb9c70fb2, 0xc7a1b232, 0xf79474b8, 0x0496940c, 0x9404f594, 0x04bd0824, 0xb248b3c7, 0x02294b0c, + 0xf97e1ab2, 0xf8090012, 0x09c4a9ff, 0x4b0db2cf, 0x1ab20229, 0xfdc53cff, 0xc6fd04c9, 0x13357e05, + 0x4b0cb200, 0x1ab2022a, 0x0012f97e, 0xa9fff009, 0xff0f49c4, 0xfdc52cff, 0xff4904c9, 0xc55cfff0, + 0x8904c9fd, 0xb2ff0fff, 0x022a4b0d, 0xc4fd1ab2, 0x04c9fd05, 0x7e05c7fd, 0xb2001335, 0x022b4b0c, + 0xf97e1ab2, 0xf0090012, 0xa9ff0db2, 0x010090c4, 0xb2c58cff, 0x022b4b1a, 0x0013357e, 0x7f040db3, + 0xf981fbff, 0xb3a2b242, 0xb31304b0, 0x0a5d07b0, 0x00bdb323, 0x013e020e, 0x5680005d, 0x1e81001d, + 0x0b7f001e, 0xb2010c58, 0x0400902a, 0xf000b3f0, 0x040d00c3, 0x0013357e, 0x1bf401a6, 0x5d4b3ee8, + 0x1c5a8000, 0x1d568100, 0x580b7f00, 0x2ab2010c, 0xf0040090, 0xc3f000b3, 0x7e040d00, 0xa6001335, + 0xe81bf401, 0x005d4b3e, 0x001e1e80, 0x001eea81, 0x0c580b7f, 0x902ab201, 0xb3f00400, 0x00c3f000, + 0x357e040d, 0x01a60013, 0x89e81bf4, 0x3c002784, 0x94339892, 0x04bd2901, 0x0cb2fe01, 0x2ab2130b, + 0x0012f97e, 0xa1ff0db2, 0x010090c4, 0xb201c5f0, 0x7e130b2a, 0xb3001335, 0xb2e30404, 0x3b1b7e2a, + 0x01a43300, 0xbd240b62, 0x7e2ab2c4, 0x000012f9, 0xffd4bdcf, 0x240bc4a0, 0x357e2ab2, 0x240b0013, + 0x2ab2010c, 0x0012f97e, 0xa0ff010d, 0xb2240bc4, 0x13357e2a, 0x0c240b00, 0x7e2ab202, 0x0d0012f9, + 0xc4a0ff02, 0x2ab2240b, 0x0013357e, 0x030c240b, 0xf97e2ab2, 0x240b0012, 0x0dc4a0ff, 0x7e2ab203, + 0xb2001335, 0x3ae27e2a, 0x03a4b300, 0x4b2ab212, 0xcb4c00b9, 0x7e040d00, 0x89001335, 0x3c000300, + 0x94339892, 0xb4837701, 0x347f001f, 0x1cb214bd, 0xffff4be4, 0xf97e2ab2, 0x3c580012, 0x013f5802, + 0x4be41db2, 0xc9e4ffff, 0x99b9ffff, 0x04a9fd00, 0xf004cffd, 0xacff00c3, 0x7e2ab2c5, 0x58001335, + 0x1cb20330, 0x0be42ab2, 0xf97effff, 0x39580012, 0x043c5805, 0x0be41db2, 0x1190ffff, 0xc49cff01, + 0xb90093f0, 0xa9fd0099, 0x00c3f004, 0xb2c5acff, 0x13357e2a, 0x0414b300, 0x03048999, 0x98923c00, + 0x2b019433, 0xce0104bd, 0x644b0cb2, 0x7e2ab202, 0xb20012f9, 0xc4a1ff0d, 0xf0010090, 0x2ab201c5, + 0x7e02644b, 0xb3001335, 0x89e10404, 0x3c000308, 0x94339892, 0x04bd2b01, 0x0cb2ce01, 0xb202644b, + 0x12f97e2a, 0xff0db200, 0x0090c4a1, 0x11c5f001, 0x644b2ab2, 0x13357e02, 0x0404b300, 0xfba4bde1, + 0xb232f941, 
0x0ab3b2a2, 0x01d9b331, 0xc99400da, 0x90e9bc08, 0x040196b1, 0xb1140cf4, 0xf4040096, + 0x96b05e18, 0x0c0cf401, 0x005f3a3e, 0x07039ab3, 0x3e230a0b, 0x80005fc3, 0x81001ba0, 0x7f001bf4, + 0x010b580a, 0xf0040090, 0xb3f000a3, 0x13677e00, 0xf401a600, 0x7c3eec1b, 0xa880005f, 0xfc81001a, + 0x0a7f001a, 0x90010b58, 0xa3f00400, 0x00b3f000, 0x0013677e, 0x1bf401a6, 0x5f7c3eec, 0x1afc8000, + 0x1b508100, 0x580a7f00, 0x0090010b, 0x00a3f004, 0x7e00b3f0, 0xa6001367, 0xec1bf401, 0x4a05b0b4, + 0xb4f000f7, 0x10b5f007, 0x0013677e, 0x3bb22ab2, 0x005ae17e, 0x1b01a0b3, 0x2902a4b3, 0x867e090a, + 0x3f490013, 0xb4a9ffff, 0x3e80b5f0, 0x0a005fbb, 0x13867e09, 0xff3f4900, 0x0ab4a9ff, 0x13677e09, + 0xfba4bd00, 0xf0bca631, 0xbabc0b9c, 0x1f94b6b0, 0xf8a59bff, 0xb0babc00, 0xcba6cab2, 0xb2050df4, + 0xf900f8ba, 0x008c8952, 0x0000d001, 0xa5940080, 0xbca4b20f, 0x50ff5059, 0x00bbceb5, 0x000e5481, + 0xf1a81abc, 0x4c03ffb4, 0x55b803ff, 0x7e001ff0, 0xff005fc5, 0x1abcb550, 0x00bbce49, 0x000e6481, + 0xdca814bc, 0x7fffffff, 0xb804bcfd, 0x00020c55, 0x005fc57e, 0xbcb550ff, 0xbbce491a, 0x0e748100, + 0xa814bc00, 0x8c10b5b6, 0xb800ffff, 0x00100055, 0x005fc57e, 0xbcb550ff, 0xbbce491a, 0x0e848100, + 0xa814bc00, 0x8c00b3f0, 0x9000ffff, 0xc57e0455, 0x50ff005f, 0xcd748905, 0x491abcfe, 0xce5059bc, + 0x9489000d, 0x94bc000e, 0xe8d9c7f8, 0xb1e09fbc, 0xf400fee6, 0xff4e060d, 0x0ea48900, 0xf894bc00, + 0x000e9489, 0xc7499ebc, 0x9fbcf0d9, 0xfee6b1e0, 0x060df400, 0x8900ff4e, 0xbc000eb4, 0xa489f894, + 0x9ebc000e, 0x18d99549, 0xb1e09fbc, 0xf400fee6, 0xff4e060d, 0x0ec48900, 0xf894bc00, 0x000eb489, + 0xc4499ebc, 0x9fbcffd9, 0xfef6b1f0, 0x060df400, 0x8900ff4f, 0xd3013294, 0x00800000, 0x895059bc, + 0xff000ec4, 0x9fbcb553, 0x00bbce49, 0x000ed480, 0xf0a804bc, 0xff8c00b3, 0x55b800ff, 0x7e000104, + 0xff005fc5, 0x0abcb553, 0x00bbce49, 0x000ee481, 0xd0a814bc, 0x00ffffff, 0xb204b0fd, 0x5fc57e0c, + 0x04559000, 0xff491abc, 0xbbceb553, 0x0ef48100, 0xa814bc00, 0xb204b0fd, 0x0455900c, 0x005fc57e, + 0xbcb553ff, 0xbbce491a, 0x0f048100, 0xa814bc00, 0xb204b0fd, 0x0455900c, 0x005fc57e, 0xbcb553ff, + 0xbbce491a, 0x0f148100, 0xa814bc00, 0xb204b0fd, 0x9055b80c, 0xc57e0203, 0x53ff005f, 0x491abcb5, + 0x8000bbce, 0xbc000f24, 0xbbc7a804, 0x7c55b8e8, 0xff4c022e, 0x5fc57e00, 0x490abc00, 0xce0553ff, + 0x34810000, 0x14bc000f, 0x100b95a8, 0x00ffff8c, 0x000f4482, 0x005fc57e, 0xbc491abc, 0x0be4a824, + 0xff8cffff, 0x559000ff, 0x5fc57e04, 0xb553ff00, 0xce492abc, 0x548000bb, 0x04bc000f, 0x00b3f0a8, + 0x8c045590, 0x7e00ffff, 0xbc005fc5, 0x53ff490a, 0x0000ce05, 0x000f6481, 0x95a814bc, 0xff4c180b, + 0x0f748200, 0x5fc57e00, 0x491abc00, 0xc4a824bc, 0xff4cff0b, 0x0f848100, 0x5fc57e00, 0x492abc00, + 0xc7a814bc, 0xff4ce80b, 0x0f948200, 0x5fc57e00, 0x491abc00, 0xc7a824bc, 0x5590f00b, 0x00ff4c04, + 0x7e0553ff, 0xbc005fc5, 0x00ce492a, 0x0fa48100, 0xa814bc00, 0x8c100b95, 0x8200ffff, 0x7e000fb4, + 0xbc005fc5, 0x24bc491a, 0xff0be4a8, 0xffff8cff, 0x04509000, 0x005fc57e, 0xbc0503fd, 0x00ce492a, + 0x0fc48100, 0xa814bc00, 0x4ce80bc7, 0xd48200ff, 0xc57e000f, 0x1abc005f, 0xa824bc49, 0x4cff0bc4, + 0xe48300ff, 0xc57e000f, 0x2abc005f, 0xa834bc49, 0x4cf00bc7, 0xf48100ff, 0xc57e000f, 0x3abc005f, + 0xa814bc49, 0x4c180b95, 0xc57e00ff, 0x1abc005f, 0x8f51fb49, 0xdb010050, 0x00800000, 0xbc0fa994, + 0x9bff909f, 0x00ffcef5, 0x0010048c, 0xb8d8cabc, 0x001fd899, 0xfde59bff, 0xcfbc05fd, 0x00eecea9, + 0x0010148d, 0xb8f8dabc, 0x00100099, 0xfd059bfd, 0xdebc05ef, 0x0099cea9, 0x0010248e, 0xfdf8eabc, + 0xe9bc059f, 0x8f00f8a9, 0x94013068, 0x9fbc0fa9, 0x0000df90, 0x9ffd0080, 0x009ece05, 0x0010348d, + 0xc4f8dabc, 0x9fa63fe9, 0xb20518f4, 
0x10448cf9, 0xf8cabc00, 0xc7a9d9bc, 0x9fa6a8e9, 0xb20518f4, + 0x10548bf9, 0xf8babc00, 0xc7a9c9bc, 0x9fa6b0e9, 0xb20518f4, 0x10648df9, 0xf8dabc00, 0xc7a9b9bc, + 0x9fa6b8e9, 0xb20518f4, 0xa9d9bcf9, 0x408f00f8, 0x00d90130, 0x94008000, 0xefbc0fae, 0xf5e9ffe0, + 0x9200fcce, 0xe9ff18ee, 0xcfd889b5, 0xe0e9bcfe, 0xc400bdce, 0x0bf402d9, 0x68cfc70a, 0x0063e73e, + 0x00107489, 0x89f89abc, 0xbc001074, 0xd9c4a99f, 0x0a0bf420, 0x3e78cfc7, 0x89006402, 0xbc001084, + 0x8489f89a, 0x9fbc0010, 0x10d9c4a9, 0xc70a0bf4, 0x1d3e70cf, 0x94890064, 0x9abc0010, 0x304489f8, + 0xe0e9bc01, 0x00109489, 0xd9a99fbc, 0x00800000, 0xce95e9ff, 0xbdce009f, 0x80d9c400, 0x0b009033, + 0x3e70fec7, 0x8900644e, 0xbc0010a4, 0xa489e89a, 0x9ebc0010, 0x40d9c4a9, 0xc70a0bf4, 0x693e78ff, + 0xb4890064, 0x9abc0010, 0x10b489f8, 0xa99fbc00, 0x12f900f8, 0xb2015c4b, 0x76ce7ea0, 0x11048900, + 0x0f0d9400, 0x8fa0a9bc, 0x89001674, 0xd1010000, 0x00800000, 0x8be0d9bc, 0xbf0017d0, 0x0000dcf9, + 0xe9bc0080, 0x0591fd90, 0xa00099ce, 0x04ff90a9, 0xa604aa90, 0xe61bf4fb, 0x01300089, 0xffd0d9bc, + 0x99ce95dc, 0x10c48f00, 0x30ddb800, 0xf9bc0002, 0x95dcff09, 0x8f0099ce, 0x900010d4, 0xf9bc50dd, + 0xe5dcff09, 0x8f00eece, 0xb80010e4, 0x0230f0d9, 0xfd09febc, 0x99ce059c, 0x10f48f00, 0x09f9bc00, + 0x12f911fb, 0xb2015c4b, 0x76ce7ea0, 0x11048900, 0x0f0c9400, 0x8ea0a9bc, 0x89001674, 0xd1010000, + 0x00800000, 0x8bd0c9bc, 0xbf0017ac, 0xbcafbfe9, 0x91fd90d9, 0x019ffa05, 0x9004ee90, 0xeba604aa, + 0x89eb1bf4, 0xdd013000, 0x00800000, 0xffc0c9bc, 0xdeced5cd, 0x10c48900, 0xf890bc00, 0x000000d9, + 0x04e9fd42, 0xffffffd9, 0x04f9fdbd, 0xfa05effd, 0x00dd01de, 0xb8008000, 0x000230cc, 0xced5cdff, + 0xd48900de, 0x90bc0010, 0xffff89f8, 0x04e9fde4, 0x1b000089, 0xfd04f9fd, 0xdefa05ef, 0x0000dd01, + 0xcc900080, 0xd5cdff50, 0x8900dece, 0xbc0010e4, 0xf00f9890, 0xfd0fe4f0, 0xe9fd049f, 0x01defa05, + 0x800000d9, 0xf0cdb800, 0xd9fd0230, 0x00dece05, 0x0010f489, 0xd9f890bc, 0x0fffffff, 0xd904e9fd, + 0xf0000000, 0xfd04f9fd, 0xdefa05ef, 0x8911fb01, 0xf9010000, 0x015c4b02, 0xbc0fa094, 0xce7e0009, + 0x3c890076, 0xac8e0012, 0xa9bc0017, 0x0000dca0, 0xd08d0080, 0xe9bf0017, 0x09bcafbf, 0x059cfd90, + 0x90019ffa, 0xaa9004ee, 0xf4eda604, 0x01fbeb1b, 0x000e548e, 0xe9bc94bd, 0x0e648ea9, 0xa9e9bc00, + 0x000e748e, 0x8ea9e9bc, 0xbc000e84, 0x948ea9e9, 0xe9bc000e, 0x0ea48ea9, 0xa9e9bc00, 0x000eb48e, + 0x8ea9e9bc, 0xbc000ec4, 0xd48ea9e9, 0xe9bc000e, 0x0ee48ea9, 0xa9e9bc00, 0x000ef48e, 0x8ea9e9bc, + 0xbc000f04, 0x148ea9e9, 0xe9bc000f, 0x0f248ea9, 0xa9e9bc00, 0x000f348e, 0x8ea9e9bc, 0xbc000f44, + 0x548ea9e9, 0xe9bc000f, 0x0f648ea9, 0xa9e9bc00, 0x000f748e, 0x8ea9e9bc, 0xbc000f84, 0x948ea9e9, + 0xe9bc000f, 0x0fa48ea9, 0xa9e9bc00, 0x000fb48e, 0x8ea9e9bc, 0xbc000fc4, 0xd48ea9e9, 0xe9bc000f, + 0x0fe48ea9, 0xa9e9bc00, 0x000ff48e, 0x8ea9e9bc, 0xbc001004, 0x148ea9e9, 0xe9bc0010, 0x10248ea9, + 0xa9e9bc00, 0x0010348e, 0x8ea9e9bc, 0xbc001044, 0x548ea9e9, 0xe9bc0010, 0x10648ea9, 0xa9e9bc00, + 0x0010748e, 0x8ea9e9bc, 0xbc001084, 0x948ea9e9, 0xe9bc0010, 0x10a48ea9, 0xa9e9bc00, 0x0010b48e, + 0x8ea9e9bc, 0xbc0010c4, 0xd48ea9e9, 0xe9bc0010, 0x10e48ea9, 0xa9e9bc00, 0x0010f48e, 0xbc015c4b, + 0xce7ea9e9, 0x04890076, 0xf4bd0011, 0xbda0a9bc, 0x90afa094, 0xaa900199, 0x5794b304, 0xb600f8f8, + 0x348c0fa4, 0xb88b0021, 0xcfbf0021, 0xcef0afbc, 0xce9800f9, 0x01cd9802, 0xfd049efd, 0xf9fa059d, + 0x0ccc9001, 0x1bf4cba6, 0x8f00f8e4, 0xbf0021d0, 0x00d9cefd, 0x9801fe98, 0x9ffd02ff, 0x059efd04, + 0x8f01d9fa, 0x980021d0, 0xd9ce03fd, 0x04fe9800, 0xfd05ff98, 0x9efd049f, 0x01d9fa05, 0x0021d08f, + 0xce06fd98, 0xfe9800d9, 0x08ff9807, 0xfd049ffd, 0xd9fa059e, 
0x21f48c01, 0x0fa4b600, 0x0023688b, + 0xafbccfbf, 0x00f9cef0, 0x9802ce98, 0x9efd01cd, 0x059dfd04, 0x9001f9fa, 0xcba60ccc, 0xf8e41bf4, + 0x3814de00, 0xe9ce0080, 0xffffdf00, 0x9ffd00ff, 0x0000df04, 0x9ffd0600, 0x01e9fa05, 0xa4b600f8, + 0x23f88c0f, 0x24288b00, 0xbccfbf00, 0xf9cef0af, 0x02ce9800, 0xfd01cd98, 0x9dfd049e, 0x01f9fa05, + 0xa60ccc90, 0xe41bf4cb, 0x88d900f8, 0xce008020, 0x004e009f, 0x04fefdff, 0xfa59f5f0, 0x00f8019f, + 0x816128d9, 0x0fa4b600, 0xcea0a9bc, 0x004f00a9, 0x049ffdf0, 0xfa6495f0, 0x00f801a9, 0x8c0fa4b6, + 0x8b002434, 0xbf002464, 0xf0afbccf, 0x9800f9ce, 0xcd9802ce, 0x049efd01, 0xfa059dfd, 0xcc9001f9, + 0xf4cba60c, 0x00f8e41b, 0x0024648f, 0xa4b6febf, 0xe0aebc0f, 0x9800e9ce, 0xff9801fd, 0x049ffd02, + 0xfa059dfd, 0x648f01e9, 0xf9980024, 0xa0a9bc03, 0x9800a9ce, 0xff9804fe, 0x049ffd05, 0xfa059efd, + 0x00f801a9, 0x8c0fa4b6, 0x8b002368, 0xbf0023e0, 0xf0afbccf, 0x9800f9ce, 0xcd9802ce, 0x049efd01, + 0xfa059dfd, 0xcc9001f9, 0xf4cba60c, 0x00f8e41b, 0x812828d9, 0x0fa4b600, 0xcea0a9bc, 0xff8f00a9, + 0x9ffdff00, 0x0095f104, 0x01a9fa68, 0x048f00f8, 0xfebf0021, 0xbc0fa4b6, 0xe9cee0ae, 0x01fd9800, + 0xfd02ff98, 0x9dfd049f, 0x01e9fa05, 0x0021048f, 0xbc03f998, 0xa9cea0a9, 0x04fe9800, 0xfd05ff98, + 0x9efd049f, 0x01a9fa05, 0x28d900f8, 0xb6008161, 0xa9bc0fa4, 0x00a9cea0, 0xfdf0004f, 0x95f0049f, + 0x01a9fa6c, 0xb88f00f8, 0xfebf0021, 0xbc0fa4b6, 0xe9cee0ae, 0x01fd9800, 0xfd02ff98, 0x9dfd049f, + 0x01e9fa05, 0x0021b88f, 0xbc03f998, 0xa9cea0a9, 0x04fe9800, 0xfd05ff98, 0x9efd049f, 0x01a9fa05, + 0x8cd900f8, 0xb600816a, 0xa9bc0fa4, 0x00a9cea0, 0xf8ffff8f, 0x8f049ffd, 0xfd070000, 0xa9fa059f, + 0xb600f801, 0xfc8c0fa4, 0x048b001f, 0xcfbf0021, 0xcef0afbc, 0xce9800f9, 0x01cd9802, 0xfd049efd, + 0xf9fa059d, 0x0ccc9001, 0x1bf4cba6, 0x8f00f8e4, 0xbf0017d0, 0x019990f9, 0x00f8f9a0, 0xa0f990f9, + 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006cfb7e, 0xe8fee0fc, 0xfef0fc00, + 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, + 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006ddf7e, 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, + 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, + 0xe0f9018e, 0x00708c7e, 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, + 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006d8e7e, + 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, + 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006cad7e, 0xe8fee0fc, 0xfef0fc00, + 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, + 0x9ffef0f9, 0xfef0f901, 0xe0f9018e, 0x006c5f7e, 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, + 0xb0fcc0fc, 0x90fca0fc, 0x00f801f8, 0xa0f990f9, 0xc0f9b0f9, 0xe0f9d0f9, 0x9ffef0f9, 0xfef0f901, + 0xe0f9018e, 0x006a2f7e, 0xe8fee0fc, 0xfef0fc00, 0xf0fc00f9, 0xd0fce0fc, 0xb0fcc0fc, 0x90fca0fc, + 0x00f801f8, 0x0017d489, 0x9433993f, 0x0a0a0c01, 0x107eb4bd, 0x46890000, 0x93fe0000, 0x492e0f00, + 0x9ffa0080, 0x5ad67e00, 0x08004900, 0x8f0099cf, 0xfd010000, 0x0bf4049f, 0x02af7e0e, 0x00468900, + 0x0093fe00, 0x00021b7e, 0x0020fd7e, 0x006a748a, 0x0000e17e, 0x006aac8a, 0x0000f97e, 0x006ae48a, + 0x0001117e, 0x006a3c8a, 0x0001297e, 0x006b1c8a, 0x0001417e, 0x006b548a, 0x0001597e, 0x006b8c8a, + 0x0001717e, 0x0001897e, 0x0004627e, 0x00022c7e, 0xd489010f, 0x9f200017, 0x3e0028f4, 0xf9006c58, + 0x08404912, 0xc70099cf, 0x1ab27491, 0x0002407e, 0x1000a433, 0x010b090a, 0x0000107e, 
0x006cab3e, + 0x02a010b8, 0x0204b600, 0xe4000bce, 0xb2ffffbc, 0xf0bbc71a, 0x000ac87e, 0x02b019b8, 0x0294b600, + 0xd9009afa, 0x80000000, 0xfb0109fa, 0x4912f911, 0x99cf0840, 0x7091c700, 0x407e1ab2, 0xa4330002, + 0x080a1000, 0x107e010b, 0xf93e0000, 0x10b8006c, 0xb6000260, 0x0bce0204, 0xffbce400, 0xc71ab2ff, + 0xc87ef0bb, 0x19b8000a, 0xb6000270, 0x9afa0294, 0x0000d900, 0x09fa8000, 0xf911fb01, 0x08404902, + 0xc70099cf, 0x0ab26c90, 0x0002407e, 0x1000a433, 0x010b070a, 0x0000107e, 0x006d8c3e, 0x017e0ab2, + 0xa0b30002, 0xa6b03909, 0x0f0cf409, 0x4000a0b3, 0x1108a4b3, 0x006d473e, 0x190ea0b3, 0x0080aab3, + 0x3e010a30, 0xb2006d65, 0x18ee7e0a, 0x6d6c3e00, 0x7e0ab200, 0x3e00192b, 0xb2006d6c, 0x1ef87e0a, + 0x00a0b300, 0x80a9c50b, 0x006d6e3e, 0xa48e0109, 0x0f940174, 0xff94f00f, 0xdef0febc, 0x00800000, + 0xfa05fefd, 0x0ab201f9, 0x0000b47e, 0x12f901fb, 0xcf084049, 0x90c70099, 0x7e0ab268, 0x33000240, + 0x0a1000a4, 0x7e010b05, 0x3e000010, 0x8f006ddd, 0x94010060, 0x9fbc0f09, 0x0000df90, 0x9ffd0080, + 0x0091ce05, 0x000000d9, 0x9419ff11, 0xb2090bf4, 0x24ae7e0a, 0xb20ab200, 0x00807e1b, 0xf911fb00, + 0x08404922, 0xc40099cf, 0x1ab20f91, 0x0002407e, 0x1200a433, 0x010b040a, 0x107e24bd, 0x613e0000, + 0x19b80070, 0xb6000240, 0x92cf0294, 0xff20c400, 0xfd0209b3, 0x7e1ab200, 0x0b00038e, 0x00a9330b, + 0x09b30238, 0xb0017111, 0x0cf41106, 0x0609b367, 0x06b0010c, 0x2c0cf406, 0xd50209b3, 0x0206b000, + 0xb3110cf4, 0x021e0009, 0x3e010db3, 0x6f033e02, 0x0409b300, 0x06b000d4, 0xd60cf504, 0x6f1b3e00, + 0x0d09b300, 0x06b00100, 0x180cf40d, 0xe10809b3, 0x0806b000, 0x00cf08f5, 0x0e0c0db3, 0x6f573e02, + 0x0f09b300, 0x06b000f4, 0xf60cf50f, 0x6f6b3e00, 0x4109b300, 0x06b10170, 0x0cf40041, 0x1809b334, + 0x06b0012f, 0x160cf418, 0xff1509b3, 0x1709b300, 0x0db30115, 0x3e01d414, 0xb3006f9d, 0x012c2109, + 0x314009b3, 0x200db301, 0xd63e01c1, 0x09b3006f, 0xb1015b60, 0xf4006006, 0x09b3190c, 0xb1013b43, + 0xf5004306, 0xb3012808, 0x019f580d, 0x00701f3e, 0x4d7009b3, 0x7109b301, 0x0db30152, 0x3e018c61, + 0xb2007033, 0x25287e1a, 0x6fdc3e00, 0x0b1ab200, 0x7ec4bd01, 0x3e000261, 0xb2007061, 0x15a57e1a, + 0x6fdc3e00, 0x7e1ab200, 0x3e002883, 0xb2007061, 0x18197e1a, 0x70613e00, 0x7e1ab200, 0x3e002737, + 0xb2007061, 0x26557e1a, 0x6fdc3e00, 0x7e1ab200, 0x3e0024eb, 0xb2007061, 0x21817e1a, 0x6fdc3e00, + 0x7e1ab200, 0x3e005773, 0xb2006fdc, 0x53b47e1a, 0x6fdc3e00, 0x7e1ab200, 0x3e0052d0, 0xb2006fdc, + 0x177f7e1a, 0xb2010b00, 0x7e1ab2ac, 0x3e0021fa, 0xb2006fdc, 0x22967e1a, 0x6fdc3e00, 0x7e1ab200, + 0x3e00145f, 0x0b006fdc, 0x0c1ab201, 0x13a77e01, 0xb3abb200, 0x009e00ad, 0xcb7e1ab2, 0xdc3e0014, + 0x1ab2006f, 0x0018d17e, 0x0070613e, 0xfb7e1ab2, 0xdc3e001a, 0x1ab2006f, 0x00296b7e, 0xa9b3abb2, + 0x3e008300, 0xb2007051, 0x29c77e1a, 0x70613e00, 0x08b04b00, 0xb200bbcf, 0x5c1a7e1a, 0x70613e00, + 0x7e1ab200, 0x3e00188b, 0xb2007061, 0x189f7e1a, 0x70613e00, 0x7e1ab200, 0x3e0018aa, 0xb2007061, + 0x1d8d7e1a, 0x6fdc3e00, 0x7e1ab200, 0x3e0019fc, 0xb2007061, 0x198d7e1a, 0x70613e00, 0x7e1ab200, + 0x3e00084d, 0xb2007061, 0x07c77e1a, 0x70613e00, 0x04b0b300, 0x7e1ab210, 0x0e001795, 0x70633e01, + 0xb8e4bd00, 0x0002401d, 0xffffffdf, 0x1ee4b6bf, 0xd902d4b6, 0x80000000, 0xfd9529ff, 0xe9fd049f, + 0x00defa05, 0x100b21fb, 0x0070553e, 0x404922f9, 0x0099cf08, 0xb26491c7, 0x02407e1a, 0x00a43300, + 0x0b030a10, 0x00107e01, 0x71653e00, 0x7e1ab200, 0xb20001e7, 0x0ca0b3a0, 0x0ca6b062, 0xb3130cf4, + 0xb32002a0, 0xb33803a0, 0x3e5e00a0, 0xb3007155, 0xb36811a0, 0xb36e12a0, 0x3e7e0da4, 0xb200712f, + 0x1bc77e1a, 0xb2a2b200, 0x00507e1a, 0x0020b300, 0xb21ab278, 0x3e2cb20b, 0xb2007161, 0x1cf47e1a, + 0xb2a0b200, 
0x00507e1a, 0x0000b300, 0xb21ab25c, 0x3e020b0c, 0xb2007161, 0x7e020b1a, 0x3d00182b, + 0x186c7ea4, 0x7e1ab200, 0x3e000050, 0xb2007165, 0x3e030b1a, 0xb200711b, 0x556f7e1a, 0x71473e00, + 0x7e1ab200, 0xb20055d5, 0x00a0b3ac, 0x0b1ab21c, 0x71613e19, 0x7e1ab200, 0xb2000050, 0x0c020b1a, + 0x02617e15, 0xf421fb00, 0x82f9e030, 0xffffc1e4, 0xb210c295, 0xffa0e4c5, 0x10a395ff, 0x48fea4b2, + 0x0147fe01, 0x903c8890, 0x8bb53477, 0x017db501, 0x0bb27ca0, 0x1ab28aa0, 0x0076ce7e, 0xa6b20bb2, + 0xce7e2ab2, 0x3bb20076, 0x1ab2a0b2, 0x0076ce7e, 0xa1b23bb2, 0xce7e2ab2, 0x10bc0076, 0x10699500, + 0x09bcadb2, 0xf410a600, 0x00890a0d, 0xd9bc0100, 0x017b98d0, 0xffff69e4, 0xfe100f94, 0xf9bc014e, + 0x24ee90f0, 0xa0100995, 0x90d9bcef, 0xbf01e9b5, 0x01ed98ec, 0x41fe4ab2, 0x2c119001, 0xa0011db5, + 0x76ce7e1c, 0x018b9800, 0x5ab2a0b2, 0x0076ce7e, 0xbc011b98, 0x1abf000a, 0xfbb00bbc, 0x30f42085, + 0x00c0b3f0, 0x014ffe33, 0xb508ff90, 0xfaa001fb, 0x9cbb2009, 0x0096b002, 0xbc211df4, 0xb9bcf5ac, + 0x014afe94, 0xbc059ffd, 0xa9a0f5bc, 0x9801afb5, 0xaabf01ab, 0xf81030f4, 0x014afe00, 0xbd019fb9, + 0xf5bfbc94, 0xa001a9b5, 0x01ab98af, 0x543eaabf, 0x30f40072, 0xfe82f9e4, 0x49fe014f, 0x30ff9001, + 0xb5389990, 0xfca001fd, 0xa0019bb5, 0xb2c2b29a, 0x00ddb3a6, 0xcba600bb, 0x00f70df5, 0x00ffff89, + 0x0cf5c9a6, 0xc6b1037a, 0x0df500ff, 0x080903af, 0x29bc180d, 0x24888fe5, 0x98fe3c00, 0xbbff94f0, + 0xd0b302d9, 0xbdbc1800, 0xbb2009f4, 0x9dbb042d, 0x9569bc02, 0xff046dbb, 0x2195b59f, 0xff25e410, + 0x4cb1ffff, 0xb20db1ff, 0x7e5bb24a, 0xb60076ce, 0x69951004, 0x0509fd10, 0x0df4a0a6, 0x01449214, + 0xa60002bc, 0x090cf420, 0x0cf5a0a6, 0x0abb036f, 0xff5bb202, 0x3ab23c01, 0x0076ce7e, 0xe40d01ff, + 0xb6ffff69, 0x09fd1004, 0xf4a0a605, 0x3392160d, 0x0002bc01, 0x0cf420a6, 0xf4a0a60b, 0x3392060d, + 0x10499401, 0x39ffb4bd, 0x1c85fba5, 0x0cf5dba6, 0xff8900ec, 0xd9a600ff, 0x00e90cf5, 0x00ffd6b1, + 0x02f10df5, 0x180c0809, 0x8fe5d9bc, 0x3c002488, 0x94f098fe, 0x72c9bcff, 0xed007db3, 0xf4dba600, + 0x26a60908, 0x00b60cf5, 0xb4bd010a, 0xb31c85fb, 0x090900c4, 0x0092cc01, 0x00ffff89, 0x0cf529a6, + 0x26b101c7, 0x0df500ff, 0x080902bb, 0x29bc180d, 0x24888fe5, 0x98fe3c00, 0xbcff94f0, 0x4db342d9, + 0xbc01cb00, 0x239502b2, 0xff25e410, 0xff0107ff, 0x5bb24c03, 0xce7e4ab2, 0x03ff0076, 0x1069950d, + 0xfd1004b6, 0xa0a60509, 0x92140df4, 0x02bc0144, 0xf420a600, 0xa0a6090c, 0x028b0cf5, 0xb2020abb, + 0x1c03ff5b, 0xce7e1ab2, 0x03ff0076, 0xff69e40d, 0x1004b6ff, 0xa60509fd, 0x160df4a0, 0xbc011192, + 0x20a60002, 0xa60b0cf4, 0x060df4a0, 0x94011192, 0x7bb21049, 0xfba519ff, 0xa4bd1c85, 0x85fbb4bd, + 0xffffd91c, 0xd9a600ff, 0x02010df5, 0x888f1809, 0xd9bc0024, 0x98fe3ce5, 0x94f0080c, 0x72c9bcff, + 0x1d0079b3, 0x9427bcff, 0xbbe4b7bc, 0x91b004d7, 0xbb200909, 0xb9bc0297, 0xf569bc05, 0xff9529bc, + 0x9dff85fe, 0x10439545, 0xffff41e4, 0xb25c03ff, 0x7e5ab21b, 0xff0076ce, 0x89950d03, 0x1004b610, + 0xa60509fd, 0x190df4a0, 0xbc015592, 0x40a60004, 0xa60e0cf4, 0x090df4a0, 0xbc015592, 0x0abb0004, + 0xff1bb202, 0x2ab22c03, 0x0076ce7e, 0xe40d03ff, 0xb6ffff89, 0x09fd1004, 0xf4a0a605, 0x2292190d, + 0x0004bc01, 0x0cf440a6, 0xf4a0a60e, 0x04bc090d, 0x01229200, 0xb40910b4, 0x59940930, 0x820abc10, + 0xf14529ff, 0xe4ffff14, 0xb2ffff40, 0x7e0ab21b, 0xb60076ce, 0xa5b21035, 0x0ab23bb2, 0x0076ce7e, + 0x42951bb2, 0xb2a0b210, 0x76ce7e2a, 0xb23bb200, 0x7e2ab2a1, 0xbc0076ce, 0x59950010, 0x0009bc10, + 0x0df410a6, 0x0000890a, 0xa0a9bc01, 0xbc100995, 0x8aa6a0a9, 0x00ed08f5, 0x0bf58aa6, 0x4ab200d4, + 0x85fbb4bd, 0xffffd91c, 0x29a600ff, 0x00fd0df5, 0x888f1809, 0x29bc0024, 0x98fe3ce5, 0x94f0080d, + 0x42d9bcff, 0x3f0049b3, 0xf4b4bcfe, 
0x04bb2000, 0x9560bc02, 0xbc0424bb, 0x239505b0, 0x859fff10, + 0xe47c03ff, 0xb2ffff25, 0x7e5bb27a, 0xff0076ce, 0x89950d03, 0x1004b610, 0xa60590ff, 0x190df4a0, + 0xbc017792, 0x20a60002, 0xa60e0cf4, 0x090df4a0, 0xbc017792, 0x0abb0002, 0xff5bb202, 0x1ab21c03, + 0x0076ce7e, 0xe40d03ff, 0xb6ffff89, 0x09fd1004, 0xf4a0a605, 0x1192190d, 0x0002bc01, 0x0cf420a6, + 0xf4a0a60e, 0x1192090d, 0x0002bc01, 0xbb107994, 0x0abb0464, 0x7519ff02, 0x0073cf3e, 0xffffffd9, + 0xf4c9a600, 0x18094e0d, 0xb23e080d, 0x0f940072, 0xff5ee410, 0x9467bcff, 0xa6f0febc, 0x2118f59f, + 0x014a92ff, 0x85fbb4bd, 0xb210091c, 0x73683e9c, 0x0c94bd00, 0x73683e20, 0x0d94bd00, 0x72b23e20, + 0x0d94bd00, 0x73ae3e20, 0xb2100900, 0x73ae3e9d, 0xb2100900, 0x72b23e9d, 0x01449200, 0x3e0002bc, + 0x9200730e, 0x02bc0144, 0x73fc3e00, 0xb2a9b200, 0x00a6b0af, 0xb22b1ef4, 0x00b6b0ba, 0xff131ef4, + 0xb9ffacfa, 0x051ef496, 0xaab900f8, 0xb900f801, 0xfaff01ba, 0x96b9ffac, 0x3ef01ff4, 0xb90076aa, + 0xbab201af, 0xf400b6b0, 0xaf3ed81f, 0xaf950076, 0x10b99510, 0xff91a9ff, 0xbffda0ba, 0xb0b9bc01, + 0xbc10b4b6, 0x00f8a0ba, 0x1400c0b3, 0xb93c94bd, 0x99af3cf8, 0xa6019990, 0xf51bf49c, 0x02f800f8, + 0x4c2f2637, 0xfc866c05, 0x254eb903, 0x179baf0f, 0xded1bf8c, 0x949f48e4, 0x95104317, 0xcee888a8, + 0x90186199, 0xa8bd1766, 0x6a9d039e, 0x7c88dbe6, 0x3e355a39, 0xe8d1712e, 0xf4b369f0, 0xfdda049f, + 0x7d4abea6, 0xdf8fb3c9, 0xae92df60, 0x8f1cb3bc, 0x6b04d605, 0xd1254c24, 0xf17782fd, 0x1cc60d9a, + 0x7a9adf76, 0xf41bc5aa, 0x254b000b, 0x7cdb66c1, 0xd1a54c9c, 0x57cc4a4d, 0x2dd17de9, 0x69ede398, + 0xa4006238, 0x4903e34b, 0x104ec91f, 0xe575c4cc, 0xd46e7336, 0xbff23478, 0x5c11b750, 0x5eebf002, + 0xe3affee4, 0xb2be33df, 0x52c00b5b, 0x700cb478, 0x12075eea, 0x6891e517, 0x37d9144e, 0x2c6fb2c6, + 0x126a5e5e, 0x1b70c600, 0x5549f978, 0x797e16a8, 0x011289b6, 0xb28d2b60, 0x17a2e0c0, 0x3a784f02, + 0xe8f810f6, 0x6b5f3edc, 0x24e4200e, 0xf10d3f30, 0x51d6579a, 0xd501c1cb, 0x9812304c, 0x80aa9ede, + 0xb0df0289, 0x854c9341, 0xfbef0760, 0xef8ccee0, 0xb52fac40, 0x9afb283d, 0x6631b87c, 0xfe446741, + 0x5724fac5, 0x954fdb0a, 0x27c71669, 0xcb24786c, 0x769022e7, 0x2037e7b8, 0xd42e3a88, 0x432721f9, + 0x23bc653c, 0x35de3141, 0x51de0585, 0xb371129f, 0xd842c094, 0x87c220c4, 0x4e1900ea, 0xe3b7f081, + 0xcc56c863, 0xd0286b09, 0x6ba90fb4, 0x71d61e74, 0x90af9538, 0x5d8bf4c7, 0x505f823f, 0x5807ba72, + 0xf839f8ff, 0x2b2f9ef8, 0x014192be, 0x8cf6508d, 0x4004f873, 0x34351b4a, 0x3aaf5899, 0xfed268f9, + 0x3263dfb8, 0x05e8ee80, 0x6ae58da6, 0x0b957594, 0x74d4688f, 0xa60df1f1, 0x084b3633, 0xc34a7c5a, + 0x3e837346, 0x93cf4225, 0x034187c8, 0x1ac29dd1, 0x1649fc43, 0x5b6cbdfd, 0x8b4a8cbb, 0xef7b79a7, + 0xac4680ed, 0xfb3f5166, 0x49ee3305, 0x1c43e2e0, 0xaae3763b, 0xb5f34a36, 0xc1c29bcc, 0x4fba1ee7, + 0x84281445, 0xf433485c, 0xd82eb8be, 0xc9c54560, 0xc55c590c, 0xa4655093, 0xc45e198f, 0x6170bb70, + 0x4811df45, 0xd8604741, 0x05e40d7a, 0x38e835e5, 0x1e21da2f, 0x6f4b8893, 0x7210d91d, 0x45ea9607, + 0x9551f61a, 0xf7eb3776, 0x2715bf34, 0x8246673f, 0xd483c307, 0xd9057ccf, 0x57d87dee, 0xf3e0d4d0, + 0x15e27f0c, 0xbd4048f8, 0x6a10945c, 0x8c853e1b, 0xc05d0129, 0x59ca1593, 0xfc96520c, 0x9ec28e67, + 0xc31e0056, 0x7ecadcb6, 0x6d6770b2, 0x444411ef, 0x2f6939f2, 0xe624b912, 0x04c68d24, 0x086480e8, + 0x37ddaa46, 0x846f14cf, 0x6a1c5792, 0xf364e7dc, 0xc5f34f34, 0xb80ed7a0, 0x41c761ea, 0xfea1529d, + 0xad52bb87, 0x7b37f439, 0x0248cc02, 0xf36c3ff7, 0x5bdfd618, 0x8d70c09c, 0xd964dff1, 0xb6c30e46, + 0xece152d9, 0x80b4a769, 0x0c50513e, 0x89bae0d3, 0x6c020451, 0x9ba0b60e, 0x8f311acc, 0x7dd53d31, + 0xf5fb654a, 0x8a503bee, 0xbdce703b, 0xa3ac30fd, 0xa8bfff04, 
0x251f3806, 0x611eaf96, 0xe42610d6, + 0xcb8ee561, 0x0dd3127b, 0x31dd518e, 0x22200355, 0x65e0b608, 0xf98819b1, 0x3d3ca72c, 0xcfd1cff3, + 0x3001e642, 0x002abc03, 0x0ca31cb2, 0xddedc6e6, 0x59a26b7a, 0xb82bc501, 0x50729fe7, 0xfa69a882, + 0xfb26c0cc, 0x63aaa935, 0x16b9377b, 0x2e70620c, 0x0f12f98b, 0xab0aae2e, 0xd792e81a, 0xe6aa5bc5, + 0x7d068040, 0x60979354, 0x8854294e, 0x4ec86265, 0xd75ac400, 0x88675d28, 0x863db6e6, 0xf653d19d, + 0x04b29b5c, 0x6c90c590, 0x501e03b9, 0x48e27ead, 0x87d7e3f3, 0xeec4fa33, 0x4686db34, 0x4803e4b6, + 0x76c9a82d, 0xd539494b, 0xfb225de8, 0xe5929078, 0xfa33ebaf, 0x19ec3043, 0x9835ed60, 0x4adb8ef6, + 0x9bff3cbb, 0x2575c729, 0xd0381344, 0x9df8ba62, 0xa961d980, 0xc0a3d980, 0x56427155, 0xe8396f36, + 0xf767e0d9, 0xf86c1242, 0x6dd62949, 0x8393a7b2, 0x92abd33e, 0xe74efa5d, 0x76c320fb, 0xe7cfb386, + 0x98557b5f, 0xa52bbfad, 0x167cc754, 0x70196ba0, 0x7306570a, 0x3fd69f30, 0xb705df73, 0xb97aba82, + 0xd6017088, 0x78267e48, 0xe56c3b32, 0xb2fcd324, 0x74f6f2e2, 0x4983c762, 0x5c8a790a, 0xbe77d6c5, + 0x6efea989, 0xc9f581d9, 0xe0620a02, 0x811acd8d, 0xecbe928c, 0x60926da7, 0x8045de58, 0x2cdee67f, + 0x8cce5952, 0xd3ab2391, 0xca5b50e3, 0xfdc53811, 0xe10f5a53, 0x511e2477, 0x9fa10e23, 0x0a3a87b7, + 0xde16a592, 0xb1c64b42, 0x7484bc1d, 0x9f674eff, 0xaf90752c, 0x5555ddc8, 0xf5c94d78, 0x948c19c0, + 0x4828d108, 0x22408287, 0xf1b51b7d, 0x15577225, 0x5a9700d1, 0x69bf89b2, 0x9183a20d, 0x36e30ada, + 0x5e46adb3, 0xc8b9e177, 0xff25ac8b, 0x5c7459d3, 0x70ab741a, 0xeff0527a, 0xa3bd1993, 0x83d3a782, + 0x68c08518, 0x5957ad0e, 0xbd731eea, 0x8c62ca4e, 0xf4f321d2, 0xfa44b344, 0x90ef776c, 0xc4c9e0f3, + 0x0b3db6a8, 0x9b5853c3, 0xace1ba48, 0x9da47c48, 0xcae3e128, 0xc4e1dcfb, 0x27fad396, 0x8aa51131, + 0x9c18a603, 0x9ab1028c, 0x12123361, 0xad312acf, 0x17371287, 0x7f8b6b43, 0x9eb87dab, 0x6e578f6d, + 0xbfdfd69a, 0x9cdda417, 0x1d82de0c, 0xba0b004e, 0x0aad0cb5, 0xbafc6c79, 0x70e53d04, 0xd2d16165, + 0x761972e6, 0xf4fcf019, 0x8a54efcb, 0x1a24fb02, 0x78efe6d7, 0x212d477d, 0xc0003783, 0x7f0ddbeb, + 0x3f70cd10, 0xb6400a11, 0x62f9217f, 0x87408df8, 0x22881c2a, 0xd2bac483, 0x812b31d6, 0xb55c9959, + 0xd454cd9f, 0x9bb1c09d, 0x71f579f9, 0x81c7d37a, 0x240f01e5, 0xc64e8236, 0x2e462dca, 0x159250cd, + 0xccdfd07b, 0xd07a7875, 0x6fe18b19, 0x31b64d5f, 0xb105838e, 0xdc725897, 0x6ebb9bf3, 0x6fbb9c66, + 0xf4f321d2, 0xfa44b344, 0x90ef776c, 0xc4c9e0f3, 0x8d7128cf, 0xa62047a6, 0xab853f49, 0x6f5683f8, + 0xc2639248, 0x662e4aed, 0x5fed0a48, 0x9f748c46, 0x1994d657, 0x09abe2d0, 0x5fd8a825, 0x6022a5ca, + 0x0ae6a484, 0x0e3e8e8e, 0x4ab41369, 0x460d5538, 0x0e3a56d3, 0x2a23e6ff, 0xb5745baf, 0xca05d164, + 0x4d29fb20, 0x70c37ef3, 0x12101fe3, 0x7c09d30e, 0x4096cc75, 0x741731bf, 0x028a6630, 0x2801cd9c, + 0x2ae1585a, 0x04ea6026, 0xf4445f30, 0x1c03accf, 0x8b33f33b, 0x84aa8a79, 0x672238ac, 0x25ac1b92, + 0x02d9f3c0, 0x170a1402, 0x6f963d3c, 0xb8a6ff35, 0xdffd4b60, 0x8f3164dc, 0x80305225, 0x963aedd8, + 0x11432853, 0x3618d0e5, 0x097158a8, 0xd4d01f08, 0x77695f2b, 0x377ffef6, 0x0514b539, 0x0bd1e91b, + 0x72276fdb, 0x19bb42df, 0xb9ddc149, 0xe6e837d2, 0xe7b186c5, 0xd995c364, 0xb48f4b9a, 0xf1b539b4, + 0xf42ccb50, 0xf26cf035, 0xd7a900b6, 0x6f6421cc, 0xc4b6eab1, 0x765ba054, 0xf88d3522, 0xa81c31c7, + 0xc30fecc6, 0x728804d7, 0x087be9ad, 0x18250e18, 0xfba13390, 0xd7c30483, 0x8167d0f1, 0xd888bb9b, + 0x797e839e, 0x55a5bb67, 0x33e6327c, 0x921b7744, 0x1a6adb91, 0xc03f0809, 0xd651d26a, 0x2600a5d5, + 0x99498faa, 0x5c2ccaec, 0x6c908977, 0xbb9417aa, 0x17a08202, 0x32d6467e, 0x94b65c0a, 0x22c7caad, + 0x04eac232, 0x115eff41, 0x72d6f454, 0xca99f5cf, 0xef906af8, 0x92a5c90e, 0xa07c96cf, 
0xc117f0f0, + 0xc413c5c6, 0x36994795, 0x68392c09, 0x8cb6cc7f, 0x91313fde, 0x3f77699e, 0xe940aae0, 0x021592b5, + 0x2b637883, 0x9e9db2ef, 0x1279c086, 0x535754b8, 0x5580f786, 0xe8ac3f23, 0x46734916, 0x557e84fc, + 0x3e601f69, 0xb3caa312, 0x907ec245, 0x527989f8, 0xce79dce6, 0x6f261352, 0x055a598d, 0xcae4b584, + 0x3bc5ce67, 0xd1909af8, 0x75e5ca5e, 0xfb6cfe9e, 0x30ca74ad, 0x63271c84, 0x73f22c68, 0x386ef062, + 0xd9697dee, 0x2cac1d12, 0x27e9dc6a, 0xbad6f393, 0x5debb5a5, 0x6b1339d4, 0x267ceb1a, 0xcc4d80a9, + 0x730de767, 0x57b22004, 0x1b76a08b, 0xe6c42549, 0xf59ad472, 0xbb782744, 0x14417485, 0x9a0f0e96, + 0x9e2799bc, 0x642f4d4c, 0x648416a0, 0x4d2907eb, 0x932146aa, 0x368a923f, 0x9fdadd0b, 0x16f2e758, + 0x26c61195, 0x1272d745, 0x8894d060, 0xf540e36b, 0x9e2799bc, 0x642f4d4c, 0x648416a0, 0x4d2907eb, + 0x932146aa, 0x368a923f, 0x9fdadd0b, 0x16f2e758, 0x26c61195, 0x1272d745, 0x8894d060, 0xf540e36b, + 0x9e2799bc, 0x642f4d4c, 0x648416a0, 0x4d2907eb, 0x932146aa, 0x368a923f, 0x9fdadd0b, 0x16f2e758, + 0x26c61195, 0x1272d745, 0x8894d060, 0xf540e36b, 0x9e2799bc, 0x642f4d4c, 0x648416a0, 0x4d2907eb, + 0x00000002, 0x0000002e, 0x00000000, 0x00000000, 0x206e614a, 0x32203031, 0x20323230, 0x333a3930, + 0x32323a36, 0x00580020, 0x00000000, 0x00000000, 0x4bb116ce, 0x005a9e8c, 0xdc8cdfd1, 0x9ca9094d, + 0x39ecb736, 0x777b071c, 0xdac3f5b4, 0xa1bb964c, 0xb0f63b92, 0x340cb1d5, 0xd8a942a6, 0xf466ffab, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x9fa62a39, 0xeafd8b15, 0x4c979bb0, 0xed0365f5, + 0x620d246e, 0xac97f8b8, 0xfbe1a191, 0x508703bc, 0x0000e38e, 0x00000000, 0x0000f5c2, 0x00000000, + 0x0000fc0f, 0x00000000, 0x007a0000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000005c, 0x00000058, 0x00001000, + 0x00000180, 0x00002060, 0x00002064, 0x00002068, 0x0000206c, 0x00002070, 0x00002074, 0x00002078, + 0x00002400, 0x00002404, 0x00002418, 0x0000240c, 0x00002414, 0x00002408, 0x00002410, 0x0000242c, + 0x00002420, 0x00002428, 0x0000241c, 0x00002424, 0x00002440, 0x00002434, 0x0000243c, 0x00002430, + 0x00002438, 0x00002450, 0x00002454, 0x0000245c, 0x00002458, 0x00002468, 0x00002464, 0x00002460, + 0x00002098, 0x00002008, 0x00002014, 0x00002100, 0x00002018, 0x0000201c, 0x0000221c, 0x00003064, + 0x00003024, 0x00003284, 0x00003400, 0x00003404, 0x00003420, 0x0000340c, 0x00003414, 0x0000341c, + 0x00003408, 0x00003410, 0x00003418, 0x0000343c, 0x00003428, 0x00003430, 0x00003438, 0x00003424, + 0x0000342c, 0x00003434, 0x00003450, 0x00003454, 0x0000345c, 0x00003458, 0x00003478, 0x0000346c, + 0x00003474, 0x00003468, 0x00003470, 0x00003340, 0x0000321c, 0x00003030, 0x00003034, 0x00003038, + 0x0000303c, 0x00003074, 0x00003390, 0x00002860, 0x000028a0, 0x000028e0, 0x00002920, 0x00002870, + 0x000028b0, 0x000028f0, 0x00002930, 0x0000281c, 0x00000000, 0x00000000, 0x00000000, 0x00000008, + 0x00000002, 0x00000000, 0x00000001, 0x00000000, 0x00000008, 0x00000008, 0x00000000, 0x00000001, + 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000001, 0x00000003, 0x00000003, 0x00000002, + 0x00000000, 0x00000001, 0x00000000, 0x00000003, 0x00000008, 0x00000000, 0x00000001, 0x00000000, + 0x00000003, 0x0000000d, 0x00000000, 0x00000001, 0x00000000, 0x00000022, 0x00000007, 0x00000000, + 0x00000001, 0x00000000, 0x00000022, 0x0000000c, 0x00000000, 0x00000001, 0x00000000, 0x00000027, + 0x00000007, 0x00000000, 0x00000001, 0x00000000, 0x00000027, 0x0000000c, 0x00000000, 0x00000001, + 0x00000000, 0x0000002c, 0x00000007, 0x00000000, 0x00000001, 0x00000000, 0x0000002c, 0x0000000c, + 0x00000000, 0x00000001, 0x00000000, 0x00000031, 0x00000007, 0x00000000, 0x00000001, 0x00000003, + 0x00000029, 0x00000008, 0x00000000, 0x00000001, 0x00000003, 0x00000026, 0x00000002, 0x00000000, + 0x00000001, 0x00000003, 0x00000029, 0x00000006, 0x00000000, 0x00000001, 0x00000003, 0x00000004, + 0x0000000c, 0x00000000, 0x00000001, 0x00000003, 0x00000002, 0x00000002, 0x00000000, 0x00000001, + 0x00000003, 0x00000002, 0x00000006, 0x00000000, 0x00000001, 0x00000003, 0x00000002, 0x0000000a, + 0x00000000, 0x00000001, 0x00000003, 0x00000002, 0x0000000e, 0x00000000, 0x00000001, 0x00000003, + 0x00000002, 0x00000001, 0x00000000, 0x00000001, 0x00000003, 0x00000000, 0x00000004, 0x00000000, + 0x00000001, 0x00000003, 0x00000000, 0x00000008, 0x00000000, 0x00000001, 0x00000003, 0x00000000, + 0x0000000c, 0x00000000, 0x00000001, 
0x00000003, 0x00000000, 0x00000000, 0x00000000, 0x00000001, + 0x00000003, 0x00000030, 0x00000004, 0x00000000, 0x00000001, 0x00000003, 0x00000032, 0x00000002, + 0x00000000, 0x00000001, 0x00000003, 0x00000032, 0x00000006, 0x00000000, 0x00000001, 0x00000003, + 0x00000034, 0x00000004, 0x00000000, 0x00000001, 0x00000003, 0x00000036, 0x00000002, 0x00000000, + 0x00000001, 0x00000003, 0x00000038, 0x00000004, 0x00000000, 0x00000001, 0x00000000, 0x00000000, + 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000000, 0x00000001, + 0x00000000, 0x00000000, 0x00000007, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000007, + 0x00000000, 0x00000000, 0x00010002, 0x70200005, 0x01100008, 0x1021000a, 0x01040020, 0x0ced0028, + 0x2a2b0038, 0x2c290039, 0x2a2b003a, 0x2829003b, 0x2527003c, 0x2224003d, 0x1f21003e, 0x1c1e003f, + 0x181a0040, 0x14160041, 0x10120042, 0x000d0043, 0x0208004e, 0x00980051, 0x00ec0067, 0x00010002, + 0xb0210005, 0x01100008, 0x1021000a, 0x01040020, 0x0ced0028, 0x2a2b0038, 0x2c290039, 0x2a2b003a, + 0x2829003b, 0x2527003c, 0x2224003d, 0x1f21003e, 0x1c1e003f, 0x181a0040, 0x14160041, 0x10120042, + 0x000d0043, 0x0208004e, 0x00980051, 0x00ec0067, 0x00010002, 0x69200005, 0x01100008, 0x1021000a, + 0x0eee0028, 0x2a2b0038, 0x2c290039, 0x2a2b003a, 0x2829003b, 0x2527003c, 0x2224003d, 0x1f21003e, + 0x1c1e003f, 0x181a0040, 0x14160041, 0x10120042, 0x000d0043, 0x0208004e, 0x00980051, 0x00ec0067, + 0x00010002, 0x70220005, 0x01100008, 0x1021000a, 0x01040020, 0x0ced0028, 0x2a2b0038, 0x2c290039, + 0x2a2b003a, 0x2829003b, 0x2527003c, 0x2224003d, 0x1f21003e, 0x1c1e003f, 0x181a0040, 0x14160041, + 0x10120042, 0x000d0043, 0x0208004e, 0x00980051, 0x00ec0067, 0x00010002, 0x8d1a0005, 0x01100008, + 0x1021000a, 0x01040020, 0x09e90028, 0x2a2b0038, 0x2c290039, 0x2a2b003a, 0x2829003b, 0x2527003c, + 0x2224003d, 0x1f21003e, 0x1c1e003f, 0x181a0040, 0x14160041, 0x10120042, 0x000d0043, 0x0208004e, + 0x00980051, 0x00ec0067, 0x00800009, 0x000900c0, 0x00c00000, 0x00280067, 0x00120038, 0x00130240, + 0x00140030, 0x00190220, 0x001e0164, 0x003af8ff, 0x003f6110, 0x00450100, 0x004b0513, 0x00500088, + 0x00552222, 0x005a0013, 0x00632270, 0x006d0c01, 0x007b0233, 0x0080001e, 0x00aa3145, 0x00af0ca5, + 0x00b40010, 0x00b9070e, 0x00c30098, 0x00c80002, 0x00cd0a60, 0x00d70733, 0x00fa0017, 0x01060001, + 0x010d0057, 0x011b1007, 0x01220023, 0x01540483, 0x015e0077, 0x016360d3, 0x01681053, 0x016d0a0a, + 0x01723400, 0x0177050f, 0x017c0005, 0x01810d0d, 0x01bb000b, 0x01c00120, 0x01c53fff, 0x01ca3fff, + 0x01cf003f, 0x01d20262, 0x01d50271, 0x01d80271, 0x01db0271, 0x01e10271, 0x01e40003, 0x01e70014, + 0x0223695f, 0x022a00f2, 0x022b8484, 0x022d900f, 0x022e8484, 0x0230901f, 0x02314343, 0x0233901f, + 0x02348484, 0x0237901f, 0x0238901f, 0x02820800, 0x02830310, 0x00120196, 0x00130240, 0x00140030, + 0x00190220, 0x001e0023, 0x003af0ff, 0x003f2131, 0x00450111, 0x00550513, 0x00630013, 0x006d0c40, + 0x007b0233, 0x0080001e, 0x00902145, 0x00aa0040, 0x00af0ca5, 0x00b40010, 0x00b90711, 0x00c30098, + 0x00c80008, 0x00cd0a60, 0x00d70733, 0x00fa0017, 0x01060001, 0x010d0057, 0x01221007, 0x01540483, + 0x015e0077, 0x01636192, 0x01681053, 0x016d0055, 0x01811500, 0x01bb0008, 0x01c00120, 0x01c53fff, + 0x01cf300f, 0x01d20273, 0x01d50271, 0x01d80271, 0x01db0271, 0x01e10271, 0x01e40003, 0x01e70014, + 0x022a695f, 0x022d8282, 0x02308282, 0x02334343, 0x02388282, 0x02820800, 0x02830310, 0x00120196, + 0x00130240, 0x00140030, 0x00190228, 0x001e00a3, 0x0023a4fc, 0x00240009, 0x003a0021, 0x003f6110, + 0x00450100, 0x00500513, 0x00552222, 0x00630013, 0x006d0c40, 
0x007b0233, 0x0080001f, 0x00aa3146, + 0x00af0ca5, 0x00b40010, 0x00b90713, 0x00c30098, 0x00c80006, 0x00cd0a60, 0x00d70733, 0x00fa0007, + 0x01060001, 0x010d0057, 0x01221007, 0x01540483, 0x015e0077, 0x0163a0d5, 0x01681053, 0x01814322, + 0x01bb0008, 0x01c00120, 0x01c53fff, 0x01cf300f, 0x01d20273, 0x01d50271, 0x01d80271, 0x01db0271, + 0x01e10271, 0x01e40003, 0x01e70014, 0x022a695f, 0x022d4343, 0x02304343, 0x02334343, 0x02384343, + 0x02820800, 0x02830310, 0x00120196, 0x00130240, 0x00140030, 0x00190220, 0x001e0164, 0x003aa03c, + 0x003f2130, 0x00450100, 0x00550513, 0x006d0013, 0x007b0233, 0x0080001e, 0x00b43145, 0x00b90709, + 0x00c30098, 0x00c80002, 0x00cd0a60, 0x00d70733, 0x00fa0017, 0x01060001, 0x010d0057, 0x01221007, + 0x01540483, 0x015e0077, 0x016360d5, 0x01811053, 0x01bb0008, 0x01c00120, 0x01c53fff, 0x01cf300f, + 0x01d20273, 0x01d50271, 0x01d80271, 0x01db0271, 0x01e10271, 0x01e40003, 0x01e70014, 0x022a695f, + 0x022d8484, 0x02308484, 0x02334343, 0x02828484, 0x02830310, 0x00240196, 0x00300000, 0x00010013, + 0x00b90001, 0x00ff00cb, 0x00010264, 0x02640031, 0x00310011, 0x0030006c, 0x02640030, 0x0f020102, + 0x00060080, 0x0220000f, 0x011101ff, 0x0154020b, 0x020effff, 0xffff0159, 0x01630211, 0x02140007, + 0xffff0168, 0x016d0217, 0x021affff, 0xffff0172, 0x0181021d, 0x00ee000f, 0x000f0090, 0x00813384, + 0x00002000, 0xffffdfff, 0x00813084, 0x9e5107eb, 0x0000f800, 0x00813030, 0x02ef0000, 0xf800ffff, + 0x00813034, 0x0600bd27, 0x00000000, 0x00813038, 0x0000eb87, 0xffff0000, 0x0081303c, 0x03c602ef, + 0xf800f800, 0x00813074, 0x9e320000, 0x0000ffff, 0x008133ac, 0x317fda00, 0x80000000, 0x00813304, + 0x00000000, 0xfffffffe, 0x008130a8, 0x00028c60, 0xfffc0003, 0x00810284, 0x00000000, 0xfffffffe, + 0x0081028c, 0x00000000, 0xfffffffe, 0x00810068, 0x40000000, 0xbfffffff, 0x00810020, 0x00001500, + 0xffff00ff, 0x00810288, 0x00000000, 0xffff00ff, 0x00812080, 0x00000058, 0xfffff800, 0x00812088, + 0x000042a8, 0xffff0000, 0x00812148, 0x00000009, 0xffffffc0, 0x00812030, 0x00000058, 0xc0000000, + 0x00812020, 0x32a80000, 0x0000ffff, 0x0081201c, 0x0000a318, 0xffff0000, 0x00812234, 0x00000000, + 0xfffffffe, 0x0081004c, 0x02129000, 0xfc0c0fff, 0x00810044, 0x02000000, 0x000fffff, 0x00812828, + 0x00006800, 0xffff00ff, 0x00803814, 0x06000000, 0x00ffffff, 0x00813080, 0x00002e20, 0xffff0000, + 0x00813030, 0x03ce0000, 0xf800ffff, 0x00813034, 0x0000c4c1, 0x00000000, 0x00813038, 0x0000f230, + 0xffff0000, 0x0081303c, 0x03ce03ce, 0xf800f800, 0x008133ac, 0x317d5133, 0x80000000, 0x00812084, + 0x00002e24, 0xffff0000, 0x00812088, 0x000002af, 0xfffff800, 0x00812020, 0x42af0000, 0x0000ffff, + 0x00812018, 0x00002e48, 0xffff0000, 0x0081201c, 0x2e240000, 0x0000ffff, 0x00813084, 0x07af0000, + 0xf800ffff, 0x00810020, 0x00000d00, 0xffff00ff, 0x00802294, 0x00000007, 0xfffffff8, 0x00802288, + 0x00000007, 0xfffffff8, 0x00802284, 0x00000007, 0xfffffff8, 0x0081708c, 0x00000000, 0xfffffefe, + 0x00817294, 0x000001a1, 0xfffffe5e, 0x00817288, 0x00000181, 0xfffffe7e, 0x00817284, 0x000001fe, + 0xfffffe01, 0x0081728c, 0x0000007e, 0xffffff81, 0x00816904, 0x80000000, 0x7fffffff, 0x00816a94, + 0x0008ffff, 0xfff70000, 0x00816ab4, 0x00010101, 0xfffe0000, 0x00816a88, 0x0008ffff, 0xfff70000, + 0x00816aa8, 0x00010101, 0xfffe0000, 0x00816a84, 0x000fffff, 0xfff00000, 0x00816aa4, 0x00070101, + 0xfff80000, 0x00816aac, 0x00040000, 0xfffbffff, 0x00815884, 0x00000000, 0xfffffffe, 0x00815a94, + 0x00000003, 0xfffffffc, 0x00815a88, 0x00000003, 0xfffffffc, 0x00815a84, 0x0000002b, 0xffffffd4, + 0x00815a8c, 0x00000028, 0xffffffd7, 0x00815804, 0x000000a0, 0xffffff0f, 0x00816124, 
0x800003e8, + 0x7f000000, 0x00816294, 0x19260000, 0xe6d9ffff, 0x00816200, 0x00000008, 0xfffffff7, 0x00816288, + 0x19260000, 0xe6d9ffff, 0x00816284, 0x27fe0000, 0xd801ffff, 0x0081628c, 0x26d80000, 0xd927ffff, + 0x00815084, 0x00000000, 0xfffffffe, 0x00815294, 0x00000101, 0xfffffefe, 0x00815288, 0x00000101, + 0xfffffefe, 0x00815284, 0x07800701, 0xf87ff8fe, 0x0081528c, 0x00000600, 0xfffff9ff, 0x00815004, + 0x000000a0, 0xffffff0f, 0x00816c0c, 0x00640001, 0x00000000, 0x00816c08, 0x00010001, 0x00000000, + 0x00816c18, 0x00013a84, 0x00000000, 0x00816c10, 0x00013a84, 0x00000000, 0x00816c00, 0x00000024, + 0xffffffdb, 0x0081640c, 0x00640001, 0x00000000, 0x00816408, 0x00010001, 0x00000000, 0x00816418, + 0x00013a84, 0x00000000, 0x00816410, 0x00013a84, 0x00000000, 0x00816400, 0x00000024, 0xffffffdb, + 0x00816a8c, 0x00070000, 0xfff8ffff, 0x00816128, 0x0000006c, 0xfffff000, 0x00816ab4, 0x00040000, + 0xfffbffff, 0x00816aa8, 0x00040000, 0xfffbffff, 0x00816aac, 0x00000000, 0xfffbffff, 0x00816128, + 0x00000064, 0xfffff000, 0x00802088, 0x00000059, 0xffffff00, 0x00816904, 0x00000000, 0x7fffffff, + 0x00816ab4, 0x00020000, 0xfffdffff, 0x00816aa8, 0x00020000, 0xfffdffff, 0x00816124, 0x00000000, + 0x7fffffff, 0x0081612c, 0x00030000, 0xfff0ffff, 0x00816100, 0x00000400, 0xfffff0ff, 0x00816128, + 0x00000064, 0xfffff000, 0x02020100, 0x03030303, 0x04040404, 0x04040404, 0x05050505, 0x05050505, + 0x05050505, 0x05050505, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, + 0x06060606, 0x06060606, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, + 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, 0x07070707, + 0x07070707, 0x07070707, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000010, 0x00000000, 0x7c010001, 0x04100c13, 0x00000293, 0x0000002c, 0x00000018, 0x00007272, + 0x0000041b, 0x00000304, 0x04200e00, 0x00000002, 0x0980440e, 0x0b820a81, 0x0d840c83, 0x0f860e85, + 0x11881087, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +// array of code and data overlay offsets and sizes +// Defined as follows: +// OS Code Offset +// OS Code Size +// OS Data Offset +// OS Data Size +// NumApps (N) +// App 0 Code Offset +// App 0 Code Size +// App 1 Code Offset +// App 1 Code Size +// . . . . +// . . . . +// App N-1 Code Offset +// App N-1 Code Size +// App 0 Data Offset +// App 0 Data Size +// App 1 Data Offset +// App 1 Data Size +// . . . . +// . . . . 
+// App N-1 Data Offset +// App N-1 Data Size +// OS Ovl Offset +// OS Ovl Size +// +// +const NvU32 minion_ucode_header_lr10_prod[] = { + 0, + 30464, + 32768, + 9728, + 1, + 30464, + 2304, + 32768, + 0, + 32768, + 0, +}; + +const NvU32 minion_ucode_data_size_lr10_prod = 12096; + +#endif // _MINION_UCODE_LR10_PROD_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/pmgr_lr10.h b/src/common/nvswitch/kernel/inc/lr10/pmgr_lr10.h new file mode 100644 index 000000000..f0ee85e12 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/pmgr_lr10.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PMGR_LR10_H_ +#define _PMGR_LR10_H_ + +#include "lr10.h" + +void +nvswitch_init_pmgr_lr10 +( + nvswitch_device *device +); + +void +nvswitch_init_pmgr_devices_lr10 +( + nvswitch_device *device +); + +NvU32 +nvswitch_read_physical_id_lr10 +( + nvswitch_device *device +); + +NvlStatus +nvswitch_get_rom_info_lr10 +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom +); + +void +nvswitch_i2c_set_hw_speed_mode_lr10 +( + nvswitch_device *device, + NvU32 port, + NvU32 speedMode +); + +NvBool +nvswitch_is_i2c_supported_lr10 +( + nvswitch_device *device +); + +#endif //_PMGR_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/smbpbi_lr10.h b/src/common/nvswitch/kernel/inc/lr10/smbpbi_lr10.h new file mode 100644 index 000000000..182ff653b --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/smbpbi_lr10.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SMBPBI_LR10_H_ +#define _SMBPBI_LR10_H_ + +NvlStatus +nvswitch_smbpbi_get_dem_num_messages_lr10 +( + nvswitch_device *device, + NvU8 *pMsgCount +); + +#endif //_SMBPBI_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/soe_lr10.h b/src/common/nvswitch/kernel/inc/lr10/soe_lr10.h new file mode 100644 index 000000000..a10370692 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/soe_lr10.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _SOE_LR10_H_ +#define _SOE_LR10_H_ + +#include "lr10.h" + +typedef const struct +{ + + NvU32 appVersion; + NvU32 appCodeStartOffset; + NvU32 appCodeSize; + NvU32 appCodeImemOffset; + NvU32 appCodeIsSecure; + NvU32 appDataStartOffset; + NvU32 appDataSize; + NvU32 appDataDmemOffset; +} SOE_UCODE_APP_INFO_LR10, *PSOE_UCODE_APP_INFO_LR10; + +typedef const struct +{ + + NvU32 version; + NvU32 numApps; + NvU32 codeEntryPoint; + SOE_UCODE_APP_INFO_LR10 apps[0]; +} SOE_UCODE_HDR_INFO_LR10, *PSOE_UCODE_HDR_INFO_LR10; + +#define NVSWITCH_SOE_WR32_LR10(_d, _instance, _dev, _reg, _data) \ + NVSWITCH_ENG_WR32_LR10(_d, SOE, , _instance, _dev, _reg, _data) + +#define NVSWITCH_SOE_RD32_LR10(_d, _instance, _dev, _reg) \ + NVSWITCH_ENG_RD32_LR10(_d, SOE, _instance, _dev, _reg) + +// +// Internal function declarations +// +NvlStatus nvswitch_init_soe_lr10(nvswitch_device *device); +NvlStatus nvswitch_soe_prepare_for_reset_lr10(nvswitch_device *device); +void nvswitch_soe_unregister_events_lr10(nvswitch_device *device); +void nvswitch_therm_soe_callback_lr10(nvswitch_device *device, union RM_FLCN_MSG *pMsg, + void *pParams, NvU32 seqDesc, NV_STATUS status); +NvlStatus nvswitch_soe_set_ucode_core_lr10(nvswitch_device *device, NvBool bFalcon); +NvlStatus nvswitch_soe_register_event_callbacks_lr10(nvswitch_device *device); +#endif //_SOE_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/lr10/therm_lr10.h b/src/common/nvswitch/kernel/inc/lr10/therm_lr10.h new file mode 100644 index 000000000..aa60ab36d --- /dev/null +++ b/src/common/nvswitch/kernel/inc/lr10/therm_lr10.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _THERM_LR10_H_ +#define _THERM_LR10_H_ + +// +// LR10-specific fuse assignments +// +#define NVSWITCH_FUSE_OPT_TDIODE_LR10 NV_FUSE_OPT_CP2_TDIODE_OFFSET + +NvlStatus +nvswitch_init_thermal_lr10 +( + nvswitch_device *device +); + +NvlStatus +nvswitch_ctrl_therm_read_temperature_lr10 +( + nvswitch_device *device, + NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info +); + +NvlStatus +nvswitch_ctrl_therm_get_temperature_limit_lr10 +( + nvswitch_device *device, + NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS *info +); + +void +nvswitch_monitor_thermal_alert_lr10 +( + nvswitch_device *device +); + +#endif //_THERM_LR10_H_ diff --git a/src/common/nvswitch/kernel/inc/pmgr_nvswitch.h b/src/common/nvswitch/kernel/inc/pmgr_nvswitch.h new file mode 100644 index 000000000..5bb1caf03 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/pmgr_nvswitch.h @@ -0,0 +1,327 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PMGR_NVSWITCH_H_ +#define _PMGR_NVSWITCH_H_ + +#include "ctrl_dev_nvswitch.h" + +#define NVSWITCH_BITS_PER_BYTE 8 + +#define NVSWITCH_HIGH NV_TRUE +#define NVSWITCH_LOW NV_FALSE + +/*! Extract the first byte of a 10-bit address. */ +#define NVSWITCH_GET_ADDRESS_10BIT_FIRST(a) ((NvU8)((((a) >> 8) & 0x6) | 0xF0)) + +/*! Extract the second byte of a 10-bit address. */ +#define NVSWITCH_GET_ADDRESS_10BIT_SECOND(a) ((NvU8)(((a) >> 1) & 0xFF)) + +/*! Attaching read to read application interface */ +#define NVSWITCH_I2C_READ(a,b) _nvswitch_i2c_i2cRead(device, a, b) + +#define NVSWITCH_I2C_DELAY(a) NVSWITCH_NSEC_DELAY(a) + +#define NVSWITCH_MAX_I2C_PORTS 4 + +/*! bit 0 of address set indicates read cycle to follow */ +#define NVSWITCH_I2C_READCYCLE ((NvU8)0x01) + +/*! Determine if an address is valid in the 7-bit address space. */ +#define NVSWITCH_I2C_IS_7BIT_I2C_ADDRESS(a) ((a) <= 0xFF) + +/*! Determine if an address is valid in the 10-bit address space. 
*/
+#define NVSWITCH_I2C_IS_10BIT_I2C_ADDRESS(a)    ((a) <= 0x7FF)
+
+// by-the-spec delay defaults (yields 100KHz)
+#define NVSWITCH_I2C_PROFILE_STANDARD_tF             300
+#define NVSWITCH_I2C_PROFILE_STANDARD_tR            1000
+#define NVSWITCH_I2C_PROFILE_STANDARD_tSUDAT        1800    // actually, spec calls for (min) 250, but we've borrowed from tHDDAT
+#define NVSWITCH_I2C_PROFILE_STANDARD_tHDDAT        1900    // actually, spec calls for (max) 3450, but we've loaned time to tSUDAT
+#define NVSWITCH_I2C_PROFILE_STANDARD_tHIGH         4000
+#define NVSWITCH_I2C_PROFILE_STANDARD_tSUSTO        4000
+#define NVSWITCH_I2C_PROFILE_STANDARD_tHDSTA        4000
+#define NVSWITCH_I2C_PROFILE_STANDARD_tSUSTA        4700
+#define NVSWITCH_I2C_PROFILE_STANDARD_tBUF          4700
+#define NVSWITCH_I2C_PROFILE_STANDARD_tLOW          4700    // NVSWITCH_I2C_PROFILE_STANDARD_tSUDAT + NVSWITCH_I2C_PROFILE_STANDARD_tR + NVSWITCH_I2C_PROFILE_STANDARD_tHDDAT
+#define NVSWITCH_I2C_PROFILE_STANDARD_CYCLEPERIOD  10000    // NVSWITCH_I2C_PROFILE_STANDARD_tF + NVSWITCH_I2C_PROFILE_STANDARD_tLOW + NVSWITCH_I2C_PROFILE_STANDARD_tR + NVSWITCH_I2C_PROFILE_STANDARD_tHIGH
+
+// by-the-spec delay defaults (yields 400KHz)
+#define NVSWITCH_I2C_PROFILE_FAST_tF                 300
+#define NVSWITCH_I2C_PROFILE_FAST_tR                 300
+#define NVSWITCH_I2C_PROFILE_FAST_tSUDAT             200    // actually, spec calls for (min) 100, but we've borrowed from tHDDAT
+#define NVSWITCH_I2C_PROFILE_FAST_tHDDAT             800    // actually, spec calls for (max) 900, but we've loaned time to tSUDAT
+#define NVSWITCH_I2C_PROFILE_FAST_tHIGH              600
+#define NVSWITCH_I2C_PROFILE_FAST_tSUSTO             600
+#define NVSWITCH_I2C_PROFILE_FAST_tHDSTA             600
+#define NVSWITCH_I2C_PROFILE_FAST_tSUSTA             600
+#define NVSWITCH_I2C_PROFILE_FAST_tBUF              1300
+#define NVSWITCH_I2C_PROFILE_FAST_tLOW              1300    // NVSWITCH_I2C_PROFILE_STANDARD_tSUDAT + NVSWITCH_I2C_PROFILE_STANDARD_tR + NVSWITCH_I2C_PROFILE_STANDARD_tHDDAT
+#define NVSWITCH_I2C_PROFILE_FAST_CYCLEPERIOD       2500    // NVSWITCH_I2C_PROFILE_STANDARD_tF + NVSWITCH_I2C_PROFILE_STANDARD_tLOW + NVSWITCH_I2C_PROFILE_STANDARD_tR + NVSWITCH_I2C_PROFILE_STANDARD_tHIGH
+
+/*!
+ * The I2C specification does not specify any timeout conditions for clock
+ * stretching, i.e. any device can hold down SCL as long as it likes so this
+ * value needs to be adjusted on case by case basis.
+ */
+#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_1200US  1200
+#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_1000KHZ (NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ * 4)
+#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_400KHZ  (NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ * 4)
+#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_300KHZ  (NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ * 3)
+#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_200KHZ  (NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ * 2)
+#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ  (NVSWITCH_I2C_SCL_CLK_TIMEOUT_1200US / 10)
+
+/* A reasonable SCL timeout is five cycles at 20 KHz. Full use should be rare
+ * in devices, occurring when in the middle of a real-time task. That comes to
+ * 25 clock cycles at 100 KHz, or 250 us. */
+#define NVSWITCH_I2C_SCL_CLK_TIMEOUT_250US 250
+
+/* We don't want I2C to deal with traffic slower than 20 KHz (50 us cycle).
+ */
+#define NVSWITCH_I2C_MAX_CYCLE_US 50
+
+/* The longest HW I2C transaction: S BYTE*2 S BYTE*4 P, at 1 each for S/P, and
+ * 9 for each byte (+ack). */
+#define NVSWITCH_I2C_HW_MAX_CYCLES ((1 * 3) + (9 * 6))
+
+/* We determine the HW operational timeout as the longest operation, plus two
+ * long SCL clock stretches. */
+#define I2C_HW_IDLE_TIMEOUT_NS (1000 * \
+    ((NVSWITCH_I2C_MAX_CYCLE_US * NVSWITCH_I2C_HW_MAX_CYCLES) + (NVSWITCH_I2C_SCL_CLK_TIMEOUT_1200US * 2)))
+
+//
+// PMGR board configuration information
+//
+
+#define NVSWITCH_DESCRIBE_I2C_DEVICE(_port, _addr, _type, _rdWrAccessMask) \
+    {NVSWITCH_I2C_PORT ## _port, _addr, NVSWITCH_I2C_DEVICE ## _type, _rdWrAccessMask}
+
+#define NVSWITCH_DESCRIBE_GPIO_PIN(_pin, _func, _hw_select, _misc_io) \
+    {_pin, NVSWITCH_GPIO_ENTRY_FUNCTION ## _func, _hw_select, \
+     NVSWITCH_GPIO_ENTRY_MISC_IO_ ## _misc_io}
+
+/*! Structure containing a description of the I2C bus as needed by the software
+ * bit-banging implementation.
+ */
+typedef struct
+{
+    NvU32 sclOut;   // Bit number for SCL Output
+    NvU32 sdaOut;   // Bit number for SDA Output
+
+    NvU32 sclIn;    // Bit number for SCL Input
+    NvU32 sdaIn;    // Bit number for SDA Input
+
+    NvU32 port;     // Port number of the driving lines
+    NvU32 curLine;  // Required for isLineHighFunction
+
+    NvU32 regCache; // Keeps the cache value of registers.
+    //
+    // The following timings are used as stand-ins for I2C spec timings, so
+    // that different speed modes may share the same code.
+    //
+    NvU16 tF;
+    NvU16 tR;
+    NvU16 tSuDat;
+    NvU16 tHdDat;
+    NvU16 tHigh;
+    NvU16 tSuSto;
+    NvU16 tHdSta;
+    NvU16 tSuSta;
+    NvU16 tBuf;
+    NvU16 tLow;
+} NVSWITCH_I2C_SW_BUS;
+
+/*! @brief Internal Command structure for HW I2C to perform I2C transaction */
+typedef struct
+{
+    NvU32  port;
+    NvU32  bRead;
+    NvU32  cntl;
+    NvU32  data;
+    NvU32  bytesRemaining;
+    NvS32  status;
+    NvU8  *pMessage;
+    NvBool bBlockProtocol;
+} NVSWITCH_I2C_HW_CMD, *PNVSWITCH_I2C_HW_CMD;
+
+typedef enum
+{
+    NVSWITCH_I2C_ACQUIRER_NONE = 0,
+    NVSWITCH_I2C_ACQUIRER_UNKNOWN,
+    NVSWITCH_I2C_ACQUIRER_IOCTL,        // e.g. MODS
+    NVSWITCH_I2C_ACQUIRER_EXTERNAL,     // e.g. Linux Direct
+
+} NVSWITCH_I2C_ACQUIRER;
+
+typedef enum {
+    i2cProfile_Standard,
+    i2cProfile_Fast,
+    i2cProfile_End
+} NVSWITCH_I2CPROFILE;
+
+typedef enum
+{
+    pmgrReg_i2cAddr,
+    pmgrReg_i2cCntl,
+    pmgrReg_i2cTiming,
+    pmgrReg_i2cOverride,
+    pmgrReg_i2cPoll,
+    pmgrReg_i2cData,
+    pmgrReg_unsupported
+} NVSWITCH_PMGRREG_TYPE;
+
+// I2C Speed limits
+#define NVSWITCH_I2C_SPEED_LIMIT_NONE   NV_U16_MAX  //Close enough to not having a speed limit.
+#define NVSWITCH_I2C_SPEED_1000KHZ      1000
+#define NVSWITCH_I2C_SPEED_400KHZ       400
+#define NVSWITCH_I2C_SPEED_300KHZ       300
+#define NVSWITCH_I2C_SPEED_200KHZ       200
+#define NVSWITCH_I2C_SPEED_100KHZ       100
+
+enum
+{
+    i2cSpeedLimit_dcb = 0,
+    i2cSpeedLimit_ctrl,
+
+    // Always leave as last element!
+    NVSWITCH_I2C_SPEED_LIMIT_MAX_DEVICES
+};
+
+// Timing for I2C cycles (allows for possibility of tweaking timing)
+typedef struct __NVSWITCH_NVSWITCH_I2CTIMING
+{
+    NvU32 tR;       // at 100KHz, normally 1000ns
+    NvU32 tF;       // at 100KHz, normally 300ns
+    NvU32 tHIGH;    // at 100KHz, normally 4000ns
+    NvU32 tSUDAT;   // at 100KHz, normally 250ns (min), but we borrow time from tHDDAT to improve clock phase
+    NvU32 tHDDAT;   // at 100KHz, normally 3450ns (max), but we loan time to tSUDAT to improve clock phase
+    NvU32 tSUSTO;   // at 100KHz, normally 4000ns
+    NvU32 tHDSTA;   // at 100KHz, normally 4000ns
+    NvU32 tBUF;     // at 100KHz, normally 4700ns
+
+    NvU32 tLOW;     // computed to be: tSUDAT + tR + tHDDAT
+
+    NvU32 speed;    // Port speed
+
+} NVSWITCH_I2CTIMING;
+
+#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL                2:0
+#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL                 5:3
+#define NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PUBLIC               0x00000000
+#define NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PRIVILEGED           0x00000001
+#define NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INTERNAL             0x00000002
+#define NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INACCESSIBLE         0x00000003
+#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL_PUBLIC          NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PUBLIC
+#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL_PRIVILEGED      NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PRIVILEGED
+#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL_INTERNAL        NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INTERNAL
+#define NV_NVSWITCH_I2C_DEVICE_READ_ACCESS_LEVEL_INACCESSIBLE    NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INACCESSIBLE
+#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL_PUBLIC         NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PUBLIC
+#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL_PRIVILEGED     NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_PRIVILEGED
+#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL_INTERNAL       NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INTERNAL
+#define NV_NVSWITCH_I2C_DEVICE_WRITE_ACCESS_LEVEL_INACCESSIBLE   NV_NVSWITCH_I2C_DEVICE_ACCESS_LEVEL_INACCESSIBLE
+
+typedef struct NVSWITCH_I2C_DEVICE_DESCRIPTOR
+{
+    NVSWITCH_I2C_PORT_TYPE i2cPortLogical; //_member))) - ((NvU8 *)(_basePtr))))
+
+#define NVSWITCH_ELEMENT_PRESENT(_ptr, _element, _size) \
+    (NV_OFFSETOF_MEMBER((_ptr), _element) + sizeof((_ptr)->_element) <= (_size))
+
+#define NVSWITCH_ELEMENT_READ(_ptr, _element, _size, _default) \
+    (NVSWITCH_ELEMENT_PRESENT(_ptr, _element, _size) ?
\ + ((_ptr)->_element) : (_default)) + +#define NVSWITCH_ELEMENT_VALIDATE(_ptr, _element, _size, _default, _expected) \ + do \ + { \ + NvU32 data = NVSWITCH_ELEMENT_READ(_ptr, _element, _size, _default); \ + if (data != (_expected)) \ + { \ + NVSWITCH_PRINT(device, SETUP, \ + "Element '%s->%s'=0x%x but expected 0x%x\n", \ + #_ptr, #_element, data, (NvU32) (_expected)); \ + } \ + } while(0) + +#define NVSWITCH_ELEMENT_CHECK(_ptr, _element, _size, _default) \ + NVSWITCH_ELEMENT_VALIDATE(_ptr, _element, _size, _default, _default) + +#define NVSWITCH_STRUCT_PACKED_ALIGNED(typeName, bytes) \ + typedef struct __attribute__((packed, aligned(bytes))) + +#define NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +// +// AT24CM02 EEPROM +// http://ww1.microchip.com/downloads/en/DeviceDoc/Atmel-8828-SEEPROM-AT24CM02-Datasheet.pdf +// + +#define AT24CM02_INDEX_SIZE 18 // Addressing bits +#define AT24CM02_BLOCK_SIZE 256 // R/W block size (bytes) + +// +// AT24C02C EEPROM +// http://ww1.microchip.com/downloads/en/DeviceDoc/Atmel-8700-SEEPROM-AT24C01C-02C-Datasheet.pdf +// + +#define AT24C02C_INDEX_SIZE 8 // Addressing bits +#define AT24C02C_BLOCK_SIZE 8 // R/W block size (bytes) + +// +// AT24C02D EEPROM +// http://ww1.microchip.com/downloads/en/devicedoc/atmel-8871f-seeprom-at24c01d-02d-datasheet.pdf +// 2kb EEPROM used on LR10 P4790 B00 platform +// + +#define AT24C02D_INDEX_SIZE 8 // Addressing bits +#define AT24C02D_BLOCK_SIZE 8 // R/W block size (bytes) + +typedef struct +{ + NvU32 i2c_port; + NvU32 i2c_address; + NvU32 device_type; + NvU32 index_size; + NvU32 block_size; + NvU32 block_count; + NvU32 eeprom_size; +} NVSWITCH_EEPROM_TYPE; + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_EEPROM_HEADER, 1) +{ + char signature[4]; + NvU16 version; + NvU16 header_size; + NvU16 pci_vendor_id; + NvU16 pci_device_id; + NvU16 pci_system_vendor_id; + NvU16 pci_system_device_id; + NvU16 firmware_size; + NvU8 reserved[13]; + NvU8 checksum; +} NVSWITCH_EEPROM_HEADER; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_HEADER, 1) +{ + NvU16 id; + char signature[4]; + NvU16 bcd_version; + NvU8 header_size; + NvU8 token_size; + NvU8 token_entries; + NvU8 checksum; +} NVSWITCH_BIT_HEADER; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_BIT_TOKEN_CLOCK_PTRS 0x43 +#define NVSWITCH_BIT_TOKEN_NVINIT_PTRS 0x49 +#define NVSWITCH_BIT_TOKEN_NOP 0x4E +#define NVSWITCH_BIT_TOKEN_PERF_PTRS 0x50 +#define NVSWITCH_BIT_TOKEN_BRIDGE_FW_DATA 0x52 +#define NVSWITCH_BIT_TOKEN_DCB_PTRS 0x6E + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_TOKEN, 1) +{ + NvU8 id; + NvU8 data_version; + NvU16 data_size; + NvU16 data_offset; +} NVSWITCH_BIT_TOKEN; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +// 0x43: BIT_TOKEN_CLOCK_PTRS +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_CLOCK_PTRS, 1) +{ + NvU32 pll_info_table; + NvU32 vbe_mode_pclk; + NvU32 clocks_table; + NvU32 clocks_programming; + NvU32 nafll; + NvU32 adc_table; + NvU32 freq_control; +} NVSWITCH_BIT_CLOCK_PTRS; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_CLOCK_PTRS_PLL_INFO_VERSION 0x50 + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_PLL_INFO_HEADER, 1) +{ + NvU8 version; + NvU8 header_size; + NvU8 entry_size; + NvU8 entry_count; +} NVSWITCH_PLL_INFO_HEADER; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_PLL_INFO_ENTRY, 1) +{ + NvU8 pll_id; + NvU16 ref_min_mhz; + NvU16 ref_max_mhz; + NvU16 vco_min_mhz; + NvU16 vco_max_mhz; + NvU16 update_min_mhz; + NvU16 update_max_mhz; + NvU8 m_min; + NvU8 m_max; + NvU8 n_min; + 
NvU8 n_max; + NvU8 pl_min; + NvU8 pl_max; +} NVSWITCH_PLL_INFO_ENTRY; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_PLL_ID_SYSPLL 0x07 + +// 0x49: BIT_TOKEN_NVINIT_PTRS +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_NVINIT_PTRS, 1) +{ + NvU16 init_script; + NvU16 macro_index; + NvU16 macro_table; + NvU16 condition; + NvU16 io_condition; + NvU16 io_flag_condition; + NvU16 init_function; + NvU16 private_boot; + NvU16 data_arrays; + NvU16 pcie_settings; + NvU16 devinit; + NvU16 devinit_size; + NvU16 boot_script; + NvU16 boot_script_size; + NvU16 nvlink_config; + NvU16 boot_script_nonGC6; + NvU16 boot_script_nonGC6_size; +} NVSWITCH_BIT_NVINIT_PTRS; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_NVLINK_CONFIG, 1) +{ + NvU8 version; + NvU8 size; + NvU16 reserved; + NvU64 link_disable_mask; // 1 = disable + NvU64 link_speed_mask; // 1 = safe mode + NvU64 link_refclk_mask; // 0 = 100MHz, 1 = 133MHz + NvU8 flags; + NvU64 ac_coupled_mask; // 0 = DC, 1 = AC +} NVSWITCH_NVLINK_CONFIG; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +// 0x52: BIT_TOKEN_BRIDGE_FW_DATA +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_BRIDGE_FW_DATA, 1) +{ + NvU32 firmware_version; + NvU8 oem_version; + NvU16 firmware_size; + char BIOS_MOD_date[8]; + NvU32 firmware_flags; + NvU16 eng_product_name; + NvU8 eng_product_name_size; + NvU16 nvswitch_instance_id; +} NVSWITCH_BIT_BRIDGE_FW_DATA; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_BUILD 0:0 +#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_BUILD_REL 0 +#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_BUILD_ENG 1 +#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_I2C 1:1 +#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_I2C_MASTER 0 +#define NVSWITCH_BIT_BRIDGE_FW_DATA_FLAGS_I2C_NOT_MASTER 1 + +// 0x6E: BIT_TOKEN_DCB_PTRS +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_BIT_DCB_PTRS, 1) +{ + NvU16 dcb_header_ptr; +} NVSWITCH_BIT_DCB_PTRS; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_DCB_HEADER_VERSION_41 0x41 +#define NVSWITCH_DCB_HEADER_SIGNATURE 0x4edcbdcb + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_DCB_HEADER, 1) +{ + NvU8 version; + NvU8 header_size; + NvU8 entry_count; + NvU8 entry_size; + NvU16 ccb_block_ptr; + NvU32 dcb_signature; + NvU16 gpio_table; + NvU16 input_devices; + NvU16 personal_cinema; + NvU16 spread_spectrum; + NvU16 i2c_devices; + NvU16 connectors; + NvU8 flags; + NvU16 hdtv; + NvU16 switched_outputs; + NvU32 display_patch; + NvU32 connector_patch; +} NVSWITCH_DCB_HEADER; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_GPIO_TABLE_VERSION_42 0x42 + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_GPIO_TABLE, 1) +{ + NvU8 version; + NvU8 header_size; + NvU8 entry_count; + NvU8 entry_size; + NvU16 ext_gpio_master; +} NVSWITCH_GPIO_TABLE; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_GPIO_ENTRY, 1) +{ + NvU8 pin; + NvU8 function; + NvU8 output; + NvU8 input; + NvU8 misc; +} NVSWITCH_GPIO_ENTRY; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_GPIO_ENTRY_PIN_NUM 5:0 +#define NVSWITCH_GPIO_ENTRY_PIN_IO_TYPE 6:6 +#define NVSWITCH_GPIO_ENTRY_PIN_INIT_STATE 7:7 + +#define NVSWITCH_GPIO_ENTRY_FUNCTION 7:0 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_THERMAL_EVENT 17 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_OVERTEMP 35 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_THERMAL_ALERT 52 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_THERMAL_CRITICAL 53 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_POWER_ALERT 76 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID0 209 +#define 
NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID1 210 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID2 211 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID3 212 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID4 213 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID5 214 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID6 215 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID7 216 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID8 217 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID9 218 +#define NVSWITCH_GPIO_ENTRY_FUNCTION_SKIP_ENTRY 255 + +#define NVSWITCH_GPIO_ENTRY_OUTPUT 7:0 + +#define NVSWITCH_GPIO_ENTRY_INPUT_HW_SELECT 4:0 +#define NVSWITCH_GPIO_ENTRY_INPUT_HW_SELECT_NONE 0 +#define NVSWITCH_GPIO_ENTRY_INPUT_HW_SELECT_THERMAL_ALERT 22 +#define NVSWITCH_GPIO_ENTRY_INPUT_HW_SELECT_POWER_ALERT 23 +#define NVSWITCH_GPIO_ENTRY_INPUT_GSYNC 5:5 +#define NVSWITCH_GPIO_ENTRY_INPUT_OPEN_DRAIN 6:6 +#define NVSWITCH_GPIO_ENTRY_INPUT_PWM 7:7 +//#define NVSWITCH_GPIO_ENTRY_INPUT_3V3 ?:? + +#define NVSWITCH_GPIO_ENTRY_MISC_LOCK 3:0 +#define NVSWITCH_GPIO_ENTRY_MISC_IO 7:4 +#define NVSWITCH_GPIO_ENTRY_MISC_IO_UNUSED 0x0 +#define NVSWITCH_GPIO_ENTRY_MISC_IO_INV_OUT 0x1 +#define NVSWITCH_GPIO_ENTRY_MISC_IO_INV_OUT_TRISTATE 0x3 +#define NVSWITCH_GPIO_ENTRY_MISC_IO_OUT 0x4 +#define NVSWITCH_GPIO_ENTRY_MISC_IO_IN_STEREO_TRISTATE 0x6 +#define NVSWITCH_GPIO_ENTRY_MISC_IO_INV_OUT_TRISTATE_LO 0x9 +#define NVSWITCH_GPIO_ENTRY_MISC_IO_INV_IN 0xB +#define NVSWITCH_GPIO_ENTRY_MISC_IO_OUT_TRISTATE 0xC +#define NVSWITCH_GPIO_ENTRY_MISC_IO_IN 0xE + +#define NVSWITCH_I2C_VERSION 0x40 + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_I2C_TABLE, 1) +{ + NvU8 version; + NvU8 header_size; + NvU8 entry_count; + NvU8 entry_size; + NvU8 flags; +} NVSWITCH_I2C_TABLE; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_I2C_ENTRY, 1) +{ + NvU32 device; +} NVSWITCH_I2C_ENTRY; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_I2C_ENTRY_TYPE 7:0 +#define NVSWITCH_I2C_ENTRY_ADDRESS 15:8 +#define NVSWITCH_I2C_ENTRY_RESERVED1 19:16 +#define NVSWITCH_I2C_ENTRY_PORT_1 20:20 +#define NVSWITCH_I2C_ENTRY_WR_ACCESS 23:21 +#define NVSWITCH_I2C_ENTRY_RD_ACCESS 26:24 +#define NVSWITCH_I2C_ENTRY_PORT_2 27:27 +#define NVSWITCH_I2C_ENTRY_RESERVED2 31:28 + +#define NVSWITCH_CCB_VERSION 0x41 + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_CCB_TABLE, 1) +{ + NvU8 version; + NvU8 header_size; + NvU8 entry_count; + NvU8 entry_size; + NvU8 comm_port[4]; +} NVSWITCH_CCB_TABLE; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +NVSWITCH_STRUCT_PACKED_ALIGNED(_NVSWITCH_CCB_ENTRY, 1) +{ + NvU32 device; +} NVSWITCH_CCB_ENTRY; +NVSWITCH_STRUCT_PACKED_ALIGNED_SUFFIX + +#define NVSWITCH_CCB_DEVICE_I2C_PORT 4:0 +#define NVSWITCH_CCB_DEVICE_DPAUX 9:5 +#define NVSWITCH_CCB_DEVICE_VOLTAGE 10:10 +#define NVSWITCH_CCB_DEVICE_RESERVED 27:11 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED 31:28 + +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_DEFAULT 0x0 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_100KHZ 0x1 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_200KHZ 0x2 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_400KHZ 0x3 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_800KHZ 0x4 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_1600KHZ 0x5 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_3400KHZ 0x6 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_60KHZ 0x7 +#define NVSWITCH_CCB_DEVICE_I2C_SPEED_300KHZ 0x8 + +// +// Firmware data +// + +#define NVSWITCH_PRODUCT_NAME_MAX_LEN 64 + +typedef struct +{ + NvBool valid; + NvU32 ref_min_mhz; + NvU32 ref_max_mhz; + NvU32 vco_min_mhz; + NvU32 vco_max_mhz; + NvU32 update_min_mhz; + 
NvU32 update_max_mhz; + NvU32 m_min; + NvU32 m_max; + NvU32 n_min; + NvU32 n_max; + NvU32 pl_min; + NvU32 pl_max; +} NVSWITCH_PLL_LIMITS; + +typedef struct +{ + NvBool valid; + NvU32 i2c_speed; + NvBool i2c_33v; +} NVSWITCH_I2C_PORT; + +#define NVSWITCH_MAX_I2C_DEVICES 16 + +typedef struct +{ + NvU32 pin; + NvU32 function; + NvU32 hw_select; + NvU32 misc; +} NVSWITCH_GPIO_INFO; + +#define NVSWITCH_MAX_GPIO_PINS 25 + +typedef struct +{ + NvU32 firmware_size; + + // ROM Header + NvU16 pci_vendor_id; + NvU16 pci_device_id; + NvU16 pci_system_vendor_id; + NvU16 pci_system_device_id; + + // Firmware data + struct + { + NvBool bridge_fw_found; + NvU32 firmware_version; + NvU8 oem_version; + char BIOS_MOD_date[8]; + NvBool fw_release_build; + char product_name[NVSWITCH_PRODUCT_NAME_MAX_LEN+1]; + NvU16 instance_id; + } bridge; + + // Clocks + struct + { + NvBool clocks_found; + NVSWITCH_PLL_LIMITS sys_pll; + } clocks; + + // NVLink init + struct + { + NvBool link_config_found; + NvU64 link_enable_mask; // 1 = enabled + NvU64 link_ac_coupled_mask; // 0 = DC, 1 = AC + } nvlink; + + // DCB + struct + { + NvBool dcb_found; + NVSWITCH_I2C_PORT i2c[NVSWITCH_MAX_I2C_PORTS]; + NvU32 i2c_device_count; + NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE i2c_device[NVSWITCH_MAX_I2C_DEVICES]; + NvU32 gpio_pin_count; + NVSWITCH_GPIO_INFO gpio_pin[NVSWITCH_MAX_GPIO_PINS]; + } dcb; + +} NVSWITCH_FIRMWARE; + +#define NVSWITCH_FIRMWARE_BRIDGE_INSTANCE_ID_UNKNOWN 0xFFFF +#define NVSWITCH_FIRMWARE_BRIDGE_INSTANCE_ID_NORMAL 0xFFFE + +void +nvswitch_read_rom_tables +( + nvswitch_device *device, + NVSWITCH_FIRMWARE *firmware +); + + +#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c" +#define BYTE_TO_BINARY(byte) \ + (byte & 0x80 ? '1' : '0'), \ + (byte & 0x40 ? '1' : '0'), \ + (byte & 0x20 ? '1' : '0'), \ + (byte & 0x10 ? '1' : '0'), \ + (byte & 0x08 ? '1' : '0'), \ + (byte & 0x04 ? '1' : '0'), \ + (byte & 0x02 ? '1' : '0'), \ + (byte & 0x01 ? 
'1' : '0') + +#if !defined(BIOSTYPES_H_FILE) +#define bios_U008 NvU32 +#define bios_U016 NvU32 +#define bios_U032 NvU32 +#define bios_S008 NvS32 +#define bios_S016 NvS32 +#define bios_S032 NvS32 +#endif // !defined(BIOSTYPES_H_FILE) + +/************************************************************************************************************** +* Description: +* Definitions of BIOS BIT structures as defined starting in Core 5 +* +**************************************************************************************************************/ +#if !defined(_BIT_H_) +#define BIT_HEADER_ID 0xB8FF +#define BIT_HEADER_SIGNATURE 0x00544942 // "BIT\0" +#define BIT_HEADER_SIZE_OFFSET 8 +#define BIT_HEADER_LATEST_KNOWN_VERSION 0x100 +#endif // !defined(_BIT_H_) + +#define PCI_ROM_HEADER_SIZE 0x18 +#define PCI_DATA_STRUCT_SIZE 0x1c +#define PCI_ROM_HEADER_PCI_DATA_SIZE (PCI_ROM_HEADER_SIZE + PCI_DATA_STRUCT_SIZE) // ROM Header + PCI Dat Structure size +#define PCI_EXP_ROM_SIGNATURE 0xaa55 +#define PCI_DATA_STRUCT_SIGNATURE 0x52494350 // "PCIR" in dword format + +#define NVLINK_CONFIG_DATA_HEADER_VER_20 0x2 +#define NVLINK_CONFIG_DATA_HEADER_20_SIZE 8 +#define NVLINK_CONFIG_DATA_HEADER_20_FMT "6b1w" + +typedef struct _PCI_DATA_STRUCT +{ + bios_U032 sig; // 00h: Signature, the string "PCIR" or NVIDIA's alternate "NPDS" + bios_U016 vendorID; // 04h: Vendor Identification + bios_U016 deviceID; // 06h: Device Identification + bios_U016 deviceListPtr; // 08h: Device List Pointer + bios_U016 pciDataStructLen; // 0Ah: PCI Data Structure Length + bios_U008 pciDataStructRev; // 0Ch: PCI Data Structure Revision + bios_U008 classCode[3]; // 0Dh: Class Code + bios_U016 imageLen; // 10h: Image Length (units of 512 bytes) + bios_U016 vendorRomRev; // 12h: Revision Level of the Vendor's ROM + bios_U008 codeType; // 14h: holds NBSI_OBJ_CODE_TYPE (0x70) and others + bios_U008 lastImage; // 15h: Last Image Indicator: bit7=1 is lastImage + bios_U016 maxRunTimeImageLen; // 16h: Maximum Run-time Image Length (units of 512 bytes) + bios_U016 configUtilityCodePtr; // 18h: Pointer to Configurations Utility Code Header + bios_U016 CMDTFCLPEntryPointPtr; // 1Ah: Pointer to DMTF CLP Entry Point +} PCI_DATA_STRUCT, *PPCI_DATA_STRUCT; +#define PCI_DATA_STRUCT_FMT "1d4w4b2w2b3w" + +// BIT_TOKEN_NVINIT_PTRS 0x49 // 'I' Initialization Table Pointers +struct BIT_DATA_NVINIT_PTRS_V1 +{ + bios_U016 InitScriptTablePtr; // Init script table pointer + bios_U016 MacroIndexTablePtr; // Macro index table pointer + bios_U016 MacroTablePtr; // Macro table pointer + bios_U016 ConditionTablePtr; // Condition table pointer + bios_U016 IoConditionTablePtr; // IO Condition table pointer + bios_U016 IoFlagConditionTablePtr; // IO Flag Condition table pointer + bios_U016 InitFunctionTablePtr; // Init Function table pointer + bios_U016 VBIOSPrivateTablePtr; // VBIOS private table pointer + bios_U016 DataArraysTablePtr; // Data arrays table pointer + bios_U016 PCIESettingsScriptPtr; // PCI-E settings script pointer + bios_U016 DevinitTablesPtr; // Pointer to tables required by Devinit opcodes + bios_U016 DevinitTablesSize; // Size of tables required by Devinit opcodes + bios_U016 BootScriptsPtr; // Pointer to Devinit Boot Scripts + bios_U016 BootScriptsSize; // Size of Devinit Boot Scripts + bios_U016 NvlinkConfigDataPtr; // Pointer to NVLink Config Data +}; +#define BIT_DATA_NVINIT_PTRS_V1_30_FMT "15w" +typedef struct BIT_DATA_NVINIT_PTRS_V1 BIT_DATA_NVINIT_PTRS_V1; + +#define BIT_TOKEN_BIOSDATA 0x42 // 'B' BIOS Data +#define BIT_TOKEN_NVINIT_PTRS 
0x49 // 'I' + +struct BIT_HEADER_V1_00 +{ + bios_U016 Id; // BMP=0x7FFF/BIT=0xB8FF + bios_U032 Signature; // 0x00544942 - BIT Data Structure Signature + bios_U016 BCD_Version; // BIT Version - 0x0100 for 1.00 + bios_U008 HeaderSize; // This version is 12 bytes long + bios_U008 TokenSize; // This version has 6 byte long Tokens + bios_U008 TokenEntries; // Number of Entries + bios_U008 HeaderChksum; // 0 Checksum of the header +}; +#define BIT_HEADER_V1_00_FMT "1w1d1w4b" +typedef struct BIT_HEADER_V1_00 BIT_HEADER_V1_00; + +struct BIT_TOKEN_V1_00 +{ + bios_U008 TokenId; + bios_U008 DataVersion; + bios_U016 DataSize; + bios_U016 DataPtr; +}; +#define BIT_TOKEN_V1_00_FMT "2b2w" +typedef struct BIT_TOKEN_V1_00 BIT_TOKEN_V1_00; + + +// BIT_TOKEN_BIOSDATA 0x42 // 'B' BIOS Data +struct BIT_DATA_BIOSDATA_V1 +{ + bios_U032 Version; // BIOS Binary Version Ex. 5.40.00.01.12 = 0x05400001 + bios_U008 OemVersion; // OEM Version Number Ex. 5.40.00.01.12 = 0x12 + // OEM can override the two fields above + bios_U008 Checksum; // Filled by MakeVGA + bios_U016 Int15CallbacksPost; // + bios_U016 Int15CallbacksSystem; // + bios_U016 BoardId; // + bios_U016 FrameCount; // Frame count for signon message delay + bios_U008 BiosmodDate[8]; // '00/00/04' Date BIOSMod was last run +}; +#define BIT_DATA_BIOSDATA_V1_FMT "1d2b4w8b" +typedef struct BIT_DATA_BIOSDATA_V1 BIT_DATA_BIOSDATA_V1; + +struct BIT_DATA_BIOSDATA_V2 +{ + bios_U032 Version; // BIOS Binary Version Ex. 5.40.00.01.12 = 0x05400001 + bios_U008 OemVersion; // OEM Version Number Ex. 5.40.00.01.12 = 0x12 + // OEM can override the two fields above + bios_U008 Checksum; // Filled by MakeVGA + bios_U016 Int15CallbacksPost; // + bios_U016 Int15CallbacksSystem; // + bios_U016 FrameCount; // Frame count for signon message delay + bios_U032 Reserved1; + bios_U032 Reserved2; + bios_U008 MaxHeadsAtPost; + bios_U008 MemorySizeReport; + bios_U008 HorizontalScaleFactor; + bios_U008 VerticalScaleFactor; + bios_U016 DataTablePtr; + bios_U016 RomPackPtr; + bios_U016 AppliedRomPacksPtr; + bios_U008 AppliedRomPackMax; + bios_U008 AppliedRomPackCount; + bios_U008 ModuleMapExternal; + bios_U032 CompressionInfoPtr; +}; +#define BIT_DATA_BIOSDATA_V2_FMT "1d2b3w2d4b3w3b1d" +typedef struct BIT_DATA_BIOSDATA_V2 BIT_DATA_BIOSDATA_V2; + +#ifndef PCI_VENDOR_ID_NVIDIA +#define PCI_VENDOR_ID_NVIDIA 0x10DE +#endif + +typedef struct _nvlink_Config_Data_Header_20 +{ + bios_U008 Version; // NVLink Config Data Structure version + bios_U008 HeaderSize; // Size of header + bios_U008 BaseEntrySize; + bios_U008 BaseEntryCount; + bios_U008 LinkEntrySize; + bios_U008 LinkEntryCount; + bios_U016 Reserved; // Reserved +} NVLINK_CONFIG_DATA_HEADER_20, *PNVLINK_CONFIG_DATA_HEADER_20; + +#define NV_NVLINK_VBIOS_PARAM0_LINK 0:0 +#define NV_NVLINK_VBIOS_PARAM0_LINK_ENABLE 0x0 +#define NV_NVLINK_VBIOS_PARAM0_LINK_DISABLE 0x1 +#define NV_NVLINK_VBIOS_PARAM0_RESERVED1 1:1 +#define NV_NVLINK_VBIOS_PARAM0_ACDC_MODE 2:2 +#define NV_NVLINK_VBIOS_PARAM0_ACDC_MODE_DC 0x0 +#define NV_NVLINK_VBIOS_PARAM0_ACDC_MODE_AC 0x1 +#define NV_NVLINK_VBIOS_PARAM0_RECEIVER_DETECT 3:3 +#define NV_NVLINK_VBIOS_PARAM0_RECEIVER_DETECT_DISABLE 0x0 +#define NV_NVLINK_VBIOS_PARAM0_RECEIVER_DETECT_ENABLE 0x1 +#define NV_NVLINK_VBIOS_PARAM0_RESTORE_PHY_TRAINING 4:4 +#define NV_NVLINK_VBIOS_PARAM0_RESTORE_PHY_TRAINING_DISABLE 0x0 +#define NV_NVLINK_VBIOS_PARAM0_RESTORE_PHY_TRAINING_ENABLE 0x1 +#define NV_NVLINK_VBIOS_PARAM0_SLM 5:5 +#define NV_NVLINK_VBIOS_PARAM0_SLM_DISABLE 0x0 +#define NV_NVLINK_VBIOS_PARAM0_SLM_ENABLE 0x1 +#define 
NV_NVLINK_VBIOS_PARAM0_L2 6:6 +#define NV_NVLINK_VBIOS_PARAM0_L2_DISABLE 0x0 +#define NV_NVLINK_VBIOS_PARAM0_L2_ENABLE 0x1 +#define NV_NVLINK_VBIOS_PARAM0_RESERVED2 7:7 + +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE 7:0 +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_50_00000 0x00 +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_16_00000 0x01 +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_20_00000 0x02 +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_25_00000 0x03 +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_25_78125 0x04 +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_32_00000 0x05 +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_40_00000 0x06 +#define NV_NVLINK_VBIOS_PARAM1_LINE_RATE_53_12500 0x07 + +#define NV_NVLINK_VBIOS_PARAM2_LINE_CODE_MODE 7:0 +#define NV_NVLINK_VBIOS_PARAM2_LINE_CODE_MODE_NRZ 0x00 +#define NV_NVLINK_VBIOS_PARAM2_LINE_CODE_MODE_NRZ_128B130 0x01 +#define NV_NVLINK_VBIOS_PARAM2_LINE_CODE_MODE_NRZ_PAM4 0x03 + +#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE 1:0 +#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE_COMMON 0x0 +#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE_RSVD 0x1 +#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE_NON_COMMON_NO_SS 0x2 +#define NV_NVLINK_VBIOS_PARAM3_REFERENCE_CLOCK_MODE_NON_COMMON_SS 0x3 + +#define NV_NVLINK_VBIOS_PARAM3_RESERVED1 3:2 +#define NV_NVLINK_VBIOS_PARAM3_CLOCK_MODE_BLOCK_CODE 5:4 +#define NV_NVLINK_VBIOS_PARAM3_CLOCK_MODE_BLOCK_CODE_OFF 0x0 +#define NV_NVLINK_VBIOS_PARAM3_CLOCK_MODE_BLOCK_CODE_ECC96 0x1 +#define NV_NVLINK_VBIOS_PARAM3_CLOCK_MODE_BLOCK_CODE_ECC88 0x2 +#define NV_NVLINK_VBIOS_PARAM3_RESERVED2 7:6 + +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM 7:0 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_RSVD 0x00 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A0_SINGLE_PRESENT 0x01 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A1_PRESENT_ARRAY 0x02 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A2_FINE_GRAINED_EXHAUSTIVE 0x04 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A3_RSVD 0x08 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A4_FOM_CENTRIOD 0x10 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A5_RSVD 0x20 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A6_RSVD 0x40 +#define NV_NVLINK_VBIOS_PARAM4_TXTRAIN_OPTIMIZATION_ALGORITHM_A7_RSVD 0x80 + +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM 4:0 +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM_B0_NO_ADJUSTMENT 0x1 +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM_B1_FIXED_ADJUSTMENT 0x2 +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM_B2_RSVD 0x4 +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_ADJUSTMENT_ALGORITHM_B3_RSVD 0x8 + +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_FOM_FORMAT 7:5 +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_FOM_FORMAT_FOM_A 0x1 +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_FOM_FORMAT_FOM_B 0x2 +#define NV_NVLINK_VBIOS_PARAM5_TXTRAIN_FOM_FORMAT_FOM_C 0x4 + +#define NV_NVLINK_VBIOS_PARAM6_TXTRAIN_MINIMUM_TRAIN_TIME_MANTISSA 3:0 +#define NV_NVLINK_VBIOS_PARAM6_TXTRAIN_MINIMUM_TRAIN_TIME_EXPONENT 7:4 + +#define NVLINK_CONFIG_DATA_BASEENTRY_FMT "1b" +#define NVLINK_CONFIG_DATA_LINKENTRY_FMT "7b" +// Version 2.0 Link Entry and Base Entry +typedef struct _nvlink_config_data_baseentry_20 +{ + NvU8 positionId; +} NVLINK_CONFIG_DATA_BASEENTRY; + +typedef struct _nvlink_config_data_linkentry_20 +{ + // VBIOS configuration Data + NvU8 nvLinkparam0; + NvU8 nvLinkparam1; + NvU8 nvLinkparam2; + NvU8 nvLinkparam3; + NvU8 
nvLinkparam4; + NvU8 nvLinkparam5; + NvU8 nvLinkparam6; +} NVLINK_CONFIG_DATA_LINKENTRY; + + +// Union of different VBIOS configuration table formats +typedef union __nvlink_Config_Data_Header +{ + NVLINK_CONFIG_DATA_HEADER_20 ver_20; +} NVLINK_CONFIG_DATA_HEADER, *PNVLINK_CONFIG_DATA_HEADER; + +typedef struct _nvlink_vbios_config_data_baseentry_20 +{ + bios_U008 positionId; +} NVLINK_VBIOS_CONFIG_DATA_BASEENTRY; + +typedef struct _nvlink_vbios_config_data_linkentry_20 +{ + // VBIOS configuration Data + bios_U008 nvLinkparam0; + bios_U008 nvLinkparam1; + bios_U008 nvLinkparam2; + bios_U008 nvLinkparam3; + bios_U008 nvLinkparam4; + bios_U008 nvLinkparam5; + bios_U008 nvLinkparam6; +} NVLINK_VBIOS_CONFIG_DATA_LINKENTRY, *PNVLINK_VBIOS_CONFIG_DATA_LINKENTRY; + +// +// NVSwitch driver structures +// + +#define NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY 12 + +typedef struct +{ + NVLINK_CONFIG_DATA_BASEENTRY link_vbios_base_entry[NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY]; + NVLINK_CONFIG_DATA_LINKENTRY link_vbios_entry[NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY][NVSWITCH_MAX_LINK_COUNT]; + NvU32 identified_Link_entries[NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY]; + NvU32 link_base_entry_assigned; + NvU64 vbios_disabled_link_mask; + + NvU32 bit_address; + NvU32 pci_image_address; + NvU32 nvlink_config_table_address; +} NVSWITCH_BIOS_NVLINK_CONFIG; + +#define NVSWITCH_DCB_PTR_OFFSET 0x36 + +typedef struct _nvswitch_vbios_dcb_header_41 +{ + bios_U008 version; + bios_U008 header_size; + bios_U008 entry_count; + bios_U008 entry_size; + bios_U016 ccb_block_ptr; + bios_U032 dcb_signature; + bios_U016 gpio_table; + bios_U016 input_devices; + bios_U016 personal_cinema; + bios_U016 spread_spectrum; + bios_U016 i2c_devices; + bios_U016 connectors; + bios_U008 flags; + bios_U016 hdtv; + bios_U016 switched_outputs; + bios_U032 display_patch; + bios_U032 connector_patch; +} NVSWITCH_VBIOS_DCB_HEADER; +#define NVSWITCH_VBIOS_DCB_HEADER_FMT "4b1w1d6w1b2w2d" + +typedef struct _nvswitch_vbios_ccb_table_41 +{ + bios_U008 version; + bios_U008 header_size; + bios_U008 entry_count; + bios_U008 entry_size; + bios_U008 comm_port[4]; +} NVSWITCH_VBIOS_CCB_TABLE; +#define NVSWITCH_VBIOS_CCB_TABLE_FMT "8b" + +typedef struct _nvswitch_vbios_i2c_table_40 +{ + bios_U008 version; + bios_U008 header_size; + bios_U008 entry_count; + bios_U008 entry_size; + bios_U008 flags; +} NVSWITCH_VBIOS_I2C_TABLE; +#define NVSWITCH_I2C_TABLE_FMT "5b" + +typedef struct _nvswitch_vbios_i2c_entry +{ + bios_U032 device; +} NVSWITCH_VBIOS_I2C_ENTRY; +#define NVSWITCH_I2C_ENTRY_FMT "1d" + +#endif //_ROM_NVSWITCH_H_ + diff --git a/src/common/nvswitch/kernel/inc/smbpbi_nvswitch.h b/src/common/nvswitch/kernel/inc/smbpbi_nvswitch.h new file mode 100644 index 000000000..bebea56f0 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/smbpbi_nvswitch.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SMBPBI_NVSWITCH_H_ +#define _SMBPBI_NVSWITCH_H_ + +#include "soe/soeifsmbpbi.h" +#include "smbpbi_shared_nvswitch.h" +#include "oob/smbpbi_priv.h" + +typedef struct +{ + NvBool isValid; + NvU64 attemptedTrainingMask0; + NvU64 trainingErrorMask0; +} NVSWITCH_LINK_TRAINING_ERROR_INFO; + +typedef struct +{ + NvBool isValid; + NvU64 mask0; +} NVSWITCH_LINK_RUNTIME_ERROR_INFO; + +struct smbpbi +{ + SOE_SMBPBI_SHARED_SURFACE *sharedSurface; + NvU64 dmaHandle; +}; + +NvlStatus nvswitch_smbpbi_init(nvswitch_device *); +NvlStatus nvswitch_smbpbi_post_init(nvswitch_device *); +NvlStatus nvswitch_smbpbi_set_link_error_info(nvswitch_device *, + NVSWITCH_LINK_TRAINING_ERROR_INFO *pLinkTrainingErrorInfo, + NVSWITCH_LINK_RUNTIME_ERROR_INFO *pLinkRuntimeError); +void nvswitch_smbpbi_unload(nvswitch_device *); +void nvswitch_smbpbi_destroy(nvswitch_device *); +NvlStatus nvswitch_smbpbi_refresh_ecc_counts(nvswitch_device *); +void nvswitch_smbpbi_log_message(nvswitch_device *device, NvU32 num, NvU32 msglen, NvU8 *osErrorString); + +#endif //_SMBPBI_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_dbg.h b/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_dbg.h new file mode 100644 index 000000000..d9bc115c1 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_dbg.h @@ -0,0 +1,2655 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018- NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* + + DO NOT EDIT - THIS FILE WAS AUTOMATICALLY GENERATED + +*/ + +#ifndef _SOE_UCODE_LR10_DBG_H_ +#define _SOE_UCODE_LR10_DBG_H_ + + +const NvU32 soe_ucode_data_lr10_dbg[] = { + 0x00fec0d0, 0x0004fe00, 0x0017167e, 0x08f802f8, 0xa4b300f8, 0xb4890a00, 0x9abf000f, 0x1100a0b3, + 0x0d00b0b3, 0xa001aa98, 0xf8a43dba, 0x0a02f800, 0xbd00f8ff, 0xfc30f494, 0x40fe02f9, 0x04009001, + 0x0db209a0, 0x0003237e, 0xa4b3afb2, 0x0dbf1601, 0x0a01d4b3, 0x002fe189, 0xa43d9d20, 0x00006b3e, + 0xfc09040a, 0x0bf4f9a6, 0xfbff0a05, 0x337e0405, 0xa0330000, 0xa0330a04, 0x02f80600, 0xb37e00f8, + 0xafb20003, 0x0801a4b3, 0x00f8a43d, 0xfc09040a, 0x0bf4f9a6, 0x0a02f807, 0xbd00f8ff, 0xbdc4bdb4, + 0x007e7ed4, 0xf900f800, 0xb2a2b222, 0xb2c0b2b1, 0xb21bb22a, 0x7eff0d0c, 0x3300007e, 0xfbf400a4, + 0x01907e21, 0xb3afb200, 0x3d0801a4, 0x0a00f8a4, 0xa6ed0904, 0x070bf4f9, 0xff0a02f8, 0xbdb200f8, + 0xb4bdc4bd, 0x0000c17e, 0x037e00f8, 0x00f80005, 0x0009fc7e, 0x0801a4b3, 0x00f8a43d, 0xff0a02f8, + 0xfb7e00f8, 0x00f80004, 0x000a9b7e, 0x747e00f8, 0x00f8000a, 0xa0b212f9, 0x000a747e, 0x3e240190, + 0x9800013f, 0x90b30909, 0xf57e2400, 0xa0b30005, 0x2f7e0800, 0x09980005, 0x01999212, 0x981209b5, + 0x1ab21209, 0xf40096b0, 0xff09dc1c, 0xb5100190, 0x9b7e1209, 0x747e000a, 0x7a3e000a, 0x09980001, + 0x0090b304, 0x05f57e24, 0x00a0b300, 0x052f7e08, 0x11099800, 0xb5019992, 0x09981109, 0xb01ab211, + 0x1cf40096, 0xb5ff09dc, 0x9b7e1109, 0x11fb000a, 0xf9f430f4, 0x0149fe82, 0xb22c9990, 0xb2b5b2a0, + 0x019da0c4, 0x050b7ef7, 0x00adb300, 0x09b30146, 0x98013f00, 0x0fbf1409, 0xa60099b9, 0x301bf5f9, + 0x0054b301, 0x1009980e, 0x9db3fa01, 0x98012500, 0x0c411009, 0xf549a6fe, 0xfe01180c, 0x43fe0142, + 0x10079001, 0x01240690, 0x2c229001, 0xbd243390, 0x01f73e84, 0xbfee0100, 0x0090b329, 0x05987e6f, + 0x0a747e00, 0x11099800, 0x9fa6ff0f, 0xb5061bf4, 0x09981108, 0xa6ff0f12, 0x061bf49f, 0x7e1208b5, + 0xb3000a9b, 0xb20a0114, 0x051c7e3a, 0x0e099800, 0x2e0094b3, 0x2bb23ab2, 0x0005397e, 0x2200a4b3, + 0x6ab22bbf, 0x0006427e, 0x147e0ab2, 0x2e7e0001, 0xa4b30009, 0x0e7e1600, 0x683e0000, 0x0ab20002, + 0x0001147e, 0x00092e7e, 0x000a747e, 0x010e0998, 0x0090b3ed, 0x0309984e, 0x98100f98, 0x9fbc010e, + 0x0309b590, 0x08f49ea6, 0xb509bf08, 0x09980309, 0x030b980e, 0x4cb25ab2, 0xb5019992, 0x7e7e0e09, + 0x0998000b, 0x0090b304, 0x7e7ab218, 0xb20005f5, 0x01a4b3a1, 0x000e7e0c, 0x02bf3e00, 0x7e010100, + 0x0f000a9b, 0xf41fa6ed, 0x29bf1a1b, 0x230090b3, 0x2bb23ab2, 0x0005397e, 0x1d00a9b3, 0x02ef3eff, + 0xa6ee0900, 0x120bf519, 0x02ef3eff, 0xb2ef0100, 0x0c85fb1a, 0xa0b202f9, 0x7e02aa98, 0x98000b7e, + 0x0f980e09, 0x100e9802, 0x90010d98, 0x09b50199, 0xf0febc0e, 0xa6020fb5, 0x0808f4fd, 0x09b509bf, + 0xf901fb02, 0xb2a0b212, 0x00a0b3d1, 0x14a99873, 0x99b9afbf, 0xf4f9a600, 0xb4b3651b, 0xa9980b00, + 0x0094b310, 0x0010b361, 0x1009985d, 0x0cf4c9a6, 0x00b0b35a, 0x00c0b308, 0x0e0f9853, 0x0a0f0998, + 0xf4f9a6fc, 0x0ab24e18, 0x0002f47e, 0x09120f98, 0xf4f9a6ff, 0x09981f1b, 0x0090b309, 0x240a9036, + 0x0005f57e, 0x2b00a0b3, 0x19a00109, 0xb13e9ab2, 0xf9900003, 0x1209b501, 0x0003af3e, 0xb13eef0a, + 0xfa0a0003, 0x0003b13e, 0x3efe0c4a, 0x0a0003b1, 0xf411fb01, 0x82f9f430, 0x900149fe, 0xa1b22499, + 0xc4b2b5b2, 0xf7009da0, 0x00050b7e, 0x2a00adb3, 0x0019b301, 0x1998011c, 0xb91fbf14, 0xf9a60099, + 0x010d1bf5, 0x0e0054b3, 0x00101998, 0x009db3fa, 
0x19980109, 0xf549a610, 0xb300fc0c, 0xb3090050, + 0x00f40049, 0xfe0143fe, 0x18900146, 0x10179024, 0x33900102, 0x28669024, 0x90b339bf, 0x987e7700, + 0x747e0005, 0x1998000a, 0xa6ff0f11, 0x081bf49f, 0x19b594bd, 0x12199811, 0x9fa6ff0f, 0xbd081bf4, + 0x1219b594, 0x000a9b7e, 0x0a0124b3, 0x1c7e6ab2, 0x1f980005, 0x0e19980f, 0x1bf4f9a6, 0xb26ab22d, + 0x05397e3b, 0x00a4b300, 0xb23bbf22, 0x06427e7a, 0x7e1ab200, 0x7e000114, 0xb300092e, 0x7e1600a4, + 0x3e00000e, 0xb2000491, 0x01147e1a, 0x092e7e00, 0x0a747e00, 0x0e1f9800, 0x000f1998, 0xf4f9a6fc, + 0x1ab22a18, 0x4cb25bb2, 0x0002f47e, 0xb3091998, 0xb2180090, 0x05f57e8a, 0xb3a0b200, 0x7e0c01a4, + 0x3e00000e, 0x000004c9, 0x0a9b7e01, 0xa6fc0f00, 0x251bf40f, 0x90b339bf, 0x6ab22000, 0xee023bb2, + 0x0005397e, 0x3400a9b3, 0x04f63eff, 0x3eef0000, 0x400004f6, 0x0ab2fe0c, 0x890c85fb, 0xbf000fc0, + 0x8900f89a, 0xbf000fb4, 0x8900f89a, 0xbf000fc4, 0x0096b099, 0xf00bacf0, 0x00f801a6, 0x000fc889, + 0xa9a099bf, 0x000fc089, 0xa9b599bf, 0x0f00f801, 0x0fcc8901, 0xf89fa000, 0xb222f900, 0x7eb2b2a0, + 0x89000a74, 0xbf000fc8, 0xa609bf9f, 0x110bf4f9, 0x000fc089, 0x09989fbf, 0xf4f9a601, 0xc08d3318, + 0x0e98000f, 0xbfd9bf01, 0x029ebb2f, 0x18f49fa6, 0xbdd9bf20, 0x92e9bc14, 0xa0909fbc, 0x0fc88929, + 0xa099bf00, 0xb5d9bf09, 0x903e0109, 0x01010005, 0x000a9b7e, 0x21fb1ab2, 0x000a747e, 0x000fc48f, + 0x9990f9bf, 0x7ef9a001, 0xf8000a9b, 0x8002f900, 0xbf000fd4, 0x0196b009, 0x7e070df4, 0x7e00000e, + 0x3e000a8f, 0x8e0005b3, 0x18001074, 0xefbf41a9, 0x9ac4abb2, 0xf4afa6ff, 0xeaa0050d, 0xb604a994, + 0xa9bc02a4, 0x0fd489a0, 0x10bb9000, 0x7ea0a9bc, 0xf8000bee, 0x9812f900, 0x909803a9, 0x24019003, + 0x517e1ab2, 0xc489000c, 0x99bf000f, 0x150094b3, 0x7e100a90, 0xb2000c51, 0x05c77e0a, 0x062b3e00, + 0x8a1bb200, 0x7e001078, 0x89000bee, 0xbf000fb4, 0x410f1899, 0x9918a4bd, 0xf4f92641, 0x010a050d, + 0x22f911fb, 0x000fb482, 0xb0b229bf, 0x000fc081, 0x7e249b90, 0xbf000c0f, 0xbc2abf19, 0xaa900009, + 0x0c517e10, 0xb529bf00, 0x19bf0490, 0x18f409a6, 0x108c890b, 0x067d3e00, 0x10908900, 0xbf9abf00, + 0x10bb902b, 0x000c0f7e, 0x30f421fb, 0x0fc489fc, 0xbf82f900, 0x0090b399, 0x89010f10, 0xa0000fcc, + 0x08453e9f, 0x0fb48900, 0x0148fe00, 0x95bf91b2, 0x343d243d, 0x748064bd, 0xbc870010, 0xb884000f, + 0x8890000f, 0x06d23e24, 0x3d09bf00, 0x01999224, 0x09bf09a0, 0x000fd48e, 0xb6049f94, 0x9fbc0294, + 0x909ebc90, 0x99b399bf, 0xbf013d00, 0xb949bf7f, 0xf9a60099, 0xbf0f0bf4, 0x7eec0b1a, 0x3e000a95, + 0xbf000742, 0x0090b349, 0x0859983d, 0xfe940fbf, 0x02f4b604, 0x8ef0febc, 0xbc000fd4, 0x9fa6f0fe, + 0xbf221bf4, 0x92b4bd49, 0x49a00199, 0x99b949bf, 0xa079a000, 0x7e1abf15, 0xb0000a89, 0x1ff500a6, + 0x0fbf00f9, 0xfd9409bf, 0x049e9404, 0xbc0294b6, 0xd48e909e, 0x9ebc000f, 0x01999890, 0xbc02f4b6, + 0xfebcf0fd, 0x019998f0, 0xbf01f9b5, 0x049f9409, 0xbc0294b6, 0x9ebc909f, 0x019e9890, 0x9f9409bf, + 0x0294b604, 0x8f909fbc, 0xbc000fdc, 0xe9a6909f, 0xbf2c1bf4, 0x9409bf0f, 0x9e9404fd, 0x0294b604, + 0x8e909ebc, 0xbc000fd4, 0x9998909e, 0x02f4b601, 0xbcf0fdbc, 0x9998f0fe, 0x01f9b501, 0x9f9409bf, + 0x0294b604, 0x8f909fbc, 0xbc000fd4, 0x9998909f, 0x03999801, 0x1abf19a0, 0x000a617e, 0x1000a4b3, + 0xeb0b1abf, 0x000a957e, 0x0007fa3e, 0x8bb21abf, 0x000a897e, 0xf400a6b0, 0x2210401f, 0x01331001, + 0x06013433, 0x09bf06bf, 0x94ff2ec4, 0x94b6049f, 0x909fbc02, 0x000fd48f, 0xbf909fbc, 0xf5e9a699, + 0xbffeb31b, 0x009db309, 0x1abffea4, 0x7efe0b4b, 0x3e000a95, 0x330006d2, 0x890a0030, 0xa0001074, + 0x0ad07e96, 0x0485fb00, 0xc48922f9, 0x99bf000f, 0xcd009db3, 0x10a88900, 0x899fbf00, 0xbf000fc0, + 0x0099b999, 0x0bf4f9a6, 0x0fb4890f, 0x0b9abf00, 0x0a957eec, 0x0fc08e00, 
0x8fe9bf00, 0x900010a8, + 0xe9a00199, 0x99b9e9bf, 0xbff9a000, 0x0094b3e9, 0x10908f1f, 0x108c8e00, 0xbffcbf00, 0x0fc88de9, + 0xa0f9a000, 0x90d9bfec, 0xd9a00199, 0x0010908f, 0x99bff9bf, 0x0a0094b3, 0x003e04bd, 0xf9bf0009, + 0x98039998, 0x003e0390, 0x2fbf0009, 0xa6040998, 0x3e08f4f9, 0x000c517e, 0x900d0998, 0x90b3240a, + 0x517e0800, 0x0ab2000c, 0x0005c77e, 0x99bf19bf, 0x1f0090b3, 0x999819bf, 0x03909803, 0x0009083e, + 0x000fc082, 0x00109081, 0xb3100a90, 0x89bf0004, 0xbf0010ac, 0x0094b399, 0x09283e17, 0x10ac8f00, + 0x90f9bf00, 0xf9a00199, 0x000a877e, 0x32f921fb, 0x000fc481, 0xe90019bf, 0xc00099b3, 0x0a747e00, + 0x9219bf00, 0x19a00199, 0x9db319bf, 0x8900a800, 0xbf0010b0, 0x0099b399, 0x788f009d, 0xf9bf0010, + 0x0a0094b3, 0x703e04bd, 0xf9980009, 0x03909803, 0xb48314bd, 0x7882000f, 0xaa3e0010, 0x517e0009, + 0x0a90000c, 0x0c517e10, 0x7e0ab200, 0xbf0005c7, 0x410f1839, 0x99182ebf, 0xf4f92641, 0x0101050d, + 0x1100e0b3, 0x98032998, 0x0a900390, 0x0004b324, 0x10ac8fd1, 0xb3f9bf00, 0xb21b0090, 0x08487ef0, + 0x9209bf00, 0x09a00199, 0x94b309bf, 0xe03ef300, 0x10b30009, 0xcc890e01, 0x99bf000f, 0x160194b3, + 0xcc89f4bd, 0x0100000f, 0x0e7e9fa0, 0xf43e0000, 0x04bd0009, 0x000a9b7e, 0x31fb0ab2, 0xc48932f9, + 0x99bf000f, 0xf70fa3b2, 0x550094b3, 0x4b00a0b3, 0x0005987e, 0x000fc081, 0x000fb482, 0x2abf10bf, + 0x900030bc, 0x517e10aa, 0x29bf000c, 0xbf0490b5, 0xf409a619, 0x8c890b18, 0x423e0010, 0x9089000a, + 0x9abf0010, 0xbb902bbf, 0x0c0f7e10, 0x092e7e00, 0xb3010f00, 0x7e0a00a4, 0x0f00000e, 0xfbfab201, + 0x00a0b331, 0x03a99811, 0x99b9afbf, 0xf0f9a600, 0x00f80bac, 0xf41032f4, 0xa08f1132, 0xf9bf0005, + 0xa0019990, 0xf800f8f9, 0x10107e00, 0x7e00f800, 0xf8001885, 0x18b47e00, 0x8f00f800, 0xbf0005a0, + 0x0094b3f9, 0x1032f41a, 0xbd1132f4, 0x7eee0ba4, 0xf4000a95, 0x31f41031, 0xbf00f811, 0x019992f9, + 0xf9bff9a0, 0x0a0094b3, 0xf41031f4, 0x00f81131, 0x0041407e, 0xc9fe00f8, 0x1495b601, 0x3c089033, + 0xf4089630, 0x9033130c, 0x90332002, 0x90333403, 0x1c3e1800, 0x9033000b, 0x9033280f, 0x94331810, + 0x0c3e1e0a, 0x8a7e000b, 0x00f80006, 0x0013bc7e, 0xb07e00f8, 0x00f80013, 0x00f802f8, 0x00f802f8, + 0x0041557e, 0x000ad67e, 0x0041407e, 0x02f900f8, 0x0041557e, 0xcf020049, 0xdc800099, 0x0fbf0010, + 0xf4049ffd, 0x487e170b, 0x8a7e0008, 0x0fbf0006, 0xfa010049, 0x623e009f, 0xa4bd000b, 0x957eda0b, + 0x407e000a, 0x01fb0041, 0x0041557e, 0x00185a7e, 0x0800a0b3, 0x00068a7e, 0x0041407e, 0x94bd00f8, + 0x000b8d3e, 0x3cf8b93c, 0x999099af, 0xf49ca601, 0x00f8f508, 0xa23ea9b2, 0x9b20000b, 0x9001cc92, + 0xc4b30199, 0x00f8f800, 0xc73ee4bd, 0xae3c000b, 0x98be3cf8, 0x2601ee90, 0x0e0bf4f9, 0xf0fff4f0, + 0xf9bcff94, 0xa600f8a2, 0xe508f4ec, 0x00f8a4bd, 0xb508af90, 0xff0901af, 0xb502a9b5, 0xafb503af, + 0xa094bd04, 0xbd00f8a9, 0x04a9b594, 0xaf9800f8, 0x01bfb501, 0xb502f998, 0xf99802b9, 0x019bb502, + 0xb502fbb5, 0xa9bf04ba, 0xa0019990, 0xbf00f8a9, 0xa6ff09be, 0x0a1bf4e9, 0x3e04af98, 0x90000c33, + 0x293e08af, 0xff98000c, 0x01f99801, 0x9ea699bf, 0x98f60df4, 0xb9b501f9, 0x01b99801, 0xb5029bb5, + 0xfbb502bf, 0x04bab501, 0x9990a9bf, 0xf8a9a001, 0x01af9800, 0xb502a998, 0xaf9802f9, 0x01a99802, + 0x9801f9b5, 0xf99804af, 0xf49aa601, 0xa998091b, 0x01f9b502, 0xa9b594bd, 0x92f9bf04, 0xf9a00199, + 0x72f900f8, 0xb202b998, 0xb2b1b2a5, 0x1d9a95c2, 0xb2249034, 0x32e732d4, 0x009033a6, 0x7e23000f, + 0x330018d7, 0x011200a9, 0x119819bf, 0x2029bc01, 0x18f429a6, 0xa6ff0921, 0x0a0bf419, 0x3e011190, + 0x7e000cd3, 0x0b0000ea, 0x7e020002, 0x3e0018b4, 0xc4000db5, 0x45ffff23, 0xfd3f0095, 0x94f00593, + 0xd41bf503, 0x0a747e00, 0x0e067e00, 0x01fafe00, 0xfe081995, 0x7bfe009f, 0x01bcfe01, 0x95181994, + 
0x9ffd082f, 0x0097fe05, 0xe4ff69c4, 0x33ffffcf, 0xf10f0070, 0xb6f8fff4, 0x253e0894, 0xf4f1000d, + 0x94b68fff, 0x95f9ff0c, 0x3e009bfe, 0xe5000d92, 0xfd010059, 0x9fb90593, 0x04f9fd01, 0x000d433e, + 0xa601f5b6, 0xfb0cf4f4, 0xbd03fd95, 0x0d543ee4, 0x01d5b600, 0xd0b3e932, 0xee900c00, 0x00eeb301, + 0x94f0f401, 0xff5ee4ff, 0x007033ff, 0x0794f014, 0xfd1094b6, 0x39fa059e, 0x0d893e05, 0x0794f000, + 0xfd1094b6, 0x39fa059e, 0x024fbb06, 0xbc303fbc, 0x44b3505f, 0xaffe9d00, 0x00b7fe00, 0xf800cbfe, + 0x7e170003, 0x3300195a, 0x3d0600a4, 0x0a9b7e04, 0x0ddd7e00, 0xfb0a3200, 0xf4010971, 0xe43dfc30, + 0x7e009130, 0xf4000c82, 0x00f80430, 0x0efc30f4, 0x00e13001, 0x000c827e, 0xf80430f4, 0x0a747e00, + 0x10e08f00, 0xb3f9bf00, 0xbf0f0090, 0x019992f9, 0x003ef9a0, 0xea7e000e, 0x030b0000, 0x0018b47e, + 0x000a9b7e, 0x747e00f8, 0xe08e000a, 0xefbf0010, 0xf9a6ff09, 0xbf0e0bf4, 0x019990e9, 0x2c3ee9a0, + 0xea7e000e, 0x030b0000, 0x0018b47e, 0x000a9b7e, 0x12f900f8, 0xafbfb9b2, 0xa0b29e3f, 0xb204d192, + 0x18fe20cb, 0x1cb2019e, 0x1801fe35, 0xfe35029e, 0x03991802, 0xbf03f935, 0x04aa90aa, 0x7e7e0aa0, + 0x09bf000b, 0xa09091bc, 0xf411fb09, 0x62f9f430, 0xb2b2a3b2, 0xd532c6b2, 0x0c00b4b3, 0x043d09f8, + 0x000f513e, 0x0001b918, 0xff94f001, 0xe4039990, 0xf501fc91, 0xbf00be0b, 0x7eff0baa, 0x980000de, + 0xa5f9033a, 0x900149fe, 0x9aa02099, 0xb83e94b2, 0x010a000e, 0x0000f07e, 0xbf053a98, 0xbca5f940, + 0xa0a6f010, 0xa60d0df4, 0x8d18f5fa, 0x0f0b3e00, 0x02399800, 0x08f4f9a6, 0x01399835, 0xa69019bc, + 0x7518f49a, 0x3d063e98, 0x014ffe94, 0x351cff90, 0xf92003f9, 0x0902f935, 0x35fbb204, 0x4ab201f9, + 0x040dc4bd, 0x3998e5f9, 0x9849a001, 0x1db20639, 0x2bb26cb2, 0x900140fe, 0x0ab22000, 0x41fe95f9, + 0x0142fe01, 0x90271190, 0x1ab22622, 0xf77e2bb2, 0x020a0040, 0x117eab32, 0x0abf0041, 0x00043998, + 0x3f95f901, 0x7e2b3f1a, 0xbf004111, 0x009b7e3a, 0xfb0a3200, 0x5d330c65, 0x3dff5c00, 0x0f4b3e04, + 0x3dbcb200, 0x8aabb2d4, 0x7e0010e4, 0xf8000e6b, 0x0dbcb200, 0x8aabb201, 0x7e0010e4, 0xf8000e6b, + 0xb212f900, 0xb3b132a0, 0xf80a00a4, 0x100e3e09, 0x2fd88900, 0x0b9abf00, 0x00de7eff, 0xff19c400, + 0x0301008f, 0xbc099e94, 0xefcfe0ef, 0x8099b800, 0x94b60001, 0x009dcf09, 0xf0020918, 0x09352095, + 0xf40fa602, 0xf63e411b, 0xf918000f, 0x2094f002, 0x3f340bf4, 0x009433f9, 0xf8cbbc0b, 0x000fef3e, + 0xf001f918, 0x9990ff94, 0x049afd03, 0xf6f0f9bc, 0xff3e00ef, 0x1bc4000f, 0x8cfc0aff, 0xa6002fc0, + 0xc91bf4fd, 0x002fd889, 0x9b7e9abf, 0x11fb0000, 0x108b02f9, 0x127e0011, 0xa0320000, 0x0f00a033, + 0x0000ea7e, 0x7eff0bc4, 0x890018b4, 0xbf001110, 0x11148990, 0x899fbf00, 0x18001100, 0x9e35180e, + 0xf40fa60e, 0x0918101b, 0x00943319, 0x3ea4bd0a, 0x1800107a, 0xb4331d0b, 0xa4bd0a00, 0x00106f3e, + 0x3d200a90, 0x13607ec4, 0x00a6b000, 0x3d0e1ef4, 0x19093594, 0x00111489, 0x01fb90a0, 0x89ffa4f0, + 0x8c003231, 0xbc003224, 0xacbcd0a9, 0x3fdf3fa0, 0x32318cae, 0xffe9c400, 0x20909cbc, 0x3224899f, + 0xfff4f000, 0x20f0f9bc, 0x20943dfe, 0xf900f8d9, 0x32318922, 0xffa0c400, 0x3f2009bc, 0x33a1322e, + 0x7e0c00e0, 0x3e00107c, 0x8f0010e0, 0x3f001118, 0xf4a926f9, 0x947d0d1b, 0xf975fe20, 0x01f97502, + 0x00323189, 0x91209f3f, 0x00322489, 0x3d0009bc, 0x20092094, 0x3224892f, 0xfff4f000, 0x20f0f9bc, + 0xf921fbf1, 0x11188f52, 0x32943d00, 0xb2c332b4, 0x35f5b2a2, 0x143d01f9, 0x00113a3e, 0x1110203f, + 0x01229001, 0x16000033, 0x3b320a32, 0x0010af7e, 0x33015918, 0x35070094, 0x14260150, 0xfbe008f4, + 0xfc30f451, 0xb13212f9, 0x900140fe, 0x0bb20800, 0x0000127e, 0x0d1800bf, 0x0014331d, 0x4f02f806, + 0xf3f1000b, 0x19c40000, 0xf49fa6ff, 0xdf320b0c, 0x9f3ee43d, 0x02f80011, 0xdf32e43d, 0x00119f3e, + 0xc401ff12, 0x09bcfff9, 
0x20991890, 0x1bf49126, 0x3ea43d09, 0x330011c9, 0x32080094, 0x33010efd, + 0x33e100f4, 0xc41100e0, 0x09bcffd9, 0x20913590, 0x0011b63e, 0x49fe02f8, 0x08999001, 0x010a9fbf, + 0xfd19f918, 0xf935059a, 0x0415fb19, 0x0011417e, 0x0600a433, 0x00f802f8, 0xf9fc30f4, 0x32a0b212, + 0x0a747eb1, 0xfe0ab200, 0x00900140, 0x7e0bb208, 0xbf000012, 0x18f43d00, 0x163e1d0d, 0xe9180012, + 0x01ff1020, 0x2620ee90, 0x0d1bf491, 0x0100943d, 0x233ee920, 0xf9c40012, 0xe009bcff, 0x08f4fd26, + 0x7e043de0, 0x32000a9b, 0x0415fb0a, 0x0011d87e, 0x0600a433, 0x00f802f8, 0xa4f082f9, 0x32008fff, + 0x32188e00, 0x58fa7c00, 0xb298ea3c, 0xbdc632b7, 0xff94f034, 0x0005a488, 0x8120597c, 0x3e001118, + 0x52001354, 0x29e40122, 0x9494ffff, 0x034ffe08, 0xb31ff995, 0xd0100190, 0x03000000, 0xf594f0ff, + 0xbf00d51b, 0x01008e89, 0x089f9500, 0xfffff9e4, 0x18f59ea6, 0x99900010, 0x0894b601, 0x473e89a0, + 0x1f580013, 0x02195801, 0x1bf4f966, 0x000b4945, 0x000093f1, 0x0032248f, 0x18909fbc, 0x19180190, + 0xf4092601, 0x02f8051b, 0xb43d0a32, 0x00107c7e, 0x0032188e, 0x3cff09c4, 0x008ef8e9, 0xe97c0032, + 0xf0102098, 0x1975fff4, 0xf09f7c01, 0x58021f75, 0x9b520219, 0xffb9e401, 0x021b75ff, 0xfe0894b6, + 0xa995039a, 0x0190b31f, 0x3d7cb29d, 0x133f3ed4, 0x10c93f00, 0xcc9001dd, 0xff9fc401, 0x23009033, + 0x00320080, 0x80e80f7c, 0x3c003218, 0xbe66980f, 0xf00f08f4, 0xe97cff94, 0xf5b96690, 0x26ff6708, + 0xd008f4d6, 0xe4ffafc4, 0xb6fffff9, 0x49fa0894, 0x01339004, 0x0cf52566, 0x07f8ff0d, 0x81fb3ab2, + 0xc53252f9, 0xb332a4b2, 0x0800a0b3, 0x0c00b433, 0xff0a02f8, 0x0013ae3e, 0x41b2c43d, 0x0011037e, + 0x043d24bd, 0x0013a73e, 0x00101a3f, 0x01119001, 0x1700a033, 0x3c324bb2, 0x387e5d32, 0x2abc0012, + 0x00a6b020, 0x260a1ef4, 0xdf08f403, 0x51fb2ab2, 0x0000ea7e, 0xb47e2f0b, 0x00f80018, 0x0000ea7e, + 0xb47e2f0b, 0x00f80018, 0xa93f22f9, 0xd0b2c1b2, 0x3204c0b4, 0xff94f0ed, 0x08f4b9a6, 0x9402f805, + 0xb99402bf, 0x02ae9804, 0x5200c0b3, 0x3cf0f9bc, 0xa998f9ed, 0x18203402, 0x0e1e0dcc, 0x909fbc01, + 0x98019235, 0xebbb02a9, 0x909fbc04, 0x980191b5, 0x9fbc02a9, 0x029db590, 0xbc02a998, 0x9cb5909f, + 0x02a99803, 0xb4909fbc, 0x9fb505f0, 0x01a99804, 0xb5059efd, 0x543e01a9, 0xf9bc0014, 0x90e9bc90, + 0x98039cb5, 0x010901af, 0xb9049bbb, 0xf9fd0099, 0x01afb504, 0x21fbba32, 0xa0b202f9, 0x0001027e, + 0x98010998, 0xf9bc020f, 0xa29abc90, 0x0df4afa6, 0xfba4bd05, 0xb232f901, 0x98ff02a3, 0x143d01a0, + 0x0014ad3e, 0xf40109c4, 0x3998200b, 0xff1ac402, 0xb604af94, 0xafbc02a4, 0xa09abca0, 0x0014587e, + 0x0df42aa6, 0x10a2b205, 0x05b60111, 0x0004b301, 0xfb2ab2d7, 0xb282f931, 0x98643da5, 0x010801a4, + 0x00157a3e, 0xf50149c4, 0x9800ad0b, 0x61c40253, 0x021994ff, 0xbc041f94, 0x32bc209f, 0x7e0ab200, + 0xb3001458, 0x009100ad, 0x98010998, 0xf9bc020f, 0x0107b570, 0xf098323c, 0x94b30394, 0x027e2602, + 0x09980001, 0x020f9801, 0xa6c2a9bc, 0x2c08f4cf, 0xff9dcfff, 0xa9bc1ccf, 0x0109b592, 0x00153b3e, + 0x190094b3, 0xbc015f98, 0x14bd9481, 0xfd0099b9, 0x5fb504f9, 0x153b3e01, 0x9814bd00, 0x90b30309, + 0x0b183600, 0x00b03301, 0x3da4bd12, 0x7ed43dc4, 0x7e0011cc, 0x9800000e, 0x0a980309, 0xb27bb204, + 0x1895f91c, 0xb033010b, 0xa4bd0e00, 0xd43dc43d, 0x00122c7e, 0xb6016610, 0x4db30145, 0xfbff4a00, + 0x0c004d81, 0xcf0b004c, 0xcecf00df, 0x00d9cf00, 0x1bf4f9a6, 0xb5aea0f5, 0x00f801af, 0xcf0b0049, + 0xaabf0099, 0xf8a29abc, 0xb242f900, 0xb2b3b2a0, 0x00c0b3c1, 0x0b004929, 0xb20092cf, 0x0000b394, + 0xf93ab212, 0x00a03305, 0x3e010a0a, 0xcf0015e8, 0x92bb0049, 0xf491a602, 0x04b3e608, 0xa43d0a00, + 0x0015e83e, 0x05f93ab2, 0x000041fb, 0xd24e0041, 0x000005a0, 0x90fc00f8, 0x0015ea7e, 0xf90188fe, + 0x00289880, 0xb4d880f9, 0x9800000f, 0x47fe0088, 
0xbd87a001, 0x0387b570, 0xfe0010f7, 0xb7b600a4, + 0x1fb9f002, 0xf9001bf7, 0x7e00f890, 0x3e0015ea, 0xf9001647, 0x3650daf2, 0x50db0000, 0x7e000034, + 0x7e0015f6, 0xf7000b68, 0xb4d80010, 0x9800000f, 0x87980088, 0x0074fe00, 0xb6028798, 0x79f00277, + 0x0017f71f, 0x28a080fc, 0x88fe80fc, 0xf8f0fb00, 0xdaf2f901, 0x00003650, 0x003450db, 0x15f67e00, + 0x0b2e7e00, 0x16473e00, 0x1832f400, 0x50daf2f9, 0xdb000034, 0x00003250, 0x0015f67e, 0x000b207e, + 0x0016473e, 0xf91832f4, 0x0188fef2, 0x3b7e80f9, 0x80fc0040, 0xfb0088fe, 0xb201f8f0, 0xb0afb2a9, + 0x1ef400a6, 0xb0bab22b, 0x1ef400b6, 0xacfaff13, 0xf496b9ff, 0x00f8051e, 0xf801aab9, 0x01bab900, + 0xffacfaff, 0x1ff496b9, 0x16d83ef0, 0x01afb900, 0xb6b0bab2, 0xd81ff400, 0x0016dd3e, 0x95e0abff, + 0xbf9510a9, 0x01b9fd10, 0xbb01affd, 0xa4b600ab, 0x00aebb10, 0x2cdf00f8, 0xf4000006, 0x62f9fc30, + 0xffbffebf, 0x900149fe, 0x9ea01c99, 0x000630d9, 0xbd9fa000, 0x010089f4, 0x009ff601, 0xb87fff4f, + 0x003f0099, 0xdf009ff6, 0x03ff0000, 0x010099b8, 0x009ff600, 0x00ff808f, 0x0a0099b8, 0x009ff600, + 0xb810004f, 0x024a0099, 0xd6009ff6, 0x40000000, 0x011c0085, 0x000640d0, 0x02008400, 0x00008301, + 0xaaa0d210, 0xfd01beef, 0x227e080a, 0xa6fd001c, 0x280bf504, 0x0049cf00, 0xf50493fd, 0xde001e0b, + 0x00000640, 0xf2fff4bd, 0x90e9a095, 0xee9001ff, 0x04f4b304, 0x17f03ef5, 0x0059cf00, 0xf40194f0, + 0x3cf4fa1b, 0xf009b21f, 0x99fa0593, 0xf503f806, 0xf590043c, 0xf590033c, 0xf5ac543c, 0xf5c4043c, + 0xb2d0333c, 0x0393f009, 0xf80599fa, 0x003cf403, 0xe9920ebf, 0xf491a601, 0x0e98250d, 0x01e99201, + 0x0df491a6, 0x020e981a, 0xa601e992, 0x0f0df491, 0x92030e98, 0x91a601e9, 0xff700cf5, 0x000640df, + 0xd094bd00, 0x0000062c, 0xa003f9b5, 0x01f9b5f9, 0xa002f9b5, 0x589a7e0e, 0x0630d900, 0x99bf0000, + 0x09a0a4bd, 0x900149fe, 0x9fbf1c99, 0xf9a609bf, 0x7e070bf4, 0xfb0042e9, 0x30f40465, 0xd112f9fc, + 0x0000062c, 0x40fe19bf, 0x08009001, 0xcf7e09a0, 0x0fbf0019, 0xa4f019bf, 0xf4f9a6ff, 0xe97e070b, + 0x15fb0042, 0x062cdf04, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0xf40031f4, 0x49fe0028, 0xd99fbf01, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4efbf00, 0x49fefc30, + 0xf89fa001, 0xbf9fbf02, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xf400f804, 0x12f9fc30, 0x00062cd1, + 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf001a81, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xd90415fb, + 0x00001438, 0xf9fc30f4, 0x062cd112, 0x9b180000, 0x0c9a180d, 0x40fe19bf, 0x08009001, 0xe67e09a0, + 0x0fbf001b, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, 0x0000062c, 0x40fe19bf, + 0x08009001, 0xe67e09a0, 0x0fbf001b, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, + 0x0000062c, 0x40fe19bf, 0x08009001, 0x517e09a0, 0x0fbf001b, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, + 0x2cdf0415, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x1400899f, 0x0099cf02, 0xf40194f0, 0x1f7e180b, + 0x008f0047, 0xf9ce0213, 0x0195f000, 0x3e00f9f7, 0xf80019b5, 0x0149fe02, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdff830, 0xf9000006, 0xfeffbf02, 0x99900149, + 0x3d9fa008, 0x2fe1d9f4, 0x9f200000, 0xcf06004f, 0x004e00ff, 0x00eecf07, 0xcf020049, 0x9ffd0099, + 0x10ef9504, 0xfd00eeb9, 0xffb9049e, 0x049fff00, 0xf40109c4, 0x5e7e070b, 0x09e40044, 0x0bf41000, + 0xfe010f2f, 0x008e0149, 0x99900289, 0xce9f2007, 0xfe0f00e9, 0xf7049ffd, 0x08d900e9, 0xbf000014, + 0x014bfe9a, 0xbb90010c, 0x006e7e07, 0x0009e400, 0x00907380, 0x19827e08, 0x01004900, 0xfe0090f7, + 0x99900149, 0xd99ebf08, 0x0000062c, 0xe1d99fbf, 0x3f00002f, 0xf4efa69a, 0xe97e070b, 0x05fb0042, + 0x062cde08, 0xefbf0000, 0xfefc30f4, 0x010a0149, 0x9fbf9fa0, 0xf9a6e9bf, 
0x7e070bf4, 0xf40042e9, + 0x00f80430, 0xdffc30f4, 0x0000062c, 0xffbf02f9, 0x900149fe, 0xa0b20499, 0xa9989fa0, 0x0194f003, + 0xbf0d0bf4, 0x1c227eaa, 0x1ad23e00, 0xcfa9bf00, 0x0f98009a, 0x01099803, 0xf0020e98, 0xa9ff02f4, + 0xf09ea694, 0x9a320b9c, 0x0c00f0b3, 0xf0009630, 0x9a320b9c, 0x900149fe, 0x9fbf0499, 0x00062cd9, + 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40405fb, 0x12f9ec30, 0x900149fe, 0x2cd10899, 0xa0000006, + 0x019bb59a, 0xb5029cb5, 0x9bb2039e, 0xdcb219bf, 0xda0140fe, 0x00001aa4, 0xa0180090, 0x15a97e09, + 0xbf0fbf00, 0xf4f9a619, 0xe97e070b, 0x15fb0042, 0x062cd914, 0x99bf0000, 0x8efc30f4, 0xfe00b800, + 0xf9a0014f, 0xc400efcf, 0x1bf404f9, 0x3ea43d09, 0xc5001b7b, 0xe9f604f9, 0xfe010a00, 0x9fbf0149, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, + 0x9fa00149, 0x00900089, 0xf00099ce, 0x0bf40194, 0xf1008f1e, 0x00f9ce00, 0xf71095f0, 0xffb800f9, + 0xce025200, 0x95f000f9, 0x00f9f710, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xf00149fe, 0x9fa003b4, 0x8903a4f0, 0xb6009400, + 0xabfd02a4, 0x009af705, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x30f400f8, 0x062cdffc, 0x02f90000, 0x49feffbf, 0x04999001, 0x9fa0a0b2, 0x000a747e, 0x01c0008f, + 0xc700f9cf, 0x90b34c99, 0x90b34700, 0x02f8f601, 0x001c403e, 0xc700f9cf, 0x90b34c99, 0x90b30e00, + 0x02f8f601, 0x001c543e, 0x01c20080, 0x7e0000cf, 0xfe000a9b, 0x99900149, 0xd99fbf04, 0x0000062c, + 0x0ab299bf, 0x0bf4f9a6, 0x1caa3e28, 0xc1008900, 0x0090f601, 0x0000f1df, 0x0099b880, 0x9ff70201, + 0x009fcf00, 0x543e9fb2, 0xe97e001c, 0x05fb0042, 0xfc30f404, 0x00062cdf, 0xbf12f900, 0x0149feff, + 0xb2089990, 0xb29fa0a0, 0x0a747eb1, 0xc0008f00, 0x00f9cf01, 0xb34c99c7, 0xb33e0090, 0xf8f60190, + 0x1cd13e02, 0x00f9cf00, 0xb34c99c7, 0xb30e0090, 0xf8f60190, 0x1ce53e02, 0x0a9b7e00, 0x0149fe00, + 0xbf089990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x1d3a3e30, 0xc1008900, 0x0090f601, 0x010099b8, + 0x0091f600, 0x0000f2df, 0x0099b880, 0x9ff60202, 0x009fcf00, 0xe53e9fb2, 0xe97e001c, 0x15fb0042, + 0x062cdf04, 0xfebf0000, 0xb8fc30f4, 0x000180aa, 0xb60149fe, 0x9ea009a4, 0xbf00aacf, 0xa6f9bf9e, + 0x070bf4e9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0x03010089, 0xb6fc30f4, 0xa9bc09a4, + 0x0149fea0, 0xabf69fa0, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xde00f804, 0x0000062c, 0x30f4efbf, 0x20008afc, 0x0149fe03, 0xaacf9fa0, 0xbf9fbf00, 0xf4f9a6e9, + 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0x00899fa0, 0x9af60320, + 0xbd400f00, 0x009ff694, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x2cde00f8, 0xbf000006, 0xfc30f4ef, 0x0321008a, 0xa00149fe, 0x00aacf9f, 0xe9bf9fbf, 0x0bf4f9a6, + 0x42e97e07, 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x1fa9959f, 0xa4f0c920, + 0x02a0b303, 0x03a0b30c, 0x01a4b308, 0x3ebaa00a, 0xbd001e57, 0xfeb9a094, 0x9fbf0149, 0x00062cd9, + 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, + 0xa0e2a9c7, 0xeaa9c7b9, 0xaae7c9a0, 0xdaa00154, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x01c4b39f, 0x82a9e70e, 0x3ed9a003, 0xe7001ed1, + 0xb30116a9, 0x0f0e0090, 0x11004919, 0xf8009ff7, 0x0149fe02, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, + 0xe97e070b, 0x30f40042, 0xd900f804, 0x0000062c, 0x30f499bf, 0x014ffefc, 0xa9e7f9a0, 0xaac702e2, + 0x06a0b39a, 0x06a6b020, 0xb30f0cf4, 0xb31604a0, 0x3e2505a4, 0xb0001f21, 0x0cf409a6, 0x1f2b3e1a, + 
0x0294b600, 0x3e04b9b5, 0x92001f34, 0x94b602af, 0xf9b9bc02, 0xbf0149fe, 0x062cd99f, 0x99bf0000, + 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x1fa9959f, + 0xa4f0c920, 0x02a0b303, 0x03a0b30c, 0x01a4b308, 0x3ebaa00a, 0xbd001f7b, 0xfeb9a094, 0x9fbf0149, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, + 0xf9a0014f, 0xa0a2a9c7, 0xe8a9c7b9, 0xaae7c9a0, 0xdaa00154, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xb60149fe, 0x9fa00ca5, 0x1101c4b3, 0x07ffff89, + 0xa094a9ff, 0x1ffd3ed9, 0xffff8900, 0x94a9ff07, 0x0f0d0bf4, 0x11004919, 0xf8009ff7, 0x0149fe02, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xd900f804, 0x0000062c, 0x30f499bf, + 0x014ffefc, 0xa9e7f9a0, 0xaac702e2, 0x05a0b39a, 0x05a6b020, 0xb30b0cf4, 0x3e3700a0, 0xb3002064, + 0xb00e06a0, 0x0cf409a6, 0x20573e1e, 0x0294b600, 0x3e04b9b5, 0x9200206e, 0x94b602af, 0xf9b9bc02, + 0x00206e3e, 0x0049190f, 0x009ff711, 0x49fe02f8, 0xd99fbf01, 0x0000062c, 0xf9a699bf, 0x7e070bf4, + 0xf40042e9, 0x00f80430, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0x959fa001, 0xc9201fa9, 0xb303a4f0, + 0xb30c02a0, 0xb30803a0, 0xa00a01a4, 0x20b53eba, 0xa094bd00, 0x0149feb9, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0xa9c79fa0, + 0xc7b9a0a2, 0xc9a0e8a9, 0x0154a9e7, 0xaac7d9a0, 0x00a0b370, 0x49190f0e, 0x9ff71100, 0xfe02f800, + 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, + 0xfefc30f4, 0xa5b60149, 0xb39fa00c, 0x891101c4, 0xff07ffff, 0xd9a094a9, 0x0021503e, 0x07ffff89, + 0xf494a9ff, 0x190f0d0b, 0xf7110049, 0x02f8009f, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, + 0x42e97e07, 0x0430f400, 0x2cd900f8, 0xbf000006, 0xfc30f499, 0xa0014ffe, 0xe2a9e7f9, 0x9aaac702, + 0x2005a0b3, 0xf405a6b0, 0xa0b30b0c, 0xb73e3700, 0xa0b30021, 0xa6b00e06, 0x1e0cf409, 0x0021aa3e, + 0xb50294b6, 0xc13e04b9, 0xaf920021, 0x0294b602, 0x3ef9b9bc, 0x0f0021c1, 0x11004919, 0xf8009ff7, + 0x0149fe02, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, + 0x30f4ffbf, 0x0149fefc, 0xa9959fa0, 0xf0c9201f, 0xa0b303a4, 0xa0b30c02, 0xa4b30803, 0xbaa00a01, + 0x0022083e, 0xb9a094bd, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x2cde00f8, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0xa2a9c7f9, 0xa9c7b9a0, 0xe7c9a0e8, 0xa00154aa, + 0xbfffbfda, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, + 0xc0b39fa0, 0xc0b31001, 0xc0b30c0c, 0xc4b3080d, 0xa9e70a14, 0xd9a0024c, 0xbf0149fe, 0x062cd99f, + 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cd900f8, 0xbf000006, 0xfc30f499, 0xa0014ffe, + 0xe2a9e7f9, 0x9aaac702, 0x2e06a0b3, 0xf406a6b0, 0xa0b30f0c, 0xa4b31604, 0xd63e3305, 0xa6b00022, + 0x280cf409, 0x0022e03e, 0x0049190f, 0x009ff711, 0xe93e02f8, 0x94b60022, 0x04b9b502, 0x0022e93e, + 0xb602af92, 0xb9bc0294, 0x0149fef9, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xf400f804, 0x82f9d430, 0x301590b4, 0xc1b027e1, 0x0ad1b00b, 0x94b6f4bd, 0x0c91b002, 0x900149fe, + 0x9fa04499, 0x20079990, 0x0b99929f, 0x95b29fa0, 0xa0049992, 0x9297b29f, 0x9fa00499, 0x00062cdf, + 0x90ffbf00, 0x4efe1499, 0xa0a6b201, 0x34ee909f, 0xb4b20209, 0x84bde9a0, 0x14bd34bd, 0x0024473e, + 0x227e6ab2, 0x49bf001c, 0x4bfea2b2, 0x014cfe01, 0x9044bb90, 0x95f94bcc, 0xb31100b4, 0x008e0209, + 0x9e0309b3, 0x010db300, 0x499800a8, 0xb27cb201, 0xfe5bb22a, 0xdd90014d, 0x3295f938, 0x0be0b40c, + 0xfd3ed4bd, 0x5fbf0023, 
0xf9a6e9bf, 0x34381bf4, 0xe89827b0, 0x987fbf01, 0xb03302e9, 0xb0b40a00, + 0x90b9bc0c, 0x1bf4f9a6, 0x1448df1e, 0xf9180000, 0x0094330c, 0x90f1b206, 0x48d920ff, 0xa600002a, + 0xed1bf4f9, 0x130010b3, 0xbf0c1c35, 0x0118b55b, 0x79bf1ba0, 0x900219b5, 0xee9001dd, 0x0ab0b40c, + 0x08f4dba6, 0x242d3ea4, 0x02499800, 0x4dfe5cbf, 0xb22ab201, 0x34dd901b, 0x2d3e95f9, 0x10b30024, + 0x49980f00, 0xb25cbf03, 0xf91bb22a, 0x4b903495, 0x11009433, 0x14bdff09, 0xa00e91b0, 0xa094bd79, + 0x01339059, 0xb4046690, 0x39a60d90, 0xff1408f5, 0x900149fe, 0x9fbf4c99, 0x00062cd9, 0xa699bf00, + 0x070bf4f9, 0x0042e97e, 0xf42c85fb, 0x2cd9fc30, 0xf9000006, 0xf499bf72, 0x48d0fc30, 0xfe00002a, + 0xff90014f, 0x3ff9a024, 0xb2a6b209, 0xb2c3b2b5, 0x009d33d4, 0xb4bd0111, 0xda16044c, 0x00001448, + 0x000b947e, 0x00020cd9, 0x989cbf00, 0x91b2019d, 0x07b294bd, 0x8a0091b0, 0xdb02c000, 0x000001fc, + 0x48d0e43d, 0x7e000014, 0xbd002303, 0xb309bf24, 0x981e1394, 0x1d98021c, 0x02099803, 0xdb040a98, + 0x000001cc, 0x91b0010e, 0x23037e00, 0xb309bf00, 0x98350494, 0x1d98041c, 0x040a9805, 0xdb020998, + 0x000001dc, 0x91b0010e, 0x23037e00, 0x081c9800, 0x98091d98, 0x21b0040a, 0x01dcdb00, 0xe43d0000, + 0x0023037e, 0x94b309bf, 0x1c981b05, 0x071d9806, 0xb0040a98, 0xdcdb0021, 0x3d000001, 0x23037ee4, + 0xb309bf00, 0x98351594, 0x1d980c1c, 0x040a980d, 0xdb020998, 0x000001ec, 0x91b0010e, 0x23037e00, + 0x0a1c9800, 0x980b1d98, 0x21b0040a, 0x01ecdb00, 0xe43d0000, 0x0023037e, 0x94b309bf, 0x1c981b16, + 0x0f1d980e, 0xb0040a98, 0xecdb0021, 0x3d000001, 0x23037ee4, 0x20009000, 0x1bf507a6, 0x0109ff33, + 0x4d940920, 0x145cd902, 0x4cdf0000, 0xbc000014, 0xff0ad0d9, 0x002a4cde, 0xa6f9bf00, 0x2f1bf469, + 0xa601f998, 0x271bf459, 0x3308f918, 0xb3210194, 0xb0140130, 0x08f40136, 0xb3ff0a0d, 0x3e110234, + 0x980025ea, 0xec3e03fa, 0xdabf0025, 0x9020ff90, 0xfea620dd, 0x09c51bf4, 0xf4a9a6ff, 0x190f0d1b, + 0xf7110049, 0x02f8009f, 0x900149fe, 0x9fbf2499, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xfb0430f4, 0x30f40475, 0xd112f9f4, 0x0000062c, 0x40fe19bf, 0x10009001, 0x09a0caa0, 0xcbb594bd, + 0x02c9b501, 0x00140cd9, 0xfe9fbf00, 0x99900149, 0x019bb508, 0xb5b69aa0, 0xd8fab811, 0xb17e0002, + 0x0fbf001c, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40c15, 0xd252f9e8, 0x0000062c, 0x40fe29bf, + 0x18009001, 0x0cb50ba0, 0xb2e5b201, 0xfea3b2d4, 0x11900141, 0x7e19a02c, 0x7e000e06, 0x98000a74, + 0x0abf010b, 0x900140fe, 0x0cb22000, 0x0026267e, 0x4cb20bb2, 0x3ab25db2, 0x000dcc7e, 0x9b7ea032, + 0xdd7e000a, 0x1fbf000d, 0x0a3229bf, 0x0bf4f9a6, 0x42e97e07, 0x1855fb00, 0xf9e830f4, 0x062cd252, + 0x29bf0000, 0x900140fe, 0x0ba01800, 0xb2010cb5, 0xb2d4b2e5, 0x0141fea3, 0xa02c1190, 0x0e067e19, + 0x0a747e00, 0x010b9800, 0x40fe0abf, 0x20009001, 0x267e0cb2, 0x0bb20026, 0x5db24cb2, 0xb97e3ab2, + 0xa032000d, 0x000a9b7e, 0x000ddd7e, 0x29bf1fbf, 0xf9a60a32, 0x7e070bf4, 0xfb0042e9, 0x2cd91855, + 0xbf000006, 0xfc30f499, 0x0a014ffe, 0x7ef9a004, 0x330042f5, 0x8a2e00a0, 0x7e120050, 0xf0001c22, + 0x0bf401a4, 0x00588a1f, 0x1c227e12, 0x00a0b300, 0x07a9c416, 0x0a0d1bf4, 0x42f57e02, 0x00a43300, + 0xfe02f806, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x140cd900, + 0x30f40000, 0xd112f9fc, 0x0000062c, 0x19bf9abf, 0xdb0140fe, 0x40000000, 0xb8080090, 0x0008b4aa, + 0x2c7e09a0, 0x0fbf009f, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x0cd90415, 0xf4000014, 0x12f9fc30, + 0x00062cd1, 0xbf9abf00, 0x0140fe19, 0x000000db, 0x08009040, 0x08c4aab8, 0x7e09a000, 0xbf009f2c, + 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xd90415fb, 0x0000140c, 0xf9fc30f4, 0x062cd112, 0x9abf0000, + 0x40fe19bf, 0x0000db01, 0x00908000, 0xc4aab808, 
0x09a00008, 0x009f2c7e, 0x19bf0fbf, 0x0bf4f9a6, + 0x42e97e07, 0x0415fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0xd99fa001, 0x0000140c, 0x008f99bf, + 0x9a9001c0, 0x00f9cf60, 0xb34c99c7, 0xb34a0090, 0xf8f60190, 0x28653e02, 0x00f9cf00, 0xb34c99c7, + 0xb30e0090, 0xf8f60190, 0x28793e02, 0xc2008b00, 0x00bbcf01, 0x040018d9, 0x05b9fd04, 0x009f2c7e, + 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x28d23e28, 0xc1008900, 0x009af601, 0x0000f1df, + 0x0099b880, 0x9ff70201, 0x009fcf00, 0x793e9fb2, 0xe97e0028, 0x30f40042, 0xf400f804, 0x0cd9fc30, + 0xf9000014, 0xb299bf22, 0x062cd1a2, 0x40fe0000, 0xc89ab801, 0x19bf0008, 0xa00c0090, 0x1c227e09, + 0xbf0ebf00, 0xbb01091f, 0xa9fd0492, 0x0bacf004, 0x0bf4efa6, 0x42e97e07, 0x0425fb00, 0xd9fc30f4, + 0x0000062c, 0x99bf02f9, 0x90014ffe, 0xf9a004ff, 0x00140cd9, 0x8f9ebf00, 0xcf01c000, 0x99c700f9, + 0x0099b34c, 0x90b301d2, 0x02f8f501, 0x00293b3e, 0xc700f9cf, 0x99b34c99, 0xb301ae00, 0xf8f50190, + 0x29503e02, 0x00f9cf00, 0xb34c99c7, 0x01770099, 0xf50190b3, 0x653e02f8, 0xf9cf0029, 0x4c99c700, + 0x480099b3, 0x0190b301, 0x3e02f8f5, 0x7e00297a, 0xd90090f2, 0x00800000, 0xf49409ff, 0x317e070b, + 0x0cd9002d, 0xbf000014, 0xc0008f9e, 0x00f9cf01, 0xb34c99c7, 0x00f30099, 0xf50190b3, 0xad3e02f8, + 0xf9cf0029, 0x4c99c700, 0xcf0099b3, 0x0190b300, 0x3e02f8f5, 0xcf0029c2, 0x99c700f9, 0x0099b34c, + 0x90b30098, 0x02f8f501, 0x0029d73e, 0xc700f9cf, 0x90b34c99, 0x90b36e00, 0x02f8f601, 0x0029ec3e, + 0x007c0d7e, 0x000000d9, 0x9409ff40, 0x7e070bf4, 0xd9007e21, 0x02000000, 0x000000df, 0xb409ff04, + 0xf4940fff, 0xbffd060b, 0x0000df05, 0x0fff0800, 0x060bf494, 0xd905bffd, 0x0000140c, 0xaab89abf, + 0x7e0008c4, 0xfe009f2c, 0x99900149, 0xd99fbf04, 0x0000062c, 0xf9a699bf, 0x00e10bf5, 0x002b353e, + 0x01c20089, 0xff0099cf, 0x0995049d, 0x0194b31f, 0x2a003e97, 0xa4efb800, 0x00890008, 0x9ff601c1, + 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, 0x3e9fb200, 0x890029ec, 0xcf01c200, 0x008f009d, + 0xd73e01c0, 0xefb80029, 0x89000890, 0xf601c100, 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, + 0x9fb2009f, 0x0029c23e, 0x01c20089, 0xff0099cf, 0x00d9049d, 0xff010000, 0x0bf59409, 0x8f3efeb9, + 0xefb80029, 0x89000898, 0xf601c100, 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, 0x9fb2009f, + 0x00297a3e, 0x01c20089, 0x8f009dcf, 0x3e01c000, 0xb8002965, 0x000884ef, 0x01c10089, 0xdf009ff6, + 0x800000f1, 0x010099b8, 0x009ff702, 0xb2009fcf, 0x29503e9f, 0x42e97e00, 0x0405fb00, 0x00062cdf, + 0xf4ffbf00, 0x49fefc30, 0x8f9fa001, 0xcf01c000, 0x99c700f9, 0x0090b34c, 0x0190b341, 0x3e02f8f6, + 0xcf002b4f, 0x99c700f9, 0x0090b34c, 0x0190b30e, 0x3e02f8f6, 0x8a002b63, 0xcf01c200, 0x49fe00aa, + 0xd99fbf01, 0x0000062c, 0xaac799bf, 0xf4f9a601, 0xb83e2d0b, 0xafb8002b, 0x89000380, 0xf601c100, + 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, 0x9fb2009f, 0x002b633e, 0x0042e97e, 0xf80430f4, + 0xfc30f400, 0x00062cd9, 0xbf82f900, 0x014ffe99, 0xa024ff90, 0x00a933f9, 0x04bd0087, 0x002a94d7, + 0x8314bd00, 0x8601c000, 0xd501c100, 0x800000f1, 0x01c20084, 0x7b987abf, 0x7e0cb201, 0xf0004212, + 0x0bf401a4, 0xbd0bb248, 0x0ad4bdc4, 0x246b7e08, 0x7ea2b200, 0x33002b3c, 0xb83300a4, 0x0004002a, + 0xc70039cf, 0x99b34c99, 0xb300dc00, 0xf8f50190, 0x2c203e02, 0x0039cf00, 0xb34c99c7, 0x00b90099, + 0xf50190b3, 0x353e02f8, 0x0090002c, 0x00119101, 0xa42404b3, 0xa00014b3, 0x002cda3e, 0x94d804bd, + 0xbd00002a, 0xc0008314, 0xc1008701, 0x00f1d601, 0x00858000, 0xfe0401c2, 0x8b988abf, 0x7e0cb201, + 0xf0004212, 0x0bf401a4, 0xbd0bb246, 0x0ad4bdc4, 0x246b7e08, 0x7ea2b200, 0x33002b3c, 0xb83100a4, + 0x0004002a, 0xc70039cf, 0x90b34c99, 0x90b37300, 0x02f8f601, 0x002ca43e, 
0xc70039cf, 0x90b34c99, + 0x90b35100, 0x02f8f601, 0x002cb83e, 0x91010090, 0x04b30011, 0x14b3a624, 0x49fea200, 0x24999001, + 0x2cd99fbf, 0xbf000006, 0xa6a43d99, 0x410bf4f9, 0x002d2a3e, 0xf0004bcf, 0x6f7e01b5, 0x4a3e0047, + 0x6af6002c, 0x0035f700, 0x3e0039cf, 0xcf002c35, 0xb4fd005b, 0x476f7e04, 0x2ccc3e00, 0x007af600, + 0xcf0036f7, 0xb83e0039, 0xe97e002c, 0x85fb0042, 0xfc30f404, 0x00062cdf, 0xbf02f900, 0x0149feff, + 0xa0049990, 0xc0008f9f, 0x00f9cf01, 0xb34c99c7, 0xb34f0090, 0xf8f60190, 0x2d493e02, 0x00f9cf00, + 0xb34c99c7, 0xb32a0090, 0xf8f60190, 0x2d5d3e02, 0x2dda7e00, 0x3009c400, 0x7e070bf4, 0xc4002e5a, + 0x0bf44009, 0x2e927e3d, 0x2dbf3e00, 0xc2008900, 0x0090cf01, 0xf40109c4, 0x713ede0b, 0x488f002d, + 0x008901b0, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, 0x3e9fb200, 0xfe002d5d, + 0x99900149, 0xd99fbf04, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0405, 0xbf000006, + 0xfc30f4ff, 0xa00149fe, 0xc0008f9f, 0x00f9cf01, 0xb34c99c7, 0xb3220090, 0xf8f60190, 0x2ded3e02, + 0x00f9cf00, 0xb34c99c7, 0xb3390090, 0xf8f60190, 0x2e013e02, 0xad248f00, 0xc1008901, 0x009ff601, + 0x99b8f4bd, 0xf6000100, 0xf2df009f, 0xb8800000, 0x02020099, 0xcf009ff6, 0x9fb2009f, 0x002e013e, + 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0xd112f9fc, + 0x0000062c, 0x048a19bf, 0xb4bd01ad, 0x900140fe, 0x09a00800, 0x0047e97e, 0x01ad848a, 0xe97eb4bd, + 0x0fbf0047, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, 0xbf000006, 0xfc30f4ff, 0xa00149fe, + 0xc0008f9f, 0x00f9cf01, 0xb34c99c7, 0xb3220090, 0xf8f60190, 0x2ea53e02, 0x00f9cf00, 0xb34c99c7, + 0xb3390090, 0xf8f60190, 0x2eb93e02, 0xada48f00, 0xc1008901, 0x009ff601, 0x99b8f4bd, 0xf6000100, + 0xf2df009f, 0xb8800000, 0x02020099, 0xcf009ff6, 0x9fb2009f, 0x002eb93e, 0xbf0149fe, 0x062cd99f, + 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0x950149fe, + 0x9fa018be, 0xb995adb2, 0x05a8df10, 0x94760000, 0x08b5b608, 0x3ee59eff, 0x26002f5c, 0x1c1bf49b, + 0x6601f958, 0x141bf49e, 0xa43df97f, 0x5808d975, 0xdf7501ff, 0x2f643e09, 0x04ff9000, 0x9433f93f, + 0x010add00, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cdf00f8, + 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x0aa9b29f, 0x00b0b302, 0x0895b60b, 0xb920a43d, 0xbf0149fe, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdffc, 0x02f90000, + 0x49feffbf, 0x04999001, 0x9fa0a0b2, 0x6500a0b3, 0xb305a998, 0xd95e0090, 0x0000140c, 0xaab89abf, + 0x7e0004e8, 0xe7001c22, 0xb30168a9, 0x4c0fff9a, 0x470090b3, 0xb314a5b6, 0x400fffaa, 0x3b00a0b3, + 0xb6050e98, 0xaf940c94, 0xa0a43d0c, 0x050e98e9, 0xf9bce9bf, 0x01efb5f0, 0xbd070f98, 0x98f9a094, + 0x0f980509, 0x9299bf07, 0xf9b50199, 0x30393e01, 0x3e350a00, 0x0a003039, 0x0149fe2e, 0xbf049990, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0405fb00, 0xd9fc30f4, 0x0000062c, 0x99bf32f9, + 0x90014ffe, 0xf9a010ff, 0xb9c4afb2, 0xf4020a03, 0xc9c4321b, 0xb8f2b2ff, 0x00392899, 0x01a2b3e7, + 0xbd029194, 0x309a3e04, 0x901ab200, 0x227e0100, 0x1190001c, 0x902aa004, 0x03a60422, 0x3ded08f4, + 0x0149fea4, 0xbf109990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0435fb00, 0xf9fc30f4, + 0x18a9bf12, 0xab1809af, 0x062cd008, 0x99b80000, 0x94003969, 0x09bf029a, 0xf00141fe, 0x119001f4, + 0xf0ffbc08, 0x44d919a0, 0xf0008800, 0xb9fd01b4, 0x05bffd05, 0x001cb17e, 0x09bf1fbf, 0xf9a6a43d, + 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0x062cdffc, 0x22f90000, 0x49feffbf, 0x0c999001, 0x9fa0a0b2, + 0xc2b2b132, 0x00b3350a, 0x09bf6900, 0x610094b3, 0xb3010998, 0x8a5a0094, 0x7e00e204, 0xc4001c22, + 
0xa9c4ff1e, 0xf49ea601, 0x943d091b, 0x0031813e, 0x09080f18, 0x01e4f0f3, 0xf002abc5, 0xb9fd01f4, + 0x02f4b604, 0xbffdfe09, 0xe2048a05, 0x04b9fd00, 0x7e05befd, 0x8a001cb1, 0x7e00e204, 0x09001c22, + 0x0020b301, 0x3d292006, 0x318f3ea4, 0xfe010a00, 0x99900149, 0xd99fbf0c, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x30f40425, 0x062cdff8, 0x52f90000, 0x49feffbf, 0x1c999001, 0x9fa0a3b2, + 0xc272b0b2, 0x350ad4b2, 0xc70039b3, 0x00c97300, 0xd9b300ba, 0xbf00b500, 0x009db339, 0x399800b4, + 0x009db301, 0x088a00ac, 0x227e00e2, 0xa9c4001c, 0x0d0bf401, 0x02a2a9e7, 0xbb0294b6, 0x4cfe0209, + 0x3d3ab201, 0x1bcc90b4, 0x00310a7e, 0x8300ad33, 0x00008900, 0x0301c430, 0x09bc0405, 0xff19c400, + 0xb20209bb, 0x1c227e0a, 0xff19c400, 0xe4f259bc, 0xa6ffff29, 0x071df4f9, 0xffff2fe4, 0xf4bdfe32, + 0x0032563e, 0xf090913c, 0x94b60394, 0x95a9bc03, 0x90f9493c, 0xf93201ff, 0x08f4fe26, 0xffe9c4ea, + 0x7b040090, 0x20730229, 0x49bc0d00, 0x3e143d40, 0xfe003223, 0x99900149, 0xb29b3f1b, 0x7ec4bd3a, + 0x3e00310a, 0x0a00328f, 0x328f3e02, 0xfe010a00, 0x99900149, 0xd99fbf1c, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x30f40855, 0x062cdffc, 0x02f90000, 0x49feffbf, 0x04999001, 0x9fa0a0b2, + 0xa0b3350e, 0xb43d1a00, 0x0a7ec4bd, 0xae320031, 0x0c00a433, 0xbc7e0ab2, 0xae320030, 0x900149fe, + 0x9fbf0499, 0x00062cd9, 0x3299bf00, 0xf4f9a6ea, 0xe97e070b, 0x05fb0042, 0xf430f404, 0x00062cd9, + 0xbf12f900, 0x014ffe99, 0x4b10ff90, 0xf9a003e8, 0x0016fc7e, 0x4afea0b2, 0x08aa9001, 0x817ea1b2, + 0xa08a0015, 0x227e00e5, 0xa6b0001c, 0x1c1ff400, 0x9c7e1ab2, 0xa0a60015, 0x0a090df4, 0x334b3e04, + 0x000e7e00, 0x33223e00, 0xfea43d00, 0x99900149, 0xd99fbf10, 0x0000062c, 0xf9a699bf, 0x7e070bf4, + 0xfb0042e9, 0x30f40c15, 0x062cdffc, 0x02f90000, 0x49feffbf, 0x04999001, 0x9fa0b0b2, 0x3f01ad58, + 0x02a958af, 0x000000de, 0xffdce480, 0x03f4f0ff, 0xb60093f0, 0x9cbc12f4, 0x05fefd90, 0xf0019992, + 0xf9ffff94, 0x00d073b5, 0x01cf9218, 0x01000089, 0xff08f4b6, 0xf4f195b9, 0x9fffffff, 0x01aa18b5, + 0xf401a9c4, 0x00890a0b, 0xb9fd0200, 0xffa4f005, 0xf410a9c4, 0x00d90b0b, 0xfd080000, 0xa9c405b9, + 0x0b0bf404, 0x000000d9, 0x05b9fd20, 0x00e5a08a, 0x001cb17e, 0xf97e0ab2, 0x49fe0032, 0x04999001, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x05fb0042, 0xf830f404, 0x00062cdf, 0xbf52f900, + 0x0149feff, 0xb21c9990, 0x729fa0a3, 0x71020ab1, 0xf40100b6, 0x44fe450c, 0xe4a08201, 0x18449000, + 0x723e54bd, 0x45a00034, 0x16701072, 0x050df404, 0x3bb20400, 0xffff0ce4, 0x7e7e4ab2, 0x4bbf000b, + 0x107b2ab2, 0x04229002, 0x7e0003f0, 0xbc001cb1, 0x14733030, 0xa43dd400, 0x900149fe, 0x9fbf1c99, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40855fb, 0x2cd9fc30, 0xf9000006, 0xfe99bf32, + 0xff90014f, 0xa0b2b210, 0x58c3b2f9, 0xbb5802b9, 0x00a0b301, 0x0093f056, 0x9990fc0f, 0xffb0e403, + 0x149fffff, 0xb30b1bf4, 0x3e0c0004, 0xb3003507, 0xb20e0000, 0x34157eda, 0x00a43300, 0x8b2ab230, + 0x7e07a120, 0x33003366, 0xb32200a4, 0xb01e0010, 0xccf00006, 0x01c6f00b, 0x1b723ab2, 0x7e05c436, + 0x3e003054, 0x0a003509, 0x0149fe35, 0xbf109990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0435fb00, 0xdff830f4, 0x0000062c, 0xffbf12f9, 0x900149fe, 0x40fe0c99, 0x909fa001, 0xb1b20800, + 0xe87e0cb2, 0x0fbf0050, 0x9802f958, 0x93f003ff, 0x0c94b600, 0x0035653e, 0x0bf491a6, 0x06ff9815, + 0x100099b8, 0x00f4b300, 0x3e350af3, 0x98003584, 0xf99802fe, 0xbcffbf05, 0x9ea6909f, 0xf0089cf0, + 0x9a320196, 0x900149fe, 0x9fbf0c99, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40815fb, + 0x2cdfe030, 0xf9000006, 0xb0ffbf82, 0x49fe09b1, 0x40999001, 0xa00140fe, 0x3c00909f, 0x0cb2a4b2, + 0x0050e87e, 0x395803bf, 
0x03319802, 0xb60093f0, 0xe63e0c94, 0xe0b40035, 0xf49ea609, 0x1198130b, + 0x0099b806, 0x14b30010, 0xb83ef000, 0x19980037, 0x031f9805, 0xb6041e98, 0x9fbc0395, 0xf42ea620, + 0x43fe4f0c, 0x904ab201, 0x2bb23833, 0x3db2010c, 0x0031aa7e, 0xad33a032, 0xbf01a400, 0xff004f39, + 0x90014efe, 0x99bc34ee, 0x059ffd90, 0x2cb24ab2, 0xe9a0ebb2, 0x4a7e040d, 0xa032004b, 0x7e00ad33, + 0x05199801, 0xb5019990, 0xba3e0519, 0xd4d90037, 0x9800002a, 0x49fe049e, 0x3c999001, 0x9ea09fb2, + 0x0036703e, 0x33091918, 0x98130090, 0xf1a00411, 0x14b3f1bf, 0xd43ef200, 0x39350037, 0x36a83e08, + 0x08b91800, 0x13009033, 0xa004bb98, 0xb31bbf1b, 0x3ef200b4, 0xbf0037b8, 0x7e4ab2bb, 0x32004a98, + 0x00ad33a0, 0x11bf0119, 0xbf011f98, 0x0142fe3b, 0x3fb54ab2, 0x04195801, 0x0c302290, 0x752db204, + 0xaa7e0439, 0xa0320031, 0xf200ad33, 0xb21cbf00, 0x0d2bb24a, 0x4b4a7e04, 0x33a03200, 0x00df00ad, + 0xffd92fbf, 0xb27fffff, 0xfd4ab22b, 0x2fa004f9, 0x040d3cbf, 0x004b4a7e, 0xad33a032, 0x5800c000, + 0x12980239, 0x0147fe03, 0xf00148fe, 0x31980093, 0x2c779003, 0x900c9694, 0xaf3e2888, 0xe0b40037, + 0xf46ea609, 0x19bf0a1b, 0xa0019990, 0x011b9819, 0x139815bf, 0x0c4ab205, 0x7e7db204, 0x330031aa, + 0x00a100ad, 0xffde7fbf, 0xbc00ffff, 0x3eff3035, 0x0000de94, 0xfefdff00, 0x059ffd04, 0x2c9879a0, + 0xb24ab201, 0x7e040d7b, 0x33004b4a, 0xa07500a4, 0x031b9823, 0x040c4ab2, 0xbb928db2, 0x31aa7e04, + 0x00a43300, 0x032c985f, 0x8bb24ab2, 0xcc92040d, 0x4b4a7e04, 0x00a43300, 0x021f984b, 0x29b594bd, + 0x0066b805, 0x2fb50010, 0x06229802, 0xb3061198, 0xff6f001d, 0x0037ba3e, 0x49fe3500, 0x40999001, + 0x2cd99fbf, 0xbf000006, 0xa60a3299, 0x1d0bf4f9, 0x0037e63e, 0xa00141fe, 0x3c1190fe, 0x00368d3e, + 0xba3ea032, 0xe97e0037, 0x85fb0042, 0xfc30f420, 0x00062cdf, 0xbf02f900, 0x0149feff, 0xb2049990, + 0xb39fa0a0, 0x985100a0, 0x90b306a9, 0x0cd94a00, 0xbf000014, 0xe0aab89a, 0x227e0004, 0xa9e4001c, + 0x9ab30fff, 0xb3380fff, 0xe7330090, 0xb3016caa, 0x2b0fffaa, 0x2600a0b3, 0xb6060e98, 0xaf940c94, + 0xa0a43d0c, 0x060e98e9, 0xf9bce9bf, 0x01efb5f0, 0x00385c3e, 0x5c3e350a, 0x2e0a0038, 0x900149fe, + 0x9fbf0499, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40405fb, 0x2cdfd830, 0xf9000006, + 0xfeffbf82, 0x99900149, 0xa0a6b248, 0x00a0b39f, 0x06a09821, 0x0bbffd09, 0xa601bf92, 0x110cf4f9, + 0x09010f98, 0xf4f9a6ff, 0xf4b3070b, 0x02f81100, 0x6db30205, 0x3e027d00, 0x92003b3c, 0x100c10bb, + 0x002ad4dd, 0x31aa7e00, 0x33a53200, 0x026400ad, 0x002ad4da, 0x0068db00, 0x080c0000, 0x000ba87e, + 0x7e00adb3, 0x01099802, 0xf9a60fbf, 0x027818f5, 0x0ffffee4, 0x02701bf5, 0xfe066f98, 0x99900149, + 0xbf843d44, 0x0af1b0ff, 0x94bd9ea0, 0x3e0b91b0, 0x3d003b19, 0x7e140ba4, 0xb20040e5, 0x00a4b3a3, + 0x0502f80c, 0x3b2f3e05, 0x0ae0b400, 0xb6ff89c4, 0x9ebc0c94, 0xa094bdb0, 0x04a9b5ab, 0x0fffb9e4, + 0x02241bf5, 0xb2014dfe, 0x90040c6a, 0xaa7e44dd, 0xa0330031, 0xa5320a00, 0x003b2b3e, 0xf11190b4, + 0x750fff94, 0x90b40239, 0x6c99e711, 0x03397501, 0xb61190b4, 0x39351f95, 0x11f0b408, 0x0ffff9e4, + 0x0fff9ab3, 0x6cf9e713, 0xff9ab301, 0x01090a0f, 0xbf093935, 0x0099b339, 0xd4df0090, 0x1800002a, + 0x99900dfb, 0x0ffc4a04, 0xf00991b0, 0xbb7effb4, 0x94bd0016, 0x91b0743d, 0xfffc090d, 0x91b094a9, + 0x3af13e0c, 0x0ba43d00, 0x40e57e1c, 0xb3a1b200, 0xf80c00a4, 0x3e050002, 0xb4003b58, 0x7ac40cb0, + 0x16fc7eff, 0xb5e4bd00, 0xb0b4061e, 0xfe040c09, 0xdd90014d, 0xb0babc40, 0x1bb56ab2, 0x31aa7e01, + 0x33a03200, 0x015500ad, 0xdf1090b4, 0x00ffffff, 0xa0049ffd, 0xf49fa619, 0x94bd071b, 0x1b9819a0, + 0x00b4b301, 0x0002f80c, 0x3b583e02, 0x0ce0b400, 0x92083f18, 0xb29001b9, 0x409ebc04, 0x0e00f033, + 0x00093918, 0x009d3335, 0x29900088, 0xb5f4bd04, 
0x19b50414, 0x051fb503, 0x003ad73e, 0x2bb26ab2, + 0x4dfe040c, 0x3cdd9001, 0x0031aa7e, 0xad33a032, 0xb400ea00, 0x9fc70f90, 0x06f4b378, 0x014dfe28, + 0x2b906ab2, 0x904cb204, 0x857e38dd, 0xad330051, 0xbf00c800, 0x0ef0b419, 0xa0909fbc, 0x3aca3e19, + 0x07f4b300, 0xffffde29, 0x9efd00ff, 0x042b9004, 0xb50219b5, 0x1bb50414, 0xb24cb203, 0x141d906a, + 0x0051857e, 0xd23ea032, 0x2290003a, 0xf424a604, 0x0d338d0d, 0x98008600, 0x94b30339, 0x31b50b00, + 0x3aeb3e03, 0x0df0b400, 0x1006f1b5, 0x11b00177, 0x2ad4de0d, 0xe9180000, 0xf579260d, 0x98feca08, + 0x94b304e9, 0xe3b50b00, 0x3b133e04, 0x0bf0b400, 0x1004f3b5, 0x31b00188, 0x2ad4de0b, 0xe9180000, + 0xf589260c, 0x3efdf008, 0x33003b3c, 0x98110050, 0xff0f0669, 0x69989fa0, 0x019fb506, 0x900149fe, + 0x9fbf4899, 0x00062cd9, 0x3299bf00, 0xf4f9a65a, 0x6a3e1f0b, 0xa032003b, 0x2b3e0532, 0x3505003b, + 0x003b2f3e, 0x2f3e0205, 0xe97e003b, 0x85fb0042, 0xf430f428, 0x00062cdf, 0xbf22f900, 0x0149feff, + 0xfe149990, 0x9fa00142, 0x229094bd, 0x7e29a00c, 0x33004863, 0x0b5600a0, 0x2a9cda04, 0xbcb20000, + 0x397e2db2, 0xa032004e, 0x4300a433, 0x900141fe, 0x1ab21011, 0x003c087e, 0xa433a032, 0x1cbf3100, + 0xa4bd2bbf, 0x001428d1, 0xd1797e00, 0xb31aa000, 0x3d1a00a0, 0xd2117eb4, 0x00a0b300, 0x001abf12, + 0xd74b7eff, 0x3beb3e00, 0xfeff0000, 0x99900149, 0xd99fbf14, 0x0000062c, 0x0a3299bf, 0x0bf4f9a6, + 0x42e97e07, 0x0c25fb00, 0x00062cde, 0xf4e9bf00, 0x4ffefc30, 0x09f9a001, 0xbfa9a00e, 0x3de9bfff, + 0xf4f9a6a4, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdfd830, 0xf9000006, 0xf4ffbf62, 0x49fef430, + 0x4c999001, 0xa93f9fa0, 0xa398a6b2, 0x01903301, 0x489d330d, 0x3c3e0360, 0x3018003e, 0x010d3304, + 0x3298011b, 0x04319803, 0xdc4ba43d, 0x01004c10, 0x000680d5, 0x40cb7e00, 0xb35aa000, 0xf80c00a4, + 0x3e050102, 0xfe003fb3, 0x99900149, 0x2af4d428, 0x1cb20000, 0xb50741b5, 0x2bb20191, 0xb5154035, + 0x92a00642, 0xdc4ed4bd, 0x26727e10, 0x33a13200, 0x015900ad, 0x993f59bf, 0x0d009033, 0xf0014998, + 0x49b50895, 0x0680d901, 0x99bf0000, 0x33699918, 0xdf130090, 0x00002af4, 0xf101f998, 0xb5400095, + 0xf4da01f9, 0x7e00002a, 0x9800b9ce, 0x027e0230, 0xebd90001, 0xb20000a6, 0x0091b0ac, 0xb4bd0db2, + 0xc8da04bd, 0xb000002f, 0x01300101, 0x7e010e08, 0xd90013c8, 0x00002af4, 0x000000df, 0x5090351e, + 0x900149fe, 0x42fe3499, 0x909fa001, 0x010a3822, 0xcd7e2bb2, 0xa43300b1, 0x2bbf1b1f, 0xf401b9c4, + 0xb5f0141b, 0xa0010a01, 0xb20f7e2b, 0x1fa03300, 0xbd09f806, 0x7e020ab4, 0xda00b20f, 0x00002af4, + 0x00b85d7e, 0xbd014afe, 0x0dc4bdb4, 0x34aa9001, 0x0090537e, 0x003e0d3e, 0x32020433, 0x000680d9, + 0xb39abf00, 0xd97f00a0, 0x00002b0c, 0xbf019c98, 0xd9f43d9b, 0x00002af4, 0x9f35d4bd, 0x10dc4e15, + 0x0026727e, 0x083ea132, 0x0d33003e, 0x7e020703, 0x1800010e, 0x90330839, 0x3e982400, 0x2b94d903, + 0x9cdf0000, 0xa000002b, 0x043e989e, 0x98019eb5, 0xf9a00539, 0xb5063e98, 0x391801fe, 0x0090331c, + 0x2af4df21, 0x39980000, 0x2dfe9808, 0xfd2efd98, 0xf9b5059e, 0x0939982d, 0xb5059dfd, 0x087e2ef9, + 0x143d0001, 0x003e0d3e, 0xab001d33, 0x016b1801, 0x817e3ab2, 0x0209000f, 0x90014ffe, 0xf92048ff, + 0xf935943d, 0x03331802, 0xb4bd0409, 0xf335fab2, 0x01f93503, 0x000f717e, 0x003fb33e, 0xfe0140fe, + 0x00900141, 0x40119044, 0xb20142fe, 0x3c22900a, 0x2cb21bb2, 0x537ed43d, 0x39980090, 0x0a0fbf02, + 0xf4f9a601, 0x70de3c1b, 0x3f000005, 0x009033e9, 0x0a943d0e, 0x3ee9201e, 0xd9003e9f, 0x200000ff, + 0xb394f9ff, 0xb2121c94, 0xb21bb20a, 0xb6647e2c, 0x3e9f3e00, 0xb20ab200, 0x7e2cb21b, 0xfe00a9ff, + 0xbb90014b, 0xc4bebf44, 0xffdf1fa9, 0xb6e0ffff, 0xeffd1894, 0xe59eff04, 0x00d9bea0, 0xff400000, + 0x0bf494e9, 0x1fa4336b, 0x0149fe68, 0xbf409990, 0x0499929d, 0xe4339cbf, 
0x03df421e, 0x89ff0000, + 0xff3fffff, 0xd9fff4ef, 0x0294b694, 0xb3e5f9ff, 0xc50b00c0, 0x093e03e9, 0xfd09003f, 0xfd16df95, + 0xf0b304e9, 0xe9c50d00, 0x3eb9a001, 0x09003f2d, 0x94e9fffe, 0x003f093e, 0x000000df, 0xffffd9ff, + 0xefff00ff, 0x94d9fff4, 0xa005f9fd, 0x0149febf, 0x90014ffe, 0xff904099, 0xbf9ebf44, 0x049992ff, + 0x99929dbf, 0x929fa004, 0x9ea00499, 0xa0049992, 0x0a747e9d, 0x2af4d900, 0x99180000, 0x00943348, + 0x3e02f80a, 0x33003f6b, 0xfe1e0190, 0xee90014e, 0xdfe9bf38, 0xe0ffffff, 0xdf049ffd, 0x01000000, + 0xa0059ffd, 0xd9f43de9, 0x00002af4, 0x9f35010d, 0x014bfe48, 0x90014cfe, 0xcc9034bb, 0x014afe30, + 0x7e38aa90, 0x7e009053, 0x3d000a9b, 0x3fb33e14, 0xfee40100, 0x99900149, 0xd99fbf4c, 0x0000062c, + 0x1a3299bf, 0x0bf4f9a6, 0x42e97e07, 0x0c30f400, 0x922865fb, 0x9ba004a9, 0xb548ae92, 0x008906ec, + 0xe9a03000, 0x94bdafb2, 0xb24cff92, 0xf8f9a0fa, 0x7e02f900, 0xb300715d, 0xbd0a00a4, 0x40373e04, + 0x16898900, 0x0093fe00, 0x00163389, 0x890090fe, 0xfe001671, 0xa4bd0091, 0x897eb4bd, 0xa0b2000a, + 0xf400a6b0, 0xa4bd0c1f, 0x7efe0b4b, 0x7e000a95, 0x3e004140, 0xb200162b, 0x7e01fb0a, 0xf8000ad6, + 0x8f22f900, 0xb200ffff, 0xf4bfa6c0, 0xc0b3720c, 0xc9926f00, 0x049cfd01, 0xa6651bf4, 0x600cf4cf, + 0x0903bf90, 0x14f9fffc, 0x7e550bf4, 0x82000a74, 0x7f003244, 0x3240892d, 0x899abf00, 0xc2003246, + 0x9b7f0f0f, 0x5201ff79, 0x907c01d9, 0xc4f9ff90, 0x08f4cd66, 0xd0c17c21, 0xffffd9e4, 0x08f491a6, + 0xf4db6615, 0xc9e4100c, 0x2d60ffff, 0x3e009abc, 0xbd0040b5, 0x0a9b7e04, 0x40bf3e00, 0xb204bd00, + 0x0c21fb0a, 0x40417e04, 0xf900f800, 0x7eb1b212, 0xb2004041, 0x00a0b3a0, 0xbd1cb20c, 0x0b947eb4, + 0xfb0ab200, 0x7e040c11, 0xf80040cb, 0x3dabb200, 0x40e57ea4, 0x8f00f800, 0xbf001110, 0x1b9918f9, + 0x20069536, 0x18f9bfa9, 0x99c71b99, 0xf8b92024, 0x11108c00, 0x14cdbf00, 0xbec406af, 0x04e43603, + 0xf01bd918, 0xf9fd3f94, 0x1bdf3505, 0xf918cfbf, 0xcf94f01b, 0x3505e9fd, 0x327e1bfe, 0x00f80019, + 0x00111089, 0x9a1899bf, 0x24abc71b, 0x7e06a536, 0xf8001932, 0x18ff7e00, 0xf400f800, 0x82f9e030, + 0xffffc1e4, 0xb210c295, 0xffa0e4c5, 0x10a395ff, 0x48fea4b2, 0x0147fe01, 0x903c8890, 0x8bb53477, + 0x017db501, 0x0bb27ca0, 0x1ab28aa0, 0x0016fc7e, 0xa6b20bb2, 0xfc7e2ab2, 0x3bb20016, 0x1ab2a0b2, + 0x0016fc7e, 0xa1b23bb2, 0xfc7e2ab2, 0x10bc0016, 0x10699500, 0x09bcadb2, 0xf410a600, 0x00890a0d, + 0xd9bc0100, 0x017b98d0, 0xffff69e4, 0xfe100f94, 0xf9bc014e, 0x24ee90f0, 0xa0100995, 0x90d9bcef, + 0xbf01e9b5, 0x01ed98ec, 0x41fe4ab2, 0x2c119001, 0xa0011db5, 0x16fc7e1c, 0x018b9800, 0x5ab2a0b2, + 0x0016fc7e, 0xbc011b98, 0x1abf000a, 0xfbb00bbc, 0x30f42085, 0x00c0b3f0, 0x014ffe33, 0xb508ff90, + 0xfaa001fb, 0x9cbb2009, 0x0096b002, 0xbc211df4, 0xb9bcf5ac, 0x014afe94, 0xbc059ffd, 0xa9a0f5bc, + 0x9801afb5, 0xaabf01ab, 0xf81030f4, 0x014afe00, 0xbd019fb9, 0xf5bfbc94, 0xa001a9b5, 0x01ab98af, + 0x483eaabf, 0x30f40042, 0x00c0b3f0, 0x014ffe33, 0xb508ff90, 0xfaa001fb, 0x9cbb2009, 0x0096b002, + 0xbc211df4, 0x4efef4bc, 0x95a9bc01, 0xbc059ffd, 0xe9b5f4ac, 0x98efa001, 0xeabf01eb, 0xf81030f4, + 0x014efe00, 0xbd019fb9, 0xf4afbc94, 0xefb5e9a0, 0x98eabf01, 0x9c3e01eb, 0x2cde0042, 0xbf000006, + 0xfc30f4e9, 0xdd014ffe, 0x00000000, 0x34d9f9a0, 0xbf000014, 0xbf9da0ff, 0xa6a43de9, 0x070bf4f9, + 0x0042e97e, 0xf80430f4, 0x49180f00, 0x9ff71100, 0xf802f800, 0xf430f400, 0x00062cdf, 0xbf12f900, + 0x0149feff, 0xb2109990, 0x8a9fa0ab, 0x7e12004c, 0xfe001cb1, 0x3f80014a, 0xaa904c4b, 0x7ea1b208, + 0x8a001581, 0x7e12004c, 0xf0001c22, 0x1bf43fa4, 0x3e010a09, 0xb2004346, 0x159c7e1a, 0xf4a0a600, + 0x02f8e40d, 0x49fea43d, 0x10999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x15fb0042, + 
0xfc30f40c, 0x00062cdf, 0xbf12f900, 0x0149feff, 0xb2089990, 0xb39fa0b1, 0xb02101a0, 0x08f401a6, + 0xb3ff0e0e, 0x00be02ad, 0x0044113e, 0x0049020f, 0x009ff60a, 0x00441f3e, 0xcf0a0049, 0xfe0e009f, + 0xf604fefd, 0xd033009f, 0x004f2d00, 0x00f9cf08, 0xf9f694bd, 0x09004000, 0x4b0009cf, 0xcab203e8, + 0x0016fc7e, 0x0200aab8, 0x0aa5b600, 0x3e000af6, 0x400043f9, 0x09cf0800, 0x03e84b00, 0xfc7ecab2, + 0xaab80016, 0xb6000200, 0xaa920aa5, 0x000af601, 0xcf090049, 0x9af6009f, 0x2a4cd900, 0x004f0000, + 0x0191b50a, 0xf000f9cf, 0xf9f60195, 0x443f3e00, 0x0a004e00, 0x0f00e9cf, 0x049ffdfe, 0x4f00e9f6, + 0xf9cf0800, 0xf694bd00, 0x004f00f9, 0x00f9cf09, 0xf9f694bd, 0xd9f4bd00, 0x00002a4c, 0x3d019fb5, + 0x0149fee4, 0xbf089990, 0x062cd99f, 0x99bf0000, 0xf9a6ea32, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, + 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x2a4cd99f, 0x99980000, 0x0194b301, 0x7e797e0c, 0x44833e00, + 0xfe02f800, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0xfc30f400, + 0x2cd122f9, 0xbf000006, 0xfea2b219, 0x148a0140, 0x009008d1, 0x7e09a00c, 0x3f001c22, 0xbf0ebf29, + 0x08aac71f, 0xa6ff94f0, 0x0bacf0a9, 0x0bf4efa6, 0x42e97e07, 0x0425fb00, 0xdffc30f4, 0x0000062c, + 0xffbf02f9, 0x900149fe, 0x9fa00499, 0x0189008f, 0xf000f9ce, 0xf9f78095, 0x43004e00, 0x0f00e9ce, + 0x049ffdfe, 0xd000e9f7, 0x0000140c, 0xaab80abf, 0x7e00034c, 0xbf001c22, 0x03abc509, 0x034c9ab8, + 0x1cb17e00, 0x0149fe00, 0xbf049990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0405fb00, + 0xdffc30f4, 0x0000062c, 0xffbf12f9, 0xfef430f4, 0x99900149, 0x8f9fa014, 0xcf01c000, 0x99c700f9, + 0x0099b34c, 0x90b30085, 0x02f8f501, 0x00455b3e, 0xc700f9cf, 0x90b34c99, 0x90b35f00, 0x02f8f601, + 0x0045703e, 0x001b957e, 0x0bb204bd, 0xd4bdc4bd, 0x6b7e0b0a, 0xa1900024, 0x7e1ab268, 0xe4001c22, + 0xb30e00ab, 0x0b0e00be, 0xb17e1ab2, 0x0090001c, 0x2404b301, 0xbd94bdd9, 0x089130b4, 0xb00091b0, + 0xc4bd0191, 0xe43dd4bd, 0x001414da, 0x13c87e00, 0x46073e00, 0xc2008900, 0x0099cf01, 0xf40194f0, + 0x843e280b, 0x1c8f0045, 0x00890285, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, + 0x3e9fb200, 0xfe004570, 0x99900149, 0xd99fbf14, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, + 0x15fb0c30, 0xfc30f404, 0x2cd112f9, 0xbf000006, 0xf430f419, 0x1c8ab4bd, 0x40fe0285, 0x14009001, + 0xb17e09a0, 0x027e001c, 0x40d90001, 0xb0000045, 0x94bd0091, 0x9130acb2, 0x0191b008, 0x001414da, + 0x4db4bd00, 0x010e2710, 0x0013c87e, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, 0x0c30f400, 0xf40415fb, + 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf00280f, 0xa619bf0f, 0x070bf4f9, + 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf0027d6, + 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, + 0x7e09a008, 0xbf00279d, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, + 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf0028db, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, + 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf00291c, 0xa619bf0f, 0x070bf4f9, + 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf0065ad, + 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xdf0415fb, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0x008f9fa0, + 0xf9cf01c0, 0x4c99c700, 0x220090b3, 0xf60190b3, 0x823e02f8, 0xf9cf0047, 0x4c99c700, 0x330090b3, + 0xf60190b3, 0x963e02f8, 0x00890047, 0x9af601c1, 0x0099b800, 0x9bf60001, 0x00f2df00, 0x99b88000, + 0xf6020200, 0x9fcf009f, 0x3e9fb200, 0xfe004796, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, + 0x0042e97e, 0xf80430f4, 
0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x01c0008f, 0xc700f9cf, + 0x90b34c99, 0x90b32200, 0x02f8f601, 0x0047fc3e, 0xc700f9cf, 0x90b34c99, 0x90b33300, 0x02f8f601, + 0x0048103e, 0x01c10089, 0xb8009af6, 0x00010099, 0xdf009bf6, 0x800000f2, 0x020099b8, 0x009ff602, + 0xb2009fcf, 0x48103e9f, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xde00f804, 0x0000062c, 0x30f4e9bf, 0x014ffefc, 0x1ed9f9a0, 0xbf000011, 0xbf9a3fff, 0xf4f9a6e9, + 0xe97e070b, 0x30f40042, 0xf400f804, 0x42f9f030, 0x00062cd4, 0xb249bf00, 0xbd0601a2, 0xfe1cb2b4, + 0x43fe0140, 0x16009001, 0xb2203390, 0x7e39a00a, 0xbd000b94, 0x014dfe94, 0xa01cdd90, 0x75d120d9, + 0x29180209, 0xb22ab204, 0x20c4bd0b, 0x35180909, 0x01090109, 0x7e010975, 0xbf003493, 0xa649bf3f, + 0x070bf4f9, 0x0042e97e, 0xf41045fb, 0x2cdfec30, 0xf9000006, 0xfeffbf22, 0x99900149, 0xa0a1b21c, + 0xb2060c9f, 0x0140feb2, 0x0090b4bd, 0x7e0ab20e, 0xbd000b94, 0x014ffe94, 0xa018ff90, 0x041e18f9, + 0x41fe1ab2, 0x14119001, 0x010919a0, 0x75020975, 0x05090109, 0x1909f920, 0x0bb20e20, 0xb2010935, + 0x7e1cb2fd, 0x32003493, 0x00a433a0, 0xb21abf0c, 0x2f7e7e2b, 0x0149fe00, 0xbf1c9990, 0x062cd99f, + 0x99bf0000, 0xf9a60a32, 0x7e070bf4, 0xfb0042e9, 0x30f41425, 0x062cd9f0, 0x42f90000, 0x4ffe99bf, + 0x20ff9001, 0xf9a0a3b2, 0xe84bbab2, 0x16fc7e03, 0x0142fe00, 0x04bda1b2, 0x90014afe, 0xaa901c22, + 0x7ea4b214, 0x3e001581, 0xb20049b7, 0x159c7e4a, 0x7ea0b200, 0xb200000e, 0x7e2bb23a, 0x330048eb, + 0xbf1600a4, 0x0194f029, 0x0d009033, 0x08f401a6, 0x49e03edd, 0xf401a600, 0xa43d0918, 0x0049e23e, + 0x49fe040a, 0x20999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x45fb0042, 0xf030f410, + 0x00062cdf, 0xbf22f900, 0x0149feff, 0x32189990, 0xa0a2b2b0, 0x488b7e9f, 0x00a43300, 0x00043364, + 0x3e143d0a, 0x18004a30, 0x94f01129, 0x0291140f, 0x060cb4bd, 0x900140fe, 0x0ab20e00, 0x000b947e, + 0x4ffe94bd, 0x14ff9001, 0xf135f9a0, 0x02097501, 0xb2042918, 0xb2fdb20b, 0x0909202a, 0x09f92001, + 0x01093518, 0xc4bd0209, 0x7e010975, 0x33003493, 0xb20e00a4, 0x86a08b2a, 0x49727e01, 0x0149fe00, + 0xbf189990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x1025fb00, 0xd9f030f4, 0x0000062c, + 0x99bf22f9, 0x90014ffe, 0xa1b218ff, 0xb2b2f9a0, 0xb4bd060c, 0x90014afe, 0x947e0eaa, 0x94bd000b, + 0xb20140fe, 0x1400901a, 0x8b7e09a0, 0xa4330048, 0x19186100, 0x0499c711, 0x0a0094b3, 0xe43e2009, + 0xd709004a, 0x19180920, 0x014efe04, 0xfe0eee90, 0xe920014f, 0x90102995, 0xf93514ff, 0x08299501, + 0x0902f935, 0x01e93518, 0xe9750409, 0xb2947d01, 0x03f235eb, 0xe975fdb2, 0xbd1ab202, 0x34937ec4, + 0x00a43300, 0x8b1ab20e, 0x7e07a120, 0xfe004972, 0x99900149, 0xd99fbf18, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x30f41025, 0x062cdfec, 0x72f90000, 0x49feffbf, 0x30999001, 0x9fa0a1b2, + 0xc3b2b7b2, 0xa9b3d672, 0x71009e00, 0xf50100d6, 0xbd00950c, 0x0cb4bd54, 0x0140fe06, 0x900142fe, + 0x22902000, 0x7505a02c, 0x25a00205, 0x900144fe, 0x4ab22644, 0x000b947e, 0x8b7e1ab2, 0xa4330048, + 0x23356800, 0x04191803, 0x05750bb2, 0x202db202, 0x20020909, 0x10399529, 0x95012935, 0x29350839, + 0x35080902, 0x04090109, 0x09751ab2, 0x7ec4bd01, 0x33003493, 0x753300a4, 0x45750146, 0x04191802, + 0x7db24bb2, 0x49201ab2, 0xc4bd1009, 0x7e014935, 0x33003493, 0xb21300a4, 0x32004b1a, 0x0049727e, + 0x004c063e, 0x49fe350a, 0x30999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x75fb0042, + 0xfc30f414, 0x00062cdf, 0xbf62f900, 0xfea3b2ff, 0x99900149, 0x989fa01c, 0xc4b205aa, 0xfd09d6b2, + 0xbebcaebf, 0x01ef9220, 0x0cf4f9a6, 0x01af9823, 0xf9a6ff09, 0xb3190bf4, 0xa61600f0, 0x1008f42e, + 0xa690c2bc, 0x0808f492, 0x0df49fa6, 0x1802f805, 
0x010a1139, 0xb30599c7, 0xb2410094, 0x32aa7e3a, + 0x33a53200, 0xbd3300a4, 0x4cb13e14, 0x0241bc00, 0x010006b1, 0x40060df4, 0x3ab20100, 0xb2b061bc, + 0x7e0d722c, 0x33004b4a, 0xbc1100a4, 0x10bc2020, 0xf414a610, 0x5a32da08, 0x900149fe, 0x9fbf1c99, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40465fb, 0x2cd9fc30, 0xf9000006, 0xfe99bf72, + 0xff90014f, 0xa0a0b220, 0x05ae98f9, 0xfd02c5b2, 0xf992efbf, 0xf492a601, 0xee98260c, 0xa6ff0101, + 0x1c0bf4e1, 0x1900e0b3, 0xa630bfbc, 0x1008f43f, 0xa690c3bc, 0x0808f493, 0x0df49ea6, 0x0e02f80b, + 0x4dc33e35, 0x11a91800, 0x99c7010e, 0x009db305, 0xaa7e0096, 0xa4320032, 0x8900ad33, 0xb227b200, + 0x3e24bd16, 0xe4004db9, 0xf40fff19, 0x020e090b, 0x004dc33e, 0xbf060f98, 0x019992f9, 0x0cf497a6, + 0x01f99821, 0x0bf496a6, 0x0090b319, 0xb20ab216, 0x35247e1b, 0x00a03300, 0x3e030e0a, 0xb2004dc3, + 0x7e1bb20a, 0x32004a98, 0x00a433ae, 0x060f983a, 0x9992f9bf, 0xf497a601, 0xf9981d0c, 0xf496a601, + 0x90b3150b, 0x1bb21200, 0x9f7e0ab2, 0xae320035, 0x1300a433, 0x100022b8, 0x1023bc00, 0x08f425a6, + 0xfe4e3289, 0x99900149, 0xd99fbf20, 0x0000062c, 0xea3299bf, 0x0bf4f9a6, 0x42e97e07, 0x0475fb00, + 0xd9fc30f4, 0x0000062c, 0x99bf02f9, 0x90014ffe, 0xb0b204ff, 0xb9bff9a0, 0xa6b0c9bc, 0x1708f4b9, + 0xffffd9e4, 0xa6f09bbc, 0x0b08f4fb, 0xa6010998, 0x050df4f9, 0xdc7202f8, 0xaa7eedb2, 0x49fe0031, + 0x04999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x05fb0042, 0xfc30f404, 0x2cd112f9, + 0xbf000006, 0x0140fe19, 0xb2080090, 0xb209a0cf, 0x05ab98b9, 0x9cb2deb2, 0xe07efd72, 0x0fbf004d, + 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0xf0fc0415, 0xf9fc30f4, 0xf430f4f0, 0x90b472f9, 0xfeb0b20d, + 0xbb90014b, 0x01b9b520, 0x00062cd9, 0xfe99bf00, 0xff90014f, 0xa0a5b228, 0xa009bff9, 0x0ce1b0be, + 0xc9bcd4b2, 0xf439a630, 0xd3bc1f08, 0xf4f3a6f0, 0x09981708, 0xf4f9a601, 0xb6b20f0c, 0x010024bd, + 0x273efc07, 0x02f8004f, 0x2c3e0100, 0x42bc004f, 0x0096b192, 0x060df401, 0x90010049, 0xb4bd0399, + 0x4c1497ff, 0x00da0100, 0x7e000012, 0xb2000b94, 0x723bb25a, 0x1200dd1c, 0xaa7e0000, 0xa0320031, + 0x2c00a433, 0x6c986bbf, 0x1200da01, 0x2db20000, 0x7e01004e, 0x330026d8, 0xf80a00a0, 0x4f2c3e02, + 0x3031bc00, 0xa62021bc, 0xa508f424, 0x900149fe, 0x9fbf2899, 0x00062cd9, 0x3299bf00, 0xf4f9a60a, + 0xe97e070b, 0x70fb0042, 0xfc0c30f4, 0x0430f4f0, 0x30f4f4f9, 0x062cdfe4, 0x42f90000, 0x49feffbf, + 0x2c999001, 0x9fa0a1b2, 0x00111ed9, 0x33993f00, 0x3d0a0090, 0x50c13e04, 0xfef4bd00, 0x99900149, + 0x929fa024, 0x9fa00499, 0x98049992, 0xa49805a2, 0x7e9fa006, 0x32002fb6, 0x00a033a0, 0x2ea43308, + 0x982bbf6b, 0xb9a60129, 0x013318f5, 0x0fffb9e4, 0x012b1bf5, 0xb20143fe, 0x1c33901a, 0x3db2040c, + 0x0031aa7e, 0xa433a032, 0x3fbf4200, 0x4ad93500, 0xa6534646, 0x331bf4f9, 0xed7e1ab2, 0xa4330037, + 0x49bf3600, 0x9992fd0f, 0xf49fa601, 0x4f98290c, 0xa6ff0901, 0x1f0bf4f9, 0x1c00f0b3, 0x777e1ab2, + 0xa0320038, 0x1000a033, 0x29b5ff09, 0x3329a001, 0x00b2000d, 0xaa7e1ab2, 0xa0320032, 0xa500ad33, + 0x0cb4bd00, 0x0140fe06, 0xb2160090, 0x0b947e0a, 0xfe94bd00, 0xff90014f, 0x18f9a028, 0x0bb20419, + 0x0104fdb2, 0x9f090920, 0x1909f920, 0x09010935, 0x01047503, 0xfe020975, 0x1ab20142, 0xb2242290, + 0x34937e2c, 0x33a03200, 0xbf5a00a4, 0x7e1ab22b, 0x32002f12, 0x00a433a0, 0x1112184c, 0xb30529c7, + 0xfe390094, 0x1ab20143, 0xb2203390, 0x48eb7e3b, 0x00a43300, 0x943fbf15, 0x94f00229, 0x049ffd3c, + 0x35060bf4, 0x1ab22014, 0xfd7eb43d, 0xa0330049, 0xa0320a00, 0x0050c13e, 0x1ed9010f, 0x20000011, + 0x0149fe9f, 0xbf2c9990, 0x062cd99f, 0x99bf0000, 0xf9a60a32, 0x3e110bf4, 0x000050e1, 0x50083e02, + 0x42e97e00, 0x1c45fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0xb39fa001, 
0x982100a0, 0xfd0f06aa, + 0x9992a9bf, 0xf49fa601, 0xaa98110c, 0xa6ff0901, 0x070bf4a9, 0x0c00a4b3, 0x020a02f8, 0x00516b3e, + 0x0fffb9e4, 0x1bf4020a, 0x2ad4d945, 0x9f980000, 0x51633e04, 0x08f91800, 0x28009033, 0x3309f918, + 0x58210090, 0xfe5802f9, 0x0093f003, 0xa60c94b6, 0x0f08f4b9, 0xffffe9e4, 0xa60c94b6, 0x0a0df4b9, + 0xb304ff98, 0xa0d200f4, 0xfe350acf, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0xf830f400, 0x00062cdf, 0xbf52f900, 0xfea4b2ff, 0x42fe0149, 0x1c999001, 0xc3b2b0b2, + 0x9fa0d5b2, 0x350a14bd, 0x3e182290, 0xb200523c, 0x0c0bb24a, 0x7e2db204, 0x330031aa, 0x008900ad, + 0x94b329bf, 0x11900e00, 0x04009020, 0x00523c3e, 0xfd019eb9, 0x008904e9, 0xe9ffff00, 0x0bfcf094, + 0xd901f6f0, 0xff00ff00, 0xf094e9ff, 0xb6f00bbc, 0xf0f0d901, 0xe9fff0f0, 0x0bccf094, 0xd901c6f0, + 0xcccccccc, 0xf094e9ff, 0xd6f00bdc, 0xaaaad901, 0xf4b6aaaa, 0x03b4b604, 0xb602c4b6, 0xe9fd01d4, + 0x0b9cf004, 0xfd0196f0, 0xfbfd05f9, 0x05fcfd05, 0xa005fdfd, 0x101fbc2f, 0x0052423e, 0x0df503a6, + 0x51a0ff71, 0x900149fe, 0x9fbf1c99, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40855fb, + 0x2cdffc30, 0xf9000006, 0xb2ffbf42, 0xfec2b2a0, 0xe3b20149, 0xb2149990, 0xb29fa0b4, 0x3ec4bdea, + 0xf00052de, 0xf99407e2, 0xf0ffbc03, 0xbcf0f9bc, 0xbe3ff0fe, 0x9030ff92, 0xbab201bb, 0x3030e912, + 0x0df40996, 0x00f4b3e1, 0x33010f06, 0x312d64e0, 0xf40064e6, 0xe4330b1c, 0xca3e2862, 0xe0330052, + 0xe4330c73, 0xd13e1c77, 0xcfbc0052, 0x52de3ec0, 0x90ffbc00, 0x0052db3e, 0xbc02f994, 0xae3fc0c9, + 0x0d00e033, 0xbd01ab90, 0x529a3ef4, 0x20c6b000, 0xf8090df4, 0x54063e02, 0x00d0b300, 0xbfdca006, + 0x90c2bc0f, 0x9fa6ff01, 0x01000cf5, 0x002a9cda, 0x07a99800, 0xbf010098, 0xbc0db29b, 0xaa7eb02b, + 0xa1320031, 0xe400ad33, 0xb242b200, 0xff00440a, 0x0053f73e, 0xc203c994, 0xccbc07be, 0xbcdb3ff0, + 0xdd90f0f9, 0xf0febc01, 0xb230fc92, 0x30b912d3, 0xf4099630, 0xc4b3e10d, 0x010c0600, 0xf03e20b2, + 0xb0330053, 0xb6315b64, 0x1cf40064, 0x62bd330c, 0x833e0099, 0xb0330053, 0xbd331973, 0x3e008c77, + 0x3f0053a5, 0x01ad90a9, 0x3eff9fc4, 0x3f0053e6, 0x01ad90a9, 0xc4ff9fc4, 0x903380f9, 0xf4fd4c00, + 0x53e63e05, 0x01a91800, 0xad90af3f, 0xff94f002, 0xb6fff4f0, 0x9fff0894, 0x53e63ef5, 0x03ad1800, + 0x3f02af18, 0x01a918ae, 0xf018d4b6, 0xf4b6fff4, 0xffe4f010, 0xb6ff94f0, 0x9efd0894, 0x059ffd05, + 0x90f59dff, 0x0fa004ad, 0x9001cc92, 0xdab20400, 0xcdb302b2, 0x3fff7000, 0x00b0333b, 0x013d900f, + 0x4b3ec4bd, 0xff010053, 0x900149fe, 0x9fbf1499, 0x00062cd9, 0x3299bf00, 0xf4f9a61a, 0xe97e070b, + 0x45fb0042, 0xf830f404, 0x2cd222f9, 0xbf000006, 0xfebcb229, 0x40fe0141, 0x10119001, 0xa00c0090, + 0xb2d4bd19, 0x0074de0b, 0x5f7e0000, 0x1fbf0052, 0x0a7f29bf, 0x0bf4f9a6, 0x42e97e07, 0x0825fb00, + 0xf9f830f4, 0x062cd222, 0x29bf0000, 0x41febcb2, 0x0140fe01, 0x90101190, 0x19a00c00, 0x0bb2d4bd, + 0x000076de, 0x525f7e00, 0xbf1fbf00, 0xa60abf29, 0x070bf4f9, 0x0042e97e, 0xf40825fb, 0x22f9f830, + 0x00062cd2, 0xb229bf00, 0x0141febc, 0x900140fe, 0x00901011, 0xbd19a00c, 0xde0bb2d4, 0x00000078, + 0x00525f7e, 0x29bf1fbf, 0xf9a60a3f, 0x7e070bf4, 0xfb0042e9, 0x30f40825, 0x062cdff0, 0x62f90000, + 0x49feffbf, 0x0143fe01, 0xfe289990, 0x9fa00142, 0x001404d6, 0x1c339000, 0x05242290, 0xbf040402, + 0x0d080c6a, 0x7e3bb2ff, 0x330000c1, 0x98f400a4, 0x193f0131, 0xeb079433, 0x3d041918, 0x2a9cda04, + 0x94330000, 0x527e0a00, 0xa032004f, 0xb2013b18, 0x0f817e1a, 0x00043300, 0x022035ca, 0x11182520, + 0x352ab203, 0xb4bd0124, 0x7e032135, 0x3e000f71, 0xf40054ff, 0x2cdff430, 0xf9000006, 0xbdffbf22, + 0x0140fe94, 0xb50c0090, 0x09a00109, 0x900149fe, 0xd4d21499, 0xa000002f, 0x7e01b29f, 0x3e008f2c, + 
0x98005592, 0x993f0119, 0x0a099433, 0x2f7e0ab2, 0xc8da003c, 0x7e00002f, 0xb2001475, 0xb22abfad, + 0x7e080c0b, 0x330000c1, 0xdadc00a0, 0x00002fc8, 0x0014b57e, 0x0055923e, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00062cde, 0xf4e9bf00, 0x4ffefc30, 0x3df9a001, 0xbfa92094, 0xa6e9bfff, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, 0xa920943d, 0xe9bfffbf, 0x0bf4f9a6, + 0x42e97e07, 0x0430f400, 0x2cde00f8, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0x20943df9, 0xbfffbfa9, + 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xde00f804, 0x0000062c, 0x30f4e9bf, 0x014ffefc, 0x943df9a0, + 0xffbfa920, 0xf9a6e9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4e9bf00, 0x4ffefc30, + 0x3df9a001, 0xbfa92094, 0xa6e9bfff, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, + 0xfefc30f4, 0xf9a0014f, 0xa920943d, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cde00f8, + 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0x20943df9, 0xbfffbfa9, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, + 0xde00f804, 0x0000062c, 0x30f4e9bf, 0x014ffefc, 0x943df9a0, 0xffbfa920, 0xf9a6e9bf, 0x7e070bf4, + 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4e9bf00, 0x4ffefc30, 0x3df9a001, 0xbfa92094, 0xa6e9bfff, + 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, 0xa920943d, + 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cde00f8, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, + 0x20943df9, 0xbfffbfa9, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xde00f804, 0x0000062c, 0x30f4e9bf, + 0x014ffefc, 0x943df9a0, 0xffbfa920, 0xf9a6e9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, + 0xf4e9bf00, 0x4ffefc30, 0x3df9a001, 0xbfa92094, 0xa6e9bfff, 0x070bf4f9, 0x0042e97e, 0xf80430f4, + 0x062cde00, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, 0xa920943d, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x2cde00f8, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0x20943df9, 0xbfffbfa9, 0xf4f9a6e9, + 0xe97e070b, 0x30f40042, 0xde00f804, 0x0000062c, 0x30f4e9bf, 0x014ffefc, 0x943df9a0, 0xffbfa920, + 0xf9a6e9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4e9bf00, 0x4ffefc30, 0x3df9a001, + 0xbfa92094, 0xa6e9bfff, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, + 0xf9a0014f, 0xa920943d, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdffc, + 0x12f90000, 0x30f4ffbf, 0x0149fed4, 0xa0349990, 0x49ff0f9f, 0x9ff70500, 0x5c997e00, 0x71c67e00, + 0x42ba7e00, 0x5f737e00, 0x00a93300, 0x060f02d6, 0xf7110049, 0x837e009f, 0xed7e005e, 0xb57e005e, + 0xbf7e0061, 0x1f7e005b, 0xb17e0064, 0xe67e0064, 0x657e005c, 0x497e0065, 0x7e7e0066, 0xe27e0066, + 0x387e0066, 0x8e7e0067, 0xe47e0067, 0xc37e0067, 0xb77e0089, 0x847e0068, 0x797e006d, 0x617e0063, + 0x314c0068, 0x00c3f177, 0x01943d00, 0x28913002, 0x30009130, 0x91300891, 0x14913010, 0x30189130, + 0x91301c91, 0x24913020, 0x300111b0, 0x10da0c11, 0xdb000006, 0x0000018c, 0x804e010d, 0x71d87e00, + 0x00ad3300, 0x08da023e, 0x0b000014, 0x7e010c04, 0x33006e05, 0x022c00ad, 0xf17ed24c, 0x000000c3, + 0x28a13001, 0x3000a130, 0xa13008a1, 0x10a1300c, 0x3014a130, 0xa13018a1, 0x24a13020, 0x000168db, + 0x0101b000, 0x0d1c0130, 0x00804e03, 0x000614da, 0x71d87e00, 0x1400da00, 0x040b0000, 0x057e080c, + 0xad33006e, 0x4c01dd00, 0xc3f18863, 0x44db0000, 0x30000001, 0xa13028a1, 0x10a13000, 0x3014a130, + 0xa13018a1, 0x24a13020, 0x300101b0, 0x11300811, 0x1c01300c, 0x804e080d, 0x0618da00, 0xd87e0000, + 0x24da0071, 0x0b000014, 
0x7e080c04, 0x33006e05, 0x019000ad, 0xf155534c, 0xdb0000c3, 0x00000120, + 0x3028a130, 0xa13000a1, 0x14a13010, 0x3018a130, 0xa13020a1, 0x0101b024, 0x30081130, 0x01300c11, + 0x4e070d1c, 0x1cda0100, 0x7e000006, 0xda0071d8, 0x00002fd4, 0x080c1bb2, 0x006e057e, 0x4300ad33, + 0x54d64c01, 0x0000c3f1, 0x0000fcdb, 0x28a13000, 0x3000a130, 0xa13010a1, 0x18a13014, 0x3020a130, + 0x01b024a1, 0x08113001, 0x300c1130, 0x050d1c01, 0xda00c04e, 0x00000620, 0x0071d87e, 0x001404da, + 0x0c040b00, 0x6e057e08, 0x00ad3300, 0xf64c00f6, 0x00c3f18a, 0x00d8db00, 0xa1300000, 0x00a13028, + 0x3010a130, 0xa13014a1, 0x20a13018, 0xb024a130, 0x11300101, 0x0c113008, 0x0d1c0130, 0x00804e09, + 0x000624da, 0x71d87e00, 0x142cda00, 0x040b0000, 0x057e080c, 0xad33006e, 0x4c00a900, 0xc3f1dabf, + 0xb4db0000, 0x30000000, 0xa13028a1, 0x10a13000, 0x3014a130, 0xa13018a1, 0x24a13020, 0x300c1130, + 0x01b01c01, 0x08113001, 0xc04e0a0d, 0x0628da00, 0xd87e0000, 0x20da0071, 0x0b000014, 0x7e080c04, + 0x33006e05, 0x7e5c00a4, 0x7e007165, 0x7e005c75, 0x7e006dff, 0x7e000a74, 0x7e004747, 0x7e00645a, + 0x7e005c17, 0x7e005d1b, 0x7e0063ae, 0x7e0063de, 0x7e0066b3, 0x7e006717, 0x7e00676d, 0x7e0067c3, + 0x7e006840, 0x7e006896, 0x7e006dc9, 0x7e008989, 0x7e0068ec, 0x7e005eb8, 0x33006e7c, 0xf80600a0, + 0x0149fe02, 0xbf349990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x2c30f400, 0xf40415fb, + 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x2fe0da08, 0x09a00000, 0x001434d9, 0x9899bf00, + 0x95f90599, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0x00062cde, 0xf4efbf00, 0x49fefc30, + 0xa0400a01, 0xbf9fbf9f, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, + 0x0149fefc, 0x70df9fa0, 0x49200000, 0x9ff60700, 0x03004e00, 0x4f00e9cf, 0x9ffdcfff, 0x0095f104, + 0x00e9f680, 0x00b0718f, 0xf6040049, 0x008f009f, 0xf9ce0215, 0x0195f000, 0xfe00f9f7, 0x9fbf0149, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xefbf0000, 0xfefc30f4, + 0x004a0149, 0xbf9fa020, 0xa6e9bf9f, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, + 0xfefc30f4, 0x9fa00149, 0xce01004f, 0x95f000f9, 0x00f9f710, 0xce07004f, 0x95f000f9, 0x00f9f710, + 0xce04004f, 0x95f000f9, 0x00f9f710, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x30f400f8, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, 0x002fe2da, 0xd909a000, + 0x00001434, 0x999899bf, 0xbf95f908, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xd90415fb, 0x0000062c, + 0x30f499bf, 0x014ffefc, 0xad7ef9a0, 0xa433005d, 0xaddf1100, 0x49deadde, 0x9ff61000, 0x7e02f800, + 0x7e005df4, 0x7e005d65, 0xfe005e30, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x00900089, 0xf00099cf, 0x0bf40194, + 0xa5008915, 0x009fcf00, 0xf5f0ef0e, 0x04fefd07, 0xfe009ff6, 0x9fbf0149, 0x00062cd9, 0xa699bf00, + 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x00900089, + 0xc4009fcf, 0x1bf401f9, 0x3ea43d09, 0xc7005dda, 0x96b024f9, 0x0b9cf002, 0x49fe9a32, 0xd99fbf01, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, + 0x0f9fa001, 0x80008905, 0x009ff601, 0x99b8060f, 0xf6000100, 0x49fe009f, 0xd99fbf01, 0x0000062c, + 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0x899fa001, + 0xcf010200, 0x008f0099, 0x9ffd1000, 0x1a1bf404, 0x820434da, 0x1c227e00, 0x01a4f000, 0xf80b1bf4, + 0x3e240a02, 0x3d005e69, 0x0149fea4, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xf400f804, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 
0x00900140, 0x1438da08, 0x09a00000, 0x001434d9, + 0x9899bf00, 0x95f90e99, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0x00062cd9, 0xf499bf00, + 0x4ffefc30, 0xa01e0a01, 0xdc967ef9, 0x00a03300, 0xfe02f806, 0x9fbf0149, 0x00062cd9, 0xa699bf00, + 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x0100008f, + 0xf6590049, 0x49fe009f, 0xd99fbf01, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, + 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0x899fa001, 0xce009000, 0x94f00099, 0x200bf401, 0x00f1008e, + 0x0f00e9ce, 0x049ffdef, 0xb800e9f7, 0x025200ee, 0xfd00e9ce, 0xe9f7049f, 0x0149fe00, 0x2cd99fbf, + 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, + 0xa00a004a, 0x1c227e9f, 0x14aae700, 0x06aa9201, 0xf401a6b0, 0x004f1f0c, 0x00f9cf4f, 0xb3e899c7, + 0xcf130f94, 0x94f000f9, 0x0b9cf0ff, 0xb43e9a32, 0xa43d005f, 0xbf0149fe, 0x062cd99f, 0x99bf0000, + 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdffc, 0x02f90000, 0x49feffbf, 0x04999001, + 0x00899fa0, 0x99ce0090, 0x0194f000, 0x00d70bf5, 0x0062817e, 0xc4bdb4bd, 0x090ad4bd, 0x00246b7e, + 0x7ec8aa90, 0x0c001c22, 0xbda0b201, 0x0ab4bdd4, 0x246b7e0a, 0x90f00b00, 0x0bffc8aa, 0x1cb17eb4, + 0xbdc4bd00, 0xbdb4bdd4, 0x246b7ea4, 0xb2b4bd00, 0x28aab8a0, 0xb17e0005, 0x0ab8001c, 0xbd000530, + 0x1cb17eb4, 0x340ab800, 0xb4bd0005, 0x001cb17e, 0x010c0ab8, 0x7e040b00, 0xb8001cb1, 0x0001000a, + 0xb17e4a0b, 0x0ab8001c, 0x0b000104, 0x1cb17e4a, 0x080ab800, 0x420b0001, 0x001cb17e, 0xb2640090, + 0x1c227e0a, 0xfffe0900, 0x0ab2b4a9, 0x001cb17e, 0x00e2f08a, 0x001c227e, 0x8a40abc5, 0x7e00e2f0, + 0x8a001cb1, 0x7e00e2f4, 0xc5001c22, 0xf48a40ab, 0xb17e00e2, 0xf47e001c, 0xf4bd0064, 0x038a0089, + 0xfe009ff6, 0x99900149, 0xd99fbf04, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40405, + 0x062cdffc, 0x12f90000, 0x49feffbf, 0x08999001, 0x00899fa0, 0x99ce0090, 0x0194f000, 0x009e0bf5, + 0xc4bdb4bd, 0x0b0ad4bd, 0x00246b7e, 0xd4bd010c, 0xb4bda1b2, 0x6b7e0c0a, 0xa0b20024, 0x7e1c1a90, + 0xc5001c22, 0x0a9010ab, 0x1cb17e1c, 0x1c1ab800, 0x227e0050, 0xabc5001c, 0x1c0ab810, 0xb17e0050, + 0x1ab8001c, 0x7e00101c, 0xc5001c22, 0x0ab810ab, 0x7e00101c, 0xb8001cb1, 0x00401c1a, 0x001c227e, + 0xb810abc5, 0x00401c0a, 0x001cb17e, 0x301c1ab8, 0x1c227e00, 0x10abc500, 0x301c0ab8, 0x1cb17e00, + 0x1c1ab800, 0x227e0060, 0xabc5001c, 0x1c0ab810, 0xb17e0060, 0xe37e001c, 0x49fe0062, 0x08999001, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x15fb0042, 0x062cdf04, 0xffbf0000, 0x8efc30f4, + 0xfe009500, 0x9fa00149, 0x4f00e9cf, 0x9ffdcfff, 0x0095f104, 0x00e9f620, 0x01a1008f, 0xf000f9cf, + 0xf9f60195, 0x24004e00, 0x8f00e9cf, 0xfd060000, 0xe9f6059f, 0x5f207e00, 0x95008f00, 0x00f9cf00, + 0xf62095f0, 0xffdf00f9, 0x8900ffff, 0xf601c300, 0x3e7e009f, 0x004a0027, 0x1c227e0a, 0x6ca9c700, + 0x001438de, 0x07e93500, 0xe9350309, 0x0ce9350d, 0x3598a9c7, 0xa9c704e9, 0x05e93574, 0xaac794bd, + 0x02e9b570, 0x8906ea35, 0xcf010200, 0x008f0099, 0x9ffd1000, 0x080bf404, 0xe9350109, 0x60de7e0e, + 0x5fce7e00, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xa6a43d99, 0x070bf4f9, 0x0042e97e, 0xf80430f4, + 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x00900089, 0xf00099ce, 0x0bf40194, 0xbd010c2f, + 0x0ab4bdd4, 0x246b7e10, 0x00008b00, 0x40aa9001, 0x001cb17e, 0x0e0ab4bd, 0xd4bd010c, 0x00246b7e, + 0xaa90010b, 0x1cb17e94, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xf400f804, 0x2cdffc30, 0xf9000006, 0xfeffbf12, 0x99900149, 0x899fa008, 0xce009000, 0x94f00099, + 0x5d0bf401, 0xc4bdb4bd, 0x0d0ad4bd, 0x00246b7e, 0xd4bd010c, 0xb4bda1b2, 
0x6b7e0e0a, 0xa0b20024, + 0x7e0c1a90, 0xc5001c22, 0x0a9010ab, 0x1cb17e0c, 0xbdb4bd00, 0x0ad4bdc4, 0x246b7e0f, 0xb2b4bd00, + 0xbd010ca1, 0x7e100ad4, 0xb200246b, 0x0c1a90a0, 0x001c227e, 0x9010abc5, 0xb17e0c0a, 0x49fe001c, + 0x08999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x15fb0042, 0xfc30f404, 0x2cd112f9, + 0xbf000006, 0x0140fe19, 0xda080090, 0x00002a4c, 0x34d909a0, 0xbf000014, 0x11999899, 0x0fbf95f9, + 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x90a4bd01, + 0xb4bd0800, 0xc4bd09a0, 0x617ed43d, 0x0fbf0043, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, + 0xbf000006, 0xfc30f4ff, 0xda0149fe, 0x00002a54, 0xb43d9fa0, 0x00734d7e, 0x002a54d9, 0xb399bf00, + 0xf8060094, 0x0149fe02, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xf400f804, + 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x2fe3da08, 0x09a00000, 0x001434d9, 0x3d9fbf00, + 0x98a92094, 0x95f910f9, 0x19bf0fbf, 0xf9a6a43d, 0x7e070bf4, 0xfb0042e9, 0x0cd90415, 0xf4000014, + 0x12f9fc30, 0x00062cd1, 0xbf9abf00, 0x03034b19, 0xb80140fe, 0x0002dcaa, 0xa0080090, 0x1cb17e09, + 0x6f448a00, 0x1c227e06, 0xbf0dbf00, 0x0650d91e, 0xa4f10000, 0x408f3fff, 0x9aa00f42, 0x000654d9, + 0x3d9fa000, 0xf4dea6a4, 0xe97e070b, 0x15fb0042, 0xfc30f404, 0x2cd112f9, 0xbf000006, 0x0cb4bd19, + 0x0140fe38, 0x002a5cda, 0x08009000, 0x947e09a0, 0x34d9000b, 0xbf000014, 0x2a5cda99, 0x99bf0000, + 0x0fbf95f9, 0xa43d19bf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0xf9fc30f4, 0x062cd222, 0x29bf0000, + 0xd4bdc4bd, 0x41feb4bd, 0x90120a01, 0x19a00c11, 0x00246b7e, 0x0484a0b8, 0x7e0ab200, 0xc5001c22, + 0x0ab201ab, 0x001cb17e, 0x08b8c08a, 0x001c227e, 0xa9fffb09, 0xb8c08ab4, 0x1cb17e08, 0xb9408a00, + 0x1c227e08, 0xffe00900, 0x408ab4a9, 0xb5f008b9, 0x1cb17e02, 0xbf1fbf00, 0xf4f9a629, 0xe97e070b, + 0x25fb0042, 0xfc30f404, 0x2cd112f9, 0xbf000006, 0xbdb4bd19, 0xfed4bdc4, 0xa4bd0140, 0xa0080090, + 0x246b7e09, 0x1434d900, 0x9fbf0000, 0x00140cd9, 0xda9aa000, 0x00002fe5, 0xf90cf998, 0xbf0fbf95, + 0xf4f9a619, 0xe97e070b, 0x15fb0042, 0xfc30f404, 0x0cd022f9, 0xd2000014, 0x0000062c, 0x29bf0abf, + 0x41feff0b, 0xb8aab801, 0x11900008, 0x7e19a00c, 0xbf001cb1, 0xb8ff0b0a, 0x0008bcaa, 0x001cb17e, + 0xff0b0abf, 0x08c0aab8, 0x1cb17e00, 0x0b0abf00, 0xc4aab8ff, 0xb17e0008, 0x0abf001c, 0x7000008b, + 0x08d4aab8, 0x1cb17e00, 0xdb0abf00, 0x01000000, 0x08a8aab8, 0x1cb17e00, 0xdb0abf00, 0x0e03ffff, + 0x08e0aab8, 0x1cb17e00, 0xdb0abf00, 0xce000000, 0x08b4aab8, 0x1cb17e00, 0xbf1fbf00, 0xf4f9a629, + 0xe97e070b, 0x25fb0042, 0xfc30f404, 0x2cd112f9, 0xbf000006, 0x0140fe19, 0xda080090, 0x00002fe6, + 0x34d909a0, 0xbf000014, 0x07999899, 0x0fbf95f9, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, + 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, 0x002fe7da, 0xd909a000, 0x00001434, 0x999899bf, + 0xbf95f909, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xde0415fb, 0x0000062c, 0x30f4e9bf, 0x014ffefc, + 0x002a94dd, 0xbff9a000, 0x09efbffc, 0x09d9a0ff, 0x01d9b50f, 0x0bf4cfa6, 0x42e97e07, 0x0430f400, + 0x30f400f8, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, 0x002fe8da, 0xd909a000, 0x00001434, + 0x999899bf, 0xbf95f90a, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xdf0415fb, 0x0000062c, 0x30f4febf, + 0x0149fefc, 0x9ebf9ea0, 0xe9a6f9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0xf9fc30f4, 0x062cd112, + 0x19bf0000, 0x900140fe, 0xe9da0800, 0xa000002f, 0x1434d909, 0x99bf0000, 0xf9049998, 0xbf0fbf95, + 0xf4f9a619, 0xe97e070b, 0x15fb0042, 0x062cdf04, 0xfebf0000, 0xfefc30f4, 0x9ea00149, 0xf9bf9ebf, + 0x0bf4e9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, + 
0x002feada, 0xd909a000, 0x00001434, 0x999899bf, 0xbf95f903, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, + 0xdf0415fb, 0x0000062c, 0x30f4febf, 0x0149fefc, 0x9ebf9ea0, 0xe9a6f9bf, 0x7e070bf4, 0xf40042e9, + 0x00f80430, 0xf9fc30f4, 0x062cd112, 0x19bf0000, 0x900140fe, 0xd0da0800, 0xa000002a, 0x1434d909, + 0x99bf0000, 0xf90f9998, 0x2ac8df95, 0x0dbf0000, 0x9cd91ebf, 0xb500002a, 0xc0df069f, 0xb500002a, + 0xf4bd059f, 0xa0099f35, 0x019fb59f, 0x0f089f35, 0x039fb532, 0x0bf4dea6, 0x42e97e07, 0x0415fb00, + 0x00062cdf, 0xf4febf00, 0x49fefc30, 0xbf9ea001, 0xa6f9bf9e, 0x070bf4e9, 0x0042e97e, 0xf80430f4, + 0xfc30f400, 0x2cd112f9, 0xbf000006, 0x0140fe19, 0xda080090, 0x00002feb, 0x34d909a0, 0xbf000014, + 0x06999899, 0x0fbf95f9, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, 0xbf000006, 0xfc30f4fe, + 0xa00149fe, 0xbf9ebf9e, 0xf4e9a6f9, 0xe97e070b, 0x30f40042, 0xf400f804, 0x12f9fc30, 0x00062cd1, + 0xfe19bf00, 0x00900140, 0x2af0da08, 0x09a00000, 0x001434d9, 0x9899bf00, 0x95f90199, 0x19bf0fbf, + 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0xff1830f5, 0x00062cdf, 0xbf82f900, 0x0149feff, 0x010899b8, + 0xa0080a00, 0x1c227e9f, 0x0aa0b200, 0x1c227e08, 0x0000d900, 0x09fd4000, 0x4d1bf504, 0x1fa99504, + 0x460199b3, 0x4ba43d04, 0x040c01dc, 0x0040417e, 0xa9b3a4b2, 0xb5034500, 0xa0b576a0, 0x72a0b571, + 0xb573a0b5, 0xa0b574a0, 0x2ae8d975, 0x4ffe0000, 0xe8ff9001, 0x002a9cda, 0x019fb500, 0x002fb67e, + 0xa033a232, 0xad330900, 0xd903112e, 0x00002a9c, 0xde079998, 0x00002ae8, 0x99989fbf, 0x029fbb01, + 0x99b3e9a0, 0x98009b00, 0x9db37549, 0xfe008b00, 0xe5b20143, 0x554714bd, 0x4350d6aa, 0x33905249, + 0x6a0b3ea8, 0x2ae8da00, 0x1bb20000, 0x0054257e, 0x1bf4a766, 0x2ae8da56, 0x1b900000, 0x54257e18, + 0xf059bf00, 0xa1bc00a3, 0x1c0f9000, 0x18f4f9a6, 0x2ae8da3a, 0x0bb20000, 0x0054607e, 0x1bf4a6a6, + 0xda0cb22a, 0x00002ae8, 0xd4bd3bb2, 0x00007ade, 0x525f7e00, 0x00a43300, 0x01399828, 0x10de9eb3, + 0x7541b50c, 0x006a153e, 0xbf011190, 0x34999259, 0x08f419a6, 0x75499893, 0x5f0099b3, 0x002d3302, + 0x4998025a, 0x009db374, 0x42980110, 0x0024b375, 0x6c773e74, 0x2ae8da00, 0x2bb20000, 0x0054257e, + 0x1bf4a866, 0x2ae8da51, 0x2b900000, 0x54607e02, 0xf4a7a600, 0x42b5401b, 0x2ae8da74, 0x2b900000, + 0x7e04bd08, 0xbd00549b, 0xffa3c414, 0x006a853e, 0xdab002bc, 0x00002ae8, 0x7e010090, 0xf000549b, + 0x1abcffa4, 0xf403a610, 0x1033e908, 0x56b50900, 0x9856a001, 0x94b37449, 0x22902400, 0x6aaf3e01, + 0x2ae8d500, 0xff480000, 0x494287b8, 0xbf64bd54, 0x03999259, 0x08f529a6, 0x4c98ff7f, 0x00cdb374, + 0x773e0278, 0x3b98006c, 0x74499804, 0xc4033098, 0x09bcff1a, 0x16fc7e00, 0xbc2bb200, 0xd4bd000a, + 0xe8da0cb2, 0xde00002a, 0x00000087, 0x00525f7e, 0x8700ad33, 0x9829bf01, 0x2c98754f, 0x4994b303, + 0xc0cfbc2a, 0x002ae8da, 0xbd5bb200, 0x008cded4, 0x5f7e0000, 0xad330052, 0x98016100, 0x49980e5f, + 0x909fbc75, 0x107649b5, 0x3f980111, 0xff19c405, 0x08f49fa6, 0x76419894, 0x3f0019b3, 0x2ae8da01, + 0x1bb20000, 0x00549b7e, 0x2f02ad33, 0x2ae8da01, 0x1b900000, 0x549b7e01, 0x08ad3300, 0x40fe011e, + 0x2ae8da01, 0x00900000, 0xb21cb27c, 0xded4bd0b, 0x00000090, 0x00525f7e, 0xff00ad33, 0x05059800, + 0xf42456b0, 0x2405050d, 0x900149fe, 0x9e987c99, 0x019f9802, 0x94bd40b2, 0x1fbc24bd, 0x7049b5f0, + 0xbc0141fe, 0x119030fe, 0x6bf63e60, 0x2ae8da00, 0x1bb20000, 0xd4bd3cb2, 0x000095de, 0x525f7e00, + 0x00a43300, 0x90193f3d, 0x22900733, 0x18092001, 0x09350419, 0x08191801, 0x18020935, 0x09350c19, + 0x10191803, 0x18040935, 0x09351419, 0x18191805, 0x90060935, 0x25a60700, 0xb5b508f4, 0x41987042, + 0x2416b070, 0xbd0b0cf4, 0x3e043d24, 0x49006c23, 0x91f71000, 0x490a0f00, 0x9ff71100, 0x3d02f800, + 0xbd24bd04, 0x6c6b3e34, 
0x714f9800, 0xbb039e94, 0xf99402e9, 0x06f4b609, 0xbc029fbb, 0x4ebce0e9, + 0x0090b390, 0x984e3c27, 0x330294f0, 0x0a1d0090, 0xc4b4bd01, 0x667eff0c, 0x3bff0042, 0xa52aff95, + 0xaeb29fb2, 0xf3b2e2b2, 0xc4010010, 0x91a6ff09, 0x3eb908f4, 0xbd006c7b, 0x8934bd24, 0xce009000, + 0x94f00099, 0xe10bf501, 0x0544bd00, 0x6d2a3ef7, 0x0129c400, 0x008d0bf5, 0xd4bdc4bd, 0x080a4bb2, + 0x00246b7e, 0xa1b8a0b2, 0xb2000608, 0x1c227e1a, 0x00abe500, 0x7e1ab280, 0xb8001cb1, 0x00062001, + 0x227e1ab2, 0xabc5001c, 0x7e1ab204, 0xb8001cb1, 0x00060001, 0x227e1ab2, 0xabe5001c, 0x1ab28000, + 0x001cb17e, 0x061801b8, 0x7e1ab200, 0xff001c22, 0x1ab2b4a5, 0x001cb17e, 0x060401b8, 0x7e1ab200, + 0xe5001c22, 0xb28000ab, 0x1cb17e1a, 0x1c00b800, 0x0ab20006, 0x001c227e, 0xb204abc5, 0x1cb17e0a, + 0x01449000, 0x9d013395, 0x23ff0122, 0x641bf595, 0x6d663eff, 0x0140fe00, 0x002ae8da, 0x60009000, + 0x0bb2d4bd, 0x000098de, 0x525f7e00, 0x00ad3300, 0x42feff2a, 0x0145fe01, 0x143d03b2, 0x90982290, + 0x2a3e2455, 0x49fe006b, 0x0899b801, 0x9fbf0001, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0x00e883fb, 0xf9fc30f4, 0x062cd112, 0x19bf0000, 0xc04cb4bd, 0x0140fe00, 0x002af4da, 0x08009000, + 0x947e09a0, 0x34d9000b, 0xbf000014, 0x2bb0da99, 0x99980000, 0xbf95f90d, 0x3d19bf0f, 0xf4f9a6a4, + 0xe97e070b, 0x15fb0042, 0x061cd904, 0x30f40000, 0xd112f9fc, 0x0000062c, 0x19bf9abf, 0xdb0140fe, + 0x00002fc8, 0x0c080090, 0x7e09a001, 0xbf0072f3, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0x7e0415fb, + 0xf8007155, 0x7ed43d00, 0xf8007361, 0xe430f400, 0xa1b212f9, 0xb0b3b0b2, 0xb9986100, 0x0796b004, + 0x3d570cf4, 0x7e440ba4, 0xb20040e5, 0xb3050aaf, 0x584a00f0, 0x4afe0a09, 0x08aa9001, 0x9801afb5, + 0x93f0010f, 0x0294b600, 0xb503a9b5, 0x091802af, 0x351bb210, 0x0f9818a9, 0x04afb502, 0xb5030998, + 0x00bf05a9, 0xd97ea0a0, 0xa6b0006f, 0x0b9cf001, 0x3e019a12, 0x0a006e79, 0x1c15fb02, 0x006eb07e, + 0xa6b0943d, 0x0bacf001, 0x3c01a6f0, 0x00f8a29a, 0x00059889, 0x9f989ebf, 0x019fb501, 0xf47e9ea0, + 0xa4b3006e, 0xa43d0801, 0x09f800f8, 0x00f8ff0a, 0xb48912f9, 0x99bf000f, 0x90b3f20a, 0xd0813800, + 0x10bf000f, 0x04b3f10a, 0x747e2c00, 0x0109000a, 0xc08919a0, 0x90a0000f, 0x0010a889, 0x9fa0ff0f, + 0x000fb889, 0xbc8990a0, 0x9fa0000f, 0x003ff17e, 0x11fb10a0, 0xb48912f9, 0x04bd000f, 0x90a0b4bd, + 0x8a00a04c, 0x7e000fd4, 0xbd000b94, 0x8a140cb4, 0x7e0010b4, 0xbd000b94, 0x8a140cb4, 0x7e0010c8, + 0x89000b94, 0xa0001090, 0x108c8990, 0xa0b4bd00, 0x8a140c90, 0x7e001078, 0x8a000b94, 0xbd001094, + 0x7e140cb4, 0x89000b94, 0xa00010b0, 0x0fc08990, 0x8990a000, 0xa00010a8, 0x10748990, 0x8990a000, + 0xa0000fd0, 0x0fc48990, 0x8990a000, 0xa00010ac, 0x0fcc8990, 0x8990a000, 0xa0000fc8, 0x0fb88990, + 0x8190a000, 0x89001074, 0xa0000fbc, 0x0fd48090, 0x900ab200, 0xd07e1400, 0x01a6000b, 0x8af51bf4, + 0x7e0010b4, 0x8a000bd0, 0x7e0010c8, 0x8a000bd0, 0x7e001078, 0x8a000bd0, 0x7e001094, 0x89000bd0, + 0x8f001090, 0xa00010b4, 0x108c899f, 0x10c88f00, 0x7e9fa000, 0xfb007140, 0xb232f911, 0xb3b3b2a0, + 0x00c700a9, 0x0101a9bf, 0x060094b3, 0x0918f801, 0x07963018, 0x01050df4, 0x020f98fe, 0xf403f9c4, + 0xfb01050b, 0xf0030998, 0x0bf40394, 0xb3fb0105, 0x010600f4, 0x010998fa, 0x8e0099b3, 0x011db300, + 0x0ab20085, 0x0070ac7e, 0xa4b3a1b2, 0x0ab27801, 0x7e010298, 0x7e0070c7, 0x89000a74, 0xbf0010b0, + 0x01ff909f, 0x99bf9fa0, 0x0e0194b3, 0x000fb489, 0x733e92a0, 0xd0890070, 0x99bf000f, 0x170094b3, + 0x000fb48e, 0x0f18e9bf, 0x41991818, 0x0cf49f26, 0xb2e2a005, 0x05c77e2a, 0x0030b300, 0x7e32a006, + 0x89000a9b, 0xbf000fd0, 0x0090b399, 0x0fb48919, 0x1899bf00, 0x9918180f, 0xf49f2641, 0x0e7e0718, + 0x1ab20000, 0xfa0131fb, 0x0070a23e, 0xb205a998, 
0xb3fa0aaf, 0x98120090, 0xe80a03f9, 0x00ff96b1, + 0x0a050df4, 0xf900f801, 0x98a1b212, 0xaa9803ac, 0x01109802, 0xa5a5a5db, 0x0b947ea5, 0x03199800, + 0xbc021a98, 0x0aa0a0a9, 0x1c981bbf, 0x3fd37e04, 0xb90aa000, 0x0ab500aa, 0x05199803, 0xb5100a90, + 0x19180109, 0x41093518, 0xb5021998, 0x19bf0209, 0x180e09b5, 0x09351819, 0x03199840, 0x7e0f09b5, + 0x90000be7, 0xe77e240a, 0x00b5000b, 0x181f1807, 0x00b50809, 0xfff4f00c, 0xb5029fbb, 0x11fb0909, + 0x0016a489, 0x8a0093fe, 0x3d0005ad, 0x72a27eb4, 0xf8010a00, 0x10dc8900, 0xf89aa000, 0x10dc8900, + 0xf89abf00, 0x2fd88a00, 0x7eb43d00, 0x3300734d, 0xf80800a0, 0x8a00f809, 0x3d0010e4, 0x73577eb4, + 0x00a03300, 0xf809f806, 0x00b3f000, 0xa5df94bd, 0x3ea5a5a5, 0xa000719f, 0x019990af, 0xa604aa90, + 0xf608f49b, 0x508900f8, 0x008f0034, 0x9ab20011, 0x8b01f9b5, 0xbb003650, 0xb5b602b9, 0x04fb7502, + 0x0071897e, 0x907e00f8, 0xa433006e, 0xa67e0c00, 0xef7e0071, 0x00f80072, 0xf9e430f4, 0xb2c4b262, + 0x32b0b2a5, 0x34e272d3, 0x603444c0, 0x00a9b354, 0xb9b300ae, 0x8900a900, 0xbf001100, 0x06ce149f, + 0xffff2ae4, 0xbfa00501, 0xa048f034, 0x03c9c49b, 0xf0029436, 0xfd1403f4, 0x059ffd04, 0xfd059efd, + 0xa4b6059d, 0x1bb93502, 0x0040ed7e, 0x7100a0b3, 0x354c9034, 0xf0341d09, 0x010ab564, 0x350a0275, + 0x0f351803, 0x10f0b41a, 0x900149fe, 0x9ab51c99, 0x0390b501, 0xa0049fb5, 0x0a927594, 0x3d169335, + 0x1e0f35f4, 0x9bb2f4bd, 0xfe029fb5, 0x00900140, 0x7e0ab234, 0x32006e0d, 0x00a433a1, 0x00603324, + 0x320abf10, 0x3dc43d6b, 0x11cc7ed4, 0x0149fe00, 0xbf349990, 0x3e59a099, 0x0100729d, 0xfb1a3202, + 0x12f91c65, 0x30f4b0b2, 0x7ea1b2d4, 0x3d005bf4, 0xb0ae7294, 0x1cb20101, 0x30289130, 0x91300091, + 0x0c913008, 0x30109130, 0x91301491, 0x1c913018, 0x30209130, 0xdc8a2491, 0xac8b002f, 0xd43d0001, + 0x0071d87e, 0x0600a033, 0x30f402f8, 0x3d11fb2c, 0xf900f8a4, 0x30b0b202, 0x0df420c6, 0xc402f805, + 0xa994ffca, 0x02a4b604, 0xbda0a9bc, 0xb50c2094, 0xed7e0109, 0x0ab50040, 0xf901fb02, 0xb2c1b222, + 0x32d0b2a2, 0x7e540bba, 0xb20040e5, 0xb3050aac, 0xb21c00c0, 0xb20bb21a, 0x749a7e2d, 0x01a4b300, + 0x3ea43d0a, 0xf800734b, 0xfbff0a02, 0xb2010c21, 0x731b7ecd, 0x0c00f800, 0x7ed4bd01, 0xf800731b, + 0x3242f900, 0xb2c2b2d0, 0xb2a4b2b3, 0x16fc7eca, 0x54a19000, 0x1bb20a32, 0x0040e57e, 0xa0b3050f, + 0x1bb21e00, 0x2db23cb2, 0xa57e4eb2, 0xa4b30073, 0xf43d0a01, 0x00739c3e, 0xff0f02f8, 0x41fbfa32, + 0xf80eabb5, 0xfc30f400, 0x94bd52f9, 0x90014ffe, 0xf9a018ff, 0xb5b2a2b2, 0xd4b2c3b2, 0xa9c4e0b2, + 0xf4fb0103, 0x0101051b, 0x060034b3, 0x4efef901, 0x90f4bd01, 0xe43e18ee, 0xe9bf0073, 0xbc01ff90, + 0xe9a09049, 0x08f4f3a6, 0x90e9bff4, 0x59a65499, 0x01050bf4, 0x0029b3e8, 0x09b3009e, 0xb3009900, + 0x0096011d, 0x747e02a0, 0x0ebf000a, 0xbf14e998, 0x0099b9ef, 0x1bf4f9a6, 0x3ed10109, 0x9000748b, + 0xe9a05429, 0x5f920ebf, 0xd2f4bc54, 0xe9bfff0c, 0x000fb08b, 0xb5909fbc, 0x0fbf01e9, 0xf9b594bd, + 0xbf09bf0e, 0x029fb59f, 0xf9bf0fbf, 0xb5909dbc, 0x09bf03f9, 0xbf0f93b5, 0x1094b509, 0x9cb509bf, + 0xb509bf11, 0x09bf129c, 0x9fb5bfbf, 0xa00abf13, 0x10aa90ba, 0x000bd07e, 0xaa900abf, 0x0bd07e24, + 0xbf0fbf00, 0x0099b9f9, 0x7e14f9b5, 0x3e000a9b, 0x01007495, 0xfb1ab2fa, 0x42f90455, 0xb4b2a1b2, + 0xd3b2c2b2, 0xbaa6de00, 0x7e270cf4, 0xb2000a74, 0x0b1cb22a, 0xb2d4bd54, 0x73a57e3e, 0xb3a0b200, + 0xbf0c01a4, 0x7e4bb23a, 0x7e0073a0, 0xb2000a9b, 0xdf41fb0a, 0x0000062c, 0x30f4ffbf, 0x0149fefc, + 0x008f9fa0, 0xf9ce0289, 0x0195f000, 0xfe00f9f7, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, + 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x0089010f, 0x9ff60280, + 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 
0xdf00f804, 0x0000062c, + 0x30f4ffbf, 0x80aab8fc, 0x49fe0001, 0x09a4b601, 0xabf69fa0, 0x0149fe00, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0x00899fa0, + 0x9af60321, 0x0099b800, 0x9af60201, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x30f40042, 0xf400f804, 0x2cdff030, 0xf9000006, 0xfeffbf82, 0x99900149, 0xa0a13230, 0x75097e9f, + 0xff1ac400, 0x7e0143fe, 0xfe001d41, 0x16c40142, 0xd4a5b2ff, 0x00000fac, 0x90243390, 0x00d82c22, + 0xd7000014, 0x00002fc0, 0x0077063e, 0x9433093f, 0x7ebc1e00, 0xbc6ab298, 0x417ee949, 0x1b32001d, + 0x0ab2a5b2, 0x000f817e, 0x0077063e, 0x0f010918, 0x0f3f2001, 0x013135fc, 0xf00130b5, 0x9990ff94, + 0x049ffd03, 0xbc9009bc, 0x0f3fe949, 0x7307f033, 0xf407f630, 0xf933150c, 0x33009104, 0x335505f0, + 0x00c702fd, 0x0076683e, 0x600af033, 0xf40af630, 0xfd330c0c, 0x3e00b409, 0x33007696, 0x33560bf0, + 0x00a70cfd, 0x0076bc3e, 0x1b320ab2, 0x000f817e, 0x2ab2093f, 0x2920b4bd, 0x2935943d, 0x03001802, + 0x29350409, 0x03203501, 0x000f717e, 0x0077063e, 0xfa3e8abf, 0xd4df0076, 0x3e00002f, 0xd90076c1, + 0x00001404, 0x0076b63e, 0x001424df, 0x76c13e00, 0x142cd900, 0x9abf0000, 0x0076fa3e, 0x001420df, + 0x3efabf00, 0x200076fa, 0xb2943d2f, 0x0229350a, 0x32030018, 0x012f351b, 0x7e032035, 0xbd000f81, + 0x7e2ab2b4, 0x7e000f71, 0x7e000a74, 0xf4001b95, 0x9b7e0028, 0x063e000a, 0xa0b30077, 0x3bb20c00, + 0xa77e080c, 0x1ec40000, 0x084ebcff, 0x1bf505a6, 0xd37efede, 0x49fe0074, 0x30999001, 0x2cd99fbf, + 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x85fb0042, 0xb830f410, 0x00062cdf, 0xbf62f900, 0x0149feff, + 0x0c609990, 0xfe9fa03c, 0xb4bd0140, 0xb2240090, 0x0b947e0a, 0x0000d900, 0xc0d10100, 0xb200002f, + 0xd919a09b, 0x00000fac, 0x9ba0a4bd, 0x001d6d7e, 0xa4bd1bbf, 0x8046343d, 0x753b7e00, 0x0911bf00, + 0x04067502, 0xb5010935, 0x03350101, 0x0b03350a, 0x804cb4bd, 0x0000da00, 0x947e0100, 0x804c000b, + 0xdab4bd00, 0x01000080, 0x000b947e, 0x0074d37e, 0x000080df, 0x2fc4d401, 0xfab20000, 0x6f7e4fa0, + 0x4fbf0075, 0x0010e4d2, 0x08067500, 0x90120335, 0x29b580f9, 0x1da3d902, 0x0fb50000, 0x0329b503, + 0x001e02d9, 0x0145fe00, 0x900529b5, 0x01091f55, 0x20035335, 0x13093559, 0x001100d9, 0x012fb500, + 0xdf010975, 0x00001dcb, 0x2fb50409, 0x02593504, 0x000e32df, 0xb5400900, 0x5935062f, 0x7e032001, + 0xbf000a74, 0x009b7e2a, 0xb20bb200, 0x0f717e5a, 0x0a9b7e00, 0x49070f00, 0x9ff71100, 0x1408d100, + 0x40fe0000, 0x23009001, 0x0bb21abf, 0xff0d010c, 0x0000c17e, 0xf400a433, 0x9433093f, 0xa77eee01, + 0x483e0075, 0x2cd90078, 0xbf000006, 0xfc30f499, 0x8e014ffe, 0xa001c000, 0x140cd9f9, 0x9fbf0000, + 0xc700e9cf, 0x90b34c99, 0x90b34100, 0x02f8f601, 0x0078803e, 0xc700f9cf, 0x90b34c99, 0x90b30e00, + 0x02f8f601, 0x0078943e, 0x01c2008a, 0xfe00aacf, 0x9fbf0149, 0x00062cd9, 0xf099bf00, 0xf9a601a4, + 0x3e2b0bf4, 0x900078e7, 0x008960ff, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, + 0x3e9fb200, 0x7e007894, 0xf40042e9, 0x00f80430, 0xd9fc30f4, 0x0000062c, 0x99bf02f9, 0x90014ffe, + 0x54d004ff, 0xa0000006, 0x00a033f9, 0x8a0ba024, 0x7e066f44, 0x49001c22, 0xa9ffc000, 0x6f448ab4, + 0x00b5f106, 0x1cb17e20, 0x79573e00, 0x6f448a00, 0x1c227e06, 0x0650d900, 0x9bbf0000, 0xffc00049, + 0x448a94a9, 0xb4f1066f, 0x9bff3fff, 0x1cb17eb5, 0x42408900, 0xfe09a00f, 0x99900149, 0xd99fbf04, + 0x0000062c, 0xa43d99bf, 0x0bf4f9a6, 0x42e97e07, 0x0405fb00, 0xdffc30f4, 0x0000062c, 0xffbf12f9, + 0x900149fe, 0xa0b20899, 0x9fa0b1b2, 0x066f448a, 0x001c227e, 0x0c0104b3, 0x01b0aae7, 0x0079aa3e, + 0x04b3ff0e, 0xa4f12300, 0xa9e43fff, 0x1bf42000, 0x3ef4bd09, 0x8f0079bb, 0x94fe0000, 0xe43d03a9, + 
0xa095f9ff, 0x0149fe19, 0xbf089990, 0x062cd99f, 0x99bf0000, 0xf9a6ea32, 0x7e070bf4, 0xfb0042e9, + 0x2cdf0415, 0xbf000006, 0xfc30f4ff, 0x8a0149fe, 0xa0066f40, 0x1c227e9f, 0x00a9e400, 0xffa4f120, + 0x0094b33f, 0x3ee4bd0a, 0x8e007a0f, 0xfefe0000, 0x9fbf0149, 0x00062cd9, 0xb699bf00, 0xeaff03a4, + 0xf4f9a6a5, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x00000658, 0xf9f430f4, 0xfef0b232, 0x99900149, + 0x019fb510, 0x00062cdf, 0xbdffbf00, 0x3591a014, 0x01020191, 0x99909220, 0xa0280308, 0xbd0ab29f, + 0x7e3cb2b4, 0x09000b94, 0x08023505, 0x35030135, 0x01350103, 0x04023502, 0x008f0920, 0xf9cf01c0, + 0x4c99c700, 0x610099b3, 0x0190b301, 0x3e02f8f5, 0xcf007a7e, 0x99c700f9, 0x0099b34c, 0x90b30134, + 0x02f8f501, 0x007a933e, 0xfe00008f, 0xff0394b6, 0x58df95f9, 0xb5000006, 0x008f03f9, 0xf9cf01c0, + 0x4c99c700, 0xe80099b3, 0x0190b300, 0x3e02f8f5, 0xcf007abe, 0x99c700f9, 0x0099b34c, 0x90b300b8, + 0x02f8f501, 0x007ad33e, 0xfe00008f, 0x00140cd9, 0x949dbf00, 0xf9ff03e9, 0x0658df95, 0xf9b50000, + 0xc0008f04, 0x00f9cf01, 0xb34c99c7, 0xb3660090, 0xf8f60190, 0x7b053e02, 0x00f9cf00, 0xb34c99c7, + 0xb30e0090, 0xf8f60190, 0x7b193e02, 0xc2008f00, 0x00ffcf01, 0x001400d9, 0xde9abf00, 0x00000658, + 0xc711f9c7, 0xe93519ff, 0x14ef3515, 0x0c014bfe, 0x10bb9008, 0x00006e7e, 0x900149fe, 0x9fbf1899, + 0x00062cd9, 0xa699bf00, 0xa10bf5f9, 0x7c063e00, 0x60df9000, 0x01c10089, 0xdf009ff6, 0x800000f1, + 0x010099b8, 0x009ff702, 0xb2009fcf, 0x7b193e9f, 0xc2008900, 0x0099cf01, 0xe41095b6, 0xe420009f, + 0xb33fff9e, 0xff4500fd, 0x007aec3e, 0x066f448f, 0x01c10089, 0xdf009ff6, 0x800000f1, 0x010099b8, + 0x009ff702, 0xb2009fcf, 0x7ad33e9f, 0xc2008900, 0x0099cf01, 0x20009fe4, 0x3fff94f1, 0xcc00fdb3, + 0x7aac3efe, 0x6f408f00, 0xc1008906, 0x009ff601, 0x0000f1df, 0x0099b880, 0x9ff70201, 0x009fcf00, + 0x933e9fb2, 0xe97e007a, 0x35fb0042, 0xfc30f40c, 0x2cd112f9, 0xbf000006, 0x0140fe19, 0xa0080090, + 0x7a2f7e09, 0x467f7e00, 0xbf0fbf00, 0xf4f9a619, 0xe97e070b, 0x15fb0042, 0x0658df04, 0x30f40000, + 0xb242f9f4, 0x0149fef0, 0xb5149990, 0x2cdf019f, 0xbf000006, 0xa014bdff, 0x01913591, 0x93200103, + 0x32089990, 0x029fa0a4, 0xbd0ab228, 0x7e2cb2b4, 0x09000b94, 0x01023505, 0x35040335, 0x04350801, + 0x02013509, 0x20030135, 0xc0008f09, 0x00f9cf01, 0xb34c99c7, 0x01660099, 0xf50190b3, 0x8d3e02f8, + 0xf9cf007c, 0x4c99c700, 0x390099b3, 0x0190b301, 0x3e02f8f5, 0x8f007ca2, 0xb6fe0000, 0xf9ff0394, + 0x0658df95, 0xf9b50000, 0xc0008f03, 0x00f9cf01, 0xb34c99c7, 0x00ed0099, 0xf50190b3, 0xcd3e02f8, + 0xf9cf007c, 0x4c99c700, 0xc00099b3, 0x0190b300, 0x3e02f8f5, 0x8f007ce2, 0xb6fe0000, 0xf9ff0394, + 0x0658df95, 0xf9b50000, 0x00403304, 0x140cd937, 0x9fbf0000, 0x01c0008e, 0xc700e9cf, 0x90b34c99, + 0x90b36a00, 0x02f8f601, 0x007d183e, 0xc700f9cf, 0x90b34c99, 0x90b33a00, 0x02f8f601, 0x007d2c3e, + 0x001400d9, 0xfe9abf00, 0x080c014b, 0x7e14bb90, 0xfe00006e, 0x99900149, 0xd99fbf1c, 0x0000062c, + 0xf9a699bf, 0x00ba0bf5, 0x007e1a3e, 0x01c20089, 0xde0099cf, 0x00000658, 0xc7109fc7, 0xef351899, + 0x14e93515, 0x007d403e, 0x8960ff90, 0xf601c100, 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, + 0x9fb2009f, 0x007d2c3e, 0x01c20089, 0xe40099cf, 0xf120009f, 0xb33fff94, 0xff4000fd, 0x007cfb3e, + 0x066f448f, 0x01c10089, 0xdf009ff6, 0x800000f1, 0x010099b8, 0x009ff702, 0xb2009fcf, 0x7ce23e9f, + 0xc2008900, 0x0099cf01, 0x20009fe4, 0x3fff94f1, 0xc700fdb3, 0x7cbb3efe, 0x6f408f00, 0xc1008906, + 0x009ff601, 0x0000f1df, 0x0099b880, 0x9ff70201, 0x009fcf00, 0xa23e9fb2, 0xe97e007c, 0x45fb0042, + 0xfc30f40c, 0x00062cdf, 0xbf02f900, 0x0654d9ff, 0x90bf0000, 0x900149fe, 0x9fa00499, 0x0046a77e, + 0xc17e010a, 0x010a002b, 
0x007c397e, 0x0cb2010a, 0xabb2d43d, 0x0043617e, 0x0600a033, 0x49fe02f8, + 0x04999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x05fb0042, 0x062cd904, 0x99bf0000, + 0xfefc30f4, 0xf9a0014f, 0x0078667e, 0x2800a433, 0xc4bdb4bd, 0x020ad43d, 0x0043617e, 0xc17ea43d, + 0xa43d002b, 0x007c397e, 0x0028487e, 0x0046cf7e, 0x007eb83e, 0x002bc17e, 0xbf0149fe, 0x062cd99f, + 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdfcc, 0x82f90000, 0x49feffbf, + 0x54999001, 0xa00148fe, 0x0144fe9f, 0xbd109992, 0x0147fef4, 0xb50146fe, 0x9fa0019f, 0x90508890, + 0x77904c44, 0x24669034, 0x001400d9, 0xfe9abf00, 0x080c014b, 0x0d44bb90, 0x00c17eff, 0x33a53200, + 0xb4e900a4, 0x033f1200, 0xe0053433, 0x34040918, 0x90334520, 0xe3010f00, 0x87019d33, 0x7f513e00, + 0x050a1800, 0x7e020b98, 0x320078f0, 0x7fbf3ea1, 0x08011800, 0x0ab22b32, 0x000f817e, 0x4320040f, + 0x35034535, 0x1033024f, 0x14330c00, 0x943e7e01, 0x0998007f, 0xb24ab202, 0x9079a07b, 0x9f980809, + 0x017fb501, 0xb5029f98, 0x9998027f, 0x0379b503, 0x007fb23e, 0x90020f98, 0x4ab20809, 0x6fa06bb2, + 0xb5019f98, 0x9f98016f, 0x026fb502, 0xb5039998, 0x14090369, 0x7e014935, 0x3e000f61, 0xb2007f08, + 0x7e2b320a, 0x0f000f81, 0x20943d05, 0x0289358f, 0x0f030018, 0x358ab204, 0x8035018f, 0x7eb4bd03, + 0x33000f71, 0xff250019, 0x083e02f8, 0x2cdf007f, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x02b0b39f, + 0x02b6b02c, 0xb30b0cf4, 0x3e1001b4, 0xb300801d, 0xb32d03b0, 0x0e3f04b0, 0x805f3e02, 0x3da9bf00, + 0x00008fe4, 0x804b3e0c, 0x8fa9bf00, 0x3df3ffff, 0x049ffde4, 0x0800008f, 0x00804b3e, 0xff8fa9bf, + 0xe43df3ff, 0x8f049ffd, 0xfd040000, 0x5d3e059f, 0xa9bf0080, 0xf3ffff8f, 0x9ffde43d, 0xfea9a004, + 0x9fbf0149, 0x00062cd9, 0x3299bf00, 0xf4f9a6ea, 0xe97e070b, 0x30f40042, 0xf400f804, 0x52f9fc30, + 0x00062cd2, 0xb229bf00, 0xb2c4b2b3, 0xfea0b2d5, 0x008a0141, 0x11900880, 0x7e19a018, 0x60001c22, + 0x10a5b60a, 0x2c8a3a60, 0x227e0880, 0x4a60001c, 0x6010a5b6, 0xbf1fbf5a, 0xf4f9a629, 0xe97e070b, + 0x55fb0042, 0xfc30f404, 0x2cd122f9, 0xbf000006, 0xfea2b219, 0x008a0140, 0x009008d5, 0x7e09a00c, + 0xbf001c22, 0x00a3f00f, 0x19bf2aa0, 0x0bf4f9a6, 0x42e97e07, 0x0425fb00, 0xf9fc30f4, 0x062cd122, + 0x19bf0000, 0x40fea2b2, 0xd5408a01, 0x0c009008, 0x227e09a0, 0x0fbf001c, 0xa000a3f0, 0xa619bf2a, + 0x070bf4f9, 0x0042e97e, 0xf40425fb, 0x22f9fc30, 0x00062cd1, 0xb219bf00, 0x0140fea2, 0x08d5808a, + 0xa00c0090, 0x1c227e09, 0xf00fbf00, 0x2aa000a3, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40425, + 0xd122f9fc, 0x0000062c, 0xa2b219bf, 0x8a0140fe, 0x9008d580, 0x09a00c00, 0x001c227e, 0x0fbf2aa0, + 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40425, 0xd122f9fc, 0x0000062c, 0xa2b219bf, 0x8a0140fe, + 0x9008d640, 0x09a00c00, 0x001c227e, 0x0fbf2aa0, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40425, + 0xd122f9fc, 0x0000062c, 0xa2b219bf, 0x8a0140fe, 0x90088854, 0x09a00c00, 0x001c227e, 0xa3f00fbf, + 0xbf2aa000, 0xf4f9a619, 0xe97e070b, 0x25fb0042, 0xfc30f404, 0x2cd142f9, 0xbf000006, 0xb2a3b219, + 0xfec4b2b2, 0xac8a0140, 0x00900884, 0x7e09a014, 0xc7001c22, 0x29a0e8a9, 0xa0f0a9c7, 0x18a5b639, + 0x0fbf4aa0, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40445, 0x062cdffc, 0x12f90000, 0x49feffbf, + 0x08999001, 0x9fa0a0b2, 0x888ab1b2, 0x227e0880, 0xa9c7001c, 0x0290b370, 0x0296b01a, 0xb30b0cf4, + 0x3e1d0194, 0xb300826f, 0xb3080390, 0xa0110494, 0xb4aac709, 0x1004a4b3, 0x0082a83e, 0x09a094bd, + 0x0082c23e, 0xf404a6b0, 0xa0b30f0c, 0xa4b31801, 0xa23e3002, 0xa0b30082, 0xa4b31808, 0xb83e2410, + 0x1aa00082, 0x0082b23e, 0xb03e0309, 0x04090082, 0xa43d19a0, 0x0082c43e, 0xb03e0509, 0x94bd0082, + 0xff0a19a0, 0x900149fe, 0x9fbf0899, 0x00062cd9, 
0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40415fb, + 0x2cdffc30, 0xf9000006, 0xfeffbf02, 0x99900149, 0xa0a0b204, 0x80888a9f, 0x1c227e08, 0x70aac700, + 0x1a02a0b3, 0xf402a6b0, 0xa4b30b0c, 0x1a3e1801, 0xa0b30083, 0xa4b30803, 0x0aa00c04, 0x283ea43d, + 0x94bd0083, 0x09a0ff0a, 0x900149fe, 0x9fbf0499, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf40405fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x408a0140, 0x009008c0, 0x7e09a008, 0xbf001c22, + 0xf019bf0f, 0xacf01fa4, 0xf4f9a60b, 0xe97e070b, 0x15fb0042, 0x062cd904, 0x99bf0000, 0xfefc30f4, + 0xf9a0014f, 0x0083437e, 0x0a00a433, 0x9d3ea4bd, 0x2c8a0083, 0x227e08d3, 0xaac7001c, 0x0149fe12, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xf400f804, 0x22f9fc30, 0x00062cd2, + 0xb229bf00, 0x0141fea0, 0x08d1008a, 0xa00c1190, 0x1c227e19, 0x00008900, 0xff04f1ff, 0xb4a9ffff, + 0x08d1008a, 0x7eb50bff, 0xbf001cb1, 0x3d29bf1f, 0xf4f9a6a4, 0xe97e070b, 0x25fb0042, 0xf830f404, + 0x00062cdf, 0xbf62f900, 0x0149feff, 0x201f9990, 0x9096b29c, 0xa4320199, 0xd3b29fa0, 0x0affb2c4, + 0x1c227e08, 0x0000d900, 0x04bd4000, 0xa9ff0105, 0x845f3e14, 0xa002bc00, 0x7ea45abc, 0xb30083b7, + 0xda1d0014, 0x0000449d, 0x3cb26bb2, 0x0015a97e, 0x0c00a433, 0x040a09f8, 0x0084663e, 0x26010090, + 0xd40cf440, 0x49fea43d, 0x20999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x65fb0042, + 0xf830f408, 0x00062cdf, 0xbf12f900, 0x0149feff, 0xb20c9990, 0xb29fa0b1, 0xc0408aab, 0x1cb17e08, + 0x8375da00, 0xb4bd0000, 0x312d00dc, 0x15a97e01, 0x33040e00, 0xfe1f00a0, 0x00900140, 0x7e0ab208, + 0x320082df, 0x00a433ae, 0xa609bf0d, 0x050bf491, 0x49fe3e0e, 0x0c999001, 0x2cd99fbf, 0xbf000006, + 0xa6ea3299, 0x070bf4f9, 0x0042e97e, 0xf40815fb, 0x2cdffc30, 0xf9000006, 0xfeffbf12, 0x99900149, + 0xa0b0b208, 0x8aa1b29f, 0x7e08d100, 0x89001c22, 0xf1ff0000, 0xffffff04, 0x008ab4a9, 0x0bff08d1, + 0x1cb17eb5, 0xd1708a00, 0xff14f108, 0x0000db03, 0x14b6c000, 0xb51bff10, 0x001cb17e, 0x08d1748a, + 0x001c227e, 0x02010089, 0xf700a3f0, 0x49fe009a, 0x08999001, 0x2cd99fbf, 0xbf000006, 0xa6a43d99, + 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x32f9fc30, 0x00062cd2, 0x3229bf00, 0x8ab032c1, 0xdb08d170, + 0xc2290000, 0x900143fe, 0x39a01033, 0x001cb17e, 0x08d1748a, 0x001c227e, 0xfff8a4f1, 0x290002db, + 0xb5abffa2, 0x08d1708a, 0x001cb17e, 0xf0ff04f0, 0x09940f14, 0x081f940c, 0xffff94f1, 0x08d1708a, + 0xdb0404b6, 0xa22a0000, 0xfdff04f0, 0x01fd0509, 0x050ffd05, 0x7eb50bff, 0xbf001cb1, 0x3d29bf3f, + 0xf4f9a6a4, 0xe97e070b, 0x35fb0042, 0xd430f404, 0xf0b472f9, 0x0149fe15, 0x32209990, 0x019fb5a2, + 0x3214f0b4, 0x32c532b4, 0xdf9fa0d6, 0x0000062c, 0x9990ffbf, 0xa0e3b228, 0x00e4b39f, 0x3e243d0a, + 0x0000873b, 0xbd143dff, 0xbc0e32f4, 0x94f0953f, 0x0d0bf401, 0x1bf40e26, 0x10f03205, 0xff900111, + 0x10f4b301, 0x101630ea, 0xc4120cf4, 0x09c4ff1f, 0x70f9bcff, 0xf41076b0, 0x09f80b0d, 0x3b3e0202, + 0x3ab20087, 0x0083b77e, 0x6d325c32, 0x2a324b32, 0x00856b7e, 0xb77e3ab2, 0x148a0083, 0x227e08d1, + 0x7f49001c, 0xb4a9ffff, 0x08d1148a, 0x001cb17e, 0x0b321a32, 0x640dc43d, 0x0083fd7e, 0xad33a232, + 0xb2009d00, 0x83b77e3a, 0xd1148a00, 0x1c227e08, 0x80abe500, 0xd1148a40, 0x1cb17e08, 0x321a3200, + 0x8d010c0b, 0x7e2625a0, 0x320083fd, 0x00a433a2, 0x7e3ab26e, 0x8a0083b7, 0x7e08d114, 0x49001c22, + 0x43febf7f, 0xb4a9ff01, 0x148a0132, 0x339008d1, 0x1cb17e28, 0x3e010400, 0xbc008718, 0xb77ea440, + 0x148a0083, 0x227e08d1, 0x00bc001c, 0x10a5b690, 0x109039bc, 0x9a600111, 0xa6ff10c4, 0xde1ef407, + 0x900149fe, 0x9c982099, 0xb29bbf01, 0x0ed4bd3a, 0x26d87e20, 0x00a03300, 0xfe02f806, 0x99900149, + 0xd99fbf48, 0x0000062c, 0x2a3299bf, 0x0bf4f9a6, 0x42e97e07, 0x2c75fb00, 
0xd9e830f4, 0x0000062c, + 0x99bf52f9, 0x90014ffe, 0xa2b22cff, 0xc08af9a0, 0x227e08b8, 0xa5b2001c, 0x08b8c08a, 0x7e045bc5, + 0xda001cb1, 0x00008343, 0x408cb4bd, 0x0400030d, 0x0015a97e, 0xa800a933, 0xc0408a00, 0x1c227e08, + 0x0141fe00, 0x11902bb2, 0xb21aa028, 0x7fee7e1a, 0x33a03200, 0x008900ad, 0xf00f19bf, 0x4afe13b2, + 0x049ffd01, 0x900195f0, 0x19a018aa, 0x7fd1a4b2, 0x7e02faf0, 0xbf001581, 0x7e2bb23a, 0x32008481, + 0x3ea433a0, 0x7e4ab221, 0xa600159c, 0xea0df4a1, 0x2bb23abf, 0x0084817e, 0xdf3ea433, 0x063e0400, + 0xa0330088, 0x49fe3a00, 0x28999001, 0x41fe99bf, 0x0142fe01, 0x90241190, 0x19a02022, 0xdf7e2ab2, + 0xa4330082, 0x2bbf1a00, 0xee7e1ab2, 0xa433007f, 0x1bbf0e00, 0x08c0408a, 0x001cb17e, 0x08b8c08a, + 0xb17e5bb2, 0x49fe001c, 0x2c999001, 0x2cd99fbf, 0xbf000006, 0xa60a3299, 0x070bf4f9, 0x0042e97e, + 0xf41855fb, 0x2cdfdc30, 0xf9000006, 0xf4ffbf82, 0x49fef830, 0x4c999001, 0x40fe9fa0, 0x0c999201, + 0x0090f4bd, 0x0142fe2c, 0x9fb593b2, 0x909fa001, 0x02084822, 0x0406743d, 0xd90c0490, 0x00001424, + 0x3bb29abf, 0xff0d080c, 0x0000c17e, 0xef00a433, 0x3f013198, 0x0a943319, 0x011e98e6, 0x90041f18, + 0x35180419, 0x980ea001, 0x0eb5019e, 0x029e9801, 0x98020eb5, 0x0eb5039e, 0x04999803, 0x330409b5, + 0x304a02f0, 0x0cf402f6, 0x00f0330f, 0x01f4331e, 0x891b3eae, 0x04f03300, 0x04f63067, 0x333808f4, + 0x3e9c05f4, 0x18008956, 0x0b18010a, 0x020c1803, 0x7e040d18, 0x3e00856b, 0x98008962, 0x0b98010a, + 0x84ef7e02, 0x89623e00, 0x010a9800, 0x0087587e, 0x0089623e, 0x0a1849bf, 0x030b1801, 0x18020c18, + 0x0e98040d, 0x0091b002, 0xb0014998, 0xed7e0191, 0x623e0085, 0x02f80089, 0x0089673e, 0x9b3e02f8, + 0xad330088, 0x20ff3900, 0x02273528, 0x32031e18, 0x351ab25b, 0x2e350126, 0x0f817e03, 0xbd2ab200, + 0x0f717eb4, 0x889b3e00, 0x0624d900, 0x30f40000, 0xd112f9fc, 0x0000062c, 0x19bf9abf, 0x001414db, + 0xfe010c00, 0x00900140, 0x7e09a008, 0x7e0072f3, 0xbf0089fe, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, + 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x2fe4da08, 0x09a00000, 0x001434d9, + 0x3d9fbf00, 0x98a92094, 0x95f902f9, 0x19bf0fbf, 0xf9a6a43d, 0x7e070bf4, 0xfb0042e9, 0x30f40415, + 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, 0x257e09a0, 0xd87e0046, 0x0fbf0044, 0xf9a619bf, + 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0x062cd9f4, 0x22f90000, 0x30f499bf, 0x0140fefc, 0x90014ffe, + 0xff901000, 0xb50ca018, 0xa1b2010d, 0xf9a0b2b2, 0x0048637e, 0xa033ff0e, 0x09981e00, 0x2a9cda01, + 0x1cb20000, 0x91b02db2, 0x980ebf00, 0x6e7e07ab, 0xae32004e, 0x900149fe, 0x9fbf1899, 0x00062cd9, + 0x3299bf00, 0xf4f9a6ea, 0xe97e070b, 0x30f40042, 0x0c25fb04, 0x00062cdf, 0xf4ffbf00, 0x49fef030, + 0x0c999001, 0x9cd99fa0, 0x9800002a, 0x49fe079f, 0xb59aa001, 0xfebf019b, 0x9001ff98, 0xbcb20899, + 0xfebbd4bd, 0xa0abb202, 0x0e9ab29f, 0x26d87e04, 0x00a03300, 0x0a02f808, 0x0149fe01, 0xbf0c9990, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x1030f400, 0x30f400f8, 0x062cd9c0, 0x82f90000, + 0x4ffe99bf, 0x60ff9001, 0xa00146fe, 0x9094bdf9, 0x91b02c66, 0x1591b016, 0xfe0144fe, 0x69900148, + 0x40449004, 0xb05c8890, 0x743e0991, 0x50b4008c, 0x33593f16, 0x01410b9d, 0x18015e98, 0x5990045f, + 0x344ea004, 0xe13055e0, 0x019e982b, 0x98014eb5, 0x4eb5029e, 0x039e9802, 0x98034eb5, 0x49b50499, + 0x01f03304, 0x01f6302d, 0x330c08f4, 0x010702fd, 0x008bae3e, 0x98024998, 0x69b5014a, 0x09e0b402, + 0x98016ab5, 0x947e01eb, 0x453e008a, 0x4998008c, 0x014c9802, 0x98044b98, 0x69b5034a, 0x09e0b402, + 0x98016cb5, 0x2a7e01ed, 0x453e008a, 0x4018008c, 0x02429801, 0x58014198, 0x47180743, 0x3000da0c, + 0xb4bd0000, 0x7e02004c, 0x33000b94, 0xc4310104, 0x3ce4ff7b, 0x00daffff, 0x7e000030, 0xb2000b94, + 
0x0262b51b, 0xb20161b5, 0xff3ee42c, 0xdad4bdff, 0x00003000, 0x0026d87e, 0x008c493e, 0x62b52cb2, + 0x0161b502, 0x3ee41bb2, 0x00daffff, 0xbd000030, 0x26727ed4, 0xff3ce400, 0xdef4bdff, 0x00003000, + 0xa690febc, 0x0e18f4fc, 0x9726993f, 0x3e0d0bf4, 0x3f008c72, 0x00943399, 0x01ff903d, 0x0200feb3, + 0x8c493edf, 0x00a43300, 0x3d02092d, 0x358920e4, 0xb034028e, 0x0359182b, 0x5ab2040e, 0x35018e35, + 0x817e0389, 0x8ab2000f, 0x717eb4bd, 0x743e000f, 0x02f8008c, 0x001414da, 0x14757e00, 0x142cd900, + 0xadb20000, 0x4bfe9abf, 0x90080c01, 0xc17e54bb, 0xa9330000, 0xdafe9c00, 0x00001414, 0x0014b57e, + 0x008c743e, 0xdffc30f4, 0x0000062c, 0x0cd122f9, 0xbf000014, 0xfe1ebfff, 0x99900149, 0xa0a2b20c, + 0xe8eab89f, 0x227e0004, 0xa0b2001c, 0x04f01abf, 0xe0aab801, 0x227e0004, 0x0109001c, 0x070004b3, + 0x2018a9c7, 0x0149fe29, 0xbf0c9990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0425fb00, + 0xd9fc30f4, 0x0000140c, 0x99bf22f9, 0x2cd1a2b2, 0xfe000006, 0x9ab80140, 0xbf000510, 0x0c009019, + 0x227e09a0, 0x0fbf001c, 0xa0f3aac7, 0xa619bf2a, 0x070bf4f9, 0x0042e97e, 0xf40425fb, 0x0cd9fc30, + 0xf9000014, 0xb299bf42, 0x062cd1a2, 0xb3b20000, 0x05109ab8, 0xb219bf00, 0x0140fec4, 0xa0140090, + 0x1c227e09, 0x49a9c700, 0xa9c72960, 0xc7396046, 0x4a6081aa, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, + 0x0445fb00, 0xf9ec30f4, 0x140cd142, 0x19bf0000, 0x2cd3a4b2, 0xfe000006, 0x9ab80142, 0xbf000500, + 0x24229039, 0x900140fe, 0x29a01400, 0x001c227e, 0x0aa019bf, 0x05049ab8, 0x1c227e00, 0xb519bf00, + 0x9ab8010a, 0x7e000508, 0xbf001c22, 0x020ab519, 0x050c9ab8, 0x1c227e00, 0xb50bb200, 0x100c030a, + 0x7e7e4ab2, 0x2fbf000b, 0xf9a639bf, 0x7e070bf4, 0xfb0042e9, 0x30f41445, 0x062cdff0, 0x22f90000, + 0x00140cd0, 0xbfffbf00, 0x0149fe0e, 0xb2189990, 0xb89fa0a2, 0x0004f8ea, 0x001c227e, 0x0abfa9b2, + 0x94189e95, 0x9f94089d, 0x0895b618, 0xff0094f1, 0xfd05fefd, 0x00d905f9, 0xfd00ff00, 0xfdfd04d9, + 0x0141fe05, 0x04fcaab8, 0x0c119000, 0xa00140fe, 0x1400901f, 0x227e0fa0, 0x2eb2001c, 0xbd011ab5, + 0x00a1dbd4, 0x2e010000, 0x35f80d3c, 0xdd9002e1, 0x00a1dc01, 0xf9c40000, 0x64ffc70f, 0x3c98b93c, + 0xe935f8bf, 0x90ef2001, 0xd4b303ee, 0xa9c4de04, 0x98c93c0f, 0xc70d2935, 0xc93c64a9, 0x0c293598, + 0x900149fe, 0x9fbf1899, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf41025fb, 0x22f9fc30, + 0x00062cd2, 0xdb29bf00, 0x00002b24, 0x002b26dc, 0x2b28dd00, 0x22da0000, 0xfe00002b, 0x11900141, + 0x7e19a00c, 0xd000807b, 0x00002af4, 0xda010998, 0x00002b14, 0x1e0095f1, 0x7e0109b5, 0x98008df6, + 0x2ada0109, 0xf100002b, 0xb5010095, 0x847e0109, 0x0998008d, 0xbf1ebf01, 0x0095f12f, 0x0109b520, + 0x0bf4efa6, 0x42e97e07, 0x0425fb00, 0x002af4de, 0xfc30f400, 0xe99812f9, 0x98edbf04, 0xef9801ec, + 0xc095f002, 0xd904e9b5, 0x34000001, 0xd905d9fd, 0x4c400000, 0x0905c9fd, 0x062cd1e3, 0xf9fd0000, + 0x091bbf04, 0x04f5f0fe, 0xd904f9fd, 0x00c75000, 0x0905f9fd, 0x14e93505, 0xefb5ff09, 0x15e9b502, + 0xa00140fe, 0x080090ed, 0xa001ecb5, 0x8ebb7e0b, 0xbf0fbf00, 0xf4f9a619, 0xe97e070b, 0x15fb0042, + 0xfc30f404, 0x00140cd9, 0xbf12f900, 0x01abc499, 0x00062cd0, 0x0141fe00, 0x07c49ab8, 0x9009bf00, + 0x19a00811, 0x001cb17e, 0x09bf1fbf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0xdffc30f4, 0x0000062c, + 0xffbf12f9, 0x900149fe, 0xa0b20899, 0xb1b29fa0, 0x0082327e, 0x90b309bf, 0x96b01c02, 0x0b0cf402, + 0x100194b3, 0x0090123e, 0x0a0390b3, 0x060490b3, 0x09a094bd, 0x90b319bf, 0x96b02003, 0x0f0cf403, + 0x160190b3, 0x100294b3, 0x0090363e, 0x0a0490b3, 0x060590b3, 0x19a094bd, 0x900149fe, 0x9fbf0899, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x2cdffc30, 0xf9000006, 0xfeffbf22, + 0x99900149, 0xa0a1b20c, 
0xb2b0b29f, 0x00d433c2, 0x00a0b332, 0x60e08a0e, 0x1c227e06, 0xb31aa000, + 0x8a0e0000, 0x7e0660e4, 0xa0001c22, 0x0020b30a, 0x60e88a48, 0x1c227e06, 0x3e2aa000, 0x0a0090d5, + 0x01d43302, 0x00b0b336, 0x8abbbf0e, 0x7e0660e4, 0xb3001cb1, 0xbf0e0020, 0x60e88a2b, 0x1cb17e06, + 0x0010b300, 0x7e1ab214, 0xbf00a91d, 0x60e08a1b, 0x1cb17e06, 0xfea43d00, 0x99900149, 0xd99fbf0c, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0425, 0xbf000006, 0xf430f4ff, 0x900149fe, + 0x9fa00899, 0x01c0008f, 0xc700f9cf, 0x99b34c99, 0xb3014c00, 0xf8f50190, 0x91083e02, 0x00f9cf00, + 0xb34c99c7, 0x01250099, 0xf50190b3, 0x1d3e02f8, 0xffd90091, 0xff7fffff, 0x008fc4f9, 0xf9cf01c0, + 0x4c99c700, 0xdb0099b3, 0x0190b300, 0x3e02f8f5, 0xcf00913e, 0x99c700f9, 0x0090b34c, 0x0190b30e, + 0x3e02f8f6, 0x8f009153, 0xcf01c000, 0x99c700f9, 0x0099b34c, 0x90b3008d, 0x02f8f501, 0x00916b3e, + 0xc700f9cf, 0xd0b34c9d, 0xd0b35e00, 0x02f8f601, 0x0091803e, 0x3501f910, 0xfd3348e9, 0xd900e100, + 0x00000684, 0xb5014ffe, 0xd4d901f9, 0xbf00002f, 0x09fda09a, 0x09f92048, 0x0684de09, 0xe9200000, + 0xecb50409, 0x01fd3502, 0x35014bfe, 0xed3503ed, 0x01e93502, 0x6e7e080c, 0x7b3e0000, 0x09f80092, + 0x00927b3e, 0x01c20089, 0xde0099cf, 0x00002af4, 0x0948ef18, 0xf4f926ff, 0x7b3e9d1b, 0xe08f0092, + 0x00890660, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, 0x3e9fb200, 0x8f009180, + 0x890660e0, 0xf601c100, 0x99b8009f, 0xf6000100, 0xf2df009c, 0xb8800000, 0x02020099, 0xcf009ff6, + 0x9fb2009f, 0x0091533e, 0x01c20089, 0x95009fcf, 0x94b31ff9, 0x323e8c01, 0xe08f0091, 0x00890660, + 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, 0x3e9fb200, 0xfe00911d, 0x99900149, + 0xd99fbf08, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80c30, 0xdff430f4, 0x0000062c, + 0xffbf22f9, 0x900149fe, 0xc1b21499, 0xf4d99fa0, 0x9800002a, 0xd2b2019f, 0x000000d9, 0xfd080e40, + 0x0bf404f9, 0x00a03375, 0x33030e10, 0x326c01a4, 0x92d73ea0, 0x0e043d00, 0x00b43304, 0xa67c7e5e, + 0x00a03300, 0x00804b24, 0x07000433, 0xfe00884b, 0x080c014a, 0x7e0caa90, 0x0e00a79f, 0x00a43306, + 0x93513e3a, 0x0680d900, 0x99bf0000, 0x90b3080e, 0x4ffe2900, 0x0cff9001, 0x10000033, 0xa0209e98, + 0x219998fe, 0x0093303e, 0xa0229e98, 0x239998fe, 0x3e01f9b5, 0xfe009351, 0x99900149, 0xd99fbf14, + 0x0000062c, 0xea3299bf, 0x0bf4f9a6, 0x93663e20, 0x0149fe00, 0xbf0c9990, 0xa01f0e9f, 0x0199981f, + 0x373e29a0, 0xe97e0093, 0x25fb0042, 0x062cde0c, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, 0xa9a02409, + 0xe9bfffbf, 0xf9a61f0a, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0xdfe430f4, 0x0000062c, 0xffbf82f9, + 0x900149fe, 0xf4d03c99, 0xa000002a, 0x2d09989f, 0x98280298, 0x0f982b01, 0x0d91b029, 0x980bc1b0, + 0xd1b02a09, 0x09e1b00a, 0xbf2e0098, 0x6492ffcc, 0xb0741fff, 0xc1b00e01, 0x0da0330c, 0x0ea4330c, + 0x93f53e10, 0xb264b200, 0x93fb3e75, 0xbd44bd00, 0x93fb3e54, 0x0d40b400, 0x090e50b4, 0xf5b926ff, + 0x9500851b, 0x4c9d015d, 0x5555d901, 0x33d05555, 0xff333333, 0xd9ffe4c9, 0xe24ebcf4, 0xfff35fbc, + 0xf994c4e0, 0x02e5b61e, 0xffe59eff, 0x0fd2d4f0, 0x950f0f0f, 0xe0ff02f1, 0xb410ffa4, 0xbca0acbc, + 0xb994b1bd, 0x04ae951c, 0xff04bf95, 0xeabce59e, 0xf1fbbce0, 0xdc04e2fd, 0x01010101, 0xb2b4f2ff, + 0x7eeab2cd, 0xb600415b, 0xb0b318b5, 0x090e0b24, 0x3300bdb3, 0x00b6b001, 0xf00b3cf0, 0x84bd0136, + 0x00949a3e, 0x0cffbac4, 0x7e240b40, 0x0e009fe5, 0x00a93304, 0x58b20112, 0x7d9543b2, 0x016c9d01, + 0x333333d1, 0x5555d233, 0xc2ff5555, 0xf4d2ffe4, 0xbce26ebc, 0xe1fff37f, 0x1ef994c4, 0xff02e5b6, + 0xf1ffe59e, 0x0f0fd4d4, 0xf0950f0f, 0xa4e1ff02, 0xbcb401ff, 0xbdbca0ac, 0x1cb994b1, 0x9504ae95, + 0x9eff04bf, 0xe0eabce5, 0xfdf1fbbc, 0x01dc04e4, 
0xff010101, 0xcdb2b4f4, 0x5b7eeab2, 0xb5950041, + 0x2456b018, 0x009d0cf5, 0xb40de0b4, 0xfd950ef0, 0x01ec9d01, 0xffe4c2ff, 0xc0b4f4d2, 0x0ed0b40d, + 0xbce2cebc, 0xe1fff3df, 0x1ef994c4, 0xff02e5b6, 0xf1ffe59e, 0x02f095d4, 0xffa4e1ff, 0xacbcb401, + 0xb1bdbca0, 0x951cb994, 0xbf9504ae, 0xe59eff04, 0xbce0eabc, 0xe4fdf1fb, 0x0101dc04, 0xf4ff0101, + 0xb2cdb2b4, 0x415b7eea, 0x18b99500, 0xf42496b0, 0xd0b4320c, 0x0fffde0c, 0x004fff00, 0x0c94b6f0, + 0xfff4dfff, 0xfefdf55f, 0x0be0b404, 0xa005f9fd, 0x0af0b4ef, 0xf3a01f0e, 0xa00990b4, 0x95a33e98, + 0xfe060e00, 0x99900149, 0xd99fbf3c, 0x0000062c, 0xea3299bf, 0x0bf4f9a6, 0x42e97e07, 0x1c85fb00, + 0xdffc30f4, 0x0000062c, 0xffbf22f9, 0x010000d9, 0xffa4f001, 0xb6ffb4f0, 0xb4b614a4, 0xa0a9bc0f, + 0x900149fe, 0xabbc0c99, 0xb29fa020, 0xb8d1b2c0, 0x0020242a, 0x001c227e, 0xaac70309, 0x00a0b364, + 0xb301090c, 0x3d0604a0, 0xb8092094, 0x0030142a, 0x001c227e, 0xaac70309, 0x00a0b364, 0xb301090c, + 0x3d0604a0, 0xfe192094, 0x99900149, 0xd99fbf0c, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, + 0x30f40425, 0x062cdff0, 0x32f90000, 0x30f4ffbf, 0x0149fefc, 0xa0209990, 0x0141fe9f, 0x119094bd, + 0x014efe1c, 0xee9019a0, 0xb2e9a018, 0x09d3b2c2, 0xf4b926ff, 0x40fe201b, 0x90240b01, 0x0cb21400, + 0x00a3d97e, 0xb3ffa4f0, 0xbf3c1fa4, 0x3e10a000, 0xb00096ad, 0xa4f000e1, 0xffb4f0ff, 0x050c1eb2, + 0x527e240d, 0xa4f000a4, 0x1fa4b3ff, 0x0149fe1a, 0xbf1c9990, 0xa01f0a99, 0x0149fe29, 0xbf189990, + 0xfe39a099, 0x99900149, 0xd99fbf20, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x35fb0430, + 0xf030f410, 0x00062cdf, 0xbf32f900, 0xfc30f4ff, 0x900149fe, 0x9fa02099, 0xbd0141fe, 0x18119094, + 0xa0014efe, 0x14ee9019, 0xc2b2e9a0, 0xff09d3b2, 0x1bf4b926, 0x0140fe1d, 0x0090240b, 0x7e0cb21c, + 0x3300a3d9, 0xbf391fa4, 0x3e10a000, 0xb0009746, 0xa4f000e1, 0xffb4f0ff, 0x080c1eb2, 0x527e240d, + 0xa43300a4, 0x49fe1a1f, 0x18999001, 0x1f0a99bf, 0x49fe29a0, 0x14999001, 0x39a099bf, 0x900149fe, + 0x9fbf2099, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xfb0430f4, 0x30f41035, 0x062cdff4, + 0x22f90000, 0x30f4ffbf, 0x0149fefc, 0xa0189990, 0x014efe9f, 0xee9094bd, 0x0140fe14, 0x0090e9a0, + 0xb209a010, 0x09d2b2c1, 0xf4b926ff, 0xecb20f1b, 0xd97e240b, 0xcb3e00a3, 0x01b00097, 0xffa4f000, + 0x0cffb4f0, 0x7e240d01, 0x3300a452, 0xfe201fa4, 0x99900149, 0xfe99bf14, 0xff90014f, 0xe7ffbf10, + 0xa0038199, 0x81ffe719, 0xfe2fa003, 0x99900149, 0xd99fbf18, 0x0000062c, 0xf9a699bf, 0x7e070bf4, + 0xf40042e9, 0x25fb0430, 0xf430f40c, 0x00062cdf, 0xbf22f900, 0xfc30f4ff, 0x900149fe, 0x9fa01899, + 0xbd014efe, 0x14ee9094, 0xa00140fe, 0x100090e9, 0xc1b209a0, 0xff09d2b2, 0x1bf4b926, 0x0becb20f, + 0xa3d97e24, 0x985a3e00, 0x0001b000, 0xf0ffa4f0, 0x200cffb4, 0x527e240d, 0xa43300a4, 0x49fe181f, + 0x14999001, 0x19a099bf, 0x900149fe, 0x99bf1099, 0x49fe29a0, 0x18999001, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0x0c25fb04, 0xdff430f4, 0x0000062c, 0xffbf52f9, 0x900149fe, + 0xb0322099, 0xc4b29fa0, 0xa6b00405, 0xbb0cf523, 0xfe34bd00, 0x41fe0142, 0x18229001, 0xa01c1190, + 0xb213a023, 0x7e2cb21b, 0x32009fa6, 0x1fad33a5, 0x1abf009b, 0xd37e2bbf, 0xa43300a0, 0x43a00a00, + 0x0099683e, 0x19bf2fbf, 0xb60ff4b6, 0xf9bc1494, 0x0000df90, 0x9fbc0101, 0x04003390, 0x0406302e, + 0x330b0cf4, 0x3e650304, 0x33009917, 0x332d0500, 0x3e590604, 0xb8009948, 0x0022889a, 0x001c227e, + 0x3e10a5b6, 0x90009932, 0x227e8c9a, 0xa4f1001c, 0x4aa003ff, 0x0099683e, 0x32889ab8, 0x1c227e00, + 0x00a3f000, 0x0099323e, 0x328c9ab8, 0x1c227e00, 0x18ae9500, 0xc7e8a9c7, 0xa4f0f0af, 0x909fbcff, + 0xbc909abc, 0x49a0909e, 0x900149fe, 0x9fbf2099, 0x00062cd9, 0x3299bf00, 
0xf4f9a65a, 0xe97e070b, + 0x55fb0042, 0xf430f40c, 0x00062cdf, 0xbf42f900, 0x0149feff, 0xfe1c9990, 0x9fa0014e, 0xbd18ee90, + 0x014ffe94, 0xff90e9a0, 0x32f9a014, 0x09c4b2a1, 0xf4b926ff, 0xe3b22b1b, 0x04bdf2b2, 0x1b320ab2, + 0x907e3cb2, 0xa4330098, 0x2fbf311f, 0x009039bf, 0x909fbc01, 0x04b329a0, 0xed3ee624, 0xbac40099, + 0x32fcb2ff, 0x98907e1b, 0x1fa43300, 0x0149fe0e, 0xbf149990, 0xfe49a099, 0x99900149, 0xd99fbf1c, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40c45, 0x062cdfb8, 0x82f90000, 0x49feffbf, + 0x68999001, 0x9fa00105, 0x3011c1b0, 0xd1b049b1, 0x0fe1b010, 0xa033c4bf, 0xa630180a, 0x0f08f40a, + 0xa0330205, 0x03050c0b, 0x060ca033, 0x94bd543d, 0xbd014ffe, 0x60ff9064, 0xf9b504bd, 0xbdf9a001, + 0x08ff9274, 0xf1b2f920, 0x1bb20ab2, 0x00a2107e, 0x801fad33, 0x33193f04, 0x0a1c0090, 0xb2b4bd01, + 0x42667e0c, 0x957bff00, 0xb2a56aff, 0xb2aeb29f, 0x90f7b2e6, 0x04b30100, 0x083ed224, 0x9034009f, + 0x30040a49, 0x0cf52396, 0xc034044a, 0xbd010a49, 0xffc4f0b4, 0x0042667e, 0xb1b2a0b2, 0xdf0849c5, + 0xff000008, 0xb0f49fff, 0x7c7e13f1, 0xa43300a6, 0x46fe4d00, 0x0a24bd01, 0x3d34bd1f, 0x60669044, + 0x009b0c3e, 0xf40109c4, 0x4a321c0b, 0x6cb25b32, 0x00a5cc7e, 0x0f1fa433, 0x6f986ebf, 0x202ebc01, + 0x10313fbc, 0x11950144, 0x01009d01, 0xf49501ff, 0xa933d51b, 0x3e03c71f, 0x34009ef0, 0xff0949b0, + 0x0bf4b926, 0x2af4df78, 0x5ec40000, 0x03e994ff, 0x98909fbc, 0x9998189f, 0x04f1fd17, 0xa60490fd, + 0x081bf490, 0x0bf4f1a6, 0x9402ff54, 0xa6f413ff, 0x081bf490, 0x0bf4f1a6, 0x2af4d944, 0xef940000, + 0xf0f9bc03, 0xb91ff998, 0xfe90000c, 0x001bb97c, 0xb5049cfd, 0xe9981ff9, 0x5cfd9001, 0x9bfd090a, + 0x01e9b504, 0xfd17f998, 0xf9b5049c, 0x01d99817, 0xb5049bfd, 0xf03e01d9, 0x54f0009e, 0x2af4dbff, + 0x59b20000, 0xbc0394b6, 0x9f98909b, 0x1f999820, 0xfd1451b0, 0x90fd04f1, 0xf490a604, 0xf1a6081b, + 0x0e100bf4, 0x3044bd01, 0x54bd4ae1, 0x009c1a3e, 0x1db244bd, 0x54bd0cb2, 0x0f3eb43d, 0xc9c4009c, + 0x250bf401, 0xde14f0b4, 0x000006cc, 0xb6ffb9c4, 0x9fbc0394, 0x0394b690, 0xbf909ebc, 0x019f989e, + 0xbc404ebc, 0xbb10515f, 0x01dd9501, 0xff01cc9d, 0x1bf495cd, 0x30f43dcc, 0x90b44af1, 0x2af4db14, + 0x94b60000, 0x909bbc03, 0x98189f98, 0xf1fd1799, 0x0490fd04, 0x1bf490a6, 0xf5f1a609, 0xbd01200b, + 0xb0e43d94, 0x91b01791, 0x4be13016, 0x009d1b3e, 0xf50169c4, 0x3400b90b, 0xc0344b90, 0x06acdf4b, + 0x010a0000, 0xbdff94f0, 0x069894b4, 0xbcffc4f0, 0x667e808f, 0x50d20042, 0xb900002b, 0xb1b000b9, + 0x0ea1b00d, 0xb000abb9, 0xb1b00b91, 0x3d8db20c, 0x4ba03434, 0x3209d1b0, 0x014cfe3b, 0x7e58cc90, + 0xbf00a5cc, 0x012c9829, 0x3309d0b4, 0xbd351fa0, 0xb5dea0e4, 0xb0b401de, 0x082f980c, 0xfd092e98, + 0x29a0049b, 0xff0bb0b4, 0x29b594cb, 0x0c90b401, 0xb504ebfd, 0xf9fd092e, 0x082fb504, 0x009cfc3e, + 0xb416e0b4, 0x8fb517f0, 0xb48ea001, 0x9ffd0ef0, 0xb429a005, 0xcbff0db0, 0x0129b595, 0x90013310, + 0x229008dd, 0x08889008, 0x89043433, 0x954be034, 0x669d0177, 0x01ee1001, 0xff4be130, 0x1bf59567, + 0xf034ff32, 0x26ff0949, 0x290bf4f9, 0xdb1490b4, 0x00002af4, 0xbc0394b6, 0x9f98909b, 0x17999818, + 0xfd04f1fd, 0x90a60490, 0x01d71bf5, 0x1bf5f1a6, 0xe0b401d1, 0x04e5f013, 0xfe13e1b0, 0x99900149, + 0x9896bf60, 0x90b40197, 0x2af4df14, 0xe43d0000, 0x94b624bd, 0xbc34bd03, 0xe130909f, 0x7c9b9057, + 0xb05c9890, 0x0e3e0ab1, 0x09c4009e, 0x720bf401, 0x3414f0b4, 0xacde5790, 0xdb000006, 0x000006cc, + 0x94f0010a, 0x0394b6ff, 0x94909fbc, 0x94b6039f, 0xf0fbbc03, 0xbf909ebc, 0x01979896, 0xa001f7b5, + 0x57c034f6, 0x26bcb4bd, 0x3137bc20, 0x7effc4f0, 0x98004266, 0x8ebf018f, 0xfd00b9b9, 0xa9b904f9, + 0x018fb500, 0xa004e9fd, 0x0af0b48e, 0xff98f9bf, 0x059afd01, 0xb405fbfd, 0xb9a00ab0, 0x3401bfb5, + 
0x119557e0, 0x01009d01, 0x3001ee10, 0x01ff57e1, 0x791bf595, 0x0149feff, 0xb5609990, 0x96a00197, + 0x334af034, 0x00b600fd, 0xbcc224bc, 0xd4b3d335, 0xfe891300, 0xc9a60fff, 0x3d090cf4, 0x9e9d3ef4, + 0x16df9400, 0x950ada95, 0xf9ff0ac9, 0xb2afb2b5, 0xb2ecb2be, 0x00d4b3fd, 0x89010f0f, 0xa60ffffe, + 0x3c0df4c9, 0x940abf95, 0xab9516a9, 0xa59fff0a, 0xaeb2bfb2, 0xfdb2ecb2, 0x0f00d4b3, 0xfe89020f, + 0xc9a60fff, 0x95190df4, 0xbd950aaf, 0x16b9940a, 0xb2059ffd, 0xb29eb2df, 0x0ffdb2ec, 0x13b0b403, + 0x0903f4f0, 0x94b9fffc, 0xb3e5f9ff, 0x890d00d4, 0xa60fffff, 0x0a0df4c9, 0x3e08e9c5, 0xff009edf, + 0x94b694c9, 0x000fdf04, 0xefffff00, 0x05f9fdf4, 0xf9fff709, 0x9edf3e94, 0x1390b400, 0xb40895f0, + 0x1f0a10e0, 0xf0b4e2a0, 0xb4f3a00f, 0xb9a011b0, 0x900149fe, 0x9fbf6899, 0x00062cd9, 0xa699bf00, + 0x280bf4f9, 0x009f253e, 0xb249e034, 0x0973b262, 0xf5e926ff, 0xb2fb8b1b, 0x3e71b260, 0x0a009abc, + 0x9ef03e09, 0x42e97e00, 0x4885fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0x8f9fa001, 0xcf01c000, + 0x99c700f9, 0x0090b34c, 0x0190b322, 0x3e02f8f6, 0xcf009f3f, 0x99c700f9, 0x0090b34c, 0x0190b333, + 0x3e02f8f6, 0x89009f53, 0xf601c100, 0x99b8009a, 0xf6000100, 0xf2df009b, 0xb8800000, 0x02020099, + 0xcf009ff6, 0x9fb2009f, 0x009f533e, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x95a9b29f, 0x060a02af, 0xf408f6b0, + 0xbfa00c0c, 0x0a0394f0, 0xfec9a01f, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cdf00, 0xfebf0000, 0xfefc30f4, 0xbb920149, 0xbf9ea001, 0xbcf9bf9e, 0xbcffb0bc, + 0xffb4f0bc, 0xacf0aba6, 0xf4e9a608, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdffc30, 0xf9000006, + 0xfeffbf02, 0x99900149, 0xa0c0b204, 0xb3dcb29f, 0xb03902a0, 0x0cf402a6, 0x01a4b30b, 0xa04f3e10, + 0x07a0b300, 0x08a0b32d, 0x3e060a3b, 0x0a00a0b8, 0x0104b31f, 0xc4dfbf67, 0x1f0a1fb9, 0xfd9409bc, + 0xdfa005f9, 0x00a0b83e, 0xb63ed0a0, 0xb9c400a0, 0xb6ddbf07, 0x9ec40294, 0x0399901f, 0x00a0943e, + 0xbfcdddbf, 0x90ffbc05, 0xbc909fbc, 0x9ec49099, 0x0599901f, 0xf00099b9, 0x9ebc1f94, 0xbbff0f90, + 0xf0ff05f9, 0x04febb94, 0xb9049ebb, 0xfdfd00ff, 0x05f9fd04, 0x1f0acfa0, 0x900149fe, 0x9fbf0499, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40405fb, 0x2cd9fc30, 0xf9000006, 0xfe99bf12, + 0xff90014f, 0xb2b1b208, 0x7ef9a0a0, 0x330046f7, 0x3d0a00a0, 0xa1273ea4, 0x0f199400, 0x017000df, + 0x1404b601, 0xbc000fbc, 0x0ab80009, 0x7e000600, 0xb2001c22, 0x800ab8a1, 0x227e0003, 0xa1fd001c, + 0x01a6f005, 0xfe01a4f0, 0x99900149, 0xd99fbf08, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, + 0x30f40415, 0x062cdff4, 0x32f90000, 0x49feffbf, 0x18999001, 0x9fa0b2b2, 0xa6b00603, 0x960cf523, + 0x0141fe00, 0x900140fe, 0x00901411, 0xb21bb210, 0x9fa67e0c, 0x33a33200, 0xbf7c1fa4, 0x7e0bbf1a, + 0x3300a0d3, 0xbf5a00a0, 0xb60abf19, 0xa4b61494, 0xa0a9bc0f, 0x010000d9, 0xa0a9bc01, 0x001c227e, + 0xb3ffa4f0, 0xb04c04a0, 0x0cf404a6, 0x02a6b013, 0xb0390cf4, 0x18f401a6, 0xa1dd3e2d, 0x08a0b300, + 0x08a6b02c, 0xb30b0cf4, 0x3e1005a4, 0xb300a1dd, 0xb31a0aa0, 0x03100ba0, 0xa1f33e06, 0x3e94bd00, + 0x0900a1f1, 0xa1f13e01, 0x3e020900, 0x0900a1f1, 0xfe29a003, 0x99900149, 0xd99fbf18, 0x0000062c, + 0x3a3299bf, 0x0bf4f9a6, 0x42e97e07, 0x0c35fb00, 0xd9f830f4, 0x0000062c, 0x99bf12f9, 0x90014ffe, + 0xb1b20cff, 0xff09f9a0, 0x900140fe, 0x09a00800, 0x427e0bb2, 0xa43300a1, 0x09bf0e1f, 0xf00296b0, + 0x19200b9c, 0x900149fe, 0x9fbf0c99, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40815fb, + 0x2cdfe830, 0xf9000006, 0xfeffbf42, 0x99900149, 0xa0b0b228, 0x33c3b29f, 0x304502a0, 0x0cf402a6, + 0x01ad330c, 0x983e00a6, 
0xa93300a2, 0x3300b407, 0x009808ad, 0x00a34c3e, 0x40febab2, 0x27009001, + 0x107e0bb2, 0xa23200a2, 0x141fad33, 0x30093f01, 0x9cf00096, 0x0196f00b, 0x00a3ba3e, 0xb6300402, + 0xfb0cf523, 0xffbac400, 0xfe0144fe, 0x44900141, 0x1c119020, 0x1cb24bb2, 0x009fa67e, 0xad33a232, + 0xc400de1f, 0x40feff0a, 0x27009001, 0x107e0bb2, 0xa23200a2, 0xc81fad33, 0x33093f00, 0xbd0a0094, + 0xa3ba3e94, 0xbf49bf00, 0x1494b61a, 0xbc0fa4b6, 0x0cd9a0a9, 0xbc010176, 0x227ea0a9, 0xa5b6001c, + 0x07a63008, 0x02090df4, 0xa3bc3e06, 0xffafc400, 0x000048d9, 0xf89fbc00, 0xbc3e3fa0, 0xbab200a3, + 0x427ecbb2, 0xa23200a1, 0x00a3bc3e, 0xb6300402, 0x6b0cf423, 0xfeffbac4, 0x44fe0141, 0x18119001, + 0xb2144490, 0x7e4cb21b, 0x32009fa6, 0x1fa433a2, 0xff0ac44f, 0x900140fe, 0x0bb22000, 0x00a2107e, + 0xa433a232, 0x093f3a1f, 0x0a009433, 0xba3e3f09, 0x1a3f00a3, 0x41fe4b3f, 0x0140fe01, 0x90271190, + 0x1cb21c00, 0xc07e0db2, 0x093f0095, 0x94f01f3f, 0x07f4f007, 0xfd0394b6, 0x39a0059f, 0x900149fe, + 0x9fbf2899, 0x00062cd9, 0x3299bf00, 0xf4f9a62a, 0xe97e070b, 0x45fb0042, 0xf830f418, 0x00062cdf, + 0xbf62f900, 0x0149feff, 0x9990ff01, 0x0142fe20, 0xb3b2a532, 0x9fa0c4b2, 0x229004bd, 0x3e16b21c, + 0x3200a42c, 0xb20bb25a, 0xa25f7e2c, 0x1fa43300, 0xa629bf2a, 0x091bf416, 0x293e91b2, 0x19a600a4, + 0x0a090bf4, 0xa4373e09, 0x01009000, 0x08f403a6, 0x0a22bfd5, 0xfe42a01f, 0x99900149, 0xd99fbf20, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40865, 0x062cdff8, 0x82f90000, 0x49feffbf, + 0x28999001, 0xb0b2c2b2, 0xcbb2d5b2, 0xe7b29fa0, 0x00bca4b2, 0x16fc7ea0, 0xb25bb200, 0xc022bca3, + 0x32bc0ab2, 0x9fe57e60, 0xb4040100, 0xa0330c80, 0x30b27500, 0x900143fe, 0xbd3e2433, 0x4a3200a4, + 0x3cb20bb2, 0x00a25f7e, 0xa433a132, 0x3cbf591f, 0x4ab20bb2, 0xb2010090, 0xa0177e7d, 0xf405a600, + 0x06a64218, 0x3eda08f4, 0x3200a4f3, 0xb20bb24a, 0xa25f7e3c, 0x33a13200, 0xbf2c1fa4, 0xb20bb23c, + 0x0100904a, 0x177e8db2, 0x05a600a0, 0x3e1008f4, 0xfe00a503, 0x62bc0143, 0x24339020, 0x08f402a6, + 0xfe1f01cd, 0x99900149, 0xd99fbf28, 0x0000062c, 0x1a3299bf, 0x0bf4f9a6, 0x42e97e07, 0x0885fb00, + 0xdff430f4, 0x0000062c, 0xffbf62f9, 0x41fe94bd, 0x0140fe01, 0x90201190, 0x19a01c00, 0x49fe09a0, + 0x24999001, 0xc6b2b2b2, 0x9fa0d5b2, 0x0cb21bb2, 0x009fa67e, 0xa433a332, 0x1abf591f, 0xd37e0bbf, + 0xa43300a0, 0x09030a00, 0x00a5af3e, 0x00bf19bf, 0xb61494b6, 0x09bc0f04, 0x010e7e00, 0x5000d900, + 0x09bc0101, 0x4002bc00, 0xb21060bc, 0x1c227e1a, 0xb2a0b200, 0x1c227e4a, 0xb2a2b200, 0x1c227e1a, + 0xf40aa600, 0x087ee81b, 0x50b50001, 0xfe52a001, 0x99900149, 0xd99fbf24, 0x0000062c, 0x3a3299bf, + 0x0bf4f9a6, 0x42e97e07, 0x0c65fb00, 0xdff430f4, 0x0000062c, 0xffbf02f9, 0x900149fe, 0xc0b20c99, + 0xaf329fa0, 0xf630040a, 0x780cf423, 0x4dfe94bd, 0x04dd9001, 0xa001d9b5, 0x01b033d9, 0x01b63025, + 0x331108f4, 0x0a2802b0, 0x03b4331f, 0xa6383e3c, 0xfffac400, 0x4c154c4b, 0x413e1550, 0xfac400a6, + 0x1d4c4bff, 0x3e1d504c, 0xc400a641, 0xf44bfffa, 0x15f84c15, 0x00a6413e, 0x4bfffac4, 0xf84c1df4, + 0xa5207e1d, 0x0149fe00, 0xbf049990, 0x0199989e, 0x9506e5b6, 0x94b6069f, 0x010fb51a, 0xa005e9fd, + 0x0149fe0e, 0xbf0c9990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0c05fb00, 0x00062cde, + 0xf4e9bf00, 0x4ffefc30, 0xd9f9a001, 0x00002af4, 0x9a18ffbf, 0xa6e9bf15, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x080e0149, 0xa0339fa0, 0x1f0e1901, 0xf401a630, + 0xa9121008, 0x30060e30, 0x0cf40496, 0xfe070e05, 0x9fbf0149, 0x00062cd9, 0x3299bf00, 0xf4f9a6ea, + 0xe97e070b, 0x30f40042, 0xf400f804, 0x22f9f830, 0x00062cd2, 0xfe29bf00, 0x11900141, 0x0140fe10, + 0x0e7e19a0, 0x00900001, 0x7e0ab20c, 0xdf008d00, 
0x00002af4, 0xf9980ebf, 0x15feb515, 0x9cf09ea6, + 0x58f9350b, 0x0001087e, 0x29bf1fbf, 0x0bf4f9a6, 0x42e97e07, 0x0825fb00, 0x00062cdf, 0xf4ffbf00, + 0x49fefc30, 0xa0adb201, 0x2af4d99f, 0x99180000, 0xb2beb215, 0x009033cf, 0x2b0cd91c, 0x9c980000, + 0xb29bbf01, 0x26d87efa, 0xa6a57e00, 0xa7853e00, 0x0680d900, 0x9abf0000, 0xecb2cbb2, 0x7ea0adbc, + 0x0a000b7e, 0x0149fe1f, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xd900f804, + 0x0000062c, 0x30f499bf, 0x014ffefc, 0xf9a0bdb2, 0xa0b3ceb2, 0xb6b13800, 0x0cf423bf, 0x23c04930, + 0xa6029bbb, 0x250cf4c9, 0x002af4d9, 0x15991800, 0x9033010c, 0x0cd91a00, 0x9800002b, 0x9bbf019c, + 0x0026727e, 0xec3eac32, 0x020c00a7, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0xf9a6ca32, 0x7e070bf4, + 0xf40042e9, 0x00f80430, 0xdfd830f4, 0x0000062c, 0xffbf72f9, 0x900149fe, 0x46fe4499, 0xb29fa001, + 0xb2b1b2a0, 0x246690c4, 0x23c0a6b1, 0xb17d0cf4, 0xf423c0b6, 0xbabc760c, 0xc096b190, 0x6c0cf423, + 0x000e067e, 0xd50302c4, 0x00002b0c, 0x07ff29c4, 0x3209bc20, 0x00a89b3e, 0x5c985bbf, 0xb26ab201, + 0x7e200e3d, 0x33002672, 0x000a00a0, 0xa8a13e06, 0xff29c400, 0xa60279bc, 0x050df401, 0x2bc410b2, + 0xff0cc4ff, 0x04f04ab2, 0xb06bbcff, 0xbb203390, 0x7e7e0210, 0x40bc000b, 0xb3243d40, 0x00bd0014, + 0x0ddd7e1f, 0xa8ab3e00, 0xfe060000, 0x99900149, 0xd99fbf44, 0x0000062c, 0x0a3299bf, 0x0bf4f9a6, + 0x42e97e07, 0x2875fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0xa0adb201, 0x2af4d99f, 0x99180000, + 0x33beb215, 0x7e0c0090, 0x3e00a808, 0xd900a903, 0x00000680, 0xcab29bbf, 0xbdbcecb2, 0x0b7e7eb0, + 0xfe1f0a00, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0xf430f400, + 0x00062cdf, 0xbf32f900, 0x0149feff, 0xb2189990, 0xbf9fa0a2, 0x9899c7a9, 0xaa1d99b3, 0x2af4d900, + 0x99980000, 0x4299c702, 0x9a0099b3, 0x0140fe00, 0x0090010a, 0x7e0bb214, 0x3300b1cd, 0xfe551fa4, + 0x020a014b, 0x7e10bb90, 0x3300b1cd, 0xbf451fa4, 0xbd010309, 0x1493ff04, 0xf09530bc, 0x1bf40194, + 0x0204b312, 0xb7f07e0f, 0x00a03300, 0x0415f007, 0xb3010090, 0xfee50704, 0x99900149, 0xa699bf14, + 0x150bf419, 0x1bb2010a, 0x00b20f7e, 0x0a1fa033, 0xe43e09f8, 0x2ebf00a9, 0x900149fe, 0x99bf1099, + 0xffffffdf, 0xfd1f0abf, 0x99b904ef, 0x9419ff00, 0xf00b9cf0, 0x94b60196, 0x059efd1e, 0xe43e29a0, + 0x1f0a00a9, 0x900149fe, 0x9fbf1899, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40c35fb, + 0x2cdfe430, 0xf9000006, 0xfeffbf52, 0x99900149, 0xa0a3b230, 0xb2afbf9f, 0xc7b4b2c5, 0xddb31dfd, + 0x95075b00, 0xf99508fa, 0x32f03210, 0x339232a1, 0x00d515f9, 0xf415f630, 0xf933460c, 0x3005fa05, + 0x0cf405f6, 0x01f9331c, 0x1f0a0087, 0xf501f630, 0x30073808, 0x0cf503f6, 0xe93e0723, 0xf93300aa, + 0x30041910, 0x0cf410f6, 0x0df6300e, 0x070d08f5, 0x00ae163e, 0x0511fd33, 0xae203e07, 0x1df93300, + 0xf6300641, 0x250cf41d, 0xf918f933, 0x18f63003, 0x330c0cf4, 0x06e617fd, 0x00b0c83e, 0x001af933, + 0x1cfd3301, 0x853e06d8, 0xf93300b1, 0x30026221, 0x0cf421f6, 0x1efd330c, 0x8e3e06c4, 0xf93300ab, + 0x33063824, 0x06b625fd, 0x00b1593e, 0x002af4df, 0x14f91800, 0x18f4a926, 0xffa9c40d, 0x3e98f9bc, + 0xa000b02e, 0xb0f03ebd, 0x33080a00, 0x069c001d, 0x0079e27e, 0x0a030033, 0xfdff0049, 0x4aa004a9, + 0x00b0f03e, 0x1630030a, 0x7e0cf504, 0x2af4d906, 0x99bf0000, 0x080a010e, 0xc7f4e1bc, 0x9ffd9899, + 0x660bf504, 0x02103306, 0x0410330e, 0x3eea3210, 0xb200ab45, 0xab3b3eea, 0xfea4bd00, 0xbb90014b, + 0x79747e2c, 0xa6a57e00, 0x1fad3300, 0x49fe063e, 0x2c999001, 0xe9959ebf, 0x0190b31f, 0x3ed4bd0a, + 0xdd00ab68, 0xff000000, 0x3380e9c4, 0xbd0a0094, 0xab7f3ef4, 0xb0010f00, 0x1ff400e6, 0x95ff0f05, + 0xd9ff08e9, 0x90f9bc95, 0x873e49a0, 0x9b3200b1, 0x5db24cb2, 0x0092987e, 
0x00b1873e, 0x5507a033, + 0xf407a630, 0xf4d9120c, 0x3000002a, 0x0df402a6, 0xabe53e2b, 0x0ca63000, 0xd9120cf4, 0x00002af4, + 0xf409a630, 0x083e5718, 0xa03300ac, 0x030a610d, 0xb70e1d33, 0xac3b3e05, 0x01999800, 0x000000df, + 0xac213e04, 0x01999800, 0x000000df, 0xac213e08, 0x2af4d900, 0x99980000, 0x00008f02, 0x049ffd02, + 0x00c61bf5, 0x00ad063e, 0x8f029998, 0xfd040000, 0x1bf5049f, 0x063e00c4, 0x999800ad, 0x00008f02, + 0x049ffd01, 0x3e2f1bf4, 0xd900ad06, 0x00002af4, 0x4000008f, 0x3e029998, 0xd900ac48, 0x00002af4, + 0xdf029998, 0x00800000, 0xf5049ffd, 0x3e00a91b, 0x3300ad06, 0x30730710, 0x0cf40716, 0x0210331a, + 0x0216304b, 0x33540cf4, 0x33270010, 0x0485011d, 0x00ac983e, 0xf40c1630, 0x16300d0c, 0x6518f409, + 0x00acd63e, 0xf40e1630, 0xf03e6d0d, 0x4ab200b0, 0x00936d7e, 0x00b1873e, 0x2b321a32, 0x5db24cb2, + 0x0098097e, 0x00b1873e, 0x2b321a32, 0x5db24cb2, 0x00977a7e, 0x00b1873e, 0x2b321a32, 0x857e4cb2, + 0x873e0099, 0x2b3200b1, 0x5db24cb2, 0xe17e070a, 0x873e0096, 0x080a00b1, 0x4cb22b32, 0x427e5db2, + 0x1a320096, 0x3cb22b32, 0x5eb24db2, 0x009a127e, 0x00b1873e, 0x2b321a32, 0x4db23cb2, 0x947e5eb2, + 0x873e0093, 0x080a00b1, 0x00b1873e, 0x9d33040a, 0xd9047900, 0x00002af4, 0x0a029998, 0x0094f108, + 0x660bf540, 0x0149fe04, 0x900142fe, 0x22902c99, 0xa09da018, 0x0499922d, 0x99929da0, 0x929da004, + 0x9da00499, 0xa0049992, 0x0119339d, 0x16300085, 0x0e08f401, 0x1d33030a, 0x3e043102, 0xfe00ade2, + 0x40fe0141, 0x2c119001, 0xb2280090, 0x7e0bb21a, 0xbf008fd8, 0xfe00bf19, 0x42fe0143, 0x0794f001, + 0x90243390, 0x3ab22022, 0xb20141fe, 0x1c11902b, 0xb20704f0, 0x0404b61c, 0x7e0509fd, 0xbf0081f1, + 0xbf3fbf29, 0x0141fe1e, 0xf00894b6, 0x94f1fff4, 0xf4b6ffff, 0x18e4b610, 0xfd181190, 0x1ab20509, + 0xfd050ffd, 0xbe7e050e, 0x083e0081, 0x2ab200ae, 0x00818e7e, 0x20bf2ab2, 0x00815e7e, 0x0e3e29bf, + 0x41fe00ae, 0x18119001, 0x2b7e1ab2, 0x1ab20081, 0xf87e10bf, 0x19bf0080, 0x04f11ab2, 0x94b6ffff, + 0x0590ff10, 0x0080c57e, 0x94f119bf, 0x40a0ffff, 0xf03e59a0, 0xfab200b0, 0x00b34b7e, 0x00b1873e, + 0x0d00a033, 0x5b01ad33, 0xae6d3e03, 0x33b0bf00, 0x330c0190, 0x3e2e0294, 0xfe00ae5e, 0x010a0141, + 0xb2181190, 0xb1cd7e1b, 0x1fad3300, 0x1fbf033e, 0x09fffe09, 0x049fff95, 0x613e10a0, 0x04f000ae, + 0xb22a327f, 0xb20f7e0b, 0xb1873e00, 0x7e9a3200, 0x3e00b1cd, 0xb200b187, 0xb24f7efa, 0xb1873e00, + 0x2af4d300, 0x39bf0000, 0x000000df, 0xfd080a20, 0x0bf5049f, 0x030a02f5, 0xef001d33, 0x0140fe02, + 0x90014cfe, 0xcc902800, 0xfe0bb22c, 0x22900142, 0x7e2ab224, 0x7f008d3b, 0x03967009, 0x940b9cf0, + 0x0e7e0791, 0x30180001, 0x01087e58, 0x00003300, 0x3e1fb20a, 0x7f00af0c, 0x00a4732a, 0x3e19b20a, + 0xc500af0a, 0xa0734019, 0xa0731403, 0xa4731804, 0x9fe51c02, 0x0c3e0100, 0x9fe500af, 0x0c3e0200, + 0x9fe500af, 0x0c3e0300, 0x9fb200af, 0x900149fe, 0x997f2c99, 0x88049973, 0x04967000, 0x731a0cf4, + 0x70480190, 0x08f40196, 0x02907336, 0x039d734c, 0x8e3e00e3, 0x997300af, 0x70009b07, 0x0cf40796, + 0x05907310, 0x069d7369, 0xc33e00cb, 0x997300af, 0x73009c08, 0x00bd099d, 0x00aff83e, 0xfe07ff89, + 0x3e04f9fd, 0x4900b010, 0xf9ffc7ff, 0x0095f194, 0xaf7f3e20, 0xc7ff4900, 0xf194f9ff, 0x8f180095, + 0xfdfe3fff, 0x9fe5049f, 0x103e4000, 0xff4900b0, 0x94f9ffc7, 0x100095f1, 0x00af7f3e, 0xffc7ff49, + 0x95f194f9, 0xb43e0800, 0xff4900af, 0x94f9ffc7, 0x100095f1, 0xfe3fff8f, 0xe5049ffd, 0x3e80009f, + 0x4900b010, 0xf9ffc7ff, 0x0095f194, 0xafdb3e08, 0xc7ff4900, 0xf194f9ff, 0x8f100095, 0xfdfe3fff, + 0x9fe5049f, 0x103ec000, 0xff4900b0, 0x94f9ffc7, 0x080095f1, 0x00b0023e, 0xffc7ff49, 0x95f194f9, + 0xff8f1000, 0x9ffdfe3f, 0x00008f04, 0xf59fff01, 0x002af4d9, 0x50991800, 0x0f009033, 0x02000089, + 
0x3e95f9ff, 0x8900b02e, 0xfffdffff, 0x49a094f9, 0x00b0f03e, 0xf514a630, 0xc401480c, 0x1cd9ffaf, + 0xbc000005, 0xf4d9e89f, 0x0a00002a, 0x3befc708, 0xc7989fbc, 0x9fbb96ef, 0x0194f005, 0x012b0bf5, + 0xe7ff29c4, 0x54010def, 0x040a029b, 0x18f5bf66, 0xf3f00119, 0xffb9e400, 0xc2f9bcff, 0xf404c6b0, + 0x040c090d, 0x00b08a3e, 0xbae44da0, 0xe9c4ffff, 0x42ebe703, 0x00943301, 0xb0babc12, 0x002b14d9, + 0x3e4ab200, 0xd900b0b3, 0x00000680, 0xbabc99bf, 0xbc4ab2b0, 0x7e7eb09b, 0xf03e000b, 0x9b3200b0, + 0x00b8967e, 0x00b1873e, 0x0f00a033, 0x1d33080a, 0x3e00b101, 0xfe00b187, 0x00900140, 0x7e0ab224, + 0x3f008ca4, 0x00a54f09, 0x06009433, 0x4fa05a0f, 0x873e1f0a, 0xf4d900b1, 0x1800002a, 0x99331599, + 0x33008700, 0x303802a0, 0x0cf402a6, 0x00a0330f, 0x01a43318, 0xb12f3e6e, 0x03a03300, 0x04a4332c, + 0xb14f3e62, 0x7e3ab200, 0x3e00bb95, 0xb200b187, 0x7ecbb2ba, 0x3e00be0f, 0x3200b187, 0xbb1e7e2a, + 0xb1873e00, 0x7e2a3200, 0x3e00c025, 0x3200b187, 0xbfb97e2a, 0xb1873e00, 0x2af4d900, 0x99180000, + 0x00903315, 0x6cfbc724, 0x4db22c32, 0xa4f05eb2, 0xbcbf7e0f, 0xb1873e00, 0x3e020a00, 0x0a00b187, + 0xb1873e03, 0xfe090a00, 0x99900149, 0xd99fbf30, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, + 0x2cde1c55, 0xbf000006, 0xfc30f4ef, 0xf00149fe, 0x9fa0ffa4, 0xa002a4b6, 0xbf9fbfca, 0xa61f0ae9, + 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x040e0149, 0xa6309fa0, + 0x120cf402, 0xd9ffafc4, 0x00002bb4, 0x0ef89fbc, 0xfebfa01f, 0x9fbf0149, 0x00062cd9, 0x3299bf00, + 0xf4f9a6ea, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0x9fa0040e, + 0xf402a630, 0xafc4100c, 0x2bb4d9ff, 0x1f0e0000, 0xfef99bbc, 0x9fbf0149, 0x00062cd9, 0x3299bf00, + 0xf4f9a6ea, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdffc30, 0xf9000006, 0xfeffbf12, 0x99900149, + 0xa0b1b208, 0x08ae959f, 0xd010af95, 0x00002af4, 0xe926ff09, 0x183d1bf4, 0xf9264909, 0x00b21bf5, + 0x0a4a0918, 0x0199331c, 0x963000ab, 0xa108f501, 0x33060a00, 0x009d029d, 0x3d130998, 0xa01f0af4, + 0x490918b9, 0x104a0f35, 0x09350199, 0xb3303e49, 0x4a091800, 0x94330a0a, 0xb4d97a00, 0xbf00002b, + 0xfff4f099, 0xb6e899c7, 0xf9bc0694, 0x3ff6b0f0, 0x0a5e0cf4, 0x0ce43303, 0x04f9905b, 0x004096b1, + 0x944e0cf4, 0xc0df02f9, 0xbc00002b, 0x9f7f909f, 0x0d019a58, 0x02f47308, 0x0409981b, 0x94f0290d, + 0x00903380, 0x8fa17e0f, 0x3d010900, 0x500935d4, 0x002af4df, 0x49fe1800, 0xb5ffd9c4, 0x020913f9, + 0xf04af935, 0x1c0affe4, 0x303e1ea0, 0x040a00b3, 0x900149fe, 0x9fbf0899, 0x00062cd9, 0xa699bf00, + 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x2cdffc30, 0xf9000006, 0xfeffbf22, 0x99900149, 0xa0b2b20c, + 0xc7adb29f, 0xbbbfe8a0, 0xf53f06b0, 0xc400cc0c, 0xaec4ffbc, 0x0fe4b3ff, 0xb0050a0d, 0x0cf53fc6, + 0xb4d900bb, 0xbf00002b, 0xff9fc499, 0x94e899c7, 0x94b606f1, 0x0ee0b306, 0x0fe0b32c, 0xb31f0a44, + 0x009a0ded, 0x7190097c, 0xf500ff96, 0xe4008c0c, 0xd9ffff9f, 0x00002bc0, 0xa0f89fbc, 0xb3d73e2f, + 0x90017c00, 0x00ff9671, 0xe46f0cf4, 0xd9ffff9f, 0x00002bc0, 0x0af99bbc, 0xb4393e1f, 0xf0c9bc00, + 0xd9c7050a, 0x02fb94f0, 0x94019990, 0xcbbc029c, 0x00e6b1e0, 0x440cf410, 0x0a9001bc, 0x0294b603, + 0xb1f0c9bc, 0xf41000f6, 0x9ba6320c, 0xa60808f4, 0x2008f49e, 0x0df4b9a6, 0xf4bfa608, 0xc0df1608, + 0xbc00002b, 0xbfbca09f, 0x0b7e7eb0, 0xb3d73e00, 0x3e040a00, 0x0a00b439, 0x0149fe03, 0xbf0c9990, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0425fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, + 0xb29fa001, 0xffa6b1c9, 0x2d0cf40f, 0xb1909abc, 0xf4100096, 0xc0de230c, 0xbc00002b, 0xd03390ae, + 0x9ab20a00, 0x00b48c3e, 0x9bb2bab2, 0x000b7e7e, 0x983e1f0a, 0x040a00b4, 0xbf0149fe, 0x062cd99f, + 0x99bf0000, 0x0bf4f9a6, 
0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdfe4, 0x12f90000, 0x49feffbf, + 0x20999001, 0x9fa0a0b2, 0xb4f0d1b2, 0xb4edb2ff, 0xe0b40aa0, 0x0709c40b, 0x1bf49ba6, 0x2f0fc775, + 0x900149fe, 0x9da00899, 0xb5029eb5, 0x9fbc019a, 0xfe94bdb8, 0xff90014f, 0xb5f9a014, 0xf1b501fc, + 0x2309c702, 0xf9bcbabf, 0x850fc7c8, 0x3c8a09c7, 0xf4f0e09f, 0x910dc71f, 0x3c05cfbb, 0xeeb990d9, + 0x1fd4f000, 0xb91fe4f0, 0xefbc0099, 0x1f94f0e0, 0x9dbcff0f, 0x95f9bc90, 0xbb05febb, 0xfcfd049d, + 0x0099b904, 0xfd04fdbb, 0x9ffd049a, 0xfeb9a005, 0x99900149, 0xd99fbf20, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x30f41c15, 0x062cd9f4, 0x82f90000, 0xd1b299bf, 0x90014ffe, 0xf9a02cff, + 0xff92dd3f, 0x091f0003, 0x01f03517, 0x2002f035, 0xb2b6b2f9, 0x33a5b2e3, 0xdb1900d4, 0x00000574, + 0x7e7e240c, 0x0909000b, 0x19200a32, 0x00b62d3e, 0xd630030a, 0x780cf409, 0xc40147fe, 0xf8b2ffc4, + 0x7790043d, 0xb6243e24, 0xff09c400, 0x92947bb2, 0xbc040c02, 0xd43da026, 0x00b4547e, 0x6b1fa433, + 0xf9c47fbf, 0xf494a607, 0xf9c75e18, 0x01991223, 0xf4019630, 0xfec7520c, 0xffe9c42f, 0xc7c8893c, + 0xf9c78afd, 0x90d93c85, 0x0cf49c26, 0x03e0333b, 0x91f9c738, 0x26909d3c, 0x2c0cf49c, 0x109052bc, + 0x9fa00100, 0x0926193f, 0x0aa108f4, 0x0149fe1f, 0xbf2c9990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, + 0xb65d3e23, 0xbf0d0a00, 0xff0fc439, 0x000000de, 0x049efdff, 0xa005f9fd, 0xb62d3e3f, 0x42e97e00, + 0x0c85fb00, 0xd9a830f4, 0x0000062c, 0x99bf82f9, 0xb2f830f4, 0x014ffea7, 0xa080ff90, 0xb0aabff9, + 0xc1b00eb1, 0x68a6c70d, 0x30016912, 0x0cf50396, 0xa9c7013b, 0x0a96306c, 0x01310cf5, 0xfe0141fe, + 0x11900140, 0x50009057, 0x2010a5b6, 0xb2b4bd19, 0xb1a27e0c, 0x1fad3300, 0x09bf011d, 0xc40142fe, + 0x2290ff6b, 0x04b4b658, 0xb9bc2ab2, 0xb26c32b0, 0x7e7eb21d, 0x3000b56a, 0xad333fa1, 0xfe00f51f, + 0x01080141, 0x3d401190, 0x08199024, 0x3d0c1e90, 0xb054bd34, 0xe1b00c91, 0xb7b93e0b, 0x1490b400, + 0x100c1bb2, 0x59bcd43d, 0x7e4ab240, 0x3300b454, 0x00c31fad, 0x1f9819bf, 0xffffde01, 0x9efde0ff, + 0x021fb504, 0x203319a0, 0x043d0a00, 0x00b7533e, 0xb40cb0b4, 0x1ab20bc0, 0x00a9ff7e, 0xa0321fbf, + 0xffffffde, 0x1f09c4e0, 0xb604fefd, 0x9ffd1894, 0xb219a005, 0x0c1bb24a, 0x7e010d10, 0x3300b454, + 0x33731fa4, 0x020a0020, 0xb7b33e01, 0x1f043300, 0x3e043d3f, 0xc400b7a0, 0x1c98ff09, 0x031d9802, + 0xfe0de0b4, 0xff90014f, 0xa8f9bc58, 0xb00e90b4, 0x001001e1, 0xb03b3201, 0x7eb20091, 0x00b4b27e, + 0x26579034, 0xd208f409, 0x00b7b33e, 0x843d19bf, 0x101f9295, 0x55900133, 0xf5362610, 0x0aff4208, + 0x0080331b, 0xb7cf3e11, 0x3e030a00, 0x3400b7d2, 0x49fe3fa0, 0x80999001, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0x5885fb08, 0xdff430f4, 0x0000062c, 0xffbf12f9, 0x900149fe, + 0x41fe1099, 0x909fa001, 0xb84a0c11, 0xb2040b00, 0xa8c87e1c, 0x1fa43300, 0x0140fe2b, 0x9000b44a, + 0x040b0800, 0xc87e0cb2, 0xa43300a8, 0x19bf161f, 0x9fa60fbf, 0xf00b9cf0, 0x9a320196, 0x00b8423e, + 0x49fea43d, 0x10999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x15fb0042, 0xfc30f40c, + 0x2cd112f9, 0x98000006, 0x1fbf04a9, 0x900140fe, 0x95f00800, 0xb50fa020, 0xac9004a9, 0x4a010b59, + 0xc87e0090, 0x0fbf00a8, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0x062cd990, 0x52f90000, + 0x4ffe99bf, 0x84ff9001, 0xf9a0030e, 0x0500ad33, 0x2af4d901, 0x99180000, 0x00993359, 0xba3200f6, + 0xbd014cfe, 0x20cc90b4, 0x00b1a27e, 0xad33ae32, 0xfe00e31f, 0xb84a0144, 0x1c449000, 0x4cb2040b, + 0x00a8c87e, 0xad33ae32, 0xfe00cb1f, 0xb44a0140, 0x18009000, 0x0cb2040b, 0x00a8c87e, 0xad33ae32, + 0xbf00b31f, 0xa64abf09, 0xa60bf5a9, 0xbcaa9000, 0x0b0140fe, 0x27009001, 0xc87e0cb2, 0xae3200a8, + 0x911fad33, 0xfe093f00, 0x43b20142, 0xf0282290, 
0x0045ff94, 0x02915410, 0x00b9753e, 0x1be43abf, + 0x5abcffff, 0xf4b9a692, 0x9bb2050d, 0xaa90b072, 0x00b3f0bc, 0xc87e2cb2, 0x0fe400a8, 0xae32ffff, + 0x511fa433, 0x107b39bf, 0x202fbc02, 0xf190f9bc, 0xa00fff94, 0x00147339, 0x0149fec7, 0xbf209990, + 0x0799909a, 0x4bfe9c3f, 0x90010d01, 0xc4f028bb, 0x02c4b6ff, 0x00b4547e, 0xa433ae32, 0x4cb2171f, + 0x0b00b84a, 0xa7387e04, 0x3eae3200, 0x0e00b9b1, 0x0149fe09, 0xbf849990, 0x062cd99f, 0x99bf0000, + 0xf9a6ea32, 0x7e070bf4, 0xfb0042e9, 0x2cde7055, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0x04a998f9, + 0x008ffdbf, 0x9ffd3ff0, 0x04a9b505, 0xd9a6e9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cdf, + 0xf4ffbf00, 0x49fefc30, 0xa0aeb201, 0x12040a9f, 0xee0910bf, 0x0df4f926, 0x00c43330, 0x3eea7f0a, + 0x5800ba26, 0xa07301ea, 0xff091800, 0x0bf4b926, 0xffa9e416, 0x059bbbff, 0xf40194f0, 0x090a091b, + 0x00ba463e, 0x49fe1f0a, 0xd99fbf01, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, + 0xdff830f4, 0x0000062c, 0xffbf32f9, 0xfe0149fe, 0x99900140, 0x0143fe14, 0xa2b29fa0, 0x0090b1b2, + 0x12339013, 0x0b10ec4a, 0x7e0cb201, 0x3300a808, 0x3f741fa4, 0x01a6300a, 0xb3690cf4, 0xf0210020, + 0xa994ffa4, 0xb6240b05, 0x2cb202a4, 0xb8a0a9bc, 0x0010eeaa, 0x00a8087e, 0x4b1fa433, 0x230010b3, + 0x040b0a3f, 0xa4f01cb2, 0x05a994ff, 0xbc02a4b6, 0xaab8a0a9, 0x7e0010ee, 0x3300a808, 0x4a281fa4, + 0x010b10ec, 0x087e3cb2, 0xa43300a8, 0x3f3f191f, 0xf401f630, 0x093f0e0c, 0x1bf49f26, 0xbb033e8a, + 0xfe050a00, 0x99900149, 0xd99fbf14, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40835, + 0x062cdfd8, 0x22f90000, 0x49feffbf, 0x30999001, 0x9fa0a132, 0x002af4d9, 0x04999800, 0x080ab2b2, + 0x400094f1, 0xfe360bf4, 0xb4bd0140, 0xb20c0090, 0xba607e0a, 0x1fa43300, 0x320ab225, 0x7e010c1b, + 0x3300b9fc, 0xc4171fa4, 0x9990ff19, 0x9099bc02, 0x7f9009bc, 0x0f92f099, 0x49fe29a0, 0x30999001, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x25fb0042, 0xd830f428, 0x00062cdf, 0xbf62f900, + 0x0149feff, 0xb2409990, 0xd99fa0b2, 0x00002af4, 0xb2049998, 0x0bc3b2a6, 0x0094f108, 0xe50bf510, + 0xfeb4bd00, 0x11900141, 0x7e1ab21c, 0x3200ba60, 0x1fad33ab, 0xff0b00d1, 0x010c1ab2, 0x00b9fc7e, + 0xad33ab32, 0x0b00c01f, 0x3d1ab2ff, 0xb9fc7ec4, 0x33ab3200, 0x00af1fad, 0xbd011458, 0xa029a094, + 0x04109039, 0x4c72157f, 0x004da47d, 0x01e4bd80, 0xbc453e01, 0x01c9c400, 0x7f200bf4, 0xf4fd660f, + 0x1ebc0a1d, 0xbc3a3ea4, 0xf4fd6600, 0x1ebc0d1b, 0x05a9fd94, 0x00bc3a3e, 0xee90df72, 0x01c57601, + 0x72020090, 0x00c473fd, 0xf06ebfd0, 0xa9c400d3, 0xffffdfff, 0x94b6ff00, 0x04effd10, 0xff00008c, + 0xfd059efd, 0x9dfd049c, 0xbf69a005, 0xff4ee429, 0xe8afc7ff, 0xf110ed94, 0xfdff0094, 0xf9fd059d, + 0xe42fa005, 0xb9ffff59, 0xe9ff00ee, 0x150bf4a4, 0x9cfd39bf, 0x059afd04, 0x29bf39a0, 0x010095f1, + 0x49fe29a0, 0x40999001, 0x2cd99fbf, 0xbf000006, 0xa6ba3299, 0x070bf4f9, 0x0042e97e, 0xf42865fb, + 0x2cdff030, 0xf9000006, 0xfeffbf62, 0x99900149, 0xa0a13228, 0x32b0329f, 0xb2d5b2c4, 0x04b630e6, + 0x01120cf5, 0x002af4d9, 0x049e9800, 0x90ffb9c4, 0x9fc41199, 0xbb080a1f, 0x99b905ef, 0x1f94f000, + 0x0f909fbc, 0x05f9bbff, 0xb334feff, 0x00e9013d, 0x4ffe94bd, 0x20ff9001, 0xb50142fe, 0x229001f9, + 0xbdf9a01c, 0x7e2bb2a4, 0x3300ba60, 0x00c91fad, 0x3c322ab2, 0xfc7e1b32, 0xad3300b9, 0x3300ba1f, + 0x30460200, 0x0cf40206, 0x00003310, 0x010d331a, 0x733e00a5, 0x003300bd, 0x0d334103, 0x3e009804, + 0xc400bda9, 0x90f1ff19, 0x9eb80126, 0x3e001136, 0xc400bd7f, 0x90f1ff19, 0x9eb80126, 0x0f001146, + 0xbdb83e10, 0xff19c400, 0x012690f1, 0x9eb8040f, 0x3e001156, 0xc400bdb8, 0x90f1ff19, 0x020f0126, + 0x115a9eb8, 0xbdb83e00, 0xff19c400, 0x012690f1, 0xb801004f, 0x00115c9e, 
0x0a034994, 0xf89cc404, + 0x18f4cfa6, 0x08c99032, 0x9fa6080b, 0xbc060df4, 0xecbcb2fc, 0x0140fea0, 0xb2200090, 0xa8087e0c, + 0x1fa43300, 0xa009bf13, 0x01009859, 0xf43e60a0, 0x030a00bd, 0x900149fe, 0x9fbf2899, 0x00062cd9, + 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf41065fb, 0x2cdfe830, 0xf9000006, 0xfeffbf52, 0x99900149, + 0xa0a3b22c, 0x2af4d99f, 0x99980000, 0x0ab2b204, 0x0094f108, 0xca0bf520, 0x0140fe00, 0x0090a4bd, + 0x7e0bb218, 0x3300ba60, 0x00b81fad, 0xff0b0ab2, 0xfc7ec43d, 0xad3300b9, 0xfe00a91f, 0x007f0141, + 0x4a1c1190, 0x100b10dc, 0x087e1cb2, 0xad3300a8, 0x7200911f, 0xbd15b20b, 0x0104bdc4, 0x3eff04e0, + 0xc400befb, 0x0bf401b9, 0x07c6b069, 0xbf350cf4, 0xf85c3c3d, 0x901f0ec4, 0x99b90309, 0xfff4f000, + 0xbc1f94f0, 0x49bc909e, 0xf49fff95, 0xbb049ebb, 0x99b904fe, 0x049dfd00, 0xa0059ffd, 0xbeef3e39, + 0x3c2dbf00, 0x1ec4f85c, 0x1d09921f, 0xf00099b9, 0x94f0fff4, 0x909ebc1f, 0xff9549bc, 0x9ebbf49f, + 0x04febb04, 0xfd0099b9, 0x9ffd049d, 0x9029a005, 0xb57601cc, 0x04009001, 0x73041190, 0xfe8800b4, + 0x99900149, 0xd99fbf2c, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f41855, 0x062cdfd4, + 0x22f90000, 0x49feffbf, 0x34999001, 0x41fea232, 0x909fa001, 0x984a0c11, 0xb2280b23, 0xa8087e1c, + 0x1fa43300, 0x0110985d, 0x90bc19bf, 0xf490a6f2, 0xf6b01008, 0x0b9cf001, 0x3e0196f0, 0x0900bf69, + 0xf0f9a6e1, 0x96f00b9c, 0x330b0a01, 0xd9330090, 0x00002b0c, 0xbf019c98, 0x014ffe9b, 0xbc0cff90, + 0x9c4d90f0, 0x08923523, 0x0990240e, 0x1f94f001, 0xb504fa90, 0xd87e01f9, 0xa57e0026, 0x49fe00a6, + 0x34999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x25fb0042, 0xf830f42c, 0x00062cdf, + 0xbf12f900, 0x0149feff, 0x320c9990, 0xd99fa0a1, 0x00002af4, 0x0a049f98, 0x00008908, 0x04f9fd01, + 0xfe2a0bf4, 0xa4bd0140, 0xb2080090, 0xba607e0b, 0x1fa43300, 0x320ab219, 0x7ec43d1b, 0x3300b9fc, + 0xc40b1fa4, 0x1a7e1f1a, 0x49fe00bf, 0x0c999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x15fb0042, 0xf830f408, 0x00062cdf, 0xbf12f900, 0x0149feff, 0x320c9990, 0xd99fa0a1, 0x00002af4, + 0x0a049998, 0x0094f108, 0x00907380, 0x0140fe2e, 0x0090a4bd, 0x7e0bb208, 0x3300ba60, 0xb21c1fa4, + 0x3d1b320a, 0xb9fc7ec4, 0x1fa43300, 0x1f1ac40e, 0x7e20a5f0, 0xfe00bf1a, 0x99900149, 0xd99fbf0c, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40815, 0x062cdffc, 0x52f90000, 0xb995ffbf, + 0x0394b606, 0xa9bcc4b2, 0xfed5b230, 0xb0c40149, 0x1899903f, 0x053e9fa0, 0x31bf00c1, 0xb3013298, + 0x0a180054, 0xb2b4bd01, 0x42667e0c, 0x052bfd00, 0x3e051afd, 0x0a00c0ed, 0xb2b4bd01, 0x42667e0c, + 0x00b9b900, 0xfd00aab9, 0x1afd0429, 0x01449204, 0x0b0040b3, 0xb3010090, 0xa0c84004, 0x0132b531, + 0x339004bd, 0x0044b308, 0x0149feb5, 0xbf189990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0455fb00, 0xf9fc30f4, 0x062cd112, 0x19bf0000, 0xbd0140fe, 0x080090d4, 0x927e09a0, 0x0fbf00c0, + 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x90010d01, + 0x09a00800, 0x00c0927e, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0xf9fc30f4, 0x062cd112, + 0x19bf0000, 0xb63fbcc4, 0xb4b606b5, 0x0140fe03, 0x90a0abbc, 0x09a00800, 0xbf01ab98, 0x42127eaa, + 0xbf0fbf00, 0x01a4f019, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0x00062cdf, 0xf4ffbf00, 0xaeb2fc30, + 0x3d0149fe, 0xbd9fa0a4, 0x08f0b3f4, 0x98ef3c0a, 0x90a0a93c, 0xfeb301ff, 0xfef30080, 0x9fbf0149, + 0x00062cd9, 0xf099bf00, 0xaa3907a2, 0xf4f9a601, 0xe97e070b, 0x30f40042, 0xd900f804, 0x0000062c, + 0x30f499bf, 0x014ffefc, 0xae3ff9a0, 0xf401e9c4, 0xf4bd1b0b, 0xaf3cff0e, 0xf59e2698, 0x9000c81b, + 0xf4b301ff, 0xe93ef410, 0xf4bd00c2, 0x1890afbc, 0x9d330999, 0x9000b100, 0xf4b301ff, 0xad18f207, + 
0x26800c06, 0x9e0cf5dc, 0x00e43300, 0x01a91838, 0x93009d33, 0x02a91800, 0x8b009d33, 0x03a91800, + 0x83009d33, 0x04af1800, 0xf005a918, 0x9476fff4, 0x059ffd08, 0x6f009473, 0x1bf4dc26, 0xc2dc3e69, + 0x04af1800, 0xf005a918, 0x94f0fff4, 0x0894b6ff, 0xcd059ffd, 0x94737099, 0xa9184d00, 0x41991201, + 0xf4199630, 0xa918400c, 0x40963102, 0x361df400, 0x005a9631, 0x182f1cf4, 0x963103a9, 0x1df40040, + 0x5a963125, 0x1e1cf400, 0x1b00d033, 0xc4ffefc4, 0x1bf404f9, 0x0af9c417, 0x0b0294b3, 0x3307a918, + 0x0a0a0090, 0xc2eb3e06, 0xfea4bd00, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0xfc30f400, 0x00062cdf, 0xbf12f900, 0x0149feff, 0xb2089990, 0xb29fa0b1, 0xb3020ea0, + 0x3f2200a0, 0x0894f0a9, 0x7e171bf4, 0xb300c1b4, 0x20060010, 0x0809181a, 0xa926060e, 0xbd051bf4, + 0x0149fee4, 0xbf089990, 0x062cd99f, 0x99bf0000, 0xf9a6eab2, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, + 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x0aa9b29f, 0x00c0b302, 0xb29abf12, 0x07b4b6cd, 0x7e00804c, + 0xfe00d992, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0xf830f400, + 0x00062cdf, 0xbf22f900, 0x0149feff, 0xb2109990, 0x0a9fa0a0, 0x0000b302, 0x3d0abf34, 0x07b29494, + 0xb20141fe, 0x0f11902b, 0x1920010c, 0xcb7e1db2, 0xa4b300d9, 0x193f1700, 0x2bb20abf, 0x94f01db2, + 0x20010cfd, 0xd9927e19, 0x0149fe00, 0xbf109990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0825fb00, 0xdff830f4, 0x0000062c, 0xffbf42f9, 0x900149fe, 0xa1b21899, 0xb4b29fa0, 0xd3b2c0b2, + 0x5600a0b3, 0x5200c0b3, 0xbd07cf18, 0x0142fe94, 0x10142290, 0x29a001ff, 0xb207cf35, 0xc1b47eca, + 0x352bb200, 0x1ab2080a, 0x00c8be7e, 0x2c00a4b3, 0x0cb22bbf, 0x5e7e1ab2, 0xa4b300c3, 0x1ab21e00, + 0x9d7e4bb2, 0xa4b300c3, 0x30b31200, 0x22bf0e00, 0x783e32a0, 0x020a00c4, 0x900149fe, 0x9fbf1899, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40845fb, 0x2cdffc30, 0xf9000006, 0xfeffbf02, + 0x99900149, 0xa0c0b204, 0x00a0b39f, 0x00c0b324, 0xb6aabf20, 0x804c07b4, 0x7e0db200, 0xb300d9cb, + 0xb21000a4, 0xc1fb7e0a, 0xc4cf3e00, 0xfe020a00, 0x99900149, 0xd99fbf04, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x2cdf0405, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x00a0b39f, 0x00c0b319, + 0xb2aabf15, 0x07b4b6cd, 0xcb7e100c, 0x143e00d9, 0x020a00c5, 0xbf0149fe, 0x062cd99f, 0x99bf0000, + 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdffc, 0x42f90000, 0x49feffbf, 0x14999001, + 0x9fa0b3b2, 0xa2b2c4b2, 0xa0b30200, 0x0c7e4200, 0x030000c6, 0xa0b3a1b2, 0x3bb23600, 0x1cb22ab2, + 0x00c4937e, 0xa4b3a0b2, 0x1ab21e00, 0x057eb4bd, 0xa0b200c3, 0x1000a4b3, 0x0c0040b3, 0xf0061918, + 0x49a0ff94, 0x1bb22ab2, 0x00c6557e, 0x900149fe, 0x9fbf1499, 0x00062cd9, 0xb299bf00, 0xf4f9a60a, + 0xe97e070b, 0x45fb0042, 0x062cdf04, 0xfebf0000, 0xfefc30f4, 0x9ea00149, 0xf9bf9ebf, 0x0bf4e9a6, + 0x42e97e07, 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4fe, 0xa00149fe, 0xbf9ebf9e, 0xf4e9a6f9, + 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4febf, 0x0149fefc, 0x9ebf9ea0, 0xe9a6f9bf, + 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cdf, 0xf4ffbf00, 0xaeb2fc30, 0xa00149fe, 0x0eaa989f, + 0x0a00a0b3, 0x333ef4bd, 0xea9800c6, 0x00a0b30f, 0x90010f0e, 0x94bd0eff, 0xfef9e9bc, 0x9fbf0149, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, + 0x9fa00149, 0x1a00b0b3, 0xb30ea998, 0x980d0090, 0x94b30fa9, 0x01090c00, 0xbc0e9990, 0x49fe99ab, + 0xd99fbf01, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4e9bf00, + 0x4ffefc30, 0x98f9a001, 0xa9a002b9, 0xa9b5ff09, 0x02b99801, 0xabb5ffbf, 0x03acb504, 0xbf02a9b5, + 0xf4f9a6e9, 0xe97e070b, 
0x30f40042, 0xf400f804, 0x2cdffc30, 0xf9000006, 0xfeffbf22, 0x99900149, + 0xa0a0b20c, 0xb2c1329f, 0xb3020ad2, 0xb85e00b0, 0x0001400c, 0xc998d4bd, 0x0090b304, 0x00143344, + 0x0390b308, 0x3fcf3f3c, 0xf4f926b9, 0xcf18321b, 0x01b91801, 0x1bf4f926, 0x02ce1827, 0xbc02bf18, + 0x9dbc90dd, 0x0394b690, 0x014099b8, 0x9009bc00, 0x1bf4ef26, 0xbd29a00b, 0xc7493ea4, 0x01dd9000, + 0xb318cc90, 0x0ab314d4, 0x0149fe04, 0xbf0c9990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0425fb00, 0x00062cd9, 0xf499bf00, 0x4ffefc30, 0xa0acb201, 0x09abbff9, 0xf4b9a6f0, 0xaa980d0b, + 0x01cbb503, 0x00d78b7e, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x30f400f8, 0x062cdffc, 0x32f90000, 0x49feffbf, 0x90a1b201, 0xb3b21099, 0x04bd9fa0, 0xe33ef002, + 0x19bf00c7, 0x0df492a6, 0x3e030a09, 0xa600c7ea, 0x091bf492, 0xea3e020a, 0x009000c7, 0x7e1ab201, + 0xa600c764, 0xdd08f403, 0x49fea4bd, 0x10999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x35fb0042, 0xf830f404, 0x00062cdf, 0xbf82f900, 0x0149feff, 0xb2289990, 0xb29fa0a3, 0x00a9b3b8, + 0xb0b30084, 0x47fe7f00, 0x05a49801, 0x54bd24bd, 0x779014bd, 0xc86d3e24, 0x0c3a9800, 0x02bc94bd, + 0xb279a0b0, 0xd78b7e7c, 0x0f79bf00, 0xf49fa6ff, 0x643d090b, 0x00c85b3e, 0x90015590, 0x04a60100, + 0x33d908f4, 0x90070060, 0x24bc0111, 0x03399820, 0x18f429a6, 0xbd01060b, 0xc85e3e04, 0xb24bb200, + 0x16fc7e1a, 0xf45aa600, 0x1190060d, 0x06399801, 0x19a6f43d, 0x0f050cf4, 0xbd8f2001, 0xc8a33ea4, + 0xfe020a00, 0x99900149, 0xd99fbf28, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40885, + 0x062cdff0, 0x82f90000, 0x49feffbf, 0x30999001, 0xa00147fe, 0x08a9989f, 0xb1b0a6b2, 0xb0f10509, + 0x843d0a91, 0x779090b2, 0x0369982c, 0x7fa0f4bd, 0x08f409a6, 0x0804bd07, 0x0a90b401, 0x1bf409a6, + 0x00803335, 0xc9663e32, 0x0c6a9800, 0xb24010bc, 0x7e4bb27c, 0xbf00d78b, 0xa6ff0f79, 0x0f1bf49f, + 0x09012290, 0xf439a6f1, 0x43b2051b, 0x3e011190, 0xbd00c939, 0xbdf10314, 0x05699824, 0x08f419a6, + 0x0020b3cb, 0xf429a61e, 0x60b50f18, 0x09f0b408, 0x773ef3a0, 0xf10f00c9, 0x1bf45fa6, 0xbc05b205, + 0xe93e0009, 0x1a0a00c8, 0x59a6f109, 0xb50d0bf4, 0x90b40865, 0xbd95a009, 0x0149fea4, 0xbf309990, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x1085fb00, 0xd9f830f4, 0x0000062c, 0x99bf82f9, + 0x90014ffe, 0xa3b228ff, 0xb4b2f9a0, 0xc033d0b2, 0xdab20e00, 0x140cb43d, 0x00da047e, 0xbd0142fe, + 0x24229014, 0xff07fe08, 0xfb05fc06, 0x00ca463e, 0xbd0c3a98, 0xb014bc94, 0x2cb229a0, 0x00d78b7e, + 0xf00f29bf, 0x0df49fa6, 0xa6fd0f56, 0x110cf49f, 0x18f496a6, 0xf495a630, 0x153e451b, 0x98a600ca, + 0xa62f0bf4, 0x371bf497, 0x90010998, 0x09b50199, 0xca433e01, 0x04099800, 0xb5019990, 0x433e0409, + 0x099800ca, 0x01999002, 0x3e0209b5, 0x9800ca43, 0x99900309, 0x0309b501, 0x00ca433e, 0x999009bf, + 0x9009a001, 0x39980111, 0xf419a605, 0x49fe8508, 0x28999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, + 0xe97e070b, 0x85fb0042, 0xf830f408, 0x00062cdf, 0xb222f900, 0xb2ffbfa1, 0x03aa98b2, 0x49fe1bbf, + 0x10999001, 0xa00140fe, 0x0c00909f, 0x8b7e0cb2, 0x0cbf00d7, 0xb2031a98, 0xd7c97e2b, 0x011b9800, + 0xb9a6ff09, 0x98101bf4, 0x12b50419, 0x0292b502, 0x00cabd3e, 0xb2031a98, 0xd7c97e2c, 0x031a9800, + 0xfd0c1bbf, 0x00d7c97e, 0x900149fe, 0x12a01099, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x25fb0042, 0xc830f408, 0x00062cdf, 0xbf82f900, 0xf830f4ff, 0x900149fe, 0x9fa06099, 0x18049992, + 0xf4bd1cae, 0xa9989fa0, 0x0aa2b205, 0x0b91b005, 0x1a00e933, 0x0c2b9802, 0xfe092c98, 0xf100014a, + 0xfe44aa90, 0xa6b20144, 0x057e2001, 0x05b200d8, 0x4490a3b2, 0xcb7e3e30, 0x08299800, 0x0bf439a6, + 0xb22ab231, 0xb2010c3b, 0xc9947e4d, 0x04499800, 
0x1f0094b3, 0xb3034998, 0x98180094, 0x94b30249, + 0x49bf3b00, 0x18f491a6, 0x3e30b209, 0xb200cb71, 0x0bb0b419, 0x6ab291b2, 0x00d8457e, 0x35a6a3b2, + 0x0ab91bf4, 0xf503a603, 0xb301a30b, 0xb20c0014, 0x3e743d03, 0xb200cb9b, 0x94010703, 0x54bd0738, + 0xa73e86b2, 0x2a9800cc, 0xbce4bd0c, 0x4cfe1053, 0x16e1b001, 0xb258cc90, 0xd78b7e1b, 0x00adb300, + 0x90b400f3, 0xa6f00f16, 0xd80cf59f, 0xb294bd00, 0x1591b02a, 0x00c60c7e, 0xa9b3a4b2, 0xbf00d600, + 0x4c6bb22a, 0x4db20080, 0x00d9cb7e, 0xadb3a0b2, 0xb200a700, 0xb21bb22a, 0x014dfe4c, 0x7e50dd90, + 0xb200c404, 0x00adb3a0, 0x2ab20090, 0x0c014b90, 0x014dfe01, 0x7e54dd90, 0xb200c6cf, 0x00a4b3a0, + 0x15b0b478, 0xfe0c2c98, 0xaa90014a, 0xc6987e30, 0x0c90b400, 0x1bf491a6, 0x14b0b41e, 0x90014afe, + 0x697e30aa, 0x90b400ca, 0x059f9815, 0x2300f4b3, 0x00cc953e, 0x9ea6ef0e, 0x00090df4, 0xcc953e03, + 0x014afe00, 0x7e30aa90, 0x3e00c764, 0x9800cc31, 0xe0b402f9, 0xf491a614, 0xfeb5061b, 0xa6f9bf02, + 0x091bf491, 0x953efea0, 0xf99800cc, 0xf491a601, 0xfeb5061b, 0xb24bb201, 0xc6557e2a, 0x0004b300, + 0x01559013, 0x98806690, 0x59a60529, 0xfefa08f5, 0x46007033, 0xb2042f98, 0xb2e4bd8d, 0x00f1b02a, + 0x70dc020b, 0xfe000000, 0x11900141, 0x0111b05c, 0x00c5a97e, 0xbf042c98, 0x048bb22a, 0xd9597efb, + 0x981ebf00, 0xa0b2042c, 0x2ab2b4bd, 0xca7e0db2, 0x04b300c5, 0xff040600, 0x0d3e04bd, 0x2a9800cd, + 0xb003bc0c, 0x00904cb2, 0xd7c97e01, 0x0b90b400, 0x08f409a6, 0x032f98ec, 0xb59039bc, 0x9fa60929, + 0xbd0808f4, 0x0929b594, 0x49fea4bd, 0x60999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x30f40042, 0x3885fb08, 0xd9cc30f4, 0x0000062c, 0x99bf82f9, 0xfef830f4, 0xff90014f, 0xb0f9a05c, + 0xc8b20be1, 0xa3b2d6b2, 0x8400b9b3, 0x00e9b302, 0xaf18027f, 0x0141fe2c, 0x119094bd, 0x0019a054, + 0x00f93303, 0xc43d026d, 0xcf7e1db2, 0xa0b200c6, 0x5e00adb3, 0xbc17bf02, 0x7998f068, 0xf59fa601, + 0x98024d08, 0x94b30479, 0x05000a01, 0x00cfee3e, 0x3e0069b3, 0x027f9802, 0xf9a6f009, 0x022a0bf5, + 0x0c7e3ab2, 0xa2b200c6, 0x1e00a9b3, 0x0c3c9802, 0x40fe7bb2, 0x34009001, 0x987e0ab2, 0x0ab200c6, + 0xcc0570b5, 0xa27e708b, 0xa0b200c7, 0xe900adb3, 0x0d00b401, 0x01a6f001, 0x01ce0bf5, 0x01d00cf5, + 0x804cb43d, 0x7e2ab200, 0xbf00da04, 0x070b943a, 0xb200804c, 0xd9cb7e2d, 0x0ca1b000, 0xb600adb3, + 0x05291801, 0x76042f18, 0xf4f00894, 0xe59fffff, 0xe966ff09, 0x01980bf5, 0xffffe9e4, 0x08f589a6, + 0xf4bd018e, 0x18902fbc, 0x9d330999, 0x90018200, 0xf4b301ff, 0x083ef207, 0x8e3c00d0, 0xf59f26f2, + 0xc4016d08, 0x94f0fffd, 0x529dbcff, 0x0df456a6, 0x9065b205, 0xa43d10d9, 0x3db029bc, 0x3ee4bdc4, + 0xb100ceb3, 0xf5006fd6, 0xb401450c, 0xbe3c0b10, 0xf81e3c98, 0x0bf4f926, 0xff94f017, 0xfd009939, + 0x9033049f, 0x010a0600, 0x0ce9bf3c, 0x01ee9001, 0xa601dd90, 0xce08f4e5, 0xed00c933, 0xf0293f00, + 0x0bf40894, 0x00a93308, 0x94bd00d0, 0x91b03ab2, 0x1391b014, 0x301291b0, 0x4bfe5b91, 0x5bbb9001, + 0x00c8057e, 0xadb3a0b2, 0x3400ef00, 0x90335b90, 0x3ab21100, 0x00cae57e, 0xadb3a0b2, 0xb400db00, + 0x40b40d00, 0x014ffe11, 0xb250ff90, 0x070d942e, 0x4101f1b0, 0x3ab20080, 0x4cb2010b, 0x7e0011b0, + 0xb200c5a9, 0xb22cb20b, 0x014dfe3a, 0x7e4cdd90, 0xb400c404, 0xa0b214e0, 0x3ab21cb2, 0x0db2b4bd, + 0x00c5ca7e, 0x91000db3, 0x13b0b400, 0x90014afe, 0x697e34aa, 0x3a9800ca, 0x0db0b40c, 0x90014cfe, + 0x8b7e48cc, 0xa0b200d7, 0x6d00a4b3, 0x011290b4, 0xf491a6f0, 0x4e98321b, 0x70efcd01, 0x0600f4b3, + 0x2918700f, 0xff94f006, 0xbb909ebc, 0x49b5029f, 0xcfa53e01, 0xb20bb200, 0x7e2cb23a, 0xb200c35e, + 0x00a4b3a0, 0x0265bb34, 0x90014afe, 0x647e34aa, 0x60b300c7, 0x90b42000, 0x8085bc0b, 0xb09095bc, + 0xf13e0b91, 0x020000cd, 0x00cfd53e, 0xd53e0300, 0x00b400cf, 0xb23ab20c, 
0xc6557e2b, 0xb594bd00, + 0xee3e0579, 0x030000cf, 0x00cfee3e, 0x49fe0200, 0x5c999001, 0x2cd99fbf, 0xbf000006, 0xa60ab299, + 0x170bf4f9, 0x00d0143e, 0x33062918, 0xfe4f009d, 0x00cfcc3e, 0x0042e97e, 0xfb0830f4, 0x30f43485, + 0x062cdfdc, 0x82f90000, 0x30f4ffbf, 0x0149fef8, 0xa04c9990, 0x0bb1b09f, 0xd4b2c2b2, 0xa5b2e6b2, + 0x1700b9b3, 0x00e9b301, 0x94bd0112, 0x3d0141fe, 0x481190c4, 0x1db219a0, 0x00c6cf7e, 0xadb3a0b2, + 0xbf00fb00, 0xf042bc19, 0xa6019998, 0xea08f59f, 0x005ab200, 0xc60c7e03, 0xb3a3b200, 0x00de00a9, + 0x5c981bbf, 0x0140fe0c, 0xb2300090, 0xc6987e0a, 0xcc0ab200, 0xa27e702b, 0xa8b200c7, 0xad00adb3, + 0x7021cd00, 0x420147fe, 0x77900080, 0xd1443e44, 0x0c00b400, 0x79a094bd, 0x09a6f009, 0x00091bf4, + 0xd14b3e02, 0xa6f00900, 0x090df409, 0x4b3e0300, 0xc0b400d1, 0x0704b60b, 0x0db2e4bd, 0xb4bd5ab2, + 0xb00021b0, 0xa97e0171, 0x5abf00c5, 0x2cb20bb2, 0xcb7e3db2, 0x7ebf00d9, 0x3bb2a0b2, 0x2cb25ab2, + 0xca7e0db2, 0x04b300c5, 0x3ab24500, 0x057eb4bd, 0xa0b200c3, 0x3700a4b3, 0x01bb7000, 0xf404a602, + 0x40b2050d, 0xb2101b90, 0xb03bbc6a, 0x2f7e0cb2, 0x4afe00da, 0x0240bb01, 0xbc30aa90, 0x647e6060, + 0x14bd00c7, 0x6d004db3, 0xb280b2ff, 0x7e3bb25a, 0x3e00c655, 0x0000d159, 0x0149fe02, 0xbf4c9990, + 0x062cd99f, 0x99bf0000, 0xf9a60ab2, 0x7e070bf4, 0xf40042e9, 0x85fb0830, 0xf830f424, 0x00062cd9, + 0xbf32f900, 0x014ffe99, 0xb214ff90, 0xbdf9a0a1, 0xb2b2b294, 0x0140fec3, 0x9003204b, 0x09a01000, + 0x577e0ab2, 0x09bf00da, 0x4c0090b3, 0x4800a4b3, 0x09bf91a0, 0xbf0192b5, 0x1000490f, 0xbf04f9b5, + 0xb520090f, 0x09bf05f9, 0xbf0693b5, 0x3501090f, 0x09bf1cf9, 0xbf2c9a35, 0xb5f0090f, 0x0fbf0af9, + 0xb540f990, 0x0fbf0ef9, 0xb5c0f990, 0x0abf0ff9, 0x00d1f63e, 0x49fea4bd, 0x14999001, 0x2cd99fbf, + 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x35fb0042, 0xd030f408, 0x00062cdf, 0xbf82f900, 0x0149feff, + 0xb2509990, 0x059fa0a3, 0x00a9b302, 0xb63004fb, 0x0b9cf000, 0xbd2ca935, 0x0ca9b594, 0x00c60c7e, + 0x3abfa0b2, 0x804cb4bd, 0x7e0db200, 0xb200d9cb, 0x00adb3a5, 0x093f04b1, 0xa24a9d33, 0x01091804, + 0x9a469d33, 0x02091804, 0x92469d33, 0x03091804, 0x8a539d33, 0x040e1804, 0x18050918, 0x0d18060f, + 0xffe4f007, 0xf0ff94f0, 0x94b6fff4, 0x10f4b608, 0xb6059efd, 0xf9fd18d4, 0x05dffd05, 0x045e0bf5, + 0xd6b00505, 0x5d0cf503, 0x013db504, 0x0e01d4b3, 0xb5500049, 0xe43e0239, 0x0d1800d2, 0x09091808, + 0x180a0f18, 0xd4f00b0e, 0xff94f0ff, 0xb6fff4f0, 0xf4b60894, 0x059dfd10, 0xfd18e4b6, 0xeffd05f9, + 0x023eb505, 0x0bb23ab2, 0x00c6557e, 0x09023a98, 0xf4a9a6ff, 0x04bd091b, 0x00d6fa3e, 0x9007a5b6, + 0x3ab5303b, 0xd8c17e03, 0xb3a5b200, 0x03f500ad, 0x09033b98, 0x343a90c0, 0xfd3fbb90, 0xb5b604b9, + 0xda577e03, 0xb3a5b200, 0x03d900ad, 0xfe0147fe, 0x77900148, 0x9044bd40, 0x88900179, 0x0991b03c, + 0x00d4543e, 0x8ea0e4bd, 0x0f0044b3, 0xbd0c3a98, 0x3efe0cb4, 0xb200d377, 0xb24bb23a, 0xc4ea7e7c, + 0xb3a5b200, 0x039d00ad, 0x94f0793f, 0x120bf401, 0xb20c3a98, 0x7eff0c4b, 0x3e00d7c9, 0xb200d451, + 0xc1fb7e7a, 0x00a0b300, 0x0c3a980f, 0xfd0c4bb2, 0x00d3773e, 0x94f0793f, 0x0e1bf402, 0xb20c3a98, + 0x3efd0c4b, 0xb400d446, 0x3ab209b0, 0x8db2010c, 0x00c6cf7e, 0x5d00a0b3, 0x3fb2793f, 0x99c724bd, + 0x01999002, 0x980a91b0, 0x54b354f5, 0xb0b43900, 0x0022bc09, 0x02bc030c, 0x0304b600, 0x014001b8, + 0x1031bc00, 0x2f7e1ab2, 0x30bc00da, 0x4309b800, 0x95200001, 0xb45302b5, 0x0fb50af0, 0x3e81a054, + 0x9000d411, 0xff900122, 0x1424b318, 0xd73e3ebe, 0x3f8ebf00, 0x027f5879, 0x98077d18, 0x3a9803ee, + 0x0299c70d, 0xcc00f3f0, 0x96cb70ff, 0xcb4bb21f, 0x010cd8e6, 0xebf0d6cb, 0x7e01e0f6, 0x9800c124, + 0x4bb20c3a, 0xc97e6cb2, 0xa5b200d7, 0xb400adb3, 0x01449002, 0xa6033b98, 0xeb08f54b, 0xbc94bdfe, + 
0x89a0b0bb, 0x8b7e8ab2, 0xa5b200da, 0x9400adb3, 0xbd37b202, 0x547f9884, 0xbc9088bc, 0x94b69098, + 0x4099b803, 0x39bc0001, 0x0b91b090, 0x5300f9b3, 0x033c9802, 0x3d0fa0b4, 0xbc24bdb4, 0x44bdc0cc, + 0x00da047e, 0x3e0f60b4, 0x9800d56f, 0x2bb20d3a, 0x7e0c41b0, 0x3300c178, 0x00b500a9, 0xfe0c3a98, + 0x2bb2014c, 0x7e38cc90, 0xb300d78b, 0x020c00ad, 0xb40be0b4, 0xef980e90, 0xd899c703, 0x1bf59fa6, + 0x3a98008e, 0x0c2bb20d, 0xc14e7e01, 0x014cfe00, 0x2bb23ab2, 0x7e30cc90, 0xb300c52e, 0x981306a4, + 0x2bb20c3a, 0xc97efd0c, 0x6c3e00d7, 0xadb300d5, 0x7401cb00, 0x93f01c90, 0x9099bc00, 0x7f0069bc, + 0xff19e401, 0x091bf4ff, 0x6c3e0260, 0x3a9800d5, 0x014cfe0c, 0xffff1be4, 0x7e34cc90, 0xb300d78b, + 0x019800ad, 0x343af034, 0xf9263690, 0x60100df4, 0xff1be402, 0x0c3a98ff, 0x00d5613e, 0xb20c3a98, + 0x7efd0c2b, 0xb300d7c9, 0x017000ad, 0x98012290, 0x2aa6033a, 0xff3708f5, 0x6eb264b2, 0xd43db43d, + 0xc4bdf4bd, 0x00d5a33e, 0x9473e97f, 0x010d0a00, 0x00d59d3e, 0x0600d033, 0xcc90010b, 0x01ff9001, + 0xa602ee90, 0xe308f4fa, 0x0b00c4b3, 0x3e547cb5, 0x3300d6df, 0x00a600b9, 0xb0013998, 0x0cf40296, + 0xb2030930, 0x5479b56d, 0xf4bde4bd, 0x00d5e23e, 0x9073d97f, 0x697c0a00, 0x01ff90f9, 0x9001ee90, + 0x399802dd, 0xf4e9a603, 0x593ee908, 0x94bd00d6, 0x79b5f10b, 0xb224bd54, 0xd6333eb1, 0xe4407f00, + 0xf4ffff09, 0xf10f260b, 0x1bf4bfa6, 0xff0be40b, 0xd62b3eff, 0x0c3a9800, 0xffff0ce4, 0x00d7c97e, + 0xb900adb3, 0xff0be400, 0x9019b2ff, 0x44900122, 0x9891b202, 0x29a60339, 0x09c508f4, 0xf5b9a6f1, + 0x9800a00b, 0x3c980c3a, 0xd7c97e0a, 0x00adb300, 0x31b5008c, 0xd6df3e0a, 0xbd6f7f00, 0x01c19294, + 0xf05179b5, 0x04bd00f3, 0x3e527fb5, 0x7f00d697, 0x014c584b, 0x900c3a98, 0xb3f00100, 0x00c3f000, + 0x7e024490, 0xb300d7c9, 0xb45200a4, 0xe9980be0, 0x70999001, 0xa601e9b5, 0xd608f401, 0x9808607c, + 0xf00c0c3a, 0xffff0be4, 0x00d7c97e, 0x2d00a4b3, 0xe4014cfe, 0xb2ffff0b, 0x40cc903a, 0x00c4ea7e, + 0x1900a4b3, 0x98469034, 0x94f0517f, 0xf0f9bcff, 0x3e517fb5, 0x0a00d6df, 0x3ea5b203, 0x9000d6ea, + 0x77900188, 0x148db318, 0x49fefd90, 0x3c999001, 0x387e9abf, 0x003e00d9, 0x030500d7, 0x00d7023e, + 0x0bb204bd, 0x557e3ab2, 0x50b300c6, 0x3a981a00, 0x7e04bd0c, 0x9800d890, 0x30b50d3a, 0xd9387e0c, + 0x0d30b500, 0x900149fe, 0x9fbf5099, 0x00062cd9, 0xb299bf00, 0xf4f9a65a, 0x443e110b, 0x010500d7, + 0x00d7003e, 0x0042e97e, 0xf43085fb, 0x22f9fc30, 0x00062cd2, 0xb229bf00, 0x0141fea0, 0xa00c1190, + 0xc5eb7e19, 0x0c0a9800, 0x00d8907e, 0x7e0d0a98, 0xb200d938, 0xd9387e0a, 0xbf1fbf00, 0xa6a4bd29, + 0x070bf4f9, 0x0042e97e, 0xd90425fb, 0x0000062c, 0x30f499bf, 0x014ffefc, 0xf9a0aeb2, 0x020aa9bf, + 0x18f4b9a6, 0x01b9900d, 0xbd98e9bc, 0xfec9a0a4, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, + 0x0042e97e, 0xf80430f4, 0x062cd900, 0x99bf0000, 0xfefc30f4, 0xaeb2014f, 0xa9bff9a0, 0xb9a6020a, + 0x900b18f4, 0xa4bd01b9, 0xfe99ecbc, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cd900, 0x99bf0000, 0xfefc30f4, 0xf9a0014f, 0xc9a6b9bf, 0x0c0908f4, 0xd8293ef1, + 0x02abb500, 0xacb5aca0, 0x0149fe01, 0x2cd99fbf, 0xbf000006, 0xa6cab299, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0xaebf0149, 0xa9989fa0, 0xb0bebc02, 0x99bfaba0, + 0x08f4b9a6, 0x92b9bc08, 0xa998a9a0, 0xa6aabf01, 0x051bf4a9, 0x49fef10a, 0xd99fbf01, 0x0000062c, + 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cd9, 0xf499bf00, 0x4ffefc30, 0xb3f9a001, + 0x7e0800a0, 0xfe00d938, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, + 0xf830f400, 0x00062cdf, 0xbf32f900, 0x0149feff, 0xb2149990, 0xb29fa0a0, 0x00a0b3b3, 0xa6fd0242, + 0x3a0cf4a2, 0xfe01ab90, 
0xb4b60141, 0x10119002, 0x8b7e1ab2, 0xa4b300da, 0x19bf2700, 0x2cb21db2, + 0x90a0e4bd, 0xe990dfbf, 0xbc9eb201, 0x90a699fc, 0xbff408f4, 0x3e3da0dd, 0x0a00d91d, 0x0149fe02, + 0xbf149990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0835fb00, 0x00062cdf, 0xf4febf00, + 0x49fefc30, 0xbf9ea001, 0xa6f9bf9e, 0x070bf4e9, 0x0042e97e, 0xf80430f4, 0xfc30f400, 0x2cd112f9, + 0xbf000006, 0x0140fe19, 0x002a9cda, 0x08009000, 0xd37e09a0, 0x0fbf004c, 0xa63019bf, 0x0bacf000, + 0xb901a6f0, 0xf9a601aa, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, 0x0000062c, 0x40fe19bf, + 0x2a9cda01, 0x00900000, 0x7e09a008, 0xbf004c21, 0x3019bf0f, 0xacf000a6, 0x01a6f00b, 0xa601aab9, + 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x9cda0140, 0x9000002a, + 0x09a00800, 0x004e397e, 0x19bf0fbf, 0xf000a630, 0xa6f00bac, 0x01aab901, 0x0bf4f9a6, 0x42e97e07, + 0x0415fb00, 0xf9fc30f4, 0x062cd112, 0x19bf0000, 0xfeffb4f0, 0x00900140, 0x7e09a008, 0xbf000b94, + 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, + 0x7e09a008, 0xbf000b7e, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x22f9fc30, 0x00062cd1, + 0xb219bf00, 0x0140fea2, 0x0090a43d, 0x7e09a00c, 0xa00040e5, 0xbf0fbf2a, 0x00a6b019, 0xa60bacf0, + 0x070bf4f9, 0x0042e97e, 0xf40425fb, 0x22f9fc30, 0x00062cd1, 0xb219bf00, 0x0140fea2, 0x0090a43d, + 0x7e09a00c, 0xa00040c3, 0xbf0fbf2a, 0x00a6b019, 0xa60bacf0, 0x070bf4f9, 0x0042e97e, 0xf40425fb, + 0x2cdfe430, 0xf9000006, 0xfeffbf82, 0x45fe0149, 0x3c999001, 0xa00147fe, 0x2455909f, 0xd9347790, + 0x00001420, 0x4bfe9abf, 0x90080c01, 0xff0d2cbb, 0x0000c17e, 0xeb00a433, 0x3f0c30b4, 0x0c943339, + 0x043118e2, 0x0f001033, 0xb0011933, 0x3e043d00, 0x9800dc65, 0x30d9023f, 0x98000014, 0x34580431, + 0x3f5fa00a, 0x0339989f, 0xb5183690, 0xff090159, 0xf43379a0, 0x717e1800, 0xa032003b, 0x2900ad33, + 0xdf010901, 0x00001430, 0x1272f920, 0xbd0043f0, 0xdba53e14, 0x0241bc00, 0x010006b1, 0x40060df4, + 0x947e0100, 0x28d9000b, 0xbf000014, 0xff2ce49a, 0xb26bb2ff, 0x1300de0d, 0x1e7e0000, 0x7aa000d0, + 0xd400adb3, 0x985bbf00, 0x1d90015c, 0x7c0eb204, 0x10bc2020, 0x1300da10, 0xd87e0000, 0xa0320026, + 0xc500ad33, 0x4cb4bd00, 0x00da0100, 0xa6000013, 0xa408f414, 0x00dc543e, 0xd9023f98, 0x00001430, + 0x58043498, 0x5fa00a32, 0x39989f3f, 0x18389003, 0x090159b5, 0x3379a0ff, 0x7e1600f4, 0x32003b71, + 0x00ad33a0, 0x30df0084, 0x20000014, 0xff26e4f1, 0x3e24bdff, 0xbc00dc45, 0x16b11262, 0x0df40100, + 0x01004106, 0x000b947e, 0x5c985bbf, 0x042d9001, 0x00da1eb2, 0x7e000013, 0xe4002672, 0xbcffff4c, + 0xa0322021, 0x1db28bb2, 0x001300de, 0x40417c00, 0x3500a433, 0x001428d9, 0x7e9abf00, 0xa000cd48, + 0x00a4b37a, 0x4cb4bd13, 0x00da0100, 0xa6000013, 0xa608f426, 0x5c985bbf, 0xbd7ab201, 0x7e040ed4, + 0x320026d8, 0x2db034a0, 0x817e3ab2, 0x0d33000f, 0x30fe7100, 0x020f3a01, 0x1838f130, 0x04090333, + 0x30014afe, 0x31303991, 0x90b4bd3b, 0x717e38aa, 0xdf3e000f, 0x54d900da, 0xf400002a, 0x2cdffc30, + 0xf9000006, 0xbfa0b202, 0xfe9abfff, 0x99900149, 0xa0640b04, 0x00de7e9f, 0x33100e00, 0x896100a4, + 0xcf02a600, 0xf5f1009f, 0x9ff61000, 0xa6008a00, 0x10004b02, 0x104dc4bd, 0x7ee4bd27, 0x33001b0f, + 0xd91500a4, 0x00002a54, 0x9b7e9abf, 0x040e0000, 0x00dd1c3e, 0xe84b0ab2, 0x16fc7e03, 0xa4008900, + 0x00aab802, 0xa5b60002, 0x01aa920a, 0x4f009af6, 0x99b80111, 0xf6000200, 0xe43d009f, 0x900149fe, + 0x9fbf0499, 0x00062cd9, 0x3299bf00, 0xf4f9a6ea, 0xe97e070b, 0x05fb0042, 0x00000004, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00005600, 0x00005625, 0x0000564a, 0x0000566f, 0x00005694, 0x000056b9, 0x000056de, 0x00005703, + 0x00005728, 0x0000574d, 0x00005772, 0x00005797, 0x000057bc, 0x000057e1, 0x00005806, 0x0000582b, + 0x00005850, 0x00005875, 0x0000c350, 0x00003e80, 0x00004e20, 0x000061a8, 0x000064b5, 0x00007d00, + 0x00009c40, 0x0000cf85, 0x4953424e, 0x004c4520, 0x00412f4e, 0x00640077, 0x64310062, 0x62347734, + 0x62327732, 0x32007733, 0x00773262, 0x00773531, 0x77316236, 0x00623700, 0x64317731, 0x62347731, + 0x32313000, 0x36353433, 0x41393837, 0x45444342, 0x00000046, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000021db, 0x00002222, 0x00002253, 0x00002292, 0x00001f4e, + 0x00001f95, 0x00001fc6, 0x00002017, 0x00002088, 0x000020cf, 0x00002119, 0x0000216a, 0x00001e2a, + 0x00001e71, 0x00001ea2, 0x00001eeb, 0x0000024c, 0x0000001a, 0x00000384, 0x00000009, 0x000003f0, + 0x00000004, 0x00000420, 0x00000002, 0x00000438, 0x00000009, 0x000004a4, 0x00000004, 0x000004d4, + 0x00000004, 0x00000504, 0x00000002, 0x0000000c, 0x00000000, 0x00000000, 0x00000013, 0x00000001, + 0x00000000, 0x00000013, 0x00000001, 0x00000001, 0x00000013, 0x00000001, 0x00000002, 0x00000013, + 0x00000001, 0x00000003, 0x00000013, 0x00000001, 0x00000004, 0x00000013, 0x00000001, 0x00000005, + 0x00000013, 0x00000001, 0x00000006, 0x00000013, 0x00000001, 0x00000007, 0x00000013, 0x00000001, + 0x00000008, 0x00000004, 0x00000002, 0x00000000, 0x00000004, 0x00000002, 0x00000001, 0x00000004, + 0x00000002, 0x00000002, 0x00000004, 0x00000002, 0x00000003, 0x00000004, 0x00000002, 0x00000004, + 0x00000004, 0x00000002, 0x00000005, 0x00000004, 0x00000002, 0x00000006, 0x00000004, 0x00000002, + 0x00000007, 0x00000004, 0x00000002, 0x00000008, 0x00000005, 0x00000003, 0x00000000, 0x00000015, + 0x00000004, 0x00000000, 0x00000015, 0x00000004, 0x00000001, 0x00000015, 0x00000004, 0x00000002, + 0x00000015, 0x00000004, 0x00000003, 0x00000016, 0x00000005, 0x00000000, 0x0000000e, 0x00000012, + 0x00000000, 0x00000006, 0x00000007, 0x00000000, 0x00000006, 0x00000007, 0x00000001, 0x00000006, + 0x00000007, 0x00000002, 0x00000006, 0x00000007, 0x00000003, 0x0000001a, 0x00000008, 0x00000000, + 0x0000001a, 0x00000008, 0x00000001, 0x0000001a, 0x00000008, 0x00000002, 0x0000001a, 0x00000008, + 0x00000003, 0x00000004, 0x00000015, 0x00000000, 0x00000002, 0x0000000b, 
0x00000000, 0x00000002, + 0x0000000b, 0x00000001, 0x00000002, 0x0000000b, 0x00000002, 0x00000002, 0x0000000b, 0x00000003, + 0x00000003, 0x0000000c, 0x00000000, 0x00000001, 0x0000000a, 0x00000000, 0x00000001, 0x00000009, + 0x00000000, 0x00000001, 0x00000009, 0x00000001, 0x00000001, 0x00000009, 0x00000002, 0x00000001, + 0x00000009, 0x00000003, 0x00000001, 0x00000009, 0x00000004, 0x00000001, 0x00000009, 0x00000005, + 0x00000001, 0x00000009, 0x00000006, 0x00000001, 0x00000009, 0x00000007, 0x00000001, 0x00000009, + 0x00000008, 0x00000001, 0x0000000d, 0x00000000, 0x00000001, 0x0000000d, 0x00000001, 0x00000001, + 0x0000000d, 0x00000002, 0x00000001, 0x0000000d, 0x00000003, 0x00000002, 0x0000000f, 0x00000000, + 0x00000002, 0x0000000f, 0x00000001, 0x00000002, 0x0000000f, 0x00000002, 0x00000002, 0x0000000f, + 0x00000003, 0x00000003, 0x00000010, 0x00000000, 0x00000001, 0x0000000e, 0x00000000, 0x08000000, + 0x08400000, 0x08800000, 0x08c300a7, 0x09000000, 0x09400000, 0x09800000, 0x09c00000, 0x0a01c000, + 0x0a404038, 0x0a804040, 0x0ac04048, 0x0b004050, 0x0b420058, 0x0b8201ab, 0x11800000, 0x11c00000, + 0x12000000, 0x12400000, 0x12800000, 0x12c00000, 0x00000001, 0x00001c08, 0x00101c09, 0x00201c0a, + 0x0000bd08, 0x00209d09, 0x00309d0a, 0x00011f08, 0x00113e09, 0x00311e0a, 0x00010309, 0x00000000, + 0x0000ffff, 0x00004300, 0x46020f1f, 0x43010f1f, 0x44020f1f, 0x45020f1f, 0x601207ef, 0x601307ef, + 0x601407ef, 0x601507ef, 0x801607ef, 0x253207c2, 0x25330fc2, 0x25340fc2, 0x25350fc2, 0x1152079d, + 0x1253079d, 0x7014079d, 0x7015079d, 0x601203c8, 0x601307c8, 0x601407c8, 0xbb150720, 0x02172701, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00560000, 0x00420074, 0x007f0078, 0x008c0089, 0x004200c0, 0x00dc0042, 0x00051f00, 0x35040b08, + 0x0200001d, 0x00000000, 0x00000000, 0x00000000, 0x00000c00, 0x00000000, 0x00000000, 0x00000000, + 0x00003650, 0xc8b00000, 0x00010000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x010000d0, 0x0004fe00, 0x00e0927e, 0x02f802f8, 0x008980f9, 0x48fe0102, 0xcf22f901, 0x008f0099, + 0x9ffd1000, 0x1f1bf404, 0x00f7808b, 0x108a100c, 0x017e00ef, 0x908b00e1, 0x100c00f7, 0x00ef208a, + 0x00de5c3e, 0x00f7a08b, 0x108a100c, 0x017e00ef, 0x208a00e1, 0xb08b00ef, 0x100c00f7, 0x00e1017e, + 0x00f7808a, 0xc17e100b, 0x908a00e0, 0x100b00f7, 0x00e0c17e, 0x00f7a08a, 0xc17e100b, 0xb08a00e0, + 0x100b00f7, 0x00e0c17e, 0xffffffdf, 0xc3008900, 0x009ff601, 0x01c0008f, 0xc700f9cf, 0x99b34c99, + 0xb3019400, 0xf8f50190, 0xde983e02, 0x00f9cf00, 0xb34c9ec7, 0x016d00e9, 0xf501e0b3, 0xad3e02f8, + 0x008900de, 0x99cf0102, 0x00008f00, 0x049ffd10, 0x00ed008f, 0x890e1bf4, 0xb500ef80, 0xf03e80f9, + 0xfeb500de, 0xdef03e80, 0xed008900, 0x809fb500, 0x0089f4bd, 0x9fb500ed, 0xc0008f81, 0x00f9cf01, + 0xb34c99c7, 0x00fb0099, 0xf50190b3, 0xfd3e02f8, 0xf9cf00de, 0x4c99c700, 0xd10099b3, 0x0190b300, + 0x3e02f8f5, 0x9000df12, 0x95b601ff, 0xfa1bf401, 0xf6b0f9b2, 0x550cf401, 0xf00096b0, 0x00890b2c, + 0x804c00de, 0x0022bc01, 0x00ed008a, 0x940002bc, 0x19bc0701, 0x7e1bb210, 0x8900e101, 0x3f00de02, + 0x0704b61f, 0x180009bc, 0x0e3f0119, 0xb6fff4f0, 0x94f008f4, 0x18e4b6ff, 0xbc1094b6, 0x9ebc909f, + 0x2029bc90, 0xf7110049, 0x008f0092, 0x008900ed, 0x9ff60484, 0x40004f00, 0x1d0099b8, 0x009ff602, + 0x99b8010f, 0xf6020100, 0x99b8009f, 0xf6020600, 0x008f009f, 0xf5b600e2, 0x0d008908, 0x1094b600, + 0x8f059ffd, 0xfd020000, 0x9afe059f, 0xdfd43e00, 0x00000000, 0x00e2007e, 0x00dfdc3e, 0x00dfe03e, + 0x84fe20fb, 0xf880fc00, 0xc2008900, 0x0099cf01, 0xffff94f1, 0xff440bf5, 0x273ef4bd, 0x40df00df, + 0x89008241, 0xf601c100, 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, 0x9fb2009f, 0x00df123e, + 0x01c20089, 0xc40099cf, 0x1bf5019f, 0xe93efe98, 0x38df00de, 0x89008207, 0xf601c100, 0xf1df009f, + 0xb8800000, 0x02010099, 0xcf009ff7, 0x9fb2009f, 0x00dead3e, 0x004f80f9, 0x0148fe01, 0xf000f9ce, + 0xf9f71095, 0x07004f00, 0xf000f9ce, 0xf9f71095, 0x04004f00, 0xf000f9ce, 0xf9f71095, 0x0084fe00, + 0x00f880fc, 0x48fe80f9, 0xfe02f801, 0x80fc0084, 0x80f900f8, 0x48fe020f, 0x11004901, 0x89009ff7, + 0xfe00e084, 0xff0f0093, 0xf7050049, 0x547e009f, 0x107e00e0, 0x02f800de, 0x84feff0a, 0xf880fc00, + 0x8f80f900, 0xbf00faa0, 0x0148feff, 0xf4048992, 
0x9fa0fc30, 0xe23e943d, 0xa92000e0, 0x9001bb92, + 0xb4b301aa, 0x8992f800, 0x899fbf04, 0xbf00faa0, 0xf4f9a699, 0x047e070b, 0x84fe00e5, 0xf880fc00, + 0x8f80f900, 0xbf00faa0, 0x0148feff, 0xf4048992, 0x9fa0fc30, 0x263e94bd, 0xb93c00e1, 0x01cc92f8, + 0x9099af3c, 0xc4b30199, 0x8992f400, 0x899fbf04, 0xbf00faa0, 0xf4f9a699, 0x047e070b, 0x84fe00e5, + 0xf880fc00, 0x8f80f900, 0xbf00faa0, 0x0148feff, 0x92fc30f4, 0x9fa00489, 0x00e1583e, 0xf80202f8, + 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, + 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, + 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, + 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, + 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, + 0xfaf87cd1, 0xc173e31c, 0xa81042ef, 0x9da9c582, 0x9e6ed39d, 0x0cffe48e, 0xc9a42fc3, 0x6c242d07, + 0xe0f25002, 0x786761d3, 0x4c729aa8, 0x634749ac, 0xc91f57cb, 0x9d2fa3ef, 0x27c2ed5c, 0x8309de9a, + 0x4366642d, 0x55bc761c, 0xb81ab63b, 0x5ab68624, 0x3e120628, 0x18b0f2dd, 0xba076e72, 0x4e335007, + 0x4ede9d16, 0x57f03932, 0x475da457, 0xf5d5dd40, 0x0af3af8e, 0x2aab4ee8, 0xd3c3444c, 0xb5b1beae, + 0xa4811e02, 0xe46ad3f5, 0x80058851, 0x69047abd, 0xf42d8ff2, 0x43cb0a46, 0x5c4aa831, 0xfa9294d4, + 0x1dafaf88, 0xb0c97a4f, 0x492c8580, 0x54ce3e71, 0x858d73b8, 0xebfffea9, 0x9a1c7bff, 0x27321302, + 0x5268ac78, 0xd3165783, 0x09e96806, 0x13a9d897, 0xd77b1c34, 0xb84e8190, 0x229f825c, 0xb979da4b, + 0x08532d9c, 0x225ac931, 0xd5968a43, 0x76359d9e, 0x3792f89e, 0xb9fadebf, 0xe5490357, 0x47cef641, + 0xb238bc49, 0xc211023e, 0x17e99490, 0x2d06f251, 0x62e5f542, 0x4010d8c2, 0x0598980a, 0xce9638e6, + 0xf9faacfd, 0x9b917a13, 0x0825ae34, 0xd78a353d, 0x3b56fcd8, 0x780d49fc, 0x1fd3f77d, 0x45905905, + 0x84931a5e, 0xce03e165, 0xa7b10792, 0x8ed21679, 0x8f1f42ba, 0x6372fd75, 0xe8743cd4, 0x8ec1d798, + 0xe6bbc56b, 0x41f05909, 0xc24c3906, 0x7ed3cf8a, 0x805129e2, 0x6beaf9fd, 0x54e4238f, 0x1ed151a1, + 0xce91e626, 0xfd87f03a, 0x52f3043e, 0x06089767, 0xb76f4108, 0xa0fd2d54, 0xe209a3d9, 0x4a54b642, + 0xb70ec014, 0x365656cb, 0x955a7ce0, 0x92bb2d8e, 0xab98dc82, 0x69ecdbc1, 0xcacf5e44, 0x2a8ffbe1, + 0xa4c9cd61, 0x960e199e, 0xfcbf1671, 0xaf7bdbf9, 0x0635d125, 0x4959a274, 0x4f2ca71e, 0x287cbec4, + 0x026ecdee, 0x8d9946bd, 0x4b47a91b, 0xe40b8e49, 0xbca71cbc, 0x054c397e, 0xb2c646a8, 0x04f920b4, + 0x569b369b, 0xbfafef73, 0xea8761cb, 0xc86270b6, 0x7e6a1f2f, 0x55043f7f, 0xed887ded, 0x115023bc, + 0x81b057c0, 0x3f22ee1c, 0xd4eb0e34, 0x475d183d, 0x2f224670, 0x7ae28e78, 0x15d9e103, 0x49333382, + 0x5f1edd08, 0x26e8f38f, 0x9d6a3190, 0xbe96f092, 0xa6262763, 0xa9e1d52c, 0x3b3f393d, 0x25f58f9d, + 0xd8759cad, 0x5d18e948, 0xde761a5d, 0xc21a8f86, 0x35ef3106, 0x951cc567, 0xca484820, 0xd38bf362, + 0xb4e4f4e8, 0xcac22298, 0x5d1d7cc6, 0x8ee946a2, 0x729b2d12, 0x6a398983, 0xd6fd1839, 0x105dbb5a, + 0x6d39e369, 0x8b5c9029, 0x8e1e47db, 0xeaf66d7a, 0x7d928338, 0x34170545, 0x0d0fb022, 0x5c68527a, + 0x01d654d4, 0xde2fa3d6, 0x38ee5952, 0xee0a5c13, 0xf90451af, 0xa3878302, 0x700872b1, 0x4e65e6fa, + 0x4365b854, 0x34bff622, 0x88d18b76, 0x2b16cbdb, 0xd59c6017, 0x015495f6, 0xe3e96281, 0x3fae98d7, + 0x55123375, 0x08de34a7, 0x21788e38, 0x6080bc12, 0xc476124e, 0xb41db2cf, 0x875c0df4, 0xb2664c28, + 0x8bacd71c, 0xf79e2af9, 0x2dd49ef4, 0xf46ac923, 0x370a2135, 0xcc94d42e, 0x1af8a59f, 0x2c63dfe1, + 0x96821178, 0xb8f7455a, 0x8563f556, 0x6dc1a69b, 0x389b9cbb, 0x2672395c, 
0x955ebc48, 0x55252947, + 0xa3bb2831, 0x81bfedc0, 0xa8494950, 0x0967f788, 0xd17de828, 0xf98d77d4, 0xc9632ad8, 0x0642dccf, + 0xe58d3e48, 0x2e514e2d, 0x0500ca8c, 0x8749052a, 0x5bc243a5, 0x4e13a51a, 0x226a49df, 0x605d3f20, + 0x992216c9, 0x7b12410c, 0x4b0f2384, 0x8dfaccc1, 0xd3bdefe4, 0x1ae7b200, 0x7766c22e, 0x6b557fca, + 0x95c9f5bd, 0x5db28248, 0xe1e236c9, 0x93428867, 0x89c3cbdf, 0x96d36236, 0xe0a0b87f, 0x04f631a2, + 0x00cadbbd, 0x44f77c75, 0xab47247f, 0x4d2fed17, 0xad54196f, 0xddc150c2, 0xb801f5c2, 0xce3f5526, + 0x373a52bf, 0x14526108, 0xb257c0d8, 0xbd5ad0de, 0x298f2c5f, 0x240580dc, 0x02ac7512, 0xfd111ea7, + 0x0746b0dc, 0x530892df, 0x8d81fa85, 0xaabedca5, 0x1ba01786, 0x32ffa473, 0x747b0791, 0x5b903064, + 0x0eab9146, 0x4fa5f75a, 0xaefdd0f6, 0xb1b71657, 0x9617b9b7, 0xba139933, 0x4ccbe38d, 0xb1b50ce5, + 0xdbf941d2, 0x2c9ae116, 0x6deaaf3f, 0x0c1d4412, 0x0465c342, 0x57dfac55, 0x9625b4fd, 0x7a4598ed, + 0x0c4c4be7, 0xa5f64ddc, 0xf6972222, 0x160bb345, 0x4372c123, 0xb056290b, 0xa6debb86, 0xfbb298e7, + 0x67cd9c99, 0xa8f16115, 0xafdddf42, 0x22190f63, 0xc90b3942, 0x6f7c52c0, 0xa2d0fa29, 0x31a3078c, + 0x0dbb4c31, 0xc1327f2d, 0x398e1581, 0xdaa7b5db, 0xe97cf8b2, 0x85b1bc42, 0xa80afd22, 0x686c9c3d, + 0x0076e256, 0x5fb57461, 0x19991f13, 0x523b3dea, 0x5dcd7cf5, 0xa452383a, 0x06caafa7, 0xeddb2847, + 0xc0fd636c, 0xfea1c3a2, 0xcfe4714b, 0x56006a18, 0x6473cc9f, 0x4c9365a7, 0x28389dc8, 0x4c08e730, + 0x60f0e8f2, 0xa4c75360, 0xfd2b8bd6, 0x636f1981, 0x8209ec68, 0x339735cf, 0xff2d002a, 0xb3488334, + 0xd63bc870, 0x35ee8478, 0x479f1e85, 0xbffeecdc, 0x28f9c7c8, 0x57590a31, 0xc8e19145, 0x75d7f32d, + 0x9b757088, 0x9a845d60, 0xfcf7ad09, 0x5c9ceae5, 0x7c571a8a, 0x5996b814, 0x985edbb7, 0x5e6cb570, + 0x2d8691d8, 0xe2ce3dcd, 0xc9e0ba16, 0x6f3c4fb6, 0xbc3b4763, 0x0f9a9f63, 0x9c124445, 0xbd62fce5, + 0xa87e0be2, 0x8cdccbc8, 0xd9747f4c, 0x8625b880, 0x6484b435, 0x0709890d, 0x5016659d, 0x5cf5f969, + 0x18b8a3de, 0x0fb8b5fd, 0xbd760132, 0xa3fb714f, 0x8ffc2eb6, 0x99ab7e16, 0xcc0641d7, 0x8588401f, + 0x0917c4c3, 0xc32da415, 0x10bb1875, 0xf8320ee6, 0xa50e1e76, 0x806eb785, 0xf462d198, 0x5396252b, + 0x880a570d, 0xa8fb2680, 0x49236cf9, 0x6a9eff69, 0xbb904b6c, 0xce755ab0, 0xb224e890, 0xe172ff57, + 0x8f5419d9, 0x25339e01, 0xb57851cd, 0xa844ffd6, 0x8806f29d, 0x1d37f4eb, 0xc850a379, 0x4d5ab71a, + 0xe33539b1, 0xe6125ea0, 0x4a9ccae6, 0xec8f12d0, 0xa27ef35d, 0x32b176dd, 0x292ba5e2, 0x420ec721, + 0x3bd09005, 0x367c5c71, 0x00ff48b2, 0xe4904d91, 0x85de03d6, 0x2f775d4b, 0xe5fecfb6, 0x83003d5c, + 0x46b3a876, 0x44ac5b3d, 0x453bcc72, 0x7fd94d7f, 0x3f7a6646, 0xc979b1a8, 0xaeea8b71, 0xa0edf7da, + 0x9d37712a, 0x32b87315, 0xdf50ade8, 0xccf2e499, 0x4111b9f0, 0x891411f8, 0xd96cca33, 0x545e1d32, + 0xbc3b4763, 0x0f9a9f63, 0x9c124445, 0xbd62fce5, 0xd79dbf34, 0x848a9853, 0x024ee1a3, 0x7fc81624, + 0xe09902d8, 0xee801988, 0xf9c4cfa5, 0xb2cde958, 0x914bf836, 0xe7cf7319, 0x0efe2519, 0x47ff3772, + 0x8f7eac01, 0xcb32ab4d, 0x3de2c2b1, 0x6d91a665, 0x7f898991, 0x460a7231, 0x4f430841, 0x6be64e96, + 0xf42f32c9, 0x5b36ad7f, 0xf93a45bd, 0x9474d74c, 0x5dcd7cf5, 0xa452383a, 0x06caafa7, 0xeddb2847, + 0xae0e0ba6, 0xb40dfc0a, 0x27688b5f, 0x8810df39, 0xc5c48476, 0xa3e63004, 0xbffccd8e, 0x89905f90, + 0x986ac0a1, 0x55e54c7f, 0x8d45084d, 0x572c2cc6, 0x7c8674ae, 0x0e191e51, 0x344428c1, 0x27afff85, + 0xfbdaaf61, 0x2c115e43, 0x4725c239, 0x514be641, 0xb2ff17f6, 0x28012dd0, 0xea41ab40, 0x0b88311d, + 0xcc3eeef0, 0x3aedefe3, 0x469375db, 0x05ad1adb, 0x51e974f4, 0x984d0166, 0x996b280a, 0xae051d54, + 0x642aeede, 0x37e64ed1, 0xf9ce9262, 0x3e0581da, 0x7c7ffc22, 0x1cf4a7f3, 0x159bee05, 0x63b99269, + 
0x19aeef60, 0x44547574, 0x5f0a0bc4, 0xfb500103, 0x2f9c4919, 0xd25e062f, 0x2a3432aa, 0x72d9b3ee, + 0x6f12a604, 0x60b6021b, 0xb8a16a27, 0x1fa5f9b3, 0xd7c3bd78, 0x8c0f584e, 0x598dfe5d, 0x92316dcf, + 0x0564f7c2, 0x1b6d6cea, 0x04809ebd, 0xd2c81b0d, 0x74e4771a, 0xa8c7af3e, 0x0ecc3e55, 0x35e24550, + 0x0b50eb46, 0xb6e78942, 0xd491127f, 0x7c6505a7, 0xfa96175c, 0xa9443486, 0x41d8169a, 0x3e52015f, + 0xf8c9cad9, 0xd9527199, 0x907d6c62, 0x1eabd29f, 0x4752d17a, 0x2112b8f4, 0x0d713eec, 0x11267b05, + 0x505a3435, 0x139a9c9f, 0x2fd33aac, 0x0cf1a90b, 0x5d099ac4, 0x7a263556, 0x8b2b7116, 0x7a5734a3, + 0x63611adf, 0x83a975e2, 0x3dbb1ca2, 0x94e246f4, 0x8703cd64, 0xb1806828, 0x4ad7ef76, 0x38620b00, + 0x912c359f, 0x7c1d2641, 0xc908af47, 0x4ecf65d2, 0xf1754020, 0x2b94f528, 0xa783e92d, 0xbf094b90, + 0xbfd6ee80, 0x694a640b, 0xe2b35604, 0xac70c43c, 0xc03b9180, 0x14ec1311, 0xac076e73, 0xbe231df0, + 0x4e5fc898, 0xb32aa03d, 0x5913152c, 0x0687effc, 0xc449a86a, 0x7d94f915, 0x657b596f, 0xc0354b05, + 0x850d4c6f, 0x00123127, 0xd27c3834, 0x0d75e252, 0x5226407c, 0xa6727e77, 0xd0176cbf, 0x5cd731ca, + 0xff7199a3, 0xa8ee06e0, 0x426b80ba, 0x3f5561ca, 0x2cada27b, 0xeedc1f3a, 0x47be9e77, 0x9aa38776, + 0x74ce8807, 0x6745a3fa, 0xa46be3e5, 0x1c4dea25, 0x0e88a35b, 0x726e3920, 0x8ada9ae4, 0x69ee8d26, + 0xd856ff7a, 0x4428005f, 0xee9c628e, 0x80644b45, 0x3677effc, 0x859ccf77, 0x850aa1a6, 0xb6cc898c, + 0xbb9c5577, 0xfc769e2a, 0xb619ec3e, 0xa714ddee, 0x64b75940, 0x33c10070, 0xedce3c2c, 0x9851702b, + 0xc9f9e81a, 0x17fd83f5, 0x9c30f342, 0xe2bc1ea1, 0x8c7fd21f, 0x504a840e, 0x4a032f2b, 0xcbd52ed7, + 0xd2a6a916, 0x951671bb, 0x000848a0, 0xf809c052, 0x16a749e3, 0x28eb261f, 0x4176c210, 0x32cd257e, + 0x7006d075, 0x7da62133, 0x41e97339, 0xdcfaa814, 0x7bbd4349, 0x71258b58, 0xe0e8d58d, 0xecc9e669, + 0x0e3552ca, 0xca410a26, 0xfb674a72, 0x2ddebc3b, 0x512c5002, 0x92aa90cf, 0x9b2dee78, 0xdcb5a2aa, + 0xc6af65b0, 0xa5885e16, 0xd7ed21b2, 0xa1256f89, 0x62565b70, 0xb7cfce2a, 0x05d55e07, 0x8937f886, + 0xd640b328, 0x87f88568, 0x7f439b0c, 0xfa95c7df, 0x3e761328, 0x83694487, 0x53ee9e83, 0xa6d9eeae, + 0x90a32e90, 0x88fc3b19, 0x5fe24a72, 0x982c12a9, 0x951f0990, 0x8c420a00, 0x064dd22f, 0xe0265841, + 0x361f339c, 0x1564d6a8, 0xc222a0b1, 0x623b078d, 0x583346a9, 0xde3494e2, 0x92171472, 0x576f4f2d, + 0x8a273c1d, 0xe431c07a, 0xbe48bde7, 0xa9edcf84, 0x9cadb744, 0x5bd24cf1, 0x4399e07d, 0xe845f2c9, + 0x7735157d, 0x6f1a832a, 0xdab40b1d, 0x61d19272, 0x54b2e5ef, 0x8df27303, 0x1d935201, 0x7dcf6043, + 0xa1328c74, 0x335c6f0f, 0xf7a49e91, 0x41a77ae4, 0x2c0976bf, 0x748db1c4, 0xd9f9c7cf, 0x95430128, + 0xf62ccae5, 0x7d13f61d, 0x6e813f25, 0xb1f3c57d, 0x100655e6, 0xebf1a73f, 0x785de689, 0xbc3e4f0d, + 0x2984936d, 0x5c3ef3f2, 0xdfb92086, 0x49bf9921, 0x7b61bc94, 0x4f9bddda, 0x451af388, 0x819c09a1, + 0x67e09f67, 0x9274b321, 0xa161055d, 0xaa4f4aae, 0x1ebeb3eb, 0xf5cdb0d1, 0x1f341d5c, 0x863bd9ba, + 0x6c62f6dd, 0x1bc8f43e, 0xce26808d, 0xd127f40d, 0x98aa7f1c, 0x8efd0487, 0x16e22280, 0x49b802ff, + 0xd5a3d907, 0x7c8e96eb, 0x579a130e, 0x3b796619, 0x9d3d28b5, 0x09c1be27, 0x7fff261f, 0x3c857ba1, + 0x5b28a0a8, 0x1abf9e78, 0x8a584497, 0x3cd64986, 0x417a3c54, 0x1045c8e3, 0x052c0a63, 0xd84960f5, + 0x1617c22f, 0x9e41079c, 0x3bc2dbbc, 0x8c85a541, 0xa6ad8c2d, 0x9159fa92, 0x856f59b7, 0xc14627ae, + 0xbb904b6c, 0xce755ab0, 0xb224e890, 0xe172ff57, 0xa4d66604, 0xa4f22a42, 0x8676c3d9, 0xe57d48f9, + 0x4328cc75, 0x26af8bb3, 0x9822e817, 0x700356db, 0x5a52ea5f, 0xfc7d8957, 0xeb6438f6, 0xdc2e5c02, + 0xa48f2cf5, 0xed6a3249, 0xf8dab6a0, 0xe6fdd951, 0x4328cc75, 0x26af8bb3, 0x9822e817, 0x700356db, + 0x5a52ea5f, 0xfc7d8957, 
0xeb6438f6, 0xdc2e5c02, 0xa48f2cf5, 0xed6a3249, 0xf8dab6a0, 0xe6fdd951, + 0x4328cc75, 0x26af8bb3, 0x9822e817, 0x700356db, 0x5a52ea5f, 0xfc7d8957, 0xeb6438f6, 0xdc2e5c02, + 0xa48f2cf5, 0xed6a3249, 0xf8dab6a0, 0xe6fdd951, 0x4328cc75, 0x26af8bb3, 0x9822e817, 0x700356db, + 0x5a52ea5f, 0xfc7d8957, 0xeb6438f6, 0xdc2e5c02, 0xa48f2cf5, 0xed6a3249, 0xf8dab6a0, 0xe6fdd951, + 0x4328cc75, 0x26af8bb3, 0x9822e817, 0x700356db, 0x5a52ea5f, 0xfc7d8957, 0xeb6438f6, 0xdc2e5c02, + 0xe8096636, 0x29850ac3, 0x312b8eaa, 0x1c2a200e, 0x5abd8429, 0x9b7de3a8, 0x3600e34d, 0x789b3baa, + 0xad9d7aef, 0x90031884, 0x8d6e997c, 0x7191231b, 0xefe80f6f, 0xcc81927a, 0x18bd08b5, 0x9c4f76c6, + 0x141f4b4a, 0x5ecfb35a, 0xa810f1b3, 0x718f6a16, 0x34d6a982, 0x84e60914, 0x8aee9de1, 0xafd3e188, + 0x1bc818a6, 0x026aa636, 0x069c1d1b, 0x42823f54, 0xf6d50f74, 0x804f8e46, 0x11fdedc1, 0x66958232, + 0xc6607e44, 0x2dd68b60, 0x85d35f0c, 0xe690ffd6, 0xc5846f47, 0x52ca5c59, 0x1e9aeec1, 0x9cf432e7, + 0x3ddeb948, 0x788b9238, 0xa95429d6, 0x6661ba77, 0x487907ff, 0x5bf23fe5, 0x2c1825ea, 0xc0c86c2f, + 0xc2171a29, 0xbe90b52e, 0x168be84e, 0xd24ba12b, 0x3ebc4b49, 0x2f51096a, 0x1efba77b, 0xf792c8b8, + 0x53edffc6, 0xb1fa610f, 0x5d625444, 0x3e8d180e, 0x9ed65ae2, 0x2c77d978, 0xe1eb9d7e, 0xa71e4dcf, + 0x16d5d3fd, 0x64d4bd31, 0x99b82978, 0x817b685a, 0x8c340d8b, 0xa22c6665, 0x3fdd356c, 0x17a02ce0, + 0x693ce418, 0x5d2a9525, 0x622d6284, 0x19a7f53c, 0xadc90738, 0x2550fc9b, 0xe65b8ce3, 0x45ef671c, + 0xf369e7da, 0x25244855, 0x7553ead9, 0x997a310a, 0xaa3d8bc1, 0x42cd3466, 0xac60a686, 0xebf3625b, + 0x72304006, 0x566c0e1d, 0xd1ae9e50, 0x70195df4, 0xf74f064a, 0x70548131, 0x21cfbf73, 0x089cd191, + 0x1c26b681, 0x40058444, 0x7699e83a, 0x09331713, 0x0eb84a9b, 0x00d82ce8, 0x55f7a6e9, 0x7606d041, + 0x92713d2b, 0xb262a690, 0x5ccc200d, 0xf124ab30, 0x7cc30d74, 0x3f264a54, 0xdffbac6d, 0x468e53a3, + 0x6b2712f4, 0x2e3459bc, 0xe922ec60, 0x6ec5785f, 0xc2b67cb7, 0x6b75fe23, 0x163d725a, 0xfb0323f0, + 0x5a41fe61, 0xbd35d03c, 0xc0696805, 0x07a8ba38, 0x067056b6, 0xe5b0653d, 0x6d957bb9, 0x7477c9b7, + 0x9b8f9b58, 0xca908f0c, 0xe8d8d358, 0x13d9c7b0, 0xf06250f4, 0x780196b1, 0xed08885d, 0xc40877b3, + 0xcb4422c0, 0x442561b2, 0xd0014821, 0x6ba04ccd, 0x14f38043, 0xfdb110b4, 0xa9ddf308, 0x8fcad54d, + 0x1b3c5a49, 0x4a812e49, 0x6355ef2f, 0x0e267bd6, 0xb0ba83ff, 0xbcf32229, 0x0198f9cb, 0x9ebaf83a, + 0x9f583600, 0xf4407538, 0x2ae808b5, 0x195f52e8, 0xfc81fd27, 0xdeb30fe4, 0x5ec89a95, 0xaa66f91c, + 0xb2a6387c, 0x11c058ec, 0x4982816f, 0x9984fe3c, 0x86cd5692, 0x7f0b199f, 0x3774ac75, 0x90c83e70, + 0xe5cffa1a, 0x5ab5e1fb, 0x265185c3, 0x90c7552b, 0x828d834d, 0x16325963, 0x2b46a9bf, 0x3e8ff4f3, + 0x77c4ec49, 0x937c290f, 0x7a85e419, 0x1279edb4, 0x0659dc1e, 0x172558fc, 0x507c6e4c, 0x89eb3b4d, + 0x856cdaf3, 0xbf8842dc, 0xe328552f, 0xaf417b6e, 0x57f0bd6b, 0xbcc4fc66, 0x9e993182, 0x0bf6d94e, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000002, 0x00000000, 0x00000000, 0x00003300, 0x00000000, 0x0000de00, 0x00000000, + 0xe4f7f5ef, 0xee6f0434, 
0xa742283d, 0xb837517d, 0x49e4d804, 0xd8d4d342, 0xa7c8bf31, 0xe15c25cf, + 0x3acb76c7, 0x6ca73cfc, 0xc5002b4d, 0x9dc59b12, 0x8f11945c, 0xe814622b, 0xc745df38, 0xc179824e, + 0x50ed0464, 0x40f4b2de, 0x4ea86960, 0xacb935cf, 0xf1b2d5f8, 0xa4ba5e6f, 0x90a0a1eb, 0x69da0dfa, + 0x6c9df4ee, 0x8839b01e, 0x66f4aabc, 0xb7f981bd, 0x8a4d967e, 0x56d405c6, 0xf32206c3, 0x99657af8, + 0x6ee38993, 0xcc7f8e9c, 0xf713fc76, 0xb7cd1802, 0xd1c3cd47, 0xf48ce818, 0xdc6df38a, 0x28ad0e5c, + 0x1875327b, 0x90c3b70e, 0x7f15e88c, 0x1b74c593, 0x5ac7d9b0, 0xedd70474, 0xed91afeb, 0x991150af, + 0x48da5e52, 0xe1d0112b, 0x7d36d470, 0x4d7daf15, 0x34a604f5, 0x9eab147d, 0x2dfad689, 0xf464bcb8, + 0xdd03d089, 0x86921b53, 0x9f2aed2c, 0xed4c2dfd, 0xab48a852, 0x28c194b9, 0x2389a853, 0x257bb576, + 0xb5251435, 0xfbab449e, 0xbdb9f479, 0xf2678bcf, 0xda80d6b6, 0xc74fa6cf, 0xfd2a7488, 0x6909c425, + 0x02a16ec5, 0x461d82f0, 0xd5a884a2, 0x4ec34431, 0xc169c889, 0x79d65f21, 0xc2ceb8cb, 0xd39c7ce6, + 0x88c62a3d, 0x831f5ac7, 0x33ee1132, 0x06195cbd, 0x9c372678, 0xb8b0c345, 0x83658b0f, 0x8d2ecce4, + 0xf2d64358, 0xdd67364d, 0x67644a8e, 0x50a87a4d, 0xcd0b0f87, 0xc67f1397, 0x31c5f225, 0x91a4f825, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00010001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2cf026f1, 0x3400ca44, 0x2a940a6c, 0x3a85ce16, 0x9aca2a85, 0x13819510, 0x6e0aaeba, 0xb1742a27, + 0x1721de4e, 0x339f3918, 0xbe2f3205, 0x51e4c344, 0x577fe823, 0x92b0329f, 0x7443aa3e, 0x98b71f36, + 0x0f3d1d23, 0x60cf1800, 0x7b94d115, 0xf1bf27af, 0x458b89ed, 0x2eebe4fa, 0xb0438bf8, 0x6c4879ea, + 0x62d74d7f, 0x19640e2a, 0xefb7cdf9, 0xc6aa04ba, 0xa5dcbb8f, 0xfa8142c3, 0x703938dc, 0x35117442, + 0xdf5336a2, 0xa1b5a533, 0xb1544d87, 0x21bfc273, 
0x9e5a18f2, 0x043ce72c, 0xca3660bf, 0x6bd94061, + 0x0ad69c2c, 0xcdf5d0e5, 0x124fb288, 0xf6a9af90, 0xf6b9e34e, 0x44b8aa6b, 0x9f6fa7d0, 0x88c571b1, + 0xccd9f95c, 0xdc121aaa, 0xeacf15d9, 0x4e1336bd, 0xacbc5e30, 0x8c4e374d, 0xaa8bc375, 0x26f67ae6, + 0x75a33f24, 0x966c871a, 0xa4ce0cae, 0x7b24b31d, 0x93fc7cea, 0xa8f3f2ce, 0x199a1be6, 0x58c24fa6, + 0x027aa313, 0x85b39a1c, 0xd558c04b, 0x8ca3a97c, 0x7e769b21, 0x93f0e528, 0xfa6d701a, 0x00c7b1f5, + 0x2ef890de, 0xd5dad21b, 0xaf5c0c11, 0x777585ae, 0x57efc20e, 0x102aaa14, 0x285727d6, 0x4fc0575f, + 0xe59129c1, 0x4d335e3a, 0x8cf0b7f8, 0x9a602f3e, 0xb05a515b, 0xf4bdb164, 0x57f94aeb, 0x58558b23, + 0xf71e8185, 0x9dc466e4, 0x55ae607e, 0xaba144de, 0x20f77067, 0xde424f60, 0xdbd7755d, 0x48ec98ec, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x577d3031, 0xdbcce9e5, 0x875ef27b, 0x7df0d67b, 0x211c233a, 0x703b6f8d, 0x836cd31b, 0x6a6286a0, + 0x1c423e74, 0xea669363, 0x16362c50, 0x74492f0a, 0x4685ef82, 0xd4e75c4f, 0x1c5bfba3, 0x31258e1d, + 0xe036f43e, 0x3649bad3, 0x119b1eb2, 0x6a0a305a, 0xe7e3cab7, 0x5728be4f, 0xd15226ec, 0x1ebf9b71, + 0x421cf563, 0x475b473c, 0x865da3aa, 0xc87c6fa1, 0xbaa1487c, 0x477d7a8b, 0x4d4eb6dc, 0x48a56340, + 0x9a1816b2, 0x277b0285, 0x2de13925, 0x8a1505ba, 0x3a6cee1b, 0x3827315e, 0xb4e9a099, 0x0532efc1, + 0x068d503d, 0x7df1e853, 0x2b62c5f3, 0xbc6f3792, 0x188e8c5e, 0x28789515, 0x8bff63cd, 0xf7113b6b, + 0x6966556a, 0xf7b0d1bc, 0x833598e9, 0xdc91e53d, 0x37637356, 0xaf005d67, 0x8bfaebfa, 0x6f6cf626, + 0xe8f44bd2, 0x58a1dcbc, 0xd559ab97, 0x36f10b94, 0xe506f8a6, 0xb51b9a87, 0x5a41657e, 0xd5fceb04, + 0xe347a680, 0xfc189930, 0x77e01a6b, 0xeeee34ae, 0x16ac288f, 0x92eb930e, 0xa3b053a6, 0x0d1212eb, + 0xf4b1adaf, 0x3a69213d, 0x0cfebcdd, 0x99e75d41, 0xcc152f7a, 0x99ce7993, 0x93f2c8b9, 0xd2766c82, + 0x760aa309, 0xd56341bd, 0x14992f22, 0x86616b17, 0x87a9c2e1, 0xf2385b56, 0x4823d497, 0x7d2b347e, + 0x0e53457d, 0xf7795318, 0xdec3c062, 0x65283a27, 0x0b84c304, 0xfee01b13, 0x36bb50a7, 0x8062cdd9, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x3a16e326, 0xba6b6ea3, 0x87a41305, 0x8f25c7a1, 0x33c8446a, 0xc71830a5, 0xa6396ea5, 0xaca6ee44, + 0x58e60046, 0xb8c9d1da, 0x5b48c2ac, 0x7c81985b, 0xa3c7950d, 0x475262b8, 0xc32028c8, 0xcda615d9, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +// Array of APP offsets and sizes +// Defined as follows: +// StructureVersion +// NumApps(N) +// Code Entry Point +// App 0 APP Version +// App 0 Code Offset (In Blob) +// App 0 Code Size +// App 0 Code IMEM Offset +// App 0 is secure (Boolean 1/0) +// App 0 Data Offset (In Blob) +// App 0 Data Size +// App 0 Data DMEM Offset +// App 1 APP Version +// App 1 Code Offset (In Blob) +// App 1 Code Size +// App 1 Code IMEM Offset +// App 1 is secure (Boolean 1/0) +// App 1 Data Offset (In Blob) +// App 1 Data Size +// App 1 Data DMEM Offset +// . . . . +// . . . . 
+// App N-1 APP Version +// App N-1 Code Offset (In Blob) +// App N-1 Code Size +// App N-1 Code IMEM Offset +// App N-1 is secure (Boolean 1/0) +// App N-1 Data Offset (In Blob) +// App N-1 Data Size +// App N-1 Data DMEM Offset + + +const NvU32 soe_ucode_header_lr10_dbg[] = { + /* .version = */ 1, + /* .numApps = */ 3, + /* .codeEntryPoint = */ 56832, + /* .appVersion = */ 1, + /* .appCodeStartOffset = */ 0, + /* .appCodeSize = */ 56832, + /* .appCodeImemOffset = */ 0, + /* .appCodeIsSecure = */ 0, + /* .appDataStartOffset = */ 56832, + /* .appDataSize = */ 13056, + /* .appDataDmemOffset = */ 0, + /* .appVersion = */ 1, + /* .appCodeStartOffset = */ 69888, + /* .appCodeSize = */ 1024, + /* .appCodeImemOffset = */ 56832, + /* .appCodeIsSecure = */ 0, + /* .appDataStartOffset = */ 74240, + /* .appDataSize = */ 7424, + /* .appDataDmemOffset = */ 56832, + /* .appVersion = */ 1, + /* .appCodeStartOffset = */ 70912, + /* .appCodeSize = */ 3328, + /* .appCodeImemOffset = */ 57856, + /* .appCodeIsSecure = */ 1, + /* .appDataStartOffset = */ 81664, + /* .appDataSize = */ 0, + /* .appDataDmemOffset = */ 0, +}; + +const NvU32 soe_ucode_data_size_lr10_dbg = 20416; + +#endif //_SOE_UCODE_LR10_DBG_H_ diff --git a/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_prd.h b/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_prd.h new file mode 100644 index 000000000..61377f5d1 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/soe/bin/g_soeuc_lr10_prd.h @@ -0,0 +1,2655 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018- NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +/* + + DO NOT EDIT - THIS FILE WAS AUTOMATICALLY GENERATED + +*/ + +#ifndef _SOE_UCODE_LR10_PRD_H_ +#define _SOE_UCODE_LR10_PRD_H_ + + +const NvU32 soe_ucode_data_lr10_prd[] = { + 0x00fec0d0, 0x0004fe00, 0x0017167e, 0x08f802f8, 0xa4b300f8, 0xb4890a00, 0x9abf000f, 0x1100a0b3, + 0x0d00b0b3, 0xa001aa98, 0xf8a43dba, 0x0a02f800, 0xbd00f8ff, 0xfc30f494, 0x40fe02f9, 0x04009001, + 0x0db209a0, 0x0003237e, 0xa4b3afb2, 0x0dbf1601, 0x0a01d4b3, 0x002fe189, 0xa43d9d20, 0x00006b3e, + 0xfc09040a, 0x0bf4f9a6, 0xfbff0a05, 0x337e0405, 0xa0330000, 0xa0330a04, 0x02f80600, 0xb37e00f8, + 0xafb20003, 0x0801a4b3, 0x00f8a43d, 0xfc09040a, 0x0bf4f9a6, 0x0a02f807, 0xbd00f8ff, 0xbdc4bdb4, + 0x007e7ed4, 0xf900f800, 0xb2a2b222, 0xb2c0b2b1, 0xb21bb22a, 0x7eff0d0c, 0x3300007e, 0xfbf400a4, + 0x01907e21, 0xb3afb200, 0x3d0801a4, 0x0a00f8a4, 0xa6ed0904, 0x070bf4f9, 0xff0a02f8, 0xbdb200f8, + 0xb4bdc4bd, 0x0000c17e, 0x037e00f8, 0x00f80005, 0x0009fc7e, 0x0801a4b3, 0x00f8a43d, 0xff0a02f8, + 0xfb7e00f8, 0x00f80004, 0x000a9b7e, 0x747e00f8, 0x00f8000a, 0xa0b212f9, 0x000a747e, 0x3e240190, + 0x9800013f, 0x90b30909, 0xf57e2400, 0xa0b30005, 0x2f7e0800, 0x09980005, 0x01999212, 0x981209b5, + 0x1ab21209, 0xf40096b0, 0xff09dc1c, 0xb5100190, 0x9b7e1209, 0x747e000a, 0x7a3e000a, 0x09980001, + 0x0090b304, 0x05f57e24, 0x00a0b300, 0x052f7e08, 0x11099800, 0xb5019992, 0x09981109, 0xb01ab211, + 0x1cf40096, 0xb5ff09dc, 0x9b7e1109, 0x11fb000a, 0xf9f430f4, 0x0149fe82, 0xb22c9990, 0xb2b5b2a0, + 0x019da0c4, 0x050b7ef7, 0x00adb300, 0x09b30146, 0x98013f00, 0x0fbf1409, 0xa60099b9, 0x301bf5f9, + 0x0054b301, 0x1009980e, 0x9db3fa01, 0x98012500, 0x0c411009, 0xf549a6fe, 0xfe01180c, 0x43fe0142, + 0x10079001, 0x01240690, 0x2c229001, 0xbd243390, 0x01f73e84, 0xbfee0100, 0x0090b329, 0x05987e6f, + 0x0a747e00, 0x11099800, 0x9fa6ff0f, 0xb5061bf4, 0x09981108, 0xa6ff0f12, 0x061bf49f, 0x7e1208b5, + 0xb3000a9b, 0xb20a0114, 0x051c7e3a, 0x0e099800, 0x2e0094b3, 0x2bb23ab2, 0x0005397e, 0x2200a4b3, + 0x6ab22bbf, 0x0006427e, 0x147e0ab2, 0x2e7e0001, 0xa4b30009, 0x0e7e1600, 0x683e0000, 0x0ab20002, + 0x0001147e, 0x00092e7e, 0x000a747e, 0x010e0998, 0x0090b3ed, 0x0309984e, 0x98100f98, 0x9fbc010e, + 0x0309b590, 0x08f49ea6, 0xb509bf08, 0x09980309, 0x030b980e, 0x4cb25ab2, 0xb5019992, 0x7e7e0e09, + 0x0998000b, 0x0090b304, 0x7e7ab218, 0xb20005f5, 0x01a4b3a1, 0x000e7e0c, 0x02bf3e00, 0x7e010100, + 0x0f000a9b, 0xf41fa6ed, 0x29bf1a1b, 0x230090b3, 0x2bb23ab2, 0x0005397e, 0x1d00a9b3, 0x02ef3eff, + 0xa6ee0900, 0x120bf519, 0x02ef3eff, 0xb2ef0100, 0x0c85fb1a, 0xa0b202f9, 0x7e02aa98, 0x98000b7e, + 0x0f980e09, 0x100e9802, 0x90010d98, 0x09b50199, 0xf0febc0e, 0xa6020fb5, 0x0808f4fd, 0x09b509bf, + 0xf901fb02, 0xb2a0b212, 0x00a0b3d1, 0x14a99873, 0x99b9afbf, 0xf4f9a600, 0xb4b3651b, 0xa9980b00, + 0x0094b310, 0x0010b361, 0x1009985d, 0x0cf4c9a6, 0x00b0b35a, 0x00c0b308, 0x0e0f9853, 0x0a0f0998, + 0xf4f9a6fc, 0x0ab24e18, 0x0002f47e, 0x09120f98, 0xf4f9a6ff, 0x09981f1b, 0x0090b309, 0x240a9036, + 0x0005f57e, 0x2b00a0b3, 0x19a00109, 0xb13e9ab2, 0xf9900003, 0x1209b501, 0x0003af3e, 0xb13eef0a, + 0xfa0a0003, 0x0003b13e, 0x3efe0c4a, 0x0a0003b1, 0xf411fb01, 0x82f9f430, 0x900149fe, 0xa1b22499, + 0xc4b2b5b2, 0xf7009da0, 0x00050b7e, 0x2a00adb3, 0x0019b301, 0x1998011c, 0xb91fbf14, 0xf9a60099, + 0x010d1bf5, 0x0e0054b3, 0x00101998, 0x009db3fa, 0x19980109, 0xf549a610, 0xb300fc0c, 0xb3090050, + 0x00f40049, 0xfe0143fe, 0x18900146, 0x10179024, 0x33900102, 0x28669024, 0x90b339bf, 0x987e7700, + 0x747e0005, 0x1998000a, 0xa6ff0f11, 0x081bf49f, 0x19b594bd, 0x12199811, 0x9fa6ff0f, 0xbd081bf4, + 0x1219b594, 0x000a9b7e, 
0x0a0124b3, 0x1c7e6ab2, 0x1f980005, 0x0e19980f, 0x1bf4f9a6, 0xb26ab22d, + 0x05397e3b, 0x00a4b300, 0xb23bbf22, 0x06427e7a, 0x7e1ab200, 0x7e000114, 0xb300092e, 0x7e1600a4, + 0x3e00000e, 0xb2000491, 0x01147e1a, 0x092e7e00, 0x0a747e00, 0x0e1f9800, 0x000f1998, 0xf4f9a6fc, + 0x1ab22a18, 0x4cb25bb2, 0x0002f47e, 0xb3091998, 0xb2180090, 0x05f57e8a, 0xb3a0b200, 0x7e0c01a4, + 0x3e00000e, 0x000004c9, 0x0a9b7e01, 0xa6fc0f00, 0x251bf40f, 0x90b339bf, 0x6ab22000, 0xee023bb2, + 0x0005397e, 0x3400a9b3, 0x04f63eff, 0x3eef0000, 0x400004f6, 0x0ab2fe0c, 0x890c85fb, 0xbf000fc0, + 0x8900f89a, 0xbf000fb4, 0x8900f89a, 0xbf000fc4, 0x0096b099, 0xf00bacf0, 0x00f801a6, 0x000fc889, + 0xa9a099bf, 0x000fc089, 0xa9b599bf, 0x0f00f801, 0x0fcc8901, 0xf89fa000, 0xb222f900, 0x7eb2b2a0, + 0x89000a74, 0xbf000fc8, 0xa609bf9f, 0x110bf4f9, 0x000fc089, 0x09989fbf, 0xf4f9a601, 0xc08d3318, + 0x0e98000f, 0xbfd9bf01, 0x029ebb2f, 0x18f49fa6, 0xbdd9bf20, 0x92e9bc14, 0xa0909fbc, 0x0fc88929, + 0xa099bf00, 0xb5d9bf09, 0x903e0109, 0x01010005, 0x000a9b7e, 0x21fb1ab2, 0x000a747e, 0x000fc48f, + 0x9990f9bf, 0x7ef9a001, 0xf8000a9b, 0x8002f900, 0xbf000fd4, 0x0196b009, 0x7e070df4, 0x7e00000e, + 0x3e000a8f, 0x8e0005b3, 0x18001074, 0xefbf41a9, 0x9ac4abb2, 0xf4afa6ff, 0xeaa0050d, 0xb604a994, + 0xa9bc02a4, 0x0fd489a0, 0x10bb9000, 0x7ea0a9bc, 0xf8000bee, 0x9812f900, 0x909803a9, 0x24019003, + 0x517e1ab2, 0xc489000c, 0x99bf000f, 0x150094b3, 0x7e100a90, 0xb2000c51, 0x05c77e0a, 0x062b3e00, + 0x8a1bb200, 0x7e001078, 0x89000bee, 0xbf000fb4, 0x410f1899, 0x9918a4bd, 0xf4f92641, 0x010a050d, + 0x22f911fb, 0x000fb482, 0xb0b229bf, 0x000fc081, 0x7e249b90, 0xbf000c0f, 0xbc2abf19, 0xaa900009, + 0x0c517e10, 0xb529bf00, 0x19bf0490, 0x18f409a6, 0x108c890b, 0x067d3e00, 0x10908900, 0xbf9abf00, + 0x10bb902b, 0x000c0f7e, 0x30f421fb, 0x0fc489fc, 0xbf82f900, 0x0090b399, 0x89010f10, 0xa0000fcc, + 0x08453e9f, 0x0fb48900, 0x0148fe00, 0x95bf91b2, 0x343d243d, 0x748064bd, 0xbc870010, 0xb884000f, + 0x8890000f, 0x06d23e24, 0x3d09bf00, 0x01999224, 0x09bf09a0, 0x000fd48e, 0xb6049f94, 0x9fbc0294, + 0x909ebc90, 0x99b399bf, 0xbf013d00, 0xb949bf7f, 0xf9a60099, 0xbf0f0bf4, 0x7eec0b1a, 0x3e000a95, + 0xbf000742, 0x0090b349, 0x0859983d, 0xfe940fbf, 0x02f4b604, 0x8ef0febc, 0xbc000fd4, 0x9fa6f0fe, + 0xbf221bf4, 0x92b4bd49, 0x49a00199, 0x99b949bf, 0xa079a000, 0x7e1abf15, 0xb0000a89, 0x1ff500a6, + 0x0fbf00f9, 0xfd9409bf, 0x049e9404, 0xbc0294b6, 0xd48e909e, 0x9ebc000f, 0x01999890, 0xbc02f4b6, + 0xfebcf0fd, 0x019998f0, 0xbf01f9b5, 0x049f9409, 0xbc0294b6, 0x9ebc909f, 0x019e9890, 0x9f9409bf, + 0x0294b604, 0x8f909fbc, 0xbc000fdc, 0xe9a6909f, 0xbf2c1bf4, 0x9409bf0f, 0x9e9404fd, 0x0294b604, + 0x8e909ebc, 0xbc000fd4, 0x9998909e, 0x02f4b601, 0xbcf0fdbc, 0x9998f0fe, 0x01f9b501, 0x9f9409bf, + 0x0294b604, 0x8f909fbc, 0xbc000fd4, 0x9998909f, 0x03999801, 0x1abf19a0, 0x000a617e, 0x1000a4b3, + 0xeb0b1abf, 0x000a957e, 0x0007fa3e, 0x8bb21abf, 0x000a897e, 0xf400a6b0, 0x2210401f, 0x01331001, + 0x06013433, 0x09bf06bf, 0x94ff2ec4, 0x94b6049f, 0x909fbc02, 0x000fd48f, 0xbf909fbc, 0xf5e9a699, + 0xbffeb31b, 0x009db309, 0x1abffea4, 0x7efe0b4b, 0x3e000a95, 0x330006d2, 0x890a0030, 0xa0001074, + 0x0ad07e96, 0x0485fb00, 0xc48922f9, 0x99bf000f, 0xcd009db3, 0x10a88900, 0x899fbf00, 0xbf000fc0, + 0x0099b999, 0x0bf4f9a6, 0x0fb4890f, 0x0b9abf00, 0x0a957eec, 0x0fc08e00, 0x8fe9bf00, 0x900010a8, + 0xe9a00199, 0x99b9e9bf, 0xbff9a000, 0x0094b3e9, 0x10908f1f, 0x108c8e00, 0xbffcbf00, 0x0fc88de9, + 0xa0f9a000, 0x90d9bfec, 0xd9a00199, 0x0010908f, 0x99bff9bf, 0x0a0094b3, 0x003e04bd, 0xf9bf0009, + 0x98039998, 0x003e0390, 0x2fbf0009, 0xa6040998, 
0x3e08f4f9, 0x000c517e, 0x900d0998, 0x90b3240a, + 0x517e0800, 0x0ab2000c, 0x0005c77e, 0x99bf19bf, 0x1f0090b3, 0x999819bf, 0x03909803, 0x0009083e, + 0x000fc082, 0x00109081, 0xb3100a90, 0x89bf0004, 0xbf0010ac, 0x0094b399, 0x09283e17, 0x10ac8f00, + 0x90f9bf00, 0xf9a00199, 0x000a877e, 0x32f921fb, 0x000fc481, 0xe90019bf, 0xc00099b3, 0x0a747e00, + 0x9219bf00, 0x19a00199, 0x9db319bf, 0x8900a800, 0xbf0010b0, 0x0099b399, 0x788f009d, 0xf9bf0010, + 0x0a0094b3, 0x703e04bd, 0xf9980009, 0x03909803, 0xb48314bd, 0x7882000f, 0xaa3e0010, 0x517e0009, + 0x0a90000c, 0x0c517e10, 0x7e0ab200, 0xbf0005c7, 0x410f1839, 0x99182ebf, 0xf4f92641, 0x0101050d, + 0x1100e0b3, 0x98032998, 0x0a900390, 0x0004b324, 0x10ac8fd1, 0xb3f9bf00, 0xb21b0090, 0x08487ef0, + 0x9209bf00, 0x09a00199, 0x94b309bf, 0xe03ef300, 0x10b30009, 0xcc890e01, 0x99bf000f, 0x160194b3, + 0xcc89f4bd, 0x0100000f, 0x0e7e9fa0, 0xf43e0000, 0x04bd0009, 0x000a9b7e, 0x31fb0ab2, 0xc48932f9, + 0x99bf000f, 0xf70fa3b2, 0x550094b3, 0x4b00a0b3, 0x0005987e, 0x000fc081, 0x000fb482, 0x2abf10bf, + 0x900030bc, 0x517e10aa, 0x29bf000c, 0xbf0490b5, 0xf409a619, 0x8c890b18, 0x423e0010, 0x9089000a, + 0x9abf0010, 0xbb902bbf, 0x0c0f7e10, 0x092e7e00, 0xb3010f00, 0x7e0a00a4, 0x0f00000e, 0xfbfab201, + 0x00a0b331, 0x03a99811, 0x99b9afbf, 0xf0f9a600, 0x00f80bac, 0xf41032f4, 0xa08f1132, 0xf9bf0005, + 0xa0019990, 0xf800f8f9, 0x10107e00, 0x7e00f800, 0xf8001885, 0x18b47e00, 0x8f00f800, 0xbf0005a0, + 0x0094b3f9, 0x1032f41a, 0xbd1132f4, 0x7eee0ba4, 0xf4000a95, 0x31f41031, 0xbf00f811, 0x019992f9, + 0xf9bff9a0, 0x0a0094b3, 0xf41031f4, 0x00f81131, 0x0041407e, 0xc9fe00f8, 0x1495b601, 0x3c089033, + 0xf4089630, 0x9033130c, 0x90332002, 0x90333403, 0x1c3e1800, 0x9033000b, 0x9033280f, 0x94331810, + 0x0c3e1e0a, 0x8a7e000b, 0x00f80006, 0x0013bc7e, 0xb07e00f8, 0x00f80013, 0x00f802f8, 0x00f802f8, + 0x0041557e, 0x000ad67e, 0x0041407e, 0x02f900f8, 0x0041557e, 0xcf020049, 0xdc800099, 0x0fbf0010, + 0xf4049ffd, 0x487e170b, 0x8a7e0008, 0x0fbf0006, 0xfa010049, 0x623e009f, 0xa4bd000b, 0x957eda0b, + 0x407e000a, 0x01fb0041, 0x0041557e, 0x00185a7e, 0x0800a0b3, 0x00068a7e, 0x0041407e, 0x94bd00f8, + 0x000b8d3e, 0x3cf8b93c, 0x999099af, 0xf49ca601, 0x00f8f508, 0xa23ea9b2, 0x9b20000b, 0x9001cc92, + 0xc4b30199, 0x00f8f800, 0xc73ee4bd, 0xae3c000b, 0x98be3cf8, 0x2601ee90, 0x0e0bf4f9, 0xf0fff4f0, + 0xf9bcff94, 0xa600f8a2, 0xe508f4ec, 0x00f8a4bd, 0xb508af90, 0xff0901af, 0xb502a9b5, 0xafb503af, + 0xa094bd04, 0xbd00f8a9, 0x04a9b594, 0xaf9800f8, 0x01bfb501, 0xb502f998, 0xf99802b9, 0x019bb502, + 0xb502fbb5, 0xa9bf04ba, 0xa0019990, 0xbf00f8a9, 0xa6ff09be, 0x0a1bf4e9, 0x3e04af98, 0x90000c33, + 0x293e08af, 0xff98000c, 0x01f99801, 0x9ea699bf, 0x98f60df4, 0xb9b501f9, 0x01b99801, 0xb5029bb5, + 0xfbb502bf, 0x04bab501, 0x9990a9bf, 0xf8a9a001, 0x01af9800, 0xb502a998, 0xaf9802f9, 0x01a99802, + 0x9801f9b5, 0xf99804af, 0xf49aa601, 0xa998091b, 0x01f9b502, 0xa9b594bd, 0x92f9bf04, 0xf9a00199, + 0x72f900f8, 0xb202b998, 0xb2b1b2a5, 0x1d9a95c2, 0xb2249034, 0x32e732d4, 0x009033a6, 0x7e23000f, + 0x330018d7, 0x011200a9, 0x119819bf, 0x2029bc01, 0x18f429a6, 0xa6ff0921, 0x0a0bf419, 0x3e011190, + 0x7e000cd3, 0x0b0000ea, 0x7e020002, 0x3e0018b4, 0xc4000db5, 0x45ffff23, 0xfd3f0095, 0x94f00593, + 0xd41bf503, 0x0a747e00, 0x0e067e00, 0x01fafe00, 0xfe081995, 0x7bfe009f, 0x01bcfe01, 0x95181994, + 0x9ffd082f, 0x0097fe05, 0xe4ff69c4, 0x33ffffcf, 0xf10f0070, 0xb6f8fff4, 0x253e0894, 0xf4f1000d, + 0x94b68fff, 0x95f9ff0c, 0x3e009bfe, 0xe5000d92, 0xfd010059, 0x9fb90593, 0x04f9fd01, 0x000d433e, + 0xa601f5b6, 0xfb0cf4f4, 0xbd03fd95, 0x0d543ee4, 0x01d5b600, 0xd0b3e932, 
0xee900c00, 0x00eeb301, + 0x94f0f401, 0xff5ee4ff, 0x007033ff, 0x0794f014, 0xfd1094b6, 0x39fa059e, 0x0d893e05, 0x0794f000, + 0xfd1094b6, 0x39fa059e, 0x024fbb06, 0xbc303fbc, 0x44b3505f, 0xaffe9d00, 0x00b7fe00, 0xf800cbfe, + 0x7e170003, 0x3300195a, 0x3d0600a4, 0x0a9b7e04, 0x0ddd7e00, 0xfb0a3200, 0xf4010971, 0xe43dfc30, + 0x7e009130, 0xf4000c82, 0x00f80430, 0x0efc30f4, 0x00e13001, 0x000c827e, 0xf80430f4, 0x0a747e00, + 0x10e08f00, 0xb3f9bf00, 0xbf0f0090, 0x019992f9, 0x003ef9a0, 0xea7e000e, 0x030b0000, 0x0018b47e, + 0x000a9b7e, 0x747e00f8, 0xe08e000a, 0xefbf0010, 0xf9a6ff09, 0xbf0e0bf4, 0x019990e9, 0x2c3ee9a0, + 0xea7e000e, 0x030b0000, 0x0018b47e, 0x000a9b7e, 0x12f900f8, 0xafbfb9b2, 0xa0b29e3f, 0xb204d192, + 0x18fe20cb, 0x1cb2019e, 0x1801fe35, 0xfe35029e, 0x03991802, 0xbf03f935, 0x04aa90aa, 0x7e7e0aa0, + 0x09bf000b, 0xa09091bc, 0xf411fb09, 0x62f9f430, 0xb2b2a3b2, 0xd532c6b2, 0x0c00b4b3, 0x043d09f8, + 0x000f513e, 0x0001b918, 0xff94f001, 0xe4039990, 0xf501fc91, 0xbf00be0b, 0x7eff0baa, 0x980000de, + 0xa5f9033a, 0x900149fe, 0x9aa02099, 0xb83e94b2, 0x010a000e, 0x0000f07e, 0xbf053a98, 0xbca5f940, + 0xa0a6f010, 0xa60d0df4, 0x8d18f5fa, 0x0f0b3e00, 0x02399800, 0x08f4f9a6, 0x01399835, 0xa69019bc, + 0x7518f49a, 0x3d063e98, 0x014ffe94, 0x351cff90, 0xf92003f9, 0x0902f935, 0x35fbb204, 0x4ab201f9, + 0x040dc4bd, 0x3998e5f9, 0x9849a001, 0x1db20639, 0x2bb26cb2, 0x900140fe, 0x0ab22000, 0x41fe95f9, + 0x0142fe01, 0x90271190, 0x1ab22622, 0xf77e2bb2, 0x020a0040, 0x117eab32, 0x0abf0041, 0x00043998, + 0x3f95f901, 0x7e2b3f1a, 0xbf004111, 0x009b7e3a, 0xfb0a3200, 0x5d330c65, 0x3dff5c00, 0x0f4b3e04, + 0x3dbcb200, 0x8aabb2d4, 0x7e0010e4, 0xf8000e6b, 0x0dbcb200, 0x8aabb201, 0x7e0010e4, 0xf8000e6b, + 0xb212f900, 0xb3b132a0, 0xf80a00a4, 0x100e3e09, 0x2fd88900, 0x0b9abf00, 0x00de7eff, 0xff19c400, + 0x0301008f, 0xbc099e94, 0xefcfe0ef, 0x8099b800, 0x94b60001, 0x009dcf09, 0xf0020918, 0x09352095, + 0xf40fa602, 0xf63e411b, 0xf918000f, 0x2094f002, 0x3f340bf4, 0x009433f9, 0xf8cbbc0b, 0x000fef3e, + 0xf001f918, 0x9990ff94, 0x049afd03, 0xf6f0f9bc, 0xff3e00ef, 0x1bc4000f, 0x8cfc0aff, 0xa6002fc0, + 0xc91bf4fd, 0x002fd889, 0x9b7e9abf, 0x11fb0000, 0x108b02f9, 0x127e0011, 0xa0320000, 0x0f00a033, + 0x0000ea7e, 0x7eff0bc4, 0x890018b4, 0xbf001110, 0x11148990, 0x899fbf00, 0x18001100, 0x9e35180e, + 0xf40fa60e, 0x0918101b, 0x00943319, 0x3ea4bd0a, 0x1800107a, 0xb4331d0b, 0xa4bd0a00, 0x00106f3e, + 0x3d200a90, 0x13607ec4, 0x00a6b000, 0x3d0e1ef4, 0x19093594, 0x00111489, 0x01fb90a0, 0x89ffa4f0, + 0x8c003231, 0xbc003224, 0xacbcd0a9, 0x3fdf3fa0, 0x32318cae, 0xffe9c400, 0x20909cbc, 0x3224899f, + 0xfff4f000, 0x20f0f9bc, 0x20943dfe, 0xf900f8d9, 0x32318922, 0xffa0c400, 0x3f2009bc, 0x33a1322e, + 0x7e0c00e0, 0x3e00107c, 0x8f0010e0, 0x3f001118, 0xf4a926f9, 0x947d0d1b, 0xf975fe20, 0x01f97502, + 0x00323189, 0x91209f3f, 0x00322489, 0x3d0009bc, 0x20092094, 0x3224892f, 0xfff4f000, 0x20f0f9bc, + 0xf921fbf1, 0x11188f52, 0x32943d00, 0xb2c332b4, 0x35f5b2a2, 0x143d01f9, 0x00113a3e, 0x1110203f, + 0x01229001, 0x16000033, 0x3b320a32, 0x0010af7e, 0x33015918, 0x35070094, 0x14260150, 0xfbe008f4, + 0xfc30f451, 0xb13212f9, 0x900140fe, 0x0bb20800, 0x0000127e, 0x0d1800bf, 0x0014331d, 0x4f02f806, + 0xf3f1000b, 0x19c40000, 0xf49fa6ff, 0xdf320b0c, 0x9f3ee43d, 0x02f80011, 0xdf32e43d, 0x00119f3e, + 0xc401ff12, 0x09bcfff9, 0x20991890, 0x1bf49126, 0x3ea43d09, 0x330011c9, 0x32080094, 0x33010efd, + 0x33e100f4, 0xc41100e0, 0x09bcffd9, 0x20913590, 0x0011b63e, 0x49fe02f8, 0x08999001, 0x010a9fbf, + 0xfd19f918, 0xf935059a, 0x0415fb19, 0x0011417e, 0x0600a433, 0x00f802f8, 0xf9fc30f4, 0x32a0b212, + 
0x0a747eb1, 0xfe0ab200, 0x00900140, 0x7e0bb208, 0xbf000012, 0x18f43d00, 0x163e1d0d, 0xe9180012, + 0x01ff1020, 0x2620ee90, 0x0d1bf491, 0x0100943d, 0x233ee920, 0xf9c40012, 0xe009bcff, 0x08f4fd26, + 0x7e043de0, 0x32000a9b, 0x0415fb0a, 0x0011d87e, 0x0600a433, 0x00f802f8, 0xa4f082f9, 0x32008fff, + 0x32188e00, 0x58fa7c00, 0xb298ea3c, 0xbdc632b7, 0xff94f034, 0x0005a488, 0x8120597c, 0x3e001118, + 0x52001354, 0x29e40122, 0x9494ffff, 0x034ffe08, 0xb31ff995, 0xd0100190, 0x03000000, 0xf594f0ff, + 0xbf00d51b, 0x01008e89, 0x089f9500, 0xfffff9e4, 0x18f59ea6, 0x99900010, 0x0894b601, 0x473e89a0, + 0x1f580013, 0x02195801, 0x1bf4f966, 0x000b4945, 0x000093f1, 0x0032248f, 0x18909fbc, 0x19180190, + 0xf4092601, 0x02f8051b, 0xb43d0a32, 0x00107c7e, 0x0032188e, 0x3cff09c4, 0x008ef8e9, 0xe97c0032, + 0xf0102098, 0x1975fff4, 0xf09f7c01, 0x58021f75, 0x9b520219, 0xffb9e401, 0x021b75ff, 0xfe0894b6, + 0xa995039a, 0x0190b31f, 0x3d7cb29d, 0x133f3ed4, 0x10c93f00, 0xcc9001dd, 0xff9fc401, 0x23009033, + 0x00320080, 0x80e80f7c, 0x3c003218, 0xbe66980f, 0xf00f08f4, 0xe97cff94, 0xf5b96690, 0x26ff6708, + 0xd008f4d6, 0xe4ffafc4, 0xb6fffff9, 0x49fa0894, 0x01339004, 0x0cf52566, 0x07f8ff0d, 0x81fb3ab2, + 0xc53252f9, 0xb332a4b2, 0x0800a0b3, 0x0c00b433, 0xff0a02f8, 0x0013ae3e, 0x41b2c43d, 0x0011037e, + 0x043d24bd, 0x0013a73e, 0x00101a3f, 0x01119001, 0x1700a033, 0x3c324bb2, 0x387e5d32, 0x2abc0012, + 0x00a6b020, 0x260a1ef4, 0xdf08f403, 0x51fb2ab2, 0x0000ea7e, 0xb47e2f0b, 0x00f80018, 0x0000ea7e, + 0xb47e2f0b, 0x00f80018, 0xa93f22f9, 0xd0b2c1b2, 0x3204c0b4, 0xff94f0ed, 0x08f4b9a6, 0x9402f805, + 0xb99402bf, 0x02ae9804, 0x5200c0b3, 0x3cf0f9bc, 0xa998f9ed, 0x18203402, 0x0e1e0dcc, 0x909fbc01, + 0x98019235, 0xebbb02a9, 0x909fbc04, 0x980191b5, 0x9fbc02a9, 0x029db590, 0xbc02a998, 0x9cb5909f, + 0x02a99803, 0xb4909fbc, 0x9fb505f0, 0x01a99804, 0xb5059efd, 0x543e01a9, 0xf9bc0014, 0x90e9bc90, + 0x98039cb5, 0x010901af, 0xb9049bbb, 0xf9fd0099, 0x01afb504, 0x21fbba32, 0xa0b202f9, 0x0001027e, + 0x98010998, 0xf9bc020f, 0xa29abc90, 0x0df4afa6, 0xfba4bd05, 0xb232f901, 0x98ff02a3, 0x143d01a0, + 0x0014ad3e, 0xf40109c4, 0x3998200b, 0xff1ac402, 0xb604af94, 0xafbc02a4, 0xa09abca0, 0x0014587e, + 0x0df42aa6, 0x10a2b205, 0x05b60111, 0x0004b301, 0xfb2ab2d7, 0xb282f931, 0x98643da5, 0x010801a4, + 0x00157a3e, 0xf50149c4, 0x9800ad0b, 0x61c40253, 0x021994ff, 0xbc041f94, 0x32bc209f, 0x7e0ab200, + 0xb3001458, 0x009100ad, 0x98010998, 0xf9bc020f, 0x0107b570, 0xf098323c, 0x94b30394, 0x027e2602, + 0x09980001, 0x020f9801, 0xa6c2a9bc, 0x2c08f4cf, 0xff9dcfff, 0xa9bc1ccf, 0x0109b592, 0x00153b3e, + 0x190094b3, 0xbc015f98, 0x14bd9481, 0xfd0099b9, 0x5fb504f9, 0x153b3e01, 0x9814bd00, 0x90b30309, + 0x0b183600, 0x00b03301, 0x3da4bd12, 0x7ed43dc4, 0x7e0011cc, 0x9800000e, 0x0a980309, 0xb27bb204, + 0x1895f91c, 0xb033010b, 0xa4bd0e00, 0xd43dc43d, 0x00122c7e, 0xb6016610, 0x4db30145, 0xfbff4a00, + 0x0c004d81, 0xcf0b004c, 0xcecf00df, 0x00d9cf00, 0x1bf4f9a6, 0xb5aea0f5, 0x00f801af, 0xcf0b0049, + 0xaabf0099, 0xf8a29abc, 0xb242f900, 0xb2b3b2a0, 0x00c0b3c1, 0x0b004929, 0xb20092cf, 0x0000b394, + 0xf93ab212, 0x00a03305, 0x3e010a0a, 0xcf0015e8, 0x92bb0049, 0xf491a602, 0x04b3e608, 0xa43d0a00, + 0x0015e83e, 0x05f93ab2, 0x000041fb, 0xd24e0041, 0x000005a0, 0x90fc00f8, 0x0015ea7e, 0xf90188fe, + 0x00289880, 0xb4d880f9, 0x9800000f, 0x47fe0088, 0xbd87a001, 0x0387b570, 0xfe0010f7, 0xb7b600a4, + 0x1fb9f002, 0xf9001bf7, 0x7e00f890, 0x3e0015ea, 0xf9001647, 0x3650daf2, 0x50db0000, 0x7e000034, + 0x7e0015f6, 0xf7000b68, 0xb4d80010, 0x9800000f, 0x87980088, 0x0074fe00, 0xb6028798, 0x79f00277, + 0x0017f71f, 0x28a080fc, 
0x88fe80fc, 0xf8f0fb00, 0xdaf2f901, 0x00003650, 0x003450db, 0x15f67e00, + 0x0b2e7e00, 0x16473e00, 0x1832f400, 0x50daf2f9, 0xdb000034, 0x00003250, 0x0015f67e, 0x000b207e, + 0x0016473e, 0xf91832f4, 0x0188fef2, 0x3b7e80f9, 0x80fc0040, 0xfb0088fe, 0xb201f8f0, 0xb0afb2a9, + 0x1ef400a6, 0xb0bab22b, 0x1ef400b6, 0xacfaff13, 0xf496b9ff, 0x00f8051e, 0xf801aab9, 0x01bab900, + 0xffacfaff, 0x1ff496b9, 0x16d83ef0, 0x01afb900, 0xb6b0bab2, 0xd81ff400, 0x0016dd3e, 0x95e0abff, + 0xbf9510a9, 0x01b9fd10, 0xbb01affd, 0xa4b600ab, 0x00aebb10, 0x2cdf00f8, 0xf4000006, 0x62f9fc30, + 0xffbffebf, 0x900149fe, 0x9ea01c99, 0x000630d9, 0xbd9fa000, 0x010089f4, 0x009ff601, 0xb87fff4f, + 0x003f0099, 0xdf009ff6, 0x03ff0000, 0x010099b8, 0x009ff600, 0x00ff808f, 0x0a0099b8, 0x009ff600, + 0xb810004f, 0x024a0099, 0xd6009ff6, 0x40000000, 0x011c0085, 0x000640d0, 0x02008400, 0x00008301, + 0xaaa0d210, 0xfd01beef, 0x227e080a, 0xa6fd001c, 0x280bf504, 0x0049cf00, 0xf50493fd, 0xde001e0b, + 0x00000640, 0xf2fff4bd, 0x90e9a095, 0xee9001ff, 0x04f4b304, 0x17f03ef5, 0x0059cf00, 0xf40194f0, + 0x3cf4fa1b, 0xf009b21f, 0x99fa0593, 0xf503f806, 0xf590043c, 0xf590033c, 0xf5ac543c, 0xf5c4043c, + 0xb2d0333c, 0x0393f009, 0xf80599fa, 0x003cf403, 0xe9920ebf, 0xf491a601, 0x0e98250d, 0x01e99201, + 0x0df491a6, 0x020e981a, 0xa601e992, 0x0f0df491, 0x92030e98, 0x91a601e9, 0xff700cf5, 0x000640df, + 0xd094bd00, 0x0000062c, 0xa003f9b5, 0x01f9b5f9, 0xa002f9b5, 0x589a7e0e, 0x0630d900, 0x99bf0000, + 0x09a0a4bd, 0x900149fe, 0x9fbf1c99, 0xf9a609bf, 0x7e070bf4, 0xfb0042e9, 0x30f40465, 0xd112f9fc, + 0x0000062c, 0x40fe19bf, 0x08009001, 0xcf7e09a0, 0x0fbf0019, 0xa4f019bf, 0xf4f9a6ff, 0xe97e070b, + 0x15fb0042, 0x062cdf04, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0xf40031f4, 0x49fe0028, 0xd99fbf01, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4efbf00, 0x49fefc30, + 0xf89fa001, 0xbf9fbf02, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xf400f804, 0x12f9fc30, 0x00062cd1, + 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf001a81, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xd90415fb, + 0x00001438, 0xf9fc30f4, 0x062cd112, 0x9b180000, 0x0c9a180d, 0x40fe19bf, 0x08009001, 0xe67e09a0, + 0x0fbf001b, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, 0x0000062c, 0x40fe19bf, + 0x08009001, 0xe67e09a0, 0x0fbf001b, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, + 0x0000062c, 0x40fe19bf, 0x08009001, 0x517e09a0, 0x0fbf001b, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, + 0x2cdf0415, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x1400899f, 0x0099cf02, 0xf40194f0, 0x1f7e180b, + 0x008f0047, 0xf9ce0213, 0x0195f000, 0x3e00f9f7, 0xf80019b5, 0x0149fe02, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdff830, 0xf9000006, 0xfeffbf02, 0x99900149, + 0x3d9fa008, 0x2fe1d9f4, 0x9f200000, 0xcf06004f, 0x004e00ff, 0x00eecf07, 0xcf020049, 0x9ffd0099, + 0x10ef9504, 0xfd00eeb9, 0xffb9049e, 0x049fff00, 0xf40109c4, 0x5e7e070b, 0x09e40044, 0x0bf41000, + 0xfe010f2f, 0x008e0149, 0x99900289, 0xce9f2007, 0xfe0f00e9, 0xf7049ffd, 0x08d900e9, 0xbf000014, + 0x014bfe9a, 0xbb90010c, 0x006e7e07, 0x0009e400, 0x00907380, 0x19827e08, 0x01004900, 0xfe0090f7, + 0x99900149, 0xd99ebf08, 0x0000062c, 0xe1d99fbf, 0x3f00002f, 0xf4efa69a, 0xe97e070b, 0x05fb0042, + 0x062cde08, 0xefbf0000, 0xfefc30f4, 0x010a0149, 0x9fbf9fa0, 0xf9a6e9bf, 0x7e070bf4, 0xf40042e9, + 0x00f80430, 0xdffc30f4, 0x0000062c, 0xffbf02f9, 0x900149fe, 0xa0b20499, 0xa9989fa0, 0x0194f003, + 0xbf0d0bf4, 0x1c227eaa, 0x1ad23e00, 0xcfa9bf00, 0x0f98009a, 0x01099803, 0xf0020e98, 0xa9ff02f4, + 0xf09ea694, 0x9a320b9c, 0x0c00f0b3, 0xf0009630, 
0x9a320b9c, 0x900149fe, 0x9fbf0499, 0x00062cd9, + 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40405fb, 0x12f9ec30, 0x900149fe, 0x2cd10899, 0xa0000006, + 0x019bb59a, 0xb5029cb5, 0x9bb2039e, 0xdcb219bf, 0xda0140fe, 0x00001aa4, 0xa0180090, 0x15a97e09, + 0xbf0fbf00, 0xf4f9a619, 0xe97e070b, 0x15fb0042, 0x062cd914, 0x99bf0000, 0x8efc30f4, 0xfe00b800, + 0xf9a0014f, 0xc400efcf, 0x1bf404f9, 0x3ea43d09, 0xc5001b7b, 0xe9f604f9, 0xfe010a00, 0x9fbf0149, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, + 0x9fa00149, 0x00900089, 0xf00099ce, 0x0bf40194, 0xf1008f1e, 0x00f9ce00, 0xf71095f0, 0xffb800f9, + 0xce025200, 0x95f000f9, 0x00f9f710, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xf00149fe, 0x9fa003b4, 0x8903a4f0, 0xb6009400, + 0xabfd02a4, 0x009af705, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x30f400f8, 0x062cdffc, 0x02f90000, 0x49feffbf, 0x04999001, 0x9fa0a0b2, 0x000a747e, 0x01c0008f, + 0xc700f9cf, 0x90b34c99, 0x90b34700, 0x02f8f601, 0x001c403e, 0xc700f9cf, 0x90b34c99, 0x90b30e00, + 0x02f8f601, 0x001c543e, 0x01c20080, 0x7e0000cf, 0xfe000a9b, 0x99900149, 0xd99fbf04, 0x0000062c, + 0x0ab299bf, 0x0bf4f9a6, 0x1caa3e28, 0xc1008900, 0x0090f601, 0x0000f1df, 0x0099b880, 0x9ff70201, + 0x009fcf00, 0x543e9fb2, 0xe97e001c, 0x05fb0042, 0xfc30f404, 0x00062cdf, 0xbf12f900, 0x0149feff, + 0xb2089990, 0xb29fa0a0, 0x0a747eb1, 0xc0008f00, 0x00f9cf01, 0xb34c99c7, 0xb33e0090, 0xf8f60190, + 0x1cd13e02, 0x00f9cf00, 0xb34c99c7, 0xb30e0090, 0xf8f60190, 0x1ce53e02, 0x0a9b7e00, 0x0149fe00, + 0xbf089990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x1d3a3e30, 0xc1008900, 0x0090f601, 0x010099b8, + 0x0091f600, 0x0000f2df, 0x0099b880, 0x9ff60202, 0x009fcf00, 0xe53e9fb2, 0xe97e001c, 0x15fb0042, + 0x062cdf04, 0xfebf0000, 0xb8fc30f4, 0x000180aa, 0xb60149fe, 0x9ea009a4, 0xbf00aacf, 0xa6f9bf9e, + 0x070bf4e9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0x03010089, 0xb6fc30f4, 0xa9bc09a4, + 0x0149fea0, 0xabf69fa0, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xde00f804, 0x0000062c, 0x30f4efbf, 0x20008afc, 0x0149fe03, 0xaacf9fa0, 0xbf9fbf00, 0xf4f9a6e9, + 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0x00899fa0, 0x9af60320, + 0xbd400f00, 0x009ff694, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x2cde00f8, 0xbf000006, 0xfc30f4ef, 0x0321008a, 0xa00149fe, 0x00aacf9f, 0xe9bf9fbf, 0x0bf4f9a6, + 0x42e97e07, 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x1fa9959f, 0xa4f0c920, + 0x02a0b303, 0x03a0b30c, 0x01a4b308, 0x3ebaa00a, 0xbd001e57, 0xfeb9a094, 0x9fbf0149, 0x00062cd9, + 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, + 0xa0e2a9c7, 0xeaa9c7b9, 0xaae7c9a0, 0xdaa00154, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x01c4b39f, 0x82a9e70e, 0x3ed9a003, 0xe7001ed1, + 0xb30116a9, 0x0f0e0090, 0x11004919, 0xf8009ff7, 0x0149fe02, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, + 0xe97e070b, 0x30f40042, 0xd900f804, 0x0000062c, 0x30f499bf, 0x014ffefc, 0xa9e7f9a0, 0xaac702e2, + 0x06a0b39a, 0x06a6b020, 0xb30f0cf4, 0xb31604a0, 0x3e2505a4, 0xb0001f21, 0x0cf409a6, 0x1f2b3e1a, + 0x0294b600, 0x3e04b9b5, 0x92001f34, 0x94b602af, 0xf9b9bc02, 0xbf0149fe, 0x062cd99f, 0x99bf0000, + 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x1fa9959f, + 0xa4f0c920, 0x02a0b303, 0x03a0b30c, 0x01a4b308, 0x3ebaa00a, 0xbd001f7b, 
0xfeb9a094, 0x9fbf0149, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, + 0xf9a0014f, 0xa0a2a9c7, 0xe8a9c7b9, 0xaae7c9a0, 0xdaa00154, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xb60149fe, 0x9fa00ca5, 0x1101c4b3, 0x07ffff89, + 0xa094a9ff, 0x1ffd3ed9, 0xffff8900, 0x94a9ff07, 0x0f0d0bf4, 0x11004919, 0xf8009ff7, 0x0149fe02, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xd900f804, 0x0000062c, 0x30f499bf, + 0x014ffefc, 0xa9e7f9a0, 0xaac702e2, 0x05a0b39a, 0x05a6b020, 0xb30b0cf4, 0x3e3700a0, 0xb3002064, + 0xb00e06a0, 0x0cf409a6, 0x20573e1e, 0x0294b600, 0x3e04b9b5, 0x9200206e, 0x94b602af, 0xf9b9bc02, + 0x00206e3e, 0x0049190f, 0x009ff711, 0x49fe02f8, 0xd99fbf01, 0x0000062c, 0xf9a699bf, 0x7e070bf4, + 0xf40042e9, 0x00f80430, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0x959fa001, 0xc9201fa9, 0xb303a4f0, + 0xb30c02a0, 0xb30803a0, 0xa00a01a4, 0x20b53eba, 0xa094bd00, 0x0149feb9, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0xa9c79fa0, + 0xc7b9a0a2, 0xc9a0e8a9, 0x0154a9e7, 0xaac7d9a0, 0x00a0b370, 0x49190f0e, 0x9ff71100, 0xfe02f800, + 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, + 0xfefc30f4, 0xa5b60149, 0xb39fa00c, 0x891101c4, 0xff07ffff, 0xd9a094a9, 0x0021503e, 0x07ffff89, + 0xf494a9ff, 0x190f0d0b, 0xf7110049, 0x02f8009f, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, + 0x42e97e07, 0x0430f400, 0x2cd900f8, 0xbf000006, 0xfc30f499, 0xa0014ffe, 0xe2a9e7f9, 0x9aaac702, + 0x2005a0b3, 0xf405a6b0, 0xa0b30b0c, 0xb73e3700, 0xa0b30021, 0xa6b00e06, 0x1e0cf409, 0x0021aa3e, + 0xb50294b6, 0xc13e04b9, 0xaf920021, 0x0294b602, 0x3ef9b9bc, 0x0f0021c1, 0x11004919, 0xf8009ff7, + 0x0149fe02, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, + 0x30f4ffbf, 0x0149fefc, 0xa9959fa0, 0xf0c9201f, 0xa0b303a4, 0xa0b30c02, 0xa4b30803, 0xbaa00a01, + 0x0022083e, 0xb9a094bd, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x2cde00f8, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0xa2a9c7f9, 0xa9c7b9a0, 0xe7c9a0e8, 0xa00154aa, + 0xbfffbfda, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, + 0xc0b39fa0, 0xc0b31001, 0xc0b30c0c, 0xc4b3080d, 0xa9e70a14, 0xd9a0024c, 0xbf0149fe, 0x062cd99f, + 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cd900f8, 0xbf000006, 0xfc30f499, 0xa0014ffe, + 0xe2a9e7f9, 0x9aaac702, 0x2e06a0b3, 0xf406a6b0, 0xa0b30f0c, 0xa4b31604, 0xd63e3305, 0xa6b00022, + 0x280cf409, 0x0022e03e, 0x0049190f, 0x009ff711, 0xe93e02f8, 0x94b60022, 0x04b9b502, 0x0022e93e, + 0xb602af92, 0xb9bc0294, 0x0149fef9, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xf400f804, 0x82f9d430, 0x301590b4, 0xc1b027e1, 0x0ad1b00b, 0x94b6f4bd, 0x0c91b002, 0x900149fe, + 0x9fa04499, 0x20079990, 0x0b99929f, 0x95b29fa0, 0xa0049992, 0x9297b29f, 0x9fa00499, 0x00062cdf, + 0x90ffbf00, 0x4efe1499, 0xa0a6b201, 0x34ee909f, 0xb4b20209, 0x84bde9a0, 0x14bd34bd, 0x0024473e, + 0x227e6ab2, 0x49bf001c, 0x4bfea2b2, 0x014cfe01, 0x9044bb90, 0x95f94bcc, 0xb31100b4, 0x008e0209, + 0x9e0309b3, 0x010db300, 0x499800a8, 0xb27cb201, 0xfe5bb22a, 0xdd90014d, 0x3295f938, 0x0be0b40c, + 0xfd3ed4bd, 0x5fbf0023, 0xf9a6e9bf, 0x34381bf4, 0xe89827b0, 0x987fbf01, 0xb03302e9, 0xb0b40a00, + 0x90b9bc0c, 0x1bf4f9a6, 0x1448df1e, 0xf9180000, 0x0094330c, 0x90f1b206, 0x48d920ff, 0xa600002a, + 0xed1bf4f9, 0x130010b3, 0xbf0c1c35, 0x0118b55b, 0x79bf1ba0, 0x900219b5, 0xee9001dd, 0x0ab0b40c, + 
0x08f4dba6, 0x242d3ea4, 0x02499800, 0x4dfe5cbf, 0xb22ab201, 0x34dd901b, 0x2d3e95f9, 0x10b30024, + 0x49980f00, 0xb25cbf03, 0xf91bb22a, 0x4b903495, 0x11009433, 0x14bdff09, 0xa00e91b0, 0xa094bd79, + 0x01339059, 0xb4046690, 0x39a60d90, 0xff1408f5, 0x900149fe, 0x9fbf4c99, 0x00062cd9, 0xa699bf00, + 0x070bf4f9, 0x0042e97e, 0xf42c85fb, 0x2cd9fc30, 0xf9000006, 0xf499bf72, 0x48d0fc30, 0xfe00002a, + 0xff90014f, 0x3ff9a024, 0xb2a6b209, 0xb2c3b2b5, 0x009d33d4, 0xb4bd0111, 0xda16044c, 0x00001448, + 0x000b947e, 0x00020cd9, 0x989cbf00, 0x91b2019d, 0x07b294bd, 0x8a0091b0, 0xdb02c000, 0x000001fc, + 0x48d0e43d, 0x7e000014, 0xbd002303, 0xb309bf24, 0x981e1394, 0x1d98021c, 0x02099803, 0xdb040a98, + 0x000001cc, 0x91b0010e, 0x23037e00, 0xb309bf00, 0x98350494, 0x1d98041c, 0x040a9805, 0xdb020998, + 0x000001dc, 0x91b0010e, 0x23037e00, 0x081c9800, 0x98091d98, 0x21b0040a, 0x01dcdb00, 0xe43d0000, + 0x0023037e, 0x94b309bf, 0x1c981b05, 0x071d9806, 0xb0040a98, 0xdcdb0021, 0x3d000001, 0x23037ee4, + 0xb309bf00, 0x98351594, 0x1d980c1c, 0x040a980d, 0xdb020998, 0x000001ec, 0x91b0010e, 0x23037e00, + 0x0a1c9800, 0x980b1d98, 0x21b0040a, 0x01ecdb00, 0xe43d0000, 0x0023037e, 0x94b309bf, 0x1c981b16, + 0x0f1d980e, 0xb0040a98, 0xecdb0021, 0x3d000001, 0x23037ee4, 0x20009000, 0x1bf507a6, 0x0109ff33, + 0x4d940920, 0x145cd902, 0x4cdf0000, 0xbc000014, 0xff0ad0d9, 0x002a4cde, 0xa6f9bf00, 0x2f1bf469, + 0xa601f998, 0x271bf459, 0x3308f918, 0xb3210194, 0xb0140130, 0x08f40136, 0xb3ff0a0d, 0x3e110234, + 0x980025ea, 0xec3e03fa, 0xdabf0025, 0x9020ff90, 0xfea620dd, 0x09c51bf4, 0xf4a9a6ff, 0x190f0d1b, + 0xf7110049, 0x02f8009f, 0x900149fe, 0x9fbf2499, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xfb0430f4, 0x30f40475, 0xd112f9f4, 0x0000062c, 0x40fe19bf, 0x10009001, 0x09a0caa0, 0xcbb594bd, + 0x02c9b501, 0x00140cd9, 0xfe9fbf00, 0x99900149, 0x019bb508, 0xb5b69aa0, 0xd8fab811, 0xb17e0002, + 0x0fbf001c, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40c15, 0xd252f9e8, 0x0000062c, 0x40fe29bf, + 0x18009001, 0x0cb50ba0, 0xb2e5b201, 0xfea3b2d4, 0x11900141, 0x7e19a02c, 0x7e000e06, 0x98000a74, + 0x0abf010b, 0x900140fe, 0x0cb22000, 0x0026267e, 0x4cb20bb2, 0x3ab25db2, 0x000dcc7e, 0x9b7ea032, + 0xdd7e000a, 0x1fbf000d, 0x0a3229bf, 0x0bf4f9a6, 0x42e97e07, 0x1855fb00, 0xf9e830f4, 0x062cd252, + 0x29bf0000, 0x900140fe, 0x0ba01800, 0xb2010cb5, 0xb2d4b2e5, 0x0141fea3, 0xa02c1190, 0x0e067e19, + 0x0a747e00, 0x010b9800, 0x40fe0abf, 0x20009001, 0x267e0cb2, 0x0bb20026, 0x5db24cb2, 0xb97e3ab2, + 0xa032000d, 0x000a9b7e, 0x000ddd7e, 0x29bf1fbf, 0xf9a60a32, 0x7e070bf4, 0xfb0042e9, 0x2cd91855, + 0xbf000006, 0xfc30f499, 0x0a014ffe, 0x7ef9a004, 0x330042f5, 0x8a2e00a0, 0x7e120050, 0xf0001c22, + 0x0bf401a4, 0x00588a1f, 0x1c227e12, 0x00a0b300, 0x07a9c416, 0x0a0d1bf4, 0x42f57e02, 0x00a43300, + 0xfe02f806, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x140cd900, + 0x30f40000, 0xd112f9fc, 0x0000062c, 0x19bf9abf, 0xdb0140fe, 0x40000000, 0xb8080090, 0x0008b4aa, + 0x2c7e09a0, 0x0fbf009f, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x0cd90415, 0xf4000014, 0x12f9fc30, + 0x00062cd1, 0xbf9abf00, 0x0140fe19, 0x000000db, 0x08009040, 0x08c4aab8, 0x7e09a000, 0xbf009f2c, + 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xd90415fb, 0x0000140c, 0xf9fc30f4, 0x062cd112, 0x9abf0000, + 0x40fe19bf, 0x0000db01, 0x00908000, 0xc4aab808, 0x09a00008, 0x009f2c7e, 0x19bf0fbf, 0x0bf4f9a6, + 0x42e97e07, 0x0415fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0xd99fa001, 0x0000140c, 0x008f99bf, + 0x9a9001c0, 0x00f9cf60, 0xb34c99c7, 0xb34a0090, 0xf8f60190, 0x28653e02, 0x00f9cf00, 0xb34c99c7, + 0xb30e0090, 0xf8f60190, 
0x28793e02, 0xc2008b00, 0x00bbcf01, 0x040018d9, 0x05b9fd04, 0x009f2c7e, + 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x28d23e28, 0xc1008900, 0x009af601, 0x0000f1df, + 0x0099b880, 0x9ff70201, 0x009fcf00, 0x793e9fb2, 0xe97e0028, 0x30f40042, 0xf400f804, 0x0cd9fc30, + 0xf9000014, 0xb299bf22, 0x062cd1a2, 0x40fe0000, 0xc89ab801, 0x19bf0008, 0xa00c0090, 0x1c227e09, + 0xbf0ebf00, 0xbb01091f, 0xa9fd0492, 0x0bacf004, 0x0bf4efa6, 0x42e97e07, 0x0425fb00, 0xd9fc30f4, + 0x0000062c, 0x99bf02f9, 0x90014ffe, 0xf9a004ff, 0x00140cd9, 0x8f9ebf00, 0xcf01c000, 0x99c700f9, + 0x0099b34c, 0x90b301d2, 0x02f8f501, 0x00293b3e, 0xc700f9cf, 0x99b34c99, 0xb301ae00, 0xf8f50190, + 0x29503e02, 0x00f9cf00, 0xb34c99c7, 0x01770099, 0xf50190b3, 0x653e02f8, 0xf9cf0029, 0x4c99c700, + 0x480099b3, 0x0190b301, 0x3e02f8f5, 0x7e00297a, 0xd90090f2, 0x00800000, 0xf49409ff, 0x317e070b, + 0x0cd9002d, 0xbf000014, 0xc0008f9e, 0x00f9cf01, 0xb34c99c7, 0x00f30099, 0xf50190b3, 0xad3e02f8, + 0xf9cf0029, 0x4c99c700, 0xcf0099b3, 0x0190b300, 0x3e02f8f5, 0xcf0029c2, 0x99c700f9, 0x0099b34c, + 0x90b30098, 0x02f8f501, 0x0029d73e, 0xc700f9cf, 0x90b34c99, 0x90b36e00, 0x02f8f601, 0x0029ec3e, + 0x007c0d7e, 0x000000d9, 0x9409ff40, 0x7e070bf4, 0xd9007e21, 0x02000000, 0x000000df, 0xb409ff04, + 0xf4940fff, 0xbffd060b, 0x0000df05, 0x0fff0800, 0x060bf494, 0xd905bffd, 0x0000140c, 0xaab89abf, + 0x7e0008c4, 0xfe009f2c, 0x99900149, 0xd99fbf04, 0x0000062c, 0xf9a699bf, 0x00e10bf5, 0x002b353e, + 0x01c20089, 0xff0099cf, 0x0995049d, 0x0194b31f, 0x2a003e97, 0xa4efb800, 0x00890008, 0x9ff601c1, + 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, 0x3e9fb200, 0x890029ec, 0xcf01c200, 0x008f009d, + 0xd73e01c0, 0xefb80029, 0x89000890, 0xf601c100, 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, + 0x9fb2009f, 0x0029c23e, 0x01c20089, 0xff0099cf, 0x00d9049d, 0xff010000, 0x0bf59409, 0x8f3efeb9, + 0xefb80029, 0x89000898, 0xf601c100, 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, 0x9fb2009f, + 0x00297a3e, 0x01c20089, 0x8f009dcf, 0x3e01c000, 0xb8002965, 0x000884ef, 0x01c10089, 0xdf009ff6, + 0x800000f1, 0x010099b8, 0x009ff702, 0xb2009fcf, 0x29503e9f, 0x42e97e00, 0x0405fb00, 0x00062cdf, + 0xf4ffbf00, 0x49fefc30, 0x8f9fa001, 0xcf01c000, 0x99c700f9, 0x0090b34c, 0x0190b341, 0x3e02f8f6, + 0xcf002b4f, 0x99c700f9, 0x0090b34c, 0x0190b30e, 0x3e02f8f6, 0x8a002b63, 0xcf01c200, 0x49fe00aa, + 0xd99fbf01, 0x0000062c, 0xaac799bf, 0xf4f9a601, 0xb83e2d0b, 0xafb8002b, 0x89000380, 0xf601c100, + 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, 0x9fb2009f, 0x002b633e, 0x0042e97e, 0xf80430f4, + 0xfc30f400, 0x00062cd9, 0xbf82f900, 0x014ffe99, 0xa024ff90, 0x00a933f9, 0x04bd0087, 0x002a94d7, + 0x8314bd00, 0x8601c000, 0xd501c100, 0x800000f1, 0x01c20084, 0x7b987abf, 0x7e0cb201, 0xf0004212, + 0x0bf401a4, 0xbd0bb248, 0x0ad4bdc4, 0x246b7e08, 0x7ea2b200, 0x33002b3c, 0xb83300a4, 0x0004002a, + 0xc70039cf, 0x99b34c99, 0xb300dc00, 0xf8f50190, 0x2c203e02, 0x0039cf00, 0xb34c99c7, 0x00b90099, + 0xf50190b3, 0x353e02f8, 0x0090002c, 0x00119101, 0xa42404b3, 0xa00014b3, 0x002cda3e, 0x94d804bd, + 0xbd00002a, 0xc0008314, 0xc1008701, 0x00f1d601, 0x00858000, 0xfe0401c2, 0x8b988abf, 0x7e0cb201, + 0xf0004212, 0x0bf401a4, 0xbd0bb246, 0x0ad4bdc4, 0x246b7e08, 0x7ea2b200, 0x33002b3c, 0xb83100a4, + 0x0004002a, 0xc70039cf, 0x90b34c99, 0x90b37300, 0x02f8f601, 0x002ca43e, 0xc70039cf, 0x90b34c99, + 0x90b35100, 0x02f8f601, 0x002cb83e, 0x91010090, 0x04b30011, 0x14b3a624, 0x49fea200, 0x24999001, + 0x2cd99fbf, 0xbf000006, 0xa6a43d99, 0x410bf4f9, 0x002d2a3e, 0xf0004bcf, 0x6f7e01b5, 0x4a3e0047, + 0x6af6002c, 0x0035f700, 0x3e0039cf, 0xcf002c35, 
0xb4fd005b, 0x476f7e04, 0x2ccc3e00, 0x007af600, + 0xcf0036f7, 0xb83e0039, 0xe97e002c, 0x85fb0042, 0xfc30f404, 0x00062cdf, 0xbf02f900, 0x0149feff, + 0xa0049990, 0xc0008f9f, 0x00f9cf01, 0xb34c99c7, 0xb34f0090, 0xf8f60190, 0x2d493e02, 0x00f9cf00, + 0xb34c99c7, 0xb32a0090, 0xf8f60190, 0x2d5d3e02, 0x2dda7e00, 0x3009c400, 0x7e070bf4, 0xc4002e5a, + 0x0bf44009, 0x2e927e3d, 0x2dbf3e00, 0xc2008900, 0x0090cf01, 0xf40109c4, 0x713ede0b, 0x488f002d, + 0x008901b0, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, 0x3e9fb200, 0xfe002d5d, + 0x99900149, 0xd99fbf04, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0405, 0xbf000006, + 0xfc30f4ff, 0xa00149fe, 0xc0008f9f, 0x00f9cf01, 0xb34c99c7, 0xb3220090, 0xf8f60190, 0x2ded3e02, + 0x00f9cf00, 0xb34c99c7, 0xb3390090, 0xf8f60190, 0x2e013e02, 0xad248f00, 0xc1008901, 0x009ff601, + 0x99b8f4bd, 0xf6000100, 0xf2df009f, 0xb8800000, 0x02020099, 0xcf009ff6, 0x9fb2009f, 0x002e013e, + 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0xd112f9fc, + 0x0000062c, 0x048a19bf, 0xb4bd01ad, 0x900140fe, 0x09a00800, 0x0047e97e, 0x01ad848a, 0xe97eb4bd, + 0x0fbf0047, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, 0xbf000006, 0xfc30f4ff, 0xa00149fe, + 0xc0008f9f, 0x00f9cf01, 0xb34c99c7, 0xb3220090, 0xf8f60190, 0x2ea53e02, 0x00f9cf00, 0xb34c99c7, + 0xb3390090, 0xf8f60190, 0x2eb93e02, 0xada48f00, 0xc1008901, 0x009ff601, 0x99b8f4bd, 0xf6000100, + 0xf2df009f, 0xb8800000, 0x02020099, 0xcf009ff6, 0x9fb2009f, 0x002eb93e, 0xbf0149fe, 0x062cd99f, + 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0x950149fe, + 0x9fa018be, 0xb995adb2, 0x05a8df10, 0x94760000, 0x08b5b608, 0x3ee59eff, 0x26002f5c, 0x1c1bf49b, + 0x6601f958, 0x141bf49e, 0xa43df97f, 0x5808d975, 0xdf7501ff, 0x2f643e09, 0x04ff9000, 0x9433f93f, + 0x010add00, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cdf00f8, + 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x0aa9b29f, 0x00b0b302, 0x0895b60b, 0xb920a43d, 0xbf0149fe, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdffc, 0x02f90000, + 0x49feffbf, 0x04999001, 0x9fa0a0b2, 0x6500a0b3, 0xb305a998, 0xd95e0090, 0x0000140c, 0xaab89abf, + 0x7e0004e8, 0xe7001c22, 0xb30168a9, 0x4c0fff9a, 0x470090b3, 0xb314a5b6, 0x400fffaa, 0x3b00a0b3, + 0xb6050e98, 0xaf940c94, 0xa0a43d0c, 0x050e98e9, 0xf9bce9bf, 0x01efb5f0, 0xbd070f98, 0x98f9a094, + 0x0f980509, 0x9299bf07, 0xf9b50199, 0x30393e01, 0x3e350a00, 0x0a003039, 0x0149fe2e, 0xbf049990, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0405fb00, 0xd9fc30f4, 0x0000062c, 0x99bf32f9, + 0x90014ffe, 0xf9a010ff, 0xb9c4afb2, 0xf4020a03, 0xc9c4321b, 0xb8f2b2ff, 0x00392899, 0x01a2b3e7, + 0xbd029194, 0x309a3e04, 0x901ab200, 0x227e0100, 0x1190001c, 0x902aa004, 0x03a60422, 0x3ded08f4, + 0x0149fea4, 0xbf109990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0435fb00, 0xf9fc30f4, + 0x18a9bf12, 0xab1809af, 0x062cd008, 0x99b80000, 0x94003969, 0x09bf029a, 0xf00141fe, 0x119001f4, + 0xf0ffbc08, 0x44d919a0, 0xf0008800, 0xb9fd01b4, 0x05bffd05, 0x001cb17e, 0x09bf1fbf, 0xf9a6a43d, + 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0x062cdffc, 0x22f90000, 0x49feffbf, 0x0c999001, 0x9fa0a0b2, + 0xc2b2b132, 0x00b3350a, 0x09bf6900, 0x610094b3, 0xb3010998, 0x8a5a0094, 0x7e00e204, 0xc4001c22, + 0xa9c4ff1e, 0xf49ea601, 0x943d091b, 0x0031813e, 0x09080f18, 0x01e4f0f3, 0xf002abc5, 0xb9fd01f4, + 0x02f4b604, 0xbffdfe09, 0xe2048a05, 0x04b9fd00, 0x7e05befd, 0x8a001cb1, 0x7e00e204, 0x09001c22, + 0x0020b301, 0x3d292006, 0x318f3ea4, 0xfe010a00, 0x99900149, 0xd99fbf0c, 
0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x30f40425, 0x062cdff8, 0x52f90000, 0x49feffbf, 0x1c999001, 0x9fa0a3b2, + 0xc272b0b2, 0x350ad4b2, 0xc70039b3, 0x00c97300, 0xd9b300ba, 0xbf00b500, 0x009db339, 0x399800b4, + 0x009db301, 0x088a00ac, 0x227e00e2, 0xa9c4001c, 0x0d0bf401, 0x02a2a9e7, 0xbb0294b6, 0x4cfe0209, + 0x3d3ab201, 0x1bcc90b4, 0x00310a7e, 0x8300ad33, 0x00008900, 0x0301c430, 0x09bc0405, 0xff19c400, + 0xb20209bb, 0x1c227e0a, 0xff19c400, 0xe4f259bc, 0xa6ffff29, 0x071df4f9, 0xffff2fe4, 0xf4bdfe32, + 0x0032563e, 0xf090913c, 0x94b60394, 0x95a9bc03, 0x90f9493c, 0xf93201ff, 0x08f4fe26, 0xffe9c4ea, + 0x7b040090, 0x20730229, 0x49bc0d00, 0x3e143d40, 0xfe003223, 0x99900149, 0xb29b3f1b, 0x7ec4bd3a, + 0x3e00310a, 0x0a00328f, 0x328f3e02, 0xfe010a00, 0x99900149, 0xd99fbf1c, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x30f40855, 0x062cdffc, 0x02f90000, 0x49feffbf, 0x04999001, 0x9fa0a0b2, + 0xa0b3350e, 0xb43d1a00, 0x0a7ec4bd, 0xae320031, 0x0c00a433, 0xbc7e0ab2, 0xae320030, 0x900149fe, + 0x9fbf0499, 0x00062cd9, 0x3299bf00, 0xf4f9a6ea, 0xe97e070b, 0x05fb0042, 0xf430f404, 0x00062cd9, + 0xbf12f900, 0x014ffe99, 0x4b10ff90, 0xf9a003e8, 0x0016fc7e, 0x4afea0b2, 0x08aa9001, 0x817ea1b2, + 0xa08a0015, 0x227e00e5, 0xa6b0001c, 0x1c1ff400, 0x9c7e1ab2, 0xa0a60015, 0x0a090df4, 0x334b3e04, + 0x000e7e00, 0x33223e00, 0xfea43d00, 0x99900149, 0xd99fbf10, 0x0000062c, 0xf9a699bf, 0x7e070bf4, + 0xfb0042e9, 0x30f40c15, 0x062cdffc, 0x02f90000, 0x49feffbf, 0x04999001, 0x9fa0b0b2, 0x3f01ad58, + 0x02a958af, 0x000000de, 0xffdce480, 0x03f4f0ff, 0xb60093f0, 0x9cbc12f4, 0x05fefd90, 0xf0019992, + 0xf9ffff94, 0x00d073b5, 0x01cf9218, 0x01000089, 0xff08f4b6, 0xf4f195b9, 0x9fffffff, 0x01aa18b5, + 0xf401a9c4, 0x00890a0b, 0xb9fd0200, 0xffa4f005, 0xf410a9c4, 0x00d90b0b, 0xfd080000, 0xa9c405b9, + 0x0b0bf404, 0x000000d9, 0x05b9fd20, 0x00e5a08a, 0x001cb17e, 0xf97e0ab2, 0x49fe0032, 0x04999001, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x05fb0042, 0xf830f404, 0x00062cdf, 0xbf52f900, + 0x0149feff, 0xb21c9990, 0x729fa0a3, 0x71020ab1, 0xf40100b6, 0x44fe450c, 0xe4a08201, 0x18449000, + 0x723e54bd, 0x45a00034, 0x16701072, 0x050df404, 0x3bb20400, 0xffff0ce4, 0x7e7e4ab2, 0x4bbf000b, + 0x107b2ab2, 0x04229002, 0x7e0003f0, 0xbc001cb1, 0x14733030, 0xa43dd400, 0x900149fe, 0x9fbf1c99, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40855fb, 0x2cd9fc30, 0xf9000006, 0xfe99bf32, + 0xff90014f, 0xa0b2b210, 0x58c3b2f9, 0xbb5802b9, 0x00a0b301, 0x0093f056, 0x9990fc0f, 0xffb0e403, + 0x149fffff, 0xb30b1bf4, 0x3e0c0004, 0xb3003507, 0xb20e0000, 0x34157eda, 0x00a43300, 0x8b2ab230, + 0x7e07a120, 0x33003366, 0xb32200a4, 0xb01e0010, 0xccf00006, 0x01c6f00b, 0x1b723ab2, 0x7e05c436, + 0x3e003054, 0x0a003509, 0x0149fe35, 0xbf109990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0435fb00, 0xdff830f4, 0x0000062c, 0xffbf12f9, 0x900149fe, 0x40fe0c99, 0x909fa001, 0xb1b20800, + 0xe87e0cb2, 0x0fbf0050, 0x9802f958, 0x93f003ff, 0x0c94b600, 0x0035653e, 0x0bf491a6, 0x06ff9815, + 0x100099b8, 0x00f4b300, 0x3e350af3, 0x98003584, 0xf99802fe, 0xbcffbf05, 0x9ea6909f, 0xf0089cf0, + 0x9a320196, 0x900149fe, 0x9fbf0c99, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40815fb, + 0x2cdfe030, 0xf9000006, 0xb0ffbf82, 0x49fe09b1, 0x40999001, 0xa00140fe, 0x3c00909f, 0x0cb2a4b2, + 0x0050e87e, 0x395803bf, 0x03319802, 0xb60093f0, 0xe63e0c94, 0xe0b40035, 0xf49ea609, 0x1198130b, + 0x0099b806, 0x14b30010, 0xb83ef000, 0x19980037, 0x031f9805, 0xb6041e98, 0x9fbc0395, 0xf42ea620, + 0x43fe4f0c, 0x904ab201, 0x2bb23833, 0x3db2010c, 0x0031aa7e, 0xad33a032, 0xbf01a400, 0xff004f39, + 
0x90014efe, 0x99bc34ee, 0x059ffd90, 0x2cb24ab2, 0xe9a0ebb2, 0x4a7e040d, 0xa032004b, 0x7e00ad33, + 0x05199801, 0xb5019990, 0xba3e0519, 0xd4d90037, 0x9800002a, 0x49fe049e, 0x3c999001, 0x9ea09fb2, + 0x0036703e, 0x33091918, 0x98130090, 0xf1a00411, 0x14b3f1bf, 0xd43ef200, 0x39350037, 0x36a83e08, + 0x08b91800, 0x13009033, 0xa004bb98, 0xb31bbf1b, 0x3ef200b4, 0xbf0037b8, 0x7e4ab2bb, 0x32004a98, + 0x00ad33a0, 0x11bf0119, 0xbf011f98, 0x0142fe3b, 0x3fb54ab2, 0x04195801, 0x0c302290, 0x752db204, + 0xaa7e0439, 0xa0320031, 0xf200ad33, 0xb21cbf00, 0x0d2bb24a, 0x4b4a7e04, 0x33a03200, 0x00df00ad, + 0xffd92fbf, 0xb27fffff, 0xfd4ab22b, 0x2fa004f9, 0x040d3cbf, 0x004b4a7e, 0xad33a032, 0x5800c000, + 0x12980239, 0x0147fe03, 0xf00148fe, 0x31980093, 0x2c779003, 0x900c9694, 0xaf3e2888, 0xe0b40037, + 0xf46ea609, 0x19bf0a1b, 0xa0019990, 0x011b9819, 0x139815bf, 0x0c4ab205, 0x7e7db204, 0x330031aa, + 0x00a100ad, 0xffde7fbf, 0xbc00ffff, 0x3eff3035, 0x0000de94, 0xfefdff00, 0x059ffd04, 0x2c9879a0, + 0xb24ab201, 0x7e040d7b, 0x33004b4a, 0xa07500a4, 0x031b9823, 0x040c4ab2, 0xbb928db2, 0x31aa7e04, + 0x00a43300, 0x032c985f, 0x8bb24ab2, 0xcc92040d, 0x4b4a7e04, 0x00a43300, 0x021f984b, 0x29b594bd, + 0x0066b805, 0x2fb50010, 0x06229802, 0xb3061198, 0xff6f001d, 0x0037ba3e, 0x49fe3500, 0x40999001, + 0x2cd99fbf, 0xbf000006, 0xa60a3299, 0x1d0bf4f9, 0x0037e63e, 0xa00141fe, 0x3c1190fe, 0x00368d3e, + 0xba3ea032, 0xe97e0037, 0x85fb0042, 0xfc30f420, 0x00062cdf, 0xbf02f900, 0x0149feff, 0xb2049990, + 0xb39fa0a0, 0x985100a0, 0x90b306a9, 0x0cd94a00, 0xbf000014, 0xe0aab89a, 0x227e0004, 0xa9e4001c, + 0x9ab30fff, 0xb3380fff, 0xe7330090, 0xb3016caa, 0x2b0fffaa, 0x2600a0b3, 0xb6060e98, 0xaf940c94, + 0xa0a43d0c, 0x060e98e9, 0xf9bce9bf, 0x01efb5f0, 0x00385c3e, 0x5c3e350a, 0x2e0a0038, 0x900149fe, + 0x9fbf0499, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40405fb, 0x2cdfd830, 0xf9000006, + 0xfeffbf82, 0x99900149, 0xa0a6b248, 0x00a0b39f, 0x06a09821, 0x0bbffd09, 0xa601bf92, 0x110cf4f9, + 0x09010f98, 0xf4f9a6ff, 0xf4b3070b, 0x02f81100, 0x6db30205, 0x3e027d00, 0x92003b3c, 0x100c10bb, + 0x002ad4dd, 0x31aa7e00, 0x33a53200, 0x026400ad, 0x002ad4da, 0x0068db00, 0x080c0000, 0x000ba87e, + 0x7e00adb3, 0x01099802, 0xf9a60fbf, 0x027818f5, 0x0ffffee4, 0x02701bf5, 0xfe066f98, 0x99900149, + 0xbf843d44, 0x0af1b0ff, 0x94bd9ea0, 0x3e0b91b0, 0x3d003b19, 0x7e140ba4, 0xb20040e5, 0x00a4b3a3, + 0x0502f80c, 0x3b2f3e05, 0x0ae0b400, 0xb6ff89c4, 0x9ebc0c94, 0xa094bdb0, 0x04a9b5ab, 0x0fffb9e4, + 0x02241bf5, 0xb2014dfe, 0x90040c6a, 0xaa7e44dd, 0xa0330031, 0xa5320a00, 0x003b2b3e, 0xf11190b4, + 0x750fff94, 0x90b40239, 0x6c99e711, 0x03397501, 0xb61190b4, 0x39351f95, 0x11f0b408, 0x0ffff9e4, + 0x0fff9ab3, 0x6cf9e713, 0xff9ab301, 0x01090a0f, 0xbf093935, 0x0099b339, 0xd4df0090, 0x1800002a, + 0x99900dfb, 0x0ffc4a04, 0xf00991b0, 0xbb7effb4, 0x94bd0016, 0x91b0743d, 0xfffc090d, 0x91b094a9, + 0x3af13e0c, 0x0ba43d00, 0x40e57e1c, 0xb3a1b200, 0xf80c00a4, 0x3e050002, 0xb4003b58, 0x7ac40cb0, + 0x16fc7eff, 0xb5e4bd00, 0xb0b4061e, 0xfe040c09, 0xdd90014d, 0xb0babc40, 0x1bb56ab2, 0x31aa7e01, + 0x33a03200, 0x015500ad, 0xdf1090b4, 0x00ffffff, 0xa0049ffd, 0xf49fa619, 0x94bd071b, 0x1b9819a0, + 0x00b4b301, 0x0002f80c, 0x3b583e02, 0x0ce0b400, 0x92083f18, 0xb29001b9, 0x409ebc04, 0x0e00f033, + 0x00093918, 0x009d3335, 0x29900088, 0xb5f4bd04, 0x19b50414, 0x051fb503, 0x003ad73e, 0x2bb26ab2, + 0x4dfe040c, 0x3cdd9001, 0x0031aa7e, 0xad33a032, 0xb400ea00, 0x9fc70f90, 0x06f4b378, 0x014dfe28, + 0x2b906ab2, 0x904cb204, 0x857e38dd, 0xad330051, 0xbf00c800, 0x0ef0b419, 0xa0909fbc, 0x3aca3e19, + 0x07f4b300, 0xffffde29, 
0x9efd00ff, 0x042b9004, 0xb50219b5, 0x1bb50414, 0xb24cb203, 0x141d906a, + 0x0051857e, 0xd23ea032, 0x2290003a, 0xf424a604, 0x0d338d0d, 0x98008600, 0x94b30339, 0x31b50b00, + 0x3aeb3e03, 0x0df0b400, 0x1006f1b5, 0x11b00177, 0x2ad4de0d, 0xe9180000, 0xf579260d, 0x98feca08, + 0x94b304e9, 0xe3b50b00, 0x3b133e04, 0x0bf0b400, 0x1004f3b5, 0x31b00188, 0x2ad4de0b, 0xe9180000, + 0xf589260c, 0x3efdf008, 0x33003b3c, 0x98110050, 0xff0f0669, 0x69989fa0, 0x019fb506, 0x900149fe, + 0x9fbf4899, 0x00062cd9, 0x3299bf00, 0xf4f9a65a, 0x6a3e1f0b, 0xa032003b, 0x2b3e0532, 0x3505003b, + 0x003b2f3e, 0x2f3e0205, 0xe97e003b, 0x85fb0042, 0xf430f428, 0x00062cdf, 0xbf22f900, 0x0149feff, + 0xfe149990, 0x9fa00142, 0x229094bd, 0x7e29a00c, 0x33004863, 0x0b5600a0, 0x2a9cda04, 0xbcb20000, + 0x397e2db2, 0xa032004e, 0x4300a433, 0x900141fe, 0x1ab21011, 0x003c087e, 0xa433a032, 0x1cbf3100, + 0xa4bd2bbf, 0x001428d1, 0xd1797e00, 0xb31aa000, 0x3d1a00a0, 0xd2117eb4, 0x00a0b300, 0x001abf12, + 0xd74b7eff, 0x3beb3e00, 0xfeff0000, 0x99900149, 0xd99fbf14, 0x0000062c, 0x0a3299bf, 0x0bf4f9a6, + 0x42e97e07, 0x0c25fb00, 0x00062cde, 0xf4e9bf00, 0x4ffefc30, 0x09f9a001, 0xbfa9a00e, 0x3de9bfff, + 0xf4f9a6a4, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdfd830, 0xf9000006, 0xf4ffbf62, 0x49fef430, + 0x4c999001, 0xa93f9fa0, 0xa398a6b2, 0x01903301, 0x489d330d, 0x3c3e0360, 0x3018003e, 0x010d3304, + 0x3298011b, 0x04319803, 0xdc4ba43d, 0x01004c10, 0x000680d5, 0x40cb7e00, 0xb35aa000, 0xf80c00a4, + 0x3e050102, 0xfe003fb3, 0x99900149, 0x2af4d428, 0x1cb20000, 0xb50741b5, 0x2bb20191, 0xb5154035, + 0x92a00642, 0xdc4ed4bd, 0x26727e10, 0x33a13200, 0x015900ad, 0x993f59bf, 0x0d009033, 0xf0014998, + 0x49b50895, 0x0680d901, 0x99bf0000, 0x33699918, 0xdf130090, 0x00002af4, 0xf101f998, 0xb5400095, + 0xf4da01f9, 0x7e00002a, 0x9800b9ce, 0x027e0230, 0xebd90001, 0xb20000a6, 0x0091b0ac, 0xb4bd0db2, + 0xc8da04bd, 0xb000002f, 0x01300101, 0x7e010e08, 0xd90013c8, 0x00002af4, 0x000000df, 0x5090351e, + 0x900149fe, 0x42fe3499, 0x909fa001, 0x010a3822, 0xcd7e2bb2, 0xa43300b1, 0x2bbf1b1f, 0xf401b9c4, + 0xb5f0141b, 0xa0010a01, 0xb20f7e2b, 0x1fa03300, 0xbd09f806, 0x7e020ab4, 0xda00b20f, 0x00002af4, + 0x00b85d7e, 0xbd014afe, 0x0dc4bdb4, 0x34aa9001, 0x0090537e, 0x003e0d3e, 0x32020433, 0x000680d9, + 0xb39abf00, 0xd97f00a0, 0x00002b0c, 0xbf019c98, 0xd9f43d9b, 0x00002af4, 0x9f35d4bd, 0x10dc4e15, + 0x0026727e, 0x083ea132, 0x0d33003e, 0x7e020703, 0x1800010e, 0x90330839, 0x3e982400, 0x2b94d903, + 0x9cdf0000, 0xa000002b, 0x043e989e, 0x98019eb5, 0xf9a00539, 0xb5063e98, 0x391801fe, 0x0090331c, + 0x2af4df21, 0x39980000, 0x2dfe9808, 0xfd2efd98, 0xf9b5059e, 0x0939982d, 0xb5059dfd, 0x087e2ef9, + 0x143d0001, 0x003e0d3e, 0xab001d33, 0x016b1801, 0x817e3ab2, 0x0209000f, 0x90014ffe, 0xf92048ff, + 0xf935943d, 0x03331802, 0xb4bd0409, 0xf335fab2, 0x01f93503, 0x000f717e, 0x003fb33e, 0xfe0140fe, + 0x00900141, 0x40119044, 0xb20142fe, 0x3c22900a, 0x2cb21bb2, 0x537ed43d, 0x39980090, 0x0a0fbf02, + 0xf4f9a601, 0x70de3c1b, 0x3f000005, 0x009033e9, 0x0a943d0e, 0x3ee9201e, 0xd9003e9f, 0x200000ff, + 0xb394f9ff, 0xb2121c94, 0xb21bb20a, 0xb6647e2c, 0x3e9f3e00, 0xb20ab200, 0x7e2cb21b, 0xfe00a9ff, + 0xbb90014b, 0xc4bebf44, 0xffdf1fa9, 0xb6e0ffff, 0xeffd1894, 0xe59eff04, 0x00d9bea0, 0xff400000, + 0x0bf494e9, 0x1fa4336b, 0x0149fe68, 0xbf409990, 0x0499929d, 0xe4339cbf, 0x03df421e, 0x89ff0000, + 0xff3fffff, 0xd9fff4ef, 0x0294b694, 0xb3e5f9ff, 0xc50b00c0, 0x093e03e9, 0xfd09003f, 0xfd16df95, + 0xf0b304e9, 0xe9c50d00, 0x3eb9a001, 0x09003f2d, 0x94e9fffe, 0x003f093e, 0x000000df, 0xffffd9ff, + 0xefff00ff, 0x94d9fff4, 0xa005f9fd, 0x0149febf, 
0x90014ffe, 0xff904099, 0xbf9ebf44, 0x049992ff, + 0x99929dbf, 0x929fa004, 0x9ea00499, 0xa0049992, 0x0a747e9d, 0x2af4d900, 0x99180000, 0x00943348, + 0x3e02f80a, 0x33003f6b, 0xfe1e0190, 0xee90014e, 0xdfe9bf38, 0xe0ffffff, 0xdf049ffd, 0x01000000, + 0xa0059ffd, 0xd9f43de9, 0x00002af4, 0x9f35010d, 0x014bfe48, 0x90014cfe, 0xcc9034bb, 0x014afe30, + 0x7e38aa90, 0x7e009053, 0x3d000a9b, 0x3fb33e14, 0xfee40100, 0x99900149, 0xd99fbf4c, 0x0000062c, + 0x1a3299bf, 0x0bf4f9a6, 0x42e97e07, 0x0c30f400, 0x922865fb, 0x9ba004a9, 0xb548ae92, 0x008906ec, + 0xe9a03000, 0x94bdafb2, 0xb24cff92, 0xf8f9a0fa, 0x7e02f900, 0xb300715d, 0xbd0a00a4, 0x40373e04, + 0x16898900, 0x0093fe00, 0x00163389, 0x890090fe, 0xfe001671, 0xa4bd0091, 0x897eb4bd, 0xa0b2000a, + 0xf400a6b0, 0xa4bd0c1f, 0x7efe0b4b, 0x7e000a95, 0x3e004140, 0xb200162b, 0x7e01fb0a, 0xf8000ad6, + 0x8f22f900, 0xb200ffff, 0xf4bfa6c0, 0xc0b3720c, 0xc9926f00, 0x049cfd01, 0xa6651bf4, 0x600cf4cf, + 0x0903bf90, 0x14f9fffc, 0x7e550bf4, 0x82000a74, 0x7f003244, 0x3240892d, 0x899abf00, 0xc2003246, + 0x9b7f0f0f, 0x5201ff79, 0x907c01d9, 0xc4f9ff90, 0x08f4cd66, 0xd0c17c21, 0xffffd9e4, 0x08f491a6, + 0xf4db6615, 0xc9e4100c, 0x2d60ffff, 0x3e009abc, 0xbd0040b5, 0x0a9b7e04, 0x40bf3e00, 0xb204bd00, + 0x0c21fb0a, 0x40417e04, 0xf900f800, 0x7eb1b212, 0xb2004041, 0x00a0b3a0, 0xbd1cb20c, 0x0b947eb4, + 0xfb0ab200, 0x7e040c11, 0xf80040cb, 0x3dabb200, 0x40e57ea4, 0x8f00f800, 0xbf001110, 0x1b9918f9, + 0x20069536, 0x18f9bfa9, 0x99c71b99, 0xf8b92024, 0x11108c00, 0x14cdbf00, 0xbec406af, 0x04e43603, + 0xf01bd918, 0xf9fd3f94, 0x1bdf3505, 0xf918cfbf, 0xcf94f01b, 0x3505e9fd, 0x327e1bfe, 0x00f80019, + 0x00111089, 0x9a1899bf, 0x24abc71b, 0x7e06a536, 0xf8001932, 0x18ff7e00, 0xf400f800, 0x82f9e030, + 0xffffc1e4, 0xb210c295, 0xffa0e4c5, 0x10a395ff, 0x48fea4b2, 0x0147fe01, 0x903c8890, 0x8bb53477, + 0x017db501, 0x0bb27ca0, 0x1ab28aa0, 0x0016fc7e, 0xa6b20bb2, 0xfc7e2ab2, 0x3bb20016, 0x1ab2a0b2, + 0x0016fc7e, 0xa1b23bb2, 0xfc7e2ab2, 0x10bc0016, 0x10699500, 0x09bcadb2, 0xf410a600, 0x00890a0d, + 0xd9bc0100, 0x017b98d0, 0xffff69e4, 0xfe100f94, 0xf9bc014e, 0x24ee90f0, 0xa0100995, 0x90d9bcef, + 0xbf01e9b5, 0x01ed98ec, 0x41fe4ab2, 0x2c119001, 0xa0011db5, 0x16fc7e1c, 0x018b9800, 0x5ab2a0b2, + 0x0016fc7e, 0xbc011b98, 0x1abf000a, 0xfbb00bbc, 0x30f42085, 0x00c0b3f0, 0x014ffe33, 0xb508ff90, + 0xfaa001fb, 0x9cbb2009, 0x0096b002, 0xbc211df4, 0xb9bcf5ac, 0x014afe94, 0xbc059ffd, 0xa9a0f5bc, + 0x9801afb5, 0xaabf01ab, 0xf81030f4, 0x014afe00, 0xbd019fb9, 0xf5bfbc94, 0xa001a9b5, 0x01ab98af, + 0x483eaabf, 0x30f40042, 0x00c0b3f0, 0x014ffe33, 0xb508ff90, 0xfaa001fb, 0x9cbb2009, 0x0096b002, + 0xbc211df4, 0x4efef4bc, 0x95a9bc01, 0xbc059ffd, 0xe9b5f4ac, 0x98efa001, 0xeabf01eb, 0xf81030f4, + 0x014efe00, 0xbd019fb9, 0xf4afbc94, 0xefb5e9a0, 0x98eabf01, 0x9c3e01eb, 0x2cde0042, 0xbf000006, + 0xfc30f4e9, 0xdd014ffe, 0x00000000, 0x34d9f9a0, 0xbf000014, 0xbf9da0ff, 0xa6a43de9, 0x070bf4f9, + 0x0042e97e, 0xf80430f4, 0x49180f00, 0x9ff71100, 0xf802f800, 0xf430f400, 0x00062cdf, 0xbf12f900, + 0x0149feff, 0xb2109990, 0x8a9fa0ab, 0x7e12004c, 0xfe001cb1, 0x3f80014a, 0xaa904c4b, 0x7ea1b208, + 0x8a001581, 0x7e12004c, 0xf0001c22, 0x1bf43fa4, 0x3e010a09, 0xb2004346, 0x159c7e1a, 0xf4a0a600, + 0x02f8e40d, 0x49fea43d, 0x10999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x15fb0042, + 0xfc30f40c, 0x00062cdf, 0xbf12f900, 0x0149feff, 0xb2089990, 0xb39fa0b1, 0xb02101a0, 0x08f401a6, + 0xb3ff0e0e, 0x00be02ad, 0x0044113e, 0x0049020f, 0x009ff60a, 0x00441f3e, 0xcf0a0049, 0xfe0e009f, + 0xf604fefd, 0xd033009f, 0x004f2d00, 0x00f9cf08, 0xf9f694bd, 0x09004000, 
0x4b0009cf, 0xcab203e8, + 0x0016fc7e, 0x0200aab8, 0x0aa5b600, 0x3e000af6, 0x400043f9, 0x09cf0800, 0x03e84b00, 0xfc7ecab2, + 0xaab80016, 0xb6000200, 0xaa920aa5, 0x000af601, 0xcf090049, 0x9af6009f, 0x2a4cd900, 0x004f0000, + 0x0191b50a, 0xf000f9cf, 0xf9f60195, 0x443f3e00, 0x0a004e00, 0x0f00e9cf, 0x049ffdfe, 0x4f00e9f6, + 0xf9cf0800, 0xf694bd00, 0x004f00f9, 0x00f9cf09, 0xf9f694bd, 0xd9f4bd00, 0x00002a4c, 0x3d019fb5, + 0x0149fee4, 0xbf089990, 0x062cd99f, 0x99bf0000, 0xf9a6ea32, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, + 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x2a4cd99f, 0x99980000, 0x0194b301, 0x7e797e0c, 0x44833e00, + 0xfe02f800, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0xfc30f400, + 0x2cd122f9, 0xbf000006, 0xfea2b219, 0x148a0140, 0x009008d1, 0x7e09a00c, 0x3f001c22, 0xbf0ebf29, + 0x08aac71f, 0xa6ff94f0, 0x0bacf0a9, 0x0bf4efa6, 0x42e97e07, 0x0425fb00, 0xdffc30f4, 0x0000062c, + 0xffbf02f9, 0x900149fe, 0x9fa00499, 0x0189008f, 0xf000f9ce, 0xf9f78095, 0x43004e00, 0x0f00e9ce, + 0x049ffdfe, 0xd000e9f7, 0x0000140c, 0xaab80abf, 0x7e00034c, 0xbf001c22, 0x03abc509, 0x034c9ab8, + 0x1cb17e00, 0x0149fe00, 0xbf049990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0405fb00, + 0xdffc30f4, 0x0000062c, 0xffbf12f9, 0xfef430f4, 0x99900149, 0x8f9fa014, 0xcf01c000, 0x99c700f9, + 0x0099b34c, 0x90b30085, 0x02f8f501, 0x00455b3e, 0xc700f9cf, 0x90b34c99, 0x90b35f00, 0x02f8f601, + 0x0045703e, 0x001b957e, 0x0bb204bd, 0xd4bdc4bd, 0x6b7e0b0a, 0xa1900024, 0x7e1ab268, 0xe4001c22, + 0xb30e00ab, 0x0b0e00be, 0xb17e1ab2, 0x0090001c, 0x2404b301, 0xbd94bdd9, 0x089130b4, 0xb00091b0, + 0xc4bd0191, 0xe43dd4bd, 0x001414da, 0x13c87e00, 0x46073e00, 0xc2008900, 0x0099cf01, 0xf40194f0, + 0x843e280b, 0x1c8f0045, 0x00890285, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, + 0x3e9fb200, 0xfe004570, 0x99900149, 0xd99fbf14, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, + 0x15fb0c30, 0xfc30f404, 0x2cd112f9, 0xbf000006, 0xf430f419, 0x1c8ab4bd, 0x40fe0285, 0x14009001, + 0xb17e09a0, 0x027e001c, 0x40d90001, 0xb0000045, 0x94bd0091, 0x9130acb2, 0x0191b008, 0x001414da, + 0x4db4bd00, 0x010e2710, 0x0013c87e, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, 0x0c30f400, 0xf40415fb, + 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf00280f, 0xa619bf0f, 0x070bf4f9, + 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf0027d6, + 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, + 0x7e09a008, 0xbf00279d, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, + 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf0028db, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, + 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf00291c, 0xa619bf0f, 0x070bf4f9, + 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x7e09a008, 0xbf0065ad, + 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xdf0415fb, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0x008f9fa0, + 0xf9cf01c0, 0x4c99c700, 0x220090b3, 0xf60190b3, 0x823e02f8, 0xf9cf0047, 0x4c99c700, 0x330090b3, + 0xf60190b3, 0x963e02f8, 0x00890047, 0x9af601c1, 0x0099b800, 0x9bf60001, 0x00f2df00, 0x99b88000, + 0xf6020200, 0x9fcf009f, 0x3e9fb200, 0xfe004796, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, + 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x01c0008f, 0xc700f9cf, + 0x90b34c99, 0x90b32200, 0x02f8f601, 0x0047fc3e, 0xc700f9cf, 0x90b34c99, 0x90b33300, 0x02f8f601, + 0x0048103e, 0x01c10089, 0xb8009af6, 0x00010099, 0xdf009bf6, 0x800000f2, 0x020099b8, 0x009ff602, + 
0xb2009fcf, 0x48103e9f, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xde00f804, 0x0000062c, 0x30f4e9bf, 0x014ffefc, 0x1ed9f9a0, 0xbf000011, 0xbf9a3fff, 0xf4f9a6e9, + 0xe97e070b, 0x30f40042, 0xf400f804, 0x42f9f030, 0x00062cd4, 0xb249bf00, 0xbd0601a2, 0xfe1cb2b4, + 0x43fe0140, 0x16009001, 0xb2203390, 0x7e39a00a, 0xbd000b94, 0x014dfe94, 0xa01cdd90, 0x75d120d9, + 0x29180209, 0xb22ab204, 0x20c4bd0b, 0x35180909, 0x01090109, 0x7e010975, 0xbf003493, 0xa649bf3f, + 0x070bf4f9, 0x0042e97e, 0xf41045fb, 0x2cdfec30, 0xf9000006, 0xfeffbf22, 0x99900149, 0xa0a1b21c, + 0xb2060c9f, 0x0140feb2, 0x0090b4bd, 0x7e0ab20e, 0xbd000b94, 0x014ffe94, 0xa018ff90, 0x041e18f9, + 0x41fe1ab2, 0x14119001, 0x010919a0, 0x75020975, 0x05090109, 0x1909f920, 0x0bb20e20, 0xb2010935, + 0x7e1cb2fd, 0x32003493, 0x00a433a0, 0xb21abf0c, 0x2f7e7e2b, 0x0149fe00, 0xbf1c9990, 0x062cd99f, + 0x99bf0000, 0xf9a60a32, 0x7e070bf4, 0xfb0042e9, 0x30f41425, 0x062cd9f0, 0x42f90000, 0x4ffe99bf, + 0x20ff9001, 0xf9a0a3b2, 0xe84bbab2, 0x16fc7e03, 0x0142fe00, 0x04bda1b2, 0x90014afe, 0xaa901c22, + 0x7ea4b214, 0x3e001581, 0xb20049b7, 0x159c7e4a, 0x7ea0b200, 0xb200000e, 0x7e2bb23a, 0x330048eb, + 0xbf1600a4, 0x0194f029, 0x0d009033, 0x08f401a6, 0x49e03edd, 0xf401a600, 0xa43d0918, 0x0049e23e, + 0x49fe040a, 0x20999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x45fb0042, 0xf030f410, + 0x00062cdf, 0xbf22f900, 0x0149feff, 0x32189990, 0xa0a2b2b0, 0x488b7e9f, 0x00a43300, 0x00043364, + 0x3e143d0a, 0x18004a30, 0x94f01129, 0x0291140f, 0x060cb4bd, 0x900140fe, 0x0ab20e00, 0x000b947e, + 0x4ffe94bd, 0x14ff9001, 0xf135f9a0, 0x02097501, 0xb2042918, 0xb2fdb20b, 0x0909202a, 0x09f92001, + 0x01093518, 0xc4bd0209, 0x7e010975, 0x33003493, 0xb20e00a4, 0x86a08b2a, 0x49727e01, 0x0149fe00, + 0xbf189990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x1025fb00, 0xd9f030f4, 0x0000062c, + 0x99bf22f9, 0x90014ffe, 0xa1b218ff, 0xb2b2f9a0, 0xb4bd060c, 0x90014afe, 0x947e0eaa, 0x94bd000b, + 0xb20140fe, 0x1400901a, 0x8b7e09a0, 0xa4330048, 0x19186100, 0x0499c711, 0x0a0094b3, 0xe43e2009, + 0xd709004a, 0x19180920, 0x014efe04, 0xfe0eee90, 0xe920014f, 0x90102995, 0xf93514ff, 0x08299501, + 0x0902f935, 0x01e93518, 0xe9750409, 0xb2947d01, 0x03f235eb, 0xe975fdb2, 0xbd1ab202, 0x34937ec4, + 0x00a43300, 0x8b1ab20e, 0x7e07a120, 0xfe004972, 0x99900149, 0xd99fbf18, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x30f41025, 0x062cdfec, 0x72f90000, 0x49feffbf, 0x30999001, 0x9fa0a1b2, + 0xc3b2b7b2, 0xa9b3d672, 0x71009e00, 0xf50100d6, 0xbd00950c, 0x0cb4bd54, 0x0140fe06, 0x900142fe, + 0x22902000, 0x7505a02c, 0x25a00205, 0x900144fe, 0x4ab22644, 0x000b947e, 0x8b7e1ab2, 0xa4330048, + 0x23356800, 0x04191803, 0x05750bb2, 0x202db202, 0x20020909, 0x10399529, 0x95012935, 0x29350839, + 0x35080902, 0x04090109, 0x09751ab2, 0x7ec4bd01, 0x33003493, 0x753300a4, 0x45750146, 0x04191802, + 0x7db24bb2, 0x49201ab2, 0xc4bd1009, 0x7e014935, 0x33003493, 0xb21300a4, 0x32004b1a, 0x0049727e, + 0x004c063e, 0x49fe350a, 0x30999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x75fb0042, + 0xfc30f414, 0x00062cdf, 0xbf62f900, 0xfea3b2ff, 0x99900149, 0x989fa01c, 0xc4b205aa, 0xfd09d6b2, + 0xbebcaebf, 0x01ef9220, 0x0cf4f9a6, 0x01af9823, 0xf9a6ff09, 0xb3190bf4, 0xa61600f0, 0x1008f42e, + 0xa690c2bc, 0x0808f492, 0x0df49fa6, 0x1802f805, 0x010a1139, 0xb30599c7, 0xb2410094, 0x32aa7e3a, + 0x33a53200, 0xbd3300a4, 0x4cb13e14, 0x0241bc00, 0x010006b1, 0x40060df4, 0x3ab20100, 0xb2b061bc, + 0x7e0d722c, 0x33004b4a, 0xbc1100a4, 0x10bc2020, 0xf414a610, 0x5a32da08, 0x900149fe, 0x9fbf1c99, + 0x00062cd9, 0xa699bf00, 
0x070bf4f9, 0x0042e97e, 0xf40465fb, 0x2cd9fc30, 0xf9000006, 0xfe99bf72, + 0xff90014f, 0xa0a0b220, 0x05ae98f9, 0xfd02c5b2, 0xf992efbf, 0xf492a601, 0xee98260c, 0xa6ff0101, + 0x1c0bf4e1, 0x1900e0b3, 0xa630bfbc, 0x1008f43f, 0xa690c3bc, 0x0808f493, 0x0df49ea6, 0x0e02f80b, + 0x4dc33e35, 0x11a91800, 0x99c7010e, 0x009db305, 0xaa7e0096, 0xa4320032, 0x8900ad33, 0xb227b200, + 0x3e24bd16, 0xe4004db9, 0xf40fff19, 0x020e090b, 0x004dc33e, 0xbf060f98, 0x019992f9, 0x0cf497a6, + 0x01f99821, 0x0bf496a6, 0x0090b319, 0xb20ab216, 0x35247e1b, 0x00a03300, 0x3e030e0a, 0xb2004dc3, + 0x7e1bb20a, 0x32004a98, 0x00a433ae, 0x060f983a, 0x9992f9bf, 0xf497a601, 0xf9981d0c, 0xf496a601, + 0x90b3150b, 0x1bb21200, 0x9f7e0ab2, 0xae320035, 0x1300a433, 0x100022b8, 0x1023bc00, 0x08f425a6, + 0xfe4e3289, 0x99900149, 0xd99fbf20, 0x0000062c, 0xea3299bf, 0x0bf4f9a6, 0x42e97e07, 0x0475fb00, + 0xd9fc30f4, 0x0000062c, 0x99bf02f9, 0x90014ffe, 0xb0b204ff, 0xb9bff9a0, 0xa6b0c9bc, 0x1708f4b9, + 0xffffd9e4, 0xa6f09bbc, 0x0b08f4fb, 0xa6010998, 0x050df4f9, 0xdc7202f8, 0xaa7eedb2, 0x49fe0031, + 0x04999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x05fb0042, 0xfc30f404, 0x2cd112f9, + 0xbf000006, 0x0140fe19, 0xb2080090, 0xb209a0cf, 0x05ab98b9, 0x9cb2deb2, 0xe07efd72, 0x0fbf004d, + 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0xf0fc0415, 0xf9fc30f4, 0xf430f4f0, 0x90b472f9, 0xfeb0b20d, + 0xbb90014b, 0x01b9b520, 0x00062cd9, 0xfe99bf00, 0xff90014f, 0xa0a5b228, 0xa009bff9, 0x0ce1b0be, + 0xc9bcd4b2, 0xf439a630, 0xd3bc1f08, 0xf4f3a6f0, 0x09981708, 0xf4f9a601, 0xb6b20f0c, 0x010024bd, + 0x273efc07, 0x02f8004f, 0x2c3e0100, 0x42bc004f, 0x0096b192, 0x060df401, 0x90010049, 0xb4bd0399, + 0x4c1497ff, 0x00da0100, 0x7e000012, 0xb2000b94, 0x723bb25a, 0x1200dd1c, 0xaa7e0000, 0xa0320031, + 0x2c00a433, 0x6c986bbf, 0x1200da01, 0x2db20000, 0x7e01004e, 0x330026d8, 0xf80a00a0, 0x4f2c3e02, + 0x3031bc00, 0xa62021bc, 0xa508f424, 0x900149fe, 0x9fbf2899, 0x00062cd9, 0x3299bf00, 0xf4f9a60a, + 0xe97e070b, 0x70fb0042, 0xfc0c30f4, 0x0430f4f0, 0x30f4f4f9, 0x062cdfe4, 0x42f90000, 0x49feffbf, + 0x2c999001, 0x9fa0a1b2, 0x00111ed9, 0x33993f00, 0x3d0a0090, 0x50c13e04, 0xfef4bd00, 0x99900149, + 0x929fa024, 0x9fa00499, 0x98049992, 0xa49805a2, 0x7e9fa006, 0x32002fb6, 0x00a033a0, 0x2ea43308, + 0x982bbf6b, 0xb9a60129, 0x013318f5, 0x0fffb9e4, 0x012b1bf5, 0xb20143fe, 0x1c33901a, 0x3db2040c, + 0x0031aa7e, 0xa433a032, 0x3fbf4200, 0x4ad93500, 0xa6534646, 0x331bf4f9, 0xed7e1ab2, 0xa4330037, + 0x49bf3600, 0x9992fd0f, 0xf49fa601, 0x4f98290c, 0xa6ff0901, 0x1f0bf4f9, 0x1c00f0b3, 0x777e1ab2, + 0xa0320038, 0x1000a033, 0x29b5ff09, 0x3329a001, 0x00b2000d, 0xaa7e1ab2, 0xa0320032, 0xa500ad33, + 0x0cb4bd00, 0x0140fe06, 0xb2160090, 0x0b947e0a, 0xfe94bd00, 0xff90014f, 0x18f9a028, 0x0bb20419, + 0x0104fdb2, 0x9f090920, 0x1909f920, 0x09010935, 0x01047503, 0xfe020975, 0x1ab20142, 0xb2242290, + 0x34937e2c, 0x33a03200, 0xbf5a00a4, 0x7e1ab22b, 0x32002f12, 0x00a433a0, 0x1112184c, 0xb30529c7, + 0xfe390094, 0x1ab20143, 0xb2203390, 0x48eb7e3b, 0x00a43300, 0x943fbf15, 0x94f00229, 0x049ffd3c, + 0x35060bf4, 0x1ab22014, 0xfd7eb43d, 0xa0330049, 0xa0320a00, 0x0050c13e, 0x1ed9010f, 0x20000011, + 0x0149fe9f, 0xbf2c9990, 0x062cd99f, 0x99bf0000, 0xf9a60a32, 0x3e110bf4, 0x000050e1, 0x50083e02, + 0x42e97e00, 0x1c45fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0xb39fa001, 0x982100a0, 0xfd0f06aa, + 0x9992a9bf, 0xf49fa601, 0xaa98110c, 0xa6ff0901, 0x070bf4a9, 0x0c00a4b3, 0x020a02f8, 0x00516b3e, + 0x0fffb9e4, 0x1bf4020a, 0x2ad4d945, 0x9f980000, 0x51633e04, 0x08f91800, 0x28009033, 0x3309f918, + 0x58210090, 0xfe5802f9, 0x0093f003, 0xa60c94b6, 
0x0f08f4b9, 0xffffe9e4, 0xa60c94b6, 0x0a0df4b9, + 0xb304ff98, 0xa0d200f4, 0xfe350acf, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0xf830f400, 0x00062cdf, 0xbf52f900, 0xfea4b2ff, 0x42fe0149, 0x1c999001, 0xc3b2b0b2, + 0x9fa0d5b2, 0x350a14bd, 0x3e182290, 0xb200523c, 0x0c0bb24a, 0x7e2db204, 0x330031aa, 0x008900ad, + 0x94b329bf, 0x11900e00, 0x04009020, 0x00523c3e, 0xfd019eb9, 0x008904e9, 0xe9ffff00, 0x0bfcf094, + 0xd901f6f0, 0xff00ff00, 0xf094e9ff, 0xb6f00bbc, 0xf0f0d901, 0xe9fff0f0, 0x0bccf094, 0xd901c6f0, + 0xcccccccc, 0xf094e9ff, 0xd6f00bdc, 0xaaaad901, 0xf4b6aaaa, 0x03b4b604, 0xb602c4b6, 0xe9fd01d4, + 0x0b9cf004, 0xfd0196f0, 0xfbfd05f9, 0x05fcfd05, 0xa005fdfd, 0x101fbc2f, 0x0052423e, 0x0df503a6, + 0x51a0ff71, 0x900149fe, 0x9fbf1c99, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40855fb, + 0x2cdffc30, 0xf9000006, 0xb2ffbf42, 0xfec2b2a0, 0xe3b20149, 0xb2149990, 0xb29fa0b4, 0x3ec4bdea, + 0xf00052de, 0xf99407e2, 0xf0ffbc03, 0xbcf0f9bc, 0xbe3ff0fe, 0x9030ff92, 0xbab201bb, 0x3030e912, + 0x0df40996, 0x00f4b3e1, 0x33010f06, 0x312d64e0, 0xf40064e6, 0xe4330b1c, 0xca3e2862, 0xe0330052, + 0xe4330c73, 0xd13e1c77, 0xcfbc0052, 0x52de3ec0, 0x90ffbc00, 0x0052db3e, 0xbc02f994, 0xae3fc0c9, + 0x0d00e033, 0xbd01ab90, 0x529a3ef4, 0x20c6b000, 0xf8090df4, 0x54063e02, 0x00d0b300, 0xbfdca006, + 0x90c2bc0f, 0x9fa6ff01, 0x01000cf5, 0x002a9cda, 0x07a99800, 0xbf010098, 0xbc0db29b, 0xaa7eb02b, + 0xa1320031, 0xe400ad33, 0xb242b200, 0xff00440a, 0x0053f73e, 0xc203c994, 0xccbc07be, 0xbcdb3ff0, + 0xdd90f0f9, 0xf0febc01, 0xb230fc92, 0x30b912d3, 0xf4099630, 0xc4b3e10d, 0x010c0600, 0xf03e20b2, + 0xb0330053, 0xb6315b64, 0x1cf40064, 0x62bd330c, 0x833e0099, 0xb0330053, 0xbd331973, 0x3e008c77, + 0x3f0053a5, 0x01ad90a9, 0x3eff9fc4, 0x3f0053e6, 0x01ad90a9, 0xc4ff9fc4, 0x903380f9, 0xf4fd4c00, + 0x53e63e05, 0x01a91800, 0xad90af3f, 0xff94f002, 0xb6fff4f0, 0x9fff0894, 0x53e63ef5, 0x03ad1800, + 0x3f02af18, 0x01a918ae, 0xf018d4b6, 0xf4b6fff4, 0xffe4f010, 0xb6ff94f0, 0x9efd0894, 0x059ffd05, + 0x90f59dff, 0x0fa004ad, 0x9001cc92, 0xdab20400, 0xcdb302b2, 0x3fff7000, 0x00b0333b, 0x013d900f, + 0x4b3ec4bd, 0xff010053, 0x900149fe, 0x9fbf1499, 0x00062cd9, 0x3299bf00, 0xf4f9a61a, 0xe97e070b, + 0x45fb0042, 0xf830f404, 0x2cd222f9, 0xbf000006, 0xfebcb229, 0x40fe0141, 0x10119001, 0xa00c0090, + 0xb2d4bd19, 0x0074de0b, 0x5f7e0000, 0x1fbf0052, 0x0a7f29bf, 0x0bf4f9a6, 0x42e97e07, 0x0825fb00, + 0xf9f830f4, 0x062cd222, 0x29bf0000, 0x41febcb2, 0x0140fe01, 0x90101190, 0x19a00c00, 0x0bb2d4bd, + 0x000076de, 0x525f7e00, 0xbf1fbf00, 0xa60abf29, 0x070bf4f9, 0x0042e97e, 0xf40825fb, 0x22f9f830, + 0x00062cd2, 0xb229bf00, 0x0141febc, 0x900140fe, 0x00901011, 0xbd19a00c, 0xde0bb2d4, 0x00000078, + 0x00525f7e, 0x29bf1fbf, 0xf9a60a3f, 0x7e070bf4, 0xfb0042e9, 0x30f40825, 0x062cdff0, 0x62f90000, + 0x49feffbf, 0x0143fe01, 0xfe289990, 0x9fa00142, 0x001404d6, 0x1c339000, 0x05242290, 0xbf040402, + 0x0d080c6a, 0x7e3bb2ff, 0x330000c1, 0x98f400a4, 0x193f0131, 0xeb079433, 0x3d041918, 0x2a9cda04, + 0x94330000, 0x527e0a00, 0xa032004f, 0xb2013b18, 0x0f817e1a, 0x00043300, 0x022035ca, 0x11182520, + 0x352ab203, 0xb4bd0124, 0x7e032135, 0x3e000f71, 0xf40054ff, 0x2cdff430, 0xf9000006, 0xbdffbf22, + 0x0140fe94, 0xb50c0090, 0x09a00109, 0x900149fe, 0xd4d21499, 0xa000002f, 0x7e01b29f, 0x3e008f2c, + 0x98005592, 0x993f0119, 0x0a099433, 0x2f7e0ab2, 0xc8da003c, 0x7e00002f, 0xb2001475, 0xb22abfad, + 0x7e080c0b, 0x330000c1, 0xdadc00a0, 0x00002fc8, 0x0014b57e, 0x0055923e, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00062cde, 0xf4e9bf00, 0x4ffefc30, 0x3df9a001, 0xbfa92094, 0xa6e9bfff, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, 0xa920943d, 0xe9bfffbf, 0x0bf4f9a6, + 0x42e97e07, 0x0430f400, 0x2cde00f8, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0x20943df9, 0xbfffbfa9, + 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xde00f804, 0x0000062c, 0x30f4e9bf, 0x014ffefc, 0x943df9a0, + 0xffbfa920, 0xf9a6e9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4e9bf00, 0x4ffefc30, + 0x3df9a001, 0xbfa92094, 0xa6e9bfff, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, + 0xfefc30f4, 0xf9a0014f, 0xa920943d, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cde00f8, + 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0x20943df9, 0xbfffbfa9, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, + 0xde00f804, 0x0000062c, 0x30f4e9bf, 0x014ffefc, 0x943df9a0, 0xffbfa920, 0xf9a6e9bf, 0x7e070bf4, + 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4e9bf00, 0x4ffefc30, 0x3df9a001, 0xbfa92094, 0xa6e9bfff, + 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, 0xa920943d, + 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x2cde00f8, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, + 0x20943df9, 0xbfffbfa9, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xde00f804, 0x0000062c, 0x30f4e9bf, + 0x014ffefc, 0x943df9a0, 0xffbfa920, 0xf9a6e9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, + 0xf4e9bf00, 0x4ffefc30, 0x3df9a001, 0xbfa92094, 0xa6e9bfff, 0x070bf4f9, 0x0042e97e, 0xf80430f4, + 0x062cde00, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, 0xa920943d, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x2cde00f8, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0x20943df9, 0xbfffbfa9, 0xf4f9a6e9, + 0xe97e070b, 0x30f40042, 0xde00f804, 0x0000062c, 0x30f4e9bf, 0x014ffefc, 0x943df9a0, 0xffbfa920, + 0xf9a6e9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4e9bf00, 0x4ffefc30, 0x3df9a001, + 0xbfa92094, 0xa6e9bfff, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xe9bf0000, 0xfefc30f4, + 0xf9a0014f, 0xa920943d, 0xe9bfffbf, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdffc, + 0x12f90000, 0x30f4ffbf, 0x0149fed4, 0xa0349990, 0x49ff0f9f, 0x9ff70500, 0x5c997e00, 0x71c67e00, + 0x42ba7e00, 0x5f737e00, 0x00a93300, 0x060f02d6, 0xf7110049, 0x837e009f, 0xed7e005e, 0xb57e005e, + 0xbf7e0061, 0x1f7e005b, 0xb17e0064, 0xe67e0064, 0x657e005c, 0x497e0065, 0x7e7e0066, 0xe27e0066, + 0x387e0066, 0x8e7e0067, 0xe47e0067, 0xc37e0067, 0xb77e0089, 0x847e0068, 0x797e006d, 0x617e0063, + 0x314c0068, 0x00c3f177, 0x01943d00, 0x28913002, 0x30009130, 0x91300891, 0x14913010, 0x30189130, + 0x91301c91, 0x24913020, 0x300111b0, 0x10da0c11, 0xdb000006, 0x0000018c, 0x804e010d, 0x71d87e00, + 0x00ad3300, 0x08da023e, 0x0b000014, 0x7e010c04, 0x33006e05, 0x022c00ad, 0xf17ed24c, 0x000000c3, + 0x28a13001, 0x3000a130, 0xa13008a1, 0x10a1300c, 0x3014a130, 0xa13018a1, 0x24a13020, 0x000168db, + 0x0101b000, 0x0d1c0130, 0x00804e03, 0x000614da, 0x71d87e00, 0x1400da00, 0x040b0000, 0x057e080c, + 0xad33006e, 0x4c01dd00, 0xc3f18863, 0x44db0000, 0x30000001, 0xa13028a1, 0x10a13000, 0x3014a130, + 0xa13018a1, 0x24a13020, 0x300101b0, 0x11300811, 0x1c01300c, 0x804e080d, 0x0618da00, 0xd87e0000, + 0x24da0071, 0x0b000014, 0x7e080c04, 0x33006e05, 0x019000ad, 0xf155534c, 0xdb0000c3, 0x00000120, + 0x3028a130, 0xa13000a1, 0x14a13010, 0x3018a130, 0xa13020a1, 0x0101b024, 0x30081130, 0x01300c11, + 0x4e070d1c, 0x1cda0100, 0x7e000006, 0xda0071d8, 0x00002fd4, 0x080c1bb2, 0x006e057e, 0x4300ad33, + 
0x54d64c01, 0x0000c3f1, 0x0000fcdb, 0x28a13000, 0x3000a130, 0xa13010a1, 0x18a13014, 0x3020a130, + 0x01b024a1, 0x08113001, 0x300c1130, 0x050d1c01, 0xda00c04e, 0x00000620, 0x0071d87e, 0x001404da, + 0x0c040b00, 0x6e057e08, 0x00ad3300, 0xf64c00f6, 0x00c3f18a, 0x00d8db00, 0xa1300000, 0x00a13028, + 0x3010a130, 0xa13014a1, 0x20a13018, 0xb024a130, 0x11300101, 0x0c113008, 0x0d1c0130, 0x00804e09, + 0x000624da, 0x71d87e00, 0x142cda00, 0x040b0000, 0x057e080c, 0xad33006e, 0x4c00a900, 0xc3f1dabf, + 0xb4db0000, 0x30000000, 0xa13028a1, 0x10a13000, 0x3014a130, 0xa13018a1, 0x24a13020, 0x300c1130, + 0x01b01c01, 0x08113001, 0xc04e0a0d, 0x0628da00, 0xd87e0000, 0x20da0071, 0x0b000014, 0x7e080c04, + 0x33006e05, 0x7e5c00a4, 0x7e007165, 0x7e005c75, 0x7e006dff, 0x7e000a74, 0x7e004747, 0x7e00645a, + 0x7e005c17, 0x7e005d1b, 0x7e0063ae, 0x7e0063de, 0x7e0066b3, 0x7e006717, 0x7e00676d, 0x7e0067c3, + 0x7e006840, 0x7e006896, 0x7e006dc9, 0x7e008989, 0x7e0068ec, 0x7e005eb8, 0x33006e7c, 0xf80600a0, + 0x0149fe02, 0xbf349990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x2c30f400, 0xf40415fb, + 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x2fe0da08, 0x09a00000, 0x001434d9, 0x9899bf00, + 0x95f90599, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0x00062cde, 0xf4efbf00, 0x49fefc30, + 0xa0400a01, 0xbf9fbf9f, 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, + 0x0149fefc, 0x70df9fa0, 0x49200000, 0x9ff60700, 0x03004e00, 0x4f00e9cf, 0x9ffdcfff, 0x0095f104, + 0x00e9f680, 0x00b0718f, 0xf6040049, 0x008f009f, 0xf9ce0215, 0x0195f000, 0xfe00f9f7, 0x9fbf0149, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cde00, 0xefbf0000, 0xfefc30f4, + 0x004a0149, 0xbf9fa020, 0xa6e9bf9f, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, + 0xfefc30f4, 0x9fa00149, 0xce01004f, 0x95f000f9, 0x00f9f710, 0xce07004f, 0x95f000f9, 0x00f9f710, + 0xce04004f, 0x95f000f9, 0x00f9f710, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x30f400f8, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, 0x002fe2da, 0xd909a000, + 0x00001434, 0x999899bf, 0xbf95f908, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xd90415fb, 0x0000062c, + 0x30f499bf, 0x014ffefc, 0xad7ef9a0, 0xa433005d, 0xaddf1100, 0x49deadde, 0x9ff61000, 0x7e02f800, + 0x7e005df4, 0x7e005d65, 0xfe005e30, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x00900089, 0xf00099cf, 0x0bf40194, + 0xa5008915, 0x009fcf00, 0xf5f0ef0e, 0x04fefd07, 0xfe009ff6, 0x9fbf0149, 0x00062cd9, 0xa699bf00, + 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x00900089, + 0xc4009fcf, 0x1bf401f9, 0x3ea43d09, 0xc7005dda, 0x96b024f9, 0x0b9cf002, 0x49fe9a32, 0xd99fbf01, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, + 0x0f9fa001, 0x80008905, 0x009ff601, 0x99b8060f, 0xf6000100, 0x49fe009f, 0xd99fbf01, 0x0000062c, + 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0x899fa001, + 0xcf010200, 0x008f0099, 0x9ffd1000, 0x1a1bf404, 0x820434da, 0x1c227e00, 0x01a4f000, 0xf80b1bf4, + 0x3e240a02, 0x3d005e69, 0x0149fea4, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xf400f804, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x1438da08, 0x09a00000, 0x001434d9, + 0x9899bf00, 0x95f90e99, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0x00062cd9, 0xf499bf00, + 0x4ffefc30, 0xa01e0a01, 0xdc967ef9, 0x00a03300, 0xfe02f806, 0x9fbf0149, 0x00062cd9, 0xa699bf00, + 0x070bf4f9, 0x0042e97e, 
0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x0100008f, + 0xf6590049, 0x49fe009f, 0xd99fbf01, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, + 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0x899fa001, 0xce009000, 0x94f00099, 0x200bf401, 0x00f1008e, + 0x0f00e9ce, 0x049ffdef, 0xb800e9f7, 0x025200ee, 0xfd00e9ce, 0xe9f7049f, 0x0149fe00, 0x2cd99fbf, + 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, + 0xa00a004a, 0x1c227e9f, 0x14aae700, 0x06aa9201, 0xf401a6b0, 0x004f1f0c, 0x00f9cf4f, 0xb3e899c7, + 0xcf130f94, 0x94f000f9, 0x0b9cf0ff, 0xb43e9a32, 0xa43d005f, 0xbf0149fe, 0x062cd99f, 0x99bf0000, + 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdffc, 0x02f90000, 0x49feffbf, 0x04999001, + 0x00899fa0, 0x99ce0090, 0x0194f000, 0x00d70bf5, 0x0062817e, 0xc4bdb4bd, 0x090ad4bd, 0x00246b7e, + 0x7ec8aa90, 0x0c001c22, 0xbda0b201, 0x0ab4bdd4, 0x246b7e0a, 0x90f00b00, 0x0bffc8aa, 0x1cb17eb4, + 0xbdc4bd00, 0xbdb4bdd4, 0x246b7ea4, 0xb2b4bd00, 0x28aab8a0, 0xb17e0005, 0x0ab8001c, 0xbd000530, + 0x1cb17eb4, 0x340ab800, 0xb4bd0005, 0x001cb17e, 0x010c0ab8, 0x7e040b00, 0xb8001cb1, 0x0001000a, + 0xb17e4a0b, 0x0ab8001c, 0x0b000104, 0x1cb17e4a, 0x080ab800, 0x420b0001, 0x001cb17e, 0xb2640090, + 0x1c227e0a, 0xfffe0900, 0x0ab2b4a9, 0x001cb17e, 0x00e2f08a, 0x001c227e, 0x8a40abc5, 0x7e00e2f0, + 0x8a001cb1, 0x7e00e2f4, 0xc5001c22, 0xf48a40ab, 0xb17e00e2, 0xf47e001c, 0xf4bd0064, 0x038a0089, + 0xfe009ff6, 0x99900149, 0xd99fbf04, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40405, + 0x062cdffc, 0x12f90000, 0x49feffbf, 0x08999001, 0x00899fa0, 0x99ce0090, 0x0194f000, 0x009e0bf5, + 0xc4bdb4bd, 0x0b0ad4bd, 0x00246b7e, 0xd4bd010c, 0xb4bda1b2, 0x6b7e0c0a, 0xa0b20024, 0x7e1c1a90, + 0xc5001c22, 0x0a9010ab, 0x1cb17e1c, 0x1c1ab800, 0x227e0050, 0xabc5001c, 0x1c0ab810, 0xb17e0050, + 0x1ab8001c, 0x7e00101c, 0xc5001c22, 0x0ab810ab, 0x7e00101c, 0xb8001cb1, 0x00401c1a, 0x001c227e, + 0xb810abc5, 0x00401c0a, 0x001cb17e, 0x301c1ab8, 0x1c227e00, 0x10abc500, 0x301c0ab8, 0x1cb17e00, + 0x1c1ab800, 0x227e0060, 0xabc5001c, 0x1c0ab810, 0xb17e0060, 0xe37e001c, 0x49fe0062, 0x08999001, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x15fb0042, 0x062cdf04, 0xffbf0000, 0x8efc30f4, + 0xfe009500, 0x9fa00149, 0x4f00e9cf, 0x9ffdcfff, 0x0095f104, 0x00e9f620, 0x01a1008f, 0xf000f9cf, + 0xf9f60195, 0x24004e00, 0x8f00e9cf, 0xfd060000, 0xe9f6059f, 0x5f207e00, 0x95008f00, 0x00f9cf00, + 0xf62095f0, 0xffdf00f9, 0x8900ffff, 0xf601c300, 0x3e7e009f, 0x004a0027, 0x1c227e0a, 0x6ca9c700, + 0x001438de, 0x07e93500, 0xe9350309, 0x0ce9350d, 0x3598a9c7, 0xa9c704e9, 0x05e93574, 0xaac794bd, + 0x02e9b570, 0x8906ea35, 0xcf010200, 0x008f0099, 0x9ffd1000, 0x080bf404, 0xe9350109, 0x60de7e0e, + 0x5fce7e00, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xa6a43d99, 0x070bf4f9, 0x0042e97e, 0xf80430f4, + 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x00900089, 0xf00099ce, 0x0bf40194, 0xbd010c2f, + 0x0ab4bdd4, 0x246b7e10, 0x00008b00, 0x40aa9001, 0x001cb17e, 0x0e0ab4bd, 0xd4bd010c, 0x00246b7e, + 0xaa90010b, 0x1cb17e94, 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, + 0xf400f804, 0x2cdffc30, 0xf9000006, 0xfeffbf12, 0x99900149, 0x899fa008, 0xce009000, 0x94f00099, + 0x5d0bf401, 0xc4bdb4bd, 0x0d0ad4bd, 0x00246b7e, 0xd4bd010c, 0xb4bda1b2, 0x6b7e0e0a, 0xa0b20024, + 0x7e0c1a90, 0xc5001c22, 0x0a9010ab, 0x1cb17e0c, 0xbdb4bd00, 0x0ad4bdc4, 0x246b7e0f, 0xb2b4bd00, + 0xbd010ca1, 0x7e100ad4, 0xb200246b, 0x0c1a90a0, 0x001c227e, 0x9010abc5, 0xb17e0c0a, 0x49fe001c, + 0x08999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 
0xe97e070b, 0x15fb0042, 0xfc30f404, 0x2cd112f9, + 0xbf000006, 0x0140fe19, 0xda080090, 0x00002a4c, 0x34d909a0, 0xbf000014, 0x11999899, 0x0fbf95f9, + 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x90a4bd01, + 0xb4bd0800, 0xc4bd09a0, 0x617ed43d, 0x0fbf0043, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, + 0xbf000006, 0xfc30f4ff, 0xda0149fe, 0x00002a54, 0xb43d9fa0, 0x00734d7e, 0x002a54d9, 0xb399bf00, + 0xf8060094, 0x0149fe02, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xf400f804, + 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x2fe3da08, 0x09a00000, 0x001434d9, 0x3d9fbf00, + 0x98a92094, 0x95f910f9, 0x19bf0fbf, 0xf9a6a43d, 0x7e070bf4, 0xfb0042e9, 0x0cd90415, 0xf4000014, + 0x12f9fc30, 0x00062cd1, 0xbf9abf00, 0x03034b19, 0xb80140fe, 0x0002dcaa, 0xa0080090, 0x1cb17e09, + 0x6f448a00, 0x1c227e06, 0xbf0dbf00, 0x0650d91e, 0xa4f10000, 0x408f3fff, 0x9aa00f42, 0x000654d9, + 0x3d9fa000, 0xf4dea6a4, 0xe97e070b, 0x15fb0042, 0xfc30f404, 0x2cd112f9, 0xbf000006, 0x0cb4bd19, + 0x0140fe38, 0x002a5cda, 0x08009000, 0x947e09a0, 0x34d9000b, 0xbf000014, 0x2a5cda99, 0x99bf0000, + 0x0fbf95f9, 0xa43d19bf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0xf9fc30f4, 0x062cd222, 0x29bf0000, + 0xd4bdc4bd, 0x41feb4bd, 0x90120a01, 0x19a00c11, 0x00246b7e, 0x0484a0b8, 0x7e0ab200, 0xc5001c22, + 0x0ab201ab, 0x001cb17e, 0x08b8c08a, 0x001c227e, 0xa9fffb09, 0xb8c08ab4, 0x1cb17e08, 0xb9408a00, + 0x1c227e08, 0xffe00900, 0x408ab4a9, 0xb5f008b9, 0x1cb17e02, 0xbf1fbf00, 0xf4f9a629, 0xe97e070b, + 0x25fb0042, 0xfc30f404, 0x2cd112f9, 0xbf000006, 0xbdb4bd19, 0xfed4bdc4, 0xa4bd0140, 0xa0080090, + 0x246b7e09, 0x1434d900, 0x9fbf0000, 0x00140cd9, 0xda9aa000, 0x00002fe5, 0xf90cf998, 0xbf0fbf95, + 0xf4f9a619, 0xe97e070b, 0x15fb0042, 0xfc30f404, 0x0cd022f9, 0xd2000014, 0x0000062c, 0x29bf0abf, + 0x41feff0b, 0xb8aab801, 0x11900008, 0x7e19a00c, 0xbf001cb1, 0xb8ff0b0a, 0x0008bcaa, 0x001cb17e, + 0xff0b0abf, 0x08c0aab8, 0x1cb17e00, 0x0b0abf00, 0xc4aab8ff, 0xb17e0008, 0x0abf001c, 0x7000008b, + 0x08d4aab8, 0x1cb17e00, 0xdb0abf00, 0x01000000, 0x08a8aab8, 0x1cb17e00, 0xdb0abf00, 0x0e03ffff, + 0x08e0aab8, 0x1cb17e00, 0xdb0abf00, 0xce000000, 0x08b4aab8, 0x1cb17e00, 0xbf1fbf00, 0xf4f9a629, + 0xe97e070b, 0x25fb0042, 0xfc30f404, 0x2cd112f9, 0xbf000006, 0x0140fe19, 0xda080090, 0x00002fe6, + 0x34d909a0, 0xbf000014, 0x07999899, 0x0fbf95f9, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, + 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, 0x002fe7da, 0xd909a000, 0x00001434, 0x999899bf, + 0xbf95f909, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xde0415fb, 0x0000062c, 0x30f4e9bf, 0x014ffefc, + 0x002a94dd, 0xbff9a000, 0x09efbffc, 0x09d9a0ff, 0x01d9b50f, 0x0bf4cfa6, 0x42e97e07, 0x0430f400, + 0x30f400f8, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, 0x002fe8da, 0xd909a000, 0x00001434, + 0x999899bf, 0xbf95f90a, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xdf0415fb, 0x0000062c, 0x30f4febf, + 0x0149fefc, 0x9ebf9ea0, 0xe9a6f9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0xf9fc30f4, 0x062cd112, + 0x19bf0000, 0x900140fe, 0xe9da0800, 0xa000002f, 0x1434d909, 0x99bf0000, 0xf9049998, 0xbf0fbf95, + 0xf4f9a619, 0xe97e070b, 0x15fb0042, 0x062cdf04, 0xfebf0000, 0xfefc30f4, 0x9ea00149, 0xf9bf9ebf, + 0x0bf4e9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, + 0x002feada, 0xd909a000, 0x00001434, 0x999899bf, 0xbf95f903, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, + 0xdf0415fb, 0x0000062c, 0x30f4febf, 0x0149fefc, 0x9ebf9ea0, 0xe9a6f9bf, 0x7e070bf4, 0xf40042e9, + 0x00f80430, 0xf9fc30f4, 0x062cd112, 0x19bf0000, 0x900140fe, 0xd0da0800, 
0xa000002a, 0x1434d909, + 0x99bf0000, 0xf90f9998, 0x2ac8df95, 0x0dbf0000, 0x9cd91ebf, 0xb500002a, 0xc0df069f, 0xb500002a, + 0xf4bd059f, 0xa0099f35, 0x019fb59f, 0x0f089f35, 0x039fb532, 0x0bf4dea6, 0x42e97e07, 0x0415fb00, + 0x00062cdf, 0xf4febf00, 0x49fefc30, 0xbf9ea001, 0xa6f9bf9e, 0x070bf4e9, 0x0042e97e, 0xf80430f4, + 0xfc30f400, 0x2cd112f9, 0xbf000006, 0x0140fe19, 0xda080090, 0x00002feb, 0x34d909a0, 0xbf000014, + 0x06999899, 0x0fbf95f9, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, 0xbf000006, 0xfc30f4fe, + 0xa00149fe, 0xbf9ebf9e, 0xf4e9a6f9, 0xe97e070b, 0x30f40042, 0xf400f804, 0x12f9fc30, 0x00062cd1, + 0xfe19bf00, 0x00900140, 0x2af0da08, 0x09a00000, 0x001434d9, 0x9899bf00, 0x95f90199, 0x19bf0fbf, + 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0xff1830f5, 0x00062cdf, 0xbf82f900, 0x0149feff, 0x010899b8, + 0xa0080a00, 0x1c227e9f, 0x0aa0b200, 0x1c227e08, 0x0000d900, 0x09fd4000, 0x4d1bf504, 0x1fa99504, + 0x460199b3, 0x4ba43d04, 0x040c01dc, 0x0040417e, 0xa9b3a4b2, 0xb5034500, 0xa0b576a0, 0x72a0b571, + 0xb573a0b5, 0xa0b574a0, 0x2ae8d975, 0x4ffe0000, 0xe8ff9001, 0x002a9cda, 0x019fb500, 0x002fb67e, + 0xa033a232, 0xad330900, 0xd903112e, 0x00002a9c, 0xde079998, 0x00002ae8, 0x99989fbf, 0x029fbb01, + 0x99b3e9a0, 0x98009b00, 0x9db37549, 0xfe008b00, 0xe5b20143, 0x554714bd, 0x4350d6aa, 0x33905249, + 0x6a0b3ea8, 0x2ae8da00, 0x1bb20000, 0x0054257e, 0x1bf4a766, 0x2ae8da56, 0x1b900000, 0x54257e18, + 0xf059bf00, 0xa1bc00a3, 0x1c0f9000, 0x18f4f9a6, 0x2ae8da3a, 0x0bb20000, 0x0054607e, 0x1bf4a6a6, + 0xda0cb22a, 0x00002ae8, 0xd4bd3bb2, 0x00007ade, 0x525f7e00, 0x00a43300, 0x01399828, 0x10de9eb3, + 0x7541b50c, 0x006a153e, 0xbf011190, 0x34999259, 0x08f419a6, 0x75499893, 0x5f0099b3, 0x002d3302, + 0x4998025a, 0x009db374, 0x42980110, 0x0024b375, 0x6c773e74, 0x2ae8da00, 0x2bb20000, 0x0054257e, + 0x1bf4a866, 0x2ae8da51, 0x2b900000, 0x54607e02, 0xf4a7a600, 0x42b5401b, 0x2ae8da74, 0x2b900000, + 0x7e04bd08, 0xbd00549b, 0xffa3c414, 0x006a853e, 0xdab002bc, 0x00002ae8, 0x7e010090, 0xf000549b, + 0x1abcffa4, 0xf403a610, 0x1033e908, 0x56b50900, 0x9856a001, 0x94b37449, 0x22902400, 0x6aaf3e01, + 0x2ae8d500, 0xff480000, 0x494287b8, 0xbf64bd54, 0x03999259, 0x08f529a6, 0x4c98ff7f, 0x00cdb374, + 0x773e0278, 0x3b98006c, 0x74499804, 0xc4033098, 0x09bcff1a, 0x16fc7e00, 0xbc2bb200, 0xd4bd000a, + 0xe8da0cb2, 0xde00002a, 0x00000087, 0x00525f7e, 0x8700ad33, 0x9829bf01, 0x2c98754f, 0x4994b303, + 0xc0cfbc2a, 0x002ae8da, 0xbd5bb200, 0x008cded4, 0x5f7e0000, 0xad330052, 0x98016100, 0x49980e5f, + 0x909fbc75, 0x107649b5, 0x3f980111, 0xff19c405, 0x08f49fa6, 0x76419894, 0x3f0019b3, 0x2ae8da01, + 0x1bb20000, 0x00549b7e, 0x2f02ad33, 0x2ae8da01, 0x1b900000, 0x549b7e01, 0x08ad3300, 0x40fe011e, + 0x2ae8da01, 0x00900000, 0xb21cb27c, 0xded4bd0b, 0x00000090, 0x00525f7e, 0xff00ad33, 0x05059800, + 0xf42456b0, 0x2405050d, 0x900149fe, 0x9e987c99, 0x019f9802, 0x94bd40b2, 0x1fbc24bd, 0x7049b5f0, + 0xbc0141fe, 0x119030fe, 0x6bf63e60, 0x2ae8da00, 0x1bb20000, 0xd4bd3cb2, 0x000095de, 0x525f7e00, + 0x00a43300, 0x90193f3d, 0x22900733, 0x18092001, 0x09350419, 0x08191801, 0x18020935, 0x09350c19, + 0x10191803, 0x18040935, 0x09351419, 0x18191805, 0x90060935, 0x25a60700, 0xb5b508f4, 0x41987042, + 0x2416b070, 0xbd0b0cf4, 0x3e043d24, 0x49006c23, 0x91f71000, 0x490a0f00, 0x9ff71100, 0x3d02f800, + 0xbd24bd04, 0x6c6b3e34, 0x714f9800, 0xbb039e94, 0xf99402e9, 0x06f4b609, 0xbc029fbb, 0x4ebce0e9, + 0x0090b390, 0x984e3c27, 0x330294f0, 0x0a1d0090, 0xc4b4bd01, 0x667eff0c, 0x3bff0042, 0xa52aff95, + 0xaeb29fb2, 0xf3b2e2b2, 0xc4010010, 0x91a6ff09, 0x3eb908f4, 0xbd006c7b, 0x8934bd24, 0xce009000, + 
0x94f00099, 0xe10bf501, 0x0544bd00, 0x6d2a3ef7, 0x0129c400, 0x008d0bf5, 0xd4bdc4bd, 0x080a4bb2, + 0x00246b7e, 0xa1b8a0b2, 0xb2000608, 0x1c227e1a, 0x00abe500, 0x7e1ab280, 0xb8001cb1, 0x00062001, + 0x227e1ab2, 0xabc5001c, 0x7e1ab204, 0xb8001cb1, 0x00060001, 0x227e1ab2, 0xabe5001c, 0x1ab28000, + 0x001cb17e, 0x061801b8, 0x7e1ab200, 0xff001c22, 0x1ab2b4a5, 0x001cb17e, 0x060401b8, 0x7e1ab200, + 0xe5001c22, 0xb28000ab, 0x1cb17e1a, 0x1c00b800, 0x0ab20006, 0x001c227e, 0xb204abc5, 0x1cb17e0a, + 0x01449000, 0x9d013395, 0x23ff0122, 0x641bf595, 0x6d663eff, 0x0140fe00, 0x002ae8da, 0x60009000, + 0x0bb2d4bd, 0x000098de, 0x525f7e00, 0x00ad3300, 0x42feff2a, 0x0145fe01, 0x143d03b2, 0x90982290, + 0x2a3e2455, 0x49fe006b, 0x0899b801, 0x9fbf0001, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0x00e883fb, 0xf9fc30f4, 0x062cd112, 0x19bf0000, 0xc04cb4bd, 0x0140fe00, 0x002af4da, 0x08009000, + 0x947e09a0, 0x34d9000b, 0xbf000014, 0x2bb0da99, 0x99980000, 0xbf95f90d, 0x3d19bf0f, 0xf4f9a6a4, + 0xe97e070b, 0x15fb0042, 0x061cd904, 0x30f40000, 0xd112f9fc, 0x0000062c, 0x19bf9abf, 0xdb0140fe, + 0x00002fc8, 0x0c080090, 0x7e09a001, 0xbf0072f3, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0x7e0415fb, + 0xf8007155, 0x7ed43d00, 0xf8007361, 0xe430f400, 0xa1b212f9, 0xb0b3b0b2, 0xb9986100, 0x0796b004, + 0x3d570cf4, 0x7e440ba4, 0xb20040e5, 0xb3050aaf, 0x584a00f0, 0x4afe0a09, 0x08aa9001, 0x9801afb5, + 0x93f0010f, 0x0294b600, 0xb503a9b5, 0x091802af, 0x351bb210, 0x0f9818a9, 0x04afb502, 0xb5030998, + 0x00bf05a9, 0xd97ea0a0, 0xa6b0006f, 0x0b9cf001, 0x3e019a12, 0x0a006e79, 0x1c15fb02, 0x006eb07e, + 0xa6b0943d, 0x0bacf001, 0x3c01a6f0, 0x00f8a29a, 0x00059889, 0x9f989ebf, 0x019fb501, 0xf47e9ea0, + 0xa4b3006e, 0xa43d0801, 0x09f800f8, 0x00f8ff0a, 0xb48912f9, 0x99bf000f, 0x90b3f20a, 0xd0813800, + 0x10bf000f, 0x04b3f10a, 0x747e2c00, 0x0109000a, 0xc08919a0, 0x90a0000f, 0x0010a889, 0x9fa0ff0f, + 0x000fb889, 0xbc8990a0, 0x9fa0000f, 0x003ff17e, 0x11fb10a0, 0xb48912f9, 0x04bd000f, 0x90a0b4bd, + 0x8a00a04c, 0x7e000fd4, 0xbd000b94, 0x8a140cb4, 0x7e0010b4, 0xbd000b94, 0x8a140cb4, 0x7e0010c8, + 0x89000b94, 0xa0001090, 0x108c8990, 0xa0b4bd00, 0x8a140c90, 0x7e001078, 0x8a000b94, 0xbd001094, + 0x7e140cb4, 0x89000b94, 0xa00010b0, 0x0fc08990, 0x8990a000, 0xa00010a8, 0x10748990, 0x8990a000, + 0xa0000fd0, 0x0fc48990, 0x8990a000, 0xa00010ac, 0x0fcc8990, 0x8990a000, 0xa0000fc8, 0x0fb88990, + 0x8190a000, 0x89001074, 0xa0000fbc, 0x0fd48090, 0x900ab200, 0xd07e1400, 0x01a6000b, 0x8af51bf4, + 0x7e0010b4, 0x8a000bd0, 0x7e0010c8, 0x8a000bd0, 0x7e001078, 0x8a000bd0, 0x7e001094, 0x89000bd0, + 0x8f001090, 0xa00010b4, 0x108c899f, 0x10c88f00, 0x7e9fa000, 0xfb007140, 0xb232f911, 0xb3b3b2a0, + 0x00c700a9, 0x0101a9bf, 0x060094b3, 0x0918f801, 0x07963018, 0x01050df4, 0x020f98fe, 0xf403f9c4, + 0xfb01050b, 0xf0030998, 0x0bf40394, 0xb3fb0105, 0x010600f4, 0x010998fa, 0x8e0099b3, 0x011db300, + 0x0ab20085, 0x0070ac7e, 0xa4b3a1b2, 0x0ab27801, 0x7e010298, 0x7e0070c7, 0x89000a74, 0xbf0010b0, + 0x01ff909f, 0x99bf9fa0, 0x0e0194b3, 0x000fb489, 0x733e92a0, 0xd0890070, 0x99bf000f, 0x170094b3, + 0x000fb48e, 0x0f18e9bf, 0x41991818, 0x0cf49f26, 0xb2e2a005, 0x05c77e2a, 0x0030b300, 0x7e32a006, + 0x89000a9b, 0xbf000fd0, 0x0090b399, 0x0fb48919, 0x1899bf00, 0x9918180f, 0xf49f2641, 0x0e7e0718, + 0x1ab20000, 0xfa0131fb, 0x0070a23e, 0xb205a998, 0xb3fa0aaf, 0x98120090, 0xe80a03f9, 0x00ff96b1, + 0x0a050df4, 0xf900f801, 0x98a1b212, 0xaa9803ac, 0x01109802, 0xa5a5a5db, 0x0b947ea5, 0x03199800, + 0xbc021a98, 0x0aa0a0a9, 0x1c981bbf, 0x3fd37e04, 0xb90aa000, 0x0ab500aa, 0x05199803, 0xb5100a90, + 0x19180109, 0x41093518, 
0xb5021998, 0x19bf0209, 0x180e09b5, 0x09351819, 0x03199840, 0x7e0f09b5, + 0x90000be7, 0xe77e240a, 0x00b5000b, 0x181f1807, 0x00b50809, 0xfff4f00c, 0xb5029fbb, 0x11fb0909, + 0x0016a489, 0x8a0093fe, 0x3d0005ad, 0x72a27eb4, 0xf8010a00, 0x10dc8900, 0xf89aa000, 0x10dc8900, + 0xf89abf00, 0x2fd88a00, 0x7eb43d00, 0x3300734d, 0xf80800a0, 0x8a00f809, 0x3d0010e4, 0x73577eb4, + 0x00a03300, 0xf809f806, 0x00b3f000, 0xa5df94bd, 0x3ea5a5a5, 0xa000719f, 0x019990af, 0xa604aa90, + 0xf608f49b, 0x508900f8, 0x008f0034, 0x9ab20011, 0x8b01f9b5, 0xbb003650, 0xb5b602b9, 0x04fb7502, + 0x0071897e, 0x907e00f8, 0xa433006e, 0xa67e0c00, 0xef7e0071, 0x00f80072, 0xf9e430f4, 0xb2c4b262, + 0x32b0b2a5, 0x34e272d3, 0x603444c0, 0x00a9b354, 0xb9b300ae, 0x8900a900, 0xbf001100, 0x06ce149f, + 0xffff2ae4, 0xbfa00501, 0xa048f034, 0x03c9c49b, 0xf0029436, 0xfd1403f4, 0x059ffd04, 0xfd059efd, + 0xa4b6059d, 0x1bb93502, 0x0040ed7e, 0x7100a0b3, 0x354c9034, 0xf0341d09, 0x010ab564, 0x350a0275, + 0x0f351803, 0x10f0b41a, 0x900149fe, 0x9ab51c99, 0x0390b501, 0xa0049fb5, 0x0a927594, 0x3d169335, + 0x1e0f35f4, 0x9bb2f4bd, 0xfe029fb5, 0x00900140, 0x7e0ab234, 0x32006e0d, 0x00a433a1, 0x00603324, + 0x320abf10, 0x3dc43d6b, 0x11cc7ed4, 0x0149fe00, 0xbf349990, 0x3e59a099, 0x0100729d, 0xfb1a3202, + 0x12f91c65, 0x30f4b0b2, 0x7ea1b2d4, 0x3d005bf4, 0xb0ae7294, 0x1cb20101, 0x30289130, 0x91300091, + 0x0c913008, 0x30109130, 0x91301491, 0x1c913018, 0x30209130, 0xdc8a2491, 0xac8b002f, 0xd43d0001, + 0x0071d87e, 0x0600a033, 0x30f402f8, 0x3d11fb2c, 0xf900f8a4, 0x30b0b202, 0x0df420c6, 0xc402f805, + 0xa994ffca, 0x02a4b604, 0xbda0a9bc, 0xb50c2094, 0xed7e0109, 0x0ab50040, 0xf901fb02, 0xb2c1b222, + 0x32d0b2a2, 0x7e540bba, 0xb20040e5, 0xb3050aac, 0xb21c00c0, 0xb20bb21a, 0x749a7e2d, 0x01a4b300, + 0x3ea43d0a, 0xf800734b, 0xfbff0a02, 0xb2010c21, 0x731b7ecd, 0x0c00f800, 0x7ed4bd01, 0xf800731b, + 0x3242f900, 0xb2c2b2d0, 0xb2a4b2b3, 0x16fc7eca, 0x54a19000, 0x1bb20a32, 0x0040e57e, 0xa0b3050f, + 0x1bb21e00, 0x2db23cb2, 0xa57e4eb2, 0xa4b30073, 0xf43d0a01, 0x00739c3e, 0xff0f02f8, 0x41fbfa32, + 0xf80eabb5, 0xfc30f400, 0x94bd52f9, 0x90014ffe, 0xf9a018ff, 0xb5b2a2b2, 0xd4b2c3b2, 0xa9c4e0b2, + 0xf4fb0103, 0x0101051b, 0x060034b3, 0x4efef901, 0x90f4bd01, 0xe43e18ee, 0xe9bf0073, 0xbc01ff90, + 0xe9a09049, 0x08f4f3a6, 0x90e9bff4, 0x59a65499, 0x01050bf4, 0x0029b3e8, 0x09b3009e, 0xb3009900, + 0x0096011d, 0x747e02a0, 0x0ebf000a, 0xbf14e998, 0x0099b9ef, 0x1bf4f9a6, 0x3ed10109, 0x9000748b, + 0xe9a05429, 0x5f920ebf, 0xd2f4bc54, 0xe9bfff0c, 0x000fb08b, 0xb5909fbc, 0x0fbf01e9, 0xf9b594bd, + 0xbf09bf0e, 0x029fb59f, 0xf9bf0fbf, 0xb5909dbc, 0x09bf03f9, 0xbf0f93b5, 0x1094b509, 0x9cb509bf, + 0xb509bf11, 0x09bf129c, 0x9fb5bfbf, 0xa00abf13, 0x10aa90ba, 0x000bd07e, 0xaa900abf, 0x0bd07e24, + 0xbf0fbf00, 0x0099b9f9, 0x7e14f9b5, 0x3e000a9b, 0x01007495, 0xfb1ab2fa, 0x42f90455, 0xb4b2a1b2, + 0xd3b2c2b2, 0xbaa6de00, 0x7e270cf4, 0xb2000a74, 0x0b1cb22a, 0xb2d4bd54, 0x73a57e3e, 0xb3a0b200, + 0xbf0c01a4, 0x7e4bb23a, 0x7e0073a0, 0xb2000a9b, 0xdf41fb0a, 0x0000062c, 0x30f4ffbf, 0x0149fefc, + 0x008f9fa0, 0xf9ce0289, 0x0195f000, 0xfe00f9f7, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, + 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x9fa00149, 0x0089010f, 0x9ff60280, + 0x0149fe00, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, + 0x30f4ffbf, 0x80aab8fc, 0x49fe0001, 0x09a4b601, 0xabf69fa0, 0x0149fe00, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0x00899fa0, + 0x9af60321, 0x0099b800, 0x9af60201, 0x0149fe00, 
0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x30f40042, 0xf400f804, 0x2cdff030, 0xf9000006, 0xfeffbf82, 0x99900149, 0xa0a13230, 0x75097e9f, + 0xff1ac400, 0x7e0143fe, 0xfe001d41, 0x16c40142, 0xd4a5b2ff, 0x00000fac, 0x90243390, 0x00d82c22, + 0xd7000014, 0x00002fc0, 0x0077063e, 0x9433093f, 0x7ebc1e00, 0xbc6ab298, 0x417ee949, 0x1b32001d, + 0x0ab2a5b2, 0x000f817e, 0x0077063e, 0x0f010918, 0x0f3f2001, 0x013135fc, 0xf00130b5, 0x9990ff94, + 0x049ffd03, 0xbc9009bc, 0x0f3fe949, 0x7307f033, 0xf407f630, 0xf933150c, 0x33009104, 0x335505f0, + 0x00c702fd, 0x0076683e, 0x600af033, 0xf40af630, 0xfd330c0c, 0x3e00b409, 0x33007696, 0x33560bf0, + 0x00a70cfd, 0x0076bc3e, 0x1b320ab2, 0x000f817e, 0x2ab2093f, 0x2920b4bd, 0x2935943d, 0x03001802, + 0x29350409, 0x03203501, 0x000f717e, 0x0077063e, 0xfa3e8abf, 0xd4df0076, 0x3e00002f, 0xd90076c1, + 0x00001404, 0x0076b63e, 0x001424df, 0x76c13e00, 0x142cd900, 0x9abf0000, 0x0076fa3e, 0x001420df, + 0x3efabf00, 0x200076fa, 0xb2943d2f, 0x0229350a, 0x32030018, 0x012f351b, 0x7e032035, 0xbd000f81, + 0x7e2ab2b4, 0x7e000f71, 0x7e000a74, 0xf4001b95, 0x9b7e0028, 0x063e000a, 0xa0b30077, 0x3bb20c00, + 0xa77e080c, 0x1ec40000, 0x084ebcff, 0x1bf505a6, 0xd37efede, 0x49fe0074, 0x30999001, 0x2cd99fbf, + 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x85fb0042, 0xb830f410, 0x00062cdf, 0xbf62f900, 0x0149feff, + 0x0c609990, 0xfe9fa03c, 0xb4bd0140, 0xb2240090, 0x0b947e0a, 0x0000d900, 0xc0d10100, 0xb200002f, + 0xd919a09b, 0x00000fac, 0x9ba0a4bd, 0x001d6d7e, 0xa4bd1bbf, 0x8046343d, 0x753b7e00, 0x0911bf00, + 0x04067502, 0xb5010935, 0x03350101, 0x0b03350a, 0x804cb4bd, 0x0000da00, 0x947e0100, 0x804c000b, + 0xdab4bd00, 0x01000080, 0x000b947e, 0x0074d37e, 0x000080df, 0x2fc4d401, 0xfab20000, 0x6f7e4fa0, + 0x4fbf0075, 0x0010e4d2, 0x08067500, 0x90120335, 0x29b580f9, 0x1da3d902, 0x0fb50000, 0x0329b503, + 0x001e02d9, 0x0145fe00, 0x900529b5, 0x01091f55, 0x20035335, 0x13093559, 0x001100d9, 0x012fb500, + 0xdf010975, 0x00001dcb, 0x2fb50409, 0x02593504, 0x000e32df, 0xb5400900, 0x5935062f, 0x7e032001, + 0xbf000a74, 0x009b7e2a, 0xb20bb200, 0x0f717e5a, 0x0a9b7e00, 0x49070f00, 0x9ff71100, 0x1408d100, + 0x40fe0000, 0x23009001, 0x0bb21abf, 0xff0d010c, 0x0000c17e, 0xf400a433, 0x9433093f, 0xa77eee01, + 0x483e0075, 0x2cd90078, 0xbf000006, 0xfc30f499, 0x8e014ffe, 0xa001c000, 0x140cd9f9, 0x9fbf0000, + 0xc700e9cf, 0x90b34c99, 0x90b34100, 0x02f8f601, 0x0078803e, 0xc700f9cf, 0x90b34c99, 0x90b30e00, + 0x02f8f601, 0x0078943e, 0x01c2008a, 0xfe00aacf, 0x9fbf0149, 0x00062cd9, 0xf099bf00, 0xf9a601a4, + 0x3e2b0bf4, 0x900078e7, 0x008960ff, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, + 0x3e9fb200, 0x7e007894, 0xf40042e9, 0x00f80430, 0xd9fc30f4, 0x0000062c, 0x99bf02f9, 0x90014ffe, + 0x54d004ff, 0xa0000006, 0x00a033f9, 0x8a0ba024, 0x7e066f44, 0x49001c22, 0xa9ffc000, 0x6f448ab4, + 0x00b5f106, 0x1cb17e20, 0x79573e00, 0x6f448a00, 0x1c227e06, 0x0650d900, 0x9bbf0000, 0xffc00049, + 0x448a94a9, 0xb4f1066f, 0x9bff3fff, 0x1cb17eb5, 0x42408900, 0xfe09a00f, 0x99900149, 0xd99fbf04, + 0x0000062c, 0xa43d99bf, 0x0bf4f9a6, 0x42e97e07, 0x0405fb00, 0xdffc30f4, 0x0000062c, 0xffbf12f9, + 0x900149fe, 0xa0b20899, 0x9fa0b1b2, 0x066f448a, 0x001c227e, 0x0c0104b3, 0x01b0aae7, 0x0079aa3e, + 0x04b3ff0e, 0xa4f12300, 0xa9e43fff, 0x1bf42000, 0x3ef4bd09, 0x8f0079bb, 0x94fe0000, 0xe43d03a9, + 0xa095f9ff, 0x0149fe19, 0xbf089990, 0x062cd99f, 0x99bf0000, 0xf9a6ea32, 0x7e070bf4, 0xfb0042e9, + 0x2cdf0415, 0xbf000006, 0xfc30f4ff, 0x8a0149fe, 0xa0066f40, 0x1c227e9f, 0x00a9e400, 0xffa4f120, + 0x0094b33f, 0x3ee4bd0a, 0x8e007a0f, 0xfefe0000, 0x9fbf0149, 0x00062cd9, 
0xb699bf00, 0xeaff03a4, + 0xf4f9a6a5, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x00000658, 0xf9f430f4, 0xfef0b232, 0x99900149, + 0x019fb510, 0x00062cdf, 0xbdffbf00, 0x3591a014, 0x01020191, 0x99909220, 0xa0280308, 0xbd0ab29f, + 0x7e3cb2b4, 0x09000b94, 0x08023505, 0x35030135, 0x01350103, 0x04023502, 0x008f0920, 0xf9cf01c0, + 0x4c99c700, 0x610099b3, 0x0190b301, 0x3e02f8f5, 0xcf007a7e, 0x99c700f9, 0x0099b34c, 0x90b30134, + 0x02f8f501, 0x007a933e, 0xfe00008f, 0xff0394b6, 0x58df95f9, 0xb5000006, 0x008f03f9, 0xf9cf01c0, + 0x4c99c700, 0xe80099b3, 0x0190b300, 0x3e02f8f5, 0xcf007abe, 0x99c700f9, 0x0099b34c, 0x90b300b8, + 0x02f8f501, 0x007ad33e, 0xfe00008f, 0x00140cd9, 0x949dbf00, 0xf9ff03e9, 0x0658df95, 0xf9b50000, + 0xc0008f04, 0x00f9cf01, 0xb34c99c7, 0xb3660090, 0xf8f60190, 0x7b053e02, 0x00f9cf00, 0xb34c99c7, + 0xb30e0090, 0xf8f60190, 0x7b193e02, 0xc2008f00, 0x00ffcf01, 0x001400d9, 0xde9abf00, 0x00000658, + 0xc711f9c7, 0xe93519ff, 0x14ef3515, 0x0c014bfe, 0x10bb9008, 0x00006e7e, 0x900149fe, 0x9fbf1899, + 0x00062cd9, 0xa699bf00, 0xa10bf5f9, 0x7c063e00, 0x60df9000, 0x01c10089, 0xdf009ff6, 0x800000f1, + 0x010099b8, 0x009ff702, 0xb2009fcf, 0x7b193e9f, 0xc2008900, 0x0099cf01, 0xe41095b6, 0xe420009f, + 0xb33fff9e, 0xff4500fd, 0x007aec3e, 0x066f448f, 0x01c10089, 0xdf009ff6, 0x800000f1, 0x010099b8, + 0x009ff702, 0xb2009fcf, 0x7ad33e9f, 0xc2008900, 0x0099cf01, 0x20009fe4, 0x3fff94f1, 0xcc00fdb3, + 0x7aac3efe, 0x6f408f00, 0xc1008906, 0x009ff601, 0x0000f1df, 0x0099b880, 0x9ff70201, 0x009fcf00, + 0x933e9fb2, 0xe97e007a, 0x35fb0042, 0xfc30f40c, 0x2cd112f9, 0xbf000006, 0x0140fe19, 0xa0080090, + 0x7a2f7e09, 0x467f7e00, 0xbf0fbf00, 0xf4f9a619, 0xe97e070b, 0x15fb0042, 0x0658df04, 0x30f40000, + 0xb242f9f4, 0x0149fef0, 0xb5149990, 0x2cdf019f, 0xbf000006, 0xa014bdff, 0x01913591, 0x93200103, + 0x32089990, 0x029fa0a4, 0xbd0ab228, 0x7e2cb2b4, 0x09000b94, 0x01023505, 0x35040335, 0x04350801, + 0x02013509, 0x20030135, 0xc0008f09, 0x00f9cf01, 0xb34c99c7, 0x01660099, 0xf50190b3, 0x8d3e02f8, + 0xf9cf007c, 0x4c99c700, 0x390099b3, 0x0190b301, 0x3e02f8f5, 0x8f007ca2, 0xb6fe0000, 0xf9ff0394, + 0x0658df95, 0xf9b50000, 0xc0008f03, 0x00f9cf01, 0xb34c99c7, 0x00ed0099, 0xf50190b3, 0xcd3e02f8, + 0xf9cf007c, 0x4c99c700, 0xc00099b3, 0x0190b300, 0x3e02f8f5, 0x8f007ce2, 0xb6fe0000, 0xf9ff0394, + 0x0658df95, 0xf9b50000, 0x00403304, 0x140cd937, 0x9fbf0000, 0x01c0008e, 0xc700e9cf, 0x90b34c99, + 0x90b36a00, 0x02f8f601, 0x007d183e, 0xc700f9cf, 0x90b34c99, 0x90b33a00, 0x02f8f601, 0x007d2c3e, + 0x001400d9, 0xfe9abf00, 0x080c014b, 0x7e14bb90, 0xfe00006e, 0x99900149, 0xd99fbf1c, 0x0000062c, + 0xf9a699bf, 0x00ba0bf5, 0x007e1a3e, 0x01c20089, 0xde0099cf, 0x00000658, 0xc7109fc7, 0xef351899, + 0x14e93515, 0x007d403e, 0x8960ff90, 0xf601c100, 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, + 0x9fb2009f, 0x007d2c3e, 0x01c20089, 0xe40099cf, 0xf120009f, 0xb33fff94, 0xff4000fd, 0x007cfb3e, + 0x066f448f, 0x01c10089, 0xdf009ff6, 0x800000f1, 0x010099b8, 0x009ff702, 0xb2009fcf, 0x7ce23e9f, + 0xc2008900, 0x0099cf01, 0x20009fe4, 0x3fff94f1, 0xc700fdb3, 0x7cbb3efe, 0x6f408f00, 0xc1008906, + 0x009ff601, 0x0000f1df, 0x0099b880, 0x9ff70201, 0x009fcf00, 0xa23e9fb2, 0xe97e007c, 0x45fb0042, + 0xfc30f40c, 0x00062cdf, 0xbf02f900, 0x0654d9ff, 0x90bf0000, 0x900149fe, 0x9fa00499, 0x0046a77e, + 0xc17e010a, 0x010a002b, 0x007c397e, 0x0cb2010a, 0xabb2d43d, 0x0043617e, 0x0600a033, 0x49fe02f8, + 0x04999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x05fb0042, 0x062cd904, 0x99bf0000, + 0xfefc30f4, 0xf9a0014f, 0x0078667e, 0x2800a433, 0xc4bdb4bd, 0x020ad43d, 0x0043617e, 0xc17ea43d, + 
0xa43d002b, 0x007c397e, 0x0028487e, 0x0046cf7e, 0x007eb83e, 0x002bc17e, 0xbf0149fe, 0x062cd99f, + 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdfcc, 0x82f90000, 0x49feffbf, + 0x54999001, 0xa00148fe, 0x0144fe9f, 0xbd109992, 0x0147fef4, 0xb50146fe, 0x9fa0019f, 0x90508890, + 0x77904c44, 0x24669034, 0x001400d9, 0xfe9abf00, 0x080c014b, 0x0d44bb90, 0x00c17eff, 0x33a53200, + 0xb4e900a4, 0x033f1200, 0xe0053433, 0x34040918, 0x90334520, 0xe3010f00, 0x87019d33, 0x7f513e00, + 0x050a1800, 0x7e020b98, 0x320078f0, 0x7fbf3ea1, 0x08011800, 0x0ab22b32, 0x000f817e, 0x4320040f, + 0x35034535, 0x1033024f, 0x14330c00, 0x943e7e01, 0x0998007f, 0xb24ab202, 0x9079a07b, 0x9f980809, + 0x017fb501, 0xb5029f98, 0x9998027f, 0x0379b503, 0x007fb23e, 0x90020f98, 0x4ab20809, 0x6fa06bb2, + 0xb5019f98, 0x9f98016f, 0x026fb502, 0xb5039998, 0x14090369, 0x7e014935, 0x3e000f61, 0xb2007f08, + 0x7e2b320a, 0x0f000f81, 0x20943d05, 0x0289358f, 0x0f030018, 0x358ab204, 0x8035018f, 0x7eb4bd03, + 0x33000f71, 0xff250019, 0x083e02f8, 0x2cdf007f, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x02b0b39f, + 0x02b6b02c, 0xb30b0cf4, 0x3e1001b4, 0xb300801d, 0xb32d03b0, 0x0e3f04b0, 0x805f3e02, 0x3da9bf00, + 0x00008fe4, 0x804b3e0c, 0x8fa9bf00, 0x3df3ffff, 0x049ffde4, 0x0800008f, 0x00804b3e, 0xff8fa9bf, + 0xe43df3ff, 0x8f049ffd, 0xfd040000, 0x5d3e059f, 0xa9bf0080, 0xf3ffff8f, 0x9ffde43d, 0xfea9a004, + 0x9fbf0149, 0x00062cd9, 0x3299bf00, 0xf4f9a6ea, 0xe97e070b, 0x30f40042, 0xf400f804, 0x52f9fc30, + 0x00062cd2, 0xb229bf00, 0xb2c4b2b3, 0xfea0b2d5, 0x008a0141, 0x11900880, 0x7e19a018, 0x60001c22, + 0x10a5b60a, 0x2c8a3a60, 0x227e0880, 0x4a60001c, 0x6010a5b6, 0xbf1fbf5a, 0xf4f9a629, 0xe97e070b, + 0x55fb0042, 0xfc30f404, 0x2cd122f9, 0xbf000006, 0xfea2b219, 0x008a0140, 0x009008d5, 0x7e09a00c, + 0xbf001c22, 0x00a3f00f, 0x19bf2aa0, 0x0bf4f9a6, 0x42e97e07, 0x0425fb00, 0xf9fc30f4, 0x062cd122, + 0x19bf0000, 0x40fea2b2, 0xd5408a01, 0x0c009008, 0x227e09a0, 0x0fbf001c, 0xa000a3f0, 0xa619bf2a, + 0x070bf4f9, 0x0042e97e, 0xf40425fb, 0x22f9fc30, 0x00062cd1, 0xb219bf00, 0x0140fea2, 0x08d5808a, + 0xa00c0090, 0x1c227e09, 0xf00fbf00, 0x2aa000a3, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40425, + 0xd122f9fc, 0x0000062c, 0xa2b219bf, 0x8a0140fe, 0x9008d580, 0x09a00c00, 0x001c227e, 0x0fbf2aa0, + 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40425, 0xd122f9fc, 0x0000062c, 0xa2b219bf, 0x8a0140fe, + 0x9008d640, 0x09a00c00, 0x001c227e, 0x0fbf2aa0, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40425, + 0xd122f9fc, 0x0000062c, 0xa2b219bf, 0x8a0140fe, 0x90088854, 0x09a00c00, 0x001c227e, 0xa3f00fbf, + 0xbf2aa000, 0xf4f9a619, 0xe97e070b, 0x25fb0042, 0xfc30f404, 0x2cd142f9, 0xbf000006, 0xb2a3b219, + 0xfec4b2b2, 0xac8a0140, 0x00900884, 0x7e09a014, 0xc7001c22, 0x29a0e8a9, 0xa0f0a9c7, 0x18a5b639, + 0x0fbf4aa0, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40445, 0x062cdffc, 0x12f90000, 0x49feffbf, + 0x08999001, 0x9fa0a0b2, 0x888ab1b2, 0x227e0880, 0xa9c7001c, 0x0290b370, 0x0296b01a, 0xb30b0cf4, + 0x3e1d0194, 0xb300826f, 0xb3080390, 0xa0110494, 0xb4aac709, 0x1004a4b3, 0x0082a83e, 0x09a094bd, + 0x0082c23e, 0xf404a6b0, 0xa0b30f0c, 0xa4b31801, 0xa23e3002, 0xa0b30082, 0xa4b31808, 0xb83e2410, + 0x1aa00082, 0x0082b23e, 0xb03e0309, 0x04090082, 0xa43d19a0, 0x0082c43e, 0xb03e0509, 0x94bd0082, + 0xff0a19a0, 0x900149fe, 0x9fbf0899, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40415fb, + 0x2cdffc30, 0xf9000006, 0xfeffbf02, 0x99900149, 0xa0a0b204, 0x80888a9f, 0x1c227e08, 0x70aac700, + 0x1a02a0b3, 0xf402a6b0, 0xa4b30b0c, 0x1a3e1801, 0xa0b30083, 0xa4b30803, 0x0aa00c04, 0x283ea43d, + 0x94bd0083, 0x09a0ff0a, 
0x900149fe, 0x9fbf0499, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf40405fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x408a0140, 0x009008c0, 0x7e09a008, 0xbf001c22, + 0xf019bf0f, 0xacf01fa4, 0xf4f9a60b, 0xe97e070b, 0x15fb0042, 0x062cd904, 0x99bf0000, 0xfefc30f4, + 0xf9a0014f, 0x0083437e, 0x0a00a433, 0x9d3ea4bd, 0x2c8a0083, 0x227e08d3, 0xaac7001c, 0x0149fe12, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xf400f804, 0x22f9fc30, 0x00062cd2, + 0xb229bf00, 0x0141fea0, 0x08d1008a, 0xa00c1190, 0x1c227e19, 0x00008900, 0xff04f1ff, 0xb4a9ffff, + 0x08d1008a, 0x7eb50bff, 0xbf001cb1, 0x3d29bf1f, 0xf4f9a6a4, 0xe97e070b, 0x25fb0042, 0xf830f404, + 0x00062cdf, 0xbf62f900, 0x0149feff, 0x201f9990, 0x9096b29c, 0xa4320199, 0xd3b29fa0, 0x0affb2c4, + 0x1c227e08, 0x0000d900, 0x04bd4000, 0xa9ff0105, 0x845f3e14, 0xa002bc00, 0x7ea45abc, 0xb30083b7, + 0xda1d0014, 0x0000449d, 0x3cb26bb2, 0x0015a97e, 0x0c00a433, 0x040a09f8, 0x0084663e, 0x26010090, + 0xd40cf440, 0x49fea43d, 0x20999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x65fb0042, + 0xf830f408, 0x00062cdf, 0xbf12f900, 0x0149feff, 0xb20c9990, 0xb29fa0b1, 0xc0408aab, 0x1cb17e08, + 0x8375da00, 0xb4bd0000, 0x312d00dc, 0x15a97e01, 0x33040e00, 0xfe1f00a0, 0x00900140, 0x7e0ab208, + 0x320082df, 0x00a433ae, 0xa609bf0d, 0x050bf491, 0x49fe3e0e, 0x0c999001, 0x2cd99fbf, 0xbf000006, + 0xa6ea3299, 0x070bf4f9, 0x0042e97e, 0xf40815fb, 0x2cdffc30, 0xf9000006, 0xfeffbf12, 0x99900149, + 0xa0b0b208, 0x8aa1b29f, 0x7e08d100, 0x89001c22, 0xf1ff0000, 0xffffff04, 0x008ab4a9, 0x0bff08d1, + 0x1cb17eb5, 0xd1708a00, 0xff14f108, 0x0000db03, 0x14b6c000, 0xb51bff10, 0x001cb17e, 0x08d1748a, + 0x001c227e, 0x02010089, 0xf700a3f0, 0x49fe009a, 0x08999001, 0x2cd99fbf, 0xbf000006, 0xa6a43d99, + 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x32f9fc30, 0x00062cd2, 0x3229bf00, 0x8ab032c1, 0xdb08d170, + 0xc2290000, 0x900143fe, 0x39a01033, 0x001cb17e, 0x08d1748a, 0x001c227e, 0xfff8a4f1, 0x290002db, + 0xb5abffa2, 0x08d1708a, 0x001cb17e, 0xf0ff04f0, 0x09940f14, 0x081f940c, 0xffff94f1, 0x08d1708a, + 0xdb0404b6, 0xa22a0000, 0xfdff04f0, 0x01fd0509, 0x050ffd05, 0x7eb50bff, 0xbf001cb1, 0x3d29bf3f, + 0xf4f9a6a4, 0xe97e070b, 0x35fb0042, 0xd430f404, 0xf0b472f9, 0x0149fe15, 0x32209990, 0x019fb5a2, + 0x3214f0b4, 0x32c532b4, 0xdf9fa0d6, 0x0000062c, 0x9990ffbf, 0xa0e3b228, 0x00e4b39f, 0x3e243d0a, + 0x0000873b, 0xbd143dff, 0xbc0e32f4, 0x94f0953f, 0x0d0bf401, 0x1bf40e26, 0x10f03205, 0xff900111, + 0x10f4b301, 0x101630ea, 0xc4120cf4, 0x09c4ff1f, 0x70f9bcff, 0xf41076b0, 0x09f80b0d, 0x3b3e0202, + 0x3ab20087, 0x0083b77e, 0x6d325c32, 0x2a324b32, 0x00856b7e, 0xb77e3ab2, 0x148a0083, 0x227e08d1, + 0x7f49001c, 0xb4a9ffff, 0x08d1148a, 0x001cb17e, 0x0b321a32, 0x640dc43d, 0x0083fd7e, 0xad33a232, + 0xb2009d00, 0x83b77e3a, 0xd1148a00, 0x1c227e08, 0x80abe500, 0xd1148a40, 0x1cb17e08, 0x321a3200, + 0x8d010c0b, 0x7e2625a0, 0x320083fd, 0x00a433a2, 0x7e3ab26e, 0x8a0083b7, 0x7e08d114, 0x49001c22, + 0x43febf7f, 0xb4a9ff01, 0x148a0132, 0x339008d1, 0x1cb17e28, 0x3e010400, 0xbc008718, 0xb77ea440, + 0x148a0083, 0x227e08d1, 0x00bc001c, 0x10a5b690, 0x109039bc, 0x9a600111, 0xa6ff10c4, 0xde1ef407, + 0x900149fe, 0x9c982099, 0xb29bbf01, 0x0ed4bd3a, 0x26d87e20, 0x00a03300, 0xfe02f806, 0x99900149, + 0xd99fbf48, 0x0000062c, 0x2a3299bf, 0x0bf4f9a6, 0x42e97e07, 0x2c75fb00, 0xd9e830f4, 0x0000062c, + 0x99bf52f9, 0x90014ffe, 0xa2b22cff, 0xc08af9a0, 0x227e08b8, 0xa5b2001c, 0x08b8c08a, 0x7e045bc5, + 0xda001cb1, 0x00008343, 0x408cb4bd, 0x0400030d, 0x0015a97e, 0xa800a933, 0xc0408a00, 0x1c227e08, + 0x0141fe00, 0x11902bb2, 0xb21aa028, 0x7fee7e1a, 
0x33a03200, 0x008900ad, 0xf00f19bf, 0x4afe13b2, + 0x049ffd01, 0x900195f0, 0x19a018aa, 0x7fd1a4b2, 0x7e02faf0, 0xbf001581, 0x7e2bb23a, 0x32008481, + 0x3ea433a0, 0x7e4ab221, 0xa600159c, 0xea0df4a1, 0x2bb23abf, 0x0084817e, 0xdf3ea433, 0x063e0400, + 0xa0330088, 0x49fe3a00, 0x28999001, 0x41fe99bf, 0x0142fe01, 0x90241190, 0x19a02022, 0xdf7e2ab2, + 0xa4330082, 0x2bbf1a00, 0xee7e1ab2, 0xa433007f, 0x1bbf0e00, 0x08c0408a, 0x001cb17e, 0x08b8c08a, + 0xb17e5bb2, 0x49fe001c, 0x2c999001, 0x2cd99fbf, 0xbf000006, 0xa60a3299, 0x070bf4f9, 0x0042e97e, + 0xf41855fb, 0x2cdfdc30, 0xf9000006, 0xf4ffbf82, 0x49fef830, 0x4c999001, 0x40fe9fa0, 0x0c999201, + 0x0090f4bd, 0x0142fe2c, 0x9fb593b2, 0x909fa001, 0x02084822, 0x0406743d, 0xd90c0490, 0x00001424, + 0x3bb29abf, 0xff0d080c, 0x0000c17e, 0xef00a433, 0x3f013198, 0x0a943319, 0x011e98e6, 0x90041f18, + 0x35180419, 0x980ea001, 0x0eb5019e, 0x029e9801, 0x98020eb5, 0x0eb5039e, 0x04999803, 0x330409b5, + 0x304a02f0, 0x0cf402f6, 0x00f0330f, 0x01f4331e, 0x891b3eae, 0x04f03300, 0x04f63067, 0x333808f4, + 0x3e9c05f4, 0x18008956, 0x0b18010a, 0x020c1803, 0x7e040d18, 0x3e00856b, 0x98008962, 0x0b98010a, + 0x84ef7e02, 0x89623e00, 0x010a9800, 0x0087587e, 0x0089623e, 0x0a1849bf, 0x030b1801, 0x18020c18, + 0x0e98040d, 0x0091b002, 0xb0014998, 0xed7e0191, 0x623e0085, 0x02f80089, 0x0089673e, 0x9b3e02f8, + 0xad330088, 0x20ff3900, 0x02273528, 0x32031e18, 0x351ab25b, 0x2e350126, 0x0f817e03, 0xbd2ab200, + 0x0f717eb4, 0x889b3e00, 0x0624d900, 0x30f40000, 0xd112f9fc, 0x0000062c, 0x19bf9abf, 0x001414db, + 0xfe010c00, 0x00900140, 0x7e09a008, 0x7e0072f3, 0xbf0089fe, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, + 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, 0x2fe4da08, 0x09a00000, 0x001434d9, + 0x3d9fbf00, 0x98a92094, 0x95f902f9, 0x19bf0fbf, 0xf9a6a43d, 0x7e070bf4, 0xfb0042e9, 0x30f40415, + 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x08009001, 0x257e09a0, 0xd87e0046, 0x0fbf0044, 0xf9a619bf, + 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0x062cd9f4, 0x22f90000, 0x30f499bf, 0x0140fefc, 0x90014ffe, + 0xff901000, 0xb50ca018, 0xa1b2010d, 0xf9a0b2b2, 0x0048637e, 0xa033ff0e, 0x09981e00, 0x2a9cda01, + 0x1cb20000, 0x91b02db2, 0x980ebf00, 0x6e7e07ab, 0xae32004e, 0x900149fe, 0x9fbf1899, 0x00062cd9, + 0x3299bf00, 0xf4f9a6ea, 0xe97e070b, 0x30f40042, 0x0c25fb04, 0x00062cdf, 0xf4ffbf00, 0x49fef030, + 0x0c999001, 0x9cd99fa0, 0x9800002a, 0x49fe079f, 0xb59aa001, 0xfebf019b, 0x9001ff98, 0xbcb20899, + 0xfebbd4bd, 0xa0abb202, 0x0e9ab29f, 0x26d87e04, 0x00a03300, 0x0a02f808, 0x0149fe01, 0xbf0c9990, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x1030f400, 0x30f400f8, 0x062cd9c0, 0x82f90000, + 0x4ffe99bf, 0x60ff9001, 0xa00146fe, 0x9094bdf9, 0x91b02c66, 0x1591b016, 0xfe0144fe, 0x69900148, + 0x40449004, 0xb05c8890, 0x743e0991, 0x50b4008c, 0x33593f16, 0x01410b9d, 0x18015e98, 0x5990045f, + 0x344ea004, 0xe13055e0, 0x019e982b, 0x98014eb5, 0x4eb5029e, 0x039e9802, 0x98034eb5, 0x49b50499, + 0x01f03304, 0x01f6302d, 0x330c08f4, 0x010702fd, 0x008bae3e, 0x98024998, 0x69b5014a, 0x09e0b402, + 0x98016ab5, 0x947e01eb, 0x453e008a, 0x4998008c, 0x014c9802, 0x98044b98, 0x69b5034a, 0x09e0b402, + 0x98016cb5, 0x2a7e01ed, 0x453e008a, 0x4018008c, 0x02429801, 0x58014198, 0x47180743, 0x3000da0c, + 0xb4bd0000, 0x7e02004c, 0x33000b94, 0xc4310104, 0x3ce4ff7b, 0x00daffff, 0x7e000030, 0xb2000b94, + 0x0262b51b, 0xb20161b5, 0xff3ee42c, 0xdad4bdff, 0x00003000, 0x0026d87e, 0x008c493e, 0x62b52cb2, + 0x0161b502, 0x3ee41bb2, 0x00daffff, 0xbd000030, 0x26727ed4, 0xff3ce400, 0xdef4bdff, 0x00003000, + 0xa690febc, 0x0e18f4fc, 0x9726993f, 0x3e0d0bf4, 0x3f008c72, 0x00943399, 
0x01ff903d, 0x0200feb3, + 0x8c493edf, 0x00a43300, 0x3d02092d, 0x358920e4, 0xb034028e, 0x0359182b, 0x5ab2040e, 0x35018e35, + 0x817e0389, 0x8ab2000f, 0x717eb4bd, 0x743e000f, 0x02f8008c, 0x001414da, 0x14757e00, 0x142cd900, + 0xadb20000, 0x4bfe9abf, 0x90080c01, 0xc17e54bb, 0xa9330000, 0xdafe9c00, 0x00001414, 0x0014b57e, + 0x008c743e, 0xdffc30f4, 0x0000062c, 0x0cd122f9, 0xbf000014, 0xfe1ebfff, 0x99900149, 0xa0a2b20c, + 0xe8eab89f, 0x227e0004, 0xa0b2001c, 0x04f01abf, 0xe0aab801, 0x227e0004, 0x0109001c, 0x070004b3, + 0x2018a9c7, 0x0149fe29, 0xbf0c9990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0425fb00, + 0xd9fc30f4, 0x0000140c, 0x99bf22f9, 0x2cd1a2b2, 0xfe000006, 0x9ab80140, 0xbf000510, 0x0c009019, + 0x227e09a0, 0x0fbf001c, 0xa0f3aac7, 0xa619bf2a, 0x070bf4f9, 0x0042e97e, 0xf40425fb, 0x0cd9fc30, + 0xf9000014, 0xb299bf42, 0x062cd1a2, 0xb3b20000, 0x05109ab8, 0xb219bf00, 0x0140fec4, 0xa0140090, + 0x1c227e09, 0x49a9c700, 0xa9c72960, 0xc7396046, 0x4a6081aa, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, + 0x0445fb00, 0xf9ec30f4, 0x140cd142, 0x19bf0000, 0x2cd3a4b2, 0xfe000006, 0x9ab80142, 0xbf000500, + 0x24229039, 0x900140fe, 0x29a01400, 0x001c227e, 0x0aa019bf, 0x05049ab8, 0x1c227e00, 0xb519bf00, + 0x9ab8010a, 0x7e000508, 0xbf001c22, 0x020ab519, 0x050c9ab8, 0x1c227e00, 0xb50bb200, 0x100c030a, + 0x7e7e4ab2, 0x2fbf000b, 0xf9a639bf, 0x7e070bf4, 0xfb0042e9, 0x30f41445, 0x062cdff0, 0x22f90000, + 0x00140cd0, 0xbfffbf00, 0x0149fe0e, 0xb2189990, 0xb89fa0a2, 0x0004f8ea, 0x001c227e, 0x0abfa9b2, + 0x94189e95, 0x9f94089d, 0x0895b618, 0xff0094f1, 0xfd05fefd, 0x00d905f9, 0xfd00ff00, 0xfdfd04d9, + 0x0141fe05, 0x04fcaab8, 0x0c119000, 0xa00140fe, 0x1400901f, 0x227e0fa0, 0x2eb2001c, 0xbd011ab5, + 0x00a1dbd4, 0x2e010000, 0x35f80d3c, 0xdd9002e1, 0x00a1dc01, 0xf9c40000, 0x64ffc70f, 0x3c98b93c, + 0xe935f8bf, 0x90ef2001, 0xd4b303ee, 0xa9c4de04, 0x98c93c0f, 0xc70d2935, 0xc93c64a9, 0x0c293598, + 0x900149fe, 0x9fbf1899, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf41025fb, 0x22f9fc30, + 0x00062cd2, 0xdb29bf00, 0x00002b24, 0x002b26dc, 0x2b28dd00, 0x22da0000, 0xfe00002b, 0x11900141, + 0x7e19a00c, 0xd000807b, 0x00002af4, 0xda010998, 0x00002b14, 0x1e0095f1, 0x7e0109b5, 0x98008df6, + 0x2ada0109, 0xf100002b, 0xb5010095, 0x847e0109, 0x0998008d, 0xbf1ebf01, 0x0095f12f, 0x0109b520, + 0x0bf4efa6, 0x42e97e07, 0x0425fb00, 0x002af4de, 0xfc30f400, 0xe99812f9, 0x98edbf04, 0xef9801ec, + 0xc095f002, 0xd904e9b5, 0x34000001, 0xd905d9fd, 0x4c400000, 0x0905c9fd, 0x062cd1e3, 0xf9fd0000, + 0x091bbf04, 0x04f5f0fe, 0xd904f9fd, 0x00c75000, 0x0905f9fd, 0x14e93505, 0xefb5ff09, 0x15e9b502, + 0xa00140fe, 0x080090ed, 0xa001ecb5, 0x8ebb7e0b, 0xbf0fbf00, 0xf4f9a619, 0xe97e070b, 0x15fb0042, + 0xfc30f404, 0x00140cd9, 0xbf12f900, 0x01abc499, 0x00062cd0, 0x0141fe00, 0x07c49ab8, 0x9009bf00, + 0x19a00811, 0x001cb17e, 0x09bf1fbf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0xdffc30f4, 0x0000062c, + 0xffbf12f9, 0x900149fe, 0xa0b20899, 0xb1b29fa0, 0x0082327e, 0x90b309bf, 0x96b01c02, 0x0b0cf402, + 0x100194b3, 0x0090123e, 0x0a0390b3, 0x060490b3, 0x09a094bd, 0x90b319bf, 0x96b02003, 0x0f0cf403, + 0x160190b3, 0x100294b3, 0x0090363e, 0x0a0490b3, 0x060590b3, 0x19a094bd, 0x900149fe, 0x9fbf0899, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x2cdffc30, 0xf9000006, 0xfeffbf22, + 0x99900149, 0xa0a1b20c, 0xb2b0b29f, 0x00d433c2, 0x00a0b332, 0x60e08a0e, 0x1c227e06, 0xb31aa000, + 0x8a0e0000, 0x7e0660e4, 0xa0001c22, 0x0020b30a, 0x60e88a48, 0x1c227e06, 0x3e2aa000, 0x0a0090d5, + 0x01d43302, 0x00b0b336, 0x8abbbf0e, 0x7e0660e4, 0xb3001cb1, 0xbf0e0020, 0x60e88a2b, 0x1cb17e06, + 
0x0010b300, 0x7e1ab214, 0xbf00a91d, 0x60e08a1b, 0x1cb17e06, 0xfea43d00, 0x99900149, 0xd99fbf0c, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x2cdf0425, 0xbf000006, 0xf430f4ff, 0x900149fe, + 0x9fa00899, 0x01c0008f, 0xc700f9cf, 0x99b34c99, 0xb3014c00, 0xf8f50190, 0x91083e02, 0x00f9cf00, + 0xb34c99c7, 0x01250099, 0xf50190b3, 0x1d3e02f8, 0xffd90091, 0xff7fffff, 0x008fc4f9, 0xf9cf01c0, + 0x4c99c700, 0xdb0099b3, 0x0190b300, 0x3e02f8f5, 0xcf00913e, 0x99c700f9, 0x0090b34c, 0x0190b30e, + 0x3e02f8f6, 0x8f009153, 0xcf01c000, 0x99c700f9, 0x0099b34c, 0x90b3008d, 0x02f8f501, 0x00916b3e, + 0xc700f9cf, 0xd0b34c9d, 0xd0b35e00, 0x02f8f601, 0x0091803e, 0x3501f910, 0xfd3348e9, 0xd900e100, + 0x00000684, 0xb5014ffe, 0xd4d901f9, 0xbf00002f, 0x09fda09a, 0x09f92048, 0x0684de09, 0xe9200000, + 0xecb50409, 0x01fd3502, 0x35014bfe, 0xed3503ed, 0x01e93502, 0x6e7e080c, 0x7b3e0000, 0x09f80092, + 0x00927b3e, 0x01c20089, 0xde0099cf, 0x00002af4, 0x0948ef18, 0xf4f926ff, 0x7b3e9d1b, 0xe08f0092, + 0x00890660, 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, 0x3e9fb200, 0x8f009180, + 0x890660e0, 0xf601c100, 0x99b8009f, 0xf6000100, 0xf2df009c, 0xb8800000, 0x02020099, 0xcf009ff6, + 0x9fb2009f, 0x0091533e, 0x01c20089, 0x95009fcf, 0x94b31ff9, 0x323e8c01, 0xe08f0091, 0x00890660, + 0x9ff601c1, 0x00f1df00, 0x99b88000, 0xf7020100, 0x9fcf009f, 0x3e9fb200, 0xfe00911d, 0x99900149, + 0xd99fbf08, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80c30, 0xdff430f4, 0x0000062c, + 0xffbf22f9, 0x900149fe, 0xc1b21499, 0xf4d99fa0, 0x9800002a, 0xd2b2019f, 0x000000d9, 0xfd080e40, + 0x0bf404f9, 0x00a03375, 0x33030e10, 0x326c01a4, 0x92d73ea0, 0x0e043d00, 0x00b43304, 0xa67c7e5e, + 0x00a03300, 0x00804b24, 0x07000433, 0xfe00884b, 0x080c014a, 0x7e0caa90, 0x0e00a79f, 0x00a43306, + 0x93513e3a, 0x0680d900, 0x99bf0000, 0x90b3080e, 0x4ffe2900, 0x0cff9001, 0x10000033, 0xa0209e98, + 0x219998fe, 0x0093303e, 0xa0229e98, 0x239998fe, 0x3e01f9b5, 0xfe009351, 0x99900149, 0xd99fbf14, + 0x0000062c, 0xea3299bf, 0x0bf4f9a6, 0x93663e20, 0x0149fe00, 0xbf0c9990, 0xa01f0e9f, 0x0199981f, + 0x373e29a0, 0xe97e0093, 0x25fb0042, 0x062cde0c, 0xe9bf0000, 0xfefc30f4, 0xf9a0014f, 0xa9a02409, + 0xe9bfffbf, 0xf9a61f0a, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0xdfe430f4, 0x0000062c, 0xffbf82f9, + 0x900149fe, 0xf4d03c99, 0xa000002a, 0x2d09989f, 0x98280298, 0x0f982b01, 0x0d91b029, 0x980bc1b0, + 0xd1b02a09, 0x09e1b00a, 0xbf2e0098, 0x6492ffcc, 0xb0741fff, 0xc1b00e01, 0x0da0330c, 0x0ea4330c, + 0x93f53e10, 0xb264b200, 0x93fb3e75, 0xbd44bd00, 0x93fb3e54, 0x0d40b400, 0x090e50b4, 0xf5b926ff, + 0x9500851b, 0x4c9d015d, 0x5555d901, 0x33d05555, 0xff333333, 0xd9ffe4c9, 0xe24ebcf4, 0xfff35fbc, + 0xf994c4e0, 0x02e5b61e, 0xffe59eff, 0x0fd2d4f0, 0x950f0f0f, 0xe0ff02f1, 0xb410ffa4, 0xbca0acbc, + 0xb994b1bd, 0x04ae951c, 0xff04bf95, 0xeabce59e, 0xf1fbbce0, 0xdc04e2fd, 0x01010101, 0xb2b4f2ff, + 0x7eeab2cd, 0xb600415b, 0xb0b318b5, 0x090e0b24, 0x3300bdb3, 0x00b6b001, 0xf00b3cf0, 0x84bd0136, + 0x00949a3e, 0x0cffbac4, 0x7e240b40, 0x0e009fe5, 0x00a93304, 0x58b20112, 0x7d9543b2, 0x016c9d01, + 0x333333d1, 0x5555d233, 0xc2ff5555, 0xf4d2ffe4, 0xbce26ebc, 0xe1fff37f, 0x1ef994c4, 0xff02e5b6, + 0xf1ffe59e, 0x0f0fd4d4, 0xf0950f0f, 0xa4e1ff02, 0xbcb401ff, 0xbdbca0ac, 0x1cb994b1, 0x9504ae95, + 0x9eff04bf, 0xe0eabce5, 0xfdf1fbbc, 0x01dc04e4, 0xff010101, 0xcdb2b4f4, 0x5b7eeab2, 0xb5950041, + 0x2456b018, 0x009d0cf5, 0xb40de0b4, 0xfd950ef0, 0x01ec9d01, 0xffe4c2ff, 0xc0b4f4d2, 0x0ed0b40d, + 0xbce2cebc, 0xe1fff3df, 0x1ef994c4, 0xff02e5b6, 0xf1ffe59e, 0x02f095d4, 0xffa4e1ff, 0xacbcb401, + 0xb1bdbca0, 0x951cb994, 
0xbf9504ae, 0xe59eff04, 0xbce0eabc, 0xe4fdf1fb, 0x0101dc04, 0xf4ff0101, + 0xb2cdb2b4, 0x415b7eea, 0x18b99500, 0xf42496b0, 0xd0b4320c, 0x0fffde0c, 0x004fff00, 0x0c94b6f0, + 0xfff4dfff, 0xfefdf55f, 0x0be0b404, 0xa005f9fd, 0x0af0b4ef, 0xf3a01f0e, 0xa00990b4, 0x95a33e98, + 0xfe060e00, 0x99900149, 0xd99fbf3c, 0x0000062c, 0xea3299bf, 0x0bf4f9a6, 0x42e97e07, 0x1c85fb00, + 0xdffc30f4, 0x0000062c, 0xffbf22f9, 0x010000d9, 0xffa4f001, 0xb6ffb4f0, 0xb4b614a4, 0xa0a9bc0f, + 0x900149fe, 0xabbc0c99, 0xb29fa020, 0xb8d1b2c0, 0x0020242a, 0x001c227e, 0xaac70309, 0x00a0b364, + 0xb301090c, 0x3d0604a0, 0xb8092094, 0x0030142a, 0x001c227e, 0xaac70309, 0x00a0b364, 0xb301090c, + 0x3d0604a0, 0xfe192094, 0x99900149, 0xd99fbf0c, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, + 0x30f40425, 0x062cdff0, 0x32f90000, 0x30f4ffbf, 0x0149fefc, 0xa0209990, 0x0141fe9f, 0x119094bd, + 0x014efe1c, 0xee9019a0, 0xb2e9a018, 0x09d3b2c2, 0xf4b926ff, 0x40fe201b, 0x90240b01, 0x0cb21400, + 0x00a3d97e, 0xb3ffa4f0, 0xbf3c1fa4, 0x3e10a000, 0xb00096ad, 0xa4f000e1, 0xffb4f0ff, 0x050c1eb2, + 0x527e240d, 0xa4f000a4, 0x1fa4b3ff, 0x0149fe1a, 0xbf1c9990, 0xa01f0a99, 0x0149fe29, 0xbf189990, + 0xfe39a099, 0x99900149, 0xd99fbf20, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x35fb0430, + 0xf030f410, 0x00062cdf, 0xbf32f900, 0xfc30f4ff, 0x900149fe, 0x9fa02099, 0xbd0141fe, 0x18119094, + 0xa0014efe, 0x14ee9019, 0xc2b2e9a0, 0xff09d3b2, 0x1bf4b926, 0x0140fe1d, 0x0090240b, 0x7e0cb21c, + 0x3300a3d9, 0xbf391fa4, 0x3e10a000, 0xb0009746, 0xa4f000e1, 0xffb4f0ff, 0x080c1eb2, 0x527e240d, + 0xa43300a4, 0x49fe1a1f, 0x18999001, 0x1f0a99bf, 0x49fe29a0, 0x14999001, 0x39a099bf, 0x900149fe, + 0x9fbf2099, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xfb0430f4, 0x30f41035, 0x062cdff4, + 0x22f90000, 0x30f4ffbf, 0x0149fefc, 0xa0189990, 0x014efe9f, 0xee9094bd, 0x0140fe14, 0x0090e9a0, + 0xb209a010, 0x09d2b2c1, 0xf4b926ff, 0xecb20f1b, 0xd97e240b, 0xcb3e00a3, 0x01b00097, 0xffa4f000, + 0x0cffb4f0, 0x7e240d01, 0x3300a452, 0xfe201fa4, 0x99900149, 0xfe99bf14, 0xff90014f, 0xe7ffbf10, + 0xa0038199, 0x81ffe719, 0xfe2fa003, 0x99900149, 0xd99fbf18, 0x0000062c, 0xf9a699bf, 0x7e070bf4, + 0xf40042e9, 0x25fb0430, 0xf430f40c, 0x00062cdf, 0xbf22f900, 0xfc30f4ff, 0x900149fe, 0x9fa01899, + 0xbd014efe, 0x14ee9094, 0xa00140fe, 0x100090e9, 0xc1b209a0, 0xff09d2b2, 0x1bf4b926, 0x0becb20f, + 0xa3d97e24, 0x985a3e00, 0x0001b000, 0xf0ffa4f0, 0x200cffb4, 0x527e240d, 0xa43300a4, 0x49fe181f, + 0x14999001, 0x19a099bf, 0x900149fe, 0x99bf1099, 0x49fe29a0, 0x18999001, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0x0c25fb04, 0xdff430f4, 0x0000062c, 0xffbf52f9, 0x900149fe, + 0xb0322099, 0xc4b29fa0, 0xa6b00405, 0xbb0cf523, 0xfe34bd00, 0x41fe0142, 0x18229001, 0xa01c1190, + 0xb213a023, 0x7e2cb21b, 0x32009fa6, 0x1fad33a5, 0x1abf009b, 0xd37e2bbf, 0xa43300a0, 0x43a00a00, + 0x0099683e, 0x19bf2fbf, 0xb60ff4b6, 0xf9bc1494, 0x0000df90, 0x9fbc0101, 0x04003390, 0x0406302e, + 0x330b0cf4, 0x3e650304, 0x33009917, 0x332d0500, 0x3e590604, 0xb8009948, 0x0022889a, 0x001c227e, + 0x3e10a5b6, 0x90009932, 0x227e8c9a, 0xa4f1001c, 0x4aa003ff, 0x0099683e, 0x32889ab8, 0x1c227e00, + 0x00a3f000, 0x0099323e, 0x328c9ab8, 0x1c227e00, 0x18ae9500, 0xc7e8a9c7, 0xa4f0f0af, 0x909fbcff, + 0xbc909abc, 0x49a0909e, 0x900149fe, 0x9fbf2099, 0x00062cd9, 0x3299bf00, 0xf4f9a65a, 0xe97e070b, + 0x55fb0042, 0xf430f40c, 0x00062cdf, 0xbf42f900, 0x0149feff, 0xfe1c9990, 0x9fa0014e, 0xbd18ee90, + 0x014ffe94, 0xff90e9a0, 0x32f9a014, 0x09c4b2a1, 0xf4b926ff, 0xe3b22b1b, 0x04bdf2b2, 0x1b320ab2, + 0x907e3cb2, 0xa4330098, 0x2fbf311f, 0x009039bf, 
0x909fbc01, 0x04b329a0, 0xed3ee624, 0xbac40099, + 0x32fcb2ff, 0x98907e1b, 0x1fa43300, 0x0149fe0e, 0xbf149990, 0xfe49a099, 0x99900149, 0xd99fbf1c, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40c45, 0x062cdfb8, 0x82f90000, 0x49feffbf, + 0x68999001, 0x9fa00105, 0x3011c1b0, 0xd1b049b1, 0x0fe1b010, 0xa033c4bf, 0xa630180a, 0x0f08f40a, + 0xa0330205, 0x03050c0b, 0x060ca033, 0x94bd543d, 0xbd014ffe, 0x60ff9064, 0xf9b504bd, 0xbdf9a001, + 0x08ff9274, 0xf1b2f920, 0x1bb20ab2, 0x00a2107e, 0x801fad33, 0x33193f04, 0x0a1c0090, 0xb2b4bd01, + 0x42667e0c, 0x957bff00, 0xb2a56aff, 0xb2aeb29f, 0x90f7b2e6, 0x04b30100, 0x083ed224, 0x9034009f, + 0x30040a49, 0x0cf52396, 0xc034044a, 0xbd010a49, 0xffc4f0b4, 0x0042667e, 0xb1b2a0b2, 0xdf0849c5, + 0xff000008, 0xb0f49fff, 0x7c7e13f1, 0xa43300a6, 0x46fe4d00, 0x0a24bd01, 0x3d34bd1f, 0x60669044, + 0x009b0c3e, 0xf40109c4, 0x4a321c0b, 0x6cb25b32, 0x00a5cc7e, 0x0f1fa433, 0x6f986ebf, 0x202ebc01, + 0x10313fbc, 0x11950144, 0x01009d01, 0xf49501ff, 0xa933d51b, 0x3e03c71f, 0x34009ef0, 0xff0949b0, + 0x0bf4b926, 0x2af4df78, 0x5ec40000, 0x03e994ff, 0x98909fbc, 0x9998189f, 0x04f1fd17, 0xa60490fd, + 0x081bf490, 0x0bf4f1a6, 0x9402ff54, 0xa6f413ff, 0x081bf490, 0x0bf4f1a6, 0x2af4d944, 0xef940000, + 0xf0f9bc03, 0xb91ff998, 0xfe90000c, 0x001bb97c, 0xb5049cfd, 0xe9981ff9, 0x5cfd9001, 0x9bfd090a, + 0x01e9b504, 0xfd17f998, 0xf9b5049c, 0x01d99817, 0xb5049bfd, 0xf03e01d9, 0x54f0009e, 0x2af4dbff, + 0x59b20000, 0xbc0394b6, 0x9f98909b, 0x1f999820, 0xfd1451b0, 0x90fd04f1, 0xf490a604, 0xf1a6081b, + 0x0e100bf4, 0x3044bd01, 0x54bd4ae1, 0x009c1a3e, 0x1db244bd, 0x54bd0cb2, 0x0f3eb43d, 0xc9c4009c, + 0x250bf401, 0xde14f0b4, 0x000006cc, 0xb6ffb9c4, 0x9fbc0394, 0x0394b690, 0xbf909ebc, 0x019f989e, + 0xbc404ebc, 0xbb10515f, 0x01dd9501, 0xff01cc9d, 0x1bf495cd, 0x30f43dcc, 0x90b44af1, 0x2af4db14, + 0x94b60000, 0x909bbc03, 0x98189f98, 0xf1fd1799, 0x0490fd04, 0x1bf490a6, 0xf5f1a609, 0xbd01200b, + 0xb0e43d94, 0x91b01791, 0x4be13016, 0x009d1b3e, 0xf50169c4, 0x3400b90b, 0xc0344b90, 0x06acdf4b, + 0x010a0000, 0xbdff94f0, 0x069894b4, 0xbcffc4f0, 0x667e808f, 0x50d20042, 0xb900002b, 0xb1b000b9, + 0x0ea1b00d, 0xb000abb9, 0xb1b00b91, 0x3d8db20c, 0x4ba03434, 0x3209d1b0, 0x014cfe3b, 0x7e58cc90, + 0xbf00a5cc, 0x012c9829, 0x3309d0b4, 0xbd351fa0, 0xb5dea0e4, 0xb0b401de, 0x082f980c, 0xfd092e98, + 0x29a0049b, 0xff0bb0b4, 0x29b594cb, 0x0c90b401, 0xb504ebfd, 0xf9fd092e, 0x082fb504, 0x009cfc3e, + 0xb416e0b4, 0x8fb517f0, 0xb48ea001, 0x9ffd0ef0, 0xb429a005, 0xcbff0db0, 0x0129b595, 0x90013310, + 0x229008dd, 0x08889008, 0x89043433, 0x954be034, 0x669d0177, 0x01ee1001, 0xff4be130, 0x1bf59567, + 0xf034ff32, 0x26ff0949, 0x290bf4f9, 0xdb1490b4, 0x00002af4, 0xbc0394b6, 0x9f98909b, 0x17999818, + 0xfd04f1fd, 0x90a60490, 0x01d71bf5, 0x1bf5f1a6, 0xe0b401d1, 0x04e5f013, 0xfe13e1b0, 0x99900149, + 0x9896bf60, 0x90b40197, 0x2af4df14, 0xe43d0000, 0x94b624bd, 0xbc34bd03, 0xe130909f, 0x7c9b9057, + 0xb05c9890, 0x0e3e0ab1, 0x09c4009e, 0x720bf401, 0x3414f0b4, 0xacde5790, 0xdb000006, 0x000006cc, + 0x94f0010a, 0x0394b6ff, 0x94909fbc, 0x94b6039f, 0xf0fbbc03, 0xbf909ebc, 0x01979896, 0xa001f7b5, + 0x57c034f6, 0x26bcb4bd, 0x3137bc20, 0x7effc4f0, 0x98004266, 0x8ebf018f, 0xfd00b9b9, 0xa9b904f9, + 0x018fb500, 0xa004e9fd, 0x0af0b48e, 0xff98f9bf, 0x059afd01, 0xb405fbfd, 0xb9a00ab0, 0x3401bfb5, + 0x119557e0, 0x01009d01, 0x3001ee10, 0x01ff57e1, 0x791bf595, 0x0149feff, 0xb5609990, 0x96a00197, + 0x334af034, 0x00b600fd, 0xbcc224bc, 0xd4b3d335, 0xfe891300, 0xc9a60fff, 0x3d090cf4, 0x9e9d3ef4, + 0x16df9400, 0x950ada95, 0xf9ff0ac9, 0xb2afb2b5, 0xb2ecb2be, 0x00d4b3fd, 
0x89010f0f, 0xa60ffffe, + 0x3c0df4c9, 0x940abf95, 0xab9516a9, 0xa59fff0a, 0xaeb2bfb2, 0xfdb2ecb2, 0x0f00d4b3, 0xfe89020f, + 0xc9a60fff, 0x95190df4, 0xbd950aaf, 0x16b9940a, 0xb2059ffd, 0xb29eb2df, 0x0ffdb2ec, 0x13b0b403, + 0x0903f4f0, 0x94b9fffc, 0xb3e5f9ff, 0x890d00d4, 0xa60fffff, 0x0a0df4c9, 0x3e08e9c5, 0xff009edf, + 0x94b694c9, 0x000fdf04, 0xefffff00, 0x05f9fdf4, 0xf9fff709, 0x9edf3e94, 0x1390b400, 0xb40895f0, + 0x1f0a10e0, 0xf0b4e2a0, 0xb4f3a00f, 0xb9a011b0, 0x900149fe, 0x9fbf6899, 0x00062cd9, 0xa699bf00, + 0x280bf4f9, 0x009f253e, 0xb249e034, 0x0973b262, 0xf5e926ff, 0xb2fb8b1b, 0x3e71b260, 0x0a009abc, + 0x9ef03e09, 0x42e97e00, 0x4885fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0x8f9fa001, 0xcf01c000, + 0x99c700f9, 0x0090b34c, 0x0190b322, 0x3e02f8f6, 0xcf009f3f, 0x99c700f9, 0x0090b34c, 0x0190b333, + 0x3e02f8f6, 0x89009f53, 0xf601c100, 0x99b8009a, 0xf6000100, 0xf2df009b, 0xb8800000, 0x02020099, + 0xcf009ff6, 0x9fb2009f, 0x009f533e, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x95a9b29f, 0x060a02af, 0xf408f6b0, + 0xbfa00c0c, 0x0a0394f0, 0xfec9a01f, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cdf00, 0xfebf0000, 0xfefc30f4, 0xbb920149, 0xbf9ea001, 0xbcf9bf9e, 0xbcffb0bc, + 0xffb4f0bc, 0xacf0aba6, 0xf4e9a608, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdffc30, 0xf9000006, + 0xfeffbf02, 0x99900149, 0xa0c0b204, 0xb3dcb29f, 0xb03902a0, 0x0cf402a6, 0x01a4b30b, 0xa04f3e10, + 0x07a0b300, 0x08a0b32d, 0x3e060a3b, 0x0a00a0b8, 0x0104b31f, 0xc4dfbf67, 0x1f0a1fb9, 0xfd9409bc, + 0xdfa005f9, 0x00a0b83e, 0xb63ed0a0, 0xb9c400a0, 0xb6ddbf07, 0x9ec40294, 0x0399901f, 0x00a0943e, + 0xbfcdddbf, 0x90ffbc05, 0xbc909fbc, 0x9ec49099, 0x0599901f, 0xf00099b9, 0x9ebc1f94, 0xbbff0f90, + 0xf0ff05f9, 0x04febb94, 0xb9049ebb, 0xfdfd00ff, 0x05f9fd04, 0x1f0acfa0, 0x900149fe, 0x9fbf0499, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40405fb, 0x2cd9fc30, 0xf9000006, 0xfe99bf12, + 0xff90014f, 0xb2b1b208, 0x7ef9a0a0, 0x330046f7, 0x3d0a00a0, 0xa1273ea4, 0x0f199400, 0x017000df, + 0x1404b601, 0xbc000fbc, 0x0ab80009, 0x7e000600, 0xb2001c22, 0x800ab8a1, 0x227e0003, 0xa1fd001c, + 0x01a6f005, 0xfe01a4f0, 0x99900149, 0xd99fbf08, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, + 0x30f40415, 0x062cdff4, 0x32f90000, 0x49feffbf, 0x18999001, 0x9fa0b2b2, 0xa6b00603, 0x960cf523, + 0x0141fe00, 0x900140fe, 0x00901411, 0xb21bb210, 0x9fa67e0c, 0x33a33200, 0xbf7c1fa4, 0x7e0bbf1a, + 0x3300a0d3, 0xbf5a00a0, 0xb60abf19, 0xa4b61494, 0xa0a9bc0f, 0x010000d9, 0xa0a9bc01, 0x001c227e, + 0xb3ffa4f0, 0xb04c04a0, 0x0cf404a6, 0x02a6b013, 0xb0390cf4, 0x18f401a6, 0xa1dd3e2d, 0x08a0b300, + 0x08a6b02c, 0xb30b0cf4, 0x3e1005a4, 0xb300a1dd, 0xb31a0aa0, 0x03100ba0, 0xa1f33e06, 0x3e94bd00, + 0x0900a1f1, 0xa1f13e01, 0x3e020900, 0x0900a1f1, 0xfe29a003, 0x99900149, 0xd99fbf18, 0x0000062c, + 0x3a3299bf, 0x0bf4f9a6, 0x42e97e07, 0x0c35fb00, 0xd9f830f4, 0x0000062c, 0x99bf12f9, 0x90014ffe, + 0xb1b20cff, 0xff09f9a0, 0x900140fe, 0x09a00800, 0x427e0bb2, 0xa43300a1, 0x09bf0e1f, 0xf00296b0, + 0x19200b9c, 0x900149fe, 0x9fbf0c99, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40815fb, + 0x2cdfe830, 0xf9000006, 0xfeffbf42, 0x99900149, 0xa0b0b228, 0x33c3b29f, 0x304502a0, 0x0cf402a6, + 0x01ad330c, 0x983e00a6, 0xa93300a2, 0x3300b407, 0x009808ad, 0x00a34c3e, 0x40febab2, 0x27009001, + 0x107e0bb2, 0xa23200a2, 0x141fad33, 0x30093f01, 0x9cf00096, 0x0196f00b, 0x00a3ba3e, 0xb6300402, + 0xfb0cf523, 0xffbac400, 0xfe0144fe, 0x44900141, 0x1c119020, 0x1cb24bb2, 0x009fa67e, 0xad33a232, + 
0xc400de1f, 0x40feff0a, 0x27009001, 0x107e0bb2, 0xa23200a2, 0xc81fad33, 0x33093f00, 0xbd0a0094, + 0xa3ba3e94, 0xbf49bf00, 0x1494b61a, 0xbc0fa4b6, 0x0cd9a0a9, 0xbc010176, 0x227ea0a9, 0xa5b6001c, + 0x07a63008, 0x02090df4, 0xa3bc3e06, 0xffafc400, 0x000048d9, 0xf89fbc00, 0xbc3e3fa0, 0xbab200a3, + 0x427ecbb2, 0xa23200a1, 0x00a3bc3e, 0xb6300402, 0x6b0cf423, 0xfeffbac4, 0x44fe0141, 0x18119001, + 0xb2144490, 0x7e4cb21b, 0x32009fa6, 0x1fa433a2, 0xff0ac44f, 0x900140fe, 0x0bb22000, 0x00a2107e, + 0xa433a232, 0x093f3a1f, 0x0a009433, 0xba3e3f09, 0x1a3f00a3, 0x41fe4b3f, 0x0140fe01, 0x90271190, + 0x1cb21c00, 0xc07e0db2, 0x093f0095, 0x94f01f3f, 0x07f4f007, 0xfd0394b6, 0x39a0059f, 0x900149fe, + 0x9fbf2899, 0x00062cd9, 0x3299bf00, 0xf4f9a62a, 0xe97e070b, 0x45fb0042, 0xf830f418, 0x00062cdf, + 0xbf62f900, 0x0149feff, 0x9990ff01, 0x0142fe20, 0xb3b2a532, 0x9fa0c4b2, 0x229004bd, 0x3e16b21c, + 0x3200a42c, 0xb20bb25a, 0xa25f7e2c, 0x1fa43300, 0xa629bf2a, 0x091bf416, 0x293e91b2, 0x19a600a4, + 0x0a090bf4, 0xa4373e09, 0x01009000, 0x08f403a6, 0x0a22bfd5, 0xfe42a01f, 0x99900149, 0xd99fbf20, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40865, 0x062cdff8, 0x82f90000, 0x49feffbf, + 0x28999001, 0xb0b2c2b2, 0xcbb2d5b2, 0xe7b29fa0, 0x00bca4b2, 0x16fc7ea0, 0xb25bb200, 0xc022bca3, + 0x32bc0ab2, 0x9fe57e60, 0xb4040100, 0xa0330c80, 0x30b27500, 0x900143fe, 0xbd3e2433, 0x4a3200a4, + 0x3cb20bb2, 0x00a25f7e, 0xa433a132, 0x3cbf591f, 0x4ab20bb2, 0xb2010090, 0xa0177e7d, 0xf405a600, + 0x06a64218, 0x3eda08f4, 0x3200a4f3, 0xb20bb24a, 0xa25f7e3c, 0x33a13200, 0xbf2c1fa4, 0xb20bb23c, + 0x0100904a, 0x177e8db2, 0x05a600a0, 0x3e1008f4, 0xfe00a503, 0x62bc0143, 0x24339020, 0x08f402a6, + 0xfe1f01cd, 0x99900149, 0xd99fbf28, 0x0000062c, 0x1a3299bf, 0x0bf4f9a6, 0x42e97e07, 0x0885fb00, + 0xdff430f4, 0x0000062c, 0xffbf62f9, 0x41fe94bd, 0x0140fe01, 0x90201190, 0x19a01c00, 0x49fe09a0, + 0x24999001, 0xc6b2b2b2, 0x9fa0d5b2, 0x0cb21bb2, 0x009fa67e, 0xa433a332, 0x1abf591f, 0xd37e0bbf, + 0xa43300a0, 0x09030a00, 0x00a5af3e, 0x00bf19bf, 0xb61494b6, 0x09bc0f04, 0x010e7e00, 0x5000d900, + 0x09bc0101, 0x4002bc00, 0xb21060bc, 0x1c227e1a, 0xb2a0b200, 0x1c227e4a, 0xb2a2b200, 0x1c227e1a, + 0xf40aa600, 0x087ee81b, 0x50b50001, 0xfe52a001, 0x99900149, 0xd99fbf24, 0x0000062c, 0x3a3299bf, + 0x0bf4f9a6, 0x42e97e07, 0x0c65fb00, 0xdff430f4, 0x0000062c, 0xffbf02f9, 0x900149fe, 0xc0b20c99, + 0xaf329fa0, 0xf630040a, 0x780cf423, 0x4dfe94bd, 0x04dd9001, 0xa001d9b5, 0x01b033d9, 0x01b63025, + 0x331108f4, 0x0a2802b0, 0x03b4331f, 0xa6383e3c, 0xfffac400, 0x4c154c4b, 0x413e1550, 0xfac400a6, + 0x1d4c4bff, 0x3e1d504c, 0xc400a641, 0xf44bfffa, 0x15f84c15, 0x00a6413e, 0x4bfffac4, 0xf84c1df4, + 0xa5207e1d, 0x0149fe00, 0xbf049990, 0x0199989e, 0x9506e5b6, 0x94b6069f, 0x010fb51a, 0xa005e9fd, + 0x0149fe0e, 0xbf0c9990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0c05fb00, 0x00062cde, + 0xf4e9bf00, 0x4ffefc30, 0xd9f9a001, 0x00002af4, 0x9a18ffbf, 0xa6e9bf15, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x080e0149, 0xa0339fa0, 0x1f0e1901, 0xf401a630, + 0xa9121008, 0x30060e30, 0x0cf40496, 0xfe070e05, 0x9fbf0149, 0x00062cd9, 0x3299bf00, 0xf4f9a6ea, + 0xe97e070b, 0x30f40042, 0xf400f804, 0x22f9f830, 0x00062cd2, 0xfe29bf00, 0x11900141, 0x0140fe10, + 0x0e7e19a0, 0x00900001, 0x7e0ab20c, 0xdf008d00, 0x00002af4, 0xf9980ebf, 0x15feb515, 0x9cf09ea6, + 0x58f9350b, 0x0001087e, 0x29bf1fbf, 0x0bf4f9a6, 0x42e97e07, 0x0825fb00, 0x00062cdf, 0xf4ffbf00, + 0x49fefc30, 0xa0adb201, 0x2af4d99f, 0x99180000, 0xb2beb215, 0x009033cf, 0x2b0cd91c, 0x9c980000, + 0xb29bbf01, 0x26d87efa, 
0xa6a57e00, 0xa7853e00, 0x0680d900, 0x9abf0000, 0xecb2cbb2, 0x7ea0adbc, + 0x0a000b7e, 0x0149fe1f, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x30f40042, 0xd900f804, + 0x0000062c, 0x30f499bf, 0x014ffefc, 0xf9a0bdb2, 0xa0b3ceb2, 0xb6b13800, 0x0cf423bf, 0x23c04930, + 0xa6029bbb, 0x250cf4c9, 0x002af4d9, 0x15991800, 0x9033010c, 0x0cd91a00, 0x9800002b, 0x9bbf019c, + 0x0026727e, 0xec3eac32, 0x020c00a7, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0xf9a6ca32, 0x7e070bf4, + 0xf40042e9, 0x00f80430, 0xdfd830f4, 0x0000062c, 0xffbf72f9, 0x900149fe, 0x46fe4499, 0xb29fa001, + 0xb2b1b2a0, 0x246690c4, 0x23c0a6b1, 0xb17d0cf4, 0xf423c0b6, 0xbabc760c, 0xc096b190, 0x6c0cf423, + 0x000e067e, 0xd50302c4, 0x00002b0c, 0x07ff29c4, 0x3209bc20, 0x00a89b3e, 0x5c985bbf, 0xb26ab201, + 0x7e200e3d, 0x33002672, 0x000a00a0, 0xa8a13e06, 0xff29c400, 0xa60279bc, 0x050df401, 0x2bc410b2, + 0xff0cc4ff, 0x04f04ab2, 0xb06bbcff, 0xbb203390, 0x7e7e0210, 0x40bc000b, 0xb3243d40, 0x00bd0014, + 0x0ddd7e1f, 0xa8ab3e00, 0xfe060000, 0x99900149, 0xd99fbf44, 0x0000062c, 0x0a3299bf, 0x0bf4f9a6, + 0x42e97e07, 0x2875fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, 0xa0adb201, 0x2af4d99f, 0x99180000, + 0x33beb215, 0x7e0c0090, 0x3e00a808, 0xd900a903, 0x00000680, 0xcab29bbf, 0xbdbcecb2, 0x0b7e7eb0, + 0xfe1f0a00, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0xf430f400, + 0x00062cdf, 0xbf32f900, 0x0149feff, 0xb2189990, 0xbf9fa0a2, 0x9899c7a9, 0xaa1d99b3, 0x2af4d900, + 0x99980000, 0x4299c702, 0x9a0099b3, 0x0140fe00, 0x0090010a, 0x7e0bb214, 0x3300b1cd, 0xfe551fa4, + 0x020a014b, 0x7e10bb90, 0x3300b1cd, 0xbf451fa4, 0xbd010309, 0x1493ff04, 0xf09530bc, 0x1bf40194, + 0x0204b312, 0xb7f07e0f, 0x00a03300, 0x0415f007, 0xb3010090, 0xfee50704, 0x99900149, 0xa699bf14, + 0x150bf419, 0x1bb2010a, 0x00b20f7e, 0x0a1fa033, 0xe43e09f8, 0x2ebf00a9, 0x900149fe, 0x99bf1099, + 0xffffffdf, 0xfd1f0abf, 0x99b904ef, 0x9419ff00, 0xf00b9cf0, 0x94b60196, 0x059efd1e, 0xe43e29a0, + 0x1f0a00a9, 0x900149fe, 0x9fbf1899, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40c35fb, + 0x2cdfe430, 0xf9000006, 0xfeffbf52, 0x99900149, 0xa0a3b230, 0xb2afbf9f, 0xc7b4b2c5, 0xddb31dfd, + 0x95075b00, 0xf99508fa, 0x32f03210, 0x339232a1, 0x00d515f9, 0xf415f630, 0xf933460c, 0x3005fa05, + 0x0cf405f6, 0x01f9331c, 0x1f0a0087, 0xf501f630, 0x30073808, 0x0cf503f6, 0xe93e0723, 0xf93300aa, + 0x30041910, 0x0cf410f6, 0x0df6300e, 0x070d08f5, 0x00ae163e, 0x0511fd33, 0xae203e07, 0x1df93300, + 0xf6300641, 0x250cf41d, 0xf918f933, 0x18f63003, 0x330c0cf4, 0x06e617fd, 0x00b0c83e, 0x001af933, + 0x1cfd3301, 0x853e06d8, 0xf93300b1, 0x30026221, 0x0cf421f6, 0x1efd330c, 0x8e3e06c4, 0xf93300ab, + 0x33063824, 0x06b625fd, 0x00b1593e, 0x002af4df, 0x14f91800, 0x18f4a926, 0xffa9c40d, 0x3e98f9bc, + 0xa000b02e, 0xb0f03ebd, 0x33080a00, 0x069c001d, 0x0079e27e, 0x0a030033, 0xfdff0049, 0x4aa004a9, + 0x00b0f03e, 0x1630030a, 0x7e0cf504, 0x2af4d906, 0x99bf0000, 0x080a010e, 0xc7f4e1bc, 0x9ffd9899, + 0x660bf504, 0x02103306, 0x0410330e, 0x3eea3210, 0xb200ab45, 0xab3b3eea, 0xfea4bd00, 0xbb90014b, + 0x79747e2c, 0xa6a57e00, 0x1fad3300, 0x49fe063e, 0x2c999001, 0xe9959ebf, 0x0190b31f, 0x3ed4bd0a, + 0xdd00ab68, 0xff000000, 0x3380e9c4, 0xbd0a0094, 0xab7f3ef4, 0xb0010f00, 0x1ff400e6, 0x95ff0f05, + 0xd9ff08e9, 0x90f9bc95, 0x873e49a0, 0x9b3200b1, 0x5db24cb2, 0x0092987e, 0x00b1873e, 0x5507a033, + 0xf407a630, 0xf4d9120c, 0x3000002a, 0x0df402a6, 0xabe53e2b, 0x0ca63000, 0xd9120cf4, 0x00002af4, + 0xf409a630, 0x083e5718, 0xa03300ac, 0x030a610d, 0xb70e1d33, 0xac3b3e05, 0x01999800, 0x000000df, + 0xac213e04, 0x01999800, 0x000000df, 0xac213e08, 
0x2af4d900, 0x99980000, 0x00008f02, 0x049ffd02, + 0x00c61bf5, 0x00ad063e, 0x8f029998, 0xfd040000, 0x1bf5049f, 0x063e00c4, 0x999800ad, 0x00008f02, + 0x049ffd01, 0x3e2f1bf4, 0xd900ad06, 0x00002af4, 0x4000008f, 0x3e029998, 0xd900ac48, 0x00002af4, + 0xdf029998, 0x00800000, 0xf5049ffd, 0x3e00a91b, 0x3300ad06, 0x30730710, 0x0cf40716, 0x0210331a, + 0x0216304b, 0x33540cf4, 0x33270010, 0x0485011d, 0x00ac983e, 0xf40c1630, 0x16300d0c, 0x6518f409, + 0x00acd63e, 0xf40e1630, 0xf03e6d0d, 0x4ab200b0, 0x00936d7e, 0x00b1873e, 0x2b321a32, 0x5db24cb2, + 0x0098097e, 0x00b1873e, 0x2b321a32, 0x5db24cb2, 0x00977a7e, 0x00b1873e, 0x2b321a32, 0x857e4cb2, + 0x873e0099, 0x2b3200b1, 0x5db24cb2, 0xe17e070a, 0x873e0096, 0x080a00b1, 0x4cb22b32, 0x427e5db2, + 0x1a320096, 0x3cb22b32, 0x5eb24db2, 0x009a127e, 0x00b1873e, 0x2b321a32, 0x4db23cb2, 0x947e5eb2, + 0x873e0093, 0x080a00b1, 0x00b1873e, 0x9d33040a, 0xd9047900, 0x00002af4, 0x0a029998, 0x0094f108, + 0x660bf540, 0x0149fe04, 0x900142fe, 0x22902c99, 0xa09da018, 0x0499922d, 0x99929da0, 0x929da004, + 0x9da00499, 0xa0049992, 0x0119339d, 0x16300085, 0x0e08f401, 0x1d33030a, 0x3e043102, 0xfe00ade2, + 0x40fe0141, 0x2c119001, 0xb2280090, 0x7e0bb21a, 0xbf008fd8, 0xfe00bf19, 0x42fe0143, 0x0794f001, + 0x90243390, 0x3ab22022, 0xb20141fe, 0x1c11902b, 0xb20704f0, 0x0404b61c, 0x7e0509fd, 0xbf0081f1, + 0xbf3fbf29, 0x0141fe1e, 0xf00894b6, 0x94f1fff4, 0xf4b6ffff, 0x18e4b610, 0xfd181190, 0x1ab20509, + 0xfd050ffd, 0xbe7e050e, 0x083e0081, 0x2ab200ae, 0x00818e7e, 0x20bf2ab2, 0x00815e7e, 0x0e3e29bf, + 0x41fe00ae, 0x18119001, 0x2b7e1ab2, 0x1ab20081, 0xf87e10bf, 0x19bf0080, 0x04f11ab2, 0x94b6ffff, + 0x0590ff10, 0x0080c57e, 0x94f119bf, 0x40a0ffff, 0xf03e59a0, 0xfab200b0, 0x00b34b7e, 0x00b1873e, + 0x0d00a033, 0x5b01ad33, 0xae6d3e03, 0x33b0bf00, 0x330c0190, 0x3e2e0294, 0xfe00ae5e, 0x010a0141, + 0xb2181190, 0xb1cd7e1b, 0x1fad3300, 0x1fbf033e, 0x09fffe09, 0x049fff95, 0x613e10a0, 0x04f000ae, + 0xb22a327f, 0xb20f7e0b, 0xb1873e00, 0x7e9a3200, 0x3e00b1cd, 0xb200b187, 0xb24f7efa, 0xb1873e00, + 0x2af4d300, 0x39bf0000, 0x000000df, 0xfd080a20, 0x0bf5049f, 0x030a02f5, 0xef001d33, 0x0140fe02, + 0x90014cfe, 0xcc902800, 0xfe0bb22c, 0x22900142, 0x7e2ab224, 0x7f008d3b, 0x03967009, 0x940b9cf0, + 0x0e7e0791, 0x30180001, 0x01087e58, 0x00003300, 0x3e1fb20a, 0x7f00af0c, 0x00a4732a, 0x3e19b20a, + 0xc500af0a, 0xa0734019, 0xa0731403, 0xa4731804, 0x9fe51c02, 0x0c3e0100, 0x9fe500af, 0x0c3e0200, + 0x9fe500af, 0x0c3e0300, 0x9fb200af, 0x900149fe, 0x997f2c99, 0x88049973, 0x04967000, 0x731a0cf4, + 0x70480190, 0x08f40196, 0x02907336, 0x039d734c, 0x8e3e00e3, 0x997300af, 0x70009b07, 0x0cf40796, + 0x05907310, 0x069d7369, 0xc33e00cb, 0x997300af, 0x73009c08, 0x00bd099d, 0x00aff83e, 0xfe07ff89, + 0x3e04f9fd, 0x4900b010, 0xf9ffc7ff, 0x0095f194, 0xaf7f3e20, 0xc7ff4900, 0xf194f9ff, 0x8f180095, + 0xfdfe3fff, 0x9fe5049f, 0x103e4000, 0xff4900b0, 0x94f9ffc7, 0x100095f1, 0x00af7f3e, 0xffc7ff49, + 0x95f194f9, 0xb43e0800, 0xff4900af, 0x94f9ffc7, 0x100095f1, 0xfe3fff8f, 0xe5049ffd, 0x3e80009f, + 0x4900b010, 0xf9ffc7ff, 0x0095f194, 0xafdb3e08, 0xc7ff4900, 0xf194f9ff, 0x8f100095, 0xfdfe3fff, + 0x9fe5049f, 0x103ec000, 0xff4900b0, 0x94f9ffc7, 0x080095f1, 0x00b0023e, 0xffc7ff49, 0x95f194f9, + 0xff8f1000, 0x9ffdfe3f, 0x00008f04, 0xf59fff01, 0x002af4d9, 0x50991800, 0x0f009033, 0x02000089, + 0x3e95f9ff, 0x8900b02e, 0xfffdffff, 0x49a094f9, 0x00b0f03e, 0xf514a630, 0xc401480c, 0x1cd9ffaf, + 0xbc000005, 0xf4d9e89f, 0x0a00002a, 0x3befc708, 0xc7989fbc, 0x9fbb96ef, 0x0194f005, 0x012b0bf5, + 0xe7ff29c4, 0x54010def, 0x040a029b, 0x18f5bf66, 0xf3f00119, 0xffb9e400, 
0xc2f9bcff, 0xf404c6b0, + 0x040c090d, 0x00b08a3e, 0xbae44da0, 0xe9c4ffff, 0x42ebe703, 0x00943301, 0xb0babc12, 0x002b14d9, + 0x3e4ab200, 0xd900b0b3, 0x00000680, 0xbabc99bf, 0xbc4ab2b0, 0x7e7eb09b, 0xf03e000b, 0x9b3200b0, + 0x00b8967e, 0x00b1873e, 0x0f00a033, 0x1d33080a, 0x3e00b101, 0xfe00b187, 0x00900140, 0x7e0ab224, + 0x3f008ca4, 0x00a54f09, 0x06009433, 0x4fa05a0f, 0x873e1f0a, 0xf4d900b1, 0x1800002a, 0x99331599, + 0x33008700, 0x303802a0, 0x0cf402a6, 0x00a0330f, 0x01a43318, 0xb12f3e6e, 0x03a03300, 0x04a4332c, + 0xb14f3e62, 0x7e3ab200, 0x3e00bb95, 0xb200b187, 0x7ecbb2ba, 0x3e00be0f, 0x3200b187, 0xbb1e7e2a, + 0xb1873e00, 0x7e2a3200, 0x3e00c025, 0x3200b187, 0xbfb97e2a, 0xb1873e00, 0x2af4d900, 0x99180000, + 0x00903315, 0x6cfbc724, 0x4db22c32, 0xa4f05eb2, 0xbcbf7e0f, 0xb1873e00, 0x3e020a00, 0x0a00b187, + 0xb1873e03, 0xfe090a00, 0x99900149, 0xd99fbf30, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, + 0x2cde1c55, 0xbf000006, 0xfc30f4ef, 0xf00149fe, 0x9fa0ffa4, 0xa002a4b6, 0xbf9fbfca, 0xa61f0ae9, + 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0x040e0149, 0xa6309fa0, + 0x120cf402, 0xd9ffafc4, 0x00002bb4, 0x0ef89fbc, 0xfebfa01f, 0x9fbf0149, 0x00062cd9, 0x3299bf00, + 0xf4f9a6ea, 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4ffbf, 0x0149fefc, 0x9fa0040e, + 0xf402a630, 0xafc4100c, 0x2bb4d9ff, 0x1f0e0000, 0xfef99bbc, 0x9fbf0149, 0x00062cd9, 0x3299bf00, + 0xf4f9a6ea, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdffc30, 0xf9000006, 0xfeffbf12, 0x99900149, + 0xa0b1b208, 0x08ae959f, 0xd010af95, 0x00002af4, 0xe926ff09, 0x183d1bf4, 0xf9264909, 0x00b21bf5, + 0x0a4a0918, 0x0199331c, 0x963000ab, 0xa108f501, 0x33060a00, 0x009d029d, 0x3d130998, 0xa01f0af4, + 0x490918b9, 0x104a0f35, 0x09350199, 0xb3303e49, 0x4a091800, 0x94330a0a, 0xb4d97a00, 0xbf00002b, + 0xfff4f099, 0xb6e899c7, 0xf9bc0694, 0x3ff6b0f0, 0x0a5e0cf4, 0x0ce43303, 0x04f9905b, 0x004096b1, + 0x944e0cf4, 0xc0df02f9, 0xbc00002b, 0x9f7f909f, 0x0d019a58, 0x02f47308, 0x0409981b, 0x94f0290d, + 0x00903380, 0x8fa17e0f, 0x3d010900, 0x500935d4, 0x002af4df, 0x49fe1800, 0xb5ffd9c4, 0x020913f9, + 0xf04af935, 0x1c0affe4, 0x303e1ea0, 0x040a00b3, 0x900149fe, 0x9fbf0899, 0x00062cd9, 0xa699bf00, + 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x2cdffc30, 0xf9000006, 0xfeffbf22, 0x99900149, 0xa0b2b20c, + 0xc7adb29f, 0xbbbfe8a0, 0xf53f06b0, 0xc400cc0c, 0xaec4ffbc, 0x0fe4b3ff, 0xb0050a0d, 0x0cf53fc6, + 0xb4d900bb, 0xbf00002b, 0xff9fc499, 0x94e899c7, 0x94b606f1, 0x0ee0b306, 0x0fe0b32c, 0xb31f0a44, + 0x009a0ded, 0x7190097c, 0xf500ff96, 0xe4008c0c, 0xd9ffff9f, 0x00002bc0, 0xa0f89fbc, 0xb3d73e2f, + 0x90017c00, 0x00ff9671, 0xe46f0cf4, 0xd9ffff9f, 0x00002bc0, 0x0af99bbc, 0xb4393e1f, 0xf0c9bc00, + 0xd9c7050a, 0x02fb94f0, 0x94019990, 0xcbbc029c, 0x00e6b1e0, 0x440cf410, 0x0a9001bc, 0x0294b603, + 0xb1f0c9bc, 0xf41000f6, 0x9ba6320c, 0xa60808f4, 0x2008f49e, 0x0df4b9a6, 0xf4bfa608, 0xc0df1608, + 0xbc00002b, 0xbfbca09f, 0x0b7e7eb0, 0xb3d73e00, 0x3e040a00, 0x0a00b439, 0x0149fe03, 0xbf0c9990, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0425fb00, 0x00062cdf, 0xf4ffbf00, 0x49fefc30, + 0xb29fa001, 0xffa6b1c9, 0x2d0cf40f, 0xb1909abc, 0xf4100096, 0xc0de230c, 0xbc00002b, 0xd03390ae, + 0x9ab20a00, 0x00b48c3e, 0x9bb2bab2, 0x000b7e7e, 0x983e1f0a, 0x040a00b4, 0xbf0149fe, 0x062cd99f, + 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdfe4, 0x12f90000, 0x49feffbf, + 0x20999001, 0x9fa0a0b2, 0xb4f0d1b2, 0xb4edb2ff, 0xe0b40aa0, 0x0709c40b, 0x1bf49ba6, 0x2f0fc775, + 0x900149fe, 0x9da00899, 0xb5029eb5, 0x9fbc019a, 0xfe94bdb8, 0xff90014f, 0xb5f9a014, 0xf1b501fc, + 
0x2309c702, 0xf9bcbabf, 0x850fc7c8, 0x3c8a09c7, 0xf4f0e09f, 0x910dc71f, 0x3c05cfbb, 0xeeb990d9, + 0x1fd4f000, 0xb91fe4f0, 0xefbc0099, 0x1f94f0e0, 0x9dbcff0f, 0x95f9bc90, 0xbb05febb, 0xfcfd049d, + 0x0099b904, 0xfd04fdbb, 0x9ffd049a, 0xfeb9a005, 0x99900149, 0xd99fbf20, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x30f41c15, 0x062cd9f4, 0x82f90000, 0xd1b299bf, 0x90014ffe, 0xf9a02cff, + 0xff92dd3f, 0x091f0003, 0x01f03517, 0x2002f035, 0xb2b6b2f9, 0x33a5b2e3, 0xdb1900d4, 0x00000574, + 0x7e7e240c, 0x0909000b, 0x19200a32, 0x00b62d3e, 0xd630030a, 0x780cf409, 0xc40147fe, 0xf8b2ffc4, + 0x7790043d, 0xb6243e24, 0xff09c400, 0x92947bb2, 0xbc040c02, 0xd43da026, 0x00b4547e, 0x6b1fa433, + 0xf9c47fbf, 0xf494a607, 0xf9c75e18, 0x01991223, 0xf4019630, 0xfec7520c, 0xffe9c42f, 0xc7c8893c, + 0xf9c78afd, 0x90d93c85, 0x0cf49c26, 0x03e0333b, 0x91f9c738, 0x26909d3c, 0x2c0cf49c, 0x109052bc, + 0x9fa00100, 0x0926193f, 0x0aa108f4, 0x0149fe1f, 0xbf2c9990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, + 0xb65d3e23, 0xbf0d0a00, 0xff0fc439, 0x000000de, 0x049efdff, 0xa005f9fd, 0xb62d3e3f, 0x42e97e00, + 0x0c85fb00, 0xd9a830f4, 0x0000062c, 0x99bf82f9, 0xb2f830f4, 0x014ffea7, 0xa080ff90, 0xb0aabff9, + 0xc1b00eb1, 0x68a6c70d, 0x30016912, 0x0cf50396, 0xa9c7013b, 0x0a96306c, 0x01310cf5, 0xfe0141fe, + 0x11900140, 0x50009057, 0x2010a5b6, 0xb2b4bd19, 0xb1a27e0c, 0x1fad3300, 0x09bf011d, 0xc40142fe, + 0x2290ff6b, 0x04b4b658, 0xb9bc2ab2, 0xb26c32b0, 0x7e7eb21d, 0x3000b56a, 0xad333fa1, 0xfe00f51f, + 0x01080141, 0x3d401190, 0x08199024, 0x3d0c1e90, 0xb054bd34, 0xe1b00c91, 0xb7b93e0b, 0x1490b400, + 0x100c1bb2, 0x59bcd43d, 0x7e4ab240, 0x3300b454, 0x00c31fad, 0x1f9819bf, 0xffffde01, 0x9efde0ff, + 0x021fb504, 0x203319a0, 0x043d0a00, 0x00b7533e, 0xb40cb0b4, 0x1ab20bc0, 0x00a9ff7e, 0xa0321fbf, + 0xffffffde, 0x1f09c4e0, 0xb604fefd, 0x9ffd1894, 0xb219a005, 0x0c1bb24a, 0x7e010d10, 0x3300b454, + 0x33731fa4, 0x020a0020, 0xb7b33e01, 0x1f043300, 0x3e043d3f, 0xc400b7a0, 0x1c98ff09, 0x031d9802, + 0xfe0de0b4, 0xff90014f, 0xa8f9bc58, 0xb00e90b4, 0x001001e1, 0xb03b3201, 0x7eb20091, 0x00b4b27e, + 0x26579034, 0xd208f409, 0x00b7b33e, 0x843d19bf, 0x101f9295, 0x55900133, 0xf5362610, 0x0aff4208, + 0x0080331b, 0xb7cf3e11, 0x3e030a00, 0x3400b7d2, 0x49fe3fa0, 0x80999001, 0x2cd99fbf, 0xbf000006, + 0xf4f9a699, 0xe97e070b, 0x30f40042, 0x5885fb08, 0xdff430f4, 0x0000062c, 0xffbf12f9, 0x900149fe, + 0x41fe1099, 0x909fa001, 0xb84a0c11, 0xb2040b00, 0xa8c87e1c, 0x1fa43300, 0x0140fe2b, 0x9000b44a, + 0x040b0800, 0xc87e0cb2, 0xa43300a8, 0x19bf161f, 0x9fa60fbf, 0xf00b9cf0, 0x9a320196, 0x00b8423e, + 0x49fea43d, 0x10999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x15fb0042, 0xfc30f40c, + 0x2cd112f9, 0x98000006, 0x1fbf04a9, 0x900140fe, 0x95f00800, 0xb50fa020, 0xac9004a9, 0x4a010b59, + 0xc87e0090, 0x0fbf00a8, 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0x062cd990, 0x52f90000, + 0x4ffe99bf, 0x84ff9001, 0xf9a0030e, 0x0500ad33, 0x2af4d901, 0x99180000, 0x00993359, 0xba3200f6, + 0xbd014cfe, 0x20cc90b4, 0x00b1a27e, 0xad33ae32, 0xfe00e31f, 0xb84a0144, 0x1c449000, 0x4cb2040b, + 0x00a8c87e, 0xad33ae32, 0xfe00cb1f, 0xb44a0140, 0x18009000, 0x0cb2040b, 0x00a8c87e, 0xad33ae32, + 0xbf00b31f, 0xa64abf09, 0xa60bf5a9, 0xbcaa9000, 0x0b0140fe, 0x27009001, 0xc87e0cb2, 0xae3200a8, + 0x911fad33, 0xfe093f00, 0x43b20142, 0xf0282290, 0x0045ff94, 0x02915410, 0x00b9753e, 0x1be43abf, + 0x5abcffff, 0xf4b9a692, 0x9bb2050d, 0xaa90b072, 0x00b3f0bc, 0xc87e2cb2, 0x0fe400a8, 0xae32ffff, + 0x511fa433, 0x107b39bf, 0x202fbc02, 0xf190f9bc, 0xa00fff94, 0x00147339, 0x0149fec7, 0xbf209990, + 0x0799909a, 0x4bfe9c3f, 
0x90010d01, 0xc4f028bb, 0x02c4b6ff, 0x00b4547e, 0xa433ae32, 0x4cb2171f, + 0x0b00b84a, 0xa7387e04, 0x3eae3200, 0x0e00b9b1, 0x0149fe09, 0xbf849990, 0x062cd99f, 0x99bf0000, + 0xf9a6ea32, 0x7e070bf4, 0xfb0042e9, 0x2cde7055, 0xbf000006, 0xfc30f4e9, 0xa0014ffe, 0x04a998f9, + 0x008ffdbf, 0x9ffd3ff0, 0x04a9b505, 0xd9a6e9bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cdf, + 0xf4ffbf00, 0x49fefc30, 0xa0aeb201, 0x12040a9f, 0xee0910bf, 0x0df4f926, 0x00c43330, 0x3eea7f0a, + 0x5800ba26, 0xa07301ea, 0xff091800, 0x0bf4b926, 0xffa9e416, 0x059bbbff, 0xf40194f0, 0x090a091b, + 0x00ba463e, 0x49fe1f0a, 0xd99fbf01, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, + 0xdff830f4, 0x0000062c, 0xffbf32f9, 0xfe0149fe, 0x99900140, 0x0143fe14, 0xa2b29fa0, 0x0090b1b2, + 0x12339013, 0x0b10ec4a, 0x7e0cb201, 0x3300a808, 0x3f741fa4, 0x01a6300a, 0xb3690cf4, 0xf0210020, + 0xa994ffa4, 0xb6240b05, 0x2cb202a4, 0xb8a0a9bc, 0x0010eeaa, 0x00a8087e, 0x4b1fa433, 0x230010b3, + 0x040b0a3f, 0xa4f01cb2, 0x05a994ff, 0xbc02a4b6, 0xaab8a0a9, 0x7e0010ee, 0x3300a808, 0x4a281fa4, + 0x010b10ec, 0x087e3cb2, 0xa43300a8, 0x3f3f191f, 0xf401f630, 0x093f0e0c, 0x1bf49f26, 0xbb033e8a, + 0xfe050a00, 0x99900149, 0xd99fbf14, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40835, + 0x062cdfd8, 0x22f90000, 0x49feffbf, 0x30999001, 0x9fa0a132, 0x002af4d9, 0x04999800, 0x080ab2b2, + 0x400094f1, 0xfe360bf4, 0xb4bd0140, 0xb20c0090, 0xba607e0a, 0x1fa43300, 0x320ab225, 0x7e010c1b, + 0x3300b9fc, 0xc4171fa4, 0x9990ff19, 0x9099bc02, 0x7f9009bc, 0x0f92f099, 0x49fe29a0, 0x30999001, + 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x25fb0042, 0xd830f428, 0x00062cdf, 0xbf62f900, + 0x0149feff, 0xb2409990, 0xd99fa0b2, 0x00002af4, 0xb2049998, 0x0bc3b2a6, 0x0094f108, 0xe50bf510, + 0xfeb4bd00, 0x11900141, 0x7e1ab21c, 0x3200ba60, 0x1fad33ab, 0xff0b00d1, 0x010c1ab2, 0x00b9fc7e, + 0xad33ab32, 0x0b00c01f, 0x3d1ab2ff, 0xb9fc7ec4, 0x33ab3200, 0x00af1fad, 0xbd011458, 0xa029a094, + 0x04109039, 0x4c72157f, 0x004da47d, 0x01e4bd80, 0xbc453e01, 0x01c9c400, 0x7f200bf4, 0xf4fd660f, + 0x1ebc0a1d, 0xbc3a3ea4, 0xf4fd6600, 0x1ebc0d1b, 0x05a9fd94, 0x00bc3a3e, 0xee90df72, 0x01c57601, + 0x72020090, 0x00c473fd, 0xf06ebfd0, 0xa9c400d3, 0xffffdfff, 0x94b6ff00, 0x04effd10, 0xff00008c, + 0xfd059efd, 0x9dfd049c, 0xbf69a005, 0xff4ee429, 0xe8afc7ff, 0xf110ed94, 0xfdff0094, 0xf9fd059d, + 0xe42fa005, 0xb9ffff59, 0xe9ff00ee, 0x150bf4a4, 0x9cfd39bf, 0x059afd04, 0x29bf39a0, 0x010095f1, + 0x49fe29a0, 0x40999001, 0x2cd99fbf, 0xbf000006, 0xa6ba3299, 0x070bf4f9, 0x0042e97e, 0xf42865fb, + 0x2cdff030, 0xf9000006, 0xfeffbf62, 0x99900149, 0xa0a13228, 0x32b0329f, 0xb2d5b2c4, 0x04b630e6, + 0x01120cf5, 0x002af4d9, 0x049e9800, 0x90ffb9c4, 0x9fc41199, 0xbb080a1f, 0x99b905ef, 0x1f94f000, + 0x0f909fbc, 0x05f9bbff, 0xb334feff, 0x00e9013d, 0x4ffe94bd, 0x20ff9001, 0xb50142fe, 0x229001f9, + 0xbdf9a01c, 0x7e2bb2a4, 0x3300ba60, 0x00c91fad, 0x3c322ab2, 0xfc7e1b32, 0xad3300b9, 0x3300ba1f, + 0x30460200, 0x0cf40206, 0x00003310, 0x010d331a, 0x733e00a5, 0x003300bd, 0x0d334103, 0x3e009804, + 0xc400bda9, 0x90f1ff19, 0x9eb80126, 0x3e001136, 0xc400bd7f, 0x90f1ff19, 0x9eb80126, 0x0f001146, + 0xbdb83e10, 0xff19c400, 0x012690f1, 0x9eb8040f, 0x3e001156, 0xc400bdb8, 0x90f1ff19, 0x020f0126, + 0x115a9eb8, 0xbdb83e00, 0xff19c400, 0x012690f1, 0xb801004f, 0x00115c9e, 0x0a034994, 0xf89cc404, + 0x18f4cfa6, 0x08c99032, 0x9fa6080b, 0xbc060df4, 0xecbcb2fc, 0x0140fea0, 0xb2200090, 0xa8087e0c, + 0x1fa43300, 0xa009bf13, 0x01009859, 0xf43e60a0, 0x030a00bd, 0x900149fe, 0x9fbf2899, 0x00062cd9, + 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf41065fb, 
0x2cdfe830, 0xf9000006, 0xfeffbf52, 0x99900149, + 0xa0a3b22c, 0x2af4d99f, 0x99980000, 0x0ab2b204, 0x0094f108, 0xca0bf520, 0x0140fe00, 0x0090a4bd, + 0x7e0bb218, 0x3300ba60, 0x00b81fad, 0xff0b0ab2, 0xfc7ec43d, 0xad3300b9, 0xfe00a91f, 0x007f0141, + 0x4a1c1190, 0x100b10dc, 0x087e1cb2, 0xad3300a8, 0x7200911f, 0xbd15b20b, 0x0104bdc4, 0x3eff04e0, + 0xc400befb, 0x0bf401b9, 0x07c6b069, 0xbf350cf4, 0xf85c3c3d, 0x901f0ec4, 0x99b90309, 0xfff4f000, + 0xbc1f94f0, 0x49bc909e, 0xf49fff95, 0xbb049ebb, 0x99b904fe, 0x049dfd00, 0xa0059ffd, 0xbeef3e39, + 0x3c2dbf00, 0x1ec4f85c, 0x1d09921f, 0xf00099b9, 0x94f0fff4, 0x909ebc1f, 0xff9549bc, 0x9ebbf49f, + 0x04febb04, 0xfd0099b9, 0x9ffd049d, 0x9029a005, 0xb57601cc, 0x04009001, 0x73041190, 0xfe8800b4, + 0x99900149, 0xd99fbf2c, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f41855, 0x062cdfd4, + 0x22f90000, 0x49feffbf, 0x34999001, 0x41fea232, 0x909fa001, 0x984a0c11, 0xb2280b23, 0xa8087e1c, + 0x1fa43300, 0x0110985d, 0x90bc19bf, 0xf490a6f2, 0xf6b01008, 0x0b9cf001, 0x3e0196f0, 0x0900bf69, + 0xf0f9a6e1, 0x96f00b9c, 0x330b0a01, 0xd9330090, 0x00002b0c, 0xbf019c98, 0x014ffe9b, 0xbc0cff90, + 0x9c4d90f0, 0x08923523, 0x0990240e, 0x1f94f001, 0xb504fa90, 0xd87e01f9, 0xa57e0026, 0x49fe00a6, + 0x34999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x25fb0042, 0xf830f42c, 0x00062cdf, + 0xbf12f900, 0x0149feff, 0x320c9990, 0xd99fa0a1, 0x00002af4, 0x0a049f98, 0x00008908, 0x04f9fd01, + 0xfe2a0bf4, 0xa4bd0140, 0xb2080090, 0xba607e0b, 0x1fa43300, 0x320ab219, 0x7ec43d1b, 0x3300b9fc, + 0xc40b1fa4, 0x1a7e1f1a, 0x49fe00bf, 0x0c999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x15fb0042, 0xf830f408, 0x00062cdf, 0xbf12f900, 0x0149feff, 0x320c9990, 0xd99fa0a1, 0x00002af4, + 0x0a049998, 0x0094f108, 0x00907380, 0x0140fe2e, 0x0090a4bd, 0x7e0bb208, 0x3300ba60, 0xb21c1fa4, + 0x3d1b320a, 0xb9fc7ec4, 0x1fa43300, 0x1f1ac40e, 0x7e20a5f0, 0xfe00bf1a, 0x99900149, 0xd99fbf0c, + 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40815, 0x062cdffc, 0x52f90000, 0xb995ffbf, + 0x0394b606, 0xa9bcc4b2, 0xfed5b230, 0xb0c40149, 0x1899903f, 0x053e9fa0, 0x31bf00c1, 0xb3013298, + 0x0a180054, 0xb2b4bd01, 0x42667e0c, 0x052bfd00, 0x3e051afd, 0x0a00c0ed, 0xb2b4bd01, 0x42667e0c, + 0x00b9b900, 0xfd00aab9, 0x1afd0429, 0x01449204, 0x0b0040b3, 0xb3010090, 0xa0c84004, 0x0132b531, + 0x339004bd, 0x0044b308, 0x0149feb5, 0xbf189990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0455fb00, 0xf9fc30f4, 0x062cd112, 0x19bf0000, 0xbd0140fe, 0x080090d4, 0x927e09a0, 0x0fbf00c0, + 0xf9a619bf, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, 0x0000062c, 0x40fe19bf, 0x90010d01, + 0x09a00800, 0x00c0927e, 0x19bf0fbf, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0xf9fc30f4, 0x062cd112, + 0x19bf0000, 0xb63fbcc4, 0xb4b606b5, 0x0140fe03, 0x90a0abbc, 0x09a00800, 0xbf01ab98, 0x42127eaa, + 0xbf0fbf00, 0x01a4f019, 0x0bf4f9a6, 0x42e97e07, 0x0415fb00, 0x00062cdf, 0xf4ffbf00, 0xaeb2fc30, + 0x3d0149fe, 0xbd9fa0a4, 0x08f0b3f4, 0x98ef3c0a, 0x90a0a93c, 0xfeb301ff, 0xfef30080, 0x9fbf0149, + 0x00062cd9, 0xf099bf00, 0xaa3907a2, 0xf4f9a601, 0xe97e070b, 0x30f40042, 0xd900f804, 0x0000062c, + 0x30f499bf, 0x014ffefc, 0xae3ff9a0, 0xf401e9c4, 0xf4bd1b0b, 0xaf3cff0e, 0xf59e2698, 0x9000c81b, + 0xf4b301ff, 0xe93ef410, 0xf4bd00c2, 0x1890afbc, 0x9d330999, 0x9000b100, 0xf4b301ff, 0xad18f207, + 0x26800c06, 0x9e0cf5dc, 0x00e43300, 0x01a91838, 0x93009d33, 0x02a91800, 0x8b009d33, 0x03a91800, + 0x83009d33, 0x04af1800, 0xf005a918, 0x9476fff4, 0x059ffd08, 0x6f009473, 0x1bf4dc26, 0xc2dc3e69, + 0x04af1800, 0xf005a918, 0x94f0fff4, 0x0894b6ff, 0xcd059ffd, 0x94737099, 
0xa9184d00, 0x41991201, + 0xf4199630, 0xa918400c, 0x40963102, 0x361df400, 0x005a9631, 0x182f1cf4, 0x963103a9, 0x1df40040, + 0x5a963125, 0x1e1cf400, 0x1b00d033, 0xc4ffefc4, 0x1bf404f9, 0x0af9c417, 0x0b0294b3, 0x3307a918, + 0x0a0a0090, 0xc2eb3e06, 0xfea4bd00, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0xfc30f400, 0x00062cdf, 0xbf12f900, 0x0149feff, 0xb2089990, 0xb29fa0b1, 0xb3020ea0, + 0x3f2200a0, 0x0894f0a9, 0x7e171bf4, 0xb300c1b4, 0x20060010, 0x0809181a, 0xa926060e, 0xbd051bf4, + 0x0149fee4, 0xbf089990, 0x062cd99f, 0x99bf0000, 0xf9a6eab2, 0x7e070bf4, 0xfb0042e9, 0x2cdf0415, + 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x0aa9b29f, 0x00c0b302, 0xb29abf12, 0x07b4b6cd, 0x7e00804c, + 0xfe00d992, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0xf830f400, + 0x00062cdf, 0xbf22f900, 0x0149feff, 0xb2109990, 0x0a9fa0a0, 0x0000b302, 0x3d0abf34, 0x07b29494, + 0xb20141fe, 0x0f11902b, 0x1920010c, 0xcb7e1db2, 0xa4b300d9, 0x193f1700, 0x2bb20abf, 0x94f01db2, + 0x20010cfd, 0xd9927e19, 0x0149fe00, 0xbf109990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0825fb00, 0xdff830f4, 0x0000062c, 0xffbf42f9, 0x900149fe, 0xa1b21899, 0xb4b29fa0, 0xd3b2c0b2, + 0x5600a0b3, 0x5200c0b3, 0xbd07cf18, 0x0142fe94, 0x10142290, 0x29a001ff, 0xb207cf35, 0xc1b47eca, + 0x352bb200, 0x1ab2080a, 0x00c8be7e, 0x2c00a4b3, 0x0cb22bbf, 0x5e7e1ab2, 0xa4b300c3, 0x1ab21e00, + 0x9d7e4bb2, 0xa4b300c3, 0x30b31200, 0x22bf0e00, 0x783e32a0, 0x020a00c4, 0x900149fe, 0x9fbf1899, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf40845fb, 0x2cdffc30, 0xf9000006, 0xfeffbf02, + 0x99900149, 0xa0c0b204, 0x00a0b39f, 0x00c0b324, 0xb6aabf20, 0x804c07b4, 0x7e0db200, 0xb300d9cb, + 0xb21000a4, 0xc1fb7e0a, 0xc4cf3e00, 0xfe020a00, 0x99900149, 0xd99fbf04, 0x0000062c, 0xf9a699bf, + 0x7e070bf4, 0xfb0042e9, 0x2cdf0405, 0xbf000006, 0xfc30f4ff, 0xa00149fe, 0x00a0b39f, 0x00c0b319, + 0xb2aabf15, 0x07b4b6cd, 0xcb7e100c, 0x143e00d9, 0x020a00c5, 0xbf0149fe, 0x062cd99f, 0x99bf0000, + 0x0bf4f9a6, 0x42e97e07, 0x0430f400, 0x30f400f8, 0x062cdffc, 0x42f90000, 0x49feffbf, 0x14999001, + 0x9fa0b3b2, 0xa2b2c4b2, 0xa0b30200, 0x0c7e4200, 0x030000c6, 0xa0b3a1b2, 0x3bb23600, 0x1cb22ab2, + 0x00c4937e, 0xa4b3a0b2, 0x1ab21e00, 0x057eb4bd, 0xa0b200c3, 0x1000a4b3, 0x0c0040b3, 0xf0061918, + 0x49a0ff94, 0x1bb22ab2, 0x00c6557e, 0x900149fe, 0x9fbf1499, 0x00062cd9, 0xb299bf00, 0xf4f9a60a, + 0xe97e070b, 0x45fb0042, 0x062cdf04, 0xfebf0000, 0xfefc30f4, 0x9ea00149, 0xf9bf9ebf, 0x0bf4e9a6, + 0x42e97e07, 0x0430f400, 0x2cdf00f8, 0xbf000006, 0xfc30f4fe, 0xa00149fe, 0xbf9ebf9e, 0xf4e9a6f9, + 0xe97e070b, 0x30f40042, 0xdf00f804, 0x0000062c, 0x30f4febf, 0x0149fefc, 0x9ebf9ea0, 0xe9a6f9bf, + 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cdf, 0xf4ffbf00, 0xaeb2fc30, 0xa00149fe, 0x0eaa989f, + 0x0a00a0b3, 0x333ef4bd, 0xea9800c6, 0x00a0b30f, 0x90010f0e, 0x94bd0eff, 0xfef9e9bc, 0x9fbf0149, + 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, + 0x9fa00149, 0x1a00b0b3, 0xb30ea998, 0x980d0090, 0x94b30fa9, 0x01090c00, 0xbc0e9990, 0x49fe99ab, + 0xd99fbf01, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cde, 0xf4e9bf00, + 0x4ffefc30, 0x98f9a001, 0xa9a002b9, 0xa9b5ff09, 0x02b99801, 0xabb5ffbf, 0x03acb504, 0xbf02a9b5, + 0xf4f9a6e9, 0xe97e070b, 0x30f40042, 0xf400f804, 0x2cdffc30, 0xf9000006, 0xfeffbf22, 0x99900149, + 0xa0a0b20c, 0xb2c1329f, 0xb3020ad2, 0xb85e00b0, 0x0001400c, 0xc998d4bd, 0x0090b304, 0x00143344, + 0x0390b308, 0x3fcf3f3c, 0xf4f926b9, 0xcf18321b, 0x01b91801, 0x1bf4f926, 0x02ce1827, 0xbc02bf18, + 
0x9dbc90dd, 0x0394b690, 0x014099b8, 0x9009bc00, 0x1bf4ef26, 0xbd29a00b, 0xc7493ea4, 0x01dd9000, + 0xb318cc90, 0x0ab314d4, 0x0149fe04, 0xbf0c9990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, + 0x0425fb00, 0x00062cd9, 0xf499bf00, 0x4ffefc30, 0xa0acb201, 0x09abbff9, 0xf4b9a6f0, 0xaa980d0b, + 0x01cbb503, 0x00d78b7e, 0xbf0149fe, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0430f400, + 0x30f400f8, 0x062cdffc, 0x32f90000, 0x49feffbf, 0x90a1b201, 0xb3b21099, 0x04bd9fa0, 0xe33ef002, + 0x19bf00c7, 0x0df492a6, 0x3e030a09, 0xa600c7ea, 0x091bf492, 0xea3e020a, 0x009000c7, 0x7e1ab201, + 0xa600c764, 0xdd08f403, 0x49fea4bd, 0x10999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x35fb0042, 0xf830f404, 0x00062cdf, 0xbf82f900, 0x0149feff, 0xb2289990, 0xb29fa0a3, 0x00a9b3b8, + 0xb0b30084, 0x47fe7f00, 0x05a49801, 0x54bd24bd, 0x779014bd, 0xc86d3e24, 0x0c3a9800, 0x02bc94bd, + 0xb279a0b0, 0xd78b7e7c, 0x0f79bf00, 0xf49fa6ff, 0x643d090b, 0x00c85b3e, 0x90015590, 0x04a60100, + 0x33d908f4, 0x90070060, 0x24bc0111, 0x03399820, 0x18f429a6, 0xbd01060b, 0xc85e3e04, 0xb24bb200, + 0x16fc7e1a, 0xf45aa600, 0x1190060d, 0x06399801, 0x19a6f43d, 0x0f050cf4, 0xbd8f2001, 0xc8a33ea4, + 0xfe020a00, 0x99900149, 0xd99fbf28, 0x0000062c, 0xf9a699bf, 0x7e070bf4, 0xfb0042e9, 0x30f40885, + 0x062cdff0, 0x82f90000, 0x49feffbf, 0x30999001, 0xa00147fe, 0x08a9989f, 0xb1b0a6b2, 0xb0f10509, + 0x843d0a91, 0x779090b2, 0x0369982c, 0x7fa0f4bd, 0x08f409a6, 0x0804bd07, 0x0a90b401, 0x1bf409a6, + 0x00803335, 0xc9663e32, 0x0c6a9800, 0xb24010bc, 0x7e4bb27c, 0xbf00d78b, 0xa6ff0f79, 0x0f1bf49f, + 0x09012290, 0xf439a6f1, 0x43b2051b, 0x3e011190, 0xbd00c939, 0xbdf10314, 0x05699824, 0x08f419a6, + 0x0020b3cb, 0xf429a61e, 0x60b50f18, 0x09f0b408, 0x773ef3a0, 0xf10f00c9, 0x1bf45fa6, 0xbc05b205, + 0xe93e0009, 0x1a0a00c8, 0x59a6f109, 0xb50d0bf4, 0x90b40865, 0xbd95a009, 0x0149fea4, 0xbf309990, + 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x1085fb00, 0xd9f830f4, 0x0000062c, 0x99bf82f9, + 0x90014ffe, 0xa3b228ff, 0xb4b2f9a0, 0xc033d0b2, 0xdab20e00, 0x140cb43d, 0x00da047e, 0xbd0142fe, + 0x24229014, 0xff07fe08, 0xfb05fc06, 0x00ca463e, 0xbd0c3a98, 0xb014bc94, 0x2cb229a0, 0x00d78b7e, + 0xf00f29bf, 0x0df49fa6, 0xa6fd0f56, 0x110cf49f, 0x18f496a6, 0xf495a630, 0x153e451b, 0x98a600ca, + 0xa62f0bf4, 0x371bf497, 0x90010998, 0x09b50199, 0xca433e01, 0x04099800, 0xb5019990, 0x433e0409, + 0x099800ca, 0x01999002, 0x3e0209b5, 0x9800ca43, 0x99900309, 0x0309b501, 0x00ca433e, 0x999009bf, + 0x9009a001, 0x39980111, 0xf419a605, 0x49fe8508, 0x28999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, + 0xe97e070b, 0x85fb0042, 0xf830f408, 0x00062cdf, 0xb222f900, 0xb2ffbfa1, 0x03aa98b2, 0x49fe1bbf, + 0x10999001, 0xa00140fe, 0x0c00909f, 0x8b7e0cb2, 0x0cbf00d7, 0xb2031a98, 0xd7c97e2b, 0x011b9800, + 0xb9a6ff09, 0x98101bf4, 0x12b50419, 0x0292b502, 0x00cabd3e, 0xb2031a98, 0xd7c97e2c, 0x031a9800, + 0xfd0c1bbf, 0x00d7c97e, 0x900149fe, 0x12a01099, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x25fb0042, 0xc830f408, 0x00062cdf, 0xbf82f900, 0xf830f4ff, 0x900149fe, 0x9fa06099, 0x18049992, + 0xf4bd1cae, 0xa9989fa0, 0x0aa2b205, 0x0b91b005, 0x1a00e933, 0x0c2b9802, 0xfe092c98, 0xf100014a, + 0xfe44aa90, 0xa6b20144, 0x057e2001, 0x05b200d8, 0x4490a3b2, 0xcb7e3e30, 0x08299800, 0x0bf439a6, + 0xb22ab231, 0xb2010c3b, 0xc9947e4d, 0x04499800, 0x1f0094b3, 0xb3034998, 0x98180094, 0x94b30249, + 0x49bf3b00, 0x18f491a6, 0x3e30b209, 0xb200cb71, 0x0bb0b419, 0x6ab291b2, 0x00d8457e, 0x35a6a3b2, + 0x0ab91bf4, 0xf503a603, 0xb301a30b, 0xb20c0014, 0x3e743d03, 0xb200cb9b, 0x94010703, 0x54bd0738, + 0xa73e86b2, 0x2a9800cc, 
0xbce4bd0c, 0x4cfe1053, 0x16e1b001, 0xb258cc90, 0xd78b7e1b, 0x00adb300, + 0x90b400f3, 0xa6f00f16, 0xd80cf59f, 0xb294bd00, 0x1591b02a, 0x00c60c7e, 0xa9b3a4b2, 0xbf00d600, + 0x4c6bb22a, 0x4db20080, 0x00d9cb7e, 0xadb3a0b2, 0xb200a700, 0xb21bb22a, 0x014dfe4c, 0x7e50dd90, + 0xb200c404, 0x00adb3a0, 0x2ab20090, 0x0c014b90, 0x014dfe01, 0x7e54dd90, 0xb200c6cf, 0x00a4b3a0, + 0x15b0b478, 0xfe0c2c98, 0xaa90014a, 0xc6987e30, 0x0c90b400, 0x1bf491a6, 0x14b0b41e, 0x90014afe, + 0x697e30aa, 0x90b400ca, 0x059f9815, 0x2300f4b3, 0x00cc953e, 0x9ea6ef0e, 0x00090df4, 0xcc953e03, + 0x014afe00, 0x7e30aa90, 0x3e00c764, 0x9800cc31, 0xe0b402f9, 0xf491a614, 0xfeb5061b, 0xa6f9bf02, + 0x091bf491, 0x953efea0, 0xf99800cc, 0xf491a601, 0xfeb5061b, 0xb24bb201, 0xc6557e2a, 0x0004b300, + 0x01559013, 0x98806690, 0x59a60529, 0xfefa08f5, 0x46007033, 0xb2042f98, 0xb2e4bd8d, 0x00f1b02a, + 0x70dc020b, 0xfe000000, 0x11900141, 0x0111b05c, 0x00c5a97e, 0xbf042c98, 0x048bb22a, 0xd9597efb, + 0x981ebf00, 0xa0b2042c, 0x2ab2b4bd, 0xca7e0db2, 0x04b300c5, 0xff040600, 0x0d3e04bd, 0x2a9800cd, + 0xb003bc0c, 0x00904cb2, 0xd7c97e01, 0x0b90b400, 0x08f409a6, 0x032f98ec, 0xb59039bc, 0x9fa60929, + 0xbd0808f4, 0x0929b594, 0x49fea4bd, 0x60999001, 0x2cd99fbf, 0xbf000006, 0xf4f9a699, 0xe97e070b, + 0x30f40042, 0x3885fb08, 0xd9cc30f4, 0x0000062c, 0x99bf82f9, 0xfef830f4, 0xff90014f, 0xb0f9a05c, + 0xc8b20be1, 0xa3b2d6b2, 0x8400b9b3, 0x00e9b302, 0xaf18027f, 0x0141fe2c, 0x119094bd, 0x0019a054, + 0x00f93303, 0xc43d026d, 0xcf7e1db2, 0xa0b200c6, 0x5e00adb3, 0xbc17bf02, 0x7998f068, 0xf59fa601, + 0x98024d08, 0x94b30479, 0x05000a01, 0x00cfee3e, 0x3e0069b3, 0x027f9802, 0xf9a6f009, 0x022a0bf5, + 0x0c7e3ab2, 0xa2b200c6, 0x1e00a9b3, 0x0c3c9802, 0x40fe7bb2, 0x34009001, 0x987e0ab2, 0x0ab200c6, + 0xcc0570b5, 0xa27e708b, 0xa0b200c7, 0xe900adb3, 0x0d00b401, 0x01a6f001, 0x01ce0bf5, 0x01d00cf5, + 0x804cb43d, 0x7e2ab200, 0xbf00da04, 0x070b943a, 0xb200804c, 0xd9cb7e2d, 0x0ca1b000, 0xb600adb3, + 0x05291801, 0x76042f18, 0xf4f00894, 0xe59fffff, 0xe966ff09, 0x01980bf5, 0xffffe9e4, 0x08f589a6, + 0xf4bd018e, 0x18902fbc, 0x9d330999, 0x90018200, 0xf4b301ff, 0x083ef207, 0x8e3c00d0, 0xf59f26f2, + 0xc4016d08, 0x94f0fffd, 0x529dbcff, 0x0df456a6, 0x9065b205, 0xa43d10d9, 0x3db029bc, 0x3ee4bdc4, + 0xb100ceb3, 0xf5006fd6, 0xb401450c, 0xbe3c0b10, 0xf81e3c98, 0x0bf4f926, 0xff94f017, 0xfd009939, + 0x9033049f, 0x010a0600, 0x0ce9bf3c, 0x01ee9001, 0xa601dd90, 0xce08f4e5, 0xed00c933, 0xf0293f00, + 0x0bf40894, 0x00a93308, 0x94bd00d0, 0x91b03ab2, 0x1391b014, 0x301291b0, 0x4bfe5b91, 0x5bbb9001, + 0x00c8057e, 0xadb3a0b2, 0x3400ef00, 0x90335b90, 0x3ab21100, 0x00cae57e, 0xadb3a0b2, 0xb400db00, + 0x40b40d00, 0x014ffe11, 0xb250ff90, 0x070d942e, 0x4101f1b0, 0x3ab20080, 0x4cb2010b, 0x7e0011b0, + 0xb200c5a9, 0xb22cb20b, 0x014dfe3a, 0x7e4cdd90, 0xb400c404, 0xa0b214e0, 0x3ab21cb2, 0x0db2b4bd, + 0x00c5ca7e, 0x91000db3, 0x13b0b400, 0x90014afe, 0x697e34aa, 0x3a9800ca, 0x0db0b40c, 0x90014cfe, + 0x8b7e48cc, 0xa0b200d7, 0x6d00a4b3, 0x011290b4, 0xf491a6f0, 0x4e98321b, 0x70efcd01, 0x0600f4b3, + 0x2918700f, 0xff94f006, 0xbb909ebc, 0x49b5029f, 0xcfa53e01, 0xb20bb200, 0x7e2cb23a, 0xb200c35e, + 0x00a4b3a0, 0x0265bb34, 0x90014afe, 0x647e34aa, 0x60b300c7, 0x90b42000, 0x8085bc0b, 0xb09095bc, + 0xf13e0b91, 0x020000cd, 0x00cfd53e, 0xd53e0300, 0x00b400cf, 0xb23ab20c, 0xc6557e2b, 0xb594bd00, + 0xee3e0579, 0x030000cf, 0x00cfee3e, 0x49fe0200, 0x5c999001, 0x2cd99fbf, 0xbf000006, 0xa60ab299, + 0x170bf4f9, 0x00d0143e, 0x33062918, 0xfe4f009d, 0x00cfcc3e, 0x0042e97e, 0xfb0830f4, 0x30f43485, + 0x062cdfdc, 0x82f90000, 0x30f4ffbf, 0x0149fef8, 
0xa04c9990, 0x0bb1b09f, 0xd4b2c2b2, 0xa5b2e6b2, + 0x1700b9b3, 0x00e9b301, 0x94bd0112, 0x3d0141fe, 0x481190c4, 0x1db219a0, 0x00c6cf7e, 0xadb3a0b2, + 0xbf00fb00, 0xf042bc19, 0xa6019998, 0xea08f59f, 0x005ab200, 0xc60c7e03, 0xb3a3b200, 0x00de00a9, + 0x5c981bbf, 0x0140fe0c, 0xb2300090, 0xc6987e0a, 0xcc0ab200, 0xa27e702b, 0xa8b200c7, 0xad00adb3, + 0x7021cd00, 0x420147fe, 0x77900080, 0xd1443e44, 0x0c00b400, 0x79a094bd, 0x09a6f009, 0x00091bf4, + 0xd14b3e02, 0xa6f00900, 0x090df409, 0x4b3e0300, 0xc0b400d1, 0x0704b60b, 0x0db2e4bd, 0xb4bd5ab2, + 0xb00021b0, 0xa97e0171, 0x5abf00c5, 0x2cb20bb2, 0xcb7e3db2, 0x7ebf00d9, 0x3bb2a0b2, 0x2cb25ab2, + 0xca7e0db2, 0x04b300c5, 0x3ab24500, 0x057eb4bd, 0xa0b200c3, 0x3700a4b3, 0x01bb7000, 0xf404a602, + 0x40b2050d, 0xb2101b90, 0xb03bbc6a, 0x2f7e0cb2, 0x4afe00da, 0x0240bb01, 0xbc30aa90, 0x647e6060, + 0x14bd00c7, 0x6d004db3, 0xb280b2ff, 0x7e3bb25a, 0x3e00c655, 0x0000d159, 0x0149fe02, 0xbf4c9990, + 0x062cd99f, 0x99bf0000, 0xf9a60ab2, 0x7e070bf4, 0xf40042e9, 0x85fb0830, 0xf830f424, 0x00062cd9, + 0xbf32f900, 0x014ffe99, 0xb214ff90, 0xbdf9a0a1, 0xb2b2b294, 0x0140fec3, 0x9003204b, 0x09a01000, + 0x577e0ab2, 0x09bf00da, 0x4c0090b3, 0x4800a4b3, 0x09bf91a0, 0xbf0192b5, 0x1000490f, 0xbf04f9b5, + 0xb520090f, 0x09bf05f9, 0xbf0693b5, 0x3501090f, 0x09bf1cf9, 0xbf2c9a35, 0xb5f0090f, 0x0fbf0af9, + 0xb540f990, 0x0fbf0ef9, 0xb5c0f990, 0x0abf0ff9, 0x00d1f63e, 0x49fea4bd, 0x14999001, 0x2cd99fbf, + 0xbf000006, 0xf4f9a699, 0xe97e070b, 0x35fb0042, 0xd030f408, 0x00062cdf, 0xbf82f900, 0x0149feff, + 0xb2509990, 0x059fa0a3, 0x00a9b302, 0xb63004fb, 0x0b9cf000, 0xbd2ca935, 0x0ca9b594, 0x00c60c7e, + 0x3abfa0b2, 0x804cb4bd, 0x7e0db200, 0xb200d9cb, 0x00adb3a5, 0x093f04b1, 0xa24a9d33, 0x01091804, + 0x9a469d33, 0x02091804, 0x92469d33, 0x03091804, 0x8a539d33, 0x040e1804, 0x18050918, 0x0d18060f, + 0xffe4f007, 0xf0ff94f0, 0x94b6fff4, 0x10f4b608, 0xb6059efd, 0xf9fd18d4, 0x05dffd05, 0x045e0bf5, + 0xd6b00505, 0x5d0cf503, 0x013db504, 0x0e01d4b3, 0xb5500049, 0xe43e0239, 0x0d1800d2, 0x09091808, + 0x180a0f18, 0xd4f00b0e, 0xff94f0ff, 0xb6fff4f0, 0xf4b60894, 0x059dfd10, 0xfd18e4b6, 0xeffd05f9, + 0x023eb505, 0x0bb23ab2, 0x00c6557e, 0x09023a98, 0xf4a9a6ff, 0x04bd091b, 0x00d6fa3e, 0x9007a5b6, + 0x3ab5303b, 0xd8c17e03, 0xb3a5b200, 0x03f500ad, 0x09033b98, 0x343a90c0, 0xfd3fbb90, 0xb5b604b9, + 0xda577e03, 0xb3a5b200, 0x03d900ad, 0xfe0147fe, 0x77900148, 0x9044bd40, 0x88900179, 0x0991b03c, + 0x00d4543e, 0x8ea0e4bd, 0x0f0044b3, 0xbd0c3a98, 0x3efe0cb4, 0xb200d377, 0xb24bb23a, 0xc4ea7e7c, + 0xb3a5b200, 0x039d00ad, 0x94f0793f, 0x120bf401, 0xb20c3a98, 0x7eff0c4b, 0x3e00d7c9, 0xb200d451, + 0xc1fb7e7a, 0x00a0b300, 0x0c3a980f, 0xfd0c4bb2, 0x00d3773e, 0x94f0793f, 0x0e1bf402, 0xb20c3a98, + 0x3efd0c4b, 0xb400d446, 0x3ab209b0, 0x8db2010c, 0x00c6cf7e, 0x5d00a0b3, 0x3fb2793f, 0x99c724bd, + 0x01999002, 0x980a91b0, 0x54b354f5, 0xb0b43900, 0x0022bc09, 0x02bc030c, 0x0304b600, 0x014001b8, + 0x1031bc00, 0x2f7e1ab2, 0x30bc00da, 0x4309b800, 0x95200001, 0xb45302b5, 0x0fb50af0, 0x3e81a054, + 0x9000d411, 0xff900122, 0x1424b318, 0xd73e3ebe, 0x3f8ebf00, 0x027f5879, 0x98077d18, 0x3a9803ee, + 0x0299c70d, 0xcc00f3f0, 0x96cb70ff, 0xcb4bb21f, 0x010cd8e6, 0xebf0d6cb, 0x7e01e0f6, 0x9800c124, + 0x4bb20c3a, 0xc97e6cb2, 0xa5b200d7, 0xb400adb3, 0x01449002, 0xa6033b98, 0xeb08f54b, 0xbc94bdfe, + 0x89a0b0bb, 0x8b7e8ab2, 0xa5b200da, 0x9400adb3, 0xbd37b202, 0x547f9884, 0xbc9088bc, 0x94b69098, + 0x4099b803, 0x39bc0001, 0x0b91b090, 0x5300f9b3, 0x033c9802, 0x3d0fa0b4, 0xbc24bdb4, 0x44bdc0cc, + 0x00da047e, 0x3e0f60b4, 0x9800d56f, 0x2bb20d3a, 0x7e0c41b0, 0x3300c178, 
0x00b500a9, 0xfe0c3a98, + 0x2bb2014c, 0x7e38cc90, 0xb300d78b, 0x020c00ad, 0xb40be0b4, 0xef980e90, 0xd899c703, 0x1bf59fa6, + 0x3a98008e, 0x0c2bb20d, 0xc14e7e01, 0x014cfe00, 0x2bb23ab2, 0x7e30cc90, 0xb300c52e, 0x981306a4, + 0x2bb20c3a, 0xc97efd0c, 0x6c3e00d7, 0xadb300d5, 0x7401cb00, 0x93f01c90, 0x9099bc00, 0x7f0069bc, + 0xff19e401, 0x091bf4ff, 0x6c3e0260, 0x3a9800d5, 0x014cfe0c, 0xffff1be4, 0x7e34cc90, 0xb300d78b, + 0x019800ad, 0x343af034, 0xf9263690, 0x60100df4, 0xff1be402, 0x0c3a98ff, 0x00d5613e, 0xb20c3a98, + 0x7efd0c2b, 0xb300d7c9, 0x017000ad, 0x98012290, 0x2aa6033a, 0xff3708f5, 0x6eb264b2, 0xd43db43d, + 0xc4bdf4bd, 0x00d5a33e, 0x9473e97f, 0x010d0a00, 0x00d59d3e, 0x0600d033, 0xcc90010b, 0x01ff9001, + 0xa602ee90, 0xe308f4fa, 0x0b00c4b3, 0x3e547cb5, 0x3300d6df, 0x00a600b9, 0xb0013998, 0x0cf40296, + 0xb2030930, 0x5479b56d, 0xf4bde4bd, 0x00d5e23e, 0x9073d97f, 0x697c0a00, 0x01ff90f9, 0x9001ee90, + 0x399802dd, 0xf4e9a603, 0x593ee908, 0x94bd00d6, 0x79b5f10b, 0xb224bd54, 0xd6333eb1, 0xe4407f00, + 0xf4ffff09, 0xf10f260b, 0x1bf4bfa6, 0xff0be40b, 0xd62b3eff, 0x0c3a9800, 0xffff0ce4, 0x00d7c97e, + 0xb900adb3, 0xff0be400, 0x9019b2ff, 0x44900122, 0x9891b202, 0x29a60339, 0x09c508f4, 0xf5b9a6f1, + 0x9800a00b, 0x3c980c3a, 0xd7c97e0a, 0x00adb300, 0x31b5008c, 0xd6df3e0a, 0xbd6f7f00, 0x01c19294, + 0xf05179b5, 0x04bd00f3, 0x3e527fb5, 0x7f00d697, 0x014c584b, 0x900c3a98, 0xb3f00100, 0x00c3f000, + 0x7e024490, 0xb300d7c9, 0xb45200a4, 0xe9980be0, 0x70999001, 0xa601e9b5, 0xd608f401, 0x9808607c, + 0xf00c0c3a, 0xffff0be4, 0x00d7c97e, 0x2d00a4b3, 0xe4014cfe, 0xb2ffff0b, 0x40cc903a, 0x00c4ea7e, + 0x1900a4b3, 0x98469034, 0x94f0517f, 0xf0f9bcff, 0x3e517fb5, 0x0a00d6df, 0x3ea5b203, 0x9000d6ea, + 0x77900188, 0x148db318, 0x49fefd90, 0x3c999001, 0x387e9abf, 0x003e00d9, 0x030500d7, 0x00d7023e, + 0x0bb204bd, 0x557e3ab2, 0x50b300c6, 0x3a981a00, 0x7e04bd0c, 0x9800d890, 0x30b50d3a, 0xd9387e0c, + 0x0d30b500, 0x900149fe, 0x9fbf5099, 0x00062cd9, 0xb299bf00, 0xf4f9a65a, 0x443e110b, 0x010500d7, + 0x00d7003e, 0x0042e97e, 0xf43085fb, 0x22f9fc30, 0x00062cd2, 0xb229bf00, 0x0141fea0, 0xa00c1190, + 0xc5eb7e19, 0x0c0a9800, 0x00d8907e, 0x7e0d0a98, 0xb200d938, 0xd9387e0a, 0xbf1fbf00, 0xa6a4bd29, + 0x070bf4f9, 0x0042e97e, 0xd90425fb, 0x0000062c, 0x30f499bf, 0x014ffefc, 0xf9a0aeb2, 0x020aa9bf, + 0x18f4b9a6, 0x01b9900d, 0xbd98e9bc, 0xfec9a0a4, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, + 0x0042e97e, 0xf80430f4, 0x062cd900, 0x99bf0000, 0xfefc30f4, 0xaeb2014f, 0xa9bff9a0, 0xb9a6020a, + 0x900b18f4, 0xa4bd01b9, 0xfe99ecbc, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cd900, 0x99bf0000, 0xfefc30f4, 0xf9a0014f, 0xc9a6b9bf, 0x0c0908f4, 0xd8293ef1, + 0x02abb500, 0xacb5aca0, 0x0149fe01, 0x2cd99fbf, 0xbf000006, 0xa6cab299, 0x070bf4f9, 0x0042e97e, + 0xf80430f4, 0x062cdf00, 0xffbf0000, 0xfefc30f4, 0xaebf0149, 0xa9989fa0, 0xb0bebc02, 0x99bfaba0, + 0x08f4b9a6, 0x92b9bc08, 0xa998a9a0, 0xa6aabf01, 0x051bf4a9, 0x49fef10a, 0xd99fbf01, 0x0000062c, + 0xf9a699bf, 0x7e070bf4, 0xf40042e9, 0x00f80430, 0x00062cd9, 0xf499bf00, 0x4ffefc30, 0xb3f9a001, + 0x7e0800a0, 0xfe00d938, 0x9fbf0149, 0x00062cd9, 0xa699bf00, 0x070bf4f9, 0x0042e97e, 0xf80430f4, + 0xf830f400, 0x00062cdf, 0xbf32f900, 0x0149feff, 0xb2149990, 0xb29fa0a0, 0x00a0b3b3, 0xa6fd0242, + 0x3a0cf4a2, 0xfe01ab90, 0xb4b60141, 0x10119002, 0x8b7e1ab2, 0xa4b300da, 0x19bf2700, 0x2cb21db2, + 0x90a0e4bd, 0xe990dfbf, 0xbc9eb201, 0x90a699fc, 0xbff408f4, 0x3e3da0dd, 0x0a00d91d, 0x0149fe02, + 0xbf149990, 0x062cd99f, 0x99bf0000, 0x0bf4f9a6, 0x42e97e07, 0x0835fb00, 0x00062cdf, 0xf4febf00, + 
0x49fefc30, 0xbf9ea001, 0xa6f9bf9e, 0x070bf4e9, 0x0042e97e, 0xf80430f4, 0xfc30f400, 0x2cd112f9, + 0xbf000006, 0x0140fe19, 0x002a9cda, 0x08009000, 0xd37e09a0, 0x0fbf004c, 0xa63019bf, 0x0bacf000, + 0xb901a6f0, 0xf9a601aa, 0x7e070bf4, 0xfb0042e9, 0x30f40415, 0xd112f9fc, 0x0000062c, 0x40fe19bf, + 0x2a9cda01, 0x00900000, 0x7e09a008, 0xbf004c21, 0x3019bf0f, 0xacf000a6, 0x01a6f00b, 0xa601aab9, + 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x9cda0140, 0x9000002a, + 0x09a00800, 0x004e397e, 0x19bf0fbf, 0xf000a630, 0xa6f00bac, 0x01aab901, 0x0bf4f9a6, 0x42e97e07, + 0x0415fb00, 0xf9fc30f4, 0x062cd112, 0x19bf0000, 0xfeffb4f0, 0x00900140, 0x7e09a008, 0xbf000b94, + 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x12f9fc30, 0x00062cd1, 0xfe19bf00, 0x00900140, + 0x7e09a008, 0xbf000b7e, 0xa619bf0f, 0x070bf4f9, 0x0042e97e, 0xf40415fb, 0x22f9fc30, 0x00062cd1, + 0xb219bf00, 0x0140fea2, 0x0090a43d, 0x7e09a00c, 0xa00040e5, 0xbf0fbf2a, 0x00a6b019, 0xa60bacf0, + 0x070bf4f9, 0x0042e97e, 0xf40425fb, 0x22f9fc30, 0x00062cd1, 0xb219bf00, 0x0140fea2, 0x0090a43d, + 0x7e09a00c, 0xa00040c3, 0xbf0fbf2a, 0x00a6b019, 0xa60bacf0, 0x070bf4f9, 0x0042e97e, 0xf40425fb, + 0x2cdfe430, 0xf9000006, 0xfeffbf82, 0x45fe0149, 0x3c999001, 0xa00147fe, 0x2455909f, 0xd9347790, + 0x00001420, 0x4bfe9abf, 0x90080c01, 0xff0d2cbb, 0x0000c17e, 0xeb00a433, 0x3f0c30b4, 0x0c943339, + 0x043118e2, 0x0f001033, 0xb0011933, 0x3e043d00, 0x9800dc65, 0x30d9023f, 0x98000014, 0x34580431, + 0x3f5fa00a, 0x0339989f, 0xb5183690, 0xff090159, 0xf43379a0, 0x717e1800, 0xa032003b, 0x2900ad33, + 0xdf010901, 0x00001430, 0x1272f920, 0xbd0043f0, 0xdba53e14, 0x0241bc00, 0x010006b1, 0x40060df4, + 0x947e0100, 0x28d9000b, 0xbf000014, 0xff2ce49a, 0xb26bb2ff, 0x1300de0d, 0x1e7e0000, 0x7aa000d0, + 0xd400adb3, 0x985bbf00, 0x1d90015c, 0x7c0eb204, 0x10bc2020, 0x1300da10, 0xd87e0000, 0xa0320026, + 0xc500ad33, 0x4cb4bd00, 0x00da0100, 0xa6000013, 0xa408f414, 0x00dc543e, 0xd9023f98, 0x00001430, + 0x58043498, 0x5fa00a32, 0x39989f3f, 0x18389003, 0x090159b5, 0x3379a0ff, 0x7e1600f4, 0x32003b71, + 0x00ad33a0, 0x30df0084, 0x20000014, 0xff26e4f1, 0x3e24bdff, 0xbc00dc45, 0x16b11262, 0x0df40100, + 0x01004106, 0x000b947e, 0x5c985bbf, 0x042d9001, 0x00da1eb2, 0x7e000013, 0xe4002672, 0xbcffff4c, + 0xa0322021, 0x1db28bb2, 0x001300de, 0x40417c00, 0x3500a433, 0x001428d9, 0x7e9abf00, 0xa000cd48, + 0x00a4b37a, 0x4cb4bd13, 0x00da0100, 0xa6000013, 0xa608f426, 0x5c985bbf, 0xbd7ab201, 0x7e040ed4, + 0x320026d8, 0x2db034a0, 0x817e3ab2, 0x0d33000f, 0x30fe7100, 0x020f3a01, 0x1838f130, 0x04090333, + 0x30014afe, 0x31303991, 0x90b4bd3b, 0x717e38aa, 0xdf3e000f, 0x54d900da, 0xf400002a, 0x2cdffc30, + 0xf9000006, 0xbfa0b202, 0xfe9abfff, 0x99900149, 0xa0640b04, 0x00de7e9f, 0x33100e00, 0x896100a4, + 0xcf02a600, 0xf5f1009f, 0x9ff61000, 0xa6008a00, 0x10004b02, 0x104dc4bd, 0x7ee4bd27, 0x33001b0f, + 0xd91500a4, 0x00002a54, 0x9b7e9abf, 0x040e0000, 0x00dd1c3e, 0xe84b0ab2, 0x16fc7e03, 0xa4008900, + 0x00aab802, 0xa5b60002, 0x01aa920a, 0x4f009af6, 0x99b80111, 0xf6000200, 0xe43d009f, 0x900149fe, + 0x9fbf0499, 0x00062cd9, 0x3299bf00, 0xf4f9a6ea, 0xe97e070b, 0x05fb0042, 0x00000004, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00005600, 0x00005625, 0x0000564a, 0x0000566f, 0x00005694, 0x000056b9, 0x000056de, 0x00005703, + 0x00005728, 0x0000574d, 0x00005772, 0x00005797, 0x000057bc, 0x000057e1, 0x00005806, 0x0000582b, + 0x00005850, 0x00005875, 0x0000c350, 0x00003e80, 0x00004e20, 0x000061a8, 0x000064b5, 0x00007d00, + 0x00009c40, 0x0000cf85, 0x4953424e, 0x004c4520, 0x00412f4e, 0x00640077, 0x64310062, 0x62347734, + 0x62327732, 0x32007733, 0x00773262, 0x00773531, 0x77316236, 0x00623700, 0x64317731, 0x62347731, + 0x32313000, 0x36353433, 0x41393837, 0x45444342, 0x00000046, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000021db, 0x00002222, 0x00002253, 0x00002292, 0x00001f4e, + 0x00001f95, 0x00001fc6, 0x00002017, 0x00002088, 0x000020cf, 0x00002119, 0x0000216a, 0x00001e2a, + 0x00001e71, 0x00001ea2, 0x00001eeb, 0x0000024c, 0x0000001a, 0x00000384, 0x00000009, 0x000003f0, + 0x00000004, 0x00000420, 0x00000002, 0x00000438, 0x00000009, 0x000004a4, 0x00000004, 0x000004d4, + 0x00000004, 0x00000504, 0x00000002, 0x0000000c, 0x00000000, 0x00000000, 0x00000013, 0x00000001, + 0x00000000, 0x00000013, 0x00000001, 0x00000001, 0x00000013, 0x00000001, 0x00000002, 0x00000013, + 0x00000001, 0x00000003, 0x00000013, 0x00000001, 0x00000004, 0x00000013, 0x00000001, 0x00000005, + 0x00000013, 0x00000001, 0x00000006, 0x00000013, 0x00000001, 0x00000007, 0x00000013, 0x00000001, + 0x00000008, 0x00000004, 0x00000002, 0x00000000, 0x00000004, 0x00000002, 0x00000001, 0x00000004, + 0x00000002, 0x00000002, 0x00000004, 0x00000002, 0x00000003, 0x00000004, 0x00000002, 0x00000004, + 0x00000004, 0x00000002, 0x00000005, 0x00000004, 0x00000002, 0x00000006, 0x00000004, 0x00000002, + 0x00000007, 0x00000004, 0x00000002, 0x00000008, 0x00000005, 0x00000003, 0x00000000, 0x00000015, + 0x00000004, 0x00000000, 0x00000015, 0x00000004, 0x00000001, 0x00000015, 0x00000004, 0x00000002, + 0x00000015, 0x00000004, 0x00000003, 0x00000016, 0x00000005, 0x00000000, 0x0000000e, 0x00000012, + 0x00000000, 0x00000006, 0x00000007, 0x00000000, 0x00000006, 0x00000007, 0x00000001, 0x00000006, + 0x00000007, 0x00000002, 0x00000006, 0x00000007, 0x00000003, 0x0000001a, 0x00000008, 0x00000000, + 0x0000001a, 0x00000008, 0x00000001, 0x0000001a, 0x00000008, 0x00000002, 0x0000001a, 0x00000008, + 0x00000003, 0x00000004, 0x00000015, 0x00000000, 0x00000002, 0x0000000b, 0x00000000, 0x00000002, + 0x0000000b, 0x00000001, 0x00000002, 0x0000000b, 0x00000002, 0x00000002, 0x0000000b, 0x00000003, + 0x00000003, 0x0000000c, 0x00000000, 0x00000001, 0x0000000a, 0x00000000, 0x00000001, 0x00000009, + 0x00000000, 0x00000001, 0x00000009, 0x00000001, 
0x00000001, 0x00000009, 0x00000002, 0x00000001, + 0x00000009, 0x00000003, 0x00000001, 0x00000009, 0x00000004, 0x00000001, 0x00000009, 0x00000005, + 0x00000001, 0x00000009, 0x00000006, 0x00000001, 0x00000009, 0x00000007, 0x00000001, 0x00000009, + 0x00000008, 0x00000001, 0x0000000d, 0x00000000, 0x00000001, 0x0000000d, 0x00000001, 0x00000001, + 0x0000000d, 0x00000002, 0x00000001, 0x0000000d, 0x00000003, 0x00000002, 0x0000000f, 0x00000000, + 0x00000002, 0x0000000f, 0x00000001, 0x00000002, 0x0000000f, 0x00000002, 0x00000002, 0x0000000f, + 0x00000003, 0x00000003, 0x00000010, 0x00000000, 0x00000001, 0x0000000e, 0x00000000, 0x08000000, + 0x08400000, 0x08800000, 0x08c300a7, 0x09000000, 0x09400000, 0x09800000, 0x09c00000, 0x0a01c000, + 0x0a404038, 0x0a804040, 0x0ac04048, 0x0b004050, 0x0b420058, 0x0b8201ab, 0x11800000, 0x11c00000, + 0x12000000, 0x12400000, 0x12800000, 0x12c00000, 0x00000001, 0x00001c08, 0x00101c09, 0x00201c0a, + 0x0000bd08, 0x00209d09, 0x00309d0a, 0x00011f08, 0x00113e09, 0x00311e0a, 0x00010309, 0x00000000, + 0x0000ffff, 0x00004300, 0x46020f1f, 0x43010f1f, 0x44020f1f, 0x45020f1f, 0x601207ef, 0x601307ef, + 0x601407ef, 0x601507ef, 0x801607ef, 0x253207c2, 0x25330fc2, 0x25340fc2, 0x25350fc2, 0x1152079d, + 0x1253079d, 0x7014079d, 0x7015079d, 0x601203c8, 0x601307c8, 0x601407c8, 0xbb150720, 0x02172701, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00560000, 0x00420074, 0x007f0078, 0x008c0089, 0x004200c0, 0x00dc0042, 0x00051f00, 0x35040b08, + 0x0200001d, 0x00000000, 0x00000000, 0x00000000, 0x00000c00, 0x00000000, 0x00000000, 0x00000000, + 0x00003650, 0xc8b00000, 0x00010000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x010000d0, 0x0004fe00, 0x00e0927e, 0x02f802f8, 0x008980f9, 0x48fe0102, 0xcf22f901, 0x008f0099, + 0x9ffd1000, 0x1f1bf404, 0x00f7808b, 0x108a100c, 0x017e00ef, 0x908b00e1, 0x100c00f7, 0x00ef208a, + 0x00de5c3e, 0x00f7a08b, 0x108a100c, 0x017e00ef, 0x208a00e1, 0xb08b00ef, 0x100c00f7, 0x00e1017e, + 0x00f7808a, 0xc17e100b, 0x908a00e0, 0x100b00f7, 0x00e0c17e, 0x00f7a08a, 0xc17e100b, 0xb08a00e0, + 0x100b00f7, 0x00e0c17e, 0xffffffdf, 0xc3008900, 0x009ff601, 0x01c0008f, 0xc700f9cf, 0x99b34c99, + 0xb3019400, 0xf8f50190, 0xde983e02, 0x00f9cf00, 0xb34c9ec7, 0x016d00e9, 0xf501e0b3, 0xad3e02f8, + 0x008900de, 0x99cf0102, 0x00008f00, 0x049ffd10, 0x00ed008f, 0x890e1bf4, 0xb500ef80, 0xf03e80f9, + 0xfeb500de, 0xdef03e80, 0xed008900, 0x809fb500, 0x0089f4bd, 0x9fb500ed, 0xc0008f81, 0x00f9cf01, + 0xb34c99c7, 0x00fb0099, 0xf50190b3, 0xfd3e02f8, 0xf9cf00de, 0x4c99c700, 0xd10099b3, 0x0190b300, + 0x3e02f8f5, 0x9000df12, 0x95b601ff, 0xfa1bf401, 0xf6b0f9b2, 0x550cf401, 0xf00096b0, 0x00890b2c, + 0x804c00de, 0x0022bc01, 0x00ed008a, 0x940002bc, 0x19bc0701, 0x7e1bb210, 0x8900e101, 0x3f00de02, + 0x0704b61f, 0x180009bc, 0x0e3f0119, 0xb6fff4f0, 0x94f008f4, 0x18e4b6ff, 0xbc1094b6, 0x9ebc909f, + 0x2029bc90, 0xf7110049, 0x008f0092, 0x008900ed, 0x9ff60484, 0x40004f00, 0x1d0099b8, 0x009ff602, + 0x99b8010f, 0xf6020100, 0x99b8009f, 0xf6020600, 0x008f009f, 0xf5b600e2, 0x0d008908, 0x1094b600, + 0x8f059ffd, 0xfd020000, 0x9afe059f, 0xdfd43e00, 0x00000000, 0x00e2007e, 0x00dfdc3e, 0x00dfe03e, + 0x84fe20fb, 0xf880fc00, 0xc2008900, 0x0099cf01, 0xffff94f1, 0xff440bf5, 0x273ef4bd, 0x40df00df, + 0x89008241, 0xf601c100, 0xf1df009f, 0xb8800000, 0x02010099, 0xcf009ff7, 0x9fb2009f, 0x00df123e, + 0x01c20089, 0xc40099cf, 0x1bf5019f, 0xe93efe98, 0x38df00de, 0x89008207, 0xf601c100, 0xf1df009f, + 0xb8800000, 0x02010099, 0xcf009ff7, 0x9fb2009f, 0x00dead3e, 0x004f80f9, 0x0148fe01, 0xf000f9ce, + 0xf9f71095, 0x07004f00, 0xf000f9ce, 0xf9f71095, 0x04004f00, 0xf000f9ce, 0xf9f71095, 0x0084fe00, + 0x00f880fc, 0x48fe80f9, 0xfe02f801, 0x80fc0084, 0x80f900f8, 0x48fe020f, 0x11004901, 0x89009ff7, + 0xfe00e084, 0xff0f0093, 0xf7050049, 0x547e009f, 0x107e00e0, 0x02f800de, 0x84feff0a, 0xf880fc00, + 0x8f80f900, 0xbf00faa0, 0x0148feff, 0xf4048992, 0x9fa0fc30, 0xe23e943d, 0xa92000e0, 0x9001bb92, + 0xb4b301aa, 0x8992f800, 0x899fbf04, 0xbf00faa0, 0xf4f9a699, 0x047e070b, 0x84fe00e5, 0xf880fc00, + 0x8f80f900, 0xbf00faa0, 0x0148feff, 0xf4048992, 0x9fa0fc30, 0x263e94bd, 0xb93c00e1, 0x01cc92f8, + 0x9099af3c, 0xc4b30199, 
0x8992f400, 0x899fbf04, 0xbf00faa0, 0xf4f9a699, 0x047e070b, 0x84fe00e5, + 0xf880fc00, 0x8f80f900, 0xbf00faa0, 0x0148feff, 0x92fc30f4, 0x9fa00489, 0x00e1583e, 0xf80202f8, + 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, + 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, + 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, + 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, + 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, 0x0202f802, 0xf80202f8, 0x02f80202, + 0xb859f817, 0x04a6e76d, 0x94ec71f5, 0x7b0eddc1, 0x2e3ae6fa, 0xaf67abb5, 0xa53604d2, 0x58d889a7, + 0x7fa7e4a5, 0xaab29c19, 0x3198f6d8, 0xab903e17, 0x787fe632, 0x063c1e39, 0x490ed528, 0xa73d1c1f, + 0x73ba3f30, 0x96e1fca0, 0xb47874df, 0x62270009, 0x9bbf441f, 0x6d1bafef, 0xea8d89c1, 0x9862171e, + 0xaaadf711, 0xd464cc25, 0x6fe3f17c, 0x724e93e5, 0x4bde116f, 0x3d99385b, 0x19964f75, 0xdf23d4e5, + 0xcb93f417, 0xdc227910, 0x39b85869, 0xef6b8f4c, 0x305a62e4, 0x44346aff, 0x8e42b348, 0x70ff7926, + 0x678573cf, 0x2225d250, 0xd37b88c3, 0x56a0ed3e, 0x0805d841, 0xb6dc2e85, 0xbabee266, 0x4a75b071, + 0xafac5b2a, 0x2a582073, 0x882270bb, 0x41c77e20, 0x7706328f, 0x1ed8529c, 0xe3e0d3d1, 0x3af00eb2, + 0xe7085af6, 0xd00303c9, 0x5c02ef68, 0xbb39c8ab, 0x190b8db6, 0x948a9b1c, 0x39229811, 0xff401d51, + 0xc828a932, 0x4c052cf1, 0x89440789, 0x755873c2, 0xd335e69e, 0xde092499, 0x6016d2d4, 0xdea024b6, + 0x5fbc2e57, 0x2439a6bb, 0x2b9dad19, 0xcae2661e, 0x71749893, 0x5b0569f4, 0x048b3b7f, 0xb2e00703, + 0xd1e21c4f, 0x7c37569d, 0x4887eb93, 0x0e873659, 0xfb77f695, 0x97cb0dad, 0xcd05323e, 0x63bef316, + 0xe568b4f4, 0x5f533b06, 0x18249315, 0xe29d7352, 0xa2c78edf, 0xfe0cfd7e, 0xe463b0e2, 0x45de6605, + 0xb93087bc, 0x89d81181, 0x0dcc8192, 0xcd388be4, 0x826f7651, 0xf28467fb, 0xf2290c77, 0x7aef943b, + 0x7756ab51, 0x656ca234, 0x14b7efae, 0x5b795da0, 0x6487f9a8, 0x048e62af, 0xdecc9b15, 0x79d98972, + 0xfd802f98, 0xe490197a, 0x59d176b5, 0x4a3e4723, 0x126d4eb5, 0x46b89b5e, 0x372d2680, 0xd8150ba3, + 0x6b1f3972, 0xe043747b, 0x437ed738, 0xb4b3aece, 0x25bc91a6, 0x19db2026, 0xc74108ad, 0x3c9c2f19, + 0x753edfd9, 0xfb463308, 0xd1a0688a, 0xb9e77aeb, 0xdefbbb5a, 0x6b518878, 0xa7d27c61, 0x10e8a47e, + 0xe624527e, 0x7e0738f1, 0xe939b305, 0xf1c18367, 0xe53bbfbd, 0x7ebc0a51, 0xb5bc02dc, 0xedf9830c, + 0x7e2b0519, 0x0716b545, 0x8ec6aec9, 0x1a878730, 0x84c64af1, 0x67321e01, 0xa5a34416, 0xa54fd0f7, + 0x2bec4c3d, 0x42a315d2, 0xd1c88608, 0x598fceb4, 0x4f203641, 0x14b34afe, 0xcc59662a, 0x81654f41, + 0x90bf348b, 0x4cf847ad, 0xe205617a, 0x23422b74, 0x5187d73a, 0x5cd8c60b, 0x1d4526ea, 0xafa024c9, + 0x8eb17715, 0x51a5fb2c, 0xbd5a61f8, 0xff9de5e5, 0x23344ba0, 0xa2e230a7, 0x881b9186, 0xe2c87765, + 0x6c7235f0, 0x43382006, 0x20746977, 0x3c85928b, 0xec1e092c, 0x8fde0974, 0xcc766925, 0xa659c09e, + 0xc3dceff9, 0xa9edd8af, 0x0fb04cbc, 0x28f82831, 0x44935479, 0xafa83d67, 0x92720371, 0xc7927a2e, + 0xf55ec3ed, 0x66d1d842, 0x074c442a, 0x50a36e1c, 0x393aeaa1, 0x1e01b057, 0x191f20ec, 0x613ebacf, + 0x04b38819, 0xac02d752, 0x4b241c1d, 0x4024412b, 0x4ba5d8c1, 0x8b833d1e, 0xadf36624, 0x4413b6c4, + 0x34e6914b, 0xa99ee9c0, 0x1e9118c2, 0xde1e873a, 0xed28a3e4, 0x13b8f17e, 0x8a9ac8d9, 0x6bfd1b2b, + 0x6d25d290, 0xa2645bfa, 0x83fe9c9f, 0x593e0370, 0x092f950b, 0xf4676205, 0xbfaa916b, 0xa55ed05d, + 0x1dc7c544, 0xe92e9cbb, 0xda4d932f, 0xc16a7b2f, 0x459666fa, 0x1d537691, 0x65dba917, 0xb4eed01a, + 0x6847dab7, 0x0a181d7e, 0x1f51b672, 0x80508490, 
0x3675e65b, 0xaac376bf, 0xb3942209, 0x6c4cdf63, + 0x7f7df269, 0xb05bcce6, 0x069a7e05, 0xa171c98a, 0x2be8ea60, 0x5ec99b39, 0xafb0636b, 0xc6938fed, + 0x2fdd4681, 0x2b027b79, 0x4a195566, 0xc41df13b, 0x88472a5b, 0x311d2d17, 0xe03e8b4e, 0x181c3d8f, + 0xbfc544a7, 0x7aab96ac, 0x3dafd10a, 0x64591c59, 0xf5ee9a1f, 0x8141de1d, 0xd246a1a3, 0x09143e3a, + 0xb94506ca, 0xd2a6f9f0, 0x876d44fd, 0xbc1d5a44, 0xfa7ac546, 0x0a7aca38, 0x656c0b4a, 0xc97c9ea2, + 0x6136594d, 0xca63e052, 0xc14c63eb, 0x3a00ebf0, 0x01ae1ccc, 0x97f98375, 0x4f4f2bc2, 0xe34fd2d3, + 0x9a4aee3a, 0x7b0ee0eb, 0xb45dbb17, 0x3eeb92f3, 0x4d8caf5e, 0x60b03c27, 0xa8de0574, 0xf208d502, + 0xb9c5a4b8, 0xec101349, 0xb3bebe9a, 0x5eefe006, 0x2f3ccf02, 0x01147611, 0x31e5fa35, 0xa423ff28, + 0xe2b427db, 0x41912d39, 0x12851939, 0xebfd4f26, 0x67dbe7c0, 0xeeee08eb, 0xf6c1ef9e, 0x3765c802, + 0x3cd2da12, 0xa9e3aca7, 0x4a6f6482, 0xa50b3e2c, 0x0b31078f, 0x73570769, 0x473438ab, 0xa1d85612, + 0x8e4d1717, 0xbffa3e38, 0xf4d5bd69, 0xc7175e52, 0x3d9029d6, 0x108c00ef, 0x1c12b333, 0xa8bb0d10, + 0xa1e178bf, 0x5680e8c3, 0x97cd85eb, 0xcc3ec891, 0x1d19428f, 0x428af28f, 0x11802755, 0xc90f1dbb, + 0x9be6bac0, 0x67448bf7, 0x622b9f52, 0x4e58d3fa, 0xd54b2868, 0xe2cf86d6, 0x30cd1044, 0x3b96d541, + 0x9de5ffc0, 0x29df7c0d, 0x7fcb302d, 0xc5f270cd, 0x6bc45858, 0x95feeb32, 0x80b95e99, 0xa6b25cc4, + 0xa966bf96, 0x971cc81c, 0xfab4a9f6, 0xd34896c8, 0xb7b737a0, 0x88207653, 0x38bd3125, 0x41bb0628, + 0xa0dfe7e0, 0xd97da4f0, 0x3d7e1039, 0xc9c25ebb, 0xd7db195d, 0x55fdb6c4, 0x49eca747, 0xfbdf5231, + 0xd514105a, 0x2f663593, 0xba2ef33e, 0x1518a023, 0x505fa7f2, 0x08d209b4, 0x8af598d9, 0x6d9d350f, + 0xf1c755e0, 0x9348f579, 0xd34d8d13, 0x88732607, 0x9a013c97, 0x5f3abda1, 0x6e6d00d2, 0xde470e9c, + 0xdf58cdae, 0xdeea3bcb, 0x327401aa, 0x220676e5, 0xd79c0b91, 0xa8f2f2d5, 0x411d6d61, 0x41d20826, + 0x6ca74f70, 0x4d1ddb82, 0xfe5e6f7b, 0x904dd2fa, 0x9f9970ce, 0x677d2e57, 0xe86fb28c, 0x82a1f5bf, + 0xddb4872a, 0xc80a9a16, 0x42167966, 0x44d1dcf1, 0x9b3060b8, 0xb5c73483, 0x07616d5a, 0xc46af3ee, + 0xa929a8dd, 0xc36fe034, 0x2ec3c1a9, 0xa5be6b51, 0x0fcaf95e, 0xb18b8725, 0xfdd354b4, 0xd5e1ae2f, + 0x979f8249, 0x64d54720, 0xa011a978, 0xa415ebbe, 0x297892cd, 0xb5978ed7, 0x74a2f9a8, 0xbf76451d, + 0x6495bafe, 0x489841b2, 0x790ec1e8, 0x389138a9, 0x24662925, 0xbbd5f6a5, 0x854b9ff1, 0xfd7c06c9, + 0x775240e9, 0xc75c288a, 0x8249225e, 0xbe91e574, 0xc4099930, 0xbf347667, 0x3a85329d, 0x285817a2, + 0xd7db195d, 0x55fdb6c4, 0x49eca747, 0xfbdf5231, 0x576a23d4, 0x633214b1, 0x3e817fb9, 0xbc866644, + 0xdf5c6b4f, 0xb0989dac, 0x8096e4f3, 0x04a42b74, 0xd0ace188, 0xcb6c9533, 0xfea1befa, 0x74f89788, + 0x51ee3867, 0xec161326, 0x6f2eea7c, 0x53246e0a, 0xdf04d0d5, 0x10459088, 0xdc876e59, 0x5e56b4c2, + 0x1a37bd3c, 0xc1d0f084, 0x655677e8, 0x55b63a0e, 0x3d9029d6, 0x108c00ef, 0x1c12b333, 0xa8bb0d10, + 0x7b64cb04, 0x15d32464, 0x036a3fca, 0xac2d4ca1, 0xa8d37cb1, 0x2f55fc2a, 0x18b4330c, 0x032da4b2, + 0x608c86f9, 0x1ec96f6b, 0x67aab027, 0xf3ba7984, 0x8564fd68, 0xc2dfba66, 0xd2e06357, 0x33380b7f, + 0x9acd3a3e, 0xcea3b291, 0xfaf71be9, 0xdec76d78, 0x5da97d04, 0xe4f00504, 0x83ce0a3b, 0x7a65c6a3, + 0x8e3f77a3, 0x66526877, 0x3921a73e, 0x3bf9b918, 0x7824ee74, 0x01bbfc49, 0xd71f2ee1, 0xc8b0b860, + 0xad6bfa61, 0x114ccfe5, 0x1166c3ad, 0xed466479, 0x546ffc5b, 0x4c659e43, 0x2fe8c19a, 0xbe020cda, + 0x62b9da49, 0x944a7c0a, 0x71d066d5, 0xa2791c6c, 0x1d408355, 0xec00718b, 0x604047cb, 0x09202ea7, + 0x0d97d022, 0xb4ca8e40, 0x0dfeab4c, 0x74d858fb, 0xadadee3c, 0x7eafa270, 0xdfdb1b46, 0xbacb460f, + 0xa2edb54d, 0x039946cc, 0xfafd5066, 0x1c7cd2bf, 0x6c44fc40, 0xd785a91b, 
0x04db8475, 0x397021bd, + 0x951eb489, 0xde24004b, 0x02dc53c2, 0x127b85bf, 0xf75130e9, 0xb2c9f44c, 0x24842804, 0xfdcb45fe, + 0x5b5ff2fe, 0xf07233f7, 0x284af222, 0x584c9271, 0x163227b3, 0xb663e1d5, 0xd15abaca, 0x3e700cc1, + 0xae7e8d50, 0x02c438ab, 0xd1185758, 0x36c5cc70, 0xc7b18bfe, 0x546c50e5, 0x169a5a7e, 0xea283de1, + 0xaf4bb1bf, 0x8d948ac5, 0x42fe07a1, 0x2efabe9a, 0x0d4bbbcb, 0xd8cd5f59, 0xb1ef4ebb, 0xeacd52db, + 0x68e6e8c3, 0x14bb1609, 0x002c0bba, 0x83b1c3f7, 0x1989d585, 0x3084bbd4, 0x0f77eecd, 0x04cbf166, + 0xe4c3fcd9, 0x0f82f7d4, 0x66086cfc, 0xb61cb927, 0x3b98a24f, 0xe517d8c1, 0x05a03974, 0xdb75d137, + 0xec815f5d, 0x949043d8, 0x57d98703, 0xea27c491, 0xa48f62b9, 0xd417be98, 0x8f22d021, 0x450951b5, + 0x4de12619, 0x6b55cb6e, 0xa0a8f5b1, 0x9d883809, 0x5b9b88eb, 0xdceef5c0, 0x6734f350, 0x197fe97b, + 0x2e291fb0, 0x7bbf4317, 0xb7d42053, 0x897b7d50, 0x782311a8, 0x5b334268, 0xaf1dc1eb, 0xca8a6a93, + 0xc69d822b, 0x817e4336, 0x0b42af77, 0xf0794eaf, 0x3d2917ac, 0xad6136e6, 0xc579f91c, 0x5c57561a, + 0x9854a600, 0xe490963d, 0x2e4dbf32, 0xef093ecd, 0xe2e5b982, 0x437d800a, 0x8f43a91b, 0xcf8ef1a3, + 0xfef45e6d, 0xbb5c8ab2, 0x2fb95c00, 0xf9497bf3, 0x23cdd6de, 0x732a2db5, 0xb657775d, 0x9da40f98, + 0xd51e0056, 0x37367dda, 0x1ae76401, 0x85234696, 0xb507dece, 0x09916f3d, 0x2a273b44, 0x5abc3069, + 0xad30c8e1, 0x92969918, 0xe792d54d, 0x7b9f0ba3, 0x87966483, 0x4b563eb6, 0xfca9c9e6, 0x5293a999, + 0x20c14e97, 0x596a21c0, 0x63ffecd6, 0xa9327d25, 0x282b78e9, 0x4234c705, 0x5e895e03, 0x2b9f2818, + 0xfca5a80d, 0x65a10dcd, 0x18963ec1, 0x69912f3f, 0x7a96fd9f, 0xd0c3781b, 0x92ea4828, 0x069f0a09, + 0x092c8afa, 0xf3f5550b, 0x54c9197f, 0x0101853d, 0xd3cd45f7, 0x0741f5ec, 0x698db53c, 0x450b3559, + 0xb6fefc1f, 0x90265a82, 0xa2d45627, 0xaf86643c, 0xfd2c5735, 0x21815156, 0xe89d2b2f, 0x12b84ab1, + 0x1a18316f, 0x2c727b94, 0x3858aba5, 0xd6deae21, 0xa4366fda, 0xe72dfcd1, 0xfd40bab4, 0x79031810, + 0xb3934c5e, 0xadc7ac50, 0x840fd0e7, 0x7a8472de, 0xa46bb650, 0x6f201575, 0x9d2e278f, 0xaeeb38e8, + 0xa6d5f95f, 0xf14b58ed, 0x8e27f428, 0xcb1af58d, 0x63a42c86, 0x5f34177f, 0xef51c3c7, 0x3883cf03, + 0xcd6ca577, 0xbb798063, 0xea5addaa, 0xb92acba1, 0x4651e5d7, 0x97de551b, 0x6ff0c31c, 0xd7e92a47, + 0x0b0e14e1, 0x4e4093b4, 0x109508a1, 0xef865bca, 0x96160163, 0xd81b44c9, 0x1e813c17, 0x87c0a8c3, + 0x3844bd49, 0x85797ffd, 0x23442cb8, 0x8df81f77, 0xe60f3b9b, 0x75b1fa03, 0xb2378301, 0x1f03058e, + 0x9789cf11, 0x4eec4f84, 0xed527644, 0xfeff452c, 0xf8ca5bdf, 0xe626b06a, 0xe1536f17, 0x35fba9e6, + 0xf314d7ee, 0x8d4f1d47, 0x915e214f, 0x291b7441, 0xf5a39ce2, 0x3ffcba34, 0xc3c67cf3, 0x38ef67a4, + 0x0b12c4f1, 0x6b22ce3e, 0x1b08f56c, 0x086abeb0, 0x3ce5d12f, 0x36542525, 0x01032679, 0xae4bdce0, + 0x5edff5a8, 0x6951f88b, 0x2b737b59, 0x39b65936, 0x7a4f9653, 0x847e5bd6, 0x54a88eb4, 0x0f671246, + 0xd0be4ee2, 0xc82b1369, 0xaee8c49f, 0x49b9bf4f, 0x67b1416e, 0x5d225902, 0x5adbb041, 0x1c348495, + 0x561e5eea, 0x93c92df0, 0x74229026, 0x29be6d1f, 0x68422294, 0xe0c4ffec, 0xc42fda50, 0xbdb317e1, + 0x9f9970ce, 0x677d2e57, 0xe86fb28c, 0x82a1f5bf, 0x52ca0fcd, 0x17f3f17b, 0x00ae4bd0, 0x398fce10, + 0xffe3626f, 0xa83259ea, 0x680cdff5, 0xca77f8ae, 0x7d3690ae, 0xb0ccacfb, 0xdd0e4820, 0xcce052a1, + 0x75e43df2, 0x44ddbfdf, 0x48e65a97, 0x2e94eca6, 0xffe3626f, 0xa83259ea, 0x680cdff5, 0xca77f8ae, + 0x7d3690ae, 0xb0ccacfb, 0xdd0e4820, 0xcce052a1, 0x75e43df2, 0x44ddbfdf, 0x48e65a97, 0x2e94eca6, + 0xffe3626f, 0xa83259ea, 0x680cdff5, 0xca77f8ae, 0x7d3690ae, 0xb0ccacfb, 0xdd0e4820, 0xcce052a1, + 0x75e43df2, 0x44ddbfdf, 0x48e65a97, 0x2e94eca6, 0xffe3626f, 0xa83259ea, 0x680cdff5, 0xca77f8ae, + 
0x7d3690ae, 0xb0ccacfb, 0xdd0e4820, 0xcce052a1, 0x75e43df2, 0x44ddbfdf, 0x48e65a97, 0x2e94eca6, + 0xffe3626f, 0xa83259ea, 0x680cdff5, 0xca77f8ae, 0x7d3690ae, 0xb0ccacfb, 0xdd0e4820, 0xcce052a1, + 0xc198c2ca, 0x535b8a13, 0xac9ba936, 0xb12ac8b9, 0xe2d4710a, 0x4d4c34aa, 0xe76baa54, 0xedc7d177, + 0x5e38a99a, 0x9043013e, 0x5b4317e5, 0xb3b74dcf, 0x5dc45886, 0x3321f51f, 0x9d775998, 0xd4037cc1, + 0x50d0643a, 0x1c1af080, 0x85f66596, 0xebf998e5, 0x53476d41, 0x9d8f4d86, 0x4a871299, 0x765b6080, + 0xa201ace0, 0xd5e844e7, 0x5bd48955, 0x2904f50d, 0x69c4e901, 0x443fe20e, 0x36d1819e, 0xda8bf511, + 0x307a8d65, 0x37ef2564, 0x9fe7b3ed, 0x549bfb20, 0x107ff663, 0x58fe3dbe, 0x0852b56f, 0x6e5ccb5b, + 0xc4aed244, 0x161beb08, 0xf73f410b, 0xa81cca55, 0xaa70cfd4, 0xfc97ac83, 0x6b30fc47, 0x6e139312, + 0x96054812, 0x7116322b, 0x5a5e0e8c, 0x157bdd33, 0xd8c2a802, 0x0f751dd9, 0x7f8d6ded, 0x8bdbfe84, + 0x36481374, 0x6ee65553, 0xc97f5d9d, 0x13e0787a, 0xfd9dc59c, 0xd8e382b1, 0x4377211c, 0xf3a7a5eb, + 0x67c5e79e, 0xe72280f8, 0x0964a98a, 0xc5e7c143, 0x5118edf3, 0xc2c22e89, 0x18a7f3d5, 0xd7b42911, + 0xd6cce106, 0x09396d10, 0x81a0f305, 0x9107138b, 0x05aac19c, 0xb6ba6da0, 0x60140926, 0x29efbab2, + 0x99217c3d, 0x1959f256, 0xa7efc950, 0xcf8074f8, 0x1b17b9fc, 0x6431ea8a, 0xe4b3e571, 0x7444c8f2, + 0xf8b65657, 0xf4aa2058, 0x352ca609, 0x8c46e48e, 0x409a02ba, 0xcc9d3d13, 0x6b707f11, 0x62ceaf50, + 0x3bd4264e, 0x0c823a72, 0x6a652136, 0x28374261, 0xb97f654e, 0x5969831e, 0x50e02f30, 0xfea72a23, + 0x7a85428f, 0x0a0d7731, 0x796d10d1, 0x8ab436d6, 0x6d6aa304, 0x6433cdba, 0x4dc1fbf2, 0xdf65335f, + 0x23887c1d, 0x007b5762, 0x87407bf9, 0x0a04c9d6, 0x9605f788, 0x412a2a8b, 0x886c1f57, 0x13b60dd6, + 0xc7408b74, 0xdb3baa36, 0x1d6cad4b, 0x09a13b4f, 0x58bc1f17, 0x9512bf93, 0x9fb340e5, 0x2d6a18fd, + 0x6cbc9504, 0x7cb7c5ae, 0x19fc0969, 0x6c19bf92, 0xb9db2b72, 0xb9996a3e, 0xc2999889, 0x9728eaf7, + 0x439cd046, 0xd899b866, 0x386286f3, 0x51af4a30, 0x2edcb22a, 0xa2e2597c, 0x17e062e0, 0xf637ffd8, + 0xdbc8e455, 0x40605071, 0xeedd0943, 0x7c3c8e72, 0x7b2d0c19, 0xc05051a7, 0xddb96c21, 0x4c311bed, + 0x0480530f, 0x8e397632, 0x2e17ecea, 0x1d629902, 0xdebdbe34, 0x82cd42f7, 0x1859e693, 0xa4d9ca20, + 0x01763a80, 0xcc3a7d9c, 0xcfcc404c, 0xe04d5ff7, 0x32e4880d, 0x2b97f956, 0xc0f2cb43, 0x9d077356, + 0x32ba3929, 0x9f945f66, 0x04387942, 0x2aebe005, 0x6d44a0d3, 0x14fb2981, 0xb6adfade, 0x5a255875, + 0xa6cd9fdd, 0x8d3382af, 0x11c92d78, 0x0415a7e6, 0x00a98faf, 0x41dd66bf, 0x8fb2c7a0, 0x83e5e7d4, + 0x0106eb06, 0xf76b5b22, 0x2ed00b4b, 0xeb403290, 0x9dd067b0, 0x887ecb74, 0x74744931, 0x83cfcc60, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000001, 0x00000002, 0x00000000, 0x00000000, 0x00003300, 0x00000000, 0x0000de00, 0x00000000, + 0xe4f7f5ef, 0xee6f0434, 0xa742283d, 0xb837517d, 0x49e4d804, 0xd8d4d342, 0xa7c8bf31, 0xe15c25cf, + 0x3acb76c7, 0x6ca73cfc, 0xc5002b4d, 0x9dc59b12, 0x8f11945c, 0xe814622b, 0xc745df38, 0xc179824e, + 0x50ed0464, 0x40f4b2de, 0x4ea86960, 0xacb935cf, 0xf1b2d5f8, 0xa4ba5e6f, 0x90a0a1eb, 0x69da0dfa, + 
0x6c9df4ee, 0x8839b01e, 0x66f4aabc, 0xb7f981bd, 0x8a4d967e, 0x56d405c6, 0xf32206c3, 0x99657af8, + 0x6ee38993, 0xcc7f8e9c, 0xf713fc76, 0xb7cd1802, 0xd1c3cd47, 0xf48ce818, 0xdc6df38a, 0x28ad0e5c, + 0x1875327b, 0x90c3b70e, 0x7f15e88c, 0x1b74c593, 0x5ac7d9b0, 0xedd70474, 0xed91afeb, 0x991150af, + 0x48da5e52, 0xe1d0112b, 0x7d36d470, 0x4d7daf15, 0x34a604f5, 0x9eab147d, 0x2dfad689, 0xf464bcb8, + 0xdd03d089, 0x86921b53, 0x9f2aed2c, 0xed4c2dfd, 0xab48a852, 0x28c194b9, 0x2389a853, 0x257bb576, + 0xb5251435, 0xfbab449e, 0xbdb9f479, 0xf2678bcf, 0xda80d6b6, 0xc74fa6cf, 0xfd2a7488, 0x6909c425, + 0x02a16ec5, 0x461d82f0, 0xd5a884a2, 0x4ec34431, 0xc169c889, 0x79d65f21, 0xc2ceb8cb, 0xd39c7ce6, + 0x88c62a3d, 0x831f5ac7, 0x33ee1132, 0x06195cbd, 0x9c372678, 0xb8b0c345, 0x83658b0f, 0x8d2ecce4, + 0xf2d64358, 0xdd67364d, 0x67644a8e, 0x50a87a4d, 0xcd0b0f87, 0xc67f1397, 0x31c5f225, 0x91a4f825, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00010001, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2cf026f1, 0x3400ca44, 0x2a940a6c, 0x3a85ce16, 0x9aca2a85, 0x13819510, 0x6e0aaeba, 0xb1742a27, + 0x1721de4e, 0x339f3918, 0xbe2f3205, 0x51e4c344, 0x577fe823, 0x92b0329f, 0x7443aa3e, 0x98b71f36, + 0x0f3d1d23, 0x60cf1800, 0x7b94d115, 0xf1bf27af, 0x458b89ed, 0x2eebe4fa, 0xb0438bf8, 0x6c4879ea, + 0x62d74d7f, 0x19640e2a, 0xefb7cdf9, 0xc6aa04ba, 0xa5dcbb8f, 0xfa8142c3, 0x703938dc, 0x35117442, + 0xdf5336a2, 0xa1b5a533, 0xb1544d87, 0x21bfc273, 0x9e5a18f2, 0x043ce72c, 0xca3660bf, 0x6bd94061, + 0x0ad69c2c, 0xcdf5d0e5, 0x124fb288, 0xf6a9af90, 0xf6b9e34e, 0x44b8aa6b, 0x9f6fa7d0, 0x88c571b1, + 0xccd9f95c, 0xdc121aaa, 0xeacf15d9, 0x4e1336bd, 0xacbc5e30, 0x8c4e374d, 0xaa8bc375, 0x26f67ae6, + 0x75a33f24, 0x966c871a, 
0xa4ce0cae, 0x7b24b31d, 0x93fc7cea, 0xa8f3f2ce, 0x199a1be6, 0x58c24fa6, + 0x027aa313, 0x85b39a1c, 0xd558c04b, 0x8ca3a97c, 0x7e769b21, 0x93f0e528, 0xfa6d701a, 0x00c7b1f5, + 0x2ef890de, 0xd5dad21b, 0xaf5c0c11, 0x777585ae, 0x57efc20e, 0x102aaa14, 0x285727d6, 0x4fc0575f, + 0xe59129c1, 0x4d335e3a, 0x8cf0b7f8, 0x9a602f3e, 0xb05a515b, 0xf4bdb164, 0x57f94aeb, 0x58558b23, + 0xf71e8185, 0x9dc466e4, 0x55ae607e, 0xaba144de, 0x20f77067, 0xde424f60, 0xdbd7755d, 0x48ec98ec, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x577d3031, 0xdbcce9e5, 0x875ef27b, 0x7df0d67b, 0x211c233a, 0x703b6f8d, 0x836cd31b, 0x6a6286a0, + 0x1c423e74, 0xea669363, 0x16362c50, 0x74492f0a, 0x4685ef82, 0xd4e75c4f, 0x1c5bfba3, 0x31258e1d, + 0xe036f43e, 0x3649bad3, 0x119b1eb2, 0x6a0a305a, 0xe7e3cab7, 0x5728be4f, 0xd15226ec, 0x1ebf9b71, + 0x421cf563, 0x475b473c, 0x865da3aa, 0xc87c6fa1, 0xbaa1487c, 0x477d7a8b, 0x4d4eb6dc, 0x48a56340, + 0x9a1816b2, 0x277b0285, 0x2de13925, 0x8a1505ba, 0x3a6cee1b, 0x3827315e, 0xb4e9a099, 0x0532efc1, + 0x068d503d, 0x7df1e853, 0x2b62c5f3, 0xbc6f3792, 0x188e8c5e, 0x28789515, 0x8bff63cd, 0xf7113b6b, + 0x6966556a, 0xf7b0d1bc, 0x833598e9, 0xdc91e53d, 0x37637356, 0xaf005d67, 0x8bfaebfa, 0x6f6cf626, + 0xe8f44bd2, 0x58a1dcbc, 0xd559ab97, 0x36f10b94, 0xe506f8a6, 0xb51b9a87, 0x5a41657e, 0xd5fceb04, + 0xe347a680, 0xfc189930, 0x77e01a6b, 0xeeee34ae, 0x16ac288f, 0x92eb930e, 0xa3b053a6, 0x0d1212eb, + 0xf4b1adaf, 0x3a69213d, 0x0cfebcdd, 0x99e75d41, 0xcc152f7a, 0x99ce7993, 0x93f2c8b9, 0xd2766c82, + 0x760aa309, 0xd56341bd, 0x14992f22, 0x86616b17, 0x87a9c2e1, 0xf2385b56, 0x4823d497, 0x7d2b347e, + 0x0e53457d, 0xf7795318, 0xdec3c062, 0x65283a27, 0x0b84c304, 0xfee01b13, 0x36bb50a7, 0x8062cdd9, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x3a16e326, 0xba6b6ea3, 0x87a41305, 0x8f25c7a1, 0x33c8446a, 0xc71830a5, 0xa6396ea5, 0xaca6ee44, + 0x58e60046, 0xb8c9d1da, 0x5b48c2ac, 0x7c81985b, 0xa3c7950d, 0x475262b8, 0xc32028c8, 0xcda615d9, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +// Array of APP offsets and sizes +// Defined as follows: +// StructureVersion +// NumApps(N) +// Code Entry Point +// App 0 APP Version +// App 0 Code Offset (In Blob) +// App 0 Code Size +// App 0 Code IMEM Offset +// App 0 is secure (Boolean 1/0) +// App 0 Data Offset (In Blob) +// App 0 Data Size +// App 0 Data DMEM Offset +// App 1 APP Version +// App 1 Code Offset (In Blob) +// App 1 Code Size +// App 1 Code IMEM Offset +// App 1 is secure (Boolean 1/0) +// App 1 Data Offset (In Blob) +// App 1 Data Size +// App 1 Data DMEM Offset +// . . . . +// . . . . 
+// App N-1 APP Version +// App N-1 Code Offset (In Blob) +// App N-1 Code Size +// App N-1 Code IMEM Offset +// App N-1 is secure (Boolean 1/0) +// App N-1 Data Offset (In Blob) +// App N-1 Data Size +// App N-1 Data DMEM Offset + + +const NvU32 soe_ucode_header_lr10_prd[] = { + /* .version = */ 1, + /* .numApps = */ 3, + /* .codeEntryPoint = */ 56832, + /* .appVersion = */ 1, + /* .appCodeStartOffset = */ 0, + /* .appCodeSize = */ 56832, + /* .appCodeImemOffset = */ 0, + /* .appCodeIsSecure = */ 0, + /* .appDataStartOffset = */ 56832, + /* .appDataSize = */ 13056, + /* .appDataDmemOffset = */ 0, + /* .appVersion = */ 1, + /* .appCodeStartOffset = */ 69888, + /* .appCodeSize = */ 1024, + /* .appCodeImemOffset = */ 56832, + /* .appCodeIsSecure = */ 0, + /* .appDataStartOffset = */ 74240, + /* .appDataSize = */ 7424, + /* .appDataDmemOffset = */ 56832, + /* .appVersion = */ 1, + /* .appCodeStartOffset = */ 70912, + /* .appCodeSize = */ 3328, + /* .appCodeImemOffset = */ 57856, + /* .appCodeIsSecure = */ 1, + /* .appDataStartOffset = */ 81664, + /* .appDataSize = */ 0, + /* .appDataDmemOffset = */ 0, +}; + +const NvU32 soe_ucode_data_size_lr10_prd = 20416; + +#endif //_SOE_UCODE_LR10_PRD_H_ diff --git a/src/common/nvswitch/kernel/inc/soe/haldefs_soe_nvswitch.h b/src/common/nvswitch/kernel/inc/soe/haldefs_soe_nvswitch.h new file mode 100644 index 000000000..8fd9cfd9d --- /dev/null +++ b/src/common/nvswitch/kernel/inc/soe/haldefs_soe_nvswitch.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _HALDEFS_SOE_NVSWITCH_H_ +#define _HALDEFS_SOE_NVSWITCH_H_ + + +#include "nvstatus.h" +#include "flcnifcmn.h" +#include "flcn/haldefs_flcnable_nvswitch.h" + +struct SOE; + +typedef struct { + // needs to be the first thing in this struct so that a soe_hal* can be + // re-interpreted as a flcnable_hal* and vise-versa. 
+ flcnable_hal base; + + //add any hal functions specific to SOE here + NV_STATUS (*processMessages)( + struct nvswitch_device *device, + struct SOE *pSoe); + NV_STATUS (*waitForInitAck)( + struct nvswitch_device *device, + struct SOE *pSoe); + + NvU32 (*service)( + struct nvswitch_device *device, + struct SOE *pSoe); + void (*serviceHalt)( + struct nvswitch_device *device, + struct SOE *pSoe); + void (*ememTransfer)( + struct nvswitch_device *device, + struct SOE *pSoe, + NvU32 dmemAddr, + NvU8 *pBuf, + NvU32 sizeBytes, + NvU8 port, + NvBool bCopyFrom); + NvU32 (*getEmemSize)( + struct nvswitch_device *device, + struct SOE *pSoe); + NvU32 (*getEmemStartOffset)( + struct nvswitch_device *device, + struct SOE *pSoe); + NV_STATUS (*ememPortToRegAddr)( + struct nvswitch_device *device, + struct SOE *pSoe, + NvU32 port, + NvU32 *pEmemCAddr, + NvU32 *pEmemDAddr); + void (*serviceExterr)( + struct nvswitch_device *device, + struct SOE *pSoe); + NV_STATUS (*getExtErrRegAddrs)( + struct nvswitch_device *device, + struct SOE *pSoe, + NvU32 *pExtErrAddr, + NvU32 *pExtErrStat); + NvU32 (*ememPortSizeGet)( + struct nvswitch_device *device, + struct SOE *pSoe); + NvBool (*isCpuHalted)( + struct nvswitch_device *device, + struct SOE *pSoe); + NvlStatus (*testDma)( + struct nvswitch_device *device); + NvlStatus (*setPexEOM)( + struct nvswitch_device *device, + NvU8 mode, + NvU8 nblks, + NvU8 nerrs, + NvU8 berEyeSel); + NvlStatus (*getPexEomStatus)( + struct nvswitch_device *device, + NvU8 mode, + NvU8 nblks, + NvU8 nerrs, + NvU8 berEyeSel, + NvU32 laneMask, + NvU16 *pEomStatus); + NvlStatus (*getUphyDlnCfgSpace)( + struct nvswitch_device *device, + NvU32 regAddress, + NvU32 laneSelectMask, + NvU16 *pRegValue); + NvlStatus (*forceThermalSlowdown)( + struct nvswitch_device *device, + NvBool slowdown, + NvU32 periodUs); + NvlStatus (*setPcieLinkSpeed)( + struct nvswitch_device *device, + NvU32 linkSpeed); +} soe_hal; + +// HAL functions +void soeSetupHal_LR10(struct SOE *pSoe); + +#endif //_HALDEFS_SOE_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/soe/soe_nvswitch.h b/src/common/nvswitch/kernel/inc/soe/soe_nvswitch.h new file mode 100644 index 000000000..95f21ad74 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/soe/soe_nvswitch.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _SOE_NVSWITCH_H_ +#define _SOE_NVSWITCH_H_ + +#include "nvlink_errors.h" +#include "nvtypes.h" +#include "nvstatus.h" + +typedef struct SOE SOE, *PSOE; +struct FLCNABLE; +struct nvswitch_device; + +SOE *soeAllocNew(void); +NvlStatus soeInit(struct nvswitch_device *device, PSOE pSoe, NvU32 pci_device_id); +void soeDestroy(struct nvswitch_device *device, PSOE pSoe); + +//HAL functions +NV_STATUS soeProcessMessages (struct nvswitch_device *device, PSOE pSoe); +NV_STATUS soeWaitForInitAck (struct nvswitch_device *device, PSOE pSoe); + + +NvU32 soeService_HAL (struct nvswitch_device *device, PSOE pSoe); +void soeServiceHalt_HAL (struct nvswitch_device *device, PSOE pSoe); +void soeEmemTransfer_HAL (struct nvswitch_device *device, PSOE pSoe, NvU32 dmemAddr, NvU8 *pBuf, NvU32 sizeBytes, NvU8 port, NvBool bCopyFrom); +NvU32 soeGetEmemSize_HAL (struct nvswitch_device *device, PSOE pSoe); +NvU32 soeGetEmemStartOffset_HAL (struct nvswitch_device *device, PSOE pSoe); +NV_STATUS soeEmemPortToRegAddr_HAL (struct nvswitch_device *device, PSOE pSoe, NvU32 port, NvU32 *pEmemCAddr, NvU32 *pEmemDAddr); +void soeServiceExterr_HAL (struct nvswitch_device *device, PSOE pSoe); +NV_STATUS soeGetExtErrRegAddrs_HAL (struct nvswitch_device *device, PSOE pSoe, NvU32 *pExtErrAddr, NvU32 *pExtErrStat); +NvU32 soeEmemPortSizeGet_HAL (struct nvswitch_device *device, PSOE pSoe); +NvBool soeIsCpuHalted_HAL (struct nvswitch_device *device, PSOE pSoe); +NvlStatus soeTestDma_HAL (struct nvswitch_device *device, PSOE pSoe); +NvlStatus soeSetPexEOM_HAL (struct nvswitch_device *device, NvU8 mode, NvU8 nblks, NvU8 nerrs, NvU8 berEyeSel); +NvlStatus soeGetPexEomStatus_HAL (struct nvswitch_device *device, NvU8 mode, NvU8 nblks, NvU8 nerrs, NvU8 berEyeSel, NvU32 laneMask, NvU16 *pEomStatus); +NvlStatus soeGetUphyDlnCfgSpace_HAL (struct nvswitch_device *device, NvU32 regAddress, NvU32 laneSelectMask, NvU16 *pRegValue); +NvlStatus soeForceThermalSlowdown_HAL (struct nvswitch_device *device, NvBool slowdown, NvU32 periodUs); +NvlStatus soeSetPcieLinkSpeed_HAL (struct nvswitch_device *device, NvU32 linkSpeed); + +#endif //_SOE_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/soe/soe_priv_nvswitch.h b/src/common/nvswitch/kernel/inc/soe/soe_priv_nvswitch.h new file mode 100644 index 000000000..2bb172545 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/soe/soe_priv_nvswitch.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SOE_PRIV_NVSWITCH_H_ +#define _SOE_PRIV_NVSWITCH_H_ + +#include "soe/haldefs_soe_nvswitch.h" +#include "soe/soeifcmn.h" + +#include "flcn/flcnqueue_nvswitch.h" +#include "flcn/flcnable_nvswitch.h" + +#define SOE_DMEM_ALIGNMENT (4) + +struct SOE +{ + // needs to be the first thing in this struct so that a PSOE can be + // re-interpreted as a PFLCNABLE and vise-versa. While it is possible + // to remove this restriction by using (&pSoe->parent) instead of a cast, + // 1) the reverse (getting a PSOE from a PFLCNABLE) would be difficult and + // spooky 2) that would force anybody wanting to do the conversion + // to know the layout of an SOE object (not a big deal, but still annoying) + union { + // pointer to our function table - should always be the first thing in any object (including parent) + soe_hal *pHal; + FLCNABLE parent; + } base; + + // Other member variables specific to SOE go here + + /*! + * Structure tracking all information for active and inactive SEC2 sequences. + */ + FLCN_QMGR_SEQ_INFO seqInfo[RM_SOE_MAX_NUM_SEQUENCES]; + + /*! The event descriptor for the Thermal event handler */ + NvU32 thermEvtDesc; +}; + +#endif //_SOE_PRIV_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inc/spi_nvswitch.h b/src/common/nvswitch/kernel/inc/spi_nvswitch.h new file mode 100644 index 000000000..5ddd79ba2 --- /dev/null +++ b/src/common/nvswitch/kernel/inc/spi_nvswitch.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _SPI_NVSWITCH_H_ +#define _SPI_NVSWITCH_H_ + +NvlStatus nvswitch_spi_init(nvswitch_device *); + +#endif //_SPI_NVSWITCH_H_ diff --git a/src/common/nvswitch/kernel/inforom/ifrbbx_nvswitch.c b/src/common/nvswitch/kernel/inforom/ifrbbx_nvswitch.c new file mode 100644 index 000000000..a3eca18d3 --- /dev/null +++ b/src/common/nvswitch/kernel/inforom/ifrbbx_nvswitch.c @@ -0,0 +1,78 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "regkey_nvswitch.h" +#include "nvVer.h" +#include "inforom/inforom_nvswitch.h" + +void +nvswitch_bbx_collect_current_time +( + nvswitch_device *device, + void *pBbxState +) +{ + return; +} + +NvlStatus +nvswitch_inforom_bbx_add_sxid +( + nvswitch_device *device, + NvU32 exceptionType, + NvU32 data0, + NvU32 data1, + NvU32 data2 +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +void +nvswitch_inforom_bbx_unload +( + nvswitch_device *device +) +{ + return; +} + +NvlStatus +nvswitch_inforom_bbx_load +( + nvswitch_device *device +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_inforom_bbx_get_sxid +( + nvswitch_device *device, + NVSWITCH_GET_SXIDS_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} diff --git a/src/common/nvswitch/kernel/inforom/ifrecc_nvswitch.c b/src/common/nvswitch/kernel/inforom/ifrecc_nvswitch.c new file mode 100644 index 000000000..c76c06edd --- /dev/null +++ b/src/common/nvswitch/kernel/inforom/ifrecc_nvswitch.c @@ -0,0 +1,267 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "common_nvswitch.h" +#include "inforom/inforom_nvswitch.h" + +NvlStatus +nvswitch_inforom_ecc_load +( + nvswitch_device *device +) +{ + NvlStatus status; + NvU8 version = 0; + NvU8 subversion = 0; + INFOROM_ECC_STATE *pEccState = NULL; + struct inforom *pInforom = device->pInforom; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + status = nvswitch_inforom_get_object_version_info(device, "ECC", &version, + &subversion); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, WARN, "no ECC object found, rc:%d\n", status); + return NVL_SUCCESS; + } + + if (!INFOROM_OBJECT_SUBVERSION_SUPPORTS_NVSWITCH(subversion)) + { + NVSWITCH_PRINT(device, WARN, "ECC v%u.%u not supported\n", + version, subversion); + return -NVL_ERR_NOT_SUPPORTED; + } + + NVSWITCH_PRINT(device, INFO, "ECC v%u.%u found\n", version, subversion); + + pEccState = nvswitch_os_malloc(sizeof(INFOROM_ECC_STATE)); + if (pEccState == NULL) + { + return -NVL_NO_MEM; + } + nvswitch_os_memset(pEccState, 0, sizeof(INFOROM_ECC_STATE)); + + switch (version) + { + case 6: + pEccState->pFmt = INFOROM_ECC_OBJECT_V6_S0_FMT; + pEccState->pPackedObject = nvswitch_os_malloc(INFOROM_ECC_OBJECT_V6_S0_PACKED_SIZE); + if (pEccState->pPackedObject == NULL) + { + status = -NVL_NO_MEM; + goto nvswitch_inforom_ecc_version_fail; + } + + pEccState->pEcc = nvswitch_os_malloc(sizeof(INFOROM_ECC_OBJECT)); + if (pEccState->pEcc == NULL) + { + status = -NVL_NO_MEM; + nvswitch_os_free(pEccState->pPackedObject); + goto nvswitch_inforom_ecc_version_fail; + } + + break; + + default: + NVSWITCH_PRINT(device, WARN, "ECC v%u.%u not supported\n", + version, subversion); + goto nvswitch_inforom_ecc_version_fail; + break; + } + + status = nvswitch_inforom_read_object(device, "ECC", pEccState->pFmt, + pEccState->pPackedObject, + pEccState->pEcc); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to read ECC object, rc:%d\n", status); + goto nvswitch_inforom_read_fail; + } + + status = nvswitch_inforom_add_object(pInforom, &pEccState->pEcc->header); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to cache ECC object header, rc:%d\n", + status); + goto nvswitch_inforom_read_fail; + } + + pInforom->pEccState = pEccState; + + // Update shared surface counts, non-fatal if we encounter a failure + status = nvswitch_smbpbi_refresh_ecc_counts(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, WARN, "Failed to update ECC counts on SMBPBI " + "shared surface rc:%d\n", status); + } + + return NVL_SUCCESS; + +nvswitch_inforom_read_fail: + nvswitch_os_free(pEccState->pPackedObject); + nvswitch_os_free(pEccState->pEcc); +nvswitch_inforom_ecc_version_fail: + nvswitch_os_free(pEccState); + + return status; +} + +void +nvswitch_inforom_ecc_unload +( + nvswitch_device *device +) +{ + INFOROM_ECC_STATE *pEccState; + struct inforom *pInforom = device->pInforom; + + if (pInforom == NULL) + { + return; + } + + pEccState = pInforom->pEccState; + if (pEccState == NULL) + { + return; + } + + // + // Flush the data to InfoROM before unloading the object + // Currently the driver doesn't support deferred processing and so the + // error logging path in the interrupt handler cannot defer the flush. 
+ // This is WAR until the driver adds support for deferred processing + // + nvswitch_inforom_ecc_flush(device); + + nvswitch_os_free(pEccState->pPackedObject); + nvswitch_os_free(pEccState->pEcc); + nvswitch_os_free(pEccState); + pInforom->pEccState = NULL; +} + +NvlStatus +nvswitch_inforom_ecc_flush +( + struct nvswitch_device *device +) +{ + NvlStatus status = NVL_SUCCESS; + struct inforom *pInforom = device->pInforom; + INFOROM_ECC_STATE *pEccState; + + if (pInforom == NULL || pInforom->pEccState == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pEccState = pInforom->pEccState; + + if (pEccState->bDirty) + { + status = nvswitch_inforom_write_object(device, "ECC", + pEccState->pFmt, pEccState->pEcc, + pEccState->pPackedObject); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to flush ECC object to InfoROM, rc: %d\n", status); + } + else + { + pEccState->bDirty = NV_FALSE; + } + } + + return status; +} + +NvlStatus +nvswitch_inforom_ecc_log_err_event +( + struct nvswitch_device *device, + INFOROM_NVS_ECC_ERROR_EVENT *err_event +) +{ + NvlStatus status; + INFOROM_ECC_STATE *pEccState; + NvU64 time_ns; + struct inforom *pInforom = device->pInforom; + + if (pInforom == NULL || pInforom->pEccState == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + if (err_event == NULL) + { + return -NVL_BAD_ARGS; + } + + pEccState = pInforom->pEccState; + + time_ns = nvswitch_os_get_platform_time(); + err_event->timestamp = (NvU32)(time_ns / NVSWITCH_INTERVAL_1SEC_IN_NS); + + // Scrub the incoming address field if it is invalid + if (!(err_event->bAddressValid)) + { + err_event->address = 0; + } + + // Invoke the chip dependent inforom logging routine + status = device->hal.nvswitch_inforom_ecc_log_error_event(device, pEccState->pEcc, + err_event); + if (status == NVL_SUCCESS) + { + // + // If the error was logged successfully, mark the object as dirty to be + // written on the subsequent flush. + // + pEccState->bDirty = NV_TRUE; + } + + return status; +} + +NvlStatus +nvswitch_inforom_ecc_get_errors +( + nvswitch_device *device, + NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params +) +{ + struct inforom *pInforom = device->pInforom; + + if (pInforom == NULL || pInforom->pEccState == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + return device->hal.nvswitch_inforom_ecc_get_errors(device, params); +} diff --git a/src/common/nvswitch/kernel/inforom/ifrnvlink_nvswitch.c b/src/common/nvswitch/kernel/inforom/ifrnvlink_nvswitch.c new file mode 100644 index 000000000..caea578a2 --- /dev/null +++ b/src/common/nvswitch/kernel/inforom/ifrnvlink_nvswitch.c @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "error_nvswitch.h" + +#include "inforom/inforom_nvswitch.h" + +NvlStatus +nvswitch_inforom_nvlink_flush +( + struct nvswitch_device *device +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_inforom_nvlink_load +( + nvswitch_device *device +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +void +nvswitch_inforom_nvlink_unload +( + nvswitch_device *device +) +{ + return; +} + +NvlStatus +nvswitch_inforom_nvlink_get_minion_data +( + nvswitch_device *device, + NvU8 linkId, + NvU32 *seedData +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_inforom_nvlink_set_minion_data +( + nvswitch_device *device, + NvU8 linkId, + NvU32 *seedData, + NvU32 size +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_inforom_nvlink_log_error_event +( + nvswitch_device *device, + void *error_event +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_inforom_nvlink_get_max_correctable_error_rate +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_inforom_nvlink_get_errors +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} diff --git a/src/common/nvswitch/kernel/inforom/ifroms_nvswitch.c b/src/common/nvswitch/kernel/inforom/ifroms_nvswitch.c new file mode 100644 index 000000000..99f65763b --- /dev/null +++ b/src/common/nvswitch/kernel/inforom/ifroms_nvswitch.c @@ -0,0 +1,209 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "common_nvswitch.h" +#include "error_nvswitch.h" + +#include "inforom/inforom_nvswitch.h" + +NvlStatus +nvswitch_inforom_oms_get_device_disable +( + nvswitch_device *device, + NvBool *pBDisabled +) +{ + struct inforom *pInforom = device->pInforom; + INFOROM_OMS_STATE *pOmsState; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pOmsState = pInforom->pOmsState; + if (pOmsState == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + *pBDisabled = device->hal.nvswitch_oms_get_device_disable(pOmsState); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_inforom_oms_set_device_disable +( + nvswitch_device *device, + NvBool bForceDeviceDisable +) +{ + struct inforom *pInforom = device->pInforom; + INFOROM_OMS_STATE *pOmsState; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pOmsState = pInforom->pOmsState; + if (pOmsState == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + device->hal.nvswitch_oms_set_device_disable(pOmsState, bForceDeviceDisable); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_inforom_oms_load +( + nvswitch_device *device +) +{ + NvlStatus status; + NvU8 version = 0; + NvU8 subversion = 0; + INFOROM_OMS_STATE *pOmsState = NULL; + struct inforom *pInforom = device->pInforom; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + status = nvswitch_inforom_get_object_version_info(device, "OMS", &version, + &subversion); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, "no OMS object found, rc:%d\n", status); + return NVL_SUCCESS; + } + + if (!INFOROM_OBJECT_SUBVERSION_SUPPORTS_NVSWITCH(subversion)) + { + NVSWITCH_PRINT(device, WARN, "OMS v%u.%u not supported\n", + version, subversion); + return -NVL_ERR_NOT_SUPPORTED; + } + + NVSWITCH_PRINT(device, INFO, "OMS v%u.%u found\n", version, subversion); + + pOmsState = nvswitch_os_malloc(sizeof(INFOROM_OMS_STATE)); + if (pOmsState == NULL) + { + return -NVL_NO_MEM; + } + nvswitch_os_memset(pOmsState, 0, sizeof(INFOROM_OMS_STATE)); + + switch (version) + { + case 1: + pOmsState->pFmt = INFOROM_OMS_OBJECT_V1S_FMT; + pOmsState->pPackedObject = nvswitch_os_malloc(INFOROM_OMS_OBJECT_V1_PACKED_SIZE); + if (pOmsState->pPackedObject == NULL) + { + status = -NVL_NO_MEM; + goto nvswitch_inforom_oms_version_fail; + } + + pOmsState->pOms = nvswitch_os_malloc(sizeof(INFOROM_OMS_OBJECT)); + if (pOmsState->pOms == NULL) + { + status = -NVL_NO_MEM; + nvswitch_os_free(pOmsState->pPackedObject); + goto nvswitch_inforom_oms_version_fail; + } + + break; + + default: + NVSWITCH_PRINT(device, WARN, "OMS v%u.%u not supported\n", + version, subversion); + goto nvswitch_inforom_oms_version_fail; + break; + } + + status = nvswitch_inforom_load_object(device, pInforom, "OMS", + pOmsState->pFmt, + pOmsState->pPackedObject, + pOmsState->pOms); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to load OMS object, rc: %d\n", + status); + goto nvswitch_inforom_oms_load_fail; + } + + pInforom->pOmsState = pOmsState; + + device->hal.nvswitch_initialize_oms_state(device, pOmsState); + + return NVL_SUCCESS; + +nvswitch_inforom_oms_load_fail: + nvswitch_os_free(pOmsState->pOms); + nvswitch_os_free(pOmsState->pPackedObject); +nvswitch_inforom_oms_version_fail: + nvswitch_os_free(pOmsState); + + return status; +} + +void +nvswitch_inforom_oms_unload +( + nvswitch_device *device +) +{ + struct inforom *pInforom = device->pInforom; + INFOROM_OMS_STATE *pOmsState; + NvlStatus status; + + if (pInforom == NULL) + { + return; + } + + pOmsState = pInforom->pOmsState; 
+ if (pOmsState == NULL) + { + return; + } + + (void)device->hal.nvswitch_read_oob_blacklist_state(device); + status = device->hal.nvswitch_oms_inforom_flush(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Flushing OMS failed during unload, rc:%d\n", status); + } + + nvswitch_os_free(pOmsState->pPackedObject); + nvswitch_os_free(pOmsState->pOms); + nvswitch_os_free(pOmsState); + pInforom->pOmsState = NULL; +} diff --git a/src/common/nvswitch/kernel/inforom/ifrro_nvswitch.c b/src/common/nvswitch/kernel/inforom/ifrro_nvswitch.c new file mode 100644 index 000000000..636a364e3 --- /dev/null +++ b/src/common/nvswitch/kernel/inforom/ifrro_nvswitch.c @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "common_nvswitch.h" +#include "inforom/inforom_nvswitch.h" + +NvlStatus +nvswitch_inforom_read_only_objects_load +( + nvswitch_device *device +) +{ + NvlStatus status; + struct inforom *pInforom = device->pInforom; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + status = nvswitch_inforom_load_object(device, pInforom, "OBD", + INFOROM_OBD_OBJECT_V1_XX_FMT, + pInforom->OBD.packedObject, + &pInforom->OBD.object); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to load OBD object, rc:%d\n", + status); + } + else + { + pInforom->OBD.bValid = NV_TRUE; + } + + status = nvswitch_inforom_load_object(device, pInforom, "OEM", + INFOROM_OEM_OBJECT_V1_00_FMT, + pInforom->OEM.packedObject, + &pInforom->OEM.object); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to load OEM object, rc:%d\n", + status); + } + else + { + pInforom->OEM.bValid = NV_TRUE; + } + + status = nvswitch_inforom_load_object(device, pInforom, "IMG", + INFOROM_IMG_OBJECT_V1_00_FMT, + pInforom->IMG.packedObject, + &pInforom->IMG.object); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to load IMG object, rc:%d\n", + status); + } + else + { + pInforom->IMG.bValid = NV_TRUE; + } + + return NVL_SUCCESS; +} diff --git a/src/common/nvswitch/kernel/inforom/inforom_nvswitch.c b/src/common/nvswitch/kernel/inforom/inforom_nvswitch.c new file mode 100644 index 000000000..4c8933572 --- /dev/null +++ b/src/common/nvswitch/kernel/inforom/inforom_nvswitch.c @@ -0,0 +1,1181 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink_common.h" +#include "common_nvswitch.h" +#include "error_nvswitch.h" +#include "haldef_nvswitch.h" + +#include "inforom/inforom_nvswitch.h" + +#include "soe/soeififr.h" +#include "rmsoecmdif.h" +#include "flcn/flcn_nvswitch.h" +#include "rmflcncmdif_nvswitch.h" + +// Interface functions +static NvlStatus _nvswitch_inforom_unpack_object(const char *, NvU8 *, NvU32 *); +static NvlStatus _nvswitch_inforom_pack_object(const char *, NvU32 *, NvU8 *); +static void _nvswitch_inforom_string_copy(inforom_U008 *pSrc, NvU8 *pDst, NvU32 size); +static NvlStatus _nvswitch_inforom_read_file(nvswitch_device *device, + const char objectName[INFOROM_FS_FILE_NAME_SIZE], + NvU32 packedObjectSize, NvU8 *pPackedObject); +static NvlStatus _nvswitch_inforom_write_file(nvswitch_device *device, + const char objectName[INFOROM_FS_FILE_NAME_SIZE], + NvU32 packedObjectSize, + NvU8 *pPackedObject); + +/*! + * Interface to copy string of inforom object. + * inforom_U008 is NvU32, and we use 0xff bits to store the character. + * Therefore we need a special copy API. + * + * @param[in] pSrc Source pointer + * @param[out] pDst Destination pointer + * @param[in] length Length of the string + */ +static void +_nvswitch_inforom_string_copy +( + inforom_U008 *pSrc, + NvU8 *pDst, + NvU32 length +) +{ + NvU32 i; + + for (i = 0; i < length; ++i) + { + pDst[i] = (NvU8)(pSrc[i] & 0xff); + } +} + +static NvlStatus +_nvswitch_inforom_calc_packed_object_size +( + const char *objectFormat, + NvU16 *pPackedObjectSize +) +{ + NvU16 count; + char type; + NvU16 packedObjectSize = 0; + + while ((type = *objectFormat++) != '\0') + { + count = 0; + while ((type >= '0') && (type <= '9')) + { + count *= 10; + count += (type - '0'); + type = *objectFormat++; + } + count = (count > 0) ? count : 1; + + switch (type) + { + case INFOROM_FMT_S08: + packedObjectSize += count; + break; + case INFOROM_FMT_U04: + if (count % 2) + return -NVL_ERR_INVALID_STATE; + packedObjectSize += (count / 2); + break; + case INFOROM_FMT_U08: + packedObjectSize += count; + break; + case INFOROM_FMT_U16: + packedObjectSize += (count * 2); + break; + case INFOROM_FMT_U24: + packedObjectSize += (count * 3); + break; + case INFOROM_FMT_U32: + packedObjectSize += (count * 4); + break; + case INFOROM_FMT_U64: + packedObjectSize += (count * 8); + break; + case INFOROM_FMT_BINARY: + packedObjectSize += count; + break; + default: + return -NVL_BAD_ARGS; + } + } + + *pPackedObjectSize = packedObjectSize; + + return NVL_SUCCESS; +} + +static NV_INLINE void +_nvswitch_inforom_unpack_uint_field +( + NvU8 **ppPackedObject, + NvU32 **ppObject, + NvU8 width +) +{ + NvU8 i; + NvU64 field = 0; + + if (width > 8) + { + return; + } + + for (i = 0; i < width; i++, (*ppPackedObject)++) + { + field |= (((NvU64)**ppPackedObject) << (8 * i)); + } + + if (width <= 4) + { + **ppObject = (NvU32)field; + (*ppObject)++; + } + else + { + **(NvU64 **)ppObject = field; + *ppObject += 2; + } +} + +static NvlStatus +_nvswitch_inforom_unpack_object +( + const char *objectFormat, + NvU8 *pPackedObject, + NvU32 *pObject +) +{ + NvU16 count; + char type; + NvU64 field; + + while ((type = *objectFormat++) != '\0') + { + count = 0; + while ((type >= '0') && (type <= '9')) + { + count *= 10; + count += (type - '0'); + type = *objectFormat++; + } + count = (count > 0) ? count : 1; + + for (; count > 0; count--) + { + switch (type) + { + case INFOROM_FMT_S08: + field = *pPackedObject++; + field |= ((field & 0x80) ? 
~0xff : 0); + *pObject++ = (NvU32)field; + break; + case INFOROM_FMT_U04: + // Extract two nibbles per byte, and adjust count accordingly + if (count % 2) + return -NVL_ERR_INVALID_STATE; + field = *pPackedObject++; + *pObject++ = (NvU32)(field & 0x0f); + *pObject++ = (NvU32)((field & 0xf0) >> 4); + count--; + break; + case INFOROM_FMT_U08: + _nvswitch_inforom_unpack_uint_field(&pPackedObject, &pObject, 1); + break; + case INFOROM_FMT_U16: + _nvswitch_inforom_unpack_uint_field(&pPackedObject, &pObject, 2); + break; + case INFOROM_FMT_U24: + _nvswitch_inforom_unpack_uint_field(&pPackedObject, &pObject, 3); + break; + case INFOROM_FMT_U32: + _nvswitch_inforom_unpack_uint_field(&pPackedObject, &pObject, 4); + break; + case INFOROM_FMT_U64: + _nvswitch_inforom_unpack_uint_field(&pPackedObject, &pObject, 8); + break; + case INFOROM_FMT_BINARY: + nvswitch_os_memcpy(pObject, pPackedObject, count); + pObject += NV_CEIL(count, 4); + pPackedObject += count; + // Adjust count to exit the loop. + count = 1; + break; + default: + return -NVL_BAD_ARGS; + } + } + } + + return NVL_SUCCESS; +} + +static NV_INLINE void +_nvswitch_inforom_pack_uint_field +( + NvU8 **ppPackedObject, + NvU32 **ppObject, + NvU8 width +) +{ + NvU8 i; + NvU64 field = (width <= 4) ? **ppObject : **((NvU64 **)ppObject); + + if (width > 8) + { + return; + } + + for (i = 0; i < width; i++, (*ppPackedObject)++) + { + **ppPackedObject = (NvU8)((field >> (8 * i)) & 0xff); + } + + if (width <= 4) + { + (*ppObject)++; + } + else + { + *ppObject += 2; + } +} + +static NvlStatus +_nvswitch_inforom_pack_object +( + const char *objectFormat, + NvU32 *pObject, + NvU8 *pPackedObject +) +{ + NvU16 count; + char type; + NvU64 field; + + while ((type = *objectFormat++) != '\0') + { + count = 0; + while ((type >= '0') && (type <= '9')) + { + count *= 10; + count += (type - '0'); + type = *objectFormat++; + } + count = (count > 0) ? count : 1; + + for (; count > 0; count--) + { + switch (type) + { + case INFOROM_FMT_S08: + field = *pObject++; + *pPackedObject++ = (NvS8)field; + break; + case INFOROM_FMT_U04: + // Encode two nibbles per byte, and adjust count accordingly + if (count % 2) + return -NVL_ERR_INVALID_STATE; + field = (*pObject++) & 0xf; + field |= (((*pObject++) & 0xf) << 4); + *pPackedObject++ = (NvU8)field; + count--; + break; + case INFOROM_FMT_U08: + _nvswitch_inforom_pack_uint_field(&pPackedObject, &pObject, 1); + break; + case INFOROM_FMT_U16: + _nvswitch_inforom_pack_uint_field(&pPackedObject, &pObject, 2); + break; + case INFOROM_FMT_U24: + _nvswitch_inforom_pack_uint_field(&pPackedObject, &pObject, 3); + break; + case INFOROM_FMT_U32: + _nvswitch_inforom_pack_uint_field(&pPackedObject, &pObject, 4); + break; + case INFOROM_FMT_U64: + _nvswitch_inforom_pack_uint_field(&pPackedObject, &pObject, 8); + break; + case INFOROM_FMT_BINARY: + nvswitch_os_memcpy(pPackedObject, pObject, count); + pObject += NV_CEIL(count, 4); + pPackedObject += count; + // Adjust count to exit the loop. + count = 1; + break; + default: + return -NVL_BAD_ARGS; + } + } + } + + return NVL_SUCCESS; +} + +/*! + * Read and unpack an object from the InfoROM filesystem. + * + * @param[in] device switch device pointer + * @param[in] pInforom INFOROM object pointer + * @param[in] objectName Name of the object to read from the InfoROM + * @param[in] pObjectFormat Ascii-string describing the layout of the + * object to read. Used to calculate the packed + * object size and to unpack the data. 
+ * @param[out] pPackedObject Written with the packed object read from the + * InfoROM. It is assumed that this is large + * enough to hold the packed data size computed + * from the pObjectFormat string. This argument + * cannot be NULL. + * @param[out] pObject Written with the unpacked object read from the + * InfoROM. It is assumed that this is large + * enough to hold the unpacked data size computed + * from the pObjectFormat string. This argument + * may be NULL. + * + * @return NVL_SUCCESS + * Object successfully read, and unpacked if necessary + * @return -NVL_BAD_ARGS + * If one of the required pointer arguments is NULL + * @return -NVL_ERR_NOT_SUPPORTED + * The InfoROM filesystem image is not supported + * @return Other error + * If packed size determination fails, object unpacking fails, or there + * is a filesystem adapter failure in reading any packed data, it may + * result in other error values. + */ + +NvlStatus +nvswitch_inforom_read_object +( + nvswitch_device *device, + const char objectName[3], + const char *pObjectFormat, + NvU8 *pPackedObject, + void *pObject +) +{ + struct inforom *pInforom = device->pInforom; + NvlStatus status; + NvU16 packedSize; + NvU16 fileSize; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + status = _nvswitch_inforom_calc_packed_object_size(pObjectFormat, &packedSize); + if (status != NVL_SUCCESS) + { + return status; + } + + status = _nvswitch_inforom_read_file(device, objectName, packedSize, pPackedObject); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "InfoROM FS read for %c%c%c failed! rc:%d\n", + objectName[0], objectName[1], objectName[2], status); + return status; + } + + // + // Verify a couple things about the object data: + // 1. The size in the header matches the calculated packed size. + // 2. The type is as it was expected + // + fileSize = INFOROM_FS_FILE_SIZE(pPackedObject); + if (packedSize != fileSize) + { + NVSWITCH_PRINT(device, ERROR, + "NVRM: %s: object %c%c%c was found, but discarded due to " + "a size mismatch! 
(Expected = 0x%X bytes, Actual = 0x%X " + "bytes)\n", __FUNCTION__, + objectName[0], objectName[1], objectName[2], + packedSize, fileSize); + return -NVL_ERR_INVALID_STATE; + } + + if (!INFOROM_FS_FILE_NAMES_MATCH(pPackedObject, objectName)) + { + NVSWITCH_PRINT(device, ERROR, + "NVRM: %s: object %c%c%c was found, but discarded due to " + "a type mismatch in the header!\n", __FUNCTION__, + objectName[0], objectName[1], objectName[2]); + return -NVL_ERR_INVALID_STATE; + } + + if (pObject != NULL) + { + status = _nvswitch_inforom_unpack_object(pObjectFormat, pPackedObject, pObject); + if (status != NVL_SUCCESS) + { + return status; + } + } + + return status; +} + +static NvlStatus +_nvswitch_inforom_read_file +( + nvswitch_device *device, + const char objectName[INFOROM_FS_FILE_NAME_SIZE], + NvU32 packedObjectSize, + NvU8 *pPackedObject +) +{ + NvlStatus status = NVL_SUCCESS; + void *pDmaBuf; + NvU64 dmaHandle; + NvU32 fsRet; + FLCN *pFlcn = device->pSoe->pFlcn; + RM_FLCN_CMD_SOE soeCmd; + RM_SOE_IFR_CMD *pIfrCmd = &soeCmd.cmd.ifr; + RM_SOE_IFR_CMD_PARAMS *pParams = &pIfrCmd->params; + NvU32 cmdSeqDesc; + NVSWITCH_TIMEOUT timeout; + // The first 4 bytes are reserved for status/debug data from SOE + NvU32 transferSize = packedObjectSize + sizeof(NvU32); + + status = nvswitch_os_alloc_contig_memory(device->os_handle, &pDmaBuf, transferSize, + (device->dma_addr_width == 32)); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to allocate contig memory\n", __FUNCTION__); + return status; + } + + status = nvswitch_os_map_dma_region(device->os_handle, pDmaBuf, &dmaHandle, + transferSize, NVSWITCH_DMA_DIR_TO_SYSMEM); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to map DMA region\n", __FUNCTION__); + goto ifr_dma_free_and_exit; + } + + nvswitch_os_memset(&soeCmd, 0, sizeof(soeCmd)); + soeCmd.hdr.unitId = RM_SOE_UNIT_IFR; + soeCmd.hdr.size = sizeof(soeCmd); + pIfrCmd->cmdType = RM_SOE_IFR_READ; + + RM_FLCN_U64_PACK(&pParams->dmaHandle, &dmaHandle); + nvswitch_os_memcpy(pParams->fileName, objectName, INFOROM_FS_FILE_NAME_SIZE); + pParams->offset = 0; + pParams->sizeInBytes = packedObjectSize; + + //SOE will copy entire file into SYSMEM + nvswitch_os_memset(pDmaBuf, 0, transferSize); + + cmdSeqDesc = 0; + nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS * 100, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, (PRM_FLCN_CMD)&soeCmd, NULL, NULL, + SOE_RM_CMDQ_LOG_ID, &cmdSeqDesc, &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: DMA transfer failed\n", __FUNCTION__); + goto ifr_dma_unmap_and_exit; + } + + status = nvswitch_os_sync_dma_region_for_cpu(device->os_handle, dmaHandle, + transferSize, + NVSWITCH_DMA_DIR_TO_SYSMEM); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to sync DMA region\n", __FUNCTION__); + goto ifr_dma_unmap_and_exit; + } + + nvswitch_os_memcpy(pPackedObject, (NvU8 *)pDmaBuf + sizeof(NvU32), packedObjectSize); + + fsRet = *(NvU32*)pDmaBuf; + if (fsRet != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: FS error %x. 
Filename: %3s\n", __FUNCTION__, fsRet, + pParams->fileName); + } + +ifr_dma_unmap_and_exit: + nvswitch_os_unmap_dma_region(device->os_handle, pDmaBuf, dmaHandle, + transferSize, NVSWITCH_DMA_DIR_FROM_SYSMEM); +ifr_dma_free_and_exit: + nvswitch_os_free_contig_memory(device->os_handle, pDmaBuf, transferSize); + + return status; +} + +static NvlStatus +_nvswitch_inforom_write_file +( + nvswitch_device *device, + const char objectName[INFOROM_FS_FILE_NAME_SIZE], + NvU32 packedObjectSize, + NvU8 *pPackedObject +) +{ + NvlStatus status = NVL_SUCCESS; + void *pDmaBuf; + NvU64 dmaHandle; + NvU32 fsRet; + FLCN *pFlcn = device->pSoe->pFlcn; + RM_FLCN_CMD_SOE soeCmd; + RM_SOE_IFR_CMD *pIfrCmd = &soeCmd.cmd.ifr; + RM_SOE_IFR_CMD_PARAMS *pParams = &pIfrCmd->params; + NvU32 cmdSeqDesc; + NVSWITCH_TIMEOUT timeout; + // The first 4 bytes are reserved for status/debug data from SOE + NvU32 transferSize = packedObjectSize + sizeof(NvU32); + + status = nvswitch_os_alloc_contig_memory(device->os_handle, &pDmaBuf, transferSize, + (device->dma_addr_width == 32)); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to allocate contig memory\n", __FUNCTION__); + return status; + } + + status = nvswitch_os_map_dma_region(device->os_handle, pDmaBuf, &dmaHandle, + transferSize, NVSWITCH_DMA_DIR_BIDIRECTIONAL); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to map DMA region\n", __FUNCTION__); + goto ifr_dma_free_and_exit; + } + + nvswitch_os_memset(&soeCmd, 0, sizeof(soeCmd)); + soeCmd.hdr.unitId = RM_SOE_UNIT_IFR; + soeCmd.hdr.size = sizeof(soeCmd); + pIfrCmd->cmdType = RM_SOE_IFR_WRITE; + + RM_FLCN_U64_PACK(&pParams->dmaHandle, &dmaHandle); + nvswitch_os_memcpy(pParams->fileName, objectName, INFOROM_FS_FILE_NAME_SIZE); + pParams->offset = 0; + pParams->sizeInBytes = packedObjectSize; + + //SOE will copy entire file from SYSMEM + nvswitch_os_memset(pDmaBuf, 0, transferSize); + nvswitch_os_memcpy((NvU8 *)pDmaBuf + sizeof(NvU32), pPackedObject, packedObjectSize); + + status = nvswitch_os_sync_dma_region_for_device(device->os_handle, dmaHandle, + transferSize, + NVSWITCH_DMA_DIR_BIDIRECTIONAL); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to sync DMA region\n", __FUNCTION__); + goto ifr_dma_unmap_and_exit; + } + + cmdSeqDesc = 0; + nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS * 100, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, (PRM_FLCN_CMD)&soeCmd, NULL, NULL, + SOE_RM_CMDQ_LOG_ID, &cmdSeqDesc, &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: DMA transfer failed\n", __FUNCTION__); + goto ifr_dma_unmap_and_exit; + } + + status = nvswitch_os_sync_dma_region_for_cpu(device->os_handle, dmaHandle, + transferSize, + NVSWITCH_DMA_DIR_BIDIRECTIONAL); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to sync DMA region\n", __FUNCTION__); + goto ifr_dma_unmap_and_exit; + } + + fsRet = *(NvU32*)pDmaBuf; + if (fsRet != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: FS returned %x. Filename: %3s\n", __FUNCTION__, fsRet, + pParams->fileName); + } + +ifr_dma_unmap_and_exit: + nvswitch_os_unmap_dma_region(device->os_handle, pDmaBuf, dmaHandle, + packedObjectSize, NVSWITCH_DMA_DIR_FROM_SYSMEM); +ifr_dma_free_and_exit: + nvswitch_os_free_contig_memory(device->os_handle, pDmaBuf, transferSize); + + if (status != NV_OK) + { + return status; + } + + return status; +} + +/*! + * Pack and write an object to the InfoROM filesystem. 
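+ *
+ * For illustration only (the concrete type characters are defined by the
+ * INFOROM_FMT_* macros and are not reproduced here): a format describing two
+ * 1-byte fields followed by one 4-byte field packs into 2*1 + 1*4 = 6 bytes.
+ * _nvswitch_inforom_calc_packed_object_size() computes this by treating an
+ * optional leading decimal count as a repeat factor for the type character
+ * that follows it, and the pack/unpack helpers walk the string the same way.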
+ * + * @param[in] device switch device pointer + * @param[in] pInforom INFOROM object pointer + * @param[in] objectName Name of the object to write to the InfoROM + * @param[in] pObjectFormat Ascii-string describing the layout of the + * object to write. Used to calculate the + * packed object size and to pack the data. + * @param[in] pObject Contains the unpacked object to write to + * the InfoROM. It is assumed that this is + * large enough to hold the unpacked data + * size computed from the pObjectFormat + * string. This argument may not be NULL. + * @param[in|out] pOldPackedObject As input, contains the old packed data of + * the object, to be used to determine if any + * parts of the write can be avoided. This + * argument may be NULL. + * + * @return NVL_SUCCESS + * If the object data is successfully written + * @return -NVL_BAD_ARGS + * If any of the required pointers are NULL + * @return -NVL_ERR_NOT_SUPPORTED + * If the InfoROM filesystem image is not supported + * @return Other error + * If dynamic memory allocation fails, packed size determination fails, + * object packing fails, or if there is a filesystem adapter failure in + * writing the packed data, it may result in other error values. + */ + +NvlStatus +nvswitch_inforom_write_object +( + nvswitch_device *device, + const char objectName[3], + const char *pObjectFormat, + void *pObject, + NvU8 *pOldPackedObject +) +{ + struct inforom *pInforom = device->pInforom; + NvlStatus status; + NvU8 *pPackedObject; + NvU16 packedObjectSize; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + status = _nvswitch_inforom_calc_packed_object_size(pObjectFormat, + &packedObjectSize); + if (status != NVL_SUCCESS) + { + return status; + } + + if (packedObjectSize > INFOROM_MAX_PACKED_SIZE) + { + NVSWITCH_ASSERT(packedObjectSize > INFOROM_MAX_PACKED_SIZE); + return -NVL_ERR_INVALID_STATE; + } + + // Allocate a buffer to pack the object into + pPackedObject = nvswitch_os_malloc(packedObjectSize); + if (!pPackedObject) + { + return -NVL_NO_MEM; + } + + status = _nvswitch_inforom_pack_object(pObjectFormat, pObject, pPackedObject); + if (status != NVL_SUCCESS) + { + goto done; + } + + status = _nvswitch_inforom_write_file(device, objectName, packedObjectSize, pPackedObject); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "InfoROM FS write for %c%c%c failed! rc:%d\n", + objectName[0], objectName[1], objectName[2], status); + goto done; + } + +done: + nvswitch_os_free(pPackedObject); + return status; +} + +/*! + * @brief Looks for an object of type pType in the object cache. + */ +static NvlStatus +_nvswitch_inforom_get_cached_object +( + struct inforom *pInforom, + const char *pType, + INFOROM_OBJECT_HEADER_V1_00 **ppHeader +) +{ + struct INFOROM_OBJECT_CACHE_ENTRY *pCacheEntry = pInforom->pObjectCache; + + while (pCacheEntry != NULL) + { + if (INFOROM_FS_FILE_NAMES_MATCH(pType, pCacheEntry->header.type)) + { + *ppHeader = &pCacheEntry->header; + return NVL_SUCCESS; + } + + pCacheEntry = pCacheEntry->pNext; + } + + return -NVL_NOT_FOUND; +} + +/*! + * @brief Adds an object's unpacked header and packed data to the object cache. 
+ * + * @param[in] pInforom INFOROM object pointer + * @param[in] pHeader A pointer to the object's unpacked header + * + * @return NVL_SUCCESS + * If the object information is successfully added to the object cache + * @return Other error + * If dynamic memory allocation of the cache entry fails + */ +NvlStatus nvswitch_inforom_add_object +( + struct inforom *pInforom, + INFOROM_OBJECT_HEADER_V1_00 *pHeader +) +{ + struct INFOROM_OBJECT_CACHE_ENTRY *pCacheEntry = NULL; + + if (!pInforom || !pHeader) + { + return -NVL_ERR_INVALID_STATE; + } + + // Allocate a new cache entry + pCacheEntry = nvswitch_os_malloc(sizeof(struct INFOROM_OBJECT_CACHE_ENTRY)); + if (!pCacheEntry) + { + return -NVL_NO_MEM; + } + + nvswitch_os_memset(pCacheEntry, 0, + sizeof(struct INFOROM_OBJECT_CACHE_ENTRY)); + + nvswitch_os_memcpy(&pCacheEntry->header, pHeader, + sizeof(INFOROM_OBJECT_HEADER_V1_00)); + + pCacheEntry->pNext = pInforom->pObjectCache; + pInforom->pObjectCache = pCacheEntry; + + return NVL_SUCCESS; +} + +/*! + * Get the version/subversion of an object from the Inforom. + * + * @param[in] device switch device pointer + * @param[in] pInforom INFOROM object pointer + * @param[in] objectName The name of the object to get the version info of + * @param[out] pVersion The version of the named object + * @param[out] pSubVersion The subversion of the named object + * + * @return NVL_SUCCESS + * Version information successfully read from the inforom. + * + * @return -NVL_ERR_NOT_SUPPORTED + * The InfoROM filesystem could not be used. + * + * @return Other error + * From @inforomReadObject if the object was not cached and could not be + * read from the filesystem. + */ +NvlStatus +nvswitch_inforom_get_object_version_info +( + nvswitch_device *device, + const char objectName[3], + NvU8 *pVersion, + NvU8 *pSubVersion +) +{ + NvlStatus status = NVL_SUCCESS; + struct inforom *pInforom = device->pInforom; + NvU8 packedHeader[INFOROM_OBJECT_HEADER_V1_00_PACKED_SIZE]; + INFOROM_OBJECT_HEADER_V1_00 *pHeader = NULL; + INFOROM_OBJECT_HEADER_V1_00 header; + NvU8 *pFile; + NvU16 fileSize; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pFile = NULL; + + // First, check the cache for the object in question + status = _nvswitch_inforom_get_cached_object(pInforom, objectName, &pHeader); + if (status != NVL_SUCCESS) + { + // + // The object wasn't cached, so we need to read it from + // the filesystem. Since just the header is read, no checksum + // verification is performed. + // + status = _nvswitch_inforom_read_file(device, objectName, + INFOROM_OBJECT_HEADER_V1_00_PACKED_SIZE, packedHeader); + if (status != NVL_SUCCESS) + { + goto done; + } + + // Unpack the header + status = _nvswitch_inforom_unpack_object(INFOROM_OBJECT_HEADER_V1_00_FMT, + packedHeader, (NvU32 *)&header); + if (status != NVL_SUCCESS) + { + goto done; + } + + pHeader = &header; + + // + // Verify that the file is not corrupt, by attempting to read in the + // entire file. We only need to do this for objects that weren't + // cached, as we assume that cached objects were validated when they + // were added to the cache. 
+ // + fileSize = (NvU16)pHeader->size; + if ((fileSize == 0) || (fileSize > INFOROM_MAX_PACKED_SIZE)) + { + status = -NVL_ERR_INVALID_STATE; + goto done; + } + + pFile = nvswitch_os_malloc(fileSize); + if (!pFile) + { + status = -NVL_NO_MEM; + goto done; + } + + status = _nvswitch_inforom_read_file(device, objectName, fileSize, pFile); +done: + if (pFile != NULL) + { + nvswitch_os_free(pFile); + } + } + + if (status == NVL_SUCCESS && pHeader != NULL) + { + *pVersion = (NvU8)(pHeader->version & 0xFF); + *pSubVersion = (NvU8)(pHeader->subversion & 0xFF); + } + + return status; +} + +/*! + * Fill in the static identification data structure for the use by the SOE + * to be passed on to a BMC over the I2CS interface. + * + * @param[in] device switch device pointer + * @param[in] pInforom INFOROM object pointer + * @param[in, out] pData Target data structure pointer (the structure + * must be zero initialized by the caller) + */ +void +nvswitch_inforom_read_static_data +( + nvswitch_device *device, + struct inforom *pInforom, + RM_SOE_SMBPBI_INFOROM_DATA *pData +) +{ +#define _INFOROM_TO_SOE_STRING_COPY(obj, irName, soeName) \ +{ \ + NvU32 _i; \ + ct_assert(NV_ARRAY_ELEMENTS(pInforom->obj.object.irName) <= \ + NV_ARRAY_ELEMENTS(pData->obj.soeName)); \ + for (_i = 0; _i < NV_ARRAY_ELEMENTS(pInforom->obj.object.irName); ++_i) \ + { \ + pData->obj.soeName[_i] = (NvU8)(pInforom->obj.object.irName[_i] & 0xff); \ + } \ + if (NV_ARRAY_ELEMENTS(pInforom->obj.object.irName) < \ + NV_ARRAY_ELEMENTS(pData->obj.soeName)) \ + { \ + do \ + { \ + pData->obj.soeName[_i++] = 0; \ + } \ + while (_i < NV_ARRAY_ELEMENTS(pData->obj.soeName)); \ + } \ +} + + if (pInforom->OBD.bValid) + { + pData->OBD.bValid = NV_TRUE; + pData->OBD.buildDate = (NvU32)pInforom->OBD.object.buildDate; + _nvswitch_inforom_string_copy(pInforom->OBD.object.marketingName, + pData->OBD.marketingName, + NV_ARRAY_ELEMENTS(pData->OBD.marketingName)); + + _nvswitch_inforom_string_copy(pInforom->OBD.object.serialNumber, + pData->OBD.serialNum, + NV_ARRAY_ELEMENTS(pData->OBD.serialNum)); + + // + // boardPartNum requires special handling, as its size exceeds that + // of its InfoROM representation + // + _INFOROM_TO_SOE_STRING_COPY(OBD, productPartNumber, boardPartNum); + } + + if (pInforom->OEM.bValid) + { + pData->OEM.bValid = NV_TRUE; + _nvswitch_inforom_string_copy(pInforom->OEM.object.oemInfo, + pData->OEM.oemInfo, + NV_ARRAY_ELEMENTS(pData->OEM.oemInfo)); + } + + if (pInforom->IMG.bValid) + { + pData->IMG.bValid = NV_TRUE; + _nvswitch_inforom_string_copy(pInforom->IMG.object.version, + pData->IMG.inforomVer, + NV_ARRAY_ELEMENTS(pData->IMG.inforomVer)); + } + +#undef _INFOROM_TO_SOE_STRING_COPY +} + +/*! 
+ *
+ * Wrapper to read an inforom object into system memory and cache the header
+ *
+ */
+NvlStatus
+nvswitch_inforom_load_object
+(
+    nvswitch_device *device,
+    struct inforom  *pInforom,
+    const char objectName[3],
+    const char *pObjectFormat,
+    NvU8 *pPackedObject,
+    void *pObject
+)
+{
+    NvlStatus status;
+
+    status = nvswitch_inforom_read_object(device, objectName, pObjectFormat,
+                                        pPackedObject, pObject);
+    if (status != NVL_SUCCESS)
+    {
+        NVSWITCH_PRINT(device, ERROR, "Failed to read %c%c%c object, rc:%d\n",
+                    objectName[0], objectName[1], objectName[2], status);
+        return status;
+    }
+
+    status = nvswitch_inforom_add_object(pInforom,
+                                        (INFOROM_OBJECT_HEADER_V1_00 *)pObject);
+    if (status != NVL_SUCCESS)
+    {
+        NVSWITCH_PRINT(device, ERROR,
+                    "Failed to cache %c%c%c object header, rc:%d\n",
+                    objectName[0], objectName[1], objectName[2], status);
+        return status;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Inforom State Initialization
+ *
+ * Initializes the filesystem layer of the InfoROM so that InfoROM objects can
+ * be read. It also loads certain InfoROM objects that are needed as early as
+ * possible in the initialization path (see bug 992278).
+ *
+ * @param[in]      device    switch device pointer
+ * @param[in, out] pInforom  INFOROM object pointer.
+ *
+ * @return NVL_SUCCESS
+ *         If the filesystem layer is initialized successfully
+ * @return -NVL_ERR_NOT_SUPPORTED
+ *         If an adapter could not be set up for the InfoROM image device.
+ * @return Other error
+ *         From attempting to determine the image device location of the InfoROM
+ *         or constructing a filesystem adapter for the image.
+ */
+NvlStatus
+nvswitch_initialize_inforom
+(
+    nvswitch_device *device
+)
+{
+    struct inforom *pInforom;
+
+    pInforom = nvswitch_os_malloc(sizeof(struct inforom));
+    if (!pInforom)
+    {
+        return -NVL_NO_MEM;
+    }
+    nvswitch_os_memset(pInforom, 0, sizeof(struct inforom));
+
+    device->pInforom = pInforom;
+
+    return NVL_SUCCESS;
+}
+
+/*!
+ * @brief Tears down the state of the InfoROM.
+ *
+ * This includes tearing down the HAL, the FSAL, and freeing any objects left
+ * in the object cache.
+ * + * @param[in] device switch device pointer + * @param[in] pInforom INFOROM object pointer + */ +void +nvswitch_destroy_inforom +( + nvswitch_device *device +) +{ + struct inforom *pInforom = device->pInforom; + struct INFOROM_OBJECT_CACHE_ENTRY *pCacheEntry; + struct INFOROM_OBJECT_CACHE_ENTRY *pTmpCacheEntry; + + if (pInforom) + { + pCacheEntry = pInforom->pObjectCache; + while (pCacheEntry != NULL) + { + pTmpCacheEntry = pCacheEntry; + pCacheEntry = pCacheEntry->pNext; + nvswitch_os_free(pTmpCacheEntry); + } + + nvswitch_os_free(pInforom); + device->pInforom = NULL; + } +} + +void +nvswitch_inforom_post_init +( + nvswitch_device *device +) +{ + return; +} + +NvlStatus +nvswitch_initialize_inforom_objects +( + nvswitch_device *device +) +{ + NvlStatus status; + struct inforom *pInforom = device->pInforom; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + // RO objects + status = nvswitch_inforom_read_only_objects_load(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, "Failed to load RO objects, rc:%d\n", + status); + } + + // NVL object + status = nvswitch_inforom_nvlink_load(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, "Failed to load NVL object, rc:%d\n", + status); + } + + status = nvswitch_inforom_ecc_load(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, "Failed to load ECC object, rc:%d\n", + status); + } + + status = nvswitch_inforom_oms_load(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, "Failed to load OMS object, rc:%d\n", + status); + } + + status = nvswitch_inforom_bbx_load(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, "Failed to load BBX object, rc: %d\n", + status); + } + + status = nvswitch_inforom_dem_load(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, "Failed to load DEM object, rc: %d\n", + status); + } + + return NVL_SUCCESS; +} + +void +nvswitch_destroy_inforom_objects +( + nvswitch_device *device +) +{ + struct inforom *pInforom = device->pInforom; + + if (pInforom == NULL) + { + return; + } + + // BBX object + nvswitch_inforom_bbx_unload(device); + + // ECC object + nvswitch_inforom_ecc_unload(device); + + // NVL object + nvswitch_inforom_nvlink_unload(device); + + // OMS object + nvswitch_inforom_oms_unload(device); +} diff --git a/src/common/nvswitch/kernel/ipmi/fru_nvswitch.c b/src/common/nvswitch/kernel/ipmi/fru_nvswitch.c new file mode 100644 index 000000000..d6b4f9b28 --- /dev/null +++ b/src/common/nvswitch/kernel/ipmi/fru_nvswitch.c @@ -0,0 +1,158 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ipmi/fru_nvswitch.h"
+
+#define ASCII_6BIT_TO_8BIT(b) ((b) + 0x20)
+
+#define OFFSET_SCALE (8)
+
+static NvU8
+_nvswitch_calculate_checksum
+(
+    NvU8 *data,
+    NvU32 size
+)
+{
+    NvU32 i;
+    NvU8 checksum = 0;
+
+    for (i = 0; i < size; ++i)
+    {
+        checksum += data[i];
+    }
+    return checksum;
+}
+
+/*
+ * @brief Retrieves field bytes from src and stores them into dest.
+ *
+ * @return The size of the field including the type/length byte.
+ */
+static NvU8
+_nvswitch_get_field_bytes
+(
+    NvU8 *pFieldSrc,
+    NvU8 *pFieldDest
+)
+{
+    NvU32 i;
+    NvU8 type;
+    NvU8 length;
+    NvU8 byte;
+
+    if (*pFieldSrc == NVSWITCH_IPMI_FRU_SENTINEL)
+    {
+        return 0;
+    }
+
+    type = DRF_VAL(SWITCH_IPMI, _FRU_TYPE_LENGTH_BYTE, _TYPE, *pFieldSrc);
+    length = DRF_VAL(SWITCH_IPMI, _FRU_TYPE_LENGTH_BYTE, _LENGTH, *pFieldSrc);
+
+    pFieldSrc++;
+
+    for (i = 0; i < length; ++i)
+    {
+        switch (type)
+        {
+            case NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE_ASCII_6BIT:
+                byte = ASCII_6BIT_TO_8BIT(pFieldSrc[i]);
+                break;
+            case NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE_TYPE_ASCII_8BIT:
+                byte = pFieldSrc[i];
+                break;
+            default:
+                byte = 0;
+                break;
+        }
+        pFieldDest[i] = byte;
+    }
+
+    return (length + 1);
+}
+
+/*
+ * @brief Parse FRU board info from the given ROM image.
+ *
+ * @return NVL_SUCCESS if the board info is valid
+ */
+NvlStatus
+nvswitch_read_partition_fru_board_info
+(
+    nvswitch_device *device,
+    NVSWITCH_IPMI_FRU_BOARD_INFO *pBoardInfo,
+    NvU8 *pRomImage
+)
+{
+    NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER *pEepromHeader;
+    NVSWITCH_IPMI_FRU_EEPROM_BOARD_INFO *pEepromBoardInfo;
+    NvU8 *pInfoSrc;
+
+    if (pBoardInfo == NULL || pRomImage == NULL)
+    {
+        return -NVL_ERR_GENERIC;
+    }
+    pEepromHeader = (NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER *)pRomImage;
+
+    // The common header bytes must sum to zero (zero checksum)
+    if (_nvswitch_calculate_checksum((NvU8 *)pEepromHeader,
+            sizeof(NVSWITCH_IPMI_FRU_EEPROM_COMMON_HEADER)) != 0)
+    {
+        NVSWITCH_PRINT(device, SETUP,
+            "%s: Common header checksum error.\n", __FUNCTION__);
+        return -NVL_ERR_GENERIC;
+    }
+
+    pEepromBoardInfo = (NVSWITCH_IPMI_FRU_EEPROM_BOARD_INFO *)(pRomImage +
+        (pEepromHeader->boardInfoOffset * OFFSET_SCALE));
+
+    if (_nvswitch_calculate_checksum((NvU8 *)pEepromBoardInfo,
+            pEepromBoardInfo->size * OFFSET_SCALE) != 0)
+    {
+        NVSWITCH_PRINT(device, SETUP,
+            "%s: Board info checksum error.\n", __FUNCTION__);
+        return -NVL_ERR_GENERIC;
+    }
+
+    if (pEepromBoardInfo->version != 0x1 || pEepromBoardInfo->languageCode != 0x0)
+    {
+        return -NVL_ERR_NOT_SUPPORTED;
+    }
+
+    nvswitch_os_memset(pBoardInfo, 0, sizeof(NVSWITCH_IPMI_FRU_BOARD_INFO));
+
+    pInfoSrc = (NvU8 *)&pEepromBoardInfo->boardInfo;
+
+    // LS byte first
+    pBoardInfo->mfgDateTime = pInfoSrc[0] | (pInfoSrc[1] << 8) | (pInfoSrc[2] << 16);
+    pInfoSrc += 3;
+
+    pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->mfg);
+    pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->productName);
+    pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->serialNum);
+    pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->partNum);
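The field walk above follows the IPMI FRU type/length byte convention: a type code in the top bits, a byte count in the low bits, and 6-bit ASCII restored by adding 0x20 (see ASCII_6BIT_TO_8BIT). A minimal standalone sketch of that decode; the 2-bit/6-bit split and the sample values are assumed purely for illustration, since the real layout comes from the NVSWITCH_IPMI_FRU_TYPE_LENGTH_BYTE field definitions, which are not shown here:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t type_length = 0xC9;               /* hypothetical byte from a board info area   */
    uint8_t type        = type_length >> 6;   /* assumed: bits 7:6 = type (3 = 8-bit ASCII) */
    uint8_t length      = type_length & 0x3F; /* assumed: bits 5:0 = data length (9 bytes)  */

    printf("type=%u length=%u\n", type, length);

    /* Type code 2 (6-bit ASCII) stores each character as an offset from 0x20,
     * so adding 0x20 recovers the printable character, which is the same
     * mapping ASCII_6BIT_TO_8BIT() performs above. */
    uint8_t sixbit = 0x0E;                    /* hypothetical 6-bit code */
    printf("decoded char: %c\n", (char)(sixbit + 0x20));
    return 0;
}

+    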
pInfoSrc += _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->fileId); + _nvswitch_get_field_bytes(pInfoSrc, (NvU8 *)pBoardInfo->customMfgInfo); + + return NVL_SUCCESS; +} diff --git a/src/common/nvswitch/kernel/lr10/clock_lr10.c b/src/common/nvswitch/kernel/lr10/clock_lr10.c new file mode 100644 index 000000000..9ca271ddf --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/clock_lr10.c @@ -0,0 +1,374 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/clock_lr10.h" +#include "lr10/soe_lr10.h" +#include "nvswitch/lr10/dev_soe_ip.h" +#include "nvswitch/lr10/dev_pri_ringstation_sys.h" +#include "nvswitch/lr10/dev_trim.h" +#include "nvswitch/lr10/dev_nvs.h" +#include "nvswitch/lr10/dev_nvlperf_ip.h" +#include "nvswitch/lr10/dev_npgperf_ip.h" +#include "nvswitch/lr10/dev_nvlctrl_ip.h" +#include "nvswitch/lr10/dev_nv_xp.h" +#include "nvswitch/lr10/dev_nv_xve.h" +#include "nvswitch/lr10/dev_nport_ip.h" +#include "nvswitch/lr10/dev_minion_ip.h" +#include "nvswitch/lr10/dev_timer.h" +#include "nvswitch/lr10/dev_pri_ringmaster.h" +#include "nvswitch/lr10/dev_pri_ringstation_prt.h" + +// +// Initialize the software state of the switch PLL +// +NvlStatus +nvswitch_init_pll_config_lr10 +( + nvswitch_device *device +) +{ + NVSWITCH_PLL_LIMITS pll_limits; + NVSWITCH_PLL_INFO pll; + NvlStatus retval = NVL_SUCCESS; + + // + // These parameters could come from schmoo'ing API, settings file or a ROM. + // If no configuration ROM settings are present, use the PLL documentation + // + // Refer to the PLL35G_DYN_PRB_ESD_B2 cell Vbios Table, in the PLL datasheet + // for restrictions on MDIV, NDIV and PLDIV to satisfy the pll's frequency limitation. 
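+    //
+    // Concretely, with the coefficients chosen below: update rate = ref / MDIV
+    // = 100 / 5 = 20 MHz (inside the 13.5..38.4 MHz window), VCO = ref * NDIV
+    // / MDIV = 100 * 164 / 5 = 3280 MHz (inside the 1750..3800 MHz window),
+    // and the VCO output is halved (via PLDIV or the 2X distribution mode) to
+    // reach the 1640 MHz switch clock target.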
+ // + // PLL35G_DYN_PRB_ESD_B1.doc + // + + pll_limits.ref_min_mhz = 100; + pll_limits.ref_max_mhz = 100; + pll_limits.vco_min_mhz = 1750; + pll_limits.vco_max_mhz = 3800; + pll_limits.update_min_mhz = 13; // 13.5MHz + pll_limits.update_max_mhz = 38; // 38.4MHz + pll_limits.m_min = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_MDIV_MIN; + pll_limits.m_max = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_MDIV_MAX; + pll_limits.n_min = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_NDIV_MIN; + pll_limits.n_max = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_NDIV_MAX; + pll_limits.pl_min = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_PLDIV_MIN; + pll_limits.pl_max = NV_PCLOCK_NVSW_SWITCHPLL_COEFF_PLDIV_MAX; + pll_limits.valid = NV_TRUE; + + // + // set well known coefficients to achieve frequency + // MDIV: need to set > 1 to achieve update_rate < 38.4 MHz + // 100 / 5 = 20 MHz update rate, therefore MDIV = 5 + // NDIV needs to take us all the way to 1640 MHz + // 1640 / 20 = 82. But 100*82/5 < 1.75GHz VCOmin, + // therefore double NDIV to 164 and set PDIV to 2. + // + + pll.src_freq_khz = 100000; // 100MHz + pll.M = 5; + pll.N = 164; + pll.PL = 1; + pll.dist_mode = NV_PCLOCK_NVSW_CLK_DIST_MODE_SWITCH2CLK_DIST_MODE_2XCLK; + pll.refclk_div = 15; + + retval = nvswitch_validate_pll_config(device, &pll, pll_limits); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, WARN, + "Selecting default PLL setting.\n"); + + // Select default, safe clock (1.64GHz) + pll.src_freq_khz = 100000; // 100MHz + pll.M = 5; + pll.N = 164; + pll.PL = 2; + pll.dist_mode = + NV_PCLOCK_NVSW_CLK_DIST_MODE_SWITCH2CLK_DIST_MODE_1XCLK; + pll.refclk_div = NV_PCLOCK_NVSW_RX_BYPASS_REFCLK_DIV_INIT; + + retval = nvswitch_validate_pll_config(device, &pll, pll_limits); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Default PLL setting failed.\n"); + return retval; + } + } + + device->switch_pll = pll; + + return NVL_SUCCESS; +} + +// +// Check that the PLLs are initialized. VBIOS is expected to configure PLLs +// +NvlStatus +nvswitch_init_pll_lr10 +( + nvswitch_device *device +) +{ + NvU32 pllRegVal; + + // + // Clocks should only be initialized on silicon or a clocks netlist on emulation + // Unfortunately, we don't have a full robust infrastructure for detecting the + // runtime environment as we do on GPU. 
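+    // When a pre-silicon platform is detected (RTL sim, emulation, fmodel),
+    // the PLL lock checks below are skipped and NVL_SUCCESS is returned.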
+ // + if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device)) + { + NVSWITCH_PRINT(device, WARN, + "%s: Skipping setup of NVSwitch clocks\n", + __FUNCTION__); + return NVL_SUCCESS; + } + + pllRegVal = NVSWITCH_REG_RD32(device, _PCLOCK, _NVSW_SWITCHPLL_CFG); + if (!FLD_TEST_DRF(_PCLOCK, _NVSW_SWITCHPLL_CFG, _PLL_LOCK, _TRUE, pllRegVal)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: _PLL_LOCK failed\n", + __FUNCTION__); + return -NVL_INITIALIZATION_TOTAL_FAILURE; + } + if (!FLD_TEST_DRF(_PCLOCK, _NVSW_SWITCHPLL_CFG, _PLL_FREQLOCK, _YES, pllRegVal)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: _PLL_FREQLOCK failed\n", + __FUNCTION__); + return -NVL_INITIALIZATION_TOTAL_FAILURE; + } + + pllRegVal = NVSWITCH_REG_RD32(device, _PCLOCK, _NVSW_SWITCHCLK); + if (!FLD_TEST_DRF_NUM(_PCLOCK, _NVSW_SWITCHCLK, _RDY_SWITCHPLL, 1, pllRegVal)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: _RDY_SWITCHPLL failed\n", + __FUNCTION__); + return -NVL_INITIALIZATION_TOTAL_FAILURE; + } + + pllRegVal = NVSWITCH_REG_RD32(device, _PCLOCK, _NVSW_SYSTEMCLK); + if (!FLD_TEST_DRF_NUM(_PCLOCK, _NVSW_SYSTEMCLK, _SYSTEMCLK_RDY_SWITCHPLL, 1, pllRegVal)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: _RDY_SWITCHPLL for SYSTEMCLK failed\n", + __FUNCTION__); + return -NVL_INITIALIZATION_TOTAL_FAILURE; + } + + return NVL_SUCCESS; +} + +// +// Timer functions +// + +void +nvswitch_init_hw_counter_lr10 +( + nvswitch_device *device +) +{ + return; +} + +void +nvswitch_hw_counter_shutdown_lr10 +( + nvswitch_device *device +) +{ + return; +} + +// +// Reads the 36-bit free running counter +// +NvU64 +nvswitch_hw_counter_read_counter_lr10 +( + nvswitch_device *device +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +// +// Initialize clock gating. +// +void +nvswitch_init_clock_gating_lr10 +( + nvswitch_device *device +) +{ + NvU32 regval; + NvU32 i; + + // BUS + NVSWITCH_REG_WR32(device, _PBUS, _EXT_CG1, + DRF_DEF(_PBUS, _EXT_CG1, _SLCG, __PROD) | + DRF_DEF(_PBUS, _EXT_CG1, _SLCG_C11, __PROD) | + DRF_DEF(_PBUS, _EXT_CG1, _SLCG_PRI, __PROD) | + DRF_DEF(_PBUS, _EXT_CG1, _SLCG_UNROLL, __PROD) | + DRF_DEF(_PBUS, _EXT_CG1, _SLCG_ROLL, __PROD) | + DRF_DEF(_PBUS, _EXT_CG1, _SLCG_IFR, __PROD) | + DRF_DEF(_PBUS, _EXT_CG1, _SLCG_PMC, __PROD)); + + // PRI + NVSWITCH_REG_WR32(device, _PPRIV_MASTER, _CG1, + DRF_DEF(_PPRIV_MASTER, _CG1, _SLCG, __PROD)); + + regval = + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _SLOWCLK, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_CONFIG_REGS, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_FUNNEL_DECODER, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_FUNNEL_ARB, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_HISTORY_BUFFER, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_MASTER, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_SLAVE, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV_UCODE_TRAP, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PRIV, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _LOC_PRIV, __PROD) | + DRF_DEF(_PPRIV_PRT, _CG1_SLCG, _PM, __PROD); + + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT0, _CG1, regval); + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT1, _CG1, regval); + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT2, _CG1, regval); + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT3, _CG1, regval); + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT4, _CG1, regval); + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT5, _CG1, regval); + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT6, _CG1, regval); + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT7, _CG1, regval); + NVSWITCH_REG_WR32(device, _PPRIV_PRT_PRT8, _CG1, regval); + + // XP3G + 
NVSWITCH_REG_WR32(device, _XP, _PRI_XP3G_CG,
+        DRF_DEF(_XP, _PRI_XP3G_CG, _IDLE_CG_DLY_CNT, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _IDLE_CG_EN, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _STATE_CG_EN, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _STALL_CG_DLY_CNT, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _STALL_CG_EN, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _QUIESCENT_CG_EN, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _WAKEUP_DLY_CNT, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _THROT_CLK_CNT, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _DI_DT_SKEW_VAL, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _THROT_CLK_EN, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _THROT_CLK_SW_OVER, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _PAUSE_CG_EN, __PROD) |
+        DRF_DEF(_XP, _PRI_XP3G_CG, _HALT_CG_EN, __PROD));
+
+    NVSWITCH_REG_WR32(device, _XP, _PRI_XP3G_CG1,
+        DRF_DEF(_XP, _PRI_XP3G_CG1, _MONITOR_CG_EN, __PROD));
+
+    // XVE
+    NVSWITCH_ENG_WR32_LR10(device, XVE, , 0, _XVE, _PRI_XVE_CG,
+        DRF_DEF(_XVE, _PRI_XVE_CG, _IDLE_CG_DLY_CNT, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _IDLE_CG_EN, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _STATE_CG_EN, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _STALL_CG_DLY_CNT, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _STALL_CG_EN, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _QUIESCENT_CG_EN, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _WAKEUP_DLY_CNT, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _THROT_CLK_CNT, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _DI_DT_SKEW_VAL, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _THROT_CLK_EN, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _THROT_CLK_SW_OVER, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _PAUSE_CG_EN, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG, _HALT_CG_EN, __PROD));
+
+    NVSWITCH_ENG_WR32_LR10(device, XVE, , 0, _XVE, _PRI_XVE_CG1,
+        DRF_DEF(_XVE, _PRI_XVE_CG1, _MONITOR_CG_EN, __PROD) |
+        DRF_DEF(_XVE, _PRI_XVE_CG1, _SLCG, __PROD));
+
+    // NPORT
+    NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _CTRL_SLCG,
+        DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _INGRESS, __PROD) |
+        DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _ROUTE, __PROD) |
+        DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _EGRESS, __PROD) |
+        DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _STRACK, __PROD) |
+        DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _TAGSTATE, __PROD) |
+        DRF_DEF(_NPORT, _CTRL_SLCG_DIS_CG, _TREX, __PROD));
+
+    // NPG_PERFMON
+    NVSWITCH_BCAST_WR32_LR10(device, NPG_PERFMON, _NPGPERF, _CTRL_CLOCK_GATING,
+        DRF_DEF(_NPGPERF, _CTRL_CLOCK_GATING, _CG1_SLCG, __PROD));
+
+    NVSWITCH_BCAST_WR32_LR10(device, NPG_PERFMON, _NPGPERF, _PERF_CTRL_CLOCK_GATING,
+        DRF_DEF(_NPGPERF, _PERF_CTRL_CLOCK_GATING, _CG1_SLCG, __PROD) |
+        DRF_DEF(_NPGPERF, _PERF_CTRL_CLOCK_GATING, _CONTEXT_FREEZE, __PROD));
+
+    //
+    // NVLW_PERFMON
+    //
+    // These registers are protected by PRIV_LEVEL_MASK6.
+    // PLM6 will not be blown on Production fuses.
+ // + NVSWITCH_BCAST_WR32_LR10(device, NVLW_PERFMON, _NVLPERF, _CTRL_CLOCK_GATING, + DRF_DEF(_NVLPERF, _CTRL_CLOCK_GATING, _CG1_SLCG, __PROD) | + DRF_DEF(_NVLPERF, _CTRL_CLOCK_GATING, _CG1_SLCG_CTRL, __PROD)); + + NVSWITCH_BCAST_WR32_LR10(device, NVLW_PERFMON, _NVLPERF, _PERF_CTRL_CLOCK_GATING, + DRF_DEF(_NVLPERF, _PERF_CTRL_CLOCK_GATING, _CG1_SLCG, __PROD) | + DRF_DEF(_NVLPERF, _PERF_CTRL_CLOCK_GATING, _CONTEXT_FREEZE, __PROD)); + + // NVLCTRL + NVSWITCH_BCAST_WR32_LR10(device, NVLW, _NVLCTRL, _PLL_PRI_CLOCK_GATING, + DRF_DEF(_NVLCTRL, _PLL_PRI_CLOCK_GATING, _CG1_SLCG, __PROD)); + + // MINION + for (i = 0; i < NVSWITCH_ENG_COUNT(device, MINION, ); i++) + { + regval = NVSWITCH_ENG_RD32_LR10(device, MINION, i, _CMINION_FALCON, _CG2); + + NVSWITCH_ENG_WR32_LR10(device, MINION, , i, _CMINION_FALCON, _CG2, + FLD_SET_DRF(_CMINION_FALCON, _CG2, _SLCG, __PROD, regval)); + } + + // PTIMER + NVSWITCH_REG_WR32(device, _PTIMER, _PRI_TMR_CG1, + DRF_DEF(_PTIMER, _PRI_TMR_CG1, _MONITOR_CG_EN, __PROD) | + DRF_DEF(_PTIMER, _PRI_TMR_CG1, _SLCG, __PROD)); + + // SOE + regval = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE, _FBIF_CG1); + regval = FLD_SET_DRF(_SOE, _FBIF_CG1, _SLCG, __PROD, regval); + NVSWITCH_SOE_WR32_LR10(device, 0, _SOE, _FBIF_CG1, regval); + + regval = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE, _FALCON_CG2); + regval = FLD_SET_DRF(_SOE, _FALCON_CG2, _SLCG, __PROD, regval); + NVSWITCH_SOE_WR32_LR10(device, 0, _SOE_FALCON, _CG2, regval); + + regval = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_MISC, _CG1); + regval = FLD_SET_DRF(_SOE, _MISC_CG1, _SLCG, __PROD, regval); + NVSWITCH_SOE_WR32_LR10(device, 0, _SOE_MISC, _CG1, regval); + + NVSWITCH_SOE_WR32_LR10(device, 0, _SOE_MISC, _TOP_CG, + DRF_DEF(_SOE_MISC, _TOP_CG, _IDLE_CG_DLY_CNT, __PROD)); + + return; +} diff --git a/src/common/nvswitch/kernel/lr10/discovery_lr10.c b/src/common/nvswitch/kernel/lr10/discovery_lr10.c new file mode 100644 index 000000000..f282ba590 --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/discovery_lr10.c @@ -0,0 +1,1470 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "common_nvswitch.h" +#include "lr10/lr10.h" + +#include "nvswitch/lr10/dev_nvs_top.h" +#include "nvswitch/lr10/nvlinkip_discovery.h" +#include "nvswitch/lr10/npgip_discovery.h" +#include "nvswitch/lr10/nxbar_discovery.h" + +#define VERBOSE_MMIO_DISCOVERY 0 + +#define MAKE_DISCOVERY_LR10(device, _chip, _engine) \ + { \ + #_engine, \ + NUM_##_engine##_ENGINE_##_chip, \ + NV_SWPTOP_ENUM_DEVICE_##_engine, \ + chip_device->eng##_engine \ + } + +typedef struct +{ + const char *engname; + NvU32 engcount_max; + NvU32 discovery_id; + ENGINE_DESCRIPTOR_TYPE_LR10 *engine; +} +DISCOVERY_TABLE_TYPE_LR10; + +#define NVSWITCH_DISCOVERY_ENTRY_INVALID 0x0 +#define NVSWITCH_DISCOVERY_ENTRY_ENUM 0x1 +#define NVSWITCH_DISCOVERY_ENTRY_DATA1 0x2 +#define NVSWITCH_DISCOVERY_ENTRY_DATA2 0x3 + +typedef struct +{ + void (*parse_entry)(nvswitch_device *device, NvU32 entry, NvU32 *entry_type, NvBool *entry_chain); + void (*parse_enum)(nvswitch_device *device, NvU32 entry, NvU32 *entry_device, NvU32 *entry_id, NvU32 *entry_version); + void (*handle_data1)(nvswitch_device *device, NvU32 entry, ENGINE_DESCRIPTOR_TYPE_LR10 *engine, NvU32 entry_device, NvU32 *discovery_list_size); + void (*handle_data2)(nvswitch_device *device, NvU32 entry, ENGINE_DESCRIPTOR_TYPE_LR10 *engine, NvU32 entry_device); +} +NVSWITCH_DISCOVERY_HANDLERS_LR10; + +#define DISCOVERY_DUMP_ENGINE_LR10(_device, _engine, _bcast) \ + _discovery_dump_eng_lr10(_device, #_engine, NVSWITCH_GET_CHIP_DEVICE_LR10(_device)->eng##_engine##_bcast, NUM_##_engine##_bcast##_ENGINE_LR10); + +static void +_discovery_dump_eng_lr10 +( + nvswitch_device *device, + const char *eng_name, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 count +) +{ + NvU32 i; + + if (VERBOSE_MMIO_DISCOVERY) + { + for (i = 0; i < count; i++) + { + if (engine[i].valid) + { + if (engine[i].disc_type == DISCOVERY_TYPE_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "%-24s[%2d]:V:%1x %s:%6x CL/ID:%2x/%2x\n", + eng_name, i, + engine[i].version, + "DI", + engine[i].info.top.discovery, + engine[i].info.top.cluster, engine[i].info.top.cluster_id); + } + else if (engine[i].disc_type == DISCOVERY_TYPE_UNICAST) + { + NVSWITCH_PRINT(device, SETUP, + "%-24s[%2d]:V:%1x %s:%6x\n", + eng_name, i, + engine[i].version, + "UC", + engine[i].info.uc.uc_addr); + } + else if (engine[i].disc_type == DISCOVERY_TYPE_BROADCAST) + { + NVSWITCH_PRINT(device, SETUP, + "%-24s[%2d]:V:%1x %s:%6x %s:%6x/%6x/%6x\n", + eng_name, i, + engine[i].version, + "BC", + engine[i].info.bc.bc_addr, + "MC", + engine[i].info.bc.mc_addr[0], + engine[i].info.bc.mc_addr[1], + engine[i].info.bc.mc_addr[2]); + } + else + { + NVSWITCH_PRINT(device, SETUP, + "%-24s[%2d]:V:%1x UNDEFINED\n", + eng_name, i, + engine[i].version); + } + } + else + { + NVSWITCH_PRINT(device, SETUP, + "%-24s[%2d]: INVALID\n", + eng_name, i); + } + } + } +} + +static NvlStatus +_nvswitch_device_discovery_lr10 +( + nvswitch_device *device, + NvU32 discovery_offset, + DISCOVERY_TABLE_TYPE_LR10 *discovery_table, + NvU32 discovery_table_size, + NVSWITCH_DISCOVERY_HANDLERS_LR10 *discovery_handlers +) +{ + ENGINE_DESCRIPTOR_TYPE_LR10 *engine = NULL; + NvU32 entry_type = NVSWITCH_DISCOVERY_ENTRY_INVALID; + NvBool entry_chain = NV_FALSE; + NvU32 entry = 0; + NvU32 entry_device = 0; + NvU32 entry_id = 0; + NvU32 entry_version = 0; + NvU32 entry_count = 0; + NvBool done = NV_FALSE; + NvlStatus retval = NVL_SUCCESS; + + // + // Must be at least two entries. 
We'll fix it up later when we find the length in the table + // + NvU32 discovery_list_size = 2; + + if (VERBOSE_MMIO_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "%s: NvSwitch Engine discovery table @%x\n", + __FUNCTION__, + discovery_offset); + } + + while ((!done) && (entry_count < discovery_list_size)) + { + entry = NVSWITCH_OFF_RD32(device, discovery_offset); + discovery_handlers->parse_entry(device, entry, &entry_type, &entry_chain); + + switch (entry_type) + { + case NVSWITCH_DISCOVERY_ENTRY_ENUM: + NVSWITCH_ASSERT(engine == NULL); + discovery_handlers->parse_enum(device, entry, &entry_device, &entry_id, &entry_version); + + { + NvU32 i; + + for(i = 0; i < discovery_table_size; i++) + { + if (entry_device == discovery_table[i].discovery_id) + { + if (discovery_table[i].engine == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s:_ENUM: ERROR: %s:device=%x id=%x version=%x not supported!\n", + __FUNCTION__, + discovery_table[i].engname, + entry_device, entry_id, entry_version); + NVSWITCH_ASSERT(0); + continue; + } + + if (entry_id < discovery_table[i].engcount_max) + { + engine = &(discovery_table[i].engine[entry_id]); + break; + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s:_ENUM: ERROR: %s[%d] out of engine range %d..%d\n", + __FUNCTION__, + discovery_table[i].engname, + entry_id, + 0, discovery_table[i].engcount_max-1); + } + } + } + + if (engine == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s:_ENUM: ERROR: device=%x id=%x version=%x not recognized!\n", + __FUNCTION__, + entry_device, entry_id, entry_version); + } + } + + if (engine != NULL) + { + if ((engine->valid == NV_TRUE) && + (engine->disc_type != DISCOVERY_TYPE_DISCOVERY)) + { + NVSWITCH_PRINT(device, WARN, + "%s:_ENUM: WARNING: device=%x id=%x previously discovered!\n", + __FUNCTION__, + entry_device, entry_id); + } + engine->valid = NV_TRUE; + engine->version = entry_version; + } + + break; + + case NVSWITCH_DISCOVERY_ENTRY_DATA1: + discovery_handlers->handle_data1(device, entry, engine, entry_device, &discovery_list_size); + break; + + case NVSWITCH_DISCOVERY_ENTRY_DATA2: + if (engine == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s:DATA2:engine == NULL. Skipping processing\n", + __FUNCTION__); + } + else + { + discovery_handlers->handle_data2(device, entry, engine, entry_device); + } + break; + + default: + NVSWITCH_PRINT(device, ERROR, + "%s:Unknown (%d)\n", + __FUNCTION__, entry_type); + NVSWITCH_ASSERT(0); + // Deliberate fallthrough + case NVSWITCH_DISCOVERY_ENTRY_INVALID: + // Invalid entry. Just ignore it + NVSWITCH_PRINT(device, SETUP, + "%s:_INVALID -- skip 0x%08x\n", + __FUNCTION__, entry); + break; + } + + if (!entry_chain) + { + // End of chain. 
Close the active engine + engine = NULL; + entry_device = 0; // Mark invalid + entry_id = ~0; + entry_version = ~0; + } + + discovery_offset += sizeof(NvU32); + entry_count++; + } + + if (entry_chain) + { + NVSWITCH_PRINT(device, ERROR, + "%s:Discovery list incorrectly terminated: chain end(%d)\n", + __FUNCTION__, + entry_chain); + NVSWITCH_ASSERT(!entry_chain); + } + + return retval; +} + +static void +_nvswitch_ptop_parse_entry_lr10 +( + nvswitch_device *device, + NvU32 entry, + NvU32 *entry_type, + NvBool *entry_chain +) +{ + NvU32 entry_type_lr10; + + entry_type_lr10 = DRF_VAL(_SWPTOP, _, ENTRY, entry); + *entry_chain = FLD_TEST_DRF(_SWPTOP, _, CHAIN, _ENABLE, entry); + + switch (entry_type_lr10) + { + case NV_SWPTOP_ENTRY_ENUM: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_ENUM; + break; + case NV_SWPTOP_ENTRY_DATA1: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_DATA1; + break; + case NV_SWPTOP_ENTRY_DATA2: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_DATA2; + break; + default: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_INVALID; + break; + } +} + +static void +_nvswitch_ptop_parse_enum_lr10 +( + nvswitch_device *device, + NvU32 entry, + NvU32 *entry_device, + NvU32 *entry_id, + NvU32 *entry_version +) +{ + *entry_device = DRF_VAL(_SWPTOP, _, ENUM_DEVICE, entry); + *entry_id = DRF_VAL(_SWPTOP, _, ENUM_ID, entry); + *entry_version = DRF_VAL(_SWPTOP, _, ENUM_VERSION, entry); +} + +static void +_nvswitch_ptop_handle_data1_lr10 +( + nvswitch_device *device, + NvU32 entry, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 entry_device, + NvU32 *discovery_list_size +) +{ + if (NV_SWPTOP_ENUM_DEVICE_PTOP == entry_device) + { + *discovery_list_size = DRF_VAL(_SWPTOP, _DATA1, _PTOP_LENGTH, entry); + return; + } + else + { + NVSWITCH_ASSERT(DRF_VAL(_SWPTOP, _DATA1, _RESERVED, entry) == 0); + } + + if (engine == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s:DATA1:engine == NULL. 
Skipping processing\n", + __FUNCTION__); + return; + } + + engine->info.top.cluster = DRF_VAL(_SWPTOP, _DATA1, _CLUSTER_TYPE, entry); + engine->info.top.cluster_id = DRF_VAL(_SWPTOP, _DATA1, _CLUSTER_NUMBER, entry); + engine->disc_type = DISCOVERY_TYPE_DISCOVERY; +} + +static void +_nvswitch_ptop_handle_data2_lr10 +( + nvswitch_device *device, + NvU32 entry, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 entry_device +) +{ + NvU32 data2_type = DRF_VAL(_SWPTOP, _DATA2, _TYPE, entry); + NvU32 data2_addr = DRF_VAL(_SWPTOP, _DATA2, _ADDR, entry); + + switch(data2_type) + { + case NV_SWPTOP_DATA2_TYPE_DISCOVERY: + // Parse sub-discovery table + engine->disc_type = DISCOVERY_TYPE_DISCOVERY; + engine->info.top.discovery = data2_addr*sizeof(NvU32); + break; + case NV_SWPTOP_DATA2_TYPE_UNICAST: + engine->disc_type = DISCOVERY_TYPE_UNICAST; + engine->info.uc.uc_addr = data2_addr*sizeof(NvU32); + break; + case NV_SWPTOP_DATA2_TYPE_BROADCAST: + engine->disc_type = DISCOVERY_TYPE_BROADCAST; + engine->info.bc.bc_addr = data2_addr*sizeof(NvU32); + break; + case NV_SWPTOP_DATA2_TYPE_MULTICAST0: + case NV_SWPTOP_DATA2_TYPE_MULTICAST1: + case NV_SWPTOP_DATA2_TYPE_MULTICAST2: + { + NvU32 mc_idx = data2_type - NV_SWPTOP_DATA2_TYPE_MULTICAST0; + engine->disc_type = DISCOVERY_TYPE_BROADCAST; + engine->info.bc.mc_addr[mc_idx] = data2_addr*sizeof(NvU32); + } + break; + case NV_SWPTOP_DATA2_TYPE_INVALID: + NVSWITCH_PRINT(device, SETUP, + "%s:_DATA2: %s=%6x\n", + __FUNCTION__, + "_INVALID", data2_addr); + engine->disc_type = DISCOVERY_TYPE_UNDEFINED; + break; + default: + NVSWITCH_PRINT(device, SETUP, + "%s:_DATA2: Unknown type 0x%x (0x%08x)!\n", + __FUNCTION__, data2_type, entry); + engine->disc_type = DISCOVERY_TYPE_UNDEFINED; + NVSWITCH_ASSERT(0); + break; + } +} + +void +nvswitch_nvlw_parse_entry_lr10 +( + nvswitch_device *device, + NvU32 entry, + NvU32 *entry_type, + NvBool *entry_chain +) +{ + NvU32 entry_type_nvlw; + + entry_type_nvlw = DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _ENTRY, entry); + *entry_chain = FLD_TEST_DRF(_NVLINKIP, _DISCOVERY_COMMON, _CHAIN, _ENABLE, entry); + + switch (entry_type_nvlw) + { + case NV_NVLINKIP_DISCOVERY_COMMON_ENTRY_ENUM: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_ENUM; + break; + case NV_NVLINKIP_DISCOVERY_COMMON_ENTRY_DATA1: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_DATA1; + break; + case NV_NVLINKIP_DISCOVERY_COMMON_ENTRY_DATA2: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_DATA2; + break; + default: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_INVALID; + break; + } +} + +void +nvswitch_nvlw_parse_enum_lr10 +( + nvswitch_device *device, + NvU32 entry, + NvU32 *entry_device, + NvU32 *entry_id, + NvU32 *entry_version +) +{ + NvU32 entry_reserved; + + *entry_device = DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _DEVICE, entry); + *entry_id = DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _ID, entry); + *entry_version = DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _VERSION, entry); + + entry_reserved = DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _RESERVED, entry); + NVSWITCH_ASSERT(entry_reserved == 0); + + if (*entry_version != NV_NVLINKIP_DISCOVERY_COMMON_VERSION_NVLINK30) + { + NVSWITCH_PRINT(device, ERROR, + "%s:_NVLINKIP, _DISCOVERY_COMMON, _VERSION = %x but expected %x (_NVLINK30).\n", + __FUNCTION__, *entry_version, NV_NVLINKIP_DISCOVERY_COMMON_VERSION_NVLINK30); + } +} + +void +nvswitch_nvlw_handle_data1_lr10 +( + nvswitch_device *device, + NvU32 entry, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 entry_device, + NvU32 *discovery_list_size +) +{ + if ((NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_IOCTRL == 
entry_device) || + (NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_SIOCTRL == entry_device) || + (NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_TIOCTRL == entry_device) || + (NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLW == entry_device)) + { + *discovery_list_size = DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _DATA1_IOCTRL_LENGTH, entry); + } + + if (engine == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s:DATA1:engine == NULL. Skipping processing\n", + __FUNCTION__); + return; + } + + if ((NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_IOCTRL != entry_device) && + (NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_SIOCTRL != entry_device) && + (NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_TIOCTRL != entry_device) && + (NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_NVLW != entry_device)) + { + // Nothing specific needed to handle + if (0 != DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _DATA1_RESERVED, entry)) + { + NVSWITCH_PRINT(device, WARN, + "%s:WARNING:IOCTRL _RESERVED field != 0 (entry %x -> %x)\n", + __FUNCTION__, + entry, DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _DATA1_RESERVED, entry)); + } + } + + if (0 != DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _DATA1_RESERVED2, entry)) + { + NVSWITCH_PRINT(device, WARN, + "%s:WARNING:IOCTRL _RESERVED2 field != 0 (entry %x -> %x)\n", + __FUNCTION__, + entry, DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON, _DATA1_RESERVED2, entry)); + } +} + +void +nvswitch_nvlw_handle_data2_lr10 +( + nvswitch_device *device, + NvU32 entry, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 entry_device +) +{ + NvU32 data2_type = DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON_DATA2, _TYPE, entry); + NvU32 data2_addr = DRF_VAL(_NVLINKIP, _DISCOVERY_COMMON_DATA2, _ADDR, entry); + + switch(data2_type) + { + + case NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_DISCOVERY: + // Parse sub-discovery table + + // + // Currently _DISCOVERY is not used in the second + // level discovery. 
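+ // Encountering a nested discovery pointer at this level is unexpected,
+ // so fail loudly via the assert below rather than try to parse it.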
+ // + NVSWITCH_ASSERT(0); + + break; + + case NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_UNICAST: + engine->disc_type = DISCOVERY_TYPE_UNICAST; + engine->info.uc.uc_addr = data2_addr*sizeof(NvU32); + break; + + case NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_BROADCAST: + engine->disc_type = DISCOVERY_TYPE_BROADCAST; + engine->info.bc.bc_addr = data2_addr*sizeof(NvU32); + break; + + case NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_MULTICAST0: + case NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_MULTICAST1: + case NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_MULTICAST2: + { + NvU32 mc_idx = data2_type - NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_MULTICAST0; + engine->disc_type = DISCOVERY_TYPE_BROADCAST; + engine->info.bc.mc_addr[mc_idx] = data2_addr*sizeof(NvU32); + } + break; + + case NV_NVLINKIP_DISCOVERY_COMMON_DATA2_TYPE_INVALID: + NVSWITCH_PRINT(device, ERROR, + "%s:_DATA2: %s=%6x\n", + __FUNCTION__, + "_INVALID", data2_addr); + engine->disc_type = DISCOVERY_TYPE_UNDEFINED; + break; + + default: + NVSWITCH_PRINT(device, ERROR, + "%s:_DATA2: Unknown!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + break; + } +} + +static void +_nvswitch_npg_parse_entry_lr10 +( + nvswitch_device *device, + NvU32 entry, + NvU32 *entry_type, + NvBool *entry_chain +) +{ + NvU32 entry_type_npg; + + entry_type_npg = DRF_VAL(_NPG, _DISCOVERY, _ENTRY, entry); + *entry_chain = FLD_TEST_DRF(_NPG, _DISCOVERY, _CHAIN, _ENABLE, entry); + + switch (entry_type_npg) + { + case NV_NPG_DISCOVERY_ENTRY_ENUM: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_ENUM; + break; + case NV_NPG_DISCOVERY_ENTRY_DATA1: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_DATA1; + break; + case NV_NPG_DISCOVERY_ENTRY_DATA2: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_DATA2; + break; + default: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_INVALID; + break; + } +} + +static void +_nvswitch_npg_parse_enum_lr10 +( + nvswitch_device *device, + NvU32 entry, + NvU32 *entry_device, + NvU32 *entry_id, + NvU32 *entry_version +) +{ + *entry_device = DRF_VAL(_NPG, _DISCOVERY, _ENUM_DEVICE, entry); + *entry_id = DRF_VAL(_NPG, _DISCOVERY, _ENUM_ID, entry); + *entry_version = DRF_VAL(_NPG, _DISCOVERY, _ENUM_VERSION, entry); + NVSWITCH_ASSERT(DRF_VAL(_NPG, _DISCOVERY, _ENUM_RESERVED, entry) == 0); + + if (*entry_version != NV_NPG_DISCOVERY_ENUM_VERSION_2) + { + NVSWITCH_PRINT(device, ERROR, + "%s:_NPG_DISCOVERY_ENUM_VERSION = %x but expected %x (_VERSION_2).\n", + __FUNCTION__, *entry_version, NV_NPG_DISCOVERY_ENUM_VERSION_2); + } +} + +static void +_nvswitch_npg_handle_data1_lr10 +( + nvswitch_device *device, + NvU32 entry, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 entry_device, + NvU32 *discovery_list_size +) +{ + if (NV_NPG_DISCOVERY_ENUM_DEVICE_NPG == entry_device) + { + *discovery_list_size = DRF_VAL(_NPG, _DISCOVERY, _DATA1_NPG_LENGTH, entry); + } + else + { + NVSWITCH_ASSERT(0 == DRF_VAL(_NPG, _DISCOVERY, _DATA1_RESERVED, entry)); + } + + if (engine == NULL) + { + NVSWITCH_PRINT(device, SETUP, + "%s:DATA1:engine == NULL. Skipping processing\n", + __FUNCTION__); + return; + } +} + +static void +_nvswitch_npg_handle_data2_lr10 +( + nvswitch_device *device, + NvU32 entry, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 entry_device +) +{ + NvU32 data2_type = DRF_VAL(_NPG, _DISCOVERY_DATA2, _TYPE, entry); + NvU32 data2_addr = DRF_VAL(_NPG, _DISCOVERY_DATA2, _ADDR, entry); + + switch(data2_type) + { + case NV_NPG_DISCOVERY_DATA2_TYPE_DISCOVERY: + // Parse sub-discovery table + + // + // Currently _DISCOVERY is not used in the second + // level discovery. 
+ // + NVSWITCH_ASSERT(0); + + break; + + case NV_NPG_DISCOVERY_DATA2_TYPE_UNICAST: + engine->disc_type = DISCOVERY_TYPE_UNICAST; + engine->info.uc.uc_addr = data2_addr*sizeof(NvU32); + break; + + case NV_NPG_DISCOVERY_DATA2_TYPE_BROADCAST: + engine->disc_type = DISCOVERY_TYPE_BROADCAST; + engine->info.bc.bc_addr = data2_addr*sizeof(NvU32); + break; + + case NV_NPG_DISCOVERY_DATA2_TYPE_MULTICAST0: + case NV_NPG_DISCOVERY_DATA2_TYPE_MULTICAST1: + case NV_NPG_DISCOVERY_DATA2_TYPE_MULTICAST2: + { + NvU32 mc_idx = data2_type - NV_NPG_DISCOVERY_DATA2_TYPE_MULTICAST0; + engine->disc_type = DISCOVERY_TYPE_BROADCAST; + engine->info.bc.mc_addr[mc_idx] = data2_addr*sizeof(NvU32); + } + break; + + case NV_NPG_DISCOVERY_DATA2_TYPE_INVALID: + NVSWITCH_PRINT(device, SETUP, + "%s:_DATA2: %s=%6x\n", + __FUNCTION__, + "_INVALID", data2_addr); + engine->disc_type = DISCOVERY_TYPE_UNDEFINED; + break; + + default: + NVSWITCH_PRINT(device, SETUP, + "%s:_DATA2: Unknown!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + break; + } +} + +void +nvswitch_nxbar_parse_entry_lr10 +( + nvswitch_device *device, + NvU32 entry, + NvU32 *entry_type, + NvBool *entry_chain +) +{ + NvU32 entry_type_nxbar; + + entry_type_nxbar = DRF_VAL(_NXBAR, _DISCOVERY, _ENTRY, entry); + *entry_chain = FLD_TEST_DRF(_NXBAR, _DISCOVERY, _CHAIN, _ENABLE, entry); + + switch (entry_type_nxbar) + { + case NV_NXBAR_DISCOVERY_ENTRY_ENUM: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_ENUM; + break; + case NV_NXBAR_DISCOVERY_ENTRY_DATA1: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_DATA1; + break; + case NV_NXBAR_DISCOVERY_ENTRY_DATA2: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_DATA2; + break; + default: + *entry_type = NVSWITCH_DISCOVERY_ENTRY_INVALID; + break; + } +} + +void +nvswitch_nxbar_parse_enum_lr10 +( + nvswitch_device *device, + NvU32 entry, + NvU32 *entry_device, + NvU32 *entry_id, + NvU32 *entry_version +) +{ + NvU32 entry_reserved; + + *entry_device = DRF_VAL(_NXBAR, _DISCOVERY, _ENUM_DEVICE, entry); + *entry_id = DRF_VAL(_NXBAR, _DISCOVERY, _ENUM_ID, entry); + *entry_version = DRF_VAL(_NXBAR, _DISCOVERY, _ENUM_VERSION, entry); + + entry_reserved = DRF_VAL(_NXBAR, _DISCOVERY, _ENUM_RESERVED, entry); + NVSWITCH_ASSERT(entry_reserved == 0); + + if (*entry_version != NV_NXBAR_DISCOVERY_ENUM_VERSION_2) + { + NVSWITCH_PRINT(device, ERROR, + "%s:_NXBAR_DISCOVERY_ENUM_VERSION = %x but expected %x (_VERSION_2).\n", + __FUNCTION__, *entry_version, NV_NXBAR_DISCOVERY_ENUM_VERSION_2); + } +} + +void +nvswitch_nxbar_handle_data1_lr10 +( + nvswitch_device *device, + NvU32 entry, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 entry_device, + NvU32 *discovery_list_size +) +{ + if (NV_NXBAR_DISCOVERY_ENUM_DEVICE_NXBAR == entry_device) + { + *discovery_list_size = DRF_VAL(_NXBAR, _DISCOVERY, _DATA1_NXBAR_LENGTH, entry); + } + + if (engine == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s:DATA1:engine == NULL. 
Skipping processing\n", + __FUNCTION__); + return; + } + + if (NV_NXBAR_DISCOVERY_ENUM_DEVICE_NXBAR != entry_device) + { + NVSWITCH_ASSERT(DRF_VAL(_NXBAR, _DISCOVERY, _DATA1_RESERVED, entry) == 0); + } +} + +void +nvswitch_nxbar_handle_data2_lr10 +( + nvswitch_device *device, + NvU32 entry, + ENGINE_DESCRIPTOR_TYPE_LR10 *engine, + NvU32 entry_device +) +{ + NvU32 data2_type = DRF_VAL(_NXBAR, _DISCOVERY_DATA2, _TYPE, entry); + NvU32 data2_addr = DRF_VAL(_NXBAR, _DISCOVERY_DATA2, _ADDR, entry); + + switch(data2_type) + { + case NV_NXBAR_DISCOVERY_DATA2_TYPE_DISCOVERY: + // Parse sub-discovery table + + // + // Currently _DISCOVERY is not used in the second + // level discovery. + // + NVSWITCH_ASSERT(0); + + break; + + case NV_NXBAR_DISCOVERY_DATA2_TYPE_UNICAST: + engine->disc_type = DISCOVERY_TYPE_UNICAST; + engine->info.uc.uc_addr = data2_addr*sizeof(NvU32); + break; + + case NV_NXBAR_DISCOVERY_DATA2_TYPE_BROADCAST: + engine->disc_type = DISCOVERY_TYPE_BROADCAST; + engine->info.bc.bc_addr = data2_addr*sizeof(NvU32); + engine->info.bc.mc_addr[0] = 0; + engine->info.bc.mc_addr[1] = 0; + engine->info.bc.mc_addr[2] = 0; + break; + + case NV_NXBAR_DISCOVERY_DATA2_TYPE_MULTICAST0: + case NV_NXBAR_DISCOVERY_DATA2_TYPE_MULTICAST1: + case NV_NXBAR_DISCOVERY_DATA2_TYPE_MULTICAST2: + { + NvU32 mc_idx = data2_type - NV_NXBAR_DISCOVERY_DATA2_TYPE_MULTICAST0; + engine->info.bc.mc_addr[mc_idx] = data2_addr*sizeof(NvU32); + NVSWITCH_PRINT(device, ERROR, + "%s:_DATA2: NXBAR MULTICAST%d=0x%x but should be unused!\n", + __FUNCTION__, mc_idx, engine->info.bc.mc_addr[mc_idx]); + NVSWITCH_ASSERT(0); + } + break; + + case NV_NXBAR_DISCOVERY_DATA2_TYPE_INVALID: + NVSWITCH_PRINT(device, ERROR, + "%s:_DATA2: %s=%6x\n", + __FUNCTION__, + "_INVALID", data2_addr); + engine->disc_type = DISCOVERY_TYPE_UNDEFINED; + break; + + default: + NVSWITCH_PRINT(device, ERROR, + "%s:_DATA2: Unknown!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + break; + } +} + +#define MAKE_DISCOVERY_NVLINK_LR10(_eng, _bcast) \ + { \ + #_eng#_bcast, \ + NUM_##_eng##_bcast##_ENGINE_LR10, \ + NV_NVLINKIP_DISCOVERY_COMMON_DEVICE_##_eng, \ + chip_device->eng##_eng##_bcast \ + } + +#define MAKE_DISCOVERY_NPG_LR10(_eng, _bcast) \ + { \ + #_eng#_bcast, \ + NUM_##_eng##_bcast##_ENGINE_LR10, \ + NV_NPG_DISCOVERY_ENUM_DEVICE_##_eng, \ + chip_device->eng##_eng##_bcast \ + } + +#define MAKE_DISCOVERY_NXBAR_LR10(_eng, _bcast) \ + { \ + #_eng#_bcast, \ + NUM_##_eng##_bcast##_ENGINE_LR10, \ + NV_NXBAR_DISCOVERY_ENUM_DEVICE_##_eng, \ + chip_device->eng##_eng##_bcast \ + } + +static +NVSWITCH_DISCOVERY_HANDLERS_LR10 discovery_handlers_ptop_lr10 = +{ + &_nvswitch_ptop_parse_entry_lr10, + &_nvswitch_ptop_parse_enum_lr10, + &_nvswitch_ptop_handle_data1_lr10, + &_nvswitch_ptop_handle_data2_lr10 +}; + +static +NVSWITCH_DISCOVERY_HANDLERS_LR10 discovery_handlers_npg_lr10 = +{ + &_nvswitch_npg_parse_entry_lr10, + &_nvswitch_npg_parse_enum_lr10, + &_nvswitch_npg_handle_data1_lr10, + &_nvswitch_npg_handle_data2_lr10 +}; + +static +NVSWITCH_DISCOVERY_HANDLERS_LR10 discovery_handlers_nvlw_lr10 = +{ + &nvswitch_nvlw_parse_entry_lr10, + &nvswitch_nvlw_parse_enum_lr10, + &nvswitch_nvlw_handle_data1_lr10, + &nvswitch_nvlw_handle_data2_lr10 +}; + +static +NVSWITCH_DISCOVERY_HANDLERS_LR10 discovery_handlers_nxbar_lr10 = +{ + &nvswitch_nxbar_parse_entry_lr10, + &nvswitch_nxbar_parse_enum_lr10, + &nvswitch_nxbar_handle_data1_lr10, + &nvswitch_nxbar_handle_data2_lr10 +}; + +// +// Parse top level PTOP engine discovery information to identify MMIO, interrupt, and +// reset information 
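+// The PTOP table is walked first to locate every top-level unit; each
+// NVLW/NPG/NXBAR instance that advertises a sub-table is then walked again
+// with its unit-specific parse/handle callbacks.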
+// + +NvlStatus +nvswitch_device_discovery_lr10 +( + nvswitch_device *device, + NvU32 discovery_offset +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + DISCOVERY_TABLE_TYPE_LR10 discovery_table_lr10[] = + { + MAKE_DISCOVERY_LR10(device, LR10, PTOP), + MAKE_DISCOVERY_LR10(device, LR10, NPG), + MAKE_DISCOVERY_LR10(device, LR10, NPG_BCAST), + MAKE_DISCOVERY_LR10(device, LR10, CLKS), + MAKE_DISCOVERY_LR10(device, LR10, FUSE), + MAKE_DISCOVERY_LR10(device, LR10, JTAG), + MAKE_DISCOVERY_LR10(device, LR10, PMGR), + MAKE_DISCOVERY_LR10(device, LR10, SAW), + MAKE_DISCOVERY_LR10(device, LR10, XP3G), + MAKE_DISCOVERY_LR10(device, LR10, XVE), + MAKE_DISCOVERY_LR10(device, LR10, ROM), + MAKE_DISCOVERY_LR10(device, LR10, EXTDEV), + MAKE_DISCOVERY_LR10(device, LR10, PRIVMAIN), + MAKE_DISCOVERY_LR10(device, LR10, PRIVLOC), + MAKE_DISCOVERY_LR10(device, LR10, PTIMER), + MAKE_DISCOVERY_LR10(device, LR10, SOE), + MAKE_DISCOVERY_LR10(device, LR10, SMR), + MAKE_DISCOVERY_LR10(device, LR10, I2C), + MAKE_DISCOVERY_LR10(device, LR10, SE), + MAKE_DISCOVERY_LR10(device, LR10, NVLW), + MAKE_DISCOVERY_LR10(device, LR10, NVLW_BCAST), + MAKE_DISCOVERY_LR10(device, LR10, NXBAR), + MAKE_DISCOVERY_LR10(device, LR10, NXBAR_BCAST), + MAKE_DISCOVERY_LR10(device, LR10, THERM) + }; + NvU32 discovery_table_lr10_size = NV_ARRAY_ELEMENTS(discovery_table_lr10); + NvU32 i; + NvlStatus status; + + status = _nvswitch_device_discovery_lr10( + device, discovery_offset, discovery_table_lr10, discovery_table_lr10_size, + &discovery_handlers_ptop_lr10); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "MMIO discovery failed\n"); + return status; + } + + if (VERBOSE_MMIO_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "PTOP Discovery\n"); + + DISCOVERY_DUMP_ENGINE_LR10(device, PTOP, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NPG, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NPG, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, CLKS, ); + DISCOVERY_DUMP_ENGINE_LR10(device, FUSE, ); + DISCOVERY_DUMP_ENGINE_LR10(device, JTAG, ); + DISCOVERY_DUMP_ENGINE_LR10(device, PMGR, ); + DISCOVERY_DUMP_ENGINE_LR10(device, SAW, ); + DISCOVERY_DUMP_ENGINE_LR10(device, XP3G, ); + DISCOVERY_DUMP_ENGINE_LR10(device, XVE, ); + DISCOVERY_DUMP_ENGINE_LR10(device, ROM, ); + DISCOVERY_DUMP_ENGINE_LR10(device, EXTDEV, ); + DISCOVERY_DUMP_ENGINE_LR10(device, PRIVMAIN, ); + DISCOVERY_DUMP_ENGINE_LR10(device, PRIVLOC, ); + DISCOVERY_DUMP_ENGINE_LR10(device, PTIMER, ); + DISCOVERY_DUMP_ENGINE_LR10(device, SOE, ); + DISCOVERY_DUMP_ENGINE_LR10(device, SMR, ); + DISCOVERY_DUMP_ENGINE_LR10(device, I2C, ); + DISCOVERY_DUMP_ENGINE_LR10(device, SE, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLW, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLW, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NXBAR, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NXBAR, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, THERM, ); + } + + for (i = 0; i < NUM_NVLW_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NVLW, i) && + (chip_device->engNVLW[i].info.top.discovery != 0)) + { + DISCOVERY_TABLE_TYPE_LR10 discovery_table_nvlw[] = + { + MAKE_DISCOVERY_NVLINK_LR10(MINION, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLIPT, ), + MAKE_DISCOVERY_NVLINK_LR10(TX_PERFMON, ), + MAKE_DISCOVERY_NVLINK_LR10(RX_PERFMON, ), + MAKE_DISCOVERY_NVLINK_LR10(TX_PERFMON_MULTICAST, ), + MAKE_DISCOVERY_NVLINK_LR10(RX_PERFMON_MULTICAST, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLTLC, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLTLC_MULTICAST, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLIPT_SYS_PERFMON, ), + 
MAKE_DISCOVERY_NVLINK_LR10(NVLW, ), + MAKE_DISCOVERY_NVLINK_LR10(PLL, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLW_PERFMON, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLDL_MULTICAST, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLIPT_LNK_MULTICAST, ), + MAKE_DISCOVERY_NVLINK_LR10(SYS_PERFMON_MULTICAST, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLDL, ), + MAKE_DISCOVERY_NVLINK_LR10(NVLIPT_LNK, ), + MAKE_DISCOVERY_NVLINK_LR10(SYS_PERFMON, ) + }; + NvU32 discovery_table_nvlw_size = NV_ARRAY_ELEMENTS(discovery_table_nvlw); + + status = _nvswitch_device_discovery_lr10( + device, chip_device->engNVLW[i].info.top.discovery, discovery_table_nvlw, + discovery_table_nvlw_size, &discovery_handlers_nvlw_lr10); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "NVLW[%d] discovery failed\n", i); + return status; + } + } + } + if (VERBOSE_MMIO_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "NVLW[0..%d] Discovery\n", + NUM_NVLW_ENGINE_LR10-1); + DISCOVERY_DUMP_ENGINE_LR10(device, MINION, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLIPT, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLTLC, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLTLC_MULTICAST, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLIPT_SYS_PERFMON, ); + DISCOVERY_DUMP_ENGINE_LR10(device, TX_PERFMON_MULTICAST, ); + DISCOVERY_DUMP_ENGINE_LR10(device, RX_PERFMON_MULTICAST, ); + DISCOVERY_DUMP_ENGINE_LR10(device, TX_PERFMON, ); + DISCOVERY_DUMP_ENGINE_LR10(device, RX_PERFMON, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLW, ); + DISCOVERY_DUMP_ENGINE_LR10(device, PLL, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLW_PERFMON, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLDL_MULTICAST, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLIPT_LNK_MULTICAST, ); + DISCOVERY_DUMP_ENGINE_LR10(device, SYS_PERFMON_MULTICAST, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLDL, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLIPT_LNK, ); + DISCOVERY_DUMP_ENGINE_LR10(device, SYS_PERFMON, ); + } + + for (i = 0; i < NUM_NVLW_BCAST_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NVLW_BCAST, i) && + (chip_device->engNVLW_BCAST[i].info.top.discovery != 0)) + { + DISCOVERY_TABLE_TYPE_LR10 discovery_table_nvlw[] = + { + MAKE_DISCOVERY_NVLINK_LR10(MINION, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLIPT, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLTLC, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLTLC_MULTICAST, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLIPT_SYS_PERFMON, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(TX_PERFMON_MULTICAST, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(RX_PERFMON_MULTICAST, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(TX_PERFMON, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(RX_PERFMON, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLW, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(PLL, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLW_PERFMON, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLDL_MULTICAST, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLIPT_LNK_MULTICAST, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(SYS_PERFMON_MULTICAST, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLDL, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(NVLIPT_LNK, _BCAST), + MAKE_DISCOVERY_NVLINK_LR10(SYS_PERFMON, _BCAST) + }; + NvU32 discovery_table_nvlw_size = NV_ARRAY_ELEMENTS(discovery_table_nvlw); + + status = _nvswitch_device_discovery_lr10( + device, chip_device->engNVLW_BCAST[i].info.top.discovery, discovery_table_nvlw, + discovery_table_nvlw_size, &discovery_handlers_nvlw_lr10); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "NVLW_BCAST[%d] discovery failed\n", i); + return status; + } + } + } + if (VERBOSE_MMIO_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "NVLW_BCAST[0..%d] 
Discovery\n", + NUM_NVLW_BCAST_ENGINE_LR10-1); + DISCOVERY_DUMP_ENGINE_LR10(device, MINION, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLIPT, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLTLC, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLTLC_MULTICAST, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLIPT_SYS_PERFMON, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, TX_PERFMON_MULTICAST, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, RX_PERFMON_MULTICAST, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, TX_PERFMON, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, RX_PERFMON, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLW, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, PLL, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLW_PERFMON, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLDL_MULTICAST, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLIPT_LNK_MULTICAST, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, SYS_PERFMON_MULTICAST, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLDL, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NVLIPT_LNK, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, SYS_PERFMON, _BCAST); + } + + for (i = 0; i < NUM_NPG_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NPG, i) && + (chip_device->engNPG[i].info.top.discovery != 0)) + { + DISCOVERY_TABLE_TYPE_LR10 discovery_table_npg[] = + { + MAKE_DISCOVERY_NPG_LR10(NPG, ), + MAKE_DISCOVERY_NPG_LR10(NPORT, ), + MAKE_DISCOVERY_NPG_LR10(NPORT_MULTICAST, ), + MAKE_DISCOVERY_NPG_LR10(NPG_PERFMON, ), + MAKE_DISCOVERY_NPG_LR10(NPORT_PERFMON, ), + MAKE_DISCOVERY_NPG_LR10(NPORT_PERFMON_MULTICAST, ) + }; + NvU32 discovery_table_npg_size = NV_ARRAY_ELEMENTS(discovery_table_npg); + + status = _nvswitch_device_discovery_lr10( + device, chip_device->engNPG[i].info.top.discovery, discovery_table_npg, + discovery_table_npg_size, &discovery_handlers_npg_lr10); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "NPG[%d] discovery failed\n", i); + return status; + } + } + } + if (VERBOSE_MMIO_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "NPG[0..%d] Discovery\n", + NUM_NPG_ENGINE_LR10-1); + DISCOVERY_DUMP_ENGINE_LR10(device, NPG, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NPORT, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NPORT_MULTICAST, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NPG_PERFMON, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NPORT_PERFMON, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NPORT_PERFMON_MULTICAST, ); + } + + for (i = 0; i < NUM_NPG_BCAST_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NPG_BCAST, i) && + (chip_device->engNPG_BCAST[i].info.top.discovery != 0)) + { + DISCOVERY_TABLE_TYPE_LR10 discovery_table_npg[] = + { + MAKE_DISCOVERY_NPG_LR10(NPG, _BCAST), + MAKE_DISCOVERY_NPG_LR10(NPORT, _BCAST), + MAKE_DISCOVERY_NPG_LR10(NPORT_MULTICAST, _BCAST), + MAKE_DISCOVERY_NPG_LR10(NPG_PERFMON, _BCAST), + MAKE_DISCOVERY_NPG_LR10(NPORT_PERFMON, _BCAST), + MAKE_DISCOVERY_NPG_LR10(NPORT_PERFMON_MULTICAST, _BCAST) + }; + NvU32 discovery_table_npg_size = NV_ARRAY_ELEMENTS(discovery_table_npg); + + status = _nvswitch_device_discovery_lr10( + device, chip_device->engNPG_BCAST[i].info.top.discovery, discovery_table_npg, + discovery_table_npg_size, &discovery_handlers_npg_lr10); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "NPG_BCAST[%d] discovery failed\n", i); + return status; + } + } + } + if (VERBOSE_MMIO_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "NPG_BCAST[%d] Discovery\n", + NUM_NPG_BCAST_ENGINE_LR10-1); + DISCOVERY_DUMP_ENGINE_LR10(device, NPG, _BCAST); + 
DISCOVERY_DUMP_ENGINE_LR10(device, NPORT, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NPORT_MULTICAST, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NPG_PERFMON, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NPORT_PERFMON, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NPORT_PERFMON_MULTICAST, _BCAST); + } + + for (i = 0; i < NUM_NXBAR_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NXBAR, i) && + (chip_device->engNXBAR[i].info.top.discovery != 0)) + { + DISCOVERY_TABLE_TYPE_LR10 discovery_table_nxbar[] = + { + MAKE_DISCOVERY_NXBAR_LR10(NXBAR, ), + MAKE_DISCOVERY_NXBAR_LR10(TILE, ), + MAKE_DISCOVERY_NXBAR_LR10(TILE_MULTICAST, ), + MAKE_DISCOVERY_NXBAR_LR10(NXBAR_PERFMON, ), + MAKE_DISCOVERY_NXBAR_LR10(TILE_PERFMON, ), + MAKE_DISCOVERY_NXBAR_LR10(TILE_PERFMON_MULTICAST, ) + }; + NvU32 discovery_table_nxbar_size = NV_ARRAY_ELEMENTS(discovery_table_nxbar); + + status = _nvswitch_device_discovery_lr10( + device, chip_device->engNXBAR[i].info.top.discovery, + discovery_table_nxbar, discovery_table_nxbar_size, + &discovery_handlers_nxbar_lr10); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "NXBAR[%d] discovery failed\n", i); + return status; + } + } + } + if (VERBOSE_MMIO_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "NXBAR[0..%d] Discovery\n", + NUM_NXBAR_ENGINE_LR10-1); + DISCOVERY_DUMP_ENGINE_LR10(device, NXBAR, ); + DISCOVERY_DUMP_ENGINE_LR10(device, TILE, ); + DISCOVERY_DUMP_ENGINE_LR10(device, TILE_MULTICAST, ); + DISCOVERY_DUMP_ENGINE_LR10(device, NXBAR_PERFMON, ); + DISCOVERY_DUMP_ENGINE_LR10(device, TILE_PERFMON, ); + DISCOVERY_DUMP_ENGINE_LR10(device, TILE_PERFMON_MULTICAST, ); + } + + for (i = 0; i < NUM_NXBAR_BCAST_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NXBAR_BCAST, i) && + (chip_device->engNXBAR_BCAST[i].info.top.discovery != 0)) + { + DISCOVERY_TABLE_TYPE_LR10 discovery_table_nxbar[] = + { + MAKE_DISCOVERY_NXBAR_LR10(NXBAR, _BCAST), + MAKE_DISCOVERY_NXBAR_LR10(TILE, _BCAST), + MAKE_DISCOVERY_NXBAR_LR10(TILE_MULTICAST, _BCAST), + MAKE_DISCOVERY_NXBAR_LR10(NXBAR_PERFMON, _BCAST), + MAKE_DISCOVERY_NXBAR_LR10(TILE_PERFMON, _BCAST), + MAKE_DISCOVERY_NXBAR_LR10(TILE_PERFMON_MULTICAST, _BCAST) + }; + NvU32 discovery_table_nxbar_size = NV_ARRAY_ELEMENTS(discovery_table_nxbar); + + status = _nvswitch_device_discovery_lr10( + device, chip_device->engNXBAR_BCAST[i].info.top.discovery, + discovery_table_nxbar, discovery_table_nxbar_size, + &discovery_handlers_nxbar_lr10); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "NXBAR_BCAST[%d] discovery failed\n", i); + return status; + } + } + } + if (VERBOSE_MMIO_DISCOVERY) + { + NVSWITCH_PRINT(device, SETUP, + "NXBAR_BCAST[0..%d] Discovery\n", + NUM_NXBAR_BCAST_ENGINE_LR10-1); + DISCOVERY_DUMP_ENGINE_LR10(device, NXBAR, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, TILE, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, TILE_MULTICAST, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, NXBAR_PERFMON, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, TILE_PERFMON, _BCAST); + DISCOVERY_DUMP_ENGINE_LR10(device, TILE_PERFMON_MULTICAST, _BCAST); + } + + return status; +} + +// +// Filter engine discovery information to handle platform-specific differences. +// +// Emulation and RTL sims have devices that show up in the discovery table but +// are actually tied off and not present. On GPU the engine enables and +// floorsweeping info are used to disable devices that are not present. +// But a similar mechanism does not exist in NvSwitch. 
+// So instead we invalidate the devices that are known to be not-present on a +// given platform. +// + +void +nvswitch_filter_discovery_lr10 +( + nvswitch_device *device +) +{ + return; +} + +#define NVSWITCH_PROCESS_DISCOVERY(_current, _engine, _multicast) \ + { \ + NvU32 i; \ + ct_assert(NUM_##_engine##_ENGINE_LR10 <= NVSWITCH_ENGINE_DESCRIPTOR_UC_SIZE); \ + \ + _current->eng_name = #_engine; \ + _current->eng_id = NVSWITCH_ENGINE_ID_##_engine; \ + _current->eng_count = NUM_##_engine##_ENGINE_LR10; \ + \ + for (i = 0; i < NUM_##_engine##_ENGINE_LR10; i++) \ + { \ + if (chip_device->eng##_engine[i].valid) \ + { \ + _current->uc_addr[i] = \ + chip_device->eng##_engine[i].info.uc.uc_addr; \ + } \ + } \ + \ + if (chip_device->eng##_engine##_multicast[0].valid) \ + { \ + _current->bc_addr = \ + chip_device->eng##_engine##_multicast[0].info.bc.bc_addr; \ + } \ + \ + _current->mc_addr_count = 0; \ + } \ + +#define NVSWITCH_PROCESS_COMMON(_engine, _multicast) \ + { \ + NVSWITCH_ENGINE_DESCRIPTOR_TYPE *current; \ + ct_assert(NVSWITCH_ENGINE_ID_##_engine < NVSWITCH_ENGINE_ID_SIZE); \ + \ + current = &chip_device->io.common[NVSWITCH_ENGINE_ID_##_engine]; \ + NVSWITCH_PROCESS_DISCOVERY(current, _engine, _multicast) \ + } + +// +// Process engine discovery information to associate engines +// + +NvlStatus +nvswitch_process_discovery_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 i, j; + NvlStatus retval = NVL_SUCCESS; + NvU64 link_enable_mask; + + // + // Process per-link information + // + for (i = 0; i < NVSWITCH_NUM_LINKS_LR10; i++) + { + device->link[i].valid = + NVSWITCH_ENG_VALID_LR10(device, NPORT, NVSWITCH_GET_LINK_ENG_INST(device, i, NPORT)) && + NVSWITCH_ENG_VALID_LR10(device, NVLTLC, NVSWITCH_GET_LINK_ENG_INST(device, i, NVLTLC)) && + NVSWITCH_ENG_VALID_LR10(device, NVLDL, NVSWITCH_GET_LINK_ENG_INST(device, i, NVLDL)) && + NVSWITCH_ENG_VALID_LR10(device, NVLIPT_LNK, NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT_LNK)) && + NVSWITCH_ENG_VALID_LR10(device, NVLW, NVSWITCH_GET_LINK_ENG_INST(device, i, NVLW)) && + NVSWITCH_ENG_VALID_LR10(device, MINION, NVSWITCH_GET_LINK_ENG_INST(device, i, MINION)) && + NVSWITCH_ENG_VALID_LR10(device, NVLIPT, NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT)); + } + + // + // Disable engines requested by regkey "LinkEnableMask". + // All the links are enabled by default. 
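+ // The two 32-bit regkeys are combined below into a single 64-bit mask in
+ // which bit i corresponds to link i; links with a cleared bit are
+ // invalidated along with their per-link engines.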
+ // + link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 | + (NvU64)device->regkeys.link_enable_mask); + + for (i = 0; i < NVSWITCH_NUM_LINKS_LR10; i++) + { + if ((NVBIT64(i) & link_enable_mask) == 0) + { + NVSWITCH_PRINT(device, SETUP, + "%s: Disable link #%d\n", + __FUNCTION__, i); + device->link[i].valid = NV_FALSE; + chip_device->engNPORT[i].valid = NV_FALSE; + chip_device->engNPORT_PERFMON[i].valid = NV_FALSE; + chip_device->engNVLTLC[i].valid = NV_FALSE; + chip_device->engTX_PERFMON[i].valid = NV_FALSE; + chip_device->engRX_PERFMON[i].valid = NV_FALSE; + } + } + + // + // Process common engine information + // + + // Mark all entries as invalid + for (i = 0; i < NVSWITCH_ENGINE_ID_SIZE; i++) + { + chip_device->io.common[i].eng_name = ""; + chip_device->io.common[i].eng_id = NVSWITCH_ENGINE_ID_SIZE; // Out of range + chip_device->io.common[i].eng_count = 0; + for (j = 0; j < NVSWITCH_ENGINE_DESCRIPTOR_UC_SIZE; j++) + { + chip_device->io.common[i].uc_addr[j] = NVSWITCH_BASE_ADDR_INVALID; + } + chip_device->io.common[i].bc_addr = NVSWITCH_BASE_ADDR_INVALID; + for (j = 0; j < NVSWITCH_ENGINE_DESCRIPTOR_MC_SIZE; j++) + { + chip_device->io.common[i].mc_addr[j] = NVSWITCH_BASE_ADDR_INVALID; + } + chip_device->io.common[i].mc_addr_count = 0; + } + + NVSWITCH_LIST_LR10_ENGINES(NVSWITCH_PROCESS_COMMON) + + return retval; +} diff --git a/src/common/nvswitch/kernel/lr10/flcn_lr10.c b/src/common/nvswitch/kernel/lr10/flcn_lr10.c new file mode 100644 index 000000000..aef388b89 --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/flcn_lr10.c @@ -0,0 +1,336 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "common_nvswitch.h" +#include "lr10/lr10.h" +#include "flcn/flcn_nvswitch.h" + +#include "nvswitch/lr10/dev_falcon_v4.h" + +static NvU32 +_flcnRegRead_LR10 +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 offset +) +{ + // Probably should perform some checks on the offset, the device, and the engine descriptor + return nvswitch_reg_read_32(device, pFlcn->engDescUc.base + offset); +} + +static void +_flcnRegWrite_LR10 +( + nvswitch_device *device, + PFLCN pFlcn, + NvU32 offset, + NvU32 data +) +{ + // Probably should perform some checks on the offset, the device, and the engine descriptor + nvswitch_reg_write_32(device, pFlcn->engDescUc.base + offset, data); +} + +/* + * @brief Retrigger an interrupt message from the engine to the NV_CTRL tree + * + * @param[in] device nvswitch_device pointer + * @param[in] pFlcn FLCN pointer + */ +static void +_flcnIntrRetrigger_LR10 +( + nvswitch_device *device, + FLCN *pFlcn +) +{ + NvU32 val = DRF_DEF(_PFALCON, _FALCON_INTR_RETRIGGER, _TRIGGER, _TRUE); + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_INTR_RETRIGGER(0), val); +} + +static NvBool +_flcnAreEngDescsInitialized_LR10 +( + nvswitch_device *device, + FLCN *pFlcn +) +{ + // if pFlcn->engDescUc is 0, we haven't finished discovery, return false + // if pFlcn->engDescUc is NOT 0, and pFlcn->engDescBc is NULL, this is a unicast only engine + return pFlcn->engDescUc.base != 0 && pFlcn->engDescUc.initialized && + (pFlcn->engDescBc.base == 0 || pFlcn->engDescBc.initialized); +} + +/* + * @brief Waits for falcon to finish scrubbing IMEM/DMEM. + * + * @param[in] device switch device + * @param[in] pFlcn FLCN pointer + * + * @returns nothing + */ +static NV_STATUS +_flcnWaitForResetToFinish_LR10 +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NVSWITCH_TIMEOUT timeout; + NvU32 dmaCtrl; + + // Add a dummy write (of anything) to trigger scrubbing + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_MAILBOX0, 0); + + // TODO: Adapt timeout to our model, this should be centralized. + if (IS_EMULATION(device)) + { + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + else + { + nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout); + } + + while (1) + { + dmaCtrl = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_DMACTL); + + if (FLD_TEST_DRF(_PFALCON, _FALCON_DMACTL, _DMEM_SCRUBBING, _DONE, dmaCtrl) && + FLD_TEST_DRF(_PFALCON, _FALCON_DMACTL, _IMEM_SCRUBBING, _DONE, dmaCtrl)) + { + // Operation successful, IMEM and DMEM scrubbing has finished. + return NV_OK; + } + + if (nvswitch_timeout_check(&timeout)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for scrubbing to finish!!!\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + return NV_ERR_TIMEOUT; + } + } +} + +/*! + * @brief Capture and dump the falconPC trace. + * + * @param[in] device nvswitch device pointer + * @param[in] pFlcn FLCN object pointer + * + * @returns nothing + */ +void +_flcnDbgInfoCapturePcTrace_LR10 +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NvU32 regTraceIdx; + NvU32 idx; + NvU32 maxIdx; + + // Dump entire PC trace buffer + regTraceIdx = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_TRACEIDX); + maxIdx = DRF_VAL(_PFALCON_FALCON, _TRACEIDX, _MAXIDX, regTraceIdx); + + NVSWITCH_PRINT(device, ERROR, + "PC TRACE (TOTAL %d ENTRIES. 
Entry 0 is the most recent branch):\n", + maxIdx); + + for (idx = 0; idx < maxIdx; idx++) + { + regTraceIdx = + FLD_SET_DRF_NUM(_PFALCON, _FALCON_TRACEIDX, _IDX, idx, regTraceIdx); + + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_TRACEIDX, regTraceIdx); + + NVSWITCH_PRINT(device, ERROR, "FALCON_TRACEPC(%d) : 0x%08x\n", idx, + DRF_VAL(_PFALCON, _FALCON_TRACEPC, _PC, + flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_TRACEPC))); + } +} + +/*! + * @brief Read falcon core revision + * + * @param[in] device nvswitch_device pointer + * @param[in] pFlcn FLCN pointer + * + * @return @ref NV_FLCN_CORE_REV_X_Y. + */ +NvU8 +_flcnReadCoreRev_LR10 +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NvU32 hwcfg1 = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_HWCFG1); + + return ((DRF_VAL(_PFALCON, _FALCON_HWCFG1, _CORE_REV, hwcfg1) << 4) | + DRF_VAL(_PFALCON, _FALCON_HWCFG1, _CORE_REV_SUBVERSION, hwcfg1)); +} + +// +// Store pointers to ucode header and data. +// Preload ucode from registry if available. +// +NV_STATUS +_flcnConstruct_LR10 +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + NV_STATUS status; + PFLCNABLE pFlcnable = pFlcn->pFlcnable; + PFALCON_QUEUE_INFO pQueueInfo; + pFlcn->bConstructed = NV_TRUE; + if (pFlcn->engArch == NV_UPROC_ENGINE_ARCH_DEFAULT) + { + // Default the arch to Falcon if it's not set + pFlcn->engArch = NV_UPROC_ENGINE_ARCH_FALCON; + } + // Allocate the memory for Queue Data Structure if needed. + if (pFlcn->bQueuesEnabled) + { + pQueueInfo = pFlcn->pQueueInfo = nvswitch_os_malloc(sizeof(*pQueueInfo)); + if (pQueueInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + NVSWITCH_ASSERT(0); + goto _flcnConstruct_LR10_fail; + } + nvswitch_os_memset(pQueueInfo, 0, sizeof(FALCON_QUEUE_INFO)); + // Assert if Number of Queues are zero + NVSWITCH_ASSERT(pFlcn->numQueues != 0); + pQueueInfo->pQueues = nvswitch_os_malloc(sizeof(FLCNQUEUE) * pFlcn->numQueues); + if (pQueueInfo->pQueues == NULL) + { + status = NV_ERR_NO_MEMORY; + NVSWITCH_ASSERT(0); + goto _flcnConstruct_LR10_fail; + } + nvswitch_os_memset(pQueueInfo->pQueues, 0, sizeof(FLCNQUEUE) * pFlcn->numQueues); + // Sequences can be optional + if (pFlcn->numSequences != 0) + { + if ((pFlcn->numSequences - 1) > ((NvU32)NV_U8_MAX)) + { + status = NV_ERR_OUT_OF_RANGE; + NVSWITCH_PRINT(device, ERROR, + "Max numSequences index = %d cannot fit into byte\n", + (pFlcn->numSequences - 1)); + NVSWITCH_ASSERT(0); + goto _flcnConstruct_LR10_fail; + } + flcnQueueSeqInfoStateInit(device, pFlcn); + } + } + // DEBUG + NVSWITCH_PRINT(device, INFO, "Falcon: %s\n", flcnGetName_HAL(device, pFlcn)); + NVSWITCH_ASSERT(pFlcnable != NULL); + flcnableGetExternalConfig(device, pFlcnable, &pFlcn->extConfig); + return NV_OK; +_flcnConstruct_LR10_fail: + // call flcnDestruct to free the memory allocated in this construct function + flcnDestruct_HAL(device, pFlcn); + return status; +} + +void +_flcnDestruct_LR10 +( + nvswitch_device *device, + PFLCN pFlcn +) +{ + PFALCON_QUEUE_INFO pQueueInfo; + PFLCNABLE pFlcnable = pFlcn->pFlcnable; + if (!pFlcn->bConstructed) + { + return; + } + pFlcn->bConstructed = NV_FALSE; + if (pFlcnable == NULL) { + NVSWITCH_ASSERT(pFlcnable != NULL); + return; + } + if (pFlcn->bQueuesEnabled && (pFlcn->pQueueInfo != NULL)) + { + pQueueInfo = pFlcn->pQueueInfo; + if (NULL != pQueueInfo->pQueues) + { + nvswitch_os_free(pQueueInfo->pQueues); + pQueueInfo->pQueues = NULL; + } + nvswitch_os_free(pFlcn->pQueueInfo); + pFlcn->pQueueInfo = NULL; + } +} +const char * +_flcnGetName_LR10 +( + nvswitch_device *device, + PFLCN pFlcn +) +{ 
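+ // Fall back to a placeholder if a name was never assigned to this falcon.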
+ if (pFlcn->name == NULL) + { + return "UNKNOWN"; + } + return pFlcn->name; +} + +/** + * @brief set hal function pointers for functions defined in LR10 (i.e. this file) + * + * this function has to be at the end of the file so that all the + * other functions are already defined. + * + * @param[in] pFlcn The flcn for which to set hals + */ +void +flcnSetupHal_LR10 +( + PFLCN pFlcn +) +{ + flcn_hal *pHal = pFlcn->pHal; + + pHal->readCoreRev = _flcnReadCoreRev_LR10; + pHal->regRead = _flcnRegRead_LR10; + pHal->regWrite = _flcnRegWrite_LR10; + pHal->construct = _flcnConstruct_LR10; + pHal->destruct = _flcnDestruct_LR10; + pHal->getName = _flcnGetName_LR10; + pHal->intrRetrigger = _flcnIntrRetrigger_LR10; + pHal->areEngDescsInitialized = _flcnAreEngDescsInitialized_LR10; + pHal->waitForResetToFinish = _flcnWaitForResetToFinish_LR10; + pHal->dbgInfoCapturePcTrace = _flcnDbgInfoCapturePcTrace_LR10; +} diff --git a/src/common/nvswitch/kernel/lr10/inforom_lr10.c b/src/common/nvswitch/kernel/lr10/inforom_lr10.c new file mode 100644 index 000000000..f0d7ff5ad --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/inforom_lr10.c @@ -0,0 +1,818 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "common_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/inforom_lr10.h" +#include "inforom/ifrstruct.h" +#include "nvswitch/lr10/dev_nvlsaw_ip.h" +#include "nvswitch/lr10/dev_nvlsaw_ip_addendum.h" +#include "nvswitch/lr10/dev_pmgr.h" + +// +// TODO: Split individual object hals to their own respective files +// +static void _oms_parse(nvswitch_device *device, INFOROM_OMS_STATE *pOmsState); +static void _oms_refresh(nvswitch_device *device, INFOROM_OMS_STATE *pOmsState); +NvlStatus +nvswitch_inforom_nvl_log_error_event_lr10 +( + nvswitch_device *device, + void *pNvlGeneric, + void *pNvlErrorEvent, + NvBool *bDirty +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_inforom_nvl_get_max_correctable_error_rate_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_inforom_nvl_get_errors_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} +NvlStatus nvswitch_inforom_nvl_update_link_correctable_error_info_lr10 +( + nvswitch_device *device, + void *pNvlGeneric, + void *pData, + NvU8 linkId, + NvU8 nvliptInstance, + NvU8 localLinkIdx, + void *pNvlErrorCounts, + NvBool *bDirty +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} +static +NvlStatus +_inforom_ecc_find_useable_entry_index +( + INFOROM_ECC_OBJECT_V6_S0 *pEccObj, + INFOROM_NVS_ECC_ERROR_EVENT *error_event, + NvU8 *pEntryIndex +) +{ + NvU8 entry; + + // + // The size of the "entry" variable needs to be updated if the InfoROM ECC + // error log ever grows past 256 + // + ct_assert(INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT <= NV_U8_MAX); + + for (entry = 0; entry < INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT; entry++) + { + INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER *pErrorEntry = &(pEccObj->errorEntries[entry]); + + // + // Check if the entry already exists + // Ideally the address should be verified only if it is valid, however + // we scrub an invalid address early on so expect them to match the + // recorded value in either case + // + if ((pErrorEntry->errId == error_event->sxid) && + FLD_TEST_DRF_NUM(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER, + _ADDR_VALID, error_event->bAddressValid, pErrorEntry->header) && + (pErrorEntry->address == error_event->address) && + FLD_TEST_DRF_NUM(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _LOCATION, + _LINK_ID, error_event->linkId, pErrorEntry->location)) + break; + // + // Encountering an empty entry indicates this is the first instance of the error + // The ECC error log on the InfoROM is never sparse so we can terminate + // the search early + // + else if (FLD_TEST_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER, + _VALID, _FALSE, pErrorEntry->header)) + break; + } + + if (entry == INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT) + return -NVL_NOT_FOUND; + + *pEntryIndex = entry; + + return NVL_SUCCESS; +} + +static +NvlStatus +_inforom_ecc_calc_timestamp_delta +( + INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER *pErrorEntry, + INFOROM_NVS_ECC_ERROR_EVENT *error_event, + NvU64 existingCount +) +{ + // + // Subtracting 1 from the existingCount to drop the first error event counter + // Unfortunately we cannot track the first error events counts so assuming 1 + // + + NvlStatus status = NVL_SUCCESS; + NvU32 currTime = error_event->timestamp; + NvU64 tmp = ((NvU64) pErrorEntry->averageEventDelta) * (existingCount - 1); + NvU64 ovfTmp = tmp + (currTime - pErrorEntry->lastErrorTimestamp); 
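+ // ovfTmp accumulates the total inter-error time; an unsigned wrap below
+ // means the running average is no longer meaningful and the caller will
+ // mark the entry's time data as corrupt.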
+ NvU64 totCnt, delta; + + if (ovfTmp < tmp) + { + status = -NVL_NO_MEM; + goto _updateEntryTimeFailed; + } + + totCnt = error_event->errorCount + existingCount - 1; + delta = ovfTmp / totCnt; + + if (delta > NV_U32_MAX) + { + status = -NVL_NO_MEM; + goto _updateEntryTimeFailed; + } + + pErrorEntry->averageEventDelta = (NvU32) delta; + +_updateEntryTimeFailed: + return status; +} + +static +NvlStatus +_inforom_ecc_record_entry +( + INFOROM_ECC_OBJECT_V6_S0 *pEccObj, + INFOROM_NVS_ECC_ERROR_EVENT *error_event, + NvU8 entry +) +{ + NvBool bNewEntry; + NvU32 *pErrCnt; + + INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER *pErrorEntry = &(pEccObj->errorEntries[entry]); + + bNewEntry = FLD_TEST_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER, + _VALID, _FALSE, pErrorEntry->header); + + pErrCnt = ((error_event->bUncErr) ? &(pErrorEntry->uncorrectedCount) : + &(pErrorEntry->correctedCount)); + + if (bNewEntry) + { + pErrorEntry->errId = error_event->sxid; + + pErrorEntry->location = FLD_SET_DRF_NUM(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, + _LOCATION, _LINK_ID, error_event->linkId, pErrorEntry->location); + + pErrorEntry->header = FLD_SET_DRF_NUM(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, + _HEADER, _ADDR_VALID, error_event->bAddressValid, pErrorEntry->header); + + pErrorEntry->address = error_event->address; + + pErrorEntry->sublocation = 0; + + *pErrCnt = error_event->errorCount; + + pErrorEntry->averageEventDelta = 0; + + pErrorEntry->header = FLD_SET_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER, + _VALID, _TRUE, pErrorEntry->header); + } + else + { + NvlStatus status; + NvU64 tmpCnt; + NvU64 existingCnt = (NvU64) (pErrorEntry->uncorrectedCount + pErrorEntry->correctedCount); + + status = _inforom_ecc_calc_timestamp_delta(pErrorEntry, error_event, existingCnt); + if (status != NVL_SUCCESS) + { + pErrorEntry->header = FLD_SET_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, + _HEADER, _CORRUPT_TIMEDATA, _TRUE, pErrorEntry->header); + } + + // Update error counts by summing them up + tmpCnt = (NvU64) *pErrCnt + error_event->errorCount; + + // Saturate at NvU32 limit + if (tmpCnt > NV_U32_MAX) + { + tmpCnt = NV_U32_MAX; + } + + *pErrCnt = (NvU32) tmpCnt; + } + + pErrorEntry->lastErrorTimestamp = error_event->timestamp; + + return NVL_SUCCESS; + +} + +NvlStatus +nvswitch_inforom_ecc_log_error_event_lr10 +( + nvswitch_device *device, + INFOROM_ECC_OBJECT *pEccGeneric, + INFOROM_NVS_ECC_ERROR_EVENT *err_event +) +{ + NvU8 entry; + NvU64_ALIGN32 *pInforomTotalCount; + NvU64 tmpCount; + NvlStatus status; + INFOROM_ECC_OBJECT_V6_S0 *pEccObj; + + if ((err_event == NULL) || (pEccGeneric == NULL)) + return -NVL_BAD_ARGS; + + pEccObj = &(pEccGeneric->v6s); + + // + // Find the appropriate entry to log the error event + // If the function returns "out of memory" error, indicates no free entries + // + status = _inforom_ecc_find_useable_entry_index(pEccObj, err_event, &entry); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "InfoROM ECC: Unable to find logging entry rc: %d\n", status); + goto _ecc_log_error_event_lr10_failed; + } + + // + // Record the error data into appropriate members of the error entry struct + // Also mark the entry as in-use if it is a new entry + // + status = _inforom_ecc_record_entry(pEccObj, err_event, entry); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "InfoROM ECC: Unable to record entry:%u rc:%d\n", + entry, status); + + goto _ecc_log_error_event_lr10_failed; + } + + // Log the error count to the InfoROM total values + if (err_event->bUncErr) + 
{ + pInforomTotalCount = &(pEccObj->uncorrectedTotal); + } + else + { + pInforomTotalCount = &(pEccObj->correctedTotal); + } + + NvU64_ALIGN32_UNPACK(&tmpCount, pInforomTotalCount); + + tmpCount += err_event->errorCount; + if (tmpCount < err_event->errorCount) + { + tmpCount = NV_U64_MAX; + } + + NvU64_ALIGN32_PACK(pInforomTotalCount, &tmpCount); + + // Update shared surface counts, non-fatal if we encounter a failure + status = nvswitch_smbpbi_refresh_ecc_counts(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, WARN, "Failed to update ECC counts on SMBPBI " + "shared surface rc:%d\n", status); + } + + return NVL_SUCCESS; + +_ecc_log_error_event_lr10_failed: + + NVSWITCH_PRINT(device, ERROR, "Missed recording sxid=%u, linkId=%u, address=0x%04x, " + "timestamp=%u, errorCount=%u\n", err_event->sxid, + err_event->linkId, err_event->address, err_event->timestamp, + err_event->errorCount); + + return status; +} + +void +nvswitch_inforom_ecc_get_total_errors_lr10 +( + nvswitch_device *device, + INFOROM_ECC_OBJECT *pEccGeneric, + NvU64 *pCorrectedTotal, + NvU64 *pUncorrectedTotal +) +{ + INFOROM_ECC_OBJECT_V6_S0 *pEccObj = &(pEccGeneric->v6s); + + NvU64_ALIGN32_UNPACK(pCorrectedTotal, &pEccObj->correctedTotal); + NvU64_ALIGN32_UNPACK(pUncorrectedTotal, &pEccObj->uncorrectedTotal); +} + +static void _nvswitch_inforom_map_ecc_error_to_userspace_error +( + INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER *pEccError, + NVSWITCH_ECC_ERROR_ENTRY *pErrorLog +) +{ + pErrorLog->sxid = pEccError->errId; + pErrorLog->linkId = DRF_VAL(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _LOCATION, _LINK_ID, pEccError->location); + pErrorLog->lastErrorTimestamp = pEccError->lastErrorTimestamp; + pErrorLog->bAddressValid = DRF_VAL(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER, _ADDR_VALID, pEccError->header); + pErrorLog->address = pEccError->address; + pErrorLog->correctedCount = pEccError->correctedCount; + pErrorLog->uncorrectedCount = pEccError->uncorrectedCount; + return; +} + + +NvlStatus +nvswitch_inforom_ecc_get_errors_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params +) +{ + struct inforom *pInforom = device->pInforom; + PINFOROM_ECC_STATE pEccState; + INFOROM_ECC_OBJECT *pEcc; + NvU32 errIndx; + + /* + * Compile time check is needed here to make sure that the ECC_ERROR API interface query size is in sync + * with its internal counterpart. 
When the definition of the internal InfoROM error size limit changes, + * it will enforce API interface change as well, or use a retry style query with err_index + */ + ct_assert(NVSWITCH_ECC_ERRORS_MAX_READ_COUNT == INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT); + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pEccState = pInforom->pEccState; + if (pEccState == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pEcc = pEccState->pEcc; + if (pEcc == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + nvswitch_os_memset(params->errorLog, 0, sizeof(params->errorLog)); + + nvswitch_os_memcpy(¶ms->correctedTotal, &pEcc->v6s.correctedTotal, sizeof(params->correctedTotal)); + nvswitch_os_memcpy(¶ms->uncorrectedTotal, &pEcc->v6s.uncorrectedTotal, sizeof(params->uncorrectedTotal)); + + for (errIndx = 0; errIndx < NVSWITCH_ECC_ERRORS_MAX_READ_COUNT; errIndx++) + { + if (FLD_TEST_DRF(_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER, _HEADER, _VALID, _FALSE, + pEcc->v6s.errorEntries[errIndx].header)) + { + break; // the last entry + } + + _nvswitch_inforom_map_ecc_error_to_userspace_error(&pEcc->v6s.errorEntries[errIndx], + ¶ms->errorLog[errIndx]); + } + + params->errorCount = errIndx; + + return NVL_SUCCESS; +} + +static NvU8 _oms_dword_byte_sum(NvU16 dword) +{ + NvU8 i, sum = 0; + for (i = 0; i < sizeof(dword); i++) + sum += (NvU8)((dword >> (8*i)) & 0xFF); + return sum; +} + +static void _oms_update_entry_checksum +( + INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pEntry +) +{ + NvU8 datasum = 0; + + // Upper byte is the checksum + datasum += _oms_dword_byte_sum(pEntry->data & ~0xFF00); + + pEntry->data = FLD_SET_REF_NUM( + INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY_DATA_ENTRY_CHECKSUM, + 0x00u - datasum, pEntry->data); +} + +static void +_oms_reset_entry_iter +( + INFOROM_OMS_STATE *pOmsState, + NvBool bStart +) +{ + INFOROM_OMS_OBJECT_V1S *pOms = &pOmsState->pOms->v1s; + INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s; + + if (bStart) + { + pVerData->pIter = &pOms->settings[0]; + } + else + { + pVerData->pIter = &pOms->settings[ + INFOROM_OMS_OBJECT_V1S_NUM_SETTINGS_ENTRIES - 1]; + } +} + +static NvBool +_oms_entry_available +( + INFOROM_OMS_STATE *pOmsState +) +{ + INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pEntry = pOmsState->omsData.v1s.pIter; + + if (pEntry == NULL) + return NV_FALSE; + + return FLD_TEST_REF(INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE, + _YES, pEntry->data); +} + +static NvBool +_oms_entry_valid +( + INFOROM_OMS_STATE *pOmsState +) +{ + INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pEntry = pOmsState->omsData.v1s.pIter; + NvU8 sum; + + if (pEntry == NULL) + return NV_FALSE; + + sum = _oms_dword_byte_sum(pEntry->data); + + return (sum == 0); +} + +/* + * + * Sets nextIdx to one after currIdx. Returns NV_TRUE if nextIdx + * is valid. NV_FALSE otherwise. 
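+ * (The iterator here is the pIter pointer rather than an index; it is
+ * advanced forward and set to NULL once it runs off the settings array.)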
+ * + */ +static NvBool +_oms_entry_iter_next +( + INFOROM_OMS_STATE *pOmsState +) +{ + INFOROM_OMS_OBJECT_V1S *pOms = &pOmsState->pOms->v1s; + INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s; + + if (pVerData->pIter >= pOms->settings + + INFOROM_OMS_OBJECT_V1S_NUM_SETTINGS_ENTRIES) + { + pVerData->pIter = NULL; + } + else + { + pVerData->pIter++; + } + + return (pVerData->pIter != NULL); +} + +static void +_oms_refresh +( + nvswitch_device *device, + INFOROM_OMS_STATE *pOmsState +) +{ + INFOROM_OMS_OBJECT_V1S *pOms = &pOmsState->pOms->v1s; + + nvswitch_os_memset(pOms->settings, 0xFF, sizeof(pOms->settings)); + pOms->lifetimeRefreshCount++; + + // This is guaranteed to find and set an UpdateEntry now + _oms_parse(device, pOmsState); +} + +static void +_oms_set_current_entry +( + INFOROM_OMS_STATE *pOmsState +) +{ + pOmsState->omsData.v1s.prev = *pOmsState->omsData.v1s.pIter; +} + +static void +_oms_set_update_entry +( + INFOROM_OMS_STATE *pOmsState +) +{ + INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s; + + pVerData->pNext = pVerData->pIter; + + // Next settings always start out the same as the previous + *pVerData->pNext = pVerData->prev; +} + +static NvBool +_oms_entry_iter_prev +( + INFOROM_OMS_STATE *pOmsState +) +{ + INFOROM_OMS_OBJECT_V1S *pOms = &pOmsState->pOms->v1s; + INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s; + + if (pVerData->pIter <= pOms->settings) + { + pVerData->pIter = NULL; + } + else + { + pVerData->pIter--; + } + + return (pVerData->pIter != NULL); +} + +static void +_oms_parse +( + nvswitch_device *device, + INFOROM_OMS_STATE *pOmsState +) +{ + NvBool bCurrentValid = NV_FALSE; + NvBool bIterValid = NV_TRUE; + + // + // To find the "latest" entry - the one with the settings that were last + // flushed to the InfoROM - scan from the end of the array until we find + // an entry that is not available and is valid. + // + _oms_reset_entry_iter(pOmsState, NV_FALSE); + while (bIterValid) + { + if (!_oms_entry_available(pOmsState) && + _oms_entry_valid(pOmsState)) + { + _oms_set_current_entry(pOmsState); + bCurrentValid = NV_TRUE; + break; + } + + bIterValid = _oms_entry_iter_prev(pOmsState); + } + + // + // To find the "next" entry - one that we will write to if a setting is + // updated - start scanning from the entry after the latest entry to find + // an available one. This will skip entries that were previously written + // to but are invalid. + // + if (bCurrentValid) + { + bIterValid = _oms_entry_iter_next(pOmsState); + } + else + { + _oms_reset_entry_iter(pOmsState, NV_TRUE); + bIterValid = NV_TRUE; + } + + while (bIterValid) + { + if (_oms_entry_available(pOmsState)) + { + _oms_set_update_entry(pOmsState); + break; + } + + bIterValid = _oms_entry_iter_next(pOmsState); + } + + if (!bIterValid) + { + // + // No more entries available, we will need to refresh the object. + // We should have at least one valid recent entry in this case + // (otherwise every entry is corrupted). 
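+ // _oms_refresh() wipes the settings array and re-runs the parse, which
+ // guarantees a usable update entry is found.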
+ // + NVSWITCH_ASSERT(bCurrentValid); + _oms_refresh(device, pOmsState); + } +} + +static NvBool +_oms_is_content_dirty +( + INFOROM_OMS_STATE *pOmsState +) +{ + INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s; + + if (pVerData->pNext == NULL) + return NV_FALSE; + + return (pVerData->pNext->data != pVerData->prev.data); +} + +NvlStatus +nvswitch_oms_inforom_flush_lr10 +( + nvswitch_device *device +) +{ + NvlStatus status = NVL_SUCCESS; + struct inforom *pInforom = device->pInforom; + INFOROM_OMS_STATE *pOmsState; + + if (pInforom == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pOmsState = pInforom->pOmsState; + + if (pOmsState != NULL && _oms_is_content_dirty(pOmsState)) + { + status = nvswitch_inforom_write_object(device, "OMS", + pOmsState->pFmt, pOmsState->pOms, + pOmsState->pPackedObject); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to flush OMS object to InfoROM, rc: %d\n", status); + } + else + { + _oms_parse(device, pOmsState); + } + } + + return status; +} + +void +nvswitch_initialize_oms_state_lr10 +( + nvswitch_device *device, + INFOROM_OMS_STATE *pOmsState +) +{ + pOmsState->omsData.v1s.pIter = pOmsState->omsData.v1s.pNext = NULL; + pOmsState->omsData.v1s.prev.data = + REF_DEF(INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE, _NO) | + REF_DEF(INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE, _NO); + _oms_update_entry_checksum(&pOmsState->omsData.v1s.prev); + + _oms_parse(device, pOmsState); +} + +NvBool +nvswitch_oms_get_device_disable_lr10 +( + INFOROM_OMS_STATE *pOmsState +) +{ + INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s; + + return FLD_TEST_REF( + INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE, + _YES, pVerData->pNext->data); +} + +void +nvswitch_oms_set_device_disable_lr10 +( + INFOROM_OMS_STATE *pOmsState, + NvBool bForceDeviceDisable +) +{ + INFOROM_OMS_V1S_DATA *pVerData = &pOmsState->omsData.v1s; + + pVerData->pNext->data = FLD_SET_REF_NUM( + INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE, + bForceDeviceDisable, pVerData->pNext->data); + + _oms_update_entry_checksum(pVerData->pNext); +} + +NvlStatus +nvswitch_bbx_setup_prologue_lr10 +( + nvswitch_device *device, + void *pInforomBbxState +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_bbx_setup_epilogue_lr10 +( + nvswitch_device *device, + void *pInforomBbxState +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_bbx_add_data_time_lr10 +( + nvswitch_device *device, + void *pInforomBbxState, + void *pInforomBbxData +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_bbx_add_sxid_lr10 +( + nvswitch_device *device, + void *pInforomBbxState, + void *pInforomBbxData +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_bbx_add_temperature_lr10 +( + nvswitch_device *device, + void *pInforomBbxState, + void *pInforomBbxData +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} +void +nvswitch_bbx_set_initial_temperature_lr10 +( + nvswitch_device *device, + void *pInforomBbxState, + void *pInforomBbxData +) +{ + return; +} + +NvlStatus +nvswitch_inforom_bbx_get_sxid_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_SXIDS_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} diff --git a/src/common/nvswitch/kernel/lr10/intr_lr10.c b/src/common/nvswitch/kernel/lr10/intr_lr10.c new file mode 100644 index 000000000..df231362c --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/intr_lr10.c @@ -0,0 +1,5462 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "intr_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/minion_lr10.h" +#include "regkey_nvswitch.h" +#include "soe/soe_nvswitch.h" +#include "inforom/inforom_nvswitch.h" + +#include "nvswitch/lr10/dev_nvs.h" +#include "nvswitch/lr10/dev_nvs_master.h" +#include "nvswitch/lr10/dev_timer.h" +#include "nvswitch/lr10/dev_nvlsaw_ip.h" +#include "nvswitch/lr10/dev_pri_ringmaster.h" +#include "nvswitch/lr10/dev_pri_ringstation_sys.h" +#include "nvswitch/lr10/dev_pri_ringstation_prt.h" +#include "nvswitch/lr10/dev_nv_xve.h" +#include "nvswitch/lr10/dev_npg_ip.h" +#include "nvswitch/lr10/dev_nport_ip.h" +#include "nvswitch/lr10/dev_route_ip.h" +#include "nvswitch/lr10/dev_ingress_ip.h" +#include "nvswitch/lr10/dev_sourcetrack_ip.h" +#include "nvswitch/lr10/dev_egress_ip.h" +#include "nvswitch/lr10/dev_tstate_ip.h" +#include "nvswitch/lr10/dev_nxbar_tc_global_ip.h" +#include "nvswitch/lr10/dev_nxbar_tile_ip.h" +#include "nvswitch/lr10/dev_nvlipt_ip.h" +#include "nvswitch/lr10/dev_nvltlc_ip.h" +#include "nvswitch/lr10/dev_nvlipt_lnk_ip.h" +#include "nvswitch/lr10/dev_minion_ip.h" +#include "nvswitch/lr10/dev_nvldl_ip.h" +#include "nvswitch/lr10/dev_nvltlc_ip.h" +#include "nvswitch/lr10/dev_nvlctrl_ip.h" + +static void +_nvswitch_construct_ecc_error_event +( + INFOROM_NVS_ECC_ERROR_EVENT *err_event, + NvU32 sxid, + NvU32 linkId, + NvBool bAddressValid, + NvU32 address, + NvBool bUncErr, + NvU32 errorCount +) +{ + err_event->sxid = sxid; + err_event->linkId = linkId; + err_event->bAddressValid = bAddressValid; + err_event->address = address; + err_event->bUncErr = bUncErr; + err_event->errorCount = errorCount; +} + +/* + * @Brief : Enable top level HW interrupts. 
+ * + * @Description : + * + * @param[in] device operate on this device + */ +void +nvswitch_lib_enable_interrupts_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 saw_legacy_intr_enable = 0; + + if (FLD_TEST_DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _PTIMER, 1, chip_device->intr_enable_legacy)) + { + saw_legacy_intr_enable = FLD_SET_DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, _PTIMER_0, 1, saw_legacy_intr_enable); + saw_legacy_intr_enable = FLD_SET_DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, _PTIMER_1, 1, saw_legacy_intr_enable); + } + if (FLD_TEST_DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _PMGR, 1, chip_device->intr_enable_legacy)) + { + saw_legacy_intr_enable = FLD_SET_DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, _PMGR_0, 1, saw_legacy_intr_enable); + saw_legacy_intr_enable = FLD_SET_DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, _PMGR_1, 1, saw_legacy_intr_enable); + } + + NVSWITCH_REG_WR32(device, _PSMC, _INTR_EN_SET_LEGACY, chip_device->intr_enable_legacy); + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, saw_legacy_intr_enable); +} + +/* + * @Brief : Disable top level HW interrupts. + * + * @Description : + * + * @param[in] device operate on this device + */ +void +nvswitch_lib_disable_interrupts_lr10 +( + nvswitch_device *device +) +{ + if (NVSWITCH_GET_CHIP_DEVICE_LR10(device) == NULL) + { + NVSWITCH_PRINT(device, WARN, + "%s: Can not disable interrupts. Chip device==NULL\n", + __FUNCTION__); + return; + } + + NVSWITCH_REG_WR32(device, _PSMC, _INTR_EN_CLR_LEGACY, 0xffffffff); + + // + // Need a bit more time to ensure interrupt de-asserts, on + // RTL simulation. Part of BUG 1869204 and 1881361. + // + if (IS_RTLSIM(device)) + { + (void)NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_EN_CLR_CORRECTABLE); + } +} + +static void +_nvswitch_build_top_interrupt_mask_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 intr_bit; + NvU32 i; + + chip_device->intr_enable_legacy = +// DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _PTIMER, 1) | +// DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _PMGR, 1) | + DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _SAW, 1) | +// DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _DECODE_TRAP_PRIV_LEVEL_VIOLATION, 1) | +// DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _DECODE_TRAP_WRITE_DROPPED, 1) | +// DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _RING_MANAGE_SUCCESS, 1) | + DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _PBUS, 1) | +// DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _XVE, 1) | + DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _PRIV_RING, 1) | + 0; + + chip_device->intr_enable_fatal = 0; + chip_device->intr_enable_nonfatal = 0; + chip_device->intr_enable_corr = 0; + + for (i = 0; i < NUM_NXBAR_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NXBAR, i)) + { + intr_bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_FATAL, _NXBAR_0, 1) << i; + + // NXBAR only has fatal interrupts + chip_device->intr_enable_fatal |= intr_bit; + } + } + + for (i = 0; i < NUM_NPG_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NPG, i)) + { + intr_bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_FATAL, _NPG_0, 1) << i; + chip_device->intr_enable_fatal |= intr_bit; + + intr_bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_NONFATAL, _NPG_0, 1) << i; + chip_device->intr_enable_nonfatal |= intr_bit; + } + } + + for (i = 0; i < NUM_NVLW_ENGINE_LR10; i++) + { + if (NVSWITCH_ENG_VALID_LR10(device, NVLW, i)) + { + intr_bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_FATAL, _NVLIPT_0, 1) << i; + chip_device->intr_enable_fatal 
|= intr_bit; + + intr_bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_NONFATAL, _NVLIPT_0, 1) << i; + chip_device->intr_enable_nonfatal |= intr_bit; + } + } + +#if defined(NV_NVLSAW_NVSPMC_INTR_EN_SET_FATAL_SOE) + if (nvswitch_is_soe_supported(device)) + { + intr_bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_FATAL, _SOE, 1); + chip_device->intr_enable_fatal |= intr_bit; + } +#endif +} + +static void +_nvswitch_initialize_minion_interrupts +( + nvswitch_device *device, + NvU32 instance +) +{ + NvU32 intrEn, localDiscoveredLinks, globalLink, i; + localDiscoveredLinks = 0; + + // Tree 1 (non-stall) is disabled until there is a need + NVSWITCH_MINION_WR32_LR10(device, instance, _MINION, _MINION_INTR_NONSTALL_EN, 0); + + // Tree 0 (stall) is where we route _all_ MINION interrupts for now + intrEn = DRF_DEF(_MINION, _MINION_INTR_STALL_EN, _FATAL, _ENABLE) | + DRF_DEF(_MINION, _MINION_INTR_STALL_EN, _NONFATAL, _ENABLE) | + DRF_DEF(_MINION, _MINION_INTR_STALL_EN, _FALCON_STALL, _ENABLE) | + DRF_DEF(_MINION, _MINION_INTR_STALL_EN, _FALCON_NOSTALL, _DISABLE); + + for (i = 0; i < NVSWITCH_LINKS_PER_MINION; ++i) + { + // get the global link number of the link we are iterating over + globalLink = (instance * NVSWITCH_LINKS_PER_MINION) + i; + + // the link is valid place bit in link mask + if (device->link[globalLink].valid) + { + localDiscoveredLinks |= NVBIT(i); + } + } + + intrEn = FLD_SET_DRF_NUM(_MINION, _MINION_INTR_STALL_EN, _LINK, + localDiscoveredLinks, intrEn); + + NVSWITCH_MINION_WR32_LR10(device, instance, _MINION, _MINION_INTR_STALL_EN, intrEn); +} + +static void +_nvswitch_initialize_nvlipt_interrupts_lr10 +( + nvswitch_device *device +) +{ + NvU32 i; + NvU32 regval = 0; + + // + // NVLipt interrupt routing (NVLIPT_COMMON, NVLIPT_LNK, NVLDL, NVLTLC) + // will be initialized by MINION NVLPROD flow + // + // We must enable interrupts at the top levels in NVLW, NVLIPT_COMMON, + // NVLIPT_LNK and MINION + // + + // NVLW + regval = DRF_NUM(_NVLCTRL_COMMON, _INTR_0_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_0_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_0_MASK, _CORRECTABLE, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_0_MASK, _INTR0, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_0_MASK, _INTR1, 0x1); + NVSWITCH_BCAST_WR32_LR10(device, NVLW, _NVLCTRL_COMMON, _INTR_0_MASK, regval); + + regval = DRF_NUM(_NVLCTRL_COMMON, _INTR_1_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_1_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_1_MASK, _CORRECTABLE, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_1_MASK, _INTR0, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_1_MASK, _INTR1, 0x1); + NVSWITCH_BCAST_WR32_LR10(device, NVLW, _NVLCTRL_COMMON, _INTR_1_MASK, regval); + + regval = DRF_NUM(_NVLCTRL_COMMON, _INTR_2_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_2_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_2_MASK, _CORRECTABLE, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_2_MASK, _INTR0, 0x1) | + DRF_NUM(_NVLCTRL_COMMON, _INTR_2_MASK, _INTR1, 0x1); + NVSWITCH_BCAST_WR32_LR10(device, NVLW, _NVLCTRL_COMMON, _INTR_2_MASK, regval); + + // NVLW link + for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_MASK__SIZE_1; i++) + { + regval = DRF_NUM(_NVLCTRL_LINK, _INTR_0_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_0_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_0_MASK, _CORRECTABLE, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_0_MASK, _INTR0, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_0_MASK, _INTR1, 0x1); + NVSWITCH_BCAST_WR32_LR10(device, NVLW, _NVLCTRL_LINK, _INTR_0_MASK(i), regval); + + regval = 
DRF_NUM(_NVLCTRL_LINK, _INTR_1_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_1_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_1_MASK, _CORRECTABLE, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_1_MASK, _INTR0, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_1_MASK, _INTR1, 0x1); + NVSWITCH_BCAST_WR32_LR10(device, NVLW, _NVLCTRL_LINK, _INTR_1_MASK(i), regval); + + regval = DRF_NUM(_NVLCTRL_LINK, _INTR_2_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_2_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_2_MASK, _CORRECTABLE, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_2_MASK, _INTR0, 0x1) | + DRF_NUM(_NVLCTRL_LINK, _INTR_2_MASK, _INTR1, 0x1); + NVSWITCH_BCAST_WR32_LR10(device, NVLW, _NVLCTRL_LINK, _INTR_2_MASK(i), regval); + } + + // NVLIPT_COMMON + regval = DRF_NUM(_NVLIPT_COMMON, _INTR_CONTROL_COMMON, _INT0_EN, 0x1) | + DRF_NUM(_NVLIPT_COMMON, _INTR_CONTROL_COMMON, _INT1_EN, 0x1); + + NVSWITCH_BCAST_WR32_LR10(device, NVLIPT, _NVLIPT_COMMON, _INTR_CONTROL_COMMON, regval); + + // NVLIPT_LNK + regval = DRF_NUM(_NVLIPT_LNK, _INTR_CONTROL_LINK, _INT0_EN, 0x1) | + DRF_NUM(_NVLIPT_LNK, _INTR_CONTROL_LINK, _INT1_EN, 0x1); + NVSWITCH_BCAST_WR32_LR10(device, NVLIPT_LNK, _NVLIPT_LNK, _INTR_CONTROL_LINK, regval); + + // MINION + for (i = 0; i < NUM_MINION_ENGINE_LR10; ++i) + { + if (!NVSWITCH_ENG_VALID_LR10(device, MINION, i)) + { + continue; + } + + _nvswitch_initialize_minion_interrupts(device,i); + } +} + +static void +_nvswitch_initialize_nxbar_tileout_interrupts +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 report_fatal; + NvU32 tileout; + + report_fatal = + DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_FATAL_INTR_EN, _INGRESS_BUFFER_OVERFLOW, 1) | + DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_FATAL_INTR_EN, _INGRESS_BUFFER_UNDERFLOW, 1) | + DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_FATAL_INTR_EN, _EGRESS_CREDIT_OVERFLOW, 1) | + DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_FATAL_INTR_EN, _EGRESS_CREDIT_UNDERFLOW, 1) | + DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_FATAL_INTR_EN, _INGRESS_NON_BURSTY_PKT, 1) | + DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_FATAL_INTR_EN, _INGRESS_NON_STICKY_PKT, 1) | + DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_FATAL_INTR_EN, _INGRESS_BURST_GT_9_DATA_VC, 1) | + DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_FATAL_INTR_EN, _EGRESS_CDT_PARITY_ERROR, 1); + + for (tileout = 0; tileout < NUM_NXBAR_TILEOUTS_PER_TC_LR10; tileout++) + { + NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_TILEOUT_ERR_FATAL_INTR_EN(tileout), report_fatal); + } + + chip_device->intr_mask.tileout.fatal = report_fatal; +} + +static void +_nvswitch_initialize_nxbar_tile_interrupts +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 report_fatal; + + report_fatal = + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _INGRESS_BUFFER_OVERFLOW, 1) | + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _INGRESS_BUFFER_UNDERFLOW, 1) | + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _EGRESS_CREDIT_OVERFLOW, 1) | + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _EGRESS_CREDIT_UNDERFLOW, 1) | + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _INGRESS_NON_BURSTY_PKT, 1) | + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _INGRESS_NON_STICKY_PKT, 1) | + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _INGRESS_BURST_GT_9_DATA_VC, 1) | + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _INGRESS_PKT_INVALID_DST, 1) | + DRF_NUM(_NXBAR, _TILE_ERR_FATAL_INTR_EN, _INGRESS_PKT_PARITY_ERROR, 1); + + NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_ERR_FATAL_INTR_EN, report_fatal); + + chip_device->intr_mask.tile.fatal = report_fatal; +} + 
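/*
 * A minimal, self-contained sketch of the enable-then-service pattern used by the
 * interrupt-initialization functions in this file. All names below are hypothetical,
 * simplified stand-ins, not the driver's actual DRF_* field macros or NVSWITCH_*_WR32/RD32
 * register accessors: each unit ORs together the error fields it wants reported, writes
 * that value to the unit's interrupt-enable register, and caches the same value so the
 * service routine can later mask the raw status down to only the bits it enabled.
 *
 *     typedef unsigned int NvU32;
 *
 *     #define ERR_BUFFER_OVERFLOW  (1u << 0)   // hypothetical error field bits
 *     #define ERR_CREDIT_UNDERFLOW (1u << 1)
 *
 *     static NvU32 cached_fatal_enable;
 *
 *     static void init_fatal_intr(volatile NvU32 *intr_en_reg)
 *     {
 *         NvU32 enable = ERR_BUFFER_OVERFLOW | ERR_CREDIT_UNDERFLOW;
 *         *intr_en_reg = enable;          // enable reporting in HW
 *         cached_fatal_enable = enable;   // remember what was enabled for servicing
 *     }
 *
 *     static NvU32 service_fatal_intr(volatile NvU32 *intr_status_reg)
 *     {
 *         // Consider only the bits we enabled; acknowledge them
 *         // (write-1-to-clear semantics on real interrupt status registers).
 *         NvU32 pending = *intr_status_reg & cached_fatal_enable;
 *         *intr_status_reg = pending;
 *         return pending;
 *     }
 */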
+static void +_nvswitch_initialize_nxbar_interrupts +( + nvswitch_device *device +) +{ + _nvswitch_initialize_nxbar_tile_interrupts(device); + _nvswitch_initialize_nxbar_tileout_interrupts(device); +} + +static void +_nvswitch_initialize_route_interrupts +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 enable; + NvU32 report_fatal; + NvU32 report_nonfatal; + NvU32 contain; + + report_fatal = + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _ROUTEBUFERR, _ENABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _NOPORTDEFINEDERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _INVALIDROUTEPOLICYERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _GLT_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _GLT_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _TRANSDONERESVERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _PDCTRLPARERR, _ENABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _NVS_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _NVS_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_ROUTE, _ERR_FATAL_REPORT_EN_0, _CDTPARERR, _ENABLE); + + report_nonfatal = + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _ROUTEBUFERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _NOPORTDEFINEDERR, _ENABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _INVALIDROUTEPOLICYERR, _ENABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _GLT_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _GLT_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _TRANSDONERESVERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _PDCTRLPARERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _NVS_ECC_LIMIT_ERR, _ENABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _NVS_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_ROUTE, _ERR_NON_FATAL_REPORT_EN_0, _CDTPARERR, _DISABLE); + + contain = + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _ROUTEBUFERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _NOPORTDEFINEDERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _INVALIDROUTEPOLICYERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _GLT_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _GLT_ECC_DBE_ERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _TRANSDONERESVERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _PDCTRLPARERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _NVS_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _NVS_ECC_DBE_ERR, __PROD) | + DRF_DEF(_ROUTE, _ERR_CONTAIN_EN_0, _CDTPARERR, __PROD); + + enable = report_fatal | report_nonfatal; + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ERR_LOG_EN_0, enable); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ERR_FATAL_REPORT_EN_0, report_fatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0, report_nonfatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ERR_CORRECTABLE_REPORT_EN_0, 0); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ERR_CONTAIN_EN_0, contain); + + chip_device->intr_mask.route.fatal = report_fatal; + chip_device->intr_mask.route.nonfatal = report_nonfatal; +} + +static void +_nvswitch_initialize_ingress_interrupts +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 enable; + NvU32 report_fatal; + NvU32 report_nonfatal; + NvU32 contain; + + report_fatal = + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _CMDDECODEERR, _ENABLE) | + DRF_DEF(_INGRESS, 
_ERR_FATAL_REPORT_EN_0, _NCISOC_HDR_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _INVALIDVCSET, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _REMAPTAB_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _RIDTAB_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _RLANTAB_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _NCISOC_PARITY_ERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _REQCONTEXTMISMATCHERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _ACLFAIL, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _NCISOC_HDR_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _ADDRBOUNDSERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _RIDTABCFGERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _RLANTABCFGERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _REMAPTAB_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _RIDTAB_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _RLANTAB_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_FATAL_REPORT_EN_0, _ADDRTYPEERR, _DISABLE); + + report_nonfatal = + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _REQCONTEXTMISMATCHERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ACLFAIL, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NCISOC_HDR_ECC_LIMIT_ERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ADDRBOUNDSERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RIDTABCFGERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RLANTABCFGERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _REMAPTAB_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RIDTAB_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RLANTAB_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _ADDRTYPEERR, _ENABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _CMDDECODEERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NCISOC_HDR_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _INVALIDVCSET, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _REMAPTAB_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RIDTAB_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RLANTAB_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_INGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NCISOC_PARITY_ERR, _DISABLE); + + contain = + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _REQCONTEXTMISMATCHERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _ACLFAIL, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _NCISOC_HDR_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _ADDRBOUNDSERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _RIDTABCFGERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _RLANTABCFGERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _REMAPTAB_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _RIDTAB_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _RLANTAB_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _ADDRTYPEERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _CMDDECODEERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _NCISOC_HDR_ECC_DBE_ERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _INVALIDVCSET, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _REMAPTAB_ECC_DBE_ERR, __PROD) | + DRF_DEF(_INGRESS, 
_ERR_CONTAIN_EN_0, _RIDTAB_ECC_DBE_ERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _RLANTAB_ECC_DBE_ERR, __PROD) | + DRF_DEF(_INGRESS, _ERR_CONTAIN_EN_0, _NCISOC_PARITY_ERR, __PROD); + + enable = report_fatal | report_nonfatal; + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _INGRESS, _ERR_LOG_EN_0, enable); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _INGRESS, _ERR_FATAL_REPORT_EN_0, report_fatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0, report_nonfatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _INGRESS, _ERR_CORRECTABLE_REPORT_EN_0, 0); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _INGRESS, _ERR_CONTAIN_EN_0, contain); + + chip_device->intr_mask.ingress.fatal = report_fatal; + chip_device->intr_mask.ingress.nonfatal = report_nonfatal; +} + +static void +_nvswitch_initialize_egress_interrupts +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 enable; + NvU32 report_fatal; + NvU32 report_nonfatal; + NvU32 contain; + + report_fatal = + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _EGRESSBUFERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _PKTROUTEERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _SEQIDERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NXBAR_HDR_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NXBAR_HDR_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _RAM_OUT_HDR_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NCISOCCREDITOVFL, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _REQTGTIDMISMATCHERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _RSPREQIDMISMATCHERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _URRSPERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _PRIVRSPERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _HWRSPERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NXBAR_HDR_PARITY_ERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NCISOC_CREDIT_PARITY_ERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _NXBAR_FLITTYPE_MISMATCH_ERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_FATAL_REPORT_EN_0, _CREDIT_TIME_OUT_ERR, _ENABLE); + + report_nonfatal = + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _EGRESSBUFERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _PKTROUTEERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _SEQIDERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NXBAR_HDR_ECC_LIMIT_ERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NXBAR_HDR_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RAM_OUT_HDR_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NCISOCCREDITOVFL, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _REQTGTIDMISMATCHERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _RSPREQIDMISMATCHERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _URRSPERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _PRIVRSPERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _HWRSPERR, _ENABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NXBAR_HDR_PARITY_ERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _NCISOC_CREDIT_PARITY_ERR, _DISABLE) | + DRF_DEF(_EGRESS, 
_ERR_NON_FATAL_REPORT_EN_0, _NXBAR_FLITTYPE_MISMATCH_ERR, _DISABLE) | + DRF_DEF(_EGRESS, _ERR_NON_FATAL_REPORT_EN_0, _CREDIT_TIME_OUT_ERR, _DISABLE); + + contain = + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _EGRESSBUFERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _PKTROUTEERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _SEQIDERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _NXBAR_HDR_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _NXBAR_HDR_ECC_DBE_ERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _RAM_OUT_HDR_ECC_DBE_ERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _NCISOCCREDITOVFL, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _REQTGTIDMISMATCHERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _RSPREQIDMISMATCHERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _URRSPERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _PRIVRSPERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _HWRSPERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _NXBAR_HDR_PARITY_ERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _NCISOC_CREDIT_PARITY_ERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _NXBAR_FLITTYPE_MISMATCH_ERR, __PROD) | + DRF_DEF(_EGRESS, _ERR_CONTAIN_EN_0, _CREDIT_TIME_OUT_ERR, __PROD); + + enable = report_fatal | report_nonfatal; + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _ERR_LOG_EN_0, enable); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _ERR_FATAL_REPORT_EN_0, report_fatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0, report_nonfatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _ERR_CORRECTABLE_REPORT_EN_0, 0); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _ERR_CONTAIN_EN_0, contain); + + chip_device->intr_mask.egress.fatal = report_fatal; + chip_device->intr_mask.egress.nonfatal = report_nonfatal; +} + +static void +_nvswitch_initialize_tstate_interrupts +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 enable; + NvU32 report_fatal; + NvU32 report_nonfatal; + NvU32 contain; + + // TD_TID errors are disabled on both fatal & non-fatal trees since TD_TID RAM is no longer used.
+ + report_fatal = + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _TAGPOOLBUFERR, _ENABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _TAGPOOL_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _TAGPOOL_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTOREBUFERR, _ENABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _TD_TID_RAMBUFERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _TD_TID_RAM_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _TD_TID_RAM_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _ATO_ERR, _ENABLE) | + DRF_DEF(_TSTATE, _ERR_FATAL_REPORT_EN_0, _CAMRSP_ERR, _ENABLE); + + report_nonfatal = + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TAGPOOLBUFERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TAGPOOL_ECC_LIMIT_ERR, _ENABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TAGPOOL_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CRUMBSTOREBUFERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_LIMIT_ERR, _ENABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CRUMBSTORE_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TD_TID_RAMBUFERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TD_TID_RAM_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _TD_TID_RAM_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _ATO_ERR, _DISABLE) | + DRF_DEF(_TSTATE, _ERR_NON_FATAL_REPORT_EN_0, _CAMRSP_ERR, _DISABLE); + + contain = + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _TAGPOOLBUFERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _TAGPOOL_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _TAGPOOL_ECC_DBE_ERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _CRUMBSTOREBUFERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _CRUMBSTORE_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _CRUMBSTORE_ECC_DBE_ERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _TD_TID_RAMBUFERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _TD_TID_RAM_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _TD_TID_RAM_ECC_DBE_ERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _ATO_ERR, __PROD) | + DRF_DEF(_TSTATE, _ERR_CONTAIN_EN_0, _CAMRSP_ERR, __PROD); + + enable = report_fatal | report_nonfatal; + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ERR_LOG_EN_0, enable); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ERR_FATAL_REPORT_EN_0, report_fatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0, report_nonfatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ERR_CORRECTABLE_REPORT_EN_0, 0); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ERR_CONTAIN_EN_0, contain); + + chip_device->intr_mask.tstate.fatal = report_fatal; + chip_device->intr_mask.tstate.nonfatal = report_nonfatal; +} + +static void +_nvswitch_initialize_sourcetrack_interrupts +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 enable; + NvU32 report_fatal; + NvU32 report_nonfatal; + NvU32 contain; + + report_fatal = + DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, 
_CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR, _ENABLE) | + DRF_DEF(_SOURCETRACK, _ERR_FATAL_REPORT_EN_0, _SOURCETRACK_TIME_OUT_ERR, _ENABLE); + + report_nonfatal = + DRF_DEF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, _ENABLE) | + DRF_DEF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR, _ENABLE) | + DRF_DEF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, _SOURCETRACK_TIME_OUT_ERR, _DISABLE); + + contain = + DRF_DEF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR, __PROD) | + DRF_DEF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, __PROD) | + DRF_DEF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_DBE_ERR, __PROD) | + DRF_DEF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR, __PROD) | + DRF_DEF(_SOURCETRACK, _ERR_CONTAIN_EN_0, _SOURCETRACK_TIME_OUT_ERR, __PROD); + + enable = report_fatal | report_nonfatal; + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _ERR_LOG_EN_0, enable); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0, report_fatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, report_nonfatal); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _ERR_CORRECTABLE_REPORT_EN_0, 0); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _ERR_CONTAIN_EN_0, contain); + + chip_device->intr_mask.sourcetrack.fatal = report_fatal; + chip_device->intr_mask.sourcetrack.nonfatal = report_nonfatal; + +} + +void +_nvswitch_initialize_nport_interrupts +( + nvswitch_device *device +) +{ + NvU32 val; + + val = + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 1) | + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 1) | + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 1); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _ERR_CONTROL_COMMON_NPORT, val); + + _nvswitch_initialize_route_interrupts(device); + _nvswitch_initialize_ingress_interrupts(device); + _nvswitch_initialize_egress_interrupts(device); + _nvswitch_initialize_tstate_interrupts(device); + _nvswitch_initialize_sourcetrack_interrupts(device); +} + +static void +_nvswitch_initialize_saw_interrupts +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _INTR_EN_SET_CORRECTABLE, chip_device->intr_enable_corr); + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _INTR_EN_SET_FATAL, chip_device->intr_enable_fatal); + 
NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _INTR_EN_SET_NONFATAL, chip_device->intr_enable_nonfatal); +} + +/* + * Initialize interrupt tree HW for all units. + * + * Init and servicing both depend on bits matching across STATUS/MASK + * and IErr STATUS/LOG/REPORT/CONTAIN registers. + */ +void +nvswitch_initialize_interrupt_tree_lr10 +( + nvswitch_device *device +) +{ + _nvswitch_build_top_interrupt_mask_lr10(device); + + // Initialize legacy interrupt tree - depends on reset to disable + // unused interrupts + NVSWITCH_REG_WR32(device, _PBUS, _INTR_0, 0xffffffff); + + // Clear prior saved PRI error data + NVSWITCH_REG_WR32(device, _PBUS, _PRI_TIMEOUT_SAVE_0, + DRF_DEF(_PBUS, _PRI_TIMEOUT_SAVE_0, _TO, _CLEAR)); + + NVSWITCH_REG_WR32(device, _PBUS, _INTR_EN_0, + DRF_DEF(_PBUS, _INTR_EN_0, _PRI_SQUASH, _ENABLED) | + DRF_DEF(_PBUS, _INTR_EN_0, _PRI_FECSERR, _ENABLED) | + DRF_DEF(_PBUS, _INTR_EN_0, _PRI_TIMEOUT, _ENABLED) | + DRF_DEF(_PBUS, _INTR_EN_0, _SW, _ENABLED)); + + // SAW block + _nvswitch_initialize_saw_interrupts(device); + + // NPG/NPORT + _nvswitch_initialize_nport_interrupts(device); + + // NVLIPT interrupts + _nvswitch_initialize_nvlipt_interrupts_lr10(device); + + // NXBAR interrupts + _nvswitch_initialize_nxbar_interrupts(device); +} + +/* + * @brief Service MINION Falcon interrupts on the requested interrupt tree + * Falcon Interrupts are a little in unqiue in how they are handled:#include + * IRQSTAT is used to read in interrupt status from FALCON + * IRQMASK is used to read in mask of interrupts + * IRQDEST is used to read in enabled interrupts that are routed to the HOST + * + * IRQSTAT & IRQMASK gives the pending interrupting on this minion + * + * @param[in] device MINION on this device + * @param[in] instance MINION instance + * + */ +NvlStatus +nvswitch_minion_service_falcon_interrupts_lr10 +( + nvswitch_device *device, + NvU32 instance +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, unhandled, intr, link; + + link = instance * NVSWITCH_LINKS_PER_MINION; + report.raw_pending = NVSWITCH_MINION_RD32_LR10(device, instance, _CMINION, _FALCON_IRQSTAT); + report.raw_enable = chip_device->intr_minion_dest; + report.mask = NVSWITCH_MINION_RD32_LR10(device, instance, _CMINION, _FALCON_IRQMASK); + + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _WDTMR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_MINION_WATCHDOG, "MINION Watchdog timer ran out", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _HALT, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_MINION_HALT, "MINION HALT", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _EXTERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_MINION_EXTERR, "MINION EXTERR", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _SWGEN0, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_PRINT(device, INFO, + "%s: Received MINION Falcon SWGEN0 interrupt on MINION %d.\n", + __FUNCTION__, instance); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_CMINION_FALCON, _IRQSTAT, _SWGEN1, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_PRINT(device, INFO, + "%s: 
Received MINION Falcon SWGEN1 interrupt on MINION %d.\n", + __FUNCTION__, instance); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (device->link[link].fatal_error_occurred) + { + intr = NVSWITCH_MINION_RD32_LR10(device, instance, _MINION, _MINION_INTR_STALL_EN); + intr = FLD_SET_DRF(_MINION, _MINION_INTR_STALL_EN, _FATAL, _DISABLE, intr); + intr = FLD_SET_DRF(_MINION, _MINION_INTR_STALL_EN, _FALCON_STALL, _DISABLE, intr); + NVSWITCH_MINION_WR32_LR10(device, instance, _MINION, _MINION_INTR_STALL_EN, intr); + } + + // Write to IRQSCLR to clear status of interrupt + NVSWITCH_MINION_WR32_LR10(device, instance, _CMINION, _FALCON_IRQSCLR, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +// +// Check if there are interrupts pending. +// +// On silicon/emulation we only use MSIs which are not shared, so this +// function does not need to be called. +// +// FSF/RTMsim does not model interrupts correctly. The interrupt is shared +// with USB so we must check the HW status. In addition we must disable +// interrupts to run the interrupt thread. On silicon this is done +// automatically in XVE. +// +// This is called in the ISR context by the Linux driver. The WAR does +// access more of device outside the Linux mutex than it should. Sim only +// supports 1 device currently so these fields are safe while interrupts +// are enabled. +// +// TODO: Bug 1881361 to remove the FSF WAR +// +NvlStatus +nvswitch_lib_check_interrupts_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 saw_legacy_intr_enable = 0; + NvU32 pending; + + if (IS_RTLSIM(device) || IS_FMODEL(device)) + { + pending = NVSWITCH_REG_RD32(device, _PSMC, _INTR_LEGACY); + pending &= chip_device->intr_enable_legacy; + if (pending) + { + NVSWITCH_PRINT(device, WARN, + "%s: _PSMC, _INTR_LEGACY pending (0x%0x)\n", + __FUNCTION__, pending); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + if (FLD_TEST_DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _PTIMER, 1, chip_device->intr_enable_legacy)) + { + saw_legacy_intr_enable = FLD_SET_DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, _PTIMER_0, 1, saw_legacy_intr_enable); + saw_legacy_intr_enable = FLD_SET_DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, _PTIMER_1, 1, saw_legacy_intr_enable); + } + if (FLD_TEST_DRF_NUM(_PSMC, _INTR_EN_SET_LEGACY, _PMGR, 1, chip_device->intr_enable_legacy)) + { + saw_legacy_intr_enable = FLD_SET_DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, _PMGR_0, 1, saw_legacy_intr_enable); + saw_legacy_intr_enable = FLD_SET_DRF_NUM(_NVLSAW_NVSPMC, _INTR_EN_SET_LEGACY, _PMGR_1, 1, saw_legacy_intr_enable); + } + + pending = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_LEGACY); + pending &= saw_legacy_intr_enable; + if (pending) + { + NVSWITCH_PRINT(device, WARN, + "%s: _NVLSAW_NVSPMC, _INTR_LEGACY pending (0x%0x)\n", + __FUNCTION__, pending); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + // Fatal Interrupts + pending = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_FATAL); + pending &= chip_device->intr_enable_fatal; + if (pending) + { + NVSWITCH_PRINT(device, WARN, + "%s: _NVLSAW_NVSPMC, _INTR_FATAL pending (0x%0x)\n", + __FUNCTION__, pending); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + // Non-Fatal interrupts + pending = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_NONFATAL); + pending &= chip_device->intr_enable_nonfatal; + if (pending) + { + NVSWITCH_PRINT(device, WARN, + "%s: _NVLSAW_NVSPMC, 
_INTR_NONFATAL pending (0x%0x)\n", + __FUNCTION__, pending); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; + } + else + { + return -NVL_MORE_PROCESSING_REQUIRED; + } +} + +/* + * The MSI interrupt block must be re-armed after servicing interrupts. This + * write generates an EOI, which allows further MSIs to be triggered. + */ +static void +_nvswitch_rearm_msi_lr10 +( + nvswitch_device *device +) +{ + NVSWITCH_ENG_WR32_LR10(device, XVE, , 0, _XVE_CYA, _2, 0xff); +} + +static NvlStatus +_nvswitch_service_pbus_lr10 +( + nvswitch_device *device +) +{ + NvU32 pending, mask, bit, unhandled; + NvU32 save0, save1, save3, errCode; + NVSWITCH_PRI_TIMEOUT_ERROR_LOG_TYPE pri_timeout = { 0 }; + + pending = NVSWITCH_REG_RD32(device, _PBUS, _INTR_0); + mask = NVSWITCH_REG_RD32(device, _PBUS, _INTR_EN_0); + pending &= mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + bit = DRF_DEF(_PBUS, _INTR_0, _PRI_SQUASH, _PENDING) | + DRF_DEF(_PBUS, _INTR_0, _PRI_FECSERR, _PENDING) | + DRF_DEF(_PBUS, _INTR_0, _PRI_TIMEOUT, _PENDING); + + if (nvswitch_test_flags(pending, bit)) + { + // PRI timeout is likely not recoverable + NVSWITCH_REG_WR32(device, _PBUS, _INTR_0, + DRF_DEF(_PBUS, _INTR_0, _PRI_TIMEOUT, _RESET)); + + save0 = NVSWITCH_REG_RD32(device, _PBUS, _PRI_TIMEOUT_SAVE_0); + save1 = NVSWITCH_REG_RD32(device, _PBUS, _PRI_TIMEOUT_SAVE_1); + save3 = NVSWITCH_REG_RD32(device, _PBUS, _PRI_TIMEOUT_SAVE_3); + errCode = NVSWITCH_REG_RD32(device, _PBUS, _PRI_TIMEOUT_FECS_ERRCODE); + + pri_timeout.addr = DRF_VAL(_PBUS, _PRI_TIMEOUT_SAVE_0, _ADDR, save0) * 4; + pri_timeout.data = DRF_VAL(_PBUS, _PRI_TIMEOUT_SAVE_1, _DATA, save1); + pri_timeout.write = DRF_VAL(_PBUS, _PRI_TIMEOUT_SAVE_0, _WRITE, save0); + pri_timeout.dest = DRF_VAL(_PBUS, _PRI_TIMEOUT_SAVE_0, _TO, save0); + pri_timeout.subId = DRF_VAL(_PBUS, _PRI_TIMEOUT_SAVE_3, _SUBID, save3); + pri_timeout.errCode = DRF_VAL(_PBUS, _PRI_TIMEOUT_FECS_ERRCODE, _DATA, errCode); + + // Dump register values as well + pri_timeout.raw_data[0] = save0; + pri_timeout.raw_data[1] = save1; + pri_timeout.raw_data[2] = save3; + pri_timeout.raw_data[3] = errCode; + + NVSWITCH_PRINT(device, ERROR, + "PBUS PRI error: %s offset: 0x%x data: 0x%x to: %d, " + "subId: 0x%x, FECS errCode: 0x%x\n", + pri_timeout.write ? 
"write" : "read", + pri_timeout.addr, + pri_timeout.data, + pri_timeout.dest, + pri_timeout.subId, + pri_timeout.errCode); + + if (FLD_TEST_DRF(_PBUS, _INTR_0, _PRI_SQUASH, _PENDING, bit)) + { + NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_HW_HOST_PRIV_TIMEOUT, "PBUS PRI SQUASH error", NVSWITCH_PBUS_PRI_SQUASH, 0, pri_timeout); + NVSWITCH_PRINT(device, ERROR, "PRI_SQUASH: " + "PBUS PRI error due to pri access while target block is in reset\n"); + } + + if (FLD_TEST_DRF(_PBUS, _INTR_0, _PRI_FECSERR, _PENDING, bit)) + { + NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_HW_HOST_PRIV_TIMEOUT, "PBUS PRI FECSERR error", NVSWITCH_PBUS_PRI_FECSERR, 0, pri_timeout); + NVSWITCH_PRINT(device, ERROR, "PRI_FECSERR: " + "FECS detected the error while processing a PRI request\n"); + } + + if (FLD_TEST_DRF(_PBUS, _INTR_0, _PRI_TIMEOUT, _PENDING, bit)) + { + NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_HW_HOST_PRIV_TIMEOUT, "PBUS PRI TIMEOUT error", NVSWITCH_PBUS_PRI_TIMEOUT, 0, pri_timeout); + NVSWITCH_PRINT(device, ERROR, "PRI_TIMEOUT: " + "PBUS PRI error due non-existent host register or timeout waiting for FECS\n"); + } + + // allow next error to latch + NVSWITCH_REG_WR32(device, _PBUS, _PRI_TIMEOUT_SAVE_0, + FLD_SET_DRF(_PBUS, _PRI_TIMEOUT_SAVE_0, _TO, _CLEAR, save0)); + + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_DEF(_PBUS, _INTR_0, _SW, _PENDING); + if (nvswitch_test_flags(pending, bit)) + { + // Useful for debugging SW interrupts + NVSWITCH_PRINT(device, INFO, "SW intr\n"); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_REG_WR32(device, _PBUS, _INTR_0, pending); // W1C with _RESET + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_priv_ring_lr10 +( + nvswitch_device *device +) +{ + NvU32 pending, i; + NVSWITCH_PRI_ERROR_LOG_TYPE pri_error; + + pending = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_INTERRUPT_STATUS0); + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + if (FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, + _GBL_WRITE_ERROR_SYS, 1, pending)) + { + pri_error.addr = NVSWITCH_REG_RD32(device, _PPRIV_SYS, _PRIV_ERROR_ADR); + pri_error.data = NVSWITCH_REG_RD32(device, _PPRIV_SYS, _PRIV_ERROR_WRDAT); + pri_error.info = NVSWITCH_REG_RD32(device, _PPRIV_SYS, _PRIV_ERROR_INFO); + pri_error.code = NVSWITCH_REG_RD32(device, _PPRIV_SYS, _PRIV_ERROR_CODE); + + NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_HW_HOST_PRIV_ERROR, "PRI WRITE SYS error", NVSWITCH_PPRIV_WRITE_SYS, 0, pri_error); + + NVSWITCH_PRINT(device, ERROR, + "SYS PRI write error addr: 0x%08x data: 0x%08x info: 0x%08x code: 0x%08x\n", + pri_error.addr, pri_error.data, + pri_error.info, pri_error.code); + + pending = FLD_SET_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, + _GBL_WRITE_ERROR_SYS, 0, pending); + } + + for (i = 0; i < NVSWITCH_NUM_PRIV_PRT_LR10; i++) + { + if (DRF_VAL(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, + _GBL_WRITE_ERROR_FBP, pending) & NVBIT(i)) + { + pri_error.addr = NVSWITCH_REG_RD32(device, _PPRIV_PRT_PRT, _PRIV_ERROR_ADR(i)); + pri_error.data = NVSWITCH_REG_RD32(device, _PPRIV_PRT_PRT, _PRIV_ERROR_WRDAT(i)); + pri_error.info = NVSWITCH_REG_RD32(device, _PPRIV_PRT_PRT, _PRIV_ERROR_INFO(i)); + pri_error.code = NVSWITCH_REG_RD32(device, _PPRIV_PRT_PRT, _PRIV_ERROR_CODE(i)); + + NVSWITCH_REPORT_PRI_ERROR_NONFATAL(_HW_HOST_PRIV_ERROR, "PRI WRITE PRT error", NVSWITCH_PPRIV_WRITE_PRT, i, pri_error); + + NVSWITCH_PRINT(device, ERROR, + "PRT%d PRI write error addr: 0x%08x data: 
0x%08x info: 0x%08x code: 0x%08x\n", + i, pri_error.addr, pri_error.data, pri_error.info, pri_error.code); + + pending &= ~DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, + _GBL_WRITE_ERROR_FBP, NVBIT(i)); + } + } + + if (pending != 0) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_PRIV_ERROR, + "Fatal, Unexpected PRI error\n"); + NVSWITCH_LOG_FATAL_DATA(device, _HW, _HW_HOST_PRIV_ERROR, 2, 0, NV_FALSE, &pending); + + NVSWITCH_PRINT(device, ERROR, + "Unexpected PRI error 0x%08x\n", pending); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + // TODO reset the priv ring like GPU driver? + + // acknowledge the interrupt to the ringmaster + nvswitch_ring_master_cmd_lr10(device, + DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ACK_INTERRUPT)); + + return NVL_SUCCESS; +} + +static void +_nvswitch_save_route_err_header_lr10 +( + nvswitch_device *device, + NvU32 link, + NVSWITCH_RAW_ERROR_LOG_TYPE *data +) +{ + NvU32 val; + NvU32 i = 0; + + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_TIMESTAMP_LOG); + val = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_VALID); + + if (FLD_TEST_DRF_NUM(_ROUTE, _ERR_HEADER_LOG_VALID, _HEADERVALID0, 1, val)) + { + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_MISC_LOG_0); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_0); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_1); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_2); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_3); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_4); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_5); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_6); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_7); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_8); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_9); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_HEADER_LOG_10); + } +} + +static NvlStatus +_nvswitch_service_route_fatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, contain, unhandled; + NVSWITCH_RAW_ERROR_LOG_TYPE data = {{ 0 }}; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.route.fatal; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_FIRST_0); + contain = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_CONTAIN_EN_0); + _nvswitch_save_route_err_header_lr10(device, link, &data); + + bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _ROUTEBUFERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_ROUTEBUFERR, "route buffer over/underflow", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_ROUTEBUFERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_ROUTE, 
_ERR_STATUS_0, _GLT_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, + _ERR_GLT_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_ROUTE_ERR_GLT, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, + _ERR_GLT_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_GLT_ECC_DBE_ERR, "route GLT DBE", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_GLT_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_ROUTE_GLT_ECC_DBE_ERR, link, bAddressValid, + address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _TRANSDONERESVERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_TRANSDONERESVERR, "route transdone over/underflow", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_TRANSDONERESVERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _PDCTRLPARERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_PDCTRLPARERR, "route parity", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_PDCTRLPARERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_NVS_ECC_DBE_ERR, "route incoming DBE", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_NVS_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_ROUTE_NVS_ECC_DBE_ERR, link, NV_FALSE, 0, + NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + + // Clear associated LIMIT_ERR interrupt + if (report.raw_pending & DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_LIMIT_ERR, 1)) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_STATUS_0, + DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_LIMIT_ERR, 1)); + } + } + + bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _CDTPARERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_ROUTE_CDTPARERR, "route credit parity", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_ROUTE_CDTPARERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_ROUTE_CDTPARERR, link, NV_FALSE, 0, + NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. 
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_FIRST_0, + report.raw_first & report.mask); + } + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_route_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, unhandled; + NVSWITCH_RAW_ERROR_LOG_TYPE data = {{ 0 }}; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.route.nonfatal; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_FIRST_0); + _nvswitch_save_route_err_header_lr10(device, link, &data); + + bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _NOPORTDEFINEDERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_NOPORTDEFINEDERR, "route undefined route"); + NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_NOPORTDEFINEDERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _INVALIDROUTEPOLICYERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_INVALIDROUTEPOLICYERR, "route invalid policy"); + NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_INVALIDROUTEPOLICYERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + // Ignore LIMIT error if DBE is pending + if (!(nvswitch_test_flags(report.raw_pending, + DRF_NUM(_ROUTE, _ERR_STATUS_0, _NVS_ECC_DBE_ERR, 1)))) + { + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER); + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, "route incoming ECC limit"); + NVSWITCH_REPORT_DATA(_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, data); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_ROUTE_NVS_ECC_LIMIT_ERR, link, NV_FALSE, 0, + NV_FALSE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_STATUS_0, pending); + + // + // Note, when traffic is flowing, if we reset ERR_COUNT before ERR_STATUS + // register, we won't see an interrupt again until counter wraps around. 
+ // In that case, we will miss writing back many ECC victim entries. Hence, + // always clear _ERR_COUNT only after _ERR_STATUS register is cleared! + // + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, 0x0); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +// +// Ingress +// + +static void +_nvswitch_save_ingress_err_header_lr10 +( + nvswitch_device *device, + NvU32 link, + NVSWITCH_RAW_ERROR_LOG_TYPE *data +) +{ + NvU32 val; + NvU32 i = 0; + + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_TIMESTAMP_LOG); + + val = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_VALID); + if (FLD_TEST_DRF_NUM(_INGRESS, _ERR_HEADER_LOG_VALID, _HEADERVALID0, 1, val)) + { + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_MISC_LOG_0); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_0); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_1); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_2); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_3); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_4); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_5); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_6); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_7); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_8); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_9); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_HEADER_LOG_10); + } + else + { + data->data[i++] = 0xdeadbeef; + } +} + +static NvlStatus +_nvswitch_service_ingress_fatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, contain, unhandled; + NVSWITCH_RAW_ERROR_LOG_TYPE data = {{ 0 }}; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.ingress.fatal; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_FIRST_0); + contain = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_CONTAIN_EN_0); + _nvswitch_save_ingress_err_header_lr10(device, link, &data); + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _CMDDECODEERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_CMDDECODEERR, "ingress invalid command", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_CMDDECODEERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER); + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_DBE_ERR, "ingress header DBE", NV_FALSE); + 
NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_HDR_ECC_DBE_ERR, link, NV_FALSE, 0, + NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + + // Clear associated LIMIT_ERR interrupt + if (report.raw_pending & DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_LIMIT_ERR, 1)) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_STATUS_0, + DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_LIMIT_ERR, 1)); + } + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _INVALIDVCSET, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_INVALIDVCSET, "ingress invalid VCSet", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_INVALIDVCSET, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _REMAPTAB_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, + _ERR_REMAPTAB_ECC_ERROR_ADDRESS); + + if (FLD_TEST_DRF(_INGRESS_ERR_REMAPTAB, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, + _ERR_REMAPTAB_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_COUNTER); + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_ADDRESS); + report.data[2] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_REMAPTAB_ECC_ERROR_ADDRESS_VALID); + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_REMAPTAB_ECC_DBE_ERR, "ingress Remap DBE", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_REMAPTAB_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_INGRESS_REMAPTAB_ECC_DBE_ERR, link, bAddressValid, + address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RIDTAB_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, + _ERR_RIDTAB_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_INGRESS_ERR_RIDTAB, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, + _ERR_RIDTAB_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_COUNTER); + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_ADDRESS); + report.data[2] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_RIDTAB_ECC_ERROR_ADDRESS_VALID); + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_RIDTAB_ECC_DBE_ERR, "ingress RID DBE", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_RIDTAB_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_INGRESS_RIDTAB_ECC_DBE_ERR, link, bAddressValid, + address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RLANTAB_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool 
bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, + _ERR_RLANTAB_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_INGRESS_ERR_RLANTAB, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, + _ERR_RLANTAB_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_COUNTER); + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_ADDRESS); + report.data[2] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_RLANTAB_ECC_ERROR_ADDRESS_VALID); + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_RLANTAB_ECC_DBE_ERR, "ingress RLAN DBE", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_RLANTAB_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_INGRESS_RLANTAB_ECC_DBE_ERR, link, bAddressValid, + address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_PARITY_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_INGRESS_NCISOC_PARITY_ERR, "ingress control parity", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_INGRESS_NCISOC_PARITY_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_PARITY_ERR, link, NV_FALSE, 0, + NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. 
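+ //
+ // Since pending = raw_pending & raw_enable & intr_mask, every bit set in 'pending'
+ // is also set in 'raw_enable', so (raw_enable ^ pending) below simply drops the
+ // report enables for the errors just serviced while leaving all other enables intact.
+ //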
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_ingress_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, unhandled; + NVSWITCH_RAW_ERROR_LOG_TYPE data = {{ 0 }}; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.ingress.nonfatal; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_FIRST_0); + _nvswitch_save_ingress_err_header_lr10(device, link, &data); + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _REQCONTEXTMISMATCHERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_REQCONTEXTMISMATCHERR, "ingress request context mismatch"); + NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_REQCONTEXTMISMATCHERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _ACLFAIL, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_ACLFAIL, "ingress invalid ACL"); + NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_ACLFAIL, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + // Ignore LIMIT error if DBE is pending + if (!(nvswitch_test_flags(report.raw_pending, + DRF_NUM(_INGRESS, _ERR_STATUS_0, _NCISOC_HDR_ECC_DBE_ERR, 1)))) + { + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER); + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR, "ingress header ECC"); + NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR, data); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_INGRESS_NCISOC_HDR_ECC_LIMIT_ERR, link, NV_FALSE, 0, + NV_FALSE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _ADDRBOUNDSERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_ADDRBOUNDSERR, "ingress address bounds"); + NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_ADDRBOUNDSERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RIDTABCFGERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_RIDTABCFGERR, "ingress RID packet"); + NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_RIDTABCFGERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _RLANTABCFGERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + 
NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_RLANTABCFGERR, "ingress RLAN packet"); + NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_RLANTABCFGERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_INGRESS, _ERR_STATUS_0, _ADDRTYPEERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_INGRESS_ADDRTYPEERR, "ingress illegal address"); + NVSWITCH_REPORT_DATA(_HW_NPORT_INGRESS_ADDRTYPEERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +// +// Egress +// + +static void +_nvswitch_save_egress_err_header_lr10 +( + nvswitch_device *device, + NvU32 link, + NVSWITCH_RAW_ERROR_LOG_TYPE *data +) +{ + NvU32 val; + NvU32 i = 0; + + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_TIMESTAMP_LOG); + + val = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_VALID); + if (FLD_TEST_DRF_NUM(_EGRESS, _ERR_HEADER_LOG_VALID, _HEADERVALID0, 1, val)) + { + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_MISC_LOG_0); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_0); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_1); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_2); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_3); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_4); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_5); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_6); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_7); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_8); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_9); + data->data[i++] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_HEADER_LOG_10); + } + else + { + data->data[i++] = 0xdeadbeef; + } +} + +static NvlStatus +_nvswitch_service_tstate_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, unhandled; + NVSWITCH_RAW_ERROR_LOG_TYPE data = {{ 0 }}; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.tstate.nonfatal; + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_MISC_LOG_0); + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + 
unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_FIRST_0); + + bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + // Ignore LIMIT error if DBE is pending + if(!(nvswitch_test_flags(report.raw_pending, + DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_DBE_ERR, 1)))) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, + _ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_TSTATE_ERR_TAGPOOL, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, + _ERR_TAGPOOL_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, + DRF_DEF(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT)); + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_TSTATE_TAGPOOL_ECC_LIMIT_ERR, "TS tag store single-bit threshold"); + _nvswitch_save_egress_err_header_lr10(device, link, &data); + NVSWITCH_REPORT_DATA(_HW_NPORT_TSTATE_TAGPOOL_ECC_LIMIT_ERR, data); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_TSTATE_TAGPOOL_ECC_LIMIT_ERR, link, + bAddressValid, address, NV_FALSE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + // Ignore LIMIT error if DBE is pending + if(!(nvswitch_test_flags(report.raw_pending, + DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_DBE_ERR, 1)))) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, + _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_TSTATE_ERR_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, + _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, + DRF_DEF(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT)); + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_TSTATE_CRUMBSTORE_ECC_LIMIT_ERR, "TS crumbstore single-bit threshold"); + _nvswitch_save_ingress_err_header_lr10(device, link, &data); + NVSWITCH_REPORT_DATA(_HW_NPORT_TSTATE_CRUMBSTORE_ECC_LIMIT_ERR, data); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_TSTATE_CRUMBSTORE_ECC_LIMIT_ERR, link, + bAddressValid, address, NV_FALSE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. 
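+ //
+ // Note: the TAGPOOL/CRUMBSTORE ECC LIMIT bits above are not reported when the
+ // corresponding DBE bit is also raw-pending ("Ignore LIMIT error if DBE is pending");
+ // the fatal (DBE) handler logs that event and clears the LIMIT status itself, so
+ // reporting it here as well would be redundant.
+ //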
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_tstate_fatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, contain, unhandled; + NVSWITCH_RAW_ERROR_LOG_TYPE data = {{ 0 }}; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.tstate.fatal; + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_MISC_LOG_0); + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_FIRST_0); + contain = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_CONTAIN_EN_0); + + bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOLBUFERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_TAGPOOLBUFERR, "TS pointer crossover", NV_FALSE); + _nvswitch_save_egress_err_header_lr10(device, link, &data); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_TAGPOOLBUFERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, + _ERR_TAGPOOL_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_TSTATE_ERR_TAGPOOL, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, + _ERR_TAGPOOL_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, + DRF_DEF(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT)); + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_TAGPOOL_ECC_DBE_ERR, "TS tag store fatal ECC", NV_FALSE); + _nvswitch_save_egress_err_header_lr10(device, link, &data); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_TAGPOOL_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_TSTATE_TAGPOOL_ECC_DBE_ERR, link, bAddressValid, + address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + + // Clear associated LIMIT_ERR interrupt + if (report.raw_pending & DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1)) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_STATUS_0, + DRF_NUM(_TSTATE, _ERR_STATUS_0, _TAGPOOL_ECC_LIMIT_ERR, 1)); + } + } + + bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTOREBUFERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_CRUMBSTOREBUFERR, 
"TS crumbstore", NV_FALSE); + _nvswitch_save_egress_err_header_lr10(device, link, &data); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_CRUMBSTOREBUFERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, + _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_TSTATE_ERR_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, + _ERR_CRUMBSTORE_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, + DRF_DEF(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, _INIT)); + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_CRUMBSTORE_ECC_DBE_ERR, "TS crumbstore fatal ECC", NV_FALSE); + _nvswitch_save_ingress_err_header_lr10(device, link, &data); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_CRUMBSTORE_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_TSTATE_CRUMBSTORE_ECC_DBE_ERR, link, bAddressValid, + address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + + // Clear associated LIMIT_ERR interrupt + if (report.raw_pending & DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1)) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_STATUS_0, + DRF_NUM(_TSTATE, _ERR_STATUS_0, _CRUMBSTORE_ECC_LIMIT_ERR, 1)); + } + } + + bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _ATO_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + if (FLD_TEST_DRF_NUM(_TSTATE, _ERR_FIRST_0, _ATO_ERR, 1, report.raw_first)) + { + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _TSTATE, _ERR_DEBUG); + } + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_ATO_ERR, "TS ATO timeout", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_TSTATE, _ERR_STATUS_0, _CAMRSP_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_TSTATE_CAMRSP_ERR, "Rsp Tag value out of range", NV_FALSE); + _nvswitch_save_ingress_err_header_lr10(device, link, &data); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_TSTATE_CAMRSP_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. 
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_egress_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, unhandled; + NVSWITCH_RAW_ERROR_LOG_TYPE data = { { 0 } }; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.egress.nonfatal; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_FIRST_0); + _nvswitch_save_egress_err_header_lr10(device, link, &data); + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + // Ignore LIMIT error if DBE is pending + if (!(nvswitch_test_flags(report.raw_pending, + DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_DBE_ERR, 1)))) + { + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER); + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, "egress input ECC error limit"); + NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, data); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_ECC_LIMIT_ERR, link, NV_FALSE, 0, + NV_FALSE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + // Ignore LIMIT error if DBE is pending + if(!(nvswitch_test_flags(report.raw_pending, + DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_DBE_ERR, 1)))) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, + _ERR_RAM_OUT_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_EGRESS_ERR_RAM_OUT, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, + _ERR_RAM_OUT_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER); + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_ADDRESS); + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR, "egress output ECC error limit"); + NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR, data); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_LIMIT_ERR, link, bAddressValid, address, + NV_FALSE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _URRSPERR, 1); 
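+ //
+ // DRF_NUM(d, r, f, n) shifts the value n into field f of the named register; here,
+ // with n = 1 on the single-bit _URRSPERR field of NV_EGRESS_ERR_STATUS_0, it is
+ // effectively (1 << <field position>), i.e. a mask for that one status bit, which
+ // nvswitch_test_flags() then checks against the filtered 'pending' word.
+ //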
+ if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_DROPNPURRSPERR, "egress non-posted UR"); + NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_DROPNPURRSPERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _PRIVRSPERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_PRIVRSPERR, "egress non-posted PRIV error"); + NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_PRIVRSPERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _HWRSPERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_EGRESS_HWRSPERR, "egress non-posted HW error"); + NVSWITCH_REPORT_DATA(_HW_NPORT_EGRESS_HWRSPERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_egress_fatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, contain, unhandled; + NVSWITCH_RAW_ERROR_LOG_TYPE data = {{ 0 }}; + NVSWITCH_RAW_ERROR_LOG_TYPE credit_data = { { 0 } }; + NVSWITCH_RAW_ERROR_LOG_TYPE buffer_data = { { 0 } }; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.egress.fatal; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_FIRST_0); + contain = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _ERR_CONTAIN_EN_0); + _nvswitch_save_egress_err_header_lr10(device, link, &data); + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _EGRESSBUFERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_EGRESSBUFERR, "egress crossbar overflow", NV_TRUE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_EGRESSBUFERR, data); + + buffer_data.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _BUFFER_POINTERS0); + buffer_data.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _BUFFER_POINTERS1); + buffer_data.data[2] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _BUFFER_POINTERS2); + buffer_data.data[3] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _BUFFER_POINTERS3); + buffer_data.data[4] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _BUFFER_POINTERS4); + buffer_data.data[5] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _BUFFER_POINTERS5); + buffer_data.data[6] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _BUFFER_POINTERS6); + buffer_data.data[7] = 
NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _BUFFER_POINTERS7); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_EGRESSBUFERR, buffer_data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _PKTROUTEERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_PKTROUTEERR, "egress packet route", NV_TRUE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_PKTROUTEERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _SEQIDERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_SEQIDERR, "egress sequence ID error", NV_TRUE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_SEQIDERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_DBE_ERR, "egress input ECC DBE error", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NXBAR_HDR_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_ECC_DBE_ERR, link, NV_FALSE, 0, + NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + + // Clear associated LIMIT_ERR interrupt + if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_LIMIT_ERR, 1)) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_STATUS_0, + DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_ECC_LIMIT_ERR, 1)); + } + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, + _ERR_RAM_OUT_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_EGRESS_ERR_RAM_OUT, _ECC_ERROR_ADDRESS_VALID, _VALID, _VALID, + addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, + _ERR_RAM_OUT_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_DBE_ERR, "egress output ECC DBE error", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_DBE_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_EGRESS_RAM_OUT_HDR_ECC_DBE_ERR, link, bAddressValid, + address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + + // Clear associated LIMIT_ERR interrupt + if (report.raw_pending & DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, 1)) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_STATUS_0, + DRF_NUM(_EGRESS, _ERR_STATUS_0, _RAM_OUT_HDR_ECC_LIMIT_ERR, 1)); + } + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NCISOCCREDITOVFL, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NCISOCCREDITOVFL, "egress credit overflow", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NCISOCCREDITOVFL, data); + + credit_data.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT0); + credit_data.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT1); + credit_data.data[2] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT2); + credit_data.data[3] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT3); + credit_data.data[4] = 
NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT4); + credit_data.data[5] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT5); + credit_data.data[6] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT6); + credit_data.data[7] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT7); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NCISOCCREDITOVFL, credit_data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _REQTGTIDMISMATCHERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_REQTGTIDMISMATCHERR, "egress destination request ID error", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_REQTGTIDMISMATCHERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _RSPREQIDMISMATCHERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_RSPREQIDMISMATCHERR, "egress destination response ID error", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_RSPREQIDMISMATCHERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_HDR_PARITY_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NXBAR_HDR_PARITY_ERR, "egress control parity error", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NXBAR_HDR_PARITY_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_EGRESS_NXBAR_HDR_PARITY_ERR, link, NV_FALSE, 0, + NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NCISOC_CREDIT_PARITY_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR, "egress credit parity error", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR, data); + + credit_data.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT0); + credit_data.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT1); + credit_data.data[2] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT2); + credit_data.data[3] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT3); + credit_data.data[4] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT4); + credit_data.data[5] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT5); + credit_data.data[6] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT6); + credit_data.data[7] = NVSWITCH_NPORT_RD32_LR10(device, link, _EGRESS, _NCISOC_CREDIT7); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR, credit_data); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_EGRESS_NCISOC_CREDIT_PARITY_ERR, link, NV_FALSE, 0, + NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _NXBAR_FLITTYPE_MISMATCH_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_NXBAR_FLITTYPE_MISMATCH_ERR, "egress flit type mismatch", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_NXBAR_FLITTYPE_MISMATCH_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_EGRESS, _ERR_STATUS_0, _CREDIT_TIME_OUT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + 
NVSWITCH_REPORT_CONTAIN(_HW_NPORT_EGRESS_CREDIT_TIME_OUT_ERR, "egress credit timeout", NV_FALSE); + NVSWITCH_REPORT_CONTAIN_DATA(_HW_NPORT_EGRESS_CREDIT_TIME_OUT_ERR, data); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_sourcetrack_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, unhandled; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, + _SOURCETRACK, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, + _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.sourcetrack.nonfatal; + + pending = report.raw_pending & report.mask; + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, _ERR_FIRST_0); + + bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + // Ignore LIMIT error if DBE is pending + if (!(nvswitch_test_flags(report.raw_pending, + DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, 1)))) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, + _VALID, _VALID, addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER); + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS); + report.data[2] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, + "sourcetrack TCEN0 crumbstore ECC limit err"); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, link, + bAddressValid, address, NV_FALSE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + // Ignore LIMIT error if DBE is pending + if (!(nvswitch_test_flags(report.raw_pending, + DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, 
_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR, 1)))) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, + _VALID, _VALID, addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER); + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS); + report.data[2] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + NVSWITCH_REPORT_NONFATAL(_HW_NPORT_SOURCETRACK_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR, + "sourcetrack TCEN1 crumbstore ECC limit err"); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR, link, + bAddressValid, address, NV_FALSE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + } + + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. + // + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_sourcetrack_fatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, contain, unhandled; + INFOROM_NVS_ECC_ERROR_EVENT err_event = {0}; + + report.raw_pending = NVSWITCH_NPORT_RD32_LR10(device, link, + _SOURCETRACK, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NPORT_RD32_LR10(device, link, + _SOURCETRACK, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & chip_device->intr_mask.sourcetrack.fatal; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, _ERR_FIRST_0); + contain = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, _ERR_CONTAIN_EN_0); + + bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, + _VALID, _VALID, addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, 
_SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS); + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, + "sourcetrack TCEN0 crumbstore DBE", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN0_CRUMBSTORE_ECC_DBE_ERR, + link, bAddressValid, address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + + // Clear associated LIMIT_ERR interrupt + if (report.raw_pending & DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, 1)) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_STATUS_0, + DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN0_CRUMBSTORE_ECC_LIMIT_ERR, 1)); + } + } + + bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NvBool bAddressValid = NV_FALSE; + NvU32 address = 0; + NvU32 addressValid = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + + if (FLD_TEST_DRF(_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE, _ECC_ERROR_ADDRESS_VALID, + _VALID, _VALID, addressValid)) + { + address = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS); + bAddressValid = NV_TRUE; + } + + report.data[0] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS); + report.data[1] = NVSWITCH_NPORT_RD32_LR10(device, link, _SOURCETRACK, + _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_ADDRESS_VALID); + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_SOURCETRACK_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR, + "sourcetrack TCEN1 crumbstore DBE", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + + _nvswitch_construct_ecc_error_event(&err_event, + NVSWITCH_ERR_HW_NPORT_SOURCETRACK_CREQ_TCEN1_CRUMBSTORE_ECC_DBE_ERR, + link, bAddressValid, address, NV_TRUE, 1); + + nvswitch_inforom_ecc_log_err_event(device, &err_event); + + // Clear associated LIMIT_ERR interrupt + if (report.raw_pending & DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR, 1)) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_STATUS_0, + DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _CREQ_TCEN1_CRUMBSTORE_ECC_LIMIT_ERR, 1)); + } + } + + bit = DRF_NUM(_SOURCETRACK, _ERR_STATUS_0, _SOURCETRACK_TIME_OUT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NPORT_SOURCETRACK_SOURCETRACK_TIME_OUT_ERR, + "sourcetrack timeout error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. 
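+ //
+ // _ERR_FIRST_0 appears to latch which error source(s) arrived first; writing the
+ // handled bits back below re-arms that latch so a later error can again be captured
+ // as "first", and only bits within the serviced mask are touched.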
+ // + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; + +} + +static NvlStatus +_nvswitch_service_nport_fatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + NvlStatus status[5]; + + status[0] = _nvswitch_service_route_fatal_lr10(device, link); + status[1] = _nvswitch_service_ingress_fatal_lr10(device, link); + status[2] = _nvswitch_service_egress_fatal_lr10(device, link); + status[3] = _nvswitch_service_tstate_fatal_lr10(device, link); + status[4] = _nvswitch_service_sourcetrack_fatal_lr10(device, link); + + if ((status[0] != NVL_SUCCESS) && + (status[1] != NVL_SUCCESS) && + (status[2] != NVL_SUCCESS) && + (status[3] != NVL_SUCCESS) && + (status[4] != NVL_SUCCESS)) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_npg_fatal_lr10 +( + nvswitch_device *device, + NvU32 npg +) +{ + NvU32 pending, mask, bit, unhandled; + NvU32 nport; + NvU32 link; + + pending = NVSWITCH_NPG_RD32_LR10(device, npg, _NPG, _NPG_INTERRUPT_STATUS); + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + mask = + DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _FATAL) | + DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _FATAL) | + DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _FATAL) | + DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV3_INT_STATUS, _FATAL); + pending &= mask; + unhandled = pending; + + for (nport = 0; nport < NVSWITCH_NPORT_PER_NPG; nport++) + { + switch (nport) + { + case 0: + bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _FATAL); + break; + case 1: + bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _FATAL); + break; + case 2: + bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _FATAL); + break; + case 3: + bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV3_INT_STATUS, _FATAL); + break; + default: + bit = 0; + NVSWITCH_ASSERT(0); + break; + } + if (nvswitch_test_flags(pending, bit)) + { + link = NPORT_TO_LINK(device, npg, nport); + if (NVSWITCH_ENG_VALID_LR10(device, NPORT, link)) + { + if (_nvswitch_service_nport_fatal_lr10(device, link) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nport_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + NvlStatus status[5]; + + status[0] = _nvswitch_service_route_nonfatal_lr10(device, link); + status[1] = _nvswitch_service_ingress_nonfatal_lr10(device, link); + status[2] = _nvswitch_service_egress_nonfatal_lr10(device, link); + status[3] = _nvswitch_service_tstate_nonfatal_lr10(device, link); + status[4] = _nvswitch_service_sourcetrack_nonfatal_lr10(device, link); + + if ((status[0] != NVL_SUCCESS) && + (status[1] != NVL_SUCCESS) && + (status[2] != NVL_SUCCESS) && + (status[3] != NVL_SUCCESS) && + (status[4] != NVL_SUCCESS)) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_npg_nonfatal_lr10 +( + 
nvswitch_device *device,
+ NvU32 npg
+)
+{
+ NvU32 pending, mask, bit, unhandled;
+ NvU32 nport;
+ NvU32 link;
+
+ pending = NVSWITCH_NPG_RD32_LR10(device, npg, _NPG, _NPG_INTERRUPT_STATUS);
+
+ if (pending == 0)
+ {
+ return -NVL_NOT_FOUND;
+ }
+
+ mask =
+ DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _NONFATAL) |
+ DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _NONFATAL) |
+ DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _NONFATAL) |
+ DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV3_INT_STATUS, _NONFATAL);
+ pending &= mask;
+ unhandled = pending;
+
+ for (nport = 0; nport < NVSWITCH_NPORT_PER_NPG; nport++)
+ {
+ switch (nport)
+ {
+ case 0:
+ bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV0_INT_STATUS, _NONFATAL);
+ break;
+ case 1:
+ bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV1_INT_STATUS, _NONFATAL);
+ break;
+ case 2:
+ bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV2_INT_STATUS, _NONFATAL);
+ break;
+ case 3:
+ bit = DRF_DEF(_NPG, _NPG_INTERRUPT_STATUS, _DEV3_INT_STATUS, _NONFATAL);
+ break;
+ default:
+ bit = 0;
+ NVSWITCH_ASSERT(0);
+ break;
+ }
+ if (nvswitch_test_flags(pending, bit))
+ {
+ link = NPORT_TO_LINK(device, npg, nport);
+ if (NVSWITCH_ENG_VALID_LR10(device, NPORT, link))
+ {
+ if (_nvswitch_service_nport_nonfatal_lr10(device, link) == NVL_SUCCESS)
+ {
+ nvswitch_clear_flags(&unhandled, bit);
+ }
+ }
+ }
+ }
+
+ NVSWITCH_UNHANDLED_CHECK(device, unhandled);
+
+ if (unhandled != 0)
+ {
+ return -NVL_MORE_PROCESSING_REQUIRED;
+ }
+
+ return NVL_SUCCESS;
+}
+
+NvlStatus
+nvswitch_service_minion_link_lr10
+(
+ nvswitch_device *device,
+ NvU32 instance
+)
+{
+ NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
+ NvU32 pending, unhandled, minionIntr, linkIntr, reg, enabledLinks, bit;
+ NvU32 localLinkIdx, link;
+
+ //
+ // _MINION_MINION_INTR shows all interrupts currently at the host on this minion
+ // Note: _MINION_MINION_INTR is not used to clear link specific interrupts
+ //
+ minionIntr = NVSWITCH_MINION_RD32_LR10(device, instance, _MINION, _MINION_INTR);
+
+ // get all possible interrupting links associated with this minion
+ report.raw_pending = DRF_VAL(_MINION, _MINION_INTR, _LINK, minionIntr);
+
+ // read in the enabled minion interrupts on this minion
+ reg = NVSWITCH_MINION_RD32_LR10(device, instance, _MINION, _MINION_INTR_STALL_EN);
+
+ // get the links with enabled interrupts on this minion
+ enabledLinks = DRF_VAL(_MINION, _MINION_INTR_STALL_EN, _LINK, reg);
+
+ report.raw_enable = enabledLinks;
+ report.mask = report.raw_enable;
+
+ // pending bit field contains interrupting links after being filtered
+ pending = report.raw_pending & report.mask;
+
+ if (pending == 0)
+ {
+ return -NVL_NOT_FOUND;
+ }
+
+ unhandled = pending;
+
+ FOR_EACH_INDEX_IN_MASK(32, localLinkIdx, pending)
+ {
+ link = (instance * NVSWITCH_LINKS_PER_NVLIPT) + localLinkIdx;
+ bit = NVBIT(localLinkIdx);
+
+ // read in the interrupt register for the given link
+ linkIntr = NVSWITCH_MINION_LINK_RD32_LR10(device, link, _MINION, _NVLINK_LINK_INTR(localLinkIdx));
+
+ // _STATE must be set for _CODE to be valid
+ if (!DRF_VAL(_MINION, _NVLINK_LINK_INTR, _STATE, linkIntr))
+ {
+ continue;
+ }
+
+ report.data[0] = linkIntr;
+
+ switch(DRF_VAL(_MINION, _NVLINK_LINK_INTR, _CODE, linkIntr))
+ {
+ case NV_MINION_NVLINK_LINK_INTR_CODE_NA:
+ NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link NA interrupt", NV_FALSE);
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_SWREQ:
+ NVSWITCH_PRINT(device, INFO,
+ "%s: Received MINION Link SW Generate interrupt on MINION %d : link %d.\n",
+ __FUNCTION__, instance, link);
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_DLREQ:
+ NVSWITCH_REPORT_NONFATAL(_HW_MINION_NONFATAL, "Minion Link DLREQ interrupt");
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_PMDISABLED:
+ NVSWITCH_REPORT_NONFATAL(_HW_MINION_NONFATAL, "Minion Link PMDISABLED interrupt");
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_DLCMDFAULT:
+ NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link DLCMDFAULT interrupt", NV_FALSE);
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_TLREQ:
+ NVSWITCH_REPORT_NONFATAL(_HW_MINION_NONFATAL, "Minion Link TLREQ interrupt");
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_NOINIT:
+ NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link NOINIT interrupt", NV_FALSE);
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_NOTIFY:
+ NVSWITCH_PRINT(device, INFO,
+ "%s: Received MINION NOTIFY interrupt on MINION %d : link %d.\n",
+ __FUNCTION__, instance, link);
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_LOCAL_CONFIG_ERR:
+ NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link Local-Config-Error interrupt", NV_FALSE);
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_NEGOTIATION_CONFIG_ERR:
+ NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link Negotiation Config Err Interrupt", NV_FALSE);
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_BADINIT:
+ NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link BADINIT interrupt", NV_FALSE);
+ break;
+ case NV_MINION_NVLINK_LINK_INTR_CODE_PMFAIL:
+ NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Link PMFAIL interrupt", NV_FALSE);
+ break;
+ default:
+ NVSWITCH_REPORT_FATAL(_HW_MINION_FATAL_LINK_INTR, "Minion Interrupt code unknown", NV_FALSE);
+ }
+ nvswitch_clear_flags(&unhandled, bit);
+
+ // Disable interrupt bit for the given link - fatal error occurred before
+ if (device->link[link].fatal_error_occurred)
+ {
+ enabledLinks &= ~bit;
+ reg = DRF_NUM(_MINION, _MINION_INTR_STALL_EN, _LINK, enabledLinks);
+ NVSWITCH_MINION_LINK_WR32_LR10(device, link, _MINION, _MINION_INTR_STALL_EN, reg);
+ }
+
+ //
+ // _MINION_INTR_LINK is a read-only register field for the host
+ // Host must write 1 to _NVLINK_LINK_INTR_STATE to clear the interrupt on the link
+ //
+ reg = DRF_NUM(_MINION, _NVLINK_LINK_INTR, _STATE, 1);
+ NVSWITCH_MINION_WR32_LR10(device, instance, _MINION, _NVLINK_LINK_INTR(localLinkIdx), reg);
+ }
+ FOR_EACH_INDEX_IN_MASK_END;
+
+ NVSWITCH_UNHANDLED_CHECK(device, unhandled);
+
+ if (unhandled != 0)
+ {
+ return -NVL_MORE_PROCESSING_REQUIRED;
+ }
+
+ return NVL_SUCCESS;
+}
+
+static NvlStatus
+_nvswitch_service_nvldl_nonfatal_link_lr10
+(
+ nvswitch_device *device,
+ NvU32 link
+)
+{
+ NvU32 pending, bit, unhandled;
+ NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
+
+ report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLDL, _NVLDL_TOP, _INTR);
+ report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLDL, _NVLDL_TOP, _INTR_NONSTALL_EN);
+ report.mask = report.raw_enable;
+ pending = report.raw_pending & report.mask;
+
+ if (pending == 0)
+ {
+ return -NVL_NOT_FOUND;
+ }
+
+ unhandled = pending;
+
+ bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_REPLAY, 1);
+ if (nvswitch_test_flags(pending, bit))
+ {
+ NVSWITCH_REPORT_NONFATAL(_HW_DLPL_TX_REPLAY, "TX Replay Error");
+ nvswitch_clear_flags(&unhandled, bit);
+ }
+
+ bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_RECOVERY_SHORT, 1);
+ if (nvswitch_test_flags(pending, bit))
+ {
+ NVSWITCH_REPORT_NONFATAL(_HW_DLPL_TX_RECOVERY_SHORT, "TX Recovery Short");
+ nvswitch_clear_flags(&unhandled, bit);
+ }
+
+ bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_SHORT_ERROR_RATE, 1);
+ if (nvswitch_test_flags(pending, bit))
+ {
+ NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_SHORT_ERROR_RATE, "RX Short Error Rate");
+ nvswitch_clear_flags(&unhandled, bit);
+ }
+
+ bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_LONG_ERROR_RATE, 1);
+ if (nvswitch_test_flags(pending, bit))
+ {
+ NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_LONG_ERROR_RATE, "RX Long Error Rate");
+ nvswitch_clear_flags(&unhandled, bit);
+ }
+
+ bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_ILA_TRIGGER, 1);
+ if (nvswitch_test_flags(pending, bit))
+ {
+ NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_ILA_TRIGGER, "RX ILA Trigger");
+ nvswitch_clear_flags(&unhandled, bit);
+ }
+
+ bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_CRC_COUNTER, 1);
+ if (nvswitch_test_flags(pending, bit))
+ {
+ NVSWITCH_REPORT_NONFATAL(_HW_DLPL_RX_CRC_COUNTER, "RX CRC Counter");
+ nvswitch_clear_flags(&unhandled, bit);
+
+ //
+ // Mask CRC counter after first occurrence - otherwise, this interrupt
+ // will continue to fire once the CRC counter has hit the threshold
+ // See Bug 3341528
+ //
+ report.raw_enable = report.raw_enable & (~bit);
+ NVSWITCH_LINK_WR32_LR10(device, link, NVLDL, _NVLDL_TOP, _INTR_NONSTALL_EN,
+ report.raw_enable);
+ }
+
+ NVSWITCH_UNHANDLED_CHECK(device, unhandled);
+
+ NVSWITCH_LINK_WR32_LR10(device, link, NVLDL, _NVLDL_TOP, _INTR, pending);
+
+ if (unhandled != 0)
+ {
+ NVSWITCH_PRINT(device, WARN,
+ "%s: Unhandled NVLDL nonfatal interrupts, link: %d pending: 0x%x enabled: 0x%x.\n",
+ __FUNCTION__, link, pending, report.raw_enable);
+ return -NVL_MORE_PROCESSING_REQUIRED;
+ }
+
+ return NVL_SUCCESS;
+}
+
+static NvlStatus
+_nvswitch_service_nvldl_nonfatal_lr10
+(
+ nvswitch_device *device,
+ NvU32 nvlipt_instance
+)
+{
+ NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask;
+ NvU32 i;
+ nvlink_link *link;
+ NvlStatus status = -NVL_MORE_PROCESSING_REQUIRED;
+
+ enabledLinkMask = nvswitch_get_enabled_link_mask(device);
+ localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(nvlipt_instance);
+ localEnabledLinkMask = enabledLinkMask & localLinkMask;
+
+ FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask)
+ {
+ link = nvswitch_get_link(device, i);
+ if (link == NULL)
+ {
+ // An interrupt on an invalid link should never occur
+ NVSWITCH_ASSERT(link != NULL);
+ continue;
+ }
+
+ if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != nvlipt_instance)
+ {
+ NVSWITCH_ASSERT(0);
+ break;
+ }
+
+ if (nvswitch_is_link_in_reset(device, link))
+ {
+ continue;
+ }
+
+ if (_nvswitch_service_nvldl_nonfatal_link_lr10(device, i) == NVL_SUCCESS)
+ {
+ status = NVL_SUCCESS;
+ }
+ }
+ FOR_EACH_INDEX_IN_MASK_END;
+
+ return status;
+}
+
+static NvlStatus
+_nvswitch_service_nvltlc_rx_lnk_nonfatal_0_lr10
+(
+ nvswitch_device *device,
+ NvU32 nvlipt_instance,
+ NvU32 link
+)
+{
+ NvU32 pending, bit, unhandled;
+ NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 };
+
+ report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_0);
+ report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_NON_FATAL_REPORT_EN_0);
+ report.mask = report.raw_enable;
+
+ pending = report.raw_pending & report.mask;
+ if (pending == 0)
+ {
+ return -NVL_NOT_FOUND;
+ }
+
+ unhandled = pending;
+ report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_0);
+
+ bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXRSPSTATUS_PRIV_ERR, 1);
+ if (nvswitch_test_flags(pending, bit))
+ {
+ NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_RX_LNK_RXRSPSTATUS_PRIV_ERR,
"RX Rsp Status PRIV Error"); + nvswitch_clear_flags(&unhandled, bit); + + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_0, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_RX_LNK _0 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvltlc_tx_lnk_nonfatal_0_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_0); + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _CREQ_RAM_DAT_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_CREQ_RAM_DAT_ECC_DBE_ERR, "CREQ RAM DAT ECC DBE Error"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _CREQ_RAM_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_CREQ_RAM_ECC_LIMIT_ERR, "CREQ RAM DAT ECC Limit Error"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP_RAM_DAT_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_RSP_RAM_DAT_ECC_DBE_ERR, "Response RAM DAT ECC DBE Error"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP_RAM_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_RSP_RAM_ECC_LIMIT_ERR, "Response RAM ECC Limit Error"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _COM_RAM_DAT_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_COM_RAM_DAT_ECC_DBE_ERR, "COM RAM DAT ECC DBE Error"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _COM_RAM_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_COM_RAM_ECC_LIMIT_ERR, "COM RAM ECC Limit Error"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP1_RAM_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_RSP1_RAM_ECC_LIMIT_ERR, "RSP1 RAM ECC Limit Error"); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. 
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_0, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_TX_LNK _0 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvltlc_rx_lnk_nonfatal_1_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled, injected; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_1); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_NON_FATAL_REPORT_EN_1); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_1); + injected = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_REPORT_INJECT_1); + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _AN1_HEARTBEAT_TIMEOUT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_RX_LNK_AN1_HEARTBEAT_TIMEOUT_ERR, "AN1 Heartbeat Timeout Error"); + nvswitch_clear_flags(&unhandled, bit); + + if (FLD_TEST_DRF_NUM(_NVLTLC, _RX_LNK_ERR_REPORT_INJECT_1, _AN1_HEARTBEAT_TIMEOUT_ERR, 0x0, injected)) + { + // + // WAR Bug 200627368: Mask off HBTO to avoid a storm + // During the start of reset_and_drain, all links on the GPU + // will go into contain, causing HBTO on other switch links connected + // to that GPU. For the switch side, these interrupts are not fatal, + // but until we get to reset_and_drain for this link, HBTO will continue + // to fire repeatedly. After reset_and_drain, HBTO will be re-enabled + // by MINION after links are trained. + // + report.raw_enable = report.raw_enable & (~bit); + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_NON_FATAL_REPORT_EN_1, + report.raw_enable); + } + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. 
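+    // Here the enable mask is trimmed with (raw_enable & ~pending) rather than XOR: raw_enable may already have had the HBTO bit cleared above, and XOR-ing pending back into it would set that bit again.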
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_NON_FATAL_REPORT_EN_1, + report.raw_enable & (~pending)); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_1, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_1, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_RX_LNK _1 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvltlc_tx_lnk_nonfatal_1_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_1); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_1); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_1); + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _AN1_TIMEOUT_VC0, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC0, "AN1 Timeout VC0"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _AN1_TIMEOUT_VC1, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC1, "AN1 Timeout VC1"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _AN1_TIMEOUT_VC2, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC2, "AN1 Timeout VC2"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _AN1_TIMEOUT_VC3, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC3, "AN1 Timeout VC3"); + nvswitch_clear_flags(&unhandled, bit); + + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _AN1_TIMEOUT_VC4, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC4, "AN1 Timeout VC4"); + nvswitch_clear_flags(&unhandled, bit); + + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _AN1_TIMEOUT_VC5, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC5, "AN1 Timeout VC5"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _AN1_TIMEOUT_VC6, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC6, "AN1 Timeout VC6"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_1, _AN1_TIMEOUT_VC7, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLTLC_TX_LNK_AN1_TIMEOUT_VC7, "AN1 Timeout VC7"); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. 
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_NON_FATAL_REPORT_EN_1, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_1, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_1, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_TX_LNK _1 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvltlc_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance +) +{ + NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask; + NvU32 i; + nvlink_link *link; + NvlStatus status = -NVL_MORE_PROCESSING_REQUIRED; + + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(nvlipt_instance); + localEnabledLinkMask = enabledLinkMask & localLinkMask; + + FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + { + link = nvswitch_get_link(device, i); + if (link == NULL) + { + // An interrupt on an invalid link should never occur + NVSWITCH_ASSERT(link != NULL); + continue; + } + + if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != nvlipt_instance) + { + NVSWITCH_ASSERT(0); + break; + } + + if (nvswitch_is_link_in_reset(device, link)) + { + continue; + } + + if (_nvswitch_service_nvltlc_rx_lnk_nonfatal_0_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + + if (_nvswitch_service_nvltlc_tx_lnk_nonfatal_0_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + + if (_nvswitch_service_nvltlc_rx_lnk_nonfatal_1_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + + if (_nvswitch_service_nvltlc_tx_lnk_nonfatal_1_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return status; +} + +static NvlStatus +_nvswitch_service_nvlipt_lnk_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, unhandled; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_NON_FATAL_REPORT_EN_0); + report.mask = report.raw_enable; + + pending = report.raw_pending & report.mask; + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FIRST_0); + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _ILLEGALLINKSTATEREQUEST, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_ILLEGALLINKSTATEREQUEST, "_HW_NVLIPT_LNK_ILLEGALLINKSTATEREQUEST"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _FAILEDMINIONREQUEST, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_FAILEDMINIONREQUEST, "_FAILEDMINIONREQUEST"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _RESERVEDREQUESTVALUE, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_RESERVEDREQUESTVALUE, 
"_RESERVEDREQUESTVALUE"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _LINKSTATEWRITEWHILEBUSY, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_LINKSTATEWRITEWHILEBUSY, "_LINKSTATEWRITEWHILEBUSY"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _LINK_STATE_REQUEST_TIMEOUT, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_LINK_STATE_REQUEST_TIMEOUT, "_LINK_STATE_REQUEST_TIMEOUT"); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _WRITE_TO_LOCKED_SYSTEM_REG_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_NONFATAL(_HW_NVLIPT_LNK_WRITE_TO_LOCKED_SYSTEM_REG_ERR, "_WRITE_TO_LOCKED_SYSTEM_REG_ERR"); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FIRST_0, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLIPT_LNK NON_FATAL interrupts, pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvlipt_link_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance +) +{ + NvU32 i, intrLink; + NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask, interruptingLinks = 0; + + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(nvlipt_instance); + localEnabledLinkMask = enabledLinkMask & localLinkMask; + + FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + { + intrLink = NVSWITCH_LINK_RD32_LR10(device, i, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0); + + if(intrLink) + { + interruptingLinks |= NVBIT(i); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + if(interruptingLinks) + { + FOR_EACH_INDEX_IN_MASK(64, i, interruptingLinks) + { + if( _nvswitch_service_nvlipt_lnk_nonfatal_lr10(device, nvlipt_instance, i) != NVL_SUCCESS) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + } + FOR_EACH_INDEX_IN_MASK_END; + return NVL_SUCCESS; + } + else + { + return -NVL_MORE_PROCESSING_REQUIRED; + } +} + +static NvlStatus +_nvswitch_service_nvlipt_nonfatal_lr10 +( + nvswitch_device *device, + NvU32 instance +) +{ + NvlStatus status[4]; + + // + // MINION LINK interrupts trigger both INTR_FATAL and INTR_NONFATAL + // trees (Bug 3037835). Because of this, we must service them in both the + // fatal and nonfatal handlers + // + status[0] = device->hal.nvswitch_service_minion_link(device, instance); + status[1] = _nvswitch_service_nvldl_nonfatal_lr10(device, instance); + status[2] = _nvswitch_service_nvltlc_nonfatal_lr10(device, instance); + status[3] = _nvswitch_service_nvlipt_link_nonfatal_lr10(device, instance); + + if (status[0] != NVL_SUCCESS && + status[1] != NVL_SUCCESS && + status[2] != NVL_SUCCESS && + status[3] != NVL_SUCCESS) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_soe_fatal_lr10 +( + nvswitch_device *device +) +{ + // We only support 1 SOE as of LR10. 
+ if (soeService_HAL(device, (PSOE)device->pSoe) != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_saw_legacy_lr10 +( + nvswitch_device *device +) +{ + //TODO : SAW Legacy interrupts + + return -NVL_MORE_PROCESSING_REQUIRED; +} + +static NvlStatus +_nvswitch_service_saw_nonfatal_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 pending, bit, unhandled; + NvU32 i; + + pending = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_NONFATAL); + pending &= chip_device->intr_enable_nonfatal; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + for (i = 0; i < NUM_NPG_ENGINE_LR10; i++) + { + if (!NVSWITCH_ENG_VALID_LR10(device, NPG, i)) + continue; + + bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_NONFATAL, _NPG_0, 1) << i; + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_npg_nonfatal_lr10(device, i) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + + for (i = 0; i < NUM_NVLIPT_ENGINE_LR10; i++) + { + if (!NVSWITCH_ENG_VALID_LR10(device, NVLIPT, i)) + { + continue; + } + + bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_NONFATAL, _NVLIPT_0, 1) << i; + + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_nvlipt_nonfatal_lr10(device, i) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nxbar_tile_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_TILE_RD32_LR10(device, link, _NXBAR_TILE, _ERR_STATUS); + report.raw_enable = NVSWITCH_TILE_RD32_LR10(device, link, _NXBAR_TILE, _ERR_FATAL_INTR_EN); + report.mask = chip_device->intr_mask.tile.fatal; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_TILE_RD32_LR10(device, link, _NXBAR_TILE, _ERR_FIRST); + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_BUFFER_OVERFLOW, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_BUFFER_OVERFLOW, "ingress SRC-VC buffer overflow", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_BUFFER_UNDERFLOW, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_BUFFER_UNDERFLOW, "ingress SRC-VC buffer underflow", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _EGRESS_CREDIT_OVERFLOW, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_EGRESS_CREDIT_OVERFLOW, "egress DST-VC credit overflow", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _EGRESS_CREDIT_UNDERFLOW, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_EGRESS_CREDIT_UNDERFLOW, "egress DST-VC credit underflow", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_NON_BURSTY_PKT, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_NON_BURSTY_PKT, "ingress 
packet burst error", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_NON_STICKY_PKT, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_NON_STICKY_PKT, "ingress packet sticky error", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_BURST_GT_9_DATA_VC, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_BURST_GT_9_DATA_VC, "possible bubbles at ingress", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_PKT_INVALID_DST, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_PKT_INVALID_DST, "ingress packet invalid dst error", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR_TILE, _ERR_STATUS, _INGRESS_PKT_PARITY_ERROR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILE_INGRESS_PKT_PARITY_ERROR, "ingress packet parity error", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_TILE_WR32_LR10(device, link, _NXBAR_TILE, _ERR_FIRST, + report.raw_first & report.mask); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. + NVSWITCH_TILE_WR32_LR10(device, link, _NXBAR_TILE, _ERR_FATAL_INTR_EN, + report.raw_enable ^ pending); + + NVSWITCH_TILE_WR32_LR10(device, link, _NXBAR_TILE, _ERR_STATUS, pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nxbar_tileout_lr10 +( + nvswitch_device *device, + NvU32 link, + NvU32 tileout +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_NXBAR_RD32_LR10(device, link, _NXBAR_TC_TILEOUT, _ERR_STATUS(tileout)); + report.raw_enable = NVSWITCH_NXBAR_RD32_LR10(device, link, _NXBAR_TC_TILEOUT, _ERR_FATAL_INTR_EN(tileout)); + report.mask = chip_device->intr_mask.tileout.fatal; + report.data[0] = tileout; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NXBAR_RD32_LR10(device, link, _NXBAR_TC_TILEOUT, _ERR_FIRST(tileout)); + + bit = DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_STATUS, _INGRESS_BUFFER_OVERFLOW, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_BUFFER_OVERFLOW, "ingress SRC-VC buffer overflow", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_STATUS, _INGRESS_BUFFER_UNDERFLOW, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_BUFFER_UNDERFLOW, "ingress SRC-VC buffer underflow", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_STATUS, _EGRESS_CREDIT_OVERFLOW, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_EGRESS_CREDIT_OVERFLOW, "egress DST-VC credit overflow", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_STATUS, _EGRESS_CREDIT_UNDERFLOW, 1); + if 
(nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_EGRESS_CREDIT_UNDERFLOW, "egress DST-VC credit underflow", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_STATUS, _INGRESS_NON_BURSTY_PKT, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_NON_BURSTY_PKT, "ingress packet burst error", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_STATUS, _INGRESS_NON_STICKY_PKT, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_NON_STICKY_PKT, "ingress packet sticky error", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_STATUS, _INGRESS_BURST_GT_9_DATA_VC, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_INGRESS_BURST_GT_9_DATA_VC, "possible bubbles at ingress", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NXBAR, _TC_TILEOUT0_ERR_STATUS, _EGRESS_CDT_PARITY_ERROR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NXBAR_TILEOUT_EGRESS_CDT_PARITY_ERROR, "ingress credit parity error", NV_TRUE); + nvswitch_clear_flags(&unhandled, bit); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_NXBAR_WR32_LR10(device, link, _NXBAR_TC_TILEOUT, _ERR_FIRST(tileout), + report.raw_first & report.mask); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts. + NVSWITCH_NXBAR_WR32_LR10(device, link, _NXBAR_TC_TILEOUT, _ERR_FATAL_INTR_EN(tileout), + report.raw_enable ^ pending); + + NVSWITCH_NXBAR_WR32_LR10(device, link, _NXBAR_TC_TILEOUT, _ERR_STATUS(tileout), pending); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nxbar_fatal_lr10 +( + nvswitch_device *device, + NvU32 nxbar +) +{ + NvU32 pending, bit, unhandled; + NvU32 link; + NvU32 tile, tileout; + + pending = NVSWITCH_NXBAR_RD32_LR10(device, nxbar, _NXBAR, _TC_ERROR_STATUS); + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + for (tile = 0; tile < NUM_NXBAR_TILES_PER_TC_LR10; tile++) + { + bit = DRF_NUM(_NXBAR, _TC_ERROR_STATUS, _TILE0, 1) << tile; + if (nvswitch_test_flags(pending, bit)) + { + link = TILE_TO_LINK(device, nxbar, tile); + if (NVSWITCH_ENG_VALID_LR10(device, TILE, link)) + { + if (_nvswitch_service_nxbar_tile_lr10(device, link) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + } + + for (tileout = 0; tileout < NUM_NXBAR_TILEOUTS_PER_TC_LR10; tileout++) + { + bit = DRF_NUM(_NXBAR, _TC_ERROR_STATUS, _TILEOUT0, 1) << tileout; + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_nxbar_tileout_lr10(device, nxbar, tileout) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + + // TODO: Perform hot_reset to recover NXBAR + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +NvlStatus +_nvswitch_service_minion_fatal_lr10 +( + nvswitch_device *device, + NvU32 instance +) +{ + NvU32 pending, bit, unhandled, mask; + + pending = NVSWITCH_MINION_RD32_LR10(device, instance, _MINION, _MINION_INTR); + mask = NVSWITCH_MINION_RD32_LR10(device, instance, 
_MINION, _MINION_INTR_STALL_EN); + + // Don't consider MINION Link interrupts in this handler + mask &= ~(DRF_NUM(_MINION, _MINION_INTR_STALL_EN, _LINK, NV_MINION_MINION_INTR_STALL_EN_LINK_ENABLE_ALL)); + + pending &= mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + bit = DRF_NUM(_MINION, _MINION_INTR, _FALCON_STALL, 0x1); + if (nvswitch_test_flags(pending, bit)) + { + if (nvswitch_minion_service_falcon_interrupts_lr10(device, instance) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_service_nvldl_fatal_link_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLDL, _NVLDL_TOP, _INTR); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLDL, _NVLDL_TOP, _INTR_STALL_EN); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_FAULT_RAM, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_TX_FAULT_RAM, "TX Fault Ram", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_FAULT_INTERFACE, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_TX_FAULT_INTERFACE, "TX Fault Interface", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _TX_FAULT_SUBLINK_CHANGE, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_TX_FAULT_SUBLINK_CHANGE, "TX Fault Sublink Change", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_FAULT_SUBLINK_CHANGE, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_RX_FAULT_SUBLINK_CHANGE, "RX Fault Sublink Change", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _RX_FAULT_DL_PROTOCOL, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_RX_FAULT_DL_PROTOCOL, "RX Fault DL Protocol", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_DOWN, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_DOWN, "LTSSM Fault Down", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_FAULT_UP, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_FAULT_UP, "LTSSM Fault Up", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLDL_TOP, _INTR, _LTSSM_PROTOCOL, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_DLPL_LTSSM_PROTOCOL, "LTSSM Protocol Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + + // TODO 2827793 this should be logged to the InfoROM as fatal + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. 
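+    // This helps prevent an interrupt storm if HW keeps triggering unnecessary stream of interrupts.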
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLDL, _NVLDL_TOP, _INTR_STALL_EN, + report.raw_enable ^ pending); + } + + NVSWITCH_LINK_WR32_LR10(device, link, NVLDL, _NVLDL_TOP, _INTR, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLDL fatal interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +NvlStatus +_nvswitch_service_nvldl_fatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance +) +{ + NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask, runtimeErrorMask = 0; + NvU32 i; + nvlink_link *link; + NvlStatus status = -NVL_MORE_PROCESSING_REQUIRED; + NVSWITCH_LINK_TRAINING_ERROR_INFO linkTrainingErrorInfo = { 0 }; + NVSWITCH_LINK_RUNTIME_ERROR_INFO linkRuntimeErrorInfo = { 0 }; + + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(nvlipt_instance); + localEnabledLinkMask = enabledLinkMask & localLinkMask; + + FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + { + link = nvswitch_get_link(device, i); + if (link == NULL) + { + // An interrupt on an invalid link should never occur + NVSWITCH_ASSERT(link != NULL); + continue; + } + + if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != nvlipt_instance) + { + NVSWITCH_ASSERT(0); + break; + } + + if (nvswitch_is_link_in_reset(device, link)) + { + continue; + } + + if (device->hal.nvswitch_service_nvldl_fatal_link(device, nvlipt_instance, i) == NVL_SUCCESS) + { + runtimeErrorMask |= NVBIT64(i); + status = NVL_SUCCESS; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + linkTrainingErrorInfo.isValid = NV_FALSE; + linkRuntimeErrorInfo.isValid = NV_TRUE; + linkRuntimeErrorInfo.mask0 = runtimeErrorMask; + + if (nvswitch_smbpbi_set_link_error_info(device, &linkTrainingErrorInfo, &linkRuntimeErrorInfo) != + NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to send Runtime Error bitmask: 0x%llx,\n", + __FUNCTION__, runtimeErrorMask); + } + + return status; +} + +static NvlStatus +_nvswitch_service_nvltlc_tx_sys_fatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FIRST_0); + + bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _NCISOC_PARITY_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_NCISOC_PARITY_ERR, "NCISOC Parity Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _NCISOC_HDR_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_NCISOC_HDR_ECC_DBE_ERR, "NCISOC HDR ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _NCISOC_DAT_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_NCISOC_DAT_ECC_DBE_ERR, "NCISOC DAT ECC DBE 
Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _NCISOC_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_NCISOC_ECC_LIMIT_ERR, "NCISOC ECC Limit Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _TXPOISONDET, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TXPOISONDET, "Poison Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _TXRSPSTATUS_HW_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_TXRSPSTATUS_HW_ERR, "TX Response Status HW Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _TXRSPSTATUS_UR_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_TXRSPSTATUS_UR_ERR, "TX Response Status UR Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_SYS, _ERR_STATUS_0, _TXRSPSTATUS_PRIV_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_SYS_TXRSPSTATUS_PRIV_ERR, "TX Response Status PRIV Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_SYS, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_TX_SYS interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvltlc_rx_sys_fatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FIRST_0); + + bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _NCISOC_PARITY_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_NCISOC_PARITY_ERR, "NCISOC Parity Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _HDR_RAM_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_HDR_RAM_ECC_DBE_ERR, "HDR RAM ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _HDR_RAM_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + 
NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_HDR_RAM_ECC_LIMIT_ERR, "HDR RAM ECC Limit Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _DAT0_RAM_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_DAT0_RAM_ECC_DBE_ERR, "DAT0 RAM ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _DAT0_RAM_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_DAT0_RAM_ECC_LIMIT_ERR, "DAT0 RAM ECC Limit Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _DAT1_RAM_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_DAT1_RAM_ECC_DBE_ERR, "DAT1 RAM ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_SYS, _ERR_STATUS_0, _DAT1_RAM_ECC_LIMIT_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_SYS_DAT1_RAM_ECC_LIMIT_ERR, "DAT1 RAM ECC Limit Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FIRST_0, + report.raw_first & report.mask); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_SYS, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_RX_SYS interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvltlc_tx_lnk_fatal_0_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_0); + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _TXDLCREDITPARITYERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TXDLCREDITPARITYERR, "TX DL Credit Parity Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _CREQ_RAM_HDR_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_CREQ_RAM_HDR_ECC_DBE_ERR, "CREQ RAM HDR ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP_RAM_HDR_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_RSP_RAM_HDR_ECC_DBE_ERR, "Response RAM HDR ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = 
DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _COM_RAM_HDR_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_COM_RAM_HDR_ECC_DBE_ERR, "COM RAM HDR ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP1_RAM_HDR_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_RSP1_RAM_HDR_ECC_DBE_ERR, "RSP1 RAM HDR ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_TX_LNK, _ERR_STATUS_0, _RSP1_RAM_DAT_ECC_DBE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_TX_LNK_RSP1_RAM_DAT_ECC_DBE_ERR, "RSP1 RAM DAT ECC DBE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_FIRST_0, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_TX_LNK, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_TX_LNK _0 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvltlc_rx_lnk_fatal_0_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_0); + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXDLHDRPARITYERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXDLHDRPARITYERR, "RX DL HDR Parity Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXDLDATAPARITYERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXDLDATAPARITYERR, "RX DL Data Parity Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXDLCTRLPARITYERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXDLCTRLPARITYERR, "RX DL Ctrl Parity Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXINVALIDAEERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXINVALIDAEERR, "RX Invalid DAE Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXINVALIDBEERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXINVALIDBEERR, "RX Invalid BE Error", NV_FALSE); + 
nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXINVALIDADDRALIGNERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXINVALIDADDRALIGNERR, "RX Invalid Addr Align Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXPKTLENERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXPKTLENERR, "RX Packet Length Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RSVCMDENCERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RSVCMDENCERR, "RSV Cmd Encoding Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RSVDATLENENCERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RSVDATLENENCERR, "RSV Data Length Encoding Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RSVPKTSTATUSERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RSVPKTSTATUSERR, "RSV Packet Status Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RSVCACHEATTRPROBEREQERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RSVCACHEATTRPROBEREQERR, "RSV Packet Status Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RSVCACHEATTRPROBERSPERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RSVCACHEATTRPROBERSPERR, "RSV CacheAttr Probe Rsp Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _DATLENGTRMWREQMAXERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_DATLENGTRMWREQMAXERR, "Data Length RMW Req Max Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _DATLENLTATRRSPMINERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_DATLENLTATRRSPMINERR, "Data Len Lt ATR RSP Min Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _INVALIDCACHEATTRPOERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_INVALIDCACHEATTRPOERR, "Invalid Cache Attr PO Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _INVALIDCRERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_INVALIDCRERR, "Invalid CR Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXRSPSTATUS_HW_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_LNK_RXRSPSTATUS_HW_ERR, "RX Rsp Status HW Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _RXRSPSTATUS_UR_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_LNK_RXRSPSTATUS_UR_ERR, "RX Rsp Status UR Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_0, _INVALID_COLLAPSED_RESPONSE_ERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + 
NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RX_LNK_INVALID_COLLAPSED_RESPONSE_ERR, "Invalid Collapsed Response Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_0, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_RX_LNK _0 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvltlc_rx_lnk_fatal_1_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NvU32 pending, bit, unhandled; + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_1); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_1); + report.mask = report.raw_enable; + pending = report.raw_pending & report.mask; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_1); + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _RXHDROVFERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXHDROVFERR, "RX HDR OVF Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _RXDATAOVFERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXDATAOVFERR, "RX Data OVF Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _STOMPDETERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_STOMPDETERR, "Stomp Det Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLTLC_RX_LNK, _ERR_STATUS_1, _RXPOISONERR, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLTLC_RXPOISONERR, "RX Poison Error", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. 
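+    // As with the other TLC handlers, the fatal-report enable mask is only trimmed once the link has been flagged with a fatal error; otherwise reporting is left fully enabled.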
+ if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FATAL_REPORT_EN_1, + report.raw_enable ^ pending); + } + + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_FIRST_1, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLTLC, _NVLTLC_RX_LNK, _ERR_STATUS_1, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLTLC_RX_LNK _1 interrupts, link: %d pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, link, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +NvlStatus +_nvswitch_service_nvltlc_fatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance +) +{ + NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask; + NvU32 i; + nvlink_link *link; + NvlStatus status = -NVL_MORE_PROCESSING_REQUIRED; + + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(nvlipt_instance); + localEnabledLinkMask = enabledLinkMask & localLinkMask; + + FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + { + link = nvswitch_get_link(device, i); + if (link == NULL) + { + // An interrupt on an invalid link should never occur + NVSWITCH_ASSERT(link != NULL); + continue; + } + + if (NVSWITCH_GET_LINK_ENG_INST(device, i, NVLIPT) != nvlipt_instance) + { + NVSWITCH_ASSERT(0); + break; + } + + if (nvswitch_is_link_in_reset(device, link)) + { + continue; + } + + if (_nvswitch_service_nvltlc_tx_sys_fatal_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + + if (_nvswitch_service_nvltlc_rx_sys_fatal_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + + if (_nvswitch_service_nvltlc_tx_lnk_fatal_0_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + + if (_nvswitch_service_nvltlc_rx_lnk_fatal_0_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + + if (_nvswitch_service_nvltlc_rx_lnk_fatal_1_lr10(device, nvlipt_instance, i) == NVL_SUCCESS) + { + status = NVL_SUCCESS; + } + + } + FOR_EACH_INDEX_IN_MASK_END; + + return status; +} + +static NvlStatus +_nvswitch_service_nvlipt_common_fatal_lr10 +( + nvswitch_device *device, + NvU32 instance +) +{ + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, contain, unhandled; + NvU32 link, local_link_idx; + + report.raw_pending = NVSWITCH_NVLIPT_RD32_LR10(device, instance, _NVLIPT_COMMON, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_NVLIPT_RD32_LR10(device, instance, _NVLIPT_COMMON, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable & + (DRF_NUM(_NVLIPT_COMMON, _ERR_STATUS_0, _CLKCTL_ILLEGAL_REQUEST, 1) | + DRF_NUM(_NVLIPT_COMMON, _ERR_STATUS_0, _RSTSEQ_PLL_TIMEOUT, 1) | + DRF_NUM(_NVLIPT_COMMON, _ERR_STATUS_0, _RSTSEQ_PHYARB_TIMEOUT, 1)); + + pending = report.raw_pending & report.mask; + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_NVLIPT_RD32_LR10(device, instance, _NVLIPT_COMMON, _ERR_FIRST_0); + contain = NVSWITCH_NVLIPT_RD32_LR10(device, instance, _NVLIPT_COMMON, _ERR_CONTAIN_EN_0); + + bit = DRF_NUM(_NVLIPT_COMMON, _ERR_STATUS_0, _CLKCTL_ILLEGAL_REQUEST, 1); + if (nvswitch_test_flags(pending, bit)) + { + for (local_link_idx = 0; local_link_idx < NVSWITCH_LINKS_PER_NVLIPT; local_link_idx++) + { + link = (instance * NVSWITCH_LINKS_PER_NVLIPT) + local_link_idx; + if 
(nvswitch_is_link_valid(device, link)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NVLIPT_CLKCTL_ILLEGAL_REQUEST, "CLKCTL_ILLEGAL_REQUEST", NV_FALSE); + } + } + + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_COMMON, _ERR_STATUS_0, _RSTSEQ_PLL_TIMEOUT, 1); + if (nvswitch_test_flags(pending, bit)) + { + for (local_link_idx = 0; local_link_idx < NVSWITCH_LINKS_PER_NVLIPT; local_link_idx++) + { + link = (instance * NVSWITCH_LINKS_PER_NVLIPT) + local_link_idx; + if (nvswitch_is_link_valid(device, link)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NVLIPT_RSTSEQ_PLL_TIMEOUT, "RSTSEQ_PLL_TIMEOUT", NV_FALSE); + } + } + + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_COMMON, _ERR_STATUS_0, _RSTSEQ_PHYARB_TIMEOUT, 1); + if (nvswitch_test_flags(pending, bit)) + { + for (local_link_idx = 0; local_link_idx < NVSWITCH_LINKS_PER_NVLIPT; local_link_idx++) + { + link = (instance * NVSWITCH_LINKS_PER_NVLIPT) + local_link_idx; + if (nvswitch_is_link_valid(device, link)) + { + NVSWITCH_REPORT_CONTAIN(_HW_NVLIPT_RSTSEQ_PHYARB_TIMEOUT, "RSTSEQ_PHYARB_TIMEOUT", NV_FALSE); + } + } + + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + for (local_link_idx = 0; local_link_idx < NVSWITCH_LINKS_PER_NVLIPT; local_link_idx++) + { + link = (instance * NVSWITCH_LINKS_PER_NVLIPT) + local_link_idx; + if (nvswitch_is_link_valid(device, link) && + (device->link[link].fatal_error_occurred)) + { + NVSWITCH_NVLIPT_WR32_LR10(device, instance, _NVLIPT_COMMON, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + break; + } + } + + // clear the interrupts + if (report.raw_first & report.mask) + { + NVSWITCH_NVLIPT_WR32_LR10(device, instance, _NVLIPT_COMMON, _ERR_FIRST_0, + report.raw_first & report.mask); + } + NVSWITCH_NVLIPT_WR32_LR10(device, instance, _NVLIPT_COMMON, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLIPT_COMMON FATAL interrupts, pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvlipt_lnk_fatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance, + NvU32 link +) +{ + NVSWITCH_INTERRUPT_LOG_TYPE report = { 0 }; + NvU32 pending, bit, unhandled; + + report.raw_pending = NVSWITCH_LINK_RD32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0); + report.raw_enable = NVSWITCH_LINK_RD32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FATAL_REPORT_EN_0); + report.mask = report.raw_enable; + + pending = report.raw_pending & report.mask; + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + report.raw_first = NVSWITCH_LINK_RD32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FIRST_0); + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _SLEEPWHILEACTIVELINK, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLIPT_LNK_SLEEPWHILEACTIVELINK, "No non-empty link is detected", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _RSTSEQ_PHYCTL_TIMEOUT, 1); + if (nvswitch_test_flags(pending, bit)) + { + NVSWITCH_REPORT_FATAL(_HW_NVLIPT_LNK_RSTSEQ_PHYCTL_TIMEOUT, "Reset sequencer timed out waiting for a handshake from PHYCTL", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + bit = DRF_NUM(_NVLIPT_LNK, _ERR_STATUS_0, _RSTSEQ_CLKCTL_TIMEOUT, 1); + if (nvswitch_test_flags(pending, bit)) + { 
+ NVSWITCH_REPORT_FATAL(_HW_NVLIPT_LNK_RSTSEQ_CLKCTL_TIMEOUT, "Reset sequencer timed out waiting for a handshake from CLKCTL", NV_FALSE); + nvswitch_clear_flags(&unhandled, bit); + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + // Disable interrupts that have occurred after fatal error. + if (device->link[link].fatal_error_occurred) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FATAL_REPORT_EN_0, + report.raw_enable ^ pending); + } + + // clear interrupts + if (report.raw_first & report.mask) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_FIRST_0, + report.raw_first & report.mask); + } + NVSWITCH_LINK_WR32_LR10(device, link, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0, pending); + + if (unhandled != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Unhandled NVLIPT_LNK FATAL interrupts, pending: 0x%x enabled: 0x%x.\n", + __FUNCTION__, pending, report.raw_enable); + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_nvlipt_link_fatal_lr10 +( + nvswitch_device *device, + NvU32 nvlipt_instance +) +{ + NvU32 i, intrLink; + NvU64 enabledLinkMask, localLinkMask, localEnabledLinkMask, interruptingLinks = 0; + + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + localLinkMask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(nvlipt_instance); + localEnabledLinkMask = enabledLinkMask & localLinkMask; + + FOR_EACH_INDEX_IN_MASK(64, i, localEnabledLinkMask) + { + intrLink = NVSWITCH_LINK_RD32_LR10(device, i, NVLIPT_LNK, _NVLIPT_LNK, _ERR_STATUS_0); + + if(intrLink) + { + interruptingLinks |= NVBIT(i); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + if(interruptingLinks) + { + FOR_EACH_INDEX_IN_MASK(64, i, interruptingLinks) + { + if( _nvswitch_service_nvlipt_lnk_fatal_lr10(device, nvlipt_instance, i) != NVL_SUCCESS) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + } + FOR_EACH_INDEX_IN_MASK_END; + return NVL_SUCCESS; + } + else + { + return -NVL_MORE_PROCESSING_REQUIRED; + } +} + +static NvlStatus +_nvswitch_service_nvlipt_fatal_lr10 +( + nvswitch_device *device, + NvU32 instance +) +{ + NvlStatus status[6]; + + // + // MINION LINK interrupts trigger both INTR_FATAL and INTR_NONFATAL + // trees (Bug 3037835). 
Because of this, we must service them in both the + // fatal and nonfatal handlers + // + status[0] = device->hal.nvswitch_service_minion_link(device, instance); + status[1] = _nvswitch_service_nvldl_fatal_lr10(device, instance); + status[2] = _nvswitch_service_nvltlc_fatal_lr10(device, instance); + status[3] = _nvswitch_service_minion_fatal_lr10(device, instance); + status[4] = _nvswitch_service_nvlipt_common_fatal_lr10(device, instance); + status[5] = _nvswitch_service_nvlipt_link_fatal_lr10(device, instance); + + if (status[0] != NVL_SUCCESS && + status[1] != NVL_SUCCESS && + status[2] != NVL_SUCCESS && + status[3] != NVL_SUCCESS && + status[4] != NVL_SUCCESS && + status[5] != NVL_SUCCESS) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_saw_fatal_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 pending, bit, unhandled; + NvU32 i; + + pending = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_FATAL); + pending &= chip_device->intr_enable_fatal; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + for (i = 0; i < NUM_NPG_ENGINE_LR10; i++) + { + if (!NVSWITCH_ENG_VALID_LR10(device, NPG, i)) + { + continue; + } + + bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_FATAL, _NPG_0, 1) << i; + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_npg_fatal_lr10(device, i) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + + for (i = 0; i < NUM_NXBAR_ENGINE_LR10; i++) + { + if (!NVSWITCH_ENG_VALID_LR10(device, NXBAR, i)) + continue; + + bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_FATAL, _NXBAR_0, 1) << i; + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_nxbar_fatal_lr10(device, i) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + + for (i = 0; i < NUM_NVLIPT_ENGINE_LR10; i++) + { + if (!NVSWITCH_ENG_VALID_LR10(device, NVLIPT, i)) + { + continue; + } + + bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_FATAL, _NVLIPT_0, 1) << i; + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_nvlipt_fatal_lr10(device, i) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + + if (NVSWITCH_ENG_VALID_LR10(device, SOE, 0)) + { + bit = DRF_NUM(_NVLSAW_NVSPMC, _INTR_FATAL, _SOE, 1); + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_soe_fatal_lr10(device) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_saw_lr10 +( + nvswitch_device *device +) +{ + NvlStatus status[4]; + + status[0] = _nvswitch_service_saw_legacy_lr10(device); + status[1] = _nvswitch_service_saw_fatal_lr10(device); + status[2] = _nvswitch_service_saw_nonfatal_lr10(device); + + if ((status[0] != NVL_SUCCESS) && + (status[1] != NVL_SUCCESS) && + (status[2] != NVL_SUCCESS)) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_service_legacy_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 pending, bit, unhandled; + + pending = NVSWITCH_REG_RD32(device, _PSMC, _INTR_LEGACY); + pending &= chip_device->intr_enable_legacy; + + if (pending == 0) + { + return -NVL_NOT_FOUND; + } + + unhandled = pending; + + bit = DRF_NUM(_PSMC, _INTR_LEGACY, _SAW, 1); + if 
(nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_saw_lr10(device) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + + bit = DRF_NUM(_PSMC, _INTR_LEGACY, _PRIV_RING, 1); + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_priv_ring_lr10(device) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + + bit = DRF_NUM(_PSMC, _INTR_LEGACY, _PBUS, 1); + if (nvswitch_test_flags(pending, bit)) + { + if (_nvswitch_service_pbus_lr10(device) == NVL_SUCCESS) + { + nvswitch_clear_flags(&unhandled, bit); + } + } + + NVSWITCH_UNHANDLED_CHECK(device, unhandled); + + if (unhandled != 0) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + return NVL_SUCCESS; +} + +// +// Service interrupt and re-enable interrupts. Interrupts should disabled when +// this is called. +// +NvlStatus +nvswitch_lib_service_interrupts_lr10 +( + nvswitch_device *device +) +{ + NvlStatus status; + + status = _nvswitch_service_legacy_lr10(device); + + /// @todo remove NVL_NOT_FOUND from the condition below, it was added as a WAR until Bug 2856055 is fixed. + if ((status != NVL_SUCCESS) && (status != -NVL_NOT_FOUND)) + { + return -NVL_MORE_PROCESSING_REQUIRED; + } + + _nvswitch_rearm_msi_lr10(device); + + return NVL_SUCCESS; +} + diff --git a/src/common/nvswitch/kernel/lr10/link_lr10.c b/src/common/nvswitch/kernel/lr10/link_lr10.c new file mode 100644 index 000000000..f48d55388 --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/link_lr10.c @@ -0,0 +1,2038 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvlink_export.h" + +#include "export_nvswitch.h" +#include "common_nvswitch.h" +#include "regkey_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/minion_lr10.h" + +#include "nvswitch/lr10/dev_nvldl_ip.h" +#include "nvswitch/lr10/dev_nvldl_ip_addendum.h" +#include "nvswitch/lr10/dev_minion_ip_addendum.h" +#include "nvswitch/lr10/dev_nvlipt_lnk_ip.h" +#include "nvswitch/lr10/dev_nvlphyctl_ip.h" +#include "nvswitch/lr10/dev_nvltlc_ip.h" +#include "nvswitch/lr10/dev_minion_ip.h" +#include "nvswitch/lr10/dev_trim.h" +#include "nvswitch/lr10/dev_pri_ringstation_sys.h" +#include "nvswitch/lr10/dev_nvlperf_ip.h" +#include "nvswitch/lr10/dev_nvlipt_ip.h" +#include "nvswitch/lr10/dev_nport_ip.h" + +void +nvswitch_setup_link_loopback_mode_lr10 +( + nvswitch_device *device, + NvU32 linkNumber +) +{ + nvlink_link *link; + NV_STATUS status; + + link = nvswitch_get_link(device, linkNumber); + + if ((link == NULL) || + !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) || + (linkNumber >= NVSWITCH_NVLINK_MAX_LINKS)) + { + return; + } + + if (device->link[link->linkNumber].nea) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Setting NEA on link %d\n", + __FUNCTION__, link->linkNumber); + + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_SETNEA, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SETNEA CMD failed for link %d.\n", + __FUNCTION__, link->linkNumber); + } + } + + if (device->link[link->linkNumber].ned) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Setting NED on link %d\n", + __FUNCTION__, link->linkNumber); + + // setting NEDR + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_SETNEDR, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SETNEDR CMD failed for link %d.\n", + __FUNCTION__, link->linkNumber); + } + + // setting NEDW + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_SETNEDW, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SETNEDW CMD failed for link %d.\n", + __FUNCTION__, link->linkNumber); + } + } +} + +static NV_STATUS +_nvswitch_ioctrl_setup_link_plls_lr10 +( + nvlink_link *link +) +{ + NV_STATUS status = NV_OK; + NvU32 linkId, tempRegVal; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + + nvswitch_device *device = link->dev->pDevInfo; + linkId = link->linkNumber; + + if (IS_EMULATION(device)) + { + NVSWITCH_PRINT(device, ERROR,"Skipping PLL init on emulation. \n"); + return status; + } + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS * 400, &timeout); + + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + tempRegVal = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLIPT_LNK , _NVLIPT_LNK , _CTRL_CLK_CTRL); + + if (FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CLK_CTRL, _PLL_PWR_STS, _ON, tempRegVal)) + break; + + nvswitch_os_sleep(1); + } while (keepPolling == NV_TRUE); + + if (FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CLK_CTRL, _PLL_PWR_STS, _OFF, tempRegVal)) + { + NVSWITCH_PRINT(device, ERROR, + "PLL_PWR_STS did not turn _ON for linkId = 0x%x!!\n", linkId); + return NV_ERR_TIMEOUT; + } + + // Request Minion to setup the NVLink clocks + status = nvswitch_minion_send_command(device, linkId, + NV_MINION_NVLINK_DL_CMD_COMMAND_TXCLKSWITCH_PLL, 0); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "Error sending TXCLKSWITCH_PLL command to MINION. 
Link = %d\n", linkId); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_CLOCK_ERROR, NVBIT32(link->linkNumber), TXCLKSWITCH_PLL_ERROR); + return NV_ERR_NVLINK_CLOCK_ERROR; + } + + // Poll for the links to switch to NVLink clocks + nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS * 400, &timeout); + + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + tempRegVal = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLIPT_LNK , _NVLIPT_LNK , _CTRL_CLK_CTRL); + if (FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CLK_CTRL, _TXCLK_STS, _PLL_CLK, tempRegVal)) + break; + + nvswitch_os_sleep(1); + } while (keepPolling == NV_TRUE); + + if (!FLD_TEST_DRF(_NVLIPT_LNK, _CTRL_CLK_CTRL, _TXCLK_STS, _PLL_CLK, tempRegVal)) + { + // Print the links for which we were unable to switch to PLL clock + NVSWITCH_PRINT(device, ERROR, + "TXCLK_STS did not switch to _PLL_CLOCK for linkId = 0x%x!!\n", linkId); + return NV_ERR_TIMEOUT; + } + + return status; +} + +NvBool +nvswitch_is_link_in_reset_lr10 +( + nvswitch_device *device, + nvlink_link *link +) +{ + NvU32 val; + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, + NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET); + + return (FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, _LINK_RESET_STATUS, + _ASSERTED, val)) ? NV_TRUE : NV_FALSE; +} + +NvlStatus +nvswitch_poll_sublink_state_lr10 +( + nvswitch_device *device, + nvlink_link *link +) +{ + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + NvU32 val; + NvBool bPreSiPlatform = (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device)); + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS * (bPreSiPlatform ? 2000: 200), &timeout); + + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _SUBLINK_CHANGE); + + if (FLD_TEST_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _STATUS, _FAULT, val)) + { + NVSWITCH_PRINT(device, ERROR, + "%s : Fault while changing sublink state (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return -NVL_ERR_INVALID_STATE; + } + + if (FLD_TEST_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _STATUS, _DONE, val)) + { + break; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + if ((!FLD_TEST_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _STATUS, _DONE, val))) + { + NVSWITCH_PRINT(device, ERROR, + "%s : Timeout while waiting sublink state (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return -NVL_ERR_GENERIC; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_init_dl_pll +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvlStatus status; + + status = nvswitch_minion_send_command(device, link->linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_INITPLL, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: INITPLL failed for link %d.\n", + __FUNCTION__, link->linkNumber); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_CLOCK_ERROR, NVBIT32(link->linkNumber), INITPLL_ERROR); + return NV_ERR_NVLINK_CLOCK_ERROR; + } + + status = _nvswitch_ioctrl_setup_link_plls_lr10(link); + if (status != NV_OK){ + return status; + } + + status = nvswitch_minion_send_command(device, link->linkNumber, NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHY, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: INITPHY failed for link %d.\n", + __FUNCTION__, link->linkNumber); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_INIT_ERROR, NVBIT32(link->linkNumber), INITPHY_ERROR); + return NV_ERR_NVLINK_INIT_ERROR; + } + + + return NVL_SUCCESS; +} + +NvU32 
+nvswitch_get_sublink_width_lr10 +( + nvswitch_device *device, + NvU32 linkNumber +) +{ + return NVSWITCH_NUM_LANES_LR10; +} + +void +nvswitch_init_dlpl_interrupts_lr10 +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 linkNumber = link->linkNumber; + NvU32 crcShortRegkeyVal = device->regkeys.crc_bit_error_rate_short; + NvU32 crcLongRegkeyVal = device->regkeys.crc_bit_error_rate_long; + NvU32 intrRegVal; + NvU32 crcRegVal; + NvU32 shortRateMask; + NvU32 longRateMask; + + ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN) == + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_MAN)); + ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN) == + DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_MAN)); + ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_EXP) == + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_EXP)); + ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_EXP) == + DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_THRESHOLD_EXP)); + ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_MAN) == + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_MAN)); + ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_MAN) == + DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_MAN)); + ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP) == + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_EXP)); + ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP) == + DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_SHORT_TIMESCALE_EXP)); + + ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_THRESHOLD_MAN) == + (DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN) - + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN))); + ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_THRESHOLD_MAN) == + (DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN) - + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN))); + ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_THRESHOLD_EXP) == + (DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_EXP) - + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN))); + ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_THRESHOLD_EXP) == + (DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_EXP) - + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN))); + ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_TIMESCALE_MAN) == + (DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_TIMESCALE_MAN) - + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN))); + ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_TIMESCALE_MAN) == + (DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_TIMESCALE_MAN) - + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN))); + ct_assert(DRF_BASE(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_TIMESCALE_EXP) == + (DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_TIMESCALE_EXP) - + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN))); + ct_assert(DRF_EXTENT(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_TIMESCALE_EXP) == + (DRF_EXTENT(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_TIMESCALE_EXP) - + DRF_BASE(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN))); + + // W1C any stale state. 
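+    // Both registers are write-1-to-clear, so writing all ones discards any
+    // previously latched interrupt state before the stall and non-stall
+    // enables are reprogrammed below.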
+ NVSWITCH_LINK_WR32_LR10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR, 0xffffffff); + NVSWITCH_LINK_WR32_LR10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_SW2, 0xffffffff); + + // Stall tree routes to INTR_A which is connected to NVLIPT fatal tree + NVSWITCH_LINK_WR32_LR10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_STALL_EN, + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_REPLAY, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_RECOVERY_SHORT, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _LTSSM_FAULT_UP, _ENABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_RAM, _ENABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_INTERFACE, _ENABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _TX_FAULT_SUBLINK_CHANGE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _RX_FAULT_SUBLINK_CHANGE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _RX_FAULT_DL_PROTOCOL, _ENABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _RX_SHORT_ERROR_RATE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _RX_LONG_ERROR_RATE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _RX_ILA_TRIGGER, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _RX_CRC_COUNTER, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _LTSSM_PROTOCOL, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_STALL_EN, _MINION_REQUEST, _DISABLE)); + + // NONSTALL -> NONFATAL + NVSWITCH_LINK_WR32_LR10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_NONSTALL_EN, + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _TX_REPLAY, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _TX_RECOVERY_SHORT, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _LTSSM_FAULT_UP, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _TX_FAULT_RAM, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _TX_FAULT_INTERFACE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _TX_FAULT_SUBLINK_CHANGE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_FAULT_SUBLINK_CHANGE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_FAULT_DL_PROTOCOL, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_SHORT_ERROR_RATE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_LONG_ERROR_RATE, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_ILA_TRIGGER, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_CRC_COUNTER, _ENABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _LTSSM_PROTOCOL, _DISABLE) | + DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _MINION_REQUEST, _DISABLE)); + + intrRegVal = NVSWITCH_LINK_RD32_LR10(device, linkNumber, NVLDL, + _NVLDL_TOP, _INTR_NONSTALL_EN); + crcRegVal = NVSWITCH_LINK_RD32_LR10(device, linkNumber, NVLDL, + _NVLDL_RX, _ERROR_RATE_CTRL); + + // Enable RX error rate short interrupt if the regkey is set + if (crcShortRegkeyVal != NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_OFF) + { + shortRateMask = DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_MAN) | + DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_THRESHOLD_EXP) | + DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_MAN) | + DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_TIMESCALE_EXP); + + intrRegVal |= DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_SHORT_ERROR_RATE, _ENABLE); + crcRegVal &= ~shortRateMask; + crcRegVal |= crcShortRegkeyVal; + } + // Enable RX error rate long interrupt if the regkey is set + if (crcLongRegkeyVal != NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_OFF) + { + longRateMask = DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_THRESHOLD_MAN) | + DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_THRESHOLD_EXP) | + 
DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_TIMESCALE_MAN) | + DRF_SHIFTMASK(NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_TIMESCALE_EXP); + + intrRegVal |= DRF_DEF(_NVLDL_TOP, _INTR_NONSTALL_EN, _RX_LONG_ERROR_RATE, _ENABLE); + crcRegVal &= ~longRateMask; + crcRegVal |= crcLongRegkeyVal << DRF_SHIFT(NV_NVLDL_RX_ERROR_RATE_CTRL_LONG_THRESHOLD_MAN); + } + + NVSWITCH_LINK_WR32_LR10(device, linkNumber, NVLDL, + _NVLDL_TOP, _INTR_NONSTALL_EN, intrRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNumber, NVLDL, + _NVLDL_RX, _ERROR_RATE_CTRL, crcRegVal); +} + +static void +_nvswitch_disable_dlpl_interrupts +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 linkNumber = link->linkNumber; + + NVSWITCH_LINK_WR32_LR10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_STALL_EN, 0x0); + NVSWITCH_LINK_WR32_LR10(device, linkNumber, NVLDL, _NVLDL_TOP, _INTR_NONSTALL_EN, 0x0); +} + +void +nvswitch_store_topology_information_lr10 +( + nvswitch_device *device, + nvlink_link *link +) +{ + NvU32 tempval; + + link->bInitnegotiateConfigGood = NV_TRUE; + link->remoteSid = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK, _TOPOLOGY_REMOTE_CHIP_SID_HI); + link->remoteSid = link->remoteSid << 32; + link->remoteSid |= NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK, _TOPOLOGY_REMOTE_CHIP_SID_LO); + + tempval = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK, _TOPOLOGY_REMOTE_LINK_INFO); + link->remoteLinkId = DRF_VAL(_NVLIPT_LNK, _TOPOLOGY_REMOTE_LINK_INFO, _LINK_NUMBER, tempval); + + link->localSid = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT, + _NVLIPT_COMMON, _TOPOLOGY_LOCAL_CHIP_SID_HI); + link->localSid = link->localSid << 32; + link->localSid |= NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT, + _NVLIPT_COMMON, _TOPOLOGY_LOCAL_CHIP_SID_LO); + + tempval = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK, _TOPOLOGY_REMOTE_CHIP_TYPE); + + // Update the remoteDeviceType with NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE values. + switch(tempval) + { + case NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE_NV3P0AMP: + link->remoteDeviceType = NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU; + break; + case NV_NVLIPT_LNK_TOPOLOGY_REMOTE_CHIP_TYPE_TYPE_NV3P0LRK: + link->remoteDeviceType = NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH; + break; + default: + link->remoteDeviceType = NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE; + break; + } +} + +void +nvswitch_init_lpwr_regs_lr10 +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 linkNum = link->linkNumber; + NvU32 tempRegVal, icLimit, fbIcInc, lpIcInc, fbIcDec, lpIcDec, lpEntryThreshold; + NvU32 lpExitThreshold; + NvU8 softwareDesired, hardwareDisable; + NvBool bLpEnable; + + if (device->regkeys.enable_pm == NV_SWITCH_REGKEY_ENABLE_PM_NO) + { + return; + } + + // + // Power Management threshold settings + // These settings are currently being hard coded. 
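+    // They program the NVLTLC power management counters on the TX and RX
+    // sides of the link: the IC limit, its increment and decrement rates, the
+    // LP entry and exit thresholds, and the software enable in
+    // _PWRM_IC_SW_CTRL.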
+ // They will be parsed from the VBIOS NVLink LPWR table once bug 2767390 is + // implemented + // + + // IC Limit + icLimit = 16110000; + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_LIMIT, _LIMIT, icLimit, + tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _PWRM_IC_LIMIT, + tempRegVal); + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK,_PWRM_IC_LIMIT, _LIMIT, icLimit, + tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _PWRM_IC_LIMIT, + tempRegVal); + + //IC Inc + fbIcInc = 1; + lpIcInc = 1; + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_INC, _FBINC, fbIcInc, tempRegVal); + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_INC, _LPINC, lpIcInc, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _PWRM_IC_INC, + tempRegVal); + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _PWRM_IC_INC, _FBINC, fbIcInc, tempRegVal); + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _PWRM_IC_INC, _LPINC, lpIcInc, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _PWRM_IC_INC, + tempRegVal); + + //IC Dec + fbIcDec = 1; + lpIcDec = 65535; + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_DEC, _FBDEC, fbIcDec, tempRegVal); + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_DEC, _LPDEC, lpIcDec, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _PWRM_IC_DEC, + tempRegVal); + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _PWRM_IC_DEC, _FBDEC, fbIcDec, tempRegVal); + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _PWRM_IC_DEC, _LPDEC, lpIcDec, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _PWRM_IC_DEC, + tempRegVal); + + //IC Enter Threshold + lpEntryThreshold = 16110000; + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_LP_ENTER_THRESHOLD, _THRESHOLD, lpEntryThreshold, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _PWRM_IC_LP_ENTER_THRESHOLD, + tempRegVal); + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _PWRM_IC_LP_ENTER_THRESHOLD, _THRESHOLD, lpEntryThreshold, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _PWRM_IC_LP_ENTER_THRESHOLD, + tempRegVal); + + //IC Exit Threshold + lpExitThreshold = 16044465; + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_LP_EXIT_THRESHOLD, _THRESHOLD, lpExitThreshold, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _PWRM_IC_LP_EXIT_THRESHOLD, + tempRegVal); + + tempRegVal = 0; + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _PWRM_IC_LP_EXIT_THRESHOLD, _THRESHOLD, lpExitThreshold, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _PWRM_IC_LP_EXIT_THRESHOLD, + tempRegVal); + + //LP Entry Enable + bLpEnable = NV_TRUE; + softwareDesired = (bLpEnable) ? 0x1 : 0x0; + hardwareDisable = (bLpEnable) ? 
0x0 : 0x1; + + tempRegVal = NVSWITCH_LINK_RD32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _PWRM_IC_SW_CTRL); + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_SW_CTRL, _SOFTWAREDESIRED, + softwareDesired, tempRegVal); + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_TX_LNK, _PWRM_IC_SW_CTRL, _HARDWAREDISABLE, + hardwareDisable, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _PWRM_IC_SW_CTRL, + tempRegVal); + + tempRegVal = NVSWITCH_LINK_RD32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _PWRM_IC_SW_CTRL); + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _PWRM_IC_SW_CTRL, _SOFTWAREDESIRED, + softwareDesired, tempRegVal); + tempRegVal = FLD_SET_DRF_NUM(_NVLTLC_RX_LNK, _PWRM_IC_SW_CTRL, _HARDWAREDISABLE, + hardwareDisable, tempRegVal); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _PWRM_IC_SW_CTRL, + tempRegVal); +} + + +void +nvswitch_init_buffer_ready_lr10 +( + nvswitch_device *device, + nvlink_link *link, + NvBool bNportBufferReady +) +{ + NvU32 val; + NvU32 linkNum = link->linkNumber; + + if (FLD_TEST_DRF(_SWITCH_REGKEY, _SKIP_BUFFER_READY, _TLC, _NO, + device->regkeys.skip_buffer_ready)) + { + val = DRF_NUM(_NVLTLC_RX_SYS, _CTRL_BUFFER_READY, _BUFFERRDY, 0x1); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_SYS, _CTRL_BUFFER_READY, val); + val = DRF_NUM(_NVLTLC_TX_SYS, _CTRL_BUFFER_READY, _BUFFERRDY, 0x1); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_SYS, _CTRL_BUFFER_READY, val); + } + + if (bNportBufferReady && + FLD_TEST_DRF(_SWITCH_REGKEY, _SKIP_BUFFER_READY, _NPORT, _NO, + device->regkeys.skip_buffer_ready)) + { + val = DRF_NUM(_NPORT, _CTRL_BUFFER_READY, _BUFFERRDY, 0x1); + NVSWITCH_LINK_WR32_LR10(device, linkNum, NPORT, _NPORT, _CTRL_BUFFER_READY, val); + } +} + +static void +_nvswitch_configure_reserved_throughput_counters +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 linkNum = link->linkNumber; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLTLC, link->linkNumber)) + { + NVSWITCH_PRINT(device, INFO, + "Invalid link, skipping NVLink throughput counter config for link %d\n", + link->linkNumber); + return; + } + + // + // Counters 0 and 2 will be reserved for monitoring tools + // Counters 1 and 3 will be user-configurable and used by devtools + // + + // Rx0 config + NVSWITCH_LINK_WR32_IDX_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, 0, + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _UNIT, _FLITS) | + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _DATA) | + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _VCSETFILTERMODE, _INIT)); + + // Tx0 config + NVSWITCH_LINK_WR32_IDX_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, 0, + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _UNIT, _FLITS) | + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _DATA) | + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _VCSETFILTERMODE, _INIT)); + + // Rx2 config + NVSWITCH_LINK_WR32_IDX_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, 2, + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _UNIT, _FLITS) | + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _HEAD) | + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _AE) | + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _BE) | + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _DATA) | + DRF_DEF(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL_0, _VCSETFILTERMODE, _INIT)); + + // Tx2 config + 
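+    // Like the Rx2 counter above, this counter accumulates head, AE, BE and
+    // data flits, whereas counters 0 count data flits only.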
NVSWITCH_LINK_WR32_IDX_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, 2, + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _UNIT, _FLITS) | + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _HEAD) | + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _AE) | + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _BE) | + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _FLITFILTER, _DATA) | + DRF_DEF(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL_0, _VCSETFILTERMODE, _INIT)); + + // Enable Rx for counters 0, 2 + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, + DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, _ENRX0, 0x1) | + DRF_NUM(_NVLTLC_RX_LNK, _DEBUG_TP_CNTR_CTRL, _ENRX2, 0x1)); + + // Enable Tx for counters 0, 2 + NVSWITCH_LINK_WR32_LR10(device, linkNum, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, + DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, _ENTX0, 0x1) | + DRF_NUM(_NVLTLC_TX_LNK, _DEBUG_TP_CNTR_CTRL, _ENTX2, 0x1)); +} + +static NvlStatus +_nvswitch_init_link_post_active +( + nvlink_link *link, + NvU32 flags +) +{ + NvlStatus status = NVL_SUCCESS; + nvswitch_device *device = link->dev->pDevInfo; + + nvswitch_init_lpwr_regs(link); + status = nvswitch_request_tl_link_state_lr10(link, + NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_ACTIVE, + flags == NVLINK_STATE_CHANGE_SYNC); + if (status != NVL_SUCCESS) + { + return status; + } + + // Note: buffer_rdy should be asserted last! + nvswitch_init_buffer_ready(device, link, NV_TRUE); + + return status; +} + +static void +_nvswitch_power_down_link_plls +( + nvlink_link *link +) +{ + NvlStatus status = NVL_SUCCESS; + nvswitch_device *device = link->dev->pDevInfo; + + if (IS_EMULATION(device)) + { + NVSWITCH_PRINT(device, ERROR,"Skipping PLL init on emulation. 
\n"); + return; + } + + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_TXCLKSWITCH_ALT, 0); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: TXCLKSWITCH_ALT CMD failed for link %d.\n", + __FUNCTION__, link->linkNumber); + return; + } + + return; +} + +NvlStatus +nvswitch_corelib_add_link_lr10 +( + nvlink_link *link +) +{ + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_corelib_remove_link_lr10 +( + nvlink_link *link +) +{ + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_corelib_set_dl_link_mode_lr10 +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 val; + NvU32 link_state; + NvlStatus status = NVL_SUCCESS; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + switch (mode) + { + case NVLINK_LINKSTATE_SAFE: + { + // check if link is in reset + if (nvswitch_is_link_in_reset(device, link)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d is still in reset, cannot change link state\n", + __FUNCTION__, link->linkNumber); + return NVL_ERR_INVALID_STATE; + } + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_STATE); + link_state = DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, val); + + if (link_state == NV_NVLDL_TOP_LINK_STATE_STATE_SWCFG) + { + NVSWITCH_PRINT(device, INFO, + "%s : Link is already in Safe mode for (%s).\n", + __FUNCTION__, link->linkName); + break; + } + else if (link_state == NV_NVLDL_TOP_LINK_STATE_STATE_HWCFG) + { + NVSWITCH_PRINT(device, INFO, + "%s : Link already transitioning to Safe mode for (%s).\n", + __FUNCTION__, link->linkName); + break; + } + + NVSWITCH_PRINT(device, INFO, + "NVRM: %s : Changing Link state to Safe for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + + if (link_state == NV_NVLDL_TOP_LINK_STATE_STATE_INIT) + { + val = 0; + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_CHANGE); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _NEWSTATE, _HWCFG, val); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _OLDSTATE_MASK, _DONTCARE, val); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _ACTION, _LTSSM_CHANGE, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_CHANGE, val); + } + else if (link_state == NV_NVLDL_TOP_LINK_STATE_STATE_ACTIVE) + { + val = 0; + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_CHANGE); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _NEWSTATE, _SWCFG, val); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _OLDSTATE_MASK, _DONTCARE, val); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _ACTION, _LTSSM_CHANGE, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_CHANGE, val); + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s : Link is in invalid state" + " cannot set to safe state (%s):(%s). 
(%x) (%x)\n", + __FUNCTION__, device->name, link->linkName, val, link_state); + return -NVL_ERR_INVALID_STATE; + } + + break; + } + + case NVLINK_LINKSTATE_HS: + { + // check if link is in reset + if (nvswitch_is_link_in_reset(device, link)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d is still in reset, cannot change link state\n", + __FUNCTION__, link->linkNumber); + return NVL_ERR_INVALID_STATE; + } + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_STATE); + link_state = DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, val); + + if (link_state == NV_NVLDL_TOP_LINK_STATE_STATE_ACTIVE) + { + NVSWITCH_PRINT(device, INFO, + "%s : Link is already in Active mode (%s).\n", + __FUNCTION__, link->linkName); + break; + } + else if (link_state == NV_NVLDL_TOP_LINK_STATE_STATE_INIT) + { + NVSWITCH_PRINT(device, ERROR, + "%s : Link cannot be taken from INIT state to" + " Active mode for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return -NVL_ERR_INVALID_STATE; + } + else if (link_state == NV_NVLDL_TOP_LINK_STATE_STATE_SWCFG) + { + NVSWITCH_PRINT(device, INFO, + "%s : Changing Link state to Active for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_CHANGE); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _NEWSTATE, _ACTIVE, val); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _OLDSTATE_MASK, _DONTCARE, val); + val = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _ACTION, _LTSSM_CHANGE, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_CHANGE, val); + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s : Link is in invalid state" + " cannot set to active state (%s):(%s). (%x) (%x)\n", + __FUNCTION__, device->name, link->linkName, val, link_state); + return -NVL_ERR_INVALID_STATE; + } + + break; + } + + case NVLINK_LINKSTATE_OFF: + { + _nvswitch_power_down_link_plls(link); + + if (nvswitch_lib_notify_client_events(device, + NVSWITCH_DEVICE_EVENT_PORT_DOWN) != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_DOWN event\n", + __FUNCTION__); + } + + break; + } + + case NVLINK_LINKSTATE_RESET: + { + break; + } + + case NVLINK_LINKSTATE_ENABLE_PM: + { + if (device->regkeys.enable_pm == NV_SWITCH_REGKEY_ENABLE_PM_YES) + { + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_ENABLEPM, 0); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ENABLEPM CMD failed for link %d.\n", + __FUNCTION__, link->linkNumber); + return status; + } + } + break; + } + + case NVLINK_LINKSTATE_DISABLE_PM: + { + if (device->regkeys.enable_pm == NV_SWITCH_REGKEY_ENABLE_PM_YES) + { + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_DISABLEPM, 0); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: DISABLEPM CMD failed for link %d.\n", + __FUNCTION__, link->linkNumber); + return status; + } + } + break; + } + + case NVLINK_LINKSTATE_DISABLE_HEARTBEAT: + { + // NOP + break; + } + + case NVLINK_LINKSTATE_PRE_HS: + { + break; + } + + case NVLINK_LINKSTATE_TRAFFIC_SETUP: + { + status = _nvswitch_init_link_post_active(link, flags); + if (status != NVL_SUCCESS) + { + return status; + } + + break; + } + + case NVLINK_LINKSTATE_DISABLE_ERR_DETECT: + { + // Disable DL/PL interrupts + _nvswitch_disable_dlpl_interrupts(link); + break; + } + + case NVLINK_LINKSTATE_LANE_DISABLE: + { + status = 
nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_LANEDISABLE, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : LANEDISABLE CMD failed for link (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + + break; + } + + case NVLINK_LINKSTATE_LANE_SHUTDOWN: + { + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_LANESHUTDOWN, 0); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : SHUTDOWN CMD failed for link (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + + break; + } + + case NVLINK_LINKSTATE_INITPHASE1: + { + + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHASE1, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : INITPHASE1 failed for link (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_CONFIGURATION_ERROR, + NVBIT32(link->linkNumber), INITPHASE1_ERROR); + return NV_ERR_NVLINK_CONFIGURATION_ERROR; + } + + // After INITPHASE1, apply NEA setting + nvswitch_setup_link_loopback_mode(device, link->linkNumber); + break; + } + + case NVLINK_LINKSTATE_INITOPTIMIZE: + { + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_INITOPTIMIZE, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : INITOPTIMIZE failed for link (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + + break; + } + + case NVLINK_LINKSTATE_POST_INITOPTIMIZE: + { + // Poll for TRAINING_GOOD + status = nvswitch_minion_get_initoptimize_status_lr10(device, link->linkNumber); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s Error polling for INITOPTIMIZE TRAINING_GOOD. Link (%s):(%s)\n", + __FUNCTION__, device->name, link->linkName); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_TRAINING_ERROR, NVBIT32(link->linkNumber), INITOPTIMIZE_ERROR); + return NV_ERR_NVLINK_TRAINING_ERROR; + } + + // Send INITTL DLCMD + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_INITTL, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : INITTL failed for link (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_TRAINING_ERROR, NVBIT32(link->linkNumber), INITTL_ERROR); + return NV_ERR_NVLINK_TRAINING_ERROR; + } + + break; + } + + case NVLINK_LINKSTATE_INITNEGOTIATE: + { + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_INITNEGOTIATE, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : INITNEGOTIATE failed for link (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + + break; + } + + case NVLINK_LINKSTATE_POST_INITNEGOTIATE: + { + // Poll for CONFIG_GOOD + status = nvswitch_minion_get_initnegotiate_status_lr10(device, link->linkNumber); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s Error polling for INITNEGOTIATE CONFIG_GOOD. 
Link (%s):(%s)\n", + __FUNCTION__, device->name, link->linkName); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_CONFIGURATION_ERROR, + NVBIT32(link->linkNumber), INITNEGOTIATE_ERROR); + return NV_ERR_NVLINK_CONFIGURATION_ERROR; + } + else + { + nvswitch_store_topology_information(device, link); + } + + break; + } + + default: + { + NVSWITCH_PRINT(device, ERROR, + "%s : Invalid mode specified.\n", + __FUNCTION__); + break; + } + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_corelib_get_dl_link_mode_lr10 +( + nvlink_link *link, + NvU64 *mode +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 link_state; + NvU32 val = 0; + + *mode = NVLINK_LINKSTATE_OFF; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + // check if links are in reset + if (nvswitch_is_link_in_reset(device, link)) + { + *mode = NVLINK_LINKSTATE_RESET; + return NVL_SUCCESS; + } + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_STATE); + + link_state = DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, val); + + switch (link_state) + { + case NV_NVLDL_TOP_LINK_STATE_STATE_INIT: + *mode = NVLINK_LINKSTATE_OFF; + break; + case NV_NVLDL_TOP_LINK_STATE_STATE_HWCFG: + *mode = NVLINK_LINKSTATE_DETECT; + break; + case NV_NVLDL_TOP_LINK_STATE_STATE_SWCFG: + *mode = NVLINK_LINKSTATE_SAFE; + break; + case NV_NVLDL_TOP_LINK_STATE_STATE_ACTIVE: + *mode = NVLINK_LINKSTATE_HS; + break; + case NV_NVLDL_TOP_LINK_STATE_STATE_FAULT: + *mode = NVLINK_LINKSTATE_FAULT; + break; + case NV_NVLDL_TOP_LINK_STATE_STATE_RCVY_AC: + case NV_NVLDL_TOP_LINK_STATE_STATE_RCVY_RX: + *mode = NVLINK_LINKSTATE_RECOVERY; + break; + default: + *mode = NVLINK_LINKSTATE_OFF; + break; + } + + return NVL_SUCCESS; +} + +void +nvswitch_corelib_get_uphy_load_lr10 +( + nvlink_link *link, + NvBool *bUnlocked +) +{ + *bUnlocked = NV_FALSE; +} + +NvlStatus +nvswitch_corelib_set_tl_link_mode_lr10 +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvlStatus status = NVL_SUCCESS; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + switch (mode) + { + case NVLINK_LINKSTATE_RESET: + { + // perform TL reset + NVSWITCH_PRINT(device, INFO, + "%s: Performing TL Reset on link %d\n", + __FUNCTION__, link->linkNumber); + + status = nvswitch_request_tl_link_state_lr10(link, + NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, + flags == NVLINK_STATE_CHANGE_SYNC); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NvLink Reset has failed for link %d\n", + __FUNCTION__, link->linkNumber); + return status; + } + break; + } + + default: + { + NVSWITCH_PRINT(device, ERROR, + "%s : Invalid mode specified.\n", + __FUNCTION__); + break; + } + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_corelib_get_tl_link_mode_lr10 +( + nvlink_link *link, + NvU64 *mode +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + nvswitch_device *device = link->dev->pDevInfo; + NvU32 link_state; + NvU32 val = 0; + + *mode = NVLINK_LINKSTATE_OFF; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + // check if links are in reset + if 
(nvswitch_is_link_in_reset(device, link)) + { + *mode = NVLINK_LINKSTATE_RESET; + return NVL_SUCCESS; + } + + // Read state from NVLIPT HW + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK, _CTRL_LINK_STATE_STATUS); + + link_state = DRF_VAL(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE, + val); + + switch(link_state) + { + case NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_ACTIVE: + *mode = NVLINK_LINKSTATE_HS; + break; + + case NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_L2: + *mode = NVLINK_LINKSTATE_SLEEP; + break; + + case NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS_CURRENTLINKSTATE_CONTAIN: + *mode = NVLINK_LINKSTATE_CONTAIN; + break; + + default: + // Currently, only ACTIVE, L2 and CONTAIN states are supported + return NVL_ERR_INVALID_STATE; + break; + } + +#endif + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_corelib_set_tx_mode_lr10 +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 tx_sublink_state; + NvU32 val; + NvlStatus status = NVL_SUCCESS; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + // check if link is in reset + if (nvswitch_is_link_in_reset(device, link)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d is still in reset, cannot change sub-link state\n", + __FUNCTION__, link->linkNumber); + return -NVL_ERR_INVALID_STATE; + } + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX); + + tx_sublink_state = DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, val); + + // Check if Sublink State Machine is ready to accept a sublink change request. + status = nvswitch_poll_sublink_state(device, link); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : SLSM not ready to accept a state change request for(%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + + switch (mode) + { + case NVLINK_SUBLINK_STATE_TX_COMMON_MODE: + { + val = _nvswitch_init_dl_pll(link); + if (val != NVL_SUCCESS) + { + return val; + } + + break; + } + + case NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE: + { + // Not applicable for NV IP + break; + } + + case NVLINK_SUBLINK_STATE_TX_DATA_READY: + { + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_INITDLPL, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: INITNVLDL CMD failed for link %d.\n", + __FUNCTION__, link->linkNumber); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_INIT_ERROR, + NVBIT32(link->linkNumber), INITDLPL_ERROR); + return NV_ERR_NVLINK_INIT_ERROR; + } + + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_INITLANEENABLE, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: INITLANEENABLE CMD failed for link %d.\n", + __FUNCTION__, link->linkNumber); + NVSWITCH_ASSERT_INFO(NV_ERR_NVLINK_INIT_ERROR, NVBIT32(link->linkNumber), INITLANEENABLE_ERROR); + return NV_ERR_NVLINK_INIT_ERROR; + } + + break; + } + + case NVLINK_SUBLINK_STATE_TX_PRBS_EN: + { + // Not needed with ALT + break; + } + + case NVLINK_SUBLINK_STATE_TX_POST_HS: + { + // NOP: In general, there is no point to downgrade *_PRBS_* and *_SCRAM_* values. 
+ break; + } + + case NVLINK_SUBLINK_STATE_TX_EQ: + { + //TODO: To be implemented + break; + } + + case NVLINK_SUBLINK_STATE_TX_HS: + { + // Not needed with ALT + break; + } + + case NVLINK_SUBLINK_STATE_TX_SAFE: + { + if (tx_sublink_state == NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_SAFE) + { + NVSWITCH_PRINT(device, INFO, + "%s : TX already in Safe mode for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + break; + } + + NVSWITCH_PRINT(device, INFO, + "%s : Changing TX sublink state to Safe mode for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _SUBLINK_CHANGE); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _NEWSTATE, _SAFE, val); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _SUBLINK, _TX, val); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _ACTION, _SLSM_CHANGE, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _SUBLINK_CHANGE, val); + + status = nvswitch_poll_sublink_state(device, link); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : Error while changing TX sublink to Safe Mode for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + + break; + } + + case NVLINK_SUBLINK_STATE_TX_OFF: + { + if (tx_sublink_state == NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_OFF) + { + NVSWITCH_PRINT(device, INFO, + "%s : TX already OFF (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + break; + } + else if (tx_sublink_state == NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_HS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : TX cannot be taken from HS to OFF directly for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return -NVL_ERR_GENERIC; + } + + NVSWITCH_PRINT(device, INFO, + "%s : Changing TX sublink state to OFF for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _SUBLINK_CHANGE); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _COUNTDOWN, _IMMEDIATE, val); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _NEWSTATE, _OFF, val); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _SUBLINK, _TX, val); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _ACTION, _SLSM_CHANGE, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _SUBLINK_CHANGE, val); + + status = nvswitch_poll_sublink_state(device, link); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : Error while changing TX sublink to off Mode for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + break; + } + + default: + NVSWITCH_PRINT(device, ERROR, + "%s : Invalid TX sublink mode specified.\n", + __FUNCTION__); + break; + } + + return status; +} + +NvlStatus +nvswitch_corelib_get_tx_mode_lr10 +( + nvlink_link *link, + NvU64 *mode, + NvU32 *subMode +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 tx_sublink_state; + NvU32 data = 0; + + *mode = NVLINK_SUBLINK_STATE_TX_OFF; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + // check if link is in reset + if (nvswitch_is_link_in_reset(device, link)) + { + *mode = NVLINK_SUBLINK_STATE_TX_OFF; + return NVL_SUCCESS; + } + + data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX); + + tx_sublink_state = 
DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, data); + + // Return NVLINK_SUBLINK_SUBSTATE_TX_STABLE for sub-state + *subMode = NVLINK_SUBLINK_SUBSTATE_TX_STABLE; + + switch (tx_sublink_state) + { + case NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_EIGHTH: + *mode = NVLINK_SUBLINK_STATE_TX_SINGLE_LANE; + break; + + case NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_HS: + *mode = NVLINK_SUBLINK_STATE_TX_HS; + break; + + case NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_TRAIN: + *mode = NVLINK_SUBLINK_STATE_TX_TRAIN; + break; + + case NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_SAFE: + *mode = NVLINK_SUBLINK_STATE_TX_SAFE; + break; + + case NV_NVLDL_TX_SLSM_STATUS_TX_PRIMARY_STATE_OFF: + *mode = NVLINK_SUBLINK_STATE_TX_OFF; + break; + + default: + *mode = NVLINK_SUBLINK_STATE_TX_OFF; + break; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_corelib_set_rx_mode_lr10 +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 rx_sublink_state; + NvU32 val; + NvlStatus status = NVL_SUCCESS; + NvU32 delay_ns; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + // check if link is in reset + if (nvswitch_is_link_in_reset(device, link)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d is still in reset, cannot change sub-link state\n", + __FUNCTION__, link->linkNumber); + return -NVL_ERR_INVALID_STATE; + } + + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX); + + rx_sublink_state = DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, val); + + // Check if Sublink State Machine is ready to accept a sublink change request. + status = nvswitch_poll_sublink_state(device, link); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : SLSM not ready to accept a state change request for(%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + + switch (mode) + { + case NVLINK_SUBLINK_STATE_RX_HS: + break; + + case NVLINK_SUBLINK_STATE_RX_SAFE: + break; + + case NVLINK_SUBLINK_STATE_RX_OFF: + { + if (rx_sublink_state == NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_OFF) + { + NVSWITCH_PRINT(device, INFO, + "%s : RX already OFF (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + break; + } + else if (rx_sublink_state == NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_HS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : RX cannot be taken from HS to OFF directly for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + status = -NVL_ERR_GENERIC; + return status; + } + + NVSWITCH_PRINT(device, INFO, + "%s : Changing RX sublink state to OFF for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _SUBLINK_CHANGE); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _COUNTDOWN, _IMMEDIATE, val); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _OLDSTATE_MASK, _DONTCARE, val); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _NEWSTATE, _OFF, val); + val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _SUBLINK, _RX, val); + + // When changing RX sublink state use FORCE, otherwise it will fault. 
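+            // (The TX sublink transitions in nvswitch_corelib_set_tx_mode_lr10
+            // use the regular _SLSM_CHANGE action.)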
+ val = FLD_SET_DRF(_NVLDL_TOP, _SUBLINK_CHANGE, _ACTION, _SLSM_FORCE, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _SUBLINK_CHANGE, val); + + NVSWITCH_PRINT(device, INFO, + "%s : NV_NVLDL_TOP_SUBLINK_CHANGE = 0x%08x\n", __FUNCTION__, val); + + status = nvswitch_poll_sublink_state(device, link); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s : Error while changing RX sublink to Off Mode for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + break; + } + + case NVLINK_SUBLINK_STATE_RX_RXCAL: + { + // Enable RXCAL in CFG_CTL_6, Delay 200us (bug 2551877), and check CFG_STATUS_0 for RXCAL_DONE=1. + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLPHYCTL_COMMON, _CFG_CTL_6); + val = FLD_SET_DRF(_NVLPHYCTL_COMMON, _CFG_CTL_6, _RXCAL , _ON, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLDL, _NVLPHYCTL_COMMON, _CFG_CTL_6, val); + + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + delay_ns = NVSWITCH_INTERVAL_1SEC_IN_NS; + } + else + { + delay_ns = 200 * NVSWITCH_INTERVAL_1USEC_IN_NS; + } + + NVSWITCH_NSEC_DELAY(delay_ns); + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLPHYCTL_COMMON, _CFG_STATUS_0); + if (!FLD_TEST_DRF_NUM(_NVLPHYCTL_COMMON, _CFG_STATUS_0, _RXCAL_DONE, 0x1, val)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout while waiting for RXCAL_DONE on link %d.\n", + __FUNCTION__, link->linkNumber); + return -NVL_ERR_GENERIC; + } + break; + } + + case NVLINK_SUBLINK_STATE_RX_INIT_TERM: + { + // Invoke MINION routine to enable RX Termination + status = nvswitch_minion_set_rx_term_lr10(device, link->linkNumber); + + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s : Error while setting RX INIT_TERM for (%s):(%s).\n", + __FUNCTION__, device->name, link->linkName); + return status; + } + + break; + } + + + default: + NVSWITCH_PRINT(device, ERROR, + "%s : Invalid RX sublink mode specified.\n", + __FUNCTION__); + break; + } + + return status; +} + +NvlStatus +nvswitch_corelib_get_rx_mode_lr10 +( + nvlink_link *link, + NvU64 *mode, + NvU32 *subMode +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvU32 rx_sublink_state; + NvU32 data = 0; + + *mode = NVLINK_SUBLINK_STATE_RX_OFF; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + // check if link is in reset + if (nvswitch_is_link_in_reset(device, link)) + { + *mode = NVLINK_SUBLINK_STATE_RX_OFF; + return NVL_SUCCESS; + } + + data = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX); + + rx_sublink_state = DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, data); + + // Return NVLINK_SUBLINK_SUBSTATE_RX_STABLE for sub-state + *subMode = NVLINK_SUBLINK_SUBSTATE_RX_STABLE; + + switch (rx_sublink_state) + { + case NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_HS: + *mode = NVLINK_SUBLINK_STATE_RX_HS; + break; + + case NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_TRAIN: + *mode = NVLINK_SUBLINK_STATE_RX_TRAIN; + break; + + case NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_SAFE: + *mode = NVLINK_SUBLINK_STATE_RX_SAFE; + break; + + case NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_EIGHTH: + *mode = NVLINK_SUBLINK_STATE_RX_SINGLE_LANE; + break; + + case NV_NVLDL_RX_SLSM_STATUS_RX_PRIMARY_STATE_OFF: + *mode = NVLINK_SUBLINK_STATE_RX_OFF; + break; + + default: + *mode = 
NVLINK_SUBLINK_STATE_RX_OFF; + break; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_corelib_set_rx_detect_lr10 +( + nvlink_link *link, + NvU32 flags +) +{ + NvlStatus status; + nvswitch_device *device = link->dev->pDevInfo; + + status = nvswitch_minion_send_command(device, link->linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_TURING_RXDET, 0); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Set RXDET failed for link %d.\n", + __FUNCTION__, link->linkNumber); + return status; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_corelib_get_rx_detect_lr10 +( + nvlink_link *link +) +{ + NvlStatus status; + nvswitch_device *device = link->dev->pDevInfo; + + status = nvswitch_minion_get_rxdet_status_lr10(device, link->linkNumber); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, WARN, + "%s: Get RXDET failed for link %d.\n", + __FUNCTION__, link->linkNumber); + return status; + } + return NVL_SUCCESS; +} + +void +nvswitch_corelib_training_complete_lr10 +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + + nvswitch_init_dlpl_interrupts(link); + + _nvswitch_configure_reserved_throughput_counters(link); + + if (nvswitch_lib_notify_client_events(device, + NVSWITCH_DEVICE_EVENT_PORT_UP) != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_UP event\n", + __FUNCTION__); + } + +} + +NvlStatus +nvswitch_wait_for_tl_request_ready_lr10 +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + NvU32 linkRequest; +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + NvU32 linkStatus, linkErr; +#endif + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_BAD_ARGS; + } + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS * 400, &timeout); + + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + linkRequest = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, + NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_REQUEST); + + if (FLD_TEST_DRF_NUM(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _READY, 1, linkRequest)) + { + return NVL_SUCCESS; + } + + nvswitch_os_sleep(1); + } + while(keepPolling); + + // + // NVSWITCH_PRINT is not defined for release builds, + // so this keeps compiler happy + // +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + linkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, + NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_STATUS); + linkErr = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, + NVLIPT_LNK , _NVLIPT_LNK , _ERR_STATUS_0); +#endif + + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for TL link state ready on link #%d! 
" + "NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST = 0x%x, " + "NV_NVLIPT_LNK_CTRL_LINK_STATE_STATUS = 0x%x, " + "NV_NVLIPT_LNK_ERR_STATUS_0 = 0x%x\n", + __FUNCTION__, link->linkNumber, linkRequest, linkStatus, linkErr); + + return -NVL_ERR_GENERIC; +} + +NvlStatus +nvswitch_request_tl_link_state_lr10 +( + nvlink_link *link, + NvU32 tlLinkState, + NvBool bSync +) +{ + nvswitch_device *device = link->dev->pDevInfo; + NvlStatus status = NVL_SUCCESS; + NvU32 linkStatus; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return -NVL_UNBOUND_DEVICE; + } + + // Wait for the TL link state register to report ready + status = nvswitch_wait_for_tl_request_ready_lr10(link); + if (status != NVL_SUCCESS) + { + return status; + } + + // Request RESET state through CTRL_LINK_STATE_REQUEST + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, + NVLIPT_LNK, _NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, + DRF_NUM(_NVLIPT_LNK, _CTRL_LINK_STATE_REQUEST, _REQUEST, tlLinkState)); + + if (bSync) + { + // Wait for the TL link state register to complete + status = nvswitch_wait_for_tl_request_ready_lr10(link); + if (status != NVL_SUCCESS) + { + return status; + } + + // Check for state requested + linkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, + NVLIPT_LNK , _NVLIPT_LNK , _CTRL_LINK_STATE_STATUS); + + if (DRF_VAL(_NVLIPT_LNK, _CTRL_LINK_STATE_STATUS, _CURRENTLINKSTATE, linkStatus) != + tlLinkState) + { + NVSWITCH_PRINT(device, ERROR, + "%s: TL link state request to state 0x%x for link #%d did not complete!\n", + __FUNCTION__, tlLinkState, link->linkNumber); + return -NVL_ERR_GENERIC; + } + } + + return status; + +} + +void +nvswitch_execute_unilateral_link_shutdown_lr10 +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link->linkNumber); + return; + } + + // + // Perform unilateral shutdown + // This follows "Unilateral variant" from + // NVLink 3.x Shutdown (confluence page ID: 164573291) + // + // Status is explicitly ignored here since we are required to soldier-on + // in this scenario + // + nvswitch_corelib_set_dl_link_mode_lr10(link, NVLINK_LINKSTATE_DISABLE_PM, 0); + nvswitch_corelib_set_dl_link_mode_lr10(link, NVLINK_LINKSTATE_DISABLE_ERR_DETECT, 0); + nvswitch_corelib_set_dl_link_mode_lr10(link, NVLINK_LINKSTATE_LANE_DISABLE, 0); + nvswitch_corelib_set_dl_link_mode_lr10(link, NVLINK_LINKSTATE_OFF, 0); +} + +void +nvswitch_reset_persistent_link_hw_state_lr10 +( + nvswitch_device *device, + NvU32 linkNumber +) +{ + // Not Implemented for LR10 +} + +void +nvswitch_apply_recal_settings_lr10 +( + nvswitch_device *device, + nvlink_link *link +) +{ + // Not supported on LR10 + return; +} + diff --git a/src/common/nvswitch/kernel/lr10/lr10.c b/src/common/nvswitch/kernel/lr10/lr10.c new file mode 100644 index 000000000..daa678dd6 --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/lr10.c @@ -0,0 +1,7066 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "error_nvswitch.h" +#include "regkey_nvswitch.h" +#include "haldef_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/clock_lr10.h" +#include "lr10/minion_lr10.h" +#include "lr10/soe_lr10.h" +#include "lr10/pmgr_lr10.h" +#include "lr10/therm_lr10.h" +#include "lr10/inforom_lr10.h" +#include "lr10/smbpbi_lr10.h" +#include "flcn/flcnable_nvswitch.h" +#include "soe/soe_nvswitch.h" + +#include "nvswitch/lr10/dev_nvs_top.h" +#include "nvswitch/lr10/dev_pri_ringmaster.h" +#include "nvswitch/lr10/dev_pri_ringstation_sys.h" +#include "nvswitch/lr10/dev_nvlsaw_ip.h" +#include "nvswitch/lr10/dev_nvlsaw_ip_addendum.h" +#include "nvswitch/lr10/dev_nvs_master.h" +#include "nvswitch/lr10/dev_nvltlc_ip.h" +#include "nvswitch/lr10/dev_nvldl_ip.h" +#include "nvswitch/lr10/dev_nvlipt_lnk_ip.h" +#include "nvswitch/lr10/dev_nvlctrl_ip.h" +#include "nvswitch/lr10/dev_npg_ip.h" +#include "nvswitch/lr10/dev_npgperf_ip.h" +#include "nvswitch/lr10/dev_nport_ip.h" +#include "nvswitch/lr10/dev_ingress_ip.h" +#include "nvswitch/lr10/dev_tstate_ip.h" +#include "nvswitch/lr10/dev_egress_ip.h" +#include "nvswitch/lr10/dev_route_ip.h" +#include "nvswitch/lr10/dev_therm.h" +#include "nvswitch/lr10/dev_soe_ip.h" +#include "nvswitch/lr10/dev_route_ip_addendum.h" +#include "nvswitch/lr10/dev_minion_ip.h" +#include "nvswitch/lr10/dev_minion_ip_addendum.h" +#include "nvswitch/lr10/dev_nport_ip_addendum.h" +#include "nvswitch/lr10/dev_nxbar_tile_ip.h" +#include "nvswitch/lr10/dev_nxbar_tc_global_ip.h" +#include "nvswitch/lr10/dev_sourcetrack_ip.h" + +#include "oob/smbpbi.h" + +#define DMA_ADDR_WIDTH_LR10 64 +#define ROUTE_GANG_TABLE_SIZE (1 << DRF_SIZE(NV_ROUTE_REG_TABLE_ADDRESS_INDEX)) + +static void +_nvswitch_deassert_link_resets_lr10 +( + nvswitch_device *device +) +{ + NvU32 val, i; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + + NVSWITCH_PRINT(device, WARN, + "%s: NVSwitch Driver is taking the links out of reset. 
This should only happen during forced config.\n", + __FUNCTION__); + + for (i = 0; i < NVSWITCH_LINK_COUNT(device); i++) + { + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, i)) continue; + + val = NVSWITCH_LINK_RD32_LR10(device, i, + NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET); + val = FLD_SET_DRF_NUM(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, _LINK_RESET, + NV_NVLIPT_LNK_RESET_RSTSEQ_LINK_RESET_LINK_RESET_DEASSERT, val); + + NVSWITCH_LINK_WR32_LR10(device, i, + NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, val); + } + + for (i = 0; i < NVSWITCH_LINK_COUNT(device); i++) + { + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLIPT_LNK, i)) continue; + + // Poll for _RESET_STATUS == _DEASSERTED + nvswitch_timeout_create(25*NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); + + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + val = NVSWITCH_LINK_RD32_LR10(device, i, + NVLIPT_LNK, _NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET); + if (FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, + _LINK_RESET_STATUS, _DEASSERTED, val)) + { + break; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + if (!FLD_TEST_DRF(_NVLIPT_LNK, _RESET_RSTSEQ_LINK_RESET, + _LINK_RESET_STATUS, _DEASSERTED, val)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for link %d_LINK_RESET_STATUS == _DEASSERTED\n", + __FUNCTION__, i); + // Bug 2974064: Review this timeout handling (fall through) + } + } +} + +static void +_nvswitch_train_forced_config_link_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + NvU32 data, i; + nvlink_link *link; + + link = nvswitch_get_link(device, linkId); + + if ((link == NULL) || + !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) || + (linkId >= NVSWITCH_NVLINK_MAX_LINKS)) + { + return; + } + + data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST); + data = FLD_SET_DRF(_NVLDL_TOP, _LINK_TEST, _AUTO_HWCFG, _ENABLE, data); + NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST, data); + + // Add some delay to let the sim/emu go to SAFE + NVSWITCH_NSEC_DELAY(400 * NVSWITCH_INTERVAL_1USEC_IN_NS); + + data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST); + data = FLD_SET_DRF(_NVLDL_TOP, _LINK_TEST, _AUTO_NVHS, _ENABLE, data); + NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_TEST, data); + + // Add some delay to let the sim/emu go to HS + NVSWITCH_NSEC_DELAY(400 * NVSWITCH_INTERVAL_1USEC_IN_NS); + + data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE); + data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _NEWSTATE, _ACTIVE, data); + data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _OLDSTATE_MASK, _DONTCARE, data); + data = FLD_SET_DRF(_NVLDL_TOP, _LINK_CHANGE, _ACTION, _LTSSM_CHANGE, data); + NVSWITCH_LINK_WR32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE, data); + + i = 0; + + // Poll until LINK_CHANGE[1:0] != 2b01. + while (i < 5) + { + data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_CHANGE); + + if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_CHANGE, _STATUS, _BUSY, data)) + { + NVSWITCH_PRINT(device, INFO, + "%s : Waiting for link %d to go to ACTIVE\n", + __FUNCTION__, linkId); + } + else if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_CHANGE, _STATUS, _FAULT, data)) + { + NVSWITCH_PRINT(device, ERROR, + "%s : Fault while changing LINK to ACTIVE. 
Link = %d\n", + __FUNCTION__, linkId); + break; + } + else + { + break; + } + + NVSWITCH_NSEC_DELAY(5 * NVSWITCH_INTERVAL_1USEC_IN_NS); + i++; + } + + data = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_TOP, _LINK_STATE); + + if (FLD_TEST_DRF(_NVLDL_TOP, _LINK_STATE, _STATE, _ACTIVE, data)) + { + NVSWITCH_PRINT(device, INFO, + "%s : Link %d is in ACTIVE state, setting BUFFER_READY\n", + __FUNCTION__, linkId); + + // Set buffer ready only for nvlink TLC and not NPORT + nvswitch_init_buffer_ready(device, link, NV_FALSE); + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s : Timeout while waiting for link %d to go to ACTIVE\n", + __FUNCTION__, linkId); + NVSWITCH_PRINT(device, ERROR, + "%s : Link %d is in 0x%x state\n", + __FUNCTION__, linkId,DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, data)); + } + +} + +void +_nvswitch_setup_chiplib_forced_config_lr10 +( + nvswitch_device *device +) +{ + NvU64 links = ((NvU64)device->regkeys.chiplib_forced_config_link_mask) + + ((NvU64)device->regkeys.chiplib_forced_config_link_mask2 << 32); + NvU32 i; + + if (links == 0) + { + return; + } + + // + // First, take the links out of reset + // + // NOTE: On LR10, MINION will take the links out of reset during INITPHASE1 + // On platforms where MINION is not present and/or we want to run with forced + // config, the driver must de-assert the link reset + // + _nvswitch_deassert_link_resets_lr10(device); + + // Next, train the links to ACTIVE/NVHS + FOR_EACH_INDEX_IN_MASK(64, i, links) + { + if (device->link[i].valid) + { + _nvswitch_train_forced_config_link_lr10(device, i); + } + } + FOR_EACH_INDEX_IN_MASK_END; +} + +static NvU32 +_nvswitch_get_nvlink_linerate +( + nvswitch_device *device, + NvU32 val +) +{ + NvU32 lineRate = 0; + switch (val) + { + case NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_16G: + lineRate = NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_16_00000_GBPS; + break; + case NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_20G: + lineRate = NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_20_00000_GBPS; + break; + case NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_25G: + lineRate = NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_25_00000_GBPS; + break; + case NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_32G: + lineRate = NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_32_00000_GBPS; + break; + case NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_40G: + lineRate = NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_40_00000_GBPS; + break; + case NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_50G: + lineRate = NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_50_00000_GBPS; + break; + case NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_53_12500G: + lineRate = NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_53_12500_GBPS; + break; + default: + NVSWITCH_PRINT(device, SETUP, "%s:ERROR LINE_RATE = 0x%x requested by regkey\n", + __FUNCTION__, lineRate); + lineRate = NV_NVLIPT_LNK_CTRL_SYSTEM_LINK_CLK_CTRL_LINE_RATE_ILLEGAL_LINE_RATE; + } + return lineRate; +} + +static void +_nvswitch_setup_link_system_registers_lr10 +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config, + nvlink_link *link +) +{ + NvU32 regval, fldval; + NvU32 lineRate = 0; + NVLINK_CONFIG_DATA_LINKENTRY *vbios_link_entry = NULL; + + // + // Identify the valid link entry to update. 
If not, proceed with the default settings + // + if ((bios_config == NULL) || (bios_config->bit_address == 0)) + { + NVSWITCH_PRINT(device, SETUP, + "%s: No override with VBIOS - VBIOS NvLink configuration table not found\n", + __FUNCTION__); + } + else + { + vbios_link_entry = &bios_config->link_vbios_entry[bios_config->link_base_entry_assigned][link->linkNumber]; + } + + // LINE_RATE SYSTEM register + if (device->regkeys.nvlink_speed_control != NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_DEFAULT) + { + regval = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK_CTRL_SYSTEM_LINK, _CLK_CTRL); + lineRate = _nvswitch_get_nvlink_linerate(device, device->regkeys.nvlink_speed_control); + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CLK_CTRL, + _LINE_RATE, lineRate, regval); + NVSWITCH_PRINT(device, SETUP, "%s: LINE_RATE = 0x%x requested by regkey\n", + __FUNCTION__, lineRate); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK_CTRL_SYSTEM_LINK, _CLK_CTRL, regval); + } + + // TXTRAIN SYSTEM register + regval = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL); + + fldval = DRF_VAL(_SWITCH_REGKEY, _TXTRAIN_CONTROL, _FOM_FORMAT, + device->regkeys.txtrain_control); + if (fldval != NV_SWITCH_REGKEY_TXTRAIN_CONTROL_FOM_FORMAT_NOP) + { + NVSWITCH_PRINT(device, SETUP, "%s: FOM_FORMAT = 0x%x requested by regkey\n", + __FUNCTION__, fldval); + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, + _TXTRAIN_FOM_FORMAT, fldval, regval); + } + else if (vbios_link_entry != NULL) + { + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _TXTRAIN_FOM_FORMAT, + DRF_VAL(_NVLINK_VBIOS,_PARAM5,_TXTRAIN_FOM_FORMAT, vbios_link_entry->nvLinkparam5), + regval); + } + + fldval = DRF_VAL(_SWITCH_REGKEY, _TXTRAIN_CONTROL, _OPTIMIZATION_ALGORITHM, + device->regkeys.txtrain_control); + if (fldval != NV_SWITCH_REGKEY_TXTRAIN_CONTROL_OPTIMIZATION_ALGORITHM_NOP) + { + NVSWITCH_PRINT(device, SETUP, "%s: OPTIMIZATION_ALGORITHM = 0x%x requested by regkey\n", + __FUNCTION__, fldval); + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, + _TXTRAIN_OPTIMIZATION_ALGORITHM, fldval, regval); + } + else if (vbios_link_entry != NULL) + { + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _TXTRAIN_OPTIMIZATION_ALGORITHM, + vbios_link_entry->nvLinkparam4, regval); + } + + fldval = DRF_VAL(_SWITCH_REGKEY, _TXTRAIN_CONTROL, _ADJUSTMENT_ALGORITHM, + device->regkeys.txtrain_control); + if (fldval != NV_SWITCH_REGKEY_TXTRAIN_CONTROL_ADJUSTMENT_ALGORITHM_NOP) + { + NVSWITCH_PRINT(device, SETUP, "%s: ADJUSTMENT_ALGORITHM = 0x%x requested by regkey\n", + __FUNCTION__, fldval); + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, + _TXTRAIN_ADJUSTMENT_ALGORITHM, fldval, regval); + } + else if (vbios_link_entry != NULL) + { + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _TXTRAIN_ADJUSTMENT_ALGORITHM, + DRF_VAL(_NVLINK_VBIOS,_PARAM5,_TXTRAIN_ADJUSTMENT_ALGORITHM, vbios_link_entry->nvLinkparam5), + regval); + } + + fldval = DRF_VAL(_SWITCH_REGKEY, _TXTRAIN_CONTROL, _MINIMUM_TRAIN_TIME_MANTISSA, + device->regkeys.txtrain_control); + if (fldval != NV_SWITCH_REGKEY_TXTRAIN_CONTROL_MINIMUM_TRAIN_TIME_MANTISSA_NOP) + { + NVSWITCH_PRINT(device, SETUP, "%s: MINIMUM_TRAIN_TIME_MANTISSA = 0x%x requested by regkey\n", + __FUNCTION__, fldval); + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, + 
_TXTRAIN_MINIMUM_TRAIN_TIME_MANTISSA, fldval, regval); + } + else if (vbios_link_entry != NULL) + { + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _TXTRAIN_MINIMUM_TRAIN_TIME_MANTISSA, + DRF_VAL(_NVLINK_VBIOS,_PARAM6,_TXTRAIN_MINIMUM_TRAIN_TIME_MANTISSA, vbios_link_entry->nvLinkparam6), + regval); + } + else + { + // + // Default working configuration for LR10 + // This will be provided by VBIOS once support available (bug 2767390) + // + NVSWITCH_PRINT(device, SETUP, "%s: MINIMUM_TRAIN_TIME_MANTISSA = 0x5 forced by driver\n", + __FUNCTION__); + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, + _TXTRAIN_MINIMUM_TRAIN_TIME_MANTISSA, 0x5, regval); + } + + fldval = DRF_VAL(_SWITCH_REGKEY, _TXTRAIN_CONTROL, _MINIMUM_TRAIN_TIME_EXPONENT, + device->regkeys.txtrain_control); + if (fldval != NV_SWITCH_REGKEY_TXTRAIN_CONTROL_MINIMUM_TRAIN_TIME_EXPONENT_NOP) + { + NVSWITCH_PRINT(device, SETUP, "%s: MINIMUM_TRAIN_TIME_EXPONENT = 0x%x requested by regkey\n", + __FUNCTION__, fldval); + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, + _TXTRAIN_MINIMUM_TRAIN_TIME_EXPONENT, fldval, regval); + } + else if (vbios_link_entry != NULL) + { + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _TXTRAIN_MINIMUM_TRAIN_TIME_EXPONENT, + DRF_VAL(_NVLINK_VBIOS,_PARAM6,_TXTRAIN_MINIMUM_TRAIN_TIME_EXPONENT, vbios_link_entry->nvLinkparam6), + regval); + } + else + { + // + // Default working configuration for LR10 + // This will be provided by VBIOS once support available (bug 2767390) + // + NVSWITCH_PRINT(device, SETUP, "%s: MINIMUM_TRAIN_TIME_EXPONENT = 0x4 forced by driver\n", + __FUNCTION__); + regval = FLD_SET_DRF_NUM(_NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, + _TXTRAIN_MINIMUM_TRAIN_TIME_EXPONENT, 0x4, regval); + } + + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK_CTRL_SYSTEM_LINK, _CHANNEL_CTRL, regval); + + // Disable L2 (Bug 3176196) + regval = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK, _CTRL_SYSTEM_LINK_AN1_CTRL); + regval = FLD_SET_DRF(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_AN1_CTRL, _PWRM_L2_ENABLE, _DISABLE, regval); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK, _NVLIPT_LNK, _CTRL_SYSTEM_LINK_AN1_CTRL, regval); + + // SW WAR: Bug 3364420 + nvswitch_apply_recal_settings(device, link); +} + +/*! 
+ * @brief Parse packed little endian data and unpack into padded structure + * + * @param[in] format Data format + * @param[in] packedData Packed little endian data + * @param[out] unpackedData Unpacked padded structure + * @param[out] unpackedSize Unpacked data size + * @param[out] fieldsCount Number of fields + * + * @return 'NV_OK' + */ +NV_STATUS +_nvswitch_devinit_unpack_structure +( + const char *format, + const NvU8 *packedData, + NvU32 *unpackedData, + NvU32 *unpackedSize, + NvU32 *fieldsCount +) +{ + NvU32 unpkdSize = 0; + NvU32 fields = 0; + NvU32 count; + NvU32 data; + char fmt; + + while ((fmt = *format++)) + { + count = 0; + while ((fmt >= '0') && (fmt <= '9')) + { + count *= 10; + count += fmt - '0'; + fmt = *format++; + } + if (count == 0) + count = 1; + + while (count--) + { + switch (fmt) + { + case 'b': + data = *packedData++; + unpkdSize += 1; + break; + + case 's': // signed byte + data = *packedData++; + if (data & 0x80) + data |= ~0xff; + unpkdSize += 1; + break; + + case 'w': + data = *packedData++; + data |= *packedData++ << 8; + unpkdSize += 2; + break; + + case 'd': + data = *packedData++; + data |= *packedData++ << 8; + data |= *packedData++ << 16; + data |= *packedData++ << 24; + unpkdSize += 4; + break; + + default: + return NV_ERR_GENERIC; + } + *unpackedData++ = data; + fields++; + } + } + + if (unpackedSize != NULL) + *unpackedSize = unpkdSize; + + if (fieldsCount != NULL) + *fieldsCount = fields; + + return NV_OK; +} + +/*! + * @brief Calculate packed and unpacked data size based on given data format + * + * @param[in] format Data format + * @param[out] packedSize Packed data size + * @param[out] unpackedSize Unpacked data size + * + */ +void +_nvswitch_devinit_calculate_sizes +( + const char *format, + NvU32 *packedSize, + NvU32 *unpackedSize +) +{ + NvU32 unpkdSize = 0; + NvU32 pkdSize = 0; + NvU32 count; + char fmt; + + while ((fmt = *format++)) + { + count = 0; + while ((fmt >= '0') && (fmt <= '9')) + { + count *= 10; + count += fmt - '0'; + fmt = *format++; + } + if (count == 0) + count = 1; + + switch (fmt) + { + case 'b': + pkdSize += count * 1; + unpkdSize += count * sizeof(bios_U008); + break; + + case 's': // signed byte + pkdSize += count * 1; + unpkdSize += count * sizeof(bios_S008); + break; + + case 'w': + pkdSize += count * 2; + unpkdSize += count * sizeof(bios_U016); + break; + + case 'd': + pkdSize += count * 4; + unpkdSize += count * sizeof(bios_U032); + break; + } + } + + if (packedSize != NULL) + *packedSize = pkdSize; + + if (unpackedSize != NULL) + *unpackedSize = unpkdSize; +} + +/*! + * @brief Calculate packed and unpacked data size based on given data format + * + * @param[in] format Data format + * @param[out] packedSize Packed data size + * @param[out] unpackedSize Unpacked data size + * + */ + +NV_STATUS +_nvswitch_vbios_read_structure +( + nvswitch_device *device, + void *structure, + NvU32 offset, + NvU32 *ppacked_size, + const char *format +) +{ + NvU32 packed_size; + NvU8 *packed_data; + NvU32 unpacked_bytes; + + // calculate the size of the data as indicated by its packed format. + _nvswitch_devinit_calculate_sizes(format, &packed_size, &unpacked_bytes); + + if (ppacked_size) + *ppacked_size = packed_size; + + // + // is 'offset' too big? 
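_nvswitch_devinit_unpack_structure and _nvswitch_devinit_calculate_sizes above interpret a small format language: 'b' is a byte, 's' a signed byte, 'w' a 16-bit word and 'd' a 32-bit dword, each optionally preceded by a decimal repeat count. The following is a rough standalone sketch of the same unpack walk (simplified: little-endian only, no signed-byte sign extension) that can be built and run outside the driver.

#include <stdint.h>
#include <stdio.h>

/* Minimal re-implementation of the 'b'/'w'/'d' unpack walk: each format
 * character (optionally preceded by a decimal repeat count) consumes packed
 * little-endian bytes and produces one 32-bit field per element. */
static int unpack(const char *fmt, const uint8_t *in, uint32_t *out, unsigned max_fields)
{
    unsigned fields = 0;
    char c;

    while ((c = *fmt++) != '\0')
    {
        unsigned count = 0;
        while (c >= '0' && c <= '9')
        {
            count = count * 10 + (c - '0');
            c = *fmt++;
        }
        if (count == 0)
            count = 1;

        while (count--)
        {
            uint32_t v;
            if (fields == max_fields)
                return -1;
            switch (c)
            {
                case 'b': v = *in++;                                  break;
                case 'w': v = in[0] | (in[1] << 8);         in += 2;  break;
                case 'd': v = in[0] | (in[1] << 8) |
                              ((uint32_t)in[2] << 16) |
                              ((uint32_t)in[3] << 24);      in += 4;  break;
                default:  return -1; /* unknown format character */
            }
            out[fields++] = v;
        }
    }
    return (int)fields;
}

int main(void)
{
    /* "2bwd": two bytes, one word, one dword -- 8 packed bytes, 4 fields. */
    const uint8_t packed[] = { 0x11, 0x22, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };
    uint32_t fields[4];
    int n = unpack("2bwd", packed, fields, 4);

    for (int i = 0; i < n; i++)
        printf("field[%d] = 0x%x\n", i, (unsigned)fields[i]);
    return 0;
}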
+ // happens when we read bad ptrs from fixed addrs in image frequently + // + if ((offset + packed_size) > device->biosImage.size) + { + NVSWITCH_PRINT(device, ERROR, "%s: Bad offset in bios read: 0x%x, max is 0x%x, fmt is '%s'\n", + __FUNCTION__, offset, device->biosImage.size, format); + return NV_ERR_GENERIC; + } + + packed_data = &device->biosImage.pImage[offset]; + return _nvswitch_devinit_unpack_structure(format, packed_data, structure, + &unpacked_bytes, NULL); +} + +NvU8 +_nvswitch_vbios_read8 +( + nvswitch_device *device, + NvU32 offset +) +{ + bios_U008 data; // BiosReadStructure expects 'bios' types + + _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "b"); + + return (NvU8) data; +} + +NvU16 +_nvswitch_vbios_read16 +( + nvswitch_device *device, + NvU32 offset +) +{ + bios_U016 data; // BiosReadStructure expects 'bios' types + + _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "w"); + + return (NvU16) data; +} + + +NvU32 +_nvswitch_vbios_read32 +( + nvswitch_device *device, + NvU32 offset +) +{ + bios_U032 data; // BiosReadStructure expects 'bios' types + + _nvswitch_vbios_read_structure(device, &data, offset, (NvU32 *) 0, "d"); + + return (NvU32) data; +} + +NV_STATUS +_nvswitch_perform_BIT_offset_update +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config +) +{ + BIT_HEADER_V1_00 bitHeader; + BIT_TOKEN_V1_00 bitToken; + NV_STATUS rmStatus; + NvU32 dataPointerOffset; + NvU32 i; + + rmStatus = _nvswitch_vbios_read_structure(device, + (NvU8*) &bitHeader, + bios_config->bit_address, + (NvU32 *) 0, + BIT_HEADER_V1_00_FMT); + + if(rmStatus != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to read BIT table structure!.\n", + __FUNCTION__); + return rmStatus; + } + + for(i=0; i < bitHeader.TokenEntries; i++) + { + NvU32 BitTokenLocation = bios_config->bit_address + bitHeader.HeaderSize + (i * bitHeader.TokenSize); + rmStatus = _nvswitch_vbios_read_structure(device, + (NvU8*) &bitToken, + BitTokenLocation, + (NvU32 *) 0, + BIT_TOKEN_V1_00_FMT); + if(rmStatus != NV_OK) + { + NVSWITCH_PRINT(device, WARN, + "%s: Failed to read BIT token %d!\n", + __FUNCTION__, i); + return NV_ERR_GENERIC; + } + + dataPointerOffset = (bios_config->pci_image_address + bitToken.DataPtr); + switch(bitToken.TokenId) + { + case BIT_TOKEN_NVINIT_PTRS: + { + BIT_DATA_NVINIT_PTRS_V1 nvInitTablePtrs; + rmStatus = _nvswitch_vbios_read_structure(device, + (NvU8*) &nvInitTablePtrs, + dataPointerOffset, + (NvU32 *) 0, + BIT_DATA_NVINIT_PTRS_V1_30_FMT); + if (rmStatus != NV_OK) + { + NVSWITCH_PRINT(device, WARN, + "%s: Failed to read internal data structure\n", + __FUNCTION__); + return NV_ERR_GENERIC; + } + // Update the retrived info with device info + bios_config->nvlink_config_table_address = (nvInitTablePtrs.NvlinkConfigDataPtr + bios_config->pci_image_address); + } + break; + } + } + + return NV_OK; +} + +NV_STATUS +_nvswitch_validate_BIT_header +( + nvswitch_device *device, + NvU32 bit_address +) +{ + NvU32 headerSize = 0; + NvU32 chkSum = 0; + NvU32 i; + + // + // For now let's assume the Header Size is always at the same place. + // We can create something more complex if needed later. 
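The BIT header validation just below relies on a plain byte checksum: every byte of the header is summed and only the low 8 bits are kept, so a well-formed header sums to zero modulo 256. A tiny standalone illustration of the same check, using a made-up header:

#include <stdint.h>
#include <stdio.h>

/* Byte checksum as used for the BIT header: valid when the sum of all header
 * bytes, truncated to 8 bits, is zero. */
static int header_checksum_ok(const uint8_t *hdr, unsigned size)
{
    uint32_t sum = 0;
    for (unsigned i = 0; i < size; i++)
        sum += hdr[i];
    return (sum & 0xFF) == 0;
}

int main(void)
{
    /* Hypothetical 8-byte header; the last byte is the checksum filler. */
    uint8_t hdr[8] = { 0xB8, 0xFF, 0x1B, 0x00, 0x08, 0x2C, 0x10, 0x00 };
    uint32_t sum = 0;

    for (unsigned i = 0; i < 7; i++)
        sum += hdr[i];
    hdr[7] = (uint8_t)(0x100 - (sum & 0xFF));   /* pick filler so the total wraps to 0 */

    printf("checksum %s\n", header_checksum_ok(hdr, 8) ? "OK" : "BAD");
    return 0;
}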
+ // + headerSize = (NvU32)_nvswitch_vbios_read8(device, bit_address + BIT_HEADER_SIZE_OFFSET); + + // Now perform checksum + for (i = 0; i < headerSize; i++) + chkSum += (NvU32)_nvswitch_vbios_read8(device, bit_address + i); + + //Byte checksum removes upper bytes + chkSum = chkSum & 0xFF; + + if (chkSum) + return NV_ERR_GENERIC; + + return NV_OK; +} + + +NV_STATUS +nvswitch_verify_header +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config +) +{ + NvU32 i; + NV_STATUS status = NV_ERR_GENERIC; + + if ((bios_config == NULL) || (!bios_config->pci_image_address)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: PCI Image offset is not identified\n", + __FUNCTION__); + return status; + } + + // attempt to find the init info in the BIOS + for (i = bios_config->pci_image_address; i < device->biosImage.size - 3; i++) + { + NvU16 bitheaderID = _nvswitch_vbios_read16(device, i); + if (bitheaderID == BIT_HEADER_ID) + { + NvU32 signature = _nvswitch_vbios_read32(device, i + 2); + if (signature == BIT_HEADER_SIGNATURE) + { + bios_config->bit_address = i; + + // Checksum BIT to prove accuracy + if (NV_OK != _nvswitch_validate_BIT_header(device, bios_config->bit_address)) + { + device->biosImage.pImage = 0; + device->biosImage.size = 0; + } + } + } + // only if we find the bit address do we break + if (bios_config->bit_address) + break; + } + if (bios_config->bit_address) + { + status = NV_OK; + } + + return status; +} + +NV_STATUS +_nvswitch_vbios_update_bit_Offset +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config +) +{ + NV_STATUS status = NV_OK; + + if (bios_config->bit_address) + { + goto vbios_update_bit_Offset_done; + } + + status = nvswitch_verify_header(device, bios_config); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: *** BIT header is not found in vbios!\n", + __FUNCTION__); + goto vbios_update_bit_Offset_done; + } + + if (bios_config->bit_address) + { + + status = _nvswitch_perform_BIT_offset_update(device, bios_config); + if (status != NV_OK) + goto vbios_update_bit_Offset_done; + } + +vbios_update_bit_Offset_done: + return status; +} + + +NV_STATUS +_nvswitch_vbios_identify_pci_image_loc +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + if (bios_config->pci_image_address) + { + goto vbios_identify_pci_image_loc_done; + } + + // Match the PCI_EXP_ROM_SIGNATURE and followed by the PCI Data structure + // with PCIR and matching vendor ID + NVSWITCH_PRINT(device, SETUP, + "%s: Verifying and extracting PCI Data.\n", + __FUNCTION__); + + // attempt to find the init info in the BIOS + for (i = 0; i < (device->biosImage.size - PCI_ROM_HEADER_PCI_DATA_SIZE); i++) + { + NvU16 pci_rom_sigature = _nvswitch_vbios_read16(device, i); + + if (pci_rom_sigature == PCI_EXP_ROM_SIGNATURE) + { + NvU32 pcir_data_dffSet = _nvswitch_vbios_read16(device, i + PCI_ROM_HEADER_SIZE); // 0x16 -> 0x18 i.e, including the ROM Signature bytes + + if (((i + pcir_data_dffSet) + PCI_DATA_STRUCT_SIZE) < device->biosImage.size) + { + NvU32 pcirSigature = _nvswitch_vbios_read32(device, (i + pcir_data_dffSet)); + + if (pcirSigature == PCI_DATA_STRUCT_SIGNATURE) + { + PCI_DATA_STRUCT pciData; + status = _nvswitch_vbios_read_structure(device, + (NvU8*) &pciData, + i + pcir_data_dffSet, + (NvU32 *) 0, + PCI_DATA_STRUCT_FMT); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, WARN, + "%s: Failed to PCI Data for validation\n", + __FUNCTION__); + goto vbios_identify_pci_image_loc_done; + } + + // Validate the 
vendor details as well + if (pciData.vendorID == PCI_VENDOR_ID_NVIDIA) + { + bios_config->pci_image_address = i; + break; + } + } + } + } + } + +vbios_identify_pci_image_loc_done: + return status; +} + +NvU32 _nvswitch_get_nvlink_config_address +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config +) +{ + return bios_config->nvlink_config_table_address; +} + +NV_STATUS +_nvswitch_read_vbios_link_base_entry +( + nvswitch_device *device, + NvU32 tblPtr, + NVLINK_CONFIG_DATA_BASEENTRY *link_base_entry +) +{ + NV_STATUS status = NV_ERR_INVALID_PARAMETER; + NVLINK_VBIOS_CONFIG_DATA_BASEENTRY vbios_link_base_entry; + + status = _nvswitch_vbios_read_structure(device, &vbios_link_base_entry, tblPtr, (NvU32 *)0, NVLINK_CONFIG_DATA_BASEENTRY_FMT); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error on reading nvlink base entry\n", + __FUNCTION__); + return status; + } + + link_base_entry->positionId = vbios_link_base_entry.positionId; + + return status; +} + +NV_STATUS +_nvswitch_read_vbios_link_entries +( + nvswitch_device *device, + NvU32 tblPtr, + NvU32 expected_link_entriesCount, + NVLINK_CONFIG_DATA_LINKENTRY *link_entries, + NvU32 *identified_link_entriesCount +) +{ + NV_STATUS status = NV_ERR_INVALID_PARAMETER; + NvU32 i; + NVLINK_VBIOS_CONFIG_DATA_LINKENTRY vbios_link_entry; + *identified_link_entriesCount = 0; + + for (i = 0; i < expected_link_entriesCount; i++) + { + status = _nvswitch_vbios_read_structure(device, + &vbios_link_entry, + tblPtr, (NvU32 *)0, + NVLINK_CONFIG_DATA_LINKENTRY_FMT); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error on reading nvlink entry\n", + __FUNCTION__); + return status; + } + link_entries[i].nvLinkparam0 = (NvU8)vbios_link_entry.nvLinkparam0; + link_entries[i].nvLinkparam1 = (NvU8)vbios_link_entry.nvLinkparam1; + link_entries[i].nvLinkparam2 = (NvU8)vbios_link_entry.nvLinkparam2; + link_entries[i].nvLinkparam3 = (NvU8)vbios_link_entry.nvLinkparam3; + link_entries[i].nvLinkparam4 = (NvU8)vbios_link_entry.nvLinkparam4; + link_entries[i].nvLinkparam5 = (NvU8)vbios_link_entry.nvLinkparam5; + link_entries[i].nvLinkparam6 = (NvU8)vbios_link_entry.nvLinkparam6; + tblPtr += sizeof(NVLINK_CONFIG_DATA_LINKENTRY); + + NVSWITCH_PRINT(device, SETUP, + "<<<---- NvLink ID 0x%x ---->>>\n", i); + NVSWITCH_PRINT(device, SETUP, + "NVLink Params 0 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam0, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam0)); + NVSWITCH_PRINT(device, SETUP, + "NVLink Params 1 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam1, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam1)); + NVSWITCH_PRINT(device, SETUP, + "NVLink Params 2 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam2, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam2)); + NVSWITCH_PRINT(device, SETUP, + "NVLink Params 3 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam3, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam3)); + NVSWITCH_PRINT(device, SETUP, + "NVLink Params 4 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam4, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam4)); + NVSWITCH_PRINT(device, SETUP, + "NVLink Params 5 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam5, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam5)); + NVSWITCH_PRINT(device, SETUP, + "NVLink Params 6 \t0x%x \tBinary:"BYTE_TO_BINARY_PATTERN"\n", vbios_link_entry.nvLinkparam6, BYTE_TO_BINARY(vbios_link_entry.nvLinkparam6)); + 
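These parameter dumps rely on the BYTE_TO_BINARY_PATTERN / BYTE_TO_BINARY helpers, whose definitions are not part of this hunk. The sketch below shows the usual shape of such a macro pair, assumed here only so the format strings above can be read in isolation:

#include <stdio.h>
#include <stdint.h>

/* Assumed shape of the helpers: a "%c" per bit, fed by eight bit tests.
 * The real macros live elsewhere in the nvswitch headers. */
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(b)          \
    ((b) & 0x80 ? '1' : '0'),      \
    ((b) & 0x40 ? '1' : '0'),      \
    ((b) & 0x20 ? '1' : '0'),      \
    ((b) & 0x10 ? '1' : '0'),      \
    ((b) & 0x08 ? '1' : '0'),      \
    ((b) & 0x04 ? '1' : '0'),      \
    ((b) & 0x02 ? '1' : '0'),      \
    ((b) & 0x01 ? '1' : '0')

int main(void)
{
    uint8_t param = 0xA5;
    printf("NVLink Params\t0x%x\tBinary:" BYTE_TO_BINARY_PATTERN "\n",
           param, BYTE_TO_BINARY(param));
    return 0;
}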
NVSWITCH_PRINT(device, SETUP, + "<<<---- NvLink ID 0x%x ---->>>\n\n", i); + } + *identified_link_entriesCount = i; + return status; +} + +NV_STATUS +_nvswitch_vbios_fetch_nvlink_entries +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config +) +{ + NvU32 tblPtr; + NvU8 version; + NvU8 size; + NV_STATUS status = NV_ERR_GENERIC; + NVLINK_CONFIG_DATA_HEADER header; + NvU32 base_entry_index; + NvU32 expected_base_entry_count; + + tblPtr = _nvswitch_get_nvlink_config_address(device, bios_config); + if (!tblPtr) + { + NVSWITCH_PRINT(device, ERROR, + "%s: No NvLink Config table set\n", + __FUNCTION__); + goto vbios_fetch_nvlink_entries_done; + } + + // Read the table version number + version = _nvswitch_vbios_read8(device, tblPtr); + switch (version) + { + case NVLINK_CONFIG_DATA_HEADER_VER_20: + size = _nvswitch_vbios_read8(device, tblPtr + 1); + if (size == NVLINK_CONFIG_DATA_HEADER_20_SIZE) + { + // Grab Nvlink Config Data Header + status = _nvswitch_vbios_read_structure(device, &header.ver_20, tblPtr, (NvU32 *) 0, NVLINK_CONFIG_DATA_HEADER_20_FMT); + + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error on reading the nvlink config header\n", + __FUNCTION__); + } + } + break; + default: + NVSWITCH_PRINT(device, ERROR, + "%s: Invalid version 0x%x\n", + __FUNCTION__, version); + } + if (status != NV_OK) + { + goto vbios_fetch_nvlink_entries_done; + } + + NVSWITCH_PRINT(device, SETUP, + "<<<---- NvLink Header ---->>>\n\n"); + NVSWITCH_PRINT(device, SETUP, + "Version \t\t 0x%x\n", header.ver_20.Version); + NVSWITCH_PRINT(device, SETUP, + "Header Size \t0x%x\n", header.ver_20.HeaderSize); + NVSWITCH_PRINT(device, SETUP, + "Base Entry Size \t0x%x\n", header.ver_20.BaseEntrySize); + NVSWITCH_PRINT(device, SETUP, + "Base Entry count \t0x%x\n", header.ver_20.BaseEntryCount); + NVSWITCH_PRINT(device, SETUP, + "Link Entry Size \t0x%x\n", header.ver_20.LinkEntrySize); + NVSWITCH_PRINT(device, SETUP, + "Link Entry Count \t0x%x\n", header.ver_20.LinkEntryCount); + NVSWITCH_PRINT(device, SETUP, + "Reserved \t0x%x\n", header.ver_20.Reserved); + NVSWITCH_PRINT(device, SETUP, + "<<<---- NvLink Header ---->>>\n"); + + expected_base_entry_count = header.ver_20.BaseEntryCount; + if (expected_base_entry_count > NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY) + { + NVSWITCH_PRINT(device, WARN, + "%s: Greater than expected base entry count 0x%x - Restricting to count 0x%x\n", + __FUNCTION__, expected_base_entry_count, NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY); + expected_base_entry_count = NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY; + } + + tblPtr += header.ver_20.HeaderSize; + for (base_entry_index = 0; base_entry_index < expected_base_entry_count; base_entry_index++) + { + NvU32 expected_link_entriesCount = header.ver_20.LinkEntryCount; + if (expected_link_entriesCount > NVSWITCH_LINK_COUNT(device)) + { + NVSWITCH_PRINT(device, WARN, + "%s: Greater than expected link count 0x%x - Restricting to count 0x%x\n", + __FUNCTION__, expected_link_entriesCount, NVSWITCH_LINK_COUNT(device)); + expected_link_entriesCount = NVSWITCH_LINK_COUNT(device); + } + + // Grab Nvlink Config Data Base Entry + _nvswitch_read_vbios_link_base_entry(device, tblPtr, &bios_config->link_vbios_base_entry[base_entry_index]); + tblPtr += header.ver_20.BaseEntrySize; + + _nvswitch_read_vbios_link_entries(device, + tblPtr, + expected_link_entriesCount, + bios_config->link_vbios_entry[base_entry_index], + &bios_config->identified_Link_entries[base_entry_index]); + tblPtr += (expected_link_entriesCount * 
sizeof(NVLINK_CONFIG_DATA_LINKENTRY)); + } +vbios_fetch_nvlink_entries_done: + return status; +} + +NV_STATUS +_nvswitch_vbios_assign_base_entry +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config +) +{ + NvU32 physical_id; + NvU32 entry_index; + + physical_id = nvswitch_read_physical_id(device); + + for (entry_index = 0; entry_index < NVSWITCH_NUM_BIOS_NVLINK_CONFIG_BASE_ENTRY; entry_index++) + { + if (physical_id == bios_config->link_vbios_base_entry[entry_index].positionId) + { + bios_config->link_base_entry_assigned = entry_index; + return NV_OK; + } + } + + // TODO: Bug 3507948 + NVSWITCH_PRINT(device, ERROR, + "%s: Error on assigning base entry. Setting base entry index = 0\n", + __FUNCTION__); + bios_config->link_base_entry_assigned = 0; + + return NV_OK; +} + +NV_STATUS +_nvswitch_setup_link_vbios_overrides +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config +) +{ + NV_STATUS status = NV_OK; + + if (bios_config == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: BIOS config override not supported\n", + __FUNCTION__); + return -NVL_ERR_NOT_SUPPORTED; + } + + bios_config->vbios_disabled_link_mask = 0; + + bios_config->bit_address = 0; + bios_config->pci_image_address = 0; + bios_config->nvlink_config_table_address = 0; + + if ((device->biosImage.size == 0) || (device->biosImage.pImage == NULL)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: VBIOS not exist size:0x%x\n", + __FUNCTION__, device->biosImage.size); + return -NVL_ERR_NOT_SUPPORTED; + } + + // + // Locate the PCI ROM Image + // + if (_nvswitch_vbios_identify_pci_image_loc(device, bios_config) != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error on identifying pci image loc\n", + __FUNCTION__); + status = NV_ERR_GENERIC; + goto setup_link_vbios_overrides_done; + } + + // + // Locate and fetch BIT offset + // + if (_nvswitch_vbios_update_bit_Offset(device, bios_config) != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error on identifying pci image loc\n", + __FUNCTION__); + status = NV_ERR_GENERIC; + goto setup_link_vbios_overrides_done; + } + + // + // Fetch NvLink Entries + // + if (_nvswitch_vbios_fetch_nvlink_entries(device, bios_config) != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error on identifying pci image loc\n", + __FUNCTION__); + status = NV_ERR_GENERIC; + goto setup_link_vbios_overrides_done; + } + + // + // Assign Base Entry for this device + // + if (_nvswitch_vbios_assign_base_entry(device, bios_config) != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error on assigning base entry\n", + __FUNCTION__); + status = NV_ERR_GENERIC; + goto setup_link_vbios_overrides_done; + } + +setup_link_vbios_overrides_done: + if (status != NV_OK) + { + bios_config->bit_address = 0; + bios_config->pci_image_address = 0; + bios_config->nvlink_config_table_address =0; + } + return status; +} + +static void +_nvswitch_load_link_disable_settings_lr10 +( + nvswitch_device *device, + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config, + nvlink_link *link +) +{ + NvU32 val; + NVLINK_CONFIG_DATA_LINKENTRY *vbios_link_entry = NULL; + + // SW CTRL - clear out LINK_DISABLE on driver load + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, + NVLIPT_LNK, _NVLIPT_LNK, _CTRL_SW_LINK_MODE_CTRL); + val = FLD_SET_DRF(_NVLIPT_LNK, _CTRL_SW_LINK_MODE_CTRL, _LINK_DISABLE, + _ENABLED, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, + NVLIPT_LNK, _NVLIPT_LNK, _CTRL_SW_LINK_MODE_CTRL, val); + + // + // SYSTEM CTRL + // If the SYSTEM_CTRL setting had been overidden by another entity, + 
// it should also be locked, so this write would not take effect. + // + if (bios_config != NULL) + { + vbios_link_entry = &bios_config->link_vbios_entry[bios_config->link_base_entry_assigned][link->linkNumber]; + } + + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, + NVLIPT_LNK, _NVLIPT_LNK, _CTRL_SYSTEM_LINK_MODE_CTRL); + + if ((vbios_link_entry != NULL) && + (FLD_TEST_DRF(_NVLINK_VBIOS,_PARAM0, _LINK, _DISABLE, vbios_link_entry->nvLinkparam0))) + { + if (!nvswitch_is_link_in_reset(device, link)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d is not in reset, cannot set LINK_DISABLE\n", + __FUNCTION__, link->linkNumber); + return; + } + val = FLD_SET_DRF(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_MODE_CTRL, _LINK_DISABLE, + _DISABLED, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, + NVLIPT_LNK, _NVLIPT_LNK, _CTRL_SYSTEM_LINK_MODE_CTRL, val); + + // Set link to invalid and unregister from corelib + device->link[link->linkNumber].valid = NV_FALSE; + nvlink_lib_unregister_link(link); + nvswitch_destroy_link(link); + + return; + } + else + { + val = FLD_SET_DRF(_NVLIPT_LNK, _CTRL_SYSTEM_LINK_MODE_CTRL, _LINK_DISABLE, + _ENABLED, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, + NVLIPT_LNK, _NVLIPT_LNK, _CTRL_SYSTEM_LINK_MODE_CTRL, val); + } +} + +/* + * @Brief : Setting up system registers after device initialization + * + * @Description : + * + * @param[in] device a reference to the device to initialize + */ +NvlStatus +nvswitch_setup_link_system_registers_lr10 +( + nvswitch_device *device +) +{ + nvlink_link *link; + NvU8 i; + NvU32 val; + NvU64 enabledLinkMask; + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config; + + bios_config = nvswitch_get_bios_nvlink_config(device); + if ((bios_config == NULL) || (bios_config->bit_address == 0)) + { + NVSWITCH_PRINT(device, WARN, + "%s: VBIOS NvLink configuration table not found\n", + __FUNCTION__); + } + + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + + FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask) + { + NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device)); + + link = nvswitch_get_link(device, i); + + if ((link == NULL) || + !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) || + (i >= NVSWITCH_NVLINK_MAX_LINKS)) + { + continue; + } + + // AC vs DC mode SYSTEM register + if (link->ac_coupled) + { + // + // In NVL3.0, ACMODE is handled by MINION in the INITPHASE1 command + // Here we just setup the register with the proper info + // + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL); + val = FLD_SET_DRF(_NVLIPT_LNK, + _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _AC_DC_MODE, _AC, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, val); + } + + _nvswitch_setup_link_system_registers_lr10(device, bios_config, link); + _nvswitch_load_link_disable_settings_lr10(device, bios_config, link); + } + FOR_EACH_INDEX_IN_MASK_END; + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_deassert_link_reset_lr10 +( + nvswitch_device *device, + nvlink_link *link +) +{ + NvU64 mode; + NvlStatus status = NVL_SUCCESS; + + status = device->hal.nvswitch_corelib_get_dl_link_mode(link, &mode); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s:DL link mode failed on link %d\n", + __FUNCTION__, link->linkNumber); + return status; + } + + // Check if the link is RESET + if (mode != NVLINK_LINKSTATE_RESET) + { + return NVL_SUCCESS; + } + + // Send INITPHASE1 to bring link out of reset + status = 
link->link_handlers->set_dl_link_mode(link, + NVLINK_LINKSTATE_INITPHASE1, + NVLINK_STATE_CHANGE_ASYNC); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: INITPHASE1 failed on link %d\n", + __FUNCTION__, link->linkNumber); + } + + return status; +} + +static NvU32 +_nvswitch_get_num_vcs_lr10 +( + nvswitch_device *device +) +{ + return NVSWITCH_NUM_VCS_LR10; +} + +void +nvswitch_determine_platform_lr10 +( + nvswitch_device *device +) +{ + NvU32 value; + + // + // Determine which model we are using SMC_BOOT_2 and OS query + // + value = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_2); + device->is_emulation = FLD_TEST_DRF(_PSMC, _BOOT_2, _EMULATION, _YES, value); + + if (!IS_EMULATION(device)) + { + // If we are not on fmodel, we must be on RTL sim or silicon + if (FLD_TEST_DRF(_PSMC, _BOOT_2, _FMODEL, _YES, value)) + { + device->is_fmodel = NV_TRUE; + } + else + { + device->is_rtlsim = NV_TRUE; + + // Let OS code finalize RTL sim vs silicon setting + nvswitch_os_override_platform(device->os_handle, &device->is_rtlsim); + } + } + +#if defined(NVLINK_PRINT_ENABLED) + { + const char *build; + const char *mode; + + build = "HW"; + if (IS_FMODEL(device)) + mode = "fmodel"; + else if (IS_RTLSIM(device)) + mode = "rtlsim"; + else if (IS_EMULATION(device)) + mode = "emulation"; + else + mode = "silicon"; + + NVSWITCH_PRINT(device, SETUP, + "%s: build: %s platform: %s\n", + __FUNCTION__, build, mode); + } +#endif // NVLINK_PRINT_ENABLED +} + +static void +_nvswitch_portstat_reset_latency_counters +( + nvswitch_device *device +) +{ + // Set SNAPONDEMAND from 0->1 to reset the counters + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL, + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE)); + + // Set SNAPONDEMAND back to 0. + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL, + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE)); +} + +// +// Data collector which runs on a background thread, collecting latency stats. +// +// The latency counters have a maximum window period of 3.299 seconds +// (2^32 clk cycles). The counters reset after this period. So SW snaps +// the bins and records latencies every 3 seconds. Setting SNAPONDEMAND from 0->1 +// snaps the latency counters and updates them to PRI registers for +// the SW to read. It then resets the counters to start collecting fresh latencies. +// + +void +nvswitch_internal_latency_bin_log_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 idx_nport; + NvU32 idx_vc; + NvBool vc_valid; + NvU32 latency; + NvU64 time_nsec; + NvU32 link_type; // Access or trunk link + NvU64 last_visited_time_nsec; + + if (chip_device->latency_stats == NULL) + { + // Latency stat buffers not allocated yet + return; + } + + time_nsec = nvswitch_os_get_platform_time(); + last_visited_time_nsec = chip_device->latency_stats->last_visited_time_nsec; + + // Update last visited time + chip_device->latency_stats->last_visited_time_nsec = time_nsec; + + // Compare time stamp and reset the counters if the snap is missed + if (!IS_RTLSIM(device) || !IS_FMODEL(device)) + { + if ((last_visited_time_nsec != 0) && + ((time_nsec - last_visited_time_nsec) > 3 * NVSWITCH_INTERVAL_1SEC_IN_NS)) + { + NVSWITCH_PRINT(device, ERROR, + "Latency metrics recording interval missed. 
Resetting counters.\n"); + _nvswitch_portstat_reset_latency_counters(device); + return; + } + } + + for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++) + { + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, idx_nport)) + { + continue; + } + + // Setting SNAPONDEMAND from 0->1 snaps the latencies and resets the counters + NVSWITCH_LINK_WR32_LR10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL, + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _ENABLE)); + + // + // TODO: Check _STARTCOUNTER and don't log if counter not enabled. + // Currently all counters are always enabled + // + + link_type = NVSWITCH_LINK_RD32_LR10(device, idx_nport, NPORT, _NPORT, _CTRL); + for (idx_vc = 0; idx_vc < NVSWITCH_NUM_VCS_LR10; idx_vc++) + { + vc_valid = NV_FALSE; + + // VC's CREQ0(0) and RSP0(5) are relevant on access links. + if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, link_type) && + ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0) || + (idx_vc == NV_NPORT_VC_MAPPING_RSP0))) + { + vc_valid = NV_TRUE; + } + + // VC's CREQ0(0), RSP0(5), CREQ1(6) and RSP1(7) are relevant on trunk links. + if (FLD_TEST_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, link_type) && + ((idx_vc == NV_NPORT_VC_MAPPING_CREQ0) || + (idx_vc == NV_NPORT_VC_MAPPING_RSP0) || + (idx_vc == NV_NPORT_VC_MAPPING_CREQ1) || + (idx_vc == NV_NPORT_VC_MAPPING_RSP1))) + { + vc_valid = NV_TRUE; + } + + // If the VC is not being used, skip reading it + if (!vc_valid) + { + continue; + } + + latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _LOW, idx_vc); + chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].low += latency; + + latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _MEDIUM, idx_vc); + chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].medium += latency; + + latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _HIGH, idx_vc); + chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].high += latency; + + latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _COUNT, _PANIC, idx_vc); + chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].panic += latency; + + latency = NVSWITCH_NPORT_PORTSTAT_RD32_LR10(device, idx_nport, _PACKET, _COUNT, idx_vc); + chip_device->latency_stats->latency[idx_vc].accum_latency[idx_nport].count += latency; + + // Note the time of this snap + chip_device->latency_stats->latency[idx_vc].last_read_time_nsec = time_nsec; + chip_device->latency_stats->latency[idx_vc].count++; + } + + // Disable SNAPONDEMAND after fetching the latencies + NVSWITCH_LINK_WR32_LR10(device, idx_nport, NPORT, _NPORT, _PORTSTAT_SNAP_CONTROL, + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE)); + } +} + +void +nvswitch_ecc_writeback_task_lr10 +( + nvswitch_device *device +) +{ +} + +void +nvswitch_set_ganged_link_table_lr10 +( + nvswitch_device *device, + NvU32 firstIndex, + NvU64 *ganged_link_table, + NvU32 numEntries +) +{ + NvU32 i; + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_ADDRESS, + DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _INDEX, firstIndex) | + DRF_NUM(_ROUTE, _REG_TABLE_ADDRESS, _AUTO_INCR, 1)); + + for (i = 0; i < numEntries; i++) + { + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _REG_TABLE_DATA0, + NvU64_LO32(ganged_link_table[i])); + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, 
_REG_TABLE_DATA0, + NvU64_HI32(ganged_link_table[i])); + } +} + +static NvlStatus +_nvswitch_init_ganged_link_routing +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 gang_index, gang_size; + NvU64 gang_entry; + NvU32 block_index; + NvU32 block_count = 16; + NvU32 glt_entries = 16; + NvU32 glt_size = ROUTE_GANG_TABLE_SIZE / 2; + NvU64 *ganged_link_table = NULL; + NvU32 block_size = ROUTE_GANG_TABLE_SIZE / block_count; + NvU32 table_index = 0; + NvU32 i; + + // + // Refer to switch IAS 11.2 Figure 82. Limerock Ganged RAM Table Format + // + // The ganged link routing table is composed of 512 entries divided into 16 sections. + // Each section specifies how requests should be routed through the ganged links. + // Each 32-bit entry is composed of eight 4-bit fields specifying the set of of links + // to distribute through. More complex spray patterns could be constructed, but for + // now initialize it with a uniform distribution pattern. + // + // The ganged link routing table will be loaded with following values: + // Typically the first section would be filled with (0,1,2,3,4,5,6,7), (8,9,10,11,12,13,14,15),... + // Typically the second section would be filled with (0,0,0,0,0,0,0,0), (0,0,0,0,0,0,0,0),... + // Typically the third section would be filled with (0,1,0,1,0,1,0,1), (0,1,0,1,0,1,0,1),... + // Typically the third section would be filled with (0,1,2,0,1,2,0,1), (2,0,1,2,0,1,2,0),... + // : + // The last section would typically be filled with (0,1,2,3,4,5,6,7), (8,9,10,11,12,13,14,0),... + // + // Refer table 20: Definition of size bits used with Ganged Link Number Table. + // Note that section 0 corresponds with 16 ganged links. Section N corresponds with + // N ganged links. + // + + //Alloc memory for Ganged Link Table + ganged_link_table = nvswitch_os_malloc(glt_size * sizeof(gang_entry)); + if (ganged_link_table == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to allocate memory for GLT!!\n"); + return -NVL_NO_MEM; + } + + for (block_index = 0; block_index < block_count; block_index++) + { + gang_size = ((block_index==0) ? 16 : block_index); + + for (gang_index = 0; gang_index < block_size/2; gang_index++) + { + gang_entry = 0; + NVSWITCH_ASSERT(table_index < glt_size); + + for (i = 0; i < glt_entries; i++) + { + gang_entry |= + DRF_NUM64(_ROUTE, _REG_TABLE_DATA0, _GLX(i), (16 * gang_index + i) % gang_size); + } + + ganged_link_table[table_index++] = gang_entry; + } + } + + nvswitch_set_ganged_link_table_lr10(device, 0, ganged_link_table, glt_size); + + chip_device->ganged_link_table = ganged_link_table; + + return NVL_SUCCESS; +} + +static NvlStatus +nvswitch_initialize_ip_wrappers_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 engine_enable_mask; + NvU32 engine_disable_mask; + NvU32 i, j; + NvU32 idx_link; + + // + // Now that software knows the devices and addresses, it must take all + // the wrapper modules out of reset. It does this by writing to the + // PMC module enable registers. 
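_nvswitch_init_ganged_link_routing above fills each ganged-table word with 4-bit link selectors, cycling uniformly over the gang_size links of a section via (16 * gang_index + i) % gang_size. Here is a small standalone sketch of that spray pattern, assuming field i occupies bits [4*i+3:4*i] as the "eight 4-bit fields per 32-bit entry" description implies (two 32-bit entries are packed per 64-bit gang_entry, as in the driver):

#include <stdint.h>
#include <stdio.h>

/* Build one 64-bit gang_entry for a section of gang_size links.  Field i is
 * placed at bits [4*i+3 : 4*i]; this bit placement is an assumption made for
 * the sketch, consistent with the layout described in the comment above. */
static uint64_t build_gang_entry(unsigned gang_index, unsigned gang_size)
{
    uint64_t entry = 0;

    for (unsigned i = 0; i < 16; i++)
    {
        uint64_t link = (16u * gang_index + i) % gang_size;
        entry |= link << (4 * i);
    }
    return entry;
}

int main(void)
{
    /* Section with 7 ganged links: consecutive entries keep cycling 0..6. */
    for (unsigned gang_index = 0; gang_index < 2; gang_index++)
    {
        uint64_t e = build_gang_entry(gang_index, 7);
        printf("entry %u = 0x%016llx :", gang_index, (unsigned long long)e);
        for (unsigned i = 0; i < 16; i++)
            printf(" %llu", (unsigned long long)((e >> (4 * i)) & 0xF));
        printf("\n");
    }
    return 0;
}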
+ // + +// Temporary - bug 2069764 +// NVSWITCH_REG_WR32(device, _PSMC, _ENABLE, +// DRF_DEF(_PSMC, _ENABLE, _SAW, _ENABLE) | +// DRF_DEF(_PSMC, _ENABLE, _PRIV_RING, _ENABLE) | +// DRF_DEF(_PSMC, _ENABLE, _PERFMON, _ENABLE)); + + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE, + DRF_DEF(_NVLSAW_NVSPMC, _ENABLE, _NXBAR, _ENABLE)); + + // + // At this point the list of discovered devices has been cross-referenced + // with the ROM configuration, platform configuration, and regkey override. + // The NVLIPT & NPORT enable filtering done here further updates the MMIO + // information based on KVM. + // + + // Enable the NVLIPT units that have been discovered + engine_enable_mask = 0; + for (i = 0; i < NVSWITCH_ENG_COUNT(device, NVLW, ); i++) + { + if (NVSWITCH_ENG_IS_VALID(device, NVLW, i)) + { + engine_enable_mask |= NVBIT(i); + } + } + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NVLIPT, engine_enable_mask); + + // + // In bare metal we write ENABLE_NVLIPT to enable the units that aren't + // disabled by ROM configuration, platform configuration, or regkey override. + // If we are running inside a VM, the hypervisor has already set ENABLE_NVLIPT + // and write protected it. Reading ENABLE_NVLIPT tells us which units we + // are allowed to use inside this VM. + // + engine_disable_mask = ~NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NVLIPT); + if (engine_enable_mask != ~engine_disable_mask) + { + NVSWITCH_PRINT(device, WARN, + "NV_NVLSAW_NVSPMC_ENABLE_NVLIPT mismatch: wrote 0x%x, read 0x%x\n", + engine_enable_mask, + ~engine_disable_mask); + NVSWITCH_PRINT(device, WARN, + "Ignoring NV_NVLSAW_NVSPMC_ENABLE_NVLIPT readback until supported on fmodel\n"); + engine_disable_mask = ~engine_enable_mask; + } + engine_disable_mask &= NVBIT(NVSWITCH_ENG_COUNT(device, NVLW, )) - 1; + FOR_EACH_INDEX_IN_MASK(32, i, engine_disable_mask) + { + chip_device->engNVLW[i].valid = NV_FALSE; + for (j = 0; j < NVSWITCH_LINKS_PER_NVLW; j++) + { + idx_link = i * NVSWITCH_LINKS_PER_NVLW + j; + if (idx_link < NVSWITCH_LINK_COUNT(device)) + { + device->link[idx_link].valid = NV_FALSE; + // + // TODO: This invalidate used to also invalidate all the + // associated NVLW engFOO units. This is probably not necessary + // but code that bypasses the link valid check might touch the + // underlying units when they are not supposed to. + // + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // Enable the NPORT units that have been discovered + engine_enable_mask = 0; + for (i = 0; i < NVSWITCH_ENG_COUNT(device, NPG, ); i++) + { + if (NVSWITCH_ENG_IS_VALID(device, NPG, i)) + { + engine_enable_mask |= NVBIT(i); + } + } + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NPG, engine_enable_mask); + + // + // In bare metal we write ENABLE_NPG to enable the units that aren't + // disabled by ROM configuration, platform configuration, or regkey override. + // If we are running inside a VM, the hypervisor has already set ENABLE_NPG + // and write protected it. Reading ENABLE_NPG tells us which units we + // are allowed to use inside this VM. 
+ // + engine_disable_mask = ~NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _ENABLE_NPG); + if (engine_enable_mask != ~engine_disable_mask) + { + NVSWITCH_PRINT(device, WARN, + "NV_NVLSAW_NVSPMC_ENABLE_NPG mismatch: wrote 0x%x, read 0x%x\n", + engine_enable_mask, + ~engine_disable_mask); + NVSWITCH_PRINT(device, WARN, + "Ignoring NV_NVLSAW_NVSPMC_ENABLE_NPG readback until supported on fmodel\n"); + engine_disable_mask = ~engine_enable_mask; + } + engine_disable_mask &= NVBIT(NVSWITCH_ENG_COUNT(device, NPG, )) - 1; + FOR_EACH_INDEX_IN_MASK(32, i, engine_disable_mask) + { + chip_device->engNPG[i].valid = NV_FALSE; + for (j = 0; j < NVSWITCH_LINKS_PER_NPG; j++) + { + idx_link = i * NVSWITCH_LINKS_PER_NPG + j; + + if (idx_link < NVSWITCH_LINK_COUNT(device)) + { + device->link[idx_link].valid = NV_FALSE; + // + // TODO: This invalidate used to also invalidate all the + // associated NPG engFOO units. This is probably not necessary + // but code that bypasses the link valid check might touch the + // underlying units when they are not supposed to. + // + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return NVL_SUCCESS; +} + +// +// Bring units out of warm reset on boot. Used by driver load. +// +static void +_nvswitch_init_warm_reset_lr10 +( + nvswitch_device *device +) +{ + NvU32 idx_npg; + NvU32 idx_nport; + NvU32 nport_mask; + NvU32 nport_disable = 0; + +#if defined(NV_NPG_WARMRESET_NPORTDISABLE) + nport_disable = DRF_NUM(_NPG, _WARMRESET, _NPORTDISABLE, ~nport_mask); +#endif + + // + // Walk the NPGs and build the mask of extant NPORTs + // + for (idx_npg = 0; idx_npg < NVSWITCH_ENG_COUNT(device, NPG, ); idx_npg++) + { + if (NVSWITCH_ENG_IS_VALID(device, NPG, idx_npg)) + { + nport_mask = 0; + for (idx_nport = 0; idx_nport < NVSWITCH_NPORT_PER_NPG; idx_nport++) + { + nport_mask |= + (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_npg*NVSWITCH_NPORT_PER_NPG + idx_nport) ? 
+ NVBIT(idx_nport) : 0x0); + } + + NVSWITCH_NPG_WR32_LR10(device, idx_npg, + _NPG, _WARMRESET, + nport_disable | + DRF_NUM(_NPG, _WARMRESET, _NPORTWARMRESET, nport_mask)); + } + } +} + +/* + * CTRL_NVSWITCH_SET_REMAP_POLICY + */ + +NvlStatus +nvswitch_get_remap_table_selector_lr10 +( + nvswitch_device *device, + NVSWITCH_TABLE_SELECT_REMAP table_selector, + NvU32 *remap_ram_sel +) +{ + NvU32 ram_sel = 0; + + switch (table_selector) + { + case NVSWITCH_TABLE_SELECT_REMAP_PRIMARY: + ram_sel = NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM; + break; + default: + // Unsupported remap table selector + return -NVL_ERR_NOT_SUPPORTED; + break; + } + + if (remap_ram_sel) + { + *remap_ram_sel = ram_sel; + } + + return NVL_SUCCESS; +} + +NvU32 +nvswitch_get_ingress_ram_size_lr10 +( + nvswitch_device *device, + NvU32 ingress_ram_selector // NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECT* +) +{ + NvU32 ram_size = 0; + + switch (ingress_ram_selector) + { + case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM: + ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_REMAPTAB_DEPTH + 1; + break; + case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM: + ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RID_TAB_DEPTH + 1; + break; + case NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM: + ram_size = NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_RLAN_TAB_DEPTH + 1; + break; + default: + // Unsupported ingress RAM selector + break; + } + + return ram_size; +} + +static void +_nvswitch_set_remap_policy_lr10 +( + nvswitch_device *device, + NvU32 portNum, + NvU32 firstIndex, + NvU32 numEntries, + NVSWITCH_REMAP_POLICY_ENTRY *remap_policy +) +{ + NvU32 i; + NvU32 remap_address; + NvU32 address_offset; + NvU32 address_base; + NvU32 address_limit; + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR, + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) | + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1)); + + for (i = 0; i < numEntries; i++) + { + // Set each field if enabled, else set it to 0. 
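+        //
+        // The 64-bit address, base, limit and offset values are first narrowed
+        // to their LR10 field widths (address bits 46:36; base/limit/offset
+        // bits 35:20, matching the checks in nvswitch_ctrl_set_remap_policy_lr10)
+        // and then packed into the 32-bit _REMAPTABDATA1.._REMAPTABDATA4
+        // registers. _REMAPTABDATA0 is written last, since that write commits
+        // the entry and advances the auto-incrementing RAM address.
+        //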
+ remap_address = DRF_VAL64(_INGRESS, _REMAP, _ADDR_PHYS_LR10, remap_policy[i].address); + address_offset = DRF_VAL64(_INGRESS, _REMAP, _ADR_OFFSET_PHYS_LR10, remap_policy[i].addressOffset); + address_base = DRF_VAL64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LR10, remap_policy[i].addressBase); + address_limit = DRF_VAL64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LR10, remap_policy[i].addressLimit); + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA1, + DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy[i].reqCtxMask) | + DRF_NUM(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy[i].reqCtxChk)); + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA2, + DRF_NUM(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy[i].reqCtxRep) | + DRF_NUM(_INGRESS, _REMAPTABDATA2, _ADR_OFFSET, address_offset)); + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA3, + DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_BASE, address_base) | + DRF_NUM(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, address_limit)); + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA4, + DRF_NUM(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy[i].targetId) | + DRF_NUM(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy[i].flags)); + + // Write last and auto-increment + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REMAPTABDATA0, + DRF_NUM(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_address) | + DRF_NUM(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy[i].irlSelect) | + DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy[i].entryValid)); + } +} + +NvlStatus +nvswitch_ctrl_set_remap_policy_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_REMAP_POLICY *p +) +{ + NvU32 i; + NvU32 rfunc; + NvU32 ram_size; + NvlStatus retval = NVL_SUCCESS; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "NPORT port #%d not valid\n", + p->portNum); + return -NVL_BAD_ARGS; + } + + if (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY) + { + NVSWITCH_PRINT(device, ERROR, + "Remap table #%d not supported\n", + p->tableSelect); + return -NVL_ERR_NOT_SUPPORTED; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM); + if ((p->firstIndex >= ram_size) || + (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) || + (p->firstIndex + p->numEntries > ram_size)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d..%d] overflows range %d..%d or size %d.\n", + p->firstIndex, p->firstIndex + p->numEntries - 1, + 0, ram_size - 1, + NVSWITCH_REMAP_POLICY_ENTRIES_MAX); + return -NVL_BAD_ARGS; + } + + for (i = 0; i < p->numEntries; i++) + { + if (p->remapPolicy[i].targetId & + ~DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].targetId 0x%x out of valid range (0x%x..0x%x)\n", + i, p->remapPolicy[i].targetId, + 0, DRF_MASK(NV_INGRESS_REMAPTABDATA4_TGTID)); + return -NVL_BAD_ARGS; + } + + if (p->remapPolicy[i].irlSelect & + ~DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].irlSelect 0x%x out of valid range (0x%x..0x%x)\n", + i, p->remapPolicy[i].irlSelect, + 0, DRF_MASK(NV_INGRESS_REMAPTABDATA0_IRL_SEL)); + return -NVL_BAD_ARGS; + } + + rfunc = p->remapPolicy[i].flags & + ( + NVSWITCH_REMAP_POLICY_FLAGS_REMAP_ADDR | + NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_CHECK | + NVSWITCH_REMAP_POLICY_FLAGS_REQCTXT_REPLACE | + NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE | + 
NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET + ); + if (rfunc != p->remapPolicy[i].flags) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].flags 0x%x has undefined flags (0x%x)\n", + i, p->remapPolicy[i].flags, + p->remapPolicy[i].flags ^ rfunc); + return -NVL_BAD_ARGS; + } + + // Validate that only bits 46:36 are used + if (p->remapPolicy[i].address & + ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LR10)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].address 0x%llx & ~0x%llx != 0\n", + i, p->remapPolicy[i].address, + DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADDR_PHYS_LR10)); + return -NVL_BAD_ARGS; + } + + if (p->remapPolicy[i].reqCtxMask & + ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].reqCtxMask 0x%x out of valid range (0x%x..0x%x)\n", + i, p->remapPolicy[i].reqCtxMask, + 0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_MSK)); + return -NVL_BAD_ARGS; + } + + if (p->remapPolicy[i].reqCtxChk & + ~DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].reqCtxChk 0x%x out of valid range (0x%x..0x%x)\n", + i, p->remapPolicy[i].reqCtxChk, + 0, DRF_MASK(NV_INGRESS_REMAPTABDATA1_REQCTXT_CHK)); + return -NVL_BAD_ARGS; + } + + if (p->remapPolicy[i].reqCtxRep & + ~DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].reqCtxRep 0x%x out of valid range (0x%x..0x%x)\n", + i, p->remapPolicy[i].reqCtxRep, + 0, DRF_MASK(NV_INGRESS_REMAPTABDATA2_REQCTXT_REP)); + return -NVL_BAD_ARGS; + } + + if ((p->remapPolicy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_ADR_OFFSET) && + !(p->remapPolicy[i].flags & NVSWITCH_REMAP_POLICY_FLAGS_ADR_BASE)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].flags: _FLAGS_ADR_OFFSET should not be set if " + "_FLAGS_ADR_BASE is not set\n", + i); + return -NVL_BAD_ARGS; + } + + // Validate that only bits 35:20 are used + if (p->remapPolicy[i].addressBase & + ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].addressBase 0x%llx & ~0x%llx != 0\n", + i, p->remapPolicy[i].addressBase, + DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_BASE_PHYS_LR10)); + return -NVL_BAD_ARGS; + } + + // Validate that only bits 35:20 are used + if (p->remapPolicy[i].addressLimit & + ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].addressLimit 0x%llx & ~0x%llx != 0\n", + i, p->remapPolicy[i].addressLimit, + DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_LIMIT_PHYS_LR10)); + return -NVL_BAD_ARGS; + } + + // Validate base & limit describe a region + if (p->remapPolicy[i].addressBase > p->remapPolicy[i].addressLimit) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].addressBase/Limit invalid: 0x%llx > 0x%llx\n", + i, p->remapPolicy[i].addressBase, p->remapPolicy[i].addressLimit); + return -NVL_BAD_ARGS; + } + + // Validate that only bits 35:20 are used + if (p->remapPolicy[i].addressOffset & + ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].addressOffset 0x%llx & ~0x%llx != 0\n", + i, p->remapPolicy[i].addressOffset, + DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10)); + return -NVL_BAD_ARGS; + } + + // Validate limit - base + offset doesn't overflow 64G + if ((p->remapPolicy[i].addressLimit - p->remapPolicy[i].addressBase + + p->remapPolicy[i].addressOffset) & + ~DRF_SHIFTMASK64(NV_INGRESS_REMAP_ADR_OFFSET_PHYS_LR10)) + { + NVSWITCH_PRINT(device, ERROR, + "remapPolicy[%d].addressLimit 
0x%llx - addressBase 0x%llx + " + "addressOffset 0x%llx overflows 64GB\n", + i, p->remapPolicy[i].addressLimit, p->remapPolicy[i].addressBase, + p->remapPolicy[i].addressOffset); + return -NVL_BAD_ARGS; + } + } + + _nvswitch_set_remap_policy_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->remapPolicy); + + return retval; +} + +/* + * CTRL_NVSWITCH_GET_REMAP_POLICY + */ + +#define NVSWITCH_NUM_REMAP_POLICY_REGS_LR10 5 + +NvlStatus +nvswitch_ctrl_get_remap_policy_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_REMAP_POLICY_PARAMS *params +) +{ + NVSWITCH_REMAP_POLICY_ENTRY *remap_policy; + NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LR10]; // 5 REMAP tables + NvU32 table_index; + NvU32 remap_count; + NvU32 remap_address; + NvU32 address_offset; + NvU32 address_base; + NvU32 address_limit; + NvU32 ram_size; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "NPORT port #%d not valid\n", + params->portNum); + return -NVL_BAD_ARGS; + } + + if (params->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY) + { + NVSWITCH_PRINT(device, ERROR, + "Remap table #%d not supported\n", + params->tableSelect); + return -NVL_ERR_NOT_SUPPORTED; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM); + if ((params->firstIndex >= ram_size)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: remapPolicy first index %d out of range[%d..%d].\n", + __FUNCTION__, params->firstIndex, 0, ram_size - 1); + return -NVL_BAD_ARGS; + } + + nvswitch_os_memset(params->entry, 0, (NVSWITCH_REMAP_POLICY_ENTRIES_MAX * + sizeof(NVSWITCH_REMAP_POLICY_ENTRY))); + + table_index = params->firstIndex; + remap_policy = params->entry; + remap_count = 0; + + /* set table offset */ + NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) | + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1)); + + while (remap_count < NVSWITCH_REMAP_POLICY_ENTRIES_MAX && + table_index < ram_size) + { + remap_policy_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA0); + remap_policy_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA1); + remap_policy_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA2); + remap_policy_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA3); + remap_policy_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _REMAPTABDATA4); + + /* add to remap_entries list if nonzero */ + if (remap_policy_data[0] || remap_policy_data[1] || remap_policy_data[2] || + remap_policy_data[3] || remap_policy_data[4]) + { + remap_policy[remap_count].irlSelect = + DRF_VAL(_INGRESS, _REMAPTABDATA0, _IRL_SEL, remap_policy_data[0]); + + remap_policy[remap_count].entryValid = + DRF_VAL(_INGRESS, _REMAPTABDATA0, _ACLVALID, remap_policy_data[0]); + + remap_address = + DRF_VAL(_INGRESS, _REMAPTABDATA0, _RMAP_ADDR, remap_policy_data[0]); + + remap_policy[remap_count].address = + DRF_NUM64(_INGRESS, _REMAP, _ADDR_PHYS_LR10, remap_address); + + remap_policy[remap_count].reqCtxMask = + DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_MSK, remap_policy_data[1]); + + remap_policy[remap_count].reqCtxChk = + DRF_VAL(_INGRESS, _REMAPTABDATA1, _REQCTXT_CHK, remap_policy_data[1]); + + 
remap_policy[remap_count].reqCtxRep = + DRF_VAL(_INGRESS, _REMAPTABDATA2, _REQCTXT_REP, remap_policy_data[2]); + + address_offset = + DRF_VAL(_INGRESS, _REMAPTABDATA2, _ADR_OFFSET, remap_policy_data[2]); + + remap_policy[remap_count].addressOffset = + DRF_NUM64(_INGRESS, _REMAP, _ADR_OFFSET_PHYS_LR10, address_offset); + + address_base = + DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_BASE, remap_policy_data[3]); + + remap_policy[remap_count].addressBase = + DRF_NUM64(_INGRESS, _REMAP, _ADR_BASE_PHYS_LR10, address_base); + + address_limit = + DRF_VAL(_INGRESS, _REMAPTABDATA3, _ADR_LIMIT, remap_policy_data[3]); + + remap_policy[remap_count].addressLimit = + DRF_NUM64(_INGRESS, _REMAP, _ADR_LIMIT_PHYS_LR10, address_limit); + + remap_policy[remap_count].targetId = + DRF_VAL(_INGRESS, _REMAPTABDATA4, _TGTID, remap_policy_data[4]); + + remap_policy[remap_count].flags = + DRF_VAL(_INGRESS, _REMAPTABDATA4, _RFUNC, remap_policy_data[4]); + + remap_count++; + } + + table_index++; + } + + params->nextIndex = table_index; + params->numEntries = remap_count; + + return NVL_SUCCESS; +} + +/* + * CTRL_NVSWITCH_SET_REMAP_POLICY_VALID + */ +NvlStatus +nvswitch_ctrl_set_remap_policy_valid_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_REMAP_POLICY_VALID *p +) +{ + NvU32 remap_ram; + NvU32 ram_address = p->firstIndex; + NvU32 remap_policy_data[NVSWITCH_NUM_REMAP_POLICY_REGS_LR10]; // 5 REMAP tables + NvU32 i; + NvU32 ram_size; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT port #%d not valid\n", + __FUNCTION__, p->portNum); + return -NVL_BAD_ARGS; + } + + if (p->tableSelect != NVSWITCH_TABLE_SELECT_REMAP_PRIMARY) + { + NVSWITCH_PRINT(device, ERROR, + "Remap table #%d not supported\n", + p->tableSelect); + return -NVL_ERR_NOT_SUPPORTED; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSREMAPPOLICYRAM); + if ((p->firstIndex >= ram_size) || + (p->numEntries > NVSWITCH_REMAP_POLICY_ENTRIES_MAX) || + (p->firstIndex + p->numEntries > ram_size)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: remapPolicy[%d..%d] overflows range %d..%d or size %d.\n", + __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1, + 0, ram_size - 1, + NVSWITCH_REMAP_POLICY_ENTRIES_MAX); + return -NVL_BAD_ARGS; + } + + // Select REMAPPOLICY RAM and disable Auto Increament. + remap_ram = + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSREMAPPOLICYRAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0); + + for (i = 0; i < p->numEntries; i++) + { + /* set the ram address */ + remap_ram = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, remap_ram); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, remap_ram); + + remap_policy_data[0] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0); + remap_policy_data[1] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1); + remap_policy_data[2] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2); + remap_policy_data[3] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3); + remap_policy_data[4] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4); + + // Set valid bit in REMAPTABDATA0. 
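+        //
+        // Only _ACLVALID is modified; the other fields read above are written
+        // back unchanged. _REMAPTABDATA0 is written last, consistent with the
+        // write-last convention used in _nvswitch_set_remap_policy_lr10 above.
+        //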
+ remap_policy_data[0] = FLD_SET_DRF_NUM(_INGRESS, _REMAPTABDATA0, _ACLVALID, p->entryValid[i], remap_policy_data[0]); + + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA4, remap_policy_data[4]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA3, remap_policy_data[3]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA2, remap_policy_data[2]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA1, remap_policy_data[1]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REMAPTABDATA0, remap_policy_data[0]); + } + + return NVL_SUCCESS; +} + +// +// Programming invalid entries to 0x3F causes Route block to detect an invalid port number +// and flag a PRIV error to the FM. (See Table 14.RID RAM Programming, IAS 3.3.4) +// + +#define NVSWITCH_INVALID_PORT_VAL_LR10 0x3F +#define NVSWITCH_INVALID_VC_VAL_LR10 0x0 + +#define NVSWITCH_PORTLIST_PORT_LR10(_entry, _idx) \ + ((_idx < _entry.numEntries) ? _entry.portList[_idx].destPortNum : NVSWITCH_INVALID_PORT_VAL_LR10) + +#define NVSWITCH_PORTLIST_VC_LR10(_entry, _idx) \ + ((_idx < _entry.numEntries) ? _entry.portList[_idx].vcMap : NVSWITCH_INVALID_VC_VAL_LR10) + +/* + * CTRL_NVSWITCH_SET_ROUTING_ID + */ + +static void +_nvswitch_set_routing_id_lr10 +( + nvswitch_device *device, + NvU32 portNum, + NvU32 firstIndex, + NvU32 numEntries, + NVSWITCH_ROUTING_ID_ENTRY *routing_id +) +{ + NvU32 i; + NvU32 rmod; + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR, + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) | + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1)); + + for (i = 0; i < numEntries; i++) + { + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA1, + DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT3, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 3)) | + DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE3, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 3)) | + DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT4, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 4)) | + DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE4, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 4)) | + DRF_NUM(_INGRESS, _RIDTABDATA1, _PORT5, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 5)) | + DRF_NUM(_INGRESS, _RIDTABDATA1, _VC_MODE5, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 5))); + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA2, + DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT6, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 6)) | + DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE6, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 6)) | + DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT7, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 7)) | + DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE7, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 7)) | + DRF_NUM(_INGRESS, _RIDTABDATA2, _PORT8, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 8)) | + DRF_NUM(_INGRESS, _RIDTABDATA2, _VC_MODE8, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 8))); + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA3, + DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT9, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 9)) | + DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE9, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 9)) | + DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT10, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 10)) | + DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE10, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 10)) | + DRF_NUM(_INGRESS, _RIDTABDATA3, _PORT11, 
NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 11)) | + DRF_NUM(_INGRESS, _RIDTABDATA3, _VC_MODE11, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 11))); + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA4, + DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT12, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 12)) | + DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE12, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 12)) | + DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT13, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 13)) | + DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE13, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 13)) | + DRF_NUM(_INGRESS, _RIDTABDATA4, _PORT14, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 14)) | + DRF_NUM(_INGRESS, _RIDTABDATA4, _VC_MODE14, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 14))); + + rmod = + (routing_id[i].useRoutingLan ? NVBIT(6) : 0) | + (routing_id[i].enableIrlErrResponse ? NVBIT(9) : 0); + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA5, + DRF_NUM(_INGRESS, _RIDTABDATA5, _PORT15, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 15)) | + DRF_NUM(_INGRESS, _RIDTABDATA5, _VC_MODE15, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 15)) | + DRF_NUM(_INGRESS, _RIDTABDATA5, _RMOD, rmod) | + DRF_NUM(_INGRESS, _RIDTABDATA5, _ACLVALID, routing_id[i].entryValid)); + + NVSWITCH_ASSERT(routing_id[i].numEntries <= 16); + // Write last and auto-increment + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RIDTABDATA0, + DRF_NUM(_INGRESS, _RIDTABDATA0, _GSIZE, + (routing_id[i].numEntries == 16) ? 0x0 : routing_id[i].numEntries) | + DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT0, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 0)) | + DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE0, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 0)) | + DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT1, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 1)) | + DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE1, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 1)) | + DRF_NUM(_INGRESS, _RIDTABDATA0, _PORT2, NVSWITCH_PORTLIST_PORT_LR10(routing_id[i], 2)) | + DRF_NUM(_INGRESS, _RIDTABDATA0, _VC_MODE2, NVSWITCH_PORTLIST_VC_LR10(routing_id[i], 2))); + } +} + +#define NVSWITCH_NUM_RIDTABDATA_REGS_LR10 6 + +NvlStatus +nvswitch_ctrl_get_routing_id_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_ROUTING_ID_PARAMS *params +) +{ + NVSWITCH_ROUTING_ID_IDX_ENTRY *rid_entries; + NvU32 table_index; + NvU32 rid_tab_data[NVSWITCH_NUM_RIDTABDATA_REGS_LR10]; // 6 RID tables + NvU32 rid_count; + NvU32 rmod; + NvU32 gsize; + NvU32 ram_size; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT port #%d not valid\n", + __FUNCTION__, params->portNum); + return -NVL_BAD_ARGS; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM); + if (params->firstIndex >= ram_size) + { + NVSWITCH_PRINT(device, ERROR, + "%s: routingId first index %d out of range[%d..%d].\n", + __FUNCTION__, params->firstIndex, 0, ram_size - 1); + return -NVL_BAD_ARGS; + } + + nvswitch_os_memset(params->entries, 0, sizeof(params->entries)); + + table_index = params->firstIndex; + rid_entries = params->entries; + rid_count = 0; + + /* set table offset */ + NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) | + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1)); + + while (rid_count < NVSWITCH_ROUTING_ID_ENTRIES_MAX && + table_index < 
ram_size) + { + rid_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA0); + rid_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA1); + rid_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA2); + rid_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA3); + rid_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA4); + rid_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RIDTABDATA5); + + /* add to rid_entries list if nonzero */ + if (rid_tab_data[0] || rid_tab_data[1] || rid_tab_data[2] || + rid_tab_data[3] || rid_tab_data[4] || rid_tab_data[5]) + { + rid_entries[rid_count].entry.portList[0].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT0, rid_tab_data[0]); + rid_entries[rid_count].entry.portList[0].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE0, rid_tab_data[0]); + + rid_entries[rid_count].entry.portList[1].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT1, rid_tab_data[0]); + rid_entries[rid_count].entry.portList[1].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE1, rid_tab_data[0]); + + rid_entries[rid_count].entry.portList[2].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA0, _PORT2, rid_tab_data[0]); + rid_entries[rid_count].entry.portList[2].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA0, _VC_MODE2, rid_tab_data[0]); + + rid_entries[rid_count].entry.portList[3].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT3, rid_tab_data[1]); + rid_entries[rid_count].entry.portList[3].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE3, rid_tab_data[1]); + + rid_entries[rid_count].entry.portList[4].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT4, rid_tab_data[1]); + rid_entries[rid_count].entry.portList[4].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE4, rid_tab_data[1]); + + rid_entries[rid_count].entry.portList[5].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA1, _PORT5, rid_tab_data[1]); + rid_entries[rid_count].entry.portList[5].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA1, _VC_MODE5, rid_tab_data[1]); + + rid_entries[rid_count].entry.portList[6].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT6, rid_tab_data[2]); + rid_entries[rid_count].entry.portList[6].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE6, rid_tab_data[2]); + + rid_entries[rid_count].entry.portList[7].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT7, rid_tab_data[2]); + rid_entries[rid_count].entry.portList[7].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE7, rid_tab_data[2]); + + rid_entries[rid_count].entry.portList[8].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA2, _PORT8, rid_tab_data[2]); + rid_entries[rid_count].entry.portList[8].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA2, _VC_MODE8, rid_tab_data[2]); + + rid_entries[rid_count].entry.portList[9].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT9, rid_tab_data[3]); + rid_entries[rid_count].entry.portList[9].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE9, rid_tab_data[3]); + + rid_entries[rid_count].entry.portList[10].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT10, rid_tab_data[3]); + rid_entries[rid_count].entry.portList[10].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE10, rid_tab_data[3]); + + rid_entries[rid_count].entry.portList[11].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA3, _PORT11, rid_tab_data[3]); + rid_entries[rid_count].entry.portList[11].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA3, _VC_MODE11, rid_tab_data[3]); + + 
rid_entries[rid_count].entry.portList[12].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT12, rid_tab_data[4]); + rid_entries[rid_count].entry.portList[12].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE12, rid_tab_data[4]); + + rid_entries[rid_count].entry.portList[13].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT13, rid_tab_data[4]); + rid_entries[rid_count].entry.portList[13].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE13, rid_tab_data[4]); + + rid_entries[rid_count].entry.portList[14].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA4, _PORT14, rid_tab_data[4]); + rid_entries[rid_count].entry.portList[14].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA4, _VC_MODE14, rid_tab_data[4]); + + rid_entries[rid_count].entry.portList[15].destPortNum = DRF_VAL(_INGRESS, _RIDTABDATA5, _PORT15, rid_tab_data[5]); + rid_entries[rid_count].entry.portList[15].vcMap = DRF_VAL(_INGRESS, _RIDTABDATA5, _VC_MODE15, rid_tab_data[5]); + rid_entries[rid_count].entry.entryValid = DRF_VAL(_INGRESS, _RIDTABDATA5, _ACLVALID, rid_tab_data[5]); + + rmod = DRF_VAL(_INGRESS, _RIDTABDATA5, _RMOD, rid_tab_data[5]); + rid_entries[rid_count].entry.useRoutingLan = (NVBIT(6) & rmod) ? 1 : 0; + rid_entries[rid_count].entry.enableIrlErrResponse = (NVBIT(9) & rmod) ? 1 : 0; + + // Gsize of 16 falls into the 0th entry of GLT region. The _GSIZE field must be mapped accordingly + // to the number of port entries (See IAS, Table 20, Sect 3.4.2.2. Packet Routing). + gsize = DRF_VAL(_INGRESS, _RIDTABDATA0, _GSIZE, rid_tab_data[0]); + rid_entries[rid_count].entry.numEntries = ((gsize == 0) ? 16 : gsize); + + rid_entries[rid_count].idx = table_index; + rid_count++; + } + + table_index++; + } + + params->nextIndex = table_index; + params->numEntries = rid_count; + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_set_routing_id_valid_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_ROUTING_ID_VALID *p +) +{ + NvU32 rid_ctrl; + NvU32 rid_tab_data0; + NvU32 rid_tab_data1; + NvU32 rid_tab_data2; + NvU32 rid_tab_data3; + NvU32 rid_tab_data4; + NvU32 rid_tab_data5; + NvU32 ram_address = p->firstIndex; + NvU32 i; + NvU32 ram_size; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT port #%d not valid\n", + __FUNCTION__, p->portNum); + return -NVL_BAD_ARGS; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM); + if ((p->firstIndex >= ram_size) || + (p->numEntries > NVSWITCH_ROUTING_ID_ENTRIES_MAX) || + (p->firstIndex + p->numEntries > ram_size)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: routingId[%d..%d] overflows range %d..%d or size %d.\n", + __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1, + 0, ram_size - 1, + NVSWITCH_ROUTING_ID_ENTRIES_MAX); + return -NVL_BAD_ARGS; + } + + // Select RID RAM and disable Auto Increment. 
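+    //
+    // Auto-increment is left disabled because each entry is read, modified
+    // and written back in place; the loop below programs _RAM_ADDRESS
+    // explicitly for every entry. _RIDTABDATA0 is written last, matching the
+    // write-last convention used when the entries were originally programmed.
+    //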
+ rid_ctrl = + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRIDROUTERAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0); + + + for (i = 0; i < p->numEntries; i++) + { + /* set the ram address */ + rid_ctrl = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, rid_ctrl); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, rid_ctrl); + + rid_tab_data0 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA0); + rid_tab_data1 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA1); + rid_tab_data2 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA2); + rid_tab_data3 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA3); + rid_tab_data4 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA4); + rid_tab_data5 = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA5); + + // Set the valid bit in _RIDTABDATA5 + rid_tab_data5 = FLD_SET_DRF_NUM(_INGRESS, _RIDTABDATA5, _ACLVALID, + p->entryValid[i], rid_tab_data5); + + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA1, rid_tab_data1); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA2, rid_tab_data2); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA3, rid_tab_data3); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA4, rid_tab_data4); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA5, rid_tab_data5); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RIDTABDATA0, rid_tab_data0); + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_set_routing_id_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_ROUTING_ID *p +) +{ + NvU32 i, j; + NvlStatus retval = NVL_SUCCESS; + NvU32 ram_size; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "NPORT port #%d not valid\n", + p->portNum); + return -NVL_BAD_ARGS; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM); + if ((p->firstIndex >= ram_size) || + (p->numEntries > NVSWITCH_ROUTING_ID_ENTRIES_MAX) || + (p->firstIndex + p->numEntries > ram_size)) + { + NVSWITCH_PRINT(device, ERROR, + "routingId[%d..%d] overflows range %d..%d or size %d.\n", + p->firstIndex, p->firstIndex + p->numEntries - 1, + 0, ram_size - 1, + NVSWITCH_ROUTING_ID_ENTRIES_MAX); + return -NVL_BAD_ARGS; + } + + for (i = 0; i < p->numEntries; i++) + { + if ((p->routingId[i].numEntries < 1) || + (p->routingId[i].numEntries > NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX)) + { + NVSWITCH_PRINT(device, ERROR, + "routingId[%d].portList[] size %d overflows range %d..%d\n", + i, p->routingId[i].numEntries, + 1, NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX); + return -NVL_BAD_ARGS; + } + + for (j = 0; j < p->routingId[i].numEntries; j++) + { + if (p->routingId[i].portList[j].vcMap > DRF_MASK(NV_INGRESS_RIDTABDATA0_VC_MODE0)) + { + NVSWITCH_PRINT(device, ERROR, + "routingId[%d].portList[%d] vcMap 0x%x out of valid range (0x%x..0x%x)\n", + i, j, + p->routingId[i].portList[j].vcMap, + 0, DRF_MASK(NV_INGRESS_RIDTABDATA0_VC_MODE0)); + return -NVL_BAD_ARGS; + } + + if (p->routingId[i].portList[j].destPortNum > DRF_MASK(NV_INGRESS_RIDTABDATA0_PORT0)) + { + NVSWITCH_PRINT(device, ERROR, + "routingId[%d].portList[%d] destPortNum 0x%x out of valid range (0x%x..0x%x)\n", + i, j, + p->routingId[i].portList[j].destPortNum, + 0, 
DRF_MASK(NV_INGRESS_RIDTABDATA0_PORT0)); + return -NVL_BAD_ARGS; + } + } + } + + _nvswitch_set_routing_id_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->routingId); + + return retval; +} + +/* + * CTRL_NVSWITCH_SET_ROUTING_LAN + */ + +// +// Check the data field is present in the list. Return either the data field +// or default if not present. +// +#define NVSWITCH_PORTLIST_VALID_LR10(_entry, _idx, _field, _default) \ + ((_idx < _entry.numEntries) ? _entry.portList[_idx]._field : _default) + +static void +_nvswitch_set_routing_lan_lr10 +( + nvswitch_device *device, + NvU32 portNum, + NvU32 firstIndex, + NvU32 numEntries, + NVSWITCH_ROUTING_LAN_ENTRY *routing_lan +) +{ + NvU32 i; + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _REQRSPMAPADDR, + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, firstIndex) | + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1)); + + for (i = 0; i < numEntries; i++) + { + // + // NOTE: The GRP_SIZE field is 4-bits. A subgroup is size 1 through 16 + // with encoding 0x0=16 and 0x1=1, ..., 0xF=15. + // Programming of GRP_SIZE takes advantage of the inherent masking of + // DRF_NUM to truncate 16 to 0. + // See bug #3300673 + // + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA1, + DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_3, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 3, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_3, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 3, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_4, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 4, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_4, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 4, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SEL_5, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 5, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA1, _GRP_SIZE_5, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 5, groupSize, 1))); + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA2, + DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_6, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 6, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_6, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 6, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_7, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 7, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_7, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 7, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SEL_8, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 8, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA2, _GRP_SIZE_8, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 8, groupSize, 1))); + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA3, + DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_9, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 9, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_9, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 9, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_10, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 10, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_10, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 10, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SEL_11, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 11, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA3, _GRP_SIZE_11, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 11, groupSize, 1))); + + 
NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA4, + DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_12, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 12, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_12, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 12, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_13, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 13, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_13, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 13, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SEL_14, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 14, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA4, _GRP_SIZE_14, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 14, groupSize, 1))); + + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA5, + DRF_NUM(_INGRESS, _RLANTABDATA5, _GRP_SEL_15, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 15, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA5, _GRP_SIZE_15, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 15, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA5, _ACLVALID, routing_lan[i].entryValid)); + + // Write last and auto-increment + NVSWITCH_LINK_WR32_LR10(device, portNum, NPORT, _INGRESS, _RLANTABDATA0, + DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_0, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 0, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_0, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 0, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_1, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 1, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_1, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 1, groupSize, 1)) | + DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SEL_2, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 2, groupSelect, 0)) | + DRF_NUM(_INGRESS, _RLANTABDATA0, _GRP_SIZE_2, NVSWITCH_PORTLIST_VALID_LR10(routing_lan[i], 2, groupSize, 1))); + } +} + +NvlStatus +nvswitch_ctrl_set_routing_lan_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_ROUTING_LAN *p +) +{ + NvU32 i, j; + NvlStatus retval = NVL_SUCCESS; + NvU32 ram_size; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT port #%d not valid\n", + __FUNCTION__, p->portNum); + return -NVL_BAD_ARGS; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM); + if ((p->firstIndex >= ram_size) || + (p->numEntries > NVSWITCH_ROUTING_LAN_ENTRIES_MAX) || + (p->firstIndex + p->numEntries > ram_size)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: routingLan[%d..%d] overflows range %d..%d or size %d.\n", + __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1, + 0, ram_size - 1, + NVSWITCH_ROUTING_LAN_ENTRIES_MAX); + return -NVL_BAD_ARGS; + } + + for (i = 0; i < p->numEntries; i++) + { + if (p->routingLan[i].numEntries > NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX) + { + NVSWITCH_PRINT(device, ERROR, + "%s: routingLan[%d].portList[] size %d overflows range %d..%d\n", + __FUNCTION__, i, p->routingLan[i].numEntries, + 0, NVSWITCH_ROUTING_LAN_GROUP_SEL_MAX); + return -NVL_BAD_ARGS; + } + + for (j = 0; j < p->routingLan[i].numEntries; j++) + { + if (p->routingLan[i].portList[j].groupSelect > DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SEL_0)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: routingLan[%d].portList[%d] groupSelect 0x%x out of valid range (0x%x..0x%x)\n", + __FUNCTION__, i, j, + p->routingLan[i].portList[j].groupSelect, + 0, 
DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SEL_0)); + return -NVL_BAD_ARGS; + } + + if ((p->routingLan[i].portList[j].groupSize == 0) || + (p->routingLan[i].portList[j].groupSize > DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SIZE_0) + 1)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: routingLan[%d].portList[%d] groupSize 0x%x out of valid range (0x%x..0x%x)\n", + __FUNCTION__, i, j, + p->routingLan[i].portList[j].groupSize, + 1, DRF_MASK(NV_INGRESS_RLANTABDATA0_GRP_SIZE_0) + 1); + return -NVL_BAD_ARGS; + } + } + } + + _nvswitch_set_routing_lan_lr10(device, p->portNum, p->firstIndex, p->numEntries, p->routingLan); + + return retval; +} + +#define NVSWITCH_NUM_RLANTABDATA_REGS_LR10 6 + +NvlStatus +nvswitch_ctrl_get_routing_lan_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_ROUTING_LAN_PARAMS *params +) +{ + NVSWITCH_ROUTING_LAN_IDX_ENTRY *rlan_entries; + NvU32 table_index; + NvU32 rlan_tab_data[NVSWITCH_NUM_RLANTABDATA_REGS_LR10]; // 6 RLAN tables + NvU32 rlan_count; + NvU32 ram_size; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT port #%d not valid\n", + __FUNCTION__, params->portNum); + return -NVL_BAD_ARGS; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM); + if ((params->firstIndex >= ram_size)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: routingLan first index %d out of range[%d..%d].\n", + __FUNCTION__, params->firstIndex, 0, ram_size - 1); + return -NVL_BAD_ARGS; + } + + nvswitch_os_memset(params->entries, 0, (NVSWITCH_ROUTING_LAN_ENTRIES_MAX * + sizeof(NVSWITCH_ROUTING_LAN_IDX_ENTRY))); + + table_index = params->firstIndex; + rlan_entries = params->entries; + rlan_count = 0; + + /* set table offset */ + NVSWITCH_LINK_WR32_LR10(device, params->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, params->firstIndex) | + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 1)); + + while (rlan_count < NVSWITCH_ROUTING_LAN_ENTRIES_MAX && + table_index < ram_size) + { + /* read one entry */ + rlan_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA0); + rlan_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA1); + rlan_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA2); + rlan_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA3); + rlan_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA4); + rlan_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, params->portNum, NPORT, _INGRESS, _RLANTABDATA5); + + /* add to rlan_entries list if nonzero */ + if (rlan_tab_data[0] || rlan_tab_data[1] || rlan_tab_data[2] || + rlan_tab_data[3] || rlan_tab_data[4] || rlan_tab_data[5]) + { + rlan_entries[rlan_count].entry.portList[0].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_0, rlan_tab_data[0]); + rlan_entries[rlan_count].entry.portList[0].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_0, rlan_tab_data[0]); + if (rlan_entries[rlan_count].entry.portList[0].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[0].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[1].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_1, rlan_tab_data[0]); + rlan_entries[rlan_count].entry.portList[1].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_1, 
rlan_tab_data[0]); + if (rlan_entries[rlan_count].entry.portList[1].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[1].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[2].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SEL_2, rlan_tab_data[0]); + rlan_entries[rlan_count].entry.portList[2].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA0, _GRP_SIZE_2, rlan_tab_data[0]); + if (rlan_entries[rlan_count].entry.portList[2].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[2].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[3].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_3, rlan_tab_data[1]); + rlan_entries[rlan_count].entry.portList[3].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_3, rlan_tab_data[1]); + if (rlan_entries[rlan_count].entry.portList[3].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[3].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[4].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_4, rlan_tab_data[1]); + rlan_entries[rlan_count].entry.portList[4].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_4, rlan_tab_data[1]); + if (rlan_entries[rlan_count].entry.portList[4].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[4].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[5].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SEL_5, rlan_tab_data[1]); + rlan_entries[rlan_count].entry.portList[5].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA1, _GRP_SIZE_5, rlan_tab_data[1]); + if (rlan_entries[rlan_count].entry.portList[5].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[5].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[6].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_6, rlan_tab_data[2]); + rlan_entries[rlan_count].entry.portList[6].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_6, rlan_tab_data[2]); + if (rlan_entries[rlan_count].entry.portList[6].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[6].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[7].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_7, rlan_tab_data[2]); + rlan_entries[rlan_count].entry.portList[7].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_7, rlan_tab_data[2]); + if (rlan_entries[rlan_count].entry.portList[7].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[7].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[8].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SEL_8, rlan_tab_data[2]); + rlan_entries[rlan_count].entry.portList[8].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA2, _GRP_SIZE_8, rlan_tab_data[2]); + if (rlan_entries[rlan_count].entry.portList[8].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[8].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[9].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_9, rlan_tab_data[3]); + rlan_entries[rlan_count].entry.portList[9].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_9, rlan_tab_data[3]); + if (rlan_entries[rlan_count].entry.portList[9].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[9].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[10].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_10, rlan_tab_data[3]); + rlan_entries[rlan_count].entry.portList[10].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_10, rlan_tab_data[3]); + if (rlan_entries[rlan_count].entry.portList[10].groupSize == 0) + { + 
rlan_entries[rlan_count].entry.portList[10].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[11].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SEL_11, rlan_tab_data[3]); + rlan_entries[rlan_count].entry.portList[11].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA3, _GRP_SIZE_11, rlan_tab_data[3]); + if (rlan_entries[rlan_count].entry.portList[11].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[11].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[12].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_12, rlan_tab_data[4]); + rlan_entries[rlan_count].entry.portList[12].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_12, rlan_tab_data[4]); + if (rlan_entries[rlan_count].entry.portList[12].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[12].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[13].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_13, rlan_tab_data[4]); + rlan_entries[rlan_count].entry.portList[13].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_13, rlan_tab_data[4]); + if (rlan_entries[rlan_count].entry.portList[13].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[13].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[14].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SEL_14, rlan_tab_data[4]); + rlan_entries[rlan_count].entry.portList[14].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA4, _GRP_SIZE_14, rlan_tab_data[4]); + if (rlan_entries[rlan_count].entry.portList[14].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[14].groupSize = 16; + } + + rlan_entries[rlan_count].entry.portList[15].groupSelect = DRF_VAL(_INGRESS, _RLANTABDATA5, _GRP_SEL_15, rlan_tab_data[5]); + rlan_entries[rlan_count].entry.portList[15].groupSize = DRF_VAL(_INGRESS, _RLANTABDATA5, _GRP_SIZE_15, rlan_tab_data[5]); + if (rlan_entries[rlan_count].entry.portList[15].groupSize == 0) + { + rlan_entries[rlan_count].entry.portList[15].groupSize = 16; + } + + rlan_entries[rlan_count].entry.entryValid = DRF_VAL(_INGRESS, _RLANTABDATA5, _ACLVALID, rlan_tab_data[5]); + rlan_entries[rlan_count].entry.numEntries = NVSWITCH_ROUTING_ID_DEST_PORT_LIST_MAX; + rlan_entries[rlan_count].idx = table_index; + + rlan_count++; + } + + table_index++; + } + + params->nextIndex = table_index; + params->numEntries = rlan_count; + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_set_routing_lan_valid_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_ROUTING_LAN_VALID *p +) +{ + NvU32 rlan_ctrl; + NvU32 rlan_tab_data[NVSWITCH_NUM_RLANTABDATA_REGS_LR10]; // 6 RLAN tables + NvU32 ram_address = p->firstIndex; + NvU32 i; + NvU32 ram_size; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, p->portNum)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT port #%d not valid\n", + __FUNCTION__, p->portNum); + return -NVL_BAD_ARGS; + } + + ram_size = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM); + if ((p->firstIndex >= ram_size) || + (p->numEntries > NVSWITCH_ROUTING_LAN_ENTRIES_MAX) || + (p->firstIndex + p->numEntries > ram_size)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: routingLan[%d..%d] overflows range %d..%d or size %d.\n", + __FUNCTION__, p->firstIndex, p->firstIndex + p->numEntries - 1, + 0, ram_size - 1, + NVSWITCH_ROUTING_LAN_ENTRIES_MAX); + return -NVL_BAD_ARGS; + } + + // Select RLAN RAM and disable Auto Increament. 
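+    //
+    // As with the RID valid path above, each entry is re-addressed explicitly
+    // for its read-modify-write, so auto-increment stays off and only
+    // _ACLVALID is changed; _RLANTABDATA0 is written back last.
+    //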
+ rlan_ctrl = + DRF_DEF(_INGRESS, _REQRSPMAPADDR, _RAM_SEL, _SELECTSRLANROUTERAM) | + DRF_NUM(_INGRESS, _REQRSPMAPADDR, _AUTO_INCR, 0); + + for (i = 0; i < p->numEntries; i++) + { + /* set the RAM address */ + rlan_ctrl = FLD_SET_DRF_NUM(_INGRESS, _REQRSPMAPADDR, _RAM_ADDRESS, ram_address++, rlan_ctrl); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _REQRSPMAPADDR, rlan_ctrl); + + rlan_tab_data[0] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA0); + rlan_tab_data[1] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA1); + rlan_tab_data[2] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA2); + rlan_tab_data[3] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA3); + rlan_tab_data[4] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA4); + rlan_tab_data[5] = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA5); + + // Set the valid bit in _RLANTABDATA5 + rlan_tab_data[5] = FLD_SET_DRF_NUM(_INGRESS, _RLANTABDATA5, _ACLVALID, + p->entryValid[i], rlan_tab_data[5]); + + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA1, rlan_tab_data[1]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA2, rlan_tab_data[2]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA3, rlan_tab_data[3]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA4, rlan_tab_data[4]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA5, rlan_tab_data[5]); + NVSWITCH_LINK_WR32_LR10(device, p->portNum, NPORT, _INGRESS, _RLANTABDATA0, rlan_tab_data[0]); + } + + return NVL_SUCCESS; +} + +/* + * @Brief : Send priv ring command and wait for completion + * + * @Description : + * + * @param[in] device a reference to the device to initialize + * @param[in] cmd encoded priv ring command + */ +NvlStatus +nvswitch_ring_master_cmd_lr10 +( + nvswitch_device *device, + NvU32 cmd +) +{ + NvU32 value; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + + NVSWITCH_REG_WR32(device, _PPRIV_MASTER, _RING_COMMAND, cmd); + + nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout); + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_COMMAND); + if (FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value)) + { + break; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _NO_CMD, value)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for RING_COMMAND == NO_CMD (cmd=0x%x).\n", + __FUNCTION__, cmd); + return -NVL_INITIALIZATION_TOTAL_FAILURE; + } + + return NVL_SUCCESS; +} + +/* + * @brief Process the information read from ROM tables and apply it to device + * settings. 
+ * + * @param[in] device a reference to the device to query + * @param[in] firmware Information parsed from ROM tables + */ +static void +_nvswitch_process_firmware_info_lr10 +( + nvswitch_device *device, + NVSWITCH_FIRMWARE *firmware +) +{ + NvU32 idx_link; + NvU64 link_enable_mask; + + if (device->firmware.firmware_size == 0) + { + return; + } + + if (device->firmware.nvlink.link_config_found) + { + link_enable_mask = ((NvU64)device->regkeys.link_enable_mask2 << 32 | + (NvU64)device->regkeys.link_enable_mask); + // + // If the link enables were not already overridden by regkey, then + // apply the ROM link enables + // + if (link_enable_mask == NV_U64_MAX) + { + for (idx_link = 0; idx_link < nvswitch_get_num_links(device); idx_link++) + { + if ((device->firmware.nvlink.link_enable_mask & NVBIT64(idx_link)) == 0) + { + device->link[idx_link].valid = NV_FALSE; + } + } + } + } +} + +static void +_nvswitch_init_npg_multicast_lr10 +( + nvswitch_device *device +) +{ + NvU32 idx_npg; + NvU32 idx_nport; + NvU32 nport_mask; + + // + // Walk the NPGs and build the mask of extant NPORTs + // + for (idx_npg = 0; idx_npg < NVSWITCH_ENG_COUNT(device, NPG, ); idx_npg++) + { + if (NVSWITCH_ENG_IS_VALID(device, NPG, idx_npg)) + { + nport_mask = 0; + for (idx_nport = 0; idx_nport < NVSWITCH_NPORT_PER_NPG; idx_nport++) + { + nport_mask |= + (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_npg*NVSWITCH_NPORT_PER_NPG + idx_nport) ? + NVBIT(idx_nport) : 0x0); + } + + NVSWITCH_NPG_WR32_LR10(device, idx_npg, + _NPG, _CTRL_PRI_MULTICAST, + DRF_NUM(_NPG, _CTRL_PRI_MULTICAST, _NPORT_ENABLE, nport_mask) | + DRF_DEF(_NPG, _CTRL_PRI_MULTICAST, _READ_MODE, _AND_ALL_BUSSES)); + + NVSWITCH_NPGPERF_WR32_LR10(device, idx_npg, + _NPGPERF, _CTRL_PRI_MULTICAST, + DRF_NUM(_NPGPERF, _CTRL_PRI_MULTICAST, _NPORT_ENABLE, nport_mask) | + DRF_DEF(_NPGPERF, _CTRL_PRI_MULTICAST, _READ_MODE, _AND_ALL_BUSSES)); + } + } +} + +static NvlStatus +nvswitch_clear_nport_rams_lr10 +( + nvswitch_device *device +) +{ + NvU32 idx_nport; + NvU64 nport_mask = 0; + NvU32 zero_init_mask; + NvU32 val; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + NvlStatus retval = NVL_SUCCESS; + + // Build the mask of available NPORTs + for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++) + { + if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport)) + { + nport_mask |= NVBIT64(idx_nport); + } + } + + // Start the HW zero init + zero_init_mask = + DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_1, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_2, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_3, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_4, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_5, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_6, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _LINKTABLEINIT, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _REMAPTABINIT, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _RIDTABINIT, _HWINIT) | + DRF_DEF(_NPORT, _INITIALIZATION, _RLANTABINIT, _HWINIT); + + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _NPORT, _INITIALIZATION, + zero_init_mask); + + nvswitch_timeout_create(25*NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); + + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? 
NV_FALSE : NV_TRUE; + + // Check each enabled NPORT that is still pending until all are done + for (idx_nport = 0; idx_nport < NVSWITCH_ENG_COUNT(device, NPORT, ); idx_nport++) + { + if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport) && (nport_mask & NVBIT64(idx_nport))) + { + val = NVSWITCH_ENG_RD32_LR10(device, NPORT, idx_nport, _NPORT, _INITIALIZATION); + if (val == zero_init_mask) + { + nport_mask &= ~NVBIT64(idx_nport); + } + } + } + + if (nport_mask == 0) + { + break; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + if (nport_mask != 0) + { + NVSWITCH_PRINT(device, WARN, + "%s: Timeout waiting for NV_NPORT_INITIALIZATION (0x%llx)\n", + __FUNCTION__, nport_mask); + // Bug 2974064: Review this timeout handling (fall through) + retval = -NVL_ERR_INVALID_STATE; + } + + //bug 2737147 requires SW To init this crumbstore setting for LR10 + val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) | + DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CRUMBSTORE_RAM) | + DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 0) | + DRF_DEF(_TSTATE, _RAM_ADDRESS, _VC, _VC5_TRANSDONE); + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _RAM_ADDRESS, val); + + return retval; +} + +static void +_nvswitch_init_nport_ecc_control_lr10 +( + nvswitch_device *device +) +{ + // Set ingress ECC error limits + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, + DRF_NUM(_INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT, 1); + + // Set egress ECC error limits + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER, + DRF_NUM(_EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT, 1); + + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER, + DRF_NUM(_EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT, 1); + + // Set route ECC error limits + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, + DRF_NUM(_ROUTE, _ERR_NVS_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_NVS_ECC_ERROR_COUNTER_LIMIT, 1); + + // Set tstate ECC error limits + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, + DRF_NUM(_TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1); + + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, + DRF_NUM(_TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT, 1); + + // Set sourcetrack ECC error limits to _PROD value + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, + DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1); + + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, + DRF_NUM(_SOURCETRACK, _ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER, _ERROR_COUNT, 0x0)); + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, 
_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, 1); + + // Enable ECC/parity + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _INGRESS, _ERR_ECC_CTRL, + DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_HDR_ECC_ENABLE, __PROD) | + DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD) | + DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _REMAPTAB_ECC_ENABLE, __PROD) | + DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RIDTAB_ECC_ENABLE, __PROD) | + DRF_DEF(_INGRESS, _ERR_ECC_CTRL, _RLANTAB_ECC_ENABLE, __PROD)); + + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _EGRESS, _ERR_ECC_CTRL, + DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_ECC_ENABLE, __PROD) | + DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NXBAR_PARITY_ENABLE, __PROD) | + DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _RAM_OUT_ECC_ENABLE, __PROD) | + DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_ECC_ENABLE, __PROD) | + DRF_DEF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, __PROD)); + + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _ROUTE, _ERR_ECC_CTRL, + DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _GLT_ECC_ENABLE, __PROD) | + DRF_DEF(_ROUTE, _ERR_ECC_CTRL, _NVS_ECC_ENABLE, __PROD)); + + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _TSTATE, _ERR_ECC_CTRL, + DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _CRUMBSTORE_ECC_ENABLE, __PROD) | + DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TAGPOOL_ECC_ENABLE, __PROD) | + DRF_DEF(_TSTATE, _ERR_ECC_CTRL, _TD_TID_ECC_ENABLE, _DISABLE)); + + NVSWITCH_BCAST_WR32_LR10(device, NPORT, _SOURCETRACK, _ERR_ECC_CTRL, + DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_CRUMBSTORE_ECC_ENABLE, __PROD) | + DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN0_TD_CRUMBSTORE_ECC_ENABLE, _DISABLE) | + DRF_DEF(_SOURCETRACK, _ERR_ECC_CTRL, _CREQ_TCEN1_CRUMBSTORE_ECC_ENABLE, __PROD)); +} + +static void +_nvswitch_init_cmd_routing +( + nvswitch_device *device +) +{ + NvU32 val; + + //Set Hash policy for the requests. + val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN1, _SPRAY) | + DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN2, _SPRAY) | + DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN4, _SPRAY) | + DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE0, _RFUN7, _SPRAY); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _CMD_ROUTE_TABLE0, val); + + // Set Random policy for reponses. + val = DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN16, _RANDOM) | + DRF_DEF(_ROUTE, _CMD_ROUTE_TABLE2, _RFUN17, _RANDOM); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _CMD_ROUTE_TABLE2, val); +} + +static NvlStatus +_nvswitch_init_portstat_counters +( + nvswitch_device *device +) +{ + NvlStatus retval; + NvU32 idx_channel; + NVSWITCH_SET_LATENCY_BINS default_latency_bins; + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + chip_device->latency_stats = nvswitch_os_malloc(sizeof(NVSWITCH_LATENCY_STATS_LR10)); + if (chip_device->latency_stats == NULL) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed allocate memory for latency stats\n", + __FUNCTION__); + return -NVL_NO_MEM; + } + + nvswitch_os_memset(chip_device->latency_stats, 0, sizeof(NVSWITCH_LATENCY_STATS_LR10)); + + // + // These bin thresholds are values provided by Arch based off + // switch latency expectations. 
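+    // For reference: nvswitch_ctrl_set_latency_bins converts these
+    // nanosecond thresholds into SWITCHPLL cycles as
+    //     cycles = switchpll_hz / (1000000000 / threshold_ns)
+    // so with the 1330 MHz value assumed in nvswitch_ctrl_set_latency_bins_lr10,
+    // 120 ns / 200 ns / 1000 ns come out to roughly 159 / 266 / 1330 cycles.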
+ // + for (idx_channel=0; idx_channel < NVSWITCH_NUM_VCS_LR10; idx_channel++) + { + default_latency_bins.bin[idx_channel].lowThreshold = 120; // 120ns + default_latency_bins.bin[idx_channel].medThreshold = 200; // 200ns + default_latency_bins.bin[idx_channel].hiThreshold = 1000; // 1us + } + + chip_device->latency_stats->sample_interval_msec = 3000; // 3 second sample interval + + retval = nvswitch_ctrl_set_latency_bins(device, &default_latency_bins); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to set latency bins\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + return retval; + } + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_CONTROL, + DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _SWEEPMODE, _SWONDEMAND) | + DRF_DEF(_NPORT, _PORTSTAT_CONTROL, _RANGESELECT, _BITS13TO0)); + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_0, + DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_0, _SRCFILTERBIT, 0xFFFFFFFF)); + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SOURCE_FILTER_1, + DRF_NUM(_NPORT, _PORTSTAT_SOURCE_FILTER_1, _SRCFILTERBIT, 0xF)); + + // Set window limit to the maximum value + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_WINDOW_LIMIT, 0xffffffff); + + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _GLBLLATENCYTIMERCTRL, + DRF_DEF(_NVLSAW, _GLBLLATENCYTIMERCTRL, _ENABLE, _ENABLE)); + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _PORTSTAT_SNAP_CONTROL, + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _STARTCOUNTER, _ENABLE) | + DRF_DEF(_NPORT, _PORTSTAT_SNAP_CONTROL, _SNAPONDEMAND, _DISABLE)); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_init_nxbar_lr10 +( + nvswitch_device *device +) +{ + NvU32 tileout; + + // Setting this bit will send error detection info to NPG. + NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_ERR_CYA, + DRF_DEF(_NXBAR, _TILE_ERR_CYA, _SRCID_UPDATE_AT_EGRESS_CTRL, __PROD)); + + for (tileout = 0; tileout < NUM_NXBAR_TILEOUTS_PER_TC_LR10; tileout++) + { + NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_TILEOUT_ERR_CYA(tileout), + DRF_DEF(_NXBAR, _TC_TILEOUT0_ERR_CYA, _SRCID_UPDATE_AT_EGRESS_CTRL, __PROD)); + } + + // Enable idle-based clk gating and setup delay count. 
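+    // (The __PROD enumerants below select the production-recommended field
+    // values defined in the hardware reference headers.)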
+ NVSWITCH_BCAST_WR32_LR10(device, TILE, _NXBAR, _TILE_PRI_NXBAR_TILE_CG, + DRF_DEF(_NXBAR, _TILE_PRI_NXBAR_TILE_CG, _IDLE_CG_EN, __PROD) | + DRF_DEF(_NXBAR, _TILE_PRI_NXBAR_TILE_CG, _IDLE_CG_DLY_CNT, __PROD)); + + NVSWITCH_BCAST_WR32_LR10(device, NXBAR, _NXBAR, _TC_PRI_NXBAR_TC_CG, + DRF_DEF(_NXBAR, _TC_PRI_NXBAR_TC_CG, _IDLE_CG_EN, __PROD) | + DRF_DEF(_NXBAR, _TC_PRI_NXBAR_TC_CG, _IDLE_CG_DLY_CNT, __PROD)); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_init_nport_lr10 +( + nvswitch_device *device +) +{ + NvU32 data32, timeout; + NvU32 idx_nport; + NvU32 num_nports; + + num_nports = NVSWITCH_ENG_COUNT(device, NPORT, ); + + for (idx_nport = 0; idx_nport < num_nports; idx_nport++) + { + // Find the first valid nport + if (NVSWITCH_ENG_IS_VALID(device, NPORT, idx_nport)) + { + break; + } + } + + // There were no valid nports + if (idx_nport == num_nports) + { + NVSWITCH_PRINT(device, ERROR, "%s: No valid nports found!\n", __FUNCTION__); + return -NVL_ERR_INVALID_STATE; + } + + _nvswitch_init_nport_ecc_control_lr10(device); + + data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _ROUTE, _ROUTE_CONTROL); + data32 = FLD_SET_DRF(_ROUTE, _ROUTE_CONTROL, _URRESPENB, __PROD, data32); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _ROUTE, _ROUTE_CONTROL, data32); + + data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _EGRESS, _CTRL); + data32 = FLD_SET_DRF(_EGRESS, _CTRL, _DESTINATIONIDCHECKENB, __PROD, data32); + data32 = FLD_SET_DRF(_EGRESS, _CTRL, _CTO_ENB, __PROD, data32); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _CTRL, data32); + + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _EGRESS, _CTO_TIMER_LIMIT, + DRF_DEF(_EGRESS, _CTO_TIMER_LIMIT, _LIMIT, __PROD)); + + if (DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _DISABLE, device->regkeys.ato_control) == + NV_SWITCH_REGKEY_ATO_CONTROL_DISABLE_TRUE) + { + // ATO Disable + data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _TAGSTATECONTROL); + data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _OFF, data32); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _TAGSTATECONTROL, data32); + } + else + { + // ATO Enable + data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _TAGSTATECONTROL); + data32 = FLD_SET_DRF(_TSTATE, _TAGSTATECONTROL, _ATO_ENB, _ON, data32); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _TAGSTATECONTROL, data32); + + // ATO Timeout value + timeout = DRF_VAL(_SWITCH_REGKEY, _ATO_CONTROL, _TIMEOUT, device->regkeys.ato_control); + if (timeout != NV_SWITCH_REGKEY_ATO_CONTROL_TIMEOUT_DEFAULT) + { + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ATO_TIMER_LIMIT, + DRF_NUM(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, timeout)); + } + else + { + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _ATO_TIMER_LIMIT, + DRF_DEF(_TSTATE, _ATO_TIMER_LIMIT, _LIMIT, __PROD)); + } + } + + if (DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _DISABLE, device->regkeys.sto_control) == + NV_SWITCH_REGKEY_STO_CONTROL_DISABLE_TRUE) + { + // STO Disable + data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _SOURCETRACK, _CTRL); + data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _OFF, data32); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _CTRL, data32); + } + else + { + // STO Enable + data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _SOURCETRACK, _CTRL); + data32 = FLD_SET_DRF(_SOURCETRACK, _CTRL, _STO_ENB, _ON, data32); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _CTRL, data32); + + // STO Timeout value + timeout = DRF_VAL(_SWITCH_REGKEY, _STO_CONTROL, _TIMEOUT, device->regkeys.sto_control); + if 
(timeout != NV_SWITCH_REGKEY_STO_CONTROL_TIMEOUT_DEFAULT) + { + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _MULTISEC_TIMER0, + DRF_NUM(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, timeout)); + } + else + { + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _SOURCETRACK, _MULTISEC_TIMER0, + DRF_DEF(_SOURCETRACK, _MULTISEC_TIMER0, _TIMERVAL0, __PROD)); + } + } + + // + // WAR for bug 200606509 + // Disable CAM for entry 0 to prevent false ATO trigger + // + data32 = NVSWITCH_NPORT_RD32_LR10(device, idx_nport, _TSTATE, _CREQ_CAM_LOCK); + data32 = DRF_NUM(_TSTATE, _CREQ_CAM_LOCK, _ON, 0x1); + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _TSTATE, _CREQ_CAM_LOCK, data32); + + // + // WAR for bug 3115824 + // Clear CONTAIN_AND_DRAIN during init for links in reset. + // Since SBR does not clear CONTAIN_AND_DRAIN, this will clear the bit + // when the driver is reloaded after an SBR. If the driver has been reloaded + // without an SBR, then CONTAIN_AND_DRAIN will be re-triggered. + // + NVSWITCH_NPORT_MC_BCAST_WR32_LR10(device, _NPORT, _CONTAIN_AND_DRAIN, + DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE)); + + return NVL_SUCCESS; +} + +void * +nvswitch_alloc_chipdevice_lr10 +( + nvswitch_device *device +) +{ + void *chip_device; + + chip_device = nvswitch_os_malloc(sizeof(lr10_device)); + if (NULL != chip_device) + { + nvswitch_os_memset(chip_device, 0, sizeof(lr10_device)); + } + + device->chip_id = NV_PSMC_BOOT_42_CHIP_ID_LR10; + return(chip_device); +} + +static NvlStatus +nvswitch_initialize_pmgr_lr10 +( + nvswitch_device *device +) +{ + nvswitch_init_pmgr_lr10(device); + nvswitch_init_pmgr_devices_lr10(device); + + return NVL_SUCCESS; +} + +static NvlStatus +nvswitch_initialize_route_lr10 +( + nvswitch_device *device +) +{ + NvlStatus retval; + + retval = _nvswitch_init_ganged_link_routing(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to initialize GLT\n", + __FUNCTION__); + goto nvswitch_initialize_route_exit; + } + + _nvswitch_init_cmd_routing(device); + + // Initialize Portstat Counters + retval = _nvswitch_init_portstat_counters(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to initialize portstat counters\n", + __FUNCTION__); + goto nvswitch_initialize_route_exit; + } + +nvswitch_initialize_route_exit: + return retval; +} + + +NvlStatus +nvswitch_pri_ring_init_lr10 +( + nvswitch_device *device +) +{ + NvU32 i; + NvU32 value; + NvBool enumerated = NV_FALSE; + NvlStatus retval = NVL_SUCCESS; + + // + // Sometimes on RTL simulation we see the priv ring initialization fail. + // Retry up to 3 times until this issue is root caused. Bug 1826216. 
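+    // Each attempt issues _ENUMERATE_AND_START_RING, then checks
+    // _RING_START_RESULTS for _CONNECTIVITY _PASS and inspects
+    // _RING_INTERRUPT_STATUS0, acking any latched interrupt before retrying.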
+ // + for (i = 0; !enumerated && (i < 3); i++) + { + value = DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ENUMERATE_AND_START_RING); + retval = nvswitch_ring_master_cmd_lr10(device, value); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: PRIV ring enumeration failed\n", + __FUNCTION__); + continue; + } + + value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_START_RESULTS); + if (!FLD_TEST_DRF(_PPRIV_MASTER, _RING_START_RESULTS, _CONNECTIVITY, _PASS, value)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: PRIV ring connectivity failed\n", + __FUNCTION__); + continue; + } + + value = NVSWITCH_REG_RD32(device, _PPRIV_MASTER, _RING_INTERRUPT_STATUS0); + if (value) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NV_PPRIV_MASTER_RING_INTERRUPT_STATUS0 = %x\n", + __FUNCTION__, value); + + if ((!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, + _RING_START_CONN_FAULT, 0, value)) || + (!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, + _DISCONNECT_FAULT, 0, value)) || + (!FLD_TEST_DRF_NUM(_PPRIV_MASTER, _RING_INTERRUPT_STATUS0, + _OVERFLOW_FAULT, 0, value))) + { + NVSWITCH_PRINT(device, ERROR, + "%s: PRIV ring error interrupt\n", + __FUNCTION__); + } + + (void)nvswitch_ring_master_cmd_lr10(device, + DRF_DEF(_PPRIV_MASTER, _RING_COMMAND, _CMD, _ACK_INTERRUPT)); + + continue; + } + + enumerated = NV_TRUE; + } + + if (!enumerated) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Cannot enumerate PRIV ring!\n", + __FUNCTION__); + retval = -NVL_INITIALIZATION_TOTAL_FAILURE; + } + + return retval; +} + +/* + * @Brief : Initializes an NvSwitch hardware state + * + * @Description : + * + * @param[in] device a reference to the device to initialize + * + * @returns NVL_SUCCESS if the action succeeded + * -NVL_BAD_ARGS if bad arguments provided + * -NVL_PCI_ERROR if bar info unable to be retrieved + */ +NvlStatus +nvswitch_initialize_device_state_lr10 +( + nvswitch_device *device +) +{ + NvlStatus retval = NVL_SUCCESS; + + // alloc chip-specific device structure + device->chip_device = nvswitch_alloc_chipdevice(device); + if (NULL == device->chip_device) + { + NVSWITCH_PRINT(device, ERROR, + "nvswitch_os_malloc during chip_device creation failed!\n"); + retval = -NVL_NO_MEM; + goto nvswitch_initialize_device_state_exit; + } + + NVSWITCH_PRINT(device, SETUP, + "%s: MMIO discovery\n", + __FUNCTION__); + retval = nvswitch_device_discovery(device, NV_SWPTOP_TABLE_BASE_ADDRESS_OFFSET); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Engine discovery failed\n", + __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + + nvswitch_filter_discovery(device); + + retval = nvswitch_process_discovery(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Discovery processing failed\n", + __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + + // now that we have completed discovery, perform initialization steps that + // depend on engineDescriptors being initialized + // + // Temporary location, really needs to be done somewhere common to all flcnables + if (nvswitch_is_soe_supported(device)) + { + flcnablePostDiscoveryInit(device, device->pSoe); + } + else + { + NVSWITCH_PRINT(device, INFO, "%s: Skipping SOE post discovery init.\n", + __FUNCTION__); + } + + // Make sure interrupts are disabled before we enable interrupts with the OS. 
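+    // (Interrupts are set up again near the end of this function via
+    // nvswitch_initialize_interrupt_tree(), once the engines have been
+    // initialized.)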
+ nvswitch_lib_disable_interrupts(device); + + retval = nvswitch_pri_ring_init(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: PRI init failed\n", __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + + NVSWITCH_PRINT(device, SETUP, + "%s: Enabled links: 0x%llx\n", + __FUNCTION__, + ((NvU64)device->regkeys.link_enable_mask2 << 32 | + (NvU64)device->regkeys.link_enable_mask) & + ((~0ULL) >> (64 - NVSWITCH_LINK_COUNT(device)))); + + if (nvswitch_is_soe_supported(device)) + { + retval = nvswitch_init_soe(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, "%s: Init SOE failed\n", + __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + } + else + { + NVSWITCH_PRINT(device, INFO, "%s: Skipping SOE init.\n", + __FUNCTION__); + } + + // Read ROM configuration + nvswitch_read_rom_tables(device, &device->firmware); + _nvswitch_process_firmware_info_lr10(device, &device->firmware); + + // Init PMGR info + retval = nvswitch_initialize_pmgr(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: PMGR init failed\n", __FUNCTION__); + retval = -NVL_INITIALIZATION_TOTAL_FAILURE; + goto nvswitch_initialize_device_state_exit; + } + + retval = nvswitch_init_pll_config(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: failed\n", __FUNCTION__); + retval = -NVL_INITIALIZATION_TOTAL_FAILURE; + goto nvswitch_initialize_device_state_exit; + } + + // + // PLL init should be done *first* before other hardware init + // + retval = nvswitch_init_pll(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: PLL init failed\n", + __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + + // + // Now that software knows the devices and addresses, it must take all + // the wrapper modules out of reset. It does this by writing to the + // PMC module enable registers. 
+ // + + // Init IP wrappers +// _nvswitch_init_mc_enable_lr10(device); + retval = nvswitch_initialize_ip_wrappers(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: init failed\n", __FUNCTION__); + retval = -NVL_INITIALIZATION_TOTAL_FAILURE; + goto nvswitch_initialize_device_state_exit; + } + + _nvswitch_init_warm_reset_lr10(device); + _nvswitch_init_npg_multicast_lr10(device); + retval = nvswitch_clear_nport_rams(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT RAM clear failed\n", + __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + + retval = nvswitch_init_nport(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Init NPORTs failed\n", + __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + + retval = nvswitch_init_nxbar(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Init NXBARs failed\n", + __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + + if (device->regkeys.minion_disable != NV_SWITCH_REGKEY_MINION_DISABLE_YES) + { + NVSWITCH_PRINT(device, WARN, "%s: Entering init minion\n", __FUNCTION__); + + retval = nvswitch_init_minion(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Init MINIONs failed\n", + __FUNCTION__); + goto nvswitch_initialize_device_state_exit; + } + } + else + { + NVSWITCH_PRINT(device, INFO, "MINION is disabled via regkey.\n"); + + NVSWITCH_PRINT(device, INFO, "%s: Skipping MINION init\n", + __FUNCTION__); + } + + _nvswitch_setup_chiplib_forced_config_lr10(device); + + // Init route + retval = nvswitch_initialize_route(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: route init failed\n", __FUNCTION__); + retval = -NVL_INITIALIZATION_TOTAL_FAILURE; + goto nvswitch_initialize_device_state_exit; + } + + nvswitch_init_clock_gating(device); + + // Initialize SPI + if (nvswitch_is_spi_supported(device)) + { + retval = nvswitch_spi_init(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SPI init failed!, rc: %d\n", + __FUNCTION__, retval); + goto nvswitch_initialize_device_state_exit; + } + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: Skipping SPI init.\n", + __FUNCTION__); + } + + // Initialize SMBPBI + if (nvswitch_is_smbpbi_supported(device)) + { + retval = nvswitch_smbpbi_init(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SMBPBI init failed!, rc: %d\n", + __FUNCTION__, retval); + goto nvswitch_initialize_device_state_exit; + } + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: Skipping SMBPBI init.\n", + __FUNCTION__); + } + + nvswitch_initialize_interrupt_tree(device); + + // Initialize external thermal sensor + retval = nvswitch_init_thermal(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "%s: External Thermal init failed\n", + __FUNCTION__); + } + + return NVL_SUCCESS; + +nvswitch_initialize_device_state_exit: + nvswitch_destroy_device_state(device); + + return retval; +} + +/* + * @Brief : Destroys an NvSwitch hardware state + * + * @Description : + * + * @param[in] device a reference to the device to initialize + */ +void +nvswitch_destroy_device_state_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + if (nvswitch_is_soe_supported(device)) + { + nvswitch_soe_unregister_events(device); + } + + if (chip_device != NULL) + { + if ((chip_device->latency_stats) != NULL) + 
{ + nvswitch_os_free(chip_device->latency_stats); + } + + if ((chip_device->ganged_link_table) != NULL) + { + nvswitch_os_free(chip_device->ganged_link_table); + } + + nvswitch_free_chipdevice(device); + } + + nvswitch_i2c_destroy(device); + + return; +} + +static void +_nvswitch_set_nvlink_caps_lr10 +( + NvU32 *pCaps +) +{ + NvU8 tempCaps[NVSWITCH_NVLINK_CAPS_TBL_SIZE]; + + nvswitch_os_memset(tempCaps, 0, sizeof(tempCaps)); + + NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _VALID); + NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SUPPORTED); + NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _P2P_SUPPORTED); + NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _P2P_ATOMICS); + + // Assume IBM P9 for PPC -- TODO Xavier support. +#if defined(NVCPU_PPC64LE) + NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SYSMEM_ACCESS); + NVSWITCH_SET_CAP(tempCaps, NVSWITCH_NVLINK_CAPS, _SYSMEM_ATOMICS); +#endif + + nvswitch_os_memcpy(pCaps, tempCaps, sizeof(tempCaps)); +} + +/* + * @brief Determines if a link's lanes are reversed + * + * @param[in] device a reference to the device to query + * @param[in] linkId Target link ID + * + * @return NV_TRUE if a link's lanes are reversed + */ +NvBool +nvswitch_link_lane_reversed_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + NvU32 regData; + nvlink_link *link; + + link = nvswitch_get_link(device, linkId); + if (nvswitch_is_link_in_reset(device, link)) + { + return NV_FALSE; + } + + regData = NVSWITCH_LINK_RD32_LR10(device, linkId, NVLDL, _NVLDL_RX, _CONFIG_RX); + + // HW may reverse the lane ordering or it may be overridden by SW. + if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _REVERSAL_OVERRIDE, _ON, regData)) + { + // Overridden + if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _LANE_REVERSE, _ON, regData)) + { + return NV_TRUE; + } + else + { + return NV_FALSE; + } + } + else + { + // Sensed in HW + if (FLD_TEST_DRF(_NVLDL_RX, _CONFIG_RX, _HW_LANE_REVERSE, _ON, regData)) + { + return NV_TRUE; + } + else + { + return NV_FALSE; + } + } + + return NV_FALSE; +} + +NvlStatus +nvswitch_ctrl_get_nvlink_status_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret +) +{ + NvlStatus retval = NVL_SUCCESS; + nvlink_link *link; + NvU8 i; + NvU32 linkState, txSublinkStatus, rxSublinkStatus; + nvlink_conn_info conn_info = {0}; + NvU64 enabledLinkMask; + NvU32 nvlink_caps_version; + + enabledLinkMask = nvswitch_get_enabled_link_mask(device); + ret->enabledLinkMask = enabledLinkMask; + + FOR_EACH_INDEX_IN_MASK(64, i, enabledLinkMask) + { + NVSWITCH_ASSERT(i < NVSWITCH_LINK_COUNT(device)); + + link = nvswitch_get_link(device, i); + + if ((link == NULL) || + !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) || + (i >= NVSWITCH_NVLINK_MAX_LINKS)) + { + continue; + } + + // + // Call the core library to get the remote end information. On the first + // invocation this will also trigger link training, if link-training is + // not externally managed by FM. Therefore it is necessary that this be + // before link status on the link is populated since this call will + // actually change link state. 
+ // + if (device->regkeys.external_fabric_mgmt) + { + nvlink_lib_get_remote_conn_info(link, &conn_info); + } + else + { + nvlink_lib_discover_and_get_remote_conn_info(link, &conn_info, NVLINK_STATE_CHANGE_SYNC); + } + + // Set NVLINK per-link caps + _nvswitch_set_nvlink_caps_lr10(&ret->linkInfo[i].capsTbl); + + ret->linkInfo[i].phyType = NVSWITCH_NVLINK_STATUS_PHY_NVHS; + ret->linkInfo[i].subLinkWidth = nvswitch_get_sublink_width(device, link->linkNumber); + + if (!nvswitch_is_link_in_reset(device, link)) + { + linkState = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TOP, _LINK_STATE); + linkState = DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, linkState); + + txSublinkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_TX, _SLSM_STATUS_TX); + txSublinkStatus = DRF_VAL(_NVLDL_TX, _SLSM_STATUS_TX, _PRIMARY_STATE, txSublinkStatus); + + rxSublinkStatus = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLDL, _NVLDL_RX, _SLSM_STATUS_RX); + rxSublinkStatus = DRF_VAL(_NVLDL_RX, _SLSM_STATUS_RX, _PRIMARY_STATE, rxSublinkStatus); + + ret->linkInfo[i].bLaneReversal = nvswitch_link_lane_reversed_lr10(device, i); + } + else + { + linkState = NVSWITCH_NVLINK_STATUS_LINK_STATE_INIT; + txSublinkStatus = NVSWITCH_NVLINK_STATUS_SUBLINK_TX_STATE_OFF; + rxSublinkStatus = NVSWITCH_NVLINK_STATUS_SUBLINK_RX_STATE_OFF; + } + + ret->linkInfo[i].linkState = linkState; + ret->linkInfo[i].txSublinkStatus = txSublinkStatus; + ret->linkInfo[i].rxSublinkStatus = rxSublinkStatus; + + nvlink_caps_version = nvswitch_get_caps_nvlink_version(device); + if (nvlink_caps_version == NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0) + { + ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0; + ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_3_0; + } + else + { + NVSWITCH_PRINT(device, WARN, + "%s WARNING: Unknown NVSWITCH_NVLINK_CAPS_NVLINK_VERSION 0x%x\n", + __FUNCTION__, nvlink_caps_version); + ret->linkInfo[i].nvlinkVersion = NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_INVALID; + ret->linkInfo[i].nciVersion = NVSWITCH_NVLINK_STATUS_NCI_VERSION_INVALID; + } + + ret->linkInfo[i].phyVersion = NVSWITCH_NVLINK_STATUS_NVHS_VERSION_1_0; + + if (conn_info.bConnected) + { + ret->linkInfo[i].connected = NVSWITCH_NVLINK_STATUS_CONNECTED_TRUE; + ret->linkInfo[i].remoteDeviceLinkNumber = (NvU8)conn_info.linkNumber; + + ret->linkInfo[i].remoteDeviceInfo.domain = conn_info.domain; + ret->linkInfo[i].remoteDeviceInfo.bus = conn_info.bus; + ret->linkInfo[i].remoteDeviceInfo.device = conn_info.device; + ret->linkInfo[i].remoteDeviceInfo.function = conn_info.function; + ret->linkInfo[i].remoteDeviceInfo.pciDeviceId = conn_info.pciDeviceId; + ret->linkInfo[i].remoteDeviceInfo.deviceType = conn_info.deviceType; + + ret->linkInfo[i].localLinkSid = link->localSid; + ret->linkInfo[i].remoteLinkSid = link->remoteSid; + + if (0 != conn_info.pciDeviceId) + { + ret->linkInfo[i].remoteDeviceInfo.deviceIdFlags = + FLD_SET_DRF(SWITCH_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS, + _PCI, ret->linkInfo[i].remoteDeviceInfo.deviceIdFlags); + } + + // Does not use loopback + ret->linkInfo[i].loopProperty = + NVSWITCH_NVLINK_STATUS_LOOP_PROPERTY_NONE; + } + else + { + ret->linkInfo[i].connected = + NVSWITCH_NVLINK_STATUS_CONNECTED_FALSE; + ret->linkInfo[i].remoteDeviceInfo.deviceType = + NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE; + } + + // Set the device information for the local end of the link + ret->linkInfo[i].localDeviceInfo.domain = device->nvlink_device->pciInfo.domain; + 
ret->linkInfo[i].localDeviceInfo.bus = device->nvlink_device->pciInfo.bus; + ret->linkInfo[i].localDeviceInfo.device = device->nvlink_device->pciInfo.device; + ret->linkInfo[i].localDeviceInfo.function = device->nvlink_device->pciInfo.function; + ret->linkInfo[i].localDeviceInfo.pciDeviceId = 0xdeadbeef; // TODO + ret->linkInfo[i].localDeviceLinkNumber = i; + ret->linkInfo[i].laneRxdetStatusMask = device->link[i].lane_rxdet_status_mask; + ret->linkInfo[i].localDeviceInfo.deviceType = + NVSWITCH_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH; + + // Clock data + ret->linkInfo[i].nvlinkLineRateMbps = nvswitch_minion_get_line_rate_Mbps_lr10(device, i); + ret->linkInfo[i].nvlinkLinkDataRateKiBps = nvswitch_minion_get_data_rate_KiBps_lr10(device, i); + ret->linkInfo[i].nvlinkLinkClockMhz = ret->linkInfo[i].nvlinkLineRateMbps / 32; + ret->linkInfo[i].nvlinkRefClkSpeedMhz = 156; + ret->linkInfo[i].nvlinkRefClkType = NVSWITCH_NVLINK_REFCLK_TYPE_NVHS; + + } + FOR_EACH_INDEX_IN_MASK_END; + +// NVSWITCH_ASSERT(ret->enabledLinkMask == enabledLinkMask); + + return retval; +} + +NvlStatus +nvswitch_ctrl_get_counters_lr10 +( + nvswitch_device *device, + NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret +) +{ + nvlink_link *link; + NvU8 i; + NvU32 counterMask; + NvU32 data; + NvU32 val; + NvU64 tx0TlCount; + NvU64 tx1TlCount; + NvU64 rx0TlCount; + NvU64 rx1TlCount; + NvU32 laneId; + NvBool bLaneReversed; + NvlStatus status; + NvBool minion_enabled; + + ct_assert(NVSWITCH_NUM_LANES_LR10 <= NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE__SIZE); + + link = nvswitch_get_link(device, ret->linkId); + if ((link == NULL) || + !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber)) + { + return -NVL_BAD_ARGS; + } + + minion_enabled = nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION)); + + counterMask = ret->counterMask; + + // Common usage allows one of these to stand for all of them + if (counterMask & (NVSWITCH_NVLINK_COUNTER_TL_TX0 | + NVSWITCH_NVLINK_COUNTER_TL_TX1 | + NVSWITCH_NVLINK_COUNTER_TL_RX0 | + NVSWITCH_NVLINK_COUNTER_TL_RX1)) + { + tx0TlCount = nvswitch_read_64bit_counter(device, + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)), + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0))); + if (NVBIT64(63) & tx0TlCount) + { + ret->bTx0TlCounterOverflow = NV_TRUE; + tx0TlCount &= ~(NVBIT64(63)); + } + + tx1TlCount = nvswitch_read_64bit_counter(device, + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(1)), + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(1))); + if (NVBIT64(63) & tx1TlCount) + { + ret->bTx1TlCounterOverflow = NV_TRUE; + tx1TlCount &= ~(NVBIT64(63)); + } + + rx0TlCount = nvswitch_read_64bit_counter(device, + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)), + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0))); + if (NVBIT64(63) & rx0TlCount) + { + ret->bRx0TlCounterOverflow = NV_TRUE; + rx0TlCount &= ~(NVBIT64(63)); + } + + rx1TlCount = nvswitch_read_64bit_counter(device, + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(1)), + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(1))); + if (NVBIT64(63) & rx1TlCount) + { + ret->bRx1TlCounterOverflow = NV_TRUE; + rx1TlCount &= 
~(NVBIT64(63)); + } + + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX0)] = tx0TlCount; + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_TX1)] = tx1TlCount; + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX0)] = rx0TlCount; + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_TL_RX1)] = rx1TlCount; + } + + if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT) + { + if (minion_enabled) + { + status = nvswitch_minion_get_dl_status(device, link->linkNumber, + NV_NVLSTAT_RX01, 0, &data); + if (status != NVL_SUCCESS) + { + return status; + } + data = DRF_VAL(_NVLSTAT, _RX01, _FLIT_CRC_ERRORS_VALUE, data); + } + else + { + // MINION disabled + data = 0; + } + + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT)] + = data; + } + + data = 0x0; + bLaneReversed = nvswitch_link_lane_reversed_lr10(device, link->linkNumber); + + for (laneId = 0; laneId < NVSWITCH_NUM_LANES_LR10; laneId++) + { + // + // HW may reverse the lane ordering or it may be overridden by SW. + // If so, invert the interpretation of the lane CRC errors. + // + i = (NvU8)((bLaneReversed) ? (NVSWITCH_NUM_LANES_LR10 - 1) - laneId : laneId); + + if (minion_enabled) + { + status = nvswitch_minion_get_dl_status(device, link->linkNumber, + NV_NVLSTAT_DB01, 0, &data); + if (status != NVL_SUCCESS) + { + return status; + } + } + else + { + // MINION disabled + data = 0; + } + + if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId)) + { + val = BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(laneId)); + + switch (i) + { + case 0: + ret->nvlinkCounters[val] + = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L0, data); + break; + case 1: + ret->nvlinkCounters[val] + = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L1, data); + break; + case 2: + ret->nvlinkCounters[val] + = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L2, data); + break; + case 3: + ret->nvlinkCounters[val] + = DRF_VAL(_NVLSTAT, _DB01, _ERROR_COUNT_ERR_LANECRC_L3, data); + break; + } + } + } + + if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY) + { + if (minion_enabled) + { + status = nvswitch_minion_get_dl_status(device, link->linkNumber, + NV_NVLSTAT_TX09, 0, &data); + if (status != NVL_SUCCESS) + { + return status; + } + data = DRF_VAL(_NVLSTAT, _TX09, _REPLAY_EVENTS_VALUE, data); + } + else + { + // MINION disabled + data = 0; + } + + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_REPLAY)] + = data; + } + + if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY) + { + if (minion_enabled) + { + status = nvswitch_minion_get_dl_status(device, link->linkNumber, + NV_NVLSTAT_LNK1, 0, &data); + if (status != NVL_SUCCESS) + { + return status; + } + data = DRF_VAL(_NVLSTAT, _LNK1, _ERROR_COUNT1_RECOVERY_EVENTS_VALUE, data); + } + else + { + // MINION disabled + data = 0; + } + + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_TX_ERR_RECOVERY)] + = data; + } + + if (counterMask & NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY) + { + if (minion_enabled) + { + status = nvswitch_minion_get_dl_status(device, link->linkNumber, + NV_NVLSTAT_RX00, 0, &data); + if (status != NVL_SUCCESS) + { + return status; + } + data = DRF_VAL(_NVLSTAT, _RX00, _REPLAY_EVENTS_VALUE, data); + } + else + { + // MINION disabled + data = 0; + } + + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_DL_RX_ERR_REPLAY)] + = data; + } + + if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS) + { + 
ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_PASS)] = 0; + } + + if (counterMask & NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL) + { + ret->nvlinkCounters[BIT_IDX_32(NVSWITCH_NVLINK_COUNTER_PHY_REFRESH_FAIL)] = 0; + } + + return NVL_SUCCESS; +} + +/* + * CTRL_NVSWITCH_GET_INFO + * + * Query for miscellaneous information analogous to NV2080_CTRL_GPU_INFO + * This provides a single API to query for multiple pieces of miscellaneous + * information via a single call. + * + */ + +static NvU32 +_nvswitch_get_info_chip_id +( + nvswitch_device *device +) +{ + NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42); + + return (DRF_VAL(_PSMC, _BOOT_42, _CHIP_ID, val)); +} + +static NvU32 +_nvswitch_get_info_revision_major +( + nvswitch_device *device +) +{ + NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42); + + return (DRF_VAL(_PSMC, _BOOT_42, _MAJOR_REVISION, val)); +} + +static NvU32 +_nvswitch_get_info_revision_minor +( + nvswitch_device *device +) +{ + NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42); + + return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_REVISION, val)); +} + +static NvU32 +_nvswitch_get_info_revision_minor_ext +( + nvswitch_device *device +) +{ + NvU32 val = NVSWITCH_REG_RD32(device, _PSMC, _BOOT_42); + + return (DRF_VAL(_PSMC, _BOOT_42, _MINOR_EXTENDED_REVISION, val)); +} + +static NvU32 +_nvswitch_get_info_voltage +( + nvswitch_device *device +) +{ + NvU32 voltage = 0; + + return voltage; +} + +static NvBool +_nvswitch_inforom_nvl_supported +( + nvswitch_device *device +) +{ + return NV_FALSE; +} + +static NvBool +_nvswitch_inforom_bbx_supported +( + nvswitch_device *device +) +{ + return NV_FALSE; +} + +/* + * CTRL_NVSWITCH_GET_INFO + * + * Query for miscellaneous information analogous to NV2080_CTRL_GPU_INFO + * This provides a single API to query for multiple pieces of miscellaneous + * information via a single call. 
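+ * A minimal usage sketch (illustrative only; fields as used by the handler
+ * below):
+ *
+ *     NVSWITCH_GET_INFO p;
+ *     p.count = 2;
+ *     p.index[0] = NVSWITCH_GET_INFO_INDEX_ARCH;
+ *     p.index[1] = NVSWITCH_GET_INFO_INDEX_NUM_PORTS;
+ *     // after the control call returns, p.info[0] and p.info[1] hold the
+ *     // requested values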
+ * + */ + +NvlStatus +nvswitch_ctrl_get_info_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_INFO *p +) +{ + NvlStatus retval = NVL_SUCCESS; + NvU32 i; + + if (p->count > NVSWITCH_GET_INFO_COUNT_MAX) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Invalid args\n", + __FUNCTION__); + return -NVL_BAD_ARGS; + } + + nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_INFO_COUNT_MAX); + + for (i = 0; i < p->count; i++) + { + switch (p->index[i]) + { + case NVSWITCH_GET_INFO_INDEX_ARCH: + p->info[i] = device->chip_arch; + break; + case NVSWITCH_GET_INFO_INDEX_PLATFORM: + if (IS_RTLSIM(device)) + { + p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_RTLSIM; + } + else if (IS_FMODEL(device)) + { + p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_FMODEL; + } + else if (IS_EMULATION(device)) + { + p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_EMULATION; + } + else + { + p->info[i] = NVSWITCH_GET_INFO_INDEX_PLATFORM_SILICON; + } + break; + case NVSWITCH_GET_INFO_INDEX_IMPL: + p->info[i] = device->chip_impl; + break; + case NVSWITCH_GET_INFO_INDEX_CHIPID: + p->info[i] = _nvswitch_get_info_chip_id(device); + break; + case NVSWITCH_GET_INFO_INDEX_REVISION_MAJOR: + p->info[i] = _nvswitch_get_info_revision_major(device); + break; + case NVSWITCH_GET_INFO_INDEX_REVISION_MINOR: + p->info[i] = _nvswitch_get_info_revision_minor(device); + break; + case NVSWITCH_GET_INFO_INDEX_REVISION_MINOR_EXT: + p->info[i] = _nvswitch_get_info_revision_minor_ext(device); + break; + case NVSWITCH_GET_INFO_INDEX_DEVICE_ID: + p->info[i] = device->nvlink_device->pciInfo.pciDeviceId; + break; + case NVSWITCH_GET_INFO_INDEX_NUM_PORTS: + p->info[i] = NVSWITCH_LINK_COUNT(device); + break; + case NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_31_0: + p->info[i] = NvU64_LO32(nvswitch_get_enabled_link_mask(device)); + break; + case NVSWITCH_GET_INFO_INDEX_ENABLED_PORTS_MASK_63_32: + p->info[i] = NvU64_HI32(nvswitch_get_enabled_link_mask(device)); + break; + case NVSWITCH_GET_INFO_INDEX_NUM_VCS: + p->info[i] = _nvswitch_get_num_vcs_lr10(device); + break; + case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_TABLE_SIZE: + { + NvU32 remap_ram_sel; + NvlStatus status; + + status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_PRIMARY, &remap_ram_sel); + if (status == NVL_SUCCESS) + { + p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel); + } + else + { + p->info[i] = 0; + } + } + break; + case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTA_TABLE_SIZE: + { + NvU32 remap_ram_sel; + NvlStatus status; + + status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_EXTA, &remap_ram_sel); + if (status == NVL_SUCCESS) + { + p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel); + } + else + { + p->info[i] = 0; + } + } + break; + case NVSWITCH_GET_INFO_INDEX_REMAP_POLICY_EXTB_TABLE_SIZE: + { + NvU32 remap_ram_sel; + NvlStatus status; + + status = nvswitch_get_remap_table_selector(device, NVSWITCH_TABLE_SELECT_REMAP_EXTB, &remap_ram_sel); + if (status == NVL_SUCCESS) + { + p->info[i] = nvswitch_get_ingress_ram_size(device, remap_ram_sel); + } + else + { + p->info[i] = 0; + } + } + break; + case NVSWITCH_GET_INFO_INDEX_ROUTING_ID_TABLE_SIZE: + p->info[i] = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRIDROUTERAM); + break; + case NVSWITCH_GET_INFO_INDEX_ROUTING_LAN_TABLE_SIZE: + p->info[i] = nvswitch_get_ingress_ram_size(device, NV_INGRESS_REQRSPMAPADDR_RAM_SEL_SELECTSRLANROUTERAM); + break; + case NVSWITCH_GET_INFO_INDEX_FREQ_KHZ: + p->info[i] = 
device->switch_pll.freq_khz; + break; + case NVSWITCH_GET_INFO_INDEX_VCOFREQ_KHZ: + p->info[i] = device->switch_pll.vco_freq_khz; + break; + case NVSWITCH_GET_INFO_INDEX_VOLTAGE_MVOLT: + p->info[i] = _nvswitch_get_info_voltage(device); + break; + case NVSWITCH_GET_INFO_INDEX_PHYSICAL_ID: + p->info[i] = nvswitch_read_physical_id(device); + break; + case NVSWITCH_GET_INFO_INDEX_PCI_DOMAIN: + p->info[i] = device->nvlink_device->pciInfo.domain; + break; + case NVSWITCH_GET_INFO_INDEX_PCI_BUS: + p->info[i] = device->nvlink_device->pciInfo.bus; + break; + case NVSWITCH_GET_INFO_INDEX_PCI_DEVICE: + p->info[i] = device->nvlink_device->pciInfo.device; + break; + case NVSWITCH_GET_INFO_INDEX_PCI_FUNCTION: + p->info[i] = device->nvlink_device->pciInfo.function; + break; + default: + NVSWITCH_PRINT(device, ERROR, + "%s: Undefined NVSWITCH_GET_INFO_INDEX 0x%x\n", + __FUNCTION__, + p->index[i]); + retval = -NVL_BAD_ARGS; + break; + } + } + + return retval; +} + +NvlStatus +nvswitch_set_nport_port_config_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_SWITCH_PORT_CONFIG *p +) +{ + NvU32 val; + + if (p->requesterLinkID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGID)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Invalid requester RID 0x%x\n", + __FUNCTION__, p->requesterLinkID); + return -NVL_BAD_ARGS; + } + + if (p->requesterLanID > DRF_MASK(NV_NPORT_REQLINKID_REQROUTINGLAN)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Invalid requester RLAN 0x%x\n", + __FUNCTION__, p->requesterLanID); + return -NVL_BAD_ARGS; + } + + val = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _NPORT, _CTRL); + switch (p->type) + { + case CONNECT_ACCESS_GPU: + case CONNECT_ACCESS_CPU: + case CONNECT_ACCESS_SWITCH: + val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _ACCESSLINK, val); + break; + case CONNECT_TRUNK_SWITCH: + val = FLD_SET_DRF(_NPORT, _CTRL, _TRUNKLINKENB, _TRUNKLINK, val); + break; + default: + NVSWITCH_PRINT(device, ERROR, + "%s: invalid type #%d\n", + __FUNCTION__, p->type); + return -NVL_BAD_ARGS; + } + + switch(p->count) + { + case CONNECT_COUNT_512: + val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _512, val); + break; + case CONNECT_COUNT_1024: + val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _1024, val); + break; + case CONNECT_COUNT_2048: + val = FLD_SET_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _2048, val); + break; + default: + NVSWITCH_PRINT(device, ERROR, + "%s: invalid count #%d\n", + __FUNCTION__, p->count); + return -NVL_BAD_ARGS; + } + NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _CTRL, val); + + NVSWITCH_LINK_WR32(device, p->portNum, NPORT, _NPORT, _REQLINKID, + DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGID, p->requesterLinkID) | + DRF_NUM(_NPORT, _REQLINKID, _REQROUTINGLAN, p->requesterLanID)); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_set_switch_port_config_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_SWITCH_PORT_CONFIG *p +) +{ + nvlink_link *link; + NvU32 val; + NvlStatus status; + + if (!NVSWITCH_IS_LINK_ENG_VALID(device, p->portNum, NPORT)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: invalid link #%d\n", + __FUNCTION__, p->portNum); + return -NVL_BAD_ARGS; + } + + if (p->enableVC1 && (p->type != CONNECT_TRUNK_SWITCH)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: VC1 only allowed on trunk links\n", + __FUNCTION__); + return -NVL_BAD_ARGS; + } + + // Validate chip-specific NPORT settings and program port config settings. 
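+    // (On LR10 this presumably dispatches to nvswitch_set_nport_port_config_lr10
+    // above, which programs NV_NPORT_CTRL and NV_NPORT_REQLINKID.)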
+ status = nvswitch_set_nport_port_config(device, p); + if (status != NVL_SUCCESS) + { + return status; + } + + link = nvswitch_get_link(device, (NvU8)p->portNum); + if (link == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: invalid link\n", + __FUNCTION__); + return -NVL_ERR_INVALID_STATE; + } + + // + // If ac_coupled_mask is configured during nvswitch_create_link, + // give preference to it. + // + if (device->regkeys.ac_coupled_mask || + device->regkeys.ac_coupled_mask2 || + device->firmware.nvlink.link_ac_coupled_mask) + { + if (link->ac_coupled != p->acCoupled) + { + NVSWITCH_PRINT(device, ERROR, + "%s: port[%d]: Unsupported AC coupled change (%s)\n", + __FUNCTION__, p->portNum, p->acCoupled ? "AC" : "DC"); + return -NVL_BAD_ARGS; + } + } + + link->ac_coupled = p->acCoupled; + + // AC vs DC mode SYSTEM register + if (link->ac_coupled) + { + // + // In NVL3.0, ACMODE is handled by MINION in the INITPHASE1 command + // Here we just setup the register with the proper info + // + val = NVSWITCH_LINK_RD32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL); + val = FLD_SET_DRF(_NVLIPT_LNK, + _CTRL_SYSTEM_LINK_CHANNEL_CTRL, _AC_DC_MODE, _AC, val); + NVSWITCH_LINK_WR32_LR10(device, link->linkNumber, NVLIPT_LNK, + _NVLIPT_LNK, _CTRL_SYSTEM_LINK_CHANNEL_CTRL, val); + } + + // If _BUFFER_RDY is asserted, credits are locked. + val = NVSWITCH_LINK_RD32_LR10(device, p->portNum, NPORT, _NPORT, _CTRL_BUFFER_READY); + if (FLD_TEST_DRF(_NPORT, _CTRL_BUFFER_READY, _BUFFERRDY, _ENABLE, val)) + { + NVSWITCH_PRINT(device, SETUP, + "%s: port[%d]: BUFFERRDY already enabled.\n", + __FUNCTION__, p->portNum); + return NVL_SUCCESS; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_set_ingress_request_table_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_INGRESS_REQUEST_TABLE *p +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_ctrl_get_ingress_request_table_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_ctrl_set_ingress_request_valid_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_INGRESS_REQUEST_VALID *p +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_ctrl_get_ingress_response_table_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + + +NvlStatus +nvswitch_ctrl_set_ingress_response_table_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_INGRESS_RESPONSE_TABLE *p +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +static NvlStatus +nvswitch_ctrl_set_ganged_link_table_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_GANGED_LINK_TABLE *p +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +static NvlStatus +nvswitch_ctrl_get_internal_latency_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_INTERNAL_LATENCY *pLatency +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 vc_selector = pLatency->vc_selector; + NvU32 idx_nport; + + // Validate VC selector + if (vc_selector >= NVSWITCH_NUM_VCS_LR10) + { + return -NVL_BAD_ARGS; + } + + nvswitch_os_memset(pLatency, 0, sizeof(*pLatency)); + pLatency->vc_selector = vc_selector; + + for (idx_nport=0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++) + { + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, idx_nport)) + { + continue; + } + + pLatency->egressHistogram[idx_nport].low = + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low; + 
pLatency->egressHistogram[idx_nport].medium = + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium; + pLatency->egressHistogram[idx_nport].high = + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high; + pLatency->egressHistogram[idx_nport].panic = + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic; + pLatency->egressHistogram[idx_nport].count = + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count; + } + + pLatency->elapsed_time_msec = + (chip_device->latency_stats->latency[vc_selector].last_read_time_nsec - + chip_device->latency_stats->latency[vc_selector].start_time_nsec)/1000000ULL; + + chip_device->latency_stats->latency[vc_selector].start_time_nsec = + chip_device->latency_stats->latency[vc_selector].last_read_time_nsec; + + chip_device->latency_stats->latency[vc_selector].count = 0; + + // Clear accum_latency[] + for (idx_nport = 0; idx_nport < NVSWITCH_LINK_COUNT(device); idx_nport++) + { + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].low = 0; + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].medium = 0; + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].high = 0; + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].panic = 0; + chip_device->latency_stats->latency[vc_selector].accum_latency[idx_nport].count = 0; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_set_latency_bins_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_LATENCY_BINS *pLatency +) +{ + NvU32 vc_selector; + const NvU32 freq_mhz = 1330; + const NvU32 switchpll_hz = freq_mhz * 1000000ULL; // TODO: Update this with device->switch_pll.freq_khz after LR10 PLL update + const NvU32 min_threshold = 10; // Must be > zero to avoid div by zero + const NvU32 max_threshold = 10000; + + // Quick input validation and ns to register value conversion + for (vc_selector = 0; vc_selector < NVSWITCH_NUM_VCS_LR10; vc_selector++) + { + if ((pLatency->bin[vc_selector].lowThreshold > max_threshold) || + (pLatency->bin[vc_selector].lowThreshold < min_threshold) || + (pLatency->bin[vc_selector].medThreshold > max_threshold) || + (pLatency->bin[vc_selector].medThreshold < min_threshold) || + (pLatency->bin[vc_selector].hiThreshold > max_threshold) || + (pLatency->bin[vc_selector].hiThreshold < min_threshold) || + (pLatency->bin[vc_selector].lowThreshold > pLatency->bin[vc_selector].medThreshold) || + (pLatency->bin[vc_selector].medThreshold > pLatency->bin[vc_selector].hiThreshold)) + { + return -NVL_BAD_ARGS; + } + + pLatency->bin[vc_selector].lowThreshold = + switchpll_hz / (1000000000 / pLatency->bin[vc_selector].lowThreshold); + pLatency->bin[vc_selector].medThreshold = + switchpll_hz / (1000000000 / pLatency->bin[vc_selector].medThreshold); + pLatency->bin[vc_selector].hiThreshold = + switchpll_hz / (1000000000 / pLatency->bin[vc_selector].hiThreshold); + + NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _LOW, vc_selector, pLatency->bin[vc_selector].lowThreshold); + NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _MEDIUM, vc_selector, pLatency->bin[vc_selector].medThreshold); + NVSWITCH_PORTSTAT_BCAST_WR32_LR10(device, _LIMIT, _HIGH, vc_selector, pLatency->bin[vc_selector].hiThreshold); + } + + return NVL_SUCCESS; +} + +#define NV_NPORT_REQLINKID_REQROUTINGLAN_1024 18:18 +#define NV_NPORT_REQLINKID_REQROUTINGLAN_2048 18:17 + +/* + * @brief Returns the ingress requester link id. 
+ * + * On LR10, REQROUTINGID only gives the endpoint but not the specific port of the response packet. + * To identify the specific port, the routing_ID must be appended with the upper bits of REQROUTINGLAN. + * + * When NV_NPORT_CTRL_ENDPOINT_COUNT = 1024, the upper bit of NV_NPORT_REQLINKID_REQROUTINGLAN become REQROUTINGID[9]. + * When NV_NPORT_CTRL_ENDPOINT_COUNT = 2048, the upper two bits of NV_NPORT_REQLINKID_REQROUTINGLAN become REQROUTINGID[10:9]. + * + * @param[in] device nvswitch device + * @param[in] params NVSWITCH_GET_INGRESS_REQLINKID_PARAMS + * + * @returns NVL_SUCCESS if action succeeded, + * -NVL_ERR_INVALID_STATE invalid link + */ +NvlStatus +nvswitch_ctrl_get_ingress_reqlinkid_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_INGRESS_REQLINKID_PARAMS *params +) +{ + NvU32 regval; + NvU32 reqRid; + NvU32 reqRlan; + NvU32 rlan_shift = DRF_SHIFT_RT(NV_NPORT_REQLINKID_REQROUTINGID) + 1; + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, params->portNum)) + { + return -NVL_BAD_ARGS; + } + + regval = NVSWITCH_NPORT_RD32_LR10(device, params->portNum, _NPORT, _REQLINKID); + reqRid = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGID, regval); + reqRlan = regval; + + regval = NVSWITCH_NPORT_RD32_LR10(device, params->portNum, _NPORT, _CTRL); + if (FLD_TEST_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _1024, regval)) + { + reqRlan = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGLAN_1024, reqRlan); + params->requesterLinkID = (reqRid | (reqRlan << rlan_shift)); + } + else if (FLD_TEST_DRF(_NPORT, _CTRL, _ENDPOINT_COUNT, _2048, regval)) + { + reqRlan = DRF_VAL(_NPORT, _REQLINKID, _REQROUTINGLAN_2048, reqRlan); + params->requesterLinkID = (reqRid | (reqRlan << rlan_shift)); + } + else + { + params->requesterLinkID = reqRid; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_get_bios_info_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_BIOS_INFO_PARAMS *p +) +{ + NvU32 biosVersionBytes; + NvU32 biosOemVersionBytes; + NvU32 biosMagic = 0x9210; + + // + // Example: 92.10.09.00.00 is the formatted version string + // | | | + // | | |__ BIOS OEM version byte + // | | + // |_________|_____ BIOS version bytes + // + biosVersionBytes = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_6); + biosOemVersionBytes = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_7); + + // + // LR10 is built out of core92 and the BIOS version will always begin with + // 92.10.xx.xx.xx + // + if ((biosVersionBytes >> 16) != biosMagic) + { + NVSWITCH_PRINT(device, ERROR, + "BIOS version not found in scratch register\n"); + return -NVL_ERR_INVALID_STATE; + } + + p->version = (((NvU64)biosVersionBytes) << 8) | (biosOemVersionBytes & 0xff); + + return NVL_SUCCESS; +} + +static void +_nvlink_clear_corelib_state +( + nvlink_link *link +) +{ + // Receiver Detect needs to happen again + link->bRxDetected = NV_FALSE; + + // INITNEGOTIATE needs to happen again + link->bInitnegotiateConfigGood = NV_FALSE; + + // TxCommonMode needs to happen again + link->bTxCommonModeFail = NV_FALSE; + + // SAFE transition needs to happen again + link->bSafeTransitionFail = NV_FALSE; + + // Reset the SW state tracking the link and sublink states + link->state = NVLINK_LINKSTATE_OFF; + link->tx_sublink_state = NVLINK_SUBLINK_STATE_TX_OFF; + link->rx_sublink_state = NVLINK_SUBLINK_STATE_RX_OFF; +} + +const static NvU32 nport_reg_addr[] = +{ + NV_NPORT_CTRL, + NV_NPORT_CTRL_SLCG, + NV_NPORT_REQLINKID, + NV_NPORT_PORTSTAT_CONTROL, + NV_NPORT_PORTSTAT_SNAP_CONTROL, + NV_NPORT_PORTSTAT_WINDOW_LIMIT, + NV_NPORT_PORTSTAT_LIMIT_LOW_0, + 
NV_NPORT_PORTSTAT_LIMIT_MEDIUM_0, + NV_NPORT_PORTSTAT_LIMIT_HIGH_0, + NV_NPORT_PORTSTAT_LIMIT_LOW_1, + NV_NPORT_PORTSTAT_LIMIT_MEDIUM_1, + NV_NPORT_PORTSTAT_LIMIT_HIGH_1, + NV_NPORT_PORTSTAT_LIMIT_LOW_2, + NV_NPORT_PORTSTAT_LIMIT_MEDIUM_2, + NV_NPORT_PORTSTAT_LIMIT_HIGH_2, + NV_NPORT_PORTSTAT_LIMIT_LOW_3, + NV_NPORT_PORTSTAT_LIMIT_MEDIUM_3, + NV_NPORT_PORTSTAT_LIMIT_HIGH_3, + NV_NPORT_PORTSTAT_LIMIT_LOW_4, + NV_NPORT_PORTSTAT_LIMIT_MEDIUM_4, + NV_NPORT_PORTSTAT_LIMIT_HIGH_4, + NV_NPORT_PORTSTAT_LIMIT_LOW_5, + NV_NPORT_PORTSTAT_LIMIT_MEDIUM_5, + NV_NPORT_PORTSTAT_LIMIT_HIGH_5, + NV_NPORT_PORTSTAT_LIMIT_LOW_6, + NV_NPORT_PORTSTAT_LIMIT_MEDIUM_6, + NV_NPORT_PORTSTAT_LIMIT_HIGH_6, + NV_NPORT_PORTSTAT_LIMIT_LOW_7, + NV_NPORT_PORTSTAT_LIMIT_MEDIUM_7, + NV_NPORT_PORTSTAT_LIMIT_HIGH_7, + NV_NPORT_PORTSTAT_SOURCE_FILTER_0, + NV_NPORT_PORTSTAT_SOURCE_FILTER_1, + NV_ROUTE_ROUTE_CONTROL, + NV_ROUTE_CMD_ROUTE_TABLE0, + NV_ROUTE_CMD_ROUTE_TABLE1, + NV_ROUTE_CMD_ROUTE_TABLE2, + NV_ROUTE_CMD_ROUTE_TABLE3, + NV_ROUTE_ERR_LOG_EN_0, + NV_ROUTE_ERR_CONTAIN_EN_0, + NV_ROUTE_ERR_ECC_CTRL, + NV_ROUTE_ERR_GLT_ECC_ERROR_COUNTER_LIMIT, + NV_ROUTE_ERR_NVS_ECC_ERROR_COUNTER_LIMIT, + NV_INGRESS_ERR_LOG_EN_0, + NV_INGRESS_ERR_CONTAIN_EN_0, + NV_INGRESS_ERR_ECC_CTRL, + NV_INGRESS_ERR_REMAPTAB_ECC_ERROR_COUNTER_LIMIT, + NV_INGRESS_ERR_RIDTAB_ECC_ERROR_COUNTER_LIMIT, + NV_INGRESS_ERR_RLANTAB_ECC_ERROR_COUNTER_LIMIT, + NV_INGRESS_ERR_NCISOC_HDR_ECC_ERROR_COUNTER_LIMIT, + NV_EGRESS_CTRL, + NV_EGRESS_CTO_TIMER_LIMIT, + NV_EGRESS_ERR_LOG_EN_0, + NV_EGRESS_ERR_CONTAIN_EN_0, + NV_EGRESS_ERR_ECC_CTRL, + NV_EGRESS_ERR_NXBAR_ECC_ERROR_COUNTER_LIMIT, + NV_EGRESS_ERR_RAM_OUT_ECC_ERROR_COUNTER_LIMIT, + NV_TSTATE_TAGSTATECONTROL, + NV_TSTATE_ATO_TIMER_LIMIT, + NV_TSTATE_CREQ_CAM_LOCK, + NV_TSTATE_ERR_LOG_EN_0, + NV_TSTATE_ERR_CONTAIN_EN_0, + NV_TSTATE_ERR_ECC_CTRL, + NV_TSTATE_ERR_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, + NV_TSTATE_ERR_TAGPOOL_ECC_ERROR_COUNTER_LIMIT, + NV_TSTATE_ERR_TD_TID_RAM_ECC_ERROR_COUNTER_LIMIT, + NV_SOURCETRACK_CTRL, + NV_SOURCETRACK_MULTISEC_TIMER0, + NV_SOURCETRACK_ERR_LOG_EN_0, + NV_SOURCETRACK_ERR_CONTAIN_EN_0, + NV_SOURCETRACK_ERR_ECC_CTRL, + NV_SOURCETRACK_ERR_CREQ_TCEN0_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, + NV_SOURCETRACK_ERR_CREQ_TCEN0_TD_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, + NV_SOURCETRACK_ERR_CREQ_TCEN1_CRUMBSTORE_ECC_ERROR_COUNTER_LIMIT, +}; + +/* + * Disable interrupts coming from NPG & NVLW blocks.
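+ * Masking is done by writing 0 to the NPORT common error enables and to the
+ * per-link NVLCTRL fatal/non-fatal/correctable interrupt masks below, so that
+ * the warm-reset sequence that follows does not raise spurious interrupts.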
+ */ +static void +_nvswitch_link_disable_interrupts_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + NvU32 i; + + NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT, + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x0) | + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x0) | + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x0)); + + for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1; i++) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_0_MASK(i), + DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _FATAL, 0x0) | + DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _NONFATAL, 0x0) | + DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _CORRECTABLE, 0x0)); + + NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_1_MASK(i), + DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _FATAL, 0x0) | + DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _NONFATAL, 0x0) | + DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _CORRECTABLE, 0x0)); + + NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_2_MASK(i), + DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _FATAL, 0x0) | + DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _NONFATAL, 0x0) | + DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _CORRECTABLE, 0x0)); + } +} + +/* + * Reset NPG & NVLW interrupt state. + */ +static void +_nvswitch_link_reset_interrupts_lr10 +( + nvswitch_device *device, + NvU32 link +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 i; + + NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _ERR_CONTROL_COMMON_NPORT, + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _CORRECTABLEENABLE, 0x1) | + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _FATALENABLE, 0x1) | + DRF_NUM(_NPORT, _ERR_CONTROL_COMMON_NPORT, _NONFATALENABLE, 0x1)); + + for (i = 0; i < NV_NVLCTRL_LINK_INTR_0_STATUS__SIZE_1; i++) + { + NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_0_MASK(i), + DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL, _LINK_INTR_0_MASK, _CORRECTABLE, 0x1)); + + NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_1_MASK(i), + DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL, _LINK_INTR_1_MASK, _CORRECTABLE, 0x1)); + + NVSWITCH_LINK_WR32_LR10(device, link, NVLW, _NVLCTRL, _LINK_INTR_2_MASK(i), + DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _FATAL, 0x1) | + DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _NONFATAL, 0x1) | + DRF_NUM(_NVLCTRL, _LINK_INTR_2_MASK, _CORRECTABLE, 0x1)); + } + + // Enable interrupts which are disabled to prevent interrupt storm. 
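+    // The report-enable values written below come from chip_device->intr_mask,
+    // so the per-block fatal/non-fatal enables are restored from the driver's
+    // cached mask settings rather than from hard-coded values.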
+ NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.route.fatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _ROUTE, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.route.nonfatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.ingress.fatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _INGRESS, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.ingress.nonfatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.egress.fatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _EGRESS, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.egress.nonfatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.tstate.fatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.tstate.nonfatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_FATAL_REPORT_EN_0, chip_device->intr_mask.sourcetrack.fatal); + NVSWITCH_NPORT_WR32_LR10(device, link, _SOURCETRACK, _ERR_NON_FATAL_REPORT_EN_0, chip_device->intr_mask.sourcetrack.nonfatal); + + // Clear fatal error status + device->link[link].fatal_error_occurred = NV_FALSE; +} + +/* + * @Brief : Control to reset and drain the links. + * + * @param[in] device A reference to the device to initialize + * @param[in] linkMask A mask of link(s) to be reset. + * + * @returns : NVL_SUCCESS if there were no errors + * -NVL_BAD_PARAMS if input parameters are wrong. + * -NVL_ERR_INVALID_STATE if other errors are present and a full-chip reset is required. + * -NVL_INITIALIZATION_TOTAL_FAILURE if NPORT initialization failed and a retry is required. + */ + +NvlStatus +nvswitch_reset_and_drain_links_lr10 +( + nvswitch_device *device, + NvU64 link_mask +) +{ + NvlStatus status = -NVL_ERR_GENERIC; + nvlink_link *link_info; + NvU32 val; + NvU32 link; + NvU32 idx_nport; + NvU32 npg; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + NvU32 i; + NvU64 link_mode, tx_sublink_mode, rx_sublink_mode; + NvU32 tx_sublink_submode, rx_sublink_submode; + NvU32 *nport_reg_val = NULL; + NvU32 reg_count = NV_ARRAY_ELEMENTS(nport_reg_addr); + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + if ((link_mask == 0) || + (link_mask >> NVSWITCH_LINK_COUNT(device))) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Invalid link_mask = 0x%llx\n", + __FUNCTION__, link_mask); + + return -NVL_BAD_ARGS; + } + + // Check for in-active links + FOR_EACH_INDEX_IN_MASK(64, link, link_mask) + { + if (!nvswitch_is_link_valid(device, link)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link #%d invalid\n", + __FUNCTION__, link); + + continue; + } + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NPORT, link)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT #%d invalid\n", + __FUNCTION__, link); + + continue; + } + + if (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLW, link)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NVLW #%d invalid\n", + __FUNCTION__, link); + + continue; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // Buffer to backup NPORT state + nport_reg_val = nvswitch_os_malloc(sizeof(nport_reg_addr)); + if (nport_reg_val == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to allocate memory\n", + __FUNCTION__); + + return -NVL_NO_MEM; + } + + FOR_EACH_INDEX_IN_MASK(64, link, link_mask) + { + // Unregister links to make them unusable while reset is in progress. 
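+        // The link is registered again via nvlink_lib_register_link() at the end
+        // of this loop iteration, once the NPORT state has been restored.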
+ link_info = nvswitch_get_link(device, link); + if (link_info == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: invalid link %d\n", + __FUNCTION__, link); + continue; + } + + nvlink_lib_unregister_link(link_info); + + // + // Step 0 : + // Prior to starting port reset, FM must shutdown the NVlink links + // it wishes to reset. + // However, with shared-virtualization, FM is unable to shut down the links + // since the GPU is no longer attached to the service VM. + // In this case, we must perform unilateral shutdown on the LR10 side + // of the link. + // + // If links are in OFF or RESET, we don't need to perform shutdown + // If links already went through a proper pseudo-clean shutdown sequence, + // they'll be in SAFE + sublinks in OFF + // + + status = nvswitch_corelib_get_dl_link_mode_lr10(link_info, &link_mode); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to get link mode from link %d\n", + __FUNCTION__, link); + goto nvswitch_reset_and_drain_links_exit; + } + status = nvswitch_corelib_get_tx_mode_lr10(link_info, &tx_sublink_mode, &tx_sublink_submode); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to get tx sublink mode from link %d\n", + __FUNCTION__, link); + goto nvswitch_reset_and_drain_links_exit; + } + status = nvswitch_corelib_get_rx_mode_lr10(link_info, &rx_sublink_mode, &rx_sublink_submode); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to get rx sublink mode from link %d\n", + __FUNCTION__, link); + goto nvswitch_reset_and_drain_links_exit; + } + + if (!((link_mode == NVLINK_LINKSTATE_RESET) || + (link_mode == NVLINK_LINKSTATE_OFF) || + ((link_mode == NVLINK_LINKSTATE_SAFE) && + (tx_sublink_mode == NVLINK_SUBLINK_STATE_TX_OFF) && + (rx_sublink_mode == NVLINK_SUBLINK_STATE_RX_OFF)))) + { + nvswitch_execute_unilateral_link_shutdown_lr10(link_info); + _nvlink_clear_corelib_state(link_info); + } + + // + // Step 1 : Perform surgical reset + // Refer to switch IAS 11.5.2 Link Reset. + // + + // Step 1.a : Backup NPORT state before reset + for (i = 0; i < reg_count; i++) + { + nport_reg_val[i] = NVSWITCH_ENG_OFF_RD32(device, NPORT, _UNICAST, link, + nport_reg_addr[i]); + } + + // Step 1.b : Assert INGRESS_STOP / EGRESS_STOP + val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _CTRL_STOP); + val = FLD_SET_DRF(_NPORT, _CTRL_STOP, _INGRESS_STOP, _STOP, val); + val = FLD_SET_DRF(_NPORT, _CTRL_STOP, _EGRESS_STOP, _STOP, val); + NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _CTRL_STOP, val); + + // Wait for stop operation to take effect at TLC. + // Expected a minimum of 256 clk cycles. + nvswitch_os_sleep(1); + + // + // Step 1.c : Disable NPG & NVLW interrupts + // + _nvswitch_link_disable_interrupts_lr10(device, link); + + // Step 1.d : Assert NPortWarmReset + npg = link / NVSWITCH_LINKS_PER_NPG; + val = NVSWITCH_NPG_RD32_LR10(device, npg, _NPG, _WARMRESET); + + idx_nport = link % NVSWITCH_LINKS_PER_NPG; + NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _WARMRESET, + DRF_NUM(_NPG, _WARMRESET, _NPORTWARMRESET, ~NVBIT(idx_nport))); + + // Step 1.e : Initiate Minion reset sequence. 
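+        // The TL link-state request below asks MINION to move this link into RESET.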
+ status = nvswitch_request_tl_link_state_lr10(link_info, + NV_NVLIPT_LNK_CTRL_LINK_STATE_REQUEST_REQUEST_RESET, NV_TRUE); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NvLink Reset has failed for link %d\n", + __FUNCTION__, link); + goto nvswitch_reset_and_drain_links_exit; + } + + // Step 1.e : De-assert NPortWarmReset + NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _WARMRESET, val); + + // Step 1.f : Assert and De-assert NPort debug_clear + // to clear the error status + NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _DEBUG_CLEAR, + DRF_NUM(_NPG, _DEBUG_CLEAR, _CLEAR, NVBIT(idx_nport))); + + NVSWITCH_NPG_WR32_LR10(device, npg, _NPG, _DEBUG_CLEAR, + DRF_DEF(_NPG, _DEBUG_CLEAR, _CLEAR, _DEASSERT)); + + // Step 1.g : Clear CONTAIN_AND_DRAIN to clear contain state (Bug 3115824) + NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _CONTAIN_AND_DRAIN, + DRF_DEF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE)); + + val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _CONTAIN_AND_DRAIN); + if (FLD_TEST_DRF(_NPORT, _CONTAIN_AND_DRAIN, _CLEAR, _ENABLE, val)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NPORT Contain and Drain Clear has failed for link %d\n", + __FUNCTION__, link); + status = NVL_ERR_INVALID_STATE; + goto nvswitch_reset_and_drain_links_exit; + } + + // + // Step 2 : Assert NPORT Reset after Control & Drain routine. + // Clear Tagpool, CrumbStore and CAM RAMs + // + + // Step 2.a Clear Tagpool RAM + NVSWITCH_NPORT_WR32_LR10(device, link, _NPORT, _INITIALIZATION, + DRF_DEF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT)); + + nvswitch_timeout_create(25 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); + + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + // Check if NPORT initialization is done + val = NVSWITCH_NPORT_RD32_LR10(device, link, _NPORT, _INITIALIZATION); + if (FLD_TEST_DRF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT, val)) + { + break; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + if (!FLD_TEST_DRF(_NPORT, _INITIALIZATION, _TAGPOOLINIT_0, _HWINIT, val)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for TAGPOOL Initialization on link %d)\n", + __FUNCTION__, link); + + status = -NVL_INITIALIZATION_TOTAL_FAILURE; + goto nvswitch_reset_and_drain_links_exit; + } + + // Step 2.b Clear CrumbStore RAM + val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) | + DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CRUMBSTORE_RAM) | + DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 1); + + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_ADDRESS, val); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA1, 0x0); + + val = DRF_NUM(_TSTATE, _RAM_DATA0, _ECC, 0x7f); + for (i = 0; i <= NV_TSTATE_RAM_ADDRESS_ADDR_TAGPOOL_CRUMBSTORE_TDTID_DEPTH; i++) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA0, val); + } + + // Step 2.c Clear CAM RAM + val = DRF_NUM(_TSTATE, _RAM_ADDRESS, _ADDR, 0) | + DRF_DEF(_TSTATE, _RAM_ADDRESS, _SELECT, _CREQ_CAM) | + DRF_NUM(_TSTATE, _RAM_ADDRESS, _AUTO_INCR, 1); + + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_ADDRESS, val); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA1, 0x0); + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA2, 0x0); + + for (i = 0; i <= NV_TSTATE_RAM_ADDRESS_ADDR_CREQ_CAM_DEPTH; i++) + { + NVSWITCH_NPORT_WR32_LR10(device, link, _TSTATE, _RAM_DATA0, 0x0); + } + + // + // Step 3 : Restore link state + // + + // Restore NPORT state after reset + for (i = 0; i < reg_count; i++) + { + NVSWITCH_ENG_OFF_WR32(device, NPORT, _UNICAST, link, + 
nport_reg_addr[i], nport_reg_val[i]); + } + + // Initialize GLT + nvswitch_set_ganged_link_table_lr10(device, 0, chip_device->ganged_link_table, + ROUTE_GANG_TABLE_SIZE/2); + + // Initialize select scratch registers to 0x0 + nvswitch_init_scratch_lr10(device); + + // Reset NVLW and NPORT interrupt state + _nvswitch_link_reset_interrupts_lr10(device, link); + + // Re-register links. + status = nvlink_lib_register_link(device->nvlink_device, link_info); + if (status != NVL_SUCCESS) + { + nvswitch_destroy_link(link_info); + goto nvswitch_reset_and_drain_links_exit; + } + } + FOR_EACH_INDEX_IN_MASK_END; + +nvswitch_reset_and_drain_links_exit: + nvswitch_os_free(nport_reg_val); + return status; +} + +NvlStatus +nvswitch_get_nvlink_ecc_errors_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS *params +) +{ + NvU32 statData; + NvU8 i, j; + NvlStatus status; + NvBool bLaneReversed; + + nvswitch_os_memset(params->errorLink, 0, sizeof(params->errorLink)); + + FOR_EACH_INDEX_IN_MASK(64, i, params->linkMask) + { + nvlink_link *link; + NVSWITCH_LANE_ERROR *errorLane; + NvU8 offset; + NvBool minion_enabled; + NvU32 sublinkWidth; + + link = nvswitch_get_link(device, i); + sublinkWidth = device->hal.nvswitch_get_sublink_width(device, i); + + if ((link == NULL) || + !NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLDL, link->linkNumber) || + (i >= NVSWITCH_LINK_COUNT(device))) + { + return -NVL_BAD_ARGS; + } + + minion_enabled = nvswitch_is_minion_initialized(device, + NVSWITCH_GET_LINK_ENG_INST(device, link->linkNumber, MINION)); + + bLaneReversed = nvswitch_link_lane_reversed_lr10(device, link->linkNumber); + + for (j = 0; j < NVSWITCH_NVLINK_MAX_LANES; j++) + { + if (minion_enabled && (j < sublinkWidth)) + { + status = nvswitch_minion_get_dl_status(device, i, + (NV_NVLSTAT_RX12 + j), 0, &statData); + + if (status != NVL_SUCCESS) + { + return status; + } + offset = bLaneReversed ? 
((sublinkWidth - 1) - j) : j; + errorLane = ¶ms->errorLink[i].errorLane[offset]; + errorLane->valid = NV_TRUE; + } + else + { + // MINION disabled + statData = 0; + offset = j; + errorLane = ¶ms->errorLink[i].errorLane[offset]; + errorLane->valid = NV_FALSE; + } + + errorLane->eccErrorValue = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_VALUE, statData); + errorLane->overflowed = DRF_VAL(_NVLSTAT, _RX12, _ECC_CORRECTED_ERR_L0_OVER, statData); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return NVL_SUCCESS; +} + +static NvU32 +nvswitch_get_num_links_lr10 +( + nvswitch_device *device +) +{ + NvU32 num_links = NVSWITCH_NUM_LINKS_LR10; + return num_links; +} + +NvBool +nvswitch_is_link_valid_lr10 +( + nvswitch_device *device, + NvU32 link_id +) +{ + if (link_id >= nvswitch_get_num_links(device)) + { + return NV_FALSE; + } + return device->link[link_id].valid; +} + +NvlStatus +nvswitch_ctrl_get_fom_values_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_FOM_VALUES_PARAMS *p +) +{ + NvlStatus status; + NvU32 statData; + + NVSWITCH_ASSERT(p->linkId < nvswitch_get_num_links(device)); + + status = nvswitch_minion_get_dl_status(device, p->linkId, + NV_NVLSTAT_TR16, 0, &statData); + p->figureOfMeritValues[0] = (NvU16) (statData & 0xFFFF); + p->figureOfMeritValues[1] = (NvU16) ((statData >> 16) & 0xFFFF); + + status = nvswitch_minion_get_dl_status(device, p->linkId, + NV_NVLSTAT_TR17, 0, &statData); + p->figureOfMeritValues[2] = (NvU16) (statData & 0xFFFF); + p->figureOfMeritValues[3] = (NvU16) ((statData >> 16) & 0xFFFF); + + p->numLanes = nvswitch_get_sublink_width(device, p->linkId); + + return status; +} + +void +nvswitch_set_fatal_error_lr10 +( + nvswitch_device *device, + NvBool device_fatal, + NvU32 link_id +) +{ + NvU32 reg; + + NVSWITCH_ASSERT(link_id < nvswitch_get_num_links(device)); + + // On first fatal error, notify PORT_DOWN + if (!device->link[link_id].fatal_error_occurred) + { + if (nvswitch_lib_notify_client_events(device, + NVSWITCH_DEVICE_EVENT_PORT_DOWN) != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to notify PORT_DOWN event\n", + __FUNCTION__); + } + } + + device->link[link_id].fatal_error_occurred = NV_TRUE; + + if (device_fatal) + { + reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12); + reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_RESET_REQUIRED, + 1, reg); + + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _SW_SCRATCH_12, reg); + } + else + { + reg = NVSWITCH_LINK_RD32_LR10(device, link_id, NPORT, _NPORT, _SCRATCH_WARM); + reg = FLD_SET_DRF_NUM(_NPORT, _SCRATCH_WARM, _PORT_RESET_REQUIRED, + 1, reg); + + NVSWITCH_LINK_WR32_LR10(device, link_id, NPORT, _NPORT, _SCRATCH_WARM, reg); + } +} + +static NvU32 +nvswitch_get_latency_sample_interval_msec_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + return chip_device->latency_stats->sample_interval_msec; +} + +NvU32 +nvswitch_get_swap_clk_default_lr10 +( + nvswitch_device *device +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvBool +nvswitch_is_link_in_use_lr10 +( + nvswitch_device *device, + NvU32 link_id +) +{ + NvU32 data; + nvlink_link *link; + + link = nvswitch_get_link(device, link_id); + if (link == NULL) + { + // A query on an invalid link should never occur + NVSWITCH_ASSERT(link != NULL); + return NV_FALSE; + } + + if (nvswitch_is_link_in_reset(device, link)) + { + return NV_FALSE; + } + + data = NVSWITCH_LINK_RD32_LR10(device, link_id, + NVLDL, _NVLDL_TOP, _LINK_STATE); + + return (DRF_VAL(_NVLDL_TOP, _LINK_STATE, _STATE, data) != 
+ NV_NVLDL_TOP_LINK_STATE_STATE_INIT); +} + +static NvU32 +nvswitch_get_device_dma_width_lr10 +( + nvswitch_device *device +) +{ + return DMA_ADDR_WIDTH_LR10; +} + +NvU32 +nvswitch_get_link_ip_version_lr10 +( + nvswitch_device *device, + NvU32 link_id +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 nvldl_instance; + + nvldl_instance = NVSWITCH_GET_LINK_ENG_INST(device, link_id, NVLDL); + if (NVSWITCH_ENG_IS_VALID(device, NVLDL, nvldl_instance)) + { + return chip_device->engNVLDL[nvldl_instance].version; + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: NVLink[0x%x] NVLDL instance invalid\n", + __FUNCTION__, link_id); + return 0; + } +} + +static NvlStatus +nvswitch_test_soe_dma_lr10 +( + nvswitch_device *device +) +{ + return soeTestDma_HAL(device, (PSOE)device->pSoe); +} + +static NvlStatus +_nvswitch_get_reserved_throughput_counters +( + nvswitch_device *device, + nvlink_link *link, + NvU16 counter_mask, + NvU64 *counter_values +) +{ + NvU16 counter = 0; + + // + // LR10 to use counters 0 & 2 for monitoring + // (Same as GPU behavior) + // Counter 0 counts data flits + // Counter 2 counts all flits + // + FOR_EACH_INDEX_IN_MASK(16, counter, counter_mask) + { + NvU32 counter_type = NVBIT(counter); + NvU64 data = 0; + + switch (counter_type) + { + case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_TX: + { + data = nvswitch_read_64bit_counter(device, + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, + NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(0)), + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, + NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(0))); + break; + } + case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_DATA_RX: + { + data = nvswitch_read_64bit_counter(device, + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, + NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(0)), + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, + NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(0))); + break; + } + case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_TX: + { + data = nvswitch_read_64bit_counter(device, + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, + NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_LO(2)), + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, + NVLTLC, _NVLTLC_TX_LNK, _DEBUG_TP_CNTR_HI(2))); + break; + } + case NVSWITCH_THROUGHPUT_COUNTERS_TYPE_RAW_RX: + { + data = nvswitch_read_64bit_counter(device, + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, + NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_LO(2)), + NVSWITCH_LINK_OFFSET_LR10(device, link->linkNumber, + NVLTLC, _NVLTLC_RX_LNK, _DEBUG_TP_CNTR_HI(2))); + break; + } + default: + { + return -NVL_ERR_NOT_SUPPORTED; + } + } + counter_values[counter] = data; + } + FOR_EACH_INDEX_IN_MASK_END; + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_get_throughput_counters_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p +) +{ + NvlStatus status; + nvlink_link *link; + NvU16 i = 0; + + nvswitch_os_memset(p->counters, 0, sizeof(p->counters)); + + FOR_EACH_INDEX_IN_MASK(64, i, p->linkMask) + { + link = nvswitch_get_link(device, i); + if ((link == NULL) || (link->linkNumber >= NVSWITCH_MAX_PORTS) || + (!NVSWITCH_IS_LINK_ENG_VALID_LR10(device, NVLTLC, link->linkNumber))) + { + continue; + } + + status = _nvswitch_get_reserved_throughput_counters(device, link, p->counterMask, + p->counters[link->linkNumber].values); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to get reserved NVLINK throughput counters on link %d\n", + link->linkNumber); + return status; + } + } + 
FOR_EACH_INDEX_IN_MASK_END; + + return NVL_SUCCESS; +} + +static NvBool +nvswitch_is_soe_supported_lr10 +( + nvswitch_device *device +) +{ + return NV_TRUE; +} + +NvBool +nvswitch_is_inforom_supported_lr10 +( + nvswitch_device *device +) +{ + if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device)) + { + NVSWITCH_PRINT(device, INFO, + "INFOROM is not supported on non-silicon platform\n"); + return NV_FALSE; + } + + if (!nvswitch_is_soe_supported(device)) + { + NVSWITCH_PRINT(device, INFO, + "INFOROM is not supported since SOE is not supported\n"); + return NV_FALSE; + } + + return NV_TRUE; +} + +NvBool +nvswitch_is_spi_supported_lr10 +( + nvswitch_device *device +) +{ + return nvswitch_is_soe_supported(device); +} + +NvBool +nvswitch_is_smbpbi_supported_lr10 +( + nvswitch_device *device +) +{ + if (IS_RTLSIM(device) || IS_FMODEL(device)) + { + NVSWITCH_PRINT(device, INFO, + "SMBPBI is not supported on RTLSIM/FMODEL platforms\n"); + return NV_FALSE; + } + + if (!nvswitch_is_soe_supported(device)) + { + NVSWITCH_PRINT(device, INFO, + "SMBPBI is not supported since SOE is not supported\n"); + return NV_FALSE; + } + + return NV_TRUE; +} + +/* + * @Brief : Additional setup needed after device initialization + * + * @Description : + * + * @param[in] device a reference to the device to initialize + */ +NvlStatus +nvswitch_post_init_device_setup_lr10 +( + nvswitch_device *device +) +{ + NvlStatus retval; + + if (device->regkeys.soe_dma_self_test == + NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_DISABLE) + { + NVSWITCH_PRINT(device, INFO, + "Skipping SOE DMA selftest as requested using regkey\n"); + } + else if (IS_RTLSIM(device) || IS_FMODEL(device)) + { + NVSWITCH_PRINT(device, SETUP, + "Skipping DMA selftest on FMODEL/RTLSIM platforms\n"); + } + else if (!nvswitch_is_soe_supported(device)) + { + NVSWITCH_PRINT(device, SETUP, + "Skipping DMA selftest since SOE is not supported\n"); + } + else + { + retval = nvswitch_test_soe_dma_lr10(device); + if (retval != NVL_SUCCESS) + { + return retval; + } + } + + if (nvswitch_is_inforom_supported(device)) + { + nvswitch_inforom_post_init(device); + } + else + { + NVSWITCH_PRINT(device, SETUP, "Skipping INFOROM init\n"); + } + + return NVL_SUCCESS; +} + +/* + * @Brief : Additional setup needed after blacklisted device initialization + * + * @Description : + * + * @param[in] device a reference to the device to initialize + */ +void +nvswitch_post_init_blacklist_device_setup_lr10 +( + nvswitch_device *device +) +{ + NvlStatus status; + + if (nvswitch_is_inforom_supported(device)) + { + nvswitch_inforom_post_init(device); + } + + // + // Initialize the driver state monitoring callback. + // This is still needed for SOE to report correct driver state. + // + status = nvswitch_smbpbi_post_init(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Smbpbi post init failed, rc:%d\n", + status); + return; + } + + // + // This internally will only flush if OMS value has changed + // + status = device->hal.nvswitch_oms_inforom_flush(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Flushing OMS failed, rc:%d\n", + status); + return; + } +} + +void +nvswitch_load_uuid_lr10 +( + nvswitch_device *device +) +{ + NvU32 regData[4]; + + // + // Read 128-bit UUID from secure scratch registers which must be + // populated by firmware. 
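+    // The four 32-bit scratch reads below are copied into the 16-byte UUID in
+    // register order (SCRATCH_8 first).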
+ // + regData[0] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_8); + regData[1] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_9); + regData[2] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_10); + regData[3] = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_11); + + nvswitch_os_memcpy(&device->uuid.uuid, (NvU8 *)regData, NV_UUID_LEN); +} + +NvlStatus +nvswitch_read_oob_blacklist_state_lr10 +( + nvswitch_device *device +) +{ + NvU32 reg; + NvBool is_oob_blacklist; + NvlStatus status; + + if (device == NULL) + { + NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__); + return -NVL_BAD_ARGS; + } + + reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SCRATCH_COLD); + + // Check for uninitialized SCRATCH_COLD before declaring the device blacklisted + if (reg == NV_NVLSAW_SCRATCH_COLD_DATA_INIT) + is_oob_blacklist = NV_FALSE; + else + is_oob_blacklist = DRF_VAL(_NVLSAW, _SCRATCH_COLD, _OOB_BLACKLIST_DEVICE_REQUESTED, reg); + + status = nvswitch_inforom_oms_set_device_disable(device, is_oob_blacklist); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to set device disable to %d, rc:%d\n", + is_oob_blacklist, status); + } + + if (is_oob_blacklist) + { + device->device_fabric_state = NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED; + device->device_blacklist_reason = NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_OUT_OF_BAND; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_write_fabric_state_lr10 +( + nvswitch_device *device +) +{ + NvU32 reg; + + if (device == NULL) + { + NVSWITCH_PRINT(device, ERROR, "%s: Called with invalid argument\n", __FUNCTION__); + return -NVL_BAD_ARGS; + } + + // bump the sequence number for each write + device->fabric_state_sequence_number++; + + reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12); + + reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_BLACKLIST_REASON, + device->device_blacklist_reason, reg); + reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_FABRIC_STATE, + device->device_fabric_state, reg); + reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DRIVER_FABRIC_STATE, + device->driver_fabric_state, reg); + reg = FLD_SET_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _EVENT_MESSAGE_COUNT, + device->fabric_state_sequence_number, reg); + + NVSWITCH_SAW_WR32_LR10(device, _NVLSAW, _SW_SCRATCH_12, reg); + + return NVL_SUCCESS; +} + +static NVSWITCH_ENGINE_DESCRIPTOR_TYPE * +_nvswitch_get_eng_descriptor_lr10 +( + nvswitch_device *device, + NVSWITCH_ENGINE_ID eng_id +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine = NULL; + + if (eng_id >= NVSWITCH_ENGINE_ID_SIZE) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Engine_ID 0x%x out of range 0..0x%x\n", + __FUNCTION__, + eng_id, NVSWITCH_ENGINE_ID_SIZE-1); + return NULL; + } + + engine = &(chip_device->io.common[eng_id]); + NVSWITCH_ASSERT(eng_id == engine->eng_id); + + return engine; +} + +NvU32 +nvswitch_get_eng_base_lr10 +( + nvswitch_device *device, + NVSWITCH_ENGINE_ID eng_id, + NvU32 eng_bcast, + NvU32 eng_instance +) +{ + NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine; + NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID; + + engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id); + if (engine == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ID 0x%x[%d] %s not found\n", + __FUNCTION__, + eng_id, eng_instance, + ( + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? 
"BC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" : + "??" + )); + return NVSWITCH_BASE_ADDR_INVALID; + } + + if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) && + (eng_instance < engine->eng_count)) + { + base_addr = engine->uc_addr[eng_instance]; + } + else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) + { + base_addr = engine->bc_addr; + } + else if ((eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) && + (eng_instance < engine->mc_addr_count)) + { + base_addr = engine->mc_addr[eng_instance]; + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n", + __FUNCTION__, + eng_bcast); + } + + if (base_addr == NVSWITCH_BASE_ADDR_INVALID) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ID 0x%x[%d] %s invalid address\n", + __FUNCTION__, + eng_id, eng_instance, + ( + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" : + "??" + )); + } + + return base_addr; +} + +NvU32 +nvswitch_get_eng_count_lr10 +( + nvswitch_device *device, + NVSWITCH_ENGINE_ID eng_id, + NvU32 eng_bcast +) +{ + NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine; + NvU32 eng_count = 0; + + engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id); + if (engine == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ID 0x%x %s not found\n", + __FUNCTION__, + eng_id, + ( + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" : + "??" + )); + return 0; + } + + if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) + { + eng_count = engine->eng_count; + } + else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) + { + if (engine->bc_addr == NVSWITCH_BASE_ADDR_INVALID) + { + eng_count = 0; + } + else + { + eng_count = 1; + } + } + else if (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) + { + eng_count = engine->mc_addr_count; + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unknown address space type 0x%x (not UC, BC, or MC)\n", + __FUNCTION__, + eng_bcast); + } + + return eng_count; +} + +NvU32 +nvswitch_eng_rd_lr10 +( + nvswitch_device *device, + NVSWITCH_ENGINE_ID eng_id, + NvU32 eng_bcast, + NvU32 eng_instance, + NvU32 offset +) +{ + NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID; + NvU32 data; + + base_addr = nvswitch_get_eng_base_lr10(device, eng_id, eng_bcast, eng_instance); + if (base_addr == NVSWITCH_BASE_ADDR_INVALID) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ID 0x%x[%d] %s invalid address\n", + __FUNCTION__, + eng_id, eng_instance, + ( + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" : + "??" 
+ )); + NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID); + return 0xBADFBADF; + } + + data = nvswitch_reg_read_32(device, base_addr + offset); + +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + { + NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id); + + NVSWITCH_PRINT(device, MMIO, + "%s: ENG_RD %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n", + __FUNCTION__, + engine->eng_name, engine->eng_id, + eng_instance, + base_addr, offset, + data); + } +#endif //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + + return data; +} + +void +nvswitch_eng_wr_lr10 +( + nvswitch_device *device, + NVSWITCH_ENGINE_ID eng_id, + NvU32 eng_bcast, + NvU32 eng_instance, + NvU32 offset, + NvU32 data +) +{ + NvU32 base_addr = NVSWITCH_BASE_ADDR_INVALID; + + base_addr = nvswitch_get_eng_base_lr10(device, eng_id, eng_bcast, eng_instance); + if (base_addr == NVSWITCH_BASE_ADDR_INVALID) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ID 0x%x[%d] %s invalid address\n", + __FUNCTION__, + eng_id, eng_instance, + ( + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_UNICAST) ? "UC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_BCAST) ? "BC" : + (eng_bcast == NVSWITCH_GET_ENG_DESC_TYPE_MULTICAST) ? "MC" : + "??" + )); + NVSWITCH_ASSERT(base_addr != NVSWITCH_BASE_ADDR_INVALID); + return; + } + + nvswitch_reg_write_32(device, base_addr + offset, data); + +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + { + NVSWITCH_ENGINE_DESCRIPTOR_TYPE *engine = _nvswitch_get_eng_descriptor_lr10(device, eng_id); + + NVSWITCH_PRINT(device, MMIO, + "%s: ENG_WR %s(0x%x)[%d] @0x%08x+0x%06x = 0x%08x\n", + __FUNCTION__, + engine->eng_name, engine->eng_id, + eng_instance, + base_addr, offset, + data); + } +#endif //defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) +} + +NvU32 +nvswitch_get_link_eng_inst_lr10 +( + nvswitch_device *device, + NvU32 link_id, + NVSWITCH_ENGINE_ID eng_id +) +{ + NvU32 eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID; + + if (link_id >= NVSWITCH_LINK_COUNT(device)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: link ID 0x%x out-of-range [0x0..0x%x]\n", + __FUNCTION__, + link_id, NVSWITCH_LINK_COUNT(device)-1); + return NVSWITCH_ENGINE_INSTANCE_INVALID; + } + + switch (eng_id) + { + case NVSWITCH_ENGINE_ID_NPG: + eng_instance = link_id / NVSWITCH_LINKS_PER_NPG; + break; + case NVSWITCH_ENGINE_ID_NVLIPT: + eng_instance = link_id / NVSWITCH_LINKS_PER_NVLIPT; + break; + case NVSWITCH_ENGINE_ID_NVLW: + case NVSWITCH_ENGINE_ID_NVLW_PERFMON: + eng_instance = link_id / NVSWITCH_LINKS_PER_NVLW; + break; + case NVSWITCH_ENGINE_ID_MINION: + eng_instance = link_id / NVSWITCH_LINKS_PER_MINION; + break; + case NVSWITCH_ENGINE_ID_NPORT: + case NVSWITCH_ENGINE_ID_NVLTLC: + case NVSWITCH_ENGINE_ID_NVLDL: + case NVSWITCH_ENGINE_ID_NVLIPT_LNK: + case NVSWITCH_ENGINE_ID_NPORT_PERFMON: + case NVSWITCH_ENGINE_ID_RX_PERFMON: + case NVSWITCH_ENGINE_ID_TX_PERFMON: + eng_instance = link_id; + break; + default: + NVSWITCH_PRINT(device, ERROR, + "%s: link ID 0x%x has no association with EngID 0x%x\n", + __FUNCTION__, + link_id, eng_id); + eng_instance = NVSWITCH_ENGINE_INSTANCE_INVALID; + break; + } + + return eng_instance; +} + +NvU32 +nvswitch_get_caps_nvlink_version_lr10 +( + nvswitch_device *device +) +{ + ct_assert(NVSWITCH_NVLINK_STATUS_NVLINK_VERSION_3_0 == + NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0); + return NVSWITCH_NVLINK_CAPS_NVLINK_VERSION_3_0; +} + +NVSWITCH_BIOS_NVLINK_CONFIG * +nvswitch_get_bios_nvlink_config_lr10 +( + nvswitch_device *device +) +{ + lr10_device 
*chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + return (chip_device != NULL) ? &chip_device->bios_config : NULL; +} + +/* + * CTRL_NVSWITCH_SET_RESIDENCY_BINS + */ +static NvlStatus +nvswitch_ctrl_set_residency_bins_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_RESIDENCY_BINS *p +) +{ + NVSWITCH_PRINT(device, ERROR, + "SET_RESIDENCY_BINS should not be called on LR10\n"); + return -NVL_ERR_NOT_SUPPORTED; +} + +/* + * CTRL_NVSWITCH_GET_RESIDENCY_BINS + */ +static NvlStatus +nvswitch_ctrl_get_residency_bins_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_RESIDENCY_BINS *p +) +{ + NVSWITCH_PRINT(device, ERROR, + "GET_RESIDENCY_BINS should not be called on LR10\n"); + return -NVL_ERR_NOT_SUPPORTED; +} + +/* + * CTRL_NVSWITCH_GET_RB_STALL_BUSY + */ +static NvlStatus +nvswitch_ctrl_get_rb_stall_busy_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_RB_STALL_BUSY *p +) +{ + NVSWITCH_PRINT(device, ERROR, + "GET_RB_STALL_BUSY should not be called on LR10\n"); + return -NVL_ERR_NOT_SUPPORTED; +} + +/* +* @brief: This function retrieves the NVLIPT public ID for a given global link idx +* @params[in] device reference to current nvswitch device +* @params[in] linkId link to retrieve NVLIPT public ID from +* @params[out] publicId Public ID of NVLIPT owning linkId +*/ +NvlStatus nvswitch_get_link_public_id_lr10 +( + nvswitch_device *device, + NvU32 linkId, + NvU32 *publicId +) +{ + if (!device->hal.nvswitch_is_link_valid(device, linkId) || + (publicId == NULL)) + { + return -NVL_BAD_ARGS; + } + + *publicId = NVSWITCH_NVLIPT_GET_PUBLIC_ID_LR10(linkId); + + + return (NVSWITCH_ENG_VALID_LR10(device, NVLIPT, *publicId)) ? + NVL_SUCCESS : -NVL_BAD_ARGS; +} + +/* +* @brief: This function retrieves the internal link idx for a given global link idx +* @params[in] device reference to current nvswitch device +* @params[in] linkId link to retrieve NVLIPT public ID from +* @params[out] localLinkIdx Internal link index of linkId +*/ +NvlStatus nvswitch_get_link_local_idx_lr10 +( + nvswitch_device *device, + NvU32 linkId, + NvU32 *localLinkIdx +) +{ + if (!device->hal.nvswitch_is_link_valid(device, linkId) || + (localLinkIdx == NULL)) + { + return -NVL_BAD_ARGS; + } + + *localLinkIdx = NVSWITCH_NVLIPT_GET_LOCAL_LINK_ID_LR10(linkId); + + return NVL_SUCCESS; +} + +NvlStatus nvswitch_set_training_error_info_lr10 +( + nvswitch_device *device, + NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *pLinkTrainingErrorInfoParams +) +{ + NVSWITCH_LINK_TRAINING_ERROR_INFO linkTrainingErrorInfo; + NVSWITCH_LINK_RUNTIME_ERROR_INFO linkRuntimeErrorInfo; + + linkTrainingErrorInfo.isValid = NV_TRUE; + linkTrainingErrorInfo.attemptedTrainingMask0 = + pLinkTrainingErrorInfoParams->attemptedTrainingMask0; + linkTrainingErrorInfo.trainingErrorMask0 = + pLinkTrainingErrorInfoParams->trainingErrorMask0; + + linkRuntimeErrorInfo.isValid = NV_FALSE; + linkRuntimeErrorInfo.mask0 = 0; + + return nvswitch_smbpbi_set_link_error_info(device, + &linkTrainingErrorInfo, + &linkRuntimeErrorInfo); +} + +NvlStatus nvswitch_ctrl_get_fatal_error_scope_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams +) +{ + NvU32 linkId; + NvU32 reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW, _SW_SCRATCH_12); + pParams->device = FLD_TEST_DRF_NUM(_NVLSAW, _SW_SCRATCH_12, _DEVICE_RESET_REQUIRED, + 1, reg); + + for (linkId = 0; linkId < NVSWITCH_MAX_PORTS; linkId++) + { + if (!nvswitch_is_link_valid(device, linkId)) + { + pParams->port[linkId] = NV_FALSE; + continue; + } + + reg = NVSWITCH_LINK_RD32_LR10(device, linkId, NPORT, _NPORT, 
_SCRATCH_WARM); + pParams->port[linkId] = FLD_TEST_DRF_NUM(_NPORT, _SCRATCH_WARM, + _PORT_RESET_REQUIRED, 1, reg); + } + + return NVL_SUCCESS; +} + +void nvswitch_init_scratch_lr10 +( + nvswitch_device *device +) +{ + NvU32 linkId; + NvU32 reg; + + for (linkId = 0; linkId < nvswitch_get_num_links(device); linkId++) + { + if (!nvswitch_is_link_valid(device, linkId)) + { + continue; + } + + reg = NVSWITCH_LINK_RD32_LR10(device, linkId, NPORT, _NPORT, _SCRATCH_WARM); + if (reg == NV_NPORT_SCRATCH_WARM_DATA_INIT) + { + NVSWITCH_LINK_WR32_LR10(device, linkId, NPORT, _NPORT, _SCRATCH_WARM, 0); + } + } +} + +NvlStatus +nvswitch_set_training_mode_lr10 +( + nvswitch_device *device +) +{ + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_parse_bios_image_lr10 +( + nvswitch_device *device +) +{ + NVSWITCH_BIOS_NVLINK_CONFIG *bios_config; + NV_STATUS status = NV_OK; + + // check if spi is supported + if (!nvswitch_is_spi_supported(device)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SPI is not supported\n", + __FUNCTION__); + return -NVL_ERR_NOT_SUPPORTED; + } + + bios_config = nvswitch_get_bios_nvlink_config(device); + + // Parse and retrieve the VBIOS info + status = _nvswitch_setup_link_vbios_overrides(device, bios_config); + if ((status != NV_OK) && device->pSoe) + { + NVSWITCH_PRINT(device, ERROR, + "%s: error=0x%x\n", + __FUNCTION__, status); + + return -NVL_ERR_GENERIC; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_get_nvlink_lp_counters_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *params +) +{ + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_ctrl_get_sw_info_lr10 +( + nvswitch_device *device, + NVSWITCH_GET_SW_INFO_PARAMS *p +) +{ + NvlStatus retval = NVL_SUCCESS; + NvU32 i; + + if (p->count > NVSWITCH_GET_SW_INFO_COUNT_MAX) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Invalid args\n", + __FUNCTION__); + return -NVL_BAD_ARGS; + } + + nvswitch_os_memset(p->info, 0, sizeof(NvU32)*NVSWITCH_GET_SW_INFO_COUNT_MAX); + + for (i = 0; i < p->count; i++) + { + switch (p->index[i]) + { + case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_NVL_SUPPORTED: + p->info[i] = (NvU32)_nvswitch_inforom_nvl_supported(device); + break; + case NVSWITCH_GET_SW_INFO_INDEX_INFOROM_BBX_SUPPORTED: + p->info[i] = (NvU32)_nvswitch_inforom_bbx_supported(device); + break; + default: + NVSWITCH_PRINT(device, ERROR, + "%s: Undefined NVSWITCH_GET_SW_INFO_INDEX 0x%x\n", + __FUNCTION__, + p->index[i]); + retval = -NVL_BAD_ARGS; + break; + } + } + + return retval; +} + +// +// This function auto creates the lr10 HAL connectivity from the NVSWITCH_INIT_HAL +// macro in haldef_nvswitch.h +// +// Note: All hal fns must be implemented for each chip. +// There is no automatic stubbing here. +// +void nvswitch_setup_hal_lr10(nvswitch_device *device) +{ + device->chip_arch = NVSWITCH_GET_INFO_INDEX_ARCH_LR10; + + { + device->chip_impl = NVSWITCH_GET_INFO_INDEX_IMPL_LR10; + } + + NVSWITCH_INIT_HAL(device, lr10); +} diff --git a/src/common/nvswitch/kernel/lr10/minion_lr10.c b/src/common/nvswitch/kernel/lr10/minion_lr10.c new file mode 100644 index 000000000..7c49dcb87 --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/minion_lr10.c @@ -0,0 +1,1390 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlink_export.h" +#include "common_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/minion_lr10.h" +#include "lr10/minion_production_ucode_lr10_dbg.h" +#include "lr10/minion_production_ucode_lr10_prod.h" +#include "regkey_nvswitch.h" + +#include "nvswitch/lr10/dev_nvlipt_lnk_ip.h" +#include "nvswitch/lr10/dev_minion_ip.h" +#include "nvswitch/lr10/dev_minion_ip_addendum.h" +#include "nvswitch/lr10/dev_ingress_ip.h" +#include "nvswitch/lr10/dev_egress_ip.h" + +/* + * @Brief : Check if MINION is already running. + * + * The function assumes that if one of MINIONs is running, all of them are + * running. This approach needs to be fixed. + * + * TODO: Refactor minion code to check for each minion's status individually. + * + * @param[in] device Bootstrap MINIONs on this device + */ +static NvBool +_nvswitch_check_running_minions +( + nvswitch_device *device +) +{ + NvU32 data, i; + NvBool bMinionRunning = NV_FALSE; + + for (i = 0; i < NVSWITCH_ENG_COUNT(device, MINION, ); i++) + { + if (!NVSWITCH_ENG_IS_VALID(device, MINION, i)) + { + NVSWITCH_PRINT(device, SETUP, + "%s: MINION instance %d is not valid.\n", + __FUNCTION__, i); + continue; + } + + data = NVSWITCH_MINION_RD32_LR10(device, i, _CMINION, _FALCON_IRQSTAT); + if (FLD_TEST_DRF(_CMINION, _FALCON_IRQSTAT, _HALT, _FALSE, data)) + { + data = NVSWITCH_MINION_RD32_LR10(device, i, _MINION, _MINION_STATUS); + if (FLD_TEST_DRF(_MINION, _MINION_STATUS, _STATUS, _BOOT, data)) + { + // + // Set initialized flag if MINION is running. + // We don't want to bootstrap a falcon that is already running. 
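+                // A MINION is considered running when its Falcon has not halted
+                // (_FALCON_IRQSTAT _HALT is _FALSE) and _MINION_STATUS reports _BOOT.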
+ // + nvswitch_set_minion_initialized(device, i, NV_TRUE); + + NVSWITCH_PRINT(device, SETUP, + "%s: MINION instance %d is already bootstrapped.\n", + __FUNCTION__, i); + bMinionRunning = NV_TRUE; + } + } + } + + return bMinionRunning; +} + +/* + * @Brief : MINION pre init routine + * Waits for scrubbing to finish + * + * @param[in] device MINIONs on this device + */ +static NvlStatus +_nvswitch_minion_pre_init +( + nvswitch_device *device +) +{ + NvU32 data; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + NvU32 idx_minion; + NvlStatus status = NVL_SUCCESS; + NvU32 falconIntrMask, falconIntrDest; + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + // Find first valid MINION instance + for (idx_minion = 0; idx_minion < NVSWITCH_ENG_COUNT(device, MINION, ); idx_minion++) + { + if (NVSWITCH_ENG_IS_VALID(device, MINION, idx_minion)) + { + break; + } + } + if (idx_minion >= NVSWITCH_ENG_COUNT(device, MINION, )) + { + NVSWITCH_PRINT(device, SETUP, + "%s: No MINIONs instantiated. Skipping MINION pre-init\n", + __FUNCTION__); + goto _nvswitch_minion_pre_init_exit; + } + + // Since we are not using Falcon DMA to load ucode, set REQUIRE_CTX to FALSE + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_DMACTL, 0x0); + + // + // Initialize falcon specific interrupts before MINION is loaded. + // Once MINION is loaded, these registers get locked down. + // + + // falcon interrupt mask is set through IRQMSET + falconIntrMask = (DRF_DEF(_CMINION, _FALCON_IRQMSET, _WDTMR, _SET) | + DRF_DEF(_CMINION, _FALCON_IRQMSET, _HALT, _SET) | + DRF_DEF(_CMINION, _FALCON_IRQMSET, _EXTERR, _SET)| + DRF_DEF(_CMINION, _FALCON_IRQMSET, _SWGEN0, _SET)| + DRF_DEF(_CMINION, _FALCON_IRQMSET, _SWGEN1, _SET)); + + // falcon interrupt routing to the HOST + falconIntrDest = (DRF_DEF(_CMINION, _FALCON_IRQDEST, _HOST_WDTMR, _HOST) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _HOST_HALT, _HOST) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _HOST_EXTERR, _HOST) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _HOST_SWGEN0, _HOST) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _HOST_SWGEN1, _HOST) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _TARGET_WDTMR, _HOST_NORMAL) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _TARGET_HALT, _HOST_NORMAL) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _TARGET_EXTERR, _HOST_NORMAL) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _TARGET_SWGEN0, _HOST_NORMAL) | + DRF_DEF(_CMINION, _FALCON_IRQDEST, _TARGET_SWGEN1, _HOST_NORMAL)); + + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IRQMSET, falconIntrMask); + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IRQDEST, falconIntrDest); + chip_device->intr_minion_dest = falconIntrDest; + + // + // As soon as we access any falcon reg (above), the scrubber will start scrubbing + // IMEM and DMEM. Wait for the scrubber to finish scrubbing. + // + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + nvswitch_timeout_create(10*NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + else + { + nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout); + } + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? 
NV_FALSE : NV_TRUE; + + // Check if scrubbing was done for first enabled MINION + data = NVSWITCH_MINION_RD32_LR10(device, idx_minion, _CMINION, _FALCON_DMACTL); + if (FLD_TEST_DRF(_CMINION, _FALCON_DMACTL, _DMEM_SCRUBBING, _DONE, data) && + FLD_TEST_DRF(_CMINION, _FALCON_DMACTL, _IMEM_SCRUBBING, _DONE, data)) + { + break; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + if (!FLD_TEST_DRF(_CMINION, _FALCON_DMACTL, _DMEM_SCRUBBING, _DONE, data) || + !FLD_TEST_DRF(_CMINION, _FALCON_DMACTL, _IMEM_SCRUBBING, _DONE, data)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for scrubbing to finish on MINION %d.\n", + __FUNCTION__, idx_minion); + status = -NVL_ERR_INVALID_STATE; + return status; + } + +_nvswitch_minion_pre_init_exit: + return status; +} + +/* + * @Brief : Copy the minion ucode to IMEM and DMEM in broadcast mode + * + * @param[in] device Copy ucode to all MINIONS associated with the device + */ +static NvlStatus +_nvswitch_minion_copy_ucode_bc +( + nvswitch_device *device, + const NvU32 *minion_ucode_data, + const NvU32 *minion_ucode_header +) +{ + const PFALCON_UCODE_HDR_INFO_LR10 pUcodeHeader = + (PFALCON_UCODE_HDR_INFO_LR10) &minion_ucode_header[0]; + const NvU32 *pHeader = &minion_ucode_header[0]; + + NvU32 data, i, app, dataSize; + NvU32 appCodeOffset, appCodeSize, appDataOffset, appDataSize; + NvU16 tag; + NvU32 idx_minion; + + // Find first valid MINION instance + for (idx_minion = 0; idx_minion < NVSWITCH_ENG_COUNT(device, MINION, ); idx_minion++) + { + if (NVSWITCH_ENG_IS_VALID(device, MINION, idx_minion)) + { + break; + } + } + if (idx_minion >= NVSWITCH_ENG_COUNT(device, MINION, )) + { + NVSWITCH_PRINT(device, SETUP, + "%s: No MINIONs instantiated. Skipping MINION ucode load\n", + __FUNCTION__); + goto _nvswitch_minion_copy_ucode_bc_exit; + } + + dataSize = sizeof(minion_ucode_data[0]); + + // Initialize address of IMEM to 0x0 and set auto-increment on write + data = 0; + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_IMEMC, _OFFS, 0x0, data); + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_IMEMC, _BLK, 0x0, data); + data = FLD_SET_DRF(_CMINION, _FALCON_IMEMC, _AINCW, _TRUE, data); + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IMEMC(0), data); + + // + // Initialize IMEM tag to 0 explicitly even though power-on value is 0. 
+ // Writes to IMEM don't work if we don't do this + // + tag = 0; + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IMEMT(0), tag); + + // Copy over IMEM part of the ucode and tag along the way + for (i = 0; i < (pUcodeHeader->osCodeSize / dataSize) ; i++) + { + // Increment tag for after every block (256 bytes) + if (i && ((i % (FALCON_IMEM_BLK_SIZE_BYTES_LR10 / dataSize)) == 0)) + { + tag++; + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IMEMT(0), (NvU32) tag); + } + + // Copy IMEM DWORD by DWORD + data = minion_ucode_data[(pUcodeHeader->osCodeOffset / dataSize) + i]; + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IMEMD(0), data); + } + + // Initialize address of DMEM to 0x0 and set auto-increment on write + data = 0; + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_DMEMC, _OFFS, 0x0, data); + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_DMEMC, _BLK, 0x0, data); + data = FLD_SET_DRF(_CMINION, _FALCON_DMEMC, _AINCW, _TRUE, data); + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_DMEMC(0), data); + + // Copy over DMEM part of the ucode + for (i = 0; i < (pUcodeHeader->osDataSize / dataSize) ; i++) + { + // Copy DMEM DWORD by DWORD + data = minion_ucode_data[(pUcodeHeader->osDataOffset / dataSize) + i]; + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_DMEMD(0), data); + } + + // Copy over any apps in the ucode with the appropriate tags + if (pUcodeHeader->numApps) + { + for (app = 0; app < pUcodeHeader->numApps ; app++) + { + // Index into the app code info + appCodeOffset = pHeader[FALCON_CODE_HDR_APP_CODE_START_LR10 + 2*app]; + appCodeSize = pHeader[FALCON_CODE_HDR_APP_CODE_START_LR10 + 2*app + 1]; + + // Index into the app data info using appCodeStart offset as a base + appDataOffset = pHeader[FALCON_CODE_HDR_APP_CODE_START_LR10 + + 2*pUcodeHeader->numApps + 2*app]; + appDataSize = pHeader[FALCON_CODE_HDR_APP_CODE_START_LR10 + + 2*pUcodeHeader->numApps + 2*app + 1]; + + // Mark the following IMEM blocks as secure + data = NVSWITCH_MINION_RD32_LR10(device, idx_minion, _CMINION, _FALCON_IMEMC(0)); + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_IMEMC, _SECURE, 0x1, data); + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IMEMC(0), data); + + // Copy to IMEM and tag along the way + tag = (NvU16)(appCodeOffset >> 8); + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IMEMT(0), tag); + + // Copy app code to IMEM picking up where last load left off + for (i = 0; i < (appCodeSize / dataSize); i++) + { + if (i && ((i % (FALCON_IMEM_BLK_SIZE_BYTES_LR10 / dataSize)) == 0)) + { + tag++; + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IMEMT(0), tag); + } + + data = minion_ucode_data[(appCodeOffset / dataSize) + i]; + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_IMEMD(0), data); + } + + // Copy app data to DMEM picking up where last load left off + for (i = 0; i < (appDataSize / dataSize); i++) + { + data = minion_ucode_data[appDataOffset + i]; + NVSWITCH_MINION_WR32_BCAST_LR10(device, _CMINION, _FALCON_DMEMD(0), data); + } + } + } + +_nvswitch_minion_copy_ucode_bc_exit: + return NVL_SUCCESS; +} + +/* + * @brief : Print MINION ucode (first 8 DWORDS). + * This is used for diagnostic purposes only. 
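+ * The dump relies on the IMEMC/DMEMC auto-increment read mode (_AINCR), so eight
+ * back-to-back reads of the data port return the first eight DWORDs.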
+ * + * @param[in] device Print ucode for a MINION on this device + * @param[in] link Print ucode for MINION associated with the link + */ +static void +_nvswitch_minion_print_ucode +( + nvswitch_device *device, + NvU32 instance +) +{ +#if defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS) + NvU32 data, i; + NvU32 buf[8]; + + data = 0; + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_IMEMC, _OFFS, 0x0, data); + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_IMEMC, _BLK, 0x0, data); + data = FLD_SET_DRF(_CMINION, _FALCON_IMEMC, _AINCR, _TRUE, data); + NVSWITCH_MINION_WR32_LR10(device, instance, _CMINION, _FALCON_IMEMC(0), data); + + NVSWITCH_PRINT(device, SETUP, "MINION IMEMD = \n"); + for (i = 0; i < 8 ; i++) + { + buf[i] = NVSWITCH_MINION_RD32_LR10(device, instance, _CMINION, _FALCON_IMEMD(0)); + } + NVSWITCH_PRINT(device, SETUP, " 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); + + NVSWITCH_PRINT(device, SETUP, "MINION IMEMC = \n"); + for (i = 0; i < 8 ; i++) + { + buf[i] = NVSWITCH_MINION_RD32_LR10(device, instance, _CMINION, _FALCON_IMEMC(0)); + } + NVSWITCH_PRINT(device, SETUP, " 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); + + data = 0; + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_DMEMC, _OFFS, 0x0, data); + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_DMEMC, _BLK, 0x0, data); + data = FLD_SET_DRF(_CMINION, _FALCON_DMEMC, _AINCR, _TRUE, data); + NVSWITCH_MINION_WR32_LR10(device, instance, _CMINION, _FALCON_DMEMC(0), data); + + NVSWITCH_PRINT(device, SETUP, "MINION DMEMD = \n"); + for (i = 0; i < 8 ; i++) + { + buf[i] = NVSWITCH_MINION_RD32_LR10(device, instance, _CMINION, _FALCON_DMEMD(0)); + } + NVSWITCH_PRINT(device, SETUP, " 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); + + NVSWITCH_PRINT(device, SETUP, "MINION DMEMC = \n"); + for (i = 0; i < 8 ; i++) + { + buf[i] = NVSWITCH_MINION_RD32_LR10(device, instance, _CMINION, _FALCON_DMEMC(0)); + } + NVSWITCH_PRINT(device, SETUP, " 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); +#endif //defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS) +} + +/* + * @Brief : Test MINION by sending SWINTR DLCMD + * + * @param[in] device Send command to MINION on this device + * @param[in] link DLCMD will be sent on this link + * + * @return Returns true if the DLCMD passed + */ +static NvBool +_nvswitch_minion_test_dlcmd +( + nvswitch_device *device, + NvU32 linkNumber +) +{ + NvU32 interrupts, localLinkNumber; + localLinkNumber = linkNumber % NVSWITCH_LINKS_PER_MINION; + + if (nvswitch_minion_send_command(device, linkNumber, + NV_MINION_NVLINK_DL_CMD_COMMAND_SWINTR, 0) != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SWINTR DL CMD failed for link %d.\n", + __FUNCTION__, linkNumber); + return NV_FALSE; + } + + interrupts = NVSWITCH_MINION_LINK_RD32_LR10(device, linkNumber, + _MINION, _NVLINK_LINK_INTR(localLinkNumber)); + + if (DRF_VAL(_MINION, _NVLINK_LINK_INTR, _CODE, interrupts) == + NV_MINION_NVLINK_LINK_INTR_CODE_SWREQ) + { + NVSWITCH_PRINT(device, SETUP, + "%s: Received NON-FATAL INTR_CODE = SWREQ, SUBCODE = 0x%x." 
+ " SWINTR DLCMD was executed successfully.\n", + __FUNCTION__, + DRF_VAL(_MINION, _NVLINK_LINK_INTR, _SUBCODE, interrupts)); + + // clear the interrupt + interrupts = DRF_NUM(_MINION, _NVLINK_LINK_INTR, _STATE, 1); + NVSWITCH_MINION_LINK_WR32_LR10(device, linkNumber, _MINION, + _NVLINK_LINK_INTR(localLinkNumber), interrupts); + + return NV_TRUE; + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: No SWINTR interrupt received. DL CMD failed for link %d.\n", + __FUNCTION__, linkNumber); + return NV_FALSE; + } + + return NV_TRUE; +} + +static void +_nvswitch_print_minion_info +( + nvswitch_device *device, + NvU32 id +) +{ +#if defined(DEVELOP) || defined(DEBUG) + NvU32 falcon_os; + NvU32 falcon_mailbox; + NvU32 falcon_sctl; + + falcon_os = NVSWITCH_MINION_RD32_LR10(device, id, _CMINION, _FALCON_OS); + falcon_mailbox = NVSWITCH_MINION_RD32_LR10(device, id, _CMINION, _FALCON_MAILBOX1); + falcon_sctl = NVSWITCH_MINION_RD32_LR10(device, id, _CMINION, _FALCON_SCTL); + + // Dump the ucode ID string epilog + NVSWITCH_PRINT(device, SETUP, + "MINION Falcon ucode version info: Ucode v%d.%d Phy v%d\n", + (falcon_os >> 16) & 0xFFFF, + falcon_os & 0xFFFF, + falcon_mailbox); + + // Display security level info at info level, very useful for logs. + NVSWITCH_PRINT(device, SETUP, + "%s: NV_CMINION_FALCON_SCTL : 0x%08X\n", + __FUNCTION__, falcon_sctl); +#endif +} + +/* + * @Brief : Bootstrap MINION associated with the link + * + * @param[in] device Bootstrap MINION on this device + * @param[in] link Bootstrap MINION associated with the link + */ +static NvlStatus +_nvswitch_minion_bootstrap +( + nvswitch_device *device +) +{ + NvU32 data, i, link_num; + NvU64 link_mask; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + NvlStatus status = NVL_SUCCESS; + + for (i = 0; i < NVSWITCH_ENG_COUNT(device, MINION, ) ; i++) + { + if (!NVSWITCH_ENG_IS_VALID(device, MINION, i)) + { + NVSWITCH_PRINT(device, WARN, + "%s: MINION[%d] is not valid. Skipping\n", + __FUNCTION__, i); + continue; + } + + if (nvswitch_is_minion_initialized(device, i)) + { + NVSWITCH_PRINT(device, WARN, + "%s: MINION[%d] is already bootstrapped.\n", + __FUNCTION__, i); + continue; + } + + // Verify if the ucode was written properly + _nvswitch_minion_print_ucode(device, i); + + // Write boot vector to 0x0 + data = NVSWITCH_MINION_RD32_LR10(device, i, _CMINION, _FALCON_BOOTVEC); + data = FLD_SET_DRF_NUM(_CMINION, _FALCON_BOOTVEC, _VEC, 0x0, data); + NVSWITCH_MINION_WR32_LR10(device, i, _CMINION, _FALCON_BOOTVEC, data); + + // + // Start the Falcon + // If a falcon is managed (and thus supports secure mode), we need to + // write NV_PFALCON_FALCON_CPUCTL_ALIAS_STARTCPU = _TRUE. + // Below write is a nop in secure mode. + // + data = NVSWITCH_MINION_RD32_LR10(device, i, _CMINION, _FALCON_CPUCTL); + data = FLD_SET_DRF(_CMINION, _FALCON_CPUCTL, _STARTCPU, _TRUE, data); + NVSWITCH_MINION_WR32_LR10(device, i, _CMINION, _FALCON_CPUCTL, data); + + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + nvswitch_timeout_create(10*NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + else + { + nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout); + } + + // + // We will exit this if we recieve bootstrap signal OR + // if we timeout waiting for bootstrap signal OR + // if bootstrap fails + // + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + data = NVSWITCH_MINION_RD32_LR10(device, i, _MINION, _MINION_STATUS); + + // The INIT sequence has completed, success? 
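+            // _MINION_STATUS_STATUS reads back _BOOT once the ucode has finished
+            // its init sequence; a test SWINTR DLCMD is issued below to confirm
+            // the MINION is actually accepting commands.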
+ if (FLD_TEST_DRF(_MINION, _MINION_STATUS, _STATUS, _BOOT, data)) + { + // MINION Init succeeded. + NVSWITCH_PRINT(device, SETUP, + "%s: NVLink MINION %d bootstrap complete signal received.\n", + __FUNCTION__, i); + + _nvswitch_print_minion_info(device, i); + break; + } + + // + // Check if any falcon interrupts are hit & pending. + // TODO: Check return status of the call below + // + nvswitch_minion_service_falcon_interrupts_lr10(device, i); + + nvswitch_os_sleep(1); + } + while (keepPolling); + + if (!FLD_TEST_DRF(_MINION, _MINION_STATUS, _STATUS, _BOOT, data)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for NVLink MINION %d to complete bootstrap!" + "NV_CMINION_MINION_STATUS = 0x%08x\n", + __FUNCTION__, i, data); + // Bug 2974064: Review this timeout handling (fall through) + } + nvswitch_set_minion_initialized(device, i, NV_TRUE); + + // Run a test DLCMD to see if MINION is accepting commands. + link_mask = NVSWITCH_NVLIPT_GET_LOCAL_LINK_MASK64(i); + FOR_EACH_INDEX_IN_MASK(64, link_num, link_mask) + { + // Pick a valid lick in this NVLipt + if (nvswitch_is_link_valid(device, link_num)) + { + break; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + if (!_nvswitch_minion_test_dlcmd(device, link_num)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to bootstrap MINION %d.\n", + __FUNCTION__, i); + nvswitch_set_minion_initialized(device, i, NV_FALSE); + return -NVL_ERR_INVALID_STATE; + } + else + { + NVSWITCH_PRINT(device, SETUP, + "%s: MINION %d successfully bootstrapped and accepting DLCMDs.\n", + __FUNCTION__, i); + nvswitch_set_minion_initialized(device, i, NV_TRUE); + } + } + + return status; +} + +/* + * @Brief : Send MINION DL CMD for a particular link + * + * @param[in] device Send command to MINION on this device + * @param[in] linkNumber DLCMD will be sent on this link number + * + * @return Returns true if the DLCMD passed + */ +NvlStatus +nvswitch_minion_send_command_lr10 +( + nvswitch_device *device, + NvU32 linkNumber, + NvU32 command, + NvU32 scratch0 +) +{ + NvU32 data = 0, localLinkNumber, statData = 0; + NvU32 ingressEccRegVal = 0, egressEccRegVal = 0; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + + localLinkNumber = linkNumber % NVSWITCH_LINKS_PER_MINION; + + if (!nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, linkNumber, MINION))) + { + NVSWITCH_PRINT(device, ERROR, + "%s: MINION %d is not initialized for link %08x.\n", + __FUNCTION__, NVSWITCH_GET_LINK_ENG_INST(device, linkNumber, MINION), + linkNumber); + return NVL_SUCCESS; + } + + data = NVSWITCH_MINION_LINK_RD32_LR10(device, linkNumber, _MINION, _NVLINK_DL_CMD(localLinkNumber)); + if (FLD_TEST_DRF_NUM(_MINION, _NVLINK_DL_CMD, _FAULT, 1, data)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: MINION %d is in fault state. 
NV_MINION_NVLINK_DL_CMD(%d) = %08x\n", + __FUNCTION__, NVSWITCH_GET_LINK_ENG_INST(device, linkNumber, MINION), + linkNumber, data); + return -NVL_ERR_INVALID_STATE; + } + + // Write to minion scratch if needed by command + switch (command) + { + case NV_MINION_NVLINK_DL_CMD_COMMAND_CONFIGEOM: + data = 0; + data = FLD_SET_DRF_NUM(_MINION, _MISC_0, _SCRATCH_SWRW_0, scratch0, data); + NVSWITCH_MINION_WR32_LR10(device, + NVSWITCH_GET_LINK_ENG_INST(device, linkNumber, MINION), _MINION, _MISC_0, data); + break; + case NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHASE1: + // + // WAR bug 2708497 + // Before INITPHASE1, we must clear these values, then set back to + // _PROD after the call + // NV_INGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE + // NV_EGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE + // + + ingressEccRegVal = NVSWITCH_NPORT_RD32_LR10(device, linkNumber, _INGRESS, _ERR_ECC_CTRL); + NVSWITCH_NPORT_WR32_LR10(device, linkNumber, _INGRESS, _ERR_ECC_CTRL, + FLD_SET_DRF(_INGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, _DISABLE, ingressEccRegVal)); + + egressEccRegVal = NVSWITCH_NPORT_RD32_LR10(device, linkNumber, _EGRESS, _ERR_ECC_CTRL); + NVSWITCH_NPORT_WR32_LR10(device, linkNumber, _EGRESS, _ERR_ECC_CTRL, + FLD_SET_DRF(_EGRESS, _ERR_ECC_CTRL, _NCISOC_PARITY_ENABLE, _DISABLE, egressEccRegVal)); + break; + default: + break; + } + + data = FLD_SET_DRF_NUM(_MINION, _NVLINK_DL_CMD, _COMMAND, command, data); + data = FLD_SET_DRF_NUM(_MINION, _NVLINK_DL_CMD, _FAULT, 1, data); + NVSWITCH_MINION_LINK_WR32_LR10(device, linkNumber, _MINION, _NVLINK_DL_CMD(localLinkNumber), data); + + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + else + { + nvswitch_timeout_create(NVSWITCH_INTERVAL_5MSEC_IN_NS, &timeout); + } + + // + // We will exit this if the command is successful OR + // if timeout waiting for the READY bit to be set OR + // if it generates a MINION FAULT + // + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + data = NVSWITCH_MINION_LINK_RD32_LR10(device, linkNumber, _MINION, _NVLINK_DL_CMD(localLinkNumber)); + if (FLD_TEST_DRF_NUM(_MINION, _NVLINK_DL_CMD, _READY, 1, data)) + { + // The command has completed, success? + if (FLD_TEST_DRF_NUM(_MINION, _NVLINK_DL_CMD, _FAULT, 1, data)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: NVLink MINION command faulted!" 
+ " NV_MINION_NVLINK_DL_CMD(%d) = 0x%08x\n", + __FUNCTION__, linkNumber, data); + + // Pull fault code and subcode + if (nvswitch_minion_get_dl_status(device, linkNumber, + NV_NVLSTAT_MN00, 0, &statData) == NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Minion DLCMD Fault code = 0x%x, Sub-code = 0x%x\n", + __FUNCTION__, + DRF_VAL(_NVLSTAT, _MN00, _LINK_INTR_CODE, statData), + DRF_VAL(_NVLSTAT, _MN00, _LINK_INTR_SUBCODE, statData)); + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to get code and subcode from DLSTAT, link %d\n", + __FUNCTION__, linkNumber); + } + + // Clear the fault and return + NVSWITCH_PRINT(device, ERROR, + "%s: Clearing NVLink MINION fault for link %d\n", + __FUNCTION__, linkNumber); + + data = FLD_SET_DRF_NUM(_MINION, _NVLINK_DL_CMD, _FAULT, 1, 0x0); + NVSWITCH_MINION_LINK_WR32_LR10(device, linkNumber, _MINION, _NVLINK_DL_CMD(localLinkNumber), data); + return -NVL_ERR_INVALID_STATE; + } + else + { + NVSWITCH_PRINT(device, SETUP, + "%s: NVLink MINION command %x was sent successfully for link %d\n", + __FUNCTION__, command, linkNumber); + break; + } + } + + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + nvswitch_os_sleep(1); + } + } + while (keepPolling); + + if (!FLD_TEST_DRF_NUM(_MINION, _NVLINK_DL_CMD, _READY, 1, data)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for NVLink MINION command to complete!" + " NV_MINION_NVLINK_DL_CMD(%d) = 0x%08x\n", + __FUNCTION__, linkNumber, data); + return -NVL_ERR_INVALID_STATE; + } + + if (command == NV_MINION_NVLINK_DL_CMD_COMMAND_INITPHASE1) + { + // + // WAR bug 2708497 + // Before INITPHASE1, we must clear these values, then set back to + // _PROD after the call + // NV_INGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE + // NV_EGRESS_ERR_ECC_CTRL_NCISOC_PARITY_ENABLE + // + NVSWITCH_NPORT_WR32_LR10(device, linkNumber, _INGRESS, _ERR_ECC_CTRL, ingressEccRegVal); + NVSWITCH_NPORT_WR32_LR10(device, linkNumber, _EGRESS, _ERR_ECC_CTRL, egressEccRegVal); + } + + return NVL_SUCCESS; +} + +/* + * @Brief : Load minion ucode from regkeys + * Overrides minion image from the regkeys + * + * @param device The nvswitch device + */ +static NvlStatus +_nvswitch_load_minion_ucode_image_from_regkeys +( + nvswitch_device *device +) +{ + NvlStatus status = NVL_SUCCESS; + + NvU32 *data = NULL; + NvU32 *header = NULL; + NvU32 data_size; + NvU32 header_size; + + if (!NV_SWITCH_REGKEY_PRIVATE_ALLOWED) + { + // Regkey override of ucode image only allowed on internal use debug drivers. 
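+        // Returning an error here makes the caller fall back to the built-in
+        // ucode image.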
+ return -NVL_ERR_GENERIC; + } + + status = nvswitch_os_read_registry_dword(device->os_handle, + NV_SWITCH_REGKEY_MINION_SET_UCODE_HDR_SIZE, &header_size); + if (status != NVL_SUCCESS) + { + return status; + } + + status = nvswitch_os_read_registry_dword(device->os_handle, + NV_SWITCH_REGKEY_MINION_SET_UCODE_DATA_SIZE, &data_size); + if (status != NVL_SUCCESS) + { + return status; + } + + if (header_size == 0 || data_size == 0) + { + NVSWITCH_PRINT(device, SETUP, + "%s: Failed to query ucode size via regkey.\n", + __FUNCTION__); + return -NVL_BAD_ARGS; + } + + header = nvswitch_os_malloc(header_size); + if (header == NULL) + { + status = -NVL_NO_MEM; + goto done; + } + + data = nvswitch_os_malloc(data_size); + if (data == NULL) + { + status = -NVL_NO_MEM; + goto done; + } + + status = nvswitch_os_read_registery_binary(device->os_handle, + NV_SWITCH_REGKEY_MINION_SET_UCODE_HDR, (NvU8*)header, header_size); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "%s: Failed to query ucode header.\n", + __FUNCTION__); + goto done; + } + + status = nvswitch_os_read_registery_binary(device->os_handle, + NV_SWITCH_REGKEY_MINION_SET_UCODE_DATA, (NvU8*)data, data_size); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "%s: Failed to query ucode data.\n", + __FUNCTION__); + goto done; + } + + // Copy the ucode to IMEM and DMEM by using backdoor PMB access + status = _nvswitch_minion_copy_ucode_bc(device, data, header); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to copy MINION ucode in broadcast mode!\n", + __FUNCTION__); + goto done; + } + else + { + NVSWITCH_PRINT(device, SETUP, + "Successfully loaded MINION microcode override.\n"); + } + +done: + if (header != NULL) + { + nvswitch_os_free(header); + } + + if (data != NULL) + { + nvswitch_os_free(data); + } + + return status; +} + +/* + * @Brief : Load minion ucode image + * + * @param device The nvswitch device + */ +static NvlStatus +_nvswitch_load_minion_ucode_image +( + nvswitch_device *device +) +{ + NvlStatus status; + NvU32 data; + NvBool bDebugMode = NV_FALSE; + + // load ucode image via regkey + status = _nvswitch_load_minion_ucode_image_from_regkeys(device); + if (status == NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, + "%s: Successfully loaded ucode via regkey\n", + __FUNCTION__); + return status; + } + + // + // Determine if _dbg or _prod ucode needs to be loaded + // Read from MINION 0 - we don't support MINIONs being in different debug modes + // + data = NVSWITCH_MINION_RD32_LR10(device, 0, _CMINION, _SCP_CTL_STAT); + bDebugMode = FLD_TEST_DRF(_CMINION, _SCP_CTL_STAT, _DEBUG_MODE, _DISABLED, data) ? + (NV_FALSE) : (NV_TRUE); + + // + // If ucode load fails via regkey fallback to the default ucode. 
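+    // Debug-fused parts (SCP_CTL_STAT_DEBUG_MODE != _DISABLED) take the _dbg
+    // image; production parts take the _prod image.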
+ // Copy the ucode to IMEM and DMEM by using backdoor PMB access + // + if (bDebugMode) + { + status = _nvswitch_minion_copy_ucode_bc(device, minion_ucode_data_lr10_dbg, minion_ucode_header_lr10_dbg); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to copy dbg MINION ucode in broadcast mode!\n", + __FUNCTION__); + return status; + } + } + else + { + status = _nvswitch_minion_copy_ucode_bc(device, minion_ucode_data_lr10_prod, minion_ucode_header_lr10_prod); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to copy prod MINION ucode in broadcast mode!\n", + __FUNCTION__); + return status; + } + } + + return status; +} + +/* + * @Brief : Bootstrap all MINIONs on the specified device + * + * @param[in] device Bootstrap MINIONs on this device + */ +NvlStatus +nvswitch_init_minion_lr10 +( + nvswitch_device *device +) +{ + NvlStatus status = NVL_SUCCESS; + + if (_nvswitch_check_running_minions(device)) + { + return NVL_SUCCESS; + } + + status = _nvswitch_minion_pre_init(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: MINION pre init failed\n", + __FUNCTION__); + return status; + } + + // Load MINION + status = _nvswitch_load_minion_ucode_image(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to load MINION ucode image!\n", + __FUNCTION__); + return status; + } + + status = _nvswitch_minion_bootstrap(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to bootstrap MINION!\n", + __FUNCTION__); + return status; + } + + return status; +} + +NvlStatus +nvswitch_minion_get_dl_status_lr10 +( + nvswitch_device *device, + NvU32 linkId, + NvU32 statusIdx, + NvU32 statusArgs, + NvU32 *statusData +) +{ + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + NvU32 regData, localLinkNumber; + localLinkNumber = linkId % NVSWITCH_LINKS_PER_MINION; + + if (!nvswitch_is_minion_initialized(device, NVSWITCH_GET_LINK_ENG_INST(device, linkId, MINION))) + { + NVSWITCH_PRINT(device, ERROR, + "%s: MINION %d is not initialized for link %08x.\n", + __FUNCTION__, NVSWITCH_GET_LINK_ENG_INST(device, linkId, MINION), + linkId); + return -NVL_ERR_INVALID_STATE; + } + + // Query the DL status interface to get the data + NVSWITCH_MINION_LINK_WR32_LR10(device, linkId, _MINION, _NVLINK_DL_STAT(localLinkNumber), + DRF_NUM(_MINION, _NVLINK_DL_STAT, _ARGS, statusArgs) | + DRF_NUM(_MINION, _NVLINK_DL_STAT, _STATUSIDX, statusIdx)); + + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + else + { + nvswitch_timeout_create(20 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); + } + + // Poll for READY bit to be set + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? 
NV_FALSE : NV_TRUE; + + regData = NVSWITCH_MINION_LINK_RD32_LR10(device, linkId, _MINION, _NVLINK_DL_STAT(localLinkNumber)); + if (FLD_TEST_DRF_NUM(_MINION, _NVLINK_DL_STAT, _READY, 1, regData)) + { + *statusData = NVSWITCH_MINION_LINK_RD32_LR10(device, linkId, _MINION, _NVLINK_DL_STATDATA(localLinkNumber)); + return NVL_SUCCESS; + } + if (IS_FMODEL(device) || IS_RTLSIM(device)) + { + nvswitch_os_sleep(1); + } + } + while (keepPolling); + + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for DL_STAT request to complete" + " NV_MINION_NVLINK_DL_STAT(%d) = 0x%08x\n", + __FUNCTION__, linkId, regData); + return -NVL_ERR_INVALID_STATE; +} + +NvlStatus +nvswitch_minion_get_initoptimize_status_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + NvU32 statData; + NvlStatus status; + + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + nvswitch_timeout_create(100 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + else + { + nvswitch_timeout_create(20 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + + // Poll for READY bit to be set + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + // Poll for INITOPTIMIZE status on MINION DL STAT interface + status = nvswitch_minion_get_dl_status(device, linkId, NV_NVLSTAT_UC01, 0, &statData); + if (status != NVL_SUCCESS) + { + return status; + } + + if (FLD_TEST_DRF_NUM(_NVLSTAT, _UC01, _TRAINING_GOOD, 0x1, statData)) + { + NVSWITCH_PRINT(device, INFO, + "%s: INITOPTIMIZE TRAINING_GOOD on link: %d\n", + __FUNCTION__, linkId); + return NVL_SUCCESS; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for INITOPTIMIZE TRAINING_GOOD on link: %d\n", + __FUNCTION__, linkId); + return -NVL_ERR_INVALID_STATE; +} + +NvlStatus +nvswitch_minion_get_initnegotiate_status_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + NvU32 statData; + NvlStatus status; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + nvswitch_timeout_create(10 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + else + { + nvswitch_timeout_create(2 * NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + + // Poll for READY bit to be set + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? NV_FALSE : NV_TRUE; + + // Check INITNEGOTIATE status on MINION DL STAT interface + status = nvswitch_minion_get_dl_status(device, linkId, NV_NVLSTAT_UC01, 0, &statData); + if (status != NVL_SUCCESS) + { + return status; + } + + if (FLD_TEST_DRF(_NVLSTAT, _UC01, _CONFIG_GOOD, _SUCCESS, statData)) + { + NVSWITCH_PRINT(device, INFO, + "%s: INITNEGOTIATE CONFIG_GOOD on link: %d\n", + __FUNCTION__, linkId); + + return NVL_SUCCESS; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for INITNEGOTIATE CONFIG_GOOD on link: %d\n", + __FUNCTION__, linkId); + + return -NVL_ERR_INVALID_STATE; +} + +NvlStatus +nvswitch_minion_get_rxdet_status_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + NvU32 statData; + NvlStatus status; + NVSWITCH_TIMEOUT timeout; + NvBool keepPolling; + + if (IS_FMODEL(device) || IS_EMULATION(device) || IS_RTLSIM(device)) + { + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + } + else + { + nvswitch_timeout_create(20 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); + } + + // Poll for READY bit to be set + do + { + keepPolling = (nvswitch_timeout_check(&timeout)) ? 
NV_FALSE : NV_TRUE; + + // Check RXDET status on MINION DL STAT interface + status = nvswitch_minion_get_dl_status(device, linkId, NV_NVLSTAT_LNK2, 0, &statData); + if (status != NVL_SUCCESS) + { + return status; + } + + if (FLD_TEST_DRF(_NVLSTAT, _LNK2, _RXDET_LINK_STATUS, _FOUND, statData)) + { + NVSWITCH_PRINT(device, INFO, + "%s: RXDET LINK_STATUS = FOUND on link: %d\n", + __FUNCTION__, linkId); + + // Retrieve which lanes were found (should be all) + device->link[linkId].lane_rxdet_status_mask = + DRF_VAL(_NVLSTAT, _LNK2, _RXDET_LANE_STATUS, statData); + + // + // MINION doesn't have knowledge of lane reversal and therefore + // reports logical lanes. We must reverse the bitmask here if applicable + // since RM reports physical lanes. + // + if (nvswitch_link_lane_reversed_lr10(device, linkId)) + { + NVSWITCH_REVERSE_BITMASK_32(NVSWITCH_NUM_LANES_LR10, + device->link[linkId].lane_rxdet_status_mask); + } + + return NVL_SUCCESS; + } + + if (FLD_TEST_DRF(_NVLSTAT, _LNK2, _RXDET_LINK_STATUS, _TIMEOUT, statData)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: RXDET LINK_STATUS = TIMEOUT on link: %d\n", + __FUNCTION__, linkId); + + // Retrieve which lanes were found + device->link[linkId].lane_rxdet_status_mask = + DRF_VAL(_NVLSTAT, _LNK2, _RXDET_LANE_STATUS, statData); + + // + // MINION doesn't have knowledge of lane reversal and therefore + // reports logical lanes. We must reverse the bitmask here if applicable + // since RM reports physical lanes. + // + if (nvswitch_link_lane_reversed_lr10(device, linkId)) + { + NVSWITCH_REVERSE_BITMASK_32(NVSWITCH_NUM_LANES_LR10, + device->link[linkId].lane_rxdet_status_mask); + } + + return -NVL_ERR_INVALID_STATE; + } + + nvswitch_os_sleep(1); + } + while (keepPolling); + + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for RXDET STATUS on link: %d\n", + __FUNCTION__, linkId); + + return -NVL_ERR_INVALID_STATE; +} + +NvlStatus +nvswitch_minion_set_rx_term_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + if (nvswitch_minion_send_command(device, linkId, + NV_MINION_NVLINK_DL_CMD_COMMAND_INITRXTERM, 0) != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: INITRXTERM DL CMD failed for link %d.\n", + __FUNCTION__, linkId); + return -NVL_ERR_INVALID_STATE; + } + + return NVL_SUCCESS; +} + +NvU32 +nvswitch_minion_get_line_rate_Mbps_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + NvU32 statData; + NvlStatus status; + + status = nvswitch_minion_get_dl_status(device, linkId, NV_NVLSTAT_LNK3, 0, &statData); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to retrieve LINERATE from MINION DLSTAT for link %d.\n", + __FUNCTION__, linkId); + } + + return DRF_NUM(_NVLSTAT, _LNK3, _LINERATE, statData); +} + +NvU32 +nvswitch_minion_get_data_rate_KiBps_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + NvU32 statData; + NvlStatus status; + + status = nvswitch_minion_get_dl_status(device, linkId, NV_NVLSTAT_LNK5, 0, &statData); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to retrieve DATARATE from MINION DLSTAT for link %d.\n", + __FUNCTION__, linkId); + } + + return DRF_NUM(_NVLSTAT, _LNK5, _DATARATE, statData); +} + +NvlStatus +nvswitch_set_minion_initialized_lr10 +( + nvswitch_device *device, + NvU32 idx_minion, + NvBool initialized +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + if (!NVSWITCH_ENG_VALID_LR10(device, MINION, idx_minion)) + { + return -NVL_BAD_ARGS; + } + + chip_device->engMINION[idx_minion].initialized = initialized; + return 
NVL_SUCCESS; +} + +NvBool +nvswitch_is_minion_initialized_lr10 +( + nvswitch_device *device, + NvU32 idx_minion +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + if (!NVSWITCH_ENG_VALID_LR10(device, MINION, idx_minion)) + { + return NV_FALSE; + } + return (chip_device->engMINION[idx_minion].initialized != 0); +} + +NvlStatus +nvswitch_minion_clear_dl_error_counters_lr10 +( + nvswitch_device *device, + NvU32 linkId +) +{ + NvlStatus status; + + status = nvswitch_minion_send_command(device, linkId, + NV_MINION_NVLINK_DL_CMD_COMMAND_DLSTAT_CLR_DLERRCNT, 0); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s : Failed to clear error count to MINION for (%s):(%d).\n", + __FUNCTION__, device->name, linkId); + } + return status; +} + diff --git a/src/common/nvswitch/kernel/lr10/pmgr_lr10.c b/src/common/nvswitch/kernel/lr10/pmgr_lr10.c new file mode 100644 index 000000000..cbe0f733c --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/pmgr_lr10.c @@ -0,0 +1,335 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "export_nvswitch.h" +#include "common_nvswitch.h" +#include "error_nvswitch.h" +#include "rom_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/pmgr_lr10.h" +#include "nvswitch/lr10/dev_pmgr.h" + +void _nvswitch_i2c_set_port_pmgr(nvswitch_device *device, NvU32 port); + +/*! The number of nanoseconds we will wait for slave clock stretching. + * Previously, this was set to 100us, but proved too + * short (see bug 630691) so was increased to 2ms. + */ +#define I2C_STRETCHED_LOW_TIMEOUT_NS_LR10 2000000 + +NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE nvswitch_i2c_device_allow_list_lr10[] = +{ + +}; + +const NvU32 nvswitch_i2c_device_allow_list_size_lr10 = + NV_ARRAY_ELEMENTS(nvswitch_i2c_device_allow_list_lr10); + +// +// PMGR functions +// + +/*! + * @brief Return I2c port info used in PMGR implementation. 
+ */ +NvU32 +nvswitch_i2c_get_port_info_lr10 +( + nvswitch_device *device, + NvU32 port +) +{ + PNVSWITCH_OBJI2C pI2c = device->pI2c; + + if (port >= NVSWITCH_MAX_I2C_PORTS) + { + return 0; + } + else + { + return pI2c->PortInfo[port]; + } +} + +// +// Pre-initialize the software & hardware state of the switch I2C & GPIO interface +// +void +nvswitch_init_pmgr_lr10 +( + nvswitch_device *device +) +{ + PNVSWITCH_OBJI2C pI2c; + + // Initialize I2C object + nvswitch_i2c_init(device); + + pI2c = device->pI2c; + + // + // Dynamically allocate the I2C device allowlist + // once VBIOS table reads are implemented. + // + pI2c->i2c_allow_list = nvswitch_i2c_device_allow_list_lr10; + pI2c->i2c_allow_list_size = nvswitch_i2c_device_allow_list_size_lr10; + + // Setup the 3 I2C ports + _nvswitch_i2c_set_port_pmgr(device, NVSWITCH_I2C_PORT_I2CA); + _nvswitch_i2c_set_port_pmgr(device, NVSWITCH_I2C_PORT_I2CB); + _nvswitch_i2c_set_port_pmgr(device, NVSWITCH_I2C_PORT_I2CC); + +} + +static const NVSWITCH_GPIO_INFO nvswitch_gpio_pin_Default[] = +{ + NVSWITCH_DESCRIBE_GPIO_PIN( 0, _INSTANCE_ID0, 0, IN), // Instance ID bit 0 + NVSWITCH_DESCRIBE_GPIO_PIN( 1, _INSTANCE_ID1, 0, IN), // Instance ID bit 1 + NVSWITCH_DESCRIBE_GPIO_PIN( 2, _INSTANCE_ID2, 0, IN), // Instance ID bit 2 + NVSWITCH_DESCRIBE_GPIO_PIN( 3, _INSTANCE_ID3, 0, IN), // Instance ID bit 3 + NVSWITCH_DESCRIBE_GPIO_PIN( 4, _INSTANCE_ID4, 0, IN), // Instance ID bit 4 + NVSWITCH_DESCRIBE_GPIO_PIN( 5, _INSTANCE_ID5, 0, IN), // Instance ID bit 5 + NVSWITCH_DESCRIBE_GPIO_PIN( 6, _INSTANCE_ID6, 0, IN), // Instance ID bit 6 +}; + +static const NvU32 nvswitch_gpio_pin_Default_size = NV_ARRAY_ELEMENTS(nvswitch_gpio_pin_Default); + +// +// Initialize the software state of the switch I2C & GPIO interface +// Temporarily forcing default GPIO values. +// + +// TODO: This function should be updated with the board values from DCB. + +void +nvswitch_init_pmgr_devices_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + PNVSWITCH_OBJI2C pI2c = device->pI2c; + + chip_device->gpio_pin = nvswitch_gpio_pin_Default; + chip_device->gpio_pin_size = nvswitch_gpio_pin_Default_size; + + pI2c->device_list = NULL; + pI2c->device_list_size = 0; +} + +/*! + * RM Control command to determine the physical id of the device. 
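+ *
+ * The physical id is assembled from the INSTANCE_ID GPIO pins, each read
+ * back through NV_PMGR_GPIO_INPUT_CNTL_1.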
+ */ +NvU32 +nvswitch_read_physical_id_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + NvU32 physical_id = 0; + NvU32 data; + NvU32 idx_gpio; + NvU32 input_inv; + NvU32 function_offset; + + for (idx_gpio = 0; idx_gpio < chip_device->gpio_pin_size; idx_gpio++) + { + if ((chip_device->gpio_pin[idx_gpio].function >= NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID0) && + (chip_device->gpio_pin[idx_gpio].function <= NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID6)) + { + if (chip_device->gpio_pin[idx_gpio].misc == NVSWITCH_GPIO_ENTRY_MISC_IO_INV_IN) + { + input_inv = NV_PMGR_GPIO_INPUT_CNTL_1_INV_YES; + } + else + { + input_inv = NV_PMGR_GPIO_INPUT_CNTL_1_INV_NO; + } + + NVSWITCH_REG_WR32(device, _PMGR, _GPIO_INPUT_CNTL_1, + DRF_NUM(_PMGR, _GPIO_INPUT_CNTL_1, _PINNUM, chip_device->gpio_pin[idx_gpio].pin) | + DRF_NUM(_PMGR, _GPIO_INPUT_CNTL_1, _INV, input_inv) | + DRF_DEF(_PMGR, _GPIO_INPUT_CNTL_1, _BYPASS_FILTER, _NO)); + + data = NVSWITCH_REG_RD32(device, _PMGR, _GPIO_INPUT_CNTL_1); + function_offset = chip_device->gpio_pin[idx_gpio].function - + NVSWITCH_GPIO_ENTRY_FUNCTION_INSTANCE_ID0; + physical_id |= + (DRF_VAL(_PMGR, _GPIO_INPUT_CNTL_1, _READ, data) << function_offset); + } + } + + NVSWITCH_PRINT(device, SETUP, "%s Device position Id = 0x%x\n", __FUNCTION__, physical_id); + + return physical_id; +} + +/*! + * RM Control command to perform indexed I2C. + */ +NvlStatus +nvswitch_ctrl_i2c_indexed_lr10 +( + nvswitch_device *device, + NVSWITCH_CTRL_I2C_INDEXED_PARAMS *pParams +) +{ + NvlStatus status = (-NVL_ERR_GENERIC); + return status; +} + +NvlStatus +nvswitch_get_rom_info_lr10 +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom +) +{ + if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device)) + { + NVSWITCH_PRINT(device, SETUP, + "ROM configuration not supported on Fmodel/RTL/emulation\n"); + return -NVL_ERR_NOT_SUPPORTED; + } + + return -NVL_ERR_NOT_SUPPORTED; +} + +/*! + * Set the speed of the HW I2C controller on a given port. + * + * @param[in] port The port identifying the controller. + * + * @param[in] speedMode The speed mode to run at. + */ +void +nvswitch_i2c_set_hw_speed_mode_lr10 +( + nvswitch_device *device, + NvU32 port, + NvU32 speedMode +) +{ + NvU32 timing = DRF_DEF(_PMGR, _I2C_TIMING, _IGNORE_ACK, _DISABLE) | + DRF_DEF(_PMGR, _I2C_TIMING, _TIMEOUT_CHECK, _ENABLE); + + switch (speedMode) + { + // Default should not be hit if above layers work correctly. 
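+        // An unexpected speedMode is reported and then falls through to the
+        // 100KHZ timings.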
+ default: + NVSWITCH_PRINT(device, ERROR, + "%s: undefined speed\n", + __FUNCTION__); + // Deliberate fallthrough + case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_100KHZ: + timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _100KHZ, timing); + timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_100KHZ, timing); + break; + + case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_200KHZ: + timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _200KHZ, timing); + timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_200KHZ, timing); + break; + + case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_300KHZ: + timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _300KHZ, timing); + timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_300KHZ, timing); + break; + + case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_400KHZ: + timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _400KHZ, timing); + timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_400KHZ, timing); + break; + + case NVSWITCH_CTRL_I2C_FLAGS_SPEED_MODE_1000KHZ: + timing = FLD_SET_DRF(_PMGR, _I2C_TIMING, _SCL_PERIOD, _1000KHZ, timing); + timing = FLD_SET_DRF_NUM(_PMGR, _I2C_TIMING, _TIMEOUT_CLK_CNT, NVSWITCH_I2C_SCL_CLK_TIMEOUT_1000KHZ, timing); + break; + } + + NVSWITCH_REG_WR32(device, _PMGR, _I2C_TIMING(port), timing); +} + +/*! + * Return if I2C transactions are supported. + * + * @param[in] device The NvSwitch Device. + * + */ +NvBool +nvswitch_is_i2c_supported_lr10 +( + nvswitch_device *device +) +{ + return NV_TRUE; +} + +/*! + * Return if I2C device and port is allowed access + * + * @param[in] device The NvSwitch Device. + * @param[in] port The I2C Port. + * @param[in] addr The I2C device to access. + * @param[in] bIsRead Boolean if I2C transaction is a read. + * + */ +NvBool +nvswitch_i2c_is_device_access_allowed_lr10 +( + nvswitch_device *device, + NvU32 port, + NvU8 addr, + NvBool bIsRead +) +{ + NvU32 i; + NvU32 device_allow_list_size; + NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE *device_allow_list; + NvBool bAllow = NV_FALSE; + PNVSWITCH_OBJI2C pI2c = device->pI2c; + + device_allow_list = pI2c->i2c_allow_list; + device_allow_list_size = pI2c->i2c_allow_list_size; + + for (i = 0; i < device_allow_list_size; i++) + { + NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE i2c_device = device_allow_list[i]; + + if ((port == i2c_device.i2cPortLogical) && + (addr == i2c_device.i2cAddress)) + { + bAllow = bIsRead ? + FLD_TEST_DRF(_NVSWITCH, _I2C_DEVICE, _READ_ACCESS_LEVEL, + _PUBLIC, i2c_device.i2cRdWrAccessMask) : + FLD_TEST_DRF(_NVSWITCH, _I2C_DEVICE, _WRITE_ACCESS_LEVEL, + _PUBLIC, i2c_device.i2cRdWrAccessMask); + break; + } + } + + return bAllow; +} + diff --git a/src/common/nvswitch/kernel/lr10/smbpbi_lr10.c b/src/common/nvswitch/kernel/lr10/smbpbi_lr10.c new file mode 100644 index 000000000..560b633e8 --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/smbpbi_lr10.c @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/smbpbi_lr10.h" +#include "nvswitch/lr10/dev_nvlsaw_ip.h" +#include "nvswitch/lr10/dev_nvlsaw_ip_addendum.h" + + +NvlStatus +nvswitch_smbpbi_get_dem_num_messages_lr10 +( + nvswitch_device *device, + NvU8 *pMsgCount +) +{ + NvU32 reg = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_SW, _SCRATCH_12); + + *pMsgCount = DRF_VAL(_NVLSAW_SW, _SCRATCH_12, _EVENT_MESSAGE_COUNT, reg); + + return NVL_SUCCESS; +} diff --git a/src/common/nvswitch/kernel/lr10/soe_lr10.c b/src/common/nvswitch/kernel/lr10/soe_lr10.c new file mode 100644 index 000000000..9d7995011 --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/soe_lr10.c @@ -0,0 +1,2204 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "soe/soe_nvswitch.h" +#include "soe/soe_priv_nvswitch.h" +#include "soe/soebif.h" +#include "rmlsfm.h" + +#include "nvlink_export.h" +#include "common_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/soe_lr10.h" +#include "soe/bin/g_soeuc_lr10_dbg.h" +#include "soe/bin/g_soeuc_lr10_prd.h" +#include "soe/soeifcmn.h" +#include "nvswitch/lr10/dev_soe_ip.h" +#include "nvswitch/lr10/dev_soe_ip_addendum.h" +#include "nvswitch/lr10/dev_falcon_v4.h" +#include "nvswitch/lr10/dev_nvlsaw_ip.h" +#include "nvswitch/lr10/dev_therm.h" +#include "regkey_nvswitch.h" + +#include "flcn/flcnable_nvswitch.h" +#include "flcn/flcn_nvswitch.h" + +#include "rmflcncmdif_nvswitch.h" + +/* + * @Brief : Selects SOE core (Falcon or Riscv) + * + * @param[in] device Bootstrap SOE on this device + * + * Does nothing on LR10 + */ +NvlStatus +nvswitch_soe_set_ucode_core_lr10 +( + nvswitch_device *device, + NvBool bFalcon +) +{ + return NVL_SUCCESS; +} + +/* + * @Brief : Reset SOE at the engine level. + * + * @param[in] device Reset SOE on this device + */ +static NvlStatus +_nvswitch_reset_soe +( + nvswitch_device *device +) +{ + NvU32 value; + NvlStatus status; + + // Assert reset + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _ENGINE); + value = FLD_SET_DRF(_SOE, _FALCON, _ENGINE_RESET, _TRUE, value); + NVSWITCH_SOE_WR32_LR10(device, 0, _SOE_FALCON, _ENGINE, value); + + // + // TODO: Track down correct delay, if any. + // Currently GPU does not enforce a delay, use 1ms for now. + // + nvswitch_os_sleep(1); + + // Unassert reset + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _ENGINE); + value = FLD_SET_DRF(_SOE, _FALCON, _ENGINE_RESET, _FALSE, value); + NVSWITCH_SOE_WR32_LR10(device, 0, _SOE_FALCON, _ENGINE, value); + + // Set SOE ucode core to falcon + status = nvswitch_soe_set_ucode_core(device, NV_TRUE); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to set SOE core\n"); + return status; + } + + // Wait for reset to complete + if (flcnWaitForResetToFinish_HAL(device, device->pSoe->pFlcn) != NV_OK) + { + // TODO: Fix up NV_STATUS translation, anything but NV_OK is a failure. + return NVL_INITIALIZATION_TOTAL_FAILURE; + } + + return NVL_SUCCESS; +} + +/* + * @Brief : Copy the soe ucode to IMEM and DMEM and write soe ucode entrypoint + * to boot vector register. 
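+ *
+ * Each application described in the ucode header is copied with its own
+ * IMEM tag and secure bit; appDataDmemOffset selects where in DMEM the
+ * application's data is placed.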
+ * + * @param[in] device Copy ucode to this device's SOE + */ +static NvlStatus +_nvswitch_soe_copy_ucode_cpubitbang +( + nvswitch_device *device, + const NvU32 *soe_ucode_data, + const NvU32 *soe_ucode_header +) +{ + const PSOE_UCODE_HDR_INFO_LR10 pUcodeHeader = + (PSOE_UCODE_HDR_INFO_LR10) &(soe_ucode_header[0]); + + NvU32 dataSize, data, i, appCount; + NvU32 appCodeStartOffset, appCodeSize, appCodeImemOffset; + NvU32 appDataStartOffset, appDataSize, appDataDmemOffset; + NvU32 appCodeIsSecure; + NvU16 tag; + FLCN *pFlcn = device->pSoe->pFlcn; + + dataSize = sizeof(soe_ucode_data[0]); + + // Initialize address of IMEM to 0x0 and set auto-increment on write + data = 0; + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_IMEMC, _OFFS, 0x0, data); + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_IMEMC, _BLK, 0x0, data); + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_IMEMC, _AINCW, 0x1, data); + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IMEMC(0), data); + + for (appCount = 0; appCount < pUcodeHeader -> numApps; appCount++) + { + appCodeStartOffset = pUcodeHeader -> apps[appCount].appCodeStartOffset; + appCodeSize = pUcodeHeader -> apps[appCount].appCodeSize; + appCodeImemOffset = pUcodeHeader -> apps[appCount].appCodeImemOffset; + appCodeIsSecure = pUcodeHeader -> apps[appCount].appCodeIsSecure; + appDataStartOffset = pUcodeHeader -> apps[appCount].appDataStartOffset; + appDataSize = pUcodeHeader -> apps[appCount].appDataSize; + appDataDmemOffset = pUcodeHeader -> apps[appCount].appDataDmemOffset; + + if(appCodeSize > 0) + { + // Mark the following code as secure or unsecure + data = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_IMEMC(0)); + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_IMEMC, _SECURE, appCodeIsSecure, data); + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IMEMC(0), data); + // Initialize IMEM tag. + // Writes to IMEM don't work if we don't do this + tag = (NvU16)(appCodeImemOffset >> 8); + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IMEMT(0), (NvU32) tag); + + // Copy over IMEM part of the ucode and tag along the way + for (i = 0; i < (appCodeSize / dataSize); i++) + { + // Increment tag for after every block (256 bytes) + if (i && ((i % ((256/dataSize))) == 0)) + { + tag++; + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IMEMT(0), (NvU32) tag); + } + + // Copy IMEM DWORD by DWORD + data = soe_ucode_data[(appCodeStartOffset / dataSize) + i]; + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IMEMD(0), data); + } + } + + if(appDataSize > 0) + { + // Initialize address of DMEM to appDataDmemOffset and set auto-increment on write + data = 0; + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMEMC, _OFFS, (appDataDmemOffset&0xFF), data); + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMEMC, _BLK, appDataDmemOffset>>8, data); + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMEMC, _AINCW, 0x1, data); + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_DMEMC(0), data); + + // Copy over DMEM part of the ucode + for (i = 0; i < (appDataSize / dataSize); i++) + { + // Copy DMEM DWORD by DWORD + data = soe_ucode_data[appDataStartOffset/dataSize + i]; + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_DMEMD(0), data); + } + } + } + + // + // In this ucode load path, we bit bang, we do not use DMA, + // so set REQUIRE_CTX to FALSE. This must be set before we start SOE. 
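+    // (Nothing in this path is fetched via DMA; IMEM and DMEM were filled
+    // directly through the IMEMD/DMEMD port writes above.)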
+ // + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_DMACTL, + DRF_NUM(_PFALCON_FALCON, _DMACTL, _REQUIRE_CTX, NV_FALSE)); + + // Write soe ucode entrypoint to boot vector register + data = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_BOOTVEC); + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_BOOTVEC, _VEC, pUcodeHeader->codeEntryPoint, data); + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_BOOTVEC, data); + + return NVL_SUCCESS; +} + +/* + * @Brief : Send a command to pFlcn for testing (this function is temporary) + * + * @param device The nvswitch device + * @param pFlcn The flcn + */ +static NV_STATUS +_nvswitch_soe_send_test_cmd +( + nvswitch_device *device +) +{ + RM_FLCN_CMD_SOE cmd; + NVSWITCH_TIMEOUT timeout; + NvU32 cmdSeqDesc; + NV_STATUS status; + + FLCN *pFlcn = device->pSoe->pFlcn; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.unitId = RM_SOE_UNIT_NULL; + // sending nothing but a header for UNIT_NULL + cmd.hdr.size = RM_FLCN_QUEUE_HDR_SIZE; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 5, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg - not used for now + NULL, // pPayload - not used for now + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_ASSERT(status == NV_OK); + return status; + } + + return status; +} + +NvlStatus +nvswitch_get_soe_ucode_binaries_lr10 +( + nvswitch_device *device, + const NvU32 **soe_ucode_data, + const NvU32 **soe_ucode_header +) +{ + NvU32 debug_mode; + + if (!soe_ucode_data || !soe_ucode_header) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SOE get ucode binaries BadArgs!\n", + __FUNCTION__); + return -NVL_BAD_ARGS; + } + + debug_mode = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_SCP, _CTL_STAT); + debug_mode = DRF_VAL(_SOE, _SCP_CTL_STAT, _DEBUG_MODE, debug_mode); + + if (debug_mode) + { + *soe_ucode_data = soe_ucode_data_lr10_dbg; + *soe_ucode_header = soe_ucode_header_lr10_dbg; + } + else + { + *soe_ucode_data = soe_ucode_data_lr10_prd; + *soe_ucode_header = soe_ucode_header_lr10_prd; + } + + return NVL_SUCCESS; +} + +/* + * @Brief : Load soe ucode image into SOE Falcon + * + * @param device The nvswitch device + */ +static NvlStatus +_nvswitch_load_soe_ucode_image +( + nvswitch_device *device +) +{ + NvlStatus status; + const NvU32 *soe_ucode_data; + const NvU32 *soe_ucode_header; + + status = nvswitch_get_soe_ucode_binaries(device, &soe_ucode_data, &soe_ucode_header); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to get SOE ucode binaries!\n", + __FUNCTION__); + return status; + } + + status = _nvswitch_soe_copy_ucode_cpubitbang(device, soe_ucode_data, + soe_ucode_header); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to copy SOE ucode!\n", + __FUNCTION__); + return status; + } + + return status; +} + +/* + * @Brief : Bootstrap SOE + * + * @param[in] device Bootstrap SOE on this device + */ +static NvlStatus +_nvswitch_soe_bootstrap +( + nvswitch_device *device +) +{ + NVSWITCH_TIMEOUT timeout; + NvU32 data; + FLCN *pFlcn; + + // POR requires SOE, no SOE, total failure. 
+ if (!NVSWITCH_ENG_IS_VALID(device, SOE, 0)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SOE is not present, failing driver load.\n", + __FUNCTION__); + return -NVL_INITIALIZATION_TOTAL_FAILURE; + } + + pFlcn = device->pSoe->pFlcn; + + // + // Start the SOE Falcon + // + data = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_CPUCTL); + data = FLD_SET_DRF(_PFALCON, _FALCON_CPUCTL, _STARTCPU, _TRUE, data); + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_CPUCTL, data); + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 20, &timeout); + + // + // We will exit this if we recieve bootstrap signal OR + // if we timeout waiting for bootstrap signal OR + // if bootstrap fails + // + while (1) + { + data = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_MAILBOX1); + if (data == SOE_BOOTSTRAP_SUCCESS) + { + pFlcn->engDescUc.initialized = NV_TRUE; + return NVL_SUCCESS; + } + + // + // Check if SOE has halted unexpectedly. + // + // The explicit check is required because the interrupts + // are not yet enabled as the device is still initializing. + // + if (soeIsCpuHalted_HAL(device, ((PSOE)pFlcn->pFlcnable))) + { + break; + } + + if (nvswitch_timeout_check(&timeout)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for SOE to complete bootstrap!" + "NV_PFALCON_FALCON_MAILBOX1 = 0x%08x\n", + __FUNCTION__, data); + NVSWITCH_ASSERT(0); + break; + } + } + + NVSWITCH_PRINT(device, SETUP, + "%s: Failed to bootstrap SOE.\n", + __FUNCTION__); + + // Log any failures SOE may have had during bootstrap + (void)soeService_HAL(device, ((PSOE)pFlcn->pFlcnable)); + return -NVL_ERR_INVALID_STATE; +} + +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) +/*! + * Helper function to dump some registers for debug. + * + * @param[in] device nvswitch_device pointer + */ +static void +dumpDebugRegisters +( + nvswitch_device *device +) +{ + NvU32 value; + + // Mail boxes and CPU control + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _MAILBOX0); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_MAILBOX0: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _MAILBOX1); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_MAILBOX1: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _CPUCTL); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_CPUCTL: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _DEBUGINFO); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_DEBUGINFO: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _EXCI); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_EXCI: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _SCTL); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_SCTL: 0x%x\n", + __FUNCTION__, + value); + + // Legacy steering and interrupts + value = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _STEER_INTR_LEGACY); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_NVLSAW_NVSPMC_STEER_INTR_LEGACY: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_SOE_EN_LEGACY); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_NVLSAW_NVSPMC_INTR_SOE_EN_LEGACY: 0x%x\n", + __FUNCTION__, + value); + + // Correctable steering and interrupts + value = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _STEER_INTR_CORRECTABLE); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_NVLSAW_NVSPMC_STEER_INTR_CORRECTABLE: 0x%x\n", + 
__FUNCTION__, + value); + + value = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_SOE_EN_CORRECTABLE); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_NVLSAW_NVSPMC_INTR_SOE_EN_CORRECTABLE: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SAW_RD32_LR10(device, _NVLSAW_NVSPMC, _INTR_SOE_LEGACY); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_NVLSAW_NVSPMC_INTR_SOE_LEGACY: 0x%x\n", + __FUNCTION__, + value); + + // EXTIO interrupts + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_MISC, _EXTIO_IRQSTAT); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_MISC_EXTIO_IRQSTAT: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_MISC, _EXTIO_IRQMASK); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_MISC_EXTIO_IRQMASK: 0x%x\n", + __FUNCTION__, + value); + + + // Falcon top level interrupts + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _IRQSTAT); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_IRQSTAT: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _IRQSTAT_ALIAS); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_IRQSTAT_ALIAS: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _IRQMODE); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_IRQMODE: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _IRQMASK); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_IRQMASK: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _IRQDEST); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_IRQDEST: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _IRQDEST2); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_IRQDEST2: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _IRQSCMASK); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_IRQSCMASK: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_REG_RD32(device, _THERM, _MSGBOX_COMMAND); + NVSWITCH_PRINT(device, ERROR, + "%s: MSGBOX_COMMAND: 0x%x\n", + __FUNCTION__, + value); + + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _RESET_PRIV_LEVEL_MASK); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK: 0x%x\n", + __FUNCTION__, + value); + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _IRQTMR_PRIV_LEVEL_MASK); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_IRQTMR_PRIV_LEVEL_MASK: 0x%x\n", + __FUNCTION__, + value); + value = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _EXE_PRIV_LEVEL_MASK); + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_EXE_PRIV_LEVEL_MASK: 0x%x\n", + __FUNCTION__, + value); +} +#endif // defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + +/* + * @Brief : Request SOE GFW image to exit and halt + * + * i) Query for SOE firmware validation state. + * ii) Request for SOE to exit and halt. + * iii) Wait fot IFR to complete and exit by halting SOE. + */ +static NvlStatus +_nvswitch_soe_request_gfw_image_halt +( + nvswitch_device *device +) +{ + NvU32 val; + NVSWITCH_TIMEOUT timeout; + FLCN* pFlcn = device->pSoe->pFlcn; + + // + // Poll for firmware boot state. + // GFW takes around 150ms to finish it's sequence. 
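+    // NV_GFW_SOE_BOOT is polled every 5ms until _PROGRESS is _COMPLETED and
+    // validation is no longer _IN_PROGRESS, or the timeout expires.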
+ // + nvswitch_timeout_create(1000 * NV_GFW_SOE_EXIT_AND_HALT_TIMEOUT, &timeout); + do + { + val = NVSWITCH_REG_RD32(device, _GFW, _SOE_BOOT); + if (FLD_TEST_DRF(_GFW, _SOE_BOOT, _PROGRESS, _COMPLETED, val) && + !FLD_TEST_DRF(_GFW, _SOE_BOOT, _VALIDATION_STATUS, _IN_PROGRESS, val)) + { + break; + } + + if (nvswitch_timeout_check(&timeout)) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_TIMEOUT, + "SOE reset timeout error(0)\n"); + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for SOE GFW boot to complete. rc = 0x%x.\n", + __FUNCTION__, val); + return -NVL_ERR_INVALID_STATE; + } + + nvswitch_os_sleep(5); + } while (NV_TRUE); + + // Check for firmware validation status. + if (!FLD_TEST_DRF(_GFW, _SOE_BOOT, _VALIDATION_STATUS, + _PASS_TRUSTED, val)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SOE Firmware validation failed. rc = 0x%x\n", + __FUNCTION__, val); + return -NVL_ERR_INVALID_STATE; + } + + // Request SOE GFW image to exit and halt. + val = NVSWITCH_REG_RD32(device, _GFW, _SOE_EXIT_AND_HALT); + val = FLD_SET_DRF(_GFW, _SOE_EXIT_AND_HALT, _REQUESTED, _YES, val); + NVSWITCH_REG_WR32(device, _GFW, _SOE_EXIT_AND_HALT, val); + + // + // Wait for SOE to halt. + // + do + { + val = flcnRegRead_HAL(device, pFlcn, NV_SOE_FALCON_CPUCTL); + if (FLD_TEST_DRF(_SOE, _FALCON_CPUCTL, _HALTED, _TRUE, val)) + { + NVSWITCH_PRINT(device, INFO, + "%s: Handshake with SOE GFW successful.\n", + __FUNCTION__); + break; + } + + if (nvswitch_timeout_check(&timeout)) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_TIMEOUT, + "SOE reset timeout error(1)\n"); + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for SOE GFW image to exit and halt.\n", + __FUNCTION__); + return -NVL_ERR_INVALID_STATE; + } + + nvswitch_os_sleep(5); + } while (NV_TRUE); + + return NVL_SUCCESS; +} + +void +nvswitch_soe_unregister_events_lr10 +( + nvswitch_device *device +) +{ + PFLCN pFlcn = device->pSoe->pFlcn; + PSOE pSoe = (PSOE)device->pSoe; + NV_STATUS status; + + // un-register thermal callback funcion + status = flcnQueueEventUnregister(device, pFlcn, + pSoe->thermEvtDesc); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to un-register thermal event handler.\n", + __FUNCTION__); + } +} + +/* + * @Brief : Register callback functions for events + * and messages from SOE. + */ +NvlStatus +nvswitch_soe_register_event_callbacks_lr10 +( + nvswitch_device *device +) +{ + PFLCN pFlcn = device->pSoe->pFlcn; + PSOE pSoe = (PSOE)device->pSoe; + NV_STATUS status; + + // Register Thermal callback funcion + status = flcnQueueEventRegister( + device, pFlcn, + RM_SOE_UNIT_THERM, + NULL, + nvswitch_therm_soe_callback_lr10, + NULL, + &pSoe->thermEvtDesc); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to register thermal event handler.\n", + __FUNCTION__); + return -NVL_ERR_INVALID_STATE; + } + + return NVL_SUCCESS; +} + +/* + * @Brief : Request SOE old driver image to provide L0 write permissions + * for reset registers to perform reset and boot up the new image. + */ +static NvlStatus +_nvswitch_soe_request_reset_permissions +( + nvswitch_device *device +) +{ + NVSWITCH_TIMEOUT timeout; + NvU32 reset_plm, engctl_plm; + + // Request reset access. + NVSWITCH_REG_WR32(device, _SOE, _RESET_SEQUENCE, + DRF_DEF(_SOE, _RESET_SEQUENCE, _REQUESTED, _YES)); + + // Poll on reset PLMs. + nvswitch_timeout_create(20 * NVSWITCH_INTERVAL_1MSEC_IN_NS, &timeout); + do + { + // Verify if SOE has given L0 write access for reset registers. 
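+        // Both the RESET and ENGCTL priv level masks must become level-0
+        // writable before the new driver image can reset the falcon.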
+ reset_plm = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _RESET_PRIV_LEVEL_MASK); + engctl_plm = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE_FALCON, _ENGCTL_PRIV_LEVEL_MASK); + + if (FLD_TEST_DRF(_SOE_FALCON, _RESET_PRIV_LEVEL_MASK, _WRITE_PROTECTION_LEVEL0, _ENABLE, reset_plm) && + FLD_TEST_DRF(_SOE_FALCON, _ENGCTL_PRIV_LEVEL_MASK, _WRITE_PROTECTION_LEVEL0, _ENABLE, engctl_plm)) + { + NVSWITCH_PRINT(device, INFO, + "%s: Got write access for reset registers from SOE.\n", + __FUNCTION__); + break; + } + + if (nvswitch_timeout_check(&timeout)) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_TIMEOUT, + "SOE reset timeout error(2)\n"); + NVSWITCH_PRINT(device, ERROR, + "%s: Timeout waiting for SOE to provide write access for reset registers.\n", + __FUNCTION__); + + NVSWITCH_PRINT(device, ERROR, + "%s: NV_SOE_FALCON_RESET_PRIV_LEVEL_MASK = 0x%x, NV_SOE_FALCON_ENGCTL_PRIV_LEVEL_MASK = 0x%x.\n", + __FUNCTION__, reset_plm, engctl_plm); + + return -NVL_ERR_INVALID_STATE; + } + + nvswitch_os_sleep(1); + } while (NV_TRUE); + + return NVL_SUCCESS; +} + +/* + * @Brief : Execute SOE pre-reset sequence for secure reset. + */ +NvlStatus +nvswitch_soe_prepare_for_reset_lr10 +( + nvswitch_device *device +) +{ + NvlStatus status; + NvU32 val; + + if (IS_FMODEL(device) || IS_RTLSIM(device) || IS_EMULATION(device)) + { + NVSWITCH_PRINT(device, SETUP, + "%s: Skipping SOE pre-reset sequence on pre-silicon.\n", + __FUNCTION__); + return NVL_SUCCESS; + } + + val = NVSWITCH_REG_RD32(device, _GFW, _SOE_PROGRESS_CODE); + if (!FLD_TEST_DRF(_GFW, _SOE_PROGRESS_CODE, _VALUE, _COMPLETED, val)) + { + // Request SOE GFW to exit and halt. + status = _nvswitch_soe_request_gfw_image_halt(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: GFW shutdown request failed!\n", + __FUNCTION__); + } + } + else + { + // + // The SOE image from previous driver load needs to be reset. + // Request reset permissions from that SOE image to perform the reset. + // + status = _nvswitch_soe_request_reset_permissions(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "%s: SOE reset request failed!\n", + __FUNCTION__); + } + } + + return status; +} + +/* + * @Brief : Bootstrap SOE on the specified device + * + * @param[in] device Bootstrap SOE on this device + */ +NvlStatus +nvswitch_init_soe_lr10 +( + nvswitch_device *device +) +{ + NvlStatus status; + + // Prepare SOE for reset. 
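+    //
+    // Overall bring-up sequence below: pre-reset handshake with whatever
+    // firmware is currently running, reset the falcon, load the driver's SOE
+    // ucode, bootstrap it, sanity-check the command/message queues, then
+    // register for SOE events.
+    //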
+ status = nvswitch_soe_prepare_for_reset(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_RESET, + "Failed to reset SOE(0)\n"); + return status; + } + + // Reset SOE + status = _nvswitch_reset_soe(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_RESET, + "Failed to reset SOE(1)\n"); + return status; + } + + // Load SOE + status = _nvswitch_load_soe_ucode_image(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_BOOTSTRAP, + "Failed to boot SOE(0)\n"); + return status; + } + + // Start SOE + status = _nvswitch_soe_bootstrap(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_BOOTSTRAP, + "Failed to boot SOE(1)\n"); + return status; + } + + // Sanity the command and message queues as a final check + if (_nvswitch_soe_send_test_cmd(device) != NV_OK) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_BOOTSTRAP, + "Failed to boot SOE(2)\n"); + +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + dumpDebugRegisters(device); +#endif // defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + + return -NVL_ERR_INVALID_STATE; + } + + // Register SOE callbacks + status = nvswitch_soe_register_event_callbacks(device); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_COMMAND_QUEUE, + "Failed to register SOE events\n"); + return status; + } + + NVSWITCH_PRINT(device, SETUP, + "%s: SOE successfully bootstrapped.\n", + __FUNCTION__); + + return status; +} + +/** + * @brief SOE construct + * + * @param[in] device nvswitch_device pointer + * @param[in] pFlcnable SOE pointer + * + * @return NV_OK + */ +static NV_STATUS +_soeConstruct_LR10 +( + nvswitch_device *device, + FLCNABLE *pFlcnable +) +{ + SOE *pSoe = (PSOE)pFlcnable; + FLCN *pFlcn = ENG_GET_FLCN(pFlcnable); + PFALCON_QUEUE_INFO pQueueInfo; + NV_STATUS status; + + NVSWITCH_ASSERT(pFlcn != NULL); + + // + // Set SOE specific Falcon state + // This is where any default Falcon state should be overridden if necessary. + // + pFlcn->name = "SOE"; + pFlcn->pFlcnable = pFlcnable; + pFlcn->bQueuesEnabled = NV_TRUE; + pFlcn->numQueues = SOE_QUEUE_NUM; + pFlcn->numSequences = RM_SOE_MAX_NUM_SEQUENCES; + pFlcn->bEmemEnabled = NV_TRUE; + pFlcn->engineTag = ENG_TAG_SOE; + + nvswitch_os_memset(pSoe->seqInfo, 0, sizeof(pSoe->seqInfo)); + + // Do the HAL dependent init for Falcon + status = flcnConstruct_HAL(device, pFlcn); + + pQueueInfo = pFlcn->pQueueInfo; + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueueInfo->pQueues != NULL); + + // + // Fill in the Message Queue handling details + // + pQueueInfo->maxUnitId = RM_SOE_UNIT_END; + pQueueInfo->maxMsgSize = sizeof(RM_FLCN_MSG_SOE); + pQueueInfo->initEventUnitId = RM_SOE_UNIT_INIT; + + return status; +} + +/** + * @brief SOE destruct + * + * @param[in] device nvswitch_device pointer + * @param[in] pFlcnable SOE pointer + */ +static void +_soeDestruct_LR10 +( + nvswitch_device *device, + FLCNABLE *pFlcnable +) +{ + flcnDestruct_HAL(device, ENG_GET_FLCN(pFlcnable)); +} + + /*! + * @brief Sets up the external configuration for accessing registers,etc. + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe FLCNABLE pointer + * @param[in] pConfig FALCON_EXTERNAL_CONFIG pointer + * + * @returns void. 
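+ *
+ * The command/message queue head and tail strides below are derived from the
+ * spacing of consecutive queue registers (e.g. NV_SOE_QUEUE_HEAD(1) -
+ * NV_SOE_QUEUE_HEAD(0)) rather than hard-coded.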
+ */ +static void +_soeGetExternalConfig_LR10 +( + nvswitch_device *device, + FLCNABLE *pSoe, + PFALCON_EXTERNAL_CONFIG pConfig +) +{ + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + PFALCON_QUEUE_INFO pQueueInfo; + + NVSWITCH_ASSERT(pFlcn != NULL); + + pConfig->bResetInPmc = NV_TRUE; + pConfig->fbifBase = NV_SOE_FBIF_TRANSCFG(0); + + pQueueInfo = pFlcn->pQueueInfo; + NVSWITCH_ASSERT(pQueueInfo != NULL); + + // Populate the falcon queue details + pQueueInfo->cmdQHeadSize = NV_SOE_QUEUE_HEAD__SIZE_1; + pQueueInfo->cmdQTailSize = NV_SOE_QUEUE_TAIL__SIZE_1; + pQueueInfo->msgQHeadSize = NV_SOE_MSGQ_HEAD__SIZE_1; + pQueueInfo->msgQTailSize = NV_SOE_MSGQ_TAIL__SIZE_1; + + pQueueInfo->cmdQHeadBaseAddress = NV_SOE_QUEUE_HEAD(0); + pQueueInfo->cmdQHeadStride = NV_SOE_QUEUE_HEAD(1) - NV_SOE_QUEUE_HEAD(0); + pQueueInfo->cmdQTailBaseAddress = NV_SOE_QUEUE_TAIL(0); + pQueueInfo->cmdQTailStride = NV_SOE_QUEUE_TAIL(1) - NV_SOE_QUEUE_TAIL(0); + pQueueInfo->msgQHeadBaseAddress = NV_SOE_MSGQ_HEAD(0); + pQueueInfo->msgQTailBaseAddress = NV_SOE_MSGQ_TAIL(0); + + pQueueInfo->maxCmdQueueIndex = SOE_RM_CMDQ_LOG_ID__LAST; +} + +/*! + * @brief Top level service routine + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * + * @return 32-bit interrupt status AFTER all known interrupt-sources were + * serviced. + */ +static NvU32 +_soeService_LR10 +( + nvswitch_device *device, + PSOE pSoe +) +{ + NvBool bRecheckMsgQ = NV_FALSE; + NvU32 clearBits = 0; + NvU32 intrStatus; + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + + if (pFlcn == NULL) + { + NVSWITCH_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + + // Get the IRQ status and mask the sources not directed to host. + intrStatus = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQSTAT) & + flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQMASK) & + flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQDEST); + + // Exit if there is nothing to do + if (intrStatus == 0) + { + return 0; + } + + // Service pending interrupts + if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _WDTMR, _TRUE)) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_WATCHDOG, + "SOE Watchdog error\n"); + NVSWITCH_PRINT(device, INFO, + "%s: Watchdog timer fired. We do not support this " + "yet.\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + + clearBits |= DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _WDTMR, _SET); + } + + if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _EXTERR, _TRUE)) + { + clearBits |= DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _EXTERR, _SET); + + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_EXTERR, "SOE EXTERR\n"); + soeServiceExterr_HAL(device, pSoe); + } + + if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _HALT, _TRUE)) + { + clearBits |= DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _HALT, _SET); + + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_SOE_HALT, "SOE HALTED\n"); + soeServiceHalt_HAL(device, pSoe); + } + + if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _SWGEN0, _TRUE)) + { + clearBits |= DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _SWGEN0, _SET); + + NVSWITCH_PRINT(device, INFO, + "%s: Received a message from SOE via SWGEN0\n", + __FUNCTION__); + soeProcessMessages(device, pSoe); + bRecheckMsgQ = NV_TRUE; + } + + if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _SWGEN1, _TRUE)) + { + clearBits |= DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _SWGEN1, _SET); + + NVSWITCH_PRINT(device, INFO, + "%s: Received a SWGEN1 interrupt\n", + __FUNCTION__); + } + + // Clear any sources that were serviced and get the new status. 
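+    //
+    // IRQSCLR is effectively write-1-to-clear: only the bits accumulated in
+    // clearBits are cleared; pending sources that were not serviced above are
+    // left untouched for the next pass.
+    //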
+ flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQSCLR, clearBits); + + // Re-read interrupt status before retriggering to return correct value + intrStatus = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQSTAT) & + flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQMASK) & + flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQDEST); + + // + // If we just processed a SWGEN0 message queue interrupt, peek + // into the message queue and see if any messages were missed the last time + // the queue was purged (above). If it is not empty, re-generate SWGEN0 + // (since it is now cleared) and exit. As long as an interrupt is pending, + // this function will be re-entered and the message(s) will be processed. + // + if (bRecheckMsgQ) + { + PFALCON_QUEUE_INFO pQueueInfo; + FLCNQUEUE *pMsgQ; + + pQueueInfo = pFlcn->pQueueInfo; + + NVSWITCH_ASSERT(pQueueInfo != NULL); + NVSWITCH_ASSERT(pQueueInfo->pQueues != NULL); + + pMsgQ = &pQueueInfo->pQueues[SOE_RM_MSGQ_LOG_ID]; + + if (!pMsgQ->isEmpty(device, pFlcn, pMsgQ)) + { + // It is not necessary to RMW IRQSSET (zeros are ignored) + flcnRegWrite_HAL(device, pFlcn, NV_PFALCON_FALCON_IRQSSET, + DRF_DEF(_PFALCON, _FALCON_IRQSSET, _SWGEN0, _SET)); + } + } + + flcnIntrRetrigger_HAL(device, pFlcn); + + return intrStatus; +} + +/*! + * Called by soeService to handle a SOE halt. This function will dump the + * current status of SOE and then trap the CPU for further inspection for a + * debug build. + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE object pointer + */ +static void +_soeServiceHalt_LR10 +( + nvswitch_device *device, + PSOE pSoe +) +{ + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + NvU32 value; + + NVSWITCH_PRINT(device, ERROR, + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + "!! ** SOE HALTED ** !!\n" + "!! Please file a bug with the following information. !!\n" + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"); + + // TODO: Break out the register dumps to specific hals +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + dumpDebugRegisters(device); + flcnDbgInfoCapturePcTrace_HAL(device, pFlcn); +#endif // defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + + // + // If the halt was related to security, we store the information in + // MAILBOX0. Print out an error that clearly indicates the reason for the + // halt. + // + value = flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_MAILBOX0); + + if (value == LSF_FALCON_MODE_TOKEN_FLCN_INSECURE) + { + NVSWITCH_PRINT(device, ERROR, + "SOE HAS HALTED BECAUSE IT IS NOT RUNNING IN " + "SECURE MODE\n"); + } + + NVSWITCH_ASSERT(0); +} + +/*! + * Depending on the direction of the copy, copies 'sizeBytes' to/from 'pBuf' + * from/to DMEM offset dmemAddr. Note the below statement about dmemAddr. + * The address must be located in the EMEM region located directly above the + * maximum virtual address of DMEM. 
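+ * (For example, with a hypothetical DMEM tag width of 8 bits the EMEM
+ * aperture would begin at DMEM VA 2^(8 + 8) = 0x10000; see
+ * _soeGetEmemStartOffset_LR10 below for how the bound is computed.)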
+ * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * @param[in] dmemAddr The DMEM address for the copy + * @param[out] pBuf pPointer to the buffer containing the data to copy + * @param[in] sizeBytes The number of bytes to copy from EMEM + * @param[in] port EMEM port + * @param[in] bCopyFrom Boolean representing the copy direction (to/from EMEM) + */ +static void +_soeEmemTransfer_LR10 +( + nvswitch_device *device, + PSOE pSoe, + NvU32 dmemAddr, + NvU8 *pBuf, + NvU32 sizeBytes, + NvU8 port, + NvBool bCopyFrom +) +{ + NvU32 numWords; + NvU32 numBytes; + NvU32 *pData = (NvU32 *)pBuf; + NvU32 startEmem; + NvU32 endEmem; + NvU32 reg32; + NvU32 i; + NvU32 ememCOffset; + NvU32 ememDOffset; + NvU32 maxEmemPorts = soeEmemPortSizeGet_HAL(device, pSoe); + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + NV_STATUS status; + + if (pFlcn == NULL) + { + NVSWITCH_ASSERT(pFlcn != NULL); + return; + } + + status = soeEmemPortToRegAddr_HAL(device, pSoe, port, &ememCOffset, &ememDOffset); + if (status != NV_OK) + { + NVSWITCH_ASSERT(status == NV_OK); + return; + } + + // Simply return if the copy-size is zero + if (sizeBytes == 0) + { + NVSWITCH_PRINT(device, ERROR, + "%s: zero-byte copy requested.\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + return; + } + + // The source must be 4-byte aligned + if (!NV_IS_ALIGNED(dmemAddr, FLCN_DMEM_ACCESS_ALIGNMENT)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Address is not 4-byte aligned. dmemAddr=0x%08x\n", + __FUNCTION__, dmemAddr); + NVSWITCH_ASSERT(0); + return; + } + + // Check the port. Only one port for SOE LR10. + if (port >= maxEmemPorts) + { + NVSWITCH_PRINT(device, ERROR, + "%s: only %d ports supported. Accessed port=%d\n", + __FUNCTION__, maxEmemPorts, port); + NVSWITCH_ASSERT(0); + return; + } + + // + // Verify that the dmemAddr address is located in EMEM, above addressable DMEM, + // and that the copy does not overshoot the end of EMEM. + // + startEmem = soeGetEmemStartOffset_HAL(device, pSoe); + + // END_EMEM = START_EMEM + SIZE_EMEM (the size of EMEM is given in blocks) + endEmem = startEmem + soeGetEmemSize_HAL(device, pSoe); + + if (dmemAddr < startEmem || (dmemAddr + sizeBytes) > endEmem) + { + NVSWITCH_PRINT(device, ERROR, + "%s: copy must be in EMEM aperature [0x%x, 0x%x)\n", + __FUNCTION__, startEmem, endEmem); + NVSWITCH_ASSERT(0); + return; + } + + // Convert to EMEM offset for use by EMEMC/EMEMD + dmemAddr -= startEmem; + + // Calculate the number of words and bytes + numWords = sizeBytes >> 2; + numBytes = sizeBytes & 0x3; + + // Mask off all but the OFFSET and BLOCK in EMEM offset + reg32 = dmemAddr & (DRF_SHIFTMASK(NV_SOE_EMEMC_OFFS) | + DRF_SHIFTMASK(NV_SOE_EMEMC_BLK)); + + if (bCopyFrom) + { + // mark auto-increment on read + reg32 = FLD_SET_DRF(_SOE, _EMEMC, _AINCR, _TRUE, reg32); + } + else + { + // mark auto-increment on write + reg32 = FLD_SET_DRF(_SOE, _EMEMC, _AINCW, _TRUE, reg32); + } + flcnRegWrite_HAL(device, pFlcn, ememCOffset, reg32); + + // Directly copy as many words as possible + for (i = 0; i < numWords; i++) + { + if (bCopyFrom) + { + pData[i] = flcnRegRead_HAL(device, pFlcn, ememDOffset); + } + else + { + flcnRegWrite_HAL(device, pFlcn, ememDOffset, pData[i]); + } + } + + // Check if there are leftover bytes to copy + if (numBytes > 0) + { + NvU32 bytesCopied = numWords << 2; + + // + // Read the contents first. If we're copying to the EMEM, we've set + // autoincrement on write, so reading does not modify the pointer. 
We
+        // can, thus, do a read/modify/write without needing to worry about the
+        // pointer having moved forward. There is no special explanation needed
+        // if we're copying from the EMEM since this is the last access to HW
+        // in that case.
+        //
+        reg32 = flcnRegRead_HAL(device, pFlcn, ememDOffset);
+        if (bCopyFrom)
+        {
+            for (i = 0; i < numBytes; i++)
+            {
+                pBuf[bytesCopied + i] = ((NvU8 *)&reg32)[i];
+            }
+        }
+        else
+        {
+            for (i = 0; i < numBytes; i++)
+            {
+                ((NvU8 *)&reg32)[i] = pBuf[bytesCopied + i];
+            }
+            flcnRegWrite_HAL(device, pFlcn, ememDOffset, reg32);
+        }
+    }
+}
+
+/*!
+ * Get the EMEM size in bytes
+ *
+ * @param[in] device nvswitch_device pointer
+ * @param[in] pSoe   SOE pointer
+ */
+static NvU32
+_soeGetEmemSize_LR10
+(
+    nvswitch_device *device,
+    PSOE             pSoe
+)
+{
+    NvU32 data = flcnRegRead_HAL(device, ENG_GET_FLCN(pSoe), NV_SOE_HWCFG);
+    return DRF_VAL(_SOE, _HWCFG, _EMEM_SIZE, data) * FLCN_BLK_ALIGNMENT;
+}
+
+/*!
+ * Get the EMEM start offset in DMEM VA space
+ *
+ * @param[in] device nvswitch_device pointer
+ * @param[in] pSoe   SOE pointer
+ */
+static NvU32
+_soeGetEmemStartOffset_LR10
+(
+    nvswitch_device *device,
+    PSOE             pSoe
+)
+{
+    //
+    // EMEM is mapped at the top of DMEM VA space
+    // START_EMEM = DMEM_VA_MAX = 2^(DMEM_TAG_WIDTH + 8)
+    //
+    NvU32 data = flcnRegRead_HAL(device, ENG_GET_FLCN(pSoe), NV_SOE_FALCON_HWCFG1);
+    return (1 << (DRF_VAL(_SOE, _FALCON_HWCFG1, _DMEM_TAG_WIDTH, data) + 8));
+}
+
+/*!
+ * Get the EMEMC/D register addresses for the specified port
+ *
+ * @param[in]  device     nvswitch_device pointer
+ * @param[in]  pSoe       SOE pointer
+ * @param[in]  port       EMEM port number
+ * @param[out] pEmemCAddr BAR0 address of the specified EMEMC port
+ * @param[out] pEmemDAddr BAR0 address of the specified EMEMD port
+ */
+static NV_STATUS
+_soeEmemPortToRegAddr_LR10
+(
+    nvswitch_device *device,
+    PSOE             pSoe,
+    NvU32            port,
+    NvU32           *pEmemCAddr,
+    NvU32           *pEmemDAddr
+)
+{
+    if (!pEmemCAddr || !pEmemDAddr)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+    if (pEmemCAddr)
+    {
+        *pEmemCAddr = NV_SOE_EMEMC(port);
+    }
+    if (pEmemDAddr)
+    {
+        *pEmemDAddr = NV_SOE_EMEMD(port);
+    }
+    return NV_OK;
+}
+
+/*!
+ * Called by soeService to handle a SOE exterr. This function will dump the
+ * current status of SOE and then trap the CPU for further inspection for a
+ * debug build.
+ *
+ * @param[in] device nvswitch_device object pointer
+ * @param[in] pSoe   SOE object pointer
+ */
+static void
+_soeServiceExterr_LR10
+(
+    nvswitch_device *device,
+    PSOE             pSoe
+)
+{
+    PFLCN pFlcn = ENG_GET_FLCN(pSoe);
+    NvU32 extErrAddrOffset = 0, extErrStatOffset = 0;
+    NvU32 exterrStatVal;
+
+    NVSWITCH_PRINT(device, ERROR,
+        "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+        "!! ** SOE EXTERR **                                  !!\n"
+        "!! Please file a bug with the following information.
!!\n" + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"); + + if (pFlcn == NULL) + { + NVSWITCH_ASSERT(0); + return; + } + + NVSWITCH_PRINT(device, ERROR, + "<<<<<<<<<<< SOE DEBUG INFORMATION >>>>>>>>>>>\n"); + NVSWITCH_PRINT(device, ERROR, + "OS VERSION (FALCON_OS): %u\n", + flcnRegRead_HAL(device, pFlcn, NV_PFALCON_FALCON_OS)); + + if (soeGetExtErrRegAddrs_HAL(device, pSoe, &extErrAddrOffset, &extErrStatOffset) != NV_OK) + { + NVSWITCH_ASSERT(0); + return; + } + + NVSWITCH_PRINT(device, ERROR, + "EXTERRADDR : %u\n", + flcnRegRead_HAL(device, pFlcn, extErrAddrOffset)); + + exterrStatVal = flcnRegRead_HAL(device, pFlcn, extErrStatOffset); + NVSWITCH_PRINT(device, ERROR, + "EXTERRSTAT : %u\n", exterrStatVal); + NVSWITCH_PRINT(device, ERROR, + "(AT PC) : 0x%08X\n", + DRF_VAL(_SOE, _FALCON_EXTERRSTAT, _PC, exterrStatVal)); + + // + // HW will continue to assert this interrupt as long as the _VALID bit is + // set. Clear it to allow reporting of further failures since we have + // already alerted the user that a transaction has failed. + // + flcnRegWrite_HAL(device, pFlcn, extErrStatOffset, FLD_SET_DRF(_SOE, _FALCON_EXTERRSTAT, _VALID, _FALSE, exterrStatVal)); + + // Break to allow the user to inspect this on a debug build. + NVSWITCH_ASSERT(0); +} + +/*! + * Get the bar0 offsets of NV_SOE_FALCON_EXTERRADDR and/or NV_SOE_FALCON_EXTERRSTAT + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * @param[out] pExtErrAddr BAR0 offset of NV_SOE_FALCON_EXTERRADDR + * @param[out] pExtErrAddr BAR0 offset of NV_SOE_FALCON_EXTERRSTAT + */ +static NV_STATUS +_soeGetExtErrRegAddrs_LR10 +( + nvswitch_device *device, + PSOE pSoe, + NvU32 *pExtErrAddr, + NvU32 *pExtErrStat +) +{ + if (!pExtErrAddr || !pExtErrStat) + { + return NV_ERR_INVALID_ARGUMENT; + } + if (pExtErrAddr) + { + *pExtErrAddr = NV_SOE_FALCON_EXTERRADDR; + } + if (pExtErrStat) + { + *pExtErrStat = NV_SOE_FALCON_EXTERRSTAT; + } + return NV_OK; +} + +/* + * Get number of EMEM ports + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + */ +static NvU32 +_soeEmemPortSizeGet_LR10 +( + nvswitch_device *device, + PSOE pSoe +) +{ + return NV_SOE_EMEMC__SIZE_1; +} + +/** + * @brief sets pEngDescUc and pEngDescBc to the discovered + * engine that matches this flcnable instance + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * @param[out] pEngDescUc pointer to the UniCast Engine + * Descriptor + * @param[out] pEngDescBc pointer to the BroadCast Engine + * Descriptor + */ +static void +_soeFetchEngines_LR10 +( + nvswitch_device *device, + FLCNABLE *pSoe, + ENGINE_DESCRIPTOR_TYPE *pEngDescUc, + ENGINE_DESCRIPTOR_TYPE *pEngDescBc +) +{ + pEngDescUc->initialized = NV_FALSE; + if (NVSWITCH_ENG_IS_VALID(device, SOE, 0)) + { + pEngDescUc->base = NVSWITCH_GET_ENG(device, SOE, , 0); + } + else + { + pEngDescUc->base = 0; + } + + pEngDescBc->initialized = NV_FALSE; + pEngDescBc->base = 0; +} + +/*! 
+ * @brief Determine if the SOE Falcon CPU is halted + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * + * @return NvBool reflecting the SOE Falcon CPU halted state + */ +static NvBool +_soeIsCpuHalted_LR10 +( + nvswitch_device *device, + PSOE pSoe +) +{ + NvU32 data = flcnRegRead_HAL(device, ENG_GET_FLCN(pSoe), NV_PFALCON_FALCON_CPUCTL); + return (FLD_TEST_DRF(_PFALCON, _FALCON_CPUCTL, _HALTED, _TRUE, data)); +} + +static NvlStatus +_soeDmaStartTest +( + nvswitch_device *device, + void *cpuAddr, + NvU64 dmaHandle, + NvU16 xferSize, + NvU8 subCmd +) +{ + FLCN *pFlcn = device->pSoe->pFlcn; + NvU32 cmdSeqDesc; + NV_STATUS status; + RM_FLCN_CMD_SOE cmd; + RM_SOE_CORE_CMD_DMA_TEST *pDmaCmd; + NVSWITCH_TIMEOUT timeout; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.unitId = RM_SOE_UNIT_CORE; + cmd.hdr.size = sizeof(cmd); + + pDmaCmd = &cmd.cmd.core.dma_test; + RM_FLCN_U64_PACK(&pDmaCmd->dmaHandle, &dmaHandle); + pDmaCmd->xferSize = xferSize; + pDmaCmd->dataPattern = SOE_DMA_TEST_XFER_PATTERN; + pDmaCmd->cmdType = RM_SOE_CORE_CMD_DMA_SELFTEST; + pDmaCmd->subCmdType = subCmd; + + cmdSeqDesc = 0; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 5, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg + NULL, // pPayload + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "Failed to send DMA test command to SOE\n"); + return -NVL_ERR_INVALID_STATE; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_soeValidateDmaTestResult +( + nvswitch_device *device, + void *cpuAddr, + NvU16 xferSize +) +{ + NvU16 iter; + + // Verify data written by SOE DMA matches what we expect. + for (iter = 0; iter < SOE_DMA_TEST_BUF_SIZE; iter++) + { + NvU8 data = ((NvU8*) cpuAddr)[iter]; + + // SOE would only touch data as much as the xfer size. + if (iter < xferSize) + { + if (data != SOE_DMA_TEST_XFER_PATTERN) + { + NVSWITCH_PRINT(device, ERROR, "Incorrect data byte at offset %d = 0x%04x" + " for xfersize = %d\n", iter, data, xferSize); + return -NVL_ERR_INVALID_STATE; + } + } + // We expect the rest of the data to be at init value. + else + { + if (data != SOE_DMA_TEST_INIT_PATTERN) + { + NVSWITCH_PRINT(device, ERROR, "Incorrect data byte at offset %d = 0x%04x" + " for xferSize = 0x%04x\n", iter, data, xferSize); + return -NVL_ERR_INVALID_STATE; + } + } + } + + return NVL_SUCCESS; +} + +static NvlStatus +_soeDmaSelfTest +( + nvswitch_device *device +) +{ + NvlStatus ret; + void *cpuAddr; + NvU64 dmaHandle; + NvU16 xferSize; + + ret = nvswitch_os_alloc_contig_memory(device->os_handle, &cpuAddr, SOE_DMA_TEST_BUF_SIZE, + (device->dma_addr_width == 32)); + + if (ret != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "nvswitch_os_alloc_contig_memory returned %d\n", ret); + return ret; + } + + // SOE DMA Write test + + nvswitch_os_memset(cpuAddr, SOE_DMA_TEST_INIT_PATTERN, SOE_DMA_TEST_BUF_SIZE); + + ret = nvswitch_os_map_dma_region(device->os_handle, cpuAddr, &dmaHandle, + SOE_DMA_TEST_BUF_SIZE, NVSWITCH_DMA_DIR_TO_SYSMEM); + + if (ret != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "nvswitch_os_map_dma_region returned %d\n", ret); + goto _soeDmaSelfTest_exit; + } + + // SOE DMA transfer sizes are in powers of 2. 
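+    //
+    // Each pass doubles xferSize, so the write test covers every power of two
+    // between SOE_DMA_MIN_SIZE and SOE_DMA_MAX_SIZE (for instance 4, 8, 16,
+    // ... bytes if the minimum were 4; the real bounds come from the SOE
+    // interface headers).  The buffer is reset to the INIT pattern after each
+    // pass so data left over from a previous size cannot mask a failure.
+    //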
+ for (xferSize = SOE_DMA_MIN_SIZE; xferSize <= SOE_DMA_MAX_SIZE; xferSize <<= 1) + { + ret = nvswitch_os_sync_dma_region_for_device(device->os_handle, dmaHandle, SOE_DMA_TEST_BUF_SIZE, + NVSWITCH_DMA_DIR_TO_SYSMEM); + + if (ret != NVL_SUCCESS) + break; + + ret = _soeDmaStartTest(device, cpuAddr, dmaHandle, xferSize, + RM_SOE_DMA_WRITE_TEST_SUBCMD); + + if (ret != NVL_SUCCESS) + break; + + ret = nvswitch_os_sync_dma_region_for_cpu(device->os_handle, dmaHandle, SOE_DMA_TEST_BUF_SIZE, + NVSWITCH_DMA_DIR_TO_SYSMEM); + + if (ret != NVL_SUCCESS) + break; + + ret = _soeValidateDmaTestResult(device, cpuAddr, xferSize); + + if (ret != NVL_SUCCESS) + break; + + nvswitch_os_memset(cpuAddr, SOE_DMA_TEST_INIT_PATTERN, SOE_DMA_TEST_BUF_SIZE); + } + + nvswitch_os_unmap_dma_region(device->os_handle, cpuAddr, dmaHandle, + SOE_DMA_TEST_BUF_SIZE, NVSWITCH_DMA_DIR_TO_SYSMEM); + + if (ret != NVL_SUCCESS) + goto _soeDmaSelfTest_exit; + + // SOE DMA read test + + nvswitch_os_memset(cpuAddr, SOE_DMA_TEST_INIT_PATTERN, SOE_DMA_TEST_BUF_SIZE); + + // + // 4B/8B reads will overfetch 16B from PCIe. The Falcon logic ignores the extra + // data. In the case of this test the reads only occur from the start of + // a DMA mapped buffer which is larger than 16B, hence the selftest does + // not need special handling for this behavior. However this will need to + // be handled for other cases where SW cannot guarentee that the overfetch + // will not exceed mapped regions. + // + // + + ret = nvswitch_os_map_dma_region(device->os_handle, cpuAddr, &dmaHandle, + SOE_DMA_TEST_BUF_SIZE, NVSWITCH_DMA_DIR_FROM_SYSMEM); + + if (ret != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "nvswitch_os_map_dma_region returned %d\n", ret); + goto _soeDmaSelfTest_exit; + } + + for (xferSize = SOE_DMA_MIN_SIZE; xferSize <= SOE_DMA_MAX_SIZE; xferSize <<= 1) + { + ret = nvswitch_os_sync_dma_region_for_cpu(device->os_handle, dmaHandle, SOE_DMA_TEST_BUF_SIZE, + NVSWITCH_DMA_DIR_FROM_SYSMEM); + + if (ret != NVL_SUCCESS) + break; + + // Fill in relevant data for the read test. + nvswitch_os_memset(cpuAddr, SOE_DMA_TEST_XFER_PATTERN, xferSize); + + ret = nvswitch_os_sync_dma_region_for_device(device->os_handle, dmaHandle, SOE_DMA_TEST_BUF_SIZE, + NVSWITCH_DMA_DIR_FROM_SYSMEM); + + if (ret != NVL_SUCCESS) + break; + + ret = _soeDmaStartTest(device, cpuAddr, dmaHandle, xferSize, + RM_SOE_DMA_READ_TEST_SUBCMD); + + if (ret != NVL_SUCCESS) + break; + } + + nvswitch_os_unmap_dma_region(device->os_handle, cpuAddr, dmaHandle, + SOE_DMA_TEST_BUF_SIZE, NVSWITCH_DMA_DIR_FROM_SYSMEM); + +_soeDmaSelfTest_exit: + + nvswitch_os_free_contig_memory(device->os_handle, cpuAddr, SOE_DMA_TEST_BUF_SIZE); + + return ret; +} + +static NvlStatus +_soeTestDma_LR10 +( + nvswitch_device *device +) +{ + NvlStatus retval; + + retval = _soeDmaSelfTest(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "SOE DMA selftest failed\n"); + } + else + { + NVSWITCH_PRINT(device, INFO, "SOE DMA selftest succeeded\n"); + } + + return retval; +} + +/*! + * @brief Send the EOM parameters to SOE + * + * @param[in] device nvswitch device pointer + * @param[in] mode Node of EOM + * @param[in] nblks Number of blocks + * @param[in] nerrs Number of Errors. 
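+ * @param[in] berEyeSel BER eye selection, forwarded as-is in the SOE BIF command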
+ * + * @return NVL_SUCCESS on success + */ +static NvlStatus +_soeSetPexEOM_LR10 +( + nvswitch_device *device, + NvU8 mode, + NvU8 nblks, + NvU8 nerrs, + NvU8 berEyeSel +) +{ + FLCN *pFlcn = device->pSoe->pFlcn; + NvU32 cmdSeqDesc = 0; + NV_STATUS status = NV_OK; + RM_FLCN_CMD_SOE cmd; + RM_SOE_BIF_CMD_EOM *pBifCmd = NULL; + NVSWITCH_TIMEOUT timeout = {0}; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.unitId = RM_SOE_UNIT_BIF; + cmd.hdr.size = RM_SOE_CMD_SIZE(BIF, EOM); + cmd.cmd.bif.cmdType = RM_SOE_BIF_CMD_UPDATE_EOM; + // + // We use SOE to set the EOM UPHY register since its Decode trapped and + // hence CPU accessible. + // + pBifCmd = &cmd.cmd.bif.eomctl; + pBifCmd->mode = mode; + pBifCmd->nblks = nblks; + pBifCmd->nerrs = nerrs; + pBifCmd->berEyeSel = berEyeSel; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 5, &timeout); + + status = flcnQueueCmdPostBlocking(device, + pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg + NULL, // pPayload + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to Set EOM via SOE, Error 0x%x\n", + __FUNCTION__, status); + return -NVL_ERR_INVALID_STATE; + } + + return NVL_SUCCESS; +} + +/*! + * @brief Send the EOM parameters to SOE + * + * @param[in] device nvswitch device pointer + * @param[in] mode Node of EOM + * @param[in] nblks Number of blocks + * @param[in] nerrs Number of Errors. + * + * @return NVL_SUCCESS on success + */ +static NvlStatus +_soeGetPexEomStatus_LR10 +( + nvswitch_device *device, + NvU8 mode, + NvU8 nblks, + NvU8 nerrs, + NvU8 berEyeSel, + NvU32 laneMask, + NvU16 *pEomStatus +) +{ + FLCN *pFlcn = device->pSoe->pFlcn; + NvU32 cmdSeqDesc = 0; + NV_STATUS status = NV_OK; + RM_FLCN_CMD_SOE cmd; + RM_SOE_BIF_CMD_EOM_STATUS *pBifCmd = NULL; + NVSWITCH_TIMEOUT timeout = { 0 }; + NvU64 dmaHandle = 0; + NvU8 *pReadBuffer = NULL; + NvU32 bufferSize = BIF_MAX_PCIE_LANES * sizeof(NvU16); + + if (bufferSize > SOE_DMA_MAX_SIZE) + { + NVSWITCH_PRINT(device, ERROR, "%s: Buffer size too large\n", + __FUNCTION__); + return -NVL_BAD_ARGS; + } + + // Create DMA mapping + status = nvswitch_os_alloc_contig_memory(device->os_handle, + (void**)&pReadBuffer, bufferSize, (device->dma_addr_width == 32)); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to allocate contig memory\n", + __FUNCTION__); + return status; + } + + status = nvswitch_os_map_dma_region(device->os_handle, + pReadBuffer, + &dmaHandle, + bufferSize, + NVSWITCH_DMA_DIR_TO_SYSMEM); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to map dma region to sysmem\n"); + nvswitch_os_free_contig_memory(device->os_handle, pReadBuffer, bufferSize); + return status; + } + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + nvswitch_os_memset(pReadBuffer, 0, bufferSize); + + cmd.hdr.unitId = RM_SOE_UNIT_BIF; + cmd.hdr.size = RM_SOE_CMD_SIZE(BIF, EOM_STATUS); + cmd.cmd.bif.cmdType = RM_SOE_BIF_CMD_GET_EOM_STATUS; + + pBifCmd = &cmd.cmd.bif.eomStatus; + pBifCmd->mode = mode; + pBifCmd->nblks = nblks; + pBifCmd->nerrs = nerrs; + pBifCmd->berEyeSel = berEyeSel; + pBifCmd->laneMask = laneMask; + RM_FLCN_U64_PACK(&pBifCmd->dmaHandle, &dmaHandle); + + status = nvswitch_os_sync_dma_region_for_device(device->os_handle, dmaHandle, + bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "Failed to yield to DMA controller\n"); + goto _soeGetPexEomStatus_LR10_exit; + } + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 5, 
&timeout); + + status = flcnQueueCmdPostBlocking(device, + pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg + NULL, // pPayload + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to Get EOM status via SOE, Error 0x%x\n", + __FUNCTION__, status); + status = -NVL_ERR_INVALID_STATE; + goto _soeGetPexEomStatus_LR10_exit; + } + + status = nvswitch_os_sync_dma_region_for_cpu(device->os_handle, dmaHandle, + bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "DMA controller failed to yield back\n"); + goto _soeGetPexEomStatus_LR10_exit; + } + + nvswitch_os_memcpy(((NvU8*)pEomStatus), pReadBuffer, bufferSize); + +_soeGetPexEomStatus_LR10_exit : + nvswitch_os_unmap_dma_region(device->os_handle, pReadBuffer, dmaHandle, + bufferSize, NVSWITCH_DMA_DIR_TO_SYSMEM); + nvswitch_os_free_contig_memory(device->os_handle, pReadBuffer, bufferSize); + + return status; +} + +/*! + * @brief Get the register values of UPHY registers + * + * Read the register value from a scratch register updated by SOE. + * + * @param[in] device nvswitch device pointer + * @param[in] regAddress Register address whose value is to be retrieved + * @param[in] laneSelectMask Mask of lanes to read from + * @param[out] *pRegValue Value of register address + * + * Read the register value from a scratch register updated by SOE. + * + * @return NVL_SUCCESS on success + */ +static NvlStatus +_soeGetUphyDlnCfgSpace_LR10 +( + nvswitch_device *device, + NvU32 regAddress, + NvU32 laneSelectMask, + NvU16 *pRegValue +) +{ + FLCN *pFlcn = device->pSoe->pFlcn; + NvU32 cmdSeqDesc = 0; + NV_STATUS status = NV_OK; + RM_FLCN_CMD_SOE cmd; + RM_SOE_BIF_CMD_UPHY_DLN_CFG_SPACE *pBifCmd = NULL; + NVSWITCH_TIMEOUT timeout = { 0 }; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.unitId = RM_SOE_UNIT_BIF; + cmd.hdr.size = RM_SOE_CMD_SIZE(BIF, UPHY_DLN_CFG_SPACE); + cmd.cmd.bif.cmdType = RM_SOE_BIF_CMD_GET_UPHY_DLN_CFG_SPACE; + + pBifCmd = &cmd.cmd.bif.cfgctl; + pBifCmd->regAddress = regAddress; + pBifCmd->laneSelectMask = laneSelectMask; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + + status = flcnQueueCmdPostBlocking(device, + pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg + NULL, // pPayload + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to execute BIF GET_UPHY_DLN_CFG_SPACE via SOE, Error 0x%x\n", + __FUNCTION__, status); + return -NVL_ERR_INVALID_STATE; + } + + *pRegValue = NVSWITCH_SOE_RD32_LR10(device, 0, _SOE, _MAILBOX(0)); + + return NVL_SUCCESS; +} + +static NvlStatus +_soeForceThermalSlowdown_LR10 +( + nvswitch_device *device, + NvBool slowdown, + NvU32 periodUs +) +{ + FLCN *pFlcn = device->pSoe->pFlcn; + NvU32 cmdSeqDesc = 0; + NV_STATUS status = NV_OK; + RM_FLCN_CMD_SOE cmd; + NVSWITCH_TIMEOUT timeout = {0}; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.unitId = RM_SOE_UNIT_THERM; + cmd.hdr.size = sizeof(cmd); + cmd.cmd.therm.cmdType = RM_SOE_THERM_FORCE_SLOWDOWN; + cmd.cmd.therm.slowdown.slowdown = slowdown; + cmd.cmd.therm.slowdown.periodUs = periodUs; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg - not used for now + NULL, // pPayload - not used for now + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Thermal slowdown failed. 
rc:%d\n", + __FUNCTION__, status); + + return -NVL_ERR_GENERIC; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_soeSetPcieLinkSpeed_LR10 +( + nvswitch_device *device, + NvU32 linkSpeed +) +{ + FLCN *pFlcn = device->pSoe->pFlcn; + NvU32 cmdSeqDesc = 0; + NV_STATUS status = NV_OK; + RM_FLCN_CMD_SOE cmd; + RM_SOE_BIF_CMD_PCIE_LINK_SPEED *pBifCmd = NULL; + NVSWITCH_TIMEOUT timeout = { 0 }; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.unitId = RM_SOE_UNIT_BIF; + cmd.hdr.size = RM_SOE_CMD_SIZE(BIF, PCIE_LINK_SPEED); + cmd.cmd.bif.cmdType = RM_SOE_BIF_CMD_SET_PCIE_LINK_SPEED; + + pBifCmd = &cmd.cmd.bif.speedctl; + pBifCmd->linkSpeed = linkSpeed; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + + status = flcnQueueCmdPostBlocking(device, + pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg + NULL, // pPayload + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to execute BIF SET_PCIE_LINK_SPEED via SOE, Error 0x%x\n", + __FUNCTION__, status); + return -NVL_ERR_INVALID_STATE; + } + + return NVL_SUCCESS; +} + +/*! + * @brief set hal function pointers for functions defined in LR10 (i.e. this file) + * + * this function has to be at the end of the file so that all the + * other functions are already defined. + * + * @param[in] pFlcnable The flcnable for which to set hals + */ +void +soeSetupHal_LR10 +( + SOE *pSoe +) +{ + soe_hal *pHal = pSoe->base.pHal; + flcnable_hal *pParentHal = (flcnable_hal *)pHal; + //set any functions we want to override + pParentHal->construct = _soeConstruct_LR10; + pParentHal->destruct = _soeDestruct_LR10; + pParentHal->getExternalConfig = _soeGetExternalConfig_LR10; + pParentHal->fetchEngines = _soeFetchEngines_LR10; + + // set any functions specific to SOE + pHal->service = _soeService_LR10; + pHal->serviceHalt = _soeServiceHalt_LR10; + pHal->getEmemSize = _soeGetEmemSize_LR10; + pHal->ememTransfer = _soeEmemTransfer_LR10; + pHal->getEmemSize = _soeGetEmemSize_LR10; + pHal->getEmemStartOffset = _soeGetEmemStartOffset_LR10; + pHal->ememPortToRegAddr = _soeEmemPortToRegAddr_LR10; + pHal->serviceExterr = _soeServiceExterr_LR10; + pHal->getExtErrRegAddrs = _soeGetExtErrRegAddrs_LR10; + pHal->ememPortSizeGet = _soeEmemPortSizeGet_LR10; + pHal->isCpuHalted = _soeIsCpuHalted_LR10; + pHal->testDma = _soeTestDma_LR10; + pHal->setPexEOM = _soeSetPexEOM_LR10; + pHal->getUphyDlnCfgSpace = _soeGetUphyDlnCfgSpace_LR10; + pHal->forceThermalSlowdown = _soeForceThermalSlowdown_LR10; + pHal->setPcieLinkSpeed = _soeSetPcieLinkSpeed_LR10; + pHal->getPexEomStatus = _soeGetPexEomStatus_LR10; +} diff --git a/src/common/nvswitch/kernel/lr10/therm_lr10.c b/src/common/nvswitch/kernel/lr10/therm_lr10.c new file mode 100644 index 000000000..2ece59b1e --- /dev/null +++ b/src/common/nvswitch/kernel/lr10/therm_lr10.c @@ -0,0 +1,299 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "export_nvswitch.h" +#include "common_nvswitch.h" +#include "error_nvswitch.h" +#include "lr10/lr10.h" +#include "lr10/therm_lr10.h" +#include "soe/soeiftherm.h" +#include "rmflcncmdif_nvswitch.h" +#include "soe/soe_nvswitch.h" + +#include "nvswitch/lr10/dev_therm.h" +#include "nvswitch/lr10/dev_nvlsaw_ip.h" + +// +// Thermal functions +// + +// +// Initialize thermal offsets for External Tdiode. +// + +NvlStatus +nvswitch_init_thermal_lr10 +( + nvswitch_device *device +) +{ + lr10_device *chip_device = NVSWITCH_GET_CHIP_DEVICE_LR10(device); + + // Mark everything invalid + chip_device->tdiode.method = NVSWITCH_THERM_METHOD_UNKNOWN; + + return NVL_SUCCESS; +} + +static void +_nvswitch_read_max_tsense_temperature +( + nvswitch_device *device, + NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info, + NvU32 channel +) +{ + NvU32 offset; + NvU32 temperature; + + temperature = nvswitch_reg_read_32(device, NV_THERM_TSENSE_MAXIMUM_TEMPERATURE); + temperature = DRF_VAL(_THERM_TSENSE, _MAXIMUM_TEMPERATURE, _MAXIMUM_TEMPERATURE, temperature); + + if (channel == NVSWITCH_THERM_CHANNEL_LR10_TSENSE_MAX) + { + offset = nvswitch_reg_read_32(device, NV_THERM_TSENSE_U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS); + offset = DRF_VAL(_THERM_TSENSE, _U2_A_0_BJT_0_TEMPERATURE_MODIFICATIONS, _TEMPERATURE_OFFSET, offset); + + // Temperature of the sensor reported equals calculation of the max temperature reported + // from the TSENSE HUB plus the temperature offset programmed by SW. This offset needs to + // be substracted to get the actual temperature of the sensor. 
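+        // (Illustration: a raw hub reading of 0x0C80 (100 C in 9.5 fixed
+        // point) with a programmed raw offset of 0x0140 (10 C) leaves 0x0B40
+        // (90 C) here, which NV_TSENSE_FXP_9_5_TO_24_8 then converts below.)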
+ temperature -= offset; + } + + info->temperature[channel] = NV_TSENSE_FXP_9_5_TO_24_8(temperature); + info->status[channel] = NVL_SUCCESS; +} + +static void +_nvswitch_read_external_tdiode_temperature +( + nvswitch_device *device, + NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info, + NvU32 channel +) +{ +} + +NvlStatus +nvswitch_ctrl_therm_read_temperature_lr10 +( + nvswitch_device *device, + NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info +) +{ + NvU32 channel; + + if (!info->channelMask) + { + NVSWITCH_PRINT(device, ERROR, + "%s: No channel given in the input.\n", + __FUNCTION__); + + return -NVL_BAD_ARGS; + } + + nvswitch_os_memset(info->temperature, 0x0, sizeof(info->temperature)); + + channel = NVSWITCH_THERM_CHANNEL_LR10_TSENSE_MAX; + if (info->channelMask & NVBIT(channel)) + { + _nvswitch_read_max_tsense_temperature(device, info, channel); + info->channelMask &= ~NVBIT(channel); + } + + channel = NVSWITCH_THERM_CHANNEL_LR10_TSENSE_OFFSET_MAX; + if (info->channelMask & NVBIT(channel)) + { + _nvswitch_read_max_tsense_temperature(device, info, channel); + info->channelMask &= ~NVBIT(channel); + } + + channel = NVSWITCH_THERM_CHANNEL_LR10_TDIODE; + if (info->channelMask & NVBIT(channel)) + { + _nvswitch_read_external_tdiode_temperature(device, info, channel); + info->channelMask &= ~NVBIT(channel); + } + + channel = NVSWITCH_THERM_CHANNEL_LR10_TDIODE_OFFSET; + if (info->channelMask & NVBIT(channel)) + { + _nvswitch_read_external_tdiode_temperature(device, info, channel); + info->channelMask &= ~NVBIT(channel); + } + + if (info->channelMask) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ChannelMask %x absent on LR10.\n", + __FUNCTION__, info->channelMask); + + return -NVL_BAD_ARGS; + } + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_ctrl_therm_get_temperature_limit_lr10 +( + nvswitch_device *device, + NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS *info +) +{ + NvU32 threshold; + NvU32 temperature; + + threshold = nvswitch_reg_read_32(device, NV_THERM_TSENSE_THRESHOLD_TEMPERATURES); + + switch (info->thermalEventId) + { + case NVSWITCH_CTRL_THERMAL_EVENT_ID_WARN: + { + // Get Slowdown temperature + temperature = DRF_VAL(_THERM_TSENSE, _THRESHOLD_TEMPERATURES, + _WARNING_TEMPERATURE, threshold); + break; + } + case NVSWITCH_CTRL_THERMAL_EVENT_ID_OVERT: + { + // Get Shutdown temperature + temperature = DRF_VAL(_THERM_TSENSE, _THRESHOLD_TEMPERATURES, + _OVERTEMP_TEMPERATURE, threshold); + break; + } + default: + { + NVSWITCH_PRINT(device, ERROR, "Invalid Thermal Event Id: 0x%x\n", info->thermalEventId); + return -NVL_BAD_ARGS; + } + } + + info->temperatureLimit = NV_TSENSE_FXP_9_5_TO_24_8(temperature); + + return NVL_SUCCESS; +} + +// Background task to monitor thermal warn and adjust link mode +void +nvswitch_monitor_thermal_alert_lr10 +( + nvswitch_device *device +) +{ + return; +} + +/* + * @brief Callback function to recieve thermal messages from SOE. 
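+ *
+ * @param[in] device  nvswitch_device pointer
+ * @param[in] pGenMsg Generic falcon message from SOE, decoded as RM_FLCN_MSG_SOE
+ * @param[in] pParams Parameters registered with the event callback (unused here)
+ * @param[in] seqDesc Sequence descriptor associated with the event (unused here)
+ * @param[in] status  Status forwarded by the falcon message handling layer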
+ */ +void +nvswitch_therm_soe_callback_lr10 +( + nvswitch_device *device, + RM_FLCN_MSG *pGenMsg, + void *pParams, + NvU32 seqDesc, + NV_STATUS status +) +{ + RM_SOE_THERM_MSG_SLOWDOWN_STATUS slowdown_status; + RM_SOE_THERM_MSG_SHUTDOWN_STATUS shutdown_status; + RM_FLCN_MSG_SOE *pMsg = (RM_FLCN_MSG_SOE *)pGenMsg; + NvU32 temperature; + NvU32 threshold; + + switch (pMsg->msg.soeTherm.msgType) + { + case RM_SOE_THERM_MSG_ID_SLOWDOWN_STATUS: + { + slowdown_status = pMsg->msg.soeTherm.slowdown; + if (slowdown_status.bSlowdown) + { + if (slowdown_status.source.bTsense) // TSENSE_THERM_ALERT + { + temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.maxTemperature); + threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.warnThreshold); + + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START, + "NVSWITCH Temperature %dC | TSENSE WARN Threshold %dC\n", + temperature, threshold); + + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START, + "Thermal Slowdown Engaged | Temp higher than WARN Threshold\n"); + } + + if (slowdown_status.source.bPmgr) // PMGR_THERM_ALERT + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START, + "Thermal Slowdown Engaged | PMGR WARN Threshold reached\n"); + } + } + else // REVERT_SLOWDOWN + { + temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.maxTemperature); + threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(slowdown_status.warnThreshold); + + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END, + "NVSWITCH Temperature %dC | TSENSE WARN Threshold %dC\n", + temperature, threshold); + + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_END, + "Thermal slowdown Disengaged\n"); + } + break; + } + + case RM_SOE_THERM_MSG_ID_SHUTDOWN_STATUS: + { + shutdown_status = pMsg->msg.soeTherm.shutdown; + if (shutdown_status.source.bTsense) // TSENSE_THERM_SHUTDOWN + { + temperature = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(shutdown_status.maxTemperature); + threshold = RM_SOE_NV_TEMP_TO_CELSIUS_TRUNCED(shutdown_status.overtThreshold); + + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_SHUTDOWN, + "NVSWITCH Temperature %dC | OVERT Threshold %dC\n", + temperature, threshold); + + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_SHUTDOWN, + "TSENSE OVERT Threshold reached. Shutting Down\n"); + } + + + if (shutdown_status.source.bPmgr) // PMGR_THERM_SHUTDOWN + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_THERMAL_EVENT_START, + "PMGR OVERT Threshold reached. Shutting Down\n"); + } + break; + } + default: + { + NVSWITCH_PRINT(device, ERROR, "%s Unknown message Id\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + } + } +} + diff --git a/src/common/nvswitch/kernel/nvswitch.c b/src/common/nvswitch/kernel/nvswitch.c new file mode 100644 index 000000000..5b7eeee85 --- /dev/null +++ b/src/common/nvswitch/kernel/nvswitch.c @@ -0,0 +1,4210 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "rom_nvswitch.h" +#include "error_nvswitch.h" +#include "regkey_nvswitch.h" +#include "bios_nvswitch.h" +#include "haldef_nvswitch.h" +#include "flcn/haldefs_flcnable_nvswitch.h" +#include "flcn/flcn_nvswitch.h" +#include "soe/soe_nvswitch.h" +#include "nvVer.h" + +#define NVSWITCH_DEV_CMD_CHECK_ADMIN NVBIT64(0) +#define NVSWITCH_DEV_CMD_CHECK_FM NVBIT64(1) + +#define NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(cmd, function, type, private, flags)\ + case cmd: \ + { \ + if (sizeof(type) != size) \ + { \ + retval = -NVL_BAD_ARGS; \ + break; \ + } \ + \ + retval = _nvswitch_lib_validate_privileged_ctrl(private, flags); \ + if (retval != NVL_SUCCESS) \ + { \ + break; \ + } \ + \ + retval = function(device, params); \ + break; \ + } \ + +#define NVSWITCH_DEV_CMD_DISPATCH_RESERVED(cmd) \ + case cmd: \ + { \ + retval = -NVL_ERR_NOT_IMPLEMENTED; \ + break; \ + } \ + +// +// HW's device id list can be found here - +// P4hw:2001: hw\doc\engr\Dev_ID\DeviceID_master_list.txt +// + +const static NvU32 nvswitch_lr10_device_ids[] = +{ + 0x1AE8, 0x1AF0, 0x1AF1, 0x1AF2, 0x1AF3, 0x1AF4, 0x1AF5, 0x1AF6, 0x1AF7, + 0x1AF8, 0x1AF9, 0x1AFA, 0x1AFB, 0x1AFC, 0x1AFD, 0x1AFE, 0x1AFF +}; + +nvlink_link_handlers link_handlers; + +static NvBool +_nvswitch_is_device_id_present +( + const NvU32 *array, + NvU32 array_len, + NvU32 device_id +) +{ + NvU32 i = 0; + + for(i = 0; i < array_len; i++) + { + if (array[i] == device_id) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +NvBool +nvswitch_is_lr10_device_id +( + NvU32 device_id +) +{ + NvU32 count = (sizeof(nvswitch_lr10_device_ids) / + sizeof(nvswitch_lr10_device_ids[0])); + + return _nvswitch_is_device_id_present(nvswitch_lr10_device_ids, count, device_id); +} + +/* + * NVLink corelib callbacks are used by the NVLink library separate from the + * NVSwitch driver, therefore they do not take a device lock and can not modify + * nvswitch_device state or use error logging. 
+ * + * These NVSwitch functions modify link state outside of the corelib: + * _nvswitch_ctrl_inject_link_error - injects asynchronous link errors (MODS-only) + */ + +static NV_API_CALL NvlStatus +_nvswitch_corelib_add_link +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_add_link(link); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_remove_link +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_remove_link(link); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_set_dl_link_mode +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_set_dl_link_mode(link, mode, flags); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_get_dl_link_mode +( + nvlink_link *link, + NvU64 *mode +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_get_dl_link_mode(link, mode); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_set_tl_link_mode +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_set_tl_link_mode(link, mode, flags); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_get_tl_link_mode +( + nvlink_link *link, + NvU64 *mode +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_get_tl_link_mode(link, mode); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_set_tx_mode +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_set_tx_mode(link, mode, flags); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_get_tx_mode +( + nvlink_link *link, + NvU64 *mode, + NvU32 *subMode +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_get_tx_mode(link, mode, subMode); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_set_rx_mode +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_set_rx_mode(link, mode, flags); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_get_rx_mode +( + nvlink_link *link, + NvU64 *mode, + NvU32 *subMode +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_get_rx_mode(link, mode, subMode); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_set_rx_detect +( + nvlink_link *link, + NvU32 flags +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_set_rx_detect(link, flags); +} + +static NV_API_CALL NvlStatus +_nvswitch_corelib_get_rx_detect +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_get_rx_detect(link); +} + +static NV_API_CALL void +_nvswitch_corelib_training_complete +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + device->hal.nvswitch_corelib_training_complete(link); +} + +static NV_API_CALL void +_nvswitch_corelib_get_uphy_load +( + nvlink_link *link, + NvBool *bUnlocked +) +{ + nvswitch_device *device = link->dev->pDevInfo; + return device->hal.nvswitch_corelib_get_uphy_load(link, bUnlocked); +} + + +static NV_API_CALL NvlStatus +_nvswitch_corelib_write_discovery_token +( + nvlink_link *link, + NvU64 token +) +{ + return NVL_SUCCESS; +} + +void 
+nvswitch_get_link_handlers +( + nvlink_link_handlers *nvswitch_link_handlers +) +{ + if (!nvswitch_link_handlers) + { + NVSWITCH_ASSERT(0); + return; + } + + nvswitch_link_handlers->add = _nvswitch_corelib_add_link; + nvswitch_link_handlers->remove = _nvswitch_corelib_remove_link; + nvswitch_link_handlers->set_dl_link_mode = _nvswitch_corelib_set_dl_link_mode; + nvswitch_link_handlers->get_dl_link_mode = _nvswitch_corelib_get_dl_link_mode; + nvswitch_link_handlers->set_tl_link_mode = _nvswitch_corelib_set_tl_link_mode; + nvswitch_link_handlers->get_tl_link_mode = _nvswitch_corelib_get_tl_link_mode; + nvswitch_link_handlers->set_tx_mode = _nvswitch_corelib_set_tx_mode; + nvswitch_link_handlers->get_tx_mode = _nvswitch_corelib_get_tx_mode; + nvswitch_link_handlers->set_rx_mode = _nvswitch_corelib_set_rx_mode; + nvswitch_link_handlers->get_rx_mode = _nvswitch_corelib_get_rx_mode; + nvswitch_link_handlers->set_rx_detect = _nvswitch_corelib_set_rx_detect; + nvswitch_link_handlers->get_rx_detect = _nvswitch_corelib_get_rx_detect; + nvswitch_link_handlers->training_complete = _nvswitch_corelib_training_complete; + nvswitch_link_handlers->get_uphy_load = _nvswitch_corelib_get_uphy_load; + nvswitch_link_handlers->write_discovery_token = _nvswitch_corelib_write_discovery_token; +} + +#define NVSWITCH_INIT_REGKEY(_private, _regkey, _string, _default_val) \ +do \ +{ \ + NvU32 data; \ + \ + device->regkeys._regkey = _default_val; \ + if (NV_SWITCH_REGKEY_PRIVATE_ALLOWED || !NV_SWITCH_REGKEY##_private) \ + { \ + if (NVL_SUCCESS == \ + nvswitch_os_read_registry_dword(device->os_handle, _string, &data)) \ + { \ + NVSWITCH_PRINT(device, SETUP, \ + "%s: Applying regkey %s=0x%x\n", \ + __FUNCTION__, \ + _string, data); \ + device->regkeys._regkey = data; \ + } \ + } \ +} while(0) + +static void +_nvswitch_init_device_regkeys +( + nvswitch_device *device +) +{ + // + // Public external use regkeys + // + NVSWITCH_INIT_REGKEY(_PUBLIC, ato_control, + NV_SWITCH_REGKEY_ATO_CONTROL, + NV_SWITCH_REGKEY_ATO_CONTROL_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PUBLIC, sto_control, + NV_SWITCH_REGKEY_STO_CONTROL, + NV_SWITCH_REGKEY_STO_CONTROL_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PUBLIC, crc_bit_error_rate_short, + NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT, + NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_SHORT_OFF); + + NVSWITCH_INIT_REGKEY(_PUBLIC, crc_bit_error_rate_long, + NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG, + NV_SWITCH_REGKEY_CRC_BIT_ERROR_RATE_LONG_OFF); + + // + // Private internal use regkeys + // Not available on release build kernel drivers + // + NVSWITCH_INIT_REGKEY(_PRIVATE, external_fabric_mgmt, + NV_SWITCH_REGKEY_EXTERNAL_FABRIC_MGMT, + NV_SWITCH_REGKEY_EXTERNAL_FABRIC_MGMT_ENABLE); + + NVSWITCH_INIT_REGKEY(_PRIVATE, txtrain_control, + NV_SWITCH_REGKEY_TXTRAIN_CONTROL, + NV_SWITCH_REGKEY_TXTRAIN_CONTROL_NOP); + + NVSWITCH_INIT_REGKEY(_PRIVATE, crossbar_DBI, + NV_SWITCH_REGKEY_CROSSBAR_DBI, + NV_SWITCH_REGKEY_CROSSBAR_DBI_ENABLE); + + NVSWITCH_INIT_REGKEY(_PRIVATE, link_DBI, + NV_SWITCH_REGKEY_LINK_DBI, + NV_SWITCH_REGKEY_LINK_DBI_ENABLE); + + NVSWITCH_INIT_REGKEY(_PRIVATE, ac_coupled_mask, + NV_SWITCH_REGKEY_AC_COUPLED_MASK, + 0); + + NVSWITCH_INIT_REGKEY(_PRIVATE, ac_coupled_mask2, + NV_SWITCH_REGKEY_AC_COUPLED_MASK2, + 0); + + NVSWITCH_INIT_REGKEY(_PRIVATE, swap_clk, + NV_SWITCH_REGKEY_SWAP_CLK_OVERRIDE, + nvswitch_get_swap_clk_default(device)); + + NVSWITCH_INIT_REGKEY(_PRIVATE, link_enable_mask, + NV_SWITCH_REGKEY_ENABLE_LINK_MASK, + NV_U32_MAX); + + NVSWITCH_INIT_REGKEY(_PRIVATE, link_enable_mask2, + 
NV_SWITCH_REGKEY_ENABLE_LINK_MASK2, + NV_U32_MAX); + + NVSWITCH_INIT_REGKEY(_PRIVATE, bandwidth_shaper, + NV_SWITCH_REGKEY_BANDWIDTH_SHAPER, + NV_SWITCH_REGKEY_BANDWIDTH_SHAPER_PROD); + + NVSWITCH_INIT_REGKEY(_PRIVATE, ssg_control, + NV_SWITCH_REGKEY_SSG_CONTROL, + 0); + + NVSWITCH_INIT_REGKEY(_PRIVATE, skip_buffer_ready, + NV_SWITCH_REGKEY_SKIP_BUFFER_READY, + 0); + + NVSWITCH_INIT_REGKEY(_PRIVATE, enable_pm, + NV_SWITCH_REGKEY_ENABLE_PM, + NV_SWITCH_REGKEY_ENABLE_PM_YES); + + NVSWITCH_INIT_REGKEY(_PRIVATE, chiplib_forced_config_link_mask, + NV_SWITCH_REGKEY_CHIPLIB_FORCED_LINK_CONFIG_MASK, + 0); + + NVSWITCH_INIT_REGKEY(_PRIVATE, chiplib_forced_config_link_mask2, + NV_SWITCH_REGKEY_CHIPLIB_FORCED_LINK_CONFIG_MASK2, + 0); + + NVSWITCH_INIT_REGKEY(_PRIVATE, soe_dma_self_test, + NV_SWITCH_REGKEY_SOE_DMA_SELFTEST, + NV_SWITCH_REGKEY_SOE_DMA_SELFTEST_ENABLE); + + NVSWITCH_INIT_REGKEY(_PRIVATE, soe_disable, + NV_SWITCH_REGKEY_SOE_DISABLE, + NV_SWITCH_REGKEY_SOE_DISABLE_NO); + + NVSWITCH_INIT_REGKEY(_PUBLIC, soe_boot_core, + NV_SWITCH_REGKEY_SOE_BOOT_CORE, + NV_SWITCH_REGKEY_SOE_BOOT_CORE_DEFAULT); + NVSWITCH_INIT_REGKEY(_PRIVATE, latency_counter, + NV_SWITCH_REGKEY_LATENCY_COUNTER_LOGGING, + NV_SWITCH_REGKEY_LATENCY_COUNTER_LOGGING_ENABLE); + + NVSWITCH_INIT_REGKEY(_PRIVATE, nvlink_speed_control, + NV_SWITCH_REGKEY_SPEED_CONTROL, + NV_SWITCH_REGKEY_SPEED_CONTROL_SPEED_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PRIVATE, inforom_bbx_periodic_flush, + NV_SWITCH_REGKEY_INFOROM_BBX_ENABLE_PERIODIC_FLUSHING, + NV_SWITCH_REGKEY_INFOROM_BBX_ENABLE_PERIODIC_FLUSHING_DISABLE); + + NVSWITCH_INIT_REGKEY(_PRIVATE, inforom_bbx_write_periodicity, + NV_SWITCH_REGKEY_INFOROM_BBX_WRITE_PERIODICITY, + NV_SWITCH_REGKEY_INFOROM_BBX_WRITE_PERIODICITY_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PRIVATE, inforom_bbx_write_min_duration, + NV_SWITCH_REGKEY_INFOROM_BBX_WRITE_MIN_DURATION, + NV_SWITCH_REGKEY_INFOROM_BBX_WRITE_MIN_DURATION_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PRIVATE, minion_disable, + NV_SWITCH_REGKEY_MINION_DISABLE, + NV_SWITCH_REGKEY_MINION_DISABLE_NO); + + NVSWITCH_INIT_REGKEY(_PRIVATE, set_ucode_target, + NV_SWITCH_REGKEY_MINION_SET_UCODE_TARGET, + NV_SWITCH_REGKEY_MINION_SET_UCODE_TARGET_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PRIVATE, set_simmode, + NV_SWITCH_REGKEY_MINION_SET_SIMMODE, + NV_SWITCH_REGKEY_MINION_SET_SIMMODE_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PRIVATE, set_smf_settings, + NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS, + NV_SWITCH_REGKEY_MINION_SET_SMF_SETTINGS_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PRIVATE, select_uphy_tables, + NV_SWITCH_REGKEY_MINION_SELECT_UPHY_TABLES, + NV_SWITCH_REGKEY_MINION_SELECT_UPHY_TABLES_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PRIVATE, i2c_access_control, + NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL, + NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL_DEFAULT); + + NVSWITCH_INIT_REGKEY(_PRIVATE, link_recal_settings, + NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS, + NV_SWITCH_REGKEY_LINK_RECAL_SETTINGS_NOP); +} +NvU64 +nvswitch_lib_deferred_task_dispatcher +( + nvswitch_device *device +) +{ + NvU64 time_nsec; + NvU64 time_next_nsec = nvswitch_os_get_platform_time() + 100*NVSWITCH_INTERVAL_1MSEC_IN_NS; + NVSWITCH_TASK_TYPE *task; + + if (!NVSWITCH_IS_DEVICE_VALID(device)) + { + return NV_U64_MAX; + } + + task = device->tasks; + + // Walk the task list, executing those whose next execution interval is at hand + while (task) + { + // Get current time (nsec) for scheduling + time_nsec = nvswitch_os_get_platform_time(); + + if (time_nsec >= task->last_run_nsec + task->period_nsec) + { + // + // The task has never been 
run or it is time to run + // Mark its last run time + // + task->last_run_nsec = time_nsec; + // Run the task + if (NVSWITCH_IS_DEVICE_INITIALIZED(device) || + (task->flags & NVSWITCH_TASK_TYPE_FLAGS_ALWAYS_RUN)) + (*task->task_fn)(device); + } + + // Determine its next run time + time_next_nsec = NV_MIN(task->last_run_nsec + task->period_nsec, time_next_nsec); + task = task->next; + } + + time_nsec = nvswitch_os_get_platform_time(); + + // Return to the OS layer how long to wait before calling again + return(time_next_nsec >= time_nsec ? time_next_nsec - time_nsec : 0); +} + +static NvlStatus +_nvswitch_setup_hal +( + nvswitch_device *device, + NvU32 pci_device_id +) +{ + if (nvswitch_is_lr10_device_id(pci_device_id)) + { + nvswitch_setup_hal_lr10(device); + return NVL_SUCCESS; + } + NVSWITCH_PRINT(device, ERROR, + "NVSwitch HAL setup failed - Unrecognized PCI Device ID\n"); + return -NVL_ERR_NOT_SUPPORTED; +} + +NvlStatus +nvswitch_lib_check_api_version +( + const char *user_version, + char *kernel_version, + NvU32 length +) +{ + const NvLength VERSION_LENGTH = nvswitch_os_strlen(NV_VERSION_STRING); + + if (kernel_version == NULL || user_version == NULL) + { + return -NVL_BAD_ARGS; + } + + if (length < VERSION_LENGTH) + { + return -NVL_NO_MEM; + } + + nvswitch_os_memset(kernel_version, 0x0, length); + nvswitch_os_strncpy(kernel_version, NV_VERSION_STRING, VERSION_LENGTH); + + kernel_version[length - 1] = '\0'; + + if (nvswitch_os_strncmp(user_version, kernel_version, VERSION_LENGTH)) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + return NVL_SUCCESS; +} + +NvBool +nvswitch_is_inforom_supported +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_is_inforom_supported(device); +} + +NvBool +nvswitch_is_spi_supported +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_is_spi_supported(device); +} + +NvBool +nvswitch_is_smbpbi_supported +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_is_smbpbi_supported(device); +} + +NvlStatus +nvswitch_soe_prepare_for_reset +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_soe_prepare_for_reset(device); +} + +NvBool +nvswitch_is_soe_supported +( + nvswitch_device *device +) +{ + if (device->regkeys.soe_disable == NV_SWITCH_REGKEY_SOE_DISABLE_YES) + { + NVSWITCH_PRINT(device, INFO, "SOE is disabled via regkey.\n"); + return NV_FALSE; + } + + return device->hal.nvswitch_is_soe_supported(device); +} + +NvlStatus +nvswitch_soe_set_ucode_core +( + nvswitch_device *device, + NvBool bFalcon +) +{ + return device->hal.nvswitch_soe_set_ucode_core(device, bFalcon); +} + +NvlStatus +nvswitch_init_soe +( + nvswitch_device *device +) +{ + if (device->regkeys.soe_disable == NV_SWITCH_REGKEY_SOE_DISABLE_YES) + { + NVSWITCH_PRINT(device, INFO, "SOE is disabled via regkey.\n"); + return NV_FALSE; + } + + return device->hal.nvswitch_init_soe(device); +} + +static NvlStatus +_nvswitch_construct_soe +( + nvswitch_device *device +) +{ + FLCNABLE *pSoe = NULL; + NvlStatus retval; + + device->pSoe = pSoe = (PFLCNABLE)soeAllocNew(); + if (pSoe == NULL) + { + NVSWITCH_PRINT(device, ERROR, "SOE allocation failed.\n"); + return -NVL_NO_MEM; + } + + retval = soeInit(device, (PSOE)pSoe, device->nvlink_device->pciInfo.pciDeviceId); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "SOE init failed.\n"); + goto soe_init_failed; + } + + if (flcnableConstruct_HAL(device, pSoe) != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "FALCON construct failed.\n"); + retval = -NVL_ERR_INVALID_STATE; + goto flcn_construct_failed; + } 
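    //
    // pSoe stays attached as device->pSoe from here on; it is torn down by
    // _nvswitch_destruct_soe() during shutdown or on a later init failure.
    //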
+ + return NVL_SUCCESS; + +flcn_construct_failed: + soeDestroy(device, (PSOE)pSoe); + +soe_init_failed: + nvswitch_os_free(pSoe); + device->pSoe = NULL; + + return retval; +} + +static void +_nvswitch_destruct_soe +( + nvswitch_device *device +) +{ + FLCNABLE *pSoe = device->pSoe; + + if (pSoe == NULL) + { + return; + } + + flcnableDestruct_HAL(device, pSoe); + soeDestroy(device, (PSOE)pSoe); + + nvswitch_os_free(pSoe); + device->pSoe = NULL; +} + +static NvlStatus +_nvswitch_initialize_device_state +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_initialize_device_state(device); +} + +static NvlStatus +_nvswitch_post_init_device_setup +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_post_init_device_setup(device); +} + +static NvlStatus +_nvswitch_setup_link_system_registers +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_setup_link_system_registers(device); +} + +static void +_nvswitch_post_init_blacklist_device_setup +( + nvswitch_device *device +) +{ + device->hal.nvswitch_post_init_blacklist_device_setup(device); +} + +static void +_nvswitch_set_dma_mask +( + nvswitch_device *device +) +{ + NvU32 hw_dma_width, retval; + + hw_dma_width = device->hal.nvswitch_get_device_dma_width(device); + + if (hw_dma_width == 0) + { + NVSWITCH_PRINT(device, INFO, "DMA is not supported on this device\n"); + return; + } + + retval = nvswitch_os_set_dma_mask(device->os_handle, hw_dma_width); + if (retval == NVL_SUCCESS) + { + device->dma_addr_width = hw_dma_width; + return; + } + + NVSWITCH_PRINT(device, SETUP, + "%s: Failed to set DMA mask, trying 32-bit fallback : %d\n", + __FUNCTION__, retval); + + retval = nvswitch_os_set_dma_mask(device->os_handle, 32); + if (retval == NVL_SUCCESS) + { + device->dma_addr_width = 32; + return; + } + + // failure is not fatal, the driver will just restrict DMA functionality + NVSWITCH_PRINT(device, ERROR, "Failed to set DMA mask : %d\n", retval); +} + +NvlStatus +nvswitch_deassert_link_reset +( + nvswitch_device *device, + nvlink_link *link +) +{ + return device->hal.nvswitch_deassert_link_reset(device, link); +} + +NvU32 +nvswitch_get_sublink_width +( + nvswitch_device *device, + NvU32 linkNumber +) +{ + return device->hal.nvswitch_get_sublink_width(device, linkNumber); +} + +static void +_nvswitch_unregister_links +( + nvswitch_device *device +) +{ + nvlink_link *link = NULL; + NvU32 link_num; + NvBool is_blacklisted; + + + if (!NVSWITCH_IS_DEVICE_INITIALIZED(device)) + return; + + device->nvlink_device->initialized = 0; + is_blacklisted = (device->device_fabric_state == NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED); + + for (link_num = 0; link_num < nvswitch_get_num_links(device); link_num++) + { + if (nvlink_lib_get_link(device->nvlink_device, link_num, &link) == NVL_SUCCESS) + { + nvlink_lib_unregister_link(link); + nvswitch_destroy_link(link); + } + } + + if (!is_blacklisted) + nvswitch_inforom_nvlink_flush(device); +} + +NvlStatus NV_API_CALL +nvswitch_lib_read_fabric_state +( + nvswitch_device *device, + NVSWITCH_DEVICE_FABRIC_STATE *device_fabric_state, + NVSWITCH_DEVICE_BLACKLIST_REASON *device_blacklist_reason, + NVSWITCH_DRIVER_FABRIC_STATE *driver_fabric_state +) +{ + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + return -NVL_BAD_ARGS; + + if (device_fabric_state != NULL) + *device_fabric_state = device->device_fabric_state; + + if (device_blacklist_reason != NULL) + *device_blacklist_reason = device->device_blacklist_reason; + + if (driver_fabric_state != NULL) + *driver_fabric_state = 
device->driver_fabric_state; + + return NVL_SUCCESS; +} + +static NvlStatus +nvswitch_lib_blacklist_device +( + nvswitch_device *device, + NVSWITCH_DEVICE_BLACKLIST_REASON device_blacklist_reason +) +{ + NvlStatus status; + + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + if (device->device_fabric_state == NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED) + { + NVSWITCH_PRINT(device, WARN, "Device is already blacklisted\n"); + return -NVL_ERR_NOT_SUPPORTED; + } + + device->device_fabric_state = NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED; + device->device_blacklist_reason = device_blacklist_reason; + + status = device->hal.nvswitch_write_fabric_state(device); + if (status != NVL_SUCCESS) + NVSWITCH_PRINT(device, INFO, "Cannot send fabric state to SOE\n"); + + return NVL_SUCCESS; +} + +static NvlStatus +nvswitch_ctrl_blacklist_device( + nvswitch_device *device, + NVSWITCH_BLACKLIST_DEVICE_PARAMS *p +) +{ + NvlStatus status; + + status = nvswitch_lib_blacklist_device(device, p->deviceReason); + if (status != NVL_SUCCESS) + return status; + + nvswitch_lib_disable_interrupts(device); + + // Unregister links from NVLinkCoreLib, so that link training is not + // attempted + _nvswitch_unregister_links(device); + + // Keep device registered for HAL access and Fabric State updates + + return NVL_SUCCESS; +} + +static NvlStatus +nvswitch_ctrl_set_fm_driver_state( + nvswitch_device *device, + NVSWITCH_SET_FM_DRIVER_STATE_PARAMS *p +) +{ + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + device->driver_fabric_state = p->driverState; + device->fabric_state_timestamp = nvswitch_os_get_platform_time(); + + return NVL_SUCCESS; +} + +static NvlStatus +nvswitch_ctrl_set_device_fabric_state( + nvswitch_device *device, + NVSWITCH_SET_DEVICE_FABRIC_STATE_PARAMS *p +) +{ + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + if (device->device_fabric_state == NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED) + return -NVL_ERR_NOT_SUPPORTED; + + device->device_fabric_state = p->deviceState; + device->fabric_state_timestamp = nvswitch_os_get_platform_time(); + + // If FM had exceeded timeout, reset the status to not timed-out + if (device->driver_fabric_state == NVSWITCH_DRIVER_FABRIC_STATE_MANAGER_TIMEOUT) + device->driver_fabric_state = NVSWITCH_DRIVER_FABRIC_STATE_CONFIGURED; + + return NVL_SUCCESS; +} + +static NvlStatus +nvswitch_ctrl_set_fm_timeout( + nvswitch_device *device, + NVSWITCH_SET_FM_HEARTBEAT_TIMEOUT_PARAMS *p +) +{ + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + device->fm_timeout = p->fmTimeout; + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_ctrl_register_events( + nvswitch_device *device, + NVSWITCH_REGISTER_EVENTS_PARAMS *p, + void *osPrivate +) +{ + NvlStatus status = NVL_SUCCESS; + NvU32 i; + NvBool many_events, os_descriptor; + void *osDescriptor = osPrivate; + + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + status = nvswitch_os_get_supported_register_events_params(&many_events, + &os_descriptor); + if (status != NVL_SUCCESS) + { + return status; + } + + if ((!many_events && (p->numEvents > 1)) || + (p->numEvents == 0)) + { + return -NVL_BAD_ARGS; + } + + if (os_descriptor) + { + osDescriptor = (void *) p->osDescriptor; + } + + for (i = 0; i < p->numEvents; i++) + { + status = nvswitch_lib_add_client_event(device, osDescriptor, p->eventIds[i]); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to add client 
event.\n", __FUNCTION__); + return status; + } + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_ctrl_unregister_events( + nvswitch_device *device, + NVSWITCH_UNREGISTER_EVENTS_PARAMS *p, + void *osPrivate +) +{ + NvlStatus status = NVL_SUCCESS; + NvBool many_events, os_descriptor; + void *osDescriptor = osPrivate; + + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + status = nvswitch_os_get_supported_register_events_params(&many_events, + &os_descriptor); + if (status != NVL_SUCCESS) + { + return status; + } + + if (os_descriptor) + { + osDescriptor = (void *) p->osDescriptor; + } + + status = nvswitch_lib_remove_client_events(device, osDescriptor); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to remove client event.\n", __FUNCTION__); + return status; + } + + return NVL_SUCCESS; +} + +static void +nvswitch_fabric_state_heartbeat( + nvswitch_device *device +) +{ + NvU64 age; + + if (!NVSWITCH_IS_DEVICE_VALID(device)) + return; + + age = nvswitch_os_get_platform_time() - device->fabric_state_timestamp; + + // Check to see if we have exceeded the FM timeout + if (device->driver_fabric_state == NVSWITCH_DRIVER_FABRIC_STATE_CONFIGURED && + age > (NvU64)device->fm_timeout * 1000ULL * 1000ULL) + device->driver_fabric_state = NVSWITCH_DRIVER_FABRIC_STATE_MANAGER_TIMEOUT; + + (void)device->hal.nvswitch_write_fabric_state(device); +} + +static NvlStatus +_nvswitch_ctrl_set_training_error_info +( + nvswitch_device *device, + NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS *p +) +{ + return device->hal.nvswitch_set_training_error_info(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_fatal_error_scope +( + nvswitch_device *device, + NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS *pParams +) +{ + return device->hal.nvswitch_ctrl_get_fatal_error_scope(device, pParams); +} + +static NvlStatus +_nvswitch_ctrl_therm_get_temperature_limit +( + nvswitch_device *device, + NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS *pParams +) +{ + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + return device->hal.nvswitch_ctrl_therm_get_temperature_limit(device, pParams); +} + +NvlStatus +nvswitch_lib_initialize_device +( + nvswitch_device *device +) +{ + NvlStatus retval = NVL_SUCCESS; + NvU8 link_num; + nvlink_link *link = NULL; + NvBool is_blacklisted_by_os = NV_FALSE; + + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + if (NVSWITCH_IS_DEVICE_INITIALIZED(device)) + { + NVSWITCH_PRINT(device, SETUP, "Device is already initialized!\n"); + return NVL_SUCCESS; + } + + NVSWITCH_PRINT(device, SETUP, + "Initializing nvswitch at (%04x:%02x:%02x.%02x)\n", + device->nvlink_device->pciInfo.domain, + device->nvlink_device->pciInfo.bus, + device->nvlink_device->pciInfo.device, + device->nvlink_device->pciInfo.function); + + nvListInit(&device->client_events_list); + + retval = nvswitch_lib_load_platform_info(device); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to load platform information\n"); + return retval; + } + + if (nvswitch_is_soe_supported(device)) + { + retval = _nvswitch_construct_soe(device); + if (retval != NVL_SUCCESS) + { + return retval; + } + } + else + { + NVSWITCH_PRINT(device, INFO, "SOE is not supported, skipping construct\n"); + } + + _nvswitch_set_dma_mask(device); + + retval = _nvswitch_initialize_device_state(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to initialize device state: %d!\n", + retval); + goto 
nvswitch_initialize_device_state_fail; + } + + device->hal.nvswitch_load_uuid(device); + + /* + * Check module parameters for blacklisted device + */ + if (nvswitch_os_is_uuid_in_blacklist(&device->uuid) == NV_TRUE) + { + NVSWITCH_PRINT(device, SETUP, + "Blacklisted nvswitch at (%04x:%02x:%02x.%02x)\n", + device->nvlink_device->pciInfo.domain, + device->nvlink_device->pciInfo.bus, + device->nvlink_device->pciInfo.device, + device->nvlink_device->pciInfo.function); + is_blacklisted_by_os = NV_TRUE; + // initialization continues until we have updated InfoROM... + } + + if (nvswitch_is_inforom_supported(device)) + { + retval = nvswitch_initialize_inforom(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to initialize InfoROM rc: %d\n", + retval); + goto nvswitch_initialize_device_state_fail; + } + + retval = nvswitch_initialize_inforom_objects(device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to initialize InfoROM objects! rc:%d\n", + retval); + goto nvswitch_initialize_inforom_fail; + } + } + else + { + NVSWITCH_PRINT(device, INFO, + "InfoROM is not supported, skipping init\n"); + } + + (void)device->hal.nvswitch_read_oob_blacklist_state(device); + (void)device->hal.nvswitch_write_fabric_state(device); + + nvswitch_task_create(device, &nvswitch_fabric_state_heartbeat, + NVSWITCH_HEARTBEAT_INTERVAL_NS, + NVSWITCH_TASK_TYPE_FLAGS_ALWAYS_RUN); + + if (device->device_blacklist_reason == NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_OUT_OF_BAND) + { + NVSWITCH_PRINT(device, SETUP, + "Blacklisted nvswitch at (%04x:%02x:%02x.%02x)\n", + device->nvlink_device->pciInfo.domain, + device->nvlink_device->pciInfo.bus, + device->nvlink_device->pciInfo.device, + device->nvlink_device->pciInfo.function); + return retval; + } + + if (is_blacklisted_by_os) + { + (void)nvswitch_lib_blacklist_device(device, NVSWITCH_DEVICE_BLACKLIST_REASON_MANUAL_IN_BAND); + return retval; + } + + for (link_num=0; link_num < nvswitch_get_num_links(device); link_num++) + { + if (!nvswitch_is_link_valid(device, link_num)) + { + continue; + } + + retval = nvswitch_create_link(device, link_num, &link); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to create link %d : %d!\n", + link_num, + retval); + goto nvswitch_link_fail; + } + + retval = nvlink_lib_register_link(device->nvlink_device, link); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to register link %d with the nvlink core : %d!\n", + link_num, + retval); + + // Free the single dangling link. + nvswitch_destroy_link(link); + + goto nvswitch_link_fail; + } + + nvswitch_reset_persistent_link_hw_state(device, link_num); + } + + retval = nvswitch_set_training_mode(device); + + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to determine link training mode! rc: %d\n", retval); + goto nvswitch_link_fail; + } + // Initialize select scratch registers to 0x0 + device->hal.nvswitch_init_scratch(device); + + retval = nvswitch_construct_error_log(&device->log_FATAL_ERRORS, 1024, NV_FALSE); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to construct log_FATAL_ERRORS! rc: %d\n", retval); + goto nvswitch_construct_error_log_fail; + } + + retval = nvswitch_construct_error_log(&device->log_NONFATAL_ERRORS, 1024, NV_TRUE); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to construct log_NONFATAL_ERRORS! 
rc: %d\n", retval); + goto nvswitch_construct_error_log_fail; + } + + if (device->regkeys.latency_counter == NV_SWITCH_REGKEY_LATENCY_COUNTER_LOGGING_ENABLE) + { + nvswitch_task_create(device, &nvswitch_internal_latency_bin_log, + nvswitch_get_latency_sample_interval_msec(device) * NVSWITCH_INTERVAL_1MSEC_IN_NS * 9/10, 0); + } + + nvswitch_task_create(device, &nvswitch_ecc_writeback_task, + (60 * NVSWITCH_INTERVAL_1SEC_IN_NS), 0); + + if (IS_RTLSIM(device) || IS_EMULATION(device) || IS_FMODEL(device)) + { + NVSWITCH_PRINT(device, WARN, + "%s: Skipping setup of NvSwitch thermal alert monitoring\n", + __FUNCTION__); + } + else + { + nvswitch_task_create(device, &nvswitch_monitor_thermal_alert, + 100*NVSWITCH_INTERVAL_1MSEC_IN_NS, 0); + } + + device->nvlink_device->initialized = 1; + + return NVL_SUCCESS; + +nvswitch_construct_error_log_fail: + //free allocated memory to avoid leaking + nvswitch_destroy_error_log(device, &device->log_FATAL_ERRORS); + nvswitch_destroy_error_log(device, &device->log_NONFATAL_ERRORS); + +nvswitch_link_fail: + // Track down all links that successfully registered. + for (link_num = 0; link_num < nvswitch_get_num_links(device); link_num++) + { + if (nvlink_lib_get_link(device->nvlink_device, link_num, &link) == NVL_SUCCESS) + { + nvlink_lib_unregister_link(link); + nvswitch_destroy_link(link); + } + } + + nvswitch_destroy_inforom_objects(device); + +nvswitch_initialize_inforom_fail: + nvswitch_destroy_inforom(device); + +nvswitch_initialize_device_state_fail: + _nvswitch_destruct_soe(device); + + return retval; +} + +NvBool +nvswitch_lib_validate_device_id +( + NvU32 device_id +) +{ + if (nvswitch_is_lr10_device_id(device_id)) + { + return NV_TRUE; + } + return NV_FALSE; +} + +NvlStatus +nvswitch_lib_post_init_device +( + nvswitch_device *device +) +{ + NvlStatus retval; + + if (!NVSWITCH_IS_DEVICE_INITIALIZED(device)) + { + return -NVL_ERR_INVALID_STATE; + } + + retval = _nvswitch_post_init_device_setup(device); + if (retval != NVL_SUCCESS) + { + return retval; + } + + if (nvswitch_is_spi_supported(device)) + { + retval = nvswitch_bios_get_image(device); + if (retval != NVL_SUCCESS) + { + return retval; + } + + retval = nvswitch_parse_bios_image(device); + if (retval != NVL_SUCCESS) + { + return retval; + } + } + else + { + NVSWITCH_PRINT(device, ERROR, + "%s: Skipping BIOS parsing since SPI is unsupported.\n", + __FUNCTION__); + } + + retval = _nvswitch_setup_link_system_registers(device); + if (retval != NVL_SUCCESS) + { + return retval; + } + + nvswitch_smbpbi_post_init(device); + + return NVL_SUCCESS; +} + +void +nvswitch_lib_post_init_blacklist_device +( + nvswitch_device *device +) +{ + _nvswitch_post_init_blacklist_device_setup(device); +} + +/*! + * @brief: Gets the client event associated with the file descriptor + * if it already exists in the Device's client event list. + */ +NvlStatus +nvswitch_lib_get_client_event +( + nvswitch_device *device, + void *osPrivate, + NVSWITCH_CLIENT_EVENT **ppClientEvent +) +{ + NVSWITCH_CLIENT_EVENT *curr = NULL; + + *ppClientEvent = NULL; + + if(!NVSWITCH_IS_DEVICE_VALID(device)) + { + return -NVL_BAD_ARGS; + } + + nvListForEachEntry(curr, &device->client_events_list, entry) + { + if (curr->private_driver_data == osPrivate) + { + *ppClientEvent = curr; + return NVL_SUCCESS; + } + } + + return -NVL_NOT_FOUND; +} + +/*! + * @brief: Adds an event to the front of the + * Device's client event list. 
+ */ +NvlStatus +nvswitch_lib_add_client_event +( + nvswitch_device *device, + void *osPrivate, + NvU32 eventId +) +{ + NVSWITCH_CLIENT_EVENT *newEvent; + NvlStatus status = NVL_SUCCESS; + + if (!NVSWITCH_IS_DEVICE_VALID(device)) + { + return -NVL_BAD_ARGS; + } + + if (eventId >= NVSWITCH_DEVICE_EVENT_COUNT) + { + NVSWITCH_PRINT(device, ERROR, "%s: Invalid event Id.\n", __FUNCTION__); + return -NVL_BAD_ARGS; + } + + // Invoke OS specific API to add event. + status = nvswitch_os_add_client_event(device->os_handle, + osPrivate, + eventId); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to add client event.\n", __FUNCTION__); + return status; + } + + newEvent = nvswitch_os_malloc(sizeof(*newEvent)); + if (newEvent == NULL) + { + return -NVL_NO_MEM; + } + + newEvent->eventId = eventId; + newEvent->private_driver_data = osPrivate; + + nvListAdd(&newEvent->entry, &device->client_events_list); + + return NVL_SUCCESS; +} + +/*! + * @brief: Removes all events corresponding to osPrivate, + * from the Device's client event list. + */ +NvlStatus +nvswitch_lib_remove_client_events +( + nvswitch_device *device, + void *osPrivate +) +{ + NVSWITCH_CLIENT_EVENT *curr = NULL; + NVSWITCH_CLIENT_EVENT *next = NULL; + NvlStatus status = NVL_SUCCESS; + + // + // Device shutdown may happen before this is called, so return + // if device is gone + // + if (!NVSWITCH_IS_DEVICE_VALID(device)) + { + return NVL_SUCCESS; + } + + nvListForEachEntry_safe(curr, next, &device->client_events_list, entry) + { + if (curr->private_driver_data == osPrivate) + { + nvListDel(&curr->entry); + nvswitch_os_free(curr); + } + } + + // Invoke OS specific API to remove event. + status = nvswitch_os_remove_client_event(device->os_handle, + osPrivate); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "%s: Failed to remove client events.\n", __FUNCTION__); + return status; + } + + return NVL_SUCCESS; +} + +/*! + * @brief: Notifies all events with matching event id in the + * Device's client event list. + */ +NvlStatus +nvswitch_lib_notify_client_events +( + nvswitch_device *device, + NvU32 eventId +) +{ + NvlStatus status; + NVSWITCH_CLIENT_EVENT *curr = NULL; + + if (!NVSWITCH_IS_DEVICE_VALID(device)) + { + return -NVL_BAD_ARGS; + } + + if (eventId >= NVSWITCH_DEVICE_EVENT_COUNT) + { + NVSWITCH_PRINT(device, ERROR, "%s: Invalid event Id.\n", __FUNCTION__); + return -NVL_BAD_ARGS; + } + + nvListForEachEntry(curr, &device->client_events_list, entry) + { + if (curr->eventId == eventId) + { + // OS specific event notification. + status = nvswitch_os_notify_client_event(device->os_handle, + curr->private_driver_data, + eventId); + if (status != NVL_SUCCESS) + { + return status; + } + } + } + + return NVL_SUCCESS; +} + +/*! + @brief: Release ROM image from memory. +*/ +void +_nvswitch_destroy_rom(nvswitch_device *device) +{ + if (device->biosImage.pImage != NULL) + { + nvswitch_os_free(device->biosImage.pImage); + device->biosImage.pImage = NULL; + } +} + +/*! 
+ * @brief: Free the device's client event list + */ +static void +_nvswitch_destroy_event_list(nvswitch_device *device) +{ + NVSWITCH_CLIENT_EVENT *curr = NULL; + NVSWITCH_CLIENT_EVENT *next = NULL; + + nvListForEachEntry_safe(curr, next, &device->client_events_list, entry) + { + nvListDel(&curr->entry); + nvswitch_os_free(curr); + } +} + +NvlStatus +nvswitch_lib_shutdown_device +( + nvswitch_device *device +) +{ + + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + // + // Set fabric state to offline + // + if (device->device_fabric_state != NVSWITCH_DEVICE_FABRIC_STATE_BLACKLISTED) + device->device_fabric_state = NVSWITCH_DEVICE_FABRIC_STATE_OFFLINE; + device->driver_fabric_state = NVSWITCH_DRIVER_FABRIC_STATE_OFFLINE; + (void)device->hal.nvswitch_write_fabric_state(device); + + nvswitch_hw_counter_shutdown(device); + + _nvswitch_unregister_links(device); + + nvswitch_destroy_error_log(device, &device->log_FATAL_ERRORS); + nvswitch_destroy_error_log(device, &device->log_NONFATAL_ERRORS); + + nvswitch_smbpbi_unload(device); + _nvswitch_destroy_event_list(device); + + nvswitch_destroy_inforom_objects(device); + nvswitch_destroy_inforom(device); + + nvswitch_smbpbi_destroy(device); + + nvswitch_destroy_device_state(device); + + _nvswitch_destroy_rom(device); + + _nvswitch_destruct_soe(device); + + nvswitch_tasks_destroy(device); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_lib_get_log_count +( + nvswitch_device *device, + NvU32 *fatal, NvU32 *nonfatal +) +{ + if (!NVSWITCH_IS_DEVICE_INITIALIZED(device) || + fatal == NULL || nonfatal == NULL) + { + return -NVL_BAD_ARGS; + } + + *fatal = device->log_FATAL_ERRORS.error_count; + *nonfatal = device->log_NONFATAL_ERRORS.error_count; + // No report of log_INFO currently + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_lib_load_platform_info +( + nvswitch_device *device +) +{ + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + device->hal.nvswitch_determine_platform(device); + + return NVL_SUCCESS; +} + +void +nvswitch_lib_get_device_info +( + nvswitch_device *device, + struct nvlink_pci_info **pciInfo +) +{ + if (!NVSWITCH_IS_DEVICE_VALID(device) || pciInfo == NULL) + { + NVSWITCH_ASSERT(0); + return; + } + + *pciInfo = &device->nvlink_device->pciInfo; +} + +NvlStatus +nvswitch_lib_get_bios_version +( + nvswitch_device *device, + NvU64 *version +) +{ + NVSWITCH_GET_BIOS_INFO_PARAMS p = { 0 }; + NvlStatus ret; + + if (!device) + return -NVL_BAD_ARGS; + + ret = device->hal.nvswitch_ctrl_get_bios_info(device, &p); + *version = p.version; + + return ret; +} + +NvlStatus +nvswitch_lib_use_pin_irq +( + nvswitch_device *device +) +{ + return IS_FMODEL(device); +} + + +NvlStatus +nvswitch_lib_register_device +( + NvU16 pci_domain, + NvU8 pci_bus, + NvU8 pci_device, + NvU8 pci_func, + NvU16 pci_device_id, + void *os_handle, + NvU32 os_instance, + nvswitch_device **return_device +) +{ + nvswitch_device *device = NULL; + nvlink_device *coreDev = NULL; + NvlStatus retval = NVL_SUCCESS; + + if (!nvlink_lib_is_initialized()) + { + NVSWITCH_PRINT(device, ERROR, + "NVLink core lib isn't initialized yet!\n"); + return -NVL_INITIALIZATION_TOTAL_FAILURE; + } + + if (return_device == NULL || os_handle == NULL) + { + return -NVL_BAD_ARGS; + } + + *return_device = NULL; + + device = nvswitch_os_malloc(sizeof(*device)); + if (NULL == device) + { + NVSWITCH_PRINT(device, ERROR, + "nvswitch_os_malloc during device creation failed!\n"); + return -NVL_NO_MEM; + } + nvswitch_os_memset(device, 0, 
sizeof(*device)); + + nvswitch_os_snprintf(device->name, sizeof(device->name), + NVSWITCH_DEVICE_NAME "%d", os_instance); + + coreDev = nvswitch_os_malloc(sizeof(*coreDev)); + if (NULL == coreDev) + { + NVSWITCH_PRINT(device, ERROR, + "nvswitch_os_malloc during device creation failed!\n"); + + retval = -NVL_NO_MEM; + goto nvlink_lib_register_device_fail; + } + nvswitch_os_memset(coreDev, 0, sizeof(*coreDev)); + + coreDev->driverName = + nvswitch_os_malloc(sizeof(NVSWITCH_DRIVER_NAME)); + if (coreDev->driverName == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "nvswitch_os_malloc during device creation failed!\n"); + + retval = -NVL_NO_MEM; + goto nvlink_lib_register_device_fail; + } + nvswitch_os_memcpy(coreDev->driverName, NVSWITCH_DRIVER_NAME, + sizeof(NVSWITCH_DRIVER_NAME)); + + device->os_handle = os_handle; + device->os_instance = os_instance; + + device->nvlink_device = coreDev; + device->nvlink_device->deviceName = device->name; + device->nvlink_device->uuid = NULL; // No UUID support for switch + + device->nvlink_device->pciInfo.domain = pci_domain; + device->nvlink_device->pciInfo.bus = pci_bus; + device->nvlink_device->pciInfo.device = pci_device; + device->nvlink_device->pciInfo.function = pci_func; + device->nvlink_device->pciInfo.pciDeviceId = pci_device_id; + + // nvlink_device has a back pointer to nvswitch_device + device->nvlink_device->pDevInfo = device; + device->nvlink_device->type = NVLINK_DEVICE_TYPE_NVSWITCH; + + // + // Initialize the Fabric State + // + device->fm_timeout = NVSWITCH_DEFAULT_FM_HEARTBEAT_TIMEOUT_MSEC; + device->fabric_state_sequence_number = 0; + device->driver_fabric_state = NVSWITCH_DRIVER_FABRIC_STATE_STANDBY; + device->device_fabric_state = NVSWITCH_DEVICE_FABRIC_STATE_STANDBY; + device->device_blacklist_reason = NVSWITCH_DEVICE_BLACKLIST_REASON_NONE; + + // + // Initialize HAL connectivity as early as possible so that other lib + // interfaces can work. + // + retval = _nvswitch_setup_hal(device, device->nvlink_device->pciInfo.pciDeviceId); + if (retval != NVL_SUCCESS) + { + goto nvlink_lib_register_device_fail; + } + + // + // Initialize regkeys as early as possible so that most routines can take + // advantage of them. + // + _nvswitch_init_device_regkeys(device); + + retval = nvlink_lib_register_device(device->nvlink_device); + if (NVL_SUCCESS != retval) + { + NVSWITCH_PRINT(device, ERROR, + "nvlinklib register device failed!\n"); + goto nvlink_lib_register_device_fail; + } + + *return_device = device; + + NVSWITCH_PRINT(device, SETUP, + "Successfully registered with nvlinkcore\n"); + + return retval; + +nvlink_lib_register_device_fail: + + if (NULL != coreDev) + { + nvswitch_os_free(coreDev->driverName); + nvswitch_os_free(coreDev); + } + + if (NULL != device) + nvswitch_os_free(device); + + return retval; +} + +void +nvswitch_lib_unregister_device +( + nvswitch_device *device +) +{ + if (!NVSWITCH_IS_DEVICE_VALID(device)) + { + NVSWITCH_ASSERT(0); + return; + } + + nvlink_lib_unregister_device(device->nvlink_device); + + nvswitch_os_free(device->nvlink_device->driverName); + nvswitch_os_free(device->nvlink_device); + nvswitch_os_free(device); + + return; +} + +/*! + * @brief: Gets the mask of valid I2C ports on the + * Device. 
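 *
 *         A port is reported only if it is _DEFINED_PRESENT and either the
 *         NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL regkey is set to _ENABLE
 *         (which opens up all ports) or the port itself is marked
 *         _ACCESS_ALLOWED_TRUE.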
+ */ +NvlStatus +nvswitch_lib_get_valid_ports_mask +( + nvswitch_device *device, + NvU32 *validPortsMask +) +{ + NvU32 port_info; + NvU32 i; + NvU32 ports_mask = 0; + NvBool is_i2c_access_allowed; + NvBool is_port_allowed; + + if (!NVSWITCH_IS_DEVICE_VALID(device) || + (validPortsMask == NULL)) + { + return -NVL_BAD_ARGS; + } + + is_i2c_access_allowed = (device->regkeys.i2c_access_control == + NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL_ENABLE) ? + NV_TRUE : NV_FALSE; + + for (i = 0; i < NVSWITCH_MAX_I2C_PORTS; i++) + { + port_info = nvswitch_i2c_get_port_info(device, i); + + is_port_allowed = is_i2c_access_allowed ? NV_TRUE : + FLD_TEST_DRF(_I2C, _PORTINFO, _ACCESS_ALLOWED, _TRUE, + port_info); + + if (is_port_allowed && + FLD_TEST_DRF(_I2C, _PORTINFO, _DEFINED, _PRESENT, port_info)) + { + ports_mask |= NVBIT(i); + } + } + + *validPortsMask = ports_mask; + return NVL_SUCCESS; +} + +/*! + * @brief: Returns if the I2C transactions are supported. + */ +NvBool +nvswitch_lib_is_i2c_supported +( + nvswitch_device *device +) +{ + if (!NVSWITCH_IS_DEVICE_VALID(device)) + { + NVSWITCH_ASSERT(0); + return NV_FALSE; + } + + return nvswitch_is_i2c_supported(device); +} + +static NvlStatus +_nvswitch_perform_i2c_transfer +( + nvswitch_device *device, + NvU32 client, + NvU8 type, + NvU16 addr, + NvU8 port, + NvU8 cmd, + NvU32 msgLength, + NvU8 *pData +) +{ + NvlStatus status; + NvU16 deviceAddr; + NvU32 speedMode; + NvBool bIsRead = NV_FALSE; + NvU32 flags = 0; + NVSWITCH_CTRL_I2C_INDEXED_PARAMS i2c_params = {0}; + NvBool is_i2c_access_allowed; + + if (!nvswitch_os_is_admin()) + { + return -NVL_ERR_INSUFFICIENT_PERMISSIONS; + } + + is_i2c_access_allowed = (device->regkeys.i2c_access_control == + NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL_ENABLE) ? + NV_TRUE : NV_FALSE; + + // + // The address needs to be shifted by 1, + // See NVSWITCH_CTRL_I2C_INDEXED_PARAMS + // + deviceAddr = addr << 1; + speedMode = device->pI2c->Ports[port].defaultSpeedMode; + flags = DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _START, _SEND) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _STOP, _SEND) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _ADDRESS_MODE, _7BIT) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _FLAVOR, _HW) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _BLOCK_PROTOCOL, _DISABLED) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _TRANSACTION_MODE, _NORMAL); + + switch (speedMode) + { + case NVSWITCH_I2C_SPEED_MODE_1000KHZ: + { + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _SPEED_MODE, _1000KHZ, flags); + break; + } + case NVSWITCH_I2C_SPEED_MODE_400KHZ: + { + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _SPEED_MODE, _400KHZ, flags); + break; + } + case NVSWITCH_I2C_SPEED_MODE_300KHZ: + { + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _SPEED_MODE, _300KHZ, flags); + break; + } + case NVSWITCH_I2C_SPEED_MODE_200KHZ: + { + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _SPEED_MODE, _200KHZ, flags); + break; + } + case NVSWITCH_I2C_SPEED_MODE_100KHZ: + { + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _SPEED_MODE, _100KHZ, flags); + break; + } + default: + { + NVSWITCH_PRINT(device, ERROR, "Invalid I2C speed!\n"); + status = -NVL_BAD_ARGS; + goto end; + } + } + + switch (type) + { + case NVSWITCH_I2C_CMD_READ: + bIsRead = NV_TRUE; + // Fall through + case NVSWITCH_I2C_CMD_WRITE: + { + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _INDEX_LENGTH, _ZERO, flags); + break; + } + case NVSWITCH_I2C_CMD_SMBUS_READ: + { + bIsRead = NV_TRUE; + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _RESTART, _SEND, flags); + // Fall through + } + case NVSWITCH_I2C_CMD_SMBUS_WRITE: + { + flags = 
FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _INDEX_LENGTH, _ONE, flags); + break; + } + case NVSWITCH_I2C_CMD_SMBUS_QUICK_READ: + bIsRead = NV_TRUE; + // Fall through + case NVSWITCH_I2C_CMD_SMBUS_QUICK_WRITE: + { + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _INDEX_LENGTH, _ZERO, flags); + msgLength = 0; + break; + } + default: + { + NVSWITCH_PRINT(device, ERROR, "Invalid SMBUS protocol! Protocol not supported.\n"); + status = -NVL_BAD_ARGS; + goto end; + } + } + + if (!is_i2c_access_allowed && + !nvswitch_i2c_is_device_access_allowed(device, port, deviceAddr, bIsRead)) + { + return -NVL_BAD_ARGS; + } + + if (msgLength > NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX) + { + NVSWITCH_PRINT(device, ERROR, + "Length of buffer (0x%x bytes) provided larger than max (0x%x bytes)\n", + msgLength, NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX); + status = -NVL_BAD_ARGS; + goto end; + } + + if (bIsRead) + { + i2c_params.bIsRead = NV_TRUE; + } + else + { + flags = FLD_SET_DRF(SWITCH_CTRL, _I2C_FLAGS, _RESTART, _NONE, flags); + nvswitch_os_memcpy(i2c_params.message, pData, msgLength); + } + + if (FLD_TEST_DRF(SWITCH_CTRL, _I2C_FLAGS, _INDEX_LENGTH, _ONE, flags)) + { + i2c_params.index[0] = cmd; + } + + i2c_params.port = port; + i2c_params.address = deviceAddr; + i2c_params.acquirer = client; + i2c_params.flags = flags; + i2c_params.messageLength = msgLength; + + status = nvswitch_ctrl_i2c_indexed(device, &i2c_params); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "I2C transfer Failed!\n"); + goto end; + } + + if (bIsRead) + { + nvswitch_os_memcpy(pData, i2c_params.message, msgLength); + } + +end: + return status; +} + +/*! + * @brief: Performs an I2C transaction. + */ +NvlStatus +nvswitch_lib_i2c_transfer +( + nvswitch_device *device, + NvU32 port, + NvU8 type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +) +{ + NvlStatus status; + NvU32 port_info; + NvBool is_i2c_access_allowed; + NvBool is_port_allowed; + + if (!NVSWITCH_IS_DEVICE_VALID(device)) + { + NVSWITCH_ASSERT(0); + return -NVL_ERR_INVALID_STATE; + } + + port_info = nvswitch_i2c_get_port_info(device, port); + + is_i2c_access_allowed = (device->regkeys.i2c_access_control == + NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL_ENABLE) ? + NV_TRUE : NV_FALSE; + is_port_allowed = is_i2c_access_allowed ? 
NV_TRUE : + FLD_TEST_DRF(_I2C, _PORTINFO, _ACCESS_ALLOWED, _TRUE, + port_info); + + if (!is_port_allowed || + !FLD_TEST_DRF(_I2C, _PORTINFO, _DEFINED, _PRESENT, port_info)) + { + NVSWITCH_PRINT(device, INFO, + "%s: Invalid port access %d.\n", + __FUNCTION__, port); + return (-NVL_BAD_ARGS); + } + + status = _nvswitch_perform_i2c_transfer(device, NVSWITCH_I2C_ACQUIRER_EXTERNAL, + type, (NvU16)addr, port, command, len, pData); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "I2C transaction failed!\n"); + return status; + } + + return NVL_SUCCESS; +} + +void +nvswitch_timeout_create +( + NvU64 timeout_ns, + NVSWITCH_TIMEOUT *time +) +{ + NvU64 time_current; + + time_current = nvswitch_os_get_platform_time(); + time->timeout_ns = time_current + timeout_ns; +} + +NvBool +nvswitch_timeout_check +( + NVSWITCH_TIMEOUT *time +) +{ + NvU64 time_current; + + time_current = nvswitch_os_get_platform_time(); + return (time->timeout_ns <= time_current); +} + +void +nvswitch_task_create +( + nvswitch_device *device, + void (*task_fn)(nvswitch_device *device), + NvU64 period_nsec, + NvU32 flags +) +{ + NVSWITCH_TASK_TYPE *task = nvswitch_os_malloc(sizeof(*task)); + + if (task == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Unable to allocate task.\n", + __FUNCTION__); + } + else + { + task->task_fn = task_fn; + task->period_nsec = period_nsec; + task->last_run_nsec = 0; + task->flags = flags; + task->next = device->tasks; + device->tasks = task; + } +} + +void +nvswitch_tasks_destroy +( + nvswitch_device *device +) +{ + NVSWITCH_TASK_TYPE *task = device->tasks; + NVSWITCH_TASK_TYPE *next_task; + + device->tasks = NULL; + + while (task) + { + next_task = task->next; + nvswitch_os_free(task); + task = next_task; + } +} + +void +nvswitch_destroy_device_state +( + nvswitch_device *device +) +{ + device->hal.nvswitch_destroy_device_state(device); +} + +static NvlStatus +_nvswitch_ctrl_get_info +( + nvswitch_device *device, + NVSWITCH_GET_INFO *p +) +{ + return device->hal.nvswitch_ctrl_get_info(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_nvlink_status +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_STATUS_PARAMS *ret +) +{ + return device->hal.nvswitch_ctrl_get_nvlink_status(device, ret); +} + +static NvlStatus +_nvswitch_ctrl_get_counters +( + nvswitch_device *device, + NVSWITCH_NVLINK_GET_COUNTERS_PARAMS *ret +) +{ + return device->hal.nvswitch_ctrl_get_counters(device, ret); +} + +NvlStatus +nvswitch_set_nport_port_config +( + nvswitch_device *device, + NVSWITCH_SET_SWITCH_PORT_CONFIG *p +) +{ + return device->hal.nvswitch_set_nport_port_config(device, p); +} + +static NvlStatus +_nvswitch_ctrl_set_switch_port_config +( + nvswitch_device *device, + NVSWITCH_SET_SWITCH_PORT_CONFIG *p +) +{ + return device->hal.nvswitch_ctrl_set_switch_port_config(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_ingress_request_table +( + nvswitch_device *device, + NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS *params +) +{ + return device->hal.nvswitch_ctrl_get_ingress_request_table(device, params); +} + +static NvlStatus +_nvswitch_ctrl_set_ingress_request_table +( + nvswitch_device *device, + NVSWITCH_SET_INGRESS_REQUEST_TABLE *p +) +{ + return device->hal.nvswitch_ctrl_set_ingress_request_table(device, p); +} + +static NvlStatus +_nvswitch_ctrl_set_ingress_request_valid +( + nvswitch_device *device, + NVSWITCH_SET_INGRESS_REQUEST_VALID *p +) +{ + return device->hal.nvswitch_ctrl_set_ingress_request_valid(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_ingress_response_table +( 
+ nvswitch_device *device, + NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS *params +) +{ + return device->hal.nvswitch_ctrl_get_ingress_response_table(device, params); +} + +static NvlStatus +_nvswitch_ctrl_set_ingress_response_table +( + nvswitch_device *device, + NVSWITCH_SET_INGRESS_RESPONSE_TABLE *p +) +{ + return device->hal.nvswitch_ctrl_set_ingress_response_table(device, p); +} + +static NvlStatus +_nvswitch_ctrl_set_ganged_link_table +( + nvswitch_device *device, + NVSWITCH_SET_GANGED_LINK_TABLE *p +) +{ + return device->hal.nvswitch_ctrl_set_ganged_link_table(device, p); +} + +static NvlStatus +_nvswitch_ctrl_set_remap_policy +( + nvswitch_device *device, + NVSWITCH_SET_REMAP_POLICY *p +) +{ + return device->hal.nvswitch_ctrl_set_remap_policy(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_remap_policy +( + nvswitch_device *device, + NVSWITCH_GET_REMAP_POLICY_PARAMS *params +) +{ + return device->hal.nvswitch_ctrl_get_remap_policy(device, params); +} + +static NvlStatus +_nvswitch_ctrl_set_remap_policy_valid +( + nvswitch_device *device, + NVSWITCH_SET_REMAP_POLICY_VALID *p +) +{ + return device->hal.nvswitch_ctrl_set_remap_policy_valid(device, p); +} + +static NvlStatus +_nvswitch_ctrl_set_routing_id +( + nvswitch_device *device, + NVSWITCH_SET_ROUTING_ID *p +) +{ + return device->hal.nvswitch_ctrl_set_routing_id(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_routing_id +( + nvswitch_device *device, + NVSWITCH_GET_ROUTING_ID_PARAMS *params +) +{ + return device->hal.nvswitch_ctrl_get_routing_id(device, params); +} + +static NvlStatus +_nvswitch_ctrl_set_routing_id_valid +( + nvswitch_device *device, + NVSWITCH_SET_ROUTING_ID_VALID *p +) +{ + return device->hal.nvswitch_ctrl_set_routing_id_valid(device, p); +} + +static NvlStatus +_nvswitch_ctrl_set_routing_lan +( + nvswitch_device *device, + NVSWITCH_SET_ROUTING_LAN *p +) +{ + return device->hal.nvswitch_ctrl_set_routing_lan(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_routing_lan +( + nvswitch_device *device, + NVSWITCH_GET_ROUTING_LAN_PARAMS *params +) +{ + return device->hal.nvswitch_ctrl_get_routing_lan(device, params); +} + +static NvlStatus +_nvswitch_ctrl_set_routing_lan_valid +( + nvswitch_device *device, + NVSWITCH_SET_ROUTING_LAN_VALID *p +) +{ + return device->hal.nvswitch_ctrl_set_routing_lan_valid(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_internal_latency +( + nvswitch_device *device, + NVSWITCH_GET_INTERNAL_LATENCY *p +) +{ + return device->hal.nvswitch_ctrl_get_internal_latency(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_nvlipt_counters +( + nvswitch_device *device, + NVSWITCH_GET_NVLIPT_COUNTERS *p +) +{ + // + // This control call is now deprecated. + // New control call to fetch throughput counters is: + // _nvswitch_ctrl_get_throughput_counters + // + return -NVL_ERR_NOT_SUPPORTED; +} + +static NvlStatus +_nvswitch_ctrl_set_nvlipt_counter_config +( + nvswitch_device *device, + NVSWITCH_SET_NVLIPT_COUNTER_CONFIG *p +) +{ + // + // This control call is now deprecated. + // New control call to fetch throughput counters is: + // _nvswitch_ctrl_get_throughput_counters_lr10 + // + // Setting counter config is not allowed on these + // non-configurable counters. These counters are + // expected to be used by monitoring clients. + // + return -NVL_ERR_NOT_SUPPORTED; +} + +static NvlStatus +_nvswitch_ctrl_get_nvlipt_counter_config +( + nvswitch_device *device, + NVSWITCH_GET_NVLIPT_COUNTER_CONFIG *p +) +{ + // + // This control call is now deprecated. 
+ // New control call to fetch throughput counters is: + // _nvswitch_ctrl_get_throughput_counters_lr10 + // + // Getting counter config is useful if counters are + // configurable. These counters are not configurable + // and are expected to be used by monitoring clients. + // + return -NVL_ERR_NOT_SUPPORTED; +} + +NvU32 +nvswitch_i2c_get_port_info +( + nvswitch_device *device, + NvU32 port +) +{ + return device->hal.nvswitch_i2c_get_port_info(device, port); +} + +NvlStatus +nvswitch_ctrl_i2c_indexed +( + nvswitch_device *device, + NVSWITCH_CTRL_I2C_INDEXED_PARAMS *pParams +) +{ + return device->hal.nvswitch_ctrl_i2c_indexed(device, pParams); +} + +static NvlStatus +_nvswitch_ctrl_therm_read_temperature +( + nvswitch_device *device, + NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS *info +) +{ + return device->hal.nvswitch_ctrl_therm_read_temperature(device, info); +} + +static NvlStatus +_nvswitch_ctrl_get_bios_info +( + nvswitch_device *device, + NVSWITCH_GET_BIOS_INFO_PARAMS *p +) +{ + return device->hal.nvswitch_ctrl_get_bios_info(device, p); +} + +NvlStatus +nvswitch_ctrl_set_latency_bins +( + nvswitch_device *device, + NVSWITCH_SET_LATENCY_BINS *p +) +{ + return device->hal.nvswitch_ctrl_set_latency_bins(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_ingress_reqlinkid +( + nvswitch_device *device, + NVSWITCH_GET_INGRESS_REQLINKID_PARAMS *params +) +{ + return device->hal.nvswitch_ctrl_get_ingress_reqlinkid(device, params); +} + +static NvlStatus +_nvswitch_ctrl_get_throughput_counters +( + nvswitch_device *device, + NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS *p +) +{ + return device->hal.nvswitch_ctrl_get_throughput_counters(device, p); +} + +static NvlStatus +_nvswitch_ctrl_unregister_link +( + nvswitch_device *device, + NVSWITCH_UNREGISTER_LINK_PARAMS *params +) +{ + nvlink_link *link = nvswitch_get_link(device, (NvU8)params->portNum); + + if (link == NULL) + { + return -NVL_BAD_ARGS; + } + + if (device->hal.nvswitch_is_link_in_use(device, params->portNum)) + { + return -NVL_ERR_STATE_IN_USE; + } + + nvlink_lib_unregister_link(link); + nvswitch_destroy_link(link); + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_ctrl_acquire_capability +( + nvswitch_device *device, + NVSWITCH_ACQUIRE_CAPABILITY_PARAMS *params, + void *osPrivate +) +{ + return nvswitch_os_acquire_fabric_mgmt_cap(osPrivate, + params->capDescriptor); +} + +static NvlStatus +_nvswitch_ctrl_reset_and_drain_links +( + nvswitch_device *device, + NVSWITCH_RESET_AND_DRAIN_LINKS_PARAMS *params +) +{ + return device->hal.nvswitch_reset_and_drain_links(device, params->linkMask); +} + +static NvlStatus +_nvswitch_ctrl_get_fom_values +( + nvswitch_device *device, + NVSWITCH_GET_FOM_VALUES_PARAMS *ret +) +{ + return device->hal.nvswitch_ctrl_get_fom_values(device, ret); +} + +static NvlStatus +_nvswitch_ctrl_get_nvlink_ecc_errors +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS *params +) +{ + return device->hal.nvswitch_get_nvlink_ecc_errors(device, params); +} + +static NvlStatus +_nvswitch_ctrl_set_residency_bins +( + nvswitch_device *device, + NVSWITCH_SET_RESIDENCY_BINS *p +) +{ + return device->hal.nvswitch_ctrl_set_residency_bins(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_residency_bins +( + nvswitch_device *device, + NVSWITCH_GET_RESIDENCY_BINS *p +) +{ + return device->hal.nvswitch_ctrl_get_residency_bins(device, p); +} + +static NvlStatus +_nvswitch_ctrl_get_rb_stall_busy +( + nvswitch_device *device, + NVSWITCH_GET_RB_STALL_BUSY *p +) +{ + return 
device->hal.nvswitch_ctrl_get_rb_stall_busy(device, p); +} + +static NvlStatus +_nvswitch_ctrl_i2c_smbus_command +( + nvswitch_device *device, + NVSWITCH_I2C_SMBUS_COMMAND_PARAMS *pParams +) +{ + NvU32 port_info; + NvU32 port = pParams->port; + NvU8 msgLen; + NvU8 cmd; + NvU16 addr; + NvU8 cmdType; + NvU8 *pData; + NvBool is_i2c_access_allowed; + NvBool is_port_allowed; + + port_info = nvswitch_i2c_get_port_info(device, port); + + is_i2c_access_allowed = (device->regkeys.i2c_access_control == + NV_SWITCH_REGKEY_I2C_ACCESS_CONTROL_ENABLE) ? + NV_TRUE : NV_FALSE; + is_port_allowed = is_i2c_access_allowed ? NV_TRUE : + FLD_TEST_DRF(_I2C, _PORTINFO, _ACCESS_ALLOWED, _TRUE, + port_info); + + if (!is_port_allowed || + !FLD_TEST_DRF(_I2C, _PORTINFO, _DEFINED, _PRESENT, port_info)) + { + NVSWITCH_PRINT(device, ERROR, "Invalid port access %d.\n", port); + return NVL_BAD_ARGS; + } + + addr = pParams->deviceAddr; + + switch (pParams->cmdType) + { + case NVSWITCH_I2C_SMBUS_CMD_QUICK: + { + cmd = 0; + msgLen = 0; + cmdType = pParams->bRead ? + NVSWITCH_I2C_CMD_SMBUS_QUICK_READ : + NVSWITCH_I2C_CMD_SMBUS_QUICK_WRITE; + pData = NULL; + break; + } + case NVSWITCH_I2C_SMBUS_CMD_BYTE: + { + cmd = 0; + msgLen = 1; + cmdType = pParams->bRead ? + NVSWITCH_I2C_CMD_READ : + NVSWITCH_I2C_CMD_WRITE; + pData = (NvU8 *)&pParams->transactionData.smbusByte.message; + break; + } + case NVSWITCH_I2C_SMBUS_CMD_BYTE_DATA: + { + msgLen = 1; + cmd = pParams->transactionData.smbusByteData.cmd; + cmdType = pParams->bRead ? + NVSWITCH_I2C_CMD_SMBUS_READ : + NVSWITCH_I2C_CMD_SMBUS_WRITE; + pData = (NvU8 *)&pParams->transactionData.smbusByteData.message; + break; + } + case NVSWITCH_I2C_SMBUS_CMD_WORD_DATA: + { + msgLen = 2; + cmd = pParams->transactionData.smbusWordData.cmd; + cmdType = pParams->bRead ? 
+ NVSWITCH_I2C_CMD_SMBUS_READ : + NVSWITCH_I2C_CMD_SMBUS_WRITE; + pData = (NvU8 *)&pParams->transactionData.smbusWordData.message; + break; + } + default: + { + NVSWITCH_PRINT(device, ERROR, "Invalid Smbus command: %d.\n", port); + return NVL_BAD_ARGS; + } + } + + return _nvswitch_perform_i2c_transfer(device, NVSWITCH_I2C_ACQUIRER_IOCTL, + cmdType, addr, port, cmd, msgLen, pData); +} + +static NvlStatus +_nvswitch_ctrl_get_inforom_nvlink_max_correctable_error_rate +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS *params +) +{ + return nvswitch_inforom_nvlink_get_max_correctable_error_rate(device, params); +} + +static NvlStatus +_nvswitch_ctrl_get_inforom_nvlink_errors +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS *params +) +{ + return nvswitch_inforom_nvlink_get_errors(device, params); +} + +static NvlStatus +_nvswitch_ctrl_get_inforom_ecc_errors +( + nvswitch_device *device, + NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS *params +) +{ + return nvswitch_inforom_ecc_get_errors(device, params); +} + +static NvlStatus +_nvswitch_ctrl_get_inforom_bbx_sxid +( + nvswitch_device *device, + NVSWITCH_GET_SXIDS_PARAMS *params +) +{ + return nvswitch_inforom_bbx_get_sxid(device, params); +} + +static NvlStatus +_nvswitch_ctrl_get_nvlink_lp_counters +( + nvswitch_device *device, + NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS *params +) +{ + return device->hal.nvswitch_ctrl_get_nvlink_lp_counters(device, params); +} + +static NvlStatus +_nvswitch_ctrl_get_sw_info +( + nvswitch_device *device, + NVSWITCH_GET_SW_INFO_PARAMS *params +) +{ + return device->hal.nvswitch_ctrl_get_sw_info(device, params); +} + +static NvlStatus +_nvswitch_lib_validate_privileged_ctrl +( + void *osPrivate, + NvU64 flags +) +{ + if (flags & NVSWITCH_DEV_CMD_CHECK_ADMIN) + { + if (nvswitch_os_is_admin()) + { + return NVL_SUCCESS; + } + } + + if (flags & NVSWITCH_DEV_CMD_CHECK_FM) + { + if (nvswitch_os_is_fabric_manager(osPrivate)) + { + return NVL_SUCCESS; + } + } + + return -NVL_ERR_INSUFFICIENT_PERMISSIONS; +} + +/* + * @Brief : Constructs an NVS link struct with the given data + * + * @Description : + * + * @param[in] device NvSwitch device to contain this link + * @param[in] link_num link number of the link + * @param[out] link reference to store the created link into + * + * @returns NVL_SUCCESS if action succeeded, + * -NVL_NO_MEM if memory allocation failed + */ +NvlStatus +nvswitch_create_link +( + nvswitch_device *device, + NvU32 link_number, + nvlink_link **link +) +{ + NvlStatus retval = NVL_SUCCESS; + nvlink_link *ret = NULL; + LINK_INFO *link_info = NULL; + NvU64 ac_coupled_mask; + + NVSWITCH_ASSERT(nvswitch_get_num_links(device) <= NVSWITCH_MAX_NUM_LINKS); + + ret = nvswitch_os_malloc(sizeof(*ret)); + if (NULL == ret) + { + NVSWITCH_PRINT(device, ERROR, + "nvswitch_os_malloc during link creation failed!\n"); + retval = -NVL_NO_MEM; + goto nvswitch_create_link_cleanup; + } + nvswitch_os_memset(ret, 0, sizeof(*ret)); + + link_info = nvswitch_os_malloc(sizeof(*link_info)); + if (NULL == link_info) + { + NVSWITCH_PRINT(device, ERROR, + "nvswitch_os_malloc during link creation failed!\n"); + retval = -NVL_NO_MEM; + goto nvswitch_create_link_cleanup; + } + nvswitch_os_memset(link_info, 0, sizeof(*link_info)); + nvswitch_os_snprintf(link_info->name, sizeof(link_info->name), NVSWITCH_LINK_NAME "%d", link_number); + + ret->dev = device->nvlink_device; + ret->linkName = link_info->name; + ret->linkNumber = link_number; + ret->state = NVLINK_LINKSTATE_OFF; + ret->ac_coupled 
= NV_FALSE; + ret->version = nvswitch_get_link_ip_version(device, link_number); + + ac_coupled_mask = ((NvU64)device->regkeys.ac_coupled_mask2 << 32 | + (NvU64)device->regkeys.ac_coupled_mask); + + if (ac_coupled_mask) + { + if (ac_coupled_mask & NVBIT64(link_number)) + { + ret->ac_coupled = NV_TRUE; + } + } + else if (device->firmware.nvlink.link_config_found) + { + if (device->firmware.nvlink.link_ac_coupled_mask & NVBIT64(link_number)) + { + ret->ac_coupled = NV_TRUE; + } + } + + // Initialize NVLink corelib callbacks for switch + nvswitch_get_link_handlers(&link_handlers); + + ret->link_handlers = &link_handlers; + + // + // link_info is used to store private link information + // + + ret->link_info = link_info; + + *link = ret; + + return retval; + +nvswitch_create_link_cleanup: + if (NULL != ret) + { + nvswitch_os_free(ret); + } + if (NULL != link_info) + { + nvswitch_os_free(link_info); + } + + return retval; +} + +void +nvswitch_destroy_link +( + nvlink_link *link +) +{ + if (NULL != link->link_info) + { + nvswitch_os_free(link->link_info); + } + + nvswitch_os_free(link); +} + +NvU32 +nvswitch_get_num_links +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_get_num_links(device); +} + +NvBool +nvswitch_is_link_valid +( + nvswitch_device *device, + NvU32 link_id +) +{ + return device->hal.nvswitch_is_link_valid(device, link_id); +} + +nvlink_link* +nvswitch_get_link(nvswitch_device *device, NvU8 link_id) +{ + nvlink_link *link = NULL; + + nvlink_lib_get_link(device->nvlink_device, link_id, &link); + + return link; +} + +NvU64 +nvswitch_get_enabled_link_mask +( + nvswitch_device *device +) +{ + NvU64 ret; + nvlink_link *link; + NvU32 link_num; + + ret = 0x0; + + for (link_num = 0; link_num < nvswitch_get_num_links(device); link_num++) + { + if (nvlink_lib_get_link(device->nvlink_device, link_num, &link) == NVL_SUCCESS) + { + ret |= NVBIT64(link_num); + } + } + + return ret; +} + +void +nvswitch_set_fatal_error +( + nvswitch_device *device, + NvBool device_fatal, + NvU32 link_id +) +{ + device->hal.nvswitch_set_fatal_error(device, device_fatal, link_id); +} + +NvU32 +nvswitch_get_swap_clk_default +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_get_swap_clk_default(device); +} + +NvU32 +nvswitch_get_latency_sample_interval_msec +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_get_latency_sample_interval_msec(device); +} + +void +nvswitch_internal_latency_bin_log +( + nvswitch_device *device +) +{ + device->hal.nvswitch_internal_latency_bin_log(device); +} + +void +nvswitch_ecc_writeback_task +( + nvswitch_device *device +) +{ + device->hal.nvswitch_ecc_writeback_task(device); +} + +void +nvswitch_monitor_thermal_alert +( + nvswitch_device *device +) +{ + device->hal.nvswitch_monitor_thermal_alert(device); +} + +void +nvswitch_hw_counter_shutdown +( + nvswitch_device *device +) +{ + device->hal.nvswitch_hw_counter_shutdown(device); +} + +NvlStatus +nvswitch_get_rom_info +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom +) +{ + return device->hal.nvswitch_get_rom_info(device, eeprom); +} + +void +nvswitch_lib_enable_interrupts +( + nvswitch_device *device +) +{ + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + NVSWITCH_ASSERT(0); + return; + } + + device->hal.nvswitch_lib_enable_interrupts(device); +} + +void +nvswitch_lib_disable_interrupts +( + nvswitch_device *device +) +{ + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + NVSWITCH_ASSERT(0); + return; + } + + device->hal.nvswitch_lib_disable_interrupts(device); +} + 
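/*
 * The two interrupt entry points that follow are meant to be driven from the
 * OS shim's interrupt path. A minimal sketch of that pairing is shown below;
 * the wrapper name and the assumption that a non-NVL_SUCCESS return from the
 * check means "this device has a pending interrupt" are illustrative, not
 * taken from this file.
 */
static NvBool
example_handle_irq
(
    nvswitch_device *device
)
{
    // Cheap check first: skip servicing if this device raised nothing.
    if (nvswitch_lib_check_interrupts(device) == NVL_SUCCESS)
    {
        return NV_FALSE;
    }

    // Full servicing: dispatches and clears the pending interrupt sources.
    (void)nvswitch_lib_service_interrupts(device);

    return NV_TRUE;
}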
+NvlStatus +nvswitch_lib_check_interrupts +( + nvswitch_device *device +) +{ + if (!NVSWITCH_IS_DEVICE_INITIALIZED(device)) + { + return -NVL_BAD_ARGS; + } + + return device->hal.nvswitch_lib_check_interrupts(device); +} + +NvlStatus +nvswitch_lib_service_interrupts +( + nvswitch_device *device +) +{ + if (!NVSWITCH_IS_DEVICE_INITIALIZED(device)) + { + return -NVL_BAD_ARGS; + } + + return device->hal.nvswitch_lib_service_interrupts(device); +} + +NvU64 +nvswitch_hw_counter_read_counter +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_hw_counter_read_counter(device); +} + +NvU32 +nvswitch_get_link_ip_version +( + nvswitch_device *device, + NvU32 link_id +) +{ + return device->hal.nvswitch_get_link_ip_version(device, link_id); +} + +NvU32 +nvswitch_reg_read_32 +( + nvswitch_device *device, + NvU32 offset +) +{ + NvU32 val; + + if (device->nvlink_device->pciInfo.bars[0].pBar == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "register read failed at offset 0x%x\n", offset); + + return 0xFFFFFFFF; + } + + val = nvswitch_os_mem_read32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + offset); + + if ((val & 0xFFFF0000) == 0xBADF0000) + { + NvU32 boot_0; + NVSWITCH_PRINT(device, WARN, + "Potential IO failure reading 0x%x (0x%x)\n", offset, val); + boot_0 = nvswitch_os_mem_read32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + 0x0); + + if ((boot_0 & 0xFFFF0000) == 0xBADF0000) + { + NVSWITCH_PRINT_SXID(device, NVSWITCH_ERR_HW_HOST_IO_FAILURE, + "IO failure\n"); + NVSWITCH_PRINT(device, ERROR, + "IO failure reading 0x%x (0x%x)\n", offset, val); + } + } + +#ifdef _VERBOSE_REG_ACCESS + NVSWITCH_PRINT(device, SETUP, + "NVSWITCH read 0x%6x+%6x = 0x%08x\n", + device->nvlink_device->pciInfo.bars[0].baseAddr, offset, val); +#endif + + return val; +} + +void +nvswitch_reg_write_32 +( + nvswitch_device *device, + NvU32 offset, + NvU32 data +) +{ + if (device->nvlink_device->pciInfo.bars[0].pBar == NULL) + { + NVSWITCH_PRINT(device, ERROR, + "register write failed at offset 0x%x\n", offset); + + return; + } + +#ifdef _VERBOSE_REG_ACCESS + NVSWITCH_PRINT(device, SETUP, + "NVSWITCH write 0x%6x+%6x = 0x%08x\n", + device->nvlink_device->pciInfo.bars[0].baseAddr, offset, data); +#endif + + // Write the register + nvswitch_os_mem_write32((NvU8 *)device->nvlink_device->pciInfo.bars[0].pBar + offset, data); + + return; +} + +NvU64 +nvswitch_read_64bit_counter +( + nvswitch_device *device, + NvU32 lo_offset, + NvU32 hi_offset +) +{ + NvU32 hi0; + NvU32 hi1; + NvU32 lo; + + hi0 = nvswitch_reg_read_32(device, hi_offset); + do + { + hi1 = hi0; + lo = nvswitch_reg_read_32(device, lo_offset); + hi0 = nvswitch_reg_read_32(device, hi_offset); + } while (hi0 != hi1); + + return (lo | ((NvU64)hi0 << 32)); +} + +NvlStatus +nvswitch_validate_pll_config +( + nvswitch_device *device, + NVSWITCH_PLL_INFO *switch_pll, + NVSWITCH_PLL_LIMITS default_pll_limits +) +{ + NvU32 update_rate_khz; + NvU32 vco_freq_khz; + NVSWITCH_PLL_LIMITS pll_limits; + + NVSWITCH_PRINT(device, SETUP, + "%s: Validating PLL: %dkHz * %d / (%d * %d * (1 << %d))\n", + __FUNCTION__, + switch_pll->src_freq_khz, + switch_pll->N, + switch_pll->M, + switch_pll->PL, + switch_pll->dist_mode); + + // + // These parameters could come from schmoo'ing API, settings file or a ROM. + // For now, hard code with POR. 
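    //
    // For reference, the derived clocks computed below are:
    //   vco_freq_khz    = src_freq_khz * N / M
    //   update_rate_khz = src_freq_khz / M
    //   freq_khz        = src_freq_khz * N / (M * PL * (1 << dist_mode))
    // With purely illustrative values (src = 27000 kHz, N = 80, M = 1,
    // PL = 16, dist_mode = 0) that gives VCO = 2160000 kHz, update
    // rate = 27000 kHz, and an output clock of 135000 kHz.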
+ // + if (device->firmware.firmware_size > 0 && + device->firmware.clocks.clocks_found && + device->firmware.clocks.sys_pll.valid) + { + pll_limits = device->firmware.clocks.sys_pll; + } + else + { + pll_limits = default_pll_limits; + } + + NVSWITCH_ASSERT(switch_pll->M != 0); + NVSWITCH_ASSERT(switch_pll->PL != 0); + + if ((switch_pll->src_freq_khz < pll_limits.ref_min_mhz * 1000) || + (switch_pll->src_freq_khz > pll_limits.ref_max_mhz * 1000)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ERROR: Ref(%d) out-of-range\n", + __FUNCTION__, + switch_pll->src_freq_khz); + return -NVL_ERR_INVALID_STATE; + } + + if ((switch_pll->M < pll_limits.m_min) || + (switch_pll->M > pll_limits.m_max)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ERROR: M(%d) out-of-range\n", + __FUNCTION__, + switch_pll->M); + return -NVL_ERR_INVALID_STATE; + } + + if ((switch_pll->N < pll_limits.n_min) || + (switch_pll->N > pll_limits.n_max)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ERROR: N(%d) out-of-range\n", + __FUNCTION__, + switch_pll->N); + return -NVL_ERR_INVALID_STATE; + } + + if ((switch_pll->PL < pll_limits.pl_min) || + (switch_pll->PL > pll_limits.pl_max)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ERROR: PL(%d) out-of-range\n", + __FUNCTION__, + switch_pll->PL); + return -NVL_ERR_INVALID_STATE; + } + + vco_freq_khz = switch_pll->src_freq_khz * switch_pll->N + / switch_pll->M; + if ((vco_freq_khz < pll_limits.vco_min_mhz * 1000) || + (vco_freq_khz > pll_limits.vco_max_mhz * 1000)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ERROR: VCO(%d) freq out-of-range\n", + __FUNCTION__, + vco_freq_khz); + return -NVL_ERR_INVALID_STATE; + } + + update_rate_khz = switch_pll->src_freq_khz / switch_pll->M; + if ((update_rate_khz < pll_limits.update_min_mhz * 1000) || + (update_rate_khz > pll_limits.update_max_mhz * 1000)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: ERROR: update rate(%d) out-of-range\n", + __FUNCTION__, + update_rate_khz); + return -NVL_ERR_INVALID_STATE; + } + + switch_pll->vco_freq_khz = vco_freq_khz; + + switch_pll->freq_khz = + switch_pll->src_freq_khz * switch_pll->N / + (switch_pll->M * switch_pll->PL * (1 << switch_pll->dist_mode)); + + NVSWITCH_PRINT(device, SETUP, + "%s: Validated PLL: %dkHz * %d / (%d * %d * (1 << %d)) = %dkHz\n", + __FUNCTION__, + switch_pll->src_freq_khz, + switch_pll->N, + switch_pll->M, + switch_pll->PL, + switch_pll->dist_mode, + switch_pll->freq_khz); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_init_pll_config +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_init_pll_config(device); +} + +NvlStatus +nvswitch_init_pll +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_init_pll(device); +} + +void +nvswitch_init_clock_gating +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_init_clock_gating(device); +} + +void +nvswitch_lib_get_uuid +( + nvswitch_device *device, + NvUuid *uuid +) +{ + if (!NVSWITCH_IS_DEVICE_INITIALIZED(device) || (uuid == NULL)) + { + return; + } + + nvswitch_os_memcpy(uuid, &device->uuid, sizeof(device->uuid)); +} + +NvlStatus +nvswitch_lib_get_physid +( + nvswitch_device *device, + NvU32 *phys_id +) +{ + NVSWITCH_GET_INFO get_info; + NvlStatus ret; + + if (phys_id == NULL || !NVSWITCH_IS_DEVICE_ACCESSIBLE(device)) + { + return -NVL_BAD_ARGS; + } + + get_info.count=1; + get_info.index[0] = NVSWITCH_GET_INFO_INDEX_PHYSICAL_ID; + + ret = _nvswitch_ctrl_get_info(device, &get_info); + if (ret != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to get physical ID\n"); + return ret; + } + + *phys_id = 
get_info.info[0]; + + return NVL_SUCCESS; +} + +void +nvswitch_i2c_set_hw_speed_mode +( + nvswitch_device *device, + NvU32 port, + NvU32 speedMode +) +{ + device->hal.nvswitch_i2c_set_hw_speed_mode(device, port, speedMode); + return; +} + +void +nvswitch_lib_smbpbi_log_sxid +( + nvswitch_device *device, + NvU32 sxid, + const char *pFormat, + ... +) +{ + va_list arglist; + int msglen; + char string[80]; + + va_start(arglist, pFormat); + msglen = nvswitch_os_vsnprintf(string, sizeof(string), pFormat, arglist); + va_end(arglist); + + if (!(msglen < 0)) + { + msglen = NV_MIN(msglen + 1, (int) sizeof(string)); + nvswitch_smbpbi_log_message(device, sxid, msglen, (NvU8 *) string); + } +} + +NvlStatus +nvswitch_set_minion_initialized +( + nvswitch_device *device, + NvU32 idx_minion, + NvBool initialized +) +{ + return device->hal.nvswitch_set_minion_initialized(device, idx_minion, initialized); +} + +NvBool +nvswitch_is_minion_initialized +( + nvswitch_device *device, + NvU32 idx_minion +) +{ + return device->hal.nvswitch_is_minion_initialized(device, idx_minion); +} + +NvlStatus +nvswitch_device_discovery +( + nvswitch_device *device, + NvU32 discovery_offset +) +{ + return device->hal.nvswitch_device_discovery(device, discovery_offset); +} + +void +nvswitch_filter_discovery +( + nvswitch_device *device +) +{ + device->hal.nvswitch_filter_discovery(device); +} + +NvlStatus +nvswitch_process_discovery +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_process_discovery(device); +} + +NvlStatus +nvswitch_init_minion +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_init_minion(device); +} + +NvU32 +nvswitch_get_link_eng_inst +( + nvswitch_device *device, + NvU32 link_id, + NVSWITCH_ENGINE_ID eng_id +) +{ + return device->hal.nvswitch_get_link_eng_inst(device, link_id, eng_id); +} + +void * +nvswitch_alloc_chipdevice +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_alloc_chipdevice(device)); +} + +void +nvswitch_free_chipdevice +( + nvswitch_device *device +) +{ + if (device->chip_device) + { + nvswitch_os_free(device->chip_device); + device->chip_device = NULL; + } +} + +NvlStatus +nvswitch_init_thermal +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_init_thermal(device)); +} + +NvU32 +nvswitch_read_physical_id +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_read_physical_id(device)); +} + +NvU32 +nvswitch_get_caps_nvlink_version +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_get_caps_nvlink_version(device)); +} + +void +nvswitch_initialize_interrupt_tree +( + nvswitch_device *device +) +{ + device->hal.nvswitch_initialize_interrupt_tree(device); +} + +void +nvswitch_init_dlpl_interrupts +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + + device->hal.nvswitch_init_dlpl_interrupts(link); +} + +NvlStatus +nvswitch_initialize_pmgr +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_initialize_pmgr(device)); +} + +NvlStatus +nvswitch_initialize_ip_wrappers +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_initialize_ip_wrappers(device)); +} + +NvlStatus +nvswitch_initialize_route +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_initialize_route(device)); +} + +void +nvswitch_soe_unregister_events +( + nvswitch_device *device +) +{ + device->hal.nvswitch_soe_unregister_events(device); +} + +NvlStatus +nvswitch_soe_register_event_callbacks +( + nvswitch_device *device +) +{ + return 
device->hal.nvswitch_soe_register_event_callbacks(device); +} + +NVSWITCH_BIOS_NVLINK_CONFIG * +nvswitch_get_bios_nvlink_config +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_get_bios_nvlink_config(device)); +} + +NvlStatus +nvswitch_minion_send_command +( + nvswitch_device *device, + NvU32 linkNumber, + NvU32 command, + NvU32 scratch0 +) +{ + return(device->hal.nvswitch_minion_send_command(device, linkNumber, + command, scratch0)); +} + +NvlStatus +nvswitch_init_nport +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_init_nport(device); +} + +NvlStatus +nvswitch_init_nxbar +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_init_nxbar(device); +} + +NvlStatus +nvswitch_clear_nport_rams +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_clear_nport_rams(device); +} + +NvlStatus +nvswitch_pri_ring_init +( + nvswitch_device *device +) +{ + return(device->hal.nvswitch_pri_ring_init(device)); +} + +NvlStatus +nvswitch_get_soe_ucode_binaries +( + nvswitch_device *device, + const NvU32 **soe_ucode_data, + const NvU32 **soe_ucode_header +) +{ + return device->hal.nvswitch_get_soe_ucode_binaries(device, soe_ucode_data, soe_ucode_header); +} + +NvlStatus +nvswitch_get_remap_table_selector +( + nvswitch_device *device, + NVSWITCH_TABLE_SELECT_REMAP table_selector, + NvU32 *remap_ram_sel +) +{ + return device->hal.nvswitch_get_remap_table_selector(device, table_selector, remap_ram_sel); +} + +NvU32 +nvswitch_get_ingress_ram_size +( + nvswitch_device *device, + NvU32 ingress_ram_selector // NV_INGRESS_REQRSPMAPADDR_RAM_ADDRESS_* +) +{ + return device->hal.nvswitch_get_ingress_ram_size(device, ingress_ram_selector); +} + +NvlStatus +nvswitch_minion_get_dl_status +( + nvswitch_device *device, + NvU32 linkId, + NvU32 statusIdx, + NvU32 statusArgs, + NvU32 *statusData +) +{ + return device->hal.nvswitch_minion_get_dl_status(device, linkId, statusIdx, statusArgs, statusData); +} + +NvBool +nvswitch_is_i2c_supported +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_is_i2c_supported(device); +} + + +NvlStatus +nvswitch_poll_sublink_state +( + nvswitch_device *device, + nvlink_link *link +) +{ + return device->hal.nvswitch_poll_sublink_state(device, link); +} + +void +nvswitch_setup_link_loopback_mode +( + nvswitch_device *device, + NvU32 linkNumber +) +{ + return device->hal.nvswitch_setup_link_loopback_mode(device, linkNumber); +} + +void +nvswitch_reset_persistent_link_hw_state +( + nvswitch_device *device, + NvU32 linkNumber +) +{ + return device->hal.nvswitch_reset_persistent_link_hw_state(device, linkNumber); +} + +void +nvswitch_store_topology_information +( + nvswitch_device *device, + nvlink_link *link +) +{ + return device->hal.nvswitch_store_topology_information(device, link); +} + +void +nvswitch_init_lpwr_regs +( + nvlink_link *link +) +{ + nvswitch_device *device = link->dev->pDevInfo; + device->hal.nvswitch_init_lpwr_regs(link); +} + +NvlStatus +nvswitch_set_training_mode +( + nvswitch_device *device +) +{ + return device->hal.nvswitch_set_training_mode(device); +} + +NvBool +nvswitch_is_link_in_reset +( + nvswitch_device *device, + nvlink_link *link +) +{ + return device->hal.nvswitch_is_link_in_reset(device, link); +} + +NvBool +nvswitch_i2c_is_device_access_allowed +( + nvswitch_device *device, + NvU32 port, + NvU8 addr, + NvBool bIsRead +) +{ + return device->hal.nvswitch_i2c_is_device_access_allowed(device, port, addr, bIsRead); +} + +NvlStatus +nvswitch_parse_bios_image +( + nvswitch_device *device +) +{ + return 
device->hal.nvswitch_parse_bios_image(device); +} + +void +nvswitch_init_buffer_ready +( + nvswitch_device *device, + nvlink_link *link, + NvBool bNportBufferReady +) +{ + return device->hal.nvswitch_init_buffer_ready(device, link, bNportBufferReady); +} + +void +nvswitch_apply_recal_settings +( + nvswitch_device *device, + nvlink_link *link +) +{ + return device->hal.nvswitch_apply_recal_settings(device, link); +} + +NvlStatus +nvswitch_lib_ctrl +( + nvswitch_device *device, + NvU32 cmd, + void *params, + NvU64 size, + void *osPrivate +) +{ + NvlStatus retval; + NvU64 flags = 0; + + if (!NVSWITCH_IS_DEVICE_ACCESSIBLE(device) || params == NULL) + { + return -NVL_BAD_ARGS; + } + + flags = NVSWITCH_DEV_CMD_CHECK_ADMIN | NVSWITCH_DEV_CMD_CHECK_FM; + switch (cmd) + { + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_INFO, + _nvswitch_ctrl_get_info, + NVSWITCH_GET_INFO); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_INTERNAL_LATENCY, + _nvswitch_ctrl_get_internal_latency, + NVSWITCH_GET_INTERNAL_LATENCY); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_NVLIPT_COUNTERS, + _nvswitch_ctrl_get_nvlipt_counters, + NVSWITCH_GET_NVLIPT_COUNTERS); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_ERRORS, + nvswitch_ctrl_get_errors, + NVSWITCH_GET_ERRORS_PARAMS); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_NVLINK_STATUS, + _nvswitch_ctrl_get_nvlink_status, + NVSWITCH_GET_NVLINK_STATUS_PARAMS); + NVSWITCH_DEV_CMD_DISPATCH_WITH_PRIVATE_DATA( + CTRL_NVSWITCH_ACQUIRE_CAPABILITY, + _nvswitch_ctrl_acquire_capability, + NVSWITCH_ACQUIRE_CAPABILITY_PARAMS, + osPrivate); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_TEMPERATURE, + _nvswitch_ctrl_therm_read_temperature, + NVSWITCH_CTRL_GET_TEMPERATURE_PARAMS); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_THROUGHPUT_COUNTERS, + _nvswitch_ctrl_get_throughput_counters, + NVSWITCH_GET_THROUGHPUT_COUNTERS_PARAMS); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_FATAL_ERROR_SCOPE, + _nvswitch_ctrl_get_fatal_error_scope, + NVSWITCH_GET_FATAL_ERROR_SCOPE_PARAMS); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_SWITCH_PORT_CONFIG, + _nvswitch_ctrl_set_switch_port_config, + NVSWITCH_SET_SWITCH_PORT_CONFIG, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_INGRESS_REQUEST_TABLE, + _nvswitch_ctrl_get_ingress_request_table, + NVSWITCH_GET_INGRESS_REQUEST_TABLE_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_INGRESS_REQUEST_TABLE, + _nvswitch_ctrl_set_ingress_request_table, + NVSWITCH_SET_INGRESS_REQUEST_TABLE, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_INGRESS_REQUEST_VALID, + _nvswitch_ctrl_set_ingress_request_valid, + NVSWITCH_SET_INGRESS_REQUEST_VALID, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_INGRESS_RESPONSE_TABLE, + _nvswitch_ctrl_get_ingress_response_table, + NVSWITCH_GET_INGRESS_RESPONSE_TABLE_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_INGRESS_RESPONSE_TABLE, + _nvswitch_ctrl_set_ingress_response_table, + NVSWITCH_SET_INGRESS_RESPONSE_TABLE, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_GANGED_LINK_TABLE, + _nvswitch_ctrl_set_ganged_link_table, + NVSWITCH_SET_GANGED_LINK_TABLE, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_SET_LATENCY_BINS, + nvswitch_ctrl_set_latency_bins, + NVSWITCH_SET_LATENCY_BINS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_NVLIPT_COUNTER_CONFIG, + 
_nvswitch_ctrl_set_nvlipt_counter_config, + NVSWITCH_SET_NVLIPT_COUNTER_CONFIG, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_NVLIPT_COUNTER_CONFIG, + _nvswitch_ctrl_get_nvlipt_counter_config, + NVSWITCH_GET_NVLIPT_COUNTER_CONFIG, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_SET_REMAP_POLICY, + _nvswitch_ctrl_set_remap_policy, + NVSWITCH_SET_REMAP_POLICY, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_GET_REMAP_POLICY, + _nvswitch_ctrl_get_remap_policy, + NVSWITCH_GET_REMAP_POLICY_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_REMAP_POLICY_VALID, + _nvswitch_ctrl_set_remap_policy_valid, + NVSWITCH_SET_REMAP_POLICY_VALID, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_SET_ROUTING_ID, + _nvswitch_ctrl_set_routing_id, + NVSWITCH_SET_ROUTING_ID, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_GET_ROUTING_ID, + _nvswitch_ctrl_get_routing_id, + NVSWITCH_GET_ROUTING_ID_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_SET_ROUTING_ID_VALID, + _nvswitch_ctrl_set_routing_id_valid, + NVSWITCH_SET_ROUTING_LAN_VALID, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_SET_ROUTING_LAN, + _nvswitch_ctrl_set_routing_lan, + NVSWITCH_SET_ROUTING_LAN, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_GET_ROUTING_LAN, + _nvswitch_ctrl_get_routing_lan, + NVSWITCH_GET_ROUTING_LAN_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_ROUTING_LAN_VALID, + _nvswitch_ctrl_set_routing_lan_valid, + NVSWITCH_SET_ROUTING_LAN_VALID, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_INGRESS_REQLINKID, + _nvswitch_ctrl_get_ingress_reqlinkid, + NVSWITCH_GET_INGRESS_REQLINKID_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_UNREGISTER_LINK, + _nvswitch_ctrl_unregister_link, + NVSWITCH_UNREGISTER_LINK_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_RESET_AND_DRAIN_LINKS, + _nvswitch_ctrl_reset_and_drain_links, + NVSWITCH_RESET_AND_DRAIN_LINKS_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_BIOS_INFO, + _nvswitch_ctrl_get_bios_info, + NVSWITCH_GET_BIOS_INFO_PARAMS); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_BLACKLIST_DEVICE, + nvswitch_ctrl_blacklist_device, + NVSWITCH_BLACKLIST_DEVICE_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_FM_DRIVER_STATE, + nvswitch_ctrl_set_fm_driver_state, + NVSWITCH_SET_FM_DRIVER_STATE_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_DEVICE_FABRIC_STATE, + nvswitch_ctrl_set_device_fabric_state, + NVSWITCH_SET_DEVICE_FABRIC_STATE_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_FM_HEARTBEAT_TIMEOUT, + nvswitch_ctrl_set_fm_timeout, + NVSWITCH_SET_FM_HEARTBEAT_TIMEOUT_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_WITH_PRIVATE_DATA( + CTRL_NVSWITCH_REGISTER_EVENTS, + _nvswitch_ctrl_register_events, + NVSWITCH_REGISTER_EVENTS_PARAMS, + osPrivate); + NVSWITCH_DEV_CMD_DISPATCH_WITH_PRIVATE_DATA( + CTRL_NVSWITCH_UNREGISTER_EVENTS, + _nvswitch_ctrl_unregister_events, + NVSWITCH_UNREGISTER_EVENTS_PARAMS, + osPrivate); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_SET_TRAINING_ERROR_INFO, + _nvswitch_ctrl_set_training_error_info, + 
NVSWITCH_SET_TRAINING_ERROR_INFO_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_COUNTERS, + _nvswitch_ctrl_get_counters, + NVSWITCH_NVLINK_GET_COUNTERS_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_NVLINK_ECC_ERRORS, + _nvswitch_ctrl_get_nvlink_ecc_errors, + NVSWITCH_GET_NVLINK_ECC_ERRORS_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_I2C_SMBUS_COMMAND, + _nvswitch_ctrl_i2c_smbus_command, + NVSWITCH_I2C_SMBUS_COMMAND_PARAMS, + osPrivate, NVSWITCH_DEV_CMD_CHECK_ADMIN); + NVSWITCH_DEV_CMD_DISPATCH_RESERVED( + CTRL_NVSWITCH_RESERVED_0); + NVSWITCH_DEV_CMD_DISPATCH_RESERVED( + CTRL_NVSWITCH_RESERVED_1); + NVSWITCH_DEV_CMD_DISPATCH_RESERVED( + CTRL_NVSWITCH_RESERVED_2); + NVSWITCH_DEV_CMD_DISPATCH_RESERVED( + CTRL_NVSWITCH_RESERVED_3); + NVSWITCH_DEV_CMD_DISPATCH_RESERVED( + CTRL_NVSWITCH_RESERVED_4); + NVSWITCH_DEV_CMD_DISPATCH_RESERVED( + CTRL_NVSWITCH_RESERVED_5); + NVSWITCH_DEV_CMD_DISPATCH( + CTRL_NVSWITCH_GET_TEMPERATURE_LIMIT, + _nvswitch_ctrl_therm_get_temperature_limit, + NVSWITCH_CTRL_GET_TEMPERATURE_LIMIT_PARAMS); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_NVLINK_MAX_ERROR_RATES, + _nvswitch_ctrl_get_inforom_nvlink_max_correctable_error_rate, + NVSWITCH_GET_NVLINK_MAX_CORRECTABLE_ERROR_RATES_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_NVLINK_ERROR_COUNTS, + _nvswitch_ctrl_get_inforom_nvlink_errors, + NVSWITCH_GET_NVLINK_ERROR_COUNTS_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_ECC_ERROR_COUNTS, + _nvswitch_ctrl_get_inforom_ecc_errors, + NVSWITCH_GET_ECC_ERROR_COUNTS_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_SXIDS, + _nvswitch_ctrl_get_inforom_bbx_sxid, + NVSWITCH_GET_SXIDS_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_FOM_VALUES, + _nvswitch_ctrl_get_fom_values, + NVSWITCH_GET_FOM_VALUES_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_NVLINK_LP_COUNTERS, + _nvswitch_ctrl_get_nvlink_lp_counters, + NVSWITCH_GET_NVLINK_LP_COUNTERS_PARAMS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_RESIDENCY_BINS, + _nvswitch_ctrl_get_residency_bins, + NVSWITCH_GET_RESIDENCY_BINS); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED(CTRL_NVSWITCH_SET_RESIDENCY_BINS, + _nvswitch_ctrl_set_residency_bins, + NVSWITCH_SET_RESIDENCY_BINS, + osPrivate, flags); + NVSWITCH_DEV_CMD_DISPATCH(CTRL_NVSWITCH_GET_RB_STALL_BUSY, + _nvswitch_ctrl_get_rb_stall_busy, + NVSWITCH_GET_RB_STALL_BUSY); + NVSWITCH_DEV_CMD_DISPATCH_PRIVILEGED( + CTRL_NVSWITCH_GET_SW_INFO, + _nvswitch_ctrl_get_sw_info, + NVSWITCH_GET_SW_INFO_PARAMS, + osPrivate, flags); + default: + nvswitch_os_print(NVSWITCH_DBG_LEVEL_INFO, "unknown ioctl %x\n", cmd); + retval = -NVL_BAD_ARGS; + break; + } + + return retval; +} diff --git a/src/common/nvswitch/kernel/pmgr_nvswitch.c b/src/common/nvswitch/kernel/pmgr_nvswitch.c new file mode 100644 index 000000000..8c59a9b3c --- /dev/null +++ b/src/common/nvswitch/kernel/pmgr_nvswitch.c @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "error_nvswitch.h" +#include "pmgr_nvswitch.h" + +void +nvswitch_i2c_init +( + nvswitch_device *device +) +{ + PNVSWITCH_OBJI2C pI2c = nvswitch_os_malloc(sizeof(struct NVSWITCH_OBJI2C)); + nvswitch_os_memset(pI2c, 0, sizeof(struct NVSWITCH_OBJI2C)); + device->pI2c = pI2c; +} + +void +nvswitch_i2c_destroy +( + nvswitch_device *device +) +{ + if (device->pI2c == NULL) + return; + + nvswitch_os_free(device->pI2c); + device->pI2c = NULL; +} + +/*! @brief Set up a port to use a PMGR implementation. + * + * @param[in] device NvSwitch device + * @param[in] port The port identifier for the bus. + */ +void +_nvswitch_i2c_set_port_pmgr +( + nvswitch_device *device, + NvU32 port +) +{ + NvU32 i; + NvU32 device_allow_list_size; + NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE *device_allow_list; + PNVSWITCH_OBJI2C pI2c = device->pI2c; + + NVSWITCH_ASSERT(port < NVSWITCH_MAX_I2C_PORTS); + + pI2c->PortInfo[port] = FLD_SET_DRF(_I2C, _PORTINFO, _DEFINED, _PRESENT, pI2c->PortInfo[port]); + pI2c->Ports[port].defaultSpeedMode = NVSWITCH_I2C_SPEED_MODE_100KHZ; + + device_allow_list = pI2c->i2c_allow_list; + device_allow_list_size = pI2c->i2c_allow_list_size; + + for (i = 0; i < device_allow_list_size; i++) + { + if (port == device_allow_list[i].i2cPortLogical) + { + pI2c->PortInfo[port] = FLD_SET_DRF(_I2C, _PORTINFO, + _ACCESS_ALLOWED, _TRUE, + pI2c->PortInfo[port]); + break; + } + } +} + diff --git a/src/common/nvswitch/kernel/rom_nvswitch.c b/src/common/nvswitch/kernel/rom_nvswitch.c new file mode 100644 index 000000000..fa3159e4f --- /dev/null +++ b/src/common/nvswitch/kernel/rom_nvswitch.c @@ -0,0 +1,1085 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "export_nvswitch.h" +#include "ctrl_dev_nvswitch.h" +#include "rom_nvswitch.h" +#include "common_nvswitch.h" +#include "haldef_nvswitch.h" + +static NvU8 +_nvswitch_calculate_checksum +( + NvU8 *data, + NvU32 size +) +{ + NvU32 i; + NvU8 checksum = 0; + + for (i = 0; i < size; i++) + { + checksum += data[i]; + } + return -checksum; +} + +static NvlStatus +_nvswitch_read_rom_bytes +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NvU32 offset, + NvU8 *buffer, + NvU32 buffer_size +) +{ + NVSWITCH_CTRL_I2C_INDEXED_PARAMS i2cIndexed = {0}; + NvU32 i; + NvlStatus retval; + + if (offset + buffer_size > (NvU32)(1 << eeprom->index_size)) + { + NVSWITCH_PRINT(device, SETUP, + "EEPROM offset 0x%x..0x%x out of range\n", + offset, offset + buffer_size - 1); + return -NVL_BAD_ARGS; + } + + if (buffer_size > NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX) + { + NVSWITCH_PRINT(device, SETUP, + "EEPROM read buffer (0x%x bytes) larger than max (0x%x bytes)\n", + buffer_size, NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX); + return -NVL_BAD_ARGS; + } + + i2cIndexed.port = (NvU8)eeprom->i2c_port; + i2cIndexed.bIsRead = NV_TRUE; + i2cIndexed.address = (NvU16)eeprom->i2c_address; + i2cIndexed.flags = + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _START, _SEND) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _RESTART, _SEND) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _STOP, _SEND) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _ADDRESS_MODE, _7BIT) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _FLAVOR, _HW) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _SPEED_MODE, _400KHZ) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _BLOCK_PROTOCOL, _DISABLED) | + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _TRANSACTION_MODE, _NORMAL) | + 0; + + if (eeprom->index_size <= 8) + { + i2cIndexed.flags |= + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _INDEX_LENGTH, _ONE); + i2cIndexed.index[0] = offset & 0x000FF; // Read [eeprom_offset] + } + else + { + i2cIndexed.address |= ((offset & 0x30000) >> 15); + i2cIndexed.flags |= + DRF_DEF(SWITCH_CTRL, _I2C_FLAGS, _INDEX_LENGTH, _TWO); + i2cIndexed.index[0] = (offset & 0x0FF00) >> 8; // Read [eeprom_offset] + i2cIndexed.index[1] = (offset & 0x000FF); + } + + i2cIndexed.messageLength = NV_MIN(buffer_size, NVSWITCH_CTRL_I2C_MESSAGE_LENGTH_MAX); + + retval = nvswitch_ctrl_i2c_indexed(device, &i2cIndexed); + if (retval != NVL_SUCCESS) + { + return retval; + } + + for (i = 0; i < i2cIndexed.messageLength; i++) + { + buffer[i] = i2cIndexed.message[i]; + } + + return retval; +} + +// +// Parse EEPROM header, if present +// +static NvlStatus +_nvswitch_read_rom_header +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NvU32 *offset +) +{ + NVSWITCH_EEPROM_HEADER eeprom_header = {{0}}; + NvlStatus retval; + + firmware->firmware_size = 0; + *offset = 0x0000; + + retval = _nvswitch_read_rom_bytes(device, + eeprom, *offset, + (NvU8 *) &eeprom_header, sizeof(eeprom_header)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "Unable to read ROM header\n"); + return retval; + } + + if ((eeprom_header.signature[0] == 'N') && + 
(eeprom_header.signature[1] == 'V') && + (eeprom_header.signature[2] == 'L') && + (eeprom_header.signature[3] == 'S') && + (_nvswitch_calculate_checksum((NvU8 *) &eeprom_header, sizeof(eeprom_header)) == 0x00)) + { + // Assume eeprom_header is version 1 + + *offset += eeprom_header.header_size; + + firmware->pci_vendor_id = eeprom_header.pci_vendor_id; + firmware->pci_device_id = eeprom_header.pci_device_id; + firmware->pci_system_vendor_id = eeprom_header.pci_system_vendor_id; + firmware->pci_system_device_id = eeprom_header.pci_system_device_id; + + // EEPROM header firmware size field is in 512 byte blocks + firmware->firmware_size = eeprom_header.firmware_size * 512; + } + else + { + NVSWITCH_PRINT(device, SETUP, + "Firmware header not found\n"); + return -NVL_NOT_FOUND; + } + + return NVL_SUCCESS; +} + +static NvlStatus +_nvswitch_rom_parse_bit_bridge_fw_data +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NVSWITCH_BIT_TOKEN *bit_token +) +{ + NVSWITCH_BIT_BRIDGE_FW_DATA bit_bridge_fw = {0}; + NvU32 copy_size; + NvU32 bridge_fw_size; + NvlStatus retval; + + firmware->bridge.bridge_fw_found = NV_FALSE; + + if (bit_token->data_size != sizeof(bit_bridge_fw)) + { + NVSWITCH_PRINT(device, SETUP, + "BIT_BRIDGE_FW_DATA: Expected data size 0x%x but found 0x%x\n", + (NvU32) sizeof(bit_bridge_fw), bit_token->data_size); + } + + bridge_fw_size = NV_MIN(bit_token->data_size, sizeof(bit_bridge_fw)); + + // Get basic bridge-specific firmware info + retval = _nvswitch_read_rom_bytes(device, eeprom, bit_token->data_offset, + (NvU8 *) &bit_bridge_fw, bridge_fw_size); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "Failed to read BIT_BRIDGE_FW_DATA\n"); + return -NVL_ERR_NOT_SUPPORTED; + } + + firmware->bridge.bridge_fw_found = NV_TRUE; + + firmware->bridge.firmware_version = + NVSWITCH_ELEMENT_READ(&bit_bridge_fw, firmware_version, bridge_fw_size, 0); + + firmware->bridge.oem_version = + NVSWITCH_ELEMENT_READ(&bit_bridge_fw, oem_version, bridge_fw_size, 0); + + NVSWITCH_ELEMENT_VALIDATE(&bit_bridge_fw, firmware_size, bridge_fw_size, 0, + firmware->firmware_size/512); + + if (NVSWITCH_ELEMENT_PRESENT(&bit_bridge_fw, BIOS_MOD_date, bridge_fw_size)) + { + nvswitch_os_memcpy(firmware->bridge.BIOS_MOD_date, bit_bridge_fw.BIOS_MOD_date, + sizeof(firmware->bridge.BIOS_MOD_date)); + } + + firmware->bridge.fw_release_build = + (NVSWITCH_ELEMENT_PRESENT(&bit_bridge_fw, firmware_flags, bridge_fw_size) ? 
+ FLD_TEST_DRF(SWITCH_BIT_BRIDGE_FW_DATA, _FLAGS, _BUILD, _REL, + bit_bridge_fw.firmware_flags) : + NV_FALSE); + + copy_size = NV_MIN(NVSWITCH_PRODUCT_NAME_MAX_LEN, + NVSWITCH_ELEMENT_READ(&bit_bridge_fw, eng_product_name_size, bridge_fw_size, 0)); + if (copy_size > 0) + { + retval = _nvswitch_read_rom_bytes(device, eeprom, + bit_bridge_fw.eng_product_name, + (NvU8 *) firmware->bridge.product_name, copy_size); + if (retval != NVL_SUCCESS) + { + // Failed to read product name string + copy_size = 0; + } + } + firmware->bridge.product_name[copy_size] = 0; + + firmware->bridge.instance_id = NVSWITCH_ELEMENT_READ( + &bit_bridge_fw, + nvswitch_instance_id, + bridge_fw_size, + NVSWITCH_FIRMWARE_BRIDGE_INSTANCE_ID_UNKNOWN); + + return retval; +} + +static NvlStatus +_nvswitch_rom_parse_bit_clock_ptrs +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NVSWITCH_BIT_TOKEN *bit_token +) +{ + NVSWITCH_BIT_CLOCK_PTRS bit_clock_ptrs = {0}; + NVSWITCH_PLL_INFO_HEADER pll_info_header; + NVSWITCH_PLL_INFO_ENTRY pll_info; + NvU32 pll_info_offset; + NvU32 idx_pll; + NvU32 clock_ptrs_size; + NvU32 pll_info_table; + NvlStatus retval; + + firmware->clocks.clocks_found = NV_FALSE; + + if (bit_token->data_size != sizeof(bit_clock_ptrs)) + { + NVSWITCH_PRINT(device, SETUP, + "CLOCK_PTRS: Expected data size 0x%x but found 0x%x\n", + (NvU32) sizeof(bit_clock_ptrs), bit_token->data_size); + } + + clock_ptrs_size = NV_MIN(bit_token->data_size, sizeof(bit_clock_ptrs)); + + // Get PLL limits + retval = _nvswitch_read_rom_bytes(device, eeprom, bit_token->data_offset, + (NvU8 *) &bit_clock_ptrs, clock_ptrs_size); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "NVINIT_PTRS: Failed to read BIT_TOKEN_CLOCK_PTRS\n"); + return -NVL_ERR_NOT_SUPPORTED; + } + + pll_info_table = NVSWITCH_ELEMENT_READ(&bit_clock_ptrs, pll_info_table, clock_ptrs_size, 0); + + if ((pll_info_table == 0) || + (pll_info_table + sizeof(pll_info_header) > firmware->firmware_size)) + { + NVSWITCH_PRINT(device, SETUP, + "NVINIT_PTRS: BIT_TOKEN_CLOCK_PTRS not preset or out of range (0x%x)\n", + bit_clock_ptrs.pll_info_table); + return -NVL_ERR_NOT_SUPPORTED; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, pll_info_table, + (NvU8 *) &pll_info_header, sizeof(pll_info_header)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "CLOCK_PTRS: Failed to read NVSWITCH_PLL_INFO_HEADER\n"); + return -NVL_ERR_NOT_SUPPORTED; + } + + if (pll_info_header.version != NVSWITCH_CLOCK_PTRS_PLL_INFO_VERSION) + { + NVSWITCH_PRINT(device, SETUP, + "PLL_INFO version (0x%x) != expected version (0x%x)\n", + pll_info_header.version, NVSWITCH_CLOCK_PTRS_PLL_INFO_VERSION); + return -NVL_ERR_NOT_SUPPORTED; + } + if (pll_info_header.header_size != sizeof(NVSWITCH_PLL_INFO_HEADER)) + { + NVSWITCH_PRINT(device, SETUP, + "PLL_INFO header size (0x%x) != expected (0x%x)\n", + pll_info_header.header_size, (NvU32) sizeof(NVSWITCH_PLL_INFO_HEADER)); + return -NVL_ERR_NOT_SUPPORTED; + } + if (pll_info_header.entry_size != sizeof(NVSWITCH_PLL_INFO_ENTRY)) + { + NVSWITCH_PRINT(device, SETUP, + "PLL_INFO: Expected entry size 0x%x but found 0x%x\n", + (NvU32) sizeof(NVSWITCH_PLL_INFO_ENTRY), pll_info_header.entry_size); + return -NVL_ERR_NOT_SUPPORTED; + } + + firmware->clocks.clocks_found = NV_TRUE; + firmware->clocks.sys_pll.valid = NV_FALSE; + + for (idx_pll = 0; idx_pll < pll_info_header.entry_count; idx_pll++) + { + pll_info_offset = + bit_clock_ptrs.pll_info_table + pll_info_header.header_size + + 
idx_pll*pll_info_header.entry_size; + if (pll_info_offset + sizeof(pll_info) > firmware->firmware_size) + { + NVSWITCH_PRINT(device, SETUP, + "PLL info #%d out of range (%x+%x > %x)\n", idx_pll, + pll_info_offset, (NvU32) sizeof(pll_info), firmware->firmware_size); + retval = -NVL_NOT_FOUND; + break; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, pll_info_offset, + (NvU8 *) &pll_info, sizeof(pll_info)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "CLOCK_PTRS: Failed to read NVSWITCH_PLL_INFO_ENTRY\n"); + retval = -NVL_ERR_NOT_SUPPORTED; + break; + } + + if (pll_info.pll_id == NVSWITCH_PLL_ID_SYSPLL) + { + if (firmware->clocks.sys_pll.valid) + { + NVSWITCH_PRINT(device, SETUP, + "NVINIT_PTRS: More than 1 SYSPLL entry found. Skipping\n"); + } + else + { + firmware->clocks.sys_pll.valid = NV_TRUE; + firmware->clocks.sys_pll.ref_min_mhz = pll_info.ref_min_mhz; + firmware->clocks.sys_pll.ref_max_mhz = pll_info.ref_max_mhz; + firmware->clocks.sys_pll.vco_min_mhz = pll_info.vco_min_mhz; + firmware->clocks.sys_pll.vco_max_mhz = pll_info.vco_max_mhz; + firmware->clocks.sys_pll.update_min_mhz = pll_info.update_min_mhz; + firmware->clocks.sys_pll.update_max_mhz = pll_info.update_max_mhz; + firmware->clocks.sys_pll.m_min = pll_info.m_min; + firmware->clocks.sys_pll.m_max = pll_info.m_max; + firmware->clocks.sys_pll.n_min = pll_info.n_min; + firmware->clocks.sys_pll.n_max = pll_info.n_max; + firmware->clocks.sys_pll.pl_min = pll_info.pl_min; + firmware->clocks.sys_pll.pl_max = pll_info.pl_max; + } + } + else + { + NVSWITCH_PRINT(device, SETUP, + "Ignoring PLL ID 0x%x\n", pll_info.pll_id); + } + } + + return retval; +} + +static NvlStatus +_nvswitch_rom_parse_bit_nvinit_ptrs +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NVSWITCH_BIT_TOKEN *bit_token +) +{ + NVSWITCH_BIT_NVINIT_PTRS bit_nvinit_ptrs = {0}; + NVSWITCH_NVLINK_CONFIG nvlink_config; + NvU32 nvinit_ptrs_size; + NvU32 nvlink_config_offset; + NvU32 nvlink_config_size; + NvlStatus retval; + + firmware->nvlink.link_config_found = NV_FALSE; + + if (bit_token->data_size != sizeof(bit_nvinit_ptrs)) + { + NVSWITCH_PRINT(device, SETUP, + "NVINIT_PTRS: Expected data size 0x%x but found 0x%x\n", + (NvU32) sizeof(bit_nvinit_ptrs), bit_token->data_size); + } + + nvinit_ptrs_size = NV_MIN(bit_token->data_size, sizeof(bit_nvinit_ptrs)); + + // Get basic NVLink settings + retval = _nvswitch_read_rom_bytes(device, eeprom, bit_token->data_offset, + (NvU8 *) &bit_nvinit_ptrs, nvinit_ptrs_size); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "NVINIT_PTRS: Failed to read NVSWITCH_BIT_NVINIT_PTRS\n"); + return -NVL_ERR_NOT_SUPPORTED; + } + + nvlink_config_offset = NVSWITCH_ELEMENT_READ(&bit_nvinit_ptrs, nvlink_config, nvinit_ptrs_size, 0); + if ((nvlink_config_offset == 0) || + (nvlink_config_offset + sizeof(nvlink_config) > firmware->firmware_size)) + { + NVSWITCH_PRINT(device, SETUP, + "NVINIT_PTRS: NVSWITCH_BIT_NVINIT_PTRS NVLink config absent or out of range (0x%x)\n", + bit_nvinit_ptrs.nvlink_config); + return -NVL_ERR_NOT_SUPPORTED; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, nvlink_config_offset, + (NvU8 *) &nvlink_config, sizeof(nvlink_config)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "NVINIT_PTRS: Failed to read NVSWITCH_NVLINK_CONFIG\n"); + return -NVL_ERR_NOT_SUPPORTED; + } + + nvlink_config_size = NV_MIN(nvlink_config.size, sizeof(nvlink_config)); + + if (0x01 != NVSWITCH_ELEMENT_READ(&nvlink_config, version, 
nvlink_config_size, 0)) + { + NVSWITCH_PRINT(device, SETUP, + "NVINIT_PTRS: NVLINK_CONFIG version mismatch (0x01 != 0x%x)\n", + NVSWITCH_ELEMENT_READ(&nvlink_config, version, nvlink_config_size, 0)); + return -NVL_ERR_NOT_SUPPORTED; + } + + NVSWITCH_ELEMENT_CHECK(&nvlink_config, flags, nvlink_config_size, 0x0); + NVSWITCH_ELEMENT_CHECK(&nvlink_config, link_speed_mask, nvlink_config_size, 0x0); + NVSWITCH_ELEMENT_CHECK(&nvlink_config, link_refclk_mask, nvlink_config_size, 0x0); + + firmware->nvlink.link_config_found = NV_TRUE; + + // + // If nvlink_config is incomplete, assume: + // 1) all links enabled + // 2) DC coupled + // + firmware->nvlink.link_enable_mask = ~NVSWITCH_ELEMENT_READ(&nvlink_config, link_disable_mask, nvlink_config_size, 0); + firmware->nvlink.link_ac_coupled_mask = NVSWITCH_ELEMENT_READ(&nvlink_config, ac_coupled_mask, nvlink_config_size, 0); + + return retval; +} + +static void +_nvswitch_rom_parse_bit_dcb_ccb_block +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NvU32 ccb_block_offset +) +{ + NVSWITCH_CCB_TABLE ccb; + NVSWITCH_CCB_ENTRY ccb_entry; + NvU32 ccb_table_offset; + NvU32 idx_ccb; + NvU32 retval; + + // dcb:ccb_block_ptr + if ((ccb_block_offset == 0) || + (ccb_block_offset + sizeof(NVSWITCH_CCB_TABLE) > firmware->firmware_size)) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: CCB_BLOCK absent or out of range (0x%x)\n", + ccb_block_offset); + return; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, ccb_block_offset, + (NvU8 *) &ccb, sizeof(ccb)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, "DCB_PTRS: CCB header read failure\n"); + return; + } + + if ((ccb.version != NVSWITCH_CCB_VERSION) || + (ccb.header_size != sizeof(ccb))) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: CCB_BLOCK version (0x%x) or size mismatch (0x%x)\n", + ccb.version, ccb.header_size); + return; + } + + ccb_table_offset = ccb_block_offset + ccb.header_size; + + for (idx_ccb = 0; idx_ccb < ccb.entry_count; idx_ccb++) + { + NvU32 ccb_entry_offset = ccb_table_offset + idx_ccb*ccb.entry_size; + NvU32 i2c_bus_idx; + NvU32 idx_i2c_port; + + if (ccb_entry_offset + sizeof(ccb_entry) > firmware->firmware_size) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: CCB out of range\n"); + break; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, ccb_entry_offset, + (NvU8 *) &ccb_entry, sizeof(ccb_entry)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: CCB entry[%d] read failure\n", + idx_ccb); + break; + } + + i2c_bus_idx = DRF_VAL(SWITCH_CCB, _DEVICE, _I2C_PORT, ccb_entry.device); + if (i2c_bus_idx >= NVSWITCH_MAX_I2C_PORTS) + { + continue; + } + + for (idx_i2c_port = 0; idx_i2c_port < NVSWITCH_MAX_I2C_PORTS; idx_i2c_port++) + { + if (ccb.comm_port[idx_i2c_port] == i2c_bus_idx) + { + break; + } + } + + if (idx_i2c_port >= NVSWITCH_MAX_I2C_PORTS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: CCB entry[%d] I2C port %x out of range\n", + idx_ccb, idx_i2c_port); + continue; + } + + firmware->dcb.i2c[idx_i2c_port].valid = NV_TRUE; + firmware->dcb.i2c[idx_i2c_port].i2c_speed = DRF_VAL(SWITCH_CCB, _DEVICE, _I2C_SPEED, ccb_entry.device); + firmware->dcb.i2c[idx_i2c_port].i2c_33v = DRF_VAL(SWITCH_CCB, _DEVICE, _VOLTAGE, ccb_entry.device); + } +} + +static void +_nvswitch_rom_parse_bit_dcb_gpio_table +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NvU32 gpio_table_offset +) +{ + NVSWITCH_GPIO_TABLE gpio; + NVSWITCH_GPIO_ENTRY gpio_entry; + NvU32 
idx_gpio; + NvU32 retval; + + // gpio_tables + if ((gpio_table_offset == 0) || + (gpio_table_offset + sizeof(NVSWITCH_GPIO_TABLE) > firmware->firmware_size)) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: GPIO_TABLE absent or out of range (0x%x)\n", + gpio_table_offset); + return; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, gpio_table_offset, + (NvU8 *) &gpio, sizeof(gpio)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: GPIO table read failure\n"); + return; + } + + if ((gpio.version != NVSWITCH_GPIO_TABLE_VERSION_42) || + (gpio.header_size != sizeof(gpio))) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: GPIO_TABLE version (0x%x) or size mismatch (0x%x)\n", + gpio.version, gpio.header_size); + return; + } + + if (gpio.entry_size != sizeof(gpio_entry)) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: GPIO_ENTRY size mismatch (0x%x != 0x%x)\n", + gpio.entry_size, (NvU32) sizeof(gpio_entry)); + return; + } + + NVSWITCH_ELEMENT_CHECK(&gpio, ext_gpio_master, gpio.header_size, 0x0000); + + gpio_table_offset += gpio.header_size; + firmware->dcb.gpio_pin_count = 0; + + for (idx_gpio = 0; idx_gpio < gpio.entry_count; idx_gpio++) + { + NVSWITCH_GPIO_INFO *gpio_pin; + NvU32 gpio_entry_offset = gpio_table_offset + idx_gpio*gpio.entry_size; + + if (gpio_entry_offset + gpio.entry_size > firmware->firmware_size) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: GPIO entry[%d] out of range\n", + idx_gpio); + break; + } + + if (firmware->dcb.gpio_pin_count == NVSWITCH_MAX_GPIO_PINS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: Too many GPIO pins listed\n"); + break; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, gpio_entry_offset, + (NvU8 *) &gpio_entry, sizeof(gpio_entry)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: GPIO entry read failure\n"); + break; + } + + if (!FLD_TEST_DRF(SWITCH_GPIO_ENTRY, , _FUNCTION, _SKIP_ENTRY, gpio_entry.function)) + { + gpio_pin = &firmware->dcb.gpio_pin[firmware->dcb.gpio_pin_count]; + firmware->dcb.gpio_pin_count++; + + gpio_pin->pin = DRF_VAL(SWITCH_GPIO_ENTRY, _PIN, _NUM, gpio_entry.pin); + gpio_pin->function = DRF_VAL(SWITCH_GPIO_ENTRY, , _FUNCTION, gpio_entry.function); + gpio_pin->hw_select = DRF_VAL(SWITCH_GPIO_ENTRY, _INPUT, _HW_SELECT, gpio_entry.input); + gpio_pin->misc = DRF_VAL(SWITCH_GPIO_ENTRY, _MISC, _IO, gpio_entry.misc); + } + } +} + +static void +_nvswitch_rom_parse_bit_dcb_i2c_devices +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NvU32 i2c_devices_offset +) +{ + NVSWITCH_I2C_TABLE i2c; + NVSWITCH_I2C_ENTRY i2c_entry; + NvU32 i2c_table_offset; + NvU32 idx_i2c; + NvU32 retval; + + // i2c_devices + if ((i2c_devices_offset == 0) || + (i2c_devices_offset + sizeof(NVSWITCH_I2C_TABLE) > firmware->firmware_size)) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: I2C_DEVICES absent or out of range (0x%x)\n", + i2c_devices_offset); + return; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, i2c_devices_offset, + (NvU8 *) &i2c, sizeof(i2c)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: I2C device read failure\n"); + return; + } + + if ((i2c.version != NVSWITCH_I2C_VERSION) || + (i2c.header_size != sizeof(i2c)) || + (i2c.entry_size != sizeof(i2c_entry))) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: I2C header version (0x%x) or header/entry size mismatch (0x%x/0x%x)\n", + i2c.version, i2c.header_size, i2c.entry_size); + return; + } + + i2c_table_offset = i2c_devices_offset + 
i2c.header_size; + + firmware->dcb.i2c_device_count = 0; + + for (idx_i2c = 0; idx_i2c < i2c.entry_count; idx_i2c++) + { + NvU32 i2c_entry_offset = i2c_table_offset + idx_i2c*i2c.entry_size; + NVSWITCH_I2C_DEVICE_DESCRIPTOR_TYPE *i2c_device; + + if (i2c_entry_offset + sizeof(i2c_entry) > firmware->firmware_size) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: I2C[%d] out of range\n", + idx_i2c); + break; + } + + if (firmware->dcb.i2c_device_count >= NVSWITCH_MAX_I2C_DEVICES) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: Too many I2C devices listed\n"); + break; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, i2c_entry_offset, + (NvU8 *) &i2c_entry, sizeof(i2c_entry)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: I2C read failure\n"); + break; + } + + if (NVSWITCH_I2C_DEVICE_SKIP != DRF_VAL(SWITCH_I2C, _ENTRY, _TYPE, i2c_entry.device)) + { + i2c_device = &firmware->dcb.i2c_device[firmware->dcb.i2c_device_count]; + firmware->dcb.i2c_device_count++; + + i2c_device->i2cDeviceType = DRF_VAL(SWITCH_I2C, _ENTRY, _TYPE, i2c_entry.device); + i2c_device->i2cAddress = DRF_VAL(SWITCH_I2C, _ENTRY, _ADDRESS, i2c_entry.device); + i2c_device->i2cPortLogical = + (DRF_VAL(SWITCH_I2C, _ENTRY, _PORT_2, i2c_entry.device) << 1) | + DRF_VAL(SWITCH_I2C, _ENTRY, _PORT_1, i2c_entry.device); + } + } +} + +static NvlStatus +_nvswitch_rom_parse_bit_dcb_ptrs +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NVSWITCH_BIT_TOKEN *bit_token +) +{ + NVSWITCH_BIT_DCB_PTRS dcb_ptrs; + NVSWITCH_DCB_HEADER dcb; + NvU32 dcb_ptrs_size; + NvU32 dcb_version; + NvU32 dcb_signature; + NvlStatus retval = NVL_SUCCESS; + + firmware->dcb.dcb_found = NV_FALSE; + + if (bit_token->data_size != sizeof(dcb_ptrs)) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: Expected data size 0x%x but found 0x%x\n", + (NvU32) sizeof(dcb_ptrs), bit_token->data_size); + } + + dcb_ptrs_size = NV_MIN(bit_token->data_size, sizeof(dcb_ptrs)); + + // Get I2C & GPIO tables + retval = _nvswitch_read_rom_bytes(device, eeprom, bit_token->data_offset, + (NvU8 *) &dcb_ptrs, dcb_ptrs_size); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: Failed to read NVSWITCH_BIT_DCB_PTRS\n"); + return retval; + } + + if ((dcb_ptrs.dcb_header_ptr == 0) || + (dcb_ptrs.dcb_header_ptr >= firmware->firmware_size)) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: DCB header absent or out of range (0x%x)\n", + dcb_ptrs.dcb_header_ptr); + return -NVL_ERR_NOT_SUPPORTED; + } + + retval = _nvswitch_read_rom_bytes(device, eeprom, dcb_ptrs.dcb_header_ptr, + (NvU8 *) &dcb, sizeof(dcb)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: DCB header read failure\n"); + return retval; + } + + dcb_version = NVSWITCH_ELEMENT_READ(&dcb, version, dcb.header_size, 0x0); + dcb_signature = NVSWITCH_ELEMENT_READ(&dcb, dcb_signature, dcb.header_size, 0x0); + if ((dcb_version != NVSWITCH_DCB_HEADER_VERSION_41) || + (dcb_signature != NVSWITCH_DCB_HEADER_SIGNATURE)) + { + NVSWITCH_PRINT(device, SETUP, + "DCB_PTRS: DCB header version (0x%x) or signature mismatch (0x%x)\n", + dcb_version, dcb_signature); + return -NVL_ERR_NOT_SUPPORTED; + } + + _nvswitch_rom_parse_bit_dcb_ccb_block(device, eeprom, firmware, dcb.ccb_block_ptr); + _nvswitch_rom_parse_bit_dcb_i2c_devices(device, eeprom, firmware, dcb.i2c_devices); + _nvswitch_rom_parse_bit_dcb_gpio_table(device, eeprom, firmware, dcb.gpio_table); + + return retval; +} + +// +// Parse BIT tokens, if present +// +static 
NvlStatus +_nvswitch_read_bit_tokens +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NVSWITCH_BIT_HEADER *bit_header, + NvU32 *offset +) +{ + NvU32 idx_token; + NvU32 bit_entry_offset; + NVSWITCH_BIT_TOKEN bit_token; + NvlStatus retval = NVL_SUCCESS; + + for (idx_token = 0; idx_token < bit_header->token_entries; idx_token++) + { + bit_entry_offset = *offset + idx_token*bit_header->token_size; + if (bit_entry_offset >= firmware->firmware_size) + { + NVSWITCH_PRINT(device, SETUP, + "BIT token out of range (%x >= %x)\n", + bit_entry_offset, firmware->firmware_size); + return -NVL_NOT_FOUND; + } + + retval = _nvswitch_read_rom_bytes(device, + eeprom, bit_entry_offset, + (NvU8 *) &bit_token, sizeof(bit_token)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "Error reading BIT token[%d]\n", idx_token); + return -NVL_NOT_FOUND; + } + + if (bit_token.data_offset >= firmware->firmware_size) + { + NVSWITCH_PRINT(device, SETUP, + "BIT 0x%x target data out of range (%x >= %x)\n", + bit_token.id, + bit_token.data_offset, firmware->firmware_size); + // Soldier on to the next one. Hopefully it's valid + continue; + } + + switch (bit_token.id) + { + case NVSWITCH_BIT_TOKEN_CLOCK_PTRS: + { + retval = _nvswitch_rom_parse_bit_clock_ptrs(device, eeprom, firmware, &bit_token); + break; + } + case NVSWITCH_BIT_TOKEN_NVINIT_PTRS: + { + retval = _nvswitch_rom_parse_bit_nvinit_ptrs(device, eeprom, firmware, &bit_token); + break; + } + case NVSWITCH_BIT_TOKEN_NOP: + { + // Ignore + break; + } + case NVSWITCH_BIT_TOKEN_PERF_PTRS: + { + NVSWITCH_PRINT(device, INFO, "Skipping parsing BIT_TOKEN_PERF_PTRS\n"); + break; + } + case NVSWITCH_BIT_TOKEN_BRIDGE_FW_DATA: + { + retval = _nvswitch_rom_parse_bit_bridge_fw_data(device, eeprom, firmware, &bit_token); + break; + } + case NVSWITCH_BIT_TOKEN_DCB_PTRS: + { + retval = _nvswitch_rom_parse_bit_dcb_ptrs(device, eeprom, firmware, &bit_token); + break; + } + default: + { + NVSWITCH_PRINT(device, SETUP, + "Unrecognized BIT_TOKEN 0x%02x\n", bit_token.id); + break; + } + } + } + + return retval; +} + +// +// Parse BIT table, if present +// +static NvlStatus +_nvswitch_read_bit_table +( + nvswitch_device *device, + NVSWITCH_EEPROM_TYPE *eeprom, + NVSWITCH_FIRMWARE *firmware, + NvU32 *offset +) +{ + NVSWITCH_BIT_HEADER bit_header = {0}; + NvlStatus retval; + + retval = _nvswitch_read_rom_bytes(device, + eeprom, *offset, + (NvU8 *) &bit_header, sizeof(bit_header)); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "Unable to read BIT header @%04x\n", + *offset); + return retval; + } + + if ((bit_header.id == 0xB8FF) && + (bit_header.signature[0] == 'B') && + (bit_header.signature[1] == 'I') && + (bit_header.signature[2] == 'T') && + (bit_header.signature[3] == 0x00) && + (bit_header.bcd_version == 0x0100) && + (_nvswitch_calculate_checksum((NvU8 *) &bit_header, sizeof(bit_header)) == 0x00)) + { + *offset += bit_header.header_size; + if (*offset >= firmware->firmware_size) + { + NVSWITCH_PRINT(device, SETUP, + "BIT token table out of range (%x >= %x)\n", + *offset, firmware->firmware_size); + return -NVL_NOT_FOUND; + } + } + else + { + NVSWITCH_PRINT(device, SETUP, + "BIT header not found @%04x\n", + *offset); + return -NVL_NOT_FOUND; + } + + retval = _nvswitch_read_bit_tokens(device, eeprom, firmware, &bit_header, offset); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "Unable to read BIT tokens\n"); + return retval; + } + + return NVL_SUCCESS; +} + +// +// Print BIT table 
information +// +static void +_nvswitch_print_bit_table_info +( + nvswitch_device *device, + NVSWITCH_FIRMWARE *firmware +) +{ + if (firmware->firmware_size > 0) + { + NVSWITCH_PRINT(device, SETUP, "PCI ID: %04x/%04x\n", + firmware->pci_vendor_id, + firmware->pci_device_id); + NVSWITCH_PRINT(device, SETUP, "Subsystem PCI ID: %04x/%04x\n", + firmware->pci_system_vendor_id, + firmware->pci_system_device_id); + + if (firmware->bridge.bridge_fw_found) + { + NVSWITCH_PRINT(device, SETUP, "firmware_version: %08x\n", + firmware->bridge.firmware_version); + NVSWITCH_PRINT(device, SETUP, "oem_version: %02x\n", + firmware->bridge.oem_version); + NVSWITCH_PRINT(device, SETUP, "BIOS_MOD_date: '%.8s'\n", + firmware->bridge.BIOS_MOD_date); + NVSWITCH_PRINT(device, SETUP, "fw_release_build: %s\n", + (firmware->bridge.fw_release_build ? "REL" : "ENG")); + NVSWITCH_PRINT(device, SETUP, "product_name: '%s'\n", + firmware->bridge.product_name); + if (firmware->bridge.instance_id != NVSWITCH_FIRMWARE_BRIDGE_INSTANCE_ID_UNKNOWN) + { + NVSWITCH_PRINT(device, SETUP, "instance_id: %04x\n", + firmware->bridge.instance_id); + } + } + + if (firmware->nvlink.link_config_found) + { + NVSWITCH_PRINT(device, SETUP, "link_enable: %016llx\n", firmware->nvlink.link_enable_mask); + NVSWITCH_PRINT(device, SETUP, "ac_coupled: %016llx\n", firmware->nvlink.link_ac_coupled_mask); + } + } +} + +// +// Parse EEPROM BIT tables, if present +// +void +nvswitch_read_rom_tables +( + nvswitch_device *device, + NVSWITCH_FIRMWARE *firmware +) +{ + NVSWITCH_EEPROM_TYPE eeprom = {0}; + NvU32 offset; + NvlStatus retval; + + retval = nvswitch_get_rom_info(device, &eeprom); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "ROM configuration not supported\n"); + return; + } + + retval = _nvswitch_read_rom_header(device, &eeprom, firmware, &offset); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "Unable to read ROM header\n"); + return; + } + + retval = _nvswitch_read_bit_table(device, &eeprom, firmware, &offset); + if (retval != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, SETUP, + "Unable to read BIT table\n"); + return; + } + + _nvswitch_print_bit_table_info(device, firmware); + + return; +} + diff --git a/src/common/nvswitch/kernel/smbpbi_nvswitch.c b/src/common/nvswitch/kernel/smbpbi_nvswitch.c new file mode 100644 index 000000000..6e4389073 --- /dev/null +++ b/src/common/nvswitch/kernel/smbpbi_nvswitch.c @@ -0,0 +1,734 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvfixedtypes.h" +#include "common_nvswitch.h" +#include "error_nvswitch.h" +#include "rmsoecmdif.h" +#include "smbpbi_nvswitch.h" +#include "nvswitch/lr10/dev_ext_devices.h" + +#include "flcn/flcn_nvswitch.h" + +#include "rmflcncmdif_nvswitch.h" + +#define GET_PFIFO_FROM_DEVICE(dev) (&(dev)->pSmbpbi->sharedSurface->inforomObjects.DEM.object.v1) + +#define DEM_FIFO_SIZE INFOROM_DEM_OBJECT_V1_00_FIFO_SIZE +#define DEM_FIFO_PTR(x) ((x) % DEM_FIFO_SIZE) +#define DEM_PTR_DIFF(cur, next) (((next) > (cur)) ? ((next) - (cur)) : \ + (DEM_FIFO_SIZE - ((cur) - (next)))) +#define DEM_BYTES_OCCUPIED(pf) DEM_PTR_DIFF((pf)->readOffset, (pf)->writeOffset) +// +// See how much space is available in the FIFO. +// Must leave 1 word free so the write pointer does not +// catch up with the read pointer. That would be indistinguishable +// from an empty FIFO. +// +#define DEM_BYTES_AVAILABLE(pf) (DEM_PTR_DIFF((pf)->writeOffset, (pf)->readOffset) - \ + sizeof(NvU32)) +#define DEM_RECORD_SIZE_MAX (sizeof(NV_MSGBOX_DEM_RECORD) \ + + NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE) +#define DEM_RECORD_SIZE_MIN (sizeof(NV_MSGBOX_DEM_RECORD) + 1) + +#define FIFO_REC_LOOP_ITERATOR _curPtr +#define FIFO_REC_LOOP_REC_PTR _recPtr +#define FIFO_REC_LOOP_REC_SIZE _recSize +#define FIFO_REC_LOOP_START(pf, cond) \ +{ \ + NvU16 _nextPtr; \ + for (FIFO_REC_LOOP_ITERATOR = (pf)->readOffset; cond; FIFO_REC_LOOP_ITERATOR = _nextPtr) \ + { \ + NV_MSGBOX_DEM_RECORD *FIFO_REC_LOOP_REC_PTR = (NV_MSGBOX_DEM_RECORD *) \ + ((pf)->fifoBuffer + FIFO_REC_LOOP_ITERATOR); \ + NvU16 FIFO_REC_LOOP_REC_SIZE = \ + FIFO_REC_LOOP_REC_PTR->recordSize * sizeof(NvU32); + +#define FIFO_REC_LOOP_END \ + _nextPtr = DEM_FIFO_PTR(FIFO_REC_LOOP_ITERATOR + FIFO_REC_LOOP_REC_SIZE); \ + } \ +} + +static void _smbpbiDemInit(nvswitch_device *device, struct smbpbi *pSmbpbi, struct INFOROM_DEM_OBJECT_V1_00 *pFifo); +static void _nvswitch_smbpbi_dem_flush(nvswitch_device *device); + + +NvlStatus +nvswitch_smbpbi_init +( + nvswitch_device *device +) +{ + NV_STATUS status; + NvU64 dmaHandle; + void *cpuAddr; + + if (!device->pSoe) + { + return -NVL_ERR_INVALID_STATE; + } + + // Create DMA mapping for SMBPBI transactions + status = nvswitch_os_alloc_contig_memory(device->os_handle, &cpuAddr, + sizeof(SOE_SMBPBI_SHARED_SURFACE), + (device->dma_addr_width == 32)); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to allocate contig memory, rc:%d\n", + status); + return status; + } + + nvswitch_os_memset(cpuAddr, 0, sizeof(SOE_SMBPBI_SHARED_SURFACE)); + + status = nvswitch_os_map_dma_region(device->os_handle, cpuAddr, &dmaHandle, + sizeof(SOE_SMBPBI_SHARED_SURFACE), + NVSWITCH_DMA_DIR_BIDIRECTIONAL); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, + "Failed to map dma region for SMBPBI shared surface, rc:%d\n", + status); + goto os_map_dma_region_fail; + } + + device->pSmbpbi = nvswitch_os_malloc(sizeof(struct smbpbi)); + if (!device->pSmbpbi) + { + status = -NVL_NO_MEM; + goto smbpbi_init_fail; + } + + device->pSmbpbi->sharedSurface = cpuAddr; + device->pSmbpbi->dmaHandle = dmaHandle; + + return NVL_SUCCESS; + +smbpbi_init_fail: + nvswitch_os_unmap_dma_region(device->os_handle, cpuAddr, dmaHandle, + sizeof(SOE_SMBPBI_SHARED_SURFACE), 
NVSWITCH_DMA_DIR_BIDIRECTIONAL); +os_map_dma_region_fail: + nvswitch_os_free_contig_memory(device->os_handle, cpuAddr, sizeof(SOE_SMBPBI_SHARED_SURFACE)); + + return status; +} + +NvlStatus +nvswitch_smbpbi_post_init +( + nvswitch_device * device +) +{ + struct smbpbi *pSmbpbi = device->pSmbpbi; + FLCN *pFlcn; + NvU64 dmaHandle; + RM_FLCN_CMD_SOE cmd; + NVSWITCH_TIMEOUT timeout; + NvU32 cmdSeqDesc; + RM_SOE_SMBPBI_CMD_INIT *pInitCmd = &cmd.cmd.smbpbiCmd.init; + NvlStatus status; + + if (!device->pSmbpbi || !device->pInforom) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + // Populate shared surface with static InfoROM data + nvswitch_inforom_read_static_data(device, device->pInforom, + &device->pSmbpbi->sharedSurface->inforomObjects); + + pFlcn = device->pSoe->pFlcn; + dmaHandle = pSmbpbi->dmaHandle; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.unitId = RM_SOE_UNIT_SMBPBI; + cmd.hdr.size = RM_SOE_CMD_SIZE(SMBPBI, INIT); + cmd.cmd.smbpbiCmd.cmdType = RM_SOE_SMBPBI_CMD_ID_INIT; + RM_FLCN_U64_PACK(&pInitCmd->dmaHandle, &dmaHandle); + + // + // Make the interval twice the heartbeat period to avoid + // skew between driver and soe threads + // + pInitCmd->driverPollingPeriodUs = (NVSWITCH_HEARTBEAT_INTERVAL_NS / 1000) * 2; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg - not used for now + NULL, // pPayload - not used for now + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: SMBPBI Init command failed. rc:%d\n", + __FUNCTION__, status); + return status; + } + + nvswitch_lib_smbpbi_log_sxid(device, NVSWITCH_ERR_NO_ERROR, + "NVSWITCH SMBPBI server is online."); + + NVSWITCH_PRINT(device, INFO, "%s: SMBPBI POST INIT completed\n", __FUNCTION__); + + return NVL_SUCCESS; +} + +static void +_nvswitch_smbpbi_send_unload +( + nvswitch_device *device +) +{ + FLCN *pFlcn; + RM_FLCN_CMD_SOE cmd; + NVSWITCH_TIMEOUT timeout; + NvU32 cmdSeqDesc; + NvlStatus status; + + pFlcn = device->pSoe->pFlcn; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.unitId = RM_SOE_UNIT_SMBPBI; + cmd.hdr.size = RM_SOE_CMD_SIZE(SMBPBI, UNLOAD); + cmd.cmd.smbpbiCmd.cmdType = RM_SOE_SMBPBI_CMD_ID_UNLOAD; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg - not used for now + NULL, // pPayload - not used for now + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: SMBPBI unload command failed. 
rc:%d\n", + __FUNCTION__, status); + } +} + +void +nvswitch_smbpbi_unload +( + nvswitch_device *device +) +{ + if (device->pSmbpbi) + { + _nvswitch_smbpbi_send_unload(device); + _nvswitch_smbpbi_dem_flush(device); + } +} + +void +nvswitch_smbpbi_destroy +( + nvswitch_device *device +) +{ + if (device->pSmbpbi) + { + nvswitch_os_unmap_dma_region(device->os_handle, + device->pSmbpbi->sharedSurface, + device->pSmbpbi->dmaHandle, + sizeof(SOE_SMBPBI_SHARED_SURFACE), + NVSWITCH_DMA_DIR_BIDIRECTIONAL); + nvswitch_os_free_contig_memory(device->os_handle, device->pSmbpbi->sharedSurface, + sizeof(SOE_SMBPBI_SHARED_SURFACE)); + nvswitch_os_free(device->pSmbpbi); + device->pSmbpbi = NULL; + } +} + +NvlStatus +nvswitch_smbpbi_refresh_ecc_counts +( + nvswitch_device *device +) +{ + PRM_SOE_SMBPBI_INFOROM_DATA pObjs; + struct inforom *pInforom = device->pInforom; + NvU64 corCnt; + NvU64 uncCnt; + + if ((device->pSmbpbi == NULL) || (device->pSmbpbi->sharedSurface == NULL)) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + if (pInforom == NULL || pInforom->pEccState == NULL) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + device->hal.nvswitch_inforom_ecc_get_total_errors(device, pInforom->pEccState->pEcc, + &corCnt, &uncCnt); + + pObjs = &device->pSmbpbi->sharedSurface->inforomObjects; + NvU64_ALIGN32_PACK(&(pObjs->ECC.correctedTotal), &corCnt); + NvU64_ALIGN32_PACK(&(pObjs->ECC.uncorrectedTotal), &uncCnt); + + return NVL_SUCCESS; +} + +NvlStatus +nvswitch_inforom_dem_load +( + nvswitch_device *device +) +{ + NvlStatus status; + NvU8 version = 0; + NvU8 subversion = 0; + struct inforom *pInforom = device->pInforom; + NvU8 *pPackedObject = NULL; + struct INFOROM_DEM_OBJECT_V1_00 *pFifo; + + if ((pInforom == NULL) || (device->pSmbpbi == NULL) || + (device->pSmbpbi->sharedSurface == NULL)) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pFifo = GET_PFIFO_FROM_DEVICE(device); + + status = nvswitch_inforom_get_object_version_info(device, "DEM", &version, + &subversion); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, INFO, "no DEM object found, rc:%d\n", status); + goto nvswitch_inforom_dem_load_fail; + } + + if (!INFOROM_OBJECT_SUBVERSION_SUPPORTS_NVSWITCH(subversion)) + { + NVSWITCH_PRINT(device, WARN, "DEM v%u.%u not supported\n", + version, subversion); + status = -NVL_ERR_NOT_SUPPORTED; + goto nvswitch_inforom_dem_load_fail; + } + + NVSWITCH_PRINT(device, INFO, "DEM v%u.%u found\n", version, subversion); + + if (version != 1) + { + NVSWITCH_PRINT(device, WARN, "DEM v%u.%u not supported\n", + version, subversion); + status = -NVL_ERR_NOT_SUPPORTED; + goto nvswitch_inforom_dem_load_fail; + } + + pPackedObject = nvswitch_os_malloc(INFOROM_DEM_OBJECT_V1_00_PACKED_SIZE); + + if (pPackedObject == NULL) + { + status = -NVL_NO_MEM; + goto nvswitch_inforom_dem_load_fail; + } + + status = nvswitch_inforom_load_object(device, pInforom, "DEM", + INFOROM_DEM_OBJECT_V1_00_FMT, + pPackedObject, + pFifo); + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "Failed to load DEM object, rc: %d\n", + status); + goto nvswitch_inforom_dem_load_fail; + } + +nvswitch_inforom_dem_load_fail: + + if (pPackedObject) + { + nvswitch_os_free(pPackedObject); + } + + // + // Mark the cached DEM as usable for Xid logging, even if we were + // unable to find it in the InfoROM image. + // + + device->pSmbpbi->sharedSurface->inforomObjects.DEM.bValid = NV_TRUE; + + _smbpbiDemInit(device, device->pSmbpbi, pFifo); + + return status; +} + +/*! 
+ * Validate/Initialize the Driver Event Message (SXid) FIFO buffer + * + * @param[in] device device object pointer + * @param[in] pSmbpbi SMBPBI object pointer + * @param[in,out] pFifo DEM object pointer + * + * @return void + */ +static void +_smbpbiDemInit +( + nvswitch_device *device, + struct smbpbi *pSmbpbi, + struct INFOROM_DEM_OBJECT_V1_00 *pFifo +) +{ + NvU8 msgLeft; + unsigned recordsHeld = 0; + NvU16 FIFO_REC_LOOP_ITERATOR; + NvU16 bytesOccupied; + NvU16 bytesSeen; + NvBool status = NV_FALSE; + + // validate the FIFO buffer + + if ((DEM_FIFO_PTR(pFifo->writeOffset) != pFifo->writeOffset) || + (DEM_FIFO_PTR(pFifo->readOffset) != pFifo->readOffset) || + ((pFifo->writeOffset % sizeof(NvU32)) != 0) || + ((pFifo->readOffset % sizeof(NvU32)) != 0)) + { + goto smbpbiDemInit_exit; + } + + if (pFifo->writeOffset == pFifo->readOffset) + { + // The FIFO is empty + status = NV_TRUE; + goto smbpbiDemInit_exit; + } + + // + // This HAL extracts from a scratch register the count of DEM messages + // in the FIFO that has not yet been requested by the SMBPBI client. + // If the FIFO holds more messages than that, it means those in excess + // of this count have been delivered to the client by PreOS app. + // + if (device->hal.nvswitch_smbpbi_get_dem_num_messages(device, &msgLeft) != NVL_SUCCESS) + { + // assume the maximum + msgLeft = ~0; + } + + if (msgLeft == 0) + { + // Nothing of value in the FIFO. Lets reset it explicitly. + status = NV_TRUE; + pFifo->writeOffset = 0; + pFifo->readOffset = 0; + goto smbpbiDemInit_exit; + } + + // + // Count the messages in the FIFO, while also checking the structure + // for integrity. Reset the FIFO in case any corruption is found. + // + bytesOccupied = DEM_BYTES_OCCUPIED(pFifo); + + bytesSeen = 0; + FIFO_REC_LOOP_START(pFifo, bytesSeen < bytesOccupied) + if ((_recSize > DEM_RECORD_SIZE_MAX) || + (FIFO_REC_LOOP_REC_SIZE < DEM_RECORD_SIZE_MIN)) + { + goto smbpbiDemInit_exit; + } + + bytesSeen += FIFO_REC_LOOP_REC_SIZE; + ++recordsHeld; + FIFO_REC_LOOP_END + + if ((bytesSeen != bytesOccupied) || (msgLeft > recordsHeld)) + { + goto smbpbiDemInit_exit; + } + + // + // Advance the FIFO read ptr in order to remove those messages that + // have already been delivered to the client. + // + FIFO_REC_LOOP_START(pFifo, recordsHeld > msgLeft) + --recordsHeld; + FIFO_REC_LOOP_END + + pFifo->readOffset = FIFO_REC_LOOP_ITERATOR; + status = NV_TRUE; + +smbpbiDemInit_exit: + + if (!status) + { + // Reset the FIFO + pFifo->writeOffset = 0; + pFifo->readOffset = 0; + pFifo->seqNumber = 0; + } +} + +static void +_nvswitch_smbpbi_dem_flush(nvswitch_device *device) +{ + NvU8 *pPackedObject = NULL; + struct INFOROM_DEM_OBJECT_V1_00 *pFifo; + NvlStatus status = NVL_SUCCESS; + + pPackedObject = nvswitch_os_malloc(INFOROM_DEM_OBJECT_V1_00_PACKED_SIZE); + + if (pPackedObject == NULL) + { + status = -NVL_NO_MEM; + goto _nvswitch_smbpbi_dem_flush_exit; + } + + pFifo = GET_PFIFO_FROM_DEVICE(device); + + status = nvswitch_inforom_write_object(device, "DEM", + INFOROM_DEM_OBJECT_V1_00_FMT, + pFifo, + pPackedObject); + +_nvswitch_smbpbi_dem_flush_exit: + nvswitch_os_free(pPackedObject); + + if (status != NVL_SUCCESS) + { + NVSWITCH_PRINT(device, ERROR, "DEM object write failed, status=%d\n", + status); + } +} + +/*! 
+ * A helper to create a new DEM FIFO record + * + * @param[in,out] pFifo DEM object pointer + * @param[in] num Xid number + * @param[in] osErrorString text message to store + * @param[in] msglen message size + * @param[out] pRecSize new record size in bytes + * + * @return ptr to the new record + * @return NULL if there's no room in the FIFO + * or dynamic allocation error + */ +static NV_MSGBOX_DEM_RECORD * +_makeNewRecord +( + INFOROM_DEM_OBJECT_V1_00 *pFifo, + NvU32 num, + NvU8 *osErrorString, + NvU32 msglen, + NvU32 *pRecSize +) +{ + NV_MSGBOX_DEM_RECORD *pNewRec; + + *pRecSize = NV_MIN(sizeof(NV_MSGBOX_DEM_RECORD) + msglen, + DEM_RECORD_SIZE_MAX); + + if ((*pRecSize > DEM_BYTES_AVAILABLE(pFifo)) || + ((pNewRec = nvswitch_os_malloc(*pRecSize)) == NULL)) + { + return NULL; + } + + // Fill the new record. + nvswitch_os_memset(pNewRec, 0, *pRecSize); + pNewRec->recordSize = NV_UNSIGNED_DIV_CEIL(*pRecSize, sizeof(NvU32)); + pNewRec->xidId = num; + pNewRec->seqNumber = pFifo->seqNumber++; + pNewRec->timeStamp = nvswitch_os_get_platform_time() / NVSWITCH_NSEC_PER_SEC; + + if (msglen > NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE) + { + // The text string is too long. Truncate and notify the client. + pNewRec->flags = FLD_SET_DRF(_MSGBOX, _DEM_RECORD_FLAGS, + _TRUNC, _SET, pNewRec->flags); + msglen = NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE - 1; + } + + nvswitch_os_memcpy(pNewRec->textMessage, osErrorString, msglen); + + return pNewRec; +} + +/*! + * A helper to add the new record to the DEM FIFO + * + * @param[in,out] pFifo DEM object pointer + * @param[in] pNewRec the new record + * @param[in] recSize new record size in bytes + * + * @return void + */ +static void +_addNewRecord +( + INFOROM_DEM_OBJECT_V1_00 *pFifo, + NV_MSGBOX_DEM_RECORD *pNewRec, + NvU32 recSize +) +{ + NvU16 rem; + NvU16 curPtr; + NvU16 copySz; + NvU8 *srcPtr; + + // Copy the new record into the FIFO, handling a possible wrap-around. + rem = recSize; + curPtr = pFifo->writeOffset; + srcPtr = (NvU8 *)pNewRec; + while (rem > 0) + { + copySz = NV_MIN(rem, DEM_FIFO_SIZE - curPtr); + nvswitch_os_memcpy(pFifo->fifoBuffer + curPtr, srcPtr, copySz); + rem -= copySz; + srcPtr += copySz; + curPtr = DEM_FIFO_PTR(curPtr + copySz); + } + + // Advance the FIFO write ptr. + pFifo->writeOffset = DEM_FIFO_PTR(pFifo->writeOffset + + (pNewRec->recordSize * sizeof(NvU32))); +} + +/*! + * Add a Driver Event Message (SXid) to the InfoROM DEM FIFO buffer + * + * @param[in] device device object pointer + * @param[in] num Xid number + * @param[in] msglen message size + * @param[in] osErrorString text message to store + * + * @return void + */ +void +nvswitch_smbpbi_log_message +( + nvswitch_device *device, + NvU32 num, + NvU32 msglen, + NvU8 *osErrorString +) +{ + INFOROM_DEM_OBJECT_V1_00 *pFifo; + NvU32 recSize; + NvU16 FIFO_REC_LOOP_ITERATOR; + NV_MSGBOX_DEM_RECORD *pNewRec; + + if ((device->pSmbpbi == NULL) || + (device->pSmbpbi->sharedSurface == NULL)) + { + return; + } + + pFifo = GET_PFIFO_FROM_DEVICE(device); + + pNewRec = _makeNewRecord(pFifo, num, osErrorString, msglen, &recSize); + + if (pNewRec != NULL) + { + _addNewRecord(pFifo, pNewRec, recSize); + nvswitch_os_free(pNewRec); + } + else + { + // + // We are unable to log this message. Mark the latest record + // with a flag telling the client that message(s) were dropped. 
+ // + + NvU16 bytesOccupied = DEM_BYTES_OCCUPIED(pFifo); + NvU16 bytesSeen; + NV_MSGBOX_DEM_RECORD *pLastRec = NULL; + + // Find the newest record + bytesSeen = 0; + FIFO_REC_LOOP_START(pFifo, bytesSeen < bytesOccupied) + pLastRec = FIFO_REC_LOOP_REC_PTR; + bytesSeen += FIFO_REC_LOOP_REC_SIZE; + FIFO_REC_LOOP_END + + if (pLastRec != NULL) + { + pLastRec->flags = FLD_SET_DRF(_MSGBOX, _DEM_RECORD_FLAGS, + _OVFL, _SET, pLastRec->flags); + } + } + + return; +} + +NvlStatus +nvswitch_smbpbi_set_link_error_info +( + nvswitch_device *device, + NVSWITCH_LINK_TRAINING_ERROR_INFO *pLinkTrainingErrorInfo, + NVSWITCH_LINK_RUNTIME_ERROR_INFO *pLinkRuntimeErrorInfo +) +{ + FLCN *pFlcn; + RM_FLCN_CMD_SOE cmd; + NVSWITCH_TIMEOUT timeout; + NvU32 cmdSeqDesc; + RM_SOE_SMBPBI_CMD_SET_LINK_ERROR_INFO *pSetCmd = &cmd.cmd.smbpbiCmd.linkErrorInfo; + NvlStatus status; + + if (!device->pSmbpbi) + { + return -NVL_ERR_NOT_SUPPORTED; + } + + pFlcn = device->pSoe->pFlcn; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.unitId = RM_SOE_UNIT_SMBPBI; + cmd.hdr.size = RM_SOE_CMD_SIZE(SMBPBI, SET_LINK_ERROR_INFO); + cmd.cmd.smbpbiCmd.cmdType = RM_SOE_SMBPBI_CMD_ID_SET_LINK_ERROR_INFO; + + pSetCmd->trainingErrorInfo.isValid = pLinkTrainingErrorInfo->isValid; + pSetCmd->runtimeErrorInfo.isValid = pLinkRuntimeErrorInfo->isValid; + + RM_FLCN_U64_PACK(&pSetCmd->trainingErrorInfo.attemptedTrainingMask0, + &pLinkTrainingErrorInfo->attemptedTrainingMask0); + RM_FLCN_U64_PACK(&pSetCmd->trainingErrorInfo.trainingErrorMask0, + &pLinkTrainingErrorInfo->trainingErrorMask0); + RM_FLCN_U64_PACK(&pSetCmd->runtimeErrorInfo.mask0, &pLinkRuntimeErrorInfo->mask0); + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg - not used for now + NULL, // pPayload - not used for now + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s SMBPBI Set Link Error Info command failed. rc:%d\n", + __FUNCTION__, status); + return status; + } + + return NVL_SUCCESS; +} + diff --git a/src/common/nvswitch/kernel/soe/soe_call_hal_nvswitch.c b/src/common/nvswitch/kernel/soe/soe_call_hal_nvswitch.c new file mode 100644 index 000000000..70551a46a --- /dev/null +++ b/src/common/nvswitch/kernel/soe/soe_call_hal_nvswitch.c @@ -0,0 +1,345 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "common_nvswitch.h" +#include "soe/haldefs_soe_nvswitch.h" +#include "soe/soe_nvswitch.h" +#include "soe/soe_priv_nvswitch.h" + +#include "export_nvswitch.h" + +NV_STATUS +soeProcessMessages +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->processMessages == NULL) + { + NVSWITCH_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + + return pSoe->base.pHal->processMessages(device, pSoe); +} + +NV_STATUS +soeWaitForInitAck +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->waitForInitAck == NULL) + { + NVSWITCH_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + + return pSoe->base.pHal->waitForInitAck(device, pSoe); +} + + + +NvU32 +soeService_HAL +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->service == NULL) + { + NVSWITCH_ASSERT(0); + return 0; + } + + return pSoe->base.pHal->service(device, pSoe); +} + +void +soeServiceHalt_HAL +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->serviceHalt == NULL) + { + NVSWITCH_ASSERT(0); + return; + } + + pSoe->base.pHal->serviceHalt(device, pSoe); +} + +void +soeEmemTransfer_HAL +( + nvswitch_device *device, + PSOE pSoe, + NvU32 dmemAddr, + NvU8 *pBuf, + NvU32 sizeBytes, + NvU8 port, + NvBool bCopyFrom +) +{ + if (pSoe->base.pHal->ememTransfer == NULL) + { + NVSWITCH_ASSERT(0); + return; + } + + pSoe->base.pHal->ememTransfer(device, pSoe, dmemAddr, pBuf, sizeBytes, port, bCopyFrom); +} + +NvU32 +soeGetEmemSize_HAL +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->getEmemSize == NULL) + { + NVSWITCH_ASSERT(0); + return 0; + } + + return pSoe->base.pHal->getEmemSize(device, pSoe); +} + +NvU32 +soeGetEmemStartOffset_HAL +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->getEmemStartOffset == NULL) + { + NVSWITCH_ASSERT(0); + return 0; + } + + return pSoe->base.pHal->getEmemStartOffset(device, pSoe); +} + +NV_STATUS +soeEmemPortToRegAddr_HAL +( + nvswitch_device *device, + PSOE pSoe, + NvU32 port, + NvU32 *pEmemCAddr, + NvU32 *pEmemDAddr +) +{ + if (pSoe->base.pHal->ememPortToRegAddr == NULL) + { + NVSWITCH_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + + return pSoe->base.pHal->ememPortToRegAddr(device, pSoe, port, pEmemCAddr, pEmemDAddr); +} + +void +soeServiceExterr_HAL +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->serviceExterr == NULL) + { + NVSWITCH_ASSERT(0); + return; + } + + pSoe->base.pHal->serviceExterr(device, pSoe); +} + +NV_STATUS +soeGetExtErrRegAddrs_HAL +( + nvswitch_device *device, + PSOE pSoe, + NvU32 *pExtErrAddr, + NvU32 *pExtErrStat +) +{ + if (pSoe->base.pHal->getExtErrRegAddrs == NULL) + { + NVSWITCH_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + + return pSoe->base.pHal->getExtErrRegAddrs(device, pSoe, pExtErrAddr, pExtErrStat); +} + +NvU32 +soeEmemPortSizeGet_HAL +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->ememPortSizeGet == NULL) + { + NVSWITCH_ASSERT(0); + return 0; + } + + return pSoe->base.pHal->ememPortSizeGet(device, pSoe); +} + +NvBool +soeIsCpuHalted_HAL +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->isCpuHalted == NULL) + { + NVSWITCH_ASSERT(0); + return NV_FALSE; + } + + return pSoe->base.pHal->isCpuHalted(device, pSoe); +} + 
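/*
 * Editorial note (not part of the original commit): every soe*_HAL wrapper in
 * this file follows the same null-checked function-pointer dispatch pattern --
 * look up the per-chip HAL entry, assert and return a safe default if the chip
 * does not populate it, otherwise forward the call. The stand-alone sketch
 * below illustrates that pattern with hypothetical names (example_hal,
 * example_engine, exampleGetCount_HAL); none of these identifiers exist in the
 * driver, and the sketch is only a minimal, self-contained illustration.
 */

#include <assert.h>
#include <stdio.h>

/* Per-chip HAL vtable; an entry is left NULL on chips without the feature. */
typedef struct example_hal
{
    int (*getCount)(void *ctx);
} example_hal;

typedef struct example_engine
{
    example_hal *pHal;   /* selected at init time for the detected chip */
    void        *ctx;
} example_engine;

/*
 * Wrapper: validate the HAL entry, then forward; otherwise fail loudly and
 * return a safe default, mirroring the NVSWITCH_ASSERT(0)-then-return style
 * of the wrappers above.
 */
static int
exampleGetCount_HAL(example_engine *pEng)
{
    if (pEng->pHal->getCount == NULL)
    {
        assert(0);
        return 0;
    }
    return pEng->pHal->getCount(pEng->ctx);
}

/* One chip-specific implementation wired into the vtable. */
static int getCount_chipA(void *ctx) { (void)ctx; return 42; }

int main(void)
{
    example_hal    halA = { .getCount = getCount_chipA };
    example_engine eng  = { .pHal = &halA, .ctx = NULL };
    printf("count = %d\n", exampleGetCount_HAL(&eng)); /* prints "count = 42" */
    return 0;
}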
+NvlStatus +soeTestDma_HAL +( + nvswitch_device *device, + PSOE pSoe +) +{ + if (pSoe->base.pHal->testDma == NULL) + { + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + return pSoe->base.pHal->testDma(device); +} + +NvlStatus +soeSetPexEOM_HAL +( + nvswitch_device *device, + NvU8 mode, + NvU8 nblks, + NvU8 nerrs, + NvU8 berEyeSel +) +{ + PSOE pSoe = (PSOE)device->pSoe; + if (pSoe->base.pHal->setPexEOM == NULL) + { + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + return pSoe->base.pHal->setPexEOM(device, mode, nblks, nerrs, berEyeSel); +} + +NvlStatus +soeGetPexEomStatus_HAL +( + nvswitch_device *device, + NvU8 mode, + NvU8 nblks, + NvU8 nerrs, + NvU8 berEyeSel, + NvU32 laneMask, + NvU16 *pEomStatus +) +{ + PSOE pSoe = (PSOE)device->pSoe; + if (pSoe->base.pHal->getPexEomStatus == NULL) + { + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + return pSoe->base.pHal->getPexEomStatus(device, mode, nblks, nerrs, berEyeSel, laneMask, pEomStatus); +} + +NvlStatus +soeGetUphyDlnCfgSpace_HAL +( + nvswitch_device *device, + NvU32 regAddress, + NvU32 laneSelectMask, + NvU16 *pRegValue +) +{ + PSOE pSoe = (PSOE)device->pSoe; + if (pSoe->base.pHal->getUphyDlnCfgSpace == NULL) + { + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + return pSoe->base.pHal->getUphyDlnCfgSpace(device, regAddress, laneSelectMask, pRegValue); +} + +NvlStatus +soeForceThermalSlowdown_HAL +( + nvswitch_device *device, + NvBool slowdown, + NvU32 periodUs +) +{ + PSOE pSoe = (PSOE)device->pSoe; + if (pSoe->base.pHal->forceThermalSlowdown == NULL) + { + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + return pSoe->base.pHal->forceThermalSlowdown(device, slowdown, periodUs); +} + +NvlStatus +soeSetPcieLinkSpeed_HAL +( + nvswitch_device *device, + NvU32 linkSpeed +) +{ + PSOE pSoe = (PSOE)device->pSoe; + if (pSoe->base.pHal->setPcieLinkSpeed == NULL) + { + NVSWITCH_ASSERT(0); + return -NVL_BAD_ARGS; + } + + return pSoe->base.pHal->setPcieLinkSpeed(device, linkSpeed); +} diff --git a/src/common/nvswitch/kernel/soe/soe_nvswitch.c b/src/common/nvswitch/kernel/soe/soe_nvswitch.c new file mode 100644 index 000000000..a012917b9 --- /dev/null +++ b/src/common/nvswitch/kernel/soe/soe_nvswitch.c @@ -0,0 +1,665 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "soe/soe_nvswitch.h" +#include "soe/soe_priv_nvswitch.h" + +#include "flcn/haldefs_flcnable_nvswitch.h" +#include "flcn/haldefs_flcn_nvswitch.h" +#include "flcn/flcn_nvswitch.h" + +#include "rmflcncmdif_nvswitch.h" +#include "common_nvswitch.h" + +static NV_STATUS _soeGetInitMessage(nvswitch_device *device, PSOE pSoe, RM_FLCN_MSG_SOE *pMsg); + +/*! + * Use the SOE INIT Message to construct and initialize all SOE Queues. + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE object pointer + * @param[in] pMsg Pointer to the INIT Message + * + * @return 'NV_OK' upon successful creation of all SOE Queues + */ +static NV_STATUS +_soeQMgrCreateQueuesFromInitMsg +( + nvswitch_device *device, + PFLCNABLE pSoe, + RM_FLCN_MSG_SOE *pMsg +) +{ + RM_SOE_INIT_MSG_SOE_INIT *pInit; + NvU32 i; + NvU32 queueLogId; + NV_STATUS status; + FLCNQUEUE *pQueue; + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + PFALCON_QUEUE_INFO pQueueInfo; + + NVSWITCH_ASSERT(pFlcn != NULL); + + pQueueInfo = pFlcn->pQueueInfo; + NVSWITCH_ASSERT(pQueueInfo != NULL); + + pInit = &pMsg->msg.init.soeInit; + NVSWITCH_ASSERT(pInit->numQueues <= pFlcn->numQueues); + + for (i = 0; i < pFlcn->numQueues; i++) + { + queueLogId = pInit->qInfo[i].queueLogId; + NVSWITCH_ASSERT(queueLogId < pFlcn->numQueues); + pQueue = &pQueueInfo->pQueues[queueLogId]; + status = flcnQueueConstruct_dmem_nvswitch( + device, + pFlcn, + &pQueue, // ppQueue + queueLogId, // Logical ID of the queue + pInit->qInfo[i].queuePhyId, // Physical ID of the queue + pInit->qInfo[i].queueOffset, // offset + pInit->qInfo[i].queueSize, // size + RM_FLCN_QUEUE_HDR_SIZE); // cmdHdrSize + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Error constructing SOE Queue (status=" + "0x%08x).\n", __FUNCTION__, status); + NVSWITCH_ASSERT(0); + return status; + } + } + return NV_OK; +} + +/*! + * Purges all the messages from the SOE's message queue. Each message will + * be analyzed, clients will be notified of status, and events will be routed + * to all registered event listeners. + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE object pointer + * + * @return 'NV_OK' if the message queue was successfully purged. + */ +static NV_STATUS +_soeProcessMessages_IMPL +( + nvswitch_device *device, + PSOE pSoe +) +{ + RM_FLCN_MSG_SOE soeMessage; + NV_STATUS status; + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + + // keep processing messages until no more exist in the message queue + while (NV_OK == (status = flcnQueueReadData( + device, + pFlcn, + SOE_RM_MSGQ_LOG_ID, + (RM_FLCN_MSG *)&soeMessage, NV_TRUE))) + { + NVSWITCH_PRINT(device, INFO, + "%s: unitId=0x%02x, size=0x%02x, ctrlFlags=0x%02x, " \ + "seqNumId=0x%02x\n", + __FUNCTION__, + soeMessage.hdr.unitId, + soeMessage.hdr.size, + soeMessage.hdr.ctrlFlags, + soeMessage.hdr.seqNumId); + + // check to see if the message is a reply or an event. + if ((soeMessage.hdr.ctrlFlags &= RM_FLCN_QUEUE_HDR_FLAGS_EVENT) != 0) + { + flcnQueueEventHandle(device, pFlcn, (RM_FLCN_MSG *)&soeMessage, NV_OK); + } + // the message is a response from a previously queued command + else + { + flcnQueueResponseHandle(device, pFlcn, (RM_FLCN_MSG *)&soeMessage); + } + } + + // + // Status NV_ERR_NOT_READY implies, Queue is empty. + // Log the message in other error cases. + // + if (status != NV_ERR_NOT_READY) + { + NVSWITCH_PRINT(device, ERROR, + "%s: unexpected error while purging message queue (status=0x%x).\n", + __FUNCTION__, (status)); + } + + return status; +} + +/*! 
+ * This function exists to solve a natural chicken-and-egg problem that arises + * due to the fact that queue information (location, size, id, etc...) is + * relayed to the RM as a message in a queue. Queue construction is done when + * the message arives and the normal queue read/write functions are not + * available until construction is complete. Construction cannot be done until + * the message is read from the queue. Therefore, the very first message read + * from the Message Queue must be considered as a special-case and must NOT use + * any functionality provided by the SOE's queue manager. + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE object pointer + * + * @return 'NV_OK' + * Upon successful extraction and processing of the first SOE message. + */ +static NV_STATUS +_soeProcessMessagesPreInit_IMPL +( + nvswitch_device *device, + PSOE pSoe +) +{ + RM_FLCN_MSG_SOE msg; + NV_STATUS status; + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + + // extract the "INIT" message (this is never expected to fail) + status = _soeGetInitMessage(device, pSoe, &msg); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to extract the INIT message " + "from the SOE Message Queue (status=0x%08x).", + __FUNCTION__, status); + NVSWITCH_ASSERT(0); + return status; + } + + // + // Now hookup the "real" message-processing function and handle the "INIT" + // message. + // + pSoe->base.pHal->processMessages = _soeProcessMessages_IMPL; + return flcnQueueEventHandle(device, pFlcn, (RM_FLCN_MSG *)&msg, NV_OK); +} + +/*! + * @brief Process the "INIT" message sent from the SOE ucode application. + * + * When the SOE ucode is done initializing, it will post an INIT message in + * the Message Queue that contains all the necessary attributes that are + * needed to enqueuing commands and extracting messages from the queues. + * The packet will also contain the offset and size of portion of DMEM that + * the RM must manage. Upon receiving this message it will be assume that + * the SOE is ready to start accepting commands. + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE object pointer + * @param[in] pMsg Pointer to the event's message data + * + * @return 'NV_OK' if the event was successfully handled. + */ +static NV_STATUS +_soeHandleInitEvent_IMPL +( + nvswitch_device *device, + PFLCNABLE pSoe, + RM_FLCN_MSG *pGenMsg +) +{ + NV_STATUS status; + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + RM_FLCN_MSG_SOE *pMsg = (RM_FLCN_MSG_SOE *)pGenMsg; + + if (pFlcn == NULL) + { + NVSWITCH_ASSERT(pFlcn != NULL); + return NV_ERR_INVALID_POINTER; + } + + NVSWITCH_PRINT(device, INFO, + "%s: Received INIT message from SOE\n", + __FUNCTION__); + + // + // Pass the INIT message to the queue manager to allow it to create the + // queues. + // + status = _soeQMgrCreateQueuesFromInitMsg(device, pSoe, pMsg); + if (status != NV_OK) + { + NVSWITCH_ASSERT(0); + return status; + } + + flcnDbgInfoDmemOffsetSet(device, pFlcn, + pMsg->msg.init.soeInit.osDebugEntryPoint); + + // the SOE ucode is now initialized and ready to accept commands + pFlcn->bOSReady = NV_TRUE; + + return NV_OK; +} + +/*! + * @brief Read the INIT message directly out of the Message Queue. + * + * This function accesses the Message Queue directly using the HAL. It does + * NOT and may NOT use the queue manager as it has not yet been constructed and + * initialized. The Message Queue may not be empty when this function is called + * and the first message in the queue MUST be the INIT message. 
+ * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE object pointer + * @param[out] pMsg Message structure to fill with the INIT message data + * + * @return 'NV_OK' upon successful extraction of the INIT message. + * @return + * 'NV_ERR_INVALID_STATE' if the first message found was not an INIT + * message or if the message was improperly formatted. + */ +static NV_STATUS +_soeGetInitMessage +( + nvswitch_device *device, + PSOE pSoe, + RM_FLCN_MSG_SOE *pMsg +) +{ + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + NV_STATUS status = NV_OK; + NvU32 tail = 0; + PFALCON_QUEUE_INFO pQueueInfo; + // on the GPU, rmEmemPortId = sec2RmEmemPortIdGet_HAL(...); + NvU8 rmEmemPortId = 0; + + if (pFlcn == NULL) + { + NVSWITCH_ASSERT(pFlcn != NULL); + return NV_ERR_INVALID_POINTER; + } + + pQueueInfo = pFlcn->pQueueInfo; + if (pQueueInfo == NULL) + { + NVSWITCH_ASSERT(pQueueInfo != NULL); + return NV_ERR_INVALID_POINTER; + } + + // + // Message queue 0 is used by SOE to communicate with RM + // Check SOE_CMDMGMT_MSG_QUEUE_RM in //uproc/soe/inc/soe_cmdmgmt.h + // + pQueueInfo->pQueues[SOE_RM_MSGQ_LOG_ID].queuePhyId = 0; + + // read the header starting at the current tail position + (void)flcnMsgQueueTailGet(device, pFlcn, + &pQueueInfo->pQueues[SOE_RM_MSGQ_LOG_ID], &tail); + if (pFlcn->bEmemEnabled) + { + // + // We use the offset in DMEM for the src address, since + // EmemCopyFrom automatically converts it to the offset in EMEM + // + flcnableEmemCopyFrom( + device, pFlcn->pFlcnable, + tail, // src + (NvU8 *)&pMsg->hdr, // pDst + RM_FLCN_QUEUE_HDR_SIZE, // numBytes + rmEmemPortId); // port + } + else + { + status = flcnDmemCopyFrom(device, + pFlcn, + tail, // src + (NvU8 *)&pMsg->hdr, // pDst + RM_FLCN_QUEUE_HDR_SIZE, // numBytes + 0); // port + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to copy from SOE DMEM\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + goto _soeGetInitMessage_exit; + } + } + + if (pMsg->hdr.unitId != RM_SOE_UNIT_INIT) + { + status = NV_ERR_INVALID_STATE; + NVSWITCH_ASSERT(0); + goto _soeGetInitMessage_exit; + } + + // read the message body and update the tail position + if (pFlcn->bEmemEnabled) + { + // + // We use the offset in DMEM for the src address, since + // EmemCopyFrom automatically converts it to the offset in EMEM + // + flcnableEmemCopyFrom( + device, pFlcn->pFlcnable, + tail + RM_FLCN_QUEUE_HDR_SIZE, // src + (NvU8 *)&pMsg->msg, // pDst + pMsg->hdr.size - RM_FLCN_QUEUE_HDR_SIZE, // numBytes + rmEmemPortId); // port + } + else + { + status = flcnDmemCopyFrom(device, + pFlcn, + tail + RM_FLCN_QUEUE_HDR_SIZE, // src + (NvU8 *)&pMsg->msg, // pDst + pMsg->hdr.size - RM_FLCN_QUEUE_HDR_SIZE, // numBytes + 0); // port + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, + "%s: Failed to copy from SOE DMEM\n", __FUNCTION__); + NVSWITCH_ASSERT(0); + goto _soeGetInitMessage_exit; + } + } + + tail += NV_ALIGN_UP(pMsg->hdr.size, SOE_DMEM_ALIGNMENT); + flcnMsgQueueTailSet(device, pFlcn, + &pQueueInfo->pQueues[SOE_RM_MSGQ_LOG_ID], tail); + +_soeGetInitMessage_exit: + return status; +} + +/*! + * Copies 'sizeBytes' from DMEM address 'src' to 'pDst' using EMEM access port. + * + * The address must be located in the EMEM region located directly above the + * maximum virtual address of DMEM. 
+ * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * @param[in] src The DMEM address for the source of the copy + * @param[out] pDst Pointer to write with copied data from EMEM + * @param[in] sizeBytes The number of bytes to copy from EMEM + * @param[in] port EMEM port + */ +static void +_soeEmemCopyFrom_IMPL +( + nvswitch_device *device, + FLCNABLE *pSoe, + NvU32 src, + NvU8 *pDst, + NvU32 sizeBytes, + NvU8 port +) +{ + soeEmemTransfer_HAL(device, (PSOE)pSoe, src, pDst, sizeBytes, port, NV_TRUE); +} + +/*! + * Copies 'sizeBytes' from 'pDst' to DMEM address 'dst' using EMEM access port. + * + * The address must be located in the EMEM region located directly above the + * maximum virtual address of DMEM. + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * @param[in] dst The DMEM address for the copy destination. + * @param[in] pSrc The pointer to the buffer containing the data to copy + * @param[in] sizeBytes The number of bytes to copy into EMEM + * @param[in] port EMEM port + */ +static void +_soeEmemCopyTo_IMPL +( + nvswitch_device *device, + FLCNABLE *pSoe, + NvU32 dst, + NvU8 *pSrc, + NvU32 sizeBytes, + NvU8 port +) +{ + soeEmemTransfer_HAL(device, (PSOE)pSoe, dst, pSrc, sizeBytes, port, NV_FALSE); +} + +/*! + * Loop until SOE RTOS is loaded and gives us an INIT message + * + * @param[in] device nvswitch_device object pointer + * @param[in] pSoe SOE object pointer + */ +static NV_STATUS +_soeWaitForInitAck_IMPL +( + nvswitch_device *device, + PSOE pSoe +) +{ + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + // POBJMC pMc = GPU_GET_MC(device); + NVSWITCH_TIMEOUT timeout; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1SEC_IN_NS * 5, &timeout); + while (!pFlcn->bOSReady && !nvswitch_timeout_check(&timeout)) + { + // Once interrupt handling is ready, might need to replace this with + //mcServiceSingle_HAL(device, pMc, MC_ENGINE_IDX_SOE, NV_FALSE); + soeService_HAL(device, pSoe); + nvswitch_os_sleep(1); + } + + if (!pFlcn->bOSReady) + { + NVSWITCH_PRINT(device, ERROR, + "%s Timeout while waiting for SOE bootup\n", + __FUNCTION__); + NVSWITCH_ASSERT(0); + return NV_ERR_TIMEOUT; + } + return NV_OK; +} + +/*! + * @brief Retrieves a pointer to the engine specific SEQ_INFO structure. + * + * @param[in] device nvswitch_device pointer + * @param[in] pSoe SOE pointer + * @param[in] seqIndex Index of the structure to retrieve + * + * @return Pointer to the SEQ_INFO structure or NULL on invalid index. + */ +static PFLCN_QMGR_SEQ_INFO +_soeQueueSeqInfoGet_IMPL +( + nvswitch_device *device, + FLCNABLE *pSoe, + NvU32 seqIndex +) +{ + FLCN *pFlcn = ENG_GET_FLCN(pSoe); + + if (seqIndex < pFlcn->numSequences) + { + return &(((PSOE)pSoe)->seqInfo[seqIndex]); + } + return NULL; +} + +/*! + * @copydoc flcnableQueueCmdValidate_IMPL + */ +static NvBool +_soeQueueCmdValidate_IMPL +( + nvswitch_device *device, + FLCNABLE *pSoe, + PRM_FLCN_CMD pCmd, + PRM_FLCN_MSG pMsg, + void *pPayload, + NvU32 queueIdLogical +) +{ + PFLCN pFlcn = ENG_GET_FLCN(pSoe); + FLCNQUEUE *pQueue = &pFlcn->pQueueInfo->pQueues[queueIdLogical]; + NvU32 cmdSize = pCmd->cmdGen.hdr.size; + + // Verify that the target queue ID represents a valid RM queue. + if (queueIdLogical != SOE_RM_CMDQ_LOG_ID) + { + NVSWITCH_PRINT(device, ERROR, + "%s: invalid SOE command queue ID = 0x%x\n", + __FUNCTION__, queueIdLogical); + return NV_FALSE; + } + + // + // Command size cannot be larger than queue size / 2. 
Otherwise, it is + // impossible to send two commands back to back if we start from the + // beginning of the queue. + // + if (cmdSize > (pQueue->queueSize / 2)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: invalid command (illegal size = 0x%x)\n", + __FUNCTION__, cmdSize); + return NV_FALSE; + } + + // Validate the command's unit identifier. + if (!RM_SOE_UNITID_IS_VALID(pCmd->cmdGen.hdr.unitId)) + { + NVSWITCH_PRINT(device, ERROR, + "%s: invalid unitID = %d\n", + __FUNCTION__, pCmd->cmdGen.hdr.unitId); + return NV_FALSE; + } + + return NV_TRUE; +} + +/* -------------------- Object construction/initialization ------------------- */ + +static void +soeSetupHal +( + SOE *pSoe, + NvU32 pci_device_id +) +{ + soe_hal *pHal = NULL; + flcnable_hal *pParentHal = NULL; + + if (nvswitch_is_lr10_device_id(pci_device_id)) + { + soeSetupHal_LR10(pSoe); + } + else + { + // we're on a device which doesn't support SOE + NVSWITCH_PRINT(NULL, ERROR, "Tried to initialize SOE on device with no SOE\n"); + NVSWITCH_ASSERT(0); + } + + pHal = pSoe->base.pHal; + pParentHal = (flcnable_hal *)pHal; + //set any functions we want to override + pParentHal->handleInitEvent = _soeHandleInitEvent_IMPL; + pParentHal->ememCopyTo = _soeEmemCopyTo_IMPL; + pParentHal->ememCopyFrom = _soeEmemCopyFrom_IMPL; + pParentHal->queueSeqInfoGet = _soeQueueSeqInfoGet_IMPL; + pParentHal->queueCmdValidate = _soeQueueCmdValidate_IMPL; + + //set any functions specific to SOE + pHal->processMessages = _soeProcessMessagesPreInit_IMPL; + pHal->waitForInitAck = _soeWaitForInitAck_IMPL; +} + +SOE * +soeAllocNew(void) +{ + SOE *pSoe = nvswitch_os_malloc(sizeof(*pSoe)); + if (pSoe != NULL) + { + nvswitch_os_memset(pSoe, 0, sizeof(*pSoe)); + } + + return pSoe; +} + +NvlStatus +soeInit +( + nvswitch_device *device, + SOE *pSoe, + NvU32 pci_device_id +) +{ + NvlStatus retval; + + // allocate hal if a child class hasn't already + if (pSoe->base.pHal == NULL) + { + soe_hal *pHal = pSoe->base.pHal = nvswitch_os_malloc(sizeof(*pHal)); + if (pHal == NULL) + { + NVSWITCH_PRINT(device, ERROR, "Flcn allocation failed!\n"); + retval = -NVL_NO_MEM; + goto soe_init_fail; + } + nvswitch_os_memset(pHal, 0, sizeof(*pHal)); + } + + // init parent class + retval = flcnableInit(device, (PFLCNABLE)pSoe, pci_device_id); + if (retval != NVL_SUCCESS) + { + goto soe_init_fail; + } + + soeSetupHal(pSoe, pci_device_id); + + return retval; +soe_init_fail: + soeDestroy(device, pSoe); + return retval; +} + +// reverse of soeInit() +void +soeDestroy +( + nvswitch_device *device, + SOE *pSoe +) +{ + // destroy parent class + flcnableDestroy(device, (PFLCNABLE)pSoe); + + if (pSoe->base.pHal != NULL) + { + nvswitch_os_free(pSoe->base.pHal); + pSoe->base.pHal = NULL; + } +} diff --git a/src/common/nvswitch/kernel/spi_nvswitch.c b/src/common/nvswitch/kernel/spi_nvswitch.c new file mode 100644 index 000000000..ac590c4aa --- /dev/null +++ b/src/common/nvswitch/kernel/spi_nvswitch.c @@ -0,0 +1,69 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "common_nvswitch.h" +#include "error_nvswitch.h" +#include "rmsoecmdif.h" +#include "spi_nvswitch.h" +#include "flcn/flcn_nvswitch.h" +#include "rmflcncmdif_nvswitch.h" + +NvlStatus +nvswitch_spi_init +( + nvswitch_device *device +) +{ + RM_FLCN_CMD_SOE cmd; + NVSWITCH_TIMEOUT timeout; + NvU32 cmdSeqDesc; + NV_STATUS status; + FLCN *pFlcn; + + if (!device->pSoe) + { + return -NVL_ERR_INVALID_STATE; + } + + pFlcn = device->pSoe->pFlcn; + + nvswitch_os_memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.unitId = RM_SOE_UNIT_SPI; + cmd.hdr.size = sizeof(cmd); + cmd.cmd.spi.cmdType = RM_SOE_SPI_INIT; + + nvswitch_timeout_create(NVSWITCH_INTERVAL_1MSEC_IN_NS * 30, &timeout); + status = flcnQueueCmdPostBlocking(device, pFlcn, + (PRM_FLCN_CMD)&cmd, + NULL, // pMsg - not used for now + NULL, // pPayload - not used for now + SOE_RM_CMDQ_LOG_ID, + &cmdSeqDesc, + &timeout); + if (status != NV_OK) + { + NVSWITCH_PRINT(device, ERROR, "%s: SPI INIT failed. rc:%d\n", + __FUNCTION__, status); + } + + return status; +} diff --git a/src/common/sdk/nvidia/inc/Nvcm.h b/src/common/sdk/nvidia/inc/Nvcm.h new file mode 100644 index 000000000..d0d942271 --- /dev/null +++ b/src/common/sdk/nvidia/inc/Nvcm.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************* Operating System Interface Routines *******************\ +* * +* Module: NVCM.H * +* Windows Configuration Manager defines and prototypes. * +* * +* ***IMPORTANT*** The interfaces defined in this file are *deprecated* * +* ***IMPORTANT*** in favor of RmControl. * +* ***IMPORTANT*** Try hard to not use this file at all and definitely * +* ***IMPORTANT*** do not add or modify interfaces here. * +* Ref: bug 488474: delete CFG and CFG_EX * +* * +\***************************************************************************/ + +#ifndef _NVCM_H_ +#define _NVCM_H_ + +#include "nvdeprecated.h" + +#if NV_DEPRECATED_COMPAT(RM_CONFIG_GET_SET) + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(XAPIGEN) /* avoid duplicate generated xapi fns */ +#include "nvgputypes.h" +#ifndef _H2INC +#include "rmcd.h" +#endif + +#include "nverror.h" +#endif /* !XAPIGEN */ + +#define NV_ROBUST_CHANNEL_ALLOCFAIL_CLIENT 0x00000001 +#define NV_ROBUST_CHANNEL_ALLOCFAIL_DEVICE 0x00000002 +#define NV_ROBUST_CHANNEL_ALLOCFAIL_SUBDEVICE 0x00000004 +#define NV_ROBUST_CHANNEL_ALLOCFAIL_CHANNEL 0x00000008 +#define NV_ROBUST_CHANNEL_ALLOCFAIL_CTXDMA 0x00000010 +#define NV_ROBUST_CHANNEL_ALLOCFAIL_EVENT 0x00000020 +#define NV_ROBUST_CHANNEL_ALLOCFAIL_MEMORY 0x00000040 +#define NV_ROBUST_CHANNEL_ALLOCFAIL_OBJECT 0x00000080 +#define NV_ROBUST_CHANNEL_ALLOCFAIL_HEAP 0x00000100 + +#define NV_ROBUST_CHANNEL_BREAKONERROR_DEFAULT 0x00000000 +#define NV_ROBUST_CHANNEL_BREAKONERROR_DISABLE 0x00000001 +#define NV_ROBUST_CHANNEL_BREAKONERROR_ENABLE 0x00000002 + +#endif // NV_DEPRECATED_RM_CONFIG_GET_SET + +#endif // _NVCM_H_ diff --git a/src/common/sdk/nvidia/inc/class/cl0000.h b/src/common/sdk/nvidia/inc/class/cl0000.h new file mode 100644 index 000000000..c33bdb4c0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0000.h @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl0000_h_ +#define _cl0000_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvlimits.h" + +/* object NV01_NULL_OBJECT */ +#define NV01_NULL_OBJECT (0x00000000) +/* obsolete alises */ +#define NV1_NULL_OBJECT NV01_NULL_OBJECT + +/*event values*/ +#define NV0000_NOTIFIERS_DISPLAY_CHANGE (0) +#define NV0000_NOTIFIERS_EVENT_NONE_PENDING (1) +#define NV0000_NOTIFIERS_VM_START (2) +#define NV0000_NOTIFIERS_GPU_BIND_EVENT (3) +#define NV0000_NOTIFIERS_NVTELEMETRY_REPORT_EVENT (4) +#define NV0000_NOTIFIERS_MAXCOUNT (5) + +/*Status definitions for NV0000_NOTIFIERS_DISPLAY_CHANGE event*/ + +#define NV0000_NOTIFIERS_STATUS_ACPI_DISPLAY_DEVICE_CYCLE (0) + +//--------------------------------------------------------------------------- + +#define NV01_ROOT (0x00000000) +/* NvNotification[] fields and values */ +#define NV000_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +/* NvAlloc parameteters */ +typedef struct { + NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */ + NvU32 processID; + char processName[NV_PROC_NAME_MAX_LENGTH]; +} NV0000_ALLOC_PARAMETERS; + + /* pio method data structure */ +typedef volatile struct _cl0000_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv000Typedef, Nv01Root; + +/* obsolete aliases */ +#define NV000_TYPEDEF Nv01Root +#define Nv1Root Nv01Root +#define nv1Root Nv01Root +#define nv01Root Nv01Root + +/*event values*/ +#define NV0000_NOTIFIERS_ENABLE_CPU_UTIL_CTRL (1) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0000_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl0001.h b/src/common/sdk/nvidia/inc/class/cl0001.h new file mode 100644 index 000000000..10a8af4e4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0001.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0001_h_ +#define _cl0001_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV01_ROOT_NON_PRIV (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0001_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0002.h b/src/common/sdk/nvidia/inc/class/cl0002.h new file mode 100644 index 000000000..dd1d6c6ca --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0002.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. 
All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0002_h_ +#define _cl0002_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_CONTEXT_DMA_FROM_MEMORY (0x00000002) +/* NvNotification[] fields and values */ +#define NV002_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0002_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv002Typedef, Nv01ContextDmaFromMemory; +#define NV002_TYPEDEF Nv01ContextDmaFromMemory +/* obsolete stuff */ +#define NV1_CONTEXT_DMA_FROM_MEMORY (0x00000002) +#define NV01_CONTEXT_DMA (0x00000002) +#define Nv1ContextDmaFromMemory Nv01ContextDmaFromMemory +#define nv1ContextDmaFromMemory Nv01ContextDmaFromMemory +#define nv01ContextDmaFromMemory Nv01ContextDmaFromMemory + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0002_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0004.h b/src/common/sdk/nvidia/inc/class/cl0004.h new file mode 100644 index 000000000..32e25e78d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0004.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl0004_h_ +#define _cl0004_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_TIMER (0x00000004) +/* NvNotification[] elements */ +#define NV004_NOTIFIERS_SET_ALARM_NOTIFY (0) +#define NV004_NOTIFIERS_MAXCOUNT (1) + +/* mapped timer registers */ +typedef volatile struct _Nv01TimerMapTypedef { + NvU32 Reserved00[0x100]; + NvU32 PTimerTime0; /* 0x00009400 */ + NvU32 Reserved01[0x3]; + NvU32 PTimerTime1; /* 0x00009410 */ +} Nv01TimerMap; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0004_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0005.h b/src/common/sdk/nvidia/inc/class/cl0005.h new file mode 100644 index 000000000..af4ddcc0b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0005.h @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0005_h_ +#define _cl0005_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_EVENT (0x00000005) +/* NvNotification[] fields and values */ +#define NV003_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0005_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv005Typedef, Nv01Event; +#define NV005_TYPEDEF Nv01Event +/* obsolete stuff */ +#define NV1_TIMER (0x00000004) +#define Nv1Event Nv01Event +#define nv1Event Nv01Event +#define nv01Event Nv01Event + +/* NvRmAlloc() parameters */ +typedef struct { + NvHandle hParentClient; + NvHandle hSrcResource; + + NvV32 hClass; + NvV32 notifyIndex; + NvP64 data NV_ALIGN_BYTES(8); +} NV0005_ALLOC_PARAMETERS; + +/* NV0005_ALLOC_PARAMETERS's notifyIndex field is overloaded to contain the + * notifyIndex value itself, plus flags, and optionally a subdevice field if + * flags contains NV01_EVENT_SUBDEVICE_SPECIFIC. Note that NV01_EVENT_* + * contain the full 32-bit flag value that is OR'd into notifyIndex, not the + * contents of the FLAGS field (i.e. NV01_EVENT_* are pre-shifted into place). 
+ */ +#define NV0005_NOTIFY_INDEX_INDEX 15:0 +#define NV0005_NOTIFY_INDEX_SUBDEVICE 23:16 +#define NV0005_NOTIFY_INDEX_FLAGS 31:24 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0005_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl000f.h b/src/common/sdk/nvidia/inc/class/cl000f.h new file mode 100644 index 000000000..43663d191 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl000f.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl000f_h_ +#define _cl000f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define FABRIC_MANAGER_SESSION (0x0000000F) + +#define NV000F_NOTIFIERS_FABRIC_EVENT (0) + +#define NV000F_FLAGS_CHANNEL_RECOVERY 0:0 +#define NV000F_FLAGS_CHANNEL_RECOVERY_ENABLED 0x0 +#define NV000F_FLAGS_CHANNEL_RECOVERY_DISABLED 0x1 + +typedef struct +{ + // + // capDescriptor is a file descriptor for unix RM clients, but a void + // pointer for windows RM clients. + // + // capDescriptor is transparent to RM clients i.e. RM's user-mode shim + // populates this field on behalf of clients. + // + NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8); + + NvU32 flags; +} NV000F_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl000f_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl0020.h b/src/common/sdk/nvidia/inc/class/cl0020.h new file mode 100644 index 000000000..87154a942 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0020.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0020_h_ +#define _cl0020_h_ + +#include "nvtypes.h" + +#define NV0020_GPU_MANAGEMENT (0x00000020) + +#endif /* _cl0020_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0030.h b/src/common/sdk/nvidia/inc/class/cl0030.h new file mode 100644 index 000000000..9a0000ecf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0030.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0030_h_ +#define _cl0030_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_NULL (0x00000030) +/* NvNotification[] fields and values */ +#define NV030_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0030_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv030Typedef, Nv01Null; +#define NV030_TYPEDEF Nv01Null +/* obsolete stuff */ +#define NV1_NULL (0x00000030) +#define Nv1Null Nv01Null +#define nv1Null Nv01Null +#define nv01Null Nv01Null + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0030_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl003e.h b/src/common/sdk/nvidia/inc/class/cl003e.h new file mode 100644 index 000000000..d23e642fd --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl003e.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl003e_h_ +#define _cl003e_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_CONTEXT_ERROR_TO_MEMORY (0x0000003E) +#define NV01_MEMORY_SYSTEM (0x0000003E) +/* NvNotification[] fields and values */ +#define NV03E_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl003e_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv03eTypedef, Nv01ContextErrorToMemory; +#define NV03E_TYPEDEF Nv01ContextErrorToMemory +/* obsolete stuff */ +#define NV1_CONTEXT_ERROR_TO_MEMORY (0x0000003E) +#define Nv1ContextErrorToMemory Nv01ContextErrorToMemory +#define nv1ContextErrorToMemory Nv01ContextErrorToMemory +#define nv01ContextErrorToMemory Nv01ContextErrorToMemory + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl003e_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl003f.h b/src/common/sdk/nvidia/inc/class/cl003f.h new file mode 100644 index 000000000..b124538ca --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl003f.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl003f_h_ +#define _cl003f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_LOCAL_PRIVILEGED (0x0000003F) +/* NvNotification[] fields and values */ +#define NV03F_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +#ifndef AMD64 +typedef volatile struct _cl003f_tag0 { +#else +typedef struct { +#endif + NvV32 Reserved00[0x7c0]; +} Nv01MemoryLocalPrivileged; +#define NV03F_TYPEDEF Nv01MemoryLocalPrivileged +typedef Nv01MemoryLocalPrivileged Nv03fTypedef; +/* obsolete stuff */ +#define NV01_MEMORY_PRIVILEGED (0x0000003F) +#define NV1_MEMORY_PRIVILEGED (0x0000003F) +#define Nv01MemoryPrivileged Nv01MemoryLocalPrivileged +#define nv01MemoryPrivileged Nv01MemoryLocalPrivileged +#define Nv1MemoryPrivileged Nv01MemoryLocalPrivileged +#define nv1MemoryPrivileged Nv01MemoryLocalPrivileged +#define nv01MemoryLocalPrivileged Nv01MemoryLocalPrivileged + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl003f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0040.h b/src/common/sdk/nvidia/inc/class/cl0040.h new file mode 100644 index 000000000..103ec3a71 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0040.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2001 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl0040_h_ +#define _cl0040_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_LOCAL_USER (0x00000040) +/* NvNotification[] fields and values */ +#define NV040_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0040_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv040Typedef, Nv01MemoryLocalUser; +#define NV040_TYPEDEF Nv01MemoryLocalUser +/* obsolete stuff */ +#define NV01_MEMORY_USER (0x00000040) +#define NV1_MEMORY_USER (0x00000040) +#define Nv01MemoryUser Nv01MemoryLocalUser +#define nv01MemoryUser Nv01MemoryLocalUser +#define Nv1MemoryUser Nv01MemoryLocalUser +#define nv1MemoryUser Nv01MemoryLocalUser +#define nv01MemoryLocalUser Nv01MemoryLocalUser + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0040_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0041.h b/src/common/sdk/nvidia/inc/class/cl0041.h new file mode 100644 index 000000000..fa8707ad0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0041.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2001-2005, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0041_h_ +#define _cl0041_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV04_MEMORY (0x00000041) +/* NvNotification[] fields and values */ +#define NV041_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0041_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv041Typedef, Nv04Memory; +#define NV041_TYPEDEF Nv04Memory; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0041_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0060.h b/src/common/sdk/nvidia/inc/class/cl0060.h new file mode 100644 index 000000000..91fddcb6e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0060.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl0060_h_ +#define _cl0060_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV0060_SYNC_GPU_BOOST (0x00000060) + +/*! + */ +typedef struct { + NvU32 gpuBoostGroupId; +} NV0060_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl0060_h + diff --git a/src/common/sdk/nvidia/inc/class/cl0070.h b/src/common/sdk/nvidia/inc/class/cl0070.h new file mode 100644 index 000000000..e9202df9e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0070.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2001-2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0070_h_ +#define _cl0070_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_VIRTUAL (0x00000070) +#define NV01_MEMORY_SYSTEM_DYNAMIC (0x00000070) + +/* + * NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS + * + * Allocation params for NV01_MEMORY_VIRTUAL. + * + * NV01_MEMORY_SYSTEM_DYNAMIC is an alias for NV01_MEMORY_VIRTUAL. This + * was traditionally allocated with RmAllocMemory64(). The default GPU + * virtual address space is used, and the limit of this address space is + * returned in limit. The NV01_MEMORY_SYSTEM_DYNAMIC handle can be + * passed to RmAllocContextDma2() with an offset/limit. 
The context dma + * handle can then be used as the hDma handle for RmMapMemoryDma. + * + * This behavior is maintained in the RM compatibility shim. + * + * NV01_MEMORY_VIRTUAL replaces this behavior with a single object. + * + * hVASpace - if hVASpace is NV01_NULL_OBJECT the default GPU VA space is + * selected. Alternatively a FERMI_VASPACE_A handle may be specified. + * + * The NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE is used by the + * compatibility layer to emulate NV01_MEMORY_SYSTEM_DYNAMIC semantics. + * + * offset - An offset into the virtual address space may be specified. This + * will limit the range of the GPU VA returned by RmMapMemoryDma to be + * above offset. + * + * limit - When limit is zero the maximum limit is used. If a non-zero limit + * is specified then it will be used. The final limit is returned. + */ +typedef struct +{ + NvU64 offset NV_ALIGN_BYTES(8); // [IN] - offset into address space + NvU64 limit NV_ALIGN_BYTES(8); // [IN/OUT] - limit of address space + NvHandle hVASpace; // [IN] - Address space handle +} NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS; + +#define NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE (0xffffffffu) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0070_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0071.h b/src/common/sdk/nvidia/inc/class/cl0071.h new file mode 100644 index 000000000..be106d3d9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0071.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2001-2001, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0071_h_ +#define _cl0071_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_SYSTEM_OS_DESCRIPTOR (0x00000071) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0071_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0073.h b/src/common/sdk/nvidia/inc/class/cl0073.h new file mode 100644 index 000000000..377528739 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0073.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2001-2021, NVIDIA CORPORATION. All rights reserved.
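A minimal sketch of filling the NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS structure from cl0070.h above to use the default GPU VA space; the zero handle stands in for NV01_NULL_OBJECT, and the NvRmAlloc() call that would consume the structure is omitted:

NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS vaParams = { 0 };
vaParams.offset   = 0;  /* no lower bound on the GPU VA returned by RmMapMemoryDma */
vaParams.limit    = 0;  /* zero selects the maximum limit; the final limit is returned here */
vaParams.hVASpace = 0;  /* NV01_NULL_OBJECT: select the default GPU VA space */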
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0073_h_ +#define _cl0073_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV04_DISPLAY_COMMON (0x00000073) + +/* event values */ +#define NV0073_NOTIFIERS_SW (0) +#define NV0073_NOTIFIERS_MAXCOUNT (5) + + +#define NV0073_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV0073_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV0073_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV0073_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV0073_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +/* pio method data structure */ +typedef volatile struct _cl0073_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv073Typedef, Nv04DisplayCommon; +#define NV073_TYPEDEF Nv04DisplayCommon + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0073_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0076.h b/src/common/sdk/nvidia/inc/class/cl0076.h new file mode 100644 index 000000000..50bd0a92c --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0076.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl0076_h_ +#define _cl0076_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_FRAMEBUFFER_CONSOLE (0x00000076) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0076_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl007d.h b/src/common/sdk/nvidia/inc/class/cl007d.h new file mode 100644 index 000000000..f9d25d9be --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl007d.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl007d_h_ +#define _cl007d_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV04_SOFTWARE_TEST (0x0000007D) +#define NV07D 0x00001fff:0x00000000 +/* NvNotification[] elements */ +#define NV07D_NOTIFIERS_NOTIFY (0) +#define NV07D_NOTIFIERS_MAXCOUNT (1) +/* NvNotification[] fields and values */ +#define NV07D_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV07D_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +#define NV07D_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NV07D_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x1000) +#define NV07D_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x0800) +#define NV07D_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +/* pio method data structures */ +typedef volatile struct _cl007d_tag0 { + NvV32 NoOperation; /* ignored 0100-0103*/ + NvV32 Notify; /* NV07D_NOTIFY_* 0104-0107*/ + NvV32 Reserved0104[0x78/4]; + NvV32 SetContextDmaNotifies; /* NV01_CONTEXT_DMA 0180-0183*/ + NvV32 Reserved0184[0x1f7c/4]; +} Nv07dTypedef, Nv04SoftwareTest; + +#define NV07D_TYPEDEF Nv04SoftwareTest +/* dma method offsets, fields, and values */ +#define NV07D_SET_OBJECT (0x00000000) +#define NV07D_NO_OPERATION (0x00000100) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl007d_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0080.h b/src/common/sdk/nvidia/inc/class/cl0080.h new file mode 100644 index 000000000..7a920feed --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0080.h @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl0080_h_ +#define _cl0080_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvlimits.h" +#include "nvtypes.h" + +#define NV01_DEVICE_0 (0x00000080) +/* NvNotification[] fields and values */ +#define NV080_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl0080_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv080Typedef, Nv01Device0; +#define NV080_TYPEDEF Nv01Device0 + +/* NvAlloc parameters */ +#define NV0080_MAX_DEVICES NV_MAX_DEVICES +/** + * @brief Alloc param + * + * @param vaMode mode for virtual address space allocation + * Three modes: + * NV_DEVICE_ALLOCATION_VAMODE_OPTIONAL_MULTIPLE_VASPACES + * NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE + * NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES + * Detailed description of these modes is in nvos.h + **/ +typedef struct { + NvU32 deviceId; + NvHandle hClientShare; + NvHandle hTargetClient; + NvHandle hTargetDevice; + NvV32 flags; + NvU64 vaSpaceSize NV_ALIGN_BYTES(8); + NvU64 vaStartInternal NV_ALIGN_BYTES(8); + NvU64 vaLimitInternal NV_ALIGN_BYTES(8); + NvV32 vaMode; +} NV0080_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl0080_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0090.h b/src/common/sdk/nvidia/inc/class/cl0090.h new file mode 100644 index 000000000..1ed7835ac --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0090.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl0090_h_ +#define _cl0090_h_ + +#include "nvtypes.h" + +#define KERNEL_GRAPHICS_CONTEXT (0x00000090) + +#endif /* _cl0090_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl0092.h b/src/common/sdk/nvidia/inc/class/cl0092.h new file mode 100644 index 000000000..8a9007070 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl0092.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef SDK_CL0092_H +#define SDK_CL0092_H + +#include "nvtypes.h" + +/* + * This RgLineCallback class allows RM clients to register/unregister the RG line callback functions. + * + * Must be allocated with kernel access rights. + * + * Allocation params: + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON parent device to which the + * operation should be directed. + * head + * This parameter specifies the head for which the callback is to be registered/unregistered. This value must be + * less than the maximum number of heads supported by the GPU subdevice. + * rgLineNum + * This indicates the RG scanout line number on which the callback will be executed. + * 1/ Client should set the proper RG line number based on the mode in which the display head is running and + * any subsequent modeset that may affect the line number. + * 2/ Client is expected to clear/set the interrupts around modesets or power-transitions (like s3/hibernation). + * 3/ Client should make sure that this param does not exceed the raster settings. + * pCallbkFn + * Pointer to callback function. Cannot be NULL. + * pCallbkParams + * Pointer to the ctrl call param struct.
+ */ + +#define NV0092_RG_LINE_CALLBACK 0x0092 + +typedef void (*NV0092_REGISTER_RG_LINE_CALLBACK_FN)(NvU32 rgIntrLine, void *param1, NvBool bIsIrqlIsr); + +typedef struct +{ + NvU32 subDeviceInstance; + NvU32 head; + NvU32 rgLineNum; + + NV0092_REGISTER_RG_LINE_CALLBACK_FN pCallbkFn; + + void *pCallbkParams; +} NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS; + +#endif // SDK_CL0092_H diff --git a/src/common/sdk/nvidia/inc/class/cl00b1.h b/src/common/sdk/nvidia/inc/class/cl00b1.h new file mode 100644 index 000000000..bc6e69f17 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00b1.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _CL00B1_H_ +#define _CL00B1_H_ + +#define NV01_MEMORY_HW_RESOURCES 0x00b1 + +#endif // _CL00B1_H_ diff --git a/src/common/sdk/nvidia/inc/class/cl00c1.h b/src/common/sdk/nvidia/inc/class/cl00c1.h new file mode 100644 index 000000000..3e57495ba --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00c1.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
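A minimal sketch of populating the NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS structure from cl0092.h above; the callback, head, and RG line number are illustrative placeholders, and the allocation call itself is omitted:

static void exampleRgLineCallback(NvU32 rgIntrLine, void *param1, NvBool bIsIrqlIsr)
{
    /* react to the RG line interrupt; param1 receives whatever was passed as pCallbkParams */
}

NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS cbParams = { 0 };
cbParams.subDeviceInstance = 0;                      /* subdevice under the NV04_DISPLAY_COMMON parent */
cbParams.head              = 0;                      /* must be less than the GPU's head count */
cbParams.rgLineNum         = 42;                     /* placeholder; must stay within the raster settings */
cbParams.pCallbkFn         = exampleRgLineCallback;  /* cannot be NULL */
cbParams.pCallbkParams     = NULL;                   /* placeholder for the ctrl call param struct */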
+ */ + +#ifndef _cl00c1_h_ +#define _cl00c1_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvlimits.h" + +#define NV_FB_SEGMENT (0x000000C1) + +/* + * NV_FB_SEGMENT_ALLOCATION_PARAMS - Allocation params to create FB segment through + * NvRmAlloc. + */ +typedef struct +{ + NvHandle hCtxDma; + NvU32 subDeviceIDMask NV_ALIGN_BYTES(8); + NvU64 dmaOffset NV_ALIGN_BYTES(8); + NvU64 VidOffset NV_ALIGN_BYTES(8); + NvU64 Offset NV_ALIGN_BYTES(8); // To be deprecated + NvU64 pOffset[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8); + NvU64 Length NV_ALIGN_BYTES(8); + NvU64 ValidLength NV_ALIGN_BYTES(8); + NvP64 pPageArray NV_ALIGN_BYTES(8); + NvU32 startPageIndex; + NvHandle AllocHintHandle; + NvU32 Flags; + NvHandle hMemory; // Not used in NvRmAlloc path; only used in CTRL path + NvHandle hClient; // Not used in NvRmAlloc path; only used in CTRL path + NvHandle hDevice; // Not used in NvRmAlloc path; only used in CTRL path + NvP64 pCpuAddress NV_ALIGN_BYTES(8); // To be deprecated + NvP64 ppCpuAddress[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8); + NvU64 GpuAddress NV_ALIGN_BYTES(8); // To be deprecated + NvU64 pGpuAddress[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8); + NvHandle hAllocHintClient; + NvU32 kind; + NvU32 compTag; +} NV_FB_SEGMENT_ALLOCATION_PARAMS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl00c1_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl00c2.h b/src/common/sdk/nvidia/inc/class/cl00c2.h new file mode 100644 index 000000000..eca77e53e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00c2.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl00c2_h_ +#define _cl00c2_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV01_MEMORY_LOCAL_PHYSICAL (0x000000c2) + +typedef struct +{ + NvU64 memSize NV_ALIGN_BYTES(8); // [OUT] + NvU32 format; // [IN] - PTE format to use + NvU32 pageSize; // [IN] - Page size to use +} NV_PHYSICAL_MEMORY_ALLOCATION_PARAMS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl00c2_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl00c3.h b/src/common/sdk/nvidia/inc/class/cl00c3.h new file mode 100644 index 000000000..8d100bc10 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00c3.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
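A minimal sketch of the NV_PHYSICAL_MEMORY_ALLOCATION_PARAMS structure from cl00c2.h above; the format and page-size values are placeholders, since valid PTE formats are GPU-specific and not defined in this header:

NV_PHYSICAL_MEMORY_ALLOCATION_PARAMS physParams = { 0 };
physParams.format   = 0;       /* [IN] placeholder PTE format */
physParams.pageSize = 0x1000;  /* [IN] placeholder page size */
/* physParams.memSize is an [OUT] field and is filled in by RM on return */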
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef SDK_CL00C3_H +#define SDK_CL00C3_H + +#include "nvtypes.h" + +#define NV01_MEMORY_SYNCPOINT 0x00C3 + +/* +* NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS - Allocation params to create syncpoint +through NvRmAlloc. +*/ +typedef struct +{ + NvU32 syncpointId; +} NV_MEMORY_SYNCPOINT_ALLOCATION_PARAMS; + +#endif // SDK_CL00C3_H diff --git a/src/common/sdk/nvidia/inc/class/cl00db.h b/src/common/sdk/nvidia/inc/class/cl00db.h new file mode 100644 index 000000000..bc1a549f7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00db.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl00db_h_ +#define _cl00db_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV40_DEBUG_BUFFER (0x000000db) + +/* NvRmAlloc() parameters */ +typedef struct { + NvU32 size; /* Desired message size / actual size returned */ + NvU32 tag; /* Protobuf tag for message location in dump message */ +} NV00DB_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl00db_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl00f2.h b/src/common/sdk/nvidia/inc/class/cl00f2.h new file mode 100644 index 000000000..13b062426 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00f2.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl00f2_h_ +#define _cl00f2_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define IO_VASPACE_A (0x000000f2) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl00f2_h + diff --git a/src/common/sdk/nvidia/inc/class/cl00f3.h b/src/common/sdk/nvidia/inc/class/cl00f3.h new file mode 100644 index 000000000..1f5093777 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00f3.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
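A minimal sketch of the NV00DB_ALLOCATION_PARAMETERS structure from cl00db.h above; the requested size and protobuf tag are placeholders:

NV00DB_ALLOCATION_PARAMETERS dbParams = { 0 };
dbParams.size = 4096;  /* desired message size; the actual size is returned in the same field */
dbParams.tag  = 0;     /* placeholder protobuf tag for the message's location in the dump */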
+ */ + +#include "nvtypes.h" + +/* + * Class definition for creating a memory descriptor from a FLA range in RmAllocMemory. + * No memory is allocated, only a memory descriptor and memory object are created + * for later use in other calls. These classes are used by clients that try to + * import the memory exported by other GPU(s)/FAM/process. The range, size and + * other parameters are passed in the Nv01MemoryFla structure. + */ + +#ifndef _cl00f3_h_ +#define _cl00f3_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV01_MEMORY_FLA (0x000000f3) + +/* + * Structure of NV_FLA_MEMORY_ALLOCATION_PARAMS + * + * + */ +typedef struct { + NvU32 type; /* FBMEM: NVOS32_TYPE_* */ + NvU32 flags; /* FBMEM: NVOS32_ALLOC_FLAGS_* */ + NvU32 attr; /* FBMEM: NVOS32_ATTR_* */ + NvU32 attr2; /* FBMEM: NVOS32_ATTR2_* */ + NvU64 base; /* base of FLA range */ + NvU64 align; /* alignment for FLA range */ + NvU64 limit NV_ALIGN_BYTES(8); + // + // For Direct connected systems, clients need to program this hSubDevice with + // the exporting GPU, for RM to route the traffic to the destination GPU. + // Clients need not program this for NvSwitch connected systems. + // + NvHandle hExportSubdevice; /* hSubdevice of the exporting GPU */ + // + // Instead of base and limit, clients can also pass the FLA handle (or hExportHandle) + // being exported from the destination side to import on the access side + // + NvHandle hExportHandle; /* FLA handle being exported or Export handle */ + // The RM client used to export memory + NvHandle hExportClient; + NvU32 flagsOs02; +} NV_FLA_MEMORY_ALLOCATION_PARAMS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl00f3_h + diff --git a/src/common/sdk/nvidia/inc/class/cl00f8.h b/src/common/sdk/nvidia/inc/class/cl00f8.h new file mode 100644 index 000000000..662eafd9c --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00f8.h @@ -0,0 +1,118 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. +*/ + +#include "nvtypes.h" + +/* + * Class definition for allocating a contiguous or discontiguous FLA. + */ + +#ifndef _cl00f8_h_ +#define _cl00f8_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV_MEMORY_FABRIC (0x000000f8) + +/* + * alignment [IN] + * Alignment for the allocation. + * Should be at least the requested page size. + * + * allocSize [IN] + * Size of the FLA VA.
+ * + * pageSize [IN] + * Requested page size. Can be any of the NV_MEMORY_FABRIC_PAGE_SIZE_* + * + * allocFlags [IN] + * Can be any of the NV00F8_ALLOC_FLAGS_* + * DEFAULT (sticky) + * The FLA -> PA mappings will be stuck to the object, i.e., once the mapping is created + * there is no way to unmap it explicitly. + * The FLA object must be destroyed to release the mappings. + * The FLA object can't be duped or exported until it has a mapping associated with it. + * Partial FLA->PA mappings will NOT be allowed. + * FLEXIBLE_FLA + * The FLA -> PA mappings can be modified anytime irrespective of whether the FLA object is duped + * or exported. + * Partial FLA mappings are allowed. + * FORCE_NONCONTIGUOUS + * The allocator may pick contiguous memory whenever possible. This flag forces the + * allocator to always allocate noncontiguous memory. This flag is mainly used for + * testing purposes, so use it with caution. + * FORCE_CONTIGUOUS + * This flag forces the allocator to always allocate contiguous memory. + * READ_ONLY + * The FLA -> PA mappings will be created read-only. This option is only available on + * debug/develop builds due to security concerns. The security concerns are due to the + * fact that FLA access errors (a.k.a. PRIV errors) are not always context attributable. + * + * map.offset [IN] + * Offset into the physical memory descriptor. + * Must be physical memory page size aligned. + * + * map.hVidMem [IN] + * Handle to the physical video memory. Must be passed when the sticky flag is set so that the + * FLA -> PA mapping can happen during object creation. + * Phys memory with 2MB pages is supported. + * Phys memory handle can be NV01_NULL_OBJECT if the FLEXIBLE_FLA flag is passed. + * hVidMem should belong to the same device and client that is allocating the FLA. + * + * map.flags [IN] + * Reserved for future use. + * Clients should pass 0 as of now. + */ + +#define NV_MEMORY_FABRIC_PAGE_SIZE_2M 0x200000 +#define NV_MEMORY_FABRIC_PAGE_SIZE_512M 0x20000000 + +#define NV00F8_ALLOC_FLAGS_DEFAULT 0 +#define NV00F8_ALLOC_FLAGS_FLEXIBLE_FLA NVBIT(0) +#define NV00F8_ALLOC_FLAGS_FORCE_NONCONTIGUOUS NVBIT(1) +#define NV00F8_ALLOC_FLAGS_FORCE_CONTIGUOUS NVBIT(2) +#define NV00F8_ALLOC_FLAGS_READ_ONLY NVBIT(3) + +typedef struct { + + NV_DECLARE_ALIGNED(NvU64 alignment, 8); + NV_DECLARE_ALIGNED(NvU64 allocSize, 8); + + NvU32 pageSize; + NvU32 allocFlags; + + struct { + NV_DECLARE_ALIGNED(NvU64 offset, 8); + + NvHandle hVidMem; + NvU32 flags; + } map; + +} NV00F8_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif /* _cl00f8_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl00fc.h b/src/common/sdk/nvidia/inc/class/cl00fc.h new file mode 100644 index 000000000..aa50ead4c --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl00fc.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
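A minimal sketch of a sticky (DEFAULT) fabric memory allocation using the NV00F8_ALLOCATION_PARAMETERS structure from cl00f8.h above; the sizes are placeholders and hPhysicalVidmem is a hypothetical handle to video memory allocated with 2MB pages:

NV00F8_ALLOCATION_PARAMETERS fabricParams = { 0 };
fabricParams.alignment   = NV_MEMORY_FABRIC_PAGE_SIZE_2M;      /* at least the requested page size */
fabricParams.allocSize   = 4 * NV_MEMORY_FABRIC_PAGE_SIZE_2M;  /* placeholder FLA VA size */
fabricParams.pageSize    = NV_MEMORY_FABRIC_PAGE_SIZE_2M;
fabricParams.allocFlags  = NV00F8_ALLOC_FLAGS_DEFAULT;         /* sticky: FLA -> PA mapping is made at creation */
fabricParams.map.offset  = 0;                                  /* must be physical page size aligned */
fabricParams.map.hVidMem = hPhysicalVidmem;                    /* required when the sticky flag is used */
fabricParams.map.flags   = 0;                                  /* reserved; pass 0 */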
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl00fc_h_ +#define _cl00fc_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define FABRIC_VASPACE_A (0x000000fc) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl00fc_h + diff --git a/src/common/sdk/nvidia/inc/class/cl2080.h b/src/common/sdk/nvidia/inc/class/cl2080.h new file mode 100644 index 000000000..b43209c12 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl2080.h @@ -0,0 +1,497 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl2080_h_ +#define _cl2080_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvlimits.h" + +#define NV20_SUBDEVICE_0 (0x00002080) + +/* event values */ +#define NV2080_NOTIFIERS_SW (0) +#define NV2080_NOTIFIERS_HOTPLUG (1) +#define NV2080_NOTIFIERS_POWER_CONNECTOR (2) +#define NV2080_NOTIFIERS_THERMAL_SW (3) +#define NV2080_NOTIFIERS_THERMAL_HW (4) +#define NV2080_NOTIFIERS_FULL_SCREEN_CHANGE (5) +#define NV2080_NOTIFIERS_EVENTBUFFER (6) +#define NV2080_NOTIFIERS_DP_IRQ (7) +#define NV2080_NOTIFIERS_GR_DEBUG_INTR (8) +#define NV2080_NOTIFIERS_PMU_EVENT (9) +#define NV2080_NOTIFIERS_PMU_COMMAND (10) +#define NV2080_NOTIFIERS_TIMER (11) +#define NV2080_NOTIFIERS_GRAPHICS (12) +#define NV2080_NOTIFIERS_PPP (13) +#define NV2080_NOTIFIERS_VLD (14) // also known as BSP +#define NV2080_NOTIFIERS_NVDEC0 NV2080_NOTIFIERS_VLD +#define NV2080_NOTIFIERS_NVDEC1 (15) +#define NV2080_NOTIFIERS_NVDEC2 (16) +#define NV2080_NOTIFIERS_NVDEC3 (17) +#define NV2080_NOTIFIERS_NVDEC4 (18) +#define NV2080_NOTIFIERS_RESERVED19 (19) +#define NV2080_NOTIFIERS_RESERVED20 (20) +#define NV2080_NOTIFIERS_RESERVED21 (21) +#define NV2080_NOTIFIERS_PDEC (22) // also known as VP +#define NV2080_NOTIFIERS_CE0 (23) +#define NV2080_NOTIFIERS_CE1 (24) +#define NV2080_NOTIFIERS_CE2 (25) +#define NV2080_NOTIFIERS_CE3 (26) +#define NV2080_NOTIFIERS_CE4 (27) +#define NV2080_NOTIFIERS_CE5 (28) +#define NV2080_NOTIFIERS_CE6 (29) +#define NV2080_NOTIFIERS_CE7 (30) +#define NV2080_NOTIFIERS_CE8 (31) +#define NV2080_NOTIFIERS_CE9 (32) +#define NV2080_NOTIFIERS_PSTATE_CHANGE (33) +#define NV2080_NOTIFIERS_HDCP_STATUS_CHANGE (34) +#define NV2080_NOTIFIERS_FIFO_EVENT_MTHD (35) +#define NV2080_NOTIFIERS_PRIV_RING_HANG (36) +#define NV2080_NOTIFIERS_RC_ERROR (37) +#define NV2080_NOTIFIERS_MSENC (38) +#define NV2080_NOTIFIERS_NVENC0 NV2080_NOTIFIERS_MSENC +#define NV2080_NOTIFIERS_NVENC1 (39) +#define NV2080_NOTIFIERS_NVENC2 (40) +#define NV2080_NOTIFIERS_UNUSED_0 (41) // Unused +#define NV2080_NOTIFIERS_ACPI_NOTIFY (42) +#define NV2080_NOTIFIERS_COOLER_DIAG_ZONE (43) +#define NV2080_NOTIFIERS_THERMAL_DIAG_ZONE (44) +#define NV2080_NOTIFIERS_AUDIO_HDCP_REQUEST (45) +#define NV2080_NOTIFIERS_WORKLOAD_MODULATION_CHANGE (46) +#define NV2080_NOTIFIERS_GPIO_0_RISING_INTERRUPT (47) +#define NV2080_NOTIFIERS_GPIO_1_RISING_INTERRUPT (48) +#define NV2080_NOTIFIERS_GPIO_2_RISING_INTERRUPT (49) +#define NV2080_NOTIFIERS_GPIO_3_RISING_INTERRUPT (50) +#define NV2080_NOTIFIERS_GPIO_4_RISING_INTERRUPT (51) +#define NV2080_NOTIFIERS_GPIO_5_RISING_INTERRUPT (52) +#define NV2080_NOTIFIERS_GPIO_6_RISING_INTERRUPT (53) +#define NV2080_NOTIFIERS_GPIO_7_RISING_INTERRUPT (54) +#define NV2080_NOTIFIERS_GPIO_8_RISING_INTERRUPT (55) +#define NV2080_NOTIFIERS_GPIO_9_RISING_INTERRUPT (56) +#define NV2080_NOTIFIERS_GPIO_10_RISING_INTERRUPT (57) +#define NV2080_NOTIFIERS_GPIO_11_RISING_INTERRUPT (58) +#define NV2080_NOTIFIERS_GPIO_12_RISING_INTERRUPT (59) +#define NV2080_NOTIFIERS_GPIO_13_RISING_INTERRUPT (60) +#define NV2080_NOTIFIERS_GPIO_14_RISING_INTERRUPT (61) +#define NV2080_NOTIFIERS_GPIO_15_RISING_INTERRUPT (62) +#define NV2080_NOTIFIERS_GPIO_16_RISING_INTERRUPT (63) +#define NV2080_NOTIFIERS_GPIO_17_RISING_INTERRUPT (64) +#define NV2080_NOTIFIERS_GPIO_18_RISING_INTERRUPT (65) +#define NV2080_NOTIFIERS_GPIO_19_RISING_INTERRUPT (66) +#define NV2080_NOTIFIERS_GPIO_20_RISING_INTERRUPT (67) +#define NV2080_NOTIFIERS_GPIO_21_RISING_INTERRUPT (68) +#define NV2080_NOTIFIERS_GPIO_22_RISING_INTERRUPT (69) +#define 
NV2080_NOTIFIERS_GPIO_23_RISING_INTERRUPT (70) +#define NV2080_NOTIFIERS_GPIO_24_RISING_INTERRUPT (71) +#define NV2080_NOTIFIERS_GPIO_25_RISING_INTERRUPT (72) +#define NV2080_NOTIFIERS_GPIO_26_RISING_INTERRUPT (73) +#define NV2080_NOTIFIERS_GPIO_27_RISING_INTERRUPT (74) +#define NV2080_NOTIFIERS_GPIO_28_RISING_INTERRUPT (75) +#define NV2080_NOTIFIERS_GPIO_29_RISING_INTERRUPT (76) +#define NV2080_NOTIFIERS_GPIO_30_RISING_INTERRUPT (77) +#define NV2080_NOTIFIERS_GPIO_31_RISING_INTERRUPT (78) +#define NV2080_NOTIFIERS_GPIO_0_FALLING_INTERRUPT (79) +#define NV2080_NOTIFIERS_GPIO_1_FALLING_INTERRUPT (80) +#define NV2080_NOTIFIERS_GPIO_2_FALLING_INTERRUPT (81) +#define NV2080_NOTIFIERS_GPIO_3_FALLING_INTERRUPT (82) +#define NV2080_NOTIFIERS_GPIO_4_FALLING_INTERRUPT (83) +#define NV2080_NOTIFIERS_GPIO_5_FALLING_INTERRUPT (84) +#define NV2080_NOTIFIERS_GPIO_6_FALLING_INTERRUPT (85) +#define NV2080_NOTIFIERS_GPIO_7_FALLING_INTERRUPT (86) +#define NV2080_NOTIFIERS_GPIO_8_FALLING_INTERRUPT (87) +#define NV2080_NOTIFIERS_GPIO_9_FALLING_INTERRUPT (88) +#define NV2080_NOTIFIERS_GPIO_10_FALLING_INTERRUPT (89) +#define NV2080_NOTIFIERS_GPIO_11_FALLING_INTERRUPT (90) +#define NV2080_NOTIFIERS_GPIO_12_FALLING_INTERRUPT (91) +#define NV2080_NOTIFIERS_GPIO_13_FALLING_INTERRUPT (92) +#define NV2080_NOTIFIERS_GPIO_14_FALLING_INTERRUPT (93) +#define NV2080_NOTIFIERS_GPIO_15_FALLING_INTERRUPT (94) +#define NV2080_NOTIFIERS_GPIO_16_FALLING_INTERRUPT (95) +#define NV2080_NOTIFIERS_GPIO_17_FALLING_INTERRUPT (96) +#define NV2080_NOTIFIERS_GPIO_18_FALLING_INTERRUPT (97) +#define NV2080_NOTIFIERS_GPIO_19_FALLING_INTERRUPT (98) +#define NV2080_NOTIFIERS_GPIO_20_FALLING_INTERRUPT (99) +#define NV2080_NOTIFIERS_GPIO_21_FALLING_INTERRUPT (100) +#define NV2080_NOTIFIERS_GPIO_22_FALLING_INTERRUPT (101) +#define NV2080_NOTIFIERS_GPIO_23_FALLING_INTERRUPT (102) +#define NV2080_NOTIFIERS_GPIO_24_FALLING_INTERRUPT (103) +#define NV2080_NOTIFIERS_GPIO_25_FALLING_INTERRUPT (104) +#define NV2080_NOTIFIERS_GPIO_26_FALLING_INTERRUPT (105) +#define NV2080_NOTIFIERS_GPIO_27_FALLING_INTERRUPT (106) +#define NV2080_NOTIFIERS_GPIO_28_FALLING_INTERRUPT (107) +#define NV2080_NOTIFIERS_GPIO_29_FALLING_INTERRUPT (108) +#define NV2080_NOTIFIERS_GPIO_30_FALLING_INTERRUPT (109) +#define NV2080_NOTIFIERS_GPIO_31_FALLING_INTERRUPT (110) +#define NV2080_NOTIFIERS_ECC_SBE (111) +#define NV2080_NOTIFIERS_ECC_DBE (112) +#define NV2080_NOTIFIERS_STEREO_EMITTER_DETECTION (113) +#define NV2080_NOTIFIERS_GC5_GPU_READY (114) +#define NV2080_NOTIFIERS_SEC2 (115) +#define NV2080_NOTIFIERS_GC6_REFCOUNT_INC (116) +#define NV2080_NOTIFIERS_GC6_REFCOUNT_DEC (117) +#define NV2080_NOTIFIERS_POWER_EVENT (118) +#define NV2080_NOTIFIERS_CLOCKS_CHANGE (119) +#define NV2080_NOTIFIERS_HOTPLUG_PROCESSING_COMPLETE (120) +#define NV2080_NOTIFIERS_PHYSICAL_PAGE_FAULT (121) +#define NV2080_NOTIFIERS_RESERVED_122 (122) +#define NV2080_NOTIFIERS_NVLINK_ERROR_FATAL (123) +#define NV2080_NOTIFIERS_PRIV_REG_ACCESS_FAULT (124) +#define NV2080_NOTIFIERS_NVLINK_ERROR_RECOVERY_REQUIRED (125) +#define NV2080_NOTIFIERS_NVJPG (126) +#define NV2080_NOTIFIERS_NVJPEG0 NV2080_NOTIFIERS_NVJPG +#define NV2080_NOTIFIERS_RESERVED127 (127) +#define NV2080_NOTIFIERS_RESERVED128 (128) +#define NV2080_NOTIFIERS_RESERVED129 (129) +#define NV2080_NOTIFIERS_RESERVED130 (130) +#define NV2080_NOTIFIERS_RESERVED131 (131) +#define NV2080_NOTIFIERS_RESERVED132 (132) +#define NV2080_NOTIFIERS_RESERVED133 (133) +#define NV2080_NOTIFIERS_RUNLIST_AND_ENG_IDLE (134) +#define NV2080_NOTIFIERS_RUNLIST_ACQUIRE 
(135) +#define NV2080_NOTIFIERS_RUNLIST_ACQUIRE_AND_ENG_IDLE (136) +#define NV2080_NOTIFIERS_RUNLIST_IDLE (137) +#define NV2080_NOTIFIERS_TSG_PREEMPT_COMPLETE (138) +#define NV2080_NOTIFIERS_RUNLIST_PREEMPT_COMPLETE (139) +#define NV2080_NOTIFIERS_CTXSW_TIMEOUT (140) +#define NV2080_NOTIFIERS_INFOROM_ECC_OBJECT_UPDATED (141) +#define NV2080_NOTIFIERS_NVTELEMETRY_REPORT_EVENT (142) +#define NV2080_NOTIFIERS_DSTATE_XUSB_PPC (143) +#define NV2080_NOTIFIERS_FECS_CTX_SWITCH (144) +#define NV2080_NOTIFIERS_XUSB_PPC_CONNECTED (145) +#define NV2080_NOTIFIERS_GR0 NV2080_NOTIFIERS_GRAPHICS +#define NV2080_NOTIFIERS_GR1 (146) +#define NV2080_NOTIFIERS_GR2 (147) +#define NV2080_NOTIFIERS_GR3 (148) +#define NV2080_NOTIFIERS_GR4 (149) +#define NV2080_NOTIFIERS_GR5 (150) +#define NV2080_NOTIFIERS_GR6 (151) +#define NV2080_NOTIFIERS_GR7 (152) +#define NV2080_NOTIFIERS_OFA (153) +#define NV2080_NOTIFIERS_DSTATE_HDA (154) +#define NV2080_NOTIFIERS_POISON_ERROR_NON_FATAL (155) +#define NV2080_NOTIFIERS_POISON_ERROR_FATAL (156) +#define NV2080_NOTIFIERS_UCODE_RESET (157) +#define NV2080_NOTIFIERS_PLATFORM_POWER_MODE_CHANGE (158) +#define NV2080_NOTIFIERS_SMC_CONFIG_UPDATE (159) +#define NV2080_NOTIFIERS_INFOROM_RRL_OBJECT_UPDATED (160) +#define NV2080_NOTIFIERS_INFOROM_PBL_OBJECT_UPDATED (161) +#define NV2080_NOTIFIERS_LPWR_DIFR_PREFETCH_REQUEST (162) +#define NV2080_NOTIFIERS_SEC_FAULT_ERROR (163) +#define NV2080_NOTIFIERS_POSSIBLE_ERROR (164) +#define NV2080_NOTIFIERS_MAXCOUNT (165) + +// Indexed GR notifier reference +#define NV2080_NOTIFIERS_GR(x) ((x == 0) ? (NV2080_NOTIFIERS_GR0) : (NV2080_NOTIFIERS_GR1 + (x-1))) +#define NV2080_NOTIFIER_TYPE_IS_GR(x) (((x) == NV2080_NOTIFIERS_GR0) || (((x) >= NV2080_NOTIFIERS_GR1) && ((x) <= NV2080_NOTIFIERS_GR7))) +// Indexed CE notifier reference +#define NV2080_NOTIFIERS_CE(x) (NV2080_NOTIFIERS_CE0 + (x)) +#define NV2080_NOTIFIER_TYPE_IS_CE(x) (((x) >= NV2080_NOTIFIERS_CE0) && ((x) <= NV2080_NOTIFIERS_CE9)) +// Indexed MSENC notifier reference +#define NV2080_NOTIFIERS_NVENC(x) (NV2080_NOTIFIERS_NVENC0 + (x)) +#define NV2080_NOTIFIER_TYPE_IS_NVENC(x) (((x) >= NV2080_NOTIFIERS_NVENC0) && ((x) <= NV2080_NOTIFIERS_NVENC2)) +// Indexed NVDEC notifier reference +#define NV2080_NOTIFIERS_NVDEC(x) (NV2080_NOTIFIERS_NVDEC0 + (x)) + +#define NV2080_NOTIFIER_TYPE_IS_NVDEC(x) (((x) >= NV2080_NOTIFIERS_NVDEC0) && ((x) <= NV2080_NOTIFIERS_NVDEC4)) + +// Indexed NVJPEG notifier reference +#define NV2080_NOTIFIERS_NVJPEG(x) (NV2080_NOTIFIERS_NVJPEG0 + (x)) +#define NV2080_NOTIFIER_TYPE_IS_NVJPEG(x) (((x) >= NV2080_NOTIFIERS_NVJPEG0) && ((x) <= NV2080_NOTIFIERS_NVJPEG0)) + +#define NV2080_NOTIFIERS_GPIO_RISING_INTERRUPT(pin) (NV2080_NOTIFIERS_GPIO_0_RISING_INTERRUPT+(pin)) +#define NV2080_NOTIFIERS_GPIO_FALLING_INTERRUPT(pin) (NV2080_NOTIFIERS_GPIO_0_FALLING_INTERRUPT+(pin)) + +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV2080_SUBDEVICE_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +/* exported engine defines */ +#define NV2080_ENGINE_TYPE_NULL (0x00000000) +#define NV2080_ENGINE_TYPE_GRAPHICS (0x00000001) +#define NV2080_ENGINE_TYPE_GR0 NV2080_ENGINE_TYPE_GRAPHICS +#define NV2080_ENGINE_TYPE_GR1 (0x00000002) +#define NV2080_ENGINE_TYPE_GR2 (0x00000003) +#define NV2080_ENGINE_TYPE_GR3 (0x00000004) +#define NV2080_ENGINE_TYPE_GR4 
(0x00000005) +#define NV2080_ENGINE_TYPE_GR5 (0x00000006) +#define NV2080_ENGINE_TYPE_GR6 (0x00000007) +#define NV2080_ENGINE_TYPE_GR7 (0x00000008) +#define NV2080_ENGINE_TYPE_COPY0 (0x00000009) +#define NV2080_ENGINE_TYPE_COPY1 (0x0000000a) +#define NV2080_ENGINE_TYPE_COPY2 (0x0000000b) +#define NV2080_ENGINE_TYPE_COPY3 (0x0000000c) +#define NV2080_ENGINE_TYPE_COPY4 (0x0000000d) +#define NV2080_ENGINE_TYPE_COPY5 (0x0000000e) +#define NV2080_ENGINE_TYPE_COPY6 (0x0000000f) +#define NV2080_ENGINE_TYPE_COPY7 (0x00000010) +#define NV2080_ENGINE_TYPE_COPY8 (0x00000011) +#define NV2080_ENGINE_TYPE_COPY9 (0x00000012) +#define NV2080_ENGINE_TYPE_BSP (0x00000013) +#define NV2080_ENGINE_TYPE_NVDEC0 NV2080_ENGINE_TYPE_BSP +#define NV2080_ENGINE_TYPE_NVDEC1 (0x00000014) +#define NV2080_ENGINE_TYPE_NVDEC2 (0x00000015) +#define NV2080_ENGINE_TYPE_NVDEC3 (0x00000016) +#define NV2080_ENGINE_TYPE_NVDEC4 (0x00000017) +#define NV2080_ENGINE_TYPE_RESERVED18 (0x00000018) +#define NV2080_ENGINE_TYPE_RESERVED19 (0x00000019) +#define NV2080_ENGINE_TYPE_RESERVED1A (0x0000001a) +#define NV2080_ENGINE_TYPE_MSENC (0x0000001b) +#define NV2080_ENGINE_TYPE_NVENC0 NV2080_ENGINE_TYPE_MSENC /* Mutually exclusive alias */ +#define NV2080_ENGINE_TYPE_NVENC1 (0x0000001c) +#define NV2080_ENGINE_TYPE_NVENC2 (0x0000001d) +#define NV2080_ENGINE_TYPE_VP (0x0000001e) +#define NV2080_ENGINE_TYPE_ME (0x0000001f) +#define NV2080_ENGINE_TYPE_PPP (0x00000020) +#define NV2080_ENGINE_TYPE_MPEG (0x00000021) +#define NV2080_ENGINE_TYPE_SW (0x00000022) +#define NV2080_ENGINE_TYPE_CIPHER (0x00000023) +#define NV2080_ENGINE_TYPE_TSEC NV2080_ENGINE_TYPE_CIPHER +#define NV2080_ENGINE_TYPE_VIC (0x00000024) +#define NV2080_ENGINE_TYPE_MP (0x00000025) +#define NV2080_ENGINE_TYPE_SEC2 (0x00000026) +#define NV2080_ENGINE_TYPE_HOST (0x00000027) +#define NV2080_ENGINE_TYPE_DPU (0x00000028) +#define NV2080_ENGINE_TYPE_PMU (0x00000029) +#define NV2080_ENGINE_TYPE_FBFLCN (0x0000002a) +#define NV2080_ENGINE_TYPE_NVJPG (0x0000002b) +#define NV2080_ENGINE_TYPE_NVJPEG0 NV2080_ENGINE_TYPE_NVJPG +#define NV2080_ENGINE_TYPE_RESERVED2C (0x0000002c) +#define NV2080_ENGINE_TYPE_RESERVED2D (0x0000002d) +#define NV2080_ENGINE_TYPE_RESERVED2E (0x0000002e) +#define NV2080_ENGINE_TYPE_RESERVED2F (0x0000002f) +#define NV2080_ENGINE_TYPE_RESERVED30 (0x00000030) +#define NV2080_ENGINE_TYPE_RESERVED31 (0x00000031) +#define NV2080_ENGINE_TYPE_RESERVED32 (0x00000032) +#define NV2080_ENGINE_TYPE_OFA (0x00000033) +#define NV2080_ENGINE_TYPE_LAST (0x00000034) +#define NV2080_ENGINE_TYPE_ALLENGINES (0xffffffff) + +#define NV2080_ENGINE_TYPE_COPY_SIZE 10 +#define NV2080_ENGINE_TYPE_NVENC_SIZE 3 + +#define NV2080_ENGINE_TYPE_NVJPEG_SIZE 1 + +#define NV2080_ENGINE_TYPE_NVDEC_SIZE 5 + +#define NV2080_ENGINE_TYPE_GR_SIZE 8 + +// Indexed engines +#define NV2080_ENGINE_TYPE_COPY(i) (NV2080_ENGINE_TYPE_COPY0+(i)) +#define NV2080_ENGINE_TYPE_IS_COPY(i) (((i) >= NV2080_ENGINE_TYPE_COPY0) && ((i) < NV2080_ENGINE_TYPE_COPY(NV2080_ENGINE_TYPE_COPY_SIZE))) +#define NV2080_ENGINE_TYPE_COPY_IDX(i) ((i) - NV2080_ENGINE_TYPE_COPY0) + +#define NV2080_ENGINE_TYPE_NVENC(i) (NV2080_ENGINE_TYPE_NVENC0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVENC(i) (((i) >= NV2080_ENGINE_TYPE_NVENC0) && ((i) < NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE))) +#define NV2080_ENGINE_TYPE_NVENC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVENC0) + +#define NV2080_ENGINE_TYPE_NVDEC(i) (NV2080_ENGINE_TYPE_NVDEC0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVDEC(i) (((i) >= NV2080_ENGINE_TYPE_NVDEC0) && ((i) < 
NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE))) +#define NV2080_ENGINE_TYPE_NVDEC_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVDEC0) + +#define NV2080_ENGINE_TYPE_NVJPEG(i) (NV2080_ENGINE_TYPE_NVJPEG0+(i)) +#define NV2080_ENGINE_TYPE_IS_NVJPEG(i) (((i) >= NV2080_ENGINE_TYPE_NVJPEG0) && ((i) < NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE))) +#define NV2080_ENGINE_TYPE_NVJPEG_IDX(i) ((i) - NV2080_ENGINE_TYPE_NVJPEG0) + +#define NV2080_ENGINE_TYPE_GR(i) (NV2080_ENGINE_TYPE_GR0 + (i)) +#define NV2080_ENGINE_TYPE_IS_GR(i) (((i) >= NV2080_ENGINE_TYPE_GR0) && ((i) < NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE))) +#define NV2080_ENGINE_TYPE_GR_IDX(i) ((i) - NV2080_ENGINE_TYPE_GR0) + +#define NV2080_ENGINE_TYPE_IS_VALID(i) (((i) > (NV2080_ENGINE_TYPE_NULL)) && ((i) < (NV2080_ENGINE_TYPE_LAST))) + +/* exported client defines */ +#define NV2080_CLIENT_TYPE_TEX (0x00000001) +#define NV2080_CLIENT_TYPE_COLOR (0x00000002) +#define NV2080_CLIENT_TYPE_DEPTH (0x00000003) +#define NV2080_CLIENT_TYPE_DA (0x00000004) +#define NV2080_CLIENT_TYPE_FE (0x00000005) +#define NV2080_CLIENT_TYPE_SCC (0x00000006) +#define NV2080_CLIENT_TYPE_WID (0x00000007) +#define NV2080_CLIENT_TYPE_MSVLD (0x00000008) +#define NV2080_CLIENT_TYPE_MSPDEC (0x00000009) +#define NV2080_CLIENT_TYPE_MSPPP (0x0000000a) +#define NV2080_CLIENT_TYPE_VIC (0x0000000b) +#define NV2080_CLIENT_TYPE_ALLCLIENTS (0xffffffff) + +/* GC5 Gpu Ready event defines */ +#define NV2080_GC5_EXIT_COMPLETE (0x00000001) +#define NV2080_GC5_ENTRY_ABORTED (0x00000002) + +/* Platform Power Mode event defines */ +#define NV2080_PLATFORM_POWER_MODE_CHANGE_COMPLETION (0x00000000) +#define NV2080_PLATFORM_POWER_MODE_CHANGE_ACPI_NOTIFICATION (0x00000001) + +/* NvNotification[] fields and values */ +#define NV2080_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl2080_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv2080Typedef, Nv20Subdevice0; +#define NV2080_TYPEDEF Nv20Subdevice0 + +/* NvAlloc parameteters */ +#define NV2080_MAX_SUBDEVICES NV_MAX_SUBDEVICES +typedef struct { + NvU32 subDeviceId; +} NV2080_ALLOC_PARAMETERS; + +/* HDCP Status change notification information */ +typedef struct Nv2080HdcpStatusChangeNotificationRec { + NvU32 displayId; + NvU32 hdcpStatusChangeNotif; +} Nv2080HdcpStatusChangeNotification; + +/* Pstate change notification information */ +typedef struct Nv2080PStateChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 NewPstate; +} Nv2080PStateChangeNotification; + +/* Clocks change notification information */ +typedef struct Nv2080ClocksChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ +} Nv2080ClocksChangeNotification; + +/* WorkLoad Modulation state change notification information*/ +typedef struct Nv2080WorkloadModulationChangeNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 
1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvBool WorkloadModulationEnabled; +} Nv2080WorkloadModulationChangeNotification; + +/* Hotplug notification information */ +typedef struct { + NvU32 plugDisplayMask; + NvU32 unplugDisplayMask; +} Nv2080HotplugNotification; + +/* Power state changing notification information */ +typedef struct { + NvBool bSwitchToAC; + NvBool bGPUCapabilityChanged; + NvU32 displayMaskAffected; +} Nv2080PowerEventNotification; + +/* DP IRQ notification information */ +typedef struct Nv2080DpIrqNotificationRec { + NvU32 displayId; +} Nv2080DpIrqNotification; + +/* XUSB/PPC D-State change notification information */ +typedef struct Nv2080DstateXusbPpcNotificationRec { + NvU32 dstateXusb; + NvU32 dstatePpc; +} Nv2080DstateXusbPpcNotification; + +/* XUSB/PPC Connection status notification information */ +typedef struct Nv2080XusbPpcConnectStateNotificationRec { + NvBool bConnected; +} Nv2080XusbPpcConnectStateNotification; + +/* ACPI event notification information */ +typedef struct Nv2080ACPIEvent { + NvU32 event; +} Nv2080ACPIEvent; + +/* Cooler Zone notification information */ +typedef struct _NV2080_COOLER_DIAG_ZONE_NOTIFICATION_REC { + NvU32 currentZone; +} NV2080_COOLER_DIAG_ZONE_NOTIFICATION_REC; + +/* Thermal Zone notification information */ +typedef struct _NV2080_THERM_DIAG_ZONE_NOTIFICATION_REC { + NvU32 currentZone; +} NV2080_THERM_DIAG_ZONE_NOTIFICATION_REC; + +/* HDCP ref count change notification information */ +typedef struct Nv2080AudioHdcpRequestRec { + NvU32 displayId; + NvU32 requestedState; +} Nv2080AudioHdcpRequest; + +/* Gpu ready event information */ +typedef struct Nv2080GC5GpuReadyParams { + NvU32 event; + NvU32 sciIntr0; + NvU32 sciIntr1; +} Nv2080GC5GpuReadyParams; + +/* Priv reg access fault notification information */ +typedef struct { + NvU32 errAddr; +} Nv2080PrivRegAccessFaultNotification; + +/* HDA D-State change notification information + * See @HDACODEC_DSTATE for definitions + */ +typedef struct Nv2080DstateHdaCodecNotificationRec { + NvU32 dstateHdaCodec; +} Nv2080DstateHdaCodecNotification; + +/* + * Platform Power Mode event information + */ +typedef struct _NV2080_PLATFORM_POWER_MODE_CHANGE_STATUS { + NvU8 platformPowerModeIndex; + NvU8 platformPowerModeMask; + NvU8 eventReason; +} NV2080_PLATFORM_POWER_MODE_CHANGE_STATUS; + +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_INDEX 7:0 +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_MASK 15:8 +#define NV2080_PLATFORM_POWER_MODE_CHANGE_INFO_REASON 23:16 + +/* + * ENGINE_INFO_TYPE_NV2080 of the engine for which the QOS interrupt has been raised + */ +typedef struct { + NvU32 engineType; +} Nv2080QosIntrNotification; + +typedef struct { + NvU64 physAddress NV_ALIGN_BYTES(8); +} Nv2080EccDbeNotification; + +/* + * LPWR DIFR Prefetch Request - Size of L2 Cache + */ +typedef struct { + NvU32 l2CacheSize; +} Nv2080LpwrDifrPrefetchNotification; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl2080_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl2081.h b/src/common/sdk/nvidia/inc/class/cl2081.h new file mode 100644 index 000000000..e1301bd01 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl2081.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl2081_h_ +#define _cl2081_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV2081_BINAPI (0x00002081) + +typedef struct{ + NvU32 reserved; +}NV2081_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif diff --git a/src/common/sdk/nvidia/inc/class/cl2082.h b/src/common/sdk/nvidia/inc/class/cl2082.h new file mode 100644 index 000000000..8beda618e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl2082.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl2082_h_ +#define _cl2082_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV2082_BINAPI_PRIVILEGED (0x00002082) + +typedef struct{ + NvU32 reserved; +}NV2082_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif diff --git a/src/common/sdk/nvidia/inc/class/cl208f.h b/src/common/sdk/nvidia/inc/class/cl208f.h new file mode 100644 index 000000000..c9d7f016f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl208f.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2006 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl208f_h_ +#define _cl208f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* Class within the subdevice used for diagnostic purpose*/ +#define NV20_SUBDEVICE_DIAG (0x0000208f) + +/* event values */ +#define NV208F_NOTIFIERS_SW (0) +#define NV208F_NOTIFIERS_MAXCOUNT (1) + +/* NvNotification[] fields and values */ +#define NV208f_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl208f_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv208fTypedef, Nv20SubdeviceDiag; +#define NV208f_TYPEDEF Nv20SubdeviceDiag + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl208f_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl30f1.h b/src/common/sdk/nvidia/inc/class/cl30f1.h new file mode 100644 index 000000000..3ce657bfb --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl30f1.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl30f1_h_ +#define _cl30f1_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class NV30_GSYNC */ +#define NV30_GSYNC (0x000030F1) + +/* + * A client should use NV01_EVENT_OS_EVENT as hClass and NV30F1_GSYNC_NOTIFIERS_* as + * notify index when allocating an event, if separate event notifications are needed for + * separate event types. + * + * A client should use NV01_EVENT_KERNEL_CALLBACK as hClass and + * NV30F1_GSYNC_NOTIFIERS_ALL as notify index, if a single event is required. + * In this case RM would set the event data to a pointer to an NvNotification structure. + * The info32 field of the NvNotification structure would be a bitmask of + * NV30F1_GSYNC_NOTIFIERS_* values. + */ + +/* NvNotification[] fields and values */ + +/* Framelock sync gain and loss events. These are connector specific events. */ +#define NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(c) (0x00+(c)) +#define NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(c) (0x04+(c)) + +/* Framelock stereo gain and loss events. These are connector specific events. */ +#define NV30F1_GSYNC_NOTIFIERS_STEREO_LOSS(c) (0x08+(c)) +#define NV30F1_GSYNC_NOTIFIERS_STEREO_GAIN(c) (0x0C+(c)) + +/* House cable gain(plug in) and loss(plug out) events. */ +#define NV30F1_GSYNC_NOTIFIERS_HOUSE_GAIN (0x10) +#define NV30F1_GSYNC_NOTIFIERS_HOUSE_LOSS (0x11) + +/* RJ45 cable gain(plug in) and loss(plug out) events. */ +#define NV30F1_GSYNC_NOTIFIERS_RJ45_GAIN (0x12) +#define NV30F1_GSYNC_NOTIFIERS_RJ45_LOSS (0x13) + +#define NV30F1_GSYNC_NOTIFIERS_MAXCOUNT (0x14) + +/* + * For handling all event types. + * Note that on Windows only NV01_EVENT_KERNEL_CALLBACK_EX is handled; as for NV01_EVENT_OS_EVENT, it can only + * signal an event but cannot hand over any information. + */ +#define NV30F1_GSYNC_NOTIFIERS_ALL NV30F1_GSYNC_NOTIFIERS_MAXCOUNT + + +#define NV30F1_GSYNC_CONNECTOR_ONE (0) +#define NV30F1_GSYNC_CONNECTOR_TWO (1) +#define NV30F1_GSYNC_CONNECTOR_THREE (2) +#define NV30F1_GSYNC_CONNECTOR_FOUR (3) + +#define NV30F1_GSYNC_CONNECTOR_PRIMARY NV30F1_GSYNC_CONNECTOR_ONE +#define NV30F1_GSYNC_CONNECTOR_SECONDARY NV30F1_GSYNC_CONNECTOR_TWO + +#define NV30F1_GSYNC_CONNECTOR_COUNT (4) + + +/* NvRmAlloc parameters */ +#define NV30F1_MAX_GSYNCS (0x0000004) +typedef struct { + NvU32 gsyncInstance; +} NV30F1_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl30f1_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl402c.h b/src/common/sdk/nvidia/inc/class/cl402c.h new file mode 100644 index 000000000..9ba7c3b50 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl402c.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl402c_h_ +#define _cl402c_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* I2C operations */ +#define NV40_I2C (0x0000402c) + +typedef volatile struct _cl402c_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv402cTypedef, Nv40I2c; +#define NV402C_TYPEDEF Nv40I2c + + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl402c_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl503b.h b/src/common/sdk/nvidia/inc/class/cl503b.h new file mode 100644 index 000000000..d0b5850b1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl503b.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl503b_h_ +#define _cl503b_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV50_P2P (0x0000503b) + +#define NV503B_FLAGS_P2P_TYPE 0:0 +#define NV503B_FLAGS_P2P_TYPE_GPA 0 +#define NV503B_FLAGS_P2P_TYPE_SPA 1 + +/* NvRmAlloc parameters */ +typedef struct { + NvHandle hSubDevice; /* subDevice handle of local GPU */ + NvHandle hPeerSubDevice; /* subDevice handle of peer GPU */ + NvU32 subDevicePeerIdMask; /* Bit mask of peer ID for SubDevice + * A value of 0 defaults to RM selected + * PeerIdMasks must match in loopback */ + NvU32 peerSubDevicePeerIdMask; /* Bit mask of peer ID for PeerSubDevice + * A value of 0 defaults to RM selected + * PeerIdMasks must match in loopback */ + NvU64 mailboxBar1Addr; /* P2P Mailbox area base offset in BAR1 + * Must have the same value across the GPUs */ + NvU32 mailboxTotalSize; /* Size of the P2P Mailbox area + * Must have the same value across the GPUs */ + NvU32 flags; /* Flag to indicate types/attib of p2p */ +} NV503B_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl503b_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl503c.h b/src/common/sdk/nvidia/inc/class/cl503c.h new file mode 100644 index 000000000..99c334f1d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl503c.h @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl503c_h_ +#define _cl503c_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV50_THIRD_PARTY_P2P (0x0000503c) + +/* NvRmAlloc parameters */ +typedef struct { + NvU32 flags; +} NV503C_ALLOC_PARAMETERS; + +#define NV503C_ALLOC_PARAMETERS_FLAGS_TYPE 1:0 +#define NV503C_ALLOC_PARAMETERS_FLAGS_TYPE_PROPRIETARY (0x00000000) +#define NV503C_ALLOC_PARAMETERS_FLAGS_TYPE_BAR1 (0x00000001) +#define NV503C_ALLOC_PARAMETERS_FLAGS_TYPE_NVLINK (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl503c_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl506f.h b/src/common/sdk/nvidia/inc/class/cl506f.h new file mode 100644 index 000000000..b086c4312 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl506f.h @@ -0,0 +1,157 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl506f_h_ +#define _cl506f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class NV50_CHANNEL_GPFIFO */ +#define NV50_CHANNEL_GPFIFO (0x0000506F) + +/* NvNotification[] elements */ +#define NV506F_NOTIFIERS_RC (0) +#define NV506F_NOTIFIERS_SW (1) +#define NV506F_NOTIFIERS_GR_DEBUG_INTR (2) +#define NV506F_NOTIFIERS_MAXCOUNT (3) + +/* NvNotification[] fields and values */ +#define NV506f_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NV506f_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) +/* pio method data structure */ +typedef volatile struct _cl506f_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv506fTypedef, Nv50ChannelGPFifo; +#define NV506F_TYPEDEF Nv50ChannelGPFifo +/* pio flow control data structure */ +typedef volatile struct _cl506f_tag1 { + NvU32 Ignored00[0x010]; /* 0000-0039*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 SetReference; /* set reference value 0050-0053*/ + NvU32 Ignored02[0x001]; /* 0054-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored03[0x007]; /* 0064-007f*/ + NvU32 Yield; /* engine yield, write only 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x3dc]; +} Nv506fControl, Nv50ControlGPFifo; +/* fields and values */ +#define NV506F_NUMBER_OF_SUBCHANNELS (8) +#define NV506F_SET_OBJECT (0x00000000) +#define NV506F_SET_REFERENCE (0x00000050) +#define NV506F_SET_CONTEXT_DMA_SEMAPHORE (0x00000060) +#define NV506F_SEMAPHORE_OFFSET (0x00000064) +#define NV506F_SEMAPHORE_ACQUIRE (0x00000068) +#define NV506F_SEMAPHORE_RELEASE (0x0000006c) +#define NV506F_YIELD (0x00000080) + +// +// GPFIFO entry format +// +#define NV506F_GP_ENTRY__SIZE 8 +#define NV506F_GP_ENTRY0_DISABLE 0:0 +#define NV506F_GP_ENTRY0_DISABLE_NOT 0x00000000 +#define NV506F_GP_ENTRY0_DISABLE_SKIP 0x00000001 +#define NV506F_GP_ENTRY0_NO_CONTEXT_SWITCH 1:1 +#define NV506F_GP_ENTRY0_NO_CONTEXT_SWITCH_FALSE 0x00000000 +#define NV506F_GP_ENTRY0_NO_CONTEXT_SWITCH_TRUE 0x00000001 
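+// Illustrative sketch: one possible way the two 32-bit words of a GPFIFO entry
+// could be packed from the GET/GET_HI/LENGTH fields defined below, assuming GET
+// holds bits 31:2 of the pushbuffer GPU VA, GET_HI holds bits 39:32, and LENGTH
+// is the segment length in dwords; pushbufVa and numDwords are placeholder
+// variables, not part of this class definition:
+//   NvU32 entry0 = (NvU32)(pushbufVa & 0xFFFFFFFCULL);          // GET    at 31:2
+//   NvU32 entry1 = ((NvU32)(pushbufVa >> 32) & 0xFF)            // GET_HI at  7:0
+//                | (numDwords << 10);                           // LENGTH at 31:10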
+#define NV506F_GP_ENTRY0_GET 31:2 +#define NV506F_GP_ENTRY1_GET_HI 7:0 +#define NV506F_GP_ENTRY1_PRIV 8:8 +#define NV506F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NV506F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NV506F_GP_ENTRY1_LEVEL 9:9 +#define NV506F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NV506F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NV506F_GP_ENTRY1_LENGTH 31:10 + +/* dma method descriptor formats */ +#define NV506F_DMA_PRIMARY_OPCODE 1:0 +#define NV506F_DMA_PRIMARY_OPCODE_USES_SECONDARY (0x00000000) +#define NV506F_DMA_PRIMARY_OPCODE_RESERVED (0x00000003) +#define NV506F_DMA_METHOD_ADDRESS 12:2 +#define NV506F_DMA_METHOD_SUBCHANNEL 15:13 +#define NV506F_DMA_TERT_OP 17:16 +#define NV506F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NV506F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NV506F_DMA_TERT_OP_GRP0_DOUBLE_HEADER (0x00000003) +#define NV506F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NV506F_DMA_TERT_OP_GRP2_RESERVED01 (0x00000001) +#define NV506F_DMA_TERT_OP_GRP2_RESERVED10 (0x00000002) +#define NV506F_DMA_TERT_OP_GRP2_RESERVED11 (0x00000003) +#define NV506F_DMA_METHOD_COUNT 28:18 +#define NV506F_DMA_SEC_OP 31:29 +#define NV506F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NV506F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NV506F_DMA_SEC_OP_GRP3_RESERVED (0x00000003) +#define NV506F_DMA_SEC_OP_GRP4_RESERVED (0x00000004) +#define NV506F_DMA_SEC_OP_GRP5_RESERVED (0x00000005) +#define NV506F_DMA_SEC_OP_GRP6_RESERVED (0x00000006) +#define NV506F_DMA_SEC_OP_GRP7_RESERVED (0x00000007) +#define NV506F_DMA_LONG_COUNT 31:0 +/* dma legacy method descriptor format */ +#define NV506F_DMA_OPCODE2 1:0 +#define NV506F_DMA_OPCODE2_NONE (0x00000000) +#define NV506F_DMA_OPCODE 31:29 +#define NV506F_DMA_OPCODE_METHOD (0x00000000) +#define NV506F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NV506F_DMA_OPCODE3_NONE (0x00000000) +/* dma data format */ +#define NV506F_DMA_DATA 31:0 +/* dma double header descriptor format */ +#define NV506F_DMA_DH_OPCODE2 1:0 +#define NV506F_DMA_DH_OPCODE2_NONE (0x00000000) +#define NV506F_DMA_DH_METHOD_ADDRESS 12:2 +#define NV506F_DMA_DH_METHOD_SUBCHANNEL 15:13 +#define NV506F_DMA_DH_OPCODE3 17:16 +#define NV506F_DMA_DH_OPCODE3_DOUBLE_HEADER (0x00000003) +#define NV506F_DMA_DH_OPCODE 31:29 +#define NV506F_DMA_DH_OPCODE_METHOD (0x00000000) +/* dma double header method count format */ +#define NV506F_DMA_DH_METHOD_COUNT 23:0 +/* dma double header data format */ +#define NV506F_DMA_DH_DATA 31:0 +/* dma nop format */ +#define NV506F_DMA_NOP (0x00000000) +/* dma set subdevice mask format */ +#define NV506F_DMA_SET_SUBDEVICE_MASK (0x00010000) +#define NV506F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NV506F_DMA_OPCODE3 17:16 +#define NV506F_DMA_OPCODE3_SET_SUBDEVICE_MASK (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl506f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl5070.h b/src/common/sdk/nvidia/inc/class/cl5070.h new file mode 100644 index 000000000..0c7fca6dc --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl5070.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 1993-2021, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl5070_h_ +#define _cl5070_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV50_DISPLAY (0x00005070) + +/* event values */ +#define NV5070_NOTIFIERS_SW (0) +#define NV5070_NOTIFIERS_MAXCOUNT (1) + +#define NV5070_NOTIFICATION_STATUS_IN_PROGRESS (0x8000) +#define NV5070_NOTIFICATION_STATUS_BAD_ARGUMENT (0x4000) +#define NV5070_NOTIFICATION_STATUS_ERROR_INVALID_STATE (0x2000) +#define NV5070_NOTIFICATION_STATUS_ERROR_STATE_IN_USE (0x1000) +#define NV5070_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV5070_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl5070_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl5080.h b/src/common/sdk/nvidia/inc/class/cl5080.h new file mode 100644 index 000000000..686956745 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl5080.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _cl5080_h_ +#define _cl5080_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV50_DEFERRED_API_CLASS (0x00005080) + +/* NvRmAlloc parameters */ +typedef struct { + // Should the deferred api completion trigger an event + NvBool notifyCompletion; +} NV5080_ALLOC_PARAMS; + +/* dma method offsets, fields, and values */ +#define NV5080_SET_OBJECT (0x00000000) +#define NV5080_NO_OPERATION (0x00000100) +#define NV5080_DEFERRED_API (0x00000200) +#define NV5080_DEFERRED_API_HANDLE 31:0 + +// Class-specific allocation capabilities + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl5080_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl50a0.h b/src/common/sdk/nvidia/inc/class/cl50a0.h new file mode 100644 index 000000000..5fec57e2a --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl50a0.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2005, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl50a0_h_ +#define _cl50a0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV50_MEMORY_VIRTUAL (0x000050a0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl50a0_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl83de.h b/src/common/sdk/nvidia/inc/class/cl83de.h new file mode 100644 index 000000000..2968227e7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl83de.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl83de_h_ +#define _cl83de_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define GT200_DEBUGGER (0x000083de) + +/* + * Creating the GT200_DEBUGGER object: + * - The debug object is instantiated as a child of either the compute or the + * 3D-class object. + * - The Cuda/GR debugger uses the NV83DE_ALLOC_PARAMETERS to fill in the Client + * and 3D-Class handles of the debuggee and passes this to NvRmAlloc. + * e.g.: + NV83DE_ALLOC_PARAMETERS params; + * memset (&params, 0, sizeof (NV83DE_ALLOC_PARAMETERS)); + * params.hAppClient = DebuggeeClient; + * params.hClass3dObject = 3DClassHandle; + * NvRmAlloc(hDebuggerClient, hDebuggerClient, hDebugger, GT200_DEBUGGER, &params); + */ + +typedef struct { + NvHandle hDebuggerClient_Obsolete; // No longer supported (must be zero) + NvHandle hAppClient; + NvHandle hClass3dObject; +} NV83DE_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl83de_h_ */ + + diff --git a/src/common/sdk/nvidia/inc/class/cl844c.h b/src/common/sdk/nvidia/inc/class/cl844c.h new file mode 100644 index 000000000..c4afe5ff7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl844c.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl844c_h_ +#define _cl844c_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define G84_PERFBUFFER (0x0000844C) + +/* pio method data structure */ +typedef volatile struct _cl844c_tag0 { + NvV32 Reserved00[0x7c0]; +} G844cTypedef, G84PerfBuffer; +#define G844C_TYPEDEF G84PerfBuffer + +#define G844C_PERFBUFFER_MEMORY_HANDLE (0x844C0001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl844c_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl84a0.h b/src/common/sdk/nvidia/inc/class/cl84a0.h new file mode 100644 index 000000000..599a47b06 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl84a0.h @@ -0,0 +1,158 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl84A0_h_ +#define _cl84A0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* + * Class definitions for creating a memory descriptor from a list of page numbers + * in RmAllocMemory. No memory is allocated: only a memory descriptor and + * memory object are created for later use in other calls. These classes + * are used by vGPU to create references to memory assigned to a guest VM. + * In all cases, the list is passed as reference, in the pAddress argument + * of RmAllocMemory, to a Nv01MemoryList structure (cast to a void **). + */ + +/* List of system memory physical page numbers */ +#define NV01_MEMORY_LIST_SYSTEM (0x00000081) +/* List of frame buffer physical page numbers */ +#define NV01_MEMORY_LIST_FBMEM (0x00000082) +/* List of page numbers relative to the start of the specified object */ +#define NV01_MEMORY_LIST_OBJECT (0x00000083) + +/* + * List structure of NV01_MEMORY_LIST_* classes + * + * The pageNumber array is variable in length, with pageCount elements, + * so the allocated size of the structure must reflect that. + * + * FBMEM items apply only to NV01_MEMORY_LIST_FBMEM and to + * NV01_MEMORY_LIST_OBJECT when the underlying object is + * FBMEM (must be zero for other cases) + * + * Nv01MemoryList is deprecated. NV_MEMORY_LIST_ALLOCATION_PARAMS should be used + * instead. + */ +typedef struct Nv01MemoryListRec { + NvHandle hClient; /* client to which object belongs + * (may differ from client creating the mapping). + * May be NV01_NULL_OBJECT, in which case client + * handle is used */ + NvHandle hParent; /* device with which object is associated. + * Must be NV01_NULL_OBJECT if hClient is NV01_NULL_OBJECT. + * Must not be NV01_NULL_OBJECT if hClient is + * not NV01_NULL_OBJECT. 
*/ + NvHandle hObject; /* object to which pages are relative + * (NV01_NULL_OBJECT for NV01_MEMORY_LIST_SYSTEM + * and NV01_MEMORY_LIST_FBMEM) */ + NvHandle hHwResClient;/* client associated with the backdoor vnc surface*/ + NvHandle hHwResDevice;/* device associated with the backdoor vnc surface*/ + NvHandle hHwResHandle;/* handle to hardware resources allocated to + * backdoor vnc surface*/ + NvU32 pteAdjust; /* offset of data in first page */ + NvU32 type; /* FBMEM: NVOS32_TYPE_* */ + NvU32 flags; /* FBMEM: NVOS32_ALLOC_FLAGS_* */ + NvU32 attr; /* FBMEM: NVOS32_ATTR_* */ + NvU32 attr2; /* FBMEM: NVOS32_ATTR2_* */ + NvU32 height; /* FBMEM: height in pixels */ + NvU32 width; /* FBMEM: width in pixels */ + NvU32 format; /* FBMEM: memory kind */ + NvU32 comprcovg; /* FBMEM: compression coverage */ + NvU32 zcullcovg; /* FBMEM: Z-cull coverage */ + NvU32 pageCount; /* count of elements in pageNumber array */ + NvU32 heapOwner; /* heap owner information from client */ + NvU32 reserved_1; /* reserved: must be 0 */ + NvU64 NV_DECLARE_ALIGNED(guestId,8); + /* ID of the guest VM. e.g., domain ID in case of Xen */ + NvU64 NV_DECLARE_ALIGNED(rangeBegin,8); + /* preferred VA range start address */ + NvU64 NV_DECLARE_ALIGNED(rangeEnd,8); + /* preferred VA range end address */ + NvU32 pitch; + NvU32 ctagOffset; + NvU64 size; + NvU64 align; + NvU64 pageNumber[1]; /* variable length array of page numbers */ +} Nv01MemoryList; + +/* + * NV_MEMORY_LIST_ALLOCATION_PARAMS - Allocation params to create memory list + * through NvRmAlloc. + */ +typedef struct +{ + NvHandle hClient; /* client to which object belongs + * (may differ from client creating the mapping). + * May be NV01_NULL_OBJECT, in which case client + * handle is used */ + NvHandle hParent; /* device with which object is associated. + * Must be NV01_NULL_OBJECT if hClient is NV01_NULL_OBJECT. + * Must not be NV01_NULL_OBJECT if hClient is + * not NV01_NULL_OBJECT. */ + NvHandle hObject; /* object to which pages are relative + * (NV01_NULL_OBJECT for NV01_MEMORY_LIST_SYSTEM + * and NV01_MEMORY_LIST_FBMEM) */ + NvHandle hHwResClient;/* client associated with the backdoor vnc surface*/ + NvHandle hHwResDevice;/* device associated with the backdoor vnc surface*/ + NvHandle hHwResHandle;/* handle to hardware resources allocated to + * backdoor vnc surface*/ + NvU32 pteAdjust; /* offset of data in first page */ + NvU32 reserved_0; /* reserved: must be 0 */ + NvU32 type; /* FBMEM: NVOS32_TYPE_* */ + NvU32 flags; /* FBMEM: NVOS32_ALLOC_FLAGS_* */ + NvU32 attr; /* FBMEM: NVOS32_ATTR_* */ + NvU32 attr2; /* FBMEM: NVOS32_ATTR2_* */ + NvU32 height; /* FBMEM: height in pixels */ + NvU32 width; /* FBMEM: width in pixels */ + NvU32 format; /* FBMEM: memory kind */ + NvU32 comprcovg; /* FBMEM: compression coverage */ + NvU32 zcullcovg; /* FBMEM: Z-cull coverage */ + NvU32 pageCount; /* count of elements in pageNumber array */ + NvU32 heapOwner; /* heap owner information from client */ + + NvU64 NV_DECLARE_ALIGNED(guestId,8); + /* ID of the guest VM. 
e.g., domain ID in case of Xen */ + NvU64 NV_DECLARE_ALIGNED(rangeBegin,8); + /* preferred VA range start address */ + NvU64 NV_DECLARE_ALIGNED(rangeEnd,8); + /* preferred VA range end address */ + NvU32 pitch; + NvU32 ctagOffset; + NvU64 size; + NvU64 align; + NvP64 pageNumberList NV_ALIGN_BYTES(8); + NvU64 limit NV_ALIGN_BYTES(8); + NvU32 flagsOs02; +} NV_MEMORY_LIST_ALLOCATION_PARAMS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl84A0_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl85b5sw.h b/src/common/sdk/nvidia/inc/class/cl85b5sw.h new file mode 100644 index 000000000..37ce6516b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl85b5sw.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl85b5sw_h_ +#define _cl85b5sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file is *not* auto-generated. */ + +typedef struct +{ + NvU32 version; // set to 0 + NvU32 engineInstance; // CE instance +} NV85B5_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl85b5sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/cl900e.h b/src/common/sdk/nvidia/inc/class/cl900e.h new file mode 100644 index 000000000..668488977 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl900e.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl900e_h_ +#define _cl900e_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define MPS_COMPUTE (0x0000900E) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl900e_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl9010.h b/src/common/sdk/nvidia/inc/class/cl9010.h new file mode 100644 index 000000000..1ec334b6b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9010.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef SDK_CL9010_H +#define SDK_CL9010_H + +#include "nvtypes.h" + +#define NV9010_VBLANK_CALLBACK 0x9010 + +typedef void (*OSVBLANKCALLBACKPROC)(void * pParm1, void * pParm2); + +typedef struct +{ + OSVBLANKCALLBACKPROC pProc; // Routine to call at vblank time + NvV32 LogicalHead; // Logical Head + void *pParm1; // pParm1 + void *pParm2; // pParm2 +} NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS; + +#endif // SDK_CL9010_H diff --git a/src/common/sdk/nvidia/inc/class/cl902d.h b/src/common/sdk/nvidia/inc/class/cl902d.h new file mode 100644 index 000000000..38bd5cf5b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl902d.h @@ -0,0 +1,1092 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_fermi_twod_a_h_ +#define _cl_fermi_twod_a_h_ + +#define FERMI_TWOD_A 0x902D + +typedef volatile struct fermi_twod_a_struct { + NvU32 SetObject; + NvU32 Reserved_0x04[0x3F]; + NvU32 NoOperation; + NvU32 SetNotifyA; + NvU32 SetNotifyB; + NvU32 Notify; + NvU32 WaitForIdle; + NvU32 LoadMmeInstructionRamPointer; + NvU32 LoadMmeInstructionRam; + NvU32 LoadMmeStartAddressRamPointer; + NvU32 LoadMmeStartAddressRam; + NvU32 SetMmeShadowRamControl; + NvU32 Reserved_0x128[0x2]; + NvU32 SetGlobalRenderEnableA; + NvU32 SetGlobalRenderEnableB; + NvU32 SetGlobalRenderEnableC; + NvU32 SendGoIdle; + NvU32 PmTrigger; + NvU32 Reserved_0x144[0x3]; + NvU32 SetInstrumentationMethodHeader; + NvU32 SetInstrumentationMethodData; + NvU32 Reserved_0x158[0x25]; + NvU32 SetMmeSwitchState; + NvU32 Reserved_0x1F0[0x4]; + NvU32 SetDstFormat; + NvU32 SetDstMemoryLayout; + NvU32 SetDstBlockSize; + NvU32 SetDstDepth; + NvU32 SetDstLayer; + NvU32 SetDstPitch; + NvU32 SetDstWidth; + NvU32 SetDstHeight; + NvU32 SetDstOffsetUpper; + NvU32 SetDstOffsetLower; + NvU32 FlushAndInvalidateRopMiniCache; + NvU32 SetSpareNoop06; + NvU32 SetSrcFormat; + NvU32 SetSrcMemoryLayout; + NvU32 SetSrcBlockSize; + NvU32 SetSrcDepth; + NvU32 TwodInvalidateTextureDataCache; + NvU32 SetSrcPitch; + NvU32 SetSrcWidth; + NvU32 SetSrcHeight; + NvU32 SetSrcOffsetUpper; + NvU32 SetSrcOffsetLower; + NvU32 SetPixelsFromMemorySectorPromotion; + NvU32 SetSpareNoop12; + NvU32 SetNumProcessingClusters; + NvU32 SetRenderEnableA; + NvU32 SetRenderEnableB; + NvU32 SetRenderEnableC; + NvU32 SetSpareNoop08; + NvU32 SetSpareNoop01; + NvU32 SetSpareNoop11; + NvU32 SetSpareNoop07; + NvU32 SetClipX0; + NvU32 SetClipY0; + NvU32 SetClipWidth; + NvU32 SetClipHeight; + NvU32 SetClipEnable; + NvU32 SetColorKeyFormat; + NvU32 SetColorKey; + NvU32 SetColorKeyEnable; + NvU32 SetRop; + NvU32 SetBeta1; + NvU32 SetBeta4; + NvU32 SetOperation; + NvU32 SetPatternOffset; + NvU32 SetPatternSelect; + NvU32 SetDstColorRenderToZetaSurface; + NvU32 SetSpareNoop04; + NvU32 SetSpareNoop15; + NvU32 SetSpareNoop13; + NvU32 SetSpareNoop03; + NvU32 SetSpareNoop14; + NvU32 SetSpareNoop02; + NvU32 SetCompression; + NvU32 SetSpareNoop09; + NvU32 SetRenderEnableOverride; + NvU32 SetPixelsFromMemoryDirection; + NvU32 SetSpareNoop10; + NvU32 SetMonochromePatternColorFormat; + NvU32 SetMonochromePatternFormat; + NvU32 SetMonochromePatternColor0; + NvU32 SetMonochromePatternColor1; + NvU32 SetMonochromePattern0; + NvU32 SetMonochromePattern1; + NvU32 ColorPatternX8R8G8B8[0x40]; + NvU32 ColorPatternR5G6B5[0x20]; + NvU32 ColorPatternX1R5G5B5[0x20]; + NvU32 ColorPatternY8[0x10]; + NvU32 SetRenderSolidPrimColor0; + NvU32 SetRenderSolidPrimColor1; + NvU32 SetRenderSolidPrimColor2; + NvU32 SetRenderSolidPrimColor3; + NvU32 SetMmeMemAddressA; + NvU32 SetMmeMemAddressB; + NvU32 SetMmeDataRamAddress; + NvU32 MmeDmaRead; + NvU32 MmeDmaReadFifoed; + NvU32 MmeDmaWrite; + NvU32 MmeDmaReduction; + NvU32 MmeDmaSysmembar; + NvU32 MmeDmaSync; + NvU32 SetMmeDataFifoConfig; + NvU32 Reserved_0x578[0x2]; + NvU32 RenderSolidPrimMode; + NvU32 SetRenderSolidPrimColorFormat; + NvU32 SetRenderSolidPrimColor; + NvU32 SetRenderSolidLineTieBreakBits; + NvU32 Reserved_0x590[0x14]; + NvU32 RenderSolidPrimPointXY; + NvU32 Reserved_0x5E4[0x7]; + 
struct { + NvU32 SetX; + NvU32 Y; + } RenderSolidPrimPoint[0x40]; + NvU32 SetPixelsFromCpuDataType; + NvU32 SetPixelsFromCpuColorFormat; + NvU32 SetPixelsFromCpuIndexFormat; + NvU32 SetPixelsFromCpuMonoFormat; + NvU32 SetPixelsFromCpuWrap; + NvU32 SetPixelsFromCpuColor0; + NvU32 SetPixelsFromCpuColor1; + NvU32 SetPixelsFromCpuMonoOpacity; + NvU32 Reserved_0x820[0x6]; + NvU32 SetPixelsFromCpuSrcWidth; + NvU32 SetPixelsFromCpuSrcHeight; + NvU32 SetPixelsFromCpuDxDuFrac; + NvU32 SetPixelsFromCpuDxDuInt; + NvU32 SetPixelsFromCpuDyDvFrac; + NvU32 SetPixelsFromCpuDyDvInt; + NvU32 SetPixelsFromCpuDstX0Frac; + NvU32 SetPixelsFromCpuDstX0Int; + NvU32 SetPixelsFromCpuDstY0Frac; + NvU32 SetPixelsFromCpuDstY0Int; + NvU32 PixelsFromCpuData; + NvU32 Reserved_0x864[0x3]; + NvU32 SetBigEndianControl; + NvU32 Reserved_0x874[0x3]; + NvU32 SetPixelsFromMemoryBlockShape; + NvU32 SetPixelsFromMemoryCorralSize; + NvU32 SetPixelsFromMemorySafeOverlap; + NvU32 SetPixelsFromMemorySampleMode; + NvU32 Reserved_0x890[0x8]; + NvU32 SetPixelsFromMemoryDstX0; + NvU32 SetPixelsFromMemoryDstY0; + NvU32 SetPixelsFromMemoryDstWidth; + NvU32 SetPixelsFromMemoryDstHeight; + NvU32 SetPixelsFromMemoryDuDxFrac; + NvU32 SetPixelsFromMemoryDuDxInt; + NvU32 SetPixelsFromMemoryDvDyFrac; + NvU32 SetPixelsFromMemoryDvDyInt; + NvU32 SetPixelsFromMemorySrcX0Frac; + NvU32 SetPixelsFromMemorySrcX0Int; + NvU32 SetPixelsFromMemorySrcY0Frac; + NvU32 PixelsFromMemorySrcY0Int; + NvU32 SetFalcon00; + NvU32 SetFalcon01; + NvU32 SetFalcon02; + NvU32 SetFalcon03; + NvU32 SetFalcon04; + NvU32 SetFalcon05; + NvU32 SetFalcon06; + NvU32 SetFalcon07; + NvU32 SetFalcon08; + NvU32 SetFalcon09; + NvU32 SetFalcon10; + NvU32 SetFalcon11; + NvU32 SetFalcon12; + NvU32 SetFalcon13; + NvU32 SetFalcon14; + NvU32 SetFalcon15; + NvU32 SetFalcon16; + NvU32 SetFalcon17; + NvU32 SetFalcon18; + NvU32 SetFalcon19; + NvU32 SetFalcon20; + NvU32 SetFalcon21; + NvU32 SetFalcon22; + NvU32 SetFalcon23; + NvU32 SetFalcon24; + NvU32 SetFalcon25; + NvU32 SetFalcon26; + NvU32 SetFalcon27; + NvU32 SetFalcon28; + NvU32 SetFalcon29; + NvU32 SetFalcon30; + NvU32 SetFalcon31; + NvU32 Reserved_0x960[0x123]; + NvU32 MmeDmaWriteMethodBarrier; + NvU32 Reserved_0xDF0[0x984]; + NvU32 SetMmeShadowScratch[0x100]; + struct { + NvU32 Macro; + NvU32 Data; + } CallMme[0xE0]; +} fermi_twod_a_t; + + +#define NV902D_SET_OBJECT 0x0000 +#define NV902D_SET_OBJECT_CLASS_ID 15:0 +#define NV902D_SET_OBJECT_ENGINE_ID 20:16 + +#define NV902D_NO_OPERATION 0x0100 +#define NV902D_NO_OPERATION_V 31:0 + +#define NV902D_SET_NOTIFY_A 0x0104 +#define NV902D_SET_NOTIFY_A_ADDRESS_UPPER 24:0 + +#define NV902D_SET_NOTIFY_B 0x0108 +#define NV902D_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NV902D_NOTIFY 0x010c +#define NV902D_NOTIFY_TYPE 31:0 +#define NV902D_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NV902D_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NV902D_WAIT_FOR_IDLE 0x0110 +#define NV902D_WAIT_FOR_IDLE_V 31:0 + +#define NV902D_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NV902D_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NV902D_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NV902D_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NV902D_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NV902D_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NV902D_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NV902D_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NV902D_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 
+#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003 + +#define NV902D_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NV902D_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NV902D_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NV902D_SEND_GO_IDLE 0x013c +#define NV902D_SEND_GO_IDLE_V 31:0 + +#define NV902D_PM_TRIGGER 0x0140 +#define NV902D_PM_TRIGGER_V 31:0 + +#define NV902D_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NV902D_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NV902D_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NV902D_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NV902D_SET_MME_SWITCH_STATE 0x01ec +#define NV902D_SET_MME_SWITCH_STATE_VALID 0:0 +#define NV902D_SET_MME_SWITCH_STATE_VALID_FALSE 0x00000000 +#define NV902D_SET_MME_SWITCH_STATE_VALID_TRUE 0x00000001 +#define NV902D_SET_MME_SWITCH_STATE_SAVE_MACRO 11:4 +#define NV902D_SET_MME_SWITCH_STATE_RESTORE_MACRO 19:12 + +#define NV902D_SET_DST_FORMAT 0x0200 +#define NV902D_SET_DST_FORMAT_V 7:0 +#define NV902D_SET_DST_FORMAT_V_A8R8G8B8 0x000000CF +#define NV902D_SET_DST_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NV902D_SET_DST_FORMAT_V_A2R10G10B10 0x000000DF +#define NV902D_SET_DST_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV902D_SET_DST_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NV902D_SET_DST_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV902D_SET_DST_FORMAT_V_X8R8G8B8 0x000000E6 +#define NV902D_SET_DST_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define NV902D_SET_DST_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV902D_SET_DST_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NV902D_SET_DST_FORMAT_V_R5G6B5 0x000000E8 +#define NV902D_SET_DST_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV902D_SET_DST_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV902D_SET_DST_FORMAT_V_Y8 0x000000F3 +#define NV902D_SET_DST_FORMAT_V_Y16 0x000000EE +#define NV902D_SET_DST_FORMAT_V_Y32 0x000000FF +#define NV902D_SET_DST_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV902D_SET_DST_FORMAT_V_O1R5G5B5 0x000000FC +#define NV902D_SET_DST_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV902D_SET_DST_FORMAT_V_O8R8G8B8 0x000000FE +#define NV902D_SET_DST_FORMAT_V_Y1_8X8 0x0000001C +#define NV902D_SET_DST_FORMAT_V_RF16 0x000000F2 +#define NV902D_SET_DST_FORMAT_V_RF32 0x000000E5 +#define NV902D_SET_DST_FORMAT_V_RF32_GF32 0x000000CB +#define NV902D_SET_DST_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NV902D_SET_DST_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NV902D_SET_DST_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NV902D_SET_DST_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3 +#define NV902D_SET_DST_FORMAT_V_R16_G16_B16_A16 0x000000C6 +#define NV902D_SET_DST_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7 +#define NV902D_SET_DST_FORMAT_V_BF10GF11RF11 0x000000E0 +#define NV902D_SET_DST_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NV902D_SET_DST_FORMAT_V_RF16_GF16 0x000000DE +#define NV902D_SET_DST_FORMAT_V_R16_G16 0x000000DA 
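The NV902D_* method fields in this header are written in a "high:low" bit-range notation rather than as explicit shift/mask pairs. As a hedged illustration of how such definitions are commonly consumed, the sketch below defines hypothetical SKETCH_* helper macros (the driver tree ships its own DRF-style helpers for this; these names and the include path are invented for the example) and uses them to build a data word for the SET_MME_SHADOW_RAM_CONTROL method defined earlier.

/* Illustrative sketch only -- not part of cl902d.h. */
#include "nvtypes.h"
#include "class/cl902d.h"                  /* assumed include path for this header */

/* Because each field is spelled "high:low", a ternary expression can select
 * either end of the range: (1 ? 1:0) evaluates to 1, (0 ? 1:0) to 0.         */
#define SKETCH_HI(field)     (1 ? field)
#define SKETCH_LO(field)     (0 ? field)
#define SKETCH_MASK(field)   ((0xFFFFFFFFu >> (31 - SKETCH_HI(field))) & \
                              ~((1u << SKETCH_LO(field)) - 1u))
#define SKETCH_NUM(field, n) (((NvU32)(n) << SKETCH_LO(field)) & SKETCH_MASK(field))

/* Data word for SET_MME_SHADOW_RAM_CONTROL (method offset 0x0124), selecting
 * METHOD_PASSTHROUGH in the MODE field (bits 1:0).                           */
static const NvU32 shadowRamCtrl =
    SKETCH_NUM(NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE,
               NV902D_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH);

The same pattern applies to every field in this header; the resulting value is then delivered to the class through the channel's method interface (see the pushbuffer formats in cl906f.h later in this change).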
+#define NV902D_SET_DST_FORMAT_V_RN16_GN16 0x000000DB +#define NV902D_SET_DST_FORMAT_V_G8R8 0x000000EA +#define NV902D_SET_DST_FORMAT_V_GN8RN8 0x000000EB +#define NV902D_SET_DST_FORMAT_V_RN16 0x000000EF +#define NV902D_SET_DST_FORMAT_V_RN8 0x000000F4 +#define NV902D_SET_DST_FORMAT_V_A8 0x000000F7 + +#define NV902D_SET_DST_MEMORY_LAYOUT 0x0204 +#define NV902D_SET_DST_MEMORY_LAYOUT_V 0:0 +#define NV902D_SET_DST_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000 +#define NV902D_SET_DST_MEMORY_LAYOUT_V_PITCH 0x00000001 + +#define NV902D_SET_DST_BLOCK_SIZE 0x0208 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT 6:4 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NV902D_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH 10:8 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NV902D_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NV902D_SET_DST_DEPTH 0x020c +#define NV902D_SET_DST_DEPTH_V 31:0 + +#define NV902D_SET_DST_LAYER 0x0210 +#define NV902D_SET_DST_LAYER_V 31:0 + +#define NV902D_SET_DST_PITCH 0x0214 +#define NV902D_SET_DST_PITCH_V 31:0 + +#define NV902D_SET_DST_WIDTH 0x0218 +#define NV902D_SET_DST_WIDTH_V 31:0 + +#define NV902D_SET_DST_HEIGHT 0x021c +#define NV902D_SET_DST_HEIGHT_V 31:0 + +#define NV902D_SET_DST_OFFSET_UPPER 0x0220 +#define NV902D_SET_DST_OFFSET_UPPER_V 7:0 + +#define NV902D_SET_DST_OFFSET_LOWER 0x0224 +#define NV902D_SET_DST_OFFSET_LOWER_V 31:0 + +#define NV902D_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE 0x0228 +#define NV902D_FLUSH_AND_INVALIDATE_ROP_MINI_CACHE_V 0:0 + +#define NV902D_SET_SPARE_NOOP06 0x022c +#define NV902D_SET_SPARE_NOOP06_V 31:0 + +#define NV902D_SET_SRC_FORMAT 0x0230 +#define NV902D_SET_SRC_FORMAT_V 7:0 +#define NV902D_SET_SRC_FORMAT_V_A8R8G8B8 0x000000CF +#define NV902D_SET_SRC_FORMAT_V_A8RL8GL8BL8 0x000000D0 +#define NV902D_SET_SRC_FORMAT_V_A2R10G10B10 0x000000DF +#define NV902D_SET_SRC_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV902D_SET_SRC_FORMAT_V_A8BL8GL8RL8 0x000000D6 +#define NV902D_SET_SRC_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV902D_SET_SRC_FORMAT_V_X8R8G8B8 0x000000E6 +#define NV902D_SET_SRC_FORMAT_V_X8RL8GL8BL8 0x000000E7 +#define NV902D_SET_SRC_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV902D_SET_SRC_FORMAT_V_X8BL8GL8RL8 0x000000FA +#define NV902D_SET_SRC_FORMAT_V_R5G6B5 0x000000E8 +#define NV902D_SET_SRC_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV902D_SET_SRC_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV902D_SET_SRC_FORMAT_V_Y8 0x000000F3 +#define NV902D_SET_SRC_FORMAT_V_AY8 0x0000001D +#define NV902D_SET_SRC_FORMAT_V_Y16 0x000000EE +#define NV902D_SET_SRC_FORMAT_V_Y32 0x000000FF +#define NV902D_SET_SRC_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV902D_SET_SRC_FORMAT_V_O1R5G5B5 0x000000FC +#define NV902D_SET_SRC_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV902D_SET_SRC_FORMAT_V_O8R8G8B8 0x000000FE +#define NV902D_SET_SRC_FORMAT_V_Y1_8X8 0x0000001C +#define NV902D_SET_SRC_FORMAT_V_RF16 0x000000F2 +#define NV902D_SET_SRC_FORMAT_V_RF32 0x000000E5 +#define NV902D_SET_SRC_FORMAT_V_RF32_GF32 
0x000000CB +#define NV902D_SET_SRC_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NV902D_SET_SRC_FORMAT_V_RF16_GF16_BF16_X16 0x000000CE +#define NV902D_SET_SRC_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NV902D_SET_SRC_FORMAT_V_RF32_GF32_BF32_X32 0x000000C3 +#define NV902D_SET_SRC_FORMAT_V_R16_G16_B16_A16 0x000000C6 +#define NV902D_SET_SRC_FORMAT_V_RN16_GN16_BN16_AN16 0x000000C7 +#define NV902D_SET_SRC_FORMAT_V_BF10GF11RF11 0x000000E0 +#define NV902D_SET_SRC_FORMAT_V_AN8BN8GN8RN8 0x000000D7 +#define NV902D_SET_SRC_FORMAT_V_RF16_GF16 0x000000DE +#define NV902D_SET_SRC_FORMAT_V_R16_G16 0x000000DA +#define NV902D_SET_SRC_FORMAT_V_RN16_GN16 0x000000DB +#define NV902D_SET_SRC_FORMAT_V_G8R8 0x000000EA +#define NV902D_SET_SRC_FORMAT_V_GN8RN8 0x000000EB +#define NV902D_SET_SRC_FORMAT_V_RN16 0x000000EF +#define NV902D_SET_SRC_FORMAT_V_RN8 0x000000F4 +#define NV902D_SET_SRC_FORMAT_V_A8 0x000000F7 + +#define NV902D_SET_SRC_MEMORY_LAYOUT 0x0234 +#define NV902D_SET_SRC_MEMORY_LAYOUT_V 0:0 +#define NV902D_SET_SRC_MEMORY_LAYOUT_V_BLOCKLINEAR 0x00000000 +#define NV902D_SET_SRC_MEMORY_LAYOUT_V_PITCH 0x00000001 + +#define NV902D_SET_SRC_BLOCK_SIZE 0x0238 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT 6:4 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NV902D_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH 10:8 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NV902D_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NV902D_SET_SRC_DEPTH 0x023c +#define NV902D_SET_SRC_DEPTH_V 31:0 + +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE 0x0240 +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE_V 1:0 +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE_V_L1_ONLY 0x00000000 +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE_V_L2_ONLY 0x00000001 +#define NV902D_TWOD_INVALIDATE_TEXTURE_DATA_CACHE_V_L1_AND_L2 0x00000002 + +#define NV902D_SET_SRC_PITCH 0x0244 +#define NV902D_SET_SRC_PITCH_V 31:0 + +#define NV902D_SET_SRC_WIDTH 0x0248 +#define NV902D_SET_SRC_WIDTH_V 31:0 + +#define NV902D_SET_SRC_HEIGHT 0x024c +#define NV902D_SET_SRC_HEIGHT_V 31:0 + +#define NV902D_SET_SRC_OFFSET_UPPER 0x0250 +#define NV902D_SET_SRC_OFFSET_UPPER_V 7:0 + +#define NV902D_SET_SRC_OFFSET_LOWER 0x0254 +#define NV902D_SET_SRC_OFFSET_LOWER_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION 0x0258 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V 1:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V_NO_PROMOTION 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V_PROMOTE_TO_2_V 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V_PROMOTE_TO_2_H 0x00000002 +#define NV902D_SET_PIXELS_FROM_MEMORY_SECTOR_PROMOTION_V_PROMOTE_TO_4 0x00000003 + +#define NV902D_SET_SPARE_NOOP12 0x025c +#define NV902D_SET_SPARE_NOOP12_V 31:0 + +#define NV902D_SET_NUM_PROCESSING_CLUSTERS 0x0260 +#define NV902D_SET_NUM_PROCESSING_CLUSTERS_V 0:0 +#define NV902D_SET_NUM_PROCESSING_CLUSTERS_V_ALL 0x00000000 +#define 
NV902D_SET_NUM_PROCESSING_CLUSTERS_V_ONE 0x00000001 + +#define NV902D_SET_RENDER_ENABLE_A 0x0264 +#define NV902D_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NV902D_SET_RENDER_ENABLE_B 0x0268 +#define NV902D_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NV902D_SET_RENDER_ENABLE_C 0x026c +#define NV902D_SET_RENDER_ENABLE_C_MODE 2:0 +#define NV902D_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NV902D_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NV902D_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NV902D_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NV902D_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NV902D_SET_SPARE_NOOP08 0x0270 +#define NV902D_SET_SPARE_NOOP08_V 31:0 + +#define NV902D_SET_SPARE_NOOP01 0x0274 +#define NV902D_SET_SPARE_NOOP01_V 31:0 + +#define NV902D_SET_SPARE_NOOP11 0x0278 +#define NV902D_SET_SPARE_NOOP11_V 31:0 + +#define NV902D_SET_SPARE_NOOP07 0x027c +#define NV902D_SET_SPARE_NOOP07_V 31:0 + +#define NV902D_SET_CLIP_X0 0x0280 +#define NV902D_SET_CLIP_X0_V 31:0 + +#define NV902D_SET_CLIP_Y0 0x0284 +#define NV902D_SET_CLIP_Y0_V 31:0 + +#define NV902D_SET_CLIP_WIDTH 0x0288 +#define NV902D_SET_CLIP_WIDTH_V 31:0 + +#define NV902D_SET_CLIP_HEIGHT 0x028c +#define NV902D_SET_CLIP_HEIGHT_V 31:0 + +#define NV902D_SET_CLIP_ENABLE 0x0290 +#define NV902D_SET_CLIP_ENABLE_V 0:0 +#define NV902D_SET_CLIP_ENABLE_V_FALSE 0x00000000 +#define NV902D_SET_CLIP_ENABLE_V_TRUE 0x00000001 + +#define NV902D_SET_COLOR_KEY_FORMAT 0x0294 +#define NV902D_SET_COLOR_KEY_FORMAT_V 2:0 +#define NV902D_SET_COLOR_KEY_FORMAT_V_A16R5G6B5 0x00000000 +#define NV902D_SET_COLOR_KEY_FORMAT_V_A1R5G5B5 0x00000001 +#define NV902D_SET_COLOR_KEY_FORMAT_V_A8R8G8B8 0x00000002 +#define NV902D_SET_COLOR_KEY_FORMAT_V_A2R10G10B10 0x00000003 +#define NV902D_SET_COLOR_KEY_FORMAT_V_Y8 0x00000004 +#define NV902D_SET_COLOR_KEY_FORMAT_V_Y16 0x00000005 +#define NV902D_SET_COLOR_KEY_FORMAT_V_Y32 0x00000006 + +#define NV902D_SET_COLOR_KEY 0x0298 +#define NV902D_SET_COLOR_KEY_V 31:0 + +#define NV902D_SET_COLOR_KEY_ENABLE 0x029c +#define NV902D_SET_COLOR_KEY_ENABLE_V 0:0 +#define NV902D_SET_COLOR_KEY_ENABLE_V_FALSE 0x00000000 +#define NV902D_SET_COLOR_KEY_ENABLE_V_TRUE 0x00000001 + +#define NV902D_SET_ROP 0x02a0 +#define NV902D_SET_ROP_V 7:0 + +#define NV902D_SET_BETA1 0x02a4 +#define NV902D_SET_BETA1_V 31:0 + +#define NV902D_SET_BETA4 0x02a8 +#define NV902D_SET_BETA4_B 7:0 +#define NV902D_SET_BETA4_G 15:8 +#define NV902D_SET_BETA4_R 23:16 +#define NV902D_SET_BETA4_A 31:24 + +#define NV902D_SET_OPERATION 0x02ac +#define NV902D_SET_OPERATION_V 2:0 +#define NV902D_SET_OPERATION_V_SRCCOPY_AND 0x00000000 +#define NV902D_SET_OPERATION_V_ROP_AND 0x00000001 +#define NV902D_SET_OPERATION_V_BLEND_AND 0x00000002 +#define NV902D_SET_OPERATION_V_SRCCOPY 0x00000003 +#define NV902D_SET_OPERATION_V_ROP 0x00000004 +#define NV902D_SET_OPERATION_V_SRCCOPY_PREMULT 0x00000005 +#define NV902D_SET_OPERATION_V_BLEND_PREMULT 0x00000006 + +#define NV902D_SET_PATTERN_OFFSET 0x02b0 +#define NV902D_SET_PATTERN_OFFSET_X 5:0 +#define NV902D_SET_PATTERN_OFFSET_Y 13:8 + +#define NV902D_SET_PATTERN_SELECT 0x02b4 +#define NV902D_SET_PATTERN_SELECT_V 1:0 +#define NV902D_SET_PATTERN_SELECT_V_MONOCHROME_8x8 0x00000000 +#define NV902D_SET_PATTERN_SELECT_V_MONOCHROME_64x1 0x00000001 +#define NV902D_SET_PATTERN_SELECT_V_MONOCHROME_1x64 0x00000002 +#define NV902D_SET_PATTERN_SELECT_V_COLOR 0x00000003 + +#define NV902D_SET_DST_COLOR_RENDER_TO_ZETA_SURFACE 0x02b8 +#define NV902D_SET_DST_COLOR_RENDER_TO_ZETA_SURFACE_V 
0:0 +#define NV902D_SET_DST_COLOR_RENDER_TO_ZETA_SURFACE_V_FALSE 0x00000000 +#define NV902D_SET_DST_COLOR_RENDER_TO_ZETA_SURFACE_V_TRUE 0x00000001 + +#define NV902D_SET_SPARE_NOOP04 0x02bc +#define NV902D_SET_SPARE_NOOP04_V 31:0 + +#define NV902D_SET_SPARE_NOOP15 0x02c0 +#define NV902D_SET_SPARE_NOOP15_V 31:0 + +#define NV902D_SET_SPARE_NOOP13 0x02c4 +#define NV902D_SET_SPARE_NOOP13_V 31:0 + +#define NV902D_SET_SPARE_NOOP03 0x02c8 +#define NV902D_SET_SPARE_NOOP03_V 31:0 + +#define NV902D_SET_SPARE_NOOP14 0x02cc +#define NV902D_SET_SPARE_NOOP14_V 31:0 + +#define NV902D_SET_SPARE_NOOP02 0x02d0 +#define NV902D_SET_SPARE_NOOP02_V 31:0 + +#define NV902D_SET_COMPRESSION 0x02d4 +#define NV902D_SET_COMPRESSION_ENABLE 0:0 +#define NV902D_SET_COMPRESSION_ENABLE_FALSE 0x00000000 +#define NV902D_SET_COMPRESSION_ENABLE_TRUE 0x00000001 + +#define NV902D_SET_SPARE_NOOP09 0x02d8 +#define NV902D_SET_SPARE_NOOP09_V 31:0 + +#define NV902D_SET_RENDER_ENABLE_OVERRIDE 0x02dc +#define NV902D_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NV902D_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NV902D_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NV902D_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION 0x02e0 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_HORIZONTAL 1:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_HORIZONTAL_HW_DECIDES 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_HORIZONTAL_LEFT_TO_RIGHT 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_HORIZONTAL_RIGHT_TO_LEFT 0x00000002 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_VERTICAL 5:4 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_VERTICAL_HW_DECIDES 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_VERTICAL_TOP_TO_BOTTOM 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_DIRECTION_VERTICAL_BOTTOM_TO_TOP 0x00000002 + +#define NV902D_SET_SPARE_NOOP10 0x02e4 +#define NV902D_SET_SPARE_NOOP10_V 31:0 + +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT 0x02e8 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V 2:0 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8R5G6B5 0x00000000 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A1R5G5B5 0x00000001 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8R8G8B8 0x00000002 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8Y8 0x00000003 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_A8X8Y16 0x00000004 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_Y32 0x00000005 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR_FORMAT_V_BYTE_EXPAND 0x00000006 + +#define NV902D_SET_MONOCHROME_PATTERN_FORMAT 0x02ec +#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V 0:0 +#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V_CGA6_M1 0x00000000 +#define NV902D_SET_MONOCHROME_PATTERN_FORMAT_V_LE_M1 0x00000001 + +#define NV902D_SET_MONOCHROME_PATTERN_COLOR0 0x02f0 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR0_V 31:0 + +#define NV902D_SET_MONOCHROME_PATTERN_COLOR1 0x02f4 +#define NV902D_SET_MONOCHROME_PATTERN_COLOR1_V 31:0 + +#define NV902D_SET_MONOCHROME_PATTERN0 0x02f8 +#define NV902D_SET_MONOCHROME_PATTERN0_V 31:0 + +#define NV902D_SET_MONOCHROME_PATTERN1 0x02fc +#define NV902D_SET_MONOCHROME_PATTERN1_V 31:0 + +#define NV902D_COLOR_PATTERN_X8R8G8B8(i) (0x0300+(i)*4) +#define NV902D_COLOR_PATTERN_X8R8G8B8_B0 7:0 +#define NV902D_COLOR_PATTERN_X8R8G8B8_G0 15:8 +#define NV902D_COLOR_PATTERN_X8R8G8B8_R0 23:16 +#define NV902D_COLOR_PATTERN_X8R8G8B8_IGNORE0 31:24 + +#define 
NV902D_COLOR_PATTERN_R5G6B5(i) (0x0400+(i)*4) +#define NV902D_COLOR_PATTERN_R5G6B5_B0 4:0 +#define NV902D_COLOR_PATTERN_R5G6B5_G0 10:5 +#define NV902D_COLOR_PATTERN_R5G6B5_R0 15:11 +#define NV902D_COLOR_PATTERN_R5G6B5_B1 20:16 +#define NV902D_COLOR_PATTERN_R5G6B5_G1 26:21 +#define NV902D_COLOR_PATTERN_R5G6B5_R1 31:27 + +#define NV902D_COLOR_PATTERN_X1R5G5B5(i) (0x0480+(i)*4) +#define NV902D_COLOR_PATTERN_X1R5G5B5_B0 4:0 +#define NV902D_COLOR_PATTERN_X1R5G5B5_G0 9:5 +#define NV902D_COLOR_PATTERN_X1R5G5B5_R0 14:10 +#define NV902D_COLOR_PATTERN_X1R5G5B5_IGNORE0 15:15 +#define NV902D_COLOR_PATTERN_X1R5G5B5_B1 20:16 +#define NV902D_COLOR_PATTERN_X1R5G5B5_G1 25:21 +#define NV902D_COLOR_PATTERN_X1R5G5B5_R1 30:26 +#define NV902D_COLOR_PATTERN_X1R5G5B5_IGNORE1 31:31 + +#define NV902D_COLOR_PATTERN_Y8(i) (0x0500+(i)*4) +#define NV902D_COLOR_PATTERN_Y8_Y0 7:0 +#define NV902D_COLOR_PATTERN_Y8_Y1 15:8 +#define NV902D_COLOR_PATTERN_Y8_Y2 23:16 +#define NV902D_COLOR_PATTERN_Y8_Y3 31:24 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR0 0x0540 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR0_V 31:0 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR1 0x0544 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR1_V 31:0 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR2 0x0548 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR2_V 31:0 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR3 0x054c +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR3_V 31:0 + +#define NV902D_SET_MME_MEM_ADDRESS_A 0x0550 +#define NV902D_SET_MME_MEM_ADDRESS_A_UPPER 24:0 + +#define NV902D_SET_MME_MEM_ADDRESS_B 0x0554 +#define NV902D_SET_MME_MEM_ADDRESS_B_LOWER 31:0 + +#define NV902D_SET_MME_DATA_RAM_ADDRESS 0x0558 +#define NV902D_SET_MME_DATA_RAM_ADDRESS_WORD 31:0 + +#define NV902D_MME_DMA_READ 0x055c +#define NV902D_MME_DMA_READ_LENGTH 31:0 + +#define NV902D_MME_DMA_READ_FIFOED 0x0560 +#define NV902D_MME_DMA_READ_FIFOED_LENGTH 31:0 + +#define NV902D_MME_DMA_WRITE 0x0564 +#define NV902D_MME_DMA_WRITE_LENGTH 31:0 + +#define NV902D_MME_DMA_REDUCTION 0x0568 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP 2:0 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_ADD 0x00000000 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_MIN 0x00000001 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX 0x00000002 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_INC 0x00000003 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_DEC 0x00000004 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_AND 0x00000005 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_OR 0x00000006 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_OP_RED_XOR 0x00000007 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_FORMAT 5:4 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_FORMAT_UNSIGNED 0x00000000 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED 0x00000001 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_SIZE 8:8 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_SIZE_FOUR_BYTES 0x00000000 +#define NV902D_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES 0x00000001 + +#define NV902D_MME_DMA_SYSMEMBAR 0x056c +#define NV902D_MME_DMA_SYSMEMBAR_V 0:0 + +#define NV902D_MME_DMA_SYNC 0x0570 +#define NV902D_MME_DMA_SYNC_VALUE 31:0 + +#define NV902D_SET_MME_DATA_FIFO_CONFIG 0x0574 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE 2:0 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_0KB 0x00000000 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_4KB 0x00000001 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_8KB 0x00000002 +#define NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_12KB 0x00000003 +#define 
NV902D_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_16KB 0x00000004 + +#define NV902D_RENDER_SOLID_PRIM_MODE 0x0580 +#define NV902D_RENDER_SOLID_PRIM_MODE_V 2:0 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_POINTS 0x00000000 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_LINES 0x00000001 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_POLYLINE 0x00000002 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_TRIANGLES 0x00000003 +#define NV902D_RENDER_SOLID_PRIM_MODE_V_RECTS 0x00000004 + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT 0x0584 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V 7:0 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF32_GF32_BF32_AF32 0x000000C0 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF16_GF16_BF16_AF16 0x000000CA +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_RF32_GF32 0x000000CB +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8R8G8B8 0x000000CF +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2R10G10B10 0x000000DF +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8R8G8B8 0x000000E6 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_R5G6B5 0x000000E8 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y8 0x000000F3 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y16 0x000000EE +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Y32 0x000000FF +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O1R5G5B5 0x000000FC +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_FORMAT_V_O8R8G8B8 0x000000FE + +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR 0x0588 +#define NV902D_SET_RENDER_SOLID_PRIM_COLOR_V 31:0 + +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS 0x058c +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS_XMAJ__XINC__YINC 0:0 +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS_XMAJ__XDEC__YINC 4:4 +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS_YMAJ__XINC__YINC 8:8 +#define NV902D_SET_RENDER_SOLID_LINE_TIE_BREAK_BITS_YMAJ__XDEC__YINC 12:12 + +#define NV902D_RENDER_SOLID_PRIM_POINT_X_Y 0x05e0 +#define NV902D_RENDER_SOLID_PRIM_POINT_X_Y_X 15:0 +#define NV902D_RENDER_SOLID_PRIM_POINT_X_Y_Y 31:16 + +#define NV902D_RENDER_SOLID_PRIM_POINT_SET_X(j) (0x0600+(j)*8) +#define NV902D_RENDER_SOLID_PRIM_POINT_SET_X_V 31:0 + +#define NV902D_RENDER_SOLID_PRIM_POINT_Y(j) (0x0604+(j)*8) +#define NV902D_RENDER_SOLID_PRIM_POINT_Y_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE 0x0800 +#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V 0:0 +#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_COLOR 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_DATA_TYPE_V_INDEX 0x00000001 + +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT 0x0804 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V 7:0 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8R8G8B8 0x000000CF +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2R10G10B10 0x000000DF +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A8B8G8R8 0x000000D5 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A2B10G10R10 0x000000D1 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8R8G8B8 0x000000E6 +#define 
NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X8B8G8R8 0x000000F9 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_R5G6B5 0x000000E8 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_A1R5G5B5 0x000000E9 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_X1R5G5B5 0x000000F8 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y8 0x000000F3 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y16 0x000000EE +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Y32 0x000000FF +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z1R5G5B5 0x000000FB +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O1R5G5B5 0x000000FC +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_Z8R8G8B8 0x000000FD +#define NV902D_SET_PIXELS_FROM_CPU_COLOR_FORMAT_V_O8R8G8B8 0x000000FE + +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT 0x0808 +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V 1:0 +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I1 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I4 0x00000001 +#define NV902D_SET_PIXELS_FROM_CPU_INDEX_FORMAT_V_I8 0x00000002 + +#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT 0x080c +#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V 0:0 +#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_CGA6_M1 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_MONO_FORMAT_V_LE_M1 0x00000001 + +#define NV902D_SET_PIXELS_FROM_CPU_WRAP 0x0810 +#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V 1:0 +#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_PIXEL 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_BYTE 0x00000001 +#define NV902D_SET_PIXELS_FROM_CPU_WRAP_V_WRAP_DWORD 0x00000002 + +#define NV902D_SET_PIXELS_FROM_CPU_COLOR0 0x0814 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR0_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_COLOR1 0x0818 +#define NV902D_SET_PIXELS_FROM_CPU_COLOR1_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY 0x081c +#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V 0:0 +#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_TRANSPARENT 0x00000000 +#define NV902D_SET_PIXELS_FROM_CPU_MONO_OPACITY_V_OPAQUE 0x00000001 + +#define NV902D_SET_PIXELS_FROM_CPU_SRC_WIDTH 0x0838 +#define NV902D_SET_PIXELS_FROM_CPU_SRC_WIDTH_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_SRC_HEIGHT 0x083c +#define NV902D_SET_PIXELS_FROM_CPU_SRC_HEIGHT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_FRAC 0x0840 +#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_INT 0x0844 +#define NV902D_SET_PIXELS_FROM_CPU_DX_DU_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_FRAC 0x0848 +#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_INT 0x084c +#define NV902D_SET_PIXELS_FROM_CPU_DY_DV_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_FRAC 0x0850 +#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_INT 0x0854 +#define NV902D_SET_PIXELS_FROM_CPU_DST_X0_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC 0x0858 +#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_INT 0x085c +#define NV902D_SET_PIXELS_FROM_CPU_DST_Y0_INT_V 31:0 + +#define NV902D_PIXELS_FROM_CPU_DATA 0x0860 +#define NV902D_PIXELS_FROM_CPU_DATA_V 31:0 + +#define NV902D_SET_BIG_ENDIAN_CONTROL 0x0870 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X32_SWAP_1 0:0 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X32_SWAP_4 1:1 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X32_SWAP_8 2:2 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X32_SWAP_16 3:3 +#define 
NV902D_SET_BIG_ENDIAN_CONTROL_X16_SWAP_1 4:4 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X16_SWAP_4 5:5 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X16_SWAP_8 6:6 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X16_SWAP_16 7:7 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X8_SWAP_1 8:8 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X8_SWAP_4 9:9 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X8_SWAP_8 10:10 +#define NV902D_SET_BIG_ENDIAN_CONTROL_X8_SWAP_16 11:11 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_CGA6_SWAP_1 12:12 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_CGA6_SWAP_4 13:13 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_CGA6_SWAP_8 14:14 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_CGA6_SWAP_16 15:15 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_LE_SWAP_1 16:16 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_LE_SWAP_4 17:17 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_LE_SWAP_8 18:18 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I1_X8_LE_SWAP_16 19:19 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I4_SWAP_1 20:20 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I4_SWAP_4 21:21 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I4_SWAP_8 22:22 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I4_SWAP_16 23:23 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I8_SWAP_1 24:24 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I8_SWAP_4 25:25 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I8_SWAP_8 26:26 +#define NV902D_SET_BIG_ENDIAN_CONTROL_I8_SWAP_16 27:27 +#define NV902D_SET_BIG_ENDIAN_CONTROL_OVERRIDE 28:28 + +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE 0x0880 +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE_V 2:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE_V_AUTO 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE_V_SHAPE_8X8 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_BLOCK_SHAPE_V_SHAPE_16X4 0x00000002 + +#define NV902D_SET_PIXELS_FROM_MEMORY_CORRAL_SIZE 0x0884 +#define NV902D_SET_PIXELS_FROM_MEMORY_CORRAL_SIZE_V 9:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP 0x0888 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V 0:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_FALSE 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAFE_OVERLAP_V_TRUE 0x00000001 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE 0x088c +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_ORIGIN 0:0 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_ORIGIN_CENTER 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_ORIGIN_CORNER 0x00000001 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_FILTER 4:4 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_FILTER_POINT 0x00000000 +#define NV902D_SET_PIXELS_FROM_MEMORY_SAMPLE_MODE_FILTER_BILINEAR 0x00000001 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_X0 0x08b0 +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_X0_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0 0x08b4 +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_Y0_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH 0x08b8 +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_WIDTH_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT 0x08bc +#define NV902D_SET_PIXELS_FROM_MEMORY_DST_HEIGHT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC 0x08c0 +#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT 0x08c4 +#define NV902D_SET_PIXELS_FROM_MEMORY_DU_DX_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC 0x08c8 +#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT 0x08cc +#define NV902D_SET_PIXELS_FROM_MEMORY_DV_DY_INT_V 31:0 + +#define 
NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC 0x08d0 +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_FRAC_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT 0x08d4 +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_X0_INT_V 31:0 + +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC 0x08d8 +#define NV902D_SET_PIXELS_FROM_MEMORY_SRC_Y0_FRAC_V 31:0 + +#define NV902D_PIXELS_FROM_MEMORY_SRC_Y0_INT 0x08dc +#define NV902D_PIXELS_FROM_MEMORY_SRC_Y0_INT_V 31:0 + +#define NV902D_SET_FALCON00 0x08e0 +#define NV902D_SET_FALCON00_V 31:0 + +#define NV902D_SET_FALCON01 0x08e4 +#define NV902D_SET_FALCON01_V 31:0 + +#define NV902D_SET_FALCON02 0x08e8 +#define NV902D_SET_FALCON02_V 31:0 + +#define NV902D_SET_FALCON03 0x08ec +#define NV902D_SET_FALCON03_V 31:0 + +#define NV902D_SET_FALCON04 0x08f0 +#define NV902D_SET_FALCON04_V 31:0 + +#define NV902D_SET_FALCON05 0x08f4 +#define NV902D_SET_FALCON05_V 31:0 + +#define NV902D_SET_FALCON06 0x08f8 +#define NV902D_SET_FALCON06_V 31:0 + +#define NV902D_SET_FALCON07 0x08fc +#define NV902D_SET_FALCON07_V 31:0 + +#define NV902D_SET_FALCON08 0x0900 +#define NV902D_SET_FALCON08_V 31:0 + +#define NV902D_SET_FALCON09 0x0904 +#define NV902D_SET_FALCON09_V 31:0 + +#define NV902D_SET_FALCON10 0x0908 +#define NV902D_SET_FALCON10_V 31:0 + +#define NV902D_SET_FALCON11 0x090c +#define NV902D_SET_FALCON11_V 31:0 + +#define NV902D_SET_FALCON12 0x0910 +#define NV902D_SET_FALCON12_V 31:0 + +#define NV902D_SET_FALCON13 0x0914 +#define NV902D_SET_FALCON13_V 31:0 + +#define NV902D_SET_FALCON14 0x0918 +#define NV902D_SET_FALCON14_V 31:0 + +#define NV902D_SET_FALCON15 0x091c +#define NV902D_SET_FALCON15_V 31:0 + +#define NV902D_SET_FALCON16 0x0920 +#define NV902D_SET_FALCON16_V 31:0 + +#define NV902D_SET_FALCON17 0x0924 +#define NV902D_SET_FALCON17_V 31:0 + +#define NV902D_SET_FALCON18 0x0928 +#define NV902D_SET_FALCON18_V 31:0 + +#define NV902D_SET_FALCON19 0x092c +#define NV902D_SET_FALCON19_V 31:0 + +#define NV902D_SET_FALCON20 0x0930 +#define NV902D_SET_FALCON20_V 31:0 + +#define NV902D_SET_FALCON21 0x0934 +#define NV902D_SET_FALCON21_V 31:0 + +#define NV902D_SET_FALCON22 0x0938 +#define NV902D_SET_FALCON22_V 31:0 + +#define NV902D_SET_FALCON23 0x093c +#define NV902D_SET_FALCON23_V 31:0 + +#define NV902D_SET_FALCON24 0x0940 +#define NV902D_SET_FALCON24_V 31:0 + +#define NV902D_SET_FALCON25 0x0944 +#define NV902D_SET_FALCON25_V 31:0 + +#define NV902D_SET_FALCON26 0x0948 +#define NV902D_SET_FALCON26_V 31:0 + +#define NV902D_SET_FALCON27 0x094c +#define NV902D_SET_FALCON27_V 31:0 + +#define NV902D_SET_FALCON28 0x0950 +#define NV902D_SET_FALCON28_V 31:0 + +#define NV902D_SET_FALCON29 0x0954 +#define NV902D_SET_FALCON29_V 31:0 + +#define NV902D_SET_FALCON30 0x0958 +#define NV902D_SET_FALCON30_V 31:0 + +#define NV902D_SET_FALCON31 0x095c +#define NV902D_SET_FALCON31_V 31:0 + +#define NV902D_MME_DMA_WRITE_METHOD_BARRIER 0x0dec +#define NV902D_MME_DMA_WRITE_METHOD_BARRIER_V 0:0 + +#define NV902D_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NV902D_SET_MME_SHADOW_SCRATCH_V 31:0 + +#define NV902D_CALL_MME_MACRO(j) (0x3800+(j)*8) +#define NV902D_CALL_MME_MACRO_V 31:0 + +#define NV902D_CALL_MME_DATA(j) (0x3804+(j)*8) +#define NV902D_CALL_MME_DATA_V 31:0 + +#endif /* _cl_fermi_twod_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl9067.h b/src/common/sdk/nvidia/inc/class/cl9067.h new file mode 100644 index 000000000..5355f18ee --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9067.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl9067_h_ +#define _cl9067_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define FERMI_CONTEXT_SHARE_A (0x00009067) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl9067_h + diff --git a/src/common/sdk/nvidia/inc/class/cl906f.h b/src/common/sdk/nvidia/inc/class/cl906f.h new file mode 100644 index 000000000..b20d8ff15 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl906f.h @@ -0,0 +1,242 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl906f_h_ +#define _cl906f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class GF100_CHANNEL_GPFIFO */ +/* + * Documentation for GF100_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. 
+ * + */ +#define GF100_CHANNEL_GPFIFO (0x0000906F) + +/* pio method data structure */ +typedef volatile struct _cl906f_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv906fTypedef, GF100ChannelGPFifo; +#define NV906F_TYPEDEF GF100ChannelGPFifo +/* dma flow control data structure */ +typedef volatile struct _cl906f_tag1 { + NvU32 Ignored00[0x010]; /* 0000-0043*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 SetReferenceThreshold; /* set reference value threshold 0050-0053*/ + NvU32 Ignored01[0x001]; /* 0054-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x3dc]; +} Nv906fControl, GF100ControlGPFifo; +/* fields and values */ +#define NV906F_NUMBER_OF_SUBCHANNELS (8) +#define NV906F_SET_OBJECT (0x00000000) +#define NV906F_SET_OBJECT_NVCLASS 15:0 +#define NV906F_SET_OBJECT_ENGINE 20:16 +#define NV906F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NV906F_ILLEGAL (0x00000004) +#define NV906F_ILLEGAL_HANDLE 31:0 +#define NV906F_NOP (0x00000008) +#define NV906F_NOP_HANDLE 31:0 +#define NV906F_SEMAPHOREA (0x00000010) +#define NV906F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NV906F_SEMAPHOREB (0x00000014) +#define NV906F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NV906F_SEMAPHOREC (0x00000018) +#define NV906F_SEMAPHOREC_PAYLOAD 31:0 +#define NV906F_SEMAPHORED (0x0000001C) +#define NV906F_SEMAPHORED_OPERATION 3:0 +#define NV906F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NV906F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NV906F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NV906F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NV906F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NV906F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NV906F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NV906F_SEMAPHORED_RELEASE_WFI 20:20 +#define NV906F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NV906F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NV906F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NV906F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NV906F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NV906F_NON_STALL_INTERRUPT (0x00000020) +#define NV906F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NV906F_FB_FLUSH (0x00000024) +#define NV906F_FB_FLUSH_HANDLE 31:0 +#define NV906F_MEM_OP_A (0x00000028) +#define NV906F_MEM_OP_A_OPERAND_LOW 31:2 +#define NV906F_MEM_OP_A_TLB_INVALIDATE_ADDR 29:2 +#define NV906F_MEM_OP_A_TLB_INVALIDATE_TARGET 31:30 +#define NV906F_MEM_OP_A_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NV906F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NV906F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NV906F_MEM_OP_B (0x0000002c) +#define NV906F_MEM_OP_B_OPERAND_HIGH 7:0 +#define NV906F_MEM_OP_B_OPERATION 31:27 +#define NV906F_MEM_OP_B_OPERATION_SYSMEMBAR_FLUSH 0x00000005 +#define NV906F_MEM_OP_B_OPERATION_SOFT_FLUSH 0x00000006 +#define NV906F_MEM_OP_B_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NV906F_MEM_OP_B_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d 
+#define NV906F_MEM_OP_B_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +#define NV906F_MEM_OP_B_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NV906F_MEM_OP_B_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NV906F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB 0:0 +#define NV906F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NV906F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NV906F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC 1:1 +#define NV906F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NV906F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NV906F_SET_REFERENCE (0x00000050) +#define NV906F_SET_REFERENCE_COUNT 31:0 +#define NV906F_CRC_CHECK (0x0000007c) +#define NV906F_CRC_CHECK_VALUE 31:0 +#define NV906F_YIELD (0x00000080) +#define NV906F_YIELD_OP 1:0 +#define NV906F_YIELD_OP_NOP 0x00000000 + +/* GPFIFO entry format */ +#define NV906F_GP_ENTRY__SIZE 8 +#define NV906F_GP_ENTRY0_FETCH 0:0 +#define NV906F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NV906F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NV906F_GP_ENTRY0_NO_CONTEXT_SWITCH 1:1 +#define NV906F_GP_ENTRY0_NO_CONTEXT_SWITCH_FALSE 0x00000000 +#define NV906F_GP_ENTRY0_NO_CONTEXT_SWITCH_TRUE 0x00000001 +#define NV906F_GP_ENTRY0_GET 31:2 +#define NV906F_GP_ENTRY0_OPERAND 31:0 +#define NV906F_GP_ENTRY1_GET_HI 7:0 +#define NV906F_GP_ENTRY1_PRIV 8:8 +#define NV906F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NV906F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NV906F_GP_ENTRY1_LEVEL 9:9 +#define NV906F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NV906F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NV906F_GP_ENTRY1_LENGTH 30:10 +#define NV906F_GP_ENTRY1_SYNC 31:31 +#define NV906F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NV906F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NV906F_GP_ENTRY1_OPCODE 7:0 +#define NV906F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NV906F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NV906F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NV906F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NV906F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NV906F_DMA_METHOD_ADDRESS 11:0 +#define NV906F_DMA_SUBDEVICE_MASK 15:4 +#define NV906F_DMA_METHOD_SUBCHANNEL 15:13 +#define NV906F_DMA_TERT_OP 17:16 +#define NV906F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NV906F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NV906F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NV906F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NV906F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NV906F_DMA_METHOD_COUNT_OLD 28:18 +#define NV906F_DMA_METHOD_COUNT 28:16 +#define NV906F_DMA_IMMD_DATA 28:16 +#define NV906F_DMA_SEC_OP 31:29 +#define NV906F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NV906F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NV906F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NV906F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NV906F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NV906F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NV906F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NV906F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NV906F_DMA_INCR_ADDRESS 11:0 +#define NV906F_DMA_INCR_SUBCHANNEL 15:13 +#define NV906F_DMA_INCR_COUNT 28:16 +#define NV906F_DMA_INCR_OPCODE 31:29 +#define NV906F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NV906F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NV906F_DMA_NONINCR_ADDRESS 11:0 +#define NV906F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NV906F_DMA_NONINCR_COUNT 28:16 
+#define NV906F_DMA_NONINCR_OPCODE 31:29 +#define NV906F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NV906F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NV906F_DMA_ONEINCR_ADDRESS 11:0 +#define NV906F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NV906F_DMA_ONEINCR_COUNT 28:16 +#define NV906F_DMA_ONEINCR_OPCODE 31:29 +#define NV906F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NV906F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NV906F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NV906F_DMA_IMMD_ADDRESS 11:0 +#define NV906F_DMA_IMMD_SUBCHANNEL 15:13 +#define NV906F_DMA_IMMD_DATA 28:16 +#define NV906F_DMA_IMMD_OPCODE 31:29 +#define NV906F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NV906F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NV906F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NV906F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NV906F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NV906F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NV906F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NV906F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NV906F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NV906F_DMA_ENDSEG_OPCODE 31:29 +#define NV906F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NV906F_DMA_ADDRESS 12:2 +#define NV906F_DMA_SUBCH 15:13 +#define NV906F_DMA_OPCODE3 17:16 +#define NV906F_DMA_OPCODE3_NONE (0x00000000) +#define NV906F_DMA_COUNT 28:18 +#define NV906F_DMA_OPCODE 31:29 +#define NV906F_DMA_OPCODE_METHOD (0x00000000) +#define NV906F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NV906F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl906f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl906fsw.h b/src/common/sdk/nvidia/inc/class/cl906fsw.h new file mode 100644 index 000000000..465a91af1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl906fsw.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
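To make the GPFIFO and pushbuffer encodings of cl906f.h above concrete, here is a hedged sketch that packs one GPFIFO entry and one incrementing method header. The helper names are invented for the example; the bit positions follow NV906F_GP_ENTRY0_*/NV906F_GP_ENTRY1_* and NV906F_DMA_INCR_* as defined above, and a real submission would additionally copy the entry into the GPFIFO ring and advance GPPut in the GF100ControlGPFifo structure, which is not shown.

/* Illustrative sketch only -- not part of cl906f.h. */
#include "nvtypes.h"

/* Two-dword GPFIFO entry pointing at a pushbuffer segment: dword 0 carries
 * address bits 31:2 (NV906F_GP_ENTRY0_GET), dword 1 carries address bits
 * 39:32 (NV906F_GP_ENTRY1_GET_HI) and the length in dwords
 * (NV906F_GP_ENTRY1_LENGTH, bits 30:10).                                   */
static void sketchBuildGpEntry(NvU64 pushbufGpuVa, NvU32 lengthDwords, NvU32 entry[2])
{
    entry[0] = (NvU32)(pushbufGpuVa & 0xFFFFFFFCu);
    entry[1] = (NvU32)((pushbufGpuVa >> 32) & 0xFFu) |
               ((lengthDwords & 0x1FFFFFu) << 10);
}

/* Incrementing method header: NV906F_DMA_INCR_ADDRESS (11:0) holds the
 * method offset in dwords, _SUBCHANNEL sits at 15:13, _COUNT at 28:16 and
 * _OPCODE at 31:29 with _OPCODE_VALUE == 1; the method data dwords follow
 * this header in the pushbuffer.                                           */
static NvU32 sketchIncrMethodHeader(NvU32 subch, NvU32 methodByteOffset, NvU32 count)
{
    return ((methodByteOffset >> 2) & 0xFFFu) |
           ((subch & 0x7u) << 13)            |
           ((count & 0x1FFFu) << 16)         |
           (0x1u << 29);
}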
+ */ + +#ifndef _cl906f_sw_h_ +#define _cl906f_sw_h_ + +/* NvNotification[] elements */ +#define NV906F_NOTIFIERS_RC (0) +#define NV906F_NOTIFIERS_REFCNT (1) +#define NV906F_NOTIFIERS_NONSTALL (2) +#define NV906F_NOTIFIERS_EVENTBUFFER (3) +#define NV906F_NOTIFIERS_IDLECHANNEL (4) +#define NV906F_NOTIFIERS_ENDCTX (5) +#define NV906F_NOTIFIERS_SW (6) +#define NV906F_NOTIFIERS_GR_DEBUG_INTR (7) +#define NV906F_NOTIFIERS_MAXCOUNT (8) + +/* NvNotification[] fields and values */ +#define NV906f_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NV906f_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +#endif /* _cl906f_sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl9072.h b/src/common/sdk/nvidia/inc/class/cl9072.h new file mode 100644 index 000000000..020adecd7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9072.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9072_h_ +#define _cl9072_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define GF100_DISP_SW 0x00009072 + +#define NV9072_NOTIFIERS_NOTIFY_ON_VBLANK (9) +#define NV9072_NOTIFICATION_STATUS_DONE_SUCCESS (0x0000) + + +typedef struct +{ + NvU32 logicalHeadId; + /* + * 0 implies use Head argument only (i.e. whatever is currently setup on this head) + */ + NvU32 displayMask; + NvU32 caps; +} NV9072_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9072_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl9074.h b/src/common/sdk/nvidia/inc/class/cl9074.h new file mode 100644 index 000000000..60436ba31 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9074.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9074_h_ +#define _cl9074_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define GF100_TIMED_SEMAPHORE_SW (0x00009074) + +/* NvNotification[] fields and values */ +#define NV9074_NOTIFICATION_STATUS_PENDING (0x8000) +#define NV9074_NOTIFICATION_STATUS_DONE_FLUSHED (0x0001) +#define NV9074_NOTIFICATION_STATUS_DONE (0x0000) + +#define NV9074_SET_NOTIFIER_HI_V 7:0 + +#define NV9074_SET_SEMAPHORE_HI_V 7:0 + +#define NV9074_SCHEDULE_SEMAPHORE_RELEASE_NOTIFY 1:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9074_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/cl907dswspare.h b/src/common/sdk/nvidia/inc/class/cl907dswspare.h new file mode 100644 index 000000000..ee9ddaada --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl907dswspare.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2010-2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl907d_sw_spare_h_ +#define _cl907d_sw_spare_h_ + +/* This file is *not* auto-generated. 
*/ + +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF 1:0 +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_NO_PREF (0x00000000) +#define NV907D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_GSYNC (0x00000001) + +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY 1:0 +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY_FALSE (0x00000000) +#define NV907D_PIOR_SET_SW_SPARE_A_CODE_FOR_LOCK_SIGNAL_PROPAGATION_ONLY_TRUE (0x00000001) + +#endif // _cl907d_sw_spare_h_ + diff --git a/src/common/sdk/nvidia/inc/class/cl9096.h b/src/common/sdk/nvidia/inc/class/cl9096.h new file mode 100644 index 000000000..559216de7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9096.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9096_h_ +#define _cl9096_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define GF100_ZBC_CLEAR (0x00009096) +#define NV9096_TYPEDEF GF100ZBCClear + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9096_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl90cc.h b/src/common/sdk/nvidia/inc/class/cl90cc.h new file mode 100644 index 000000000..25d85701d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90cc.h @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90cc_h_ +#define _cl90cc_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define GF100_PROFILER (0x000090CC) + +/* + * Creating the GF100_PROFILER object: + * - The profiler object is instantiated as a child of either the subdevice or + * a channel group or channel, depending on whether reservations + * should be global to the subdevice or per-context. When the profiler + * requests a reservation or information about outstanding reservations, the + * scope of the request is determined by the profiler object's parent class. + */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl90cc_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl90cd.h b/src/common/sdk/nvidia/inc/class/cl90cd.h new file mode 100644 index 000000000..2b8c2e5e7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90cd.h @@ -0,0 +1,244 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90cd_h_ +#define _cl90cd_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +* NV_EVENT_BUFFER +* An event buffer is shared between user (RO) and kernel(RW). +* It holds debug/profile event data provided by the kernel. +* +*/ +#define NV_EVENT_BUFFER (0x000090CD) + +/* +* NV_EVENT_BUFFER_HEADER +* This structure holds the get and put values used to index/consume event buffer. +* Along with other RO data shared with the user. +* +* recordGet/Put: These "pointers" work in the traditional sense: +* - when GET==PUT, the fifo is empty +* - when GET==PUT+1, the fifo is full +* This implies a full fifo always has one "wasted" element. +* +* recordCount: This is the total number of records added to the buffer by the kernel +* This information is filled out when the buffer is setup to keep newest records. +* recordCount = number of records currently in the buffer + overflow count. +* +* recordDropcount: This is the number of event records that are dropped because the +* buffer is full. +* This information is filled out when event buffer is setup to keep oldest records. 
+*
+* vardataDropcount: Event buffer provides a dual stream of data, where the record can contain
+* an optional offset to a variable length data buffer.
+* This is the number of variable data records that are dropped because the
+* buffer is full.
+* This information is filled out when the event buffer is set up to keep oldest records.
+*/
+typedef struct
+{
+ NvU32 recordGet;
+ NvU32 recordPut;
+ NvU64 recordCount;
+ NvU64 recordDropcount;
+ NvU64 vardataDropcount;
+} NV_EVENT_BUFFER_HEADER;
+
+/*
+* NV_EVENT_BUFFER_RECORD_HEADER
+* This is the header added to each event record.
+* This helps identify the event type and whether variable length data is associated with it.
+*/
+typedef struct
+{
+ NvU16 type;
+ NvU16 subtype;
+ NvU32 varData; // [31: 5] = (varDataOffset >> 5); 0 < vardataOffset <= vardataBufferSize
+ // [ 4: 1] = reserved for future use
+ // [ 0: 0] = isVardataStartOffsetZero
+} NV_EVENT_BUFFER_RECORD_HEADER;
+
+/*
+* NV_EVENT_BUFFER_RECORD
+* This structure defines a generic event record.
+* The size of this record is fixed for a given event buffer.
+* It is configured by the user during allocation.
+*/
+typedef struct
+{
+ NV_EVENT_BUFFER_RECORD_HEADER recordHeader;
+ NvU64 inlinePayload[1] NV_ALIGN_BYTES(8); // 1st element of the payload/data
+ // Do not add more elements here, inlinePayload can contain more than one element
+} NV_EVENT_BUFFER_RECORD;
+
+#define NV_EVENT_VARDATA_GRANULARITY 32
+#define NV_EVENT_VARDATA_OFFSET_MASK (~(NV_EVENT_VARDATA_GRANULARITY - 1))
+#define NV_EVENT_VARDATA_START_OFFSET_ZERO 0x01
+
+/*
+* NV_EVENT_BUFFER_ALLOC_PARAMETERS
+*
+* bufferHeader [OUT]
+* This is the user VA offset pointing to the base of NV_EVENT_BUFFER_HEADER.
+*
+* recordBuffer [OUT]
+* This is the user VA offset pointing to the base of the event record buffer.
+* This buffer will contain NV_EVENT_BUFFER_RECORDs added by the kernel.
+*
+* recordSize [IN]
+* This is the size of NV_EVENT_BUFFER_RECORD used by this buffer
+*
+* recordCount [IN]
+* This is the number of records that recordBuffer can hold.
+*
+* vardataBuffer [OUT]
+* This is the user VA offset pointing to the base of the variable data buffer.
+*
+* vardataBufferSize [IN]
+* Size of the variable data buffer in bytes.
+*
+* recordsFreeThreshold [IN]
+* This is the notification threshold for the event record buffer.
+* This field specifies the number of records that the buffer can
+* still hold before it gets full.
+*
+* vardataFreeThreshold [IN]
+* This is the notification threshold for the vardata buffer.
+* This field specifies the number of bytes that the buffer can
+* still hold before it gets full.
+*
+* notificationHandle [IN]
+* When recordsFreeThreshold or vardataFreeThreshold is met, kernel will notify
+* user on this handle. If notificationHandle = NULL, event notification
+* is disabled. This is an OS specific notification handle.
+* It is a Windows event handle or a fd pointer on Linux.
+*
+* hSubDevice [IN]
+* An event buffer can either hold sub-device related events or system events.
+* This handle specifies the sub-device to associate this buffer with.
+* If this parameter is NULL, then the buffer is tied to the client instead.
+*
+* flags [IN]
+* Set to 0 by default.
+* This field can hold any future flags to configure the buffer if needed.
+*
+* hBufferHeader [IN]
+* The backing memory object for the buffer header. Must be a NV01_MEMORY_DEVICELESS object.
+* On Windows platforms, a buffer will be internally generated if hBufferHeader is 0.
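A consumer-side sketch of the structures above, assuming the header, record and vardata buffers have already been mapped into the client (the pointers and sizes come back through NV_EVENT_BUFFER_ALLOC_PARAMETERS) and treating recordGet/recordPut as record indices into the ring. Because the header is read-only to user clients, the sketch only computes the new GET value; how it is reported back to the kernel is outside this header and not shown.

#include <stddef.h>
#include "nvtypes.h"
#include "class/cl90cd.h"               /* include path assumed */

/* Walk every record currently in the ring and return the new GET index.
 * recordSize/recordCount are the values the buffer was allocated with. */
static NvU32 drainEventBuffer(const NV_EVENT_BUFFER_HEADER *hdr,
                              const NvU8 *recordBuffer,
                              const NvU8 *vardataBuffer,
                              NvU32 recordSize, NvU32 recordCount)
{
    NvU32 get = hdr->recordGet;

    while (get != hdr->recordPut)       /* GET == PUT: the fifo is empty */
    {
        const NV_EVENT_BUFFER_RECORD *rec =
            (const NV_EVENT_BUFFER_RECORD *)(recordBuffer + (size_t)get * recordSize);

        /* varData bits 31:5 hold the vardata offset in 32-byte units; bit 0
         * marks a record whose vardata legitimately starts at offset 0. */
        NvU32 vardataOffset = rec->recordHeader.varData & NV_EVENT_VARDATA_OFFSET_MASK;
        if (vardataOffset != 0 ||
            (rec->recordHeader.varData & NV_EVENT_VARDATA_START_OFFSET_ZERO))
        {
            const NvU8 *vardata = vardataBuffer + vardataOffset;
            (void)vardata;              /* hand off to a type-specific decoder */
        }

        (void)rec->recordHeader.type;   /* see cl90cdtypes.h for the type values */
        get = (get + 1) % recordCount;  /* one wasted slot keeps full != empty */
    }

    return get;
}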
+* +* hRecordBuffer [IN] +* The backing memory object for the record buffer. Must be a NV01_MEMORY_DEVICELESS object. +* On Windows platforms, a buffer will be internally generated if hRecordBuffer is 0. +* +* hVardataBuffer [IN] +* The backing memory object for the vardata buffer. Must be a NV01_MEMORY_DEVICELESS object. +* On Windows platforms, a buffer will be internally generated if hVardataBuffer is 0. +* +*/ +typedef struct +{ + NvP64 bufferHeader NV_ALIGN_BYTES(8); + NvP64 recordBuffer NV_ALIGN_BYTES(8); + NvU32 recordSize; + NvU32 recordCount; + NvP64 vardataBuffer NV_ALIGN_BYTES(8); + NvU32 vardataBufferSize; + NvU32 recordsFreeThreshold; + NvU64 notificationHandle NV_ALIGN_BYTES(8); + NvU32 vardataFreeThreshold; + NvHandle hSubDevice; + NvU32 flags; + + NvHandle hBufferHeader; + NvHandle hRecordBuffer; + NvHandle hVardataBuffer; +} NV_EVENT_BUFFER_ALLOC_PARAMETERS; + +/* +* NV_EVENT_BUFFER_BIND +* This class is used to allocate an Event Type object bound to a given event buffer. +* This allocation call associates an event type with an event buffer. +* Multiple event types can be associated with the same buffer as long as they belong to +* the same category i.e. either sub-device or system. +* When event buffer is enabled, if an event bound to this buffer occurs, +* some relevant data gets added to it. +* cl2080.h has a list of sub-device events that can be associated with a buffer +* cl0000.h has a list of system events that can be associated with a buffer +* These defines are also used in class NV01_EVENT_OS_EVENT (0x79) to get event notification +* and class NV01_EVENT_KERNEL_CALLBACK_EX (0x7E) to get kernel callbacks. +* This class extends that support to additionally get relevant data in an event buffer +* +*/ +#define NV_EVENT_BUFFER_BIND (0x0000007F) + +/* +* NV_EVENT_BUFFER_BIND_PARAMETERS +* +* bufferHandle [IN] +* Event buffer handle used to bind the given event type +* +* eventType [IN] +* This is one of the eventTypeIDs from cl2080.h/cl000.h +* e.g. NV2080_NOTIFIERS_PSTATE_CHANGE +* +* eventSubtype [IN] +* Event subtype for a given type of event. +* This field is optional depending on if an eventtype has a subtype. +* +* hClientTarget [IN] +* Handle of the target client whose events are to be bound to the given buffer +* e.g. context switch events can be tracked for a given client. +* This field is optional depending on the event type. +* e.g. pstate change events are per gpu but do not depend on a client. +* +* hSrcResource [IN] +* source resource handle for the event type +* e.g. channel handle: RC/context switch can be tracked for a given channel +* This field is optional depending on the event type. +* e.g. pstate change events are per gpu and cannot be sub-categorized +* +* KernelCallbackdata [IN] +* This field is reserved for KERNEL ONLY clients. +* +*/ +typedef struct +{ + NvHandle bufferHandle; + NvU16 eventType; + NvU16 eventSubtype; + NvHandle hClientTarget; + NvHandle hSrcResource; + NvP64 KernelCallbackdata NV_ALIGN_BYTES(8); +} NV_EVENT_BUFFER_BIND_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl90cd_h_ + diff --git a/src/common/sdk/nvidia/inc/class/cl90cdfecs.h b/src/common/sdk/nvidia/inc/class/cl90cdfecs.h new file mode 100644 index 000000000..5118249cf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90cdfecs.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
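A short sketch of filling NV_EVENT_BUFFER_BIND_PARAMETERS before allocating the NV_EVENT_BUFFER_BIND class (0x7F) against an existing buffer. The handle value and the use of an eventTypeID such as NV2080_NOTIFIERS_PSTATE_CHANGE (from cl2080.h, as the comments above suggest) are placeholders; the RM allocation call itself is not shown.

#include <string.h>
#include "nvtypes.h"
#include "class/cl90cd.h"               /* include path assumed */

/* hEventBuffer is the handle of an existing NV_EVENT_BUFFER allocation;
 * eventType is one of the eventTypeIDs from cl2080.h (per-GPU) or cl0000.h
 * (system-wide). */
static void fillBindParams(NV_EVENT_BUFFER_BIND_PARAMETERS *p,
                           NvHandle hEventBuffer, NvU16 eventType)
{
    memset(p, 0, sizeof(*p));
    p->bufferHandle  = hEventBuffer;  /* buffer that will receive the records */
    p->eventType     = eventType;     /* e.g. an eventTypeID from cl2080.h    */
    p->eventSubtype  = 0;             /* optional; depends on the event type  */
    p->hClientTarget = 0;             /* optional; restrict to one client     */
    p->hSrcResource  = 0;             /* optional; e.g. a channel handle      */
    /* KernelCallbackdata is reserved for kernel clients and stays zeroed. */
}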
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90cdFecs_h_ +#define _cl90cdFecs_h_ + +/* This file defines parameters for FECS context switch events*/ + +#define NV_EVENT_BUFFER_FECS_VERSION 2 + +/* + * These are the types of context switch events + * This field gets added to NV_EVENT_BUFFER_FECS_RECORD to specify the sub type of fecs event + * Do *not* edit these as they are defined to maintain consistency with Tegra tools + */ +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_SO 0x00 +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_CTXSW_REQ_BY_HOST 0x01 +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK 0x02 +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_WFI 0x0a +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_GFXP 0x0b +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_CTAP 0x0c +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_CILP 0x0d +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_SAVE_END 0x03 +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_RESTORE_START 0x04 +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_CONTEXT_START 0x05 +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_SIMPLE_START 0x06 +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_SIMPLE_END 0x07 +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_ENGINE_RESET 0xfe +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_INVALID_TIMESTAMP 0xff +#define NV_EVENT_BUFFER_FECS_CTXSWTAG_LAST NV_EVENT_BUFFER_FECS_EVENTS_CTXSWTAG_INVALID_TIMESTAMP + +/* + * Bit fields used to enable a particular sub type of event + */ +#define NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SO NVBIT(NV_EVENT_BUFFER_FECS_CTXSWTAG_SO) +#define NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_CTXSW_REQ_BY_HOST NVBIT(NV_EVENT_BUFFER_FECS_CTXSWTAG_CTXSW_REQ_BY_HOST) +#define NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_FE_ACK NVBIT(NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK) +#define NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SAVE_END NVBIT(NV_EVENT_BUFFER_FECS_CTXSWTAG_SAVE_END) +#define NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_RESTORE_START NVBIT(NV_EVENT_BUFFER_FECS_CTXSWTAG_RESTORE_START) +#define NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_CONTEXT_START NVBIT(NV_EVENT_BUFFER_FECS_CTXSWTAG_CONTEXT_START) +#define NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SIMPLE_START NVBIT(NV_EVENT_BUFFER_FECS_CTXSWTAG_SIMPLE_START) +#define NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SIMPLE_END NVBIT(NV_EVENT_BUFFER_FECS_CTXSWTAG_SIMPLE_END) + +/* context_id is set to this value if fecs info doesn't match a known channel/tsg handle*/ +#define NV_EVENT_BUFFER_INVALID_CONTEXT 0xFFFFFFFF + +/* + * PID/context_id are set to these values if the data is from 
another user's + * client and the current user is not an administrator + */ +#define NV_EVENT_BUFFER_HIDDEN_PID 0x0 +#define NV_EVENT_BUFFER_HIDDEN_CONTEXT 0x0 + +/* + * PID/context_id are set to these values if the data is from a kernel client + * and the data is being read by a user client + */ +#define NV_EVENT_BUFFER_KERNEL_PID 0xFFFFFFFF +#define NV_EVENT_BUFFER_KERNEL_CONTEXT 0xFFFFFFFF + +// V1 ------------------------------------------------------------------------ +typedef struct +{ + NvU8 tag; ///< NV_EVENT_BUFFER_FECS_CTXSWTAG_* + NvU8 vmid; + NvU16 seqno; ///< used to detect drop + NvU32 context_id; ///< channel/tsg handle + NvU64 pid NV_ALIGN_BYTES(8); ///< process id + NvU64 timestamp NV_ALIGN_BYTES(8); + /* Do *not* edit items above this to maintain consistency with tegra tools + Always add to the end of this structure to retain backward compatibility */ +} NV_EVENT_BUFFER_FECS_RECORD_V1; + +// V2 ------------------------------------------------------------------------ +typedef struct +{ + NvU8 tag; ///< NV_EVENT_BUFFER_FECS_CTXSWTAG_* + NvU8 vmid; + NvU16 seqno; ///< used to detect drop + NvU32 context_id; ///< channel/tsg handle + NvU32 pid; ///< process id + NvU16 reserved0; + NvU8 migGpuInstanceId; + NvU8 migComputeInstanceId; + NvU64 timestamp NV_ALIGN_BYTES(8); + /* Do *not* edit items above this to maintain consistency with tegra tools + Always add to the end of this structure to retain backward compatibility */ +} NV_EVENT_BUFFER_FECS_RECORD_V2; + +typedef NV_EVENT_BUFFER_FECS_RECORD_V1 NV_EVENT_BUFFER_FECS_RECORD_V0; +typedef NV_EVENT_BUFFER_FECS_RECORD_V1 NV_EVENT_BUFFER_FECS_RECORD; +#endif /* _cl90cdFecs_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl90cdtypes.h b/src/common/sdk/nvidia/inc/class/cl90cdtypes.h new file mode 100644 index 000000000..f2a40cf35 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90cdtypes.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90cdtypes_h_ +#define _cl90cdtypes_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +// +// Legacy values record type values have been kept for backward +// compatibility. New values should be added sequentially. 
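A sketch of interpreting one of the V2 context-switch records defined above, filtering out the sentinel PID/context values, plus an example tag-enable mask built from the per-tag bits (NVBIT() comes from nvmisc.h, whose availability on the include path is assumed here).

#include <stdio.h>
#include "nvtypes.h"
#include "nvmisc.h"                     /* NVBIT(); include path assumed */
#include "class/cl90cdfecs.h"           /* include path assumed */

/* Print one FECS context-switch record, skipping entries whose owner was
 * hidden or whose timestamp is unusable. */
static void printFecsRecord(const NV_EVENT_BUFFER_FECS_RECORD_V2 *rec)
{
    if (rec->tag == NV_EVENT_BUFFER_FECS_CTXSWTAG_INVALID_TIMESTAMP)
        return;                                   /* timestamp not usable */

    if (rec->pid == NV_EVENT_BUFFER_HIDDEN_PID ||
        rec->context_id == NV_EVENT_BUFFER_INVALID_CONTEXT)
    {
        printf("ctxsw tag=0x%02x (owner hidden or unknown)\n", (unsigned)rec->tag);
        return;
    }

    printf("ctxsw tag=0x%02x pid=%u ctx=0x%08x ts=%llu\n",
           (unsigned)rec->tag, (unsigned)rec->pid, (unsigned)rec->context_id,
           (unsigned long long)rec->timestamp);
}

/* Example enable mask: request only the request/save/restore/start tags. */
static const NvU32 fecsTagMask =
    NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_CTXSW_REQ_BY_HOST |
    NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SAVE_END |
    NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_RESTORE_START |
    NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_CONTEXT_START;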
+// +#define NV_EVENT_BUFFER_RECORD_TYPE_INVALID (0) +#define NV_EVENT_BUFFER_RECORD_TYPE_VIDEO_TRACE (1) +#define NV_EVENT_BUFFER_RECORD_TYPE_FECS_CTX_SWITCH_V2 (2) +#define NV_EVENT_BUFFER_RECORD_TYPE_NVTELEMETRY_REPORT_EVENT_SYSTEM (4) +#define NV_EVENT_BUFFER_RECORD_TYPE_NVTELEMETRY_REPORT_EVENT_SUBDEVICE (132) +#define NV_EVENT_BUFFER_RECORD_TYPE_FECS_CTX_SWITCH (134) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl90cdtypes_h_ + diff --git a/src/common/sdk/nvidia/inc/class/cl90ce.h b/src/common/sdk/nvidia/inc/class/cl90ce.h new file mode 100644 index 000000000..b4506dfeb --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90ce.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90ce_h_ +#define _cl90ce_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +* NV_MEMORY_DEVICELESS +* Memory that is not associated with a device +*/ +#define NV01_MEMORY_DEVICELESS (0x000090CE) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl90ce_h_ + diff --git a/src/common/sdk/nvidia/inc/class/cl90e6.h b/src/common/sdk/nvidia/inc/class/cl90e6.h new file mode 100644 index 000000000..63348eab1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90e6.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90e6_h_ +#define _cl90e6_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define GF100_SUBDEVICE_MASTER (0x000090e6) + +typedef struct { + NvU32 Reserved00[0x400]; /* NV_PMC 0x00000FFF:0x00000000 */ +} Nv90e6MapTypedef, GF100MASTERMap; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl90e6_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl90ec.h b/src/common/sdk/nvidia/inc/class/cl90ec.h new file mode 100644 index 000000000..182640a4f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90ec.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl90ec_h_ +#define _cl90ec_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* Class within the subdevice used for communicating with HDACODEC*/ +#define GF100_HDACODEC (0x000090EC) + + /* pio method data structure */ +typedef volatile struct _cl90ec_tag0 { + NvV32 Reserved00[0x7c0]; +} Nv90ECTypedef, GF100Hdacodec; +#define NV90EC_TYPEDEF GF100Hdacodec + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9071_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl90f1.h b/src/common/sdk/nvidia/inc/class/cl90f1.h new file mode 100644 index 000000000..7523ef198 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl90f1.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cl90f1_h_ +#define _cl90f1_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define FERMI_VASPACE_A (0x000090f1) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl90f1_h + diff --git a/src/common/sdk/nvidia/inc/class/cl9170.h b/src/common/sdk/nvidia/inc/class/cl9170.h new file mode 100644 index 000000000..082ea5bcf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9170.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 1993-2004, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9170_h_ +#define _cl9170_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV9170_DISPLAY (0x00009170) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9170_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9170_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl9171.h b/src/common/sdk/nvidia/inc/class/cl9171.h new file mode 100644 index 000000000..dcbd90be0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9171.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9171_h_ +#define _cl9171_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9171_DISP_SF_USER 0x9171 + +typedef volatile struct _cl9171_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9171DispSfUser, Nv9171DispSfUserMap; + +#define NV9171_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9171_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9171_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9171_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9171_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9171_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9171_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9171_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define 
NV9171_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define 
NV9171_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V 
*/ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define 
NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* 
RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9171_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl9171_h_ diff --git a/src/common/sdk/nvidia/inc/class/cl917a.h b/src/common/sdk/nvidia/inc/class/cl917a.h new file mode 100644 index 000000000..b6d62393d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917a.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
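The NV9171_SF_HDMI_* values above are byte offsets into the NV_PDISP_SF_USER aperture, with a 1024-byte stride per instance (i). A sketch of sending an AVI InfoFrame through them follows; the mapped-aperture pointer, the instance index and the small write helper are assumptions of the sketch, and the control-field values (ENABLE at bit 0, CHKSUM_HW at bit 9) mirror the bit ranges listed above.

#include "nvtypes.h"
#include "class/cl9171.h"               /* include path assumed; pulls in nvtypes.h */

/* 32-bit MMIO write at a byte offset within the mapped SF user aperture. */
static inline void sfWr32(volatile NvU32 *sf, NvU32 byteOff, NvU32 val)
{
    sf[byteOff / sizeof(NvU32)] = val;
}

/* hb[0..2] are the InfoFrame header bytes, pb[0..13] the payload bytes. */
static void sendAviInfoframe(volatile NvU32 *sf, NvU32 idx,
                             const NvU8 hb[3], const NvU8 pb[14])
{
    /* Header bytes HB0..HB2 packed into bits 7:0, 15:8 and 23:16. */
    sfWr32(sf, NV9171_SF_HDMI_AVI_INFOFRAME_HEADER(idx),
           hb[0] | (hb[1] << 8) | (hb[2] << 16));

    /* Payload bytes PB0..PB6 go in subpack0, PB7..PB13 in subpack1. */
    sfWr32(sf, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(idx),
           pb[0] | (pb[1] << 8) | (pb[2] << 16) | ((NvU32)pb[3] << 24));
    sfWr32(sf, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(idx),
           pb[4] | (pb[5] << 8) | (pb[6] << 16));
    sfWr32(sf, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(idx),
           pb[7] | (pb[8] << 8) | (pb[9] << 16) | ((NvU32)pb[10] << 24));
    sfWr32(sf, NV9171_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(idx),
           pb[11] | (pb[12] << 8) | (pb[13] << 16));

    /* ENABLE (bit 0) = YES and CHKSUM_HW (bit 9) = ENABLE: transmit the frame
     * with the packet checksum computed by hardware. */
    sfWr32(sf, NV9171_SF_HDMI_AVI_INFOFRAME_CTRL(idx),
           (1u << 0) | (1u << 9));
}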
+ */ + + +#ifndef _cl917a_h_ +#define _cl917a_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917A_CURSOR_CHANNEL_PIO (0x0000917A) + +typedef volatile struct { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x1D]; + NvV32 Update; // 0x00000080 - 0x00000083 + NvV32 SetCursorHotSpotPointsOut[2]; // 0x00000084 - 0x0000008B + NvV32 Reserved02[0x3DD]; +} GK104DispCursorControlPio; + +#define NV917A_FREE (0x00000008) +#define NV917A_FREE_COUNT 5:0 +#define NV917A_UPDATE (0x00000080) +#define NV917A_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917A_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917A_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT(b) (0x00000084 + (b)*0x00000004) +#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT_X 15:0 +#define NV917A_SET_CURSOR_HOT_SPOT_POINTS_OUT_Y 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917a_h + diff --git a/src/common/sdk/nvidia/inc/class/cl917b.h b/src/common/sdk/nvidia/inc/class/cl917b.h new file mode 100644 index 000000000..05270e2d1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917b.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl917b_h_ +#define _cl917b_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917B_OVERLAY_IMM_CHANNEL_PIO (0x0000917B) + +typedef volatile struct { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x1D]; + NvV32 Update; // 0x00000080 - 0x00000083 + NvV32 SetPointsOut[2]; // 0x00000084 - 0x0000008B + NvV32 Reserved02[0x1]; + NvV32 AwakenOnceFlippedTo; // 0x00000090 - 0x00000093 + NvV32 Reserved03[0x3DB]; +} GK104DispOverlayImmControlPio; + +#define NV917B_FREE (0x00000008) +#define NV917B_FREE_COUNT 5:0 +#define NV917B_UPDATE (0x00000080) +#define NV917B_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917B_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917B_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917B_SET_POINTS_OUT(b) (0x00000084 + (b)*0x00000004) +#define NV917B_SET_POINTS_OUT_X 15:0 +#define NV917B_SET_POINTS_OUT_Y 31:16 +#define NV917B_AWAKEN_ONCE_FLIPPED_TO (0x00000090) +#define NV917B_AWAKEN_ONCE_FLIPPED_TO_AWAKEN_COUNT 11:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917b_h + diff --git a/src/common/sdk/nvidia/inc/class/cl917c.h b/src/common/sdk/nvidia/inc/class/cl917c.h new file mode 100644 index 000000000..2b7c6a293 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917c.h @@ -0,0 +1,298 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl917c_h_ +#define _cl917c_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917C_BASE_CHANNEL_DMA (0x0000917C) + +#define NV_DISP_BASE_NOTIFIER_1 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1_SIZEOF 0x00000004 +#define NV_DISP_BASE_NOTIFIER_1__0 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_PRESENTATION_COUNT 15:0 +#define NV_DISP_BASE_NOTIFIER_1__0_TIMESTAMP 29:16 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS 31:30 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED 0x00000002 + + +#define NV_DISP_NOTIFICATION_2 0x00000000 +#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0 +#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002 +#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003 +#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8 +#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9 +#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16 +#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000 + + +#define NV_DISP_NOTIFICATION_INFO16 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8 +#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9 + + +#define NV_DISP_NOTIFICATION_STATUS 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000 + + +// dma opcode instructions +#define NV917C_DMA 0x00000000 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_METHOD 0x00000000 +#define NV917C_DMA_OPCODE_JUMP 0x00000001 +#define NV917C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_METHOD 0x00000000 +#define NV917C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917C_DMA_METHOD_COUNT 27:18 +#define NV917C_DMA_METHOD_OFFSET 11:2 +#define NV917C_DMA_DATA 31:0 +#define NV917C_DMA_DATA_NOP 0x00000000 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_JUMP 0x00000001 +#define NV917C_DMA_JUMP_OFFSET 11:2 +#define NV917C_DMA_OPCODE 31:29 +#define NV917C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917C_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV917C_PUT (0x00000000) +#define NV917C_PUT_PTR 11:2 +#define NV917C_GET (0x00000004) +#define NV917C_GET_PTR 11:2 +#define NV917C_GET_SCANLINE (0x00000010) +#define NV917C_GET_SCANLINE_LINE 15:0 +#define NV917C_UPDATE (0x00000080) +#define NV917C_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917C_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917C_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917C_UPDATE_SPECIAL_HANDLING 25:24 +#define 
NV917C_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV917C_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV917C_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV917C_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV917C_SET_PRESENT_CONTROL (0x00000084) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8 +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002) +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3 +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE 2:2 +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4 +#define NV917C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16 +#define NV917C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10 +#define NV917C_SET_PRESENT_CONTROL_MODE 1:0 +#define NV917C_SET_PRESENT_CONTROL_MODE_MONO (0x00000000) +#define NV917C_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001) +#define NV917C_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002) +#define NV917C_SET_SEMAPHORE_CONTROL (0x00000088) +#define NV917C_SET_SEMAPHORE_CONTROL_OFFSET 11:2 +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY 26:26 +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY_DISABLE (0x00000000) +#define NV917C_SET_SEMAPHORE_CONTROL_DELAY_ENABLE (0x00000001) +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT 28:28 +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917C_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917C_SET_SEMAPHORE_ACQUIRE (0x0000008C) +#define NV917C_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NV917C_SET_SEMAPHORE_RELEASE (0x00000090) +#define NV917C_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NV917C_SET_CONTEXT_DMA_SEMAPHORE (0x00000094) +#define NV917C_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NV917C_SET_NOTIFIER_CONTROL (0x000000A0) +#define NV917C_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV917C_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV917C_SET_NOTIFIER_CONTROL_DELAY 26:26 +#define NV917C_SET_NOTIFIER_CONTROL_DELAY_DISABLE (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_DELAY_ENABLE (0x00000001) +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917C_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917C_SET_CONTEXT_DMA_NOTIFIER (0x000000A4) +#define NV917C_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV917C_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV917C_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV917C_SET_BASE_LUT_LO (0x000000E0) +#define NV917C_SET_BASE_LUT_LO_ENABLE 31:30 +#define NV917C_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV917C_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV917C_SET_BASE_LUT_LO_MODE 27:24 +#define NV917C_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV917C_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV917C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE 
(0x00000004) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917C_SET_BASE_LUT_HI (0x000000E4) +#define NV917C_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV917C_SET_OUTPUT_LUT_LO (0x000000E8) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE 31:30 +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV917C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV917C_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV917C_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917C_SET_OUTPUT_LUT_HI (0x000000EC) +#define NV917C_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV917C_SET_CONTEXT_DMA_LUT (0x000000FC) +#define NV917C_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV917C_SET_PROCESSING (0x00000110) +#define NV917C_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV917C_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV917C_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV917C_SET_CONVERSION_RED (0x00000114) +#define NV917C_SET_CONVERSION_RED_GAIN 15:0 +#define NV917C_SET_CONVERSION_RED_OFS 31:16 +#define NV917C_SET_CONVERSION_GRN (0x00000118) +#define NV917C_SET_CONVERSION_GRN_GAIN 15:0 +#define NV917C_SET_CONVERSION_GRN_OFS 31:16 +#define NV917C_SET_CONVERSION_BLU (0x0000011C) +#define NV917C_SET_CONVERSION_BLU_GAIN 15:0 +#define NV917C_SET_CONVERSION_BLU_OFS 31:16 +#define NV917C_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV917C_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV917C_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV917C_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV917C_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV917C_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV917C_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV917C_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV917C_SET_CSC_RED2RED (0x00000140) +#define NV917C_SET_CSC_RED2RED_OWNER 31:31 +#define NV917C_SET_CSC_RED2RED_OWNER_CORE (0x00000000) +#define NV917C_SET_CSC_RED2RED_OWNER_BASE (0x00000001) +#define NV917C_SET_CSC_RED2RED_COEFF 18:0 +#define NV917C_SET_CSC_GRN2RED (0x00000144) +#define NV917C_SET_CSC_GRN2RED_COEFF 18:0 +#define NV917C_SET_CSC_BLU2RED (0x00000148) +#define NV917C_SET_CSC_BLU2RED_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV917C_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV917C_SET_CSC_RED2GRN (0x00000150) +#define NV917C_SET_CSC_RED2GRN_COEFF 18:0 +#define NV917C_SET_CSC_GRN2GRN (0x00000154) +#define NV917C_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV917C_SET_CSC_BLU2GRN (0x00000158) +#define NV917C_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV917C_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV917C_SET_CSC_RED2BLU (0x00000160) +#define 
NV917C_SET_CSC_RED2BLU_COEFF 18:0 +#define NV917C_SET_CSC_GRN2BLU (0x00000164) +#define NV917C_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV917C_SET_CSC_BLU2BLU (0x00000168) +#define NV917C_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV917C_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV917C_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV917C_SET_SPARE (0x000003BC) +#define NV917C_SET_SPARE_UNUSED 31:0 +#define NV917C_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV917C_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV917C_SURFACE_SET_OFFSET(a,b) (0x00000400 + (a)*0x00000020 + (b)*0x00000004) +#define NV917C_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV917C_SURFACE_SET_SIZE(a) (0x00000408 + (a)*0x00000020) +#define NV917C_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV917C_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV917C_SURFACE_SET_STORAGE(a) (0x0000040C + (a)*0x00000020) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV917C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV917C_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV917C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV917C_SURFACE_SET_PARAMS(a) (0x00000410 + (a)*0x00000020) +#define NV917C_SURFACE_SET_PARAMS_FORMAT 15:8 +#define NV917C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV917C_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV917C_SURFACE_SET_PARAMS_GAMMA 2:2 +#define NV917C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT 5:4 +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001) +#define NV917C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917c_h diff --git a/src/common/sdk/nvidia/inc/class/cl917cswspare.h b/src/common/sdk/nvidia/inc/class/cl917cswspare.h new file mode 100644 index 000000000..39e88c09e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917cswspare.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2010-2014, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl917c_sw_spare_h_ +#define _cl917c_sw_spare_h_ + +/* This file is *not* auto-generated. */ + +/* NV917C_SET_SPARE_PRE_UPDATE_TRAP is an alias of NV917C_SET_SPARE_NOOP(0) */ +#define NV917C_SET_SPARE_PRE_UPDATE_TRAP (0x000003C0) +#define NV917C_SET_SPARE_PRE_UPDATE_TRAP_UNUSED 31:0 + +/* NV917C_SET_SPARE_POST_UPDATE_TRAP is an alias of NV917C_SET_SPARE_NOOP(1) */ +#define NV917C_SET_SPARE_POST_UPDATE_TRAP (0x000003C4) +#define NV917C_SET_SPARE_POST_UPDATE_TRAP_UNUSED 31:0 + + +#endif /* _cl917c_sw_spare_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl917d.h b/src/common/sdk/nvidia/inc/class/cl917d.h new file mode 100644 index 000000000..4f70d8136 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917d.h @@ -0,0 +1,1551 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl917d_h_ +#define _cl917d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917D_CORE_CHANNEL_DMA (0x0000917D) + +#define NV917D_CORE_NOTIFIER_3 0x00000000 +#define NV917D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV917D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV917D_CORE_NOTIFIER_3__1 0x00000001 +#define NV917D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV917D_CORE_NOTIFIER_3__2 0x00000002 +#define NV917D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV917D_CORE_NOTIFIER_3__3 0x00000003 +#define NV917D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 
0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 
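/*
 * Illustrative sketch only -- not part of the class header above.  The
 * capability defines use a HIGH:LOW bit-range notation (for example 23:16
 * or 11:11) for fields inside 32-bit notifier words, and the per-output
 * defines such as _CAP_SOR6_33 (0x21) or _CAP_SOR7_34 (0x22) are treated
 * here as word indices into the notifier.  The driver decodes these ranges
 * with its own DRF-style macros defined elsewhere; the standalone helpers
 * and function names below are hypothetical and only show the same idea,
 * using ?: to pick either bound out of the "hi:lo" token.
 */
#include <stdint.h>

#define NV_FIELD_HI(range)   (1 ? range)   /* high bit of a "hi:lo" range */
#define NV_FIELD_LO(range)   (0 ? range)   /* low bit of a "hi:lo" range  */
#define NV_FIELD_MASK(range) \
    ((0xFFFFFFFFu >> (31 - NV_FIELD_HI(range) + NV_FIELD_LO(range))) << NV_FIELD_LO(range))
#define NV_FIELD_VAL(range, dword) \
    (((dword) & NV_FIELD_MASK(range)) >> NV_FIELD_LO(range))

/* Hypothetical example: test SOR7's dual-TMDS capability bit (11:11 of the
 * _CAP_SOR7_34 word). */
static int sor7_supports_dual_tmds(const volatile uint32_t *notifier)
{
    uint32_t dw = notifier[NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34];

    return NV_FIELD_VAL(NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS, dw) ==
           NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE;
}

/* Hypothetical example: read SOR6's raw DP clock-max field (7:0 of the
 * _CAP_SOR6_33 word); the units of the raw value are not specified here. */
static uint32_t sor6_dp_clk_max_raw(const volatile uint32_t *notifier)
{
    uint32_t dw = notifier[NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33];

    return NV_FIELD_VAL(NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX, dw);
}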
+#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:27 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define 
NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV917D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV917D_DMA 0x00000000 +#define NV917D_DMA_OPCODE 31:29 +#define NV917D_DMA_OPCODE_METHOD 0x00000000 +#define NV917D_DMA_OPCODE_JUMP 0x00000001 +#define NV917D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917D_DMA_OPCODE 31:29 +#define NV917D_DMA_OPCODE_METHOD 0x00000000 +#define NV917D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917D_DMA_METHOD_COUNT 27:18 +#define NV917D_DMA_METHOD_OFFSET 11:2 +#define NV917D_DMA_DATA 31:0 +#define NV917D_DMA_DATA_NOP 0x00000000 +#define NV917D_DMA_OPCODE 31:29 +#define NV917D_DMA_OPCODE_JUMP 0x00000001 +#define NV917D_DMA_JUMP_OFFSET 11:2 +#define NV917D_DMA_OPCODE 31:29 +#define NV917D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV917D_PUT (0x00000000) +#define NV917D_PUT_PTR 11:2 +#define NV917D_GET (0x00000004) +#define NV917D_GET_PTR 11:2 +#define NV917D_UPDATE (0x00000080) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV917D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV917D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV917D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV917D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV917D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define 
NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV917D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV917D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV917D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV917D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV917D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV917D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV917D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV917D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV917D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV917D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV917D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV917D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV917D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV917D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV917D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV917D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV917D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV917D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV917D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV917D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV917D_GET_CAPABILITIES (0x0000008C) +#define NV917D_GET_CAPABILITIES_DUMMY 31:0 +#define NV917D_SET_SPARE 
(0x0000016C) +#define NV917D_SET_SPARE_UNUSED 31:0 +#define NV917D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV917D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV917D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV917D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV917D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV917D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV917D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV917D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV917D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV917D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV917D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV917D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV917D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV917D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV917D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV917D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV917D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV917D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV917D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV917D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV917D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV917D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV917D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV917D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV917D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV917D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV917D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV917D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV917D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV917D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV917D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV917D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV917D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV917D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV917D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV917D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV917D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define 
NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV917D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV917D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV917D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV917D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV917D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV917D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV917D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV917D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV917D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV917D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV917D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV917D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV917D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV917D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define 
NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV917D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define 
NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 
+(i)) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV917D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV917D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV917D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV917D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV917D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV917D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV917D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV917D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV917D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV917D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV917D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV917D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV917D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV917D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV917D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV917D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV917D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV917D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define 
NV917D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV917D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV917D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV917D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV917D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV917D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV917D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV917D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 
(0x000000FF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define NV917D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define 
NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV917D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV917D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV917D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV917D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define 
NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV917D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV917D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV917D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV917D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV917D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV917D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV917D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV917D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV917D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV917D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV917D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV917D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV917D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV917D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV917D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV917D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV917D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV917D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV917D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV917D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV917D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV917D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV917D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define 
NV917D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV917D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV917D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV917D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV917D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV917D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV917D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV917D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV917D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV917D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV917D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV917D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV917D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV917D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define 
NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV917D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV917D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV917D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV917D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV917D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV917D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV917D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV917D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV917D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV917D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV917D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV917D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define 
NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV917D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV917D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV917D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV917D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV917D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV917D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV917D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV917D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV917D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV917D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV917D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV917D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV917D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define 
NV917D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV917D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV917D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV917D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV917D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV917D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV917D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV917D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV917D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV917D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV917D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV917D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV917D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV917D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV917D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV917D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define 
NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV917D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV917D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV917D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV917D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV917D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917d_h diff --git a/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h b/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h new file mode 100644 index 000000000..9abc6057e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917dcrcnotif.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2003-2010, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __cl917dcrcnotif_h__ +#define __cl917dcrcnotif_h__ +/* This file is autogenerated. Do not edit */ + +#define NV917D_NOTIFIER_CRC_1_STATUS_0 0x00000000 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE 0:0 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE 0x00000000 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_TRUE 0x00000001 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW 3:3 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW_FALSE 0x00000000 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW_TRUE 0x00000001 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW 4:4 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_FALSE 0x00000000 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_TRUE 0x00000001 +#define NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT 31:24 +#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3 0x00000003 +#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC 31:0 +#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4 0x00000004 +#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC 31:0 +#define NV917D_NOTIFIER_CRC_1_CRC_ENTRY1_8 0x00000008 + +#endif // __cl917dcrcnotif_h__ diff --git a/src/common/sdk/nvidia/inc/class/cl917e.h b/src/common/sdk/nvidia/inc/class/cl917e.h new file mode 100644 index 000000000..6586bda6e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl917e.h @@ -0,0 +1,265 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
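The `HIGH:LOW` values used throughout these autogenerated headers name bit ranges inside a 32-bit method or register word. A minimal, illustrative sketch of how such a range is typically turned into shift/mask values, in the spirit of the driver's DRF-style helpers; the FLD_* macro names below are hypothetical and the pitch value is an arbitrary example, not a recommended setting:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helpers: "r" expands to a HIGH:LOW range, so (0 ? r) yields
 * LOW and (1 ? r) yields HIGH via the conditional operator. */
#define FLD_SHIFT(r)   ((0 ? r) % 32)
#define FLD_MASK(r)    (0xFFFFFFFFu >> (31 - ((1 ? r) % 32) + ((0 ? r) % 32)))
#define FLD_NUM(r, n)  (((uint32_t)(n) & FLD_MASK(r)) << FLD_SHIFT(r))

/* Bit ranges copied from NV917D_HEAD_SET_STORAGE above. */
#define STORAGE_BLOCK_HEIGHT   3:0
#define STORAGE_PITCH          20:8
#define STORAGE_MEMORY_LAYOUT  24:24

int main(void)
{
    /* Pack an example pitch-linear configuration: ONE_GOB block height,
     * MEMORY_LAYOUT_PITCH, and an arbitrary example pitch field value. */
    uint32_t data = FLD_NUM(STORAGE_BLOCK_HEIGHT, 0x0)   /* _ONE_GOB      */
                  | FLD_NUM(STORAGE_PITCH, 0x40)         /* example value */
                  | FLD_NUM(STORAGE_MEMORY_LAYOUT, 0x1); /* _PITCH        */
    printf("HEAD_SET_STORAGE data word: 0x%08X\n", (unsigned)data);
    return 0;
}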
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl917e_h_ +#define _cl917e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV917E_OVERLAY_CHANNEL_DMA (0x0000917E) + +#define NV_DISP_NOTIFICATION_2 0x00000000 +#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0 +#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002 +#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003 +#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8 +#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9 +#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16 +#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000 + + +#define NV_DISP_NOTIFICATION_INFO16 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8 +#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9 + + +#define NV_DISP_NOTIFICATION_STATUS 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000 + + +// dma opcode instructions +#define NV917E_DMA 0x00000000 +#define NV917E_DMA_OPCODE 31:29 +#define NV917E_DMA_OPCODE_METHOD 0x00000000 +#define NV917E_DMA_OPCODE_JUMP 0x00000001 +#define NV917E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917E_DMA_OPCODE 31:29 +#define NV917E_DMA_OPCODE_METHOD 0x00000000 +#define NV917E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV917E_DMA_METHOD_COUNT 27:18 +#define NV917E_DMA_METHOD_OFFSET 11:2 +#define NV917E_DMA_DATA 31:0 +#define NV917E_DMA_DATA_NOP 0x00000000 +#define NV917E_DMA_OPCODE 31:29 +#define NV917E_DMA_OPCODE_JUMP 0x00000001 +#define NV917E_DMA_JUMP_OFFSET 
11:2 +#define NV917E_DMA_OPCODE 31:29 +#define NV917E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV917E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV917E_PUT (0x00000000) +#define NV917E_PUT_PTR 11:2 +#define NV917E_GET (0x00000004) +#define NV917E_GET_PTR 11:2 +#define NV917E_UPDATE (0x00000080) +#define NV917E_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV917E_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV917E_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV917E_UPDATE_SPECIAL_HANDLING 25:24 +#define NV917E_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV917E_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV917E_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV917E_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV917E_SET_PRESENT_CONTROL (0x00000084) +#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE 1:0 +#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP (0x00000000) +#define NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP (0x00000003) +#define NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3 +#define NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000) +#define NV917E_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001) +#define NV917E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4 +#define NV917E_SET_PRESENT_CONTROL_MODE 11:10 +#define NV917E_SET_PRESENT_CONTROL_MODE_MONO (0x00000000) +#define NV917E_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001) +#define NV917E_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002) +#define NV917E_SET_SEMAPHORE_ACQUIRE (0x00000088) +#define NV917E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NV917E_SET_SEMAPHORE_RELEASE (0x0000008C) +#define NV917E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NV917E_SET_SEMAPHORE_CONTROL (0x00000090) +#define NV917E_SET_SEMAPHORE_CONTROL_OFFSET 11:2 +#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT 28:28 +#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917E_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917E_SET_CONTEXT_DMA_SEMAPHORE (0x00000094) +#define NV917E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NV917E_SET_NOTIFIER_CONTROL (0x000000A0) +#define NV917E_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV917E_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV917E_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV917E_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV917E_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV917E_SET_CONTEXT_DMA_NOTIFIER (0x000000A4) +#define NV917E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV917E_SET_CONTEXT_DMA_LUT (0x000000B0) +#define NV917E_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV917E_SET_OVERLAY_LUT_LO (0x000000B4) +#define NV917E_SET_OVERLAY_LUT_LO_ENABLE 31:31 +#define NV917E_SET_OVERLAY_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV917E_SET_OVERLAY_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV917E_SET_OVERLAY_LUT_LO_MODE 27:24 +#define NV917E_SET_OVERLAY_LUT_LO_MODE_LORES (0x00000000) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_HIRES (0x00000001) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define 
NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV917E_SET_OVERLAY_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV917E_SET_OVERLAY_LUT_HI (0x000000B8) +#define NV917E_SET_OVERLAY_LUT_HI_ORIGIN 31:0 +#define NV917E_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV917E_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV917E_SET_POINT_IN (0x000000E0) +#define NV917E_SET_POINT_IN_X 14:0 +#define NV917E_SET_POINT_IN_Y 30:16 +#define NV917E_SET_SIZE_IN (0x000000E4) +#define NV917E_SET_SIZE_IN_WIDTH 14:0 +#define NV917E_SET_SIZE_IN_HEIGHT 30:16 +#define NV917E_SET_SIZE_OUT (0x000000E8) +#define NV917E_SET_SIZE_OUT_WIDTH 14:0 +#define NV917E_SET_COMPOSITION_CONTROL (0x00000100) +#define NV917E_SET_COMPOSITION_CONTROL_MODE 3:0 +#define NV917E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING (0x00000000) +#define NV917E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING (0x00000001) +#define NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE (0x00000002) +#define NV917E_SET_KEY_COLOR_LO (0x00000104) +#define NV917E_SET_KEY_COLOR_LO_COLOR 31:0 +#define NV917E_SET_KEY_COLOR_HI (0x00000108) +#define NV917E_SET_KEY_COLOR_HI_COLOR 31:0 +#define NV917E_SET_KEY_MASK_LO (0x0000010C) +#define NV917E_SET_KEY_MASK_LO_MASK 31:0 +#define NV917E_SET_KEY_MASK_HI (0x00000110) +#define NV917E_SET_KEY_MASK_HI_MASK 31:0 +#define NV917E_SET_PROCESSING (0x00000118) +#define NV917E_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV917E_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV917E_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV917E_SET_CONVERSION_RED (0x0000011C) +#define NV917E_SET_CONVERSION_RED_GAIN 15:0 +#define NV917E_SET_CONVERSION_RED_OFS 31:16 +#define NV917E_SET_CONVERSION_GRN (0x00000120) +#define NV917E_SET_CONVERSION_GRN_GAIN 15:0 +#define NV917E_SET_CONVERSION_GRN_OFS 31:16 +#define NV917E_SET_CONVERSION_BLU (0x00000124) +#define NV917E_SET_CONVERSION_BLU_GAIN 15:0 +#define NV917E_SET_CONVERSION_BLU_OFS 31:16 +#define NV917E_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV917E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV917E_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV917E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV917E_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV917E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV917E_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV917E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV917E_SET_CSC_RED2RED (0x00000140) +#define NV917E_SET_CSC_RED2RED_COEFF 18:0 +#define NV917E_SET_CSC_GRN2RED (0x00000144) +#define NV917E_SET_CSC_GRN2RED_COEFF 18:0 +#define NV917E_SET_CSC_BLU2RED (0x00000148) +#define NV917E_SET_CSC_BLU2RED_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV917E_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV917E_SET_CSC_RED2GRN (0x00000150) +#define NV917E_SET_CSC_RED2GRN_COEFF 18:0 +#define NV917E_SET_CSC_GRN2GRN (0x00000154) +#define NV917E_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV917E_SET_CSC_BLU2GRN (0x00000158) +#define NV917E_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV917E_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV917E_SET_CSC_RED2BLU (0x00000160) +#define NV917E_SET_CSC_RED2BLU_COEFF 18:0 +#define NV917E_SET_CSC_GRN2BLU (0x00000164) +#define NV917E_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV917E_SET_CSC_BLU2BLU (0x00000168) +#define NV917E_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV917E_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV917E_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define 
NV917E_SET_SPARE (0x000003BC) +#define NV917E_SET_SPARE_UNUSED 31:0 +#define NV917E_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV917E_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV917E_SURFACE_SET_OFFSET(b) (0x00000400 + (b)*0x00000004) +#define NV917E_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV917E_SURFACE_SET_SIZE (0x00000408) +#define NV917E_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV917E_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV917E_SURFACE_SET_STORAGE (0x0000040C) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV917E_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV917E_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV917E_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV917E_SURFACE_SET_PARAMS (0x00000410) +#define NV917E_SURFACE_SET_PARAMS_FORMAT 15:8 +#define NV917E_SURFACE_SET_PARAMS_FORMAT_VE8YO8UE8YE8 (0x00000028) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_YO8VE8YE8UE8 (0x00000029) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV917E_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE 1:0 +#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_RGB (0x00000000) +#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001) +#define NV917E_SURFACE_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl917e_h diff --git a/src/common/sdk/nvidia/inc/class/cl9270.h b/src/common/sdk/nvidia/inc/class/cl9270.h new file mode 100644 index 000000000..e2ffbf712 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9270.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 1993-2010, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9270_h_ +#define _cl9270_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV9270_DISPLAY (0x00009270) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9270_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9270_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl9271.h b/src/common/sdk/nvidia/inc/class/cl9271.h new file mode 100644 index 000000000..397cdc218 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9271.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
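The NV9270_ALLOCATION_PARAMETERS structure above is the parameter block used when the NV9270_DISPLAY object is allocated through the resource manager. A minimal sketch of filling it in; the head/DAC/SOR/PIOR counts below are placeholders, and the allocation call itself (which varies by client) is deliberately omitted:

#include "class/cl9270.h"   /* include path depends on the build setup */

static NV9270_ALLOCATION_PARAMETERS makeExampleDispParams(void)
{
    NV9270_ALLOCATION_PARAMETERS params = { 0 };

    /* Placeholder values; on real hardware these counts are queried from
     * the GPU rather than hard-coded. */
    params.numHeads = 4;
    params.numDacs  = 1;
    params.numSors  = 4;
    params.numPiors = 0;

    return params;
}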
+ */ + +#ifndef _cl9271_h_ +#define _cl9271_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9271_DISP_SF_USER 0x9271 + +typedef volatile struct _cl9271_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9271DispSfUser, Nv9271DispSfUserMap; + +#define NV9271_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9271_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9271_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9271_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9271_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9271_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9271_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9271_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define 
NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define 
NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define 
NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A 
*/ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define 
NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9271_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl9271_h_ diff --git a/src/common/sdk/nvidia/inc/class/cl927c.h b/src/common/sdk/nvidia/inc/class/cl927c.h new file mode 100644 index 000000000..1b78305c6 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl927c.h @@ -0,0 +1,299 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
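The NV9271_SF_HDMI_* defines above are byte offsets into the NV_PDISP_SF_USER window described by Nv9271DispSfUserMap, with a 1 KiB stride per head. A minimal sketch of writing one AVI-infoframe header register through such a mapping; how the window is mapped into sfUser is assumed here, not shown:

#include "nvtypes.h"
#include "class/cl9271.h"   /* include path depends on the build setup */

/* sfUser is assumed to be a CPU mapping of the NV_PDISP_SF_USER window. */
static void writeAviInfoframeHeader(Nv9271DispSfUserMap *sfUser,
                                    unsigned int head,
                                    NvU8 hb0, NvU8 hb1, NvU8 hb2)
{
    /* The macro yields a byte offset; convert it to a 32-bit word index. */
    unsigned int idx =
        NV9271_SF_HDMI_AVI_INFOFRAME_HEADER(head) / sizeof(NvU32);

    /* HB0, HB1 and HB2 occupy bits 7:0, 15:8 and 23:16 respectively. */
    sfUser->dispSfUserOffset[idx] =
        (NvU32)hb0 | ((NvU32)hb1 << 8) | ((NvU32)hb2 << 16);
}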
+ */ + + +#ifndef _cl927c_h_ +#define _cl927c_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV927C_BASE_CHANNEL_DMA (0x0000927C) + +#define NV_DISP_BASE_NOTIFIER_1 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1_SIZEOF 0x00000004 +#define NV_DISP_BASE_NOTIFIER_1__0 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_PRESENTATION_COUNT 15:0 +#define NV_DISP_BASE_NOTIFIER_1__0_TIMESTAMP 29:16 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS 31:30 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED 0x00000002 + + +#define NV_DISP_NOTIFICATION_2 0x00000000 +#define NV_DISP_NOTIFICATION_2_SIZEOF 0x00000010 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0 0x00000000 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_0_NANOSECONDS0 31:0 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1 0x00000001 +#define NV_DISP_NOTIFICATION_2_TIME_STAMP_1_NANOSECONDS1 31:0 +#define NV_DISP_NOTIFICATION_2_INFO32_2 0x00000002 +#define NV_DISP_NOTIFICATION_2_INFO32_2_R0 31:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3 0x00000003 +#define NV_DISP_NOTIFICATION_2_INFO16_3_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_2_INFO16_3_FIELD 8:8 +#define NV_DISP_NOTIFICATION_2_INFO16_3_R1 15:9 +#define NV_DISP_NOTIFICATION_2__3_STATUS 31:16 +#define NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED 0x00000000 + + +#define NV_DISP_NOTIFICATION_INFO16 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_INFO16__0 0x00000000 +#define NV_DISP_NOTIFICATION_INFO16__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFICATION_INFO16__0_FIELD 8:8 +#define NV_DISP_NOTIFICATION_INFO16__0_R1 15:9 + + +#define NV_DISP_NOTIFICATION_STATUS 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS_SIZEOF 0x00000002 +#define NV_DISP_NOTIFICATION_STATUS__0 0x00000000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS 15:0 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_NOT_BEGUN 0x00008000 +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_BEGUN 0x0000FFFF +#define NV_DISP_NOTIFICATION_STATUS__0_STATUS_FINISHED 0x00000000 + + +// dma opcode instructions +#define NV927C_DMA 0x00000000 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_METHOD 0x00000000 +#define NV927C_DMA_OPCODE_JUMP 0x00000001 +#define NV927C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_METHOD 0x00000000 +#define NV927C_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927C_DMA_METHOD_COUNT 27:18 +#define NV927C_DMA_METHOD_OFFSET 11:2 +#define NV927C_DMA_DATA 31:0 +#define NV927C_DMA_DATA_NOP 0x00000000 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_JUMP 0x00000001 +#define NV927C_DMA_JUMP_OFFSET 11:2 +#define NV927C_DMA_OPCODE 31:29 +#define NV927C_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927C_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV927C_PUT (0x00000000) +#define NV927C_PUT_PTR 11:2 +#define NV927C_GET (0x00000004) +#define NV927C_GET_PTR 11:2 +#define NV927C_GET_SCANLINE (0x00000010) +#define NV927C_GET_SCANLINE_LINE 15:0 +#define NV927C_UPDATE (0x00000080) +#define NV927C_UPDATE_INTERLOCK_WITH_CORE 0:0 +#define NV927C_UPDATE_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NV927C_UPDATE_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NV927C_UPDATE_SPECIAL_HANDLING 25:24 +#define 
NV927C_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV927C_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV927C_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV927C_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV927C_SET_PRESENT_CONTROL (0x00000084) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE 9:8 +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE (0x00000002) +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE 3:3 +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_PAIR_FLIP (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE_AT_ANY_FRAME (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE 2:2 +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 7:4 +#define NV927C_SET_PRESENT_CONTROL_BEGIN_LINE 30:16 +#define NV927C_SET_PRESENT_CONTROL_ON_LINE_MARGIN 15:10 +#define NV927C_SET_PRESENT_CONTROL_MODE 1:0 +#define NV927C_SET_PRESENT_CONTROL_MODE_MONO (0x00000000) +#define NV927C_SET_PRESENT_CONTROL_MODE_STEREO (0x00000001) +#define NV927C_SET_PRESENT_CONTROL_MODE_SPEC_FLIP (0x00000002) +#define NV927C_SET_SEMAPHORE_CONTROL (0x00000088) +#define NV927C_SET_SEMAPHORE_CONTROL_OFFSET 11:2 +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY 26:26 +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY_DISABLE (0x00000000) +#define NV927C_SET_SEMAPHORE_CONTROL_DELAY_ENABLE (0x00000001) +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT 28:28 +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV927C_SET_SEMAPHORE_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV927C_SET_SEMAPHORE_ACQUIRE (0x0000008C) +#define NV927C_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NV927C_SET_SEMAPHORE_RELEASE (0x00000090) +#define NV927C_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NV927C_SET_CONTEXT_DMA_SEMAPHORE (0x00000094) +#define NV927C_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NV927C_SET_NOTIFIER_CONTROL (0x000000A0) +#define NV927C_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV927C_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV927C_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV927C_SET_NOTIFIER_CONTROL_DELAY 26:26 +#define NV927C_SET_NOTIFIER_CONTROL_DELAY_DISABLE (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_DELAY_ENABLE (0x00000001) +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV927C_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV927C_SET_CONTEXT_DMA_NOTIFIER (0x000000A4) +#define NV927C_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV927C_SET_CONTEXT_DMAS_ISO(b) (0x000000C0 + (b)*0x00000004) +#define NV927C_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV927C_SET_BASE_LUT_LO (0x000000E0) +#define NV927C_SET_BASE_LUT_LO_ENABLE 31:30 +#define NV927C_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV927C_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV927C_SET_BASE_LUT_LO_MODE 27:24 +#define NV927C_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV927C_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV927C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE 
(0x00000004) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927C_SET_BASE_LUT_HI (0x000000E4) +#define NV927C_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV927C_SET_OUTPUT_LUT_LO (0x000000E8) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE 31:30 +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT (0x00000001) +#define NV927C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000002) +#define NV927C_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV927C_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927C_SET_OUTPUT_LUT_HI (0x000000EC) +#define NV927C_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV927C_SET_CONTEXT_DMA_LUT (0x000000FC) +#define NV927C_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV927C_SET_PROCESSING (0x00000110) +#define NV927C_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV927C_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV927C_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV927C_SET_CONVERSION_RED (0x00000114) +#define NV927C_SET_CONVERSION_RED_GAIN 15:0 +#define NV927C_SET_CONVERSION_RED_OFS 31:16 +#define NV927C_SET_CONVERSION_GRN (0x00000118) +#define NV927C_SET_CONVERSION_GRN_GAIN 15:0 +#define NV927C_SET_CONVERSION_GRN_OFS 31:16 +#define NV927C_SET_CONVERSION_BLU (0x0000011C) +#define NV927C_SET_CONVERSION_BLU_GAIN 15:0 +#define NV927C_SET_CONVERSION_BLU_OFS 31:16 +#define NV927C_SET_TIMESTAMP_ORIGIN_LO (0x00000130) +#define NV927C_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NV927C_SET_TIMESTAMP_ORIGIN_HI (0x00000134) +#define NV927C_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NV927C_SET_UPDATE_TIMESTAMP_LO (0x00000138) +#define NV927C_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NV927C_SET_UPDATE_TIMESTAMP_HI (0x0000013C) +#define NV927C_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NV927C_SET_CSC_RED2RED (0x00000140) +#define NV927C_SET_CSC_RED2RED_OWNER 31:31 +#define NV927C_SET_CSC_RED2RED_OWNER_CORE (0x00000000) +#define NV927C_SET_CSC_RED2RED_OWNER_BASE (0x00000001) +#define NV927C_SET_CSC_RED2RED_COEFF 18:0 +#define NV927C_SET_CSC_GRN2RED (0x00000144) +#define NV927C_SET_CSC_GRN2RED_COEFF 18:0 +#define NV927C_SET_CSC_BLU2RED (0x00000148) +#define NV927C_SET_CSC_BLU2RED_COEFF 18:0 +#define NV927C_SET_CSC_CONSTANT2RED (0x0000014C) +#define NV927C_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV927C_SET_CSC_RED2GRN (0x00000150) +#define NV927C_SET_CSC_RED2GRN_COEFF 18:0 +#define NV927C_SET_CSC_GRN2GRN (0x00000154) +#define NV927C_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV927C_SET_CSC_BLU2GRN (0x00000158) +#define NV927C_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV927C_SET_CSC_CONSTANT2GRN (0x0000015C) +#define NV927C_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV927C_SET_CSC_RED2BLU (0x00000160) +#define 
NV927C_SET_CSC_RED2BLU_COEFF 18:0 +#define NV927C_SET_CSC_GRN2BLU (0x00000164) +#define NV927C_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV927C_SET_CSC_BLU2BLU (0x00000168) +#define NV927C_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV927C_SET_CSC_CONSTANT2BLU (0x0000016C) +#define NV927C_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV927C_SET_SPARE (0x000003BC) +#define NV927C_SET_SPARE_UNUSED 31:0 +#define NV927C_SET_SPARE_NOOP(b) (0x000003C0 + (b)*0x00000004) +#define NV927C_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV927C_SURFACE_SET_OFFSET(a,b) (0x00000400 + (a)*0x00000020 + (b)*0x00000004) +#define NV927C_SURFACE_SET_OFFSET_ORIGIN 31:0 +#define NV927C_SURFACE_SET_SIZE(a) (0x00000408 + (a)*0x00000020) +#define NV927C_SURFACE_SET_SIZE_WIDTH 15:0 +#define NV927C_SURFACE_SET_SIZE_HEIGHT 31:16 +#define NV927C_SURFACE_SET_STORAGE(a) (0x0000040C + (a)*0x00000020) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV927C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV927C_SURFACE_SET_STORAGE_PITCH 20:8 +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV927C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV927C_SURFACE_SET_PARAMS(a) (0x00000410 + (a)*0x00000020) +#define NV927C_SURFACE_SET_PARAMS_FORMAT 15:8 +#define NV927C_SURFACE_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV927C_SURFACE_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV927C_SURFACE_SET_PARAMS_GAMMA 2:2 +#define NV927C_SURFACE_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT 5:4 +#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FRM (0x00000000) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FLD1 (0x00000001) +#define NV927C_SURFACE_SET_PARAMS_LAYOUT_FLD2 (0x00000002) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl927c_h diff --git a/src/common/sdk/nvidia/inc/class/cl927d.h b/src/common/sdk/nvidia/inc/class/cl927d.h new file mode 100644 index 000000000..df45fece4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl927d.h @@ -0,0 +1,1556 @@ +/* + * 
SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl927d_h_ +#define _cl927d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV927D_CORE_CHANNEL_DMA (0x0000927D) + +#define NV927D_CORE_NOTIFIER_3 0x00000000 +#define NV927D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV927D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV927D_CORE_NOTIFIER_3__1 0x00000001 +#define NV927D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV927D_CORE_NOTIFIER_3__2 0x00000002 +#define NV927D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV927D_CORE_NOTIFIER_3__3 0x00000003 +#define NV927D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:27 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 
+#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:0 +#define 
NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV927D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV927D_DMA 0x00000000 +#define NV927D_DMA_OPCODE 31:29 +#define NV927D_DMA_OPCODE_METHOD 0x00000000 +#define NV927D_DMA_OPCODE_JUMP 0x00000001 +#define NV927D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927D_DMA_OPCODE 31:29 +#define NV927D_DMA_OPCODE_METHOD 0x00000000 +#define NV927D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV927D_DMA_METHOD_COUNT 27:18 +#define NV927D_DMA_METHOD_OFFSET 11:2 +#define NV927D_DMA_DATA 31:0 +#define NV927D_DMA_DATA_NOP 0x00000000 +#define NV927D_DMA_OPCODE 31:29 +#define NV927D_DMA_OPCODE_JUMP 0x00000001 +#define NV927D_DMA_JUMP_OFFSET 11:2 +#define NV927D_DMA_OPCODE 31:29 +#define NV927D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV927D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV927D_PUT (0x00000000) +#define NV927D_PUT_PTR 11:2 +#define NV927D_GET (0x00000004) +#define NV927D_GET_PTR 11:2 +#define NV927D_UPDATE (0x00000080) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define 
NV927D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV927D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV927D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV927D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV927D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV927D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV927D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV927D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 
+#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV927D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV927D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV927D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV927D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV927D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV927D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV927D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV927D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV927D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV927D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV927D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV927D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV927D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV927D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV927D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV927D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV927D_GET_CAPABILITIES (0x0000008C) +#define NV927D_GET_CAPABILITIES_DUMMY 31:0 +#define NV927D_SET_SPARE (0x0000016C) +#define NV927D_SET_SPARE_UNUSED 31:0 +#define NV927D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV927D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV927D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV927D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV927D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV927D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV927D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV927D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV927D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV927D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV927D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV927D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV927D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV927D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV927D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV927D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV927D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV927D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV927D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define 
NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV927D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV927D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV927D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV927D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV927D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV927D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV927D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV927D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV927D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV927D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV927D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV927D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV927D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV927D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV927D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV927D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV927D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV927D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV927D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV927D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV927D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV927D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV927D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define 
NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13 +#define NV927D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV927D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define 
NV927D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV927D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV927D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) 
+#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV927D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define 
NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV927D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV927D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV927D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV927D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV927D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV927D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV927D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV927D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV927D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV927D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV927D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV927D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV927D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV927D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV927D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV927D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV927D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV927D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV927D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) 
(0x00000FF8 +(i)) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV927D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 
(0x00000F2F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define NV927D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV927D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV927D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV927D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define 
NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV927D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV927D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV927D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV927D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV927D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV927D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV927D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV927D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV927D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB 
(0x00000000) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV927D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV927D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV927D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV927D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV927D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV927D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV927D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV927D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV927D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV927D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV927D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV927D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV927D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV927D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV927D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV927D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV927D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV927D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV927D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV927D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV927D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV927D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV927D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV927D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define 
NV927D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV927D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV927D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV927D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV927D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV927D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV927D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV927D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV927D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV927D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV927D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV927D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV927D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV927D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV927D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV927D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define 
NV927D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV927D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV927D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV927D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV927D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define 
NV927D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV927D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV927D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV927D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV927D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV927D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV927D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV927D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV927D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV927D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV927D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV927D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV927D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV927D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV927D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV927D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV927D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV927D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define 
NV927D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV927D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV927D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV927D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV927D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV927D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV927D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV927D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV927D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV927D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV927D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV927D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV927D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV927D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV927D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl927d_h diff --git a/src/common/sdk/nvidia/inc/class/cl9470.h b/src/common/sdk/nvidia/inc/class/cl9470.h new file mode 100644 index 000000000..bedd82dd6 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9470.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 1993-2012, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9470_h_ +#define _cl9470_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV9470_DISPLAY (0x00009470) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9470_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9470_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl9471.h b/src/common/sdk/nvidia/inc/class/cl9471.h new file mode 100644 index 000000000..c5f557fce --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9471.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl9471_h_ +#define _cl9471_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9471_DISP_SF_USER 0x9471 + +typedef volatile struct _cl9471_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9471DispSfUser, Nv9471DispSfUserMap; + +#define NV9471_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9471_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9471_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9471_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9471_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9471_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define NV9471_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9471_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define 
NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define 
NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define 
NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A 
*/ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define 
NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9471_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl9471_h_ diff --git a/src/common/sdk/nvidia/inc/class/cl947d.h b/src/common/sdk/nvidia/inc/class/cl947d.h new file mode 100644 index 000000000..684240605 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl947d.h @@ -0,0 +1,1606 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl947d_h_ +#define _cl947d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV947D_CORE_CHANNEL_DMA (0x0000947D) + +#define NV947D_CORE_NOTIFIER_3 0x00000000 +#define NV947D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV947D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV947D_CORE_NOTIFIER_3__1 0x00000001 +#define NV947D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV947D_CORE_NOTIFIER_3__2 0x00000002 +#define NV947D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV947D_CORE_NOTIFIER_3__3 0x00000003 +#define NV947D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 
0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 
+#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 12:12 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 15:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_LVDS_CLK_MAX 23:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R8 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_REORDER_BANK_WIDTH_SIZE_MAX 13:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_REORDER_BANK_WIDTH_SIZE_MAX 13:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_REORDER_BANK_WIDTH_SIZE_MAX 13:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_REORDER_BANK_WIDTH_SIZE_MAX 13:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define 
NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV947D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV947D_DMA 0x00000000 +#define NV947D_DMA_OPCODE 31:29 +#define NV947D_DMA_OPCODE_METHOD 0x00000000 +#define NV947D_DMA_OPCODE_JUMP 0x00000001 +#define NV947D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV947D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV947D_DMA_METHOD_COUNT 27:18 +#define NV947D_DMA_METHOD_OFFSET 11:2 +#define NV947D_DMA_DATA 31:0 +#define NV947D_DMA_DATA_NOP 0x00000000 +#define NV947D_DMA_JUMP_OFFSET 11:2 +#define NV947D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV947D_PUT (0x00000000) +#define NV947D_PUT_PTR 11:2 +#define NV947D_GET (0x00000004) +#define NV947D_GET_PTR 11:2 +#define NV947D_UPDATE (0x00000080) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV947D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define 
NV947D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV947D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV947D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV947D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV947D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV947D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV947D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV947D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV947D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV947D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV947D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV947D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV947D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV947D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV947D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV947D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV947D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV947D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV947D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV947D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 
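The HI:LO pairs in the defines above give the high and low bit positions of each field inside a 32-bit method or notifier word. As an illustration only (nothing below is part of this header), one common trick for consuming that notation is a pair of 1?range / 0?range ternaries that recover the two bit numbers once the range is passed as a macro argument. The sketch packs a hypothetical core-channel push-buffer entry for NV947D_SET_NOTIFIER_CONTROL, assuming the DMA METHOD_OFFSET field carries the method address in 32-bit words; the FIELD_* helpers are invented for the example and are not the driver's own macros.

/* Hypothetical, self-contained sketch; the FIELD_* helpers are not from this header. */
#include <stdint.h>
#include <stdio.h>

/* A few definitions copied from the class header above so the sketch stands alone. */
#define NV947D_DMA_OPCODE                              31:29
#define NV947D_DMA_OPCODE_METHOD                       0x00000000
#define NV947D_DMA_METHOD_COUNT                        27:18
#define NV947D_DMA_METHOD_OFFSET                       11:2
#define NV947D_SET_NOTIFIER_CONTROL                    (0x00000084)
#define NV947D_SET_NOTIFIER_CONTROL_MODE               30:30
#define NV947D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN  (0x00000001)
#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY             31:31
#define NV947D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE      (0x00000001)
#define NV947D_SET_NOTIFIER_CONTROL_OFFSET             11:2

/* The 1?/0? ternaries pull the high and low bit numbers out of a HI:LO range. */
#define FIELD_HI(range)      (1 ? range)
#define FIELD_LO(range)      (0 ? range)
#define FIELD_MASK(range)    ((0xFFFFFFFFu >> (31 - FIELD_HI(range) + FIELD_LO(range))) << FIELD_LO(range))
#define FIELD_NUM(range, v)  (((uint32_t)(v) << FIELD_LO(range)) & FIELD_MASK(range))

int main(void)
{
    /* Method header: METHOD opcode, one data word, aimed at SET_NOTIFIER_CONTROL. */
    uint32_t header = FIELD_NUM(NV947D_DMA_OPCODE,        NV947D_DMA_OPCODE_METHOD)
                    | FIELD_NUM(NV947D_DMA_METHOD_COUNT,  1)
                    | FIELD_NUM(NV947D_DMA_METHOD_OFFSET, NV947D_SET_NOTIFIER_CONTROL >> 2);

    /* Data word: WRITE_AWAKEN mode, notification enabled, notifier offset 0. */
    uint32_t data = FIELD_NUM(NV947D_SET_NOTIFIER_CONTROL_MODE,   NV947D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN)
                  | FIELD_NUM(NV947D_SET_NOTIFIER_CONTROL_NOTIFY, NV947D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE)
                  | FIELD_NUM(NV947D_SET_NOTIFIER_CONTROL_OFFSET, 0);

    printf("header 0x%08x  data 0x%08x\n", header, data);
    return 0;
}

The same packing pattern applies to any field in this file, since every field is expressed as a HI:LO range paired with enumerated values.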
+#define NV947D_GET_CAPABILITIES (0x0000008C) +#define NV947D_GET_CAPABILITIES_DUMMY 31:0 +#define NV947D_SET_SPARE (0x0000016C) +#define NV947D_SET_SPARE_UNUSED 31:0 +#define NV947D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV947D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV947D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV947D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV947D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV947D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV947D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV947D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV947D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV947D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV947D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV947D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV947D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV947D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV947D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV947D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV947D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV947D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV947D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV947D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC 
(0x00000001) +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV947D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV947D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV947D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV947D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV947D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV947D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13 +#define NV947D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV947D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define 
NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define 
NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) 
+#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV947D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV947D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV947D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV947D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV947D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV947D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV947D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV947D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define 
NV947D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV947D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV947D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV947D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV947D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV947D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV947D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV947D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV947D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define 
NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV947D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define 
NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define NV947D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV947D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV947D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV947D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define 
NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV947D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV947D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV947D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV947D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV947D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV947D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV947D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV947D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV947D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV947D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV947D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV947D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV947D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV947D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV947D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV947D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV947D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define 
NV947D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV947D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV947D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV947D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV947D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV947D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV947D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV947D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV947D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV947D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV947D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV947D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV947D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV947D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV947D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define 
NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV947D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV947D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV947D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV947D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV947D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV947D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV947D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define 
NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV947D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV947D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV947D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV947D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV947D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV947D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV947D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV947D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define 
NV947D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV947D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV947D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV947D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV947D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV947D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV947D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV947D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV947D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV947D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV947D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define 
NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV947D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV947D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV947D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV947D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define 
NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV947D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7 +#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NV947D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV947D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV947D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV947D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV947D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV947D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl947d_h diff --git a/src/common/sdk/nvidia/inc/class/cl9570.h b/src/common/sdk/nvidia/inc/class/cl9570.h new file mode 100644 index 000000000..ab5e6ccd0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9570.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 1993-2013, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9570_h_ +#define _cl9570_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV9570_DISPLAY (0x00009570) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9570_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9570_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl9571.h b/src/common/sdk/nvidia/inc/class/cl9571.h new file mode 100644 index 000000000..be40b1fdf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9571.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2011, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9571_h_ +#define _cl9571_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV9571_DISP_SF_USER 0x9571 + +typedef volatile struct _cl9571_tag0 { + NvU32 dispSfUserOffset[0x400]; /* NV_PDISP_SF_USER 0x00690FFF:0x00690000 */ +} _Nv9571DispSfUser, Nv9571DispSfUserMap; + +#define NV9571_SF_HDMI_INFO_IDX_AVI_INFOFRAME 0x00000000 /* */ +#define NV9571_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NV9571_SF_HDMI_INFO_IDX_GCP 0x00000003 /* */ +#define NV9571_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NV9571_SF_HDMI_INFO_CTRL(i,j) (0x00690000-0x00690000+(i)*1024+(j)*64) /* RWX4A */ +#define NV9571_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ +#define NV9571_SF_HDMI_INFO_CTRL_ENABLE 0:0 /* RWIVF */ +#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_NO 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_YES 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_DIS 0x00000000 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_ENABLE_EN 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_OTHER 4:4 /* RWIVF */ +#define NV9571_SF_HDMI_INFO_CTRL_OTHER_DIS 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_INFO_CTRL_OTHER_EN 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_SINGLE 8:8 /* RWIVF */ +#define NV9571_SF_HDMI_INFO_CTRL_SINGLE_DIS 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_INFO_CTRL_SINGLE_EN 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW 9:9 /* RWIVF */ +#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_ENABLE 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_EN 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_DISABLE 0x00000000 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_DIS 0x00000000 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_CHKSUM_HW_INIT 0x00000001 /* RWI-V */ +#define NV9571_SF_HDMI_INFO_CTRL_HBLANK 12:12 /* RWIVF */ +#define NV9571_SF_HDMI_INFO_CTRL_HBLANK_DIS 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_INFO_CTRL_HBLANK_EN 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT 16:16 /* RWIVF */ +#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_SW_CONTROLLED 0x00000000 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_HW_CONTROLLED 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_INFO_CTRL_VIDEO_FMT_INIT 0x00000001 /* RWI-V */ +#define NV9571_SF_HDMI_INFO_STATUS(i,j) (0x00690004-0x00690000+(i)*1024+(j)*64) /* R--4A */ +#define 
NV9571_SF_HDMI_INFO_STATUS__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_INFO_STATUS__SIZE_2 5 /* */ +#define NV9571_SF_HDMI_INFO_STATUS_SENT 0:0 /* R--VF */ +#define NV9571_SF_HDMI_INFO_STATUS_SENT_DONE 0x00000001 /* R---V */ +#define NV9571_SF_HDMI_INFO_STATUS_SENT_WAITING 0x00000000 /* R---V */ +#define NV9571_SF_HDMI_INFO_STATUS_SENT_INIT 0x00000000 /* R-I-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL(i) (0x00690000-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_CTRL__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER(i) (0x00690008-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) (0x0069000C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH(i) (0x00690010-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW(i) (0x00690014-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH(i) (0x00690018-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ 
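The AVI infoframe register macros above encode a fixed stride: instance `i` of NV_PDISP_SF_USER begins `i*1024` bytes into the aperture, and payload bytes PB0..PB3 occupy bit ranges 7:0, 15:8, 23:16 and 31:24 of SUBPACK0_LOW. The following is a minimal C sketch of that arithmetic; the `sf_user_base` pointer and `write32()` accessor are illustrative assumptions and are not provided by these headers.

    #include <stdint.h>

    /* Illustrative only: assumes a CPU mapping of the NV_PDISP_SF_USER
     * aperture; write32() is a stand-in accessor, not part of these headers. */
    static inline void write32(volatile uint32_t *base, uint32_t byte_off, uint32_t val)
    {
        base[byte_off / 4] = val;
    }

    /* Pack AVI infoframe payload bytes PB0..PB3 into SUBPACK0_LOW for
     * instance 'i': NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK0_LOW(i) = 0x0C + i*1024. */
    static void avi_subpack0_low_write(volatile uint32_t *sf_user_base, unsigned i,
                                       uint8_t pb0, uint8_t pb1, uint8_t pb2, uint8_t pb3)
    {
        uint32_t off = 0x0000000C + i * 1024;   /* 1024-byte stride per instance */
        uint32_t val = (uint32_t)pb0            /* PB0 -> bits  7:0              */
                     | ((uint32_t)pb1 << 8)     /* PB1 -> bits 15:8              */
                     | ((uint32_t)pb2 << 16)    /* PB2 -> bits 23:16             */
                     | ((uint32_t)pb3 << 24);   /* PB3 -> bits 31:24             */
        write32(sf_user_base, off, val);
    }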
+#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_HEADER(i) (0x00690048-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_HEADER__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW(i) (0x0069004C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH(i) (0x00690050-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW(i) (0x00690054-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH(i) (0x00690058-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define 
NV9571_SF_HDMI_GENERIC_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW(i) (0x0069005C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH(i) (0x00690060-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW(i) (0x00690064-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH(i) (0x00690068-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GENERIC_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GCP_SUBPACK(i) (0x006900CC-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_GCP_SUBPACK__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_INIT 0x00000001 /* RWI-V */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_SET_AVMUTE 0x00000001 /* RW--V */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB0_CLR_AVMUTE 0x00000010 /* RW--V */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_GCP_SUBPACK_SB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_HEADER(i) (0x00690108-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_HEADER__SIZE_1 4 /* */ +#define 
NV9571_SF_HDMI_VSI_HEADER_HB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_HEADER_HB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_HEADER_HB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_HEADER_HB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_HEADER_HB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_HEADER_HB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW(i) (0x0069010C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB0 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB0_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB1 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB1_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB2 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB2_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB3 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_LOW_PB3_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH(i) (0x00690110-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB4_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB5 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB5_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB6 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK0_HIGH_PB6_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW(i) (0x00690114-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB7 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB7_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB8 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB8_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB9 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB9_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB10 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_LOW_PB10_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH(i) (0x00690118-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB11 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB11_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB12 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB12_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB13 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK1_HIGH_PB13_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW(i) (0x0069011C-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB14 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB14_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB15 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB15_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB16 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB16_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB17 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_LOW_PB17_INIT 0x00000000 /* RWI-V */ 
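Every `HIGH:LOW` pair in these headers names an inclusive bit range within a 32-bit register word. A generic mask/extract/insert helper is one way to consume them; the sketch below assumes no field crosses a 32-bit word boundary, and the FIELD_* names are illustrative rather than taken from these headers.

    #include <stdint.h>

    /* Illustrative HIGH:LOW field helpers; not defined by these headers. */
    #define FIELD_MASK(hi, lo)      ((uint32_t)((0xFFFFFFFFu >> (31 - (hi) + (lo))) << (lo)))
    #define FIELD_GET(hi, lo, w)    (((w) & FIELD_MASK(hi, lo)) >> (lo))
    #define FIELD_SET(hi, lo, w, x) \
        (((w) & ~FIELD_MASK(hi, lo)) | (((uint32_t)(x) << (lo)) & FIELD_MASK(hi, lo)))

    /* Example: place VSI payload byte PB13 (range 23:16) into a
     * NV9571_SF_HDMI_VSI_SUBPACK1_HIGH word. */
    static inline uint32_t vsi_subpack1_high_set_pb13(uint32_t word, uint8_t pb13)
    {
        return FIELD_SET(23, 16, word, pb13);
    }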
+#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH(i) (0x00690120-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB18 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB18_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB19 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB19_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB20 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK2_HIGH_PB20_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW(i) (0x00690124-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB21 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB21_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB22 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB22_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB23 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB23_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB24 31:24 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_LOW_PB24_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH(i) (0x00690128-0x00690000+(i)*1024) /* RWX4A */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH__SIZE_1 4 /* */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB25 7:0 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB25_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB26 15:8 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB26_INIT 0x00000000 /* RWI-V */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB27 23:16 /* RWIVF */ +#define NV9571_SF_HDMI_VSI_SUBPACK3_HIGH_PB27_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _cl9571_h_ diff --git a/src/common/sdk/nvidia/inc/class/cl957d.h b/src/common/sdk/nvidia/inc/class/cl957d.h new file mode 100644 index 000000000..ea10694fc --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl957d.h @@ -0,0 +1,1602 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _cl957d_h_ +#define _cl957d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV957D_CORE_CHANNEL_DMA (0x0000957D) + +#define NV957D_CORE_NOTIFIER_3 0x00000000 +#define NV957D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV957D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV957D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV957D_CORE_NOTIFIER_3__1 0x00000001 +#define NV957D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV957D_CORE_NOTIFIER_3__2 0x00000002 +#define NV957D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV957D_CORE_NOTIFIER_3__3 0x00000003 +#define NV957D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 
0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_11 0x0000000B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 
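The CAPABILITIES_CAP_SOR0_20/21 values above are indices into the core channel notifier, with each capability occupying a HIGH:LOW bit range of that word. The sketch below reads two of them under the assumption that the notifier is CPU-mapped and that the indices are offsets in 32-bit words; the function names and that mapping are illustrative, not something these headers establish.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative notifier reads; 'notifier' is assumed to point at a CPU
     * mapping of the NV957D core notifier, viewed as an array of 32-bit words. */
    static bool sor0_supports_dual_tmds(const volatile uint32_t *notifier)
    {
        uint32_t cap = notifier[0x00000014];   /* ..._CAPABILITIES_CAP_SOR0_20   */
        return ((cap >> 11) & 0x1) == 0x1;     /* DUAL_TMDS is range 11:11, TRUE */
    }

    static uint32_t sor0_tmds_clk_max(const volatile uint32_t *notifier)
    {
        uint32_t cap = notifier[0x00000015];   /* ..._CAPABILITIES_CAP_SOR0_21   */
        return (cap >> 16) & 0xFF;             /* TMDS_CLK_MAX is range 23:16    */
    }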
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 
0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 
0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 
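
Aside (illustrative, not part of the header being added): the "hi:lo" values above are bit-range defines, and the CAP_* word defines are dword indices into the CORE_NOTIFIER_3 capabilities area. A minimal sketch of how such ranges are conventionally consumed follows; the FIELD_* helpers and the dump_sor2_caps() function are hypothetical stand-ins that mirror (but are not) the driver's DRF-style accessors, and the dword-index interpretation of CAP_SOR2_24/25 is an assumption made for illustration.

/* Sketch: decoding SOR2 capability words with hypothetical FIELD_* helpers.
 * The (0 ? hi:lo) / (1 ? hi:lo) ternary trick picks the low / high bound of
 * a bit-range define. */
#include <stdint.h>
#include <stdio.h>

/* Copied from the class header above. */
#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24               0x00000018
#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS     11:11
#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25               0x00000019
#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX  23:16

/* Hypothetical helpers (not the driver's real macros). */
#define FIELD_LO(range)      (0 ? range)   /* low bit index of "hi:lo"  */
#define FIELD_HI(range)      (1 ? range)   /* high bit index of "hi:lo" */
#define FIELD_MASK(range)    ((0xFFFFFFFFu >> (31 - FIELD_HI(range) + FIELD_LO(range))) << FIELD_LO(range))
#define FIELD_GET(range, v)  (((v) & FIELD_MASK(range)) >> FIELD_LO(range))

/* 'caps' is assumed to point at the capabilities notifier viewed as dwords. */
static void dump_sor2_caps(const uint32_t *caps)
{
    uint32_t w0 = caps[NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24];
    uint32_t w1 = caps[NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25];

    printf("SOR2 dual TMDS:    %s\n",
           FIELD_GET(NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS, w0) ? "yes" : "no");
    printf("SOR2 TMDS clk max: 0x%02x\n",
           (unsigned)FIELD_GET(NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX, w1));
}
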
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 0x0000002B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44 0x0000002C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_44_R0 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45 0x0000002D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR0_45_R1 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46 0x0000002E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_46_R0 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47 0x0000002F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR1_47_R1 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48 0x00000030 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_48_R0 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49 0x00000031 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR2_49_R1 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50 0x00000032 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_50_R0 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51 0x00000033 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SIR3_51_R1 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define 
NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 
+#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV957D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV957D_DMA 0x00000000 +#define NV957D_DMA_OPCODE 31:29 +#define NV957D_DMA_OPCODE_METHOD 0x00000000 +#define NV957D_DMA_OPCODE_JUMP 0x00000001 +#define NV957D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV957D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV957D_DMA_METHOD_COUNT 27:18 +#define NV957D_DMA_METHOD_OFFSET 11:2 +#define NV957D_DMA_DATA 31:0 +#define NV957D_DMA_DATA_NOP 0x00000000 +#define NV957D_DMA_JUMP_OFFSET 11:2 +#define NV957D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV957D_PUT (0x00000000) +#define NV957D_PUT_PTR 11:2 +#define NV957D_GET (0x00000004) +#define NV957D_GET_PTR 11:2 +#define NV957D_UPDATE (0x00000080) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV957D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define 
NV957D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV957D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV957D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV957D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV957D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV957D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV957D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV957D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV957D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV957D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV957D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV957D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV957D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV957D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV957D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV957D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV957D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV957D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV957D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV957D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV957D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV957D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV957D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV957D_GET_CAPABILITIES (0x0000008C) +#define NV957D_GET_CAPABILITIES_DUMMY 31:0 +#define NV957D_SET_SPARE (0x0000016C) +#define NV957D_SET_SPARE_UNUSED 31:0 +#define NV957D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV957D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV957D_DAC_SET_CONTROL(a) 
(0x00000180 + (a)*0x00000020) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV957D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV957D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV957D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV957D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV957D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV957D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV957D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV957D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV957D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV957D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV957D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV957D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV957D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV957D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV957D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV957D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV957D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV957D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV957D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV957D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV957D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV957D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV957D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV957D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV957D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define 
NV957D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV957D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV957D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV957D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV957D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV957D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV957D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 20:13 +#define NV957D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV957D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 
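
Aside (illustrative, not part of the header being added): the "dma opcode instructions" defines earlier in this class describe how a method header dword is laid out, and the class method defines such as NV957D_SOR_SET_CONTROL(a) give the byte offsets that go with it. A minimal sketch of packing one such entry follows; the FIELD_* helpers are the same hypothetical stand-ins as in the earlier sketch, and reading METHOD_COUNT as "number of data dwords" and METHOD_OFFSET as carrying the method's dword-aligned byte offset in bits 11:2 is an assumption for illustration only.

/* Sketch: composing a METHOD header plus one data dword for SOR0 SET_CONTROL. */
#include <stdint.h>
#include <stdio.h>

/* Copied from the class header above. */
#define NV957D_DMA_OPCODE                              31:29
#define NV957D_DMA_OPCODE_METHOD                       0x00000000
#define NV957D_DMA_METHOD_COUNT                        27:18
#define NV957D_DMA_METHOD_OFFSET                       11:2
#define NV957D_SOR_SET_CONTROL(a)                      (0x00000200 + (a)*0x00000020)
#define NV957D_SOR_SET_CONTROL_OWNER_MASK              3:0
#define NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD0        (0x00000001)
#define NV957D_SOR_SET_CONTROL_PROTOCOL                11:8
#define NV957D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A  (0x00000001)

/* Hypothetical DRF-style helpers (as in the earlier sketch). */
#define FIELD_LO(range)      (0 ? range)
#define FIELD_HI(range)      (1 ? range)
#define FIELD_MASK(range)    ((0xFFFFFFFFu >> (31 - FIELD_HI(range) + FIELD_LO(range))) << FIELD_LO(range))
#define FIELD_NUM(range, n)  (((uint32_t)(n) << FIELD_LO(range)) & FIELD_MASK(range))

int main(void)
{
    /* Header dword: METHOD opcode, one data dword, SOR0's SET_CONTROL offset. */
    uint32_t hdr  = FIELD_NUM(NV957D_DMA_OPCODE,       NV957D_DMA_OPCODE_METHOD)
                  | FIELD_NUM(NV957D_DMA_METHOD_COUNT, 1)
                  | (NV957D_SOR_SET_CONTROL(0) & FIELD_MASK(NV957D_DMA_METHOD_OFFSET));

    /* Data dword: head 0 owns SOR0, single-link TMDS A protocol. */
    uint32_t data = FIELD_NUM(NV957D_SOR_SET_CONTROL_OWNER_MASK, NV957D_SOR_SET_CONTROL_OWNER_MASK_HEAD0)
                  | FIELD_NUM(NV957D_SOR_SET_CONTROL_PROTOCOL,   NV957D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A);

    printf("header 0x%08x  data 0x%08x\n", (unsigned)hdr, (unsigned)data);
    return 0;
}
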
+#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define 
NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN 
29:25 +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV957D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV957D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV957D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV957D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV957D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV957D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV957D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV957D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV957D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV957D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV957D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV957D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV957D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV957D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV957D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV957D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define 
NV957D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV957D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV957D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV957D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV957D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV957D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define 
NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV957D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define 
NV957D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV957D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV957D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV957D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV957D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV957D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define 
NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV957D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV957D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV957D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV957D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV957D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV957D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV957D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV957D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV957D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV957D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV957D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV957D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV957D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV957D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV957D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV957D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV957D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV957D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV957D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV957D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV957D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV957D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS 
(0x00000023) +#define NV957D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV957D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV957D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV957D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV957D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV957D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV957D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV957D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV957D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV957D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV957D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV957D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV957D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define 
NV957D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV957D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV957D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV957D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV957D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV957D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV957D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV957D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV957D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define 
NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV957D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV957D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV957D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV957D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV957D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV957D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV957D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV957D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define 
NV957D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV957D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV957D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV957D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV957D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV957D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV957D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV957D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV957D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV957D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV957D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV957D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV957D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE 
(0x00000001) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV957D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV957D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV957D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV957D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7 +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NV957D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define 
NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV957D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV957D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV957D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV957D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV957D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl957d_h diff --git a/src/common/sdk/nvidia/inc/class/cl9770.h b/src/common/sdk/nvidia/inc/class/cl9770.h new file mode 100644 index 000000000..28ab00d0b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9770.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9770_h_ +#define _cl9770_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV9770_DISPLAY (0x00009770) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9770_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9770_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl977d.h b/src/common/sdk/nvidia/inc/class/cl977d.h new file mode 100644 index 000000000..adf59a20f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl977d.h @@ -0,0 +1,1587 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl977d_h_ +#define _cl977d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV977D_CORE_CHANNEL_DMA (0x0000977D) + +#define NV977D_CORE_NOTIFIER_3 0x00000000 +#define NV977D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV977D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV977D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV977D_CORE_NOTIFIER_3__1 0x00000001 +#define NV977D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV977D_CORE_NOTIFIER_3__2 0x00000002 +#define NV977D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV977D_CORE_NOTIFIER_3__3 0x00000003 +#define NV977D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 
0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_11 
0x0000000B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 
0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 
+#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:7 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 
0x0000002B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define 
NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV977D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV977D_DMA 0x00000000 +#define NV977D_DMA_OPCODE 31:29 +#define NV977D_DMA_OPCODE_METHOD 0x00000000 +#define NV977D_DMA_OPCODE_JUMP 0x00000001 +#define NV977D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV977D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV977D_DMA_METHOD_COUNT 27:18 +#define NV977D_DMA_METHOD_OFFSET 11:2 +#define NV977D_DMA_DATA 31:0 +#define NV977D_DMA_DATA_NOP 0x00000000 +#define NV977D_DMA_JUMP_OFFSET 11:2 +#define NV977D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV977D_PUT (0x00000000) +#define NV977D_PUT_PTR 11:2 +#define NV977D_GET (0x00000004) +#define NV977D_GET_PTR 11:2 +#define NV977D_UPDATE (0x00000080) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV977D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define 
NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV977D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV977D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV977D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV977D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV977D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV977D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV977D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV977D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV977D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV977D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV977D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV977D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV977D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV977D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV977D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV977D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV977D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV977D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV977D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV977D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV977D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV977D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV977D_GET_CAPABILITIES (0x0000008C) +#define NV977D_GET_CAPABILITIES_DUMMY 31:0 +#define NV977D_SET_SPARE (0x0000016C) +#define NV977D_SET_SPARE_UNUSED 31:0 +#define NV977D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define 
NV977D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV977D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV977D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV977D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV977D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV977D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV977D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV977D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV977D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV977D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV977D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV977D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV977D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV977D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV977D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV977D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV977D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV977D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV977D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV977D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV977D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV977D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV977D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV977D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV977D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV977D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV977D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV977D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV977D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV977D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV977D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define 
NV977D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV977D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV977D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV977D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV977D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV977D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV977D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV977D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV977D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV977D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 24:13 +#define NV977D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV977D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) 
+#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define 
NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV977D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define 
NV977D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV977D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV977D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV977D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV977D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV977D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV977D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV977D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV977D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV977D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV977D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NV977D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV977D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define 
NV977D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV977D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV977D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV977D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV977D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV977D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV977D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define 
NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV977D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define 
NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define NV977D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV977D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV977D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV977D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV977D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV977D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define 
NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV977D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV977D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV977D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV977D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV977D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV977D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV977D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV977D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV977D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV977D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV977D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV977D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV977D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV977D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV977D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV977D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV977D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV977D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV977D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV977D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV977D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV977D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NV977D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV977D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV977D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define 
NV977D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV977D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV977D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV977D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV977D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV977D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV977D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV977D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV977D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV977D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV977D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV977D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV977D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV977D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV977D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV977D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV977D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define 
NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV977D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV977D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV977D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV977D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV977D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV977D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV977D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV977D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV977D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV977D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV977D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV977D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV977D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV977D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV977D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV977D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV977D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV977D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + (a)*0x00000300) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define 
NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV977D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV977D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV977D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV977D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV977D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV977D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV977D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV977D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV977D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV977D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV977D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_GRN2RED(a) (0x000004F4 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define 
NV977D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV977D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV977D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV977D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV977D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV977D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV977D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV977D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV977D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV977D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV977D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV977D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV977D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV977D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV977D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV977D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV977D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) 
+#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV977D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV977D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV977D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV977D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV977D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV977D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV977D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV977D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV977D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV977D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 
7:7 +#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NV977D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV977D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV977D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV977D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV977D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV977D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl977d_h diff --git a/src/common/sdk/nvidia/inc/class/cl9870.h b/src/common/sdk/nvidia/inc/class/cl9870.h new file mode 100644 index 000000000..bcfc9900e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl9870.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl9870_h_ +#define _cl9870_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV9870_DISPLAY (0x00009870) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numDacs; // Number of DACs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NV9870_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cl9870_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cl987d.h b/src/common/sdk/nvidia/inc/class/cl987d.h new file mode 100644 index 000000000..ab01f62c3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cl987d.h @@ -0,0 +1,1590 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _cl987d_h_ +#define _cl987d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NV987D_CORE_CHANNEL_DMA (0x0000987D) + +#define NV987D_CORE_NOTIFIER_3 0x00000000 +#define NV987D_CORE_NOTIFIER_3_SIZEOF 0x00000150 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0 0x00000000 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE 0:0 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_R0 15:1 +#define NV987D_CORE_NOTIFIER_3_COMPLETION_0_TIMESTAMP 29:16 +#define NV987D_CORE_NOTIFIER_3__1 0x00000001 +#define NV987D_CORE_NOTIFIER_3__1_R1 31:0 +#define NV987D_CORE_NOTIFIER_3__2 0x00000002 +#define NV987D_CORE_NOTIFIER_3__2_R2 31:0 +#define NV987D_CORE_NOTIFIER_3__3 0x00000003 +#define NV987D_CORE_NOTIFIER_3__3_R3 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_VM_USABLE4ISO_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_NVM_USABLE4ISO_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_R0 19:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA 20:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_FOS_FETCH_X4AA_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_R1 29:21 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE 30:30 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN0USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_4_INTERNAL_FLIP_LOCK_PIN1USABLE_TRUE 
0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5 0x00000005 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE 3:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN0USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN1USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE 11:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN2USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE 15:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN3USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE 19:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN4USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN5USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE 27:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN6USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_5_LOCK_PIN7USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6 0x00000006 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE 3:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_SCAN_LOCK 0x00000001 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN8USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN9USAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE 11:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_AUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE 15:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_BUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE 19:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_CUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_DUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE 27:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_EUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_UNAVAILABLE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_SCAN_LOCK 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_FLIP_LOCK 0x00000002 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_6_LOCK_PIN_FUSAGE_STEREO 0x00000004 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7 0x00000007 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7_DISPCLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_7_R4 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_8 0x00000008 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_8_R5 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_9 0x00000009 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_9_R6 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_10 0x0000000A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_10_R7 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_11 
0x0000000B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_11_R8 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12 0x0000000C +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_12_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13 0x0000000D +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC0_13_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14 0x0000000E +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_14_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15 0x0000000F +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC1_15_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16 0x00000010 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_16_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17 0x00000011 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC2_17_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18 0x00000012 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_FALSE 0x00000000 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_RGB_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_TV_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_SCART_USABLE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_18_R0 31:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19 0x00000013 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_CRT_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_DAC3_19_R1 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20 0x00000014 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE 26:26 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_20_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21 0x00000015 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR0_21_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22 0x00000016 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_22_R6 31:28 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23 0x00000017 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR1_23_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24 0x00000018 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_24_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25 0x00000019 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_R7 15:8 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR2_25_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26 0x0000001A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_26_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27 0x0000001B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR3_27_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28 0x0000001C +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_28_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29 0x0000001D +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR4_29_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30 0x0000001E +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS18_TRUE 0x00000001 
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_30_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31 0x0000001F +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR5_31_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32 0x00000020 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_LVDS24_TRUE 
0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_32_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33 0x00000021 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR6_33_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34 0x00000022 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18 2:2 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_FALSE 0x00000000 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS18_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24 3:3 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_LVDS24_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R0 7:4 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A 8:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B 9:9 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_SINGLE_TMDS_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R1 10:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS 11:11 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DUAL_TMDS_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R2 13:12 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R3 16:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R4 19:17 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R5 23:20 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A 24:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_A_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B 25:25 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_B_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE 26:26 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_DP_INTERLACE_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_34_R6 31:28 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35 0x00000023 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_DP_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_R7 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_TMDS_CLK_MAX 23:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_SOR7_35_LVDS_CLK_MAX 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36 0x00000024 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TV_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_36_R0 31:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37 0x00000025 
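The CAP_* defines follow a fixed pattern: the *_NN define whose value matches its decimal suffix (CAP_PIOR0_36 is 0x24, CAP_PIOR0_37 is 0x25) gives the 32-bit word index of that entry inside the core channel's capabilities notifier, and the HIGH:LOW defines grouped with it (for example CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0, just below) name bit fields within that word. A minimal read sketch follows, assuming the defines from this header are in scope and that caps points at the first word of the notifier; the CAP_FLD_* helpers are hypothetical stand-ins for the driver's DRF-style field macros.

#include <stdint.h>

/* Hypothetical helpers: a HIGH:LOW field define such as 7:0 yields its low
   bit via (0 ? f) and its high bit via (1 ? f). */
#define CAP_FLD_LO(f)     ((0 ? f) & 31)
#define CAP_FLD_HI(f)     ((1 ? f) & 31)
#define CAP_FLD_GET(f, v) (((v) >> CAP_FLD_LO(f)) & \
                           (0xFFFFFFFFu >> (31 - CAP_FLD_HI(f) + CAP_FLD_LO(f))))

/* Read PIOR0's external encoder clock limit out of the capabilities
   notifier, treated here as an array of 32-bit words. */
static inline uint32_t nv987d_pior0_ext_enc_clk_max(const volatile uint32_t *caps)
{
    uint32_t w = caps[NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37];
    return CAP_FLD_GET(NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX, w);
}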
+#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_EXT_ENC_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R1 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR0_37_R2 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38 0x00000026 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TV_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_38_R0 31:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39 0x00000027 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_EXT_ENC_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R1 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR1_39_R2 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40 0x00000028 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TV_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_40_R0 31:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41 0x00000029 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_EXT_ENC_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R1 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR2_41_R2 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42 0x0000002A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC 0:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC 1:1 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TV_ENC_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED 6:6 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_FALSE 0x00000000 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_EXT_TMDS10BPC_ALLOWED_TRUE 0x00000001 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_42_R0 31:10 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43 
0x0000002B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_EXT_ENC_CLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R1 15:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_PIOR3_43_R2 31:24 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52 0x00000034 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_52_R0 31:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53 0x00000035 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R1 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_MAX_PIXELS5TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_53_R2 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54 0x00000036 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R3 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_MAX_PIXELS3TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_54_R4 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55 0x00000037 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R5 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_MAX_PIXELS2TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_55_R6 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56 0x00000038 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_PCLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_56_R7 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57 0x00000039 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_57_R8 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58 0x0000003A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_58_R9 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59 0x0000003B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD0_59_R10 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60 0x0000003C +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_60_R0 31:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61 0x0000003D +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R1 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_MAX_PIXELS5TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_61_R2 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62 0x0000003E +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R3 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_MAX_PIXELS3TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_62_R4 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63 0x0000003F +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R5 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_MAX_PIXELS2TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_63_R6 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64 0x00000040 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_PCLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_64_R7 31:8 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65 0x00000041 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_65_R8 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66 0x00000042 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_66_R9 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67 0x00000043 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD1_67_R10 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68 0x00000044 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_68_R0 31:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69 0x00000045 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R1 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_MAX_PIXELS5TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_69_R2 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70 0x00000046 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R3 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_MAX_PIXELS3TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_70_R4 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71 0x00000047 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R5 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_MAX_PIXELS2TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_71_R6 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72 0x00000048 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_PCLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_72_R7 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73 0x00000049 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_73_R8 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74 0x0000004A +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_74_R9 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75 0x0000004B +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD2_75_R10 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76 0x0000004C +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_76_R0 31:14 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77 0x0000004D +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R1 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_MAX_PIXELS5TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_77_R2 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78 0x0000004E +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R3 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_MAX_PIXELS3TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_78_R4 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79 0x0000004F +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP444 14:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R5 15:15 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_MAX_PIXELS2TAP422 30:16 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_79_R6 31:31 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80 0x00000050 +#define 
NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_PCLK_MAX 7:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_80_R7 31:8 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81 0x00000051 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_81_R8 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82 0x00000052 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_82_R9 31:0 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83 0x00000053 +#define NV987D_CORE_NOTIFIER_3_CAPABILITIES_CAP_HEAD3_83_R10 31:0 + + +// dma opcode instructions +#define NV987D_DMA 0x00000000 +#define NV987D_DMA_OPCODE 31:29 +#define NV987D_DMA_OPCODE_METHOD 0x00000000 +#define NV987D_DMA_OPCODE_JUMP 0x00000001 +#define NV987D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NV987D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NV987D_DMA_METHOD_COUNT 27:18 +#define NV987D_DMA_METHOD_OFFSET 11:2 +#define NV987D_DMA_DATA 31:0 +#define NV987D_DMA_DATA_NOP 0x00000000 +#define NV987D_DMA_JUMP_OFFSET 11:2 +#define NV987D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NV987D_PUT (0x00000000) +#define NV987D_PUT_PTR 11:2 +#define NV987D_GET (0x00000004) +#define NV987D_GET_PTR 11:2 +#define NV987D_UPDATE (0x00000080) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR(i) (0 +(i)*4):(0 +(i)*4) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR__SIZE_1 4 +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0 0:0 +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1 4:4 +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2 8:8 +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3 12:12 +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE(i) (1 +(i)*4):(1 +(i)*4) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE__SIZE_1 4 +#define NV987D_UPDATE_INTERLOCK_WITH_BASE_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE0 1:1 +#define NV987D_UPDATE_INTERLOCK_WITH_BASE0_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE0_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE1 5:5 +#define NV987D_UPDATE_INTERLOCK_WITH_BASE1_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE1_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE2 9:9 +#define NV987D_UPDATE_INTERLOCK_WITH_BASE2_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE2_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE3 13:13 +#define NV987D_UPDATE_INTERLOCK_WITH_BASE3_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_BASE3_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY(i) (2 +(i)*4):(2 +(i)*4) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY__SIZE_1 4 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0 2:2 +#define 
NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY0_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1 6:6 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY1_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2 10:10 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY2_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3 14:14 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY3_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM(i) (3 +(i)*4):(3 +(i)*4) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM__SIZE_1 4 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0 3:3 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM0_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1 7:7 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM1_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2 11:11 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM2_ENABLE (0x00000001) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3 15:15 +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_DISABLE (0x00000000) +#define NV987D_UPDATE_INTERLOCK_WITH_OVERLAY_IMM3_ENABLE (0x00000001) +#define NV987D_UPDATE_SPECIAL_HANDLING 25:24 +#define NV987D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NV987D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NV987D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NV987D_UPDATE_SPECIAL_HANDLING_REASON 23:16 +#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY 31:31 +#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY_FALSE (0x00000000) +#define NV987D_UPDATE_NOT_DRIVER_FRIENDLY_TRUE (0x00000001) +#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY 30:30 +#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY_FALSE (0x00000000) +#define NV987D_UPDATE_NOT_DRIVER_UNFRIENDLY_TRUE (0x00000001) +#define NV987D_UPDATE_INHIBIT_INTERRUPTS 29:29 +#define NV987D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NV987D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NV987D_SET_NOTIFIER_CONTROL (0x00000084) +#define NV987D_SET_NOTIFIER_CONTROL_MODE 30:30 +#define NV987D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NV987D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NV987D_SET_NOTIFIER_CONTROL_OFFSET 11:2 +#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY 31:31 +#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NV987D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NV987D_SET_NOTIFIER_CONTROL_FORMAT 28:28 +#define NV987D_SET_NOTIFIER_CONTROL_FORMAT_LEGACY (0x00000000) +#define NV987D_SET_NOTIFIER_CONTROL_FORMAT_FOUR_WORD (0x00000001) +#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE 0:0 +#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE_FALSE (0x00000000) +#define NV987D_SET_NOTIFIER_CONTROL_NO_WAIT_ACTIVE_TRUE (0x00000001) +#define NV987D_SET_CONTEXT_DMA_NOTIFIER (0x00000088) +#define NV987D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NV987D_GET_CAPABILITIES (0x0000008C) 
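The NV987D_DMA_* defines earlier in this block describe the 32-bit headers that precede method data in the core channel push buffer: OPCODE selects the header kind, METHOD_COUNT gives the number of data words that follow, and METHOD_OFFSET carries bits 11:2 of the 4-byte-aligned method offset (such as NV987D_UPDATE above). The packing sketch below is illustrative only and assumes the defines from this header are in scope; the HDR_FLD_* helpers are hypothetical stand-ins for the driver's DRF-style macros.

#include <stdint.h>

/* Hypothetical packing helper for a HIGH:LOW field define (e.g. 31:29). */
#define HDR_FLD_LO(f)     ((0 ? f) & 31)
#define HDR_FLD_HI(f)     ((1 ? f) & 31)
#define HDR_FLD_NUM(f, n) (((uint32_t)(n) & \
                           (0xFFFFFFFFu >> (31 - HDR_FLD_HI(f) + HDR_FLD_LO(f)))) << HDR_FLD_LO(f))

/* Build the push-buffer header for a METHOD write of `count` data words
   starting at the 4-byte-aligned method offset `method`. */
static inline uint32_t nv987d_method_header(uint32_t method, uint32_t count)
{
    return HDR_FLD_NUM(NV987D_DMA_OPCODE,        NV987D_DMA_OPCODE_METHOD) |
           HDR_FLD_NUM(NV987D_DMA_METHOD_COUNT,  count) |
           HDR_FLD_NUM(NV987D_DMA_METHOD_OFFSET, method >> 2);
}

/* Example: queue an NV987D_UPDATE method with a single zero data word.
     pushbuf[put++] = nv987d_method_header(NV987D_UPDATE, 1);
     pushbuf[put++] = 0;                                        */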
+#define NV987D_GET_CAPABILITIES_DUMMY 31:0 +#define NV987D_SET_SPARE (0x0000016C) +#define NV987D_SET_SPARE_UNUSED 31:0 +#define NV987D_SET_SPARE_NOOP(b) (0x00000170 + (b)*0x00000004) +#define NV987D_SET_SPARE_NOOP_UNUSED 31:0 + +#define NV987D_DAC_SET_CONTROL(a) (0x00000180 + (a)*0x00000020) +#define NV987D_DAC_SET_CONTROL_OWNER_MASK 3:0 +#define NV987D_DAC_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV987D_DAC_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV987D_DAC_SET_CONTROL_PROTOCOL 12:8 +#define NV987D_DAC_SET_CONTROL_PROTOCOL_RGB_CRT (0x00000000) +#define NV987D_DAC_SET_CONTROL_PROTOCOL_YUV_CRT (0x00000013) +#define NV987D_DAC_SET_SW_SPARE_A(a) (0x00000184 + (a)*0x00000020) +#define NV987D_DAC_SET_SW_SPARE_A_CODE 31:0 +#define NV987D_DAC_SET_SW_SPARE_B(a) (0x00000188 + (a)*0x00000020) +#define NV987D_DAC_SET_SW_SPARE_B_CODE 31:0 +#define NV987D_DAC_SET_CUSTOM_REASON(a) (0x00000190 + (a)*0x00000020) +#define NV987D_DAC_SET_CUSTOM_REASON_CODE 31:0 + +#define NV987D_SOR_SET_CONTROL(a) (0x00000200 + (a)*0x00000020) +#define NV987D_SOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV987D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV987D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV987D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NV987D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NV987D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NV987D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NV987D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NV987D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NV987D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NV987D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV987D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV987D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NV987D_SOR_SET_SW_SPARE_A(a) (0x00000204 + (a)*0x00000020) +#define NV987D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NV987D_SOR_SET_SW_SPARE_B(a) (0x00000208 + (a)*0x00000020) +#define NV987D_SOR_SET_SW_SPARE_B_CODE 31:0 +#define NV987D_SOR_SET_CUSTOM_REASON(a) (0x00000210 + (a)*0x00000020) +#define NV987D_SOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV987D_PIOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NV987D_PIOR_SET_CONTROL_OWNER_MASK 3:0 +#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NV987D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NV987D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NV987D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NV987D_PIOR_SET_CONTROL_PROTOCOL_EXT_TV_ENC (0x00000001) +#define 
NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 14:14 +#define NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV987D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV987D_PIOR_SET_SW_SPARE_A(a) (0x00000304 + (a)*0x00000020) +#define NV987D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NV987D_PIOR_SET_SW_SPARE_B(a) (0x00000308 + (a)*0x00000020) +#define NV987D_PIOR_SET_SW_SPARE_B_CODE 31:0 +#define NV987D_PIOR_SET_CUSTOM_REASON(a) (0x00000310 + (a)*0x00000020) +#define NV987D_PIOR_SET_CUSTOM_REASON_CODE 31:0 + +#define NV987D_HEAD_SET_PRESENT_CONTROL(a) (0x00000400 + (a)*0x00000300) +#define NV987D_HEAD_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 8:8 +#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NV987D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NV987D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00000404 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 3:3 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 4:4 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 9:6 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 12:12 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 24:13 +#define NV987D_HEAD_SET_CONTROL(a) (0x00000408 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTROL_STRUCTURE 0:0 +#define NV987D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_STRUCTURE_INTERLACED (0x00000001) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 3:2 +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK 
(0x00000001) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 19:15 +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV987D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 7:4 +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 9:8 +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 14:10 +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define 
NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV987D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK 1:1 +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN 24:20 +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E 
+(i)) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV987D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN 29:25 +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000000) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000002) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000003) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000004) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000005) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000006) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000007) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000008) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x00000009) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000A) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000B) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000C) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000D) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000E) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x0000000F) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_UNSPECIFIED (0x00000010) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x0000001E) +#define NV987D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_OVERSCAN_COLOR(a) (0x00000410 + (a)*0x00000300) +#define NV987D_HEAD_SET_OVERSCAN_COLOR_RED 9:0 +#define NV987D_HEAD_SET_OVERSCAN_COLOR_GRN 19:10 +#define NV987D_HEAD_SET_OVERSCAN_COLOR_BLU 29:20 +#define NV987D_HEAD_SET_RASTER_SIZE(a) (0x00000414 + (a)*0x00000300) +#define NV987D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NV987D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NV987D_HEAD_SET_RASTER_SYNC_END(a) (0x00000418 + (a)*0x00000300) +#define NV987D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NV987D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NV987D_HEAD_SET_RASTER_BLANK_END(a) (0x0000041C + (a)*0x00000300) +#define NV987D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NV987D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define 
NV987D_HEAD_SET_RASTER_BLANK_START(a) (0x00000420 + (a)*0x00000300) +#define NV987D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NV987D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NV987D_HEAD_SET_RASTER_VERT_BLANK2(a) (0x00000424 + (a)*0x00000300) +#define NV987D_HEAD_SET_RASTER_VERT_BLANK2_YSTART 14:0 +#define NV987D_HEAD_SET_RASTER_VERT_BLANK2_YEND 30:16 +#define NV987D_HEAD_SET_LOCK_CHAIN(a) (0x00000428 + (a)*0x00000300) +#define NV987D_HEAD_SET_LOCK_CHAIN_POSITION 27:24 +#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR(a) (0x0000042C + (a)*0x00000300) +#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_RED 9:0 +#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_GREEN 19:10 +#define NV987D_HEAD_SET_DEFAULT_BASE_COLOR_BLUE 29:20 +#define NV987D_HEAD_SET_CRC_CONTROL(a) (0x00000430 + (a)*0x00000300) +#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 1:0 +#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000000) +#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_BASE (0x00000001) +#define NV987D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_OVERLAY (0x00000002) +#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 2:2 +#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NV987D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE 3:3 +#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_FALSE (0x00000000) +#define NV987D_HEAD_SET_CRC_CONTROL_TIMESTAMP_MODE_TRUE (0x00000001) +#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE 4:4 +#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_FALSE (0x00000000) +#define NV987D_HEAD_SET_CRC_CONTROL_FLIPLOCK_MODE_TRUE (0x00000001) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT 19:8 +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC__SIZE_1 4 +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC0 (0x00000FF0) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC1 (0x00000FF1) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC2 (0x00000FF2) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC3 (0x00000FF3) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG__SIZE_1 4 +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG0 (0x00000FF8) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG1 (0x00000FF9) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG2 (0x00000FFA) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_RG3 (0x00000FFB) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR__SIZE_1 8 +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR0 (0x00000F0F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR1 (0x00000F1F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR2 (0x00000F2F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR3 (0x00000F3F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR4 (0x00000F4F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR5 (0x00000F5F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR6 (0x00000F6F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR7 (0x00000F7F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF__SIZE_1 4 +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF0 (0x00000F8F) +#define 
NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF1 (0x00000F9F) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF2 (0x00000FAF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF3 (0x00000FBF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR__SIZE_1 8 +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR0 (0x000000FF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR1 (0x000001FF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR2 (0x000002FF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR3 (0x000003FF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR4 (0x000004FF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR5 (0x000005FF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR6 (0x000006FF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR7 (0x000007FF) +#define NV987D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_NONE (0x00000FFF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT 31:20 +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC(i) (0x00000FF0 +(i)) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC__SIZE_1 4 +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC0 (0x00000FF0) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC1 (0x00000FF1) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC2 (0x00000FF2) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_DAC3 (0x00000FF3) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG(i) (0x00000FF8 +(i)) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG__SIZE_1 4 +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG0 (0x00000FF8) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG1 (0x00000FF9) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG2 (0x00000FFA) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_RG3 (0x00000FFB) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR(i) (0x00000F0F +(i)*16) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR__SIZE_1 8 +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR0 (0x00000F0F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR1 (0x00000F1F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR2 (0x00000F2F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR3 (0x00000F3F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR4 (0x00000F4F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR5 (0x00000F5F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR6 (0x00000F6F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SOR7 (0x00000F7F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF(i) (0x00000F8F +(i)*16) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF__SIZE_1 4 +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF0 (0x00000F8F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF1 (0x00000F9F) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF2 (0x00000FAF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_SF3 (0x00000FBF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR(i) (0x000000FF +(i)*256) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR__SIZE_1 8 +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR0 (0x000000FF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR1 (0x000001FF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR2 (0x000002FF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR3 (0x000003FF) +#define 
NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR4 (0x000004FF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR5 (0x000005FF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR6 (0x000006FF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_PIOR7 (0x000007FF) +#define NV987D_HEAD_SET_CRC_CONTROL_SECONDARY_OUTPUT_NONE (0x00000FFF) +#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 +#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NV987D_HEAD_SET_BASE_LUT_LO(a) (0x00000440 + (a)*0x00000300) +#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE 31:31 +#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE 27:24 +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV987D_HEAD_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_BASE_LUT_HI(a) (0x00000444 + (a)*0x00000300) +#define NV987D_HEAD_SET_BASE_LUT_HI_ORIGIN 31:0 +#define NV987D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE 31:31 +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE 27:24 +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000003) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000004) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000006) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000007) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000008) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE 20:20 +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_OUTPUT_LUT_HI(a) (0x0000044C + (a)*0x00000300) +#define NV987D_HEAD_SET_OUTPUT_LUT_HI_ORIGIN 31:0 +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x00000450 + (a)*0x00000300) +#define 
NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x00000454 + (a)*0x00000300) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE 21:20 +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_25 (0x00000000) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_28 (0x00000001) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_MODE_CLK_CUSTOM (0x00000002) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 24:24 +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING 25:25 +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_FALSE (0x00000000) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_ENABLE_HOPPING_TRUE (0x00000001) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 26:26 +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NV987D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00000458 + (a)*0x00000300) +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NV987D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NV987D_HEAD_SET_CONTEXT_DMA_LUT(a) (0x0000045C + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTEXT_DMA_LUT_HANDLE 31:0 +#define NV987D_HEAD_SET_OFFSET(a) (0x00000460 + (a)*0x00000300) +#define NV987D_HEAD_SET_OFFSET_ORIGIN 31:0 +#define NV987D_HEAD_SET_SIZE(a) (0x00000468 + (a)*0x00000300) +#define NV987D_HEAD_SET_SIZE_WIDTH 15:0 +#define NV987D_HEAD_SET_SIZE_HEIGHT 31:16 +#define NV987D_HEAD_SET_STORAGE(a) (0x0000046C + (a)*0x00000300) +#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NV987D_HEAD_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NV987D_HEAD_SET_STORAGE_PITCH 20:8 +#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT 24:24 +#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NV987D_HEAD_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NV987D_HEAD_SET_PARAMS(a) (0x00000470 + (a)*0x00000300) +#define NV987D_HEAD_SET_PARAMS_FORMAT 15:8 +#define NV987D_HEAD_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NV987D_HEAD_SET_PARAMS_FORMAT_VOID16 (0x0000001F) +#define NV987D_HEAD_SET_PARAMS_FORMAT_VOID32 (0x0000002E) +#define NV987D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NV987D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NV987D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NV987D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NV987D_HEAD_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define 
NV987D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NV987D_HEAD_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NV987D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NV987D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NV987D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NV987D_HEAD_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE 1:0 +#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV987D_HEAD_SET_PARAMS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV987D_HEAD_SET_PARAMS_GAMMA 2:2 +#define NV987D_HEAD_SET_PARAMS_GAMMA_LINEAR (0x00000000) +#define NV987D_HEAD_SET_PARAMS_GAMMA_SRGB (0x00000001) +#define NV987D_HEAD_SET_CONTEXT_DMAS_ISO(a) (0x00000474 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTEXT_DMAS_ISO_HANDLE 31:0 +#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x0000047C + (a)*0x00000300) +#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 1:0 +#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NV987D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_SPEC_FLIP (0x00000002) +#define NV987D_HEAD_SET_CONTROL_CURSOR(a) (0x00000480 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT 25:24 +#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x00000000) +#define NV987D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE 27:26 +#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NV987D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NV987D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 15:8 +#define NV987D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 23:16 +#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION 29:28 +#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) +#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) +#define NV987D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV987D_HEAD_SET_OFFSETS_CURSOR(a,b) (0x00000484 + (a)*0x00000300 + (b)*0x00000004) +#define NV987D_HEAD_SET_OFFSETS_CURSOR_ORIGIN 31:0 +#define NV987D_HEAD_SET_CONTEXT_DMAS_CURSOR(a,b) (0x0000048C + (a)*0x00000300 + (b)*0x00000004) +#define NV987D_HEAD_SET_CONTEXT_DMAS_CURSOR_HANDLE 31:0 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00000494 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1 (0x00000000) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3 (0x00000002) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3_ADAPTIVE (0x00000003) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 4:3 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1 (0x00000000) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8 (0x00000002) +#define 
NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_HRESPONSE_BIAS 23:16 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_VRESPONSE_BIAS 31:24 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422 8:8 +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_OUTPUT_SCALER_FORCE422_ENABLE (0x00000001) +#define NV987D_HEAD_SET_PROCAMP(a) (0x00000498 + (a)*0x00000300) +#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NV987D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF 2:2 +#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF_AUTO (0x00000000) +#define NV987D_HEAD_SET_PROCAMP_CHROMA_LPF_ON (0x00000001) +#define NV987D_HEAD_SET_PROCAMP_SAT_COS 19:8 +#define NV987D_HEAD_SET_PROCAMP_SAT_SINE 31:20 +#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 5:5 +#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NV987D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 6:6 +#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NV987D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NV987D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) +#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_DITHER_CONTROL_BITS 2:1 +#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_6_BITS (0x00000000) +#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_8_BITS (0x00000001) +#define NV987D_HEAD_SET_DITHER_CONTROL_BITS_DITHER_TO_10_BITS (0x00000002) +#define NV987D_HEAD_SET_DITHER_CONTROL_MODE 6:3 +#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NV987D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NV987D_HEAD_SET_DITHER_CONTROL_PHASE 8:7 +#define NV987D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x000004B0 + (a)*0x00000300) +#define NV987D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NV987D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x000004B8 + (a)*0x00000300) +#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NV987D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x000004BC + (a)*0x00000300) +#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NV987D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x000004C0 + (a)*0x00000300) +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(a) (0x000004C4 + (a)*0x00000300) +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_WIDTH 14:0 +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN_HEIGHT 30:16 +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(a) (0x000004C8 + (a)*0x00000300) +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_WIDTH 14:0 +#define NV987D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX_HEIGHT 30:16 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(a) (0x000004D0 + 
(a)*0x00000300) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE 0:0 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_8 (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE 13:12 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X1_AA (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_SUPER_SAMPLE_X4_AA (0x00000002) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT 17:16 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT 21:20 +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV987D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS(a) (0x000004D4 + (a)*0x00000300) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE 0:0 +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_FALSE (0x00000000) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_USABLE_TRUE (0x00000001) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH 11:8 +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_16 (0x00000001) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_32 (0x00000003) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_BPP_64 (0x00000005) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT 13:12 +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV987D_HEAD_SET_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) +#define NV987D_HEAD_SET_PROCESSING(a) (0x000004E0 + (a)*0x00000300) +#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS 0:0 +#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS_DISABLE (0x00000000) +#define NV987D_HEAD_SET_PROCESSING_USE_GAIN_OFS_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONVERSION_RED(a) (0x000004E4 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONVERSION_RED_GAIN 15:0 +#define NV987D_HEAD_SET_CONVERSION_RED_OFS 31:16 +#define NV987D_HEAD_SET_CONVERSION_GRN(a) (0x000004E8 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONVERSION_GRN_GAIN 15:0 +#define NV987D_HEAD_SET_CONVERSION_GRN_OFS 31:16 +#define NV987D_HEAD_SET_CONVERSION_BLU(a) (0x000004EC + (a)*0x00000300) +#define NV987D_HEAD_SET_CONVERSION_BLU_GAIN 15:0 +#define NV987D_HEAD_SET_CONVERSION_BLU_OFS 31:16 +#define NV987D_HEAD_SET_CSC_RED2RED(a) (0x000004F0 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE 31:31 +#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CSC_RED2RED_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CSC_RED2RED_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_GRN2RED(a) 
(0x000004F4 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_GRN2RED_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_BLU2RED(a) (0x000004F8 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_BLU2RED_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_CONSTANT2RED(a) (0x000004FC + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_RED2GRN(a) (0x00000500 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_RED2GRN_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_GRN2GRN(a) (0x00000504 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_GRN2GRN_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_BLU2GRN(a) (0x00000508 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_BLU2GRN_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_CONSTANT2GRN(a) (0x0000050C + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_CONSTANT2GRN_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_RED2BLU(a) (0x00000510 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_RED2BLU_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_GRN2BLU(a) (0x00000514 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_GRN2BLU_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_BLU2BLU(a) (0x00000518 + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_BLU2BLU_COEFF 18:0 +#define NV987D_HEAD_SET_CSC_CONSTANT2BLU(a) (0x0000051C + (a)*0x00000300) +#define NV987D_HEAD_SET_CSC_CONSTANT2BLU_COEFF 18:0 +#define NV987D_HEAD_SET_HDMI_CTRL(a) (0x00000520 + (a)*0x00000300) +#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NV987D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NV987D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE 15:12 +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000000) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_FIELD_ALTERNATIVE (0x00000001) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_LINE_ALTERNATIVE (0x00000002) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_FULL (0x00000003) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH (0x00000004) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_L_DEPTH_GRAPHICS (0x00000005) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_TOP_AND_BOTTOM (0x00000006) +#define NV987D_HEAD_SET_HDMI_CTRL_STEREO3D_STRUCTURE_SIDE_BY_SIDE_HALF (0x00000008) +#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR(a) (0x00000524 + (a)*0x00000300) +#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_RED_CR 9:0 +#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_GRN_Y 19:10 +#define NV987D_HEAD_SET_VACTIVE_SPACE_COLOR_BLU_CB 29:20 +#define NV987D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00000528 + (a)*0x00000300) +#define NV987D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NV987D_HEAD_SET_DISPLAY_ID(a,b) (0x0000052C + (a)*0x00000300 + (b)*0x00000004) +#define NV987D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NV987D_HEAD_SET_SW_SPARE_A(a) (0x0000054C + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NV987D_HEAD_SET_SW_SPARE_B(a) (0x00000550 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NV987D_HEAD_SET_SW_SPARE_C(a) (0x00000554 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NV987D_HEAD_SET_SW_SPARE_D(a) (0x00000558 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NV987D_HEAD_SET_GET_BLANKING_CTRL(a) (0x0000055C + (a)*0x00000300) +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define 
NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NV987D_HEAD_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION(a) (0x00000560 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE 0:0 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_DISABLE (0x00000000) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_ENABLE_ENABLE (0x00000001) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_CHUNK_BANDWIDTH 12:1 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LAST_BANDWIDTH 24:13 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA(a) (0x00000564 + (a)*0x00000300) +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY1 7:4 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY2 11:8 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_LOSSY3 15:12 +#define NV987D_HEAD_SET_CONTROL_COMPRESSION_LA_CHUNK_SIZE 23:16 +#define NV987D_HEAD_SET_STALL_LOCK(a) (0x00000568 + (a)*0x00000300) +#define NV987D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NV987D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_MODE 1:1 +#define NV987D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN 6:2 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000000 +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000001) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000002) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000003) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000004) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000005) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000006) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000007) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000008) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x00000009) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000A) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000B) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000C) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000D) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000E) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x0000000F) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_UNSPECIFIED (0x00000010) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 4 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK(i) (0x0000001E +(i)) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1 2 +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 
(0x0000001E) +#define NV987D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x0000001F) +#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 7:7 +#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NV987D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_A(a) (0x000006D0 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_A_UNUSED 31:0 +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_B(a) (0x000006D4 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_B_UNUSED 31:0 +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_C(a) (0x000006D8 + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_C_UNUSED 31:0 +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_D(a) (0x000006DC + (a)*0x00000300) +#define NV987D_HEAD_SET_SW_METHOD_PLACEHOLDER_D_UNUSED 31:0 +#define NV987D_HEAD_SET_SPARE(a) (0x000006EC + (a)*0x00000300) +#define NV987D_HEAD_SET_SPARE_UNUSED 31:0 +#define NV987D_HEAD_SET_SPARE_NOOP(a,b) (0x000006F0 + (a)*0x00000300 + (b)*0x00000004) +#define NV987D_HEAD_SET_SPARE_NOOP_UNUSED 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cl987d_h diff --git a/src/common/sdk/nvidia/inc/class/cla06c.h b/src/common/sdk/nvidia/inc/class/cla06c.h new file mode 100644 index 000000000..0b4f5c98b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla06c.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _cla06c_h_ +#define _cla06c_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define KEPLER_CHANNEL_GROUP_A (0x0000A06C) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cla06c_h + diff --git a/src/common/sdk/nvidia/inc/class/cla06f.h b/src/common/sdk/nvidia/inc/class/cla06f.h new file mode 100644 index 000000000..336cab57f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla06f.h @@ -0,0 +1,240 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clA06f_h_ +#define _clA06f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class KEPLER_CHANNEL_GPFIFO */ +/* + * Documentation for KEPLER_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + */ +#define KEPLER_CHANNEL_GPFIFO_A (0x0000A06F) + + +/* pio method data structure */ +typedef volatile struct _cla06f_tag0 { + NvV32 Reserved00[0x7c0]; +} NvA06FTypedef, KEPLER_ChannelGPFifo; +#define NVA06F_TYPEDEF KEPLER_CHANNELChannelGPFifo +/* dma flow control data structure */ +typedef volatile struct _cla06f_tag1 { + NvU32 Ignored00[0x010]; /* 0000-0043*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} NvA06FControl, KeplerAControlGPFifo; +/* fields and values */ +#define NVA06F_NUMBER_OF_SUBCHANNELS (8) +#define NVA06F_SET_OBJECT (0x00000000) +#define NVA06F_SET_OBJECT_NVCLASS 15:0 +#define NVA06F_SET_OBJECT_ENGINE 20:16 +#define NVA06F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVA06F_ILLEGAL (0x00000004) +#define NVA06F_ILLEGAL_HANDLE 31:0 +#define NVA06F_NOP (0x00000008) +#define NVA06F_NOP_HANDLE 31:0 +#define NVA06F_SEMAPHOREA (0x00000010) +#define NVA06F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVA06F_SEMAPHOREB (0x00000014) +#define NVA06F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVA06F_SEMAPHOREC (0x00000018) +#define NVA06F_SEMAPHOREC_PAYLOAD 31:0 +#define NVA06F_SEMAPHORED (0x0000001C) +#define NVA06F_SEMAPHORED_OPERATION 3:0 +#define NVA06F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define 
NVA06F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVA06F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVA06F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVA06F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVA06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVA06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVA06F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVA06F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVA06F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVA06F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVA06F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVA06F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVA06F_NON_STALL_INTERRUPT (0x00000020) +#define NVA06F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVA06F_FB_FLUSH (0x00000024) +#define NVA06F_FB_FLUSH_HANDLE 31:0 +#define NVA06F_MEM_OP_A (0x00000028) +#define NVA06F_MEM_OP_A_OPERAND_LOW 31:2 +#define NVA06F_MEM_OP_A_TLB_INVALIDATE_ADDR 29:2 +#define NVA06F_MEM_OP_A_TLB_INVALIDATE_TARGET 31:30 +#define NVA06F_MEM_OP_A_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NVA06F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NVA06F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NVA06F_MEM_OP_B (0x0000002c) +#define NVA06F_MEM_OP_B_OPERAND_HIGH 7:0 +#define NVA06F_MEM_OP_B_OPERATION 31:27 +#define NVA06F_MEM_OP_B_OPERATION_SYSMEMBAR_FLUSH 0x00000005 +#define NVA06F_MEM_OP_B_OPERATION_SOFT_FLUSH 0x00000006 +#define NVA06F_MEM_OP_B_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVA06F_MEM_OP_B_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVA06F_MEM_OP_B_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +#define NVA06F_MEM_OP_B_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVA06F_MEM_OP_B_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB 0:0 +#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC 1:1 +#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVA06F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVA06F_SET_REFERENCE (0x00000050) +#define NVA06F_SET_REFERENCE_COUNT 31:0 +#define NVA06F_CRC_CHECK (0x0000007c) +#define NVA06F_CRC_CHECK_VALUE 31:0 +#define NVA06F_YIELD (0x00000080) +#define NVA06F_YIELD_OP 1:0 +#define NVA06F_YIELD_OP_NOP 0x00000000 + + +/* GPFIFO entry format */ +#define NVA06F_GP_ENTRY__SIZE 8 +#define NVA06F_GP_ENTRY0_FETCH 0:0 +#define NVA06F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVA06F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVA06F_GP_ENTRY0_GET 31:2 +#define NVA06F_GP_ENTRY0_OPERAND 31:0 +#define NVA06F_GP_ENTRY1_GET_HI 7:0 +#define NVA06F_GP_ENTRY1_PRIV 8:8 +#define NVA06F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVA06F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVA06F_GP_ENTRY1_LEVEL 9:9 +#define NVA06F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVA06F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVA06F_GP_ENTRY1_LENGTH 30:10 +#define NVA06F_GP_ENTRY1_SYNC 31:31 +#define NVA06F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVA06F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVA06F_GP_ENTRY1_OPCODE 7:0 +#define NVA06F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVA06F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVA06F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVA06F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVA06F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVA06F_DMA_METHOD_ADDRESS 11:0 +#define 
NVA06F_DMA_SUBDEVICE_MASK 15:4 +#define NVA06F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVA06F_DMA_TERT_OP 17:16 +#define NVA06F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVA06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVA06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVA06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVA06F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVA06F_DMA_METHOD_COUNT_OLD 28:18 +#define NVA06F_DMA_METHOD_COUNT 28:16 +#define NVA06F_DMA_IMMD_DATA 28:16 +#define NVA06F_DMA_SEC_OP 31:29 +#define NVA06F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVA06F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVA06F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVA06F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVA06F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVA06F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVA06F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVA06F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVA06F_DMA_INCR_ADDRESS 11:0 +#define NVA06F_DMA_INCR_SUBCHANNEL 15:13 +#define NVA06F_DMA_INCR_COUNT 28:16 +#define NVA06F_DMA_INCR_OPCODE 31:29 +#define NVA06F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVA06F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVA06F_DMA_NONINCR_ADDRESS 11:0 +#define NVA06F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVA06F_DMA_NONINCR_COUNT 28:16 +#define NVA06F_DMA_NONINCR_OPCODE 31:29 +#define NVA06F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVA06F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVA06F_DMA_ONEINCR_ADDRESS 11:0 +#define NVA06F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVA06F_DMA_ONEINCR_COUNT 28:16 +#define NVA06F_DMA_ONEINCR_OPCODE 31:29 +#define NVA06F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVA06F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVA06F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVA06F_DMA_IMMD_ADDRESS 11:0 +#define NVA06F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVA06F_DMA_IMMD_DATA 28:16 +#define NVA06F_DMA_IMMD_OPCODE 31:29 +#define NVA06F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVA06F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVA06F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVA06F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVA06F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVA06F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVA06F_DMA_ENDSEG_OPCODE 31:29 +#define NVA06F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVA06F_DMA_ADDRESS 12:2 +#define NVA06F_DMA_SUBCH 15:13 +#define NVA06F_DMA_OPCODE3 17:16 +#define NVA06F_DMA_OPCODE3_NONE (0x00000000) +#define NVA06F_DMA_COUNT 28:18 +#define NVA06F_DMA_OPCODE 31:29 +#define NVA06F_DMA_OPCODE_METHOD (0x00000000) +#define NVA06F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVA06F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clA06F_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cla06fsubch.h b/src/common/sdk/nvidia/inc/class/cla06fsubch.h new file mode 100644 index 000000000..c65d6c107 --- /dev/null +++ 
b/src/common/sdk/nvidia/inc/class/cla06fsubch.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cla06fsubch_h_ +#define _cla06fsubch_h_ + +#define NVA06F_SUBCHANNEL_2D 3 +#define NVA06F_SUBCHANNEL_COPY_ENGINE 4 + +#endif // _cla06fsubch_h_ diff --git a/src/common/sdk/nvidia/inc/class/cla06fsw.h b/src/common/sdk/nvidia/inc/class/cla06fsw.h new file mode 100644 index 000000000..2194da927 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla06fsw.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cla06f_sw_h_ +#define _cla06f_sw_h_ + +#define NVA06F_NOTIFIERS_RC (0) +#define NVA06F_NOTIFIERS_REFCNT (1) +#define NVA06F_NOTIFIERS_NONSTALL (2) +#define NVA06F_NOTIFIERS_EVENTBUFFER (3) +#define NVA06F_NOTIFIERS_IDLECHANNEL (4) +#define NVA06F_NOTIFIERS_ENDCTX (5) +#define NVA06F_NOTIFIERS_SW (6) +#define NVA06F_NOTIFIERS_GR_DEBUG_INTR (7) +#define NVA06F_NOTIFIERS_MAXCOUNT (8) + +/* NvNotification[] fields and values */ +#define NVA06F_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NVA06F_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +#endif /* _cla06f_sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cla097.h b/src/common/sdk/nvidia/inc/class/cla097.h new file mode 100644 index 000000000..2ed72052e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla097.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cla097_h_ +#define _cla097_h_ + +#define KEPLER_A 0xA097 + +#endif // _cla097_h_ diff --git a/src/common/sdk/nvidia/inc/class/cla0b0.h b/src/common/sdk/nvidia/inc/class/cla0b0.h new file mode 100644 index 000000000..ef4faa809 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla0b0.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvtypes.h" + +#ifndef _cla0b0_h_ +#define _cla0b0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVA0B0_VIDEO_DECODER (0x0000A0B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cla0b0_h diff --git a/src/common/sdk/nvidia/inc/class/cla0b5.h b/src/common/sdk/nvidia/inc/class/cla0b5.h new file mode 100644 index 000000000..92076c64a --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla0b5.h @@ -0,0 +1,262 @@ +/******************************************************************************* + Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _cla0b5_h_ +#define _cla0b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define KEPLER_DMA_COPY_A (0x0000A0B5) + +#define NVA0B5_NOP (0x00000100) +#define NVA0B5_NOP_PARAMETER 31:0 +#define NVA0B5_PM_TRIGGER (0x00000140) +#define NVA0B5_PM_TRIGGER_V 31:0 +#define NVA0B5_SET_SEMAPHORE_A (0x00000240) +#define NVA0B5_SET_SEMAPHORE_A_UPPER 7:0 +#define NVA0B5_SET_SEMAPHORE_B (0x00000244) +#define NVA0B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVA0B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVA0B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVA0B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVA0B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVA0B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVA0B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVA0B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVA0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVA0B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVA0B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVA0B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVA0B5_SET_DST_PHYS_MODE (0x00000264) +#define NVA0B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVA0B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVA0B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVA0B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVA0B5_LAUNCH_DMA (0x00000300) +#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVA0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVA0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVA0B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVA0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVA0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVA0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVA0B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define 
NVA0B5_LAUNCH_DMA_BYPASS_L2 11:11 +#define NVA0B5_LAUNCH_DMA_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVA0B5_LAUNCH_DMA_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVA0B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVA0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVA0B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVA0B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVA0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVA0B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN (0x0000000B) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX (0x0000000C) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMUL (0x0000000D) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMUL (0x0000000E) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVA0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVA0B5_OFFSET_IN_UPPER (0x00000400) +#define NVA0B5_OFFSET_IN_UPPER_UPPER 7:0 +#define NVA0B5_OFFSET_IN_LOWER (0x00000404) +#define NVA0B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVA0B5_OFFSET_OUT_UPPER (0x00000408) +#define NVA0B5_OFFSET_OUT_UPPER_UPPER 7:0 +#define NVA0B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVA0B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVA0B5_PITCH_IN (0x00000410) +#define NVA0B5_PITCH_IN_VALUE 31:0 +#define NVA0B5_PITCH_OUT (0x00000414) +#define NVA0B5_PITCH_OUT_VALUE 31:0 +#define NVA0B5_LINE_LENGTH_IN (0x00000418) +#define NVA0B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVA0B5_LINE_COUNT (0x0000041C) +#define NVA0B5_LINE_COUNT_VALUE 31:0 +#define NVA0B5_SET_REMAP_CONST_A (0x00000700) +#define NVA0B5_SET_REMAP_CONST_A_V 31:0 +#define NVA0B5_SET_REMAP_CONST_B (0x00000704) +#define NVA0B5_SET_REMAP_CONST_B_V 31:0 +#define NVA0B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define 
NVA0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVA0B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVA0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVA0B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVA0B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVA0B5_SET_DST_BLOCK_SIZE_WIDTH_QUARTER_GOB (0x0000000E) +#define NVA0B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVA0B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVA0B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVA0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVA0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_TESLA_4 (0x00000000) +#define NVA0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVA0B5_SET_DST_WIDTH (0x00000710) +#define NVA0B5_SET_DST_WIDTH_V 31:0 +#define 
NVA0B5_SET_DST_HEIGHT (0x00000714) +#define NVA0B5_SET_DST_HEIGHT_V 31:0 +#define NVA0B5_SET_DST_DEPTH (0x00000718) +#define NVA0B5_SET_DST_DEPTH_V 31:0 +#define NVA0B5_SET_DST_LAYER (0x0000071C) +#define NVA0B5_SET_DST_LAYER_V 31:0 +#define NVA0B5_SET_DST_ORIGIN (0x00000720) +#define NVA0B5_SET_DST_ORIGIN_X 15:0 +#define NVA0B5_SET_DST_ORIGIN_Y 31:16 +#define NVA0B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVA0B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVA0B5_SET_SRC_BLOCK_SIZE_WIDTH_QUARTER_GOB (0x0000000E) +#define NVA0B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVA0B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVA0B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVA0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVA0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_TESLA_4 (0x00000000) +#define NVA0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVA0B5_SET_SRC_WIDTH (0x0000072C) +#define NVA0B5_SET_SRC_WIDTH_V 31:0 +#define NVA0B5_SET_SRC_HEIGHT (0x00000730) +#define NVA0B5_SET_SRC_HEIGHT_V 31:0 +#define NVA0B5_SET_SRC_DEPTH (0x00000734) +#define NVA0B5_SET_SRC_DEPTH_V 31:0 +#define NVA0B5_SET_SRC_LAYER (0x00000738) +#define NVA0B5_SET_SRC_LAYER_V 31:0 +#define NVA0B5_SET_SRC_ORIGIN (0x0000073C) +#define NVA0B5_SET_SRC_ORIGIN_X 15:0 +#define NVA0B5_SET_SRC_ORIGIN_Y 31:16 +#define NVA0B5_PM_TRIGGER_END (0x00001114) +#define NVA0B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _cla0b5_h + diff --git a/src/common/sdk/nvidia/inc/class/cla0c0.h b/src/common/sdk/nvidia/inc/class/cla0c0.h new file mode 100644 index 000000000..bc73e4094 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla0c0.h @@ -0,0 +1,646 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_kepler_compute_a_h_ +#define _cl_kepler_compute_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../class/bin/sw_header.pl kepler_compute_a */ + +#include "nvtypes.h" + +#define KEPLER_COMPUTE_A 0xA0C0 + +#define NVA0C0_SET_OBJECT 0x0000 +#define NVA0C0_SET_OBJECT_CLASS_ID 15:0 +#define NVA0C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVA0C0_NO_OPERATION 0x0100 +#define NVA0C0_NO_OPERATION_V 31:0 + +#define NVA0C0_SET_NOTIFY_A 0x0104 +#define NVA0C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVA0C0_SET_NOTIFY_B 0x0108 +#define NVA0C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVA0C0_NOTIFY 0x010c +#define NVA0C0_NOTIFY_TYPE 31:0 +#define NVA0C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVA0C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVA0C0_WAIT_FOR_IDLE 0x0110 +#define NVA0C0_WAIT_FOR_IDLE_V 31:0 + +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVA0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVA0C0_SEND_GO_IDLE 0x013c +#define NVA0C0_SEND_GO_IDLE_V 31:0 + +#define NVA0C0_PM_TRIGGER 0x0140 +#define NVA0C0_PM_TRIGGER_V 31:0 + +#define NVA0C0_PM_TRIGGER_WFI 0x0144 +#define NVA0C0_PM_TRIGGER_WFI_V 31:0 + +#define NVA0C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVA0C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVA0C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVA0C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVA0C0_LINE_LENGTH_IN 0x0180 +#define NVA0C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVA0C0_LINE_COUNT 0x0184 +#define NVA0C0_LINE_COUNT_VALUE 31:0 + +#define NVA0C0_OFFSET_OUT_UPPER 0x0188 +#define NVA0C0_OFFSET_OUT_UPPER_VALUE 7:0 + +#define NVA0C0_OFFSET_OUT 0x018c +#define NVA0C0_OFFSET_OUT_VALUE 31:0 + +#define NVA0C0_PITCH_OUT 0x0190 +#define NVA0C0_PITCH_OUT_VALUE 31:0 + +#define NVA0C0_SET_DST_BLOCK_SIZE 0x0194 +#define NVA0C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVA0C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVA0C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVA0C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVA0C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVA0C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVA0C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVA0C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVA0C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVA0C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVA0C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVA0C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVA0C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVA0C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define 
NVA0C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVA0C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVA0C0_SET_DST_WIDTH 0x0198 +#define NVA0C0_SET_DST_WIDTH_V 31:0 + +#define NVA0C0_SET_DST_HEIGHT 0x019c +#define NVA0C0_SET_DST_HEIGHT_V 31:0 + +#define NVA0C0_SET_DST_DEPTH 0x01a0 +#define NVA0C0_SET_DST_DEPTH_V 31:0 + +#define NVA0C0_SET_DST_LAYER 0x01a4 +#define NVA0C0_SET_DST_LAYER_V 31:0 + +#define NVA0C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVA0C0_SET_DST_ORIGIN_BYTES_X_V 19:0 + +#define NVA0C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVA0C0_SET_DST_ORIGIN_SAMPLES_Y_V 15:0 + +#define NVA0C0_LAUNCH_DMA 0x01b0 +#define NVA0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVA0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVA0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVA0C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVA0C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVA0C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVA0C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVA0C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVA0C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVA0C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVA0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVA0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVA0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVA0C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVA0C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA0C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVA0C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA0C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVA0C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA0C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVA0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVA0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVA0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVA0C0_LOAD_INLINE_DATA 0x01b4 +#define NVA0C0_LOAD_INLINE_DATA_V 31:0 + +#define NVA0C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVA0C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVA0C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVA0C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVA0C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVA0C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVA0C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVA0C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVA0C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVA0C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVA0C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVA0C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVA0C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVA0C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVA0C0_PERFMON_TRANSFER 0x0210 +#define NVA0C0_PERFMON_TRANSFER_V 31:0 + +#define NVA0C0_SET_SHADER_SHARED_MEMORY_WINDOW 0x0214 +#define NVA0C0_SET_SHADER_SHARED_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVA0C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVA0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define 
NVA0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVA0C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVA0C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVA0C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVA0C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVA0C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVA0C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVA0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVA0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVA0C0_SET_CWD_CONTROL 0x0240 +#define NVA0C0_SET_CWD_CONTROL_SM_SELECTION 0:0 +#define NVA0C0_SET_CWD_CONTROL_SM_SELECTION_LOAD_BALANCED 0x00000000 +#define NVA0C0_SET_CWD_CONTROL_SM_SELECTION_ROUND_ROBIN 0x00000001 + +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVA0C0_SET_CWD_REF_COUNTER 0x0248 +#define NVA0C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVA0C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A 0x0274 +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A_ADDRESS_UPPER 7:0 + +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B 0x0278 +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B_ADDRESS_LOWER 31:0 + +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C 0x027c +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_BYTE_COUNT 16:0 +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2 31:31 +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_TRUE 0x00000001 + +#define NVA0C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVA0C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVA0C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVA0C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVA0C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVA0C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVA0C0_SET_QMD_VERSION 0x0288 +#define NVA0C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVA0C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVA0C0_CHECK_QMD_VERSION 0x0290 +#define NVA0C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVA0C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVA0C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVA0C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVA0C0_SEND_PCAS_A 0x02b4 +#define NVA0C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVA0C0_SEND_PCAS_B 0x02b8 +#define NVA0C0_SEND_PCAS_B_FROM 23:0 +#define NVA0C0_SEND_PCAS_B_DELTA 31:24 + +#define NVA0C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVA0C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVA0C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVA0C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVA0C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVA0C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVA0C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define 
NVA0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A 0x02f0 +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B 0x02f4 +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C 0x02f8 +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVA0C0_SET_SPA_VERSION 0x0310 +#define NVA0C0_SET_SPA_VERSION_MINOR 7:0 +#define NVA0C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVA0C0_SET_FALCON00 0x0500 +#define NVA0C0_SET_FALCON00_V 31:0 + +#define NVA0C0_SET_FALCON01 0x0504 +#define NVA0C0_SET_FALCON01_V 31:0 + +#define NVA0C0_SET_FALCON02 0x0508 +#define NVA0C0_SET_FALCON02_V 31:0 + +#define NVA0C0_SET_FALCON03 0x050c +#define NVA0C0_SET_FALCON03_V 31:0 + +#define NVA0C0_SET_FALCON04 0x0510 +#define NVA0C0_SET_FALCON04_V 31:0 + +#define NVA0C0_SET_FALCON05 0x0514 +#define NVA0C0_SET_FALCON05_V 31:0 + +#define NVA0C0_SET_FALCON06 0x0518 +#define NVA0C0_SET_FALCON06_V 31:0 + +#define NVA0C0_SET_FALCON07 0x051c +#define NVA0C0_SET_FALCON07_V 31:0 + +#define NVA0C0_SET_FALCON08 0x0520 +#define NVA0C0_SET_FALCON08_V 31:0 + +#define NVA0C0_SET_FALCON09 0x0524 +#define NVA0C0_SET_FALCON09_V 31:0 + +#define NVA0C0_SET_FALCON10 0x0528 +#define NVA0C0_SET_FALCON10_V 31:0 + +#define NVA0C0_SET_FALCON11 0x052c +#define NVA0C0_SET_FALCON11_V 31:0 + +#define NVA0C0_SET_FALCON12 0x0530 +#define NVA0C0_SET_FALCON12_V 31:0 + +#define NVA0C0_SET_FALCON13 0x0534 +#define NVA0C0_SET_FALCON13_V 31:0 + +#define NVA0C0_SET_FALCON14 0x0538 +#define NVA0C0_SET_FALCON14_V 31:0 + +#define NVA0C0_SET_FALCON15 0x053c +#define NVA0C0_SET_FALCON15_V 31:0 + +#define NVA0C0_SET_FALCON16 0x0540 +#define NVA0C0_SET_FALCON16_V 31:0 + +#define NVA0C0_SET_FALCON17 0x0544 +#define NVA0C0_SET_FALCON17_V 31:0 + +#define NVA0C0_SET_FALCON18 0x0548 +#define NVA0C0_SET_FALCON18_V 31:0 + +#define NVA0C0_SET_FALCON19 0x054c +#define NVA0C0_SET_FALCON19_V 31:0 + +#define NVA0C0_SET_FALCON20 0x0550 +#define NVA0C0_SET_FALCON20_V 31:0 + +#define NVA0C0_SET_FALCON21 0x0554 +#define NVA0C0_SET_FALCON21_V 31:0 + +#define NVA0C0_SET_FALCON22 0x0558 +#define NVA0C0_SET_FALCON22_V 31:0 + +#define NVA0C0_SET_FALCON23 0x055c +#define NVA0C0_SET_FALCON23_V 31:0 + +#define NVA0C0_SET_FALCON24 0x0560 +#define NVA0C0_SET_FALCON24_V 31:0 + +#define NVA0C0_SET_FALCON25 0x0564 +#define NVA0C0_SET_FALCON25_V 31:0 + +#define NVA0C0_SET_FALCON26 0x0568 +#define NVA0C0_SET_FALCON26_V 31:0 + +#define NVA0C0_SET_FALCON27 0x056c +#define NVA0C0_SET_FALCON27_V 31:0 + +#define NVA0C0_SET_FALCON28 0x0570 +#define NVA0C0_SET_FALCON28_V 31:0 + +#define NVA0C0_SET_FALCON29 0x0574 +#define NVA0C0_SET_FALCON29_V 31:0 + +#define NVA0C0_SET_FALCON30 0x0578 +#define NVA0C0_SET_FALCON30_V 31:0 + +#define NVA0C0_SET_FALCON31 0x057c +#define NVA0C0_SET_FALCON31_V 31:0 + +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0 + +#define 
NVA0C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVA0C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVA0C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVA0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVA0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVA0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVA0C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVA0C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVA0C0_SET_SPARE_NOOP12 0x0f44 +#define NVA0C0_SET_SPARE_NOOP12_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP13 0x0f48 +#define NVA0C0_SET_SPARE_NOOP13_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP14 0x0f4c +#define NVA0C0_SET_SPARE_NOOP14_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP15 0x0f50 +#define NVA0C0_SET_SPARE_NOOP15_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP00 0x1040 +#define NVA0C0_SET_SPARE_NOOP00_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP01 0x1044 +#define NVA0C0_SET_SPARE_NOOP01_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP02 0x1048 +#define NVA0C0_SET_SPARE_NOOP02_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP03 0x104c +#define NVA0C0_SET_SPARE_NOOP03_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP04 0x1050 +#define NVA0C0_SET_SPARE_NOOP04_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP05 0x1054 +#define NVA0C0_SET_SPARE_NOOP05_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP06 0x1058 +#define NVA0C0_SET_SPARE_NOOP06_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP07 0x105c +#define NVA0C0_SET_SPARE_NOOP07_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP08 0x1060 +#define NVA0C0_SET_SPARE_NOOP08_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP09 0x1064 +#define NVA0C0_SET_SPARE_NOOP09_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP10 0x1068 +#define NVA0C0_SET_SPARE_NOOP10_V 31:0 + +#define NVA0C0_SET_SPARE_NOOP11 0x106c +#define NVA0C0_SET_SPARE_NOOP11_V 31:0 + +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVA0C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVA0C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVA0C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVA0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVA0C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVA0C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVA0C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVA0C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + 
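For orientation, here is a minimal editorial sketch (not part of the header) of how the single-word HIGH:LOW bit-range definitions above are typically consumed. The FIELD_* helpers are hypothetical stand-ins for the DRF_* macros in nvmisc.h; they rely on the same open-ended ?: trick that splits a range such as 5:4 into its endpoint bit numbers.

/* Illustrative helpers only -- hypothetical names, same idea as nvmisc.h DRF_*. */
#define FIELD_LO(range)        (0 ? range)   /* "5:4" expands to (0 ? 5:4), i.e. 4 */
#define FIELD_HI(range)        (1 ? range)   /* "5:4" expands to (1 ? 5:4), i.e. 5 */
#define FIELD_MASK(range)      (0xFFFFFFFFu >> (31 - FIELD_HI(range) + FIELD_LO(range)))
#define FIELD_NUM(range, val)  (((unsigned int)(val) & FIELD_MASK(range)) << FIELD_LO(range))

/* Example: a LAUNCH_DMA data word selecting pitch destination layout with a
 * semaphore release on completion; evaluates to 0x00000021. */
static const unsigned int example_launch_dma =
    FIELD_NUM(NVA0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT,
              NVA0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH) |
    FIELD_NUM(NVA0C0_LAUNCH_DMA_COMPLETION_TYPE,
              NVA0C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE);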
+#define NVA0C0_SET_RENDER_ENABLE_A 0x1550 +#define NVA0C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVA0C0_SET_RENDER_ENABLE_B 0x1554 +#define NVA0C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVA0C0_SET_RENDER_ENABLE_C 0x1558 +#define NVA0C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVA0C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVA0C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVA0C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVA0C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVA0C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVA0C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVA0C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0 + +#define NVA0C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVA0C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVA0C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVA0C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVA0C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVA0C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0 + +#define NVA0C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVA0C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVA0C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVA0C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVA0C0_SET_PROGRAM_REGION_A 0x1608 +#define NVA0C0_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0 + +#define NVA0C0_SET_PROGRAM_REGION_B 0x160c +#define NVA0C0_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0 + +#define NVA0C0_SET_SHADER_CONTROL 0x1690 +#define NVA0C0_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0 +#define NVA0C0_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000 +#define NVA0C0_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001 + +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVA0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVA0C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVA0C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVA0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVA0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVA0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVA0C0_PIPE_NOP 0x1a2c +#define NVA0C0_PIPE_NOP_V 31:0 + +#define NVA0C0_SET_SPARE00 0x1a30 +#define NVA0C0_SET_SPARE00_V 31:0 + +#define NVA0C0_SET_SPARE01 0x1a34 +#define NVA0C0_SET_SPARE01_V 31:0 + +#define NVA0C0_SET_SPARE02 0x1a38 +#define NVA0C0_SET_SPARE02_V 31:0 + +#define NVA0C0_SET_SPARE03 0x1a3c +#define NVA0C0_SET_SPARE03_V 31:0 + +#define NVA0C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVA0C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVA0C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVA0C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVA0C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVA0C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVA0C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVA0C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define 
NVA0C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 + +#define NVA0C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVA0C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVA0C0_SET_TRAP_HANDLER 0x260c +#define NVA0C0_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define 
NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVA0C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVA0C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVA0C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_kepler_compute_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cla0c0qmd.h b/src/common/sdk/nvidia/inc/class/cla0c0qmd.h new file mode 100644 index 000000000..b04ce5b63 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla0c0qmd.h @@ -0,0 +1,658 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __CLA0C0QMD_H__ +#define __CLA0C0QMD_H__ + +/* +** Queue Meta Data, Version 00_06 + */ + +// The below C preprocessor definitions describe "multi-word" structures, where +// fields may have bit numbers beyond 32. For example, MW(127:96) means +// the field is in bits 0-31 of word number 3 of the structure. The "MW(X:Y)" +// syntax is to distinguish from similar "X:Y" single-word definitions: the +// macros historically used for single-word definitions would fail with +// multi-word definitions. +// +// See nvmisc.h:DRF_VAL_MW() in the source code of the kernel +// interface layer of nvidia.ko for an example of how to manipulate +// these MW(X:Y) definitions. 
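As a companion to the comment above, a minimal editorial sketch (not part of the header) of how an MW(X:Y) field could be read out of the 32-bit words of a QMD image. It assumes every MW() field is at most 32 bits wide, which holds for the definitions that follow; qmd_read_mw is a hypothetical helper, whereas the real driver uses DRF_VAL_MW() from nvmisc.h.

/* Illustrative only: read an MW(hi:lo) field out of an array of 32-bit QMD
 * words, handling fields that straddle one word boundary. */
static inline unsigned int qmd_read_mw(const unsigned int *qmd,
                                       unsigned int hi, unsigned int lo)
{
    unsigned int word  = lo / 32;        /* first word containing the field      */
    unsigned int shift = lo % 32;        /* bit offset of the field in that word */
    unsigned int width = hi - lo + 1;    /* assumed to be <= 32                  */
    unsigned long long v = qmd[word];

    if (hi / 32 != word)                 /* field spills into the following word */
        v |= (unsigned long long)qmd[word + 1] << 32;

    return (unsigned int)((v >> shift) & ((1ull << width) - 1));
}

/* Example: qmd_read_mw(qmd, 1471, 1467) returns the BARRIER_COUNT field of a
 * version 00_06 QMD, i.e. bits 31:27 of 32-bit word 45. */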
+ +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_A MW(30:0) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_B MW(31:31) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_C MW(62:32) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_D MW(63:63) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_E MW(94:64) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_F MW(95:95) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_G MW(126:96) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_H MW(127:127) +#define NVA0C0_QMDV00_06_QMD_RESERVED_A_A MW(159:128) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_I MW(191:160) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_J MW(196:192) +#define NVA0C0_QMDV00_06_QMD_RESERVED_A MW(199:197) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_K MW(200:200) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_K_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_K_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_L MW(201:201) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_L_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_L_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_SEMAPHORE_RELEASE_ENABLE0 MW(202:202) +#define NVA0C0_QMDV00_06_SEMAPHORE_RELEASE_ENABLE0_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_SEMAPHORE_RELEASE_ENABLE0_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_SEMAPHORE_RELEASE_ENABLE1 MW(203:203) +#define NVA0C0_QMDV00_06_SEMAPHORE_RELEASE_ENABLE1_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_SEMAPHORE_RELEASE_ENABLE1_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_QMD_RESERVED_B MW(207:204) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_M MW(222:208) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_N MW(223:223) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_N_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_N_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_O MW(248:224) +#define NVA0C0_QMDV00_06_QMD_RESERVED_C MW(249:249) +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_HEADER_CACHE MW(250:250) +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_HEADER_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_HEADER_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_SAMPLER_CACHE MW(251:251) +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_SAMPLER_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_SAMPLER_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_DATA_CACHE MW(252:252) +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_DATA_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_INVALIDATE_TEXTURE_DATA_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_INVALIDATE_SHADER_DATA_CACHE MW(253:253) +#define NVA0C0_QMDV00_06_INVALIDATE_SHADER_DATA_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_INVALIDATE_SHADER_DATA_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_INVALIDATE_INSTRUCTION_CACHE MW(254:254) +#define NVA0C0_QMDV00_06_INVALIDATE_INSTRUCTION_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_INVALIDATE_INSTRUCTION_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_INVALIDATE_SHADER_CONSTANT_CACHE MW(255:255) +#define NVA0C0_QMDV00_06_INVALIDATE_SHADER_CONSTANT_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_INVALIDATE_SHADER_CONSTANT_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_PROGRAM_OFFSET MW(287:256) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_P MW(319:288) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_Q MW(327:320) +#define NVA0C0_QMDV00_06_QMD_RESERVED_D MW(335:328) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_R MW(351:336) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_S MW(357:352) +#define NVA0C0_QMDV00_06_QMD_RESERVED_E MW(365:358) +#define NVA0C0_QMDV00_06_RELEASE_MEMBAR_TYPE MW(366:366) +#define 
NVA0C0_QMDV00_06_RELEASE_MEMBAR_TYPE_FE_NONE 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE_MEMBAR_TYPE_FE_SYSMEMBAR 0x00000001 +#define NVA0C0_QMDV00_06_CWD_MEMBAR_TYPE MW(369:368) +#define NVA0C0_QMDV00_06_CWD_MEMBAR_TYPE_L1_NONE 0x00000000 +#define NVA0C0_QMDV00_06_CWD_MEMBAR_TYPE_L1_SYSMEMBAR 0x00000001 +#define NVA0C0_QMDV00_06_CWD_MEMBAR_TYPE_L1_MEMBAR 0x00000003 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_T MW(370:370) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_T_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_T_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_U MW(371:371) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_U_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_U_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_THROTTLED MW(372:372) +#define NVA0C0_QMDV00_06_THROTTLED_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_THROTTLED_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_QMD_RESERVED_E2_A MW(376:376) +#define NVA0C0_QMDV00_06_QMD_RESERVED_E2_B MW(377:377) +#define NVA0C0_QMDV00_06_API_VISIBLE_CALL_LIMIT MW(378:378) +#define NVA0C0_QMDV00_06_API_VISIBLE_CALL_LIMIT__32 0x00000000 +#define NVA0C0_QMDV00_06_API_VISIBLE_CALL_LIMIT_NO_CHECK 0x00000001 +#define NVA0C0_QMDV00_06_SHARED_MEMORY_BANK_MAPPING MW(379:379) +#define NVA0C0_QMDV00_06_SHARED_MEMORY_BANK_MAPPING_FOUR_BYTES_PER_BANK 0x00000000 +#define NVA0C0_QMDV00_06_SHARED_MEMORY_BANK_MAPPING_EIGHT_BYTES_PER_BANK 0x00000001 +#define NVA0C0_QMDV00_06_SAMPLER_INDEX MW(382:382) +#define NVA0C0_QMDV00_06_SAMPLER_INDEX_INDEPENDENTLY 0x00000000 +#define NVA0C0_QMDV00_06_SAMPLER_INDEX_VIA_HEADER_INDEX 0x00000001 +#define NVA0C0_QMDV00_06_QMD_RESERVED_E3_A MW(383:383) +#define NVA0C0_QMDV00_06_CTA_RASTER_WIDTH MW(415:384) +#define NVA0C0_QMDV00_06_CTA_RASTER_HEIGHT MW(431:416) +#define NVA0C0_QMDV00_06_CTA_RASTER_DEPTH MW(447:432) +#define NVA0C0_QMDV00_06_CTA_RASTER_WIDTH_RESUME MW(479:448) +#define NVA0C0_QMDV00_06_CTA_RASTER_HEIGHT_RESUME MW(495:480) +#define NVA0C0_QMDV00_06_CTA_RASTER_DEPTH_RESUME MW(511:496) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_V MW(535:512) +#define NVA0C0_QMDV00_06_QMD_RESERVED_F MW(542:536) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_W MW(543:543) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_W_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_W_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_SHARED_MEMORY_SIZE MW(561:544) +#define NVA0C0_QMDV00_06_QMD_RESERVED_G MW(575:562) +#define NVA0C0_QMDV00_06_QMD_VERSION MW(579:576) +#define NVA0C0_QMDV00_06_QMD_MAJOR_VERSION MW(583:580) +#define NVA0C0_QMDV00_06_QMD_RESERVED_H MW(591:584) +#define NVA0C0_QMDV00_06_CTA_THREAD_DIMENSION0 MW(607:592) +#define NVA0C0_QMDV00_06_CTA_THREAD_DIMENSION1 MW(623:608) +#define NVA0C0_QMDV00_06_CTA_THREAD_DIMENSION2 MW(639:624) +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_VALID(i) MW((640+(i)*1):(640+(i)*1)) +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_QMD_RESERVED_I MW(668:648) +#define NVA0C0_QMDV00_06_L1_CONFIGURATION MW(671:669) +#define NVA0C0_QMDV00_06_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVA0C0_QMDV00_06_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_32KB 0x00000002 +#define NVA0C0_QMDV00_06_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_X MW(703:672) +#define NVA0C0_QMDV00_06_QMD_RESERVED_V1_Y MW(735:704) +#define NVA0C0_QMDV00_06_RELEASE0_ADDRESS_LOWER MW(767:736) +#define 
NVA0C0_QMDV00_06_RELEASE0_ADDRESS_UPPER MW(775:768) +#define NVA0C0_QMDV00_06_QMD_RESERVED_J MW(783:776) +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP MW(790:788) +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP_RED_INC 0x00000003 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP_RED_AND 0x00000005 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP_RED_OR 0x00000006 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA0C0_QMDV00_06_QMD_RESERVED_K MW(791:791) +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_FORMAT MW(793:792) +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_ENABLE MW(794:794) +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE0_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_RELEASE0_STRUCTURE_SIZE MW(799:799) +#define NVA0C0_QMDV00_06_RELEASE0_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE0_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA0C0_QMDV00_06_RELEASE0_PAYLOAD MW(831:800) +#define NVA0C0_QMDV00_06_RELEASE1_ADDRESS_LOWER MW(863:832) +#define NVA0C0_QMDV00_06_RELEASE1_ADDRESS_UPPER MW(871:864) +#define NVA0C0_QMDV00_06_QMD_RESERVED_L MW(879:872) +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP MW(886:884) +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP_RED_INC 0x00000003 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP_RED_AND 0x00000005 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP_RED_OR 0x00000006 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA0C0_QMDV00_06_QMD_RESERVED_M MW(887:887) +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_FORMAT MW(889:888) +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_ENABLE MW(890:890) +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE1_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_RELEASE1_STRUCTURE_SIZE MW(895:895) +#define NVA0C0_QMDV00_06_RELEASE1_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA0C0_QMDV00_06_RELEASE1_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA0C0_QMDV00_06_RELEASE1_PAYLOAD MW(927:896) +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_ADDR_LOWER(i) MW((959+(i)*64):(928+(i)*64)) +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_ADDR_UPPER(i) MW((967+(i)*64):(960+(i)*64)) +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_RESERVED_ADDR(i) MW((973+(i)*64):(968+(i)*64)) +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_INVALIDATE(i) MW((974+(i)*64):(974+(i)*64)) +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_INVALIDATE_FALSE 0x00000000 +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_INVALIDATE_TRUE 0x00000001 +#define NVA0C0_QMDV00_06_CONSTANT_BUFFER_SIZE(i) MW((991+(i)*64):(975+(i)*64)) +#define NVA0C0_QMDV00_06_SHADER_LOCAL_MEMORY_LOW_SIZE MW(1463:1440) 
+#define NVA0C0_QMDV00_06_QMD_RESERVED_N MW(1466:1464) +#define NVA0C0_QMDV00_06_BARRIER_COUNT MW(1471:1467) +#define NVA0C0_QMDV00_06_SHADER_LOCAL_MEMORY_HIGH_SIZE MW(1495:1472) +#define NVA0C0_QMDV00_06_REGISTER_COUNT MW(1503:1496) +#define NVA0C0_QMDV00_06_SHADER_LOCAL_MEMORY_CRS_SIZE MW(1527:1504) +#define NVA0C0_QMDV00_06_SASS_VERSION MW(1535:1528) +#define NVA0C0_QMDV00_06_QMD_SPARE_A MW(1567:1536) +#define NVA0C0_QMDV00_06_QMD_SPARE_B MW(1599:1568) +#define NVA0C0_QMDV00_06_QMD_SPARE_C MW(1631:1600) +#define NVA0C0_QMDV00_06_QMD_SPARE_D MW(1663:1632) +#define NVA0C0_QMDV00_06_QMD_SPARE_E MW(1695:1664) +#define NVA0C0_QMDV00_06_QMD_SPARE_F MW(1727:1696) +#define NVA0C0_QMDV00_06_QMD_SPARE_G MW(1759:1728) +#define NVA0C0_QMDV00_06_QMD_SPARE_H MW(1791:1760) +#define NVA0C0_QMDV00_06_QMD_SPARE_I MW(1823:1792) +#define NVA0C0_QMDV00_06_QMD_SPARE_J MW(1855:1824) +#define NVA0C0_QMDV00_06_QMD_SPARE_K MW(1887:1856) +#define NVA0C0_QMDV00_06_QMD_SPARE_L MW(1919:1888) +#define NVA0C0_QMDV00_06_QMD_SPARE_M MW(1951:1920) +#define NVA0C0_QMDV00_06_QMD_SPARE_N MW(1983:1952) +#define NVA0C0_QMDV00_06_DEBUG_ID_UPPER MW(2015:1984) +#define NVA0C0_QMDV00_06_DEBUG_ID_LOWER MW(2047:2016) + + +/* +** Queue Meta Data, Version 01_06 + */ + +#define NVA0C0_QMDV01_06_OUTER_PUT MW(30:0) +#define NVA0C0_QMDV01_06_OUTER_OVERFLOW MW(31:31) +#define NVA0C0_QMDV01_06_OUTER_GET MW(62:32) +#define NVA0C0_QMDV01_06_OUTER_STICKY_OVERFLOW MW(63:63) +#define NVA0C0_QMDV01_06_INNER_GET MW(94:64) +#define NVA0C0_QMDV01_06_INNER_OVERFLOW MW(95:95) +#define NVA0C0_QMDV01_06_INNER_PUT MW(126:96) +#define NVA0C0_QMDV01_06_INNER_STICKY_OVERFLOW MW(127:127) +#define NVA0C0_QMDV01_06_QMD_RESERVED_A_A MW(159:128) +#define NVA0C0_QMDV01_06_SCHEDULER_NEXT_QMD_POINTER MW(191:160) +#define NVA0C0_QMDV01_06_QMD_GROUP_ID MW(197:192) +#define NVA0C0_QMDV01_06_QMD_RESERVED_A MW(199:198) +#define NVA0C0_QMDV01_06_SCHEDULE_ON_PUT_UPDATE_ENABLE MW(200:200) +#define NVA0C0_QMDV01_06_SCHEDULE_ON_PUT_UPDATE_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_SCHEDULE_ON_PUT_UPDATE_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_ADD_TO_HEAD_OF_QMD_GROUP_LINKED_LIST MW(201:201) +#define NVA0C0_QMDV01_06_ADD_TO_HEAD_OF_QMD_GROUP_LINKED_LIST_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_ADD_TO_HEAD_OF_QMD_GROUP_LINKED_LIST_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_SEMAPHORE_RELEASE_ENABLE0 MW(202:202) +#define NVA0C0_QMDV01_06_SEMAPHORE_RELEASE_ENABLE0_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_SEMAPHORE_RELEASE_ENABLE0_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_SEMAPHORE_RELEASE_ENABLE1 MW(203:203) +#define NVA0C0_QMDV01_06_SEMAPHORE_RELEASE_ENABLE1_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_SEMAPHORE_RELEASE_ENABLE1_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_REQUIRE_SCHEDULING_PCAS MW(204:204) +#define NVA0C0_QMDV01_06_REQUIRE_SCHEDULING_PCAS_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_REQUIRE_SCHEDULING_PCAS_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_QMD_RESERVED_B MW(207:205) +#define NVA0C0_QMDV01_06_SKED_PRIVATE_LIST_ADDR MW(222:208) +#define NVA0C0_QMDV01_06_SKED_PRIVATE_LIST_VALID MW(223:223) +#define NVA0C0_QMDV01_06_SKED_PRIVATE_LIST_VALID_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_SKED_PRIVATE_LIST_VALID_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_CIRCULAR_QUEUE_SIZE MW(248:224) +#define NVA0C0_QMDV01_06_QMD_RESERVED_C MW(249:249) +#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_HEADER_CACHE MW(250:250) +#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_HEADER_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_HEADER_CACHE_TRUE 0x00000001 
+#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_SAMPLER_CACHE MW(251:251) +#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_SAMPLER_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_SAMPLER_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_DATA_CACHE MW(252:252) +#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_DATA_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_INVALIDATE_TEXTURE_DATA_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_INVALIDATE_SHADER_DATA_CACHE MW(253:253) +#define NVA0C0_QMDV01_06_INVALIDATE_SHADER_DATA_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_INVALIDATE_SHADER_DATA_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_INVALIDATE_INSTRUCTION_CACHE MW(254:254) +#define NVA0C0_QMDV01_06_INVALIDATE_INSTRUCTION_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_INVALIDATE_INSTRUCTION_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_INVALIDATE_SHADER_CONSTANT_CACHE MW(255:255) +#define NVA0C0_QMDV01_06_INVALIDATE_SHADER_CONSTANT_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_INVALIDATE_SHADER_CONSTANT_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_PROGRAM_OFFSET MW(287:256) +#define NVA0C0_QMDV01_06_CIRCULAR_QUEUE_ADDR_LOWER MW(319:288) +#define NVA0C0_QMDV01_06_CIRCULAR_QUEUE_ADDR_UPPER MW(327:320) +#define NVA0C0_QMDV01_06_QMD_RESERVED_D MW(335:328) +#define NVA0C0_QMDV01_06_CIRCULAR_QUEUE_ENTRY_SIZE MW(351:336) +#define NVA0C0_QMDV01_06_CWD_REFERENCE_COUNT_ID MW(357:352) +#define NVA0C0_QMDV01_06_CWD_REFERENCE_COUNT_DELTA_MINUS_ONE MW(365:358) +#define NVA0C0_QMDV01_06_RELEASE_MEMBAR_TYPE MW(366:366) +#define NVA0C0_QMDV01_06_RELEASE_MEMBAR_TYPE_FE_NONE 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE_MEMBAR_TYPE_FE_SYSMEMBAR 0x00000001 +#define NVA0C0_QMDV01_06_CWD_REFERENCE_COUNT_INCR_ENABLE MW(367:367) +#define NVA0C0_QMDV01_06_CWD_REFERENCE_COUNT_INCR_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_CWD_REFERENCE_COUNT_INCR_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_CWD_MEMBAR_TYPE MW(369:368) +#define NVA0C0_QMDV01_06_CWD_MEMBAR_TYPE_L1_NONE 0x00000000 +#define NVA0C0_QMDV01_06_CWD_MEMBAR_TYPE_L1_SYSMEMBAR 0x00000001 +#define NVA0C0_QMDV01_06_CWD_MEMBAR_TYPE_L1_MEMBAR 0x00000003 +#define NVA0C0_QMDV01_06_SEQUENTIALLY_RUN_CTAS MW(370:370) +#define NVA0C0_QMDV01_06_SEQUENTIALLY_RUN_CTAS_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_SEQUENTIALLY_RUN_CTAS_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_CWD_REFERENCE_COUNT_DECR_ENABLE MW(371:371) +#define NVA0C0_QMDV01_06_CWD_REFERENCE_COUNT_DECR_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_CWD_REFERENCE_COUNT_DECR_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_THROTTLED MW(372:372) +#define NVA0C0_QMDV01_06_THROTTLED_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_THROTTLED_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_FP32_NAN_BEHAVIOR MW(376:376) +#define NVA0C0_QMDV01_06_FP32_NAN_BEHAVIOR_LEGACY 0x00000000 +#define NVA0C0_QMDV01_06_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001 +#define NVA0C0_QMDV01_06_FP32_F2I_NAN_BEHAVIOR MW(377:377) +#define NVA0C0_QMDV01_06_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000 +#define NVA0C0_QMDV01_06_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001 +#define NVA0C0_QMDV01_06_API_VISIBLE_CALL_LIMIT MW(378:378) +#define NVA0C0_QMDV01_06_API_VISIBLE_CALL_LIMIT__32 0x00000000 +#define NVA0C0_QMDV01_06_API_VISIBLE_CALL_LIMIT_NO_CHECK 0x00000001 +#define NVA0C0_QMDV01_06_SHARED_MEMORY_BANK_MAPPING MW(379:379) +#define NVA0C0_QMDV01_06_SHARED_MEMORY_BANK_MAPPING_FOUR_BYTES_PER_BANK 0x00000000 +#define NVA0C0_QMDV01_06_SHARED_MEMORY_BANK_MAPPING_EIGHT_BYTES_PER_BANK 0x00000001 
+#define NVA0C0_QMDV01_06_SAMPLER_INDEX MW(382:382) +#define NVA0C0_QMDV01_06_SAMPLER_INDEX_INDEPENDENTLY 0x00000000 +#define NVA0C0_QMDV01_06_SAMPLER_INDEX_VIA_HEADER_INDEX 0x00000001 +#define NVA0C0_QMDV01_06_FP32_NARROW_INSTRUCTION MW(383:383) +#define NVA0C0_QMDV01_06_FP32_NARROW_INSTRUCTION_KEEP_DENORMS 0x00000000 +#define NVA0C0_QMDV01_06_FP32_NARROW_INSTRUCTION_FLUSH_DENORMS 0x00000001 +#define NVA0C0_QMDV01_06_CTA_RASTER_WIDTH MW(415:384) +#define NVA0C0_QMDV01_06_CTA_RASTER_HEIGHT MW(431:416) +#define NVA0C0_QMDV01_06_CTA_RASTER_DEPTH MW(447:432) +#define NVA0C0_QMDV01_06_CTA_RASTER_WIDTH_RESUME MW(479:448) +#define NVA0C0_QMDV01_06_CTA_RASTER_HEIGHT_RESUME MW(495:480) +#define NVA0C0_QMDV01_06_CTA_RASTER_DEPTH_RESUME MW(511:496) +#define NVA0C0_QMDV01_06_LAUNCH_QUOTA MW(535:512) +#define NVA0C0_QMDV01_06_QMD_RESERVED_F MW(542:536) +#define NVA0C0_QMDV01_06_LAUNCH_QUOTA_ENABLE MW(543:543) +#define NVA0C0_QMDV01_06_LAUNCH_QUOTA_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_LAUNCH_QUOTA_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_SHARED_MEMORY_SIZE MW(561:544) +#define NVA0C0_QMDV01_06_QMD_RESERVED_G MW(575:562) +#define NVA0C0_QMDV01_06_QMD_VERSION MW(579:576) +#define NVA0C0_QMDV01_06_QMD_MAJOR_VERSION MW(583:580) +#define NVA0C0_QMDV01_06_QMD_RESERVED_H MW(591:584) +#define NVA0C0_QMDV01_06_CTA_THREAD_DIMENSION0 MW(607:592) +#define NVA0C0_QMDV01_06_CTA_THREAD_DIMENSION1 MW(623:608) +#define NVA0C0_QMDV01_06_CTA_THREAD_DIMENSION2 MW(639:624) +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_VALID(i) MW((640+(i)*1):(640+(i)*1)) +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_QMD_RESERVED_I MW(668:648) +#define NVA0C0_QMDV01_06_L1_CONFIGURATION MW(671:669) +#define NVA0C0_QMDV01_06_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVA0C0_QMDV01_06_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_32KB 0x00000002 +#define NVA0C0_QMDV01_06_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 +#define NVA0C0_QMDV01_06_SM_DISABLE_MASK_LOWER MW(703:672) +#define NVA0C0_QMDV01_06_SM_DISABLE_MASK_UPPER MW(735:704) +#define NVA0C0_QMDV01_06_RELEASE0_ADDRESS_LOWER MW(767:736) +#define NVA0C0_QMDV01_06_RELEASE0_ADDRESS_UPPER MW(775:768) +#define NVA0C0_QMDV01_06_QMD_RESERVED_J MW(783:776) +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP MW(790:788) +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP_RED_INC 0x00000003 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP_RED_AND 0x00000005 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP_RED_OR 0x00000006 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA0C0_QMDV01_06_QMD_RESERVED_K MW(791:791) +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_FORMAT MW(793:792) +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_ENABLE MW(794:794) +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE0_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_RELEASE0_STRUCTURE_SIZE MW(799:799) +#define 
NVA0C0_QMDV01_06_RELEASE0_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE0_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA0C0_QMDV01_06_RELEASE0_PAYLOAD MW(831:800) +#define NVA0C0_QMDV01_06_RELEASE1_ADDRESS_LOWER MW(863:832) +#define NVA0C0_QMDV01_06_RELEASE1_ADDRESS_UPPER MW(871:864) +#define NVA0C0_QMDV01_06_QMD_RESERVED_L MW(879:872) +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP MW(886:884) +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP_RED_INC 0x00000003 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP_RED_AND 0x00000005 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP_RED_OR 0x00000006 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA0C0_QMDV01_06_QMD_RESERVED_M MW(887:887) +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_FORMAT MW(889:888) +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_ENABLE MW(890:890) +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE1_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_RELEASE1_STRUCTURE_SIZE MW(895:895) +#define NVA0C0_QMDV01_06_RELEASE1_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA0C0_QMDV01_06_RELEASE1_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA0C0_QMDV01_06_RELEASE1_PAYLOAD MW(927:896) +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_ADDR_LOWER(i) MW((959+(i)*64):(928+(i)*64)) +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_ADDR_UPPER(i) MW((967+(i)*64):(960+(i)*64)) +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_RESERVED_ADDR(i) MW((973+(i)*64):(968+(i)*64)) +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_INVALIDATE(i) MW((974+(i)*64):(974+(i)*64)) +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_INVALIDATE_FALSE 0x00000000 +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_INVALIDATE_TRUE 0x00000001 +#define NVA0C0_QMDV01_06_CONSTANT_BUFFER_SIZE(i) MW((991+(i)*64):(975+(i)*64)) +#define NVA0C0_QMDV01_06_SHADER_LOCAL_MEMORY_LOW_SIZE MW(1463:1440) +#define NVA0C0_QMDV01_06_QMD_RESERVED_N MW(1466:1464) +#define NVA0C0_QMDV01_06_BARRIER_COUNT MW(1471:1467) +#define NVA0C0_QMDV01_06_SHADER_LOCAL_MEMORY_HIGH_SIZE MW(1495:1472) +#define NVA0C0_QMDV01_06_REGISTER_COUNT MW(1503:1496) +#define NVA0C0_QMDV01_06_SHADER_LOCAL_MEMORY_CRS_SIZE MW(1527:1504) +#define NVA0C0_QMDV01_06_SASS_VERSION MW(1535:1528) +#define NVA0C0_QMDV01_06_HW_ONLY_INNER_GET MW(1566:1536) +#define NVA0C0_QMDV01_06_HW_ONLY_REQUIRE_SCHEDULING_PCAS MW(1567:1567) +#define NVA0C0_QMDV01_06_HW_ONLY_INNER_PUT MW(1598:1568) +#define NVA0C0_QMDV01_06_HW_ONLY_SCHEDULE_ON_PUT_UPDATE_ENABLE MW(1599:1599) +#define NVA0C0_QMDV01_06_QUEUE_ENTRIES_PER_CTA_MINUS_ONE MW(1606:1600) +#define NVA0C0_QMDV01_06_QMD_RESERVED_Q MW(1609:1607) +#define NVA0C0_QMDV01_06_COALESCE_WAITING_PERIOD MW(1617:1610) +#define NVA0C0_QMDV01_06_QMD_RESERVED_R MW(1631:1618) +#define NVA0C0_QMDV01_06_QMD_SPARE_D MW(1663:1632) +#define NVA0C0_QMDV01_06_QMD_SPARE_E MW(1695:1664) +#define NVA0C0_QMDV01_06_QMD_SPARE_F MW(1727:1696) +#define NVA0C0_QMDV01_06_QMD_SPARE_G MW(1759:1728) +#define NVA0C0_QMDV01_06_QMD_SPARE_H MW(1791:1760) +#define NVA0C0_QMDV01_06_QMD_SPARE_I MW(1823:1792) +#define NVA0C0_QMDV01_06_QMD_SPARE_J MW(1855:1824) 
+#define NVA0C0_QMDV01_06_QMD_SPARE_K MW(1887:1856) +#define NVA0C0_QMDV01_06_QMD_SPARE_L MW(1919:1888) +#define NVA0C0_QMDV01_06_QMD_SPARE_M MW(1951:1920) +#define NVA0C0_QMDV01_06_QMD_SPARE_N MW(1983:1952) +#define NVA0C0_QMDV01_06_DEBUG_ID_UPPER MW(2015:1984) +#define NVA0C0_QMDV01_06_DEBUG_ID_LOWER MW(2047:2016) + + +/* +** Queue Meta Data, Version 01_07 + */ + +#define NVA0C0_QMDV01_07_OUTER_PUT MW(30:0) +#define NVA0C0_QMDV01_07_OUTER_OVERFLOW MW(31:31) +#define NVA0C0_QMDV01_07_OUTER_GET MW(62:32) +#define NVA0C0_QMDV01_07_OUTER_STICKY_OVERFLOW MW(63:63) +#define NVA0C0_QMDV01_07_INNER_GET MW(94:64) +#define NVA0C0_QMDV01_07_INNER_OVERFLOW MW(95:95) +#define NVA0C0_QMDV01_07_INNER_PUT MW(126:96) +#define NVA0C0_QMDV01_07_INNER_STICKY_OVERFLOW MW(127:127) +#define NVA0C0_QMDV01_07_QMD_RESERVED_A_A MW(159:128) +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_POINTER MW(191:160) +#define NVA0C0_QMDV01_07_QMD_GROUP_ID MW(197:192) +#define NVA0C0_QMDV01_07_QMD_RESERVED_A MW(200:198) +#define NVA0C0_QMDV01_07_ADD_TO_HEAD_OF_QMD_GROUP_LINKED_LIST MW(201:201) +#define NVA0C0_QMDV01_07_ADD_TO_HEAD_OF_QMD_GROUP_LINKED_LIST_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_ADD_TO_HEAD_OF_QMD_GROUP_LINKED_LIST_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_SEMAPHORE_RELEASE_ENABLE0 MW(202:202) +#define NVA0C0_QMDV01_07_SEMAPHORE_RELEASE_ENABLE0_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_SEMAPHORE_RELEASE_ENABLE0_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_SEMAPHORE_RELEASE_ENABLE1 MW(203:203) +#define NVA0C0_QMDV01_07_SEMAPHORE_RELEASE_ENABLE1_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_SEMAPHORE_RELEASE_ENABLE1_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_REQUIRE_SCHEDULING_PCAS MW(204:204) +#define NVA0C0_QMDV01_07_REQUIRE_SCHEDULING_PCAS_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_REQUIRE_SCHEDULING_PCAS_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_SCHEDULE_ENABLE MW(205:205) +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_SCHEDULE_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_SCHEDULE_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_TYPE MW(206:206) +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_TYPE_QUEUE 0x00000000 +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_TYPE_GRID 0x00000001 +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_FIELD_COPY MW(207:207) +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_FIELD_COPY_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_DEPENDENT_QMD_FIELD_COPY_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_QMD_RESERVED_B MW(223:208) +#define NVA0C0_QMDV01_07_CIRCULAR_QUEUE_SIZE MW(248:224) +#define NVA0C0_QMDV01_07_QMD_RESERVED_C MW(249:249) +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_HEADER_CACHE MW(250:250) +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_HEADER_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_HEADER_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_SAMPLER_CACHE MW(251:251) +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_SAMPLER_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_SAMPLER_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_DATA_CACHE MW(252:252) +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_DATA_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_INVALIDATE_TEXTURE_DATA_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_INVALIDATE_SHADER_DATA_CACHE MW(253:253) +#define NVA0C0_QMDV01_07_INVALIDATE_SHADER_DATA_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_INVALIDATE_SHADER_DATA_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_INVALIDATE_INSTRUCTION_CACHE MW(254:254) +#define 
NVA0C0_QMDV01_07_INVALIDATE_INSTRUCTION_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_INVALIDATE_INSTRUCTION_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_INVALIDATE_SHADER_CONSTANT_CACHE MW(255:255) +#define NVA0C0_QMDV01_07_INVALIDATE_SHADER_CONSTANT_CACHE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_INVALIDATE_SHADER_CONSTANT_CACHE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_PROGRAM_OFFSET MW(287:256) +#define NVA0C0_QMDV01_07_CIRCULAR_QUEUE_ADDR_LOWER MW(319:288) +#define NVA0C0_QMDV01_07_CIRCULAR_QUEUE_ADDR_UPPER MW(327:320) +#define NVA0C0_QMDV01_07_QMD_RESERVED_D MW(335:328) +#define NVA0C0_QMDV01_07_CIRCULAR_QUEUE_ENTRY_SIZE MW(351:336) +#define NVA0C0_QMDV01_07_CWD_REFERENCE_COUNT_ID MW(357:352) +#define NVA0C0_QMDV01_07_CWD_REFERENCE_COUNT_DELTA_MINUS_ONE MW(365:358) +#define NVA0C0_QMDV01_07_RELEASE_MEMBAR_TYPE MW(366:366) +#define NVA0C0_QMDV01_07_RELEASE_MEMBAR_TYPE_FE_NONE 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE_MEMBAR_TYPE_FE_SYSMEMBAR 0x00000001 +#define NVA0C0_QMDV01_07_CWD_REFERENCE_COUNT_INCR_ENABLE MW(367:367) +#define NVA0C0_QMDV01_07_CWD_REFERENCE_COUNT_INCR_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_CWD_REFERENCE_COUNT_INCR_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_CWD_MEMBAR_TYPE MW(369:368) +#define NVA0C0_QMDV01_07_CWD_MEMBAR_TYPE_L1_NONE 0x00000000 +#define NVA0C0_QMDV01_07_CWD_MEMBAR_TYPE_L1_SYSMEMBAR 0x00000001 +#define NVA0C0_QMDV01_07_CWD_MEMBAR_TYPE_L1_MEMBAR 0x00000003 +#define NVA0C0_QMDV01_07_SEQUENTIALLY_RUN_CTAS MW(370:370) +#define NVA0C0_QMDV01_07_SEQUENTIALLY_RUN_CTAS_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_SEQUENTIALLY_RUN_CTAS_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_CWD_REFERENCE_COUNT_DECR_ENABLE MW(371:371) +#define NVA0C0_QMDV01_07_CWD_REFERENCE_COUNT_DECR_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_CWD_REFERENCE_COUNT_DECR_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_THROTTLED MW(372:372) +#define NVA0C0_QMDV01_07_THROTTLED_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_THROTTLED_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_FP32_NAN_BEHAVIOR MW(376:376) +#define NVA0C0_QMDV01_07_FP32_NAN_BEHAVIOR_LEGACY 0x00000000 +#define NVA0C0_QMDV01_07_FP32_NAN_BEHAVIOR_FP64_COMPATIBLE 0x00000001 +#define NVA0C0_QMDV01_07_FP32_F2I_NAN_BEHAVIOR MW(377:377) +#define NVA0C0_QMDV01_07_FP32_F2I_NAN_BEHAVIOR_PASS_ZERO 0x00000000 +#define NVA0C0_QMDV01_07_FP32_F2I_NAN_BEHAVIOR_PASS_INDEFINITE 0x00000001 +#define NVA0C0_QMDV01_07_API_VISIBLE_CALL_LIMIT MW(378:378) +#define NVA0C0_QMDV01_07_API_VISIBLE_CALL_LIMIT__32 0x00000000 +#define NVA0C0_QMDV01_07_API_VISIBLE_CALL_LIMIT_NO_CHECK 0x00000001 +#define NVA0C0_QMDV01_07_SHARED_MEMORY_BANK_MAPPING MW(379:379) +#define NVA0C0_QMDV01_07_SHARED_MEMORY_BANK_MAPPING_FOUR_BYTES_PER_BANK 0x00000000 +#define NVA0C0_QMDV01_07_SHARED_MEMORY_BANK_MAPPING_EIGHT_BYTES_PER_BANK 0x00000001 +#define NVA0C0_QMDV01_07_SAMPLER_INDEX MW(382:382) +#define NVA0C0_QMDV01_07_SAMPLER_INDEX_INDEPENDENTLY 0x00000000 +#define NVA0C0_QMDV01_07_SAMPLER_INDEX_VIA_HEADER_INDEX 0x00000001 +#define NVA0C0_QMDV01_07_FP32_NARROW_INSTRUCTION MW(383:383) +#define NVA0C0_QMDV01_07_FP32_NARROW_INSTRUCTION_KEEP_DENORMS 0x00000000 +#define NVA0C0_QMDV01_07_FP32_NARROW_INSTRUCTION_FLUSH_DENORMS 0x00000001 +#define NVA0C0_QMDV01_07_CTA_RASTER_WIDTH MW(415:384) +#define NVA0C0_QMDV01_07_CTA_RASTER_HEIGHT MW(431:416) +#define NVA0C0_QMDV01_07_CTA_RASTER_DEPTH MW(447:432) +#define NVA0C0_QMDV01_07_CTA_RASTER_WIDTH_RESUME MW(479:448) +#define NVA0C0_QMDV01_07_CTA_RASTER_HEIGHT_RESUME MW(495:480) +#define 
NVA0C0_QMDV01_07_CTA_RASTER_DEPTH_RESUME MW(511:496) +#define NVA0C0_QMDV01_07_QUEUE_ENTRIES_PER_CTA_MINUS_ONE MW(518:512) +#define NVA0C0_QMDV01_07_COALESCE_WAITING_PERIOD MW(529:522) +#define NVA0C0_QMDV01_07_SHARED_MEMORY_SIZE MW(561:544) +#define NVA0C0_QMDV01_07_QMD_RESERVED_G MW(575:562) +#define NVA0C0_QMDV01_07_QMD_VERSION MW(579:576) +#define NVA0C0_QMDV01_07_QMD_MAJOR_VERSION MW(583:580) +#define NVA0C0_QMDV01_07_QMD_RESERVED_H MW(591:584) +#define NVA0C0_QMDV01_07_CTA_THREAD_DIMENSION0 MW(607:592) +#define NVA0C0_QMDV01_07_CTA_THREAD_DIMENSION1 MW(623:608) +#define NVA0C0_QMDV01_07_CTA_THREAD_DIMENSION2 MW(639:624) +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_VALID(i) MW((640+(i)*1):(640+(i)*1)) +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_VALID_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_VALID_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_QMD_RESERVED_I MW(668:648) +#define NVA0C0_QMDV01_07_L1_CONFIGURATION MW(671:669) +#define NVA0C0_QMDV01_07_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_16KB 0x00000001 +#define NVA0C0_QMDV01_07_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_32KB 0x00000002 +#define NVA0C0_QMDV01_07_L1_CONFIGURATION_DIRECTLY_ADDRESSABLE_MEMORY_SIZE_48KB 0x00000003 +#define NVA0C0_QMDV01_07_SM_DISABLE_MASK_LOWER MW(703:672) +#define NVA0C0_QMDV01_07_SM_DISABLE_MASK_UPPER MW(735:704) +#define NVA0C0_QMDV01_07_RELEASE0_ADDRESS_LOWER MW(767:736) +#define NVA0C0_QMDV01_07_RELEASE0_ADDRESS_UPPER MW(775:768) +#define NVA0C0_QMDV01_07_QMD_RESERVED_J MW(783:776) +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP MW(790:788) +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP_RED_INC 0x00000003 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP_RED_AND 0x00000005 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP_RED_OR 0x00000006 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA0C0_QMDV01_07_QMD_RESERVED_K MW(791:791) +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_FORMAT MW(793:792) +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_ENABLE MW(794:794) +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE0_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_RELEASE0_STRUCTURE_SIZE MW(799:799) +#define NVA0C0_QMDV01_07_RELEASE0_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE0_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA0C0_QMDV01_07_RELEASE0_PAYLOAD MW(831:800) +#define NVA0C0_QMDV01_07_RELEASE1_ADDRESS_LOWER MW(863:832) +#define NVA0C0_QMDV01_07_RELEASE1_ADDRESS_UPPER MW(871:864) +#define NVA0C0_QMDV01_07_QMD_RESERVED_L MW(879:872) +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP MW(886:884) +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP_RED_INC 0x00000003 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP_RED_AND 0x00000005 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP_RED_OR 0x00000006 +#define 
NVA0C0_QMDV01_07_RELEASE1_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA0C0_QMDV01_07_QMD_RESERVED_M MW(887:887) +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_FORMAT MW(889:888) +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_ENABLE MW(890:890) +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE1_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_RELEASE1_STRUCTURE_SIZE MW(895:895) +#define NVA0C0_QMDV01_07_RELEASE1_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA0C0_QMDV01_07_RELEASE1_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA0C0_QMDV01_07_RELEASE1_PAYLOAD MW(927:896) +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_ADDR_LOWER(i) MW((959+(i)*64):(928+(i)*64)) +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_ADDR_UPPER(i) MW((967+(i)*64):(960+(i)*64)) +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_RESERVED_ADDR(i) MW((973+(i)*64):(968+(i)*64)) +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_INVALIDATE(i) MW((974+(i)*64):(974+(i)*64)) +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_INVALIDATE_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_INVALIDATE_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_CONSTANT_BUFFER_SIZE(i) MW((991+(i)*64):(975+(i)*64)) +#define NVA0C0_QMDV01_07_SHADER_LOCAL_MEMORY_LOW_SIZE MW(1463:1440) +#define NVA0C0_QMDV01_07_QMD_RESERVED_N MW(1466:1464) +#define NVA0C0_QMDV01_07_BARRIER_COUNT MW(1471:1467) +#define NVA0C0_QMDV01_07_SHADER_LOCAL_MEMORY_HIGH_SIZE MW(1495:1472) +#define NVA0C0_QMDV01_07_REGISTER_COUNT MW(1503:1496) +#define NVA0C0_QMDV01_07_SHADER_LOCAL_MEMORY_CRS_SIZE MW(1527:1504) +#define NVA0C0_QMDV01_07_SASS_VERSION MW(1535:1528) +#define NVA0C0_QMDV01_07_HW_ONLY_INNER_GET MW(1566:1536) +#define NVA0C0_QMDV01_07_HW_ONLY_REQUIRE_SCHEDULING_PCAS MW(1567:1567) +#define NVA0C0_QMDV01_07_HW_ONLY_INNER_PUT MW(1598:1568) +#define NVA0C0_QMDV01_07_QMD_RESERVED_P MW(1599:1599) +#define NVA0C0_QMDV01_07_HW_ONLY_SPAN_LIST_HEAD_INDEX MW(1629:1600) +#define NVA0C0_QMDV01_07_QMD_RESERVED_Q MW(1630:1630) +#define NVA0C0_QMDV01_07_HW_ONLY_SPAN_LIST_HEAD_INDEX_VALID MW(1631:1631) +#define NVA0C0_QMDV01_07_HW_ONLY_SPAN_LIST_HEAD_INDEX_VALID_FALSE 0x00000000 +#define NVA0C0_QMDV01_07_HW_ONLY_SPAN_LIST_HEAD_INDEX_VALID_TRUE 0x00000001 +#define NVA0C0_QMDV01_07_HW_ONLY_SKED_NEXT_QMD_POINTER MW(1663:1632) +#define NVA0C0_QMDV01_07_QMD_SPARE_E MW(1695:1664) +#define NVA0C0_QMDV01_07_QMD_SPARE_F MW(1727:1696) +#define NVA0C0_QMDV01_07_QMD_SPARE_G MW(1759:1728) +#define NVA0C0_QMDV01_07_QMD_SPARE_H MW(1791:1760) +#define NVA0C0_QMDV01_07_QMD_SPARE_I MW(1823:1792) +#define NVA0C0_QMDV01_07_QMD_SPARE_J MW(1855:1824) +#define NVA0C0_QMDV01_07_QMD_SPARE_K MW(1887:1856) +#define NVA0C0_QMDV01_07_QMD_SPARE_L MW(1919:1888) +#define NVA0C0_QMDV01_07_QMD_SPARE_M MW(1951:1920) +#define NVA0C0_QMDV01_07_QMD_SPARE_N MW(1983:1952) +#define NVA0C0_QMDV01_07_DEBUG_ID_UPPER MW(2015:1984) +#define NVA0C0_QMDV01_07_DEBUG_ID_LOWER MW(2047:2016) + + + +#endif // #ifndef __CLA0C0QMD_H__ diff --git a/src/common/sdk/nvidia/inc/class/cla140.h b/src/common/sdk/nvidia/inc/class/cla140.h new file mode 100644 index 000000000..08b8fc139 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla140.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cla140_h_ +#define _cla140_h_ + +#define KEPLER_INLINE_TO_MEMORY_B 0xA140 + +#endif // _cla140_h_ diff --git a/src/common/sdk/nvidia/inc/class/cla16f.h b/src/common/sdk/nvidia/inc/class/cla16f.h new file mode 100644 index 000000000..66e7594b5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla16f.h @@ -0,0 +1,254 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cla16f_h_ +#define _cla16f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class KEPLER_CHANNEL_GPFIFO */ +/* + * Documentation for KEPLER_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. 
+ * + */ +#define KEPLER_CHANNEL_GPFIFO_B (0x0000A16F) + +/* pio method data structure */ +typedef volatile struct _cla16f_tag0 { + NvV32 Reserved00[0x7c0]; +} NvA16FTypedef, KEPLER_ChannelGPFifoB; +#define NVA16F_TYPEDEF KEPLER_CHANNELChannelGPFifo +/* dma flow control data structure */ +typedef volatile struct _cla16f_tag1 { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} NvA16FControl, KeplerBControlGPFifo; +/* fields and values */ +#define NVA16F_NUMBER_OF_SUBCHANNELS (8) +#define NVA16F_SET_OBJECT (0x00000000) +#define NVA16F_SET_OBJECT_NVCLASS 15:0 +#define NVA16F_SET_OBJECT_ENGINE 20:16 +#define NVA16F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVA16F_ILLEGAL (0x00000004) +#define NVA16F_ILLEGAL_HANDLE 31:0 +#define NVA16F_NOP (0x00000008) +#define NVA16F_NOP_HANDLE 31:0 +#define NVA16F_SEMAPHOREA (0x00000010) +#define NVA16F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVA16F_SEMAPHOREB (0x00000014) +#define NVA16F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVA16F_SEMAPHOREC (0x00000018) +#define NVA16F_SEMAPHOREC_PAYLOAD 31:0 +#define NVA16F_SEMAPHORED (0x0000001C) +#define NVA16F_SEMAPHORED_OPERATION 4:0 +#define NVA16F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVA16F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVA16F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVA16F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVA16F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVA16F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVA16F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVA16F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVA16F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVA16F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVA16F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVA16F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVA16F_SEMAPHORED_REDUCTION 30:27 +#define NVA16F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVA16F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVA16F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVA16F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVA16F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVA16F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVA16F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVA16F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVA16F_SEMAPHORED_FORMAT 31:31 +#define NVA16F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVA16F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVA16F_NON_STALL_INTERRUPT (0x00000020) +#define NVA16F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVA16F_FB_FLUSH (0x00000024) +#define NVA16F_FB_FLUSH_HANDLE 31:0 +#define NVA16F_MEM_OP_A (0x00000028) +#define NVA16F_MEM_OP_A_OPERAND_LOW 31:2 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_ADDR 29:2 +#define 
NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET 31:30 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NVA16F_MEM_OP_A_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NVA16F_MEM_OP_B (0x0000002c) +#define NVA16F_MEM_OP_B_OPERAND_HIGH 7:0 +#define NVA16F_MEM_OP_B_OPERATION 31:27 +#define NVA16F_MEM_OP_B_OPERATION_SYSMEMBAR_FLUSH 0x00000005 +#define NVA16F_MEM_OP_B_OPERATION_SOFT_FLUSH 0x00000006 +#define NVA16F_MEM_OP_B_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVA16F_MEM_OP_B_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVA16F_MEM_OP_B_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +#define NVA16F_MEM_OP_B_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVA16F_MEM_OP_B_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB 0:0 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC 1:1 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVA16F_MEM_OP_B_MMU_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVA16F_SET_REFERENCE (0x00000050) +#define NVA16F_SET_REFERENCE_COUNT 31:0 +#define NVA16F_WFI (0x00000078) +#define NVA16F_WFI_HANDLE 31:0 +#define NVA16F_CRC_CHECK (0x0000007c) +#define NVA16F_CRC_CHECK_VALUE 31:0 +#define NVA16F_YIELD (0x00000080) +#define NVA16F_YIELD_OP 1:0 +#define NVA16F_YIELD_OP_NOP 0x00000000 + + +/* GPFIFO entry format */ +#define NVA16F_GP_ENTRY__SIZE 8 +#define NVA16F_GP_ENTRY0_FETCH 0:0 +#define NVA16F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVA16F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVA16F_GP_ENTRY0_GET 31:2 +#define NVA16F_GP_ENTRY0_OPERAND 31:0 +#define NVA16F_GP_ENTRY1_GET_HI 7:0 +#define NVA16F_GP_ENTRY1_PRIV 8:8 +#define NVA16F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVA16F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVA16F_GP_ENTRY1_LEVEL 9:9 +#define NVA16F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVA16F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVA16F_GP_ENTRY1_LENGTH 30:10 +#define NVA16F_GP_ENTRY1_SYNC 31:31 +#define NVA16F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVA16F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVA16F_GP_ENTRY1_OPCODE 7:0 +#define NVA16F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVA16F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVA16F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVA16F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVA16F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVA16F_DMA_METHOD_ADDRESS 11:0 +#define NVA16F_DMA_SUBDEVICE_MASK 15:4 +#define NVA16F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVA16F_DMA_TERT_OP 17:16 +#define NVA16F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVA16F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVA16F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVA16F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVA16F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVA16F_DMA_METHOD_COUNT_OLD 28:18 +#define NVA16F_DMA_METHOD_COUNT 28:16 +#define NVA16F_DMA_IMMD_DATA 28:16 +#define NVA16F_DMA_SEC_OP 31:29 +#define NVA16F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVA16F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVA16F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVA16F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVA16F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVA16F_DMA_SEC_OP_ONE_INC (0x00000005) +#define 
NVA16F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVA16F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVA16F_DMA_INCR_ADDRESS 11:0 +#define NVA16F_DMA_INCR_SUBCHANNEL 15:13 +#define NVA16F_DMA_INCR_COUNT 28:16 +#define NVA16F_DMA_INCR_OPCODE 31:29 +#define NVA16F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVA16F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVA16F_DMA_NONINCR_ADDRESS 11:0 +#define NVA16F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVA16F_DMA_NONINCR_COUNT 28:16 +#define NVA16F_DMA_NONINCR_OPCODE 31:29 +#define NVA16F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVA16F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVA16F_DMA_ONEINCR_ADDRESS 11:0 +#define NVA16F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVA16F_DMA_ONEINCR_COUNT 28:16 +#define NVA16F_DMA_ONEINCR_OPCODE 31:29 +#define NVA16F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVA16F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVA16F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVA16F_DMA_IMMD_ADDRESS 11:0 +#define NVA16F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVA16F_DMA_IMMD_DATA 28:16 +#define NVA16F_DMA_IMMD_OPCODE 31:29 +#define NVA16F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVA16F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVA16F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA16F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVA16F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVA16F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA16F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVA16F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVA16F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVA16F_DMA_ENDSEG_OPCODE 31:29 +#define NVA16F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVA16F_DMA_ADDRESS 12:2 +#define NVA16F_DMA_SUBCH 15:13 +#define NVA16F_DMA_OPCODE3 17:16 +#define NVA16F_DMA_OPCODE3_NONE (0x00000000) +#define NVA16F_DMA_COUNT 28:18 +#define NVA16F_DMA_OPCODE 31:29 +#define NVA16F_DMA_OPCODE_METHOD (0x00000000) +#define NVA16F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVA16F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _cla16F_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cla16fsw.h b/src/common/sdk/nvidia/inc/class/cla16fsw.h new file mode 100644 index 000000000..f76ac352d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla16fsw.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cla16f_sw_h_ +#define _cla16f_sw_h_ + +#define NVA16F_NOTIFIERS_RC (0) +#define NVA16F_NOTIFIERS_REFCNT (1) +#define NVA16F_NOTIFIERS_NONSTALL (2) +#define NVA16F_NOTIFIERS_EVENTBUFFER (3) +#define NVA16F_NOTIFIERS_IDLECHANNEL (4) +#define NVA16F_NOTIFIERS_ENDCTX (5) +#define NVA16F_NOTIFIERS_SW (6) +#define NVA16F_NOTIFIERS_GR_DEBUG_INTR (7) +#define NVA16F_NOTIFIERS_MAXCOUNT (8) + +/* NvNotification[] fields and values */ +#define NVA16F_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NVA16F_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +#endif /* _cla16f_sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cla197.h b/src/common/sdk/nvidia/inc/class/cla197.h new file mode 100644 index 000000000..8e4edbebd --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla197.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cla197_h_ +#define _cla197_h_ + +#define KEPLER_B 0xA197 + +#endif // _cla197_h_ diff --git a/src/common/sdk/nvidia/inc/class/cla1c0.h b/src/common/sdk/nvidia/inc/class/cla1c0.h new file mode 100644 index 000000000..2b904c03b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla1c0.h @@ -0,0 +1,669 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_kepler_compute_b_h_ +#define _cl_kepler_compute_b_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../class/bin/sw_header.pl kepler_compute_b */ + +#include "nvtypes.h" + +#define KEPLER_COMPUTE_B 0xA1C0 + +#define NVA1C0_SET_OBJECT 0x0000 +#define NVA1C0_SET_OBJECT_CLASS_ID 15:0 +#define NVA1C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVA1C0_NO_OPERATION 0x0100 +#define NVA1C0_NO_OPERATION_V 31:0 + +#define NVA1C0_SET_NOTIFY_A 0x0104 +#define NVA1C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVA1C0_SET_NOTIFY_B 0x0108 +#define NVA1C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVA1C0_NOTIFY 0x010c +#define NVA1C0_NOTIFY_TYPE 31:0 +#define NVA1C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVA1C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVA1C0_WAIT_FOR_IDLE 0x0110 +#define NVA1C0_WAIT_FOR_IDLE_V 31:0 + +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVA1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVA1C0_SEND_GO_IDLE 0x013c +#define NVA1C0_SEND_GO_IDLE_V 31:0 + +#define NVA1C0_PM_TRIGGER 0x0140 +#define NVA1C0_PM_TRIGGER_V 31:0 + +#define NVA1C0_PM_TRIGGER_WFI 0x0144 +#define NVA1C0_PM_TRIGGER_WFI_V 31:0 + +#define NVA1C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVA1C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVA1C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVA1C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVA1C0_LINE_LENGTH_IN 0x0180 +#define NVA1C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVA1C0_LINE_COUNT 0x0184 +#define NVA1C0_LINE_COUNT_VALUE 31:0 + +#define NVA1C0_OFFSET_OUT_UPPER 0x0188 +#define NVA1C0_OFFSET_OUT_UPPER_VALUE 7:0 + +#define NVA1C0_OFFSET_OUT 0x018c +#define NVA1C0_OFFSET_OUT_VALUE 31:0 + +#define NVA1C0_PITCH_OUT 0x0190 +#define NVA1C0_PITCH_OUT_VALUE 31:0 + +#define NVA1C0_SET_DST_BLOCK_SIZE 0x0194 +#define 
NVA1C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVA1C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVA1C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVA1C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVA1C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVA1C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVA1C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVA1C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVA1C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVA1C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVA1C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVA1C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVA1C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVA1C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVA1C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVA1C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVA1C0_SET_DST_WIDTH 0x0198 +#define NVA1C0_SET_DST_WIDTH_V 31:0 + +#define NVA1C0_SET_DST_HEIGHT 0x019c +#define NVA1C0_SET_DST_HEIGHT_V 31:0 + +#define NVA1C0_SET_DST_DEPTH 0x01a0 +#define NVA1C0_SET_DST_DEPTH_V 31:0 + +#define NVA1C0_SET_DST_LAYER 0x01a4 +#define NVA1C0_SET_DST_LAYER_V 31:0 + +#define NVA1C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVA1C0_SET_DST_ORIGIN_BYTES_X_V 19:0 + +#define NVA1C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVA1C0_SET_DST_ORIGIN_SAMPLES_Y_V 15:0 + +#define NVA1C0_LAUNCH_DMA 0x01b0 +#define NVA1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVA1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVA1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVA1C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVA1C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVA1C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVA1C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVA1C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVA1C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVA1C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVA1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVA1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVA1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVA1C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVA1C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA1C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVA1C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA1C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVA1C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA1C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVA1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVA1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVA1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVA1C0_LOAD_INLINE_DATA 0x01b4 +#define NVA1C0_LOAD_INLINE_DATA_V 31:0 + +#define NVA1C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVA1C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVA1C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define 
NVA1C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVA1C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVA1C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVA1C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVA1C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVA1C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVA1C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVA1C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVA1C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVA1C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVA1C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVA1C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVA1C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define NVA1C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVA1C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVA1C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVA1C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVA1C0_SET_COALESCE_WAITING_PERIOD_UNIT 0x020c +#define NVA1C0_SET_COALESCE_WAITING_PERIOD_UNIT_CLOCKS 31:0 + +#define NVA1C0_PERFMON_TRANSFER 0x0210 +#define NVA1C0_PERFMON_TRANSFER_V 31:0 + +#define NVA1C0_SET_SHADER_SHARED_MEMORY_WINDOW 0x0214 +#define NVA1C0_SET_SHADER_SHARED_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVA1C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVA1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVA1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVA1C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVA1C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVA1C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVA1C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVA1C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVA1C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVA1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVA1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVA1C0_SET_CWD_CONTROL 0x0240 +#define NVA1C0_SET_CWD_CONTROL_SM_SELECTION 0:0 +#define NVA1C0_SET_CWD_CONTROL_SM_SELECTION_LOAD_BALANCED 0x00000000 +#define NVA1C0_SET_CWD_CONTROL_SM_SELECTION_ROUND_ROBIN 0x00000001 + +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVA1C0_SET_CWD_REF_COUNTER 0x0248 +#define NVA1C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVA1C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A 0x0274 +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A_ADDRESS_UPPER 7:0 + +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B 0x0278 +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B_ADDRESS_LOWER 31:0 + +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C 0x027c +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_BYTE_COUNT 16:0 +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2 31:31 +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_TRUE 0x00000001 + +#define NVA1C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define 
NVA1C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVA1C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVA1C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVA1C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVA1C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVA1C0_SET_QMD_VERSION 0x0288 +#define NVA1C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVA1C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVA1C0_CHECK_QMD_VERSION 0x0290 +#define NVA1C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVA1C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVA1C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVA1C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVA1C0_SEND_PCAS_A 0x02b4 +#define NVA1C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVA1C0_SEND_PCAS_B 0x02b8 +#define NVA1C0_SEND_PCAS_B_FROM 23:0 +#define NVA1C0_SEND_PCAS_B_DELTA 31:24 + +#define NVA1C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVA1C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVA1C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVA1C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVA1C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVA1C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVA1C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVA1C0_SET_GLOBAL_LOAD_VIA_TEXTURE 0x02c4 +#define NVA1C0_SET_GLOBAL_LOAD_VIA_TEXTURE_ENABLE 0:0 +#define NVA1C0_SET_GLOBAL_LOAD_VIA_TEXTURE_ENABLE_FALSE 0x00000000 +#define NVA1C0_SET_GLOBAL_LOAD_VIA_TEXTURE_ENABLE_TRUE 0x00000001 +#define NVA1C0_SET_GLOBAL_LOAD_VIA_TEXTURE_HEADER_INDEX 23:4 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A 0x02f0 +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B 0x02f4 +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C 0x02f8 +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVA1C0_SET_SPA_VERSION 0x0310 +#define NVA1C0_SET_SPA_VERSION_MINOR 7:0 +#define NVA1C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVA1C0_SET_FALCON00 0x0500 +#define NVA1C0_SET_FALCON00_V 31:0 + +#define NVA1C0_SET_FALCON01 0x0504 +#define NVA1C0_SET_FALCON01_V 31:0 + +#define NVA1C0_SET_FALCON02 0x0508 +#define NVA1C0_SET_FALCON02_V 31:0 + +#define NVA1C0_SET_FALCON03 0x050c +#define NVA1C0_SET_FALCON03_V 31:0 + +#define NVA1C0_SET_FALCON04 0x0510 +#define NVA1C0_SET_FALCON04_V 31:0 + +#define NVA1C0_SET_FALCON05 0x0514 +#define NVA1C0_SET_FALCON05_V 31:0 + +#define NVA1C0_SET_FALCON06 0x0518 +#define NVA1C0_SET_FALCON06_V 31:0 + +#define NVA1C0_SET_FALCON07 0x051c +#define NVA1C0_SET_FALCON07_V 31:0 + +#define NVA1C0_SET_FALCON08 0x0520 +#define NVA1C0_SET_FALCON08_V 31:0 + +#define NVA1C0_SET_FALCON09 0x0524 +#define NVA1C0_SET_FALCON09_V 31:0 + +#define NVA1C0_SET_FALCON10 0x0528 +#define NVA1C0_SET_FALCON10_V 31:0 + +#define NVA1C0_SET_FALCON11 0x052c +#define NVA1C0_SET_FALCON11_V 31:0 + +#define NVA1C0_SET_FALCON12 0x0530 +#define NVA1C0_SET_FALCON12_V 31:0 + +#define NVA1C0_SET_FALCON13 0x0534 +#define NVA1C0_SET_FALCON13_V 31:0 + +#define 
NVA1C0_SET_FALCON14 0x0538 +#define NVA1C0_SET_FALCON14_V 31:0 + +#define NVA1C0_SET_FALCON15 0x053c +#define NVA1C0_SET_FALCON15_V 31:0 + +#define NVA1C0_SET_FALCON16 0x0540 +#define NVA1C0_SET_FALCON16_V 31:0 + +#define NVA1C0_SET_FALCON17 0x0544 +#define NVA1C0_SET_FALCON17_V 31:0 + +#define NVA1C0_SET_FALCON18 0x0548 +#define NVA1C0_SET_FALCON18_V 31:0 + +#define NVA1C0_SET_FALCON19 0x054c +#define NVA1C0_SET_FALCON19_V 31:0 + +#define NVA1C0_SET_FALCON20 0x0550 +#define NVA1C0_SET_FALCON20_V 31:0 + +#define NVA1C0_SET_FALCON21 0x0554 +#define NVA1C0_SET_FALCON21_V 31:0 + +#define NVA1C0_SET_FALCON22 0x0558 +#define NVA1C0_SET_FALCON22_V 31:0 + +#define NVA1C0_SET_FALCON23 0x055c +#define NVA1C0_SET_FALCON23_V 31:0 + +#define NVA1C0_SET_FALCON24 0x0560 +#define NVA1C0_SET_FALCON24_V 31:0 + +#define NVA1C0_SET_FALCON25 0x0564 +#define NVA1C0_SET_FALCON25_V 31:0 + +#define NVA1C0_SET_FALCON26 0x0568 +#define NVA1C0_SET_FALCON26_V 31:0 + +#define NVA1C0_SET_FALCON27 0x056c +#define NVA1C0_SET_FALCON27_V 31:0 + +#define NVA1C0_SET_FALCON28 0x0570 +#define NVA1C0_SET_FALCON28_V 31:0 + +#define NVA1C0_SET_FALCON29 0x0574 +#define NVA1C0_SET_FALCON29_V 31:0 + +#define NVA1C0_SET_FALCON30 0x0578 +#define NVA1C0_SET_FALCON30_V 31:0 + +#define NVA1C0_SET_FALCON31 0x057c +#define NVA1C0_SET_FALCON31_V 31:0 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0 + +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVA1C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVA1C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVA1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVA1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVA1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVA1C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVA1C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVA1C0_SET_SPARE_NOOP12 0x0f44 +#define NVA1C0_SET_SPARE_NOOP12_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP13 0x0f48 +#define NVA1C0_SET_SPARE_NOOP13_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP14 0x0f4c +#define NVA1C0_SET_SPARE_NOOP14_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP15 0x0f50 +#define NVA1C0_SET_SPARE_NOOP15_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP00 0x1040 +#define NVA1C0_SET_SPARE_NOOP00_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP01 0x1044 +#define NVA1C0_SET_SPARE_NOOP01_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP02 0x1048 +#define NVA1C0_SET_SPARE_NOOP02_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP03 0x104c +#define NVA1C0_SET_SPARE_NOOP03_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP04 0x1050 +#define NVA1C0_SET_SPARE_NOOP04_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP05 0x1054 +#define NVA1C0_SET_SPARE_NOOP05_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP06 0x1058 +#define NVA1C0_SET_SPARE_NOOP06_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP07 0x105c +#define NVA1C0_SET_SPARE_NOOP07_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP08 0x1060 +#define NVA1C0_SET_SPARE_NOOP08_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP09 0x1064 +#define NVA1C0_SET_SPARE_NOOP09_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP10 0x1068 +#define NVA1C0_SET_SPARE_NOOP10_V 31:0 + +#define NVA1C0_SET_SPARE_NOOP11 0x106c +#define NVA1C0_SET_SPARE_NOOP11_V 31:0 + +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 
0x00000000 +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVA1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVA1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVA1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define NVA1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVA1C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVA1C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVA1C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVA1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVA1C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVA1C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVA1C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVA1C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVA1C0_SET_RENDER_ENABLE_A 0x1550 +#define NVA1C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVA1C0_SET_RENDER_ENABLE_B 0x1554 +#define NVA1C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVA1C0_SET_RENDER_ENABLE_C 0x1558 +#define NVA1C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVA1C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVA1C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVA1C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVA1C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVA1C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVA1C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVA1C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0 + +#define NVA1C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVA1C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVA1C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVA1C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVA1C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVA1C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0 + +#define NVA1C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVA1C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVA1C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVA1C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVA1C0_SET_PROGRAM_REGION_A 0x1608 +#define NVA1C0_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0 + +#define NVA1C0_SET_PROGRAM_REGION_B 0x160c +#define NVA1C0_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0 + +#define NVA1C0_SET_SHADER_CONTROL 0x1690 +#define NVA1C0_SET_SHADER_CONTROL_DEFAULT_PARTIAL 0:0 +#define NVA1C0_SET_SHADER_CONTROL_DEFAULT_PARTIAL_ZERO 0x00000000 +#define NVA1C0_SET_SHADER_CONTROL_DEFAULT_PARTIAL_INFINITY 0x00000001 + +#define 
NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVA1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVA1C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVA1C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVA1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVA1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVA1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVA1C0_PIPE_NOP 0x1a2c +#define NVA1C0_PIPE_NOP_V 31:0 + +#define NVA1C0_SET_SPARE00 0x1a30 +#define NVA1C0_SET_SPARE00_V 31:0 + +#define NVA1C0_SET_SPARE01 0x1a34 +#define NVA1C0_SET_SPARE01_V 31:0 + +#define NVA1C0_SET_SPARE02 0x1a38 +#define NVA1C0_SET_SPARE02_V 31:0 + +#define NVA1C0_SET_SPARE03 0x1a3c +#define NVA1C0_SET_SPARE03_V 31:0 + +#define NVA1C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVA1C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVA1C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVA1C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVA1C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVA1C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVA1C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVA1C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVA1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 
0x00000001 + +#define NVA1C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVA1C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVA1C0_SET_TRAP_HANDLER 0x260c +#define NVA1C0_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVA1C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVA1C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVA1C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_kepler_compute_b_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/cla297.h b/src/common/sdk/nvidia/inc/class/cla297.h new file mode 100644 index 000000000..6280e5366 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cla297.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_kepler_c_h_ +#define _cl_kepler_c_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../class/bin/sw_header.pl kepler_c */ + +#include "nvtypes.h" + +#define KEPLER_C 0xA297 + +#endif /* _cl_kepler_c_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb069.h b/src/common/sdk/nvidia/inc/class/clb069.h new file mode 100644 index 000000000..94140dd24 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb069.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clb069_h_ +#define _clb069_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAXWELL_FAULT_BUFFER_A (0xb069) + +#define NVB069_FAULT_BUF_ENTRY 0x0000001f:0x00000000 +#define NVB069_FAULT_BUF_SIZE 32 +#define NVB069_FAULT_BUF_ENTRY_INST_APERTURE MW((9+0*32):(0*32+8)) +#define NVB069_FAULT_BUF_ENTRY_INST_APERTURE_VID_MEM 0x00000000 +#define NVB069_FAULT_BUF_ENTRY_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVB069_FAULT_BUF_ENTRY_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVB069_FAULT_BUF_ENTRY_INST_LO MW((31+0*32):(0*32+12)) +#define NVB069_FAULT_BUF_ENTRY_INST_HI MW((31+1*32):(1*32+0)) +#define NVB069_FAULT_BUF_ENTRY_INST MW((31+1*32):(0*32+12)) +#define NVB069_FAULT_BUF_ENTRY_ADDR_LO MW((31+2*32):(2*32+0)) +#define NVB069_FAULT_BUF_ENTRY_ADDR_HI MW((31+3*32):(3*32+0)) +#define NVB069_FAULT_BUF_ENTRY_ADDR MW((31+3*32):(2*32+0)) +#define NVB069_FAULT_BUF_ENTRY_TIMESTAMP_LO MW((31+4*32):(4*32+0)) +#define NVB069_FAULT_BUF_ENTRY_TIMESTAMP_HI MW((31+5*32):(5*32+0)) +#define NVB069_FAULT_BUF_ENTRY_TIMESTAMP MW((31+5*32):(4*32+0)) +#define NVB069_FAULT_BUF_ENTRY_RESERVED MW((31+6*32):(6*32+0)) +#define NVB069_FAULT_BUF_ENTRY_FAULT_TYPE MW((4+7*32):(7*32+0)) +#define NVB069_FAULT_BUF_ENTRY_CLIENT MW((14+7*32):(7*32+8)) +#define NVB069_FAULT_BUF_ENTRY_ACCESS_TYPE MW((18+7*32):(7*32+16)) +#define NVB069_FAULT_BUF_ENTRY_MMU_CLIENT_TYPE MW((20+7*32):(7*32+20)) +#define NVB069_FAULT_BUF_ENTRY_GPC_ID MW((28+7*32):(7*32+24)) +#define NVB069_FAULT_BUF_ENTRY_VALID MW((31+7*32):(7*32+31)) +#define NVB069_FAULT_BUF_ENTRY_VALID_FALSE 0x00000000 +#define NVB069_FAULT_BUF_ENTRY_VALID_TRUE 0x00000001 +#define NVB069_NOTIFIERS_REPLAYABLE_FAULT (0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clb069_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/clb069sw.h b/src/common/sdk/nvidia/inc/class/clb069sw.h new file mode 100644 index 000000000..621d214f9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb069sw.h @@ -0,0 +1,46 @@ +/******************************************************************************* + Copyright (c) 2008-2013 NVIDIA Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clb069_sw_h_ +#define _clb069_sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file is *not* auto-generated. 
*/ + + +typedef struct +{ + NvU32 flags; // set to 0 + +} NVB069_ALLOCATION_PARAMETERS; + + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clb096_sw_h diff --git a/src/common/sdk/nvidia/inc/class/clb06f.h b/src/common/sdk/nvidia/inc/class/clb06f.h new file mode 100644 index 000000000..b2b51e045 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb06f.h @@ -0,0 +1,260 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clb06f_h_ +#define _clb06f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class MAXWELL_CHANNEL_GPFIFO */ +/* + * Documentation for MAXWELL_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. 
+ * + */ +#define MAXWELL_CHANNEL_GPFIFO_A (0x0000B06F) + +#define NVB06F_TYPEDEF MAXWELL_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct _clb06f_tag0 { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvb06FControl, MaxwellAControlGPFifo; + +/* fields and values */ +#define NVB06F_NUMBER_OF_SUBCHANNELS (8) +#define NVB06F_SET_OBJECT (0x00000000) +#define NVB06F_SET_OBJECT_NVCLASS 15:0 +#define NVB06F_SET_OBJECT_ENGINE 20:16 +#define NVB06F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVB06F_ILLEGAL (0x00000004) +#define NVB06F_ILLEGAL_HANDLE 31:0 +#define NVB06F_NOP (0x00000008) +#define NVB06F_NOP_HANDLE 31:0 +#define NVB06F_SEMAPHOREA (0x00000010) +#define NVB06F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVB06F_SEMAPHOREB (0x00000014) +#define NVB06F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVB06F_SEMAPHOREC (0x00000018) +#define NVB06F_SEMAPHOREC_PAYLOAD 31:0 +#define NVB06F_SEMAPHORED (0x0000001C) +#define NVB06F_SEMAPHORED_OPERATION 4:0 +#define NVB06F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVB06F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVB06F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVB06F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVB06F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVB06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVB06F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVB06F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVB06F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVB06F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVB06F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVB06F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVB06F_SEMAPHORED_REDUCTION 30:27 +#define NVB06F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVB06F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVB06F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVB06F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVB06F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVB06F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVB06F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVB06F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVB06F_SEMAPHORED_FORMAT 31:31 +#define NVB06F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVB06F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVB06F_NON_STALL_INTERRUPT (0x00000020) +#define NVB06F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVB06F_FB_FLUSH (0x00000024) +#define NVB06F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been removed for gm20x to make room for +// possible future MEM_OP features. MEM_OP_C/D have identical functionality +// to the previous MEM_OP_A/B methods. 
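For context on how the MEM_OP_C/MEM_OP_D methods described in the note above are typically driven, here is a minimal sketch (not part of this change) that packs the two dwords for a full MMU TLB invalidate across all PDBs and frames them with an incrementing-method header so they could be written into a pushbuffer. The bit positions are read directly off the NVB06F_* defines that follow in this file; the field() helper, the emit_tlb_invalidate_all() name, and the assumption that the method address in the header is the register offset expressed in dwords are illustrative only, not taken from this commit.

/*
 * Hedged sketch: assemble a GPFIFO pushbuffer fragment that performs an
 * MMU TLB invalidate of all PDBs via MEM_OP_C/MEM_OP_D (MAXWELL_CHANNEL_GPFIFO_A).
 * Field placements mirror the NVB06F_* defines in clb06f.h; helper names and
 * the dword-offset method-address convention are assumptions for illustration.
 */
#include <stdint.h>

/* Place 'value' into bits hi:lo of a 32-bit word (field width < 32 assumed). */
static inline uint32_t field(uint32_t value, unsigned hi, unsigned lo)
{
    return (value & ((1u << (hi - lo + 1u)) - 1u)) << lo;
}

static void emit_tlb_invalidate_all(uint32_t *pb)
{
    /* NVB06F_DMA_INCR_*: SEC_OP 1 (incrementing method) in 31:29, count 2 in
     * 28:16, subchannel 0 in 15:13, method address (0x30 >> 2) in 11:0, so the
     * two data dwords land on MEM_OP_C (0x30) and MEM_OP_D (0x34). */
    pb[0] = field(1, 31, 29) | field(2, 28, 16) | field(0, 15, 13) |
            field(0x00000030u >> 2, 11, 0);

    /* NVB06F_MEM_OP_C: TLB_INVALIDATE_PDB_ALL (bit 0 = 1) and
     * TLB_INVALIDATE_GPC_ENABLE (bit 1 = 0); address/target fields are left
     * at zero for a whole-TLB invalidate. */
    pb[1] = field(1, 0, 0);

    /* NVB06F_MEM_OP_D: OPERATION_MMU_TLB_INVALIDATE (0x9) in bits 31:27. */
    pb[2] = field(0x9, 31, 27);
}

Under these assumptions the three dwords would be copied into the channel's pushbuffer and referenced by a GPFIFO entry (NVB06F_GP_ENTRY0/1) in the usual way; the sketch only shows the method packing, not submission.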
+#define NVB06F_MEM_OP_C (0x00000030) +#define NVB06F_MEM_OP_C_OPERAND_LOW 31:2 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET 11:10 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_VID_MEM 0x00000000 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_SYS_MEM_COHERENT 0x00000002 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_TARGET_SYS_MEM_NONCOHERENT 0x00000003 +#define NVB06F_MEM_OP_C_TLB_INVALIDATE_ADDR_LO 31:12 +#define NVB06F_MEM_OP_D (0x00000034) +#define NVB06F_MEM_OP_D_OPERAND_HIGH 7:0 +#define NVB06F_MEM_OP_D_OPERATION 31:27 +#define NVB06F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVB06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVB06F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVB06F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +#define NVB06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVB06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVB06F_MEM_OP_D_TLB_INVALIDATE_ADDR_HI 7:0 +#define NVB06F_SET_REFERENCE (0x00000050) +#define NVB06F_SET_REFERENCE_COUNT 31:0 +#define NVB06F_WFI (0x00000078) +#define NVB06F_WFI_SCOPE 0:0 +#define NVB06F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVB06F_WFI_SCOPE_ALL 0x00000001 +#define NVB06F_CRC_CHECK (0x0000007c) +#define NVB06F_CRC_CHECK_VALUE 31:0 +#define NVB06F_YIELD (0x00000080) +#define NVB06F_YIELD_OP 1:0 +#define NVB06F_YIELD_OP_NOP 0x00000000 +#define NVB06F_YIELD_OP_PBDMA_TIMESLICE 0x00000001 +#define NVB06F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVB06F_YIELD_OP_TSG 0x00000003 + + +/* GPFIFO entry format */ +#define NVB06F_GP_ENTRY__SIZE 8 +#define NVB06F_GP_ENTRY0_FETCH 0:0 +#define NVB06F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVB06F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVB06F_GP_ENTRY0_GET 31:2 +#define NVB06F_GP_ENTRY0_OPERAND 31:0 +#define NVB06F_GP_ENTRY1_GET_HI 7:0 +#define NVB06F_GP_ENTRY1_PRIV 8:8 +#define NVB06F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVB06F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVB06F_GP_ENTRY1_LEVEL 9:9 +#define NVB06F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVB06F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVB06F_GP_ENTRY1_LENGTH 30:10 +#define NVB06F_GP_ENTRY1_SYNC 31:31 +#define NVB06F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVB06F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVB06F_GP_ENTRY1_OPCODE 7:0 +#define NVB06F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVB06F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVB06F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVB06F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVB06F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVB06F_DMA_METHOD_ADDRESS 11:0 +#define NVB06F_DMA_SUBDEVICE_MASK 15:4 +#define NVB06F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVB06F_DMA_TERT_OP 17:16 +#define NVB06F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVB06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVB06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVB06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVB06F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVB06F_DMA_METHOD_COUNT_OLD 28:18 +#define NVB06F_DMA_METHOD_COUNT 28:16 +#define NVB06F_DMA_IMMD_DATA 28:16 +#define NVB06F_DMA_SEC_OP 
31:29 +#define NVB06F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVB06F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVB06F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVB06F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVB06F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVB06F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVB06F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVB06F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVB06F_DMA_INCR_ADDRESS 11:0 +#define NVB06F_DMA_INCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_INCR_COUNT 28:16 +#define NVB06F_DMA_INCR_OPCODE 31:29 +#define NVB06F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVB06F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVB06F_DMA_NONINCR_ADDRESS 11:0 +#define NVB06F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_NONINCR_COUNT 28:16 +#define NVB06F_DMA_NONINCR_OPCODE 31:29 +#define NVB06F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVB06F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVB06F_DMA_ONEINCR_ADDRESS 11:0 +#define NVB06F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVB06F_DMA_ONEINCR_COUNT 28:16 +#define NVB06F_DMA_ONEINCR_OPCODE 31:29 +#define NVB06F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVB06F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVB06F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVB06F_DMA_IMMD_ADDRESS 11:0 +#define NVB06F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVB06F_DMA_IMMD_DATA 28:16 +#define NVB06F_DMA_IMMD_OPCODE 31:29 +#define NVB06F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVB06F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVB06F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVB06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVB06F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVB06F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVB06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVB06F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVB06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVB06F_DMA_ENDSEG_OPCODE 31:29 +#define NVB06F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVB06F_DMA_ADDRESS 12:2 +#define NVB06F_DMA_SUBCH 15:13 +#define NVB06F_DMA_OPCODE3 17:16 +#define NVB06F_DMA_OPCODE3_NONE (0x00000000) +#define NVB06F_DMA_COUNT 28:18 +#define NVB06F_DMA_OPCODE 31:29 +#define NVB06F_DMA_OPCODE_METHOD (0x00000000) +#define NVB06F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVB06F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clb06f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb06fsw.h b/src/common/sdk/nvidia/inc/class/clb06fsw.h new file mode 100644 index 000000000..9a9a39150 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb06fsw.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clb06f_sw_h_ +#define _clb06f_sw_h_ + +#define NVB06F_NOTIFIERS_RC (0) +#define NVB06F_NOTIFIERS_REFCNT (1) +#define NVB06F_NOTIFIERS_NONSTALL (2) +#define NVB06F_NOTIFIERS_EVENTBUFFER (3) +#define NVB06F_NOTIFIERS_IDLECHANNEL (4) +#define NVB06F_NOTIFIERS_ENDCTX (5) +#define NVB06F_NOTIFIERS_SW (6) +#define NVB06F_NOTIFIERS_GR_DEBUG_INTR (7) +#define NVB06F_NOTIFIERS_REPLAYABLE_FAULT (8) +#define NVB06F_NOTIFIERS_MAXCOUNT (9) + +/* NvNotification[] fields and values */ +#define NVB06F_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NVB06F_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +#endif /* _clb06f_sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb097.h b/src/common/sdk/nvidia/inc/class/clb097.h new file mode 100644 index 000000000..31dc488a2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb097.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clb097_h_ +#define _clb097_h_ + +#define MAXWELL_A 0xB097 + +#endif // _clb097_h_ diff --git a/src/common/sdk/nvidia/inc/class/clb0b0.h b/src/common/sdk/nvidia/inc/class/clb0b0.h new file mode 100644 index 000000000..030040cda --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb0b0.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvtypes.h" + +#ifndef _clb0b0_h_ +#define _clb0b0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVB0B0_VIDEO_DECODER (0x0000B0B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clb0b0_h diff --git a/src/common/sdk/nvidia/inc/class/clb0b5.h b/src/common/sdk/nvidia/inc/class/clb0b5.h new file mode 100644 index 000000000..63fe69577 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb0b5.h @@ -0,0 +1,261 @@ +/******************************************************************************* + Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clb0b5_h_ +#define _clb0b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAXWELL_DMA_COPY_A (0x0000B0B5) + +#define NVB0B5_NOP (0x00000100) +#define NVB0B5_NOP_PARAMETER 31:0 +#define NVB0B5_PM_TRIGGER (0x00000140) +#define NVB0B5_PM_TRIGGER_V 31:0 +#define NVB0B5_SET_SEMAPHORE_A (0x00000240) +#define NVB0B5_SET_SEMAPHORE_A_UPPER 7:0 +#define NVB0B5_SET_SEMAPHORE_B (0x00000244) +#define NVB0B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVB0B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVB0B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVB0B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVB0B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVB0B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVB0B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVB0B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVB0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVB0B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVB0B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVB0B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVB0B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVB0B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVB0B5_SET_DST_PHYS_MODE (0x00000264) +#define NVB0B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVB0B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVB0B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVB0B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVB0B5_LAUNCH_DMA (0x00000300) +#define NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVB0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVB0B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVB0B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVB0B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVB0B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVB0B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVB0B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVB0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVB0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVB0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVB0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVB0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVB0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVB0B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVB0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVB0B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define 
NVB0B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVB0B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVB0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVB0B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVB0B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVB0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVB0B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVB0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVB0B5_LAUNCH_DMA_BYPASS_L2 20:20 +#define NVB0B5_LAUNCH_DMA_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVB0B5_LAUNCH_DMA_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVB0B5_OFFSET_IN_UPPER (0x00000400) +#define NVB0B5_OFFSET_IN_UPPER_UPPER 7:0 +#define NVB0B5_OFFSET_IN_LOWER (0x00000404) +#define NVB0B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVB0B5_OFFSET_OUT_UPPER (0x00000408) +#define NVB0B5_OFFSET_OUT_UPPER_UPPER 7:0 +#define NVB0B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVB0B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVB0B5_PITCH_IN (0x00000410) +#define NVB0B5_PITCH_IN_VALUE 31:0 +#define NVB0B5_PITCH_OUT (0x00000414) +#define NVB0B5_PITCH_OUT_VALUE 31:0 +#define NVB0B5_LINE_LENGTH_IN (0x00000418) +#define NVB0B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVB0B5_LINE_COUNT (0x0000041C) +#define NVB0B5_LINE_COUNT_VALUE 31:0 +#define NVB0B5_SET_REMAP_CONST_A (0x00000700) +#define NVB0B5_SET_REMAP_CONST_A_V 31:0 +#define NVB0B5_SET_REMAP_CONST_B (0x00000704) +#define NVB0B5_SET_REMAP_CONST_B_V 31:0 +#define NVB0B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE 
(0x00000006) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVB0B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVB0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVB0B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVB0B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVB0B5_SET_DST_BLOCK_SIZE_WIDTH_QUARTER_GOB (0x0000000E) +#define NVB0B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVB0B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVB0B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVB0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVB0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_TESLA_4 (0x00000000) +#define NVB0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVB0B5_SET_DST_WIDTH (0x00000710) +#define NVB0B5_SET_DST_WIDTH_V 31:0 +#define NVB0B5_SET_DST_HEIGHT (0x00000714) +#define NVB0B5_SET_DST_HEIGHT_V 31:0 +#define NVB0B5_SET_DST_DEPTH (0x00000718) 
+#define NVB0B5_SET_DST_DEPTH_V 31:0 +#define NVB0B5_SET_DST_LAYER (0x0000071C) +#define NVB0B5_SET_DST_LAYER_V 31:0 +#define NVB0B5_SET_DST_ORIGIN (0x00000720) +#define NVB0B5_SET_DST_ORIGIN_X 15:0 +#define NVB0B5_SET_DST_ORIGIN_Y 31:16 +#define NVB0B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVB0B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVB0B5_SET_SRC_BLOCK_SIZE_WIDTH_QUARTER_GOB (0x0000000E) +#define NVB0B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVB0B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVB0B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVB0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVB0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_TESLA_4 (0x00000000) +#define NVB0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVB0B5_SET_SRC_WIDTH (0x0000072C) +#define NVB0B5_SET_SRC_WIDTH_V 31:0 +#define NVB0B5_SET_SRC_HEIGHT (0x00000730) +#define NVB0B5_SET_SRC_HEIGHT_V 31:0 +#define NVB0B5_SET_SRC_DEPTH (0x00000734) +#define NVB0B5_SET_SRC_DEPTH_V 31:0 +#define NVB0B5_SET_SRC_LAYER (0x00000738) +#define NVB0B5_SET_SRC_LAYER_V 31:0 +#define NVB0B5_SET_SRC_ORIGIN (0x0000073C) +#define NVB0B5_SET_SRC_ORIGIN_X 15:0 +#define NVB0B5_SET_SRC_ORIGIN_Y 31:16 +#define NVB0B5_PM_TRIGGER_END (0x00001114) +#define NVB0B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clb0b5_h + diff --git a/src/common/sdk/nvidia/inc/class/clb0b5sw.h b/src/common/sdk/nvidia/inc/class/clb0b5sw.h new file mode 100644 index 000000000..f0e0d315a --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb0b5sw.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _clb0b5sw_h_ +#define _clb0b5sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file is *not* auto-generated. */ + +// +// Using VERSION_0 will cause the API to interpret +// engineType as a CE engine instance. This allows +// for backward compatibility with 85B5sw and 90B5sw. +// +#define NVB0B5_ALLOCATION_PARAMETERS_VERSION_0 0 + +// +// Using VERSION_1 will cause the API to interpret +// engineType as an NV2080_ENGINE_TYPE ordinal. +// +#define NVB0B5_ALLOCATION_PARAMETERS_VERSION_1 1 + +typedef struct +{ + NvU32 version; + NvU32 engineType; +} NVB0B5_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clb0b5sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/clb0c0.h b/src/common/sdk/nvidia/inc/class/clb0c0.h new file mode 100644 index 000000000..c343c51cd --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb0c0.h @@ -0,0 +1,720 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl_maxwell_compute_a_h_ +#define _cl_maxwell_compute_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl maxwell_compute_a */ + +#include "nvtypes.h" + +#define MAXWELL_COMPUTE_A 0xB0C0 + +#define NVB0C0_SET_OBJECT 0x0000 +#define NVB0C0_SET_OBJECT_CLASS_ID 15:0 +#define NVB0C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVB0C0_NO_OPERATION 0x0100 +#define NVB0C0_NO_OPERATION_V 31:0 + +#define NVB0C0_SET_NOTIFY_A 0x0104 +#define NVB0C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVB0C0_SET_NOTIFY_B 0x0108 +#define NVB0C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVB0C0_NOTIFY 0x010c +#define NVB0C0_NOTIFY_TYPE 31:0 +#define NVB0C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVB0C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVB0C0_WAIT_FOR_IDLE 0x0110 +#define NVB0C0_WAIT_FOR_IDLE_V 31:0 + +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVB0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVB0C0_SEND_GO_IDLE 0x013c +#define NVB0C0_SEND_GO_IDLE_V 31:0 + +#define NVB0C0_PM_TRIGGER 0x0140 +#define NVB0C0_PM_TRIGGER_V 31:0 + +#define NVB0C0_PM_TRIGGER_WFI 0x0144 +#define NVB0C0_PM_TRIGGER_WFI_V 31:0 + +#define NVB0C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVB0C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVB0C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVB0C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVB0C0_LINE_LENGTH_IN 0x0180 +#define NVB0C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVB0C0_LINE_COUNT 0x0184 +#define NVB0C0_LINE_COUNT_VALUE 31:0 + +#define NVB0C0_OFFSET_OUT_UPPER 0x0188 +#define NVB0C0_OFFSET_OUT_UPPER_VALUE 7:0 + +#define NVB0C0_OFFSET_OUT 0x018c +#define NVB0C0_OFFSET_OUT_VALUE 31:0 + +#define NVB0C0_PITCH_OUT 0x0190 +#define NVB0C0_PITCH_OUT_VALUE 31:0 + +#define NVB0C0_SET_DST_BLOCK_SIZE 0x0194 +#define NVB0C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVB0C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVB0C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVB0C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVB0C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVB0C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVB0C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVB0C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVB0C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVB0C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVB0C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVB0C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVB0C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVB0C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVB0C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVB0C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVB0C0_SET_DST_WIDTH 0x0198 +#define NVB0C0_SET_DST_WIDTH_V 31:0 + +#define NVB0C0_SET_DST_HEIGHT 0x019c +#define NVB0C0_SET_DST_HEIGHT_V 31:0 + +#define 
NVB0C0_SET_DST_DEPTH 0x01a0 +#define NVB0C0_SET_DST_DEPTH_V 31:0 + +#define NVB0C0_SET_DST_LAYER 0x01a4 +#define NVB0C0_SET_DST_LAYER_V 31:0 + +#define NVB0C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVB0C0_SET_DST_ORIGIN_BYTES_X_V 19:0 + +#define NVB0C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVB0C0_SET_DST_ORIGIN_SAMPLES_Y_V 15:0 + +#define NVB0C0_LAUNCH_DMA 0x01b0 +#define NVB0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVB0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVB0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVB0C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVB0C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVB0C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVB0C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVB0C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVB0C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVB0C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVB0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVB0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVB0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVB0C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVB0C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVB0C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVB0C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVB0C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVB0C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVB0C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVB0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVB0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVB0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVB0C0_LOAD_INLINE_DATA 0x01b4 +#define NVB0C0_LOAD_INLINE_DATA_V 31:0 + +#define NVB0C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVB0C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVB0C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVB0C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVB0C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVB0C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVB0C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVB0C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVB0C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVB0C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVB0C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVB0C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVB0C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVB0C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVB0C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVB0C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define NVB0C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVB0C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVB0C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVB0C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVB0C0_SET_COALESCE_WAITING_PERIOD_UNIT 0x020c +#define NVB0C0_SET_COALESCE_WAITING_PERIOD_UNIT_CLOCKS 31:0 + +#define NVB0C0_PERFMON_TRANSFER 0x0210 +#define NVB0C0_PERFMON_TRANSFER_V 31:0 + +#define NVB0C0_SET_SHADER_SHARED_MEMORY_WINDOW 0x0214 
+#define NVB0C0_SET_SHADER_SHARED_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVB0C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS 0x0218 +#define NVB0C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V 0:0 +#define NVB0C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_FALSE 0x00000000 +#define NVB0C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_TRUE 0x00000001 + +#define NVB0C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVB0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVB0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVB0C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVB0C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVB0C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVB0C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVB0C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVB0C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVB0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVB0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVB0C0_SET_RESERVED_SW_METHOD00 0x0220 +#define NVB0C0_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD01 0x0224 +#define NVB0C0_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD02 0x0228 +#define NVB0C0_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD03 0x022c +#define NVB0C0_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD04 0x0230 +#define NVB0C0_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD05 0x0234 +#define NVB0C0_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD06 0x0238 +#define NVB0C0_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD07 0x023c +#define NVB0C0_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVB0C0_SET_CWD_CONTROL 0x0240 +#define NVB0C0_SET_CWD_CONTROL_SM_SELECTION 0:0 +#define NVB0C0_SET_CWD_CONTROL_SM_SELECTION_LOAD_BALANCED 0x00000000 +#define NVB0C0_SET_CWD_CONTROL_SM_SELECTION_ROUND_ROBIN 0x00000001 + +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVB0C0_SET_CWD_REF_COUNTER 0x0248 +#define NVB0C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVB0C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVB0C0_SET_RESERVED_SW_METHOD08 0x024c +#define NVB0C0_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD09 0x0250 +#define NVB0C0_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD10 0x0254 +#define NVB0C0_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD11 0x0258 +#define NVB0C0_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD12 0x025c +#define NVB0C0_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD13 0x0260 +#define NVB0C0_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD14 0x0264 +#define NVB0C0_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVB0C0_SET_RESERVED_SW_METHOD15 0x0268 +#define 
NVB0C0_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A 0x0274 +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A_ADDRESS_UPPER 7:0 + +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B 0x0278 +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B_ADDRESS_LOWER 31:0 + +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C 0x027c +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_BYTE_COUNT 16:0 +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2 31:31 +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_TRUE 0x00000001 + +#define NVB0C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVB0C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVB0C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB0C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVB0C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVB0C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB0C0_SET_QMD_VERSION 0x0288 +#define NVB0C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVB0C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB0C0_CHECK_QMD_VERSION 0x0290 +#define NVB0C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVB0C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB0C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVB0C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVB0C0_SEND_PCAS_A 0x02b4 +#define NVB0C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVB0C0_SEND_PCAS_B 0x02b8 +#define NVB0C0_SEND_PCAS_B_FROM 23:0 +#define NVB0C0_SEND_PCAS_B_DELTA 31:24 + +#define NVB0C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVB0C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVB0C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVB0C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVB0C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVB0C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVB0C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A 0x02f0 +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B 0x02f4 +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C 0x02f8 +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVB0C0_SET_SPA_VERSION 0x0310 +#define NVB0C0_SET_SPA_VERSION_MINOR 7:0 +#define NVB0C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVB0C0_SET_FALCON00 0x0500 +#define NVB0C0_SET_FALCON00_V 31:0 + +#define NVB0C0_SET_FALCON01 0x0504 +#define NVB0C0_SET_FALCON01_V 31:0 + +#define NVB0C0_SET_FALCON02 0x0508 +#define NVB0C0_SET_FALCON02_V 31:0 + +#define NVB0C0_SET_FALCON03 0x050c +#define NVB0C0_SET_FALCON03_V 31:0 + +#define NVB0C0_SET_FALCON04 0x0510 +#define NVB0C0_SET_FALCON04_V 31:0 + +#define NVB0C0_SET_FALCON05 0x0514 +#define NVB0C0_SET_FALCON05_V 31:0 + +#define NVB0C0_SET_FALCON06 0x0518 +#define NVB0C0_SET_FALCON06_V 31:0 + +#define NVB0C0_SET_FALCON07 0x051c +#define NVB0C0_SET_FALCON07_V 31:0 + +#define NVB0C0_SET_FALCON08 0x0520 +#define 
NVB0C0_SET_FALCON08_V 31:0 + +#define NVB0C0_SET_FALCON09 0x0524 +#define NVB0C0_SET_FALCON09_V 31:0 + +#define NVB0C0_SET_FALCON10 0x0528 +#define NVB0C0_SET_FALCON10_V 31:0 + +#define NVB0C0_SET_FALCON11 0x052c +#define NVB0C0_SET_FALCON11_V 31:0 + +#define NVB0C0_SET_FALCON12 0x0530 +#define NVB0C0_SET_FALCON12_V 31:0 + +#define NVB0C0_SET_FALCON13 0x0534 +#define NVB0C0_SET_FALCON13_V 31:0 + +#define NVB0C0_SET_FALCON14 0x0538 +#define NVB0C0_SET_FALCON14_V 31:0 + +#define NVB0C0_SET_FALCON15 0x053c +#define NVB0C0_SET_FALCON15_V 31:0 + +#define NVB0C0_SET_FALCON16 0x0540 +#define NVB0C0_SET_FALCON16_V 31:0 + +#define NVB0C0_SET_FALCON17 0x0544 +#define NVB0C0_SET_FALCON17_V 31:0 + +#define NVB0C0_SET_FALCON18 0x0548 +#define NVB0C0_SET_FALCON18_V 31:0 + +#define NVB0C0_SET_FALCON19 0x054c +#define NVB0C0_SET_FALCON19_V 31:0 + +#define NVB0C0_SET_FALCON20 0x0550 +#define NVB0C0_SET_FALCON20_V 31:0 + +#define NVB0C0_SET_FALCON21 0x0554 +#define NVB0C0_SET_FALCON21_V 31:0 + +#define NVB0C0_SET_FALCON22 0x0558 +#define NVB0C0_SET_FALCON22_V 31:0 + +#define NVB0C0_SET_FALCON23 0x055c +#define NVB0C0_SET_FALCON23_V 31:0 + +#define NVB0C0_SET_FALCON24 0x0560 +#define NVB0C0_SET_FALCON24_V 31:0 + +#define NVB0C0_SET_FALCON25 0x0564 +#define NVB0C0_SET_FALCON25_V 31:0 + +#define NVB0C0_SET_FALCON26 0x0568 +#define NVB0C0_SET_FALCON26_V 31:0 + +#define NVB0C0_SET_FALCON27 0x056c +#define NVB0C0_SET_FALCON27_V 31:0 + +#define NVB0C0_SET_FALCON28 0x0570 +#define NVB0C0_SET_FALCON28_V 31:0 + +#define NVB0C0_SET_FALCON29 0x0574 +#define NVB0C0_SET_FALCON29_V 31:0 + +#define NVB0C0_SET_FALCON30 0x0578 +#define NVB0C0_SET_FALCON30_V 31:0 + +#define NVB0C0_SET_FALCON31 0x057c +#define NVB0C0_SET_FALCON31_V 31:0 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0 + +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVB0C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVB0C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVB0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVB0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVB0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVB0C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVB0C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVB0C0_SET_SPARE_NOOP12 0x0f44 +#define NVB0C0_SET_SPARE_NOOP12_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP13 0x0f48 +#define NVB0C0_SET_SPARE_NOOP13_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP14 0x0f4c +#define NVB0C0_SET_SPARE_NOOP14_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP15 0x0f50 +#define NVB0C0_SET_SPARE_NOOP15_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP00 0x1040 +#define NVB0C0_SET_SPARE_NOOP00_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP01 0x1044 +#define NVB0C0_SET_SPARE_NOOP01_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP02 0x1048 +#define NVB0C0_SET_SPARE_NOOP02_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP03 0x104c +#define NVB0C0_SET_SPARE_NOOP03_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP04 0x1050 +#define NVB0C0_SET_SPARE_NOOP04_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP05 0x1054 +#define NVB0C0_SET_SPARE_NOOP05_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP06 0x1058 +#define NVB0C0_SET_SPARE_NOOP06_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP07 0x105c +#define NVB0C0_SET_SPARE_NOOP07_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP08 0x1060 +#define NVB0C0_SET_SPARE_NOOP08_V 31:0 + +#define 
NVB0C0_SET_SPARE_NOOP09 0x1064 +#define NVB0C0_SET_SPARE_NOOP09_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP10 0x1068 +#define NVB0C0_SET_SPARE_NOOP10_V 31:0 + +#define NVB0C0_SET_SPARE_NOOP11 0x106c +#define NVB0C0_SET_SPARE_NOOP11_V 31:0 + +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVB0C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVB0C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVB0C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define NVB0C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVB0C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVB0C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVB0C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVB0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVB0C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVB0C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVB0C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVB0C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVB0C0_SET_RENDER_ENABLE_A 0x1550 +#define NVB0C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVB0C0_SET_RENDER_ENABLE_B 0x1554 +#define NVB0C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVB0C0_SET_RENDER_ENABLE_C 0x1558 +#define NVB0C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVB0C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVB0C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVB0C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVB0C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVB0C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVB0C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVB0C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0 + +#define NVB0C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVB0C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVB0C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVB0C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVB0C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVB0C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0 + +#define NVB0C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVB0C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVB0C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVB0C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVB0C0_SET_PROGRAM_REGION_A 0x1608 
+#define NVB0C0_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0 + +#define NVB0C0_SET_PROGRAM_REGION_B 0x160c +#define NVB0C0_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0 + +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVB0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVB0C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVB0C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVB0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVB0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVB0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVB0C0_PIPE_NOP 0x1a2c +#define NVB0C0_PIPE_NOP_V 31:0 + +#define NVB0C0_SET_SPARE00 0x1a30 +#define NVB0C0_SET_SPARE00_V 31:0 + +#define NVB0C0_SET_SPARE01 0x1a34 +#define NVB0C0_SET_SPARE01_V 31:0 + +#define NVB0C0_SET_SPARE02 0x1a38 +#define NVB0C0_SET_SPARE02_V 31:0 + +#define NVB0C0_SET_SPARE03 0x1a3c +#define NVB0C0_SET_SPARE03_V 31:0 + +#define NVB0C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVB0C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVB0C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVB0C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVB0C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVB0C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVB0C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVB0C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define 
NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVB0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 + +#define NVB0C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVB0C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVB0C0_SET_TRAP_HANDLER 0x260c +#define NVB0C0_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVB0C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVB0C0_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVB0C0_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVB0C0_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVB0C0_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVB0C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVB0C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_maxwell_compute_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb0cc.h b/src/common/sdk/nvidia/inc/class/clb0cc.h new file mode 100644 index 000000000..61ee79f40 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb0cc.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clb0cc_h_ +#define _clb0cc_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define MAXWELL_PROFILER (0x0000B0CC) + +/* + * This is an interface definition for MAXWELL_PROFILER class and cannot + * be instantiated by clients. + * MAXWELL_PROFILER_DEVICE extends this interface to define interface for + * device level profiling + */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clb0cc_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb197.h b/src/common/sdk/nvidia/inc/class/clb197.h new file mode 100644 index 000000000..77d77352c --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb197.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clb197_h_ +#define _clb197_h_ + +#define MAXWELL_B 0xB197 + +#endif // _clb197_h_ diff --git a/src/common/sdk/nvidia/inc/class/clb1c0.h b/src/common/sdk/nvidia/inc/class/clb1c0.h new file mode 100644 index 000000000..8d7aaac41 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb1c0.h @@ -0,0 +1,750 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_maxwell_compute_b_h_ +#define _cl_maxwell_compute_b_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl maxwell_compute_b */ + +#include "nvtypes.h" + +#define MAXWELL_COMPUTE_B 0xB1C0 + +#define NVB1C0_SET_OBJECT 0x0000 +#define NVB1C0_SET_OBJECT_CLASS_ID 15:0 +#define NVB1C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVB1C0_NO_OPERATION 0x0100 +#define NVB1C0_NO_OPERATION_V 31:0 + +#define NVB1C0_SET_NOTIFY_A 0x0104 +#define NVB1C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVB1C0_SET_NOTIFY_B 0x0108 +#define NVB1C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVB1C0_NOTIFY 0x010c +#define NVB1C0_NOTIFY_TYPE 31:0 +#define NVB1C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVB1C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVB1C0_WAIT_FOR_IDLE 0x0110 +#define NVB1C0_WAIT_FOR_IDLE_V 31:0 + +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVB1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVB1C0_SEND_GO_IDLE 0x013c +#define NVB1C0_SEND_GO_IDLE_V 31:0 + +#define NVB1C0_PM_TRIGGER 0x0140 +#define NVB1C0_PM_TRIGGER_V 31:0 + +#define NVB1C0_PM_TRIGGER_WFI 0x0144 +#define NVB1C0_PM_TRIGGER_WFI_V 31:0 + +#define NVB1C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVB1C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVB1C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVB1C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVB1C0_LINE_LENGTH_IN 0x0180 +#define NVB1C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVB1C0_LINE_COUNT 0x0184 +#define NVB1C0_LINE_COUNT_VALUE 31:0 + +#define NVB1C0_OFFSET_OUT_UPPER 0x0188 +#define NVB1C0_OFFSET_OUT_UPPER_VALUE 7:0 + +#define NVB1C0_OFFSET_OUT 0x018c +#define NVB1C0_OFFSET_OUT_VALUE 31:0 + +#define NVB1C0_PITCH_OUT 0x0190 +#define NVB1C0_PITCH_OUT_VALUE 31:0 + +#define NVB1C0_SET_DST_BLOCK_SIZE 0x0194 
+#define NVB1C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVB1C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVB1C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVB1C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVB1C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVB1C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVB1C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVB1C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVB1C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVB1C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVB1C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVB1C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVB1C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVB1C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVB1C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVB1C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVB1C0_SET_DST_WIDTH 0x0198 +#define NVB1C0_SET_DST_WIDTH_V 31:0 + +#define NVB1C0_SET_DST_HEIGHT 0x019c +#define NVB1C0_SET_DST_HEIGHT_V 31:0 + +#define NVB1C0_SET_DST_DEPTH 0x01a0 +#define NVB1C0_SET_DST_DEPTH_V 31:0 + +#define NVB1C0_SET_DST_LAYER 0x01a4 +#define NVB1C0_SET_DST_LAYER_V 31:0 + +#define NVB1C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVB1C0_SET_DST_ORIGIN_BYTES_X_V 19:0 + +#define NVB1C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVB1C0_SET_DST_ORIGIN_SAMPLES_Y_V 15:0 + +#define NVB1C0_LAUNCH_DMA 0x01b0 +#define NVB1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVB1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVB1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVB1C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVB1C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVB1C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVB1C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVB1C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVB1C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVB1C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVB1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVB1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVB1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVB1C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVB1C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVB1C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVB1C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVB1C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVB1C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVB1C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVB1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVB1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVB1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVB1C0_LOAD_INLINE_DATA 0x01b4 +#define NVB1C0_LOAD_INLINE_DATA_V 31:0 + +#define NVB1C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVB1C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVB1C0_SET_I2M_SEMAPHORE_B 0x01e0 
+#define NVB1C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVB1C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVB1C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVB1C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVB1C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVB1C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVB1C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVB1C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVB1C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVB1C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVB1C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVB1C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVB1C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define NVB1C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVB1C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVB1C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVB1C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVB1C0_SET_COALESCE_WAITING_PERIOD_UNIT 0x020c +#define NVB1C0_SET_COALESCE_WAITING_PERIOD_UNIT_CLOCKS 31:0 + +#define NVB1C0_PERFMON_TRANSFER 0x0210 +#define NVB1C0_PERFMON_TRANSFER_V 31:0 + +#define NVB1C0_SET_SHADER_SHARED_MEMORY_WINDOW 0x0214 +#define NVB1C0_SET_SHADER_SHARED_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVB1C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS 0x0218 +#define NVB1C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V 0:0 +#define NVB1C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_FALSE 0x00000000 +#define NVB1C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_TRUE 0x00000001 + +#define NVB1C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVB1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVB1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVB1C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVB1C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVB1C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVB1C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVB1C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVB1C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVB1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVB1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVB1C0_SET_RESERVED_SW_METHOD00 0x0220 +#define NVB1C0_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD01 0x0224 +#define NVB1C0_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD02 0x0228 +#define NVB1C0_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD03 0x022c +#define NVB1C0_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD04 0x0230 +#define NVB1C0_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD05 0x0234 +#define NVB1C0_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD06 0x0238 +#define NVB1C0_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD07 0x023c +#define NVB1C0_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVB1C0_SET_CWD_CONTROL 0x0240 +#define NVB1C0_SET_CWD_CONTROL_SM_SELECTION 0:0 +#define NVB1C0_SET_CWD_CONTROL_SM_SELECTION_LOAD_BALANCED 0x00000000 +#define NVB1C0_SET_CWD_CONTROL_SM_SELECTION_ROUND_ROBIN 0x00000001 + +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define 
NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVB1C0_SET_CWD_REF_COUNTER 0x0248 +#define NVB1C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVB1C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVB1C0_SET_RESERVED_SW_METHOD08 0x024c +#define NVB1C0_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD09 0x0250 +#define NVB1C0_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD10 0x0254 +#define NVB1C0_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD11 0x0258 +#define NVB1C0_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD12 0x025c +#define NVB1C0_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD13 0x0260 +#define NVB1C0_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD14 0x0264 +#define NVB1C0_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVB1C0_SET_RESERVED_SW_METHOD15 0x0268 +#define NVB1C0_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVB1C0_SET_GWC_SCG_TYPE 0x026c +#define NVB1C0_SET_GWC_SCG_TYPE_SCG_TYPE 0:0 +#define NVB1C0_SET_GWC_SCG_TYPE_SCG_TYPE_GRAPHICS_COMPUTE0 0x00000000 +#define NVB1C0_SET_GWC_SCG_TYPE_SCG_TYPE_COMPUTE1 0x00000001 + +#define NVB1C0_SET_SCG_CONTROL 0x0270 +#define NVB1C0_SET_SCG_CONTROL_COMPUTE1_MAX_SM_COUNT 8:0 + +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A 0x0274 +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A_ADDRESS_UPPER 7:0 + +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B 0x0278 +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B_ADDRESS_LOWER 31:0 + +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C 0x027c +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_BYTE_COUNT 16:0 +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2 31:31 +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_TRUE 0x00000001 + +#define NVB1C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVB1C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVB1C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB1C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVB1C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVB1C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB1C0_SET_QMD_VERSION 0x0288 +#define NVB1C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVB1C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB1C0_SET_WFI_CONFIG 0x028c +#define NVB1C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI 0:0 +#define NVB1C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI_FALSE 0x00000000 +#define NVB1C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI_TRUE 0x00000001 + +#define NVB1C0_CHECK_QMD_VERSION 0x0290 +#define NVB1C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVB1C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVB1C0_WAIT_FOR_IDLE_SCG_TYPE 0x0294 +#define NVB1C0_WAIT_FOR_IDLE_SCG_TYPE_V 31:0 + +#define NVB1C0_INVALIDATE_SKED_CACHES 0x0298 +#define NVB1C0_INVALIDATE_SKED_CACHES_V 0:0 + +#define NVB1C0_SET_SCG_RENDER_ENABLE_CONTROL 0x029c +#define NVB1C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE 0:0 +#define NVB1C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE_FALSE 0x00000000 +#define NVB1C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE_TRUE 0x00000001 + +#define NVB1C0_SET_CWD_SLOT_COUNT 0x02b0 +#define 
NVB1C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVB1C0_SEND_PCAS_A 0x02b4 +#define NVB1C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVB1C0_SEND_PCAS_B 0x02b8 +#define NVB1C0_SEND_PCAS_B_FROM 23:0 +#define NVB1C0_SEND_PCAS_B_DELTA 31:24 + +#define NVB1C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVB1C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVB1C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVB1C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVB1C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVB1C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVB1C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A 0x02f0 +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B 0x02f4 +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C 0x02f8 +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVB1C0_SET_SPA_VERSION 0x0310 +#define NVB1C0_SET_SPA_VERSION_MINOR 7:0 +#define NVB1C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVB1C0_SET_FALCON00 0x0500 +#define NVB1C0_SET_FALCON00_V 31:0 + +#define NVB1C0_SET_FALCON01 0x0504 +#define NVB1C0_SET_FALCON01_V 31:0 + +#define NVB1C0_SET_FALCON02 0x0508 +#define NVB1C0_SET_FALCON02_V 31:0 + +#define NVB1C0_SET_FALCON03 0x050c +#define NVB1C0_SET_FALCON03_V 31:0 + +#define NVB1C0_SET_FALCON04 0x0510 +#define NVB1C0_SET_FALCON04_V 31:0 + +#define NVB1C0_SET_FALCON05 0x0514 +#define NVB1C0_SET_FALCON05_V 31:0 + +#define NVB1C0_SET_FALCON06 0x0518 +#define NVB1C0_SET_FALCON06_V 31:0 + +#define NVB1C0_SET_FALCON07 0x051c +#define NVB1C0_SET_FALCON07_V 31:0 + +#define NVB1C0_SET_FALCON08 0x0520 +#define NVB1C0_SET_FALCON08_V 31:0 + +#define NVB1C0_SET_FALCON09 0x0524 +#define NVB1C0_SET_FALCON09_V 31:0 + +#define NVB1C0_SET_FALCON10 0x0528 +#define NVB1C0_SET_FALCON10_V 31:0 + +#define NVB1C0_SET_FALCON11 0x052c +#define NVB1C0_SET_FALCON11_V 31:0 + +#define NVB1C0_SET_FALCON12 0x0530 +#define NVB1C0_SET_FALCON12_V 31:0 + +#define NVB1C0_SET_FALCON13 0x0534 +#define NVB1C0_SET_FALCON13_V 31:0 + +#define NVB1C0_SET_FALCON14 0x0538 +#define NVB1C0_SET_FALCON14_V 31:0 + +#define NVB1C0_SET_FALCON15 0x053c +#define NVB1C0_SET_FALCON15_V 31:0 + +#define NVB1C0_SET_FALCON16 0x0540 +#define NVB1C0_SET_FALCON16_V 31:0 + +#define NVB1C0_SET_FALCON17 0x0544 +#define NVB1C0_SET_FALCON17_V 31:0 + +#define NVB1C0_SET_FALCON18 0x0548 +#define NVB1C0_SET_FALCON18_V 31:0 + +#define NVB1C0_SET_FALCON19 0x054c +#define NVB1C0_SET_FALCON19_V 31:0 + +#define NVB1C0_SET_FALCON20 0x0550 +#define NVB1C0_SET_FALCON20_V 31:0 + +#define NVB1C0_SET_FALCON21 0x0554 +#define NVB1C0_SET_FALCON21_V 31:0 + +#define NVB1C0_SET_FALCON22 0x0558 +#define NVB1C0_SET_FALCON22_V 31:0 + +#define NVB1C0_SET_FALCON23 0x055c +#define NVB1C0_SET_FALCON23_V 31:0 + +#define NVB1C0_SET_FALCON24 0x0560 +#define NVB1C0_SET_FALCON24_V 31:0 + +#define NVB1C0_SET_FALCON25 0x0564 +#define NVB1C0_SET_FALCON25_V 31:0 + +#define NVB1C0_SET_FALCON26 0x0568 +#define 
NVB1C0_SET_FALCON26_V 31:0 + +#define NVB1C0_SET_FALCON27 0x056c +#define NVB1C0_SET_FALCON27_V 31:0 + +#define NVB1C0_SET_FALCON28 0x0570 +#define NVB1C0_SET_FALCON28_V 31:0 + +#define NVB1C0_SET_FALCON29 0x0574 +#define NVB1C0_SET_FALCON29_V 31:0 + +#define NVB1C0_SET_FALCON30 0x0578 +#define NVB1C0_SET_FALCON30_V 31:0 + +#define NVB1C0_SET_FALCON31 0x057c +#define NVB1C0_SET_FALCON31_V 31:0 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 7:0 + +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVB1C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVB1C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVB1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVB1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVB1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVB1C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVB1C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVB1C0_SET_SPARE_NOOP12 0x0f44 +#define NVB1C0_SET_SPARE_NOOP12_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP13 0x0f48 +#define NVB1C0_SET_SPARE_NOOP13_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP14 0x0f4c +#define NVB1C0_SET_SPARE_NOOP14_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP15 0x0f50 +#define NVB1C0_SET_SPARE_NOOP15_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP00 0x1040 +#define NVB1C0_SET_SPARE_NOOP00_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP01 0x1044 +#define NVB1C0_SET_SPARE_NOOP01_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP02 0x1048 +#define NVB1C0_SET_SPARE_NOOP02_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP03 0x104c +#define NVB1C0_SET_SPARE_NOOP03_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP04 0x1050 +#define NVB1C0_SET_SPARE_NOOP04_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP05 0x1054 +#define NVB1C0_SET_SPARE_NOOP05_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP06 0x1058 +#define NVB1C0_SET_SPARE_NOOP06_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP07 0x105c +#define NVB1C0_SET_SPARE_NOOP07_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP08 0x1060 +#define NVB1C0_SET_SPARE_NOOP08_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP09 0x1064 +#define NVB1C0_SET_SPARE_NOOP09_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP10 0x1068 +#define NVB1C0_SET_SPARE_NOOP10_V 31:0 + +#define NVB1C0_SET_SPARE_NOOP11 0x106c +#define NVB1C0_SET_SPARE_NOOP11_V 31:0 + +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_ALL 0x120c +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_ALL_V 0:0 + +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_ALL 0x1210 +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_ALL_V 0:0 + +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVB1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVB1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVB1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define NVB1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVB1C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define 
NVB1C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVB1C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVB1C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVB1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVB1C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVB1C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVB1C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVB1C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVB1C0_SET_RENDER_ENABLE_A 0x1550 +#define NVB1C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVB1C0_SET_RENDER_ENABLE_B 0x1554 +#define NVB1C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVB1C0_SET_RENDER_ENABLE_C 0x1558 +#define NVB1C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVB1C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVB1C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVB1C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVB1C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVB1C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVB1C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVB1C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 7:0 + +#define NVB1C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVB1C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVB1C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVB1C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVB1C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVB1C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 7:0 + +#define NVB1C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVB1C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVB1C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVB1C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVB1C0_SET_PROGRAM_REGION_A 0x1608 +#define NVB1C0_SET_PROGRAM_REGION_A_ADDRESS_UPPER 7:0 + +#define NVB1C0_SET_PROGRAM_REGION_B 0x160c +#define NVB1C0_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0 + +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVB1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVB1C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVB1C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVB1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define 
NVB1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVB1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVB1C0_PIPE_NOP 0x1a2c +#define NVB1C0_PIPE_NOP_V 31:0 + +#define NVB1C0_SET_SPARE00 0x1a30 +#define NVB1C0_SET_SPARE00_V 31:0 + +#define NVB1C0_SET_SPARE01 0x1a34 +#define NVB1C0_SET_SPARE01_V 31:0 + +#define NVB1C0_SET_SPARE02 0x1a38 +#define NVB1C0_SET_SPARE02_V 31:0 + +#define NVB1C0_SET_SPARE03 0x1a3c +#define NVB1C0_SET_SPARE03_V 31:0 + +#define NVB1C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVB1C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVB1C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVB1C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVB1C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVB1C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVB1C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVB1C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVB1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 + +#define NVB1C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVB1C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVB1C0_SET_TRAP_HANDLER 0x260c +#define NVB1C0_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define 
NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVB1C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVB1C0_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVB1C0_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVB1C0_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVB1C0_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVB1C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVB1C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_maxwell_compute_b_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb2cc.h b/src/common/sdk/nvidia/inc/class/clb2cc.h new file mode 100644 index 000000000..142ac762b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb2cc.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clb2cc_h_ +#define _clb2cc_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "clb0cc.h" + +#define MAXWELL_PROFILER_DEVICE (0x0000B2CC) + +/* + * Creating the MAXWELL_PROFILER_DEVICE object: + * - The profiler object is instantiated as a child of subdevice. + */ +typedef struct { + /* + * This parameter specifies the handle of the client that owns the context + * specified by hContextTarget. 
This can be set to 0 when a context + * specific operation is not needed. For context-level operations, see: + * @ref NVB0CC_CTRL_CMD_RESERVE_HWPM_LEGACY, @ref NVB0CC_CTRL_CMD_RESERVE_PM_AREA_SMPC, + * @ref NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM. + */ + NvHandle hClientTarget; + + /* + * This parameter specifies the handle of the BC channel (or BC channel + * group) object instance to which context-specific operations are to be + * directed. If hClientTarget is set to 0 then this parameter is ignored. + */ + NvHandle hContextTarget; +} NVB2CC_ALLOC_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clb2cc_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clb4b7.h b/src/common/sdk/nvidia/inc/class/clb4b7.h new file mode 100644 index 000000000..e0151518f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb4b7.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef clb4b7_h_ +#define clb4b7_h_ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVB4B7_VIDEO_ENCODER (0x0000B4B7) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clb4b7_h + diff --git a/src/common/sdk/nvidia/inc/class/clb6b0.h b/src/common/sdk/nvidia/inc/class/clb6b0.h new file mode 100644 index 000000000..d79dcb425 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clb6b0.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvtypes.h" + +#ifndef _clb6b0_h_ +#define _clb6b0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVB6B0_VIDEO_DECODER (0x0000B6B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clb6b0_h diff --git a/src/common/sdk/nvidia/inc/class/clc06f.h b/src/common/sdk/nvidia/inc/class/clc06f.h new file mode 100644 index 000000000..d8b86072b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc06f.h @@ -0,0 +1,312 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc06f_h_ +#define _clc06f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class PASCAL_CHANNEL_GPFIFO */ +/* + * Documentation for PASCAL_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + * Note there is no .mfs file for this class. 
+ */ +#define PASCAL_CHANNEL_GPFIFO_A (0x0000C06F) + +#define NVC06F_TYPEDEF PASCAL_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct Nvc06fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc06fControl, PascalAControlGPFifo; + +/* fields and values */ +#define NVC06F_NUMBER_OF_SUBCHANNELS (8) +#define NVC06F_SET_OBJECT (0x00000000) +#define NVC06F_SET_OBJECT_NVCLASS 15:0 +#define NVC06F_SET_OBJECT_ENGINE 20:16 +#define NVC06F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVC06F_ILLEGAL (0x00000004) +#define NVC06F_ILLEGAL_HANDLE 31:0 +#define NVC06F_NOP (0x00000008) +#define NVC06F_NOP_HANDLE 31:0 +#define NVC06F_SEMAPHOREA (0x00000010) +#define NVC06F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC06F_SEMAPHOREB (0x00000014) +#define NVC06F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC06F_SEMAPHOREC (0x00000018) +#define NVC06F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC06F_SEMAPHORED (0x0000001C) +#define NVC06F_SEMAPHORED_OPERATION 4:0 +#define NVC06F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC06F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC06F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC06F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVC06F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC06F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC06F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC06F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC06F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC06F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC06F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC06F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC06F_SEMAPHORED_REDUCTION 30:27 +#define NVC06F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC06F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC06F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC06F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC06F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC06F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC06F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC06F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVC06F_SEMAPHORED_FORMAT 31:31 +#define NVC06F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC06F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC06F_NON_STALL_INTERRUPT (0x00000020) +#define NVC06F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC06F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC06F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. 
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC06F_MEM_OP_A (0x00000028) +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC06F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC06F_MEM_OP_B (0x0000002c) +#define NVC06F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC06F_MEM_OP_C (0x00000030) +#define NVC06F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC06F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC06F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC06F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +// MEM_OP_D MUST be preceded by MEM_OPs A-C. 
+#define NVC06F_MEM_OP_D (0x00000034) +#define NVC06F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC06F_MEM_OP_D_OPERATION 31:27 +#define NVC06F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC06F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC06F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC06F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC06F_MEM_OP_D_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +// This B alias is confusing but it was missed as part of the update. Left here +// for compatibility. +#define NVC06F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC06F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC06F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC06F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC06F_SET_REFERENCE (0x00000050) +#define NVC06F_SET_REFERENCE_COUNT 31:0 +// Syncpoint methods are only available on Tegra parts. Attempting to use +// them on discrete GPUs will result in Host raising NV_PPBDMA_INTR_0_METHOD. +#define NVC06F_SYNCPOINTA (0x00000070) +#define NVC06F_SYNCPOINTA_PAYLOAD 31:0 +#define NVC06F_SYNCPOINTB (0x00000074) +#define NVC06F_SYNCPOINTB_OPERATION 0:0 +#define NVC06F_SYNCPOINTB_OPERATION_WAIT 0x00000000 +#define NVC06F_SYNCPOINTB_OPERATION_INCR 0x00000001 +#define NVC06F_SYNCPOINTB_WAIT_SWITCH 4:4 +#define NVC06F_SYNCPOINTB_WAIT_SWITCH_DIS 0x00000000 +#define NVC06F_SYNCPOINTB_WAIT_SWITCH_EN 0x00000001 +#define NVC06F_SYNCPOINTB_SYNCPT_INDEX 19:8 +#define NVC06F_WFI (0x00000078) +#define NVC06F_WFI_SCOPE 0:0 +#define NVC06F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC06F_WFI_SCOPE_ALL 0x00000001 +#define NVC06F_CRC_CHECK (0x0000007c) +#define NVC06F_CRC_CHECK_VALUE 31:0 +#define NVC06F_YIELD (0x00000080) +#define NVC06F_YIELD_OP 1:0 +#define NVC06F_YIELD_OP_NOP 0x00000000 +#define NVC06F_YIELD_OP_PBDMA_TIMESLICE 0x00000001 +#define NVC06F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVC06F_YIELD_OP_TSG 0x00000003 + + +/* GPFIFO entry format */ +#define NVC06F_GP_ENTRY__SIZE 8 +#define NVC06F_GP_ENTRY0_FETCH 0:0 +#define NVC06F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC06F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC06F_GP_ENTRY0_GET 31:2 +#define NVC06F_GP_ENTRY0_OPERAND 31:0 +#define NVC06F_GP_ENTRY1_GET_HI 7:0 +#define NVC06F_GP_ENTRY1_PRIV 8:8 +#define NVC06F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVC06F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVC06F_GP_ENTRY1_LEVEL 9:9 +#define NVC06F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC06F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC06F_GP_ENTRY1_LENGTH 30:10 +#define NVC06F_GP_ENTRY1_SYNC 31:31 +#define NVC06F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC06F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC06F_GP_ENTRY1_OPCODE 7:0 +#define NVC06F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC06F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC06F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC06F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC06F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC06F_DMA_METHOD_ADDRESS 11:0 +#define NVC06F_DMA_SUBDEVICE_MASK 15:4 +#define NVC06F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC06F_DMA_TERT_OP 17:16 +#define NVC06F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC06F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define 
NVC06F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC06F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC06F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC06F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC06F_DMA_METHOD_COUNT 28:16 +#define NVC06F_DMA_IMMD_DATA 28:16 +#define NVC06F_DMA_SEC_OP 31:29 +#define NVC06F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC06F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC06F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVC06F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVC06F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC06F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC06F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC06F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC06F_DMA_INCR_ADDRESS 11:0 +#define NVC06F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC06F_DMA_INCR_COUNT 28:16 +#define NVC06F_DMA_INCR_OPCODE 31:29 +#define NVC06F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC06F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC06F_DMA_NONINCR_ADDRESS 11:0 +#define NVC06F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC06F_DMA_NONINCR_COUNT 28:16 +#define NVC06F_DMA_NONINCR_OPCODE 31:29 +#define NVC06F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC06F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVC06F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC06F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC06F_DMA_ONEINCR_COUNT 28:16 +#define NVC06F_DMA_ONEINCR_OPCODE 31:29 +#define NVC06F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC06F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC06F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC06F_DMA_IMMD_ADDRESS 11:0 +#define NVC06F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC06F_DMA_IMMD_DATA 28:16 +#define NVC06F_DMA_IMMD_OPCODE 31:29 +#define NVC06F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC06F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC06F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC06F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC06F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC06F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC06F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC06F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC06F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC06F_DMA_ENDSEG_OPCODE 31:29 +#define NVC06F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC06F_DMA_ADDRESS 12:2 +#define NVC06F_DMA_SUBCH 15:13 +#define NVC06F_DMA_OPCODE3 17:16 +#define NVC06F_DMA_OPCODE3_NONE (0x00000000) +#define NVC06F_DMA_COUNT 28:18 +#define NVC06F_DMA_OPCODE 31:29 +#define NVC06F_DMA_OPCODE_METHOD (0x00000000) +#define NVC06F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC06F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc06f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc06fsw.h b/src/common/sdk/nvidia/inc/class/clc06fsw.h new file mode 100644 index 000000000..8dc2c0755 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc06fsw.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* This file is *not* auto-generated. */ + +#ifndef _clc06f_sw_h_ +#define _clc06f_sw_h_ + +#define NVC06F_NOTIFIERS_RC (0) +#define NVC06F_NOTIFIERS_REFCNT (1) +#define NVC06F_NOTIFIERS_NONSTALL (2) +#define NVC06F_NOTIFIERS_EVENTBUFFER (3) +#define NVC06F_NOTIFIERS_IDLECHANNEL (4) +#define NVC06F_NOTIFIERS_ENDCTX (5) +#define NVC06F_NOTIFIERS_SW (6) +#define NVC06F_NOTIFIERS_GR_DEBUG_INTR (7) +#define NVC06F_NOTIFIERS_REPLAYABLE_FAULT (8) +#define NVC06F_NOTIFIERS_MAXCOUNT (9) + +/* NvNotification[] fields and values */ +#define NVC06F_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NVC06F_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +#endif /* _clc06f_sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc076.h b/src/common/sdk/nvidia/inc/class/clc076.h new file mode 100644 index 000000000..284c57a23 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc076.h @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc076_h_ +#define _clc076_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define GP100_UVM_SW (0x0000c076) + +#define NVC076_SET_OBJECT (0x00000000) +#define NVC076_NO_OPERATION (0x00000100) + +/* Method data fields to support gpu fault cancel. These are pushed in order by UVM */ + +#define NVC076_FAULT_CANCEL_A (0x00000104) +#define NVC076_FAULT_CANCEL_A_INST_APERTURE 1:0 +#define NVC076_FAULT_CANCEL_A_INST_APERTURE_VID_MEM 0x00000000 +#define NVC076_FAULT_CANCEL_A_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC076_FAULT_CANCEL_A_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 + +/* instance pointer is 4k aligned so those bits are reused to store the aperture */ +#define NVC076_FAULT_CANCEL_A_INST_LOW 31:12 + +#define NVC076_FAULT_CANCEL_B (0x00000108) +#define NVC076_FAULT_CANCEL_B_INST_HI 31:0 + +#define NVC076_FAULT_CANCEL_C (0x0000010c) +#define NVC076_FAULT_CANCEL_C_CLIENT_ID 5:0 +#define NVC076_FAULT_CANCEL_C_GPC_ID 10:6 +#define NVC076_FAULT_CANCEL_C_MODE 31:30 +#define NVC076_FAULT_CANCEL_C_MODE_TARGETED 0x00000000 +#define NVC076_FAULT_CANCEL_C_MODE_GLOBAL 0x00000001 + +/* Method data fields to support clearing faulted bit. These are pushed in order by UVM */ + +#define NVC076_CLEAR_FAULTED_A (0x00000110) + +#define NVC076_CLEAR_FAULTED_A_INST_APERTURE 1:0 +#define NVC076_CLEAR_FAULTED_A_INST_APERTURE_VID_MEM 0x00000000 +#define NVC076_CLEAR_FAULTED_A_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC076_CLEAR_FAULTED_A_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 + +#define NVC076_CLEAR_FAULTED_A_TYPE 2:2 +#define NVC076_CLEAR_FAULTED_A_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC076_CLEAR_FAULTED_A_TYPE_ENG_FAULTED 0x00000001 + +/* instance pointer is 4k aligned */ +#define NVC076_CLEAR_FAULTED_A_INST_LOW 31:12 + +#define NVC076_CLEAR_FAULTED_B (0x00000114) +#define NVC076_CLEAR_FAULTED_B_INST_HI 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc076_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc097.h b/src/common/sdk/nvidia/inc/class/clc097.h new file mode 100644 index 000000000..6d1e31b4d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc097.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc097_h_ +#define _clc097_h_ + +#define PASCAL_A 0xC097 + +#endif // _clc097_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc0b5.h b/src/common/sdk/nvidia/inc/class/clc0b5.h new file mode 100644 index 000000000..d1fa85ad8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc0b5.h @@ -0,0 +1,261 @@ +/******************************************************************************* + Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc0b5_h_ +#define _clc0b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define PASCAL_DMA_COPY_A (0x0000C0B5) + +#define NVC0B5_NOP (0x00000100) +#define NVC0B5_NOP_PARAMETER 31:0 +#define NVC0B5_PM_TRIGGER (0x00000140) +#define NVC0B5_PM_TRIGGER_V 31:0 +#define NVC0B5_SET_SEMAPHORE_A (0x00000240) +#define NVC0B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC0B5_SET_SEMAPHORE_B (0x00000244) +#define NVC0B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC0B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC0B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC0B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC0B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC0B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC0B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC0B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC0B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC0B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC0B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC0B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC0B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC0B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC0B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC0B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC0B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC0B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC0B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC0B5_LAUNCH_DMA (0x00000300) +#define NVC0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define 
NVC0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC0B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC0B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC0B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC0B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC0B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC0B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC0B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC0B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC0B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC0B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC0B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC0B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC0B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC0B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC0B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC0B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC0B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC0B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2 20:20 +#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC0B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC0B5_LAUNCH_DMA_DST_BYPASS_L2 21:21 +#define NVC0B5_LAUNCH_DMA_DST_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC0B5_LAUNCH_DMA_DST_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC0B5_LAUNCH_DMA_RESERVED 31:28 +#define NVC0B5_OFFSET_IN_UPPER (0x00000400) +#define NVC0B5_OFFSET_IN_UPPER_UPPER 16:0 +#define 
NVC0B5_OFFSET_IN_LOWER (0x00000404) +#define NVC0B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC0B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC0B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC0B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC0B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC0B5_PITCH_IN (0x00000410) +#define NVC0B5_PITCH_IN_VALUE 31:0 +#define NVC0B5_PITCH_OUT (0x00000414) +#define NVC0B5_PITCH_OUT_VALUE 31:0 +#define NVC0B5_LINE_LENGTH_IN (0x00000418) +#define NVC0B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC0B5_LINE_COUNT (0x0000041C) +#define NVC0B5_LINE_COUNT_VALUE 31:0 +#define NVC0B5_SET_REMAP_CONST_A (0x00000700) +#define NVC0B5_SET_REMAP_CONST_A_V 31:0 +#define NVC0B5_SET_REMAP_CONST_B (0x00000704) +#define NVC0B5_SET_REMAP_CONST_B_V 31:0 +#define NVC0B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC0B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define 
NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC0B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC0B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC0B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC0B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC0B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC0B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC0B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC0B5_SET_DST_WIDTH (0x00000710) +#define NVC0B5_SET_DST_WIDTH_V 31:0 +#define NVC0B5_SET_DST_HEIGHT (0x00000714) +#define NVC0B5_SET_DST_HEIGHT_V 31:0 +#define NVC0B5_SET_DST_DEPTH (0x00000718) +#define NVC0B5_SET_DST_DEPTH_V 31:0 +#define NVC0B5_SET_DST_LAYER (0x0000071C) +#define NVC0B5_SET_DST_LAYER_V 31:0 +#define NVC0B5_SET_DST_ORIGIN (0x00000720) +#define NVC0B5_SET_DST_ORIGIN_X 15:0 +#define NVC0B5_SET_DST_ORIGIN_Y 31:16 +#define NVC0B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC0B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC0B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC0B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC0B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC0B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC0B5_SET_SRC_WIDTH (0x0000072C) +#define NVC0B5_SET_SRC_WIDTH_V 31:0 +#define NVC0B5_SET_SRC_HEIGHT (0x00000730) +#define NVC0B5_SET_SRC_HEIGHT_V 31:0 +#define NVC0B5_SET_SRC_DEPTH (0x00000734) +#define NVC0B5_SET_SRC_DEPTH_V 31:0 +#define NVC0B5_SET_SRC_LAYER (0x00000738) +#define NVC0B5_SET_SRC_LAYER_V 31:0 +#define NVC0B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC0B5_SET_SRC_ORIGIN_X 15:0 +#define NVC0B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC0B5_PM_TRIGGER_END (0x00001114) +#define 
NVC0B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc0b5_h + diff --git a/src/common/sdk/nvidia/inc/class/clc0b5sw.h b/src/common/sdk/nvidia/inc/class/clc0b5sw.h new file mode 100644 index 000000000..c2e45466e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc0b5sw.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _clc0b5sw_h_ +#define _clc0b5sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file is *not* auto-generated. */ + +// +// Using VERSION_0 will cause the API to interpret +// engineType as a CE engine instance. This allows +// for backward compatibility with 85B5sw and 90B5sw. +// +#define NVC0B5_ALLOCATION_PARAMETERS_VERSION_0 0 + +// +// Using VERSION_1 will cause the API to interpret +// engineType as an NV2080_ENGINE_TYPE ordinal. +// +#define NVC0B5_ALLOCATION_PARAMETERS_VERSION_1 1 + +typedef struct +{ + NvU32 version; + NvU32 engineType; +} NVC0B5_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc0b5sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/clc0b7.h b/src/common/sdk/nvidia/inc/class/clc0b7.h new file mode 100644 index 000000000..bdb9bba72 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc0b7.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef clc0b7_h +#define clc0b7_h + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC0B7_VIDEO_ENCODER (0x0000C0B7) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc0b7_h + diff --git a/src/common/sdk/nvidia/inc/class/clc0c0.h b/src/common/sdk/nvidia/inc/class/clc0c0.h new file mode 100644 index 000000000..fde870298 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc0c0.h @@ -0,0 +1,777 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl_pascal_compute_a_h_ +#define _cl_pascal_compute_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl pascal_compute_a */ + +#include "nvtypes.h" + +#define PASCAL_COMPUTE_A 0xC0C0 + +#define NVC0C0_SET_OBJECT 0x0000 +#define NVC0C0_SET_OBJECT_CLASS_ID 15:0 +#define NVC0C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC0C0_NO_OPERATION 0x0100 +#define NVC0C0_NO_OPERATION_V 31:0 + +#define NVC0C0_SET_NOTIFY_A 0x0104 +#define NVC0C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC0C0_SET_NOTIFY_B 0x0108 +#define NVC0C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC0C0_NOTIFY 0x010c +#define NVC0C0_NOTIFY_TYPE 31:0 +#define NVC0C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC0C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC0C0_WAIT_FOR_IDLE 0x0110 +#define NVC0C0_WAIT_FOR_IDLE_V 31:0 + +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC0C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC0C0_SEND_GO_IDLE 0x013c +#define NVC0C0_SEND_GO_IDLE_V 31:0 + +#define NVC0C0_PM_TRIGGER 0x0140 +#define NVC0C0_PM_TRIGGER_V 31:0 + +#define NVC0C0_PM_TRIGGER_WFI 0x0144 +#define NVC0C0_PM_TRIGGER_WFI_V 31:0 + +#define NVC0C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC0C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC0C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC0C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC0C0_LINE_LENGTH_IN 0x0180 +#define NVC0C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC0C0_LINE_COUNT 0x0184 +#define NVC0C0_LINE_COUNT_VALUE 31:0 + +#define NVC0C0_OFFSET_OUT_UPPER 0x0188 +#define NVC0C0_OFFSET_OUT_UPPER_VALUE 16:0 + +#define NVC0C0_OFFSET_OUT 0x018c +#define NVC0C0_OFFSET_OUT_VALUE 31:0 + +#define NVC0C0_PITCH_OUT 0x0190 +#define NVC0C0_PITCH_OUT_VALUE 31:0 + +#define NVC0C0_SET_DST_BLOCK_SIZE 0x0194 +#define NVC0C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC0C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC0C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC0C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC0C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC0C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC0C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC0C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC0C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC0C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC0C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC0C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC0C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC0C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC0C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC0C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC0C0_SET_DST_WIDTH 0x0198 +#define NVC0C0_SET_DST_WIDTH_V 31:0 + +#define NVC0C0_SET_DST_HEIGHT 0x019c +#define NVC0C0_SET_DST_HEIGHT_V 31:0 + +#define 
NVC0C0_SET_DST_DEPTH 0x01a0 +#define NVC0C0_SET_DST_DEPTH_V 31:0 + +#define NVC0C0_SET_DST_LAYER 0x01a4 +#define NVC0C0_SET_DST_LAYER_V 31:0 + +#define NVC0C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC0C0_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC0C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC0C0_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC0C0_LAUNCH_DMA 0x01b0 +#define NVC0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC0C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC0C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC0C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC0C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC0C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC0C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC0C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC0C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC0C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC0C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC0C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC0C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC0C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC0C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC0C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC0C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC0C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC0C0_LOAD_INLINE_DATA 0x01b4 +#define NVC0C0_LOAD_INLINE_DATA_V 31:0 + +#define NVC0C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC0C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC0C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC0C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC0C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC0C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC0C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC0C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC0C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC0C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC0C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC0C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC0C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC0C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC0C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVC0C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define NVC0C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVC0C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVC0C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVC0C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVC0C0_SET_COALESCE_WAITING_PERIOD_UNIT 0x020c +#define NVC0C0_SET_COALESCE_WAITING_PERIOD_UNIT_CLOCKS 31:0 + +#define NVC0C0_PERFMON_TRANSFER 0x0210 +#define NVC0C0_PERFMON_TRANSFER_V 31:0 + +#define NVC0C0_SET_SHADER_SHARED_MEMORY_WINDOW 0x0214 
+#define NVC0C0_SET_SHADER_SHARED_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVC0C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS 0x0218 +#define NVC0C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V 0:0 +#define NVC0C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_FALSE 0x00000000 +#define NVC0C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_TRUE 0x00000001 + +#define NVC0C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVC0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC0C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC0C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC0C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC0C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC0C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC0C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC0C0_SET_RESERVED_SW_METHOD00 0x0220 +#define NVC0C0_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD01 0x0224 +#define NVC0C0_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD02 0x0228 +#define NVC0C0_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD03 0x022c +#define NVC0C0_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD04 0x0230 +#define NVC0C0_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD05 0x0234 +#define NVC0C0_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD06 0x0238 +#define NVC0C0_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD07 0x023c +#define NVC0C0_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVC0C0_SET_CWD_CONTROL 0x0240 +#define NVC0C0_SET_CWD_CONTROL_SM_SELECTION 0:0 +#define NVC0C0_SET_CWD_CONTROL_SM_SELECTION_LOAD_BALANCED 0x00000000 +#define NVC0C0_SET_CWD_CONTROL_SM_SELECTION_ROUND_ROBIN 0x00000001 + +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC0C0_SET_CWD_REF_COUNTER 0x0248 +#define NVC0C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVC0C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVC0C0_SET_RESERVED_SW_METHOD08 0x024c +#define NVC0C0_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD09 0x0250 +#define NVC0C0_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD10 0x0254 +#define NVC0C0_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD11 0x0258 +#define NVC0C0_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD12 0x025c +#define NVC0C0_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD13 0x0260 +#define NVC0C0_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD14 0x0264 +#define NVC0C0_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVC0C0_SET_RESERVED_SW_METHOD15 0x0268 +#define 
NVC0C0_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVC0C0_SET_GWC_SCG_TYPE 0x026c +#define NVC0C0_SET_GWC_SCG_TYPE_SCG_TYPE 0:0 +#define NVC0C0_SET_GWC_SCG_TYPE_SCG_TYPE_GRAPHICS_COMPUTE0 0x00000000 +#define NVC0C0_SET_GWC_SCG_TYPE_SCG_TYPE_COMPUTE1 0x00000001 + +#define NVC0C0_SET_SCG_CONTROL 0x0270 +#define NVC0C0_SET_SCG_CONTROL_COMPUTE1_MAX_SM_COUNT 8:0 + +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A 0x0274 +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A_ADDRESS_UPPER 16:0 + +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B 0x0278 +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B_ADDRESS_LOWER 31:0 + +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C 0x027c +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_BYTE_COUNT 16:0 +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2 31:31 +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_TRUE 0x00000001 + +#define NVC0C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVC0C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC0C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC0C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVC0C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC0C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC0C0_SET_QMD_VERSION 0x0288 +#define NVC0C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVC0C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC0C0_SET_WFI_CONFIG 0x028c +#define NVC0C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI 0:0 +#define NVC0C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI_FALSE 0x00000000 +#define NVC0C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI_TRUE 0x00000001 + +#define NVC0C0_CHECK_QMD_VERSION 0x0290 +#define NVC0C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVC0C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC0C0_WAIT_FOR_IDLE_SCG_TYPE 0x0294 +#define NVC0C0_WAIT_FOR_IDLE_SCG_TYPE_V 31:0 + +#define NVC0C0_INVALIDATE_SKED_CACHES 0x0298 +#define NVC0C0_INVALIDATE_SKED_CACHES_V 0:0 + +#define NVC0C0_SET_SCG_RENDER_ENABLE_CONTROL 0x029c +#define NVC0C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE 0:0 +#define NVC0C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE_FALSE 0x00000000 +#define NVC0C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE_TRUE 0x00000001 + +#define NVC0C0_SET_SHADER_SHARED_MEMORY_WINDOW_A 0x02a0 +#define NVC0C0_SET_SHADER_SHARED_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC0C0_SET_SHADER_SHARED_MEMORY_WINDOW_B 0x02a4 +#define NVC0C0_SET_SHADER_SHARED_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC0C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVC0C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVC0C0_SEND_PCAS_A 0x02b4 +#define NVC0C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVC0C0_SEND_PCAS_B 0x02b8 +#define NVC0C0_SEND_PCAS_B_FROM 23:0 +#define NVC0C0_SEND_PCAS_B_DELTA 31:24 + +#define NVC0C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVC0C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVC0C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVC0C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVC0C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVC0C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVC0C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define 
NVC0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A 0x02f0 +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B 0x02f4 +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C 0x02f8 +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVC0C0_SET_SPA_VERSION 0x0310 +#define NVC0C0_SET_SPA_VERSION_MINOR 7:0 +#define NVC0C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC0C0_SET_INLINE_QMD_ADDRESS_A 0x0318 +#define NVC0C0_SET_INLINE_QMD_ADDRESS_A_QMD_ADDRESS_SHIFTED8_UPPER 31:0 + +#define NVC0C0_SET_INLINE_QMD_ADDRESS_B 0x031c +#define NVC0C0_SET_INLINE_QMD_ADDRESS_B_QMD_ADDRESS_SHIFTED8_LOWER 31:0 + +#define NVC0C0_LOAD_INLINE_QMD_DATA(i) (0x0320+(i)*4) +#define NVC0C0_LOAD_INLINE_QMD_DATA_V 31:0 + +#define NVC0C0_SET_FALCON00 0x0500 +#define NVC0C0_SET_FALCON00_V 31:0 + +#define NVC0C0_SET_FALCON01 0x0504 +#define NVC0C0_SET_FALCON01_V 31:0 + +#define NVC0C0_SET_FALCON02 0x0508 +#define NVC0C0_SET_FALCON02_V 31:0 + +#define NVC0C0_SET_FALCON03 0x050c +#define NVC0C0_SET_FALCON03_V 31:0 + +#define NVC0C0_SET_FALCON04 0x0510 +#define NVC0C0_SET_FALCON04_V 31:0 + +#define NVC0C0_SET_FALCON05 0x0514 +#define NVC0C0_SET_FALCON05_V 31:0 + +#define NVC0C0_SET_FALCON06 0x0518 +#define NVC0C0_SET_FALCON06_V 31:0 + +#define NVC0C0_SET_FALCON07 0x051c +#define NVC0C0_SET_FALCON07_V 31:0 + +#define NVC0C0_SET_FALCON08 0x0520 +#define NVC0C0_SET_FALCON08_V 31:0 + +#define NVC0C0_SET_FALCON09 0x0524 +#define NVC0C0_SET_FALCON09_V 31:0 + +#define NVC0C0_SET_FALCON10 0x0528 +#define NVC0C0_SET_FALCON10_V 31:0 + +#define NVC0C0_SET_FALCON11 0x052c +#define NVC0C0_SET_FALCON11_V 31:0 + +#define NVC0C0_SET_FALCON12 0x0530 +#define NVC0C0_SET_FALCON12_V 31:0 + +#define NVC0C0_SET_FALCON13 0x0534 +#define NVC0C0_SET_FALCON13_V 31:0 + +#define NVC0C0_SET_FALCON14 0x0538 +#define NVC0C0_SET_FALCON14_V 31:0 + +#define NVC0C0_SET_FALCON15 0x053c +#define NVC0C0_SET_FALCON15_V 31:0 + +#define NVC0C0_SET_FALCON16 0x0540 +#define NVC0C0_SET_FALCON16_V 31:0 + +#define NVC0C0_SET_FALCON17 0x0544 +#define NVC0C0_SET_FALCON17_V 31:0 + +#define NVC0C0_SET_FALCON18 0x0548 +#define NVC0C0_SET_FALCON18_V 31:0 + +#define NVC0C0_SET_FALCON19 0x054c +#define NVC0C0_SET_FALCON19_V 31:0 + +#define NVC0C0_SET_FALCON20 0x0550 +#define NVC0C0_SET_FALCON20_V 31:0 + +#define NVC0C0_SET_FALCON21 0x0554 +#define NVC0C0_SET_FALCON21_V 31:0 + +#define NVC0C0_SET_FALCON22 0x0558 +#define NVC0C0_SET_FALCON22_V 31:0 + +#define NVC0C0_SET_FALCON23 0x055c +#define NVC0C0_SET_FALCON23_V 31:0 + +#define NVC0C0_SET_FALCON24 0x0560 +#define NVC0C0_SET_FALCON24_V 31:0 + +#define NVC0C0_SET_FALCON25 0x0564 +#define NVC0C0_SET_FALCON25_V 31:0 + +#define NVC0C0_SET_FALCON26 0x0568 +#define NVC0C0_SET_FALCON26_V 31:0 + +#define NVC0C0_SET_FALCON27 0x056c +#define NVC0C0_SET_FALCON27_V 31:0 + +#define NVC0C0_SET_FALCON28 0x0570 +#define NVC0C0_SET_FALCON28_V 31:0 + +#define NVC0C0_SET_FALCON29 0x0574 +#define NVC0C0_SET_FALCON29_V 31:0 + +#define NVC0C0_SET_FALCON30 0x0578 +#define NVC0C0_SET_FALCON30_V 31:0 + +#define NVC0C0_SET_FALCON31 0x057c +#define NVC0C0_SET_FALCON31_V 31:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + 
+#define NVC0C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 16:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A 0x07b0 +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B 0x07b4 +#define NVC0C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC0C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVC0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVC0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVC0C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVC0C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVC0C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVC0C0_SET_SPARE_NOOP12 0x0f44 +#define NVC0C0_SET_SPARE_NOOP12_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP13 0x0f48 +#define NVC0C0_SET_SPARE_NOOP13_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP14 0x0f4c +#define NVC0C0_SET_SPARE_NOOP14_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP15 0x0f50 +#define NVC0C0_SET_SPARE_NOOP15_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP00 0x1040 +#define NVC0C0_SET_SPARE_NOOP00_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP01 0x1044 +#define NVC0C0_SET_SPARE_NOOP01_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP02 0x1048 +#define NVC0C0_SET_SPARE_NOOP02_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP03 0x104c +#define NVC0C0_SET_SPARE_NOOP03_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP04 0x1050 +#define NVC0C0_SET_SPARE_NOOP04_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP05 0x1054 +#define NVC0C0_SET_SPARE_NOOP05_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP06 0x1058 +#define NVC0C0_SET_SPARE_NOOP06_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP07 0x105c +#define NVC0C0_SET_SPARE_NOOP07_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP08 0x1060 +#define NVC0C0_SET_SPARE_NOOP08_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP09 0x1064 +#define NVC0C0_SET_SPARE_NOOP09_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP10 0x1068 +#define NVC0C0_SET_SPARE_NOOP10_V 31:0 + +#define NVC0C0_SET_SPARE_NOOP11 0x106c +#define NVC0C0_SET_SPARE_NOOP11_V 31:0 + +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_ALL 0x120c +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_ALL_V 0:0 + +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_ALL 0x1210 +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_ALL_V 0:0 + +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC0C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVC0C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVC0C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define NVC0C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVC0C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 
+#define NVC0C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC0C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC0C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC0C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC0C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC0C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC0C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVC0C0_SET_RENDER_ENABLE_A 0x1550 +#define NVC0C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC0C0_SET_RENDER_ENABLE_B 0x1554 +#define NVC0C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC0C0_SET_RENDER_ENABLE_C 0x1558 +#define NVC0C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC0C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC0C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC0C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC0C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC0C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC0C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVC0C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC0C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVC0C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC0C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVC0C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVC0C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVC0C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC0C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVC0C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC0C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVC0C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVC0C0_SET_PROGRAM_REGION_A 0x1608 +#define NVC0C0_SET_PROGRAM_REGION_A_ADDRESS_UPPER 16:0 + +#define NVC0C0_SET_PROGRAM_REGION_B 0x160c +#define NVC0C0_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0 + +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVC0C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVC0C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVC0C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVC0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVC0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVC0C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVC0C0_PIPE_NOP 0x1a2c +#define NVC0C0_PIPE_NOP_V 31:0 + +#define NVC0C0_SET_SPARE00 0x1a30 +#define NVC0C0_SET_SPARE00_V 31:0 + +#define NVC0C0_SET_SPARE01 0x1a34 +#define NVC0C0_SET_SPARE01_V 
31:0 + +#define NVC0C0_SET_SPARE02 0x1a38 +#define NVC0C0_SET_SPARE02_V 31:0 + +#define NVC0C0_SET_SPARE03 0x1a3c +#define NVC0C0_SET_SPARE03_V 31:0 + +#define NVC0C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVC0C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC0C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVC0C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC0C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVC0C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC0C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVC0C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC0C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 + +#define NVC0C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVC0C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVC0C0_SET_TRAP_HANDLER 0x260c +#define NVC0C0_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define 
NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVC0C0_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVC0C0_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC0C0_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVC0C0_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8 +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0 + +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec +#define NVC0C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0 + +#define NVC0C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVC0C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_pascal_compute_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc197.h b/src/common/sdk/nvidia/inc/class/clc197.h new file mode 100644 index 000000000..0c8cce115 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc197.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc197_h_ +#define _clc197_h_ + +#define PASCAL_B 0xC197 + +#endif // _clc197_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc1b0.h b/src/common/sdk/nvidia/inc/class/clc1b0.h new file mode 100644 index 000000000..ec139e9b0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc1b0.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvtypes.h" + +#ifndef _clc1b0_h_ +#define _clc1b0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC1B0_VIDEO_DECODER (0x0000C1B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc1b0_h + diff --git a/src/common/sdk/nvidia/inc/class/clc1b5.h b/src/common/sdk/nvidia/inc/class/clc1b5.h new file mode 100644 index 000000000..fff8467fb --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc1b5.h @@ -0,0 +1,273 @@ +/******************************************************************************* + Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc1b5_h_ +#define _clc1b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define PASCAL_DMA_COPY_B (0x0000C1B5) + +#define NVC1B5_NOP (0x00000100) +#define NVC1B5_NOP_PARAMETER 31:0 +#define NVC1B5_PM_TRIGGER (0x00000140) +#define NVC1B5_PM_TRIGGER_V 31:0 +#define NVC1B5_SET_SEMAPHORE_A (0x00000240) +#define NVC1B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC1B5_SET_SEMAPHORE_B (0x00000244) +#define NVC1B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC1B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC1B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC1B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC1B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC1B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC1B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC1B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC1B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC1B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC1B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC1B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC1B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC1B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC1B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC1B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC1B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC1B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC1B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC1B5_LAUNCH_DMA (0x00000300) +#define NVC1B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC1B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC1B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC1B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC1B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC1B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC1B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC1B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC1B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC1B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC1B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC1B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC1B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC1B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC1B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC1B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC1B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC1B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC1B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define 
NVC1B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC1B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC1B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC1B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC1B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC1B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC1B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC1B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2 20:20 +#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC1B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC1B5_LAUNCH_DMA_DST_BYPASS_L2 21:21 +#define NVC1B5_LAUNCH_DMA_DST_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC1B5_LAUNCH_DMA_DST_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC1B5_LAUNCH_DMA_VPRMODE 23:22 +#define NVC1B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC1B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC1B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC1B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC1B5_OFFSET_IN_UPPER (0x00000400) +#define NVC1B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC1B5_OFFSET_IN_LOWER (0x00000404) +#define NVC1B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC1B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC1B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC1B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC1B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC1B5_PITCH_IN (0x00000410) +#define NVC1B5_PITCH_IN_VALUE 31:0 +#define NVC1B5_PITCH_OUT (0x00000414) +#define NVC1B5_PITCH_OUT_VALUE 31:0 +#define NVC1B5_LINE_LENGTH_IN (0x00000418) +#define NVC1B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC1B5_LINE_COUNT (0x0000041C) +#define NVC1B5_LINE_COUNT_VALUE 31:0 +#define NVC1B5_SET_REMAP_CONST_A (0x00000700) +#define NVC1B5_SET_REMAP_CONST_A_V 31:0 +#define NVC1B5_SET_REMAP_CONST_B (0x00000704) +#define NVC1B5_SET_REMAP_CONST_B_V 31:0 +#define NVC1B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define 
NVC1B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC1B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC1B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC1B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC1B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC1B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC1B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC1B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define 
NVC1B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC1B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC1B5_SET_DST_WIDTH (0x00000710) +#define NVC1B5_SET_DST_WIDTH_V 31:0 +#define NVC1B5_SET_DST_HEIGHT (0x00000714) +#define NVC1B5_SET_DST_HEIGHT_V 31:0 +#define NVC1B5_SET_DST_DEPTH (0x00000718) +#define NVC1B5_SET_DST_DEPTH_V 31:0 +#define NVC1B5_SET_DST_LAYER (0x0000071C) +#define NVC1B5_SET_DST_LAYER_V 31:0 +#define NVC1B5_SET_DST_ORIGIN (0x00000720) +#define NVC1B5_SET_DST_ORIGIN_X 15:0 +#define NVC1B5_SET_DST_ORIGIN_Y 31:16 +#define NVC1B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC1B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC1B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC1B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC1B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC1B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC1B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC1B5_SET_SRC_WIDTH (0x0000072C) +#define NVC1B5_SET_SRC_WIDTH_V 31:0 +#define NVC1B5_SET_SRC_HEIGHT (0x00000730) +#define NVC1B5_SET_SRC_HEIGHT_V 31:0 +#define NVC1B5_SET_SRC_DEPTH (0x00000734) +#define NVC1B5_SET_SRC_DEPTH_V 31:0 +#define NVC1B5_SET_SRC_LAYER (0x00000738) +#define NVC1B5_SET_SRC_LAYER_V 31:0 +#define NVC1B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC1B5_SET_SRC_ORIGIN_X 15:0 +#define NVC1B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC1B5_SRC_ORIGIN_X (0x00000744) +#define NVC1B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC1B5_SRC_ORIGIN_Y (0x00000748) +#define NVC1B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC1B5_DST_ORIGIN_X (0x0000074C) +#define NVC1B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC1B5_DST_ORIGIN_Y (0x00000750) +#define NVC1B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC1B5_PM_TRIGGER_END (0x00001114) +#define NVC1B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc1b5_h + diff --git a/src/common/sdk/nvidia/inc/class/clc1b5sw.h b/src/common/sdk/nvidia/inc/class/clc1b5sw.h new file mode 100644 index 000000000..213119ec8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc1b5sw.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _clc1b5sw_h_ +#define _clc1b5sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file is *not* auto-generated. */ + +// +// Using VERSION_0 will cause the API to interpret +// engineType as a CE engine instance. This allows +// for backward compatibility with 85B5sw and 90B5sw. +// +#define NVC1B5_ALLOCATION_PARAMETERS_VERSION_0 0 + +// +// Using VERSION_1 will cause the API to interpret +// engineType as an NV2080_ENGINE_TYPE ordinal. +// +#define NVC1B5_ALLOCATION_PARAMETERS_VERSION_1 1 + +typedef struct +{ + NvU32 version; + NvU32 engineType; +} NVC1B5_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc1b5sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/clc1b7.h b/src/common/sdk/nvidia/inc/class/clc1b7.h new file mode 100644 index 000000000..14d94ed7e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc1b7.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
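/*
 * Editorial sketch (not part of the patch): how a client might fill the
 * NVC1B5_ALLOCATION_PARAMETERS structure defined in clc1b5sw.h above. The
 * surrounding allocation call and the NV2080_ENGINE_TYPE_COPY2 ordinal are
 * assumptions for illustration only; only the struct and the two VERSION_*
 * values come from the header itself.
 */
NVC1B5_ALLOCATION_PARAMETERS ceAllocParams = { 0 };

/* VERSION_0: engineType is interpreted as a raw CE engine instance
 * (backward compatible with 85B5sw/90B5sw). */
ceAllocParams.version    = NVC1B5_ALLOCATION_PARAMETERS_VERSION_0;
ceAllocParams.engineType = 2;                       /* e.g. third copy engine instance */

/* VERSION_1: engineType is interpreted as an NV2080_ENGINE_TYPE ordinal. */
ceAllocParams.version    = NVC1B5_ALLOCATION_PARAMETERS_VERSION_1;
ceAllocParams.engineType = NV2080_ENGINE_TYPE_COPY2; /* hypothetical ordinal name */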
+ */ + + +#ifndef clc1b7_h +#define clc1b7_h + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC1B7_VIDEO_ENCODER (0x0000C1B7) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc1b7_h + diff --git a/src/common/sdk/nvidia/inc/class/clc1c0.h b/src/common/sdk/nvidia/inc/class/clc1c0.h new file mode 100644 index 000000000..f2569bfcf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc1c0.h @@ -0,0 +1,795 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_pascal_compute_b_h_ +#define _cl_pascal_compute_b_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl pascal_compute_b */ + +#include "nvtypes.h" + +#define PASCAL_COMPUTE_B 0xC1C0 + +#define NVC1C0_SET_OBJECT 0x0000 +#define NVC1C0_SET_OBJECT_CLASS_ID 15:0 +#define NVC1C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC1C0_NO_OPERATION 0x0100 +#define NVC1C0_NO_OPERATION_V 31:0 + +#define NVC1C0_SET_NOTIFY_A 0x0104 +#define NVC1C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC1C0_SET_NOTIFY_B 0x0108 +#define NVC1C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC1C0_NOTIFY 0x010c +#define NVC1C0_NOTIFY_TYPE 31:0 +#define NVC1C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC1C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC1C0_WAIT_FOR_IDLE 0x0110 +#define NVC1C0_WAIT_FOR_IDLE_V 31:0 + +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC1C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC1C0_SEND_GO_IDLE 0x013c +#define NVC1C0_SEND_GO_IDLE_V 31:0 + +#define NVC1C0_PM_TRIGGER 0x0140 +#define NVC1C0_PM_TRIGGER_V 31:0 + +#define NVC1C0_PM_TRIGGER_WFI 0x0144 +#define NVC1C0_PM_TRIGGER_WFI_V 31:0 + +#define NVC1C0_FE_ATOMIC_SEQUENCE_BEGIN 0x0148 +#define NVC1C0_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0 + 
+#define NVC1C0_FE_ATOMIC_SEQUENCE_END 0x014c +#define NVC1C0_FE_ATOMIC_SEQUENCE_END_V 31:0 + +#define NVC1C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC1C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC1C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC1C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC1C0_LINE_LENGTH_IN 0x0180 +#define NVC1C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC1C0_LINE_COUNT 0x0184 +#define NVC1C0_LINE_COUNT_VALUE 31:0 + +#define NVC1C0_OFFSET_OUT_UPPER 0x0188 +#define NVC1C0_OFFSET_OUT_UPPER_VALUE 16:0 + +#define NVC1C0_OFFSET_OUT 0x018c +#define NVC1C0_OFFSET_OUT_VALUE 31:0 + +#define NVC1C0_PITCH_OUT 0x0190 +#define NVC1C0_PITCH_OUT_VALUE 31:0 + +#define NVC1C0_SET_DST_BLOCK_SIZE 0x0194 +#define NVC1C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC1C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC1C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC1C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC1C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC1C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC1C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC1C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC1C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC1C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC1C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC1C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC1C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC1C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC1C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC1C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC1C0_SET_DST_WIDTH 0x0198 +#define NVC1C0_SET_DST_WIDTH_V 31:0 + +#define NVC1C0_SET_DST_HEIGHT 0x019c +#define NVC1C0_SET_DST_HEIGHT_V 31:0 + +#define NVC1C0_SET_DST_DEPTH 0x01a0 +#define NVC1C0_SET_DST_DEPTH_V 31:0 + +#define NVC1C0_SET_DST_LAYER 0x01a4 +#define NVC1C0_SET_DST_LAYER_V 31:0 + +#define NVC1C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC1C0_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC1C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC1C0_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC1C0_LAUNCH_DMA 0x01b0 +#define NVC1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC1C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC1C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC1C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC1C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC1C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC1C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC1C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC1C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC1C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC1C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC1C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC1C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC1C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC1C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC1C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC1C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC1C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC1C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 
+#define NVC1C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC1C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC1C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC1C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC1C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC1C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC1C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC1C0_LOAD_INLINE_DATA 0x01b4 +#define NVC1C0_LOAD_INLINE_DATA_V 31:0 + +#define NVC1C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC1C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC1C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC1C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC1C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC1C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC1C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC1C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC1C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC1C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC1C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC1C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC1C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC1C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC1C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVC1C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define NVC1C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVC1C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVC1C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVC1C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVC1C0_SET_COALESCE_WAITING_PERIOD_UNIT 0x020c +#define NVC1C0_SET_COALESCE_WAITING_PERIOD_UNIT_CLOCKS 31:0 + +#define NVC1C0_PERFMON_TRANSFER 0x0210 +#define NVC1C0_PERFMON_TRANSFER_V 31:0 + +#define NVC1C0_SET_SHADER_SHARED_MEMORY_WINDOW 0x0214 +#define NVC1C0_SET_SHADER_SHARED_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVC1C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS 0x0218 +#define NVC1C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V 0:0 +#define NVC1C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_FALSE 0x00000000 +#define NVC1C0_SET_SELECT_MAXWELL_TEXTURE_HEADERS_V_TRUE 0x00000001 + +#define NVC1C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVC1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC1C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC1C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC1C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC1C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC1C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC1C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC1C0_SET_RESERVED_SW_METHOD00 0x0220 +#define NVC1C0_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD01 0x0224 +#define NVC1C0_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD02 0x0228 +#define NVC1C0_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD03 0x022c +#define 
NVC1C0_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD04 0x0230 +#define NVC1C0_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD05 0x0234 +#define NVC1C0_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD06 0x0238 +#define NVC1C0_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD07 0x023c +#define NVC1C0_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVC1C0_SET_CWD_CONTROL 0x0240 +#define NVC1C0_SET_CWD_CONTROL_SM_SELECTION 0:0 +#define NVC1C0_SET_CWD_CONTROL_SM_SELECTION_LOAD_BALANCED 0x00000000 +#define NVC1C0_SET_CWD_CONTROL_SM_SELECTION_ROUND_ROBIN 0x00000001 + +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC1C0_SET_CWD_REF_COUNTER 0x0248 +#define NVC1C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVC1C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVC1C0_SET_RESERVED_SW_METHOD08 0x024c +#define NVC1C0_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD09 0x0250 +#define NVC1C0_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD10 0x0254 +#define NVC1C0_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD11 0x0258 +#define NVC1C0_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD12 0x025c +#define NVC1C0_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD13 0x0260 +#define NVC1C0_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD14 0x0264 +#define NVC1C0_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVC1C0_SET_RESERVED_SW_METHOD15 0x0268 +#define NVC1C0_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVC1C0_SET_GWC_SCG_TYPE 0x026c +#define NVC1C0_SET_GWC_SCG_TYPE_SCG_TYPE 0:0 +#define NVC1C0_SET_GWC_SCG_TYPE_SCG_TYPE_GRAPHICS_COMPUTE0 0x00000000 +#define NVC1C0_SET_GWC_SCG_TYPE_SCG_TYPE_COMPUTE1 0x00000001 + +#define NVC1C0_SET_SCG_CONTROL 0x0270 +#define NVC1C0_SET_SCG_CONTROL_COMPUTE1_MAX_SM_COUNT 8:0 +#define NVC1C0_SET_SCG_CONTROL_COMPUTE1_MIN_SM_COUNT 20:12 +#define NVC1C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE 24:24 +#define NVC1C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_FALSE 0x00000000 +#define NVC1C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_TRUE 0x00000001 + +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A 0x0274 +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_A_ADDRESS_UPPER 16:0 + +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B 0x0278 +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_B_ADDRESS_LOWER 31:0 + +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C 0x027c +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_BYTE_COUNT 16:0 +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2 31:31 +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_CONSTANT_BUFFER_CACHE_C_THRU_L2_TRUE 0x00000001 + +#define NVC1C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVC1C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC1C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC1C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVC1C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC1C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC1C0_SET_QMD_VERSION 0x0288 
+#define NVC1C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVC1C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC1C0_SET_WFI_CONFIG 0x028c +#define NVC1C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI 0:0 +#define NVC1C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI_FALSE 0x00000000 +#define NVC1C0_SET_WFI_CONFIG_ENABLE_SCG_TYPE_WFI_TRUE 0x00000001 + +#define NVC1C0_CHECK_QMD_VERSION 0x0290 +#define NVC1C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVC1C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC1C0_WAIT_FOR_IDLE_SCG_TYPE 0x0294 +#define NVC1C0_WAIT_FOR_IDLE_SCG_TYPE_V 31:0 + +#define NVC1C0_INVALIDATE_SKED_CACHES 0x0298 +#define NVC1C0_INVALIDATE_SKED_CACHES_V 0:0 + +#define NVC1C0_SET_SCG_RENDER_ENABLE_CONTROL 0x029c +#define NVC1C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE 0:0 +#define NVC1C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE_FALSE 0x00000000 +#define NVC1C0_SET_SCG_RENDER_ENABLE_CONTROL_COMPUTE1_USES_RENDER_ENABLE_TRUE 0x00000001 + +#define NVC1C0_SET_SHADER_SHARED_MEMORY_WINDOW_A 0x02a0 +#define NVC1C0_SET_SHADER_SHARED_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC1C0_SET_SHADER_SHARED_MEMORY_WINDOW_B 0x02a4 +#define NVC1C0_SET_SHADER_SHARED_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC1C0_SCG_HYSTERESIS_CONTROL 0x02a8 +#define NVC1C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE 0:0 +#define NVC1C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC1C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_TRUE 0x00000001 +#define NVC1C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE 1:1 +#define NVC1C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC1C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_TRUE 0x00000001 + +#define NVC1C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVC1C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVC1C0_SEND_PCAS_A 0x02b4 +#define NVC1C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVC1C0_SEND_PCAS_B 0x02b8 +#define NVC1C0_SEND_PCAS_B_FROM 23:0 +#define NVC1C0_SEND_PCAS_B_DELTA 31:24 + +#define NVC1C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVC1C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVC1C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVC1C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVC1C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVC1C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVC1C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A 0x02f0 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B 0x02f4 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C 0x02f8 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVC1C0_SET_SPA_VERSION 0x0310 +#define NVC1C0_SET_SPA_VERSION_MINOR 7:0 +#define NVC1C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC1C0_SET_INLINE_QMD_ADDRESS_A 0x0318 +#define NVC1C0_SET_INLINE_QMD_ADDRESS_A_QMD_ADDRESS_SHIFTED8_UPPER 31:0 + +#define NVC1C0_SET_INLINE_QMD_ADDRESS_B 0x031c +#define 
NVC1C0_SET_INLINE_QMD_ADDRESS_B_QMD_ADDRESS_SHIFTED8_LOWER 31:0 + +#define NVC1C0_LOAD_INLINE_QMD_DATA(i) (0x0320+(i)*4) +#define NVC1C0_LOAD_INLINE_QMD_DATA_V 31:0 + +#define NVC1C0_SET_FALCON00 0x0500 +#define NVC1C0_SET_FALCON00_V 31:0 + +#define NVC1C0_SET_FALCON01 0x0504 +#define NVC1C0_SET_FALCON01_V 31:0 + +#define NVC1C0_SET_FALCON02 0x0508 +#define NVC1C0_SET_FALCON02_V 31:0 + +#define NVC1C0_SET_FALCON03 0x050c +#define NVC1C0_SET_FALCON03_V 31:0 + +#define NVC1C0_SET_FALCON04 0x0510 +#define NVC1C0_SET_FALCON04_V 31:0 + +#define NVC1C0_SET_FALCON05 0x0514 +#define NVC1C0_SET_FALCON05_V 31:0 + +#define NVC1C0_SET_FALCON06 0x0518 +#define NVC1C0_SET_FALCON06_V 31:0 + +#define NVC1C0_SET_FALCON07 0x051c +#define NVC1C0_SET_FALCON07_V 31:0 + +#define NVC1C0_SET_FALCON08 0x0520 +#define NVC1C0_SET_FALCON08_V 31:0 + +#define NVC1C0_SET_FALCON09 0x0524 +#define NVC1C0_SET_FALCON09_V 31:0 + +#define NVC1C0_SET_FALCON10 0x0528 +#define NVC1C0_SET_FALCON10_V 31:0 + +#define NVC1C0_SET_FALCON11 0x052c +#define NVC1C0_SET_FALCON11_V 31:0 + +#define NVC1C0_SET_FALCON12 0x0530 +#define NVC1C0_SET_FALCON12_V 31:0 + +#define NVC1C0_SET_FALCON13 0x0534 +#define NVC1C0_SET_FALCON13_V 31:0 + +#define NVC1C0_SET_FALCON14 0x0538 +#define NVC1C0_SET_FALCON14_V 31:0 + +#define NVC1C0_SET_FALCON15 0x053c +#define NVC1C0_SET_FALCON15_V 31:0 + +#define NVC1C0_SET_FALCON16 0x0540 +#define NVC1C0_SET_FALCON16_V 31:0 + +#define NVC1C0_SET_FALCON17 0x0544 +#define NVC1C0_SET_FALCON17_V 31:0 + +#define NVC1C0_SET_FALCON18 0x0548 +#define NVC1C0_SET_FALCON18_V 31:0 + +#define NVC1C0_SET_FALCON19 0x054c +#define NVC1C0_SET_FALCON19_V 31:0 + +#define NVC1C0_SET_FALCON20 0x0550 +#define NVC1C0_SET_FALCON20_V 31:0 + +#define NVC1C0_SET_FALCON21 0x0554 +#define NVC1C0_SET_FALCON21_V 31:0 + +#define NVC1C0_SET_FALCON22 0x0558 +#define NVC1C0_SET_FALCON22_V 31:0 + +#define NVC1C0_SET_FALCON23 0x055c +#define NVC1C0_SET_FALCON23_V 31:0 + +#define NVC1C0_SET_FALCON24 0x0560 +#define NVC1C0_SET_FALCON24_V 31:0 + +#define NVC1C0_SET_FALCON25 0x0564 +#define NVC1C0_SET_FALCON25_V 31:0 + +#define NVC1C0_SET_FALCON26 0x0568 +#define NVC1C0_SET_FALCON26_V 31:0 + +#define NVC1C0_SET_FALCON27 0x056c +#define NVC1C0_SET_FALCON27_V 31:0 + +#define NVC1C0_SET_FALCON28 0x0570 +#define NVC1C0_SET_FALCON28_V 31:0 + +#define NVC1C0_SET_FALCON29 0x0574 +#define NVC1C0_SET_FALCON29_V 31:0 + +#define NVC1C0_SET_FALCON30 0x0578 +#define NVC1C0_SET_FALCON30_V 31:0 + +#define NVC1C0_SET_FALCON31 0x057c +#define NVC1C0_SET_FALCON31_V 31:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_WINDOW 0x077c +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_WINDOW_BASE_ADDRESS 31:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 16:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A 0x07b0 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B 0x07b4 +#define NVC1C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC1C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVC1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVC1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVC1C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVC1C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVC1C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVC1C0_SET_SPARE_NOOP12 
0x0f44 +#define NVC1C0_SET_SPARE_NOOP12_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP13 0x0f48 +#define NVC1C0_SET_SPARE_NOOP13_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP14 0x0f4c +#define NVC1C0_SET_SPARE_NOOP14_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP15 0x0f50 +#define NVC1C0_SET_SPARE_NOOP15_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP00 0x1040 +#define NVC1C0_SET_SPARE_NOOP00_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP01 0x1044 +#define NVC1C0_SET_SPARE_NOOP01_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP02 0x1048 +#define NVC1C0_SET_SPARE_NOOP02_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP03 0x104c +#define NVC1C0_SET_SPARE_NOOP03_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP04 0x1050 +#define NVC1C0_SET_SPARE_NOOP04_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP05 0x1054 +#define NVC1C0_SET_SPARE_NOOP05_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP06 0x1058 +#define NVC1C0_SET_SPARE_NOOP06_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP07 0x105c +#define NVC1C0_SET_SPARE_NOOP07_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP08 0x1060 +#define NVC1C0_SET_SPARE_NOOP08_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP09 0x1064 +#define NVC1C0_SET_SPARE_NOOP09_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP10 0x1068 +#define NVC1C0_SET_SPARE_NOOP10_V 31:0 + +#define NVC1C0_SET_SPARE_NOOP11 0x106c +#define NVC1C0_SET_SPARE_NOOP11_V 31:0 + +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_ALL 0x120c +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_ALL_V 0:0 + +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_ALL 0x1210 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_ALL_V 0:0 + +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVC1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVC1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define NVC1C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVC1C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC1C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC1C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC1C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC1C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC1C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC1C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC1C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define 
NVC1C0_SET_RENDER_ENABLE_A 0x1550 +#define NVC1C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC1C0_SET_RENDER_ENABLE_B 0x1554 +#define NVC1C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC1C0_SET_RENDER_ENABLE_C 0x1558 +#define NVC1C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC1C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC1C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC1C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC1C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC1C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC1C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVC1C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC1C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVC1C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC1C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVC1C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVC1C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVC1C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC1C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVC1C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC1C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVC1C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVC1C0_SET_PROGRAM_REGION_A 0x1608 +#define NVC1C0_SET_PROGRAM_REGION_A_ADDRESS_UPPER 16:0 + +#define NVC1C0_SET_PROGRAM_REGION_B 0x160c +#define NVC1C0_SET_PROGRAM_REGION_B_ADDRESS_LOWER 31:0 + +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVC1C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVC1C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVC1C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVC1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVC1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVC1C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVC1C0_PIPE_NOP 0x1a2c +#define NVC1C0_PIPE_NOP_V 31:0 + +#define NVC1C0_SET_SPARE00 0x1a30 +#define NVC1C0_SET_SPARE00_V 31:0 + +#define NVC1C0_SET_SPARE01 0x1a34 +#define NVC1C0_SET_SPARE01_V 31:0 + +#define NVC1C0_SET_SPARE02 0x1a38 +#define NVC1C0_SET_SPARE02_V 31:0 + +#define NVC1C0_SET_SPARE03 0x1a3c +#define NVC1C0_SET_SPARE03_V 31:0 + +#define NVC1C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVC1C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC1C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVC1C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC1C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVC1C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC1C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVC1C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 
+#define NVC1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC1C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 + +#define NVC1C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVC1C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVC1C0_SET_TRAP_HANDLER 0x260c +#define NVC1C0_SET_TRAP_HANDLER_OFFSET 31:0 + +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define 
NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVC1C0_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVC1C0_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC1C0_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVC1C0_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8 +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0 + +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec +#define NVC1C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0 + +#define NVC1C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVC1C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_pascal_compute_b_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc2b0.h b/src/common/sdk/nvidia/inc/class/clc2b0.h new file mode 100644 index 000000000..9961f82b9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc2b0.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _clc2b0_h_ +#define _clc2b0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC2B0_VIDEO_DECODER (0x0000C2B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc2b0_h + diff --git a/src/common/sdk/nvidia/inc/class/clc2b7.h b/src/common/sdk/nvidia/inc/class/clc2b7.h new file mode 100644 index 000000000..4c4e37da4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc2b7.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef clc2b7_h +#define clc2b7_h + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC2B7_VIDEO_ENCODER (0x0000C2B7) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc2b7_h + diff --git a/src/common/sdk/nvidia/inc/class/clc361.h b/src/common/sdk/nvidia/inc/class/clc361.h new file mode 100644 index 000000000..daa88416d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc361.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc361_h_ +#define _clc361_h_ + +#define VOLTA_USERMODE_A (0xc361) + +#define NVC361 0x0081ffff:0x00810000 +#define NVC361_NV_USERMODE__SIZE 65536 +#define NVC361_TIME_0 0x00000080 +#define NVC361_TIME_1 0x00000084 +#define NVC361_NOTIFY_CHANNEL_PENDING 0x00000090 + +#endif // _clc361_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc365.h b/src/common/sdk/nvidia/inc/class/clc365.h new file mode 100644 index 000000000..2bbd7667a --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc365.h @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
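/*
 * Editorial sketch (not part of the patch): reading a consistent 64-bit
 * timestamp from the VOLTA_USERMODE_A (NVC361) register window defined
 * above. This assumes TIME_0 holds the low 32 bits and TIME_1 the high 32
 * bits of the timer, following the traditional PTIMER layout; `usermode` is
 * a hypothetical pointer to the mapped 64 KB NVC361 region.
 */
static inline NvU64 readGpuTime(volatile NvU8 *usermode)
{
    NvU32 hi, lo, hi2;

    do {
        hi  = *(volatile const NvU32 *)(usermode + NVC361_TIME_1);
        lo  = *(volatile const NvU32 *)(usermode + NVC361_TIME_0);
        hi2 = *(volatile const NvU32 *)(usermode + NVC361_TIME_1);
    } while (hi != hi2);    /* retry if the high word rolled over mid-read */

    return ((NvU64)hi << 32) | lo;
}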
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc365_h_ +#define _clc365_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define ACCESS_COUNTER_NOTIFY_BUFFER (0xc365) + +#define NVC365_NOTIFY_BUF +#define NVC365_NOTIFY_BUF_ENTRY 0x0000001f:0x00000000 +#define NVC365_NOTIFY_BUF_SIZE 32 +#define NVC365_NOTIFY_BUF_ENTRY_TYPE MW((0+0*32):(0*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_TYPE_CPU 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_TYPE_GPU 0x00000001 +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_TYPE MW((1+0*32):(0*32+1)) +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_TYPE_GVA 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_TYPE_GPA 0x00000001 +#define NVC365_NOTIFY_BUF_ENTRY_BANK MW((5+0*32):(0*32+2)) +#define NVC365_NOTIFY_BUF_ENTRY_BANK_0 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_1 0x00000001 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_2 0x00000002 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_3 0x00000003 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_4 0x00000004 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_5 0x00000005 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_6 0x00000006 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_7 0x00000007 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_8 0x00000008 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_9 0x00000009 +#define NVC365_NOTIFY_BUF_ENTRY_BANK_10 0x0000000a +#define NVC365_NOTIFY_BUF_ENTRY_BANK_11 0x0000000b +#define NVC365_NOTIFY_BUF_ENTRY_BANK_12 0x0000000c +#define NVC365_NOTIFY_BUF_ENTRY_BANK_13 0x0000000d +#define NVC365_NOTIFY_BUF_ENTRY_BANK_14 0x0000000e +#define NVC365_NOTIFY_BUF_ENTRY_BANK_15 0x0000000f +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE MW((9+0*32):(0*32+8)) +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE_VID_MEM 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE_PEER_MEM 0x00000001 +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC365_NOTIFY_BUF_ENTRY_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC365_NOTIFY_BUF_ENTRY_INST_APERTURE MW((11+0*32):(0*32+10)) +#define NVC365_NOTIFY_BUF_ENTRY_INST_APERTURE_VID_MEM 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC365_NOTIFY_BUF_ENTRY_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC365_NOTIFY_BUF_ENTRY_INST_LO MW((31+0*32):(0*32+12)) +#define NVC365_NOTIFY_BUF_ENTRY_INST_HI MW((31+1*32):(1*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_INST MW((31+1*32):(0*32+12)) +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_LO MW((31+2*32):(2*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_ADDR_HI MW((31+3*32):(3*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_ADDR MW((31+3*32):(2*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_SUB_GRANULARITY MW((31+4*32):(4*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_NOTIFY_TAG MW((19+5*32):(5*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_COUNTER_VAL MW((15+6*32):(6*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_PEER_ID MW((2+7*32):(7*32+0)) +#define NVC365_NOTIFY_BUF_ENTRY_MMU_ENGINE_ID MW((28+7*32):(7*32+20)) +#define NVC365_NOTIFY_BUF_ENTRY_VALID MW((31+7*32):(7*32+31)) +#define NVC365_NOTIFY_BUF_ENTRY_VALID_FALSE 0x00000000 +#define NVC365_NOTIFY_BUF_ENTRY_VALID_TRUE 0x00000001 +#define NVC365_NOTIFIERS_ACCESS_COUNTER (0) + +#ifdef 
__cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc365_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc369.h b/src/common/sdk/nvidia/inc/class/clc369.h new file mode 100644 index 000000000..a73e182dd --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc369.h @@ -0,0 +1,77 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc369_h_ +#define _clc369_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MMU_FAULT_BUFFER (0xc369) + +#define NVC369_BUF +#define NVC369_BUF_ENTRY 0x0000001f:0x00000000 +#define NVC369_BUF_SIZE 32 +#define NVC369_BUF_ENTRY_INST_APERTURE MW((9+0*32):(0*32+8)) +#define NVC369_BUF_ENTRY_INST_APERTURE_VID_MEM 0x00000000 +#define NVC369_BUF_ENTRY_INST_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC369_BUF_ENTRY_INST_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC369_BUF_ENTRY_INST_LO MW((31+0*32):(0*32+12)) +#define NVC369_BUF_ENTRY_INST_HI MW((31+1*32):(1*32+0)) +#define NVC369_BUF_ENTRY_INST MW((31+1*32):(0*32+12)) +#define NVC369_BUF_ENTRY_ADDR_PHYS_APERTURE MW((1+2*32):(2*32+0)) +#define NVC369_BUF_ENTRY_ADDR_LO MW((31+2*32):(2*32+12)) +#define NVC369_BUF_ENTRY_ADDR_HI MW((31+3*32):(3*32+0)) +#define NVC369_BUF_ENTRY_ADDR MW((31+3*32):(2*32+12)) +#define NVC369_BUF_ENTRY_TIMESTAMP_LO MW((31+4*32):(4*32+0)) +#define NVC369_BUF_ENTRY_TIMESTAMP_HI MW((31+5*32):(5*32+0)) +#define NVC369_BUF_ENTRY_TIMESTAMP MW((31+5*32):(4*32+0)) +#define NVC369_BUF_ENTRY_ENGINE_ID MW((8+6*32):(6*32+0)) +#define NVC369_BUF_ENTRY_FAULT_TYPE MW((4+7*32):(7*32+0)) +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT MW((7+7*32):(7*32+7)) +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_FALSE 0x00000000 +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_TRUE 0x00000001 +#define NVC369_BUF_ENTRY_CLIENT MW((14+7*32):(7*32+8)) +#define NVC369_BUF_ENTRY_ACCESS_TYPE MW((19+7*32):(7*32+16)) +#define NVC369_BUF_ENTRY_MMU_CLIENT_TYPE MW((20+7*32):(7*32+20)) +#define NVC369_BUF_ENTRY_GPC_ID MW((28+7*32):(7*32+24)) +#define NVC369_BUF_ENTRY_PROTECTED_MODE MW((29+7*32):(7*32+29)) +#define NVC369_BUF_ENTRY_PROTECTED_MODE_FALSE 0x00000000 +#define NVC369_BUF_ENTRY_PROTECTED_MODE_TRUE 0x00000001 +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_EN MW((30+7*32):(7*32+30)) +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_EN_FALSE 0x00000000 +#define NVC369_BUF_ENTRY_REPLAYABLE_FAULT_EN_TRUE 0x00000001 +#define NVC369_BUF_ENTRY_VALID 
MW((31+7*32):(7*32+31)) +#define NVC369_BUF_ENTRY_VALID_FALSE 0x00000000 +#define NVC369_BUF_ENTRY_VALID_TRUE 0x00000001 +#define NVC369_NOTIFIER_MMU_FAULT_NON_REPLAYABLE 0 +#define NVC369_NOTIFIER_MMU_FAULT_REPLAYABLE 1 +#define NVC369_NOTIFIER_MMU_FAULT_ERROR 2 +#define NVC369_NOTIFIER_MMU_FAULT_NON_REPLAYABLE_IN_PRIV 3 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc369_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc36f.h b/src/common/sdk/nvidia/inc/class/clc36f.h new file mode 100644 index 000000000..cae1c6839 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc36f.h @@ -0,0 +1,366 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc36f_h_ +#define _clc36f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class VOLTA_CHANNEL_GPFIFO */ +/* + * Documentation for VOLTA_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + * Note there is no .mfs file for this class. 
+ */ +#define VOLTA_CHANNEL_GPFIFO_A (0x0000C36F) + +#define NVC36F_TYPEDEF VOLTA_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct Nvc36fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc36fControl, VoltaAControlGPFifo; + +/* fields and values */ +#define NVC36F_NUMBER_OF_SUBCHANNELS (8) +#define NVC36F_SET_OBJECT (0x00000000) +#define NVC36F_SET_OBJECT_NVCLASS 15:0 +#define NVC36F_SET_OBJECT_ENGINE 20:16 +#define NVC36F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVC36F_ILLEGAL (0x00000004) +#define NVC36F_ILLEGAL_HANDLE 31:0 +#define NVC36F_NOP (0x00000008) +#define NVC36F_NOP_HANDLE 31:0 +#define NVC36F_SEMAPHOREA (0x00000010) +#define NVC36F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC36F_SEMAPHOREB (0x00000014) +#define NVC36F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC36F_SEMAPHOREC (0x00000018) +#define NVC36F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC36F_SEMAPHORED (0x0000001C) +#define NVC36F_SEMAPHORED_OPERATION 4:0 +#define NVC36F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC36F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC36F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC36F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVC36F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC36F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC36F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC36F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC36F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC36F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC36F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC36F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC36F_SEMAPHORED_REDUCTION 30:27 +#define NVC36F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC36F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC36F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC36F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC36F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC36F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC36F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC36F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVC36F_SEMAPHORED_FORMAT 31:31 +#define NVC36F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC36F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC36F_NON_STALL_INTERRUPT (0x00000020) +#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC36F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC36F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. 
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC36F_MEM_OP_A (0x00000028) +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC36F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC36F_MEM_OP_B (0x0000002c) +#define NVC36F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC36F_MEM_OP_C (0x00000030) +#define NVC36F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC36F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC36F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 
0x00000004 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. +#define NVC36F_MEM_OP_D (0x00000034) +#define NVC36F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC36F_MEM_OP_D_OPERATION 31:27 +#define NVC36F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC36F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC36F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC36F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC36F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC36F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC36F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC36F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC36F_SET_REFERENCE (0x00000050) +#define NVC36F_SET_REFERENCE_COUNT 31:0 +#define NVC36F_SEM_ADDR_LO (0x0000005c) +#define NVC36F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC36F_SEM_ADDR_HI (0x00000060) +#define NVC36F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC36F_SEM_PAYLOAD_LO (0x00000064) +#define NVC36F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC36F_SEM_PAYLOAD_HI (0x00000068) +#define NVC36F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC36F_SEM_EXECUTE (0x0000006c) +#define NVC36F_SEM_EXECUTE_OPERATION 2:0 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC36F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC36F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC36F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define 
NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC36F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC36F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC36F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC36F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC36F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC36F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC36F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC36F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC36F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC36F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC36F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC36F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC36F_WFI (0x00000078) +#define NVC36F_WFI_SCOPE 0:0 +#define NVC36F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC36F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC36F_WFI_SCOPE_ALL 0x00000001 +#define NVC36F_CRC_CHECK (0x0000007c) +#define NVC36F_CRC_CHECK_VALUE 31:0 +#define NVC36F_YIELD (0x00000080) +#define NVC36F_YIELD_OP 1:0 +#define NVC36F_YIELD_OP_NOP 0x00000000 +#define NVC36F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVC36F_YIELD_OP_TSG 0x00000003 +#define NVC36F_CLEAR_FAULTED (0x00000084) +#define NVC36F_CLEAR_FAULTED_CHID 11:0 +#define NVC36F_CLEAR_FAULTED_TYPE 31:31 +#define NVC36F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC36F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 + + +/* GPFIFO entry format */ +#define NVC36F_GP_ENTRY__SIZE 8 +#define NVC36F_GP_ENTRY0_FETCH 0:0 +#define NVC36F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC36F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC36F_GP_ENTRY0_GET 31:2 +#define NVC36F_GP_ENTRY0_OPERAND 31:0 +#define NVC36F_GP_ENTRY1_GET_HI 7:0 +#define NVC36F_GP_ENTRY1_PRIV 8:8 +#define NVC36F_GP_ENTRY1_PRIV_USER 0x00000000 +#define NVC36F_GP_ENTRY1_PRIV_KERNEL 0x00000001 +#define NVC36F_GP_ENTRY1_LEVEL 9:9 +#define NVC36F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC36F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC36F_GP_ENTRY1_LENGTH 30:10 +#define NVC36F_GP_ENTRY1_SYNC 31:31 +#define NVC36F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC36F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC36F_GP_ENTRY1_OPCODE 7:0 +#define NVC36F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC36F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC36F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC36F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC36F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC36F_DMA_METHOD_ADDRESS 11:0 +#define NVC36F_DMA_SUBDEVICE_MASK 15:4 +#define NVC36F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC36F_DMA_TERT_OP 17:16 +#define NVC36F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC36F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC36F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC36F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC36F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC36F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC36F_DMA_METHOD_COUNT 28:16 +#define NVC36F_DMA_IMMD_DATA 28:16 +#define NVC36F_DMA_SEC_OP 31:29 +#define NVC36F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC36F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC36F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVC36F_DMA_SEC_OP_NON_INC_METHOD 
(0x00000003) +#define NVC36F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC36F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC36F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC36F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC36F_DMA_INCR_ADDRESS 11:0 +#define NVC36F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_INCR_COUNT 28:16 +#define NVC36F_DMA_INCR_OPCODE 31:29 +#define NVC36F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC36F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC36F_DMA_NONINCR_ADDRESS 11:0 +#define NVC36F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_NONINCR_COUNT 28:16 +#define NVC36F_DMA_NONINCR_OPCODE 31:29 +#define NVC36F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC36F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVC36F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC36F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC36F_DMA_ONEINCR_COUNT 28:16 +#define NVC36F_DMA_ONEINCR_OPCODE 31:29 +#define NVC36F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC36F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC36F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC36F_DMA_IMMD_ADDRESS 11:0 +#define NVC36F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC36F_DMA_IMMD_DATA 28:16 +#define NVC36F_DMA_IMMD_OPCODE 31:29 +#define NVC36F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC36F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC36F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC36F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC36F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC36F_DMA_ENDSEG_OPCODE 31:29 +#define NVC36F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC36F_DMA_ADDRESS 12:2 +#define NVC36F_DMA_SUBCH 15:13 +#define NVC36F_DMA_OPCODE3 17:16 +#define NVC36F_DMA_OPCODE3_NONE (0x00000000) +#define NVC36F_DMA_COUNT 28:18 +#define NVC36F_DMA_OPCODE 31:29 +#define NVC36F_DMA_OPCODE_METHOD (0x00000000) +#define NVC36F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC36F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc36f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc36fsw.h b/src/common/sdk/nvidia/inc/class/clc36fsw.h new file mode 100644 index 000000000..e05b2d997 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc36fsw.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* This file is *not* auto-generated. */ + +#ifndef _clc36f_sw_h_ +#define _clc36f_sw_h_ + +#define NVC36F_NOTIFIERS_RC (0) +#define NVC36F_NOTIFIERS_REFCNT (1) +#define NVC36F_NOTIFIERS_NONSTALL (2) +#define NVC36F_NOTIFIERS_EVENTBUFFER (3) +#define NVC36F_NOTIFIERS_IDLECHANNEL (4) +#define NVC36F_NOTIFIERS_ENDCTX (5) +#define NVC36F_NOTIFIERS_SW (6) +#define NVC36F_NOTIFIERS_GR_DEBUG_INTR (7) +#define NVC36F_NOTIFIERS_REPLAYABLE_FAULT (8) +#define NVC36F_NOTIFIERS_MAXCOUNT (9) + +/* NvNotification[] fields and values */ +#define NVC36F_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NVC36F_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +#endif /* _clc36f_sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc370.h b/src/common/sdk/nvidia/inc/class/clc370.h new file mode 100644 index 000000000..075f0815e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc370.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc370_h_ +#define _clc370_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#include "class/cl5070.h" + +#define NVC370_DISPLAY (0x0000C370) + +/* event values */ +#define NVC370_NOTIFIERS_SW NV5070_NOTIFIERS_SW +#define NVC370_NOTIFIERS_BEGIN NV5070_NOTIFIERS_MAXCOUNT +#define NVC370_NOTIFIERS_VPR NVC370_NOTIFIERS_BEGIN + (0) +#define NVC370_NOTIFIERS_RG_SEM_NOTIFICATION NVC370_NOTIFIERS_VPR + (1) +#define NVC370_NOTIFIERS_WIN_SEM_NOTIFICATION NVC370_NOTIFIERS_RG_SEM_NOTIFICATION + (1) +#define NVC370_NOTIFIERS_MAXCOUNT NVC370_NOTIFIERS_WIN_SEM_NOTIFICATION + (1) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NVC370_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc370_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc371.h b/src/common/sdk/nvidia/inc/class/clc371.h new file mode 100644 index 000000000..a8398cce3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc371.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc371_h_ +#define _clc371_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC371_DISP_SF_USER (0x000C371) + +typedef volatile struct _clc371_tag0 { + NvU32 dispSfUserOffset[0x400]; +} _NvC371DispSfUser, NvC371DispSfUserMap; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _clc371_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc372sw.h b/src/common/sdk/nvidia/inc/class/clc372sw.h new file mode 100644 index 000000000..552ea088e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc372sw.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc372sw_h_ +#define _clc372sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC372_DISPLAY_SW (0x0000C372) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc372sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc373.h b/src/common/sdk/nvidia/inc/class/clc373.h new file mode 100644 index 000000000..c7070225d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc373.h @@ -0,0 +1,350 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc373_h_ +#define _clc373_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC373_DISP_CAPABILITIES 0xC373 + +typedef volatile struct _clc373_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvC373DispCapabilities,NvC373DispCapabilities_Map ; + + +#define NVC373_SYS_CAP 0x0 /* RW-4R */ +#define NVC373_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC373_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVC373_SYS_CAP_HEAD_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVC373_SYS_CAP_SOR0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVC373_SYS_CAP_SOR1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVC373_SYS_CAP_SOR2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC373_SYS_CAP_SOR3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define 
NVC373_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC373_SYS_CAP_SOR4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC373_SYS_CAP_SOR5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC373_SYS_CAP_SOR6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC373_SYS_CAP_SOR7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC373_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC373_SYS_CAP_SOR_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB 0x4 /* RW-4R */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS 0:0 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS 1:1 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS 2:2 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS 3:3 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS 4:4 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS 5:5 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS 6:6 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS 7:7 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS 8:8 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW8_EXISTS_NO 0x00000000 /* RW--V */ 
+#define NVC373_SYS_CAPB_WINDOW8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS 9:9 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS 10:10 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS 11:11 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS 12:12 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS 13:13 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS 14:14 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS 15:15 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS 16:16 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS 17:17 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS 18:18 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS 19:19 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS 20:20 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS 21:21 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS 22:22 /* RWIVF */ +#define 
NVC373_SYS_CAPB_WINDOW22_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS 23:23 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS 24:24 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS 25:25 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS 26:26 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS 27:27 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS 28:28 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS 29:29 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW29_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS 30:30 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW30_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS 31:31 /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1 32 /* */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC373_SYS_CAPB_WINDOW_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA(i) (0x30+(i)*32) /* RW-4A */ +#define NVC373_HEAD_CAPA__SIZE_1 8 /* */ +#define NVC373_HEAD_CAPA_SCALER 0:0 /* RWIVF */ +#define NVC373_HEAD_CAPA_SCALER_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422 1:1 /* RWIVF */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_SCALER_HAS_YUV422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_HSAT 2:2 /* RWIVF */ +#define 
NVC373_HEAD_CAPA_HSAT_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_HSAT_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_HSAT_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_OCSC 3:3 /* RWIVF */ +#define NVC373_HEAD_CAPA_OCSC_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_OCSC_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_OCSC_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_YUV422 4:4 /* RWIVF */ +#define NVC373_HEAD_CAPA_YUV422_TRUE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_YUV422_FALSE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_YUV422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_LUT_TYPE 6:5 /* RWIVF */ +#define NVC373_HEAD_CAPA_LUT_TYPE_NONE 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_257 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_1025 0x00000002 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_TYPE_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION 7:7 /* RWIVF */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_EARLY 0x00000000 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_LATE 0x00000001 /* RW--V */ +#define NVC373_HEAD_CAPA_LUT_LOCATION_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPD(i) (0x3c+(i)*32) /* RW-4A */ +#define NVC373_HEAD_CAPD__SIZE_1 8 /* */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP422 15:0 /* RWIUF */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP422_INIT 0x00000000 /* RWI-V */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP444 31:16 /* RWIUF */ +#define NVC373_HEAD_CAPD_MAX_PIXELS_2TAP444_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC373_SOR_CAP__SIZE_1 8 /* */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC373_SOR_CAP_DUAL_TMDS_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE_INIT 0x00000000 /* RWI-V */ +#define 
NVC373_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC373_SOR_CAP_SDI_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC373_SOR_CAP_DP_A_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC373_SOR_CAP_DP_B_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC373_SOR_CAP_DP_INTERLACE_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC373_SOR_CAP_DP_8_LANES_INIT 0x00000000 /* RWI-V */ +#define NVC373_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC373_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC373_SOR_CLK_CAP(i) (0x608+(i)*4) /* RW-4A */ +#define NVC373_SOR_CLK_CAP__SIZE_1 8 /* */ +#define NVC373_SOR_CLK_CAP_DP_MAX 7:0 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_DP_MAX_INIT 0x00000036 /* RWI-V */ +#define NVC373_SOR_CLK_CAP_TMDS_MAX 23:16 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_TMDS_MAX_INIT 0x0000003C /* RWI-V */ +#define NVC373_SOR_CLK_CAP_LVDS_MAX 31:24 /* RWIUF */ +#define NVC373_SOR_CLK_CAP_LVDS_MAX_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc373_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc37a.h b/src/common/sdk/nvidia/inc/class/clc37a.h new file mode 100644 index 000000000..4a096b68b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37a.h @@ -0,0 +1,213 @@ +/* + * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _clc37a__h_ +#define _clc37a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37A_CURSOR_IMM_CHANNEL_PIO (0x0000C37A) + +typedef volatile struct _clc37a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x37B]; +} NVC37ADispCursorImmControlPio; + +#define NVC37A_FREE (0x00000008) +#define NVC37A_FREE_COUNT 5:0 +#define NVC37A_UPDATE (0x00000200) +#define NVC37A_UPDATE_RELEASE_ELV 0:0 +#define NVC37A_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37A_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37A_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37A_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define 
NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC37A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVC37A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define 
NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define 
NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC37A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc37a_h + diff --git a/src/common/sdk/nvidia/inc/class/clc37b.h b/src/common/sdk/nvidia/inc/class/clc37b.h new file mode 100644 index 000000000..b61700eec --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37b.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 1993-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _clC37b_h_ +#define _clC37b_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37B_WINDOW_IMM_CHANNEL_DMA (0x0000C37B) + +// dma opcode instructions +#define NVC37B_DMA +#define NVC37B_DMA_OPCODE 31:29 +#define NVC37B_DMA_OPCODE_METHOD 0x00000000 +#define NVC37B_DMA_OPCODE_JUMP 0x00000001 +#define NVC37B_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC37B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC37B_DMA_METHOD_COUNT 27:18 +#define NVC37B_DMA_METHOD_OFFSET 13:2 +#define NVC37B_DMA_DATA 31:0 +#define NVC37B_DMA_DATA_NOP 0x00000000 +#define NVC37B_DMA_JUMP_OFFSET 11:2 +#define NVC37B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC37B_PUT (0x00000000) +#define NVC37B_PUT_PTR 9:0 +#define NVC37B_GET (0x00000004) +#define NVC37B_GET_PTR 9:0 +#define NVC37B_UPDATE (0x00000200) +#define NVC37B_UPDATE_RELEASE_ELV 0:0 +#define NVC37B_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37B_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW 1:1 +#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC37B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC37B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC37B_SET_POINT_OUT_X 15:0 +#define NVC37B_SET_POINT_OUT_Y 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC37b_h diff --git a/src/common/sdk/nvidia/inc/class/clc37d.h b/src/common/sdk/nvidia/inc/class/clc37d.h new file mode 100644 index 000000000..9ac705007 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37d.h @@ -0,0 +1,953 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _clC37d_h_ +#define _clC37d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37D_CORE_CHANNEL_DMA (0x0000C37D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC37D_DMA +#define NVC37D_DMA_OPCODE 31:29 +#define NVC37D_DMA_OPCODE_METHOD 0x00000000 +#define NVC37D_DMA_OPCODE_JUMP 0x00000001 +#define NVC37D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC37D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC37D_DMA_METHOD_COUNT 27:18 +#define NVC37D_DMA_METHOD_OFFSET 13:2 +#define NVC37D_DMA_DATA 31:0 +#define NVC37D_DMA_DATA_NOP 0x00000000 +#define NVC37D_DMA_JUMP_OFFSET 11:2 +#define NVC37D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC37D_PUT (0x00000000) +#define NVC37D_PUT_PTR 9:0 +#define NVC37D_GET (0x00000004) +#define NVC37D_GET_PTR 9:0 +#define NVC37D_UPDATE (0x00000200) +#define NVC37D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC37D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC37D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC37D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC37D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVC37D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC37D_UPDATE_RELEASE_ELV 0:0 +#define NVC37D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define 
NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC37D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC37D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC37D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC37D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC37D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC37D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL (0x00000210) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC37D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define 
NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC37D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define 
NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define 
NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC37D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC37D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC37D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC37D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC37D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC37D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC37D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC37D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC37D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC37D_PIOR_SET_CONTROL(a) 
(0x00000280 + (a)*0x00000020) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC37D_PIOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_TMDS_ENC (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_SDI_SD_ENC (0x00000001) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_EXT_SDI_HD_ENC (0x00000002) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_OUT (0x00000004) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_IN (0x00000005) +#define NVC37D_PIOR_SET_CONTROL_PROTOCOL_DIST_RENDER_INOUT (0x00000006) +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_PIOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_PIOR_SET_CUSTOM_REASON(a) (0x00000284 + (a)*0x00000020) +#define NVC37D_PIOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC37D_PIOR_SET_SW_SPARE_A(a) (0x00000288 + (a)*0x00000020) +#define NVC37D_PIOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_PIOR_SET_SW_SPARE_B(a) (0x0000028C + (a)*0x00000020) +#define NVC37D_PIOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC37D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC37D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_DSI (0x0000000A) +#define NVC37D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC37D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC37D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC37D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC37D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) 
+#define NVC37D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC37D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC37D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC37D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define 
NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define 
NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080) +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC37D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) 
+#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT 17:16 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_NONE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_257 (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_LUT_USAGE_1025 (0x00000002) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) + +#define NVC37D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_SAT_COS 15:4 +#define NVC37D_HEAD_SET_PROCAMP_SAT_SINE 27:16 +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION 29:29 +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL 31:30 +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_AUTO (0x00000000) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_VIDEO (0x00000001) +#define NVC37D_HEAD_SET_PROCAMP_BLACK_LEVEL_GRAPHICS (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define 
NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC37D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVC37D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 
(0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define 
NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define 
NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC37D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400) +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVC37D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVC37D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVC37D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NVC37D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400) +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT 5:4 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) 
+#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8 +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400) +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_MODE 2:2 +#define NVC37D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12 +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14 +#define 
NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000) +#define NVC37D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400) +#define NVC37D_HEAD_SET_LOCK_CHAIN_POSITION 3:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400) +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NVC37D_HEAD_SET_DESKTOP_COLOR(a) (0x00002060 + (a)*0x00000400) +#define NVC37D_HEAD_SET_DESKTOP_COLOR_ALPHA 7:0 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_RED 15:8 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_GREEN 23:16 +#define NVC37D_HEAD_SET_DESKTOP_COLOR_BLUE 31:24 +#define NVC37D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NVC37D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NVC37D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NVC37D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NVC37D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NVC37D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NVC37D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) +#define NVC37D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NVC37D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400) +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10 +#define NVC37D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400) +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10 +#define NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20 +#define NVC37D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NVC37D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NVC37D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 +#define NVC37D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004) +#define NVC37D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE 
31:31 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA 29:28 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_NONE (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_SRGB (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV8_10 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_DE_GAMMA_YUV12 (0x00000003) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT(a) (0x000020A4 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE 1:0 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_257 (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_SIZE_SIZE_1025 (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE 5:4 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_UNITY (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XRBIAS (0x00000001) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_RANGE_XVYCC (0x00000002) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE 9:8 +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INDEX (0x00000000) +#define NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT_OUTPUT_MODE_INTERPOLATE (0x00000001) +#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT(a) (0x000020A8 + (a)*0x00000400) +#define NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT_ORIGIN 31:0 +#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(a) (0x000020AC + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT_HANDLE 31:0 +#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC37D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 4:0 +#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) 
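/*
 * Illustrative sketch, not part of the original header: head-scoped methods
 * such as SET_CONTROL_CURSOR(a) above take a head index and apply a
 * 0x400-byte stride per head, so the same field layout repeats for every
 * head.  example_control_cursor_offset() is a hypothetical helper that
 * simply evaluates that arithmetic.
 */
#include <stdint.h>

static inline uint32_t example_control_cursor_offset(uint32_t head)
{
    /* Head 0 -> 0x209C, head 1 -> 0x249C, head 2 -> 0x289C, ... */
    return 0x0000209Cu + head * 0x00000400u;
}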
+#define NVC37D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR(i) (0x00000060 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR__SIZE_1 4 +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR0 (0x00000060) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR1 (0x00000061) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR2 (0x00000062) +#define NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR3 (0x00000063) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR(i) (0x00000060 +(i)) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR__SIZE_1 4 +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR0 (0x00000060) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR1 (0x00000061) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR2 (0x00000062) +#define NVC37D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_PIOR3 (0x00000063) +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0 +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NVC37D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NVC37D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NVC37D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_C_CODE 
31:0 +#define NVC37D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400) +#define NVC37D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NVC37D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400) +#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0 +#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000) +#define NVC37D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001) +#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4 +#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2 +#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000) +#define NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001) +#define NVC37D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400) +#define NVC37D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0 +#define NVC37D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC37d_h diff --git a/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h b/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h new file mode 100644 index 000000000..88b5b7769 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37dcrcnotif.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2003-2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __clc37dcrcnotif_h__ +#define __clc37dcrcnotif_h__ +/* This file is autogenerated. 
Do not edit */ + +#define NVC37D_NOTIFIER_CRC_STATUS_0 0x00000000 +#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE 0:0 +#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE 0x00000000 +#define NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE 0x00000001 +#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW 3:3 +#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW_FALSE 0x00000000 +#define NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW_TRUE 0x00000001 +#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW 4:4 +#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW_FALSE 0x00000000 +#define NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW_TRUE 0x00000001 +#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW 5:5 +#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_FALSE 0x00000000 +#define NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW_TRUE 0x00000001 +#define NVC37D_NOTIFIER_CRC_STATUS_0_COUNT 27:16 +#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11 0x0000000B +#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC 31:0 +#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12 0x0000000C +#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC 31:0 +#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13 0x0000000D +#define NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC 31:0 +#define NVC37D_NOTIFIER_CRC_CRC_ENTRY1_21 0x00000015 + +#endif // __clc37dcrcnotif_h__ diff --git a/src/common/sdk/nvidia/inc/class/clc37dswspare.h b/src/common/sdk/nvidia/inc/class/clc37dswspare.h new file mode 100644 index 000000000..44b21a8eb --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37dswspare.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc37d_sw_spare_h_ +#define _clc37d_sw_spare_h_ + +/* This file is *not* auto-generated. 
*/ + +#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF 1:0 +#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_NO_PREF (0x00000000) +#define NVC37D_HEAD_SET_SW_SPARE_A_CODE_VPLL_REF_QSYNC (0x00000001) + +#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK 2:2 +#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK_FALSE (0x00000000) +#define NVC37D_HEAD_SET_SW_SPARE_A_DISABLE_MID_FRAME_AND_DWCF_WATERMARK_TRUE (0x00000001) + +#endif // _clc37d_sw_spare_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc37e.h b/src/common/sdk/nvidia/inc/class/clc37e.h new file mode 100644 index 000000000..f46929a45 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc37e.h @@ -0,0 +1,498 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _clC37e_h_ +#define _clC37e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC37E_WINDOW_CHANNEL_DMA (0x0000C37E) + +// dma opcode instructions +#define NVC37E_DMA +#define NVC37E_DMA_OPCODE 31:29 +#define NVC37E_DMA_OPCODE_METHOD 0x00000000 +#define NVC37E_DMA_OPCODE_JUMP 0x00000001 +#define NVC37E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC37E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC37E_DMA_METHOD_COUNT 27:18 +#define NVC37E_DMA_METHOD_OFFSET 13:2 +#define NVC37E_DMA_DATA 31:0 +#define NVC37E_DMA_DATA_NOP 0x00000000 +#define NVC37E_DMA_JUMP_OFFSET 11:2 +#define NVC37E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC37E_PUT (0x00000000) +#define NVC37E_PUT_PTR 9:0 +#define NVC37E_GET (0x00000004) +#define NVC37E_GET_PTR 9:0 +#define NVC37E_UPDATE (0x00000200) +#define NVC37E_UPDATE_RELEASE_ELV 0:0 +#define NVC37E_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC37E_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC37E_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12 +#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000) +#define NVC37E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001) +#define NVC37E_GET_LINE (0x00000208) +#define NVC37E_GET_LINE_LINE 15:0 +#define NVC37E_SET_SEMAPHORE_CONTROL (0x0000020C) +#define NVC37E_SET_SEMAPHORE_CONTROL_OFFSET 7:0 +#define 
NVC37E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVC37E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVC37E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVC37E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218) +#define NVC37E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NVC37E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C) +#define NVC37E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC37E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVC37E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC37E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC37E_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC37E_SET_SIZE (0x00000224) +#define NVC37E_SET_SIZE_WIDTH 15:0 +#define NVC37E_SET_SIZE_HEIGHT 31:16 +#define NVC37E_SET_STORAGE (0x00000228) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC37E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC37E_SET_STORAGE_MEMORY_LAYOUT 4:4 +#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC37E_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC37E_SET_PARAMS (0x0000022C) +#define NVC37E_SET_PARAMS_FORMAT 7:0 +#define NVC37E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC37E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC37E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC37E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC37E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC37E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC37E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC37E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC37E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC37E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVC37E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NVC37E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC37E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVC37E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC37E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8V8_N422R (0x00000037) +#define NVC37E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVC37E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10V10_N422R (0x00000057) +#define NVC37E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N444 (0x0000005A) +#define NVC37E_SET_PARAMS_FORMAT_Y10___U10___V10_N420 (0x0000005B) +#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define 
NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC37E_SET_PARAMS_FORMAT_Y12___U12V12_N422R (0x00000077) +#define NVC37E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N444 (0x0000007A) +#define NVC37E_SET_PARAMS_FORMAT_Y12___U12___V12_N420 (0x0000007B) +#define NVC37E_SET_PARAMS_COLOR_SPACE 9:8 +#define NVC37E_SET_PARAMS_COLOR_SPACE_RGB (0x00000000) +#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC37E_SET_PARAMS_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC37E_SET_PARAMS_INPUT_RANGE 13:12 +#define NVC37E_SET_PARAMS_INPUT_RANGE_BYPASS (0x00000000) +#define NVC37E_SET_PARAMS_INPUT_RANGE_LIMITED (0x00000001) +#define NVC37E_SET_PARAMS_INPUT_RANGE_FULL (0x00000002) +#define NVC37E_SET_PARAMS_UNDERREPLICATE 16:16 +#define NVC37E_SET_PARAMS_UNDERREPLICATE_DISABLE (0x00000000) +#define NVC37E_SET_PARAMS_UNDERREPLICATE_ENABLE (0x00000001) +#define NVC37E_SET_PARAMS_DE_GAMMA 21:20 +#define NVC37E_SET_PARAMS_DE_GAMMA_NONE (0x00000000) +#define NVC37E_SET_PARAMS_DE_GAMMA_SRGB (0x00000001) +#define NVC37E_SET_PARAMS_DE_GAMMA_YUV8_10 (0x00000002) +#define NVC37E_SET_PARAMS_DE_GAMMA_YUV12 (0x00000003) +#define NVC37E_SET_PARAMS_CSC 17:17 +#define NVC37E_SET_PARAMS_CSC_DISABLE (0x00000000) +#define NVC37E_SET_PARAMS_CSC_ENABLE (0x00000001) +#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC37E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC37E_SET_PARAMS_SWAP_UV 19:19 +#define NVC37E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVC37E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC37E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC37E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC37E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004) +#define NVC37E_SET_CONTEXT_DMA_ISO_HANDLE 31:0 +#define NVC37E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004) +#define NVC37E_SET_OFFSET_ORIGIN 31:0 +#define NVC37E_SET_PROCESSING (0x00000280) +#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS 0:0 +#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS_DISABLE (0x00000000) +#define NVC37E_SET_PROCESSING_USE_GAIN_OFFSETS_ENABLE (0x00000001) +#define NVC37E_SET_CONVERSION_RED (0x00000284) +#define NVC37E_SET_CONVERSION_RED_GAIN 15:0 +#define NVC37E_SET_CONVERSION_RED_OFFSET 31:16 +#define NVC37E_SET_CONVERSION_GREEN (0x00000288) +#define NVC37E_SET_CONVERSION_GREEN_GAIN 15:0 +#define NVC37E_SET_CONVERSION_GREEN_OFFSET 31:16 +#define NVC37E_SET_CONVERSION_BLUE (0x0000028C) +#define NVC37E_SET_CONVERSION_BLUE_GAIN 15:0 +#define NVC37E_SET_CONVERSION_BLUE_OFFSET 31:16 +#define NVC37E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC37E_SET_POINT_IN_X 15:0 +#define NVC37E_SET_POINT_IN_Y 31:16 +#define NVC37E_SET_SIZE_IN (0x00000298) +#define NVC37E_SET_SIZE_IN_WIDTH 14:0 +#define NVC37E_SET_SIZE_IN_HEIGHT 30:16 +#define NVC37E_SET_SIZE_OUT (0x000002A4) +#define NVC37E_SET_SIZE_OUT_WIDTH 14:0 +#define NVC37E_SET_SIZE_OUT_HEIGHT 30:16 +#define NVC37E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC37E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define 
NVC37E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC37E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC37E_SET_CONTROL_INPUT_LUT (0x000002B0) +#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE 1:0 +#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257 (0x00000000) +#define NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 (0x00000002) +#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE 5:4 +#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY (0x00000000) +#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XRBIAS (0x00000001) +#define NVC37E_SET_CONTROL_INPUT_LUT_RANGE_XVYCC (0x00000002) +#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE 9:8 +#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INDEX (0x00000000) +#define NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE (0x00000001) +#define NVC37E_SET_OFFSET_INPUT_LUT (0x000002B4) +#define NVC37E_SET_OFFSET_INPUT_LUT_ORIGIN 31:0 +#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT (0x000002B8) +#define NVC37E_SET_CONTEXT_DMA_INPUT_LUT_HANDLE 31:0 +#define NVC37E_SET_CSC_RED2RED (0x000002BC) +#define NVC37E_SET_CSC_RED2RED_COEFF 18:0 +#define NVC37E_SET_CSC_GREEN2RED (0x000002C0) +#define NVC37E_SET_CSC_GREEN2RED_COEFF 18:0 +#define NVC37E_SET_CSC_BLUE2RED (0x000002C4) +#define NVC37E_SET_CSC_BLUE2RED_COEFF 18:0 +#define NVC37E_SET_CSC_CONSTANT2RED (0x000002C8) +#define NVC37E_SET_CSC_CONSTANT2RED_COEFF 18:0 +#define NVC37E_SET_CSC_RED2GREEN (0x000002CC) +#define NVC37E_SET_CSC_RED2GREEN_COEFF 18:0 +#define NVC37E_SET_CSC_GREEN2GREEN (0x000002D0) +#define NVC37E_SET_CSC_GREEN2GREEN_COEFF 18:0 +#define NVC37E_SET_CSC_BLUE2GREEN (0x000002D4) +#define NVC37E_SET_CSC_BLUE2GREEN_COEFF 18:0 +#define NVC37E_SET_CSC_CONSTANT2GREEN (0x000002D8) +#define NVC37E_SET_CSC_CONSTANT2GREEN_COEFF 18:0 +#define NVC37E_SET_CSC_RED2BLUE (0x000002DC) +#define NVC37E_SET_CSC_RED2BLUE_COEFF 18:0 +#define NVC37E_SET_CSC_GREEN2BLUE (0x000002E0) +#define NVC37E_SET_CSC_GREEN2BLUE_COEFF 18:0 +#define NVC37E_SET_CSC_BLUE2BLUE (0x000002E4) +#define NVC37E_SET_CSC_BLUE2BLUE_COEFF 18:0 +#define NVC37E_SET_CSC_CONSTANT2BLUE (0x000002E8) +#define NVC37E_SET_CSC_CONSTANT2BLUE_COEFF 18:0 +#define NVC37E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC37E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVC37E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define 
NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO 
(0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC37E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC37E_SET_KEY_ALPHA (0x000002F8) +#define NVC37E_SET_KEY_ALPHA_MIN 15:0 +#define NVC37E_SET_KEY_ALPHA_MAX 31:16 +#define NVC37E_SET_KEY_RED_CR (0x000002FC) +#define NVC37E_SET_KEY_RED_CR_MIN 15:0 +#define NVC37E_SET_KEY_RED_CR_MAX 31:16 +#define NVC37E_SET_KEY_GREEN_Y (0x00000300) +#define NVC37E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC37E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC37E_SET_KEY_BLUE_CB (0x00000304) +#define NVC37E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC37E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC37E_SET_PRESENT_CONTROL (0x00000308) +#define NVC37E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC37E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC37E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC37E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC37E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC37E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC37E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC37E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC37E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC37E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC37E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC37E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC37E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define 
NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC37E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE 
(0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE 
(0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC37E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC37e_h diff --git a/src/common/sdk/nvidia/inc/class/clc397.h b/src/common/sdk/nvidia/inc/class/clc397.h new file mode 100644 index 000000000..2345ee662 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc397.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc397_h_ +#define _clc397_h_ + +#define VOLTA_A 0xC397 + +#endif // _clc397_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc3b0.h b/src/common/sdk/nvidia/inc/class/clc3b0.h new file mode 100644 index 000000000..5ccaeddc0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc3b0.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _clc3b0_h_ +#define _clc3b0_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC3B0_VIDEO_DECODER (0x0000C3B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc3b0_h + diff --git a/src/common/sdk/nvidia/inc/class/clc3b5.h b/src/common/sdk/nvidia/inc/class/clc3b5.h new file mode 100644 index 000000000..74ee1b74f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc3b5.h @@ -0,0 +1,278 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc3b5_h_ +#define _clc3b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define VOLTA_DMA_COPY_A (0x0000C3B5) + +#define NVC3B5_NOP (0x00000100) +#define NVC3B5_NOP_PARAMETER 31:0 +#define NVC3B5_PM_TRIGGER (0x00000140) +#define NVC3B5_PM_TRIGGER_V 31:0 +#define NVC3B5_SET_SEMAPHORE_A (0x00000240) +#define NVC3B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC3B5_SET_SEMAPHORE_B (0x00000244) +#define NVC3B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC3B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC3B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC3B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC3B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC3B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC3B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC3B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC3B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC3B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC3B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC3B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC3B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC3B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC3B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2 +#define NVC3B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC3B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC3B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC3B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC3B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC3B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2 +#define NVC3B5_LAUNCH_DMA (0x00000300) +#define NVC3B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC3B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC3B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC3B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_FLUSH_TYPE 25:25 +#define NVC3B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000) +#define NVC3B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC3B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC3B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC3B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC3B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC3B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC3B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC3B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define 
NVC3B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC3B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC3B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC3B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC3B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC3B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC3B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC3B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC3B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC3B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC3B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC3B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC3B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC3B5_LAUNCH_DMA_SRC_BYPASS_L2 20:20 +#define NVC3B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC3B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC3B5_LAUNCH_DMA_DST_BYPASS_L2 21:21 +#define NVC3B5_LAUNCH_DMA_DST_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC3B5_LAUNCH_DMA_DST_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC3B5_LAUNCH_DMA_VPRMODE 23:22 +#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC3B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC3B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC3B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC3B5_OFFSET_IN_UPPER (0x00000400) +#define NVC3B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC3B5_OFFSET_IN_LOWER (0x00000404) +#define NVC3B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC3B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC3B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC3B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC3B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC3B5_PITCH_IN (0x00000410) +#define NVC3B5_PITCH_IN_VALUE 31:0 +#define NVC3B5_PITCH_OUT (0x00000414) +#define NVC3B5_PITCH_OUT_VALUE 31:0 +#define NVC3B5_LINE_LENGTH_IN (0x00000418) +#define NVC3B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC3B5_LINE_COUNT (0x0000041C) +#define NVC3B5_LINE_COUNT_VALUE 31:0 +#define NVC3B5_SET_REMAP_CONST_A (0x00000700) +#define NVC3B5_SET_REMAP_CONST_A_V 31:0 +#define NVC3B5_SET_REMAP_CONST_B (0x00000704) +#define NVC3B5_SET_REMAP_CONST_B_V 31:0 +#define 
NVC3B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC3B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC3B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC3B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC3B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC3B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define 
NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC3B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC3B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC3B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC3B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC3B5_SET_DST_WIDTH (0x00000710) +#define NVC3B5_SET_DST_WIDTH_V 31:0 +#define NVC3B5_SET_DST_HEIGHT (0x00000714) +#define NVC3B5_SET_DST_HEIGHT_V 31:0 +#define NVC3B5_SET_DST_DEPTH (0x00000718) +#define NVC3B5_SET_DST_DEPTH_V 31:0 +#define NVC3B5_SET_DST_LAYER (0x0000071C) +#define NVC3B5_SET_DST_LAYER_V 31:0 +#define NVC3B5_SET_DST_ORIGIN (0x00000720) +#define NVC3B5_SET_DST_ORIGIN_X 15:0 +#define NVC3B5_SET_DST_ORIGIN_Y 31:16 +#define NVC3B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC3B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC3B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC3B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC3B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC3B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC3B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC3B5_SET_SRC_WIDTH (0x0000072C) +#define NVC3B5_SET_SRC_WIDTH_V 31:0 +#define NVC3B5_SET_SRC_HEIGHT (0x00000730) +#define NVC3B5_SET_SRC_HEIGHT_V 31:0 +#define NVC3B5_SET_SRC_DEPTH (0x00000734) +#define NVC3B5_SET_SRC_DEPTH_V 31:0 +#define NVC3B5_SET_SRC_LAYER (0x00000738) +#define NVC3B5_SET_SRC_LAYER_V 31:0 +#define NVC3B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC3B5_SET_SRC_ORIGIN_X 15:0 +#define NVC3B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC3B5_SRC_ORIGIN_X (0x00000744) +#define NVC3B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC3B5_SRC_ORIGIN_Y (0x00000748) +#define NVC3B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC3B5_DST_ORIGIN_X (0x0000074C) +#define NVC3B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC3B5_DST_ORIGIN_Y (0x00000750) +#define NVC3B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC3B5_PM_TRIGGER_END (0x00001114) +#define NVC3B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc3b5_h + diff --git a/src/common/sdk/nvidia/inc/class/clc3b5sw.h b/src/common/sdk/nvidia/inc/class/clc3b5sw.h new file mode 100644 index 000000000..2c1055c9d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc3b5sw.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _clc3b5sw_h_ +#define _clc3b5sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file is *not* auto-generated. */ + +// +// Using VERSION_0 will cause the API to interpret +// engineType as a CE engine instance. This allows +// for backward compatibility with 85B5sw and 90B5sw. +// +#define NVC3B5_ALLOCATION_PARAMETERS_VERSION_0 0 + +// +// Using VERSION_1 will cause the API to interpret +// engineType as an NV2080_ENGINE_TYPE ordinal. +// +#define NVC3B5_ALLOCATION_PARAMETERS_VERSION_1 1 + +typedef struct +{ + NvU32 version; + NvU32 engineType; +} NVC3B5_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc3b5sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/clc3b7.h b/src/common/sdk/nvidia/inc/class/clc3b7.h new file mode 100644 index 000000000..0a313b87f --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc3b7.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef clc3b7_h +#define clc3b7_h + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC3B7_VIDEO_ENCODER (0x0000C3B7) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc3b7_h + diff --git a/src/common/sdk/nvidia/inc/class/clc3c0.h b/src/common/sdk/nvidia/inc/class/clc3c0.h new file mode 100644 index 000000000..76b45664a --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc3c0.h @@ -0,0 +1,703 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_volta_compute_a_h_ +#define _cl_volta_compute_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl volta_compute_a */ + +#include "nvtypes.h" + +#define VOLTA_COMPUTE_A 0xC3C0 + +#define NVC3C0_SET_OBJECT 0x0000 +#define NVC3C0_SET_OBJECT_CLASS_ID 15:0 +#define NVC3C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC3C0_NO_OPERATION 0x0100 +#define NVC3C0_NO_OPERATION_V 31:0 + +#define NVC3C0_SET_NOTIFY_A 0x0104 +#define NVC3C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC3C0_SET_NOTIFY_B 0x0108 +#define NVC3C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC3C0_NOTIFY 0x010c +#define NVC3C0_NOTIFY_TYPE 31:0 +#define NVC3C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC3C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC3C0_WAIT_FOR_IDLE 0x0110 +#define NVC3C0_WAIT_FOR_IDLE_V 31:0 + +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC3C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC3C0_SEND_GO_IDLE 0x013c +#define NVC3C0_SEND_GO_IDLE_V 31:0 + +#define NVC3C0_PM_TRIGGER 0x0140 +#define NVC3C0_PM_TRIGGER_V 31:0 + +#define NVC3C0_PM_TRIGGER_WFI 0x0144 +#define NVC3C0_PM_TRIGGER_WFI_V 31:0 + +#define NVC3C0_FE_ATOMIC_SEQUENCE_BEGIN 0x0148 +#define NVC3C0_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0 + 
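(Aside on the notation used throughout these class headers: the HI:LO pairs, for example NVC3C0_SET_OBJECT_ENGINE_ID 20:16, name a bit range within a 32-bit method data word rather than a value. The following is a minimal, self-contained sketch of how such range defines are typically consumed, using the conditional-operator trick that DRF-style helpers elsewhere in this driver rely on; the FIELD_* macro names and the c3c0SetObjectData() helper are illustrative only and are not part of this header.)

#include "nvtypes.h"

/* Bit-range helpers: a field defined as HI:LO substitutes textually, so
 * (1 ? HI:LO) evaluates to HI and (0 ? HI:LO) evaluates to LO. */
#define FIELD_HI(f)     (1 ? f)
#define FIELD_LO(f)     (0 ? f)
#define FIELD_MASK(f)   ((0xFFFFFFFFu >> (31 - FIELD_HI(f) + FIELD_LO(f))) << FIELD_LO(f))
#define FIELD_NUM(f, v) (((NvU32)(v) << FIELD_LO(f)) & FIELD_MASK(f))

/* Example: pack the data word for the NVC3C0_SET_OBJECT method from a class
 * ID and an engine ID, e.g. c3c0SetObjectData(VOLTA_COMPUTE_A, 0). */
static inline NvU32 c3c0SetObjectData(NvU32 classId, NvU32 engineId)
{
    return FIELD_NUM(NVC3C0_SET_OBJECT_CLASS_ID,  classId) |
           FIELD_NUM(NVC3C0_SET_OBJECT_ENGINE_ID, engineId);
}
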
+#define NVC3C0_FE_ATOMIC_SEQUENCE_END 0x014c +#define NVC3C0_FE_ATOMIC_SEQUENCE_END_V 31:0 + +#define NVC3C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC3C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC3C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC3C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC3C0_LINE_LENGTH_IN 0x0180 +#define NVC3C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC3C0_LINE_COUNT 0x0184 +#define NVC3C0_LINE_COUNT_VALUE 31:0 + +#define NVC3C0_OFFSET_OUT_UPPER 0x0188 +#define NVC3C0_OFFSET_OUT_UPPER_VALUE 16:0 + +#define NVC3C0_OFFSET_OUT 0x018c +#define NVC3C0_OFFSET_OUT_VALUE 31:0 + +#define NVC3C0_PITCH_OUT 0x0190 +#define NVC3C0_PITCH_OUT_VALUE 31:0 + +#define NVC3C0_SET_DST_BLOCK_SIZE 0x0194 +#define NVC3C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC3C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC3C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC3C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC3C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC3C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC3C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC3C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC3C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC3C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC3C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC3C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC3C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC3C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC3C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC3C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC3C0_SET_DST_WIDTH 0x0198 +#define NVC3C0_SET_DST_WIDTH_V 31:0 + +#define NVC3C0_SET_DST_HEIGHT 0x019c +#define NVC3C0_SET_DST_HEIGHT_V 31:0 + +#define NVC3C0_SET_DST_DEPTH 0x01a0 +#define NVC3C0_SET_DST_DEPTH_V 31:0 + +#define NVC3C0_SET_DST_LAYER 0x01a4 +#define NVC3C0_SET_DST_LAYER_V 31:0 + +#define NVC3C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC3C0_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC3C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC3C0_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC3C0_LAUNCH_DMA 0x01b0 +#define NVC3C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC3C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC3C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC3C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC3C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC3C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC3C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC3C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC3C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC3C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC3C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC3C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC3C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC3C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC3C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC3C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC3C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC3C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC3C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC3C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC3C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC3C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 
+#define NVC3C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC3C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC3C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC3C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC3C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC3C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC3C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC3C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC3C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC3C0_LOAD_INLINE_DATA 0x01b4 +#define NVC3C0_LOAD_INLINE_DATA_V 31:0 + +#define NVC3C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC3C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC3C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC3C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC3C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC3C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC3C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC3C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC3C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC3C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC3C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC3C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC3C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC3C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC3C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVC3C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define NVC3C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVC3C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVC3C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVC3C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVC3C0_PERFMON_TRANSFER 0x0210 +#define NVC3C0_PERFMON_TRANSFER_V 31:0 + +#define NVC3C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVC3C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC3C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC3C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC3C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC3C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC3C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC3C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC3C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC3C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC3C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC3C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC3C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC3C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC3C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC3C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC3C0_SET_RESERVED_SW_METHOD00 0x0220 +#define NVC3C0_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD01 0x0224 +#define NVC3C0_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD02 0x0228 +#define NVC3C0_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD03 0x022c +#define NVC3C0_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD04 0x0230 +#define NVC3C0_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD05 0x0234 +#define NVC3C0_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD06 0x0238 +#define NVC3C0_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD07 0x023c +#define NVC3C0_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define 
NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC3C0_SET_CWD_REF_COUNTER 0x0248 +#define NVC3C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVC3C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVC3C0_SET_RESERVED_SW_METHOD08 0x024c +#define NVC3C0_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD09 0x0250 +#define NVC3C0_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD10 0x0254 +#define NVC3C0_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD11 0x0258 +#define NVC3C0_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD12 0x025c +#define NVC3C0_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD13 0x0260 +#define NVC3C0_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD14 0x0264 +#define NVC3C0_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVC3C0_SET_RESERVED_SW_METHOD15 0x0268 +#define NVC3C0_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVC3C0_SET_SCG_CONTROL 0x0270 +#define NVC3C0_SET_SCG_CONTROL_COMPUTE1_MAX_SM_COUNT 8:0 +#define NVC3C0_SET_SCG_CONTROL_COMPUTE1_MIN_SM_COUNT 20:12 +#define NVC3C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE 24:24 +#define NVC3C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_FALSE 0x00000000 +#define NVC3C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_TRUE 0x00000001 + +#define NVC3C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVC3C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC3C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC3C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVC3C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC3C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC3C0_SET_QMD_VERSION 0x0288 +#define NVC3C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVC3C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC3C0_CHECK_QMD_VERSION 0x0290 +#define NVC3C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVC3C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC3C0_INVALIDATE_SKED_CACHES 0x0298 +#define NVC3C0_INVALIDATE_SKED_CACHES_V 0:0 + +#define NVC3C0_SET_SHADER_SHARED_MEMORY_WINDOW_A 0x02a0 +#define NVC3C0_SET_SHADER_SHARED_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC3C0_SET_SHADER_SHARED_MEMORY_WINDOW_B 0x02a4 +#define NVC3C0_SET_SHADER_SHARED_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC3C0_SCG_HYSTERESIS_CONTROL 0x02a8 +#define NVC3C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE 0:0 +#define NVC3C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC3C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_TRUE 0x00000001 +#define NVC3C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE 1:1 +#define NVC3C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC3C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_TRUE 0x00000001 + +#define NVC3C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVC3C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVC3C0_SEND_PCAS_A 0x02b4 +#define NVC3C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVC3C0_SEND_PCAS_B 0x02b8 +#define NVC3C0_SEND_PCAS_B_FROM 23:0 +#define NVC3C0_SEND_PCAS_B_DELTA 31:24 + +#define NVC3C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVC3C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVC3C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define 
NVC3C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVC3C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVC3C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVC3C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVC3C0_SET_SKED_CACHE_CONTROL 0x02cc +#define NVC3C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID 0:0 +#define NVC3C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID_FALSE 0x00000000 +#define NVC3C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID_TRUE 0x00000001 + +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVC3C0_SET_SPA_VERSION 0x0310 +#define NVC3C0_SET_SPA_VERSION_MINOR 7:0 +#define NVC3C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC3C0_SET_INLINE_QMD_ADDRESS_A 0x0318 +#define NVC3C0_SET_INLINE_QMD_ADDRESS_A_QMD_ADDRESS_SHIFTED8_UPPER 31:0 + +#define NVC3C0_SET_INLINE_QMD_ADDRESS_B 0x031c +#define NVC3C0_SET_INLINE_QMD_ADDRESS_B_QMD_ADDRESS_SHIFTED8_LOWER 31:0 + +#define NVC3C0_LOAD_INLINE_QMD_DATA(i) (0x0320+(i)*4) +#define NVC3C0_LOAD_INLINE_QMD_DATA_V 31:0 + +#define NVC3C0_SET_FALCON00 0x0500 +#define NVC3C0_SET_FALCON00_V 31:0 + +#define NVC3C0_SET_FALCON01 0x0504 +#define NVC3C0_SET_FALCON01_V 31:0 + +#define NVC3C0_SET_FALCON02 0x0508 +#define NVC3C0_SET_FALCON02_V 31:0 + +#define NVC3C0_SET_FALCON03 0x050c +#define NVC3C0_SET_FALCON03_V 31:0 + +#define NVC3C0_SET_FALCON04 0x0510 +#define NVC3C0_SET_FALCON04_V 31:0 + +#define NVC3C0_SET_FALCON05 0x0514 +#define NVC3C0_SET_FALCON05_V 31:0 + +#define NVC3C0_SET_FALCON06 0x0518 +#define NVC3C0_SET_FALCON06_V 31:0 + +#define NVC3C0_SET_FALCON07 0x051c +#define NVC3C0_SET_FALCON07_V 31:0 + +#define NVC3C0_SET_FALCON08 0x0520 +#define NVC3C0_SET_FALCON08_V 31:0 + +#define NVC3C0_SET_FALCON09 0x0524 +#define NVC3C0_SET_FALCON09_V 31:0 + +#define NVC3C0_SET_FALCON10 0x0528 +#define NVC3C0_SET_FALCON10_V 31:0 + +#define NVC3C0_SET_FALCON11 0x052c +#define NVC3C0_SET_FALCON11_V 31:0 + +#define NVC3C0_SET_FALCON12 0x0530 +#define NVC3C0_SET_FALCON12_V 31:0 + +#define NVC3C0_SET_FALCON13 0x0534 +#define NVC3C0_SET_FALCON13_V 31:0 + +#define NVC3C0_SET_FALCON14 0x0538 +#define NVC3C0_SET_FALCON14_V 31:0 + +#define NVC3C0_SET_FALCON15 0x053c +#define NVC3C0_SET_FALCON15_V 31:0 + +#define NVC3C0_SET_FALCON16 0x0540 +#define NVC3C0_SET_FALCON16_V 31:0 + +#define NVC3C0_SET_FALCON17 0x0544 +#define NVC3C0_SET_FALCON17_V 31:0 + +#define NVC3C0_SET_FALCON18 0x0548 +#define NVC3C0_SET_FALCON18_V 31:0 + +#define NVC3C0_SET_FALCON19 0x054c +#define NVC3C0_SET_FALCON19_V 31:0 + +#define NVC3C0_SET_FALCON20 0x0550 +#define NVC3C0_SET_FALCON20_V 31:0 + +#define NVC3C0_SET_FALCON21 0x0554 +#define NVC3C0_SET_FALCON21_V 31:0 + +#define NVC3C0_SET_FALCON22 0x0558 +#define NVC3C0_SET_FALCON22_V 31:0 + +#define NVC3C0_SET_FALCON23 0x055c +#define NVC3C0_SET_FALCON23_V 31:0 + +#define NVC3C0_SET_FALCON24 0x0560 +#define NVC3C0_SET_FALCON24_V 31:0 + +#define NVC3C0_SET_FALCON25 0x0564 +#define NVC3C0_SET_FALCON25_V 31:0 + +#define NVC3C0_SET_FALCON26 0x0568 +#define NVC3C0_SET_FALCON26_V 31:0 + +#define NVC3C0_SET_FALCON27 0x056c +#define NVC3C0_SET_FALCON27_V 31:0 + +#define NVC3C0_SET_FALCON28 0x0570 +#define NVC3C0_SET_FALCON28_V 31:0 + +#define NVC3C0_SET_FALCON29 0x0574 +#define 
NVC3C0_SET_FALCON29_V 31:0 + +#define NVC3C0_SET_FALCON30 0x0578 +#define NVC3C0_SET_FALCON30_V 31:0 + +#define NVC3C0_SET_FALCON31 0x057c +#define NVC3C0_SET_FALCON31_V 31:0 + +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 16:0 + +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A 0x07b0 +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B 0x07b4 +#define NVC3C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC3C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVC3C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVC3C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVC3C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVC3C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVC3C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_ALL 0x120c +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_ALL_V 0:0 + +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_ALL 0x1210 +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_ALL_V 0:0 + +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC3C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVC3C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVC3C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define NVC3C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVC3C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC3C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC3C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC3C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC3C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC3C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC3C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC3C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVC3C0_SET_RENDER_ENABLE_A 0x1550 +#define NVC3C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC3C0_SET_RENDER_ENABLE_B 0x1554 +#define NVC3C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC3C0_SET_RENDER_ENABLE_C 0x1558 
+#define NVC3C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC3C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC3C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC3C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC3C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC3C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC3C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVC3C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC3C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVC3C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC3C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVC3C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVC3C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVC3C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC3C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVC3C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC3C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVC3C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVC3C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVC3C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVC3C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVC3C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVC3C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVC3C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVC3C0_PIPE_NOP 0x1a2c +#define NVC3C0_PIPE_NOP_V 31:0 + +#define NVC3C0_SET_SPARE00 0x1a30 +#define NVC3C0_SET_SPARE00_V 31:0 + +#define NVC3C0_SET_SPARE01 0x1a34 +#define NVC3C0_SET_SPARE01_V 31:0 + +#define NVC3C0_SET_SPARE02 0x1a38 +#define NVC3C0_SET_SPARE02_V 31:0 + +#define NVC3C0_SET_SPARE03 0x1a3c +#define NVC3C0_SET_SPARE03_V 31:0 + +#define NVC3C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVC3C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC3C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVC3C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC3C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVC3C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC3C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVC3C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define 
NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC3C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 + +#define NVC3C0_SET_TRAP_HANDLER_A 0x25f8 +#define NVC3C0_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVC3C0_SET_TRAP_HANDLER_B 0x25fc +#define NVC3C0_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVC3C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVC3C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVC3C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVC3C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVC3C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define NVC3C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC3C0_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define NVC3C0_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC3C0_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVC3C0_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define 
NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVC3C0_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVC3C0_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC3C0_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVC3C0_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8 +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0 + +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec +#define NVC3C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0 + +#define NVC3C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVC3C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_volta_compute_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc461.h b/src/common/sdk/nvidia/inc/class/clc461.h new file mode 100644 index 000000000..0c0378fc7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc461.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc461_h_ +#define _clc461_h_ + +#define TURING_USERMODE_A (0xc461) + +#endif // _clc461_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc46f.h b/src/common/sdk/nvidia/inc/class/clc46f.h new file mode 100644 index 000000000..fb1c594e3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc46f.h @@ -0,0 +1,365 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc46f_h_ +#define _clc46f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class TURING_CHANNEL_GPFIFO */ +/* + * Documentation for TURING_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + * Note there is no .mfs file for this class. + */ +#define TURING_CHANNEL_GPFIFO_A (0x0000C46F) + +#define NVC46F_TYPEDEF TURING_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct Nvc46fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc46fControl, TuringAControlGPFifo; + +/* fields and values */ +#define NVC46F_NUMBER_OF_SUBCHANNELS (8) +#define NVC46F_SET_OBJECT (0x00000000) +#define NVC46F_SET_OBJECT_NVCLASS 15:0 +#define NVC46F_SET_OBJECT_ENGINE 20:16 +#define NVC46F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVC46F_ILLEGAL (0x00000004) +#define NVC46F_ILLEGAL_HANDLE 31:0 +#define NVC46F_NOP (0x00000008) +#define NVC46F_NOP_HANDLE 31:0 +#define NVC46F_SEMAPHOREA (0x00000010) +#define NVC46F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC46F_SEMAPHOREB (0x00000014) +#define NVC46F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC46F_SEMAPHOREC (0x00000018) +#define NVC46F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC46F_SEMAPHORED (0x0000001C) +#define NVC46F_SEMAPHORED_OPERATION 4:0 +#define NVC46F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC46F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC46F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC46F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVC46F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC46F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC46F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC46F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC46F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC46F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC46F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC46F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC46F_SEMAPHORED_REDUCTION 30:27 
+#define NVC46F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC46F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC46F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC46F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC46F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC46F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC46F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC46F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVC46F_SEMAPHORED_FORMAT 31:31 +#define NVC46F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC46F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC46F_NON_STALL_INTERRUPT (0x00000020) +#define NVC46F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC46F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC46F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. +// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC46F_MEM_OP_A (0x00000028) +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC46F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC46F_MEM_OP_B (0x0000002c) +#define NVC46F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC46F_MEM_OP_C (0x00000030) +#define NVC46F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC46F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC46F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define 
NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC46F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. 
+#define NVC46F_MEM_OP_D (0x00000034) +#define NVC46F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC46F_MEM_OP_D_OPERATION 31:27 +#define NVC46F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC46F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC46F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC46F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC46F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC46F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC46F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC46F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC46F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC46F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC46F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC46F_SET_REFERENCE (0x00000050) +#define NVC46F_SET_REFERENCE_COUNT 31:0 +#define NVC46F_SEM_ADDR_LO (0x0000005c) +#define NVC46F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC46F_SEM_ADDR_HI (0x00000060) +#define NVC46F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC46F_SEM_PAYLOAD_LO (0x00000064) +#define NVC46F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC46F_SEM_PAYLOAD_HI (0x00000068) +#define NVC46F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC46F_SEM_EXECUTE (0x0000006c) +#define NVC46F_SEM_EXECUTE_OPERATION 2:0 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC46F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC46F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC46F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define NVC46F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC46F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC46F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC46F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC46F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC46F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC46F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC46F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC46F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC46F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC46F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC46F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define 
NVC46F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC46F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC46F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC46F_WFI (0x00000078) +#define NVC46F_WFI_SCOPE 0:0 +#define NVC46F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC46F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC46F_WFI_SCOPE_ALL 0x00000001 +#define NVC46F_CRC_CHECK (0x0000007c) +#define NVC46F_CRC_CHECK_VALUE 31:0 +#define NVC46F_YIELD (0x00000080) +#define NVC46F_YIELD_OP 1:0 +#define NVC46F_YIELD_OP_NOP 0x00000000 +#define NVC46F_YIELD_OP_RUNLIST_TIMESLICE 0x00000002 +#define NVC46F_YIELD_OP_TSG 0x00000003 +#define NVC46F_CLEAR_FAULTED (0x00000084) +// Note: RM provides the HANDLE as an opaque value; the internal detail fields +// are intentionally not exposed to the driver through these defines. +#define NVC46F_CLEAR_FAULTED_HANDLE 30:0 +#define NVC46F_CLEAR_FAULTED_TYPE 31:31 +#define NVC46F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC46F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 + + +/* GPFIFO entry format */ +#define NVC46F_GP_ENTRY__SIZE 8 +#define NVC46F_GP_ENTRY0_FETCH 0:0 +#define NVC46F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC46F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC46F_GP_ENTRY0_GET 31:2 +#define NVC46F_GP_ENTRY0_OPERAND 31:0 +#define NVC46F_GP_ENTRY1_GET_HI 7:0 +#define NVC46F_GP_ENTRY1_LEVEL 9:9 +#define NVC46F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC46F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC46F_GP_ENTRY1_LENGTH 30:10 +#define NVC46F_GP_ENTRY1_SYNC 31:31 +#define NVC46F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC46F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC46F_GP_ENTRY1_OPCODE 7:0 +#define NVC46F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC46F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC46F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC46F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC46F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC46F_DMA_METHOD_ADDRESS 11:0 +#define NVC46F_DMA_SUBDEVICE_MASK 15:4 +#define NVC46F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC46F_DMA_TERT_OP 17:16 +#define NVC46F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC46F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC46F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC46F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC46F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC46F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC46F_DMA_METHOD_COUNT 28:16 +#define NVC46F_DMA_IMMD_DATA 28:16 +#define NVC46F_DMA_SEC_OP 31:29 +#define NVC46F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC46F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC46F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVC46F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVC46F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC46F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC46F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC46F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC46F_DMA_INCR_ADDRESS 11:0 +#define NVC46F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC46F_DMA_INCR_COUNT 28:16 +#define NVC46F_DMA_INCR_OPCODE 31:29 +#define NVC46F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC46F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC46F_DMA_NONINCR_ADDRESS 11:0 +#define NVC46F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC46F_DMA_NONINCR_COUNT 28:16 +#define NVC46F_DMA_NONINCR_OPCODE 31:29 +#define 
NVC46F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC46F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define NVC46F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC46F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC46F_DMA_ONEINCR_COUNT 28:16 +#define NVC46F_DMA_ONEINCR_OPCODE 31:29 +#define NVC46F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC46F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC46F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC46F_DMA_IMMD_ADDRESS 11:0 +#define NVC46F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC46F_DMA_IMMD_DATA 28:16 +#define NVC46F_DMA_IMMD_OPCODE 31:29 +#define NVC46F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC46F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC46F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC46F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC46F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC46F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC46F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC46F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC46F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC46F_DMA_ENDSEG_OPCODE 31:29 +#define NVC46F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC46F_DMA_ADDRESS 12:2 +#define NVC46F_DMA_SUBCH 15:13 +#define NVC46F_DMA_OPCODE3 17:16 +#define NVC46F_DMA_OPCODE3_NONE (0x00000000) +#define NVC46F_DMA_COUNT 28:18 +#define NVC46F_DMA_OPCODE 31:29 +#define NVC46F_DMA_OPCODE_METHOD (0x00000000) +#define NVC46F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC46F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc46f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc46fsw.h b/src/common/sdk/nvidia/inc/class/clc46fsw.h new file mode 100644 index 000000000..4137f0f74 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc46fsw.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* This file is *not* auto-generated. 
*/ + +#ifndef _clc46f_sw_h_ +#define _clc46f_sw_h_ + +#define NVC46F_NOTIFIERS_RC (0) +#define NVC46F_NOTIFIERS_REFCNT (1) +#define NVC46F_NOTIFIERS_NONSTALL (2) +#define NVC46F_NOTIFIERS_EVENTBUFFER (3) +#define NVC46F_NOTIFIERS_IDLECHANNEL (4) +#define NVC46F_NOTIFIERS_ENDCTX (5) +#define NVC46F_NOTIFIERS_SW (6) +#define NVC46F_NOTIFIERS_GR_DEBUG_INTR (7) +#define NVC46F_NOTIFIERS_REPLAYABLE_FAULT (8) +#define NVC46F_NOTIFIERS_MAXCOUNT (9) + +/* NvNotification[] fields and values */ +#define NVC46F_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NVC46F_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +#endif /* _clc46f_sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc4b0.h b/src/common/sdk/nvidia/inc/class/clc4b0.h new file mode 100644 index 000000000..4b7d3357b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc4b0.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef clc4b0_h_ +#define clc4b0_h_ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC4B0_VIDEO_DECODER (0x0000C4B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc4b0_h + diff --git a/src/common/sdk/nvidia/inc/class/clc4b7.h b/src/common/sdk/nvidia/inc/class/clc4b7.h new file mode 100644 index 000000000..27f3983ec --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc4b7.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef clc4b7_h_ +#define clc4b7_h_ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC4B7_VIDEO_ENCODER (0x0000C4B7) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc4b7_h + diff --git a/src/common/sdk/nvidia/inc/class/clc4c0.h b/src/common/sdk/nvidia/inc/class/clc4c0.h new file mode 100644 index 000000000..f96026c53 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc4c0.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_volta_compute_b_h_ +#define _cl_volta_compute_b_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl volta_compute_b */ + +#include "nvtypes.h" + +#define VOLTA_COMPUTE_B 0xC4C0 + +#endif /* _cl_volta_compute_b_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc4d1.h b/src/common/sdk/nvidia/inc/class/clc4d1.h new file mode 100644 index 000000000..196b0340d --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc4d1.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvtypes.h" + +#ifndef _clc4d1_h_ +#define _clc4d1_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC4D1_VIDEO_NVJPG (0x0000C4D1) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc4d1_h + diff --git a/src/common/sdk/nvidia/inc/class/clc561.h b/src/common/sdk/nvidia/inc/class/clc561.h new file mode 100644 index 000000000..83a9339a9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc561.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc561_h_ +#define _clc561_h_ + +#define AMPERE_USERMODE_A (0xc561) + +#endif // _clc561_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc56f.h b/src/common/sdk/nvidia/inc/class/clc56f.h new file mode 100644 index 000000000..bc8b675c5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc56f.h @@ -0,0 +1,367 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc56f_h_ +#define _clc56f_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* class AMPERE_CHANNEL_GPFIFO */ +/* + * Documentation for AMPERE_CHANNEL_GPFIFO can be found in dev_pbdma.ref, + * chapter "User Control Registers". It is documented as device NV_UDMA. + * The GPFIFO format itself is also documented in dev_pbdma.ref, + * NV_PPBDMA_GP_ENTRY_*. The pushbuffer format is documented in dev_ram.ref, + * chapter "FIFO DMA RAM", NV_FIFO_DMA_*. + * + * Note there is no .mfs file for this class. + */ +#define AMPERE_CHANNEL_GPFIFO_A (0x0000C56F) + +#define NVC56F_TYPEDEF AMPERE_CHANNELChannelGPFifoA + +/* dma flow control data structure */ +typedef volatile struct Nvc56fControl_struct { + NvU32 Ignored00[0x010]; /* 0000-003f*/ + NvU32 Put; /* put offset, read/write 0040-0043*/ + NvU32 Get; /* get offset, read only 0044-0047*/ + NvU32 Reference; /* reference value, read only 0048-004b*/ + NvU32 PutHi; /* high order put offset bits 004c-004f*/ + NvU32 Ignored01[0x002]; /* 0050-0057*/ + NvU32 TopLevelGet; /* top level get offset, read only 0058-005b*/ + NvU32 TopLevelGetHi; /* high order top level get bits 005c-005f*/ + NvU32 GetHi; /* high order get offset bits 0060-0063*/ + NvU32 Ignored02[0x007]; /* 0064-007f*/ + NvU32 Ignored03; /* used to be engine yield 0080-0083*/ + NvU32 Ignored04[0x001]; /* 0084-0087*/ + NvU32 GPGet; /* GP FIFO get offset, read only 0088-008b*/ + NvU32 GPPut; /* GP FIFO put offset 008c-008f*/ + NvU32 Ignored05[0x5c]; +} Nvc56fControl, AmpereAControlGPFifo; + +/* fields and values */ +#define NVC56F_NUMBER_OF_SUBCHANNELS (8) +#define NVC56F_SET_OBJECT (0x00000000) +#define NVC56F_SET_OBJECT_NVCLASS 15:0 +#define NVC56F_SET_OBJECT_ENGINE 20:16 +#define NVC56F_SET_OBJECT_ENGINE_SW 0x0000001f +#define NVC56F_ILLEGAL (0x00000004) +#define NVC56F_ILLEGAL_HANDLE 31:0 +#define NVC56F_NOP (0x00000008) +#define NVC56F_NOP_HANDLE 31:0 +#define NVC56F_SEMAPHOREA (0x00000010) +#define NVC56F_SEMAPHOREA_OFFSET_UPPER 7:0 +#define NVC56F_SEMAPHOREB (0x00000014) +#define NVC56F_SEMAPHOREB_OFFSET_LOWER 31:2 +#define NVC56F_SEMAPHOREC (0x00000018) +#define NVC56F_SEMAPHOREC_PAYLOAD 31:0 +#define NVC56F_SEMAPHORED (0x0000001C) +#define NVC56F_SEMAPHORED_OPERATION 4:0 +#define NVC56F_SEMAPHORED_OPERATION_ACQUIRE 0x00000001 +#define NVC56F_SEMAPHORED_OPERATION_RELEASE 0x00000002 +#define NVC56F_SEMAPHORED_OPERATION_ACQ_GEQ 0x00000004 +#define NVC56F_SEMAPHORED_OPERATION_ACQ_AND 0x00000008 +#define NVC56F_SEMAPHORED_OPERATION_REDUCTION 0x00000010 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH 12:12 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH_DISABLED 0x00000000 +#define NVC56F_SEMAPHORED_ACQUIRE_SWITCH_ENABLED 0x00000001 +#define NVC56F_SEMAPHORED_RELEASE_WFI 20:20 +#define NVC56F_SEMAPHORED_RELEASE_WFI_EN 0x00000000 +#define NVC56F_SEMAPHORED_RELEASE_WFI_DIS 0x00000001 +#define NVC56F_SEMAPHORED_RELEASE_SIZE 24:24 +#define NVC56F_SEMAPHORED_RELEASE_SIZE_16BYTE 0x00000000 +#define NVC56F_SEMAPHORED_RELEASE_SIZE_4BYTE 0x00000001 +#define NVC56F_SEMAPHORED_REDUCTION 30:27 +#define NVC56F_SEMAPHORED_REDUCTION_MIN 0x00000000 +#define NVC56F_SEMAPHORED_REDUCTION_MAX 0x00000001 +#define NVC56F_SEMAPHORED_REDUCTION_XOR 0x00000002 +#define NVC56F_SEMAPHORED_REDUCTION_AND 0x00000003 +#define NVC56F_SEMAPHORED_REDUCTION_OR 0x00000004 +#define NVC56F_SEMAPHORED_REDUCTION_ADD 0x00000005 +#define NVC56F_SEMAPHORED_REDUCTION_INC 0x00000006 +#define NVC56F_SEMAPHORED_REDUCTION_DEC 0x00000007 +#define NVC56F_SEMAPHORED_FORMAT 31:31 +#define 
NVC56F_SEMAPHORED_FORMAT_SIGNED 0x00000000 +#define NVC56F_SEMAPHORED_FORMAT_UNSIGNED 0x00000001 +#define NVC56F_NON_STALL_INTERRUPT (0x00000020) +#define NVC56F_NON_STALL_INTERRUPT_HANDLE 31:0 +#define NVC56F_FB_FLUSH (0x00000024) // Deprecated - use MEMBAR TYPE SYS_MEMBAR +#define NVC56F_FB_FLUSH_HANDLE 31:0 +// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for +// specifying the page address for a targeted TLB invalidate and the uTLB for +// a targeted REPLAY_CANCEL for UVM. +// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly +// rearranged fields. +#define NVC56F_MEM_OP_A (0x00000028) +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE 7:6 // only relevant for invalidates with NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE for invalidating link TLB only, or non-link TLB only or all TLBs +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_ALL_TLBS 0 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_LINK_TLBS 1 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_NON_LINK_TLBS 2 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_INVAL_SCOPE_RSVRVD 3 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000 +#define NVC56F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12 +#define NVC56F_MEM_OP_B (0x0000002c) +#define NVC56F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0 +#define NVC56F_MEM_OP_C (0x00000030) +#define NVC56F_MEM_OP_C_MEMBAR_TYPE 2:0 +#define NVC56F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000 +#define NVC56F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0 +#define 
NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003 +#define NVC56F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0 +// MEM_OP_D MUST be preceded by MEM_OPs A-C. 
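/*
 * Editorial aside, not part of the header being added: the MEM_OP_A..D
 * methods above are normally emitted as one group, with MEM_OP_D last as the
 * comment notes. A minimal sketch of packing a system membar from these
 * defines follows; FIELD_LOWBIT/FIELD_VALUE and the push() callback are
 * hypothetical stand-ins (the SDK's DRF helpers in nvmisc.h play the same
 * role) rather than anything defined in this diff.
 */
#include "nvtypes.h"

/* The field defines use "high:low" notation, so a ternary trick recovers the
 * low bit of a field, e.g. 31:27 -> 27. */
#define FIELD_LOWBIT(f)   (0 ? f)
#define FIELD_VALUE(f, v) (((NvU32)(v)) << FIELD_LOWBIT(f))

/*
 * Emit the four MEM_OP methods for a system membar. MEM_OP_A/B carry the
 * target address only for targeted TLB invalidates, so they are zero here,
 * and MEM_OP_D is sent last. push() is a hypothetical "append one
 * method/data pair to the pushbuffer" callback supplied by the caller.
 */
static void emit_sys_membar(void (*push)(NvU32 method, NvU32 data))
{
    push(NVC56F_MEM_OP_A, 0);
    push(NVC56F_MEM_OP_B, 0);
    push(NVC56F_MEM_OP_C,
         FIELD_VALUE(NVC56F_MEM_OP_C_MEMBAR_TYPE,
                     NVC56F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR));
    push(NVC56F_MEM_OP_D,
         FIELD_VALUE(NVC56F_MEM_OP_D_OPERATION,
                     NVC56F_MEM_OP_D_OPERATION_MEMBAR));
}
/*
 * In a real pushbuffer the four data words would follow a single incrementing
 * method header addressed at MEM_OP_A (see the NVC56F_DMA_INCR_* and
 * NVC56F_DMA_SEC_OP_INC_METHOD defines later in this file).
 */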
+#define NVC56F_MEM_OP_D (0x00000034) +#define NVC56F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE +#define NVC56F_MEM_OP_D_OPERATION 31:27 +#define NVC56F_MEM_OP_D_OPERATION_MEMBAR 0x00000005 +#define NVC56F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009 +#define NVC56F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a +#define NVC56F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d +#define NVC56F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e +// CLEAN_LINES is an alias for Tegra/GPU IP usage +#define NVC56F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e +#define NVC56F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f +#define NVC56F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010 +#define NVC56F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015 +#define NVC56F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001 +#define NVC56F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3 +#define NVC56F_SET_REFERENCE (0x00000050) +#define NVC56F_SET_REFERENCE_COUNT 31:0 +#define NVC56F_SEM_ADDR_LO (0x0000005c) +#define NVC56F_SEM_ADDR_LO_OFFSET 31:2 +#define NVC56F_SEM_ADDR_HI (0x00000060) +#define NVC56F_SEM_ADDR_HI_OFFSET 7:0 +#define NVC56F_SEM_PAYLOAD_LO (0x00000064) +#define NVC56F_SEM_PAYLOAD_LO_PAYLOAD 31:0 +#define NVC56F_SEM_PAYLOAD_HI (0x00000068) +#define NVC56F_SEM_PAYLOAD_HI_PAYLOAD 31:0 +#define NVC56F_SEM_EXECUTE (0x0000006c) +#define NVC56F_SEM_EXECUTE_OPERATION 2:0 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQUIRE 0x00000000 +#define NVC56F_SEM_EXECUTE_OPERATION_RELEASE 0x00000001 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_STRICT_GEQ 0x00000002 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_CIRC_GEQ 0x00000003 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_AND 0x00000004 +#define NVC56F_SEM_EXECUTE_OPERATION_ACQ_NOR 0x00000005 +#define NVC56F_SEM_EXECUTE_OPERATION_REDUCTION 0x00000006 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG 12:12 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_ACQUIRE_SWITCH_TSG_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI 20:20 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_RELEASE_WFI_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE 24:24 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE_32BIT 0x00000000 +#define NVC56F_SEM_EXECUTE_PAYLOAD_SIZE_64BIT 0x00000001 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP 25:25 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP_DIS 0x00000000 +#define NVC56F_SEM_EXECUTE_RELEASE_TIMESTAMP_EN 0x00000001 +#define NVC56F_SEM_EXECUTE_REDUCTION 30:27 +#define NVC56F_SEM_EXECUTE_REDUCTION_IMIN 0x00000000 +#define NVC56F_SEM_EXECUTE_REDUCTION_IMAX 0x00000001 +#define NVC56F_SEM_EXECUTE_REDUCTION_IXOR 0x00000002 +#define NVC56F_SEM_EXECUTE_REDUCTION_IAND 0x00000003 +#define NVC56F_SEM_EXECUTE_REDUCTION_IOR 0x00000004 +#define NVC56F_SEM_EXECUTE_REDUCTION_IADD 0x00000005 +#define NVC56F_SEM_EXECUTE_REDUCTION_INC 0x00000006 +#define NVC56F_SEM_EXECUTE_REDUCTION_DEC 0x00000007 +#define 
NVC56F_SEM_EXECUTE_REDUCTION_FORMAT 31:31 +#define NVC56F_SEM_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000000 +#define NVC56F_SEM_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000001 +#define NVC56F_WFI (0x00000078) +#define NVC56F_WFI_SCOPE 0:0 +#define NVC56F_WFI_SCOPE_CURRENT_SCG_TYPE 0x00000000 +#define NVC56F_WFI_SCOPE_CURRENT_VEID 0x00000000 +#define NVC56F_WFI_SCOPE_ALL 0x00000001 +#define NVC56F_YIELD (0x00000080) +#define NVC56F_YIELD_OP 1:0 +#define NVC56F_YIELD_OP_NOP 0x00000000 +#define NVC56F_YIELD_OP_TSG 0x00000003 +#define NVC56F_CLEAR_FAULTED (0x00000084) +// Note: RM provides the HANDLE as an opaque value; the internal detail fields +// are intentionally not exposed to the driver through these defines. +#define NVC56F_CLEAR_FAULTED_HANDLE 30:0 +#define NVC56F_CLEAR_FAULTED_TYPE 31:31 +#define NVC56F_CLEAR_FAULTED_TYPE_PBDMA_FAULTED 0x00000000 +#define NVC56F_CLEAR_FAULTED_TYPE_ENG_FAULTED 0x00000001 + + +/* GPFIFO entry format */ +#define NVC56F_GP_ENTRY__SIZE 8 +#define NVC56F_GP_ENTRY0_FETCH 0:0 +#define NVC56F_GP_ENTRY0_FETCH_UNCONDITIONAL 0x00000000 +#define NVC56F_GP_ENTRY0_FETCH_CONDITIONAL 0x00000001 +#define NVC56F_GP_ENTRY0_GET 31:2 +#define NVC56F_GP_ENTRY0_OPERAND 31:0 +#define NVC56F_GP_ENTRY1_GET_HI 7:0 +#define NVC56F_GP_ENTRY1_LEVEL 9:9 +#define NVC56F_GP_ENTRY1_LEVEL_MAIN 0x00000000 +#define NVC56F_GP_ENTRY1_LEVEL_SUBROUTINE 0x00000001 +#define NVC56F_GP_ENTRY1_LENGTH 30:10 +#define NVC56F_GP_ENTRY1_SYNC 31:31 +#define NVC56F_GP_ENTRY1_SYNC_PROCEED 0x00000000 +#define NVC56F_GP_ENTRY1_SYNC_WAIT 0x00000001 +#define NVC56F_GP_ENTRY1_OPCODE 7:0 +#define NVC56F_GP_ENTRY1_OPCODE_NOP 0x00000000 +#define NVC56F_GP_ENTRY1_OPCODE_ILLEGAL 0x00000001 +#define NVC56F_GP_ENTRY1_OPCODE_GP_CRC 0x00000002 +#define NVC56F_GP_ENTRY1_OPCODE_PB_CRC 0x00000003 + +/* dma method formats */ +#define NVC56F_DMA_METHOD_ADDRESS_OLD 12:2 +#define NVC56F_DMA_METHOD_ADDRESS 11:0 +#define NVC56F_DMA_SUBDEVICE_MASK 15:4 +#define NVC56F_DMA_METHOD_SUBCHANNEL 15:13 +#define NVC56F_DMA_TERT_OP 17:16 +#define NVC56F_DMA_TERT_OP_GRP0_INC_METHOD (0x00000000) +#define NVC56F_DMA_TERT_OP_GRP0_SET_SUB_DEV_MASK (0x00000001) +#define NVC56F_DMA_TERT_OP_GRP0_STORE_SUB_DEV_MASK (0x00000002) +#define NVC56F_DMA_TERT_OP_GRP0_USE_SUB_DEV_MASK (0x00000003) +#define NVC56F_DMA_TERT_OP_GRP2_NON_INC_METHOD (0x00000000) +#define NVC56F_DMA_METHOD_COUNT_OLD 28:18 +#define NVC56F_DMA_METHOD_COUNT 28:16 +#define NVC56F_DMA_IMMD_DATA 28:16 +#define NVC56F_DMA_SEC_OP 31:29 +#define NVC56F_DMA_SEC_OP_GRP0_USE_TERT (0x00000000) +#define NVC56F_DMA_SEC_OP_INC_METHOD (0x00000001) +#define NVC56F_DMA_SEC_OP_GRP2_USE_TERT (0x00000002) +#define NVC56F_DMA_SEC_OP_NON_INC_METHOD (0x00000003) +#define NVC56F_DMA_SEC_OP_IMMD_DATA_METHOD (0x00000004) +#define NVC56F_DMA_SEC_OP_ONE_INC (0x00000005) +#define NVC56F_DMA_SEC_OP_RESERVED6 (0x00000006) +#define NVC56F_DMA_SEC_OP_END_PB_SEGMENT (0x00000007) +/* dma incrementing method format */ +#define NVC56F_DMA_INCR_ADDRESS 11:0 +#define NVC56F_DMA_INCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_INCR_COUNT 28:16 +#define NVC56F_DMA_INCR_OPCODE 31:29 +#define NVC56F_DMA_INCR_OPCODE_VALUE (0x00000001) +#define NVC56F_DMA_INCR_DATA 31:0 +/* dma non-incrementing method format */ +#define NVC56F_DMA_NONINCR_ADDRESS 11:0 +#define NVC56F_DMA_NONINCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_NONINCR_COUNT 28:16 +#define NVC56F_DMA_NONINCR_OPCODE 31:29 +#define NVC56F_DMA_NONINCR_OPCODE_VALUE (0x00000003) +#define NVC56F_DMA_NONINCR_DATA 31:0 +/* dma increment-once method format */ +#define 
NVC56F_DMA_ONEINCR_ADDRESS 11:0 +#define NVC56F_DMA_ONEINCR_SUBCHANNEL 15:13 +#define NVC56F_DMA_ONEINCR_COUNT 28:16 +#define NVC56F_DMA_ONEINCR_OPCODE 31:29 +#define NVC56F_DMA_ONEINCR_OPCODE_VALUE (0x00000005) +#define NVC56F_DMA_ONEINCR_DATA 31:0 +/* dma no-operation format */ +#define NVC56F_DMA_NOP (0x00000000) +/* dma immediate-data format */ +#define NVC56F_DMA_IMMD_ADDRESS 11:0 +#define NVC56F_DMA_IMMD_SUBCHANNEL 15:13 +#define NVC56F_DMA_IMMD_DATA 28:16 +#define NVC56F_DMA_IMMD_OPCODE 31:29 +#define NVC56F_DMA_IMMD_OPCODE_VALUE (0x00000004) +/* dma set sub-device mask format */ +#define NVC56F_DMA_SET_SUBDEVICE_MASK_VALUE 15:4 +#define NVC56F_DMA_SET_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_SET_SUBDEVICE_MASK_OPCODE_VALUE (0x00000001) +/* dma store sub-device mask format */ +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_VALUE 15:4 +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_STORE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000002) +/* dma use sub-device mask format */ +#define NVC56F_DMA_USE_SUBDEVICE_MASK_OPCODE 31:16 +#define NVC56F_DMA_USE_SUBDEVICE_MASK_OPCODE_VALUE (0x00000003) +/* dma end-segment format */ +#define NVC56F_DMA_ENDSEG_OPCODE 31:29 +#define NVC56F_DMA_ENDSEG_OPCODE_VALUE (0x00000007) +/* dma legacy incrementing/non-incrementing formats */ +#define NVC56F_DMA_ADDRESS 12:2 +#define NVC56F_DMA_SUBCH 15:13 +#define NVC56F_DMA_OPCODE3 17:16 +#define NVC56F_DMA_OPCODE3_NONE (0x00000000) +#define NVC56F_DMA_COUNT 28:18 +#define NVC56F_DMA_OPCODE 31:29 +#define NVC56F_DMA_OPCODE_METHOD (0x00000000) +#define NVC56F_DMA_OPCODE_NONINC_METHOD (0x00000002) +#define NVC56F_DMA_DATA 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc56f_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc56fsw.h b/src/common/sdk/nvidia/inc/class/clc56fsw.h new file mode 100644 index 000000000..38ef41244 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc56fsw.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* This file is *not* auto-generated. 
*/ + +#ifndef _clc56f_sw_h_ +#define _clc56f_sw_h_ + +#define NVC56F_NOTIFIERS_RC (0) +#define NVC56F_NOTIFIERS_REFCNT (1) +#define NVC56F_NOTIFIERS_NONSTALL (2) +#define NVC56F_NOTIFIERS_EVENTBUFFER (3) +#define NVC56F_NOTIFIERS_IDLECHANNEL (4) +#define NVC56F_NOTIFIERS_ENDCTX (5) +#define NVC56F_NOTIFIERS_SW (6) +#define NVC56F_NOTIFIERS_GR_DEBUG_INTR (7) +#define NVC56F_NOTIFIERS_REPLAYABLE_FAULT (8) +#define NVC56F_NOTIFIERS_MAXCOUNT (9) + +/* NvNotification[] fields and values */ +#define NVC56F_NOTIFICATION_STATUS_ERROR_BAD_ARGUMENT (0x2000) +#define NVC56F_NOTIFICATION_STATUS_ERROR_PROTECTION_FAULT (0x4000) + +#endif /* _clc56f_sw_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc570.h b/src/common/sdk/nvidia/inc/class/clc570.h new file mode 100644 index 000000000..12d200f47 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc570.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc570_h_ +#define _clc570_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#include "class/cl5070.h" + +#define NVC570_DISPLAY (0x0000C570) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numPiors; // Number of PIORs in this chip/display +} NVC570_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc570_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc572.h b/src/common/sdk/nvidia/inc/class/clc572.h new file mode 100644 index 000000000..79dd9b3d4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc572.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc572_h_ +#define _clc572_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define PHYSICAL_CHANNEL_GPFIFO (0x0000C572) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc572_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc573.h b/src/common/sdk/nvidia/inc/class/clc573.h new file mode 100644 index 000000000..a7bb1f453 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc573.h @@ -0,0 +1,598 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc573_h_ +#define _clc573_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC573_DISP_CAPABILITIES 0xC573 + +typedef volatile struct _clc573_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvC573DispCapabilities,NvC573DispCapabilities_Map ; + + +#define NVC573_SYS_CAP 0x0 /* RW-4R */ +#define NVC573_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVC573_SYS_CAP_HEAD7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVC573_SYS_CAP_HEAD_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVC573_SYS_CAP_SOR0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVC573_SYS_CAP_SOR1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVC573_SYS_CAP_SOR2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC573_SYS_CAP_SOR3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define 
NVC573_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC573_SYS_CAP_SOR4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC573_SYS_CAP_SOR5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC573_SYS_CAP_SOR6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC573_SYS_CAP_SOR7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC573_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC573_SYS_CAP_SOR_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB 0x4 /* RW-4R */ +#define NVC573_SYS_CAPB_WINDOW0_EXISTS 0:0 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW0_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW1_EXISTS 1:1 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW1_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW2_EXISTS 2:2 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW2_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW3_EXISTS 3:3 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW3_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW4_EXISTS 4:4 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW4_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW5_EXISTS 5:5 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW5_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW6_EXISTS 6:6 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW6_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW7_EXISTS 7:7 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW7_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW8_EXISTS 8:8 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW8_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW8_EXISTS_NO 0x00000000 /* RW--V */ 
+#define NVC573_SYS_CAPB_WINDOW8_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS 9:9 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW9_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS 10:10 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW10_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS 11:11 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW11_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS 12:12 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW12_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS 13:13 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW13_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS 14:14 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW14_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS 15:15 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW15_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS 16:16 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW16_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS 17:17 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW17_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS 18:18 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW18_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS 19:19 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW19_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS 20:20 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW20_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS 21:21 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW21_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS 22:22 /* RWIVF */ +#define 
NVC573_SYS_CAPB_WINDOW22_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW22_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS 23:23 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW23_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS 24:24 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW24_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS 25:25 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW25_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS 26:26 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW26_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS 27:27 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW27_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS 28:28 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW28_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS 29:29 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW29_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS 30:30 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW30_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS 31:31 /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW31_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS__SIZE_1 32 /* */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC573_SYS_CAPB_WINDOW_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA 0x10 /* RW-4R */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */ 
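/*
 * Editorial aside, not part of the header being added: the capability
 * registers above live in the 0x1000-byte window described by
 * NvC573DispCapabilities_Map, and the (i)-indexed *_EXISTS fields are meant
 * to be probed per head, SOR or window. A minimal sketch follows; the
 * FIELD_* helpers, cap_read() and count_heads_and_sors() are hypothetical
 * names standing in for the SDK's DRF machinery, not part of this diff.
 */
#include "nvtypes.h"

/* "high:low" field helpers, e.g. for an 8:8 field the mask is 1, shift 8. */
#define FIELD_LOWBIT(f)   (0 ? f)
#define FIELD_HIGHBIT(f)  (1 ? f)
#define FIELD_MASK(f)     (0xFFFFFFFFu >> (31 - FIELD_HIGHBIT(f) + FIELD_LOWBIT(f)))
#define FIELD_GET(f, reg) (((reg) >> FIELD_LOWBIT(f)) & FIELD_MASK(f))

/* Read one 32-bit capability register given its byte offset into the map. */
static NvU32 cap_read(const NvC573DispCapabilities_Map *caps, NvU32 byteOffset)
{
    return caps->dispCapabilities[byteOffset / sizeof(NvU32)];
}

/* Count the heads and SORs reported as present in NVC573_SYS_CAP. */
static void count_heads_and_sors(const NvC573DispCapabilities_Map *caps,
                                 NvU32 *numHeads, NvU32 *numSors)
{
    NvU32 sysCap = cap_read(caps, NVC573_SYS_CAP);
    NvU32 i;

    *numHeads = 0;
    *numSors  = 0;
    for (i = 0; i < NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1; i++) {
        if (FIELD_GET(NVC573_SYS_CAP_HEAD_EXISTS(i), sysCap) ==
            NVC573_SYS_CAP_HEAD_EXISTS_YES)
            (*numHeads)++;
    }
    for (i = 0; i < NVC573_SYS_CAP_SOR_EXISTS__SIZE_1; i++) {
        if (FIELD_GET(NVC573_SYS_CAP_SOR_EXISTS(i), sysCap) ==
            NVC573_SYS_CAP_SOR_EXISTS_YES)
            (*numSors)++;
    }
}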
+#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT 26:26 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_INIT 0x00000000 /* RWI-V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */ +#define NVC573_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */ +#define 
NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT 19:19 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OLPF_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA 0:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_VGA_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE__SIZE_1 8 /* */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE 3:0 /* RWIUF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE 13:8 /* RWIUF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_INIT 0x00000000 /* RWI-V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */ +#define NVC573_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC573_SOR_CAP__SIZE_1 8 /* */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V 
*/ +#define NVC573_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC573_SOR_CAP_DUAL_TMDS_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC573_SOR_CAP_SDI_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC573_SOR_CAP_DP_A_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC573_SOR_CAP_DP_B_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC573_SOR_CAP_DP_INTERLACE_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC573_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC573_SOR_CAP_DP_8_LANES_INIT 0x00000000 /* RWI-V */ +#define NVC573_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC573_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC(i) (0x788+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* 
RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR 12:10 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define 
NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP_INIT 0x00000000 /* RWI-V */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC573_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP_INIT 0x00000000 /* RWI-V */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc573_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc574.h b/src/common/sdk/nvidia/inc/class/clc574.h new file mode 100644 index 000000000..6df8d0320 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc574.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc574_h_ +#define _clc574_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define UVM_CHANNEL_RETAINER (0x0000C574) + +typedef struct +{ + NvHandle hClient; + NvHandle hChannel; +}NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc574_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc57a.h b/src/common/sdk/nvidia/inc/class/clc57a.h new file mode 100644 index 000000000..54eb7416e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57a.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 1993-2020, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc57a__h_ +#define _clc57a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57A_CURSOR_IMM_CHANNEL_PIO (0x0000C57A) + +typedef volatile struct _clc57a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x37B]; +} NVC57ADispCursorImmControlPio; + +#define NVC57A_FREE (0x00000008) +#define NVC57A_FREE_COUNT 5:0 +#define NVC57A_UPDATE (0x00000200) +#define NVC57A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define 
NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC57A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVC57A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define 
NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) 
+#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC57A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc57a_h + diff --git a/src/common/sdk/nvidia/inc/class/clc57b.h b/src/common/sdk/nvidia/inc/class/clc57b.h new file mode 100644 index 000000000..545ccc5e7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57b.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clC57b_h_ +#define _clC57b_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57B_WINDOW_IMM_CHANNEL_DMA (0x0000C57B) + +// dma opcode instructions +#define NVC57B_DMA +#define NVC57B_DMA_OPCODE 31:29 +#define NVC57B_DMA_OPCODE_METHOD 0x00000000 +#define NVC57B_DMA_OPCODE_JUMP 0x00000001 +#define NVC57B_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC57B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC57B_DMA_METHOD_COUNT 27:18 +#define NVC57B_DMA_METHOD_OFFSET 13:2 +#define NVC57B_DMA_DATA 31:0 +#define NVC57B_DMA_DATA_NOP 0x00000000 +#define NVC57B_DMA_JUMP_OFFSET 11:2 +#define NVC57B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC57B_PUT (0x00000000) +#define NVC57B_PUT_PTR 9:0 +#define NVC57B_GET (0x00000004) +#define NVC57B_GET_PTR 9:0 +#define NVC57B_UPDATE (0x00000200) +#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW 1:1 +#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC57B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC57B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC57B_SET_POINT_OUT_X 15:0 +#define NVC57B_SET_POINT_OUT_Y 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC57b_h diff --git a/src/common/sdk/nvidia/inc/class/clc57d.h b/src/common/sdk/nvidia/inc/class/clc57d.h new file mode 100644 index 000000000..4f415d0ef --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57d.h @@ -0,0 +1,1277 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clC57d_h_ +#define _clC57d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57D_CORE_CHANNEL_DMA (0x0000C57D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC57D_DMA +#define NVC57D_DMA_OPCODE 31:29 +#define NVC57D_DMA_OPCODE_METHOD 0x00000000 +#define NVC57D_DMA_OPCODE_JUMP 0x00000001 +#define NVC57D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC57D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC57D_DMA_METHOD_COUNT 27:18 +#define NVC57D_DMA_METHOD_OFFSET 13:2 +#define NVC57D_DMA_DATA 31:0 +#define NVC57D_DMA_DATA_NOP 0x00000000 +#define NVC57D_DMA_JUMP_OFFSET 11:2 +#define NVC57D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC57D_PUT (0x00000000) +#define NVC57D_PUT_PTR 9:0 +#define NVC57D_GET (0x00000004) +#define NVC57D_GET_PTR 9:0 +#define NVC57D_UPDATE (0x00000200) +#define NVC57D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC57D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC57D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC57D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC57D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC57D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define NVC57D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVC57D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC57D_UPDATE_RELEASE_ELV 0:0 +#define NVC57D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC57D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC57D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define 
NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC57D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC57D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC57D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC57D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC57D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC57D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC57D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL (0x00000210) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC57D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define 
NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC57D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define 
NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define 
NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC57D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC57D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC57D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC57D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC57D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC57D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC57D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC57D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC57D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC57D_SOR_SET_CONTROL(a) 
(0x00000300 + (a)*0x00000020) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC57D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_DSI (0x0000000A) +#define NVC57D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC57D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC57D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC57D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC57D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) +#define NVC57D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC57D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC57D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC57D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC57D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define 
NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define 
NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define 
NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080) +#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) + +#define NVC57D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC57D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) 
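/*
 * Editor's illustrative sketch (not part of the original header): each field
 * above is described by an "msb:lsb" bit range within a 32-bit method data
 * word, and the _FALSE/_TRUE/_ENABLE style names are the values written into
 * that range. The EX_* helpers and ex_window_usage_bounds() below are
 * hypothetical stand-ins, shown only to make the encoding concrete; they are
 * not the macros the driver itself uses.
 */
#define EX_FIELD_MASK(msb, lsb)   ((0xFFFFFFFFu >> (31u - (msb))) & (0xFFFFFFFFu << (lsb)))
#define EX_FIELD_SET(msb, lsb, v) (((unsigned int)(v) << (lsb)) & EX_FIELD_MASK((msb), (lsb)))

/* Example: a WINDOW_SET_WINDOW_USAGE_BOUNDS data word allowing the ILUT,
 * a 5-tap input scaler, and upscaling, using the bit ranges defined above. */
static inline unsigned int ex_window_usage_bounds(void)
{
    return EX_FIELD_SET(16, 16, 0x1)   /* _ILUT_ALLOWED_TRUE          (16:16) */
         | EX_FIELD_SET(22, 20, 0x4)   /* _INPUT_SCALER_TAPS_TAPS_5   (22:20) */
         | EX_FIELD_SET(24, 24, 0x1);  /* _UPSCALING_ALLOWED_TRUE     (24:24) */
}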
+#define NVC57D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F) +#define 
NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F) +#define NVC57D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVC57D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 
(0x00000014) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define 
NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define 
NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC57D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVC57D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVC57D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005) +#define NVC57D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NVC57D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004) +#define NVC57D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC57D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) 
(0x0000202C + (a)*0x00000400) +#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC57D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8 +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400) +#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK_MODE 2:2 +#define NVC57D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4 +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define 
NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12 +#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14 +#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000) +#define NVC57D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400) +#define NVC57D_HEAD_SET_LOCK_CHAIN_POSITION 3:0 +#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400) +#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NVC57D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400) +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400) +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NVC57D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400) +#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NVC57D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NVC57D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400) +#define NVC57D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NVC57D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NVC57D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400) +#define NVC57D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NVC57D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NVC57D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400) +#define NVC57D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NVC57D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NVC57D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) +#define NVC57D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NVC57D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC57D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0 +#define NVC57D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10 +#define NVC57D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20 +#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400) +#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0 +#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10 +#define NVC57D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20 +#define NVC57D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400) +#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NVC57D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NVC57D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define 
NVC57D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004) +#define NVC57D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 +#define NVC57D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004) +#define NVC57D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0 +#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20 +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC57D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0 +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006) +#define 
NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020) +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define 
NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0 +#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NVC57D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400) +#define NVC57D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NVC57D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400) +#define NVC57D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NVC57D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400) +#define NVC57D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NVC57D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NVC57D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0 +#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000) +#define NVC57D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001) +#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4 +#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2 +#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC57D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400) +#define NVC57D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0 +#define NVC57D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16 +#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0 +#define NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16 +#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0 +#define NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16 +#define NVC57D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0 +#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000400) +#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0 +#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4 +#define NVC57D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18 +#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0 +#define NVC57D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16 +#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000400) +#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0 +#define NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16 +#define 
NVC57D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0 +#define NVC57D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 +#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1 +#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE 3:2 +#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC57D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC57D_HEAD_SET_OLUT_CONTROL_SIZE 18:8 +#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0 +#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0 +#define NVC57D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0 +#define NVC57D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0 +#define 
NVC57D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0 +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000400) +#define NVC57D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0 +#define NVC57D_HEAD_SET_TILE_POSITION(a) (0x000022D0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_TILE_POSITION_X 2:0 +#define NVC57D_HEAD_SET_TILE_POSITION_Y 6:4 +#define NVC57D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE 2:1 +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_SINGLE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_DUAL (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_QUAD (0x00000002) +#define NVC57D_HEAD_SET_DSC_CONTROL_MODE_DROP (0x00000003) +#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3 +#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4 +#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5 +#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000) +#define NVC57D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001) +#define NVC57D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 1:1 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_SIZE 9:2 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 10:10 +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC57D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC57D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0 +#define 
NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29 +#define NVC57D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8 
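/*
 * Editor's illustrative sketch (not part of the original header): the
 * DSC_PPS_DATA words above appear to carry the DSC Picture Parameter Set in
 * byte-sized fields, with 16-bit PPS parameters split into a _HIGH byte (the
 * more significant byte, placed in the lower-numbered bits) and a _LOW byte.
 * ex_pack_dsc_pps_data2() is a hypothetical helper assuming that layout for
 * NVC57D_HEAD_SET_DSC_PPS_DATA2 (PIC_WIDTH_HIGH 7:0, PIC_WIDTH_LOW 15:8,
 * SLICE_HEIGHT_HIGH 23:16, SLICE_HEIGHT_LOW 31:24).
 */
static inline unsigned int ex_pack_dsc_pps_data2(unsigned int pic_width,
                                                 unsigned int slice_height)
{
    return (((pic_width    >> 8) & 0xFFu) <<  0)   /* _PIC_WIDTH_HIGH     7:0  */
         | (((pic_width        ) & 0xFFu) <<  8)   /* _PIC_WIDTH_LOW     15:8  */
         | (((slice_height >> 8) & 0xFFu) << 16)   /* _SLICE_HEIGHT_HIGH 23:16 */
         | (((slice_height     ) & 0xFFu) << 24);  /* _SLICE_HEIGHT_LOW  31:24 */
}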
+#define NVC57D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28 +#define NVC57D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16 +#define 
NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 
15:13 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24 +#define NVC57D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8 +#define NVC57D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16 +#define NVC57D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0 +#define NVC57D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000400) +#define NVC57D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0 +#define NVC57D_HEAD_SET_RG_MERGE(a) (0x00002360 + (a)*0x00000400) +#define NVC57D_HEAD_SET_RG_MERGE_MODE 1:0 +#define NVC57D_HEAD_SET_RG_MERGE_MODE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_RG_MERGE_MODE_SETUP (0x00000001) +#define NVC57D_HEAD_SET_RG_MERGE_MODE_MASTER (0x00000002) +#define NVC57D_HEAD_SET_RG_MERGE_MODE_SLAVE (0x00000003) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC57d_h diff --git a/src/common/sdk/nvidia/inc/class/clc57e.h b/src/common/sdk/nvidia/inc/class/clc57e.h new file mode 100644 index 000000000..d61341085 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57e.h @@ -0,0 +1,657 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clC57e_h_ +#define _clC57e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC57E_WINDOW_CHANNEL_DMA (0x0000C57E) + +// dma opcode instructions +#define NVC57E_DMA +#define NVC57E_DMA_OPCODE 31:29 +#define NVC57E_DMA_OPCODE_METHOD 0x00000000 +#define NVC57E_DMA_OPCODE_JUMP 0x00000001 +#define NVC57E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC57E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC57E_DMA_METHOD_COUNT 27:18 +#define NVC57E_DMA_METHOD_OFFSET 13:2 +#define NVC57E_DMA_DATA 31:0 +#define NVC57E_DMA_DATA_NOP 0x00000000 +#define NVC57E_DMA_JUMP_OFFSET 11:2 +#define NVC57E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC57E_PUT (0x00000000) +#define NVC57E_PUT_PTR 9:0 +#define NVC57E_GET (0x00000004) +#define NVC57E_GET_PTR 9:0 +#define NVC57E_UPDATE (0x00000200) +#define NVC57E_UPDATE_RELEASE_ELV 0:0 +#define NVC57E_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC57E_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC57E_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC57E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12 +#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000) +#define NVC57E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001) +#define NVC57E_GET_LINE (0x00000208) +#define NVC57E_GET_LINE_LINE 15:0 +#define NVC57E_SET_SEMAPHORE_CONTROL (0x0000020C) +#define NVC57E_SET_SEMAPHORE_CONTROL_OFFSET 7:0 +#define 
NVC57E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVC57E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVC57E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVC57E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVC57E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218) +#define NVC57E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NVC57E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C) +#define NVC57E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC57E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVC57E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC57E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC57E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC57E_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC57E_SET_SIZE (0x00000224) +#define NVC57E_SET_SIZE_WIDTH 15:0 +#define NVC57E_SET_SIZE_HEIGHT 31:16 +#define NVC57E_SET_STORAGE (0x00000228) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB (0x00000000) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC57E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC57E_SET_STORAGE_MEMORY_LAYOUT 4:4 +#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC57E_SET_STORAGE_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC57E_SET_PARAMS (0x0000022C) +#define NVC57E_SET_PARAMS_FORMAT 7:0 +#define NVC57E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC57E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC57E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC57E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC57E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC57E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC57E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC57E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC57E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC57E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVC57E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS (0x00000022) +#define NVC57E_SET_PARAMS_FORMAT_X2BL10GL10RL10_XVYCC (0x00000024) +#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC57E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC57E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVC57E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC57E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC57E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC57E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC57E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC57E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC57E_SET_PARAMS_SWAP_UV 19:19 +#define NVC57E_SET_PARAMS_SWAP_UV_DISABLE 
(0x00000000) +#define NVC57E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVC57E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVC57E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC57E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC57E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004) +#define NVC57E_SET_CONTEXT_DMA_ISO_HANDLE 31:0 +#define NVC57E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004) +#define NVC57E_SET_OFFSET_ORIGIN 31:0 +#define NVC57E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC57E_SET_POINT_IN_X 15:0 +#define NVC57E_SET_POINT_IN_Y 31:16 +#define NVC57E_SET_SIZE_IN (0x00000298) +#define NVC57E_SET_SIZE_IN_WIDTH 15:0 +#define NVC57E_SET_SIZE_IN_HEIGHT 31:16 +#define NVC57E_SET_SIZE_OUT (0x000002A4) +#define NVC57E_SET_SIZE_OUT_WIDTH 15:0 +#define NVC57E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVC57E_SET_CONTROL_INPUT_SCALER (0x000002A8) +#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC57E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC57E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC57E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC57E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS 16:16 +#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVC57E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVC57E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define 
NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 
31:28 +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC57E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC57E_SET_KEY_ALPHA (0x000002F8) +#define NVC57E_SET_KEY_ALPHA_MIN 15:0 +#define NVC57E_SET_KEY_ALPHA_MAX 31:16 +#define NVC57E_SET_KEY_RED_CR (0x000002FC) +#define NVC57E_SET_KEY_RED_CR_MIN 15:0 +#define NVC57E_SET_KEY_RED_CR_MAX 31:16 +#define NVC57E_SET_KEY_GREEN_Y (0x00000300) +#define NVC57E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC57E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC57E_SET_KEY_BLUE_CB (0x00000304) +#define NVC57E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC57E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC57E_SET_PRESENT_CONTROL (0x00000308) +#define NVC57E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 +#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC57E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC57E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC57E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC57E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC57E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC57E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC57E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC57E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC57E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC57E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC57E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC57E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE 
(0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC57E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define 
NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) 
+#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC57E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL (0x00000398) +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVC57E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) +#define NVC57E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVC57E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVC57E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVC57E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVC57E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVC57E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVC57E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVC57E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVC57E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVC57E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define 
NVC57E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVC57E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVC57E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVC57E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVC57E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C20 (0x00000420) +#define NVC57E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVC57E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVC57E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVC57E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_ILUT_CONTROL (0x00000440) +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MODE 3:2 +#define NVC57E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC57E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC57E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVC57E_SET_CONTEXT_DMA_ILUT (0x00000444) +#define NVC57E_SET_CONTEXT_DMA_ILUT_HANDLE 31:0 +#define NVC57E_SET_OFFSET_ILUT (0x00000448) +#define NVC57E_SET_OFFSET_ILUT_ORIGIN 31:0 +#define NVC57E_SET_CSC00CONTROL (0x0000045C) +#define NVC57E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVC57E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVC57E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVC57E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVC57E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVC57E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVC57E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVC57E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVC57E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVC57E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVC57E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVC57E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVC57E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) 
+#define NVC57E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE (0x000004A4) +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC57E_SET_CSC0LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC57E_SET_CSC0LUT_ENTRY (0x000004A8) +#define NVC57E_SET_CSC0LUT_ENTRY_IDX 10:0 +#define NVC57E_SET_CSC0LUT_ENTRY_VALUE 31:16 +#define NVC57E_SET_CSC01CONTROL (0x000004BC) +#define NVC57E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC01CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVC57E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVC57E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVC57E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVC57E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVC57E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVC57E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVC57E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVC57E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVC57E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVC57E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C22 (0x000004E8) +#define NVC57E_SET_CSC01COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC01COEFFICIENT_C23 (0x000004EC) +#define NVC57E_SET_CSC01COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_TMO_CONTROL (0x00000500) +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_TMO_CONTROL_SAT_MODE 3:2 +#define NVC57E_SET_TMO_CONTROL_SIZE 18:8 +#define NVC57E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508) +#define NVC57E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C) +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510) +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE (0x00000514) +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518) +#define NVC57E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C) +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC57E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC57E_SET_CONTEXT_DMA_TMO_LUT (0x00000528) +#define NVC57E_SET_CONTEXT_DMA_TMO_LUT_HANDLE 
31:0 +#define NVC57E_SET_OFFSET_TMO_LUT (0x0000052C) +#define NVC57E_SET_OFFSET_TMO_LUT_ORIGIN 31:0 +#define NVC57E_SET_CSC10CONTROL (0x0000053C) +#define NVC57E_SET_CSC10CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC10COEFFICIENT_C00 (0x00000540) +#define NVC57E_SET_CSC10COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C01 (0x00000544) +#define NVC57E_SET_CSC10COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C02 (0x00000548) +#define NVC57E_SET_CSC10COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C03 (0x0000054C) +#define NVC57E_SET_CSC10COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C10 (0x00000550) +#define NVC57E_SET_CSC10COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C11 (0x00000554) +#define NVC57E_SET_CSC10COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C12 (0x00000558) +#define NVC57E_SET_CSC10COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C13 (0x0000055C) +#define NVC57E_SET_CSC10COEFFICIENT_C13_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C20 (0x00000560) +#define NVC57E_SET_CSC10COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C21 (0x00000564) +#define NVC57E_SET_CSC10COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C22 (0x00000568) +#define NVC57E_SET_CSC10COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC10COEFFICIENT_C23 (0x0000056C) +#define NVC57E_SET_CSC10COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_CSC1LUT_CONTROL (0x00000580) +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0 +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR 1:1 +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE 4:4 +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE (0x00000584) +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC57E_SET_CSC1LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC57E_SET_CSC1LUT_ENTRY (0x00000588) +#define NVC57E_SET_CSC1LUT_ENTRY_IDX 10:0 +#define NVC57E_SET_CSC1LUT_ENTRY_VALUE 31:16 +#define NVC57E_SET_CSC11CONTROL (0x0000059C) +#define NVC57E_SET_CSC11CONTROL_ENABLE 0:0 +#define NVC57E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC57E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC57E_SET_CSC11COEFFICIENT_C00 (0x000005A0) +#define NVC57E_SET_CSC11COEFFICIENT_C00_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C01 (0x000005A4) +#define NVC57E_SET_CSC11COEFFICIENT_C01_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C02 (0x000005A8) +#define NVC57E_SET_CSC11COEFFICIENT_C02_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C03 (0x000005AC) +#define NVC57E_SET_CSC11COEFFICIENT_C03_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C10 (0x000005B0) +#define NVC57E_SET_CSC11COEFFICIENT_C10_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C11 (0x000005B4) +#define NVC57E_SET_CSC11COEFFICIENT_C11_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C12 (0x000005B8) +#define NVC57E_SET_CSC11COEFFICIENT_C12_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C13 (0x000005BC) +#define NVC57E_SET_CSC11COEFFICIENT_C13_VALUE 20:0 +#define 
NVC57E_SET_CSC11COEFFICIENT_C20 (0x000005C0) +#define NVC57E_SET_CSC11COEFFICIENT_C20_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C21 (0x000005C4) +#define NVC57E_SET_CSC11COEFFICIENT_C21_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C22 (0x000005C8) +#define NVC57E_SET_CSC11COEFFICIENT_C22_VALUE 20:0 +#define NVC57E_SET_CSC11COEFFICIENT_C23 (0x000005CC) +#define NVC57E_SET_CSC11COEFFICIENT_C23_VALUE 20:0 +#define NVC57E_SET_CLAMP_RANGE (0x000005D0) +#define NVC57E_SET_CLAMP_RANGE_LOW 15:0 +#define NVC57E_SET_CLAMP_RANGE_HIGH 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC57e_h diff --git a/src/common/sdk/nvidia/inc/class/clc57esw.h b/src/common/sdk/nvidia/inc/class/clc57esw.h new file mode 100644 index 000000000..8c106b42e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc57esw.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2009-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc57e_sw_h_ +#define _clc57e_sw_h_ + +/* This file is *not* auto-generated. */ + +#define NVC57E_WINDOWS_NOTIFY_RM (0x0000058C) +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE 0:0 +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE_FALSE (0x00000000) +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_CHANGE_TRUE (0x00000001) +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE 1:1 +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_OFF (0x00000000) +#define NVC57E_WINDOWS_NOTIFY_RM_VSYNC_STATE_ON (0x00000001) +#define NVC57E_WINDOWS_NOTIFY_RM_ASSOCIATED_HEAD 7:4 + +#define SwSetMClkSwitch Reserved05[1] + +#define NVC57E_SW_SET_MCLK_SWITCH (0x000002B4) +#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE 0:0 +#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE_FALSE (0x00000000) +#define NVC57E_SW_SET_MCLK_SWITCH_ENABLE_TRUE (0x00000001) + +#endif // _clc57e_sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/clc58b.h b/src/common/sdk/nvidia/inc/class/clc58b.h new file mode 100644 index 000000000..abeefa790 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc58b.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc58b_h_ +#define _clc58b_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define TURING_VMMU_A (0x0000c58b) + +/** + * @brief NvAlloc parameters for TuringVmmuA class + * + * This class represents mapping between guest physical and system physical. + * Will also be used to represent VF specific state for a given guest. + * + * gfid [in] + * GFID of VF + **/ + +typedef struct +{ + NvHandle hHostVgpuDevice; +} TURING_VMMU_A_ALLOCATION_PARAMETERS; + + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _clc58b_h + diff --git a/src/common/sdk/nvidia/inc/class/clc597.h b/src/common/sdk/nvidia/inc/class/clc597.h new file mode 100644 index 000000000..988e3c069 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc597.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc597_h_ +#define _clc597_h_ + +#define TURING_A 0xC597 + +#endif // _clc597_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc5b5.h b/src/common/sdk/nvidia/inc/class/clc5b5.h new file mode 100644 index 000000000..cc3633e99 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc5b5.h @@ -0,0 +1,282 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc5b5_h_ +#define _clc5b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define TURING_DMA_COPY_A (0x0000C5B5) + +#define NVC5B5_NOP (0x00000100) +#define NVC5B5_NOP_PARAMETER 31:0 +#define NVC5B5_PM_TRIGGER (0x00000140) +#define NVC5B5_PM_TRIGGER_V 31:0 +#define NVC5B5_SET_SEMAPHORE_A (0x00000240) +#define NVC5B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC5B5_SET_SEMAPHORE_B (0x00000244) +#define NVC5B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC5B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC5B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC5B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC5B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC5B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC5B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC5B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC5B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC5B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC5B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC5B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC5B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC5B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC5B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2 +#define NVC5B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC5B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC5B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC5B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC5B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC5B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2 +#define NVC5B5_LAUNCH_DMA (0x00000300) +#define 
NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC5B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC5B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC5B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_FLUSH_TYPE 25:25 +#define NVC5B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000) +#define NVC5B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003) +#define NVC5B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC5B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC5B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC5B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC5B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC5B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC5B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC5B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC5B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC5B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC5B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC5B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC5B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC5B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC5B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC5B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC5B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC5B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC5B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_SRC_BYPASS_L2 20:20 +#define NVC5B5_LAUNCH_DMA_SRC_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define 
NVC5B5_LAUNCH_DMA_SRC_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC5B5_LAUNCH_DMA_DST_BYPASS_L2 21:21 +#define NVC5B5_LAUNCH_DMA_DST_BYPASS_L2_USE_PTE_SETTING (0x00000000) +#define NVC5B5_LAUNCH_DMA_DST_BYPASS_L2_FORCE_VOLATILE (0x00000001) +#define NVC5B5_LAUNCH_DMA_VPRMODE 23:22 +#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC5B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC5B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC5B5_LAUNCH_DMA_DISABLE_PLC 26:26 +#define NVC5B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000) +#define NVC5B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001) +#define NVC5B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC5B5_OFFSET_IN_UPPER (0x00000400) +#define NVC5B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC5B5_OFFSET_IN_LOWER (0x00000404) +#define NVC5B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC5B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC5B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC5B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC5B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC5B5_PITCH_IN (0x00000410) +#define NVC5B5_PITCH_IN_VALUE 31:0 +#define NVC5B5_PITCH_OUT (0x00000414) +#define NVC5B5_PITCH_OUT_VALUE 31:0 +#define NVC5B5_LINE_LENGTH_IN (0x00000418) +#define NVC5B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC5B5_LINE_COUNT (0x0000041C) +#define NVC5B5_LINE_COUNT_VALUE 31:0 +#define NVC5B5_SET_REMAP_CONST_A (0x00000700) +#define NVC5B5_SET_REMAP_CONST_A_V 31:0 +#define NVC5B5_SET_REMAP_CONST_B (0x00000704) +#define NVC5B5_SET_REMAP_CONST_B_V 31:0 +#define NVC5B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC5B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define 
NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC5B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC5B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC5B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC5B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC5B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC5B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC5B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC5B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC5B5_SET_DST_WIDTH (0x00000710) +#define NVC5B5_SET_DST_WIDTH_V 31:0 +#define NVC5B5_SET_DST_HEIGHT (0x00000714) +#define NVC5B5_SET_DST_HEIGHT_V 31:0 +#define NVC5B5_SET_DST_DEPTH (0x00000718) +#define NVC5B5_SET_DST_DEPTH_V 31:0 +#define NVC5B5_SET_DST_LAYER (0x0000071C) +#define NVC5B5_SET_DST_LAYER_V 31:0 +#define NVC5B5_SET_DST_ORIGIN (0x00000720) +#define NVC5B5_SET_DST_ORIGIN_X 15:0 +#define NVC5B5_SET_DST_ORIGIN_Y 31:16 +#define NVC5B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC5B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC5B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC5B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define 
NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC5B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC5B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC5B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC5B5_SET_SRC_WIDTH (0x0000072C) +#define NVC5B5_SET_SRC_WIDTH_V 31:0 +#define NVC5B5_SET_SRC_HEIGHT (0x00000730) +#define NVC5B5_SET_SRC_HEIGHT_V 31:0 +#define NVC5B5_SET_SRC_DEPTH (0x00000734) +#define NVC5B5_SET_SRC_DEPTH_V 31:0 +#define NVC5B5_SET_SRC_LAYER (0x00000738) +#define NVC5B5_SET_SRC_LAYER_V 31:0 +#define NVC5B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC5B5_SET_SRC_ORIGIN_X 15:0 +#define NVC5B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC5B5_SRC_ORIGIN_X (0x00000744) +#define NVC5B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC5B5_SRC_ORIGIN_Y (0x00000748) +#define NVC5B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC5B5_DST_ORIGIN_X (0x0000074C) +#define NVC5B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC5B5_DST_ORIGIN_Y (0x00000750) +#define NVC5B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC5B5_PM_TRIGGER_END (0x00001114) +#define NVC5B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc5b5_h + diff --git a/src/common/sdk/nvidia/inc/class/clc5b5sw.h b/src/common/sdk/nvidia/inc/class/clc5b5sw.h new file mode 100644 index 000000000..4adc5a308 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc5b5sw.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _clc5b5sw_h_ +#define _clc5b5sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file is *not* auto-generated. */ + +// +// Using VERSION_0 will cause the API to interpret +// engineType as a CE engine instance. This allows +// for backward compatibility with 85B5sw and 90B5sw. +// +#define NVC5B5_ALLOCATION_PARAMETERS_VERSION_0 0 + +// +// Using VERSION_1 will cause the API to interpret +// engineType as an NV2080_ENGINE_TYPE ordinal. 
+// +#define NVC5B5_ALLOCATION_PARAMETERS_VERSION_1 1 + +typedef struct +{ + NvU32 version; + NvU32 engineType; +} NVC5B5_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc5b5sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/clc5c0.h b/src/common/sdk/nvidia/inc/class/clc5c0.h new file mode 100644 index 000000000..51e0837f7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc5c0.h @@ -0,0 +1,732 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_turing_compute_a_h_ +#define _cl_turing_compute_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl turing_compute_a */ + +#include "nvtypes.h" + +#define TURING_COMPUTE_A 0xC5C0 + +#define NVC5C0_SET_OBJECT 0x0000 +#define NVC5C0_SET_OBJECT_CLASS_ID 15:0 +#define NVC5C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC5C0_NO_OPERATION 0x0100 +#define NVC5C0_NO_OPERATION_V 31:0 + +#define NVC5C0_SET_NOTIFY_A 0x0104 +#define NVC5C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC5C0_SET_NOTIFY_B 0x0108 +#define NVC5C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC5C0_NOTIFY 0x010c +#define NVC5C0_NOTIFY_TYPE 31:0 +#define NVC5C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC5C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC5C0_WAIT_FOR_IDLE 0x0110 +#define NVC5C0_WAIT_FOR_IDLE_V 31:0 + +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC5C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC5C0_SEND_GO_IDLE 0x013c +#define NVC5C0_SEND_GO_IDLE_V 31:0 + +#define NVC5C0_PM_TRIGGER 0x0140 +#define NVC5C0_PM_TRIGGER_V 31:0 + +#define NVC5C0_PM_TRIGGER_WFI 0x0144 +#define NVC5C0_PM_TRIGGER_WFI_V 31:0 + +#define NVC5C0_FE_ATOMIC_SEQUENCE_BEGIN 0x0148 +#define NVC5C0_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0 + +#define 
NVC5C0_FE_ATOMIC_SEQUENCE_END 0x014c +#define NVC5C0_FE_ATOMIC_SEQUENCE_END_V 31:0 + +#define NVC5C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC5C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC5C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC5C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC5C0_LINE_LENGTH_IN 0x0180 +#define NVC5C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC5C0_LINE_COUNT 0x0184 +#define NVC5C0_LINE_COUNT_VALUE 31:0 + +#define NVC5C0_OFFSET_OUT_UPPER 0x0188 +#define NVC5C0_OFFSET_OUT_UPPER_VALUE 16:0 + +#define NVC5C0_OFFSET_OUT 0x018c +#define NVC5C0_OFFSET_OUT_VALUE 31:0 + +#define NVC5C0_PITCH_OUT 0x0190 +#define NVC5C0_PITCH_OUT_VALUE 31:0 + +#define NVC5C0_SET_DST_BLOCK_SIZE 0x0194 +#define NVC5C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC5C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC5C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC5C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC5C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC5C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC5C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC5C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC5C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC5C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC5C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC5C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC5C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC5C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC5C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC5C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC5C0_SET_DST_WIDTH 0x0198 +#define NVC5C0_SET_DST_WIDTH_V 31:0 + +#define NVC5C0_SET_DST_HEIGHT 0x019c +#define NVC5C0_SET_DST_HEIGHT_V 31:0 + +#define NVC5C0_SET_DST_DEPTH 0x01a0 +#define NVC5C0_SET_DST_DEPTH_V 31:0 + +#define NVC5C0_SET_DST_LAYER 0x01a4 +#define NVC5C0_SET_DST_LAYER_V 31:0 + +#define NVC5C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC5C0_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC5C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC5C0_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC5C0_LAUNCH_DMA 0x01b0 +#define NVC5C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC5C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC5C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC5C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC5C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC5C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC5C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC5C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC5C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC5C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC5C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC5C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC5C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC5C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC5C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC5C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC5C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC5C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC5C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC5C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC5C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC5C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define 
NVC5C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC5C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC5C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC5C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC5C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC5C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC5C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC5C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC5C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC5C0_LOAD_INLINE_DATA 0x01b4 +#define NVC5C0_LOAD_INLINE_DATA_V 31:0 + +#define NVC5C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC5C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC5C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC5C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC5C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC5C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC5C0_SET_SM_SCG_CONTROL 0x01e8 +#define NVC5C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS 0:0 +#define NVC5C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS_FALSE 0x00000000 +#define NVC5C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS_TRUE 0x00000001 + +#define NVC5C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC5C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC5C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC5C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC5C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC5C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC5C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC5C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC5C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVC5C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define NVC5C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVC5C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVC5C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVC5C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVC5C0_PERFMON_TRANSFER 0x0210 +#define NVC5C0_PERFMON_TRANSFER_V 31:0 + +#define NVC5C0_SET_QMD_VIRTUALIZATION_BASE_A 0x0214 +#define NVC5C0_SET_QMD_VIRTUALIZATION_BASE_A_ADDRESS_UPPER 7:0 + +#define NVC5C0_SET_QMD_VIRTUALIZATION_BASE_B 0x0218 +#define NVC5C0_SET_QMD_VIRTUALIZATION_BASE_B_ADDRESS_LOWER 31:0 + +#define NVC5C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVC5C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC5C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC5C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC5C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC5C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC5C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC5C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC5C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC5C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC5C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC5C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC5C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC5C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC5C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC5C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC5C0_SET_RESERVED_SW_METHOD00 0x0220 +#define NVC5C0_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD01 0x0224 +#define NVC5C0_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD02 0x0228 +#define NVC5C0_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD03 0x022c +#define NVC5C0_SET_RESERVED_SW_METHOD03_V 31:0 + 
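/*
 * [Editorial example - not part of clc5c0.h] The defines above follow the
 * usual NVIDIA class-header convention: each method name (for example
 * NVC5C0_LAUNCH_DMA, 0x01b0) is an offset into the class's method space,
 * and each field define gives a HIGH:LOW bit range within the 32-bit method
 * data (for example NVC5C0_LAUNCH_DMA_COMPLETION_TYPE 5:4). Driver code
 * typically packs these fields with DRF-style helpers elsewhere in this SDK;
 * the sketch below packs one LAUNCH_DMA data word by hand, with the bit
 * positions copied from the defines above, so it stays self-contained.
 * Treat it as an illustration of the encoding only, not as code introduced
 * by this change.
 */
#include <stdint.h>

/* Low bit positions mirrored from the NVC5C0_LAUNCH_DMA_* defines above. */
#define EXAMPLE_DST_MEMORY_LAYOUT_LO      0   /* field 0:0   */
#define EXAMPLE_COMPLETION_TYPE_LO        4   /* field 5:4   */
#define EXAMPLE_SEMAPHORE_STRUCT_SIZE_LO  12  /* field 12:12 */

static uint32_t example_launch_dma_data(void)
{
    uint32_t data = 0;

    /* DST_MEMORY_LAYOUT = PITCH (0x1) */
    data |= 1u << EXAMPLE_DST_MEMORY_LAYOUT_LO;
    /* COMPLETION_TYPE = RELEASE_SEMAPHORE (0x2) */
    data |= 2u << EXAMPLE_COMPLETION_TYPE_LO;
    /* SEMAPHORE_STRUCT_SIZE = ONE_WORD (0x1) */
    data |= 1u << EXAMPLE_SEMAPHORE_STRUCT_SIZE_LO;

    /* The resulting word is what would be sent as the data for method 0x01b0. */
    return data;
}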
+#define NVC5C0_SET_RESERVED_SW_METHOD04 0x0230 +#define NVC5C0_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD05 0x0234 +#define NVC5C0_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD06 0x0238 +#define NVC5C0_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD07 0x023c +#define NVC5C0_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC5C0_SET_CWD_REF_COUNTER 0x0248 +#define NVC5C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVC5C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVC5C0_SET_RESERVED_SW_METHOD08 0x024c +#define NVC5C0_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD09 0x0250 +#define NVC5C0_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD10 0x0254 +#define NVC5C0_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD11 0x0258 +#define NVC5C0_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD12 0x025c +#define NVC5C0_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD13 0x0260 +#define NVC5C0_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD14 0x0264 +#define NVC5C0_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVC5C0_SET_RESERVED_SW_METHOD15 0x0268 +#define NVC5C0_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVC5C0_SET_SCG_CONTROL 0x0270 +#define NVC5C0_SET_SCG_CONTROL_COMPUTE1_MAX_SM_COUNT 8:0 +#define NVC5C0_SET_SCG_CONTROL_COMPUTE1_MIN_SM_COUNT 20:12 +#define NVC5C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE 24:24 +#define NVC5C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_FALSE 0x00000000 +#define NVC5C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_TRUE 0x00000001 + +#define NVC5C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVC5C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC5C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC5C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVC5C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC5C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC5C0_SET_QMD_VERSION 0x0288 +#define NVC5C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVC5C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC5C0_CHECK_QMD_VERSION 0x0290 +#define NVC5C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVC5C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC5C0_INVALIDATE_SKED_CACHES 0x0298 +#define NVC5C0_INVALIDATE_SKED_CACHES_V 0:0 + +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL 0x029c +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_CONSTANT_BUFFER_MASK 7:0 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE 8:8 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE_FALSE 0x00000000 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE_TRUE 0x00000001 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE 12:12 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE_FALSE 0x00000000 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE_TRUE 0x00000001 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE 16:16 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE_FALSE 
0x00000000 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE_TRUE 0x00000001 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE 20:20 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE_FALSE 0x00000000 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE_TRUE 0x00000001 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE 24:24 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE_FALSE 0x00000000 +#define NVC5C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE_TRUE 0x00000001 + +#define NVC5C0_SET_SHADER_SHARED_MEMORY_WINDOW_A 0x02a0 +#define NVC5C0_SET_SHADER_SHARED_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC5C0_SET_SHADER_SHARED_MEMORY_WINDOW_B 0x02a4 +#define NVC5C0_SET_SHADER_SHARED_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC5C0_SCG_HYSTERESIS_CONTROL 0x02a8 +#define NVC5C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE 0:0 +#define NVC5C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC5C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_TRUE 0x00000001 +#define NVC5C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE 1:1 +#define NVC5C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC5C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_TRUE 0x00000001 + +#define NVC5C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVC5C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVC5C0_SEND_PCAS_A 0x02b4 +#define NVC5C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVC5C0_SEND_PCAS_B 0x02b8 +#define NVC5C0_SEND_PCAS_B_FROM 23:0 +#define NVC5C0_SEND_PCAS_B_DELTA 31:24 + +#define NVC5C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVC5C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVC5C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVC5C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVC5C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVC5C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVC5C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVC5C0_SET_SKED_CACHE_CONTROL 0x02cc +#define NVC5C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID 0:0 +#define NVC5C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID_FALSE 0x00000000 +#define NVC5C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID_TRUE 0x00000001 + +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVC5C0_SET_SPA_VERSION 0x0310 +#define NVC5C0_SET_SPA_VERSION_MINOR 7:0 +#define NVC5C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC5C0_SET_INLINE_QMD_ADDRESS_A 0x0318 +#define NVC5C0_SET_INLINE_QMD_ADDRESS_A_QMD_ADDRESS_SHIFTED8_UPPER 31:0 + +#define NVC5C0_SET_INLINE_QMD_ADDRESS_B 0x031c +#define NVC5C0_SET_INLINE_QMD_ADDRESS_B_QMD_ADDRESS_SHIFTED8_LOWER 31:0 + +#define NVC5C0_LOAD_INLINE_QMD_DATA(i) (0x0320+(i)*4) +#define NVC5C0_LOAD_INLINE_QMD_DATA_V 31:0 + +#define NVC5C0_SET_FALCON00 0x0500 +#define NVC5C0_SET_FALCON00_V 31:0 + +#define NVC5C0_SET_FALCON01 0x0504 +#define NVC5C0_SET_FALCON01_V 31:0 + +#define NVC5C0_SET_FALCON02 0x0508 +#define NVC5C0_SET_FALCON02_V 31:0 + +#define NVC5C0_SET_FALCON03 0x050c +#define NVC5C0_SET_FALCON03_V 31:0 + +#define NVC5C0_SET_FALCON04 0x0510 +#define NVC5C0_SET_FALCON04_V 31:0 + +#define NVC5C0_SET_FALCON05 0x0514 
+#define NVC5C0_SET_FALCON05_V 31:0 + +#define NVC5C0_SET_FALCON06 0x0518 +#define NVC5C0_SET_FALCON06_V 31:0 + +#define NVC5C0_SET_FALCON07 0x051c +#define NVC5C0_SET_FALCON07_V 31:0 + +#define NVC5C0_SET_FALCON08 0x0520 +#define NVC5C0_SET_FALCON08_V 31:0 + +#define NVC5C0_SET_FALCON09 0x0524 +#define NVC5C0_SET_FALCON09_V 31:0 + +#define NVC5C0_SET_FALCON10 0x0528 +#define NVC5C0_SET_FALCON10_V 31:0 + +#define NVC5C0_SET_FALCON11 0x052c +#define NVC5C0_SET_FALCON11_V 31:0 + +#define NVC5C0_SET_FALCON12 0x0530 +#define NVC5C0_SET_FALCON12_V 31:0 + +#define NVC5C0_SET_FALCON13 0x0534 +#define NVC5C0_SET_FALCON13_V 31:0 + +#define NVC5C0_SET_FALCON14 0x0538 +#define NVC5C0_SET_FALCON14_V 31:0 + +#define NVC5C0_SET_FALCON15 0x053c +#define NVC5C0_SET_FALCON15_V 31:0 + +#define NVC5C0_SET_FALCON16 0x0540 +#define NVC5C0_SET_FALCON16_V 31:0 + +#define NVC5C0_SET_FALCON17 0x0544 +#define NVC5C0_SET_FALCON17_V 31:0 + +#define NVC5C0_SET_FALCON18 0x0548 +#define NVC5C0_SET_FALCON18_V 31:0 + +#define NVC5C0_SET_FALCON19 0x054c +#define NVC5C0_SET_FALCON19_V 31:0 + +#define NVC5C0_SET_FALCON20 0x0550 +#define NVC5C0_SET_FALCON20_V 31:0 + +#define NVC5C0_SET_FALCON21 0x0554 +#define NVC5C0_SET_FALCON21_V 31:0 + +#define NVC5C0_SET_FALCON22 0x0558 +#define NVC5C0_SET_FALCON22_V 31:0 + +#define NVC5C0_SET_FALCON23 0x055c +#define NVC5C0_SET_FALCON23_V 31:0 + +#define NVC5C0_SET_FALCON24 0x0560 +#define NVC5C0_SET_FALCON24_V 31:0 + +#define NVC5C0_SET_FALCON25 0x0564 +#define NVC5C0_SET_FALCON25_V 31:0 + +#define NVC5C0_SET_FALCON26 0x0568 +#define NVC5C0_SET_FALCON26_V 31:0 + +#define NVC5C0_SET_FALCON27 0x056c +#define NVC5C0_SET_FALCON27_V 31:0 + +#define NVC5C0_SET_FALCON28 0x0570 +#define NVC5C0_SET_FALCON28_V 31:0 + +#define NVC5C0_SET_FALCON29 0x0574 +#define NVC5C0_SET_FALCON29_V 31:0 + +#define NVC5C0_SET_FALCON30 0x0578 +#define NVC5C0_SET_FALCON30_V 31:0 + +#define NVC5C0_SET_FALCON31 0x057c +#define NVC5C0_SET_FALCON31_V 31:0 + +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 16:0 + +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A 0x07b0 +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B 0x07b4 +#define NVC5C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC5C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVC5C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVC5C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVC5C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVC5C0_SET_SCG_COMPUTE_SCHEDULING_PARAMETERS(i) (0x0da0+(i)*4) +#define NVC5C0_SET_SCG_COMPUTE_SCHEDULING_PARAMETERS_V 31:0 + +#define NVC5C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVC5C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC5C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVC5C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVC5C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define 
NVC5C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVC5C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC5C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC5C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC5C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC5C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC5C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC5C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC5C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVC5C0_SET_RENDER_ENABLE_A 0x1550 +#define NVC5C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC5C0_SET_RENDER_ENABLE_B 0x1554 +#define NVC5C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC5C0_SET_RENDER_ENABLE_C 0x1558 +#define NVC5C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC5C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC5C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC5C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC5C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC5C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC5C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVC5C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC5C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVC5C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC5C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVC5C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVC5C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVC5C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC5C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVC5C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC5C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVC5C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVC5C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVC5C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVC5C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define 
NVC5C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVC5C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVC5C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVC5C0_PIPE_NOP 0x1a2c +#define NVC5C0_PIPE_NOP_V 31:0 + +#define NVC5C0_SET_SPARE00 0x1a30 +#define NVC5C0_SET_SPARE00_V 31:0 + +#define NVC5C0_SET_SPARE01 0x1a34 +#define NVC5C0_SET_SPARE01_V 31:0 + +#define NVC5C0_SET_SPARE02 0x1a38 +#define NVC5C0_SET_SPARE02_V 31:0 + +#define NVC5C0_SET_SPARE03 0x1a3c +#define NVC5C0_SET_SPARE03_V 31:0 + +#define NVC5C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVC5C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC5C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVC5C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC5C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVC5C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC5C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVC5C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP 19:19 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_FALSE 0x00000000 +#define NVC5C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_TRUE 0x00000001 + +#define NVC5C0_SET_TRAP_HANDLER_A 0x25f8 +#define NVC5C0_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVC5C0_SET_TRAP_HANDLER_B 0x25fc +#define NVC5C0_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVC5C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVC5C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVC5C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVC5C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVC5C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define 
NVC5C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC5C0_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define NVC5C0_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC5C0_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVC5C0_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVC5C0_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVC5C0_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC5C0_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVC5C0_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8 +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0 + +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec +#define NVC5C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0 + +#define NVC5C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVC5C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_turing_compute_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc637.h b/src/common/sdk/nvidia/inc/class/clc637.h new file mode 100644 index 000000000..6d5572dbf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc637.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc637_h_ +#define _clc637_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define AMPERE_SMC_PARTITION_REF (0x0000c637) + +// +// This swizzId can be used by root clients like tools for device level +// profiling +// +#define NVC637_DEVICE_PROFILING_SWIZZID (0xFFFFFFFE) + +// +// TODO: Deprecate NVC637_DEVICE_LEVEL_SWIZZID once all the clients are moved to +// NVC637_DEVICE_PROFILING_SWIZZID +// +#define NVC637_DEVICE_LEVEL_SWIZZID NVC637_DEVICE_PROFILING_SWIZZID + +/* NvRmAlloc parameters */ +typedef struct { + // + // capDescriptor is a file descriptor for unix RM clients, but a void + // pointer for windows RM clients. + // + // capDescriptor is transparent to RM clients i.e. RM's user-mode shim + // populates this field on behalf of clients. + // + NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8); + + NvU32 swizzId; +} NVC637_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc637_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/clc638.h b/src/common/sdk/nvidia/inc/class/clc638.h new file mode 100644 index 000000000..1bc65f423 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc638.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _clc638_h_ +#define _clc638_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define AMPERE_SMC_EXEC_PARTITION_REF (0x0000c638) + +/* NvRmAlloc parameters */ +typedef struct { + // + // capDescriptor is a file descriptor for unix RM clients, but a void + // pointer for windows RM clients. + // + // capDescriptor is transparent to RM clients i.e. RM's user-mode shim + // populates this field on behalf of clients. + // + NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8); + + NvU32 execPartitionId; +} NVC638_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc638_h_ */ + diff --git a/src/common/sdk/nvidia/inc/class/clc639.h b/src/common/sdk/nvidia/inc/class/clc639.h new file mode 100644 index 000000000..239f1b795 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc639.h @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _clc639_h_ +#define _clc639_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define AMPERE_SMC_CONFIG_SESSION (0x0000c639) + +/* NvRmAlloc parameters */ +typedef struct { + // + // capDescriptor is a file descriptor for unix RM clients, but a void + // pointer for windows RM clients. + // + // capDescriptor is transparent to RM clients i.e. RM's user-mode shim + // populates this field on behalf of clients. + // + NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8); +} NVC639_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc639_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc640.h b/src/common/sdk/nvidia/inc/class/clc640.h new file mode 100644 index 000000000..8f1fff3cf --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc640.h @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _clc640_h_ +#define _clc640_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define AMPERE_SMC_MONITOR_SESSION (0x0000c640) + +/* NvRmAlloc parameters */ +typedef struct { + // + // capDescriptor is a file descriptor for unix RM clients, but a void + // pointer for windows RM clients. + // + // capDescriptor is transparent to RM clients i.e. RM's user-mode shim + // populates this field on behalf of clients. + // + NV_DECLARE_ALIGNED(NvU64 capDescriptor, 8); +} NVC640_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc640_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc670.h b/src/common/sdk/nvidia/inc/class/clc670.h new file mode 100644 index 000000000..e981a30a0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc670.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _clc670_h_ +#define _clc670_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NVC670_DISPLAY (0x0000C670) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numDsis; // Number of DSIs in this chip/display +} NVC670_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc670_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc671.h b/src/common/sdk/nvidia/inc/class/clc671.h new file mode 100644 index 000000000..11f77d78b --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc671.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc671_h_ +#define _clc671_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC671_DISP_SF_USER (0x000C671) + +typedef volatile struct _clc671_tag0 { + NvU32 dispSfUserOffset[0x400]; +} _NvC671DispSfUser, NvC671DispSfUserMap; + +#define NVC671_SF_HDMI_INFO_IDX_GENERIC_INFOFRAME 0x00000001 /* */ +#define NVC671_SF_HDMI_INFO_IDX_VSI 0x00000004 /* */ +#define NVC671_SF_HDMI_INFO_CTRL(i,j) (0x000E0000-0x000E0000+(i)*1024+(j)*64) /* RW-4A */ +#define NVC671_SF_HDMI_INFO_CTRL__SIZE_1 4 /* */ +#define NVC671_SF_HDMI_INFO_CTRL__SIZE_2 5 /* */ + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif // _clc671_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc673.h b/src/common/sdk/nvidia/inc/class/clc673.h new file mode 100644 index 000000000..7ae133467 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc673.h @@ -0,0 +1,399 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc673_h_ +#define _clc673_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC673_DISP_CAPABILITIES 0xC673 + +typedef volatile struct _clc673_tag0 { + NvU32 dispCapabilities[0x400]; +} _NvC673DispCapabilities,NvC673DispCapabilities_Map ; + + +#define NVC673_SYS_CAP 0x0 /* RW-4R */ +#define NVC673_SYS_CAP_HEAD0_EXISTS 0:0 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD1_EXISTS 1:1 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD2_EXISTS 2:2 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD3_EXISTS 3:3 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD4_EXISTS 4:4 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD5_EXISTS 5:5 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD5_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD6_EXISTS 6:6 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD7_EXISTS 7:7 /* RWIVF */ +#define NVC673_SYS_CAP_HEAD7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_HEAD_EXISTS(i) (0+(i)):(0+(i)) /* RWIVF */ +#define NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1 8 /* */ +#define NVC673_SYS_CAP_HEAD_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_HEAD_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR0_EXISTS 8:8 /* RWIVF */ +#define NVC673_SYS_CAP_SOR0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR1_EXISTS 9:9 /* RWIVF */ +#define NVC673_SYS_CAP_SOR1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR2_EXISTS 10:10 /* RWIVF */ +#define NVC673_SYS_CAP_SOR2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR3_EXISTS 11:11 /* RWIVF */ +#define NVC673_SYS_CAP_SOR3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR4_EXISTS 12:12 /* RWIVF */ +#define NVC673_SYS_CAP_SOR4_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR4_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR5_EXISTS 13:13 /* RWIVF */ +#define NVC673_SYS_CAP_SOR5_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR5_EXISTS_YES 0x00000001 /* RW--V 
*/ +#define NVC673_SYS_CAP_SOR6_EXISTS 14:14 /* RWIVF */ +#define NVC673_SYS_CAP_SOR6_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR6_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR7_EXISTS 15:15 /* RWIVF */ +#define NVC673_SYS_CAP_SOR7_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR7_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_SOR_EXISTS(i) (8+(i)):(8+(i)) /* RWIVF */ +#define NVC673_SYS_CAP_SOR_EXISTS__SIZE_1 8 /* */ +#define NVC673_SYS_CAP_SOR_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_SOR_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI0_EXISTS 20:20 /* RWIVF */ +#define NVC673_SYS_CAP_DSI0_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI0_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI1_EXISTS 21:21 /* RWIVF */ +#define NVC673_SYS_CAP_DSI1_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI1_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI2_EXISTS 22:22 /* RWIVF */ +#define NVC673_SYS_CAP_DSI2_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI2_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI3_EXISTS 23:23 /* RWIVF */ +#define NVC673_SYS_CAP_DSI3_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI3_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_SYS_CAP_DSI_EXISTS(i) (20+(i)):(20+(i)) /* RWIVF */ +#define NVC673_SYS_CAP_DSI_EXISTS__SIZE_1 4 /* */ +#define NVC673_SYS_CAP_DSI_EXISTS_NO 0x00000000 /* RW--V */ +#define NVC673_SYS_CAP_DSI_EXISTS_YES 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA 0x10 /* RW-4R */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRIES 15:0 /* RWIUF */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH 17:16 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_32B 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_64B 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_128B 0x00000002 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_MEMPOOL_ENTRY_WIDTH_256B 0x00000003 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION 18:18 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_ROTATION_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR 19:19 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA 20:20 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION 21:21 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MEMPOOL_COMPRESSION_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG 22:22 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MSCG_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH 23:23 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_MCLK_SWITCH_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT 26:26 /* RWIVF */ +#define 
NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_SUPPORT_LATENCY_EVENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION 31:30 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_32B 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_64B 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_128B 0x00000002 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPA_REQUEST_SIZE_PER_LINE_NON_ROTATION_256B 0x00000003 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC 0x18 /* RW-4R */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE 1:0 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_32B 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_64B 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_128B 0x00000002 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_PITCH_REQUEST_SIZE_256B 0x00000003 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED 6:4 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_NONE 0x00000000 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_TWO 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_FOUR 0x00000002 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_EIGHT 0x00000003 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_MAX_LINES_BUFFERED_SIXTEEN 0x00000004 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR 11:11 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_FALSE 0x00000000 /* RWI-V */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_SEMI_PLANAR_TRUE 0x00000001 /* RW--V */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP 12:12 /* RWIVF */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_FALSE 0x00000000 /* RWI-V */ +#define NVC673_IHUB_COMMON_CAPC_SUPPORT_HOR_VER_FLIP_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA(i) (0x680+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT 16:16 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC0_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT 17:17 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OCSC1_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT 18:18 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT 19:19 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_HCLPF_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT 20:20 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DTH_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT 21:21 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_TRUE 
0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_OSCAN_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT 22:22 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_DSC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT 23:23 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_VFILTER_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT 24:24 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPA_RCRC_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB(i) (0x684+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA 0:0 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_VGA_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGSZ 9:6 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_LOGNR 12:10 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT 15:15 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPB_OLUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC(i) (0x688+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_PRECISION 4:0 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC0_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_PRECISION 12:8 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP 13:13 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_OCSC1_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_SF_PRECISION 20:16 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_CI_PRECISION 24:21 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB 25:25 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPC_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPD(i) (0x68c+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPD__SIZE_1 8 /* */ +#define 
NVC673_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPD_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE(i) (0x690+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_RATEBUFSIZE 3:0 /* RWIUF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_LINEBUFSIZE 13:8 /* RWIUF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422 16:16 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE422_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420 17:17 /* RWIVF */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_TRUE 0x00000001 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPE_DSC_NATIVE420_FALSE 0x00000000 /* RW--V */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPF(i) (0x694+(i)*32) /* RW-4A */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPF__SIZE_1 8 /* */ +#define NVC673_POSTCOMP_HEAD_HDR_CAPF_VFILTER_MAX_PIXELS 15:0 /* RWIVF */ +#define NVC673_SOR_CAP(i) (0x144+(i)*8) /* RW-4A */ +#define NVC673_SOR_CAP__SIZE_1 8 /* */ +#define NVC673_SOR_CAP_SINGLE_LVDS_18 0:0 /* RWIVF */ +#define NVC673_SOR_CAP_SINGLE_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_LVDS_24 1:1 /* RWIVF */ +#define NVC673_SOR_CAP_SINGLE_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_LVDS_18 2:2 /* RWIVF */ +#define NVC673_SOR_CAP_DUAL_LVDS_18_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_LVDS_18_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_LVDS_24 3:3 /* RWIVF */ +#define NVC673_SOR_CAP_DUAL_LVDS_24_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_LVDS_24_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_TMDS_A 8:8 /* RWIVF */ +#define NVC673_SOR_CAP_SINGLE_TMDS_A_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_TMDS_A_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_TMDS_B 9:9 /* RWIVF */ +#define NVC673_SOR_CAP_SINGLE_TMDS_B_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SINGLE_TMDS_B_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_TMDS 11:11 /* RWIVF */ +#define NVC673_SOR_CAP_DUAL_TMDS_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DUAL_TMDS_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE 13:13 /* RWIVF */ +#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DISPLAY_OVER_PCIE_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_SDI 16:16 /* RWIVF */ +#define NVC673_SOR_CAP_SDI_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_SDI_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DP_A 24:24 /* RWIVF */ +#define NVC673_SOR_CAP_DP_A_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DP_A_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DP_B 25:25 /* RWIVF */ +#define NVC673_SOR_CAP_DP_B_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DP_B_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DP_INTERLACE 26:26 /* RWIVF */ +#define NVC673_SOR_CAP_DP_INTERLACE_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DP_INTERLACE_TRUE 0x00000001 /* RW--V */ +#define NVC673_SOR_CAP_DP_8_LANES 27:27 /* RWIVF */ +#define NVC673_SOR_CAP_DP_8_LANES_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_DP_8_LANES_TRUE 0x00000001 /* RW--V */ +#define 
NVC673_SOR_CAP_HDMI_FRL 28:28 /* RWIVF */ +#define NVC673_SOR_CAP_HDMI_FRL_FALSE 0x00000000 /* RW--V */ +#define NVC673_SOR_CAP_HDMI_FRL_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA(i) (0x780+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_FULL_WIDTH 4:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_UNIT_WIDTH 9:5 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_ALPHA_WIDTH 13:10 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT 16:16 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC00_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT 17:17 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC0LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT 18:18 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC01_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT 19:19 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_SCLR_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT 20:20 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_TMO_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT 21:21 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_GMA_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT 22:22 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC10_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT 23:23 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC1LUT_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT 24:24 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPA_CSC11_PRESENT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB(i) (0x784+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_FMT_PRECISION 4:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGSZ 9:6 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_LOGNR 12:10 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT 15:15 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPB_ILUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC(i) 
(0x788+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_PRECISION 4:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC00_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_LOGNR 12:10 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT 15:15 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC0LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_PRECISION 20:16 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPC_CSC01_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD(i) (0x78c+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGSZ 3:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_LOGNR 6:4 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD 8:8 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT 9:9 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_TMO_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_SF_PRECISION 16:12 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_CI_PRECISION 20:17 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB 21:21 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_EXT_RGB_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA 22:22 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_EXT_ALPHA_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR 28:28 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_VS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR 30:30 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_2X 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPD_SCLR_HS_MAX_SCALE_FACTOR_4X 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE(i) (0x790+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_PRECISION 4:0 /* RWIVF */ +#define 
NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP 5:5 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC10_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGSZ 9:6 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_LOGNR 12:10 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD 14:14 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_SFCLOAD_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT 15:15 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC1LUT_DIRECT_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_PRECISION 20:16 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP 21:21 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_TRUE 0x00000001 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPE_CSC11_UNITY_CLAMP_FALSE 0x00000000 /* RW--V */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF(i) (0x794+(i)*32) /* RW-4A */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF__SIZE_1 32 /* */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_2TAP 15:0 /* RWIVF */ +#define NVC673_PRECOMP_WIN_PIPE_HDR_CAPF_VSCLR_MAX_PIXELS_5TAP 31:16 /* RWIVF */ + +#ifdef __cplusplus +}; +#endif /* extern C */ +#endif //_clc673_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc67a.h b/src/common/sdk/nvidia/inc/class/clc67a.h new file mode 100644 index 000000000..ab6f1d447 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc67a.h @@ -0,0 +1,181 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + + +#ifndef _clc67a__h_ +#define _clc67a__h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC67A_CURSOR_IMM_CHANNEL_PIO (0x0000C67A) + +typedef volatile struct _clc67a_tag0 { + NvV32 Reserved00[0x2]; + NvV32 Free; // 0x00000008 - 0x0000000B + NvV32 Reserved01[0x7D]; + NvV32 Update; // 0x00000200 - 0x00000203 + NvV32 SetInterlockFlags; // 0x00000204 - 0x00000207 + NvV32 SetCursorHotSpotPointOut[2]; // 0x00000208 - 0x0000020F + NvV32 SetWindowInterlockFlags; // 0x00000210 - 0x00000213 + NvV32 Reserved02[0x37B]; +} NVC67ADispCursorImmControlPio; + +#define NVC67A_FREE (0x00000008) +#define NVC67A_FREE_COUNT 5:0 +#define NVC67A_UPDATE (0x00000200) +#define NVC67A_SET_INTERLOCK_FLAGS (0x00000204) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC67A_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT_X 15:0 +#define NVC67A_SET_CURSOR_HOT_SPOT_POINT_OUT_Y 31:16 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS (0x00000210) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define 
NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define 
NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) 
+#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC67A_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc67a_h + diff --git a/src/common/sdk/nvidia/inc/class/clc67b.h b/src/common/sdk/nvidia/inc/class/clc67b.h new file mode 100644 index 000000000..c9779a042 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc67b.h @@ -0,0 +1,66 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + + +#ifndef _clC67b_h_ +#define _clC67b_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC67B_WINDOW_IMM_CHANNEL_DMA (0x0000C67B) + +// dma opcode instructions +#define NVC67B_DMA +#define NVC67B_DMA_OPCODE 31:29 +#define NVC67B_DMA_OPCODE_METHOD 0x00000000 +#define NVC67B_DMA_OPCODE_JUMP 0x00000001 +#define NVC67B_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC67B_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC67B_DMA_METHOD_COUNT 27:18 +#define NVC67B_DMA_METHOD_OFFSET 13:2 +#define NVC67B_DMA_DATA 31:0 +#define NVC67B_DMA_DATA_NOP 0x00000000 +#define NVC67B_DMA_JUMP_OFFSET 11:2 +#define NVC67B_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC67B_PUT (0x00000000) +#define NVC67B_PUT_PTR 9:0 +#define NVC67B_GET (0x00000004) +#define NVC67B_GET_PTR 9:0 +#define NVC67B_UPDATE (0x00000200) +#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW 1:1 +#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC67B_UPDATE_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC67B_SET_POINT_OUT(b) (0x00000208 + (b)*0x00000004) +#define NVC67B_SET_POINT_OUT_X 15:0 +#define NVC67B_SET_POINT_OUT_Y 31:16 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC67b_h diff --git a/src/common/sdk/nvidia/inc/class/clc67d.h b/src/common/sdk/nvidia/inc/class/clc67d.h new file mode 100644 index 000000000..dd2a4f646 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc67d.h @@ -0,0 +1,1339 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + + +#ifndef _clC67d_h_ +#define _clC67d_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC67D_CORE_CHANNEL_DMA (0x0000C67D) + +#define NV_DISP_NOTIFIER 0x00000000 +#define NV_DISP_NOTIFIER_SIZEOF 0x00000010 +#define NV_DISP_NOTIFIER__0 0x00000000 +#define NV_DISP_NOTIFIER__0_PRESENT_COUNT 7:0 +#define NV_DISP_NOTIFIER__0_FIELD 8:8 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE 9:9 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_NON_TEARING 0x00000000 +#define NV_DISP_NOTIFIER__0_FLIP_TYPE_IMMEDIATE 0x00000001 +#define NV_DISP_NOTIFIER__0_R1 15:10 +#define NV_DISP_NOTIFIER__0_R2 23:16 +#define NV_DISP_NOTIFIER__0_R3 29:24 +#define NV_DISP_NOTIFIER__0_STATUS 31:30 +#define NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN 0x00000000 +#define NV_DISP_NOTIFIER__0_STATUS_BEGUN 0x00000001 +#define NV_DISP_NOTIFIER__0_STATUS_FINISHED 0x00000002 +#define NV_DISP_NOTIFIER__1 0x00000001 +#define NV_DISP_NOTIFIER__1_R4 31:0 +#define NV_DISP_NOTIFIER__2 0x00000002 +#define NV_DISP_NOTIFIER__2_TIMESTAMP_LO 31:0 +#define NV_DISP_NOTIFIER__3 0x00000003 +#define NV_DISP_NOTIFIER__3_TIMESTAMP_HI 31:0 + + +// dma opcode instructions +#define NVC67D_DMA +#define NVC67D_DMA_OPCODE 31:29 +#define NVC67D_DMA_OPCODE_METHOD 0x00000000 +#define NVC67D_DMA_OPCODE_JUMP 0x00000001 +#define NVC67D_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC67D_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC67D_DMA_METHOD_COUNT 27:18 +#define NVC67D_DMA_METHOD_OFFSET 13:2 +#define NVC67D_DMA_DATA 31:0 +#define NVC67D_DMA_DATA_NOP 0x00000000 +#define NVC67D_DMA_JUMP_OFFSET 11:2 +#define NVC67D_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// if cap SUPPORT_FLEXIBLE_WIN_MAPPING is FALSE, this define can be used to obtain which head a window is mapped to +#define NVC37D_WINDOW_MAPPED_TO_HEAD(w) ((w)>>1) +#define NVC37D_GET_VALID_WINDOWMASK_FOR_HEAD(h) ((1<<((h)*2)) | (1<<((h)*2+1))) + +// class methods +#define NVC67D_PUT (0x00000000) +#define NVC67D_PUT_PTR 9:0 +#define NVC67D_GET (0x00000004) +#define NVC67D_GET_PTR 9:0 +#define NVC67D_UPDATE (0x00000200) +#define NVC67D_UPDATE_SPECIAL_HANDLING 21:20 +#define NVC67D_UPDATE_SPECIAL_HANDLING_NONE (0x00000000) +#define NVC67D_UPDATE_SPECIAL_HANDLING_INTERRUPT_RM (0x00000001) +#define NVC67D_UPDATE_SPECIAL_HANDLING_MODE_SWITCH (0x00000002) +#define NVC67D_UPDATE_SPECIAL_HANDLING_REASON 19:12 +#define NVC67D_UPDATE_INHIBIT_INTERRUPTS 24:24 +#define 
NVC67D_UPDATE_INHIBIT_INTERRUPTS_FALSE (0x00000000) +#define NVC67D_UPDATE_INHIBIT_INTERRUPTS_TRUE (0x00000001) +#define NVC67D_UPDATE_RELEASE_ELV 0:0 +#define NVC67D_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC67D_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC67D_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC67D_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67D_SET_CONTEXT_DMA_NOTIFIER (0x00000208) +#define NVC67D_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC67D_SET_NOTIFIER_CONTROL (0x0000020C) +#define NVC67D_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC67D_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC67D_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67D_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY 12:12 +#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY_DISABLE (0x00000000) +#define NVC67D_SET_NOTIFIER_CONTROL_NOTIFY_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL (0x00000210) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN(i) ((i)+0):((i)+0) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN__SIZE_1 4 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0 0:0 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN0_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN1 1:1 +#define 
NVC67D_SET_CONTROL_FLIP_LOCK_PIN1_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN1_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2 2:2 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN2_ENABLE (0x00000001) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3 3:3 +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3_DISABLE (0x00000000) +#define NVC67D_SET_CONTROL_FLIP_LOCK_PIN3_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS (0x00000218) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+0):((i)+0) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 0:0 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 1:1 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 2:2 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 3:3 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 4:4 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 5:5 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 6:6 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 7:7 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 16:16 +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC67D_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS (0x0000021C) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define 
NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define 
NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) 
+#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC67D_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC67D_GET_RG_SCAN_LINE(b) (0x00000220 + (b)*0x00000004) +#define NVC67D_GET_RG_SCAN_LINE_LINE 15:0 +#define NVC67D_GET_RG_SCAN_LINE_VBLANK 16:16 +#define NVC67D_GET_RG_SCAN_LINE_VBLANK_FALSE (0x00000000) +#define NVC67D_GET_RG_SCAN_LINE_VBLANK_TRUE (0x00000001) +#define NVC67D_SET_GET_BLANKING_CTRL(b) (0x00000240 + (b)*0x00000004) +#define NVC67D_SET_GET_BLANKING_CTRL_BLANK 0:0 +#define NVC67D_SET_GET_BLANKING_CTRL_BLANK_NO_CHANGE (0x00000000) +#define NVC67D_SET_GET_BLANKING_CTRL_BLANK_ENABLE (0x00000001) +#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK 1:1 +#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK_NO_CHANGE (0x00000000) +#define NVC67D_SET_GET_BLANKING_CTRL_UNBLANK_ENABLE (0x00000001) + +#define NVC67D_SOR_SET_CONTROL(a) (0x00000300 + (a)*0x00000020) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK 7:0 +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC67D_SOR_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC67D_SOR_SET_CONTROL_PROTOCOL 11:8 +#define NVC67D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM (0x00000000) +#define NVC67D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A (0x00000001) +#define NVC67D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B (0x00000002) +#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS (0x00000005) +#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DP_A (0x00000008) +#define NVC67D_SOR_SET_CONTROL_PROTOCOL_DP_B (0x00000009) +#define NVC67D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL (0x0000000C) +#define NVC67D_SOR_SET_CONTROL_PROTOCOL_CUSTOM (0x0000000F) +#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY 16:16 +#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC67D_SOR_SET_CONTROL_DE_SYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE 21:20 +#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC67D_SOR_SET_CONTROL_PIXEL_REPLICATE_MODE_X4 (0x00000002) +#define NVC67D_SOR_SET_CUSTOM_REASON(a) (0x00000304 + (a)*0x00000020) +#define NVC67D_SOR_SET_CUSTOM_REASON_CODE 31:0 +#define NVC67D_SOR_SET_SW_SPARE_A(a) (0x00000308 + (a)*0x00000020) +#define NVC67D_SOR_SET_SW_SPARE_A_CODE 31:0 +#define NVC67D_SOR_SET_SW_SPARE_B(a) (0x0000030C + (a)*0x00000020) +#define NVC67D_SOR_SET_SW_SPARE_B_CODE 31:0 + +#define NVC67D_DSI_SET_CONTROL(a) (0x00000500 + (a)*0x00000020) +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK 7:0 +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_NONE (0x00000000) +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD0 (0x00000001) +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD1 (0x00000002) +#define 
NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD2 (0x00000004) +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD3 (0x00000008) +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD4 (0x00000010) +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD5 (0x00000020) +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD6 (0x00000040) +#define NVC67D_DSI_SET_CONTROL_OWNER_MASK_HEAD7 (0x00000080) +#define NVC67D_DSI_SET_CUSTOM_REASON(a) (0x00000504 + (a)*0x00000020) +#define NVC67D_DSI_SET_CUSTOM_REASON_CODE 31:0 +#define NVC67D_DSI_SET_SW_SPARE_A(a) (0x00000508 + (a)*0x00000020) +#define NVC67D_DSI_SET_SW_SPARE_A_CODE 31:0 +#define NVC67D_DSI_SET_SW_SPARE_B(a) (0x0000050C + (a)*0x00000020) +#define NVC67D_DSI_SET_SW_SPARE_B_CODE 31:0 + +#define NVC67D_WINDOW_SET_CONTROL(a) (0x00001000 + (a)*0x00000080) +#define NVC67D_WINDOW_SET_CONTROL_OWNER 3:0 +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD(i) (0x00000000 +(i)) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD__SIZE_1 8 +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD0 (0x00000000) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD1 (0x00000001) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD2 (0x00000002) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD3 (0x00000003) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD4 (0x00000004) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD5 (0x00000005) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD6 (0x00000006) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_HEAD7 (0x00000007) +#define NVC67D_WINDOW_SET_CONTROL_OWNER_NONE (0x0000000F) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(a) (0x00001004 + (a)*0x00000080) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE 
(0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS(a) (0x00001008 + (a)*0x00000080) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP 0:0 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED1BPP_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP 1:1 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED2BPP_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP 2:2 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED4BPP_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP 3:3 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_FALSE (0x00000000) +#define 
NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_RGB_PACKED8BPP_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422 4:4 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PACKED422_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420 5:5 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR420_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444 6:6 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_PLANAR444_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420 7:7 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422 8:8 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R 9:9 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444 10:10 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420 11:11 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR420_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444 12:12 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_PLANAR444_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420 13:13 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR420_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422 14:14 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R 15:15 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR422R_TRUE (0x00000001) +#define 
NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444 16:16 +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_ROTATED_FORMAT_USAGE_BOUNDS_EXT_YUV_SEMI_PLANAR444_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(a) (0x0000100C + (a)*0x00000080) +#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC67D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS(a) (0x00001010 + (a)*0x00000080) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_MAX_PIXELS_FETCHED_PER_LINE 14:0 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED 16:16 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_ILUT_ALLOWED_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED 28:28 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_TMO_LUT_ALLOWED_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS 22:20 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_INPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED 24:24 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED 30:30 +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000) +#define NVC67D_WINDOW_SET_WINDOW_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001) + +#define NVC67D_HEAD_SET_PROCAMP(a) (0x00002000 + (a)*0x00000400) +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE 1:0 +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB (0x00000000) +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601 (0x00000001) +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709 (0x00000002) +#define NVC67D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_2020 (0x00000003) +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF 3:3 +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_LPF_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V 4:4 +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE 28:28 +#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_VESA (0x00000000) +#define NVC67D_HEAD_SET_PROCAMP_DYNAMIC_RANGE_CEA (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(a) (0x00002004 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE 1:0 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY 2:2 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_HSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY 3:3 +#define 
NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_POSITIVE_TRUE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_VSYNC_POLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH 7:4 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000004) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000005) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000006) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000007) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000008) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_444 (0x00000009) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444NP (0x0000000A) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE 24:24 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_OVERRIDE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_COLOR_SPACE_FLAG 23:12 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN 31:26 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN0 (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN1 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN2 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN3 (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN4 (0x00000004) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN5 (0x00000005) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN6 (0x00000006) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN7 (0x00000007) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN8 (0x00000008) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN9 (0x00000009) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN10 (0x0000000A) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN11 (0x0000000B) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN12 (0x0000000C) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN13 (0x0000000D) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN14 (0x0000000E) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN15 (0x0000000F) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN16 (0x00000010) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN17 (0x00000011) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN18 (0x00000012) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN19 (0x00000013) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN20 (0x00000014) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN21 (0x00000015) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN22 (0x00000016) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN23 
(0x00000017) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN24 (0x00000018) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN25 (0x00000019) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN26 (0x0000001A) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN27 (0x0000001B) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN28 (0x0000001C) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN29 (0x0000001D) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN30 (0x0000001E) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_WIN31 (0x0000001F) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_EXT_PACKET_WIN_NONE (0x0000003F) +#define NVC67D_HEAD_SET_CONTROL(a) (0x00002008 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CONTROL_STRUCTURE 1:0 +#define NVC67D_HEAD_SET_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE 2:2 +#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_NORMAL (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_STEREO3D_STRUCTURE_FRAME_PACKED (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER 3:3 +#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_YUV420PACKER_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE 11:10 +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN 8:4 +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define 
NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_LOCKOUT_WINDOW 15:12 +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE 23:22 +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN 20:16 +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define 
NVC67D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN 28:24 +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_A (0x0000000B) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN_F (0x00000010) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC67D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE 30:30 +#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE 31:31 +#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(a) (0x0000200C + (a)*0x00000400) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_HERTZ 30:0 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001 31:31 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC67D_HEAD_SET_PIXEL_REORDER_CONTROL(a) (0x00002010 + (a)*0x00000400) +#define NVC67D_HEAD_SET_PIXEL_REORDER_CONTROL_BANK_WIDTH 13:0 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER(a) (0x00002014 + (a)*0x00000400) +#define 
NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC67D_HEAD_SET_DITHER_CONTROL(a) (0x00002018 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_DITHER_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS 5:4 +#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_6_BITS (0x00000000) +#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_8_BITS (0x00000001) +#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_10_BITS (0x00000002) +#define NVC67D_HEAD_SET_DITHER_CONTROL_BITS_TO_12_BITS (0x00000003) +#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE 2:2 +#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_DITHER_CONTROL_OFFSET_ENABLE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE 10:8 +#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_ERR_ACC (0x00000000) +#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_ERR_ACC (0x00000001) +#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_DYNAMIC_2X2 (0x00000002) +#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_STATIC_2X2 (0x00000003) +#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_TEMPORAL (0x00000004) +#define NVC67D_HEAD_SET_DITHER_CONTROL_MODE_ROUND (0x00000005) +#define NVC67D_HEAD_SET_DITHER_CONTROL_PHASE 13:12 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(a) (0x0000201C + (a)*0x00000400) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER 0:0 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_FALSE (0x00000000) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_NOT_DRIVER_TRUE (0x00000001) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING 4:4 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE 9:8 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_VBLANK (0x00000000) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION_HOPPING_MODE_HBLANK (0x00000001) +#define NVC67D_HEAD_SET_DISPLAY_ID(a,b) (0x00002020 + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_DISPLAY_ID_CODE 31:0 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(a) (0x00002028 + (a)*0x00000400) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_HERTZ 30:0 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001 31:31 +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_FALSE (0x00000000) +#define NVC67D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX_ADJ1000DIV1001_TRUE (0x00000001) +#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(a) (0x0000202C + (a)*0x00000400) +#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_HORIZONTAL 15:0 +#define NVC67D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR_VERTICAL 31:16 +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS(a) (0x00002030 + (a)*0x00000400) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR 2:0 +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_NONE (0x00000000) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W32_H32 (0x00000001) +#define 
NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W64_H64 (0x00000002) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W128_H128 (0x00000003) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_CURSOR_USAGE_W256_H256 (0x00000004) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED 4:4 +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_FALSE (0x00000000) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OLUT_ALLOWED_TRUE (0x00000001) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS 14:12 +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_2 (0x00000001) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OUTPUT_SCALER_TAPS_TAPS_5 (0x00000004) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED 8:8 +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_FALSE (0x00000000) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_UPSCALING_ALLOWED_TRUE (0x00000001) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED 16:16 +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_FALSE (0x00000000) +#define NVC67D_HEAD_SET_HEAD_USAGE_BOUNDS_OVERFETCH_ENABLED_TRUE (0x00000001) +#define NVC67D_HEAD_SET_STALL_LOCK(a) (0x00002034 + (a)*0x00000400) +#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE 0:0 +#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE_FALSE (0x00000000) +#define NVC67D_HEAD_SET_STALL_LOCK_ENABLE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_STALL_LOCK_MODE 2:2 +#define NVC67D_HEAD_SET_STALL_LOCK_MODE_CONTINUOUS (0x00000000) +#define NVC67D_HEAD_SET_STALL_LOCK_MODE_ONE_SHOT (0x00000001) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN 8:4 +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define 
NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC67D_HEAD_SET_STALL_LOCK_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE 12:12 +#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_CRASH_LOCK (0x00000000) +#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_MODE_LINE_LOCK (0x00000001) +#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY 14:14 +#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY_POSITIVE_TRUE (0x00000000) +#define NVC67D_HEAD_SET_STALL_LOCK_TEPOLARITY_NEGATIVE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_STALL_LOCK_UNSTALL_SYNC_ADVANCE 25:16 +#define NVC67D_HEAD_SET_LOCK_CHAIN(a) (0x00002044 + (a)*0x00000400) +#define NVC67D_HEAD_SET_LOCK_CHAIN_POSITION 3:0 +#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN(a) (0x00002048 + (a)*0x00000400) +#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN_X 14:0 +#define NVC67D_HEAD_SET_VIEWPORT_POINT_IN_Y 30:16 +#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN(a) (0x0000204C + (a)*0x00000400) +#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN_WIDTH 14:0 +#define NVC67D_HEAD_SET_VIEWPORT_SIZE_IN_HEIGHT 30:16 +#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT(a) (0x00002058 + (a)*0x00000400) +#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT_WIDTH 14:0 +#define NVC67D_HEAD_SET_VIEWPORT_SIZE_OUT_HEIGHT 30:16 +#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(a) (0x0000205C + (a)*0x00000400) +#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_X 15:0 +#define NVC67D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST_Y 31:16 +#define NVC67D_HEAD_SET_RASTER_SIZE(a) (0x00002064 + (a)*0x00000400) +#define NVC67D_HEAD_SET_RASTER_SIZE_WIDTH 14:0 +#define NVC67D_HEAD_SET_RASTER_SIZE_HEIGHT 30:16 +#define NVC67D_HEAD_SET_RASTER_SYNC_END(a) (0x00002068 + (a)*0x00000400) +#define NVC67D_HEAD_SET_RASTER_SYNC_END_X 14:0 +#define NVC67D_HEAD_SET_RASTER_SYNC_END_Y 30:16 +#define NVC67D_HEAD_SET_RASTER_BLANK_END(a) (0x0000206C + (a)*0x00000400) +#define NVC67D_HEAD_SET_RASTER_BLANK_END_X 14:0 +#define NVC67D_HEAD_SET_RASTER_BLANK_END_Y 30:16 +#define NVC67D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) +#define NVC67D_HEAD_SET_RASTER_BLANK_START_X 14:0 +#define NVC67D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC67D_HEAD_SET_OVERSCAN_COLOR(a) (0x00002078 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OVERSCAN_COLOR_RED_CR 9:0 +#define NVC67D_HEAD_SET_OVERSCAN_COLOR_GREEN_Y 19:10 +#define NVC67D_HEAD_SET_OVERSCAN_COLOR_BLUE_CB 29:20 +#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(a) (0x0000207C + (a)*0x00000400) +#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_RED_CR 9:0 +#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_GREEN_Y 19:10 +#define NVC67D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR_BLUE_CB 29:20 +#define NVC67D_HEAD_SET_HDMI_CTRL(a) (0x00002080 + (a)*0x00000400) +#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT 2:0 +#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_NORMAL (0x00000000) +#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_EXTENDED (0x00000001) +#define NVC67D_HEAD_SET_HDMI_CTRL_VIDEO_FORMAT_STEREO3D (0x00000002) +#define NVC67D_HEAD_SET_HDMI_CTRL_HDMI_VIC 11:4 +#define NVC67D_HEAD_SET_CONTEXT_DMA_CURSOR(a,b) (0x00002088 + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 +#define 
NVC67D_HEAD_SET_OFFSET_CURSOR(a,b) (0x00002090 + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR(a) (0x00002098 + (a)*0x00000400) +#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE 0:0 +#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_MONO (0x00000000) +#define NVC67D_HEAD_SET_PRESENT_CONTROL_CURSOR_MODE_STEREO (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_CURSOR(a) (0x0000209C + (a)*0x00000400) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE 31:31 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_ENABLE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT 7:0 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE 9:8 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W32_H32 (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W64_H64 (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W128_H128 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_SIZE_W256_H256 (0x00000003) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_X 19:12 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_HOT_SPOT_Y 27:20 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(a) (0x000020A0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_K1 7:0 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT 11:8 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_CURSOR_COLOR_FACTOR_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT 15:12 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_ZERO (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_K1 (0x00000002) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_VIEWPORT_COLOR_FACTOR_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE 16:16 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_BLEND (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_MODE_XOR (0x00000001) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS 20:20 +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_BYPASS_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC67D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0 +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008) +#define 
NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F) +#define NVC67D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020) +#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NVC67D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC67D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define 
NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC67D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_PRESENT_CONTROL(a) (0x0000218C + (a)*0x00000400) +#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD 0:0 +#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_PRESENT_CONTROL_USE_BEGIN_FIELD_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_PRESENT_CONTROL_BEGIN_FIELD 6:4 +#define NVC67D_HEAD_SET_SW_SPARE_A(a) (0x00002194 + (a)*0x00000400) +#define NVC67D_HEAD_SET_SW_SPARE_A_CODE 31:0 +#define NVC67D_HEAD_SET_SW_SPARE_B(a) (0x00002198 + (a)*0x00000400) +#define NVC67D_HEAD_SET_SW_SPARE_B_CODE 31:0 +#define NVC67D_HEAD_SET_SW_SPARE_C(a) (0x0000219C + (a)*0x00000400) +#define NVC67D_HEAD_SET_SW_SPARE_C_CODE 31:0 +#define NVC67D_HEAD_SET_SW_SPARE_D(a) (0x000021A0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_SW_SPARE_D_CODE 31:0 +#define NVC67D_HEAD_SET_DISPLAY_RATE(a) (0x000021A8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE 0:0 +#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE_CONTINUOUS (0x00000000) +#define NVC67D_HEAD_SET_DISPLAY_RATE_RUN_MODE_ONE_SHOT (0x00000001) +#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL 25:4 +#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH 2:2 +#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE(a,b) (0x000021AC + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE_HANDLE 31:0 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(a,b) (0x000021CC + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE 10:10 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_ONE_TIME (0x00000000) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RUN_MODE_CONTINUOUS (0x00000001) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL_RASTER_LINE 30:16 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE(a,b) (0x000021EC + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_VALUE 31:0 +#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(a) (0x00002214 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC67D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC67D_HEAD_SET_MIN_FRAME_IDLE(a) (0x00002218 + (a)*0x00000400) +#define NVC67D_HEAD_SET_MIN_FRAME_IDLE_LEADING_RASTER_LINES 14:0 +#define NVC67D_HEAD_SET_MIN_FRAME_IDLE_TRAILING_RASTER_LINES 30:16 +#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(a) (0x00002220 + 
(a)*0x00000400) +#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_ALPHA 7:0 +#define NVC67D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED_RED 31:16 +#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(a) (0x00002224 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_GREEN 15:0 +#define NVC67D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE_BLUE 31:16 +#define NVC67D_HEAD_SET_CURSOR_COLOR_NORM_SCALE(a) (0x00002228 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CURSOR_COLOR_NORM_SCALE_VALUE 15:0 +#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR(a) (0x0000222C + (a)*0x00000400) +#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_LOG2PEAK_LUMINANCE 3:0 +#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_S1 16:4 +#define NVC67D_HEAD_SET_XOR_BLEND_FACTOR_S2 30:18 +#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN(a) (0x00002238 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN_LOW 11:0 +#define NVC67D_HEAD_SET_CLAMP_RANGE_GREEN_HIGH 27:16 +#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE(a) (0x0000223C + (a)*0x00000400) +#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE_LOW 11:0 +#define NVC67D_HEAD_SET_CLAMP_RANGE_RED_BLUE_HIGH 27:16 +#define NVC67D_HEAD_SET_OCSC0CONTROL(a) (0x00002240 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_OCSC0CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C00(a) (0x00002244 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C00_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C01(a) (0x00002248 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C01_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C02(a) (0x0000224C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C02_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C03(a) (0x00002250 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C03_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C10(a) (0x00002254 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C10_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C11(a) (0x00002258 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C11_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C12(a) (0x0000225C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C12_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C13(a) (0x00002260 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C13_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C20(a) (0x00002264 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C20_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C21(a) (0x00002268 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C21_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C22(a) (0x0000226C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C22_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C23(a) (0x00002270 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC0COEFFICIENT_C23_VALUE 20:0 +#define NVC67D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 +#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR 1:1 +#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE 3:2 +#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define 
NVC67D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC67D_HEAD_SET_OLUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC67D_HEAD_SET_OLUT_CONTROL_SIZE 18:8 +#define NVC67D_HEAD_SET_OLUT_FP_NORM_SCALE(a) (0x00002284 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OLUT_FP_NORM_SCALE_VALUE 31:0 +#define NVC67D_HEAD_SET_CONTEXT_DMA_OLUT(a) (0x00002288 + (a)*0x00000400) +#define NVC67D_HEAD_SET_CONTEXT_DMA_OLUT_HANDLE 31:0 +#define NVC67D_HEAD_SET_OFFSET_OLUT(a) (0x0000228C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OFFSET_OLUT_ORIGIN 31:0 +#define NVC67D_HEAD_SET_OCSC1CONTROL(a) (0x0000229C + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_OCSC1CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C00(a) (0x000022A0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C00_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C01(a) (0x000022A4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C01_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C02(a) (0x000022A8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C02_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C03(a) (0x000022AC + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C03_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C10(a) (0x000022B0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C10_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C11(a) (0x000022B4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C11_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C12(a) (0x000022B8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C12_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C13(a) (0x000022BC + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C13_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C20(a) (0x000022C0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C20_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C21(a) (0x000022C4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C21_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C22(a) (0x000022C8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C22_VALUE 20:0 +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C23(a) (0x000022CC + (a)*0x00000400) +#define NVC67D_HEAD_SET_OCSC1COEFFICIENT_C23_VALUE 20:0 +#define NVC67D_HEAD_SET_TILE_POSITION(a) (0x000022D0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_TILE_POSITION_X 2:0 +#define NVC67D_HEAD_SET_TILE_POSITION_Y 6:4 +#define NVC67D_HEAD_SET_DSC_CONTROL(a) (0x000022D4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_MODE 2:1 +#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_SINGLE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_DUAL (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_MODE_QUAD (0x00000002) +#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET 3:3 +#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_AUTO_RESET_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION 4:4 +#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_FULL_ICH_ERR_PRECISION_ENABLE (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET 5:5 +#define 
NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_FALSE (0x00000000) +#define NVC67D_HEAD_SET_DSC_CONTROL_FORCE_ICH_RESET_TRUE (0x00000001) +#define NVC67D_HEAD_SET_DSC_CONTROL_FLATNESS_DET_THRESH 15:6 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL(a) (0x000022D8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE 0:0 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_FALSE (0x00000000) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_ENABLE_TRUE (0x00000001) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION 1:1 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_SIZE 9:2 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY 10:10 +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC67D_HEAD_SET_DSC_PPS_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC67D_HEAD_SET_DSC_PPS_HEAD(a) (0x000022DC + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE0 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE1 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE2 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_HEAD_BYTE3 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0(a) (0x000022E0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MINOR 3:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_DSC_VERSION_MAJOR 7:4 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_PPS_IDENTIFIER 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_RESERVED 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_LINEBUF_DEPTH 27:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA0_BITS_PER_COMPONENT 31:28 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1(a) (0x000022E4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_HIGH 1:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_VBR_ENABLE 2:2 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_SIMPLE422 3:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_CONVERT_RGB 4:4 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BLOCK_PRED_ENABLE 5:5 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_RESERVED 7:6 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_BITS_PER_PIXEL_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA1_PIC_HEIGHT_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA2(a) (0x000022E8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA2_PIC_WIDTH_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA2_SLICE_HEIGHT_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA3(a) (0x000022EC + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA3_SLICE_WIDTH_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA3_CHUNK_SIZE_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4(a) (0x000022F0 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_HIGH 1:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_RESERVED 7:2 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_XMIT_DELAY_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA4_INITIAL_DEC_DELAY_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5(a) (0x000022F4 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_RESERVED0 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_INITIAL_SCALE_VALUE 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_RESERVED1 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_HIGH 23:16 +#define 
NVC67D_HEAD_SET_DSC_PPS_DATA5_SCALE_INCREMENT_INTERVAL_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6(a) (0x000022F8 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_HIGH 3:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED0 7:4 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_SCALE_DECREMENT_INTERVAL_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED1 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_FIRST_LINE_BPG_OFFSET 28:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA6_RESERVED2 31:29 +#define NVC67D_HEAD_SET_DSC_PPS_DATA7(a) (0x000022FC + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA7_NFL_BPG_OFFSET_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA7_SLICE_BPG_OFFSET_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA8(a) (0x00002300 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA8_INITIAL_OFFSET_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA8_FINAL_OFFSET_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9(a) (0x00002304 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MIN_QP 4:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RESERVED0 7:5 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_FLATNESS_MAX_QP 12:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RESERVED1 15:13 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA9_RC_MODEL_SIZE_LOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10(a) (0x00002308 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_EDGE_FACTOR 3:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED0 7:4 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT0 12:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED1 15:13 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_QUANT_INCR_LIMIT1 20:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RESERVED2 23:21 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_LO 27:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA10_RC_TGT_OFFSET_HI 31:28 +#define NVC67D_HEAD_SET_DSC_PPS_DATA11(a) (0x0000230C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH0 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH1 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH2 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA11_RC_BUF_THRESH3 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA12(a) (0x00002310 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH4 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH5 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH6 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA12_RC_BUF_THRESH7 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA13(a) (0x00002314 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH8 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH9 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH10 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA13_RC_BUF_THRESH11 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14(a) (0x00002318 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH12 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_BUF_THRESH13 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_HIGH0 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MIN_QP0 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_BPG_OFFSET0 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA14_RC_RANGE_MAX_QP_LOW0 31:30 
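The DSC_PPS_DATA words above pack the DSC Picture Parameter Set into 32-bit method data, and some per-entry RC range fields are split across non-contiguous bit ranges inside one dword (for entry 0, RC_RANGE_MAX_QP_HIGH0 at 18:16 plus RC_RANGE_MAX_QP_LOW0 at 31:30). Below is a minimal, self-contained C sketch of assembling one such entry from the bit ranges listed above; the put_field helper and the assumption that HIGH0 carries the upper three bits of the 5-bit max-QP value are illustrative only and are not taken from the driver sources.

/*
 * Illustrative only -- generic helper for the "HIGH:LOW" bit-range
 * convention used throughout these class headers: a field declared as
 * FOO  H:L  occupies bits L..H of the 32-bit method data word.
 */
#include <stdint.h>
#include <stdio.h>

/* Insert 'value' into bits low..high of 'word'. */
static uint32_t put_field(uint32_t word, unsigned high, unsigned low, uint32_t value)
{
    uint32_t width = high - low + 1;
    uint32_t mask  = (width < 32) ? ((1u << width) - 1u) : 0xFFFFFFFFu;
    return (word & ~(mask << low)) | ((value & mask) << low);
}

int main(void)
{
    /* One RC range entry (5-bit min QP, 5-bit max QP, 6-bit bpg offset)
     * packed into the upper half of DSC_PPS_DATA14 using the ranges above.
     * Splitting max QP as upper 3 bits -> HIGH0, lower 2 bits -> LOW0 is
     * an assumption made for illustration. */
    uint32_t min_qp = 5, max_qp = 13, bpg_offset = 2;
    uint32_t data14 = 0;

    data14 = put_field(data14, 18, 16, max_qp >> 2);   /* ..._RC_RANGE_MAX_QP_HIGH0 */
    data14 = put_field(data14, 23, 19, min_qp);        /* ..._RC_RANGE_MIN_QP0      */
    data14 = put_field(data14, 29, 24, bpg_offset);    /* ..._RC_RANGE_BPG_OFFSET0  */
    data14 = put_field(data14, 31, 30, max_qp & 0x3);  /* ..._RC_RANGE_MAX_QP_LOW0  */

    printf("DSC_PPS_DATA14 = 0x%08x\n", data14);
    return 0;
}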
+#define NVC67D_HEAD_SET_DSC_PPS_DATA15(a) (0x0000231C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH1 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP1 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET1 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW1 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_HIGH2 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MIN_QP2 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_BPG_OFFSET2 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA15_RC_RANGE_MAX_QP_LOW2 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16(a) (0x00002320 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH3 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP3 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET3 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW3 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_HIGH4 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MIN_QP4 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_BPG_OFFSET4 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA16_RC_RANGE_MAX_QP_LOW4 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17(a) (0x00002324 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH5 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP5 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET5 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW5 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_HIGH6 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MIN_QP6 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_BPG_OFFSET6 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA17_RC_RANGE_MAX_QP_LOW6 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18(a) (0x00002328 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH7 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP7 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET7 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW7 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_HIGH8 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MIN_QP8 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_BPG_OFFSET8 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA18_RC_RANGE_MAX_QP_LOW8 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19(a) (0x0000232C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH9 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP9 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET9 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW9 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_HIGH10 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MIN_QP10 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_BPG_OFFSET10 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA19_RC_RANGE_MAX_QP_LOW10 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20(a) (0x00002330 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH11 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP11 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET11 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW11 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_HIGH12 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MIN_QP12 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_BPG_OFFSET12 29:24 +#define 
NVC67D_HEAD_SET_DSC_PPS_DATA20_RC_RANGE_MAX_QP_LOW12 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21(a) (0x00002334 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH13 2:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP13 7:3 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET13 13:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW13 15:14 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_HIGH14 18:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MIN_QP14 23:19 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_BPG_OFFSET14 29:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA21_RC_RANGE_MAX_QP_LOW14 31:30 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22(a) (0x00002338 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NATIVE422 0:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NATIVE420 1:1 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_RESERVED0 7:2 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_SECOND_LINE_BPG_OFFSET 12:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_RESERVED1 15:13 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSET_HIGH 23:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA22_NSL_BPG_OFFSETLOW 31:24 +#define NVC67D_HEAD_SET_DSC_PPS_DATA23(a) (0x0000233C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_HIGH 7:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA23_SECOND_LINE_OFFSET_ADJ_LOW 15:8 +#define NVC67D_HEAD_SET_DSC_PPS_DATA23_RESERVED 31:16 +#define NVC67D_HEAD_SET_DSC_PPS_DATA24(a) (0x00002340 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA24_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA25(a) (0x00002344 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA25_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA26(a) (0x00002348 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA26_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA27(a) (0x0000234C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA27_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA28(a) (0x00002350 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA28_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA29(a) (0x00002354 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA29_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA30(a) (0x00002358 + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA30_RESERVED 31:0 +#define NVC67D_HEAD_SET_DSC_PPS_DATA31(a) (0x0000235C + (a)*0x00000400) +#define NVC67D_HEAD_SET_DSC_PPS_DATA31_RESERVED 31:0 +#define NVC67D_HEAD_SET_RG_MERGE(a) (0x00002360 + (a)*0x00000400) +#define NVC67D_HEAD_SET_RG_MERGE_MODE 1:0 +#define NVC67D_HEAD_SET_RG_MERGE_MODE_DISABLE (0x00000000) +#define NVC67D_HEAD_SET_RG_MERGE_MODE_SETUP (0x00000001) +#define NVC67D_HEAD_SET_RG_MERGE_MODE_MASTER (0x00000002) +#define NVC67D_HEAD_SET_RG_MERGE_MODE_SLAVE (0x00000003) +#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY(a) (0x00002364 + (a)*0x00000400) +#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_START 15:0 +#define NVC67D_HEAD_SET_RASTER_HBLANK_DELAY_BLANK_END 31:16 +#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE(a) (0x00002368 + (a)*0x00000400) +#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE_BYTES 15:0 +#define NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE_TRI_BYTES 31:16 +#define NVC67D_HEAD_SET_HDMI_DSC_HCBLANK(a) (0x0000236C + (a)*0x00000400) +#define NVC67D_HEAD_SET_HDMI_DSC_HCBLANK_WIDTH 15:0 +#define NVC67D_HEAD_SW_RESERVED(a,b) (0x00002370 + (a)*0x00000400 + (b)*0x00000004) +#define NVC67D_HEAD_SW_RESERVED_VALUE 31:0 +#define NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI(a,b) (0x00002380 + (a)*0x00000400 + (b)*0x00000004) +#define 
NVC67D_HEAD_SET_RG_REL_SEMAPHORE_VALUE_HI_VALUE 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC67d_h diff --git a/src/common/sdk/nvidia/inc/class/clc67e.h b/src/common/sdk/nvidia/inc/class/clc67e.h new file mode 100644 index 000000000..516e6ead1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc67e.h @@ -0,0 +1,700 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +*******************************************************************************/ + + + +#ifndef _clC67e_h_ +#define _clC67e_h_ + + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC67E_WINDOW_CHANNEL_DMA (0x0000C67E) + +// dma opcode instructions +#define NVC67E_DMA +#define NVC67E_DMA_OPCODE 31:29 +#define NVC67E_DMA_OPCODE_METHOD 0x00000000 +#define NVC67E_DMA_OPCODE_JUMP 0x00000001 +#define NVC67E_DMA_OPCODE_NONINC_METHOD 0x00000002 +#define NVC67E_DMA_OPCODE_SET_SUBDEVICE_MASK 0x00000003 +#define NVC67E_DMA_METHOD_COUNT 27:18 +#define NVC67E_DMA_METHOD_OFFSET 13:2 +#define NVC67E_DMA_DATA 31:0 +#define NVC67E_DMA_DATA_NOP 0x00000000 +#define NVC67E_DMA_JUMP_OFFSET 11:2 +#define NVC67E_DMA_SET_SUBDEVICE_MASK_VALUE 11:0 + +// class methods +#define NVC67E_PUT (0x00000000) +#define NVC67E_PUT_PTR 9:0 +#define NVC67E_GET (0x00000004) +#define NVC67E_GET_PTR 9:0 +#define NVC67E_UPDATE (0x00000200) +#define NVC67E_UPDATE_RELEASE_ELV 0:0 +#define NVC67E_UPDATE_RELEASE_ELV_FALSE (0x00000000) +#define NVC67E_UPDATE_RELEASE_ELV_TRUE (0x00000001) +#define NVC67E_UPDATE_FLIP_LOCK_PIN 8:4 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE (0x00000000) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(i) (0x00000001 +(i)) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN__SIZE_1 16 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_0 (0x00000001) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_1 (0x00000002) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_2 (0x00000003) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_3 (0x00000004) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_4 (0x00000005) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_5 (0x00000006) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_6 (0x00000007) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_7 (0x00000008) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_8 (0x00000009) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_9 (0x0000000A) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_A (0x0000000B) +#define 
NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_B (0x0000000C) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_C (0x0000000D) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_D (0x0000000E) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_E (0x0000000F) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_F (0x00000010) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 (0x00000014) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_1 (0x00000015) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_2 (0x00000016) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_3 (0x00000017) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK(i) (0x00000018 +(i)) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1 8 +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_0 (0x00000018) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_1 (0x00000019) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_2 (0x0000001A) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_3 (0x0000001B) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_4 (0x0000001C) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_5 (0x0000001D) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_6 (0x0000001E) +#define NVC67E_UPDATE_FLIP_LOCK_PIN_INTERNAL_SCAN_LOCK_7 (0x0000001F) +#define NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM 12:12 +#define NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM_DISABLE (0x00000000) +#define NVC67E_UPDATE_INTERLOCK_WITH_WIN_IMM_ENABLE (0x00000001) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_HI (0x00000204) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_HI_VALUE 31:0 +#define NVC67E_GET_LINE (0x00000208) +#define NVC67E_GET_LINE_LINE 15:0 +#define NVC67E_SET_SEMAPHORE_CONTROL (0x0000020C) +#define NVC67E_SET_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ 11:11 +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_FALSE (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_SKIP_ACQ_TRUE (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVC67E_SET_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE 14:14 +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE (0x00000000) +#define NVC67E_SET_SEMAPHORE_CONTROL_REL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67E_SET_SEMAPHORE_ACQUIRE (0x00000210) +#define NVC67E_SET_SEMAPHORE_ACQUIRE_VALUE 31:0 +#define NVC67E_SET_SEMAPHORE_RELEASE (0x00000214) +#define NVC67E_SET_SEMAPHORE_RELEASE_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_SEMAPHORE (0x00000218) +#define NVC67E_SET_CONTEXT_DMA_SEMAPHORE_HANDLE 31:0 +#define NVC67E_SET_CONTEXT_DMA_NOTIFIER (0x0000021C) +#define NVC67E_SET_CONTEXT_DMA_NOTIFIER_HANDLE 31:0 +#define NVC67E_SET_NOTIFIER_CONTROL (0x00000220) +#define NVC67E_SET_NOTIFIER_CONTROL_MODE 0:0 +#define NVC67E_SET_NOTIFIER_CONTROL_MODE_WRITE (0x00000000) +#define NVC67E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN (0x00000001) +#define NVC67E_SET_NOTIFIER_CONTROL_OFFSET 11:4 +#define NVC67E_SET_SIZE (0x00000224) +#define NVC67E_SET_SIZE_WIDTH 15:0 +#define NVC67E_SET_SIZE_HEIGHT 31:16 +#define NVC67E_SET_STORAGE (0x00000228) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT 3:0 +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_ONE_GOB 
(0x00000000) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_TWO_GOBS (0x00000001) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC67E_SET_STORAGE_BLOCK_HEIGHT_NVD_BLOCK_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC67E_SET_PARAMS (0x0000022C) +#define NVC67E_SET_PARAMS_FORMAT 7:0 +#define NVC67E_SET_PARAMS_FORMAT_I8 (0x0000001E) +#define NVC67E_SET_PARAMS_FORMAT_R4G4B4A4 (0x0000002F) +#define NVC67E_SET_PARAMS_FORMAT_R5G6B5 (0x000000E8) +#define NVC67E_SET_PARAMS_FORMAT_A1R5G5B5 (0x000000E9) +#define NVC67E_SET_PARAMS_FORMAT_R5G5B5A1 (0x0000002E) +#define NVC67E_SET_PARAMS_FORMAT_A8R8G8B8 (0x000000CF) +#define NVC67E_SET_PARAMS_FORMAT_X8R8G8B8 (0x000000E6) +#define NVC67E_SET_PARAMS_FORMAT_A8B8G8R8 (0x000000D5) +#define NVC67E_SET_PARAMS_FORMAT_X8B8G8R8 (0x000000F9) +#define NVC67E_SET_PARAMS_FORMAT_A2R10G10B10 (0x000000DF) +#define NVC67E_SET_PARAMS_FORMAT_A2B10G10R10 (0x000000D1) +#define NVC67E_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS (0x00000023) +#define NVC67E_SET_PARAMS_FORMAT_R16_G16_B16_A16 (0x000000C6) +#define NVC67E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x000000CA) +#define NVC67E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422 (0x00000028) +#define NVC67E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422 (0x00000029) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8V8_N444 (0x00000035) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8V8_N422 (0x00000036) +#define NVC67E_SET_PARAMS_FORMAT_Y8___V8U8_N420 (0x00000038) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N444 (0x0000003A) +#define NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N420 (0x0000003B) +#define NVC67E_SET_PARAMS_FORMAT_Y10___U10V10_N444 (0x00000055) +#define NVC67E_SET_PARAMS_FORMAT_Y10___U10V10_N422 (0x00000056) +#define NVC67E_SET_PARAMS_FORMAT_Y10___V10U10_N420 (0x00000058) +#define NVC67E_SET_PARAMS_FORMAT_Y12___U12V12_N444 (0x00000075) +#define NVC67E_SET_PARAMS_FORMAT_Y12___U12V12_N422 (0x00000076) +#define NVC67E_SET_PARAMS_FORMAT_Y12___V12U12_N420 (0x00000078) +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND 18:18 +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND_DISABLE (0x00000000) +#define NVC67E_SET_PARAMS_CLAMP_BEFORE_BLEND_ENABLE (0x00000001) +#define NVC67E_SET_PARAMS_SWAP_UV 19:19 +#define NVC67E_SET_PARAMS_SWAP_UV_DISABLE (0x00000000) +#define NVC67E_SET_PARAMS_SWAP_UV_ENABLE (0x00000001) +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE 22:22 +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_TO_NEAREST (0x00000000) +#define NVC67E_SET_PARAMS_FMT_ROUNDING_MODE_ROUND_DOWN (0x00000001) +#define NVC67E_SET_PLANAR_STORAGE(b) (0x00000230 + (b)*0x00000004) +#define NVC67E_SET_PLANAR_STORAGE_PITCH 12:0 +#define NVC67E_SET_SEMAPHORE_RELEASE_HI (0x0000023C) +#define NVC67E_SET_SEMAPHORE_RELEASE_HI_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_ISO(b) (0x00000240 + (b)*0x00000004) +#define NVC67E_SET_CONTEXT_DMA_ISO_HANDLE 31:0 +#define NVC67E_SET_OFFSET(b) (0x00000260 + (b)*0x00000004) +#define NVC67E_SET_OFFSET_ORIGIN 31:0 +#define NVC67E_SET_POINT_IN(b) (0x00000290 + (b)*0x00000004) +#define NVC67E_SET_POINT_IN_X 15:0 +#define NVC67E_SET_POINT_IN_Y 31:16 +#define NVC67E_SET_SIZE_IN (0x00000298) +#define NVC67E_SET_SIZE_IN_WIDTH 15:0 +#define NVC67E_SET_SIZE_IN_HEIGHT 31:16 +#define NVC67E_SET_SIZE_OUT (0x000002A4) +#define NVC67E_SET_SIZE_OUT_WIDTH 15:0 +#define NVC67E_SET_SIZE_OUT_HEIGHT 31:16 +#define NVC67E_SET_CONTROL_INPUT_SCALER 
(0x000002A8) +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS 2:0 +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2 (0x00000001) +#define NVC67E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 (0x00000004) +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS 6:4 +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2 (0x00000001) +#define NVC67E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 (0x00000004) +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE (0x000002AC) +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE_DATA 9:0 +#define NVC67E_SET_INPUT_SCALER_COEFF_VALUE_INDEX 19:12 +#define NVC67E_SET_COMPOSITION_CONTROL (0x000002EC) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT 1:0 +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE (0x00000000) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC (0x00000001) +#define NVC67E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST (0x00000002) +#define NVC67E_SET_COMPOSITION_CONTROL_DEPTH 11:4 +#define NVC67E_SET_COMPOSITION_CONTROL_BYPASS 16:16 +#define NVC67E_SET_COMPOSITION_CONTROL_BYPASS_DISABLE (0x00000000) +#define NVC67E_SET_COMPOSITION_CONTROL_BYPASS_ENABLE (0x00000001) +#define NVC67E_SET_COMPOSITION_CONSTANT_ALPHA (0x000002F0) +#define NVC67E_SET_COMPOSITION_CONSTANT_ALPHA_K1 7:0 +#define NVC67E_SET_COMPOSITION_CONSTANT_ALPHA_K2 15:8 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT (0x000002F4) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT 3:0 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT 7:4 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_SRC (0x00000005) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT 11:8 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define 
NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT 15:12 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1 (0x00000004) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_K1_TIMES_DST (0x00000006) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_COLOR_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT 19:16 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT 23:20 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K1 (0x00000002) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_SRC_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_DST (0x00000008) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT 27:24 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT 31:28 +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ZERO (0x00000000) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_ONE (0x00000001) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_K2 (0x00000003) +#define NVC67E_SET_COMPOSITION_FACTOR_SELECT_DST_ALPHA_FACTOR_NO_MATCH_SELECT_NEG_K1_TIMES_SRC (0x00000007) +#define NVC67E_SET_KEY_ALPHA (0x000002F8) +#define NVC67E_SET_KEY_ALPHA_MIN 15:0 +#define NVC67E_SET_KEY_ALPHA_MAX 31:16 +#define NVC67E_SET_KEY_RED_CR (0x000002FC) +#define NVC67E_SET_KEY_RED_CR_MIN 15:0 +#define NVC67E_SET_KEY_RED_CR_MAX 31:16 +#define NVC67E_SET_KEY_GREEN_Y (0x00000300) +#define NVC67E_SET_KEY_GREEN_Y_MIN 15:0 +#define NVC67E_SET_KEY_GREEN_Y_MAX 31:16 +#define NVC67E_SET_KEY_BLUE_CB (0x00000304) +#define NVC67E_SET_KEY_BLUE_CB_MIN 15:0 +#define NVC67E_SET_KEY_BLUE_CB_MAX 31:16 +#define NVC67E_SET_PRESENT_CONTROL (0x00000308) +#define NVC67E_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL 3:0 +#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE 6:4 
+#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE 8:8 +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE 13:12 +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_MONO (0x00000000) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_PAIR_FLIP (0x00000001) +#define NVC67E_SET_PRESENT_CONTROL_STEREO_MODE_AT_ANY_FRAME (0x00000002) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_HI (0x0000030C) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_HI_VALUE 31:0 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL (0x00000330) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_OFFSET 7:0 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE 15:15 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_32BIT (0x00000000) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_PAYLOAD_SIZE_PAYLOAD_64BIT (0x00000001) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE 13:12 +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_EQ (0x00000000) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_CGEQ (0x00000001) +#define NVC67E_SET_ACQ_SEMAPHORE_CONTROL_ACQ_MODE_STRICT_GEQ (0x00000002) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE (0x00000334) +#define NVC67E_SET_ACQ_SEMAPHORE_VALUE_VALUE 31:0 +#define NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE (0x00000338) +#define NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE_HANDLE 31:0 +#define NVC67E_SET_SCAN_DIRECTION (0x0000033C) +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION 0:0 +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_LEFT (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_HORIZONTAL_DIRECTION_FROM_RIGHT (0x00000001) +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION 1:1 +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_TOP (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_VERTICAL_DIRECTION_FROM_BOTTOM (0x00000001) +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER 2:2 +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER_FALSE (0x00000000) +#define NVC67E_SET_SCAN_DIRECTION_COLUMN_ORDER_TRUE (0x00000001) +#define NVC67E_SET_TIMESTAMP_ORIGIN_LO (0x00000340) +#define NVC67E_SET_TIMESTAMP_ORIGIN_LO_TIMESTAMP_LO 31:0 +#define NVC67E_SET_TIMESTAMP_ORIGIN_HI (0x00000344) +#define NVC67E_SET_TIMESTAMP_ORIGIN_HI_TIMESTAMP_HI 31:0 +#define NVC67E_SET_UPDATE_TIMESTAMP_LO (0x00000348) +#define NVC67E_SET_UPDATE_TIMESTAMP_LO_TIMESTAMP_LO 31:0 +#define NVC67E_SET_UPDATE_TIMESTAMP_HI (0x0000034C) +#define NVC67E_SET_UPDATE_TIMESTAMP_HI_TIMESTAMP_HI 31:0 +#define NVC67E_SET_INTERLOCK_FLAGS (0x00000370) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE 0:0 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CORE_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR(i) ((i)+1):((i)+1) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR__SIZE_1 8 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0 1:1 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR0_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1 2:2 +#define 
NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR1_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2 3:3 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR2_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3 4:4 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR3_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4 5:5 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR4_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5 6:6 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR5_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6 7:7 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR6_ENABLE (0x00000001) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7 8:8 +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_DISABLE (0x00000000) +#define NVC67E_SET_INTERLOCK_FLAGS_INTERLOCK_WITH_CURSOR7_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS (0x00000374) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW(i) ((i)+0):((i)+0) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW__SIZE_1 32 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0 0:0 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW0_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1 1:1 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW1_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2 2:2 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW2_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3 3:3 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW3_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4 4:4 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW4_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5 5:5 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW5_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6 6:6 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_DISABLE (0x00000000) +#define 
NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW6_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7 7:7 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW7_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8 8:8 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW8_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9 9:9 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW9_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10 10:10 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW10_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11 11:11 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW11_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12 12:12 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW12_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13 13:13 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW13_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14 14:14 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW14_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15 15:15 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW15_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16 16:16 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW16_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17 17:17 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW17_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18 18:18 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW18_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19 19:19 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW19_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20 20:20 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW20_ENABLE (0x00000001) +#define 
NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21 21:21 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW21_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22 22:22 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW22_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23 23:23 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW23_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24 24:24 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW24_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25 25:25 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW25_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26 26:26 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW26_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27 27:27 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW27_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28 28:28 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW28_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29 29:29 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW29_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30 30:30 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW30_ENABLE (0x00000001) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31 31:31 +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_DISABLE (0x00000000) +#define NVC67E_SET_WINDOW_INTERLOCK_FLAGS_INTERLOCK_WITH_WINDOW31_ENABLE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL (0x00000398) +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE 0:0 +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION 4:4 +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION_VSYNC (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_LOCATION_VBLANK (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY 8:8 +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY_EVERY_FRAME (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_FREQUENCY_ONCE (0x00000001) +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE 12:12 +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_DISABLE (0x00000000) +#define NVC67E_SET_EXT_PACKET_CONTROL_HEADER_OVERRIDE_ENABLE (0x00000001) 
+#define NVC67E_SET_EXT_PACKET_CONTROL_SIZE 27:16 +#define NVC67E_SET_EXT_PACKET_DATA (0x0000039C) +#define NVC67E_SET_EXT_PACKET_DATA_DB0 7:0 +#define NVC67E_SET_EXT_PACKET_DATA_DB1 15:8 +#define NVC67E_SET_EXT_PACKET_DATA_DB2 23:16 +#define NVC67E_SET_EXT_PACKET_DATA_DB3 31:24 +#define NVC67E_SET_FMT_COEFFICIENT_C00 (0x00000400) +#define NVC67E_SET_FMT_COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C01 (0x00000404) +#define NVC67E_SET_FMT_COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C02 (0x00000408) +#define NVC67E_SET_FMT_COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C03 (0x0000040C) +#define NVC67E_SET_FMT_COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C10 (0x00000410) +#define NVC67E_SET_FMT_COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C11 (0x00000414) +#define NVC67E_SET_FMT_COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C12 (0x00000418) +#define NVC67E_SET_FMT_COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C13 (0x0000041C) +#define NVC67E_SET_FMT_COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C20 (0x00000420) +#define NVC67E_SET_FMT_COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C21 (0x00000424) +#define NVC67E_SET_FMT_COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C22 (0x00000428) +#define NVC67E_SET_FMT_COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_FMT_COEFFICIENT_C23 (0x0000042C) +#define NVC67E_SET_FMT_COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_ILUT_CONTROL (0x00000440) +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_ILUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MODE 3:2 +#define NVC67E_SET_ILUT_CONTROL_MODE_SEGMENTED (0x00000000) +#define NVC67E_SET_ILUT_CONTROL_MODE_DIRECT8 (0x00000001) +#define NVC67E_SET_ILUT_CONTROL_MODE_DIRECT10 (0x00000002) +#define NVC67E_SET_ILUT_CONTROL_SIZE 18:8 +#define NVC67E_SET_CONTEXT_DMA_ILUT (0x00000444) +#define NVC67E_SET_CONTEXT_DMA_ILUT_HANDLE 31:0 +#define NVC67E_SET_OFFSET_ILUT (0x00000448) +#define NVC67E_SET_OFFSET_ILUT_ORIGIN 31:0 +#define NVC67E_SET_CSC00CONTROL (0x0000045C) +#define NVC67E_SET_CSC00CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC00CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC00CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC00COEFFICIENT_C00 (0x00000460) +#define NVC67E_SET_CSC00COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C01 (0x00000464) +#define NVC67E_SET_CSC00COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C02 (0x00000468) +#define NVC67E_SET_CSC00COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C03 (0x0000046C) +#define NVC67E_SET_CSC00COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C10 (0x00000470) +#define NVC67E_SET_CSC00COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C11 (0x00000474) +#define NVC67E_SET_CSC00COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C12 (0x00000478) +#define NVC67E_SET_CSC00COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C13 (0x0000047C) +#define NVC67E_SET_CSC00COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C20 (0x00000480) +#define NVC67E_SET_CSC00COEFFICIENT_C20_VALUE 20:0 +#define 
NVC67E_SET_CSC00COEFFICIENT_C21 (0x00000484) +#define NVC67E_SET_CSC00COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C22 (0x00000488) +#define NVC67E_SET_CSC00COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC00COEFFICIENT_C23 (0x0000048C) +#define NVC67E_SET_CSC00COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CSC0LUT_CONTROL (0x000004A0) +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE 4:4 +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC0LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE (0x000004A4) +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC67E_SET_CSC0LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC67E_SET_CSC0LUT_ENTRY (0x000004A8) +#define NVC67E_SET_CSC0LUT_ENTRY_IDX 10:0 +#define NVC67E_SET_CSC0LUT_ENTRY_VALUE 31:16 +#define NVC67E_SET_CSC01CONTROL (0x000004BC) +#define NVC67E_SET_CSC01CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC01CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC01CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC01COEFFICIENT_C00 (0x000004C0) +#define NVC67E_SET_CSC01COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C01 (0x000004C4) +#define NVC67E_SET_CSC01COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C02 (0x000004C8) +#define NVC67E_SET_CSC01COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C03 (0x000004CC) +#define NVC67E_SET_CSC01COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C10 (0x000004D0) +#define NVC67E_SET_CSC01COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C11 (0x000004D4) +#define NVC67E_SET_CSC01COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C12 (0x000004D8) +#define NVC67E_SET_CSC01COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C13 (0x000004DC) +#define NVC67E_SET_CSC01COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C20 (0x000004E0) +#define NVC67E_SET_CSC01COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C21 (0x000004E4) +#define NVC67E_SET_CSC01COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C22 (0x000004E8) +#define NVC67E_SET_CSC01COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC01COEFFICIENT_C23 (0x000004EC) +#define NVC67E_SET_CSC01COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_TMO_CONTROL (0x00000500) +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_TMO_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_TMO_CONTROL_SAT_MODE 3:2 +#define NVC67E_SET_TMO_CONTROL_SIZE 18:8 +#define NVC67E_SET_TMO_LOW_INTENSITY_ZONE (0x00000508) +#define NVC67E_SET_TMO_LOW_INTENSITY_ZONE_END 29:16 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE (0x0000050C) +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_LOW_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE (0x00000510) +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE_START 13:0 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_ZONE_END 29:16 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE 
(0x00000514) +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_MEDIUM_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_TMO_HIGH_INTENSITY_ZONE (0x00000518) +#define NVC67E_SET_TMO_HIGH_INTENSITY_ZONE_START 13:0 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE (0x0000051C) +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_LIN_WEIGHT 8:0 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_NON_LIN_WEIGHT 20:12 +#define NVC67E_SET_TMO_HIGH_INTENSITY_VALUE_THRESHOLD 31:24 +#define NVC67E_SET_CONTEXT_DMA_TMO_LUT (0x00000528) +#define NVC67E_SET_CONTEXT_DMA_TMO_LUT_HANDLE 31:0 +#define NVC67E_SET_OFFSET_TMO_LUT (0x0000052C) +#define NVC67E_SET_OFFSET_TMO_LUT_ORIGIN 31:0 +#define NVC67E_SET_CSC10CONTROL (0x0000053C) +#define NVC67E_SET_CSC10CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC10CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC10CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC10COEFFICIENT_C00 (0x00000540) +#define NVC67E_SET_CSC10COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C01 (0x00000544) +#define NVC67E_SET_CSC10COEFFICIENT_C01_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C02 (0x00000548) +#define NVC67E_SET_CSC10COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C03 (0x0000054C) +#define NVC67E_SET_CSC10COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C10 (0x00000550) +#define NVC67E_SET_CSC10COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C11 (0x00000554) +#define NVC67E_SET_CSC10COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C12 (0x00000558) +#define NVC67E_SET_CSC10COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C13 (0x0000055C) +#define NVC67E_SET_CSC10COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C20 (0x00000560) +#define NVC67E_SET_CSC10COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C21 (0x00000564) +#define NVC67E_SET_CSC10COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C22 (0x00000568) +#define NVC67E_SET_CSC10COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC10COEFFICIENT_C23 (0x0000056C) +#define NVC67E_SET_CSC10COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CSC1LUT_CONTROL (0x00000580) +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE 0:0 +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_INTERPOLATE_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR 1:1 +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_MIRROR_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE 4:4 +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC1LUT_CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE (0x00000584) +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE_IDX 5:0 +#define NVC67E_SET_CSC1LUT_SEGMENT_SIZE_VALUE 18:16 +#define NVC67E_SET_CSC1LUT_ENTRY (0x00000588) +#define NVC67E_SET_CSC1LUT_ENTRY_IDX 10:0 +#define NVC67E_SET_CSC1LUT_ENTRY_VALUE 31:16 +#define NVC67E_SET_CSC11CONTROL (0x0000059C) +#define NVC67E_SET_CSC11CONTROL_ENABLE 0:0 +#define NVC67E_SET_CSC11CONTROL_ENABLE_DISABLE (0x00000000) +#define NVC67E_SET_CSC11CONTROL_ENABLE_ENABLE (0x00000001) +#define NVC67E_SET_CSC11COEFFICIENT_C00 (0x000005A0) +#define NVC67E_SET_CSC11COEFFICIENT_C00_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C01 (0x000005A4) +#define NVC67E_SET_CSC11COEFFICIENT_C01_VALUE 20:0 
+#define NVC67E_SET_CSC11COEFFICIENT_C02 (0x000005A8) +#define NVC67E_SET_CSC11COEFFICIENT_C02_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C03 (0x000005AC) +#define NVC67E_SET_CSC11COEFFICIENT_C03_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C10 (0x000005B0) +#define NVC67E_SET_CSC11COEFFICIENT_C10_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C11 (0x000005B4) +#define NVC67E_SET_CSC11COEFFICIENT_C11_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C12 (0x000005B8) +#define NVC67E_SET_CSC11COEFFICIENT_C12_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C13 (0x000005BC) +#define NVC67E_SET_CSC11COEFFICIENT_C13_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C20 (0x000005C0) +#define NVC67E_SET_CSC11COEFFICIENT_C20_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C21 (0x000005C4) +#define NVC67E_SET_CSC11COEFFICIENT_C21_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C22 (0x000005C8) +#define NVC67E_SET_CSC11COEFFICIENT_C22_VALUE 20:0 +#define NVC67E_SET_CSC11COEFFICIENT_C23 (0x000005CC) +#define NVC67E_SET_CSC11COEFFICIENT_C23_VALUE 20:0 +#define NVC67E_SET_CLAMP_RANGE (0x000005D0) +#define NVC67E_SET_CLAMP_RANGE_LOW 15:0 +#define NVC67E_SET_CLAMP_RANGE_HIGH 31:16 +#define NVC67E_SW_RESERVED(b) (0x000005D4 + (b)*0x00000004) +#define NVC67E_SW_RESERVED_VALUE 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clC67e_h diff --git a/src/common/sdk/nvidia/inc/class/clc697.h b/src/common/sdk/nvidia/inc/class/clc697.h new file mode 100644 index 000000000..550ebcae0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc697.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc697_h_ +#define _clc697_h_ + +#define AMPERE_A 0xC697 + +#endif // _clc697_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc6b0.h b/src/common/sdk/nvidia/inc/class/clc6b0.h new file mode 100644 index 000000000..d36f053fb --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc6b0.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef clc6b0_h_ +#define clc6b0_h_ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC6B0_VIDEO_DECODER (0x0000C6B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc6b0_h + diff --git a/src/common/sdk/nvidia/inc/class/clc6b5.h b/src/common/sdk/nvidia/inc/class/clc6b5.h new file mode 100644 index 000000000..873ca74df --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc6b5.h @@ -0,0 +1,282 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc6b5_h_ +#define _clc6b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define AMPERE_DMA_COPY_A (0x0000C6B5) + +#define NVC6B5_NOP (0x00000100) +#define NVC6B5_NOP_PARAMETER 31:0 +#define NVC6B5_PM_TRIGGER (0x00000140) +#define NVC6B5_PM_TRIGGER_V 31:0 +#define NVC6B5_SET_SEMAPHORE_A (0x00000240) +#define NVC6B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC6B5_SET_SEMAPHORE_B (0x00000244) +#define NVC6B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC6B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC6B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC6B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC6B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC6B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC6B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC6B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC6B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC6B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC6B5_SET_SRC_PHYS_MODE_TARGET_PEERMEM (0x00000003) +#define NVC6B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2 +#define NVC6B5_SET_SRC_PHYS_MODE_PEER_ID 8:6 +#define NVC6B5_SET_SRC_PHYS_MODE_FLA 9:9 +#define NVC6B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC6B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC6B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC6B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC6B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC6B5_SET_DST_PHYS_MODE_TARGET_PEERMEM (0x00000003) +#define NVC6B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2 +#define NVC6B5_SET_DST_PHYS_MODE_PEER_ID 8:6 +#define NVC6B5_SET_DST_PHYS_MODE_FLA 9:9 +#define NVC6B5_LAUNCH_DMA (0x00000300) +#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC6B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC6B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_FLUSH_TYPE 25:25 +#define NVC6B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000) +#define NVC6B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003) +#define NVC6B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC6B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC6B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC6B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC6B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define 
NVC6B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC6B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC6B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC6B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC6B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC6B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC6B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC6B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC6B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC6B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC6B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC6B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC6B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC6B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_VPRMODE 23:22 +#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC6B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC6B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC6B5_LAUNCH_DMA_DISABLE_PLC 26:26 +#define NVC6B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000) +#define NVC6B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001) +#define NVC6B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC6B5_OFFSET_IN_UPPER (0x00000400) +#define NVC6B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC6B5_OFFSET_IN_LOWER (0x00000404) +#define NVC6B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC6B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC6B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC6B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC6B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC6B5_PITCH_IN (0x00000410) +#define NVC6B5_PITCH_IN_VALUE 31:0 +#define NVC6B5_PITCH_OUT (0x00000414) +#define NVC6B5_PITCH_OUT_VALUE 31:0 +#define NVC6B5_LINE_LENGTH_IN (0x00000418) +#define NVC6B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC6B5_LINE_COUNT (0x0000041C) +#define NVC6B5_LINE_COUNT_VALUE 31:0 +#define NVC6B5_SET_REMAP_CONST_A (0x00000700) +#define NVC6B5_SET_REMAP_CONST_A_V 31:0 +#define NVC6B5_SET_REMAP_CONST_B (0x00000704) +#define NVC6B5_SET_REMAP_CONST_B_V 31:0 +#define NVC6B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X 2:0 
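
The HIGHBIT:LOWBIT tokens above (for example NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0) describe where each field sits inside a method's 32-bit data word; in the driver they are typically consumed through DRF-style helpers such as those in the SDK's nvmisc.h. The sketch below is purely illustrative: the FLD_* macros, MethodPair type, and emit_linear_copy() are local stand-ins (not driver code) showing how a minimal one-line, pitch-to-pitch, virtually addressed copy could be assembled from the AMPERE_DMA_COPY_A methods defined here. Channel and pushbuffer submission are deliberately omitted.

#include "nvtypes.h"
#include "clc6b5.h"

/*
 * Stand-in field helpers.  A field token such as 1:0 yields its low bit via
 * (0 ? 1:0) and its high bit via (1 ? 1:0); the SDK's DRF macros use the
 * same trick.
 */
#define FLD_LOWBIT(f)   (0 ? f)
#define FLD_HIGHBIT(f)  (1 ? f)
#define FLD_MASK(f)     (0xFFFFFFFFu >> (31 - FLD_HIGHBIT(f) + FLD_LOWBIT(f)))
#define FLD_NUM(f, v)   (((NvU32)(v) & FLD_MASK(f)) << FLD_LOWBIT(f))

typedef struct { NvU32 method; NvU32 data; } MethodPair;   /* hypothetical */

/* Sketch: build the (method, data) pairs for a one-line, pitch-to-pitch,
 * virtually addressed copy of 'size' bytes; returns the pair count. */
static int emit_linear_copy(MethodPair *m, NvU64 src, NvU64 dst, NvU32 size)
{
    int n = 0;

    m[n].method = NVC6B5_OFFSET_IN_UPPER;
    m[n++].data = FLD_NUM(NVC6B5_OFFSET_IN_UPPER_UPPER, src >> 32);
    m[n].method = NVC6B5_OFFSET_IN_LOWER;
    m[n++].data = (NvU32)src;
    m[n].method = NVC6B5_OFFSET_OUT_UPPER;
    m[n++].data = FLD_NUM(NVC6B5_OFFSET_OUT_UPPER_UPPER, dst >> 32);
    m[n].method = NVC6B5_OFFSET_OUT_LOWER;
    m[n++].data = (NvU32)dst;
    m[n].method = NVC6B5_LINE_LENGTH_IN;
    m[n++].data = size;                                  /* bytes per line */
    m[n].method = NVC6B5_LINE_COUNT;
    m[n++].data = 1;                                     /* single line    */
    m[n].method = NVC6B5_LAUNCH_DMA;
    m[n++].data =
        FLD_NUM(NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE,
                NVC6B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED) |
        FLD_NUM(NVC6B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT,
                NVC6B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH)          |
        FLD_NUM(NVC6B5_LAUNCH_DMA_DST_MEMORY_LAYOUT,
                NVC6B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH)          |
        FLD_NUM(NVC6B5_LAUNCH_DMA_SRC_TYPE,
                NVC6B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL)                 |
        FLD_NUM(NVC6B5_LAUNCH_DMA_DST_TYPE,
                NVC6B5_LAUNCH_DMA_DST_TYPE_VIRTUAL)                 |
        FLD_NUM(NVC6B5_LAUNCH_DMA_FLUSH_ENABLE,
                NVC6B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE);
    return n;
}

In a real client these pairs would be wrapped in host method headers and written to a channel pushbuffer; that encoding belongs to the host (channel) class rather than to this copy class.
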
+#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC6B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC6B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC6B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC6B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC6B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC6B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 
(0x00000005) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC6B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC6B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC6B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC6B5_SET_DST_WIDTH (0x00000710) +#define NVC6B5_SET_DST_WIDTH_V 31:0 +#define NVC6B5_SET_DST_HEIGHT (0x00000714) +#define NVC6B5_SET_DST_HEIGHT_V 31:0 +#define NVC6B5_SET_DST_DEPTH (0x00000718) +#define NVC6B5_SET_DST_DEPTH_V 31:0 +#define NVC6B5_SET_DST_LAYER (0x0000071C) +#define NVC6B5_SET_DST_LAYER_V 31:0 +#define NVC6B5_SET_DST_ORIGIN (0x00000720) +#define NVC6B5_SET_DST_ORIGIN_X 15:0 +#define NVC6B5_SET_DST_ORIGIN_Y 31:16 +#define NVC6B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC6B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC6B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC6B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC6B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC6B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC6B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC6B5_SET_SRC_WIDTH (0x0000072C) +#define NVC6B5_SET_SRC_WIDTH_V 31:0 +#define NVC6B5_SET_SRC_HEIGHT (0x00000730) +#define NVC6B5_SET_SRC_HEIGHT_V 31:0 +#define NVC6B5_SET_SRC_DEPTH (0x00000734) +#define NVC6B5_SET_SRC_DEPTH_V 31:0 +#define NVC6B5_SET_SRC_LAYER (0x00000738) +#define NVC6B5_SET_SRC_LAYER_V 31:0 +#define NVC6B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC6B5_SET_SRC_ORIGIN_X 15:0 +#define NVC6B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC6B5_SRC_ORIGIN_X (0x00000744) +#define NVC6B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC6B5_SRC_ORIGIN_Y (0x00000748) +#define NVC6B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC6B5_DST_ORIGIN_X (0x0000074C) +#define NVC6B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC6B5_DST_ORIGIN_Y (0x00000750) +#define NVC6B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC6B5_PM_TRIGGER_END (0x00001114) +#define NVC6B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc6b5_h + diff --git a/src/common/sdk/nvidia/inc/class/clc6b5sw.h b/src/common/sdk/nvidia/inc/class/clc6b5sw.h new file mode 100644 index 000000000..d9d15ea5e --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc6b5sw.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvtypes.h" + +#ifndef _clc6b5sw_h_ +#define _clc6b5sw_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* This file is *not* auto-generated. */ + +// +// Using VERSION_0 will cause the API to interpret +// engineType as a CE engine instance. This allows +// for backward compatibility with 85B5sw and 90B5sw. +// +#define NVC6B5_ALLOCATION_PARAMETERS_VERSION_0 0 + +// +// Using VERSION_1 will cause the API to interpret +// engineType as an NV2080_ENGINE_TYPE ordinal. +// +#define NVC6B5_ALLOCATION_PARAMETERS_VERSION_1 1 + +typedef struct +{ + NvU32 version; + NvU32 engineType; +} NVC6B5_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc5b5sw_h_ + diff --git a/src/common/sdk/nvidia/inc/class/clc6c0.h b/src/common/sdk/nvidia/inc/class/clc6c0.h new file mode 100644 index 000000000..48aa38507 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc6c0.h @@ -0,0 +1,697 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _cl_ampere_compute_a_h_ +#define _cl_ampere_compute_a_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl ampere_compute_a */ + +#include "nvtypes.h" + +#define AMPERE_COMPUTE_A 0xC6C0 + +#define NVC6C0_SET_OBJECT 0x0000 +#define NVC6C0_SET_OBJECT_CLASS_ID 15:0 +#define NVC6C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC6C0_NO_OPERATION 0x0100 +#define NVC6C0_NO_OPERATION_V 31:0 + +#define NVC6C0_SET_NOTIFY_A 0x0104 +#define NVC6C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC6C0_SET_NOTIFY_B 0x0108 +#define NVC6C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC6C0_NOTIFY 0x010c +#define NVC6C0_NOTIFY_TYPE 31:0 +#define NVC6C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC6C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC6C0_WAIT_FOR_IDLE 0x0110 +#define NVC6C0_WAIT_FOR_IDLE_V 31:0 + +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC6C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC6C0_SEND_GO_IDLE 0x013c +#define NVC6C0_SEND_GO_IDLE_V 31:0 + +#define NVC6C0_PM_TRIGGER 0x0140 +#define NVC6C0_PM_TRIGGER_V 31:0 + +#define NVC6C0_PM_TRIGGER_WFI 0x0144 +#define NVC6C0_PM_TRIGGER_WFI_V 31:0 + +#define NVC6C0_FE_ATOMIC_SEQUENCE_BEGIN 0x0148 +#define NVC6C0_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0 + +#define NVC6C0_FE_ATOMIC_SEQUENCE_END 0x014c +#define NVC6C0_FE_ATOMIC_SEQUENCE_END_V 31:0 + +#define NVC6C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC6C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC6C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC6C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC6C0_LINE_LENGTH_IN 0x0180 +#define NVC6C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC6C0_LINE_COUNT 0x0184 +#define NVC6C0_LINE_COUNT_VALUE 31:0 + +#define NVC6C0_OFFSET_OUT_UPPER 0x0188 +#define NVC6C0_OFFSET_OUT_UPPER_VALUE 16:0 + +#define NVC6C0_OFFSET_OUT 0x018c +#define NVC6C0_OFFSET_OUT_VALUE 31:0 + +#define NVC6C0_PITCH_OUT 0x0190 +#define NVC6C0_PITCH_OUT_VALUE 31:0 + +#define NVC6C0_SET_DST_BLOCK_SIZE 0x0194 +#define NVC6C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC6C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC6C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC6C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC6C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC6C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC6C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC6C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC6C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC6C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC6C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC6C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC6C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC6C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC6C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define 
NVC6C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC6C0_SET_DST_WIDTH 0x0198 +#define NVC6C0_SET_DST_WIDTH_V 31:0 + +#define NVC6C0_SET_DST_HEIGHT 0x019c +#define NVC6C0_SET_DST_HEIGHT_V 31:0 + +#define NVC6C0_SET_DST_DEPTH 0x01a0 +#define NVC6C0_SET_DST_DEPTH_V 31:0 + +#define NVC6C0_SET_DST_LAYER 0x01a4 +#define NVC6C0_SET_DST_LAYER_V 31:0 + +#define NVC6C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC6C0_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC6C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC6C0_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC6C0_LAUNCH_DMA 0x01b0 +#define NVC6C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC6C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC6C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC6C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC6C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC6C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC6C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC6C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC6C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC6C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC6C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC6C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC6C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC6C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC6C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC6C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC6C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC6C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC6C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC6C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC6C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC6C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC6C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC6C0_LOAD_INLINE_DATA 0x01b4 +#define NVC6C0_LOAD_INLINE_DATA_V 31:0 + +#define NVC6C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC6C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC6C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC6C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC6C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC6C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC6C0_SET_SM_SCG_CONTROL 0x01e8 +#define NVC6C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS 0:0 +#define NVC6C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS_FALSE 0x00000000 +#define NVC6C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS_TRUE 0x00000001 + +#define NVC6C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC6C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC6C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC6C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC6C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC6C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC6C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC6C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC6C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVC6C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define 
NVC6C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVC6C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVC6C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVC6C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVC6C0_PERFMON_TRANSFER 0x0210 +#define NVC6C0_PERFMON_TRANSFER_V 31:0 + +#define NVC6C0_SET_QMD_VIRTUALIZATION_BASE_A 0x0214 +#define NVC6C0_SET_QMD_VIRTUALIZATION_BASE_A_ADDRESS_UPPER 7:0 + +#define NVC6C0_SET_QMD_VIRTUALIZATION_BASE_B 0x0218 +#define NVC6C0_SET_QMD_VIRTUALIZATION_BASE_B_ADDRESS_LOWER 31:0 + +#define NVC6C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVC6C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC6C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC6C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC6C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC6C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC6C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC6C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC6C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC6C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC6C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC6C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC6C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC6C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC6C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC6C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC6C0_SET_RESERVED_SW_METHOD00 0x0220 +#define NVC6C0_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD01 0x0224 +#define NVC6C0_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD02 0x0228 +#define NVC6C0_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD03 0x022c +#define NVC6C0_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD04 0x0230 +#define NVC6C0_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD05 0x0234 +#define NVC6C0_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD06 0x0238 +#define NVC6C0_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD07 0x023c +#define NVC6C0_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC6C0_SET_CWD_REF_COUNTER 0x0248 +#define NVC6C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVC6C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVC6C0_SET_RESERVED_SW_METHOD08 0x024c +#define NVC6C0_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD09 0x0250 +#define NVC6C0_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD10 0x0254 +#define NVC6C0_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD11 0x0258 +#define NVC6C0_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD12 0x025c +#define NVC6C0_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD13 0x0260 +#define NVC6C0_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD14 0x0264 +#define NVC6C0_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVC6C0_SET_RESERVED_SW_METHOD15 0x0268 +#define 
NVC6C0_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVC6C0_SET_SCG_CONTROL 0x0270 +#define NVC6C0_SET_SCG_CONTROL_COMPUTE1_MAX_SM_COUNT 8:0 +#define NVC6C0_SET_SCG_CONTROL_COMPUTE1_MIN_SM_COUNT 20:12 +#define NVC6C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE 24:24 +#define NVC6C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_FALSE 0x00000000 +#define NVC6C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_TRUE 0x00000001 + +#define NVC6C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVC6C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC6C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC6C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVC6C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC6C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC6C0_SET_QMD_VERSION 0x0288 +#define NVC6C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVC6C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC6C0_CHECK_QMD_VERSION 0x0290 +#define NVC6C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVC6C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC6C0_INVALIDATE_SKED_CACHES 0x0298 +#define NVC6C0_INVALIDATE_SKED_CACHES_V 0:0 + +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL 0x029c +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_CONSTANT_BUFFER_MASK 7:0 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE 8:8 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE_TRUE 0x00000001 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE 12:12 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE_TRUE 0x00000001 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE 16:16 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE_TRUE 0x00000001 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE 20:20 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE_TRUE 0x00000001 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE 24:24 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE_TRUE 0x00000001 + +#define NVC6C0_SET_SHADER_SHARED_MEMORY_WINDOW_A 0x02a0 +#define NVC6C0_SET_SHADER_SHARED_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC6C0_SET_SHADER_SHARED_MEMORY_WINDOW_B 0x02a4 +#define NVC6C0_SET_SHADER_SHARED_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC6C0_SCG_HYSTERESIS_CONTROL 0x02a8 +#define NVC6C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE 0:0 +#define NVC6C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC6C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_TRUE 0x00000001 +#define NVC6C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE 1:1 +#define NVC6C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC6C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_TRUE 0x00000001 + +#define NVC6C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVC6C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVC6C0_SEND_PCAS_A 0x02b4 +#define NVC6C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVC6C0_SEND_PCAS_B 0x02b8 +#define NVC6C0_SEND_PCAS_B_FROM 23:0 +#define NVC6C0_SEND_PCAS_B_DELTA 31:24 + +#define 
NVC6C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVC6C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVC6C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVC6C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVC6C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVC6C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVC6C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVC6C0_SEND_SIGNALING_PCAS2_B 0x02c0 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION 3:0 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_NOP 0x00000000 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INVALIDATE 0x00000001 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_SCHEDULE 0x00000002 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INVALIDATE_COPY_SCHEDULE 0x00000003 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INCREMENT_PUT 0x00000006 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_DECREMENT_DEPENDENCE 0x00000007 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_PREFETCH 0x00000008 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_PREFETCH_SCHEDULE 0x00000009 +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INVALIDATE_PREFETCH_COPY_SCHEDULE 0x0000000A +#define NVC6C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INVALIDATE_PREFETCH_COPY_FORCE_REQUIRE_SCHEDULING 0x0000000B + +#define NVC6C0_SET_SKED_CACHE_CONTROL 0x02cc +#define NVC6C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID 0:0 +#define NVC6C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID_FALSE 0x00000000 +#define NVC6C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID_TRUE 0x00000001 + +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVC6C0_SET_SPA_VERSION 0x0310 +#define NVC6C0_SET_SPA_VERSION_MINOR 7:0 +#define NVC6C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC6C0_SET_INLINE_QMD_ADDRESS_A 0x0318 +#define NVC6C0_SET_INLINE_QMD_ADDRESS_A_QMD_ADDRESS_SHIFTED8_UPPER 31:0 + +#define NVC6C0_SET_INLINE_QMD_ADDRESS_B 0x031c +#define NVC6C0_SET_INLINE_QMD_ADDRESS_B_QMD_ADDRESS_SHIFTED8_LOWER 31:0 + +#define NVC6C0_LOAD_INLINE_QMD_DATA(i) (0x0320+(i)*4) +#define NVC6C0_LOAD_INLINE_QMD_DATA_V 31:0 + +#define NVC6C0_SET_FALCON00 0x0500 +#define NVC6C0_SET_FALCON00_V 31:0 + +#define NVC6C0_SET_FALCON01 0x0504 +#define NVC6C0_SET_FALCON01_V 31:0 + +#define NVC6C0_SET_FALCON02 0x0508 +#define NVC6C0_SET_FALCON02_V 31:0 + +#define NVC6C0_SET_FALCON03 0x050c +#define NVC6C0_SET_FALCON03_V 31:0 + +#define NVC6C0_SET_FALCON04 0x0510 +#define NVC6C0_SET_FALCON04_V 31:0 + +#define NVC6C0_SET_FALCON05 0x0514 +#define NVC6C0_SET_FALCON05_V 31:0 + +#define NVC6C0_SET_FALCON06 0x0518 +#define NVC6C0_SET_FALCON06_V 31:0 + +#define NVC6C0_SET_FALCON07 0x051c +#define NVC6C0_SET_FALCON07_V 31:0 + +#define NVC6C0_SET_FALCON08 0x0520 +#define NVC6C0_SET_FALCON08_V 31:0 + +#define NVC6C0_SET_FALCON09 0x0524 +#define NVC6C0_SET_FALCON09_V 31:0 + +#define NVC6C0_SET_FALCON10 0x0528 +#define NVC6C0_SET_FALCON10_V 31:0 + +#define NVC6C0_SET_FALCON11 0x052c +#define NVC6C0_SET_FALCON11_V 31:0 + +#define NVC6C0_SET_FALCON12 0x0530 +#define NVC6C0_SET_FALCON12_V 31:0 + +#define NVC6C0_SET_FALCON13 0x0534 +#define NVC6C0_SET_FALCON13_V 31:0 + +#define NVC6C0_SET_FALCON14 0x0538 +#define NVC6C0_SET_FALCON14_V 31:0 + 
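
The compute class dispatches work through QMDs: SET_INLINE_QMD_ADDRESS_A/B carry a QMD address that, judging by the _SHIFTED8 suffix, is programmed pre-shifted by 8 bits (256-byte units), and LOAD_INLINE_QMD_DATA(i) is an indexed method whose offset advances 4 bytes per element (0x0320 + i*4). The sketch below strings those methods together with a hypothetical push_method() helper; the shift amount and the overall sequence are inferences from the names above, not a statement of the driver's actual launch path.

#include "nvtypes.h"
#include "clc6c0.h"

/* Hypothetical pushbuffer append: stores one (method, data) pair. */
static void push_method(NvU32 *pb, unsigned *n, NvU32 method, NvU32 data)
{
    pb[(*n)++] = method;
    pb[(*n)++] = data;
}

/* Sketch: point AMPERE_COMPUTE_A at a 256-byte-aligned QMD and stream the
 * QMD image through the indexed LOAD_INLINE_QMD_DATA(i) method. */
static void load_inline_qmd(NvU32 *pb, unsigned *n,
                            NvU64 qmdGpuVa, const NvU32 *qmd, unsigned words)
{
    NvU64 shifted8 = qmdGpuVa >> 8;   /* 256-byte units, per _SHIFTED8 (assumed) */
    unsigned i;

    push_method(pb, n, NVC6C0_SET_INLINE_QMD_ADDRESS_A, (NvU32)(shifted8 >> 32));
    push_method(pb, n, NVC6C0_SET_INLINE_QMD_ADDRESS_B, (NvU32)shifted8);

    for (i = 0; i < words; i++)
        push_method(pb, n, NVC6C0_LOAD_INLINE_QMD_DATA(i), qmd[i]);
}
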
+#define NVC6C0_SET_FALCON15 0x053c +#define NVC6C0_SET_FALCON15_V 31:0 + +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 16:0 + +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A 0x07b0 +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B 0x07b4 +#define NVC6C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC6C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVC6C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVC6C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVC6C0_SET_SCG_COMPUTE_SCHEDULING_PARAMETERS(i) (0x0da0+(i)*4) +#define NVC6C0_SET_SCG_COMPUTE_SCHEDULING_PARAMETERS_V 31:0 + +#define NVC6C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVC6C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC6C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVC6C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVC6C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define NVC6C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVC6C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC6C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC6C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC6C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC6C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC6C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC6C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVC6C0_SET_RENDER_ENABLE_A 0x1550 +#define NVC6C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC6C0_SET_RENDER_ENABLE_B 0x1554 +#define NVC6C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC6C0_SET_RENDER_ENABLE_C 0x1558 +#define NVC6C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC6C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC6C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define 
NVC6C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC6C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC6C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC6C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVC6C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC6C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVC6C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC6C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVC6C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVC6C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVC6C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC6C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVC6C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC6C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVC6C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVC6C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVC6C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVC6C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVC6C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVC6C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define NVC6C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVC6C0_PIPE_NOP 0x1a2c +#define NVC6C0_PIPE_NOP_V 31:0 + +#define NVC6C0_SET_SPARE00 0x1a30 +#define NVC6C0_SET_SPARE00_V 31:0 + +#define NVC6C0_SET_SPARE01 0x1a34 +#define NVC6C0_SET_SPARE01_V 31:0 + +#define NVC6C0_SET_SPARE02 0x1a38 +#define NVC6C0_SET_SPARE02_V 31:0 + +#define NVC6C0_SET_SPARE03 0x1a3c +#define NVC6C0_SET_SPARE03_V 31:0 + +#define NVC6C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVC6C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC6C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVC6C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC6C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVC6C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC6C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVC6C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 
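
SET_REPORT_SEMAPHORE_A/B split the report address into an 8-bit upper word (A, bits 7:0) and a 32-bit lower word (B), C carries the payload, and D packs the operation and flags. Below is a minimal hand-built sketch of a one-word "release and awaken" report derived from the bit positions listed above; the helper function and the particular flag combination are illustrative assumptions, not the driver's code.

#include "nvtypes.h"
#include "clc6c0.h"

/* Sketch: compute the four data words for a one-word semaphore release that
 * also awakens a CPU waiter.  Shift amounts follow the field definitions
 * above: OPERATION 1:0, AWAKEN_ENABLE 20:20, STRUCTURE_SIZE 28:28. */
static void report_semaphore_release(NvU64 reportGpuVa, NvU32 payload,
                                     NvU32 data[4])
{
    data[0] = (NvU32)(reportGpuVa >> 32) & 0xFF;   /* A: OFFSET_UPPER, 7:0  */
    data[1] = (NvU32)reportGpuVa;                  /* B: OFFSET_LOWER, 31:0 */
    data[2] = payload;                             /* C: PAYLOAD            */
    data[3] = (NVC6C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE        << 0)  |
              (NVC6C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE       << 20) |
              (NVC6C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD  << 28);
    /* data[0..3] would be written to methods 0x1b00, 0x1b04, 0x1b08, 0x1b0c
     * (SET_REPORT_SEMAPHORE_A..D) in that order. */
}
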
+#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP 19:19 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_FALSE 0x00000000 +#define NVC6C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_TRUE 0x00000001 + +#define NVC6C0_SET_TRAP_HANDLER_A 0x25f8 +#define NVC6C0_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVC6C0_SET_TRAP_HANDLER_B 0x25fc +#define NVC6C0_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVC6C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVC6C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVC6C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVC6C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVC6C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define NVC6C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC6C0_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define NVC6C0_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC6C0_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVC6C0_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define 
NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVC6C0_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVC6C0_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC6C0_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVC6C0_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8 +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0 + +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec +#define NVC6C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0 + +#define NVC6C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVC6C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#endif /* _cl_ampere_compute_a_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc6fa.h b/src/common/sdk/nvidia/inc/class/clc6fa.h new file mode 100644 index 000000000..92d06eab6 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc6fa.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc6fa_h_ +#define _clc6fa_h_ + +#define NVC6FA_VIDEO_OFA (0x0000C6FA) + +#endif // _clc6fa_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc770.h b/src/common/sdk/nvidia/inc/class/clc770.h new file mode 100644 index 000000000..53851df53 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc770.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc770_h_ +#define _clc770_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NVC770_DISPLAY (0x0000C770) + +typedef struct +{ + NvU32 numHeads; // Number of HEADs in this chip/display + NvU32 numSors; // Number of SORs in this chip/display + NvU32 numDsis; // Number of DSIs in this chip/display +} NVC770_ALLOCATION_PARAMETERS; + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif /* _clc770_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc797.h b/src/common/sdk/nvidia/inc/class/clc797.h new file mode 100644 index 000000000..251b6e1cc --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc797.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clc797_h_ +#define _clc797_h_ + +#define AMPERE_B 0xC797 + +#endif // _clc797_h_ diff --git a/src/common/sdk/nvidia/inc/class/clc7b0.h b/src/common/sdk/nvidia/inc/class/clc7b0.h new file mode 100644 index 000000000..8c98771a8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc7b0.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef clc7b0_h_ +#define clc7b0_h_ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC7B0_VIDEO_DECODER (0x0000C7B0) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc7b0_h + diff --git a/src/common/sdk/nvidia/inc/class/clc7b5.h b/src/common/sdk/nvidia/inc/class/clc7b5.h new file mode 100644 index 000000000..df331e8ad --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc7b5.h @@ -0,0 +1,304 @@ +/******************************************************************************* + Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +*******************************************************************************/ + +#include "nvtypes.h" + +#ifndef _clc7b5_h_ +#define _clc7b5_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define AMPERE_DMA_COPY_B (0x0000C7B5) + +#define NVC7B5_NOP (0x00000100) +#define NVC7B5_NOP_PARAMETER 31:0 +#define NVC7B5_PM_TRIGGER (0x00000140) +#define NVC7B5_PM_TRIGGER_V 31:0 +#define NVC7B5_SET_MONITORED_FENCE_TYPE (0x0000021C) +#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE 0:0 +#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE (0x00000000) +#define NVC7B5_SET_MONITORED_FENCE_TYPE_TYPE_MONITORED_FENCE_EXT (0x00000001) +#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER (0x00000220) +#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_UPPER_UPPER 16:0 +#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER (0x00000224) +#define NVC7B5_SET_MONITORED_FENCE_SIGNAL_ADDR_BASE_LOWER_LOWER 31:0 +#define NVC7B5_SET_SEMAPHORE_A (0x00000240) +#define NVC7B5_SET_SEMAPHORE_A_UPPER 16:0 +#define NVC7B5_SET_SEMAPHORE_B (0x00000244) +#define NVC7B5_SET_SEMAPHORE_B_LOWER 31:0 +#define NVC7B5_SET_SEMAPHORE_PAYLOAD (0x00000248) +#define NVC7B5_SET_SEMAPHORE_PAYLOAD_PAYLOAD 31:0 +#define NVC7B5_SET_SEMAPHORE_PAYLOAD_UPPER (0x0000024C) +#define NVC7B5_SET_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD 31:0 +#define NVC7B5_SET_RENDER_ENABLE_A (0x00000254) +#define NVC7B5_SET_RENDER_ENABLE_A_UPPER 7:0 +#define NVC7B5_SET_RENDER_ENABLE_B (0x00000258) +#define NVC7B5_SET_RENDER_ENABLE_B_LOWER 31:0 +#define NVC7B5_SET_RENDER_ENABLE_C (0x0000025C) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_FALSE (0x00000000) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_TRUE (0x00000001) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_CONDITIONAL (0x00000002) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL (0x00000003) +#define NVC7B5_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL (0x00000004) +#define NVC7B5_SET_SRC_PHYS_MODE (0x00000260) +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET 1:0 +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC7B5_SET_SRC_PHYS_MODE_TARGET_PEERMEM (0x00000003) +#define NVC7B5_SET_SRC_PHYS_MODE_BASIC_KIND 5:2 +#define NVC7B5_SET_SRC_PHYS_MODE_PEER_ID 8:6 +#define NVC7B5_SET_SRC_PHYS_MODE_FLA 9:9 +#define NVC7B5_SET_DST_PHYS_MODE (0x00000264) +#define NVC7B5_SET_DST_PHYS_MODE_TARGET 1:0 +#define NVC7B5_SET_DST_PHYS_MODE_TARGET_LOCAL_FB (0x00000000) +#define NVC7B5_SET_DST_PHYS_MODE_TARGET_COHERENT_SYSMEM (0x00000001) +#define NVC7B5_SET_DST_PHYS_MODE_TARGET_NONCOHERENT_SYSMEM (0x00000002) +#define NVC7B5_SET_DST_PHYS_MODE_TARGET_PEERMEM (0x00000003) +#define NVC7B5_SET_DST_PHYS_MODE_BASIC_KIND 5:2 +#define NVC7B5_SET_DST_PHYS_MODE_PEER_ID 8:6 +#define NVC7B5_SET_DST_PHYS_MODE_FLA 9:9 +#define NVC7B5_LAUNCH_DMA (0x00000300) +#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE 1:0 +#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NONE (0x00000000) +#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_PIPELINED (0x00000001) +#define NVC7B5_LAUNCH_DMA_DATA_TRANSFER_TYPE_NON_PIPELINED (0x00000002) +#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE 2:2 +#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_FLUSH_ENABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE 25:25 +#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE_SYS (0x00000000) +#define NVC7B5_LAUNCH_DMA_FLUSH_TYPE_GL (0x00000001) +#define 
NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE 4:3 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_NONE (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_NO_TIMESTAMP (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_SEMAPHORE_WITH_TIMESTAMP (0x00000002) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_ONE_WORD_SEMAPHORE (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_FOUR_WORD_SEMAPHORE (0x00000002) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_TYPE_RELEASE_CONDITIONAL_INTR_SEMAPHORE (0x00000003) +#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE 6:5 +#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_NONE (0x00000000) +#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_BLOCKING (0x00000001) +#define NVC7B5_LAUNCH_DMA_INTERRUPT_TYPE_NON_BLOCKING (0x00000002) +#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT 7:7 +#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC7B5_LAUNCH_DMA_SRC_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT 8:8 +#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR (0x00000000) +#define NVC7B5_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH (0x00000001) +#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE 9:9 +#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_MULTI_LINE_ENABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE 10:10 +#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_REMAP_ENABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE 11:11 +#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_FORCE_RMWDISABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_SRC_TYPE 12:12 +#define NVC7B5_LAUNCH_DMA_SRC_TYPE_VIRTUAL (0x00000000) +#define NVC7B5_LAUNCH_DMA_SRC_TYPE_PHYSICAL (0x00000001) +#define NVC7B5_LAUNCH_DMA_DST_TYPE 13:13 +#define NVC7B5_LAUNCH_DMA_DST_TYPE_VIRTUAL (0x00000000) +#define NVC7B5_LAUNCH_DMA_DST_TYPE_PHYSICAL (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION 17:14 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMIN (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IMAX (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IXOR (0x00000002) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IAND (0x00000003) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IOR (0x00000004) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_IADD (0x00000005) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INC (0x00000006) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_DEC (0x00000007) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDA (0x00000008) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDB (0x00000009) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FADD (0x0000000A) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMIN (0x0000000B) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_FMAX (0x0000000C) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDC (0x0000000D) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDD (0x0000000E) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_INVALIDE (0x0000000F) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN 18:18 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_SIGNED (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_SIGN_UNSIGNED (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE 19:19 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_REDUCTION_ENABLE_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_VPRMODE 23:22 +#define 
NVC7B5_LAUNCH_DMA_VPRMODE_VPR_NONE (0x00000000) +#define NVC7B5_LAUNCH_DMA_VPRMODE_VPR_VID2VID (0x00000001) +#define NVC7B5_LAUNCH_DMA_RESERVED_START_OF_COPY 24:24 +#define NVC7B5_LAUNCH_DMA_DISABLE_PLC 26:26 +#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_FALSE (0x00000000) +#define NVC7B5_LAUNCH_DMA_DISABLE_PLC_TRUE (0x00000001) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE 27:27 +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_ONE_WORD (0x00000000) +#define NVC7B5_LAUNCH_DMA_SEMAPHORE_PAYLOAD_SIZE_TWO_WORD (0x00000001) +#define NVC7B5_LAUNCH_DMA_RESERVED_ERR_CODE 31:28 +#define NVC7B5_OFFSET_IN_UPPER (0x00000400) +#define NVC7B5_OFFSET_IN_UPPER_UPPER 16:0 +#define NVC7B5_OFFSET_IN_LOWER (0x00000404) +#define NVC7B5_OFFSET_IN_LOWER_VALUE 31:0 +#define NVC7B5_OFFSET_OUT_UPPER (0x00000408) +#define NVC7B5_OFFSET_OUT_UPPER_UPPER 16:0 +#define NVC7B5_OFFSET_OUT_LOWER (0x0000040C) +#define NVC7B5_OFFSET_OUT_LOWER_VALUE 31:0 +#define NVC7B5_PITCH_IN (0x00000410) +#define NVC7B5_PITCH_IN_VALUE 31:0 +#define NVC7B5_PITCH_OUT (0x00000414) +#define NVC7B5_PITCH_OUT_VALUE 31:0 +#define NVC7B5_LINE_LENGTH_IN (0x00000418) +#define NVC7B5_LINE_LENGTH_IN_VALUE 31:0 +#define NVC7B5_LINE_COUNT (0x0000041C) +#define NVC7B5_LINE_COUNT_VALUE 31:0 +#define NVC7B5_SET_REMAP_CONST_A (0x00000700) +#define NVC7B5_SET_REMAP_CONST_A_V 31:0 +#define NVC7B5_SET_REMAP_CONST_B (0x00000704) +#define NVC7B5_SET_REMAP_CONST_B_V 31:0 +#define NVC7B5_SET_REMAP_COMPONENTS (0x00000708) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X 2:0 +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_X (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_Y (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_Z (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_SRC_W (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_CONST_A (0x00000004) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_CONST_B (0x00000005) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_X_NO_WRITE (0x00000006) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y 6:4 +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_X (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Y (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_Z (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_SRC_W (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_CONST_A (0x00000004) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_CONST_B (0x00000005) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Y_NO_WRITE (0x00000006) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z 10:8 +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_X (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Y (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_Z (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_SRC_W (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_CONST_A (0x00000004) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_CONST_B (0x00000005) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_Z_NO_WRITE (0x00000006) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W 14:12 +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_X (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_Y (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_Z (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_SRC_W (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_CONST_A (0x00000004) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_CONST_B (0x00000005) +#define NVC7B5_SET_REMAP_COMPONENTS_DST_W_NO_WRITE (0x00000006) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE 17:16 +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_ONE (0x00000000) 
+#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_COMPONENT_SIZE_FOUR (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS 21:20 +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_ONE (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_SRC_COMPONENTS_FOUR (0x00000003) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS 25:24 +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_ONE (0x00000000) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_TWO (0x00000001) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_THREE (0x00000002) +#define NVC7B5_SET_REMAP_COMPONENTS_NUM_DST_COMPONENTS_FOUR (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE (0x0000070C) +#define NVC7B5_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC7B5_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC7B5_SET_DST_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC7B5_SET_DST_WIDTH (0x00000710) +#define NVC7B5_SET_DST_WIDTH_V 31:0 +#define NVC7B5_SET_DST_HEIGHT (0x00000714) +#define NVC7B5_SET_DST_HEIGHT_V 31:0 +#define NVC7B5_SET_DST_DEPTH (0x00000718) +#define NVC7B5_SET_DST_DEPTH_V 31:0 +#define NVC7B5_SET_DST_LAYER (0x0000071C) +#define NVC7B5_SET_DST_LAYER_V 31:0 +#define NVC7B5_SET_DST_ORIGIN (0x00000720) +#define NVC7B5_SET_DST_ORIGIN_X 15:0 +#define NVC7B5_SET_DST_ORIGIN_Y 31:16 +#define NVC7B5_SET_SRC_BLOCK_SIZE (0x00000728) +#define NVC7B5_SET_SRC_BLOCK_SIZE_WIDTH 3:0 +#define NVC7B5_SET_SRC_BLOCK_SIZE_WIDTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT 7:4 +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_ONE_GOB (0x00000000) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_TWO_GOBS (0x00000001) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_SRC_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS (0x00000005) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH 11:8 +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_ONE_GOB (0x00000000) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_TWO_GOBS (0x00000001) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_FOUR_GOBS (0x00000002) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_EIGHT_GOBS (0x00000003) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS (0x00000004) +#define NVC7B5_SET_SRC_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 
(0x00000005) +#define NVC7B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT 15:12 +#define NVC7B5_SET_SRC_BLOCK_SIZE_GOB_HEIGHT_GOB_HEIGHT_FERMI_8 (0x00000001) +#define NVC7B5_SET_SRC_WIDTH (0x0000072C) +#define NVC7B5_SET_SRC_WIDTH_V 31:0 +#define NVC7B5_SET_SRC_HEIGHT (0x00000730) +#define NVC7B5_SET_SRC_HEIGHT_V 31:0 +#define NVC7B5_SET_SRC_DEPTH (0x00000734) +#define NVC7B5_SET_SRC_DEPTH_V 31:0 +#define NVC7B5_SET_SRC_LAYER (0x00000738) +#define NVC7B5_SET_SRC_LAYER_V 31:0 +#define NVC7B5_SET_SRC_ORIGIN (0x0000073C) +#define NVC7B5_SET_SRC_ORIGIN_X 15:0 +#define NVC7B5_SET_SRC_ORIGIN_Y 31:16 +#define NVC7B5_SRC_ORIGIN_X (0x00000744) +#define NVC7B5_SRC_ORIGIN_X_VALUE 31:0 +#define NVC7B5_SRC_ORIGIN_Y (0x00000748) +#define NVC7B5_SRC_ORIGIN_Y_VALUE 31:0 +#define NVC7B5_DST_ORIGIN_X (0x0000074C) +#define NVC7B5_DST_ORIGIN_X_VALUE 31:0 +#define NVC7B5_DST_ORIGIN_Y (0x00000750) +#define NVC7B5_DST_ORIGIN_Y_VALUE 31:0 +#define NVC7B5_PM_TRIGGER_END (0x00001114) +#define NVC7B5_PM_TRIGGER_END_V 31:0 + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc7b5_h + diff --git a/src/common/sdk/nvidia/inc/class/clc7b7.h b/src/common/sdk/nvidia/inc/class/clc7b7.h new file mode 100644 index 000000000..909195bf9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc7b7.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef clc7b7_h_ +#define clc7b7_h_ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC7B7_VIDEO_ENCODER (0x0000C7B7) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // clc7b7_h + diff --git a/src/common/sdk/nvidia/inc/class/clc7c0.h b/src/common/sdk/nvidia/inc/class/clc7c0.h new file mode 100644 index 000000000..6eb288964 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc7c0.h @@ -0,0 +1,848 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _cl_ampere_compute_b_h_ +#define _cl_ampere_compute_b_h_ + +/* AUTO GENERATED FILE -- DO NOT EDIT */ +/* Command: ../../../../class/bin/sw_header.pl ampere_compute_b */ + +#include "nvtypes.h" + +#define AMPERE_COMPUTE_B 0xC7C0 + +#define NVC7C0_SET_OBJECT 0x0000 +#define NVC7C0_SET_OBJECT_CLASS_ID 15:0 +#define NVC7C0_SET_OBJECT_ENGINE_ID 20:16 + +#define NVC7C0_NO_OPERATION 0x0100 +#define NVC7C0_NO_OPERATION_V 31:0 + +#define NVC7C0_SET_NOTIFY_A 0x0104 +#define NVC7C0_SET_NOTIFY_A_ADDRESS_UPPER 7:0 + +#define NVC7C0_SET_NOTIFY_B 0x0108 +#define NVC7C0_SET_NOTIFY_B_ADDRESS_LOWER 31:0 + +#define NVC7C0_NOTIFY 0x010c +#define NVC7C0_NOTIFY_TYPE 31:0 +#define NVC7C0_NOTIFY_TYPE_WRITE_ONLY 0x00000000 +#define NVC7C0_NOTIFY_TYPE_WRITE_THEN_AWAKEN 0x00000001 + +#define NVC7C0_WAIT_FOR_IDLE 0x0110 +#define NVC7C0_WAIT_FOR_IDLE_V 31:0 + +#define NVC7C0_LOAD_MME_INSTRUCTION_RAM_POINTER 0x0114 +#define NVC7C0_LOAD_MME_INSTRUCTION_RAM_POINTER_V 31:0 + +#define NVC7C0_LOAD_MME_INSTRUCTION_RAM 0x0118 +#define NVC7C0_LOAD_MME_INSTRUCTION_RAM_V 31:0 + +#define NVC7C0_LOAD_MME_START_ADDRESS_RAM_POINTER 0x011c +#define NVC7C0_LOAD_MME_START_ADDRESS_RAM_POINTER_V 31:0 + +#define NVC7C0_LOAD_MME_START_ADDRESS_RAM 0x0120 +#define NVC7C0_LOAD_MME_START_ADDRESS_RAM_V 31:0 + +#define NVC7C0_SET_MME_SHADOW_RAM_CONTROL 0x0124 +#define NVC7C0_SET_MME_SHADOW_RAM_CONTROL_MODE 1:0 +#define NVC7C0_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK 0x00000000 +#define NVC7C0_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_TRACK_WITH_FILTER 0x00000001 +#define NVC7C0_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_PASSTHROUGH 0x00000002 +#define NVC7C0_SET_MME_SHADOW_RAM_CONTROL_MODE_METHOD_REPLAY 0x00000003 + +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_A 0x0130 +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_B 0x0134 +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_C 0x0138 +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_C_MODE 2:0 +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC7C0_SET_GLOBAL_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC7C0_SEND_GO_IDLE 0x013c 
+#define NVC7C0_SEND_GO_IDLE_V 31:0 + +#define NVC7C0_PM_TRIGGER 0x0140 +#define NVC7C0_PM_TRIGGER_V 31:0 + +#define NVC7C0_PM_TRIGGER_WFI 0x0144 +#define NVC7C0_PM_TRIGGER_WFI_V 31:0 + +#define NVC7C0_FE_ATOMIC_SEQUENCE_BEGIN 0x0148 +#define NVC7C0_FE_ATOMIC_SEQUENCE_BEGIN_V 31:0 + +#define NVC7C0_FE_ATOMIC_SEQUENCE_END 0x014c +#define NVC7C0_FE_ATOMIC_SEQUENCE_END_V 31:0 + +#define NVC7C0_SET_INSTRUMENTATION_METHOD_HEADER 0x0150 +#define NVC7C0_SET_INSTRUMENTATION_METHOD_HEADER_V 31:0 + +#define NVC7C0_SET_INSTRUMENTATION_METHOD_DATA 0x0154 +#define NVC7C0_SET_INSTRUMENTATION_METHOD_DATA_V 31:0 + +#define NVC7C0_SET_REPORT_SEMAPHORE_PAYLOAD_LOWER 0x0158 +#define NVC7C0_SET_REPORT_SEMAPHORE_PAYLOAD_LOWER_PAYLOAD_LOWER 31:0 + +#define NVC7C0_SET_REPORT_SEMAPHORE_PAYLOAD_UPPER 0x015c +#define NVC7C0_SET_REPORT_SEMAPHORE_PAYLOAD_UPPER_PAYLOAD_UPPER 31:0 + +#define NVC7C0_SET_REPORT_SEMAPHORE_ADDRESS_LOWER 0x0160 +#define NVC7C0_SET_REPORT_SEMAPHORE_ADDRESS_LOWER_LOWER 31:0 + +#define NVC7C0_SET_REPORT_SEMAPHORE_ADDRESS_UPPER 0x0164 +#define NVC7C0_SET_REPORT_SEMAPHORE_ADDRESS_UPPER_UPPER 7:0 + +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE 0x0168 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_OPERATION 1:0 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_OPERATION_RELEASE 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_OPERATION_ACQUIRE 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_OPERATION_REPORT_ONLY 0x00000002 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_OPERATION_TRAP 0x00000003 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE 2:2 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE 4:3 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_FOUR_WORDS 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_ONE_WORD 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_STRUCTURE_SIZE_SEMAPHORE_TWO_WORDS 0x00000002 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE 5:5 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE 6:6 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP 9:7 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_INC 0x00000003 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_AND 0x00000005 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_OR 0x00000006 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT 11:10 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT_UNSIGNED 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_REDUCTION_FORMAT_SIGNED 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64 12:12 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64_FALSE 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_PAYLOAD_SIZE64_TRUE 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE 14:13 
+#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_NONE 0x00000000 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_UNCONDITIONAL 0x00000001 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_CONDITIONAL 0x00000002 +#define NVC7C0_REPORT_SEMAPHORE_EXECUTE_TRAP_TYPE_TRAP_CONDITIONAL_EXT 0x00000003 + +#define NVC7C0_LINE_LENGTH_IN 0x0180 +#define NVC7C0_LINE_LENGTH_IN_VALUE 31:0 + +#define NVC7C0_LINE_COUNT 0x0184 +#define NVC7C0_LINE_COUNT_VALUE 31:0 + +#define NVC7C0_OFFSET_OUT_UPPER 0x0188 +#define NVC7C0_OFFSET_OUT_UPPER_VALUE 16:0 + +#define NVC7C0_OFFSET_OUT 0x018c +#define NVC7C0_OFFSET_OUT_VALUE 31:0 + +#define NVC7C0_PITCH_OUT 0x0190 +#define NVC7C0_PITCH_OUT_VALUE 31:0 + +#define NVC7C0_SET_DST_BLOCK_SIZE 0x0194 +#define NVC7C0_SET_DST_BLOCK_SIZE_WIDTH 3:0 +#define NVC7C0_SET_DST_BLOCK_SIZE_WIDTH_ONE_GOB 0x00000000 +#define NVC7C0_SET_DST_BLOCK_SIZE_HEIGHT 7:4 +#define NVC7C0_SET_DST_BLOCK_SIZE_HEIGHT_ONE_GOB 0x00000000 +#define NVC7C0_SET_DST_BLOCK_SIZE_HEIGHT_TWO_GOBS 0x00000001 +#define NVC7C0_SET_DST_BLOCK_SIZE_HEIGHT_FOUR_GOBS 0x00000002 +#define NVC7C0_SET_DST_BLOCK_SIZE_HEIGHT_EIGHT_GOBS 0x00000003 +#define NVC7C0_SET_DST_BLOCK_SIZE_HEIGHT_SIXTEEN_GOBS 0x00000004 +#define NVC7C0_SET_DST_BLOCK_SIZE_HEIGHT_THIRTYTWO_GOBS 0x00000005 +#define NVC7C0_SET_DST_BLOCK_SIZE_DEPTH 11:8 +#define NVC7C0_SET_DST_BLOCK_SIZE_DEPTH_ONE_GOB 0x00000000 +#define NVC7C0_SET_DST_BLOCK_SIZE_DEPTH_TWO_GOBS 0x00000001 +#define NVC7C0_SET_DST_BLOCK_SIZE_DEPTH_FOUR_GOBS 0x00000002 +#define NVC7C0_SET_DST_BLOCK_SIZE_DEPTH_EIGHT_GOBS 0x00000003 +#define NVC7C0_SET_DST_BLOCK_SIZE_DEPTH_SIXTEEN_GOBS 0x00000004 +#define NVC7C0_SET_DST_BLOCK_SIZE_DEPTH_THIRTYTWO_GOBS 0x00000005 + +#define NVC7C0_SET_DST_WIDTH 0x0198 +#define NVC7C0_SET_DST_WIDTH_V 31:0 + +#define NVC7C0_SET_DST_HEIGHT 0x019c +#define NVC7C0_SET_DST_HEIGHT_V 31:0 + +#define NVC7C0_SET_DST_DEPTH 0x01a0 +#define NVC7C0_SET_DST_DEPTH_V 31:0 + +#define NVC7C0_SET_DST_LAYER 0x01a4 +#define NVC7C0_SET_DST_LAYER_V 31:0 + +#define NVC7C0_SET_DST_ORIGIN_BYTES_X 0x01a8 +#define NVC7C0_SET_DST_ORIGIN_BYTES_X_V 20:0 + +#define NVC7C0_SET_DST_ORIGIN_SAMPLES_Y 0x01ac +#define NVC7C0_SET_DST_ORIGIN_SAMPLES_Y_V 16:0 + +#define NVC7C0_LAUNCH_DMA 0x01b0 +#define NVC7C0_LAUNCH_DMA_DST_MEMORY_LAYOUT 0:0 +#define NVC7C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_BLOCKLINEAR 0x00000000 +#define NVC7C0_LAUNCH_DMA_DST_MEMORY_LAYOUT_PITCH 0x00000001 +#define NVC7C0_LAUNCH_DMA_COMPLETION_TYPE 5:4 +#define NVC7C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_DISABLE 0x00000000 +#define NVC7C0_LAUNCH_DMA_COMPLETION_TYPE_FLUSH_ONLY 0x00000001 +#define NVC7C0_LAUNCH_DMA_COMPLETION_TYPE_RELEASE_SEMAPHORE 0x00000002 +#define NVC7C0_LAUNCH_DMA_INTERRUPT_TYPE 9:8 +#define NVC7C0_LAUNCH_DMA_INTERRUPT_TYPE_NONE 0x00000000 +#define NVC7C0_LAUNCH_DMA_INTERRUPT_TYPE_INTERRUPT 0x00000001 +#define NVC7C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE 12:12 +#define NVC7C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_FOUR_WORDS 0x00000000 +#define NVC7C0_LAUNCH_DMA_SEMAPHORE_STRUCT_SIZE_ONE_WORD 0x00000001 +#define NVC7C0_LAUNCH_DMA_REDUCTION_ENABLE 1:1 +#define NVC7C0_LAUNCH_DMA_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC7C0_LAUNCH_DMA_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC7C0_LAUNCH_DMA_REDUCTION_OP 15:13 +#define NVC7C0_LAUNCH_DMA_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC7C0_LAUNCH_DMA_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC7C0_LAUNCH_DMA_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC7C0_LAUNCH_DMA_REDUCTION_OP_RED_INC 0x00000003 +#define NVC7C0_LAUNCH_DMA_REDUCTION_OP_RED_DEC 0x00000004 +#define 
NVC7C0_LAUNCH_DMA_REDUCTION_OP_RED_AND 0x00000005 +#define NVC7C0_LAUNCH_DMA_REDUCTION_OP_RED_OR 0x00000006 +#define NVC7C0_LAUNCH_DMA_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC7C0_LAUNCH_DMA_REDUCTION_FORMAT 3:2 +#define NVC7C0_LAUNCH_DMA_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC7C0_LAUNCH_DMA_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC7C0_LAUNCH_DMA_SYSMEMBAR_DISABLE 6:6 +#define NVC7C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_FALSE 0x00000000 +#define NVC7C0_LAUNCH_DMA_SYSMEMBAR_DISABLE_TRUE 0x00000001 + +#define NVC7C0_LOAD_INLINE_DATA 0x01b4 +#define NVC7C0_LOAD_INLINE_DATA_V 31:0 + +#define NVC7C0_SET_I2M_SEMAPHORE_A 0x01dc +#define NVC7C0_SET_I2M_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC7C0_SET_I2M_SEMAPHORE_B 0x01e0 +#define NVC7C0_SET_I2M_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC7C0_SET_I2M_SEMAPHORE_C 0x01e4 +#define NVC7C0_SET_I2M_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC7C0_SET_SM_SCG_CONTROL 0x01e8 +#define NVC7C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS 0:0 +#define NVC7C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS_FALSE 0x00000000 +#define NVC7C0_SET_SM_SCG_CONTROL_COMPUTE_IN_GRAPHICS_TRUE 0x00000001 + +#define NVC7C0_SET_MME_SWITCH_STATE 0x01ec +#define NVC7C0_SET_MME_SWITCH_STATE_VALID 0:0 +#define NVC7C0_SET_MME_SWITCH_STATE_VALID_FALSE 0x00000000 +#define NVC7C0_SET_MME_SWITCH_STATE_VALID_TRUE 0x00000001 +#define NVC7C0_SET_MME_SWITCH_STATE_SAVE_MACRO 11:4 +#define NVC7C0_SET_MME_SWITCH_STATE_RESTORE_MACRO 19:12 + +#define NVC7C0_SET_I2M_SPARE_NOOP00 0x01f0 +#define NVC7C0_SET_I2M_SPARE_NOOP00_V 31:0 + +#define NVC7C0_SET_I2M_SPARE_NOOP01 0x01f4 +#define NVC7C0_SET_I2M_SPARE_NOOP01_V 31:0 + +#define NVC7C0_SET_I2M_SPARE_NOOP02 0x01f8 +#define NVC7C0_SET_I2M_SPARE_NOOP02_V 31:0 + +#define NVC7C0_SET_I2M_SPARE_NOOP03 0x01fc +#define NVC7C0_SET_I2M_SPARE_NOOP03_V 31:0 + +#define NVC7C0_SET_VALID_SPAN_OVERFLOW_AREA_A 0x0200 +#define NVC7C0_SET_VALID_SPAN_OVERFLOW_AREA_A_ADDRESS_UPPER 7:0 + +#define NVC7C0_SET_VALID_SPAN_OVERFLOW_AREA_B 0x0204 +#define NVC7C0_SET_VALID_SPAN_OVERFLOW_AREA_B_ADDRESS_LOWER 31:0 + +#define NVC7C0_SET_VALID_SPAN_OVERFLOW_AREA_C 0x0208 +#define NVC7C0_SET_VALID_SPAN_OVERFLOW_AREA_C_SIZE 31:0 + +#define NVC7C0_PERFMON_TRANSFER 0x0210 +#define NVC7C0_PERFMON_TRANSFER_V 31:0 + +#define NVC7C0_SET_QMD_VIRTUALIZATION_BASE_A 0x0214 +#define NVC7C0_SET_QMD_VIRTUALIZATION_BASE_A_ADDRESS_UPPER 7:0 + +#define NVC7C0_SET_QMD_VIRTUALIZATION_BASE_B 0x0218 +#define NVC7C0_SET_QMD_VIRTUALIZATION_BASE_B_ADDRESS_LOWER 31:0 + +#define NVC7C0_INVALIDATE_SHADER_CACHES 0x021c +#define NVC7C0_INVALIDATE_SHADER_CACHES_INSTRUCTION 0:0 +#define NVC7C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_FALSE 0x00000000 +#define NVC7C0_INVALIDATE_SHADER_CACHES_INSTRUCTION_TRUE 0x00000001 +#define NVC7C0_INVALIDATE_SHADER_CACHES_DATA 4:4 +#define NVC7C0_INVALIDATE_SHADER_CACHES_DATA_FALSE 0x00000000 +#define NVC7C0_INVALIDATE_SHADER_CACHES_DATA_TRUE 0x00000001 +#define NVC7C0_INVALIDATE_SHADER_CACHES_CONSTANT 12:12 +#define NVC7C0_INVALIDATE_SHADER_CACHES_CONSTANT_FALSE 0x00000000 +#define NVC7C0_INVALIDATE_SHADER_CACHES_CONSTANT_TRUE 0x00000001 +#define NVC7C0_INVALIDATE_SHADER_CACHES_LOCKS 1:1 +#define NVC7C0_INVALIDATE_SHADER_CACHES_LOCKS_FALSE 0x00000000 +#define NVC7C0_INVALIDATE_SHADER_CACHES_LOCKS_TRUE 0x00000001 +#define NVC7C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA 2:2 +#define NVC7C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_FALSE 0x00000000 +#define NVC7C0_INVALIDATE_SHADER_CACHES_FLUSH_DATA_TRUE 0x00000001 + +#define NVC7C0_SET_RESERVED_SW_METHOD00 0x0220 +#define 
NVC7C0_SET_RESERVED_SW_METHOD00_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD01 0x0224 +#define NVC7C0_SET_RESERVED_SW_METHOD01_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD02 0x0228 +#define NVC7C0_SET_RESERVED_SW_METHOD02_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD03 0x022c +#define NVC7C0_SET_RESERVED_SW_METHOD03_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD04 0x0230 +#define NVC7C0_SET_RESERVED_SW_METHOD04_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD05 0x0234 +#define NVC7C0_SET_RESERVED_SW_METHOD05_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD06 0x0238 +#define NVC7C0_SET_RESERVED_SW_METHOD06_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD07 0x023c +#define NVC7C0_SET_RESERVED_SW_METHOD07_V 31:0 + +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI 0x0244 +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES 0:0 +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_NO_WFI_TAG 25:4 + +#define NVC7C0_SET_CWD_REF_COUNTER 0x0248 +#define NVC7C0_SET_CWD_REF_COUNTER_SELECT 5:0 +#define NVC7C0_SET_CWD_REF_COUNTER_VALUE 23:8 + +#define NVC7C0_SET_RESERVED_SW_METHOD08 0x024c +#define NVC7C0_SET_RESERVED_SW_METHOD08_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD09 0x0250 +#define NVC7C0_SET_RESERVED_SW_METHOD09_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD10 0x0254 +#define NVC7C0_SET_RESERVED_SW_METHOD10_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD11 0x0258 +#define NVC7C0_SET_RESERVED_SW_METHOD11_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD12 0x025c +#define NVC7C0_SET_RESERVED_SW_METHOD12_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD13 0x0260 +#define NVC7C0_SET_RESERVED_SW_METHOD13_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD14 0x0264 +#define NVC7C0_SET_RESERVED_SW_METHOD14_V 31:0 + +#define NVC7C0_SET_RESERVED_SW_METHOD15 0x0268 +#define NVC7C0_SET_RESERVED_SW_METHOD15_V 31:0 + +#define NVC7C0_SET_SCG_CONTROL 0x0270 +#define NVC7C0_SET_SCG_CONTROL_COMPUTE1_MAX_SM_COUNT 8:0 +#define NVC7C0_SET_SCG_CONTROL_COMPUTE1_MIN_SM_COUNT 20:12 +#define NVC7C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE 24:24 +#define NVC7C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_FALSE 0x00000000 +#define NVC7C0_SET_SCG_CONTROL_DISABLE_COMPUTE1_LIMIT_IN_ALL_COMPUTE_TRUE 0x00000001 + +#define NVC7C0_SET_COMPUTE_CLASS_VERSION 0x0280 +#define NVC7C0_SET_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC7C0_SET_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC7C0_CHECK_COMPUTE_CLASS_VERSION 0x0284 +#define NVC7C0_CHECK_COMPUTE_CLASS_VERSION_CURRENT 15:0 +#define NVC7C0_CHECK_COMPUTE_CLASS_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC7C0_SET_QMD_VERSION 0x0288 +#define NVC7C0_SET_QMD_VERSION_CURRENT 15:0 +#define NVC7C0_SET_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC7C0_CHECK_QMD_VERSION 0x0290 +#define NVC7C0_CHECK_QMD_VERSION_CURRENT 15:0 +#define NVC7C0_CHECK_QMD_VERSION_OLDEST_SUPPORTED 31:16 + +#define NVC7C0_INVALIDATE_SKED_CACHES 0x0298 +#define NVC7C0_INVALIDATE_SKED_CACHES_V 0:0 + +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL 0x029c +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_CONSTANT_BUFFER_MASK 7:0 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE 8:8 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_ADDR_ENABLE_TRUE 0x00000001 +#define 
NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE 12:12 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_CONSTANT_BUFFER_ENABLE_TRUE 0x00000001 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_DEPENDENT_ENABLE 9:9 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_DEPENDENT_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_I2M_DEPENDENT_ENABLE_TRUE 0x00000001 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE 16:16 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_ADDR_ENABLE_TRUE 0x00000001 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE 20:20 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_CONSTANT_BUFFER_ENABLE_TRUE 0x00000001 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_DEPENDENT_ENABLE 10:10 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_DEPENDENT_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_IQ2M_DEPENDENT_ENABLE_TRUE 0x00000001 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE 24:24 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_QMD_VIRTUALIZATION_CONTROL_SEND_PCAS_ENABLE_TRUE 0x00000001 + +#define NVC7C0_SET_SHADER_SHARED_MEMORY_WINDOW_A 0x02a0 +#define NVC7C0_SET_SHADER_SHARED_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC7C0_SET_SHADER_SHARED_MEMORY_WINDOW_B 0x02a4 +#define NVC7C0_SET_SHADER_SHARED_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC7C0_SCG_HYSTERESIS_CONTROL 0x02a8 +#define NVC7C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE 0:0 +#define NVC7C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC7C0_SCG_HYSTERESIS_CONTROL_USE_TIMEOUT_ONCE_TRUE 0x00000001 +#define NVC7C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE 1:1 +#define NVC7C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_FALSE 0x00000000 +#define NVC7C0_SCG_HYSTERESIS_CONTROL_USE_NULL_TIMEOUT_ONCE_TRUE 0x00000001 + +#define NVC7C0_SET_CWD_SLOT_COUNT 0x02b0 +#define NVC7C0_SET_CWD_SLOT_COUNT_V 7:0 + +#define NVC7C0_SEND_PCAS_A 0x02b4 +#define NVC7C0_SEND_PCAS_A_QMD_ADDRESS_SHIFTED8 31:0 + +#define NVC7C0_SEND_PCAS_B 0x02b8 +#define NVC7C0_SEND_PCAS_B_FROM 23:0 +#define NVC7C0_SEND_PCAS_B_DELTA 31:24 + +#define NVC7C0_SEND_SIGNALING_PCAS_B 0x02bc +#define NVC7C0_SEND_SIGNALING_PCAS_B_INVALIDATE 0:0 +#define NVC7C0_SEND_SIGNALING_PCAS_B_INVALIDATE_FALSE 0x00000000 +#define NVC7C0_SEND_SIGNALING_PCAS_B_INVALIDATE_TRUE 0x00000001 +#define NVC7C0_SEND_SIGNALING_PCAS_B_SCHEDULE 1:1 +#define NVC7C0_SEND_SIGNALING_PCAS_B_SCHEDULE_FALSE 0x00000000 +#define NVC7C0_SEND_SIGNALING_PCAS_B_SCHEDULE_TRUE 0x00000001 + +#define NVC7C0_SEND_SIGNALING_PCAS2_B 0x02c0 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION 3:0 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_NOP 0x00000000 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INVALIDATE 0x00000001 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_SCHEDULE 0x00000002 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INVALIDATE_COPY_SCHEDULE 0x00000003 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INCREMENT_PUT 0x00000006 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_DECREMENT_DEPENDENCE 0x00000007 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_PREFETCH 0x00000008 +#define 
NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_PREFETCH_SCHEDULE 0x00000009 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INVALIDATE_PREFETCH_COPY_SCHEDULE 0x0000000A +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INVALIDATE_PREFETCH_COPY_FORCE_REQUIRE_SCHEDULING 0x0000000B +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INCREMENT_DEPENDENCE 0x0000000C +#define NVC7C0_SEND_SIGNALING_PCAS2_B_PCAS_ACTION_INCREMENT_CWD_REF_COUNTER 0x0000000D +#define NVC7C0_SEND_SIGNALING_PCAS2_B_SELECT 13:8 +#define NVC7C0_SEND_SIGNALING_PCAS2_B_OFFSET_MINUS_ONE 23:14 + +#define NVC7C0_SET_SKED_CACHE_CONTROL 0x02cc +#define NVC7C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID 0:0 +#define NVC7C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID_FALSE 0x00000000 +#define NVC7C0_SET_SKED_CACHE_CONTROL_IGNORE_VEID_TRUE 0x00000001 + +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A 0x02e4 +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_A_SIZE_UPPER 7:0 + +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B 0x02e8 +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_B_SIZE_LOWER 31:0 + +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C 0x02ec +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_NON_THROTTLED_C_MAX_SM_COUNT 8:0 + +#define NVC7C0_SET_SPA_VERSION 0x0310 +#define NVC7C0_SET_SPA_VERSION_MINOR 7:0 +#define NVC7C0_SET_SPA_VERSION_MAJOR 15:8 + +#define NVC7C0_SET_INLINE_QMD_ADDRESS_A 0x0318 +#define NVC7C0_SET_INLINE_QMD_ADDRESS_A_QMD_ADDRESS_SHIFTED8_UPPER 31:0 + +#define NVC7C0_SET_INLINE_QMD_ADDRESS_B 0x031c +#define NVC7C0_SET_INLINE_QMD_ADDRESS_B_QMD_ADDRESS_SHIFTED8_LOWER 31:0 + +#define NVC7C0_LOAD_INLINE_QMD_DATA(i) (0x0320+(i)*4) +#define NVC7C0_LOAD_INLINE_QMD_DATA_V 31:0 + +#define NVC7C0_SET_FALCON00 0x0500 +#define NVC7C0_SET_FALCON00_V 31:0 + +#define NVC7C0_SET_FALCON01 0x0504 +#define NVC7C0_SET_FALCON01_V 31:0 + +#define NVC7C0_SET_FALCON02 0x0508 +#define NVC7C0_SET_FALCON02_V 31:0 + +#define NVC7C0_SET_FALCON03 0x050c +#define NVC7C0_SET_FALCON03_V 31:0 + +#define NVC7C0_SET_FALCON04 0x0510 +#define NVC7C0_SET_FALCON04_V 31:0 + +#define NVC7C0_SET_FALCON05 0x0514 +#define NVC7C0_SET_FALCON05_V 31:0 + +#define NVC7C0_SET_FALCON06 0x0518 +#define NVC7C0_SET_FALCON06_V 31:0 + +#define NVC7C0_SET_FALCON07 0x051c +#define NVC7C0_SET_FALCON07_V 31:0 + +#define NVC7C0_SET_FALCON08 0x0520 +#define NVC7C0_SET_FALCON08_V 31:0 + +#define NVC7C0_SET_FALCON09 0x0524 +#define NVC7C0_SET_FALCON09_V 31:0 + +#define NVC7C0_SET_FALCON10 0x0528 +#define NVC7C0_SET_FALCON10_V 31:0 + +#define NVC7C0_SET_FALCON11 0x052c +#define NVC7C0_SET_FALCON11_V 31:0 + +#define NVC7C0_SET_FALCON12 0x0530 +#define NVC7C0_SET_FALCON12_V 31:0 + +#define NVC7C0_SET_FALCON13 0x0534 +#define NVC7C0_SET_FALCON13_V 31:0 + +#define NVC7C0_SET_FALCON14 0x0538 +#define NVC7C0_SET_FALCON14_V 31:0 + +#define NVC7C0_SET_FALCON15 0x053c +#define NVC7C0_SET_FALCON15_V 31:0 + +#define NVC7C0_SET_MME_MEM_ADDRESS_A 0x0550 +#define NVC7C0_SET_MME_MEM_ADDRESS_A_UPPER 16:0 + +#define NVC7C0_SET_MME_MEM_ADDRESS_B 0x0554 +#define NVC7C0_SET_MME_MEM_ADDRESS_B_LOWER 31:0 + +#define NVC7C0_SET_MME_DATA_RAM_ADDRESS 0x0558 +#define NVC7C0_SET_MME_DATA_RAM_ADDRESS_WORD 31:0 + +#define NVC7C0_MME_DMA_READ 0x055c +#define NVC7C0_MME_DMA_READ_LENGTH 31:0 + +#define NVC7C0_MME_DMA_READ_FIFOED 0x0560 +#define NVC7C0_MME_DMA_READ_FIFOED_LENGTH 31:0 + +#define NVC7C0_MME_DMA_WRITE 0x0564 +#define NVC7C0_MME_DMA_WRITE_LENGTH 31:0 + +#define NVC7C0_MME_DMA_REDUCTION 0x0568 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP 2:0 +#define 
NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP_RED_INC 0x00000003 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP_RED_AND 0x00000005 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP_RED_OR 0x00000006 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_FORMAT 5:4 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_FORMAT_UNSIGNED 0x00000000 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_FORMAT_SIGNED 0x00000001 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_SIZE 8:8 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_SIZE_FOUR_BYTES 0x00000000 +#define NVC7C0_MME_DMA_REDUCTION_REDUCTION_SIZE_EIGHT_BYTES 0x00000001 + +#define NVC7C0_MME_DMA_SYSMEMBAR 0x056c +#define NVC7C0_MME_DMA_SYSMEMBAR_V 0:0 + +#define NVC7C0_MME_DMA_SYNC 0x0570 +#define NVC7C0_MME_DMA_SYNC_VALUE 31:0 + +#define NVC7C0_SET_MME_DATA_FIFO_CONFIG 0x0574 +#define NVC7C0_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE 2:0 +#define NVC7C0_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_0KB 0x00000000 +#define NVC7C0_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_4KB 0x00000001 +#define NVC7C0_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_8KB 0x00000002 +#define NVC7C0_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_12KB 0x00000003 +#define NVC7C0_SET_MME_DATA_FIFO_CONFIG_FIFO_SIZE_SIZE_16KB 0x00000004 + +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_A 0x0790 +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_A_ADDRESS_UPPER 16:0 + +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_B 0x0794 +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_B_ADDRESS_LOWER 31:0 + +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A 0x07b0 +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_WINDOW_A_BASE_ADDRESS_UPPER 16:0 + +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B 0x07b4 +#define NVC7C0_SET_SHADER_LOCAL_MEMORY_WINDOW_B_BASE_ADDRESS 31:0 + +#define NVC7C0_THROTTLE_SM 0x07fc +#define NVC7C0_THROTTLE_SM_MULTIPLY_ADD 0:0 +#define NVC7C0_THROTTLE_SM_MULTIPLY_ADD_FALSE 0x00000000 +#define NVC7C0_THROTTLE_SM_MULTIPLY_ADD_TRUE 0x00000001 + +#define NVC7C0_SET_SHADER_CACHE_CONTROL 0x0d94 +#define NVC7C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE 0:0 +#define NVC7C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_SHADER_CACHE_CONTROL_ICACHE_PREFETCH_ENABLE_TRUE 0x00000001 + +#define NVC7C0_SET_SCG_COMPUTE_SCHEDULING_PARAMETERS(i) (0x0da0+(i)*4) +#define NVC7C0_SET_SCG_COMPUTE_SCHEDULING_PARAMETERS_V 31:0 + +#define NVC7C0_SET_SM_TIMEOUT_INTERVAL 0x0de4 +#define NVC7C0_SET_SM_TIMEOUT_INTERVAL_COUNTER_BIT 5:0 + +#define NVC7C0_MME_DMA_WRITE_METHOD_BARRIER 0x0dec +#define NVC7C0_MME_DMA_WRITE_METHOD_BARRIER_V 0:0 + +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI 0x1288 +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES 0:0 +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_NO_WFI_TAG 25:4 + +#define NVC7C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT 0x12a8 +#define NVC7C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL 0:0 +#define NVC7C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_FALSE 0x00000000 +#define NVC7C0_ACTIVATE_PERF_SETTINGS_FOR_COMPUTE_CONTEXT_ALL_TRUE 0x00000001 + +#define NVC7C0_INVALIDATE_SAMPLER_CACHE 0x1330 +#define 
NVC7C0_INVALIDATE_SAMPLER_CACHE_LINES 0:0 +#define NVC7C0_INVALIDATE_SAMPLER_CACHE_LINES_ALL 0x00000000 +#define NVC7C0_INVALIDATE_SAMPLER_CACHE_LINES_ONE 0x00000001 +#define NVC7C0_INVALIDATE_SAMPLER_CACHE_TAG 25:4 + +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE 0x1334 +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES 0:0 +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ALL 0x00000000 +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_LINES_ONE 0x00000001 +#define NVC7C0_INVALIDATE_TEXTURE_HEADER_CACHE_TAG 25:4 + +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE 0x1338 +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES 0:0 +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ALL 0x00000000 +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_LINES_ONE 0x00000001 +#define NVC7C0_INVALIDATE_TEXTURE_DATA_CACHE_TAG 25:4 + +#define NVC7C0_INVALIDATE_SAMPLER_CACHE_NO_WFI 0x1424 +#define NVC7C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES 0:0 +#define NVC7C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ALL 0x00000000 +#define NVC7C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_LINES_ONE 0x00000001 +#define NVC7C0_INVALIDATE_SAMPLER_CACHE_NO_WFI_TAG 25:4 + +#define NVC7C0_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC7C0_SET_SHADER_EXCEPTIONS_ENABLE 0:0 +#define NVC7C0_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_SHADER_EXCEPTIONS_ENABLE_TRUE 0x00000001 + +#define NVC7C0_SET_RENDER_ENABLE_A 0x1550 +#define NVC7C0_SET_RENDER_ENABLE_A_OFFSET_UPPER 7:0 + +#define NVC7C0_SET_RENDER_ENABLE_B 0x1554 +#define NVC7C0_SET_RENDER_ENABLE_B_OFFSET_LOWER 31:0 + +#define NVC7C0_SET_RENDER_ENABLE_C 0x1558 +#define NVC7C0_SET_RENDER_ENABLE_C_MODE 2:0 +#define NVC7C0_SET_RENDER_ENABLE_C_MODE_FALSE 0x00000000 +#define NVC7C0_SET_RENDER_ENABLE_C_MODE_TRUE 0x00000001 +#define NVC7C0_SET_RENDER_ENABLE_C_MODE_CONDITIONAL 0x00000002 +#define NVC7C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_EQUAL 0x00000003 +#define NVC7C0_SET_RENDER_ENABLE_C_MODE_RENDER_IF_NOT_EQUAL 0x00000004 + +#define NVC7C0_SET_TEX_SAMPLER_POOL_A 0x155c +#define NVC7C0_SET_TEX_SAMPLER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC7C0_SET_TEX_SAMPLER_POOL_B 0x1560 +#define NVC7C0_SET_TEX_SAMPLER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC7C0_SET_TEX_SAMPLER_POOL_C 0x1564 +#define NVC7C0_SET_TEX_SAMPLER_POOL_C_MAXIMUM_INDEX 19:0 + +#define NVC7C0_SET_TEX_HEADER_POOL_A 0x1574 +#define NVC7C0_SET_TEX_HEADER_POOL_A_OFFSET_UPPER 16:0 + +#define NVC7C0_SET_TEX_HEADER_POOL_B 0x1578 +#define NVC7C0_SET_TEX_HEADER_POOL_B_OFFSET_LOWER 31:0 + +#define NVC7C0_SET_TEX_HEADER_POOL_C 0x157c +#define NVC7C0_SET_TEX_HEADER_POOL_C_MAXIMUM_INDEX 21:0 + +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI 0x1698 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION 0:0 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_FALSE 0x00000000 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_INSTRUCTION_TRUE 0x00000001 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA 4:4 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_FALSE 0x00000000 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_GLOBAL_DATA_TRUE 0x00000001 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT 12:12 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_FALSE 0x00000000 +#define NVC7C0_INVALIDATE_SHADER_CACHES_NO_WFI_CONSTANT_TRUE 0x00000001 + +#define NVC7C0_SET_RENDER_ENABLE_OVERRIDE 0x1944 +#define NVC7C0_SET_RENDER_ENABLE_OVERRIDE_MODE 1:0 +#define NVC7C0_SET_RENDER_ENABLE_OVERRIDE_MODE_USE_RENDER_ENABLE 0x00000000 +#define NVC7C0_SET_RENDER_ENABLE_OVERRIDE_MODE_ALWAYS_RENDER 0x00000001 +#define 
NVC7C0_SET_RENDER_ENABLE_OVERRIDE_MODE_NEVER_RENDER 0x00000002 + +#define NVC7C0_PIPE_NOP 0x1a2c +#define NVC7C0_PIPE_NOP_V 31:0 + +#define NVC7C0_SET_SPARE00 0x1a30 +#define NVC7C0_SET_SPARE00_V 31:0 + +#define NVC7C0_SET_SPARE01 0x1a34 +#define NVC7C0_SET_SPARE01_V 31:0 + +#define NVC7C0_SET_SPARE02 0x1a38 +#define NVC7C0_SET_SPARE02_V 31:0 + +#define NVC7C0_SET_SPARE03 0x1a3c +#define NVC7C0_SET_SPARE03_V 31:0 + +#define NVC7C0_SET_REPORT_SEMAPHORE_A 0x1b00 +#define NVC7C0_SET_REPORT_SEMAPHORE_A_OFFSET_UPPER 7:0 + +#define NVC7C0_SET_REPORT_SEMAPHORE_B 0x1b04 +#define NVC7C0_SET_REPORT_SEMAPHORE_B_OFFSET_LOWER 31:0 + +#define NVC7C0_SET_REPORT_SEMAPHORE_C 0x1b08 +#define NVC7C0_SET_REPORT_SEMAPHORE_C_PAYLOAD 31:0 + +#define NVC7C0_SET_REPORT_SEMAPHORE_D 0x1b0c +#define NVC7C0_SET_REPORT_SEMAPHORE_D_OPERATION 1:0 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_OPERATION_RELEASE 0x00000000 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_OPERATION_TRAP 0x00000003 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE 20:20 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_AWAKEN_ENABLE_TRUE 0x00000001 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE 28:28 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_FOUR_WORDS 0x00000000 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_STRUCTURE_SIZE_ONE_WORD 0x00000001 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE 2:2 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_FALSE 0x00000000 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_FLUSH_DISABLE_TRUE 0x00000001 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE 3:3 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_FALSE 0x00000000 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_ENABLE_TRUE 0x00000001 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP 11:9 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_ADD 0x00000000 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MIN 0x00000001 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_MAX 0x00000002 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_INC 0x00000003 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_DEC 0x00000004 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_AND 0x00000005 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_OR 0x00000006 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_OP_RED_XOR 0x00000007 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT 18:17 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_UNSIGNED_32 0x00000000 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_REDUCTION_FORMAT_SIGNED_32 0x00000001 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP 19:19 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_FALSE 0x00000000 +#define NVC7C0_SET_REPORT_SEMAPHORE_D_CONDITIONAL_TRAP_TRUE 0x00000001 + +#define NVC7C0_SET_TRAP_HANDLER_A 0x25f8 +#define NVC7C0_SET_TRAP_HANDLER_A_ADDRESS_UPPER 16:0 + +#define NVC7C0_SET_TRAP_HANDLER_B 0x25fc +#define NVC7C0_SET_TRAP_HANDLER_B_ADDRESS_LOWER 31:0 + +#define NVC7C0_SET_BINDLESS_TEXTURE 0x2608 +#define NVC7C0_SET_BINDLESS_TEXTURE_CONSTANT_BUFFER_SLOT_SELECT 2:0 + +#define NVC7C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE(i) (0x32f4+(i)*4) +#define NVC7C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_V 31:0 + +#define NVC7C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER(i) (0x3314+(i)*4) +#define NVC7C0_SET_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC7C0_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3334 +#define 
NVC7C0_ENABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC7C0_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER 0x3338 +#define NVC7C0_DISABLE_SHADER_PERFORMANCE_SNAPSHOT_COUNTER_V 0:0 + +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER(i) (0x333c+(i)*4) +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_UPPER_V 31:0 + +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE(i) (0x335c+(i)*4) +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_VALUE_V 31:0 + +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT(i) (0x337c+(i)*4) +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_EVENT_EVENT 7:0 + +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A(i) (0x339c+(i)*4) +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT0 1:0 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT0 4:2 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT1 6:5 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT1 9:7 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT2 11:10 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT2 14:12 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT3 16:15 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT3 19:17 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT4 21:20 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT4 24:22 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_EVENT5 26:25 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_BIT_SELECT5 29:27 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_A_SPARE 31:30 + +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B(i) (0x33bc+(i)*4) +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_EDGE 0:0 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_MODE 2:1 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_WINDOWED 3:3 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CONTROL_B_FUNC 19:4 + +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL 0x33dc +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_TRAP_CONTROL_MASK 7:0 + +#define NVC7C0_START_SHADER_PERFORMANCE_COUNTER 0x33e0 +#define NVC7C0_START_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC7C0_STOP_SHADER_PERFORMANCE_COUNTER 0x33e4 +#define NVC7C0_STOP_SHADER_PERFORMANCE_COUNTER_COUNTER_MASK 7:0 + +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER 0x33e8 +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_SCTL_FILTER_V 31:0 + +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER 0x33ec +#define NVC7C0_SET_SHADER_PERFORMANCE_COUNTER_CORE_MIO_FILTER_V 31:0 + +#define NVC7C0_SET_MME_SHADOW_SCRATCH(i) (0x3400+(i)*4) +#define NVC7C0_SET_MME_SHADOW_SCRATCH_V 31:0 + +#define NVC7C0_CALL_MME_MACRO(j) (0x3800+(j)*8) +#define NVC7C0_CALL_MME_MACRO_V 31:0 + +#define NVC7C0_CALL_MME_DATA(j) (0x3804+(j)*8) +#define NVC7C0_CALL_MME_DATA_V 31:0 + +#endif /* _cl_ampere_compute_b_h_ */ diff --git a/src/common/sdk/nvidia/inc/class/clc7fa.h b/src/common/sdk/nvidia/inc/class/clc7fa.h new file mode 100644 index 000000000..50892c523 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/clc7fa.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvtypes.h" + +#ifndef _clc7fa_h_ +#define _clc7fa_h_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVC7FA_VIDEO_OFA (0x0000C7FA) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // _clc7fa_h + diff --git a/src/common/sdk/nvidia/inc/class/cld0b7.h b/src/common/sdk/nvidia/inc/class/cld0b7.h new file mode 100644 index 000000000..d7c044de3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/class/cld0b7.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef cld0b7_h +#define cld0b7_h + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVD0B7_VIDEO_ENCODER (0x0000D0B7) + +#ifdef __cplusplus +}; /* extern "C" */ +#endif +#endif // cld0b7_h + diff --git a/src/common/sdk/nvidia/inc/cpuopsys.h b/src/common/sdk/nvidia/inc/cpuopsys.h new file mode 100644 index 000000000..ee911b77f --- /dev/null +++ b/src/common/sdk/nvidia/inc/cpuopsys.h @@ -0,0 +1,428 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! \brief + * Define compile time symbols for CPU type and operating system type. + * This file should only contain preprocessor commands so that + * there are no dependencies on other files. + * + * cpuopsys.h + * + * Copyright (c) 2001, Nvidia Corporation. All rights reserved. + */ + +/*! + * Uniform names are defined for compile time options to distinguish + * CPU types and Operating systems. + * Distinctions between CPU and OpSys should be orthogonal. + * + * These uniform names have initially been defined by keying off the + * makefile/build names defined for builds in the OpenGL group. + * Getting the uniform names defined for other builds may require + * different qualifications. + * + * The file is placed here to allow for the possibility of all driver + * components using the same naming convention for conditional compilation. 
+ */ + +#ifndef CPUOPSYS_H +#define CPUOPSYS_H + +/*****************************************************************************/ +/* Define all OS/CPU-Chip related symbols */ + +/* ***** WINDOWS variations */ +#if defined(_WIN32) || defined(_WIN16) +# define NV_WINDOWS + +# if defined(_WIN32_WINNT) +# define NV_WINDOWS_NT +# elif defined(_WIN32_WCE) +# define NV_WINDOWS_CE +# else +# define NV_WINDOWS_9X +# endif +#endif /* _WIN32 || defined(_WIN16) */ + +/* ***** Unix variations */ +#if defined(__linux__) && !defined(NV_LINUX) && !defined(NV_VMWARE) +# define NV_LINUX +#endif /* defined(__linux__) */ + +#if defined(__VMWARE__) && !defined(NV_VMWARE) +# define NV_VMWARE +#endif /* defined(__VMWARE__) */ + +/* SunOS + gcc */ +#if defined(__sun__) && defined(__svr4__) && !defined(NV_SUNOS) +# define NV_SUNOS +#endif /* defined(__sun__) && defined(__svr4__) */ + +/* SunOS + Sun Compiler (named SunPro, Studio or Forte) */ +#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) +# define NV_SUNPRO_C +# define NV_SUNOS +#endif /* defined(_SUNPRO_C) || defined(__SUNPRO_CC) */ + +#if defined(__FreeBSD__) && !defined(NV_BSD) +# define NV_BSD +#endif /* defined(__FreeBSD__) */ + +/* XXXar don't define NV_UNIX on MacOSX or vxworks or QNX */ +#if (defined(__unix__) || defined(__unix) || defined(__INTEGRITY) ) && !defined(nvmacosx) && !defined(vxworks) && !defined(NV_UNIX) && !defined(__QNX__) && !defined(__QNXNTO__)/* XXX until removed from Makefiles */ +# define NV_UNIX +#endif /* defined(__unix__) */ + +#if (defined(__QNX__) || defined(__QNXNTO__)) && !defined(NV_QNX) +# define NV_QNX +#endif + +#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(NV_ANDROID) +# define NV_ANDROID +#endif + +#if defined(DceCore) && !defined(NV_DCECORE) +# define NV_DCECORE +#endif + +/* ***** Apple variations */ +#if defined(macintosh) || defined(__APPLE__) +# define NV_MACINTOSH +# if defined(__MACH__) +# define NV_MACINTOSH_OSX +# else +# define NV_MACINTOSH_OS9 +# endif +# if defined(__LP64__) +# define NV_MACINTOSH_64 +# endif +#endif /* defined(macintosh) */ + +/* ***** VxWorks */ +/* Tornado 2.21 is gcc 2.96 and #defines __vxworks. */ +/* Tornado 2.02 is gcc 2.7.2 and doesn't define any OS symbol, so we rely on */ +/* the build system #defining vxworks. 
*/ +#if defined(__vxworks) || defined(vxworks) +# define NV_VXWORKS +#endif + +/* ***** Integrity OS */ +#if defined(__INTEGRITY) +# if !defined(NV_INTEGRITY) +# define NV_INTEGRITY +# endif +#endif + +/* ***** Processor type variations */ +/* Note: The prefix NV_CPU_* is taken by Nvcm.h */ + +#if ((defined(_M_IX86) || defined(__i386__) || defined(__i386)) && !defined(NVCPU_X86)) /* XXX until removed from Makefiles */ +/* _M_IX86 for windows, __i386__ for Linux (or any x86 using gcc) */ +/* __i386 for Studio compiler on Solaris x86 */ +# define NVCPU_X86 /* any IA32 machine (not x86-64) */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NV_LINUX) && defined(__ia64__) +# define NVCPU_IA64_LINUX /* any IA64 for Linux opsys */ +#endif +#if defined(NVCPU_IA64_WINDOWS) || defined(NVCPU_IA64_LINUX) || defined(IA64) +# define NVCPU_IA64 /* any IA64 for any opsys */ +#endif + +#if (defined(NV_MACINTOSH) && !(defined(__i386__) || defined(__x86_64__))) || defined(__PPC__) || defined(__ppc) +# if defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) +# ifndef NVCPU_PPC64LE +# define NVCPU_PPC64LE /* PPC 64-bit little endian */ +# endif +# else +# ifndef NVCPU_PPC +# define NVCPU_PPC /* any non-PPC64LE PowerPC architecture */ +# endif +# ifndef NV_BIG_ENDIAN +# define NV_BIG_ENDIAN +# endif +# endif +# define NVCPU_FAMILY_PPC +#endif + +#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64) +# define NVCPU_X86_64 /* any x86-64 for any opsys */ +#endif + +#if defined(NVCPU_X86) || defined(NVCPU_X86_64) +# define NVCPU_FAMILY_X86 +#endif + +#if defined(__riscv) && (__riscv_xlen==64) +# define NVCPU_RISCV64 +# if defined(__nvriscv) +# define NVCPU_NVRISCV64 +# endif +#endif + +#if defined(__arm__) || defined(_M_ARM) +/* + * 32-bit instruction set on, e.g., ARMv7 or AArch32 execution state + * on ARMv8 + */ +# define NVCPU_ARM +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(__aarch64__) || defined(__ARM64__) || defined(_M_ARM64) +# define NVCPU_AARCH64 /* 64-bit A64 instruction set on ARMv8 */ +# define NVCPU_MIN_PAGE_SHIFT 12 +#endif + +#if defined(NVCPU_ARM) || defined(NVCPU_AARCH64) +# define NVCPU_FAMILY_ARM +#endif + +#if defined(__SH4__) +# ifndef NVCPU_SH4 +# define NVCPU_SH4 /* Renesas (formerly Hitachi) SH4 */ +# endif +# if defined NV_WINDOWS_CE +# define NVCPU_MIN_PAGE_SHIFT 12 +# endif +#endif + +/* For Xtensa processors */ +#if defined(__XTENSA__) +# define NVCPU_XTENSA +# if defined(__XTENSA_EB__) +# define NV_BIG_ENDIAN +# endif +#endif + + +/* + * Other flavors of CPU type should be determined at run-time. + * For example, an x86 architecture with/without SSE. + * If it can compile, then there's no need for a compile time option. + * For some current GCC limitations, these may be fixed by using the Intel + * compiler for certain files in a Linux build. + */ + +/* The minimum page size can be determined from the minimum page shift */ +#if defined(NVCPU_MIN_PAGE_SHIFT) +#define NVCPU_MIN_PAGE_SIZE (1 << NVCPU_MIN_PAGE_SHIFT) +#endif + +#if defined(NVCPU_IA64) || defined(NVCPU_X86_64) || \ + defined(NV_MACINTOSH_64) || defined(NVCPU_AARCH64) || \ + defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64) +# define NV_64_BITS /* all architectures where pointers are 64 bits */ +#else +/* we assume 32 bits. I don't see a need for NV_16_BITS. 
*/ +#endif + +/* For verification-only features not intended to be included in normal drivers */ +#if (defined(NV_MODS) || defined(NV_GSP_MODS)) && defined(DEBUG) && !defined(DISABLE_VERIF_FEATURES) +#define NV_VERIF_FEATURES +#endif + +/* + * New, safer family of #define's -- these ones use 0 vs. 1 rather than + * defined/!defined. This is advantageous because if you make a typo, + * say misspelled ENDIAN: + * + * #if NVCPU_IS_BIG_ENDAIN + * + * ...some compilers can give you a warning telling you that you screwed up. + * The compiler can also give you a warning if you forget to #include + * "cpuopsys.h" in your code before the point where you try to use these + * conditionals. + * + * Also, the names have been prefixed in more cases with "CPU" or "OS" for + * increased clarity. You can tell the names apart from the old ones because + * they all use "_IS_" in the name. + * + * Finally, these can be used in "if" statements and not just in #if's. For + * example: + * + * if (NVCPU_IS_BIG_ENDIAN) x = Swap32(x); + * + * Maybe some day in the far-off future these can replace the old #define's. + */ + +#define NV_IS_MODS 0 + +#if defined(NV_GSP_MODS) +#define NV_IS_GSP_MODS 1 +#else +#define NV_IS_GSP_MODS 0 +#endif + +#define NVOS_IS_WINDOWS 0 +#if defined(NV_WINDOWS_CE) +#define NVOS_IS_WINDOWS_CE 1 +#else +#define NVOS_IS_WINDOWS_CE 0 +#endif +#if defined(NV_LINUX) +#define NVOS_IS_LINUX 1 +#else +#define NVOS_IS_LINUX 0 +#endif +#if defined(NV_UNIX) +#define NVOS_IS_UNIX 1 +#else +#define NVOS_IS_UNIX 0 +#endif +#if defined(NV_BSD) +#define NVOS_IS_FREEBSD 1 +#else +#define NVOS_IS_FREEBSD 0 +#endif +#if defined(NV_SUNOS) +#define NVOS_IS_SOLARIS 1 +#else +#define NVOS_IS_SOLARIS 0 +#endif +#define NVOS_IS_VMWARE 0 +#if defined(NV_QNX) +#define NVOS_IS_QNX 1 +#else +#define NVOS_IS_QNX 0 +#endif +#if defined(NV_ANDROID) +#define NVOS_IS_ANDROID 1 +#else +#define NVOS_IS_ANDROID 0 +#endif +#if defined(NV_MACINTOSH) +#define NVOS_IS_MACINTOSH 1 +#else +#define NVOS_IS_MACINTOSH 0 +#endif +#if defined(NV_VXWORKS) +#define NVOS_IS_VXWORKS 1 +#else +#define NVOS_IS_VXWORKS 0 +#endif +#if defined(NV_LIBOS) +#define NVOS_IS_LIBOS 1 +#else +#define NVOS_IS_LIBOS 0 +#endif +#if defined(NV_INTEGRITY) +#define NVOS_IS_INTEGRITY 1 +#else +#define NVOS_IS_INTEGRITY 0 +#endif + +#if defined(NVCPU_X86) +#define NVCPU_IS_X86 1 +#else +#define NVCPU_IS_X86 0 +#endif +#if defined(NVCPU_RISCV64) +#define NVCPU_IS_RISCV64 1 +#else +#define NVCPU_IS_RISCV64 0 +#endif +#if defined(NVCPU_NVRISCV64) +#define NVCPU_IS_NVRISCV64 1 +#else +#define NVCPU_IS_NVRISCV64 0 +#endif +#if defined(NVCPU_IA64) +#define NVCPU_IS_IA64 1 +#else +#define NVCPU_IS_IA64 0 +#endif +#if defined(NVCPU_X86_64) +#define NVCPU_IS_X86_64 1 +#else +#define NVCPU_IS_X86_64 0 +#endif +#if defined(NVCPU_FAMILY_X86) +#define NVCPU_IS_FAMILY_X86 1 +#else +#define NVCPU_IS_FAMILY_X86 0 +#endif +#if defined(NVCPU_PPC) +#define NVCPU_IS_PPC 1 +#else +#define NVCPU_IS_PPC 0 +#endif +#if defined(NVCPU_PPC64LE) +#define NVCPU_IS_PPC64LE 1 +#else +#define NVCPU_IS_PPC64LE 0 +#endif +#if defined(NVCPU_FAMILY_PPC) +#define NVCPU_IS_FAMILY_PPC 1 +#else +#define NVCPU_IS_FAMILY_PPC 0 +#endif +#if defined(NVCPU_ARM) +#define NVCPU_IS_ARM 1 +#else +#define NVCPU_IS_ARM 0 +#endif +#if defined(NVCPU_AARCH64) +#define NVCPU_IS_AARCH64 1 +#else +#define NVCPU_IS_AARCH64 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_FAMILY_ARM 1 +#else +#define NVCPU_IS_FAMILY_ARM 0 +#endif +#if defined(NVCPU_SH4) +#define NVCPU_IS_SH4 1 +#else +#define NVCPU_IS_SH4 
0 +#endif +#if defined(NVCPU_XTENSA) +#define NVCPU_IS_XTENSA 1 +#else +#define NVCPU_IS_XTENSA 0 +#endif +#if defined(NV_BIG_ENDIAN) +#define NVCPU_IS_BIG_ENDIAN 1 +#else +#define NVCPU_IS_BIG_ENDIAN 0 +#endif +#if defined(NV_64_BITS) +#define NVCPU_IS_64_BITS 1 +#else +#define NVCPU_IS_64_BITS 0 +#endif +#if defined(NVCPU_FAMILY_ARM) +#define NVCPU_IS_PCIE_CACHE_COHERENT 0 +#else +#define NVCPU_IS_PCIE_CACHE_COHERENT 1 +#endif +#if defined(NV_DCECORE) +#define NVOS_IS_DCECORE 1 +#else +#define NVOS_IS_DCECORE 0 +#endif +/*****************************************************************************/ + +#endif /* CPUOPSYS_H */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h new file mode 100644 index 000000000..f88db2e66 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000base.h @@ -0,0 +1,72 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000base.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV01_ROOT (client) control commands and parameters */ + +#define NV0000_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0000,NV0000_CTRL_##cat,idx) + +/* Client command categories (6bits) */ +#define NV0000_CTRL_RESERVED (0x00) +#define NV0000_CTRL_SYSTEM (0x01) +#define NV0000_CTRL_GPU (0x02) +#define NV0000_CTRL_GSYNC (0x03) +#define NV0000_CTRL_DIAG (0x04) +#define NV0000_CTRL_EVENT (0x05) +#define NV0000_CTRL_NVD (0x06) +#define NV0000_CTRL_SWINSTR (0x07) +#define NV0000_CTRL_GSPC (0x08) +#define NV0000_CTRL_PROC (0x09) +#define NV0000_CTRL_SYNC_GPU_BOOST (0x0A) +#define NV0000_CTRL_GPUACCT (0x0B) +#define NV0000_CTRL_VGPU (0x0C) +#define NV0000_CTRL_CLIENT (0x0D) + +// per-OS categories start at highest category and work backwards +#define NV0000_CTRL_OS_WINDOWS (0x3F) +#define NV0000_CTRL_OS_MACOS (0x3E) +#define NV0000_CTRL_OS_UNIX (0x3D) + + +/* + * NV0000_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
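/*
 * [Editorial illustration -- not part of this commit.]
 * A minimal sketch of how the symbols from cpuopsys.h above are intended to
 * be used: the classic defined/!defined symbols (NV_LINUX, NVCPU_X86_64, ...)
 * in preprocessor conditionals, and the newer 0/1-valued NVOS_IS_ / NVCPU_IS_
 * macros, which also work in ordinary "if" statements as the header's own
 * comment suggests. ByteSwap32() and ReadLittleEndian32() are hypothetical
 * helpers introduced only for this example.
 */
#include "cpuopsys.h"
#include "nvtypes.h"

static NvU32 ByteSwap32(NvU32 x)
{
    return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
           ((x << 8) & 0x00FF0000u) | (x << 24);
}

static NvU32 ReadLittleEndian32(NvU32 raw)
{
#if defined(NV_LINUX) && defined(NVCPU_X86_64)
    /* Classic style: this path is compiled only on 64-bit x86 Linux builds. */
    return raw;
#else
    /* Newer style: NVCPU_IS_BIG_ENDIAN is always defined as 0 or 1, so a
     * misspelling is diagnosable and the test can live in a normal "if". */
    if (NVCPU_IS_BIG_ENDIAN)
        raw = ByteSwap32(raw);
    return raw;
#endif
}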
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_NULL (0x0) /* finn: Evaluated from "(FINN_NV01_ROOT_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl0000_base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h new file mode 100644 index 000000000..1bca5a183 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000client.h @@ -0,0 +1,167 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
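/*
 * [Editorial illustration -- not part of this commit.]
 * The NV0000 control command values in ctrl0000base.h above appear to encode
 * a 6-bit category and an 8-bit index as (category << 8) | index, which is
 * also what the FINN "Evaluated from" comments express. Two compile-time
 * checks of that reading; the EXAMPLE_* names are editorial, not part of the
 * SDK.
 */
#define EXAMPLE_MAKE_NV0000_CMD(cat, idx) (((cat) << 8) | (idx))

/* NV0000_CTRL_GPU is category 0x02, so index 0x01 gives 0x201, matching
 * NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS defined later in this change. */
typedef char example_nv0000_gpu_cmd_check[
    (EXAMPLE_MAKE_NV0000_CMD(0x02, 0x01) == 0x201) ? 1 : -1];

/* NV0000_CTRL_DIAG is category 0x04, so index 0x80 gives 0x480, matching
 * NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE defined later in this change. */
typedef char example_nv0000_diag_cmd_check[
    (EXAMPLE_MAKE_NV0000_CMD(0x04, 0x80) == 0x480) ? 1 : -1];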
+// Source file: ctrl/ctrl0000/ctrl0000client.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +#include "class/cl0000.h" +#include "rs_access.h" + +/* + * NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE + * + * This command may be used to query memory address space type associated with an object + * + * Parameters: + * hObject[IN] + * handle of the object to look up + * addrSpaceType[OUT] + * addrSpaceType with associated memory descriptor + * + * Possible status values are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_OBJECT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE (0xd01) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS { + NvHandle hObject; /* [in] - Handle of object to look up */ + NvU32 mapFlags; /* [in] - Flags that will be used when mapping the object */ + NvU32 addrSpaceType; /* [out] - Memory Address Space Type */ +} NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS; + +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID 0x00000000 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM 0x00000001 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM 0x00000002 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_REGMEM 0x00000003 +#define NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC 0x00000004 + +/* + * NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO + * + * This command may be used to query information on a handle + */ +#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO (0xd02) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS { + NvHandle hObject; /* [in] - Handle of object to look up */ + NvU32 index; /* [in] - Type of lookup */ + + union { + NvHandle hResult; /* [out] - Result of lookup when result is a handle type */ + NV_DECLARE_ALIGNED(NvU64 iResult, 8); /* [out] - Result of lookup when result is a integer */ + } data; +} NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS; + +#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_INVALID 0x00000000 +#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT 0x00000001 +#define NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID 0x00000002 + +/* + * NV0000_CTRL_CMD_CLIENT_GET_ACCESS_RIGHTS + * + * This command may be used to get this client's access rights for an object + * The object to which access rights are checked does not have to be owned by + * the client calling the command, it is owned by the hClient parameter + */ +#define NV0000_CTRL_CMD_CLIENT_GET_ACCESS_RIGHTS (0xd03) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS { + NvHandle hObject; /* [in] - Handle of object to look up */ + NvHandle hClient; /* [in] - Handle of client which owns hObject */ + RS_ACCESS_MASK maskResult; /* [out] - Result of lookup */ +} NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS; + +/* + * NV0000_CTRL_CMD_CLIENT_SET_INHERITED_SHARE_POLICY + * + * DEPRECATED: Calls NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT 
with hObject=hClient + * + * This command will modify a client's inherited share policy list + * The policy is applied in the same way that NvRmShare applies policies, + * except to the client's inherited policy list instead of an object's policy list + */ +#define NV0000_CTRL_CMD_CLIENT_SET_INHERITED_SHARE_POLICY (0xd04) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS { + RS_SHARE_POLICY sharePolicy; /* [in] - Share Policy to apply */ +} NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS; + +/* + * NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE + * + * This command may be used to get a handle of a child of a given type + */ +#define NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE (0xd05) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS { + NvHandle hParent; /* [in] - Handle of parent object */ + NvU32 classId; /* [in] - Class ID of the child object */ + NvHandle hObject; /* [out] - Handle of the child object (0 if not found) */ +} NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS; + +/* + * NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT + * + * This command is meant to imitate the NvRmShare API. + * Applies a share policy to an object, which should be owned by the caller's client. + * The policy is applied in the same way that NvRmShare applies policies. + * + * This ctrl command is only meant to be used in older branches. For releases after R450, + * use NvRmShare directly instead. + */ +#define NV0000_CTRL_CMD_CLIENT_SHARE_OBJECT (0xd06) /* finn: Evaluated from "(FINN_NV01_ROOT_CLIENT_INTERFACE_ID << 8) | NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS { + NvHandle hObject; /* [in] - Handle of object to share */ + RS_SHARE_POLICY sharePolicy; /* [in] - Share Policy to apply */ +} NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS; + +/* _ctrl0000client_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h new file mode 100644 index 000000000..5fb7008fe --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000diag.h @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
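/*
 * [Editorial illustration -- not part of this commit.]
 * A usage sketch for NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE from
 * ctrl0000client.h above. ExampleRmControl() is a hypothetical stand-in for
 * whatever RM control entry point the caller uses; NV_STATUS and NV_OK come
 * from the SDK's nvstatus.h, which is assumed to be included.
 */
extern NV_STATUS ExampleRmControl(NvHandle hClient, NvHandle hObject,
                                  NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS ExampleQueryAddrSpace(NvHandle hClient, NvHandle hMemory,
                                       NvU32 *pAddrSpaceType)
{
    NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS params = {0};
    NV_STATUS status;

    params.hObject  = hMemory; /* object whose memory is being looked up */
    params.mapFlags = 0;       /* flags that would be used when mapping it */

    status = ExampleRmControl(hClient, hClient /* NV01_ROOT object */,
                              NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE,
                              &params, sizeof(params));
    if (status == NV_OK)
        *pAddrSpaceType = params.addrSpaceType; /* _SYSMEM, _VIDMEM, ... */
    return status;
}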
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000diag.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +/* NV01_ROOT (client) system control commands and parameters */ + +/* + * NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE + * + * This command returns the current lock meter logging state. + * + * state + * This parameter returns the current lock meter logging state. + * NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_DISABLED + * This value indicates lock metering is disabled. + * NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_ENABLED + * This value indicates lock metering is enabled. + * count + * This parameter returns the total number of lock metering entries + * (NV0000_CTRL_DIAG_LOCK_METER_ENTRY) available. This value will + * not exceed NV0000_CTRL_DIAG_LOCK_METER_MAX_ENTRIES. When lock metering + * is enabled this parameter will return zero. + * missedCount + * This parameter returns the number of lock metering entries that had + * to be discarded due to a full lock metering table. This value will + * not exceed NV0000_CTRL_DIAG_LOCK_METER_MAX_TABLE_ENTRIES. When lock + * metering is enabled this parameter will return zero. + * bCircularBuffer + * This parameter returns type of buffer. + * TRUE + * Buffer is circular + * FALSE + * Buffer is sequential + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE (0x480) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS { + NvU32 state; + NvU32 count; + NvU32 missedCount; + NvBool bCircularBuffer; +} NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_PARAMS; + +/* valid lock metering state values */ +#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_DISABLED (0x00000000) +#define NV0000_CTRL_DIAG_GET_LOCK_METER_STATE_ENABLED (0x00000001) + +/* maximum possible number of lock metering entries stored internally */ +#define NV0000_CTRL_DIAG_LOCK_METER_MAX_TABLE_ENTRIES (0x20000) + +/* + * NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE + * + * This command sets the current lock meter logging state. + * + * state + * This parameter specifies the new state of the lock metering mechanism. + * Legal state values are: + * NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_DISABLE + * This value disables lock metering. + * NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_ENABLE + * This value enables lock metering. + * NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_RESET + * This value resets, or clears, all lock metering state. Lock + * metering must be disabled prior to attempting a reset. + * bCircularBuffer + * This parameter specifies type of buffer. + * Possible values are: + * TRUE + * For circular buffer. + * FALSE + * For sequential buffer. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE (0x481) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS_MESSAGE_ID (0x81U) + +typedef struct NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS { + NvU32 state; + NvBool bCircularBuffer; +} NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS; + +/* valid lock metering state values */ +#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_DISABLE (0x00000000) +#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_ENABLE (0x00000001) +#define NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_RESET (0x00000002) + +/* + * NV0000_CTRL_DIAG_LOCK_METER_ENTRY + * + * This structure represents a single lock meter entry. + * + * counter + * This field contains the number of nanonseconds elapsed since the + * the last system boot when the lock meter entry was generated. + * freq + * This field contains the CPU performance counter frequency in units + * of ticks per second. + * line + * This field contains the relevant line number. + * filename + * This field contains the relevant file name. + * tag + * This field contains a tag uniquely identifying the user of the metered + * lock operations. + * cpuNum + * This field contains the CPU number from which the metered operation + * was initiated. + * irql + * This field contains the IRQL at which the metered operation was + * initiated. + * data0 + * data1 + * data2 + * These fields contain tag-specific data. + */ +#define NV0000_CTRL_DIAG_LOCK_METER_ENTRY_FILENAME_LENGTH (0xc) + +typedef struct NV0000_CTRL_DIAG_LOCK_METER_ENTRY { + NV_DECLARE_ALIGNED(NvU64 counter, 8); + + NvU32 line; + NvU8 filename[NV0000_CTRL_DIAG_LOCK_METER_ENTRY_FILENAME_LENGTH]; + + NvU16 tag; + NvU8 cpuNum; + NvU8 irql; + + NV_DECLARE_ALIGNED(NvU64 threadId, 8); + + NvU32 data0; + NvU32 data1; + NvU32 data2; +} NV0000_CTRL_DIAG_LOCK_METER_ENTRY; + +/* valid lock meter entry tag values */ +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA (0x00000001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA_FORCED (0x00000002) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_SEMA_COND (0x00000003) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_SEMA (0x00000004) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_API (0x00000010) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_API (0x00000011) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ACQUIRE_GPUS (0x00000020) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RELEASE_GPUS (0x00000021) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DATA (0x00000100) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_RMCTRL (0x00001000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_GET (0x00002000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_SET (0x00002001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_GETEX (0x00002002) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_CFG_SETEX (0x00002003) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_VIDHEAP (0x00003000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_MAPMEM (0x00003001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_UNMAPMEM (0x00003002) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_MAPMEM_DMA (0x00003003) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_UNMAPMEM_DMA (0x00003004) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC (0x00004000) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC_MEM (0x00004001) +#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DUP_OBJECT (0x00004010) +#define 
NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CLIENT (0x00005000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DEVICE (0x00005001)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_SUBDEVICE (0x00005002)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_SUBDEVICE_DIAG (0x00005003)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DISP (0x00005004)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_DISP_CMN (0x00005005)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL (0x00005006)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL_MPEG (0x00005007)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_CHANNEL_DISP (0x00005008)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_MEMORY (0x00005009)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_FBMEM (0x0000500A)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_OBJECT (0x0000500B)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_FREE_EVENT (0x0000500C)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_IDLE_CHANNELS (0x00006000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_BIND_CTXDMA (0x00007000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ALLOC_CTXDMA (0x00007001)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_ISR (0x0000F000)
+#define NV0000_CTRL_DIAG_LOCK_METER_TAG_DPC (0x0000F00F)
+
+/*
+ * NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES
+ *
+ * This command returns lock metering data in a fixed-sized array of entries.
+ * Each request will return up to NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX
+ * entries.
+ *
+ * It is up to the caller to repeat these requests to retrieve the total number
+ * of entries reported by NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_STATE.
+ *
+ * entryCount
+ * This parameter returns the total number of valid entries returned
+ * in the entries array. This value will not exceed
+ * NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX but may be less.
+ * A value of zero indicates there are no more valid entries.
+ * entries
+ * This parameter contains the storage into which lock metering entry
+ * data is returned.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_STATE
+ */
+#define NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES (0x485) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS_MESSAGE_ID" */
+
+/* total number of entries returned */
+#define NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX (0x40)
+
+#define NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS_MESSAGE_ID (0x85U)
+
+typedef struct NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS {
+ NvU32 entryCount;
+ NV_DECLARE_ALIGNED(NV0000_CTRL_DIAG_LOCK_METER_ENTRY entries[NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_MAX], 8);
+} NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_DIAG_PROFILE_RPC
+ *
+ * This command returns the RPC runtime information, and
+ * will only return valid data when running inside VGX mode.
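/*
 * [Editorial illustration -- not part of this commit.]
 * A sketch of the lock-metering flow documented above: stop metering with
 * NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE, then drain the table with
 * repeated NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES calls until
 * entryCount reaches zero. ExampleRmControl() is a hypothetical control-call
 * wrapper, not part of this change.
 */
extern NV_STATUS ExampleRmControl(NvHandle hClient, NvHandle hObject,
                                  NvU32 cmd, void *pParams, NvU32 paramsSize);

static void ExampleDrainLockMeter(NvHandle hClient)
{
    NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_PARAMS setParams = {0};
    NV0000_CTRL_DIAG_GET_LOCK_METER_ENTRIES_PARAMS getParams = {0};

    /* Metering must be disabled before a reset; stopping it here also keeps
     * the snapshot stable while it is read back. */
    setParams.state = NV0000_CTRL_DIAG_SET_LOCK_METER_STATE_DISABLE;
    (void)ExampleRmControl(hClient, hClient,
                           NV0000_CTRL_CMD_DIAG_SET_LOCK_METER_STATE,
                           &setParams, sizeof(setParams));

    do
    {
        NvU32 i;

        if (ExampleRmControl(hClient, hClient,
                             NV0000_CTRL_CMD_DIAG_GET_LOCK_METER_ENTRIES,
                             &getParams, sizeof(getParams)) != NV_OK)
            break;

        for (i = 0; i < getParams.entryCount; i++)
        {
            /* Consume getParams.entries[i]: counter, line, filename, tag,
             * cpuNum, irql, threadId, data0..data2. */
        }
    } while (getParams.entryCount > 0);
}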
+ * + * rpcProfileCmd: + * RPC profiler command issued by rpc profiler utility + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0000_CTRL_CMD_DIAG_PROFILE_RPC (0x488) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS_MESSAGE_ID" */ + +typedef struct RPC_METER_ENTRY { + NV_DECLARE_ALIGNED(NvU64 startTimeInNs, 8); + NV_DECLARE_ALIGNED(NvU64 endTimeInNs, 8); + NV_DECLARE_ALIGNED(NvU64 rpcDataTag, 8); + NV_DECLARE_ALIGNED(NvU64 rpcExtraData, 8); +} RPC_METER_ENTRY; + +#define NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS { + NvU32 rpcProfileCmd; +} NV0000_CTRL_DIAG_PROFILE_RPC_PARAMS; + +#define NV0000_CTRL_PROFILE_RPC_CMD_DISABLE (0x00000000) +#define NV0000_CTRL_PROFILE_RPC_CMD_ENABLE (0x00000001) +#define NV0000_CTRL_PROFILE_RPC_CMD_RESET (0x00000002) + +/* + * NV0000_CTRL_CMD_DIAG_DUMP_RPC + * + * This command returns the RPC runtime information, which + * will be logged by NV0000_CTRL_CMD_DIAG_PROFILE_RPC command + * when running inside VGX mode. + * + * When issuing this command, the RPC profiler has to be disabled. + * + * firstEntryOffset: + * [IN] offset for first entry. + * + * outputEntryCout: + * [OUT] number of entries returned in rpcProfilerBuffer. + * + * remainingEntryCount: + * [OUT] number of entries remaining. + * + * elapsedTimeInNs: + * [OUT] runtime for the RPC profiler tool. + * + * rpcProfilerBuffer: + * [OUT] buffer to store the RPC entries + */ + +#define NV0000_CTRL_CMD_DIAG_DUMP_RPC (0x489) /* finn: Evaluated from "(FINN_NV01_ROOT_DIAG_INTERFACE_ID << 8) | NV0000_CTRL_DIAG_DUMP_RPC_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_DIAG_RPC_MAX_ENTRIES (100) + +#define NV0000_CTRL_DIAG_DUMP_RPC_PARAMS_MESSAGE_ID (0x89U) + +typedef struct NV0000_CTRL_DIAG_DUMP_RPC_PARAMS { + NvU32 firstEntryOffset; + NvU32 outputEntryCount; + NvU32 remainingEntryCount; + NV_DECLARE_ALIGNED(NvU64 elapsedTimeInNs, 8); + NV_DECLARE_ALIGNED(RPC_METER_ENTRY rpcProfilerBuffer[NV0000_CTRL_DIAG_RPC_MAX_ENTRIES], 8); +} NV0000_CTRL_DIAG_DUMP_RPC_PARAMS; + +/* _ctrl0000diag_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h new file mode 100644 index 000000000..906df53c4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000event.h @@ -0,0 +1,116 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000event.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +#include "class/cl0000.h" +/* + * NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * This command sets event notification for the system events. + * + * event + * This parameter specifies the type of event to which the specified + * action is to be applied. The valid event values can be found in + * cl0000.h. + * + * action + * This parameter specifies the desired event notification action. + * Valid notification actions include: + * NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE + * This action disables event notification for the specified + * event. + * NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE + * This action enables single-shot event notification for the + * specified event. + * NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + * This action enables repeated event notification for the + * specified event. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_CLIENT + * + */ + +#define NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION (0x501) /* finn: Evaluated from "(FINN_NV01_ROOT_EVENT_INTERFACE_ID << 8) | NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 event; + NvU32 action; +} NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000) +#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001) +#define NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) + +/* + * NV0000_CTRL_CMD_GET_SYSTEM_EVENT_STATUS + * + * This command returns the status of the specified system event type. + * See the description of NV01_EVENT for details on registering events. + * + * event + * This parameter specifies the event type. Valid event type values + * can be found in cl0000.h. + * status + * This parameter returns the status for a given event type. Valid + * status values can be found in cl0000.h. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_CLIENT + * + */ + +#define NV0000_CTRL_CMD_GET_SYSTEM_EVENT_STATUS (0x502) /* finn: Evaluated from "(FINN_NV01_ROOT_EVENT_INTERFACE_ID << 8) | NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS { + NvU32 event; + NvU32 status; +} NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS; + +/* _ctrl0000event_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h new file mode 100644 index 000000000..f67696af7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpu.h @@ -0,0 +1,854 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
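/*
 * [Editorial illustration -- not part of this commit.]
 * A sketch of arming a system event with NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION
 * and polling it with NV0000_CTRL_CMD_GET_SYSTEM_EVENT_STATUS, both from
 * ctrl0000event.h above. The event type values themselves live in cl0000.h
 * (not shown here); ExampleRmControl() is a hypothetical control-call wrapper.
 */
extern NV_STATUS ExampleRmControl(NvHandle hClient, NvHandle hObject,
                                  NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS ExamplePollSystemEvent(NvHandle hClient, NvU32 eventType,
                                        NvU32 *pEventStatus)
{
    NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS setParams = {0};
    NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS statusParams = {0};
    NV_STATUS status;

    /* Ask for repeated notification of this event type. */
    setParams.event  = eventType;
    setParams.action = NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
    status = ExampleRmControl(hClient, hClient,
                              NV0000_CTRL_CMD_EVENT_SET_NOTIFICATION,
                              &setParams, sizeof(setParams));
    if (status != NV_OK)
        return status;

    /* Later: read back the current status for the same event type. */
    statusParams.event = eventType;
    status = ExampleRmControl(hClient, hClient,
                              NV0000_CTRL_CMD_GET_SYSTEM_EVENT_STATUS,
                              &statusParams, sizeof(statusParams));
    if (status == NV_OK)
        *pEventStatus = statusParams.status;
    return status;
}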
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000gpu.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" +#include "ctrl/ctrl0000/ctrl0000system.h" +#include "ctrl/ctrlxxxx.h" +#include "nvlimits.h" + +/* NV01_ROOT (client) GPU control commands and parameters */ + +/* + * NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS + * + * This command returns a table of attached gpuId values. + * The table is NV0000_CTRL_GPU_MAX_ATTACHED_GPUS entries in size. + * + * gpuIds[] + * This parameter returns the table of attached GPU IDs. + * The GPU ID is an opaque platform-dependent value that can be used + * with the NV0000_CTRL_CMD_GPU_GET_ID_INFO command to retrieve + * additional information about the GPU. The valid entries in gpuIds[] + * are contiguous, with a value of NV0000_CTRL_GPU_INVALID_ID indicating + * the invalid entries. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS (0x201) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_MAX_ATTACHED_GPUS 32 +#define NV0000_CTRL_GPU_INVALID_ID (0xffffffff) + +#define NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS]; +} NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS; + +/* + * Deprecated. Please use NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 instead. + */ +#define NV0000_CTRL_CMD_GPU_GET_ID_INFO (0x202) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ID_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_MAX_SZNAME 128 + +#define NV0000_CTRL_NO_NUMA_NODE (-1) + +#define NV0000_CTRL_GPU_GET_ID_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_GPU_GET_ID_INFO_PARAMS { + NvU32 gpuId; + NvU32 gpuFlags; + NvU32 deviceInstance; + NvU32 subDeviceInstance; + NV_DECLARE_ALIGNED(NvP64 szName, 8); + NvU32 sliStatus; + NvU32 boardId; + NvU32 gpuInstance; + NvS32 numaId; +} NV0000_CTRL_GPU_GET_ID_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 + * This command returns GPU instance information for the specified GPU. + * + * [in] gpuId + * This parameter should specify a valid GPU ID value. 
If there
+ * is no GPU present with the specified ID, a status of
+ * NV_ERR_INVALID_ARGUMENT is returned.
+ * [out] gpuFlags
+ * This parameter returns various flag values for the specified GPU.
+ * Valid flag values include:
+ * NV0000_CTRL_GPU_ID_INFO_IN_USE
+ * When true this flag indicates there are client references
+ * to the GPU in the form of device class instantiations (see
+ * NV01_DEVICE or NV03_DEVICE descriptions for details).
+ * NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE
+ * When true this flag indicates the GPU is linked into an
+ * active SLI device.
+ * NV0000_CTRL_GPU_ID_INFO_MOBILE
+ * When true this flag indicates the GPU is a mobile GPU.
+ * NV0000_CTRL_GPU_ID_BOOT_MASTER
+ * When true this flag indicates the GPU is the boot master GPU.
+ * NV0000_CTRL_GPU_ID_INFO_SOC
+ * When true this flag indicates the GPU is part of a
+ * System-on-Chip (SOC).
+ * NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED
+ * When ATS is enabled on the system.
+ * [out] deviceInstance
+ * This parameter returns the broadcast device instance number associated
+ * with the specified GPU. This value can be used to instantiate
+ * a broadcast reference to the GPU using the NV01_DEVICE classes.
+ * [out] subDeviceInstance
+ * This parameter returns the unicast subdevice instance number
+ * associated with the specified GPU. This value can be used to
+ * instantiate a unicast reference to the GPU using the NV20_SUBDEVICE
+ * classes.
+ * [out] sliStatus
+ * This parameter returns the SLI status for the specified GPU.
+ * Legal values for this member are described by NV0000_CTRL_SLI_STATUS.
+ * [out] boardId
+ * This parameter returns the board ID value with which the
+ * specified GPU is associated. Multiple GPUs can share the
+ * same board ID in multi-GPU configurations.
+ * [out] gpuInstance
+ * This parameter returns the GPU instance number for the specified GPU.
+ * GPU instance numbers are assigned in bus-probe order beginning with
+ * zero and are limited to one less than the number of GPUs in the system.
+ * [out] numaId
+ * This parameter returns the ID of the NUMA node for the specified GPU.
+ * In case there is no NUMA node, NV0000_CTRL_NO_NUMA_NODE is returned.
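/*
 * [Editorial illustration -- not part of this commit.]
 * A sketch of walking the attached-GPU table returned by
 * NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS (defined above) and querying each GPU
 * with NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2, whose #define and parameter
 * structure appear just below this point. The table's valid entries are
 * contiguous and terminated by NV0000_CTRL_GPU_INVALID_ID.
 * ExampleRmControl() is a hypothetical control-call wrapper.
 */
extern NV_STATUS ExampleRmControl(NvHandle hClient, NvHandle hObject,
                                  NvU32 cmd, void *pParams, NvU32 paramsSize);

static void ExampleListAttachedGpus(NvHandle hClient)
{
    NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS attached = {{0}};
    NvU32 i;

    if (ExampleRmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS,
                         &attached, sizeof(attached)) != NV_OK)
        return;

    for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++)
    {
        NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS info = {0};

        if (attached.gpuIds[i] == NV0000_CTRL_GPU_INVALID_ID)
            break; /* valid entries are contiguous */

        info.gpuId = attached.gpuIds[i];
        if (ExampleRmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2,
                             &info, sizeof(info)) == NV_OK)
        {
            /* info.deviceInstance / info.subDeviceInstance can now be used
             * when allocating NV01_DEVICE / NV20_SUBDEVICE objects. */
        }
    }
}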
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + + + +#define NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 (0x205) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS { + NvU32 gpuId; + NvU32 gpuFlags; + NvU32 deviceInstance; + NvU32 subDeviceInstance; + NvU32 sliStatus; + NvU32 boardId; + NvU32 gpuInstance; + NvS32 numaId; +} NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS; + + +/* valid flags values */ +#define NV0000_CTRL_GPU_ID_INFO_IN_USE 0:0 +#define NV0000_CTRL_GPU_ID_INFO_IN_USE_FALSE (0x00000000) +#define NV0000_CTRL_GPU_ID_INFO_IN_USE_TRUE (0x00000001) +#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE 1:1 +#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE_FALSE (0x00000000) +#define NV0000_CTRL_GPU_ID_INFO_LINKED_INTO_SLI_DEVICE_TRUE (0x00000001) +#define NV0000_CTRL_GPU_ID_INFO_MOBILE 2:2 +#define NV0000_CTRL_GPU_ID_INFO_MOBILE_FALSE (0x00000000) +#define NV0000_CTRL_GPU_ID_INFO_MOBILE_TRUE (0x00000001) +#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER 3:3 +#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER_FALSE (0x00000000) +#define NV0000_CTRL_GPU_ID_INFO_BOOT_MASTER_TRUE (0x00000001) + + +#define NV0000_CTRL_GPU_ID_INFO_SOC 5:5 +#define NV0000_CTRL_GPU_ID_INFO_SOC_FALSE (0x00000000) +#define NV0000_CTRL_GPU_ID_INFO_SOC_TRUE (0x00000001) +#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED 6:6 +#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_FALSE (0x00000000) +#define NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_TRUE (0x00000001) + +/* + * NV0000_CTRL_CMD_GPU_GET_INIT_STATUS + * + * This command returns the initialization status for the specified GPU, and + * will return NV_ERR_INVALID_STATE if called prior to GPU + * initialization. + * + * gpuId + * This parameter should specify a valid GPU ID value. If there + * is no GPU present with the specified ID, a status of + * NV_ERR_INVALID_ARGUMENT is returned. + * status + * This parameter returns the status code identifying the initialization + * state of the GPU. If this parameter has the value NV_OK, + * then no errors were detected during GPU initialization. Otherwise, this + * parameter specifies the top-level error that was detected during GPU + * initialization. Note that a value of NV_OK only means that + * no errors were detected during the actual GPU initialization, and other + * errors may have occurred that prevent the GPU from being attached or + * accessible via the NV01_DEVICE or NV20_SUBDEVICE classes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0000_CTRL_CMD_GPU_GET_INIT_STATUS (0x203) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS { + NvU32 gpuId; + NvU32 status; +} NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS; + +/* + * NV0000_CTRL_CMD_GPU_GET_DEVICE_IDS + * + * This command returns a mask of valid device IDs. These device IDs + * can be used to instantiate the NV01_DEVICE_0 class (see NV01_DEVICE_0 + * for more information). + * + * deviceIds + * This parameter returns the mask of valid device IDs. Each enabled bit + * in the mask corresponds to a valid device instance. 
Valid device + * instances can be used to initialize the NV0080_ALLOC_PARAMETERS + * structure when using NvRmAlloc to instantiate device handles. The + * number of device IDs will not exceed NV_MAX_DEVICES in number. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0000_CTRL_CMD_GPU_GET_DEVICE_IDS (0x204) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS { + NvU32 deviceIds; +} NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS; + + + +/* + * NV0000_CTRL_CMD_GPU_GET_PROBED_IDS + * + * This command returns a table of probed gpuId values. + * The table is NV0000_CTRL_GPU_MAX_PROBED_GPUS entries in size. + * + * gpuIds[] + * This parameter returns the table of probed GPU IDs. + * The GPU ID is an opaque platform-dependent value that can + * be used with the NV0000_CTRL_CMD_GPU_ATTACH_IDS and + * NV0000_CTRL_CMD_GPU_DETACH_ID commands to attach and detach + * the GPU. + * The valid entries in gpuIds[] are contiguous, with a value + * of NV0000_CTRL_GPU_INVALID_ID indicating the invalid entries. + * excludedGpuIds[] + * This parameter returns the table of excluded GPU IDs. + * An excluded GPU ID is an opaque platform-dependent value that + * can be used with NV0000_CTRL_CMD_GPU_GET_PCI_INFO and + * NV0000_CTRL_CMD_GPU_GET_UUID_INFO. + * The valid entries in excludedGpuIds[] are contiguous, with a value + * of NV0000_CTRL_GPU_INVALID_ID indicating the invalid entries. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_GPU_GET_PROBED_IDS (0x214) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_MAX_PROBED_GPUS NV_MAX_DEVICES + +#define NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS]; + NvU32 excludedGpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS]; +} NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS; + +/* + * NV0000_CTRL_CMD_GPU_GET_PCI_INFO + * + * This command takes a gpuId and returns PCI bus information about + * the device. If the OS does not support returning PCI bus + * information, this call will return NV_ERR_NOT_SUPPORTED + * + * gpuId + * This parameter should specify a valid GPU ID value. If there + * is no GPU present with the specified ID, a status of + * NV_ERR_INVALID_ARGUMENT is returned. + * + * domain + * This parameter returns the PCI domain of the GPU. + * + * bus + * This parameter returns the PCI bus of the GPU. + * + * slot + * This parameter returns the PCI slot of the GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_GPU_GET_PCI_INFO (0x21b) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS_MESSAGE_ID (0x1BU) + +typedef struct NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS { + NvU32 gpuId; + NvU32 domain; + NvU16 bus; + NvU16 slot; +} NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_GPU_ATTACH_IDS + * + * This command attaches the GPUs with the gpuIds matching those in + * the table provided by the client. + * The table is NV0000_CTRL_GPU_MAX_PROBED_GPUS entries in size. 
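/*
 * [Editorial illustration -- not part of this commit.]
 * A sketch of looking up the PCI address of a probed (or excluded) GPU with
 * NV0000_CTRL_CMD_GPU_GET_PCI_INFO, defined above. The gpuId would typically
 * come from the NV0000_CTRL_CMD_GPU_GET_PROBED_IDS table, also defined above;
 * on platforms without PCI information the call returns NV_ERR_NOT_SUPPORTED.
 * ExampleRmControl() is a hypothetical control-call wrapper.
 */
extern NV_STATUS ExampleRmControl(NvHandle hClient, NvHandle hObject,
                                  NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS ExampleGetPciAddress(NvHandle hClient, NvU32 gpuId,
                                      NvU32 *pDomain, NvU16 *pBus, NvU16 *pSlot)
{
    NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS pci = {0};
    NV_STATUS status;

    pci.gpuId = gpuId;
    status = ExampleRmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_PCI_INFO,
                              &pci, sizeof(pci));
    if (status == NV_OK)
    {
        *pDomain = pci.domain;
        *pBus    = pci.bus;
        *pSlot   = pci.slot;
    }
    return status;
}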
+ * + * gpuIds[] + * This parameter holds the table of gpuIds to attach. At least + * one gpuId must be specified; clients may use the special + * gpuId value NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS to indicate + * that all probed GPUs are to be attached. + * The entries in gpuIds[] must be contiguous, with a value of + * NV0000_CTRL_GPU_INVALID_ID to indicate the first invalid + * entry. + * If one or more of the gpuId values do not specify a GPU found + * in the system, the NV_ERR_INVALID_ARGUMENT error + * status is returned. + * + * failedId + * If NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS is specified and + * a GPU cannot be attached, the NV0000_CTRL_CMD_GPU_ATTACH_IDS + * command returns an error code and saves the failing GPU's + * gpuId in this field. + * + * If a table of gpuIds is provided, these gpuIds will be validated + * against the RM's table of probed gpuIds and attached in turn, + * if valid; if NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS is used, all + * probed gpuIds will be attached, in the order the associated GPUs + * were probed in by the RM. + * + * If a gpuId fails to attach, this gpuId is stored in the failedId + * field. Any GPUs attached by the command prior the failure are + * detached. + * + * If multiple clients use NV0000_CTRL_CMD_GPU_ATTACH_IDS to attach + * a gpuId, the RM ensures that the gpuId won't be detached until + * all clients have issued a call to NV0000_CTRL_CMD_GPU_DETACH_IDS + * to detach the gpuId (or have terminated). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OPERATING_SYSTEM + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_IRQ_EDGE_TRIGGERED + * NV_ERR_IRQ_NOT_FIRING + */ +#define NV0000_CTRL_CMD_GPU_ATTACH_IDS (0x215) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_ATTACH_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS (0x0000ffff) + +#define NV0000_CTRL_GPU_ATTACH_IDS_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV0000_CTRL_GPU_ATTACH_IDS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_GPU_MAX_PROBED_GPUS]; + NvU32 failedId; +} NV0000_CTRL_GPU_ATTACH_IDS_PARAMS; + +/* + * NV0000_CTRL_CMD_GPU_DETACH_IDS + * + * This command detaches the GPUs with the gpuIds matching those in + * the table provided by the client. + * The table is NV0000_CTRL_GPU_MAX_ATTACHED_GPUS entries in size. + * + * gpuIds[] + * This parameter holds the table of gpuIds to detach. At least + * one gpuId must be specified; clients may use the special + * gpuId NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS to indicate that + * all attached GPUs are to be detached. + * The entries in gpuIds[] must be contiguous, with a value of + * NV0000_CTRL_GPU_INVALID_ID to indicate the first invalid + * entry. + * If one or more of the gpuId values do not specify a GPU found + * in the system, the NV_ERR_INVALID_ARGUMENT error + * status is returned. + * + * If a table of gpuIds is provided, these gpuIds will be validated + * against the RM's list of attached gpuIds; each valid gpuId is + * detached immediately if it's no longer in use (i.e. if there are + * no client references to the associated GPU in the form of + * device class instantiations (see the NV01_DEVICE or NV03_DEVICE + * descriptions for details)) and if no other client still requires + * the associated GPU to be attached. 
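/*
 * [Editorial illustration -- not part of this commit.]
 * A sketch of attaching every probed GPU with NV0000_CTRL_CMD_GPU_ATTACH_IDS,
 * using the NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS shorthand documented above.
 * On failure, failedId reports the gpuId that could not be attached and any
 * GPUs attached earlier in the call are detached again.
 * ExampleRmControl() is a hypothetical control-call wrapper.
 */
extern NV_STATUS ExampleRmControl(NvHandle hClient, NvHandle hObject,
                                  NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS ExampleAttachAllProbedGpus(NvHandle hClient, NvU32 *pFailedId)
{
    NV0000_CTRL_GPU_ATTACH_IDS_PARAMS params;
    NV_STATUS status;
    NvU32 i;

    /* Entries must be contiguous and terminated by NV0000_CTRL_GPU_INVALID_ID;
     * slot 0 carries the "attach everything that was probed" request. */
    for (i = 0; i < NV0000_CTRL_GPU_MAX_PROBED_GPUS; i++)
        params.gpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
    params.gpuIds[0] = NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS;
    params.failedId  = NV0000_CTRL_GPU_INVALID_ID;

    status = ExampleRmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_ATTACH_IDS,
                              &params, sizeof(params));
    if (status != NV_OK && pFailedId != NULL)
        *pFailedId = params.failedId;
    return status;
}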
+ * + * If a given gpuId can't be detached immediately, it will instead + * be detached when the last client reference is freed or when + * the last client that issued NV0000_CTRL_CMD_GPU_ATTACH_IDS for + * this gpuId either issues NV0000_CTRL_CMD_GPU_DETACH_IDS or exits + * without detaching the gpuId explicitly. + * + * Clients may use the NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS command + * to obtain a table of the attached gpuIds. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OPERATING_SYSTEM + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_GPU_DETACH_IDS (0x216) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_DETACH_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS (0x0000ffff) + +#define NV0000_CTRL_GPU_DETACH_IDS_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NV0000_CTRL_GPU_DETACH_IDS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS]; +} NV0000_CTRL_GPU_DETACH_IDS_PARAMS; + + + +/* + * NV0000_CTRL_CMD_GPU_GET_SVM_SIZE + * + * This command is used to get the SVM size. + * + * gpuId + * This parameter uniquely identifies the GPU whose associated + * SVM size is to be returned. The value of this field must + * match one of those in the table returned by + * NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS + * + * SvmSize + * SVM size is returned in this. + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + * + */ +#define NV0000_CTRL_CMD_GPU_GET_SVM_SIZE (0x240) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS_MESSAGE_ID (0x40U) + +typedef struct NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS { + NvU32 gpuId; + NvU32 svmSize; +} NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS; + + + +/* + * NV0000_CTRL_CMD_GPU_GET_UUID_INFO + * + * This command returns requested information pertaining to the GPU + * specified by the GPU UUID passed in. + * + * Generally only GPUs that have been attached are visible to this call. Therefore + * queries on unattached GPUs will fail with NV_ERR_OBJECT_NOT_FOUND. However, + * a query for a SHA1 UUID may succeed for an unattached GPU in cases where the GID + * is cached, such as an excluded GPU. + * + * gpuGuid (INPUT) + * The GPU UUID of the gpu whose parameters are to be returned. Refer to + * NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information. + * + * flags (INPUT) + * The _FORMAT* flags designate ascii string format or a binary format. + * + * The _TYPE* flags designate either SHA-1-based (32-hex-character) or + * SHA-256-based (64-hex-character). + * + * gpuId (OUTPUT) + * The GPU ID of the GPU identified by gpuGuid. Refer to + * NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information. + * + * deviceInstance (OUTPUT) + * The device instance of the GPU identified by gpuGuid. Refer to + * NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information. + * + * subdeviceInstance (OUTPUT) + * The subdevice instance of the GPU identified by gpuGuid. Refer to + * NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information. 
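/*
 * [Editorial illustration -- not part of this commit.]
 * The matching teardown for the attach sketch earlier: request that all
 * attached GPUs be detached via NV0000_CTRL_CMD_GPU_DETACH_IDS, defined above.
 * Per the documentation, GPUs still referenced by other clients are detached
 * only once those references go away. ExampleRmControl() is a hypothetical
 * control-call wrapper.
 */
extern NV_STATUS ExampleRmControl(NvHandle hClient, NvHandle hObject,
                                  NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS ExampleDetachAllGpus(NvHandle hClient)
{
    NV0000_CTRL_GPU_DETACH_IDS_PARAMS params;
    NvU32 i;

    for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++)
        params.gpuIds[i] = NV0000_CTRL_GPU_INVALID_ID;
    params.gpuIds[0] = NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS;

    return ExampleRmControl(hClient, hClient, NV0000_CTRL_CMD_GPU_DETACH_IDS,
                            &params, sizeof(params));
}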
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * + */ +#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO (0x274) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum possible number of bytes of GID information */ +#define NV0000_GPU_MAX_GID_LENGTH (0x00000100) + +#define NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS_MESSAGE_ID (0x74U) + +typedef struct NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS { + NvU8 gpuUuid[NV0000_GPU_MAX_GID_LENGTH]; + NvU32 flags; + NvU32 gpuId; + NvU32 deviceInstance; + NvU32 subdeviceInstance; +} NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS; + +#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT 1:0 +#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_ASCII (0x00000000) +#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_BINARY (0x00000002) + +#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE 2:2 +#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE_SHA1 (0x00000000) +#define NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_TYPE_SHA256 (0x00000001) + +/* + * NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID + * + * This command returns the GPU UUID for the provided GPU ID. + * Note that only GPUs that have been attached are visible to this call. + * Therefore queries on unattached GPUs will fail + * with NV_ERR_OBJECT_NOT_FOUND. + * + * gpuId (INPUT) + * The GPU ID whose parameters are to be returned. Refer to + * NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information. + * + * flags (INPUT) + * + * NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII + * This value is used to request the GPU UUID in ASCII format. + * NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_BINARY + * This value is used to request the GPU UUID in binary format. + * + * NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA1 + * This value is used to request that the GPU UUID value + * be SHA1-based (32-hex-character). + * NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA256 + * This value is used to request that the GPU UUID value + * be SHA256-based (64-hex-character). + * + * gpuUuid[NV0000_GPU_MAX_GID_LENGTH] (OUTPUT) + * The GPU UUID of the GPU identified by GPU ID. Refer to + * NV0000_CTRL_CMD_GPU_GET_ID_INFO for more information. + * + * uuidStrLen (OUTPUT) + * The length of the UUID returned which is related to the format that + * was requested using flags. 
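+ *
+ * A minimal usage sketch (editorial illustration, not part of the original
+ * interface text; rmctrl() is a hypothetical stand-in for the client's RM
+ * control entry point, and gpuId is assumed to come from
+ * NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS):
+ *
+ *     NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS p = { 0 };
+ *
+ *     p.gpuId = gpuId;
+ *     p.flags = NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII;
+ *     // TYPE field left at 0 == _TYPE_SHA1
+ *     if (rmctrl(hClient, hClient, NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID,
+ *                &p, sizeof(p)) == NV_OK) {
+ *         // p.gpuUuid holds p.uuidStrLen ASCII characters
+ *     }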
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID (0x275) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS_MESSAGE_ID (0x75U)
+
+typedef struct NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS {
+    NvU32 gpuId;
+    NvU32 flags;
+    NvU8  gpuUuid[NV0000_GPU_MAX_GID_LENGTH];
+    NvU32 uuidStrLen;
+} NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS;
+
+/* valid format values */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT 1:0
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_ASCII (0x00000000)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_FORMAT_BINARY (0x00000002)
+
+/* valid type values */
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE 2:2
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA1 (0x00000000)
+#define NV0000_CTRL_CMD_GPU_GET_UUID_FROM_GPU_ID_FLAGS_TYPE_SHA256 (0x00000001)
+
+
+
+/*
+ * NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE
+ *
+ * This command is used to enter or exit the so-called "drain" state.
+ * When this state is enabled, the existing clients continue executing
+ * as usual; however, no new client connections are allowed.
+ * This is done in order to "drain" the system of the running clients
+ * in preparation for selectively powering down the GPU.
+ * No GPU can enter the drain state if that GPU is in an SLI group.
+ * In that case, NV_ERR_IN_USE is returned.
+ * Requires administrator privileges.
+ *
+ * It is expected that the "drain" state will eventually be deprecated
+ * and replaced with another mechanism to quiesce a GPU (Bug 1718113).
+ *
+ * gpuId (INPUT)
+ *   This parameter should specify a valid GPU ID value. If there
+ *   is no GPU present with the specified ID, a status of
+ *   NV_ERR_INVALID_ARGUMENT is returned.
+ * newState (INPUT)
+ *   This input parameter is used to enter or exit the "drain"
+ *   software state of the GPU specified by the gpuId parameter.
+ *   Possible values are:
+ *     NV0000_CTRL_GPU_DRAIN_STATE_ENABLED
+ *     NV0000_CTRL_GPU_DRAIN_STATE_DISABLED
+ * flags (INPUT)
+ *   NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE
+ *     if set, upon reaching quiescence, a request will be made to
+ *     the OS to "forget" the PCI device associated with the
+ *     GPU specified by the gpuId parameter, in case such a request
+ *     is supported by the OS. Otherwise, NV_ERR_NOT_SUPPORTED
+ *     will be returned.
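+ *
+ * A minimal usage sketch (editorial illustration, not part of the original
+ * interface text; rmctrl() is a hypothetical stand-in for the client's RM
+ * control entry point, and the caller is assumed to hold administrator
+ * privileges):
+ *
+ *     NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS p = { 0 };
+ *
+ *     p.gpuId    = gpuId;
+ *     p.newState = NV0000_CTRL_GPU_DRAIN_STATE_ENABLED;
+ *     p.flags    = NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE;
+ *     rmctrl(hClient, hClient, NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE,
+ *            &p, sizeof(p));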
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_IN_USE
+ */
+
+#define NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE (0x278) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS_MESSAGE_ID" */
+
+/* Possible values of newState */
+#define NV0000_CTRL_GPU_DRAIN_STATE_DISABLED (0x00000000)
+#define NV0000_CTRL_GPU_DRAIN_STATE_ENABLED (0x00000001)
+
+/* Defined bits for the "flags" argument */
+#define NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE (0x00000001)
+#define NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE (0x00000002)
+
+#define NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS_MESSAGE_ID (0x78U)
+
+typedef struct NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 newState;
+    NvU32 flags;
+} NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE
+ *
+ * gpuId (INPUT)
+ *   This parameter should specify a valid GPU ID value. If there
+ *   is no GPU present with the specified ID, a status of
+ *   NV_ERR_INVALID_ARGUMENT is returned.
+ * drainState (OUTPUT)
+ *   This parameter returns a value indicating if the "drain"
+ *   state is currently enabled or not for the specified GPU. See the
+ *   description of NV0000_CTRL_CMD_GPU_MODIFY_DRAIN_STATE.
+ *   Possible values are:
+ *     NV0000_CTRL_GPU_DRAIN_STATE_ENABLED
+ *     NV0000_CTRL_GPU_DRAIN_STATE_DISABLED
+ * flags (OUTPUT)
+ *   NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE
+ *     if set, upon reaching quiescence, the GPU device will be
+ *     removed automatically from the kernel space, similar
+ *     to what writing "1" to the sysfs "remove" node does.
+ *   NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE
+ *     after removing the GPU, also disable the parent bridge's
+ *     PCIe link. This flag can only be set in conjunction with
+ *     NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE, and then
+ *     only when the GPU is already idle (not attached).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_GPU_QUERY_DRAIN_STATE (0x279) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS_MESSAGE_ID (0x79U)
+
+typedef struct NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS {
+    NvU32 gpuId;
+    NvU32 drainState;
+    NvU32 flags;
+} NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_DISCOVER
+ *
+ * This request asks the OS to scan the PCI tree or a sub-tree for GPUs
+ * that are not yet known to the OS, and to make them available for use.
+ * If all of domain:bus:slot.function are zeros, the entire tree is scanned;
+ * otherwise the parameters identify the bridge device that roots the
+ * subtree to be scanned.
+ * Requires administrator privileges.
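+ *
+ * A minimal usage sketch requesting a scan of the entire PCI tree, using
+ * the parameters described below (editorial illustration, not part of the
+ * original interface text; rmctrl() is a hypothetical stand-in for the
+ * client's RM control entry point):
+ *
+ *     NV0000_CTRL_GPU_DISCOVER_PARAMS p = { 0 };   // all zeros => whole tree
+ *
+ *     rmctrl(hClient, hClient, NV0000_CTRL_CMD_GPU_DISCOVER, &p, sizeof(p));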
+ *
+ * domain (INPUT)
+ *   PCI domain of the bridge
+ * bus (INPUT)
+ *   PCI bus of the bridge
+ * slot (INPUT)
+ *   PCI slot of the bridge
+ * function (INPUT)
+ *   PCI function of the bridge
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_OPERATING_SYSTEM
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0000_CTRL_CMD_GPU_DISCOVER (0x27a) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | 0x7A" */
+
+typedef struct NV0000_CTRL_GPU_DISCOVER_PARAMS {
+    NvU32 domain;
+    NvU8  bus;
+    NvU8  slot;
+    NvU8  function;
+} NV0000_CTRL_GPU_DISCOVER_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE
+ *
+ * This command is used to get the content of the MemOp (CUDA Memory Operation)
+ * enablement mask, which can be overridden by using the MemOpOverride RegKey.
+ *
+ * The enableMask member must be treated as a bitmask, where each bit controls
+ * the enablement of a feature.
+ *
+ * So far, the only feature defined controls the whole set of MemOp APIs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_GET_MEMOP_ENABLE (0x27b) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS_MESSAGE_ID (0x7BU)
+
+typedef struct NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS {
+    NvU32 enableMask;
+} NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS;
+
+#define NV0000_CTRL_GPU_FLAGS_MEMOP_ENABLE (0x00000001)
+
+
+
+/*
+ * NV0000_CTRL_CMD_GPU_DISABLE_NVLINK_INIT
+ *
+ * This privileged command is used to disable initialization for the NVLinks
+ * provided in the mask.
+ *
+ * The mask must be applied before the GPU is attached. DISABLE_NVLINK_INIT
+ * is a NOP for non-NVLink GPUs.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_IN_USE
+ *
+ */
+#define NV0000_CTRL_CMD_GPU_DISABLE_NVLINK_INIT (0x281) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS_MESSAGE_ID (0x81U)
+
+typedef struct NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS {
+    NvU32  gpuId;
+    NvU32  mask;
+    NvBool bSkipHwNvlinkDisable;
+} NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS;
+
+
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PARAM_DATA 0x00000175
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_IN 6
+#define NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_OUT 5
+
+/*
+ * NV0000_CTRL_CMD_GPU_LEGACY_CONFIG
+ *
+ * Path to use the legacy RM GetConfig/Set API. This API is being phased out.
+ */ +#define NV0000_CTRL_CMD_GPU_LEGACY_CONFIG (0x282) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS_MESSAGE_ID (0x82U) + +typedef struct NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS { + NvHandle hContext; /* [in] - Handle of object to perform operation on (Device, Subdevice, etc) */ + NvU32 opType; /* [in] - Type of API */ + NvV32 index; /* [in] - command type */ + NvU32 dataType; /* [out] - data union type */ + + union { + struct { + NvV32 value; + } configGet; + struct { + NvU32 newValue; + NvU32 oldValue; + } configSet; + struct { + NvU8 paramData[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PARAM_DATA]; + NvU32 paramSize; + } configEx; + struct { + NvU32 propertyId; + NvU32 propertyIn[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_IN]; + NvU32 propertyOut[NV0000_CTRL_GPU_LEGACY_CONFIG_MAX_PROPERTIES_OUT]; + } reservedProperty; + } data; +} NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS; + +#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_GET (0x00000000) +#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_SET (0x00000001) +#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_GET_EX (0x00000002) +#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_SET_EX (0x00000003) +#define NV0000_CTRL_GPU_LEGACY_CONFIG_OP_TYPE_RESERVED (0x00000004) + +/* + * NV0000_CTRL_CMD_IDLE_CHANNELS + */ +#define NV0000_CTRL_CMD_IDLE_CHANNELS (0x283) /* finn: Evaluated from "(FINN_NV01_ROOT_GPU_INTERFACE_ID << 8) | NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS_MESSAGE_ID (0x83U) + +typedef struct NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS { + NvHandle hDevice; + NvHandle hChannel; + NvV32 numChannels; + /* C form: NvP64 phClients NV_ALIGN_BYTES(8); */ + NV_DECLARE_ALIGNED(NvP64 phClients, 8); + /* C form: NvP64 phDevices NV_ALIGN_BYTES(8); */ + NV_DECLARE_ALIGNED(NvP64 phDevices, 8); + /* C form: NvP64 phChannels NV_ALIGN_BYTES(8); */ + NV_DECLARE_ALIGNED(NvP64 phChannels, 8); + NvV32 flags; + NvV32 timeout; +} NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS; + +/* _ctrl0000gpu_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h new file mode 100644 index 000000000..07f1cbcf5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gpuacct.h @@ -0,0 +1,258 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000gpuacct.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" + +/* + * NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE + * + * This command is used to enable or disable the per process GPU accounting. + * This is part of GPU's software state and will persist if persistent + * software state is enabled. Refer to the description of + * NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE for more information. + * + * gpuId + * This parameter should specify a valid GPU ID value. Refer to the + * description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more + * information. If there is no GPU present with the specified ID, + * a status of NV_ERR_INVALID_ARGUMENT is returned. + * pid + * This input parameter specifies the process id of the process for which + * the accounting state needs to be set. + * In case of VGX host, this parameter specifies VGPU plugin(VM) pid. This + * parameter is set only when this RM control is called from VGPU plugin, + * otherwise it is zero meaning set/reset the accounting state for the + * specified GPU. + * newState + * This input parameter is used to enable or disable the GPU accounting. + * Possible values are: + * NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED + * NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE (0xb01) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID" */ + +/* Possible values of persistentSwState */ +#define NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED (0x00000000) +#define NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED (0x00000001) + +#define NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS { + NvU32 gpuId; + NvU32 pid; + NvU32 newState; +} NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS; + +/* + * NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_STATE + * + * This command is used to get the current state of GPU accounting. + * + * gpuId + * This parameter should specify a valid GPU ID value. Refer to the + * description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more + * information. If there is no GPU present with the specified ID, + * a status of NV_ERR_INVALID_ARGUMENT is returned. + * pid + * This input parameter specifies the process id of the process of which the + * accounting state needs to be queried. + * In case of VGX host, this parameter specifies VGPU plugin(VM) pid. This + * parameter is set only when this RM control is called from VGPU plugin, + * otherwise it is zero meaning the accounting state needs to be queried for + * the specified GPU. + * state + * This parameter returns a value indicating if per process GPU accounting + * is currently enabled or not for the specified GPU. See the + * description of NV0000_CTRL_CMD_GPU_SET_ACCOUNTING_STATE. 
+ * Possible values are: + * NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED + * NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_STATE (0xb02) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS { + NvU32 gpuId; + NvU32 pid; + NvU32 state; +} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS; + +/* + * NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS + * + * This command returns GPU accounting data for the process. + * + * gpuId + * This parameter should specify a valid GPU ID value. Refer to the + * description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more + * information. If there is no GPU present with the specified ID, + * a status of NV_ERR_INVALID_ARGUMENT is returned. + * pid + * This parameter specifies the PID of the process for which information is + * to be queried. + * In case of VGX host, this parameter specifies VGPU plugin(VM) pid inside + * which the subPid is running. This parameter is set to VGPU plugin pid + * when this RM control is called from VGPU plugin. + * subPid + * In case of VGX host, this parameter specifies the PID of the process for + * which information is to be queried. In other cases, it is zero. + * gpuUtil + * This parameter returns the average GR utilization during the process's + * lifetime. + * fbUtil + * This parameter returns the average FB bandwidth utilization during the + * process's lifetime. + * maxFbUsage + * This parameter returns the maximum FB allocated (in bytes) by the process. + * startTime + * This parameter returns the time stamp value in micro seconds at the time + * process started utilizing GPU. + * stopTime + * This parameter returns the time stamp value in micro seconds at the time + * process stopped utilizing GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_GPUACCT_GET_PROC_ACCOUNTING_INFO (0xb03) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS { + NvU32 gpuId; + NvU32 pid; + NvU32 subPid; + NvU32 gpuUtil; + NvU32 fbUtil; + NV_DECLARE_ALIGNED(NvU64 maxFbUsage, 8); + NV_DECLARE_ALIGNED(NvU64 startTime, 8); + NV_DECLARE_ALIGNED(NvU64 endTime, 8); +} NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS + * + * This command is used to get the PIDS of processes with accounting + * information in the driver. + * + * gpuId + * This parameter should specify a valid GPU ID value. Refer to the + * description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more + * information. If there is no GPU present with the specified ID, + * a status of NV_ERR_INVALID_ARGUMENT is returned. + * pid + * This input parameter specifies the process id of the process of which the + * information needs to be queried. + * In case of VGX host, this parameter specifies VGPU plugin(VM) pid. 
This + * parameter is set only when this RM control is called from VGPU plugin, + * otherwise it is zero meaning get the pid list of the all the processes + * running on the specified GPU. + * pidTbl + * This parameter returns the table of all PIDs for which driver has + * accounting info. + * pidCount + * This parameter returns the number of entries in the PID table. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS (0xb04) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_MESSAGE_ID" */ + +/* max size of pidTable */ +#define NV0000_GPUACCT_PID_MAX_COUNT 4000 + +#define NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS { + NvU32 gpuId; + NvU32 pid; + NvU32 pidTbl[NV0000_GPUACCT_PID_MAX_COUNT]; + NvU32 pidCount; +} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS; + +/* + * NV0000_CTRL_CMD_GPUACCT_CLEAR_ACCOUNTING_DATA + * + * This command is used to clear previously collected GPU accounting data. This + * will have no affect on data for the running processes, accounting data for + * these processes will not be cleared and will still be logged for these + * processes. In order to clear ALL accounting data, accounting needs to be + * disabled using NV0000_CTRL_CMD_GPUACCT_SET_ACCOUNTING_STATE before executing + * this command. + * + * gpuId + * This parameter should specify a valid GPU ID value. Refer to the + * description of NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS for more + * information. If there is no GPU present with the specified ID, + * a status of NV_ERR_INVALID_ARGUMENT is returned. + * pid + * This input parameter specifies the process id of the process for which + * the accounting data needs to be cleared. + * In case of VGX host, this parameter specifies VGPU plugin(VM) pid for + * which the accounting data needs to be cleared. This parameter is set only + * when this RM control is called from VGPU plugin, otherwise it is zero + * meaning clear the accounting data of processes running on baremetal + * system. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ + +#define NV0000_CTRL_CMD_GPUACCT_CLEAR_ACCOUNTING_DATA (0xb05) /* finn: Evaluated from "(FINN_NV01_ROOT_GPUACCT_INTERFACE_ID << 8) | NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS { + NvU32 gpuId; + NvU32 pid; +} NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS; + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h new file mode 100644 index 000000000..419d00649 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gspc.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000gspc.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h new file mode 100644 index 000000000..e4597f3a5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000gsync.h @@ -0,0 +1,104 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000gsync.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +#include "class/cl30f1.h" +/* NV01_ROOT (client) system controller control commands and parameters */ + +/* + * NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS + * + * This command returns a table of attached gsyncId values. + * The table is NV0000_CTRL_GSYNC_MAX_ATTACHED_GSYNCS entries in size. + * + * gsyncIds[] + * This parameter returns the table of attached gsync IDs. 
+ * The gsync ID is an opaque platform-dependent value that + * can be used with the NV0000_CTRL_CMD_GSYNC_GET_ID_INFO command to + * retrieve additional information about the gsync device. + * The valid entries in gsyncIds[] are contiguous, with a value + * of NV0000_CTRL_GSYNC_INVALID_ID indicating the invalid entries. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS (0x301) /* finn: Evaluated from "(FINN_NV01_ROOT_GSYNC_INTERFACE_ID << 8) | NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS { + NvU32 gsyncIds[NV30F1_MAX_GSYNCS]; +} NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS; + +/* this value marks entries in gsyncIds[] as invalid */ +#define NV0000_CTRL_GSYNC_INVALID_ID (0xffffffff) + +/* + * NV0000_CTRL_CMD_GSYNC_GET_ID_INFO + * + * This command returns gsync instance information for the + * specified gsync device. + * + * gsyncId + * This parameter should specify a valid gsync ID value. + * If there is no gsync present with the specified ID, a + * status of NV_ERR_INVALID_ARGUMENT is returned. + * gsyncFlags + * This parameter returns the current state of the gsync device. + * gsyncInstance + * This parameter returns the instance number associated with the + * specified gsync. This value can be used to instantiate + * a reference to the gsync using one of the NV30_GSYNC + * classes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_GSYNC_GET_ID_INFO (0x302) /* finn: Evaluated from "(FINN_NV01_ROOT_GSYNC_INTERFACE_ID << 8) | NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS { + NvU32 gsyncId; + NvU32 gsyncFlags; + NvU32 gsyncInstance; +} NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS; + +/* _ctrl0000gsync_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h new file mode 100644 index 000000000..359b6e830 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000nvd.h @@ -0,0 +1,639 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000nvd.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +/* NV01_ROOT (client) nvd control commands and parameters */ + +/* + * NV0080_CTRL_NVD_DUMP_COMPONENT + * + * The following dump components are used to describe legal ranges in + * commands below: + * + * NV0080_CTRL_CMD_NVD_DUMP_COMPONENT_SYS + * This is the system dump component. + * NV0080_CTRL_CMD_NVD_DUMP_COMPONENT_NVLOG + * This is the nvlog dump component. + * NV0080_CTRL_CMD_NVD_DUMP_COMPONENT_RESERVED + * This component is reserved. + * + * See nvdump.h for more information on dump component values. + */ +#define NV0000_CTRL_NVD_DUMP_COMPONENT_SYS (0x400) +#define NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG (0x800) +#define NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED (0xB00) + +/* + * NV0000_CTRL_CMD_NVD_GET_DUMP_SIZE + * + * This command gets the expected dump size of a particular system + * dump component. Note that events that occur between this command + * and a later NV0000_CTRL_CMD_NVD_GET_DUMP command could alter the size of + * the buffer required. + * + * component + * This parameter specifies the system dump component for which the + * dump size is desired. Legal values for this parameter must + * be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_SYS and + * less than NV0000_CTRL_NVD_GET_DUMP_COMPONENT_NVLOG. + * size + * This parameter returns the expected size in bytes. The maximum + * value of this call is NV0000_CTRL_NVD_MAX_DUMP_SIZE. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT if components are invalid. + */ + +#define NV0000_CTRL_CMD_NVD_GET_DUMP_SIZE (0x601) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS { + NvU32 component; + NvU32 size; +} NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS; + +/* Max size that a GET_DUMP_SIZE_PARAMS call can return */ +#define NV0000_CTRL_NVD_MAX_DUMP_SIZE (1000000) + +/* + * NV0000_CTRL_CMD_NVD_GET_DUMP + * + * This command gets a dump of a particular system dump component. If triggers + * is non-zero, the command waits for the trigger to occur before it returns. + * + * pBuffer + * This parameter points to the buffer for the data. + * component + * This parameter specifies the system dump component for which the + * dump is to be retrieved. Legal values for this parameter must + * be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_SYS and + * less than NV0000_CTRL_NVD_GET_DUMP_COMPONENT_NVLOG. + * size + * On entry, this parameter specifies the maximum length for + * the returned data. On exit, it specifies the number of bytes + * returned. + * + * Possible status values returned are: + * NV_OK + * NVOS_ERROR_INVALID_ARGUMENT if components are invalid. + * NVOS_ERROR_INVALID_ADDRESS if pBuffer is invalid + * NVOS_ERROR_INVALID_???? 
if the buffer was too small + */ +#define NV0000_CTRL_CMD_NVD_GET_DUMP (0x602) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_NVD_GET_DUMP_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pBuffer, 8); + NvU32 component; + NvU32 size; +} NV0000_CTRL_NVD_GET_DUMP_PARAMS; + +/* + * NV0000_CTRL_CMD_NVD_GET_TIMESTAMP + * + * This command returns the current value of the timestamp used + * by the RM in NvDebug dumps. It is provided to keep the RM and NvDebug + * clients on the same time base. + * + * cpuClkId + * See also NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO + * This parameter specifies the source of the CPU clock. Legal values for + * this parameter include: + * NV0000_NVD_CPU_TIME_CLK_ID_DEFAULT and NV0000_NVD_CPU_TIME_CLK_ID_OSTIME + * This clock id will provide real time in microseconds since 00:00:00 UTC on January 1, 1970. + * It is calculated as follows: + * (seconds * 1000000) + uSeconds + * NV0000_NVD_CPU_TIME_CLK_ID_PLATFORM_API + * This clock id will provide time stamp that is constant-rate, high + * precision using platform API that is also available in the user mode. + * NV0000_NVD_CPU_TIME_CLK_ID_TSC + * This clock id will provide time stamp using CPU's time stamp counter. + * + * timestamp + * Retrieved timestamp + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0000_NVD_CPU_TIME_CLK_ID_DEFAULT (0x00000000) +#define NV0000_NVD_CPU_TIME_CLK_ID_OSTIME (0x00000001) +#define NV0000_NVD_CPU_TIME_CLK_ID_TSC (0x00000002) +#define NV0000_NVD_CPU_TIME_CLK_ID_PLATFORM_API (0x00000003) + +#define NV0000_CTRL_CMD_NVD_GET_TIMESTAMP (0x603) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS { + NV_DECLARE_ALIGNED(NvU64 timestamp, 8); + NvU8 cpuClkId; +} NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS; + +/* + * NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO + * + * This command gets the current state of the NvLog buffer system. + * + * component (in) + * This parameter specifies the system dump component for which the + * NvLog info is desired. Legal values for this parameter must + * be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG and + * less than NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED. + * version (out) + * This parameter returns the version of the Nvlog subsystem. + * runtimeSizes (out) + * This parameter returns the array of sizes for all supported printf + * specifiers. This information is necessary to know how many bytes + * to decode when given a certain specifier (such as %d). + * The following describes the contents of each array entry: + * NV0000_CTRL_NVD_RUNTIME_SIZE_UNUSED + * This array entry has special meaning and is unused in the + * runtimeSizes array. + * NV0000_CTRL_NVD_RUNTIME_SIZE_INT + * This array entry returns the size of integer types for use in + * interpreting the %d, %u, %x, %X, %i, %o specifiers. + * NV0000_CTRL_NVD_RUNTIME_SIZE_LONG_LONG + * This array entry returns the size of long long integer types for + * using in interpreting the %lld, %llu, %llx, %llX, %lli, %llo + * specifiers. + * NV0000_CTRL_NVD_RUNTIME_SIZE_STRING + * This array entry returns zero as strings are not allowed. 
+ *     NV0000_CTRL_NVD_RUNTIME_SIZE_PTR
+ *       This array entry returns the size of the pointer type for use
+ *       in interpreting the %p specifier.
+ *     NV0000_CTRL_NVD_RUNTIME_SIZE_CHAR
+ *       This array entry returns the size of the char type for use in
+ *       interpreting the %c specifier.
+ *     NV0000_CTRL_NVD_RUNTIME_SIZE_FLOAT
+ *       This array entry returns the size of the float types for use
+ *       in interpreting the %f, %g, %e, %F, %G, %E specifiers.
+ *     All remaining entries are reserved and return 0.
+ *   printFlags (out)
+ *     This parameter returns the flags of the NvLog system.
+ *     NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_BUFFER_FLAGS
+ *       See NV0000_CTRL_CMD_NVD_GET_NVLOG_BUF_INFO for more details.
+ *     NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_BUFFER_SIZE
+ *       This field returns the buffer size in KBytes. A value of zero
+ *       is returned when logging is disabled.
+ *     NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP
+ *       This field returns the format of the timestamp. Legal values
+ *       for this parameter include:
+ *         NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_NONE
+ *           This value indicates no timestamp.
+ *         NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_32BIT
+ *           This value indicates a 32-bit timestamp.
+ *         NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_64BIT
+ *           This value indicates a 64-bit timestamp.
+ *         NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_TIMESTAMP_32BIT_DIFF
+ *           This value indicates a 32-bit differential timestamp.
+ *     NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_RESERVED
+ *       This field is reserved.
+ *     NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_RUNTIME_LEVEL
+ *       This field returns the lowest debug level for which logging
+ *       is enabled by default.
+ *     NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT
+ *       This field indicates if logging for the specified component has
+ *       been initialized. Legal values for this parameter include:
+ *         NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT_NO
+ *           This value indicates NvLog is uninitialized.
+ *         NV0000_CTRL_NVD_NVLOG_PRINT_FLAGS_INIT_YES
+ *           This value indicates NvLog has been initialized.
+ *   signature (out)
+ *     This parameter is the signature of the database required to decode
+ *     these logs, autogenerated at build time.
+ *   bufferTags (out)
+ *     For each possible buffer, this parameter identifies the tag used
+ *     during allocation, or a value of '0' if the buffer is unallocated.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT if components are invalid.
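+ *
+ * A minimal query sketch (editorial illustration, not part of the original
+ * interface text; rmctrl() is a hypothetical stand-in for the client's RM
+ * control entry point):
+ *
+ *     NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS info = { 0 };
+ *
+ *     info.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
+ *     if (rmctrl(hClient, hClient, NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO,
+ *                &info, sizeof(info)) == NV_OK) {
+ *         // info.version, info.runtimeSizes[], info.printFlags,
+ *         // info.signature[] and info.bufferTags[] now describe NvLog state
+ *     }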
+ */ +#define NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO (0x604) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum size of the runtimeSizes array */ +#define NV0000_CTRL_NVD_MAX_RUNTIME_SIZES (16) + +/* size of signature parameter */ +#define NV0000_CTRL_NVD_SIGNATURE_SIZE (4) + +/* Maximum number of buffers */ +#define NV0000_CTRL_NVD_MAX_BUFFERS (256) + +#define NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS { + NvU32 component; + NvU32 version; + NvU8 runtimeSizes[NV0000_CTRL_NVD_MAX_RUNTIME_SIZES]; + NvU32 printFlags; + NvU32 signature[NV0000_CTRL_NVD_SIGNATURE_SIZE]; + NvU32 bufferTags[NV0000_CTRL_NVD_MAX_BUFFERS]; +} NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS; + +/* runtimeSize array indices */ +#define NV0000_CTRL_NVD_RUNTIME_SIZE_UNUSED (0) +#define NV0000_CTRL_NVD_RUNTIME_SIZE_INT (1) +#define NV0000_CTRL_NVD_RUNTIME_SIZE_LONG_LONG (2) +#define NV0000_CTRL_NVD_RUNTIME_SIZE_STRING (3) +#define NV0000_CTRL_NVD_RUNTIME_SIZE_PTR (4) +#define NV0000_CTRL_NVD_RUNTIME_SIZE_CHAR (5) +#define NV0000_CTRL_NVD_RUNTIME_SIZE_FLOAT (6) + +/* printFlags fields and values */ +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_INFO 7:0 +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE 23:8 +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE_DISABLE (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_BUFFER_SIZE_DEFAULT (0x00000004) +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_RUNTIME_LEVEL 28:25 +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP 30:29 +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_NONE (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_32 (0x00000001) +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_64 (0x00000002) +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_TIMESTAMP_32_DIFF (0x00000003) +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED 31:31 +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED_NO (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_INFO_PRINTFLAGS_INITED_YES (0x00000001) + +/* + * NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO + * + * This command gets the current state of a specific buffer in the NvLog + * buffer system. + * + * component (in) + * This parameter specifies the system dump component for which the + * NvLog info is desired. Legal values for this parameter must + * be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG and + * less than NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED. + * buffer (in/out) + * This parameter specifies the buffer number from which to retrieve the + * buffer information. Valid values are 0 to (NV0000_CTRL_NVD_MAX_BUFFERS - 1). + * If the buffer is specified using the 'tag' parameter, the buffer + * number is returned through this one. + * tag (in/out) + * If this parameter is non-zero, it will be used to specify the buffer, + * instead of 'buffer' parameter. It returns the tag of the specified buffer + * size (out) + * This parameter returns the size of the specified buffer. + * flags (in/out) + * On input, this parameter sets the following behavior: + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE + * This flag controls if the nvlog system should pause output + * to this buffer. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_YES + * The buffer should be paused until another command + * unpauses this buffer. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_NO + * The buffer should not be paused. 
+ * On output, this parameter returns the flags of a specified buffer: + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED + * This flag indicates if logging to the specified buffer is + * disabled or not. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE + * This flag indicates the buffer logging type: + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_RING + * This type value indicates logging to the buffer wraps. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_NOWRAP + * This type value indicates logging to the buffer does not wrap. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE + * This flag indicates if the buffer size is expandable. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_NO + * The buffer is not expandable. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_YES + * The buffer is expandable. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NON_PAGED + * This flag indicates if the buffer occupies non-paged or pageable + * memory. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NON_PAGED_NO + * The buffer is in pageable memory. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NON_PAGES_YES + * The buffer is in non-paged memory. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING + * This flag indicates the locking mode for the specified buffer. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_NONE + * This locking value indicates that no locking is performed. This + * locking mode is typically used for inherently single-threaded + * buffers. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_STATE + * This locking value indicates that the buffer is locked only + * during state changes and that memory copying is unlocked. This + * mode should not be used tiny buffers that overflow every write + * or two. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_FULL + * This locking value indicates the buffer is locked for the full + * duration of the write. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA + * This flag indicates if the buffer is stored in OCA dumps. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_NO + * The buffer is not included in OCA dumps. + * NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_YES + * The buffer is included in OCA dumps. + * pos (out) + * This parameter is the current position of the tracker/cursor in the + * buffer. + * overflow (out) + * This parameter is the number of times the buffer has overflowed. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT if components are invalid. 
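+ *
+ * A minimal usage sketch that looks up a buffer by tag and pauses it before
+ * reading (editorial illustration, not part of the original interface text;
+ * rmctrl() is a hypothetical stand-in for the client's RM control entry
+ * point, and 'tag' is assumed to come from the bufferTags[] array returned
+ * by NV0000_CTRL_CMD_NVD_GET_NVLOG_INFO):
+ *
+ *     NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS buf = { 0 };
+ *
+ *     buf.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
+ *     buf.tag       = tag;   // non-zero, so the buffer is selected by tag
+ *     buf.flags     = NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_YES;
+ *     if (rmctrl(hClient, hClient, NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO,
+ *                &buf, sizeof(buf)) == NV_OK) {
+ *         // buf.buffer, buf.size, buf.pos and buf.overflow are now valid
+ *     }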
+ */ + +#define NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO (0x605) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS { + NvU32 component; + NvU32 buffer; + NvU32 tag; + NvU32 size; + NvU32 flags; + NvU32 pos; + NvU32 overflow; +} NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS; + +/* flags fields and values */ +/* input */ +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE 0:0 +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_NO (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_PAUSE_YES (0x00000001) + +/* output */ +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED 0:0 +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED_NO (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_DISABLED_YES (0x00000001) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE 1:1 +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_RING (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_TYPE_NOWRAP (0x00000001) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE 2:2 +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_NO (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_EXPANDABLE_YES (0x00000001) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED 3:3 +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_NO (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_NONPAGED_YES (0x00000001) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING 5:4 +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_NONE (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_STATE (0x00000001) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_LOCKING_FULL (0x00000002) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA 6:6 +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_NO (0x00000000) +#define NV0000_CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS_OCA_YES (0x00000001) + +/* + * NV0000_CTRL_CMD_NVD_GET_NVLOG + * + * This command retrieves the specified dump block from the specified + * NvLog buffer. To retrieve the entire buffer, the caller should start + * with blockNum set to 0 and continue issuing calls with an incremented + * blockNum until the returned size value is less than + * NV0000_CTRL_NVD_NVLOG_MAX_BLOCK_SIZE. + * + * component (in) + * This parameter specifies the system dump component for which the NvLog + * dump operation is to be directed. Legal values for this parameter + * must be greater than or equal to NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG + * and less than NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED. + * buffer (in) + * This parameter specifies the NvLog buffer to dump. + * blockNum (in) + * This parameter specifies the block number for which data is to be + * dumped. + * size (in/out) + * On entry, this parameter specifies the maximum length in bytes for + * the returned data (should be set to NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE). + * On exit, it specifies the number of bytes returned. + * data (out) + * This parameter returns the data for the specified block. The size + * patameter values indicates the number of valid bytes returned. + * + * Possible status values returned are: + * NV_OK + * NVOS_ERROR_INVALID_ARGUMENT if components are invalid. 
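+ *
+ * A minimal retrieval loop matching the description above (editorial
+ * illustration, not part of the original interface text; rmctrl() is a
+ * hypothetical stand-in for the client's RM control entry point, and
+ * bufferIdx is assumed to come from NV0000_CTRL_CMD_NVD_GET_NVLOG_BUFFER_INFO):
+ *
+ *     NV0000_CTRL_NVD_GET_NVLOG_PARAMS blk = { 0 };
+ *     NV_STATUS status;
+ *
+ *     blk.component = NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG;
+ *     blk.buffer    = bufferIdx;
+ *     do {
+ *         blk.size = NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE;
+ *         status = rmctrl(hClient, hClient, NV0000_CTRL_CMD_NVD_GET_NVLOG,
+ *                         &blk, sizeof(blk));
+ *         // on NV_OK, append blk.size bytes from blk.data[] to the output
+ *         blk.blockNum++;
+ *     } while (status == NV_OK && blk.size == NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE);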
+ */
+#define NV0000_CTRL_CMD_NVD_GET_NVLOG (0x606) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_NVLOG_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE (4000)
+
+#define NV0000_CTRL_NVD_GET_NVLOG_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0000_CTRL_NVD_GET_NVLOG_PARAMS {
+    NvU32 component;
+    NvU32 buffer;
+    NvU32 blockNum;
+    NvU32 size;
+    NvU8  data[NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE];
+} NV0000_CTRL_NVD_GET_NVLOG_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_NVD_GET_RCERR_RPT
+ *
+ * This command returns a block of registers that were recorded at the time
+ * of an RC error for the current process.
+ *
+ * reqIdx:
+ *   [IN] the index of the report being requested.
+ *     The index rolls over to 0.
+ *     If the requested index is not in the circular buffer, then no data is
+ *     transferred & NV_ERR_INVALID_INDEX (indicating the specified
+ *     index is not in the table) is returned.
+ *
+ * rptIdx:
+ *   [OUT] the index of the report being returned.
+ *     If the requested index is not in the circular buffer, then the value is
+ *     undefined, no data is transferred & NV_ERR_INVALID_INDEX is returned.
+ *     If the specified index is present, but does not meet the requested
+ *     criteria (refer to the owner & processId fields), the rptIdx will be
+ *     set to a value that does not match the reqIdx, and no data will be
+ *     transferred. NV_ERR_INSUFFICIENT_PERMISSIONS is still returned.
+ *
+ * gpuTag:
+ *   [OUT] id of the GPU whose data was collected.
+ *
+ * rptTimeInNs:
+ *   [OUT] the timestamp for when the report was created.
+ *
+ * startIdx:
+ *   [OUT] the index of the oldest start record for the first report that
+ *     matches the specified criteria (refer to the owner & processId
+ *     fields). If no records match the specified criteria, this value is
+ *     undefined, the failure code NV_ERR_MISSING_TABLE_ENTRY will
+ *     be returned, and no data will be transferred.
+ *
+ * endIdx:
+ *   [OUT] the index of the newest end record for the most recent report that
+ *     matches the specified criteria (refer to the owner & processId
+ *     fields). If no records match the specified criteria, this value is
+ *     undefined, the failure code NV_ERR_MISSING_TABLE_ENTRY will
+ *     be returned, and no data will be transferred.
+ *
+ * rptType:
+ *   [OUT] indicator of what data is in the report.
+ *
+ * flags
+ *   [OUT] a set of flags indicating attributes of the record
+ *     NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_FIRST -- indicates this is the first record of a report.
+ *     NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_LAST -- indicates this is the last record of the report.
+ *     NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_RANGE_VALID -- indicates the response contains a valid
+ *       index range.
+ *       Note, this may be set when an error is returned indicating that a valid range was found, but
+ *       the event for the requested index was not.
+ *     NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID -- indicates the response contains valid data.
+ *
+ * rptCount:
+ *   [OUT] number of entries returned in the report.
+ *
+ * owner:
+ *   [IN] Entries are only returned if they have the same owner as the specified owner or the specified
+ *     owner Id is NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID.
+ *     If the requested index is not owned by the specified owner, the rptIdx
+ *     will be set to a value that does not match the reqIdx, and no data will
+ *     be transferred. NV_ERR_INSUFFICIENT_PERMISSIONS is returned.
+ * + * processId: + * [IN] Deprecated + * report: + * [OUT] array of rptCount enum/value pair entries containing the data from the report. + * entries beyond rptCount are undefined. + * + * + * Possible status values returned are: + * NV_OK -- we found & transferred the requested record. + * NV_ERR_MISSING_TABLE_ENTRY -- we don't find any records that meet the criteria. + * NV_ERR_INVALID_INDEX -- the requested index was not found in the buffer. + * NV_ERR_INSUFFICIENT_PERMISSIONS -- the requested record was found, but it did not meet the criteria. + * NV_ERR_BUSY_RETRY -- We could not access the circular buffer. + * + */ + +#define NV0000_CTRL_CMD_NVD_GET_RCERR_RPT (0x607) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_MAX_ENTRIES 200 + +// report types +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_TEST 0 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_GRSTATUS 1 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_GPCSTATUS 2 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_TYPE_MMU_FAULT_STATUS 3 + +// pseudo register enums attribute content +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_EMPTY 0x00000000 +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_OVERFLOWED 0x00000001 // number of missed entries. +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_MAX_PSEDO_REG 0x0000000f + + + +// Flags Definitions +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_FIRST 0x00000001 // indicates this is the first record of a report. +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_LAST 0x00000002 // indicates this is the last record of the report. +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_RANGE_VALID 0x00000004 // indicates this is the response contains a valid range +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID 0x00000008 // indicates this is the response contains valid data + + +// Attribute Definitions +#define TPC_REG_ATTR(gpcId, tpcId) ((gpcId << 8) | (tpcId)) +#define ROP_REG_ATTR(gpcId, ropId) ((gpcId << 8) | (ropId)) +#define SM_REG_ATTR(gpcId, tpcId, smId) ((((gpcId) << 16) | ((tpcId) << 8)) | (smId)) + +// Process Id Pseudo values +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_PROCESS_ID 0x00000000 // get report for any process ID + +#define NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID 0xFFFFFFFF // get report for any owner ID + + +typedef struct NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY { + NvU32 tag; + NvU32 value; + NvU32 attribute; +} NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY; + +#define NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS { + NvU16 reqIdx; + NvU16 rptIdx; + NvU32 GPUTag; + NvU32 rptTime; // time in seconds since 1/1/1970 + NvU16 startIdx; + NvU16 endIdx; + NvU16 rptType; + NvU32 flags; + NvU16 rptCount; + NvU32 owner; // indicating whose reports to get + NvU32 processId; // deprecated field + + NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_ENTRY report[NV0000_CTRL_CMD_NVD_RCERR_RPT_MAX_ENTRIES]; +} NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS; + +/* + * NV0000_CTRL_CMD_NVD_GET_DPC_ISR_TS + * + * This command returns the time stamp information that are collected from + * the execution of various DPCs/ISRs. This time stamp information is for + * debugging purposes only and would help with analyzing regressions and + * latencies for DPC/ISR execution times. + * + * tsBufferSize + * This field specifies the size of the buffer that the caller allocates. 
+ * tsBuffer + * THis field specifies a pointer in the callers address space to the + * buffer into which the timestamp info on DPC/ISR is to be returned. + * This buffer must at least be as big as tsBufferSize. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0000_CTRL_CMD_NVD_GET_DPC_ISR_TS (0x608) /* finn: Evaluated from "(FINN_NV01_ROOT_NVD_INTERFACE_ID << 8) | NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS { + NvU32 tsBufferSize; + NV_DECLARE_ALIGNED(NvP64 pTSBuffer, 8); +} NV0000_CTRL_NVD_GET_DPC_ISR_TS_PARAMS; + +/* _ctrl0000nvd_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h new file mode 100644 index 000000000..a97d28db7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000proc.h @@ -0,0 +1,101 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000proc.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" +#include "nvlimits.h" + +/* + * NV0000_CTRL_CMD_SET_SUB_PROCESS_ID + * + * Save the sub process ID and sub process name in client database + * subProcID + * Sub process ID + * subProcessName + * Sub process name + * + * In vGPU environment, sub process means the guest user/kernel process running + * within a single VM. It also refers to any sub process (or sub-sub process) + * within a parent process. + * + * Please refer to the wiki for more details about sub process concept: Resource_Server + * + * Possible return values are: + * NV_OK + */ +#define NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS { + NvU32 subProcessID; + char subProcessName[NV_PROC_NAME_MAX_LENGTH]; +} NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS; + +/* + * NV0000_CTRL_CMD_DISABLE_SUB_PROCESS_USERD_ISOLATION + * + * Disable sub process USERD isolation. + * bIsSubProcIsolated + * NV_TRUE to disable sub process USERD isolation + * + * USERD allocated by different domains should not be put into the same physical page. 
+ * This provides the basic security isolation because a physical page is the unit of + * granularity at which OS can provide isolation between processes. + * + * GUEST_USER: USERD allocated by guest user process + * GUEST_KERNEL: USERD allocated by guest kernel process + * GUEST_INSECURE: USERD allocated by guest/kernel process, + * INSECURE means there is no isolation between guest user and guest kernel + * HOST_USER: USERD allocated by host user process + * HOST_KERNEL: USERD allocated by host kernel process + * + * When sub process USERD isolation is disabled, we won't distinguish USERD allocated by guest + * user and guest kernel. They all belong to the GUEST_INSECURE domain. + * + * Please refer to wiki for more details: RM_USERD_Isolation + * + * Possible return values are: + * NV_OK + */ +#define NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS { + NvBool bIsSubProcessDisabled; +} NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS; + +#define NV0000_CTRL_CMD_SET_SUB_PROCESS_ID (0x901) /* finn: Evaluated from "(FINN_NV01_ROOT_PROC_INTERFACE_ID << 8) | NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_DISABLE_SUB_PROCESS_USERD_ISOLATION (0x902) /* finn: Evaluated from "(FINN_NV01_ROOT_PROC_INTERFACE_ID << 8) | NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS_MESSAGE_ID" */ + +/* _ctrl0000proc_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h new file mode 100644 index 000000000..1ccecbdd1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000syncgpuboost.h @@ -0,0 +1,115 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000syncgpuboost.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +#include "nvtypes.h" +#include "nvlimits.h" + +/* --------------------------- Macros ----------------------------------------*/ +// There are at least 2 GPUs in a sync group. Hence max is half of max devices. 
+#define NV0000_SYNC_GPU_BOOST_MAX_GROUPS (0x10) /* finn: Evaluated from "((NV_MAX_DEVICES) >> 1)" */ +#define NV0000_SYNC_GPU_BOOST_INVALID_GROUP_ID 0xFFFFFFFF + +/*-------------------------Command Prototypes---------------------------------*/ + +/*! + * Query whether SYNC GPU BOOST MANAGER is enabled or disabled. + */ +#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_INFO (0xa01) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_SYNC_GPU_BOOST_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_SYNC_GPU_BOOST_INFO_PARAMS { + // [out] Specifies if Sync Gpu Boost Manager is enabled or not. + NvBool bEnabled; +} NV0000_SYNC_GPU_BOOST_INFO_PARAMS; + +/*! + * Creates a Synchronized GPU-Boost Group (SGBG) + */ +#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_CREATE (0xa02) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS_MESSAGE_ID" */ + +/*! + * Describes a Synchronized GPU-Boost Group configuration + */ +typedef struct NV0000_SYNC_GPU_BOOST_GROUP_CONFIG { + // [in] Number of elements in @ref gpuIds + NvU32 gpuCount; + + // [in] IDs of GPUs to be put in the Sync Boost Group + NvU32 gpuIds[NV_MAX_DEVICES]; + + // [out] Unique ID of the SGBG, if created + NvU32 boostGroupId; + + // [in] If this group represents bridgeless SLI + NvBool bBridgeless; +} NV0000_SYNC_GPU_BOOST_GROUP_CONFIG; + +#define NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS { + NV0000_SYNC_GPU_BOOST_GROUP_CONFIG boostConfig; +} NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS; + +/*! + * Destroys a previously created Synchronized GPU-Boost Group(SGBG) + */ +#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_DESTROY (0xa03) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS_MESSAGE_ID" */ + +#define NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS { + // [[in] Unique ID of the SGBG to be destroyed + NvU32 boostGroupId; +} NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS; + +/*! + * Get configuration information for all Synchronized Boost Groups in the system. + */ +#define NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_INFO (0xa04) /* finn: Evaluated from "(FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID << 8) | NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS { + // [out] Number of groups retrieved. @ref NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS::boostGroups + NvU32 groupCount; + + // [out] @ref NV0000_SYNC_GPU_BOOST_GROUP_CONFIG + NV0000_SYNC_GPU_BOOST_GROUP_CONFIG pBoostGroups[NV0000_SYNC_GPU_BOOST_MAX_GROUPS]; +} NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS; + +/* _ctrl0000syncgpuboost_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h new file mode 100644 index 000000000..4391ae46d --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000system.h @@ -0,0 +1,1278 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000system.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl0000/ctrl0000base.h" + +/* NV01_ROOT (client) system control commands and parameters */ + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_FEATURES + * + * This command returns a mask of supported features for the SYSTEM category + * of the 0000 class. + * + * Valid features include: + * + * NV0000_CTRL_GET_FEATURES_SLI + * When this bit is set, SLI is supported. + * NV0000_CTRL_GET_FEATURES_UEFI + * When this bit is set, it is a UEFI system. + * NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT + * When this bit is set, EFI has initialized core channel + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_FEATURES (0x1f0) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS_MESSAGE_ID (0xF0U) + +typedef struct NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS { + NvU32 featuresMask; +} NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS; + + + +/* Valid feature values */ +#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI 0:0 +#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_FEATURES_SLI_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_FEATURES_UEFI 1:1 +#define NV0000_CTRL_SYSTEM_GET_FEATURES_UEFI_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_FEATURES_UEFI_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT 2:2 +#define NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_FEATURES_IS_EFI_INIT_TRUE (0x00000001) +/* + * NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION + * + * This command returns the current driver information. + * The first time this is called the size of strings is + * set with the greater of NV_BUILD_BRANCH_VERSION and + * NV_DISPLAY_DRIVER_TITLE. The client then allocates memory + * of size sizeOfStrings for pVersionBuffer and pTitleBuffer + * and calls the command again to receive driver info. + * + * sizeOfStrings + * This field returns the size in bytes of the pVersionBuffer and + * pTitleBuffer strings. + * pDriverVersionBuffer + * This field returns the version (NV_VERSION_STRING). 
+ * pVersionBuffer + * This field returns the version (NV_BUILD_BRANCH_VERSION). + * pTitleBuffer + * This field returns the title (NV_DISPLAY_DRIVER_TITLE). + * changelistNumber + * This field returns the changelist value (NV_BUILD_CHANGELIST_NUM). + * officialChangelistNumber + * This field returns the last official changelist value + * (NV_LAST_OFFICIAL_CHANGELIST_NUM). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION (0x101) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS { + NvU32 sizeOfStrings; + NV_DECLARE_ALIGNED(NvP64 pDriverVersionBuffer, 8); + NV_DECLARE_ALIGNED(NvP64 pVersionBuffer, 8); + NV_DECLARE_ALIGNED(NvP64 pTitleBuffer, 8); + NvU32 changelistNumber; + NvU32 officialChangelistNumber; +} NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_CPU_INFO + * + * This command returns system CPU information. + * + * type + * This field returns the processor type. + * Legal processor types include: + * Intel processors: + * P55 : P55C - MMX + * P6 : PPro + * P2 : PentiumII + * P2XC : Xeon & Celeron + * CELA : Celeron-A + * P3 : Pentium-III + * P3_INTL2 : Pentium-III w/integrated L2 (fullspeed, on die, 256K) + * P4 : Pentium 4 + * CORE2 : Core2 Duo Conroe + * AMD processors + * K62 : K6-2 w/ 3DNow + * IDT/Centaur processors + * C6 : WinChip C6 + * C62 : WinChip 2 w/ 3DNow + * Cyrix processors + * GX : MediaGX + * M1 : 6x86 + * M2 : M2 + * MGX : MediaGX w/ MMX + * Transmeta processors + * TM_CRUSOE : Transmeta Crusoe(tm) + * PowerPC processors + * PPC603 : PowerPC 603 + * PPC604 : PowerPC 604 + * PPC750 : PowerPC 750 + * + * capabilities + * This field returns the capabilities of the processor. + * Legal processor capabilities include: + * MMX : supports MMX + * SSE : supports SSE + * 3DNOW : supports 3DNow + * SSE2 : supports SSE2 + * SFENCE : supports SFENCE + * WRITE_COMBINING : supports write-combining + * ALTIVEC : supports ALTIVEC + * PUT_NEEDS_IO : requires OUT inst w/PUT updates + * NEEDS_WC_WORKAROUND : requires workaround for P4 write-combining bug + * 3DNOW_EXT : supports 3DNow Extensions + * MMX_EXT : supports MMX Extensions + * CMOV : supports CMOV + * CLFLUSH : supports CLFLUSH + * SSE3 : supports SSE3 + * NEEDS_WAR_124888 : requires write to GPU while spinning on + * : GPU value + * HT : support hyper-threading + * clock + * This field returns the processor speed in MHz. + * L1DataCacheSize + * This field returns the level 1 data (or unified) cache size + * in kilobytes. + * L2DataCacheSize + * This field returns the level 2 data (or unified) cache size + * in kilobytes. + * dataCacheLineSize + * This field returns the bytes per line in the level 1 data cache. + * numLogicalCpus + * This field returns the number of logical processors. On Intel x86 + * systems that support it, this value will incorporate the current state + * of HyperThreading. + * numPhysicalCpus + * This field returns the number of physical processors. + * name + * This field returns the CPU name in ASCII string format. 
+ * family + * Vendor defined Family and Extended Family combined + * model + * Vendor defined Model and Extended Model combined + * stepping + * Silicon stepping + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_CPU_INFO (0x102) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS { + NvU32 type; /* processor type */ + NvU32 capabilities; /* processor caps */ + NvU32 clock; /* processor speed (MHz) */ + NvU32 L1DataCacheSize; /* L1 dcache size (KB) */ + NvU32 L2DataCacheSize; /* L2 dcache size (KB) */ + NvU32 dataCacheLineSize; /* L1 dcache bytes/line */ + NvU32 numLogicalCpus; /* logial processor cnt */ + NvU32 numPhysicalCpus; /* physical processor cnt*/ + NvU8 name[52]; /* embedded cpu name */ + NvU32 family; /* Vendor defined Family and Extended Family combined */ + NvU32 model; /* Vendor defined Model and Extended Model combined */ + NvU8 stepping; /* Silicon stepping */ + NvU32 coresOnDie; /* cpu cores per die */ +} NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS; + +/* processor type values */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_UNKNOWN (0x00000000) +/* Intel types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P5 (0x00000001) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P55 (0x00000002) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P6 (0x00000003) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P2 (0x00000004) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P2XC (0x00000005) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_CELA (0x00000006) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P3 (0x00000007) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P3_INTL2 (0x00000008) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_P4 (0x00000009) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_CORE2 (0x00000010) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_CELN_M16H (0x00000011) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_CORE2_EXTRM (0x00000012) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ATOM (0x00000013) +/* AMD types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K5 (0x00000030) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K6 (0x00000031) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K62 (0x00000032) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K63 (0x00000033) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K7 (0x00000034) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K8 (0x00000035) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K10 (0x00000036) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_K11 (0x00000037) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_RYZEN (0x00000038) +/* IDT/Centaur types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_C6 (0x00000060) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_C62 (0x00000061) +/* Cyrix types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_GX (0x00000070) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_M1 (0x00000071) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_M2 (0x00000072) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_MGX (0x00000073) +/* Transmeta types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_TM_CRUSOE (0x00000080) +/* IBM types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC603 (0x00000090) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC604 (0x00000091) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_PPC750 (0x00000092) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_POWERN (0x00000093) +/* Unknown ARM architecture CPU type */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_UNKNOWN (0xA0000000) +/* ARM Ltd types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_A9 (0xA0000009) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_A15 (0xA000000F) +/* NVIDIA types */ +#define 
NV0000_CTRL_SYSTEM_CPU_TYPE_NV_DENVER_1_0 (0xA0001000) +#define NV0000_CTRL_SYSTEM_CPU_TYPE_NV_DENVER_2_0 (0xA0002000) + +/* Generic types */ +#define NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV8A_GENERIC (0xA00FF000) + +/* processor capabilities */ +#define NV0000_CTRL_SYSTEM_CPU_CAP_MMX (0x00000001) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE (0x00000002) +#define NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW (0x00000004) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE2 (0x00000008) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE (0x00000010) +#define NV0000_CTRL_SYSTEM_CPU_CAP_WRITE_COMBINING (0x00000020) +#define NV0000_CTRL_SYSTEM_CPU_CAP_ALTIVEC (0x00000040) +#define NV0000_CTRL_SYSTEM_CPU_CAP_PUT_NEEDS_IO (0x00000080) +#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WC_WORKAROUND (0x00000100) +#define NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW_EXT (0x00000200) +#define NV0000_CTRL_SYSTEM_CPU_CAP_MMX_EXT (0x00000400) +#define NV0000_CTRL_SYSTEM_CPU_CAP_CMOV (0x00000800) +#define NV0000_CTRL_SYSTEM_CPU_CAP_CLFLUSH (0x00001000) +#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WAR_190854 (0x00002000) /* deprecated */ +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE3 (0x00004000) +#define NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WAR_124888 (0x00008000) +#define NV0000_CTRL_SYSTEM_CPU_CAP_HT_CAPABLE (0x00010000) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE41 (0x00020000) +#define NV0000_CTRL_SYSTEM_CPU_CAP_SSE42 (0x00040000) +#define NV0000_CTRL_SYSTEM_CPU_CAP_AVX (0x00080000) +#define NV0000_CTRL_SYSTEM_CPU_CAP_ERMS (0x00100000) + +/* feature mask (as opposed to bugs, requirements, etc.) */ +#define NV0000_CTRL_SYSTEM_CPU_CAP_FEATURE_MASK (0x1f5e7f) /* finn: Evaluated from "(NV0000_CTRL_SYSTEM_CPU_CAP_MMX | NV0000_CTRL_SYSTEM_CPU_CAP_SSE | NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW | NV0000_CTRL_SYSTEM_CPU_CAP_SSE2 | NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE | NV0000_CTRL_SYSTEM_CPU_CAP_WRITE_COMBINING | NV0000_CTRL_SYSTEM_CPU_CAP_ALTIVEC | NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW_EXT | NV0000_CTRL_SYSTEM_CPU_CAP_MMX_EXT | NV0000_CTRL_SYSTEM_CPU_CAP_CMOV | NV0000_CTRL_SYSTEM_CPU_CAP_CLFLUSH | NV0000_CTRL_SYSTEM_CPU_CAP_SSE3 | NV0000_CTRL_SYSTEM_CPU_CAP_HT_CAPABLE | NV0000_CTRL_SYSTEM_CPU_CAP_SSE41 | NV0000_CTRL_SYSTEM_CPU_CAP_SSE42 | NV0000_CTRL_SYSTEM_CPU_CAP_AVX | NV0000_CTRL_SYSTEM_CPU_CAP_ERMS)" */ + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_CAPS + * + * This command returns the set of system capabilities in the + * form of an array of unsigned bytes. System capabilities include + * supported features and required workarounds for the system, + * each represented by a byte offset into the table and a bit + * position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0000_CTRL_SYSTEM_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the system caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. 
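To make the caps-table usage concrete, here is a minimal caller-side sketch. The rmRootControl() helper is hypothetical (it stands in for whatever RM control entry point the environment provides, e.g. an ioctl-based control path), the NvP64 conversion is shown as a plain cast (the SDK may provide a dedicated helper macro), and the NV0000_CTRL_SYSTEM_GET_CAP macro applied to the returned table is the one defined a few lines below.

#include "ctrl/ctrl0000/ctrl0000system.h"

/* Hypothetical helper: issues an RM control call on the NV01_ROOT (client) object. */
extern NV_STATUS rmRootControl(NvHandle hClient, NvU32 cmd, void *pParams, NvU32 paramsSize);

static NvBool sliPowerCapSupported(NvHandle hClient)
{
    NvU8 capsTbl[NV0000_CTRL_SYSTEM_CAPS_TBL_SIZE] = { 0 };
    NV0000_CTRL_SYSTEM_GET_CAPS_PARAMS params = { 0 };

    params.capsTblSize = NV0000_CTRL_SYSTEM_CAPS_TBL_SIZE;
    params.capsTbl     = (NvP64)(NvUPtr)capsTbl; /* client buffer, packed into NvP64 */

    if (rmRootControl(hClient, NV0000_CTRL_CMD_SYSTEM_GET_CAPS,
                      &params, sizeof(params)) != NV_OK)
        return NV_FALSE;

    /* NV0000_CTRL_SYSTEM_GET_CAP() decodes the byte_index:bit_mask encoding. */
    return NV0000_CTRL_SYSTEM_GET_CAP(capsTbl,
               NV0000_CTRL_SYSTEM_CAPS_POWER_SLI_SUPPORTED) != 0;
}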
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0000_CTRL_CMD_SYSTEM_GET_CAPS (0x103) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | 0x3" */
+
+typedef struct NV0000_CTRL_SYSTEM_GET_CAPS_PARAMS {
+ NvU32 capsTblSize;
+ NV_DECLARE_ALIGNED(NvP64 capsTbl, 8);
+} NV0000_CTRL_SYSTEM_GET_CAPS_PARAMS;
+
+/* extract cap bit setting from tbl */
+#define NV0000_CTRL_SYSTEM_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c))
+
+/* caps format is byte_index:bit_mask */
+#define NV0000_CTRL_SYSTEM_CAPS_POWER_SLI_SUPPORTED 0:0x01
+
+/* size in bytes of system caps table */
+#define NV0000_CTRL_SYSTEM_CAPS_TBL_SIZE 1
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_CHIPSET_INFO
+ *
+ * This command returns system chipset information.
+ *
+ * vendorId
+ * This parameter returns the vendor identification for the chipset.
+ * A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the chipset
+ * cannot be identified.
+ * deviceId
+ * This parameter returns the device identification for the chipset.
+ * A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the chipset
+ * cannot be identified.
+ * subSysVendorId
+ * This parameter returns the subsystem vendor identification for the
+ * chipset. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the
+ * chipset cannot be identified.
+ * subSysDeviceId
+ * This parameter returns the subsystem device identification for the
+ * chipset. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates the
+ * chipset cannot be identified.
+ * HBvendorId
+ * This parameter returns the vendor identification for the chipset's
+ * host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates
+ * the chipset's host bridge cannot be identified.
+ * HBdeviceId
+ * This parameter returns the device identification for the chipset's
+ * host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID indicates
+ * the chipset's host bridge cannot be identified.
+ * HBsubSysVendorId
+ * This parameter returns the subsystem vendor identification for the
+ * chipset's host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID
+ * indicates the chipset's host bridge cannot be identified.
+ * HBsubSysDeviceId
+ * This parameter returns the subsystem device identification for the
+ * chipset's host bridge. A value of NV0000_SYSTEM_CHIPSET_INVALID_ID
+ * indicates the chipset's host bridge cannot be identified.
+ * sliBondId
+ * This parameter returns the SLI bond identification for the chipset.
+ * vendorNameString
+ * This parameter returns the vendor name string.
+ * chipsetNameString
+ * This parameter returns the chipset name string.
+ * sliBondNameString
+ * This parameter returns the SLI bond name string.
+ * flag + * This parameter specifies NV0000_CTRL_SYSTEM_CHIPSET_FLAG_XXX flags: + * _HAS_RESIZABLE_BAR_ISSUE_YES: Chipset where the use of resizable BAR1 + * should be disabled - bug 3440153 + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_CHIPSET_INFO (0x104) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum name string length */ +#define NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH (0x0000020) + +/* invalid id */ +#define NV0000_SYSTEM_CHIPSET_INVALID_ID (0xffff) + +#define NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS { + NvU16 vendorId; + NvU16 deviceId; + NvU16 subSysVendorId; + NvU16 subSysDeviceId; + NvU16 HBvendorId; + NvU16 HBdeviceId; + NvU16 HBsubSysVendorId; + NvU16 HBsubSysDeviceId; + NvU32 sliBondId; + NvU8 vendorNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH]; + NvU8 subSysVendorNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH]; + NvU8 chipsetNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH]; + NvU8 sliBondNameString[NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH]; + NvU32 flags; +} NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS; + +#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE 0:0 +#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE_NO (0x00000000) +#define NV0000_CTRL_SYSTEM_CHIPSET_FLAG_HAS_RESIZABLE_BAR_ISSUE_YES (0x00000001) + + + +/* + * NV0000_CTRL_CMD_SYSTEM_SET_MEMORY_SIZE + * + * This command is used to set the system memory size in pages. + * + * memorySize + * This parameter specifies the system memory size in pages. All values + * are considered legal. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0000_CTRL_CMD_SYSTEM_SET_MEMORY_SIZE (0x107) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS { + NvU32 memorySize; +} NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_CLASSLIST + * + * This command is used to retrieve the set of system-level classes + * supported by the platform. + * + * numClasses + * This parameter returns the number of valid entries in the returned + * classes[] list. This parameter will not exceed + * Nv0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE. + * classes + * This parameter returns the list of supported classes + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_CLASSLIST (0x108) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS_MESSAGE_ID" */ + +/* maximum number of classes returned in classes[] array */ +#define NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE (32) + +#define NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS { + NvU32 numClasses; + NvU32 classes[NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE]; +} NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT + * + * This command is used to send triggered mobile related system events + * to the RM. 
+ *
+ * eventType
+ * This parameter indicates the triggered event type. This parameter
+ * should specify a valid NV0000_CTRL_SYSTEM_EVENT_TYPE value.
+ * eventData
+ * This parameter specifies the type-dependent event data associated
+ * with EventType. This parameter should specify a valid
+ * NV0000_CTRL_SYSTEM_EVENT_DATA value.
+ * bEventDataForced
+ * This parameter specifies whether or not to trust the current
+ * Lid/Dock state. This parameter should specify a valid
+ * NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED value.
+
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_PARAM_STRUCT
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ * Sync this up (#defines) with one in nvapi.spec!
+ * (NV_ACPI_EVENT_TYPE & NV_ACPI_EVENT_DATA)
+ */
+#define NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT (0x110) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS {
+ NvU32 eventType;
+ NvU32 eventData;
+ NvBool bEventDataForced;
+} NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS;
+
+/* valid eventType values */
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_LID_STATE (0x00000000)
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE (0x00000001)
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_DOCK_STATE (0x00000002)
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_LID (0x00000003)
+#define NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_DOCK (0x00000004)
+
+/* valid eventData values */
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_LID_OPEN (0x00000000)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_LID_CLOSED (0x00000001)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_BATTERY (0x00000000)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_AC (0x00000001)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_UNDOCKED (0x00000000)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_DOCKED (0x00000001)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_DSM (0x00000000)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_DCS (0x00000001)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_NVIF (0x00000002)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_ACPI (0x00000003)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_POLL (0x00000004)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_COUNT (0x5) /* finn: Evaluated from "(NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_LID_POLL + 1)" */
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_DSM (0x00000000)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_DCS (0x00000001)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_NVIF (0x00000002)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_ACPI (0x00000003)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_POLL (0x00000004)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_COUNT (0x5) /* finn: Evaluated from "(NV0000_CTRL_SYSTEM_EVENT_DATA_TRUST_DOCK_POLL + 1)" */
+
+/* valid bEventDataForced values */
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED_FALSE (0x00000000)
+#define NV0000_CTRL_SYSTEM_EVENT_DATA_FORCED_TRUE (0x00000001)
+
+/*
+ * NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE
+ *
+ * This command is used to query the platform type.
+ *
+ * systemType
+ * This parameter returns the type of the system.
+ * Legal values for this parameter include:
+ * NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_DESKTOP
+ * The system is a desktop platform.
+ * NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_GENERIC
+ * The system is a mobile (non-Toshiba) platform.
+ * NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_TOSHIBA
+ * The system is a mobile Toshiba platform.
+ * NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_SOC + * The system is a system-on-a-chip (SOC) platform. + * + + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE (0x111) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS { + NvU32 systemType; +} NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS; + +/* valid systemType values */ +#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_DESKTOP (0x000000) +#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_GENERIC (0x000001) +#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_TOSHIBA (0x000002) +#define NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_SOC (0x000003) + + + + +/* + * NV0000_CTRL_CMD_SYSTEM_DEBUG_RMMSG_CTRL + * + * This command controls the current RmMsg filters. + * + * It is only supported if RmMsg is enabled (e.g. debug builds). + * + * cmd + * GET - Gets the current RmMsg filter string. + * SET - Sets the current RmMsg filter string. + * + * count + * The length of the RmMsg filter string. + * + * data + * The RmMsg filter string. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0000_CTRL_CMD_SYSTEM_DEBUG_RMMSG_CTRL (0x121) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE 512 + +#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_GET (0x00000000) +#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_SET (0x00000001) + +#define NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS { + NvU32 cmd; + NvU32 count; + NvU8 data[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE]; +} NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS; + +/* + * NV0000_CTRL_SYSTEM_HWBC_INFO + * + * This structure contains information about the HWBC (BR04) specified by + * hwbcId. + * + * hwbcId + * This field specifies the HWBC ID. + * firmwareVersion + * This field returns the version of the firmware on the HWBC (BR04), if + * present. This is a packed binary number of the form 0x12345678, which + * corresponds to a firmware version of 12.34.56.78. + * subordinateBus + * This field returns the subordinate bus number of the HWBC (BR04). + * secondaryBus + * This field returns the secondary bus number of the HWBC (BR04). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +typedef struct NV0000_CTRL_SYSTEM_HWBC_INFO { + NvU32 hwbcId; + NvU32 firmwareVersion; + NvU32 subordinateBus; + NvU32 secondaryBus; +} NV0000_CTRL_SYSTEM_HWBC_INFO; + +#define NV0000_CTRL_SYSTEM_HWBC_INVALID_ID (0xFFFFFFFF) + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_HWBC_INFO + * + * This command returns information about all Hardware Broadcast (HWBC) + * devices present in the system that are BR04s. To get the complete + * list of HWBCs in the system, all GPUs present in the system must be + * initialized. See the description of NV0000_CTRL_CMD_GPU_ATTACH_IDS to + * accomplish this. + * + * hwbcInfo + * This field is an array of NV0000_CTRL_SYSTEM_HWBC_INFO structures into + * which HWBC information is placed. There is one entry for each HWBC + * present in the system. 
Valid entries are contiguous, invalid entries + * have the hwbcId equal to NV0000_CTRL_SYSTEM_HWBC_INVALID_ID. If no HWBC + * is present in the system, all the entries would be marked invalid, but + * the return value would still be SUCCESS. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_HWBC_INFO (0x124) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_MAX_HWBCS (0x00000080) + +#define NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS { + NV0000_CTRL_SYSTEM_HWBC_INFO hwbcInfo[NV0000_CTRL_SYSTEM_MAX_HWBCS]; +} NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS; + + + +/* + * Deprecated. Please use NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 instead. + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS (0x127) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_MESSAGE_ID" */ + +/* + * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED must remain equal to the square of + * NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS due to Check RM parsing issues. + * NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS is the maximum size of GPU groups + * allowed for batched P2P caps queries provided by the RM control + * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX. + */ +#define NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS 32 +#define NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED 1024 +#define NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS 8 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER 0xffffffff + +/* P2P capabilities status index values */ +#define NV0000_CTRL_P2P_CAPS_INDEX_READ 0 +#define NV0000_CTRL_P2P_CAPS_INDEX_WRITE 1 +#define NV0000_CTRL_P2P_CAPS_INDEX_NVLINK 2 +#define NV0000_CTRL_P2P_CAPS_INDEX_ATOMICS 3 +#define NV0000_CTRL_P2P_CAPS_INDEX_PROP 4 +#define NV0000_CTRL_P2P_CAPS_INDEX_LOOPBACK 5 +#define NV0000_CTRL_P2P_CAPS_INDEX_PCI 6 +#define NV0000_CTRL_P2P_CAPS_INDEX_C2C 7 +#define NV0000_CTRL_P2P_CAPS_INDEX_PCI_BAR1 8 + +#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE 9 + + +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS_MESSAGE_ID (0x27U) + +typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS { + NvU32 gpuIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; + NvU32 gpuCount; + NvU32 p2pCaps; + NvU32 p2pOptimalReadCEs; + NvU32 p2pOptimalWriteCEs; + NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE]; + NV_DECLARE_ALIGNED(NvP64 busPeerIds, 8); +} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS; + +/* valid p2pCaps values */ +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED 0:0 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED 1:1 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED 2:2 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED 3:3 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED 4:4 +#define 
NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED 5:5 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED 6:6 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED 7:7 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED 8:8 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED 9:9 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED 10:10 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED_TRUE (0x00000001) + + +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED 12:12 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED_TRUE (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED 13:13 +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED_FALSE (0x00000000) +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED_TRUE (0x00000001) + +/* P2P status codes */ +#define NV0000_P2P_CAPS_STATUS_OK (0x00) +#define NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED (0x01) +#define NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED (0x02) +#define NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED (0x03) +#define NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY (0x04) +#define NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED (0x05) + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 + * + * This command returns peer to peer capabilities present between GPUs. + * Valid requests must present a list of GPU Ids. + * + * [in] gpuIds + * This member contains the array of GPU IDs for which we query the P2P + * capabilities. Valid entries are contiguous, beginning with the first + * entry in the list. + * [in] gpuCount + * This member contains the number of GPU IDs stored in the gpuIds[] array. + * [out] p2pCaps + * This member returns the peer to peer capabilities discovered between the + * GPUs. Valid p2pCaps values include: + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED + * When this bit is set, peer to peer writes between subdevices owned + * by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED + * When this bit is set, peer to peer reads between subdevices owned + * by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED + * When this bit is set, peer to peer PROP between subdevices owned + * by this device are supported. 
This is enabled by default + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED + * When this bit is set, PCI is supported for all P2P between subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED + * When this bit is set, NVLINK is supported for all P2P between subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED + * When this bit is set, peer to peer atomics between subdevices owned + * by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED + * When this bit is set, peer to peer loopback is supported for subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED + * When this bit is set, indirect peer to peer writes between subdevices + * owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED + * When this bit is set, indirect peer to peer reads between subdevices + * owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED + * When this bit is set, indirect peer to peer atomics between + * subdevices owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED + * When this bit is set, indirect NVLINK is supported for subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED + * When this bit is set, C2C P2P is supported between the GPUs + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_BAR1_SUPPORTED + * When this bit is set, BAR1 P2P is supported between the GPUs + * mentioned in @ref gpuIds + * [out] p2pOptimalReadCEs + * For a pair of GPUs, return mask of CEs to use for p2p reads over Nvlink + * [out] p2pOptimalWriteCEs + * For a pair of GPUs, return mask of CEs to use for p2p writes over Nvlink + * [out] p2pCapsStatus + * This member returns status of all supported p2p capabilities. Valid + * status values include: + * NV0000_P2P_CAPS_STATUS_OK + * P2P capability is supported. + * NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED + * Chipset doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED + * GPU doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED + * IOH topology isn't supported. For e.g. root ports are on different + * IOH. + * NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY + * P2P Capability is disabled by a regkey. + * NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED + * P2P Capability is not supported. + * NV0000_P2P_CAPS_STATUS_NVLINK_SETUP_FAILED + * Indicates that NvLink P2P link setup failed. + * [out] busPeerIds + * Peer ID matrix. It is a one-dimentional array. + * busPeerIds[X * gpuCount + Y] maps from index X to index Y in + * the gpuIds[] table. For invalid or non-existent peer busPeerIds[] + * has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER. 
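A minimal sketch of the call and of the busPeerIds indexing described above; the rmRootControl() helper is hypothetical and stands in for whatever RM control entry point is available to the client, and the two GPU IDs are assumed to be already attached.

#include "ctrl/ctrl0000/ctrl0000system.h"

/* Hypothetical helper: issues an RM control call on the NV01_ROOT (client) object. */
extern NV_STATUS rmRootControl(NvHandle hClient, NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS queryP2pCaps(NvHandle hClient, NvU32 gpuIdA, NvU32 gpuIdB)
{
    NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS params = { 0 };
    NvU32 peerIdAtoB;
    NV_STATUS status;

    params.gpuCount  = 2;
    params.gpuIds[0] = gpuIdA;
    params.gpuIds[1] = gpuIdB;

    status = rmRootControl(hClient, NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2,
                           &params, sizeof(params));
    if (status != NV_OK)
        return status;

    /* busPeerIds is a flat gpuCount x gpuCount matrix: [X * gpuCount + Y] maps X to Y. */
    peerIdAtoB = params.busPeerIds[0 * params.gpuCount + 1];
    if (peerIdAtoB == NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER)
        return NV_ERR_NOT_SUPPORTED; /* no peer mapping from A to B */

    return NV_OK;
}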
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ + + +#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 (0x12b) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS_MESSAGE_ID (0x2BU) + +typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS { + NvU32 gpuIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; + NvU32 gpuCount; + NvU32 p2pCaps; + NvU32 p2pOptimalReadCEs; + NvU32 p2pOptimalWriteCEs; + NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE]; + NvU32 busPeerIds[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS_SQUARED]; +} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX + * + * This command returns peer to peer capabilities present between all pairs of + * GPU IDs {(a, b) : a in gpuIdGrpA and b in gpuIdGrpB}. This can be used to + * collect all P2P capabilities in the system - see the SRT: + * NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX_TEST + * for a demonstration. + * + * The call will query for all pairs between set A and set B, and returns + * results in both link directions. The results are two-dimensional arrays where + * the first dimension is the index within the set-A array of one GPU ID under + * consideration, and the second dimension is the index within the set-B array + * of the other GPU ID under consideration. + * + * That is, the result arrays are *ALWAYS* to be indexed first with the set-A + * index, then with the set-B index. The B-to-A direction of results are put in + * the b2aOptimal(Read|Write)CEs. This makes it unnecessary to call the query + * twice, since the usual use case requires both directions. + * + * If a set is being compared against itself (by setting grpBCount to 0), then + * the result matrices are symmetric - it doesn't matter which index is first. + * However, the choice of indices is effectively a choice of which ID is "B" and + * which is "A" for the "a2b" and "b2a" directional results. + * + * [in] grpACount + * This member contains the number of GPU IDs stored in the gpuIdGrpA[] + * array. Must be >= 0. + * [in] grpBCount + * This member contains the number of GPU IDs stored in the gpuIdGrpB[] + * array. Can be == 0 to specify a check of group A against itself. + * [in] gpuIdGrpA + * This member contains the array of GPU IDs in "group A", each of which + * will have its P2P capabilities returned with respect to each GPU ID in + * "group B". Valid entries are contiguous, beginning with the first entry + * in the list. + * [in] gpuIdGrpB + * This member contains the array of GPU IDs in "group B", each of which + * will have its P2P capabilities returned with respect to each GPU ID in + * "group A". Valid entries are contiguous, beginning with the first entry + * in the list. May be equal to gpuIdGrpA, but best performance requires + * that the caller specifies grpBCount = 0 in this case, and ignores this. + * [out] p2pCaps + * This member returns the peer to peer capabilities discovered between the + * pairs of input GPUs between the groups, indexed by [A_index][B_index]. + * Valid p2pCaps values include: + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED + * When this bit is set, peer to peer writes between subdevices owned + * by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED + * When this bit is set, peer to peer reads between subdevices owned + * by this device are supported. 
+ * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED + * When this bit is set, peer to peer PROP between subdevices owned + * by this device are supported. This is enabled by default + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED + * When this bit is set, PCI is supported for all P2P between subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED + * When this bit is set, NVLINK is supported for all P2P between subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED + * When this bit is set, peer to peer atomics between subdevices owned + * by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED + * When this bit is set, peer to peer loopback is supported for subdevices + * owned by this device. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED + * When this bit is set, indirect peer to peer writes between subdevices + * owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED + * When this bit is set, indirect peer to peer reads between subdevices + * owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED + * When this bit is set, indirect peer to peer atomics between + * subdevices owned by this device are supported. + * NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED + * When this bit is set, indirect NVLINK is supported for subdevices + * owned by this device. + * [out] a2bOptimalReadCes + * For a pair of GPUs, return mask of CEs to use for p2p reads over Nvlink + * in the A-to-B direction. + * [out] a2bOptimalWriteCes + * For a pair of GPUs, return mask of CEs to use for p2p writes over Nvlink + * in the A-to-B direction. + * [out] b2aOptimalReadCes + * For a pair of GPUs, return mask of CEs to use for p2p reads over Nvlink + * in the B-to-A direction. + * [out] b2aOptimalWriteCes + * For a pair of GPUs, return mask of CEs to use for p2p writes over Nvlink + * in the B-to-A direction. 
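A sketch of the matrix form, showing group A compared against itself (grpBCount == 0) and the [A_index][B_index] addressing of the result arrays; as before, the rmRootControl() helper is hypothetical.

#include "ctrl/ctrl0000/ctrl0000system.h"

extern NV_STATUS rmRootControl(NvHandle hClient, NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS queryP2pMatrix(NvHandle hClient, const NvU32 *gpuIds, NvU32 count,
                                NvU32 aIdx, NvU32 bIdx)
{
    NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS params = { 0 };
    NvU32 caps, readCEsAtoB, readCEsBtoA;
    NvU32 i;
    NV_STATUS status;

    params.grpACount = count;
    params.grpBCount = 0; /* 0 means: compare group A against itself */
    for (i = 0; i < count && i < NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS; i++)
        params.gpuIdGrpA[i] = gpuIds[i];

    status = rmRootControl(hClient, NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX,
                           &params, sizeof(params));
    if (status != NV_OK)
        return status;

    /* Result arrays are always indexed [A_index][B_index]. */
    caps        = params.p2pCaps[aIdx][bIdx];
    readCEsAtoB = params.a2bOptimalReadCes[aIdx][bIdx];
    readCEsBtoA = params.b2aOptimalReadCes[aIdx][bIdx];
    (void)caps; (void)readCEsAtoB; (void)readCEsBtoA;
    return NV_OK;
}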
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ + + + +#define NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_MATRIX (0x13a) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_MESSAGE_ID" */ + +typedef NvU32 NV0000_CTRL_P2P_CAPS_MATRIX_ROW[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; +#define NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS_MESSAGE_ID (0x3AU) + +typedef struct NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS { + NvU32 grpACount; + NvU32 grpBCount; + NvU32 gpuIdGrpA[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NvU32 gpuIdGrpB[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW p2pCaps[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW a2bOptimalReadCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW a2bOptimalWriteCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW b2aOptimalReadCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; + NV0000_CTRL_P2P_CAPS_MATRIX_ROW b2aOptimalWriteCes[NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS]; +} NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS; + + + +#define GPS_MAX_COUNTERS_PER_BLOCK 32 +typedef struct NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS { + NvU32 objHndl; + NvU32 blockId; + NvU32 nextExpectedSampleTimems; + NvU32 countersReq; + NvU32 countersReturned; + NvU32 counterBlock[GPS_MAX_COUNTERS_PER_BLOCK]; +} NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS; + +#define NV0000_CTRL_CMD_SYSTEM_GPS_GET_PERF_SENSORS (0x12c) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | 0x2C" */ + +#define NV0000_CTRL_CMD_SYSTEM_GPS_GET_EXTENDED_PERF_SENSORS (0x12e) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | 0x2E" */ + + + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO + * + * This command returns the current host driver, host OS and + * plugin information. It is only valid when VGX is setup. + * szHostDriverVersionBuffer + * This field returns the host driver version (NV_VERSION_STRING). + * szHostVersionBuffer + * This field returns the host driver version (NV_BUILD_BRANCH_VERSION). + * szHostTitleBuffer + * This field returns the host driver title (NV_DISPLAY_DRIVER_TITLE). + * szPluginTitleBuffer + * This field returns the plugin build title (NV_DISPLAY_DRIVER_TITLE). + * szHostUnameBuffer + * This field returns the call of 'uname' on the host OS. + * iHostChangelistNumber + * This field returns the changelist value of the host driver (NV_BUILD_CHANGELIST_NUM). + * iPluginChangelistNumber + * This field returns the changelist value of the plugin (NV_BUILD_CHANGELIST_NUM). 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE 256 +#define NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO (0x133) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS_MESSAGE_ID (0x33U) + +typedef struct NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS { + char szHostDriverVersionBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + char szHostVersionBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + char szHostTitleBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + char szPluginTitleBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + char szHostUnameBuffer[NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE]; + NvU32 iHostChangelistNumber; + NvU32 iPluginChangelistNumber; +} NV0000_CTRL_SYSTEM_GET_VGX_SYSTEM_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_GPUS_POWER_STATUS + * + * This command returns the power status of the GPUs in the system, successfully attached or not because of + * insufficient power. It is supported on Kepler and up only. + * gpuCount + * This field returns the count into the following arrays. + * busNumber + * This field returns the busNumber of a GPU. + * gpuExternalPowerStatus + * This field returns the corresponding external power status: + * NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_CONNECTED + * NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_NOT_CONNECTED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0000_CTRL_CMD_SYSTEM_GET_GPUS_POWER_STATUS (0x134) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS_MESSAGE_ID (0x34U) + +typedef struct NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS { + NvU8 gpuCount; + NvU8 gpuBus[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; + NvU8 gpuExternalPowerStatus[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; +} NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS; + +/* Valid gpuExternalPowerStatus values */ +#define NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_CONNECTED 0 +#define NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_NOT_CONNECTED 1 + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS + * + * This command returns the caller's API access privileges using + * this client handle. + * + * privStatus + * This parameter returns a mask of possible access privileges: + * NV0000_CTRL_SYSTEM_PRIVILEGED_STATUS_PRIV_USER_FLAG + * The caller is running with elevated privileges + * NV0000_CTRL_SYSTEM_PRIVILEGED_STATUS_ROOT_HANDLE_FLAG + * Client is of NV01_ROOT class. + * NV0000_CTRL_SYSTEM_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG + * Client has PRIV bit set. 
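Since the returned value is a plain flag byte, a caller can test its own privilege level with a simple mask once the control completes; a small sketch (hypothetical rmRootControl() helper, flag names taken from the defines listed below) follows.

#include "ctrl/ctrl0000/ctrl0000system.h"

extern NV_STATUS rmRootControl(NvHandle hClient, NvU32 cmd, void *pParams, NvU32 paramsSize);

static NvBool callerIsPrivileged(NvHandle hClient)
{
    NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS params = { 0 };

    if (rmRootControl(hClient, NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS,
                      &params, sizeof(params)) != NV_OK)
        return NV_FALSE;

    /* Caller runs with elevated privileges if the PRIV_USER flag is set. */
    return (params.privStatusFlags &
            NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG) != 0;
}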
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + + +#define NV0000_CTRL_CMD_SYSTEM_GET_PRIVILEGED_STATUS (0x135) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS_MESSAGE_ID (0x35U) + +typedef struct NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS { + NvU8 privStatusFlags; +} NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS; + + +/* Valid privStatus values */ +#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG (0x00000001) +#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_KERNEL_HANDLE_FLAG (0x00000002) +#define NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG (0x00000004) + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_FABRIC_STATUS + * + * The fabric manager (FM) notifies RM that fabric (system) is ready for peer to + * peer (P2P) use or still initializing the fabric. This command allows clients + * to query fabric status to allow P2P operations. + * + * Note, on systems where FM isn't used, RM just returns _SKIP. + * + * fabricStatus + * This parameter returns current fabric status: + * NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP + * NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED + * NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS + * NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_INVALID_PARAM_STRUCT + */ + +typedef enum NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS { + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP = 1, + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED = 2, + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS = 3, + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED = 4, +} NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS; + +#define NV0000_CTRL_CMD_SYSTEM_GET_FABRIC_STATUS (0x136) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS { + NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS fabricStatus; +} NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS; + + + +/* + * NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID + * + * This command is used to get a unique identifier for the instance of RM. + * The returned value will only change when the driver is reloaded. A previous + * value will never be reused on a given machine. 
+ * + * rm_instance_id; + * The instance ID of the current RM instance + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_RM_INSTANCE_ID (0x139) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS_MESSAGE_ID" */ + +/* + * NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS + */ +#define NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS_MESSAGE_ID (0x39U) + +typedef struct NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS { + NV_DECLARE_ALIGNED(NvU64 rm_instance_id, 8); +} NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS; + + + +/* + * NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT + * + * This API is used to sync the external fabric management status with + * GSP-RM + * + * bExternalFabricMgmt + * Whether fabric is externally managed + * + * Possible status values returned are: + * NV_OK + */ +#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT (0x13c) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS_MESSAGE_ID (0x3CU) + +typedef struct NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS { + NvBool bExternalFabricMgmt; +} NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS; + +/* + * NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO + * + * This API is used to get information about the RM client + * database. + * + * clientCount [OUT] + * This field indicates the number of clients currently allocated. + * + * resourceCount [OUT] + * This field indicates the number of resources currently allocated + * across all clients. + * + */ +#define NV0000_CTRL_CMD_SYSTEM_GET_CLIENT_DATABASE_INFO (0x13d) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS_MESSAGE_ID (0x3DU) + +typedef struct NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS { + NvU32 clientCount; + NV_DECLARE_ALIGNED(NvU64 resourceCount, 8); +} NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2 + * + * This command returns the current driver information in + * statically sized character arrays. + * + * driverVersionBuffer + * This field returns the version (NV_VERSION_STRING). + * versionBuffer + * This field returns the version (NV_BUILD_BRANCH_VERSION). + * titleBuffer + * This field returns the title (NV_DISPLAY_DRIVER_TITLE). + * changelistNumber + * This field returns the changelist value (NV_BUILD_CHANGELIST_NUM). + * officialChangelistNumber + * This field returns the last official changelist value + * (NV_LAST_OFFICIAL_CHANGELIST_NUM). 
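+ *
+ * A minimal usage sketch (NvRmControl() stands in for the caller's RM control
+ * path and is an assumption of this sketch, not part of this header):
+ *
+ *   NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS verParams = { 0 };
+ *
+ *   NvRmControl(hClient, hClient,
+ *               NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2,
+ *               &verParams, sizeof(verParams));
+ *
+ *   printf("driver %s, changelist %u\n",
+ *          verParams.driverVersionBuffer, verParams.changelistNumber);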
+ * + * Possible status values returned are: + * NV_OK + */ + +#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE 256 +#define NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2 (0x13e) /* finn: Evaluated from "(FINN_NV01_ROOT_SYSTEM_INTERFACE_ID << 8) | NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS_MESSAGE_ID (0x3EU) + +typedef struct NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS { + char driverVersionBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE]; + char versionBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE]; + char titleBuffer[NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_MAX_STRING_SIZE]; + NvU32 changelistNumber; + NvU32 officialChangelistNumber; +} NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS; + +/* _ctrl0000system_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h new file mode 100644 index 000000000..33696f640 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000unix.h @@ -0,0 +1,433 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000unix.finn +// + + + + +#include "ctrl/ctrl0000/ctrl0000base.h" + +#include "ctrl/ctrlxxxx.h" +/* NV01_ROOT (client) Linux control commands and parameters */ + +/* + * NV0000_CTRL_CMD_OS_UNIX_FLUSH_USER_CACHE + * + * This command may be used to force a cache flush for a range of virtual addresses in + * memory. Can be used for either user or kernel addresses. + * + * offset, length + * These parameters specify the offset within the memory block + * and the number of bytes to flush/invalidate + * cacheOps + * This parameter flags whether to flush, invalidate or do both. + * Possible values are: + * NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH + * NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE + * NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE + * hDevice + * This parameter is the handle to the device + * hObject + * This parameter is the handle to the memory structure being operated on. 
+ *   internalOnly
+ *     Intended for internal use unless the client is running in the MODS UNIX
+ *     environment, in which case this parameter specifies the virtual address
+ *     of the memory block to flush.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_COMMAND
+ *   NV_ERR_INVALID_LIMIT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV0000_CTRL_CMD_OS_UNIX_FLUSH_USER_CACHE (0x3d02) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 offset, 8);
+    NV_DECLARE_ALIGNED(NvU64 length, 8);
+    NvU32    cacheOps;
+    NvHandle hDevice;
+    NvHandle hObject;
+    NV_DECLARE_ALIGNED(NvU64 internalOnly, 8);
+} NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS;
+
+#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH            (0x00000001)
+#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE       (0x00000002)
+#define NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE (0x00000003)
+
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR
+ *
+ * This command is used to get the control file descriptor.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV0000_CTRL_CMD_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR (0x3d04) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x4" */
+
+typedef struct NV0000_CTRL_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR_PARAMS {
+    NvS32 fd;
+} NV0000_CTRL_OS_UNIX_GET_CONTROL_FILE_DESCRIPTOR_PARAMS;
+
+typedef enum NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE {
+    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_NONE = 0,
+    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM = 1,
+} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE;
+
+typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECT {
+    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE type;
+
+    union {
+        struct {
+            NvHandle hDevice;
+            NvHandle hParent;
+            NvHandle hObject;
+        } rmObject;
+    } data;
+} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT;
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD
+ *
+ * This command may be used to export an NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE
+ * object to a file descriptor.
+ *
+ * Note that the 'fd' parameter is an input parameter at the kernel level, but
+ * an output parameter for usermode RMAPI clients -- the RMAPI library will
+ * open a new FD automatically if a usermode RMAPI client exports an object.
+ *
+ * Kernel-mode RM clients can export an object to an FD in two steps:
+ * 1. The user client calls this RMControl with the flag 'EMPTY_FD_TRUE' to create
+ *    an empty FD to receive the object, then passes that FD to the kernel-mode
+ *    RM client.
+ * 2. The kernel-mode RM client fills in the rest of the
+ *    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS as usual and calls RM to
+ *    associate its desired RM object with the empty FD from its usermode
+ *    client.
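+ *
+ * A minimal usage sketch for the simple usermode case, where the RMAPI library
+ * opens the FD on the caller's behalf (hDevice, hParent and hMemory are
+ * whatever handles the caller wants to export, and NvRmControl() stands in for
+ * the caller's RM control path; both are assumptions of this sketch):
+ *
+ *   NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { 0 };
+ *
+ *   exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
+ *   exportParams.object.data.rmObject.hDevice = hDevice;
+ *   exportParams.object.data.rmObject.hParent = hParent;
+ *   exportParams.object.data.rmObject.hObject = hMemory;
+ *   exportParams.fd = -1;   // filled in on return for usermode clients
+ *
+ *   NvRmControl(hClient, hClient,
+ *               NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD,
+ *               &exportParams, sizeof(exportParams));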
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_PARAMETER
+ */
+#define NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD (0x3d05) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS {
+    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT object; /* IN */
+    NvS32                             fd;     /* IN/OUT */
+    NvU32                             flags;  /* IN */
+} NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS;
+
+/*
+ * If EMPTY_FD is TRUE, the 'fd' will be created but no object will be
+ * associated with it. The hDevice parameter is still required, to determine
+ * the correct device node on which to create the file descriptor.
+ * (An empty FD can then be passed to a kernel-mode driver to associate it with
+ * an actual object.)
+ */
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD       0:0
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD_FALSE (0x00000000)
+#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS_EMPTY_FD_TRUE  (0x00000001)
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD
+ *
+ * This command may be used to import back an
+ * NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE object from a file descriptor.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_PARAMETER
+ */
+#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD (0x3d06) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS_MESSAGE_ID" */
+
+#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS {
+    NvS32                             fd;     /* IN */
+    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT object; /* IN */
+} NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_OS_GET_GPU_INFO
+ *
+ * This command will query the OS-specific info for the specified GPU.
+ *
+ *   gpuId
+ *     This parameter should specify a valid GPU ID value. If there
+ *     is no GPU present with the specified ID, a status of
+ *     NV_ERR_INVALID_ARGUMENT is returned.
+ *   minorNum
+ *     This parameter returns the minor number of the device node.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0000_CTRL_CMD_OS_GET_GPU_INFO (0x3d07) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x7" */
+
+typedef struct NV0000_CTRL_OS_GET_GPU_INFO_PARAMS {
+    NvU32 gpuId;    /* IN */
+    NvU32 minorNum; /* OUT */
+} NV0000_CTRL_OS_GET_GPU_INFO_PARAMS;
+
+/*
+ * NV0000_CTRL_CMD_OS_UNIX_GET_EXPORT_OBJECT_INFO
+ *
+ * This command will query the deviceInstance for the specified FD
+ * which is referencing an exported object.
+ *
+ *   fd
+ *     This parameter specifies the file descriptor that references an
+ *     exported object.
+ *
+ *   deviceInstance
+ *     This parameter returns the deviceInstance on which the object is located.
+ *
+ *   maxObjects
+ *     This parameter returns the maximum number of object handles that may be
+ *     contained in the file descriptor.
+ *
+ *   metadata
+ *     This parameter returns the user metadata passed into the
+ *     _EXPORT_OBJECTS_TO_FD control call.
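+ *
+ * A minimal usage sketch (exportFd is an FD produced by one of the export
+ * controls in this file; NvRmControl() again stands in for the caller's RM
+ * control path and is not part of this header):
+ *
+ *   NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS infoParams = { 0 };
+ *
+ *   infoParams.fd = exportFd;
+ *
+ *   NvRmControl(hClient, hClient,
+ *               NV0000_CTRL_CMD_OS_UNIX_GET_EXPORT_OBJECT_INFO,
+ *               &infoParams, sizeof(infoParams));
+ *
+ *   // infoParams.deviceInstance, infoParams.maxObjects and
+ *   // infoParams.metadata now describe the exported object(s).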
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OBJECT_NOT_FOUND + */ + +#define NV0000_CTRL_CMD_OS_UNIX_GET_EXPORT_OBJECT_INFO (0x3d08) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE 64 + +#define NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS { + NvS32 fd; /* IN */ + NvU32 deviceInstance; /* OUT */ + NvU16 maxObjects; /* OUT */ + NvU8 metadata[NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* OUT */ +} NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS; + +/* + * NV0000_CTRL_CMD_OS_UNIX_REFRESH_RMAPI_DEVICE_LIST + * + * This command will re-fetch probed GPUs information and update RMAPI library's + * internal detected GPU context information accordingly. Without this, GPUs + * attached to RM after RMAPI client initialization will not be accessible and + * all RMAPI library calls will fail on them. + * Currently this is used by NVSwitch Fabric Manager in conjunction with NVSwitch + * Shared Virtualization feature where GPUs are hot-plugged to OS/RM (by Hypervisor) + * and Fabric Manager is signaled externally by the Hypervisor to initialize those GPUs. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_OPERATING_SYSTEM + */ + +#define NV0000_CTRL_CMD_OS_UNIX_REFRESH_RMAPI_DEVICE_LIST (0x3d09) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | 0x9" */ + +/* + * This control call has been deprecated. It will be deleted soon. + * Use NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD (singular) or + * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD (plural) instead. + */ +#define NV0000_CTRL_CMD_OS_UNIX_CREATE_EXPORT_OBJECT_FD (0x3d0a) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_BUFFER_SIZE NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE + +#define NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS { + NvHandle hDevice; /* IN */ + NvU16 maxObjects; /* IN */ + NvU8 metadata[NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* IN */ + NvS32 fd; /* IN/OUT */ +} NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS; + +/* + * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD + * + * Exports RM handles to an fd that was provided, also creates an FD if + * requested. + * + * The objects in the 'handles' array are exported into the fd + * as the range [index, index + numObjects). + * + * If index + numObjects is greater than the maxObjects value used + * to create the file descriptor, NV_ERR_INVALID_PARAMETER is returned. + * + * If 'numObjects and 'index' overlap with a prior call, the newer call's RM object + * handles will overwrite the previously exported handles from the previous call. + * This overlapping behavior can also be used to unexport a handle by setting + * the appropriate object in 'objects' to 0. + * + * fd + * A file descriptor. If -1, a new FD will be created. + * + * hDevice + * The owning device of the objects to be exported (must be the same for + * all objects). + * + * maxObjects + * The total number of objects that the client wishes to export to the FD. + * This parameter will be honored only when the FD is getting created. 
+ * + * metadata + * A buffer for clients to write some metadata to and pass to the importing + * client. This parameter will be honored only when the FD is getting + * created. + * + * objects + * Array of RM object handles to export to the fd. + * + * numObjects + * The number of handles the user wishes to export in this call. + * + * index + * The index into the export fd at which to start exporting the handles in + * 'objects' (for use in iterative calls). + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OUT_OF_RANGE + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + */ +#define NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD (0x3d0b) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS_MESSAGE_ID" */ + +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS 512 + +#define NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS { + NvS32 fd; /* IN/OUT */ + NvHandle hDevice; /* IN */ + NvU16 maxObjects; /* IN */ + NvU8 metadata[NV0000_OS_UNIX_EXPORT_OBJECT_FD_BUFFER_SIZE]; /* IN */ + NvHandle objects[NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS]; /* IN */ + NvU16 numObjects; /* IN */ + NvU16 index; /* IN */ +} NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS; + +/* + * NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD + * + * This command can be used to import back RM handles + * that were exported to an fd using the + * NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECTS_TO_FD control call. + * + * If index + numObjects is greater than the maxObjects value used + * to create the file descriptor, NV_ERR_INVALID_PARAMETER is returned + * and no objects are imported. + * + * For each valid handle in the 'objects' array parameter at index 'i', + * the corresponding object handle at index ('i' + 'index') contained by + * the fd will be imported. If the object at index ('i' + 'index') has + * not been exported into the fd, no object will be imported. + * + * If any of handles contained in the 'objects' array parameter are invalid + * and the corresponding export object handle is valid, + * NV_ERR_INVALID_PARAMETER will be returned and no handles will be imported. + * + * fd + * The export fd on which to import handles out of. + * + * hParent + * The parent RM handle of which all of the exported objects will + * be duped under. + * + * objects + * An array of RM handles. The exported objects will be duped under + * these handles during the import process. + * + * objectTypes + * An array of RM handle types. The type _NONE will be returned if + * the object was not imported. Other possible object types are + * mentioned below. + * + * numObjects + * The number of valid object handles in the 'objects' array. This should + * be set to the number of objects that the client wishes to import. + * + * index + * The index into the fd in which to start importing from. For + * use in iterative calls. 
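+ *
+ * A minimal usage sketch importing a single handle from slot 0 of an export
+ * FD (exportFd and hNewHandle are caller-chosen values, and NvRmControl()
+ * stands in for the caller's RM control path; all three are assumptions of
+ * this sketch):
+ *
+ *   NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS importParams = { 0 };
+ *
+ *   importParams.fd         = exportFd;
+ *   importParams.hParent    = hClient;     // dupe the object under the client
+ *   importParams.objects[0] = hNewHandle;  // destination handle for the dupe
+ *   importParams.numObjects = 1;
+ *   importParams.index      = 0;           // first exported slot
+ *
+ *   NvRmControl(hClient, hClient,
+ *               NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD,
+ *               &importParams, sizeof(importParams));
+ *
+ *   // importParams.objectTypes[0] reports what was imported, or _NONE if
+ *   // nothing was exported at that slot.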
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_OUT_OF_RANGE + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_PARAMETER + */ +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECTS_FROM_FD (0x3d0c) /* finn: Evaluated from "(FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID << 8) | NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS_MESSAGE_ID" */ + +// +// TODO Bump this back up to 512 after the FLA revamp is complete +// +#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS 128 + +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_NONE 0 +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM 1 +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM 2 +#define NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC 3 + +#define NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS { + NvS32 fd; /* IN */ + NvHandle hParent; /* IN */ + NvHandle objects[NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS]; /* IN */ + NvU8 objectTypes[NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS]; /* OUT */ + NvU16 numObjects; /* IN */ + NvU16 index; /* IN */ +} NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS; + +/* _ctrl0000unix_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h new file mode 100644 index 000000000..817d1d559 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0000/ctrl0000vgpu.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0000/ctrl0000vgpu.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h new file mode 100644 index 000000000..fcec6291f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0002.h @@ -0,0 +1,181 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0002.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#define NV0002_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0002, NV0002_CTRL_##cat, idx) + +/* Client command categories (6bits) */ +#define NV0002_CTRL_RESERVED (0x00) +#define NV0002_CTRL_DMA (0x01) + + +/* + * NV0002_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ + +#define NV0002_CTRL_CMD_NULL (0x20000) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NV0002_CTRL_CMD_UPDATE_CONTEXTDMA + * + * This command will update the parameters of the specified context dma. The + * context dma must be bound to a display channel. The update is limited + * to the display view of the context dma. Other use cases will continue to + * use the original allocation parameters. + * + * This is used on platforms where memory may be moved by the operating + * system after allocation. + * + * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag. + * + * baseAddress + * This parameter, if selected by flags, indicates the new baseAddress for + * the ctxdma + * limit + * This parameter, if selected by flags, indicates the new limit of the + * ctxdma. + * hCtxDma + * ContextDma handle on which to operate. Must match the handle given to the control + * call. + * hChannel + * Display channel handle. This field is ignored. + * hintHandle + * Hint value returned from HeapAllocHint which encodes information about + * the surface. This is used by chips without generic kind. Newer chips + * use the COMPR_INFO flag and the hintHandle must be zero. + * flags + * This parameter specifies flags which indicate which other parameters are + * valid. + * FLAGS_PAGESIZE updates the context DMA pagesize field, if not _DEFAULT + * FLAGS_USE_COMPR_INFO uses the surface format specified in the params, instead of hintHandle. 
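+ *
+ * A minimal usage sketch updating only the base address and limit. DRF_DEF is
+ * the usual SDK bit-field helper (nvmisc.h) and NvRmControl() stands in for
+ * the caller's RM control path; both are assumptions of this sketch, which
+ * issues the control on the context dma handle itself:
+ *
+ *   NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS updParams = { 0 };
+ *
+ *   updParams.hCtxDma     = hCtxDma;   // must match the control's object handle
+ *   updParams.hChannel    = hChannel;  // ignored, see above
+ *   updParams.baseAddress = newBaseAddress;
+ *   updParams.limit       = newLimit;
+ *   updParams.flags       =
+ *       DRF_DEF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA_FLAGS, _BASEADDRESS, _VALID) |
+ *       DRF_DEF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA_FLAGS, _LIMIT, _VALID);
+ *
+ *   NvRmControl(hClient, hCtxDma, NV0002_CTRL_CMD_UPDATE_CONTEXTDMA,
+ *               &updParams, sizeof(updParams));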
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT + * NV_ERR_INVALID_ARGUMENT + * NVOS_STATUS_NOT_SUPPORTED + */ +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA (0x20101) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS_MESSAGE_ID" */ + +#define NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS { + NV_DECLARE_ALIGNED(NvU64 baseAddress, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NvHandle hSubDevice; + NvHandle hCtxDma; + NvHandle hChannel; + NvHandle hintHandle; + NvU32 flags; +} NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS; + +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS 0:0 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS_INVALID (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_BASEADDRESS_VALID (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT 1:1 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT_INVALID (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_LIMIT_VALID (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT 2:2 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT_INVALID (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_HINT_VALID (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE 4:3 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_DEFAULT (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_4K (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_PAGESIZE_BIG (0x00000002) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO 6:5 +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_NONE (0x00000000) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_PITCH (0x00000001) +#define NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_BLOCK_LINEAR (0x00000002) + +/* + * NV0002_CTRL_CMD_BIND_CONTEXTDMA + * + * Bind a context dma to a display channel. Binding is no longer required for + * Host channels, but does silently succeed. + * + * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag. + * + * This control replaces the obsolete RmBindContextDma() API. + * + * hChannel + * The channel for ctxdma bind + * + * Possible error codes include + * NV_OK + * NV_ERR_TOO_MANY_PRIMARIES hash table is full + * NV_ERR_NO_MEMORY instance memory is full + * NV_ERR_INVALID_OFFSET surface is not correctly aligned + * NV_ERR_STATE_IN_USE context dma was already bound given channel + */ +#define NV0002_CTRL_CMD_BIND_CONTEXTDMA (0x20102) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_BIND_CONTEXTDMA_PARAMS_MESSAGE_ID" */ + +#define NV0002_CTRL_BIND_CONTEXTDMA_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0002_CTRL_BIND_CONTEXTDMA_PARAMS { + NvHandle hChannel; +} NV0002_CTRL_BIND_CONTEXTDMA_PARAMS; + +/* + * NV0002_CTRL_CMD_UNBIND_CONTEXTDMA + * + * Unbind a context dma from a display channel. + * + * This control call supports the NVOS54_FLAGS_LOCK_BYPASS flag. 
+ * + * hChannel + * The display channel to unbind from + * + * Possible error codes include + * NV_OK + * NV_ERR_INVALID_STATE channel was not bound + */ +#define NV0002_CTRL_CMD_UNBIND_CONTEXTDMA (0x20103) /* finn: Evaluated from "(FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID << 8) | NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS_MESSAGE_ID" */ + +#define NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS { + NvHandle hChannel; +} NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS; + +/* _ctrl0002.h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h new file mode 100644 index 000000000..39b59d5ee --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0004.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0004.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV01_TIMER control commands and parameters */ + +#define NV0004_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0004, NV0004_CTRL_##cat, idx) + +/* NV01_TIMER command categories (8bits) */ +#define NV0004_CTRL_RESERVED (0x00) +#define NV0004_CTRL_TMR (0x01) + +/* + * NV0004_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0004_CTRL_CMD_NULL (0x40000) /* finn: Evaluated from "(FINN_NV01_TIMER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY + * + * This command can be used to set a PTIMER alarm to trigger at the + * specified time in the future on the subdevice associated with this + * NV01_TIMER object instance. + * + * hEvent + * This parameter specifies the handle of an NV01_EVENT object instance + * that is to be signaled when the alarm triggers. This NV01_EVENT + * object instance must have been allocated with this NV01_TIMER object + * instance as its parent. If this parameter is set to NV01_NULL_OBJECT + * then all NV01_EVENT object instances associated with this NV01_TIMER + * object instance are signaled. + * alarmTimeUsecs + * This parameter specifies the relative time in nanoseconds at which + * the alarm should trigger. 
Note that the accuracy between the alarm
+ *     trigger and the subsequent notification to the caller can vary
+ *     depending on system conditions.
+ *
+ * Possible status values returned include:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_INVALID_PARAM_STRUCT
+ *   NVOS_STATUS_INVALID_OBJECT_HANDLE
+ */
+
+#define NV0004_CTRL_CMD_TMR_SET_ALARM_NOTIFY (0x40110) /* finn: Evaluated from "(FINN_NV01_TIMER_TMR_INTERFACE_ID << 8) | NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS_MESSAGE_ID" */

+#define NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS {
+    NvHandle hEvent;
+    NV_DECLARE_ALIGNED(NvU64 alarmTimeNsecs, 8);
+} NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS;
+
+/* _ctrl0004_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl000f.h b/src/common/sdk/nvidia/inc/ctrl/ctrl000f.h
new file mode 100644
index 000000000..afc8fd89e
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl000f.h
@@ -0,0 +1,88 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl000f.finn
+//
+
+
+
+
+#include "ctrl/ctrlxxxx.h"
+
+#define NV000F_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x000f, NV000F_CTRL_##cat, idx)
+
+/* Client command categories (6bits) */
+#define NV000F_CTRL_RESERVED  (0x00U)
+#define NV000F_CTRL_FM        (0x01U)
+#define NV000F_CTRL_RESERVED2 (0x02U)
+
+/*
+ * NV000F_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV000F_CTRL_CMD_NULL (0xf0000U) /* finn: Evaluated from "(FINN_FABRIC_MANAGER_SESSION_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+/*
+ * NV000F_CTRL_CMD_SET_FM_STATE
+ *
+ * This command will notify RM that the fabric manager is initialized.
+ *
+ * RM blocks P2P operations, such as P2P capability reporting and NV50_P2P
+ * object allocation, until this notification is received.
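+ *
+ * A minimal usage sketch (hFmSession is the caller's FABRIC_MANAGER_SESSION
+ * object handle and NvRmControl() stands in for the caller's RM control path;
+ * both are assumptions of this sketch). The command takes no parameters:
+ *
+ *   // Fabric manager tells RM that the fabric is ready for P2P use.
+ *   NvRmControl(hClient, hFmSession, NV000F_CTRL_CMD_SET_FM_STATE, NULL, 0);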
+ * + * Possible status values returned are: + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_NOT_SUPPORTED + * NV_OK + */ +#define NV000F_CTRL_CMD_SET_FM_STATE (0xf0101U) /* finn: Evaluated from "(FINN_FABRIC_MANAGER_SESSION_FM_INTERFACE_ID << 8) | 0x1" */ + +/* + * NV000F_CTRL_CMD_CLEAR_FM_STATE + * + * This command will notify RM that the fabric manager is uninitialized. + * + * RM would block P2P operations such as P2P capability reporting, NV50_P2P object + * allocation etc. as soon as the notification received. + * + * Possible status values returned are: + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_NOT_SUPPORTED + * NV_OK + */ +#define NV000F_CTRL_CMD_CLEAR_FM_STATE (0xf0102U) /* finn: Evaluated from "(FINN_FABRIC_MANAGER_SESSION_FM_INTERFACE_ID << 8) | 0x2" */ + +/* _ctrl000f.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h new file mode 100644 index 000000000..484042ecf --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0020.h @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0020.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#define NV0020_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x0020, NV0020_CTRL_##cat, idx) + +/* NV0020_GPU_MANAGEMENT command categories (6bits) */ +#define NV0020_CTRL_RESERVED (0x00) +#define NV0020_CTRL_GPU_MGMT (0x01) + +/* + * NV0020_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0020_CTRL_CMD_NULL (0x200000) /* finn: Evaluated from "(FINN_NV0020_GPU_MANAGEMENT_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* Maximum possible number of bytes of GID information */ +#define NV0020_GPU_MAX_GID_LENGTH (0x00000100) + +/* + * NV0020_CTRL_CMD_GPU_MGMT_SET_SHUTDOWN_STATE + * + * This command modifies GPU zero power state for the desired GPU in the + * database. This state is set by a privileged client, after the GPU is + * completely unregistered from RM as well as PCI subsystem. On Linux, + * clients perform this operation through pci-sysfs. + * This control call requires admin privileges. + * + * uuid (INPUT) + * The UUID of the gpu. 
+ * Supports binary format and SHA-1 type. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + + +#define NV0020_CTRL_CMD_GPU_MGMT_SET_SHUTDOWN_STATE (0x200101) /* finn: Evaluated from "(FINN_NV0020_GPU_MANAGEMENT_GPU_MGMT_INTERFACE_ID << 8) | NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS { + NvU8 uuid[NV0020_GPU_MAX_GID_LENGTH]; +} NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS; + +/* _ctrl0020_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h b/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h new file mode 100644 index 000000000..59d29f4a4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl003e.h @@ -0,0 +1,194 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl003e.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV01_MEMORY_SYSTEM control commands and parameters */ + +#define NV003E_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x003E, NV003E_CTRL_##cat, idx) + +/* NV01_MEMORY_SYSTEM command categories (6bits) */ +#define NV003E_CTRL_RESERVED (0x00) +#define NV003E_CTRL_MEMORY (0x01) + +/* + * NV003E_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV003E_CTRL_CMD_NULL (0x3e0000) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NV003E_CTRL_CMD_GET_SURFACE_PHYS_ATTR + * + * This command returns attributes associated with the memory object + * at the given offset. The architecture dependent return parameter + * comprFormat determines the meaningfulness (or not) of comprOffset. + * + * This call is currently only supported in the MODS environment. + * + * memOffset + * This parameter is both an input and an output. As input, this + * parameter holds an offset into the memory surface. The return + * value is the physical address of the surface at the given offset. + * memFormat + * This parameter returns the memory kind of the surface. + * comprOffset + * This parameter returns the compression offset of the surface. 
+ * comprFormat + * This parameter returns the type of compression of the surface. + * gpuCacheAttr + * gpuCacheAttr returns the gpu cache attribute of the surface. + * Legal return values for this field are + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED + * gpuP2PCacheAttr + * gpuP2PCacheAttr returns the gpu peer-to-peer cache attribute of the surface. + * Legal return values for this field are + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED + * NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED + * mmuContext + * mmuContext returns the requested type of physical address + * Legal return values for this field are + * TEGRA_VASPACE_A -- return the non-GPU device physical address ( the system physical address itself) for Tegra engines. + * returns the system physical address, may change to use a class value in future. + * FERMI_VASPACE_A -- return the GPU device physical address( the system physical address, or the SMMU VA) for Big GPU engines. + * 0 -- return the GPU device physical address( the system physical address, or the SMMU VA) for Big GPU engines. + * use of zero may be deprecated in future. + * contigSegmentSize + * If the underlying surface is physically contiguous, this parameter + * returns the size in bytes of the piece of memory starting from + * the offset specified in the memOffset parameter extending to the last + * byte of the surface. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_BAD_OBJECT_HANDLE + * NVOS_STATUS_BAD_OBJECT_PARENT + * NVOS_STATUS_NOT_SUPPORTED + * + */ +#define NV003E_CTRL_CMD_GET_SURFACE_PHYS_ATTR (0x3e0101) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID << 8) | NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID" */ + +#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS { + NV_DECLARE_ALIGNED(NvU64 memOffset, 8); + NvU32 memFormat; + NvU32 comprOffset; + NvU32 comprFormat; + NvU32 gpuCacheAttr; + NvU32 gpuP2PCacheAttr; + NvU32 mmuContext; + NV_DECLARE_ALIGNED(NvU64 contigSegmentSize, 8); +} NV003E_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS; + +/* valid gpuCacheAttr return values */ +#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN (0x00000000) +#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED (0x00000001) +#define NV003E_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED (0x00000002) + +/* NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES + * + * This command returns the number of physical pages associated with the + * memory object. + * + * This call is currently only implemented on Linux and assumes that linux + * kernel in which RM module will be loaded has same page size as defined + * in linux kernel source with which RM module was built. + * + * numPages + * This parameter returns total number of physical pages associated with + * the memory object. 
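+ *
+ * A minimal usage sketch (hSysMemory is the caller's NV01_MEMORY_SYSTEM object
+ * handle and NvRmControl() stands in for the caller's RM control path; both
+ * are assumptions of this sketch):
+ *
+ *   NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS countParams = { 0 };
+ *
+ *   NvRmControl(hClient, hSysMemory,
+ *               NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES,
+ *               &countParams, sizeof(countParams));
+ *
+ *   // countParams.numPages can then be used to size the pPages buffer passed
+ *   // to NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES, described below.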
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES (0x3e0102) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID << 8) | NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS {
+    NvU32 numPages;
+} NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS;
+
+
+/* NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES
+ *
+ * This command returns the physical pages associated with the memory object.
+ *
+ * This call is currently only implemented on Linux and assumes that the kernel
+ * into which the RM module is loaded has the same page size as the kernel
+ * source against which the RM module was built.
+ *
+ * pPages
+ *     This parameter returns the physical pages associated with the memory object.
+ *
+ * numPages
+ *     This parameter is both an input and an output. As an input parameter,
+ *     its value indicates the maximum number of physical pages to be copied to
+ *     pPages. As an output parameter, its value indicates the number of physical
+ *     pages copied to pPages.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES (0x3e0103) /* finn: Evaluated from "(FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID << 8) | NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS_MESSAGE_ID" */
+
+#define NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pPages, 8);
+    NvU32 numPages;
+} NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS;
+
+/* _ctrl003e_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
new file mode 100644
index 000000000..7f87a6e1d
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0041.h
@@ -0,0 +1,475 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include 
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl0041.finn +// + +#include "nvos.h" + + + +#include "ctrl/ctrlxxxx.h" +/* NV04_MEMORY control commands and parameters */ + +#define NV0041_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0041, NV0041_CTRL_##cat, idx) + +/* NV04_MEMORY command categories (6bits) */ +#define NV0041_CTRL_RESERVED (0x00) +#define NV0041_CTRL_MEMORY (0x01) + +/* + * NV0041_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0041_CTRL_CMD_NULL (0x410000) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR + * + * This command returns attributes associated with the memory object + * at the given offset. The architecture dependent return parameter + * comprFormat determines the meaningfulness (or not) of comprOffset. + * + * This call is only currently supported in the MODS environment. + * + * memOffset + * This parameter is both an input and an output. As input, this + * parameter holds an offset into the memory surface. The return + * value is the physical address of the surface at the given offset. + * memFormat + * This parameter returns the memory kind of the surface. + * comprOffset + * This parameter returns the compression offset of the surface. + * comprFormat + * This parameter returns the type of compression of the surface. + * memAperture + * The aperture of the surface is returned in this field. + * Legal return values for this parameter are + * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM + * NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM + * gpuCacheAttr + * gpuCacheAttr returns the gpu cache attribute of the surface. + * Legal return values for this field are + * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN + * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED + * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED + * gpuP2PCacheAttr + * gpuP2PCacheAttr returns the gpu peer-to-peer cache attribute of the surface. + * Legal return values for this field are + * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN + * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED + * NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED + * mmuContext + * mmuContext indicates the type of physical address to be returned (input parameter). + * Legal return values for this field are + * TEGRA_VASPACE_A -- return the device physical address for Tegra (non-GPU) engines. This is the system physical address itself. + * returns the system physical address. This may change to use a class value in future. + * FERMI_VASPACE_A -- return the device physical address for GPU engines. This can be a system physical address or a GPU SMMU virtual address. + * 0 -- return the device physical address for GPU engines. This can be a system physical address or a GPU SMMU virtual address. + * use of zero may be deprecated in future. + * contigSegmentSize + * If the underlying surface is physically contiguous, this parameter + * returns the size in bytes of the piece of memory starting from + * the offset specified in the memOffset parameter extending to the last + * byte of the surface. 
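+ *
+ * A minimal usage sketch querying the GPU view of the surface at offset 0
+ * (hMemory is the caller's memory object handle and NvRmControl() stands in
+ * for the caller's RM control path; both are assumptions of this sketch):
+ *
+ *   NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS attrParams = { 0 };
+ *
+ *   attrParams.memOffset  = 0;                // in: offset, out: physical address
+ *   attrParams.mmuContext = FERMI_VASPACE_A;  // GPU device address space
+ *
+ *   NvRmControl(hClient, hMemory, NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR,
+ *               &attrParams, sizeof(attrParams));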
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_BAD_OBJECT_HANDLE + * NVOS_STATUS_BAD_OBJECT_PARENT + * NVOS_STATUS_NOT_SUPPORTED + * + */ +#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR (0x410103) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS { + NV_DECLARE_ALIGNED(NvU64 memOffset, 8); + NvU32 memFormat; + NvU32 comprOffset; + NvU32 comprFormat; + NvU32 memAperture; + NvU32 gpuCacheAttr; + NvU32 gpuP2PCacheAttr; + NvU32 mmuContext; + NV_DECLARE_ALIGNED(NvU64 contigSegmentSize, 8); +} NV0041_CTRL_GET_SURFACE_PHYS_ATTR_PARAMS; + +/* valid memAperture return values */ +#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM (0x00000000) +#define NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM (0x00000001) + +/* valid gpuCacheAttr return values */ +#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN (0x00000000) +#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED (0x00000001) +#define NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED (0x00000002) + +/* + * NV0041_CTRL_CMD_GET_SURFACE_ZCULL_ID + * + * This command returns the Z-cull identifier for a surface. + * The value of ~0 is returned if there is none associated. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_BAD_OBJECT_HANDLE + * NVOS_STATUS_BAD_OBJECT_PARENT + * NVOS_STATUS_NOT_SUPPORTED + * + */ +#define NV0041_CTRL_CMD_GET_SURFACE_ZCULL_ID (0x410104) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS { + NvU32 zcullId; +} NV0041_CTRL_GET_SURFACE_ZCULL_ID_PARAMS; + +/* + * NV0041_CTRL_CMD_GET_SURFACE_PARTITION_STRIDE + * + * This command returns the partition stride (in bytes) for real memory + * associated with the memory object. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_BAD_OBJECT_HANDLE + * NVOS_STATUS_BAD_OBJECT_PARENT + * NVOS_STATUS_NOT_SUPPORTED + * + */ +#define NV0041_CTRL_CMD_GET_SURFACE_PARTITION_STRIDE (0x410105) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS { + NvU32 partitionStride; +} NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS; + + + +// return values for 'tilingFormat' +// XXX - the names for these are misleading +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_INVALID (0x00000000) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB (0x00000001) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB_1HIGH (0x00000002) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_FB_4HIGH (0x00000003) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_UMA_1HIGH (0x00000004) +#define NV0041_CTRL_CMD_GET_SURFACE_TILING_FORMAT_UMA_4HIGH (0x00000005) + +/* + * NV0041_CTRL_SURFACE_INFO + * + * This structure represents a single 32bit surface value. Clients + * request a particular surface value by specifying a unique surface + * information index. 
+ * + * Legal surface information index values are: + * NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS + * This index is used to request the set of hw attributes associated + * with the surface. Each distinct attribute is represented by a + * single bit flag in the returned value. + * Legal flags values for this index are: + * NV0041_CTRL_SURFACE_INFO_ATTRS_COMPR + * This surface has compression resources bound to it. + * NV0041_CTRL_SURFACE_INFO_ATTRS_ZCULL + * This surface has zcull resources bound to it. + * NV0041_CTRL_SURFACE_INFO_INDEX_COMPR_COVERAGE + * This index is used to request the compression coverage (if any) + * in units of 64K for the associated surface. A value of zero indicates + * there are no compression resources associated with the surface. + * Legal return values range from zero to a maximum number of 64K units + * that is GPU implementation dependent. + * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE + * This index is used to request the physically allocated size in units + * of 4K(NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR) for the associated + * surface. + * NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_ATTR + * This index is used to request the surface attribute field. The returned + * field value can be decoded using the NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_* + * DRF-style macros provided below. + * NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE + * This index is used to request the surface address space type. + * Returned values are described by NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE. + */ +typedef struct NV0041_CTRL_SURFACE_INFO { + NvU32 index; + NvU32 data; +} NV0041_CTRL_SURFACE_INFO; + +/* valid surface info index values */ +#define NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS (0x00000001) +#define NV0041_CTRL_SURFACE_INFO_INDEX_COMPR_COVERAGE (0x00000005) +#define NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE (0x00000007) +#define NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_ATTR (0x00000008) +#define NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE (0x00000009) + +/* + * This define indicates the scale factor of the reported physical size to the + * actual size in bytes. We use the scale factor to save space from the + * interface and account for large surfaces. To get the actual size, + * use `(NvU64)reported_size * NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR`. 
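+ *
+ *     For example, a reported PHYS_SIZE value of 0x20 corresponds to
+ *     0x20 * 0x1000 = 0x20000 bytes (128 KiB).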
+ */ +#define NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR (0x1000) + +/* valid surface info attr flags */ +#define NV0041_CTRL_SURFACE_INFO_ATTRS_COMPR (0x00000002) +#define NV0041_CTRL_SURFACE_INFO_ATTRS_ZCULL (0x00000004) + +/* Valid surface info page size */ +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE NVOS32_ATTR_PAGE_SIZE +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_DEFAULT NVOS32_ATTR_PAGE_SIZE_DEFAULT +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_4KB NVOS32_ATTR_PAGE_SIZE_4KB +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_BIG NVOS32_ATTR_PAGE_SIZE_BIG +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE_HUGE NVOS32_ATTR_PAGE_SIZE_HUGE + +/* Valid surface info CPU coherency */ +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY NVOS32_ATTR_COHERENCY +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_UNCACHED NVOS32_ATTR_COHERENCY_UNCACHED +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_CACHED NVOS32_ATTR_COHERENCY_CACHED +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_COMBINE NVOS32_ATTR_COHERENCY_WRITE_COMBINE +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_THROUGH NVOS32_ATTR_COHERENCY_WRITE_THROUGH +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_PROTECT NVOS32_ATTR_COHERENCY_WRITE_PROTECT +#define NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY_WRITE_BACK NVOS32_ATTR_COHERENCY_WRITE_BACK + +/* + * NV0041_CTRL_CMD_GET_SURFACE_INFO + * + * This command returns surface information for the associated memory object. + * Requests to retrieve surface information use a list of one or more + * NV0041_CTRL_SURFACE_INFO structures. + * + * surfaceInfoListSize + * This field specifies the number of entries on the caller's + * surfaceInfoList. + * surfaceInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the surface information is to be returned. + * This buffer must be at least as big as surfaceInfoListSize multiplied + * by the size of the NV0041_CTRL_SURFACE_INFO structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0041_CTRL_CMD_GET_SURFACE_INFO (0x410110) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_SURFACE_INFO_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV0041_CTRL_GET_SURFACE_INFO_PARAMS { + NvU32 surfaceInfoListSize; + NV_DECLARE_ALIGNED(NvP64 surfaceInfoList, 8); +} NV0041_CTRL_GET_SURFACE_INFO_PARAMS; + +/* + * NV0041_CTRL_CMD_GET_SURFACE_COMPRESSION_COVERAGE + * + * This command returns the percentage of surface compression tag coverage. + * The value of 0 is returned if there are no tags associated. 
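+ *
+ * A minimal usage sketch; rmControl() below is a hypothetical stand-in for
+ * the client's usual NV_ESC_RM_CONTROL wrapper, and hClient/hMemory are
+ * assumed to be valid client and memory object handles:
+ *
+ *   NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS covg = { 0 };
+ *   covg.hSubDevice = 0; // no subdevice handle supplied
+ *   status = rmControl(hClient, hMemory,
+ *                      NV0041_CTRL_CMD_GET_SURFACE_COMPRESSION_COVERAGE,
+ *                      &covg, sizeof(covg));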
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_BAD_OBJECT_HANDLE + * NVOS_STATUS_BAD_OBJECT_PARENT + * NVOS_STATUS_NOT_SUPPORTED + * + */ +#define NV0041_CTRL_CMD_GET_SURFACE_COMPRESSION_COVERAGE (0x410112) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS { + NvHandle hSubDevice; /* if non zero subDevice handle of local GPU */ + NvU32 lineMin; + NvU32 lineMax; + NvU32 format; +} NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS; + +/* + * NV0041_CTRL_CMD_GET_FBMEM_BUS_ADDR + * + * This command returns the BAR1 physical address of a + * Memory mapping made using NvRmMapMemory() + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_INVALID_DATA + * NV_ERR_INVALID_CLIENT + * NV_ERR_INVALID_OBJECT_HANDLE + * + */ +#define NV0041_CTRL_CMD_GET_FBMEM_BUS_ADDR (0x410114) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pLinearAddress, 8); /* [in] Linear address of CPU mapping */ + NV_DECLARE_ALIGNED(NvU64 busAddress, 8); /* [out] BAR1 address */ +} NV0041_CTRL_GET_FBMEM_BUS_ADDR_PARAMS; + +/* + * NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE + * + * This command flushes a cache on the GPU which all memory accesses go + * through. The types of flushes supported by this API may not be supported by + * all hardware. Attempting an unsupported flush type will result in an error. + * + * flags + * Contains flags to control various aspects of the flush. Valid values + * are defined in NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS*. Not all + * flags are valid for all GPUs. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NVOS_STATUS_INVALID_ARGUMENT + * NVOS_STATUS_INVALID_STATE + * + * See Also: + * NV0080_CTRL_CMD_DMA_FLUSH + * Performs flush operations in broadcast for the GPU cache and other hardware + * engines. Use this call if you want to flush all GPU caches in a + * broadcast device. + * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE + * Flushes the entire GPU cache or a set of physical addresses (if the + * hardware supports it). Use this call if you want to flush a set of + * addresses or the entire GPU cache in unicast mode. 
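+ *
+ * As a rough sketch, a client requesting a write-back plus invalidate of the
+ * GPU cache lines for this surface might build the flags with the DRF_DEF
+ * helper from nvmisc.h (rmControl() is a hypothetical stand-in for the
+ * client's control wrapper; hClient/hMemory are assumed valid handles):
+ *
+ *   NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS flush = { 0 };
+ *   flush.flags = DRF_DEF(0041_CTRL, _SURFACE_FLUSH_GPU_CACHE_FLAGS, _WRITE_BACK, _YES) |
+ *                 DRF_DEF(0041_CTRL, _SURFACE_FLUSH_GPU_CACHE_FLAGS, _INVALIDATE, _YES);
+ *   status = rmControl(hClient, hMemory, NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE,
+ *                      &flush, sizeof(flush));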
+ * + */ +#define NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE (0x410116) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS { + NvU32 flags; +} NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS; + +#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK 0:0 +#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_NO (0x00000000) +#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_YES (0x00000001) +#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE 1:1 +#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_NO (0x00000000) +#define NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_YES (0x00000001) + +/* + * NV0041_CTRL_CMD_GET_MEM_PAGE_SIZE + * + * This command may be used to get the memory page size. + * + * Parameters: + * pageSize [OUT] + * page size associated with the memory descriptor + * + * Possible status values are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_NOT_SUPPORTED + */ +#define NV0041_CTRL_CMD_GET_MEM_PAGE_SIZE (0x410118) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS { + NvU32 pageSize; /* [out] - page size */ +} NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS; + +/* + * NV0041_CTRL_CMD_UPDATE_SURFACE_COMPRESSION + * + * Acquire/release compression for surface + * + * Parameters: + * bRelease [IN] + * true = release compression; false = acquire compression + */ +#define NV0041_CTRL_CMD_UPDATE_SURFACE_COMPRESSION (0x410119) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS { + NvBool bRelease; /* [in] - acquire/release setting */ +} NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS; + +#define NV0041_CTRL_CMD_PRINT_LABELS_PARAMS_MESSAGE_ID (0x50U) + +typedef struct NV0041_CTRL_CMD_PRINT_LABELS_PARAMS { + NvU32 tag; /* [in] */ +} NV0041_CTRL_CMD_PRINT_LABELS_PARAMS; +#define NV0041_CTRL_CMD_SET_LABEL_PARAMS_MESSAGE_ID (0x51U) + +typedef struct NV0041_CTRL_CMD_SET_LABEL_PARAMS { + NvU32 tag; /* [in] */ +} NV0041_CTRL_CMD_SET_LABEL_PARAMS; +#define NV0041_CTRL_CMD_SET_LABEL (0x410151) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_SET_LABEL_PARAMS_MESSAGE_ID" */ +#define NV0041_CTRL_CMD_GET_LABEL (0x410152) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_GET_LABEL_PARAMS_MESSAGE_ID" */ +#define NV0041_CTRL_CMD_GET_LABEL_PARAMS_MESSAGE_ID (0x52U) + +typedef struct NV0041_CTRL_CMD_GET_LABEL_PARAMS { + NvU32 tag; /* [in] */ +} NV0041_CTRL_CMD_GET_LABEL_PARAMS; + +/* + * NV0041_CTRL_CMD_SET_TAG + * + * This command sets the memory allocation tag used for debugging. + * Every client has its own memory allocation tag, and the tag is copied when an object is duped. + * This control can be used on shared allocations to change their tag.
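+ *
+ * A minimal sketch (rmControl() is a hypothetical stand-in for the client's
+ * control wrapper and MY_ALLOC_TAG is a client-chosen debug value):
+ *
+ *   NV0041_CTRL_CMD_SET_TAG_PARAMS tagParams = { 0 };
+ *   tagParams.tag = MY_ALLOC_TAG;
+ *   status = rmControl(hClient, hMemory, NV0041_CTRL_CMD_SET_TAG,
+ *                      &tagParams, sizeof(tagParams));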
+ */ +#define NV0041_CTRL_CMD_SET_TAG (0x410120) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_SET_TAG_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_CMD_SET_TAG_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV0041_CTRL_CMD_SET_TAG_PARAMS { + NvU32 tag; /* [in] */ +} NV0041_CTRL_CMD_SET_TAG_PARAMS; + +/* + * NV0041_CTRL_CMD_GET_TAG + * + * This command returns memory allocation tag used for debugging. + */ +#define NV0041_CTRL_CMD_GET_TAG (0x410121) /* finn: Evaluated from "(FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID << 8) | NV0041_CTRL_CMD_GET_TAG_PARAMS_MESSAGE_ID" */ + +#define NV0041_CTRL_CMD_GET_TAG_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV0041_CTRL_CMD_GET_TAG_PARAMS { + NvU32 tag; /* [out] */ +} NV0041_CTRL_CMD_GET_TAG_PARAMS; + +/* _ctrl0041_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h new file mode 100644 index 000000000..fa7d8f7eb --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015,2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* category-specific includes */ +#include "ctrl0073/ctrl0073system.h" +#include "ctrl0073/ctrl0073specific.h" +#include "ctrl0073/ctrl0073stereo.h" +#include "ctrl0073/ctrl0073event.h" +#include "ctrl0073/ctrl0073internal.h" +#include "ctrl0073/ctrl0073dfp.h" +#include "ctrl0073/ctrl0073dp.h" +#include "ctrl0073/ctrl0073svp.h" +#include "ctrl0073/ctrl0073dpu.h" +#include "ctrl0073/ctrl0073psr.h" diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h new file mode 100644 index 000000000..d860f9c8b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073base.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073base.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV04_DISPLAY_COMMON control commands and parameters */ + +#define NV0073_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0073, NV0073_CTRL_##cat, idx) + +/* NV04_DISPLAY_COMMON command categories (6bits) */ +#define NV0073_CTRL_RESERVED (0x00U) +#define NV0073_CTRL_SYSTEM (0x01U) +#define NV0073_CTRL_SPECIFIC (0x02U) +#define NV0073_CTRL_EVENT (0x03U) +#define NV0073_CTRL_INTERNAL (0x04U) +#define NV0073_CTRL_DFP (0x11U) +#define NV0073_CTRL_DP (0x13U) +#define NV0073_CTRL_SVP (0x14U) +#define NV0073_CTRL_DPU (0x15U) +#define NV0073_CTRL_PSR (0x16U) +#define NV0073_CTRL_STEREO (0x17U) + +/* + * NV0073_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0073_CTRL_CMD_NULL (0x730000U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl0073base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h new file mode 100644 index 000000000..675aff0de --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h @@ -0,0 +1,1125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073dfp.finn +// + + + + +#include "ctrl/ctrl0073/ctrl0073base.h" + +/* NV04_DISPLAY_COMMON dfp-display-specific control commands and parameters */ + +/* + * NV0073_CTRL_CMD_DFP_GET_INFO + * + * This command can be used to determine the associated display type for + * the specified displayId. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the dfp + * caps should be returned. The display ID must be a dfp display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * flags + * This parameter returns the information specific to this dfp. Here are + * the possible fields: + * NV0073_CTRL_DFP_FLAGS_SIGNAL + * This specifies the type of signal used for this dfp. + * NV0073_CTRL_DFP_FLAGS_LANES + * This specifies whether the board supports 1, 2, or 4 lanes + * for DISPLAYPORT signals. + * NV0073_CTRL_DFP_FLAGS_LIMIT + * Some GPUs were not qualified to run internal TMDS except at 60 HZ + * refresh rates. So, if LIMIT_60HZ_RR is set, then the client must + * make sure to only allow 60 HZ refresh rate modes to the OS/User. + * NV0073_CTRL_DFP_FLAGS_SLI_SCALER + * While running in SLI, if SLI_SCALER_DISABLE is set, the GPU cannot + * scale any resolutions. So, the output timing must match the + * memory footprint. + * NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE + * This specifies whether the DFP displayId is capable of + * transmitting HDMI. + * NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE + * This specifies whether the displayId is capable of sending a + * limited color range out from the board. + * NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE + * This specifies whether the displayId is capable of auto-configuring + * the color range. + * NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE + * This specifies whether the displayId is capable of sending the + * YCBCR422 color format out from the board. + * NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE + * This specifies whether the displayId is capable of sending + * YCBCR444 color format out from the board. + * NV0073_CTRL_DFP_FLAGS_DP_LINK_BANDWIDTH + * This specifies whether the displayId is capable of doing high + * bit-rate (2.7Gbps) or low bit-rate (1.62Gbps) if the DFP is + * display port. + * NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED + * This specifies whether the DFP displayId is allowed to transmit HDMI + * based on the VBIOS settings. + * NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT + * This specifies whether the DFP displayId is actually an embedded display + * port based on VBIOS connector information AND ASSR cap. 
+ * NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT + * This specifies whether the DFP displayId must be trained to RBR mode + * (if it is using DP protocol) whenever possible. + * NV0073_CTRL_DFP_FLAGS_LINK + * This specifies whether the board supports single or dual links + * for TMDS, LVDS, and SDI signals. + * NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED + * This specifies if PostCursor2 is disabled in the VBIOS + * NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID + * This indicates whether this SOR uses DSI-A, DSI-B or both (ganged mode). + * NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE + * This indicates whether this DFP supports Dynamic MUX + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID (0x40U) + +typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; +} NV0073_CTRL_DFP_GET_INFO_PARAMS; + +/* valid display types */ +#define NV0073_CTRL_DFP_FLAGS_SIGNAL 2:0 +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK (0x00000005U) +#define NV0073_CTRL_DFP_FLAGS_LANE 5:3 +#define NV0073_CTRL_DFP_FLAGS_LANE_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_LANE_OCT (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT 6:6 +#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER 7:7 +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE 8:8 +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE 9:9 +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE 10:10 +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE 11:11 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE 12:12 +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED 14:14 +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT 15:15 +#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE (0x00000000U) +#define 
NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT 16:16 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW 19:17 +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS (0x00000004U) +#define NV0073_CTRL_DFP_FLAGS_LINK 21:20 +#define NV0073_CTRL_DFP_FLAGS_LINK_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID 22:22 +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID 24:23 +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B (0x00000002U) +#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED (0x00000003U) +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED 25:25 +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT 29:26 +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE 30:30 +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE (0x00000001U) + + + +/* + * NV0073_CTRL_CMD_DFP_GET_DP2TMDS_DONGLE_INFO + * + * This command can be used to determine information about dongles attached + * to a displayport connection. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the dfp display which owns the + * panel power to adjust. The display ID must be a dfp display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * flags + * This parameter provide state information about the dongle attachments. + * NV0073_CTRL_DFP_GET_DP2TMDS_DONGLE_INFO_FLAGS_CAPABLE + * Specifies if the connection is capable of a dongle. This field + * returns false in all cases of signal types except for those capable + * of outputting TMDS. Even then the if a gpio is not defined, the + * the a statement of false will also be returned. + * NV0073_CTRL_DFP_GET_DP2TMDS_DONGLE_INFO_FLAGS_ATTACHED + * When attached, this value specifies that a dongle is detected and + * attached. The client should read the _TYPE field to determine + * if it is a dp2hdmi or dp2dvi dongle. + * NV0073_CTRL_DFP_GET_DP2TMDS_DONGLE_INFO_FLAGS_TYPE + * _DP2DVI: no response to i2cAddr 0x80 per DP interop guidelines. + * clients MUST avoid outputting HDMI even if capable. + * _DP2HDMI: dongle responds to i2cAddr 0x80 per DP interop guidelines. 
+ * client is allowed to output HDMI when possible. + * _LFH_DVI: DMS59-DVI breakout dongle is in use. + * _LFH_VGA: DMS59-VGA breakout dongle is in use. + * NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE + * _1: Max TMDS Clock rate is 165 MHz for both DVI and HDMI. + * _2: Max TMDS Clock rate will be specified in the dongle + * address space at device address 0x80. + * DVI is up to 165 MHz + * HDMI is up to 300 MHz + * There are type 2 devices that support beyond 600 MHz + * though not defined in the spec. + * maxTmdsClkRateHz + * This defines the max TMDS clock rate for dual mode adaptor in Hz. + */ +#define NV0073_CTRL_CMD_DFP_GET_DISPLAYPORT_DONGLE_INFO (0x731142U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS_MESSAGE_ID (0x42U) + +typedef struct NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 maxTmdsClkRateHz; +} NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS; + +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE 0:0 +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_CAPABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED 1:1 +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_ATTACHED_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE 7:4 +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_DP2DVI (0x00000000U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_DP2HDMI (0x00000001U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_LFH_DVI (0x00000002U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_TYPE_LFH_VGA (0x00000003U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE 8:8 +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE_1 (0x00000000U) +#define NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE_TYPE_2 (0x00000001U) + + + +/* + * NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS + * + * This command is used to inform hardware the receiver's audio capabilities + * using the new EDID Like Data (ELD) memory structure. The ELD memory + * structure is read by the audio driver by issuing the ELD Data command verb. + * This mechanism is used for passing sink device' audio EDID information + * from graphics software to audio software. ELD contents will contain a + * subset of the sink device's EDID information. + * The client should inform hardware at initial boot, a modeset, and whenever + * a hotplug event occurs. + * + * displayId + * This parameter indicates the digital display device's + * mask. This comes as input to this command. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * numELDSize + * This parameter specifies how many bytes of data RM should write to the + * ELD buffer. 
Section 7.3.3.36 of the ECN specifies that the ELD buffer + * size of zero based. HDAudio driver will then use this information to + * determine how many bytes of the ELD buffer the HDAudio should read. + * The maximum size of the buffer is 96 bytes. + * bufferELD + * This buffer contains data as defined in the ECR HDMI ELD memory structure. + * Refer to the ELD Memory Structure Specification for more details. + * The format should be: + * - Header block is fixed at 4 bytes + * The header block contains the ELD version and baseline ELD len as + * well as some reserved fields. + * - Baseline block for audio descriptors is 76 bytes + * (15 SAD; each SAD=3 bytes requiring 45 bytes with 31 bytes to spare) + * As well as some other bits used to denote the CEA version, + * the speaker allocation data, monitor name, connector type, and + * hdcp capabilities. + * - Vendor specific block of 16 bytes + * maxFreqSupported + * Supply the maximum frequency supported for the overall audio caps. + * This value should match CEA861-X defines for sample freq. + * ctrl: + * NV0073_CTRL_DFP_SET_ELD_AUDIO_CAPS_CTRL_PD: + * Specifies the presence detect of the receiver. On a hotplug + * or modeset client should set this bit to TRUE. + * NV0073_CTRL_DFP_SET_ELD_AUDIO_CAPS_CTRL_ELDV: + * Specifies whether the ELD buffer contents are valid. + * An intrinsic unsolicited response (UR) is generated whenever + * the ELDV bit changes in value and the PD=1. When _PD=1(hotplug), + * RM will set the ELDV bit after ELD buffer contents are written. + * If _ELDV bit is set to false such as during a unplug, then the + * contents of the ELD buffer will be cleared. + * deviceEntry: + * The deviceEntry number from which the SF should accept packets. + * _NONE if disabling audio. + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER 96U + +#define NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID (0x44U) + +typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 numELDSize; + NvU8 bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER]; + NvU32 maxFreqSupported; + NvU32 ctrl; + NvU32 deviceEntry; +} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS; + +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0320KHZ (0x00000001U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0441KHZ (0x00000002U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ (0x00000003U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0882KHZ (0x00000004U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0960KHZ (0x00000005U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_1764KHZ (0x00000006U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_1920KHZ (0x00000007U) + +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD 0:0 +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE (0x00000001U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV 1:1 +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE (0x00000000U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE (0x00000001U) + +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0 (0x00000000U) +#define 
NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_1 (0x00000001U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_2 (0x00000002U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_3 (0x00000003U) +#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE (0x00000007U) + + + +/* + * NV0073_CTRL_CMD_DFP_GET_SPREAD_SPECTRUM_STATUS + * + * This command is used to get spread spectrum status for a display device. + * + * displayId + * Display ID for which the spread spectrum status is needed. + * checkRMSsState + * Default is to check in Vbios. This flag lets this control call to check in register. + * status + * Return status value. + */ + +#define NV0073_CTRL_CMD_DFP_GET_SPREAD_SPECTRUM (0x73114cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS_MESSAGE_ID (0x4CU) + +typedef struct NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS { + NvU32 displayId; + NvBool enabled; +} NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS; + +/* + * NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE + * + * Update the Dynamic DFP with Bcaps read from remote display. + * Also updates hdcpFlags, gpu hdcp capable flags in DFP. + * If bResetDfp is true, all the flags are reset before making changes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DFP_UPDATE_DYNAMIC_DFP_CACHE (0x73114eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS_MESSAGE_ID (0x4EU) + +typedef struct NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS { + NvU32 subDeviceInstance; + NvU32 headIndex; + NvU8 bcaps; + NvU8 bksv[5]; + NvU32 hdcpFlags; + NvBool bHdcpCapable; + NvBool bResetDfp; + NvU8 updateMask; +} NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS; + +#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BCAPS 0x01U +#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_BKSV 0x02U +#define NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_MASK_FLAGS 0x03U + +/* + * NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE + * + * This command sets the audio enable state of the DFP. When disabled, + * no audio stream packets or audio timestamp packets will be sent. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the dfp + * audio should be enabled or disabled. The display ID must be a dfp display. + * If the displayId is not a dfp, this call will return + * NV_ERR_INVALID_ARGUMENT. + * enable + * This parameter specifies whether to enable (NV_TRUE) or disable (NV_FALSE) + * audio to the display. 
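+ *
+ * A minimal sketch of disabling audio on a DFP; rmControl(), hClient and
+ * hDisplayCommon (an NV04_DISPLAY_COMMON object handle) are hypothetical
+ * stand-ins for the client's usual control path:
+ *
+ *   NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS audio = { 0 };
+ *   audio.subDeviceInstance = 0;         // default subdevice
+ *   audio.displayId         = displayId; // single DFP display ID bit
+ *   audio.enable            = NV_FALSE;
+ *   status = rmControl(hClient, hDisplayCommon,
+ *                      NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE,
+ *                      &audio, sizeof(audio));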
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + * + */ +#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID (0x50U) + +typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool enable; +} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS; + + + +/* + * NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG + * + * This enum defines default/primary/secondary sor sublinks to be configured. + * These access modes are: + * + * NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_NONE + * Default link config + * NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_PRIMARY_SOR_LINK + * Primary sor sublink to be configured + * NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_SECONDARY_SOR_LINK + * Secondary sor sublink to be configured + */ +typedef enum NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG { + NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_NONE = 0, + NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_PRIMARY_SOR_LINK = 1, + NV0073_CTRL_DFP_ASSIGN_SOR_FORCE_SECONDARY_SOR_LINK = 2, +} NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG; + +/* + * NV0073_CTRL_DFP_ASSIGN_SOR_INFO + * + * This structure describes info about assigned SOR + * + * displayMask + * The displayMask for the SOR corresponding to its HW routings + * sorType + * This parameter specifies the SOR type + * Here are the current defined fields: + * NV0073_CTRL_DFP_SOR_TYPE_NONE + * Unallocated SOR + * NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY + * Primary SOR for 2H1OR stream + * NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY + * Secondary SOR for 2H1OR stream + * NV0073_CTRL_DFP_SOR_TYPE_SINGLE + * Default Single SOR + * Note - sorType should only be referred to identify 2H1OR Primary and Secondary SOR + * + */ + +typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO { + NvU32 displayMask; + NvU32 sorType; +} NV0073_CTRL_DFP_ASSIGN_SOR_INFO; + +#define NV0073_CTRL_DFP_SOR_TYPE_NONE (0x00000000U) +#define NV0073_CTRL_DFP_SOR_TYPE_SINGLE (0x00000001U) +#define NV0073_CTRL_DFP_SOR_TYPE_2H1OR_PRIMARY (0x00000002U) +#define NV0073_CTRL_DFP_SOR_TYPE_2H1OR_SECONDARY (0x00000003U) + +/* + * NV0073_CTRL_CMD_DFP_ASSIGN_SOR + * + * This command is used by the clients to assign SOR to DFP for CROSS-BAR + * when the default SOR-DFP routing that comes from vbios is not considered. + * SOR shall be assigned to a DFP at the runtime. This call should be called + * before a modeset is done on any dfp display and also before LinkTraining for DP displays. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * DisplayId of the primary display for which SOR is to be assigned. However, if + * displayId is 0 then RM shall return the XBAR config it has stored in it's structures. + * sorExcludeMask + * sorMask of the SORs which should not be used for assignment. If this is 0, + * then SW is free to allocate any available SOR. + * slaveDisplayId + * displayId of the slave device in case of dualSST mode. This ctrl call will + * allocate SORs to both slave and the master if slaveDisplayId is set. 
+ * forceSublinkConfig + * forces RM to configure primary or secondary sor sublink on the given diaplayId. + * If not set, then RM will do the default configurations. + * bIs2Head1Or + * Specifies that SOR allocation is required for 2 head 1 OR. This will allocate + * 2 SOR for same displayId - one Master and one Slave. Slave SOR would be disconnected + * from any padlink and get feedback clock from Master SOR's padlink. + * sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS] + * returns the displayMask for all SORs corresponding to their HW routings. + * sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS] + * returns the displayMask for all SORs corresponding to their HW routings along with + * SOR type to identify 2H1OR Primary and Secondary SORs. SOR type would be identified by + * NV0073_CTRL_DFP_SOR_TYPE. sorAssignList would look as below - + * sorAssignListWithTag[] = { DisplayMask, SOR Type + * {0x100, SECONDARY_SOR} + * {0x200, SINGLE_SOR} + * {0x100, PRIMARY_SOR} + * {0, NONE}} + * } + * Here, for display id = 0x100, SOR2 is Primary and SOR0 is Secondary. + * Note - sorAssignList parameter would be removed after Bug 200749158 is resolved + * reservedSorMask + * returns the sorMask reserved for the internal panels. + * flags + * Other detail settings. + * _AUDIO_OPTIMAL: Client requests trying to get audio SOR if possible. + * If there's no audio capable SOR and OD is HDMI/DP, + * RM will fail the control call. + * _AUDIO_DEFAULT: RM does not check audio-capability of SOR. + * + * _ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES : RM returns Active SOR which is not Audio capable. + * _ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO : RM is not returning 'Active non-audio capable SOR'. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + + +#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS 4U + +#define NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID (0x52U) + +typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU8 sorExcludeMask; + NvU32 slaveDisplayId; + NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig; + NvBool bIs2Head1Or; + NvU32 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]; + NV0073_CTRL_DFP_ASSIGN_SOR_INFO sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS]; + NvU8 reservedSorMask; + NvU32 flags; +} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS; + +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO 0:0 +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL (0x00000001U) +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT (0x00000000U) +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE 1:1 +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO (0x00000000U) +#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U) + +/* +* NV0073_CTRL_CMD_DFP_GET_PADLINK_MASK +* +* This command will only be used by chipTB tests to get the padlinks corresponding +* to the given displayId. RM gets this information from vbios. This control call is +* only for verif purpose. +* +* subDeviceInstance +* This parameter specifies the subdevice instance within the +* NV04_DISPLAY_COMMON parent device to which the operation should be +* directed. 
This parameter must specify a value between zero and the +* total number of subdevices within the parent device. This parameter +* should be set to zero for default behavior. +* displayId +* DisplayId of the display for which the client needs analog link Mask +* padlinkMask +* analogLinkMask for the given displayId. This value returned is 0xffffffff if +* the given displayId is invalid else RM returns the corresponding padlinkMask. +* NV_OK +* NV_ERR_INVALID_ARGUMENT +* NV_ERR_NOT_SUPPORTED +*/ + + +#define NV0073_CTRL_CMD_DFP_GET_PADLINK_MASK (0x731153U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS_MESSAGE_ID (0x53U) + +typedef struct NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 padlinkMask; +} NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS; + +/* + * NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE + * This enum defines the functions that are supported for which a + * corresponding GPIO pin number could be retrieved + * Values copied from objgpio.h GPIO_FUNC_TYPE_LCD_*. Please keep the + * values in sync between the 2 files + */ + +typedef enum NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE { + // GPIO types of LCD GPIO functions common to all internal panels + NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_BACKLIGHT = 268435456, + NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_POWER = 285212672, + NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_POWER_OK = 301989888, + NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_SELF_TEST = 318767104, + NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_LAMP_STATUS = 335544320, + NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE_LCD_BRIGHTNESS = 352321536, +} NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE; + +/* + * NV0073_CTRL_CMD_DFP_GET_LCD_GPIO_PIN_NUM + * + * This command can be used to get the GPIO pin number that corresponds to one + * of the LCD functions + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the dfp display. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NVOS_STATUS_ERROR_INVALID_ARGUMENT. + * funcType + * The LDC function for which the GPIO pin number is needed + * lcdGpioPinNum + * The GPIO pin number that corresponds to the LCD function. + * +*/ +#define NV0073_CTRL_CMD_DFP_GET_LCD_GPIO_PIN_NUM (0x731154U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS_MESSAGE_ID (0x54U) + +typedef struct NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NV0073_CTRL_CMD_DFP_LCD_GPIO_FUNC_TYPE funcType; + NvU32 lcdGpioPinNum; +} NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS; + + + +/* + * NV0073_CTRL_CMD_DFP_CONFIG_TWO_HEAD_ONE_OR + * + * This command is used for configuration of 2 head 1 OR. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. 
This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * Display Id of the panel for which Two Head One OR is going to be used + * bEnable + * Enable/Disable 2 Head 1 OR + * masterSorIdx + * Master SOR Index which will send pixels to panel + * slaveSorIdx + * Slave SOR Index which will take feedback clock from Master SOR's + * padlink + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + * NVOS_STATUS_ERROR_INVALID_ARGUMENT + * NVOS_STATUS_ERROR_NOT_SUPPORTED + */ + + +#define NV0073_CTRL_CMD_DFP_CONFIG_TWO_HEAD_ONE_OR (0x731156U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS_MESSAGE_ID (0x56U) + +typedef struct NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bEnable; + NvU32 masterSorIdx; + NvU32 slaveSorIdx; +} NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS; + +/* + * NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL + * + * This command is used to enable/disable CRC on the GPU or query the registers + * related to it. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * headIndex + * index of the head + * cmd + * specifies whether setup or querying is done + * bEnable + * enable or disable CRC on the GPU + * gpuCrc0 + * 0-indexed CRC register of the GPU + * gpuCrc1 + * 1-indexed CRC register of the GPU + * gpuCrc2 + * 2-indexed CRC register of the GPU + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + * NVOS_STATUS_ERROR_NOT_SUPPORTED + */ + + +#define NV0073_CTRL_CMD_DFP_DSC_CRC_CONTROL (0x731157U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS_MESSAGE_ID (0x57U) + +typedef struct NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS { + NvU32 subDeviceInstance; + NvU32 headIndex; + NvU32 cmd; + NvBool bEnable; + NvU16 gpuCrc0; + NvU16 gpuCrc1; + NvU16 gpuCrc2; +} NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS; + +#define NV0073_CTRL_DP_CRC_CONTROL_CMD 0:0 +#define NV0073_CTRL_DP_CRC_CONTROL_CMD_SETUP (0x00000000U) +#define NV0073_CTRL_DP_CRC_CONTROL_CMD_QUERY (0x00000001U) + +/* + * NV0073_CTRL_CMD_DFP_INIT_MUX_DATA + * + * This control call is used to configure the display MUX related data + * for the given display device. Clients to RM are expected to call this + * control call to initialize the data related to MUX before any MUX related + * operations such as mux switch or PSR entry/exit are performed. + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the mux state has to be initialized + * manfId (in) + * Specifies the manufacturer ID of panel obtained from the EDID. This + * parameter is expected to be non-zero only in case of internal panel. + * productId (in) + * Specifies the product ID of panel obtained from the EDID.
This + * parameter is expected to be non-zero only in case of internal panel. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DFP_INIT_MUX_DATA (0x731158U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS_MESSAGE_ID (0x58U) + +typedef struct NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU16 manfId; + NvU16 productId; +} NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS; + + + +/* + * NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX + * + * This command is used to switch the dynamic display mux between + * integrated GPU and discrete GPU. + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the display MUX has to be switched + * flags (in) + * Flags indicating the action to be performed. Here are the possible + * valid values- + * NV0073_CTRL_DFP_DISP_MUX_SWITCH_IGPU_TO_DGPU + * When set mux is switched from integrated to discrete GPU. + * NV0073_CTRL_DFP_DISP_MUX_SWITCH_DGPU_TO_IGPU + * When set mux is switched from discrete to integrated GPU. + * NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS + * Set to true for PSR panels as we skip sideband access. + * auxSettleDelay (in) + * Time, in milliseconds, necessary for AUX channel to settle and become + * accessible after a mux switch. Set to zero to use the default delay. + * muxSwitchLatencyMs (out) + * mux switch latency stats in milli-seconds + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX (0x731160U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 auxSettleDelay; + NvU32 muxSwitchLatencyMs; +} NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS; + +/* valid flags*/ +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH 0:0 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_IGPU_TO_DGPU 0x00000000 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_DGPU_TO_IGPU 0x00000001 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS 1:1 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS_YES 0x00000001 +#define NV0073_CTRL_DFP_DISP_MUX_SWITCH_SKIP_SIDEBAND_ACCESS_NO 0x00000000 + +/* + * NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS + * + * This command is used to perform all the operations that need to be + * performed before a mux switch is started. + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the pre mux switch operations have + * to be performed. + * flags (in) + * Flags indicating the action to be performed. 
Here are the possible + * valid values - + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU + * Indicates a switch from i to d is initiated + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU + * Indicates a switch from d to i is initiated + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_NO + * When set RM will execute the PSR enter sequence. By default RM will + * not skip SR enter sequence + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_YES + * When set RM will skip the PSR enter sequence + * iGpuBrightness (in) + * iGPU brightness value (scale 0~100) before switching mux from I2D. + * This is used to match brightness after switching mux to dGPU + * preOpsLatencyMs (out) + * premux switch operations latency stats in milli-seconds. This includes - + * - disabling SOR sequencer and enable BL GPIO control + * - toggling LCD VDD, BL EN and PWM MUX GPIOs + * - PSR entry, if not skipped + * psrEntryLatencyMs (out) + * psr entry latency stats in milli-seconds + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS (0x731161U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID (0x61U) + +typedef struct NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 iGpuBrightness; + NvU32 preOpsLatencyMs; + NvU32 psrEntryLatencyMs; +} NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS; + +/* valid flags*/ +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE 0:0 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU 0x00000001U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP 1:1 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_NO 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_ENTER_SKIP_YES 0x00000001U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING 2:2 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_KNOWN 0x00000000 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_UNKNOWN 0x00000001 + +#define NV0073_CTRL_DISP_MUX_BACKLIGHT_BRIGHTNESS_MIN 0U +#define NV0073_CTRL_DISP_MUX_BACKLIGHT_BRIGHTNESS_MAX 100U + +/* + * NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS + * + * This command is used to perform all the operations that need to be + * performed after a successful mux switch is completed. + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the post mux switch operations have + * to be performed. + * flags (in) + * Flags indicating the action to be performed. Here are the possible + * valid values - + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU + * Indicates a switch from i to d is initiated + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU + * Indicates a switch from d to i is initiated + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_NO + * When set RM will execute the PSR exit sequence. 
By default RM will + * not skip SR exit sequence + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_YES + * When set RM will skip the PSR exit sequence + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_KNOWN + * Indicates mux switches where we know when igpu powers up + * NV0073_CTRL_DFP_DISP_MUX_FLAGS_MUX_SWITCH_IGPU_POWER_TIMING_UNKNOWN + * Indicates mux switches where we don't know when igpu powers up + * postOpsLatencyMs (out) + * postmux switch operations latency stats in milli-seconds. This includes - + * - restoring SOR sequencer and BL GPIO control + * - toggling LCD VDD, BL EN and PWM MUX GPIOs + * - PSR exit, if not skipped + * psrExitLatencyMs (out) + * psr exit latency stats in milli-seconds + * psrExitTransitionToInactiveLatencyMs (out) + * psr exit latency stats in milli-seconds, from state 2 (SR active) to state 4 (transition to inactive) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_TIMEOUT in case of SR exit failure + */ + +#define NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS (0x731162U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS_MESSAGE_ID (0x62U) + +typedef struct NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 postOpsLatencyMs; + NvU32 psrExitLatencyMs; + NvU32 psrExitTransitionToInactiveLatencyMs; +} NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS; + +/* valid flags*/ +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE 0:0 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP 1:1 +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_NO 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_FLAGS_SR_EXIT_SKIP_YES 0x00000001U + +/* + * NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS + * + * This command is used to query the display mux status for the given + * display device + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId (in) + * ID of the display device for which the post mux switch operations have + * to be performed. + * muxStatus (out) + * status indicating the current state of the mux. + * valid values - + * NV0073_CTRL_DFP_DISP_MUX_STATE_INTEGRATED_GPU + * Indicates that the MUX is currently switched to integrated GPU. + * NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU + * Indicates that the MUX is currently switched to discrete GPU. + * NV0073_CTRL_DFP_DISP_MUX_MODE_DISCRETE_ONLY + * Indicates that the MUX mode is set to discrete mode, where all displays + * are driven by discrete GPU. + * NV0073_CTRL_DFP_DISP_MUX_MODE_INTEGRATED_ONLY + * Indicates that the MUX mode is set to integrated mode, where all + * displays are driven by Integrated GPU. + * NV0073_CTRL_DFP_DISP_MUX_MODE_HYBRID + * Indicates that the MUX mode is set to hybrid, where internal panel is + * driven by integrated GPU, while external displays might be driven by + * discrete GPU. + * NV0073_CTRL_DFP_DISP_MUX_MODE_DYNAMIC + * Indicates that the MUX mode is dynamic. It is only in this mode, the + * display MUX can be toggled between discrete and hybrid dynamically. 
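+ *
+ * A minimal sketch of querying and decoding the mux state; rmControl(),
+ * hClient and hDisplayCommon are hypothetical stand-ins for the client's
+ * usual control path, and DRF_VAL is the field-extraction helper from
+ * nvmisc.h:
+ *
+ *   NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS mux = { 0 };
+ *   mux.subDeviceInstance = 0;
+ *   mux.displayId         = displayId;
+ *   status = rmControl(hClient, hDisplayCommon,
+ *                      NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS,
+ *                      &mux, sizeof(mux));
+ *   if (DRF_VAL(0073_CTRL, _DFP_DISP_MUX, _STATE, mux.muxStatus) ==
+ *       NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU)
+ *   {
+ *       // panel is currently driven by the discrete GPU
+ *   }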
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS (0x731163U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS_MESSAGE_ID (0x63U) + +typedef struct NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 muxStatus; +} NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS; + +/* valid flags */ +#define NV0073_CTRL_DFP_DISP_MUX_STATE 1:0 +#define NV0073_CTRL_DFP_DISP_MUX_STATE_INVALID 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_STATE_INTEGRATED_GPU 0x00000001U +#define NV0073_CTRL_DFP_DISP_MUX_STATE_DISCRETE_GPU 0x00000002U +#define NV0073_CTRL_DFP_DISP_MUX_MODE 4:2 +#define NV0073_CTRL_DFP_DISP_MUX_MODE_INVALID 0x00000000U +#define NV0073_CTRL_DFP_DISP_MUX_MODE_INTEGRATED_ONLY 0x00000001U +#define NV0073_CTRL_DFP_DISP_MUX_MODE_DISCRETE_ONLY 0x00000002U +#define NV0073_CTRL_DFP_DISP_MUX_MODE_HYBRID 0x00000003U +#define NV0073_CTRL_DFP_DISP_MUX_MODE_DYNAMIC 0x00000004U + + + +/* +* NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING +* +* This command can be used to get DSI mode timing parameters. +* +* subDeviceInstance +* This parameter specifies the subdevice instance within the +* NV04_DISPLAY_COMMON parent device to which the operation should be +* directed. This parameter must specify a value between zero and the +* total number of subdevices within the parent device. This parameter +* should be set to zero for default behavior. +* displayId +* This parameter specifies the ID of the display on which the DSI +* info will be set. The display ID must be a DSI-capable display. +* hActive +* This parameter specifies the horizontal length of the active pixel +* data in the raster. +* vActive +* This parameter specifies the vertical lines of the active pixel +* data in the raster. +* bpp +* This parameter specifies the depth (Bits per Pixel) of the output +* display stream. +* refresh +* This parameter specifies the refresh rate of the panel (in Hz). +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT +* NV_ERR_NOT_SUPPORTED +*/ + +#define NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING (0x731166U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS_MESSAGE_ID (0x66U) + +typedef struct NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 hActive; + NvU32 vActive; + NvU32 bpp; + NvU32 refresh; +} NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS; + + + +/* _ctrl0073dfp_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h new file mode 100644 index 000000000..2236abd59 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h @@ -0,0 +1,2744 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073dp.finn +// + + + + +#include "ctrl/ctrl0073/ctrl0073base.h" + +/* NV04_DISPLAY_COMMON dfp-display-specific control commands and parameters */ + +/* + * NV0073_CTRL_CMD_DP_AUXCH_CTRL + * + * This command can be used to perform an aux channel transaction to the + * displayPort receiver. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the dfp + * caps should be returned. The display ID must a dfp display. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * bAddrOnly + * If set to NV_TRUE, this parameter prompts an address-only + * i2c-over-AUX transaction to be issued, if supported. Else the + * call fails with NVOS_STATUS_ERR_NOT_SUPPORTED. The size parameter is + * expected to be 0 for address-only transactions. + * cmd + * This parameter is an input to this command. The cmd parameter follows + * Section 2.4 AUX channel syntax in the DisplayPort spec. + * Here are the current defined fields: + * NV0073_CTRL_DP_AUXCH_CMD_TYPE + * This specifies the request command transaction + * NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C + * Set this value to indicate a I2C transaction. + * NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX + * Set this value to indicate a DisplayPort transaction. + * NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT + * This field is dependent on NV0073_CTRL_DP_AUXCH_CMD_TYPE. + * It is only valid if NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C + * is specified above and indicates a middle of transaction. + * In the case of AUX, this field should be set to zero. The valid + * values are: + * NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE + * The I2C transaction is not in the middle of a transaction. + * NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE + * The I2C transaction is in the middle of a transaction. 
+ * NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE + * The request type specifies if we are doing a read/write or write + * status request: + * NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ + * An I2C or AUX channel read is requested. + * NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE + * An I2C or AUX channel write is requested. + * NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS + * An I2C write status request desired. This value should + * not be set in the case of an AUX CH request and only applies + * to I2C write transaction command. + * addr + * This parameter is an input to this command. The addr parameter follows + * Section 2.4 in DisplayPort spec and the client should refer to the valid + * address in DisplayPort spec. Only the first 20 bits are valid. + * data[] + * In the case of a read transaction, this parameter returns the data from + * transaction request. In the case of a write transaction, the client + * should write to this buffer for the data to send. The max # of bytes + * allowed is NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE. + * size + * Specifies how many data bytes to read/write depending on the transaction type. + * The input size value should be indexed from 0. That means if you want to read + * 1 byte -> size = 0, 2 bytes -> size = 1, 3 bytes -> size = 2, up to 16 bytes + * where size = 15. On return, this parameter returns total number of data bytes + * successfully read/written from/to the transaction (indexed from 1). That is, + * if you successfully requested 1 byte, you would send down size = 0. On return, + * you should expect size = 1 if all 1 byte were successfully read. (Note that + * it is valid for a display to reply with fewer than the requested number of + * bytes; in that case, it is up to the client to make a new request for the + * remaining bytes.) + * replyType + * This parameter is an output to this command. It returns the auxChannel + * status after the end of the aux Ch transaction. The valid values are + * based on the DisplayPort spec: + * NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK + * In the case of a write, + * AUX: write transaction completed and all data bytes written. + * I2C: return size bytes has been written to i2c slave. + * In the case of a read, return of ACK indicates ready to reply + * another read request. + * NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK + * In the case of a write, first return size bytes have been written. + * In the case of a read, implies that does not have requested data + * for the read request transaction. + * NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER + * Not ready for the write/read request and client should retry later. + * NV0073_CTRL_DP_DISPLAYPORT_AUXCH_REPLYTYPE_I2CNACK + * Applies to I2C transactions only. For I2C write transaction: + * has written the first return size bytes to I2C slave before getting + * NACK. For a read I2C transaction, the I2C slave has NACKED the I2C + * address. + * NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER + * Applicable to I2C transactions. For I2C write and read + * transactions, I2C slave has yet to ACK or NACK the I2C transaction. + * NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT + * The receiver did not respond within the timeout period defined in + * the DisplayPort 1.1a specification. + * retryTimeMs + * This parameter is an output to this command. In case of + * NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time + * duration in milli-seconds after which client should retry this command. 
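+ *
+ *     As an illustrative sketch only (not part of this header), a one-byte
+ *     native AUX read of DPCD offset 0x00000 (DPCD_REV) could be set up as
+ *     below; the 0-indexed size convention described above is the point of
+ *     the example, and issuing the control call is left to the client:
+ *
+ *       NV0073_CTRL_DP_AUXCH_CTRL_PARAMS p = { 0 };
+ *       p.subDeviceInstance = 0;
+ *       p.displayId = displayId;   // one displayId bit for the DP display
+ *       p.cmd  = DRF_DEF(0073_CTRL, _DP_AUXCH_CMD, _TYPE, _AUX) |
+ *                DRF_DEF(0073_CTRL, _DP_AUXCH_CMD, _REQ_TYPE, _READ);
+ *       p.addr = 0x00000;
+ *       p.size = 0;                // request a single byte
+ *       // on NV_OK with replyType == NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK,
+ *       // p.size returns 1 and p.data[0] holds the byte that was read
+ *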
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NVOS_STATUS_ERROR_RETRY + */ +#define NV0073_CTRL_CMD_DP_AUXCH_CTRL (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U +#define NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID (0x41U) + +typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bAddrOnly; + NvU32 cmd; + NvU32 addr; + NvU8 data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE]; + NvU32 size; + NvU32 replyType; + NvU32 retryTimeMs; +} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS; + +#define NV0073_CTRL_DP_AUXCH_CMD_TYPE 3:3 +#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C (0x00000000U) +#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX (0x00000001U) +#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT 2:2 +#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE (0x00000000U) +#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE (0x00000001U) +#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE 1:0 +#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE (0x00000000U) +#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ (0x00000001U) +#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS (0x00000002U) + +#define NV0073_CTRL_DP_AUXCH_ADDR 20:0 + +#define NV0073_CTRL_DP_AUXCH_REPLYTYPE 3:0 +#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_ACK (0x00000000U) +#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_NACK (0x00000001U) +#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_DEFER (0x00000002U) +#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_TIMEOUT (0x00000003U) +#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CNACK (0x00000004U) +#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_I2CDEFER (0x00000008U) + +//This is not the register field, this is software failure case when we +//have invalid argument +#define NV0073_CTRL_DP_AUXCH_REPLYTYPE_INVALID_ARGUMENT (0xffffffffU) + +/* + * NV0073_CTRL_CMD_DP_AUXCH_SET_SEMA + * + * This command can be used to set the semaphore in order to gain control of + * the aux channel. This control is only used in HW verification. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the dfp + * caps should be returned. The display ID must a dfp display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * owner + * This parameter is an input to this command. + * Here are the current defined fields: + * NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RM + * Write the aux channel semaphore for resource manager to own the + * the aux channel. + * NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_VBIOS + * Write the aux channel semaphore for vbios/efi to own the + * the aux channel. This value is used only for HW verification + * and should not be used in normal driver operation. + * NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_PMU + * Write the aux channel semaphore for pmu to own the + * the aux channel. This value is used only by pmu + * and should not be used in normal driver operation. 
+ * NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_DPU + * Write the aux channel semaphore for dpu to own the + * the aux channel and should not be used in normal + * driver operation. + * NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_SEC2 + * Write the aux channel semaphore for sec2 to own the + * the aux channel and should not be used in normal + * driver operation. + * NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RELEASE + * Write the aux channel semaphore for hardware to own the + * the aux channel. This value is used only for HW verification + * and should not be used in normal driver operation. + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_DP_AUXCH_SET_SEMA (0x731342U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS_MESSAGE_ID (0x42U) + +typedef struct NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 owner; +} NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS; + +#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER 2:0 +#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RELEASE (0x00000000U) +#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_RM (0x00000001U) +#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_VBIOS (0x00000002U) +#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_PMU (0x00000003U) +#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_DPU (0x00000004U) +#define NV0073_CTRL_DP_AUXCH_SET_SEMA_OWNER_SEC2 (0x00000005U) + +/* + * NV0073_CTRL_CMD_DP_CTRL + * + * This command is used to set various displayPort configurations for + * the specified displayId such a lane count and link bandwidth. It + * is assumed that link training has already occurred. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the dfp + * caps should be returned. The display ID must a dfp display. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * cmd + * This parameter is an input to this command. + * Here are the current defined fields: + * NV0073_CTRL_DP_CMD_SET_LANE_COUNT + * Set to specify the number of displayPort lanes to configure. + * NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE + * No request to set the displayport lane count. + * NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE + * Set this value to indicate displayport lane count change. + * NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH + * Set to specify a request to change the link bandwidth. + * NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH_FALSE + * No request to set the displayport link bandwidth. + * NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH_TRUE + * Set this value to indicate displayport link bandwidth change. + * NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH + * Set to specify a request to change the link bandwidth. + * NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH_FALSE + * No request to set the displayport link bandwidth. + * NV0073_CTRL_DP_CMD_SET_LINK_BANDWIDTH_TRUE + * Set this value to indicate displayport link bandwidth change. + * NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD + * Set to disable downspread during link training. 
+ * NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE + * Downspread will be enabled. + * NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE + * Downspread will be disabled (e.g. for compliance testing). + * NV0073_CTRL_DP_CMD_SET_FORMAT_MODE + * This field specifies the DP stream mode. + * NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM + * This value indicates that single stream mode is specified. + * NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM + * This value indicates that multi stream mode is specified. + * NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING + * Set to do Fast link training (avoid AUX transactions for link + * training). We need to restore all the previous trained link settings + * (e.g. the drive current/preemphasis settings) before doing FLT. + * During FLT, we send training pattern 1 followed by training pattern 2 + * each for a period of 500us. + * NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO + * Not a fast link training scenario. + * NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES + * Do fast link training. + * NV0073_CTRL_DP_CMD_NO_LINK_TRAINING + * Set to do No link training. We need to restore all the previous + * trained link settings (e.g. the drive current/preemphasis settings) + * before doing NLT, but we don't need to do the Clock Recovery and + * Channel Equalization. (Please refer to NVIDIA PANEL SELFREFRESH + * CONTROLLER SPECIFICATION 3.1.6 for detail flow) + * NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO + * Not a no link training scenario. + * NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES + * Do no link training. + * NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING + * Specifies whether RM should use the DP Downspread setting specified by + * NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD command regardless of what the Display + * is capable of. This is used along with the Fake link training option so that + * we can configure the GPU to enable/disable spread when a real display is + * not connected. + * NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE + * RM Always use the DP Downspread setting specified. + * NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT + * RM will enable Downspread only if the display supports it. (default) + * NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING + * Specifies whether RM should skip HW training of the link. + * If this is the case then RM only updates its SW state without actually + * touching any HW registers. Clients should use this ONLY if it has determined - + * a. link is trained and not lost + * b. desired link config is same as current trained link config + * c. link is not in D3 (should be in D0) + * NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO + * RM doesn't skip HW LT as the current Link Config is not the same as the + * requested Link Config. + * NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES + * RM skips HW LT and only updates its SW state as client has determined that + * the current state of the link and the requested Link Config is the same. + * NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG + * Set if the client does not want link training to happen. + * This should ONLY be used for HW verification. + * NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE + * This is normal production behaviour which shall perform + * link training or follow the normal procedure for lane count + * reduction. + * NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE + * Set this value to not perform link config steps, this should + * only be turned on for HW verif testing. If _LINK_BANDWIDTH + * or _LANE_COUNT is set, RM will only write to the TX DP registers + * and perform no link training. 
+ *     NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED
+ *       This field specifies if source grants Post Link training Adjustment request or not.
+ *         NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO
+ *           Source does not grant Post Link training Adjustment request
+ *         NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES
+ *           Source grants Post Link training Adjustment request
+ *     NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING
+ *       This field specifies if fake link training is to be done. This will
+ *       program enough of the hardware to avoid any hardware hangs and,
+ *       depending upon the option chosen by the client, the OR will be
+ *       enabled for transmission.
+ *         NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO
+ *           No Fake LT will be performed
+ *         NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION
+ *           SOR will not be powered up during Fake LT
+ *         NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON
+ *           SOR will be powered up during Fake LT
+ *     NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER
+ *       This field specifies if source wants to link train LT Tunable Repeaters or not.
+ *         NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO
+ *           Source does not want to link train LT Tunable Repeaters
+ *         NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES
+ *           Source wants to link train LT Tunable Repeaters
+ *     NV0073_CTRL_DP_CMD_BANDWIDTH_TEST
+ *       Set if the client wants to reset the link after the link
+ *       training is done, as a part of uncommitting a DP display.
+ *         NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO
+ *           This is for normal operation, if DD decided not to reset the link.
+ *         NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES
+ *           This is to reset the link, if DD decided to uncommit the display because
+ *           the link is no longer required to be enabled, as in a DP compliance test.
+ *     NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE
+ *       Set if the client does not want link training to happen.
+ *       This should ONLY be used for HW verification if necessary.
+ *         NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE
+ *           This is normal production behaviour which shall perform
+ *           pre link training checks such as if both rx and tx are capable
+ *           of the requested config for lane and link bw.
+ *         NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE
+ *           Set this value to bypass the link config check. This should
+ *           only be turned on for HW verif testing. If _LINK_BANDWIDTH
+ *           or _LANE_COUNT is set, RM will not check TX and RX caps.
+ *     NV0073_CTRL_DP_CMD_FALLBACK_CONFIG
+ *       Set if the config requested by the client fails and the link is being
+ *       trained for the fallback config.
+ *         NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE
+ *           This is the normal case when the link is being trained for a requested config.
+ *         NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE
+ *           Set this value in case the link configuration for the requested config fails
+ *           and the link is being trained for a fallback config.
+ *     NV0073_CTRL_DP_CMD_ENABLE_FEC
+ *       Specifies whether RM should set NV_DPCD14_FEC_CONFIGURATION_FEC_READY
+ *       before link training if the client has determined that FEC is required (for DSC).
+ *       If required to be enabled, RM sets the FEC enable bit in the panel and starts
+ *       link training. Enabling/disabling FEC on the GPU side is not done during link
+ *       training; the RM ctrl call NV0073_CTRL_CMD_DP_CONFIGURE_FEC has to be called
+ *       explicitly to enable/disable FEC after LT (including PostLT LQA).
+ *       If enabled, FEC would be disabled while powering down the link.
+ *       The client has to make sure to account for the 3% overhead of transmitting
+ *       FEC symbols while calculating DP bandwidth.
+ *         NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE
+ *           This is the normal case when FEC is not required
+ *         NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE
+ *           Set this value in case FEC needs to be enabled
+ *   data
+ *     This parameter is an input and output to this command.
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_DP_DATA_SET_LANE_COUNT
+ *         This field specifies the desired setting for lane count. A client
+ *         may choose any lane count as long as it does not exceed the
+ *         capability of the DisplayPort receiver as indicated in the
+ *         receiver capability field. The valid values for this field are:
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0
+ *             For zero-lane configurations, link training is shut down.
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1
+ *             For one-lane configurations, lane0 is used.
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2
+ *             For two-lane configurations, lane0 and lane1 are used.
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4
+ *             For four-lane configurations, all lanes are used.
+ *           NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8
+ *             For devices that support 8-lane DP.
+ *         On return, the lane count setting is returned which may be
+ *         different from the requested input setting.
+ *       NV0073_CTRL_DP_DATA_SET_LINK_BW
+ *         This field specifies the desired setting for link bandwidth. There
+ *         are only four supported main link bandwidth settings. The
+ *         valid values for this field are:
+ *           NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS
+ *           NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS
+ *           NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS
+ *           NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS
+ *         On return, the link bandwidth setting is returned which may be
+ *         different from the requested input setting.
+ *       NV0073_CTRL_DP_DATA_TARGET
+ *         This field specifies which physical repeater or sink is to be trained.
+ *         The client should make sure that
+ *           1. physical repeaters are targeted in order, starting from the one closest to the GPU, and
+ *           2. all physical repeaters are properly trained before the sink is targeted.
+ *         The valid values for this field are:
+ *           NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_X
+ *             'X' denotes the physical repeater index. It's a 1-based index to
+ *             reserve 0 for _SINK.
+ *             'X' can't be more than 8.
+ *           NV0073_CTRL_DP_DATA_TARGET_SINK
+ *   err
+ *     This parameter provides info regarding the outcome of this control
+ *     call. If zero, no errors were found.
+ *     Otherwise, this parameter will specify the error detected.
+ *     The parameter is broken down as follows:
+ *       NV0073_CTRL_DP_ERR_SET_LANE_COUNT
+ *         If set to _ERR, set lane count failed.
+ *       NV0073_CTRL_DP_ERR_SET_LINK_BANDWIDTH
+ *         If set to _ERR, set link bandwidth failed.
+ *       NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD
+ *         If set to _ERR, disable downspread failed.
+ *       NV0073_CTRL_DP_ERR_INVALID_PARAMETER
+ *         If set to _ERR, at least one of the calling functions
+ *         failed due to an invalid parameter.
+ *       NV0073_CTRL_DP_ERR_SET_LINK_TRAINING
+ *         If set to _ERR, link training failed.
+ *       NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER
+ *         If set to _ERR, the operation to link train the repeater failed.
+ *       NV0073_CTRL_DP_ERR_ENABLE_FEC
+ *         If set to _ERR, the operation to enable FEC failed.
+ *   retryTimeMs
+ *     This parameter is an output to this command. In case of
+ *     NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time
+ *     duration in milli-seconds after which the client should retry this command.
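+ *
+ *     As an illustrative sketch only (not part of this header), a request to
+ *     train a 4-lane, 2.70 Gbps link could be encoded as below using the DRF
+ *     helpers from nvmisc.h; issuing the control call is left to the client:
+ *
+ *       NV0073_CTRL_DP_CTRL_PARAMS p = { 0 };
+ *       p.subDeviceInstance = 0;
+ *       p.displayId = displayId;   // one displayId bit for the DP display
+ *       p.cmd  = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) |
+ *                DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE);
+ *       p.data = DRF_DEF(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, _4) |
+ *                DRF_DEF(0073_CTRL, _DP_DATA, _SET_LINK_BW, _2_70GBPS);
+ *       // on NV_OK, p.data returns the lane count / link rate that was
+ *       // actually trained, and p.err reports any per-step failures
+ *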
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NVOS_STATUS_ERROR_RETRY + */ + +#define NV0073_CTRL_CMD_DP_CTRL (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID (0x43U) + +typedef struct NV0073_CTRL_DP_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 cmd; + NvU32 data; + NvU32 err; + NvU32 retryTimeMs; + NvU32 eightLaneDpcdBaseAddr; +} NV0073_CTRL_DP_CTRL_PARAMS; + +#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT 0:0 +#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_SET_LINK_BW 1:1 +#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD 2:2 +#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_UNUSED 3:3 +#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE 4:4 +#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM (0x00000000U) +#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM (0x00000001U) +#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING 5:5 +#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING 6:6 +#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING 7:7 +#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING 8:8 +#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT (0x00000000U) +#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE (0x00000001U) +#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING 9:9 +#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED 10:10 +#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING 12:11 +#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U) +#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON (0x00000002U) +#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER 13:13 +#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG 14:14 +#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_FEC 15:15 +#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE (0x00000001U) + +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST 29:29 +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO (0x00000000U) +#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES (0x00000001U) +#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE 30:30 +#define 
NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE (0x00000001U) +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG 31:31 +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE (0x00000000U) +#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE (0x00000001U) + +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT 4:0 +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0 (0x00000000U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1 (0x00000001U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2 (0x00000002U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4 (0x00000004U) +#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8 (0x00000008U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW 15:8 +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS (0x00000006U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS (0x00000008U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS (0x00000009U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS (0x0000000AU) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS (0x0000000CU) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS (0x00000010U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS (0x00000014U) +#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS (0x0000001EU) +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING 18:18 +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO (0x00000000U) +#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES (0x00000001U) +#define NV0073_CTRL_DP_DATA_TARGET 22:19 +#define NV0073_CTRL_DP_DATA_TARGET_SINK (0x00000000U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0 (0x00000001U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1 (0x00000002U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2 (0x00000003U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3 (0x00000004U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4 (0x00000005U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5 (0x00000006U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6 (0x00000007U) +#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7 (0x00000008U) + +#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT 0:0 +#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_SET_LANE_COUNT_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_SET_LINK_BW 1:1 +#define NV0073_CTRL_DP_ERR_SET_LINK_BW_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_SET_LINK_BW_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD 2:2 +#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_DISABLE_DOWNSPREAD_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_UNUSED 3:3 +#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY 4:4 +#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_CLOCK_RECOVERY_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION 5:5 +#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_CHANNEL_EQUALIZATION_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER 6:6 +#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_TRAIN_PHY_REPEATER_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_ENABLE_FEC 7:7 +#define NV0073_CTRL_DP_ERR_ENABLE_FEC_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_ENABLE_FEC_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_CR_DONE_LANE 11:8 +#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_0_LANE (0x00000000U) +#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_1_LANE (0x00000001U) +#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_2_LANE (0x00000002U) +#define 
NV0073_CTRL_DP_ERR_CR_DONE_LANE_4_LANE (0x00000004U) +#define NV0073_CTRL_DP_ERR_CR_DONE_LANE_8_LANE (0x00000008U) +#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE 15:12 +#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_0_LANE (0x00000000U) +#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_1_LANE (0x00000001U) +#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_2_LANE (0x00000002U) +#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_4_LANE (0x00000004U) +#define NV0073_CTRL_DP_ERR_EQ_DONE_LANE_8_LANE (0x00000008U) +#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER 30:30 +#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_INVALID_PARAMETER_ERR (0x00000001U) +#define NV0073_CTRL_DP_ERR_LINK_TRAINING 31:31 +#define NV0073_CTRL_DP_ERR_LINK_TRAINING_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ERR_LINK_TRAINING_ERR (0x00000001U) + +/* + * NV0073_CTRL_DP_LANE_DATA_PARAMS + * + * This structure provides lane characteristics. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the dfp + * caps should be returned. The display ID must a dfp display. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * numLanes + * Indicates number of lanes for which the data is valid + * data + * This parameter is an input to this command. + * Here are the current defined fields: + * NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS + * This field specifies the preemphasis level set in the lane. + * The valid values for this field are: + * NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE + * No-preemphais for this lane. + * NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 + * Preemphasis set to 3.5 dB. + * NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 + * Preemphasis set to 6.0 dB. + * NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 + * Preemphasis set to 9.5 dB. + * NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT + * This field specifies the drive current set in the lane. 
+ * The valid values for this field are: + * NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 + * Drive current level is set to 8 mA + * NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 + * Drive current level is set to 12 mA + * NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 + * Drive current level is set to 16 mA + * NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 + * Drive current level is set to 24 mA + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_MAX_LANES 8U + +typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 numLanes; + NvU32 data[NV0073_CTRL_MAX_LANES]; +} NV0073_CTRL_DP_LANE_DATA_PARAMS; + +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS 1:0 +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE (0x00000000U) +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1 (0x00000001U) +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2 (0x00000002U) +#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3 (0x00000003U) +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT 3:2 +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U) +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U) +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U) +#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U) + +/* + * NV0073_CTRL_CMD_GET_DP_LANE_DATA + * + * This command is used to get the current pre-emphasis and drive current + * level values for the specified number of lanes. + * + * The command takes a NV0073_CTRL_DP_LANE_DATA_PARAMS structure as the + * argument with the appropriate subDeviceInstance and displayId filled. + * The arguments of this structure and the format of preemphasis and drive- + * current levels are described above. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + * NOTE: This control call is only for testing purposes and + * should not be used in normal DP operations. Preemphais + * and drive current level will be set during Link training + * in normal DP operations + * + */ + +#define NV0073_CTRL_CMD_DP_GET_LANE_DATA (0x731345U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | 0x45" */ + + +/* + * NV0073_CTRL_CMD_SET_DP_LANE_DATA + * + * This command is used to set the pre-emphasis and drive current + * level values for the specified number of lanes. + * + * The command takes a NV0073_CTRL_DP_LANE_DATA_PARAMS structure as the + * argument with the appropriate subDeviceInstance, displayId, number of + * lanes, preemphasis and drive current values filled in. + * The arguments of this structure and the format of preemphasis and drive- + * current levels are described above. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + * NOTE: This control call is only for testing purposes and + * should not be used in normal DP operations. 
Preemphais + * and drivecurrent will be set during Link training in + * normal DP operations + * + */ + +#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | 0x46" */ + +/* + * NV0073_CTRL_DP_CSTM + * + * This structure specifies the 80 bit DP CSTM Test Pattern data + * The fields of this structure are to be specified as follows: + * lower takes bits 31:0 + * middle takes bits 63:32 + * upper takes bits 79:64 + * + */ +typedef struct NV0073_CTRL_DP_CSTM { + NvU32 lower; + NvU32 middle; + NvU32 upper; +} NV0073_CTRL_DP_CSTM; + +/* + * NV0073_CTRL_DP_TESTPATTERN + * + * This structure specifies the possible test patterns available in + * display port. The field testPattern can be one of the following + * values. + * NV0073_CTRL_DP_SET_TESTPATTERN_DATA_NONE + * No test pattern on the main link + * NV0073_CTRL_DP_SET_TESTPATTERN_DATA_D10_2 + * D10.2 pattern on the main link + * NV0073_CTRL_DP_SET_TESTPATTERN_DATA_SERMP + * SERMP pattern on main link + * NV0073_CTRL_DP_SET_TESTPATTERN_DATA_PRBS_7 + * PRBS7 pattern on the main link + * + */ + +typedef struct NV0073_CTRL_DP_TESTPATTERN { + NvU32 testPattern; +} NV0073_CTRL_DP_TESTPATTERN; + +#define NV0073_CTRL_DP_TESTPATTERN_DATA 2:0 +#define NV0073_CTRL_DP_TESTPATTERN_DATA_NONE (0x00000000U) +#define NV0073_CTRL_DP_TESTPATTERN_DATA_D10_2 (0x00000001U) +#define NV0073_CTRL_DP_TESTPATTERN_DATA_SERMP (0x00000002U) +#define NV0073_CTRL_DP_TESTPATTERN_DATA_PRBS_7 (0x00000003U) +#define NV0073_CTRL_DP_TESTPATTERN_DATA_CSTM (0x00000004U) +#define NV0073_CTRL_DP_TESTPATTERN_DATA_HBR2COMPLIANCE (0x00000005U) +#define NV0073_CTRL_DP_TESTPATTERN_DATA_CP2520PAT3 (0x00000006U) + +/* + * NV0073_CTRL_CMD_DP_SET_TESTPATTERN + * + * This command forces the main link to output the selected test patterns + * supported in DP specs. + * + * The command takes a NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS structure as the + * argument with the appropriate subDeviceInstance, displayId and test pattern + * to be set as inputs. + * The arguments of this structure and the format of test patterns are + * described above. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the dfp + * caps should be returned. The display ID must a dfp display. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * testPattern + * This parameter is of type NV0073_CTRL_DP_TESTPATTERN and specifies + * the testpattern to set on displayport. The format of this structure + * is described above. + * laneMask + * This parameter specifies the bit mask of DP lanes on which test + * pattern is to be applied. + * lower + * This parameter specifies the lower 64 bits of the CSTM test pattern + * upper + * This parameter specifies the upper 16 bits of the CSTM test pattern + * bIsHBR2 + * This Boolean parameter is set to TRUE if HBR2 compliance test is + * being performed. 
+ * bSkipLaneDataOverride + * skip override of pre-emp and drive current + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + * NOTE: This control call is only for testing purposes and + * should not be used in normal DP operations. Preemphais + * and drivecurrent will be set during Link training in + * normal DP operations + * + */ + +#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_MESSAGE_ID (0x47U) + +typedef struct NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NV0073_CTRL_DP_TESTPATTERN testPattern; + NvU8 laneMask; + NV0073_CTRL_DP_CSTM cstm; + NvBool bIsHBR2; + NvBool bSkipLaneDataOverride; +} NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_TESTPATTERN (0x731347U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_CSTM0 31:0 +#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_CSTM1 63:32 +#define NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS_CSTM2 15:0 + +/* + * NV0073_CTRL_CMD_GET_DP_TESTPATTERN + * + * This command returns the current test pattern set on the main link of + * Display Port. + * + * The command takes a NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS structure as the + * argument with the appropriate subDeviceInstance, displayId as inputs and + * returns the current test pattern in testPattern field of the structure. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the dfp + * caps should be returned. The display ID must a dfp display. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * testPattern + * This parameter is of type NV0073_CTRL_DP_TESTPATTERN and specifies the + * testpattern set on displayport. The format of this structure is + * described above. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + * NOTE: This control call is only for testing purposes and + * should not be used in normal DP operations. + * + */ + +#define NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS_MESSAGE_ID (0x48U) + +typedef struct NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NV0073_CTRL_DP_TESTPATTERN testPattern; +} NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS; + + +#define NV0073_CTRL_CMD_DP_GET_TESTPATTERN (0x731348U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_TESTPATTERN_PARAMS_MESSAGE_ID" */ + +/* + * NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA + * + * This structure specifies the Pre-emphasis/Drive Current/Postcursor2/TxPu information + * for a display port device. These are the the current values that RM is + * using to map the levels for Pre-emphasis and Drive Current for Link + * Training. + * preEmphasis + * This field specifies the preemphasis values. + * driveCurrent + * This field specifies the driveCurrent values. + * postcursor2 + * This field specifies the postcursor2 values. 
+ * TxPu + * This field specifies the pull-up current source drive values. + */ +#define NV0073_CTRL_MAX_DRIVECURRENT_LEVELS 4U +#define NV0073_CTRL_MAX_PREEMPHASIS_LEVELS 4U +#define NV0073_CTRL_MAX_POSTCURSOR2_LEVELS 4U + +typedef struct NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1 { + NvU32 preEmphasis; + NvU32 driveCurrent; + NvU32 postCursor2; + NvU32 TxPu; +} NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1; + +typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_1 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE1[NV0073_CTRL_MAX_PREEMPHASIS_LEVELS]; + +typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE1 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE2[NV0073_CTRL_MAX_DRIVECURRENT_LEVELS]; + +typedef NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_SLICE2 NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA[NV0073_CTRL_MAX_POSTCURSOR2_LEVELS]; + + +/* + * NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA + * + * This command is used to override the Pre-emphasis/Drive Current/PostCursor2/TxPu + * data in the RM. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the digital display for which the + * data should be returned. The display ID must a digital display. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * dpData + * This parameter is of type NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA + * and specifies the Pre-emphasis/Drive Current/Postcursor2/TxPu information + * for a display port device. + * The command takes a NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS + * structure as the argument with the appropriate subDeviceInstance, displayId, + * and dpData. The fields of this structure are described above. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID (0x51U) + +typedef struct NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA dpData; +} NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA (0x731351U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID" */ + +/* + * NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA + * + * This command is used to get the Pre-emphasis/Drive Current/PostCursor2/TxPu data. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the digital display for which the + * data should be returned. 
The display ID must a digital display. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * The command takes a NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS + * structure as the argument with the appropriate subDeviceInstance, displayId, + * and dpData. The fields of this structure are described above. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID (0x52U) + +typedef struct NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NV0073_CTRL_DP_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA dpData; +} NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA (0x731352U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS_MESSAGE_ID" */ + + + +/* + * NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL + * + * This command is used to set various Main Link configurations for + * the specified displayId such as powering up/down Main Link. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the DP display which owns + * the Main Link to be adjusted. The display ID must a DP display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * ctrl + * Here are the current defined fields: + * NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN + * This value will power down Main Link. + * NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP + * This value will power up Main Link. + * +*/ +#define NV0073_CTRL_CMD_DP_MAIN_LINK_CTRL (0x731356U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS_MESSAGE_ID (0x56U) + +typedef struct NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 ctrl; +} NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS; + +#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE 0:0 +#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERDOWN (0x00000000U) +#define NV0073_CTRL_DP_MAIN_LINK_CTRL_POWER_STATE_POWERUP (0x00000001U) + + + +/* + * NV0073_CTRL_CMD_DP_GET_AUDIO_MUTESTREAM + * + * This command returns the current audio mute state on the main link of Display Port + * + * The command takes a NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS structure as the + * argument with the appropriate subDeviceInstance, displayId as inputs and returns the + * current mute status in mute field of the structure. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. 
This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the audio stream + * state should be returned. The display ID must a DP display. + * If the display ID is invalid or if it is not a DP display, + * this call will return NV_ERR_INVALID_ARGUMENT. + * mute + * This parameter will return one of the following values: + * NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_DISABLE + * Audio mute is currently disabled. + * NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE + * Audio mute is currently enabled. + * NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_AUTO + * Audio mute is automatically controlled by hardware. + * NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_UNKNOWN + * Audio mute is currently in an unknown state. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + * + */ +#define NV0073_CTRL_CMD_DP_GET_AUDIO_MUTESTREAM (0x731358U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID (0x58U) + +typedef struct NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 mute; +} NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS; + +#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_DISABLE (0x00000000U) +#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE (0x00000001U) +#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_AUTO (0x00000002U) +#define NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_UNKNOWN (0x00000003U) + +/* + * NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM + * + * This command sets the current audio mute state on the main link of Display Port + * + * The command takes a NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS structure as the + * argument with the appropriate subDeviceInstance, displayId as inputs and whether to enable + * or disable mute in the parameter - mute. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the audio stream + * state should be returned. The display ID must a DP display. + * If the display ID is invalid or if it is not a DP display, + * this call will return NV_ERR_INVALID_ARGUMENT. + * mute + * This parameter is an input to this command. + * Here are the current defined values: + * NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_DISABLE + * Audio mute will be disabled. + * NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE + * Audio mute will be enabled. + * NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_AUTO + * Audio mute will be automatically controlled by hardware. + * + * Note: Any other value for mute in NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS is not allowed and + * the API will return an error. 
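+ *
+ *     As an illustrative sketch only (not part of this header), muting the
+ *     audio stream on one DP display could be requested as below; issuing the
+ *     control call is left to the client:
+ *
+ *       NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS p = { 0 };
+ *       p.subDeviceInstance = 0;
+ *       p.displayId = displayId;   // one displayId bit for the DP display
+ *       p.mute = NV0073_CTRL_DP_AUDIO_MUTESTREAM_MUTE_ENABLE;
+ *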
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ *
+ */
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID (0x59U)
+
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_DP_ASSR_CTRL
+ *
+ * This command is used to control and query DisplayPort ASSR
+ * settings for the specified displayId.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the ID of the DP display which owns
+ *     the Main Link to be adjusted. The display ID must be a DP display
+ *     as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command.
+ *     If more than one displayId bit is set or the displayId is not a DP,
+ *     this call will return NV_ERR_INVALID_ARGUMENT.
+ *   cmd
+ *     This input parameter specifies the command to execute. Legal
+ *     values for this parameter include:
+ *       NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE
+ *         This field can be used to query the ASSR state. When used, the ASSR
+ *         state value is returned in the data parameter.
+ *       NV0073_CTRL_DP_ASSR_CMD_DISABLE
+ *         This field can be used to control the ASSR disable state.
+ *       NV0073_CTRL_DP_ASSR_CMD_FORCE_STATE
+ *         This field can be used to control the ASSR state without looking at
+ *         whether the display supports it. Used in conjunction with
+ *         fake link training. Note that this updates the state on the
+ *         source side only. The sink is assumed to be configured for ASSR
+ *         by the client (DD).
+ *   data
+ *     This parameter specifies the data associated with the cmd
+ *     parameter.
+ *       NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED
+ *         This field indicates the state of ASSR when queried using the cmd
+ *         parameter. When used to control the state, it indicates whether
+ *         ASSR should be enabled or disabled.
+ *           NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_NO
+ *             When queried, this flag indicates that ASSR is not enabled on the sink.
+ *             When used as the data for CMD_FORCE_STATE, it requests ASSR
+ *             to be disabled on the source side.
+ *           NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_YES
+ *             When queried, this flag indicates that ASSR is enabled on the sink.
+ *             When used as the data for CMD_FORCE_STATE, it requests ASSR
+ *             to be enabled on the source side.
+ *   err
+ *     This output parameter specifies any errors associated with the cmd
+ *     parameter.
+ *       NV0073_CTRL_DP_ASSR_ERR_CAP
+ *         This field indicates the error pertaining to the ASSR capability of
+ *         the sink device.
+ *           NV0073_CTRL_DP_ASSR_ERR_CAP_NOERR
+ *             This flag indicates there is no error.
+ *           NV0073_CTRL_DP_ASSR_ERR_CAP_ERR
+ *             This flag indicates that the sink is not ASSR capable.
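+ *
+ *     As an illustrative sketch only (not part of this header), querying the
+ *     current ASSR state could look like the following; FLD_TEST_DRF is the
+ *     usual helper from nvmisc.h, and issuing the control call is left to the
+ *     client:
+ *
+ *       NV0073_CTRL_DP_ASSR_CTRL_PARAMS p = { 0 };
+ *       p.subDeviceInstance = 0;
+ *       p.displayId = displayId;   // one displayId bit for the DP display
+ *       p.cmd = NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE;
+ *       // on NV_OK, FLD_TEST_DRF(0073_CTRL, _DP_ASSR_DATA, _STATE_ENABLED,
+ *       //                        _YES, p.data) reports whether ASSR is
+ *       // enabled on the sink
+ *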
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV0073_CTRL_CMD_DP_ASSR_CTRL (0x73135aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_ASSR_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_ASSR_CTRL_PARAMS_MESSAGE_ID (0x5AU) + +typedef struct NV0073_CTRL_DP_ASSR_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 cmd; + NvU32 data; + NvU32 err; +} NV0073_CTRL_DP_ASSR_CTRL_PARAMS; + +#define NV0073_CTRL_DP_ASSR_CMD 31:0 +#define NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE (0x00000001U) +#define NV0073_CTRL_DP_ASSR_CMD_DISABLE (0x00000002U) +#define NV0073_CTRL_DP_ASSR_CMD_FORCE_STATE (0x00000003U) +#define NV0073_CTRL_DP_ASSR_CMD_ENABLE (0x00000004U) +#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED 0:0 +#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_NO (0x00000000U) +#define NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_YES (0x00000001U) +#define NV0073_CTRL_DP_ASSR_ERR_CAP 0:0 +#define NV0073_CTRL_DP_ASSR_ERR_CAP_NOERR (0x00000000U) +#define NV0073_CTRL_DP_ASSR_ERR_CAP_ERR (0x00000001U) + +/* + * NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID + * + * This command is used to assign a displayId from the free pool + * to a specific AUX Address in a DP 1.2 topology. The topology + * is uniquely identified by the DisplayId of the DP connector. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This is the DisplayId of the DP connector to which the topology + * is rooted. + * preferredDisplayId + * Client can sent a preferredDisplayID which RM can use during allocation + * if available. If this Id is a part of allDisplayMask in RM then we return + * a free available Id to the client. However, if this is set to + * NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID then we return allDisplayMask value. + * useBFM + * Set to true if DP-BFM is used during emulation/RTL Sim. + * + * [out] displayIdAssigned + * This is the out field that will receive the new displayId. If the + * function fails this is guaranteed to be 0. + * [out] allDisplayMask + * This is allDisplayMask RM variable which is returned only when + * preferredDisplayId is set to NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */ + +/* + * There cannot be more than 128 devices in a topology (also by DP 1.2 specification) + * NOTE: Temporarily lowered to pass XAPI RM tests. Should be reevaluated! 
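[Editorial note] A hedged sketch of an ASSR state query using the structure and defines just introduced; dispRmControl() is the same hypothetical dispatch helper as in the earlier audio-mute example.

static void dpAssrQuery(NvU32 displayId)
{
    NV0073_CTRL_DP_ASSR_CTRL_PARAMS assr = {0};
    assr.subDeviceInstance = 0;
    assr.displayId         = displayId;
    assr.cmd               = NV0073_CTRL_DP_ASSR_CMD_QUERY_STATE;
    dispRmControl(NV0073_CTRL_CMD_DP_ASSR_CTRL, &assr, sizeof(assr));

    if (assr.err == NV0073_CTRL_DP_ASSR_ERR_CAP_ERR)
    {
        /* Sink is not ASSR capable. */
    }
    else if (assr.data == NV0073_CTRL_DP_ASSR_DATA_STATE_ENABLED_YES)
    {
        /* Alternate scrambler seed is currently enabled. */
    }
}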
+ */ +#define NV0073_CTRL_CMD_DP_MAX_TOPOLOGY_NODES 120U +#define NV0073_CTRL_CMD_DP_INVALID_PREFERRED_DISPLAY_ID 0xffffffffU + +#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID (0x5BU) + +typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 preferredDisplayId; + + NvBool force; + NvBool useBFM; + + NvU32 displayIdAssigned; + NvU32 allDisplayMask; +} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID + * + * This command is used to return a multistream displayid to the unused pool. + * You must not call this function while either the ARM or ASSEMBLY state cache + * refers to this display-id. The head must not be attached. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This is the displayId to free. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + * + */ +#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID (0x5CU) + +typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; +} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS; + + + +/* + * NV0073_CTRL_CMD_DP_GET_LINK_CONFIG + * + * This command is used to query DisplayPort link config + * settings on the transmitter side. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the DP display which owns + * the Main Link to be queried. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * laneCount + * Number of lanes the DP transmitter hardware is set up to drive. + * linkBW + * The BW of each lane that the DP transmitter hardware is set up to drive. + * The values returned will be according to the DP specifications. 
+ * + */ +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG (0x731360U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 laneCount; + NvU32 linkBW; +} NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT 3:0 +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_0 (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_2 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_4 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW 3:0 +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_1_62GBPS (0x00000006U) +#define NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LINK_BW_2_70GBPS (0x0000000aU) + +/* + * NV0073_CTRL_CMD_DP_GET_EDP_DATA + * + * This command is used to query Embedded DisplayPort information. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the eDP display which owns + * the Main Link to be queried. + * If more than one displayId bit is set or the displayId is not a eDP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * data + * This output parameter specifies the data associated with the eDP display. + * It is only valid if this function returns NV_OK. + * NV0073_CTRL_CMD_DP_GET_EDP_DATA_PANEL_POWER + * This field indicates the state of the eDP panel power. + * NV0073_CTRL_CMD_DP_GET_EDP_DATA_PANEL_POWER_OFF + * This eDP panel is powered off. + * NV0073_CTRL_CMD_DP_GET_EDP_DATA_PANEL_POWER_ON + * This eDP panel is powered on. + * NV0073_CTRL_CMD_DP_GET_EDP_DATA_DPCD_POWER_OFF + * This field tells the client if DPCD power off command + * should be used for the current eDP panel. + * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_ENABLE + * This eDP panel can use DPCD to power off the panel. + * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_DISABLE + * This eDP panel cannot use DPCD to power off the panel. + * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_SET_POWER + * This field tells the client current eDP panel DPCD SET_POWER (0x600) status + * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_SET_POWER_D0 + * This eDP panel is current up and in full power mode. + * NV0073_CTRL_DP_GET_EDP_DATA_DPCD_SET_POWER_D3 + * This eDP panel is current standby. 
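[Editorial note] A sketch that reads back the programmed link configuration and converts the DP-spec link-BW code into a raw rate; the 0.27 Gbps granularity is implied by the _LINK_BW_1_62GBPS/_2_70GBPS encodings above. The dispatch helper and displayId are hypothetical as before.

static NvU32 dpGetRawLinkMbps(NvU32 displayId)
{
    NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS cfg = {0};
    cfg.subDeviceInstance = 0;
    cfg.displayId         = displayId;
    dispRmControl(NV0073_CTRL_CMD_DP_GET_LINK_CONFIG, &cfg, sizeof(cfg));

    /* linkBW is in 0.27 Gbps units: 0x06 -> 1620 Mbps, 0x0a -> 2700 Mbps.  */
    /* laneCount is the literal lane count (0, 1, 2 or 4).                  */
    return cfg.laneCount * cfg.linkBW * 270;   /* 8b/10b overhead not removed */
}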
+ */ +#define NV0073_CTRL_CMD_DP_GET_EDP_DATA (0x731361U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_EDP_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_GET_EDP_DATA_PARAMS_MESSAGE_ID (0x61U) + +typedef struct NV0073_CTRL_DP_GET_EDP_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 data; +} NV0073_CTRL_DP_GET_EDP_DATA_PARAMS; + +#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER 0:0 +#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_OFF (0x00000000U) +#define NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_ON (0x00000001U) +#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF 1:1 +#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_ENABLE (0x00000000U) +#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_DISABLE (0x00000001U) +#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE 2:2 +#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D0 (0x00000000U) +#define NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D3 (0x00000001U) +/* + * NV0073_CTRL_CMD_DP_CONFIG_STREAM + * + * This command sets various multi/single stream related params for + * for a given head. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * Head + * Specifies the head index for the stream. + * sorIndex + * Specifies the SOR index for the stream. + * dpLink + * Specifies the DP link: either 0 or 1 (A , B) + * bEnableOverride + * Specifies whether we're manually configuring this stream. + * If not set, none of the remaining settings have any effect. + * bMST + * Specifies whether in Multistream or Singlestream mode. + * MST/SST + * Structures for passing in either Multistream or Singlestream params + * slotStart + * Specifies the start value of the timeslot + * slotEnd + * Specifies the end value of the timeslot + * PBN + * Specifies the PBN for the timeslot. + * minHBlank + * Specifies the min HBlank + * minVBlank + * Specifies the min VBlank + * sendACT -- deprecated. A new control call has been added. + * Specifies whether ACT has to be sent or not. + * tuSize + * Specifies TU size value + * watermark + * Specifies stream watermark. + * linkClkFreqHz -- moving to MvidWarParams. Use that instead. + * Specifies the link freq in Hz. Note that this is the byte clock. + * eg: = (5.4 Ghz / 10) + * actualPclkHz; -- moving to MvidWarParams. Use that instead. + * Specifies the actual pclk freq in Hz. + * mvidWarEnabled + * Specifies whether MVID WAR is enabled. + * MvidWarParams + * Is valid if mvidWarEnabled is true. + * bEnableTwoHeadOneOr + * Whether two head one OR is enabled. If this is set then RM will + * replicate SF settings of Master head on Slave head. Head index + * passed should be of Master Head. 
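[Editorial note] A sketch that unpacks the eDP status word returned above, using the bit positions given by the x:y field defines (bits 0, 1 and 2); the masks are written out by hand so the example does not depend on any particular DRF macro set.

static void dpReadEdpState(NvU32 edpDisplayId)
{
    NV0073_CTRL_DP_GET_EDP_DATA_PARAMS edp = {0};
    NvBool bPanelOn, bCanDpcdOff, bInD3;

    edp.subDeviceInstance = 0;
    edp.displayId         = edpDisplayId;
    dispRmControl(NV0073_CTRL_CMD_DP_GET_EDP_DATA, &edp, sizeof(edp));

    bPanelOn    = ((edp.data >> 0) & 1) == NV0073_CTRL_DP_GET_EDP_DATA_PANEL_POWER_ON;
    bCanDpcdOff = ((edp.data >> 1) & 1) == NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_OFF_ENABLE;
    bInD3       = ((edp.data >> 2) & 1) == NV0073_CTRL_DP_GET_EDP_DATA_DPCD_POWER_STATE_D3;
    (void)bPanelOn; (void)bCanDpcdOff; (void)bInD3;
}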
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC: when this command has already been called + * + */ +#define NV0073_CTRL_CMD_DP_CONFIG_STREAM (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID (0x62U) + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 sorIndex; + NvU32 dpLink; + + NvBool bEnableOverride; + NvBool bMST; + NvU32 singleHeadMultistreamMode; + NvU32 hBlankSym; + NvU32 vBlankSym; + NvU32 colorFormat; + NvBool bEnableTwoHeadOneOr; + + struct { + NvU32 slotStart; + NvU32 slotEnd; + NvU32 PBN; + NvU32 Timeslice; + NvBool sendACT; // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT + NvU32 singleHeadMSTPipeline; + NvBool bEnableAudioOverRightPanel; + } MST; + + struct { + NvBool bEnhancedFraming; + NvU32 tuSize; + NvU32 waterMark; + NvU32 actualPclkHz; // deprecated -Use MvidWarParams + NvU32 linkClkFreqHz; // deprecated -Use MvidWarParams + NvBool bEnableAudioOverRightPanel; + struct { + NvU32 activeCnt; + NvU32 activeFrac; + NvU32 activePolarity; + NvBool mvidWarEnabled; + struct { + NvU32 actualPclkHz; + NvU32 linkClkFreqHz; + } MvidWarParams; + } Legacy; + } SST; +} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_SET_RATE_GOV + * + * This command enables rate governing for a MST. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * Head + * Specifies the head index for the stream. + * sorIndex + * Specifies the SOR index for the stream. + * flags + * Specifies Rate Governing, trigger type and wait on trigger and operation type. + * + * _FLAGS_OPERATION: whether this control call should program or check for status of previous operation. + * + * _FLAGS_STATUS: Out only. Caller should check the status for _FLAGS_OPERATION_CHECK_STATUS through + * this bit. 
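[Editorial note] A sketch of a manual MST stream override with the structure above: claim a block of time slots with a given PBN on head 0 / SOR 0. The slot range and PBN are made-up placeholder numbers; real values come from the caller's MST bandwidth allocation, and dispRmControl() remains hypothetical.

static void dpConfigMstStream(void)
{
    NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS stream = {0};
    stream.subDeviceInstance = 0;
    stream.head              = 0;
    stream.sorIndex          = 0;
    stream.dpLink            = 0;          /* primary link (A) */
    stream.bEnableOverride   = NV_TRUE;    /* without this the rest is ignored */
    stream.bMST              = NV_TRUE;
    stream.MST.slotStart     = 1;          /* placeholder slot range */
    stream.MST.slotEnd       = 5;
    stream.MST.PBN           = 320;        /* placeholder PBN */
    dispRmControl(NV0073_CTRL_CMD_DP_CONFIG_STREAM, &stream, sizeof(stream));
}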
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC: when this command has already been called + * + */ +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV (0x731363U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS_MESSAGE_ID (0x63U) + +typedef struct NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 sorIndex; + NvU32 flags; +} NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS; + +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG 0:0 +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG_OFF (0x00000000U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG_ON (0x00000001U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE 1:1 +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE_LOADV (0x00000000U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE_IMMEDIATE (0x00000001U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER 2:2 +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER_OFF (0x00000000U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_WAIT_TRIGGER_ON (0x00000001U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION 3:3 +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION_PROGRAM (0x00000000U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION_CHECK_STATUS (0x00000001U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS 31:31 +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS_FAIL (0x00000000U) +#define NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS_PASS (0x00000001U) + +/* + * NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT + * + * This call is used by the displayport library. Once + * all of the platforms have ported, this call will be + * deprecated and made the default behavior. + * + * Disables automatic watermark programming + * Disables automatic DP IRQ handling (CP IRQ) + * Disables automatic retry on defers + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID (0x65U) + +typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS { + NvU32 subDeviceInstance; +} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_SET_ECF + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * sorIndex + * This parameter specifies the Index of sor for which ecf + * should be updated. + * ecf + * This parameter has the ECF bit mask. 
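[Editorial note] A sketch of the two-step flow implied by the _OPERATION and _STATUS fields above: one call programs rate governing, a second call with _OPERATION_CHECK_STATUS polls the result. The bit positions (0, 1, 3 and 31) are taken from the x:y field defines; real code would more likely use the SDK's DRF helpers than these hand-written shifts.

static NvBool dpRateGovEnableAndCheck(NvU32 head, NvU32 sorIndex)
{
    NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS rg = {0};
    rg.subDeviceInstance = 0;
    rg.head              = head;
    rg.sorIndex          = sorIndex;
    rg.flags = (NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_ENABLE_RG_ON       << 0) |
               (NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_TRIGGER_MODE_LOADV << 1) |
               (NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION_PROGRAM  << 3);
    dispRmControl(NV0073_CTRL_CMD_DP_SET_RATE_GOV, &rg, sizeof(rg));

    /* Second call: ask whether the programming took effect. */
    rg.flags = (NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_OPERATION_CHECK_STATUS << 3);
    dispRmControl(NV0073_CTRL_CMD_DP_SET_RATE_GOV, &rg, sizeof(rg));
    return ((rg.flags >> 31) & 1) == NV0073_CTRL_CMD_DP_SET_RATE_GOV_FLAGS_STATUS_PASS;
}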
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV0073_CTRL_CMD_DP_SET_ECF (0x731366U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_ECF_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SET_ECF_PARAMS_MESSAGE_ID (0x66U) + +typedef struct NV0073_CTRL_CMD_DP_SET_ECF_PARAMS { + NvU32 subDeviceInstance; + NvU32 sorIndex; + NV_DECLARE_ALIGNED(NvU64 ecf, 8); + NvBool bForceClearEcf; + NvBool bAddStreamBack; +} NV0073_CTRL_CMD_DP_SET_ECF_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_SEND_ACT + * + * This command sends ACT. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * + * displayId + * Specifies the root port displayId for which the trigger has to be done. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC: when this command has already been called + * + */ +#define NV0073_CTRL_CMD_DP_SEND_ACT (0x731367U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS_MESSAGE_ID (0x67U) + +typedef struct NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; +} NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS; + + + +/* + * NV0073_CTRL_CMD_DP_GET_CAPS + * + * This command returns the following info + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * sorIndex + * Specifies the SOR index. + * bIsDp12Supported + * Returns NV_TRUE if DP1.2 is supported by the GPU else NV_FALSE + * bIsMultistreamSupported + * Returns NV_TRUE if MST is supported by the GPU else NV_FALSE + * bIsSCEnabled + * Returns NV_TRUE if Stream cloning is supported by the GPU else NV_FALSE + * maxLinkRate + * Returns Maximum allowed orclk for DP mode of SOR + * 1 signifies 5.40(HBR2), 2 signifies 2.70(HBR), 3 signifies 1.62(RBR) + * bHasIncreasedWatermarkLimits + * Returns NV_TRUE if the GPU uses higher watermark limits, else NV_FALSE + * bIsPC2Disabled + * Returns NV_TRUE if VBIOS flag to disable PostCursor2 is set, else NV_FALSE + * bFECSupported + * Returns NV_TRUE if GPU supports FEC, else NV_FALSE + * bIsTrainPhyRepeater + * Returns NV_TRUE if LTTPR Link Training feature is set + * bOverrideLinkBw + * Returns NV_TRUE if DFP limits defined in DCB have to be honored, else NV_FALSE + * + * DSC caps - + * bDscSupported + * If GPU supports DSC or not + * + * encoderColorFormatMask + * Mask of all color formats for which DSC + * encoding is supported by GPU + * + * lineBufferSizeKB + * Size of line buffer. + * + * rateBufferSizeKB + * Size of rate buffer per slice. + * + * bitsPerPixelPrecision + * Bits per pixel precision for DSC e.g. 
1/16, 1/8, 1/4, 1/2, 1bpp + * + * maxNumHztSlices + * Maximum number of horizontal slices supported by DSC encoder + * + * lineBufferBitDepth + * Bit depth used by the GPU to store the reconstructed pixels within + * the line buffer + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV0073_CTRL_CMD_DP_GET_CAPS (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U) + +typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 sorIndex; + NvU32 maxLinkRate; + NvBool bIsDp12Supported; + NvBool bIsDp14Supported; + NvBool bIsMultistreamSupported; + NvBool bIsSCEnabled; + NvBool bHasIncreasedWatermarkLimits; + NvBool bIsPC2Disabled; + NvBool isSingleHeadMSTSupported; + NvBool bFECSupported; + NvBool bIsTrainPhyRepeater; + NvBool bOverrideLinkBw; + + struct { + NvBool bDscSupported; + NvU32 encoderColorFormatMask; + NvU32 lineBufferSizeKB; + NvU32 rateBufferSizeKB; + NvU32 bitsPerPixelPrecision; + NvU32 maxNumHztSlices; + NvU32 lineBufferBitDepth; + } DSC; +} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE 2:0 +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE (0x00000000U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10 (0x00000004U) + +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U) + +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16 (0x00000001U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8 (0x00000002U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4 (0x00000003U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2 (0x00000004U) +#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1 (0x00000005U) + +/* + * NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES + * + * This command returns the following info + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * should be for DP only + * bEnableMSA + * To enable or disable MSA + * bStereoPhaseInverse + * To enable or disable Stereo Phase Inverse value + * bCacheMsaOverrideForNextModeset + * Cache the values and don't apply them until next modeset + * featureMask + * Enable/Disable mask of individual MSA property + * featureValues + * MSA property value to write + * pFeatureDebugValues + * It will actual MSA property value being written on HW. 
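[Editorial note] A sketch that queries per-SOR DP capabilities and maps the packed maxLinkRate code onto a readable label via the _MAX_LINK_RATE_* defines above, then checks one DSC sub-capability; helper and handles are hypothetical as before.

static void dpDumpCaps(NvU32 sorIndex)
{
    NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS caps = {0};
    const char *rate = "none";

    caps.subDeviceInstance = 0;
    caps.sorIndex          = sorIndex;
    dispRmControl(NV0073_CTRL_CMD_DP_GET_CAPS, &caps, sizeof(caps));

    switch (caps.maxLinkRate)
    {
        case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62: rate = "RBR 1.62 Gbps";  break;
        case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70: rate = "HBR 2.70 Gbps";  break;
        case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40: rate = "HBR2 5.40 Gbps"; break;
        case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10: rate = "HBR3 8.10 Gbps"; break;
    }
    (void)rate;

    if (caps.DSC.bDscSupported &&
        (caps.DSC.encoderColorFormatMask & NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB))
    {
        /* DSC with RGB encoding is available on this SOR. */
    }
}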
+ * If its NULL then no error but return nothing + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_TIMEOUT + * + */ +#define NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES (0x73136aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_MSA_PROPERTIES_SYNC_POLARITY_LOW (0U) +#define NV0073_CTRL_CMD_DP_MSA_PROPERTIES_SYNC_POLARITY_HIGH (1U) + +typedef struct NV0073_CTRL_DP_MSA_PROPERTIES_MASK { + NvU8 miscMask[2]; + NvBool bRasterTotalHorizontal; + NvBool bRasterTotalVertical; + NvBool bActiveStartHorizontal; + NvBool bActiveStartVertical; + NvBool bSurfaceTotalHorizontal; + NvBool bSurfaceTotalVertical; + NvBool bSyncWidthHorizontal; + NvBool bSyncPolarityHorizontal; + NvBool bSyncHeightVertical; + NvBool bSyncPolarityVertical; + NvBool bReservedEnable[3]; +} NV0073_CTRL_DP_MSA_PROPERTIES_MASK; + +typedef struct NV0073_CTRL_DP_MSA_PROPERTIES_VALUES { + NvU8 misc[2]; + NvU16 rasterTotalHorizontal; + NvU16 rasterTotalVertical; + NvU16 activeStartHorizontal; + NvU16 activeStartVertical; + NvU16 surfaceTotalHorizontal; + NvU16 surfaceTotalVertical; + NvU16 syncWidthHorizontal; + NvU16 syncPolarityHorizontal; + NvU16 syncHeightVertical; + NvU16 syncPolarityVertical; + NvU8 reserved[3]; +} NV0073_CTRL_DP_MSA_PROPERTIES_VALUES; + +#define NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS_MESSAGE_ID (0x6AU) + +typedef struct NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bEnableMSA; + NvBool bStereoPhaseInverse; + NvBool bCacheMsaOverrideForNextModeset; + NV0073_CTRL_DP_MSA_PROPERTIES_MASK featureMask; + NV0073_CTRL_DP_MSA_PROPERTIES_VALUES featureValues; + NV_DECLARE_ALIGNED(struct NV0073_CTRL_DP_MSA_PROPERTIES_VALUES *pFeatureDebugValues, 8); +} NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT + * + * This command can be used to invoke a fake interrupt for the operation of DP1.2 branch device + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * interruptType + * This parameter specifies the type of fake interrupt to be invoked. 
Possible values are: + * 0 => IRQ + * 1 => HPDPlug + * 2 => HPDUnPlug + * displayId + * should be for DP only + * + */ + +#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT (0x73136bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS_MESSAGE_ID (0x6BU) + +typedef struct NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 interruptType; +} NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS; + +#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_IRQ (0x00000000U) +#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG (0x00000001U) +#define NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG (0x00000002U) + +/* + * NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG + * + * This command sets the MS displayId lit up by driver for further use of VBIOS + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * should be for DP only + * activeDevAddr + * Active MS panel address + * sorIndex + * SOR Index + * dpLink + * DP Sub Link Index + * hopCount + * Maximum hopcounts in MS address + * dpMsDevAddrState + * DP Multistream Device Address State. The values can be + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_TIMEOUT + * + */ +#define NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG (0x73136cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS_MESSAGE_ID (0x6CU) + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 activeDevAddr; + NvU32 sorIndex; + NvU32 dpLink; + NvU32 hopCount; + NvU32 dpMsDevAddrState; +} NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS; + + + +/* +* NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT +* +* This command configures a new bit, NV_PDISP_SF_DP_LINKCTL_TRIGGER_SELECT +* to indicate which pipeline will handle the +* time slots allocation in single head MST mode +* +* subDeviceInstance +* This parameter specifies the subdevice instance within the +* NV04_DISPLAY_COMMON parent device to which the operation should be +* directed. This parameter must specify a value between zero and the +* total number of subdevices within the parent device. 
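[Editorial note] A sketch that injects a fake hot-plug event on a DP connector, for instance from a test harness, using the interrupt-type codes just defined; the dispatch helper remains hypothetical.

static void dpFakeHotPlug(NvU32 displayId)
{
    NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS fake = {0};
    fake.subDeviceInstance = 0;
    fake.displayId         = displayId;   /* DP displayId only */
    fake.interruptType     = NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG;
    dispRmControl(NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT, &fake, sizeof(fake));
}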
This parameter +* should be set to zero for default behavior +* Head +* Specifies the head index for the stream +* sorIndex +* Specifies the SOR index for the stream +* streamIndex +* Stream Identifier +* +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT +* NV_ERR_GENERIC: when this command has already been called +* +*/ +#define NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT (0x73136fU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS_MESSAGE_ID (0x6FU) + +typedef struct NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 sorIndex; + NvU32 singleHeadMSTPipeline; +} NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS; + +/* +* NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM +* +* This call is used by the displayport library.& clients of RM +* Its main function is to configure single Head Multi stream mode + * this call configures internal RM datastructures to support required mode. +* +* subDeviceInstance +* This parameter specifies the subdevice instance within the +* NV04_DISPLAY_COMMON parent device to which the operation should be +* directed. This parameter must specify a value between zero and the +* total number of subdevices within the parent device. This parameter +* should be set to zero for default behavior. +* +* displayIDs +* This parameter specifies array of DP displayIds to be configured which are driven out from a single head. +* +* numStreams +* This parameter specifies number of streams driven from a single head +* ex: for 2SST & 2MST its value is 2. +* +* mode +* This parameter specifies single head multi stream mode to be configured. +* +* bSetConfigure +* This parameter configures single head multistream mode +* if TRUE it sets SST or MST based on 'mode' parameter and updates internal driver data structures with the given information. +* if FALSE clears the configuration of single head multi stream mode. +* +* vbiosPrimaryDispIdIndex +* This parameter specifies vbios master displayID index in displayIDs input array. 
+* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT +* NV_ERR_NOT_SUPPORTED +* +*/ +#define NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM (0x73136eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS (0x00000002U) +#define NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS_MESSAGE_ID (0x6EU) + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayIDs[NV0073_CTRL_CMD_DP_SINGLE_HEAD_MAX_STREAMS]; + NvU32 numStreams; + NvU32 mode; + NvBool bSetConfig; + NvU8 vbiosPrimaryDispIdIndex; +} NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS; + +#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_NONE (0x00000000U) +#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST (0x00000001U) +#define NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST (0x00000002U) + +/* +* NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL +* +* This command configures a new bit, NV_PDISP_SF_DP_LINKCTL_TRIGGER_ALL +* to indicate which if all the pipelines to take affect on ACT (sorFlushUpdates) +* in single head MST mode +* +* subDeviceInstance +* This parameter specifies the subdevice instance within the +* NV04_DISPLAY_COMMON parent device to which the operation should be +* directed. This parameter must specify a value between zero and the +* total number of subdevices within the parent device. This parameter +* should be set to zero for default behavior +* Head +* Specifies the head index for the stream +* sorIndex +* Specifies the SOR index for the stream +* streamIndex +* Stream Identifier +* +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT +* NV_ERR_GENERIC: when this command has already been called +* +*/ +#define NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL (0x731370U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS_MESSAGE_ID (0x70U) + +typedef struct NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvBool enable; +} NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS; + + + +/* +* NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA +* +* This command collects the DP AUX log from the RM aux buffer and +* sends it to the application. 
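[Editorial note] A sketch that describes a two-stream single-head SST configuration to RM with the structure above; both display IDs and the choice of primary index are placeholders the caller would already know from its own topology bookkeeping, and dispRmControl() is hypothetical.

static void dpConfig2Sst(NvU32 primaryDisplayId, NvU32 secondaryDisplayId)
{
    NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS shms = {0};
    shms.subDeviceInstance       = 0;
    shms.displayIDs[0]           = primaryDisplayId;
    shms.displayIDs[1]           = secondaryDisplayId;
    shms.numStreams              = 2;
    shms.mode                    = NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST;
    shms.bSetConfig              = NV_TRUE;   /* NV_FALSE tears the configuration down */
    shms.vbiosPrimaryDispIdIndex = 0;
    dispRmControl(NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM, &shms, sizeof(shms));
}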
+* +* dpAuxBufferReadSize +* Specifies the number of logs to be read from the +* AUX buffer in RM +* dpNumMessagesRead +* Specifies the number of logs read from the AUX buffer +* dpAuxBuffer +* The local buffer to copy the specified number of logs +* from RM to user application +* +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT +* NV_ERR_GENERIC: when this command has already been called +* +* +*DPAUXPACKET - This structure holds the log information +* auxPacket - carries the hex dump of the message transaction +* auxEvents - Contains the information as in what request and reply type where +* auxRequestTimeStamp - Request timestamp +* auxMessageReqSize - Request Message size +* auxMessageReplySize - Reply message size(how much information was actually send by receiver) +* auxOutPort - DP port number +* auxPortAddress - Address to which data was requested to be read or written +* auxReplyTimeStamp - Reply timestamp +* auxCount - Serial number to keep track of transactions +*/ + +/*Maximum dp messages size is 16 as per the protocol*/ +#define DP_MAX_MSG_SIZE 16U +#define MAX_LOGS_PER_POLL 50U + +/* Various kinds of DP Aux transactions */ +#define NV_DP_AUXLOGGER_REQUEST_TYPE 3:0 +#define NV_DP_AUXLOGGER_REQUEST_TYPE_NULL 0x00000000U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CWR 0x00000001U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CREQWSTAT 0x00000002U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTWR 0x00000003U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTREQWSTAT 0x00000004U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_AUXWR 0x00000005U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_I2CRD 0x00000006U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_MOTRD 0x00000007U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_AUXRD 0x00000008U +#define NV_DP_AUXLOGGER_REQUEST_TYPE_UNKNOWN 0x00000009U + +#define NV_DP_AUXLOGGER_REPLY_TYPE 7:4 +#define NV_DP_AUXLOGGER_REPLY_TYPE_NULL 0x00000000U +#define NV_DP_AUXLOGGER_REPLY_TYPE_SB_ACK 0x00000001U +#define NV_DP_AUXLOGGER_REPLY_TYPE_RETRY 0x00000002U +#define NV_DP_AUXLOGGER_REPLY_TYPE_TIMEOUT 0x00000003U +#define NV_DP_AUXLOGGER_REPLY_TYPE_DEFER 0x00000004U +#define NV_DP_AUXLOGGER_REPLY_TYPE_DEFER_TO 0x00000005U +#define NV_DP_AUXLOGGER_REPLY_TYPE_ACK 0x00000006U +#define NV_DP_AUXLOGGER_REPLY_TYPE_ERROR 0x00000007U +#define NV_DP_AUXLOGGER_REPLY_TYPE_UNKNOWN 0x00000008U + +#define NV_DP_AUXLOGGER_EVENT_TYPE 9:8 +#define NV_DP_AUXLOGGER_EVENT_TYPE_AUX 0x00000000U +#define NV_DP_AUXLOGGER_EVENT_TYPE_HOT_PLUG 0x00000001U +#define NV_DP_AUXLOGGER_EVENT_TYPE_HOT_UNPLUG 0x00000002U +#define NV_DP_AUXLOGGER_EVENT_TYPE_IRQ 0x00000003U + +#define NV_DP_AUXLOGGER_AUXCTL_CMD 15:12 +#define NV_DP_AUXLOGGER_AUXCTL_CMD_INIT 0x00000000U +#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CWR 0x00000000U +#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CRD 0x00000001U +#define NV_DP_AUXLOGGER_AUXCTL_CMD_I2CREQWSTAT 0x00000002U +#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTWR 0x00000004U +#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTRD 0x00000005U +#define NV_DP_AUXLOGGER_AUXCTL_CMD_MOTREQWSTAT 0x00000006U +#define NV_DP_AUXLOGGER_AUXCTL_CMD_AUXWR 0x00000008U +#define NV_DP_AUXLOGGER_AUXCTL_CMD_AUXRD 0x00000009U + + +typedef struct DPAUXPACKET { + NvU32 auxEvents; + NvU32 auxRequestTimeStamp; + NvU32 auxMessageReqSize; + NvU32 auxMessageReplySize; + NvU32 auxOutPort; + NvU32 auxPortAddress; + NvU32 auxReplyTimeStamp; + NvU32 auxCount; + NvU8 auxPacket[DP_MAX_MSG_SIZE]; +} DPAUXPACKET; +typedef struct DPAUXPACKET *PDPAUXPACKET; + +#define NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA (0x731373U) /* finn: Evaluated from 
"(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS_MESSAGE_ID (0x73U) + +typedef struct NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS { + //In + NvU32 subDeviceInstance; + NvU32 dpAuxBufferReadSize; + + //Out + NvU32 dpNumMessagesRead; + DPAUXPACKET dpAuxBuffer[MAX_LOGS_PER_POLL]; +} NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS; + + + + +/* NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES + * + * This setup link rate table for target display to enable indexed link rate + * and export valid link rates back to client. Client may pass empty table to + * reset previous setting. + * + * subDeviceInstance + * client will give a subdevice to get right pGpu/pDisp for it + * displayId + * DisplayId of the display for which the client targets + * linkRateTbl + * Link rates in 200KHz as native granularity from eDP 1.4 + * linkBwTbl + * Link rates in 270MHz and valid for client to apply to + * linkBwCount + * Total valid link rates + * + * Possible status values returned include: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES 8U + +#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID (0x77U) + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS { + // In + NvU32 subDeviceInstance; + NvU32 displayId; + NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + + // Out + NvU8 linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES]; + NvU8 linkBwCount; +} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS; + + +/* + * NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES + * + * This command is used to not depend on supervisor interrupts for setting the + * stereo msa params. We will not cache the values and can toggle stereo using + * this ctrl call on demand. Note that this control call will only change stereo + * settings and will leave other settings as is. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * should be for DP only + * bEnableMSA + * To enable or disable MSA + * bStereoPhaseInverse + * To enable or disable Stereo Phase Inverse value + * featureMask + * Enable/Disable mask of individual MSA property. 
+ * featureValues + * MSA property value to write + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_TIMEOUT + * + */ +#define NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES (0x731378U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS_MESSAGE_ID (0x78U) + +typedef struct NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bEnableMSA; + NvBool bStereoPhaseInverse; + NV0073_CTRL_DP_MSA_PROPERTIES_MASK featureMask; + NV0073_CTRL_DP_MSA_PROPERTIES_VALUES featureValues; +} NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS; + + + +/* + * NV0073_CTRL_CMD_DP_CONFIGURE_FEC + * + * This command is used to enable/disable FEC on DP Mainlink. + * FEC is a prerequisite to DSC. This should be called only + * after LT completes (including PostLT LQA) while enabling. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * + * displayId + * Can only be 1 and must be DP. + * + * bEnableFec + * To enable or disable FEC + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV0073_CTRL_CMD_DP_CONFIGURE_FEC (0x73137aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS_MESSAGE_ID (0x7AU) + +typedef struct NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bEnableFec; +} NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior + * cmd + * This parameter is an input to this command. + * Here are the current defined fields: + * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER + * Set to specify what operation to run. + * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_UP + * Request to power up pad. + * NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_DOWN + * Request to power down the pad. + * linkBw + * This parameter is used to pass in the link bandwidth required to run the + * power up sequence. Refer enum DP_LINK_BANDWIDTH for valid values. + * laneCount + * This parameter is used to pass the lanecount. + * sorIndex + * This parameter is used to pass the SOR index. + * sublinkIndex + * This parameter is used to pass the sublink index. Please refer + * enum DFPLINKINDEX for valid values + * priPadLinkIndex + * This parameter is used to pass the padlink index for primary link. + * Please refer enum DFPPADLINK for valid index values for Link A~F. + * secPadLinkIndex + * This parameter is used to pass the padlink index for secondary link. 
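[Editorial note] A sketch of enabling FEC once link training (including post-LT link-quality adjustment) has finished, as the comment above requires; helper and displayId are hypothetical.

static void dpEnableFec(NvU32 displayId)
{
    NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS fec = {0};
    fec.subDeviceInstance = 0;
    fec.displayId         = displayId;   /* exactly one DP displayId bit */
    fec.bEnableFec        = NV_TRUE;
    dispRmControl(NV0073_CTRL_CMD_DP_CONFIGURE_FEC, &fec, sizeof(fec));
}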
+ * For Single SST pass in NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PADLINK_INDEX_INVALID + * bEnableSpread + * This parameter is boolean value used to indicate if spread is to be enabled or disabled. + */ + +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD (0x73137bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS_MESSAGE_ID (0x7BU) + +typedef struct NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS { + NvU32 subDeviceInstance; + NvU32 cmd; + NvU32 linkBw; + NvU32 laneCount; + NvU32 sorIndex; + NvU32 sublinkIndex; // sublink A/B + NvU32 priPadLinkIndex; // padlink A/B/C/D/E/F + NvU32 secPadLinkIndex; // padlink A/B/C/D/E/F for secondary link in DualSST case. + NvBool bEnableSpread; +} NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS; + +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER 0:0 +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_UP (0x00000000U) +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_CMD_POWER_DOWN (0x00000001U) + +#define NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PADLINK_INDEX_INVALID (0x000000FFU) + +/* + * NV0073_CTRL_CMD_DP_AUXCH_CTRL + * + * This command can be used to perform the I2C Bulk transfer over + * DP Aux channel. This is the display port specific implementation + * for sending bulk data over the DpAux channel, by splitting up the + * data into pieces and retrying for pieces that aren't ACK'd. + * + * subDeviceInstance [IN] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId [IN] + * This parameter specifies the ID of the display for which the dfp + * caps should be returned. The display ID must a dfp display. + * If more than one displayId bit is set or the displayId is not a dfp, + * this call will return NV_ERR_INVALID_ARGUMENT. + * addr [IN] + * This parameter is an input to this command. The addr parameter follows + * Section 2.4 in DisplayPort spec and the client should refer to the valid + * address in DisplayPort spec. Only the first 20 bits are valid. + * bWrite [IN] + * This parameter specifies whether the command is a I2C write (NV_TRUE) or + * a I2C read (NV_FALSE). + * data [IN/OUT] + * In the case of a read transaction, this parameter returns the data from + * transaction request. In the case of a write transaction, the client + * should write to this buffer for the data to send. + * size [IN/OUT] + * Specifies how many data bytes to read/write depending on the + * transaction type. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL (0x73137cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE 256U + +#define NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS_MESSAGE_ID (0x7CU) + +typedef struct NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 addr; + NvBool bWrite; + NvU8 data[NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE]; + NvU32 size; +} NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS; + +/* + * NV0073_CTRL_CMD_DP_ENABLE_VRR + * + * The command is used to enable VRR. + * + * subDeviceInstance [IN] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior + * displayId [IN] + * This parameter is an input to this command, specifies the ID of the display + * for client targeted to. + * The display ID must a DP display. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * cmd [IN] + * This parameter is an input to this command. + * + * _STAGE: specifies the stage id to execute in the VRR enablement sequence. + * _MONITOR_ENABLE_BEGIN: Send command to the monitor to start monitor + * enablement procedure. + * _MONITOR_ENABLE_CHALLENGE: Send challenge to the monitor + * _MONITOR_ENABLE_CHECK: Read digest from the monitor, and verify + * if the result is valid. + * _DRIVER_ENABLE_BEGIN: Send command to the monitor to start driver + * enablement procedure. + * _DRIVER_ENABLE_CHALLENGE: Read challenge from the monitor and write back + * corresponding digest. + * _DRIVER_ENABLE_CHECK: Check if monitor enablement worked. + * _RESET_MONITOR: Set the FW state m/c to a known state. + * _INIT_PUBLIC_INFO: Send command to the monitor to prepare public info. + * _GET_PUBLIC_INFO: Read public info from the monitor. + * _STATUS_CHECK: Check if monitor is ready for next command. + * result [OUT] + * This is an output parameter to reflect the result of the operation. 
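[Editorial note] A sketch of a bulk I2C-over-AUX read with the structure above. The 0x50 slave address and 128-byte length are the conventional EDID values and serve only as an example; RM performs the splitting and retrying described in the comment, and dispRmControl() is hypothetical.

static void dpReadEdidOverAux(NvU32 displayId, NvU8 *pOut128)
{
    NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS i2c = {0};
    NvU32 k;

    i2c.subDeviceInstance = 0;
    i2c.displayId         = displayId;
    i2c.addr              = 0x50;       /* example I2C slave address (EDID) */
    i2c.bWrite            = NV_FALSE;   /* read transaction */
    i2c.size              = 128;        /* bytes requested, <= 256 per the define above */
    dispRmControl(NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL, &i2c, sizeof(i2c));

    for (k = 0; k < i2c.size; k++)      /* size reflects the bytes transferred */
        pOut128[k] = i2c.data[k];
}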
+ */ +#define NV0073_CTRL_CMD_DP_ENABLE_VRR (0x73137dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS_MESSAGE_ID (0x7DU) + +typedef struct NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 cmd; + NvU32 result; +} NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS; + +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE 3:0 +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK (0x00000002U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN (0x00000003U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE (0x00000004U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK (0x00000005U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR (0x00000006U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO (0x00000007U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO (0x00000008U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK (0x00000009U) + +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK (0x00000000U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING (0x80000001U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR (0x80000002U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR (0x80000003U) +#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR (0x80000004U) + +/* + * NV0073_CTRL_CMD_DP_GET_GENERIC_INFOFRAME + * + * This command is used to capture the display output packets for DP protocol. + * Common supported packets are Dynamic Range and mastering infoframe SDP for HDR, + * VSC SDP for colorimetry and pixel encoding info. + * + * displayID (in) + * This parameter specifies the displayID for the display resource to configure. + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON + * parent device to which the operation should be directed. + * infoframeIndex (in) + * HW provides support to program 2 generic infoframes per frame for DP. + * This parameter indicates which infoframe packet is to be captured. + * Possible flags are as follows: + * NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE + * This flag indicates the INFOFRAME that needs to be read. + * Set to _INFOFRAME0 if RM should read GENERIC_INFOFRAME + * Set to _INFOFRAME1 if RM should read GENERIC_INFOFRAME1 + * packet (out) + * pPacket points to the memory for reading the infoframe packet. + * bTransmitControl (out) + * This gives the transmit mode of infoframes. + * If set, means infoframe will be sent as soon as possible and then on + * every frame during vblank. + * If cleared, means the infoframe will be sent once as soon as possible. 
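[Editorial note] A sketch of walking the staged VRR enablement handshake with the stage codes above. The ordering shown is only an illustrative guess pieced together from the stage names (the header itself does not prescribe a sequence), and the pending/poll handling real code would need around _STAGE_STATUS_CHECK is omitted.

static NvBool dpEnableVrr(NvU32 displayId)
{
    static const NvU32 stages[] = {
        NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR,
        NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN,
        NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE,
        NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK,
        NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN,
        NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE,
        NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK,
    };
    NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS vrr = {0};
    NvU32 i;

    vrr.subDeviceInstance = 0;
    vrr.displayId         = displayId;
    for (i = 0; i < sizeof(stages) / sizeof(stages[0]); i++)
    {
        vrr.cmd = stages[i];   /* stage code lives in the _STAGE field (bits 3:0) */
        dispRmControl(NV0073_CTRL_CMD_DP_ENABLE_VRR, &vrr, sizeof(vrr));
        if (vrr.result != NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK)
            return NV_FALSE;   /* e.g. _STATUS_PENDING: caller should poll _STAGE_STATUS_CHECK */
    }
    return NV_TRUE;
}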
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_GET_GENERIC_INFOFRAME (0x73137eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_GENERIC_INFOFRAME_MAX_PACKET_SIZE 36U + +#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS_MESSAGE_ID (0x7EU) + +typedef struct NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 infoframeIndex; + NvU8 packet[NV0073_CTRL_DP_GENERIC_INFOFRAME_MAX_PACKET_SIZE]; + NvBool bTransmitControl; +} NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS; + + +#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE 0:0 +#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE_INFOFRAME0 (0x0000000U) +#define NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_CAPTURE_MODE_INFOFRAME1 (0x0000001U) + + +/* + * NV0073_CTRL_CMD_DP_GET_MSA_ATTRIBUTES + * + * This command is used to capture the various data attributes sent in the MSA for DP protocol. + * Refer table 2-94 'MSA Data Fields' in DP1.4a spec document for MSA data field description. + * + * displayID (in) + * This parameter specifies the displayID for the display resource to configure. + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the NV04_DISPLAY_COMMON + * parent device to which the operation should be directed. + * mvid, nvid (out) + * Video timestamp used by DP sink for regenerating pixel clock. + * misc0, misc1 (out) + * Miscellaneous MSA attributes. + * hTotal, vTotal (out) + * Htotal measured in pixel count and vtotal measured in line count. + * hActiveStart, vActiveStart (out) + * Active start measured from start of leading edge of the sync pulse. + * hActiveWidth, vActiveWidth (out) + * Active video width and height. + * hSyncWidth, vSyncWidth (out) + * Width of sync pulse. + * hSyncPolarity, vSyncPolarity (out) + * Polarity of sync pulse. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_GET_MSA_ATTRIBUTES (0x73137fU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_DP_MSA_MAX_DATA_SIZE 7U + +#define NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS_MESSAGE_ID (0x7FU) + +typedef struct NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 mvid; + NvU32 nvid; + NvU8 misc0; + NvU8 misc1; + NvU16 hTotal; + NvU16 vTotal; + NvU16 hActiveStart; + NvU16 vActiveStart; + NvU16 hActiveWidth; + NvU16 vActiveWidth; + NvU16 hSyncWidth; + NvU16 vSyncWidth; + NvBool hSyncPolarity; + NvBool vSyncPolarity; +} NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS; + +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MVID 23:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_NVID 23:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MISC0 7:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_MISC1 15:8 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HTOTAL 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VTOTAL 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HACTIVE_START 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VACTIVE_START 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HACTIVE_WIDTH 15:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VACTIVE_WIDTH 31:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HSYNC_WIDTH 14:0 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_HSYNC_POLARITY 15:15 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VSYNC_WIDTH 30:16 +#define NV0073_CTRL_DP_MSA_ATTRIBUTES_VSYNC_POLARITY 31:31 + +/* + * NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL + * + * This command is used to query OD capability and status as well as + * control OD functionality of eDP LCD panels. + * + * subDeviceInstance [in] + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId [in] + * This parameter specifies the ID of the DP display which owns + * the Main Link to be adjusted. The display ID must a DP display + * as determined with the NV0073_CTRL_CMD_SPECIFIC_GET_TYPE command. + * If more than one displayId bit is set or the displayId is not a DP, + * this call will return NV_ERR_INVALID_ARGUMENT. + * cmd [in] + * This parameter is an input to this command. The cmd parameter tells + * whether we have to get the value of a specific field or set the + * value in case of a writeable field. + * control [in] + * This parameter is input by the user. It is used by the user to decide the control + * value to be written to change the Sink OD mode. The command to write is + * the NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET command. + * bOdCapable [out] + * This parameter reflects the OD capability of the Sink which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_CAPABLE_QUERY command. + * bOdControlCapable [out] + * This parameter reflects the OD control capability of the Sink which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_CAPABLE_QUERY command. + * bOdStatus [out] + * This parameter reflects the Sink OD status which can be + * fetched by using the NV0073_CTRL_CMD_DP_AUXCH_OD_STATUS_QUERY command. 
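[Editorial note] A sketch that captures the MSA currently being transmitted and derives blanking from it; this is plain arithmetic on the returned fields and writes nothing back to hardware. The helper and displayId are hypothetical as in the earlier sketches.

static void dpInspectMsa(NvU32 displayId)
{
    NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS msa = {0};
    NvU16 hBlank, vBlank;

    msa.subDeviceInstance = 0;
    msa.displayId         = displayId;
    dispRmControl(NV0073_CTRL_CMD_DP_GET_MSA_ATTRIBUTES, &msa, sizeof(msa));

    hBlank = msa.hTotal - msa.hActiveWidth;   /* total minus active = blanking */
    vBlank = msa.vTotal - msa.vActiveWidth;
    (void)hBlank; (void)vBlank;
    /* msa.hSyncPolarity / msa.vSyncPolarity report the transmitted sync polarities. */
}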
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL (0x731380U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU8 control; + NvU8 cmd; + NvBool bOdCapable; + NvBool bOdControlCapable; + NvBool bOdStatus; +} NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS; + +/* _ctrl0073dp_h_ */ + +/* valid commands */ +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_CAPABLE 0x00000000 +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_CTL_CAPABLE 0x00000001 +#define NV0073_CTRL_CMD_DP_AUXCHQUERY_OD_STATUS 0x00000002 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET 0x00000003 + +/* valid state values */ +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_AUTONOMOUS 0x00000000 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_DISABLE_OD 0x00000002 +#define NV0073_CTRL_CMD_DP_AUXCH_OD_CTL_SET_ENABLE_OD 0x00000003 diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h new file mode 100644 index 000000000..1ac3e6fec --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dpu.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073dpu.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h new file mode 100644 index 000000000..a2423a888 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073event.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073event.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h new file mode 100644 index 000000000..ce48ef905 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073internal.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073internal.finn +// + + + + +#include "ctrl/ctrl0073/ctrl0073base.h" +#include "ctrl/ctrl0073/ctrl0073system.h" + +#define NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE (0x730401U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_INTERNAL_INTERFACE_ID << 8) | NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! 
+#define NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_FINN_PARAMS { + NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS params; +} NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE_FINN_PARAMS; + + +/* ctrl0073internal_h */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h new file mode 100644 index 000000000..7ac6f6e3f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073psr.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073psr.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h new file mode 100644 index 000000000..c79409ab2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h @@ -0,0 +1,1793 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073specific.finn +// + + + + +#include "ctrl/ctrl0073/ctrl0073base.h" + +#include "ctrl/ctrlxxxx.h" +/* NV04_DISPLAY_COMMON display-specific control commands and parameters */ + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_TYPE + * + * This command can be used to determine the associated display type for + * the specified displayId. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the display + * type is to be returned. Only one display may be indicated in this + * parameter. + * displayType + * This parameter returns the display type associated with the specified + * displayId parameter. Valid displayType values are: + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP + * NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_TV + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_TYPE (0x730240U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS_MESSAGE_ID (0x40U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 displayType; +} NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS; + +/* valid display types */ +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_UNKNOWN (0x00000000U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT (0x00000001U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP (0x00000002U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_TV (0x00000003U) + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 + * + * This command can be used to request the EDID for the specified displayId. + * + * [in] subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the total + * number of subdevices within the parent device. This parameter should + * be set to zero for default behavior. + * [in] displayId + * This parameter specifies the display to read the EDID. The display ID + * must specify a display with a positive connect state as determined + * with the NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE command. Only one + * display may be indicated in this parameter. If a more than one + * display Id is used, the RM will return NV_ERR_INVALID_ARGUMENT. + * [out] bufferSize + * This parameter returns the number of bytes copied into edidBuffer after + * performing the requested EDID operations. + * [out] edidBuffer + * The array of EDIDs that RM will fill after the requested operations. If + * the size of the array is not large enough to hold the number of bytes to + * be copied, NV_ERR_INVALID_ARGUMENT will be returned. + * [in] flags + * This parameter defines the specific operations that will be performed + * in reading the EDID. 
+ * Here are the current defined fields: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE + * A client uses this field to indicate whether to return the cached + * copy of the EDID or to use DDC to read the EDID from the display. + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO + * The RM will use DDC to grab the EDID. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_YES + * The RM will copy the last EDID found into the clients + * buffer. No DDC will be performed. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE + * A client uses this field to indicate whether to read from + * the HW and return the EDID w/o any patching + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_COOKED + * Use the _COPY_CACHE policy + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_RAW + * Perform the read and return an unadulterated EDID. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE + * A client uses this field to indicate whether to read EDID + * from SBIOS using ACPI sub function for display dynamic switching + * feature. This flag should only be set on internal display with + * dynamic switching feature enabled. + * Possible values are: + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_ACPI + * RM reads the EDID from SBIOS and returns the raw EDID provided + * by SBIOS. + * NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_DEFAULT + * EDID is read based on rest of the 'flags' that are passed to + * this function. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2 (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID (0x45U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 bufferSize; + NvU32 flags; + NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; +} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS; + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE 0:0 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_YES 0x00000001U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE 1:1 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_COOKED 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_READ_MODE_RAW 0x00000001U + +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE 3:2 +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_DEFAULT 0x00000000U +#define NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_DISPMUX_READ_MODE_ACPI 0x00000001U + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2 + * + * This command can be used to set or remove a complete EDID for the + * specified displayId. Once the EDID is set, any requests + * to read the EDID or use DDC detection will always use a cached copy of + * the EDID. That is, the EDID becomes static until disabled by calling + * this same function with edidBuffer. Note, that DDC based + * detection will always pass for any displayId that has set an EDID. Also, + * this path will not store any value across reboots. If an EDID needs to + * remain set after a reboot, RM clients must call this function again. 
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the total
+ *     number of subdevices within the parent device. This parameter should
+ *     be set to zero for default behavior.
+ *   displayId
+ *     This parameter specifies the display for which to store or remove the
+ *     EDID. Only one display may be indicated in this parameter. If more than
+ *     one displayId is used, the RM will return NV_ERR_INVALID_ARGUMENT.
+ *     If the displayId does not use DDC and hence would not have an EDID,
+ *     then the RM could also return NV_ERR_INVALID_ARGUMENT.
+ *   bufferSize
+ *     This parameter specifies the size of the EDID buffer pointed to by
+ *     pEdidBuffer. If the EDID write contains more bytes than bufferSize,
+ *     the RM will extend the bufferSize of the EDID inside the RM to match.
+ *     Note that a bufferSize of 0 means no bytes will be copied, but the
+ *     current cached EDID will be set as static.
+ *   edidBuffer
+ *     This parameter specifies the EDID buffer that the RM will copy into
+ *     the RM buffer. If the EDID buffer is empty, the RM will remove any
+ *     previously set EDID and allow further detection and EDID reads to use DDC.
+ *     The RM will not check whether the EDID is valid here.
+ *     The client should validate the EDID if needed before calling this function.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2 (0x730246U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS_MESSAGE_ID (0x46U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 bufferSize;
+    NvU8  edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE
+ *
+ * This control command provides fake device support from RM.
+ * This command serves as the entry point for all interaction of RM with the
+ * user mode component of any internal [test] tool. The faking framework
+ * in RM will be activated only after the usermode app sends in a proper ENABLE
+ * cmd first. Any attempt to issue other cmds while the faking code has not
+ * been enabled will result in the RM error _INVALID_DATA.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the total
+ *     number of subdevices within the parent device. This parameter should
+ *     be set to zero for default behavior.
+ *   cmd
+ *     This field will carry the command to be executed by the framework.
+ *     This includes enabling/disabling the test framework and faking devices
+ *     like CRT/DVI/TV.
+ *   data
+ *     This field carries the data required for executing the cmd.
+ *     Except for Enable and Disable, the other faking device commands will
+ *     require the device mask of the device to be faked/removed.
+ *   tvType
+ *     This field specifies a specific TV type while faking a TV.
+ * Possible values are: + * NV0073_FAKE_DEVICE_TV_NONE + * NV0073_FAKE_DEVICE_TV_SVIDEO + * NV0073_FAKE_DEVICE_TV_COMPOSITE + * NV0073_FAKE_DEVICE_TV_COMPONENT + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_DATA + * + */ + +#define NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE (0x730243U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS_MESSAGE_ID" */ + +/* valid fake device TV connector types */ +#define NV0073_FAKE_DEVICE_TV_NONE (0U) +#define NV0073_FAKE_DEVICE_TV_SVIDEO (1U) +#define NV0073_FAKE_DEVICE_TV_COMPOSITE (2U) +#define NV0073_FAKE_DEVICE_TV_COMPONENT (3U) + +#define NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS_MESSAGE_ID (0x43U) + +typedef struct NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS { + NvU32 subDeviceInstance; + NvU32 cmd; + NvU32 data; + NvU32 tvType; +} NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS; + +/* Faking Support commands */ +/* some random value to enable/disable test code */ +#define NV0073_FAKE_DEVICE_SUPPORT_ENABLE 0x11faU +#define NV0073_FAKE_DEVICE_SUPPORT_DISABLE 0x99ceU +#define NV0073_FAKE_DEVICE_SUPPORT_ATTACH_DEVICES 0x100U +#define NV0073_FAKE_DEVICE_SUPPORT_REMOVE_DEVICES 0x101U + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID + * + * This command returns the I2C portID for the specified display device. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the display for which information is to be + * returned. Only one display may be indicated in this parameter. + * If more than one displayId is used a failing status of + * NV_ERR_INVALID_ARGUMENT will be returned. + * commPortId + * This parameter returns the I2C communication port ID of the + * display device indicated by the displayId parameter. + * ddcPortId + * This parameter returns the I2C DDC port ID of the display device + * indicated by the displayId parameter. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID (0x730211U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 commPortId; + NvU32 ddcPortId; +} NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS; + +#define NV0073_CTRL_SPECIFIC_I2C_PORT_NONE (0x0U) + + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA + * + * This command can be used to get display connector data. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the display for which information is to be + * returned. Only one display may be indicated in this parameter. 
+ * If more than one displayId is used a failing status of + * NV_ERR_INVALID_ARGUMENT will be returned. + * DDCPartners + * This parameter specifies an NV0073_DISPLAY_MASK value describing + * the set of displays that share the same DDC line as displayId. This + * parameter will always be returned even if we also return the + * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_NO flag. + * flags + * This parameter specifies optional flags to be used while retrieving + * the connector data for a given displayId. + * Legal values for this parameter include: + * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT + * This flag describes whether the connector data is present + * inside the firmware. + * count + * This parameter returns the number of connectors associated with + * the displayId argument. This value indicates the number of +* valid entries returned in the data parameter. + * data + * This parameter returns an array of structures containing the connector + * data associated with each connector for the given displayId argument. + * The count field specifies how many entries in this array are returned. + * Each entry in the array contains the following members: + * index + * This value is the index associated with the given connector. If + * two displayIds share the same index, then they share the same + * connector. + * type + * This value defines the type of connector associated with the + * displayId argument. + * location + * This value provides a possible means to determine the relative + * location of the connector in association to other connectors. + * For desktop boards, a value of zero defines the south most + * connector (the connector closest to the bus slot into which + * the board is inserted). + * platform + * This value defines the type of system with which to associate the + * location of each connector. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */ + +/* maximum number of connectors */ +#define NV0073_CTRL_MAX_CONNECTORS 4U + +#define NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID (0x50U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 flags; + NvU32 DDCPartners; + NvU32 count; + struct { + NvU32 index; + NvU32 type; + NvU32 location; + } data[NV0073_CTRL_MAX_CONNECTORS]; + NvU32 platform; +} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS; + +/* defines for the flags field */ +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT 0:0 +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_NO 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_FLAGS_PRESENT_YES 0x00000001U + +/* defines for the data[].type field */ +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VGA_15_PIN 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_COMPOSITE 0x00000010U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_SVIDEO 0x00000011U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_HDTV_COMPONENT 0x00000013U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_SCART 0x00000014U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_COMPOSITE_SCART_OVER_EIAJ4120_BLUE 0x00000016U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_TV_HDTV_EIAJ4120 0x00000017U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_HDTV_YPRPB 0x00000018U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_SVIDEO 0x00000019U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_PC_POD_COMPOSITE 0x0000001AU +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_SVIDEO 0x00000020U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_COMPOSITE 0x00000021U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I 0x00000030U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_D 0x00000031U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_ADC 0x00000032U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_1 0x00000038U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_2 0x00000039U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_SPWG 0x00000040U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_OEM 0x00000041U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_EXT 0x00000046U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_INT 0x00000047U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_MINI_EXT 0x00000048U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_SERIALIZER 0x00000049U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_A 0x00000061U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_C_MINI 0x00000063U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_1 0x00000064U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_2 0x00000065U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VIRTUAL_WFD 0x00000070U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C 0x00000071U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI 0x00000072U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN 0x00000073U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN 0xFFFFFFFFU + +/* defines for the platform field */ +#define 
NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_DEFAULT_ADD_IN_CARD 0x00000000U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_TWO_PLATE_ADD_IN_CARD 0x00000001U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_CONFIGURABLE 0x00000002U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_DESKTOP_FULL_DP 0x00000007U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_ADD_IN_CARD 0x00000008U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MXM 0x00000009U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK 0x00000010U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK_LEFT 0x00000011U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_MOBILE_BACK_DOCK 0x00000018U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_CRUSH_DEFAULT 0x00000020U +#define NV0073_CTRL_SPECIFIC_CONNECTOR_PLATFORM_UNKNOWN 0xFFFFFFFFU + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE + * + * This command is used to signal the resource manager that the upcoming mode + * shall be hdmi vs dvi. This is required since the resource manager + * does not read the display edid. The resource manager shall enable hdmi + * components such as turning on the audio engine for instance. This should + * be called prior to every modeset in which the displayId is capable of hdmi. + * displayId + * This parameter specifies the displayId of HDMI resource to configure. + * This comes as input to this command. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which operation should be directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * enable + * This field specifies the legal values: + * NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_TRUE + * NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_FALSE + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID (0x73U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS { + NvU8 subDeviceInstance; + NvU32 displayId; + NvU8 enable; +} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS; + +#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_FALSE (0x00000000U) +#define NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_TRUE (0x00000001U) + +/* + * NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI + * + * This command can be used to enable HDMI communication on the associated GPU. + * This should be called prior to every modeset in which the displayId is capable of HDMI. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which operation should be directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * displayId + * This parameter specifies the displayId of HDMI resource to configure. + * This comes as input to this command. 
+ * enable + * This field specifies the legal values: + * NV0073_CTRL_SPECIFIC_CTRL_HDMI_DISABLE + * NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_CTRL_HDMI (0x730274U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS_MESSAGE_ID (0x74U) + +typedef struct NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS { + NvU8 subDeviceInstance; + NvU32 displayId; + NvBool bEnable; +} NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS; + +#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_DISABLE (0x00000000U) +#define NV0073_CTRL_SPECIFIC_CTRL_HDMI_ENABLE (0x00000001U) + + + +/* + * NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING + * + * This structure defines the mapping between the ACPI ID and the corresponding + * display ID of a display device + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * acpiId + * The ACPI ID of the display device + * displayId + * The corresponding display ID + * dodIndex + * The corresponding DOD index + */ +typedef struct NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING { + NvU32 subDeviceInstance; + NvU32 acpiId; + NvU32 displayId; + NvU32 dodIndex; +} NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING; + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING + * + * This call will update the RM data structure which holds the + * ACPI ID to display ID mapping of the display devices + * + * The input parameter is an array of structures of type + * NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING + * + * If some of the array elements remain unused, the acpiId field of the + * structure must be set to 0x0000 + * + * The size of the array is given by + * NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES (defined below) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * +*/ +#define NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING (0x730284U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES 16U + +#define NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS_MESSAGE_ID (0x84U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS { + NV0073_CTRL_SPECIFIC_ACPI_ID_MAPPING mapTable[NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES]; +} NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK + * + * This call will return all head mask. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * headMask + * headMask is the mask of all heads that are usable. For example, if + * head 0 and head 2 are present, headMask would be NVBIT(0)|NVBIT(2). This + * parameter returns to the client. 
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID (0x87U)
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET
+ *
+ * This command is used to program the display output packets.
+ * Currently it supports DP and HDMI.
+ * Common supported packets are AVI infoframes, Audio Infoframes, Gamma
+ * Metadata, Vendor Specific infoframes and General Control Packets (GCP).
+ * + GCP AVMute Enable should be performed before the start of the modeset.
+ * + GCP AVMute Disable should be performed after the end of the modeset.
+ *   GCP AVMute should contain HDR + 7 bytes.
+ * + AVI infoframes should occur after the modeset but before a GCP AVMute
+ *   Disable. AVI infoframe should contain HDR + 14 bytes.
+ * + Audio infoframes should occur after the modeset but before a GCP AVMute
+ *   Enable.
+ *   Audio infoframe should contain HDR + 11 bytes.
+ * + Gamma Metadata packets should contain HDR + 28 bytes.
+ * + Vendor Specific packets are variable length.
+ *   Per the HDMI 1.4 (June 5, 2009) spec, the payload can be 5, 6, 7 or
+ *   16 bytes, depending on the packet's spec.
+ *   Unused data bytes should be zero-ed out.
+ *
+ * displayID
+ *   This parameter specifies the displayID for the display resource to
+ *   configure.
+ *   This comes as input to this command.
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed.
+ *   This parameter must specify a value between zero and the total number
+ *   of subdevices within the parent device. This parameter should be set
+ *   to zero for default behavior.
+ * transmitControl
+ *   This parameter controls how the packet is to be sent by setting the
+ *   control bits.
+ *   Possible flags are as follows:
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE
+ *       Set to _ENABLE to start sending the packet at next frame, set to
+ *       _DISABLE to stop sending.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME
+ *       Set to _ENABLE to send the packet at other frame, set to _DISABLE to
+ *       send at every frame.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME
+ *       Set to _ENABLE to send once next frame, set to _DISABLE to send at
+ *       every frame.
+ *       Note: A setting to set both _OTHER_FRAME and _SINGLE_FRAME is invalid.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK
+ *       Set to _ENABLE to send the packet once on next HBLANK, set to
+ *       _DISABLE to send on VBLANK.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE
+ *       Set to _ENABLE to send the info frame packet as soon as possible.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT
+ *       Set to _SW_CONTROLLED to set the HDMI_Video_Format and 3D_Structure fields
+ *       from NV_PDISP_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 and PB5. If it is set to
+ *       _HW_CONTROLLED, HW will derive them from the state of the setHdmiCtrl
+ *       method. Note that this applies only to stereo overrides.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+ *       Set to TRUE to send Vendor specific info frame used for 3D stereo LR sync.
+ *       Set PACKET_TYPE=pktType_VendorSpecInfoFrame along with this flag.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+ *       Set to TRUE to send Vendor specific info frame used for Self Refresh panels.
+ *       Set PACKET_TYPE=pktType_VendorSpecInfoFrame along with this flag.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE
+ *       HW provides support to program 2 generic infoframes per frame for DP with GP10X+.
+ *       This flag indicates the INFOFRAME that needs to be programmed.
+ *       Set to _INFOFRAME0 if RM should program GENERIC_INFOFRAME.
+ *       Set to _INFOFRAME1 if RM should program GENERIC_INFOFRAME1.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+ *       This option is reserved for backward compatibility with
+ *       NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_PACKET and
+ *       NV0073_CTRL_CMD_DP_SET_PACKET.
+ *       It is temporary and will be deprecated soon.
+ * packetSize
+ *   Packet size of the packets in pPacket to send, including header and payload.
+ * targetHead
+ *   Specifies the target head number for which the SDP needs to be updated.
+ * bUsePsrHeadforSdp
+ *   Indicates that the targetHead field should be used for setting the SDP
+ *   or infoframe packet instead of deriving the active head from displayID.
+ * pPacket
+ *   pPacket points to the packets to send.
+ *   For HDMI 1.1, the maximum allowed bytes is 31.
+ *   The packet array includes the 3 bytes of header + data depending on
+ *   the type of packet. For an infoframe, the header bytes refer to type,
+ *   version and length respectively. This comes as input to this command.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE 36U
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID (0x88U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  transmitControl;
+    NvU32  packetSize;
+    NvU32  targetHead;
+    NvBool bUsePsrHeadforSdp;
+    NvU8   aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE 0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME 1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME 2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE (0x0000001U)
+#define 
NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE 4:4 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT 5:5 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY 6:6 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING 7:7 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE 9:8 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0 (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1 (0x0000001U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE 31:31 +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO (0x0000000U) +#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES (0x0000001U) + + +/* + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS + * + * This command is used to enable/disable sending of display output packets. + * Currently it supports HDMI only. + * Unused data bytes should be zero-ed out. + * + * displayID + * This parameter specifies the displayID for the display output resource to + * configure. + * This comes as input to this command. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * type + * The client shall specify the type of display output packet. For HDMI, set + * this according to HDMI specification 1.4. + * This comes as input to this command. + * transmitControl + * This parameter controls how the packet is to be sent by setting the control + * bits. + * Possible flags are as follows: + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE + * Set to _ENABLE to start sending the packet at next frame, set to + * _DISABLE to stop sending. + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME + * Set to _ENABLE to send the packet at other frame, set to _DISABLE to + * send at every frame. + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME + * Set to _ENABLE to send once next frame, set to _DISABLE to send at + * every frame. + * Note: A setting to set both _OTHER_FRAME and _SINGLE_FRAME is invalid + * NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK + * Set to _ENABLE to send the packet once on next HBLANK, set to _DISABLE + * to send on VBLANK. 
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT
+ *       Set to _SW_CONTROLLED to set the HDMI_Video_Format and 3D_Structure fields
+ *       from NV_PDISP_SF_HDMI_VSI_SUBPACK0_HIGH_PB4 and PB5. If it is set to
+ *       _HW_CONTROLLED, HW will derive them from the state of the setHdmiCtrl
+ *       method. Note that this applies only to stereo overrides.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+ *       Set to TRUE to enable the Vendor specific info frame used for 3D stereo LR sync.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+ *       Set to TRUE to enable the Vendor specific info frame used for Self Refresh panels.
+ *     NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+ *       This option is reserved for backward compatibility with
+ *       NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_PACKET and
+ *       NV0073_CTRL_CMD_DP_SET_PACKET.
+ *       It is temporary and will be deprecated soon.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL (0x730289U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS_MESSAGE_ID (0x89U)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 type;
+    NvU32 transmitControl;
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE_NO NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ENABLE_YES NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK_DISABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_ON_HBLANK_ENABLE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT
+#define 
NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES
+
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT
+ *
+ * This command returns the maximum pixel clock rate supported by the
+ * specified display device.
+ *
+ * subDeviceInstance
+ *   This parameter specifies the subdevice instance within the
+ *   NV04_DISPLAY_COMMON parent device to which the operation should be
+ *   directed. This parameter must specify a value between zero and the
+ *   total number of subdevices within the parent device. This parameter
+ *   should be set to zero for default behavior.
+ * displayId
+ *   This parameter specifies the display for which information is to be
+ *   returned. Only one display may be indicated in this parameter.
+ *   If more than one displayId is used a failing status of
+ *   NV_ERR_INVALID_ARGUMENT will be returned.
+ * pclkLimit
+ *   This parameter returns the min of orPclkLimit and vbPclkLimit in KHz.
+ *   It may be used for SLI configs that use a video bridge. For non-SLI
+ *   configs and bridgeless SLI configs, the client should use orPclkLimit instead.
+ * orPclkLimit
+ *   This parameter returns the maximum pixel clock frequency of OR in KHz.
+ * vbPclkLimit
+ *   This parameter returns the maximum pixel clock frequency of the
+ *   video bridge (SLI) in KHz (or zero if there is no video bridge).
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT (0x73028aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS_MESSAGE_ID (0x8AU) + +typedef struct NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 pclkLimit; + NvU32 orPclkLimit; + NvU32 vbPclkLimit; +} NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO + * + * This command returns output resource information for the specified display + * device. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the display for which information is to be + * returned. Only one display may be indicated in this parameter. + * If more than one displayId is used a failing status of + * NV_ERR_INVALID_ARGUMENT will be returned. + * type + * This parameter returns the output resource type. Legal values for + * this parameter include: + * NV0073_CTRL_SPECIFIC_OR_TYPE_DAC + * The output resource is a DAC. + * NV0073_CTRL_SPECIFIC_OR_TYPE_SOR + * The output resource is a serial output resource. + * NV0073_CTRL_SPECIFIC_OR_TYPE_DSI + * The output resource is a Display Serial Interface output resource. + * NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR + * The output resource is a parallel input output resource. + * index + * This parameter returns the type-specific index of the output + * resource associated with the specified displayId. + * protocol + * This parameter returns the type-specific protocol used by the + * output resource. Legal values for this parameter include: + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI + * NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN + * ditherType + * This parameter returns the dither type for the output resource. + * Legal values for this parameter include: + * NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS + * NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS + * NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_10_BITS + * NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF + * ditherAlgo + * This parameter returns the dithering algorithm used by the output + * resource. Legal values for this parameter include: + * NV0073_CTRL_SPECIFIC_OR_DITHER_ALOGO_DYNAMIC_ERR_ACC + * NV0073_CTRL_SPECIFIC_OR_DITHER_ALOGO_STATIC_ERR_ACC + * NV0073_CTRL_SPECIFIC_OR_DITHER_ALOGO_DYNAMIC_2X2 + * NV0073_CTRL_SPECIFIC_OR_DITHER_ALOGO_STATIC_2X2 + * NV0073_CTRL_SPECIFIC_OR_DITHER_ALOGO_TEMPORAL + * NV0073_CTRL_SPECIFIC_OR_DITHER_ALOGO_UNKNOWN + * location + * This parameter returns the physical location of the output resource. 
+ * Legal values for this parameter include: + * NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP + * NV0073_CTRL_SPECIFIC_OR_LOCATION_BOARD + * rootPortId + * This parameter specifies the Root-Port ID for the given display. + * dcbIndex + * This parameter returns the DCB index of the display device. + * vbiosAddress + * This parameter is the VBIOS IP address which will have valid value + * only if displayId is allocated by VBIOS. + * bIsLitByVbios + * This parameter specifies that whether displayID allocation was + * requested by VBIOS or not. + * bIsDispDynamic + * Returns NV_TRUE if DisplayID is allocated Dynamically else NV_FALSE + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID (0x8BU) + +typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 index; + NvU32 type; + NvU32 protocol; + NvU32 ditherType; + NvU32 ditherAlgo; + NvU32 location; + NvU32 rootPortId; + NvU32 dcbIndex; + NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8); + NvBool bIsLitByVbios; + NvBool bIsDispDynamic; +} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS; + +/* valid type values */ +#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC (0x00000001U) +#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR (0x00000002U) +#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR (0x00000003U) + + +#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI (0x00000005U) + +/* valid DAC protocol values */ +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT (0x00000000U) + + + +/* valid SOR protocol values */ +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS (0x00000005U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A (0x00000008U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B (0x00000009U) +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI (0x00000010U) + +/* valid DSI protocol values */ +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI (0x00000011U) + +/* valid PIOR protocol values */ +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC (0x00000000U) + +/* valid UNKNOWN protocol value */ +#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN (0xFFFFFFFFU) + +/* valid ditherType values */ +#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS (0x00000001U) +#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_10_BITS (0x00000002U) +#define NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF (0x00000003U) + +/* valid ditherAlgo values */ +#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC (0x00000001U) +#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2 (0x00000002U) +#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2 (0x00000003U) +#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL (0x00000004U) +#define NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN (0xFFFFFFFFU) + +/* valid location values */ +#define NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP (0x00000000U) +#define NV0073_CTRL_SPECIFIC_OR_LOCATION_BOARD (0x00000001U) 
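/*
 * A minimal usage sketch for NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO: it fills in
 * NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS for a single displayId and reads back
 * the output resource type and protocol. This example is illustrative only; the
 * rmControl() dispatch helper and the hClient/hDisplayCommon handles are
 * assumptions standing in for whatever RM control entry point the client
 * environment provides, and only the command value and the parameter layout
 * come from this header.
 */
static NV_STATUS exampleGetOrInfo(NvHandle hClient, NvHandle hDisplayCommon,
                                  NvU32 displayId, NvU32 *pType, NvU32 *pProtocol)
{
    NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 };
    NV_STATUS status;

    params.subDeviceInstance = 0;         /* zero selects default behavior */
    params.displayId         = displayId; /* exactly one display bit may be set */

    /* rmControl() is a hypothetical stand-in for the client's RM control call. */
    status = rmControl(hClient, hDisplayCommon,
                       NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
                       &params, sizeof(params));
    if (status == NV_OK)
    {
        *pType     = params.type;      /* e.g. NV0073_CTRL_SPECIFIC_OR_TYPE_SOR */
        *pProtocol = params.protocol;  /* e.g. NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A */
    }
    return status;
}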
+ + + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS + * NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS + * + * These commands retrieve and set the user backlight brightness for + * the specified display. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId + * Display for which brightness is to be retrieved or set. + * brightness + * The backlight brightness in the range [0,100], inclusive. This + * is an input for SET_BACKLIGHT_BRIGHTNESS, and an output for + * GET_BACKLIGHT_BRIGHTNESS. + * + * + * Possible status values returned include: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | 0x91" */ + +#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | 0x92" */ + +#define NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE 0U +#define NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE 100U + +typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 brightness; +} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS + * + * This command is used to inform RM about the scrambling, clock mode, FRL and + * DSC caps of the HDMI sink device. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed.. + * displayID + * This parameter specifies the displayID for the display output resource to + * configure. + * caps + * This parameter specifies the sink caps. 
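+ *     As an illustrative sketch only (the individual cap fields are described
+ *     below), a client that has parsed the sink's HDMI Forum VSDB might pack
+ *     this word with plain shifts matching the bit ranges defined after the
+ *     parameter structure, e.g.:
+ *       caps  = NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE << 2;
+ *       caps |= NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G << 3;
+ *     The values used here are examples; the actual settings depend on what
+ *     the sink reports.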
+ * GT_340MHZ_CLOCK_SUPPORTED refers to whether sink supports TMDS clock (sorClk) rate greater than 340 MHz + * LTE_340MHZ_SCRAMBLING_SUPPORTED refers to whether scrambling is supported for clock rate at or below 340 MHz + * SCDC_SUPPORTED refers to whether SCDC access is supported on sink + * MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL rate supported + * DSC_12_SUPPORTED refers to whether VESA DSC v1.2a is supported + * DSC_12_MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL rate supported when VESA DSC v1.2a is supported + * + * + * Possible status values returned include: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID (0x93U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 caps; +} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED 0:0 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED 1:1 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED 2:2 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED 5:3 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G (0x00000004U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED 6:6 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED 9:7 +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G (0x00000002U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G (0x00000003U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G 
(0x00000004U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U) +#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U) + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER + * + * This command sets monitor power on/off. + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId + * This parameter specifies the displayID for the display output resource to + * configure. + * powerState + * This parameter should be one of the valid + * NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_* values. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER (0x730295U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS_MESSAGE_ID (0x95U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 powerState; +} NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS; + +#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_OFF (0x00000000U) +#define NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_ON (0x00000001U) + + + +/* +* NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG +* +* This command is used to perform HDMI FRL link training and enable FRL mode for +* the specified displayId. The link configuration will be returned after link +* training success. +* +* subDeviceInstance +* This parameter specifies the subdevice instance within the +* NV04_DISPLAY_COMMON parent device to which the operation should be +* directed. +* displayID +* This parameter specifies the displayID for the display output resource to +* configure. +* data +* This parameter is an input and output to this command. +* Here are the current defined fields: +* NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE +* This field specifies the desired setting for lane count. A client may +* choose any lane count as long as it does not exceed the capability of +* HDMI FRL sink as indicated in the sink capability field. +* The valid values for this field are: +* NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE +* For 0 lane configuration, link training is shut down (disable FRL). +* NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G +* For FRL 3-lane configuration and 3 Gbps bandwidth per lane. +* NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G +* For FRL 3-lane configuration and 6 Gbps bandwidth per lane. +* NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G +* For FRL 4-lane configuration and 6 Gbps bandwidth per lane. +* NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G +* For FRL 4-lane configuration and 8 Gbps bandwidth per lane. +* NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G +* For FRL 4-lane configuration and 10 Gbps bandwidth per lane. +* NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G +* For FRL 4-lane configuration and 12 Gbps bandwidth per lane. +* On return, the link bandwidth setting is returned which may be +* different from the requested input setting. +* bFakeLt +* This flag as input to this command. +* It indicates the FRL link training is a fake link training or not. +* TRUE if the FRL link training is fake and no real sink device attached. +* bLtSkipped +* The flag returned indicating whether link training is skipped or not. 
+* TRUE if link training is skipped because the link configuration is unchanged.
+*
+* Possible status values returned include:
+* NV_OK -
+* After finishing link training, NV_OK status will be returned along with
+* the updated link configuration. In case of link training failure, FRL_RATE_NONE
+* will be returned with NV_OK.
+* NV_ERR_NOT_SUPPORTED -
+* If the GPU/sink is not capable of HDMI FRL, NV_ERR_NOT_SUPPORTED status
+* will be returned.
+* NV_ERR_INVALID_ARGUMENT
+* If any argument is invalid for this control call, NV_ERR_INVALID_ARGUMENT
+* status will be returned.
+*/
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_FRL_CONFIG (0x73029aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS_MESSAGE_ID (0x9AU)
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  data;
+    NvBool bFakeLt;
+    NvBool bLtSkipped;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS;
+
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE            2:0
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_NONE       (0x00000000U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_3G  (0x00000001U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_3LANES_6G  (0x00000002U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_6G  (0x00000003U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_8G  (0x00000004U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_HDMI_FRL_DATA_SET_FRL_RATE_4LANES_12G (0x00000006U)
+
+
+
+#define NV0073_CTRL_SPECIFIC_MAX_CRC_REGIONS 9U
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS_MESSAGE_ID (0xA0U)
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 regionCrcs[NV0073_CTRL_SPECIFIC_MAX_CRC_REGIONS];
+    NvU16 reqRegionCrcMask;
+} NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS
+ *
+ * This command is used to capture the active viewport region CRCs.
+ *
+ * [in]subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ * [in]displayId
+ *     This parameter specifies the displayId of the panel for which the
+ *     region CRCs are to be captured.
+ *
+ * [out]regionCrcs
+ *     This field holds the region CRC values to be returned after successful
+ *     completion of the control command.
+ *
+ * [in]reqRegionCrcMask
+ *     This parameter specifies the bitmask of requested CRC regions.
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_GENERIC
+ *    NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS (0x7302a0U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_GET_REGIONAL_CRCS_PARAMS_MESSAGE_ID" */
+
+/*
+* NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2
+*
+* Apply EDID override on a specific OD.
+*
+* subDeviceInstance
+* This parameter specifies the subdevice instance within the
+* NV04_DISPLAY_COMMON parent device to which the operation should be
+* directed.
+* displayId (in)
+* ID of panel on which the operation is to be performed.
+* bufferSize (in) +* Size of the EDID buffer. +* edidBuffer (in/out) +* The buffer which stores the EDID before and after override. +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_PARAMETER +*/ +#define NV0073_CTRL_CMD_SPECIFIC_APPLY_EDID_OVERRIDE_V2 (0x7302a1U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS_MESSAGE_ID (0xA1U) + +typedef struct NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU32 bufferSize; + NvU8 edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES]; +} NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS + * + * This command is used to get the HDMI FRL caps of GPU side. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * caps + * This parameter specifies the GPU caps. + * MAX_FRL_RATE_SUPPORTED refers to the maximum HDMI 2.1 FRL link rate supported + * + * + * Possible status values returned include: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS (0x7302a2U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS_MESSAGE_ID (0xA2U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS { + NvU32 subDeviceInstance; + NvU32 caps; +} NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED 2:0 +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_NONE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_3LANES_3G (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_3LANES_6G (0x00000002U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_6G (0x00000003U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_8G (0x00000004U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_10G (0x00000005U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_GPU_CAPS_MAX_FRL_LINK_RATE_SUPPORTED_4LANES_12G (0x00000006U) + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE + * + * Notifies the system that a display change is about to begin/end. + * Also performs the necessary synchronizations for the same. + * + * The command takes a NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS structure as an + * argument with appropriate subDeviceInstance. + * + * [in]subDeviceInstance + * The sub-device instance + * [in]newDevices + * Bitmask of devices that are planned on being enabled with the + * pending device change. See NV_CFGEX_GET_DEVICES_CONFIGURATION for bit defs. + * [in]properties + * Bitmask of display attributes for new configuration (none used at the moment). + * [in]enable + * Parameter to decide between display change start and end. Can take values + * NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START or NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END. 
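+ *
+ *    Purely as an editorial illustration (not part of this interface), a
+ *    client would typically bracket a modeset with this control, using the
+ *    NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS structure defined below:
+ *      params.subDeviceInstance = 0;
+ *      params.newDevices        = newDeviceMask;  // devices being enabled
+ *      params.properties        = 0;
+ *      params.enable            = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START;
+ *      rmControl(NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE, &params, sizeof(params));
+ *      ... perform the modeset ...
+ *      params.enable            = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END;
+ *      rmControl(NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE, &params, sizeof(params));
+ *    "rmControl" is a hypothetical stand-in for the client's RM control
+ *    dispatch.
+ *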
+ * Possible return values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS_MESSAGE_ID (0xA4U) + +typedef struct NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS { + + NvU32 subDeviceInstance; + NvU32 newDevices; + NvU32 properties; + NvU32 enable; +} NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS; + +#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END (0x00000000U) +#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START (0x00000001U) + +#define NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PROPERTIES_SPANNING (0x00000001U) + +#define NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE (0x7302a4U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS_MESSAGE_ID" */ + + + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA + * + * This command is used to get the HDMI sink status/caps via Status and Control + * Data Channel (SCDC). + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * displayId + * This parameter specifies the displayId of HDMI sink. + * offset + * This parameter specifies the SCDC offset which the read operation + * should be used. + * data + * This field specifies the return data from sink for reading the specified + * SCDC offset. + * + * + * Possible status values returned include: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA (0x7302a6U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS_MESSAGE_ID (0xA6U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvU8 offset; + NvU8 data; +} NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS; + +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET 7:0 +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_NONE (0x00000000U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SINK_VERSION (0x00000001U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SOURCE_VERSION (0x00000002U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_UPDATE_FLAGS_0 (0x00000010U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_TMDS_CONFIGURATION (0x00000020U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SCRAMBLER_STATUS (0x00000021U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CONFIGURATION_0 (0x00000030U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CONFIGURATION_1 (0x00000031U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_SOURCE_TEST_CONFIGURATION (0x00000035U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_0 (0x00000040U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_1 (0x00000041U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_STATUS_FLAGS_2 (0x00000042U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_0 (0x00000050U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_1 (0x00000051U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_2 (0x00000052U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_3 (0x00000053U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_4 (0x00000054U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_5 
(0x00000055U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_6 (0x00000056U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_7 (0x00000057U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_CED_8 (0x00000058U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_RSED_0 (0x00000059U) +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_SCDC_DATA_OFFSET_RSED_1 (0x0000005AU) + +/* + * NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY + * + * This command is used to query whether the specified monitor should be used + * with directmode. + * + * [in]manufacturerID + * This parameter specifies the 16-bit EDID Manufacturer ID. + * [in]productID + * This parameter specifies the 16-bit EDID Product ID. + * [out]bIsDirectmode; + * This indicates whether the monitor should be used with directmode. + * Possible return values: + * NV_OK + */ + +#define NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY (0x7302a7U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS_MESSAGE_ID (0xA7U) + +typedef struct NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS { + NvU16 manufacturerID; + NvU16 productID; + NvBool bIsDirectmode; +} NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION + * + * This command is used to get the HDMI FRL capacity computation result. + * + * [in] cmd + * This parameter specifies the command for the HDMI FRL capacity computation. + * [in] input + * This parameter specifies the input data for the HDMI FRL capacity + * computation. + * [out] result + * This indicates the computation result of HDMI FRL capacity computation. + * [in/out] preCalc + * This indicates the pre-caculation result of HDMI FRL capacity computation. + * [in/out] dsc + * This indicates the DSC parameters of HDMI FRL capacity computation. 
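+ *
+ *    Illustrative sketch only (field meanings are inferred from the
+ *    structures below and should be verified against actual client usage):
+ *    to check whether a timing fits a given FRL configuration without
+ *    compression, a client might fill the parameters along these lines:
+ *      p.cmd = NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_UNCOMPRESSED_VIDEO;
+ *      p.input.numLanes       = 4;
+ *      p.input.frlBitRateGbps = 8;
+ *      p.input.pclk10KHz      = pclk10KHz;   // pixel clock of the candidate timing
+ *      p.input.hTotal         = hTotal;
+ *      p.input.hActive        = hActive;
+ *      p.input.bpc            = 8;
+ *      // ... remaining input/audio fields as appropriate ...
+ *    issue the control, and then consult p.result.isVideoTransportSupported,
+ *    p.result.isAudioSupported and p.result.engageCompression.
+ *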
+ * Possible return values: + * NV_OK + */ + +#define NV0073_CTRL_CMD_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION (0x7302a8U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS { + NvU32 numLanes; + NvU32 frlBitRateGbps; + NvU32 pclk10KHz; + NvU32 hTotal; + NvU32 hActive; + NvU32 bpc; + NvU32 pixelPacking; + NvU32 audioType; + NvU32 numAudioChannels; + NvU32 audioFreqKHz; + + struct { + NvU32 bppTargetx16; + NvU32 hSlices; + NvU32 sliceWidth; + NvU32 dscTotalChunkKBytes; + } compressionInfo; +} NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS; + +typedef struct NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT { + NvU32 frlRate; + NvU32 bppTargetx16; + NvBool engageCompression; + NvBool isAudioSupported; + NvBool dataFlowDisparityReqMet; + NvBool dataFlowMeteringReqMet; + NvBool isVideoTransportSupported; + NvU32 triBytesBorrowed; + NvU32 hcActiveBytes; + NvU32 hcActiveTriBytes; + NvU32 hcBlankTriBytes; + NvU32 tBlankToTTotalX1k; +} NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT; + +typedef struct NV0073_CTRL_FRL_PRE_CALC_CONFIG { + NvU32 vic; + NvU32 packing; + NvU32 bpc; + NvU32 frlRate; + NvU32 bppX16; + NvBool bHasPreCalcFRLData; +} NV0073_CTRL_FRL_PRE_CALC_CONFIG; + +typedef struct NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS { + NvU32 maxSliceCount; + NvU32 maxSliceWidth; + NvBool bIsDSCPossible; +} NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS; + +#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS_MESSAGE_ID (0xA8U) + +typedef struct NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS { + NvU8 cmd; + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_PARAMS input; + NV0073_CTRL_FRL_CAPACITY_COMPUTATION_RESULT result; + NV0073_CTRL_FRL_PRE_CALC_CONFIG preCalc; + NV0073_CTRL_IS_FRL_DSC_POSSIBLE_PARAMS dsc; +} NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS; + +#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_NULL (0x00000000U) +#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_UNCOMPRESSED_VIDEO (0x00000001U) +#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_COMPRESSED_VIDEO (0x00000002U) +#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_HAS_PRECAL_FRL_DATA (0x00000003U) +#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_UNCOMPRESSED_FRL_CONFIG (0x00000004U) +#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_GET_PRECAL_COMPRESSED_FRL_CONFIG (0x00000005U) +#define NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_CMD_IS_FRL_DSC_POSSIBLE (0x00000006U) + +/* + * NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET + * + * This command is used to program the display output packets. + * This generic packets can be used for both HDMI and DP. + * HW has added 6 new generic packets for each head because some usecases have + * requirement to send infoframe in particular location (vsync, vblank, loadV). + * + * Note: 1. Client first needs to reserve or acquire a free infoframe index + * using NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET. + * 2. Client needs to update the SDP index for head through control call + * NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET + * 3. 
Client needs to release the infoframe index using the control call
+ *          NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET
+ *
+ * [in]subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ *     This parameter must specify a value between zero and the total number
+ *     of subdevices within the parent device. This parameter should be set
+ *     to zero for default behavior.
+ * [in]transmitControl
+ *     This parameter controls how the packet is to be sent by setting the
+ *     control bits.
+ *     Possible flags are as follows:
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE
+ *         Setting this field to _YES will enable this generic infoframe;
+ *         setting this field to _NO will disable this generic infoframe.
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE
+ *         Setting this field to _YES will cause the new infoframe to be
+ *         transmitted exactly once; setting it to _NO will cause the new
+ *         infoframe to be transmitted every frame.
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC
+ *         The SDP can be sent in 3 different locations:
+ *             VBLANK - new infoframe will be sent at Vblank.
+ *             VSYNC  - new infoframe will be sent at Vsync.
+ *             LOADV  - new infoframe will be triggered by LOADV, and sent at Vsync.
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE
+ *         _ENABLE: override DB1 bit1 with existence of loadv (for Panel Self Refresh).
+ *         _DISABLE: do not override shared generic infoframe subpacket DB1 bit1.
+ *     NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE
+ *         _ENABLE: override DB1 bit3 with existence of loadv (for Panel Replay).
+ *         _DISABLE: do not override shared generic infoframe subpacket DB1 bit3.
+ * [in]packetSize
+ *     Size of the packet in the packet array to send, including header and payload.
+ * [in]targetHeadIndex
+ *     Specifies the target head number for which the SDP needs to be updated.
+ * [in]infoframeIndex
+ *     Specifies the index of the infoframe.
+ * [in]packet
+ *     pPacket points to the packets to send.
+ *     For HDMI 1.1, the maximum allowed bytes is 31.
+ *     The packet array includes the 3 bytes of header + data depending on
+ *     the type of packet. For an infoframe, the header bytes refer to type,
+ *     version and length respectively. This comes as input to this command.
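+ *
+ *    As an editorial illustration only: once an infoframe index has been
+ *    acquired (see NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET
+ *    below), a client might program a packet that is sent every frame at
+ *    Vsync roughly like this, with the shift amounts taken from the bit
+ *    ranges defined after the parameter structure:
+ *      p.transmitControl =
+ *          (NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE_YES << 0) |
+ *          (NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE_NO  << 1) |
+ *          (NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_VSYNC  << 2);
+ *      p.packet[0]  = type;     // infoframe header: type
+ *      p.packet[1]  = version;  // infoframe header: version
+ *      p.packet[2]  = length;   // infoframe header: payload length
+ *      // ... payload bytes follow the 3-byte header ...
+ *      p.packetSize = 3 + length;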
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET (0x7302a9) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xA9U) + +typedef struct NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS { + NvU32 subDeviceInstance; + NvU32 transmitControl; + NvU32 packetSize; + NvU32 targetHeadIndex; + NvU32 infoframeIndex; + NvU8 packet[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE]; +} NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS; + +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE 0:0 +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE_NO (0x0000000) +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_ENABLE_YES (0x0000001) + +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE 1:1 +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE_NO (0x0000000) +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_SINGLE_YES (0x0000001) + +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC 5:2 +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_VBLANK (0x0000000) +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_VSYNC (0x0000001) +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_LOC_LOADV (0x0000002) + +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE 6:6 +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE_DISABLE (0x0000000) +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_STATE_OVERRIDE_ENABLE (0x0000001) + +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE 7:7 +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE_DISABLE (0x0000000) +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_UPDATE_RFB_OVERRIDE_ENABLE (0x0000001) + +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE 8:8 +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE_DISABLE (0x0000000) +#define NV0073_CTRL_SPECIFIC_SHARED_GENERIC_CTRL_VSC_SDP_SU_COORDINATES_VALID_OVERRIDE_ENABLE (0x0000001) + +/* + * NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET + * + * This command is used to reserve the infoframe for head and RM would assign + * free infoframe index and return the index of infoframe. Later client needs + * to call control call NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET + * to release the index. + * + * [in]subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * [in]targetHeadIndex + * target Head for which SDP needs to be sent + * [out]infoframeIndex + * return Infoframe Index for head. 
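+ *
+ *    Illustrative flow only (the helper name "rmControl" is hypothetical):
+ *    the acquire/set/release lifecycle typically looks like
+ *      acquire.subDeviceInstance = 0;
+ *      acquire.targetHeadIndex   = head;
+ *      rmControl(NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET,
+ *                &acquire, sizeof(acquire));
+ *      // program the packet with the RM-assigned index
+ *      setParams.infoframeIndex  = acquire.infoframeIndex;
+ *      rmControl(NV0073_CTRL_CMD_SPECIFIC_SET_SHARED_GENERIC_PACKET,
+ *                &setParams, sizeof(setParams));
+ *      // and release the index when it is no longer needed
+ *      release.targetHeadIndex   = head;
+ *      release.infoframeIndex    = acquire.infoframeIndex;
+ *      rmControl(NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET,
+ *                &release, sizeof(release));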
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFIENT_RESOURCES + */ + +#define NV0073_CTRL_CMD_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET (0x7302aa) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xAAU) + +typedef struct NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS { + NvU32 subDeviceInstance; + NvU32 targetHeadIndex; + NvU32 infoframeIndex; +} NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS; + +/* + * NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET + * + * This command is used to release the infoframe index which was acquired by + * client. + * + * [in]subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * This parameter must specify a value between zero and the total number + * of subdevices within the parent device. This parameter should be set + * to zero for default behavior. + * [in]targetHeadIndex + * Specifies the target head number for which SDP needs to be updated. + * [in]infoframeIndex + * Infoframe index for the target head + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0073_CTRL_CMD_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET (0x7302ab) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS_MESSAGE_ID (0xABU) + +typedef struct NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS { + NvU32 subDeviceInstance; + NvU32 targetHeadIndex; + NvU32 infoframeIndex; +} NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS; + +/* _ctrl0073specific_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h new file mode 100644 index 000000000..b636475a4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073stereo.h @@ -0,0 +1,169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073stereo.finn +// + + + + +#include "ctrl/ctrl0073/ctrl0073base.h" + + + +/* + * NV0073_CTRL_CMD_STEREO_DONGLE_SET_TIMINGS + * + * Sets new video mode timings + * E.g. from display driver on mode set + * + * Parameters: + * [IN] subDeviceInstance - This parameter specifies the subdevice instance + * within the NV04_DISPLAY_COMMON parent device to which the operation + * should be directed. This parameter must specify a value between + * zero and the total number of subdevices within the parent device. + * This parameter should be set to zero for default behavior. + * [IN] head - head to be passed to stereoDongleControl + * [IN] timings - new timings to be set + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU + */ +#define NV0073_CTRL_CMD_STEREO_DONGLE_SET_TIMINGS (0x731703U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS { + NvU32 PixelClock; + NvU16 TotalWidth; + NvU16 VisibleImageWidth; + NvU16 HorizontalBlankStart; + NvU16 HorizontalBlankWidth; + NvU16 HorizontalSyncStart; + NvU16 HorizontalSyncWidth; + NvU16 TotalHeight; + NvU16 VisibleImageHeight; + NvU16 VerticalBlankStart; + NvU16 VerticalBlankHeight; + NvU16 VerticalSyncStart; + NvU16 VerticalSyncHeight; + NvU16 InterlacedMode; + NvU16 DoubleScanMode; + + NvU16 MonitorVendorId; + NvU16 MonitorProductId; +} NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS; + +#define NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NV0073_CTRL_STEREO_VIDEO_MODE_TIMINGS timings; +} NV0073_CTRL_STEREO_DONGLE_SET_TIMINGS_PARAMS; + +/* + * NV0073_CTRL_CMD_STEREO_DONGLE_ACTIVATE + * + * stereoDongleActivate wrapper / NV_STEREO_DONGLE_ACTIVATE_DATA_ACTIVE_YES + * Updates sbios of 3D stereo state active + * + * Parameters: + * [IN] subDeviceInstance - This parameter specifies the subdevice instance + * within the NV04_DISPLAY_COMMON parent device to which the operation + * should be directed. This parameter must specify a value between + * zero and the total number of subdevices within the parent device. + * This parameter should be set to zero for default behavior. + * [IN] head - head to be passed to stereoDongleActivate + * [IN] bSDA - enable stereo on DDC SDA + * [IN] bWorkStation - is workstation stereo? + * [IN] bDLP - is checkerboard DLP Stereo? + * [IN] IRPower - IR power value + * [IN] flywheel - FlyWheel value + * [IN] bRegIgnore - use reg? 
+ * [IN] bI2cEmitter - Sets NV_STEREO_DONGLE_ACTVATE_DATA_I2C_EMITTER_YES and pStereo->bAegisDT + * [IN] bForcedSupported - Sets NV_STEREO_DONGLE_FORCED_SUPPORTED_YES and pStereo->GPIOControlledDongle + * [IN] bInfoFrame - Aegis DT with DP InfoFrame + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT - if (head > OBJ_MAX_HEADS) + * NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU + */ +#define NV0073_CTRL_CMD_STEREO_DONGLE_ACTIVATE (0x731704U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvBool bSDA; + NvBool bWorkStation; + NvBool bDLP; + NvU8 IRPower; + NvU8 flywheel; + NvBool bRegIgnore; + NvBool bI2cEmitter; + NvBool bForcedSupported; + NvBool bInfoFrame; +} NV0073_CTRL_STEREO_DONGLE_ACTIVATE_PARAMS; + +/* + * NV0073_CTRL_CMD_STEREO_DONGLE_DEACTIVATE + * + * stereoDongleActivate wrapper / NV_STEREO_DONGLE_ACTIVATE_DATA_ACTIVE_NO + * + * If active count<=0 then no 3D app is running which indicates + * that we have really deactivated the stereo, updates sbios of 3D stereo state NOT ACTIVE. + * + * Parameters: + * [IN] subDeviceInstance - This parameter specifies the subdevice instance + * within the NV04_DISPLAY_COMMON parent device to which the operation + * should be directed. This parameter must specify a value between + * zero and the total number of subdevices within the parent device. + * This parameter should be set to zero for default behavior. + * [IN] head - head to be passed to stereoDongleActivate + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT - if (head > OBJ_MAX_HEADS) + * NV_ERR_NOT_SUPPORTED - stereo is not initialized on the GPU + */ +#define NV0073_CTRL_CMD_STEREO_DONGLE_DEACTIVATE (0x731705U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID << 8) | NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; +} NV0073_CTRL_STEREO_DONGLE_DEACTIVATE_PARAMS; + + + +/* _ctrl0073stereo_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h new file mode 100644 index 000000000..08a06ae72 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073svp.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073svp.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h new file mode 100644 index 000000000..987f2fcb2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h @@ -0,0 +1,1075 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0073/ctrl0073system.finn +// + + + + +#include "ctrl/ctrl0073/ctrl0073base.h" + +/* NV04_DISPLAY_COMMON system-level control commands and parameters */ + +/* extract cap bit setting from tbl */ +#define NV0073_CTRL_SYSTEM_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* Caps format is byte_index:bit_mask. 
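+ * For example, NV0073_CTRL_SYSTEM_CAPS_HDMI_2_0_SUPPORTED below is 1:0x04,
+ * i.e. byte 1 of the caps table with mask 0x04, so a client holding the
+ * table returned by NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2 can test it with
+ * (illustration only):
+ *   if (NV0073_CTRL_SYSTEM_GET_CAP(caps.capsTbl,
+ *                                  NV0073_CTRL_SYSTEM_CAPS_HDMI_2_0_SUPPORTED))
+ *       ... HDMI 2.0 is supported on every GPU in the device ...
+ * The macro relies on the ternary trick: (1?c) evaluates to the byte index
+ * and (0?c) to the bit mask of a byte_index:bit_mask pair.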
+ * Important: keep the number of bytes needed for these fields in sync with + * NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE + */ +#define NV0073_CTRL_SYSTEM_CAPS_AA_FOS_GAMMA_COMP_SUPPORTED 0:0x01 +#define NV0073_CTRL_SYSTEM_CAPS_TV_LOWRES_BUG_85919 0:0x02 +#define NV0073_CTRL_SYSTEM_CAPS_DFP_GPU_SCALING_BUG_154102 0:0x04 +#define NV0073_CTRL_SYSTEM_CAPS_SLI_INTERLACED_MODE_BUG_235218 0:0x08 // Deprecated +#define NV0073_CTRL_SYSTEM_CAPS_STEREO_DIN_AVAILABLE 0:0x10 +#define NV0073_CTRL_SYSTEM_CAPS_OFFSET_PCLK_DFP_FOR_EMI_BUG_443891 0:0x20 +#define NV0073_CTRL_SYSTEM_CAPS_GET_DMI_SCANLINE_SUPPORTED 0:0x40 +/* + * Indicates support for HDCP Key Selection Vector (KSV) list and System + * Renewability Message (SRM) validation +*/ +#define NV0073_CTRL_SYSTEM_CAPS_KSV_SRM_VALIDATION_SUPPORTED 0:0x80 + +#define NV0073_CTRL_SYSTEM_CAPS_SINGLE_HEAD_MST_SUPPORTED 1:0x01 +#define NV0073_CTRL_SYSTEM_CAPS_SINGLE_HEAD_DUAL_SST_SUPPORTED 1:0x02 +#define NV0073_CTRL_SYSTEM_CAPS_HDMI_2_0_SUPPORTED 1:0x04 +#define NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED 1:0x08 +#define NV0073_CTRL_SYSTEM_CAPS_RASTER_LOCK_NEEDS_MIO_POWER 1:0x10 +/* + * Indicates that modesets where no heads are increasing resource requirements, + * or no heads are decreasing resource requirements, can be done glitchlessly. + */ +#define NV0073_CTRL_SYSTEM_CAPS_GLITCHLESS_MODESET_SUPPORTED 1:0x20 +/* Indicates the SW ACR is enabled for HDMI 2.1 due to Bug 3275257. */ +#define NV0073_CTRL_SYSTEM_CAPS_HDMI21_SW_ACR_BUG_3275257 1:0x40 + +/* Size in bytes of display caps table. Keep in sync with # of fields above. */ +#define NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE 2U + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2 + * + * This command returns the set of display capabilities for the parent device + * in the form of an array of unsigned bytes. Display capabilities + * include supported features and required workarounds for the display + * engine(s) within the device, each represented by a byte offset into the + * table and a bit position within that byte. The set of display capabilities + * will be normalized across all GPUs within the device (a feature capability + * will be set only if it's supported on all GPUs while a required workaround + * capability will be set if any of the GPUs require it). + * + * [out] capsTbl + * The display caps bits will be transferred by the RM into this array of + * unsigned bytes. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2 (0x730138U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x38U) + +typedef struct NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE]; +} NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS; + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS + * + * This commands returns the number of heads supported by the specified + * subdevice and available for use by displays. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * flags + * This parameter specifies optional flags to be used to while retrieving + * the number of heads. 
+ * Possible valid flags are: + * NV0073_CTRL_SYSTEM_GET_NUM_HEADS_CLIENT + * This flag is used to request the number of heads that are + * currently in use by an NV client using a user display class + * instance (see NV15_VIDEO_LUT_CURSOR_DAC for an examle). If this + * flag is disabled then the total number of heads supported is + * returned. + * numHeads + * This parameter returns the number of usable heads for the specified + * subdevice. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS { + NvU32 subDeviceInstance; + NvU32 flags; + NvU32 numHeads; +} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS; + +/* valid get num heads flags */ +#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT 0:0 +#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT_DISABLE (0x00000000U) +#define NV0073_CTRL_SYSTEM_GET_NUM_HEADS_FLAGS_CLIENT_ENABLE (0x00000001U) + + + + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE + * + * This command returns the current RG scanline of the specified head on the + * specified subdevice. To get the DMI scanline on supported chips, use + * NV0073_CTRL_CMD_SYSTEM_GET_DMI_SCANLINE + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * head + * This parameter specifies the head for which the active display + * should be retrieved. This value must be between zero and the + * maximum number of heads supported by the subdevice. + * currentScanline + * This parameter returns the current RG scanline value for the specified + * head. If the head does not have a valid mode enabled then a scanline + * value of 0xffffffff is returned. + * bStereoEyeSupported (out) + * This parameter specifies whether stereoEye reporting is supported (this + * is hw dependent). Note that this value doesn't actually reflect whether + * given head is really in stereo mode. + * stereoEye (out) + * If supported (ie bStereoEyeSupported is TRUE), this parameter returns + * either NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS_RIGHT_EYE or + * NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS_LEFT_EYE, reflecting the + * stereo eye that is currently scanned out. Although this value typically + * changes at the beginning of vblank, the exact guarantee isn't more + * accurate than "somewhere in vblank". 
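+ *
+ *    Illustrative use only ("rmControl" is a hypothetical stand-in for the
+ *    client's RM control dispatch): a client polling the raster position
+ *    might do
+ *      sl.subDeviceInstance = 0;
+ *      sl.head              = head;
+ *      status = rmControl(NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE, &sl, sizeof(sl));
+ *      if ((status == NV_OK) && (sl.currentScanline != 0xffffffff))
+ *          ... head is scanning out; sl.currentScanline is the RG raster line ...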
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE (0x730108U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_RIGHT_EYE 0x00000000U +#define NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE_LEFT_EYE 0x00000001U + +#define NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 currentScanline; + NvBool bStereoEyeSupported; + NvU32 stereoEye; +} NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS; + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_COUNTER + * + * This command returns the current VBlank counter of the specified head on the + * specified subdevice. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * head + * This parameter specifies the head for which the vblank counter + * should be retrieved. This value must be between zero and the + * maximum number of heads supported by the subdevice. + * verticalBlankCounter + * This parameter returns the vblank counter value for the specified + * head. If the display mode is not valid or vblank not active then + * the verticalBlankCounter value is undefined. + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_COUNTER (0x730109U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 verticalBlankCounter; +} NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS; + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_ENABLE + * + * This command returns the current VBlank enable status for the specified + * head. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * head + * This parameter specifies the head for which the vblank status + * should be retrieved. This value must be between zero and the + * maximum number of heads supported by the subdevice. + * bEnabled + * This parameter returns the vblank enable status for the specified head. + * A value of NV_FALSE indicates that vblank interrupts are not currently + * enabled while a value of NV_TRUE indicates that vblank are currently + * enabled. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_VBLANK_ENABLE (0x73010aU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvBool bEnabled; +} NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS; + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED + * + * This command returns the set of supported display IDs for the specified + * subdevice in the form of a 32bit display mask. State from internal + * display connectivity tables is used to determine the set of possible + * display connections for the GPU. The presence of a display in the + * display mask only indicates the display is supported. The connectivity + * status of the display should be determined using the + * NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE command. The displayMask + * value returned by NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED is static + * and will remain consistent across boots of the system. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayMask + * This parameter returns a NV0073_DISPLAY_MASK value describing the set + * of displays supported by the subdevice. An enabled bit in displayMask + * indicates the support of a display device with that displayId. + * displayMaskDDC + * This parameter returns a NV0073_DISPLAY_MASK value, indicating the + * subset of displayMask that supports DDC. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayMask; + NvU32 displayMaskDDC; +} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS; + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE + * + * This command can be used to check the presence of a mask of display + * devices on the specified subdevice. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * flags + * This parameter specifies optional flags to be used while retrieving + * the connection state information. + * Here are the current defined fields: + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD + * A client uses this field to indicate what method it wishes the + * system to use when determining the presence of attached displays. + * Possible values are: + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_DEFAULT + * The system decides what method to use. 
+ * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_CACHED + * Return the last full detection state for the display mask. + * safety.) + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_ECONODDC + * Ping the DDC address of the given display mask to check for + * a connected device. This is a lightweight method to check + * for a present device. + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC + * A client uses this field to indicate whether to allow DDC during + * this detection or to not use it. + * Possible values are: + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DEFAULT + * The system will use DDC as needed for each display. + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DISABLE + * The system will not use DDC for any display. If DDC is + * disabled, this detection state will not be cached. + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD + * A client uses this field to indicate whether to detect loads + * during this detection or to not use it. + * Possible values are: + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DEFAULT + * The system will use load detection as needed for each display. + * NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DISABLE + * The system will not use load detection for any display. If + * load detection is disabled, this detection state will not + * be cached. + * displayMask + * This parameter specifies an NV0073_DISPLAY_MASK value describing + * the set of displays for which connectivity status is to be checked. + * If a display is present then the corresponding bit in the display + * mask is left enabled. If the display is not present then the + * corresponding bit in the display mask is disabled. Upon return this + * parameter contains the subset of displays in the mask that are + * connected. + * + * If displayMask includes bit(s) that correspond to a TV encoder, the + * result will be simply 'yes' or 'no' without any indication of which + * connector(s) are actually attached. For fine-grained TV attachment + * detection, please see NV0073_CTRL_CMD_TV_GET_ATTACHMENT_STATUS. + * retryTimeMs + * This parameter is an output to this command. In case of + * NVOS_STATUS_ERROR_RETRY return status, this parameter returns the time + * duration in milli-seconds after which client should retry this command. 
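+ *
+ *    Illustrative sketch only: a client honouring the retry hint might do
+ *      cs.subDeviceInstance = 0;
+ *      cs.displayMask       = candidateMask;
+ *      cs.flags             = NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_CACHED;
+ *      status = rmControl(NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, &cs, sizeof(cs));
+ *      if (status == NVOS_STATUS_ERROR_RETRY)
+ *          ... wait cs.retryTimeMs milliseconds and issue the call again ...
+ *      else if (status == NV_OK)
+ *          ... cs.displayMask now holds only the connected subset ...
+ *    The _METHOD field occupies bits 1:0, so the _CACHED value can be used
+ *    directly here; "rmControl" is again a hypothetical dispatch helper.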
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NVOS_STATUS_ERROR_RETRY + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS { + NvU32 subDeviceInstance; + NvU32 flags; + NvU32 displayMask; + NvU32 retryTimeMs; +} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS; + +/* valid get connect state flags */ +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD 1:0 +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_DEFAULT (0x00000000U) +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_CACHED (0x00000001U) +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_METHOD_ECONODDC (0x00000002U) +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC 4:4 +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DEFAULT (0x00000000U) +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_DDC_DISABLE (0x00000001U) +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD 5:5 +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DEFAULT (0x00000000U) +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_LOAD_DISABLE (0x00000001U) +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK 6:6 +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK_DEFAULT (0x00000000U) +#define NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_FLAGS_VBLANK_SAFE (0x00000001U) + + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_CONFIG + * + * This command can be used to retrieve dynamic hotplug state information that + * are currently recorded by the RM. This information can be used by the client + * to determine which displays to detect after a hotplug event occurs. Or if + * the client knows that this device generates a hot plug/unplug signal on all + * connectors, then this can be used to cull displays from detection. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * flags + * This parameter specifies optional flags to be used while retrieving + * or changing the hotplug configuration. + * No flags are currently defined. + * hotplugEventMask + * For _GET_HOTPLUG_CONFIG, this returns which connectors the client + * has asked for notifications for, when a hotplug event is detected. + * Events can only be provided for connectors whose displayID is set + * by the system in the hotplugInterruptible field. + * hotplugPollable + * For _GET_HOTPLUG_CONFIG, this returns which connectors are pollable + * in some non-destructive fashion. + * hotplugInterruptible + * For _GET_HOTPLUG_CONFIG, this returns which connectors are capable + * of generating interrupts. + * + * This display mask specifies an NV0073_DISPLAY_MASK value describing + * the set of displays that have seen a hotplug or hotunplug event + * sometime after the last valid EDID read. If the device never has + * a valid EDID read, then it will always be listed here. 
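+ *
+ *   Informal usage sketch (not part of the generated interface): rmCtrl(),
+ *   hClient and hDisplayCommon are placeholders for the client's own RM
+ *   control dispatch path and handles. After a hotplug interrupt a client
+ *   could limit re-detection to connectors that can actually report events:
+ *
+ *     NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS cfg = { 0 };
+ *     cfg.subDeviceInstance = 0;
+ *     if (rmCtrl(hClient, hDisplayCommon,
+ *                NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_CONFIG,
+ *                &cfg, sizeof(cfg)) == NV_OK)
+ *     {
+ *         NvU32 toDetect = cfg.hotplugInterruptible | cfg.hotplugPollable;
+ *         // use toDetect as the displayMask of a subsequent
+ *         // NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE request
+ *     }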
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + + +#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_CONFIG (0x730123U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS { + NvU32 subDeviceInstance; + NvU32 flags; + NvU32 hotplugEventMask; + NvU32 hotplugPollable; + NvU32 hotplugInterruptible; + NvU32 hotplugAlwaysAttached; +} NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS; + + + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP + * + * This command can be used to retrieve the suggested head routing map + * for the specified display mask. A head routing map describes the + * suggested crtc (or head) assignments for each display in the specified + * mask. + * + * Up to MAX_DISPLAYS displays may be specified in the display mask. Displays + * are numbered from zero beginning with the lowest bit position set in the + * mask. The corresponding head assignment for each of specified displays can + * then be found in the respective per-device field in the routing map. + * + * If a particular display cannot be successfully assigned a position in the + * head routing map then it is removed from the display mask. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * displayMask + * This parameter specifies the NV0073_DISPLAY_MASK value for which + * the head routing map is desired. Each enabled bit indicates + * a display device to include in the routing map. Enabled bits + * must represent supported displays as indicated by the + * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED command. If a particular + * display cannot be included in the routing map then it's corresponding + * bit in the displayMask will be disabled. A return value of 0 in + * displayMask indicates that a head routing map could not be constructed + * with the given display devices. + * oldDisplayMask + * This optional parameter specifies a prior display mask to be + * used when generating the head routing map to be returned in + * headRoutingMap. Displays set in oldDisplayMask are retained + * if possible in the new routing map. + * oldHeadRoutingMap + * This optional parameter specifies a prior head routing map to be + * used when generating the new routing map to be returned in + * headRoutingMap. Head assignments in oldHeadRoutingMap are + * retained if possible in the new routing map. + * headRoutingMap + * This parameter returns the new head routing map. This parameter + * is organized into eight distinct fields, each containing the head + * assignment for the corresponding display in display mask. 
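+ *
+ *     As an informal illustration (not part of the generated interface,
+ *     and assuming DRF_VAL() from nvmisc.h and a returned parameter block
+ *     named p): the head suggested for the first two displays in the mask
+ *     can be read back from the 4-bit fields like so:
+ *
+ *       NvU32 head0 = DRF_VAL(0073_CTRL_SYSTEM, _HEAD_ROUTING_MAP,
+ *                             _DISPLAY0, p.headRoutingMap);
+ *       NvU32 head1 = DRF_VAL(0073_CTRL_SYSTEM, _HEAD_ROUTING_MAP,
+ *                             _DISPLAY1, p.headRoutingMap);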
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP (0x730125U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayMask; + NvU32 oldDisplayMask; + NvU32 oldHeadRoutingMap; + NvU32 headRoutingMap; +} NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS; + +/* maximum number of allowed displays in a routing map */ +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_MAX_DISPLAYS (8U) + +/* per-display head assignments in a routing map */ +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY0 3:0 +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY1 7:4 +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY2 11:8 +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY3 15:12 +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY4 19:16 +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY5 23:20 +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY6 27:24 +#define NV0073_CTRL_SYSTEM_HEAD_ROUTING_MAP_DISPLAY7 31:28 + + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE + * + * This command returns the active display ID for the specified head + * on the specified subdevice. The active display may be established + * at system boot by low-level software and can then be later modified + * by an NV client using a user display class instance (see + * NV15_VIDEO_LUT_CURSOR_DAC). + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * head + * This parameter specifies the head for which the active display + * should be retrieved. This value must be between zero and the + * maximum number of heads supported by the subdevice. + * flags + * This parameter specifies optional flags to be used to while retrieving + * the active display information. + * Possible valid flags are: + * NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT + * This flag is used to limit the search for the active display to + * that established by an NV client. If this flag is not specified, + * then any active display is returned (setup at system boot by + * low-level software or later by an NV client). + * displayId + * This parameter returns the displayId of the active display. A value + * of zero indicates no display is active. 
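+ *
+ *   Informal usage sketch (not part of the generated interface): rmCtrl(),
+ *   hClient, hDisplayCommon and numHeads are placeholders supplied by the
+ *   client. Scanning every head for its active display might look like:
+ *
+ *     NvU32 head;
+ *     for (head = 0; head < numHeads; head++)
+ *     {
+ *         NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS p = { 0 };
+ *         p.subDeviceInstance = 0;
+ *         p.head = head;
+ *         if (rmCtrl(hClient, hDisplayCommon,
+ *                    NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE,
+ *                    &p, sizeof(p)) == NV_OK && p.displayId != 0)
+ *         {
+ *             // this head currently drives the display p.displayId
+ *         }
+ *     }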
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID (0x26U) + +typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS { + NvU32 subDeviceInstance; + NvU32 head; + NvU32 flags; + NvU32 displayId; +} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS; + +/* valid get active flags */ +#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT 0:0 +#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT_DISABLE (0x00000000U) +#define NV0073_CTRL_SYSTEM_GET_ACTIVE_FLAGS_CLIENT_ENABLE (0x00000001U) + + + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS + * + * This command returns the set of internal (safe) display IDs for the specified + * subdevice in the form of a 32bit display mask. Safe means the displays do + * not require copy protection as they are on the motherboard. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * internalDisplaysMask + * This parameter returns a NV0073_DISPLAY_MASK value describing the set + * of displays that are internal (safe) and which do not require copy + * protection schemes. + * availableInternalDisplaysMask + * This parameter returns a NV0073_DISPLAY_MASK value describing the set + * of displays that are internal and available for use. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS (0x73015bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS_MESSAGE_ID (0x5BU) + +typedef struct NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS { + NvU32 subDeviceInstance; + NvU32 internalDisplaysMask; + NvU32 availableInternalDisplaysMask; +} NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS; + + + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS + * + * This command returns a mask of boot display IDs. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. This parameter + * should be set to zero for default behavior. + * bootDisplayMask + * This parameter returns the mask of boot display IDs. 
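+ *
+ *   Informal usage sketch (not part of the generated interface): rmCtrl(),
+ *   hClient, hDisplayCommon and supportedMask (from a prior
+ *   NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED call) are placeholders:
+ *
+ *     NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS p = { 0 };
+ *     p.subDeviceInstance = 0;
+ *     if (rmCtrl(hClient, hDisplayCommon,
+ *                NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS,
+ *                &p, sizeof(p)) == NV_OK)
+ *     {
+ *         NvU32 bootAndSupported = p.bootDisplayMask & supportedMask;
+ *         // each set bit is a displayId that was driven at boot
+ *     }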
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS (0x730166U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS_MESSAGE_ID (0x66U)
+
+typedef struct NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 bootDisplayMask;
+} NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE
+ *
+ * This command can be used to retrieve hotplug and unplug state
+ * information that is currently recorded by the RM. This information is
+ * used by the client to determine which displays to detect after a
+ * hotplug event occurs. Or, if the client knows that this device generates
+ * a hot plug/unplug signal on all connectors, then this can be used to cull
+ * displays from detection. The displayIds on which hotplug/unplug has
+ * happened will be reported only ONCE to the client. That is, if the call
+ * is made multiple times for the same event update, then for subsequent
+ * calls the display mask will be reported as 0.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *   flags
+ *     This parameter specifies optional flags to be used while retrieving
+ *     the hotplug state information.
+ *     Here are the current defined fields:
+ *       NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID
+ *         A client uses this field to determine the lid state.
+ *         Possible values are:
+ *           NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID_OPEN
+ *             The lid is open.
+ *           NV0073_CTRL_SYSTEM_GET_HOTPLUG_STATE_FLAGS_LID_CLOSED
+ *             The lid is closed. The client should remove devices as
+ *             reported inside the
+ *             NV0073_CTRL_SYSTEM_GET_CONNECT_POLICY_PARAMS.lidClosedMask.
+ *   hotPlugMask
+ *     This display mask specifies an NV0073_DISPLAY_MASK value describing
+ *     the set of displays that have seen a hotplug.
+ *   hotUnplugMask
+ *     This display mask specifies an NV0073_DISPLAY_MASK value describing
+ *     the set of displays that have seen a hot unplug.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE (0x73017bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | 0x7B" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 hotPlugMask;
+    NvU32 hotUnplugMask;
+} NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS;
+
+/* valid get hotplug state flags */
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID        0:0
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_OPEN   (0x00000000U)
+#define NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_FLAGS_LID_CLOSED (0x00000001U)
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_CLEAR_ELV_BLOCK
+ *
+ * This command instructs the RM to explicitly clear any
+ * ELV block. Clients should call this before attempting core-channel
+ * updates when in VRR one-shot mode. ELV block mode will be
+ * properly restored to its appropriate setting based on the stall-lock
+ * in Supervisor3 after the core channel update.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ *   displayId
+ *     The public ID of the Output Display which is to be used for VRR.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_CLEAR_ELV_BLOCK (0x73017dU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS_MESSAGE_ID (0x7DU)
+
+typedef struct NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS;
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR
+ *
+ * This command arms the display modeset supervisor to operate in
+ * a lightweight mode. By calling this, the client is implicitly
+ * promising not to make any changes in the next modeset that require
+ * the full supervisor. After SV3, the LWSV will disarm and any subsequent
+ * modesets will revert to full supervisors. This must be called separately
+ * for every display that will be part of the modeset.
+ * It is recommended that the client explicitly disarm the lightweight
+ * supervisor after every modeset, as null modesets will not trigger the
+ * supervisor interrupts and the RM will not be able to disarm automatically.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed. This parameter must specify a value between zero and the
+ *     total number of subdevices within the parent device. This parameter
+ *     should be set to zero for default behavior.
+ *
+ *   displayId
+ *     The public ID of the Output Display which is to be used for VRR.
+ *
+ *   bArmLWSV
+ *     If this is set to NV_TRUE, the RM will arm the lightweight supervisor
+ *     for the next modeset.
+ *     If this is set to NV_FALSE, the RM will disarm the lightweight
+ *     supervisor.
+ *
+ *   bVrrState
+ *     VRR state to be changed.
+ *
+ *   vActive
+ *     GPU-SRC vertical active value
+ *
+ *   vfp
+ *     GPU-SRC vertical front porch
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR (0x73017eU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS_MESSAGE_ID (0x7EU)
+
+typedef struct NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bArmLWSV;
+    NvBool bVrrState;
+    NvU32  vActive;
+    NvU32  vfp;
+} NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS;
+
+
+
+/*
+* NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS
+*
+* This command is used to configure pstate switch parameters on VRR monitors.
+*
+* subDeviceInstance
+*   This parameter specifies the subdevice instance within the
+*   NV04_DISPLAY_COMMON parent device to which the operation should be
+*   directed. This parameter must specify a value between zero and the
+*   total number of subdevices within the parent device. This parameter
+*   should be set to zero for default behavior.
+*
+* displayId
+*   DisplayId of the monitor being VRR configured
+*
+* bVrrState
+*   When set to NV_TRUE, signifies that VRR is about to become active.
+*   When set to NV_FALSE, signifies that VRR is about to become suspended.
+*
+* bVrrDirty
+*   When set to NV_TRUE, indicates that the VRR configuration has been changed.
+*   When set to NV_FALSE, this will indicate transitions from One shot mode to
+*   Continuous mode and vice versa.
+*
+* bVrrEnabled
+*   When set to NV_TRUE, indicates that VRR has been enabled, i.e. vBp extended by 2 lines.
+*
+* maxVblankExtension
+*   When VRR is enabled, this is the maximum number of lines that the vblank can be extended.
+*   Only updated when bVrrDirty = true.
+*
+* internalVRRHeadVblankStretch
+*   When VRR is enabled, this is the maximum number of lines that the vblank can be extended
+*   on NVSR and DD panels. Only updated when bVrrDirty = true.
+*
+* minVblankExtension
+*   When VRR is enabled, this is the minimum number of lines that should be present in the vblank. The purpose is to cap the maximum refresh rate (currently only for HDMI 2.1 VRR compliance).
+*/
+#define NV0073_CTRL_CMD_SYSTEM_CONFIG_VRR_PSTATE_SWITCH (0x730184U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS_MESSAGE_ID (0x84U)
+
+typedef struct NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bVrrState;
+    NvBool bVrrDirty;
+    NvBool bVrrEnabled;
+    NvU32  maxVblankExtension;
+    NvU32  internalVRRHeadVblankStretch;
+    NvU32  minVblankExtension;
+} NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS;
+
+
+
+/*
+ * NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX
+ *
+ * This command is used to query the display mask of all displays
+ * that support dynamic display MUX.
+ *
+ *   subDeviceInstance
+ *     This parameter specifies the subdevice instance within the
+ *     NV04_DISPLAY_COMMON parent device to which the operation should be
+ *     directed.
+ * displayMask (out) + * Mask of all displays that support dynamic display MUX + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX (0x730190U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS_MESSAGE_ID (0x90U) + +typedef struct NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS { + NvU32 subDeviceInstance; + NvU32 muxDisplayMask; +} NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS; + + + +/* + * NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH + * + * This command allocates a specified amount of ISO memory bandwidth for + * display. If the requested amount of bandwidth cannot be allocated (either + * because it exceeds the total bandwidth available to the system, or because + * too much bandwidth is already allocated to other clients), the call will + * fail and NV_ERR_INSUFFICIENT_RESOURCES will be returned. + * + * If bandwidth has already been allocated via a prior call, and a new + * allocation is requested, the new allocation will replace the old one. (If + * the new allocation fails, the old allocation remains in effect.) + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * averageBandwidthKBPS + * This parameter specifies the amount of ISO memory bandwidth requested. + * floorBandwidthKBPS + * This parameter specifies the minimum required (i.e., floor) dramclk + * frequency, multiplied by the width of the pipe over which the display + * data will travel. (It is understood that the bandwidth calculated by + * multiplying the clock frequency by the pipe width will not be + * realistically achievable, due to overhead in the memory subsystem. The + * API will not actually use the bandwidth value, except to reverse the + * calculation to get the required dramclk frequency.) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_NOT_SUPPORTED + * NV_ERR_GENERIC + */ + +#define NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH (0x730196U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS_MESSAGE_ID (0x96U) + +typedef struct NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS { + NvU32 subDeviceInstance; + NvU32 averageBandwidthKBPS; + NvU32 floorBandwidthKBPS; +} NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS; + +/* + * NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS + * + * This structure represents the hotplug event config control parameters. + * + * subDeviceInstance + * This parameter should specify the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. + * + * deviceMapFilter + * This parameter returns (in GET) or should specify (in SET) a device map + * indicating device(s) to sense. + * + * hotPluginSense + * This parameter returns (in GET) or should specify (in SET) a device map + * indicating device(s) plugged in that caused the most recent hotplug + * event. 
+ * + * hotUnplugSense + * This parameter returns (in GET) or should specify (in SET) a device map + * indicating device(s) un plugged that caused the most recent hotplug + * event. + */ + +typedef struct NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS { + NvU32 subDeviceInstance; + NvU32 deviceMapFilter; + NvU32 hotPluginSense; + NvU32 hotUnplugSense; +} NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS; + +/* + * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_EVENT_CONFIG + * + * This command fetches the hotplug event configuration. + * + * See @ref NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS for documentation on + * the parameters. + */ + +#define NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_EVENT_CONFIG (0x730197U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | 0x97" */ + +/* + * NV0073_CTRL_CMD_SYSTEM_SET_HOTPLUG_EVENT_CONFIG + * + * This command sets the hotplug event configuration. + * + * See @ref NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS for documentation on + * the parameters. + */ + +#define NV0073_CTRL_CMD_SYSTEM_SET_HOTPLUG_EVENT_CONFIG (0x730198U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | 0x98" */ + + + +/* +* NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS +* +* This command is used to read Core channel, Cursor channel, Window channel, and Head register values and encode these values with ProtoDmp. +* +* subDeviceInstance (in) +* This parameter specifies the subdevice instance within the +* NV04_DISPLAY_COMMON parent device to which the operation should be +* directed. +* headMask (in) +* Head mask representing which register values should be encoded +* windowMask (in) +* Window channel mask whose register values should be encoded +* bRecordCoreChannel (in) +* Indicates whether or not to encode core channel register values +* bRecordCursorChannel (in) +* Indicates whether or not to encode cursor channel register values +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT +* NV_ERR_NOT_SUPPORTED +*/ +#define NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS (0x73019bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS_MESSAGE_ID (0x9BU) + +typedef struct NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS { + NvU32 subDeviceInstance; + NvU32 headMask; + NvU32 windowMask; + NvBool bRecordCoreChannel; + NvBool bRecordCursorChannel; +} NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS; + +/* + * NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT + * + * This command is used to query the display mux status for the given + * display device + * + * subDeviceInstance (in) + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. 
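+ *
+ *   Informal usage sketch (not part of the generated interface): rmCtrl(),
+ *   hClient and hDisplayCommon are placeholders for the client's own RM
+ *   control dispatch path and handles:
+ *
+ *     NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS p = { 0 };
+ *     p.subDeviceInstance = 0;
+ *     if (rmCtrl(hClient, hDisplayCommon,
+ *                NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT,
+ *                &p, sizeof(p)) == NV_OK && p.bIsSidebandI2cSupported)
+ *     {
+ *         // sideband I2C transactions may be used on this subdevice
+ *     }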
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT (0x73019cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS_MESSAGE_ID" */ + +#define NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS_MESSAGE_ID (0x9CU) + +typedef struct NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS { + NvU32 subDeviceInstance; + NvBool bIsSidebandI2cSupported; +} NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS; + +/* _ctrl0073system_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h new file mode 100644 index 000000000..238c0d611 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080.finn +// + + + + +#include "ctrl0080/ctrl0080bif.h" +#include "ctrl0080/ctrl0080gpu.h" +#include "ctrl0080/ctrl0080clk.h" +#include "ctrl0080/ctrl0080dma.h" +#include "ctrl0080/ctrl0080gr.h" +#include "ctrl0080/ctrl0080cipher.h" +#include "ctrl0080/ctrl0080fb.h" +#include "ctrl0080/ctrl0080fifo.h" +#include "ctrl0080/ctrl0080host.h" + + +#include "ctrl0080/ctrl0080perf.h" +#include "ctrl0080/ctrl0080msenc.h" +#include "ctrl0080/ctrl0080bsp.h" +#include "ctrl0080/ctrl0080rc.h" +#include "ctrl0080/ctrl0080nvjpg.h" +#include "ctrl0080/ctrl0080unix.h" +#include "ctrl0080/ctrl0080internal.h" diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h new file mode 100644 index 000000000..f79b8f988 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080base.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080base.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV01_DEVICE_XX/NV03_DEVICE control commands and parameters */ + +#define NV0080_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x0080, NV0080_CTRL_##cat, idx) + +/* GPU device command categories (6bits) */ +#define NV0080_CTRL_RESERVED (0x00) +#define NV0080_CTRL_BIF (0x01) +#define NV0080_CTRL_GPU (0x02) +#define NV0080_CTRL_CLK (0x10) +#define NV0080_CTRL_GR (0x11) +#define NV0080_CTRL_CIPHER (0x12) +#define NV0080_CTRL_FB (0x13) +#define NV0080_CTRL_HOST (0x14) +#define NV0080_CTRL_VIDEO (0x15) +#define NV0080_CTRL_FIFO (0x17) +#define NV0080_CTRL_DMA (0x18) +#define NV0080_CTRL_PERF (0x19) +#define NV0080_CTRL_PERF_LEGACY_NON_PRIVILEGED (0x99) /* finn: Evaluated from "(NV0080_CTRL_PERF | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV0080_CTRL_MSENC (0x1B) +#define NV0080_CTRL_BSP (0x1C) +#define NV0080_CTRL_RC (0x1D) +#define NV0080_CTRL_OS_UNIX (0x1E) +#define NV0080_CTRL_NVJPG (0x1F) +#define NV0080_CTRL_INTERNAL (0x20) +#define NV0080_CTRL_NVLINK (0x21) + +/* + * NV0080_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0080_CTRL_CMD_NULL (0x800000) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl0080base_h_ */ + +/* extract device cap setting from specified category-specific caps table */ +#define NV0080_CTRL_GET_CAP(cat,tbl,c) \ + NV0080_CTRL_##cat##_GET_CAP(tbl, NV0080_CTRL_##cat##_CAPS_##c) diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h new file mode 100644 index 000000000..a1a80229b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bif.h @@ -0,0 +1,121 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080bif.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* + * NV0080_CTRL_CMD_BIF_RESET + * + * This command initiates the specified reset type on the GPU. + * + * flags + * Specifies various arguments to the reset operation. + * + * Supported fields include: + * + * NV0080_CTRL_BIF_RESET_FLAGS_TYPE + * When set to _SW_RESET, a SW (fullchip) reset is performed. When set + * to _SBR, a secondary-bus reset is performed. When set to + * _FUNDAMENTAL, a fundamental reset is performed. + * + * NOTE: _FUNDAMENTAL is not yet supported. + * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_BIF_RESET (0x800102) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_RESET_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BIF_RESET_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_BIF_RESET_PARAMS { + NvU32 flags; +} NV0080_CTRL_BIF_RESET_PARAMS; + +#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE 2:0 +#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_SW_RESET (0x00000001) +#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_SBR (0x00000002) +#define NV0080_CTRL_BIF_RESET_FLAGS_TYPE_FUNDAMENTAL (0x00000003) + +/* + * NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR + * + * baseDmaSysmemAddr + * This parameter represents the base DMA address for sysmem which will be + * added to all DMA accesses issued by GPU. 
Currently GPUs do not support 64-bit physical address, + * hence if sysmem is greater than max GPU supported physical address width, this address + * will be non-zero + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_PARENT + */ + +#define NV0080_CTRL_CMD_BIF_GET_DMA_BASE_SYSMEM_ADDR (0x800103) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS { + NV_DECLARE_ALIGNED(NvU64 baseDmaSysmemAddr, 8); +} NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS; + +/* + * NV0080_CTRL_BIF_SET_ASPM_FEATURE + * + * aspmFeatureSupported + * ASPM feature override by client + * + * Possible status values returned are: + * NV_OK + */ + +#define NV0080_CTRL_CMD_BIF_SET_ASPM_FEATURE (0x800104) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BIF_INTERFACE_ID << 8) | NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS { + NvU32 aspmFeatureSupported; +} NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS; + +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S 0:0 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S_ENABLED 0x000000001 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L0S_DISABLED 0x000000000 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1 1:1 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1_ENABLED 0x000000001 +#define NV0080_CTRL_BIF_ASPM_FEATURE_DT_L1_DISABLED 0x000000000 + +/* _ctrl0080bif_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h new file mode 100644 index 000000000..c0164684c --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080bsp.h @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl0080/ctrl0080bsp.finn +// + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE bit stream processor control commands and parameters */ + +/* + * NV0080_CTRL_CMD_BSP_GET_CAPS + * + * This command returns the set of BSP capabilities for the device + * in the form of an array of unsigned bytes. BSP capabilities + * include supported features and required workarounds for the decoder + * within the device, each represented by a byte offset into the + * table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_BSP_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the BSP caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * instanceId + * This parameter specifies the instance Id of NVDEC for which + * cap bits are requested. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_BSP_GET_CAPS (0x801c01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BSP_INTERFACE_ID << 8) | NV0080_CTRL_BSP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_BSP_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_BSP_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); + NvU32 instanceId; +} NV0080_CTRL_BSP_GET_CAPS_PARAMS; + + + +/* + * Size in bytes of bsp caps table. This value should be one greater + * than the largest byte_index value above. + */ +#define NV0080_CTRL_BSP_CAPS_TBL_SIZE 8 + +/* + * NV0080_CTRL_CMD_BSP_GET_CAPS_V2 + * + * This command returns the set of BSP capabilities for the device + * in the form of an array of unsigned bytes. BSP capabilities + * include supported features and required workarounds for the decoder + * within the device, each represented by a byte offset into the + * table and a bit position within that byte. + * (The V2 version flattens the capsTbl array pointer). + * + * capsTbl + * This parameter is an array of unsigned bytes where the BSP caps bits + * will be transferred by the RM. + * instanceId + * This parameter specifies the instance Id of NVDEC for which + * cap bits are requested. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_BSP_GET_CAPS_V2 (0x801c02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_BSP_INTERFACE_ID << 8) | NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2_MESSAGE_ID" */ + +#define NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2 { + NvU8 capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE]; + NvU32 instanceId; +} NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2; + +/* _ctrl0080bsp_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h new file mode 100644 index 000000000..279151bf3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080cipher.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080cipher.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h new file mode 100644 index 000000000..de61930d5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080clk.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080clk.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h new file mode 100644 index 000000000..c92b35798 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080dma.h @@ -0,0 +1,911 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080dma.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE dma control commands and parameters */ + +/* + * NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK + * + * This parameter returns the parameters specific to a PTE as follows: + * pageSize + * GET: This parameter returns the page size of the PTE information + * being returned. If 0, then this pteBlock[] array entry is + * invalid or not used. (pteBlock[0] is always used.) + * SET: This parameter specifies the page size of the PTE information + * to be set. If 0, then this pteBlock[] array entry is invalid + * or not used. (pteBlock[0] is always used.) + * pteEntrySize + * GET: This parameter returns the size of the PTE in bytes for this GPU. + * SET: N/A + * comptagLine + * GET: This parameter returns the comptagline field of the corresponding PTE. + * SET: This parameter sets the comptagline field of the corresponding PTE. + * Incorrect values may lead to dire consequences. + * kind + * GET: This parameter returns the kind field of the corresponding PTE. + * SET: This parameter sets the kind field of the corresponding PTE. + * Incorrect values may lead to undesirable consequences. + * pteFlags + * This parameter returns various fields from the PTE, these are: + * FLAGS_VALID: + * GET: This flag returns the valid bit of the PTE. + * SET: This flag sets the valid bit of the PTE. + * FLAGS_ENCRYPTED: + * GET: This flag returns the encrypted bit of the PTE. Not all GPUs + * support encryption. If not supported, this flag will be set to + * NOT_SUPPORTED. + * SET: This flag sets the encrypted bit of the PTE. + * FLAGS_APERTURE: + * GET: This flag returns the aperture field of the PTE. See + * NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_FLAGS_APERTURE_* for values. + * SET: This flag sets the aperture field of the PTE. See + * NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_FLAGS_APERTURE_* for values. + * FLAGS_COMPTAGS: + * GET: This flag returns the comptags field of the PTE. (Not used on Fermi) + * SET: N/A + * FLAGS_GPU_CACHED: + * GET: This flag returns the GPU cacheable bit of the PTE. GPU caching of + * sysmem was added in iGT21a and Fermi. If not supported, this flag + * will be set to NOT_SUPPORTED. 
+ * SET: N/A for specific chips, e.g., GF100 + * FLAGS_SHADER_ACCESS: + * GET: This flag returns the shader access control of the PTE. This feature + * was introduced in Kepler. If not supported, this flag will be set to + * NOT_SUPPORTED. + * SET: N/A + */ + +typedef struct NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK { + NvU32 pageSize; + NV_DECLARE_ALIGNED(NvU64 pteEntrySize, 8); + NvU32 comptagLine; + NvU32 kind; + NvU32 pteFlags; +} NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK; + +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID 0:0 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_VALID_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED 2:1 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ENCRYPTED_NOT_SUPPORTED (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE 6:3 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_VIDEO_MEMORY (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_PEER_MEMORY (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_SYSTEM_COHERENT_MEMORY (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_APERTURE_SYSTEM_NON_COHERENT_MEMORY (0x00000003U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS 10:7 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_NONE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_1 (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_2 (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS_4 (0x00000004U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED 12:11 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_GPU_CACHED_NOT_SUPPORTED (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS 14:13 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_READ_WRITE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_READ_ONLY (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_WRITE_ONLY (0x00000002U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_SHADER_ACCESS_NOT_SUPPORTED (0x00000003U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY 15:15 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_READ_ONLY_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC 16:16 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ATOMIC_ENABLE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING 17:17 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_ACCESS_COUNTING_ENABLE (0x00000001U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED 18:18 +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_PRIVILEGED_TRUE (0x00000001U) + +/* + * NV0080_CTRL_DMA_GET_PTE_INFO + * + * This command queries PTE information for the specified GPU virtual address. 
+ * + * gpuAddr + * This parameter specifies the GPU virtual address for which PTE + * information is to be returned. + * skipVASpaceInit + * This parameter specifies(true/false) whether the VA Space + * initialization should be skipped in this ctrl call. + * pteBlocks + * This parameter returns the page size-specific attributes of a PTE. + * Please see NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + * NV_ERR_GENERIC + */ + +#define NV0080_CTRL_CMD_DMA_GET_PTE_INFO (0x801801U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_GET_PTE_INFO_PTE_BLOCKS 4U + +#define NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8); + NvU32 subDeviceId; + NvU8 skipVASpaceInit; + NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_GET_PTE_INFO_PTE_BLOCKS], 8); + NvHandle hVASpace; +} NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS; + +/* + * NV0080_CTRL_DMA_SET_PTE_INFO + * + * This command sets PTE information for the specified GPU virtual address. + * Usage of parameter and field definitions is identical to that of + * NV0080_CTRL_DMA_GET_PTE_INFO, with the following exception: + * + * - pteFlags field NV0080_CTRL_DMA_PTE_INFO_PARAMS_FLAGS_COMPTAGS is ignored, + * as this setting is specified via the kind specification. + * - pteEntrySize is ignored, as this setting is read-only in the GET case. + * - hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space. + * + */ + +#define NV0080_CTRL_CMD_DMA_SET_PTE_INFO (0x80180aU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_SET_PTE_INFO_PTE_BLOCKS 4U + +#define NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8); + NvU32 subDeviceId; + NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_SET_PTE_INFO_PTE_BLOCKS], 8); + NvHandle hVASpace; +} NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS; + + +#define NV0080_CTRL_CMD_DMA_FILL_PTE_MEM (0x801802U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS { + NvU32 pageCount; + struct { + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU32 subDeviceId; + } hwResource; + struct { + NvU32 fbKind; + NvU32 sysKind; + NvU32 compTagStartOffset; + } comprInfo; + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8); + NV_DECLARE_ALIGNED(NvP64 pageArray, 8); + NV_DECLARE_ALIGNED(NvP64 pteMem, 8); + NvU32 pteMemPfn; + NvU32 pageSize; + NvU32 startPageIndex; + NvU32 flags; + NvHandle hSrcVASpace; + NvHandle hTgtVASpace; + NvU32 peerId; +} NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS; + + + +/* + * NV0080_CTRL_DMA_FLUSH + * + * This command flushes the specified target unit + * + * targetUnit + * The unit to flush, either L2 cache or compression tag cache. 
+ * This field is a logical OR of the individual fields such as + * L2 cache or compression tag cache. Also L2 invalidation for + * either SYSMEM/PEERMEM is triggered. But this invalidation is + * for FERMI. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * + * See Also: + * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE + * Flushes the entire GPU cache or a set of physical addresses (if the + * hardware supports it). Use this call if you want to flush a set of + * addresses or the entire GPU cache in unicast mode. + * NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE + * Flushes memory associated with a single allocation if the hardware + * supports it. Use this call if you want to flush a single allocation and + * you have a memory object describing the physical memory. + */ +#define NV0080_CTRL_CMD_DMA_FLUSH (0x801805U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_FLUSH_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_FLUSH_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0080_CTRL_DMA_FLUSH_PARAMS { + NvU32 targetUnit; +} NV0080_CTRL_DMA_FLUSH_PARAMS; + +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2 0:0 +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_ENABLE (0x00000001U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG 1:1 +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG_ENABLE (0x00000001U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB 2:2 +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB_DISABLE (0x00000000U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_FB_ENABLE (0x00000001U) + +// This is exclusively for Fermi +// The selection of non-zero valued bit-fields avoids the routing +// into the above cases and vice-versa +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE 4:3 +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE_SYSMEM (0x00000001U) +#define NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_INVALIDATE_PEERMEM (0x00000002U) + + +/** + * NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS + * + * This command returns information about the VA caps on the GPU + * + * vaBitCount + * Returns number of bits in a virtual address + * pdeCoverageBitCount + * Returns number of VA bits covered in each PDE. One PDE covers + * 2^pdeCoverageBitCount bytes. + * + * bigPageSize + * Size of the big page + * compressionPageSize + * Size of region each compression tag covers + * dualPageTableSupported + * TRUE if one page table can map with both 4KB and big pages + * + * numPageTableFormats + * Returns the number of different page table sizes supported by the RM + * pageTableBigFormat + * pageTable4KFormat[] + * Returns size in bytes and number of VA bits covered by each page table + * format. Up to MAX_NUM_PAGE_TABLE_FORMATS can be returned. The most + * compact format will be pageTableSize[0] and the least compact format + * will be last. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space. + * vaRangeLo + * Indicates the start of usable VA range. + * + * hugePageSize + * Size of the huge page if supported, 0 otherwise. + * + * vaSpaceId + * Virtual Address Space id assigned by RM. + * Only relevant on AMODEL. + * + * pageSize512MB + * Size of the 512MB page if supported, 0 otherwise. 
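For the NV0080_CTRL_CMD_DMA_FLUSH interface defined above, a sketch of how the targetUnit bitfield might be assembled from the field definitions. The rmControl() wrapper is the same hypothetical dispatch helper as in the earlier sketch, not something this header provides.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS flushL2AndComptags(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_DMA_FLUSH_PARAMS params = { 0 };

    /*
     * targetUnit is a bitfield: L2 occupies bit 0 (0:0) and COMPTAG bit 1 (1:1)
     * per the definitions above, so OR the ENABLE values into place.
     */
    params.targetUnit =
        (NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_L2_ENABLE      << 0) |
        (NV0080_CTRL_DMA_FLUSH_TARGET_UNIT_COMPTAG_ENABLE << 1);

    return rmControl(hClient, hDevice, NV0080_CTRL_CMD_DMA_FLUSH,
                     &params, sizeof(params));
}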
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS (0x801806U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT { + NvU32 pageTableSize; + NvU32 pageTableCoverage; +} NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT; + +#define NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_MAX_NUM_PAGE_TABLE_FORMATS (16U) +#define NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS { + NvU32 vaBitCount; + NvU32 pdeCoverageBitCount; + NvU32 num4KPageTableFormats; + NvU32 bigPageSize; + NvU32 compressionPageSize; + NvU32 dualPageTableSupported; + NvU32 idealVRAMPageSize; + NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT pageTableBigFormat; + NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PAGE_TABLE_FORMAT pageTable4KFormat[NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_MAX_NUM_PAGE_TABLE_FORMATS]; + NvHandle hVASpace; + NV_DECLARE_ALIGNED(NvU64 vaRangeLo, 8); + NvU32 hugePageSize; + NvU32 vaSpaceId; + NvU32 pageSize512MB; +} NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS; + +/* + * Adding a version define to allow clients to access valid + * parameters based on version. + */ +#define NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS_WITH_VA_RANGE_LO 0x1U + +/* + * NV0080_CTRL_DMA_GET_PDE_INFO + * + * This command queries PDE information for the specified GPU virtual address. + * + * gpuAddr + * This parameter specifies the GPU virtual address for which PDE + * information is to be returned. + * pdeVirtAddr + * This parameter returns the GPU virtual address of the PDE. + * pdeEntrySize + * This parameter returns the size of the PDE in bytes for this GPU. + * pdeAddrSpace + * This parameter returns the GPU address space of the PDE. + * pdeSize + * This parameter returns the fractional size of the page table(s) as + * actually set in the PDE, FULL, 1/2, 1/4 or 1/8. (This amount may + * differ from that derived from pdeVASpaceSize.) Intended for VERIF only. + * pteBlocks + * This parameter returns the page size-specific parameters as follows: + * ptePhysAddr + * This parameter returns the GPU physical address of the page table. + * pteCacheAttrib + * This parameter returns the caching attribute of the + * GPU physical address of the page table. + * pteEntrySize + * This parameter returns the size of the PTE in bytes for this GPU. + * pageSize + * This parameter returns the page size of the page table. + * If pageSize == 0, then this PTE block is not valid. + * pteAddrSpace + * This parameter returns the GPU address space of the page table. + * pdeVASpaceSize + * This parameter returns the size of the VA space addressable by + * the page table if fully used (i.e., if all PTEs marked VALID). + * pdbAddr + * This parameter returns the PDB address for the PDE. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space. 
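A sketch of querying the VA capabilities defined above (NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS), again assuming a hypothetical rmControl() dispatch helper and existing hClient/hDevice handles.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS queryVaCaps(NvHandle hClient, NvHandle hDevice,
                             NvU64 *pVaSpan, NvU32 *pBigPageSize)
{
    NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS params = { 0 };
    NV_STATUS status;

    params.hVASpace = 0;   /* implicit VA space of the device */

    status = rmControl(hClient, hDevice, NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS,
                       &params, sizeof(params));
    if (status != NV_OK)
        return status;

    /* vaBitCount is the number of bits in a virtual address. */
    *pVaSpan      = 1ULL << params.vaBitCount;
    *pBigPageSize = params.bigPageSize;
    return NV_OK;
}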
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ */
+#define NV0080_CTRL_CMD_DMA_GET_PDE_INFO (0x801809U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK {
+    NV_DECLARE_ALIGNED(NvU64 ptePhysAddr, 8);
+    NvU32 pteCacheAttrib;
+    NvU32 pteEntrySize;
+    NvU32 pageSize;
+    NvU32 pteAddrSpace;
+    NvU32 pdeVASpaceSize;
+    NvU32 pdeFlags;
+} NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK;
+
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_VIDEO_MEMORY (0x00000000U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_SYSTEM_COHERENT_MEMORY (0x00000001U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_SYSTEM_NON_COHERENT_MEMORY (0x00000002U)
+
+#define NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCKS 4U
+
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 gpuAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 pdeVirtAddr, 8);
+    NvU32 pdeEntrySize;
+    NvU32 pdeAddrSpace;
+    NvU32 pdeSize;
+    NvU32 subDeviceId;
+    NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK pteBlocks[NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCKS], 8);
+    NV_DECLARE_ALIGNED(NvU64 pdbAddr, 8);
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS;
+
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_VIDEO_MEMORY (0x00000000U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_SYSTEM_COHERENT_MEMORY (0x00000001U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_SYSTEM_NON_COHERENT_MEMORY (0x00000002U)
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_FULL 1U
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_HALF 2U
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_QUARTER 3U
+#define NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_SIZE_EIGHTH 4U
+
+/*
+ * NV0080_CTRL_CMD_DMA_INVALIDATE_PDB_TARGET
+ *
+ * This command invalidates the PDB target setting in hardware.
+ * After execution of this command, the PDB target will be in an undefined state.
+ *
+ * Returns an error if the PDB target cannot be invalidated.
+ *
+ * This call is only supported on Fermi and later chips.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV0080_CTRL_CMD_DMA_INVALIDATE_PDB_TARGET (0x80180bU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | 0xB" */
+
+/*
+ * NV0080_CTRL_CMD_DMA_INVALIDATE_TLB
+ *
+ * This command invalidates the GPU TLB. This is intended to be used
+ * for RM clients that manage their own TLB consistency when updating
+ * page tables on their own, or with DEFER_TLB_INVALIDATION options
+ * to other RM APIs.
+ *
+ *    hVASpace
+ *       This parameter specifies the VASpace object whose MMU TLB entries
+ *       need to be invalidated, if the flag is set to NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_FALSE.
+ *       Specifying a GMMU VASpace object handle will invalidate the GMMU TLB for the particular VASpace.
+ *       Specifying a SMMU VASpace object handle will flush the entire SMMU TLB & PTC.
+ *
+ *    flags
+ *       This parameter can be used to specify any flags needed
+ *       for the invalidation request.
+ *          NV0080_CTRL_DMA_INVALIDATE_TLB_ALL
+ *             When set to TRUE this flag requests a global invalidate.
+ *             When set to FALSE this flag requests a chip-specific
+ *             optimization to invalidate only the address space bound
+ *             to the associated hDevice.
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_TIMEOUT_RETRY + * NV_ERR_NOT_SUPPORTED + */ + +#define NV0080_CTRL_CMD_DMA_INVALIDATE_TLB (0x80180cU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS { + NvHandle hVASpace; + NvU32 flags; +} NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS; + +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL 0:0 +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_INVALIDATE_TLB_ALL_TRUE (0x00000001U) + +/** + * NV0080_CTRL_CMD_DMA_GET_CAPS + * + * This command returns the set of DMA capabilities for the device + * in the form of an array of unsigned bytes. DMA capabilities + * include supported features and required workarounds for address + * translation system within the device, each represented by a byte + * offset into the table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_DMA_CAPS_TBL_SIZE. + * + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the framebuffer caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * + * 32BIT_POINTER_ENFORCED + * If this property is TRUE NVOS32 and NVOS46 calls with + * 32BIT_POINTER_DISABLED will return addresses above 4GB. + * + * SHADER_ACCESS_SUPPORTED + * If this property is set, the MMU in the system supports the independent + * access bits for the shader. This is accessed with the following fields: + * NVOS46_FLAGS_SHADER_ACCESS + * NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS_FLAGS_SHADER_ACCESS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_DMA_GET_CAPS (0x80180dU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_GET_CAPS_PARAMS_MESSAGE_ID" */ +/* size in bytes of fb caps table */ +#define NV0080_CTRL_DMA_CAPS_TBL_SIZE 8U +#define NV0080_CTRL_DMA_GET_CAPS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV0080_CTRL_DMA_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NvU8 capsTbl[NV0080_CTRL_DMA_CAPS_TBL_SIZE]; +} NV0080_CTRL_DMA_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_DMA_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_DMA_CAPS_32BIT_POINTER_ENFORCED 0:0x01 +#define NV0080_CTRL_DMA_CAPS_SHADER_ACCESS_SUPPORTED 0:0x04 +#define NV0080_CTRL_DMA_CAPS_SPARSE_VIRTUAL_SUPPORTED 0:0x08 +#define NV0080_CTRL_DMA_CAPS_MULTIPLE_VA_SPACES_SUPPORTED 0:0x10 + +/* + * NV0080_CTRL_DMA_SET_VA_SPACE_SIZE + * + * Change the size of an existing VA space. + * NOTE: Currently this only supports growing the size, not shrinking. + * + * 1. Allocate new page directory able to map extended range. + * 2. Copy existing PDEs from old directory to new directory. + * 3. Initialize new PDEs to invalid. + * 4. Update instmem to point to new page directory. + * 5. Free old page directory. + * + * vaSpaceSize + * On input, the requested size of the VA space in bytes. + * On output, the actual resulting VA space size. + * + * The actual size will be greater than or equal to the requested size, + * unless NV0080_CTRL_DMA_GROW_VA_SPACE_SIZE_MAX is requested, which + * requests the maximum available. 
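The caps-table helper above can be used directly on the returned byte array. A sketch, under the same hypothetical rmControl() assumption, of testing the SHADER_ACCESS_SUPPORTED capability:

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NvBool dmaSupportsShaderAccess(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_DMA_GET_CAPS_PARAMS params = { 0 };

    params.capsTblSize = NV0080_CTRL_DMA_CAPS_TBL_SIZE;

    if (rmControl(hClient, hDevice, NV0080_CTRL_CMD_DMA_GET_CAPS,
                  &params, sizeof(params)) != NV_OK)
        return NV_FALSE;

    /* NV0080_CTRL_DMA_GET_CAP() indexes the byte and masks the bit for us. */
    return NV0080_CTRL_DMA_GET_CAP(params.capsTbl,
                                   NV0080_CTRL_DMA_CAPS_SHADER_ACCESS_SUPPORTED) != 0;
}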
+ * + * NOTE: Specific size requests (e.g. other than SIZE_MAX) must account + * for the VA hole at the beginning of the range which is used to + * distinguish NULL pointers. This region is not counted as part + * of the vaSpaceSize since it is not allocatable. + * + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space + * associated with the client/device pair. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INSUFFICIENT_RESOURCES + */ +#define NV0080_CTRL_CMD_DMA_SET_VA_SPACE_SIZE (0x80180eU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8); + NvHandle hVASpace; +} NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS; + +#define NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_MAX (0xFFFFFFFFFFFFFFFFULL) + +/* + * NV0080_CTRL_DMA_UPDATE_PDE_2 + * + * This command updates a single PDE for the given (hClient, hDevice) + * with specific attributes. + * This command is only available on Windows and MODS platforms. + * This command can be called by kernel clients only. + * + * The VA range the PDE maps must be contained by a VA allocation marked with + * NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED. + * However if the MODS-only FORCE_OVERRIDE flag is set this restriction is relaxed. + * + * RM does not track the PDE's attributes in SW - this control simply stuffs + * the PDE in memory after translating and checking the parameters. + * + * Parameters are checked for relative consistency (e.g. valid domains), + * but it is the client's responsibility to provide correct page table + * addresses, e.g. global consistency is not checked. + * + * It is also the client's responsibility to flush/invalidate the MMU + * when appropriate, either by setting the _FLUSH_PDE_CACHE flag for this + * call or by flushing through other APIs. + * This control does not flush automatically to allow batches of calls + * to be made before a single flush. + * + * ptParams + * Page-size-specific parameters, as follows: + * + * physAddr + * Base address of physically contiguous memory of page table. + * Must be aligned sufficiently for the PDE address field. + * numEntries + * Deprecated and ignored. + * Use FLAGS_PDE_SIZE that applies to the tables for all page sizes. + * aperture + * Address space the base address applies to. + * Can be left as INVALID to ignore this page table size. + * + * pdeIndex + * The PDE index this update applies to. + * flags + * See NV0080_CTRL_DMA_UPDATE_PDE_FLAGS_*. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space + * associated with the client/device pair. + * pPdeBuffer [out] + * Kernel pointer to 64 bit unsigned integer representing a Page Dir Entry + * that needs to be updated. It should point to memory as wide as the Page Dir + * Entry. + * + * If NULL, Page Dir Entry updates will go to the internally managed Page Dir. + * If not NULL, the updates will be written to this buffer. 
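A sketch of growing a VA space to the maximum available size with NV0080_CTRL_CMD_DMA_SET_VA_SPACE_SIZE as defined above; rmControl() remains a stand-in for the client's control dispatch path.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS growVaSpaceToMax(NvHandle hClient, NvHandle hDevice,
                                  NvU64 *pResultingSize)
{
    NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS params = { 0 };
    NV_STATUS status;

    /* Request the maximum available size; RM writes back the actual size. */
    params.vaSpaceSize = NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_MAX;
    params.hVASpace    = 0;

    status = rmControl(hClient, hDevice, NV0080_CTRL_CMD_DMA_SET_VA_SPACE_SIZE,
                       &params, sizeof(params));
    if (status == NV_OK)
        *pResultingSize = params.vaSpaceSize;
    return status;
}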
+ * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_DMA_UPDATE_PDE_2 (0x80180fU) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 physAddr, 8); + NvU32 numEntries; // deprecated + NvU32 aperture; +} NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS; + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_INVALID (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_VIDEO_MEMORY (0x00000001U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_SYSTEM_COHERENT_MEMORY (0x00000002U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_SYSTEM_NON_COHERENT_MEMORY (0x00000003U) + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_SMALL 0U +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG 1U +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE 2U + +#define NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS { + NvU32 pdeIndex; + NvU32 flags; + NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE], 8); + NvHandle hVASpace; + NV_DECLARE_ALIGNED(NvP64 pPdeBuffer, 8); // NV_MMU_VER2_PDE__SIZE + NvU32 subDeviceId; // ID+1, 0 for BC +} NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS; + +/*! + * If set a PDE cache flush (MMU invalidate) will be performed. + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE 0:0 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE_TRUE (0x00000001U) + +/*! + * For verification purposes (MODS-only) this flag may be set to modify any PDE + * in the VA space (RM managed or externally managed). + * It is up to caller to restore any changes properly (or to expect faults). + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE 1:1 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FORCE_OVERRIDE_TRUE (0x00000001U) + +/*! + * Directly controls the PDE_SIZE field (size of the page tables pointed to by this PDE). + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE 3:2 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_FULL (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_HALF (0x00000001U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_QUARTER (0x00000002U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_PDE_SIZE_EIGHTH (0x00000003U) + +/*! + * Used to specify if the allocation is sparse. Applicable only in case of + * VA Space managed by OS, as in WDDM2.0 + */ +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE 4:4 +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_SPARSE_TRUE (0x00000001U) + +/* + * NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE + * This interface will create a corresponding privileged + * kernel address space that will mirror user space allocations in this + * VASPACE. + * The user can either pass a FERMI_VASPACE_A handle or RM will use the + * vaspace associated with the client/device if hVaspace is passed as + * NULL. + * Once this property is set, the user will not be able to make allocations + * from the top most PDE of this address space. 
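A sketch of one NV0080_CTRL_CMD_DMA_UPDATE_PDE_2 call built from the structures above: it points the big-page slot of a PDE at an externally managed page table and requests the PDE-cache flush in the same call. The shift for the flag follows the 0:0 bit range above; rmControl(), the handles, and the physical address are assumptions for illustration.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS pointPdeAtBigPageTable(NvHandle hClient, NvHandle hDevice,
                                        NvHandle hVASpace, NvU32 pdeIndex,
                                        NvU64 bigPtPhysAddr)
{
    NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS params = { 0 };

    params.pdeIndex = pdeIndex;
    params.hVASpace = hVASpace;

    /* Big-page table goes in the BIG slot; leave the SMALL slot INVALID (ignored). */
    params.ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG].physAddr = bigPtPhysAddr;
    params.ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG].aperture =
        NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_VIDEO_MEMORY;
    params.ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_SMALL].aperture =
        NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_INVALID;

    /* FLUSH_PDE_CACHE is bit 0 (0:0); ask for the MMU invalidate with this call. */
    params.flags = NV0080_CTRL_DMA_UPDATE_PDE_2_FLAGS_FLUSH_PDE_CACHE_TRUE << 0;

    /* pPdeBuffer left NULL: updates go to the internally managed page directory. */
    return rmControl(hClient, hDevice, NV0080_CTRL_CMD_DMA_UPDATE_PDE_2,
                     &params, sizeof(params));
}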
+ *
+ * The user is expected to call this function as soon as the device/VA space
+ * object has been created. If the user has already made VA allocations
+ * in this VA space, this call will return a failure
+ * (NV_ERR_INVALID_STATE).
+ * The VA space should have no VA allocations when this call is made.
+ *
+ * Possible status values returned are:
+ *    NV_OK
+ *    NV_ERR_INVALID_ARGUMENT
+ *    NV_ERR_INVALID_STATE
+*/
+#define NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE (0x801810U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS_MESSAGE_ID (0x10U)
+
+typedef struct NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS {
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS;
+
+/*
+ * NV0080_CTRL_DMA_SET_DEFAULT_VASPACE
+ *    This is a special control call provided for KMD to use.
+ *    It will associate an allocated Address Space Object as the
+ *    default address space of the device.
+ *
+ *    This is added so that the USER can move to using address space objects when they
+ *    want to specify the big page size they want to use but still want
+ *    to use the rest of the relevant RM apis without specifying the hVASpace.
+ *
+ *    This call will succeed only if there is already no VASPACE associated with the
+ *    device. This means the user will have to call this before making any allocations
+ *    on this device/address space.
+ *
+ *    The hVASpace that is passed in to be associated should belong to the parent device that
+ *    this call is made for. This call will fail if we try to associate a VASpace belonging to
+ *    some other client/device.
+ *
+ *    Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_STATE
+ *
+ */
+#define NV0080_CTRL_DMA_SET_DEFAULT_VASPACE (0x801812U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS {
+    NvHandle hVASpace;
+} NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS;
+
+/*!
+ * NV0080_CTRL_DMA_SET_PAGE_DIRECTORY
+ *
+ * Move an existing VA space to an externally-managed top-level page directory.
+ * The VA space must have been created in SHARED_MANAGEMENT mode.
+ * For lifecycle details, see NV_VASPACE_ALLOCATION_PARAMETERS documentation in nvos.h.
+ *
+ * RM will propagate the update to all channels using the VA space.
+ *
+ * NOTE: All channels using this VA space are expected to be idle and unscheduled prior
+ *       to and during this control call - it is the responsibility of the caller to ensure this.
+ *
+ * physAddress
+ *      Physical address of the new page directory within the aperture specified by flags.
+ * numEntries
+ *      Number of entries in the new page directory.
+ *      The backing physical memory must be at least this size (multiplied by entry size).
+ * flags
+ *      APERTURE
+ *          Specifies which physical aperture the page directory resides in.
+ *      PRESERVE_PDES
+ *          Deprecated - RM will always copy the RM-managed PDEs from the old page directory
+ *          to the new page directory.
+ *      ALL_CHANNELS
+ *          If true, RM will update the instance blocks for all channels using
+ *          the VAS and ignore the chId parameter.
+ *      EXTEND_VASPACE
+ *          If true, RM will use the client VA for client VA requests in VASPACE_SHARED_MANAGEMENT mode.
+ *          If false, RM will use the internal VA for client VA requests.
+ * IGNORE_CHANNEL_BUSY + * If true, RM will ignore the channel busy status during set page + * directory operation. + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space + * associated with the client/device pair. + * chId + * ID of the Channel to be updated. + * pasid + * PASID (Process Address Space IDentifier) of the process corresponding to + * the VA space. Ignored unless the VA space has ATS enabled. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_LIMIT + * NV_ERR_GENERIC + */ +#define NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (0x801813U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS { + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + NvU32 numEntries; + NvU32 flags; + NvHandle hVASpace; + NvU32 chId; + NvU32 subDeviceId; // ID+1, 0 for BC + NvU32 pasid; +} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS; + +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE 1:0 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH (0x00000002U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES 2:2 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_PRESERVE_PDES_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS 3:3 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY 4:4 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_IGNORE_CHANNEL_BUSY_TRUE (0x00000001U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE 5:5 +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_FALSE (0x00000000U) +#define NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_EXTEND_VASPACE_TRUE (0x00000001U) + +/*! + * NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY + * + * Restore an existing VA space to an RM-managed top-level page directory. + * The VA space must have been created in SHARED_MANAGEMENT mode and + * previously relocated to an externally-managed page directory with + * NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY (these two API are symmetric operations). + * For lifecycle details, see NV_VASPACE_ALLOCATION_PARAMETERS documentation in nvos.h. + * + * RM will propagate the update to all channels using the VA space. + * + * NOTE: All channels using this VA space are expected to be idle and unscheduled prior + * to and during this control call - it is responsibility of caller to ensure this. + * + * hVASpace + * handle for the allocated VA space that this control call should operate + * on. If it's 0, it assumes to use the implicit allocated VA space + * associated with the client/device pair. 
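A sketch of handing a VA space over to an externally managed page directory with the flags defined above (VIDMEM aperture in bits 1:0, ALL_CHANNELS in bit 3). Per the note above, the caller is responsible for idling the channels first; rmControl() is the usual hypothetical dispatch helper.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS adoptExternalPageDirectory(NvHandle hClient, NvHandle hDevice,
                                            NvHandle hVASpace,
                                            NvU64 pdPhysAddr, NvU32 numEntries)
{
    NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS params = { 0 };

    params.physAddress = pdPhysAddr;
    params.numEntries  = numEntries;
    params.hVASpace    = hVASpace;

    /* Video-memory aperture; update every channel's instance block (chId ignored). */
    params.flags =
        NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM |
        (NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_ALL_CHANNELS_TRUE << 3);

    return rmControl(hClient, hDevice, NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY,
                     &params, sizeof(params));
}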
+ */ +#define NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY (0x801814U) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_DMA_INTERFACE_ID << 8) | NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS { + NvHandle hVASpace; + NvU32 subDeviceId; // ID+1, 0 for BC +} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS; + + + +/* _ctrl0080dma_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h new file mode 100644 index 000000000..21c2a1002 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fb.h @@ -0,0 +1,235 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080fb.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE fb control commands and parameters */ + +/** + * NV0080_CTRL_CMD_FB_GET_CAPS + * + * This command returns the set of framebuffer capabilities for the device + * in the form of an array of unsigned bytes. Framebuffer capabilities + * include supported features and required workarounds for the framebuffer + * engine(s) within the device, each represented by a byte offset into the + * table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_FB_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the framebuffer caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + */ +#define NV0080_CTRL_CMD_FB_GET_CAPS (0x801301) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FB_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_FB_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_FB_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_FB_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_FB_CAPS_SUPPORT_RENDER_TO_SYSMEM 0:0x01 +#define NV0080_CTRL_FB_CAPS_BLOCKLINEAR 0:0x02 +#define NV0080_CTRL_FB_CAPS_SUPPORT_SCANOUT_FROM_SYSMEM 0:0x04 +#define NV0080_CTRL_FB_CAPS_SUPPORT_CACHED_SYSMEM 0:0x08 +#define NV0080_CTRL_FB_CAPS_SUPPORT_C24_COMPRESSION 0:0x10 // Deprecated +#define NV0080_CTRL_FB_CAPS_SUPPORT_SYSMEM_COMPRESSION 0:0x20 +#define NV0080_CTRL_FB_CAPS_NISO_CFG0_BUG_534680 0:0x40 // Deprecated +#define NV0080_CTRL_FB_CAPS_ISO_FETCH_ALIGN_BUG_561630 0:0x80 // Deprecated + +#define NV0080_CTRL_FB_CAPS_BLOCKLINEAR_GOBS_512 1:0x01 +#define NV0080_CTRL_FB_CAPS_L2_TAG_BUG_632241 1:0x02 +#define NV0080_CTRL_FB_CAPS_SINGLE_FB_UNIT 1:0x04 // Deprecated +#define NV0080_CTRL_FB_CAPS_CE_RMW_DISABLE_BUG_897745 1:0x08 // Deprecated +#define NV0080_CTRL_FB_CAPS_OS_OWNS_HEAP_NEED_ECC_SCRUB 1:0x10 +#define NV0080_CTRL_FB_CAPS_ASYNC_CE_L2_BYPASS_SET 1:0x20 // Deprecated +#define NV0080_CTRL_FB_CAPS_DISABLE_TILED_CACHING_INVALIDATES_WITH_ECC_BUG_1521641 1:0x40 + +#define NV0080_CTRL_FB_CAPS_DISABLE_MSCG_WITH_VR_BUG_1681803 2:0x01 +#define NV0080_CTRL_FB_CAPS_VIDMEM_ALLOCS_ARE_CLEARED 2:0x02 +#define NV0080_CTRL_FB_CAPS_DISABLE_PLC_GLOBALLY 2:0x04 +#define NV0080_CTRL_FB_CAPS_PLC_BUG_3046774 2:0x08 + + +/* size in bytes of fb caps table */ +#define NV0080_CTRL_FB_CAPS_TBL_SIZE 3 + + + +/*! + * NV0080_CTRL_CMD_FB_COMPBIT_STORE_GET_INFO + * + * This command returns compbit backing store-related information. + * + * size + * [out] Size of compbit store, in bytes + * address + * [out] Address of compbit store + * addressSpace + * [out] Address space of compbit store (corresponds to type NV_ADDRESS_SPACE in nvrm.h) + * maxCompbitLine + * [out] Maximum compbitline possible, determined based on size + * comptagsPerCacheLine + * [out] Number of compression tags per compression cache line, across all + * L2 slices. + * cacheLineSize + * [out] Size of compression cache line, across all L2 slices. (bytes) + * cacheLineSizePerSlice + * [out] Size of the compression cache line per slice (bytes) + * cacheLineFetchAlignment + * [out] Alignment used while fetching the compression cacheline range in FB. + * If start offset of compcacheline in FB is S and end offset is E, then + * the range to fetch to ensure entire compcacheline data is extracted is: + * (align_down(S) , align_up(E)) + * This is needed in GM20X+ because of interleaving of data in Linear FB space. + * Example - In GM204 every other 1K FB chunk of data is offset by 16K. + * backingStoreBase + * [out] Address of start of Backing Store in linear FB Physical Addr space. + * This is the actual offset in FB which HW starts using as the Backing Store and + * in general will be different from the start of the region that driver allocates + * as the backing store. This address is expected to be 2K aligned. 
+ *   gobsPerComptagPerSlice
+ *     [out] (Only on Pascal) Number of GOBs (512 bytes of surface PA) that correspond to one 64KB comptagline, per slice.
+ *           One GOB stores 1 byte of compression bits.
+ *           A value of 0 means this field is not applicable for the current architecture.
+ *   backingStoreCbcBase
+ *     [out] 2KB aligned base address of CBC (post divide address)
+ *   comptaglineAllocationPolicy
+ *     [out] Policy used to allocate comptagline from CBC for the device
+ *   privRegionStartOffset
+ *     [out] Starting offset for any priv region allocated by clients. Only used by MODS.
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO (0x801306) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 Address, 8);
+    NvU32 AddressSpace;
+    NvU32 MaxCompbitLine;
+    NvU32 comptagsPerCacheLine;
+    NvU32 cacheLineSize;
+    NvU32 cacheLineSizePerSlice;
+    NvU32 cacheLineFetchAlignment;
+    NV_DECLARE_ALIGNED(NvU64 backingStoreBase, 8);
+    NvU32 gobsPerComptagPerSlice;
+    NvU32 backingStoreCbcBase;
+    NvU32 comptaglineAllocationPolicy;
+    NV_DECLARE_ALIGNED(NvU64 privRegionStartOffset, 8);
+} NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS;
+
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_UNKNOWN 0 // ADDR_UNKNOWN
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_SYSMEM 1 // ADDR_SYSMEM
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_ADDRESS_SPACE_FBMEM 2 // ADDR_FBMEM
+
+// Policy used to allocate comptaglines
+/**
+ * Legacy mode allocates a comptagline for 64kb page. This mode will always allocate
+ * contiguous comptaglines from a ctag heap.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_LEGACY 0
+/**
+ * 1TO1 mode allocates a comptagline for 64kb page. This mode will calculate
+ * comptagline offset based on physical address. This mode will allocate
+ * contiguous comptaglines if the surface is contiguous and non-contiguous
+ * comptaglines for non-contiguous surfaces.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_1TO1 1
+/**
+ * 1TO4_Heap mode allocates a comptagline for 256kb page granularity. This mode
+ * will allocate comptagline from a heap. This mode will align the surface allocations
+ * to 256kb before allocating comptaglines. The comptaglines allocated will always be
+ * contiguous here.
+ * TODO: For GA10x, this mode will support < 256kb surface allocations, by sharing
+ * a comptagline with at most 3 different 64Kb allocations. This will result in
+ * a mixed-contiguity config where comptaglines will be allocated contiguously as well
+ * as non-contiguously when shared with other allocations.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_1TO4 2
+/**
+ * Rawmode will transfer allocation of comptaglines to HW, where HW manages
+ * comptaglines based on physical offset. The comptaglines are cleared when SW
+ * issues physical/virtual scrub to the surface before reuse.
+ */
+#define NV0080_CTRL_CMD_FB_GET_COMPBIT_STORE_INFO_POLICY_RAWMODE 3
+
+/**
+ * NV0080_CTRL_CMD_FB_GET_CAPS_V2
+ *
+ * This command returns the same set of framebuffer capabilities for the
+ * device as @ref NV0080_CTRL_CMD_FB_GET_CAPS.
The difference is in the structure + * NV0080_CTRL_FB_GET_CAPS_V2_PARAMS, which contains a statically sized array, + * rather than a caps table pointer and a caps table size in + * NV0080_CTRL_FB_GET_CAPS_PARAMS. + * + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the framebuffer caps bits will be written by the RM. + * The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + */ +#define NV0080_CTRL_CMD_FB_GET_CAPS_V2 (0x801307) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FB_INTERFACE_ID << 8) | NV0080_CTRL_FB_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FB_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0080_CTRL_FB_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_FB_CAPS_TBL_SIZE]; +} NV0080_CTRL_FB_GET_CAPS_V2_PARAMS; + + + + +/* _ctrl0080fb_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h new file mode 100644 index 000000000..f7538255b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h @@ -0,0 +1,645 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080fifo.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE fifo control commands and parameters */ + +/** + * NV0080_CTRL_FIFO_GET_CAPS + * + * This command returns the set of FIFO engine capabilities for the device + * in the form of an array of unsigned bytes. FIFO capabilities + * include supported features and required workarounds for the FIFO + * engine(s) within the device, each represented by a byte offset into the + * table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_FIFO_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the framebuffer caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. 
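A sketch of the V2 caps query defined above, which avoids the pointer-based table of NV0080_CTRL_CMD_FB_GET_CAPS by embedding the byte array; the NV0080_CTRL_FB_GET_CAP macro then extracts individual bits. rmControl() is again a hypothetical stand-in for the client's control path.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NvBool fbVidmemAllocsAreCleared(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_FB_GET_CAPS_V2_PARAMS params = { 0 };

    /* The V2 variant embeds the caps table, so no pointer/size bookkeeping. */
    if (rmControl(hClient, hDevice, NV0080_CTRL_CMD_FB_GET_CAPS_V2,
                  &params, sizeof(params)) != NV_OK)
        return NV_FALSE;

    /* The cap define encodes byte_index:bit_mask; the macro does the lookup. */
    return NV0080_CTRL_FB_GET_CAP(params.capsTbl,
                                  NV0080_CTRL_FB_CAPS_VIDMEM_ALLOCS_ARE_CLEARED) != 0;
}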
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_FIFO_GET_CAPS (0x801701) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_FIFO_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_FIFO_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_FIFO_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_FIFO_CAPS_SUPPORT_SCHED_EVENT 0:0x01 +#define NV0080_CTRL_FIFO_CAPS_SUPPORT_PCI_PB 0:0x02 +#define NV0080_CTRL_FIFO_CAPS_SUPPORT_VID_PB 0:0x04 +#define NV0080_CTRL_FIFO_CAPS_USERD_IN_SYSMEM 0:0x40 +/* do not use pipelined PTE BLITs to update PTEs: call the RM */ +#define NV0080_CTRL_FIFO_CAPS_NO_PIPELINED_PTE_BLIT 0:0x80 +#define NV0080_CTRL_FIFO_CAPS_GPU_MAP_CHANNEL 1:0x01 +#define NV0080_CTRL_FIFO_CAPS_BUFFEREDMODE_SCHEDULING 1:0x02 // Deprecated +#define NV0080_CTRL_FIFO_CAPS_WFI_BUG_898467 1:0x08 // Deprecated +#define NV0080_CTRL_FIFO_CAPS_HAS_HOST_LB_OVERFLOW_BUG_1667921 1:0x10 +/* + * To indicate Volta subcontext support with multiple VA spaces in a TSG. + * We are not using "subcontext" tag for the property, since we also use + * subcontext to represent pre-VOlta SCG feature, which only allows a single + * VA space in a TSG. + */ +#define NV0080_CTRL_FIFO_CAPS_MULTI_VAS_PER_CHANGRP 1:0x20 + + +#define NV0080_CTRL_FIFO_CAPS_SUPPORT_WDDM_INTERLEAVING 1:0x40 + +/* size in bytes of fifo caps table */ +#define NV0080_CTRL_FIFO_CAPS_TBL_SIZE 2 + +/* + * NV0080_CTRL_CMD_FIFO_ENABLE_SCHED_EVENTS + * + * This command enables the GPU to place various scheduling events in the + * off chip event buffer (with optional interrupt) for those GPUs that support + * it. + * + * record + * This parameter specifies a mask of event types to record. + * interrupt + * This parameter specifies a mask of event types for which to interrupt + * the CPU when the event occurs. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_FIFO_ENABLE_SCHED_EVENTS (0x801703) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | 0x3" */ + +typedef struct NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PARAMS { + NvU32 record; + NvU32 interrupt; +} NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PARAMS; + +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_START_CTX 0:0 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_START_CTX_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_START_CTX_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_END_CTX 1:1 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_END_CTX_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_END_CTX_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_NEW_RUNLIST 2:2 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_NEW_RUNLIST_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_NEW_RUNLIST_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_SEM_ACQUIRE 3:3 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_SEM_ACQUIRE_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_SEM_ACQUIRE_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PAGE_FAULT 4:4 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PAGE_FAULT_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PAGE_FAULT_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PREEMPT 5:5 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PREEMPT_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PREEMPT_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_YIELD 6:6 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_YIELD_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_YIELD_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_IDLE_CTX 7:7 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_IDLE_CTX_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_IDLE_CTX_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_HI_PRI 8:8 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_HI_PRI_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_HI_PRI_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ENG_STALLED 9:9 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ENG_STALLED_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ENG_STALLED_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_VSYNC 10:10 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_VSYNC_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_VSYNC_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_FGCS_FAULT 11:11 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_FGCS_FAULT_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_FGCS_FAULT_ENABLE (0x00000001) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ALL 11:0 +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ALL_DISABLE (0x00000000) +#define NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ALL_ENABLE (0x00000fff) + +/* + * NV0080_CTRL_CMD_FIFO_START_SELECTED_CHANNELS + * + * This command allows the caller to request that a set of channels + * be added to the runlist. + * + * fifoStartChannelListSize + * Size of the fifoStartChannelList. The units are in entries, not + * bytes. + * fifoStartChannelList + * This will be a list of NV0080_CTRL_FIFO_CHANNEL data structures, + * one for each channel that is to be started. 
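A sketch of programming the scheduling-event masks defined above: record every event type but interrupt the CPU only on page faults (PAGE_FAULT occupies bit 4 per the 4:4 field definition). rmControl() is the same assumed dispatch helper as in the earlier sketches.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS recordAllInterruptOnFaults(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PARAMS params = { 0 };

    /* Record every defined event type (bits 11:0). */
    params.record = NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_ALL_ENABLE;

    /* Interrupt the CPU only for page-fault events. */
    params.interrupt = NV0080_CTRL_FIFO_ENABLE_SCHED_EVENTS_PAGE_FAULT_ENABLE << 4;

    return rmControl(hClient, hDevice, NV0080_CTRL_CMD_FIFO_ENABLE_SCHED_EVENTS,
                     &params, sizeof(params));
}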
+ * channelHandle + * deprecated + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +/* + * hChannel + * This is the handle to the channel that is scheduled to be started. + */ +typedef struct NV0080_CTRL_FIFO_CHANNEL { + NvHandle hChannel; +} NV0080_CTRL_FIFO_CHANNEL; + +#define NV0080_CTRL_CMD_FIFO_START_SELECTED_CHANNELS (0x801705) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS { + NvU32 fifoStartChannelListSize; + NvHandle channelHandle[8]; + NV_DECLARE_ALIGNED(NvP64 fifoStartChannelList, 8); +} NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS; + +#define NV0080_CTRL_FIFO_ENGINE_ID_GRAPHICS (0x00000000) +#define NV0080_CTRL_FIFO_ENGINE_ID_MPEG (0x00000001) +#define NV0080_CTRL_FIFO_ENGINE_ID_MOTION_ESTIMATION (0x00000002) +#define NV0080_CTRL_FIFO_ENGINE_ID_VIDEO (0x00000003) +#define NV0080_CTRL_FIFO_ENGINE_ID_BITSTREAM (0x00000004) +#define NV0080_CTRL_FIFO_ENGINE_ID_ENCRYPTION (0x00000005) +#define NV0080_CTRL_FIFO_ENGINE_ID_FGT (0x00000006) + +/* + * NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES + * + * This command is used to provide the caller with the alignment and size + * of the context save region for an engine + * + * engineId + * This parameter is an input parameter specifying the engineId for which + * the alignment/size is requested. + * alignment + * This parameter is an output parameter which will be filled in with the + * minimum alignment requirement. + * size + * This parameter is an output parameter which will be filled in with the + * minimum size of the context save region for the engine. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES (0x801707) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID 4:0 +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS (0x00000000) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD (0x00000001) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO (0x00000002) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG (0x00000003) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE (0x00000004) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY (0x00000005) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION (0x00000006) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS (0x00000007) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL (0x00000008) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM (0x00000009) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT (0x0000000a) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT (0x0000000b) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL (0x0000000c) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL (0x0000000d) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB (0x0000000e) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV (0x0000000f) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH (0x00000010) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB (0x00000011) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB (0x00000013) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL (0x00000014) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL (0x00000015) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK (0x00000016) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT (0x00000017) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT (0x00000019) +#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS { + NvU32 engineId; + NvU32 alignment; + NvU32 size; +} NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS; + +/* + * NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS + * + * This command allows the caller to group two sets of channels. A channel + * set includes one or more channels. After grouping, the grouped channel IDs + * are set to next to each other in the runlist. This command can be used + * several times to group more than two channels. 
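A sketch of querying the graphics engine's context-save alignment and size through NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES as defined above, assuming the usual hypothetical rmControl() helper and existing handles.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS queryGrCtxProperties(NvHandle hClient, NvHandle hDevice,
                                      NvU32 *pAlignment, NvU32 *pSize)
{
    NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS params = { 0 };
    NV_STATUS status;

    params.engineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS;

    status = rmControl(hClient, hDevice,
                       NV0080_CTRL_CMD_FIFO_GET_ENGINE_CONTEXT_PROPERTIES,
                       &params, sizeof(params));
    if (status == NV_OK)
    {
        *pAlignment = params.alignment;   /* minimum alignment of the context save region */
        *pSize      = params.size;        /* minimum size of the context save region */
    }
    return status;
}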
+ * + * Using a NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE after + * NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS is the general usage. A + * NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS after a + * NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE for a channel handle is not + * allowed. + * + * NV0080_CTRL_FIFO_RUNLIST_GROUP_MAX_CHANNELS defines the max channels in a + * group. + * + * hChannel1 + * This parameter specifies the handle of the channel that belongs to the + * base set of channels. + * hChannel2 + * This parameter specifies the handle of the channel that belongs to the + * additional set of channels. + + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS (0x801709) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | 0x9" */ + +typedef struct NV0080_CTRL_FIFO_RUNLIST_GROUP_CHANNELS_PARAM { + NvHandle hChannel1; + NvHandle hChannel2; +} NV0080_CTRL_FIFO_RUNLIST_GROUP_CHANNELS_PARAM; + +#define NV0080_CTRL_FIFO_RUNLIST_GROUP_MAX_CHANNELS (8) + +/* + * NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE + * + * This command allows the caller to divide the timeslice (DMA_TIMESLICE) of a + * channel between the channels in the group in which the channel resides. + * After applying this command, a timeslice divided channel (group) has a + * short timeslice and repeats more than once in the runlist. The total + * available execution time is not changed. + * + * Using this command after NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS is the + * general usage. A NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS after a + * NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE for a channel handle is not + * allowed. + * + * hChannel + * This parameter specifies the handle of the channel for the channel + * group to which the divided timeslice operation will apply. + * tsDivisor + * This parameter specifies the timeslice divisor value. This value + * should not exceed NV0080_CTRL_FIFO_RUNLIST_MAX_TIMESLICE_DIVISOR + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_RESOURCES + */ +#define NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE (0x80170b) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | 0xB" */ + +typedef struct NV0080_CTRL_FIFO_RUNLIST_DIVIDE_TIMESLICE_PARAM { + NvHandle hChannel; + NvU32 tsDivisor; +} NV0080_CTRL_FIFO_RUNLIST_DIVIDE_TIMESLICE_PARAM; + +#define NV0080_CTRL_FIFO_RUNLIST_MAX_TIMESLICE_DIVISOR (12) + +/* + * NV0080_CTRL_CMD_FIFO_PREEMPT_RUNLIST + * + * This command preepmts the engine represented by the specified runlist. + * + * hRunlist + * This parameter specifies the per engine runlist handle. This + * parameter is being retained to maintain backwards compatibility + * with clients that have not transitioned over to using runlists + * on a per subdevice basis. + * + * engineID + * This parameter specifies the engine to be preempted. Engine defines + * can be found in cl2080.h. 
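A sketch of the ordering the comments above require: group two channels first, then divide the group's timeslice. Both parameter structures come straight from the definitions above; rmControl(), the channel handles, and the divisor of 2 are illustrative assumptions.

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS groupThenDivideTimeslice(NvHandle hClient, NvHandle hDevice,
                                          NvHandle hChanA, NvHandle hChanB)
{
    NV0080_CTRL_FIFO_RUNLIST_GROUP_CHANNELS_PARAM groupParams = { 0 };
    NV0080_CTRL_FIFO_RUNLIST_DIVIDE_TIMESLICE_PARAM tsParams = { 0 };
    NV_STATUS status;

    /* Group the two channels first; DIVIDE_TIMESLICE must come afterwards. */
    groupParams.hChannel1 = hChanA;
    groupParams.hChannel2 = hChanB;
    status = rmControl(hClient, hDevice, NV0080_CTRL_CMD_FIFO_RUNLIST_GROUP_CHANNELS,
                       &groupParams, sizeof(groupParams));
    if (status != NV_OK)
        return status;

    /* Split the group's timeslice in two; stay within the documented maximum divisor. */
    tsParams.hChannel  = hChanA;
    tsParams.tsDivisor = 2;
    return rmControl(hClient, hDevice, NV0080_CTRL_CMD_FIFO_RUNLIST_DIVIDE_TIMESLICE,
                     &tsParams, sizeof(tsParams));
}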
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_PREEMPT_RUNLIST (0x80170c) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | 0xC" */
+
+typedef struct NV0080_CTRL_FIFO_PREEMPT_RUNLIST_PARAMS {
+    NvHandle hRunlist;
+    NvU32 engineID;
+} NV0080_CTRL_FIFO_PREEMPT_RUNLIST_PARAMS;
+
+
+/*
+ * NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST
+ *
+ * Takes a list of hChannels as input and returns the
+ * Channel IDs that they correspond to in HW.
+ *
+ *   numChannels
+ *     Size of the input pChannelHandleList
+ *   pChannelHandleList
+ *     List of input channel handles
+ *   pChannelList
+ *     List of Channel IDs corresponding to
+ *     each entry in pChannelHandleList.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_OBJECT_HANDLE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST (0x80170d) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS_MESSAGE_ID (0xDU)
+
+typedef struct NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS {
+    NvU32 numChannels;
+    NV_DECLARE_ALIGNED(NvP64 pChannelHandleList, 8);
+    NV_DECLARE_ALIGNED(NvP64 pChannelList, 8);
+} NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS;
+
+
+/*
+ * NV0080_CTRL_CMD_FIFO_GET_LATENCY_BUFFER_SIZE
+ *
+ * This control call returns the number of gp methods (gpEntries) and push buffer
+ * methods (pbEntries) allocated to each engine.
+ *
+ * engineID
+ *   The engine ID; this is an input parameter.
+ *
+ * gpEntries
+ *   Number of gp entries
+ *
+ * pbEntries
+ *   Number of pb entries (in units of 32B rows)
+ *
+ */
+
+
+#define NV0080_CTRL_CMD_FIFO_GET_LATENCY_BUFFER_SIZE (0x80170e) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+#define NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS {
+    NvU32 engineID;
+    NvU32 gpEntries;
+    NvU32 pbEntries;
+} NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS;
+
+#define NV0080_CTRL_FIFO_GET_CHANNELLIST_INVALID_CHANNEL (0xffffffff)
+
+/*
+ * NV0080_CTRL_CMD_FIFO_SET_CHANNEL_PROPERTIES
+ *
+ * This command allows internal properties of the channel
+ * to be modified even when the channel is active. Most of these properties
+ * are not meant to be modified during normal runs, hence they have been
+ * kept separate from the channel alloc params. It is the
+ * responsibility of the underlying HAL routine to make
+ * sure the channel properties are changed while the channel
+ * is *NOT* in a transient state.
+ *
+ * hChannel
+ *   The handle to the channel.
+ *
+ * property
+ *   The channel property to be modified.
+ *   NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_xxx provides the entire list
+ *   of properties.
+ *
+ * value
+ *   The new value for the property.
+ * When property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEINMICROSECONDS + * value = timeslice in microseconds + * desc: Used to change a channel's engine timeslice in microseconds + * + * property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEINMICROSECONDS + * value = timeslice in microseconds + * desc: Used to change a channel's pbdma timeslice in microseconds + * + * property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEDISABLE + * value is ignored + * desc: Disables a channel from being timesliced out from an engine. + * Other scheduling events like explicit yield, acquire failures will + * switch out the channel though. + * + * property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEDISABLE + * value is ignored + * desc: Disables a channel from being timesliced out from its pbdma. + * + * property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_INVALIDATE_PDB_TARGET + * value is ignored + * desc: Override the channel's page directory pointer table with an + * erroneous aperture value. (TODO: make test calls NV_VERIF_FEATURES + * only)(VERIF ONLY) + * + * property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT + * value = engineID of engine that will have its context pointer reset. + * engineID defines can be found in cl2080.h + * (e.g., NV2080_ENGINE_TYPE_GRAPHICS) + * desc: Override the channel's engine context pointer with a non existent + * buffer forcing it to fault. (VERIF ONLY) + * + * property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT_NOPREEMPT + * value = engineID of engine that will have its context pointer reset. + * engineID defines can be found in cl2080.h + * (e.g., NV2080_ENGINE_TYPE_GRAPHICS) + * desc: Override the channel's engine context pointer with a non existent + * buffer forcing it to fault. However the channel will not be preempted + * before having its channel state modified.(VERIF ONLY) + * + * property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_NOOP + * value is ignored + * desc: does not change any channel state exercises a full channel preempt/ + * unbind/bind op. (VERIF ONLY) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_CHANNEL + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV0080_CTRL_CMD_FIFO_SET_CHANNEL_PROPERTIES (0x80170f) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS { + NvHandle hChannel; + NvU32 property; + NV_DECLARE_ALIGNED(NvU64 value, 8); +} NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS; + +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEINMICROSECONDS (0x00000000) +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEINMICROSECONDS (0x00000001) +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEDISABLE (0x00000002) +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PBDMATIMESLICEDISABLE (0x00000003) +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_INVALIDATE_PDB_TARGET (0x00000004) +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT (0x00000005) +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_NOOP (0x00000007) +#define NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_RESETENGINECONTEXT_NOPREEMPT (0x00000008) + + + +/* + * NV0080_CTRL_CMD_FIFO_STOP_RUNLIST + * + * Stops all processing on the runlist for the given engine. 
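//
// Illustrative usage sketch (not part of the original header): change a live
// channel's engine timeslice via NV0080_CTRL_CMD_FIFO_SET_CHANNEL_PROPERTIES.
// rmControl() is a hypothetical wrapper over the RM control entry point.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS setEngineTimeslice(NvHandle hClient, NvHandle hDevice,
                                    NvHandle hChannel, NvU64 timesliceUs)
{
    NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS params = { 0 };

    params.hChannel = hChannel;
    params.property = NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_ENGINETIMESLICEINMICROSECONDS;
    params.value    = timesliceUs; // new engine timeslice, in microseconds

    return rmControl(hClient, hDevice,
                     NV0080_CTRL_CMD_FIFO_SET_CHANNEL_PROPERTIES,
                     &params, sizeof(params));
}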
This is only + * valid in per-engine round-robin scheduling mode. + * + * engineID + * This parameter specifies the engine to be stopped. Engine defines + * can be found in cl2080.h. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0080_CTRL_CMD_FIFO_STOP_RUNLIST (0x801711) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS { + NvU32 engineID; +} NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS; + +/* + * NV0080_CTRL_CMD_FIFO_START_RUNLIST + * + * Restarts a runlist previously stopped with NV0080_CTRL_CMD_FIFO_STOP_RUNLIST. + * This is only valid for per-engine round-robin mode. + * + * engineID + * This parameter specifies the engine to be started. Engine defines + * can be found in cl2080.h. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV0080_CTRL_CMD_FIFO_START_RUNLIST (0x801712) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_START_RUNLIST_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_START_RUNLIST_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV0080_CTRL_FIFO_START_RUNLIST_PARAMS { + NvU32 engineID; +} NV0080_CTRL_FIFO_START_RUNLIST_PARAMS; + +/** + * NV0080_CTRL_FIFO_GET_CAPS_V2 + * + * This command returns the same set of FIFO engine capabilities for the device + * as @ref NV0080_CTRL_FIFO_GET_CAPS. The difference is in the structure + * NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS, which contains a statically sized array, + * rather than a caps table pointer and a caps table size in + * NV0080_CTRL_FIFO_GET_CAPS_PARAMS. + * + * capsTbl + * This parameter is an array of the client's caps table buffer. + * The framebuffer caps bits will be written by the RM. + * The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_FIFO_GET_CAPS_V2 (0x801713) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_FIFO_CAPS_TBL_SIZE]; +} NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS; + +/** + * NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS + * + * @brief This command idles (deschedules and waits for pending work to complete) channels + * belonging to a particular device. 
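//
// Illustrative usage sketch (not part of the original header): stop and later
// restart the graphics runlist, which is only valid in per-engine round-robin
// scheduling mode. rmControl() is a hypothetical wrapper over the RM control
// entry point; NV2080_ENGINE_TYPE_GRAPHICS comes from cl2080.h.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS pauseGraphicsRunlist(NvHandle hClient, NvHandle hDevice, NvBool bStop)
{
    NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS  stopParams  = { 0 };
    NV0080_CTRL_FIFO_START_RUNLIST_PARAMS startParams = { 0 };

    if (bStop)
    {
        stopParams.engineID = NV2080_ENGINE_TYPE_GRAPHICS;
        return rmControl(hClient, hDevice, NV0080_CTRL_CMD_FIFO_STOP_RUNLIST,
                         &stopParams, sizeof(stopParams));
    }

    startParams.engineID = NV2080_ENGINE_TYPE_GRAPHICS;
    return rmControl(hClient, hDevice, NV0080_CTRL_CMD_FIFO_START_RUNLIST,
                     &startParams, sizeof(startParams));
}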
+ * + * numChannels + * Number of channels to idle + * + * hChannels + * Array of channel handles to idle + * + * flags + * NVOS30_FLAGS that control aspects of how the channel is idled + * + * timeout + * GPU timeout in microseconds, for each CHID Manager's idling operation + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_TIMEOUT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_LOCK_STATE + */ +#define NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS (0x801714) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID << 8) | NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS_MESSAGE_ID" */ +#define NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS 4096 + +#define NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS { + NvU32 numChannels; + NvHandle hChannels[NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS]; + NvU32 flags; + NvU32 timeout; +} NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS; + +/* _ctrl0080fifo_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h new file mode 100644 index 000000000..f49875a6d --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h @@ -0,0 +1,588 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080gpu.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" +#include "nvlimits.h" + + +/* NV01_DEVICE_XX/NV03_DEVICE gpu control commands and parameters */ + +/* + * NV0080_CTRL_CMD_GPU_GET_CLASSLIST + * + * This command returns supported class information for the specified device. + * If the device is comprised of more than one GPU, the class list represents + * the set of supported classes common to all GPUs within the device. + * + * It has two modes: + * + * If the classList pointer is NULL, then this command returns the number + * of classes supported by the device in the numClasses field. The value + * should then be used by the client to allocate a classList buffer + * large enough to hold one 32bit value per numClasses entry. + * + * If the classList pointer is non-NULL, then this command returns the + * set of supported class numbers in the specified buffer. 
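//
// Illustrative usage sketch (not part of the original header): idle two
// channels of a device with a one-second per-CHID-manager timeout.
// rmControl() is a hypothetical wrapper over the RM control entry point; the
// flags value 0 is a placeholder for whichever NVOS30_FLAGS the caller needs.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS idleTwoChannels(NvHandle hClient, NvHandle hDevice,
                                 NvHandle hChanA, NvHandle hChanB)
{
    // Note: this structure embeds 4096 channel handles and is large; heap
    // allocation may be preferable to the stack in real callers.
    NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS params = { 0 };

    params.numChannels  = 2; // <= NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS
    params.hChannels[0] = hChanA;
    params.hChannels[1] = hChanB;
    params.flags        = 0;        // NVOS30_FLAGS placeholder
    params.timeout      = 1000000;  // GPU timeout in microseconds

    return rmControl(hClient, hDevice, NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS,
                     &params, sizeof(params));
}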
+ * + * numClasses + * If classList is NULL, then this parameter will return the + * number of classes supported by the device. If classList is non-NULL, + * then this parameter indicates the number of entries in classList. + * classList + * This parameter specifies a pointer to the client's buffer into + * which the supported class numbers should be returned. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV0080_CTRL_CMD_GPU_GET_CLASSLIST (0x800201) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS { + NvU32 numClasses; + NV_DECLARE_ALIGNED(NvP64 classList, 8); +} NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS; + +/** + * NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES + * + * This command returns the number of subdevices for the device. + * + * numSubDevices + * This parameter returns the number of subdevices within the device. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES (0x800280) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS { + NvU32 numSubDevices; +} NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER + * + * This command returns the video link order of each subdevice id inside the + * device. This call can only be made after SLI is enabled. This call is + * intended for 3D clients to use to determine the vidlink order of the + * devices. The Display Output Parent will always be the first subdevice + * mask listed in the array. Note that this command should not be used in + * case of bridgeless SLI. The order of the subdevices returned by this + * command will not be correct in case of bridgeless SLI. + * + * ConnectionCount + * Each HW can provide 1 or 2 links between all GPUs in a device. This + * number tells how many links are available between GPUs. This data + * also represents the number of concurrent SLI heads that can run at + * the same time over this one device. + * + * Order + * This array returns the order of subdevices that are used through + * the vidlink for display output. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER (0x800281) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS_MESSAGE_ID (0x81U) + +typedef struct NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS { + NvU32 ConnectionCount; + NvU32 Order[NV_MAX_SUBDEVICES]; +} NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER + * + * This command sets display ownership within the device to the specified + * subdevice instance. The actual transfer of display ownership will take + * place at the next modeset. + * + * subDeviceInstance + * This member specifies the subdevice instance of the new display + * owner. 
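//
// Illustrative usage sketch (not part of the original header): the two-pass
// pattern described above for NV0080_CTRL_CMD_GPU_GET_CLASSLIST. rmControl()
// is a hypothetical wrapper over the RM control entry point; malloc/free
// (<stdlib.h>) and the (NvP64)(uintptr_t) cast stand in for the caller's own
// allocator and NvP64 conversion conventions.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS dumpClassList(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS params = { 0 };
    NvU32 *classes;
    NV_STATUS status;

    // Pass 1: classList == NULL returns only the supported class count.
    status = rmControl(hClient, hDevice, NV0080_CTRL_CMD_GPU_GET_CLASSLIST,
                       &params, sizeof(params));
    if (status != NV_OK)
        return status;

    classes = malloc(params.numClasses * sizeof(*classes));
    if (classes == NULL)
        return NV_ERR_NO_MEMORY;

    // Pass 2: same call with a buffer large enough for numClasses entries.
    params.classList = (NvP64)(uintptr_t)classes;
    status = rmControl(hClient, hDevice, NV0080_CTRL_CMD_GPU_GET_CLASSLIST,
                       &params, sizeof(params));

    // ... consume classes[0 .. numClasses-1] on success ...
    free(classes);
    return status;
}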
The subdevice instance must be in the legal range + * indicated by the NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES command. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER (0x800282) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS_MESSAGE_ID (0x82U) + +typedef struct NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS { + NvU32 subDeviceInstance; +} NV0080_CTRL_GPU_SET_DISPLAY_OWNER_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_GET_DISPLAY_OWNER + * + * This command returns the subdevice instance of the current display owner + * within the device. + * + * subDeviceInstance + * This member returns the subdevice instance of the current display + * owner. The subdevice instance will be in the legal range + * indicated by the NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES command. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV0080_CTRL_CMD_GPU_GET_DISPLAY_OWNER (0x800283) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS_MESSAGE_ID (0x83U) + +typedef struct NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS { + NvU32 subDeviceInstance; +} NV0080_CTRL_GPU_GET_DISPLAY_OWNER_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_SET_VIDLINK + * + * This command enables or disables the VIDLINK of all subdevices in the + * current SLI configuration. + * + * enable + * Enables or disables the vidlink + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_GPU_SET_VIDLINK (0x800285) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_VIDLINK_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_SET_VIDLINK_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV0080_CTRL_GPU_SET_VIDLINK_PARAMS { + NvU32 enable; +} NV0080_CTRL_GPU_SET_VIDLINK_PARAMS; + +#define NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_FALSE (0x00000000) +#define NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_TRUE (0x00000001) + +/* commands */ +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_GET_STATUS 0 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_POWERDOWN 1 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_POWERUP 2 + +/* status */ +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWER_ON 0 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWERING_DOWN 1 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_GATED 2 +#define NV0080_CTRL_CMD_GPU_VIDEO_POWERGATE_STATUS_POWERING_UP 3 + +/* + * NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE + * + * This command is used to enable or disable the persistence of a GPU's + * software state when no clients exist. With persistent software state enabled + * the GPU's software state is not torn down when the last client exits, but is + * retained until either the kernel module unloads or persistent software state + * is disabled. + * + * newState + * This input parameter is used to enable or disable the persistence of the + * software state of all subdevices within the device. 
+ * Possible values are: + * NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED + * NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE (0x800287) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID" */ + +/* Possible values of persistentSwState */ +#define NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED (0x00000000) +#define NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED (0x00000001) + +#define NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID (0x87U) + +typedef struct NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS { + NvU32 newState; +} NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE + * + * swStatePersistence + * This parameter returns a value indicating if persistent software + * state is currently enabled or not for the specified GPU. See the + * description of NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE. + * Possible values are: + * NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED + * NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE (0x800288) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS { + NvU32 swStatePersistence; +} NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS; + +/** + * NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE + * + * This command returns a value indicating virtualization mode in + * which the GPU is running. + * + * virtualizationMode + * This parameter returns the virtualization mode of the device. + * Possible values are: + * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE + * This value indicates that there is no virtualization mode associated with the + * device (i.e. it's a baremetal GPU). + * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS + * This value indicates that the device is associated with the NMOS. + * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX + * This value indicates that the device is associated with VGX(guest GPU). + * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST + * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VGPU + * This value indicates that the device is associated with vGPU(host GPU). + * NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VSGA + * This value indicates that the device is associated with vSGA(host GPU). 
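//
// Illustrative usage sketch (not part of the original header): enable
// persistent software state only if it is not already enabled. rmControl()
// is a hypothetical wrapper over the RM control entry point.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS ensurePersistenceEnabled(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS  query  = { 0 };
    NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS modify = { 0 };
    NV_STATUS status;

    status = rmControl(hClient, hDevice,
                       NV0080_CTRL_CMD_GPU_QUERY_SW_STATE_PERSISTENCE,
                       &query, sizeof(query));
    if (status != NV_OK ||
        query.swStatePersistence == NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED)
        return status; // already enabled, or the query itself failed

    modify.newState = NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED;
    return rmControl(hClient, hDevice,
                     NV0080_CTRL_CMD_GPU_MODIFY_SW_STATE_PERSISTENCE,
                     &modify, sizeof(modify));
}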
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE (0x800289) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE (0x00000000) +#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS (0x00000001) +#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX (0x00000002) +#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST (0x00000003) +#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VGPU NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST +#define NV0080_CTRL_GPU_VIRTUALIZATION_MODE_HOST_VSGA (0x00000004) + +#define NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS_MESSAGE_ID (0x89U) + +typedef struct NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS { + NvU32 virtualizationMode; +} NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS; + + + +/* + * NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE + * + * This command returns the setting information for sparse texture compute + * mode optimization on the associated GPU. This setting indicates how the + * large page size should be selected by the RM for the GPU. + * + * defaultSetting + * This field specifies what the OS default setting is for the associated + * GPU. See NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list + * of possible values. + * currentSetting + * This field specifies which optimization mode was applied when the + * driver was loaded. See + * NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list of + * possible values. + * pendingSetting + * This field specifies which optimization mode will be applied on the + * next driver reload. See + * NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE for a list of + * possible values. + * + * Possible status values returned are: + * NV_OK + */ +#define NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE (0x80028c) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID (0x8CU) + +typedef struct NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS { + NvU32 defaultSetting; + NvU32 currentSetting; + NvU32 pendingSetting; +} NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE + * + * This command sets the pending setting for sparse texture compute mode. This + * setting indicates how the large page size should be selected by the RM for + * the GPU on the next driver reload. + * + * setting + * This field specifies which use case the RM should optimize the large + * page size for on the next driver reload. Possible values for this + * field are: + * NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_DEFAULT + * This value indicates that the RM should use the default setting for + * the GPU's large page size. The default setting is reported by + * NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE. + * NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE + * This value indicates that the RM should select the GPU's large page + * size to optimize for compute use cases. + * NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE + * This value indicates that the RM should select the GPU's large page + * size to optimize for sparse texture use cases. 
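//
// Illustrative usage sketch (not part of the original header): check whether
// the device is a vGPU guest by reading the virtualization mode. rmControl()
// is a hypothetical wrapper over the RM control entry point.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NvBool isVgpuGuest(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS params = { 0 };

    if (rmControl(hClient, hDevice, NV0080_CTRL_CMD_GPU_GET_VIRTUALIZATION_MODE,
                  &params, sizeof(params)) != NV_OK)
        return NV_FALSE;

    return params.virtualizationMode == NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX;
}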
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + */ +#define NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE (0x80028d) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS_MESSAGE_ID (0x8DU) + +typedef struct NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS { + NvU32 setting; +} NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS; + +/* Possible sparse texture compute mode setting values */ +#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_DEFAULT 0 +#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE 1 +#define NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE 2 + +/* + * NV0080_CTRL_CMD_GPU_GET_VGX_CAPS + * + * This command gets the VGX capability of the GPU depending on the status of + * the VGX hardware fuse. + * + * isVgx + * This field is set to NV_TRUE is VGX fuse is enabled for the GPU otherwise + * it is set to NV_FALSE. + * + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + * NVOS_STATUS_ERROR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_GPU_GET_VGX_CAPS (0x80028e) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS_MESSAGE_ID (0x8EU) + +typedef struct NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS { + NvBool isVgx; +} NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS; + + + +/* + * NV0080_CTRL_CMD_GPU_GET_SRIOV_CAPS + * + * This command is used to query GPU SRIOV capabilities + * totalVFs + * Total number of virtual functions supported. + * + * firstVfOffset + * Offset of the first VF. + * + * vfFeatureMask + * Bitmask of features managed by the guest + * + * FirstVFBar0Address + * Address of BAR0 region of first VF. + * + * FirstVFBar1Address + * Address of BAR1 region of first VF. + * + * FirstVFBar2Address + * Address of BAR2 region of first VF. + * + * bar0Size + * Size of BAR0 region on VF. + * + * bar1Size + * Size of BAR1 region on VF. + * + * bar2Size + * Size of BAR2 region on VF. + * + * b64bitBar0 + * If the VF BAR0 is 64-bit addressable. + * + * b64bitBar1 + * If the VF BAR1 is 64-bit addressable. + * + * b64bitBar2 + * If the VF BAR2 is 64-bit addressable. + * + * bSriovEnabled + * Flag for SR-IOV enabled or not. + * + * bSriovHeavyEnabled + * Flag for whether SR-IOV is enabled in standard or heavy mode. + * + * bEmulateVFBar0TlbInvalidationRegister + * Flag for whether VF's TLB Invalidate Register region needs emulation. + * + * bClientRmAllocatedCtxBuffer + * Flag for whether engine ctx buffer is managed by client RM. 
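//
// Illustrative usage sketch (not part of the original header): request the
// compute-optimized large page size for the next driver reload, unless it is
// already pending. rmControl() is a hypothetical wrapper over the RM control
// entry point.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS preferComputeLargePages(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS get = { 0 };
    NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS set = { 0 };
    NV_STATUS status;

    status = rmControl(hClient, hDevice,
                       NV0080_CTRL_CMD_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE,
                       &get, sizeof(get));
    if (status != NV_OK ||
        get.pendingSetting == NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE)
        return status;

    set.setting = NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE;
    // The new setting takes effect on the next driver reload.
    return rmControl(hClient, hDevice,
                     NV0080_CTRL_CMD_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE,
                     &set, sizeof(set));
}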
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV0080_CTRL_CMD_GPU_GET_SRIOV_CAPS (0x800291) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS_MESSAGE_ID (0x91U) + +typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS { + NvU32 totalVFs; + NvU32 firstVfOffset; + NvU32 vfFeatureMask; + NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8); + NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8); + NV_DECLARE_ALIGNED(NvU64 bar0Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar1Size, 8); + NV_DECLARE_ALIGNED(NvU64 bar2Size, 8); + NvBool b64bitBar0; + NvBool b64bitBar1; + NvBool b64bitBar2; + NvBool bSriovEnabled; + NvBool bSriovHeavyEnabled; + NvBool bEmulateVFBar0TlbInvalidationRegister; + NvBool bClientRmAllocatedCtxBuffer; +} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS; + + +// Update this macro if new HW exceeds GPU Classlist MAX_SIZE +#define NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE 116 + +#define NV0080_CTRL_CMD_GPU_GET_CLASSLIST_V2 (0x800292) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS_MESSAGE_ID (0x92U) + +typedef struct NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS { + NvU32 numClasses; // __OUT__ + NvU32 classList[NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE]; // __OUT__ +} NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS; + +/* + * NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE + * + * Find a subdevice handle allocated under this device + */ +#define NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE (0x800293) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM_MESSAGE_ID (0x93U) + +typedef struct NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM { + NvU32 subDeviceInst; // [in] + NvHandle hSubDevice; // [out] +} NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM; + +/* + * NV0080_CTRL_CMD_GPU_GET_BRAND_CAPS + * + * This command gets branding information for the device. + * + * brands + * Mask containing branding information. A bit in this + * mask is set if the GPU has particular branding. + * + * Possible status values returned are: + * NV_OK + */ + +#define NV0080_CTRL_GPU_GET_BRAND_CAPS_QUADRO NVBIT(0) +#define NV0080_CTRL_GPU_GET_BRAND_CAPS_NVS NVBIT(1) +#define NV0080_CTRL_GPU_GET_BRAND_CAPS_TITAN NVBIT(2) + +#define NV0080_CTRL_CMD_GPU_GET_BRAND_CAPS (0x800294) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS_MESSAGE_ID (0x94U) + +typedef struct NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS { + NvU32 brands; +} NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS; + +/* + * These are the per-VF BAR1 sizes that we support in MB. + * They are used with the NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE control call and + * should match the NV_XVE_BAR1_CONFIG_SIZE register defines. 
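//
// Illustrative usage sketch (not part of the original header): the V2 class
// list query avoids the two-pass pointer protocol of
// NV0080_CTRL_CMD_GPU_GET_CLASSLIST by using a statically sized array.
// rmControl() is a hypothetical wrapper over the RM control entry point.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NvBool deviceSupportsClass(NvHandle hClient, NvHandle hDevice, NvU32 classId)
{
    NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS params = { 0 };
    NvU32 i;

    if (rmControl(hClient, hDevice, NV0080_CTRL_CMD_GPU_GET_CLASSLIST_V2,
                  &params, sizeof(params)) != NV_OK)
        return NV_FALSE;

    for (i = 0; i < params.numClasses; i++)
    {
        if (params.classList[i] == classId)
            return NV_TRUE;
    }
    return NV_FALSE;
}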
+ */ +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64M 64 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_128M 128 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_256M 256 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_512M 512 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_1G 1024 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_2G 2048 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_4G 4096 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_8G 8192 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_16G 16384 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_32G 32768 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64G 65536 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_128G 131072 +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_MIN NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_64M +#define NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_MAX NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_128G + +#define NV0080_CTRL_GPU_VGPU_NUM_VFS_INVALID NV_U32_MAX + +/* + * NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE + * + * @brief Resize BAR1 per-VF on the given GPU + * vfBar1SizeMB[in] size of per-VF BAR1 size in MB + * numVfs[out] number of VFs that can be created given the new BAR1 size + */ +#define NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE (0x800296) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GPU_INTERFACE_ID << 8) | NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS_MESSAGE_ID (0x96U) + +typedef struct NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS { + NvU32 vfBar1SizeMB; + NvU32 numVfs; +} NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS; + +/* _ctrl0080gpu_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h new file mode 100644 index 000000000..c6f465a12 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h @@ -0,0 +1,277 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
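//
// Illustrative usage sketch (not part of the original header): resize the
// per-VF BAR1 to 1 GB and report how many VFs fit at that size. rmControl()
// is a hypothetical wrapper over the RM control entry point.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS setVfBar1To1G(NvHandle hClient, NvHandle hDevice, NvU32 *pNumVfs)
{
    NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS params = { 0 };
    NV_STATUS status;

    params.vfBar1SizeMB = NV0080_CTRL_GPU_VGPU_VF_BAR1_SIZE_1G; // size in MB

    status = rmControl(hClient, hDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE,
                       &params, sizeof(params));
    if (status == NV_OK)
        *pNumVfs = params.numVfs; // VFs that can be created at the new size

    return status;
}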
+// Source file: ctrl/ctrl0080/ctrl0080gr.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +typedef struct NV0080_CTRL_GR_ROUTE_INFO { + NvU32 flags; + NV_DECLARE_ALIGNED(NvU64 route, 8); +} NV0080_CTRL_GR_ROUTE_INFO; + +/* NV01_DEVICE_XX/NV03_DEVICE gr engine control commands and parameters */ + +/** + * NV0080_CTRL_CMD_GR_GET_CAPS + * + * This command returns the set of graphics capabilities for the device + * in the form of an array of unsigned bytes. Graphics capabilities + * include supported features and required workarounds for the graphics + * engine(s) within the device, each represented by a byte offset into the + * table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_GR_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the graphics caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + */ +#define NV0080_CTRL_CMD_GR_GET_CAPS (0x801102) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GR_GET_CAPS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_GR_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_GR_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_GR_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + + + +/* + * Size in bytes of gr caps table. This value should be one greater + * than the largest byte_index value above. + */ +#define NV0080_CTRL_GR_CAPS_TBL_SIZE 23 + + + +/* + * NV0080_CTRL_CMD_GR_INFO + * + * This structure represents a single 32bit graphics engine value. Clients + * request a particular graphics engine value by specifying a unique bus + * information index. + * + * Legal graphics information index values are: + * NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS + * This index is used to request the number of clip IDs supported by + * the device. + * NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 + * This index is used to request the minimum number of attributes that + * need to be enabled to avoid bug 261894. A return value of 0 + * indicates that there is no minimum and the bug is not present on this + * system. 
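//
// Illustrative usage sketch (not part of the original header): fetch the GR
// caps table into a caller-provided buffer. Individual caps are
// byte_index:bit_mask pairs (not reproduced in this excerpt) that can be
// tested afterwards with the NV0080_CTRL_GR_GET_CAP macro. rmControl() is a
// hypothetical wrapper over the RM control entry point; the
// (NvP64)(uintptr_t) cast stands in for the caller's NvP64 conversion.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS readGrCaps(NvHandle hClient, NvHandle hDevice,
                            NvU8 capsTbl[NV0080_CTRL_GR_CAPS_TBL_SIZE])
{
    NV0080_CTRL_GR_GET_CAPS_PARAMS params = { 0 };

    params.capsTblSize = NV0080_CTRL_GR_CAPS_TBL_SIZE;
    params.capsTbl     = (NvP64)(uintptr_t)capsTbl;

    // On NV_OK, NV0080_CTRL_GR_GET_CAP(capsTbl, <cap define>) is nonzero for
    // each cap bit the RM has set.
    return rmControl(hClient, hDevice, NV0080_CTRL_CMD_GR_GET_CAPS,
                     &params, sizeof(params));
}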
+ */ +typedef struct NV0080_CTRL_GR_INFO { + NvU32 index; + NvU32 data; +} NV0080_CTRL_GR_INFO; + +/* valid graphics info index values */ +#define NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS (0x00000000) +#define NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 (0x00000001) +#define NV0080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK (0x00000002) +#define NV0080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT (0x00000003) +#define NV0080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT (0x00000004) +#define NV0080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE (0x00000005) +#define NV0080_CTRL_GR_INFO_INDEX_VPE_COUNT (0x00000006) +#define NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT (0x00000007) +#define NV0080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR (0x00000008) +#define NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT (0x00000009) +#define NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT (0x0000000A) +#define NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT (0x0000000B) +#define NV0080_CTRL_GR_INFO_INDEX_SM_VERSION (0x0000000C) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM (0x0000000D) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP (0x0000000E) +#define NV0080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES (0x0000000F) +#define NV0080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES (0x00000010) +#define NV0080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY (0x00000011) +#define NV0080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY (0x00000012) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM (0x00000013) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS (0x00000014) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS (0x00000015) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS (0x00000016) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC (0x00000017) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS (0x00000018) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS (0x00000019) +#define NV0080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED (0x0000001A) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS (0x0000001B) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC (0x0000001C) +#define NV0080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT (0x0000001D) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES (0x0000001E) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS (0x0000001F) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC (0x00000020) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS (0x00000021) +#define NV0080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT (0x00000022) +#define NV0080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT (0x00000023) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS (0x00000024) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS (0x00000025) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES (0x00000026) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC (0x00000027) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP (0x00000028) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC (0x00000029) +#define NV0080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC (0x0000002A) +#define NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP (0x0000002B) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT (0x0000002C) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT (0x0000002D) +#define NV0080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT (0x0000002E) + + + +/* When adding a new INDEX, please update MAX_SIZE accordingly + * NOTE: 0080 functionality is merged with 2080 functionality, so this max size + * reflects that. 
+ */ +#define NV0080_CTRL_GR_INFO_INDEX_MAX (0x00000031) +#define NV0080_CTRL_GR_INFO_MAX_SIZE (0x32) /* finn: Evaluated from "(NV0080_CTRL_GR_INFO_INDEX_MAX + 1)" */ + +/* + * NV0080_CTRL_CMD_GR_GET_INFO + * + * This command returns graphics engine information for the associate GPU. + * Request to retrieve graphics information use a list of one or more + * NV0080_CTRL_GR_INFO structures. + * + * grInfoListSize + * This field specifies the number of entries on the caller's + * grInfoList. + * grInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the bus information is to be returned. + * This buffer must be at least as big as grInfoListSize multiplied + * by the size of the NV0080_CTRL_GR_INFO structure. + */ +#define NV0080_CTRL_CMD_GR_GET_INFO (0x801104) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV0080_CTRL_GR_GET_INFO_PARAMS { + NvU32 grInfoListSize; + NV_DECLARE_ALIGNED(NvP64 grInfoList, 8); +} NV0080_CTRL_GR_GET_INFO_PARAMS; + +/* + * NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE + * This command gets the current partition mode of a TSG context. + * + * NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE + * This command sets the partition mode of a TSG context. + * + * NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS + * This structure defines the parameters used for TPC partitioning mode SET/GET commands + * + * hChannelGroup [IN] + * RM Handle to the TSG + * + * mode [IN/OUT] + * Partitioning mode enum value + * For the SET cmd, this is an input parameter + * For the GET cmd, this is an output parameter + * + * bEnableAllTpcs [IN] + * Flag to enable all TPCs by default + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + */ +#define NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE (0x801107) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | 0x7" */ + +#define NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE (0x801108) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | 0x8" */ + +/* Enum for listing TPC partitioning modes */ +typedef enum NV0080_CTRL_GR_TPC_PARTITION_MODE { + NV0080_CTRL_GR_TPC_PARTITION_MODE_NONE = 0, + NV0080_CTRL_GR_TPC_PARTITION_MODE_STATIC = 1, + NV0080_CTRL_GR_TPC_PARTITION_MODE_DYNAMIC = 2, +} NV0080_CTRL_GR_TPC_PARTITION_MODE; + +typedef struct NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS { + NvHandle hChannelGroup; // [in] + NV0080_CTRL_GR_TPC_PARTITION_MODE mode; // [in/out] + NvBool bEnableAllTpcs; // [in/out] + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); // [in] +} NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS; + +/** + * NV0080_CTRL_CMD_GR_GET_CAPS_V2 + * + * This command returns the same set of graphics capabilities for the device + * as @ref NV0080_CTRL_CMD_GR_GET_CAPS. The difference is in the structure + * NV0080_CTRL_GR_GET_INFO_V2_PARAMS, which contains a statically sized array, + * rather than a caps table pointer and a caps table size in + * NV0080_CTRL_GR_GET_INFO_PARAMS. Additionally, + * NV0080_CTRL_GR_GET_INFO_V2_PARAMS contains a parameter for specifying routing + * information, used for MIG. + * + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the graphics caps bits will be written by the RM. + * The caps table is an array of unsigned bytes. 
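//
// Illustrative usage sketch (not part of the original header): query two GR
// info values (core count and SM version) with one
// NV0080_CTRL_CMD_GR_GET_INFO call. rmControl() is a hypothetical wrapper
// over the RM control entry point; the (NvP64)(uintptr_t) cast stands in for
// the caller's NvP64 conversion.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NV_STATUS readCoreCountAndSmVersion(NvHandle hClient, NvHandle hDevice,
                                           NvU32 *pCoreCount, NvU32 *pSmVersion)
{
    NV0080_CTRL_GR_INFO            info[2] = {{ 0 }};
    NV0080_CTRL_GR_GET_INFO_PARAMS params  = { 0 };
    NV_STATUS status;

    info[0].index = NV0080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT;
    info[1].index = NV0080_CTRL_GR_INFO_INDEX_SM_VERSION;

    params.grInfoListSize = 2;
    params.grInfoList     = (NvP64)(uintptr_t)info;

    status = rmControl(hClient, hDevice, NV0080_CTRL_CMD_GR_GET_INFO,
                       &params, sizeof(params));
    if (status == NV_OK)
    {
        *pCoreCount = info[0].data;
        *pSmVersion = info[1].data;
    }
    return status;
}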
+ * + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * bCapsPopulated + * This parameter indicates that the capsTbl has been partially populated by + * previous calls to NV0080_CTRL_CMD_GR_GET_CAPS_V2 on other subdevices. + */ +#define NV0080_CTRL_CMD_GR_GET_CAPS_V2 (0x801109) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0080_CTRL_GR_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_GR_CAPS_TBL_SIZE]; + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvBool bCapsPopulated; +} NV0080_CTRL_GR_GET_CAPS_V2_PARAMS; + +#define NV0080_CTRL_CMD_GR_GET_INFO_V2 (0x801110) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_GR_INTERFACE_ID << 8) | NV0080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV0080_CTRL_GR_GET_INFO_V2_PARAMS { + NvU32 grInfoListSize; + NV0080_CTRL_GR_INFO grInfoList[NV0080_CTRL_GR_INFO_MAX_SIZE]; + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV0080_CTRL_GR_GET_INFO_V2_PARAMS; + +/* _ctrl0080gr_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h new file mode 100644 index 000000000..99a138eaa --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080host.h @@ -0,0 +1,115 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080host.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE host control commands and parameters */ + +/* + * NV0080_CTRL_CMD_HOST_GET_CAPS + * + * This command returns the set of host capabilities for the device + * in the form of an array of unsigned bytes. Host capabilities + * include supported features and required workarounds for the host-related + * engine(s) within the device, each represented by a byte offset into + * the table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. 
+ * This value should be set to NV0080_CTRL_HOST_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the host caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + */ +#define NV0080_CTRL_CMD_HOST_GET_CAPS (0x801401) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_HOST_INTERFACE_ID << 8) | NV0080_CTRL_HOST_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_HOST_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_HOST_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_HOST_GET_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV0080_CTRL_HOST_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV0080_CTRL_HOST_CAPS_SEMA_ACQUIRE_BUG_105665 0:0x01 +#define NV0080_CTRL_HOST_CAPS_DUP_CMPLT_BUG_126020 0:0x02 +/* + * This bit indicates whether CPU mappings obtained with NvRmMapMemory() are + * coherent with the GPU. When this bit is _not_ set, all mappings are to the + * "raw" memory; i.e., they behave as it the NVOS33_FLAGS_MAPPING_DIRECT flag + * were used on a sysmem mapping. + */ +#define NV0080_CTRL_HOST_CAPS_GPU_COHERENT_MAPPING_SUPPORTED 0:0x04 +#define NV0080_CTRL_HOST_CAPS_SYS_SEMA_DEADLOCK_BUG_148216 0:0x08 +#define NV0080_CTRL_HOST_CAPS_SLOWSLI 0:0x10 +#define NV0080_CTRL_HOST_CAPS_SEMA_READ_ONLY_BUG 0:0x20 +#define NV0080_CTRL_HOST_CAPS_LARGE_NONCOH_UPSTR_WRITE_BUG_114871 0:0x40 +#define NV0080_CTRL_HOST_CAPS_LARGE_UPSTREAM_WRITE_BUG_115115 0:0x80 +#define NV0080_CTRL_HOST_CAPS_SEP_VIDMEM_PB_NOTIFIERS_BUG_83923 1:0x02 +#define NV0080_CTRL_HOST_CAPS_P2P_4_WAY 1:0x08 // Deprecated +#define NV0080_CTRL_HOST_CAPS_P2P_8_WAY 1:0x10 // Deprecated +#define NV0080_CTRL_HOST_CAPS_P2P_DEADLOCK_BUG_203825 1:0x20 // Deprecated +#define NV0080_CTRL_HOST_CAPS_VIRTUAL_P2P 1:0x40 +#define NV0080_CTRL_HOST_CAPS_BUG_254580 1:0x80 +#define NV0080_CTRL_HOST_CAPS_COMPRESSED_BL_P2P_BUG_257072 2:0x02 // Deprecated +#define NV0080_CTRL_HOST_CAPS_CROSS_BLITS_BUG_270260 2:0x04 // Deprecated +/* unused 2:0x08 */ +#define NV0080_CTRL_HOST_CAPS_MEM2MEM_BUG_365782 2:0x10 +#define NV0080_CTRL_HOST_CAPS_CPU_WRITE_WAR_BUG_420495 2:0x20 +#define NV0080_CTRL_HOST_CAPS_EXPLICIT_CACHE_FLUSH_REQD 2:0x40 +#define NV0080_CTRL_HOST_CAPS_BAR1_READ_DEADLOCK_BUG_511418 2:0x80 // Deprecated + +/* size in bytes of host caps table */ +#define NV0080_CTRL_HOST_CAPS_TBL_SIZE 3 + +#define NV0080_CTRL_CMD_HOST_GET_CAPS_V2 (0x801402) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_HOST_INTERFACE_ID << 8) | NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_HOST_CAPS_TBL_SIZE]; +} NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS; + +/* _ctrl0080host_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h new file mode 100644 index 000000000..6255ecfc0 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080internal.h @@ -0,0 +1,106 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
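//
// Illustrative usage sketch (not part of the original header): read the host
// caps with the fixed-size V2 call and test one documented cap bit using the
// NV0080_CTRL_HOST_GET_CAP macro. rmControl() is a hypothetical wrapper over
// the RM control entry point.
//
NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                    void *pParams, NvU32 paramsSize); // assumed helper

static NvBool hasCoherentGpuMappings(NvHandle hClient, NvHandle hDevice)
{
    NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS params = {{ 0 }};

    if (rmControl(hClient, hDevice, NV0080_CTRL_CMD_HOST_GET_CAPS_V2,
                  &params, sizeof(params)) != NV_OK)
        return NV_FALSE;

    // Nonzero when CPU mappings obtained with NvRmMapMemory() are coherent
    // with the GPU, per the cap description above.
    return NV0080_CTRL_HOST_GET_CAP(params.capsTbl,
               NV0080_CTRL_HOST_CAPS_GPU_COHERENT_MAPPING_SUPPORTED) != 0;
}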
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080internal.finn +// + +#include "nvlimits.h" +#include "ctrl0080gr.h" + + + +#include "ctrl/ctrl0080/ctrl0080base.h" +#include "ctrl/ctrl0080/ctrl0080perf.h" + + + + +/*! + * @ref NV0080_CTRL_CMD_GR_GET_TPC_PARTITION_MODE + */ +#define NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE (0x802002) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS params, 8); +} NV0080_CTRL_CMD_INTERNAL_GR_GET_TPC_PARTITION_MODE_FINN_PARAMS; + + +/*! + * @ref NV0080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE + */ +#define NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE (0x802003) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS params, 8); +} NV0080_CTRL_CMD_INTERNAL_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS; + + +/*! + * @ref NV0080_CTRL_CMD_PERF_CUDA_LIMIT_SET_CONTROL + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_SET_CONTROL (0x802009) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS_MESSAGE_ID" */ + + +/*! + * This command disables cuda limit activation at teardown of the client. + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_DISABLE (0x802004) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x4" */ + +/*! 
+ * @ref NV0080_CTRL_CMD_PERF_SLI_GPU_BOOST_SYNC_SET_CONTROL + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_SLI_GPU_BOOST_SYNC_SET_CONTROL (0x802007) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID" */ + + + +/*! + * @ref NV0080_CTRL_CMD_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT + */ +#define NV0080_CTRL_CMD_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT (0x802006) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS { + NvU8 powerDisconnectedGpuBus[NV_MAX_DEVICES]; + NvU8 powerDisconnectedGpuCount; +} NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS; + +/* ctrl0080internal_h */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h new file mode 100644 index 000000000..9d46708dc --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080msenc.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080msenc.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE MSENC control commands and parameters */ + +/* + * NV0080_CTRL_CMD_MSENC_GET_CAPS + * + * This command returns the set of MSENC capabilities for the device + * in the form of an array of unsigned bytes. MSENC capabilities + * include supported features and required workarounds for the MSENC-related + * engine(s) within the device, each represented by a byte offset into + * the table and a bit position within that byte. + * + * capsTblSize + * This parameter specifies the size in bytes of the caps table. + * This value should be set to NV0080_CTRL_MSENC_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the MSENC caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_MSENC_GET_CAPS (0x801b01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_MSENC_INTERFACE_ID << 8) | NV0080_CTRL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_MSENC_GET_CAPS_PARAMS { + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV0080_CTRL_MSENC_GET_CAPS_PARAMS; + + + +/* size in bytes of MSENC caps table */ +#define NV0080_CTRL_MSENC_CAPS_TBL_SIZE 4 + +/* _ctrl0080msenc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h new file mode 100644 index 000000000..30a461430 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080nvjpg.h @@ -0,0 +1,78 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080nvjpg.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE NVJPG control commands and parameters */ + + + +/* + * Size in bytes of NVJPG caps table. This value should be one greater + * than the largest byte_index value above. + */ +#define NV0080_CTRL_NVJPG_CAPS_TBL_SIZE 9 + +/* + * NV0080_CTRL_CMD_NVJPG_GET_CAPS_V2 + * + * This command returns the set of NVJPG capabilities for the device + * in the form of an array of unsigned bytes. NVJPG capabilities + * include supported features of the NVJPG engine(s) within the device, + * each represented by a byte offset into the table and a bit position within + * that byte. + * + * [out] capsTbl + * This caps table array is where the NVJPG caps bits will be transferred + * by the RM. The caps table is an array of unsigned bytes. + * instanceId + * This parameter specifies the instance Id of NVDEC for which + * cap bits are requested. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_NVJPG_GET_CAPS_V2 (0x801f02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_NVJPG_INTERFACE_ID << 8) | NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS { + NvU8 capsTbl[NV0080_CTRL_NVJPG_CAPS_TBL_SIZE]; + NvU32 instanceId; +} NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS; + +/* _ctrl0080NVJPG_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h new file mode 100644 index 000000000..f5f16a3b5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080perf.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080perf.finn +// + + + +#define NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS { + NvBool bActivate; +} NV0080_CTRL_PERF_SLI_GPU_BOOST_SYNC_CONTROL_PARAMS; + +#define NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS { + NvBool bCudaLimit; +} NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS; + + + +/* _ctrl0080perf_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h new file mode 100644 index 000000000..ccd000918 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080rc.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080rc.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE gpu control commands and parameters */ + +/* + * NV0080_CTRL_CMD_RC_DISABLE_RESET_CHANNEL_CALLBACK + * + * This command prevents RM from using callbacks when resetting a channel due + * to a page fault. + * + * Possible status return values are: + * NV_OK + */ +#define NV0080_CTRL_CMD_RC_DISABLE_RESET_CHANNEL_CALLBACK (0x801d01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_RC_INTERFACE_ID << 8) | 0x1" */ + +/* + * NV0080_CTRL_CMD_RC_ENABLE_RESET_CHANNEL_CALLBACK + * + * This command permits RM to use callbacks when resetting a channel due + * to a page fault. + * + * Possible status return values are: + * NV_OK + */ +#define NV0080_CTRL_CMD_RC_ENABLE_RESET_CHANNEL_CALLBACK (0x801d02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_RC_INTERFACE_ID << 8) | 0x2" */ + +/* _ctrl0080rc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h new file mode 100644 index 000000000..297e55ab2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080unix.h @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0080/ctrl0080unix.finn +// + + + + +#include "ctrl/ctrl0080/ctrl0080base.h" + +/* NV01_DEVICE_XX/NV03_DEVICE UNIX-specific control commands and parameters */ + +/* + * NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH + * + * This command notifies RM to save or restore the current console state. It is + * intended to be called just before the display driver starts using the display + * engine, and after it has finished using it. + * + * cmd + * Indicates which operation should be performed. + * + * SAVE_VT_STATE + * Records the current state of the console, to be restored later. + * RESTORE_VT_STATE + * Restores the previously-saved console state. + * + * fbInfo + * Returns information about the system's framebuffer console, if one + * exists. If no console is present, all fields will be zero. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH (0x801e01) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS_MESSAGE_ID" */ + +typedef struct NV0080_CTRL_OS_UNIX_VT_SWITCH_FB_INFO { + NvU32 subDeviceInstance; + + NvU16 width; + NvU16 height; + NvU16 depth; + NvU16 pitch; +} NV0080_CTRL_OS_UNIX_VT_SWITCH_FB_INFO; + +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS { + NvU32 cmd; /* in */ + + NV0080_CTRL_OS_UNIX_VT_SWITCH_FB_INFO fbInfo; /* out */ +} NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS; + +/* Called when the display driver needs RM to save the console data, + * which will be used in RM based console restore */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE (0x00000001) + +/* Called when the display driver needs RM to restore the console */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE (0x00000002) + +/* Called when the display driver has restored the console -- RM doesn't + * need to do anything further, but needs to be informed to avoid turning the + * GPU off and thus destroying the console state. */ +#define NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED (0x00000003) + +#define NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO (0x801e02) /* finn: Evaluated from "(FINN_NV01_DEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS_MESSAGE_ID" */ + +#define NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS { + NvU32 subDeviceInstance; /* out */ + + NvU16 width; /* out */ + NvU16 height; /* out */ + NvU16 depth; /* out */ + NvU16 pitch; /* out */ +} NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS; + +/* _ctrl0080unix_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl0090.h b/src/common/sdk/nvidia/inc/ctrl/ctrl0090.h new file mode 100644 index 000000000..83af68427 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl0090.h @@ -0,0 +1,130 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl0090.finn +// + + + +// NV0080_CTRL_GR_TPC_PARTITION_MODE +#include "ctrl/ctrl0080/ctrl0080gr.h" + +/*! + * This file defines control call interfaces for the KernelGraphicsContext + * objects Each channel running within a TSG contains a HW context represented + * by the above object(s). GR permits multiple channels to share a single + * context, and these APIs operate upon that context and may be issued from any + * TSG, channel, or context handle. + */ + +/* + * NV0090_CTRL_CMD_NULL + * + * @brief This command does nothing. + * + * @return NV_OK + */ +#define NV0090_CTRL_CMD_NULL (0x900100) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x0" */ + +/*! 
+ * NV0090_CTRL_CMD_SET_TPC_PARTITION_MODE + * NV0090_CTRL_CMD_GET_TPC_PARTITION_MODE + * + * @brief sets or gets the TPC partition mode for this context + * + * @param[in/out] mode Partitioning mode enum value + * @param[in/out] bEnableAllTpcs Flag to enable all TPCs by default + * + * @return NV_OK on success + * NV_ERR_OBJECT_NOT_FOUND if issued on non-GR ctx + */ +typedef struct NV0090_CTRL_TPC_PARTITION_MODE_PARAMS { + NV0080_CTRL_GR_TPC_PARTITION_MODE mode; + NvBool bEnableAllTpcs; +} NV0090_CTRL_TPC_PARTITION_MODE_PARAMS; + +#define NV0090_CTRL_CMD_SET_TPC_PARTITION_MODE (0x900101) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x1" */ +#define NV0090_CTRL_CMD_INTERNAL_SET_TPC_PARTITION_MODE (0x900102) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x2" */ +#define NV0090_CTRL_CMD_GET_TPC_PARTITION_MODE (0x900103) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x3" */ +#define NV0090_CTRL_CMD_INTERNAL_GET_TPC_PARTITION_MODE (0x900104) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x4" */ + +/** + * NV0090_CTRL_GET_MMU_DEBUG_MODE + * + * @brief retrieve the current MMU debug mode for the grctx according to the HW + * + * @param[out] bMode current MMU debug mode + */ +typedef struct NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS { + NvBool bMode; +} NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS; + +#define NV0090_CTRL_CMD_GET_MMU_DEBUG_MODE (0x900105) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x5" */ +#define NV0090_CTRL_CMD_INTERNAL_GET_MMU_DEBUG_MODE (0x900106) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x6" */ + +/** + * NV0090_CTRL_PROGRAM_VIDEMEM_PROMOTE + * + * @brief This control call is used to set the promotion policies to vidmem through + * per-TSG config + * + * If the request is for less bytes than the current PROMOTE setting, then + * the request is promoted. For example if the request size is for 32B and + * 64B promotion is turned on for that unit, then the request to FB will be + * for all the bytes to the 64B aligned address. + * + * @param[in] l1 + * An input parameter which represents VIDMEM_L1_PROMOTE[17:16]. + * @param[in] t1 + * An input parameter which represents VIDMEM_T1_PROMOTE[19:18]. 
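+ *
+ * Editor's note: a minimal illustrative sketch, not part of the original
+ * header, of filling in the parameter structure defined below, assuming a
+ * caller that wants 64B promotion for both fields:
+ *
+ *   NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS params = { 0 };
+ *   params.l1.size = NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE_64B;
+ *   params.t1.size = NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE_64B;
+ *   // issue NV0090_CTRL_CMD_PROGRAM_VIDMEM_PROMOTE with &params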
+ * + * @return NV_OK on success, or + * NV_ERR_INVALID_ARGUMENT or + * NV_ERR_INVALID_STATE + */ +typedef enum NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE { + NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE_NONE = 0, + NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE_64B = 1, + NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE_128B = 2, +} NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE; + +typedef struct NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_FIELD { + NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE size; +} NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_FIELD; + +#define NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS { + NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_FIELD l1; + NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_FIELD t1; +} NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS; + +#define NV0090_CTRL_CMD_PROGRAM_VIDMEM_PROMOTE (0x900107) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x7" */ +#define NV0090_CTRL_CMD_INTERNAL_PROGRAM_VIDMEM_PROMOTE (0x900108) /* finn: Evaluated from "(FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID << 8) | 0x8" */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl00f8.h b/src/common/sdk/nvidia/inc/ctrl/ctrl00f8.h new file mode 100644 index 000000000..c62c1998e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl00f8.h @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl00f8.finn +// + + + + +#include "ctrl/ctrlxxxx.h" + +#define NV00F8_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x00f8, NV00F8_CTRL_##cat, idx) + +/* NV00F8 command categories (6bits) */ +#define NV00F8_CTRL_RESERVED (0x00U) +#define NV00F8_CTRL_FABRIC (0x01U) + +/* + * NV00F8_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV00F8_CTRL_CMD_NULL (0xf80000U) /* finn: Evaluated from "(FINN_NV_MEMORY_FABRIC_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NV00F8_CTRL_CMD_GET_INFO + * + * Queries memory allocation attributes. + * + * size [OUT] + * Size of the allocation. + * + * pageSize [OUT] + * Page size of the allocation. + * + * allocFlags [OUT] + * Flags passed during the allocation. 
+ */ +#define NV00F8_CTRL_CMD_GET_INFO (0xf80101U) /* finn: Evaluated from "(FINN_NV_MEMORY_FABRIC_FABRIC_INTERFACE_ID << 8) | NV00F8_CTRL_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV00F8_CTRL_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV00F8_CTRL_GET_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 pageSize; + NvU32 allocFlags; +} NV00F8_CTRL_GET_INFO_PARAMS; + +/* + * NV00F8_CTRL_CMD_DESCRIBE + * + * Queries the physical attributes of the fabric memory allocation. + * + * offset [IN] + * Offset into memory allocation to query physical addresses for. + * + * totalPfns [OUT] + * Number of PFNs in memory allocation. + * + * pfnArray [OUT] + * Array of PFNs in memory allocation (2MB page size shifted). + * + * numPfns [OUT] + * Number of valid entries in pfnArray. + * + * Note: This ctrl call is only available for kerenl mode client in vGPU platforms. + */ + +#define NV00F8_CTRL_CMD_DESCRIBE (0xf80102) /* finn: Evaluated from "(FINN_NV_MEMORY_FABRIC_FABRIC_INTERFACE_ID << 8) | NV00F8_CTRL_DESCRIBE_PARAMS_MESSAGE_ID" */ + +#define NV00F8_CTRL_DESCRIBE_PFN_ARRAY_SIZE 512 + +#define NV00F8_CTRL_DESCRIBE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV00F8_CTRL_DESCRIBE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvU64 totalPfns, 8); + NvU32 pfnArray[NV00F8_CTRL_DESCRIBE_PFN_ARRAY_SIZE]; + NvU32 numPfns; +} NV00F8_CTRL_DESCRIBE_PARAMS; + +/* _ctrl00f8_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h new file mode 100644 index 000000000..c7fc38e9a --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080.h @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl2080/ctrl2080gpu.h" +#include "ctrl2080/ctrl2080fuse.h" +#include "ctrl2080/ctrl2080event.h" +#include "ctrl2080/ctrl2080tmr.h" +#include "ctrl2080/ctrl2080bios.h" +#include "ctrl2080/ctrl2080mc.h" +#include "ctrl2080/ctrl2080fifo.h" +#include "ctrl2080/ctrl2080fb.h" +#include "ctrl2080/ctrl2080gr.h" +#include "ctrl2080/ctrl2080bus.h" +#include "ctrl2080/ctrl2080thermal.h" +#include "ctrl2080/ctrl2080fan.h" +#include "ctrl2080/ctrl2080i2c.h" +#include "ctrl2080/ctrl2080internal.h" +#include "ctrl2080/ctrl2080spi.h" +#include "ctrl2080/ctrl2080gpio.h" +#include "ctrl2080/ctrl2080clk.h" +#include "ctrl2080/ctrl2080perf.h" +#include "ctrl2080/ctrl2080perf_cf.h" + + +#include "ctrl2080/ctrl2080rc.h" +#include "ctrl2080/ctrl2080dma.h" +#include "ctrl2080/ctrl2080dmabuf.h" +#include "ctrl2080/ctrl2080nvd.h" +#include "ctrl2080/ctrl2080boardobj.h" +#include "ctrl2080/ctrl2080pmgr.h" +#include "ctrl2080/ctrl2080power.h" +#include "ctrl2080/ctrl2080lpwr.h" +#include "ctrl2080/ctrl2080acr.h" +#include "ctrl2080/ctrl2080ce.h" +#include "ctrl2080/ctrl2080nvlink.h" +#include "ctrl2080/ctrl2080flcn.h" +#include "ctrl2080/ctrl2080volt.h" +#include "ctrl2080/ctrl2080ecc.h" +#include "ctrl2080/ctrl2080cipher.h" +#include "ctrl2080/ctrl2080fla.h" +#include "ctrl2080/ctrl2080gsp.h" + + +#include "ctrl2080/ctrl2080grmgr.h" +#include "ctrl2080/ctrl2080ucodefuzzer.h" + + +#include "ctrl2080/ctrl2080hshub.h" +/* include appropriate os-specific command header */ + + +#include "ctrl2080/ctrl2080unix.h" diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h new file mode 100644 index 000000000..322e119a2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080acr.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080acr.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h new file mode 100644 index 000000000..6578262af --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080base.h @@ -0,0 +1,115 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080base.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV20_SUBDEVICE_XX control commands and parameters */ + +#define NV2080_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x2080, NV2080_CTRL_##cat, idx) + +/* Subdevice command categories (6bits) */ +#define NV2080_CTRL_RESERVED (0x00) +#define NV2080_CTRL_GPU (0x01) +#define NV2080_CTRL_GPU_LEGACY_NON_PRIVILEGED (0x81) /* finn: Evaluated from "(NV2080_CTRL_GPU | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FUSE (0x02) +#define NV2080_CTRL_FUSE_LEGACY_NON_PRIVILEGED (0x82) /* finn: Evaluated from "(NV2080_CTRL_FUSE | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_EVENT (0x03) +#define NV2080_CTRL_TIMER (0x04) +#define NV2080_CTRL_THERMAL (0x05) +#define NV2080_CTRL_THERMAL_LEGACY_PRIVILEGED (0xc5) /* finn: Evaluated from "(NV2080_CTRL_THERMAL | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_THERMAL_LEGACY_NON_PRIVILEGED (0x85) /* finn: Evaluated from "(NV2080_CTRL_THERMAL | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_I2C (0x06) +#define NV2080_CTRL_EXTI2C (0x07) +#define NV2080_CTRL_BIOS (0x08) +#define NV2080_CTRL_CIPHER (0x09) +#define NV2080_CTRL_INTERNAL (0x0A) +#define NV2080_CTRL_CLK_LEGACY_PRIVILEGED (0xd0) /* finn: Evaluated from "(NV2080_CTRL_CLK | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_CLK_LEGACY_NON_PRIVILEGED (0x90) /* finn: Evaluated from "(NV2080_CTRL_CLK | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_CLK (0x10) +#define NV2080_CTRL_FIFO (0x11) +#define NV2080_CTRL_GR (0x12) +#define NV2080_CTRL_FB (0x13) +#define NV2080_CTRL_MC (0x17) +#define NV2080_CTRL_BUS (0x18) +#define NV2080_CTRL_PERF_LEGACY_PRIVILEGED (0xe0) /* finn: Evaluated from "(NV2080_CTRL_PERF | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_PERF_LEGACY_NON_PRIVILEGED (0xa0) /* finn: Evaluated from "(NV2080_CTRL_PERF | 
NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_PERF (0x20) +#define NV2080_CTRL_NVIF (0x21) +#define NV2080_CTRL_RC (0x22) +#define NV2080_CTRL_GPIO (0x23) +#define NV2080_CTRL_GPIO_LEGACY_NON_PRIVILEGED (0xa3) /* finn: Evaluated from "(NV2080_CTRL_GPIO | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_NVD (0x24) +#define NV2080_CTRL_DMA (0x25) +#define NV2080_CTRL_PMGR (0x26) +#define NV2080_CTRL_PMGR_LEGACY_PRIVILEGED (0xe6) /* finn: Evaluated from "(NV2080_CTRL_PMGR | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_PMGR_LEGACY_NON_PRIVILEGED (0xa6) /* finn: Evaluated from "(NV2080_CTRL_PMGR | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_POWER (0x27) +#define NV2080_CTRL_POWER_LEGACY_NON_PRIVILEGED (0xa7) /* finn: Evaluated from "(NV2080_CTRL_POWER | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_LPWR (0x28) +#define NV2080_CTRL_LPWR_LEGACY_NON_PRIVILEGED (0xa8) /* finn: Evaluated from "(NV2080_CTRL_LPWR | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_LPWR_LEGACY_PRIVILEGED (0xe8) /* finn: Evaluated from "(NV2080_CTRL_LPWR | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_ACR (0x29) +#define NV2080_CTRL_CE (0x2A) +#define NV2080_CTRL_SPI (0x2B) +#define NV2080_CTRL_NVLINK (0x30) +#define NV2080_CTRL_FLCN (0x31) +#define NV2080_CTRL_VOLT (0x32) +#define NV2080_CTRL_VOLT_LEGACY_PRIVILEGED (0xf2) /* finn: Evaluated from "(NV2080_CTRL_VOLT | NVxxxx_CTRL_LEGACY_PRIVILEGED)" */ +#define NV2080_CTRL_VOLT_LEGACY_NON_PRIVILEGED (0xb2) /* finn: Evaluated from "(NV2080_CTRL_VOLT | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FAS (0x33) +#define NV2080_CTRL_ECC (0x34) +#define NV2080_CTRL_ECC_NON_PRIVILEGED (0xb4) /* finn: Evaluated from "(NV2080_CTRL_ECC | NVxxxx_CTRL_LEGACY_NON_PRIVILEGED)" */ +#define NV2080_CTRL_FLA (0x35) +#define NV2080_CTRL_GSP (0x36) +#define NV2080_CTRL_NNE (0x37) +#define NV2080_CTRL_GRMGR (0x38) +#define NV2080_CTRL_UCODE_FUZZER (0x39) +#define NV2080_CTRL_DMABUF (0x3A) + +// per-OS categories start at highest category and work backwards +#define NV2080_CTRL_OS_WINDOWS (0x3F) +#define NV2080_CTRL_OS_MACOS (0x3E) +#define NV2080_CTRL_OS_UNIX (0x3D) + + +/* + * NV2080_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_NULL (0x20800000) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl2080base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h new file mode 100644 index 000000000..e70480be8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h @@ -0,0 +1,242 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080bios.finn +// + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX bios-related control commands and parameters */ + + + +typedef struct NV2080_CTRL_BIOS_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_BIOS_INFO; + +/* Maximum number of bios infos that can be queried at once */ +#define NV2080_CTRL_BIOS_INFO_MAX_SIZE (0x0000000F) + +#define NV2080_CTRL_BIOS_INFO_INDEX_REVISION (0x00000000) +#define NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION (0x00000001) + + + +/* + * NV2080_CTRL_CMD_BIOS_GET_INFO + * + * This command returns bios information for the associated GPU. + * Requests to retrieve bios information use a list of one or more + * NV2080_CTRL_BIOS_INFO structures. + * + * biosInfoListSize + * This field specifies the number of entries on the caller's + * biosInfoList. + * biosInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the bios information is to be returned. + * This buffer must be at least as big as biosInfoListSize multiplied + * by the size of the NV2080_CTRL_BIOS_INFO structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_BIOS_GET_INFO (0x20800802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | 0x2" */ + +typedef struct NV2080_CTRL_BIOS_GET_INFO_PARAMS { + NvU32 biosInfoListSize; + NV_DECLARE_ALIGNED(NvP64 biosInfoList, 8); +} NV2080_CTRL_BIOS_GET_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_BIOS_GET_INFO_V2 + * + * This command returns bios information for the associated GPU. + * Requests to retrieve bios information use a list of one or more + * NV2080_CTRL_BIOS_INFO structures. + * + * biosInfoListSize + * This field specifies the number of entries on the caller's + * biosInfoList. + * biosInfoList + * Bios information to be returned. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_BIOS_GET_INFO_V2 (0x20800810) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS { + NvU32 biosInfoListSize; + NV2080_CTRL_BIOS_INFO biosInfoList[NV2080_CTRL_BIOS_INFO_MAX_SIZE]; +} NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS; + + + +/* + * NV2080_CTRL_CMD_BIOS_GET_SKU_INFO + * + * This command returns information about the current board SKU. + * NV_ERR_INVALID_OWNER will be returned if the call + * isn't made with the OS as the administrator. + * + * chipSKU + * This field returns the sku for the current chip. + * chipSKUMod + * This field returns the SKU modifier. + * project + * This field returns the Project (Board) number. 
+ * projectSKU + * This field returns the Project (Board) SKU number. + * CDP + * This field returns the Collaborative Design Project Number. + * projectSKUMod + * This field returns the Project (Board) SKU Modifier. + * businessCycle + * This field returns the business cycle the board is associated with. + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OWNER + */ +#define NV2080_CTRL_CMD_BIOS_GET_SKU_INFO (0x20800808) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum length of parameter strings */ + + +#define NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS { + NvU32 BoardID; + char chipSKU[4]; + char chipSKUMod[2]; + char project[5]; + char projectSKU[5]; + char CDP[6]; + char projectSKUMod[2]; + NvU32 businessCycle; +} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_BIOS_GET_POST_TIME + + * This command is used to get the GPU POST time (in milliseconds). + * If the associated GPU is the master GPU this value will be recorded + * by the VBIOS and retrieved from the KDA buffer. If the associated + * GPU is a secondaryGPU then this value will reflect the devinit + * processing time. + * + * vbiosPostTime + * This parameter returns the vbios post time in msec. + * + * Possible return status values are + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV2080_CTRL_CMD_BIOS_GET_POST_TIME (0x20800809) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS { + NV_DECLARE_ALIGNED(NvU64 vbiosPostTime, 8); +} NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS; + + + +/* + * NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT + * + * This function is used to give out the UEFI version, UEFI image presence and + * Graphics Firmware Mode i.e. whether system is running in UEFI or not. + * + * version + * This parameter returns the UEFI version. + * + * flags + * This parameter indicates UEFI image presence and Graphics Firmware mode. + * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE + * This field returns UEFI presence value. Legal values for this + * field include: + * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_NO + * This value indicates that UEFI image is not present. + * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_YES + * This value indicates that UEFI image is present. + * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_PLACEHOLDER + * This value indicates that there is a dummy UEFI placeholder, + * which can later be updated with a valid UEFI image. + * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_HIDDEN + * This value indicates that UEFI image is hidden. + * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING + * This field indicates the UEFI running value. Legal values for + * this parameter include: + * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_FALSE + * This value indicates that UEFI is not running. + * NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE + * This value indicates that UEFI is running. 
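+ *
+ * Editor's note: an illustrative sketch, not part of the original header,
+ * of decoding the flags word with the field layout defined further below
+ * (PRESENCE in bits 1:0, RUNNING in bit 2); "flags" here stands for the
+ * value returned in the parameter structure:
+ *
+ *   NvU32 presence =  flags       & 0x3;  // _PRESENCE_NO/_YES/_PLACEHOLDER/_HIDDEN
+ *   NvU32 running  = (flags >> 2) & 0x1;  // _RUNNING_FALSE or _RUNNING_TRUE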
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_READY + * NV_ERR_INVALID_STATE + */ + +#define NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT (0x2080080b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS { + NvU32 version; + NvU32 flags; +} NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS; + +/* Legal values for flags parameter */ +#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE 1:0 +#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_NO (0x00000000) +#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_YES (0x00000001) +#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_PLACEHOLDER (0x00000002) +#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_HIDDEN (0x00000003) +#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING 2:2 +#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_FALSE (0x00000000) +#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE (0x00000001) + + + +/* _ctrl2080bios_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h new file mode 100644 index 000000000..6f69c5c52 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobj.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080boardobj.finn +// + + + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h new file mode 100644 index 000000000..caafd7431 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080boardobjgrpclasses.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080boardobjgrpclasses.finn +// + + + +#include "nvtypes.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h new file mode 100644 index 000000000..0cdd10536 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bus.h @@ -0,0 +1,1294 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080bus.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX bus control commands and parameters */ + +/** + * NV2080_CTRL_CMD_BUS_GET_PCI_INFO + * + * This command returns PCI bus identifier information for the specified GPU. + * + * pciDeviceId + * This parameter specifies the internal PCI device and vendor + * identifiers for the GPU. + * pciSubSystemId + * This parameter specifies the internal PCI subsystem identifier for + * the GPU. + * pciRevisionId + * This parameter specifies the internal PCI device-specific revision + * identifier for the GPU. 
+ * pciExtDeviceId + * This parameter specifies the external PCI device identifier for + * the GPU. It contains only the 16-bit device identifier. This + * value is identical to the device identifier portion of + * pciDeviceId since non-transparent bridges are no longer supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_BUS_GET_PCI_INFO (0x20801801) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS { + NvU32 pciDeviceId; + NvU32 pciSubSystemId; + NvU32 pciRevisionId; + NvU32 pciExtDeviceId; +} NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS; + +/* + * NV2080_CTRL_BUS_INFO + * + * This structure represents a single 32bit bus engine value. Clients + * request a particular bus engine value by specifying a unique bus + * information index. + * + * Legal bus information index values are: + * NV2080_CTRL_BUS_INFO_INDEX_TYPE + * This index is used to request the bus type of the GPU. + * Legal return values for this index are: + * NV2080_CTRL_BUS_INFO_TYPE_PCI + * NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS + * NV2080_CTRL_BUS_INFO_TYPE_FPCI + * NV2080_CTRL_BUS_INFO_INDEX_INTLINE + * This index is used to request the interrupt line (or irq) assignment + * for the GPU. The return value is system-dependent. + * NV2080_CTRL_BUS_INFO_INDEX_CAPS + * This index is used to request the bus engine capabilities for the GPU. + * The return value is specified as a mask of capabilities. + * Legal return values for this index are: + * NV2080_CTRL_BUS_INFO_CAPS_NEED_IO_FLUSH + * NV2080_CTRL_BUS_INFO_CAPS_CHIP_INTEGRATED + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CAPS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS + * These indices are used to request PCI Express link-specific + * capabilities values. A value of zero is returned for non-PCIE GPUs. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CTRL_STATUS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CTRL_STATUS + * These indices are used to request PCI Express link-specific + * control status values. A value of zero is returned for non-PCIE GPUs. + * NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS + * This index is used to request coherent dma transfer flags. + * Valid coherent dma transfer flags include: + * NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA + * NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART + * NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS + * This index is used to request noncoherent dma transfer flags. + * Valid noncoherent dma transfer flags include: + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE + * NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE + * This index is used to request the size of the GPU GART in MBytes. + * NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_FLAGS + * This index is used to request GPU GART flags. + * Valid gart flags include: + * NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH + * This flag indicates that GPU GART clients need to do an explicit + * flush via an appropriate SetContextDma method. 
+ * NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED + * This flag indicates that the GART address range includes both + * system and video memory. + * NV2080_CTRL_BUS_INFO_INDEX_BUS_NUMBER + * This index is used to request the PCI-based bus number of the GPU. + * Support for this index is platform-dependent. + * NV2080_CTRL_BUS_INFO_INDEX_DEVICE_NUMBER + * This index is used to request the PCI-based device number of the GPU. + * Support for this index is platform-dependent. + * NV2080_CTRL_BUS_INFO_INDEX_DOMAIN_NUMBER + * This index is used to request the PCI-based domain number of the GPU. + * Support for this index is platform-dependent. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_ERRORS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_ERRORS + * These indices are used to request PCI Express error status. + * The current status is cleared as part of these requests. + * Valid PCI Express error status values include: + * NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR + * NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR + * NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR + * NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST + * NV2080_CTRL_BUS_INFO_INDEX_INTERFACE_TYPE + * This index is used to request the bus interface type of the GPU. + * Legal return values for this index are: + * NV2080_CTRL_BUS_INFO_TYPE_PCI + * NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS + * NV2080_CTRL_BUS_INFO_TYPE_FPCI + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_INFO // DEPRECATED + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO // REPLACES "GEN2" variant + * This index is used to retrieve PCI Express Gen configuration support + * This index is used to retrieve PCI Express Gen2 configuration support + * for the GPU. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN1 + * The GPU is PCI Express Gen1 capable. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN2 + * The GPU is PCI Express Gen2 capable. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN3 + * The GPU is PCI Express Gen3 capable. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN4 + * The GPU is PCI Express Gen4 capable. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN5 + * The GPU is PCI Express Gen5 capable. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN1 + * The GPU is configured in PCI Express Gen1 mode. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN2 + * The GPU is configured in PCI Express Gen2 mode. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN3 + * The GPU is configured in PCI Express Gen3 mode. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN4 + * The GPU is configured in PCI Express Gen4 mode. + * NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN5 + * The GPU is configured in PCI Express Gen5 mode. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER + * This index retrieves PCI Express Advanced Error Reporting (AER) errors + * for the GPU. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CAPS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CAPS + * This index retrieves the PCI Express link capabilities for the + * board. For example, a Quadro FX4700X2 has two GPUs and PCIe + * switch. With this board, this index returns the link + * capabilities of the PCIe switch. In a single GPU board, this + * index returns the link capabilities of the GPU. A value of + * zero is returned for non-PCIE GPUs. + * UPSTREAM_LINK_CAPS is kept for backwards compatibility. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CTRL_STATUS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CTRL_STATUS + * This index retrieves the PCI Express link status for the board. 
+ * For example, a Quadro FX4700X2 has two GPUs and PCIe switch. + * With this board, this index returns the link capabilities of + * the PCIe switch. In a single GPU board, this index returns the + * link status of the GPU. A value of zero is returned for + * non-PCIE GPUs. + * UPSTREAM_LINK_CTRL_STATUS is kept for backwards compatibility. + * NV2080_CTRL_BUS_INFO_INDEX_ASLM_STATUS + * This index is used to request the PCI Express ASLM settings. + * This index is only valid when NV2080_CTRL_BUS_INFO_TYPE indicates PCIE. + * A value of zero is returned for non-PCI Express bus type. + * _ASLM_STATUS_PCIE is always _PRESENT if PCI Express bus type. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_WIDTH_SWITCH_ERROR_COUNT + * This index is used to get the ASLM switching error count. + * A value of zero will be returned if no errors occurs while + * ASLM switching + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_SWITCH_ERROR_COUNT + * This index is used to get the Gen1<-->Gen2 switching error count + * A value of zero will be returned in case speed change from Gen1 to + * Gen2 is clean or if chipset is not gen2 capable or if gen1<-->gen2 + * switching is disabled. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_CYA_ASPM + * This index is used to get the ASPM CYA L0s\L1 enable\disable status. + * Legal return value is specified as a mask of valid and data field + * possible return values are: + * NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_NO + * NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_YES + * NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_DISABLED + * NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S + * NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L1 + * NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S_L1 + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS + * These indices are used to request detailed PCI Express error counters. + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS_CLEAR + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS_CLEAR + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED_CLEAR + * NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS_CLEAR + * These indices are used to clear detailed PCI Express error counters. + * NV2080_CTRL_BUS_INFO_INDEX_GPU_INTERFACE_TYPE + * This index is used to request the internal interface type of the GPU. + * Legal return values for this index are: + * NV2080_CTRL_BUS_INFO_TYPE_PCI + * NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS + * NV2080_CTRL_BUS_INFO_TYPE_FPCI + * NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE + * This index queries the type of sysmem connection to CPU + * NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_PCIE + * NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK + * NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_C2C + * + */ + +typedef struct NV2080_CTRL_BUS_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_BUS_INFO; + +/* valid bus info index values */ + +/** + * This index is used to request the bus type of the GPU. 
+ * Legal return values for this index are: + * NV2080_CTRL_BUS_INFO_TYPE_PCI + * NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS + * NV2080_CTRL_BUS_INFO_TYPE_FPCI + */ +#define NV2080_CTRL_BUS_INFO_INDEX_TYPE (0x00000000) +#define NV2080_CTRL_BUS_INFO_INDEX_INTLINE (0x00000001) +#define NV2080_CTRL_BUS_INFO_INDEX_CAPS (0x00000002) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS (0x00000003) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CAPS (0x00000004) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CAPS (0x00000005) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS (0x00000006) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS (0x00000007) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CTRL_STATUS (0x00000008) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CTRL_STATUS (0x00000009) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CTRL_STATUS (0x0000000A) +/** + * This index is used to request coherent dma transfer flags. + * Valid coherent dma transfer flags include: + * NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA + * NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART + */ +#define NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS (0x0000000B) +/** + * This index is used to request noncoherent dma transfer flags. + * Valid noncoherent dma transfer flags include: + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART + * NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE + */ +#define NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS (0x0000000C) +/** + * This index is used to request the size of the GPU GART in MBytes. + */ +#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE (0x0000000D) +/** + * This index is used to request GPU GART flags. + * Valid gart flags include: + * NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH + * This flag indicates that GPU GART clients need to do an explicit + * flush via an appropriate SetContextDma method. + * NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED + * This flag indicates that the GART address range includes both + * system and video memory. 
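+ *
+ * Editor's note: an illustrative sketch, not part of the original header,
+ * of testing these flags in the returned data word using the bit positions
+ * defined later in this file (REQFLUSH in bit 0, UNIFIED in bit 1); the
+ * variable gartFlags is a hypothetical holder of the queried data value:
+ *
+ *   NvBool bReqFlush = ( gartFlags       & 0x1) != 0;
+ *   NvBool bUnified  = ((gartFlags >> 1) & 0x1) != 0;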
+ */ +#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_FLAGS (0x0000000E) +#define NV2080_CTRL_BUS_INFO_INDEX_BUS_NUMBER (0x0000000F) +#define NV2080_CTRL_BUS_INFO_INDEX_DEVICE_NUMBER (0x00000010) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_ERRORS (0x00000011) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_ERRORS (0x00000012) +#define NV2080_CTRL_BUS_INFO_INDEX_INTERFACE_TYPE (0x00000013) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_INFO (0x00000014) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER (0x00000015) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CAPS (0x00000016) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CTRL_STATUS (0x00000017) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_ASLM_STATUS (0x00000018) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_WIDTH_SWITCH_ERROR_COUNT (0x00000019) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_SPEED_SWITCH_ERROR_COUNT (0x0000001A) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_CYA_ASPM (0x0000001B) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS (0x0000001C) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS (0x0000001D) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED (0x0000001E) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS (0x0000001F) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS_CLEAR (0x00000020) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS_CLEAR (0x00000021) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED_CLEAR (0x00000022) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS_CLEAR (0x00000023) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS (0x00000024) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS (0x00000025) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS (0x00000026) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS (0x00000027) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS_CLEAR (0x00000028) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS_CLEAR (0x00000029) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS_CLEAR (0x0000002A) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS_CLEAR (0x0000002B) +#define NV2080_CTRL_BUS_INFO_INDEX_DOMAIN_NUMBER (0x0000002C) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO (0x0000002D) +#define NV2080_CTRL_BUS_INFO_INDEX_GPU_INTERFACE_TYPE (0x0000002E) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_GEN_INFO (0x0000002F) +#define NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_GEN_INFO (0x00000030) +#define NV2080_CTRL_BUS_INFO_INDEX_MSI_INFO (0x00000031) +/** + * This index is used to request the top 32 bits of the size of the GPU + * GART in MBytes. 
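+ *
+ * Editor's note: an illustrative sketch, not part of the original header,
+ * of combining this value with the lower 32 bits reported for
+ * NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE; gartSizeHi and gartSizeLo are
+ * hypothetical holders of the two queried data values:
+ *
+ *   NvU64 gartSizeMB = ((NvU64)gartSizeHi << 32) | (NvU64)gartSizeLo;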
+ */ +#define NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE_HI (0x00000032) +#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE (0x00000033) +#define NV2080_CTRL_BUS_INFO_INDEX_MAX NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE +#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE (0x00000034) + +/* valid bus info type return values */ +#define NV2080_CTRL_BUS_INFO_TYPE_PCI (0x00000001) +#define NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS (0x00000003) +#define NV2080_CTRL_BUS_INFO_TYPE_FPCI (0x00000004) +#define NV2080_CTRL_BUS_INFO_TYPE_AXI (0x00000008) + +/* valid bus capability flags */ +#define NV2080_CTRL_BUS_INFO_CAPS_NEED_IO_FLUSH (0x00000001) +#define NV2080_CTRL_BUS_INFO_CAPS_CHIP_INTEGRATED (0x00000002) + +/* + * Format of PCIE link caps return values + * Note that Link Capabilities register format is followed only for bits 11:0 + */ +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED 3:0 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_5000MBPS (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_8000MBPS (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_16000MBPS (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_32000MBPS (0x00000005) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_WIDTH 9:4 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM 11:10 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_NONE (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_L0S (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_ASPM_L0S_L1 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN 15:12 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN1 (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN2 (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN3 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN4 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GEN_GEN5 (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL 19:16 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN1 (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN2 (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN3 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN4 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_CURR_LEVEL_GEN5 (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN 23:20 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN1 (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN2 (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN3 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN4 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_GPU_GEN_GEN5 (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES 24:24 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES_ENABLED (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_SPEED_CHANGES_DISABLED (0x00000001) + +/* format of PCIE control status return values */ +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM 1:0 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_DISABLED (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L0S (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L1 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_ASPM_L0S_L1 (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED 19:16 +#define 
NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_5000MBPS (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_8000MBPS (0x00000003) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_16000MBPS (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_32000MBPS (0x00000005) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH 25:20 +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_UNDEFINED (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X1 (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X2 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X4 (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X8 (0x00000008) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X12 (0x0000000C) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X16 (0x00000010) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_WIDTH_X32 (0x00000020) + +/* coherent dma transfer flags */ +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA 0:0 +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_CTXDMA_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART 2:2 +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_COHERENT_DMA_FLAGS_GPUGART_TRUE (0x00000001) + +/* noncoherent dma transfer flags */ +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA 0:0 +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_CTXDMA_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART 2:2 +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_GPUGART_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE 3:3 +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS_COH_MODE_TRUE (0x00000001) + +/* GPU GART flags */ +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH 0:0 +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_REQFLUSH_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED 1:1 +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_GPU_GART_FLAGS_UNIFIED_TRUE (0x00000001) + +/* format of PCIE errors return values */ +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST (0x00000008) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_ENTERED_RECOVERY (0x00000010) + +/* PCIE Gen2 capability and current level */ +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP_FALSE (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CAP_TRUE (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL 1:1 +#define NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL_GEN1 (0x00000000) +#define 
NV2080_CTRL_BUS_INFO_PCIE_GEN2_INFO_CURR_LEVEL_GEN2 (0x00000001) + +/* format of PCIE AER return values */ +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_TRAINING_ERR (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP (0x00000004) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_FC_PROTO_ERR (0x00000008) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT (0x00000010) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_ABORT (0x00000020) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL (0x00000040) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_RCVR_OVERFLOW (0x00000080) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP (0x00000100) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_ECRC_ERROR (0x00000200) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ (0x00000400) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR (0x00010000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP (0x00020000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP (0x00040000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER (0x00080000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT (0x00100000) +#define NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL (0x00200000) + +/* format of PCIE ASLM status return value */ +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE_ERROR (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_PCIE_PRESENT (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED 1:1 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE 2:2 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_CL_CAPABLE_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED 3:3 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_OS_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04 4:4 +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04_MISSING (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_ASLM_STATUS_BR04_PRESENT (0x00000001) + +/* format of GPU CYA CAPS return value */ +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_VALID_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM 2:1 +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_DISABLED (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L1 (0x00000002) +#define NV2080_CTRL_BUS_INFO_PCIE_GPU_CYA_ASPM_L0S_L1 (0x00000003) + +/* format of MSI INFO return value */ +#define NV2080_CTRL_BUS_INFO_MSI_STATUS 0:0 +#define NV2080_CTRL_BUS_INFO_MSI_STATUS_DISABLED (0x00000000) +#define NV2080_CTRL_BUS_INFO_MSI_STATUS_ENABLED (0x00000001) + +/*format of L1PM Substates capabilities information */ +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_2_SUPPORTED_NO (0x00000000) 
+#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED 1:1 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PCIPM_L1_1_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED 2:2 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_2_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED 3:3 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_ASPM_L1_1_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED 4:4 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_L1PM_SUPPORTED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_RESERVED 7:5 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_PORT_RESTORE_TIME 15:8 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_T_POWER_ON_SCALE 17:16 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CAP_T_POWER_ON_VALUE 23:19 + +/*format of L1 PM Substates Control 1 Register */ +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED 0:0 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_2_ENABLED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED 1:1 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_PCIPM_L1_1_ENABLED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED 2:2 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_2_ENABLED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED 3:3 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED_YES (0x00000001) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_ASPM_L1_1_ENABLED_NO (0x00000000) +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_COMMON_MODE_RESTORE_TIME 15:8 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_LTR_L1_2_THRESHOLD_VALUE 25:16 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL1_LTR_L1_2_THRESHOLD_SCALE 31:29 + +/*format of L1 PM Substates Control 2 Register */ +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL2_T_POWER_ON_SCALE 1:0 +#define NV2080_CTRL_BUS_INFO_PCIE_L1_SS_CTRL2_T_POWER_ON_VALUE 7:3 + +/* valid sysmem connection type values */ +#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_PCIE (0x00000000) +#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK (0x00000001) +#define NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_C2C (0x00000002) + +/** + * NV2080_CTRL_CMD_BUS_GET_INFO + * + * This command returns bus engine information for the associated GPU. + * Requests to retrieve bus information use a list of one or more + * NV2080_CTRL_BUS_INFO structures. + * + * busInfoListSize + * This field specifies the number of entries on the caller's + * busInfoList. + * busInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the bus information is to be returned. + * This buffer must be at least as big as busInfoListSize multiplied + * by the size of the NV2080_CTRL_BUS_INFO structure. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_BUS_GET_INFO (0x20801802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_GET_INFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_BUS_GET_INFO_PARAMS { + NvU32 busInfoListSize; + NV_DECLARE_ALIGNED(NvP64 busInfoList, 8); +} NV2080_CTRL_BUS_GET_INFO_PARAMS; + +#define NV2080_CTRL_CMD_BUS_GET_INFO_V2 (0x20801823) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_GET_INFO_V2_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV2080_CTRL_BUS_GET_INFO_V2_PARAMS { + NvU32 busInfoListSize; + NV2080_CTRL_BUS_INFO busInfoList[NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE]; +} NV2080_CTRL_BUS_GET_INFO_V2_PARAMS; + +/* + * NV2080_CTRL_BUS_PCI_BAR_INFO + * + * This structure describes PCI bus BAR information. + * + * flags + * This field contains any flags for the associated BAR. + * barSize + * This field contains the size in megabytes of the associated BAR. + * DEPRECATED, please use barSizeBytes. + * barSizeBytes + * This field contains the size in bytes of the associated BAR. + * barOffset + * This field contains the PCI bus offset in bytes of the associated BAR. + */ +typedef struct NV2080_CTRL_BUS_PCI_BAR_INFO { + NvU32 flags; + NvU32 barSize; + NV_DECLARE_ALIGNED(NvU64 barSizeBytes, 8); + NV_DECLARE_ALIGNED(NvU64 barOffset, 8); +} NV2080_CTRL_BUS_PCI_BAR_INFO; + +/* + * NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO + * + * This command returns PCI bus BAR information. + * + * barCount + * This field returns the number of BARs for the associated subdevice. + * Legal values for this parameter will be between one to + * NV2080_CTRL_BUS_MAX_BARS. + * barInfo + * This field returns per-BAR information in the form of an array of + * NV2080_CTRL_BUS_PCI_BAR_INFO structures. Information for as many as + * NV2080_CTRL_BUS_MAX_PCI_BARS will be returned. Any unused entries will + * be initialized to zero. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO (0x20801803) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum number of BARs per subdevice */ +#define NV2080_CTRL_BUS_MAX_PCI_BARS (8) + +#define NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS { + NvU32 pciBarCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_BUS_PCI_BAR_INFO pciBarInfo[NV2080_CTRL_BUS_MAX_PCI_BARS], 8); +} NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_SET_PCIE_LINK_WIDTH + * + * This command sets PCI-E link width to the specified new value. + * + * pcieLinkWidth + * This field specifies the new PCI-E link width. + * + * failingReason + * This field specifies the reason why the change of link width fails. + * It is valid only when this routine returns NV_ERR_GENERIC. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_GENERIC + */ +#define NV2080_CTRL_CMD_BUS_SET_PCIE_LINK_WIDTH (0x20801804) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS { + NvU32 pcieLinkWidth; + NvU32 failingReason; +} NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS; + +#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_PSTATE (0x00000001) +#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_PCIE_CFG_ACCESS (0x00000002) +#define NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_ERROR_TRAINING (0x00000004) + +/* + * NV2080_CTRL_CMD_BUS_SET_PCIE_SPEED + * + * This command Initiates a change in PCIE Bus Speed + * + * busSpeed + * This field is the target speed to train to. + * Legal values for this parameter are: + * NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_8000MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_16000MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_32000MBPS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_BUS_SET_PCIE_SPEED (0x20801805) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS { + NvU32 busSpeed; +} NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS; + +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS (0x00000002) +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_8000MBPS (0x00000003) +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_16000MBPS (0x00000004) +#define NV2080_CTRL_BUS_SET_PCIE_SPEED_32000MBPS (0x00000005) + +/* + * NV2080_CTRL_CMD_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED + * + * This command Initiates a change in PCIE Bus Speed for a HWBC device's upstream + * link. + * + * busSpeed + * This field specifies the target speed to which to train. + * Legal values for this parameter are: + * NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS + * primaryBus + * This field is the PCI Express Primary Bus number that uniquely identifies + * a HWBC device's upstream port, i.e. the BR04 Upstream Port. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED (0x20801806) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS { + NvU32 busSpeed; + NvU8 primaryBus; +} NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS; + +#define NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_5000MBPS (0x00000002) + +/* + * NV2080_CTRL_CMD_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED + * + * This command gets the current PCIE Bus Speed for a HWBC device's upstream + * link. + * + * primaryBus + * This field is the PCI Express Primary Bus number that uniquely identifies + * a HWBC device's upstream port, i.e. the BR04 Upstream Port. 
+ * busSpeed + * This field specifies a pointer in the caller's address space + * to the NvU32 variable into which the bus speed is to be returned. + * On success, this parameter will contain one of the following values: + * NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS + * NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED (0x20801807) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS { + NvU32 busSpeed; + NvU8 primaryBus; +} NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS; + +#define NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_2500MBPS (0x00000001) +#define NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_5000MBPS (0x00000002) + +/* + * NV2080_CTRL_CMD_BUS_MAP_BAR2 + * + * This command sets up BAR2 page tables for passed-in memory handle. + * This command MUST be executed before NV2080_CTRL_CMD_BUS_UNMAP_BAR2 + * or NV2080_CTRL_CMD_BUS_VERIFY_BAR2. Not supported on SLI. + * + * hMemory + * This field is a handle to physical memory. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV2080_CTRL_CMD_BUS_MAP_BAR2 (0x20801809) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_MAP_BAR2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_MAP_BAR2_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_BUS_MAP_BAR2_PARAMS { + NvHandle hMemory; +} NV2080_CTRL_BUS_MAP_BAR2_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_UNMAP_BAR2 + * + * This command unmaps any pending BAR2 page tables created with + * NV2080_CTRL_CMD_BUS_MAP_BAR2 command. The handle passed in must + * match the handle used to map the page tables. Not supported on SLI. + * + * hMemory + * This field is a handle to physical memory. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV2080_CTRL_CMD_BUS_UNMAP_BAR2 (0x2080180a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS { + NvHandle hMemory; +} NV2080_CTRL_BUS_UNMAP_BAR2_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_VERIFY_BAR2 + * + * This command tests BAR2 against BAR0 if there are BAR2 page tables + * set up with NV2080_CTRL_CMD_BUS_MAP_BAR2 command. The handle passed + * in must match the handle used to map the page tables. Not supported on SLI. + * + * hMemory + * This field is a handle to physical memory. + * offset + * Base offset of the surface where the test will make its first dword write. + * size + * Test will write '(size/4)*4' bytes starting at surface offset `offset'. 
+ *
+ * Possible status values returned are
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *
+ */
+#define NV2080_CTRL_CMD_BUS_VERIFY_BAR2 (0x2080180b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS_MESSAGE_ID (0xBU)
+
+typedef struct NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS {
+    NvHandle hMemory;
+    NvU32    offset;
+    NvU32    size;
+} NV2080_CTRL_BUS_VERIFY_BAR2_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_HWBC_GET_UPSTREAM_BAR0
+ *
+ * This command gets the BAR0 for a HWBC device's upstream port.
+ *
+ * primaryBus
+ *   This field is the PCI Express Primary Bus number that uniquely identifies
+ *   a HWBC device's upstream port, i.e. the BR04 Upstream Port.
+ * physBAR0
+ *   This field returns the BAR0 physical address of the HWBC device's
+ *   upstream port.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_BUS_HWBC_GET_UPSTREAM_BAR0 (0x2080180e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 physBAR0, 8);
+    NvU8 primaryBus;
+} NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_SERVICE_GPU_MULTIFUNC_STATE
+ * This command reports the current audio device power state or sets a new power state.
+ *
+ * command
+ *   This parameter specifies the target GPU multifunction state.
+ *      NV2080_CTRL_BUS_ENABLE_GPU_MULTIFUNC_STATE  Enables the multifunction state.
+ *      NV2080_CTRL_BUS_DISABLE_GPU_MULTIFUNC_STATE Disables the multifunction state.
+ *      NV2080_CTRL_BUS_GET_GPU_MULTIFUNC_STATE     Gets the current device power state.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_GENERIC
+ */
+
+#define NV2080_CTRL_CMD_BUS_SERVICE_GPU_MULTIFUNC_STATE (0x20801812) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS_MESSAGE_ID (0x12U)
+
+typedef struct NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS {
+    NvU8  command;
+    NvU32 deviceState;
+} NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS;
+
+#define NV2080_CTRL_BUS_ENABLE_GPU_MULTIFUNC_STATE  (0x00000000)
+#define NV2080_CTRL_BUS_DISABLE_GPU_MULTIFUNC_STATE (0x00000001)
+#define NV2080_CTRL_BUS_GET_GPU_MULTIFUNC_STATE     (0x00000002)
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS
+ * This command gets the counts for different counter types.
+ *
+ * pexCounterMask
+ *   This parameter specifies the input mask for desired counter types.
+ *
+ * pexTotalCorrectableErrors
+ *   This parameter gives the total correctable errors, which includes
+ *   NV_XVE_ERROR_COUNTER1 plus LCRC Errors, 8B10B Errors, NAKS and Failed L0s.
+ *
+ * pexCorrectableErrors
+ *   This parameter includes only the NV_XVE_ERROR_COUNTER1 value.
+ *
+ * pexTotalNonFatalErrors
+ *   This parameter returns total Non-Fatal Errors which may or may not
+ *   include Correctable Errors.
+ *
+ * pexTotalFatalErrors
+ *   This parameter returns Total Fatal Errors.
+ *
+ * pexTotalUnsupportedReqs
+ *   This parameter returns Total Unsupported Requests.
+ *
+ * pexErrors
+ *   This array contains the error counts for each error type as requested from
+ *   the pexCounterMask.
+ *   The array indexes correspond to the mask bits one-to-one.
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_PEX_COUNTERS (0x20801813) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PEX_MAX_COUNTER_TYPES 31
+#define NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+    NvU32 pexTotalCorrectableErrors;
+    NvU16 pexCorrectableErrors;
+    NvU8  pexTotalNonFatalErrors;
+    NvU8  pexTotalFatalErrors;
+    NvU8  pexTotalUnsupportedReqs;
+    NvU16 pexCounters[NV2080_CTRL_PEX_MAX_COUNTER_TYPES];
+} NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS;
+
+/*
+ * Note that MAX_COUNTER_TYPES will need to be updated each time
+ * a new counter type gets added to the list below. The value
+ * depends on the bits set for the last valid define. Look
+ * at pexCounters[] comments above for details.
+ *
+ */
+#define NV2080_CTRL_BUS_PEX_COUNTER_TYPE 0x00000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_RECEIVER_ERRORS 0x00000001
+#define NV2080_CTRL_BUS_PEX_COUNTER_REPLAY_COUNT 0x00000002
+#define NV2080_CTRL_BUS_PEX_COUNTER_REPLAY_ROLLOVER_COUNT 0x00000004
+#define NV2080_CTRL_BUS_PEX_COUNTER_BAD_DLLP_COUNT 0x00000008
+#define NV2080_CTRL_BUS_PEX_COUNTER_BAD_TLP_COUNT 0x00000010
+#define NV2080_CTRL_BUS_PEX_COUNTER_8B10B_ERRORS_COUNT 0x00000020
+#define NV2080_CTRL_BUS_PEX_COUNTER_SYNC_HEADER_ERRORS_COUNT 0x00000040
+#define NV2080_CTRL_BUS_PEX_COUNTER_LCRC_ERRORS_COUNT 0x00000080
+#define NV2080_CTRL_BUS_PEX_COUNTER_FAILED_L0S_EXITS_COUNT 0x00000100
+#define NV2080_CTRL_BUS_PEX_COUNTER_NAKS_SENT_COUNT 0x00000200
+#define NV2080_CTRL_BUS_PEX_COUNTER_NAKS_RCVD_COUNT 0x00000400
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_ERRORS 0x00000800
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_TO_RECOVERY_COUNT 0x00001000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L0_TO_RECOVERY_COUNT 0x00002000
+#define NV2080_CTRL_BUS_PEX_COUNTER_RECOVERY_COUNT 0x00004000
+#define NV2080_CTRL_BUS_PEX_COUNTER_CHIPSET_XMIT_L0S_ENTRY_COUNT 0x00008000
+#define NV2080_CTRL_BUS_PEX_COUNTER_GPU_XMIT_L0S_ENTRY_COUNT 0x00010000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_ENTRY_COUNT 0x00020000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1P_ENTRY_COUNT 0x00040000
+#define NV2080_CTRL_BUS_PEX_COUNTER_DEEP_L1_ENTRY_COUNT 0x00080000
+#define NV2080_CTRL_BUS_PEX_COUNTER_ASLM_COUNT 0x00100000
+#define NV2080_CTRL_BUS_PEX_COUNTER_TOTAL_CORR_ERROR_COUNT 0x00200000
+#define NV2080_CTRL_BUS_PEX_COUNTER_CORR_ERROR_COUNT 0x00400000
+#define NV2080_CTRL_BUS_PEX_COUNTER_NON_FATAL_ERROR_COUNT 0x00800000
+#define NV2080_CTRL_BUS_PEX_COUNTER_FATAL_ERROR_COUNT 0x01000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_UNSUPP_REQ_COUNT 0x02000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_1_ENTRY_COUNT 0x04000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_2_ENTRY_COUNT 0x08000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_2_ABORT_COUNT 0x10000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1SS_TO_DEEP_L1_TIMEOUT_COUNT 0x20000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_L1_SHORT_DURATION_COUNT 0x40000000
+
+/*
+ * NV2080_CTRL_CMD_BUS_CLEAR_PEX_COUNTERS
+ * This command clears the counts for the specified counter types.
+ *
+ * pexCounterMask
+ *   This parameter specifies the input mask for desired counters to be
+ *   cleared. Note that not all counters can be cleared.
+ */
+
+#define NV2080_CTRL_CMD_BUS_CLEAR_PEX_COUNTERS (0x20801814) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x14U)
+
+typedef struct NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS {
+    NvU32 pexCounterMask;
+} NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_FREEZE_PEX_COUNTERS
+ * This command freezes or unfreezes the specified counter types.
+ *
+ * pexCounterMask
+ *   This parameter specifies the input mask for desired counters to be
+ *   frozen. Note that not all counters can be frozen.
+ *
+ * bFreezeRmCounter
+ *   This parameter decides whether the API freezes or unfreezes the counters.
+ *   NV_TRUE for freeze and NV_FALSE for unfreeze.
+ */
+
+#define NV2080_CTRL_CMD_BUS_FREEZE_PEX_COUNTERS (0x20801815) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS {
+    NvU32  pexCounterMask;
+    NvBool bFreezeRmCounter;
+} NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS
+ * This command gets the per-lane counters and the type of errors detected.
+ *
+ * pexLaneErrorStatus
+ *   This mask specifies the type of error detected on any of the lanes.
+ *
+ * pexLaneCounter
+ *   This array gives the counters per lane. Each index corresponds to lane
+ *   index + 1.
+ */
+
+#define NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS (0x20801816) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_PEX_MAX_LANES 16
+#define NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS {
+    NvU16 pexLaneErrorStatus;
+    NvU8  pexLaneCounter[NV2080_CTRL_PEX_MAX_LANES];
+} NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS;
+
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_TYPE 0x00000000
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SYNC_HDR_CODING_ERR 0x00000001
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SYNC_HDR_ORDER_ERR 0x00000002
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_OS_DATA_SEQ_ERR 0x00000004
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_TSX_DATA_SEQ_ERR 0x00000008
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_SKPOS_LFSR_ERR 0x00000010
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RX_CLK_FIFO_OVERFLOW 0x00000020
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_ELASTIC_FIFO_OVERFLOW 0x00000040
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RCVD_LINK_NUM_ERR 0x00000080
+#define NV2080_CTRL_BUS_PEX_COUNTER_LANE_RCVD_LANE_NUM_ERR 0x00000100
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY (0x20801817) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS {
+    NvBool bPexLtrRegkeyOverride;
+    NvBool bPexRootPortLtrSupported;
+    NvBool bPexGpuLtrSupported;
+    NvU16  pexLtrSnoopLatencyValue;
+    NvU8   pexLtrSnoopLatencyScale;
+    NvU16  pexLtrNoSnoopLatencyValue;
+    NvU8   pexLtrNoSnoopLatencyScale;
+} NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS;
+
+#define NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY (0x20801818) /* finn: Evaluated from 
"(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS { + NvU16 pexLtrSnoopLatencyValue; + NvU8 pexLtrSnoopLatencyScale; + NvU16 pexLtrNoSnoopLatencyValue; + NvU8 pexLtrNoSnoopLatencyScale; +} NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_GET_PEX_UTIL_COUNTERS + * This command gets the counts for different counter types. + * + * pexCounterMask + * This parameter specifies the input mask for desired counter types. + * + */ +#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_BYTES 0x00000001 +#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_BYTES 0x00000002 +#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_L0 0x00000004 +#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_L0 0x00000008 +#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_TX_L0S 0x00000010 +#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_RX_L0S 0x00000020 +#define NV2080_CTRL_BUS_PEX_UTIL_COUNTER_NON_L0_L0S 0x00000040 +#define NV2080_CTRL_PEX_UTIL_MAX_COUNTER_TYPES 7 + +#define NV2080_CTRL_CMD_BUS_GET_PEX_UTIL_COUNTERS (0x20801819) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS { + NvU32 pexCounterMask; + NvU32 pexCounters[NV2080_CTRL_PEX_UTIL_MAX_COUNTER_TYPES]; +} NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_CLEAR_PEX_UTIL_COUNTER_COUNTERS + * This command gets the counts for different counter types. + * + * pexCounterMask + * This parameter specifies the input mask for desired counters to be + * cleared. Note that all counters cannot be cleared. + * + * NOTE: EX_UTIL_COUNTER_UPSTREAM & NV2080_CTRL_BUS_PEX_UTIL_COUNTER_DOWNSTREAM + * belongs to PMU. The ctrl function will not reset nor disable/enable them. + */ +#define NV2080_CTRL_CMD_BUS_CLEAR_PEX_UTIL_COUNTERS (0x20801820) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS { + NvU32 pexCounterMask; +} NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS; + +#define NV2080_CTRL_CMD_BUS_GET_BFD (0x20801821) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_BFD_PARAMSARR_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_BUS_GET_BFD_PARAMS { + NvBool valid; + NvU16 deviceID; + NvU16 vendorID; + NvU32 domain; + NvU16 bus; + NvU16 device; + NvU8 function; +} NV2080_CTRL_BUS_GET_BFD_PARAMS; + +#define NV2080_CTRL_BUS_GET_BFD_PARAMSARR_MESSAGE_ID (0x21U) + +typedef struct NV2080_CTRL_BUS_GET_BFD_PARAMSARR { + NV2080_CTRL_BUS_GET_BFD_PARAMS params[32]; +} NV2080_CTRL_BUS_GET_BFD_PARAMSARR; + +/* + * NV2080_CTRL_CMD_BUS_GET_ASPM_DISABLE_FLAGS + * This command gets the following mentioned PDB Properties + * + * aspmDisableFlags[] + * NvBool array stores each of the properties' state. the array size can + * be increased as per requirement. + * + * NOTE: When adding more properties, increment NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS. 
+ */ + +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_L1_MASK_REGKEY_OVERRIDE 0x00000000 +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_OS_RM_MAKES_POLICY_DECISIONS 0x00000001 +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_BEHIND_BRIDGE 0x00000002 +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_UNSUPPORTED 0x00000003 +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED 0x00000004 +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY 0x00000005 +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_CL_ASPM_L1_CHIPSET_DISABLED 0x00000006 +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY 0x00000007 +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_BIF_ENABLE_ASPM_DT_L1 0x00000008 +//append properties here + +#define NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS 9 + +#define NV2080_CTRL_CMD_BUS_GET_ASPM_DISABLE_FLAGS (0x20801822) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS { + NvBool aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_MAX_FLAGS]; +} NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS; + +#define NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS (0x20801824) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS { + NvBool bEnable; +} NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_GET_NVLINK_PEER_ID_MASK + * + * This command returns cached(SW only) NVLINK peer id mask. Currently, this control + * call is only needed inside a SR-IOV enabled guest where page table management is + * being done by the guest. Guest needs this mask to derive the peer id corresponding + * to the peer GPU. This peer id will then be programmed inside the PTEs by guest RM. + * + * nvlinkPeerIdMask[OUT] + * - The peer id mask is returned in this array. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_BUS_GET_NVLINK_PEER_ID_MASK (0x20801825) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_MAX_NUM_GPUS 32 + +#define NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS { + NvU32 nvlinkPeerIdMask[NV2080_CTRL_BUS_MAX_NUM_GPUS]; +} NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS + * This command takes parameters eomMode, eomNblks and eomNerrs from the client + * and then sends it out to PMU. + */ +#define NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS (0x20801826) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS_MESSAGE_ID (0x26U) + +typedef struct NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS { + NvU8 eomMode; + NvU8 eomNblks; + NvU8 eomNerrs; +} NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE + * This command takes parameters UPHY register's address and lane from the client + * and then sends it out to PMU. 
+ */ +#define NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE (0x20801827) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS_MESSAGE_ID (0x27U) + +typedef struct NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS { + NvU32 regAddress; + NvU32 laneSelectMask; + NvU16 regValue; +} NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS; + +/* + * NV2080_CTRL_CMD_BUS_GET_EOM_STATUS + * + */ +#define NV2080_CTRL_CMD_BUS_GET_EOM_STATUS (0x20801828) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_MAX_NUM_LANES 32 + +#define NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS_MESSAGE_ID (0x28U) + +typedef struct NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS { + NvU8 eomMode; + NvU8 eomNblks; + NvU8 eomNerrs; + NvU8 eomBerEyeSel; + NvU8 eomPamEyeSel; + NvU32 laneMask; + NvU16 eomStatus[NV2080_CTRL_BUS_MAX_NUM_LANES]; +} NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS; + + + +/* + * NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS + * + * This command disables the GPU system memory access after quiescing the GPU, + * or re-enables sysmem access. + * + * bDisable + * If NV_TRUE the GPU is quiesced and system memory access is disabled . + * If NV_FALSE the GPU system memory access is re-enabled and the GPU is resumed. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS (0x2080182c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID << 8) | NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS_MESSAGE_ID (0x2CU) + +typedef struct NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS { + NvBool bDisable; +} NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS; + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h new file mode 100644 index 000000000..f36ab7e1f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h @@ -0,0 +1,309 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080ce.finn +// + + + +/* NV20_SUBDEVICE_XX ce control commands and parameters */ + +#include "ctrl2080common.h" + +/* + * NV2080_CTRL_CMD_CE_GET_CAPS + * + * This command returns the set of CE capabilities for the device + * in the form of an array of unsigned bytes. + * + * ceEngineType + * This parameter specifies the copy engine type + * capsTblSize + * This parameter specifies the size in bytes of the caps table per CE. + * This value should be set to NV2080_CTRL_CE_CAPS_TBL_SIZE. + * capsTbl + * This parameter specifies a pointer to the client's caps table buffer + * into which the CE caps bits will be transferred by the RM. + * The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_CE_GET_CAPS (0x20802a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_CAPS_PARAMS_MESSAGE_ID" */ + +/* + * Size in bytes of CE caps table. This value should be one greater + * than the largest byte_index value below. + */ +#define NV2080_CTRL_CE_CAPS_TBL_SIZE 2 + +#define NV2080_CTRL_CE_GET_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CE_GET_CAPS_PARAMS { + NvU32 ceEngineType; + NvU32 capsTblSize; + NV_DECLARE_ALIGNED(NvP64 capsTbl, 8); +} NV2080_CTRL_CE_GET_CAPS_PARAMS; + +#define NV2080_CTRL_CMD_CE_GET_CAPS_V2 (0x20802a03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_CE_GET_CAPS_V2_PARAMS { + NvU32 ceEngineType; + NvU8 capsTbl[NV2080_CTRL_CE_CAPS_TBL_SIZE]; +} NV2080_CTRL_CE_GET_CAPS_V2_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV2080_CTRL_CE_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV2080_CTRL_CE_CAPS_CE_GRCE 0:0x01 +#define NV2080_CTRL_CE_CAPS_CE_SHARED 0:0x02 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM_READ 0:0x04 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM_WRITE 0:0x08 +#define NV2080_CTRL_CE_CAPS_CE_NVLINK_P2P 0:0x10 +#define NV2080_CTRL_CE_CAPS_CE_SYSMEM 0:0x20 +#define NV2080_CTRL_CE_CAPS_CE_P2P 0:0x40 +#define NV2080_CTRL_CE_CAPS_CE_BL_SIZE_GT_64K_SUPPORTED 0:0x80 +#define NV2080_CTRL_CE_CAPS_CE_SUPPORTS_NONPIPELINED_BL 1:0x01 +#define NV2080_CTRL_CE_CAPS_CE_SUPPORTS_PIPELINED_BL 1:0x02 + + + +/* + * NV2080_CTRL_CE_CAPS_CE_GRCE + * Set if the CE is synchronous with GR + * + * NV2080_CTRL_CE_CAPS_CE_SHARED + * Set if the CE shares physical CEs with any other CE + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM_READ + * Set if the CE can give enhanced performance for SYSMEM reads over other CEs + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM_WRITE + * Set if the CE can give enhanced performance for SYSMEM writes over other CEs + * + * NV2080_CTRL_CE_CAPS_CE_NVLINK_P2P + * Set if the CE can be used for P2P transactions using NVLINK + * Once a CE is exposed for P2P over NVLINK, it will remain available for the life of RM + * PCE2LCE mapping may change based on the number of GPUs registered in RM however + * + * NV2080_CTRL_CE_CAPS_CE_SYSMEM + * Set if the CE can be used for SYSMEM transactions + * + * NV2080_CTRL_CE_CAPS_CE_P2P + * Set if the CE can be used for P2P transactions + * + * NV2080_CTRL_CE_CAPS_CE_BL_SIZE_GT_64K_SUPPORTED + * Set if the CE supports BL copy size greater than 64K + * + * NV2080_CTRL_CE_CAPS_CE_SUPPORTS_NONPIPELINED_BL + * Set if the CE 
supports non-pipelined Block linear + * + * NV2080_CTRL_CE_CAPS_CE_SUPPORTS_PIPELINED_BL + * Set if the CE supports pipelined Block Linear + */ + + + +/* + * NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK + * + * This command returns the mapping of PCE's for the given LCE + * + * ceEngineType + * This parameter specifies the copy engine type + * pceMask + * This parameter specifies a mask of PCEs that correspond + * to the LCE specified in ceEngineType + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK (0x20802a02) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS { + NvU32 ceEngineType; + NvU32 pceMask; +} NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_SET_PCE_LCE_CONFIG + * + * This command sets the PCE2LCE configuration + * + * pceLceConfig[NV2080_CTRL_MAX_PCES] + * This parameter specifies the PCE-LCE mapping requested + * grceLceConfig[NV2080_CTRL_MAX_GRCES] + * This parameter specifies which LCE is the GRCE sharing with + * 0xF -> Does not share with any LCE + * 0-MAX_LCE -> Shares with the given LCE + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_CE_SET_PCE_LCE_CONFIG (0x20802a04) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MAX_PCES 18 +#define NV2080_CTRL_MAX_GRCES 2 + +#define NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS { + NvU32 ceEngineType; + NvU32 pceLceMap[NV2080_CTRL_MAX_PCES]; + NvU32 grceSharedLceMap[NV2080_CTRL_MAX_GRCES]; +} NV2080_CTRL_CE_SET_PCE_LCE_CONFIG_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS + * + * This command updates the PCE-LCE mappings + * + * pPceLceMap [IN] + * This parameter tracks the array of PCE-LCE mappings. + * + * pGrceConfig [IN] + * This parameter tracks the array of GRCE configs. + * 0xF -> GRCE does not share with any LCE + * 0-MAX_LCE -> GRCE shares with the given LCE + * + * exposeCeMask [IN] + * This parameter specifies the mask of LCEs to export to the + * clients after the update. + * + * bUpdateNvlinkPceLce [IN] + * Whether PCE-LCE mappings need to be updated for nvlink topology. + * If this is NV_FALSE, RM would ignore the above values. However, + * PCE-LCE mappings will still be updated if there were any regkey + * overrides. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ + +#define NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS (0x20802a05) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS { + NvU32 pceLceMap[NV2080_CTRL_MAX_PCES]; + NvU32 grceConfig[NV2080_CTRL_MAX_GRCES]; + NvU32 exposeCeMask; + NvBool bUpdateNvlinkPceLce; +} NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS; + +#define NV2080_CTRL_CMD_CE_UPDATE_CLASS_DB (0x20802a06) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS { + NvU32 stubbedCeMask; +} NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS + * + * Query _CE_GRCE, _CE_SHARED, _CE_SUPPORTS_PIPELINED_BL, _CE_SUPPORTS_NONPIPELINED_BL bits of CE + * capabilities. + * + */ + +#define NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS (0x20802a07) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | 0x7" */ + +#define NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS { + NvU32 size; +} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS; + +#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASKS + * + * Get HSHUB and FBHUB PCE masks. + * + * [out] hshubPceMasks + * PCE mask for each HSHUB + * [out] fbhubPceMask + * FBHUB PCE mask + */ + +#define NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK (0x20802a09) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_MAX_HSHUBS 5 + +#define NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS { + NvU32 hshubPceMasks[NV2080_CTRL_CE_MAX_HSHUBS]; + NvU32 fbhubPceMask; +} NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_CE_GET_ALL_CAPS + * + * Query caps of all CEs. + * + * [out] capsTbl + * Array of CE caps in the order of CEs. The caps bits interpretation is the same as in + * NV2080_CTRL_CMD_CE_GET_CAPS. + * [out] present + * Bit mask indicating which CEs are usable by the client and have their caps indicated in capsTbl. + * If a CE is not marked present, its caps bits should be ignored. + * If client is subscribed to a MIG instance, only the CEs present in the instance are tagged as such. 
+ */ + +#define NV2080_CTRL_CMD_CE_GET_ALL_CAPS (0x20802a0a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS_MESSAGE_ID (0xaU) + +typedef struct NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS { + NvU8 capsTbl[NV2080_CTRL_MAX_PCES][NV2080_CTRL_CE_CAPS_TBL_SIZE]; + NvU32 present; +} NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS; + +#define NV2080_CTRL_CMD_CE_GET_ALL_PHYSICAL_CAPS (0x20802a0b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | 0xb" */ + +/* _ctrl2080ce_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h new file mode 100644 index 000000000..55b996412 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080cipher.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080cipher.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h new file mode 100644 index 000000000..4f1956af8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clk.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080clk.finn +// + + + +/* _ctrl2080clk_h_ */ + + +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080gpumon.h" +#include "ctrl/ctrl2080/ctrl2080clkavfs.h" +#include "ctrl/ctrl2080/ctrl2080volt.h" +#include "ctrl/ctrl2080/ctrl2080pmumon.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h new file mode 100644 index 000000000..97ae7dc18 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080clkavfs.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080clkavfs.finn +// + + + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080volt.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h new file mode 100644 index 000000000..4857c3eb7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080common.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2004 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080common.finn +// + + + + +#define NV2080_CTRL_CMD_MAX_HEADS 2 diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h new file mode 100644 index 000000000..eb5ecab51 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dma.h @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080dma.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX dma control commands and parameters */ + +#include "ctrl2080common.h" + +/* + * NV2080_CTRL_CMD_DMA_INVALIDATE_TLB + * + * This command invalidates the GPU TLB. This is intended to be used + * by RM clients that manage their own TLB consistency when updating + * page tables on their own, or with DEFER_TLB_INVALIDATION options + * to other RM APIs. + * + * hVASpace + * This parameter specifies the VASpace object whose MMU TLB entries needs to be invalidated. + * Specifying a GMMU VASpace object handle will invalidate the GMMU TLB for the particular VASpace. + * Specifying a SMMU VASpace object handle will flush the entire SMMU TLB & PTC. + * + * This call can be used with the NV50_DEFERRED_API_CLASS (class 0x5080). 
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_TIMEOUT_RETRY + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_DMA_INVALIDATE_TLB (0x20802502) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMA_INTERFACE_ID << 8) | NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS { + NvHandle hClient; // Deprecated. Kept here for compactibility with chips_GB9-2-1-1 + NvHandle hDevice; // Deprecated. Kept here for compactibility with chips_GB9-2-1-1 + NvU32 engine; // Deprecated. Kept here for compactibility with chips_GB9-2-1-1 + NvHandle hVASpace; +} NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS; + +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS 0:0 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_GRAPHICS_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO 1:1 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VIDEO_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY 2:2 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_DISPLAY_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE 3:3 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_CAPTURE_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB 4:4 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_IFB_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV 5:5 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MV_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG 6:6 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_MPEG_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD 7:7 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_VLD_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION 8:8 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_ENCRYPTION_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON 9:9 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_PERFMON_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS 10:10 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_POSTPROCESS_TRUE (0x00000001) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR 11:11 +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR_FALSE (0x00000000) +#define NV2080_CTRL_DMA_INVALIDATE_TLB_ENGINE_BAR_TRUE (0x00000001) + +/* + * NV2080_CTRL_DMA_INFO + * + * This structure represents a single 32bit dma engine value. Clients + * request a particular DMA engine value by specifying a unique dma + * information index. + * + * Legal dma information index values are: + * NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE + * This index can be used to request the system address size in bits. 
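As a minimal sketch (illustrative only, same placeholder rmControl() wrapper and handle names), a client that has deferred its TLB invalidation would fill the params like this; the hClient/hDevice/engine fields are deprecated per the comments above and are simply left zeroed.

#include <string.h>
#include "nvtypes.h"
#include "nvstatus.h"
#include "ctrl/ctrl2080/ctrl2080dma.h"

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

/* Invalidate the GPU TLB for one VASpace after a deferred PTE update. */
static NV_STATUS invalidateTlbForVaSpace(NvHandle hClient, NvHandle hSubdevice,
                                         NvHandle hVASpace)
{
    NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS params;

    memset(&params, 0, sizeof(params));  /* deprecated fields stay zero */
    params.hVASpace = hVASpace;          /* GMMU VASpace: invalidates its TLB;
                                            SMMU VASpace: flushes SMMU TLB and PTC */
    return rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_DMA_INVALIDATE_TLB,
                     &params, sizeof(params));
}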
+ */ +typedef struct NV2080_CTRL_DMA_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_DMA_INFO; + +/* valid dma info index values */ +#define NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE (0x000000000) + +/* set INDEX_MAX to greatest possible index value */ +#define NV2080_CTRL_DMA_INFO_INDEX_MAX NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE + +/* + * NV2080_CTRL_CMD_DMA_GET_INFO + * + * This command returns dma engine information for the associated GPU. + * Requests to retrieve dma information use an array of one or more + * NV2080_CTRL_DMA_INFO structures. + * + * dmaInfoTblSize + * This field specifies the number of valid entries in the dmaInfoList + * array. This value cannot exceed NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES. + * dmaInfoTbl + * This parameter contains the client's dma info table into + * which the dma info values will be transferred by the RM. + * The dma info table is an array of NV2080_CTRL_DMA_INFO structures. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_DMA_GET_INFO (0x20802503) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMA_INTERFACE_ID << 8) | NV2080_CTRL_DMA_GET_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum number of NV2080_CTRL_DMA_INFO entries per request */ +#define NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES (256) + +#define NV2080_CTRL_DMA_GET_INFO_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_DMA_GET_INFO_PARAMS { + NvU32 dmaInfoTblSize; + /* + * C form: + * NV2080_CTRL_DMA_INFO dmaInfoTbl[NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES]; + */ + NV2080_CTRL_DMA_INFO dmaInfoTbl[NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES]; +} NV2080_CTRL_DMA_GET_INFO_PARAMS; + +typedef struct NV2080_CTRL_DMA_UPDATE_COMPTAG_INFO_TILE_INFO { + /*! + * 64KB aligned address of source 64KB tile for comptag reswizzle. + */ + NvU32 srcAddr; + + /*! + * 64KB aligned address of destination 64KB tile for comptag reswizzle. + */ + NvU32 dstAddr; + + /*! + * Comptag index assigned to the 64K sized tile relative to + * the compcacheline. Absolute comptag index would be: + * startComptagIndex + relComptagIndex. + */ + NvU16 relComptagIndex; +} NV2080_CTRL_DMA_UPDATE_COMPTAG_INFO_TILE_INFO; + +// _ctrl2080dma_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h new file mode 100644 index 000000000..982381ffd --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080dmabuf.h @@ -0,0 +1,105 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
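A short sketch of the table-based NV2080_CTRL_CMD_DMA_GET_INFO query, asking for the single index defined above (system address size in bits); again the dispatch wrapper and handles are placeholders, not part of the headers.

#include <string.h>
#include "nvtypes.h"
#include "nvstatus.h"
#include "ctrl/ctrl2080/ctrl2080dma.h"

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

/* Query how many address bits the GPU supports for system memory DMA. */
static NV_STATUS getSystemAddressBits(NvHandle hClient, NvHandle hSubdevice,
                                      NvU32 *pBits)
{
    NV2080_CTRL_DMA_GET_INFO_PARAMS params;
    NV_STATUS status;

    memset(&params, 0, sizeof(params));
    params.dmaInfoTblSize = 1;           /* one entry; max is 256 per request */
    params.dmaInfoTbl[0].index = NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE;

    status = rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_DMA_GET_INFO,
                       &params, sizeof(params));
    if (status == NV_OK)
        *pBits = params.dmaInfoTbl[0].data;
    return status;
}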
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080dmabuf.finn +// + + + +/* + * NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD + * + * Exports RM vidmem handles to a dma-buf fd. + * + * The objects in the 'handles' array are exported to the fd as range: + * [index, index + numObjects). + * + * A dma-buf fd is created the first time this control call is called. + * The fd is an input parameter for subsequent calls to attach additional handles + * over NV2080_CTRL_DMABUF_MAX_HANDLES. + * + * fd + * A dma-buf file descriptor. If -1, a new FD will be created. + * + * totalObjects + * The total number of objects that the client wishes to export to the FD. + * This parameter will be honored only when the FD is getting created. + * + * numObjects + * The number of handles the user wishes to export in this call. + * + * index + * The index into the export fd at which to start exporting the handles in + * 'handles'. This index cannot overlap a previously used index. + * + * totalSize + * The total size of memory being exported in bytes, needed to create the dma-buf. + * This size includes the memory that will be exported in future export calls + * for this dma-buf. + * + * handles + * An array of {handle, offset, size} that describes the dma-buf. + * The offsets and sizes must be OS page-size aligned. + * + * Limitations: + * 1. This call only supports vidmem objects for now. + * 2. All memory handles should belong to the same GPU or the same GPU MIG instance. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_NO_MEMORY + * NV_ERR_OPERATING_SYSTEM + * NV_ERR_IN_USE + * NV_ERR_INVALID_OBJECT + * NV_ERR_INVALID_OBJECT_PARENT + */ +#define NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD (0x20803a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_DMABUF_INTERFACE_ID << 8) | NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_DMABUF_MAX_HANDLES 128 + +typedef struct NV2080_CTRL_DMABUF_MEM_HANDLE_INFO { + NvHandle hMemory; + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); +} NV2080_CTRL_DMABUF_MEM_HANDLE_INFO; + +#define NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS { + NvS32 fd; + NvU32 totalObjects; + NvU32 numObjects; + NvU32 index; + NV_DECLARE_ALIGNED(NvU64 totalSize, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_DMABUF_MEM_HANDLE_INFO handles[NV2080_CTRL_DMABUF_MAX_HANDLES], 8); +} NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS; + +// _ctrl2080dmabuf_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h new file mode 100644 index 000000000..59c7f0c44 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ecc.h @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
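To make the dma-buf export flow above concrete, here is a hedged sketch of the first export call: fd = -1 requests creation of a new dma-buf, offsets and sizes must be OS-page aligned, and the handle, size, and rmControl() wrapper names are placeholders.

#include <string.h>
#include "nvtypes.h"
#include "nvstatus.h"
#include "ctrl/ctrl2080/ctrl2080dmabuf.h"

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

/* Export one vidmem allocation (hMemory, size bytes) as a new dma-buf fd. */
static NV_STATUS exportVidmemToDmabuf(NvHandle hClient, NvHandle hSubdevice,
                                      NvHandle hMemory, NvU64 size, NvS32 *pFd)
{
    NV2080_CTRL_DMABUF_EXPORT_MEM_PARAMS params;
    NV_STATUS status;

    memset(&params, 0, sizeof(params));
    params.fd           = -1;    /* create a new dma-buf on this first call */
    params.totalObjects = 1;     /* no further handles will be attached */
    params.numObjects   = 1;
    params.index        = 0;
    params.totalSize    = size;  /* must cover all objects ever exported to this fd */
    params.handles[0].hMemory = hMemory;
    params.handles[0].offset  = 0;     /* OS page aligned */
    params.handles[0].size    = size;  /* OS page aligned */

    status = rmControl(hClient, hSubdevice,
                       NV2080_CTRL_CMD_DMABUF_EXPORT_OBJECTS_TO_FD,
                       &params, sizeof(params));
    if (status == NV_OK)
        *pFd = params.fd;
    return status;
}

A later call attaching more handles would pass the returned fd back in instead of -1, with index advanced past the ranges already exported.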
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080ecc.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + + + +#define NV2080_CTRL_CMD_ECC_GET_CLIENT_EXPOSED_COUNTERS (0x20803400) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS + * + * sramLastClearedTimestamp [out] + * dramLastClearedTimestamp [out] + * unix-epoch based timestamp. These fields indicate when the error counters + * were last cleared by the user. + * + * sramErrorCounts [out] + * dramErrorCounts [out] + * Aggregate error counts for SRAM and DRAM + */ + +#define NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS_MESSAGE_ID (0x0U) + +typedef struct NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS { + NvU32 sramLastClearedTimestamp; + NvU32 dramLastClearedTimestamp; + + NV_DECLARE_ALIGNED(NvU64 sramCorrectedTotalCounts, 8); + NV_DECLARE_ALIGNED(NvU64 sramUncorrectedTotalCounts, 8); + NV_DECLARE_ALIGNED(NvU64 dramCorrectedTotalCounts, 8); + NV_DECLARE_ALIGNED(NvU64 dramUncorrectedTotalCounts, 8); +} NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS; +/* _ctrl2080ecc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h new file mode 100644 index 000000000..c1a47c9cd --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h @@ -0,0 +1,375 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
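A brief, hedged sketch of reading the aggregate ECC counters exposed above (wrapper and handle names remain placeholders):

#include <string.h>
#include "nvtypes.h"
#include "nvstatus.h"
#include "ctrl/ctrl2080/ctrl2080ecc.h"

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

/* Return the total uncorrected ECC error count (SRAM + DRAM aggregates). */
static NvU64 totalUncorrectedEccErrors(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS params;

    memset(&params, 0, sizeof(params));
    if (rmControl(hClient, hSubdevice,
                  NV2080_CTRL_CMD_ECC_GET_CLIENT_EXPOSED_COUNTERS,
                  &params, sizeof(params)) != NV_OK)
        return 0;

    /* sram/dramLastClearedTimestamp report when the user last cleared the counts */
    return params.sramUncorrectedTotalCounts + params.dramUncorrectedTotalCounts;
}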
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080event.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +#include "nv_vgpu_types.h" +/* NV20_SUBDEVICE_XX event-related control commands and parameters */ + +/* + * NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * This command sets event notification state for the associated subdevice. + * This command requires that an instance of NV01_EVENT has been previously + * bound to the associated subdevice object. + * + * event + * This parameter specifies the type of event to which the specified + * action is to be applied. This parameter must specify a valid + * NV2080_NOTIFIERS value (see cl2080.h for more details) and should + * not exceed one less NV2080_NOTIFIERS_MAXCOUNT. + * action + * This parameter specifies the desired event notification action. + * Valid notification actions include: + * NV2080_CTRL_SET_EVENT_NOTIFICATION_DISABLE + * This action disables event notification for the specified + * event for the associated subdevice object. + * NV2080_CTRL_SET_EVENT_NOTIFICATION_SINGLE + * This action enables single-shot event notification for the + * specified event for the associated subdevice object. + * NV2080_CTRL_SET_EVENT_NOTIFICATION_REPEAT + * This action enables repeated event notification for the specified + * event for the associated system controller object. + * bNotifyState + * This boolean is used to indicate the current state of the notifier + * at the time of event registration. 
This is optional and its semantics + * needs to be agreed upon by the notifier and client using the notifier + * info32 + * This is used to send 32-bit initial state info with the notifier at + * time of event registration + * info16 + * This is used to send 16-bit initial state info with the notifier at + * time of event registration + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 event; + NvU32 action; + NvBool bNotifyState; + NvU32 info32; + NvU16 info16; +} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000) +#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001) +#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) + +/* XUSB/PPC D-state defines */ +#define NV2080_EVENT_DSTATE_XUSB_D0 (0x00000000) +#define NV2080_EVENT_DSTATE_XUSB_D3 (0x00000003) +#define NV2080_EVENT_DSTATE_XUSB_INVALID (0xFFFFFFFF) +#define NV2080_EVENT_DSTATE_PPC_D0 (0x00000000) +#define NV2080_EVENT_DSTATE_PPC_D3 (0x00000003) +#define NV2080_EVENT_DSTATE_PPC_INVALID (0xFFFFFFFF) + +// HDACODEC Decice DState, D3_COLD is only for verbose mapping, it cannot be logged +typedef enum NV2080_EVENT_HDACODEC_DSTATE { + NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D0 = 0, + NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D1 = 1, + NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D2 = 2, + NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D3_HOT = 3, + NV2080_EVENT_HDACODEC_DEVICE_DSTATE_D3_COLD = 4, + NV2080_EVENT_HDACODEC_DEVICE_DSTATE_DSTATE_MAX = 5, +} NV2080_EVENT_HDACODEC_DSTATE; + +/* + * NV2080_CTRL_CMD_EVENT_SET_TRIGGER + * + * This command triggers a software event for the associated subdevice. + * This command accepts no parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_EVENT_SET_TRIGGER (0x20800302) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | 0x2" */ + +/* + * NV2080_CTRL_CMD_EVENT_SET_NOTIFIER_MEMORY + * + * hMemory + * This parameter specifies the handle of the memory object + * that identifies the memory address translation for this + * subdevice instance's notification(s). The beginning of the + * translation points to an array of notification data structures. + * The size of the translation must be at least large enough to hold the + * maximum number of notification data structures identified by + * the NV2080_MAX_NOTIFIERS value. + * Legal argument values must be instances of the following classes: + * NV01_NULL + * NV04_MEMORY + * When hMemory specifies the NV01_NULL_OBJECT value then any existing + * memory translation connection is cleared. There must not be any + * pending notifications when this command is issued. 
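As a hedged sketch of arming a repeating event notification: the notifier index must be one of the NV2080_NOTIFIERS values from cl2080.h (left as a caller-supplied parameter here), an NV01_EVENT object must already be bound to the subdevice, and the rmControl() wrapper is the same placeholder used in the earlier sketches.

#include <string.h>
#include "nvtypes.h"
#include "nvstatus.h"
#include "ctrl/ctrl2080/ctrl2080event.h"

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

/* Enable repeated notification for one NV2080_NOTIFIERS index. */
static NV_STATUS enableRepeatedNotification(NvHandle hClient, NvHandle hSubdevice,
                                            NvU32 notifierIndex)
{
    NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS params;

    memset(&params, 0, sizeof(params));
    params.event  = notifierIndex;
    params.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
    /* bNotifyState/info32/info16 carry optional initial state; left zero here */

    return rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION,
                     &params, sizeof(params));
}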
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES (0x20800303) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS { + NvHandle hMemory; +} NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS; + +#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_NOTIFIED 0 +#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_PENDING 1 +#define NV2080_EVENT_MEMORY_NOTIFIES_STATUS_ERROR 2 + +/* + * NV2080_CTRL_CMD_EVENT_SET_SEMAPHORE_MEMORY + * + * hSemMemory + * This parameter specifies the handle of the memory object that + * identifies the semaphore memory associated with this subdevice + * event notification. Once this is set RM will generate an event + * only when there is a change in the semaphore value. It is + * expected that the semaphore memory value will be updated by + * the GPU indicating that there is an event pending. This + * command is used by VGX plugin to determine which virtual + * machine has generated a particular event. + * + * semOffset + * This parameter indicates the memory offset of the semaphore. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_EVENT_SET_SEMAPHORE_MEMORY (0x20800304) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS { + NvHandle hSemMemory; + NvU32 semOffset; +} NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS; + +/* + * NV2080_CTRL_CMD_EVENT_SET_GUEST_MSI + * + * hSemMemory + * This parameter specifies the handle of the memory object that + * identifies the semaphore memory associated with this subdevice + * event notification. Once this is set RM will generate an event + * only when there is a change in the semaphore value. It is + * expected that the semaphore memory value will be updated by + * the GPU indicating that there is an event pending. This + * command is used by VGX plugin to determine which virtual + * machine has generated a particular event. + * + * guestMSIAddr + * This parameter indicates the guest allocated MSI address. + * + * guestMSIData + * This parameter indicates the MSI data set by the guest OS. 
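A minimal sketch of NV2080_CTRL_CMD_EVENT_SET_SEMAPHORE_MEMORY as described above (typically issued by the VGX plugin); hSemMemory and semOffset are caller-supplied, and the wrapper is the usual placeholder.

#include <string.h>
#include "nvtypes.h"
#include "nvstatus.h"
#include "ctrl/ctrl2080/ctrl2080event.h"

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

/* Point RM at the semaphore that gates event delivery for this subdevice. */
static NV_STATUS setEventSemaphore(NvHandle hClient, NvHandle hSubdevice,
                                   NvHandle hSemMemory, NvU32 semOffset)
{
    NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS params;

    memset(&params, 0, sizeof(params));
    params.hSemMemory = hSemMemory;  /* semaphore surface the GPU updates */
    params.semOffset  = semOffset;   /* offset of the semaphore value */

    return rmControl(hClient, hSubdevice,
                     NV2080_CTRL_CMD_EVENT_SET_SEMAPHORE_MEMORY,
                     &params, sizeof(params));
}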
+ * + * vmIdType + * This parameter specifies the type of guest virtual machine identifier + * + * guestVmId + * This parameter specifies the guest virtual machine identifier + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_EVENT_SET_GUEST_MSI (0x20800305) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS { + NV_DECLARE_ALIGNED(NvU64 guestMSIAddr, 8); + NvU32 guestMSIData; + NvHandle hSemMemory; + NvBool isReset; + VM_ID_TYPE vmIdType; + NV_DECLARE_ALIGNED(VM_ID guestVmId, 8); +} NV2080_CTRL_EVENT_SET_GUEST_MSI_PARAMS; + + +/* + * NV2080_CTRL_CMD_EVENT_SET_SEMA_MEM_VALIDATION + * + * hSemMemory + * This parameter specifies the handle of the memory object that + * identifies the semaphore memory associated with this subdevice + * event notification. Once this is set RM will generate an event + * only when there is a change in the semaphore value. It is + * expected that the semaphore memory value will be updated by + * the GPU indicating that there is an event pending. This + * command is used by VGX plugin to determine which virtual + * machine has generated a particular event. + * + * isSemaMemValidationEnabled + * This parameter used to enable/disable change in sema value check + * while generating an event. + * + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + * NVOS_STATUS_ERROR_INVALID_OBJECT_HANDLE + * NVOS_STATUS_ERROR_INVALID_ARGUMENT + */ + + +#define NV2080_CTRL_CMD_EVENT_SET_SEMA_MEM_VALIDATION (0x20800306) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS { + NvHandle hSemMemory; + NvBool isSemaMemValidationEnabled; +} NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS; + + +/* + * NV2080_CTRL_CMD_EVENT_SET_VMBUS_CHANNEL + * + * hSemMemory + * This parameter specifies the handle of the memory object that + * identifies the semaphore memory associated with this subdevice + * event notification. Once this is set RM will generate an event + * only when there is a change in the semaphore value. It is + * expected that the semaphore memory value will be updated by + * the GPU indicating that there is an event pending. This + * command is used by VGX plugin to determine which virtual + * machine has generated a particular event. 
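And the matching toggle for semaphore-value checking, again only as a hedged sketch with placeholder names:

#include <string.h>
#include "nvtypes.h"
#include "nvstatus.h"
#include "ctrl/ctrl2080/ctrl2080event.h"

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

/* Enable or disable the "semaphore value changed" check before raising events. */
static NV_STATUS setSemaMemValidation(NvHandle hClient, NvHandle hSubdevice,
                                      NvHandle hSemMemory, NvBool bEnable)
{
    NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS params;

    memset(&params, 0, sizeof(params));
    params.hSemMemory = hSemMemory;
    params.isSemaMemValidationEnabled = bEnable;

    return rmControl(hClient, hSubdevice,
                     NV2080_CTRL_CMD_EVENT_SET_SEMA_MEM_VALIDATION,
                     &params, sizeof(params));
}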
+ * + * vmIdType + * This parameter specifies the type of guest virtual machine identifier + * + * guestVmId + * This parameter specifies the guest virtual machine identifier + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_EVENT_SET_VMBUS_CHANNEL (0x20800307) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_VMBUS_CHANNEL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_VMBUS_CHANNEL_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_EVENT_SET_VMBUS_CHANNEL_PARAMS { + NvHandle hSemMemory; + VM_ID_TYPE vmIdType; + NV_DECLARE_ALIGNED(VM_ID guestVmId, 8); +} NV2080_CTRL_EVENT_SET_VMBUS_CHANNEL_PARAMS; + + +/* + * NV2080_CTRL_CMD_EVENT_SET_TRIGGER_FIFO + * + * This command triggers a FIFO event for the associated subdevice. + * + * hEvent + * Handle of the event that should be notified. If zero, all + * non-stall interrupt events for this subdevice will be notified. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_EVENT_SET_TRIGGER_FIFO (0x20800308) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS { + NvHandle hEvent; +} NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS; + +/* + * NV2080_CTRL_CMD_EVENT_VIDEO_BIND_EVTBUF_FOR_UID + * + * This command is used to create a video bind-point to an event buffer that + * is filtered by UID. + * + * hEventBuffer[IN] + * The event buffer to bind to + * + * recordSize[IN] + * The size of the FECS record in bytes + * + * levelOfDetail[IN] + * One of NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_: + * FULL: Report all CtxSw events + * SIMPLE: Report engine start and engine end events only + * CUSTOM: Report events in the eventFilter field + * NOTE: RM may override the level-of-detail depending on the caller + * + * eventFilter[IN] + * Bitmask of events to report if levelOfDetail is CUSTOM + * + * bAllUsers[IN] + * Only report video data for the current user if false, for all users if true + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_EVENT_VIDEO_BIND_EVTBUF (0x20800309) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD { + NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_FULL = 0, + NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_SIMPLE = 1, + NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_CUSTOM = 2, +} NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD; + +#define NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS { + NvHandle hEventBuffer; + NvU32 recordSize; + NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD levelOfDetail; + NvU32 eventFilter; + NvBool bAllUsers; +} NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS; + +/* _ctrl2080event_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h new file mode 100644 index 000000000..a338bab8e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fan.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
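A hedged sketch of binding an event buffer for video context-switch tracing with the SIMPLE level of detail; the event-buffer handle and record size are caller-supplied, RM may override the level of detail as noted above, and rmControl() remains a placeholder.

#include <string.h>
#include "nvtypes.h"
#include "nvstatus.h"
#include "ctrl/ctrl2080/ctrl2080event.h"

extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject,
                           NvU32 cmd, void *pParams, NvU32 paramsSize);

/* Bind an event buffer to video engine start/end events for the current user. */
static NV_STATUS bindVideoEventBuffer(NvHandle hClient, NvHandle hSubdevice,
                                      NvHandle hEventBuffer, NvU32 recordSize)
{
    NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_PARAMS params;

    memset(&params, 0, sizeof(params));
    params.hEventBuffer  = hEventBuffer;
    params.recordSize    = recordSize;
    params.levelOfDetail = NV2080_CTRL_EVENT_VIDEO_BIND_EVTBUF_LOD_SIMPLE;
    params.eventFilter   = 0;         /* only consulted for LOD_CUSTOM */
    params.bAllUsers     = NV_FALSE;  /* current user only */

    return rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_EVENT_VIDEO_BIND_EVTBUF,
                     &params, sizeof(params));
}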
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080fan.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h new file mode 100644 index 000000000..3ca18bbc8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h @@ -0,0 +1,2910 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080fb.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX fb control commands and parameters */ + +#include "ctrl2080common.h" +#include "nvlimits.h" + +/* + * NV2080_CTRL_FB_INFO + * + * This structure represents a single 32bit fb engine value. Clients + * request a particular fb engine value by specifying a unique fb + * information index. + * + * Legal fb information index values are: + * NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_COUNT + * This index is used to request the number of tiled regions supported + * by the associated subdevice. The return value is GPU + * implementation-dependent. 
A return value of 0 indicates the GPU + * does not support tiling. + * NV2080_CTRL_FB_INFO_INDEX_COMPRESSION_SIZE + * This index is used to request the amount of compression (in bytes) + * supported by the associated subdevice. The return value is GPU + * implementation-dependent. A return value of 0 indicates the GPU + * does not support compression. + * Nv2080_CTRL_FB_INFO_INDEX_DRAM_PAGE_STRIDE + * This index is used to request the DRAM page stride (in bytes) + * supported by the associated subdevice. The return value is GPU + * implementation-dependent. + * NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_FREE_COUNT + * This index is used to request the number of free tiled regions on + * the associated subdevice. The return value represents the current + * number of free tiled regions at the time the command is processed and + * is not guaranteed to remain unchanged. A return value of 0 indicates + * that there are no available tiled regions on the associated subdevice. + * NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT + * This index is used to request the number of frame buffer partitions + * on the associated subdevice. Starting with Fermi there are now two units + * with the name framebuffer partitions. On those chips this index returns + * the number of FBPAs. For number of FBPs use + * NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE + * This index is used to request the amount of framebuffer memory in + * kilobytes physically present on the associated subdevice. This + * value will never exceed the value reported by + * NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE. + * This an SMC aware attribute, so the per-partition framebuffer memory + * size will be returned when the client has a partition subscription. + * NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE + * This index is used to request the total amount of video memory in + * kilobytes for use with the associated subdevice. This value will + * reflect both framebuffer memory as well as any system memory dedicated + * for use with the subdevice. + * This an SMC aware attribute, so the per-partition video memory size + * will be returned when the client has a partition subscription. + * NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE + * This index is used to request the amount of total RAM in kilobytes + * available for user allocations. This value reflects the total ram + * size less the amount of memory reserved for internal use. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_HEAP_START + * This index is used to request the offset for start of heap in + * kilobytes. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE + * This index is used to request the available amount of video memory in + * kilobytes for use with the associated subdevice or the SMC partition. + * This an SMC aware attribute, thus necessary partition subscription is + * required to query per partition information, if the device is partitioned. + * Alternatively, the SMC/MIG monitor capability can be acquired to query + * aggregate available memory across all the valid partitions. + * NV2080_CTRL_FB_INFO_INDEX_MAPPABLE_HEAP_SIZE + * This index reflects the amount of heap memory in kilobytes that + * is accessible by the CPU. 
On subdevices with video memory sizes that + * exceed the amount that can be bus mappable this value will be less + * than that reported by NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_BUS_WIDTH + * This index is used to request the FB bus bandwidth on the associated + * subdevice. + * NV2080_CTRL_FB_INFO_INDEX_RAM_CFG + * This index is used to request the implementation-dependent RAM + * configuration value of the associated subdevice. + * NV2080_CTRL_FB_INFO_INDEX_RAM_TYPE + * This index is used to request the type of RAM used for the framebuffer + * on the associated subdevice. Legal RAM types include: + * NV2080_CTRL_FB_INFO_RAM_TYPE_UNKNOWN + * NV2080_CTRL_FB_INFO_RAM_TYPE_SDRAM + * NV2080_CTRL_FB_INFO_RAM_TYPE_DDR1 + * NV2080_CTRL_FB_INFO_RAM_TYPE_DDR2 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR2 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR3 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR4 + * NV2080_CTRL_FB_INFO_RAM_TYPE_DDR3 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5X + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6 + * NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6X + * NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR2 + * NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR4 + * NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR5 + * NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT + * This index is used to request the number of FB banks on the associated + * subdevice. + * NV2080_CTRL_FB_INFO_INDEX_OVERLAY_OFFSET_ADJUSTMENT + * This index is used to request the offset relative to the start of the + * overlay surface(s), in bytes, at which scanout should happen if the + * primary and the overlay surfaces are all aligned on large page + * boundaries. + * NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_SPACE_SIZE_KB + * This index is used to request the size of the GPU's virtual address + * space in kilobytes. + * NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_HEAP_SIZE_KB + * This index is used to request the size of the GPU's virtual address + * space heap (minus RM-reserved space) in kilobytes. + * NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_MAPPBLE_SIZE_KB + * This index is used to request the size of the GPU's BAR1 mappable + * virtual address space in kilobytes. + * NV2080_CTRL_FB_INFO_INDEX_EFFECTIVE_BW + * This index is deprecated, and returns zero value. + * NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK + * This index is used to request the mask of currently active partitions. + * Each active partition has an ID that's equivalent to the corresponding + * bit position in the mask. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_VISTA_RESERVED_HEAP_SIZE + * This index is used to request the amount of total RAM in kilobytes + * reserved for internal RM allocations on Vista. This will need to + * be subtracted from the total heap size to get the amount available to + * KMD. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_RAM_LOCATION + * This index is used to distinguish between different memory + * configurations. + * NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN + * This index is used to check if the FB is functional + * NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT + * This index is used to get the number of FBPs on the subdevice. This + * field is not to be confused with + * NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT (returns number of FBPAs). 
+ * Starting with Fermi the term partition is an ambiguous term, both FBP + * and FBPA mean FB partitions. The FBPA is the low level DRAM controller, + * while a FBP is the aggregation of one or more FBPAs, L2, ROP, and some + * other units. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_L2CACHE_SIZE + * This index is used to get the size of the L2 cache in Bytes. + * A value of zero indicates that the L2 cache isn't supported on the + * associated subdevice. + * NV2080_CTRL_FB_INFO_INDEX_MEMORYINFO_VENDOR_ID + * This index is used to get the memory vendor ID information from + * the Memory Information Table in the VBIOS. Legal memory Vendor ID + * values include: + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_UNKNOWN + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_RESERVED + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_SAMSUNG + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_QIMONDA + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ELPIDA + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ETRON + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_NANYA + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_HYNIX + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MOSEL + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_WINBOND + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ESMT + * NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MICRON + * NV2080_CTRL_FB_INFO_INDEX_BAR1_AVAIL_SIZE + * This index is used to request the amount of unused bar1 space. The + * data returned is a value in KB. It is not guaranteed to be entirely + * accurate since it is a snapshot at a particular time and can + * change quickly. + * NV2080_CTRL_FB_INFO_INDEX_BAR1_MAX_CONTIGUOUS_AVAIL_SIZE + * This index is used to request the amount of largest unused contiguous + * block in bar1 space. The data returned is a value in KB. It is not + * guaranteed to be entirely accurate since it is a snapshot at a particular + * time and can change quickly. + * NV2080_CTRL_FB_INFO_INDEX_USABLE_RAM_SIZE + * This index is used to request the amount of usable framebuffer memory in + * kilobytes physically present on the associated subdevice. This + * value will never exceed the value reported by + * NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_LTC_COUNT + * Returns the active LTC count across all active FBPs. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. + * NV2080_CTRL_FB_INFO_INDEX_LTS_COUNT + * Returns the active LTS count across all active LTCs. + * This an SMC aware attribute, thus necessary partition subscription is + * required if the device is partitioned. 
+ * NV2080_CTRL_FB_INFO_INDEX_PSEUDO_CHANNEL_MODE + * This is used to identify if pseudo-channel mode is enabled for HBM + * NV2080_CTRL_FB_INFO_INDEX_SMOOTHDISP_RSVD_BAR1_SIZE + * This is used by WDDM-KMD to determine whether and how much RM reserved BAR1 for smooth transition + * NV2080_CTRL_FB_INFO_INDEX_HEAP_OFFLINE_SIZE + * Returns the total size of the all dynamically offlined pages in KiB + * NV2080_CTRL_FB_INFO_INDEX_1TO1_COMPTAG_ENABLED + * Returns true if 1to1 comptag is enabled + * NV2080_CTRL_FB_INFO_INDEX_SUSPEND_RESUME_RSVD_SIZE + * Returns the total size of the memory(FB) that will saved/restored during save/restore cycle + * NV2080_CTRL_FB_INFO_INDEX_ALLOW_PAGE_RETIREMENT + * Returns true if page retirement is allowed + * NV2080_CTRL_FB_INFO_POISON_FUSE_ENABLED + * Returns true if poison fuse is enabled + * NV2080_CTRL_FB_INFO_FBPA_ECC_ENABLED + * Returns true if ECC is enabled for FBPA + * NV2080_CTRL_FB_INFO_DYNAMIC_PAGE_OFFLINING_ENABLED + * Returns true if dynamic page blacklisting is enabled + * NV2080_CTRL_FB_INFO_INDEX_FORCED_BAR1_64KB_MAPPING_ENABLED + * Returns true if 64KB mapping on BAR1 is force-enabled + * NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_SIZE + * Returns the P2P mailbox size to be allocated by the client. + * Returns 0 if the P2P mailbox is allocated by RM. + * NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_ALIGNMENT_SIZE + * Returns the P2P mailbox alignment requirement. + * Returns 0 if the P2P mailbox is allocated by RM. + * NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_BAR1_MAX_OFFSET_64KB + * Returns the P2P mailbox max offset requirement. + * Returns 0 if the P2P mailbox is allocated by RM. + * NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_TOTAL_KB + * Returns total protected memory when memory protection is enabled + * Returns 0 when memory protection is not enabled. + * NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_FREE_KB + * Returns protected memory available for allocation when memory + * protection is enabled. + * Returns 0 when memory protection is not enabled. + * NV2080_CTRL_FB_INFO_INDEX_ECC_STATUS_SIZE + * Returns the ECC status size (corresponds to subpartitions or channels + * depending on architecture/memory type). 
+ */ +typedef struct NV2080_CTRL_FB_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_FB_INFO; + +/* valid fb info index values */ +#define NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_COUNT (0x00000000) // Deprecated +#define NV2080_CTRL_FB_INFO_INDEX_COMPRESSION_SIZE (0x00000001) +#define NV2080_CTRL_FB_INFO_INDEX_DRAM_PAGE_STRIDE (0x00000002) +#define NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_FREE_COUNT (0x00000003) +#define NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT (0x00000004) +#define NV2080_CTRL_FB_INFO_INDEX_BAR1_SIZE (0x00000005) +#define NV2080_CTRL_FB_INFO_INDEX_BANK_SWIZZLE_ALIGNMENT (0x00000006) +#define NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE (0x00000007) +#define NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE (0x00000008) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE (0x00000009) +#define NV2080_CTRL_FB_INFO_INDEX_MAPPABLE_HEAP_SIZE (0x0000000A) +#define NV2080_CTRL_FB_INFO_INDEX_BUS_WIDTH (0x0000000B) +#define NV2080_CTRL_FB_INFO_INDEX_RAM_CFG (0x0000000C) +#define NV2080_CTRL_FB_INFO_INDEX_RAM_TYPE (0x0000000D) +#define NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT (0x0000000E) +#define NV2080_CTRL_FB_INFO_INDEX_OVERLAY_OFFSET_ADJUSTMENT (0x0000000F) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_SPACE_SIZE_KB (0x0000000F) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_HEAP_SIZE_KB (0x0000000F) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_GPU_VADDR_MAPPBLE_SIZE_KB (0x0000000F) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_EFFECTIVE_BW (0x0000000F) // Deprecated (index reused to return 0) +#define NV2080_CTRL_FB_INFO_INDEX_FB_TAX_SIZE_KB (0x00000010) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_BASE_KB (0x00000011) +#define NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_SIZE_KB (0x00000012) +#define NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_BASE_KB (0x00000013) +#define NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK (0x00000014) +#define NV2080_CTRL_FB_INFO_INDEX_VISTA_RESERVED_HEAP_SIZE (0x00000015) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE (0x00000016) +#define NV2080_CTRL_FB_INFO_INDEX_RAM_LOCATION (0x00000017) +#define NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN (0x00000018) +#define NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT (0x00000019) +#define NV2080_CTRL_FB_INFO_INDEX_FBP_MASK (0x0000001A) +#define NV2080_CTRL_FB_INFO_INDEX_L2CACHE_SIZE (0x0000001B) +#define NV2080_CTRL_FB_INFO_INDEX_MEMORYINFO_VENDOR_ID (0x0000001C) +#define NV2080_CTRL_FB_INFO_INDEX_BAR1_AVAIL_SIZE (0x0000001D) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_START (0x0000001E) +#define NV2080_CTRL_FB_INFO_INDEX_BAR1_MAX_CONTIGUOUS_AVAIL_SIZE (0x0000001F) +#define NV2080_CTRL_FB_INFO_INDEX_USABLE_RAM_SIZE (0x00000020) +#define NV2080_CTRL_FB_INFO_INDEX_TRAINIG_2T (0x00000021) +#define NV2080_CTRL_FB_INFO_INDEX_LTC_COUNT (0x00000022) +#define NV2080_CTRL_FB_INFO_INDEX_LTS_COUNT (0x00000023) +#define NV2080_CTRL_FB_INFO_INDEX_L2CACHE_ONLY_MODE (0x00000024) +#define NV2080_CTRL_FB_INFO_INDEX_PSEUDO_CHANNEL_MODE (0x00000025) +#define NV2080_CTRL_FB_INFO_INDEX_SMOOTHDISP_RSVD_BAR1_SIZE (0x00000026) +#define NV2080_CTRL_FB_INFO_INDEX_HEAP_OFFLINE_SIZE (0x00000027) +#define NV2080_CTRL_FB_INFO_INDEX_1TO1_COMPTAG_ENABLED (0x00000028) +#define NV2080_CTRL_FB_INFO_INDEX_SUSPEND_RESUME_RSVD_SIZE (0x00000029) +#define NV2080_CTRL_FB_INFO_INDEX_ALLOW_PAGE_RETIREMENT (0x0000002A) +#define NV2080_CTRL_FB_INFO_INDEX_LTC_MASK (0x0000002B) +#define NV2080_CTRL_FB_INFO_POISON_FUSE_ENABLED (0x0000002C) +#define 
NV2080_CTRL_FB_INFO_FBPA_ECC_ENABLED (0x0000002D) +#define NV2080_CTRL_FB_INFO_DYNAMIC_PAGE_OFFLINING_ENABLED (0x0000002E) +#define NV2080_CTRL_FB_INFO_INDEX_FORCED_BAR1_64KB_MAPPING_ENABLED (0x0000002F) +#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_SIZE (0x00000030) +#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_ALIGNMENT (0x00000031) +#define NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_BAR1_MAX_OFFSET_64KB (0x00000032) +#define NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_TOTAL_KB (0x00000033) +#define NV2080_CTRL_FB_INFO_INDEX_PROTECTED_MEM_SIZE_FREE_KB (0x00000034) +#define NV2080_CTRL_FB_INFO_INDEX_ECC_STATUS_SIZE (0x00000035) +#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE (0x00000036) + +#define NV2080_CTRL_FB_INFO_INDEX_MAX (0x35) /* finn: Evaluated from "(NV2080_CTRL_FB_INFO_MAX_LIST_SIZE - 1)" */ + +/* valid fb RAM type values */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_UNKNOWN (0x00000000) +#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDRAM (0x00000001) +#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR1 (0x00000002) /* SDDR and GDDR (aka DDR1 and GDDR1) */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR2 (0x00000003) /* SDDR2 Used on NV43 and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR2 NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR2 /* Deprecated alias */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR2 (0x00000004) /* GDDR2 Used on NV30 and some NV36 */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR3 (0x00000005) /* GDDR3 Used on NV40 and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR4 (0x00000006) /* GDDR4 Used on G80 and later (deprecated) */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR3 (0x00000007) /* SDDR3 Used on G9x and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_DDR3 NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR3 /* Deprecated alias */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5 (0x00000008) /* GDDR5 Used on GT21x and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR2 (0x00000009) /* LPDDR (Low Power SDDR) used on T2x and later. 
*/ + + +#define NV2080_CTRL_FB_INFO_RAM_TYPE_SDDR4 (0x0000000C) /* SDDR4 Used on Maxwell and later */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR4 (0x0000000D) /* LPDDR (Low Power SDDR) used on T21x and later.*/ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM1 (0x0000000E) /* HBM1 (High Bandwidth Memory) used on GP100 */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM2 (0x0000000F) /* HBM2 (High Bandwidth Memory-pseudo channel) */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR5X (0x00000010) /* GDDR5X Used on GP10x */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6 (0x00000011) /* GDDR6 Used on TU10x */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_GDDR6X (0x00000012) /* GDDR6X Used on GA10x */ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_LPDDR5 (0x00000013) /* LPDDR (Low Power SDDR) used on T23x and later.*/ +#define NV2080_CTRL_FB_INFO_RAM_TYPE_HBM3 (0x00000014) /* HBM3 (High Bandwidth Memory) v3 */ + +/* valid RAM LOCATION types */ +#define NV2080_CTRL_FB_INFO_RAM_LOCATION_GPU_DEDICATED (0x00000000) +#define NV2080_CTRL_FB_INFO_RAM_LOCATION_SYS_SHARED (0x00000001) +#define NV2080_CTRL_FB_INFO_RAM_LOCATION_SYS_DEDICATED (0x00000002) + +/* valid Memory Vendor ID values */ +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_SAMSUNG (0x00000001) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_QIMONDA (0x00000002) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ELPIDA (0x00000003) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ETRON (0x00000004) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_NANYA (0x00000005) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_HYNIX (0x00000006) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MOSEL (0x00000007) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_WINBOND (0x00000008) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_ESMT (0x00000009) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_MICRON (0x0000000F) +#define NV2080_CTRL_FB_INFO_MEMORYINFO_VENDOR_ID_UNKNOWN (0xFFFFFFFF) + +#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_UNSUPPORTED (0x00000000) +#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_DISABLED (0x00000001) +#define NV2080_CTRL_FB_INFO_PSEUDO_CHANNEL_MODE_ENABLED (0x00000002) + +/** + * NV2080_CTRL_CMD_FB_GET_INFO + * + * This command returns fb engine information for the associated GPU. + * Requests to retrieve fb information use a list of one or more + * NV2080_CTRL_FB_INFO structures. + * + * fbInfoListSize + * This field specifies the number of entries on the caller's + * fbInfoList. + * fbInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the fb information is to be returned. + * This buffer must be at least as big as fbInfoListSize multiplied + * by the size of the NV2080_CTRL_FB_INFO structure. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_FB_GET_INFO (0x20801301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_FB_GET_INFO_PARAMS { + NvU32 fbInfoListSize; + NV_DECLARE_ALIGNED(NvP64 fbInfoList, 8); +} NV2080_CTRL_FB_GET_INFO_PARAMS; + +#define NV2080_CTRL_CMD_FB_GET_INFO_V2 (0x20801303) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_INFO_V2_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_FB_GET_INFO_V2_PARAMS { + NvU32 fbInfoListSize; + NV2080_CTRL_FB_INFO fbInfoList[NV2080_CTRL_FB_INFO_MAX_LIST_SIZE]; +} NV2080_CTRL_FB_GET_INFO_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_GET_TILE_ADDRESS_INFO + * + * This command returns tile addressing information. + * + * StartAddr + * This parameter returns BAR1 plus the size of the local FB. + * SpaceSize + * This parameter returns the BAR1 aperture size less the size of the + * local FB. + * + * Note that both parameters will contain zero if there is no system tile + * address space. + */ +#define NV2080_CTRL_CMD_FB_GET_TILE_ADDRESS_INFO (0x20801302) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2" */ + +typedef struct NV2080_CTRL_FB_GET_SYSTEM_TILE_ADDRESS_SPACE_INFO { + NV_DECLARE_ALIGNED(NvU64 StartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 SpaceSize, 8); +} NV2080_CTRL_FB_GET_SYSTEM_TILE_ADDRESS_SPACE_INFO; + +/* + * NV2080_CTRL_CMD_FB_GET_BAR1_OFFSET + * + * This command returns the GPU virtual address of a bar1 + * allocation, given the CPU virtual address. + * + * cpuVirtAddress + * This field specifies the associated CPU virtual address of the + * memory allocation. + * gpuVirtAddress + * The GPU virtual address associated with the allocation + * is returned in this field. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_BAR1_OFFSET (0x20801310) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS { + NV_DECLARE_ALIGNED(NvP64 cpuVirtAddress, 8); + NV_DECLARE_ALIGNED(NvU64 gpuVirtAddress, 8); +} NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS; + +/* + * Note: Returns Zeros if no System carveout address info + * + * NV2080_CTRL_CMD_FB_GET_CARVEOUT_ADDRESS_INFO + * + * This command returns FB carveout address space information + * + * StartAddr + * Returns the system memory address of the start of carveout space. + * SpaceSize + * Returns the size of carveout space. 
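+ *
+ * Example (editor's sketch, not part of the original header): reading the
+ * carveout range with the same hypothetical issue_rm_control() wrapper used
+ * in the NV2080_CTRL_CMD_FB_GET_INFO example above:
+ *
+ *     NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO info = { 0 };
+ *
+ *     status = issue_rm_control(hClient, hSubdevice,
+ *                               NV2080_CTRL_CMD_FB_GET_CARVEOUT_ADDRESS_INFO,
+ *                               &info, sizeof(info));
+ *     // On NV_OK, info.StartAddr and info.SpaceSize describe the carveout;
+ *     // both are zero when there is no carveout.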
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_FB_GET_CARVEOUT_ADDRESS_INFO (0x2080130b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO_MESSAGE_ID (0xBU) + +typedef struct NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO { + NV_DECLARE_ALIGNED(NvU64 StartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 SpaceSize, 8); +} NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO; + +/* + * NV2080_CTRL_CMD_FB_GET_CALIBRATION_LOCK_FAILED + * + * This command returns the failure counts for calibration. + * + * flags + * Just one flag for now -- whether to reset the counts. + * driveStrengthRiseCount + * This parameter specifies the failure count for drive strength rising. + * driveStrengthFallCount + * This parameter specifies the failure count for drive strength falling. + * driveStrengthTermCount + * This parameter specifies the failure count for drive strength + * termination. + * slewStrengthRiseCount + * This parameter specifies the failure count for slew strength rising. + * slewStrengthFallCount + * This parameter specifies the failure count for slew strength falling. + * slewStrengthTermCount + * This parameter specifies the failure count for slew strength + * termination. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_INVALID_PARAM_STRUCT + * NVOS_STATUS_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_CALIBRATION_LOCK_FAILED (0x2080130c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS { + NvU32 flags; + NvU32 driveStrengthRiseCount; + NvU32 driveStrengthFallCount; + NvU32 driveStrengthTermCount; + NvU32 slewStrengthRiseCount; + NvU32 slewStrengthFallCount; +} NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS; + +/* valid flags parameter values */ +#define NV2080_CTRL_CMD_FB_GET_CAL_FLAG_NONE (0x00000000) +#define NV2080_CTRL_CMD_FB_GET_CAL_FLAG_RESET (0x00000001) + +/* + * NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_ALLOWED + * + * This command specifies to RM if the scanout compaction feature is allowed or + * not in the current configuration. In hybrid mode when the dGPU is rendering the + * image, the dGPU blit to the scanout surface happens without the mGPU's + * knowledge (directly to system memory), which results in stale compacted + * data resulting in corruption. + * + * This control call can be used to disable the compaction whenever the KMD + * (client) is switching to the pref mode in Hybrid, i.e., whenever there is a + * possibility of the dGPU doing a blit to the mGPU scanout surface. Compaction can + * be enabled when the system is back in hybrid power mode as the mGPU will be + * rendering the image. + * + * allowCompaction + * This parameter specifies if the display compaction feature is allowed + * or not allowed. + * immediate + * This parameter specifies whether compaction has to be enabled or + * disabled immediately (based on the value of the allowCompaction field) or + * during the next modeset.
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_INVALID_PARAM_STRUCT + * NVOS_STATUS_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_ALLOWED (0x2080130d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0xD" */ // Deprecated, removed from RM + +typedef struct NV2080_CTRL_FB_SET_SCANOUT_COMPACTION_ALLOWED_PARAMS { + NvU32 allowCompaction; + NvU32 immediate; +} NV2080_CTRL_FB_SET_SCANOUT_COMPACTION_ALLOWED_PARAMS; + +/* valid allowCompaction values */ +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_ALLOW (0x00000001) +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_DISALLOW (0x00000000) + +/* valid immediate values */ +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_IMMEDIATE (0x00000001) +#define NV2080_CTRL_CMD_FB_SET_SCANOUT_COMPACTION_NOT_IMMEDIATE (0x00000000) + +/* + * NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE + * + * This command flushes a cache on the GPU which all memory accesses go + * through. The types of flushes supported by this API may not be supported by + * all hardware. Attempting an unsupported flush type will result in an error. + * + * addressArray + * An array of physical addresses in the aperture defined by + * NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE. Each entry points to a + * contiguous block of memory of size memBlockSizeBytes. The addresses are + * aligned down to addressAlign before coalescing adjacent addresses and + * sending flushes to hardware. + * addressAlign + * Used to align-down addresses held in addressArray. A value of 0 will be + * forced to 1 to avoid a divide by zero. Value is treated as minimum + * alignment and any hardware alignment requirements above this value will + * be honored. + * addressArraySize + * The number of entries in addressArray. + * memBlockSizeBytes + * The size in bytes of each memory block pointed to by addressArray. + * flags + * Contains flags to control various aspects of the flush. Valid values + * are defined in NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS*. Not all flags are + * valid for all defined FLUSH_MODEs or all GPUs. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + * + * See Also: + * NV0080_CTRL_CMD_DMA_FLUSH + * Performs flush operations in broadcast for the GPU cache and other hardware + * engines. Use this call if you want to flush all GPU caches in a + * broadcast device. + * NV0041_CTRL_CMD_SURFACE_FLUSH_GPU_CACHE + * Flushes memory associated with a single allocation if the hardware + * supports it. Use this call if you want to flush a single allocation and + * you have a memory object describing the physical memory.
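+ *
+ * Example (editor's sketch, not part of the original header): requesting a
+ * full-cache writeback plus invalidate, assuming the DRF_DEF helper from
+ * nvmisc.h and the same hypothetical issue_rm_control() wrapper as above:
+ *
+ *     NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS params = { 0 };
+ *
+ *     params.flags =
+ *         DRF_DEF(2080_CTRL_FB_FLUSH_GPU_CACHE, _FLAGS, _FLUSH_MODE, _FULL_CACHE) |
+ *         DRF_DEF(2080_CTRL_FB_FLUSH_GPU_CACHE, _FLAGS, _WRITE_BACK, _YES) |
+ *         DRF_DEF(2080_CTRL_FB_FLUSH_GPU_CACHE, _FLAGS, _INVALIDATE, _YES);
+ *     // The address array fields are left zeroed in this FULL_CACHE sketch.
+ *
+ *     status = issue_rm_control(hClient, hSubdevice,
+ *                               NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE,
+ *                               &params, sizeof(params));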
+ */ +#define NV2080_CTRL_CMD_FB_FLUSH_GPU_CACHE (0x2080130e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_MAX_ADDRESSES 500 + +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 addressArray[NV2080_CTRL_FB_FLUSH_GPU_CACHE_MAX_ADDRESSES], 8); + NvU32 addressArraySize; + NvU32 addressAlign; + NV_DECLARE_ALIGNED(NvU64 memBlockSizeBytes, 8); + NvU32 flags; +} NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS; + +/* valid fields and values for flags */ +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE 1:0 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_VIDEO_MEMORY (0x00000000) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_SYSTEM_MEMORY (0x00000001) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_PEER_MEMORY (0x00000002) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK 2:2 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_NO (0x00000000) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_WRITE_BACK_YES (0x00000001) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE 3:3 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_NO (0x00000000) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_INVALIDATE_YES (0x00000001) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE 4:4 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE_ADDRESS_ARRAY (0x00000000) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE_FULL_CACHE (0x00000001) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH 5:5 +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH_NO (0x00000000) +#define NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FB_FLUSH_YES (0x00000001) + +/* + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY (deprecated; use NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2 instead) + * + * These commands access the cache allocation policy on a specific + * engine, if supported. + * + * engine + * Specifies the target engine. Possible values are defined in + * NV2080_ENGINE_TYPE. + * allocPolicy + * Specifies the read/write allocation policy of the cache on the specified + * engine. Possible values are defined in + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_READS and + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_WRITES. + * + */ +typedef struct NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS { + NvU32 engine; + NvU32 allocPolicy; +} NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS; + +/* valid values for allocPolicy */ +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_READS 0:0 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_READS_NO (0x00000000) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_READS_YES (0x00000001) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_WRITES 1:1 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_WRITES_NO (0x00000000) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_WRITES_YES (0x00000001) + + +/* + * NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY + * + * This command is deprecated. + * Use NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY_V2 instead. + * + * This command sets the state of the cache allocation policy on a specific + * engine, if supported. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY (0x2080130f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0xF" */ + +/* + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAM + * + * These commands access the cache allocation policy on a specific + * client, if supported. + * + * count + * Specifies the number of entries in entry. + * entry + * Specifies an array of allocation policy entries. + * + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY + * + * clients + * Specifies the target client. Possible values are defined in + * NV2080_CLIENT_TYPE_*. + * allocPolicy + * Specifies the read/write allocation policy of the cache on the specified + * engine. Possible values are defined in + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS and + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES. + * + * NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY_SIZE + * + * Specifies the maximum number of allocation policy entries allowed + */ +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY_SIZE 11 + +typedef struct NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY { + NvU32 client; + NvU32 allocPolicy; +} NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY; + +typedef struct NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS { + NvU32 count; + NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY entry[NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_ENTRY_SIZE]; +} NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS; + +/* valid values for allocPolicy */ +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS 0:0 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_DISABLE (0x00000000) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_ENABLE (0x00000001) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_ALLOW 1:1 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_ALLOW_NO (0x00000000) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_READS_ALLOW_YES (0x00000001) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES 2:2 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_DISABLE (0x00000000) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_ENABLE (0x00000001) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_ALLOW 3:3 +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_ALLOW_NO (0x00000000) +#define NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_WRITES_ALLOW_YES (0x00000001) + + +/* + * NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY_V2 + * + * This command sets the state of the cache allocation policy on a specific + * engine, if supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY_V2 (0x20801318) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x18" */ + +/* + * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY (deprecated; use NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY_V2 instead) + * + * This command gets the state of the cache allocation policy on a specific + * engine, if supported. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY (0x20801312) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x12" */ + +/* + * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY_V2 + * + * This command gets the state of the cache allocation policy on a specific + * engine, if supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_ALLOC_POLICY_V2 (0x20801319) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x19" */ + + +/* + * NV2080_CTRL_CMD_FB_IS_KIND + * + * This command is used to perform various operations like 'IS_KIND_VALID', + * 'IS_KIND_COMPRESSIBLE' on the kind passed by the caller. The operation to be + * performed should be passed in the 'operation' parameter of + * NV2080_CTRL_FB_IS_KIND_PARAMS, the kind on which the operation is to be + * performed should be passed in the 'kind' parameter. The result of the + * operation (true/false) will be returned in the 'result' parameter. + * + * operation + * Specifies what operation is to be performed on the kind passed by the + * caller. The supported operations are + * NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure is + * supported for this GPU. Returns nonzero value in 'result' parameter + * if the input kind is supported, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure is + * compressible. Returns nonzero value in 'result' parameter if the + * input kind is compressible, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_1 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure supports + * 1 bit compression. Returns nonzero value in 'result' parameter if + * kind supports 1 bit compression, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_2 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure supports + * 2 bit compression. Returns nonzero value in 'result' parameter if + * kind supports 2 bit compression, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_4 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure supports + * 4 bit compression. Returns nonzero value in 'result' parameter if + * kind supports 4 bit compression, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure + * supports ZBC. Returns nonzero value in 'result' parameter if the + * input kind supports ZBC, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_1 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure + * supports 1 bit ZBC. Returns nonzero value in 'result' parameter if + * the input kind supports 1 bit ZBC, else returns zero in the result.
+ * NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_2 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure + * supports 2 bit ZBC. Returns nonzero value in 'result' parameter if + * the input kind supports 2 bit ZBC, else returns zero in the result. + * NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_4 + * This operation checks whether the kind passed in the 'kind' + * parameter of the 'NV2080_CTRL_FB_IS_KIND_PARAMS' structure + * supports 4 bit ZBC. Returns nonzero value in 'result' parameter if + * the input kind supports 4 bit ZBC, else returns zero in the result. + * kind + * Specifies the kind on which the operation is to be carried out. The + * legal range of values for the kind parameter is different on different + * GPUs. For example, on Fermi the valid range is 0x00 to 0xfe. Still, some values + * inside this legal range can be invalid, i.e., not defined. + * So it's always better to first check if a particular kind is supported on + * the current GPU with the 'NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED' operation. + * result + * Upon return, this parameter will hold the result (true/false) of the + * operation performed on the kind. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_IS_KIND (0x20801313) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_IS_KIND_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_IS_KIND_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_FB_IS_KIND_PARAMS { + NvU32 operation; + NvU32 kind; + NvBool result; +} NV2080_CTRL_FB_IS_KIND_PARAMS; + +/* valid values for operation */ +#define NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED (0x00000000) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE (0x00000001) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_1 (0x00000002) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_2 (0x00000003) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_4 (0x00000004) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC (0x00000005) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_1 (0x00000006) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_2 (0x00000007) +#define NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_4 (0x00000008) + +/** + * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_INFO + * + * This command returns the state of a cache which all GPU memory accesses go + * through. + * + * powerState + * Returns the power state of the cache. Possible values are defined in + * NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE. + * + * writeMode + * Returns the write mode of the cache. Possible values are defined in + * NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE. + * + * bypassMode + * Returns the bypass mode of the L2 cache. Possible values are defined in + * NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE. + * + * rcmState + * Returns the RCM state of the cache. Possible values are defined in + * NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE.
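+ *
+ * Example (editor's sketch, not part of the original header): reading the
+ * cache state with the same hypothetical issue_rm_control() wrapper as above:
+ *
+ *     NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS info = { 0 };
+ *
+ *     status = issue_rm_control(hClient, hSubdevice,
+ *                               NV2080_CTRL_CMD_FB_GET_GPU_CACHE_INFO,
+ *                               &info, sizeof(info));
+ *     if ((status == NV_OK) &&
+ *         (info.writeMode == NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITEBACK))
+ *     {
+ *         // Dirty lines may be cached; a writeback flush may be required
+ *         // before the memory is read through another path.
+ *     }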
+ * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_INFO (0x20801315) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS { + NvU32 powerState; + NvU32 writeMode; + NvU32 bypassMode; + NvU32 rcmState; +} NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS; + +/* valid values for powerState */ +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE_ENABLED (0x00000000) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_POWER_STATE_DISABLED (0x00000001) +/* valid values for writeMode */ +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITETHROUGH (0x00000000) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_WRITE_MODE_WRITEBACK (0x00000001) +/* valid values for bypassMode */ +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE_DISABLED (0x00000000) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_BYPASS_MODE_ENABLED (0x00000001) +/* valid values for rcmState */ +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_FULL (0x00000000) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_TRANSITIONING (0x00000001) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_REDUCED (0x00000002) +#define NV2080_CTRL_FB_GET_GPU_CACHE_INFO_RCM_STATE_ZERO_CACHE (0x00000003) + +/* + * NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY + * + * These commands access the cache promotion policy on a specific + * engine, if supported by the hardware. + * + * Cache promotion refers to the GPU promoting a memory read to a larger + * size to preemptively fill the cache so future reads to nearby memory + * addresses will hit in the cache. + * + * engine + * Specifies the target engine. Possible values are defined in + * NV2080_ENGINE_TYPE. + * promotionPolicy + * Specifies the promotion policy of the cache on the specified + * engine. Possible values are defined by + * NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_*. These values are in terms + * of the hardware cache line size. + * + */ +typedef struct NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_PARAMS { + NvU32 engine; + NvU32 promotionPolicy; +} NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_PARAMS; + +/* valid values for promotionPolicy */ +#define NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_NONE (0x00000000) +#define NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_QUARTER (0x00000001) +#define NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_HALF (0x00000002) +#define NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_FULL (0x00000003) + + +/* + * NV2080_CTRL_CMD_FB_SET_GPU_CACHE_PROMOTION_POLICY + * + * This command sets the cache promotion policy on a specific engine, if + * supported by the hardware. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_SET_GPU_CACHE_PROMOTION_POLICY (0x20801316) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x16" */ // Deprecated, removed form RM + + +/* + * NV2080_CTRL_CMD_FB_GET_GPU_CACHE_PROMOTION_POLICY + * + * This command gets the cache promotion policy on a specific engine, if + * supported by the hardware. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_GPU_CACHE_PROMOTION_POLICY (0x20801317) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x17" */ // Deprecated, removed form RM + +/* + * NV2080_CTRL_FB_CMD_GET_FB_REGION_INFO + * + * This command returns the FB memory region characteristics. + * + * numFBRegions + * Number of valid regions returned in fbRegion[] + * fbRegion[].base + * Base address of region. The first valid address in the range + * [base..limit]. + * fbRegion[].limit + * Last/end address of region. The last valid address in the range + * [base..limit]. + * (limit - base + 1) = size of the region + * fbRegion[].reserved + * Amount of memory that RM speculatively needs within the region. A + * client doing its own memory management should leave at least this much + * memory available for RM use. This particularly applies to a driver + * model like LDDM. + * fbRegion[].performance + * Relative performance of this region compared to other regions. + * The definition is vague, and only connotes relative bandwidth or + * performance. The higher the value, the higher the performance. + * fbRegion[].supportCompressed + * TRUE if compressed surfaces/kinds are supported + * FALSE if compressed surfaces/kinds are not allowed to be allocated in + * this region + * fbRegion[].supportISO + * TRUE if ISO surfaces/kinds are supported (Display, cursor, video) + * FALSE if ISO surfaces/kinds are not allowed to be allocated in this + * region + * fbRegion[].bProtected + * TRUE if this region is a protected memory region. If true only + * allocations marked as protected (NVOS32_ALLOC_FLAGS_PROTECTED) can be + * allocated in this region. + * fbRegion[].blackList[] - DEPRECATED: Use supportISO + * TRUE for each NVOS32_TYPE_IMAGE* that is NOT allowed in this region. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO (0x20801320) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES 17 + +typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES]; + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NV_DECLARE_ALIGNED(NvU64 reserved, 8); + NvU32 performance; + NvBool supportCompressed; + NvBool supportISO; + NvBool bProtected; + NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList; +} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16 + +#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS { + NvU32 numFBRegions; + NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8); +} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_OFFLINE_PAGES + * + * This command adds video memory page addresses to the list of offlined + * addresses so that they're not allocated to any client. The newly offlined + * addresses take effect after a reboot. 
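+ *
+ * Example (editor's sketch, not part of the original header; the parameters
+ * are described below): submitting one 64KB page for offlining, where
+ * pageNumEccOn/pageNumEccOff are the caller's page numbers and
+ * issue_rm_control() is the same hypothetical wrapper as above:
+ *
+ *     NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS params = { 0 };
+ *
+ *     params.offlined[0].pageAddressWithEccOn  = pageNumEccOn;
+ *     params.offlined[0].pageAddressWithEccOff = pageNumEccOff;
+ *     params.pageSize     = NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_64K;
+ *     params.validEntries = 1;
+ *
+ *     status = issue_rm_control(hClient, hSubdevice,
+ *                               NV2080_CTRL_CMD_FB_OFFLINE_PAGES,
+ *                               &params, sizeof(params));
+ *     // On NV_OK, params.numPagesAdded reports how many entries were accepted.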
+ * + * offlined + * This input parameter is an array of NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO + * structures, containing the video memory physical page numbers that + * are to be blacklisted. This array can hold a maximum of + * NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES address pairs. Valid entries are adjacent. + * pageSize + * This input parameter contains the size of the page that is to be + * blacklisted. + * validEntries + * This input parameter specifies the number of valid entries in the + * offlined array. + * numPagesAdded + * This output parameter specifies how many of the validEntries were + * actually offlined. If numPagesAdded < validEntries, it + * means that only addresses from offlined[0] to offlined[numPagesAdded - 1] + * were added to the list of offlined addresses. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_OFFLINE_PAGES (0x20801321) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES (0x00000040) +#define NV2080_CTRL_FB_OFFLINED_PAGES_INVALID_ADDRESS (0xffffffffffffffffULL) +#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_4K (0x00000000) +#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_64K (0x00000001) +#define NV2080_CTRL_FB_OFFLINED_PAGES_PAGE_SIZE_128K (0x00000002) + +/* + * NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO + * + * pageAddressWithEccOn + * Address of the memory page retired when ECC is enabled on the board. + * pageAddressWithEccOff + * Address of the memory page retired when ECC is disabled on the board. + * rbcAddress + * Row/Bank/Column Address of the faulty memory which caused the page to + * be retired + * source + * The reason for the page to be retired + * status + * Non-exceptional reasons for a page retirement failure + * NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_OK + * No error + * NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_PENDING_RETIREMENT + * The given address is already pending retirement or has + * been retired during the current driver run. The page + * will be offlined during the next driver run. + * NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_BLACKLISTING_FAILED + * The given page was retired on a previous driver run, + * so it should not be accessible unless offlining failed. + * Failing to offline a page is strongly indicative of a + * driver offlining bug.
+ * NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_TABLE_FULL + * The PBL is full and no more pages can be retired + * NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_INTERNAL_ERROR + * Internal driver error + * + */ + + + +typedef struct NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO { + NV_DECLARE_ALIGNED(NvU64 pageAddressWithEccOn, 8); + NV_DECLARE_ALIGNED(NvU64 pageAddressWithEccOff, 8); + NvU32 rbcAddress; + NvU32 source; + NvU32 status; + NvU32 timestamp; +} NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO; + +/* valid values for source */ +#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_MULTIPLE_SBE (0x00000002) +#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE (0x00000004) + + + +/* valid values for status */ +#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_OK (0x00000000) +#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_PENDING_RETIREMENT (0x00000001) +#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_BLACKLISTING_FAILED (0x00000002) +#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_TABLE_FULL (0x00000003) +#define NV2080_CTRL_FB_OFFLINED_PAGES_STATUS_INTERNAL_ERROR (0x00000004) + +/* deprecated */ +#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_MULTIPLE_SBE NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_MULTIPLE_SBE +#define NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DBE NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE + + +#define NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES], 8); + NvU32 pageSize; + NvU32 validEntries; + NvU32 numPagesAdded; +} NV2080_CTRL_FB_OFFLINE_PAGES_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES + * + * This command returns the list of video memory page addresses in the + * Inforom's blacklist. + * + * offlined + * This output parameter is an array of NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO + * structures, containing the video memory physical page numbers that + * are blacklisted. This array can hold a maximum of + * NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES address pairs. Valid entries are adjacent. + * The array also contains the Row/Bank/Column Address and source. + * validEntries + * This output parameter specifies the number of valid entries in the + * offlined array. + * bRetirementPending (DEPRECATED, use retirementPending instead) + * This output parameter returns if any pages on the list are pending + * retirement. + * retirementPending + * Communicates to the caller whether retirement updates are pending and the + * reason for the updates. Possible fields are: + * NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_*: + * NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE: + * Indicates whether pages are pending retirement due to SBE. + * NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE: + * Indicates whether pages are pending retirement due to DBE. A driver + * reload is needed to retire the bad memory pages and allow compute + * applications to run.
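+ *
+ * Example (editor's sketch, not part of the original header): listing the
+ * offlined pages and testing the DBE retirement-pending bit, assuming the
+ * FLD_TEST_DRF helper from nvmisc.h, the RETIREMENT_PENDING fields defined
+ * below, and the same hypothetical issue_rm_control() wrapper as above:
+ *
+ *     NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS params = { 0 };
+ *
+ *     status = issue_rm_control(hClient, hSubdevice,
+ *                               NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES,
+ *                               &params, sizeof(params));
+ *     for (NvU32 i = 0; (status == NV_OK) && (i < params.validEntries); i++)
+ *     {
+ *         // Inspect params.offlined[i].pageAddressWithEccOn, .source, .status.
+ *     }
+ *     if ((status == NV_OK) &&
+ *         FLD_TEST_DRF(2080_CTRL_FB_GET_OFFLINED_PAGES, _RETIREMENT_PENDING,
+ *                      _DBE, _TRUE, params.retirementPending))
+ *     {
+ *         // A driver reload is required before the pending retirements apply.
+ *     }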
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + + + +#define NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES (0x20801322) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE 0:0 +#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE_FALSE 0 +#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_SBE_TRUE 1 +#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE 1:1 +#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE_FALSE 0 +#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_RETIREMENT_PENDING_DBE_TRUE 1 + + + +#define NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES], 8); + NvU32 validEntries; + NvBool bRetirementPending; + NvU8 retirementPending; +} NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_QUERY_ACR_REGION + * + * This control command is used to query the secured region allocated + * + * queryType + * NV2080_CTRL_CMD_FB_ACR_QUERY_GET_REGION_STATUS: Provides the alloc + * status and ACR region ID. + * NV2080_CTRL_CMD_FB_QUERY_MAP_REGION : Maps the region on BAR1 + * it returns the "pCpuAddr" and pPriv to user. + * NV2080_CTRL_CMD_FB_QUERY_UNMAP_REGION: Unmaps the mapped region. + * it takes the pPriv as input + * + * clientReq : struct ACR_REQUEST_PARAMS + * It is used to find the allocated ACR region for that client + * clientId : ACR Client ID + * reqReadMask : read mask of ACR region + * reqWriteMask : Write mask of ACR region + * regionSize : ACR region Size + * + * clientReqStatus : struct ACR_STATUS_PARAMS + * This struct is stores the output of requested ACR region. + * allocStatus : Allocated Status of ACR region + * regionId : ACR region ID + * physicalAddress : Physical address on FB + * + * + * NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE + * NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_NONE : Control command executed successfully + * NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_INVALID_CLIENT_REQUEST : Please check the parameter + * for ACR client request + * NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_FAILED_TO_MAP_ON_BAR1 : RM Fails to map ACR region + * on BAR1 + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED +*/ +#define NV2080_CTRL_CMD_FB_QUERY_ACR_REGION (0x20801325) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS_MESSAGE_ID" */ + +// +// We can create an ACR region by using RMCreateAcrRegion[1|2] regkey or mods -acr[1|2]_size +// Client ID for such region is 2 in RM. 
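+//
+// Example (editor's sketch, not part of the original header): querying the
+// region status for the client ID below, using the same hypothetical
+// issue_rm_control() wrapper as in the examples above.
+//
+//     NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS params = { 0 };
+//
+//     params.queryType          = NV2080_CTRL_CMD_FB_ACR_QUERY_GET_CLIENT_REGION_STATUS;
+//     params.clientReq.clientId = NV2080_CTRL_CMD_FB_ACR_CLIENT_ID;
+//
+//     status = issue_rm_control(hClient, hSubdevice,
+//                               NV2080_CTRL_CMD_FB_QUERY_ACR_REGION,
+//                               &params, sizeof(params));
+//     // On NV_OK, check params.errorCode and params.clientReqStatus.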
+// +#define NV2080_CTRL_CMD_FB_ACR_CLIENT_ID 2 + +typedef enum NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE { + NV2080_CTRL_CMD_FB_ACR_QUERY_GET_CLIENT_REGION_STATUS = 0, + NV2080_CTRL_CMD_FB_ACR_QUERY_GET_REGION_PROPERTY = 1, + NV2080_CTRL_CMD_FB_ACR_QUERY_GET_FALCON_STATUS = 2, +} NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE; + +typedef enum NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE { + NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_NONE = 0, + NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_INVALID_CLIENT_REQUEST = 1, +} NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE; + +typedef struct ACR_REQUEST_PARAMS { + NvU32 clientId; + NvU32 reqReadMask; + NvU32 reqWriteMask; + NvU32 regionSize; +} ACR_REQUEST_PARAMS; + +typedef struct ACR_REGION_ID_PROP { + NvU32 regionId; + NvU32 readMask; + NvU32 writeMask; + NvU32 regionSize; + NvU32 clientMask; + NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8); +} ACR_REGION_ID_PROP; + +typedef struct ACR_STATUS_PARAMS { + NvU32 allocStatus; + NvU32 regionId; + NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8); +} ACR_STATUS_PARAMS; + +typedef struct ACR_REGION_HANDLE { + NvHandle hClient; + NvHandle hParent; + NvHandle hMemory; + NvU32 hClass; + NvHandle hDevice; +} ACR_REGION_HANDLE; + +typedef struct ACR_FALCON_LS_STATUS { + NvU16 falconId; + NvBool bIsInLs; +} ACR_FALCON_LS_STATUS; + +#define NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS { + NV2080_CTRL_CMD_FB_ACR_QUERY_TYPE queryType; + NV2080_CTRL_CMD_FB_ACR_QUERY_ERROR_CODE errorCode; + NV_DECLARE_ALIGNED(ACR_REGION_ID_PROP acrRegionIdProp, 8); + ACR_REQUEST_PARAMS clientReq; + NV_DECLARE_ALIGNED(ACR_STATUS_PARAMS clientReqStatus, 8); + ACR_REGION_HANDLE handle; + ACR_FALCON_LS_STATUS falconStatus; +} NV2080_CTRL_CMD_FB_QUERY_ACR_REGION_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_CLEAR_OFFLINED_PAGES + * + * This command clears offlined video memory page addresses from the Inforom. + * + * sourceMask + * This is a bit mask of NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE. Pages + * offlined from the specified sources will be cleared/removed from the + * Inforom PBL object denylist. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_CLEAR_OFFLINED_PAGES (0x20801326) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x26U) + +typedef struct NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS { + NvU32 sourceMask; +} NV2080_CTRL_FB_CLEAR_OFFLINED_PAGES_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO + * + * Gets pointer to then object of class CompBitCopy, which is used for swizzling + * compression bits in the compression backing store. The caller is expected to + * have the appropriate headers for class CompBitCopy. Also retrieves values of some + * parameters needed to call the compbit swizzling method. + * + * @params[out] void *pCompBitCopyObj + * Opaque pointer to object of class CompBitCopy + * @params[out] void *pSwizzleParams + * Opaque pointer to values needed to call the compbit + * swizzle method. 
+ * + * Possible status values returned are: + * NV_OK NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO (0x20801327) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS_MESSAGE_ID (0x27U) + +typedef struct NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pCompBitCopyObj, 8); + NV_DECLARE_ALIGNED(NvP64 pSwizzleParams, 8); +} NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_GET_LTC_INFO_FOR_FBP + * + * Gets the count and mask of LTCs for a given FBP. + * + * fbpIndex + * The physical index of the FBP to get LTC info for. + * ltcMask + * The mask of active LTCs for the given FBP. + * ltcCount + * The count of active LTCs for the given FBP. + * ltsMask + * The mask of active LTSs for the given FBP + * ltsCount + * The count of active LTSs for the given FBP + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_GET_LTC_INFO_FOR_FBP (0x20801328) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS_MESSAGE_ID (0x28U) + +typedef struct NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS { + NvU8 fbpIndex; + NvU32 ltcMask; + NvU32 ltcCount; + NvU32 ltsMask; + NvU32 ltsCount; +} NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS; + + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT < Deprecated > + * + * "set the context" for the following CompBitCopy member functions. + * These are the CompBitCopy member variables that remain constant + * over multiple CompBitCopy member function calls, yet stay the same + * throughout a single surface eviction. + * + * @params[in] UINT64 backingStorePA; + * Physical Address of the Backing Store + * @params[in] UINT08 *backingStoreVA; + * Virtual Address of the Backing Store + * @params[in] UINT64 backingStoreChunkPA; + * Physical Address of the "Chunk Buffer" + * @params[in] UINT08 *backingStoreChunkVA; + * Virtual Address of the "Chunk Buffer" + * @params[in] UINT32 backingStoreChunkSize; + * Size of the "Chunk Buffer" + * @params[in] UINT08 *cacheWriteBitMap; + * Pointer to the bitmap indicating which parts of the + * "Chunk" were updated. + * @params[in] bool backingStoreChunkOverfetch; + * Overfetch factor. + * @params[in] UINT32 PageSizeSrc; + * Page size of Source Surface. + * @params[in] UINT32 PageSizeDest; + * Page size of Destination Surface. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT (0x20801329) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x29" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT_PARAMS { + NvU32 CBCBaseAddress; + NV_DECLARE_ALIGNED(NvU64 backingStorePA, 8); + NV_DECLARE_ALIGNED(NvU8 *backingStoreVA, 8); + NV_DECLARE_ALIGNED(NvU64 backingStoreChunkPA, 8); + NV_DECLARE_ALIGNED(NvU8 *backingStoreChunkVA, 8); + NvU32 backingStoreChunkSize; + NV_DECLARE_ALIGNED(NvU8 *cacheWriteBitMap, 8); + NvBool backingStoreChunkOverfetch; + NvU32 PageSizeSrc; + NvU32 PageSizeDest; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_CONTEXT_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS < Deprecated > + * + * Retrieves the Compression and Fast Clear bits for the surface+offset given.
+ * + * @params[out] NvU32 *fcbits; + * Fast Clear Bits returned + * @params[out] NvU32 *compbits; + * Compression Bits returned + * @params[in] NvU64 dataPhysicalStart; + * Start Address of Data + * @params[in] NvU64 surfaceOffset; + * Offset in the surface + * @params[in] NvU32 comptagLine; + * Compression Tag Number + * @params[in] NvBool upper64KBCompbitSel; + * Selects Upper or Lower 64K + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS (0x2080132a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2A" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS_PARAMS { + NV_DECLARE_ALIGNED(NvU32 *fcbits, 8); + NV_DECLARE_ALIGNED(NvU32 *compbits, 8); + NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8); + NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8); + NvU32 comptagLine; + NvBool upper64KBCompbitSel; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITS_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS < Deprecated > + * + * Sets the Compression and Fast Clear bits for the surface+offset given. + * + * @params[in] NvU32 fcbits; + * Fast Clear Bits to write. + * @params[in] NvU32 compbits; + * Compression Bits to write + * @params[in] NvBool writeFc; + * Indicates if Fast Clear Bits should be written + * @params[in] NvU64 dataPhysicalStart; + * Start Address of Data + * @params[in] NvU64 surfaceOffset; + * Offset in the surface + * @params[in] NvU32 comptagLine; + * Compression Tag Number + * @params[in] NvBool upper64KBCompbitSel; + * Selects Upper or Lower 64K + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS (0x2080132b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2B" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS_PARAMS { + NvU32 fcbits; + NvU32 compbits; + NvBool writeFc; + NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8); + NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8); + NvU32 comptagLine; + NvBool upper64KBCompbitSel; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITS_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB < Deprecated > + * + * Read 64KB chunk of CompBits + * + * @params[in] NvU64 SrcDataPhysicalStart; + * Start Address of Data + * @params[in] NvU32 SrcComptagLine; + * Compression Tag Number + * @params[in] NvU32 page64KB; + * Which 64K block to read from. + * @params[out] NvU32 *compbitBuffer; + * Buffer for CompBits read. + * @params[in] NvBool upper64KBCompbitSel; + * Selects Upper or Lower 64K + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB (0x2080132c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2C" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB_PARAMS { + NV_DECLARE_ALIGNED(NvU64 SrcDataPhysicalStart, 8); + NvU32 SrcComptagLine; + NvU32 page64KB; + NV_DECLARE_ALIGNED(NvU32 *compbitBuffer, 8); + NvBool upper64KBCompbitSel; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPBITS64KB_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB < Deprecated > + * + * Write 64K chunk of CompBits. + * + * @params[in] NvU64 DstDataPhysicalStart; + * Start Address of Data + * @params[in] NvU32 DstComptagLine; + * Compression Tag Number + * @params[in] NvU32 page64KB; + * Which 64K block to write to.
+ * @params[in] NvU32 *compbitBuffer; + * Buffer for CompBits to write. + * @params[in] NvBool upper64KBCompbitSel + * Selects Upper or Lower 64K + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB (0x2080132d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2D" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB_PARAMS { + NV_DECLARE_ALIGNED(NvU64 DstDataPhysicalStart, 8); + NvU32 DstComptagLine; + NvU32 page64KB; + NV_DECLARE_ALIGNED(NvU32 *compbitBuffer, 8); + NvBool upper64KBCompbitSel; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPBITS64KB_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITSPS < Deprecated > + * + * The PS (Performance Path, or Optimized path, or Per Slice version) + * of GetCompBits. + * + * @params[out] NvU32 *fcbits; + * Buffer to receive Fast Clear Bits. + * @params[out] NvU32 *compbits; + * Buffer to receive Compression Bits. + * @params[out] NvU32 *compCacheLine; + * Buffer to receive Comp Cache Line data. + * @params[in] NvU64 dataPhysicalStart; + * Start Address of Data + * @params[in] NvU64 surfaceOffset; + * Offset in the surface + * @params[in] NvU32 comptagLine; + * Compression Tag Line Number + * @params[in] NvU32 ROPTile_offset; + * Offset in the surface of the ROP tile. + * @params[in] NvBool upper64KBCompbitSel; + * Selects Upper or Lower 64K + * @params[in] NvBool getFcBits; + * Indicates if fast clear bits should be returned. + * @params[in] NvP64 derivedParams + * Actually a CompBitDerivedParams structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITSPS (0x2080132e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2E" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITSPS_PARAMS { + NV_DECLARE_ALIGNED(NvU32 *fcbits, 8); + NV_DECLARE_ALIGNED(NvU32 *compbits, 8); + NV_DECLARE_ALIGNED(NvU32 *compCacheLine, 8); + NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8); + NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8); + NvU32 comptagLine; + NvU32 ROPTile_offset; + NvBool upper64KBCompbitSel; + NvBool getFcBits; + NV_DECLARE_ALIGNED(NvP64 derivedParams, 8); +} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPBITSPS_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITSPS < Deprecated > + * + * The PS (Performance Path, or Optimized path, or Per Slice version) + * of PutCompBits. + * + * @params[in] NvU32 fcbits; + * Buffer with Fast Clear Bits to write. + * @params[in] NvU32 compbits; + * Compression Bits to write. + * @params[in] NvBool writeFc + * Indicates if Fast Clear Bits should be written. + * @params[in] NvU32 *compCacheLine; + * Buffer to receive Comp Cache Line data. + * @params[in] NvU64 dataPhysicalStart; + * Start Address of Data + * @params[in] NvU64 surfaceOffset; + * Offset in the surface + * @params[in] NvU32 comptagLine; + * Compression Tag Line Number + * @params[in] NvU32 ROPTile_offset; + * Offset in the surface of the ROP tile. + * @params[in] NvBool upper64KBCompbitSel; + * Selects Upper or Lower 64K + * @params[in] NvP64 derivedParams + * Actually a CompBitDerivedParams structure.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITSPS (0x2080132f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x2F" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITSPS_PARAMS { + NvU32 fcbits; + NvU32 compbits; + NvBool writeFc; + NV_DECLARE_ALIGNED(NvU32 *compCacheLine, 8); + NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8); + NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8); + NvU32 comptagLine; + NvU32 ROPTile_offset; + NvBool upper64KBCompbitSel; + NV_DECLARE_ALIGNED(NvP64 derivedParams, 8); +} NV2080_CTRL_CMD_FB_COMPBITCOPY_PUT_COMPBITSPS_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPCACHELINEPS < Deprecated > + * + * The PS (Performance Path, or Optimized path, or Per Slice version) + * of ReadCompCacheLine. + * + * @params[out] NvU32 *compCacheLine; + * Buffer for Comp Cache Line Read + * @params[in] NvU32 comptagLine; + * Comp Tag Line Number to read + * @params[in] NvU32 partition; + * FB Partition of the desired Comp Cache Line + * @params[in] NvU32 slice; + * Slice of the desired Comp Cache Line + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPCACHELINEPS (0x20801330) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x30" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPCACHELINEPS_PARAMS { + NV_DECLARE_ALIGNED(NvU32 *compCacheLine, 8); + NvU32 comptagLine; + NvU32 partition; + NvU32 slice; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_READ_COMPCACHELINEPS_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPCACHELINEPS < Deprecated > + * + * The PS (Performance Path, or Optimized path, or Per Slice version) + * of WriteCompCacheLine. + * + * @params[in] NvU32 *compCacheLine; + * Buffer for Comp Cache Line to Write + * @params[in] NvU32 comptagLine; + * Comp Tag Line Number to Write + * @params[in] NvU32 partition; + * FB Partition of the desired Comp Cache Line + * @params[in] NvU32 slice; + * Slice of the desired Comp Cache Line + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPCACHELINEPS (0x20801331) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x31" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPCACHELINEPS_PARAMS { + NV_DECLARE_ALIGNED(NvU32 *compCacheLine, 8); + NvU32 comptagLine; + NvU32 partition; + NvU32 slice; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_WRITE_COMPCACHELINEPS_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPCACHELINE_BOUNDS < Deprecated > + * + * Used by PS (Performance Path, or Optimized path, or Per Slice version) + * to retrieve upper and lower Address of the CompCacheLine. + * + * @params[out] NvU64 *minCPUAddress; + * Minimum (lower bound) of the CompCacheLine. + * @params[out] NvU64 *maxCPUAddress; + * Maximum (upper bound) of the CompCacheLine. + * @params[in] NvU32 comptagLine; + * CompTagLine to fetch the bounds of.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPCACHELINE_BOUNDS (0x20801332) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x32" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPCACHELINE_BOUNDS_PARAMS { + NV_DECLARE_ALIGNED(NvU64 *minCPUAddress, 8); + NV_DECLARE_ALIGNED(NvU64 *maxCPUAddress, 8); + NvU32 comptagLine; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_COMPCACHELINE_BOUNDS_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_PART_SLICE_OFFSET < Deprecated > + * + * Used by PS (Performance Path, or Optimized path, or Per Slice version) + * to retrieve partition, slice and ROP Tile Offset of the passed in + * surface location. + * + * @params[out] NvU64 *part; + * Partition in which the target part of the surface resides. + * @params[out] NvU64 *slice; + * Slice in which the target part of the surface resides. + * @params[out] NvU64 *ropTileoffset; + * Offset to the start of the ROP Tile in which the target part of + * the surface resides. + * @params[in] NvU64 dataPhysicalStart; + * Start address of data for which part/slice/offset is desired. + * @params[in] NvU64 surfaceOffset; + * Byte offset of data for which part/slice/offset is desired. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_PART_SLICE_OFFSET (0x20801333) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x33" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_PART_SLICE_OFFSET_PARAMS { + NV_DECLARE_ALIGNED(NvU64 *part, 8); + NV_DECLARE_ALIGNED(NvU64 *slice, 8); + NV_DECLARE_ALIGNED(NvU64 *ropTileoffset, 8); + NV_DECLARE_ALIGNED(NvU64 dataPhysicalStart, 8); + NV_DECLARE_ALIGNED(NvU64 surfaceOffset, 8); +} NV2080_CTRL_CMD_FB_COMPBITCOPY_GET_PART_SLICE_OFFSET_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_ALLOC_AND_INIT_DERIVEDPARAMS < Deprecated > + * + * Used by PS (Performance Path, or Optimized path, or Per Slice version) + * to create a CompBitCopy::CompBitDerivedParams object + * + * @params[out] NvP64 derivedParams + * Actually a CompBitDerivedParams structure. + * @params[in] NvU32 comptagLine; + * Compression Tag Line Number + * @params[in] NvU32 ROPTile_offset; + * Offset in the surface of the ROP tile. + * @params[in] NvBool upper64KBCompbitSel; + * Selects Upper or Lower 64K + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_ALLOC_AND_INIT_DERIVEDPARAMS (0x20801334) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x34" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_ALLOC_AND_INIT_DERIVEDPARAMS_PARAMS { + NV_DECLARE_ALIGNED(NvP64 derivedParams, 8); + NvU32 comptagLine; + NvBool upper64KBCompbitSel; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_ALLOC_AND_INIT_DERIVEDPARAMS_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1 < Deprecated > + * + * Used by MODS (and possibly other clients) to have compbit code write + * directly to BAR1, rather than an intermediate buffer. + * + * @params[in] NvBool bForceBar1; + * Enables or disables direct writes to BAR1.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1 (0x20801335) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | 0x35" */ + +typedef struct NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1_PARAMS { + NvBool bForceBar1; +} NV2080_CTRL_CMD_FB_COMPBITCOPY_SET_FORCE_BAR1_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_AMAP_CONF + * + * Fills in fields of a structure of class ConfParamsV1, which is used for + * swizzling compression bits in the compression backing store. + * The caller is expected to have the appropriate headers for class ConfParamsV1. + * + * @params[in|out] void *pAmapConfParms + * Opaque pointer to structure of values for ConfParamsV1 + * @params[in|out] void *pCbcSwizzleParms + * Opaque pointer to structure of values for CbcSwizzleParamsV1 + * + * Possible status values returned are: + * NV_OK NV_ERR_NOT_SUPPORTED + * + * pCbcSwizzleParams will be filled in with certain parameters from + * @CbcSwizzleParamsV1. However, the caller is responsible for making sure + * all parameters are filled in before using it. + */ +#define NV2080_CTRL_CMD_FB_GET_AMAP_CONF (0x20801336) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pAmapConfParams, 8); + NV_DECLARE_ALIGNED(NvP64 pCbcSwizzleParams, 8); +} NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_CBC_OP + * + * Provides a way for clients to request a CBC Operation + * + * @params[in] CTRL_CMD_FB_CBC_OP fbCBCOp + * CBC Operation requested. + * Valid Values: + * CTRL_CMD_FB_CBC_OP_CLEAN + * CTRL_CMD_FB_CBC_OP_INVALIDATE + * + * Possible status values returned are: + * NV_OK NV_ERR_NOT_SUPPORTED NV_ERR_INVALID_ARGUMENT NV_ERR_TIMEOUT + */ +#define NV2080_CTRL_CMD_FB_CBC_OP (0x20801337) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_CBC_OP_PARAMS_MESSAGE_ID" */ + +/*! + * Permitted CBC Operations + */ +typedef enum CTRL_CMD_FB_CBC_OP { + CTRL_CMD_FB_CBC_OP_CLEAN = 0, + CTRL_CMD_FB_CBC_OP_INVALIDATE = 1, +} CTRL_CMD_FB_CBC_OP; + +#define NV2080_CTRL_CMD_FB_CBC_OP_PARAMS_MESSAGE_ID (0x37U) + +typedef struct NV2080_CTRL_CMD_FB_CBC_OP_PARAMS { + CTRL_CMD_FB_CBC_OP fbCBCOp; +} NV2080_CTRL_CMD_FB_CBC_OP_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_CTAGS_FOR_CBC_EVICTION + * + * The call will fetch the compression tags reserved for CBC eviction. + * + * Each comptag will correspond to a unique compression cacheline. The usage of + * these comptags is to evict the CBC by making accesses to a dummy compressed page, + * thereby evicting each CBC line. + * + * @param [in][out] NvU32 pCompTags + * Array of reserved compression tags of size @ref NV2080_MAX_CTAGS_FOR_CBC_EVICTION + * @param [out] numCompTags + * Number of entries returned in @ref pCompTags + * + * @returns + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_OUT_OF_RANGE + * NV_ERR_INVALID_PARAMETER + */ +#define NV2080_CTRL_CMD_FB_GET_CTAGS_FOR_CBC_EVICTION (0x20801338) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS_MESSAGE_ID" */ + +/*! + * Max size of @ref NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS::pCompTags + * Arbitrary, but sufficiently large number. Should be checked against CBC size. 
+ */ +#define NV2080_MAX_CTAGS_FOR_CBC_EVICTION 0x7F + + +#define NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS_MESSAGE_ID (0x38U) + +typedef struct NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS { + NvU32 pCompTags[NV2080_MAX_CTAGS_FOR_CBC_EVICTION]; + NvU32 numCompTags; +} NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE + * + * This Call will allocate compression tag + * + * @params[in] NvU32 attr + * Stores the information: + * 1. NVOS32_ATTR_COMPR_REQUIRED or not + * 2. NVOS32_ATTR_PAGE_SIZE + * @params[in] NvU32 attr2 + * Determine whether to allocate + * an entire cache line or allocate by size + * @params[in] NvU32 size + * Specify the size of allocation, in pages not bytes + * @params[in] NvU32 ctagOffset + * Determine the offset usage of the allocation + * @params[out] NvU32 hwResId + * Stores the result of the allocation + * @params[out] NvU32 RetcompTagLineMin + * The resulting min Ctag Number from the allocation + * @params[out] NvU32 RetcompTagLineMax + * The resulting max Ctag Number from the allocation + * @returns + * NV_OK + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ + +#define NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE (0x20801339) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS_MESSAGE_ID (0x39U) + +typedef struct NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS { + NvU32 attr; + NvU32 attr2; + NvU32 size; + NvU32 ctagOffset; + NvU32 hwResId; + NvU32 retCompTagLineMin; + NvU32 retCompTagLineMax; +} NV2080_CTRL_CMD_FB_ALLOC_COMP_RESOURCE_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_FREE_TILE + * + * This control call is used to release tile back to the free pool + * + * @params[in] NvU32 hwResId + * Stores the information of a previous allocation + * @returns + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ + +#define NV2080_CTRL_CMD_FB_FREE_TILE (0x2080133a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS_MESSAGE_ID (0x3AU) + +typedef struct NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS { + NvU32 hwResId; +} NV2080_CTRL_CMD_FB_FREE_TILE_PARAMS; + + +/* + * NV2080_CTRL_CMD_FB_SETUP_VPR_REGION + * + * This control command is used to request vpr region setup + * + * requestType + * NV2080_CTRL_CMD_FB_SET_VPR: Request to setup VPR + * + * requestParams : struct VPR_REQUEST_PARAMS + * It contains the VPR region request details like, + * startAddr : FB offset from which we need to setup VPR + * size : required size of the region + * + * statusParams : struct VPR_STATUS_PARAMS + * This struct stores the output of requested VPR region + * status : Whether the request was successful + * + * NV2080_CTRL_CMD_FB_VPR_ERROR_CODE : + * NV2080_CTRL_CMD_FB_VPR_ERROR_GENERIC : Some unknown error occurred + * NV2080_CTRL_CMD_FB_VPR_ERROR_INVALID_CLIENT_REQUEST : Request was invalid + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_SETUP_VPR_REGION (0x2080133b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE { + NV2080_CTRL_CMD_FB_SET_VPR = 0, +} NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE; + +typedef enum NV2080_CTRL_CMD_FB_VPR_ERROR_CODE { + 
NV2080_CTRL_CMD_FB_VPR_ERROR_GENERIC = 0, + NV2080_CTRL_CMD_FB_VPR_ERROR_INVALID_CLIENT_REQUEST = 1, +} NV2080_CTRL_CMD_FB_VPR_ERROR_CODE; + +typedef struct VPR_REQUEST_PARAMS { + NvU32 startAddr; + NvU32 size; +} VPR_REQUEST_PARAMS; + +typedef struct VPR_STATUS_PARAMS { + NvU32 status; +} VPR_STATUS_PARAMS; + +#define NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS_MESSAGE_ID (0x3BU) + +typedef struct NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS { + NV2080_CTRL_CMD_FB_VPR_REQUEST_TYPE requestType; + VPR_REQUEST_PARAMS requestParams; + VPR_STATUS_PARAMS statusParams; +} NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS; +typedef struct NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS *PNV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_GET_CLI_MANAGED_OFFLINED_PAGES + * + * This command returns the list of offlined video memory page addresses in the + * region managed by Client + * + * offlinedPages + * This output parameter is an array of video memory physical page numbers that + * are offlined. This array can hold a maximum of NV2080_CTRL_FB_ + * OFFLINED_PAGES_MAX_PAGES addresses. + * pageSize + * This output parameter contains the size of the page that is offlined. + * validEntries + * This output parameter specifies the number of valid entries in the + * offlined array. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_FB_GET_CLI_MANAGED_OFFLINED_PAGES (0x2080133c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x3CU) + +typedef struct NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS { + NvU32 offlinedPages[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES]; // A 32B can hold enough. + NvU32 pageSize; + NvU32 validEntries; +} NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO + * + * This command returns parameters required to initialize compbit copy object + * used by address mapping library + * + * defaultPageSize + * Page size used by @ref CompBitCopy methods + * comptagsPerCacheLine + * Number of compression tags in a single compression cache line. + * unpackedComptagLinesPerCacheLine; + * From hw (not adjusted for CompBits code) Number of compression tags + * in a single compression cache line. + * compCacheLineSizePerLTC; + * Size of compression cache line per L2 slice. Size in Bytes. + * unpackedCompCacheLineSizePerLTC; + * From hw (not adjusted for CompBits code) size of compression + * cache line per L2 slice. Size in Bytes + * slicesPerLTC; + * Number of L2 slices per L2 cache. + * numActiveLTCs; + * Number of active L2 caches. (Not floorswept) + * familyName; + * Family name for the GPU. + * chipName; + * Chip name for the GPU. + * bitsPerRAMEntry; + * Bits per RAM entry. (Need better doc) + * ramBankWidth; + * Width of RAM bank. (Need better doc) + * bitsPerComptagLine; + * Number of bits per compression tag line. + * ramEntriesPerCompCacheLine; + * Number of RAM entries spanned by 1 compression cache line. + * comptagLineSize; + * Size of compression tag line, in Bytes. 
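+ *
+ * All of the above fields are outputs. A minimal retrieval sketch
+ * (illustrative only; rmControl() is a placeholder for the client's control
+ * dispatch routine):
+ *
+ *     NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS info = { 0 };
+ *     NV_STATUS status = rmControl(hClient, hSubdevice,
+ *                                  NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO,
+ *                                  &info, sizeof(info));
+ *     // On NV_OK, fields such as info.comptagsPerCacheLine and
+ *     // info.slicesPerLTC can be used to construct the compbit copy object.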
+ * + * Possible status values returned are: + * NV_OK + */ + +#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO (0x2080133d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS_MESSAGE_ID (0x3DU) + +typedef struct NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS { + NvU32 defaultPageSize; + NvU32 comptagsPerCacheLine; + NvU32 unpackedComptagLinesPerCacheLine; + NvU32 compCacheLineSizePerLTC; + NvU32 unpackedCompCacheLineSizePerLTC; + NvU32 slicesPerLTC; + NvU32 numActiveLTCs; + NvU32 familyName; + NvU32 chipName; + NvU32 bitsPerRAMEntry; + NvU32 ramBankWidth; + NvU32 bitsPerComptagLine; + NvU32 ramEntriesPerCompCacheLine; + NvU32 comptagLineSize; +} NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_SET_RRD + * + * Sets the row-to-row delay on the GPU's FB + * + * Possible status values returned are: + * NV_OK + * Any error code + */ +#define NV2080_CTRL_CMD_FB_SET_RRD (0x2080133e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_RRD_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_SET_RRD_RESET_VALUE (~((NvU32)0)) +#define NV2080_CTRL_FB_SET_RRD_PARAMS_MESSAGE_ID (0x3EU) + +typedef struct NV2080_CTRL_FB_SET_RRD_PARAMS { + NvU32 rrd; +} NV2080_CTRL_FB_SET_RRD_PARAMS; + +/* + * NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS + * + * This is not a control call of it's own, but there are common definitions for + * the two NV2080_CTRL_CMD_FB_SET_READ/WRITE_LIMIT control calls. + */ +typedef struct NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS { + NvU8 limit; +} NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS; +#define NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE (0xff) + +/* + * NV2080_CTRL_CMD_FB_SET_READ_LIMIT + * + * Sets the READ_LIMIT to be used in the NV_PFB_FBPA_DIR_ARB_CFG0 register + * + * limit + * The limit value to use + * + * Possible status values returned are: + * NV_OK + * Any error code + */ +#define NV2080_CTRL_CMD_FB_SET_READ_LIMIT (0x2080133f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_SET_READ_LIMIT_RESET_VALUE NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE +#define NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS_MESSAGE_ID (0x3FU) + +typedef NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_SET_WRITE_LIMIT + * + * Sets the WRITE_LIMIT to be used in the NV_PFB_FBPA_DIR_ARB_CFG0 register + * + * limit + * The limit value to us + * + * Possible status values returned are: + * NV_OK + * Any error code + */ +#define NV2080_CTRL_CMD_FB_SET_WRITE_LIMIT (0x20801340) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_SET_WRITE_LIMIT_RESET_VALUE NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_RESET_VALUE +#define NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS_MESSAGE_ID (0x40U) + +typedef NV2080_CTRL_FB_SET_READ_WRITE_LIMIT_PARAMS NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS; + +/*! 
+ * NV2080_CTRL_CMD_FB_PATCH_PBR_FOR_MINING + * + * Patches some VBIOS values related to PBR to better suit mining applications + * + * bEnable + * Set the mining-specific values or reset to the original values + * + * Possible status values returned are: + * NV_OK + * Any error code + */ +#define NV2080_CTRL_CMD_FB_PATCH_PBR_FOR_MINING (0x20801341) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS_MESSAGE_ID (0x41U) + +typedef struct NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS { + NvBool bEnable; +} NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_MEM_ALIGNMENT + * + * Get memory alignment. Replacement for NVOS32_FUNCTION_GET_MEM_ALIGNMENT + */ +#define NV2080_CTRL_CMD_FB_GET_MEM_ALIGNMENT (0x20801342) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_GET_MEM_ALIGNMENT_MAX_BANKS (4) +#define NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS_MESSAGE_ID (0x42U) + +typedef struct NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS { + NvU32 alignType; // Input + NvU32 alignAttr; + NvU32 alignInputFlags; + NvU32 alignHead; + NV_DECLARE_ALIGNED(NvU64 alignSize, 8); + NvU32 alignHeight; + NvU32 alignWidth; + NvU32 alignPitch; + NvU32 alignPad; + NvU32 alignMask; + NvU32 alignOutputFlags[NV2080_CTRL_FB_GET_MEM_ALIGNMENT_MAX_BANKS]; + NvU32 alignBank[NV2080_CTRL_FB_GET_MEM_ALIGNMENT_MAX_BANKS]; + NvU32 alignKind; + NvU32 alignAdjust; // Output -- If non-zero the amount we need to adjust the offset + NvU32 alignAttr2; +} NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_CBC_BASEADDR + * + * Get the CBC Base physical address + * This control call is required by error containment tests + * NV2080_CTRL_CMD_FB_GET_AMAP_CONF can also return CBC base address + * but it requires kernel privilege, and it not callalble from SRT test + * + * @params[out] NvU64 cbcBaseAddr + * Base physical address for CBC data. + * + * Possible status values returned are: + * NV_OK NV_ERR_NOT_SUPPORTED + * + */ +#define NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR (0x20801343) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS_MESSAGE_ID (0x43U) + +typedef struct NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS { + NvU32 cbcBaseAddress; + NvU32 compCacheLineSize; + NV_DECLARE_ALIGNED(NvU64 backingStoreStartPA, 8); + NV_DECLARE_ALIGNED(NvU64 backingStoreAllocPA, 8); + NvU32 backingStoreChunkOverfetch; +} NV2080_CTRL_CMD_FB_GET_CBC_BASE_ADDR_PARAMS; + +#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING 0:0 +#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_FALSE 0 +#define NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_TRUE 1 + + + +typedef struct NV2080_CTRL_FB_REMAP_ENTRY { + NvU32 remapRegVal; + NvU32 timestamp; + NvU8 fbpa; + NvU8 sublocation; + NvU8 source; + NvU8 flags; +} NV2080_CTRL_FB_REMAP_ENTRY; + +/* valid values for source */ + + +#define NV2080_CTRL_FB_REMAPPED_ROW_SOURCE_SBE_FIELD (0x00000002) +#define NV2080_CTRL_FB_REMAPPED_ROW_SOURCE_DBE_FIELD (0x00000003) + +#define NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS (0x00000200) + +/* + * NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS + * + * This command returns the list of remapped rows stored in the Inforom. 
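+ *
+ * A typical query looks as follows (illustrative only; the individual fields
+ * are described below, and rmControl() stands in for the client's control
+ * dispatch routine):
+ *
+ *     NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS params = { 0 };
+ *     NV_STATUS status = rmControl(hClient, hSubdevice,
+ *                                  NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS,
+ *                                  &params, sizeof(params));
+ *     // On NV_OK, params.entries[0 .. params.entryCount - 1] are valid and
+ *     // params.flags carries the _PENDING/_FAILURE bits defined below.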
+ *
+ * entryCount
+ *     This output parameter specifies the number of remapped rows.
+ * flags
+ *     This output parameter contains info on whether or not there are pending
+ *     remappings and whether or not a remapping failed.
+ * entries
+ *     This output parameter is an array of NV2080_CTRL_FB_REMAP_ENTRY
+ *     containing information on the remappings that occurred. This array can
+ *     hold a maximum of NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS entries.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INVALID_POINTER
+ *   NV_ERR_OBJECT_NOT_FOUND
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FB_GET_REMAPPED_ROWS (0x20801344) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING \
+    NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING_FALSE NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_FALSE
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_PENDING_TRUE  NV2080_CTRL_FB_REMAP_ENTRY_FLAGS_PENDING_TRUE
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE 1:1
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE_FALSE 0
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_FLAGS_FAILURE_TRUE 1
+
+#define NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS_MESSAGE_ID (0x44U)
+
+typedef struct NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS {
+    NvU32 entryCount;
+    NvU8 flags;
+    NV2080_CTRL_FB_REMAP_ENTRY entries[NV2080_CTRL_FB_REMAPPED_ROWS_MAX_ROWS];
+} NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS;
+
+// Max size of the queryParams in Bytes, so that the NV2080_CTRL_FB_FS_INFO_QUERY struct is still 32B
+#define NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE 24
+
+/*!
+ * Structure holding the out params for NV2080_CTRL_FB_FS_INFO_INVALID_QUERY.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS {
+    // Unused param, will ensure the size of NV2080_CTRL_FB_FS_INFO_QUERY struct to be 32B
+    NvU8 data[NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE];
+} NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBP_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS {
+    /*!
+     * [IN]: swizzId
+     * PartitionID associated with a created smc partition. Currently used only for a
+     * device monitoring client to get the physical values of the FB. The client needs to pass
+     * 'NV2080_CTRL_GPU_PARTITION_ID_INVALID' explicitly if it wants RM to ignore the swizzId.
+     * RM will consider this request similar to a legacy case.
+     * The client's subscription is used only as a capability check and not as an input swizzId.
+     */
+    NvU32 swizzId;
+    /*!
+     * [OUT]: physical/local fbp mask.
+     */
+    NV_DECLARE_ALIGNED(NvU64 fbpEnMask, 8);
+} NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_LTC_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS {
+    /*!
+     * [IN]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [OUT]: physical/local ltc mask.
+     */
+    NvU32 ltcEnMask;
+} NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS;
+
+/*!
+ * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_LTS_MASK.
+ */
+typedef struct NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS {
+    /*!
+     * [IN]: physical/local FB partition index.
+     */
+    NvU32 fbpIndex;
+    /*!
+     * [OUT]: physical/local lts mask.
+     * Note that the lts bits are flattened out for all ltc within a fbp
+     * (for example, with 4 LTS per LTC, bits 0..3 would typically correspond
+     * to the LTS of the first LTC in the FBP, bits 4..7 to the second, and
+     * so on).
+ */ + NvU32 ltsEnMask; +} NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBPA_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS { + /*! + * [IN]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [OUT]: physical/local FBPA mask. + */ + NvU32 fbpaEnMask; +} NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS { + /*! + * [IN]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [OUT]: physical/local FBPA-SubPartition mask. + */ + NvU32 fbpaSubpEnMask; +} NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP + */ +typedef struct NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS { + /*! + * [IN]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [OUT]: Logical/local FBP index + */ + NvU32 fbpLogicalIndex; +} NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_ROP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS { + /*! + * [IN]: physical/local FB partition index. + */ + NvU32 fbpIndex; + /*! + * [OUT]: physical/local ROP mask. + */ + NvU32 ropEnMask; +} NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [IN]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [OUT]: physical ltc mask. + */ + NvU32 ltcEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [IN]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [OUT]: physical lts mask. + */ + NvU32 ltsEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [IN]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [OUT]: physical fbpa mask. + */ + NvU32 fbpaEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! + * [IN]: swizzId + * PartitionID associated with a created smc partition. + */ + NvU32 swizzId; + /*! + * [OUT]: physical rop mask. + */ + NvU32 ropEnMask; +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS; + +/*! + * Structure holding the in/out params for NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK. + */ +typedef struct NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS { + /*! + * [IN]: Physical FB partition index. + */ + NvU32 fbpIndex; + /*! 
+ * [IN]: swizzId + * PartitionID associated with a created smc partition. Currently used only for a + * device monitoring client to get the physical values of the FB. The client needs to pass + * 'NV2080_CTRL_GPU_PARTITION_ID_INVALID' explicitly if it wants RM to ignore the swizzId. + * RM will consider this request similar to a legacy case. + * The client's subscription is used only as a capability check and not as an input swizzId. + */ + NvU32 swizzId; + /*! + * [OUT]: physical FBPA_SubPartition mask associated with requested partition. + */ + NV_DECLARE_ALIGNED(NvU64 fbpaSubpEnMask, 8); +} NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS; + +// Possible values for queryType +#define NV2080_CTRL_FB_FS_INFO_INVALID_QUERY 0x0 +#define NV2080_CTRL_FB_FS_INFO_FBP_MASK 0x1 +#define NV2080_CTRL_FB_FS_INFO_LTC_MASK 0x2 +#define NV2080_CTRL_FB_FS_INFO_LTS_MASK 0x3 +#define NV2080_CTRL_FB_FS_INFO_FBPA_MASK 0x4 +#define NV2080_CTRL_FB_FS_INFO_ROP_MASK 0x5 +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK 0x6 +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK 0x7 +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK 0x8 +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK 0x9 +#define NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK 0xA +#define NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK 0xB +#define NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP 0xC + +typedef struct NV2080_CTRL_FB_FS_INFO_QUERY { + NvU16 queryType; + NvU8 reserved[2]; + NvU32 status; + union { + NV2080_CTRL_FB_FS_INFO_INVALID_QUERY_PARAMS inv; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_FBP_MASK_PARAMS fbp, 8); + NV2080_CTRL_FB_FS_INFO_LTC_MASK_PARAMS ltc; + NV2080_CTRL_FB_FS_INFO_LTS_MASK_PARAMS lts; + NV2080_CTRL_FB_FS_INFO_FBPA_MASK_PARAMS fbpa; + NV2080_CTRL_FB_FS_INFO_ROP_MASK_PARAMS rop; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTC_MASK_PARAMS dmLtc; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_LTS_MASK_PARAMS dmLts; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_MASK_PARAMS dmFbpa; + NV2080_CTRL_FB_FS_INFO_PROFILER_MON_ROP_MASK_PARAMS dmRop; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_PROFILER_MON_FBPA_SUBP_MASK_PARAMS dmFbpaSubp, 8); + NV2080_CTRL_FB_FS_INFO_FBPA_SUBP_MASK_PARAMS fbpaSubp; + NV2080_CTRL_FB_FS_INFO_FBP_LOGICAL_MAP_PARAMS fbpLogicalMap; + } queryParams; +} NV2080_CTRL_FB_FS_INFO_QUERY; + +// Max number of queries that can be batched in a single call to NV2080_CTRL_CMD_FB_GET_FS_INFO +#define NV2080_CTRL_FB_FS_INFO_MAX_QUERIES 96 + +#define NV2080_CTRL_FB_GET_FS_INFO_PARAMS_MESSAGE_ID (0x46U) + +typedef struct NV2080_CTRL_FB_GET_FS_INFO_PARAMS { + NvU16 numQueries; + NvU8 reserved[6]; + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_FS_INFO_QUERY queries[NV2080_CTRL_FB_FS_INFO_MAX_QUERIES], 8); +} NV2080_CTRL_FB_GET_FS_INFO_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_FS_INFO + * + * This control call returns the fb engine information for a partition/GPU. + * Supports an interface so that the caller can issue multiple queries by batching them + * in a single call. Returns the first error it encounters. + * + * numQueries[IN] + * - Specifies the number of valid queries. 
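+ *
+ * Sketch of a batched query (illustrative only; rmControl() is a placeholder
+ * for the client's control dispatch routine):
+ *
+ *     NV2080_CTRL_FB_GET_FS_INFO_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.numQueries = 2;
+ *     params.queries[0].queryType            = NV2080_CTRL_FB_FS_INFO_FBP_MASK;
+ *     params.queries[0].queryParams.fbp.swizzId = NV2080_CTRL_GPU_PARTITION_ID_INVALID;
+ *     params.queries[1].queryType            = NV2080_CTRL_FB_FS_INFO_LTC_MASK;
+ *     params.queries[1].queryParams.ltc.fbpIndex = 0;
+ *     status = rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_FB_GET_FS_INFO,
+ *                        &params, sizeof(params));
+ *     // On NV_OK, check each queries[i].status, then read e.g.
+ *     // queries[0].queryParams.fbp.fbpEnMask and queries[1].queryParams.ltc.ltcEnMask.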
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_GET_FS_INFO (0x20801346) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_FS_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_HISTOGRAM_IDX_NO_REMAPPED_ROWS (0x0) +#define NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAPPED_ROW (0x1) +#define NV2080_CTRL_FB_HISTOGRAM_IDX_MIXED_REMAPPED_REMAINING_ROWS (0x2) +#define NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAINING_ROW (0x3) +#define NV2080_CTRL_FB_HISTOGRAM_IDX_MAX_REMAPPED_ROWS (0x4) + +#define NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS_MESSAGE_ID (0x47U) + +typedef struct NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS { + NvU32 histogram[5]; +} NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS; + +/*! + * NV2080_CTRL_CMD_FB_GET_ROW_REMAPPER_HISTOGRAM + * + * This control call returns stats on the number of banks that have a certain + * number of rows remapped in the bank. Specifically the number of banks that + * have 0, 1, 2 through (max-2), max-1 and max number of rows remapped in the + * bank. Values will be returned in an array. + * + * Index values are: + * + * NV2080_CTRL_FB_HISTOGRAM_IDX_NO_REMAPPED_ROWS + * Number of banks with zero rows remapped + NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAPPED_ROW + * Number of banks with one row remapped + NV2080_CTRL_FB_HISTOGRAM_IDX_MIXED_REMAPPED_REMAINING_ROWS + * Number of banks with 2 through (max-2) rows remapped + NV2080_CTRL_FB_HISTOGRAM_IDX_SINGLE_REMAINING_ROW + * Number of banks with (max-1) rows remapped + NV2080_CTRL_FB_HISTOGRAM_IDX_MAX_REMAPPED_ROWS + * Number of banks with max rows remapped + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FB_GET_ROW_REMAPPER_HISTOGRAM (0x20801347) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_FB_GET_DYNAMICALLY_BLACKLISTED_PAGES + * + * This command returns the list of dynamically blacklisted video memory page addresses + * after last driver load. + * + * blackList + * This output parameter is an array of NV2080_CTRL_FB_DYNAMIC_BLACKLIST_ADDRESS_INFO + * This array can hold a maximum of NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES. + * validEntries + * This output parameter specifies the number of valid entries in the + * blackList array. + * baseIndex + * With the limit of up to 512 blacklisted pages, the size of this array + * exceeds the rpc buffer limit. This control call will collect the data + * in multiple passes. This parameter indicates the start index of the + * data to be passed back to the caller + * This cannot be greater than NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES + * bMore + * This parameter indicates whether there are more valid elements to be + * fetched. 
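+ *
+ * Sketch of fetching the full list across multiple passes (illustrative only;
+ * rmControl() is a placeholder for the client's control dispatch routine, and
+ * advancing baseIndex by the per-pass entry limit is one reasonable choice):
+ *
+ *     NvU32  baseIndex = 0;
+ *     NvBool bMore     = NV_TRUE;
+ *
+ *     while (bMore)
+ *     {
+ *         NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS params = { 0 };
+ *
+ *         params.baseIndex = baseIndex;
+ *         if (rmControl(hClient, hSubdevice,
+ *                       NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES,
+ *                       &params, sizeof(params)) != NV_OK)
+ *             break;
+ *
+ *         // consume params.offlined[0 .. params.validEntries - 1]
+ *         bMore      = params.bMore;
+ *         baseIndex += NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES;
+ *     }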
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES (0x20801348) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS_MESSAGE_ID" */ + +/* Maximum pages that can be dynamically blacklisted */ +#define NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES 512 + +/* + * Maximum entries that can be sent in a single pass of + * NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES + */ +#define NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES 64 + +/** + * NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO + * pageNumber + * This output parameter specifies the dynamically blacklisted page number. + * source + * The reason for the page to be retired. Valid values for + * this parameter include: + * NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_INVALID + * Invalid source. + * NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE + * Page retired by dynamic page retirement due to a double bit + * error seen. + */ +typedef struct NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO { + NV_DECLARE_ALIGNED(NvU64 pageNumber, 8); + NvU8 source; +} NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO; + +#define NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS_MESSAGE_ID (0x48U) + +typedef struct NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_FB_DYNAMIC_OFFLINED_ADDRESS_INFO offlined[NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES], 8); + NvU32 validEntries; + NvU32 baseIndex; + NvBool bMore; +} NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS; + +/* valid values for source */ + +#define NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_INVALID (0x00000000) +#define NV2080_CTRL_FB_DYNAMIC_BLACKLISTED_PAGES_SOURCE_DPR_DBE (0x00000001) + +/* + * NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO + * + * This control command is used by clients to query information pertaining to client allocations. + * + * + * @params [IN/OUT] NvU64 allocCount: + * Client specifies the allocation count that it received using the + * previous NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO control call. + * RM will get the total number of allocations known by RM and fill + * allocCount with it. + * + * @params [IN] NvP64 pAllocInfo: + * Pointer to the buffer allocated by client of size NV2080_CTRL_CMD_FB_ALLOCATION_INFO * + * allocCount. RM returns the info pertaining to each of the contiguous client + * allocation chunks in pAllocInfo. The format of the allocation information is given by + * NV2080_CTRL_CMD_FB_ALLOCATION_INFO. The client has to sort the returned information if + * it wants to retain the legacy behavior of SORTED BY OFFSET. Information is only returned + * if and only if allocCount[IN]>=allocCount[OUT] and clientCount[IN]>=clientCount[OUT]. + * + * @params [IN/OUT] NvP64 clientCount: + * Client specifies the client count that it received using the + * previous NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO control call. + * RM will get the total number of clients that have allocations with RM + * and fill clientCount with it. + * + * @params [IN] NvP64 pClientInfo: + * Pointer to the buffer allocated by client of size NV2080_CTRL_CMD_FB_CLIENT_INFO * + * clientCount. RM returns the info pertaining to each of the clients that have allocations + * known about by RM in pClientInfo. The format of the allocation information is given by + * NV2080_CTRL_CMD_FB_CLIENT_INFO. Information is only returned if and only if + * allocCount[IN]>=allocCount[OUT] and clientCount[IN]>=clientCount[OUT]. 
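+ *
+ * Sketch of the two-pass pattern described in the usage note below
+ * (illustrative only; rmControl() is a placeholder for the client's control
+ * dispatch routine, allocate() stands for any client-side allocator, and the
+ * NvP64 conversion is shown as a plain cast):
+ *
+ *     NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS params = { 0 };
+ *     NV2080_CTRL_CMD_FB_ALLOCATION_INFO *pAllocInfo;
+ *     NV2080_CTRL_CMD_FB_CLIENT_INFO     *pClientInfo;
+ *     NV_STATUS status;
+ *
+ *     // Pass 1: counts only (allocCount == clientCount == 0 on input)
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO,
+ *                        &params, sizeof(params));
+ *
+ *     // Pass 2: provide buffers sized from the returned counts
+ *     pAllocInfo  = allocate(params.allocCount  * sizeof(*pAllocInfo));
+ *     pClientInfo = allocate(params.clientCount * sizeof(*pClientInfo));
+ *     params.pAllocInfo  = (NvP64)(NvUPtr)pAllocInfo;
+ *     params.pClientInfo = (NvP64)(NvUPtr)pClientInfo;
+ *     status = rmControl(hClient, hSubdevice,
+ *                        NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO,
+ *                        &params, sizeof(params));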
+ * + * @returns Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_POINTER + * NV_ERR_NO_MEMORY + * + * @Usage: All privileged RM clients for debugging only. Initially, call this with allocCount = + * clientCount = 0 to get client count, and then call again with allocated memory and sizes. + * Client can repeat with the new count-sized allocations until a maximum try count is + * reached or client is out of memory. + */ + +#define NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO (0x20801349) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS_MESSAGE_ID" */ + +/* + * These work with the FLD_SET_REF_NUM and FLD_TEST_REF macros and describe the 'flags' member + * of the NV2080_CTRL_CMD_FB_ALLOCATION_INFO struct. + */ + +// Address space of the allocation +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE 4:0 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE_SYSMEM 0 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE_VIDMEM 1 + +// Whether the allocation is shared +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED 5:5 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED_FALSE 0 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED_TRUE 1 + +// Whether this client owns this allocation +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER 6:6 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER_FALSE 0 +#define NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER_TRUE 1 + +typedef struct NV2080_CTRL_CMD_FB_ALLOCATION_INFO { + NvU32 client; /* [OUT] Identifies the client that made or shares the allocation (index into pClientInfo)*/ + NvU32 flags; /* [OUT] Flags associated with the allocation (see previous defines) */ + NV_DECLARE_ALIGNED(NvU64 beginAddr, 8); /* [OUT] Starting physical address of the chunk */ + NV_DECLARE_ALIGNED(NvU64 size, 8); /* [OUT] Size of the allocated contiguous chunk in bytes */ +} NV2080_CTRL_CMD_FB_ALLOCATION_INFO; + +typedef struct NV2080_CTRL_CMD_FB_CLIENT_INFO { + NvHandle handle; /* [OUT] Handle of the client that made or shares the allocation */ + NvU32 pid; /* [OUT] PID of the client that made or shares the allocation */ + + /* For the definition of the subprocessID and subprocessName params, see NV0000_CTRL_CMD_SET_SUB_PROCESS_ID */ + NvU32 subProcessID; /* [OUT] Subprocess ID of the client that made or shares the allocation */ + char subProcessName[NV_PROC_NAME_MAX_LENGTH]; /* [OUT] Subprocess Name of the client that made or shares the allocation */ +} NV2080_CTRL_CMD_FB_CLIENT_INFO; + +#define NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS_MESSAGE_ID (0x49U) + +typedef struct NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 allocCount, 8); + NV_DECLARE_ALIGNED(NvP64 pAllocInfo, 8); + NV_DECLARE_ALIGNED(NvU64 clientCount, 8); + NV_DECLARE_ALIGNED(NvP64 pClientInfo, 8); +} NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS + * + * This control command is used by clients to update the NUMA status. 
+ * + * @params [IN] NvBool bOnline: + * + * @returns Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * + */ +#define NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS (0x20801350) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS_MESSAGE_ID (0x50U) + +typedef struct NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS { + NvBool bOnline; +} NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_FB_GET_NUMA_INFO + * + * This control command is used by clients to get per-subdevice NUMA memory + * information as assigned by the system. + * + * numaNodeId[OUT] + * - Specifies the NUMA node ID. + * + * numaMemAddr[OUT] + * - Specifies the NUMA memory address. + * + * numaMemSize[OUT] + * - Specifies the NUMA memory size. + * + * numaOfflineAddressesCount[IN/OUT] + * - If non-zero, then it specifies the maximum number of entries in + * numaOfflineAddresses[] for which the information is required. + * It will be updated with the actual number of entries present in + * the numaOfflineAddresses[]. + * + * numaOfflineAddresses[OUT] + * - If numaOfflineAddressesCount is non-zero, it contains the addresses + * of offline pages in the NUMA region. + * + * @returns Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FB_GET_NUMA_INFO (0x20801351) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID << 8) | NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FB_NUMA_INFO_MAX_OFFLINE_ADDRESSES 64 + +#define NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS_MESSAGE_ID (0x51U) + +typedef struct NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS { + NvS32 numaNodeId; + NV_DECLARE_ALIGNED(NvU64 numaMemAddr, 8); + NV_DECLARE_ALIGNED(NvU64 numaMemSize, 8); + NvU32 numaOfflineAddressesCount; + NV_DECLARE_ALIGNED(NvU64 numaOfflineAddresses[NV2080_CTRL_FB_NUMA_INFO_MAX_OFFLINE_ADDRESSES], 8); +} NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS; + +/* _ctrl2080fb_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h new file mode 100644 index 000000000..97666947b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h @@ -0,0 +1,762 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080fifo.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* + * NV2080_CTRL_CMD_SET_GPFIFO + * + * This command set the GPFIFO offset and number of entries for a channel + * after it has been allocated. The channel must be idle and not pending, + * otherwise ERROR_IN_USE will be returned. + * + * hChannel + * The handle to the channel. + * base + * The base of the GPFIFO in the channel ctxdma. + * numEntries + * The number of entries in the GPFIFO. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_CHANNEL + * NV_ERR_STATE_IN_USE + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV2080_CTRL_CMD_SET_GPFIFO (0x20801102) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_SET_GPFIFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_SET_GPFIFO_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_CMD_SET_GPFIFO_PARAMS { + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 base, 8); + NvU32 numEntries; +} NV2080_CTRL_CMD_SET_GPFIFO_PARAMS; + +/* + * NV2080_CTRL_FIFO_BIND_CHANNEL + * + * This structure is used to describe a channel that is to have + * it's bindable engines bound to those of other channels. + * + * hClient + * This structure member contains the handle of the client object + * that owns the channel object specified by hChannel. + * + * hChannel + * This structure member contains the channel handle of the channel + * object. + */ + +typedef struct NV2080_CTRL_FIFO_BIND_CHANNEL { + NvHandle hClient; + NvHandle hChannel; +} NV2080_CTRL_FIFO_BIND_CHANNEL; + +/* + * NV2080_CTRL_CMD_FIFO_BIND_ENGINES + * + * This control call is now deprecated. + * This command can be used to bind different video engines on G8X from separate + * channels together for operations such as idling. The set of bindable engines + * includes the NV2080_ENGINE_TYPE_BSP, NV2080_ENGINE_TYPE_VP and + * NV2080_ENGINE_TYPE_PPP engines. + * + * bindChannelCount + * This parameter specifies the number of channels to bind together. This + * parameter cannot exceed NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS. + * + * bindChannels + * The parameter specifies the array of channels to bind together. The first + * bindChannelCount entries are used in the bind channel operation. 
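+ *
+ * Illustrative sketch only (the call is deprecated; rmControl() is a
+ * placeholder for the client's control dispatch routine, and the handles
+ * shown are assumed to exist):
+ *
+ *     NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.bindChannelCount         = 2;
+ *     params.bindChannels[0].hClient  = hClientA;
+ *     params.bindChannels[0].hChannel = hChannelA;
+ *     params.bindChannels[1].hClient  = hClientB;
+ *     params.bindChannels[1].hChannel = hChannelB;
+ *     status = rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_FIFO_BIND_ENGINES,
+ *                        &params, sizeof(params));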
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS (16) + +#define NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS { + NvU32 bindChannelCount; + NV2080_CTRL_FIFO_BIND_CHANNEL bindChannels[NV2080_CTRL_FIFO_BIND_ENGINES_MAX_CHANNELS]; +} NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS; + +#define NV2080_CTRL_CMD_FIFO_BIND_ENGINES (0x20801103) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES + * + * This command is used for a client to setup specialized custom operational + * properties that may be specific to an environment, or properties that + * should be set generally but are not for reasons of backward compatibility + * with previous chip generations + * + * flags + * This field specifies the operational properties to be applied + * + * Possible return status values returned are + * NV_OK + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES (0x20801104) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS { + NvU32 flags; +} NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS; + +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE 0:0 +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE_FALSE (0x00000000) +#define NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_FLAGS_ERROR_ON_STUCK_SEMAPHORE_TRUE (0x00000001) + +/* + * NV2080_CTRL_CMD_FIFO_GET_PHYSICAL_CHANNEL_COUNT + * + * This command returns the maximum number of physical channels available for + * allocation on the current GPU. This may be less than or equal to the total + * number of channels supported by the current hardware. + * + * physChannelCount + * This output parameter contains the maximum physical channel count. + * + * physChannelCountInUse + * This output parameter contains the number of physical channels in use + * + * Possible return status values returned are + * NV_OK + * + */ +#define NV2080_CTRL_CMD_FIFO_GET_PHYSICAL_CHANNEL_COUNT (0x20801108) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS { + NvU32 physChannelCount; + NvU32 physChannelCountInUse; +} NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS; + +/* + * NV2080_CTRL_FIFO_INFO + * + * This structure represents a single 32bit fifo engine value. Clients + * request a particular FIFO engine value by specifying a unique fifo + * information index. + * + * Legal fifo information index values are: + * NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL + * This index can be used to request the amount of instance space + * in kilobytes reserved by the fifo engine. + * NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS + * This index can be used to query the maximum number of channel groups + * that can be allocated on the GPU. 
+ * NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP + * This index can be used to query the maximum number of channels that can + * be allocated in a single channel group. + * NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP + * This index can be used to query the maximum number of subcontext that can + * be allocated in a single channel group. + * NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET + * This index can be used to query the starting offset of the RM + * pre-allocated USERD range in BAR1. This index query is honored only + * on Legacy-vGPU host RM. + * NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE + * This index can be used to query the default timeslice value + * (microseconds) used for a channel or channel group. + * NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE + * This index can be used to query the number of channel groups that are + * already allocated on the GPU. + * NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED + * This index can be used to check if per runlist channel ram is supported, and + * to query the supported number of channels per runlist. + * NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE + * This index can be used to get max channel groups supported per engine/runlist. + * NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE + * This index can be used too get channel groups currently in use per engine/runlist. + * + */ +typedef struct NV2080_CTRL_FIFO_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_FIFO_INFO; + +/* valid fifo info index values */ +#define NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL (0x000000000) +#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS (0x000000001) +#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP (0x000000002) +#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP (0x000000003) +#define NV2080_CTRL_FIFO_INFO_INDEX_BAR1_USERD_START_OFFSET (0x000000004) +#define NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE (0x000000005) +#define NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE (0x000000006) +#define NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED (0x000000007) +#define NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE (0x000000008) +#define NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE (0x000000009) + + +/* set INDEX_MAX to greatest possible index value */ +#define NV2080_CTRL_FIFO_INFO_INDEX_MAX NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE + +#define NV2080_CTRL_FIFO_GET_INFO_USERD_OFFSET_SHIFT (12) + +/* + * NV2080_CTRL_CMD_FIFO_GET_INFO + * + * This command returns fifo engine information for the associated GPU. + * Requests to retrieve fifo information use an array of one or more + * NV2080_CTRL_FIFO_INFO structures. + * + * fifoInfoTblSize + * This field specifies the number of valid entries in the fifoInfoList + * array. This value cannot exceed NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES. + * fifoInfoTbl + * This parameter contains the client's fifo info table into + * which the fifo info values will be transferred by the RM. + * The fifo info table is an array of NV2080_CTRL_FIFO_INFO structures. 
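+ *
+ * Sketch of a two-entry query (illustrative only; rmControl() is a
+ * placeholder for the client's control dispatch routine):
+ *
+ *     NV2080_CTRL_FIFO_GET_INFO_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.fifoInfoTblSize      = 2;
+ *     params.fifoInfoTbl[0].index = NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS;
+ *     params.fifoInfoTbl[1].index = NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE;
+ *     status = rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_FIFO_GET_INFO,
+ *                        &params, sizeof(params));
+ *     // On NV_OK, the requested values are returned in fifoInfoTbl[i].data.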
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FIFO_GET_INFO (0x20801109) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum number of NV2080_CTRL_FIFO_INFO entries per request */ +#define NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES (256) + +#define NV2080_CTRL_FIFO_GET_INFO_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_FIFO_GET_INFO_PARAMS { + NvU32 fifoInfoTblSize; + /* + * C form: + * NV2080_CTRL_FIFO_INFO fifoInfoTbl[NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES]; + */ + NV2080_CTRL_FIFO_INFO fifoInfoTbl[NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES]; + NvU32 engineType; +} NV2080_CTRL_FIFO_GET_INFO_PARAMS; + + + +/* + * NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL + * + * This command removes the specified channel from the associated GPU's runlist + * and then initiates RC recovery. If the channel is active it will first be preempted. + * hChannel + * The handle to the channel to be preempted. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_CHANNEL + */ +#define NV2080_CTRL_CMD_FIFO_CHANNEL_PREEMPTIVE_REMOVAL (0x2080110a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS { + NvHandle hChannel; +} NV2080_CTRL_FIFO_CHANNEL_PREEMPTIVE_REMOVAL_PARAMS; + +/* + * NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS + * + * This command will disable or enable scheduling of channels described in the + * list provided. Whether or not the channels are also preempted off the GPU + * can be controlled by bOnlyDisableScheduling. By default channels are preempted + * off the GPU. + * + * bDisable + * This value determines whether to disable or + * enable the set of channels. + * numChannels + * The number of channels to be stopped. + * bOnlyDisableScheduling + * When false and bDisable=NV_TRUE,the call will ensure none of the listed + * channels are running in hardware and will not run until a call with + * bDisable=NV_FALSE is made. When true and bDisable=NV_TRUE, the control + * call will ensure that none of the listed channels can be scheduled on the + * GPU until a call with bDisable=NV_FALSE is made, but will not remove any + * of the listed channels from hardware if they are currently running. When + * bDisable=NV_FALSE this field is ignored. + * bRewindGpPut + * If a channel is being disabled and bRewindGpPut=NV_TRUE, the channel's RAMFC + * will be updated so that GP_PUT is reset to the value of GP_GET. + * hClientList + * An array of NvU32 listing the client handles + * hChannelList + * An array of NvU32 listing the channel handles + * to be stopped. 
+ * pRunlistPreemptEvent + * KEVENT handle for Async HW runlist preemption (unused on preMaxwell) + * When NULL, will revert to synchronous preemption with spinloop + * + * Possible status values returned are: + * NV_OK + * NVOS_INVALID_STATE + */ + +#define NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS (0x2080110b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES (64) + +#define NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS { + NvBool bDisable; + NvU32 numChannels; + NvBool bOnlyDisableScheduling; + NvBool bRewindGpPut; + NV_DECLARE_ALIGNED(NvP64 pRunlistPreemptEvent, 8); + // C form: NvHandle hClientList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES] + NvHandle hClientList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES]; + // C form: NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES] + NvHandle hChannelList[NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES]; +} NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS; + +#define NV2080_CTRL_FIFO_DISABLE_CHANNEL_FALSE (0x00000000) +#define NV2080_CTRL_FIFO_DISABLE_CHANNEL_TRUE (0x00000001) +#define NV2080_CTRL_FIFO_ONLY_DISABLE_SCHEDULING_FALSE (0x00000000) +#define NV2080_CTRL_FIFO_ONLY_DISABLE_SCHEDULING_TRUE (0x00000001) + +/* + * NV2080_CTRL_FIFO_MEM_INFO + * + * This structure describes the details of a block of memory. It consists + * of the following fields + * + * aperture + * One of the NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_* values + * base + * Physical base address of the memory + * size + * Size in bytes of the memory +*/ +typedef struct NV2080_CTRL_FIFO_MEM_INFO { + NvU32 aperture; + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); +} NV2080_CTRL_FIFO_MEM_INFO; + +/* + * NV2080_CTRL_FIFO_CHANNEL_MEM_INFO + * + * This structure describes the details of the instance memory, ramfc + * and method buffers a channel. It consists of the following fields + * + * inst + * Structure describing the details of instance memory + * ramfc + * Structure describing the details of ramfc + * methodBuf + * Array of structures describing the details of method buffers + * methodBufCount + * Number of method buffers(one per runqueue) + */ + +// max runqueues +#define NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT 0x2 + +typedef struct NV2080_CTRL_FIFO_CHANNEL_MEM_INFO { + NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO inst, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO ramfc, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_MEM_INFO methodBuf[NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT], 8); + NvU32 methodBufCount; +} NV2080_CTRL_FIFO_CHANNEL_MEM_INFO; + +/* + * NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM + * + * This command returns the memory aperture, physical base address and the + * size of each of the instance memory, cache1 and ramfc of a channel. + * + * hChannel + * The handle to the channel for which the memory information is desired. 
+ * chMemInfo + * A NV2080_CTRL_FIFO_CHANNEL_MEM_INFO structure + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_CHANNEL +*/ + +#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO (0x2080110c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS { + NvHandle hChannel; + NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO chMemInfo, 8); +} NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS; + +#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_INVALID 0x00000000 +#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM 0x00000001 +#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH 0x00000002 +#define NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH 0x00000003 + +/* + * NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION + * + * This command determines the location (vidmem/sysmem) + * and attribute (cached/uncached/write combined) of memory where USERD is located. + * + * aperture + * One of the NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_* values. + * + * attribute + * One of the NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_* values. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_POINTER +*/ + +#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION (0x2080110d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS { + NvU32 aperture; + NvU32 attribute; +} NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS; + +// support for CPU coherent vidmem (VIDMEM_NVILINK_COH) is not yet available in RM + +#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_VIDMEM 0x00000000 +#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_SYSMEM 0x00000001 + +#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_CACHED 0x00000000 +#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_UNCACHED 0X00000001 +#define NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_WRITECOMBINED 0X00000002 + + + +/* + * NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE + * + * This command retrieves entries from the SW encoded GPU device info table + * from Host RM. + * + * Parameters: + * + * baseIndex [in] + * The starting index to read from the devinfo table. Must be a multiple of + * MAX_ENTRIES. + * + * entries [out] + * A buffer to store up to MAX_ENTRIES entries of the devinfo table. + * + * numEntries [out] + * Number of populated entries in the provided buffer. + * + * bMore [out] + * A boolean flag indicating whether more valid entries are available to be + * read. A value of NV_TRUE indicates that a further call to this control + * with baseIndex incremented by MAX_ENTRIES will yield further valid data. 
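+ *
+ * Sketch of walking the whole table (illustrative only; rmControl() is a
+ * placeholder for the client's control dispatch routine):
+ *
+ *     NvU32  baseIndex = 0;
+ *     NvBool bMore     = NV_TRUE;
+ *
+ *     while (bMore)
+ *     {
+ *         NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS params = { 0 };
+ *
+ *         params.baseIndex = baseIndex;
+ *         if (rmControl(hClient, hSubdevice,
+ *                       NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE,
+ *                       &params, sizeof(params)) != NV_OK)
+ *             break;
+ *
+ *         // consume params.entries[0 .. params.numEntries - 1]
+ *         bMore      = params.bMore;
+ *         baseIndex += NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES;
+ *     }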
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES 256 +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES 32 +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES 16 +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA 2 +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16 + +/* + * NV2080_CTRL_FIFO_DEVICE_ENTRY + * + * This structure contains the engine, engine name and + * push buffers information of FIFO device entry. It consists of the following fields + * + * engineData + * Type of the engine + * pbdmaIds + * List of pbdma ids associated with engine + * pbdmaFaultIds + * List of pbdma fault ids associated with engine + * numPbdmas + * Number of pbdmas + * engineName + * Name of the engine + */ +typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY { + NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES]; + NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA]; + NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA]; + NvU32 numPbdmas; + char engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN]; +} NV2080_CTRL_FIFO_DEVICE_ENTRY; + +#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS { + NvU32 baseIndex; + NvU32 numEntries; + NvBool bMore; + // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES]; + NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES]; +} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS; + +/* + * NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT + * + * This command clears the ENGINE or PBDMA FAULTED bit and reschedules the faulted channel + * by ringing channel's doorbell + * + * Parameters: + * + * engineType [in] + * The NV2080_ENGINE_TYPE of the engine to which the faulted + * channel is bound. This may be a logical id for guest RM in + * case of SMC. + * + * vChid [in] + * Virtual channel ID on which the fault occurred + * + * faultType [in] + * Whether fault was triggered by engine (_ENGINE_FAULTED) or PBDMA (_PBDMA_FAULTED) + * The value specified must be one of the NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_* values + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT (0x20801113) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_ENGINE 0x00000001 +#define NV2080_CTRL_FIFO_CLEAR_FAULTED_BIT_FAULT_TYPE_PBDMA 0x00000002 + +#define NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS { + NvU32 engineType; + NvU32 vChid; + NvU32 faultType; +} NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS; + + + +/* + * NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY + * + * Allows clients to set the global scheduling policy for all runlists + * associated to the given subdevice. + * + * Currently, this is only supported for HW runlists. 
+ *
+ * Since this is a global setting, only privileged clients will be allowed to
+ * set it. Regular clients will get an NV_ERR_INSUFFICIENT_PERMISSIONS error.
+ *
+ * Once a certain scheduling policy is set, that policy cannot be changed to a
+ * different one unless all clients which set it have either restored the policy
+ * (using the corresponding restore flag) or died. Clients trying to set a
+ * policy while a different one is locked by another client will get an
+ * NV_ERR_INVALID_STATE error.
+ *
+ * The same client can set a scheduling policy and later change to another one
+ * only when no other clients have set the same policy. Such a sequence is
+ * equivalent to restoring the policy in between.
+ *
+ * For instance, the following sequence:
+ *
+ *    1. Set policy A
+ *    2. Set policy B
+ *
+ * is equivalent to:
+ *
+ *    1. Set policy A
+ *    2. Restore policy
+ *    3. Set policy B
+ *
+ * Parameters:
+ *
+ *    flags
+ *      This field specifies the operational properties to be applied:
+ *
+ *      - NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_FALSE
+ *          Try to set the provided 'schedPolicy' scheduling policy. If the
+ *          operation succeeds, other clients will be prevented from setting a
+ *          different scheduling policy until all clients using it have either
+ *          restored it or died.
+ *
+ *      - NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_TRUE
+ *          Let the scheduler know the client no longer requires the current
+ *          scheduling policy. This may or may not actually change the
+ *          scheduling policy, depending on how many other clients are also
+ *          using the current policy.
+ *
+ *          The 'schedPolicy' parameter is ignored when this flag is set.
+ *
+ *    schedPolicy
+ *      One of:
+ *
+ *      - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_DEFAULT
+ *          Set the default scheduling policy and prevent other clients from
+ *          changing it.
+ *
+ *      - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED
+ *          This scheduling policy causes channels to be scheduled according
+ *          to their interleave level. See NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL
+ *          description for more details.
+ *      - NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED_WDDM
+ *          This scheduling policy causes channels to be scheduled according
+ *          to their interleave level per the WDDM policy.
+ *          See NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL description for more details.
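+ *
+ *          Illustrative sketch (an editorial addition, not part of the original
+ *          interface): a privileged client might lock in the interleaved policy
+ *          and later release it as shown below. rmControl() is a hypothetical
+ *          stand-in for the client's RM control dispatch path, and hClient and
+ *          hSubdevice are assumed pre-existing handles.
+ *
+ *            NV_STATUS status;
+ *            NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS p = { 0 };
+ *
+ *            // Request the interleaved policy (RESTORE flag left FALSE).
+ *            p.flags       = NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_FALSE;
+ *            p.schedPolicy = NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED;
+ *            status = rmControl(hClient, hSubdevice,
+ *                               NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY,
+ *                               &p, sizeof(p));
+ *
+ *            // ... later, when the policy is no longer needed, restore it.
+ *            // schedPolicy is ignored when the RESTORE flag is set.
+ *            p.flags = NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_TRUE;
+ *            status = rmControl(hClient, hSubdevice,
+ *                               NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY,
+ *                               &p, sizeof(p));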
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_DEVICE
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY (0x20801115) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS_MESSAGE_ID" */
+
+/* schedPolicy values */
+#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_DEFAULT                      0x0
+#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED          0x1
+#define NV2080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED_WDDM     0x2
+
+/* SET_SCHED_POLICY flags */
+#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE        0:0
+#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_FALSE  (0x00000000)
+#define NV2080_CTRL_CMD_FIFO_RUNLIST_SET_SCHED_POLICY_FLAGS_RESTORE_TRUE   (0x00000001)
+
+#define NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS_MESSAGE_ID (0x15U)
+
+typedef struct NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS {
+    NvU32 flags;
+    NvU32 schedPolicy;
+} NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_UPDATE_CHANNEL_INFO
+ *
+ * This command updates the channel info params for an existing channel.
+ *
+ * This can be a deferred API. The control call can be used for migrating a
+ * channel to a new USERD and GPFIFO.
+ *
+ * Parameters:
+ *    [in] hClient       - Client handle
+ *    [in] hChannel      - Channel handle
+ *    [in] hUserdMemory  - UserD handle
+ *    [in] gpFifoEntries - Number of Gpfifo Entries
+ *    [in] gpFifoOffset  - Gpfifo Virtual Offset
+ *    [in] userdOffset   - UserD offset
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FIFO_UPDATE_CHANNEL_INFO (0x20801116) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS_MESSAGE_ID (0x16U)
+
+typedef struct NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS {
+    NvHandle hClient;
+    NvHandle hChannel;
+    NvHandle hUserdMemory;
+    NvU32 gpFifoEntries;
+    NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 userdOffset, 8);
+} NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS
+ *
+ * This command will disable or enable scheduling of all usermode channels.
+ *
+ *   bDisable
+ *     This value determines whether to disable or enable the usermode channels.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+#define NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS (0x20801117) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS {
+    NvBool bDisable;
+} NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB
+ *
+ * When a VF subcontext is marked as a zombie, host RM points its PDB to a dummy
+ * page allocated by guest RM in GPA space. This command provides the parameters
+ * of the guest RM's memory descriptor to be able to create a corresponding
+ * memory descriptor on the host RM. Host RM uses this to program the PDB of a
+ * zombie subcontext.
+ *
+ * Parameters:
+ *    Input parameters to describe the memory descriptor
+ *    [in] base
+ *    [in] size
+ *    [in] addressSpace
+ *    [in] cacheAttrib
+ */
+#define NV2080_CTRL_CMD_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB (0x20801118) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32 addressSpace;
+    NvU32 cacheAttrib;
+} NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS;
+
+/* _ctrl2080fifo_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
new file mode 100644
index 000000000..c3031a29e
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fla.h
@@ -0,0 +1,213 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl2080/ctrl2080fla.finn
+//
+
+
+
+
+#include "ctrl/ctrl2080/ctrl2080base.h"
+
+/* NV20_SUBDEVICE_XX FLA control commands and parameters */
+
+#include "ctrl2080common.h"
+
+/*
+ * NV2080_CTRL_CMD_FLA_RANGE
+ *
+ * This command is used to initialize/destroy the FLA VAS for a GPU. This is intended
+ * to be used by RM clients that manage the FLA VASpace range. The mode of the
+ * command is decided based on the parameter passed by the client.
+ *
+ *    base
+ *      This parameter specifies the base of the FLA VAS that needs to be allocated
+ *      for this GPU
+ *
+ *    size
+ *      This parameter specifies the size of the FLA VAS that needs to be allocated
+ *      for this GPU
+ *
+ *    mode
+ *      This parameter specifies the functionality of the command.
+ *      MODE_INITIALIZE
+ *        Setting this mode will initialize the FLA VASpace for the GPU with the
+ *        base and size passed as arguments. The FLA VASpace will be owned by RM.
+ *        If the client calls the command more than once before destroying
+ *        the FLA VAS, then this command will verify the range exported before and
+ *        return success if it matches. If FLA is not supported for the platform,
+ *        it will return NV_ERR_NOT_SUPPORTED.
+ *      MODE_DESTROY (deprecated)
+ *        This command is a NOP.
+ *      MODE_HOST_MANAGED_VAS_INITIALIZE
+ *        This mode will initialize the FLA VASpace for the GPU with the hVASpace
+ *        handle in addition to the base and size arguments. The FLA VASpace will be
+ *        initiated and owned by guest RM. Used only in virtualization platforms by
+ *        internal clients.
+ *      MODE_HOST_MANAGED_VAS_DESTROY
+ *        This mode will destroy the FLA VAS associated with the device. It will destruct
+ *        only the resources associated with the host RM side. Used only in virtualization
+ *        platforms by internal clients.
+ *
+ *    hVASpace
+ *      This parameter specifies the FLA VASpace that needs to be associated with the
+ *      device. This parameter takes effect only for internal clients in virtualization
+ *      platforms. For any other platform and external clients, this parameter has no effect.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_IN_USE
+ *   NV_ERR_INVALID_OWNER
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_FLA_RANGE (0x20803501) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_RANGE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV2080_CTRL_FLA_RANGE_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32    mode;
+    NvHandle hVASpace;
+} NV2080_CTRL_FLA_RANGE_PARAMS;
+
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_NONE                          0x00000000
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_INITIALIZE                    NVBIT(0)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_DESTROY                       NVBIT(1)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_INITIALIZE   NVBIT(2)
+#define NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_DESTROY      NVBIT(3)
+
+
+/*
+ * NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK
+ *
+ * This command is used to (un)bind the FLA Instance Memory Block (IMB) with the MMU.
+ * This control call is created for the vGPU platform, when a FLA VAS is created/destroyed
+ * by Guest RM. Guest RM doesn't have the privilege to (un)bind the IMB with the MMU, hence
+ * the request needs to be RPC-ed to Host RM to (un)bind it.
+ * The mode of the command is decided based on the actionParam passed by the client.
+ *
+ *    imbPhysAddr
+ *      This parameter specifies the FLA Instance Memory Block PA to be programmed
+ *      to the MMU. The IMB address should be 4k aligned. This parameter is needed only
+ *      for ACTION_BIND.
+ *
+ *    addrSpace
+ *      This parameter specifies the address space of the FLA Instance Memory Block. This
+ *      parameter is needed only for ACTION_BIND.
+ *      Available options are:
+ *      NV2080_CTRL_FLA_ADDRSPACE_SYSMEM
+ *        Clients need to use this address space if the IMB is located in sysmem
+ *      NV2080_CTRL_FLA_ADDRSPACE_FBMEM
+ *        Clients need to use this address space if the IMB is located in FB
+ *
+ *    actionParam
+ *      This parameter specifies the functionality of the command.
+ * NV2080_CTRL_FLA_ACTION_BIND + * Setting this type, will call busBindFla helper HAL + * NV2080_CTRL_FLA_ACTION_UNBIND + * Setting this type, will call busUnbindFla helper HAL + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INSUFFICIENT_PERMISSIONS + */ +typedef enum NV2080_CTRL_FLA_ADDRSPACE { + NV2080_CTRL_FLA_ADDRSPACE_SYSMEM = 0, + NV2080_CTRL_FLA_ADDRSPACE_FBMEM = 1, +} NV2080_CTRL_FLA_ADDRSPACE; + +typedef enum NV2080_CTRL_FLA_ACTION { + NV2080_CTRL_FLA_ACTION_BIND = 0, + NV2080_CTRL_FLA_ACTION_UNBIND = 1, +} NV2080_CTRL_FLA_ACTION; + +#define NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK (0x20803502) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS { + NV_DECLARE_ALIGNED(NvU64 imbPhysAddr, 8); + NV2080_CTRL_FLA_ADDRSPACE addrSpace; + NV2080_CTRL_FLA_ACTION flaAction; +} NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS; + + +/* + * NV2080_CTRL_CMD_FLA_GET_RANGE + * + * This command is used to query the FLA base and size from plugin to return as static info to Guest RM. + * + * base + * This parameter returns the base address of FLA range registered to the subdevice. + * size + * This parameter returns the size of FLA range registered to the subdevice. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FLA_GET_RANGE (0x20803503) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_GET_RANGE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLA_GET_RANGE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_FLA_GET_RANGE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); +} NV2080_CTRL_FLA_GET_RANGE_PARAMS; + +/* + * NV2080_CTRL_CMD_FLA_GET_FABRIC_MEM_STATS + * + * This command returns the total size and the free size of the fabric vaspace. + * Note: This returns the information for the FABRIC_VASPACE_A class. + * + * totalSize[OUT] + * - Total fabric vaspace. + * + * freeSize [OUT] + * - Available fabric vaspace. + * + * Possible status values returned are: + * + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FLA_GET_FABRIC_MEM_STATS (0x20803504) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID << 8) | NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS { + NV_DECLARE_ALIGNED(NvU64 totalSize, 8); + NV_DECLARE_ALIGNED(NvU64 freeSize, 8); +} NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS; + +// _ctrl2080fla_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h new file mode 100644 index 000000000..1961ec35d --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080flcn.h @@ -0,0 +1,550 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080flcn.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + + +// +// XAPICHK/XAPI_TEST breaks on including "nvmisc.h". Workaround: don't include +// the header in that case and just redefine the macros we need. +// +#include "nvmisc.h" +/* + * Obsolete Falcon ID type. Use NV2080_ENGINE_TYPE_ instead + */ +#define FALCON_ID_PMU (NV2080_ENGINE_TYPE_PMU) +#define FALCON_ID_DPU (NV2080_ENGINE_TYPE_DPU) +#define FALCON_ID_SEC2 (NV2080_ENGINE_TYPE_SEC2) +#define FALCON_ID_FBFLCN (NV2080_ENGINE_TYPE_FBFLCN) + +/* + * NV2080_CTRL_CMD_FLCN_GET_DMEM_USAGE + * + * This command returns total heap size and free heap size of a falcon engine + * + * flcnID + * The falcon ID + * + * heapSize + * Total heap size in byte + * + * heapFree + * Total free heap size in byte + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT + */ +#define NV2080_CTRL_CMD_FLCN_GET_DMEM_USAGE (0x20803101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS { + NvU32 flcnID; + NvU32 heapSize; + NvU32 heapFree; +} NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS; + +/*! + * @defgroup NVOS_INST_EVT Instrumentation event types. 
+ * @{ + */ +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_RECALIBRATE 0x00U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_CTXSW_TICK 0x01U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_CTXSW_YIELD 0x02U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_CTXSW_INT0 0x03U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_CTXSW_BLOCK 0x04U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_UNBLOCK 0x05U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_HANDLER_BEGIN 0x06U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_HANDLER_END 0x07U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_QUEUE_SEND 0x08U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_QUEUE_RECV 0x09U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_RPC_BEGIN 0x0AU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_RPC_END 0x0BU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_SKIPPED 0x0CU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_EXEC_PROFILE_BEGIN 0x0DU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_EXEC_PROFILE_END 0x0EU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_LOAD_PROFILE_BEGIN 0x0FU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_LOAD_PROFILE_END 0x10U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_ODP_CODE_BEGIN 0x11U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_ODP_CODE_END 0x12U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_ODP_DATA_BEGIN 0x13U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_ODP_DATA_END 0x14U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_DMA_PROFILE_BEGIN 0x15U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_DMA_PROFILE_END 0x16U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_DMA_ODP_PROFILE_BEGIN 0x17U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_TASK_DMA_ODP_PROFILE_END 0x18U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_USER_CUSTOM_BEGIN 0x19U +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_USER_CUSTOM_END 0x1AU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_USER_CUSTOM_2_BEGIN 0x1BU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_USER_CUSTOM_2_END 0x1CU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_USER_CUSTOM_3_BEGIN 0x1DU +#define NV2080_CTRL_FLCN_NVOS_INST_EVT_USER_CUSTOM_3_END 0x1EU +/*!@}*/ + +#define NV2080_CTRL_FLCN_NVOS_INST_INVALID_TASK_ID 0xFFU + +/*! + * Interrupts and exceptions both use the same event type. Set the first bit + * for exceptions to differentiate between the two. + */ +#define NV2080_CTRL_FLCN_NVOS_INST_IRQ_ID(_irqId) (_irqId) +#define NV2080_CTRL_FLCN_NVOS_INST_EXCI_ID(_exciId) (_exciId | (1 << 7)) + +/*! + * @defgroup NV_INSTRUMENTATION_EVENT_DATA Instrumentation event struct entry. + * + * This is a binary-packed representation of event type and additional data, + * including timing data and tracking IDs. + * + * @{ + */ + +/*! + * Below DRF needs constants assigned to start and end so it can be represented in FINN properly + * This is because FINN v1 will not have the ability to represent DRF's and bit fields yet + */ +#define NV_INSTRUMENTATION_EVENT_DATA_EVENT_TYPE_DRF_EXTENT (31) +#define NV_INSTRUMENTATION_EVENT_DATA_EVENT_TYPE_DRF_BASE (27) +#define NV_INSTRUMENTATION_EVENT_DATA_EVENT_TYPE \ + (NV_INSTRUMENTATION_EVENT_DATA_EVENT_TYPE_DRF_EXTENT) : \ + (NV_INSTRUMENTATION_EVENT_DATA_EVENT_TYPE_DRF_BASE) + +#define NV_INSTRUMENTATION_EVENT_DATA_TASK_ID 26:19 +#define NV_INSTRUMENTATION_EVENT_DATA_EXTRA 26:19 +#define NV_INSTRUMENTATION_EVENT_DATA_TIME_DELTA 18:0 +#define NV_INSTRUMENTATION_EVENT_DATA_TIME_ABS 26:0 +/*!@}*/ + +/*! + * The maximum number of event types, calculated from the number of bits in the + * event structure. 
+ */ +#define NV2080_CTRL_FLCN_NVOS_INST_NUM_EVT_TYPES (0x20U) /* finn: Evaluated from "(1 << (NV_INSTRUMENTATION_EVENT_DATA_EVENT_TYPE_DRF_EXTENT - NV_INSTRUMENTATION_EVENT_DATA_EVENT_TYPE_DRF_BASE + 1))" */ + +/*! + * The number of bytes required in the event mask to contain all event types. + */ +#define NV2080_CTRL_FLCN_NVOS_INST_MASK_SIZE_BYTES (0x4U) /* finn: Evaluated from "(NV2080_CTRL_FLCN_NVOS_INST_NUM_EVT_TYPES / 8)" */ + +/*! + * Instrumentation event bitfield structure. Exact structure depends on the + * first five bits, which represent event type. + * + * For most event types, the structure is: + * - 5 bits of event type + * - 8 bits of ID + * - 19 bits of delta time (time since last event). If we've missed some + * events, it's the amount of time since the last event that was not + * skipped. If this time would overflow, a recalibration event is inserted + * instead (see below). + * + * The main exception is the recalibration event, which has no ID/delta time + * fields and instead has a 27-bit absolute timestamp. This event is used + * when the gap between two events is greater than the maximum 20-bit integer. + * + * All timestamps are represented in increments of 32ns + * (the finest possible timer resolution). + */ +typedef struct NVOS_INSTRUMENTATION_EVENT { + /*! + * Field containing the event type and data. + * + * Bitmask of @ref NV_INSTRUMENTATION_EVENT_DATA. + */ + NvU32 data; +} NVOS_INSTRUMENTATION_EVENT; +typedef struct NVOS_INSTRUMENTATION_EVENT *PNVOS_INSTRUMENTATION_EVENT; + +/* + * NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_MAP/UNMAP + * + * Params for both RmCtrls are the same (out for _MAP, in for _UNMAP) + */ +#define NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_MAP (0x20803112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | 0x12" */ + +#define NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_UNMAP (0x20803113) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | 0x13" */ + +typedef struct NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS { + //! The engine type, from NV2080_ENGINE_TYPE_* + NvU32 engine; + + //! The beginning of the instrumentation buffer, mapped to user memory. + NV_DECLARE_ALIGNED(NvP64 begin, 8); + + // Priv pointer for memory mapping. + NV_DECLARE_ALIGNED(NvP64 pPriv, 8); + + // The size of the user-mapped instrumentation buffer. + NV_DECLARE_ALIGNED(NvU64 size, 8); +} NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS; + +/* + * NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_GET_INFO + * + * Get static information about FLCN instrumentation. + */ +#define NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_GET_INFO (0x20803114) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_INSTRUMENTATION_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLCN_INSTRUMENTATION_GET_INFO_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV2080_CTRL_FLCN_INSTRUMENTATION_GET_INFO_PARAMS { + //! The engine type, from NV2080_ENGINE_TYPE_* + NvU32 engine; + + //! Whether or not instrumentation is enabled + NvBool bEnabled; + + /*! + * Whether we use PTIMER (resolution 32ns) or the 30us timer tick (NV_TRUE + * is PTIMER). + */ + NvBool bIsTimerPrecise; +} NV2080_CTRL_FLCN_INSTRUMENTATION_GET_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_GET/SET_CONTROL + * + * Get/set the event bitmask. 
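+ *
+ * Illustrative sketch (an editorial addition, not part of the original
+ * interface): a client could read the current mask, enable one event type,
+ * and write the mask back, shown here with plain bit arithmetic in place of
+ * the NVOS_BM_* helpers referenced below. rmControl() is a hypothetical
+ * stand-in for the client's RM control dispatch path, and hClient/hSubdevice
+ * are assumed pre-existing handles.
+ *
+ *   NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS ctrl = { 0 };
+ *   ctrl.engine = NV2080_ENGINE_TYPE_PMU;
+ *   rmControl(hClient, hSubdevice,
+ *             NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_GET_CONTROL, &ctrl, sizeof(ctrl));
+ *   // Enable logging of context-switch tick events (event type 0x01).
+ *   ctrl.mask[NV2080_CTRL_FLCN_NVOS_INST_EVT_CTXSW_TICK / 8] |=
+ *       (NvU8)(1 << (NV2080_CTRL_FLCN_NVOS_INST_EVT_CTXSW_TICK % 8));
+ *   rmControl(hClient, hSubdevice,
+ *             NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_SET_CONTROL, &ctrl, sizeof(ctrl));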
+ */
+#define NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_GET_CONTROL (0x20803115) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | 0x15" */
+
+#define NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_SET_CONTROL (0x20803116) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | 0x16" */
+
+typedef struct NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    /*!
+     * The bitmask of which event types to log. An event type corresponding to
+     * a bit with a zero will be ignored at the log site, which prevents it
+     * from filling up the resident buffer in the PMU. In general, set this to
+     * only log the event types you actually want to use.
+     * Refer to NVOS_BM_* in nvos_utility.h for usage.
+     */
+    NvU8  mask[4];
+} NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_RECALIBRATE
+ *
+ * Send a recalibrate event to the instrumentation.
+ */
+#define NV2080_CTRL_CMD_FLCN_INSTRUMENTATION_RECALIBRATE (0x20803117) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_INSTRUMENTATION_RECALIBRATE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_INSTRUMENTATION_RECALIBRATE_PARAMS_MESSAGE_ID (0x17U)
+
+typedef struct NV2080_CTRL_FLCN_INSTRUMENTATION_RECALIBRATE_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+} NV2080_CTRL_FLCN_INSTRUMENTATION_RECALIBRATE_PARAMS;
+
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_ENGINE_ARCH
+ *
+ * Get the engine arch, i.e. FALCON, RISC-V, etc., for a given NV2080_ENGINE_TYPE_*.
+ *
+ */
+#define NV2080_CTRL_CMD_FLCN_GET_ENGINE_ARCH (0x20803118) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS_MESSAGE_ID (0x18U)
+
+typedef struct NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    //! The engine architecture - FALCON or RISC-V
+    NvU32 engineArch;
+} NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS;
+
+/*!
+ * @defgroup Engine Arch types
+ * @{
+ */
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_DEFAULT 0x0
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_FALCON  0x1
+#define NV2080_CTRL_FLCN_GET_ENGINE_ARCH_RISCV   0x2
+/*!@}*/
+
+
+/* ----------------------- uStreamer (INST v2) ------------------------------ */
+/*!
+ * @defgroup NV2080_CTRL_FLCN_USTREAMER_EVENT uStreamer event fields.
+ *
+ * This is a binary-packed representation of uStreamer events. There are
+ * three main types of entry: Head, Payload, and Tail. COMM here is used
+ * when a field is shared among multiple event types.
+ *
+ * @{
+ */
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_COMM_FLAG 31:31
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_COMM_HEAD 30:30
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_VARIABLE 29:29
+#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EXTEND 28:28
+
+/*!
+ * Below DRF's need constants assigned to start and end so they can be represented in FINN properly
+ * This is because FINN v1 will not have the ability to represent DRF's and bit fields yet.
+ */ +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT (27) +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE (20) +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID \ + (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT) : \ + (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE) +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT (28) +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE (24) +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT \ + (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT) : \ + (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE) + +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_LENGTH 19:8 +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOAD 7:0 +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_PAYLOADCOMPACT 23:0 +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_HEAD_TIME 29:0 +#define NV2080_CTRL_FLCN_USTREAMER_EVENT_DATA_PAYLOAD 30:0 +/*!@}*/ + + +/*! + * @defgroup NV2080_CTRL_FLCN_USTREAMER_FEATURE + * + * This defines all the features currently supported by uStreamer. For a new + * usecase of uStreamer, a feature should be defined here describing the usecase. + * This value should be unique for each queue. + * + * @{ + */ +#define NV2080_CTRL_FLCN_USTREAMER_FEATURE_DEFAULT 0U +#define NV2080_CTRL_FLCN_USTREAMER_FEATURE_PMUMON 1U +#define NV2080_CTRL_FLCN_USTREAMER_FEATURE__COUNT 2U +/*!@}*/ + +/*! + * @defgroup NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY + * + * This defines the DRF used for ustreamer queue policy + * + * @{ + */ + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH 0:0 +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH_DISABLED 0U +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_FLUSH_ENABLED 1U + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH 1:1 +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH_DISABLED 0U +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_FULL_FLUSH_ENABLED 1U + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH 2:2 +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH_DISABLED 0U +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IMMEDIATE_FLUSH_ENABLED 1U + +#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_POLICY_IDLE_THRESHOLD 31:8 + +/*!@}*/ + +/*! + * The maximum number of compact event types, calculated from the number of bits + * in the event structure. + */ +#define NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES_COMPACT (0x20U) /* finn: Evaluated from "(1 << (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_EXTENT - NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTIDCOMPACT_DRF_BASE + 1))" */ + +/*! + * The maximum number of event types, calculated from the number of bits in the + * event structure. + */ +#define NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES (0x120U) /* finn: Evaluated from "((1 << (NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_EXTENT - NV2080_CTRL_FLCN_USTREAMER_EVENT_TAIL_EVENTID_DRF_BASE + 1)) + NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES_COMPACT)" */ + +/*! + * The number of bytes required in the event mask to contain all event types. + */ +#define NV2080_CTRL_FLCN_USTREAMER_MASK_SIZE_BYTES (0x24U) /* finn: Evaluated from "((NV2080_CTRL_FLCN_USTREAMER_NUM_EVT_TYPES + 7) / 8)" */ + +/*! + * uStreamer Event Filter type, stored as a bitmask. + */ +typedef struct NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER { + NvU8 mask[NV2080_CTRL_FLCN_USTREAMER_MASK_SIZE_BYTES]; +} NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER; + +/*! 
+ * NV2080_CTRL_CMD_FLCN_USTREAMER_QUEUE_INFO
+ * Get queue info for mapping / unmapping
+ */
+#define NV2080_CTRL_CMD_FLCN_USTREAMER_QUEUE_INFO (0x20803120) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS_MESSAGE_ID (0x20U)
+
+typedef struct NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    //!
+    // The page size of the requested queue in bytes.
+    //
+    NvU32 pageSize;
+
+    //! Offset of the queue buffer in FB.
+    NV_DECLARE_ALIGNED(NvUPtr offset, 8);
+
+    //!
+    // The size of the user-mapped instrumentation buffer. Measured in bytes.
+    //
+    NvU32 size;
+
+    //!
+    // The feature ID of the queue.
+    //
+    NvU8  queueFeatureId;
+} NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_GET/SET
+ *
+ * Get/set the event bitmask for the default queue.
+ */
+#define NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_GET (0x20803122) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | 0x22" */
+
+#define NV2080_CTRL_CMD_FLCN_USTREAMER_CONTROL_SET (0x20803123) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | 0x23" */
+
+typedef struct NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS {
+    //! The engine type, from NV2080_ENGINE_TYPE_*
+    NvU32 engine;
+
+    /*!
+     * The bitmask of which event types to log. An event type corresponding to
+     * a bit with a zero will be ignored at the log site, which prevents it
+     * from filling up the resident buffer in the PMU. In general, set this to
+     * only log the event types you actually want to use.
+     * Refer to NVOS_BM_* in nvos_utility.h for usage.
+     */
+    NV2080_CTRL_FLCN_USTREAMER_EVENT_FILTER eventFilter;
+
+    //! The queueId of the queue whose eventFilter we want to interact with
+    NvU8 queueId;
+} NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_INFO
+ *
+ * This command provides the attributes of the falcon engine context buffer.
+ *
+ *   hUserClient [IN]
+ *     This parameter specifies the client handle that owns this channel.
+ *   hChannel [IN]
+ *     This parameter specifies the channel or channel group (TSG) handle.
+ *   alignment
+ *     Specifies the alignment requirement for each context buffer.
+ *   size
+ *     Aligned size of the context buffer.
+ *   bufferHandle
+ *     Opaque pointer to the memdesc. Used by kernel clients for tracking purposes only.
+ *   pageCount
+ *     Allocation size in the form of a pageCount.
+ *   physAddr
+ *     Physical address of the buffer's first page.
+ *   aperture
+ *     Allocation aperture. Could be SYSMEM, VIDMEM, UNKNOWN.
+ *   kind
+ *     PTE kind of this allocation.
+ *   pageSize
+ *     Page size of the buffer.
+ *   bIsContigous
+ *     States if the physical allocation for this buffer is contiguous. PageSize will
+ *     have no meaning if this flag is set.
+ *   bDeviceDescendant
+ *     TRUE if the allocation is constructed under a Device or Subdevice.
+ *   uuid
+ *     SHA1 UUID of the Device or Subdevice. Valid when bDeviceDescendant is TRUE.
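+ *
+ *     Illustrative sketch (an editorial addition, not part of the original
+ *     interface): a kernel client might query the context buffer attributes
+ *     for a channel as shown below. rmControl() is a hypothetical stand-in
+ *     for the RM control dispatch path; hClient, hSubdevice and hChannel are
+ *     assumed pre-existing handles.
+ *
+ *       NV_STATUS status;
+ *       NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS info = { 0 };
+ *       info.hUserClient = hClient;
+ *       info.hChannel    = hChannel;   // channel or channel group (TSG) handle
+ *       status = rmControl(hClient, hSubdevice,
+ *                          NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_INFO,
+ *                          &info, sizeof(info));
+ *       // On NV_OK, info.size, info.alignment, info.aperture, etc. describe
+ *       // the engine context buffer.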
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_INFO (0x20803124) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS { + NvHandle hUserClient; + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 alignment, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NV_DECLARE_ALIGNED(NvP64 bufferHandle, 8); + NV_DECLARE_ALIGNED(NvU64 pageCount, 8); + NV_DECLARE_ALIGNED(NvU64 physAddr, 8); + NvU32 aperture; + NvU32 kind; + NvU32 pageSize; + NvBool bIsContigous; + NvBool bDeviceDescendant; + NvU8 uuid[16]; +} NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS; + +// Aperture flags +#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_UNKNWON ADDR_UNKNOWN +#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_SYSMEM ADDR_SYSMEM +#define NV2080_CTRL_FLCN_CTX_BUFFER_INFO_APERTURE_FBMEM ADDR_FBMEM + +/* + * NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_SIZE + * + * This command provides the size of the falcon engine context buffer + * + * hChannel [IN] + * This parameter specifies the channel or channel group (TSG) handle + * totalBufferSize [OUT] + * This parameter returns the total context buffers size. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_SIZE (0x20803125) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID << 8) | NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS { + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 totalBufferSize, 8); +} NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS; + + + +/* _ctrl2080flcn_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h new file mode 100644 index 000000000..a208afb28 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fuse.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080fuse.finn +// + + + +/* _ctrl2080fuse_h_ */ + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h new file mode 100644 index 000000000..774fc40a9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpio.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080gpio.finn +// + + + +/* _ctrl2080gpio_h_ */ + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h new file mode 100644 index 000000000..aa53cfbd9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h @@ -0,0 +1,3782 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080gpu.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080gr.h" +#include "ctrl/ctrl0000/ctrl0000system.h" + + + +/* NV20_SUBDEVICE_XX gpu control commands and parameters */ + +/* Valid feature values */ +#define NV2080_CTRL_GPU_GET_FEATURES_CLK_ARCH_DOMAINS 0:0 +#define NV2080_CTRL_GPU_GET_FEATURES_CLK_ARCH_DOMAINS_FALSE (0x00000000) +#define NV2080_CTRL_GPU_GET_FEATURES_CLK_ARCH_DOMAINS_TRUE (0x00000001) + + + +typedef struct NV2080_CTRL_GPU_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_GPU_INFO; + +/* valid gpu info index values */ + + + +#define NV2080_CTRL_GPU_INFO_INDEX_MINOR_REVISION_EXT (0x00000004) + + +#define NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV0 (0x00000012) +#define NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV1 (0x00000013) + + +#define NV2080_CTRL_GPU_INFO_INDEX_SYSMEM_ACCESS (0x0000001f) + + +#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD (0x00000022) + + +#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE (0x00000025) +#define NV2080_CTRL_GPU_INFO_INDEX_IBMNPU_RELAXED_ORDERING (0x00000026) +#define NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED (0x00000027) +#define NV2080_CTRL_GPU_INFO_INDEX_NVSWITCH_PROXY_DETECTED (0x00000028) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT (0x00000029) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SMC_MODE (0x0000002a) +#define NV2080_CTRL_GPU_INFO_INDEX_SPLIT_VAS_MGMT_SERVER_CLIENT_RM (0x0000002b) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SM_VERSION (0x0000002c) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY (0x0000002d) + + +#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM (0x0000002f) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY (0x00000030) +#define NV2080_CTRL_GPU_INFO_INDEX_NVENC_STATS_REPORTING_STATE (0x00000031) + + +#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED (0x00000033) +#define NV2080_CTRL_GPU_INFO_INDEX_DISPLAY_ENABLED (0x00000034) +#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED (0x00000035) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY (0x00000036) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY (0x00000037) + + +#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU (0x0000003c) +#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY (0x0000003d) +#define NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE (0x0000003e) + +/* valid minor revision extended values */ +#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_NONE (0x00000000) +#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_P (0x00000001) +#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_V (0x00000002) +#define NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_PV (0x00000003) + + + +/* valid system memory access capability values */ +#define NV2080_CTRL_GPU_INFO_SYSMEM_ACCESS_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_SYSMEM_ACCESS_YES (0x00000001) + + + +/* valid gemini board values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD_YES (0x00000001) + +/* valid surprise removal values */ +#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE_YES (0x00000001) + +/* valid relaxed ordering values */ +#define NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_DISABLED (0x00000000) +#define NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_ENABLED (0x00000001) +#define NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_UNSUPPORTED (0xFFFFFFFF) + +/* valid poison fuse capability values */ +#define 
NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED_YES (0x00000001) + +/* valid nvswitch proxy detected values */ +#define NV2080_CTRL_GPU_INFO_NVSWITCH_PROXY_DETECTED_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_NVSWITCH_PROXY_DETECTED_YES (0x00000001) + +/* valid NVSR GPU support info values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_SR_SUPPORT_YES (0x00000001) + +/* valid SMC mode values */ +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_UNSUPPORTED (0x00000000) +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLED (0x00000001) +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLED (0x00000002) +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLE_PENDING (0x00000003) +#define NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLE_PENDING (0x00000004) + +/* valid split VAS mode values */ +#define NV2080_CTRL_GPU_INFO_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_YES (0x00000001) + +/* valid grid capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY_YES (0x00000001) + +/* valid per runlist channel ram capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM_DISABLED (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM_ENABLED (0x00000001) + +/* valid ATS capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY_YES (0x00000001) + +/* valid Nvenc Session Stats reporting state values */ +#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_DISABLED (0x00000000) +#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_ENABLED (0x00000001) +#define NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_NOT_SUPPORTED (0x00000002) + +/* valid 4K PAGE isolation requirement values */ +#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_YES (0x00000001) + +/* valid display enabled values */ +#define NV2080_CTRL_GPU_INFO_DISPLAY_ENABLED_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_DISPLAY_ENABLED_YES (0x00000001) + +/* valid mobile config enabled values */ +#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_YES (0x00000001) + + +/* valid profiling capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY_DISABLED (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY_ENABLED (0x00000001) + +/* valid debugging capability values */ +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY_DISABLED (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY_ENABLED (0x00000001) + + + +/* valid CMP (Crypto Mining Processor) SKU values */ +#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU_YES (0x00000001) + + +/* valid dma-buf suport values */ +#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_NO (0x00000000) +#define NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_YES (0x00000001) + +/* + * NV2080_CTRL_CMD_GPU_GET_INFO + * + * This command returns gpu information for the associated GPU. Requests + * to retrieve gpu information use a list of one or more NV2080_CTRL_GPU_INFO + * structures. 
+ * + * gpuInfoListSize + * This field specifies the number of entries on the caller's + * gpuInfoList. + * gpuInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the gpu information is to be returned. + * This buffer must be at least as big as gpuInfoListSize multiplied + * by the size of the NV2080_CTRL_GPU_INFO structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_GPU_GET_INFO (0x20800101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_GPU_GET_INFO_PARAMS { + NvU32 gpuInfoListSize; + NV_DECLARE_ALIGNED(NvP64 gpuInfoList, 8); +} NV2080_CTRL_GPU_GET_INFO_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_INFO_V2 (0x20800102) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x2" */ + +typedef struct NV2080_CTRL_GPU_GET_INFO_V2_PARAMS { + NvU32 gpuInfoListSize; + NV2080_CTRL_GPU_INFO gpuInfoList[NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE]; +} NV2080_CTRL_GPU_GET_INFO_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_NAME_STRING + * + * This command returns the name of the GPU in string form in either ASCII + * or UNICODE format. + * + * gpuNameStringFlags + * This field specifies flags to use while creating the GPU name string. + * Valid flags values: + * NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII + * The returned name string should be in standard ASCII format. + * NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_UNICODE + * The returned name string should be in unicode format. + * gpuNameString + * This field contains the buffer into which the name string should be + * returned. The length of the returned string will be no more than + * NV2080_CTRL_GPU_MAX_NAME_STRING_LENGTH bytes in size. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_GPU_GET_NAME_STRING (0x20800110) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS_MESSAGE_ID" */ + +#define NV2080_GPU_MAX_NAME_STRING_LENGTH (0x0000040) + +// This field is deprecated - 'gpuNameStringFlags' is now a simple scalar. +// Field maintained (and extended from 0:0) for compile-time compatibility. +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE 31:0 + +/* valid gpu name string flags */ +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII (0x00000000) +#define NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_UNICODE (0x00000001) + +#define NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS { + NvU32 gpuNameStringFlags; + union { + NvU8 ascii[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + } gpuNameString; +} NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING + * + * This command returns the short name of the GPU in ASCII string form. + * + * gpuShortNameString + * This field contains the buffer into which the short name string should + * be returned. The length of the returned string will be no more than + * NV2080_MAX_NAME_STRING_LENGTH bytes in size. 
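+ *
+ *     Illustrative sketch (an editorial addition, not part of the original
+ *     interface): requesting the ASCII form of the name might look like the
+ *     following. rmControl() is a hypothetical stand-in for the RM control
+ *     dispatch path, and hClient/hSubdevice are assumed pre-existing handles.
+ *
+ *       NV_STATUS status;
+ *       NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS name = { 0 };
+ *       name.gpuNameStringFlags = NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII;
+ *       status = rmControl(hClient, hSubdevice,
+ *                          NV2080_CTRL_CMD_GPU_GET_NAME_STRING,
+ *                          &name, sizeof(name));
+ *       // On NV_OK, name.gpuNameString.ascii holds the ASCII name.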
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OPERATING_SYSTEM + */ +#define NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING (0x20800111) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS { + NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; +} NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_SET_POWER + * + * This command sets the power state for the GPU as a whole, various engines, + * or clocks. + * + * target + * One of NV2080_CTRL_GPU_SET_POWER_TARGET_* + * + * newLevel + * One of NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_* + * NV2080_CTRL_GPU_SET_POWER_STATE_ENGINE_LEVEL_* + * NV2080_CTRL_GPU_SET_POWER_STATE_CLOCK_LEVEL_* + * depending on the target above. + * + * oldLevel + * Previous level as appropriate. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_SET_POWER (0x20800112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_POWER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_SET_POWER_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV2080_CTRL_GPU_SET_POWER_PARAMS { + NvU32 target; + NvU32 newLevel; + NvU32 oldLevel; +} NV2080_CTRL_GPU_SET_POWER_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_GET_SDM + * + * This command returns the subdevice mask value for the associated subdevice. + * The subdevice mask value can be used with the SET_SUBDEVICE_MASK instruction + * provided by the NV36_CHANNEL_DMA and newer channel dma classes. + * + * subdeviceMask [out] + * This field return the subdevice mask value. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_GET_SDM (0x20800118) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SDM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_SDM_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV2080_CTRL_GPU_GET_SDM_PARAMS { + NvU32 subdeviceMask; +} NV2080_CTRL_GPU_GET_SDM_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_SET_SDM + * + * This command sets the subdevice instance and mask value for the associated subdevice. + * The subdevice mask value can be used with the SET_SUBDEVICE_MASK instruction + * provided by the NV36_CHANNEL_DMA and newer channel dma classes. + * It must be called before the GPU HW is initialized otherwise + * NV_ERR_INVALID_STATE is being returned. + * + * subdeviceMask [in] + * This field configures the subdevice mask value for the GPU/Subdevice + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_DATA + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_SET_SDM (0x20800120) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_SDM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_SET_SDM_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_GPU_SET_SDM_PARAMS { + NvU32 subdeviceMask; +} NV2080_CTRL_GPU_SET_SDM_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO + * + * This command returns the associated subdevices' simulation information. + * + * type + * This field returns the simulation type. 
+ * One of NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_* + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO (0x20800119) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS { + NvU32 type; +} NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS; + +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE (0x00000000) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_MODS_AMODEL (0x00000001) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_LIVE_AMODEL (0x00000002) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_FMODEL (0x00000003) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_RTL (0x00000004) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_EMU (0x00000005) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_EMU_LOW_POWER (0x00000006) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA (0x00000007) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA_RTL (0x00000008) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_DFPGA_FMODEL (0x00000009) +#define NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_UNKNOWN (0xFFFFFFFF) + +/* + * NV2080_CTRL_GPU_REG_OP + * + * This structure describes register operation information for use with + * the NV2080_CTRL_CMD_GPU_EXEC_REG_OPS command. The structure describes + * a single register operation. The operation can be a read or write and + * can involve either 32bits or 64bits of data. + * + * For 32bit read operations, the operation takes the following form: + * + * regValueLo = read(bar0 + regOffset) + * regValueHi = 0 + * + * For 64bit read operations, the operation takes the following form: + * + * regValueLo = read(bar0 + regOffset) + * regValueHi = read(bar0 + regOffset + 4) + * + * For 32bit write operations, the operation takes the following form: + * + * new = ((read(bar0 + regOffset) & ~regAndNMaskLo) | regValueLo) + * write(bar0 + regOffset, new) + * + * For 64bit write operations, the operation takes the following form: + * + * new_lo = ((read(bar0 + regOffset) & ~regAndNMaskLo) | regValueLo) + * new_hi = ((read(bar0 + regOffset + 4) & ~regAndNMaskHi) | regValueHi) + * write(bar0 + regOffset, new_lo) + * write(bar0 + regOffset + 4, new_hi) + * + * Details on the parameters follow: + * + * regOp + * This field specifies the operation to be applied to the register + * specified by the regOffset parameter. Valid values for this + * parameter are: + * NV2080_CTRL_GPU_REG_OP_READ_08 + * The register operation should be a 8bit global privileged register read. + * NV2080_CTRL_GPU_REG_OP_WRITE_08 + * The register operation should be a 8bit global privileged register write. + * NV2080_CTRL_GPU_REG_OP_READ_32 + * The register operation should be a 32bit register read. + * NV2080_CTRL_GPU_REG_OP_WRITE_32 + * The register operation should be a 32bit register write. + * NV2080_CTRL_GPU_REG_OP_READ_64 + * The register operation should be a 64bit register read. + * NV2080_CTRL_GPU_REG_OP_WRITE_64 + * The register operation should be a 64bit register write. + * regType + * This field specifies the type of the register specified by the + * regOffset parameter. Valid values for this parameter are: + * NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL + * The register is a global privileged register. Read operations + * return the current value from the associated global register. 
+ * Write operations for registers of this type take effect immediately.
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX
+ * The register is a graphics context register. Read operations
+ * return the current value from the associated global register.
+ * Write operations are applied to all existing graphics engine
+ * contexts. Any newly created graphics engine contexts will also
+ * be modified.
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC
+ * This is a graphics context TPC register group. Write operations are
+ * applied to TPC group(s) specified by regGroupMask.
+ * This field is ignored for read operations.
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM
+ * This is a graphics context SM register group that is inside a TPC
+ * group. Write operations are applied to SM group(s) specified by
+ * regGroupMask (TPC) and regSubGroupMask (SM). This field is ignored
+ * for read operations.
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP
+ * This is a graphics context CROP register group. Write operations
+ * are applied to registers specified by regGroupMask. This field is
+ * ignored for read operations.
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP
+ * This is a graphics context ZROP register group. Write operations
+ * are applied to registers specified by regGroupMask. This field is
+ * ignored for read operations.
+ * NV2080_CTRL_GPU_REG_OP_TYPE_FB
+ * This is a fb register group. Write operations are applied to
+ * registers specified by regGroupMask. This field is
+ * ignored for read operations.
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD
+ * This is a graphics context QUAD register group. Operations
+ * are applied to registers specified by the regQuad value.
+ * regQuad
+ * This field specifies the quad to be accessed for register regOffset when
+ * the regType specified is NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD.
+ * regGroupMask
+ * This field specifies which registers inside an array should be updated.
+ * This field is used when regType is one of the following:
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP
+ * NV2080_CTRL_GPU_REG_OP_TYPE_FB
+ * When regGroupMask is used, the regOffset MUST be the first register in
+ * an array.
+ * regSubGroupMask
+ * This field specifies which registers inside a group should be updated.
+ * This field is used for updating SM registers when regType is:
+ * NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC
+ * When regSubGroupMask is used, regOffset MUST be the first register in an
+ * array AND also the first one in the sub array. regGroupMask specifies
+ * TPC(X) and regSubGroupMask specifies SM_CTX_N(Y).
+ * regStatus
+ * This field returns the completion status for the associated register
+ * operation in the form of a bitmask. Possible status values for this
+ * field are:
+ * NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS
+ * This value indicates the operation completed successfully.
+ * NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OP
+ * This bit value indicates that the regOp value is not valid.
+ * NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_TYPE
+ * This bit value indicates that the regType value is not valid.
+ * NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OFFSET
+ * This bit value indicates that the regOffset value is invalid.
+ * The regOffset value must be within the legal BAR0 range for the
+ * associated GPU and must target a supported register with a
+ * supported operation.
+ * NV2080_CTRL_GPU_REG_OP_STATUS_UNSUPPORTED_OFFSET + * This bit value indicates that the operation to the register + * specified by the regOffset value is not supported for the + * associated GPU. + * NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_MASK + * This bit value indicates that the regTpcMask value is invalid. + * The regTpcMask must be a subset of TPCs that are enabled on the + * associated GPU. + * NV2080_CTRL_GPU_REG_OP_STATUS_NOACCESS + * The caller does not have access to the register at given offset + * regOffset + * This field specifies the register offset to access. The specified + * offset must be a valid BAR0 offset for the associated GPU. + * regValueLo + * This field contains the low 32bits of the register value. + * For read operations, this value returns the current value of the + * register specified by regOffset. For write operations, this field + * specifies the logical OR value applied to the current value + * contained in the register specified by regOffset. + * regValueHi + * This field contains the high 32bits of the register value. + * For read operations, this value returns the current value of the + * register specified by regOffset + 4. For write operations, this field + * specifies the logical OR value applied to the current value + * contained in the register specified by regOffset + 4. + * regAndNMaskLo + * This field contains the mask used to clear a desired field from + * the current value contained in the register specified by regOffsetLo. + * This field is negated and ANDed to this current register value. + * This field is only used for write operations. This field is ignored + * for read operations. + * regAndNMaskHi + * This field contains the mask used to clear a desired field from + * the current value contained in the register specified by regOffsetHi. + * This field is negated and ANDed to this current register value. + * This field is only used for write operations. This field is ignored + * for read operations. 
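+ *
+ * As a worked example of the 32bit write form shown above (illustrative
+ * only, not part of the original interface description): to set bits 7:0
+ * of a register to 0x2A while leaving all other bits untouched, a client
+ * would submit an entry with
+ *
+ *     regOp         = NV2080_CTRL_GPU_REG_OP_WRITE_32
+ *     regAndNMaskLo = 0x000000FF   // field to be replaced
+ *     regValueLo    = 0x0000002A   // new field value, already in position
+ *
+ * which evaluates to
+ *
+ *     new = (read(bar0 + regOffset) & ~0x000000FF) | 0x0000002A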
+ */ +typedef struct NV2080_CTRL_GPU_REG_OP { + NvU8 regOp; + NvU8 regType; + NvU8 regStatus; + NvU8 regQuad; + NvU32 regGroupMask; + NvU32 regSubGroupMask; + NvU32 regOffset; + NvU32 regValueHi; + NvU32 regValueLo; + NvU32 regAndNMaskHi; + NvU32 regAndNMaskLo; +} NV2080_CTRL_GPU_REG_OP; + +/* valid regOp values */ +#define NV2080_CTRL_GPU_REG_OP_READ_32 (0x00000000) +#define NV2080_CTRL_GPU_REG_OP_WRITE_32 (0x00000001) +#define NV2080_CTRL_GPU_REG_OP_READ_64 (0x00000002) +#define NV2080_CTRL_GPU_REG_OP_WRITE_64 (0x00000003) +#define NV2080_CTRL_GPU_REG_OP_READ_08 (0x00000004) +#define NV2080_CTRL_GPU_REG_OP_WRITE_08 (0x00000005) + +/* valid regType values */ +#define NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL (0x00000000) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX (0x00000001) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_TPC (0x00000002) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_SM (0x00000004) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_CROP (0x00000008) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_ZROP (0x00000010) +#define NV2080_CTRL_GPU_REG_OP_TYPE_FB (0x00000020) +#define NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX_QUAD (0x00000040) +#define NV2080_CTRL_GPU_REG_OP_TYPE_DEVICE (0x00000080) + +/* valid regStatus values (note: NvU8 ie, 1 byte) */ +#define NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS (0x00) +#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OP (0x01) +#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_TYPE (0x02) +#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OFFSET (0x04) +#define NV2080_CTRL_GPU_REG_OP_STATUS_UNSUPPORTED_OP (0x08) +#define NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_MASK (0x10) +#define NV2080_CTRL_GPU_REG_OP_STATUS_NOACCESS (0x20) + +/* + * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS + * + * This command is used to submit a buffer containing one or more + * NV2080_CTRL_GPU_REG_OP structures for processing. Each entry in the + * buffer specifies a single read or write operation. Each entry is checked + * for validity in an initial pass over the buffer with the results for + * each operation stored in the corresponding regStatus field. Unless + * bNonTransactional flag is set to true, if any invalid entries are found + * during this initial pass then none of the operations are executed. Entries + * are processed in order within each regType with NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL + * entries processed first followed by NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX entries. + * + * hClientTarget + * This parameter specifies the handle of the client that owns the channel + * specified by hChannelTarget. If this parameter is set to 0 then the set + * of channel-specific register operations are applied to all current and + * future channels. + * hChannelTarget + * This parameter specifies the handle of the target channel (or channel + * group) object instance to which channel-specific register operations are + * to be directed. If hClientTarget is set to 0 then this parameter must + * also be set to 0. + * bNonTransactional + * This field specifies if command is non-transactional i.e. if set to + * true, all the valid operations will be executed. + * reserved00 + * This parameter is reserved for future use. It should be initialized to + * zero for correct operation. + * regOpCount + * This field specifies the number of entries on the caller's regOps + * list. + * regOps + * This field specifies a pointer in the caller's address space + * to the buffer from which the desired register information is to be + * retrieved. 
This buffer must be at least as big as regInfoCount + * multiplied by the size of the NV2080_CTRL_GPU_REG_OP structure. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. When SMC is enabled, this + * is a mandatory parameter. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS (0x20800122) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x22" */ + +typedef struct NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS { + NvHandle hClientTarget; + NvHandle hChannelTarget; + NvU32 bNonTransactional; + NvU32 reserved00[2]; + NvU32 regOpCount; + NV_DECLARE_ALIGNED(NvP64 regOps, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINES + * + * Returns a list of supported engine types along with the number of instances + * of each type. Querying with engineList NULL returns engineCount. + * + * engineCount + * This field specifies the number of entries on the caller's engineList + * field. + * engineList + * This field is a pointer to a buffer of NvU32 values representing the + * set of engines supported by the associated subdevice. Refer to cl2080.h + * for the complete set of supported engine types. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINES (0x20800123) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ENGINES_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINES_PARAMS { + NvU32 engineCount; + NV_DECLARE_ALIGNED(NvP64 engineList, 8); +} NV2080_CTRL_GPU_GET_ENGINES_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_ENGINES_V2 (0x20800170) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS_MESSAGE_ID" */ + +/* Must match NV2080_ENGINE_TYPE_LAST from cl2080.h */ +#define NV2080_GPU_MAX_ENGINES_LIST_SIZE 0x34 + +#define NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS_MESSAGE_ID (0x70U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS { + NvU32 engineCount; + NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE]; +} NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST + * + * Returns a list of classes supported by a given engine type. + * + * engineType + * This field specifies the engine type being queried. + * NV2080_CTRL_ENGINE_TYPE_ALLENGINES will return classes + * supported by all engines. + * + * numClasses + * This field specifies the number of classes supported by + * engineType. + * + * classList + * This field is an array containing the list of supported + * classes. 
Is of type (NvU32*) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST (0x20800124) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS { + NvU32 engineType; + NvU32 numClasses; + NV_DECLARE_ALIGNED(NvP64 classList, 8); +} NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS; + + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_FAULT_INFO + * + * This command returns the fault properties of the specified engine type. + * + * engineType + * Input parameter. + * This field specifies the engine type being queried. + * Engine type is specified using the NV2080_ENGINE_TYPE_* defines in cl2080.h. + * The list of engines supported by a chip can be got using the + * NV2080_CTRL_CMD_GPU_GET_ENGINES ctrl call. + * + * mmuFaultId + * Output parameter. + * This field returns the MMU fault ID for the specified engine. + * If the engine supports subcontext, this field provides the base fault id. + * + * bSubcontextSupported + * Output parameter. + * Returns TRUE if subcontext faulting is supported by the engine. + * Engine that support subcontext use fault IDs in the range [mmuFaultId, mmuFaultId + maxSubCtx). + * "maxSubctx" can be found using the NV2080_CTRL_FIFO_INFO ctrl call with + * NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP as the index. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINE_FAULT_INFO (0x20800125) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS { + NvU32 engineType; + NvU32 mmuFaultId; + NvBool bSubcontextSupported; +} NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_QUERY_MODE + * + * This command is used to detect the mode of the GPU associated with the + * subdevice. + * + * mode + * This parameter returns the current mode of GPU. Legal values for + * this parameter include: + * NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE + * The GPU is currently operating in graphics mode. + * NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE + * The GPU is currently operating in compute mode. + * NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE + * The current mode of the GPU could not be determined. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_GPU_QUERY_MODE (0x20800128) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_MODE_PARAMS_MESSAGE_ID" */ + +/* valid mode parameter values */ +#define NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE (0x00000000) +#define NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE (0x00000001) +#define NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE (0x00000002) + +#define NV2080_CTRL_GPU_QUERY_MODE_PARAMS_MESSAGE_ID (0x28U) + +typedef struct NV2080_CTRL_GPU_QUERY_MODE_PARAMS { + NvU32 mode; +} NV2080_CTRL_GPU_QUERY_MODE_PARAMS; + + + +/*! 
+ * NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY + * Data block describing a virtual context buffer to be promoted + * + * gpuPhysAddr [IN] + * GPU Physical Address for the buffer + * gpuVirtAddr [IN] + * GPU Virtual Address for the buffer + * size[IN] + * Size of this virtual context buffer + * physAttr [IN] + * Physical memory attributes (aperture, cacheable) + * bufferId [IN] + * Virtual context buffer type, data type NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_* + * bInitialize [IN] + * Flag indicating that this virtual context buffer should be initialized prior to promotion. + * The client must clear (memset) the buffer to 0x0 prior to initialization. + * Following buffers need initialization: + * 1. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN + * 2. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH + * 3. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP + * 4. NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP + * bNonmapped [IN] + * Flag indicating that the virtual address is not to be promoted with this + * call. It is illegal to set this flag and not set bInitialize. + */ +typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY { + NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NvU32 physAttr; + NvU16 bufferId; + NvU8 bInitialize; + NvU8 bNonmapped; +} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY; + +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN 0 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM 1 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH 2 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB 3 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL 4 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB 5 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL 6 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL 7 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK 8 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT 9 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP 10 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11 +#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP 12 + +#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES 16 + +/* + * NV2080_CTRL_CMD_GPU_PROMOTE_CTX + * + * This command is used to promote a Virtual Context + * + * engineType + * Engine Virtual Context is for + * hClient + * Client Handle for hVirtMemory + * ChID + * Hw Channel -- Actually hw index for channel (deprecated) + * hChanClient + * The client handle for hObject + * hObject + * Passed in object handle for either a single channel or a channel group + * hVirtMemory + * Virtual Address handle to map Virtual Context to + * virtAddress + * Virtual Address to map Virtual Context to + * size + * size of the Virtual Context + * entryCount + * Number of valid entries in the promotion entry list + * promoteEntry + * List of context buffer entries to issue promotions for. + * + * When not using promoteEntry, only hVirtMemory or (virtAddress, size) should be + * specified, the code cases based on hVirtMemory(NULL vs non-NULL) so + * if both are specified, hVirtMemory has precedence. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED - The Class does not support version info retrieval + * NV_ERR_INVALID_DEVICE - The Class/Device is not yet ready to provide this info. + * NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified. 
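+ *
+ * Illustrative sketch (not part of the original description): promoting a
+ * single, already-initialized MAIN context buffer for a channel group might
+ * be set up roughly as follows, assuming the client handles and the buffer's
+ * GPU virtual address and size were obtained elsewhere:
+ *
+ *     NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS params = { 0 };
+ *     params.engineType  = NV2080_ENGINE_TYPE_GRAPHICS;   // from cl2080.h
+ *     params.hChanClient = hChanClient;
+ *     params.hObject     = hChannelGroup;
+ *     params.entryCount  = 1;
+ *     params.promoteEntry[0].bufferId    = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN;
+ *     params.promoteEntry[0].gpuVirtAddr = mainBufferVa;
+ *     params.promoteEntry[0].size        = mainBufferSize;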
+ */
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX (0x2080012b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID (0x2BU)
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+ NvHandle hVirtMemory;
+ NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+ NvU32 entryCount;
+ // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *PNV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_EVICT_CTX
+ *
+ * This command is used to evict a Virtual Context
+ *
+ * engineType
+ * Engine Virtual Context is for
+ * hClient
+ * Client Handle
+ * ChID
+ * Hw Channel -- Actually hw index for channel (deprecated)
+ * hChanClient
+ * Client handle for hObject
+ * hObject
+ * Passed in object handle for either a single channel or a channel group
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED - The Class does not support version info retrieval
+ * NV_ERR_INVALID_DEVICE - The Class/Device is not yet ready to provide this info.
+ * NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified.
+ */
+#define NV2080_CTRL_CMD_GPU_EVICT_CTX (0x2080012c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_EVICT_CTX_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_EVICT_CTX_PARAMS_MESSAGE_ID (0x2CU)
+
+typedef struct NV2080_CTRL_GPU_EVICT_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+} NV2080_CTRL_GPU_EVICT_CTX_PARAMS;
+typedef struct NV2080_CTRL_GPU_EVICT_CTX_PARAMS *PNV2080_CTRL_GPU_EVICT_CTX_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_INITIALIZE_CTX
+ *
+ * This command is used to initialize a Virtual Context. The ctx buffer must be
+ * cleared (zeroed) by the caller prior to invoking this method.
+ *
+ * engineType
+ * Engine Virtual Context is for
+ * hClient
+ * Client Handle for the hVirtMemory
+ * ChID
+ * Hw channel -- Actually channel index (deprecated)
+ * hChanClient
+ * The client handle for hObject
+ * hObject
+ * Passed in object handle for either a single channel or a channel group
+ * hVirtMemory
+ * Virtual Address to map the Virtual Context to
+ * physAddress
+ * Physical offset in FB to use as Virtual Context
+ * physAttr
+ * Physical memory attributes
+ * hDmaHandle
+ * Dma Handle when using discontiguous context buffers
+ * index
+ * Start offset in Virtual DMA Context
+ * size
+ * Size of the Virtual Context
+ *
+ * Only hVirtMemory or size should be specified; the code cases based on
+ * hVirtMemory (NULL vs non-NULL), so if both are specified, hVirtMemory has
+ * precedence.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED - The Class does not support version info retrieval
+ * NV_ERR_INVALID_DEVICE - The Class/Device is not yet ready to provide this info.
+ * NV_ERR_INVALID_ARGUMENT - Bad/Unknown Class ID specified.
+ */
+#define NV2080_CTRL_CMD_GPU_INITIALIZE_CTX (0x2080012d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS_MESSAGE_ID (0x2DU)
+
+typedef struct NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS {
+ NvU32 engineType;
+ NvHandle hClient;
+ NvU32 ChID;
+ NvHandle hChanClient;
+ NvHandle hObject;
+ NvHandle hVirtMemory;
+ NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+ NvU32 physAttr;
+ NvHandle hDmaHandle;
+ NvU32 index;
+ NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS;
+typedef struct NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *PNV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS;
+
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE 1:0
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_VIDMEM (0x00000000)
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_COH_SYS (0x00000001)
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_APERTURE_NCOH_SYS (0x00000002)
+
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE 2:2
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE_YES (0x00000000)
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_GPU_CACHEABLE_NO (0x00000001)
+
+/*
+ * NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX - Tells RM whether this Ctx buffer needs to
+ * do a full initialization (Load the golden image). When a context is promoted on a different
+ * channel than it was originally inited, the client can use this flag to tell RM
+ * that this is an already inited Context. In such cases RM will update the internal state
+ * to update the context address and state variables.
+ */
+
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX 3:3
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX_NO (0x00000000)
+#define NV2080_CTRL_GPU_INITIALIZE_CTX_PRESERVE_CTX_YES (0x00000001)
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_ECC_INTR
+ * Queries the top level ECC PMC PRI register
+ * TODO remove these parameters, tracked in bug #1975721
+ */
+#define NV2080_CTRL_CMD_GPU_QUERY_ECC_INTR (0x2080012e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x2E" */
+
+typedef struct NV2080_CTRL_GPU_QUERY_ECC_INTR_PARAMS {
+ NvU32 eccIntrStatus;
+} NV2080_CTRL_GPU_QUERY_ECC_INTR_PARAMS;
+
+/**
+ * NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS
+ *
+ * This command is used to query the ECC status of a GPU by a subdevice
+ * handle. Please see the NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS
+ * data structure description below for details on the data reported
+ * per hardware unit.
+ *
+ * units
+ * Array of structures used to describe per-unit state
+ *
+ * flags
+ * See interface flag definitions below.
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + + + +#define NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS (0x2080012f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_MESSAGE_ID" */ + + +#define NV2080_CTRL_GPU_ECC_UNIT_COUNT (0x00000016) + + + +// Deprecated do not use +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE 0:0 +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_FILTERED (0x00000000) +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_RAW (0x00000001) + +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_FALSE 0 +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_TRUE 1 +#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_UNC_ERR_INDETERMINATE 2 + +/* + * NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS + * + * This structure represents the exception status of a class of per-unit + * exceptions + * + * count + * number of exceptions that have occurred since boot + */ +typedef struct NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS { + NV_DECLARE_ALIGNED(NvU64 count, 8); +} NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS; + +/* + * NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS + * + * This structure represents the per-unit ECC exception status + * + * enabled + * ECC enabled yes/no for this unit + * scrubComplete + * Scrub has completed yes/no. A scrub is performed for some units to ensure + * the checkbits are consistent with the protected data. + * supported + * Whether HW supports ECC in this unit for this GPU + * dbe + * Double bit error (DBE) status. The value returned reflects a counter + * that is monotonic, but can be reset by clients. + * dbeNonResettable (deprecated do not use) + * Double bit error (DBE) status, not client resettable. + * sbe + * Single bit error (SBE) status. The value returned reflects a counter + * that is monotonic, but can be reset by clients. + * sbeNonResettable (deprecated do not use) + * Single bit error (SBE) status, not client resettable. + * + */ +typedef struct NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS { + NvBool enabled; + NvBool scrubComplete; + NvBool supported; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS dbe, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS dbeNonResettable, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS sbe, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_EXCEPTION_STATUS sbeNonResettable, 8); +} NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS; + +/* + * NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS + * + * This structure returns ECC exception status and GPU Fatal Poison for all units + * + * units + * This structure represents ECC exception status for all Units. + * bFatalPoisonError + * Whether GPU Fatal poison error occurred in this GPU. This will be set for Ampere_and_later + * uncorrectableError + * Indicates whether any uncorrectable GR ECC errors have occurred. When + * SMC is enabled, uncorrectableError is only valid when the client is + * subscribed to a partition. Check QUERY_ECC_STATUS_UNC_ERR_* + * flags + * Flags passed by caller. Refer NV2080_CTRL_GPU_QUERY_ECC_STATUS_FLAGS_TYPE_* for details. + * grRouteInfo + * SMC partition information. This input is only valid when SMC is + * enabled on Ampere_and_later. 
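+ *
+ * Illustrative sketch (not part of the original description): after a
+ * successful NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS call, a client could
+ * total the double-bit error counts of the units that are supported and
+ * enabled:
+ *
+ *     NvU64 totalDbe = 0;
+ *     for (NvU32 i = 0; i < NV2080_CTRL_GPU_ECC_UNIT_COUNT; i++)
+ *     {
+ *         if (params.units[i].supported && params.units[i].enabled)
+ *             totalDbe += params.units[i].dbe.count;
+ *     }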
+ *
+ */
+#define NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS_MESSAGE_ID (0x2FU)
+
+typedef struct NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS {
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_QUERY_ECC_UNIT_STATUS units[NV2080_CTRL_GPU_ECC_UNIT_COUNT], 8);
+ NvBool bFatalPoisonError;
+ NvU8 uncorrectableError;
+ NvU32 flags;
+ NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8);
+} NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES
+ *
+ * This command sets the compute mode rules for the associated subdevice. The
+ * default mode is equivalent to NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE. This
+ * command is available to clients with administrator privileges only. An
+ * attempt to use this command by a client without administrator privileges
+ * results in the return of an NV_ERR_INSUFFICIENT_PERMISSIONS status.
+ *
+ * rules
+ * This parameter is used to specify the rules that govern the GPU with
+ * respect to NV50_COMPUTE objects. Legal values for this parameter include:
+ *
+ * NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE
+ * This mode indicates that no special restrictions apply to the
+ * allocation of NV50_COMPUTE objects.
+ *
+ * NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE
+ * This mode means that only one instance of NV50_COMPUTE will be
+ * allowed at a time. This restriction is enforced at each subsequent
+ * NV50_COMPUTE allocation attempt. Setting this mode will not affect
+ * any existing compute programs that may be running. For example,
+ * if this mode is set while three compute programs are running, then
+ * all of those programs will be allowed to continue running. However,
+ * until they all finish running, no new NV50_COMPUTE objects may be
+ * allocated. User-mode clients should treat this as restricting access
+ * to a NV50_COMPUTE object to a single thread within a process.
+ *
+ * NV2080_CTRL_GPU_COMPUTE_MODE_RULES_COMPUTE_PROHIBITED
+ * This mode means that the GPU is not ever allowed to instantiate an
+ * NV50_COMPUTE object, and thus cannot run any new compute programs.
+ * This restriction is enforced at each subsequent NV50_COMPUTE object
+ * allocation attempt. Setting this mode will not affect any existing
+ * compute programs that may be running. For example, if this mode is
+ * set while three compute programs are running, then all of those
+ * programs will be allowed to continue running. However, no new
+ * NV50_COMPUTE objects may be allocated.
+ *
+ *
+ * NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS
+ * This mode is identical to EXCLUSIVE_COMPUTE, where only one instance
+ * of NV50_COMPUTE will be allowed at a time. It is separate from
+ * EXCLUSIVE_COMPUTE to allow user-mode clients to differentiate
+ * exclusive access to a compute object from a single thread of a
+ * process from exclusive access to a compute object from all threads
+ * of a process. User-mode clients should not limit access to a
+ * NV50_COMPUTE object to a single thread when the GPU is set to
+ * EXCLUSIVE_COMPUTE_PROCESS.
+ *
+ * An invalid rules parameter value results in the return of an
+ * NV_ERR_INVALID_ARGUMENT status.
+ *
+ * flags
+ * Reserved. Caller should set this field to zero.
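+ *
+ * Illustrative sketch (not part of the original description): a privileged
+ * client requesting exclusive-process compute mode would fill the parameter
+ * structure as follows and issue the control call through its usual RM
+ * control path (issueSubdeviceControl() below is a hypothetical wrapper
+ * around the NV_ESC_RM_CONTROL ioctl, not an API defined here):
+ *
+ *     NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS params = { 0 };
+ *     params.rules = NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS;
+ *     params.flags = 0;   // reserved
+ *     status = issueSubdeviceControl(hClient, hSubdevice,
+ *                                    NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES,
+ *                                    &params, sizeof(params));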
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT (if an invalid rule number is provided) + * NV_ERR_INSUFFICIENT_PERMISSIONS (if the user is not the Administrator or superuser) + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES (0x20800130) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID" */ + +/* valid rules parameter values */ +#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE (0x00000000) +#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE (0x00000001) +#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_COMPUTE_PROHIBITED (0x00000002) +#define NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS (0x00000003) + +#define NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS { + NvU32 rules; + NvU32 flags; +} NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_QUERY_COMPUTE_MODE_RULES + * + * This command queries the compute mode rules for the associated subdevice. + * Please see the NV2080_CTRL_CMD_GPU_SET_COMPUTE_MODE_RULES command, above, for + * details as to what the rules mean. + * + * rules + * Specifies the rules that govern the GPU, with respect to NV50_COMPUTE + * objects. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_QUERY_COMPUTE_MODE_RULES (0x20800131) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS_MESSAGE_ID (0x31U) + +typedef struct NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS { + NvU32 rules; +} NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_QUERY_ECC_CONFIGURATION + * + * This command returns the current ECC configuration setting for + * a GPU given its subdevice handle. The value returned is + * the current ECC setting for the GPU stored in non-volatile + * memory on the board. + * + * currentConfiguration + * The current ECC configuration setting. + * + * defaultConfiguration + * The factory default ECC configuration setting. + * + * Please see the NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS command if + * you wish to determine if ECC is currently enabled. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_GPU_QUERY_ECC_CONFIGURATION (0x20800133) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_ECC_CONFIGURATION_DISABLED (0x00000000) +#define NV2080_CTRL_GPU_ECC_CONFIGURATION_ENABLED (0x00000001) + +#define NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS_MESSAGE_ID (0x33U) + +typedef struct NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS { + NvU32 currentConfiguration; + NvU32 defaultConfiguration; +} NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_SET_ECC_CONFIGURATION + * + * This command changes the ECC configuration setting for a GPU + * given its subdevice handle. The value specified is + * stored in non-volatile memory on the board and will take + * effect with the next GPU reset + * + * newConfiguration + * The new configuration setting to take effect with + * the next GPU reset. 
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_SET_ECC_CONFIGURATION (0x20800134) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_ECC_CONFIGURATION_DISABLE (0x00000000) +#define NV2080_CTRL_GPU_ECC_CONFIGURATION_ENABLE (0x00000001) + +#define NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS_MESSAGE_ID (0x34U) + +typedef struct NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS { + NvU32 newConfiguration; +} NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_RESET_ECC_ERROR_STATUS + * + * This command resets volatile and/or persistent ECC error + * status information for a GPU given its subdevice + * handle. + * + * statuses + * The ECC error statuses (the current, volatile + * and/or the persistent error counter(s)) to + * be reset by the command. + * flags + * FORCE_PURGE + * Forcibly clean all the ECC InfoROM state if this flag is set + * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_RESET_ECC_ERROR_STATUS (0x20800136) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_NONE (0x00000000) +#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_VOLATILE (0x00000001) +#define NV2080_CTRL_GPU_ECC_ERROR_STATUS_AGGREGATE (0x00000002) + +#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE 0:0 +#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE_FALSE 0 +#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_FLAGS_FORCE_PURGE_TRUE 1 + +#define NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS { + NvU32 statuses; + NvU8 flags; +} NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO + * + * This command returns a mask of enabled GPCs for the associated GPU. + * + * gpcMask + * This parameter returns a mask of enabled GPCs. Each GPC has an ID + * that's equivalent to the corresponding bit position in the mask. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO (0x20800137) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS_MESSAGE_ID (0x37U) + +typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS { + NvU32 gpcMask; +} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO + * + * This command returns a mask of enabled TPCs for a specified GPC. + * + * gpcId + * This parameter specifies the GPC for which TPC information is + * to be retrieved. If the GPC with this ID is not enabled this command + * will return an tpcMask value of zero. + * + * tpcMask + * This parameter returns a mask of enabled TPCs for the specified GPC. + * Each TPC has an ID that's equivalent to the corresponding bit + * position in the mask. 
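+ *
+ * Illustrative sketch (not part of the original description): combined with
+ * NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, a client could walk every enabled
+ * GPC and fetch its TPC mask (issueSubdeviceControl() is a hypothetical
+ * wrapper around the client's RM control path):
+ *
+ *     for (NvU32 gpcId = 0; (gpcInfo.gpcMask >> gpcId) != 0; gpcId++)
+ *     {
+ *         if ((gpcInfo.gpcMask & (1U << gpcId)) == 0)
+ *             continue;
+ *         NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo = { 0 };
+ *         tpcInfo.gpcId = gpcId;
+ *         status = issueSubdeviceControl(hClient, hSubdevice,
+ *                                        NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO,
+ *                                        &tpcInfo, sizeof(tpcInfo));
+ *         // on NV_OK, tpcInfo.tpcMask holds the enabled TPCs of this GPC
+ *     }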
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO (0x20800138) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS_MESSAGE_ID (0x38U) + +typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS { + NvU32 gpcId; + NvU32 tpcMask; +} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_FERMI_ZCULL_INFO + * + * This command returns a mask of enabled ZCULLs for a specified GPC. + * + * gpcId + * This parameter specifies the GPC for which ZCULL information is to be + * retrieved. If the GPC with this ID is not enabled this command will + * return an zcullMask value of zero. + * + * zcullMask + * This parameter returns a mask of enabled ZCULLs for the specified GPC. + * Each ZCULL has an ID that's equivalent to the corresponding bit + * position in the mask. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_PARAM_STRUCT + * + * Deprecated: Please use GR based control call + * NV2080_CTRL_CMD_GR_GET_ZCULL_MASK + * + */ +#define NV2080_CTRL_CMD_GPU_GET_FERMI_ZCULL_INFO (0x20800139) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS_MESSAGE_ID (0x39U) + +typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS { + NvU32 gpcId; + NvU32 zcullMask; +} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO + * + * If an InfoROM with a valid OEM Board Object is present, this + * command returns relevant information from the object to the + * caller. + * + * The following data are currently reported: + * + * buildDate + * The board's build date (8 digit BCD in format YYYYMMDD). + * + * marketingName + * The board's marketing name (24 ASCII letters e.g. "Quadro FX5800"). + * + * boardSerialNumber + * The board's serial number. + * + * memoryManufacturer + * The board's memory manufacturer ('S'amsung/'H'ynix/'I'nfineon). + * + * memoryDateCode + * The board's memory datecode (LSB justified ASCII field with 0x00 + * denoting empty space). + * + * productPartNumber + * The board's 900 product part number (LSB justified ASCII field with 0x00 + * denoting empty space e.g. "900-21228-0208-200"). + * + * boardRevision + * The board's revision (for e.g. A02, B01) + * + * boardType + * The board's type ('E'ngineering/'P'roduction) + * + * board699PartNumber + * The board's 699 product part number (LSB justified ASCII field with 0x00 + * denoting empty space e.g. "699-21228-0208-200"). 
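+ *
+ * As an illustration (not part of the original description), the BCD-encoded
+ * buildDate can be unpacked one 4-bit digit at a time; e.g. 0x20190523
+ * decodes to 2019-05-23:
+ *
+ *     NvU32 year  = ((buildDate >> 28) & 0xF) * 1000 +
+ *                   ((buildDate >> 24) & 0xF) * 100  +
+ *                   ((buildDate >> 20) & 0xF) * 10   +
+ *                   ((buildDate >> 16) & 0xF);
+ *     NvU32 month = ((buildDate >> 12) & 0xF) * 10 + ((buildDate >> 8) & 0xF);
+ *     NvU32 day   = ((buildDate >>  4) & 0xF) * 10 + ((buildDate >> 0) & 0xF);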
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO (0x2080013f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_GPU_MAX_MARKETING_NAME_LENGTH (0x00000018) +#define NV2080_GPU_MAX_SERIAL_NUMBER_LENGTH (0x00000010) +#define NV2080_GPU_MAX_MEMORY_PART_ID_LENGTH (0x00000014) +#define NV2080_GPU_MAX_MEMORY_DATE_CODE_LENGTH (0x00000006) +#define NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH (0x00000014) + +#define NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS_MESSAGE_ID (0x3FU) + +typedef struct NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS { + NvU32 buildDate; + NvU8 marketingName[NV2080_GPU_MAX_MARKETING_NAME_LENGTH]; + NvU8 serialNumber[NV2080_GPU_MAX_SERIAL_NUMBER_LENGTH]; + NvU8 memoryManufacturer; + NvU8 memoryPartID[NV2080_GPU_MAX_MEMORY_PART_ID_LENGTH]; + NvU8 memoryDateCode[NV2080_GPU_MAX_MEMORY_DATE_CODE_LENGTH]; + NvU8 productPartNumber[NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH]; + NvU8 boardRevision[3]; + NvU8 boardType; + NvU8 board699PartNumber[NV2080_GPU_MAX_PRODUCT_PART_NUMBER_LENGTH]; +} NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_GET_ID + * + * This command returns the gpuId of the associated object. + * + * gpuId + * This field return the gpuId. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_ID (0x20800142) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ID_PARAMS_MESSAGE_ID (0x42U) + +typedef struct NV2080_CTRL_GPU_GET_ID_PARAMS { + NvU32 gpuId; +} NV2080_CTRL_GPU_GET_ID_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE + * + * This command is used to enable or disable GPU debug mode. While this mode + * is enabled, some client RM calls that can potentially timeout return + * NV_ERR_BUSY_RETRY, signalling the client to try again once GPU + * debug mode is disabled. + * + * mode + * This parameter specifies whether GPU debug mode is to be enabled or + * disabled. Possible values are: + * + * NV2080_CTRL_GPU_DEBUG_MODE_ENABLED + * NV2080_CTRL_GPU_DEBUG_MODE_DISABLED + * + * Possible return status values are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE (0x20800143) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID (0x43U) + +typedef struct NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS { + NvU32 mode; +} NV2080_CTRL_GPU_SET_GPU_DEBUG_MODE_PARAMS; + +#define NV2080_CTRL_GPU_DEBUG_MODE_ENABLED (0x00000001) +#define NV2080_CTRL_GPU_DEBUG_MODE_DISABLED (0x00000002) + +/* + * NV2080_CTRL_CMD_GPU_GET_GPU_DEBUG_MODE + * + * This command is used to query whether debug mode is enabled on the current + * GPU. Please see the description of NV2080_CTRL_CMD_GPU_SET_GPU_DEBUG_MODE + * for more details on GPU debug mode. + * + * currentMode + * This parameter returns the state of GPU debug mode for the current GPU. 
+ * Possible values are: + * + * NV2080_CTRL_GPU_DEBUG_MODE_ENABLED + * NV2080_CTRL_GPU_DEBUG_MODE_DISABLED + * + * Possible return status values are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV2080_CTRL_CMD_GPU_GET_GPU_DEBUG_MODE (0x20800144) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS_MESSAGE_ID (0x44U) + +typedef struct NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS { + NvU32 currentMode; +} NV2080_CTRL_GPU_GET_GPU_DEBUG_MODE_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST + * + * Returns a list of engines that can partner or coexist + * when using the target channel or partnership class. + * This list may include all engines (pre-Kepler), or as few + * as 1 engine (Kepler and beyond). + * + * engineType + * This field specifies the target engine type. + * See cl2080.h for a list of valid engines. + * + * partnershipClassId + * This field specifies the target channel + * or partnership class ID. + * An example of such a class is GF100_CHANNEL_GPFIFO. + * + * runqueue + * This field is an index which indicates the runqueue to + * return the list of supported engines for. This is the + * same field as what NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE + * specifies. This is only valid for TSG. + * + * numPartners; + * This field returns the number of + * valid entries in the partnersList array + * + * partnerList + * This field is an array containing the list of supported + * partner engines types, in no particular order, and + * may even be empty (numPartners = 0). + * See cl2080.h for a list of possible engines. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST (0x20800147) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS_MESSAGE_ID" */ + +/* this macro specifies the maximum number of partner entries */ +#define NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS (0x00000020) + +#define NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS_MESSAGE_ID (0x47U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS { + NvU32 engineType; + NvU32 partnershipClassId; + NvU32 runqueue; + NvU32 numPartners; + // C form: NvU32 partnerList[NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS]; + NvU32 partnerList[NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS]; +} NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_GET_GID_INFO + * + * This command returns the GPU ID (GID) string for the associated + * GPU. This value can be useful for GPU identification and security + * system validation. + * + * The GPU ID is a SHA-1 based 16 byte ID, formatted as a 32 character + * hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the + * canonical format of a UUID) + * + * The GPU IDs are generated using the ECID, PMC_BOOT_0, and + * PMC_BOOT_42 of the GPU as the hash message. + * + * index + * (Input) "Select which GID set to get." Or so the original documentation + * said. In reality, there is only one GID per GPU, and the implementation + * completely ignores this parameter. You can too. + * + * flags (Input) The _FORMAT* flags designate ascii or binary format. Binary + * format returns the raw bytes of either the 16-byte SHA-1 ID or the + * 32-byte SHA-256 ID. + * + * The _TYPE* flags needs to specify the _SHA1 type. 
+ * + * length + * (Output) Actual GID length, in bytes. + * + * data[NV2080_BUS_MAX_GID_LENGTH] + * (Output) Result buffer: the GID itself, in a format that is determined by + * the "flags" field (described above). + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_GPU_GET_GID_INFO (0x2080014a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_GID_INFO_PARAMS_MESSAGE_ID" */ + +/* maximum possible number of bytes of GID information returned */ +#define NV2080_GPU_MAX_GID_LENGTH (0x000000100) + +#define NV2080_CTRL_GPU_GET_GID_INFO_PARAMS_MESSAGE_ID (0x4AU) + +typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS { + NvU32 index; + NvU32 flags; + NvU32 length; + NvU8 data[NV2080_GPU_MAX_GID_LENGTH]; +} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS; + +/* valid flags values */ +#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT 1:0 +#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_ASCII (0x00000000) +#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_BINARY (0x00000002) + +#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_TYPE 2:2 +#define NV2080_GPU_CMD_GPU_GET_GID_FLAGS_TYPE_SHA1 (0x00000000) + +/* + * NV2080_CTRL_CMD_GPU_GET_INFOROM_OBJECT_VERSION + * + * This command can be used by clients to retrieve the version of an + * InfoROM object. + * + * objectType + * This parameter specifies the name of the InfoROM object whose version + * should be queried. + * + * version + * This parameter returns the version of the InfoROM object specified by + * the objectType parameter. + * + * subversion + * This parameter returns the subversion of the InfoROM object specified + * by the objectType parameter. + * + * Possible return status values: + * NV_OK + * NV_ERR_STATE_IN_USE + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV2080_CTRL_CMD_GPU_GET_INFOROM_OBJECT_VERSION (0x2080014b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_INFOROM_OBJ_TYPE_LEN 3 + +#define NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS_MESSAGE_ID (0x4BU) + +typedef struct NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS { + char objectType[NV2080_CTRL_GPU_INFOROM_OBJ_TYPE_LEN]; + NvU8 version; + NvU8 subversion; +} NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS; + + +/* + * NV2080_CTRL_CMD_SET_GPU_OPTIMUS_INFO + * + * This command will specify that system is Optimus enabled. + * + * isOptimusEnabled + * Set NV_TRUE if system is Optimus enabled. + * + * Possible status return values are: + * NV_OK + */ +#define NV2080_CTRL_CMD_SET_GPU_OPTIMUS_INFO (0x2080014c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS_MESSAGE_ID (0x4CU) + +typedef struct NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS { + NvBool isOptimusEnabled; +} NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_IP_VERSION + * + * Will return the IP VERSION on the given engine for engines that support + * this capability. + * + * targetEngine + * This parameter specifies the target engine type to query for IP_VERSION. + * + * ipVersion + * This parameter returns the IP VERSION read from the unit's IP_VER + * register. 
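+ *
+ * Illustrative sketch (not part of the original description): querying the
+ * display unit's IP version (issueSubdeviceControl() is a hypothetical
+ * wrapper around the client's RM control path):
+ *
+ *     NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS params = { 0 };
+ *     params.targetEngine = NV2080_CTRL_GPU_GET_IP_VERSION_DISPLAY;
+ *     status = issueSubdeviceControl(hClient, hSubdevice,
+ *                                    NV2080_CTRL_CMD_GPU_GET_IP_VERSION,
+ *                                    &params, sizeof(params));
+ *     // on NV_OK, params.ipVersion holds the value of the unit's IP_VER register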
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_GPU_GET_IP_VERSION (0x2080014d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS_MESSAGE_ID (0x4DU)
+
+typedef struct NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS {
+ NvU32 targetEngine;
+ NvU32 ipVersion;
+} NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_IP_VERSION_DISPLAY (0x00000001)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_HDACODEC (0x00000002)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PMGR (0x00000003)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_PPWR_PMU (0x00000004)
+#define NV2080_CTRL_GPU_GET_IP_VERSION_DISP_FALCON (0x00000005)
+
+
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT
+ *
+ * This command returns an indicator which reports if the specified Illumination control
+ * attribute is supported.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_GPU_ILLUM_ATTRIB_LOGO_BRIGHTNESS 0
+#define NV2080_CTRL_GPU_ILLUM_ATTRIB_SLI_BRIGHTNESS 1
+#define NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT (0x20800153) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS_MESSAGE_ID (0x53U)
+
+typedef struct NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS {
+ NvU32 attribute;
+ NvBool bSupported;
+} NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_ILLUM
+ *
+ * This command returns the current value of the specified Illumination control attribute.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_ILLUM (0x20800154) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x54" */
+
+typedef struct NV2080_CTRL_CMD_GPU_ILLUM_PARAMS {
+ NvU32 attribute;
+ NvU32 value;
+} NV2080_CTRL_CMD_GPU_ILLUM_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_ILLUM
+ *
+ * This command sets a new value for the specified Illumination control attribute.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_SET_ILLUM (0x20800155) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x55" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_INFOROM_IMAGE_VERSION
+ *
+ * This command can be used by clients to retrieve the version of the entire
+ * InfoROM image.
+ *
+ * version
+ * This parameter returns the version of the InfoROM image as a NULL-
+ * terminated character string of the form "XXXX.XXXX.XX.XX" where each
+ * 'X' is an integer character.
+ *
+ * Possible status return values are:
+ * NVOS_STATUS_SUCCESS
+ * NV_ERR_INSUFFICIENT_RESOURCES
+ * NV_ERR_NOT_SUPPORTED
+ * NV_ERR_INVALID_DATA
+ */
+#define NV2080_CTRL_CMD_GPU_GET_INFOROM_IMAGE_VERSION (0x20800156) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_GPU_INFOROM_IMAGE_VERSION_LEN 16
+
+#define NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS_MESSAGE_ID (0x56U)
+
+typedef struct NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS {
+ NvU8 version[NV2080_CTRL_GPU_INFOROM_IMAGE_VERSION_LEN];
+} NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_INFOROM_ECC_SUPPORT
+ *
+ * This command returns whether or not ECC is supported via the InfoROM.
+ *
+ * Possible status return values are:
+ * NV_OK
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_QUERY_INFOROM_ECC_SUPPORT (0x20800157) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x57" */
+
+/*
+ * NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION
+ *
+ * This structure contains information about a single physical bridge.
+ *
+ * fwVersion
+ * This field specifies Firmware Version of the bridge stored in
+ * bridge EEPROM.
+ * oemVersion
+ * This field specifies Oem Version of the firmware stored in
+ * bridge EEPROM.
+ * siliconRevision
+ * This field contains the silicon revision of the bridge hardware.
+ * It is set by the chip manufacturer.
+ * hwbcResourceType
+ * This field specifies the hardware broadcast resource type.
+ * Value denotes the kind of bridge - PLX or BR04
+ *
+ */
+
+typedef struct NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS {
+ NvU32 fwVersion;
+ NvU8 oemVersion;
+ NvU8 siliconRevision;
+ NvU8 hwbcResourceType;
+} NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO
+ *
+ * This command returns physical bridge information in the system.
+ * Information consists of bridgeCount and a list of bridgeId's.
+ * The bridge Id's are used by NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION
+ * to get firmware version, oem version and silicon revision info.
+ *
+ * bridgeCount
+ * This field specifies the number of physical bridges present
+ * in the system.
+ * hPhysicalBridges
+ * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ * In this array, the bridge Id's are stored.
+ * bridgeList
+ * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE.
+ * In this array, the bridge version details are stored.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_NOT_SUPPORTED
+ */
+#define NV2080_CTRL_CMD_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO (0x2080015a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MAX_PHYSICAL_BRIDGE (100)
+#define NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS_MESSAGE_ID (0x5AU)
+
+typedef struct NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS {
+ NvU8 bridgeCount;
+ NvHandle hPhysicalBridges[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+ NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS bridgeList[NV2080_CTRL_MAX_PHYSICAL_BRIDGE];
+} NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_GPU_BRIDGE_VERSION
+ *
+ * This structure contains information about a single physical bridge.
+ *
+ * bus
+ * This field specifies the bus id of the bridge.
+ * device + * This field specifies the device id of the bridge. + * func + * This field specifies the function id of the bridge. + * oemVersion + * This field specifies Oem Version of the firmware stored in + * bridge EEPROM. + * siliconRevision + * This field contains the silicon revision of the bridge hardware. + * It is set by the chip manufacturer. + * hwbcResourceType + * This field specifies the hardware broadcast resource type. + * Value denotes the kind of bridge - PLX or BR04 + * domain + * This field specifies the respective domain of the PCI device. + * fwVersion + * This field specifies Firmware Version of the bridge stored in + * bridge EEPROM. + * + * If (fwVersion, oemVersion, siliconRevision) == 0, it would mean that RM + * was unable to fetch the value from the bridge device. + * + */ + +typedef struct NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS { + NvU8 bus; + NvU8 device; + NvU8 func; + NvU8 oemVersion; + NvU8 siliconRevision; + NvU8 hwbcResourceType; + NvU32 domain; + NvU32 fwVersion; +} NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU + * + * This command returns information about all the upstream bridges of the GPU. + * Information consists of bridge firmware version and its bus topology. + * + * bridgeCount + * This field specifies the number of physical brides present + * in the system. + * physicalBridgeIds + * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE. + * In this array, the bridge Ids are stored. + * bridgeList + * This field specifies an array of size NV2080_CTRL_MAX_PHYSICAL_BRIDGE. + * In this array, the bridge version details are stored. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU (0x2080015b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS_MESSAGE_ID (0x5BU) + +typedef struct NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS { + NvU8 bridgeCount; + NvU32 physicalBridgeIds[NV2080_CTRL_MAX_PHYSICAL_BRIDGE]; + NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS bridgeList[NV2080_CTRL_MAX_PHYSICAL_BRIDGE]; +} NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS + * + * This command is used to query the status of the HW scrubber. If a scrub is + * in progress then the range which is being scrubbed is also reported back. + * + * scrubberStatus + * Reports the status of the scrubber unit - running/idle. + * + * remainingtimeMs + * If scrubbing is going on, reports the remaining time in milliseconds + * required to finish the scrub. + * + * scrubStartAddr + * This parameter reports the start address of the ongoing scrub if scrub + * is going on, otherwise reports the start addr of the last finished scrub + * + * scrubEndAddr + * This parameter reports the end address of the ongoing scrub if scrub + * is going on, otherwise reports the end addr of the last finished scrub. 
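+ *
+ *   Illustrative usage sketch (editorial addition; same assumed NvRmControl()
+ *   dispatch and hClient/hSubdevice handles as in the earlier sketches):
+ *
+ *     NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS params = { 0 };
+ *
+ *     if ((NvRmControl(hClient, hSubdevice,
+ *                      NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS,
+ *                      &params, sizeof(params)) == NV_OK) &&
+ *         (params.scrubberStatus ==
+ *              NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_RUNNING))
+ *     {
+ *         // A scrub is in flight: params.remainingTimeMs and the
+ *         // [scrubStartAddr, scrubEndAddr] range are meaningful.
+ *     }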
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_GPU_QUERY_SCRUBBER_STATUS (0x2080015f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS_MESSAGE_ID (0x5FU) + +typedef struct NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS { + NvU32 scrubberStatus; + NvU32 remainingTimeMs; + NV_DECLARE_ALIGNED(NvU64 scrubStartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 scrubEndAddr, 8); +} NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS; + +/* valid values for scrubber status */ +#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_RUNNING (0x00000000) +#define NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_SCRUBBER_IDLE (0x00000001) + +/* + * NV2080_CTRL_CMD_GPU_GET_VPR_CAPS + * + * This command is used to query the VPR capability information for a + * GPU. If VPR is supported, the parameters are filled accordingly. + * The addresses returned are all physical addresses. + * + * minStartAddr + * Returns the minimum start address that can be possible for VPR. + * + * maxEndAddr + * Returns the maximum end address that can be possible for VPR. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_GPU_GET_VPR_CAPS (0x20800160) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS { + NV_DECLARE_ALIGNED(NvU64 minStartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 maxEndAddr, 8); +} NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_HANDLE_GPU_SR + * + * Communicates to RM to handle GPU Surprise Removal + * Called from client when it receives SR IRP from OS + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + */ +#define NV2080_CTRL_CMD_GPU_HANDLE_GPU_SR (0x20800167) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x67" */ + + +/* + * NV2080_CTRL_CMD_GPU_GET_PES_INFO + * + * This command provides the PES count and mask of enabled PES for a + * specified GPC. It also returns the TPC to PES mapping information + * for a given GPU. + * + * gpcId[IN] + * This parameter specifies the GPC for which PES information is to be + * retrieved. If the GPC with this ID is not enabled this command will + * return an activePesMask of zero + * + * numPesInGpc[OUT] + * This parameter returns the number of PES in this GPC. + * + * activePesMask[OUT] + * This parameter returns a mask of enabled PESs for the specified GPC. + * Each PES has an ID that is equivalent to the corresponding bit position + * in the mask. + * + * maxTpcPerGpcCount[OUT] + * This parameter returns the max number of TPCs in a GPC. + * + * tpcToPesMap[OUT] + * This array stores the TPC to PES mappings. The value at tpcToPesMap[tpcIndex] + * is the index of the PES it belongs to. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_PES_INFO (0x20800168) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PES_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_GPU_GET_PES_INFO_MAX_TPC_PER_GPC_COUNT 10 + +#define NV2080_CTRL_GPU_GET_PES_INFO_PARAMS_MESSAGE_ID (0x68U) + +typedef struct NV2080_CTRL_GPU_GET_PES_INFO_PARAMS { + NvU32 gpcId; + NvU32 numPesInGpc; + NvU32 activePesMask; + NvU32 maxTpcPerGpcCount; + NvU32 tpcToPesMap[NV2080_CTRL_CMD_GPU_GET_PES_INFO_MAX_TPC_PER_GPC_COUNT]; +} NV2080_CTRL_GPU_GET_PES_INFO_PARAMS; + +/* NV2080_CTRL_CMD_GPU_GET_OEM_INFO + * + * If an InfoROM with a valid OEM Object is present, this + * command returns relevant information from the object to the + * caller. + * + * oemInfo + * This array stores information specifically for OEM use + * (e.g. "their own serial number", "lot codes", etc) + * "The byte definition is up to the OEM" + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_GPU_GET_OEM_INFO (0x20800169) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_GPU_MAX_OEM_INFO_LENGTH (0x000001F8) + +#define NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS_MESSAGE_ID (0x69U) + +typedef struct NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS { + NvU8 oemInfo[NV2080_GPU_MAX_OEM_INFO_LENGTH]; +} NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS; + +/* NV2080_CTRL_CMD_GPU_PROCESS_POST_GC6_EXIT_TASKS + * + * Complete any pending tasks the need to be run after GC6 exit is complete at OS/KMD level + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_READY + */ +#define NV2080_CTRL_CMD_GPU_PROCESS_POST_GC6_EXIT_TASKS (0x2080016a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x6A" */ + +/* + * NV2080_CTRL_CMD_GPU_GET_VPR_INFO + * + * This command is used to query the VPR information for a GPU. + * The following VPR related information can be queried by selecting the queryType: + * 1. The current VPR range. + * 2. The max VPR range ever possible on this GPU. + * + * queryType [in] + * This input parameter is used to select the type of information to query. + * Possible values for this parameter are: + * 1. NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS: Use this to query the + * max VPR range ever possible on this GPU. + * 2. NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE: Use this to query + * the current VPR range on this GPU. + * + * bVprEnabled [out] + * For query type "NV2080_CTRL_GPU_GET_VPR_INFO_CUR_RANGE", this parameter + * returns if VPR is currently enabled or not. + * + * vprStartAddress [out] + * For NV2080_CTRL_GPU_GET_VPR_INFO_CAPS, it returns minimum allowed VPR start address. + * For NV2080_CTRL_GPU_GET_VPR_INFO_RANGE, it returns current VPR start address. + * + * vprEndAddress [out] + * For NV2080_CTRL_GPU_GET_VPR_INFO_CAPS, it returns maximum allowed VPR end address. + * For NV2080_CTRL_GPU_GET_VPR_INFO_RANGE, it returns current VPR end address. 
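+ *
+ *   Illustrative usage sketch (editorial addition; same assumed NvRmControl()
+ *   dispatch and hClient/hSubdevice handles as in the earlier sketches):
+ *
+ *     NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS params = { 0 };
+ *
+ *     params.queryType = NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE;
+ *     if ((NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_GET_VPR_INFO,
+ *                      &params, sizeof(params)) == NV_OK) &&
+ *         params.bIsVprEnabled)
+ *     {
+ *         // [vprStartAddressInBytes, vprEndAddressInBytes] is the VPR range
+ *         // currently programmed on this GPU.
+ *     }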
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_VPR_INFO (0x2080016b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS_MESSAGE_ID" */ + + +typedef enum NV2080_CTRL_VPR_INFO_QUERY_TYPE { + NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_VPR_CAPS = 0, + NV2080_CTRL_GPU_GET_VPR_INFO_QUERY_CUR_VPR_RANGE = 1, +} NV2080_CTRL_VPR_INFO_QUERY_TYPE; + +#define NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS_MESSAGE_ID (0x6BU) + +typedef struct NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS { + NV2080_CTRL_VPR_INFO_QUERY_TYPE queryType; + NvBool bIsVprEnabled; + NV_DECLARE_ALIGNED(NvU64 vprStartAddressInBytes, 8); + NV_DECLARE_ALIGNED(NvU64 vprEndAddressInBytes, 8); +} NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ENCODER_CAPACITY + * + * This command is used to query the encoder capacity of the GPU. + * + * queryType [in] + * This input parameter is used to select the type of information to query. + * Possible values for this parameter are: + * 1. NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_H264: Use this to query the + * H.264 encoding capacity on this GPU. + * 2. NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_HEVC: Use this to query the + * H.265/HEVC encoding capacity on this GPU. + * + * encoderCapacity [out] + * Encoder capacity value from 0 to 100. Value of 0x00 indicates encoder performance + * may be minimal for this GPU and software should fall back to CPU-based encode. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_GPU_GET_ENCODER_CAPACITY (0x2080016c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE { + NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_H264 = 0, + NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_HEVC = 1, +} NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE; + +#define NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS_MESSAGE_ID (0x6CU) + +typedef struct NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS { + NV2080_CTRL_ENCODER_CAPACITY_QUERY_TYPE queryType; + NvU32 encoderCapacity; +} NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS; + +/* + * NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS + * + * This command is used to retrieve the GPU's count of encoder sessions, + * trailing average FPS and encode latency over all active sessions. + * + * encoderSessionCount + * This field specifies count of all active encoder sessions on this GPU. + * + * averageEncodeFps + * This field specifies the average encode FPS for this GPU. + * + * averageEncodeLatency + * This field specifies the average encode latency in microseconds for this GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS (0x2080016d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS_MESSAGE_ID (0x6DU) + +typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS { + NvU32 encoderSessionCount; + NvU32 averageEncodeFps; + NvU32 averageEncodeLatency; +} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS; + +#define NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES 0x200 // 512 entries. + +/* + * NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO + * + * This command returns NVENC software sessions information for the associate GPU. 
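+ *
+ *   Illustrative usage sketch of the two-step query protocol described below
+ *   (editorial addition; the NvRmControl() dispatch, hClient/hSubdevice
+ *   handles and the user-space allocator are assumptions, and the NvP64
+ *   conversion helper name may differ in a real client):
+ *
+ *     NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS params = { 0 };
+ *     NV2080_CTRL_NVENC_SW_SESSION_INFO *pTbl;
+ *
+ *     // Pass 1: sessionInfoTbl == NULL, RM only returns the session count.
+ *     NvRmControl(hClient, hSubdevice,
+ *                 NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO,
+ *                 &params, sizeof(params));
+ *
+ *     // Pass 2: provide a buffer sized for the maximum copy-out count.
+ *     pTbl = calloc(NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES,
+ *                   sizeof(*pTbl));
+ *     params.sessionInfoTbl = NV_PTR_TO_NvP64(pTbl);  // pointer-to-NvP64 helper, name assumed
+ *     NvRmControl(hClient, hSubdevice,
+ *                 NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO,
+ *                 &params, sizeof(params));
+ *     // params.sessionInfoTblEntry now holds the number of valid entries.
+ *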
+ * Requests to retrieve session information use a list of one or more
+ * NV2080_CTRL_NVENC_SW_SESSION_INFO structures.
+ *
+ *   sessionInfoTblEntry
+ *     This field specifies the number of entries that are filled inside
+ *     sessionInfoTbl. Max value of this field once returned from RM would be
+ *     NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES.
+ *
+ *   sessionInfoTbl
+ *     This field specifies a pointer in the caller's address space
+ *     to the buffer into which the NVENC session information is to be returned.
+ *     When the buffer is NULL, RM assumes that the client is querying the
+ *     session count and returns the current encoder session count in the
+ *     sessionInfoTblEntry field.
+ *     To get the actual buffer data, the client should allocate sessionInfoTbl
+ *     of size NV2080_CTRL_GPU_NVENC_SESSION_INFO_MAX_COPYOUT_ENTRIES multiplied
+ *     by the size of the NV2080_CTRL_NVENC_SW_SESSION_INFO structure. RM will
+ *     fill the current session data in the sessionInfoTbl buffer and then
+ *     update sessionInfoTblEntry to reflect the current session count.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_NO_MEMORY
+ *   NV_ERR_INVALID_LOCK_STATE
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+typedef struct NV2080_CTRL_NVENC_SW_SESSION_INFO {
+    NvU32 processId;
+    NvU32 subProcessId;
+    NvU32 sessionId;
+    NvU32 codecType;
+    NvU32 hResolution;
+    NvU32 vResolution;
+    NvU32 averageEncodeFps;
+    NvU32 averageEncodeLatency;
+} NV2080_CTRL_NVENC_SW_SESSION_INFO;
+
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS_MESSAGE_ID (0x6EU)
+
+typedef struct NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS {
+    NvU32 sessionInfoTblEntry;
+    NV_DECLARE_ALIGNED(NvP64 sessionInfoTbl, 8);
+} NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS;
+
+#define NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO (0x2080016e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_SET_FABRIC_BASE_ADDR
+ *
+ * The command sets the fabric base address, which represents the top N bits of
+ * a peer memory address. These N bits will be used to index NvSwitch routing
+ * tables to forward peer memory accesses to associated GPUs.
+ *
+ * The command is available to clients with administrator privileges only.
+ * An attempt to use this command by a client without administrator privileges
+ * results in the return of NV_ERR_INSUFFICIENT_PERMISSIONS status.
+ *
+ * The command allows fabricAddr to be set only once in the lifetime of a GPU.
+ * A GPU must be destroyed in order to re-assign a different fabricAddr. An
+ * attempt to re-assign the address without destroying the GPU would result in
+ * the return of NV_ERR_STATE_IN_USE status.
+ *
+ * fabricBaseAddr[IN]
+ *  - An address with at least 32GB alignment.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_STATE_IN_USE
+ */
+
+#define NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS_MESSAGE_ID (0x6FU)
+
+typedef struct NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 fabricBaseAddr, 8);
+} NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_SET_FABRIC_BASE_ADDR (0x2080016f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_INTERRUPT_FUNCTION
+ *
+ * The command will trigger an interrupt to a specified PCIe Function.
+ *
+ * gfid[IN]
+ *  - The GPU function identifier
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS_MESSAGE_ID (0x71U)
+
+typedef struct NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS {
+    NvU32 gfid;
+} NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_INTERRUPT_FUNCTION (0x20800171) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_VIRTUAL_INTERRUPT
+ *
+ * The command will trigger the specified interrupt on the host from a guest.
+ *
+ * handle[IN]
+ *  - An opaque handle that will be passed in along with the interrupt
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS_MESSAGE_ID (0x72U)
+
+typedef struct NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS {
+    NvU32 handle;
+} NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_VIRTUAL_INTERRUPT (0x20800172) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_VIRTUAL_INTERRUPT_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS
+ *
+ * This control call queries the status of the GPU function registers.
+ *
+ * statusMask[IN]
+ *  - Input mask of required status registers
+ * xusbData[OUT]
+ *  - data from querying XUSB status register
+ * ppcData[OUT]
+ *  - data from querying PPC status register
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+
+
+
+#define NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS_MESSAGE_ID (0x73U)
+
+typedef struct NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS {
+    NvU32 statusMask;
+    NvU32 xusbData;
+    NvU32 ppcData;
+} NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS (0x20800173) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_GPU_PARTITION_SPAN
+ *
+ * This struct represents the span of a memory partition, which represents the
+ * slices a given partition occupies (or may occupy) within a fixed range which
+ * is defined per-chip. A partition containing more resources will cover more
+ * GPU slices and therefore cover a larger span.
+ *
+ *   lo
+ *      - The starting unit of this span, inclusive
+ *
+ *   hi
+ *      - The ending unit of this span, inclusive
+ *
+ */
+typedef struct NV2080_CTRL_GPU_PARTITION_SPAN {
+    NV_DECLARE_ALIGNED(NvU64 lo, 8);
+    NV_DECLARE_ALIGNED(NvU64 hi, 8);
+} NV2080_CTRL_GPU_PARTITION_SPAN;
+
+/*
+ * NV2080_CTRL_GPU_SET_PARTITION_INFO
+ *
+ * This command partitions a GPU into different SMC-Memory partitions.
+ * The command will configure the HW partition table to create work and memory
+ * isolation.
+ *
+ * The command is available to clients with administrator privileges only.
+ * An attempt to use this command by a client without administrator privileges
+ * results in the return of NV_ERR_INSUFFICIENT_PERMISSIONS status.
+ *
+ * The command allows partitioning an invalid partition only. An attempt to
+ * re-partition a valid partition will result in NV_ERR_STATE_IN_USE.
+ * Repartitioning can be done only if a partition has been destroyed/invalidated
+ * before re-partitioning.
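+ *
+ *   Illustrative usage sketch creating one half-GPU partition (editorial
+ *   addition; same assumed NvRmControl() dispatch and hClient/hSubdevice
+ *   handles as in the earlier sketches). The individual fields are described
+ *   below.
+ *
+ *     NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS params = { 0 };
+ *
+ *     params.partitionCount = 1;
+ *     params.partitionInfo[0].bValid = NV_TRUE;
+ *     params.partitionInfo[0].partitionFlag =
+ *         NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU;
+ *     if (NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_SET_PARTITIONS,
+ *                     &params, sizeof(params)) == NV_OK)
+ *     {
+ *         // params.partitionInfo[0].swizzId holds the new partition ID.
+ *     }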
+ * + * swizzId[IN/OUT] + * - PartitionID associated with a newly created partition. Input in case + * of partition invalidation. + * + * partitionFlag[IN] + * - Flags to determine if GPU is requested to be partitioned in FULL, + * HALF, QUARTER or ONE_EIGHTHED and whether the partition requires + * any additional resources. + * When flags include NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA + * partition will be created with at least one video decode, jpeg and + * optical flow engines. This flag is valid only for partitions with + * a single GPC. + * + * bValid[IN] + * - NV_TRUE if creating a partition. NV_FALSE if destroying a partition. + * + * placement[IN] + * - Optional placement span to allocate the partition into. Valid + * placements are returned from NV2080_CTRL_CMD_GPU_GET_PARTITION_CAPACITY. + * The partition flag NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN must + * be set for this parameter to be used. If the flag is set and the given + * placement is not valid, an error will be returned. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_NOT_SUPPORTED + * NV_ERR_STATE_IN_USE + */ +typedef struct NV2080_CTRL_GPU_SET_PARTITION_INFO { + NvU32 swizzId; + NvU32 partitionFlag; + NvBool bValid; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN placement, 8); +} NV2080_CTRL_GPU_SET_PARTITION_INFO; + +#define PARTITIONID_INVALID NV2080_CTRL_GPU_PARTITION_ID_INVALID +#define NV2080_CTRL_GPU_PARTITION_ID_INVALID 0xFFFFFFFF +#define NV2080_CTRL_GPU_MAX_PARTITIONS 0x00000008 +#define NV2080_CTRL_GPU_MAX_PARTITION_IDS 0x00000009 +#define NV2080_CTRL_GPU_MAX_SMC_IDS 0x00000008 +#define NV2080_CTRL_GPU_MAX_GPC_PER_SMC 0x0000000c +#define NV2080_CTRL_GPU_MAX_CE_PER_SMC 0x00000008 + +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE 1:0 +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL 0x00000000 +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF 0x00000001 +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER 0x00000002 +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH 0x00000003 +#define NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE__SIZE 4 +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE 4:2 +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_FULL 0x00000000 +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_HALF 0x00000001 +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF 0x00000002 +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_QUARTER 0x00000003 +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH 0x00000004 +#define NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE__SIZE 5 +#define NV2080_CTRL_GPU_PARTITION_MAX_TYPES 8 +#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA 30:30 +#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA_DISABLE 0 +#define NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA_ENABLE 1 +#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN 31:31 +#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN_DISABLE 0 +#define NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN_ENABLE 1 + +// TODO XXX Bug 2657907 Remove these once clients update +#define NV2080_CTRL_GPU_PARTITION_FLAG_FULL_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _FULL) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _FULL)) +#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_HALF_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _HALF) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _HALF)) +#define 
NV2080_CTRL_GPU_PARTITION_FLAG_ONE_MINI_HALF_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _HALF) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _MINI_HALF))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_QUARTER_GPU  (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _QUARTER) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _QUARTER))
+#define NV2080_CTRL_GPU_PARTITION_FLAG_ONE_EIGHTHED_GPU (DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _MEMORY_SIZE, _EIGHTH) | DRF_DEF(2080, _CTRL_GPU_PARTITION_FLAG, _COMPUTE_SIZE, _EIGHTH))
+
+#define NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS_MESSAGE_ID (0x74U)
+
+typedef struct NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS {
+    NvU32 partitionCount;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_SET_PARTITION_INFO partitionInfo[NV2080_CTRL_GPU_MAX_PARTITIONS], 8);
+} NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS;
+
+#define NV2080_CTRL_CMD_GPU_SET_PARTITIONS (0x20800174) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NV2080_CTRL_GPU_GET_PARTITION_INFO
+ *
+ * This command gets the partition information for requested partitions.
+ * If the GPU is not partitioned, the control call will return NV_ERR_NOT_SUPPORTED.
+ *
+ * The command can return global partition information as well as single
+ * partition information if the global flag is not set.
+ * On bare metal, user mode can request all partition info, while in
+ * virtualization the plugin should make an RPC with the swizzId assigned to
+ * the requesting VM.
+ *
+ * swizzId[IN]
+ *  - HW Partition ID associated with the requested partition.
+ *
+ * partitionFlag[OUT]
+ *  - partitionFlag that was provided during partition creation.
+ *
+ * grEngCount[OUT]
+ *  - Number of SMC engines/GR engines allocated in partition
+ *    GrIDs in a partition will always start from 0 and end at grEngCount-1
+ *
+ * veidCount[OUT]
+ *  - VEID Count assigned to a partition. These will be divided across
+ *    SMC engines once CONFIGURE_PARTITION call has been made. The current
+ *    algorithm is to assign veidPerGpc * gpcCountPerSmc to an SMC engine.
+ *
+ * smCount[OUT]
+ *  - SMs assigned to a partition.
+ *
+ * ceCount[OUT]
+ *  - Copy Engines assigned to a partition.
+ *
+ * nvEncCount[OUT]
+ *  - NvEnc Engines assigned to a partition.
+ *
+ * nvDecCount[OUT]
+ *  - NvDec Engines assigned to a partition.
+ *
+ * nvJpgCount[OUT]
+ *  - NvJpg Engines assigned to a partition.
+ *
+ * gpcCount[OUT]
+ *  - Max GPCs assigned to a partition.
+ *
+ * gpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ *  - GPC count associated with every valid SMC/Gr.
+ *
+ * veidsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS][OUT]
+ *  - VEID count associated with every valid SMC. VEIDs within this SMC
+ *    will start from 0 and go till veidCount[SMC_ID] - 1.
+ *
+ * span[OUT]
+ *  - The span covered by this partition
+ *
+ * bValid[OUT]
+ *  - NV_TRUE if partition is valid else NV_FALSE.
+ *
+ * bPartitionError[OUT]
+ *  - NV_TRUE if partition had poison error which requires drain and reset
+ *    else NV_FALSE.
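+ *
+ *   Illustrative usage sketch (editorial addition; same assumed NvRmControl()
+ *   dispatch and hClient/hSubdevice handles as in the earlier sketches). The
+ *   individual output fields are described below.
+ *
+ *     NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS params = { 0 };
+ *     NvU32 i;
+ *
+ *     params.bGetAllPartitionInfo = NV_FALSE;
+ *     if (NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_GET_PARTITIONS,
+ *                     &params, sizeof(params)) == NV_OK)
+ *     {
+ *         for (i = 0; i < params.validPartitionCount; i++)
+ *         {
+ *             // Inspect params.queryPartitionInfo[i].swizzId, .smCount, ...
+ *         }
+ *     }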
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_NOT_SUPPORTED + */ +typedef struct NV2080_CTRL_GPU_GET_PARTITION_INFO { + NvU32 swizzId; + NvU32 partitionFlag; + NvU32 grEngCount; + NvU32 veidCount; + NvU32 smCount; + NvU32 ceCount; + NvU32 nvEncCount; + NvU32 nvDecCount; + NvU32 nvJpgCount; + NvU32 nvOfaCount; + NvU32 gpcCount; + NvU32 gpcsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS]; + NvU32 veidsPerGr[NV2080_CTRL_GPU_MAX_SMC_IDS]; + NV_DECLARE_ALIGNED(NvU64 memSize, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN span, 8); + NvBool bValid; + NvBool bPartitionError; +} NV2080_CTRL_GPU_GET_PARTITION_INFO; + +/* + * NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS + * + * queryPartitionInfo[IN] + * - Max sized array of NV2080_CTRL_GPU_GET_PARTITION_INFO to get partition + * Info + * + * bGetAllPartitionInfo[In] + * - Flag to get all partitions info. Only root client will receive all + * partition's info. Non-Root clients should not use this flag + * + * validPartitionCount[Out] + * - Valid partition count which has been filled by RM as part of the call + * + */ +#define NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS_MESSAGE_ID (0x75U) + +typedef struct NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_GET_PARTITION_INFO queryPartitionInfo[NV2080_CTRL_GPU_MAX_PARTITIONS], 8); + NvU32 validPartitionCount; + NvBool bGetAllPartitionInfo; +} NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_PARTITIONS (0x20800175) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_GPU_CONFIGURE_PARTITION + * + * This command configures a partition by associating GPCs with SMC Engines + * available in that partition. Engines which are to have GPCs assigned to them + * shall not already have any GPCs assigned to them. It is not valid to both + * assign GPCs and remove GPCs as part of a single call to this function. + * + * swizzId[IN] + * - PartitionID for configuring partition. If partition has a valid + * context created, then configuration is not allowed. + * + * gpcCountPerSmcEng[IN] + * - Number of GPCs expected to be configured per SMC. Supported + * configurations are 0, 1, 2, 4 or 8. "0" means a particular SMC + * engine will be disabled with no GPC connected to it. + * + * updateSmcEngMask[IN] + * - Mask tracking valid entries of gpcCountPerSmcEng. A value of + * 0 in bit index i indicates that engine i will keep its current + * configuration. + * + * bUseAllGPCs[IN] + * - Flag specifying alternate configuration mode, indicating that in + * swizzid 0 only, all non-floorswept GPCs should be connected to the + * engine indicated by a raised bit in updateSmcEngMask. Only a single + * engine may be targeted by this operation. The gpcCountPerSmcEng + * parameter should not be used with this flag. 
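+ *
+ *   Illustrative usage sketch assigning two GPCs to SMC engine 0 of an
+ *   existing partition (editorial addition; same assumed NvRmControl()
+ *   dispatch and hClient/hSubdevice handles as in the earlier sketches; the
+ *   swizzId value is hypothetical):
+ *
+ *     NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.swizzId = 1;                  // partition returned by SET_PARTITIONS
+ *     params.gpcCountPerSmcEng[0] = 2;     // connect two GPCs to engine 0
+ *     params.updateSmcEngMask = (1u << 0); // only engine 0 is being updated
+ *     status = NvRmControl(hClient, hSubdevice,
+ *                          NV2080_CTRL_CMD_GPU_CONFIGURE_PARTITION,
+ *                          &params, sizeof(params));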
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_NOT_SUPPORTED + * NV_ERR_STATE_IN_USE + */ +#define NV2080_CTRL_CMD_GPU_CONFIGURE_PARTITION (0x20800176) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS_MESSAGE_ID (0x76U) + +typedef struct NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS { + NvU32 swizzId; + NvU32 gpcCountPerSmcEng[NV2080_CTRL_GPU_MAX_SMC_IDS]; + NvU32 updateSmcEngMask; + NvBool bUseAllGPCs; +} NV2080_CTRL_GPU_CONFIGURE_PARTITION_PARAMS; + + +/* + * NV2080_CTRL_GPU_FAULT_PACKET + * + * This struct represents a GMMU fault packet. + * + */ +#define NV2080_CTRL_GPU_FAULT_PACKET_SIZE 32 +typedef struct NV2080_CTRL_GPU_FAULT_PACKET { + NvU8 data[NV2080_CTRL_GPU_FAULT_PACKET_SIZE]; +} NV2080_CTRL_GPU_FAULT_PACKET; + +/* + * NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT + * + * This command reports a nonreplayable fault packet to RM. + * It is only used by UVM. + * + * pFaultPacket[IN] + * - A fault packet that will be later cast to GMMU_FAULT_PACKET *. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_REPORT_NON_REPLAYABLE_FAULT (0x20800177) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS_MESSAGE_ID (0x77U) + +typedef struct NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS { + NV2080_CTRL_GPU_FAULT_PACKET faultPacket; +} NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_VGPU + * + * This command is similar to NV2080_CTRL_CMD_GPU_EXEC_REG_OPS, except it is used + * by the VGPU plugin client only. This command provides access to the subset of + * privileged registers. + * + * See confluence page "vGPU UMED Security" for details. + * + */ +#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_VGPU (0x20800178) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x78" */ + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_RUNLIST_PRI_BASE + * + * This command returns the runlist pri base of the specified engine(s). + * + * engineList + * Input array. + * This array specifies the engines being queried for information. + * The list of engines supported by a chip can be fetched using the + * NV2080_CTRL_CMD_GPU_GET_ENGINES/GET_ENGINES_V2 ctrl call. + * + * runlistPriBase + * Output array. 
+ * Returns the runlist pri base for the specified engines + * Else, will return _NULL when the input is a NV2080_ENGINE_TYPE_NULL + * and will return _ERROR when the control call fails due to an invalid argument + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINE_RUNLIST_PRI_BASE (0x20800179) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS_MESSAGE_ID (0x79U) + +typedef struct NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS { + NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE]; + NvU32 runlistPriBase[NV2080_GPU_MAX_ENGINES_LIST_SIZE]; +} NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS; + +#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_NULL (0xFFFFFFFF) +#define NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_ERROR (0xFFFFFFFB) + +/* + * NV2080_CTRL_CMD_GPU_GET_HW_ENGINE_ID + * + * This command returns the host hardware defined engine ID of the specified engine(s). + * + * engineList + * Input array. + * This array specifies the engines being queried for information. + * The list of engines supported by a chip can be fetched using the + * NV2080_CTRL_CMD_GPU_GET_ENGINES/GET_ENGINES_V2 ctrl call. + * + * hwEngineID + * Output array. + * Returns the host hardware engine ID(s) for the specified engines + * Else, will return _NULL when the input is a NV2080_ENGINE_TYPE_NULL + * and will return _ERROR when the control call fails due to an invalid argument + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_HW_ENGINE_ID (0x2080017a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS_MESSAGE_ID (0x7AU) + +typedef struct NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS { + NvU32 engineList[NV2080_GPU_MAX_ENGINES_LIST_SIZE]; + NvU32 hwEngineID[NV2080_GPU_MAX_ENGINES_LIST_SIZE]; +} NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS; + +#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_NULL (0xFFFFFFFF) +#define NV2080_CTRL_GPU_GET_HW_ENGINE_ID_ERROR (0xFFFFFFFB) + +/* + * NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS + * + * This command is used to retrieve the GPU's count of FBC sessions, + * average FBC calls and FBC latency over all active sessions. + * + * sessionCount + * This field specifies count of all active fbc sessions on this GPU. + * + * averageFPS + * This field specifies the average frames captured. + * + * averageLatency + * This field specifies the average FBC latency in microseconds. + * + * Possible status values returned are : + * NV_OK + * NV_ERR_INVALID_ARGUMENT +*/ +#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS (0x2080017b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS_MESSAGE_ID (0x7BU) + +typedef struct NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS { + NvU32 sessionCount; + NvU32 averageFPS; + NvU32 averageLatency; +} NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS; + +/* +* NV2080_CTRL_NVFBC_SW_SESSION_INFO +* +* processId[OUT] +* Process id of the process owning the NvFBC session. +* On VGX host, this will specify the vGPU plugin process id. 
+* subProcessId[OUT] +* Process id of the process owning the NvFBC session if the +* session is on VGX guest, else the value is zero. +* vgpuInstanceId[OUT] +* vGPU on which the process owning the NvFBC session +* is running if session is on VGX guest, else +* the value is zero. +* sessionId[OUT] +* Unique session id of the NvFBC session. +* sessionType[OUT] +* Type of NvFBC session. +* displayOrdinal[OUT] +* Display identifier associated with the NvFBC session. +* sessionFlags[OUT] +* One or more of NV2080_CTRL_NVFBC_SESSION_FLAG_xxx. +* hMaxResolution[OUT] +* Max horizontal resolution supported by the NvFBC session. +* vMaxResolution[OUT] +* Max vertical resolution supported by the NvFBC session. +* hResolution[OUT] +* Horizontal resolution requested by caller in grab call. +* vResolution[OUT] +* Vertical resolution requested by caller in grab call. +* averageFPS[OUT] +* Average no. of frames captured per second. +* averageLatency[OUT] +* Average frame capture latency in microseconds. +*/ + +#define NV2080_CTRL_NVFBC_SESSION_FLAG_DIFFMAP_ENABLED 0x00000001 +#define NV2080_CTRL_NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED 0x00000002 +#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT 0x00000004 +#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE 0x00000008 +#define NV2080_CTRL_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT 0x00000010 + +typedef struct NV2080_CTRL_NVFBC_SW_SESSION_INFO { + NvU32 processId; + NvU32 subProcessId; + NvU32 vgpuInstanceId; + NvU32 sessionId; + NvU32 sessionType; + NvU32 displayOrdinal; + NvU32 sessionFlags; + NvU32 hMaxResolution; + NvU32 vMaxResolution; + NvU32 hResolution; + NvU32 vResolution; + NvU32 averageFPS; + NvU32 averageLatency; +} NV2080_CTRL_NVFBC_SW_SESSION_INFO; + +/* +* NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO +* +* This command returns NVFBC software sessions information for the associate GPU. +* +* sessionInfoCount +* This field specifies the number of entries that are filled inside +* sessionInfoTbl. Max value of this field once returned from RM would be +* NV2080_GPU_NVFBC_MAX_COUNT. +* +* sessionInfoTbl +* This field specifies the array in which the NVFBC session information is to +* be returned. RM will fill the current session data in sessionInfoTbl array +* and then update the sessionInfoCount to reflect current session count value. +* +* Possible status values returned are: +* NV_OK +* NV_ERR_NO_MEMORY +* NV_ERR_INVALID_LOCK_STATE +* NV_ERR_INVALID_ARGUMENT +*/ + +#define NV2080_GPU_NVFBC_MAX_SESSION_COUNT 256 + +#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS_MESSAGE_ID (0x7CU) + +typedef struct NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS { + NvU32 sessionInfoCount; + NV2080_CTRL_NVFBC_SW_SESSION_INFO sessionInfoTbl[NV2080_GPU_NVFBC_MAX_SESSION_COUNT]; +} NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS; + +#define NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO (0x2080017c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS_MESSAGE_ID" */ + + + +/* + * NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE + * + * This command returns the VMMU page size + * + * vmmuSegmentSize + * Output parameter. 
+ * Returns the VMMU segment size (in bytes) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE (0x2080017e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS_MESSAGE_ID (0x7EU) + +typedef struct NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 vmmuSegmentSize, 8); +} NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS; + +#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_32MB 0x02000000 +#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_64MB 0x04000000 +#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_128MB 0x08000000 +#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_256MB 0x10000000 +#define NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_512MB 0x20000000 + + + +/* + * NV2080_CTRL_GPU_GET_PARTITION_CAPACITY + * + * This command returns the count of partitions of given size (represented by + * NV2080_CTRL_GPU_PARTITION_FLAG_*) which can be requested via + * NV2080_CTRL_GPU_SET_PARTITIONS ctrl call. + * Note that this API does not "reserve" any partitions, and there is no + * guarantee that the reported count of available partitions of a given size + * will remain consistent following creation of partitions of different size + * through NV2080_CTRL_GPU_SET_PARTITIONS. + * Note that this API is unsupported if SMC is feature-disabled. + * + * partitionFlag[IN] + * - Partition flag indicating size of requested partitions + * + * partitionCount[OUT] + * - Available number of partitions of the given size which can currently be created. + * + * availableSpans[OUT] + * - For each partition able to be created of the specified size, the span + * it could occupy. + * + * availableSpansCount[OUT] + * - Number of valid entries in availableSpans. + * + * totalPartitionCount[OUT] + * - Total number of partitions of the given size which can be created. + * + * totalSpans[OUT] + * - List of spans which can possibly be occupied by partitions of the + * given type. + * + * totalSpansCount[OUT] + * - Number of valid entries in totalSpans. + * + * bStaticInfo[IN] + * - Flag indicating that client requests only the information from + * totalPartitionCount and totalSpans. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_PARTITION_CAPACITY (0x20800181) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS_MESSAGE_ID (0x81U) + +typedef struct NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS { + NvU32 partitionFlag; + NvU32 partitionCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN availableSpans[NV2080_CTRL_GPU_MAX_PARTITIONS], 8); + NvU32 availableSpansCount; + NvU32 totalPartitionCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PARTITION_SPAN totalSpans[NV2080_CTRL_GPU_MAX_PARTITIONS], 8); + NvU32 totalSpansCount; + NvBool bStaticInfo; +} NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_CACHED_INFO + * + * This command returns cached(SW only) gpu information for the associated GPU. + * Requests to retrieve gpu information use a list of one or more NV2080_CTRL_GPU_INFO + * structures. 
+ * The gpuInfoList is aligned with NV2080_CTRL_GPU_GET_INFO_V2_PARAMS for security concerns.
+ *
+ *   gpuInfoListSize
+ *     This field specifies the number of entries on the caller's
+ *     gpuInfoList.
+ *   gpuInfoList
+ *     This field specifies a pointer in the caller's address space
+ *     to the buffer into which the gpu information is to be returned.
+ *     This buffer must be at least as big as gpuInfoListSize multiplied
+ *     by the size of the NV2080_CTRL_GPU_INFO structure.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ *   NV_ERR_OPERATING_SYSTEM
+ */
+#define NV2080_CTRL_CMD_GPU_GET_CACHED_INFO (0x20800182) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | 0x82" */
+
+typedef struct NV2080_CTRL_GPU_GET_CACHED_INFO_PARAMS {
+    NvU32                gpuInfoListSize;
+    NV2080_CTRL_GPU_INFO gpuInfoList[NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE];
+} NV2080_CTRL_GPU_GET_CACHED_INFO_PARAMS;
+
+/*
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE
+ *
+ * This command configures the global partitioning mode for this GPU.
+ * This command may not be sent to a GPU with any active partitions.
+ * This command may be used to set the following modes:
+ *
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_LEGACY
+ *     This is the default mode. While this GPU is in this mode, no partitions
+ *     will be allowed to be created via SET_PARTITIONS - a client must set one
+ *     of the below modes prior to partitioning the GPU. When a client sets a
+ *     GPU into this mode, any performance changes resulting from partitions
+ *     made while in either of the below modes will be cleared. A
+ *     physical-function-level reset is required after setting this mode.
+ *
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_MAX_PERF
+ *     In this mode, when the GPU is partitioned, each partition will have the
+ *     maximum possible performance which can be evenly distributed among all
+ *     partitions. The total performance of the GPU, taking into account all
+ *     partitions created in this mode, may be less than that of a GPU running
+ *     in legacy non-SMC mode. Partitions created while in this mode require a
+ *     physical-function-level reset before the partitioning may take full
+ *     effect. Destroying all partitions while in this mode may be
+ *     insufficient to restore full performance to the GPU - only by setting
+ *     the mode to _LEGACY can this be achieved. A physical-function-level
+ *     reset is NOT required after setting this mode.
+ *
+ * NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_FAST_RECONFIG
+ *     By setting this mode, the performance of the GPU will be restricted such
+ *     that all partitions will have a consistent fraction of the total
+ *     available performance, which may be less than the maximum possible
+ *     performance available to each partition. Creating or destroying
+ *     partitions on this GPU while in this mode will not require a
+ *     physical-function-level reset, and will not affect other active
+ *     partitions. Destroying all partitions while in this mode may be
+ *     insufficient to restore full performance to the GPU - only by setting
+ *     the mode to _LEGACY can this be achieved. A physical-function-level
+ *     reset is required after setting this mode.
+ *
+ * Parameters:
+ *   partitioningMode[IN]
+ *      - Partitioning Mode to set for this GPU.
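+ *
+ *   Illustrative usage sketch selecting the _MAX_PERF repartitioning mode
+ *   (editorial addition; same assumed NvRmControl() dispatch and
+ *   hClient/hSubdevice handles as in the earlier sketches; DRF_DEF is the
+ *   field/value helper already used elsewhere in this header):
+ *
+ *     NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS params = { 0 };
+ *     NV_STATUS status;
+ *
+ *     params.partitioningMode =
+ *         DRF_DEF(2080, _CTRL_GPU_SET_PARTITIONING_MODE, _REPARTITIONING,
+ *                 _MAX_PERF);
+ *     status = NvRmControl(hClient, hSubdevice,
+ *                          NV2080_CTRL_CMD_GPU_SET_PARTITIONING_MODE,
+ *                          &params, sizeof(params));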
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_SET_PARTITIONING_MODE (0x20800183) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING 1:0 +#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_LEGACY 0 +#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_MAX_PERF 1 +#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_REPARTITIONING_FAST_RECONFIG 2 + +#define NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS_MESSAGE_ID (0x83U) + +typedef struct NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS { + NvU32 partitioningMode; +} NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS; + + + +/* NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO + * + * This structure describes resources available in a partition requested of a + * given type. + * + * [OUT] partitionFlag + * - Flags to specify in NV2080_CTRL_CMD_GPU_SET_PARTITIONS to request this + * partition + * + * [OUT] grCount + * - Number of SMC engines/GR engines + * + * [OUT] gpcCount + * - Number of GPCs in this partition + * + * [OUT] veidCount + * - Number of VEIDS in this partition + * + * [OUT] smCount + * - Number of SMs in this partition + * + * [OUT] ceCount + * - Copy Engines in this partition + * + * [OUT] nvEncCount + * - Encoder Engines in this partition + * + * [OUT] nvDecCount + * - Decoder Engines in this partition + * + * [OUT] nvJpgCount + * - Jpg Engines in this partition + * + * [OUT] nvOfaCount + * - Ofa engines in this partition + * [OUT] memorySize + * - Total available memory within this partition + */ +typedef struct NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO { + NvU32 partitionFlag; + NvU32 grCount; + NvU32 gpcCount; + NvU32 veidCount; + NvU32 smCount; + NvU32 ceCount; + NvU32 nvEncCount; + NvU32 nvDecCount; + NvU32 nvJpgCount; + NvU32 nvOfaCount; + NV_DECLARE_ALIGNED(NvU64 memorySize, 8); +} NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO; + +/* + * NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS + * + * This command returns information regarding GPU partitions which can be + * requested via NV2080_CTRL_CMD_GPU_SET_PARTITIONS. + * + * [OUT] descCount + * - Number of valid partition types + * + * [OUT] partitionDescs + * - Information describing available partitions + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS { + NvU32 descCount; + // C form: NV2080_CTRL_GPU_DESCRIBE_PARTITION_INFO partitionDescs[NV2080_CTRL_GPU_PARTITION_MAX_TYPES]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_INFO partitionDescs[NV2080_CTRL_GPU_PARTITION_MAX_TYPES], 8); +} NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS; + +#define NV2080_CTRL_CMD_GPU_DESCRIBE_PARTITIONS (0x20800185) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS_MESSAGE_ID" */ + + + +/* + * NV2080_CTRL_CMD_GPU_GET_MAX_SUPPORTED_PAGE_SIZE + * + * This command returns information regarding maximum page size supported + * by GMMU on the platform on which RM is running. 
+ * + * [OUT] maxSupportedPageSize + * - Maximum local vidmem page size supported by GMMU of a given GPU (HW) + * on a given platform (OS) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_GPU_GET_MAX_SUPPORTED_PAGE_SIZE (0x20800188) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS { + NvU32 maxSupportedPageSize; +} NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS; + + + +/* + * NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC + * + * This command returns the max number of MMUs per GPC + * + * gpcId [IN] + * Logical GPC id + * count [OUT] + * The number of MMUs per GPC + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. When SMC is enabled, this + * is a mandatory parameter. + */ +#define NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS_MESSAGE_ID (0x8AU) + +typedef struct NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS { + NvU32 gpcId; + NvU32 count; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_NUM_MMUS_PER_GPC (0x2080018a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_GPU_GET_ACTIVE_PARTITION_IDS + * + * This command returns the GPU partition IDs for all active partitions + * If GPU is not partitioned, the control call will return partition count as "0" + * + * swizzId[OUT] + * - HW Partition ID associated with the active partitions + * + * partitionCount[OUT] + * - Number of active partitions in system + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS_MESSAGE_ID (0x8BU) + +typedef struct NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS { + NvU32 swizzId[NV2080_CTRL_GPU_MAX_PARTITION_IDS]; + NvU32 partitionCount; +} NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS; + +#define NV2080_CTRL_CMD_GPU_GET_ACTIVE_PARTITION_IDS (0x2080018b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS_MESSAGE_ID" */ + + + +/* + * NV2080_CTRL_CMD_GPU_GET_PIDS + * + * Given a resource identifier and its type, this command returns a set of + * process identifiers (PIDs) of processes that have instantiated this resource. + * For example, given a class number, this command returns a list of all + * processes with clients that have matching object allocations. + * This is a SMC aware call and the scope of the information gets restricted + * based on partition subscription. + * The call enforces partition subscription if SMC is enabled, and client is not + * a monitoring client. + * Monitoring clients get global information without any scope based filtering. + * Monitoring clients are also not expected to subscribe to a partition when + * SMC is enabled. + * + * idType + * Type of the resource identifier. See below for a list of valid types. + * id + * Resource identifier. + * pidTblCount + * Number of entries in the PID table. + * pidTbl + * Table which will contain the PIDs. Each table entry is of type NvU32. 
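+ *
+ *   Illustrative usage sketch listing the processes that have allocated a
+ *   given class (editorial addition; same assumed NvRmControl() dispatch and
+ *   hClient/hSubdevice handles as in the earlier sketches; NV20_SUBDEVICE_0
+ *   (0x2080) is used as the example class, per the note below):
+ *
+ *     NV2080_CTRL_GPU_GET_PIDS_PARAMS params = { 0 };
+ *     NvU32 i;
+ *
+ *     params.idType = NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS;
+ *     params.id     = 0x2080;    // NV20_SUBDEVICE_0
+ *     if (NvRmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GPU_GET_PIDS,
+ *                     &params, sizeof(params)) == NV_OK)
+ *     {
+ *         for (i = 0; i < params.pidTblCount; i++)
+ *         {
+ *             // params.pidTbl[i] is one process ID using this resource.
+ *         }
+ *     }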
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_GET_PIDS (0x2080018d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PIDS_PARAMS_MESSAGE_ID" */ + +/* max size of pidTable */ +#define NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT 950 + +#define NV2080_CTRL_GPU_GET_PIDS_PARAMS_MESSAGE_ID (0x8DU) + +typedef struct NV2080_CTRL_GPU_GET_PIDS_PARAMS { + NvU32 idType; + NvU32 id; + NvU32 pidTblCount; + NvU32 pidTbl[NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT]; +} NV2080_CTRL_GPU_GET_PIDS_PARAMS; + +/* + * Use class NV20_SUBDEVICE_0 with NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS to query + * PIDs with or without GPU contexts. For any other class id, PIDs only with GPU + * contexts are returned. + */ +#define NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS (0x00000000) +#define NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_VGPU_GUEST (0x00000001) + +/* + * NV2080_CTRL_SMC_SUBSCRIPTION_INFO + * + * This structure contains information about the SMC subscription type. + * If MIG is enabled a valid ID is returned, it is set to PARTITIONID_INVALID otherwise. + * + * computeInstanceId + * This parameter returns a valid compute instance ID + * gpuInstanceId + * This parameter returns a valid GPU instance ID + */ +typedef struct NV2080_CTRL_SMC_SUBSCRIPTION_INFO { + NvU32 computeInstanceId; + NvU32 gpuInstanceId; +} NV2080_CTRL_SMC_SUBSCRIPTION_INFO; + +/* + * NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA + * + * This structure contains the video memory usage information. + * + * memPrivate + * This parameter returns the amount of memory exclusively owned + * (i.e. private) to the client + * memSharedOwned + * This parameter returns the amount of shared memory owned by the client + * memSharedDuped + * This parameter returns the amount of shared memory duped by the client + * protectedMemPrivate + * This parameter returns the amount of protected memory exclusively owned + * (i.e. private) to the client whenever memory protection is enabled + * protectedMemSharedOwned + * This parameter returns the amount of shared protected memory owned by the + * client whenever memory protection is enabled + * protectedMemSharedDuped + * This parameter returns the amount of shared protected memory duped by the + * client whenever memory protection is enabled + */ +typedef struct NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA { + NV_DECLARE_ALIGNED(NvU64 memPrivate, 8); + NV_DECLARE_ALIGNED(NvU64 memSharedOwned, 8); + NV_DECLARE_ALIGNED(NvU64 memSharedDuped, 8); + NV_DECLARE_ALIGNED(NvU64 protectedMemPrivate, 8); + NV_DECLARE_ALIGNED(NvU64 protectedMemSharedOwned, 8); + NV_DECLARE_ALIGNED(NvU64 protectedMemSharedDuped, 8); +} NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA; + +#define NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE (0x00000000) + +#define NV2080_CTRL_GPU_PID_INFO_INDEX_MAX NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE + +typedef union NV2080_CTRL_GPU_PID_INFO_DATA { + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA vidMemUsage, 8); +} NV2080_CTRL_GPU_PID_INFO_DATA; + + +/* + * NV2080_CTRL_GPU_PID_INFO + * + * This structure contains the per pid information. Each type of information + * retrievable via NV2080_CTRL_CMD_GET_PID_INFO is assigned a unique index + * below. In addition the process for which the lookup is for is also defined. + * This is a SMC aware call and the scope of the information gets restricted + * based on partition subscription. 
+ * The call enforces partition subscription if SMC is enabled, and client is not + * a monitoring client. + * Monitoring clients get global information without any scope based filtering. + * Monitoring clients are also not expected to subscribe to a partition when + * SMC is enabled. + * + * pid + * This parameter specifies the PID of the process for which information is + * to be queried. + * index + * This parameter specifies the type of information being queried for the + * process of interest. + * result + * This parameter returns the result of the instruction's execution. + * data + * This parameter returns the data corresponding to the information which is + * being queried. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + * + * Valid PID information indices are: + * + * NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE + * This index is used to request the amount of video memory on this GPU + * allocated to the process. + */ +typedef struct NV2080_CTRL_GPU_PID_INFO { + NvU32 pid; + NvU32 index; + NvU32 result; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO_DATA data, 8); + NV2080_CTRL_SMC_SUBSCRIPTION_INFO smcSubscription; +} NV2080_CTRL_GPU_PID_INFO; + +/* + * NV2080_CTRL_CMD_GPU_GET_PID_INFO + * + * This command allows querying per-process information from the RM. Clients + * request information by specifying a unique informational index and the + * Process ID of the process in question. The result is set to indicate success + * and the information queried (if available) is returned in the data parameter. + * + * pidInfoListCount + * The number of valid entries in the pidInfoList array. + * pidInfoList + * An array of NV2080_CTRL_GPU_PID_INFO of maximum length + * NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GPU_GET_PID_INFO (0x2080018e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_PID_INFO_PARAMS_MESSAGE_ID" */ + +/* max size of pidInfoList */ +#define NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT 200 + +#define NV2080_CTRL_GPU_GET_PID_INFO_PARAMS_MESSAGE_ID (0x8EU) + +typedef struct NV2080_CTRL_GPU_GET_PID_INFO_PARAMS { + NvU32 pidInfoListCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PID_INFO pidInfoList[NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT], 8); +} NV2080_CTRL_GPU_GET_PID_INFO_PARAMS; + + +/*! + * Compute policy types to be specified by callers to set a config. + * + * _TIMESLICE + * Set the timeslice config for the requested GPU. + * Check @ref NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE for + * permissible timeslice values. + */ +#define NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE 0 +#define NV2080_CTRL_GPU_COMPUTE_POLICY_MAX 1 + +/*! + * Enum consisting of permissible timeslice options that can configured + * for a GPU. These can be queried by compute clients and the exact + * timeslice values can be chosen appropriately as per GPU support + */ +typedef enum NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE { + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_DEFAULT = 0, + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_SHORT = 1, + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_MEDIUM = 2, + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_LONG = 3, + NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_MAX = 4, +} NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE; + +typedef struct NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG { + /*! 
+ * NV2080_CTRL_GPU_COMPUTE_POLICY_ + */ + NvU32 type; + + /*! + * Union of type-specific data + */ + union { + NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_DATA_TIMESLICE timeslice; + } data; +} NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG; + +#define NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID (0x94U) + +typedef struct NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS { + NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG config; +} NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG + * + * This command retrieves all compute policies configs for the associated gpu. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG (0x20800195) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID" */ + +/*! + * This define limits the max number of policy configs that can be handled by + * NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG command. + * + * @note Needs to be in sync (greater or equal) to NV2080_CTRL_GPU_COMPUTE_POLICY_MAX. + */ + +#define NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX 32 + +#define NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS_MESSAGE_ID (0x95U) + +typedef struct NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS { + NvU32 numConfigs; + + /*! + * C form: + * NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG configList[NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX]; + */ + NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG configList[NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX]; +} NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS; + + +/*! + * NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST + * + * @brief Validate the address range for memory map request by comparing the + * user supplied address range with GPU BAR0/BAR1 range. + * + * @param[in] addressStart Start address for memory map request + * @param[in] addressLength Length for for memory map request + * @param[out] protection NV_PROTECT_READ_WRITE, if both read/write is allowed + * NV_PROTECT_READABLE, if only read is allowed + * + * Possible status values returned are: + * NV_OK + * NV_ERR_PROTECTION_FAULT + * + */ +#define NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST (0x20800198) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS_MESSAGE_ID (0x98U) + +typedef struct NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS { + NV_DECLARE_ALIGNED(NvU64 addressStart, 8); + NV_DECLARE_ALIGNED(NvU64 addressLength, 8); + NvU32 protection; +} NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES + * + * This command is used to retrieve the load time (latency) of each engine. + * + * engineCount + * This field specifies the number of entries of the following + * three arrays. + * + * engineList[NV2080_GPU_MAX_ENGINE_OBJECTS] + * An array of NvU32 which stores each engine's descriptor. + * + * engineStateLoadTime[NV2080_GPU_MAX_ENGINE_OBJECTS] + * A array of NvU64 which stores each engine's load time. + * + * engineIsInit[NV2080_GPU_MAX_ENGINE_OBJECTS] + * A array of NvBool which stores each engine's initialization status. 
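 *
 * Illustrative, non-normative sketch (not part of the RM API contract): one
 * way a caller might walk the three parallel arrays returned by this
 * command, assuming a subdevice handle hSubdevice under hClient and a
 * hypothetical helper rmControl(hClient, hObject, cmd, pParams, paramsSize)
 * in place of the client's usual RM control path; recordLatency() is
 * likewise hypothetical. The parameter structure is declared below.
 *
 *     NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS loadParams = { 0 };
 *     if (rmControl(hClient, hSubdevice,
 *                   NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES,
 *                   &loadParams, sizeof(loadParams)) == NV_OK)
 *     {
 *         for (NvU32 i = 0; i < loadParams.engineCount; i++)
 *         {
 *             if (!loadParams.engineIsInit[i])
 *                 continue;  // engine never initialized; load time not meaningful
 *             // engineList[i] is the engine descriptor,
 *             // engineStateLoadTime[i] is its load latency.
 *             recordLatency(loadParams.engineList[i],
 *                           loadParams.engineStateLoadTime[i]);
 *         }
 *     }
 *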
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES (0x2080019b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS 0x90 + +#define NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS_MESSAGE_ID (0x9BU) + +typedef struct NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS { + NvU32 engineCount; + NvU32 engineList[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]; + NV_DECLARE_ALIGNED(NvU64 engineStateLoadTime[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS], 8); + NvBool engineIsInit[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]; +} NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_GET_ID_NAME_MAPPING + * + * This command is used to retrieve the mapping of engine ID and engine Name. + * + * engineCount + * This field specifies the size of the mapping. + * + * engineID + * An array of NvU32 which stores each engine's descriptor. + * + * engineName + * An array of char[100] which stores each engine's name. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_GPU_GET_ID_NAME_MAPPING (0x2080019c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS_MESSAGE_ID (0x9CU) + +typedef struct NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS { + NvU32 engineCount; + NvU32 engineID[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS]; + char engineName[NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS][100]; +} NV2080_CTRL_GPU_GET_ID_NAME_MAPPING_PARAMS; + +/* + * NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_NOPTRS + * + * Same as above NV2080_CTRL_CMD_GPU_EXEC_REG_OPS except that this CTRL CMD will + * not allow any embedded pointers. The regOps array is inlined as part of the + * struct. + * NOTE: This intended for gsp plugin only as it may override regOp access + * restrictions + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_GPU_EXEC_REG_OPS_NOPTRS (0x2080019d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS_MESSAGE_ID" */ + +/* setting this to 100 keeps it right below 4k in size */ +#define NV2080_CTRL_REG_OPS_ARRAY_MAX 100 +#define NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS_MESSAGE_ID (0x9DU) + +typedef struct NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS { + NvHandle hClientTarget; + NvHandle hChannelTarget; + NvU32 bNonTransactional; + NvU32 reserved00[2]; + NvU32 regOpCount; + NV2080_CTRL_GPU_REG_OP regOps[NV2080_CTRL_REG_OPS_ARRAY_MAX]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GPU_EXEC_REG_OPS_NOPTRS_PARAMS; + + + + +/*! + * NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO + * + * [in/out] gpuId + * GPU ID for which the capabilities are queried. + * For the NV2080_CTRL_CMD_GET_P2P_CAPS control: + * If bAllCaps == NV_TRUE, this parameter is an out parameter and equals to + * the GPU ID of an attached GPU. + * If bAllCaps == NV_FALSE, this parameter is an in parameter and the requester + * should set it to the ID of the GPU that needs to be queried from. + * [out] p2pCaps + * Peer to peer capabilities discovered between the GPUs. + * See NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 for the list of valid values. + * [out] p2pOptimalReadCEs + * Mask of CEs to use for p2p reads over Nvlink. 
+ * [out] p2pOptimalWriteCEs + * Mask of CEs to use for p2p writes over Nvlink. + * [out] p2pCapsStatus + * Status of all supported p2p capabilities. + * See NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2 for the list of valid values. + * [out] busPeerId + * Bus peer ID. For an invalid or a non-existent peer this field + * has the value NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INVALID_PEER. + */ +typedef struct NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO { + NvU32 gpuId; + NvU32 p2pCaps; + NvU32 p2pOptimalReadCEs; + NvU32 p2pOptimalWriteCEs; + NvU8 p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE]; + NvU32 busPeerId; +} NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO; + +/*! + * NV2080_CTRL_CMD_GET_P2P_CAPS + * + * Returns peer to peer capabilities present between GPUs. + * The caller must either specify bAllCaps to query the capabilities for + * all the attached GPUs or they must pass a valid list of GPU IDs. + * + * [in] bAllCaps + * Set to NV_TRUE to query the capabilities for all the attached GPUs. + * Set to NV_FALSE and specify peerGpuCount and peerGpuCaps[].gpuId + * to retrieve the capabilities only for the specified GPUs. + * [in/out] peerGpuCount + * The number of the peerGpuCaps entries. + * If bAllCaps == NV_TRUE, this parameter is an out parameter and equals to + * the total number of the attached GPUs. + * If bAllCaps == NV_FALSE, this parameter is an in parameter and the requester + * should set it to the number of the peerGpuCaps entries. + * [in/out] peerGpuCaps + * The array of NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO entries, describing + * the peer to peer capabilities of the GPUs. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT - Invalid peerGpuCount + * NV_ERR_OBJECT_NOT_FOUND - Invalid peerGpuCaps[].gpuId + */ +#define NV2080_CTRL_CMD_GET_P2P_CAPS (0x208001a0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GET_P2P_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GET_P2P_CAPS_PARAMS_MESSAGE_ID (0xA0U) + +typedef struct NV2080_CTRL_GET_P2P_CAPS_PARAMS { + NvBool bAllCaps; + NvU32 peerGpuCount; + NV2080_CTRL_GPU_P2P_PEER_CAPS_PEER_INFO peerGpuCaps[NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS]; +} NV2080_CTRL_GET_P2P_CAPS_PARAMS; + +/* _ctrl2080gpu_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h new file mode 100644 index 000000000..742b21aa4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpumon.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080gpumon.finn +// + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/*! + * This structure represents base class of GPU monitoring sample. + */ +typedef struct NV2080_CTRL_GPUMON_SAMPLE { + /*! + * Timestamps in nano-seconds. + */ + NV_DECLARE_ALIGNED(NvU64 timeStamp, 8); +} NV2080_CTRL_GPUMON_SAMPLE; + +/*! + * This structure represents base GPU monitoring sample. + */ +typedef struct NV2080_CTRL_GPUMON_SAMPLES { + /*! + * Type of the sample, see NV2080_CTRL_GPUMON_SAMPLE_TYPE_* for reference. + */ + NvU8 type; + /*! + * Size of the buffer, this should be + * bufSize == NV2080_CTRL_*_GPUMON_SAMPLE_COUNT_* + * sizeof(derived type of NV2080_CTRL_GPUMON_SAMPLE). + */ + NvU32 bufSize; + /*! + * Number of samples in ring buffer. + */ + NvU32 count; + /*! + * tracks the offset of the tail in the circular queue array pSamples. + */ + NvU32 tracker; + /*! + * Pointer to a circular queue based on array of NV2080_CTRL_GPUMON_SAMPLE + * or its derived types structs with size == bufSize. + * + * @note This circular queue wraps around after 10 seconds of sampling, + * and it is clients' responsibility to query within this time frame in + * order to avoid losing samples. + * @note With one exception, this queue contains last 10 seconds of samples + * with tracker poiniting to oldest entry and entry before tracker as the + * newest entry. Exception is when queue is not full (i.e. tracker is + * pointing to a zeroed out entry), in that case valid entries are between 0 + * and tracker. + * @note Clients can store tracker from previous query in order to provide + * samples since last read. + */ + NV_DECLARE_ALIGNED(NvP64 pSamples, 8); +} NV2080_CTRL_GPUMON_SAMPLES; + +/*! + * Enumeration of GPU monitoring sample types. + */ +#define NV2080_CTRL_GPUMON_SAMPLE_TYPE_PWR_MONITOR_STATUS 0x00000001 +#define NV2080_CTRL_GPUMON_SAMPLE_TYPE_PERFMON_UTIL 0x00000002 + +/*! + * Macro for invalid PID. + */ +#define NV2080_GPUMON_PID_INVALID ((NvU32)(~0)) diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h new file mode 100644 index 000000000..bb7993e3c --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h @@ -0,0 +1,1789 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080gr.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +#include "ctrl/ctrl0080/ctrl0080gr.h" /* 2080 is partially derivative of 0080 */ +/* + * NV2080_CTRL_GR_ROUTE_INFO + * + * This structure specifies the routing information used to + * disambiguate the target GR engine. + * + * flags + * This field decides how the route field is interpreted + * + * route + * This field has the data to identify target GR engine + * + */ +#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE 1:0 +#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_NONE 0x0U +#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_ENGID 0x1U +#define NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_CHANNEL 0x2U + +#define NV2080_CTRL_GR_ROUTE_INFO_DATA_CHANNEL_HANDLE 31:0 +#define NV2080_CTRL_GR_ROUTE_INFO_DATA_ENGID 31:0 + +typedef NV0080_CTRL_GR_ROUTE_INFO NV2080_CTRL_GR_ROUTE_INFO; + +/* NV20_SUBDEVICE_XX gr control commands and parameters */ + +/* + * NV2080_CTRL_GR_INFO + * + * This structure represents a single 32bit gr engine value. Clients + * request a particular gr engine value by specifying a unique gr + * information index. + * + * Legal gr information index values are: + * NV2080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT + * This index is used to request the surface buffer alignment (in bytes) + * required by the associated subdevice. The return value is GPU + * implementation-dependent. + * NV2080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT + * This index is used to request the required swizzled surface alignment + * (in bytes) supported by the associated subdevice. The return value + * is GPU implementation-dependent. A return value of 0 indicates the GPU + * does not support swizzled surfaces. + * NV2080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE + * This index is used to request the vertex cache size (in entries) + * supported by the associated subdevice. The return value is GPU + * implementation-dependent. A value of 0 indicates the GPU does + * have a vertex cache. + * NV2080_CTRL_GR_INFO_INDEX_VPE_COUNT + * This index is used to request the number of VPE units supported by the + * associated subdevice. The return value is GPU implementation-dependent. + * A return value of 0 indicates the GPU does not contain VPE units. + * NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT + * This index is used to request the number of shader pipes supported by + * the associated subdevice. The return value is GPU + * implementation-dependent. A return value of 0 indicates the GPU does + * not contain dedicated shader units. + * For tesla: this value is the number of enabled TPCs + * NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT + * This index is used to request the number of sub units per + * shader pipes supported by the associated subdevice. The return + * value is GPU implementation-dependent. A return value of 0 indicates + * the GPU does not contain dedicated shader units. 
+ * For tesla: this value is the number of enabled SMs (per TPC) + * NV2080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR + * This index is used to request the scaling factor for thread stack + * memory. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT + * This index is used to request the number of SM register banks supported. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT + * This index is used to request the number of registers per SM register + * bank. A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_SM_VERSION + * This index is used to determine the SM version. + * A value of 0 indicates the GPU does not support this function. + * Otherwise one of NV2080_CTRL_GR_INFO_SM_VERSION_*. + * NV2080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM + * This index is used to determine the maximum number of warps + * (thread groups) per SM. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP + * This index is used to determine the maximum number of threads + * in each warp (thread group). + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY + * This index is used to request the default fb memory read/write request + * size in bytes (typically based on the memory configuration/controller). + * Smaller memory requests are likely to take as long as a full one. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY + * This index is used to request the default host memory read/write request + * size in bytes (typically based on the memory configuration/controller). + * Smaller memory requests are likely to take as long as a full one. + * A value of 0 indicates the GPU does not support this function. + * NV2080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM + * This index is used to request the maximum number of streaming processors + * per SM. + * NV2080_CTRL_GR_INFO_INDEX_LITTER_* + * This index is used to query the various LITTER size information from + * the chip. + * NV2080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED + * This index is used to query whether the chip has timeslice mode enabled. + * NV2080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT + * This index is used to return the number of "GPU Cores" + * supported by the graphics pipeline + * NV2080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT + * This index is used to return the number of "Ray Tracing Cores" + * supported by the graphics pipeline + * NV2080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT + * This index is used to return the number of "Tensor Cores" + * supported by the graphics pipeline + */ +typedef NV0080_CTRL_GR_INFO NV2080_CTRL_GR_INFO; + +/* + * Valid GR info index values + * These indices are offset from supporting the 0080 version of this call + */ +#define NV2080_CTRL_GR_INFO_INDEX_MAXCLIPS NV0080_CTRL_GR_INFO_INDEX_MAXCLIPS +#define NV2080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 NV0080_CTRL_GR_INFO_INDEX_MIN_ATTRS_BUG_261894 +#define NV2080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK NV0080_CTRL_GR_INFO_XBUF_MAX_PSETS_PER_BANK +/** + * This index is used to request the surface buffer alignment (in bytes) + * required by the associated subdevice. The return value is GPU + * implementation-dependent. 
+ */ +#define NV2080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT NV0080_CTRL_GR_INFO_INDEX_BUFFER_ALIGNMENT +#define NV2080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT NV0080_CTRL_GR_INFO_INDEX_SWIZZLE_ALIGNMENT +#define NV2080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE NV0080_CTRL_GR_INFO_INDEX_VERTEX_CACHE_SIZE +/** + * This index is used to request the number of VPE units supported by the + * associated subdevice. The return value is GPU implementation-dependent. + * A return value of 0 indicates the GPU does not contain VPE units. + */ +#define NV2080_CTRL_GR_INFO_INDEX_VPE_COUNT NV0080_CTRL_GR_INFO_INDEX_VPE_COUNT +/** + * This index is used to request the number of shader pipes supported by + * the associated subdevice. The return value is GPU + * implementation-dependent. A return value of 0 indicates the GPU does + * not contain dedicated shader units. + * For tesla: this value is the number of enabled TPCs + */ +#define NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_COUNT +/** + * This index is used to request the scaling factor for thread stack + * memory. + * A value of 0 indicates the GPU does not support this function. + */ +#define NV2080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR NV0080_CTRL_GR_INFO_INDEX_THREAD_STACK_SCALING_FACTOR +/** + * This index is used to request the number of sub units per + * shader pipes supported by the associated subdevice. The return + * value is GPU implementation-dependent. A return value of 0 indicates + * the GPU does not contain dedicated shader units. + * For tesla: this value is the number of enabled SMs (per TPC) + */ +#define NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT NV0080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT NV0080_CTRL_GR_INFO_INDEX_SM_REG_BANK_REG_COUNT +/** + * This index is used to determine the SM version. + * A value of 0 indicates the GPU does not support this function. + * Otherwise one of NV2080_CTRL_GR_INFO_SM_VERSION_*. + */ +#define NV2080_CTRL_GR_INFO_INDEX_SM_VERSION NV0080_CTRL_GR_INFO_INDEX_SM_VERSION +/** + * This index is used to determine the maximum number of warps + * (thread groups) per SM. + * A value of 0 indicates the GPU does not support this function. + */ +#define NV2080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM NV0080_CTRL_GR_INFO_INDEX_MAX_WARPS_PER_SM +/** + * This index is used to determine the maximum number of threads + * in each warp (thread group). + * A value of 0 indicates the GPU does not support this function. 
+ */ +#define NV2080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP NV0080_CTRL_GR_INFO_INDEX_MAX_THREADS_PER_WARP +#define NV2080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES NV0080_CTRL_GR_INFO_INDEX_GEOM_GS_OBUF_ENTRIES +#define NV2080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES NV0080_CTRL_GR_INFO_INDEX_GEOM_XBUF_ENTRIES +#define NV2080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY NV0080_CTRL_GR_INFO_INDEX_FB_MEMORY_REQUEST_GRANULARITY +#define NV2080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY NV0080_CTRL_GR_INFO_INDEX_HOST_MEMORY_REQUEST_GRANULARITY +#define NV2080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM NV0080_CTRL_GR_INFO_INDEX_MAX_SP_PER_SM +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ZCULL_BANKS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MIN_FBPS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_FBP_PORTS +#define NV2080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED NV0080_CTRL_GR_INFO_INDEX_TIMESLICE_ENABLED +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPAS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_PES_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_GPU_CORE_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPCS_PER_PES +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_MXBAR_HUB_PORTS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SM_PER_TPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_HSHUB_FBP_PORTS +/** + * This index is used to return the number of "Ray Tracing Cores" + * supported by the graphics pipeline + */ +#define NV2080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_RT_CORE_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT NV0080_CTRL_GR_INFO_INDEX_TENSOR_CORE_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GRS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_SLICES +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCMMU_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_LTC_PER_FBP +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_ROP_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC NV0080_CTRL_GR_INFO_INDEX_FAMILY_MAX_TPC_PER_GPC +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_FBPA_PER_FBP +#define NV2080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT NV0080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT NV0080_CTRL_GR_INFO_INDEX_MAX_LEGACY_SUBCONTEXT_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT 
NV0080_CTRL_GR_INFO_INDEX_MAX_PER_ENGINE_SUBCONTEXT_COUNT +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_SINGLETON_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_SINGLETON_GPCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_GPCS NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_GPCS +#define NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_TPCS_PER_GFXC_GPC NV0080_CTRL_GR_INFO_INDEX_LITTER_NUM_GFXC_TPCS_PER_GFXC_GPC + +/* When adding a new INDEX, please update INDEX_MAX and MAX_SIZE accordingly + * NOTE: 0080 functionality is merged with 2080 functionality, so this max size + * reflects that. + */ +#define NV2080_CTRL_GR_INFO_INDEX_MAX NV0080_CTRL_GR_INFO_INDEX_MAX +#define NV2080_CTRL_GR_INFO_MAX_SIZE NV0080_CTRL_GR_INFO_MAX_SIZE + +/* valid SM version return values */ + +#define NV2080_CTRL_GR_INFO_SM_VERSION_NONE (0x00000000U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_05 (0x00000105U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_1 (0x00000110U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_2 (0x00000120U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_3 (0x00000130U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_4 (0x00000140U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_1_5 (0x00000150U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_2_0 (0x00000200U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_2_1 (0x00000210U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_2_2 (0x00000220U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_0 (0x00000300U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_1 (0x00000310U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_2 (0x00000320U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_3 (0x00000330U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_5 (0x00000350U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_6 (0x00000360U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_8 (0x00000380U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_3_9 (0x00000390U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_4_0 (0x00000400U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_0 (0x00000500U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_02 (0x00000502U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_03 (0x00000503U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_0 (0x00000600U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_01 (0x00000601U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_02 (0x00000602U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_0 (0x00000700U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_01 (0x00000701U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_02 (0x00000702U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_03 (0x00000703U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_05 (0x00000705U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_02 (0x00000802U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_06 (0x00000806U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_07 (0x00000807U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_08 (0x00000808U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_09 (0x00000809U) +#define NV2080_CTRL_GR_INFO_SM_VERSION_9_00 (0x00000900U) + +/* compatibility SM versions to match the official names in the ISA (e.g., SM5.2) */ +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_2 (NV2080_CTRL_GR_INFO_SM_VERSION_5_02) +#define NV2080_CTRL_GR_INFO_SM_VERSION_5_3 (NV2080_CTRL_GR_INFO_SM_VERSION_5_03) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_1 (NV2080_CTRL_GR_INFO_SM_VERSION_6_01) +#define NV2080_CTRL_GR_INFO_SM_VERSION_6_2 (NV2080_CTRL_GR_INFO_SM_VERSION_6_02) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_1 (NV2080_CTRL_GR_INFO_SM_VERSION_7_01) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_2 (NV2080_CTRL_GR_INFO_SM_VERSION_7_02) +#define NV2080_CTRL_GR_INFO_SM_VERSION_7_3 (NV2080_CTRL_GR_INFO_SM_VERSION_7_03) +#define 
NV2080_CTRL_GR_INFO_SM_VERSION_7_5 (NV2080_CTRL_GR_INFO_SM_VERSION_7_05) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_2 (NV2080_CTRL_GR_INFO_SM_VERSION_8_02) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_6 (NV2080_CTRL_GR_INFO_SM_VERSION_8_06) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_7 (NV2080_CTRL_GR_INFO_SM_VERSION_8_07) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_8 (NV2080_CTRL_GR_INFO_SM_VERSION_8_08) +#define NV2080_CTRL_GR_INFO_SM_VERSION_8_9 (NV2080_CTRL_GR_INFO_SM_VERSION_8_09) +#define NV2080_CTRL_GR_INFO_SM_VERSION_9_0 (NV2080_CTRL_GR_INFO_SM_VERSION_9_00) + + +/** + * NV2080_CTRL_CMD_GR_GET_INFO + * + * This command returns gr engine information for the associated GPU. + * Requests to retrieve gr information use a list of one or more + * NV2080_CTRL_GR_INFO structures. + * + * grInfoListSize + * This field specifies the number of entries on the caller's + * grInfoList. + * grInfoList + * This field specifies a pointer in the caller's address space + * to the buffer into which the gr information is to be returned. + * This buffer must be at least as big as grInfoListSize multiplied + * by the size of the NV2080_CTRL_GR_INFO structure. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. When MIG is enabled, this + * is a mandatory parameter. + */ +#define NV2080_CTRL_CMD_GR_GET_INFO (0x20801201U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_GR_GET_INFO_PARAMS { + NvU32 grInfoListSize; + NV_DECLARE_ALIGNED(NvP64 grInfoList, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_GET_INFO_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE + * + * This command is used to set the zcull context switch mode for the specified + * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the + * target channel does not support zcull context switch mode changes. + * + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have it's zcull context switch mode changed. + * hShareClient + * Support for sharing zcull buffers across RM clients is no longer + * supported. To maintain API compatibility, this field must match + * the hClient used in the control call. + * hShareChannel + * This parameter specifies the channel handle of + * the channel with which the zcull context buffer is to be shared. This + * parameter is valid when zcullMode is set to SEPARATE_BUFFER. This + * parameter should be set to the same value as hChannel if no + * sharing is intended. + * zcullMode + * This parameter specifies the new zcull context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_GLOBAL + * This mode is the normal zcull operation where it is not + * context switched and there is one set of globally shared + * zcull memory and tables. This mode is only supported as + * long as all channels use this mode. + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_NO_CTXSW + * This mode causes the zcull tables to be reset on a context + * switch, but the zcull buffer will not be saved/restored. + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_SEPARATE_BUFFER + * This mode will cause the zcull buffers and tables to be + * saved/restored on context switches. If a share channel + * ID is given (shareChID), then the 2 channels will share + * the zcull context buffers. 
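 *
 * Illustrative, non-normative sketch (not part of the RM API contract):
 * sharing a zcull context buffer between two channels might look roughly
 * like the following, assuming channel handles hChannelA and hChannelB
 * owned by the same client hClient, a subdevice handle hSubdevice, and a
 * hypothetical helper rmControl(hClient, hObject, cmd, pParams, paramsSize)
 * standing in for the client's usual RM control path. The parameter
 * structure and mode values are declared just below.
 *
 *     NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS zcullParams = { 0 };
 *     zcullParams.hChannel      = hChannelA;
 *     zcullParams.hShareClient  = hClient;    // must match the calling client
 *     zcullParams.hShareChannel = hChannelB;  // channel sharing the zcull buffer
 *     zcullParams.zcullMode     = NV2080_CTRL_CTXSW_ZCULL_MODE_SEPARATE_BUFFER;
 *
 *     NV_STATUS status = rmControl(hClient, hSubdevice,
 *                                  NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE,
 *                                  &zcullParams, sizeof(zcullParams));
 *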
+ */ +#define NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE (0x20801205U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x5" */ + +typedef struct NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS { + NvHandle hChannel; + NvHandle hShareClient; + NvHandle hShareChannel; + NvU32 zcullMode; +} NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS; +/* valid zcullMode values */ +#define NV2080_CTRL_CTXSW_ZCULL_MODE_GLOBAL (0x00000000U) +#define NV2080_CTRL_CTXSW_ZCULL_MODE_NO_CTXSW (0x00000001U) +#define NV2080_CTRL_CTXSW_ZCULL_MODE_SEPARATE_BUFFER (0x00000002U) + +/** + * NV2080_CTRL_CMD_GR_GET_ZCULL_INFO + * + * This command is used to query the RM for zcull information that the + * driver will need to allocate and manage the zcull regions. + * + * widthAlignPixels + * This parameter returns the width alignment restrictions in pixels + * used to adjust a surface for proper aliquot coverage (typically + * #TPC's * 16). + * + * heightAlignPixels + * This parameter returns the height alignment restrictions in pixels + * used to adjust a surface for proper aliquot coverage (typically 32). + * + * pixelSquaresByAliquots + * This parameter returns the pixel area covered by an aliquot + * (typically #Zcull_banks * 16 * 16). + * + * aliquotTotal + * This parameter returns the total aliquot pool available in HW. + * + * zcullRegionByteMultiplier + * This parameter returns multiplier used to convert aliquots in a region + * to the number of bytes required to save/restore them. + * + * zcullRegionHeaderSize + * This parameter returns the region header size which is required to be + * allocated and accounted for in any save/restore operation on a region. + * + * zcullSubregionHeaderSize + * This parameter returns the subregion header size which is required to be + * allocated and accounted for in any save/restore operation on a region. + * + * subregionCount + * This parameter returns the subregion count. + * + * subregionWidthAlignPixels + * This parameter returns the subregion width alignment restrictions in + * pixels used to adjust a surface for proper aliquot coverage + * (typically #TPC's * 16). + * + * subregionHeightAlignPixels + * This parameter returns the subregion height alignment restrictions in + * pixels used to adjust a surface for proper aliquot coverage + * (typically 62). + * + * The callee should compute the size of a zcull region as follows. + * (numBytes = aliquots * zcullRegionByteMultiplier + + * zcullRegionHeaderSize + zcullSubregionHeaderSize) + */ +#define NV2080_CTRL_CMD_GR_GET_ZCULL_INFO (0x20801206U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_SUBREGION_SUPPORTED +#define NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS { + NvU32 widthAlignPixels; + NvU32 heightAlignPixels; + NvU32 pixelSquaresByAliquots; + NvU32 aliquotTotal; + NvU32 zcullRegionByteMultiplier; + NvU32 zcullRegionHeaderSize; + NvU32 zcullSubregionHeaderSize; + NvU32 subregionCount; + NvU32 subregionWidthAlignPixels; + NvU32 subregionHeightAlignPixels; +} NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_CTXSW_PM_MODE + * + * This command is used to set the pm context switch mode for the specified + * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the + * target channel does not support pm context switch mode changes. 
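 *
 * Illustrative, non-normative sketch (not part of the RM API contract): as a
 * worked example of the zcull region size formula given for
 * NV2080_CTRL_CMD_GR_GET_ZCULL_INFO above, a client that has decided it
 * needs a given number of aliquots for a region might size the allocation
 * as follows, where zcullInfo is assumed to be a
 * NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS already filled in by that control
 * call and aliquots is the caller's chosen aliquot count.
 *
 *     NvU64 numBytes =
 *         (NvU64)aliquots * zcullInfo.zcullRegionByteMultiplier +
 *         zcullInfo.zcullRegionHeaderSize +
 *         zcullInfo.zcullSubregionHeaderSize;
 *     // numBytes is the size to allocate for one zcull save/restore region.
 *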
+ * + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its pm context switch mode changed. + * pmMode + * This parameter specifies the new pm context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_CTXSW_PM_MODE_NO_CTXSW + * This mode says that the pms are not to be context switched. + * NV2080_CTRL_CTXSW_PM_MODE_CTXSW + * This mode says that the pms in Mode-B are to be context switched. + * NV2080_CTRL_CTXSW_PM_MODE_STREAM_OUT_CTXSW + * This mode says that the pms in Mode-E (stream out) are to be context switched. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_PM_MODE (0x20801207U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x7" */ + +typedef struct NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS { + NvHandle hChannel; + NvU32 pmMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS; + +/* valid pmMode values */ +#define NV2080_CTRL_CTXSW_PM_MODE_NO_CTXSW (0x00000000U) +#define NV2080_CTRL_CTXSW_PM_MODE_CTXSW (0x00000001U) +#define NV2080_CTRL_CTXSW_PM_MODE_STREAM_OUT_CTXSW (0x00000002U) + +/* + * NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND + * + * This command is used to set the zcull context switch mode and virtual address + * for the specified channel. A value of NV_ERR_NOT_SUPPORTED is + * returned if the target channel does not support zcull context switch mode + * changes. + * + * hClient + * This parameter specifies the client handle of + * that owns the zcull context buffer. + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its zcull context switch mode changed. + * vMemPtr + * This parameter specifies the 64 bit virtual address + * for the allocated zcull context buffer. + * zcullMode + * This parameter specifies the new zcull context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_GLOBAL + * This mode is the normal zcull operation where it is not + * context switched and there is one set of globally shared + * zcull memory and tables. This mode is only supported as + * long as all channels use this mode. + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_NO_CTXSW + * This mode causes the zcull tables to be reset on a context + * switch, but the zcull buffer will not be saved/restored. + * NV2080_CTRL_GR_SET_CTXSW_ZCULL_MODE_SEPARATE_BUFFER + * This mode will cause the zcull buffers and tables to be + * saved/restored on context switches. If a share channel + * ID is given (shareChID), then the 2 channels will share + * the zcull context buffers. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND (0x20801208U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x8" */ + +typedef struct NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS { + NvHandle hClient; + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 vMemPtr, 8); + NvU32 zcullMode; +} NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS; +/* valid zcullMode values same as above NV2080_CTRL_CTXSW_ZCULL_MODE */ + +/* + * NV2080_CTRL_CMD_GR_CTXSW_PM_BIND + * + * This command is used to set the PM context switch mode and virtual address + * for the specified channel. A value of NV_ERR_NOT_SUPPORTED is + * returned if the target channel does not support PM context switch mode + * changes. + * + * hClient + * This parameter specifies the client handle of + * that owns the PM context buffer. 
+ * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its PM context switch mode changed. + * vMemPtr + * This parameter specifies the 64 bit virtual address + * for the allocated PM context buffer. + * pmMode + * This parameter specifies the new PM context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_CTXSW_PM_MODE_NO_CTXSW + * This mode says that the pms are not to be context switched + * NV2080_CTRL_GR_SET_CTXSW_PM_MODE_CTXSW + * This mode says that the pms are to be context switched + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_PM_BIND (0x20801209U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x9" */ + +typedef struct NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS { + NvHandle hClient; + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 vMemPtr, 8); + NvU32 pmMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS; +/* valid pmMode values same as above NV2080_CTRL_CTXSW_PM_MODE */ + +/* + * NV2080_CTRL_CMD_GR_SET_GPC_TILE_MAP + * + * Send a list of values used to describe GPC/TPC tile mapping tables. + * + * mapValueCount + * This field specifies the number of actual map entries. This count + * should equal the number of TPCs in the system. + * mapValues + * This field is a pointer to a buffer of NvU08 values representing map + * data. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_GR_SET_GPC_TILE_MAP_MAX_VALUES 128U +#define NV2080_CTRL_CMD_GR_SET_GPC_TILE_MAP (0x2080120aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0xA" */ + +typedef struct NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS { + NvU32 mapValueCount; + NvU8 mapValues[NV2080_CTRL_GR_SET_GPC_TILE_MAP_MAX_VALUES]; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE + * + * This command is used to set the SMPC context switch mode for the specified + * channel or channel group (TSG). A value of NV_ERR_NOT_SUPPORTED + * is returned if the target channel/TSG does not support SMPC context switch + * mode changes. If a channel is part of a TSG, the user must send in the TSG + * handle and not an individual channel handle, an error will be returned if a + * channel handle is used in this case. + * + * SMPC = SM Performance Counters + * + * hChannel + * This parameter specifies the channel or channel group (TSG) handle + * that is to have its SMPC context switch mode changed. + * If this parameter is set to 0, then the mode below applies to all current + * and future channels (i.e. we will be enabling/disabling global mode) + * smpcMode + * This parameter specifies the new SMPC context switch mode. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_CTXSW_SMPC_MODE_NO_CTXSW + * This mode says that the SMPC data is not to be context switched. + * NV2080_CTRL_GR_SET_CTXSW_SMPC_MODE_CTXSW + * This mode says that the SMPC data is to be context switched. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. 
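 *
 * Illustrative, non-normative sketch (not part of the RM API contract):
 * enabling SMPC context switching for a channel group might look roughly
 * like this, assuming a TSG handle hTsg, a subdevice handle hSubdevice under
 * hClient, a zero-initialized grRouteInfo (TYPE_NONE, which is assumed
 * sufficient when MIG is disabled), and a hypothetical helper
 * rmControl(hClient, hObject, cmd, pParams, paramsSize) in place of the
 * client's usual RM control path. The parameter structure and mode values
 * are declared just below.
 *
 *     NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS smpcParams = { 0 };
 *     smpcParams.hChannel = hTsg;  // TSG handle; not a member channel handle
 *     smpcParams.smpcMode = NV2080_CTRL_CTXSW_SMPC_MODE_CTXSW;
 *
 *     NV_STATUS status = rmControl(hClient, hSubdevice,
 *                                  NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE,
 *                                  &smpcParams, sizeof(smpcParams));
 *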
+ */ +#define NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE (0x2080120eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0xE" */ + +typedef struct NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS { + NvHandle hChannel; + NvU32 smpcMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS; + +/* valid smpcMode values */ +#define NV2080_CTRL_CTXSW_SMPC_MODE_NO_CTXSW (0x00000000U) +#define NV2080_CTRL_CTXSW_SMPC_MODE_CTXSW (0x00000001U) + +/* + * NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS + * + * This command returns an array of the mappings between SMs and GPC/TPCs. + * + * smId + * An array of the mappings between SMs and GPC/TPCs. + * smCount + * Returns the number of valid mappings in the array. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS (0x2080120fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_MAX_SM_COUNT 144U +#define NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS { + struct { + NvU32 gpcId; + NvU32 tpcId; + } smId[NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_MAX_SM_COUNT]; + NvU32 smCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE + * + * This command is used to set the preemption context switch mode for the specified + * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the + * target channel does not support preemption context switch mode changes. + * + * flags + * This field specifies flags for the preemption mode changes. + * These flags can tell callee which mode is valid in the call + * since we handle graphics and/or compute + * hChannel + * This parameter specifies the channel handle of the channel + * that is to have it's preemption context switch mode set. + * gfxpPreemptMode + * This parameter specifies the new Graphics preemption context switch + * mode. Legal values for this parameter include: + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_WFI + * This mode is the normal wait-for-idle context switch mode. + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP + * This mode causes the graphics engine to allow preempting the + * channel mid-triangle. + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP_POOL + * This mode causes the graphics engine to use a shared pool of buffers + * to support GfxP with lower memory overhead + * cilpPreemptMode + * This parameter specifies the new Compute preemption context switch + * mode. Legal values for this parameter include: + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_WFI + * This mode is the normal wait-for-idle context switch mode. + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CTA + * This mode causes the compute engine to allow preempting the channel + * at the instruction level. + * NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CILP + * This mode causes the compute engine to allow preempting the channel + * at the instruction level. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. 
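 *
 * Illustrative, non-normative sketch (not part of the RM API contract):
 * requesting GfxP for graphics and CILP for compute in one call might look
 * roughly like this, assuming a channel handle hChannel, a subdevice handle
 * hSubdevice under hClient, and a hypothetical helper
 * rmControl(hClient, hObject, cmd, pParams, paramsSize) in place of the
 * client's usual RM control path. The flag bit positions follow the 0:0
 * (CILP) and 1:1 (GFXP) field ranges defined below; callers would normally
 * use their preferred DRF-style field helpers rather than the literal
 * shifts written out here.
 *
 *     NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS preemptParams = { 0 };
 *     preemptParams.hChannel = hChannel;
 *     preemptParams.flags =
 *         (NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP_SET << 0) |
 *         (NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP_SET << 1);
 *     preemptParams.gfxpPreemptMode = NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP;
 *     preemptParams.cilpPreemptMode = NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
 *
 *     NV_STATUS status = rmControl(hClient, hSubdevice,
 *                                  NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE,
 *                                  &preemptParams, sizeof(preemptParams));
 *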
+ */ +#define NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE (0x20801210U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x10" */ + +typedef struct NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS { + NvU32 flags; + NvHandle hChannel; + NvU32 gfxpPreemptMode; + NvU32 cilpPreemptMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS; + +/* valid preemption flags */ +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP 0:0 +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP_IGNORE (0x00000000U) +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_CILP_SET (0x00000001U) +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP 1:1 +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP_IGNORE (0x00000000U) +#define NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_FLAGS_GFXP_SET (0x00000001U) + +/* valid Graphics mode values */ +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_WFI (0x00000000U) +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP (0x00000001U) +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_GFX_GFXP_POOL (0x00000002U) + +/* valid Compute mode values */ +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_WFI (0x00000000U) +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CTA (0x00000001U) +#define NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE_COMPUTE_CILP (0x00000002U) + +/* valid preemption buffers */ +typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS { + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7, + NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8, +} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS; + +/* + * NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND + * + * This command is used to set the preemption context switch mode and virtual + * addresses of the preemption buffers for the specified channel. A value of + * NV_ERR_NOT_SUPPORTED is returned if the target channel does not + * support preemption context switch mode changes. + * + * flags + * This field specifies flags for the preemption mode changes. + * These flags can tell callee which mode is valid in the call + * since we handle graphics and/or compute + * hClient + * This parameter specifies the client handle of + * that owns the preemption context buffer. + * hChannel + * This parameter specifies the channel handle of the channel + * that is to have its preemption context switch mode set. + * vMemPtr + * This parameter specifies the 64 bit virtual address + * for the allocated preemption context buffer. + * gfxpPreemptMode + * This parameter specifies the new Graphics preemption context switch + * mode. Legal values for this parameter include: + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_GFX_WFI + * This mode is the normal wait-for-idle context switch mode. + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_GFX_GFXP + * This mode causes the graphics engine to allow preempting the + * channel mid-triangle. + * cilpPreemptMode + * This parameter specifies the new Compute preemption context switch + * mode. 
Legal values for this parameter include: + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_WFI + * This mode is the normal wait-for-idle context switch mode. + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_CTA + * This mode causes the compute engine to allow preempting the channel + * at the instruction level. + * NV2080_CTRL_CTXSW_PREEMPTION_MODE_COMPUTE_CILP + * This mode causes the compute engine to allow preempting the channel + * at the instruction level. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND (0x20801211U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x11" */ + +typedef struct NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS { + NvU32 flags; + NvHandle hClient; + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 vMemPtrs[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END], 8); + NvU32 gfxpPreemptMode; + NvU32 cilpPreemptMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS; +/* valid mode and flag values same as above NV2080_CTRL_SET_CTXSW_PREEMPTION_MODE */ + +/* + * NV2080_CTRL_CMD_GR_PC_SAMPLING_MODE + * + * This command is used to apply the WAR for PC sampling to avoid hang in + * multi-ctx scenario. + * + * hChannel + * This parameter specifies the channel or channel group (TSG) handle + * that is to have its PC Sampling mode changed. + * samplingMode + * This parameter specifies whether sampling is turned ON or OFF. + * Legal values for this parameter include: + * NV2080_CTRL_GR_SET_PC_SAMPLING_MODE_DISABLED + * This mode says that PC sampling is disabled for current context. + * NV2080_CTRL_GR_SET_PC_SAMPLING_MODE_ENABLED + * This mode says that PC sampling is disabled for current context. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_PC_SAMPLING_MODE (0x20801212U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x12" */ + +typedef struct NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS { + NvHandle hChannel; + NvU32 samplingMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS; + +/* valid samplingMode values */ +#define NV2080_CTRL_PC_SAMPLING_MODE_DISABLED (0x00000000U) +#define NV2080_CTRL_PC_SAMPLING_MODE_ENABLED (0x00000001U) + +/* + * NV2080_CTRL_CMD_GR_GET_ROP_INFO + * + * Gets information about ROPs including the ROP unit count and information + * about ROP operations per clock. + * + * ropUnitCount + * The count of active ROP units. + * ropOperationsFactor. + * The number of ROP operations per clock for a single ROP unit. + * ropOperationsCount + * The number of ROP operations per clock across all active ROP units. + */ +#define NV2080_CTRL_CMD_GR_GET_ROP_INFO (0x20801213U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ROP_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ROP_INFO_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_GR_GET_ROP_INFO_PARAMS { + NvU32 ropUnitCount; + NvU32 ropOperationsFactor; + NvU32 ropOperationsCount; +} NV2080_CTRL_GR_GET_ROP_INFO_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_CTXSW_STATS + * + * This command is used to get the context switch statistics. The user can + * also add a flag to tell RM to reset the stats counters back to 0. 
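 *
 * Illustrative, non-normative sketch (not part of the RM API contract):
 * reading and then resetting the counters for a TSG might look roughly like
 * this, assuming a TSG handle hTsg, a subdevice handle hSubdevice under
 * hClient, and a hypothetical helper
 * rmControl(hClient, hObject, cmd, pParams, paramsSize) in place of the
 * client's usual RM control path; reportCtxswStats() is likewise
 * hypothetical. The parameter structure and flag values are declared just
 * below; because the _RESET field occupies bit 0, the _TRUE value can be
 * assigned to flags directly.
 *
 *     NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS statsParams = { 0 };
 *     statsParams.hChannel = hTsg;  // TSG handle if the channel is in a TSG
 *     statsParams.flags    = NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET_TRUE;
 *
 *     if (rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_GR_GET_CTXSW_STATS,
 *                   &statsParams, sizeof(statsParams)) == NV_OK)
 *     {
 *         // Counts reflect activity since the last reset; the counters are
 *         // zeroed for the next interval because _RESET_TRUE was passed.
 *         reportCtxswStats(statsParams.saveCnt, statsParams.restoreCnt);
 *     }
 *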
+ * + * hChannel + * This parameter specifies the channel or channel group (TSG) handle + * that is to have the stats returned. Note, must be the TSG handle if + * channel is part of a TSG. + * flags + * This parameter specifies processing flags. See possible flags below. + * saveCnt + * This parameter returns the number of saves on the channel. + * restoreCnt + * This parameter returns the number of restores on the channel. + * wfiSaveCnt + * This parameter returns the number of WFI saves on the channel. + * ctaSaveCnt + * This parameter returns the number of CTA saves on the channel. + * cilpSaveCnt + * This parameter returns the number of CILP saves on the channel. + * gfxpSaveCnt + * This parameter returns the number of GfxP saves on the channel. + */ +#define NV2080_CTRL_CMD_GR_GET_CTXSW_STATS (0x20801215U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x15" */ + +typedef struct NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS { + NvHandle hChannel; + NvU32 flags; + NvU32 saveCnt; + NvU32 restoreCnt; + NvU32 wfiSaveCnt; + NvU32 ctaSaveCnt; + NvU32 cilpSaveCnt; + NvU32 gfxpSaveCnt; +} NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS; +/* valid GET_CTXSW_STATS flags settings */ +#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET 0:0 +#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET_FALSE (0x00000000U) +#define NV2080_CTRL_GR_GET_CTXSW_STATS_FLAGS_RESET_TRUE (0x00000001U) + + + +/* + * NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE + * + * This command provides the size, alignment of all context buffers including global and + * local context buffers which has been created & will be mapped on a context + * + * hChannel [IN] + * This parameter specifies the channel or channel group (TSG) handle + * totalBufferSize [OUT] + * This parameter returns the total context buffers size. + */ +#define NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_SIZE (0x20801218U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS { + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 totalBufferSize, 8); +} NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS; + +/* + * NV2080_CTRL_GR_CTX_BUFFER_INFO + * alignment + * Specifies the alignment requirement for each context buffer + * size + * Aligned size of context buffer + * bufferHandle [deprecated] + * Opaque pointer to memdesc. Used by kernel clients for tracking purpose only. + * pageCount + * allocation size in the form of pageCount + * physAddr + * Physical address of the buffer first page + * bufferType + * NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID type of this buffer + * aperture + * allocation aperture. Could be SYSMEM, VIDMEM, UNKNOWN + * kind + * PTE kind of this allocation. + * pageSize + * Page size of the buffer. + * bIsContigous + * States if physical allocation for this buffer is contiguous. PageSize will + * have no meaning if this flag is set. + * bGlobalBuffer + * States if a defined buffer is global as global buffers need to be mapped + * only once in TSG. + * bLocalBuffer + * States if a buffer is local to a channel. + * bDeviceDescendant + * TRUE if the allocation is a constructed under a Device or Subdevice. + * uuid + * SHA1 UUID of the Device or Subdevice. Valid when deviceDescendant is TRUE. 
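 *
 * Illustrative, non-normative sketch (not part of the RM API contract): a
 * kernel client consuming NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO (declared
 * further below) might summarize the returned buffer list like this, where
 * bufParams is assumed to be a NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS
 * already filled in by that control call.
 *
 *     NvU64 totalSize = 0;
 *     NvU64 maxAlign  = 0;
 *     for (NvU32 i = 0; i < bufParams.bufferCount; i++)
 *     {
 *         const NV2080_CTRL_GR_CTX_BUFFER_INFO *pInfo = &bufParams.ctxBufferInfo[i];
 *         totalSize += pInfo->size;        // size is reported already aligned
 *         if (pInfo->alignment > maxAlign)
 *             maxAlign = pInfo->alignment; // strictest placement requirement seen
 *     }
 *     // totalSize approximates the footprint of all context buffers, and
 *     // maxAlign is the largest alignment any mapping must honor.
 *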
+ */ +typedef struct NV2080_CTRL_GR_CTX_BUFFER_INFO { + NV_DECLARE_ALIGNED(NvU64 alignment, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NV_DECLARE_ALIGNED(NvP64 bufferHandle, 8); + NV_DECLARE_ALIGNED(NvU64 pageCount, 8); + NV_DECLARE_ALIGNED(NvU64 physAddr, 8); + NvU32 bufferType; + NvU32 aperture; + NvU32 kind; + NvU32 pageSize; + NvBool bIsContigous; + NvBool bGlobalBuffer; + NvBool bLocalBuffer; + NvBool bDeviceDescendant; + NvU8 uuid[16]; +} NV2080_CTRL_GR_CTX_BUFFER_INFO; +typedef struct NV2080_CTRL_GR_CTX_BUFFER_INFO *PNV2080_CTRL_GR_CTX_BUFFER_INFO; + +#define NV2080_CTRL_GR_MAX_CTX_BUFFER_COUNT 64U + +/* + * NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO + * + * This command provides the size, alignment of all context buffers including global and + * local context buffers which has been created & will be mapped on a context. + * If the client invoking the command is a kernel client, the buffers are retained. + * + * hUserClient [IN] + * This parameter specifies the client handle that owns this channel. + * hChannel [IN] + * This parameter specifies the channel or channel group (TSG) handle + * bufferCount [OUT] + * This parameter specifies the number of entries in ctxBufferInfo filled + * by the command. + * ctxBufferInfo [OUT] + * Array of context buffer info containing alignment, size etc. + */ +#define NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO (0x20801219U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS { + NvHandle hUserClient; + NvHandle hChannel; + NvU32 bufferCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTX_BUFFER_INFO ctxBufferInfo[NV2080_CTRL_GR_MAX_CTX_BUFFER_COUNT], 8); +} NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS; + +// Aperture flags +#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_UNKNWON ADDR_UNKNOWN +#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_SYSMEM ADDR_SYSMEM +#define NV2080_CTRL_GR_CTX_BUFFER_INFO_APERTURE_FBMEM ADDR_FBMEM + +/* + * NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER + * This command returns the global logical ordering of SM w.r.t GPCs/TPCs. + * + * NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS + * This structure holds the TPC/SM ordering info. + * + * gpcId + * Logical GPC Id. + * This is the ordering of enabled GPCs post floor sweeping. + * The GPCs are numbered from 0 to N-1, where N is the enabled GPC count. + * + * localTpcId + * Local Logical TPC Id. + * This is the ordering of enabled TPCs within a GPC post floor sweeping. + * This ID is used in conjunction with the gpcId. + * The TPCs are numbered from 0 to N-1, where N is the enabled TPC count for the given GPC. + * + * localSmId + * Local Logical SM Id. + * This is the ordering of enabled SMs within a TPC post floor sweeping. + * This ID is used in conjunction with the localTpcId. + * The SMs are numbered from 0 to N-1, where N is the enabled SM count for the given TPC. + * + * globalTpcId + * Global Logical TPC Id. + * This is the ordering of all enabled TPCs in the GPU post floor sweeping. + * The TPCs are numbered from 0 to N-1, where N is the enabled TPC count across all GPCs + * + * globalSmId + * Global Logical SM Id array. + * This is the global ordering of all enabled SMs in the GPU post floor sweeping. + * The SMs are numbered from 0 to N-1, where N is the enabled SM count across all GPCs. + * + * virtualGpcId + * Virtual GPC Id. 
+ * This is the ordering of enabled GPCs post floor sweeping (ordered in increasing + * number of TPC counts) The GPCs are numbered from 0 to N-1, where N is the + * enabled GPC count and 8-23 for singleton TPC holders. + * + * migratableTpcId + * Migratable TPC Id. + * This is the same as the Local Tpc Id for virtual GPC 0-8 (true physical gpcs) and 0 for + * virtual gpcs 8-23 that represent singleton tpcs. + * + * numSm + * Enabled SM count across all GPCs. + * This represent the valid entries in the globalSmId array + * + * numTpc + * Enabled TPC count across all GPCs. + * + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + */ +#define NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER (0x2080121bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER_MAX_SM_COUNT 512U + +#define NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS_MESSAGE_ID (0x1BU) + +typedef struct NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS { + struct { + NvU16 gpcId; + NvU16 localTpcId; + NvU16 localSmId; + NvU16 globalTpcId; + NvU16 virtualGpcId; + NvU16 migratableTpcId; + } globalSmId[NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER_MAX_SM_COUNT]; + + NvU16 numSm; + NvU16 numTpc; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS; + +/* +* NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL +* +* This command gives current resident channel on GR engine +* +* chID [OUT] +* RM returns current resident channel on GR engine +* grRouteInfo [IN] +* This parameter specifies the routing information used to +* disambiguate the target GR engine. +*/ +#define NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL (0x2080121cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x1C" */ + +typedef struct NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS { + NvU32 chID; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA + * + * This command provides the _VAT_ALARM data i.e. error and warning, counter and + * timestamps along with max GPC and TPC per GPC count. + * + * smVatAlarm [OUT] + * VAT Alarm data array per SM containing per GPC per TPC, counter and + * timestamp values for error and warning alarms. + * maxGpcCount [OUT] + * This parameter returns max GPC count. + * maxTpcPerGpcCount [OUT] + * This parameter returns the max TPC per GPC count. 
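+ *
+ * For illustration only (a minimal sketch; the control dispatch is omitted
+ * and the returned counts are assumed not to exceed the _MAX_ array bounds
+ * defined below):
+ *
+ *     NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS p = { 0 };
+ *     // ... issue NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA ...
+ *     for (NvU32 g = 0; g < p.maxGpcCount; g++) {
+ *         for (NvU32 t = 0; t < p.maxTpcPerGpcCount; t++) {
+ *             const NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC *a =
+ *                 &p.smVatAlarm.gpc[g].tpc[t];
+ *             // a->errorCounter/a->errorTimestamp and the warning pair give
+ *             // the per-TPC alarm state.
+ *         }
+ *     }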
+ */ +#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_GPC_COUNT 10U +#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_TPC_PER_GPC_COUNT 10U + +#define NV2080_CTRL_CMD_GR_GET_VAT_ALARM_DATA (0x2080121dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x1D" */ + +typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC { + NV_DECLARE_ALIGNED(NvU64 errorCounter, 8); + NV_DECLARE_ALIGNED(NvU64 errorTimestamp, 8); + NV_DECLARE_ALIGNED(NvU64 warningCounter, 8); + NV_DECLARE_ALIGNED(NvU64 warningTimestamp, 8); +} NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC; + +typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA_PER_TPC tpc[NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_TPC_PER_GPC_COUNT], 8); +} NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC; + +typedef struct NV2080_CTRL_GR_VAT_ALARM_DATA { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA_PER_GPC gpc[NV2080_CTRL_CMD_GR_GET_VAT_ALARM_MAX_GPC_COUNT], 8); +} NV2080_CTRL_GR_VAT_ALARM_DATA; + +typedef struct NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_VAT_ALARM_DATA smVatAlarm, 8); + NvU32 maxGpcCount; + NvU32 maxTpcPerGpcCount; +} NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS; +typedef struct NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS *PNV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE + * + * This command provides the size of GR attribute buffer. + * + * attribBufferSize [OUT] + * This parameter returns the attribute buffer size. + */ +#define NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE (0x2080121eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x1EU) + +typedef struct NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS { + NvU32 attribBufferSize; +} NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GFX_POOL_QUERY_SIZE + * + * This API queries size parameters for a request maximum graphics preemption + * pool size. It is only available to kernel callers + * + * NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS + * struct to return the size parameters + * + * maxSlots + * Input specifying the maximum number of slots, RM will calculate the output + * parameters based on this. Must be non-zero + * ctrlStructSize + * Output indicating the required size in bytes of the control structure to + * support a pool of maxSlots size. + * ctrlStructAlign + * Output indicating the required alignment of the control structure + * poolSize + * Output indicating the required size in bytes of the GfxP Pool. + * poolAlign + * Output indicating the required alignment of the GfxP Pool + * slotStride + * The number of bytes in each slot, i * slotStride gives the offset from the + * base of the pool to a given slot + */ +#define NV2080_CTRL_CMD_GR_GFX_POOL_QUERY_SIZE (0x2080121fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x1F" */ + +typedef struct NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS { + NvU32 maxSlots; + NvU32 slotStride; + NV_DECLARE_ALIGNED(NvU64 ctrlStructSize, 8); + NV_DECLARE_ALIGNED(NvU64 ctrlStructAlign, 8); + NV_DECLARE_ALIGNED(NvU64 poolSize, 8); + NV_DECLARE_ALIGNED(NvU64 poolAlign, 8); +} NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GFX_POOL_INITIALIZE + * + * This API takes a CPU pointer to a GFxP Pool Control Structure and does the + * required onetime initialization. 
It should be called once and only once
+ * before a pool is used. It is only accessible to kernel callers.
+ *
+ * NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS
+ *   struct to hand in the required info to RM
+ *
+ *   pControlStructure
+ *     This input is the kernel CPU pointer to the control structure.
+ */
+#define NV2080_CTRL_CMD_GR_GFX_POOL_INITIALIZE (0x20801220U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x20" */
+
+typedef struct NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pControlStructure, 8);
+    NvU32 maxSlots;
+} NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS;
+
+#define NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS 64U
+
+/*
+ * NV2080_CTRL_CMD_GR_GFX_POOL_ADD_SLOTS
+ *
+ * This API adds a list of buffer slots to a given control structure. It can
+ * only be called when no channel using the given pool is running or may become
+ * running for the duration of this call. If more slots are added than there
+ * is room for in the control structure the behavior is undefined. It is only
+ * accessible to kernel callers.
+ *
+ * NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS
+ *
+ *   pControlStructure
+ *     This input is the kernel CPU pointer to the control structure
+ *   numSlots
+ *     This input indicates how many slots are being added and are contained in the slots parameter
+ *   slots
+ *     This input contains an array of the slots to be added to the control structure
+ */
+#define NV2080_CTRL_CMD_GR_GFX_POOL_ADD_SLOTS (0x20801221U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x21" */
+
+typedef struct NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS {
+    NV_DECLARE_ALIGNED(NvP64 pControlStructure, 8);
+    NvU32 numSlots;
+    NvU32 slots[NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS];
+} NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS
+ *
+ * This API removes buffer slots from a given control structure. It can
+ * only be called when no channel using the given pool is running or may become
+ * running for the duration of this call. It can operate in two modes: either
+ * it will remove a specified number of slots, or a specified list of slots.
+ *
+ * It is only accessible to kernel callers.
+ *
+ * NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS_PARAMS
+ *
+ *   pControlStructure
+ *     This input is the kernel CPU pointer to the control structure
+ *   numSlots
+ *     This input indicates how many slots are being removed. If
+ *     bRemoveSpecificSlots is true, then it also indicates how many entries in
+ *     the slots array are populated.
+ *   slots
+ *     This array is either an input or output. If bRemoveSpecificSlots is true,
+ *     then this will contain the list of slots to remove. If it is false, then
+ *     it will be populated by RM with the indexes of the slots that were
+ *     removed.
+ *   bRemoveSpecificSlots
+ *     This input determines which mode the call will run in. If true, the caller
+ *     specifies the list of slots they want removed; if any of those slots
+ *     are not on the freelist, the call will fail. If false, they only specify
+ *     the number of slots they want removed and RM will pick up to that
+ *     many. If there are not enough slots on the freelist to remove the
+ *     requested amount, RM will return the number it was able to remove.
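+ *
+ * For illustration only (a minimal sketch; the kernel control-structure
+ * pointer and the control dispatch are assumptions): a caller draining two
+ * arbitrary slots would use the non-specific mode:
+ *
+ *     NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS p = { 0 };
+ *     p.pControlStructure    = pCtrlStruct;  // NvP64-encoded kernel CPU pointer (assumed)
+ *     p.numSlots             = 2;
+ *     p.bRemoveSpecificSlots = NV_FALSE;     // let RM pick the slots
+ *     // ... issue NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS ...
+ *     // On return, p.numSlots holds how many slots were actually removed
+ *     // and p.slots[] lists their indexes.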
+ */ +#define NV2080_CTRL_CMD_GR_GFX_POOL_REMOVE_SLOTS (0x20801222U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x22" */ + +typedef struct NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pControlStructure, 8); + NvU32 numSlots; + NvU32 slots[NV2080_CTRL_GR_GFX_POOL_MAX_SLOTS]; + NvBool bRemoveSpecificSlots; +} NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS; + + + +#define NV2080_CTRL_CMD_GR_GET_CAPS_V2 (0x20801227U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x27U) + +typedef NV0080_CTRL_GR_GET_CAPS_V2_PARAMS NV2080_CTRL_GR_GET_CAPS_V2_PARAMS; + +#define NV2080_CTRL_CMD_GR_GET_INFO_V2 (0x20801228U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_INFO_V2_PARAMS_MESSAGE_ID (0x28U) + +typedef NV0080_CTRL_GR_GET_INFO_V2_PARAMS NV2080_CTRL_GR_GET_INFO_V2_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_GPC_MASK + * + * This command returns a mask of enabled GPCs for the associated subdevice. + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * gpcMask[OUT] + * This parameter returns a mask of enabled GPCs. Each GPC has an ID + * that's equivalent to the corresponding bit position in the mask. + */ +#define NV2080_CTRL_CMD_GR_GET_GPC_MASK (0x2080122aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_GPC_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_GPC_MASK_PARAMS_MESSAGE_ID (0x2AU) + +typedef struct NV2080_CTRL_GR_GET_GPC_MASK_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 gpcMask; +} NV2080_CTRL_GR_GET_GPC_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_TPC_MASK + * + * This command returns a mask of enabled TPCs for a specified GPC. + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * gpcId[IN] + * This parameter specifies the GPC for which TPC information is + * to be retrieved. If the GPC with this ID is not enabled this command + * will return an tpcMask value of zero. + * + * tpcMask[OUT] + * This parameter returns a mask of enabled TPCs for the specified GPC. + * Each TPC has an ID that's equivalent to the corresponding bit + * position in the mask. + */ +#define NV2080_CTRL_CMD_GR_GET_TPC_MASK (0x2080122bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_TPC_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_TPC_MASK_PARAMS_MESSAGE_ID (0x2BU) + +typedef struct NV2080_CTRL_GR_GET_TPC_MASK_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 gpcId; + NvU32 tpcMask; +} NV2080_CTRL_GR_GET_TPC_MASK_PARAMS; + +#define NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE (0x2080122cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x2C" */ + +typedef NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! 
+#define NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS_MESSAGE_ID (0x2CU) + +typedef struct NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS params, 8); +} NV2080_CTRL_CMD_GR_SET_TPC_PARTITION_MODE_FINN_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES + * + * This command is used to provide the caller with the alignment and size + * of the context save region for an engine + * + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * engineId + * This parameter is an input parameter specifying the engineId for which + * the alignment/size is requested. + * alignment + * This parameter is an output parameter which will be filled in with the + * minimum alignment requirement. + * size + * This parameter is an output parameter which will be filled in with the + * minimum size of the context save region for the engine. + * bInfoPopulated + * This parameter will be set if alignment and size are already set with + * valid values from a previous call. + */ + +#define NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES (0x2080122dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS_MESSAGE_ID (0x2DU) + +typedef struct NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 engineId; + NvU32 alignment; + NvU32 size; + NvBool bInfoPopulated; +} NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER + * + * This command provides an interface to retrieve the speed select values of + * various instruction types. + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * imla0[OUT] + * The current speed select for IMLA0. + * + * fmla16[OUT] + * The current speed select for FMLA16. + * + * dp[OUT] + * The current speed select for DP. + * + * fmla32[OUT] + * The current speed select for FMLA32. + * + * ffma[OUT] + * The current speed select for FFMA. + * + * imla1[OUT] + * The current speed select for IMLA1. + * + * imla2[OUT] + * The current speed select for IMLA2. + * + * imla3[OUT] + * The current speed select for IMLA3. + * + * imla4[OUT] + * The current speed select for IMLA4. 
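+ *
+ * For illustration only (a minimal sketch; the control dispatch is omitted):
+ *
+ *     NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS p = { 0 };
+ *     // ... issue NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER ...
+ *     if (p.ffma == NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_2)
+ *     {
+ *         // FFMA is currently issuing at half of full speed; each output
+ *         // field is decoded with its own _FULL_SPEED/_REDUCED_SPEED_1_x
+ *         // defines below.
+ *     }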
+ */ +#define NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER (0x20801230U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA0_REDUCED_SPEED_1_64 (0x6U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA16_REDUCED_SPEED_1_32 (0x5U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_DP_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_DP_REDUCED_SPEED (0x1U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FMLA32_REDUCED_SPEED_1_32 (0x5U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_FFMA_REDUCED_SPEED_1_32 (0x5U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA1_REDUCED_SPEED_1_64 (0x6U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA2_REDUCED_SPEED_1_64 (0x6U) + 
+#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA3_REDUCED_SPEED_1_64 (0x6U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_FULL_SPEED (0x0U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_2 (0x1U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_4 (0x2U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_8 (0x3U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_16 (0x4U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_32 (0x5U) +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_IMLA4_REDUCED_SPEED_1_64 (0x6U) + +#define NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU8 imla0; + NvU8 fmla16; + NvU8 dp; + NvU8 fmla32; + NvU8 ffma; + NvU8 imla1; + NvU8 imla2; + NvU8 imla3; + NvU8 imla4; +} NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID + * + * *DEPRECATED* Use NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2 instead + * + * This command is used to create a FECS bind-point to an event buffer that + * is filtered by UID. + * + * hEventBuffer[IN] + * The event buffer to bind to + * + * recordSize[IN] + * The size of the FECS record in bytes + * + * levelOfDetail[IN] + * One of NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_: + * FULL: Report all CtxSw events + * SIMPLE: Report ACTIVE_REGION_START and ACTIVE_REGION_END only + * COMPAT: Events that KMD is interested in (for backwards compatibility) + * CUSTOM: Report events in the eventFilter field + * NOTE: RM may override the level-of-detail depending on the caller + * + * eventFilter[IN] + * Bitmask of events to report if levelOfDetail is CUSTOM + * + * bAllUsers[IN] + * Only report FECS CtxSw data for the current user if false, for all users if true + */ + +#define NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID (0x20801231U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD { + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_FULL = 0, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_SIMPLE = 1, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_COMPAT = 2, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_CUSTOM = 3, +} NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD; + +#define NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS_MESSAGE_ID (0x31U) + +typedef struct NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS { + NvHandle hEventBuffer; + NvU32 recordSize; + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD levelOfDetail; + NvU32 eventFilter; + NvBool bAllUsers; +} NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK + * + * This command returns a mask of physical GPC Ids for the associated syspipe + * + * physSyspipeId[IN] + * This parameter specifies syspipe for which phys GPC mask is requested + * + * gpcMask[OUT] + * This 
parameter returns a mask of mapped GPCs to provided syspipe. + * Each GPC-ID has a corresponding bit position in the mask. + */ +#define NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK (0x20801232U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS_MESSAGE_ID (0x32U) + +typedef struct NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS { + NvU32 physSyspipeId; + NvU32 gpcMask; +} NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_PPC_MASK + * + * This command returns a mask of enabled PPCs for a specified GPC. + * + * grRouteInfo[IN] + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + * + * gpcId[IN] + * This parameter specifies the GPC for which TPC information is + * to be retrieved. If the GPC with this ID is not enabled this command + * will return an ppcMask value of zero. + * + * ppcMask[OUT] + * This parameter returns a mask of enabled PPCs for the specified GPC. + * Each PPC has an ID that's equivalent to the corresponding bit + * position in the mask. + */ +#define NV2080_CTRL_CMD_GR_GET_PPC_MASK (0x20801233U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_PPC_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_PPC_MASK_PARAMS_MESSAGE_ID (0x33U) + +typedef struct NV2080_CTRL_GR_GET_PPC_MASK_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 gpcId; + NvU32 ppcMask; +} NV2080_CTRL_GR_GET_PPC_MASK_PARAMS; + +#define NV2080_CTRL_CMD_GR_GET_NUM_TPCS_FOR_GPC (0x20801234U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS_MESSAGE_ID (0x34U) + +typedef struct NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS { + NvU32 gpcId; + NvU32 numTpcs; +} NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_CTXSW_MODES + * + * This command is used to get context switch modes for the specified + * channel. A value of NV_ERR_NOT_SUPPORTED is returned if the + * target channel does not support context switch mode changes. + * + * hChannel + * This parameter specifies the channel handle of + * the channel that is to have its context switch modes retrieved. + * zcullMode + * See NV2080_CTRL_CMD_GR_CTXSW_ZCULL_MODE for possible return values + * pmMode + * See NV2080_CTRL_CMD_GR_CTXSW_PM_MODE for possible return values + * smpcMode + * See NV2080_CTRL_CMD_GR_CTXSW_SMPC_MODE for possible return values + * cilpPreemptMode + * See NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE for possible return values + * gfxpPreemptMode + * See NV2080_CTRL_CMD_GR_SET_CTXSW_PREEMPTION_MODE for possible return values + */ +#define NV2080_CTRL_CMD_GR_GET_CTXSW_MODES (0x20801235U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS_MESSAGE_ID (0x35U) + +typedef struct NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS { + NvHandle hChannel; + NvU32 zcullMode; + NvU32 pmMode; + NvU32 smpcMode; + NvU32 cilpPreemptMode; + NvU32 gfxpPreemptMode; +} NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP + * + * Get a list of values used to describe GPC/TPC tile mapping tables. + * + * mapValueCount + * This field specifies the number of actual map entries. 
This count + * should equal the number of TPCs in the system. + * mapValues + * This field is a pointer to a buffer of NvU08 values representing map + * data. + * grRouteInfo + * This parameter specifies the routing information used to + * disambiguate the target GR engine. + */ +#define NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP (0x20801236U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | 0x36" */ + +typedef NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP_FINN_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP_FINN_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS params, 8); +} NV2080_CTRL_CMD_GR_GET_GPC_TILE_MAP_FINN_PARAMS; + + + +/* + * NV2080_CTRL_CMD_GR_GET_ZCULL_MASK + * + * This command returns a mask of enabled ZCULLs for a specified GPC. + * + * gpcId[IN] + * This parameter, physical GPC index, specifies the GPC for which ZCULL + * information is to be retrieved. If the GPC with this ID is not enabled + * this command will return a zcullMask value of zero. + * + * zcullMask[OUT] + * This parameter returns a mask of enabled ZCULLs for the specified GPC. + * Each ZCULL has an ID that's equivalent to the corresponding bit + * position in the mask. + */ + +#define NV2080_CTRL_CMD_GR_GET_ZCULL_MASK (0x20801237U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS_MESSAGE_ID (0x37U) + +typedef struct NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS { + NvU32 gpcId; + NvU32 zcullMask; +} NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS; + +/* + * NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2 + * + * This command is used to create a FECS bind-point to an event buffer that + * is filtered by UID. 
+ * + * hEventBuffer[IN] + * The event buffer to bind to + * + * recordSize[IN] + * The size of the FECS record in bytes + * + * levelOfDetail[IN] + * One of NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_: + * FULL: Report all CtxSw events + * SIMPLE: Report ACTIVE_REGION_START and ACTIVE_REGION_END only + * COMPAT: Events that KMD is interested in (for backwards compatibility) + * CUSTOM: Report events in the eventFilter field + * NOTE: RM may override the level-of-detail depending on the caller + * + * eventFilter[IN] + * Bitmask of events to report if levelOfDetail is CUSTOM + * + * bAllUsers[IN] + * Only report FECS CtxSw data for the current user if false, for all users if true + * + * reasonCode [OUT] + * Reason for failure + */ +#define NV2080_CTRL_CMD_GR_FECS_BIND_EVTBUF_FOR_UID_V2 (0x20801238U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID << 8) | NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_GR_FECS_BIND_EVTBUF_REASON_CODE { + NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NONE = 0, + NV2080_CTRL_GR_FECS_BIND_REASON_CODE_GPU_TOO_OLD = 1, + NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED_GPU = 2, + NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED = 3, + NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NEED_ADMIN = 4, + NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NEED_CAPABILITY = 5, +} NV2080_CTRL_GR_FECS_BIND_EVTBUF_REASON_CODE; + +#define NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS_MESSAGE_ID (0x38U) + +typedef struct NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS { + NvHandle hEventBuffer; + NvU32 recordSize; + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD levelOfDetail; + NvU32 eventFilter; + NvBool bAllUsers; + NvU32 reasonCode; +} NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS; + +/* _ctrl2080gr_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h new file mode 100644 index 000000000..c36848a0f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080grmgr.h @@ -0,0 +1,266 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080grmgr.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX grmgr control commands and parameters */ + +// +// NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO +// +// This control call works as a batched query interface where we +// have multiple different queries that can be passed in +// and RM will return the associated data and status type +// If there is any error in NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS, +// we will immediately fail the call. +// However, if there is an error in the query-specific calls, we will +// log the error and march on. +// +// NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS +// numQueries[IN] +// - Specifies the number of valid queries that the caller will be passing in +// +// Possible status values returned are: +// NV_OK +// NV_ERR_INVALID_ARGUMENT +// NV_ERR_INVALID_STATE +// +#define NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO (0x20803801) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GRMGR_INTERFACE_ID << 8) | NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_MESSAGE_ID" */ + +// Max number of queries that can be batched in a single call to NV2080_CTRL_CMD_GRMGR_GET_GR_FS_INFO +#define NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES 96 + +// +// Preference is to keep max.size of union at 24 bytes (i.e. 6 32-bit members) +// so that the size of entire query struct is maintained at 32 bytes, to ensure +// that overall params struct does not exceed 4kB +// +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_MAX_SIZE 32 +#define NV2080_CTRL_GRMGR_MAX_SMC_IDS 8 + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS + * gpcCount[OUT] + * - No. of logical/local GPCs which client can use to create the + * logical/local mask respectively + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS { + NvU32 gpcCount; // param[out] - logical/local GPC mask +} NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS + * gpcId[IN] + * - Logical/local GPC ID + * chipletGpcMap[OUT] + * - Returns chiplet GPC ID for legacy case and device monitoring client + * - Returns local GPC ID (== input gpcId) for SMC client + * - Does not support DM attribution case + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS { + NvU32 gpcId; // param[in] - logical/local GPC ID + NvU32 chipletGpcMap; // param[out] - chiplet GPC ID +} NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS + * gpcId[IN] + * - Logical/local GPC ID + * tpcMask[OUT] + * - Returns physical TPC mask for legacy, DM client and SMC cases + * - Does not support DM attribution case + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS { + NvU32 gpcId; // param[in] - logical/local GPC ID + NvU32 tpcMask; // param[out] - physical TPC mask +} NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS + * gpcId[IN] + * - Logical/local GPC ID + * ppcMask[OUT] + * - Returns physical PPC mask for legacy, DM client and SMC cases + * - Does not support DM attribution case + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS { + NvU32 gpcId; // param[in] - logical/local GPC ID + NvU32 ppcMask; // param[out] - physical PPC mask +} NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS; + +/*! + * !!! DEPRECATED - This query will return NV_ERR_NOT_SUPPORTED since deleting + * it would break driver compatibility !!! 
+ * + * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS + * swizzId[IN] + * - Swizz ID of partition + * - A DM client with an invalid swizz ID, will fail this call + * - This parameter is not compulsory for an SMC client; the subscription + * itself will do the necessary validation. + * gpcId[IN] + * - Logical/local GPC ID + * chipletGpcMap[OUT] + * - Returns chiplet GPC ID for legacy case and device monitoring client + * - Returns local GPC ID (== input gpcId) for SMC client + * - Does not support non-attribution case for DM client + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS { + NvU32 swizzId; // param[in] - swizz ID of partition + NvU32 gpcId; // param[in] - logical/local GPC ID + NvU32 chipletGpcMap; // param[out] - chiplet GPC ID +} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS + * gpcId[IN] + * - Logical/local GPC ID + * ropMask[OUT] + * - Returns physical ROP mask for legacy, DM client + * - Returns logical ROP mask for SMC + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS { + NvU32 gpcId; // param[in] - logical/local GPC ID + NvU32 ropMask; // param[out] - physical ROP mask +} NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS + * chipletSyspipeMask [OUT] + * - Mask of chiplet SMC-IDs for DM client attribution case + * - Mask of local SMC-IDs for SMC client + * - Legacy case returns 1 GR + * - Does not support attribution case for DM client + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS { + NvU32 chipletSyspipeMask; // param[out] - Mask of chiplet SMC IDs +} NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS + * swizzId[IN] + * - Swizz ID of partition + * - A DM client with an invalid swizz ID, will fail this call + * physSyspipeId[GRMGR_MAX_SMC_IDS] [OUT] + * - Physical SMC-IDs mapped to partition local idx for DM client attribution case + * - Does not support non-attribution case for DM client, SMC clients, legacy case + * physSyspipeIdCount[OUT] + * - Valid count of physSmcIds which has been populated in above array. + * - Failure case will return 0 + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS { + NvU16 swizzId; // param[in] - swizz ID of partition + NvU16 physSyspipeIdCount; // param[out] - Count of physSmcIds in above array + NvU8 physSyspipeId[NV2080_CTRL_GRMGR_MAX_SMC_IDS]; // param[out] - physical/local SMC IDs +} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS; + +/*! + * NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS + * swizzId[IN] + * - Swizz ID of partition + * - Mandatory parameter + * - A DM client with an invalid swizz ID, will fail this call + * grIdx[IN] + * - Local grIdx for a partition + * - Mandatory parameter + * gpcEnMask[OUT] + * - Logical enabled GPC mask associated with requested grIdx of the partition i.e swizzid->engineId->gpcMask + * - These Ids should be used as input further + * - Does not support non-attribution case for DM client, SMC clients, legacy case + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS { + NvU32 swizzId; // param[in] - swizz ID of partition + NvU32 grIdx; // param[in] - partition local GR ID + NvU32 gpcEnMask; // param[out] - logical enabled GPC mask +} NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS; + +/*! 
+ * NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID + * syspipeId[OUT] + * - Partition-local GR idx for client subscribed to exec partition + * - Does not support legacy case, DM client, or SMC client subscribed only to partition + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS { + NvU32 syspipeId; // param[out] - partition-local Gr idx +} NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS; + +/*! + * queryType[IN] + * - Use queryType defines to specify what information is being requested + * status[OUT] + * - Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +typedef struct NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS { + NvU16 queryType; + NvU8 reserved[2]; // To keep the struct aligned for now and available for future use (if needed) + NvU32 status; + union { + NV2080_CTRL_GRMGR_GR_FS_INFO_GPC_COUNT_PARAMS gpcCountData; + NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_GPC_MAP_PARAMS chipletGpcMapData; + NV2080_CTRL_GRMGR_GR_FS_INFO_TPC_MASK_PARAMS tpcMaskData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PPC_MASK_PARAMS ppcMaskData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_GPC_MAP_PARAMS partitionGpcMapData; + NV2080_CTRL_GRMGR_GR_FS_INFO_CHIPLET_SYSPIPE_MASK_PARAMS syspipeMaskData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_CHIPLET_SYSPIPE_IDS_PARAMS partitionChipletSyspipeData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PROFILER_MON_GPC_MASK_PARAMS dmGpcMaskData; + NV2080_CTRL_GRMGR_GR_FS_INFO_PARTITION_SYSPIPE_ID_PARAMS partitionSyspipeIdData; + NV2080_CTRL_GRMGR_GR_FS_INFO_ROP_MASK_PARAMS ropMaskData; + } queryData; +} NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS; + +#define NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS { + NvU16 numQueries; + NvU8 reserved[6]; // To keep the struct aligned for now and available for future use (if needed) + NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARAMS queries[NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES]; +} NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS; + +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_INVALID 0 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_GPC_COUNT 1 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_GPC_MAP 2 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_TPC_MASK 3 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PPC_MASK 4 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_GPC_MAP 5 /* deprecated */ +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_CHIPLET_SYSPIPE_MASK 6 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_CHIPLET_SYSPIPE_IDS 7 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PROFILER_MON_GPC_MASK 8 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_PARTITION_SYSPIPE_ID 9 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_QUERY_ROP_MASK 10 + +/* _ctrl2080grmgr_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h new file mode 100644 index 000000000..19874c588 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gsp.h @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080gsp.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX GSP control commands and parameters */ + +/* + * NV2080_CTRL_CMD_GSP_GET_FEATURES + * + * This command is used to determine which GSP features are + * supported on this GPU. + * + * gspFeatures + * Bit mask that specifies GSP features supported. + * bValid + * If this field is set to NV_TRUE, then above bit mask is + * considered valid. Otherwise, bit mask should be ignored + * as invalid. bValid will be set to NV_TRUE when RM is a + * GSP client with GPU support offloaded to GSP firmware. + * bDefaultGspRmGpu + * If this field is set to NV_TRUE, it indicates that the + * underlying GPU has GSP-RM enabled by default. If set to NV_FALSE, + * it indicates that the GPU has GSP-RM disabled by default. + * firmwareVersion + * This field contains the buffer into which the firmware build version + * should be returned, if GPU is offloaded. Otherwise, the buffer + * will remain untouched. + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_GSP_GET_FEATURES (0x20803601) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID << 8) | NV2080_CTRL_GSP_GET_FEATURES_PARAMS_MESSAGE_ID" */ + +#define NV2080_GSP_MAX_BUILD_VERSION_LENGTH (0x0000040) + +#define NV2080_CTRL_GSP_GET_FEATURES_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_GSP_GET_FEATURES_PARAMS { + NvU32 gspFeatures; + NvBool bValid; + NvBool bDefaultGspRmGpu; + NvU8 firmwareVersion[NV2080_GSP_MAX_BUILD_VERSION_LENGTH]; +} NV2080_CTRL_GSP_GET_FEATURES_PARAMS; + +/* Valid feature values */ +#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED 0:0 +#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_FALSE (0x00000000) +#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_TRUE (0x00000001) + +// _ctrl2080gsp_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h new file mode 100644 index 000000000..35cbf239e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080hshub.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080hshub.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* + * NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK + * + * This command get active HSHUB masks. + * + * hshubNcisocMask + * NCISOC enabled active HSHUBs + * hshubNvlMask + * NVLINK capable active HSHUBs. + */ + +#define NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS { + NvU32 hshubNcisocMask; + NvU32 hshubNvlMask; +} NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS; + +#define NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK (0x20804101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_HSHUB_INTERFACE_ID << 8) | NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS_MESSAGE_ID" */ + +/* _ctrl2080hshub_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h new file mode 100644 index 000000000..07495303f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080i2c.h @@ -0,0 +1,368 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080i2c.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX i2c-related control commands and parameters */ + +/* + * NV2080_CTRL_I2C_VERSION + * + * NV2080_CTRL_I2C_VERSION_0_0: + * This return state specifies that support is only available + * for single subAddr reads. + * + */ +#define NV2080_CTRL_I2C_VERSION_0 0x00 + +/* maximum number of i2c entries support */ +#define NV2080_CTRL_I2C_MAX_ENTRIES 256 +#define NV2080_CTRL_I2C_MAX_REG_LEN 8 +#define NV2080_CTRL_I2C_MAX_ADDR_ENTRIES 20 + +/* + * NV2080_CTRL_I2C_FLAGS + * + * NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC: + * This option specified that non-compliant i2c for SI1930UC is required + * + * NV2080_CTRL_I2C_FLAGS_PRIVILEGE + * This option specified that the i2c access is privileged + * + * NV2080_CTRL_I2C_FLAGS_PX3540 + * This option specified that the i2c device -PX3540/3544- is accessed + */ +#define NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC (0x00000001) +#define NV2080_CTRL_I2C_FLAGS_PRIVILEGE (0x00000002) +#define NV2080_CTRL_I2C_FLAGS_DATA_ENCRYPTED (0x00000004) +#define NV2080_CTRL_I2C_FLAGS_PX3540 (0x00000010) +#define NV2080_CTRL_I2C_FLAGS_ADDR_AUTO_INC_NOT_SUPPORTED (0x00000008) + +/* + * NV2080_CTRL_CMD_I2C_READ_BUFFER + * + * This command allocates video memory for a particular subset of microcode. + * + * version + * This field is returned to the client and indicates the current + * supported I2C controls available. + * + * port + * This field must be specified by the client to indicate which port/bus + * in which i2c access is desired. + * + * flags + * This field is specified by the client to request additional options + * as provided by NV2080_CTRL_I2C_FLAGS. + * + * inputCount + * This field specifies the total # of elements contained in inputBuffer + * + * inputBuffer + * This should contain the chipaddr as the first element, followed by + * the each subAddress in which to access the first element of data + * Eg. ... + * In general, client will only have 2 elements + * + * outputCount + * This field specifies how many registers from the start register index. + * The maximum values allow are NV2080_CTRL_I2C_MAX_ENTRIES. + * + * outputBuffer + * This buffer is returned to the client with the data read from + * the start register index. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_STATE_IN_USE + * NV_ERR_INVALID_STATE + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV2080_CTRL_I2C_READ_BUFFER_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_I2C_READ_BUFFER_PARAMS { + NvU32 version; + NvU32 port; + NvU32 flags; + NvU32 inputCount; + // C form: NvU8 inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES]; + NvU8 inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES]; + NvU32 outputCount; + // C form: NvU8 outputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES]; + NvU8 outputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES]; +} NV2080_CTRL_I2C_READ_BUFFER_PARAMS; + +#define NV2080_CTRL_CMD_I2C_READ_BUFFER (0x20800601) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_READ_BUFFER_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_I2C_WRITE_BUFFER + * + * This command allocates video memory for a particular subset of microcode. + * + * version + * This field is returned to the client and indicates the current + * supported I2C controls available. 
+ * + * port + * This field must be specified by the client to indicate which port/bus + * in which i2c access is desired. + * + * flags + * This field is specified by the client to request additional options. + * NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC: + * - Specifies that non-compliant i2c access for SI1930UC is required + * + * inputCount + * This field specifies the total # of elements contained in inputBuffer + * + * inputBuffer + * This should contain the chipaddr as the first element, followed by + * the each subAddress in which to access the first element of data, + * and finally the data to be programmed. + * Eg. ... ... + * In general, client will have 2 elements + data to be programmed. + * ... + * + * encrClientID + * This field is specified by client, which is used to uniquely access + * the client's encryption context + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_STATE_IN_USE + * NV_ERR_INVALID_STATE + * + */ + +#define NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS { + NvU32 version; + NvU32 port; + NvU32 flags; + NvU32 inputCount; + // C form: NvU8 inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES]; + NvU8 inputBuffer[NV2080_CTRL_I2C_MAX_ENTRIES]; + NvU32 encrClientID; +} NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS; + +#define NV2080_CTRL_CMD_I2C_WRITE_BUFFER (0x20800602) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS_MESSAGE_ID" */ + + +/* + * NV2080_CTRL_CMD_I2C_READ_REG + * + * This command allocates video memory for a particular subset of microcode. + * + * version + * This field is returned to the client and indicates the current + * supported I2C controls available. + * + * port + * This field must be specified by the client to indicate which port/bus + * in which i2c access is desired. + * + * flags + * This field is specified by the client to request additional options. + * NV2080_CTRL_I2C_FLAGS_NONSTD_SI1930UC: + * - Specifies that non-compliant i2c access for SI1930UC is required + * addr + * This field is specified by the client to target address. + * reg + * This field is specified by the client to target register address. + * + * bufsize + * This field specifies the total bytes # of register size + * + * buffer + * when used for read, it used as buffer that store returned register content + * when used for write, It include data that will be written. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_STATE_IN_USE + * NV_ERR_INVALID_STATE + * + */ +typedef struct NV2080_CTRL_I2C_RW_REG_PARAMS { + NvU32 version; + NvU32 port; + NvU32 flags; + NvU32 addr; + NvU8 reg; + NvU8 bufsize; + // C form: NvU8 buffer[NV2080_CTRL_I2C_MAX_ENTRIES - 1]; + NvU8 buffer[(NV2080_CTRL_I2C_MAX_ENTRIES - 1)]; +} NV2080_CTRL_I2C_RW_REG_PARAMS; + +// provide NV2080_CTRL_I2C_READ_REG_PARAMS as the historical name +typedef NV2080_CTRL_I2C_RW_REG_PARAMS NV2080_CTRL_I2C_READ_REG_PARAMS; +#define NV2080_CTRL_CMD_I2C_READ_REG (0x20800603) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | 0x3" */ + +#define NV2080_CTRL_CMD_I2C_WRITE_REG (0x20800604) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | 0x4" */ + +/* + * NV006F_CTRL_CMD_SYSTEM_I2C_ACCESS + * + * This command allows Clients to read and write data using the I2C ports + * + * token [IN] + * This used in i2cAcquirePort + * + * cmd [IN] + * The I2CAccess command + * + * port [IN] + * The port ID of the concerned display + * + * flags [IN] + * The I2CAccess Flags such ack,start,stop + * + * data [OUT/IN] + * Data that needs to be pass or read out + * + * dataBuffSize [IN] + * Size of the data buffer. + * + * speed [IN] + * Speed of transaction. + * + * status [OUT] + * The I2CAccess Status returned + * + * encrClientID [IN] + * This field is specified by client, which is used to uniquely access + * the client's encryption context + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_I2C_ACCESS (0x20800610) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_ACCESS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_I2C_ACCESS_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV2080_CTRL_I2C_ACCESS_PARAMS { + NvU32 token; + NvU32 cmd; + NvU32 port; + NvU32 flags; + NV_DECLARE_ALIGNED(NvP64 data, 8); + NvU32 status; + NvU32 dataBuffSize; + NvU32 speed; + NvU32 encrClientID; +} NV2080_CTRL_I2C_ACCESS_PARAMS; + +// commands +#define NV2080_CTRL_I2C_ACCESS_CMD_ACQUIRE 0x1 +#define NV2080_CTRL_I2C_ACCESS_CMD_RELEASE 0x2 +#define NV2080_CTRL_I2C_ACCESS_CMD_WRITE_BYTE 0x3 +#define NV2080_CTRL_I2C_ACCESS_CMD_READ_BYTE 0x4 +#define NV2080_CTRL_I2C_ACCESS_CMD_NULL 0x5 +#define NV2080_CTRL_I2C_ACCESS_CMD_RESET 0x6 +#define NV2080_CTRL_I2C_ACCESS_CMD_TEST_PORT 0x11 +#define NV2080_CTRL_I2C_ACCESS_CMD_SET_FAST_MODE 0x12 +#define NV2080_CTRL_I2C_ACCESS_CMD_SET_NORMAL_MODE 0x13 +#define NV2080_CTRL_I2C_ACCESS_CMD_WRITE_BUFFER 0x14 +#define NV2080_CTRL_I2C_ACCESS_CMD_READ_BUFFER 0x15 +#define NV2080_CTRL_I2C_ACCESS_CMD_START 0x17 +#define NV2080_CTRL_I2C_ACCESS_CMD_STOP 0x18 +#define NV2080_CTRL_I2C_ACCESS_CMD_SET_SLOW_MODE 0x20 + +// flags +#define NV2080_CTRL_I2C_ACCESS_FLAG_START 0x1 +#define NV2080_CTRL_I2C_ACCESS_FLAG_STOP 0x2 +#define NV2080_CTRL_I2C_ACCESS_FLAG_ACK 0x4 +#define NV2080_CTRL_I2C_ACCESS_FLAG_RAB 0x8 +#define NV2080_CTRL_I2C_ACCESS_FLAG_ADDR_10BITS 0x10 +#define NV2080_CTRL_I2C_ACCESS_FLAG_PRIVILEGE 0x20 +#define NV2080_CTRL_I2C_ACCESS_FLAG_DATA_ENCRYPTED 0x40 +#define NV2080_CTRL_I2C_ACCESS_FLAG_RESTART 0x80 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_33_33PCT 0x100 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_33PCT 0x200 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_10PCT 0x400 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_3_33PCT 0x800 +#define NV2080_CTRL_I2C_ACCESS_FLAG_SLOW_MODE_3PCT 0x1000 + 
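For orientation only (not part of the commit): the following is a minimal sketch of how a client might drive NV2080_CTRL_CMD_I2C_ACCESS using the parameter structure, command codes, and flags defined above: acquire a port, read one byte, then release the port. The rmControl() helper, the hSubdevice handle, and the readOneByte() wrapper are hypothetical placeholders for whatever control-call dispatch path the caller actually uses; the header does not spell out how the target device or register is conveyed for CMD_READ_BYTE, so that detail is intentionally left out of the sketch.

/*
 * Illustrative sketch only; not part of this header or commit.
 * rmControl() is a hypothetical stand-in for the caller's RM control
 * dispatch path, and hSubdevice is assumed to be a valid subdevice handle.
 */
#include "nvtypes.h"
#include "ctrl/ctrl2080/ctrl2080i2c.h"

extern NvU32 rmControl(NvHandle hSubdevice, NvU32 cmd, void *pParams, NvU32 paramsSize);

static NvU8 readOneByte(NvHandle hSubdevice)
{
    NV2080_CTRL_I2C_ACCESS_PARAMS p = {0};
    NvU8 byte = 0;

    /* Acquire the primary port before issuing any transaction. */
    p.cmd  = NV2080_CTRL_I2C_ACCESS_CMD_ACQUIRE;
    p.port = NV2080_CTRL_I2C_ACCESS_PORT_PRIMARY;
    rmControl(hSubdevice, NV2080_CTRL_CMD_I2C_ACCESS, &p, sizeof(p));

    /* Read a single byte; START and STOP bracket the transfer.  The cast
     * below stands in for whatever pointer-to-NvP64 conversion helper the
     * codebase provides. */
    p.cmd          = NV2080_CTRL_I2C_ACCESS_CMD_READ_BYTE;
    p.flags        = NV2080_CTRL_I2C_ACCESS_FLAG_START |
                     NV2080_CTRL_I2C_ACCESS_FLAG_STOP;
    p.data         = (NvP64)(NvUPtr)&byte;
    p.dataBuffSize = sizeof(byte);
    rmControl(hSubdevice, NV2080_CTRL_CMD_I2C_ACCESS, &p, sizeof(p));

    /* Release the port so other clients can use it. */
    p.cmd = NV2080_CTRL_I2C_ACCESS_CMD_RELEASE;
    rmControl(hSubdevice, NV2080_CTRL_CMD_I2C_ACCESS, &p, sizeof(p));

    return (p.status == NV2080_CTRL_I2C_ACCESS_STATUS_SUCCESS) ? byte : 0;
}

A production caller would also check the return value of each control call and inspect p.status after every step rather than only at the end.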
+// port +#define NV2080_CTRL_I2C_ACCESS_PORT_DYNAMIC 0x0 +#define NV2080_CTRL_I2C_ACCESS_PORT_PRIMARY 0x1 +#define NV2080_CTRL_I2C_ACCESS_PORT_SECONDARY 0x2 +#define NV2080_CTRL_I2C_ACCESS_PORT_TERTIARY 0x3 +#define NV2080_CTRL_I2C_ACCESS_PORT_QUARTIARY 0x4 + +// Alternate numeric port designators +#define NV2080_CTRL_I2C_ACCESS_PORT_1 0x1 +#define NV2080_CTRL_I2C_ACCESS_PORT_2 0x2 +#define NV2080_CTRL_I2C_ACCESS_PORT_3 0x3 +#define NV2080_CTRL_I2C_ACCESS_PORT_4 0x4 +#define NV2080_CTRL_I2C_ACCESS_PORT_5 0x5 +#define NV2080_CTRL_I2C_ACCESS_PORT_6 0x6 +#define NV2080_CTRL_I2C_ACCESS_PORT_7 0x7 +#define NV2080_CTRL_I2C_ACCESS_PORT_8 0x8 +#define NV2080_CTRL_I2C_ACCESS_PORT_9 0x9 +#define NV2080_CTRL_I2C_ACCESS_PORT_10 0x10 + +// Total ports count +#define NV2080_CTRL_I2C_ACCESS_NUM_PORTS NV2080_CTRL_I2C_ACCESS_PORT_10 + +// status +#define NV2080_CTRL_I2C_ACCESS_STATUS_SUCCESS 0x0 +#define NV2080_CTRL_I2C_ACCESS_STATUS_ERROR 0x1 +#define NV2080_CTRL_I2C_ACCESS_STATUS_PROTOCOL_ERROR 0x2 +#define NV2080_CTRL_I2C_ACCESS_STATUS_DEVICE_BUSY 0x3 +#define NV2080_CTRL_I2C_ACCESS_STATUS_NACK_AFTER_SEND 0x4 +#define NV2080_CTRL_I2C_ACCESS_STATUS_DP2TMDS_DONGLE_MISSING 0x5 + +#define NV2080_CTRL_CMD_I2C_ENABLE_MONITOR_3D_MODE (0x20800620) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID << 8) | NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS { + NvU32 head; + NvU32 authType; + NvU32 status; +} NV2080_CTRL_I2C_ENABLE_MONITOR_3D_MODE_PARAMS; + +/* _ctrl2080i2c_h_ */ + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h new file mode 100644 index 000000000..73db05198 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080illum.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080illum.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h new file mode 100644 index 000000000..249795c96 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h @@ -0,0 +1,2211 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080internal.finn +// + +#include "nvimpshared.h" + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrl2080/ctrl2080gr.h" /* Some controls derivative of 2080gr */ +#include "ctrl/ctrl0080/ctrl0080msenc.h" /* NV0080_CTRL_MSENC_CAPS_TBL_SIZE */ +#include "ctrl/ctrl0080/ctrl0080bsp.h" /* NV0080_CTRL_BSP_CAPS_TBL_SIZE */ +#include "ctrl/ctrl2080/ctrl2080fifo.h" /* NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO */ +#include "ctrl/ctrl0000/ctrl0000system.h" +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO + * + * This command obtains information from physical RM for use by CPU-RM. + * + * feHwSysCap + * Display IP v03_00 and later. + * Contents of capability register. + * + * windowPresentMask + * Display IP v03_00 and later. + * Mask for the present WINDOWs actually on the current chip. + * bFbRemapperEnabled + * Display IP v02_01 and later. + * Indicates that the display remapper HW exists and is enabled. + */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS { + NvU32 feHwSysCap; + NvU32 windowPresentMask; + NvBool bFbRemapperEnabled; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS; + + + +// +// MemorySystem settings that are static after GPU state init/load is finished. +// +// Fields are shared between the VGPU guest/GSP Client as well as the VGPU +// host/GSP-RM. +// +#define NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS_MESSAGE_ID (0x1CU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS { + /*! 
Determines if RM should use 1 to 1 Comptagline allocation policy */ + NvBool bOneToOneComptagLineAllocation; + + /*! Determines if RM should use 1 to 4 Comptagline allocation policy */ + NvBool bUseOneToFourComptagLineAllocation; + + /*! Determines if RM should use raw Comptagline allocation policy */ + NvBool bUseRawModeComptaglineAllocation; + + /*! Has COMPBIT_BACKING_SIZE been overridden to zero (i.e. disabled)? */ + NvBool bDisableCompbitBacking; + + /*! Determine if we need to disable post L2 compression */ + NvBool bDisablePostL2Compression; + + /*! Is ECC DRAM feature supported? */ + NvBool bEnabledEccFBPA; + + NvBool bL2PreFill; + + /*! L2 cache size */ + NV_DECLARE_ALIGNED(NvU64 l2CacheSize, 8); + + NvBool bReservedMemAtBottom; + + /*! Indicate whether fpba is present or not */ + NvBool bFbpaPresent; + + /*! Size covered by one comptag */ + NvU32 comprPageSize; + + /*! log32(comprPageSize) */ + NvU32 comprPageShift; + + /*! Maximum number of pages that can be dynamaically blacklisted */ + NvU16 maximumBlacklistPages; + + /*! RAM type */ + NvU32 ramType; + + /*! LTC count */ + NvU32 ltcCount; + + /*! LTS per LTC count */ + NvU32 ltsPerLtcCount; + + /*! Ampere PLC bug */ + NvBool bDisablePlcForCertainOffsetsBug3046774; +} NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS; + +/*! + * Retrieve Memory System Static data. + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_STATIC_CONFIG (0x20800a1c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_REGISTER_UVM_ACCESS_CNTR_BUFFER + * + * This command sends access counter buffer pages allocated by CPU-RM + * to be setup and enabled in physical RM. + * + * bufferSize + * Size of the access counter buffer to register. + * + * bufferPteArray + * Pages of access counter buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER (0x20800a1d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_UVM_ACCESS_CNTR_BUFFER_MAX_PAGES 64 +#define NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS_MESSAGE_ID (0x1DU) + +typedef struct NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS { + NvU32 bufferSize; + NV_DECLARE_ALIGNED(NvU64 bufferPteArray[NV2080_CTRL_INTERNAL_UVM_ACCESS_CNTR_BUFFER_MAX_PAGES], 8); +} NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER + * + * This command requests physical RM to disable the access counter buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER (0x20800a1e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x1E" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_UVM_SERVICE_ACCESS_CNTR_BUFFER + * + * This command requests physical RM to service the access counter buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_SERVICE_ACCESS_CNTR_BUFFER (0x20800a21) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x21" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE + * + * This command retrieves the access counter buffer size from physical RM. 
+ * + * bufferSize[OUT] + * Size of the access counter buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE (0x20800a29) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS_MESSAGE_ID (0x29U) + +typedef struct NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS { + NvU32 bufferSize; +} NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES 8 + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_CAPS_V2 + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CAPS (0x20800a1f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x1F" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CAPS { + NvU8 capsTbl[NV0080_CTRL_GR_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CAPS; +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CAPS engineCaps[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER + * @ref NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_GLOBAL_SM_ORDER (0x20800a22) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x22" */ + + + +#define NV2080_CTRL_INTERNAL_GR_MAX_SM 240 + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER { + struct { + NvU16 gpcId; + NvU16 localTpcId; + NvU16 localSmId; + NvU16 globalTpcId; + NvU16 virtualGpcId; + NvU16 migratableTpcId; + } globalSmId[NV2080_CTRL_INTERNAL_GR_MAX_SM]; + + NvU16 numSm; + NvU16 numTpc; +} NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER globalSmOrder[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS; + + +/*! + * Retrieve BSP Static data. + */ +#define NV2080_CTRL_CMD_INTERNAL_BSP_GET_CAPS (0x20800a24) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_MAX_BSPS 8 + +typedef struct NV2080_CTRL_INTERNAL_BSP_CAPS { + NvU8 capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_BSP_CAPS; + +#define NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_BSP_CAPS caps[NV2080_CTRL_CMD_INTERNAL_MAX_BSPS]; + NvBool valid[NV2080_CTRL_CMD_INTERNAL_MAX_BSPS]; +} NV2080_CTRL_INTERNAL_BSP_GET_CAPS_PARAMS; + +/*! + * Retrieve MSENC Static data. 
+ */ +#define NV2080_CTRL_CMD_INTERNAL_MSENC_GET_CAPS (0x20800a25) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS 3 + +typedef struct NV2080_CTRL_INTERNAL_MSENC_CAPS { + NvU8 capsTbl[NV0080_CTRL_MSENC_CAPS_TBL_SIZE]; +} NV2080_CTRL_INTERNAL_MSENC_CAPS; + +#define NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS { + NV2080_CTRL_INTERNAL_MSENC_CAPS caps[NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS]; + NvBool valid[NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS]; +} NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS; + + +#define NV2080_CTRL_INTERNAL_GR_MAX_GPC 12 +#define NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT 10 + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_GPC_MASK + * @ref NV2080_CTRL_CMD_GR_GET_TPC_MASK + * @ref NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FLOORSWEEPING_MASKS (0x20800a26) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x26" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS { + NvU32 gpcMask; + + /*! + * tpcMask is indexed by logical GPC ID for MIG case + * and indexed by physical GPC ID for non-MIG case + */ + NvU32 tpcMask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + /*! + * tpcCount is always indexed by logical GPC ID + */ + NvU32 tpcCount[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + NvU32 physGpcMask; + NvU32 mmuPerGpc[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + NvU32 tpcToPesMap[NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT]; + NvU32 numPesPerGpc[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; + + /*! + * zcullMask is always indexed by physical GPC ID + */ + NvU32 zcullMask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; +} NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS { + /*! + * floorsweeping masks which are indexed via local GR index + */ + NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS floorsweepingMasks[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS; + +/* + * NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES + * + * This command returns physical addresses of specified context buffer. + * To obtain addresses of whole buffer firstPage has to be advanced on + * subsequent invocations of the control until whole buffer is probed. + * If the buffer is contiguous, only single address will be returned by + * this control. + * + * bufferType[IN] + * Buffer type as returned by GET_CTX_BUFFER_INFO. + * + * firstPage[IN] + * Index of the first page to return in 'physAddrs' array. + * + * numPages[OUT] + * Number of entries filled in 'physAddrs' array. This will be 0 + * if firstPage is greater or equal to number of pages managed by 'hBuffer'. + * + * physAddrs[OUT] + * Physical addresses of pages comprising specified buffer. + * + * bNoMorePages[OUT] + * End of buffer reached. Either 'physAddrs' contains last page of the + * buffer or 'firstPage' specifies index past the buffer. 
+ */ +#define NV2080_CTRL_KGR_MAX_BUFFER_PTES 128 +#define NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES (0x20800a28) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS_MESSAGE_ID (0x28U) + +typedef struct NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS { + NvHandle hUserClient; + NvHandle hChannel; + NvU32 bufferType; + NvU32 firstPage; + NvU32 numPages; + NV_DECLARE_ALIGNED(NvU64 physAddrs[NV2080_CTRL_KGR_MAX_BUFFER_PTES], 8); + NvBool bNoMorePages; +} NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS; + +/*! + * @ref NV0080_CTRL_CMD_GR_GET_INFO + * @ref NV0080_CTRL_CMD_GR_GET_INFO_V2 + * @ref NV2080_CTRL_CMD_GR_GET_INFO + * @ref NV2080_CTRL_CMD_GR_GET_INFO_V2 + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_INFO (0x20800a2a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x2A" */ + + + +/*! + * @ref NV2080_CTRL_GR_INFO + */ +typedef struct NV2080_CTRL_INTERNAL_GR_INFO { + NvU32 index; + NvU32 data; +} NV2080_CTRL_INTERNAL_GR_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_INFO { + NV2080_CTRL_INTERNAL_GR_INFO infoList[NV0080_CTRL_GR_INFO_MAX_SIZE]; +} NV2080_CTRL_INTERNAL_STATIC_GR_INFO; +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_INFO engineInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_ZCULL_INFO + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_ZCULL_INFO (0x20800a2c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x2C" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO { + NvU32 widthAlignPixels; + NvU32 heightAlignPixels; + NvU32 pixelSquaresByAliquots; + NvU32 aliquotTotal; + NvU32 zcullRegionByteMultiplier; + NvU32 zcullRegionHeaderSize; + NvU32 zcullSubregionHeaderSize; + NvU32 subregionCount; + NvU32 subregionWidthAlignPixels; + NvU32 subregionHeightAlignPixels; +} NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO engineZcullInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_ROP_INFO + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_ROP_INFO (0x20800a2e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x2E" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO { + NvU32 ropUnitCount; + NvU32 ropOperationsFactor; + NvU32 ropOperationsCount; +} NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO engineRopInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_PPC_MASK + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_PPC_MASKS (0x20800a30) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x30" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS { + NvU32 mask[NV2080_CTRL_INTERNAL_GR_MAX_GPC]; +} NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS enginePpcMasks[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS; + +/*! 
+ * @ref NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES + * @ref NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x32" */ + + + +#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19 + +typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO { + NvU32 size; + NvU32 alignment; +} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO { + NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT]; +} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER (0x20800a34) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x34" */ + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER { + NvU8 imla0; + NvU8 fmla16; + NvU8 dp; + NvU8 fmla32; + NvU8 ffma; + NvU8 imla1; + NvU8 imla2; + NvU8 imla3; + NvU8 imla4; +} NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER smIssueRateModifier[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS; + +/* + * NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS + * + * This command obtains information from physical RM for use by CPU-RM. + */ + +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_CHIP_INFO (0x20800a36) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS_MESSAGE_ID" */ + +/* + * Maximum number of register bases to return. + * These are indexed by NV_REG_BASE_* constants from gpu.h, and this value needs + * to be updated if NV_REG_BASE_LAST ever goes over it. 
See the ct_assert() in gpu.h + */ +#define NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX 16 +#define NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS { + NvU8 chipSubRev; + NvU32 emulationRev1; + NvBool isCmpSku; + NvU32 bar1Size; + NvU32 pciDeviceId; + NvU32 pciSubDeviceId; + NvU32 pciRevisionId; + NvU32 regBases[NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX]; +} NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE + * + * Set whether or not context switch logging is enabled + * + * bEnable + * Enable/Disable status for context switch logging + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE (0x20800a37) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE + * + * Retrieve whether or not context switch logging is enabled + * + * bEnable + * Enable/Disable status for context switch logging + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE (0x20800a38) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvBool bEnable; +} NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS; +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID (0x37U) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS; +#define NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS_MESSAGE_ID (0x38U) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_HW_ENABLE_PARAMS NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET + * + * Set read offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET (0x20800a39) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET + * + * Set write offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET (0x20800a3a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS_MESSAGE_ID" */ + +/** + * NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET + * + * Get read offset into FECS context switch trace record + * + * offset + * Value indicating number of records by which to offset + */ + +#define NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET (0x20800a3b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); + NvU32 offset; +} NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS; + +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID (0x39U) + +typedef 
NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS; +#define NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS_MESSAGE_ID (0x3AU) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS; +#define NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS_MESSAGE_ID (0x3BU) + +typedef NV2080_CTRL_INTERNAL_GR_FECS_TRACE_OFFSET_PARAMS NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE + * + * Get size of FECS record + * + * fecsRecordSize + * Size of FECS record + */ + + + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE (0x20800a3d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE { + NvU32 fecsRecordSize; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE; + +#define NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID (0x3CU) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE fecsRecordSize[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS; +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS_MESSAGE_ID (0x3DU) + +typedef NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES + * + * Get HW defines used to extract information from FECS records + * + * fecsRecordSize + * Size of FECS record + * + * timestampHiTagMask + * Mask for high bits of raw timestamp to extract tag + * + * timestampHiTagShift + * Shift for high bits of raw timestamp to extract tag + * + * timestampVMask + * Mask to extract timestamp from raw timestamp + * + * numLowerBitsZeroShift + * Number of bits timestamp is shifted by + */ + + + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES (0x20800a3f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x3F" */ + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES { + NvU32 fecsRecordSize; + NvU32 timestampHiTagMask; + NvU8 timestampHiTagShift; + NV_DECLARE_ALIGNED(NvU64 timestampVMask, 8); + NvU8 numLowerBitsZeroShift; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES fecsTraceDefines[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES], 8); +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS; + +/** + * NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE + * + * Parse the DEVICE_INFO2_TABLE on the physical side and return it to kernel. 
+ */ +typedef struct NV2080_CTRL_INTERNAL_DEVICE_INFO { + NvU32 faultId; + NvU32 instanceId; + NvU32 typeEnum; + NvU32 resetId; + NvU32 devicePriBase; + NvU32 isEngine; + NvU32 rlEngId; + NvU32 runlistPriBase; +} NV2080_CTRL_INTERNAL_DEVICE_INFO; +#define NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES 88 + +#define NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE (0x20800a40) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID (0x40U) + +typedef struct NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS { + NvU32 numEntries; + NV2080_CTRL_INTERNAL_DEVICE_INFO deviceInfoTable[NV2080_CTRL_CMD_INTERNAL_DEVICE_INFO_MAX_ENTRIES]; +} NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP (0x20800a41) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_COMPRESSED_SIZE 4096 +#define NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_PROFILING_RANGES 4096 + +#define NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS_MESSAGE_ID (0x41U) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS { + NvU32 userRegisterAccessMapSize; + NvU32 compressedSize; + NvU8 compressedData[NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_COMPRESSED_SIZE]; + NvU32 profilingRangesSize; + NvU8 profilingRanges[NV2080_CTRL_INTERNAL_GPU_USER_REGISTER_ACCESS_MAP_MAX_PROFILING_RANGES]; +} NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS; + +typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO { + NvU32 engDesc; + NvU32 ctxAttr; + NvU32 ctxBufferSize; + NvU32 addrSpaceList; + NvU32 registerBase; +} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO; +#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS 0x40 + +#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID (0x42U) + +typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS { + NvU32 numConstructedFalcons; + NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS]; +} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS; + +/** + * Get GR PDB properties synchronized between Kernel and Physical + * + * bPerSubCtxheaderSupported + * @ref PDB_PROP_GR_SUPPORTS_PER_SUBCONTEXT_CONTEXT_HEADER + */ + + + +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_PDB_PROPERTIES (0x20800a48) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x48" */ + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES { + NvBool bPerSubCtxheaderSupported; +} NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES; + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS { + NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES pdbTable[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES]; +} NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM + * + * This command writes instance memory information in the display hardware registers. 
+ * + * instMemPhysAddr + * GPU physical address or IOVA address of the display instance memory. + * + * instMemSize + * Size of the display instance memory. + * + * instMemAddrSpace + * Address space of the display instance memory. + * + * instMemCpuCacheAttr + * Cache attribute of the display instance memory. + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID (0x49U) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS { + NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8); + NV_DECLARE_ALIGNED(NvU64 instMemSize, 8); + NvU32 instMemAddrSpace; + NvU32 instMemCpuCacheAttr; +} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS; + +/* + * NV2080_CTRL_INTERNAL_GPU_RECOVER_ALL_COMPUTE_CONTEXTS + * + * This command issues RC recovery for all compute contexts running on the given GPU. + */ +#define NV2080_CTRL_CMD_INTERNAL_RECOVER_ALL_COMPUTE_CONTEXTS (0x20800a4a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x4A" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION + * + * This command obtains IP version of display engine for use by Kernel RM. + * + * ipVersion + * IP Version of display engine. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED - DISP has been disabled + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION (0x20800a4b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS_MESSAGE_ID (0x4BU) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS { + NvU32 ipVersion; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE + * + * This command determines the current status of MIG MODE from Physical RM. + * + * smcMode [OUT] + * Current MIG MODE of the GPU. Values range NV2080_CTRL_GPU_INFO_GPU_SMC_MODE* + */ +#define NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE (0x20800a4c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS_MESSAGE_ID (0x4CU) + +typedef struct NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS { + NvU32 smcMode; +} NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR + * + * head + * This parameter specifies the head for which the callback is to be registered/unregistered. This value must be + * less than the maximum number of heads supported by the GPU subdevice. + * + * rgLineNum + * This indicates the RG scanout line number on which the callback will be executed. + * + * intrLine + * Enable: [out] Which interrupt line was allocated for this head. + * Disable: [in] Which interrupt line to deallocate. + * + * bEnable + * Should we allocate or deallocate an interrupt line? + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC - There was no available interrupt to allocate. 
+ * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR (0x20800a4d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS_MESSAGE_ID (0x4DU) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS { + NvU32 head; + NvU32 rgLineNum; + NvU32 intrLine; + NvBool bEnable; +} NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS; + +/*! + * NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO + * + * Description of a supported GPU instance. + * + * partitionFlag [OUT] + * Allocation flag to be used to allocate a partition with this profile. + * + * grCount [OUT] + * # GR engines + * + * gpcCount [OUT] + * # total gpcs + * + * veidCount [OUT] + * # total veids + * + * smCount [OUT] + * # total SMs + * + * ceCount [OUT] + * # CE engines + * + * nvEncCount [OUT] + * # NVENC engines + * + * nvDecCount [OUT] + * # NVDEC engines + * + * nvJpgCount [OUT] + * # NVJPG engines + * + * nvOfaCount [OUT] + * # NVOFA engines + */ +#define NV2080_CTRL_INTERNAL_GRMGR_PARTITION_MAX_TYPES 10 + + + +typedef struct NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO { + NvU32 partitionFlag; + NvU32 grCount; + NvU32 gpcCount; + NvU32 veidCount; + NvU32 smCount; + NvU32 ceCount; + NvU32 nvEncCount; + NvU32 nvDecCount; + NvU32 nvJpgCount; + NvU32 nvOfaCount; +} NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO; + +/*! + * NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS + * + * Returns the list of supported GPU instance profiles. + * + * count [OUT] + * Number of supported profiles. + * + * table [OUT] + * Supported profiles. + */ +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS { + NvU32 count; + NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO table[NV2080_CTRL_INTERNAL_GRMGR_PARTITION_MAX_TYPES]; +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM + * + * This command instructs the physical MemorySystem to set up memory partitioning + * exterior boundaries in hardware. 
+ * + * partitionableMemSize [input] + * Size of the partitionable memory in bytes + * + * bottomRsvdSize [input] + * Size of the reserved region below partitionable memory in bytes + * + * topRsvdSize [input] + * Size of the reserved region above partitionable memory in bytes + * + * partitionableStartAddr [output] + * Start address of the partitionable memory, aligned to HW constraints + * + * partitionableEndAddr [output] + * End address of the partitionable memory, aligned to HW constraints + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM (0x20800a51) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS_MESSAGE_ID (0x51U) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS { + NV_DECLARE_ALIGNED(NvU64 partitionableMemSize, 8); + NV_DECLARE_ALIGNED(NvU64 bottomRsvdSize, 8); + NV_DECLARE_ALIGNED(NvU64 topRsvdSize, 8); + NV_DECLARE_ALIGNED(NvU64 partitionableStartAddr, 8); + NV_DECLARE_ALIGNED(NvU64 partitionableEndAddr, 8); +} NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS; + + + +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS { + NV_DECLARE_ALIGNED(NvU64 engineMask, 8); +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS; + + +/*! + * NV2080_CTRL_INTERNAL_MEMDESC_INFO + * + * A generic container structure representing a memory region to be used as a + * component of other control call parameters. + * + */ +typedef struct NV2080_CTRL_INTERNAL_MEMDESC_INFO { + NV_DECLARE_ALIGNED(NvU64 base, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NV_DECLARE_ALIGNED(NvU64 alignment, 8); + NvU32 addressSpace; + NvU32 cpuCacheAttrib; +} NV2080_CTRL_INTERNAL_MEMDESC_INFO; + +#define NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS 2 +#define NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID 64 +/*! + * NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS + * + * Promote a single partition's runlist buffers allocated by kernel Client RM to Physical RM + * + * rlBuffers [IN] + * 2D array of runlist buffers for a single partition + * + * runlistIdMask [IN] + * Mask of runlists belonging to partition + * + */ +#define NV2080_CTRL_CMD_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS (0x20800a53) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS_MESSAGE_ID (0x53U) + +typedef struct NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_MEMDESC_INFO rlBuffers[NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID][NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS], 8); + NV_DECLARE_ALIGNED(NvU64 runlistIdMask, 8); + NvU32 swizzId; +} NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO (0x20800a54) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS_MESSAGE_ID (0x54U) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS { + TEGRA_IMP_IMPORT_DATA tegraImpImportData; +} NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS; + +/*! 
+ * NV2080_CTRL_CMD_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P + * + * Binds local GFID for SR-IOV P2P requests + * + * localGfid [IN] + * GFID to bind in the P2P source GPU + * + * peerId [IN] + * Peer ID of the P2P destination GPU + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P (0x20800a55) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS_MESSAGE_ID (0x55U) + +typedef struct NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS { + NvU32 localGfid; + NvU32 peerId; +} NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P + * + * Binds remote GFID for SR-IOV P2P requests + * + * remoteGfid [IN] + * GFID to bind in the P2P destination GPU + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P (0x20800a56) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS_MESSAGE_ID (0x56U) + +typedef struct NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS { + NvU32 remoteGfid; +} NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_BUS_FLUSH_WITH_SYSMEMBAR + * + * This command triggers a sysmembar to flush VIDMEM writes. + * This command accepts no parameters. + * + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_FLUSH_WITH_SYSMEMBAR (0x20800a70) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x70" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL + * + * Setup local PCIE P2P Mailbox + * + * local2Remote[IN] + * Local peer ID of remote gpu on local gpu + * + * remote2Local[IN] + * Remote peer ID of local gpu on remote gpu + * + * localP2PDomainRemoteAddr[IN] + * P2P domain remote address on local gpu + * + * remoteP2PDomainLocalAddr[IN] + * P2P domain local address on remote gpu + * + * remoteWMBoxLocalAddr[IN] + * Local mailbox address on remote gpu + * + * p2pWmbTag[OUT] + * Tag for mailbox to transport from local to remote GPU + * + * bNeedWarBug999673[IN] + * Set to true if WAR for bug 999673 is required + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL (0x20800a71) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS_MESSAGE_ID (0x71U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS { + NvU32 local2Remote; + NvU32 remote2Local; + NV_DECLARE_ALIGNED(NvU64 localP2PDomainRemoteAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteP2PDomainLocalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteWMBoxLocalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 p2pWmbTag, 8); + NvBool bNeedWarBug999673; +} NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS; + + /* + * NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE + * + * Setup remote PCIE P2P Mailbox + * + * local2Remote[IN] + * Local peer ID of remote gpu on local gpu + * + * remote2Local[IN] + * Remote peer ID of local gpu on remote gpu + * + * localP2PDomainRemoteAddr[IN] + * P2P domain remote address on local gpu + * + * remoteP2PDomainLocalAddr[IN] + * P2P domain local 
address on remote gpu + * + * remoteWMBoxAddrU64[IN] + * Mailbox address on remote gpu + * + * p2pWmbTag[IN] + * Tag for mailbox to transport from local to remote GPU + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE (0x20800a72) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS_MESSAGE_ID (0x72U) + +typedef struct NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS { + NvU32 local2Remote; + NvU32 remote2Local; + NV_DECLARE_ALIGNED(NvU64 localP2PDomainRemoteAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteP2PDomainLocalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 remoteWMBoxAddrU64, 8); + NV_DECLARE_ALIGNED(NvU64 p2pWmbTag, 8); +} NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_BUS_DESTROY_P2P_MAILBOX + * + * Destroy PCIE P2P Mailbox + * + * peerIdx[IN] + * Peer ID of the P2P destination GPU + * + * bNeedWarBug999673[IN] + * Set to true if WAR for bug 999673 is required + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_DESTROY_P2P_MAILBOX (0x20800a73) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS_MESSAGE_ID (0x73U) + +typedef struct NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS { + NvU32 peerIdx; + NvBool bNeedWarBug999673; +} NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING + * + * Create C2C mapping to a given peer GPU + * + * peerId[IN] + * Peer ID for local to remote GPU + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING (0x20800a74) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID (0x74U) + +typedef struct NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS { + NvU32 peerId; +} NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING + * + * Remove C2C mapping to a given peer GPU + * + * peerId[IN] + * Peer ID for local to remote GPU + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING (0x20800a75) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS_MESSAGE_ID (0x75U) + +typedef struct NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS { + NvU32 peerId; +} NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES + * + * Retrieves the corresponding SPAs (per the given GFID's VMMU mappings) + * for the given array of GPAs. + * + * gfid [IN] + * GFID to translate GPAs for + * + * numEntries [IN] + * Number of entries (<= NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES) + * to translate (i.e. 
number of elements in gpaEntries) + * + * gpaEntries [IN] + * Array of GPAs to translate + * + * spaEntries [OUT] + * Resulting array of SPAs + */ +#define NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES (0x20800a57) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES 128 + +#define NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS_MESSAGE_ID (0x57U) + +typedef struct NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS { + NvU32 gfid; + NvU32 numEntries; + NV_DECLARE_ALIGNED(NvU64 gpaEntries[NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES], 8); + NV_DECLARE_ALIGNED(NvU64 spaEntries[NV2080_CTRL_INTERNAL_VMMU_MAX_SPA_FOR_GPA_ENTRIES], 8); +} NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER + * + * Pass required pushbuffer parameters from kernel RM to Physical RM + * + * addressSpace [IN] + * Address space of pushbuffer whtether it is ADDR_SYSMEM or ADDR_FBMEM + * + * physicalAddr [IN] + * Physical address of pushbuffer + * + * addressSpace [IN] + * Limit of the pushbuffer address, it should be less than 4K + * + * cacheSnoop [IN] + * Cachesnoop supported or not + * + * channelInstance [IN] + * Channel instance pass by the client to get corresponding dispchannel + * + * hclass [IN] + * External class ID pass by the client to get the channel class + * + * valid [IN] + * This bit indicates whether pushbuffer parameters are valid or not + * + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID (0x58U) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS { + NvU32 addressSpace; + NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8); + NV_DECLARE_ALIGNED(NvU64 limit, 8); + NvU32 cacheSnoop; + NvU32 hclass; + NvU32 channelInstance; + NvBool valid; +} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO + * + * This command obtains information from physical RM for use by CPU-RM. + * + * replayableFaultBufferSize + * Default size of replayable fault buffer + * + * nonReplayableFaultBufferSize + * Default size of non-replayable fault buffer + * + */ + +#define NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO (0x20800a59) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0x59U) + +typedef struct NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS { + NvU32 replayableFaultBufferSize; + NvU32 nonReplayableFaultBufferSize; +} NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GR_GET_CTXSW_MODES + */ +#define NV2080_CTRL_CMD_INTERNAL_GR_GET_CTXSW_MODES (0x20800a5a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x5A" */ + +/*! 
+ * NV2080_CTRL_CMD_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE + * + * Get heap reservation size needed by different module + */ +#define NV2080_CTRL_CMD_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE (0x20800a5b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS_MESSAGE_ID (0x5BU) + +typedef struct NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS { + NvU32 moduleIndex; + NvU32 size; +} NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE + * + * tableLen [OUT] + * Number of valid records in table field. + * + * table [OUT] + * Interrupt table for Kernel RM. + */ +#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE 128 + +typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY { + NvU16 engineIdx; + NvU32 pmcIntrMask; + NvU32 vectorStall; + NvU32 vectorNonStall; +} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY; + +#define NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID (0x5CU) + +typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS { + NvU32 tableLen; + NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE]; +} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS; + +/* Index to retrieve the needed heap space for specific module */ +#define NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_GR (0x00000000) + +/* + * NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK + * + * Checking if the reservation / release of the PERFMON HW is possible + * + * bReservation [IN] + * NV_TRUE -> request for reservation, NV_FALSE -> request for release + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK (0x20800a98) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS_MESSAGE_ID" */ + + +#define NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS_MESSAGE_ID (0x98U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS { + NvBool bReservation; +} NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET + * + * Reserving / Releasing PERFMON HW + * + * bReservation [IN] + * NV_TRUE -> request for reservation, NV_FALSE -> request for release + * + * bClientHandlesGrGating [IN] + * DM-TODO: Add comment for this + * + * bRmHandlesIdleSlow [IN] + * If the IDLE slowdown is required + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET (0x20800a99) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS_MESSAGE_ID (0x99U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS { + NvBool bReservation; + NvBool bClientHandlesGrGating; + NvBool bRmHandlesIdleSlow; +} NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS; + +/*! 
+ * NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES + * + * Get active display devices + * + * displayMask [OUT] + * Get the mask of the active display devices in VBIOS + * + * numHeads [OUT] + * Number of heads display supported. + * + */ +#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES (0x20800a5d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS_MESSAGE_ID (0x5DU) + +typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS { + + NvU32 displayMask; + NvU32 numHeads; +} NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS; + + + +/* + * NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES + * + * Get FB Mem page ranges for all possible swizzIds + * + * fbMemPageRanges [OUT] + * Mem page ranges for each swizzId in the form of {lo, hi} + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES (0x20800a60) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID 15 + +typedef struct NV2080_CTRL_INTERNAL_NV_RANGE { + NV_DECLARE_ALIGNED(NvU64 lo, 8); + NV_DECLARE_ALIGNED(NvU64 hi, 8); +} NV2080_CTRL_INTERNAL_NV_RANGE; + +#define NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_NV_RANGE fbMemPageRanges[NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID], 8); +} NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS + * + * This command is an internal command sent from Kernel RM to Physical RM + * to get number of channels for a given runlist ID + * + * runlistId [IN] + * numChannels [OUT] + */ +#define NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS (0x20800a61) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS_MESSAGE_ID (0x61U) + +typedef struct NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS { + NvU32 runlistId; + NvU32 numChannels; +} NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PROFILES + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_PROFILES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PROFILES (0x20800a63) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x63" */ + +/*! + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_VALID_SWIZZID_MASK + * @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_VALID_SWIZZID_MASK + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_VALID_SWIZZID_MASK (0x20800a64) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x64" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES + * NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES (0x20800a65) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x65" */ + +/*! 
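+ * Example (illustrative only): computing the page count that
+ * NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES
+ * reports for one swizzId.  This sketch assumes each {lo, hi} range is
+ * inclusive; the header does not state that explicitly.
+ */
+#if 0 // Not compiled; illustrative sketch only.
+static NvU64 examplePagesForSwizzId(const NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS *pParams,
+                                    NvU32 swizzId)
+{
+    if (swizzId >= NV2080_CTRL_INTERNAL_MAX_SWIZZ_ID)
+        return 0;
+
+    // Assumed-inclusive [lo, hi] page range for this swizzId.
+    return pParams->fbMemPageRanges[swizzId].hi -
+           pParams->fbMemPageRanges[swizzId].lo + 1;
+}
+#endif
+
+/*!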
+ * NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES + * NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES (0x20800a66) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x66" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG + * + * This command retrieves memory config from HW + * + * memBoundaryCfgA [OUT] + * Memory boundary config A (4KB aligned) + * + * memBoundaryCfgB [OUT] + * Memory boundary config B (4KB aligned) + * + * memBoundaryCfgC [OUT] + * Memory boundary config C (64KB aligned) + * + * memBoundaryCfg [OUT] + * Memory boundary config (64KB aligned) + * + * memBoundaryCfgValInit [OUT] + * Memory boundary config initial value (64KB aligned) + */ +#define NV2080_CTRL_CMD_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG (0x20800a67) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x67" */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG (0x20800a68) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x68" */ + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS { + NV_DECLARE_ALIGNED(NvU64 memBoundaryCfgA, 8); + NV_DECLARE_ALIGNED(NvU64 memBoundaryCfgB, 8); + NvU32 memBoundaryCfgC; + NvU32 memBoundaryCfg; + NvU32 memBoundaryCfgValInit; +} NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE (0x20800a6b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_SIZE 8 + +#define NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS_MESSAGE_ID (0x6BU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS { + NvU32 data[NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_SIZE]; +} NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT + * + * Invoke RC recovery after watchdog timeout is hit. + */ +#define NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT (0x20800a6a) /* finn: Evaluated from "((FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6a)" */ + +/* ! + * This command disables cuda limit activation at teardown of the client. + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_DISABLE (0x20800a7a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x7A" */ + +/* + * This command is cleaning up OPTP when a client is found to have + * been terminated unexpectedly. + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_OPTP_CLI_CLEAR (0x20800a7c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x7C" */ + + +/*! + * This command can be used to boost P-State up one level or to the highest for a limited + * duration for the associated subdevice. Boosts from different clients are being tracked + * independently. Note that there are other factors that can limit P-States so the resulting + * P-State may differ from expectation. + * + * flags + * This parameter specifies the actual command. _CLEAR is to clear existing boost. + * _BOOST_1LEVEL is to boost P-State one level higher. _BOOST_TO_MAX is to boost + * to the highest P-State. 
+ * duration + * This parameter specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_PERF_BOOST_DURATION_MAX. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_2X (0x20800a9a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X_MESSAGE_ID (0x9AU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X { + + NvBool flags; + NvU32 duration; +} NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X; + +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_PSTATE 0U +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_GPCCLK 1U +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_LAST NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_GPCCLK +#define NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM (0x2U) /* finn: Evaluated from "NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_LAST + 1" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_CONTROL + * + * Activate/Deactivate GPU Boost Sync algorithm + * + * bActivate [IN] + * GPU Boost Sync algorithm: + * NV_TRUE -> activate + * NV_FALSE -> deactivate + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_CONTROL (0x20800a7e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS_MESSAGE_ID (0x7EU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS { + NvBool bActivate; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS + * + * Apply given limits to a specific subdevice + * + * flags [IN] + * DM-TODO: write description here + * + * bBridgeless [IN] + * Bridgeless information, for now supporting only MIO bridges + * + * currLimits + * Array of limits that will be applied + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS (0x20800a7f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_MESSAGE_ID (0x7FU) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS { + NvU32 flags; + NvBool bBridgeless; + NvU32 currLimits[NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM]; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO + * + * Data for GPU Boost Sync structure initialization + * + * hysteresisus [OUT] + * Hysteresis value for GPU Boost synchronization hysteresis algorithm. 
+ * + * bHystersisEnable [OUT] + * hysteresis algorithm for SLI GPU Boost synchronization: + * NV_TRUE -> enabled, + * NV_FALSE -> disabled + * + * bSliGpuBoostSyncEnable [OUT] + * SLI GPU Boost feature is: + * NV_TRUE -> enabled, + * NV_FALSE -> disabled + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO (0x20800a80) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS { + NV_DECLARE_ALIGNED(NvU64 hysteresisus, 8); + NvBool bHystersisEnable; + NvBool bSliGpuBoostSyncEnable; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_FAULT_BUFFER + * + * This command sends replayable fault buffer pages allocated by CPU-RM + * to be setup and enabled in physical RM. + * + * hClient + * Client handle. + * + * hObject + * Object handle. + * + * faultBufferSize + * Size of the replayable fault buffer to register. + * + * faultBufferPteArray + * Pages of replayable fault buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_FAULT_BUFFER (0x20800a9b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GMMU_FAULT_BUFFER_MAX_PAGES 256 +#define NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS_MESSAGE_ID (0x9BU) + +typedef struct NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS { + NvHandle hClient; + NvHandle hObject; + NvU32 faultBufferSize; + NV_DECLARE_ALIGNED(NvU64 faultBufferPteArray[NV2080_CTRL_INTERNAL_GMMU_FAULT_BUFFER_MAX_PAGES], 8); +} NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_FAULT_BUFFER + * + * This command requests physical RM to disable the replayable fault buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_FAULT_BUFFER (0x20800a9c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x9C" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER + * + * This command sends client shadow fault buffer pages allocated by CPU-RM + * to be setup and enabled in physical RM. + * + * shadowFaultBufferQueuePhysAddr + * Physical address of shadow fault buffer queue. + * + * faultBufferSize + * Size of the client shadow fault buffer to register. + * + * shadowFaultBufferPteArray + * Pages of client shadow fault buffer. 
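+ *
+ * Example (illustrative sketch of how a caller might fill the params; the
+ * helper variables are caller-provided and not defined here):
+ *   NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS p = { 0 };
+ *   p.shadowFaultBufferQueuePhysAddr = queuePhysAddr;
+ *   p.shadowFaultBufferSize          = bufferSizeBytes;
+ *   p.shadowFaultBufferPteArray[0]   = firstPagePhysAddr;  // one entry per buffer page
+ *   // then issue NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER with &p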
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER (0x20800a9d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GMMU_CLIENT_SHADOW_FAULT_BUFFER_MAX_PAGES 1500 +#define NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS_MESSAGE_ID (0x9DU) + +typedef struct NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS { + NV_DECLARE_ALIGNED(NvU64 shadowFaultBufferQueuePhysAddr, 8); + NvU32 shadowFaultBufferSize; + NV_DECLARE_ALIGNED(NvU64 shadowFaultBufferPteArray[NV2080_CTRL_INTERNAL_GMMU_CLIENT_SHADOW_FAULT_BUFFER_MAX_PAGES], 8); +} NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER + * + * This command requests physical RM to disable the client shadow fault buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER (0x20800a9e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x9E" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_3X + * + * This command can be used to boost P-State the highest for a limited + * duration for the associated subdevice. Boosts from different clients are being tracked + * independently. Note that there are other factors that can limit P-States so the resulting + * P-State may differ from expectation. + * + * flags [IN] + * This parameter specifies the actual command. _CLEAR is to clear existing boost. + * and _BOOST_TO_MAX is to boost to the highest P-State. + * + * boostDuration [IN] + * This parameter specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_PERF_BOOST_DURATION_MAX. + * + * gfId [IN] + * This specifies Id of the Kernel RM that is requesting the Boost + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_3X (0x20800aa0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X_MESSAGE_ID (0xA0U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X { + + NvU32 flags; + NvU32 boostDuration; + NvU32 gfId; +} NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_CLEAR_3X + * + * Clear the boost for specific Kernel RM + * + * bIsCudaClient [IN] + * Specifies if the request is for clearing the CUDA boost or regular boost + * NV_TRUE -> CUDA boost, NV_FALSE otherwise + * + * gfId [IN] + * Specifies Id of the Kernel RM that is requesting Boost clear + * + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_CLEAR_3X (0x20800aa1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X_MESSAGE_ID (0xA1U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X { + + NvBool bIsCudaClient; + NvU32 gfId; +} NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X; + +/* + * NV2080_CTRL_CMD_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO + * + * Retrieves skyline information about the GPU. 
Params are sized to currently known max + * values, but will need to be modified in the future should that change. + */ +#define NV2080_CTRL_CMD_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO (0x208038a2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GRMGR_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_SKYLINES 8 +#define NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_NON_SINGLETON_VGPCS 8 +/*! + * NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO + * skylineVgpcSize[OUT] + * - TPC count of non-singleton VGPCs + * singletonVgpcMask[OUT] + * - Mask of active Singletons + * maxInstances[OUT] + * - Max allowed instances of this skyline concurrently on a GPU + * computeSizeFlag + * - One of NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_* flags which is associated with this skyline + * numNonSingletonVgpcs + * - Number of VGPCs with non-zero TPC counts which are not singletons + */ +typedef struct NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO { + NvU8 skylineVgpcSize[NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_NON_SINGLETON_VGPCS]; + NvU32 singletonVgpcMask; + NvU32 maxInstances; + NvU32 computeSizeFlag; + NvU32 numNonSingletonVgpcs; +} NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO; + +/*! + * NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS + * skylineTable[OUT] + * - TPC count of non-singleton VGPCs + * - Mask of singleton vGPC IDs active + * - Max Instances of this skyline possible concurrently + * - Associated compute size with the indexed skyline + * - Number of VGPCs with non-zero TPC counts which are not singletons + * validEntries[OUT] + * - Number of entries which contain valid info in skylineInfo + */ +#define NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS_MESSAGE_ID (0xA2U) + +typedef struct NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS { + NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO skylineTable[NV2080_CTRL_INTERNAL_GRMGR_SKYLINE_INFO_MAX_SKYLINES]; + NvU32 validEntries; +} NV2080_CTRL_INTERNAL_STATIC_GRMGR_GET_SKYLINE_INFO_PARAMS; + +/*! + * @ref NV2080_CTRL_CMD_GPU_SET_PARTITIONING_MODE + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_PARTITIONING_MODE (0x20800aa3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA3" */ + +/*! + * @ref NV2080_CTRL_CMD_GPU_CONFIGURE_PARTITION + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_CONFIGURE_GPU_INSTANCE (0x20800aa4) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA4" */ + +/*! + * @ref NV2080_CTRL_CMD_GPU_SET_PARTITIONS + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_GPU_INSTANCES (0x20800aa5) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA5" */ + +/*! + * @ref NV2080_CTRL_CMD_GPU_GET_PARTITIONS + */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_GET_GPU_INSTANCES (0x20800aa6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA6" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED + * + * Tell Physical RM whether any ZBC-kind surfaces are allocated. + * If PF and all VFs report false, ZBC table can be flushed by Physical RM. 
+ * + * bZbcReferenced [IN] + * NV_TRUE -> ZBC-kind (and no _SKIP_ZBCREFCOUNT flag) are allocated in Kernel RM + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED (0x20800a69) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS_MESSAGE_ID (0x69U) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS { + NvBool bZbcSurfacesExist; +} NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE + * + * Export the resource and placement information about a gpu instance such that a + * similar gpu instance can be recreated from scratch in the same position on the + * same or similar GPU. Note that different GPUs may have different physical + * resources due to floorsweeping, and an imported gpu instance is not guaranteed + * to get the exact same resources as the exported gpu instance, but the imported + * gpu instance should behave identically with respect to fragmentation and + * placement / span positioning. + */ +#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE (0x20800aa7) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA7" */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE (0x20800aa8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA8" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE + * + * Create a gpu instance resembling the exported instance info. Note that + * different GPUs may have different physical resources due to floorsweeping, + * and an imported gpu instance is not guaranteed to get the exact same resources + * as the exported gpu instance, but the imported gpu instance should behave + * identically with respect to fragmentation and placement / span positioning. + */ +#define NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE (0x20800aa9) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xA9" */ +#define NV2080_CTRL_CMD_INTERNAL_MIGMGR_IMPORT_GPU_INSTANCE (0x20800aaa) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xAA" */ + +#define NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_MAX_ENGINES_MASK_SIZE 4 +typedef struct NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO { + NV_DECLARE_ALIGNED(NvU64 enginesMask[NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_MAX_ENGINES_MASK_SIZE], 8); + NvU32 partitionFlags; + NvU32 gpcMask; + NvU32 veidOffset; + NvU32 veidCount; +} NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO; + +typedef struct NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS { + NvU32 swizzId; + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO info, 8); +} NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS; + +/*! 
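+ * Example (illustrative only): exporting a GPU instance description and
+ * re-importing it at the same swizzId.  rmControlHelper() is a hypothetical
+ * stand-in for the caller's RM control dispatch path; it is not defined in
+ * this header.
+ */
+#if 0 // Not compiled; illustrative sketch only.
+static NV_STATUS exampleExportImportGpuInstance(NvU32 swizzId)
+{
+    NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS params = { 0 };
+    NV_STATUS status;
+
+    params.swizzId = swizzId;
+
+    // Export fills params.info with the engine masks, partition flags, GPC mask
+    // and VEID layout of the instance currently at this swizzId.
+    status = rmControlHelper(NV2080_CTRL_CMD_INTERNAL_KMIGMGR_EXPORT_GPU_INSTANCE,
+                             &params, sizeof(params));
+    if (status != NV_OK)
+        return status;
+
+    // Import recreates an equivalent instance; physical resources may differ
+    // due to floorsweeping, but placement/span behavior should match.
+    return rmControlHelper(NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE,
+                           &params, sizeof(params));
+}
+#endif
+
+/*!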
+ * NV2080_CTRL_CMD_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT + * + * Invalidate and/or evict the L2 cache + * + * flags [IN] + * flags that specify required actions + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT (0x20800a6c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS_MESSAGE_ID (0x6cU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS { + NvU32 flags; +} NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS; + +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_ALL (0x00000001) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_FIRST (0x00000002) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_LAST (0x00000004) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_NORMAL (0x00000008) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_CLEAN (0x00000010) +#define NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_WAIT_FB_PULL (0x00000020) + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_FLUSH_L2_ALL_RAMS_AND_CACHES + * + * Flush all L2 Rams and Caches using the ELPG flush + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_FLUSH_L2_ALL_RAMS_AND_CACHES (0x20800a6d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6D" */ + + + +/*! + * NV2080_CTRL_CMD_INTERNAL_BIF_GET_STATIC_INFO + * + * This command obtains information from physical RM for use by CPU-RM + * + * Data fields -> + * bPcieGen4Capable - tells whether PCIe is Gen4 capable + */ +#define NV2080_CTRL_CMD_INTERNAL_BIF_GET_STATIC_INFO (0x20800aac) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS_MESSAGE_ID (0xacU) + +typedef struct NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS { + NvBool bPcieGen4Capable; + NvBool bIsC2CLinkUp; + NV_DECLARE_ALIGNED(NvU64 dmaWindowStartAddress, 8); +} NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG + * + * Program HSHUB Peer Conn Config space. + * + * programPeerMask[IN] + * If nonzero, the peer mask for programming peers based on hshub connectivity. + * + * invalidatePeerMask[IN] + * If nonzero, the peer mask for invalidating peers. + * + * programPciePeerMask[IN] + * If nonzero, the peer mask for programming peers in pcie case. + * + * Possible status values returned are: + * NV_OK + * NV_WARN_NOTHING_TO_DO + * If all peer masks are zero. + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG (0x20800a88) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS_MESSAGE_ID (0x88U) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS { + NvU32 programPeerMask; + NvU32 invalidatePeerMask; + NvU32 programPciePeerMask; +} NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_FIRST_LINK_PEER_ID + * + * Given a mask of link ids, find the first with a valid peerId. + * + * linkMask[IN] + * Mask of linkIds to check. + * + * peerId[OUT] + * The peerId for the lowest-index link with a valid peerId, if any. 
+ * If none found, NV2080_CTRLINTERNAL_HSHUB_FIRST_LINK_PEER_ID_INVALID_PEER (return value will still be NV_OK). + * + * Possible status values returned are: + * NV_OK + * + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_FIRST_LINK_PEER_ID (0x20800a89) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_INVALID_PEER 0xffffffff + +#define NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS_MESSAGE_ID (0x89U) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS { + NvU32 linkMask; + NvU32 peerId; +} NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS + * + * Get the Hshub Ids connected to the other end of links. + * + * linkMask[IN] + * A mask of link ids to query. + * + * hshubIds[OUT] + * For each set bit in the link mask, the peer Hshub Id. + * + * Possible status values returned are: + * NV_OK + * NV_WARN_NOTHING_TO_DO + * If the mask is zero. + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS (0x20800a8a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS_MESSAGE_ID" */ +#define NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_TABLE_SIZE 32 + +#define NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS_MESSAGE_ID (0x8aU) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS { + NvU32 linkMask; + NvU8 hshubIds[NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_TABLE_SIZE]; +} NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_NUM_UNITS + * + * Return the number of HSHUB units. + * + * numHshubs[OUT] + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_NUM_UNITS (0x20800a8b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS_MESSAGE_ID (0x8bU) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS { + NvU32 numHshubs; +} NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_HSHUB_NEXT_HSHUB_ID + * + * Return the next hshubId after the given hshubId. + * + * hshubId[IN/OUT] + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_INTERNAL_HSHUB_NEXT_HSHUB_ID (0x20800a8c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS_MESSAGE_ID (0x8cU) + +typedef struct NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS { + NvU8 hshubId; +} NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS; + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR + * + * Enable compute peer addressing mode + * This command accepts no parameters. + */ + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR (0x20800aad) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0xad" */ + +/* + * NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR + * + * Get/Set NVSwitch fabric address for FLA + * + * [In] bGet + * Whether to get or set the NVSwitch fabric address + * [In/Out] addr + * Address that is to be set or retrieved. 
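+ *
+ * Example (illustrative sketch; fabricBaseAddr is a caller-provided value,
+ * not defined here):
+ *   NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS p = { 0 };
+ *   p.bGet = NV_FALSE;        // NV_FALSE -> set the address, NV_TRUE -> retrieve it
+ *   p.addr = fabricBaseAddr;
+ *   // then issue NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR with &p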
+ */ +#define NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS_MESSAGE_ID (0xaeU) + +typedef struct NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS { + NvBool bGet; + NV_DECLARE_ALIGNED(NvU64 addr, 8); +} NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS; + +#define NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR (0x20800aae) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS_MESSAGE_ID" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_BIF_GET_ASPM_L1_FLAGS + * + * This command obtains information from physical RM for use by CPU-RM + * + * Data fields -> + * bCyaMaskL1 + * bEnableAspmDtL1 + */ +#define NV2080_CTRL_CMD_INTERNAL_BIF_GET_ASPM_L1_FLAGS (0x20800ab0) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS_MESSAGE_ID (0xb0U) + +typedef struct NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS { + NvBool bCyaMaskL1; + NvBool bEnableAspmDtL1; +} NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT + * + * Sets number of VM slots that are active in VGPU's scheduler + * + * maxActiveVGpuVMCount [IN] + * Number of VM slots that are active in vGPU's scheduler. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_OUT_OF_RANGE - Passed value is out of range + * NV_ERR_NO_MEMORY - Out of memory + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT (0x20800ab1) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS_MESSAGE_ID" */ + +/*! + * Maximum value of VM slots that are active in vGPU's scheduler. + */ +#define NV2080_CTRL_CMD_INTERNAL_PERF_CF_CONTROLLERS_MAX_ACTIVE_VGPU_VM_COUNT_MAX_VALUE 32 + +#define NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS_MESSAGE_ID (0xB1U) + +typedef struct NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS { + NvU8 maxActiveVGpuVMCount; +} NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS; + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_DISABLE_NVLINK_PEERS + * + * Disable all NVLINK FB peers + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_DISABLE_NVLINK_PEERS (0x20800a6e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | 0x6E" */ + +/*! + * NV2080_CTRL_CMD_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE + * + * Program GPU in raw / legacy compression mode + * + */ +#define NV2080_CTRL_CMD_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE (0x20800a6f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS_MESSAGE_ID (0x6fU) + +typedef struct NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS { + NvBool bRawMode; +} NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS; + + + +/*! + * NV2080_CTRL_CMD_INTERNAL_BUS_GET_PCIE_P2P_CAPS + * + * This command returns the GPU's PCIE P2P caps + * + * [in] bCommonPciSwitchFound + * All GPUs are under the same PCI switch + * [out] p2pReadCapsStatus + * [out] p2pWriteCapsStatus + * These members returns status of all supported p2p capabilities. 
Valid + * status values include: + * NV0000_P2P_CAPS_STATUS_OK + * P2P capability is supported. + * NV0000_P2P_CAPS_STATUS_CHIPSET_NOT_SUPPORTED + * Chipset doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED + * GPU doesn't support p2p capability. + * NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED + * IOH topology isn't supported. For e.g. root ports are on different + * IOH. + * NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY + * P2P Capability is disabled by a regkey. + * NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED + * P2P Capability is not supported. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_INTERNAL_GET_PCIE_P2P_CAPS (0x20800ab8) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS_MESSAGE_ID (0xB8U) + +typedef struct NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS { + NvBool bCommonPciSwitchFound; + NvU8 p2pReadCapsStatus; + NvU8 p2pWriteCapsStatus; +} NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS; + +/* ctrl2080internal_h */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h new file mode 100644 index 000000000..1ca6add91 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080lpwr.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080lpwr.finn +// + + + +// _ctrl2080lpwr_h_ + + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h new file mode 100644 index 000000000..dcdebaeb4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080mc.h @@ -0,0 +1,324 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080mc.finn +// + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX mc control commands and parameters */ + +/** + * NV2080_CTRL_CMD_MC_GET_ARCH_INFO + * + * This command returns chip architecture information from the + * master control engine in the specified GPU. + * + * architecture + * This parameter specifies the architecture level for the GPU. + * implementation + * This parameter specifies the implementation of the architecture + * for the GPU. + * revision + * This parameter specifies the revision of the mask used to produce + * the GPU. + * subRevision + * This parameter specific the sub revision of the GPU. 
Value is one of + * NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_* + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_MC_GET_ARCH_INFO (0x20801701) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS { + NvU32 architecture; + NvU32 implementation; + NvU32 revision; + NvU8 subRevision; +} NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS; + +/* valid architecture values */ + + +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_T23X (0xE0000023) + + +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_TU100 (0x00000160) +#define NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_GA100 (0x00000170) + + + +/* valid ARCHITECTURE_T23X implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_T234 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_T234D (0x00000005) + + +/* valid ARCHITECTURE_TU10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU100 (0x00000000) + + +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU102 (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU104 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU106 (0x00000006) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU116 (0x00000008) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_TU117 (0x00000007) + + +/* valid ARCHITECTURE_GA10x implementation values */ +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA100 (0x00000000) + + +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA102 (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA103 (0x00000003) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA104 (0x00000004) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA106 (0x00000006) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA107 (0x00000007) +#define NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_GA10B (0x0000000B) + + + +/* Valid Chip sub revisions */ +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_NO_SUBREVISION (0x00000000) +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_P (0x00000001) +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_Q (0x00000002) +#define NV2080_CTRL_MC_ARCH_INFO_SUBREVISION_R (0x00000003) + +/* + * NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS + * + * This command instructs the RM to service interrupts for the specified + * engine(s). + * + * engines + * This parameter specifies which engines should have their interrupts + * serviced. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS (0x20801702) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_ENGINE_ID_GRAPHICS 0x00000001 +#define NV2080_CTRL_MC_ENGINE_ID_ALL 0xFFFFFFFF + +#define NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS { + NvU32 engines; +} NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS; + + +/* + * NV2080_CTRL_CMD_MC_GET_MANUFACTURER + * + * This command returns the GPU manufacturer information for the associated + * subdevice. + * + * manufacturer + * This parameter returns the manufacturer value for the GPU. 
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+#define NV2080_CTRL_CMD_MC_GET_MANUFACTURER (0x20801703) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS {
+    NvU32 manufacturer;
+} NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS
+ *
+ * This command is used to allow clients to query whether hostclk slowdown is
+ * disabled.
+ *
+ *   bDisabled
+ *     This parameter will hold the status of hostclk slowdown
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV2080_CTRL_CMD_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS (0x20801708) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS_MESSAGE_ID (0x8U)
+
+typedef struct NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS {
+    NvBool bDisabled;
+} NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_MC_SET_HOSTCLK_SLOWDOWN_STATUS
+ *
+ * This command is used to allow clients to disable/enable hostclk slowdown.
+ *
+ *   bDisable
+ *     When this parameter is set to TRUE, RM should disable hostclk slowdown.
+ *     If it is set to FALSE, RM will attempt to enable hostclk slowdown, but
+ *     in this case, slowdown is NOT guaranteed to be enabled since there may
+ *     be other reasons (like a regkey) preventing slowdown.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *
+ */
+#define NV2080_CTRL_CMD_MC_SET_HOSTCLK_SLOWDOWN_STATUS (0x20801709) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS {
+    NvBool bDisable;
+} NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS;
+
+
+
+/*
+ * NV2080_CTRL_CMD_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP
+ *
+ * This call will set up RM to either service or ignore the
+ * replayable fault interrupt.
+ * This is a privileged call that can only be called by the UVM driver
+ * when it will take ownership of the replayable fault interrupt.
+ *
+ * Possible status values returned are:
+ *   NVOS_STATUS_SUCCESS
+ *   NVOS_STATUS_ERROR_INVALID_ARGUMENT
+ *   NVOS_STATUS_ERROR_NOT_SUPPORTED
+ */
+
+#define NV2080_CTRL_CMD_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP (0x2080170c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS {
+    NvBool bOwnedByRm;
+} NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS;
+
+/*
+ * NV2080_CTRL_CMD_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS
+ *
+ * This command gets the notification interrupt vectors for all VGPU engines from Host RM.
+ *
+ * Parameters:
+ *
+ *   entries [out]
+ *     A buffer to store up to MAX_ENGINES entries of type
+ *     NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY.
+ *
+ *   numEntries [out]
+ *     Number of populated entries in the provided buffer.
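+ *
+ * Example (illustrative sketch): after a successful call, only the first
+ * numEntries elements of entries[] are valid, e.g.
+ *   for (i = 0; i < p.numEntries; i++)
+ *   {
+ *       // p.entries[i].nv2080EngineType identifies the engine and
+ *       // p.entries[i].notificationIntrVector is its notification interrupt vector.
+ *   }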
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS (0x2080170d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_MAX_ENGINES 256 + +typedef struct NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY { + NvU32 nv2080EngineType; + NvU32 notificationIntrVector; +} NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY; + +#define NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS { + NvU32 numEntries; + NV2080_CTRL_MC_ENGINE_NOTIFICATION_INTR_VECTOR_ENTRY entries[NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_MAX_ENGINES]; +} NV2080_CTRL_MC_GET_ENGINE_NOTIFICATION_INTR_VECTORS_PARAMS; + +/* + * NV2080_CTRL_CMD_MC_GET_STATIC_INTR_TABLE + * + * This command gets the static interrupts needed by VGPU from Host RM. + * + * Parameters: + * + * entries [out] + * A buffer to store up to MAX_ENGINES entries of type + * NV2080_CTRL_MC_STATIC_INTR_ENTRY. + * + * numEntries [out] + * Number of populated entries in the provided buffer. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_MC_GET_STATIC_INTR_TABLE (0x2080170e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID << 8) | NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX 32 + +// Interface defines for static MC_ENGINE_IDX defines +#define NV2080_INTR_TYPE_NULL (0x00000000) +#define NV2080_INTR_TYPE_NON_REPLAYABLE_FAULT (0x00000001) +#define NV2080_INTR_TYPE_NON_REPLAYABLE_FAULT_ERROR (0x00000002) +#define NV2080_INTR_TYPE_INFO_FAULT (0x00000003) +#define NV2080_INTR_TYPE_REPLAYABLE_FAULT (0x00000004) +#define NV2080_INTR_TYPE_REPLAYABLE_FAULT_ERROR (0x00000005) +#define NV2080_INTR_TYPE_ACCESS_CNTR (0x00000006) +#define NV2080_INTR_TYPE_TMR (0x00000007) +#define NV2080_INTR_TYPE_CPU_DOORBELL (0x00000008) +#define NV2080_INTR_TYPE_GR0_FECS_LOG (0x00000009) +#define NV2080_INTR_TYPE_GR1_FECS_LOG (0x0000000A) +#define NV2080_INTR_TYPE_GR2_FECS_LOG (0x0000000B) +#define NV2080_INTR_TYPE_GR3_FECS_LOG (0x0000000C) +#define NV2080_INTR_TYPE_GR4_FECS_LOG (0x0000000D) +#define NV2080_INTR_TYPE_GR5_FECS_LOG (0x0000000E) +#define NV2080_INTR_TYPE_GR6_FECS_LOG (0x0000000F) +#define NV2080_INTR_TYPE_GR7_FECS_LOG (0x00000010) + +typedef struct NV2080_CTRL_MC_STATIC_INTR_ENTRY { + NvU32 nv2080IntrType; + NvU32 pmcIntrMask; + NvU32 intrVectorStall; + NvU32 intrVectorNonStall; +} NV2080_CTRL_MC_STATIC_INTR_ENTRY; + +#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS { + NvU32 numEntries; + NV2080_CTRL_MC_STATIC_INTR_ENTRY entries[NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX]; +} NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_PARAMS; + +/* _ctrl2080mc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h new file mode 100644 index 000000000..8c428735b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvd.h @@ -0,0 +1,342 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080nvd.finn +// + + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +#include "ctrl/ctrlxxxx.h" +/* + * NV2080_CTRL_CMD_NVD_GET_DUMP_SIZE + * + * This command gets the expected dump size of a particular GPU dump component. + * Note that events that occur between this command and a later + * NV2080_CTRL_CMD_NVD_GET_DUMP command could alter the size of + * the buffer required. + * + * component + * One of NVDUMP_COMPONENT < 0x400 defined in nvdump.h to estimate + * the size of. + * size + * This parameter returns the expected size. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT if component is invalid. + * + */ + +#define NV2080_CTRL_CMD_NVD_GET_DUMP_SIZE (0x20802401) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS { + NvU32 component; + NvU32 size; +} NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVD_GET_DUMP + * + * This command gets a dump of a particular GPU dump component. If triggers + * is non-zero, the command waits for the trigger to occur + * before it returns. + * + * pBuffer + * This parameter points to the buffer for the data. + * component + * One of NVDUMP_COMPONENT < 0x400 defined in nvdump.h to select + * for dumping. + * size + * On entry, this parameter specifies the maximum length for + * the returned data. On exit, it specifies the number of bytes + * returned. + * + * Possible status values returned are: + * NV_OK + * NVOS_ERROR_INVALID_ARGUMENT if component is invalid. + * NVOS_ERROR_INVALID_ADDRESS if pBuffer is invalid + * NVOS_ERROR_INVALID_???? 
if the buffer was too small + * + * + */ +#define NV2080_CTRL_CMD_NVD_GET_DUMP (0x20802402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVD_GET_DUMP_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_NVD_GET_DUMP_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pBuffer, 8); + NvU32 component; + NvU32 size; +} NV2080_CTRL_NVD_GET_DUMP_PARAMS; + +/* + * NV2080_CTRL_CMD_NVD_GET_NOCAT_JOURNAL + * + * This command returns the contents of the Journal used by NOCAT, and + * optionally clears the data + * + * clear: + * [IN] indicates if should the data be cleared after reporting + * + * JournalRecords : + * [OUT] an array of Journal records reported. + * + * outstandingAssertCount: + * [OUT] number of asserts that remain to be reported on. + * + * reportedAssertCount: + * [OUT] the number of asserts contained in the report + * + * asserts: + * [OUT] an array of up to NV2080_NOCAT_JOURNAL_MAX_ASSERT_RECORDS assert reports + */ + + +#define NV2080_CTRL_CMD_NVD_GET_NOCAT_JOURNAL (0x20802409) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS_MESSAGE_ID" */ + +#define NV2080_NOCAT_JOURNAL_MAX_DIAG_BUFFER 1024 +#define NV2080_NOCAT_JOURNAL_MAX_STR_LEN 65 +#define NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS 10 +#define NV2080_NOCAT_JOURNAL_MAX_ASSERT_RECORDS 32 + +// structure to hold clock details. +typedef struct NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS { + NvS32 userMinOffset; + NvS32 userMaxOffset; + NvU32 factoryMinOffset; + NvU32 factoryMaxOffset; + NvU32 lastActiveClock; + NvU32 lastActiveVolt; + NvU32 lastActivePoint; + NvU32 kappa; +} NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS; + + +// structure to hold clock configuration & state. +typedef struct NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG { + NvU32 pstateVer; + NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS gpcOverclock; + NV2080_NOCAT_JOURNAL_OVERCLOCK_DETAILS mclkOverclock; + NvBool bUserOverclocked; + NvBool bFactoryOverclocked; +} NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG; + +// structure to hold the GPU context at the time of the report. 
+typedef struct NV2080_NOCAT_JOURNAL_GPU_STATE { + NvBool bValid; + NvU32 strap; + NvU16 deviceId; + NvU16 vendorId; + NvU16 subsystemVendor; + NvU16 subsystemId; + NvU16 revision; + NvU16 type; + NvU32 vbiosVersion; + NvBool bOptimus; + NvBool bMsHybrid; + NvBool bFullPower; + NvU32 vbiosOemVersion; + NvU16 memoryType; + NvU8 tag[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU8 vbiosProject[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvBool bInFullchipReset; + NvBool bInSecBusReset; + NvBool bInGc6Reset; + NV2080_NOCAT_JOURNAL_OVERCLOCK_CFG overclockCfg; +} NV2080_NOCAT_JOURNAL_GPU_STATE; + +#define NV2080_NOCAT_JOURNAL_REC_TYPE_UNKNOWN 0 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_BUGCHECK 1 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ENGINE 2 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_TDR 3 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_RC 4 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ASSERT 5 +#define NV2080_NOCAT_JOURNAL_REC_TYPE_ANY 6 + +// this should be relative to the highest type value +#define NV2080_NOCAT_JOURNAL_REC_TYPE_COUNT (0x7) /* finn: Evaluated from "NV2080_NOCAT_JOURNAL_REC_TYPE_ANY + 1" */ +typedef struct NV2080_NOCAT_JOURNAL_ENTRY { + NvU8 recType; + NvU32 bugcheck; + NvU32 tdrBucketId; + NvU8 source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 subsystem; + NV_DECLARE_ALIGNED(NvU64 errorCode, 8); + NvU32 diagBufferLen; + NvU8 diagBuffer[NV2080_NOCAT_JOURNAL_MAX_DIAG_BUFFER]; + NvU8 faultingEngine[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 mmuFaultType; + NvU32 mmuErrorSrc; + NvU8 tdrReason[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; +} NV2080_NOCAT_JOURNAL_ENTRY; + +typedef struct NV2080_NOCAT_JOURNAL_RECORD { + NvU32 GPUTag; + NV_DECLARE_ALIGNED(NvU64 loadAddress, 8); + NV_DECLARE_ALIGNED(NvU64 timeStamp, 8); + NV_DECLARE_ALIGNED(NvU64 stateMask, 8); + NV2080_NOCAT_JOURNAL_GPU_STATE nocatGpuState; + NV_DECLARE_ALIGNED(NV2080_NOCAT_JOURNAL_ENTRY nocatJournalEntry, 8); +} NV2080_NOCAT_JOURNAL_RECORD; + +// NOCAT activity counter indexes +// collection activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_REQ_IDX 0 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_ALLOCATED_IDX 1 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECTED_IDX 2 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_ALLOC_FAILED_IDX 3 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_FAILED_IDX 4 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_REQ_DROPPED_IDX 5 + +// reporting activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_REQUESTED_IDX 6 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_REPORTED_IDX 7 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_DROPPED_IDX 8 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_MISSED_IDX 9 + +// update activity +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATE_REQ_IDX 10 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATED_IDX 11 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATE_FAILED_IDX 12 + +// general errors +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BUSY_IDX 13 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_PARAM_IDX 14 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_TYPE_IDX 15 + +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES1_IDX 16 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES2_IDX 17 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_CACHE_UPDATE_IDX 18 +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_INSERT_RECORDS_IDX 19 + +// this should be relative to the highest counter index +#define NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COUNTER_COUNT (0x14) /* finn: Evaluated from "NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_INSERT_RECORDS_IDX + 1" */ + +#define 
NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY 0:0 +#define NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY_YES 1 +#define NV2080_CTRL_NOCAT_GET_COUNTERS_ONLY_NO 0 + +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS 1:1 +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS_YES 1 +#define NV2080_CTRL_NOCAT_GET_RESET_COUNTERS_NO 0 + + +#define NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS { + NvU32 flags; + NvU32 nocatRecordCount; + NvU32 nocatOutstandingRecordCount; + NV_DECLARE_ALIGNED(NV2080_NOCAT_JOURNAL_RECORD journalRecords[NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS], 8); + NvU32 activityCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COUNTER_COUNT]; + NvU8 reserved[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; +} NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS; + + /* + * NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA + * + * This command reports the TDR data collected by KMD to be added to the + * nocat record + * + * dataType: + * [IN] specifies the type of data provided. + * targetRecordType + * [IN] specifies record type the data is intended for. + * nocatJournalData + * [IN] specifies the data to be added. + */ + +#define NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA (0x2080240b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID << 8) | NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS_MESSAGE_ID" */ + +// data types & structures +#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_EMPTY 0 +#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_TDR_REASON 1 +#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_INSERT_RECORD 2 +#define NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_SET_TAG 3 + +#define NV2080_CTRL_NOCAT_TDR_TYPE_NONE 0 +#define NV2080_CTRL_NOCAT_TDR_TYPE_LEGACY 1 +#define NV2080_CTRL_NOCAT_TDR_TYPE_FULLCHIP 2 +#define NV2080_CTRL_NOCAT_TDR_TYPE_BUSRESET 3 +#define NV2080_CTRL_NOCAT_TDR_TYPE_GC6_RESET 4 +#define NV2080_CTRL_NOCAT_TDR_TYPE_SURPRISE_REMOVAL 5 +#define NV2080_CTRL_NOCAT_TDR_TYPE_UCODE_RESET 6 +#define NV2080_CTRL_NOCAT_TDR_TYPE_TEST 7 + +typedef struct NV2080CtrlNocatJournalDataTdrReason { + NvU32 flags; + NvU8 source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 subsystem; + NV_DECLARE_ALIGNED(NvU64 errorCode, 8); + NvU32 reasonCode; +} NV2080CtrlNocatJournalDataTdrReason; + +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR 0:0 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR_YES 1 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_NULL_STR_NO 0 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER 1:1 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER_YES 1 +#define NV2080_CTRL_NOCAT_INSERT_ALLOW_0_LEN_BUFFER_NO 0 +typedef struct NV2080CtrlNocatJournalInsertRecord { + NvU32 flags; + NvU8 recType; + NvU32 bugcheck; + NvU8 source[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 subsystem; + NV_DECLARE_ALIGNED(NvU64 errorCode, 8); + NvU8 faultingEngine[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; + NvU32 tdrReason; +} NV2080CtrlNocatJournalInsertRecord; + +#define NV2080_CTRL_NOCAT_TAG_CLEAR 0:0 +#define NV2080_CTRL_NOCAT_TAG_CLEAR_YES 1 +#define NV2080_CTRL_NOCAT_TAG_CLEAR_NO 0 +typedef struct NV2080CtrlNocatJournalSetTag { + NvU32 flags; + NvU8 tag[NV2080_NOCAT_JOURNAL_MAX_STR_LEN]; +} NV2080CtrlNocatJournalSetTag; + +#define NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS { + NvU32 dataType; + NvU32 targetRecordType; + union { + NV_DECLARE_ALIGNED(NV2080CtrlNocatJournalDataTdrReason tdrReason, 8); + NV_DECLARE_ALIGNED(NV2080CtrlNocatJournalInsertRecord insertData, 8); + NV2080CtrlNocatJournalSetTag tagData; + } 
nocatJournalData; +} NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS; +/* _ctr2080nvd_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h new file mode 100644 index 000000000..9cbfd5a66 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080nvlink.h @@ -0,0 +1,2723 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080nvlink.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX bus control commands and parameters */ + +/* + * NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS + * + * This command returns the NVLink capabilities supported by the subdevice. + * + * capsTbl + * This is bit field for getting different global caps. The individual bitfields are specified by NV2080_CTRL_NVLINK_CAPS_* + * lowestNvlinkVersion + * This field specifies the lowest supported NVLink version for this subdevice. + * highestNvlinkVersion + * This field specifies the highest supported NVLink version for this subdevice. + * lowestNciVersion + * This field specifies the lowest supported NCI version for this subdevice. + * highestNciVersion + * This field specifies the highest supported NCI version for this subdevice. + * discoveredLinkMask + * This field provides a bitfield mask of NVLink links discovered on this subdevice. + * enabledLinkMask + * This field provides a bitfield mask of NVLink links enabled on this subdevice. + * + */ +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS { + NvU32 capsTbl; + + NvU8 lowestNvlinkVersion; + NvU8 highestNvlinkVersion; + NvU8 lowestNciVersion; + NvU8 highestNciVersion; + + NvU32 discoveredLinkMask; + NvU32 enabledLinkMask; +} NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS; + +/* extract cap bit setting from tbl */ +#define NV2080_CTRL_NVLINK_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* + * NV2080_CTRL_NVLINK_CAPS + * + * SUPPORTED + * Set if NVLink is present and supported on this subdevice, NV_FALSE otherwise. This field is used for *global* caps only and NOT for per-link caps + * P2P_SUPPORTED + * Set if P2P over NVLink is supported on this subdevice, NV_FALSE otherwise. 
+ * SYSMEM_ACCESS + * Set if sysmem can be accessed over NVLink on this subdevice, NV_FALSE otherwise. + * PEER_ATOMICS + * Set if P2P atomics are supported over NVLink on this subdevice, NV_FALSE otherwise. + * SYSMEM_ATOMICS + * Set if sysmem atomic transcations are supported over NVLink on this subdevice, NV_FALSE otherwise. + * PEX_TUNNELING + * Set if PEX tunneling over NVLink is supported on this subdevice, NV_FALSE otherwise. + * SLI_BRIDGE + * GLOBAL: Set if SLI over NVLink is supported on this subdevice, NV_FALSE otherwise. + * LINK: Set if SLI over NVLink is supported on a link, NV_FALSE otherwise. + * SLI_BRIDGE_SENSABLE + * GLOBAL: Set if the subdevice is capable of sensing SLI bridges, NV_FALSE otherwise. + * LINK: Set if the link is capable of sensing an SLI bridge, NV_FALSE otherwise. + * POWER_STATE_L0 + * Set if L0 is a supported power state on this subdevice/link, NV_FALSE otherwise. + * POWER_STATE_L1 + * Set if L1 is a supported power state on this subdevice/link, NV_FALSE otherwise. + * POWER_STATE_L2 + * Set if L2 is a supported power state on this subdevice/link, NV_FALSE otherwise. + * POWER_STATE_L3 + * Set if L3 is a supported power state on this subdevice/link, NV_FALSE otherwise. + * VALID + * Set if this link is supported on this subdevice, NV_FALSE otherwise. This field is used for *per-link* caps only and NOT for global caps. + * + */ + +/* caps format is byte_index:bit_mask */ +#define NV2080_CTRL_NVLINK_CAPS_SUPPORTED 0:0x01 +#define NV2080_CTRL_NVLINK_CAPS_P2P_SUPPORTED 0:0x02 +#define NV2080_CTRL_NVLINK_CAPS_SYSMEM_ACCESS 0:0x04 +#define NV2080_CTRL_NVLINK_CAPS_P2P_ATOMICS 0:0x08 +#define NV2080_CTRL_NVLINK_CAPS_SYSMEM_ATOMICS 0:0x10 +#define NV2080_CTRL_NVLINK_CAPS_PEX_TUNNELING 0:0x20 +#define NV2080_CTRL_NVLINK_CAPS_SLI_BRIDGE 0:0x40 +#define NV2080_CTRL_NVLINK_CAPS_SLI_BRIDGE_SENSABLE 0:0x80 +#define NV2080_CTRL_NVLINK_CAPS_POWER_STATE_L0 1:0x01 +#define NV2080_CTRL_NVLINK_CAPS_POWER_STATE_L1 1:0x02 +#define NV2080_CTRL_NVLINK_CAPS_POWER_STATE_L2 1:0x04 +#define NV2080_CTRL_NVLINK_CAPS_POWER_STATE_L3 1:0x08 +#define NV2080_CTRL_NVLINK_CAPS_VALID 1:0x10 + +/* + * Size in bytes of nvlink caps table. This value should be one greater + * than the largest byte_index value above. 
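+ *
+ * For example (an illustrative sketch only; "capsParams" and "caps" are
+ * hypothetical caller variables), the capsTbl word returned by
+ * NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS can be copied into a byte array of
+ * this size and then tested with NV2080_CTRL_NVLINK_GET_CAP:
+ *
+ *     NvU8 caps[NV2080_CTRL_NVLINK_CAPS_TBL_SIZE];
+ *     memcpy(caps, &capsParams.capsTbl, sizeof(caps)); // assumes byte 0 is in the LSB
+ *     if (NV2080_CTRL_NVLINK_GET_CAP(caps, NV2080_CTRL_NVLINK_CAPS_P2P_SUPPORTED))
+ *     {
+ *         // P2P over NVLink is reported as supported on this subdevice.
+ *     }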
+ */ +#define NV2080_CTRL_NVLINK_CAPS_TBL_SIZE 2U + +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_INVALID (0x00000000U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_0 (0x00000002U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_2 (0x00000004U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_0 (0x00000005U) +#define NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_1 (0x00000006U) + + +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_INVALID (0x00000000U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_2_0 (0x00000002U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_2_2 (0x00000004U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_3_0 (0x00000005U) +#define NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_3_1 (0x00000006U) + + + +/* + * NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS (0x20803001U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_NVLINK_DEVICE_INFO + * + * This structure stores information about the device to which this link is associated + * + * deviceIdFlags + * Bitmask that specifies which IDs are valid for the device + * Refer NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_* for possible values + * If NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_PCI is set, PCI information is valid + * If NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_UUID is set, UUID is valid + * domain, bus, device, function, pciDeviceId + * PCI information for the device + * deviceType + * Type of the device + * See NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_* for possible values + * deviceUUID + * This field specifies the device UUID of the device. Useful for identifying the device (or version) + */ +typedef struct NV2080_CTRL_NVLINK_DEVICE_INFO { + // ID Flags + NvU32 deviceIdFlags; + + // PCI Information + NvU32 domain; + NvU16 bus; + NvU16 device; + NvU16 function; + NvU32 pciDeviceId; + + // Device Type + NV_DECLARE_ALIGNED(NvU64 deviceType, 8); + + // Device UUID + NvU8 deviceUUID[16]; +} NV2080_CTRL_NVLINK_DEVICE_INFO; + +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS 31:0 +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_NONE (0x00000000U) +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_PCI (0x00000001U) +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_UUID (0x00000002U) + +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_EBRIDGE (0x00000000U) +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NPU (0x00000001U) +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU (0x00000002U) +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH (0x00000003U) +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_TEGRA (0x00000004U) +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE (0x000000FFU) + +#define NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_UUID_INVALID (0xFFFFFFFFU) + +/* + * NV2080_CTRL_NVLINK_NVLINK_LINK_STATUS_INFO + * + * This structure stores the per-link status of different NVLink parameters. + * + * capsTbl + * This is bit field for getting different global caps. The individual bitfields + * are specified by NV2080_CTRL_NVLINK_CAPS_* + * phyType + * This field specifies the type of PHY (NVHS or GRS) being used for this link. + * subLinkWidth + * This field specifies the no. 
of lanes per sublink. + * linkState + * This field specifies the current state of the link. + * See NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_LINK_STATE_* for possible values. + * rxSublinkStatus + * This field specifies the current state of RX sublink. + * See NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_SUBLINK_RX_STATE_* for possible values. + * txSublinkStatus + * This field specifies the current state of TX sublink. + * See NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_SUBLINK_TX_STATE_* for possible values. + * bLaneReversal + * This field indicates that lane reversal is in effect on this link. + * nvlinkVersion + * This field specifies the NVLink version supported by the link. + * nciVersion + * This field specifies the NCI version supported by the link. + * phyVersion + * This field specifies the version of PHY being used by the link. + * nvlinkLineRateMbps + * Bit rate at which bits toggle on wires in megabits per second. + * NOTE: This value is the full speed line rate, not the instantaneous line rate of the link. + * nvlinkLinkClockMhz + * Clock corresponding to link logic in mega hertz + * nvlinkRefClkType + * This field specifies whether the link clock is taken from NVHS reflck + * or PEX refclk for the current GPU. + * nvlinkLinkDataRateKiBps + * Effective rate available for transactions after subtracting overhead, + * as seen at Data Layer in kibibytes (1024 bytes) per second. + * Only valid in GA100+, reported as 0 otherwise + * NOTE: Because minion calculates these values, it will only be valid if + * links are in ACTIVE state + * nvlinkRefClkSpeedMhz + * The input reference frequency to the PLL + * connected + * This field specifies if any device is connected on the other end of the link + * loopProperty + * This field specifies if the link is a loopback/loopout link. See NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_* for possible values. + * remoteDeviceLinkNumber + * This field specifies the link number on the remote end of the link + * remoteDeviceInfo + * This field stores the device information for the remote end of the link + * + */ +typedef struct NV2080_CTRL_NVLINK_LINK_STATUS_INFO { + // Top level capablilites + NvU32 capsTbl; + + NvU8 phyType; + NvU8 subLinkWidth; + + // Link and sublink states + NvU32 linkState; + NvU8 rxSublinkStatus; + NvU8 txSublinkStatus; + + // Indicates that lane reversal is in effect on this link. 
+ NvBool bLaneReversal; + + NvU8 nvlinkVersion; + NvU8 nciVersion; + NvU8 phyVersion; + + // Legacy clock information (to be deprecated) + NvU32 nvlinkLinkClockKHz; + NvU32 nvlinkCommonClockSpeedKHz; + NvU32 nvlinkRefClkSpeedKHz; + + NvU32 nvlinkCommonClockSpeedMhz; + + // Clock Speed and Data Rate Reporting + NvU32 nvlinkLineRateMbps; + NvU32 nvlinkLinkClockMhz; + NvU8 nvlinkRefClkType; + NvU32 nvlinkLinkDataRateKiBps; + NvU32 nvlinkRefClkSpeedMhz; + + // Connection information + NvBool connected; + NvU8 loopProperty; + NvU8 remoteDeviceLinkNumber; + NvU8 localDeviceLinkNumber; + + // + // Added as part of NvLink 3.0 + // Note: SID has link info appended to it when provided by minion + // + NV_DECLARE_ALIGNED(NvU64 remoteLinkSid, 8); + NV_DECLARE_ALIGNED(NvU64 localLinkSid, 8); + + // Ampere+ only + NvU32 laneRxdetStatusMask; + + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_DEVICE_INFO remoteDeviceInfo, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_DEVICE_INFO localDeviceInfo, 8); +} NV2080_CTRL_NVLINK_LINK_STATUS_INFO; + +// NVLink link states +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_INIT (0x00000000U) +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_HWCFG (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_SWCFG (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_ACTIVE (0x00000003U) +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_FAULT (0x00000004U) + + +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_RECOVERY (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_RECOVERY_AC (0x00000008U) +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_RECOVERY_RX (0x0000000aU) +#define NV2080_CTRL_NVLINK_STATUS_LINK_STATE_INVALID (0xFFFFFFFFU) + +// NVLink Rx sublink states +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_HIGH_SPEED_1 (0x00000000U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_SINGLE_LANE (0x00000004U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_TRAINING (0x00000005U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_SAFE_MODE (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_OFF (0x00000007U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_TEST (0x00000008U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_FAULT (0x0000000eU) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_INVALID (0x000000FFU) + +// NVLink Tx sublink states +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_HIGH_SPEED_1 (0x00000000U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_SINGLE_LANE (0x00000004U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_TRAINING (0x00000005U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_SAFE_MODE (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_OFF (0x00000007U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_TEST (0x00000008U) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_FAULT (0x0000000eU) +#define NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_STATUS_PHY_NVHS (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_PHY_GRS (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_PHY_INVALID (0x000000FFU) + +// Version information +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_2_0 (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_2_2 (0x00000004U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_3_0 (0x00000005U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_3_1 (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_4_0 (0x00000007U) +#define 
NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_2_0 (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_2_2 (0x00000004U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_3_0 (0x00000005U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_3_1 (0x00000006U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_4_0 (0x00000007U) +#define NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_STATUS_NVHS_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_NVHS_VERSION_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_STATUS_GRS_VERSION_1_0 (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_GRS_VERSION_INVALID (0x000000FFU) + +// Connection properties +#define NV2080_CTRL_NVLINK_STATUS_CONNECTED_TRUE (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_CONNECTED_FALSE (0x00000000U) + +#define NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_LOOPBACK (0x00000001U) +#define NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_LOOPOUT (0x00000002U) +#define NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_NONE (0x00000000U) + +#define NV2080_CTRL_NVLINK_STATUS_REMOTE_LINK_NUMBER_INVALID (0x000000FFU) + +#define NV2080_CTRL_NVLINK_MAX_LINKS 32 + +// NVLink REFCLK types +#define NV2080_CTRL_NVLINK_REFCLK_TYPE_INVALID (0x00U) +#define NV2080_CTRL_NVLINK_REFCLK_TYPE_NVHS (0x01U) +#define NV2080_CTRL_NVLINK_REFCLK_TYPE_PEX (0x02U) + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS { + NvU32 enabledLinkMask; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_LINK_STATUS_INFO linkInfo[NV2080_CTRL_NVLINK_MAX_LINKS], 8); +} NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS + * + * enabledLinkMask + * This field specifies the mask of available links on this subdevice. + * linkInfo + * This structure stores the per-link status of different NVLink parameters. The link is identified using an index. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS (0x20803002U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_NVLINK_ERR_INFO + * Error information per link + * + * TLErrlog + * Returns the error mask for NVLINK TL errors + * Used in Pascal + * + * TLIntrEn + * Returns the intr enable mask for NVLINK TL errors + * Used in Pascal + * + * TLCTxErrStatus0 + * Returns the TLC Tx Error Mask 0 + * Used in Volta and later + * + * TLCTxErrStatus1 + * Returns the TLC Tx Error Mask 1 + * Used in Ampere and later + * + * TLCTxSysErrStatus0 + * Returns the TLC Tx Sys Error Mask 0 + * Used in Ampere and later. + * + * TLCRxErrStatus0 + * Returns the TLC Rx Error Mask 0 + * Used in Volta and later + * + * TLCRxErrStatus1 + * Returns the TLC Rx Error Mask 1 + * Used in Volta and later + * + * TLCRxSysErrStatus0 + * Returns the TLC Rx Sys Error Mask 0 + * Used in Ampere and later. 
+ * + * TLCTxErrLogEn0 + * Returns the TLC Tx Error Log En 0 + * Used in Volta and later + * + * TLCTxErrLogEn1 + * Returns the TLC Tx Error Log En 1 + * Used in Ampere and later + * + * TLCTxSysErrLogEn0 + * Returns the TLC Tx Sys Error Log En 0 + * Used in Ampere and later + * + * TLCRxErrLogEn0 + * Returns the TLC Rx Error Log En 0 + * Used in Volta and later + * + * TLCRxErrLogEn1 + * Returns the TLC Rx Error Log En 1 + * Used in Volta and later + * + * TLCRxSysErrLogEn0 + * Returns the TLC Rx Sys Error Log En 0 + * Used in Ampere and later + * + * MIFTxErrStatus0 + * Returns the MIF Rx Error Mask 0 + * Used in Volta and Turing + * + * MIFRxErrStatus0 + * Returns the MIF Tx Error Mask 0 + * Used in Volta and Turing + * + * NVLIPTLnkErrStatus0 + * Returns the NVLIPT_LNK Error Mask 0 + * Used in Ampere and later + * + * NVLIPTLnkErrLogEn0 + * Returns the NVLIPT_LNK Log En Mask 0 + * Used in Ampere and later + * + * DLSpeedStatusTx + * Returns the NVLINK DL speed status for sublink Tx + * + * DLSpeedStatusRx + * Returns the NVLINK DL speed status for sublink Rx + * + * bExcessErrorDL + * Returns true for excessive error rate interrupt from DL + */ +typedef struct NV2080_CTRL_NVLINK_ERR_INFO { + NvU32 TLErrlog; + NvU32 TLIntrEn; + NvU32 TLCTxErrStatus0; + NvU32 TLCTxErrStatus1; + NvU32 TLCTxSysErrStatus0; + NvU32 TLCRxErrStatus0; + NvU32 TLCRxErrStatus1; + NvU32 TLCRxSysErrStatus0; + NvU32 TLCTxErrLogEn0; + NvU32 TLCTxErrLogEn1; + NvU32 TLCTxSysErrLogEn0; + NvU32 TLCRxErrLogEn0; + NvU32 TLCRxErrLogEn1; + NvU32 TLCRxSysErrLogEn0; + NvU32 MIFTxErrStatus0; + NvU32 MIFRxErrStatus0; + NvU32 NVLIPTLnkErrStatus0; + NvU32 NVLIPTLnkErrLogEn0; + NvU32 DLSpeedStatusTx; + NvU32 DLSpeedStatusRx; + NvBool bExcessErrorDL; +} NV2080_CTRL_NVLINK_ERR_INFO; + +/* + * NV2080_CTRL_NVLINK_COMMON_ERR_INFO + * Error information per IOCTRL + * + * NVLIPTErrStatus0 + * Returns the NVLIPT_COMMON Error Mask 0 + * Used in Ampere and later + * + * NVLIPTErrLogEn0 + * Returns the NVLIPT_COMMON Log En Mask 0 + * Used in Ampere and later + */ +typedef struct NV2080_CTRL_NVLINK_COMMON_ERR_INFO { + NvU32 NVLIPTErrStatus0; + NvU32 NVLIPTErrLogEn0; +} NV2080_CTRL_NVLINK_COMMON_ERR_INFO; + +/* Extract the error status bit for a given TL error index i */ +#define NV2080_CTRL_NVLINK_GET_TL_ERRLOG_BIT(intr, i) (((1U << i) & (intr)) >> i) + +/* Extract the intr enable bit for a given TL error index i */ +#define NV2080_CTRL_NVLINK_GET_TL_INTEN_BIT(intr, i) NV2080_CTRL_NVLINK_GET_TL_ERRLOG_BIT(intr, i) + +/* Error status values for a given NVLINK TL error */ +#define NV2080_CTRL_NVLINK_TL_ERRLOG_TRUE (0x00000001U) +#define NV2080_CTRL_NVLINK_TL_ERRLOG_FALSE (0x00000000U) + +/* Intr enable/disable for a given NVLINK TL error */ +#define NV2080_CTRL_NVLINK_TL_INTEN_TRUE (0x00000001U) +#define NV2080_CTRL_NVLINK_TL_INTEN_FALSE (0x00000000U) + +/* NVLINK TL interrupt enable fields for errors */ +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXDLDATAPARITYEN 0U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXDLCTRLPARITYEN 1U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXPROTOCOLEN 2U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXOVERFLOWEN 3U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXRAMDATAPARITYEN 4U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXRAMHDRPARITYEN 5U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXRESPEN 6U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_RXPOISONEN 7U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_TXRAMDATAPARITYEN 8U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_TXRAMHDRPARITYEN 9U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_DLFLOWPARITYEN 10U 
+#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_DLHDRPARITYEN 12U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_TXCREDITEN 13U +#define NV2080_CTRL_NVLINK_TL_INTEN_IDX_MAX 14U + +/* NVLINK TL error fields */ +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXDLDATAPARITYERR 0U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXDLCTRLPARITYERR 1U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXPROTOCOLERR 2U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXOVERFLOWERR 3U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXRAMDATAPARITYERR 4U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXRAMHDRPARITYERR 5U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXRESPERR 6U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_RXPOISONERR 7U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_TXRAMDATAPARITYERR 8U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_TXRAMHDRPARITYERR 9U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_DLFLOWPARITYERR 10U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_DLHDRPARITYERR 12U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_TXCREDITERR 13U +#define NV2080_CTRL_NVLINK_TL_ERRLOG_IDX_MAX 14U + +/* NVLINK DL speed status for sublink Tx*/ +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_HS (0x00000000U) +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_SINGLE_LANE (0x00000004U) +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_TRAIN (0x00000005U) +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_SAFE (0x00000006U) +#define NV2080_CTRL_NVLINK_SL0_SLSM_STATUS_TX_PRIMARY_STATE_OFF (0x00000007U) + +/* NVLINK DL speed status for sublink Rx*/ +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_HS (0x00000000U) +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_SINGLE_LANE (0x00000004U) +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_TRAIN (0x00000005U) +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_SAFE (0x00000006U) +#define NV2080_CTRL_NVLINK_SL1_SLSM_STATUS_RX_PRIMARY_STATE_OFF (0x00000007U) + +#define NV2080_CTRL_NVLINK_MAX_IOCTRLS 3U +/* + * NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS + * + * linkMask + * Returns the mask of links enabled + * + * linkErrInfo + * Returns the error information for all the links + * + * ioctrlMask + * Returns the mask of ioctrls + * + * commonErrInfo + * Returns the error information common to each IOCTRL + */ +#define NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS { + NvU32 linkMask; + NV2080_CTRL_NVLINK_ERR_INFO linkErrInfo[NV2080_CTRL_NVLINK_MAX_LINKS]; + NvU32 ioctrlMask; + NV2080_CTRL_NVLINK_COMMON_ERR_INFO commonErrInfo[NV2080_CTRL_NVLINK_MAX_IOCTRLS]; +} NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_ERR_INFO + * This command is used to query the NVLINK error information + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_NVLINK_GET_ERR_INFO (0x20803003U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_ERR_INFO_PARAMS_MESSAGE_ID" */ + +/* + * APIs for getting NVLink counters + */ + +// These are the bitmask definitions for different counter types + +#define NV2080_CTRL_NVLINK_COUNTER_INVALID 0x00000000U + +#define NV2080_CTRL_NVLINK_COUNTER_TL_TX0 0x00000001U +#define NV2080_CTRL_NVLINK_COUNTER_TL_TX1 0x00000002U +#define NV2080_CTRL_NVLINK_COUNTER_TL_RX0 0x00000004U +#define NV2080_CTRL_NVLINK_COUNTER_TL_RX1 0x00000008U + +#define NV2080_CTRL_NVLINK_LP_COUNTERS_DL 0x00000010U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L(i) 
(1 << (i + 8)) +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE__SIZE 4U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L0 0x00000100U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L1 0x00000200U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L2 0x00000400U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_ECC_LANE_L3 0x00000800U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_FLIT 0x00010000U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L(i) (1 << (i + 17)) +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE__SIZE 8U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L0 0x00020000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L1 0x00040000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L2 0x00080000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L3 0x00100000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L4 0x00200000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L5 0x00400000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L6 0x00800000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_LANE_L7 0x01000000U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_TX_ERR_REPLAY 0x02000000U +#define NV2080_CTRL_NVLINK_COUNTER_DL_TX_ERR_RECOVERY 0x04000000U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_REPLAY 0x08000000U + +#define NV2080_CTRL_NVLINK_COUNTER_DL_RX_ERR_CRC_MASKED 0x10000000U + +/* + * Note that COUNTER_MAX_TYPES will need to be updated each time + * a new counter type gets added to the list above. + * + */ +#define NV2080_CTRL_NVLINK_COUNTER_MAX_TYPES 32U + +/* + * NV2080_CTRL_CMD_NVLINK_GET_COUNTERS + * This command gets the counts for different counter types. + * + * [in] counterMask + * Mask of counter types to be queried + * One of NV2080_CTRL_NVLINK_COUNTERS_TYPE_* macros + * + * [in] linkMask + * Mask of links to be queried + * + * [out] counters + * Counter value returned + * + * [out] bTx0TlCounterOverflow + * This boolean is set to NV_TRUE if TX Counter 0 has rolled over. + * + * [out] bTx1TlCounterOverflow + * This boolean is set to NV_TRUE if TX Counter 1 has rolled over. + * + * [out] bRx0TlCounterOverflow + * This boolean is set to NV_TRUE if RX Counter 0 has rolled over. + * + * [out] bRx1TlCounterOverflow + * This boolean is set to NV_TRUE if RX Counter 1 has rolled over. + * + * [out] value + * This array contains the error counts for each error type as requested from + * the counterMask. The array indexes correspond to the mask bits one-to-one. + */ +typedef struct NV2080_CTRL_NVLINK_GET_COUNTERS_VALUES { + NvBool bTx0TlCounterOverflow; + NvBool bTx1TlCounterOverflow; + NvBool bRx0TlCounterOverflow; + NvBool bRx1TlCounterOverflow; + NV_DECLARE_ALIGNED(NvU64 value[NV2080_CTRL_NVLINK_COUNTER_MAX_TYPES], 8); +} NV2080_CTRL_NVLINK_GET_COUNTERS_VALUES; + +#define NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS { + NvU32 counterMask; + NV_DECLARE_ALIGNED(NvU64 linkMask, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_GET_COUNTERS_VALUES counters[NV2080_CTRL_NVLINK_MAX_LINKS], 8); +} NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_COUNTERS (0x20803004U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS_MESSAGE_ID" */ + + +/* + * NV2080_CTRL_CMD_NVLINK_CLEAR_COUNTERS + * This command clears/resets the counters for the specified types. 
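+ *
+ * For example (an illustrative sketch only; "clearParams" is a hypothetical
+ * caller variable, and counterMask is assumed to take the
+ * NV2080_CTRL_NVLINK_COUNTER_* masks defined above), clearing the DL TX
+ * replay and recovery counters on links 0 and 1 might look like:
+ *
+ *     NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS clearParams = { 0 };
+ *     clearParams.counterMask = NV2080_CTRL_NVLINK_COUNTER_DL_TX_ERR_REPLAY |
+ *                               NV2080_CTRL_NVLINK_COUNTER_DL_TX_ERR_RECOVERY;
+ *     clearParams.linkMask    = 0x3;   // links 0 and 1
+ *     // ...issue NV2080_CTRL_CMD_NVLINK_CLEAR_COUNTERS with &clearParams...
+ *
+ * The individual parameters are described below.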
+ * + * [in] linkMask + * This parameter specifies for which links we want to clear the + * counters. + * + * [in] counterMask + * This parameter specifies the input mask for desired counters to be + * cleared. Note that all counters cannot be cleared. + * + * NOTE: Bug# 2098529: On Turing all DL errors and LP counters are cleared + * together. They cannot be cleared individually per error type. RM + * would possibly move to a new API on Ampere and beyond + */ + +#define NV2080_CTRL_CMD_NVLINK_CLEAR_COUNTERS (0x20803005U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS { + NvU32 counterMask; + NV_DECLARE_ALIGNED(NvU64 linkMask, 8); +} NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_INJECT_ERROR + * This command causes all the same actions to occur as if the related + * error were to occur, either fatal or recoverable. + * + * [in] linkMask size: 32 bits + * Controls which links to apply error injection to. + * [in] bFatal + * This parameter specifies that the error should be fatal. + * + */ +#define NV2080_CTRL_CMD_NVLINK_INJECT_ERROR (0x20803006U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INJECT_ERROR_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_INJECT_ERROR_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_NVLINK_INJECT_ERROR_PARAMS { + NvU32 linkMask; + NvBool bFatalError; +} NV2080_CTRL_NVLINK_INJECT_ERROR_PARAMS; + +/* NVLINK unit list - to be used with error notifiers */ +#define NV2080_CTRL_NVLINK_UNIT_DL 0x01U +#define NV2080_CTRL_NVLINK_UNIT_TL 0x02U +#define NV2080_CTRL_NVLINK_UNIT_TLC_RX_0 0x03U +#define NV2080_CTRL_NVLINK_UNIT_TLC_RX_1 0x04U +#define NV2080_CTRL_NVLINK_UNIT_TLC_TX_0 0x05U +#define NV2080_CTRL_NVLINK_UNIT_MIF_RX_0 0x06U +#define NV2080_CTRL_NVLINK_UNIT_MIF_TX_0 0x07U + +/* + * NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES + * This command gets the number of successful error recoveries + * + * [in] linkMask size: 32 bits + * This parameter controls which links to get recoveries for. + * [out] numRecoveries + * This parameter specifies the number of successful per link error recoveries + */ +#define NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES (0x20803007U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS { + NvU32 linkMask; + NvU32 numRecoveries[NV2080_CTRL_NVLINK_MAX_LINKS]; +} NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE + * + * This command queries the remote endpoint type of the link recorded at the + * time the last error occurred on the link. + * + * [in] linkId + * This parameter specifies the link to get the last remote endpoint type + * recorded for. + * + * [out] remoteType + * This parameter returns the remote endpoint type of the link recorded at + * the time the last error occurred on the link. Possible values are: + * NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE + * The link is not connected to an active remote endpoint. + * NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU + * The remote endpoint of the link is a peer GPU. 
+ * NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NPU + * The remote endpoint of the link is the host system (e.g., an NPU + * on IBM POWER platforms). + * NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_TEGRA + * The remote endpoint of the link a tegra device + * + * Possible return status values are: + * NV_OK + * If the remoteType parameter value is valid upon return. + * NV_ERR_INVALID_ARGUMENT + * If the linkId parameter does not specify a valid link. + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on this GPU or the remote endpoint type is + * not recorded in non-volatile storage. + */ +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE (0x20803008U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS { + NvU32 linkId; + NvU32 remoteType; +} NV2080_CTRL_NVLINK_GET_LINK_LAST_ERROR_REMOTE_TYPE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_LINK_FATAL_ERROR_COUNTS + * + * This command queries the number of each type of fatal errors that have + * occurred on the given link. + * + * [in] linkId + * This parameter specifies the link to get the fatal error information + * for. + * + * [out] supportedCounts + * This parameter identifies which counts in the fatalErrorCounts array + * are valid for the given link. A bit set in this field means that the + * corresponding index is valid in the fatalErrorCounts array. + * + * [out] fatalErrorCounts + * This parameter returns an array of 8-bit counts, one for each type of + * fatal error that can occur on the link. The valid indices of this array + * are: + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_RX_DL_DATA_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_RX_DL_CTRL_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_PROTOCOL + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_OVERFLOW + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_RX_RAM_DATA_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_RX_RAM_HDR_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_RESP + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_POISON + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DATA_POISONED_PKT_RCVD + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_TX_RAM_DATA_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL(C)_TX_RAM_HDR_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_TX_CREDIT + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_DL_FLOW_CTRL_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DL_FLOW_CTRL_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_DL_HDR_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_RECOVERY_LONG + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_RAM + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_INTERFACE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_SUBLINK_CHANGE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_RX_FAULT_SUBLINK_CHANGE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_RX_FAULT_DL_PROTOCOL + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_LTSSM_FAULT + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DL_HDR_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_AE_FLIT_RCVD + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_BE_FLIT_RCVD + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_ADDR_ALIGN + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_PKT_LEN + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CMD_ENC + * 
NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_DAT_LEN_ENC + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_ADDR_TYPE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_RSP_STATUS + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_PKT_STATUS + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_REQ + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_RESP + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_GT_ATOMIC_REQ_MAX_SIZE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_GT_RMW_REQ_MAX_SIZE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_LT_ATR_RESP_MIN_SIZE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_PO_FOR_CACHE_ATTR + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_COMPRESSED_RESP + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RESP_STATUS_TARGET + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RESP_STATUS_UNSUPPORTED_REQUEST + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_HDR_OVERFLOW + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DATA_OVERFLOW + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_STOMPED_PKT_RCVD + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_CORRECTABLE_INTERNAL + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_VC_OVERFLOW + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_NVLINK_CREDIT_RELEASE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_NCISOC_CREDIT_RELEASE + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_HDR_CREDIT_OVERFLOW + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DATA_CREDIT_OVERFLOW + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DL_REPLAY_CREDIT_OVERFLOW + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_UNSUPPORTED_VC_OVERFLOW + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_STOMPED_PKT_SENT + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DATA_POISONED_PKT_SENT + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RESP_STATUS_TARGET + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RESP_STATUS_UNSUPPORTED_REQUEST + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_RX_RAM_DATA_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_RX_RAM_HDR_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_TX_RAM_DATA_PARITY + * NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_TX_RAM_HDR_PARITY + * + * Possible return status values are: + * NV_OK + * If the values in the fatalErrorCounts array are valid upon return. + * NV_ERR_INVALID_ARGUMENT + * If the linkId parameter does not specify a valid link. + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on this GPU or aggregate NVLINK fatal error + * counts are not recorded in non-volatile storage. 
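+ *
+ * For example (an illustrative sketch only; "errCounts" is a hypothetical,
+ * already-populated NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS),
+ * each entry should be validated against supportedCounts before use:
+ *
+ *     NvU32 i;
+ *     for (i = 0; i < NV2080_CTRL_NVLINK_NUM_FATAL_ERROR_TYPES; i++)
+ *     {
+ *         if (NV2080_CTRL_NVLINK_IS_FATAL_ERROR_COUNT_VALID(i, errCounts.supportedCounts))
+ *         {
+ *             // errCounts.fatalErrorCounts[i] holds a valid count for this link.
+ *         }
+ *     }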
+ */ +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_FATAL_ERROR_COUNTS (0x20803009U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS_MESSAGE_ID" */ + +/* + * NVLink 1 Fatal Error Types + */ +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_DL_DATA_PARITY 0U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_DL_CTRL_PARITY 1U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_PROTOCOL 2U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_OVERFLOW 3U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_RAM_DATA_PARITY 4U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_RAM_HDR_PARITY 5U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_RESP 6U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_RX_POISON 7U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_TX_RAM_DATA_PARITY 8U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_TX_RAM_HDR_PARITY 9U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_TX_CREDIT 10U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_DL_FLOW_CTRL_PARITY 11U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TL_DL_HDR_PARITY 12U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_RECOVERY_LONG 13U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_RAM 14U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_INTERFACE 15U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_TX_FAULT_SUBLINK_CHANGE 16U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_RX_FAULT_SUBLINK_CHANGE 17U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_RX_FAULT_DL_PROTOCOL 18U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_LTSSM_FAULT 19U + +/* + * NVLink 2 Fatal Error Types + */ +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DL_DATA_PARITY 0U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DL_CTRL_PARITY 1U +// No direct equivalent to: TL_RX_PROTOCOL 2 +// No direct equivalent to: TL_RX_OVERFLOW 3 +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RAM_DATA_PARITY 4U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RAM_HDR_PARITY 5U +// No direct equivalent to: TL_RX_RESP 6 +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DATA_POISONED_PKT_RCVD 7U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RAM_DATA_PARITY 8U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RAM_HDR_PARITY 9U +// No direct equivalent to: TL_TX_CREDIT 10 +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DL_FLOW_CONTROL_PARITY 11U +// No direct equivalent to: TL_DL_HDR_PARITY 12 +// Identical to NVLink 1: DL_TX_RECOVERY_LONG 13 +// Identical to NVLink 1: DL_TX_FAULT_RAM 14 +// Identical to NVLink 1: DL_TX_FAULT_INTERFACE 15 +// Identical to NVLink 1: DL_TX_FAULT_SUBLINK_CHANGE 16 +// Identical to NVLink 1: DL_RX_FAULT_SUBLINK_CHANGE 17 +// Identical to NVLink 1: DL_RX_FAULT_DL_PROTOCOL 18 +// Identical to NVLink 1: DL_LTSSM_FAULT 19 +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DL_HDR_PARITY 20U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_AE_FLIT_RCVD 21U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_BE_FLIT_RCVD 22U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_ADDR_ALIGN 23U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_PKT_LEN 24U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CMD_ENC 25U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_DAT_LEN_ENC 26U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_ADDR_TYPE 27U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_RSP_STATUS 28U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_PKT_STATUS 29U 
+#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_REQ 30U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_RESP 31U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_GT_ATOMIC_REQ_MAX_SIZE 32U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_GT_RMW_REQ_MAX_SIZE 33U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DAT_LEN_LT_ATR_RESP_MIN_SIZE 34U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_PO_FOR_CACHE_ATTR 35U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_COMPRESSED_RESP 36U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RESP_STATUS_TARGET 37U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_RESP_STATUS_UNSUPPORTED_REQUEST 38U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_HDR_OVERFLOW 39U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_DATA_OVERFLOW 40U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_STOMPED_PKT_RCVD 41U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_CORRECTABLE_INTERNAL 42U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_VC_OVERFLOW 43U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_NVLINK_CREDIT_RELEASE 44U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_UNSUPPORTED_NCISOC_CREDIT_RELEASE 45U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_HDR_CREDIT_OVERFLOW 46U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DATA_CREDIT_OVERFLOW 47U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DL_REPLAY_CREDIT_OVERFLOW 48U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_UNSUPPORTED_VC_OVERFLOW 49U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_STOMPED_PKT_SENT 50U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_DATA_POISONED_PKT_SENT 51U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RESP_STATUS_TARGET 52U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_RESP_STATUS_UNSUPPORTED_REQUEST 53U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_RX_RAM_DATA_PARITY 54U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_RX_RAM_HDR_PARITY 55U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_TX_RAM_DATA_PARITY 56U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_MIF_TX_RAM_HDR_PARITY 57U + +/* + * NVLink 3 Fatal Error Types + */ +// Identical to NVLink 2: TLC_RX_DL_DATA_PARITY 0 +// Identical to NVLink 2: TLC_RX_DL_CTRL_PARITY 1 +// No direct equivalent to: TL_RX_PROTOCOL 2 +// No direct equivalent to: TL_RX_OVERFLOW 3 +// No direct equivalent to: TLC_RX_RAM_DATA_PARITY 4 +// No direct equivalent to: RX_RAM_HDR_PARITY 5 +// No direct equivalent to: TL_RX_RESP 6 +// No direct equivalent to: TLC_RX_DATA_POISONED_PKT_RCVD 7 +// No direct equivalent to: TLC_TX_RAM_DATA_PARITY 8 +// No direct equivalent to: TLC_TX_RAM_HDR_PARITY 9 +// No direct equivalent to: TL_TX_CREDIT 10 +// Identical to NVLink 2: TLC_TX_DL_FLOW_CONTROL_PARITY 11 +// No direct equivalent to: TL_DL_HDR_PARITY 12 +// No direct equivalent to: DL_TX_RECOVERY_LONG 13 +// Identical to NVLink 1: DL_TX_FAULT_RAM 14 +// Identical to NVLink 1: DL_TX_FAULT_INTERFACE 15 +// Identical to NVLink 1: DL_TX_FAULT_SUBLINK_CHANGE 16 +// Identical to NVLink 1: DL_RX_FAULT_SUBLINK_CHANGE 17 +// Identical to NVLink 1: DL_RX_FAULT_DL_PROTOCOL 18 +// No direct equivalent to: DL_LTSSM_FAULT 19 +// Identical to NVLink 2: TLC_RX_DL_HDR_PARITY 20 +// Identical to NVLink 2: TLC_RX_INVALID_AE_FLIT_RCVD 21 +// Identical to NVLink 2: TLC_RX_INVALID_BE_FLIT_RCVD 22 +// Identical to NVLink 2: TLC_RX_INVALID_ADDR_ALIGN 23 +// Identical to NVLink 2: 
TLC_RX_PKT_LEN 24 +// Identical to NVLink 2: TLC_RX_RSVD_CMD_ENC 25 +// Identical to NVLink 2: TLC_RX_RSVD_DAT_LEN_ENC 26 +// No direct equivalent to: TLC_RX_RSVD_ADDR_TYPE 27 +// No direct equivalent to: TLC_RX_RSVD_RSP_STATUS 28 +// Identical to NVLink 2: TLC_RX_RSVD_PKT_STATUS 29 +// Identical to NVLink 2: TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_REQ 30 +// Identical to NVLink 2: TLC_RX_RSVD_CACHE_ATTR_ENC_IN_PROBE_RESP 31 +// No direct equivalent to: TLC_RX_DAT_LEN_GT_ATOMIC_REQ_MAX_SIZE 32 +// Identical to NVLink 2: TLC_RX_DAT_LEN_GT_RMW_REQ_MAX_SIZE 33 +// Identical to NVLink 2: TLC_RX_DAT_LEN_LT_ATR_RESP_MIN_SIZE 34 +// Identical to NVLink 2: TLC_RX_INVALID_PO_FOR_CACHE_ATTR 35 +// Identical to NVLink 2: TLC_RX_INVALID_COMPRESSED_RESP 36 +// No direct equivalent to: TLC_RX_RESP_STATUS_TARGET 37 +// No direct equivalent to: TLC_RX_RESP_STATUS_UNSUPPORTED_REQUEST 38 +// Identical to NVLink 2: TLC_RX_HDR_OVERFLOW 39 +// Identical to NVLink 2: TLC_RX_DATA_OVERFLOW 40 +// Identical to NVLink 2: TLC_RX_STOMPED_PKT_RCVD 41 +// No direct equivalent to: TLC_RX_CORRECTABLE_INTERNAL 42 +// No direct equivalent to: TLC_RX_UNSUPPORTED_VC_OVERFLOW 43 +// No direct equivalent to: TLC_RX_UNSUPPORTED_NVLINK_CREDIT_RELEASE 44 +// No direct equivalent to: TLC_RX_UNSUPPORTED_NCISOC_CREDIT_RELEASE 45 +// No direct equivalent to: TLC_TX_HDR_CREDIT_OVERFLOW 46 +// No direct equivalent to: TLC_TX_DATA_CREDIT_OVERFLOW 47 +// No direct equivalent to: TLC_TX_DL_REPLAY_CREDIT_OVERFLOW 48 +// No direct equivalent to: TLC_TX_UNSUPPORTED_VC_OVERFLOW 49 +// No direct equivalent to: TLC_TX_STOMPED_PKT_SENT 50 +// No direct equivalent to: TLC_TX_DATA_POISONED_PKT_SENT 51 +// No direct equivalent to: TLC_TX_RESP_STATUS_TARGET 52 +// No direct equivalent to: TLC_TX_RESP_STATUS_UNSUPPORTED_REQUEST 53 +// No direct equivalent to: MIF_RX_RAM_DATA_PARITY 54 +// No direct equivalent to: MIF_RX_RAM_HDR_PARITY 55 +// No direct equivalent to: MIF_TX_RAM_DATA_PARITY 56 +// No direct equivalent to: MIF_TX_RAM_HDR_PARITY 57 +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_RX_INVALID_COLLAPSED_RESPONSE 58U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_NCISOC_HDR_ECC_DBE 59U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_TLC_TX_NCISOC_PARITY 60U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_LTSSM_FAULT_UP 61U +#define NV2080_CTRL_NVLINK_FATAL_ERROR_TYPE_DL_LTSSM_FAULT_DOWN 62U + +#define NV2080_CTRL_NVLINK_NUM_FATAL_ERROR_TYPES 63U + +#define NV2080_CTRL_NVLINK_IS_FATAL_ERROR_COUNT_VALID(count, supportedCounts) \ + (!!((supportedCounts) & NVBIT64(count))) + +#define NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS { + NvU32 linkId; + NV_DECLARE_ALIGNED(NvU64 supportedCounts, 8); + NvU8 fatalErrorCounts[NV2080_CTRL_NVLINK_NUM_FATAL_ERROR_TYPES]; +} NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_LINK_NONFATAL_ERROR_RATES + * + * This command queries recent non-fatal error rates for the given link. + * + * The error rates specify the maximum number of errors per minute recorded + * for the given link within a 24-hour period for daily maximums or a 30-day + * period for monthly maximums. + * + * [in] linkId + * This parameter specifies the link to get the nonfatal error information + * for. + * + * [out] numDailyMaxNonfatalErrorRates + * This parameter returns the number of valid nonfatal error rate entries + * in the dailyMaxNonfatalErrorRates parameter. 
+ * + * [out] dailyMaxNonfatalErrorRates + * This parameter returns maximum nonfatal error rate entries recorded + * over the last few 24-hour periods. For example, index 0 contains the + * maximum nonfatal error rate recorded in the current day, index 1 + * contains the maximum nonfatal error rate recorded yesterday ago, etc. + * + * [out] numMonthlyMaxNonfatalErrorRates + * This parameter returns the number of valid nonfatal error rate entries + * in the monthlyMaxNonfatalErrorRates parameter. + * + * [out] monthlyMaxNonfatalErrorRates + * THis parameter returns maximum nonfatal error rate entries recorded + * over the last few 30-day periods. For example, index 0 contains the + * maximum nonfatal error rate recorded in the current month, index 1 + * contains the maximum nonfatal error recorded last month, etc. + * + * Possible status values returned are: + * NV_OK + * If any nonfatal error rates are valid upon return. + * NV_ERR_INVALID_ARGUMENT + * If the linkId parameter does not specify a valid link. + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on this GPU or NVLINK nonfatal error rates + * are not recorded in non-volatile storage. + */ +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_NONFATAL_ERROR_RATES (0x2080300aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS_MESSAGE_ID" */ + +typedef struct NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE { + NvU32 errorsPerMinute; + NvU32 timestamp; +} NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE; + +#define NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE_ENTRIES 5U + +#define NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS { + NvU32 linkId; + NvU32 numDailyMaxNonfatalErrorRates; + NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE dailyMaxNonfatalErrorRates[NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE_ENTRIES]; + NvU32 numMonthlyMaxNonfatalErrorRates; + NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE monthlyMaxNonfatalErrorRates[NV2080_CTRL_NVLINK_NONFATAL_ERROR_RATE_ENTRIES]; +} NV2080_CTRL_NVLINK_GET_LINK_NONFATAL_ERROR_RATES_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_SET_ERROR_INJECTION_MODE + * + * This command sets the injection mode so that error handling and error + * logging software can be aware that errors cropping up on links are + * intentional and not due to HW failures. + * + * [in] bEnabled + * This parameter specifies whether injection mode should be enabled or + * disabled. + * + * Possible status values returned are: + * NV_OK + * If injection mode is enabled or disabled according to the parameters. + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on this GPU. + */ +#define NV2080_CTRL_CMD_NVLINK_SET_ERROR_INJECTION_MODE (0x2080300bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_ERROR_INJECTION_MODE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_SET_ERROR_INJECTION_MODE_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV2080_CTRL_NVLINK_SET_ERROR_INJECTION_MODE_PARAMS { + NvBool bEnabled; +} NV2080_CTRL_NVLINK_SET_ERROR_INJECTION_MODE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_SETUP_EOM + * + * This command passes a packed 32bit params value to NV_PMINION_MISC_0_SCRATCH_SWRW_0 + * and then issues an EOM DLCMD to minion for the desired link. Only one DLCMD + * at a time can be issued to any given link. 
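+ * (Software is therefore expected to serialize DLCMD-based controls such as
+ * this one on a per-link basis.)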
+ * + * Params Packing is specified in Minion IAS + */ +#define NV2080_CTRL_CMD_NVLINK_SETUP_EOM (0x2080300cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS { + NvU8 linkId; + NvU32 params; +} NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_SET_POWER_STATE + * + * This command sets the mask of links associated with the GPU + * to a target power state + * + * [in] linkMask + * Mask of links that will be put to desired power state + * Note: In Turing RM supports only tansitions into/out of L2 + * [in] powerState + * Target power state to which the links will transition + * This can be any one of NV2080_CTRL_NVLINK_POWER_STATE_* states + * + * Possible status values returned are: + * NV_OK + * If all links transitioned successfully to the target state + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip or if the power state + * is not enabled on the chip + * NV_ERR_INVALID_ARGUMENT + * If the any of the links in the mask is not enabled + * NV_ERR_INVALID_REQUEST + * If the power state transition is not supported + * NV_WARN_MORE_PROCESSING_REQUIRED + * Link has received the request for the power transition + * The transition will happen when the remote end also agrees + * + * Note: Currently only L0->L2 and L2->L0 is supported + */ +#define NV2080_CTRL_CMD_NVLINK_SET_POWER_STATE (0x2080300dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS { + NvU32 linkMask; + NvU32 powerState; +} NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS; + +// NVLink Power States +#define NV2080_CTRL_NVLINK_POWER_STATE_L0 (0x00U) +#define NV2080_CTRL_NVLINK_POWER_STATE_L1 (0x01U) +#define NV2080_CTRL_NVLINK_POWER_STATE_L2 (0x02U) +#define NV2080_CTRL_NVLINK_POWER_STATE_L3 (0x03U) + +/* + * NV2080_CTRL_CMD_NVLINK_GET_POWER_STATE + * + * This command gets the power state of a link associated + * with the GPU + * + * [in] linkId + * Link whose power state is being requested + * [out] powerState + * Current power state of the link + * Is any one the NV2080_CTRL_NVLINK_POWER_STATE_* states + * + * Possible status values returned are: + * NV_OK + * If the power state is retrieved successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If the link is not enabled on the GPU + * NV_ERR_INVALID_STATE + * If the link is in an invalid state + */ +#define NV2080_CTRL_CMD_NVLINK_GET_POWER_STATE (0x2080300eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS { + NvU32 linkId; + NvU32 powerState; +} NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_INJECT_TLC_ERROR + * + * This command injects TLC_*_REPORT_INJECT error. An RM interrupt + * will be triggered after injection. Currently the injection call + * only deals with HW_ERR, UR_ERR, PRIV_ERR in TX_SYS and RX_LNK devices + * + * [in] linkId + * Link whose power state is being requested. 
+ * [in] errorType + * error type that needs to be injected. + * [in] device + * The device this injection is intended for. + * [in] bBroadcast + * Whether the link report error should be fired in multiple links. + + * Possible status values returned are: + * NV_OK + * If the injection succeeds. + * NV_ERR_NOT_SUPPORTED + * If the error type of NVLINK is not supported on the chip + */ +#define NV2080_CTRL_CMD_NVLINK_INJECT_TLC_ERROR (0x2080300fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS_MESSAGE_ID" */ + +typedef enum NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_DEVICE { + TLC_RX_LNK = 0, + TLC_TX_SYS = 1, +} NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_DEVICE; + +typedef enum NV2080_CTRL_NVLINK_INJECT_TLC_TX_SYS_REPORT_ERROR_TYPE { + TX_SYS_TX_RSP_STATUS_HW_ERR = 0, + TX_SYS_TX_RSP_STATUS_UR_ERR = 1, + TX_SYS_TX_RSP_STATUS_PRIV_ERR = 2, +} NV2080_CTRL_NVLINK_INJECT_TLC_TX_SYS_REPORT_ERROR_TYPE; + +typedef enum NV2080_CTRL_NVLINK_INJECT_TLC_RX_LNK_REPORT_ERROR_TYPE { + RX_LNK_RX_RSP_STATUS_HW_ERR = 0, + RX_LNK_RX_RSP_STATUS_UR_ERR = 1, + RX_LNK_RX_RSP_STATUS_PRIV_ERR = 2, +} NV2080_CTRL_NVLINK_INJECT_TLC_RX_LNK_REPORT_ERROR_TYPE; + +typedef union NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_TYPE { + NV2080_CTRL_NVLINK_INJECT_TLC_TX_SYS_REPORT_ERROR_TYPE txSysErrorType; + NV2080_CTRL_NVLINK_INJECT_TLC_RX_LNK_REPORT_ERROR_TYPE rxLnkErrorType; +} NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_TYPE; + + +#define NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS { + NvU32 linkId; + NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_DEVICE device; + NvBool bBroadcast; + NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_TYPE errorType; +} NV2080_CTRL_NVLINK_INJECT_TLC_ERROR_PARAMS; + + + +/* + * NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES + * + * This command returns the per-lane Figure Of Merit (FOM) Values from a link + * + * [in] linkId + * The NVLink link ID to report FOM values for + * [out] numLanes + * This field specifies the no. 
of lanes per link + * [out] figureOfMeritValues + * This field contains the FOM values per lane + * + */ +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES (0x20803011U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_MAX_LANES 4U + +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES_PARAMS { + NvU32 linkId; + NvU8 numLanes; + NvU16 figureOfMeritValues[NV2080_CTRL_NVLINK_MAX_LANES]; +} NV2080_CTRL_CMD_NVLINK_GET_LINK_FOM_VALUES_PARAMS; + +/* + * NV2080_CTRL_NVLINK_SET_NVLINK_PEER + * + * This command sets/unsets the USE_NVLINK_PEER bit for a given + * mask of peers + * + * [in] peerMask + * Mask of Peer IDs for which USE_NVLINK_PEER needs to be updated + * [in] bEnable + * Whether the bit needs to be set or unset + * + * Possible status values returned are: + * NV_OK + * If the USE_NVLINK_PEER bit was updated successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip, or + * If unsetting USE_NVLINK_PEER bit is not supported + * + * NOTE: This is only supported on Windows + * + */ +#define NV2080_CTRL_CMD_NVLINK_SET_NVLINK_PEER (0x20803012U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS { + NvU32 peerMask; + NvBool bEnable; +} NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_READ_UPHY_PAD_LANE_REG + * + * This command packs the lane and addr values into NV_PMINION_MISC_0_SCRATCH_SWRW_0 + * and then issues a READPADLANEREG DLCMD to minion for the desired link. Only one DLCMD + * at a time can be issued to any given link. + * + * After this command completes it is necessary to read the appropriate + * NV_PNVL_BR0_PAD_CTL_7_CFG_RDATA register to retrieve the results of the read + * Only GV100 should read NV_PNVL_BR0_PAD_CTL_7_CFG_RDATA. + * From TU102+ the ctrl the required data would be updated in phyConfigData. + * + * [in] linkId + * Link whose pad lane register is being read + * [in] lane + * Lane whose pad lane register is being read + * [in] addr + * Address of the pad lane register to read + * [out] phyConfigData + * Provides phyconfigaddr and landid + * + * Possible status values returned are: + * NV_OK + * If the minion command completed successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If the link is not enabled on the GPU or the lane is invalid + * NV_ERR_TIMEOUT + * If a timeout occurred waiting for minion response + */ +#define NV2080_CTRL_CMD_NVLINK_READ_UPHY_PAD_LANE_REG (0x20803013U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_READ_UPHY_PAD_LANE_REG_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_READ_UPHY_PAD_LANE_REG_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_NVLINK_READ_UPHY_PAD_LANE_REG_PARAMS { + NvU8 linkId; + NvU8 lane; + NvU16 addr; + NvU32 phyConfigData; +} NV2080_CTRL_NVLINK_READ_UPHY_PAD_LANE_REG_PARAMS; + +/* + * Structure to store the ECC error data. + * valid + * Is the lane valid or not + * eccErrorValue + * Value of the Error. 
+ * overflowed + * If the error overflowed or not + */ +typedef struct NV2080_CTRL_NVLINK_LANE_ERROR { + NvBool bValid; + NvU32 eccErrorValue; + NvBool overflowed; +} NV2080_CTRL_NVLINK_LANE_ERROR; + +/* + * Structure to store ECC error data for Links + * errorLane array index corresponds to the lane number. + * + * errorLane[] + * Stores the ECC error data per lane. + */ +typedef struct NV2080_CTRL_NVLINK_LINK_ECC_ERROR { + NV2080_CTRL_NVLINK_LANE_ERROR errorLane[NV2080_CTRL_NVLINK_MAX_LANES]; + NvU32 eccDecFailed; + NvBool eccDecFailedOverflowed; +} NV2080_CTRL_NVLINK_LINK_ECC_ERROR; + +/* + * NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS + * + * Control to get the values of ECC ERRORS + * + * Parameters: + * linkMask [IN] + * Links on which the ECC error data requested + * A valid link/port mask returned by the port masks returned by + * NVSWITCH_GET_INFO + * errorLink[] [OUT] + * Stores the ECC error related information for each link. + * errorLink array index corresponds to the link Number. + */ + +#define NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS { + NvU32 linkMask; + NV2080_CTRL_NVLINK_LINK_ECC_ERROR errorLink[NV2080_CTRL_NVLINK_MAX_LINKS]; +} NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS; + + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_ECC_ERRORS (0x20803014U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_NVLINK_ECC_ERRORS_PARAMS_MESSAGE_ID" */ + +// Nvlink throughput counters reading data flits in TX +#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_DATA_TX 0U + +// Nvlink throughput counters reading data flits in RX +#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_DATA_RX 1U + +// Nvlink throughput counters reading all flits in TX +#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_RAW_TX 2U + +// Nvlink throughput counters reading all flits in RX +#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_RAW_RX 3U + +#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_MAX 4U + +/* + * NV2080_CTRL_CMD_NVLINK_READ_TP_COUNTERS + * + * Reads reserved monotonically increasing NVLINK throughput counters for given linkIds + * + * [in] counterMask + * Mask of counter types to be queried + * One of NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_* macros + * [in] linkMask + * Mask of links to be queried + * [out] value + * Throughput counter value returned + * + * Possible status values returned are: + * NV_OK + * If command completed successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If numLinks is out-of-range or requested link is inactive + * + * Note: + * The following commands will be deprecated in favor of NV2080_CTRL_CMD_NVLINK_READ_TP_COUNTERS: + * NV90CC_CTRL_CMD_NVLINK_GET_COUNTERS + * NV2080_CTRL_CMD_NVLINK_GET_COUNTERS + * Other commands that will be deprecated due to the change in design: + * NV90CC_CTRL_CMD_NVLINK_RESERVE_COUNTERS + * NV90CC_CTRL_CMD_NVLINK_RELEASE_COUNTERS + * NV90CC_CTRL_CMD_NVLINK_SET_COUNTERS_FROZEN + * NV90CC_CTRL_CMD_NVLINK_GET_TL_COUNTER_CFG + * NV90CC_CTRL_CMD_NVLINK_SET_TL_COUNTER_CFG + * NV90CC_CTRL_CMD_NVLINK_CLEAR_COUNTERS + * + * Also, note that there is no counter overflow handling for these calls. + * These counters would be counting in flits and assuming 25GB/s bandwidth per link, + * with traffic flowing continuously, it would take 174 years for overflow to happen. 
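+ * Illustrative arithmetic behind that figure (assuming 128-bit, i.e. 16-byte, flits and 25 GB/s read as 25 * 2^30 bytes/sec): 25 * 2^30 / 16 is roughly 1.68e9 flits/sec, and 2^63 flits / 1.68e9 flits/sec is roughly 5.5e9 seconds, which is about 174 years.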
+ * It is reasonable to assume an overflow will not occur within the GPU operation, + * given that the counters get reset at system reboot or GPU reset. Counters are 63-bit. + */ + +typedef struct NV2080_CTRL_NVLINK_READ_TP_COUNTERS_VALUES { + NV_DECLARE_ALIGNED(NvU64 value[NV2080_CTRL_NVLINK_READ_TP_COUNTERS_TYPE_MAX], 8); +} NV2080_CTRL_NVLINK_READ_TP_COUNTERS_VALUES; + +#define NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS { + NvU16 counterMask; + NV_DECLARE_ALIGNED(NvU64 linkMask, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_READ_TP_COUNTERS_VALUES counters[NV2080_CTRL_NVLINK_MAX_LINKS], 8); +} NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_READ_TP_COUNTERS (0x20803015U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE + * + * This command locks the link power state so that RM doesn't modify the state + * of the link during pstate switch. + * + * [in] linkMask Links for which power mode needs to be locked. + */ +#define NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE (0x20803016U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE_PARAMS { + NvBool bLockPowerMode; +} NV2080_CTRL_CMD_NVLINK_LOCK_LINK_POWER_STATE_PARAMS; + +/* + * NV2080_CTRL_CMD_NVLINK_ENABLE_NVLINK_PEER + * + * This command is used to enable RM NVLink enabled peer state. + * Note: This just updates the RM state. 
To reflect the state in the registers, + * use NV2080_CTRL_CMD_NVLINK_SET_NVLINK_PEER + * + * [in] peerMask + * Mask of Peer IDs for which USE_NVLINK_PEER needs to be enabled + * [in] bEnable + * Whether the bit needs to be set or unset + * + * Possible status values returned are: + * NV_OK + * If the USE_NVLINK_PEER bit was enabled successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip, or + * If unsetting USE_NVLINK_PEER bit is not supported + * + */ +#define NV2080_CTRL_CMD_NVLINK_ENABLE_NVLINK_PEER (0x20803017U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS_MESSAGE_ID (0x17U) + +typedef struct NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS { + NvU32 peerMask; + NvBool bEnable; +} NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS; + +#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_COUNT_TX_NVHS 0U +#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_COUNT_TX_EIGHTH 1U +#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_COUNT_TX_OTHER 2U +#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_NUM_TX_LP_ENTER 3U +#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_NUM_TX_LP_EXIT 4U +#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_COUNT_TX_SLEEP 5U +#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_MAX_COUNTERS 6U + +/* + * NV2080_CTRL_CMD_NVLINK_GET_LP_COUNTERS + * + * Reads NVLINK low power counters for given linkId + * + * [in] linkId + * ID of the link to be queried + * [in,out] counterValidMask + * Mask of valid counters + * [out] counterValues + * Low power counter values returned + * + * Possible status values returned are: + * NV_OK + * If command completed successfully + * NV_ERR_NOT_SUPPORTED + * If NVLINK is not supported on the chip + * NV_ERR_INVALID_ARGUMENT + * If linkId is out-of-range or requested link is inactive + */ + +#define NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS { + NvU32 linkId; + NvU32 counterValidMask; + NvU32 counterValues[NV2080_CTRL_NVLINK_GET_LP_COUNTERS_MAX_COUNTERS]; +} NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_LP_COUNTERS (0x20803018U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS_MESSAGE_ID" */ + +/* + * NVLINK Link states + * These should ALWAYS match the nvlink core library defines in nvlink.h + */ +#define NV2080_NVLINK_CORE_LINK_STATE_OFF 0x00U +#define NV2080_NVLINK_CORE_LINK_STATE_HS 0x01U +#define NV2080_NVLINK_CORE_LINK_STATE_SAFE 0x02U +#define NV2080_NVLINK_CORE_LINK_STATE_FAULT 0x03U +#define NV2080_NVLINK_CORE_LINK_STATE_RECOVERY 0x04U +#define NV2080_NVLINK_CORE_LINK_STATE_FAIL 0x05U +#define NV2080_NVLINK_CORE_LINK_STATE_DETECT 0x06U +#define NV2080_NVLINK_CORE_LINK_STATE_RESET 0x07U +#define NV2080_NVLINK_CORE_LINK_STATE_ENABLE_PM 0x08U +#define NV2080_NVLINK_CORE_LINK_STATE_DISABLE_PM 0x09U + + +#define NV2080_NVLINK_CORE_LINK_STATE_SAVE_STATE 0x0BU +#define NV2080_NVLINK_CORE_LINK_STATE_RESTORE_STATE 0x0CU +#define NV2080_NVLINK_CORE_LINK_STATE_PRE_HS 0x0EU +#define NV2080_NVLINK_CORE_LINK_STATE_DISABLE_ERR_DETECT 0x0FU +#define NV2080_NVLINK_CORE_LINK_STATE_LANE_DISABLE 0x10U +#define NV2080_NVLINK_CORE_LINK_STATE_LANE_SHUTDOWN 0x11U +#define NV2080_NVLINK_CORE_LINK_STATE_TRAFFIC_SETUP 0x12U +#define NV2080_NVLINK_CORE_LINK_STATE_INITPHASE1 0x13U +#define NV2080_NVLINK_CORE_LINK_STATE_INITNEGOTIATE 0x14U +#define 
NV2080_NVLINK_CORE_LINK_STATE_POST_INITNEGOTIATE 0x15U +#define NV2080_NVLINK_CORE_LINK_STATE_INITOPTIMIZE 0x16U +#define NV2080_NVLINK_CORE_LINK_STATE_POST_INITOPTIMIZE 0x17U +#define NV2080_NVLINK_CORE_LINK_STATE_DISABLE_HEARTBEAT 0x18U +#define NV2080_NVLINK_CORE_LINK_STATE_CONTAIN 0x19U +#define NV2080_NVLINK_CORE_LINK_STATE_INITTL 0x1AU + + +#define NV2080_NVLINK_CORE_LINK_STATE_INVALID 0xFFU + +/* + * NVLINK TX Sublink states + * These should ALWAYS match the nvlink core library defines in nvlink.h + */ +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_HS 0x00U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_SINGLE_LANE 0x04U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_TRAIN 0x05U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_SAFE 0x06U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_OFF 0x07U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_COMMON_MODE 0x08U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_COMMON_MODE_DISABLE 0x09U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_DATA_READY 0x0AU +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_EQ 0x0BU +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_PRBS_EN 0x0CU +#define NV2080_NVLINK_CORE_SUBLINK_STATE_TX_POST_HS 0x0DU + +/* + * NVLINK RX Sublink states + * These should ALWAYS match the nvlink core library defines in nvlink.h + */ +#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_HS 0x00U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_SINGLE_LANE 0x04U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_TRAIN 0x05U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_SAFE 0x06U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_OFF 0x07U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_RXCAL 0x08U +#define NV2080_NVLINK_CORE_SUBLINK_STATE_RX_INIT_TERM 0x09U + +/* + * Link training seed values + * These should ALWAYS match the values defined in nvlink.h + */ +#define NV2080_CTRL_NVLINK_MAX_SEED_NUM 6U +#define NV2080_CTRL_NVLINK_MAX_SEED_BUFFER_SIZE (0x7U) /* finn: Evaluated from "NV2080_CTRL_NVLINK_MAX_SEED_NUM + 1" */ + +// NVLINK callback types +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_DL_LINK_MODE 0x00U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_DL_LINK_MODE 0x01U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_TL_LINK_MODE 0x02U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_TL_LINK_MODE 0x03U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_TX_SUBLINK_MODE 0x04U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_TX_SUBLINK_MODE 0x05U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_RX_SUBLINK_MODE 0x06U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_RX_SUBLINK_MODE 0x07U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_RX_SUBLINK_DETECT 0x08U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_RX_SUBLINK_DETECT 0x09U +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_WRITE_DISCOVERY_TOKEN 0x0AU +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_READ_DISCOVERY_TOKEN 0x0BU +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_TRAINING_COMPLETE 0x0CU +#define NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_UPHY_LOAD 0x0DU + +/* + * Structure to store the GET_DL_MODE callback params. 
+ * mode + * The current Nvlink DL mode + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_GET_DL_LINK_MODE_PARAMS { + NvU32 mode; +} NV2080_CTRL_NVLINK_CALLBACK_GET_DL_LINK_MODE_PARAMS; + +/* + * Structure to store the SET_DL_LINK_MODE callback OFF params + * seedData + * The output seed data + */ +typedef struct NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_OFF_PARAMS { + NvU32 seedData[NV2080_CTRL_NVLINK_MAX_SEED_BUFFER_SIZE]; +} NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_OFF_PARAMS; + +/* + * Structure to store the SET_DL_LINK_MODE callback PRE_HS params + * remoteDeviceType + * The input remote Device Type + * ipVerDlPl + * The input DLPL version + */ +typedef struct NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_PRE_HS_PARAMS { + NvU32 remoteDeviceType; + NvU32 ipVerDlPl; +} NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_PRE_HS_PARAMS; + +/* + * Structure to store SET_DL_LINK_MODE callback INIT_PHASE1 params + * seedData[] + * The input seed data + */ +typedef struct NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_INIT_PHASE1_PARAMS { + NvU32 seedData[NV2080_CTRL_NVLINK_MAX_SEED_BUFFER_SIZE]; +} NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_INIT_PHASE1_PARAMS; + +/* + * Structure to store the Nvlink Remote and Local SID info + * remoteSid + * The output remote SID + * remoteDeviceType + * The output remote Device Type + * remoteLinkId + * The output remote link ID + * localSid + * The output local SID + */ +typedef struct NV2080_CTRL_NVLINK_REMOTE_LOCAL_SID_INFO { + NV_DECLARE_ALIGNED(NvU64 remoteSid, 8); + NvU32 remoteDeviceType; + NvU32 remoteLinkId; + NV_DECLARE_ALIGNED(NvU64 localSid, 8); +} NV2080_CTRL_NVLINK_REMOTE_LOCAL_SID_INFO; + +/* + * Structure to store the SET_DL_LINK_MODE callback POST_INITNEGOTIATE params + * bInitnegotiateConfigGood + * The output bool if the config is good + * remoteLocalSidInfo + * The output structure containing the Nvlink Remote/Local SID info + */ +typedef struct NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_POST_INITNEGOTIATE_PARAMS { + NvBool bInitnegotiateConfigGood; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_REMOTE_LOCAL_SID_INFO remoteLocalSidInfo, 8); +} NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_POST_INITNEGOTIATE_PARAMS; + +/* + * Structure to store the SET_DL_LINK_MODE callback POST_INITOPTIMIZE params + * bPollDone + * The output bool if the polling has finished + */ +typedef struct NV2080_CTRLNVLINK_SET_DL_LINK_MODE_POST_INITOPTIMIZE_PARAMS { + NvBool bPollDone; +} NV2080_CTRLNVLINK_SET_DL_LINK_MODE_POST_INITOPTIMIZE_PARAMS; + +/* + * Structure to store the SET_DL_LINK_MODE callback params + * mode + * The input nvlink state to set + * bSync + * The input sync boolean + * linkMode + * The input link mode to be set for the callback + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_SET_DL_LINK_MODE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 mode, 8); + NvBool bSync; + NvU32 linkMode; + union { + NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_OFF_PARAMS linkModeOffParams; + NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_PRE_HS_PARAMS linkModePreHsParams; + NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_INIT_PHASE1_PARAMS linkModeInitPhase1Params; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_POST_INITNEGOTIATE_PARAMS linkModePostInitNegotiateParams, 8); + NV2080_CTRLNVLINK_SET_DL_LINK_MODE_POST_INITOPTIMIZE_PARAMS linkModePostInitOptimizeParams; + } linkModeParams; +} NV2080_CTRL_NVLINK_CALLBACK_SET_DL_LINK_MODE_PARAMS; + +/* + * Structure to store the GET_TL_MODE callback params. 
+ * mode + * The current Nvlink TL mode + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_GET_TL_LINK_MODE_PARAMS { + NvU32 mode; +} NV2080_CTRL_NVLINK_CALLBACK_GET_TL_LINK_MODE_PARAMS; + +/* + * Structure to store the SET_TL_LINK_MODE callback params + * mode + * The input nvlink mode to set + * bSync + * The input sync boolean + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_SET_TL_LINK_MODE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 mode, 8); + NvBool bSync; +} NV2080_CTRL_NVLINK_CALLBACK_SET_TL_LINK_MODE_PARAMS; + +/* + * Structure to store the GET_RX/TX_SUBLINK_MODE callback params + * sublinkMode + * The current Sublink mode + * sublinkSubMode + * The current Sublink sub mode + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_GET_SUBLINK_MODE_PARAMS { + NvU32 sublinkMode; + NvU32 sublinkSubMode; +} NV2080_CTRL_NVLINK_CALLBACK_GET_SUBLINK_MODE_PARAMS; + +/* + * Structure to store the SET_TL_LINK_MODE callback params + * mode + * The input nvlink mode to set + * bSync + * The input sync boolean + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_SET_TX_SUBLINK_MODE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 mode, 8); + NvBool bSync; +} NV2080_CTRL_NVLINK_CALLBACK_SET_TX_SUBLINK_MODE_PARAMS; + +/* + * Structure to store the SET_RX_SUBLINK_MODE callback params + * mode + * The input nvlink mode to set + * bSync + * The input sync boolean + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_SET_RX_SUBLINK_MODE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 mode, 8); + NvBool bSync; +} NV2080_CTRL_NVLINK_CALLBACK_SET_RX_SUBLINK_MODE_PARAMS; + +/* + * Structure to store the GET_RX_SUBLINK_DETECT callback params + * laneRxdetStatusMask + * The output RXDET per-lane status mask + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_GET_RX_DETECT_PARAMS { + NvU32 laneRxdetStatusMask; +} NV2080_CTRL_NVLINK_CALLBACK_GET_RX_DETECT_PARAMS; + +/* + * Structure to store the SET_RX_DETECT callback params + * bSync + * The input bSync boolean + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_SET_RX_DETECT_PARAMS { + NvBool bSync; +} NV2080_CTRL_NVLINK_CALLBACK_SET_RX_DETECT_PARAMS; + +/* + * Structure to store the RD_WR_DISCOVERY_TOKEN callback params + * ipVerDlPl + * The input DLPL version + * token + * The output token + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS { + NvU32 ipVerDlPl; + NV_DECLARE_ALIGNED(NvU64 token, 8); +} NV2080_CTRL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS; + +/* + * Structure to store the GET_UPHY_LOAD callback params + * bUnlocked + * The output unlocked boolean + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_GET_UPHY_LOAD_PARAMS { + NvBool bUnlocked; +} NV2080_CTRL_NVLINK_CALLBACK_GET_UPHY_LOAD_PARAMS; + +/* + * Structure to store the Union of Callback params + * type + * The input type of callback to be executed + */ +typedef struct NV2080_CTRL_NVLINK_CALLBACK_TYPE { + NvU8 type; + union { + NV2080_CTRL_NVLINK_CALLBACK_GET_DL_LINK_MODE_PARAMS getDlLinkMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_CALLBACK_SET_DL_LINK_MODE_PARAMS setDlLinkMode, 8); + NV2080_CTRL_NVLINK_CALLBACK_GET_TL_LINK_MODE_PARAMS getTlLinkMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_CALLBACK_SET_TL_LINK_MODE_PARAMS setTlLinkMode, 8); + NV2080_CTRL_NVLINK_CALLBACK_GET_SUBLINK_MODE_PARAMS getTxSublinkMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_CALLBACK_SET_TX_SUBLINK_MODE_PARAMS setTxSublinkMode, 8); + NV2080_CTRL_NVLINK_CALLBACK_GET_SUBLINK_MODE_PARAMS getRxSublinkMode; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_CALLBACK_SET_RX_SUBLINK_MODE_PARAMS setRxSublinkMode, 8); + 
NV2080_CTRL_NVLINK_CALLBACK_GET_RX_DETECT_PARAMS getRxSublinkDetect; + NV2080_CTRL_NVLINK_CALLBACK_SET_RX_DETECT_PARAMS setRxSublinkDetect; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS writeDiscoveryToken, 8); + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS readDiscoveryToken, 8); + NV2080_CTRL_NVLINK_CALLBACK_GET_UPHY_LOAD_PARAMS getUphyLoad; + } callbackParams; +} NV2080_CTRL_NVLINK_CALLBACK_TYPE; + +/* + * NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK + * + * Generic NvLink callback RPC to route commands to GSP + * + * [In] linkId + * ID of the link to be used + * [In/Out] callbackType + * Callback params + */ +#define NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS { + NvU32 linkId; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_CALLBACK_TYPE callbackType, 8); +} NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK (0x20803019U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS_MESSAGE_ID" */ + + + +/* + * NV2080_CTRL_CMD_NVLINK_UPDATE_REMOTE_LOCAL_SID + * + * Update Remote and Local SID info via GSP + * + * [In] linkId + * ID of the link to be used + * [Out] remoteLocalSidInfo + * The output structure containing the Nvlink Remote/Local SID info + */ +#define NV2080_CTRL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS_MESSAGE_ID (0x1bU) + +typedef struct NV2080_CTRL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS { + NvU32 linkId; + NV_DECLARE_ALIGNED(NV2080_CTRL_NVLINK_REMOTE_LOCAL_SID_INFO remoteLocalSidInfo, 8); +} NV2080_CTRL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_UPDATE_REMOTE_LOCAL_SID (0x2080301bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_TYPE_PROGRAM 0x0U +#define NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_TYPE_RESET 0x1U + +/* + * NV2080_CTRL_CMD_NVLINK_UPDATE_HSHUB_MUX + * + * Generic Hshub Mux Update RPC to route commands to GSP + * + * [In] updateType + * HSHUB Mux update type to program or reset Mux + * [In] bSysMem + * Boolean to differentiate between sysmem and peer mem + * [In] peerMask + * Mask of peer IDs.
Only parsed when bSysMem is false + */ +#define NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS_MESSAGE_ID (0x1cU) + +typedef struct NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS { + NvBool updateType; + NvBool bSysMem; + NvU32 peerMask; +} NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_UPDATE_HSHUB_MUX (0x2080301cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_PRE_SETUP_NVLINK_PEER + * + * Performs all the necessary actions required before setting a peer on NVLink + * + * [In] peerId + * Peer ID which will be set on NVLink + * [In] peerLinkMask + * Mask of links that connects the given peer + * [In] bNvswitchConn + * Is the GPU connected to NVSwitch + */ +#define NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS_MESSAGE_ID (0x1dU) + +typedef struct NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS { + NvU32 peerId; + NvU32 peerLinkMask; + NvBool bNvswitchConn; +} NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS; +#define NV2080_CTRL_CMD_NVLINK_PRE_SETUP_NVLINK_PEER (0x2080301dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_POST_SETUP_NVLINK_PEER + * + * Performs all the necessary actions required after setting a peer on NVLink + * + * [In] peerMask + * Mask of Peer IDs which has been set on NVLink + */ +#define NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS_MESSAGE_ID (0x1eU) + +typedef struct NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS { + NvU32 peerMask; +} NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS; +#define NV2080_CTRL_CMD_NVLINK_POST_SETUP_NVLINK_PEER (0x2080301eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_SYSMEM 0x1U +#define NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_PEER 0x2U + +/* + * NV2080_CTRL_CMD_NVLINK_REMOVE_NVLINK_MAPPING + * + * Performs all the necessary actions required to remove NVLink mapping (sysmem or peer or both) + * + * [In] mapTypeMask + * Remove NVLink mapping for the given map types (sysmem or peer or both) + * [In] peerMask + * Mask of Peer IDs which needs to be removed on NVLink + * Only parsed if mapTypeMask accounts peer + * [In] bL2Entry + * Is the peer removal happening because links are entering L2 low power state? 
+ * Only parsed if mapTypeMask accounts peer + */ +#define NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS_MESSAGE_ID (0x1fU) + +typedef struct NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS { + NvU32 mapTypeMask; + NvU32 peerMask; + NvBool bL2Entry; +} NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS; +#define NV2080_CTRL_CMD_NVLINK_REMOVE_NVLINK_MAPPING (0x2080301fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_SAVE_RESTORE_HSHUB_STATE + * + * Performs all the necessary actions required to save/restore HSHUB state during NVLink L2 entry/exit + * + * [In] bSave + * Whether this is a save/restore operation + * [In] linkMask + * Mask of links for which HSHUB config registers need to be saved/restored + */ +#define NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS { + NvBool bSave; + NvU32 linkMask; +} NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS; +#define NV2080_CTRL_CMD_NVLINK_SAVE_RESTORE_HSHUB_STATE (0x20803020U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_SET (0x00000000) +#define NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_SAVE (0x00000001) +#define NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_RESTORE (0x00000002) + +/* + * NV2080_CTRL_CMD_NVLINK_PROGRAM_BUFFERREADY + * + * Performs all the necessary actions required to save/restore bufferready state during NVLink L2 entry/exit + * + * [In] flags + * Whether to set, save or restore bufferready + * [In] bSysmem + * Whether to perform the operation for sysmem links or peer links + * [In] peerLinkMask + * Mask of peer links for which bufferready state need to be set/saved/restored + */ +#define NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS { + NvU32 flags; + NvBool bSysmem; + NvU32 peerLinkMask; +} NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS; +#define NV2080_CTRL_CMD_NVLINK_PROGRAM_BUFFERREADY (0x20803021U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_UPDATE_CURRENT_CONFIG + * + * Performs all the necessary actions required to update the current Nvlink configuration + * + * [out] bNvlinkSysmemEnabled + * Whether sysmem nvlink support was enabled + */ +#define NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS { + NvBool bNvlinkSysmemEnabled; +} NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS; +#define NV2080_CTRL_CMD_NVLINK_UPDATE_CURRENT_CONFIG (0x20803022U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS_MESSAGE_ID" */ + +// +// Set the near end loopback mode using the following +// Currently, three modes - NEA, NEDR, NEW +// +#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_DEFAULT (0x00000000) +#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_NEA (0x00000001) +#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_NEDR (0x00000002) +#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_NEDW (0x00000003) + +/* + * NV2080_CTRL_CMD_NVLINK_SET_LOOPBACK_MODE + * + * Generic NvLink callback for MODS + * + * [In] linkdId + * ID of the 
link to be used + * [In] loopbackMode + * This value will decide which loopback mode need to + * set on the specified link. + * Modes are NEA / NEDR / NEDW + */ +#define NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS { + NvU32 linkId; + NvU8 loopbackMode; +} NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_SET_LOOPBACK_MODE (0x20803023U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_UPDATE_PEER_LINK_MASK + * + * Synchronizes the peerLinkMask between CPU-RM and GSP-RM + * + * [In] gpuInst + * Gpu instance + * [In] peerLinkMask + * Mask of links to the given peer GPU + */ +#define NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS { + NvU32 gpuInst; + NvU32 peerLinkMask; +} NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_UPDATE_PEER_LINK_MASK (0x20803024U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_UPDATE_LINK_CONNECTION + * + * Updates the remote connection information for a link + * + * [In] linkId + * Id of the link to be used + * [In] bConnected + * Boolean that tracks whether the link is connected + * [In] remoteDeviceType + * Tracks whether the remote device is switch/gpu/ibmnpu/tegra + * [In] remoteLinkNumber + * Tracks the link number for the connected remote device + */ +#define NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS { + NvU32 linkId; + NvBool bConnected; + NV_DECLARE_ALIGNED(NvU64 remoteDeviceType, 8); + NvU32 remoteLinkNumber; +} NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_UPDATE_LINK_CONNECTION (0x20803025U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS_POST_TOPOLOGY + * + * Enable links post topology via GSP + * + * [In] linkMask + * Mask of links to enable + * [Out] initializedLinks + * Mask of links that were initialized + */ +#define NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS_MESSAGE_ID (0x26U) + +typedef struct NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS { + NvU32 linkMask; + NvU32 initializedLinks; +} NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS_POST_TOPOLOGY (0x20803026U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS_MESSAGE_ID" */ + + + +// +// Read Refresh counter - the pass/fail occurrences +// + +typedef struct NV2080_CTRL_NVLINK_PHY_REFRESH_STATUS_INFO { + // requested links or not + NvBool bValid; + + // counters + NvU16 passCount; + NvU16 failCount; +} NV2080_CTRL_NVLINK_PHY_REFRESH_STATUS_INFO; + +#define NV2080_CTRL_NVLINK_MAX_LINK_COUNT 32 + +/* + * NV2080_CTRL_CMD_NVLINK_GET_REFRESH_COUNTERS + * + * + * [In] linkMask + * Specifies for which links we want to read the counters + * [Out] refreshCountPass + * Count of number of times PHY refresh pass + * [Out] refreshCountFail + * Count of number of times PHY refresh fail + */ +#define 
NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS_MESSAGE_ID (0x28U) + +typedef struct NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS { + NvU32 linkMask; + NV2080_CTRL_NVLINK_PHY_REFRESH_STATUS_INFO refreshCount[NV2080_CTRL_NVLINK_MAX_LINK_COUNT]; +} NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_REFRESH_COUNTERS (0x20803028U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS_MESSAGE_ID" */ + +// +// Clear Refresh counter - the pass/fail occurrences +// + +/* + * NV2080_CTRL_CMD_NVLINK_CLEAR_REFRESH_COUNTERS + * + * + * [In] linkMask + * Specifies for which links we want to clear the counters + */ +#define NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS_MESSAGE_ID (0x29U) + +typedef struct NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS { + NvU32 linkMask; +} NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_CLEAR_REFRESH_COUNTERS (0x20803029U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_GET_LINK_MASK_POST_RX_DET + * + * Get link mask post Rx detection + * + * [Out] postRxDetLinkMask + * Mask of links discovered + */ +#define NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS_MESSAGE_ID (0x2aU) + +typedef struct NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS { + NvU32 postRxDetLinkMask; +} NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_MASK_POST_RX_DET (0x2080302aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS_MESSAGE_ID" */ + + + +typedef struct NV2080_CTRL_NVLINK_DEVICE_LINK_VALUES { + NvBool bValid; + NvU8 linkId; + NvU32 ioctrlId; + NvU8 pllMasterLinkId; + NvU8 pllSlaveLinkId; + NvU32 ipVerDlPl; +} NV2080_CTRL_NVLINK_DEVICE_LINK_VALUES; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_NVLINK_DEVICE_INFO + * + * [Out] ioctrlMask + * Mask of IOCTRLs discovered from PTOP device info table + * [Out] ioctrlNumEntries + * Number of IOCTRL entries in the PTOP device info table + * [Out] ioctrlSize + * Maximum number of entries in the PTOP device info table + * [Out] discoveredLinks + * Mask of links discovered from all the IOCTRLs + * [Out] ipVerNvlink + * IP revision of the NVLink HW + * [Out] linkInfo + * Per link information + */ + +#define NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS_MESSAGE_ID (0x2cU) + +typedef struct NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS { + NvU32 ioctrlMask; + NvU8 ioctrlNumEntries; + NvU32 ioctrlSize; + NvU32 discoveredLinks; + NvU32 ipVerNvlink; + NV2080_CTRL_NVLINK_DEVICE_LINK_VALUES linkInfo[NV2080_CTRL_NVLINK_MAX_LINKS]; +} NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_NVLINK_DEVICE_INFO (0x2080302cU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_MAX_LINKS_PER_IOCTRL_SW 6U + +typedef struct NV2080_CTRL_NVLINK_DEVICE_IP_REVISION_VALUES { + NvU32 ipVerIoctrl; + NvU32 ipVerMinion; +} NV2080_CTRL_NVLINK_DEVICE_IP_REVISION_VALUES; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_IOCTRL_DEVICE_INFO + * + * [In] ioctrlIdx + * IOCTRL index + * [Out] PublicId + * PublicId of the IOCTRL discovered + * [Out] localDiscoveredLinks + * Mask of discovered links local to the IOCTRL + * [Out] localGlobalLinkOffset + * 
Global link offsets for the locally discovered links + * [Out] ioctrlDiscoverySize + * IOCTRL table size + * [Out] numDevices + * Number of devices discovered from the IOCTRL + * [Out] deviceIpRevisions + * IP revisions for the devices discovered in the IOCTRL + */ + +#define NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS_MESSAGE_ID (0x2dU) + +typedef struct NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS { + NvU32 ioctrlIdx; + NvU32 PublicId; + NvU32 localDiscoveredLinks; + NvU32 localGlobalLinkOffset; + NvU32 ioctrlDiscoverySize; + NvU8 numDevices; + NV2080_CTRL_NVLINK_DEVICE_IP_REVISION_VALUES ipRevisions; +} NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_IOCTRL_DEVICE_INFO (0x2080302dU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_PROGRAM_LINK_SPEED + * + * Program NVLink Speed from OS/VBIOS + * + * [In] bPlatformLinerateDefined + * Whether line rate is defined in the platform + * [In] platformLineRate + * Platform defined line rate + * [Out] nvlinkLinkSpeed + * The line rate that was programmed for the links + */ +#define NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS_MESSAGE_ID (0x2eU) + +typedef struct NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS { + NvBool bPlatformLinerateDefined; + NvU32 platformLineRate; + NvU32 nvlinkLinkSpeed; +} NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PROGRAM_LINK_SPEED (0x2080302eU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_ARE_LINKS_TRAINED + * + * [In] linkMask + * Mask of links whose state will be checked + * [In] bActiveOnly + * The input boolean to check for Link Active state + * [Out] bIsLinkActive + * Boolean array to track if the link is trained + */ +#define NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS_MESSAGE_ID (0x2fU) + +typedef struct NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS { + NvU32 linkMask; + NvBool bActiveOnly; + NvBool bIsLinkActive[NV2080_CTRL_NVLINK_MAX_LINKS]; +} NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_ARE_LINKS_TRAINED (0x2080302fU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_NVLINK_RESET_FLAGS_ASSERT (0x00000000) +#define NV2080_CTRL_NVLINK_RESET_FLAGS_DEASSERT (0x00000001) +#define NV2080_CTRL_NVLINK_RESET_FLAGS_TOGGLE (0x00000002) + +/* + * NV2080_CTRL_CMD_NVLINK_RESET_LINKS + * + * [In] linkMask + * Mask of links which need to be reset + * [In] flags + * Whether to assert, de-assert or toggle the Nvlink reset + */ + +#define NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS { + NvU32 linkMask; + NvU32 flags; +} NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_RESET_LINKS (0x20803030U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_DISABLE_DL_INTERRUPTS + * + * [In] linkMask + * Mask of links for which DL interrrupts need to be disabled + */ +#define NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS_MESSAGE_ID (0x31U) + +typedef struct NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS { + NvU32 linkMask; +} NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS; 
+ +#define NV2080_CTRL_CMD_NVLINK_DISABLE_DL_INTERRUPTS (0x20803031U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS_MESSAGE_ID" */ + +/* + * Structure to store the GET_LINK_AND_CLOCK_INFO params + * + * [Out] bLinkConnectedToSystem + * Boolean indicating sysmem connection of a link + * [Out] bLinkConnectedToPeer + * Boolean indicating peer connection of a link + * [Out] bLinkReset + * Whether the link is in reset + * [Out] subLinkWidth + * Number of lanes per sublink + * [Out] linkState + * Mode of the link + * [Out] txSublinkState + * Tx sublink state + * [Out] rxSublinkState + * Rx sublink state + * [Out] bLaneReversal + * Boolean indicating if a link's lanes are reversed + * [Out] nvlinkLinkClockKHz + * Link clock value in KHz + * [Out] nvlinkLineRateMbps + * Link line rate in Mbps + * [Out] nvlinkLinkClockMhz + * Link clock in MHz + * [Out] nvlinkLinkDataRateKiBps + * Link Data rate in KiBps + * [Out] nvlinkRefClkType + * Current Nvlink refclk source + * [Out] nvlinkReqLinkClockMhz + * Requested link clock value + */ +typedef struct NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_VALUES { + NvBool bLinkConnectedToSystem; + NvBool bLinkConnectedToPeer; + NvBool bLinkReset; + NvU8 subLinkWidth; + NvU32 linkState; + NvU32 txSublinkState; + NvU32 rxSublinkState; + NvBool bLaneReversal; + NvU32 nvlinkLinkClockKHz; + NvU32 nvlinkLineRateMbps; + NvU32 nvlinkLinkClockMhz; + NvU32 nvlinkLinkDataRateKiBps; + NvU8 nvlinkRefClkType; + NvU32 nvlinkReqLinkClockMhz; +} NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_VALUES; + +/* + * NV2080_CTRL_CMD_NVLINK_GET_LINK_AND_CLOCK_INFO + * + * [In] linkMask + * Mask of enabled links to loop over + * [Out] nvlinkRefClkSpeedKHz + * Ref clock value in KHz + * [Out] linkInfo + * Per link information + */ +#define NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS_MESSAGE_ID (0x32U) + +typedef struct NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS { + NvU32 linkMask; + NvU32 nvlinkRefClkSpeedKHz; + NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_VALUES linkInfo[NV2080_CTRL_NVLINK_MAX_LINKS]; +} NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_LINK_AND_CLOCK_INFO (0x20803032U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_SETUP_NVLINK_SYSMEM + * + * Updates the HSHUB sysmem config register state to reflect sysmem NVLinks + * + * [In] sysmemLinkMask + * Mask of discovered sysmem NVLinks + */ +#define NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS_MESSAGE_ID (0x33U) + +typedef struct NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS { + NvU32 sysmemLinkMask; +} NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS; +#define NV2080_CTRL_CMD_NVLINK_SETUP_NVLINK_SYSMEM (0x20803033U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_PROCESS_FORCED_CONFIGS + * + * Process NVLink forced configurations which includes setting of HSHUB and memory system + * + * [In] bLegacyForcedConfig + * Tracks whether the forced config is legacy forced config or chiplib config + * [Out] bOverrideComputePeerMode + * Whether compute peer mode was enabled + * [In] phase + * Only applicable when bLegacyForcedConfig is true + * Tracks the set of registers to program from the NVLink table + * [In] linkConnection + * Array of chiplib configurations + */ +#define 
NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS_MESSAGE_ID (0x34U) + +typedef struct NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS { + NvBool bLegacyForcedConfig; + NvBool bOverrideComputePeerMode; + NvU32 phase; + NvU32 linkConnection[NV2080_CTRL_NVLINK_MAX_LINKS]; +} NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PROCESS_FORCED_CONFIGS (0x20803034U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS + * + * Sync the NVLink lane shutdown properties with GSP-RM + * + * [In] bLaneShutdownEnabled + * Whether nvlink shutdown is enabled for the chip + * [In] bLaneShutdownOnUnload + * Whether nvlink shutdown should be triggered on driver unload + */ +#define NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS_MESSAGE_ID (0x35U) + +typedef struct NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS { + NvBool bLaneShutdownEnabled; + NvBool bLaneShutdownOnUnload; +} NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS (0x20803035U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_ENABLE_SYSMEM_NVLINK_ATS + * + * Enable ATS functionality related to NVLink sysmem if hardware support is available + * + * [In] notUsed + */ +#define NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS_MESSAGE_ID (0x36U) + +typedef struct NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS { + NvU32 notUsed; +} NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_ENABLE_SYSMEM_NVLINK_ATS (0x20803036U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK + * + * Get the mask of Nvlink links connected to system + * + * [Out] sysmemLinkMask + * Mask of Nvlink links connected to system + */ +#define NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS_MESSAGE_ID (0x37U) + +typedef struct NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS { + NvU32 sysmemLinkMask; +} NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK (0x20803037U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_GET_SET_NVSWITCH_FLA_ADDR + * + * Get/Set NVSwitch FLA address + * + * [In] bGet + * Whether to get or set the NVSwitch FLA address + * [In/Out] addr + * Address that is to be set or retrieved. 
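+ * + * Illustrative usage sketch, not part of this interface (pRmApi, hClient and hSubdevice are assumed caller-provided RMAPI handles): + * NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS flaParams = {0}; + * flaParams.bGet = NV_TRUE; // NV_TRUE: current address is returned in addr; NV_FALSE: addr is taken as the new address + * status = pRmApi->Control(pRmApi, hClient, hSubdevice, NV2080_CTRL_CMD_NVLINK_GET_SET_NVSWITCH_FLA_ADDR, &flaParams, sizeof(flaParams));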
+ */ +#define NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS_MESSAGE_ID (0x38U) + +typedef struct NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS { + NvBool bGet; + NV_DECLARE_ALIGNED(NvU64 addr, 8); +} NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_GET_SET_NVSWITCH_FLA_ADDR (0x20803038) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO + * + * Syncs the different link masks and vbios defined values between CPU-RM and GSP-RM + * + * [in] discoveredLinks + * Mask of links discovered from IOCTRLs + * + * [in] connectedLinksMask + * Mask of links which are connected (remote present) + * + * [in] bridgeSensableLinks + * Mask of links whose remote endpoint presence can be sensed + * + * [in] bridgedLinks + * Mask of links which are connected (remote present) + * Same as connectedLinksMask, but also tracks the case where link + * is connected but marginal and could not initialize + * + * [out] initDisabledLinksMask + * Mask of links for which initialization is disabled + * + * [out] vbiosDisabledLinkMask + * Mask of links disabled in the VBIOS + * + * [out] initializedLinks + * Mask of initialized links + * + * [out] bEnableTrainingAtLoad + * Whether the links should be trained to active during driver load + * + * [out] bEnableSafeModeAtLoad + * Whether the links should be initialized to swcfg during driver load + */ + +#define NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS_MESSAGE_ID (0x39U) + +typedef struct NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS { + NvU32 discoveredLinks; + NvU32 connectedLinksMask; + NvU32 bridgeSensableLinks; + NvU32 bridgedLinks; + NvU32 initDisabledLinksMask; + NvU32 vbiosDisabledLinkMask; + NvU32 initializedLinks; + NvBool bEnableTrainingAtLoad; + NvBool bEnableSafeModeAtLoad; +} NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO (0x20803039U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS + * + * Enable pre-topology setup on the mask of enabled links + * This command accepts no parameters. 
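+ * + * Illustrative only, assuming the same caller-provided RMAPI handles as in the sketches above: with no parameter structure the control would be issued with a NULL payload and zero size, e.g. + * pRmApi->Control(pRmApi, hClient, hSubdevice, NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS, NULL, 0);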
+ */ + +#define NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS (0x2080303aU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | 0x3a" */ + +/* + * NV2080_CTRL_CMD_NVLINK_PROCESS_INIT_DISABLED_LINKS + * + * Process the init disabled NVLinks and filter those out + * + * [in/out] initDisabledLinksMask + * Mask of links initdisabled on a given GPU + * + * [in] bSkipHwNvlinkDisable + * Whether to consider skipping the HW initdisable links + */ + +#define NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS_MESSAGE_ID (0x3bU) + +typedef struct NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS { + NvU32 initDisabledLinksMask; + NvBool bSkipHwNvlinkDisable; +} NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS; + +#define NV2080_CTRL_CMD_NVLINK_PROCESS_INIT_DISABLED_LINKS (0x2080303bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID << 8) | NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS_MESSAGE_ID" */ + +/* _ctrl2080nvlink_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h new file mode 100644 index 000000000..0c5d77957 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf.h @@ -0,0 +1,508 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080perf.finn +// + + + + +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080base.h" + +#define NV_SUBPROC_NAME_MAX_LENGTH 100 + +#include "nvmisc.h" + +#include "ctrl/ctrl2080/ctrl2080clk.h" +#include "ctrl/ctrl2080/ctrl2080gpumon.h" +#include "ctrl/ctrl2080/ctrl2080volt.h" +#include "ctrl/ctrl2080/ctrl2080vfe.h" +#include "ctrl/ctrl2080/ctrl2080pmumon.h" +#include "ctrl/ctrl0080/ctrl0080perf.h" +// +// XAPICHK/XAPI_TEST chokes on the "static NVINLINE" defines in nvmisc.h. +// However, we don't need any of those definitions for those tests (XAPICHK is a +// syntactical check, not a functional test). So, instead, just #define out the +// macros referenced below. +// + +/* + * NV2080_CTRL_CMD_PERF_BOOST + * + * This command can be used to boost P-State up one level or to the highest for a limited + * duration for the associated subdevice. Boosts from different clients are being tracked + * independently. 
Note that there are other factors that can limit P-States so the resulting + * P-State may differ from expectation. + * + * flags + * This parameter specifies the actual command. _CLEAR is to clear existing boost. + * _BOOST_1LEVEL is to boost P-State one level higher. _BOOST_TO_MAX is to boost + * to the highest P-State. + * duration + * This parameter specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_PERF_BOOST_DURATION_MAX. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + + +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD 1:0 +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_CLEAR (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_BOOST_1LEVEL (0x00000001) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CMD_BOOST_TO_MAX (0x00000002) + +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA 4:4 +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_NO (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_CUDA_YES (0x00000001) + +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC 5:5 +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC_NO (0x00000000) +#define NV2080_CTRL_PERF_BOOST_FLAGS_ASYNC_YES (0x00000001) + +#define NV2080_CTRL_PERF_BOOST_DURATION_MAX 3600 //The duration can be specified up to 1 hour +#define NV2080_CTRL_PERF_BOOST_DURATION_INFINITE 0xffffffff // If set this way, the boost will last until cleared. + +#define NV2080_CTRL_CMD_PERF_BOOST (0x2080200a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_BOOST_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_BOOST_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV2080_CTRL_PERF_BOOST_PARAMS { + NvU32 flags; + NvU32 duration; +} NV2080_CTRL_PERF_BOOST_PARAMS; + +/* + * NV2080_CTRL_CMD_PERF_RESERVE_PERFMON_HW + * + * This command reserves HW Performance Monitoring capabilities for exclusive + * use by the requester. If the HW Performance Monitoring capabilities are + * currently in use then NVOS_STATUS_ERROR_STATE_IN_USE is returned. + * + * bAcquire + * When set to TRUE this parameter indicates that the client wants to + * acquire the Performance Monitoring capabilities on the subdevice. + * When set to FALSE this parameter releases the Performance Monitoring + * capabilities on the subdevice. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_ERROR_STATE_IN_USE + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV2080_CTRL_CMD_PERF_RESERVE_PERFMON_HW (0x20802093) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS_MESSAGE_ID (0x93U) + +typedef struct NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS { + NvBool bAcquire; +} NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS; + +/*! + * Enumeration of the RATED_TDP arbitration clients which make requests to force + * enable/disable VF points above the RATED_TDP point. + * + * These clients are sorted in descending priority - the RM will arbitrate + * between all clients in order of priority, taking as output the first client + * whose input action != @ref NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT. + */ +typedef enum NV2080_CTRL_PERF_RATED_TDP_CLIENT { + /*! + * Internal RM client corresponding to the RM's internal state and features. + * The RM client will either allow default behavior (@ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT) or will limit to RATED_TDP + * (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) when no power + * controllers are active. 
+ */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM = 0, + /*! + * This Client is specifically for Bug 1785342 where we need to limit the TDP + * to Min value on boot. And clear the Max TDP limit. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_WAR_BUG_1785342 = 1, + /*! + * Global client request. This client is expected to be used by a global + * switch functionality in an end-user tool, such as EVGA Precision, to + * either force enabling boost above RATED_TDP (@ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED) or to force limiting to + * RATED_TDP (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) across the + * board, regardless of any app-profie settings. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_GLOBAL = 2, + /*! + * Operating system request. This client is expected to be used by the + * operating system to set @ref NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LOCK + * for performance profiling. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_OS = 3, + /*! + * App profile client requests. This client is expected to be used by the + * app-profile settings to either default to whatever was requested by + * higher-priority clients (@ref NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT) + * or to limit to RATED_TDP (@ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT) for apps which have shown + * bad behavior when boosting. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_PROFILE = 4, + /*! + * Number of supported clients. + * + * @Note MUST ALWAYS BE LAST! + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS = 5, +} NV2080_CTRL_PERF_RATED_TDP_CLIENT; + +/*! + * Enumeration RATED_TDP actions - these are the requested actions clients can + * make to change the behavior of the RATED_TDP functionality. + */ +typedef enum NV2080_CTRL_PERF_RATED_TDP_ACTION { + /*! + * The default action - meaning no explicit request from the client other + * than to take the default behavior (allowing boosting above RATED_TDP) or + * any explicit actions from lower priority clients. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT = 0, + /*! + * Force allow boosting above RATED_TDP - this action explicitly requests + * boosting above RATED_TDP, preventing lower priority clients to limit to + * RATED_TDP. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED = 1, + /*! + * Force to limit above RATED_TDP - this action explicitly requests to limit + * to RATED_TDP. This is the opposite of the default behavior to allow + * boosting above RATED_TDP. Clients specify this action when they + * explicitly need boost to be disabled (e.g. eliminating perf variation, + * special apps which exhibit bad behavior, etc.). + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT = 2, + /*! + * Lock to RATED_TDP - this action requests the clocks to be fixed at the + * RATED_TDP. Used for achieving stable clocks required for profiling. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LOCK = 3, + /*! + * Lock to Min TDP - This requests min to be fixed at RATED_TDP but allow + * boosting for max + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_FLOOR = 4, +} NV2080_CTRL_PERF_RATED_TDP_ACTION; + +/*! + * Structure describing dynamic state of the RATED_TDP feature. + */ +#define NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS_MESSAGE_ID (0x6DU) + +typedef struct NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS { + /*! + * Structure of internal RM state - these values are used to determine the + * behavior of NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM per the RM's @ref + * perfPwrRatedTdpLimitRegisterClientActive() interface. + */ + struct { + /*! 
+ * [out] - Mask of active client controllers (@ref + * PERF_PWR_RATED_TDP_CLIENT) which are currently regulating TDP. When + * this mask is zero, NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM will request + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT. + */ + NvU32 clientActiveMask; + /*! + * [out] - Boolean indicating that user has requested locking to + * RATED_TDP vPstate via corresponding regkey + * NV_REG_STR_RM_PERF_RATED_TDP_LIMIT. When the boolean value is true, + * NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM will request + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT. + */ + NvU8 bRegkeyLimitRatedTdp; + } rm; + + /*! + * [out] - Arbitrated output action of all client requests (@ref inputs). + * This is the current state of the RATED_TDP feature. Will only be @ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED or @ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_LIMIT. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION output; + /*! + * [out] - Array of input client request actions, indexed via @ref + * NV2080_CTRL_PERF_RATED_TDP_CLIENT_. RM will arbitrate between these + * requests, choosing the highest priority request != @ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_DEFAULT or fallback to choosing @ref + * NV2080_CTRL_PERF_RATED_TDP_ACTION_FORCE_EXCEED. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION inputs[NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS]; +} NV2080_CTRL_PERF_RATED_TDP_STATUS_PARAMS; + +/*! + * NV2080_CTRL_CMD_PERF_RATED_TDP_GET_CONTROL + * + * This command retrieves the current requested RATED_TDP action corresponding + * to the specified client. + * + * See @ref NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS for documentation of + * parameters. + * + * Possible status values returned are + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_PERF_RATED_TDP_GET_CONTROL (0x2080206e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | 0x6E" */ + +/*! + * NV2080_CTRL_CMD_PERF_RATED_TDP_SET_CONTROL + * + * This command sets the requested RATED_TDP action corresponding to the + * specified client. @Note, however, that this command is unable to set @ref + * NV2080_CTRL_PERF_RATED_TDP_CLIENT_RM. + * + * See @ref NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS for documentation of + * parameters. + * + * Possible status values returned are + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_PERF_RATED_TDP_SET_CONTROL (0x2080206f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | 0x6F" */ + +/*! + * Structure containing the requested action for a RATED_TDP client (@ref + * NV2080_CTRL_PERF_RATED_TDP_CLIENT). + */ +typedef struct NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS { + /*! + * [in] - Specified client for request. + */ + NV2080_CTRL_PERF_RATED_TDP_CLIENT client; + /*! + * [in/out] - Client's requested action. + */ + NV2080_CTRL_PERF_RATED_TDP_ACTION input; +} NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS; + +/*! + * This struct represents the GPU monitoring perfmon sample for an engine. + */ +typedef struct NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE { + /*! + * Percentage during the sample that the engine remains busy. This + * is in units of pct*100. + */ + NvU32 util; + /*! + * Scaling factor to convert utilization from full GPU to per vGPU. + */ + NvU32 vgpuScale; + /*! + * Process ID of the process that was active on the engine when the + * sample was taken. If no process is active then NV2080_GPUMON_PID_INVALID + * will be returned. + */ + NvU32 procId; + /*! 
+ * Process ID of the process in the vGPU VM that was active on the engine when + * the sample was taken. If no process is active then NV2080_GPUMON_PID_INVALID + * will be returned. + */ + NvU32 subProcessID; + /*! + * Process name of the process in the vGPU VM that was active on the engine when + * the sample was taken. If no process is active then NULL will be returned. + */ + char subProcessName[NV_SUBPROC_NAME_MAX_LENGTH]; +} NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE; + +/*! + * This struct represents the GPU monitoring perfmon sample. + */ +typedef struct NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE { + /*! + * Base GPU monitoring sample. + */ + NV_DECLARE_ALIGNED(NV2080_CTRL_GPUMON_SAMPLE base, 8); + /*! + * FB bandwidth utilization sample. + */ + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE fb; + /*! + * GR utilization sample. + */ + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE gr; + /*! + * NV ENCODER utilization sample. + */ + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE nvenc; + /*! + * NV DECODER utilization sample. + */ + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE nvdec; +} NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE; + +/*! + * This struct represents the GPU monitoring samples of perfmon values that + * client wants the access to. + */ +#define NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM_MESSAGE_ID (0x83U) + +typedef NV2080_CTRL_GPUMON_SAMPLES NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM; + +/*! + * Number of GPU monitoring sample in their respective buffers. + */ +#define NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL 100 + +#define NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_BUFFER_SIZE \ + NV_SIZEOF32(NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE) * \ + NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL + +/*! + * NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2 + * + * This command returns perfmon gpu monitoring utilization samples. + * This command is not supported with SMC enabled. + * + * See NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM_V2 for documentation + * on the parameters. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + * Note this is the same as NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES + * but without the embedded pointer. + * + */ +#define NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2 (0x20802096) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_MESSAGE_ID" */ + +/*! + * This structure represents the GPU monitoring samples of utilization values that + * the client wants access to. + */ +#define NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_MESSAGE_ID (0x96U) + +typedef struct NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS { + /*! + * Type of the sample, see NV2080_CTRL_GPUMON_SAMPLE_TYPE_* for reference. + */ + NvU8 type; + /*! + * Size of the buffer, this should be + * bufSize == NV2080_CTRL_*_GPUMON_SAMPLE_COUNT_* + * sizeof(derived type of NV2080_CTRL_GPUMON_SAMPLE). + */ + NvU32 bufSize; + /*! + * Number of samples in ring buffer. + */ + NvU32 count; + /*! + * tracks the offset of the tail in the circular queue array pSamples. + */ + NvU32 tracker; + /*! + * A circular queue with size == bufSize. + * + * @note This circular queue wraps around after 10 seconds of sampling, + * and it is clients' responsibility to query within this time frame in + * order to avoid losing samples. 
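A minimal sketch, under the same hypothetical rmControl()/handle assumptions, of pulling the utilization samples described above and printing GR and FB utilization (util is in units of pct*100):

#include <stdio.h>
#include <string.h>
#include "ctrl/ctrl2080/ctrl2080perf.h"

NV_STATUS rmControl(NvHandle hClient, NvHandle hSubdevice,
                    NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS
dumpPerfmonUtil(NvHandle hClient, NvHandle hSubdevice)
{
    /* static: the embedded 100-sample array makes this structure large. */
    static NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS params;
    NV_STATUS status;
    NvU32 i;

    memset(&params, 0, sizeof(params));
    params.bufSize = NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_BUFFER_SIZE;
    /* A real client would also select params.type from the
     * NV2080_CTRL_GPUMON_SAMPLE_TYPE_* values (not shown here). */

    status = rmControl(hClient, hSubdevice,
                       NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2,
                       &params, sizeof(params));
    if (status != NV_OK)
        return status;

    /* Entries wrap per the tracker notes above; zeroed entries are unused. */
    for (i = 0; i < NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL; i++)
    {
        printf("sample %3u: GR %u.%02u%%  FB %u.%02u%%\n", i,
               params.samples[i].gr.util / 100, params.samples[i].gr.util % 100,
               params.samples[i].fb.util / 100, params.samples[i].fb.util % 100);
    }
    return NV_OK;
}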
+ * @note With one exception, this queue contains last 10 seconds of samples + * with tracker poiniting to oldest entry and entry before tracker as the + * newest entry. Exception is when queue is not full (i.e. tracker is + * pointing to a zeroed out entry), in that case valid entries are between 0 + * and tracker. + * @note Clients can store tracker from previous query in order to provide + * samples since last read. + */ + NV_DECLARE_ALIGNED(NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE samples[NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL], 8); +} NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS; + +/* + * NV2080_CTRL_CMD_PERF_GPU_IS_IDLE + * + * This command notifies RM to make p state switching aggressive by setting + * required limiting factors to speed up GC6 Entry initiation. + * + * prevPstate [out] + * This parameter will contain the pstate before the switch was initiated + * + * Possible status return values are: + * NV_OK : If P State Switch is successful + * NV_INVALID_STATE : If unable to access P State structure + * NVOS_STATUS_ERROR : If P State Switch is unsuccessful + */ +#define NV2080_CTRL_CMD_PERF_GPU_IS_IDLE (0x20802089) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS_MESSAGE_ID (0x89U) + +typedef struct NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS { + NvU32 prevPstate; + NvU32 action; +} NV2080_CTRL_PERF_GPU_IS_IDLE_PARAMS; + +#define NV2080_CTRL_PERF_GPU_IS_IDLE_TRUE (0x00000001) +#define NV2080_CTRL_PERF_GPU_IS_IDLE_FALSE (0x00000002) + +/* + * NV2080_CTRL_CMD_PERF_AGGRESSIVE_PSTATE_NOTIFY + * + * This command is for the KMD Aggressive P-state feature. + * + * bGpuIsIdle [in] + * When true, applies cap to lowest P-state/GPCCLK. When false, releases cap. + * idleTimeUs [in] + * The amount of time (in microseconds) the GPU was idle since previous + * call, part of the GPU utilization data from KMD. + * busyTimeUs [in] + * The amount of time (in microseconds) the GPU was not idle since + * previous call, part of the GPU utilization data from KMD. + * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + + +#define NV2080_CTRL_CMD_PERF_AGGRESSIVE_PSTATE_NOTIFY (0x2080208f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID << 8) | NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS_MESSAGE_ID (0x8FU) + +typedef struct NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS { + NvBool bGpuIsIdle; + NvBool bRestoreToMax; + NV_DECLARE_ALIGNED(NvU64 idleTimeUs, 8); + NV_DECLARE_ALIGNED(NvU64 busyTimeUs, 8); +} NV2080_CTRL_PERF_AGGRESSIVE_PSTATE_NOTIFY_PARAMS; + + +/* _ctrl2080perf_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h new file mode 100644 index 000000000..f1a7149bb --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080perf_cf.finn +// + + +/* _ctrl2080perf_cf_h_ */ + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h new file mode 100644 index 000000000..0a3e6b2b8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080perf_cf_pwr_model.finn +// + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h new file mode 100644 index 000000000..abbc4cf38 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmgr.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080pmgr.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h new file mode 100644 index 000000000..53cb620a6 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080pmumon.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080pmumon.finn +// + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h new file mode 100644 index 000000000..2d01ed555 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080power.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080power.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h new file mode 100644 index 000000000..0421473de --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080rc.h @@ -0,0 +1,371 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080rc.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* + * NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM + * + * This command returns data read from the specified virtual memory address for + * the associated subdevice. + * + * hChannel + * This parameter specifies the channel object handle from which the virtual + * memory range applies. + * virtAddress + * This parameter specifies the GPU base virtual memory address from which data should + * be read. The amount of data read is specified by the bufferSize parameter. 
+ * bufferPtr + * This parameter specifies the buffer address in the caller's address space into which + * the data is to be returned. The address must be aligned on an 8-byte boundary. + * The buffer must be at least as big as the value specified bufferSize parameter (in bytes). + * bufferSize + * This parameter specifies the size of the buffer referenced by the bufferPtr parameter. + * This parameter also indicates the total number of bytes to be returned. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_XLATE + */ +#define NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS { + NvHandle hChannel; + NV_DECLARE_ALIGNED(NvU64 virtAddress, 8); + NV_DECLARE_ALIGNED(NvP64 bufferPtr, 8); + NvU32 bufferSize; +} NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS; + +#define NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM (0x20802204) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_RC_GET_ERROR_COUNT + * + * This command returns the number of RC errors. + * + * errorCount + * Number of RC errors. + * + * Note: If SMC is enabled, mig/monitor capability must be acquired to query + * aggregate information. Otherwise, the control call returns + * NV_ERR_INSUFFICIENT_PERMISSIONS. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INSUFFICIENT_PERMISSIONS. + */ +#define NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS { + NvU32 errorCount; +} NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS; + +#define NV2080_CTRL_CMD_RC_GET_ERROR_COUNT (0x20802205) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_RC_ERROR_PARAMS_BUFFER_SIZE (0x2000) /* finn: Evaluated from "(8 * 1024)" */ + +#define NV2080_CTRL_CMD_RC_GET_ERROR (0x20802206) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x6" */ + +/* + * NV2080_CTRL_CMD_RC_GET_ERROR_V2 + * + * This command returns an error element in the RC error list. + * + * whichBuffer + * Which Error to return (0 is oldest) + * outputRecordSize + * Output Size of Buffer -- Zero if error record doesn't exist + * recordBuffer + * buffer + * + * Note: If SMC is enabled, mig/monitor capability must be acquired to query + * aggregate information. Otherwise, the control call returns + * NV_ERR_INSUFFICIENT_PERMISSIONS. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INSUFFICIENT_PERMISSIONS. + * + */ + +#define NV2080_CTRL_RC_GET_ERROR_V2_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV2080_CTRL_RC_GET_ERROR_V2_PARAMS { + + NvU32 whichBuffer; // [IN] - which error to return (0 is oldest) + NvU32 outputRecordSize; // [OUT] + NvU8 recordBuffer[NV2080_CTRL_RC_ERROR_PARAMS_BUFFER_SIZE]; +} NV2080_CTRL_RC_GET_ERROR_V2_PARAMS; + +#define NV2080_CTRL_CMD_RC_GET_ERROR_V2 (0x20802213) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_ERROR_V2_PARAMS_MESSAGE_ID" */ + +/* + * NV2080_CTRL_CMD_RC_SET_CLEAN_ERROR_HISTORY + * + * This command cleans error history. + * + * This command has no input parameters. 
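A hedged sketch of walking the RC error list using the two controls defined above; rmControl() is again a hypothetical stand-in for the client's control dispatch:

#include <stdio.h>
#include <string.h>
#include "ctrl/ctrl2080/ctrl2080rc.h"

NV_STATUS rmControl(NvHandle hClient, NvHandle hSubdevice,
                    NvU32 cmd, void *pParams, NvU32 paramsSize);

static void
dumpRcErrors(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS countParams;
    static NV2080_CTRL_RC_GET_ERROR_V2_PARAMS errParams;  /* holds an 8 KB record buffer */
    NvU32 i;

    memset(&countParams, 0, sizeof(countParams));
    if (rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_RC_GET_ERROR_COUNT,
                  &countParams, sizeof(countParams)) != NV_OK)
        return;

    for (i = 0; i < countParams.errorCount; i++)
    {
        memset(&errParams, 0, sizeof(errParams));
        errParams.whichBuffer = i;  /* 0 is the oldest record */

        if (rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_RC_GET_ERROR_V2,
                      &errParams, sizeof(errParams)) != NV_OK ||
            errParams.outputRecordSize == 0)
            break;  /* zero size means the record does not exist */

        printf("RC error %u: %u-byte record\n", i, errParams.outputRecordSize);
    }
}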
+ * + * Possible status values returned are: + * NV_OK + */ + +#define NV2080_CTRL_CMD_RC_SET_CLEAN_ERROR_HISTORY (0x20802207) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x7" */ + +/* + * NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO + * + * This command returns information about the RC watchdog. + * + * watchdogStatusFlags + * This output parameter is a combination of one or more of the following: + * + * NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_NONE + * This is the value of watchdogStatusFlags if no flags are set. + * + * NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_DISABLED + * This means that the watchdog is disabled. + * + * NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING + * This means that the watchdog is running. + * + * NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_INITIALIZED + * This means that the watchdog has been initialized. + * + * A typical result would be either "running and initialized", or + * "disabled". However, "initialized, but not running, and not disabled" + * is also quite reasonable (if the computer is hibernating, for example). + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS { + NvU32 watchdogStatusFlags; +} NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS; + +#define NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO (0x20802209) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS_MESSAGE_ID" */ + +/* valid values for watchdogStatusFlags */ +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_NONE (0x00000000) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_DISABLED (0x00000001) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING (0x00000002) +#define NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_INITIALIZED (0x00000004) + +/* + * NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG + * + * This command disables the RC watchdog, if possible. + * If, however, another RM client has already explicitly (via NV2080 call) enabled + * the RC watchdog, then this method returns NV_ERR_STATE_IN_USE. + * + * This command, if successful, will prevent other clients from enabling the + * watchdog until the calling RM client releases its request with + * NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS or frees its NV20_SUBDEVICE. + * + * See NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG for disabling the watchdog + * without preventing other clients from enabling it. + * + * Possible status return values are: + * NV_OK + * NV_ERR_STATE_IN_USE + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG (0x2080220a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xA" */ + +/* + * NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG + * + * This command enables the RC watchdog, if possible. + * If, however, another RM client has already explicitly (via NV2080 call) disabled + * the RC watchdog, then this method returns NV_ERR_STATE_IN_USE. + * + * Possible status return values are: + * NV_OK + * NV_ERR_STATE_IN_USE + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG (0x2080220b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xB" */ + +/* + * NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS + * + * This command releases all of the RM client's outstanding requests to enable + * or disable the watchdog. 
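A small sketch (same hypothetical rmControl() helper and handle assumptions as above) that queries the watchdog state and decodes the flag bits defined above:

#include <stdio.h>
#include <string.h>
#include "ctrl/ctrl2080/ctrl2080rc.h"

NV_STATUS rmControl(NvHandle hClient, NvHandle hSubdevice,
                    NvU32 cmd, void *pParams, NvU32 paramsSize);

static void
reportWatchdogState(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS params;

    memset(&params, 0, sizeof(params));
    if (rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO,
                  &params, sizeof(params)) != NV_OK)
        return;

    printf("watchdog:%s%s%s\n",
           (params.watchdogStatusFlags & NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_DISABLED)    ? " disabled"    : "",
           (params.watchdogStatusFlags & NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING)     ? " running"     : "",
           (params.watchdogStatusFlags & NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_INITIALIZED) ? " initialized" : "");
}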
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS (0x2080220c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xC" */ + +/* + * NV2080_CTRL_CMD_SET_RC_RECOVERY/NV2080_CTRL_CMD_GET_RC_RECOVERY + * + * This command disables/enables RC recovery. + * + * rcEnable + * NV2080_CTRL_CMD_SET_RC_RECOVERY_DISABLED + * Disable robust channel recovery. + * + * NV2080_CTRL_CMD_SET_RC_RECOVERY_ENABLED + * Enable robust channel recovery with default breakpoint handling. + * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +typedef struct NV2080_CTRL_CMD_RC_RECOVERY_PARAMS { + NvU32 rcEnable; +} NV2080_CTRL_CMD_RC_RECOVERY_PARAMS; + +#define NV2080_CTRL_CMD_SET_RC_RECOVERY (0x2080220d) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xD" */ + +#define NV2080_CTRL_CMD_GET_RC_RECOVERY (0x2080220e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0xE" */ + +/* valid values for rcEnable */ +#define NV2080_CTRL_CMD_RC_RECOVERY_DISABLED (0x00000000) +#define NV2080_CTRL_CMD_RC_RECOVERY_ENABLED (0x00000001) + +/* + * NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE + * + * This command can be used to set TDR timeout state. + * + * It can be used to indicate that a timeout has occurred and that a GPU + * reset will start. It can also be used to indicate that the reset has + * completed along with the corresponding complition status. + * + * cmd + * This parameter is used to indicate the stage of the TDR recovery + * process. Legal values for this parameter are: + * NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_BEGIN + * This value indicates that TDR recovery is about to begin. + * NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END + * This value indicates that TDR recovery has completed. + * + * status + * This parameter is valid when the cmd parameter is set to + * NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END. It is used + * to specify the completion status of the TDR recovery. Legal + * values for this parameter include: + * NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_FAIL + * This value indicates the recovery failed. + * NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_SUCCESS + * This value indicates the recovery succeeded. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_COMMAND + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE (0x2080220f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS { + NvU32 cmd; + NvS32 status; +} NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS; + +/* valid cmd values */ +#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_BEGIN (0x00000000) +#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END (0x00000001) + +/* valid status values */ +#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_SUCCESS (0x00000000) +#define NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_FAIL (0x00000001) + +/* + * NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG + * + * This command disables the RC watchdog, similarly to how + * NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG does. However, unlike that command, this + * command will not prevent another RM client from explicitly enabling the RC + * watchdog with NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG. 
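To illustrate the intended call sequence for NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE described above, a sketch under the same hypothetical rmControl()/handle assumptions: signal that a reset is starting, then report its completion status.

#include <string.h>
#include "ctrl/ctrl2080/ctrl2080rc.h"

NV_STATUS rmControl(NvHandle hClient, NvHandle hSubdevice,
                    NvU32 cmd, void *pParams, NvU32 paramsSize);

static void
notifyTdrRecovery(NvHandle hClient, NvHandle hSubdevice, NvBool bSucceeded)
{
    NV2080_CTRL_TDR_SET_TIMEOUT_STATE_PARAMS params;

    /* 1. Announce that TDR recovery (GPU reset) is about to begin. */
    memset(&params, 0, sizeof(params));
    params.cmd = NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_BEGIN;
    rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE,
              &params, sizeof(params));

    /* ... the GPU reset itself would happen here ... */

    /* 2. Report that recovery has completed, with its status. */
    params.cmd    = NV2080_CTRL_TDR_SET_TIMEOUT_STATE_CMD_GPU_RESET_END;
    params.status = bSucceeded ? NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_SUCCESS
                               : NV2080_CTRL_TDR_SET_TIMEOUT_STATE_STATUS_FAIL;
    rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_TDR_SET_TIMEOUT_STATE,
              &params, sizeof(params));
}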
+ * + * Possible status return values are: + * NV_OK + * NV_ERR_STATE_IN_USE + * NV_ERR_NOT_SUPPORTED + */ +#define NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG (0x20802210) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x10" */ + +/* + * NV2080_CTRL_CMD_GET_RC_INFO/NV2080_CTRL_CMD_SET_RC_INFO + * + * This command can be used to set robust channel parameters. + * + * rcMode + * NV2080_CTRL_CMD_SET_RC_INFO_MODE_DISABLE + * Disable robust channel operation. + * + * NV2080_CTRL_CMD_SET_RC_INFO_MODE_ENABLE + * Enable robust channel operation. + * + * rcBreak + * NV2080_CTRL_CMD_SET_RC_INFO_BREAK_DISABLE + * Disable breakpoint handling during robust channel operation. + * + * NV2080_CTRL_CMD_SET_RC_INFO_BREAK_ENABLE + * Enable breakpoint handling during robust channel operation. + * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +typedef struct NV2080_CTRL_CMD_RC_INFO_PARAMS { + NvU32 rcMode; + NvU32 rcBreak; +} NV2080_CTRL_CMD_RC_INFO_PARAMS; + +#define NV2080_CTRL_CMD_SET_RC_INFO (0x20802211) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x11" */ + +#define NV2080_CTRL_CMD_GET_RC_INFO (0x20802212) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID << 8) | 0x12" */ + +/* valid rcMode values */ +#define NV2080_CTRL_CMD_RC_INFO_MODE_DISABLE (0x00000000) +#define NV2080_CTRL_CMD_RC_INFO_MODE_ENABLE (0x00000001) + +/* valid rcBreak values */ +#define NV2080_CTRL_CMD_RC_INFO_BREAK_DISABLE (0x00000000) +#define NV2080_CTRL_CMD_RC_INFO_BREAK_ENABLE (0x00000001) + +/* _ctrl2080rc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h new file mode 100644 index 000000000..448723c8c --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080spi.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080spi.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h new file mode 100644 index 000000000..abe66effa --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080thermal.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080thermal.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h new file mode 100644 index 000000000..3bf7b5c8e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080tmr.h @@ -0,0 +1,236 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080tmr.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_TIMER related control commands and parameters */ + +/* + * NV2080_CTRL_CMD_TIMER_SCHEDULE + * + * This command schedules a GPU timer event to fire at the specified time interval. + * Can be called without API & GPU locks if NVOS54_FLAGS_IRQL_RAISED and + * NVOS54_FLAGS_LOCK_BYPASS are set in NVOS54_PARAMETERS.flags + * + * time_nsec + * This parameter specifies the time in nanoseconds at which the GPU timer + * event is to fire. + * flags + * This parameter determines the interpretation of the value specified by + * the time_nsec parameter: + * NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS + * This flag indicates that time_nsec is in absolute time. + * NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_REL + * This flag indicates that time_nsec is in relative time. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_TIMER_SCHEDULE (0x20800401) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 time_nsec, 8); + NvU32 flags; +} NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS; + +/* valid flag values */ +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME 0:0 +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS (0x00000000) +#define NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_REL (0x00000001) + +/* + * NV2080_CTRL_CMD_TIMER_CANCEL + * + * This command cancels any pending timer events initiated with the + * NV2080_CTRL_CMD_TIMER_SCHEDULE command. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_CANCEL (0x20800402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | 0x2" */ + +/* + * NV2080_CTRL_CMD_TIMER_GET_TIME + * + * This command returns the current GPU timer value. The current time is + * expressed in elapsed nanoseconds since 00:00 GMT, January 1, 1970 + * (zero hour) with a resolution of 32 nanoseconds. + * + * Can be called without API & GPU locks if NVOS54_FLAGS_IRQL_RAISED and + * NVOS54_FLAGS_LOCK_BYPASS are set in NVOS54_PARAMETERS.flags + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_GET_TIME (0x20800403) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_TIME_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GET_TIME_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_TIMER_GET_TIME_PARAMS { + NV_DECLARE_ALIGNED(NvU64 time_nsec, 8); +} NV2080_CTRL_TIMER_GET_TIME_PARAMS; + +/* + * NV2080_CTRL_CMD_TIMER_GET_REGISTER_OFFSET + * + * The command returns the offset of the timer registers, so that clients may + * map them directly. 
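A brief sketch (hypothetical rmControl() dispatch, valid handles assumed) that reads the current GPU timer and then schedules a timer event one millisecond in the future using the relative-time flag defined above:

#include <string.h>
#include "ctrl/ctrl2080/ctrl2080tmr.h"

NV_STATUS rmControl(NvHandle hClient, NvHandle hSubdevice,
                    NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS
scheduleTimerIn1ms(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_TIMER_GET_TIME_PARAMS     now;
    NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS sched;
    NV_STATUS status;

    memset(&now, 0, sizeof(now));
    status = rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_TIMER_GET_TIME,
                       &now, sizeof(now));   /* current PTIMER value, in ns */
    if (status != NV_OK)
        return status;

    memset(&sched, 0, sizeof(sched));
    sched.time_nsec = 1000000;                                /* 1 ms from now */
    sched.flags     = NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_REL;

    return rmControl(hClient, hSubdevice, NV2080_CTRL_CMD_TIMER_SCHEDULE,
                     &sched, sizeof(sched));
}

A pending event scheduled this way can be revoked with NV2080_CTRL_CMD_TIMER_CANCEL.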
+ * + * Possible status values returned are: + * NV_OK + */ + +#define NV2080_CTRL_CMD_TIMER_GET_REGISTER_OFFSET (0x20800404) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS { + NvU32 tmr_offset; +} NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS; + +/* + * NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE + * + * This structure describes the information obtained with + * NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO command. + * + * gpuTime + * GPU time is the value of GPU global timer (PTIMER) with a resolution + * of 32 nano seconds. + * cpuTime + * CPU time. Resolution of the cpu time depends on its source. Refer to + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_* for more information. + + */ +typedef struct NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE { + NV_DECLARE_ALIGNED(NvU64 cpuTime, 8); + NV_DECLARE_ALIGNED(NvU64 gpuTime, 8); +} NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE; + + +/* + * NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO + * + * This command returns correlation information between GPU time and CPU time + * for a given CPU clock type. + * + * cpuClkId + * This parameter specifies the source of the CPU clock. Legal values for + * this parameter include: + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME + * This clock id will provide real time in microseconds since + * 00:00:00 UTC on January 1, 1970. + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API + * This clock id will provide time stamp that is constant-rate, high + * precision using platform API that is also available in the user mode. + * NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC + * This clock id will provide time stamp using CPU's time stamp counter. + * + * sampleCount + * This field specifies the number of clock samples to be taken. + * This value cannot exceed NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES. + * + * samples + * This field returns an array of requested samples. Refer to + * NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE to get details about each entry + * in the array. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO (0x20800406) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES 16 + +#define NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS { + NvU8 cpuClkId; + NvU8 sampleCount; + NV_DECLARE_ALIGNED(NV2080_CTRL_TIMER_GPU_CPU_TIME_SAMPLE samples[NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES], 8); +} NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS; + +/* Legal cpuClkId values */ +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME (0x00000001) +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC (0x00000002) +#define NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API (0x00000003) +/*! + * NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ + * + * This command changes the frequency at which Graphics Engine time stamp is + * updated. Frequency can either be set to max or restored to default. 
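Building on the correlation-info description above, a minimal sketch (same hypothetical helper and handle assumptions) that requests a handful of paired GPU/CPU timestamps against the OS real-time clock:

#include <stdio.h>
#include <string.h>
#include "ctrl/ctrl2080/ctrl2080tmr.h"

NV_STATUS rmControl(NvHandle hClient, NvHandle hSubdevice,
                    NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS
sampleGpuCpuTime(NvHandle hClient, NvHandle hSubdevice)
{
    NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS params;
    NV_STATUS status;
    NvU8 i;

    memset(&params, 0, sizeof(params));
    params.cpuClkId    = NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME;
    params.sampleCount = 8;  /* must not exceed NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES */

    status = rmControl(hClient, hSubdevice,
                       NV2080_CTRL_CMD_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO,
                       &params, sizeof(params));
    if (status != NV_OK)
        return status;

    for (i = 0; i < params.sampleCount; i++)
    {
        /* gpuTime is PTIMER ns; with _OSTIME, cpuTime is us since the epoch. */
        printf("gpu %llu ns, cpu %llu us\n",
               (unsigned long long)params.samples[i].gpuTime,
               (unsigned long long)params.samples[i].cpuTime);
    }
    return NV_OK;
}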
+ * Clients can independently use this call to increase the timer frequency + * as a global reference count is maintained for requests to Max frequency. + * Client is assured that the system stays in this state till the requested + * client releases the state or is killed. Timer frequency will automatically + * be restored to default when there is no pending request to increase. + * + * Note that recursive requests for the same state from the same client + * are considered invalid. + * + * bSetMaxFreq + * Set to NV_TRUE if GR tick frequency needs to be set to Max. + * + * See @ref NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS for + * documentation of parameters. + * + * Possible status values returned are + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_STATE_IN_USE + * NV_ERR_INVALID_OPERATION + * NV_ERR_INVALID_STATE + */ +#define NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ (0x20800407) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID << 8) | NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_MESSAGE_ID" */ + +/*! + * This struct contains bSetMaxFreq flag. + */ +#define NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS { + NvBool bSetMaxFreq; +} NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS; + +/* _ctrl2080tmr_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h new file mode 100644 index 000000000..93b1113e1 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ucodefuzzer.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080ucodefuzzer.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h new file mode 100644 index 000000000..16d6cac35 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080unix.h @@ -0,0 +1,193 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080unix.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" + +/* NV20_SUBDEVICE_XX OS control commands and parameters */ + +/* + * NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT + * + * This command increases or decreases the value of the per-GPU GC6 blocker + * refCount used by Linux kernel clients to prevent the GPU from entering GC6. + * + * When the refCount is non-zero, the GPU cannot enter GC6. When the refCount + * transitions from zero to non-zero as a result of this command, the GPU will + * automatically come out of GC6. + * + * action Whether to increment or decrement the value of the refCount. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT (0x20803d01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS { + NvU32 action; +} NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS; + +// Possible values for action +#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC (0x00000001) +#define NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC (0x00000002) + +/* + * NV2080_CTRL_CMD_OS_UNIX_ALLOW_DISALLOW_GCOFF + * + * RM by default allows GCOFF but when the X driver disallows to enter in GCOFF + * then this rmcontrol sets flag as NV_FALSE and if it allows to enter in GCOFF + * then the flag is set as NV_TRUE. + * + * action Whether to allow or disallow the user mode clients to enter in GCOFF. 
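As described above, kernel clients hold the GPU out of GC6 by bumping the per-GPU blocker refcount; a sketch under the same hypothetical-dispatch assumptions:

#include <string.h>
#include "ctrl/ctrl2080/ctrl2080unix.h"

NV_STATUS rmControl(NvHandle hClient, NvHandle hSubdevice,
                    NvU32 cmd, void *pParams, NvU32 paramsSize);

static NV_STATUS
setGc6Blocker(NvHandle hClient, NvHandle hSubdevice, NvBool bBlock)
{
    NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params;

    memset(&params, 0, sizeof(params));
    params.action = bBlock ? NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC
                           : NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC;

    /* Incrementing the refcount from zero also brings the GPU out of GC6. */
    return rmControl(hClient, hSubdevice,
                     NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT,
                     &params, sizeof(params));
}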
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_NOT_SUPPORTED + */ + +#define NV2080_CTRL_CMD_OS_UNIX_ALLOW_DISALLOW_GCOFF (0x20803d02) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS { + NvU32 action; +} NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS; + +// Possible values for action +#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_ALLOW (0x00000001) +#define NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_DISALLOW (0x00000002) + +/* + * NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER + * + * GPU can have integrated HDA (High Definition Audio) controller which + * can be in active or suspended state during dynamic power management. + * This command will perform HDA controller wakeup (if bEnter is false) or + * suspend (if bEnter is true). + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER (0x20803d03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS { + NvBool bEnter; +} NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS; + +/* + * NV2080_CTRL_CMD_OS_UNIX_INSTALL_PROFILER_HOOKS + * + * Initialize cyclestats HWPM support in the kernel. This will set up a callback + * event for the channel indicated by hNotifierResource. This callback will execute + * perf register read / write commands enqueued in the shared buffer indicated by + * hNotifyDataMemory. Only one client may use HWPM functionality at a time. + * + * Additionally, if perfmonIdCount is greater than zero, mode-e HWPM streaming into + * the buffer indicated by hSnapshotMemory will be initialized (but not turned on). + * Data will be copied into the provided buffer every 10ms, or whenever a + * NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER command is issued. + */ +#define NV2080_CTRL_CMD_OS_UNIX_INSTALL_PROFILER_HOOKS (0x20803d04) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS { + NvHandle hNotifierResource; + NvU32 notifyDataSize; + NvHandle hNotifyDataMemory; + NvU32 perfmonIdCount; + NvU32 snapshotBufferSize; + NvHandle hSnapshotMemory; +} NV2080_CTRL_OS_UNIX_INSTALL_PROFILER_HOOKS_PARAMS; + +/* + * NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER + * + * Immediately copies any pending mode-e HWPM data into the previously + * installed snapshot buffer instead of waiting for the timer. + */ +#define NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER (0x20803d05) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | 0x5" */ + +/* + * NV2080_CTRL_CMD_OS_UNIX_STOP_PROFILER + * + * Stop the timer responsible for copying mode-e HWPM data to the snapshot buffer. + * The snapshot buffer must not be freed by the client before this command is issued. 
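A sketch of the teardown half of the cyclestats profiler flow described above: flush any pending mode-e HWPM data, then stop the copy timer before the snapshot buffer is freed. Neither control defines a parameter structure, so this sketch assumes a zero-length parameter buffer may be passed; rmControl() remains a hypothetical stand-in for the client's control dispatch.

#include <stddef.h>
#include "ctrl/ctrl2080/ctrl2080unix.h"

NV_STATUS rmControl(NvHandle hClient, NvHandle hSubdevice,
                    NvU32 cmd, void *pParams, NvU32 paramsSize);

static void
stopProfilerSnapshots(NvHandle hClient, NvHandle hSubdevice)
{
    /* Copy any pending mode-e HWPM data into the snapshot buffer now... */
    rmControl(hClient, hSubdevice,
              NV2080_CTRL_CMD_OS_UNIX_FLUSH_SNAPSHOT_BUFFER, NULL, 0);

    /* ...then stop the 10 ms copy timer; only after this may the client
     * free the snapshot buffer it handed to INSTALL_PROFILER_HOOKS. */
    rmControl(hClient, hSubdevice,
              NV2080_CTRL_CMD_OS_UNIX_STOP_PROFILER, NULL, 0);
}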
+ */ +#define NV2080_CTRL_CMD_OS_UNIX_STOP_PROFILER (0x20803d06) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | 0x6" */ + +/* + * NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS + * + * This command will be used by clients to check if the GPU video memory will + * be persistent during system suspend/resume cycle. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS (0x20803d07) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS { + NvBool bVidmemPersistent; +} NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS; + +/* + * NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS + * + * This command will be used by clients to set restore TGP flag which will + * help to restore TGP limits when clients are destroyed. + * + * Possible status values returned are: + * NV_OK + */ +#define NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS (0x20803d08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID << 8) | NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS { + NvBool bUpdateTGP; +} NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS; +/* _ctrl2080unix_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h new file mode 100644 index 000000000..193e3dff8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080vfe.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl2080/ctrl2080vfe.finn +// + + + +/* _ctrl2080vfe_h_ */ + + +#include "nvfixedtypes.h" +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080bios.h" + +/* --------------------------- Forward Defines ----------------------------- */ +/* --------------------------- VFE Variable -------------------------------- */ +/* --------------------------- VFE Equation -------------------------------- */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h new file mode 100644 index 000000000..37348eb4a --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080volt.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl2080/ctrl2080volt.finn +// + + + +/* _ctrl2080volt_h_ */ + + + + +#include "ctrl/ctrl2080/ctrl2080base.h" +#include "ctrl/ctrl2080/ctrl2080boardobj.h" +#include "ctrl/ctrl2080/ctrl2080pmumon.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f.h new file mode 100644 index 000000000..b526aa3a4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl208f/ctrl208fpower.h" +#include "ctrl208f/ctrl208ffifo.h" +#include "ctrl208f/ctrl208ffb.h" +#include "ctrl208f/ctrl208ffbio.h" + + +#include "ctrl208f/ctrl208fbif.h" +#include "ctrl208f/ctrl208fbus.h" +#include "ctrl208f/ctrl208fevent.h" +#include "ctrl208f/ctrl208fgpu.h" +#include "ctrl208f/ctrl208fgr.h" +#include "ctrl208f/ctrl208fclk.h" +#include "ctrl208f/ctrl208fpmgr.h" +#include "ctrl208f/ctrl208fdma.h" +#include "ctrl208f/ctrl208fmmu.h" + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h new file mode 100644 index 000000000..114a6a090 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbase.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl208f/ctrl208fbase.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV20_SUBDEVICE_DIAG: diagnostic class control commands and parameters */ + +#define NV208F_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x208F, NV208F_CTRL_##cat, idx) + +/* Subdevice diag command categories (6bits) */ +#define NV208F_CTRL_RESERVED (0x00) +#define NV208F_CTRL_POWER (0x01) +#define NV208F_CTRL_THERMAL (0x02) +#define NV208F_CTRL_SEQ (0x03) +#define NV208F_CTRL_FIFO (0x04) +#define NV208F_CTRL_FB (0x05) +#define NV208F_CTRL_MC (0x06) +#define NV208F_CTRL_BIF (0x07) +#define NV208F_CTRL_CLK (0x08) +#define NV208F_CTRL_PERF (0x09) +#define NV208F_CTRL_FBIO (0x0A) +#define NV208F_CTRL_MMU (0x0B) +#define NV208F_CTRL_PMU (0x0C) +#define NV208F_CTRL_EVENT (0x10) +#define NV208F_CTRL_GPU (0x11) +#define NV208F_CTRL_GR (0x12) +#define NV208F_CTRL_PMGR (0x13) +#define NV208F_CTRL_DMA (0x14) +// const NV208F_CTRL_TMR = (0x15); // not supported +#define NV208F_CTRL_RMFS (0x16) +#define NV208F_CTRL_GSPMSGTIMING (0x17) +#define NV208F_CTRL_BUS (0x18) + +/* + * NV208F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV208F_CTRL_CMD_NULL (0x208f0000) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl208fbase_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbif.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbif.h new file mode 100644 index 000000000..f8511c9de --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbif.h @@ -0,0 +1,137 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fbif.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* +* NV208F_CTRL_CMD_BIF_PBI_WRITE_COMMAND +* +* Control command to send a write command to the Post Box Interface +* +* Parameters: +* +*cmdFuncId +* this specifies the function that needs to be performed on pbi +*data +* the data to be set in the data in register +* status +* this corresponds to pbi status register +* sysNotify +* this corresponds to system notify event, i.e. 
whether system +* needs to be notified of command completion +* drvNotify +* this corresponds to driver notify event, i.e. whether driver +* needs to be notified of command completion +* +* For the possible values of the above parameters refer rmpbicmdif.h +*/ +#define NV208F_CTRL_CMD_BIF_PBI_WRITE_COMMAND (0x208f0701) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_BIF_INTERFACE_ID << 8) | NV208F_CTRL_BIF_PBI_WRITE_COMMAND_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_BIF_PBI_WRITE_COMMAND_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV208F_CTRL_BIF_PBI_WRITE_COMMAND_PARAMS { + NvU8 cmdFuncId; + NvU32 data; + NvU8 status; + NvBool sysNotify; + NvBool drvNotify; +} NV208F_CTRL_BIF_PBI_WRITE_COMMAND_PARAMS; + +/* +* NV208F_CTRL_CMD_BIF_CONFIG_REG_READ +* This command is used to read any of the PBI registers in the config space +* +* Parameters: +* +* RegIndex +* Defines the index of the PBI register +* data +* Data that is read +*/ +#define NV208F_CTRL_CMD_BIF_CONFIG_REG_READ (0x208f0702) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_BIF_INTERFACE_ID << 8) | NV208F_CTRL_BIF_CONFIG_REG_READ_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_BIF_CONFIG_REG_READ_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV208F_CTRL_BIF_CONFIG_REG_READ_PARAMS { + NvU8 RegIndex; + NvU32 data; +} NV208F_CTRL_BIF_CONFIG_REG_READ_PARAMS; + +/* +* NV208F_CTRL_CMD_BIF_CONFIG_REG_WRITE +* This command is used to write any of the PBI registers in the config space +* +* Parameters: +* +* RegIndex +* Defines the index of the PBI register +* data +* Data that is to be written +*/ +#define NV208F_CTRL_CMD_BIF_CONFIG_REG_WRITE (0x208f0703) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_BIF_INTERFACE_ID << 8) | NV208F_CTRL_BIF_CONFIG_REG_WRITE_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_BIF_CONFIG_REG_WRITE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV208F_CTRL_BIF_CONFIG_REG_WRITE_PARAMS { + NvU8 RegIndex; + NvU32 data; +} NV208F_CTRL_BIF_CONFIG_REG_WRITE_PARAMS; + +/* +* NV208F_CTRL_CMD_BIF_INFO +* This command is used to read a bif property +* +* Parameters: +* +* index +* Defines the index of the property to read +* data +* Data that is read +*/ +#define NV208F_CTRL_CMD_BIF_INFO (0x208f0704) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_BIF_INTERFACE_ID << 8) | NV208F_CTRL_BIF_INFO_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_BIF_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV208F_CTRL_BIF_INFO_PARAMS { + NvU32 index; + NvU32 data; +} NV208F_CTRL_BIF_INFO_PARAMS; + +/* valid bif info index values */ +#define NV208F_CTRL_BIF_INFO_INDEX_L0S_ENABLED (0x00000000) +#define NV208F_CTRL_BIF_INFO_INDEX_L1_ENABLED (0x00000001) + + +/* _ctrl208fbif_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbus.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbus.h new file mode 100644 index 000000000..62cdc66f3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fbus.h @@ -0,0 +1,116 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fbus.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* + * NV208F_CTRL_CMD_BUS_IS_BAR1_VIRTUAL + * + * This command checks whether or not BAR1 is in virtual mode. + * This API is intended for internal testing only. + * + * bIsVirtual + * Whether or not Bar1 is in virtual mode. + */ +#define NV208F_CTRL_CMD_BUS_IS_BAR1_VIRTUAL (0x208f1801) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_BUS_INTERFACE_ID << 8) | NV208F_CTRL_BUS_IS_BAR1_VIRTUAL_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_BUS_IS_BAR1_VIRTUAL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV208F_CTRL_BUS_IS_BAR1_VIRTUAL_PARAMS { + NvBool bIsVirtual; +} NV208F_CTRL_BUS_IS_BAR1_VIRTUAL_PARAMS; + +/* + * NV208F_CTRL_CMD_BUS_ECC_INJECT_ERROR + * + * This ctrl call injects BUS ECC errors. Please see the confluence + * page "ECC" for more information on ECC and ECC injection: + * + * Parameters: + * + * location + * Specifies the BUS HW unit where the injection will occur. + * + * errorType + * Specifies whether the injected error will be correctable or uncorrectable. + * Correctable errors have no effect on running programs while uncorrectable + * errors will cause all channels to be torn down. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV208F_CTRL_CMD_BUS_ECC_INJECT_ERROR (0x208f1802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_BUS_INTERFACE_ID << 8) | NV208F_CTRL_BUS_ECC_INJECT_ERROR_PARAMS_MESSAGE_ID" */ + + + +#define NV208F_CTRL_BUS_ECC_INJECT_ERROR_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV208F_CTRL_BUS_ECC_INJECT_ERROR_PARAMS { + NvU8 errorType; + NvU8 errorUnit; +} NV208F_CTRL_BUS_ECC_INJECT_ERROR_PARAMS; + +/* + * NV208F_CTRL_CMD_BUS_ECC_INJECTION_SUPPORTED + * + * Reports if error injection is supported for BUS units + * + * bCorrectableSupported [out]: + * Boolean value that shows if correcatable errors can be injected. + * + * bUncorrectableSupported [out]: + * Boolean value that shows if uncorrecatable errors can be injected. + * + * Return values: + * NV_OK on success + * NV_ERR_INSUFFICIENT_PERMISSIONS if priv write not enabled. 
+ * NV_ERR_NOT_SUPPORTED otherwise + * + * + */ +#define NV208F_CTRL_CMD_BUS_ECC_INJECTION_SUPPORTED (0x208f1803) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_BUS_INTERFACE_ID << 8) | NV208F_CTRL_BUS_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_BUS_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV208F_CTRL_BUS_ECC_INJECTION_SUPPORTED_PARAMS { + NvU8 errorUnit; + NvBool bCorrectableSupported; + NvBool bUncorrectableSupported; +} NV208F_CTRL_BUS_ECC_INJECTION_SUPPORTED_PARAMS; + +/* _ctrl208fbus_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fclk.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fclk.h new file mode 100644 index 000000000..450d88182 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fclk.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fclk.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fdma.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fdma.h new file mode 100644 index 000000000..2fbdd3954 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fdma.h @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fdma.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* + * NV208F_CTRL_CMD_DMA_IS_SUPPORTED_SPARSE_VIRTUAL + * + * This command checks whether or not "sparse" virtual address ranges are + * supported for a given chip. This API is intended for debug-use only. + * + * bIsSupported + * Whether or not "sparse" virtual address ranges are supported. + */ +#define NV208F_CTRL_CMD_DMA_IS_SUPPORTED_SPARSE_VIRTUAL (0x208f1401) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_DMA_INTERFACE_ID << 8) | NV208F_CTRL_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV208F_CTRL_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_PARAMS { + NvBool bIsSupported; +} NV208F_CTRL_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_PARAMS; + +/* + * NV208F_CTRL_CMD_DMA_GET_VAS_BLOCK_DETAILS + * + * This command retrieves various details of the virtual address space block + * allocated from the virtual address space heap for the given virtual address. + * + * virtualAddress + * Virtual address to get information about. + * + * beginAddress + * Start address of the corresponding virtual address space block. + * + * endAddress + * End address (inclusive) of the corresponding virtual address space + * block. + * + * alignedAddress + * Aligned address of the corresponding virtual address space block. + * + * pageSize + * Page size of the virtual address space block. + * + * hVASpace + * Handle to an allocated VA space. If 0, it is assumed that the device's + * VA space should be used. + */ +#define NV208F_CTRL_CMD_DMA_GET_VAS_BLOCK_DETAILS (0x208f1402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_DMA_INTERFACE_ID << 8) | NV208F_CTRL_DMA_GET_VAS_BLOCK_DETAILS_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_DMA_GET_VAS_BLOCK_DETAILS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV208F_CTRL_DMA_GET_VAS_BLOCK_DETAILS_PARAMS { + NV_DECLARE_ALIGNED(NvU64 virtualAddress, 8); + NV_DECLARE_ALIGNED(NvU64 beginAddress, 8); + NV_DECLARE_ALIGNED(NvU64 endAddress, 8); + NV_DECLARE_ALIGNED(NvU64 alignedAddress, 8); + NvU32 pageSize; + NvHandle hVASpace; +} NV208F_CTRL_DMA_GET_VAS_BLOCK_DETAILS_PARAMS; + +/* _ctrl208fdma_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fevent.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fevent.h new file mode 100644 index 000000000..9cffa1a01 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fevent.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fevent.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* NV20_SUBDEVICE_DIAG event-related control commands and parameters */ + +/* + * NV208F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * This command sets event notification state for the associated subdevice. + * This command requires that an instance of NV01_EVENT has been previously + * bound to the associated subdevice object. + * + * event + * This parameter specifies the type of event to which the specified + * action is to be applied. This parameter must specify a valid + * NV208F_NOTIFIERS value (see cl208f.h for more details) and should + * not exceed one less NV208F_NOTIFIERS_MAXCOUNT. + * action + * This parameter specifies the desired event notification action. + * Valid notification actions include: + * NV208F_CTRL_SET_EVENT_NOTIFICATION_DISABLE + * This action disables event notification for the specified + * event for the associated subdevice object. + * NV208F_CTRL_SET_EVENT_NOTIFICATION_SINGLE + * This action enables single-shot event notification for the + * specified event for the associated subdevice object. + * NV208F_CTRL_SET_EVENT_NOTIFICATION_REPEAT + * This action enables repeated event notification for the specified + * event for the associated system controller object. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV208F_CTRL_CMD_EVENT_SET_NOTIFICATION (0x208f1001) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_EVENT_INTERFACE_ID << 8) | NV208F_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV208F_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 event; + NvU32 action; +} NV208F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NV208F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000) +#define NV208F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001) +#define NV208F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) + +/* + * NV208F_CTRL_CMD_EVENT_SET_TRIGGER + * + * This command triggers a software event for the associated subdevice. + * This command accepts no parameters. 
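As a usage sketch for the notification control defined above (not part of the patch): enable repeated notification for one notifier index, assuming an NV01_EVENT object is already bound to the subdevice diag object. The rmControl() wrapper and the handle/index arguments are illustrative assumptions.

#include "ctrl/ctrl208f/ctrl208fevent.h"
#include "nvstatus.h"

extern NvU32 rmControl(NvHandle hClient, NvHandle hSubdevDiag, NvU32 cmd,
                       void *pParams, NvU32 paramsSize);  /* assumed wrapper */

static NvU32 enableRepeatedNotification(NvHandle hClient, NvHandle hSubdevDiag,
                                        NvU32 notifierIndex)
{
    NV208F_CTRL_EVENT_SET_NOTIFICATION_PARAMS params = { 0 };

    params.event  = notifierIndex;  /* one of the NV208F_NOTIFIERS_* values from cl208f.h */
    params.action = NV208F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;

    return rmControl(hClient, hSubdevDiag,
                     NV208F_CTRL_CMD_EVENT_SET_NOTIFICATION,
                     &params, sizeof(params));
}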
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV208F_CTRL_CMD_EVENT_SET_TRIGGER (0x208f1002) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_EVENT_INTERFACE_ID << 8) | 0x2" */ + +/* _ctrl208fevent_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffb.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffb.h new file mode 100644 index 000000000..e9a81f840 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffb.h @@ -0,0 +1,589 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208ffb.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* + * NV208F_CTRL_CMD_FB_GET_INFO + * + * This command returns fb engine information for the associated GPU. + * The client specifies what information to query through 'index' param. + * On success, the information is stored in the 'data' param. + * + * index + * Specify what information to query. Please see below for valid values of + * indexes for this command. + * data + * On success, this param will hold the data that the client queried for. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ +#define NV208F_CTRL_CMD_FB_GET_INFO (0x208f0501) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_GET_INFO_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_GET_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV208F_CTRL_FB_GET_INFO_PARAMS { + NvU32 index; + NV_DECLARE_ALIGNED(NvU64 data, 8); +} NV208F_CTRL_FB_GET_INFO_PARAMS; + +/* valid fb info index values */ +#define NV208F_CTRL_FB_INFO_INDEX_FREE_CONTIG_COMPRESSION_SIZE (0x00000001) + +#define NV208F_CTRL_FB_INFO_INDEX_MAX NV208F_CTRL_FB_INFO_INDEX_FREE_CONTIG_COMPRESSION_SIZE + +/* + * NV208F_CTRL_CMD_FB_GET_ZBC_REFCOUNT + * + * This command gets the ZBC reference count associated with a given + * compression tag address. It is not supported on GPUs which support class + * GF100_ZBC_CLEAR as it is specific to a different hardware implementation. + * + * compTagAddress + * The input parameter indicating the compression tag address for which the + * associated ZBC refcount should be looked up. 
+ * zbcRefCount + * An array of reference counts for the ZBC clear values associated with + * compTagAddress. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV208F_CTRL_CMD_FB_GET_ZBC_REFCOUNT (0x208f0505) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | 0x5" */ // Deprecated, removed form RM + +#define NV208F_CTRL_FB_GET_ZBC_REFCOUNT_MAX_REFCOUNTS 2 +typedef struct NV208F_CTRL_FB_GET_ZBC_REFCOUNT_PARAMS { + NvU32 compTagAddress; + NvU32 zbcRefCount[NV208F_CTRL_FB_GET_ZBC_REFCOUNT_MAX_REFCOUNTS]; +} NV208F_CTRL_FB_GET_ZBC_REFCOUNT_PARAMS; + +/* + * NV208F_CTRL_CMD_FB_CTRL_GPU_CACHE + * + * This command controls the state of a cache which all GPU memory accesses go + * through. If supported, it allows changing of the power state and the write + * mode. This is only supported when NV_VERIF_FEATURES is defined. An error + * will be returned if the requested combination of settings is not possible. + * + * writeMode + * Specifies the write mode of the cache. Possible values are defined in + * NV208F_CTRL_FB_CTRL_GPU_CACHE_WRITE_MODE. Passing _DEFAULT means to + * maintain the current write mode. It is illegal to change the write mode + * while the cache is disabled or in the same call as a request to disable + * it. + * powerState + * Specifies the power state of the cache. Possible values are defined in + * NV208F_CTRL_FB_CTRL_GPU_CACHE_POWER_STATE. Passing _DEFAULT means + * to maintain the current power state. + * rcmState + * Specifies the reduced cache mode of the cache. Possible values are + * defined in NV208F_CTRL_FB_CTRL_GPU_CACHE_RCM_STATE. Passing _DEFAULT + * means to maintain the current RCM state. + * vgaCacheMode + * Specifies whether or not to enable VGA out-of-cache mode. Possible + * values are defined in NV208F_CTRL_FB_CTRL_GPU_CACHE_VGA_MODE. Passing + * _DEFAULT means to maintain the current VGA caching mode. + * cacheReset + * Triggers a hardware reset of the cache. Possible values are defined in + * NV208F_CTRL_FB_CTRL_GPU_CACHE_CACHE_RESET. Passing _DEFAULT does + * nothing while passing _RESET clears all data in the cache. + * flags + * Contains flags to control the details of how transitions should be + * handled. Possible values are defined in + * NV208F_CTRL_FB_CTRL_GPU_CACHE_FLAGS. Passing _DEFAULT for any of + * the fields means to use the defaults specified by the Resource Manager. + * Note not all options are available for all transitions. Flags that are + * set but not applicable will be silently ignored. + * bypassMode + * (Fermi only) Specifies the bypass mode of the L2 cache. Normal GPU + * operation is _DISABLE. For TEST ONLY, setting _ENABLE enables a debug + * mode where all transactions miss in L2 and no writes are combined, + * essentially disabling the caching feature of the L2 cache. 
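To make the field semantics concrete, a hedged sketch that requests only a cache reset and leaves every other knob at its default; because the _DEFAULT value of each field is zero (see the value macros defined just below), zero-initializing the structure is enough. rmControl() and the handles are assumptions for illustration.

#include "ctrl/ctrl208f/ctrl208ffb.h"
#include "nvstatus.h"

extern NvU32 rmControl(NvHandle hClient, NvHandle hSubdevDiag, NvU32 cmd,
                       void *pParams, NvU32 paramsSize);  /* assumed wrapper */

static NvU32 resetGpuCache(NvHandle hClient, NvHandle hSubdevDiag)
{
    NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS params = { 0 };

    /* writeMode, powerState, rcmState, vgaCacheMode, flags and bypassMode stay
     * at their _DEFAULT (0) values; only the hardware cache reset is requested. */
    params.cacheReset = NV208F_CTRL_FB_CTRL_GPU_CACHE_CACHE_RESET_RESET;

    return rmControl(hClient, hSubdevDiag, NV208F_CTRL_CMD_FB_CTRL_GPU_CACHE,
                     &params, sizeof(params));
}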
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV208F_CTRL_CMD_FB_CTRL_GPU_CACHE (0x208f0506) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS { + NvU32 writeMode; + NvU32 powerState; + NvU32 rcmState; + NvU32 vgaCacheMode; + NvU32 cacheReset; + NvU32 flags; + NvU32 bypassMode; +} NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS; + +/* valid values for writeMode */ +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_WRITE_MODE_DEFAULT (0x00000000) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_WRITE_MODE_WRITETHROUGH (0x00000001) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_WRITE_MODE_WRITEBACK (0x00000002) + +/* valid values for powerState */ +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_POWER_STATE_DEFAULT (0x00000000) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_POWER_STATE_ENABLED (0x00000001) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_POWER_STATE_DISABLED (0x00000002) + +/* valid values for rcmState */ +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_RCM_STATE_DEFAULT (0x00000000) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_RCM_STATE_FULL (0x00000001) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_RCM_STATE_REDUCED (0x00000002) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_RCM_STATE_ZERO_CACHE (0x00000003) + +/* valid values for vgaCacheMode */ +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_VGA_MODE_DEFAULT (0x00000000) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_VGA_MODE_ENABLED (0x00000001) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_VGA_MODE_DISABLED (0x00000002) + +/* valid values for cacheReset */ +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_CACHE_RESET_DEFAULT (0x00000000) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_CACHE_RESET_RESET (0x00000001) + +/* valid fields and values for flags */ +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_FLAGS_MODE 1:0 +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_FLAGS_MODE_DEFAULT (0x00000000) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_FLAGS_MODE_RM (0x00000001) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_FLAGS_MODE_PMU (0x00000002) + +/* valid values for bypassMode */ +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_BYPASS_MODE_DEFAULT (0x00000000) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_BYPASS_MODE_DISABLED (0x00000001) +#define NV208F_CTRL_FB_CTRL_GPU_CACHE_BYPASS_MODE_ENABLED (0x00000002) + + + +/* + * NV208F_CTRL_CMD_FB_SET_STATE + * + * This command is used to put fb engine in a state requested by the caller. + * + * state + * This parameter specifies the desired engine state: + * NV208F_CTRL_FB_SET_STATE_STOPPED + * This value stops/halts the fb engine. + * NV208F_CTRL_FB_SET_STATE_RESTART + * This value restarts fb from a stopped state. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ +#define NV208F_CTRL_CMD_FB_SET_STATE (0x208f0508) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_SET_STATE_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_SET_STATE_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV208F_CTRL_FB_SET_STATE_PARAMS { + NvU32 state; +} NV208F_CTRL_FB_SET_STATE_PARAMS; + +/* valid values for state */ +#define NV208F_CTRL_FB_SET_STATE_STOPPED (0x00000000) +#define NV208F_CTRL_FB_SET_STATE_RESTART (0x00000001) + +/* + * NV208F_CTRL_CMD_GPU_ECC_SCRUB_DIAG + * + * This command reads all the settings internal to scrubbing (both asynchronous + * and synchronous. 
+ *
+ * Currently implemented: FB offset scrubber has completed, FB offset that scrubber
+ * is completing to, whether or not asynchronous scrubbing is enabled.
+ *
+ * fbOffsetCompleted
+ *   This is the offset into FB that the scrubber has completed up to at the
+ *   time this function is invoked. Note that the scrubber is top-down. Therefore
+ *   the memory that remains unscrubbed is from 0x0 to fbOffsetCompleted.
+ *
+ * fbEndOffset
+ *   This is the offset of the base of the last block that ECC asynchronous
+ *   scrubber has been tasked to scrub.
+ *
+ * bAsyncScrubDisabled
+ *   This is NV_TRUE if asynchronous scrubbing is disabled and NV_FALSE if
+ *   asynchronous scrubbing is enabled.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV208F_CTRL_CMD_FB_ECC_SCRUB_DIAG (0x208f0509) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_CMD_FB_ECC_SCRUB_DIAG_PARAMS_MESSAGE_ID" */
+
+#define NV208F_CTRL_CMD_FB_ECC_SCRUB_DIAG_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV208F_CTRL_CMD_FB_ECC_SCRUB_DIAG_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 fbOffsetCompleted, 8);
+    NV_DECLARE_ALIGNED(NvU64 fbEndOffset, 8);
+    NvBool bAsyncScrubDisabled;
+} NV208F_CTRL_CMD_FB_ECC_SCRUB_DIAG_PARAMS;
+
+/*
+ * NV208F_CTRL_CMD_GPU_ECC_ASYNCH_SCRUB_REGION
+ *
+ * This command launches the ECC scrubber in asynchronous mode. The scrubber, as
+ * in normal operation, will continue to operate until all of FB (excluding
+ * dedicated system memory) has been scrubbed. Like usual operation, scrubbing is
+ * only done on silicon.
+ *
+ * Possible status return values are:
+ *   NV_OK
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV208F_CTRL_CMD_FB_ECC_ASYNC_SCRUB_REGION (0x208f050a) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_CMD_FB_ECC_ASYNC_SCRUB_REGION_PARAMS_MESSAGE_ID" */
+
+#define NV208F_CTRL_CMD_FB_ECC_ASYNC_SCRUB_REGION_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NV208F_CTRL_CMD_FB_ECC_ASYNC_SCRUB_REGION_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 startBlock, 8);
+    NV_DECLARE_ALIGNED(NvU64 endBlock, 8);
+} NV208F_CTRL_CMD_FB_ECC_ASYNC_SCRUB_REGION_PARAMS;
+
+/*
+ * NV208F_CTRL_CMD_GPU_ECC_ERROR_INFO
+ * This is a structure that is defined here for diag/debug purposes in mods.
+ * It is used to return the error information as part of the callback to
+ * kernel clients registering for SBE/DBE callbacks.
+ */
+
+typedef struct NV208F_CTRL_CMD_FB_ECC_ERROR_INFO {
+    NvU32 row;
+    NvU32 bank;
+    NvU32 col;
+    NvU32 extBank;
+    NvU32 xbarAddress;
+    NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+} NV208F_CTRL_CMD_FB_ECC_ERROR_INFO;
+
+
+#define NV208F_CTRL_CMD_FB_ECC_GET_FORWARD_MAP_ADDRESS (0x208f050c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_CMD_FB_ECC_GET_FORWARD_MAP_ADDRESS_PARAMS_MESSAGE_ID" */
+
+#define NV208F_CTRL_CMD_FB_ECC_GET_FORWARD_MAP_ADDRESS_PARAMS_MESSAGE_ID (0xCU)
+
+typedef struct NV208F_CTRL_CMD_FB_ECC_GET_FORWARD_MAP_ADDRESS_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 pAddr, 8);
+    NvU32 row;
+    NvU32 bank;
+    NvU32 col;
+    NvU32 extBank;
+    NvU32 rank;
+    NvU32 sublocation;
+    NvU32 partition;
+    NvU32 writeKillPtr0;
+    NvU32 injectionAddr;
+    NvU32 injectionAddrExt;
+    NvU32 rbcAddress;
+    NvU32 rbcAddressExt;
+    NvU32 rbcAddressExt2;
+    NvU32 eccCol;
+} NV208F_CTRL_CMD_FB_ECC_GET_FORWARD_MAP_ADDRESS_PARAMS;
+
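A brief sketch of polling the scrub diagnostics above: because the scrubber works top-down, everything from 0x0 up to fbOffsetCompleted is still unscrubbed while asynchronous scrubbing is enabled. rmControl() and the handles are illustrative assumptions, not part of this header.

#include "ctrl/ctrl208f/ctrl208ffb.h"
#include "nvstatus.h"

extern NvU32 rmControl(NvHandle hClient, NvHandle hSubdevDiag, NvU32 cmd,
                       void *pParams, NvU32 paramsSize);  /* assumed wrapper */

static NvU32 queryScrubProgress(NvHandle hClient, NvHandle hSubdevDiag,
                                NvU64 *pUnscrubbedTop)
{
    NV208F_CTRL_CMD_FB_ECC_SCRUB_DIAG_PARAMS diag = { 0 };
    NvU32 status;

    status = rmControl(hClient, hSubdevDiag, NV208F_CTRL_CMD_FB_ECC_SCRUB_DIAG,
                       &diag, sizeof(diag));
    if (status == NV_OK && !diag.bAsyncScrubDisabled)
    {
        /* The region from 0x0 up to fbOffsetCompleted has not been scrubbed yet. */
        *pUnscrubbedTop = diag.fbOffsetCompleted;
    }
    return status;
}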
+/*
+ * NV208F_CTRL_CMD_FB_ECC_SET_KILL_PTR
+ *
+ * This command sets the kill pointer for the specified DRAM address. If
+ * the kill pointer is set to NV208F_CTRL_FB_ERROR_TYPE_CORRECTABLE
+ * or NV208F_CTRL_FB_ERROR_TYPE_UNCORRECTABLE, accesses to the specified
+ * address will result in ECC errors until all kill pointers are unset
+ * using NV208F_CTRL_FB_ERROR_TYPE_NONE.
+ *
+ * Only one kill pointer can be set at a time. Setting a kill pointer will
+ * clear all currently set kill pointers and set the new kill pointer.
+ * Calling NV208F_CTRL_FB_ERROR_TYPE_NONE simply clears all currently set
+ * kill pointers.
+ *
+ * errorType
+ *   The type of kill pointer to set. NV208F_CTRL_FB_ERROR_TYPE_CORRECTABLE
+ *   will set a single kill pointer resulting in a correctable error.
+ *   NV208F_CTRL_FB_ERROR_TYPE_UNCORRECTABLE will set both kill pointers
+ *   resulting in an uncorrectable error. NV208F_CTRL_FB_ERROR_TYPE_NONE
+ *   will clear all kill pointers, which stops the associated addresses
+ *   from generating ECC errors if NV208F_CTRL_FB_ERROR_TYPE_CORRECTABLE
+ *   or NV208F_CTRL_FB_ERROR_TYPE_UNCORRECTABLE was previously set.
+ *   Only one kill pointer can be set at a time and setting a new
+ *   kill pointer will clear the previous kill pointer.
+ *
+ * address
+ *   The physical DRAM address to be targeted by the kill pointer
+ */
+#define NV208F_CTRL_CMD_FB_ECC_SET_KILL_PTR (0x208f050e) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_ECC_SET_KILL_PTR_PARAMS_MESSAGE_ID" */
+
+typedef enum NV208F_CTRL_FB_ERROR_TYPE {
+    NV208F_CTRL_FB_ERROR_TYPE_CORRECTABLE = 0,
+    NV208F_CTRL_FB_ERROR_TYPE_UNCORRECTABLE = 1,
+    NV208F_CTRL_FB_ERROR_TYPE_NONE = 2,
+} NV208F_CTRL_FB_ERROR_TYPE;
+
+#define NV208F_CTRL_FB_ECC_SET_KILL_PTR_PARAMS_MESSAGE_ID (0xEU)
+
+typedef struct NV208F_CTRL_FB_ECC_SET_KILL_PTR_PARAMS {
+    NV208F_CTRL_FB_ERROR_TYPE errorType;
+    NV_DECLARE_ALIGNED(NvU64 address, 8);
+} NV208F_CTRL_FB_ECC_SET_KILL_PTR_PARAMS;
+
+
+
+/*
+ * NV208F_CTRL_CMD_FB_INJECT_LTC_ECC_ERROR
+ *
+ * This API allows a client to inject ECC errors in the L2.
+ *
+ * ltc:
+ *   The physical LTC number to inject the error into.
+ * slice:
+ *   The physical slice number within the LTC to inject the error into.
+ * locationMask
+ *   LTC location subtype(s) where error is to be injected. (Valid on Ampere and later)
+ * errorType
+ *   Type of error to inject
+ *   NV208F_CTRL_FB_ERROR_TYPE_CORRECTABLE for SBE.
+ *   NV208F_CTRL_FB_ERROR_TYPE_UNCORRECTABLE for DBE.
+ *
+ */
+#define NV208F_CTRL_CMD_FB_INJECT_LTC_ECC_ERROR (0x208f050f) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_INJECT_LTC_ECC_ERROR_PARAMS_MESSAGE_ID" */
+
+#define NV208F_CTRL_FB_INJECT_LTC_ECC_ERROR_PARAMS_MESSAGE_ID (0xFU)
+
+typedef struct NV208F_CTRL_FB_INJECT_LTC_ECC_ERROR_PARAMS {
+    NvU8 ltc;
+    NvU8 slice;
+    NvU8 locationMask;
+    NV208F_CTRL_FB_ERROR_TYPE errorType;
+} NV208F_CTRL_FB_INJECT_LTC_ECC_ERROR_PARAMS;
+
+/*
+ * NV208F_CTRL_CMD_FB_ECC_INJECTION_SUPPORTED
+ *
+ * Reports if error injection is supported for a given HW unit
+ *
+ * location [in]:
+ *   The ECC protected unit for which ECC injection support is being checked.
+ *   The location type is defined by NV208F_CTRL_FB_ECC_INJECTION_SUPPORTED_LOC.
+ *
+ * bCorrectableSupported [out]:
+ *   Boolean value that shows if correctable errors can be injected.
+ *
+ * bUncorrectableSupported [out]:
+ *   Boolean value that shows if uncorrectable errors can be injected.
+ *
+ * Return values:
+ *   NV_OK on success
+ *   NV_ERR_INVALID_ARGUMENT if the requested location is invalid.
+ *   NV_ERR_INSUFFICIENT_PERMISSIONS if priv write not enabled.
+ * NV_ERR_NOT_SUPPORTED otherwise + * + * + */ +#define NV208F_CTRL_CMD_FB_ECC_INJECTION_SUPPORTED (0x208f0510) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV208F_CTRL_FB_ECC_INJECTION_SUPPORTED_PARAMS { + NvU8 location; + NvBool bCorrectableSupported; + NvBool bUncorrectableSupported; +} NV208F_CTRL_FB_ECC_INJECTION_SUPPORTED_PARAMS; + +#define NV208F_CTRL_FB_ECC_INJECTION_SUPPORTED_LOC 0:0 +#define NV208F_CTRL_FB_ECC_INJECTION_SUPPORTED_LOC_LTC (0x00000000) +#define NV208F_CTRL_FB_ECC_INJECTION_SUPPORTED_LOC_DRAM (0x00000001) + +/* + * NV208F_CTRL_CMD_FB_ECC_SET_WRITE_KILL + * + * This command sets the write kill for the specified DRAM address. If set, + * writes to the specified address won't update the ECC checkbits. When unset, + * writes the specified address will update the ECC checkbits. + * + * Only one write kill register can be set at a time. Setting a write kill + * will clear all currently set write kills and set the new write kill. + * Calling this ctrl call with setWriteKill = false simply clears all currently + * set write kills. + * + * setWriteKill + * When true, the ECC checkbits for the specified address won't update on + * writes. When false, the ECC checkbits for the specified address will + * revert to normal behavior and update on all writes. + * + * address + * The physical DRAM address to be targeted by the write kill + */ +#define NV208F_CTRL_CMD_FB_ECC_SET_WRITE_KILL (0x208f0511) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_ECC_SET_WRITE_KILL_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_ECC_SET_WRITE_KILL_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NV208F_CTRL_FB_ECC_SET_WRITE_KILL_PARAMS { + NvBool setWriteKill; + NV_DECLARE_ALIGNED(NvU64 address, 8); +} NV208F_CTRL_FB_ECC_SET_WRITE_KILL_PARAMS; + +/* + * NV208F_CTRL_FB_REMAPPING_ADDRESS_INFO + * + * physicalAddress + * Physical address to be remapped + * source + * The reason for retirement. Valid values for this parameter are + * from NV2080_CTRL_FB_REMAPPED_ROW_SOURCE_* + * status + * Reason for row remapping failure. Valid values are: + * NV208F_CTRL_FB_REMAP_ROW_STATUS_OK + * No error + * NV208F_CTRL_FB_REMAP_ROW_STATUS_REMAPPING_PENDING + * The remapping is pending + * NV208F_CTRL_FB_REMAP_ROW_STATUS_TABLE_FULL + * Table full + * NV208F_CTRL_FB_REMAP_ROW_STATUS_ALREADY_REMAPPED + * Attempting to remap a reserved row + * NV208F_CTRL_FB_REMAP_ROW_STATUS_INTERNAL_ERROR + * Some other RM failure + */ +typedef struct NV208F_CTRL_FB_REMAPPING_ADDRESS_INFO { + NV_DECLARE_ALIGNED(NvU64 physicalAddress, 8); + NvU8 source; + NvU32 status; +} NV208F_CTRL_FB_REMAPPING_ADDRESS_INFO; + +/* valid values for status */ +#define NV208F_CTRL_FB_REMAP_ROW_STATUS_OK (0x00000000) +#define NV208F_CTRL_FB_REMAP_ROW_STATUS_REMAPPING_PENDING (0x00000001) +#define NV208F_CTRL_FB_REMAP_ROW_STATUS_TABLE_FULL (0x00000002) +#define NV208F_CTRL_FB_REMAP_ROW_STATUS_ALREADY_REMAPPED (0x00000003) +#define NV208F_CTRL_FB_REMAP_ROW_STATUS_INTERNAL_ERROR (0x00000004) + +#define NV208F_CTRL_FB_REMAPPED_ROWS_MAX_ROWS (0x00000200) +/* + * NV208F_CTRL_CMD_FB_REMAP_ROW + * + * This command will write entries to Inforom. During init the entries will be + * read and used to remap a row. 
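Before the row-remapping parameters that follow, a short sketch of the write-kill control defined just above: point it at a DRAM address so writes stop updating the ECC checkbits, then restore normal behavior. rmControl(), the handles and the address are assumptions for illustration.

#include "ctrl/ctrl208f/ctrl208ffb.h"
#include "nvtypes.h"
#include "nvstatus.h"

extern NvU32 rmControl(NvHandle hClient, NvHandle hSubdevDiag, NvU32 cmd,
                       void *pParams, NvU32 paramsSize);  /* assumed wrapper */

static NvU32 toggleWriteKill(NvHandle hClient, NvHandle hSubdevDiag, NvU64 dramAddr)
{
    NV208F_CTRL_FB_ECC_SET_WRITE_KILL_PARAMS params = { 0 };
    NvU32 status;

    /* Writes to dramAddr will no longer update the ECC checkbits. */
    params.setWriteKill = NV_TRUE;
    params.address      = dramAddr;
    status = rmControl(hClient, hSubdevDiag, NV208F_CTRL_CMD_FB_ECC_SET_WRITE_KILL,
                       &params, sizeof(params));
    if (status != NV_OK)
        return status;

    /* ... provoke a mismatch between data and checkbits ... */

    /* Clear all currently set write kills. */
    params.setWriteKill = NV_FALSE;
    return rmControl(hClient, hSubdevDiag, NV208F_CTRL_CMD_FB_ECC_SET_WRITE_KILL,
                     &params, sizeof(params));
}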
+ * + * addressList + * This input parameter is an array of NV208F_CTRL_FB_REMAPPING_ADDRESS_INFO + * structures containing information used for row remapping. Valid entries + * are adjacent + * validEntries + * This input parameter specifies the number of valid entries in the + * address array + * numEntriesAdded + * This output parameter specifies how many validEntries were successfully + * added to the Inforom + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV208F_CTRL_CMD_FB_REMAP_ROW (0x208f0512) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_REMAP_ROW_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_REMAP_ROW_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV208F_CTRL_FB_REMAP_ROW_PARAMS { + NV_DECLARE_ALIGNED(NV208F_CTRL_FB_REMAPPING_ADDRESS_INFO addressList[NV208F_CTRL_FB_REMAPPED_ROWS_MAX_ROWS], 8); + NvU32 validEntries; + NvU32 numEntriesAdded; +} NV208F_CTRL_FB_REMAP_ROW_PARAMS; + + +#define NV208F_CTRL_CMD_FB_REVERSE_MAP_RBC_ADDR_TO_PA (0x208f0513) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_REVERSE_MAP_RBC_ADDR_TO_PA_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_REVERSE_MAP_RBC_ADDR_TO_PA_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV208F_CTRL_FB_REVERSE_MAP_RBC_ADDR_TO_PA_PARAMS { + NV_DECLARE_ALIGNED(NvU64 address, 8); + NvU32 rbcAddress; + NvU32 rbcAddressExt; + NvU32 rbcAddressExt2; + NvU32 partition; + NvU32 sublocation; +} NV208F_CTRL_FB_REVERSE_MAP_RBC_ADDR_TO_PA_PARAMS; + +/** + * NV208F_CTRL_CMD_FB_TOGGLE_PHYSICAL_ADDRESS_ECC_ON_OFF + * + * This command will convert a physical address when ECC is on to the physical + * address when ECC is off or vice versa + * + * @params[in] NvU64 inputAddress + * Input physical address + * + * @params[in] NvBool eccOn + * Whether or not input physical address is with ECC on or off + * + * @params[out] NvU64 outputAddress + * Output physical address + */ +#define NV208F_CTRL_CMD_FB_TOGGLE_PHYSICAL_ADDRESS_ECC_ON_OFF (0x208f0514) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_TOGGLE_PHYSICAL_ADDRESS_ECC_ON_OFF_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_TOGGLE_PHYSICAL_ADDRESS_ECC_ON_OFF_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV208F_CTRL_FB_TOGGLE_PHYSICAL_ADDRESS_ECC_ON_OFF_PARAMS { + NV_DECLARE_ALIGNED(NvU64 inputAddress, 8); + NvBool eccOn; + NV_DECLARE_ALIGNED(NvU64 outputAddress, 8); +} NV208F_CTRL_FB_TOGGLE_PHYSICAL_ADDRESS_ECC_ON_OFF_PARAMS; + +/* + * NV208F_CTRL_CMD_FB_CLEAR_REMAPPED_ROWS + * + * This command clears remapping entries from the Inforom's row remapping table. + * + * sourceMask + * This is a bit mask of NV2080_CTRL_FB_REMAPPED_ROW_SOURCE. Rows + * remapped from the specified sources will be cleared/removed from the + * Inforom RRL object entries list. 
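Returning briefly to the address-conversion control defined above, a one-call sketch that translates an ECC-on physical address into its ECC-off equivalent; rmControl() and the handles are assumptions for illustration.

#include "ctrl/ctrl208f/ctrl208ffb.h"
#include "nvtypes.h"
#include "nvstatus.h"

extern NvU32 rmControl(NvHandle hClient, NvHandle hSubdevDiag, NvU32 cmd,
                       void *pParams, NvU32 paramsSize);  /* assumed wrapper */

static NvU32 eccOnToEccOff(NvHandle hClient, NvHandle hSubdevDiag,
                           NvU64 eccOnAddr, NvU64 *pEccOffAddr)
{
    NV208F_CTRL_FB_TOGGLE_PHYSICAL_ADDRESS_ECC_ON_OFF_PARAMS params = { 0 };
    NvU32 status;

    params.inputAddress = eccOnAddr;
    params.eccOn        = NV_TRUE;  /* the input address was computed with ECC on */

    status = rmControl(hClient, hSubdevDiag,
                       NV208F_CTRL_CMD_FB_TOGGLE_PHYSICAL_ADDRESS_ECC_ON_OFF,
                       &params, sizeof(params));
    if (status == NV_OK)
        *pEccOffAddr = params.outputAddress;
    return status;
}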
+ * + * Possbile status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV208F_CTRL_CMD_FB_CLEAR_REMAPPED_ROWS (0x208f0515) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID << 8) | NV208F_CTRL_FB_CLEAR_REMAPPED_ROWS_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FB_CLEAR_REMAPPED_ROWS_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV208F_CTRL_FB_CLEAR_REMAPPED_ROWS_PARAMS { + NvU32 sourceMask; +} NV208F_CTRL_FB_CLEAR_REMAPPED_ROWS_PARAMS; + +/* _ctrl208ffb_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffbio.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffbio.h new file mode 100644 index 000000000..ee7d3937b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffbio.h @@ -0,0 +1,252 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208ffbio.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* + * NV208F_CTRL_CMD_FBIO_SETUP_TRAINING_EXP + * + * This command defines an fbio training experiment for later use. + * + * This command has the form of a 'cmd' operation descriminant + * followed by a union populated with an operand to match the 'cmd'. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_COMMAND + * + */ +#define NV208F_CTRL_CMD_FBIO_SETUP_TRAINING_EXP (0x208f0a03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FBIO_INTERFACE_ID << 8) | NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_PARAMS_MESSAGE_ID" */ + +/* + * NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_NULL + * + * This command has no effect when used. + */ +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_NULL 0 + +/* + * NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_NUMBER_OF_MOD_SLOTS + * + * This command defines the number of register modification slots to be used + * in the setup phase of the pass/fail training exercise. Using this cmd + * causes any previously defined modification table to be cleared/released. + * The maximum size of the table is defined by + * _SETUP_FBIO_TRAINING_EXP_SET_NUMBER_OF_MOD_SLOTS__MAX + * Using the value of zero for number of mod slots is valid. "Mod slots" + * are defined below. 
+ * + * Possible status values returned are: + * NV_ERR_INVALID_LIMIT + * if modSlots is > __MAX + * NV_ERR_INSUFFICIENT_RESOURCES + * if we're out of memory setting up the mod slot table + */ +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_NUMBER_OF_MOD_SLOTS 1 +typedef struct NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_NUMBER_OF_MOD_SLOTS_OPERANDS { + NvU32 modSlots; +} NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_NUMBER_OF_MOD_SLOTS_OPERANDS; + +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_NUMBER_OF_MOD_SLOTS__MAX 256 + +/* + * NV208F_CTRL_FBIO_SETUPFBIO_TRAINING_EXP_SET_MOD_SLOT + * + * This command is used to define each individual element of the the sequence + * of operations which will be performed before running the pass/fail training + * exercise. Note that this cmd is multi-cmd (with a null cmd all its own, + * etc). + * + * Possible status values returned are: + * NV_ERR_INVALID_INDEX + * if seq is out of range for previously set SET_NUMBER_OF_MOD_SLOTS + * operation. + * NV_ERR_INVALID_COMMAND + * if cmd isn't recognized + */ +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT 2 + +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_NULL 0 + +/* + * NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_SET_REGISTER + * + * This cmd is used to define a register transaction to be applied in + * sequence before running the pass/fail experiment. This is where + * NV_PFB_TRAINING_ADR, NV_PFB_TRAINING_CMD, NV_PFB_TRAINING_DP(i), + * NV_PFB_TRAINING_THRESHOLD and NV_PFB_TRAINING_MASK, etc. should be + * configured before calling back with cmd + * NV208F_CTRL_CMD_FBIO_RUN_TRAINING_EXP. + * + * reg + * This parameter specifies the BAR0 register offset to affect. + * andMask + * orMask + * These parameter specify the RMW values to be used in the following: + * write32(reg, (read32(reg) & andMask) | orMask) + */ +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_SET_REGISTER 1 +typedef struct NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_SET_REGISTER_OPERANDS { + NvU32 reg; + NvU32 andMask; + NvU32 orMask; +} NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_SET_REGISTER_OPERANDS; + +/* + * NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_DELAY + * + * This cmd is used to define a delay to be applied in the setup sequence + * before running the pass/fail experiment. + * + * usec + * Specifies delay to be used in microseconds. + */ + +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_DELAY 2 +typedef struct NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_DELAY_OPERANDS { + NvU32 usec; +} NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_DELAY_OPERANDS; + +/* + * NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_INIT_LT + * + * This cmd is used to define a point where normal link training initialization + * may be executed in the sequence before running the pass/fail experiment. + * In general, this is not needed since it is done during normal + * initialization, but does allow re-initialization during the sequence. + */ +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_INIT_LT 3 + +/* + * NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_RUN_LT + * + * This cmd is used to define a point where normal link training may be + * executed in the sequence before running the pass/fail experiment. + * In general, this is not needed since it is somewhat redundant. 
+ */ +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_RUN_LT 4 + +/* + * NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_STOP_SEQ + * + * This cmd is used to define a point where the sequence stops without + * running the pass/fail experiment. + */ +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_STOP_SEQ 5 + + + +typedef struct NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_PARAMS { + NvU32 cmd; + /* C form: NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_OPERANDS op; */ + union { + + NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_SET_REGISTER_OPERANDS setRegister; + + NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_DELAY_OPERANDS delay; + } op; + NvU32 seq; +} NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_PARAMS; + + + +#define NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_PARAMS { + NvU32 cmd; + + /* C form: NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_OPERANDS op; */ + union { + + NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_NUMBER_OF_MOD_SLOTS_OPERANDS setNumberOfModSlots; + + NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_PARAMS setModSlot; + } op; +} NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_PARAMS; + +/* + * NV208F_CTRL_CMD_FBIO_RUN_TRAINING_EXP + * + * This command runs the previously defined experiment and returns status on + * pass/fail. NV_OK is returned in the case of both pass + * and fail. + * + * The risingPasses and fallingPasses outputs represents the results across + * all partitions and bytelanes. Each array entry corresponds to a partition + * and the bits within each member represent the bytelane. So e.g.: + * risingPasses[4] represents the rising pass results for all of partition 4's + * byte lanes. + * + * The partitionsValid bitmask represents the partitions for which the results + * in risingPasses and fallingPasses are valid (not floorswept). + * + * The bytelanesValid bitmask represents the bytelanes for which the results + * are valid (available bytelanes). + * + * The failingDebug[] represent debug data for why (if so) a test fails. + * This is basically NV_PFB_TRAINING_DEBUG(i) + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_TIMEOUT + * if hardware doesn't respond to request in a timely manner. + * NV_ERR_INVALID_DATA + * if there was some sort of very weird data corruption issue. 
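Putting the setup pieces above together, a hedged sketch that programs a two-slot experiment (one register read-modify-write, one settle delay) and then launches it with the run command defined immediately below. The register offset, masks, delay and the rmControl()/handle plumbing are placeholders chosen for illustration, not values taken from this header.

#include "ctrl/ctrl208f/ctrl208ffbio.h"
#include "nvstatus.h"

extern NvU32 rmControl(NvHandle hClient, NvHandle hSubdevDiag, NvU32 cmd,
                       void *pParams, NvU32 paramsSize);  /* assumed wrapper */

static NvU32 runTwoSlotExperiment(NvHandle hClient, NvHandle hSubdevDiag)
{
    NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_PARAMS setup = { 0 };
    NV208F_CTRL_FBIO_RUN_TRAINING_EXP_PARAMS results = { 0 };
    NvU32 status;

    /* Allocate a modification table with two slots. */
    setup.cmd = NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_NUMBER_OF_MOD_SLOTS;
    setup.op.setNumberOfModSlots.modSlots = 2;
    status = rmControl(hClient, hSubdevDiag,
                       NV208F_CTRL_CMD_FBIO_SETUP_TRAINING_EXP,
                       &setup, sizeof(setup));
    if (status != NV_OK)
        return status;

    /* Slot 0: read-modify-write of a (placeholder) training register. */
    setup.cmd = NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT;
    setup.op.setModSlot.seq = 0;
    setup.op.setModSlot.cmd = NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_SET_REGISTER;
    setup.op.setModSlot.op.setRegister.reg     = 0x00100000;  /* placeholder offset */
    setup.op.setModSlot.op.setRegister.andMask = 0xffffff00;
    setup.op.setModSlot.op.setRegister.orMask  = 0x00000042;
    status = rmControl(hClient, hSubdevDiag,
                       NV208F_CTRL_CMD_FBIO_SETUP_TRAINING_EXP,
                       &setup, sizeof(setup));
    if (status != NV_OK)
        return status;

    /* Slot 1: settle for 100us before the pass/fail experiment runs. */
    setup.cmd = NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT;
    setup.op.setModSlot.seq = 1;
    setup.op.setModSlot.cmd = NV208F_CTRL_FBIO_SETUP_TRAINING_EXP_SET_MOD_SLOT_DELAY;
    setup.op.setModSlot.op.delay.usec = 100;
    status = rmControl(hClient, hSubdevDiag,
                       NV208F_CTRL_CMD_FBIO_SETUP_TRAINING_EXP,
                       &setup, sizeof(setup));
    if (status != NV_OK)
        return status;

    /* Run the experiment; pass/fail detail comes back per partition and bytelane. */
    return rmControl(hClient, hSubdevDiag, NV208F_CTRL_CMD_FBIO_RUN_TRAINING_EXP,
                     &results, sizeof(results));
}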
+ */ +#define NV208F_CTRL_CMD_FBIO_RUN_TRAINING_EXP (0x208f0a04) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FBIO_INTERFACE_ID << 8) | NV208F_CTRL_FBIO_RUN_TRAINING_EXP_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FBIO_RUN_TRAINING_EXP_RESULT__SIZE 8 +#define NV208F_CTRL_FBIO_RUN_TRAINING_EXP_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV208F_CTRL_FBIO_RUN_TRAINING_EXP_PARAMS {NvU32 risingPasses[NV208F_CTRL_FBIO_RUN_TRAINING_EXP_RESULT__SIZE]; + NvU32 fallingPasses[NV208F_CTRL_FBIO_RUN_TRAINING_EXP_RESULT__SIZE]; + NvU32 failingDebug[NV208F_CTRL_FBIO_RUN_TRAINING_EXP_RESULT__SIZE]; + NvU32 partitionsValid; + NvU32 bytelanesValid; +} NV208F_CTRL_FBIO_RUN_TRAINING_EXP_PARAMS; + +#define NV208F_CTRL_CMD_FBIO_GET_TRAINING_CAPS (0x208f0a05) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FBIO_INTERFACE_ID << 8) | NV208F_CTRL_FBIO_GET_TRAINING_CAPS_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FBIO_GET_TRAINING_CAPS_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV208F_CTRL_FBIO_GET_TRAINING_CAPS_PARAMS { + NvU32 supported; +} NV208F_CTRL_FBIO_GET_TRAINING_CAPS_PARAMS; + +// _ctrl208ffbio_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffifo.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffifo.h new file mode 100644 index 000000000..2a7f3db42 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208ffifo.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208ffifo.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* + * NV208F_CTRL_CMD_FIFO_CHECK_ENGINE_CONTEXT + * + * This command checks whether or not engine context exists for a given + * engine for the channel with a given channel ID. This API is intended + * for testing virtual context. For debug only. + * + * hChannel + * The handle to the channel. + * engine + * The engine ID. + * Valid values are: + * NV2080_ENGINE_TYPE_GRAPHICS + * exists + * The output are TRUE or FALSE. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_ARGUMENT + */ +#define NV208F_CTRL_CMD_FIFO_CHECK_ENGINE_CONTEXT (0x208f0401) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FIFO_INTERFACE_ID << 8) | NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS { + NvHandle hChannel; + NvU32 engine; + NvBool exists; +} NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS; + +/* + * NV208F_CTRL_CMD_FIFO_ENABLE_VIRTUAL_CONTEXT + * + * This command enables virtual context for a given channel (for all engines). + * This API is intended for testing virtual context. For debug only. + * + * hChannel + * The handle to the channel. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_CHANNEL + */ +#define NV208F_CTRL_CMD_FIFO_ENABLE_VIRTUAL_CONTEXT (0x208f0402) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FIFO_INTERFACE_ID << 8) | NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS { + NvHandle hChannel; +} NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS; + +/* + * NV208F_CTRL_CMD_FIFO_GET_CHANNEL_STATE + * + * This command returns the fifo channel state for the given channel. + * This is for testing channel behavior. For debug only. + * + * hChannel + * The handle to the channel + * hClient + * The handle to the client + * bound + * The channel has been bound to channel RAM + * enabled + * The channel is able to run. + * scheduled + * The channel has been scheduled to run. + * cpuMap + * There is a cpu mapping available to this channel. + * contention + * The virtual channel is under contention + * runlistSet + * A runlist has been chosen for this channel + * deferRC + * An RC error has occurred, but recovery will occur at channel teardown. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_CHANNEL + */ +#define NV208F_CTRL_CMD_FIFO_GET_CHANNEL_STATE (0x208f0403) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FIFO_INTERFACE_ID << 8) | NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS { + NvHandle hChannel; + NvHandle hClient; + NvBool bBound; + NvBool bEnabled; + NvBool bScheduled; + NvBool bCpuMap; + NvBool bContention; + NvBool bRunlistSet; + NvBool bDeferRC; +} NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS; + +/* + * NV208F_CTRL_CMD_FIFO_GET_CONTIG_RUNLIST_POOL + * + * This command returns the location of the pool runlists are allocated from for + * WPR testing. + * For debug only. 
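A sketch (editor's illustration, not part of this header) of how the two channel-oriented diag controls above might be exercised together. rmControl() is a hypothetical wrapper around the control escape, NV2080_ENGINE_TYPE_GRAPHICS comes from the NV2080 headers referenced in the comment above and is not included by this file, and nvstatus.h supplies NV_STATUS/NV_OK.

    #include <stdio.h>
    #include <nvstatus.h>                       /* NV_STATUS, NV_OK (SDK) */
    #include "ctrl/ctrl208f/ctrl208ffifo.h"

    /* Hypothetical control-call wrapper; the escape/ioctl plumbing is not part of this header. */
    extern NV_STATUS rmControl(NvHandle hDiag, NvU32 cmd, void *pParams, NvU32 size);

    static void dumpChannelDiagState(NvHandle hDiag, NvHandle hClient, NvHandle hChannel)
    {
        NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS ctx   = { 0 };
        NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS    state = { 0 };

        /* Does a graphics engine context exist for this channel? */
        ctx.hChannel = hChannel;
        ctx.engine   = NV2080_ENGINE_TYPE_GRAPHICS;   /* from the NV2080 headers */
        if (rmControl(hDiag, NV208F_CTRL_CMD_FIFO_CHECK_ENGINE_CONTEXT, &ctx, sizeof(ctx)) == NV_OK)
            printf("GR context exists: %s\n", ctx.exists ? "yes" : "no");

        /* Read the raw channel state flags for the same channel. */
        state.hClient  = hClient;
        state.hChannel = hChannel;
        if (rmControl(hDiag, NV208F_CTRL_CMD_FIFO_GET_CHANNEL_STATE, &state, sizeof(state)) == NV_OK)
            printf("bound=%d enabled=%d scheduled=%d deferRC=%d\n",
                   state.bBound, state.bEnabled, state.bScheduled, state.bDeferRC);
    }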
+ * + * physAddr [out] + * Physical address of the pool + * + * size [out] + * Size in bytes of the pool + * + */ +#define NV208F_CTRL_CMD_FIFO_GET_CONTIG_RUNLIST_POOL (0x208f0404) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_FIFO_INTERFACE_ID << 8) | NV208F_CTRL_FIFO_GET_CONTIG_RUNLIST_POOL_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_FIFO_GET_CONTIG_RUNLIST_POOL_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV208F_CTRL_FIFO_GET_CONTIG_RUNLIST_POOL_PARAMS { + NV_DECLARE_ALIGNED(NvU64 physAddr, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); +} NV208F_CTRL_FIFO_GET_CONTIG_RUNLIST_POOL_PARAMS; + +/* _ctrl208ffifo_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h new file mode 100644 index 000000000..4b0f00e0a --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgpu.h @@ -0,0 +1,164 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fgpu.finn +// + + + + +#include "ctrl/ctrl2080/ctrl2080gr.h" /* 208F is partially derivative of 2080 */ +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* + * NV208F_CTRL_CMD_GPU_GET_RAM_SVOP_VALUES + * + * This command can be used to get the RAM SVOP values. + * + * sp + * This field outputs RAM_SVOP_SP + * rg + * This field outputs RAM_SVOP_REG + * pdp + * This field outputs RAM_SVOP_PDP + * dp + * This field outputs RAM_SVOP_DP + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV208F_CTRL_CMD_GPU_GET_RAM_SVOP_VALUES (0x208f1101) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID << 8) | 0x1" */ + +typedef struct NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS { + NvU32 sp; + NvU32 rg; + NvU32 pdp; + NvU32 dp; +} NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS; + +/* + * NV208F_CTRL_CMD_GPU_SET_RAM_SVOP_VALUES + * + * This command can be used to set the RAM SVOP values. 
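A round-trip sketch for the SVOP pair (editor's illustration): read the current values, adjust one field, and write everything back. It assumes the SET command accepts the same NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS layout as the GET command above, since no separate SET structure appears in this file, and it reuses the hypothetical rmControl() wrapper.

    #include <nvstatus.h>                       /* NV_STATUS, NV_OK (SDK) */
    #include "ctrl/ctrl208f/ctrl208fgpu.h"

    /* Hypothetical control-call wrapper; not part of this header. */
    extern NV_STATUS rmControl(NvHandle hDiag, NvU32 cmd, void *pParams, NvU32 size);

    static NV_STATUS bumpRamSvopSp(NvHandle hDiag)
    {
        NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS svop = { 0 };
        NV_STATUS status;

        /* Read the current SVOP settings... */
        status = rmControl(hDiag, NV208F_CTRL_CMD_GPU_GET_RAM_SVOP_VALUES, &svop, sizeof(svop));
        if (status != NV_OK)
            return status;

        /* ...adjust one field, then write the whole set back. */
        svop.sp += 1;
        return rmControl(hDiag, NV208F_CTRL_CMD_GPU_SET_RAM_SVOP_VALUES, &svop, sizeof(svop));
    }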
+ * + * sp + * Input for RAM_SVOP_SP + * rg + * Input for RAM_SVOP_REG + * pdp + * Input for RAM_SVOP_PDP + * dp + * Input for RAM_SVOP_DP + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV208F_CTRL_CMD_GPU_SET_RAM_SVOP_VALUES (0x208f1102) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID << 8) | 0x2" */ + + + +/* + * NV208F_CTRL_CMD_GPU_VERIFY_INFOROM + * + * This command can be used by clients to determine if an InfoROM + * with a valid image is present. If the SKU in question does + * not feature an InfoROM, the NV_ERR_NOT_SUPPORTED + * error is returned. Else the RM attempts to read the ROM object + * and any objects listed in the ROM object. The checksum of + * each object read is verified. If all checksums are valid, the + * RM will report the InfoROM as being valid. If image is valid then + * RM will return a checksum for all of the dynamically configurable + * data in InfoROM. This checksum is expected to be same for all the + * boards with an identical InfoROM version and similarly configured. + * + * result + * The result of the InfoROM verification attempt. Possible + * values are: + * NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULTS_NONE + * This value indicates that a validation couldn't be done + * due to some software/OS related error. + * NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULTS_IO_ERROR + * This value indicates that a validation couldn't be done + * due to some IO error. + * NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULTS_VALID + * This value indicates that all InfoROM objects have valid + * checksum. + * NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULTS_INVALID + * This value indicates that some InfoROM objects have invalid + * checksum. + * checksum + * Checksum for all of the dynamically configurable data + * in InfoROM for e.g. PWR and CFG objects. + * + * NOTE: For the result values to be valid, return status should be: + * NV_OK + * + * Possible return status values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * + */ +#define NV208F_CTRL_CMD_GPU_VERIFY_INFOROM (0x208f1105) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID << 8) | NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS { + NvU32 result; + NvU32 checksum; +} NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS; + +/* valid result values */ +#define NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_NONE (0x00000000) +#define NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_IO_ERROR (0x00000001) +#define NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_VALID (0x00000002) +#define NV208F_CTRL_GPU_INFOROM_VERIFICATION_RESULT_INVALID (0x00000003) + +/* + * NV208F_CTRL_CMD_GPU_DISABLE_ECC_INFOROM_REPORTING + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + + + +#define NV208F_CTRL_CMD_GPU_DISABLE_ECC_INFOROM_REPORTING (0x208f1107) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID << 8) | 0x7" */ + + + +/* _ctrl208fgpu_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgr.h new file mode 100644 index 000000000..c5f7997b9 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fgr.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fgr.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +#include "ctrl/ctrl2080/ctrl2080gr.h" + + + +/* + * NV208F_CTRL_CMD_GR_ECC_INJECT_ERROR + * + * Control command to inject a gr ecc error + * + * Parameters: + * + * location + * location index + * sublocation + * sublocation index + * unit + * unit to inject error into + * errorType + * SBE or DBE + * grRouteInfo + * Routing info for SMC + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV208F_CTRL_CMD_GR_ECC_INJECT_ERROR (0x208f1203) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GR_INTERFACE_ID << 8) | NV208F_CTRL_GR_ECC_INJECT_ERROR_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_GR_ECC_INJECT_ERROR_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV208F_CTRL_GR_ECC_INJECT_ERROR_PARAMS { + NvU32 location; + NvU32 sublocation; + NvU8 unit; + NvU8 errorType; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV208F_CTRL_GR_ECC_INJECT_ERROR_PARAMS; + + + +/* + * NV208F_CTRL_CMD_GR_ECC_INJECTION_SUPPORTED + * + * Reports if error injection is supported for a given HW unit + * + * unit [in]: + * The ECC protected unit for which ECC injection support is being checked. + * + * bCorrectableSupported [out]: + * Boolean value that shows if correcatable errors can be injected. + * + * bUncorrectableSupported [out]: + * Boolean value that shows if uncorrecatable errors can be injected. + * + * Return values: + * NV_OK on success + * NV_ERR_INVALID_ARGUMENT if the requested unit is invalid. + * NV_ERR_INSUFFICIENT_PERMISSIONS if priv write not enabled. 
+ * NV_ERR_NOT_SUPPORTED otherwise + * + * + */ +#define NV208F_CTRL_CMD_GR_ECC_INJECTION_SUPPORTED (0x208f1204) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_GR_INTERFACE_ID << 8) | NV208F_CTRL_GR_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_GR_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV208F_CTRL_GR_ECC_INJECTION_SUPPORTED_PARAMS { + NvU8 unit; + NvBool bCorrectableSupported; + NvBool bUncorrectableSupported; + NV_DECLARE_ALIGNED(NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, 8); +} NV208F_CTRL_GR_ECC_INJECTION_SUPPORTED_PARAMS; + +/* _ctrl208fgr_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fmmu.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fmmu.h new file mode 100644 index 000000000..00cbaf0a7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fmmu.h @@ -0,0 +1,131 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fmmu.finn +// + + + + +#include "ctrl/ctrl208f/ctrl208fbase.h" + +/* + * NV208F_CTRL_CMD_MMU_ECC_INJECT_ERROR + * + * This ctrl call injects MMU ECC errors. Please see the confluence + * page "ECC" for more information on ECC and ECC injection: + * + * Parameters: + * + * location + * Only used for HSHUB. + * + * sublocation + * Only used for HSHUB. + * + * unit + * Specifies the MMU HW unit where the injection will occur. + * + * errorType + * Specifies whether the injected error will be correctable or uncorrectable. + * Correctable errors have no effect on running programs while uncorrectable + * errors will cause all channels to be torn down. 
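The usual pattern for the injection controls is to query support first and only then inject, using the parameter structures defined just below. In this sketch (editor's illustration, not part of the header) the unit and errorType encodings are left to the caller because their enumerant values are not shown in this hunk; rmControl() is a hypothetical wrapper and nvstatus.h supplies the status codes.

    #include <nvstatus.h>                       /* NV_STATUS, NV_OK, NV_ERR_* (SDK) */
    #include "ctrl/ctrl208f/ctrl208fmmu.h"

    /* Hypothetical control-call wrapper; not part of this header. */
    extern NV_STATUS rmControl(NvHandle hDiag, NvU32 cmd, void *pParams, NvU32 size);

    /* Check that the requested kind of injection is supported, then inject one error. */
    static NV_STATUS injectMmuEccError(NvHandle hDiag, NvU8 unit, NvU8 errorType, NvBool bUncorrectable)
    {
        NV208F_CTRL_MMU_ECC_INJECTION_SUPPORTED_PARAMS sup = { 0 };
        NV208F_CTRL_MMU_ECC_INJECT_ERROR_PARAMS        inj = { 0 };
        NV_STATUS status;

        sup.unit = unit;
        status = rmControl(hDiag, NV208F_CTRL_CMD_MMU_ECC_INJECTION_SUPPORTED, &sup, sizeof(sup));
        if (status != NV_OK)
            return status;
        if ((bUncorrectable && !sup.bUncorrectableSupported) ||
            (!bUncorrectable && !sup.bCorrectableSupported))
            return NV_ERR_NOT_SUPPORTED;

        inj.unit      = unit;
        inj.errorType = errorType;  /* correctable/uncorrectable encoding defined elsewhere */
        /* location/sublocation are only used for HSHUB units; left at 0 here. */
        return rmControl(hDiag, NV208F_CTRL_CMD_MMU_ECC_INJECT_ERROR, &inj, sizeof(inj));
    }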
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + */ +#define NV208F_CTRL_CMD_MMU_ECC_INJECT_ERROR (0x208f0b01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_MMU_INTERFACE_ID << 8) | NV208F_CTRL_MMU_ECC_INJECT_ERROR_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_MMU_ECC_INJECT_ERROR_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV208F_CTRL_MMU_ECC_INJECT_ERROR_PARAMS { + NvU32 location; + NvU32 sublocation; + NvU8 unit; + NvU8 errorType; +} NV208F_CTRL_MMU_ECC_INJECT_ERROR_PARAMS; + + + +/* + * NV208F_CTRL_CMD_MMU_ECC_INJECTION_SUPPORTED + * + * Reports if error injection is supported for a given HW unit + * + * unit [in]: + * The ECC protected unit for which ECC injection support is being checked. + * + * bCorrectableSupported [out]: + * Boolean value that shows if correcatable errors can be injected. + * + * bUncorrectableSupported [out]: + * Boolean value that shows if uncorrecatable errors can be injected. + * + * Return values: + * NV_OK on success + * NV_ERR_INVALID_ARGUMENT if the requested location is invalid. + * NV_ERR_INSUFFICIENT_PERMISSIONS if priv write not enabled. + * NV_ERR_NOT_SUPPORTED otherwise + * + */ +#define NV208F_CTRL_CMD_MMU_ECC_INJECTION_SUPPORTED (0x208f0b02) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_MMU_INTERFACE_ID << 8) | NV208F_CTRL_MMU_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID" */ + + + +#define NV208F_CTRL_MMU_ECC_INJECTION_SUPPORTED_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV208F_CTRL_MMU_ECC_INJECTION_SUPPORTED_PARAMS { + NvU8 unit; + NvBool bCorrectableSupported; + NvBool bUncorrectableSupported; +} NV208F_CTRL_MMU_ECC_INJECTION_SUPPORTED_PARAMS; + +/* + * NV208F_CTRL_CMD_MMU_GET_NUM_HSHUBMMUS + * + * Returns the number of ECC Capable HSHUBMMUS. + * + * numHshubmmus [out] + * Number of Hshubmmus + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NV208F_CTRL_CMD_MMU_GET_NUM_HSHUBMMUS (0x208f0b03) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_DIAG_MMU_INTERFACE_ID << 8) | NV208F_CTRL_MMU_GET_NUM_HSHUBMMUS_PARAMS_MESSAGE_ID" */ + +#define NV208F_CTRL_MMU_GET_NUM_HSHUBMMUS_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV208F_CTRL_MMU_GET_NUM_HSHUBMMUS_PARAMS { + NvU32 numHshubmmus; +} NV208F_CTRL_MMU_GET_NUM_HSHUBMMUS_PARAMS; + +/* _ctrl208fmmu_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fpmgr.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fpmgr.h new file mode 100644 index 000000000..d7ea299ac --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fpmgr.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fpmgr.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fpower.h b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fpower.h new file mode 100644 index 000000000..e225c7bba --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl208f/ctrl208fpower.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl208f/ctrl208fpower.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h b/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h new file mode 100644 index 000000000..7854a45e4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl30f1.h @@ -0,0 +1,1496 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl30f1.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV30_GSYNC_CTRL control commands and parameters */ + +#define NV30F1_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x30F1, NV30F1_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV30F1_CTRL_RESERVED (0x00) +#define NV30F1_CTRL_GSYNC (0x01) + +/* + * NV30F1_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV30F1_CTRL_CMD_NULL (0x30f10000) /* finn: Evaluated from "(FINN_NV30_GSYNC_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* NV30F1_CTRL_GSYNC + * + * Gsync board access/control functionality. + * + */ + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_VERSION + * + * This command will get the current gsync api version info. + * + * version + * The api's major version. Does not change often. + * + * revision + * The api's minor version. + * Bumped with each change, no matter how minor. + * + * Possible status values returned are: + * NV_OK + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_VERSION (0x30f10101) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS { + NvU32 version; + NvU32 revision; +} NV30F1_CTRL_GSYNC_GET_VERSION_PARAMS; + +#define NV30F1_CTRL_GSYNC_API_VER 1 +#define NV30F1_CTRL_GSYNC_API_REV 0 + +/* + * NV30F1_CTRL_GSYNC api + * + * The gsync interface provides access to gsync devices in the system. + * + * There are commands: + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS + * Status on input sync signals. + * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS + * Get gsync parameters. + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS + * Get gsync parameters. + * NV30F1_CTRL_CMD_GSYNC_GET_INFO_CAPS + * Get basic info about the device and its connected displays + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC + * Enable frame sync. + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC + * Disable frame sync. + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS + * Get status info relevant for the control panel + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING + * Test signal enabling/disabling + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG + * Control the gsync watchdog + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE + * Set the interlace mode + * + */ + + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS + * + * This command returns information associated with incoming signals to the + * gsync device. + * + * RJ45 + * This parameter contains the signal information for each of the two RJ45 + * ports on the gsync device. A value of ~0 indicates that a signal is + * detected, but no rate information is available. Anything else is a rate + * in units of 10e-4 Hz. + * house + * This parameter contains the signal information for the house sync signal + * (i.e. the bnc port). A value of 0 means that no signal is present. A value + * of ~0 indicates that a signal is detected, but no rate information is + * available. Anything else is a rate in units of 10e-4 Hz. 
+ * rate
+ * A mask representing signals for which we would like rate information (if
+ * available).
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_PARAM_STRUCT
+ *
+ */
+
+#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SIGNALS (0x30f10102) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS {
+ NvU32 RJ45[2];
+ NvU32 house;
+ NvU32 rate;
+} NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_PARAMS;
+
+/*
+ * rate values
+ *
+ */
+
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_RJ45_0 (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SIGNALS_RJ45_1 (0x00000002)
+#define NV30F1_CTRL_GSYNC_GET_SIGNALS_HOUSE (0x00000004)
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS
+ * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS
+ *
+ * These commands respectively return and set state related to the operation
+ * of the gsync device.
+ *
+ * which
+ * This value is a mask set by the client representing which parameters are
+ * to be updated. In the case of a SET command, these parameters will
+ * be used to change the state of the hardware. For both a GET
+ * command and a SET command, the hardware state will be returned by
+ * the RM in the respective parameter. All other parameters are untouched.
+ * syncPolarity
+ * This parameter specifies which edge of the house sync signal to sync with.
+ * videoMode
+ * This parameter specifies which video mode to use to decode the house sync
+ * signal.
+ * nSync
+ * This parameter specifies the number of pulses to wait between frame lock
+ * signal generation. 0 indicates that every incoming pulse should result in
+ * a frame lock sync pulse being generated (i.e. the input and output rates
+ * match).
+ * syncSkew
+ * This parameter specifies the time delay between the frame sync signal and
+ * the GPU's signal in units of 0.977 us. The maximum value for syncSkew is
+ * defined in the respective header files of the gsync boards, e.g. for the
+ * P2060 board the value is defined in drivers/resman/kernel/inc/dacp2060.h.
+ * syncStartDelay
+ * In master mode, the amount of time to wait before generating the first
+ * sync pulse in units of 7.81 us, max 512 ms (i.e. 65535 units).
+ * useHouseSync
+ * When a house sync signal is detected, this parameter indicates that it
+ * should be used as the reference to generate the frame sync signal.
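A sketch of the read-modify-write idiom the which mask enables (editor's illustration, not part of the header): only the fields whose bits are set in which are programmed, and the RM reports the resulting hardware state back in the same structure. The parameter structure and flag values used here are defined just below; rmControl() is a hypothetical wrapper issued against the NV30_GSYNC object.

    #include <nvstatus.h>                       /* NV_STATUS, NV_OK (SDK) */
    #include "ctrl/ctrl30f1.h"

    /* Hypothetical control-call wrapper issued against the NV30_GSYNC object. */
    extern NV_STATUS rmControl(NvHandle hGsync, NvU32 cmd, void *pParams, NvU32 size);

    /* Update only the sync skew; every other control parameter is untouched
     * because its bit is not set in 'which'. */
    static NV_STATUS setSyncSkew(NvHandle hGsync, NvU32 newSkew)
    {
        NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS params = { 0 };

        params.which    = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW;
        params.syncSkew = newSkew;

        /* On return the RM reports the resulting hardware state in the same fields. */
        return rmControl(hGsync, NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, &params, sizeof(params));
    }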
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ + +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS (0x30f10103) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS (0x30f10104) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS { + NvU32 which; + NvU32 syncPolarity; + NvU32 syncVideoMode; + NvU32 nSync; + NvU32 syncSkew; + NvU32 syncStartDelay; + NvU32 useHouseSync; +} NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS; +#define NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS_MESSAGE_ID (0x4U) + +typedef NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS; + +/* + * which values + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY 0x0001 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE 0x0002 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC 0x0004 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW 0x0008 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_START_DELAY 0x0010 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE 0x0020 + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_POLARITY NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE +#define NV30F1_CTRL_GSYNC_GET_CONTROL_NSYNC NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_SKEW NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_START_DELAY NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_START_DELAY +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_USE_HOUSE NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE + +/* + * syncPolarity values + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_RISING_EDGE 0 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_FALLING_EDGE 1 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_BOTH_EDGES 2 + +/* + * syncVideoMode values + * Video_Mode_Composite is valid for P2060 only. + * + */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE 0 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL 1 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM 2 +#define NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV 3 + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_NONE NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_TTL NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_NTSCPALSECAM NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_HDTV NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV +#define NV30F1_CTRL_GSYNC_GET_CONTROL_VIDEO_MODE_COMPOSITE 4 + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_CAPS + * + * This command returns the capabilities of this gsync device. + * + * revId + * This parameter is set by the RM to indicate the combined + * FPGA revision (low 4 bits) and board ID (high 4 bits). + * + * boardId + * This parameter is set by the RM to indicate the board ID, + * allowing disambiguation of P2060 and so forth. 
+ *
+ * minRevRequired
+ * This parameter is set by the RM to indicate the minimum
+ * Qsync FPGA revision required for a specific chip family.
+ *
+ * isFirmwareRevMismatch
+ * This parameter is set to TRUE by RM when the Qsync Firmware
+ * Revision is incompatible with the connected GPU chip family.
+ *
+ * revision
+ * This parameter is set by the RM to indicate the device revision,
+ * also known as major version.
+ *
+ * extendedRevision
+ * This parameter is set by the RM to indicate the device extended
+ * revision, also known as minor version.
+ *
+ * capFlags
+ * This parameter is set by the RM to indicate capabilities of
+ * the board, preventing the client from needing to keep track
+ * of the feature lists supported by each revision of each board.
+ *
+ * maxSyncSkew
+ * This parameter returns the maximum units of sync skew the
+ * board supports. The value programmed into the board has to be
+ * between 0 and maxSyncSkew, inclusive. The value of each unit
+ * can be learned from the syncSkewResolution parameter.
+ *
+ * syncSkewResolution
+ * This parameter returns the number of nanoseconds that one unit
+ * of sync skew corresponds to.
+ *
+ * maxStartDelay
+ * This parameter returns the maximum units of sync start delay
+ * the board supports. The value programmed into the board has to be
+ * between 0 and maxStartDelay, inclusive. The value of each unit
+ * can be learned from the startDelayResolution parameter.
+ *
+ * startDelayResolution
+ * This parameter returns the number of nanoseconds that one unit
+ * of sync start delay corresponds to.
+ *
+ * maxSyncInterval
+ * This parameter returns the maximum duration of the house sync interval
+ * between frame lock sync cycles that the board supports. The value
+ * programmed into the board has to be between 0 and maxSyncInterval,
+ * inclusive.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV30F1_CTRL_CMD_GSYNC_GET_CAPS (0x30f10105) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS {
+ NvU32 revId;
+ NvU32 boardId;
+ NvU32 minRevRequired;
+ NvBool isFirmwareRevMismatch;
+ NvU32 revision;
+ NvU32 extendedRevision;
+ NvU32 capFlags;
+ NvU32 maxSyncSkew;
+ NvU32 syncSkewResolution;
+ NvU32 maxStartDelay;
+ NvU32 startDelayResolution;
+ NvU32 maxSyncInterval;
+} NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS;
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2060 (0x00002060)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061 (0x00002061)
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_2DPS (0x00000001)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_3DPS (0x00000002)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_4DPS (0x00000004)
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_NEED_MASTER_BARRIER_WAR (0x00000010)
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_SYNC_LOCK_EVENT (0x10000000)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_HOUSE_SYNC_EVENT (0x20000000)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FRAME_COUNT_EVENT (0x40000000)
+
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_PRIMARY_CONNECTOR_EVENT (0x01000000)
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ALL_CONNECTOR_EVENT (0x02000000)
+
+// For P2060, clients can only request video modes at the BNC connector,
+// e.g. NO HS, TTL, Composite, etc.
+#define NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_GET_VIDEO_MODE (0x00100000) + +/* + * NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY + * + * This command returns the list of GPU IDs connected with the associated + * gsync device. + * + * gpus + * This array is set by RM to contain the gpu connection information + * for gpus attached to the gsync device. Valid entries are contiguous, + * beginning with the first entry in the list. The elements of this array contain + * the following fields: + * gpuId + * This field contains the ID of the connected GPU. If the entry in the + * table is invalid, this fields contains NV30F1_CTRL_GPU_INVALID_ID. + * connector + * This field indicates which connector on the device the GPU is connected + * to (i.e. the primary or secondary connector), if any. + * proxyGpuId + * If the 'connector' field indicates that the GPU is not connected to + * a G-Sync device directly, then this field contains the ID of the + * GPU that acts as a proxy, i.e. the GPU to which this GPU should be + * a RasterLock slave. + * connectorCount + * This parameter indicates the number of GPU connectors available on + * the gsync device. The connector count of the gsync device may be + * less than NV30F1_CTRL_MAX_GPUS_PER_GSYNC. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY (0x30f10106) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_MAX_GPUS_PER_GSYNC 4 +#define NV30F1_CTRL_GPU_INVALID_ID (0xffffffff) + +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS { + struct { + NvU32 gpuId; + NvU32 connector; + NvU32 proxyGpuId; + } gpus[NV30F1_CTRL_MAX_GPUS_PER_GSYNC]; + NvU32 connectorCount; +} NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS; + +/* + * connector values + * + */ +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE 1 +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_TWO 2 +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_THREE 3 +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_FOUR 4 + +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_NONE 0 +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PRIMARY NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE +#define NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_SECONDARY NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_TWO + + + + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC + * + * This command enables frame sync on displays. + * + * gpuId + * The parameter is set by the client to indicate the gpuId on which + * frame lock will be enabled. + * master + * This parameter is set by the client to specify whether this/these + * displays should be set as the master or as slaves. If this is a GET + * and displays is not 0, this will be set by the RM to indicate if + * the display can be the master. + * displays + * This is a device mask set by the client to indicate which display(s) + * are to be synched. Note that only one display may be set as master. + * If this is a GET, this set by the client to indicate which display + * is to be queried. If the display cannot be synched to this device, + * the RM will overwrite the mask with a 0. + * validateExternal + * This parameter is set by the client to tell the RM to validate the + * presence of an external sync source when enabling a master. + * refresh + * This parameter is set by the client to indicate the desired refresh rate + * The value is in 0.0001 Hertz (i.e. it has been multiplied by 10000). 
+ * configFlags + * contains flags for specific options. So far only + * NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_CONFIG_FLAGS_KEEP_MASTER_SWAPBARRIER_DISABLED + * is supported which allows the caller to prevent the rm code to automatically + * enable the swapbarrier on framelock masters on fpga revisions <= 5. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SYNC (0x30f10110) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC (0x30f10111) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS_MESSAGE_ID" */ + +// If set the swapbarrier is not enable automatically when enablign a framelock master on fpga revs <= 5. +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_CONFIG_FLAGS_KEEP_MASTER_SWAPBARRIER_DISABLED (0x00000001) + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS { + NvU32 gpuId; + NvU32 master; + NvU32 displays; + NvU32 validateExternal; + NvU32 refresh; + NvU32 configFlags; +} NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS; +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS_MESSAGE_ID (0x11U) + +typedef NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC + * + * This command disables frame sync on displays + * + * gpuId + * The parameter is set by the client to indicate the gpuId on which + * frame lock will be disabled. + * master + * This parameter is set by the client to specify whether this/these + * display(s) to be unset is a master/are slaves. + * displays + * This is a device mask set by the client to indicate which display(s) + * are to be unsynched. + * retainMaster + * Retain the designation as master, but unsync the displays. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + */ +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC (0x30f10112) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS { + NvU32 gpuId; + NvU32 master; + NvU32 displays; + NvU32 retainMaster; +} NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC + * + * This command gets the sync state for the gpus attached to the + * framelock device. Note that the frame lock device only has + * knowledge of sync status at the gpu level, not the display + * device level. + * + * gpuId + * The parameter is set by the client to indicate which gpuId is to be + * queried. + * bTiming + * This parameter is set by the RM to indicate that timing on the GPU is + * in sync with the master sync signal. + * bStereoSync + * This parameter is set by the RM to indicate whether the phase of the + * timing signal coming from the GPU is the same as the phase of the + * master sync signal. + * bSyncReady + * This parameter is set by the RM to indicate if a sync signal has + * been detected. 
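A sketch of a typical enable sequence (editor's illustration, not part of the header): designate a master display with SET_CONTROL_SYNC, then poll GET_STATUS_SYNC, defined just below, until the board reports sync. The display mask and refresh value are placeholders, and rmControl() is a hypothetical wrapper.

    #include <nvstatus.h>                       /* NV_STATUS, NV_OK, NV_ERR_TIMEOUT (SDK) */
    #include "ctrl/ctrl30f1.h"

    /* Hypothetical control-call wrapper issued against the NV30_GSYNC object. */
    extern NV_STATUS rmControl(NvHandle hGsync, NvU32 cmd, void *pParams, NvU32 size);

    /* Make one display the framelock master, then poll until the board reports sync. */
    static NV_STATUS enableMasterAndWait(NvHandle hGsync, NvU32 gpuId, NvU32 displayMask)
    {
        NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS sync   = { 0 };
        NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS  status = { 0 };
        NV_STATUS rc;
        int i;

        sync.gpuId            = gpuId;
        sync.master           = 1;
        sync.displays         = displayMask;  /* exactly one display for the master */
        sync.validateExternal = 0;            /* no external sync source required */
        sync.refresh          = 600000;       /* placeholder: 60.0000 Hz in 0.0001 Hz units */
        rc = rmControl(hGsync, NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC, &sync, sizeof(sync));
        if (rc != NV_OK)
            return rc;

        for (i = 0; i < 10; i++)
        {
            status.gpuId = gpuId;
            rc = rmControl(hGsync, NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC, &status, sizeof(status));
            if (rc != NV_OK)
                return rc;
            if (status.bSyncReady && status.bTiming)
                return NV_OK;
            /* sleep/back-off between polls omitted */
        }
        return NV_ERR_TIMEOUT;
    }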
+ *
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_GENERIC
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+
+#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC (0x30f10113) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS_MESSAGE_ID" */
+
+#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS_MESSAGE_ID (0x13U)
+
+typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS {
+ NvU32 gpuId;
+ NvU32 bTiming;
+ NvU32 bStereoSync;
+ NvU32 bSyncReady;
+} NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS;
+
+
+/*
+ * NV30F1_CTRL_CMD_GSYNC_GET_STATUS
+ *
+ * This command gets status information for the frame lock device
+ * relevant to a control panel.
+ *
+ * which
+ * This is a mask set by the client describing which of the other
+ * parameters we should collect status information for.
+ * bLeadingEdge
+ * This parameter is set by the RM to indicate that the gsync device is
+ * set to sync to the leading edge of a house sync signal. Note that
+ * this does not mean that house sync is the signal source.
+ * bFallingEdge
+ * This parameter is set by the RM to indicate that the gsync device is
+ * set to sync to the falling edge of a house sync signal. Note that
+ * this does not mean that house sync is the signal source.
+ * syncDelay
+ * This parameter is set by the RM to indicate the sync delay in
+ * microseconds.
+ * refresh
+ * This parameter is set by the RM to indicate the rate of the frame sync pulse
+ * in 0.0001 Hertz (i.e. it has been multiplied by 10000). This is not the
+ * refresh rate of the display device. It is the same as the incoming house
+ * sync rate if framelocked to an external house sync signal. Otherwise, it is
+ * the same as the refresh rate of the master display device.
+ * houseSyncIncoming
+ * This parameter is set by the RM to indicate the rate of an incoming
+ * house sync signal in 0.0001 Hertz (i.e. it has been multiplied by 10000).
+ * syncInterval
+ * This parameter is set by the RM to indicate the number of incoming
+ * sync pulses to wait before the generation of the frame sync pulse.
+ * bSyncReady
+ * This parameter is set by the RM to indicate if a sync signal has
+ * been detected (this parameter is also available from the
+ * NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC method).
+ * bSwapReady
+ * This parameter is set by the RM to indicate if the hardware is
+ * ready to swap.
+ * bHouseSync
+ * This parameter is set by the RM to indicate that a house sync signal
+ * should be used as the source signal if it is available.
+ * bPort0Input
+ * This parameter is set by the RM to indicate that RJ45 port 0 is
+ * configured as an input.
+ * bPort1Input
+ * This parameter is set by the RM to indicate that RJ45 port 1 is
+ * configured as an input.
+ * bPort0Ethernet
+ * This parameter is set by the RM to indicate that RJ45 port 0 has
+ * been connected to an ethernet hub (this is not the right thing to do).
+ * bPort1Ethernet
+ * This parameter is set by the RM to indicate that RJ45 port 1 has
+ * been connected to an ethernet hub (this is not the right thing to do).
+ * universalFrameCount
+ * This parameter is set by the RM to indicate the value of the
+ * Universal frame counter.
+ * bInternalSlave
+ * This parameter is set by the RM to indicate that a P2061 has been
+ * configured as an internal slave.
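A sketch of a selective status query using the which mask and structure defined just below (editor's illustration, not part of the header); only the requested fields are collected. rmControl() is a hypothetical wrapper.

    #include <stdio.h>
    #include <nvstatus.h>                       /* NV_STATUS, NV_OK (SDK) */
    #include "ctrl/ctrl30f1.h"

    /* Hypothetical control-call wrapper issued against the NV30_GSYNC object. */
    extern NV_STATUS rmControl(NvHandle hGsync, NvU32 cmd, void *pParams, NvU32 size);

    /* Ask only for the status fields we care about; fields whose bits are not
     * set in 'which' are not collected by the RM. */
    static NV_STATUS queryPanelStatus(NvHandle hGsync)
    {
        NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS status = { 0 };
        NV_STATUS rc;

        status.which = NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_READY |
                       NV30F1_CTRL_GSYNC_GET_STATUS_SWAP_READY |
                       NV30F1_CTRL_GSYNC_GET_STATUS_UNIVERSAL_FRAME_COUNT;

        rc = rmControl(hGsync, NV30F1_CTRL_CMD_GSYNC_GET_STATUS, &status, sizeof(status));
        if (rc == NV_OK)
            printf("frameCount=%u syncReady=%u swapReady=%u\n",
                   status.universalFrameCount, status.bSyncReady, status.bSwapReady);
        return rc;
    }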
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_STATUS (0x30f10114) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS { + NvU32 which; + NvU32 bLeadingEdge; + NvU32 bFallingEdge; + NvU32 syncDelay; + NvU32 refresh; + NvU32 houseSyncIncoming; + NvU32 syncInterval; + NvU32 bSyncReady; + NvU32 bSwapReady; + NvU32 bHouseSync; + NvU32 bPort0Input; + NvU32 bPort1Input; + NvU32 bPort0Ethernet; + NvU32 bPort1Ethernet; + NvU32 universalFrameCount; + NvU32 bInternalSlave; +} NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS; + +/* + * which values + * + */ + +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_POLARITY (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_STATUS_LEADING_EDGE (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_STATUS_FALLING_EDGE (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_DELAY (0x00000002) +#define NV30F1_CTRL_GSYNC_GET_STATUS_REFRESH (0x00000004) +#define NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC_INCOMING (0x00000008) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_INTERVAL (0x00000010) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_READY (0x00000020) +#define NV30F1_CTRL_GSYNC_GET_STATUS_SWAP_READY (0x00000040) +#define NV30F1_CTRL_GSYNC_GET_STATUS_TIMING (0x00000080) +#define NV30F1_CTRL_GSYNC_GET_STATUS_STEREO_SYNC (0x00000100) +#define NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC (0x00000200) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT_INPUT (0x00000400) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_INPUT (0x00000400) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_INPUT (0x00000400) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT_ETHERNET (0x00000800) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_ETHERNET (0x00000800) +#define NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_ETHERNET (0x00000800) +#define NV30F1_CTRL_GSYNC_GET_STATUS_UNIVERSAL_FRAME_COUNT (0x00001000) +#define NV30F1_CTRL_GSYNC_GET_STATUS_INTERNAL_SLAVE (0x00002000) + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING + * + * This command controls the test signal on the gsync device. + * + * bEmitTestSignal + * This parameter is set by the client to emit or stop emitting the test + * signal. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_TESTING (0x30f10120) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING (0x30f10121) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS { + NvU32 bEmitTestSignal; +} NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS; +#define NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS_MESSAGE_ID (0x20U) + +typedef NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_TESTING_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG + * + * This command enables and disables the gsync watchdog + * + * enable + * This parameter is set by the client to enable or disable the + * gsync watchdog. 
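A small sketch combining the test-signal and watchdog controls (editor's illustration, not part of the header): stop the test signal, then enable the watchdog. The two commands are independent; nothing in this header ties them together, and rmControl() is a hypothetical wrapper.

    #include <nvstatus.h>                       /* NV_STATUS, NV_OK (SDK) */
    #include "ctrl/ctrl30f1.h"

    /* Hypothetical control-call wrapper issued against the NV30_GSYNC object. */
    extern NV_STATUS rmControl(NvHandle hGsync, NvU32 cmd, void *pParams, NvU32 size);

    static NV_STATUS stopTestSignalAndEnableWatchdog(NvHandle hGsync)
    {
        NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS  test = { 0 };
        NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS wd   = { 0 };
        NV_STATUS rc;

        test.bEmitTestSignal = 0;   /* stop emitting the test signal */
        rc = rmControl(hGsync, NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING, &test, sizeof(test));
        if (rc != NV_OK)
            return rc;

        wd.enable = 1;              /* enable the gsync watchdog */
        return rmControl(hGsync, NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG, &wd, sizeof(wd));
    }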
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + */ +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG (0x30f10130) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS { + NvU32 enable; +} NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS; + + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE + * + * This command enables or disables interlace mode. + * + * enable + * This parameter is set by the client to enable or disable + * interlace mode + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_INTERLACE_MODE (0x30f10140) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_INTERLACE_MODE (0x30f10141) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID (0x41U) + +typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS { + NvU32 enable; +} NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS; +#define NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS_MESSAGE_ID (0x40U) + +typedef NV30F1_CTRL_GSYNC_SET_CONTROL_INTERLACE_MODE_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_INTERLACE_MODE_PARAMS; + +/* + * + * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_BARRIER + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER + * + * These commands enables or disables the swap barrier + * connection between a GPU and the rest of the gsync + * network + * + * gpuId + * The parameter is set by the client to indicate which gpuId is to be + * queried. + * enable + * In a set command, this parameter is set by the client to + * indicate if the barrier should be enabled (i.e. connected + * to the rest of the network) or disabled (disconnected). + * In both a set and a get command, if successful, the RM + * uses this parameter to return the current (i.e. post-set) + * value. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_BARRIER (0x30f10150) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER (0x30f10151) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID (0x51U) + +typedef struct NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS { + NvU32 gpuId; + NvBool enable; +} NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS; +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS_MESSAGE_ID (0x50U) + +typedef NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_BARRIER_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW + * + * This command allow clients to obtain time period for which SwapLock window will + * remain HIGH for GSYNC III (P2060) i.e. TswapRdyHi. 
RM clients will use this value + * for programming SWAP_LOCKOUT_START on all heads of GPU connected to P2060. + * + * tSwapRdyHi + * RM will return swap lock window High time period in this variable. By default + * tSwapRdyHi is 250 micro seconds. RM also provide regkey to change this value. + * tSwapRdyHi also used by RM to configure value of LSR_MIN_TIME while programming + * swap barrier. + * Client should consider tSwapRdyHi only for Gsync III (P2060) network. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW (0x30f10153) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS_MESSAGE_ID (0x53U) + +typedef struct NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS { + NvU32 tSwapRdyHi; +} NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS; + + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING + * + * This command allows the client to obtain suggested + * adjustments to vertical and horizontal timing values + * that will improve the ability of gsync to lock. + * + * gpuId + * This parameter is set by the client to indicate the + * gpuId of the GPU to which the display to be optimized + * is attached. + * display + * This parameter is not used by RM currently. + * Clients can ignore this parameter. Note that this + * parameter will be removed in future. + * output + * This parameter is set by the client to indicate the + * output resource type of the display to be optimized. + * For example, CRTs use DAC output, while DFPs use SOR + * (Serial Output Resource) type. + * protocol + * This parameter is set by the client to indicate the + * data protocol of output resource. For DAC displays, + * the format of the standard mode most closely matching + * the desired mode is used. For SOR display devices, + * the LVDS/TMDS/etc format is the protocol. + * structure + * This parameter is set by the client to indicate the + * raster structure of the mode, either progressive or + * interlaced. Diagrams of the raster structures are + * provided below. + * adjust + * This parameter is set by the client to specify which + * of the timing values, other than hTotal and vTotal, + * may be adjusted during optimization. + * If the client does not obtain instructions from the + * user about where adjustments should be applied, safe + * default values for progressive/interlaced modes are + * provided below. + * hTotal + * This parameter is set by the client to specify the + * initial Horizontal Pixel Total, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vTotal + * This parameter is set by the client to specify the + * initial Vertical Pixel Total, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * hBlankStart + * This parameter is set by the client to specify the + * initial Horizontal Blanking Start, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vBlankStart + * This parameter is set by the client to specify the + * initial Vertical Blanking Start, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. 
+ * hBlankEnd + * This parameter is set by the client to specify the + * initial Horizontal Blanking End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vBlankEnd + * This parameter is set by the client to specify the + * initial Vertical Blanking End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vInterlacedBlankStart + * This parameter is set by the client to specify the + * initial Interlaced Vertical Blanking Start, from + * which the RM will begin optimizing. The RM will + * ignore this parameter for non-interlaced modes, as + * it has no meaning in those modes. In modes where + * it is meaningful, the RM also uses the parameter + * to return the optimized value. + * vInterlacedBlankEnd + * This parameter is set by the client to specify the + * initial Interlaced Vertical Blanking End, from + * which the RM will begin optimizing. The RM will + * ignore this parameter for non-interlaced modes, as + * it has no meaning in those modes. In modes where + * it is meaningful, the RM also uses the parameter + * to return the optimized value. + * hSyncEnd + * This parameter is set by the client to specify the + * initial Horizontal Raster Sync End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * vSyncEnd + * This parameter is set by the client to specify the + * initial Vertical Raster Sync End, from which the + * RM will begin optimizing. The RM also uses the + * parameter to return the optimized value. + * hDeltaStep + * This parameter is set by the client to specify the + * increments by which the Horizontal Pixel Total may + * be adjusted by the RM, during optimization. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * vDeltaStep + * This parameter is set by the client to specify the + * increments by which the vertical timings of each + * frame (in interlaced modes, each field) may be + * adjusted by the RM, during optimization. + * In interlaced modes, the adjustments to vTotal, + * vInterlacedBlankStart, and vInterlacedBlankEnd may + * be in increments of vDeltaStep or twice vDeltaStep, + * depending on where adjustments are made. + * In progressive modes, the adjustment to the vTotal + * will simply be in increments of vDeltaStep. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * hDeltaMax + * This parameter is set by the client to specify the + * maximum amount that the Horizontal Pixel Total may + * be adjusted by the RM, during optimization. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * vDeltaMax + * This parameter is set by the client to specify the + * maximum amount that vertical timings of each frame + * (in interlaced modes, each field) may be adjusted + * by the RM, during optimization. + * In interlaced modes, the adjustments to vTotal, + * vInterlacedBlankStart, and vInterlacedBlankEnd may + * be up to twice vDeltaMax. 
+ * In progressive modes, the adjustment to the vTotal + * may simply be up to vDeltaMax. + * If the client does not obtain a custom value for + * this parameter from the user, setting all four of + * hDeltaStep, vDeltaStep, hDeltaMax, and vDeltaMax + * to zero will result in a safe default for all four. + * refreshX10K + * This parameter is set by the client to specify the + * desired refresh rate, multiplied by 10000. This + * allows refresh rate to be set in units of 0.0001 Hz. + * For example, a 59.94 Hz rate would be set as 599400. + * The client can alternatively specify a the + * pixelClockHz parameter (if the passed in refreshX10K + * parameter is set to 0, the pixelClockHz parameter + * will be used). + * pixelClockHz + * This parameter is set by the client to specify the + * desired pixel clock frequency in units of Hz. The + * client can alternatively specify the refreshX10K parameter. + * This parameter is returned by the RM to report the + * optimal pixel clock to use with the adjusted mode, + * in units of Hz. + * + * Progressive Raster Structure + * + * hSyncEnd hTotal + * 0 | hBlankEnd hBlankStart | + * | | | | | vSync vBlank + * 0--+--------------------------------------------+ +-+ | + * | Sync | | | + * vSyncEnd--| +----------------------------------------+ +-+ | + * | | Back Porch | | | + * vBlankEnd--| | +--------------------------------+ | | +-+ + * | | | Active Area | | | | + * | | | +------------------------+ | | | | + * | | | | | | | | | + * | S | B | A | | A | F | | | + * | y | a | c | | c | r | | | + * | n | c | t | | t | o | | | + * | c | k | i | | i | n | | | + * | | | v | | v | t | | | + * | | P | e | Output Viewport | e | | | | + * | | o | | | | P | | | + * | | r | A | | A | o | | | + * | | c | r | | r | r | | | + * | | h | e | | e | c | | | + * | | | a | | a | h | | | + * | | | | | | | | | + * | | | +------------------------+ | | | | + * | | | Active Area | | | | + * vBlankStart-| | +--------------------------------+ | | +-+ + * | | Front Porch | | | + * vTotal--+---+----------------------------------------+ +-+ | + * ___ + * / \________________________________________/ hSync + * ________ ____ + * \________________________________/ hBlank + * + * + * + * Interlaced Raster Structure + * + * hSyncEnd hTotal + * 0 | hBlankEnd hBlankStart | + * | | | | | vSync vBlank + * 0--+--------------------------------------------+ +-+ | + * | Sync | | | + * vSyncEnd--| +----------------------------------------+ +-+ | + * | | Back Porch | | | + * vBlankEnd--| | +--------------------------------+ | | +-+ + * | | | Active Area | | | | + * | | | +------------------------+ | | | | + * | | | | | | | | | + * | S | B | A | | A | F | | | + * | y | a | c | | c | r | | | + * | n | c | t | | t | o | | | + * | c | k | i | | i | n | | | + * | | | v | | v | t | | | + * | | P | e | Output Viewport | e | | | | + * | | o | | | | P | | | + * | | r | A | | A | o | | | + * | | c | r | | r | r | | | + * | | h | e | | e | c | | | + * | | | a | | a | h | | | + * | | | | | | | | | + * | | | +------------------------+ | | | | + * | | | Active Area | | | | + * vBlankStart-| | +--------------------------------+ | | +-+ + * | | | | | + * | | Front Porch +--------------------+ | | + * | | | | +-+ | + * | +-------------------+ | | | + * | | | | + * | Sync +--------------------+ | | + * | | | +-+ | + * | +-------------------+ | | | + * vInterlaced | | Back Porch | | | + * BlankEnd--| | +--------------------------------+ | | +-+ + * | | | Active Area | | | | + * | | | +------------------------+ | | | | + * 
| | | | | | | | | + * | S | B | A | | A | F | | | + * | y | a | c | | c | r | | | + * | n | c | t | | t | o | | | + * | c | k | i | | i | n | | | + * | | | v | | v | t | | | + * | | P | e | Output Viewport | e | | | | + * | | o | | | | P | | | + * | | r | A | | A | o | | | + * | | c | r | | r | r | | | + * | | h | e | | e | c | | | + * | | | a | | a | h | | | + * | | | | | | | | | + * | | | +------------------------+ | | | | + * vInterlaced | | | Active Area | | | | + * BlankStart-| | +--------------------------------+ | | +-+ + * | | Front Porch | | | + * vTotal--+---+----------------------------------------+ +-+ | + * ___ + * / \________________________________________/ hSync + * ________ ____ + * \________________________________/ hBlank + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * +*/ + +#define NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING (0x30f10160) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS_MESSAGE_ID (0x60U) + +typedef struct NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS { + NvU32 gpuId; + NvU32 display; + NvU32 output; + NvU32 protocol; + NvU32 structure; + NvU32 adjust; + NvU32 hDeltaStep; + NvU32 hDeltaMax; + NvU32 vDeltaStep; + NvU32 vDeltaMax; + NvU32 hSyncEnd; + NvU32 hBlankEnd; + NvU32 hBlankStart; + NvU32 hTotal; + NvU32 vSyncEnd; + NvU32 vBlankEnd; + NvU32 vBlankStart; + NvU32 vInterlacedBlankEnd; + NvU32 vInterlacedBlankStart; + NvU32 vTotal; + NvU32 refreshX10K; + NvU32 pixelClockHz; +} NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS; + +/* output values */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_DAC (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR (0x00000004) + +/* protocol values for DAC displays (e.g. CRTs) */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_DAC_RGB_CRT (0x00000000) + +/* protocol values for SOR displays (e.g. 
DFPs) */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_LVDS_CUSTOM (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_A (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_B (0x00000002) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DUAL_TMDS (0x00000005) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A (0x00000008) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_B (0x00000009) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_HDMI_FRL (0x0000000C) + +/* structure values */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED (0x00000001) + +/* adjust values */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_FRONT_PORCH (0x00000001) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_FRONT_PORCH (0x00000002) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_ACTIVE_AREA (0x00000004) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_ACTIVE_AREA (0x00000008) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_BACK_PORCH (0x00000010) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_BACK_PORCH (0x00000020) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_H_RASTER_SYNC (0x00000040) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_V_RASTER_SYNC (0x00000080) + +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_CRT (0x00000030) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP (0x00000020) + +/* DeltaStep and DeltaMax values to trigger default settings */ +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_STEP_USE_DEFAULTS (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_MAX_USE_DEFAULTS (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_STEP_USE_DEFAULTS (0x00000000) +#define NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_MAX_USE_DEFAULTS (0x00000000) + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_EVENT_NOTIFICATION + * + * This command sets event notification state for the associated Gsync + * object. This command requires that an instance of NV01_EVENT has + * been previously bound to the associated Gsync object. + * + * If one or more of the "smart event notification" options are set in the + * action parameter, multiple sequential events of the same type will only + * trigger one notification. After that, only an event of a different type + * will trigger a new notification. + * + * action + * This member specifies the desired event notification action. + * Valid notification actions include: + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_DISABLE + * This action disables event notification for the associated + * Gsync object. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_LOSS + * This action enables smart event notification for the + * associated Gsync object, for "sync loss" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_GAIN + * This action enables smart event notification for the + * associated Gsync object, for "sync gained" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_LOSS + * This action enables smart event notification for the + * associated Gsync object, for "stereo lost" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_GAIN + * This action enables smart event notification for the + * associated Gsync object, for "stereo gained" events. 
+ * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_GAIN + * This action enables smart event notification for the + * associated Gsync object, for "house sync (BNC) plug in" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_LOSS + * This action enables smart event notification for the + * associated Gsync object, for "house sync (BNC) plug out" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_GAIN + * This action enables smart event notification for the + * associated Gsync object, for "ethernet (RJ45) plug in" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_LOSS + * This action enables smart event notification for the + * associated Gsync object, for "ethernet (RJ45) plug out" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_COUNT_MATCH + * This action enables smart event notification for the + * associated Gsync object, for "frame counter match" events. + * NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_ALL + * This action enables smart event notification for the + * associated Gsync object, for any type of event. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV30F1_CTRL_CMD_GSYNC_SET_EVENT_NOTIFICATION (0x30f10170) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS_MESSAGE_ID (0x70U) + +typedef struct NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS { + NvU32 action; +} NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_PARAMS; + +/* valid action values */ + +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_DISABLE (0x00000000) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_LOSS (0x00000001) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_SYNC_GAIN (0x00000002) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_LOSS (0x00000004) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_STEREO_GAIN (0x00000008) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_GAIN (0x00000010) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_HOUSE_LOSS (0x00000020) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_GAIN (0x00000040) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_RJ45_LOSS (0x00000080) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_COUNT_MATCH (0x00000100) +#define NV30F1_CTRL_GSYNC_SET_EVENT_NOTIFICATION_ACTION_SMART_ALL (0x000001FF) + +#define NV30F1_CTRL_GSYNC_EVENT_TYPES 9 + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE + * NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE + * + * These commands can be used to get/set the stereo lock assistance mode of + * the GSYNC device. This is supported by GSYNC III device only. + * 1] In this mode the GSYNC recreates the hidden VS either by -> + * (a) using local stereo edge if stereo is toggle or + * (b) counting lines and generate the missing VS. + * 2] Master GSYNC card recreates the stereo and passes it along to + * the slave GSYNC cards. + * 3] Slave GSYNC cards generates the stereo raster sync structure to + * synchronize the GPU. + * 4] For stereo sync status reporting, under this mode, the GSYNC automatically + * reports stereo lock whenever it gets the master stereo signal. 
The + * assumption is local stereo will be in synced with the new structure. + * 5] If the slave GSYNC card does not observed master stereo for any reason, + * (a) it clears the stereo sync bit and + * (b) it generates its own version of stereo and sync the GPU. + * + * Parameters: + * gpuId + * This parameter is set by the client to indicate the gpuId on which + * the stereo lock mode should be enabled/disabled. + * + * enable + * In SET query, this parameter is set by the client to indicate whether + * RM should enable or disable stereo lock mode for GPU specified in gpuId. + * 1 and 0 indicates enable and disable stereo lock mode respectively. In + * GET query, RM will set this parameter to 1 or 0 depending on StereoLock + * mode is enabled or not respectively for specified GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE (0x30f10172) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE (0x30f10173) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID (0x72U) + +typedef struct NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS { + NvU32 gpuId; + NvU32 enable; +} NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS; +#define NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS_MESSAGE_ID (0x73U) + +typedef NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_STEREO_LOCK_MODE_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_READ_REGISTER + * + * This command is used to read raw i2c registers from the gsync device, via + * the given GPU (registers on the same gsync device may have different values + * depending on which GPU is used to do the read). + * + * This may only be used by a privileged client. + * + * Parameters: + * gpuId + * This parameter is set by the client to specify which GPU to use to + * perform the read. + * + * reg + * This parameter is set by the client to specify which i2c register to + * read. + * + * data + * This parameter is written by the RM and returned to the client upon a + * successful read. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + */ +#define NV30F1_CTRL_CMD_GSYNC_READ_REGISTER (0x30f10180) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS_MESSAGE_ID (0x80U) + +typedef struct NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS { + NvU32 gpuId; + NvU8 reg; + NvU8 data; +} NV30F1_CTRL_GSYNC_READ_REGISTER_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_WRITE_REGISTER + * + * This command is used to write raw i2c registers on the gsync device, via the + * given GPU (registers on the same gsync device may have different values + * depending on which GPU is used to do the write). + * + * This may only be used by a privileged client. + * + * Parameters: + * gpuId + * This parameter is set by the client to specify which GPU to use to + * perform the write. + * + * reg + * This parameter is set by the client to specify which i2c register to + * write. 
+ * + * data + * This parameter is set by the client to specify what data to write to the + * given i2c register. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_PERMISSIONS + */ + +#define NV30F1_CTRL_CMD_GSYNC_WRITE_REGISTER (0x30f10181) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS_MESSAGE_ID (0x81U) + +typedef struct NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS { + NvU32 gpuId; + NvU8 reg; + NvU8 data; +} NV30F1_CTRL_GSYNC_WRITE_REGISTER_PARAMS; + + + +/* + * NV30F1_CTRL_CMD_GSYNC_SET_LOCAL_SYNC + * + * This command enables/disables raster sync on displays i.e. + * mosaic groups between gpus. + * + * gpuTimingSource + * The parameter is set by the client to indicate the gpuId of the + * Timing Source gpu for specified mosaic group. + * gpuTimingSlaves[] + * This parameter is set by the client to indicate the gpuIds of the + * timing slave gpus for specified mosaic group. It should not contain + * more gpuids than slaveGpuCount. + * slaveGpuCount + * This parameter is set by the client to indicate the count of timing + * slave gpus under specified group. + * Referring to gsync3-P2060, slaveGpuCount can vary from 0x01 to 0x03 + * as maximum possible connected gpus are four and one gpu must be + * timing master for mosaic group. + * mosaicGroupNumber + * This parameter is set by the client to tell the RM to which mosaic + * group it should refer. + * Referring to gsync3-P2060, mosaicGroupNumber can contain 0x00 or + * 0x01 as only two mosaic groups are possible. + * enableMosaic + * This parameter is set by the client to indicate RM that whether RM + * should enable mosaic or disable mosaic. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_SET_LOCAL_SYNC (0x30f10185) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS_MESSAGE_ID (0x85U) + +typedef struct NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS { + NvU32 gpuTimingSource; + NvU32 gpuTimingSlaves[NV30F1_CTRL_MAX_GPUS_PER_GSYNC]; + NvU32 slaveGpuCount; + NvU32 mosaicGroupNumber; + NvBool enableMosaic; +} NV30F1_CTRL_GSYNC_SET_LOCAL_SYNC_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH + * + * This command configure GSYNC registers for pre-flash and post-flash + * operations. This is currenly used for GSYNC-3 (P2060) only. RM clients + * has to make sure that they perform both pre-flash and post-flash + * operations on GSYNC board. Avoiding, post-flash will cause mismatch + * between RM cached-data and GSYNC register values. + * + * Parameters: + * gpuId + * This parameter is set by the client to indicate the gpuId for which + * GSYNC board connected to that GPU will be configured for pre-flash + * or post-flash operation depending on preFlash value. + * + * preFlash + * This parameter is set by the client to indicate whether RM has to configure + * GSYNC registers and SW state for pre-flash or post-flash operation. Values + * 1 and 0 indicates that RM will configure GSYNC board for pre-flash and + * post-flash operations respectively. 
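+ *
+ * A minimal usage sketch (illustrative only; rmControl(), hClient and hGsync
+ * are assumed placeholders for the caller's RM control dispatch path and
+ * handles, and are not part of this header):
+ *
+ *     NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS flashParams = { 0 };
+ *     flashParams.gpuId    = gpuId;   // GPU attached to the gsync board
+ *     flashParams.preFlash = 1;       // configure for pre-flash
+ *     status = rmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH,
+ *                        &flashParams, sizeof(flashParams));
+ *     // ... perform the firmware flash ...
+ *     flashParams.preFlash = 0;       // always follow up with post-flash so
+ *                                     // RM's cached state matches the board
+ *     status = rmControl(hClient, hGsync, NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH,
+ *                        &flashParams, sizeof(flashParams));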
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH (0x30f10186) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS_MESSAGE_ID" */ + +#define NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS_MESSAGE_ID (0x86U) + +typedef struct NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS { + NvU32 gpuId; + NvU32 preFlash; +} NV30F1_CTRL_CMD_GSYNC_CONFIG_FLASH_PARAMS; + +/* + * NV30F1_CTRL_CMD_GSYNC_GET_HOUSE_SYNC_MODE + * NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE + * + * These two commands gets/sets house sync mode as input or output. + * + * Parameters: + * houseSyncMode + * This parameter indicates whether the house sync mode is input or + * output. For GET_HOUSE_SYNC_MODE, the current mode will be written + * by RM and returned to the client; for SET_HOUSE_SYNC_MODE, the client + * will write the new mode value to this parameter and pass it to RM + * for execution. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_DEVICE + * NV_ERR_INVALID_STATE + * + */ +#define NV30F1_CTRL_CMD_GSYNC_GET_HOUSE_SYNC_MODE (0x30f10187) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | 0x87" */ + +#define NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE (0x30f10188) /* finn: Evaluated from "(FINN_NV30_GSYNC_GSYNC_INTERFACE_ID << 8) | 0x88" */ + +typedef struct NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS { + NvU8 houseSyncMode; +} NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS; + + +#define NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_INPUT (0x00) +#define NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_OUTPUT (0x01) + +/* _ctrl30f1_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h b/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h new file mode 100644 index 000000000..928031296 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl402c.h @@ -0,0 +1,976 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl402c.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV40_I2C control commands and parameters */ +#define NV402C_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x402C, NV402C_CTRL_##cat, idx) + +/* I2C command categories (6 bits) */ +#define NV402C_CTRL_RESERVED (0x00) +#define NV402C_CTRL_I2C (0x01) + + +/* This field specifies the maximum regular port identifier allowed. */ +#define NV402C_CTRL_NUM_I2C_PORTS 16 +/* This temporary field specifies the dynamic port identifier. */ +#define NV402C_CTRL_DYNAMIC_PORT NV_U8_MAX + +/* + * NV402C_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV402C_CTRL_CMD_NULL (0x402c0000) /* finn: Evaluated from "(FINN_NV40_I2C_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED + * The port exists on this hardware. + * NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED + * The port has an entry in the DCB. + * NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL + * The port is used to read EDIDs via DDC. + * NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED + * The port is accessible via the CRTC register space. + * NV402C_CTRL_I2C_GET_PORT_INFO_VALID + * The port is validated using I2C device. + */ +#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED 0:0 +#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED_NO 0x00 +#define NV402C_CTRL_I2C_GET_PORT_INFO_IMPLEMENTED_YES 0x01 +#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED 1:1 +#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED_NO 0x00 +#define NV402C_CTRL_I2C_GET_PORT_INFO_DCB_DECLARED_YES 0x01 +#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL 2:2 +#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL_ABSENT 0x00 +#define NV402C_CTRL_I2C_GET_PORT_INFO_DDC_CHANNEL_PRESENT 0x01 +#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED 3:3 +#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED_NO 0x00 +#define NV402C_CTRL_I2C_GET_PORT_INFO_CRTC_MAPPED_YES 0x01 +#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID 4:4 +#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID_NO 0x00 +#define NV402C_CTRL_I2C_GET_PORT_INFO_VALID_YES 0x01 +#define NV402C_CTRL_I2C_GET_PORT_INFO_ALL 4:0 +#define NV402C_CTRL_I2C_GET_PORT_INFO_ALL_DEFAULT 0x00 + +/* + * NV402C_CTRL_CMD_I2C_GET_PORT_INFO + * + * Returns information for the first eight I2C ports. + * + * info + * This parameter is an output from the command and is ignored as an + * input. Each element contains the flags described previously named + * NV402C_CTRL_I2C_GET_PORT_INFO*. Note that the index into the info + * array is one less than the port identifier that would be returned from + * NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID; the port numbers here are + * 0-indexed as opposed to 1-indexed. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + + + + +#define NV402C_CTRL_CMD_I2C_GET_PORT_INFO (0x402c0101) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS_MESSAGE_ID" */ + + + +#define NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS { + NvU8 info[NV402C_CTRL_NUM_I2C_PORTS]; +} NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS; +#define NV402C_CTRL_I2C_INDEX_LENGTH_MAX 4 +#define NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX 4096 + +//! Minimum and maximum valid read/write message length for block process protocol. 
+#define NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MIN 3 +#define NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX 32 + +/* + * NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE + * A client uses this field to indicate the I2C addressing mode to be + * used. + * Possible values are: + * NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT + * The default, this value specifies the master to operate in the + * basic 7-bit addressing mode, which is available on all + * implementations. + * NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_10BIT + * This I2C mode allows for 10 bits of addressing space and is + * reverse compatible with 7-bit addressing. + */ +#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE 0:0 +#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT (0x00000000) +#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_10BIT (0x00000001) +#define NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_DEFAULT NV402C_CTRL_I2C_FLAGS_ADDRESS_MODE_7BIT +/* + * NV402C_CTRL_I2C_FLAGS_SPEED_MODE + * A client uses this field to indicate the target speed at which the + * I2C master should attempt to drive the bus. The master may throttle + * its own speed for various reasons, and devices may slow the bus + * using clock-streching. Neither of these possibilities are + * considered failures. + */ +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE 4:1 +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_100KHZ (0x00000000) +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_200KHZ (0x00000001) +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_400KHZ (0x00000002) +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_33KHZ (0x00000003) +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_10KHZ (0x00000004) +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_3KHZ (0x00000005) +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_DEFAULT (0x00000006) +#define NV402C_CTRL_I2C_FLAGS_SPEED_MODE_300KHZ (0x00000007) + + +/* + * NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE + * A client uses this field to specify a transaction mode. + * Possible values are: + * NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL + * The default, this value indicates to use the normal I2C transaction + * mode which will involve read/write operations depending on client's + * needs. + * NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_PING + * This value specifies that the device only needs to be pinged. No need + * of performing a complete read/write transaction. This will send a + * single byte to the device to be pinged. On receiving an ACK, we will + * get a confirmation on the device's availability. + */ +#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE 11:10 +#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL (0x00000000) +#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_PING (0x00000001) +#define NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_DEFAULT NV402C_CTRL_I2C_FLAGS_TRANSACTION_MODE_NORMAL +/*! + * NV402C_CTRL_I2C_FLAGS_RESERVED + * A client must leave this field as 0, as it is reserved for future use. + */ +#define NV402C_CTRL_I2C_FLAGS_RESERVED 31:12 + +/*! + * The following defines specify WAR flags that can be specified during + * I2C Quick Read or Write command (Refer NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW). + * + * _NONE + * No workaround is needed. + * + * _TEST_PORT + * Use this flag to have the client sent a request to test a port instead + * of performing any transaction on it. Transaction type has to be + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW. + */ +#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS 0:0 +#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_NONE 0x00000000 +#define NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_TEST_PORT 0x00000001 + +/*! 
+ * The following defines specify WAR flags that can be specified during + * I2C Register Read or Write buffer command + * (Refer NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW) + * + * _NONE + * No workaround is needed. + * + * _SI1930 + * SI1930 microcontroller register read or write requested by a client. + * Transaction type has to be NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW. + * + * _PX3540 + * Register read from PX3540 or PX3544 device. Transaction type has to be + * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW and bWrite must be TRUE to + * indicate READ operation + */ +#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS 1:0 +#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_NONE 0x00000000 +#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_SI1930 0x00000001 +#define NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_PX3540 0x00000002 + +/*! + * The following defines specify WAR flags that can be specified during + * I2C buffer Read or Write to Multibyte Register + * (Refer NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW) + * + * _NONE + * No workaround is needed. + * + * _NO_AUTO_INC + * This value specifies that the device does not support auto-increment. + * Most devices allow you to write multiple bytes after specifying a + * register address, and the subsequent bytes will go to incremented + * addresses. Without auto-increment, we write a buffer of data as a + * sequence of address-register-value triplets separated by starts. + */ +#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS 0:0 +#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NONE 0x00000000 +#define NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NO_AUTO_INC 0x00000001 + +/* + * NV402C_CTRL_CMD_I2C_SYNC + * + * Perform a basic I2C transaction synchronously. + * + * portId + * This field must be specified by the client to indicate the logical + * port/bus for which the transaction is requested. The port identifier + * is one less than the value returned by + * NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID unless that value was 0 (the + * 'dynamic' port). For the 'dynamic' port, this should be 0xFF. Note + * that future versions of the API may obsolete use of the 'dynamic' port; + * please contact the RM if you begin using this portion of the API so we + * can help you migrate when the time comes. + * + * bIsWrite + * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + * + * flags + * This parameter specifies optional flags used to control certain modal + * features such as target speed and addressing mode. The currently + * defined fields are described previously; see NV402C_I2C_FLAGS_*. + * + * address + * The address of the I2C slave. The address should be shifted left by + * one. For example, the I2C address 0x50, often used for reading EDIDs, + * would be stored here as 0xA0. This matches the position within the + * byte sent by the master, as the last bit is reserved to specify the + * read or write direction. + * + * indexLength + * This required parameter specifies how many bytes to write as part of the + * first index. If zero is specified, then no index will be sent. + * + * index + * This parameter, required of the client if index is one or more, + * specifies the index to be written. The buffer should be arranged such + * that index[0] will be the first byte sent. 
+ * + * messageLength + * This parameter, required of the client, specifies the number of bytes to + * read or write from the slave after the index is written. + * + * pMessage + * This parameter, required of the client, specifies the data to be written + * to the slave. The buffer should be arranged such that pMessage[0] will + * be the first byte read or written. If the transaction is a read, then + * it will follow the combined format described in the I2C specification. + * If the transaction is a write, the message will immediately follow the + * index without a restart. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_STATE_IN_USE + * NV_ERR_GENERIC, if the I2C transaction fails. + */ +#define NV402C_CTRL_I2C_INDEXED_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV402C_CTRL_I2C_INDEXED_PARAMS { + NvU8 portId; + NvU8 bIsWrite; + NvU16 address; + NvU32 flags; + + NvU32 indexLength; + NvU8 index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX]; + + NvU32 messageLength; + NV_DECLARE_ALIGNED(NvP64 pMessage, 8); +} NV402C_CTRL_I2C_INDEXED_PARAMS; + +#define NV402C_CTRL_CMD_I2C_INDEXED (0x402c0102) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_INDEXED_PARAMS_MESSAGE_ID" */ + +/* + * NV402C_CTRL_CMD_I2C_GET_PORT_SPEED + * + * Returns information for the I2C ports. + * + * portSpeed + * This parameter is an output from the command and is ignored as an + * input. Each element contains the current I2C speed of the port. + * Note that the index into the info array is one less than the + * port identifier that would be returned from + * NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID; the port numbers here are + * 0-indexed as opposed to 1-indexed. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS { + NvU32 portSpeed[NV402C_CTRL_NUM_I2C_PORTS]; +} NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS; + +#define NV402C_CTRL_CMD_I2C_GET_PORT_SPEED (0x402c0103) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS_MESSAGE_ID" */ + +/* + * NV402C_CTRL_I2C_DEVICE_INFO + * + * This structure describes the basic I2C Device information. + * + * type + * This field return the type of device NV_DCB4X_I2C_DEVICE_TYPE_ + * i2cAddress + * This field contains the 7 bit/10 bit address of the I2C device. + * i2cLogicalPort + * This field contains the Logical port of the I2C device. + */ +typedef struct NV402C_CTRL_I2C_DEVICE_INFO { + NvU8 type; + NvU16 i2cAddress; + NvU8 i2cLogicalPort; + NvU8 i2cDevIdx; +} NV402C_CTRL_I2C_DEVICE_INFO; + +/* Maximum number of I2C devices in DCB */ +#define NV402C_CTRL_I2C_MAX_DEVICES 32 + +/* + * NV402C_CTRL_CMD_I2C_TABLE_GET_DEV_INFO + * + * RM Control to get I2C device info from the DCB I2C Devices Table. + * + * i2cDevCount + * The value of this parameter will give the number of + * I2C devices found in DCB. + * + * i2cDevInfo + * For each device found in DCB the control call will write the info + * in this parameter. 
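+ *
+ * A minimal usage sketch (illustrative only; rmControl(), hClient and hI2c
+ * are assumed placeholders for the caller's RM control dispatch path and
+ * handles, and are not part of this header):
+ *
+ *     NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS devInfo = { 0 };
+ *     status = rmControl(hClient, hI2c, NV402C_CTRL_CMD_I2C_TABLE_GET_DEV_INFO,
+ *                        &devInfo, sizeof(devInfo));
+ *     for (i = 0; i < devInfo.i2cDevCount; i++)
+ *     {
+ *         // devInfo.i2cDevInfo[i].type, .i2cAddress and .i2cLogicalPort
+ *         // describe one DCB I2C device entry.
+ *     }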
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS { + NvU8 i2cDevCount; + NV402C_CTRL_I2C_DEVICE_INFO i2cDevInfo[NV402C_CTRL_I2C_MAX_DEVICES]; +} NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS; + +#define NV402C_CTRL_CMD_I2C_TABLE_GET_DEV_INFO (0x402c0104) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS_MESSAGE_ID" */ + +/*! + * The IDs of each type of I2C command available. + */ +typedef enum NV402C_CTRL_I2C_TRANSACTION_TYPE { + /*! + * This transaction type is used to perform the Quick SMBus Read/write command + * on a slave device. No data is sent or received, just used to verify the + * presence of the device. + * Refer SMBus spec 2.0 (section 5.5.1 Quick Command) + * SMBus Quick Write : S Addr|Wr [A] P + * SMBus Quick Read : S Addr|Rd [A] P + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW = 0, + /*! + * This transaction type is used to perform the I2C byte read/write from/to + * a slave device. As per the spec last byte should be NA (Not Acknolwedged) + * by slave. + * Refer I2CBus spec 3.0 (section 9 Fig 11 and Fig 12) or Refer SMBus spec + * 2.0 (section 5.5.2 Send Byte and 5.5.3 Receive Byte). + * I2C Byte Write : S Addr|Wr [A] Data [NA] P + * I2C Byte Read : S Addr|Rd [A] Data NA P + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW = 1, + /*! + * This transaction type is used to perform the I2C block (buffer) + * read/write from/to a slave device. As per the spec last byte should be NA + * (Not Acknolwedged) by slave. + * Refer I2CBus spec 3.0 (section 9 Fig 11 and Fig 12) + * I2C Byte Write : S Addr|Wr [A] Data1 [A]...Data(N-1) [A] DataN [NA] P + * I2C Byte Read : S Addr|Rd [A] Data1 A...Data(N-1) A DataN NA P + * + * Distinction between I2C_BLOCK and SMBUS_BLOCK protocol: + * In I2C Block write it is the slave device (and in I2C Block read it's + * the master device) that determines the number of bytes to transfer by + * asserting the NAK at last bit before stop. This differs from the SMBus + * block mode write command in which the master determines the block + * write transfer size. In I2c Block read there is no limit to maximum size + * of data that could be transferred whereas in SMBus block it is restricted + * to 255 bytes (0xFF). + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW = 2, + /*! + * This transaction type is used to perform the I2C Buffer read/write + * from/to a register of a slave device. It does not send bytecount as + * part of data buffer. + * Not a part of SMBus spec. + * I2C Buffer Write : S Addr|Wr [A] cmd [A] Data1 [A]...DataN[A] P + * I2C Buffer Read : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] Data1 A... + * DataN-1 A DataN A P + * + * Distinction between SMBUS_BLOCK and I2C_BUFFER protocol: + * In SMBUS_BLOCK Read/write the first byte of data buffer contains the + * count size (The number of bytes to be transferred) and it is restricted + * to 255 bytes whereas in I2C_BUFFER, count size is not sent during the + * transfer and there is no restriction in terms of size. + * + * Distinction between I2C_BLOCK and I2C_BUFFER protocol: + * I2C_BUFFER takes the register address as argument whereas I2C_BLOCK does + * not have any register or command provision. + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW = 3, + /*! 
+ * This transaction type is used to perform the I2C byte read/write from/to + * a slave device + * Refer SMBus spec 2.0 (section 5.5.4 Write Byte and 5.5.5 Read Byte) + * SMBus Byte Write : S Addr|Wr [A] cmd [A] Data [A] P + * SMBus Byte Read : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] Data A P + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW = 4, + /*! + * This transaction type is used to perform the SMBus byte read/write + * from/to a register of a slave device + * Refer SMBus spec 2.0 (section 5.5.4 Write Word and 5.5.5 Read Word) + * SMBus Word Write : S Addr|Wr [A] cmd [A] DataLow [A] DataHigh [A] P + * SMBus Word Read : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] DataLow A + * DataHigh A P + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW = 5, + /*! + * This transaction type is used to perform the SMBus Block read/write + * from/to a register of a slave device + * Refer SMBus spec 2.0 (section 5.5.7 Block Write/Read) + * SMBus Block Write : S Addr|Wr [A] cmd [A] ByteCount [A] Data1 [A]... + * DataN-1 [A] DataN[A] P + * SMBus Block Read : S Addr|Wr [A] cmd [A] Sr Addr|Rd [A] ByteCount A + * Data1 A...DataN-1 A DataN A P + * + * Distinction between I2C_BLOCK and SMBUS_BLOCK protocol: + * In I2C Block write it is the slave device (and in I2C Block read it's + * the master device) that determines the number of bytes to transfer by + * asserting the NAK at last bit before stop. This differs from the SMBus + * block mode write/Read command in which the master determines the block + * write transfer size. In I2c Block read/Write there is no limit to maximum + * size of data that could be transferred whereas in SMBus block it is + * restricted to 255 bytes (0xFF). + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW = 6, + /*! + * This transaction type is used to perform the SMBus process call. It sends + * data and waits for the slave to return a value dependent on that data. + * The protocol is simply a SMBus write Word followed by a SMBus Read Word + * without the Read-Word command field and the Write-Word STOP bit. + * Note that there is no STOP condition before the repeated START condition, + * and that a NACK signifies the end of the read transfer. + * + * Refer SMBus spec 2.0 (section 5.5.6 Process Call) + * SMBus Process Call : S Addr|Wr [A] cmd [A] DataLow [A] DataHigh [A] + * Sr Addr|Rd [A] DataLow [A] DataHigh [NA] P + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL = 7, + /*! + * This transaction type is used to perform the SMBus Block Write Block Read + * process call. + * The block write-block read process call is a two-part message. The call + * begins with a slave address and a write condition. After the command code + * the host issues a write byte count (M) that describes how many more bytes + * will be written in the first part of the message. + * If a master has 6 bytes to send, the byte count field will have the value + * 6 (0000 0110b), followed by the 6 bytes of data. The write byte count (M) + * cannot be zero. + * The second part of the message is a block of read data beginning with a + * repeated start condition followed by the slave address and a Read bit. + * The next byte is the read byte count (N), which may differ from the write + * byte count (M). The read byte count (N) cannot be zero. The combined data + * payload must not exceed 32 bytes. 
+ * The byte length restrictions of this process call are summarized as + * follows: + * M >= 1 byte + * N >= 1 byte + * M + N <= 32 bytes + * Note that there is no STOP condition before the repeated START condition, + * and that a NACK signifies the end of the read transfer. + * + * Refer SMBus spec 2.0 (section 5.5.8 Block Write Block Read Process Call) + * SMBus Process Call : S Addr|Wr [A] cmd [A] ByteCount=M [A] Data1 [A]... + * DataN-1 [A] DataM[A] Sr Addr|Rd [A] ByteCount=N [A] + * Data1 [A]...DataN [NA] P + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL = 8, + /*! + * This transaction type is used to perform SMBus buffer read/write + * from/to multiple registers of a slave device known as Auto Increment. + * It is not a part of any standard I2C/SMBus spec but a feature of many + * SMBus devices like EEPROM. + * It is also used for reading a block of bytes from a designated register + * that is specified through the two Comm bytes.of a slave device or writing + * a block of bytes from a designated register of a slave device (Note : The + * command byte in this case could be 0, 2 or 4 Bytes) + * SMBus Multi-Byte Register Block Write : S Addr|Wr [A] cmd1 A cmd 2 [A]... + * cmdN [A] data1 [A] Data2 [A].....DataN [A] P + * SMBus Multi-Byte Register Block Read : S Addr|Rd [A] cmd1 A cmd 2 [A]... + * cmdN [A] data1 [A] Sr Addr [A] Data1 A Data2 A...DataN A P + * + * This transaction type could be also used for those devices which supports + * AUTO_INC. Even though it is frequently related to I2C/SMBus, automatic + * incrementation is not part of any I2C standard but rather a common + * feature found in many I2C devices. What it means is that the device + * maintains in internal pointer which is automatically incremented upon + * data read or write activities and which can be manually set to a fixed + * value. This comes in handy when storing larger amounts of data for + * instance in an ordinary I2C RAM or EEPROM. + * SMBus AUTO_INC Write : S Addr|Wr [A] cmd1 A Data1 [A] Data2 [A]... + * DataN [A] P + * SMBus AUTO_INC Read : S Addr|Rd [A] cmd1 A data1 [A] Sr Addr [A] Data1 A + * Data2 A...DataN A P + * If the device does not support AUTO_INC set warFlags of + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW to + * NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_NO_AUTO_INC. + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW = 9, + /*! + * This transaction type is used to perform the EDID read via DDC. + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC = 10, +} NV402C_CTRL_I2C_TRANSACTION_TYPE; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW. + * + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW { + /*! + * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + */ + NvBool bWrite; + /*! + * Transaction specific flags + * (see NV402C_CTRL_I2C_SMBUS_QUICK_RW_WAR_FLAGS_*). + */ + NvU32 warFlags; +} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW. + * + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW { + /*! 
+ * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + */ + NvBool bWrite; + /*! + * The main message data. + */ + NvU8 message; +} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW + * + * Specifies the structure of data filled by the client for I2C transaction + * when the transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW. + * + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW { + /*! + * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + */ + NvBool bWrite; + /*! + * This parameter specifies the number of bytes to read or + * write from the slave after the register address is written. + */ + NvU32 messageLength; + /*! + * The main message data. + */ + NV_DECLARE_ALIGNED(NvP64 pMessage, 8); +} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW. + * + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW { + /*! + * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + */ + NvBool bWrite; + /*! + * The address of the register. + */ + NvU8 registerAddress; + /*! + * The main message data. + */ + NvU8 message; +} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW. + * + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW { + /*! + * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + */ + NvBool bWrite; + /*! + * The address of the register. + */ + NvU8 registerAddress; + /*! + * The main message data. + */ + NvU16 message; +} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BUFFER_RW + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BUFFER_RW. + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW { + /*! + * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + */ + NvBool bWrite; + /*! + * Transaction specific flags to be set (see + * NV_NV402C_CTRL_I2C_BUFFER_RW_WAR_FLAGS_*) + */ + NvU32 warFlags; + /*! + * This parameter specifies how many bytes to write as part of the + * register address. If zero is specified, then no index will be sent. + */ + NvU8 registerAddress; + /*! + * This parameter specifies the number of bytes to read or + * write from the slave after the register address is written. + */ + NvU32 messageLength; + /*! + * The main message data. + * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX] + */ + NV_DECLARE_ALIGNED(NvP64 pMessage, 8); +} NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW. 
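+ *
+ * A minimal fill-in sketch for an SMBus block read issued through
+ * NV402C_CTRL_CMD_I2C_TRANSACTION and the enclosing
+ * NV402C_CTRL_I2C_TRANSACTION_PARAMS (defined below). Illustrative only;
+ * allocating the read buffer and converting it to an NvP64 value is the
+ * caller's responsibility:
+ *
+ *     NV402C_CTRL_I2C_TRANSACTION_PARAMS p = { 0 };
+ *     p.portId        = 0;      // 0-indexed logical port
+ *     p.deviceAddress = addr;   // slave address, per the conventions above
+ *     p.transType     = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW;
+ *     p.transData.smbusBlockData.bWrite          = NV_FALSE;  // read
+ *     p.transData.smbusBlockData.registerAddress = 0x00;
+ *     p.transData.smbusBlockData.messageLength   = 32;
+ *     p.transData.smbusBlockData.pMessage        = pBuffer;   // NvP64 buffer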
+ */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW { + /*! + * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + */ + NvBool bWrite; + /*! + * This parameter specifies how many bytes to write as part of the + * register address. If zero is specified, then no index will be sent. + */ + NvU8 registerAddress; + /*! + * This parameter specifies the number of bytes to read or + * write from the slave after the register address is written. + */ + NvU32 messageLength; + /*! + * The main message data. + * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX] + */ + NV_DECLARE_ALIGNED(NvP64 pMessage, 8); +} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL. + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL { + /*! + * This parameter specifies how many bytes to write as part of the + * register address. If zero is specified, then no index will be sent. + */ + NvU8 registerAddress; + /*! + * The message data to be written to the slave. + */ + NvU16 writeMessage; + /*! + * The message data to be read from the slave. + */ + NvU16 readMessage; +} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL. + * + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL { + /*! + * This parameter specifies how many bytes to write as part of the + * register address. If zero is specified, then no index will be sent. + */ + NvU8 registerAddress; + /*! + * This parameter specifies the number of bytes to write the the slave + * after the writeByteCount is sent to the slave. + */ + NvU32 writeMessageLength; + /*! + * The message buffer to be written to the slave. + * C form: NvU8 writeMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX] + */ + NvU8 writeMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX]; + /*! + * This parameter specifies the number of bytes to read from the slave + * after the readByteCount is sent to the slave. + */ + NvU32 readMessageLength; + /*! + * The message buffer to be read from the slave. + * C form: NvU8 readMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX] + */ + NvU8 readMessage[NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX]; +} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW. + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW { + /*! + * This field must be specified by the client to indicate whether the + * command is a write (TRUE) or a read (FALSE). + */ + NvBool bWrite; + /*! + * Transaction specific flags (see + * NV402C_CTRL_I2C_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW_WAR_FLAGS_*) + */ + NvU32 warFlags; + /*! + * This parameter specifies how many bytes to write as part of the + * register address. If zero is specified, then no index will be sent. + */ + NvU32 indexLength; + /*! 
+ * Optional indexing data; aka register address. + * C form: NvU8 index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX] + */ + NvU8 index[NV402C_CTRL_I2C_INDEX_LENGTH_MAX]; + /*! + * This parameter specifies the number of bytes to read or + * write from the slave after the register address is written. + */ + NvU32 messageLength; + /*! + * The main message data. + * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX] + */ + NV_DECLARE_ALIGNED(NvP64 pMessage, 8); +} NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC + * + * Specifies the structure of data filled by the client for I2C transaction + * when the.transaction type is NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC. + * + */ +typedef struct NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC { + /*! + * The segment number of the EDID block which is to be read. + */ + NvU8 segmentNumber; + /*! + * The address of the register. + */ + NvU8 registerAddress; + /*! + * This parameter specifies the number of bytes to read or + * write from the slave after the register address is written. + */ + NvU32 messageLength; + /*! + * The main message data. + * C form: NvU8 message[NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX] + */ + NV_DECLARE_ALIGNED(NvP64 pMessage, 8); +} NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC; + +/*! + * NV402C_CTRL_I2C_TRANSACTION_DATA + * + * This union encapsulates the transaction data corresponding to the + * transaction type enlisted above. + */ +typedef union NV402C_CTRL_I2C_TRANSACTION_DATA { + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW. + */ + NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW smbusQuickData; + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW. + */ + NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW i2cByteData; + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW. + */ + NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW i2cBlockData, 8); + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW. + */ + NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW i2cBufferData, 8); + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW. + */ + NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW smbusByteData; + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW. + */ + NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW smbusWordData; + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW. + */ + NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW smbusBlockData, 8); + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL. + */ + NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL smbusProcessData; + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL. + */ + NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL smbusBlockProcessData; + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW. 
+ */ + NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW smbusMultibyteRegisterData, 8); + /*! + * This transaction data is to be filled when transaction type is + * NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC. + */ + NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC edidData, 8); +} NV402C_CTRL_I2C_TRANSACTION_DATA; + + +/*! + * NV402C_CTRL_I2C_TRANSACTION_PARAMS + * + * The params data structure for NV402C_CTRL_CMD_I2C_TRANSACTION. + */ +#define NV402C_CTRL_I2C_TRANSACTION_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV402C_CTRL_I2C_TRANSACTION_PARAMS { + /*! + * The logical port ID. + */ + NvU8 portId; + /*! + * This parameter specifies optional flags used to control certain modal + * features such as target speed and addressing mode. The currently + * defined fields are described previously; see NV402C_CTRL_I2C_FLAGS_* + */ + NvU32 flags; + /*! + * The address of the I2C slave. + */ + NvU16 deviceAddress; + /*! + * The transaction type. + */ + NV402C_CTRL_I2C_TRANSACTION_TYPE transType; + /*! + * The transaction data corresponding transaction type. + */ + NV_DECLARE_ALIGNED(NV402C_CTRL_I2C_TRANSACTION_DATA transData, 8); +} NV402C_CTRL_I2C_TRANSACTION_PARAMS; + +#define NV402C_CTRL_CMD_I2C_TRANSACTION (0x402c0105) /* finn: Evaluated from "(FINN_NV40_I2C_I2C_INTERFACE_ID << 8) | NV402C_CTRL_I2C_TRANSACTION_PARAMS_MESSAGE_ID" */ + + +/* _ctrl402c_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl503c.h b/src/common/sdk/nvidia/inc/ctrl/ctrl503c.h new file mode 100644 index 000000000..6f3af2f0e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl503c.h @@ -0,0 +1,203 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl503c.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl503c/ctrl503cbase.h" + +/* + * NV503C_CTRL_CMD_REGISTER_VA_SPACE + * + * This command registers the specified GPU VA space with the given + * NV50_THIRD_PARTY_P2P object, and returns a token that + * uniquely identifies the VA space within the object's parent + * client. + * + * Its parameter structure has the following fields: + * + * hVASpace + * This field specifies the GPU VA space to be registered + * with the third-party P2P object. 
+ * + * vaSpaceToken + * Upon successful completion of the regristration attempt, + * this field holds the new VA space identifier. + * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_INVALID_OBJECT_HANDLE + * NVOS_STATUS_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_RESOURCES + */ +#define NV503C_CTRL_CMD_REGISTER_VA_SPACE (0x503c0102) /* finn: Evaluated from "(FINN_NV50_THIRD_PARTY_P2P_P2P_INTERFACE_ID << 8) | NV503C_CTRL_REGISTER_VA_SPACE_PARAMS_MESSAGE_ID" */ + +#define NV503C_CTRL_REGISTER_VA_SPACE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV503C_CTRL_REGISTER_VA_SPACE_PARAMS { + NvHandle hVASpace; + NV_DECLARE_ALIGNED(NvU64 vaSpaceToken, 8); +} NV503C_CTRL_REGISTER_VA_SPACE_PARAMS; + + +/* + * NV503C_CTRL_CMD_UNREGISTER_VA_SPACE + * + * This command unregisters (a previously registered) GPU VA space. + * + * Its parameter structure has the following field: + * + * hVASpace + * This field specifies the GPU VA space to be + * unregistered. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV503C_CTRL_CMD_UNREGISTER_VA_SPACE (0x503c0103) /* finn: Evaluated from "(FINN_NV50_THIRD_PARTY_P2P_P2P_INTERFACE_ID << 8) | NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS_MESSAGE_ID" */ + +#define NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS { + NvHandle hVASpace; +} NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS; + + +/* + * NV503C_CTRL_CMD_REGISTER_VIDMEM + * + * This command registers a video memory allocation with the given + * NV50_THIRD_PARTY_P2P object. Registration of video memory + * allocations is required if they are to be made accessible via the + * third-party P2P infrastructure. + * + * The vidmem allocation is made available to the users of the third-party P2P + * APIs. It's exposed at the range specified by address and size starting at the + * specified offset within the physical allocation. The same physical memory is + * exposed as the NvRmMapMemoryDma() API would make accessible to the GPU if + * used with equivalent parameters. Notably this API doesn't create any virtual + * mappings nor verifies that any mappings are present, it only registers the + * memory for the purpose of the third-party P2P infrastructure. + * + * The address range specified by address and size cannot overlap any previously + * registered ranges for the given NV50_THIRD_PARTY_P2P object. + * + * Its parameter structure has the following field: + * + * hMemory + * This field specifies the video memory allocation to be + * registered with the third-party P2P object. + * + * address + * The address to register the video memory allocation at. Has to be + * aligned to 64K. + * + * size + * Size in bytes, has to be non-0 and aligned to 64K. Offset + size cannot + * be larger than the vidmem allocation. + * + * offset + * Offset within the video memory allocation where the registered address + * range starts. Has to be aligned to 64K. 
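+ *
+ * A minimal usage sketch, with hypothetical handle and address values (the
+ * control-call mechanism itself is outside this header):
+ *
+ *     NV503C_CTRL_REGISTER_VIDMEM_PARAMS p = { 0 };
+ *     p.hMemory = hVidmem;               // handle of the vidmem allocation
+ *     p.address = 0x200000000ULL;        // 64K-aligned registration address
+ *     p.size    = 2 * 1024 * 1024;       // non-zero and 64K-aligned
+ *     p.offset  = 0;                     // start of the allocation
+ *     // submit with NV503C_CTRL_CMD_REGISTER_VIDMEM on the
+ *     // NV50_THIRD_PARTY_P2P object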
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INSUFFICIENT_RESOURCES + */ +#define NV503C_CTRL_CMD_REGISTER_VIDMEM (0x503c0104) /* finn: Evaluated from "(FINN_NV50_THIRD_PARTY_P2P_P2P_INTERFACE_ID << 8) | NV503C_CTRL_REGISTER_VIDMEM_PARAMS_MESSAGE_ID" */ + +#define NV503C_CTRL_REGISTER_VIDMEM_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV503C_CTRL_REGISTER_VIDMEM_PARAMS { + NvHandle hMemory; + NV_DECLARE_ALIGNED(NvU64 address, 8); + NV_DECLARE_ALIGNED(NvU64 size, 8); + NV_DECLARE_ALIGNED(NvU64 offset, 8); +} NV503C_CTRL_REGISTER_VIDMEM_PARAMS; + + +/* + * NV503C_CTRL_CMD_UNREGISTER_VIDMEM + * + * This command unregisters (a previously registered) video memory + * allocation. + * + * Its parameter structure has the following field: + * + * hMemory + * This field specifies the video memory allocation to be + * unregistered. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV503C_CTRL_CMD_UNREGISTER_VIDMEM (0x503c0105) /* finn: Evaluated from "(FINN_NV50_THIRD_PARTY_P2P_P2P_INTERFACE_ID << 8) | NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS_MESSAGE_ID" */ + +#define NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS { + NvHandle hMemory; +} NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS; + +/* + * NV503C_CTRL_CMD_REGISTER_PID + * + * This command registers the PID of the process that allocated + * the RM client identified by the hClient argument with the + * third-party P2P object, granting this process access to any + * underlying video memory. + * + * Its parameter structure has the following field: + * + * hClient + * This field specifies the client id and should be the handle + * to a valid NV01_ROOT_USER instance. + * + * Possible status values returned are: + * NVOS_STATUS_SUCCES + * NV_ERR_INVALID_ARGUMENT + */ +#define NV503C_CTRL_CMD_REGISTER_PID (0x503c0106) /* finn: Evaluated from "(FINN_NV50_THIRD_PARTY_P2P_P2P_INTERFACE_ID << 8) | NV503C_CTRL_REGISTER_PID_PARAMS_MESSAGE_ID" */ + +#define NV503C_CTRL_REGISTER_PID_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV503C_CTRL_REGISTER_PID_PARAMS { + NvHandle hClient; +} NV503C_CTRL_REGISTER_PID_PARAMS; + +/* _ctrl503c_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl503c/ctrl503cbase.h b/src/common/sdk/nvidia/inc/ctrl/ctrl503c/ctrl503cbase.h new file mode 100644 index 000000000..ee7d8b60c --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl503c/ctrl503cbase.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl503c/ctrl503cbase.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV50_THIRD_PARTY_P2P control commands and parameters */ +#define NV503C_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x503C,NV503C_CTRL_##cat,idx) + +/* NV50_THIRD_PARTY_P2P command categories (6bits) */ +#define NV503C_CTRL_RESERVED (0x00) +#define NV503C_CTRL_P2P (0x01) + +/* + * NV503C_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV503C_CTRL_CMD_NULL (0x503c0000) /* finn: Evaluated from "(FINN_NV50_THIRD_PARTY_P2P_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl503cbase_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl506f.h b/src/common/sdk/nvidia/inc/ctrl/ctrl506f.h new file mode 100644 index 000000000..4297ac870 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl506f.h @@ -0,0 +1,101 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl506f.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV50_GPFIFO control commands and parameters */ + +#define NV506F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x506F, NV506F_CTRL_##cat, idx) + +/* NV50_GPFIFO command categories (6bits) */ +#define NV506F_CTRL_RESERVED (0x00) +#define NV506F_CTRL_GPFIFO (0x01) +#define NV506F_CTRL_EVENT (0x02) + +/* + * NV506F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV506F_CTRL_CMD_NULL (0x506f0000) /* finn: Evaluated from "(FINN_NV50_CHANNEL_GPFIFO_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* + * NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL + * + * This command resets a channel which was isolated previously by RC recovery. + * + * exceptType + * This input parameter specifies the type of RC error that occurred. 
See the + * description of the ROBUST_CHANNEL_* values in nverror.h for valid exceptType + * values. info32 field of the error notifier is set with the exceptType when + * the error notifier is signaled. + * + * engineID + * This input parameter specifies the engine to be reset. See the description + * of the NV2080_ENGINE_TYPE values in cl2080.h for valid engineID values. info16 + * field of the error notifier is set with the engineID when the error notifier is + * signaled. + * + * Possible status return values are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + + + + +#define NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL (0x506f0105) /* finn: Evaluated from "(FINN_NV50_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_MESSAGE_ID" */ + +/* + * NV506F_CTRL_CMD_EVENT_SET_TRIGGER (deprecated on Fermi+) + * + * This command triggers a software event for the associated channel. + * This command accepts no parameters. + * + * Possible status values returned are: + * NV_OK + */ +// #define NV506F_CTRL_CMD_EVENT_SET_TRIGGER NV506F_CTRL_CMD(EVENT, 0x09) + +#define NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS { + NvU32 exceptType; + NvU32 engineID; +} NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS;/* _ctrl506f.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h new file mode 100644 index 000000000..f795e866d --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070base.h @@ -0,0 +1,70 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl5070/ctrl5070base.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV5070_DISPLAY control commands and parameters */ + +#define NV5070_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x5070, NV5070_CTRL_##cat, idx) + +/* Display command categories (6bits) */ +#define NV5070_CTRL_RESERVED (0x00) +#define NV5070_CTRL_CHNCTL (0x01) +#define NV5070_CTRL_RG (0x02) +#define NV5070_CTRL_SEQ (0x03) +#define NV5070_CTRL_OR (0x04) +#define NV5070_CTRL_INST (0x05) +#define NV5070_CTRL_VERIF (0x06) +#define NV5070_CTRL_SYSTEM (0x07) +#define NV5070_CTRL_EVENT (0x09) + +/* + * NV5070_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV5070_CTRL_CMD_NULL (0x50700000) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + +// This struct must be the first member of all +// 5070 control calls +typedef struct NV5070_CTRL_CMD_BASE_PARAMS { + NvU32 subdeviceIndex; +} NV5070_CTRL_CMD_BASE_PARAMS; + +/* _ctrl5070base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h new file mode 100644 index 000000000..fb40cb8bc --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070chnc.h @@ -0,0 +1,1184 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl5070/ctrl5070chnc.finn +// + + + + +#include "ctrl/ctrl5070/ctrl5070base.h" +#include "ctrl5070common.h" +#include "nvdisptypes.h" + +#define NV5070_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD 2 + + + +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_NONE (0x00000000) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI (NVBIT(0)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF (NVBIT(1)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA (NVBIT(2)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK (NVBIT(3)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK (NVBIT(4)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY (NVBIT(5)) +#define NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT (NVBIT(6)) + +#define NV5070_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV5070_CTRL_IDLE_CHANNEL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + + NvU32 desiredChannelStateMask; + NvU32 accelerators; // For future expansion. Not yet implemented + NvU32 timeout; // For future expansion. Not yet implemented + NvBool restoreDebugMode; +} NV5070_CTRL_IDLE_CHANNEL_PARAMS; + +/* + * NV5070_CTRL_CMD_STOP_OVERLAY + * + * This command tries to turn the overlay off ASAP. + * + * channelInstance + * This field indicates which of the two instances of the overlay + * channel the cmd is meant for. + * + * notifyMode + * This field indicates the action RM should take once the overlay has + * been successfully stopped. The options are (1) Set a notifier + * (2) Set the notifier and generate and OS event + * + * hNotifierCtxDma + * Handle to the ctx dma for the notifier that must be written once + * overlay is stopped. The standard NvNotification notifier structure + * is used. + * + * offset + * Offset within the notifier context dma where the notifier begins + * Offset must be 16 byte aligned. + * + * hEvent + * Handle to the event that RM must use to awaken the client when + * notifyMode is WRITE_AWAKEN. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT: Invalid notify mode + * NV_ERR_INVALID_CHANNEL: When the overlay is unallocated + * NV_ERR_INVALID_OWNER: Callee isn't the owner of the channel + * NV_ERR_INVALID_OBJECT_HANDLE: Notif ctx dma not found + * NV_ERR_INVALID_OFFSET: Bad offset within notif ctx dma + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_TIMEOUT: RM timedout waiting to inject methods + */ +#define NV5070_CTRL_CMD_STOP_OVERLAY (0x50700102) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE (0x00000000) +#define NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE_AWAKEN (0x00000001) + +#define NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelInstance; + NvU32 notifyMode; + NvHandle hNotifierCtxDma; + NvU32 offset; + NV_DECLARE_ALIGNED(NvP64 hEvent, 8); +} NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS; + + + +/* + * NV5070_CTRL_CMD_IS_MODE_POSSIBLE + * + * This command is used by DD to determine whether or not a given mode + * is possible given the current nvclk, mclk, dispclk and potentially some + * other parameters that are normally hidden from it. 
All the parameters + * except IsPossible (output), Force422(output), MinPstate (input/output), + * minPerfLevel (output), CriticalWatermark (output), worstCaseMargin (output), + * and worstCaseDomain (output) params are supplied by the caller. + * + * HeadActive + * Whether or not the params for this head are relevant. + * + * PixelClock + * Frequency: Pixel clk frequency in KHz. + * Adj1000Div1001: 1000/1001 multiplier for pixel clock. + * + * RasterSize + * Width: Total width of the raster. Also referred to as HTotal. + * Height: Total height of the raster. Also referred to as VTotal. + * + * RasterBlankStart + * X: Start of horizontal blanking for the raster. + * Y: Start of vertical blanking for the raster. + * + * RasterBlankEnd + * X: End of horizontal blanking for the raster. + * Y: End of vertical blanking for the raster. + * + * RasterVertBlank2 + * YStart: Start of second blanking for second field for an + * interlaced raster. This field is irrelevant when raster is + * progressive. + * YEnd: End of second blanking for second field for an + * interlaced raster. This field is irrelevant when raster is + * progressive. + * + * Control + * RasterStructure: Whether the raster ir progressive or interlaced. + * + * OutputScaler + * VerticalTaps: Vertical scaler taps. + * HorizontalTaps: Horizontal scaler taps. + * Force422: Whether OutputScaler is operating in 422 mode or not. + * + * ViewportSizeOut + * Width: Width of output viewport. + * Height: Height of output viewport. + * Both the above fields are irrelevant for G80. + * + * ViewportSizeOutMin + * Width: Minimum possible/expected width of output viewport. + * Height: Minimum possible/expected height of output viewport. + * + * ViewportSizeIn + * Width: Width of input viewport. + * Height: Height of input viewport. + * + * Params + * Format: Core channel's pixel format. See the enumerants following + * the variable declaration for possible options. + * SuperSample: Whether to use X1AA or X4AA in core channel. + * This parameter is ignored for G80. + * + * BaseUsageBounds + * Usable: Whether or not the base channel is expected to be used. + * PixelDepth: Maximum pixel depth allowed in base channel. + * SuperSample: Whether or not X4AA is allowed in base channel. + * BaseLutUsage: Base LUT Size + * OutputLutUsage: Output LUT size + * + * OverlayUsageBounds + * Usable: Whether or not the overlay channel is expected to be used. + * PixelDepth: Maximum pixel depth allowed in overlay channel. + * OverlayLutUsage: Overlay LUT Size + * + * BaseLutLo + * Enable: Specifies Core Channel's Base LUT is enable or not. + * Mode: Specifies the LUT Mode. + * NeverYieldToBase: Specifies whether NEVER_YIELD_TO_BASE is enabled or not. + * + * OutputLutLo + * Enable: Specifies Core Channel's Output LUT is enable or not. + * Mode: Specifies the LUT Mode. + * NeverYieldToBase: Specifies whether NEVER_YIELD_TO_BASE is enabled or not. + * + * outputResourcePixelDepthBPP + * Specifies the output pixel depth with scaler mode. + * + * CriticalWatermark + * If MinPState is set to _NEED_MIN_PSTATE, this will return the critical + * watermark level at the minimum Pstate. Otherwise, this will return + * the critical watermark at the level that the IMP calculations are + * otherwise performed at. + * + * pixelReplicateMode + * Specifies the replication mode whether it is X2 or X4. Need to set the parameter + * to OFF if there is no pixel replication. + * + * numSSTLinks + * Number of Single Stream Transport links which will be used by the + * SOR. 
"0" means to use the number indicated by the most recent + * NV0073_CTRL_CMD_DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST call. + * + * RequestedOperation + * This parameter is used to determine whether + * 1. DD is simplying querying whether or not the specified mode is + * possible (REQUESTED_OPER = _QUERY) or + * 2. DD is about to set the specified mode and RM should make + * appropriate preparations to make the mode possible. DD should + * never pass in a mode that was never indicated by RM as possible + * when DD queried for the possibility of the mode. This + * corresponds to REQUESTED_OPER = _PRE_MODESET. + * 3. DD just finished setting the specified mode. RM can go ahead + * and make changes like lowering the perf level if desired. This + * corresponds to REQUESTED_OPER = _POST_MODESET. This parameter is + * useful when we are at a higher perf level in a mode that's not + * possible at a lower perf level and want to go to a mode that is + * possible even at a lower perf level. In such cases, lowering + * perf level before modeset is complete is dangerous as it will + * cause underflow. RM will wait until the end of modeset to lower + * the perf level. + * + * options + * Specifies a bitmask for options. + * NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN + * Tells IMP to calculate worstCaseMargin and worstCaseDomain. + * + * IsPossible + * This is the first OUT param for this call. It indicates whether + * or not the current mode is possible. + * + * MinPState + * MinPState is an IO (in/out) variable; it gives the minimum p-state + * value at which the mode is possible on a PStates 2.0 system if the + * parameter is initialized by the caller with _NEED_MIN_PSTATE. If + * _NEED_MIN_PSTATE is not specified, IMP query will just run at the + * max available perf level and return results for that pstate. + * + * If the minimum pstate is required, then MasterLockMode, + * MasterLockPin, SlaveLockMode, and SlaveLockPin must all be + * initialized. + * + * On a PStates 3.0 system, the return value for MinPState is + * undefined, but minPerfLevel can return the minimum IMP v-pstate. + * + * minPerfLevel + * On a PStates 3.0 system, minPerfLevel returns the minimum IMP + * v-pstate at which the mode is possible. On a PStates 2.0 system, + * minPerfLevel returns the minimum perf level at which the mode is + * possible. + * + * minPerfLevel is valid only if MinPState is initialized to + * _NEED_MIN_PSTATE. + * + * worstCaseMargin + * Returns the ratio of available bandwidth to required bandwidth, + * multiplied by NV5070_CTRL_IMP_MARGIN_MULTIPLIER. Available + * bandwidth is calculated in the worst case bandwidth domain, i.e., + * the domain with the least available margin. Bandwidth domains + * include the IMP-relevant clock domains, and possibly other virtual + * bandwidth domains such as AWP. + * + * Note that IMP checks additional parameters besides the bandwidth + * margins, but only the bandwidth margin is reported here, so it is + * possible for a mode to have a more restrictive domain that is not + * reflected in the reported margin result. + * + * This result is not guaranteed to be valid if the mode is not + * possible. + * + * Note also that the result is generally calculated for the highest + * pstate possible (usually P0). But if _NEED_MIN_PSTATE is specified + * with the MinPState parameter, the result will be calculated for the + * min possible pstate (or the highest possible pstate, if the mode is + * not possible). 
+ * + * The result is valid only if + * NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN is set in + * "options". + * + * worstCaseDomain + * Returns a short text string naming the domain for the margin + * returned in "worstCaseMargin". See "worstCaseMargin" for more + * information. + * + * bUseCachedPerfState + * Indicates that RM should use cached values for the fastest + * available perf level (v-pstate for PStates 3.0 or pstate for + * PStates 2.0) and dispclk. This feature allows the query call to + * execute faster, and is intended to be used, for example, during + * mode enumeration, when many IMP query calls are made in close + * succession, and perf conditions are not expected to change between + * query calls. When IMP has not been queried recently, it is + * recommended to NOT use cached values, in case perf conditions have + * changed and the cached values no longer reflect the current + * conditions. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_GENERIC + * + * Assumptions/Limitations: + * - If the caller sends any methods to alter the State Cache, before calling of + * the following functions: + * NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY_USE_SC + * NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC + * NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC + * the caller must repeatedly issue NV5070_CTRL_CMD_GET_CHANNEL_INFO, and delay until the + * returned channelState is either: + * NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_IDLE, + * NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_WRTIDLE, or + * NV5070_CTRL_CMD_GET_CHANNEL_INFO_STATE_EMPTY. + * This ensures that all commands have reached the State Cache before RM reads + * them. + * + * + */ +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE (0x50700109) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY_USE_SC (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC (0x00000004) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC (0x00000005) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_SUPERVISOR (0x00000007) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN (0x00000001) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_NO (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_YES (0x00000001) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_UNDEFINED (0x00000000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P0 (0x00000001) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P1 (0x00000002) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P2 (0x00000004) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P3 (0x00000008) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P8 (0x00000100) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P10 (0x00000400) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P12 (0x00001000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P15 (0x00008000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_MAX NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P15 +#define NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE (0x10101010) +#define 
NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE_DEFAULT (0x00000000) + +#define NV5070_CTRL_IMP_MARGIN_MULTIPLIER (0x00000400) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_NO (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_YES (0x00000001) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_DISPLAY_ID_SKIP_IMP_OUTPUT_CHECK (0xAAAAAAAA) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_DEFAULT (0x00000000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 (0x00000001) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 (0x00000002) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 (0x00000003) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 (0x00000004) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 (0x00000005) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 (0x00000006) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 (0x00000007) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 (0x00000008) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 (0x00000009) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_NO (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_YES (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_PROGRESSIVE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_INTERLACED (0x00000001) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_1 (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_2 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_3 (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_3_ADAPTIVE (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_VERTICAL_TAPS_5 (0x00000004) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_1 (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_2 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_SCALER_HORIZONTAL_TAPS_8 (0x00000002) + +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_SCALER_FORCE422_MODE_DISABLE (0x00000000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_SCALER_FORCE422_MODE_ENABLE (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_I8 (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_VOID16 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_VOID32 (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_RF16_GF16_BF16_AF16 (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8 (0x00000004) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A2B10G10R10 (0x00000005) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8B8G8R8 (0x00000006) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_R5G6B5 (0x00000007) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A1R5G5B5 (0x00000008) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X1AA (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X4AA (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_NO (0x00000001) +#define 
NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_YES (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_8 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_16 (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_32 (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64 (0x00000004) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X1AA (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X4AA (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_NONE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_257 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_BASE_LUT_USAGE_1025 (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_NONE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_257 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_OUTPUT_LUT_USAGE_1025 (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_NO (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_YES (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_USE_CURRENT (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_16 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32 (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_64 (0x00000003) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_NONE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_257 (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_OVERLAY_LUT_USAGE_1025 (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_ENABLE_DISABLE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_ENABLE_ENABLE (0x00000001) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_LORES (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_HIRES (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000004) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000005) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000006) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000007) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_ENABLE_DISABLE (0x00000000) +#define 
NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_ENABLE_ENABLE (0x00000001) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_LORES (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_HIRES (0x00000001) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE (0x00000002) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE (0x00000003) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000004) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE (0x00000005) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE (0x00000006) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE (0x00000007) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_DISABLE (0x00000000) +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OUTPUT_LUT_LO_NEVER_YIELD_TO_BASE_ENABLE (0x00000001) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X4 (0x00000002) + +#define NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + struct { + NvU32 HeadActive; + struct { + NvU32 Frequency; + + NvU32 Adj1000Div1001; + } PixelClock; + + struct { + NvU32 Width; + NvU32 Height; + } RasterSize; + + struct { + NvU32 X; + NvU32 Y; + } RasterBlankStart; + + struct { + NvU32 X; + NvU32 Y; + } RasterBlankEnd; + + struct { + NvU32 YStart; + NvU32 YEnd; + } RasterVertBlank2; + + struct { + NvU32 Structure; +/* + * Note: For query calls, the lock modes and lock pins are used only if the min + * pstate is required (i.e., if MinPState is set to + * NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE). + */ + NV_DISP_LOCK_MODE MasterLockMode; + NV_DISP_LOCK_PIN MasterLockPin; + NV_DISP_LOCK_MODE SlaveLockMode; + NV_DISP_LOCK_PIN SlaveLockPin; + } Control; + + struct { + NvU32 VerticalTaps; + NvU32 HorizontalTaps; + NvBool Force422; + } OutputScaler; + + struct { + NvU32 Width; + NvU32 Height; + } ViewportSizeOut; + + struct { + NvU32 Width; + NvU32 Height; + } ViewportSizeOutMin; + + struct { + NvU32 Width; + NvU32 Height; + } ViewportSizeOutMax; + + struct { + NvU32 Width; + NvU32 Height; + } ViewportSizeIn; + + struct { + NvU32 Format; + NvU32 SuperSample; + } Params; + + struct { + NvU32 Usable; + NvU32 PixelDepth; + NvU32 SuperSample; + NvU32 BaseLutUsage; + NvU32 OutputLutUsage; + } BaseUsageBounds; + + struct { + NvU32 Usable; + NvU32 PixelDepth; + NvU32 OverlayLutUsage; + } OverlayUsageBounds; + + struct { + NvBool Enable; + NvU32 Mode; + NvBool NeverYieldToBase; + } BaseLutLo; + + struct { + NvBool Enable; + NvU32 Mode; + NvBool NeverYieldToBase; + } OutputLutLo; + + NvU32 displayId[NV5070_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD]; + NvU32 outputResourcePixelDepthBPP; + + NvU32 CriticalWatermark; // in pixels + + } Head[NV5070_CTRL_CMD_MAX_HEADS]; + + struct { + NvU32 owner; + NvU32 protocol; + } Dac[NV5070_CTRL_CMD_MAX_DACS]; + + struct { +// +// owner field is deprecated. In the future, all client calls should set +// ownerMask and bUseSorOwnerMask. bUseSorOwnerMask must be set in order +// to use ownerMask. 
+// + NvU32 owner; + NvU32 ownerMask; // Head mask owned this sor + + NvU32 protocol; + NvU32 pixelReplicateMode; + + NvU8 numSSTLinks; + } Sor[NV5070_CTRL_CMD_MAX_SORS]; + + NvBool bUseSorOwnerMask; + + struct { + NvU32 owner; + NvU32 protocol; + } Pior[NV5070_CTRL_CMD_MAX_PIORS]; + + + NvU32 RequestedOperation; +// This argument is for VERIF and INTERNAL use only + NvU32 options; + NvU32 IsPossible; + NvU32 MinPState; + + NvU32 minPerfLevel; +// +// Below are the possible Output values for MinPState variable. +// Lower the p-state value higher the power consumption; if no p-states are defined on chip +// then it will return as zero. +// + +// +// Below are the possible input values for MinPstate Variable, by default it calculate +// mode is possible or not at max available p-state and return the same state in that variable. +// + NvU32 worstCaseMargin; + +// +// The calculated margin is multiplied by a constant, so that it can be +// represented as an integer with reasonable precision. "0x400" was chosen +// because it is a power of two, which might allow some compilers/CPUs to +// simplify the calculation by doing a shift instead of a multiply/divide. +// (And 0x400 is 1024, which is close to 1000, so that may simplify visual +// interpretation of the raw margin value.) +// + char worstCaseDomain[8]; + + NvBool bUseCachedPerfState; +} NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS; + + + +/* + * NV5070_CTRL_CMD_GET_CHANNEL_INFO + * + * This command returns the current channel state. + * + * channelClass + * This field indicates the hw class number (507A-507E) + * + * channelInstance + * This field indicates which of the two instances of the channel + * (in case there are two. ex: base, overlay etc) the cmd is meant for. + * Note that core channel has only one instance and the field should + * be set to 0 for core channel. + * + * channelState + * This field indicates the desired channel state in a mask form that + * is compatible with NV5070_CTRL_CMD_IDLE_CHANNEL. A mask format + * allows clients to check for one from a group of states. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * + * Display driver uses this call to ensure that all it's methods have + * propagated through hardware's internal fifo + * (NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING) before it calls + * RM to check whether or not the mode it set up in Assembly State Cache will + * be possible. Note that display driver can not use completion notifier in + * this case because completion notifier is associated with Update and Update + * will propagate the state from Assembly to Armed and when checking the + * possibility of a mode, display driver wouldn't want Armed state to be + * affected. 
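+ *
+ * As a non-authoritative sketch (the control-call mechanism and any
+ * sleep/backoff policy are assumptions, not part of this header), such a
+ * wait-for-idle poll on the core channel might look like:
+ *
+ *     NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { 0 };
+ *     info.base.subdeviceIndex = 0;
+ *     info.channelClass        = 0x507D;   // e.g. the core channel class
+ *     info.channelInstance     = 0;        // core channel has one instance
+ *     do {
+ *         // issue NV5070_CTRL_CMD_GET_CHANNEL_INFO through the client's
+ *         // usual RM control path; it fills info.channelState
+ *     } while ((info.channelState &
+ *               NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING) == 0);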
+ */ +#define NV5070_CTRL_CMD_GET_CHANNEL_INFO (0x5070010b) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE NV5070_CTRL_CMD_CHANNEL_STATE_IDLE +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_WRTIDLE NV5070_CTRL_CMD_CHANNEL_STATE_WRTIDLE +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_EMPTY NV5070_CTRL_CMD_CHANNEL_STATE_EMPTY +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FLUSHED NV5070_CTRL_CMD_CHANNEL_STATE_FLUSHED +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_BUSY NV5070_CTRL_CMD_CHANNEL_STATE_BUSY +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC_LIMBO NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_LIMBO1 NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO1 +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_LIMBO2 NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO2 +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FCODEINIT NV5070_CTRL_CMD_CHANNEL_STATE_FCODEINIT +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_FCODE NV5070_CTRL_CMD_CHANNEL_STATE_FCODE +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_VBIOSINIT NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSINIT +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_VBIOSOPER NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSOPER +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED NV5070_CTRL_CMD_CHANNEL_STATE_UNCONNECTED +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_INITIALIZE NV5070_CTRL_CMD_CHANNEL_STATE_INITIALIZE +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN1 NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN2 NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 +#define NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING (NV5070_CTRL_GET_CHANNEL_INFO_STATE_EMPTY | NV5070_CTRL_GET_CHANNEL_INFO_STATE_WRTIDLE | NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE) +#define NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + NvBool IsChannelInDebugMode; + + NvU32 channelState; +} NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS; + + + +/* + * NV5070_CTRL_CMD_SET_ACCL + * + * This command turns accelerators on and off. The use of this command + * should be restricted as it may have undesirable effects. It's + * purpose is to provide a mechanism for clients to use the + * accelerator bits to get into states that are either not detectable + * by the RM or may take longer to reach than we think is reasonable + * to wait in the RM. + * + * NV5070_CTRL_CMD_GET_ACCL + * + * This command queries the current state of the accelerators. + * + * channelClass + * This field indicates the hw class number (507A-507E) + * + * channelInstance + * This field indicates which of the two instances of the channel + * (in case there are two. ex: base, overlay etc) the cmd is meant for. + * Note that core channel has only one instance and the field should + * be set to 0 for core channel. + * + * accelerators + * Accelerators to be set in the SET_ACCEL command. Returns the + * currently set accelerators on the GET_ACCEL command. + * + * accelMask + * A mask to specify which accelerators to change with the + * SET_ACCEL command. This field does nothing in the GET_ACCEL + * command. 
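+ *
+ * For illustration only (channel class and instance values are examples,
+ * and the control-call mechanism is assumed), setting just the TRASH_ONLY
+ * accelerator on a base channel without disturbing the other bits could be
+ * expressed as:
+ *
+ *     NV5070_CTRL_SET_ACCL_PARAMS p = { 0 };
+ *     p.base.subdeviceIndex = 0;
+ *     p.channelClass        = 0x507C;                       // e.g. base channel
+ *     p.channelInstance     = 0;
+ *     p.accelerators        = NV5070_CTRL_ACCL_TRASH_ONLY;  // new value for the bit
+ *     p.accelMask           = NV5070_CTRL_ACCL_TRASH_ONLY;  // only change this bit
+ *     // submit with NV5070_CTRL_CMD_SET_ACCL; NV5070_CTRL_CMD_GET_ACCL
+ *     // returns the currently set accelerators in p.accelerators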
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_OWNER + * NV_ERR_GENERIC + * + */ + +#define NV5070_CTRL_CMD_SET_ACCL (0x5070010c) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_ACCL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_ACCL (0x5070010d) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_ACCL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_ACCL_NONE NV5070_CTRL_IDLE_CHANNEL_ACCL_NONE +#define NV5070_CTRL_ACCL_IGNORE_PI NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI +#define NV5070_CTRL_ACCL_SKIP_NOTIF NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF +#define NV5070_CTRL_ACCL_SKIP_SEMA NV5070_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA +#define NV5070_CTRL_ACCL_IGNORE_INTERLOCK NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK +#define NV5070_CTRL_ACCL_IGNORE_FLIPLOCK NV5070_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK +#define NV5070_CTRL_ACCL_TRASH_ONLY NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY +#define NV5070_CTRL_ACCL_TRASH_AND_ABORT NV5070_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT +#define NV5070_CTRL_SET_ACCL_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV5070_CTRL_SET_ACCL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + + NvU32 accelerators; + NvU32 accelMask; +} NV5070_CTRL_SET_ACCL_PARAMS; +#define NV5070_CTRL_GET_ACCL_PARAMS_MESSAGE_ID (0xDU) + +typedef NV5070_CTRL_SET_ACCL_PARAMS NV5070_CTRL_GET_ACCL_PARAMS; + +/* + * NV5070_CTRL_CMD_STOP_BASE + * + * This command tries to turn the base channel off ASAP. + * + * channelInstance + * This field indicates which of the two instances of the base + * channel the cmd is meant for. + * + * notifyMode + * This field indicates the action RM should take once the base + * channel has been successfully stopped. The options are (1) Set a + * notifier (2) Set the notifier and generate and OS event + * + * hNotifierCtxDma + * Handle to the ctx dma for the notifier that must be written once + * base channel is stopped. The standard NvNotification notifier + * structure is used. + * + * offset + * Offset within the notifier context dma where the notifier begins + * Offset must be 16 byte aligned. + * + * hEvent + * Handle to the event that RM must use to awaken the client when + * notifyMode is WRITE_AWAKEN. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT: Invalid notify mode + * NV_ERR_INVALID_CHANNEL: When the overlay is unallocated + * NV_ERR_INVALID_OWNER: Callee isn't the owner of the channel + * NV_ERR_INVALID_OBJECT_HANDLE: Notif ctx dma not found + * NV_ERR_INVALID_OFFSET: Bad offset within notif ctx dma + * NV_ERR_INSUFFICIENT_RESOURCES + * NV_ERR_TIMEOUT: RM timedout waiting to inject methods + */ +#define NV5070_CTRL_CMD_STOP_BASE (0x5070010e) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_STOP_BASE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE (0x00000000) +#define NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE_AWAKEN (0x00000001) + +#define NV5070_CTRL_CMD_STOP_BASE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NV5070_CTRL_CMD_STOP_BASE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 channelInstance; + NvU32 notifyMode; + NvHandle hNotifierCtxDma; + NvU32 offset; + NV_DECLARE_ALIGNED(NvP64 hEvent, 8); +} NV5070_CTRL_CMD_STOP_BASE_PARAMS; + + + +/* + * NV5070_CTRL_CMD_GET_PINSET_COUNT + * + * Get number of pinsets on this GPU. 
+ * + * pinsetCount [out] + * Number of pinsets on this GPU is returned in this parameter. + * This count includes pinsets that are not connected. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_GET_PINSET_COUNT (0x50700115) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_COUNT_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_PINSET_COUNT_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV5070_CTRL_GET_PINSET_COUNT_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 pinsetCount; +} NV5070_CTRL_GET_PINSET_COUNT_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_PINSET_PEER + * + * Retrieve the pinset/GPU that is connected to the specified pinset on + * this GPU. + * + * pinset [in] + * Pinset on this GPU for which peer info is to be returned must be + * specified in this parameter. + * + * peerGpuId [out] + * Instance of the GPU on the other side of the connection is + * returned in this parameter. + * + * peerPinset [out] + * Pinset on the other side of the connection is returned in this + * parameter. If there is no connection then the value is + * NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_PINSET_NONE. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_GET_PINSET_PEER (0x50700116) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_PEER_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_GPUINSTANCE_NONE (0xffffffff) + +#define NV5070_CTRL_CMD_GET_PINSET_PEER_PEER_PINSET_NONE (0xffffffff) + +#define NV5070_CTRL_GET_PINSET_PEER_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NV5070_CTRL_GET_PINSET_PEER_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 pinset; + + NvU32 peerGpuInstance; + NvU32 peerPinset; +} NV5070_CTRL_GET_PINSET_PEER_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_RMFREE_FLAGS + * + * This command sets the flags for an upcoming call to RmFree(). + * After the RmFree() API runs successfully or not, the flags are cleared. + * + * flags + * This parameter holds the NV0000_CTRL_GPU_SET_RMFREE_FLAGS_* + * flags to be passed for the next RmFree() command only. + * The flags can be one of those: + * - NV0000_CTRL_GPU_SET_RMFREE_FLAGS_NONE: + * explicitly clears the flags + * - NV0000_CTRL_GPU_SET_RMFREE_FLAGS_FREE_PRESERVES_HW: + * instructs RmFree() to preserve the HW configuration. After + * RmFree() is run this flag is cleared. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_SET_RMFREE_FLAGS (0x50700117) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SET_RMFREE_FLAGS_NONE 0x00000000 +#define NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW 0x00000001 +#define NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS_MESSAGE_ID (0x17U) + +typedef struct NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 flags; +} NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS; + + +/* + * NV5070_CTRL_CMD_IMP_SET_GET_PARAMETER + * + * This command allows to set or get certain IMP parameters. Change of + * values take effect on next modeset and is persistent across modesets + * until the driver is unloaded or user changes the override. 
+ * + * index + * One of NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_XXX defines - + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IMP_ENABLE + * Only supports "get" operation. If FALSE, IMP is being bypassed and + * all Is Mode Possible queries are answered with "mode is possible" + * and registers normally set by IMP are not changed from their defaults. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED + * Should IMP consider using ASR. ASR won't be allowed unless it is set to + * "allowed" through both _IS_ASR_ALLOWED and _IS_ASR_ALLOWED_PER_PSTATE. + * Note that IMP will not run ASR and MSCG at the same time. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE + * Should IMP consider using ASR when this pstate is being used. ASR won't + * be allowed unless it is set to "allowed" through both + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED and + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE. + * So when NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED + * returns FALSE, IMP won't consider ASR for any p-state. Note that IMP + * will not run ASR and MSCG at the same time. This function is valid + * only on PStates 2.0 systems. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE + * Should IMP consider using MSCG when this pstate is being used. MSCG + * won't be allowed if the MSCG feature isn't enabled even if we set to + * "allowed" through + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE. + * Use NV2080_CTRL_CMD_MC_QUERY_POWERGATING_PARAMETER to query if MSCG is + * supported and enabled. Note that IMP will not run ASR and MSCG at the + * same time. This function is valid only on PStates 2.0 systems. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE + * Only supports "get" operation. Returns which stutter feature is being + * engaged in hardware when running on the given pstate. Valid values are: + * NV5070_CTRL_IMP_STUTTER_FEATURE_NONE + * This value indicates no stutter feature is enabled. + * NV5070_CTRL_IMP_STUTTER_FEATURE_ASR + * This value indicates ASR is the current enabled stutter feature. + * NV5070_CTRL_IMP_STUTTER_FEATURE_MSCG + * This value indicates MSCG is the current enabled stutter feature. + * Note that system will not run ASR and MSCG at the same time. This + * function is valid only on PStates 2.0 systems. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE + * Only supports "get" operation. Returns the efficiency which IMP + * predicted for the engaged stutter feature (ASR or MSCG) when running + * on the given pstate. Normally, the actual efficiency should be higher + * than the calculated predicted efficiency. For MSCG, the predicted + * efficiency assumes no mempool compression. If compression is enabled + * with MSCG, the actual efficiency may be significantly higher. Returns + * 0 if no stutter feature is running. On PStates 3.0 systems, the + * pstateApi parameter is ignored, and the result is returned for the min + * IMP v-pstate possible. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS + * Only supports "get" operation. Returns information about what the possible + * mclk switch is. Valid fields are: + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE + * This field is not head-specific and indicates if mclk switch is + * possible with the current mode. 
+ * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL + * This field is not head-specific and indicates if mclk switch is + * possible with the nominal mempool settings (_NO) or if special + * settings are required in order for mclk switch to be possible (_YES). + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK + * Each head has its own setting for this field. If this field is + * set to _YES, then the specified head will allow mclk switch to + * begin if mempool occupancy exceeds the MID_WATERMARK setting. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF + * Each head has its own setting for this field. If this field is + * set to _YES, then the specified head will allow mclk switch to + * begin if the head is in its DWCF interval, and the mempool + * occupancy is greater than or equal to the DWCF watermark. + * Note: If neither _MID_WATERMARK nor _DWCF is set to _YES, then the + * specified head is ignored when determining when it is OK to start an + * mclk switch. Mclk switch must be allowed (or ignored) by all heads + * before an mclk switch will actually begin. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_FORCE_MIN_MEMPOOL + * Should min mempool be forced. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MEMPOOL_COMPRESSION + * Should mempool compression be enabled. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_CURSOR_SIZE + * The cursor size (in horizontal pixels) used by IMP (rather than the + * actual cursor size) for its computation. + * A maximum value is in place for what can be set. It can be queried + * after resetting the value - it gets reset to the maximum possible + * value. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_ENABLE + * This is to Enable/Disable ISO FB Latency Test. + * The test records the max ISO FB latency for all heads during the test period (excluding modeset time). + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_WC_TOTAL_LATENCY + * This is used to retrieve calculated wcTotalLatency of ISO FB Latency Test. + * wcTotalLatency is the worst case time for a request's data to come back after the request is issued. + * It is the sum of IMP calculated FbLatency and stream delay. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_LATENCY + * This is used to retrieve the max latency among all heads during the whole ISO FB Latency Test. + * The max latency can be used to compare with the wcTotalLatency we calculated. + * It decides whether the ISO FB Latency Test is passed or not. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_TEST_PERIOD + * This is used to retrieve the max test period during the whole ISO FB Latency Test. + * By experimental result, the test period should be at least 10 secs to approximate the + * worst case Fb latency in real situation. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_GLITCHLESS_MODESET_ENABLE + * This enables or disables glitchless modesets. Modesets can be + * glitchless if: + * (1) There are no raster timing changes, and + * (2) The resource requirements of all bandwidth clients are either not + * changing, or they are all changing in the same direction (all + * increasing or all decreasing). + * If glitchless modeset is disabled, or is not possible, heads will be + * blanked during the modeset transition. + * pstateApi + * NV2080_CTRL_PERF_PSTATES_PXXX value. 
+ * Required for NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE, + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE, + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE and + * NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE + * on PStates 2.0 systems. For other indices must be + * NV2080_CTRL_PERF_PSTATES_UNDEFINED. Not used on PStates 3.0 systems. + * head + * Head index, which is required when querying Mclk switch feature. + * (index = NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS) + * operation + * NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_GET + * Indicates a "get" operation. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_SET + * Indicates a "set" operation. + * NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_RESET + * Indicates a "reset" operation. This operation will reset the values for + * all indices to their RM defaults. + * value + * Value for new setting of a "set" operation, or the returned value of a + * "get" operation; for enable/disable operations, "enable" is non-zero, + * and "disable" is zero. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_POINTER + * NV_ERR_INVALID_INDEX specified index is not supported + * NV_ERR_INSUFFICIENT_RESOURCES cannot handle any more overrides + * NV_ERR_INVALID_OBJECT the struct needed to get the specified information + * is not marked as valid + * NV_ERR_INVALID_STATE the parameter has been set but resetting will + * not be possible + */ +#define NV5070_CTRL_CMD_IMP_SET_GET_PARAMETER (0x50700118) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS_MESSAGE_ID (0x18U) + +typedef struct NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 index; + NvU32 pstateApi; + NvU32 head; + NvU32 operation; + NvU32 value; +} NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS; + +/* valid operation values */ +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_GET 0 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_SET 1 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_RESET 2 + +/* valid index value */ +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_NONE (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IMP_ENABLE (0x00000001) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED (0x00000002) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_ASR_ALLOWED_PER_PSTATE (0x00000003) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_IS_MSCG_ALLOWED_PER_PSTATE (0x00000004) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PER_PSTATE (0x00000005) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_STUTTER_FEATURE_PREDICTED_EFFICIENCY_PER_PSTATE (0x00000006) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS (0x00000007) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_FORCE_MIN_MEMPOOL (0x00000008) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MEMPOOL_COMPRESSION (0x00000009) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_CURSOR_SIZE (0x0000000A) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_ENABLE (0x0000000B) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_WC_TOTAL_LATENCY (0x0000000C) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_LATENCY (0x0000000D) +#define 
NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOFBLATENCY_TEST_MAX_TEST_PERIOD (0x0000000E) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_GLITCHLESS_MODESET_ENABLE (0x0000000F) + +/* valid NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_ISOHUB_STUTTER_FEATURE values */ +#define NV5070_CTRL_IMP_STUTTER_FEATURE_NONE 0 +#define NV5070_CTRL_IMP_STUTTER_FEATURE_ASR 1 +#define NV5070_CTRL_IMP_STUTTER_FEATURE_MSCG 2 + +/* valid NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE values */ +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE 0:0 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE_NO (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE_YES (0x00000001) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL 1:1 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL_NO (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_OVERRIDE_MEMPOOL_YES (0x00000001) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK 2:2 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK_NO (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_MID_WATERMARK_YES (0x00000001) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF 3:3 +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF_NO (0x00000000) +#define NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_DWCF_YES (0x00000001) + +/* + * NV5070_CTRL_CMD_SET_MEMPOOL_WAR_FOR_BLIT_TEARING + * + * This command engages the WAR for blit tearing caused by huge mempool size and + * mempool compression. The EVR in aero off mode uses scanline info to predict + * where the scanline will be at a later time. Since RG scanline is used to perform + * front buffer blits and isohub buffers large amount of display data it may have + * fetched several lines of data ahead of where the RG is scanning out leading to + * video tearing. The WAR for this problem is to reduce the amount of data fetched. + * + * base + * This struct must be the first member of all 5070 control calls containing + * the subdeviceIndex. + * bEngageWAR + * Indicates if mempool WAR has to be engaged or disengaged. 
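As one worked use of the IMP set/get control defined above, the sketch below asks whether an mclk switch is possible with the current mode and decodes the _POSSIBLE bit from the returned value. rmControl() and the handles are placeholders; DRF_VAL is assumed to come from nvmisc.h, and pstateApi is left at zero on the strength of the note above that non-per-pstate indices take NV2080_CTRL_PERF_PSTATES_UNDEFINED.

    #include "nvmisc.h"                          /* assumed to provide DRF_VAL */
    #include "ctrl/ctrl5070/ctrl5070chnc.h"

    extern NV_STATUS rmControl(NvHandle hClient, NvHandle hDisplay,
                               NvU32 cmd, void *pParams, NvU32 paramsSize);  /* placeholder */

    /* Ask IMP whether an mclk switch is currently possible with the active mode. */
    static NvBool isMclkSwitchPossible(NvHandle hClient, NvHandle hDisplay, NvU32 head)
    {
        NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS p = { 0 };

        p.base.subdeviceIndex = 0;
        p.index     = NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS;
        p.head      = head;    /* head is required for this index */
        p.operation = NV5070_CTRL_IMP_SET_GET_PARAMETER_OPERATION_GET;
        /* p.pstateApi stays 0; the documentation above calls for
         * NV2080_CTRL_PERF_PSTATES_UNDEFINED (from the 2080 perf header) here. */

        if (rmControl(hClient, hDisplay, NV5070_CTRL_CMD_IMP_SET_GET_PARAMETER,
                      &p, sizeof(p)) != NV_OK)
            return NV_FALSE;

        return DRF_VAL(5070, _CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS,
                       _VALUE_POSSIBLE, p.value) ==
               NV5070_CTRL_IMP_SET_GET_PARAMETER_INDEX_MCLK_SWITCH_FEATURE_OUTPUTS_VALUE_POSSIBLE_YES;
    }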
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + */ + +#define NV5070_CTRL_CMD_SET_MEMPOOL_WAR_FOR_BLIT_TEARING (0x50700119) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS_MESSAGE_ID (0x19U) + +typedef struct NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvBool bEngageWAR; +} NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS; +typedef struct NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS *PNV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS; + +#define NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE (0x50700120) /* finn: Evaluated from "(FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + NvU32 activeViewportBase; +} NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS; +typedef struct NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS *PNV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS; + +/* _ctrl5070chnc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h new file mode 100644 index 000000000..bbfa794d8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070common.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. 
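Rounding out the channel-control interface above, NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE is a simple per-head query; a minimal sketch follows, with rmControl() and the handles again standing in for whatever dispatch mechanism the client uses.

    #include "ctrl/ctrl5070/ctrl5070chnc.h"

    extern NV_STATUS rmControl(NvHandle hClient, NvHandle hDisplay,
                               NvU32 cmd, void *pParams, NvU32 paramsSize);  /* placeholder */

    /* Read back the active viewport base currently reported for one head. */
    static NV_STATUS getActiveViewportBase(NvHandle hClient, NvHandle hDisplay,
                                           NvU32 head, NvU32 *pViewportBase)
    {
        NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS p = { 0 };
        NV_STATUS status;

        p.base.subdeviceIndex = 0;
        p.head = head;

        status = rmControl(hClient, hDisplay, NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE,
                           &p, sizeof(p));
        if (status == NV_OK)
            *pViewportBase = p.activeViewportBase;
        return status;
    }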
+// Source file: ctrl/ctrl5070/ctrl5070common.finn +// + + + +#define NV5070_CTRL_CMD_CHANNEL_STATE_IDLE NVBIT(0) +#define NV5070_CTRL_CMD_CHANNEL_STATE_WRTIDLE NVBIT(1) +#define NV5070_CTRL_CMD_CHANNEL_STATE_QUIESCENT1 NVBIT(2) +#define NV5070_CTRL_CMD_CHANNEL_STATE_QUIESCENT2 NVBIT(3) +#define NV5070_CTRL_CMD_CHANNEL_STATE_EMPTY NVBIT(4) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FLUSHED NVBIT(5) +#define NV5070_CTRL_CMD_CHANNEL_STATE_BUSY NVBIT(6) +#define NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC NVBIT(7) +#define NV5070_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO NVBIT(8) +#define NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO1 NVBIT(9) +#define NV5070_CTRL_CMD_CHANNEL_STATE_LIMBO2 NVBIT(10) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FCODEINIT NVBIT(11) +#define NV5070_CTRL_CMD_CHANNEL_STATE_FCODE NVBIT(12) +#define NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSINIT NVBIT(13) +#define NV5070_CTRL_CMD_CHANNEL_STATE_VBIOSOPER NVBIT(14) +#define NV5070_CTRL_CMD_CHANNEL_STATE_UNCONNECTED NVBIT(15) +#define NV5070_CTRL_CMD_CHANNEL_STATE_INITIALIZE NVBIT(16) +#define NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 NVBIT(17) +#define NV5070_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 NVBIT(18) +#define NV5070_CTRL_CMD_CHANNEL_STATE_INIT NVBIT(19) + +#define NV5070_CTRL_CMD_MAX_HEADS 4U +#define NV5070_CTRL_CMD_MAX_DACS 4U +#define NV5070_CTRL_CMD_MAX_SORS 8U +#define NV5070_CTRL_CMD_MAX_PIORS 4U + +#define NV5070_CTRL_CMD_OR_OWNER_NONE (0xFFFFFFFFU) +#define NV5070_CTRL_CMD_OR_OWNER_HEAD(i) (i) +#define NV5070_CTRL_CMD_OR_OWNER_HEAD__SIZE_1 NV5070_CTRL_CMD_MAX_HEADS + +#define NV5070_CTRL_CMD_SOR_OWNER_MASK_NONE (0x00000000U) +#define NV5070_CTRL_CMD_SOR_OWNER_MASK_HEAD(i) (1 << i) + +#define NV5070_CTRL_CMD_DAC_PROTOCOL_RGB_CRT (0x00000000U) + + + +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A (0x00000000U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B (0x00000001U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS (0x00000002U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_LVDS_CUSTOM (0x00000003U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DP_A (0x00000004U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_DP_B (0x00000005U) +#define NV5070_CTRL_CMD_SOR_PROTOCOL_SUPPORTED (0xFFFFFFFFU) + +#define NV5070_CTRL_CMD_PIOR_PROTOCOL_EXT_TMDS_ENC (0x00000000U) + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h new file mode 100644 index 000000000..83284cdfb --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070event.h @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070event.finn +// + + + + +#include "ctrl/ctrl5070/ctrl5070base.h" + +/* NV50_DISPLAY event-related control commands and parameters */ + +/* + * NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * This command sets event notification state for the NV50_DISPLAY object. + * This command requires that an instance of NV01_EVENT has been previously + * bound to the NV50_DISPLAY object. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the NV50_DISPLAY + * parent device to which the operation should be directed. This parameter + * must specify a value between zero and the total number of subdevices + * within the parent device. This parameter should be set to zero for + * default behavior. + * hEvent + * This parameter specifies the handle of the NV01_EVENT instance + * to be bound to the given subDeviceInstance. + * event + * This parameter specifies the type of event to which the specified + * action is to be applied. This parameter must specify a valid + * NOTIFIERS value of display class. + * action + * This parameter specifies the desired event notification action. + * Valid notification actions include: + * NV5070_CTRL_SET_EVENT_NOTIFICATION_DISABLE + * This action disables event notification for the specified + * event for the associated subdevice object. + * NV5070_CTRL_SET_EVENT_NOTIFICATION_SINGLE + * This action enables single-shot event notification for the + * specified event for the associated subdevice object. + * NV5070_CTRL_SET_EVENT_NOTIFICATION_REPEAT + * This action enables repeated event notification for the specified + * event for the associated system controller object. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION (0x50700901) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 subDeviceInstance; + NvHandle hEvent; + NvU32 event; + NvU32 action; +} NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000) +#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001) +#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) + + +/* + * NV5070_CTRL_CMD_EVENT_SET_TRIGGER + * + * This command triggers a software event for the NV50_DISPLAY object. + * This command accepts no parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV5070_CTRL_CMD_EVENT_SET_TRIGGER (0x50700902) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | 0x2" */ + + +/* + * NV5070_CTRL_CMD_EVENT_SET_NOTIFIER_MEMORY + * + * hMemory + * This parameter specifies the handle of the memory object + * that identifies the memory address translation for this + * subdevice instance's notification(s). 
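For the event notification control above, a typical client binds an NV01_EVENT to the NV50_DISPLAY object and then selects repeated delivery for one display notifier. The sketch below shows only that last step; rmControl(), the handles, and notifierIndex (one of the display class NOTIFIERS values) are illustrative.

    #include "ctrl/ctrl5070/ctrl5070event.h"

    extern NV_STATUS rmControl(NvHandle hClient, NvHandle hDisplay,
                               NvU32 cmd, void *pParams, NvU32 paramsSize);  /* placeholder */

    /* Re-arm an already-bound NV01_EVENT so it fires on every occurrence of the
     * given display notifier, rather than once. */
    static NV_STATUS enableRepeatedNotification(NvHandle hClient, NvHandle hDisplay,
                                                NvHandle hEvent, NvU32 notifierIndex)
    {
        NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS p = { 0 };

        p.subDeviceInstance = 0;              /* default subdevice */
        p.hEvent            = hEvent;         /* NV01_EVENT previously bound to NV50_DISPLAY */
        p.event             = notifierIndex;  /* a NOTIFIERS value from the display class header */
        p.action            = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;

        return rmControl(hClient, hDisplay, NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION,
                         &p, sizeof(p));
    }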
The beginning of the + * translation points to an array of notification data structures. + * The size of the translation must be at least large enough to hold the + * maximum number of notification data structures. + * Legal argument values must be instances of the following classes: + * NV01_NULL + * NV04_MEMORY + * When hMemory specifies the NV01_NULL_OBJECT value then any existing + * memory translation connection is cleared. There must not be any + * pending notifications when this command is issued. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NV5070_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES (0x50700903) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS { + NvU32 subDeviceInstance; + NvHandle hMemory; +} NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS; + +#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_NOTIFIED 0 +#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_PENDING 1 +#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_ERROR 2 + + + +/* _ctrl5070event_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h new file mode 100644 index 000000000..11b79b280 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070impoverrides.h @@ -0,0 +1,33 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070impoverrides.finn +// + + + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h new file mode 100644 index 000000000..6c38ab340 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070or.h @@ -0,0 +1,939 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070or.finn +// + +#include "ctrl5070common.h" + + + +#include "ctrl/ctrl5070/ctrl5070base.h" + + + +/* + * NV5070_CTRL_CMD_SET_DAC_PWR + * + * This command sets the DAC power control register. orNumber, normalPower, + * and safePower will always have to be specified. However, HSync, VSync, + * and data for normal and/or safe mode can be empty, leaving the current + * values intact. + * + * orNumber + * The dac for which the settings need to be programmed. + * + * normalHSync + * The normal operating state for the H sync signal. + * + * normalVSync + * The normal operating state for the V sync signal. + * + * normalData + * The normal video data input pin of the d/a converter. + * + * normalPower + * The normal state of the dac macro power. + * + * safeHSync + * The safe operating state for the H sync signal. + * + * safeVSync + * The safe operating state for the V sync signal. + * + * safeData + * The safe video data input pin of the d/a converter. + * + * safePower + * The safe state of the dac macro power. + * + * flags + * The following flags have been defined: + * (1) SPECIFIED_NORMAL: Indicates whether HSync, VSync, data, + * for normal state have been specified in the parameters. + * (2) SPECIFIED_SAFE: Indicates whether HSync, VSync, data, + * for safe state have been specified in the parameters. 
+ * (3) SPECIFIED_FORCE_SWITCH: Indicates whether to force the + * change immediately instead of waiting for VSync + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * NV_ERR_TIMEOUT + */ +#define NV5070_CTRL_CMD_SET_DAC_PWR (0x50700404) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_HSYNC_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_VSYNC_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_DATA_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR 0:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR_OFF (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_NORMAL_PWR_ON (0x00000001) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_HSYNC_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_VSYNC_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA 1:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_ENABLE (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_LO (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_DATA_HI (0x00000002) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR 0:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR_OFF (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_SAFE_PWR_ON (0x00000001) + +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL 0:0 +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_NORMAL_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE 1:1 +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_SPECIFIED_SAFE_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH 2:2 +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_DAC_PWR_FLAGS_FORCE_SWITCH_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 normalHSync; + NvU32 normalVSync; + NvU32 normalData; + NvU32 normalPower; + NvU32 safeHSync; + NvU32 safeVSync; + NvU32 safeData; + NvU32 safePower; + NvU32 flags; +} NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS; + + + +/* + * NV5070_CTRL_CMD_GET_SOR_PWM + * + * This command returns SOR's current PWM settings. + * + * orNumber + * The OR number for which the seq ctrls are to be modified. + * + * targetFreq + * The target PWM freq. 
This is the PWM frequency we planned on + * programming. + * + * actualFreq + * Actual PWM freq programmed into PWM. + * + * div + * The divider being used currently for generating PWM clk. + * A valued of 0 means that PWM is disabled. + * + * resolution + * The resolution of steps currently programmed or the max number of + * clocks per cycle. The possible values for NV50 are 128, 256, 512 + * and 1024. This field is irrelevant when div is 0. + * + * dutyCycle + * Duty cycle in range 0-1024 + * + * sourcePCLK (OUT) + * The PWM source clock selector. This field is non-zero if the PCLK + * is selected as the PWM source clock. Otherwise, the PWM source + * clock is XTAL. + * + * head (IN) + * The head for which the pixel clock is sourced from. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_SOR_PWM (0x50700420) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + NvU32 targetFreq; + NvU32 actualFreq; + NvU32 div; + NvU32 resolution; + NvU32 dutyCycle; + NvU32 sourcePCLK; + NvU32 head; +} NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS; + + +/* + * NV5070_CTRL_CMD_SET_SOR_PWM + * + * This command returns SOR's current PWM settings. + * + * orNumber + * The OR number for which the seq ctrls are to be modified. + * + * targetFreq + * The target PWM freq to be programmed. + * + * actualFreq + * Actual PWM freq programmed into PWM after all the specified + * settings have been applied. + * + * div + * The divider to use for generating PWM clk. + * Set this to 0 to disable PWM. Note that only one of div + * or targetFreq can be specified at a time since specifying one + * automatically determines the value of the other. Selection is + * done via USE_SPECIFIED_DIV flag. + * + * resolution + * The resolution or the max number of clocks per cycle desired. + * Note that if it's not possible to program the given resolution + * and frequency (or div) combination, RM would not attempt to + * smartly lower the resolution. The call would return failure. + * The possible values for NV50 are 128, 256, 512 and 1024. This + * field is irrelevant when div is 0. + * + * dutyCycle + * Duty cycle in range 0-1024 + * + * flags + * The following flags have been defined: + * (1) USE_SPECIFIED_DIV: Indicates whether RM should use + * specified div or targetFreq when determining the divider + * for xtal clock. + * (2) PROG_DUTY_CYCLE: Indicates whether or not the caller + * desires to program duty cycle. Normally whenever pwm freq + * and range need to be programmed, it's expected that duty + * cycle would be reprogrammed as well but this is not + * enforced. + * (3) PROG_FREQ_AND_RANGE: Indicates whether or not the caller + * desires to program a new PWM setting (div and resolution). + * (4) SOURCE_CLOCK: Indicates whether the PCLK or XTAL is used + * as the PWM clock source. GT21x and better. + * + * head (IN) + * The head for which the pixel clock is sourced from. 
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_SOR_PWM (0x50700421) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_USE_SPECIFIED_DIV 0:0 +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_USE_SPECIFIED_DIV_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_USE_SPECIFIED_DIV_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_DUTY_CYCLE 1:1 +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_DUTY_CYCLE_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_DUTY_CYCLE_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_FREQ_AND_RANGE 2:2 +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_FREQ_AND_RANGE_NO (0x00000000) +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_PROG_FREQ_AND_RANGE_YES (0x00000001) +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_SOURCE_CLOCK 3:3 +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_SOURCE_CLOCK_XTAL (0x00000000) +#define NV5070_CTRL_CMD_SET_SOR_PWM_FLAGS_SOURCE_CLOCK_PCLK (0x00000001) + +#define NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + NvU32 targetFreq; + NvU32 actualFreq; + NvU32 div; // equivalent of NV_PDISP_SOR_PWM_DIV_DIVIDE + NvU32 resolution; // equivalent of NV_PDISP_SOR_PWM_DIV_RANGE + NvU32 dutyCycle; + NvU32 flags; + NvU32 head; +} NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS; + + +/* + * NV5070_CTRL_CMD_GET_SOR_OP_MODE + * + * This command returns current settings for the specified SOR. + * + * orNumber + * The OR number for which the operating mode needs to be read. + * + * category + * Whether LVDS or CSTM setting are desired. + * + * puTxda + * Status of data pins of link A + * + * puTxdb + * Status of data pins of link B + * + * puTxca + * Status of link A clock + * + * puTxcb + * Status of link B clock + * + * upper + * Whether LVDS bank A is the upper, odd, or first pixel. + * + * mode + * Current protocol. + * + * linkActA + * Status of link B clock + * + * linkActB + * Status of link B clock + * + * lvdsEn + * Output driver configuration. + * + * lvdsDual + * Whether LVDS dual-link mode is turned on or not. + * + * dupSync + * Whether DE, HSYNC, and VSYNC are used for encoding instead of + * RES, CNTLE, and CNTLF. + * + * newMode + * Whether new or old mode is being used. + * + * balanced + * Whether balanced encoding is enabled. + * + * plldiv + * Feedback divider for the hi-speed pll + * + * rotClk + * Skew of TXC clock. 
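A common use of NV5070_CTRL_CMD_SET_SOR_PWM defined above is adjusting only the backlight duty cycle while leaving the programmed frequency and range alone, as sketched below. rmControl() and the handles are placeholders, and DRF_DEF is assumed to come from nvmisc.h.

    #include "nvmisc.h"                           /* assumed to provide DRF_DEF */
    #include "ctrl/ctrl5070/ctrl5070or.h"

    extern NV_STATUS rmControl(NvHandle hClient, NvHandle hDisplay,
                               NvU32 cmd, void *pParams, NvU32 paramsSize);  /* placeholder */

    /* Reprogram only the PWM duty cycle (for example a backlight level) on one SOR,
     * keeping the currently programmed frequency and range. */
    static NV_STATUS setSorPwmDutyCycle(NvHandle hClient, NvHandle hDisplay,
                                        NvU32 orNumber, NvU32 head,
                                        NvU32 dutyCycle /* 0..1024 */)
    {
        NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS p = { 0 };

        p.base.subdeviceIndex = 0;
        p.orNumber  = orNumber;
        p.head      = head;          /* head sourcing the pixel clock */
        p.dutyCycle = dutyCycle;
        p.flags     = DRF_DEF(5070, _CTRL_CMD_SET_SOR_PWM, _FLAGS_PROG_DUTY_CYCLE, _YES) |
                      DRF_DEF(5070, _CTRL_CMD_SET_SOR_PWM, _FLAGS_PROG_FREQ_AND_RANGE, _NO);

        return rmControl(hClient, hDisplay, NV5070_CTRL_CMD_SET_SOR_PWM, &p, sizeof(p));
    }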
+ * + * rotDat + * How much are the 8 bits of each color channel rotated by + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE (0x50700422) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY_LVDS 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY_CUSTOM 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1 1:1 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2 2:2 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3 3:3 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDA_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1 1:1 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2 2:2 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3 3:3 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXDB_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PU_TXCB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_UPPER 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_UPPER_UPPER_RESET 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE_LVDS 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE_TMDS 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LINKACTB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_EN_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL_ENABLE 
0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_DUP_SYNC_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_NEW_MODE_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_BALANCED_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV 0:0 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV_BY_7 0x00000000 +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PLLDIV_BY_10 0x00000001 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_ROTCLK 3:0 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_ROTDAT 2:0 + +#define NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 category; + NvU32 puTxda; + NvU32 puTxdb; + NvU32 puTxca; + NvU32 puTxcb; + NvU32 upper; + NvU32 mode; + NvU32 linkActA; + NvU32 linkActB; + NvU32 lvdsEn; + NvU32 lvdsDual; + NvU32 dupSync; + NvU32 newMode; + NvU32 balanced; + NvU32 plldiv; + NvU32 rotClk; + NvU32 rotDat; +} NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS; + + +/* + * NV5070_CTRL_CMD_SET_SOR_OP_MODE + * + * This command applies the specified settings to the specified SOR. + * + * orNumber + * The OR number for which the operating mode needs to be read. + * Note that if DCB doesn't report LVDS for the specified orNumber, + * the call will return failure. + * + * category + * Whether LVDS or CSTM settings are specified. + * + * puTxda + * Used to enable or disable the data pins of link A. + * + * puTxdb + * Used to enable or disable the data pins of link B. + * + * puTxca + * Used to enable or disable link A clock. + * + * puTxcb + * Used to enable or disable link B clock. + * + * upper + * Whether LVDS bank A should be the upper, odd, or first pixel. + * + * mode + * What protocol (LVDS/TMDS to use). + * + * linkActA + * Used to enable or disable the digital logic of link A. + * + * linkActB + * Used to enable or disable the digital logic of link B. + * + * lvdsEn + * Output driver configuration. + * + * lvdsDual + * Whether to turn on LVDS dual-link mode. + * + * dupSync + * Whether to use DE, HSYNC, and VSYNC for encoding instead of + * RES, CNTLE, and CNTLF. + * + * newMode + * Whether to use new or old mode. + * + * balanced + * Whether or not to use balanced encoding. + * + * plldiv + * Feedback divider to use for the hi-speed pll. + * + * rotClk + * How much to skew TXC clock. + * + * rotDat + * How much to rotate the 8 bits of each color channel by. 
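As a small reader for the GET variant above, the sketch below queries an SOR's LVDS operating mode and reports whether dual-link LVDS is active; each returned field occupies its own struct member, so values are compared directly against their enumerants. rmControl() and the handles remain placeholders.

    #include "ctrl/ctrl5070/ctrl5070or.h"

    extern NV_STATUS rmControl(NvHandle hClient, NvHandle hDisplay,
                               NvU32 cmd, void *pParams, NvU32 paramsSize);  /* placeholder */

    /* Report whether an SOR is currently driving dual-link LVDS. */
    static NvBool sorIsDualLinkLvds(NvHandle hClient, NvHandle hDisplay, NvU32 orNumber)
    {
        NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS p = { 0 };

        p.base.subdeviceIndex = 0;
        p.orNumber = orNumber;
        p.category = NV5070_CTRL_CMD_GET_SOR_OP_MODE_CATEGORY_LVDS;

        if (rmControl(hClient, hDisplay, NV5070_CTRL_CMD_GET_SOR_OP_MODE,
                      &p, sizeof(p)) != NV_OK)
            return NV_FALSE;

        return (p.mode     == NV5070_CTRL_CMD_GET_SOR_OP_MODE_MODE_LVDS) &&
               (p.lvdsDual == NV5070_CTRL_CMD_GET_SOR_OP_MODE_LVDS_DUAL_ENABLE);
    }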
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE (0x50700423) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY_LVDS 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY_CUSTOM 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1 1:1 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2 2:2 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3 3:3 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDA_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_0_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1 1:1 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_1_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2 2:2 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_2_ENABLE 0x00000001 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3 3:3 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXDB_3_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_UPPER 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_UPPER_UPPER_RESET 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE_LVDS 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE_TMDS 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTB_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_EN_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_LVDS_DUAL_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC 0:0 +#define 
NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_DUP_SYNC_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_NEW_MODE_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED_DISABLE 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_BALANCED_ENABLE 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV 0:0 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV_BY_7 0x00000000 +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PLLDIV_BY_10 0x00000001 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_ROTCLK 3:0 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_ROTDAT 2:0 + +#define NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 category; + NvU32 puTxda; + NvU32 puTxdb; + NvU32 puTxca; + NvU32 puTxcb; + NvU32 upper; + NvU32 mode; + NvU32 linkActA; + NvU32 linkActB; + NvU32 lvdsEn; + NvU32 lvdsDual; + NvU32 dupSync; + NvU32 newMode; + NvU32 balanced; + NvU32 plldiv; + NvU32 rotClk; + NvU32 rotDat; +} NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_PIOR_OP_MODE + * + * This command returns current settings for the specified PIOR. + * + * orNumber + * The OR number for which the operating mode needs to be programmed. + * + * category + * Whether ext TMDS, TV, DRO or DRI settings are desired. + * EXT TV is not supported at the moment. + * EXT DisplayPort is specified through EXT 10BPC 444. + * + * clkPolarity + * Whether or not output clock is inverted relative to generated clock. + * + * clkMode + * Whether data being transmitted is SDR or DDR. + * + * clkPhs + * Position of the edge on which data is launched. + * + * unusedPins + * Status of unused pins of this PIOR. + * + * polarity + * Whether or not sync and DE pin polarities are inverted. + * + * dataMuxing + * How are the bits are multiplexed together. + * + * clkDelay + * Extra delay for the clock. + * + * dataDelay + * Extra delay for the data. 
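For the SET variant above, here is a deliberately minimal configuration that selects single-link TMDS on link A. Every other field is zero-initialized purely for illustration; a real client would first capture the current settings with NV5070_CTRL_CMD_GET_SOR_OP_MODE and change only what it needs. rmControl() and the handles are placeholders.

    #include "ctrl/ctrl5070/ctrl5070or.h"

    extern NV_STATUS rmControl(NvHandle hClient, NvHandle hDisplay,
                               NvU32 cmd, void *pParams, NvU32 paramsSize);  /* placeholder */

    static NV_STATUS sorSelectSingleTmdsLinkA(NvHandle hClient, NvHandle hDisplay,
                                              NvU32 orNumber)
    {
        NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS p = { 0 };

        p.base.subdeviceIndex = 0;
        p.orNumber = orNumber;
        p.category = NV5070_CTRL_CMD_SET_SOR_OP_MODE_CATEGORY_CUSTOM;
        p.mode     = NV5070_CTRL_CMD_SET_SOR_OP_MODE_MODE_TMDS;
        p.linkActA = NV5070_CTRL_CMD_SET_SOR_OP_MODE_LINKACTA_ENABLE;
        p.puTxca   = NV5070_CTRL_CMD_SET_SOR_OP_MODE_PU_TXCA_ENABLE;
        p.puTxda   = 0xF;   /* power up all four link A data pins (_PU_TXDA_0.._3) */

        return rmControl(hClient, hDisplay, NV5070_CTRL_CMD_SET_SOR_OP_MODE, &p, sizeof(p));
    }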
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE (0x50700430) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY 2:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_EXT_TMDS 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_EXT_TV 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_DRO 0x00000003 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_DRI 0x00000004 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CATEGORY_EXT_10BPC_444 0x00000005 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_POLARITY 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_POLARITY_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_POLARITY_INV 0x00000001 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_MODE 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_MODE_SDR 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_MODE_DDR 0x00000001 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS 1:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS_0 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS_1 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS_2 0x00000002 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_PHS_3 0x00000003 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_UNUSED_PINS 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_UNUSED_PINS_LO 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_UNUSED_PINS_TS 0x00000001 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_H 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_H_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_H_INV 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_V 1:1 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_V_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_V_INV 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_DE 2:2 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_DE_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_POLARITY_DE_INV 0x00000001 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING 3:0 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_RGB_0 0x00000000 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_RGB_1 0x00000001 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_DIST_RNDR 0x00000003 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_YUV_0 0x00000004 +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_MUXING_UYVY 0x00000005 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_CLK_DLY 2:0 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_DATA_DLY 2:0 + +#define NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS_MESSAGE_ID (0x30U) + +typedef struct NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 category; + NvU32 clkPolarity; + NvU32 clkMode; + NvU32 clkPhs; + NvU32 unusedPins; + NvU32 polarity; + NvU32 dataMuxing; + NvU32 clkDelay; + NvU32 dataDelay; +} NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS; + + +/* + * NV5070_CTRL_CMD_SET_PIOR_OP_MODE + * + * This command applies the specified settings to the specified PIOR. + * + * orNumber + * The OR number for which the operating mode needs to be programmed. + * + * category + * Whether ext TMDS, TV, DRO or DRI settings are to be programmed. + * EXT TV is not supported at the moment. + * EXT DisplayPort is specified through EXT 10BPC 444. 
+ * + * clkPolarity + * Whether or not to invert output clock relative to generated clock. + * + * clkMode + * Whether data being transmitted should be SDR or DDR. + * + * clkPhs + * Position of the edge on which data should be launched. + * + * unusedPins + * What to do with unused pins of this PIOR. + * + * polarity + * Whether or not to invert sync and DE pin polarities. + * + * dataMuxing + * How to multiplex the bits together. + * + * clkDelay + * Extra delay for the clock. + * + * dataDelay + * Extra delay for the data. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE (0x50700431) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY 2:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_EXT_TMDS 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_EXT_TV 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_DRO 0x00000003 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_DRI 0x00000004 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CATEGORY_EXT_10BPC_444 0x00000005 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_POLARITY 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_POLARITY_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_POLARITY_INV 0x00000001 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_MODE 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_MODE_SDR 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_MODE_DDR 0x00000001 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS 1:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS_0 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS_1 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS_2 0x00000002 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_PHS_3 0x00000003 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_UNUSED_PINS 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_UNUSED_PINS_LO 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_UNUSED_PINS_TS 0x00000001 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_H 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_H_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_H_INV 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_V 1:1 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_V_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_V_INV 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_DE 2:2 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_DE_NORMAL 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_POLARITY_DE_INV 0x00000001 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING 3:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_RGB_0 0x00000000 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_RGB_1 0x00000001 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_DIST_RNDR 0x00000003 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_YUV_0 0x00000004 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_MUXING_UYVY 0x00000005 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_CLK_DLY 2:0 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DATA_DLY 2:0 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_MASTER 1:0 + +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_DRIVE_PIN_SET 2:0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_DRIVE_PIN_SET_NEITHER 0 +#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_DRIVE_PIN_SET_A 1 +#define 
NV5070_CTRL_CMD_SET_PIOR_OP_MODE_DRO_DRIVE_PIN_SET_B 2
+
+
+#define NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS_MESSAGE_ID (0x31U)
+
+typedef struct NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       orNumber;
+
+    NvU32                       category;
+    NvU32                       clkPolarity;
+    NvU32                       clkMode;
+    NvU32                       clkPhs;
+    NvU32                       unusedPins;
+    NvU32                       polarity;
+    NvU32                       dataMuxing;
+    NvU32                       clkDelay;
+    NvU32                       dataDelay;
+    NvU32                       dro_master;
+    NvU32                       dro_drive_pin_set;
+} NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS;
+
+/*
+ * NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE
+ *
+ * Set the given SOR number into flush mode in preparation for DP link training.
+ *
+ *   sorNumber [in]
+ *     The SOR number to set into flush mode.
+ *
+ *   bEnable [in]
+ *     Whether to enable or disable flush mode on this SOR.
+ *
+ *   bImmediate [in]
+ *     If set to true, flush is enabled in immediate mode.
+ *     If not, flush is enabled in loadv mode.
+ *     NOTE: We do not support exiting flush in LoadV mode.
+ *
+ *   headMask [in]
+ *     Optional. If set, only the heads in the head mask are brought out of
+ *     flush; the OR stays in flush mode until the last head is out of flush
+ *     mode. The caller can use _HEAD_ALL to specify that all heads are to be
+ *     brought out.
+ *     NOTE: headMask is considered only while exiting flush mode.
+ *
+ *   bForceRgDiv [in]
+ *     If set, forces RgDiv. Should be used only for HW/SW testing.
+ *
+ *   bUseBFM [in]
+ *     If set, indicates that we are running on a BFM; otherwise we are
+ *     executing on non-BFM platforms.
+ *
+ *   bFireAndForget [in]
+ *     Trigger flush mode and perform post-processing without waiting for it
+ *     to be done. This is required for special cases like GC5, where ELV is
+ *     blocked and RG is stalled: we trigger flush for one-shot mode and then
+ *     do a modeset that disables it without actually waiting for it to get
+ *     disabled. We will not get any vblank interrupt in this case because RG
+ *     is stalled.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ */
+
+#define NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE (0x50700457) /* finn: Evaluated from "(FINN_NV50_DISPLAY_OR_INTERFACE_ID << 8) | NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_MESSAGE_ID (0x57U)
+
+typedef struct NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       sorNumber;
+    NvBool                      bEnable;
+    NvBool                      bImmediate;
+    NvU32                       headMask;
+    NvBool                      bForceRgDiv;
+    NvBool                      bUseBFM;
+    NvBool                      bFireAndForget;
+} NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS;
+
+#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD(i)      (i):(i)
+#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD__SIZE_1 NV5070_CTRL_CMD_MAX_HEADS
+#define NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS_HEADMASK_HEAD_ALL     0xFFFFFFFF
+
+
+
+/* _ctrl5070or_h_ */
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
new file mode 100644
index 000000000..088ab11cd
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070rg.h
@@ -0,0 +1,567 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070rg.finn
+//
+
+
+
+
+#include "ctrl/ctrl5070/ctrl5070base.h"
+
+
+
+/*
+ * NV5070_CTRL_CMD_GET_RG_STATUS
+ *
+ * This 'get' command returns the status of the raster generator.
+ *
+ *   head
+ *     The head for which RG status is desired.
+ *
+ *   scanLocked
+ *     Whether or not RG is scan (raster or frame) locked.
+ *   flipLocked
+ *     Whether or not RG is flip locked.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ */
+#define NV5070_CTRL_CMD_GET_RG_STATUS (0x50700202) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_NO  (0x00000000)
+#define NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_YES (0x00000001)
+
+#define NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_NO  (0x00000000)
+#define NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_YES (0x00000001)
+
+#define NV5070_CTRL_CMD_GET_RG_STATUS_STALLED_NO     (0x00000000)
+#define NV5070_CTRL_CMD_GET_RG_STATUS_STALLED_YES    (0x00000001)
+
+#define NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       head;
+
+    NvU32                       scanLocked;  // [OUT]
+    NvU32                       flipLocked;  // [OUT]
+    NvU32                       rgStalled;
+} NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS;
+
+/*
+ * NV5070_CTRL_CMD_UNDERFLOW_PARAMS
+ *
+ * This structure contains data for the
+ * NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP and
+ * NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP CTRL calls.
+ *
+ *   head
+ *     The head for which RG underflow properties need to be set or retrieved.
+ *     Valid values for this parameter are 0 to NV5070_CTRL_CMD_MAX_HEADS.
+ *   enable
+ *     _SET_RG_: Whether to enable or disable RG underflow reporting.
+ *     _GET_RG_: Whether or not RG underflow reporting is enabled.
+ *   underflow
+ *     _SET_RG_: Clear underflow (TRUE) or leave it alone (FALSE).
+ *     _GET_RG_: RG has underflowed (TRUE) or has not underflowed (FALSE).
+ *   mode
+ *     _SET_RG_: What mode to use when underflow occurs. This is
+ *               independent of the enable field. This is always active.
+ *     _GET_RG_: What mode is used when underflow occurs. This is
+ *               independent of the enable field. This is always active.
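+ *
+ * As a minimal usage sketch (illustrative only), the SET variant declared
+ * below is typically issued as follows; hClient and hDisplay are
+ * hypothetical caller-owned RM handles and error handling is omitted:
+ *
+ *     NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS params = { 0 };
+ *     params.underflowParams.head      = 0;
+ *     params.underflowParams.enable    = NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLE_YES;
+ *     params.underflowParams.underflow = NV5070_CTRL_CMD_UNDERFLOW_PROP_CLEAR_UNDERFLOW_YES;
+ *     params.underflowParams.mode      = NV5070_CTRL_CMD_UNDERFLOW_PROP_MODE_RED;
+ *     NV_STATUS status = NvRmControl(hClient, hDisplay,
+ *                                    NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP,
+ *                                    &params, sizeof(params));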
+ */ +typedef struct NV5070_CTRL_CMD_UNDERFLOW_PARAMS { + NvU32 head; + NvU32 enable; + NvU32 underflow; + NvU32 mode; +} NV5070_CTRL_CMD_UNDERFLOW_PARAMS; + +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLED_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLED_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_UNDERFLOWED_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_UNDERFLOWED_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_MODE_REPEAT (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_MODE_RED (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLE_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_ENABLE_YES (0x00000001) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_CLEAR_UNDERFLOW_NO (0x00000000) +#define NV5070_CTRL_CMD_UNDERFLOW_PROP_CLEAR_UNDERFLOW_YES (0x00000001) + +/* + * NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP + * + * This command returns the underflow reporting parameters inside + * NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure + * + * underflowParams + * Contains data for underflow logging. + * Check NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP (0x50700203) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NV5070_CTRL_CMD_UNDERFLOW_PARAMS underflowParams; +} NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS; + + +/* + * NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP + * + * This command sets up the underflow parameters using + * NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure + * + * underflowParams + * Contains data for underflow logging. + * Check NV5070_CTRL_CMD_UNDERFLOW_PARAMS structure. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP (0x50700204) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NV5070_CTRL_CMD_UNDERFLOW_PARAMS underflowParams; +} NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS; + + +/* + * NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP + * + * This command gets the timing parameters associated with the lockout period. + * + * head + * The head for which RG fliplock properties are desired. + * + * maxSwapLockoutSkew + * The maximum possible skew between the swap lockout signals for all + * heads which are fliplocked to this head. + * + * swapLockoutStart + * Determines the start of the start lockout period, expressed as the + * number of lines before the end of the frame. The minimum allowed + * value is 1. 
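+ *
+ * As a minimal sketch (illustrative only; hClient and hDisplay are
+ * hypothetical caller-owned RM handles, the value written is arbitrary and
+ * error handling is omitted), the GET call can be paired with the SET
+ * variant documented further below for a read-modify-write of these
+ * properties:
+ *
+ *     NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS getParams = { 0 };
+ *     getParams.head = 0;
+ *     NvRmControl(hClient, hDisplay, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP,
+ *                 &getParams, sizeof(getParams));
+ *
+ *     NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS setParams = { 0 };
+ *     setParams.head               = 0;
+ *     setParams.maxSwapLockoutSkew = getParams.maxSwapLockoutSkew;
+ *     setParams.swapLockoutStart   = 2;  // lines before end of frame, >= 1
+ *     NvRmControl(hClient, hDisplay, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP,
+ *                 &setParams, sizeof(setParams));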
+ + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP (0x50700205) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW 9:0 + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START 15:0 + +#define NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 maxSwapLockoutSkew; + NvU32 swapLockoutStart; +} NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP + * + * This command sets the timing parameters associated with the lockout period. + * + * head + * The head for which RG fliplock properties are desired. + * + * maxSwapLockoutSkew + * The maximum possible skew between the swap lockout signals for all + * heads which are fliplocked to this head. + * + * swapLockoutStart + * Determines the start of the start lockout period, expressed as the + * number of lines before the end of the frame. The minimum allowed + * value is 1. + + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP (0x50700206) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW 9:0 +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW_INIT (0x00000000) + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START 15:0 +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START_INIT (0x00000000) + +#define NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 maxSwapLockoutSkew; + NvU32 swapLockoutStart; +} NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN + * + * This command returns which lockpin has been connected for the specified + * subdevice in the current SLI and/or framelock configuration. 
+ * + * head + * The head for which the locking is associated with + * + * masterScanLock + * Indicate the connection status and pin number of master scanlock + * + * slaveScanLock + * Indicate the connection status and pin number of slave scanlock + * + * flipLock + * Indicate the connection status and pin number of fliplock + * + * stereoLock + * Indicate the connection status and pin number of stereo lock + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN (0x50700207) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_MASTER_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_MASTER_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_MASTER_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_MASTER_SCAN_LOCK_PIN 3:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_SLAVE_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_SLAVE_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_SLAVE_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_SLAVE_SCAN_LOCK_PIN 3:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_FLIP_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_FLIP_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_FLIP_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_FLIP_LOCK_PIN 3:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STEREO_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STEREO_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STEREO_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STEREO_LOCK_PIN 3:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + NvU32 masterScanLock; + NvU32 slaveScanLock; + NvU32 flipLock; + NvU32 stereoLock; +} NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_VIDEO_STATUS + * + * This command is used to set the current video playback status for use + * by the Display Power Saving (nvDPS) feature. The playback status is + * used to maximize power savings by altering the DFP refresh rate used for + * video playback. + * + * displayId + * This parameter specifies the ID of the video playback display. + * Only one display may be indicated in this parameter. + * clientId + * This parameter specifies the opaque client ID associated with + * the video playback application. + * mode + * This parameter specifies the video playback mode. Valid values + * for this parameter include: + * NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_NON_FULLSCREEN + * This value indicates that there is either no video playback or + * that video playback is windowed. + * NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_FULLSCREEN + * This value indicates that video playback is fullscreen. + * NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_D3D + * This value indicates that there is a D3D app started. + * frameRate + * The parameter indicates the current video playback frame rate. 
+ *     The value is a 32-bit unsigned fixed-point number with a 24-bit
+ *     unsigned integer part (bits 31:8) and 8 fraction bits (bits 7:0),
+ *     measured in frames per second. For example, 24 fps is encoded as
+ *     24 << 8 = 0x1800.
+ *     A value of 0 indicates that video playback is stopped or not playing.
+ *   frameRateAlarmUpperLimit
+ *     This parameter indicates the upper limit tolerated when notifying a
+ *     frame rate change: if the frame rate changes but is still below this
+ *     limit, the new frame rate does not have to be reported until it rises
+ *     above the limit.
+ *     The value is a 32-bit unsigned fixed-point number with a 24-bit
+ *     unsigned integer part (bits 31:8) and 8 fraction bits (bits 7:0),
+ *     measured in frames per second.
+ *     A value of 0 indicates no tolerance in frame rate notification; the
+ *     new frame rate has to be reported as soon as it changes.
+ *   frameRateAlarmLowerLimit
+ *     This parameter indicates the lower limit tolerated when notifying a
+ *     frame rate change: if the frame rate changes but is still above this
+ *     limit, the new frame rate does not have to be reported until it drops
+ *     below the limit.
+ *     The value is a 32-bit unsigned fixed-point number with a 24-bit
+ *     unsigned integer part (bits 31:8) and 8 fraction bits (bits 7:0),
+ *     measured in frames per second.
+ *     A value of 0 indicates no tolerance in frame rate notification; the
+ *     new frame rate has to be reported as soon as it changes.
+ *
+ *   The frameRateAlarm limit values can be used by the video client to
+ *   indicate the range in which frame rate changes do not require
+ *   notification (i.e. frame rates outside these limits will result in
+ *   notification).
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_NOT_SUPPORTED
+ */
+#define NV5070_CTRL_CMD_SET_VIDEO_STATUS (0x50700209) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_DFP_SET_VIDEO_STATUS_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_DFP_SET_VIDEO_STATUS_PARAMS_MESSAGE_ID (0x9U)
+
+typedef struct NV5070_CTRL_DFP_SET_VIDEO_STATUS_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+
+    NvU32                       displayId;
+    NvU32                       clientId;
+    NvU32                       mode;
+    NvU32                       frameRate;
+    NvU32                       frameRateAlarmUpperLimit;
+    NvU32                       frameRateAlarmLowerLimit;
+} NV5070_CTRL_DFP_SET_VIDEO_STATUS_PARAMS;
+
+/* valid mode flags */
+#define NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_NON_FULLSCREEN (0x00000000)
+#define NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_FULLSCREEEN    (0x00000001)
+#define NV5070_CTRL_DFP_SET_VIDEO_STATUS_MODE_D3D            (0x00000002)
+
+/*
+ * NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS
+ *
+ * This command returns which set of lockpins needs to be used in order to
+ * successfully raster lock two heads on different GPUs together. The
+ * second GPU is not inferred from linked SLI state, if any, and needs to
+ * be specified explicitly.
+ *
+ *   head
+ *     The local head to be locked with the peer head.
+ *
+ *   peer.hDisplay
+ *     The handle identifying a display object allocated on another
+ *     GPU. It specifies the peer of interest with a subdevice
+ *     index (see below) and needs to be distinct from the handle
+ *     supplied directly to NvRmControl().
+ *
+ *   peer.subdeviceIndex
+ *     The index of the peer subdevice of interest.
+ *
+ *   peer.head
+ *     The peer head to be locked with the local head.
+ *
+ *   masterScanLockPin
+ *   slaveScanLockPin
+ *     Returns the master and slave scanlock pins that would need to
+ *     be used to lock the specified heads together, if any.
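+ *
+ * A minimal query sketch (illustrative only; hClient, hDisplay and
+ * hPeerDisplay are hypothetical caller-owned RM handles, error handling is
+ * omitted). The CONNECTED and PIN values are packed into the returned words
+ * using the 0:0 and 2:1 bit ranges defined below:
+ *
+ *     NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS params = { 0 };
+ *     params.head                = 0;
+ *     params.peer.hDisplay       = hPeerDisplay;
+ *     params.peer.subdeviceIndex = 0;
+ *     params.peer.head           = 0;
+ *     NV_STATUS status = NvRmControl(hClient, hDisplay,
+ *                                    NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS,
+ *                                    &params, sizeof(params));
+ *     NvU32 masterConnected = params.masterScanLock & 0x1;         // bits 0:0
+ *     NvU32 masterPin       = (params.masterScanLock >> 1) & 0x3;  // bits 2:1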
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_OBJECT_PARENT + */ +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS (0x5070020a) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_MASTER_SCAN_LOCK_PIN 2:1 + +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED 0:0 +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_CONNECTED_YES (0x00000001) +#define NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS_SLAVE_SCAN_LOCK_PIN 2:1 + +#define NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + + struct { + NvHandle hDisplay; + NvU32 subdeviceIndex; + NvU32 head; + } peer; + + NvU32 masterScanLock; + NvU32 slaveScanLock; +} NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_PINSET_LOCKPINS + * + * Get the lockpins for the specified pinset. + * + * pinset [in] + * The pinset whose corresponding lockpin numbers need to be determined + * must be specified with this parameter. + * + * scanLockPin [out] + * The scanlock lockpin (rasterlock or framelock) index, which can be + * either master or slave, is returned in this parameter. + * + * flipLockPin [out] + * The fliplock lockpin index, is returned in this parameter. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV5070_CTRL_CMD_GET_PINSET_LOCKPINS (0x5070020b) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_SCAN_LOCK_PIN_NONE 0xffffffff + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_FLIP_LOCK_PIN_NONE 0xffffffff + +#define NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 pinset; + NvU32 scanLockPin; + NvU32 flipLockPin; +} NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_RG_SCAN_LINE + * + * This 'get' command returns the current scan line value from raster generator + * + * head + * The head for which current scan line number is desired. + * + * scanLine + * Current scan line number. + * + * inVblank + * Whether or not in vblank. 
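+ *
+ * A minimal polling sketch (illustrative only; hClient and hDisplay are
+ * hypothetical caller-owned RM handles, error handling is omitted):
+ *
+ *     NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS params = { 0 };
+ *     params.head = 0;
+ *     NV_STATUS status = NvRmControl(hClient, hDisplay,
+ *                                    NV5070_CTRL_CMD_GET_RG_SCAN_LINE,
+ *                                    &params, sizeof(params));
+ *     // On NV_OK, params.scanLine holds the current scan line and
+ *     // params.inVblank is _IN_VBLANK_YES while in vertical blanking.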
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE (0x5070020c) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_IN_VBLANK_NO (0x00000000) +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_IN_VBLANK_YES (0x00000001) + +#define NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 head; + NvU32 scanLine; // [OUT] + NvU32 inVblank; // [OUT] +} NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS; + +/* + * NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS + * + * This command returns FrameLock header lock pin information. + * Lock pin index returned by this command corresponds to the + * evo lock pin number. Example - lock pin index 0 means + * LOCKPIN_0. + * + * frameLockPin [out] + * This parameter returns the FrameLock pin index + * connected to FrameLock header. + * + * rasterLockPin [out] + * This parameter returns the RasterLock pin index + * connected to FrameLock header. + * + * flipLockPin [out] + * This parameter returns the FlipLock pin index + * connected to FrameLock header. + * + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS (0x5070020d) /* finn: Evaluated from "(FINN_NV50_DISPLAY_RG_INTERFACE_ID << 8) | NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_FRAME_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_RASTER_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_FLIP_LOCK_PIN_NONE (0xffffffff) +#define NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 frameLockPin; + NvU32 rasterLockPin; + NvU32 flipLockPin; +} NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS; + + + +/* _ctrl5070rg_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h new file mode 100644 index 000000000..5467bf702 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070seq.h @@ -0,0 +1,524 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include
+
+//
+// This file was generated with FINN, an NVIDIA coding tool.
+// Source file: ctrl/ctrl5070/ctrl5070seq.finn
+//
+
+
+
+
+#include "ctrl/ctrl5070/ctrl5070base.h"
+
+/*
+ * NV5070_CTRL_CMD_GET_SOR_SEQ_CTL
+ *
+ * This command returns the SOR sequencer's power up and down PCs and the
+ * sequencer program to be used for power up and down.
+ *
+ *   orNumber
+ *     The OR number for which the seq ctrls are to be modified.
+ *
+ *   puPcAlt
+ *     Alternate power up PC.
+ *
+ *   pdPc
+ *     Power down PC.
+ *
+ *   pdPcAlt
+ *     Alternate power down PC.
+ *
+ *   normalStart
+ *     Whether normal mode is using the normal or alt PC.
+ *
+ *   safeStart
+ *     Whether safe mode is using the normal or alt PC.
+ *
+ *   normalState
+ *     Whether normal state is PD or PU.
+ *
+ *   safeState
+ *     Whether safe state is PD or PU.
+ *
+ *   flags
+ *     There is only one flag defined currently:
+ *       1. GET_SEQ_PROG: Whether or not the current seq program must be
+ *          returned. The caller should set this to _YES to read the
+ *          current seq program.
+ *
+ *   seqProgram
+ *     The sequencer program consisting of power up and down sequences.
+ *     For NV50, this consists of 16 DWORDS. The program is
+ *     relevant only when the GET_SEQ_PROG flag is set to _YES.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_ARGUMENT
+ *   NV_ERR_GENERIC
+ */
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL (0x50700301U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID" */
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PU_PC_ALT_VALUE          3:0
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PD_PC_VALUE              3:0
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PD_PC_ALT_VALUE          3:0
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL         0:0
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL_NORMAL  (0x00000000U)
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL_ALT     (0x00000001U)
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL           0:0
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL_NORMAL    (0x00000000U)
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL_ALT       (0x00000001U)
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL         0:0
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PD      (0x00000000U)
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PU      (0x00000001U)
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL           0:0
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL_PD        (0x00000000U)
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL_PU        (0x00000001U)
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG       0:0
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_NO    (0x00000000U)
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_YES   (0x00000001U)
+
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SEQ_PROG_SIZE            16U
+#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       orNumber;
+
+    NvU32                       puPcAlt;
+    NvU32                       pdPc;
+    NvU32                       pdPcAlt;
+    NvU32                       normalStart;
+    NvU32                       safeStart;
+    NvU32                       normalState;
+    NvU32                       safeState;
+    NvU32                       flags;
+    NvU32                       seqProgram[NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SEQ_PROG_SIZE];
+} NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS;
+
+/*
+ *
NV5070_CTRL_CMD_SET_SOR_SEQ_CTL + * + * This command does the following in that order + * (a) Loads a specified sequencer program for power up and down. + * (b) Updates SOR sequencer's power up and down PCs, tells seq to SKIP + * current wait for vsync and waits until sequencer actually SKIPs or halts + * (see more below under SKIP_WAIT_FOR_VSYNC flag) and + * (c) Update power settings (safe/normal start and state). + * + * orNumber + * The OR number for which the seq ctrls are to be modified. + * + * puPcAlt + * Alternate power up PC. + * + * pdPc + * Power down PC. + * + * pdPcAlt + * Alternate power down PC. + * + * normalStart + * Whether normal mode should use normal or alt PC. + * + * safeStart + * Whether safe mode should use normal or alt PC. + * + * normalState + * Whether normal state should be PD or PU. + * + * safeState + * Whether safe state should be PD or PU. + * + * flags + * The following flags have been defined + * 1. SKIP_WAIT_FOR_VSYNC: Should seq be forced to skip waiting + * for vsync if it's currently waiting on such an instruction. + * If the current instruction doesn't have a wait for vsync, + * SKIP will be applied to the next one and so on until + * either sequencer halts or an instruction with a wait for + * vsync is found. The call will block until seq halts or + * SKIPs a wait for vsync. + * 2. SEQ_PROG_PRESENT: Whether or not a new seq program has + * been specified. + * + * seqProgram + * The sequencer program consisting of power up and down sequences. + * For NV50, this consists of 16 DWORDS. The program is + * relevant only when SEQ_PROG_PRESENT flags is set to _YES. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL (0x50700302U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL_ALT (0x00000001U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL 0:0 +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL_ALT (0x00000001U) +#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED 31:31 +#define 
NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED_NO      (0x00000000U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED_YES     (0x00000001U)
+
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL             0:0
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PD          (0x00000000U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PU          (0x00000001U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED       31:31
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_NO    (0x00000000U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_YES   (0x00000001U)
+
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL               0:0
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL_PD            (0x00000000U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL_PU            (0x00000001U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED         31:31
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED_NO      (0x00000000U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED_YES     (0x00000001U)
+
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC    0:0
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_NO (0x00000000U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_YES (0x00000001U)
+
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT       1:1
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_NO    (0x00000000U)
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_YES   (0x00000001U)
+
+
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SEQ_PROG_SIZE                16U
+#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS {
+    NV5070_CTRL_CMD_BASE_PARAMS base;
+    NvU32                       orNumber;
+
+    NvU32                       puPcAlt;
+    NvU32                       pdPc;
+    NvU32                       pdPcAlt;
+    NvU32                       normalStart;
+    NvU32                       safeStart;
+    NvU32                       normalState;
+    NvU32                       safeState;
+    NvU32                       flags;
+    NvU32                       seqProgram[NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SEQ_PROG_SIZE];
+} NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS;
+
+/*
+ * NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL
+ *
+ * This command returns the PIOR sequencer's power up and down PCs and the
+ * sequencer program to be used for power up and down.
+ *
+ *   orNumber
+ *     The OR number for which the seq ctrls are to be modified.
+ *
+ *   puPcAlt
+ *     Alternate power up PC.
+ *
+ *   pdPc
+ *     Power down PC.
+ *
+ *   pdPcAlt
+ *     Alternate power down PC.
+ *
+ *   normalStart
+ *     Whether normal mode is using the normal or alt PC.
+ *
+ *   safeStart
+ *     Whether safe mode is using the normal or alt PC.
+ *
+ *   normalState
+ *     Whether normal state is PD or PU.
+ *
+ *   safeState
+ *     Whether safe state is PD or PU.
+ *
+ *   flags
+ *     There is only one flag defined currently:
+ *       1. GET_SEQ_PROG: Whether or not the current seq program must be
+ *          returned. The caller should set this to _YES to read the
+ *          current seq program.
+ *
+ *   seqProgram
+ *     The sequencer program consisting of power up and down sequences.
+ *     For NV50, this consists of 8 DWORDS. The program is
+ *     relevant only when the GET_SEQ_PROG flag is set to _YES.
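+ *
+ * A minimal sketch of reading the current sequencer program (illustrative
+ * only; hClient and hDisplay are hypothetical caller-owned RM handles and
+ * error handling is omitted). Note that seqProgram is only filled in when
+ * the GET_SEQ_PROG flag is set:
+ *
+ *     NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS params = { 0 };
+ *     params.orNumber = 0;
+ *     params.flags    = NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_YES;
+ *     NV_STATUS status = NvRmControl(hClient, hDisplay,
+ *                                    NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL,
+ *                                    &params, sizeof(params));
+ *     // On NV_OK, params.seqProgram[] holds the 8-DWORD program.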
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL (0x50700303U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PU_PC_ALT_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PD_PC_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PD_PC_ALT_VALUE 3:0 + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL_ALT (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL_ALT (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PU (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PU (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG 0:0 +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_NO (0x00000000U) +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_YES (0x00000001U) + +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SEQ_PROG_SIZE 8U +#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 puPcAlt; + NvU32 pdPc; + NvU32 pdPcAlt; + NvU32 normalStart; + NvU32 safeStart; + NvU32 normalState; + NvU32 safeState; + NvU32 flags; + NvU32 seqProgram[NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SEQ_PROG_SIZE]; +} NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS; + +/* + * NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL + * + * This command does the following in that order + * (a) Loads a specified sequencer program for power up and down. + * (b) Updates PIOR sequencer's power up and down PCs, tells seq to SKIP + * current wait for vsync and waits until sequencer actually SKIPs or halts + * (see more below under SKIP_WAIT_FOR_VSYNC flag) and + * (c) Update power settings (safe/normal start and state). + * + * orNumber + * The OR number for which the seq ctrls are to be modified. + * + * puPcAlt + * Alternate power up PC. + * + * pdPc + * Power down PC. + * + * pdPcAlt + * Alternate power down PC. + * + * normalStart + * Whether normal mode should use normal or alt PC + * + * safeStart + * Whether safe mode should use normal or alt PC + * + * normalState + * Whether normal state should be PD or PU. + * + * safeState + * Whether safe state should be PD or PU. + * + * flags + * The following flags have been defined + * 1. SKIP_WAIT_FOR_VSYNC: Should seq be forced to skip waiting + * for vsync if it's currently waiting on such an instruction. + * If the current instruction doesn't have a wait for vsync, + * SKIP will be applied to the next one and so on until + * either sequencer halts or an instruction with a wait for + * vsync is found. The call will block until seq halts or + * SKIPs a wait for vsync. + * 2. SEQ_PROG_PRESENT: Whether or not a new seq program has + * been specified. 
+ * + * seqProgram + * The sequencer program consisting of power up and down sequences. + * For NV50, this consists of 8 DWORDS. The program is + * relevant only when SEQ_PROG_PRESENT flags is set to _YES. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL (0x50700304U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_VALUE 3:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_YES (0x00000001U) + + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL_ALT (0x00000001U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL_NORMAL (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL_ALT (0x00000001U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PU (0x00000001U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PD (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PU (0x00000001U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED 31:31 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC 0:0 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_NO (0x00000000U) +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT 1:1 +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_NO (0x00000000U) 
+#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_YES (0x00000001U) + +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SEQ_PROG_SIZE 8U +#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + NvU32 orNumber; + + NvU32 puPcAlt; + NvU32 pdPc; + NvU32 pdPcAlt; + NvU32 normalStart; + NvU32 safeStart; + NvU32 normalState; + NvU32 safeState; + NvU32 flags; + NvU32 seqProgram[NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SEQ_PROG_SIZE]; +} NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS; + +/* + * NV5070_CTRL_CMD_CTRL_SEQ_PROG_SPEED + * + * This call allows a fast sequencer program to be selected. It's intended for + * situations where panel sequencing is not required and the usual sequencing + * delays cost too much time. + * + * displayId + * The corresponding display ID. (Note that this call is currently only + * supported for LVDS on an internal encoder, i.e. a SOR.) + * cmd + * The command to perform. Valid values are: + * NV5070_CTRL_SEQ_PROG_SPEED_CMD_GET + * Get the current state. + * NV5070_CTRL_SEQ_PROG_SPEED_CMD_SET + * Set the current state. + * state + * The state of panel sequencing for this displayId. This is an input + * when cmd = SET and an output when cmd = GET. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_NOT_SUPPORTED + * + */ + +#define NV5070_CTRL_CMD_CTRL_SEQ_PROG_SPEED (0x50700305U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_SEQ_PROG_SPEED_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SEQ_PROG_SPEED_CMD_GET (0x00000000U) +#define NV5070_CTRL_SEQ_PROG_SPEED_CMD_SET (0x00000001U) + +#define NV5070_CTRL_SEQ_PROG_SPEED_STATE_NORMAL (0x00000000U) +#define NV5070_CTRL_SEQ_PROG_SPEED_STATE_FAST (0x00000001U) + +#define NV5070_CTRL_SEQ_PROG_SPEED_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV5070_CTRL_SEQ_PROG_SPEED_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + + NvU32 displayId; + + NvU32 cmd; + NvU32 state; +} NV5070_CTRL_SEQ_PROG_SPEED_PARAMS; + +/* _ctrl5070seq_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h new file mode 100644 index 000000000..7efc5fc47 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070system.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070system.finn +// + + + + +#include "ctrl/ctrl5070/ctrl5070base.h" +#include "ctrl/ctrl5070/ctrl5070common.h" // NV5070_CTRL_CMD_MAX_HEADS + +/* extract cap bit setting from tbl */ +#define NV5070_CTRL_SYSTEM_GET_CAP(tbl,c) (((NvU8)tbl[(1?c)]) & (0?c)) + +/* caps format is byte_index:bit_mask */ +#define NV5070_CTRL_SYSTEM_CAPS_BUG_237734_REQUIRES_DMI_WAR 0:0x01 // Deprecated +#define NV5070_CTRL_SYSTEM_CAPS_STEREO_DIN_AVAILABLE 0:0x02 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_381003_MULTIWAY_AFR_WAR 0:0x04 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_538079_COLOR_COMPRESSION_SUPPORTED 0:0x08 // Deprecated +#define NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH 0:0x10 +#define NV5070_CTRL_SYSTEM_CAPS_DEEP_COLOR_SUPPORT 0:0x20 +#define NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY 0:0x40 + + +/* size in bytes of display caps table */ +#define NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE 1 + +/* + * NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2 + * + * This command returns the set of display capabilities for the parent device + * in the form of an array of unsigned bytes. Display capabilities + * include supported features and required workarounds for the display + * engine(s) within the device, each represented by a byte offset into the + * table and a bit position within that byte. The set of display capabilities + * will be normalized across all GPUs within the device (a feature capability + * will be set only if it's supported on all GPUs while a required workaround + * capability will be set if any of the GPUs require it). + * + * [out] capsTbl + * This caps table array is where the display cap bits will be transferred + * by the RM. The caps table is an array of unsigned bytes. + * + * Possible status values returned are: + * NV_OK + */ +#define NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2 (0x50700709) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SYSTEM_INTERFACE_ID << 8) | NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID" */ + +#define NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS { + NV5070_CTRL_CMD_BASE_PARAMS base; + + NvU8 capsTbl[NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE]; +} NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS; + +/* _ctrl5070system_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h new file mode 100644 index 000000000..6741891c4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5070/ctrl5070verif.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl5070/ctrl5070verif.finn +// + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl5080.h b/src/common/sdk/nvidia/inc/ctrl/ctrl5080.h new file mode 100644 index 000000000..88510e95f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl5080.h @@ -0,0 +1,215 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl5080.finn
+//
+
+
+
+
+#include "ctrl/ctrlxxxx.h"
+#include "ctrl/ctrl0080/ctrl0080dma.h"  /* NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS */
+#include "ctrl/ctrl2080/ctrl2080dma.h"  /* NV2080_CTRL_DMA_* */
+#include "ctrl/ctrl2080/ctrl2080fb.h"   /* NV2080_CTRL_FB_* */
+#include "ctrl/ctrl2080/ctrl2080fifo.h" /* NV2080_CTRL_FIFO_* */
+#include "ctrl/ctrl2080/ctrl2080gpu.h"  /* NV2080_CTRL_GPU_* */
+#include "ctrl/ctrl2080/ctrl2080gr.h"   /* NV2080_CTRL_GR_* */
+/* NV5080_DEFERRED_API - deferred RmControl commands */
+
+
+#define NV5080_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x5080, NV5080_CTRL_##cat,idx)
+
+/* Command categories (6bits) */
+#define NV5080_CTRL_RESERVED (0x00)
+#define NV5080_CTRL_DEFERRED (0x01)
+
+/*
+ * NV5080_CTRL_CMD_NULL
+ *
+ * This command does nothing.
+ * This command does not take any parameters.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ */
+#define NV5080_CTRL_CMD_NULL (0x50800000) /* finn: Evaluated from "(FINN_NV50_DEFERRED_API_CLASS_RESERVED_INTERFACE_ID << 8) | 0x0" */
+
+
+
+
+
+
+/*
+ * NV5080_CTRL_CMD_DEFERRED_API (deprecated; use NV5080_CTRL_CMD_DEFERRED_API_V2 instead)
+ *
+ * This command sets up a deferred api call.
+ *   hApiHandle
+ *     Client Unique Handle which is used as the data of a sw method to invoke
+ *     the api in the future
+ *   cmd
+ *     The rmcontrol cmd to invoke as a deferred api.
+ *   flags_delete
+ *     Indicates if an explicit delete is needed (default behavior is to
+ *     auto delete after SW method has executed/completed).
+ *   flags_wait_for_tlb_flush
+ *     Indicates if the API should wait for an InvalidateTlb to also occur
+ *     (not just that it's executed) before being considered completed and
+ *     works in conjunction with flags_delete.
+ *   hClientVA, hDeviceVA
+ *     Client/Device handles of the owner of the virtual address space
+ *     to be updated (used with the FillPteMem API bundle)
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV5080_CTRL_CMD_DEFERRED_API (0x50800101) /* finn: Evaluated from "(FINN_NV50_DEFERRED_API_CLASS_DEFERRED_INTERFACE_ID << 8) | NV5080_CTRL_DEFERRED_API_PARAMS_MESSAGE_ID" */
+
+#define NV5080_CTRL_DEFERRED_API_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV5080_CTRL_DEFERRED_API_PARAMS {
+    NvHandle hApiHandle;
+    NvU32    cmd;
+    NvU32    flags;
+    NvHandle hClientVA;
+    NvHandle hDeviceVA;
+
+    union {
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS InitCtx, 8);
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS PromoteCtx, 8);
+
+        NV2080_CTRL_GPU_EVICT_CTX_PARAMS EvictCtx;
+
+        NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS InvalidateTlb;
+
+        NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS FillPteMem, 8);
+
+        NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS CacheAllocPolicy;
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS ZcullCtxsw, 8);
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS PmCtxsw, 8);
+
+        NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_PARAMS CachePromotePolicy;
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS PreemptionCtxsw, 8);
+    } api_bundle;
+} NV5080_CTRL_DEFERRED_API_PARAMS;
+
+#define NV5080_CTRL_CMD_DEFERRED_API_FLAGS_DELETE                    0:0
+#define NV5080_CTRL_CMD_DEFERRED_API_FLAGS_DELETE_EXPLICIT           (0x00000001)
+#define NV5080_CTRL_CMD_DEFERRED_API_FLAGS_DELETE_IMPLICIT           (0x00000000)
+
+#define NV5080_CTRL_CMD_DEFERRED_API_FLAGS_WAIT_FOR_TLB_FLUSH        1:1
+#define NV5080_CTRL_CMD_DEFERRED_API_FLAGS_WAIT_FOR_TLB_FLUSH_FALSE  (0x00000000)
+#define NV5080_CTRL_CMD_DEFERRED_API_FLAGS_WAIT_FOR_TLB_FLUSH_TRUE   (0x00000001)
+/*
+ * NV5080_CTRL_CMD_DEFERRED_API_V2
+ *
+ * This command sets up a deferred api call.
+ *   hApiHandle
+ *     Client Unique Handle which is used as the data of a sw method to invoke
+ *     the api in the future
+ *   cmd
+ *     The rmcontrol cmd to invoke as a deferred api.
+ *   flags_delete
+ *     Indicates if an explicit delete is needed (default behavior is to
+ *     auto delete after SW method has executed/completed).
+ *   flags_wait_for_tlb_flush
+ *     Indicates if the API should wait for an InvalidateTlb to also occur
+ *     (not just that it's executed) before being considered completed and
+ *     works in conjunction with flags_delete.
+ *   hClientVA, hDeviceVA
+ *     Client/Device handles of the owner of the virtual address space
+ *     to be updated (used with the FillPteMem API bundle)
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV5080_CTRL_CMD_DEFERRED_API_V2 (0x50800103) /* finn: Evaluated from "(FINN_NV50_DEFERRED_API_CLASS_DEFERRED_INTERFACE_ID << 8) | NV5080_CTRL_DEFERRED_API_V2_PARAMS_MESSAGE_ID" */
+
+#define NV5080_CTRL_DEFERRED_API_V2_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NV5080_CTRL_DEFERRED_API_V2_PARAMS {
+    NvHandle hApiHandle;
+    NvU32    cmd;
+    NvU32    flags;
+    NvHandle hClientVA;
+    NvHandle hDeviceVA;
+
+    union {
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS InitCtx, 8);
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS PromoteCtx, 8);
+
+        NV2080_CTRL_GPU_EVICT_CTX_PARAMS EvictCtx;
+
+        NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS InvalidateTlb;
+
+        NV_DECLARE_ALIGNED(NV0080_CTRL_DMA_FILL_PTE_MEM_PARAMS FillPteMem, 8);
+
+        NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS CacheAllocPolicy;
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS ZcullCtxsw, 8);
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS PmCtxsw, 8);
+
+        NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_PARAMS CachePromotePolicy;
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS DisableChannels, 8);
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS PreemptionCtxsw, 8);
+
+        NV_DECLARE_ALIGNED(NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS ChannelInfoUpdate, 8);
+    } api_bundle;
+} NV5080_CTRL_DEFERRED_API_V2_PARAMS;
+
+/*
+ * NV5080_CTRL_CMD_REMOVE_API
+ *
+ * This command removes an explicit deferred api call.
+ *   hApiHandle
+ *     Client Unique Handle which is used as the data of a sw method to invoke
+ *     the api in the future
+ * Possible status values returned are:
+ *   NV_OK
+ *   NV_ERR_INVALID_PARAM_STRUCT
+ */
+
+#define NV5080_CTRL_CMD_REMOVE_API (0x50800102) /* finn: Evaluated from "(FINN_NV50_DEFERRED_API_CLASS_DEFERRED_INTERFACE_ID << 8) | NV5080_CTRL_REMOVE_API_PARAMS_MESSAGE_ID" */
+
+#define NV5080_CTRL_REMOVE_API_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV5080_CTRL_REMOVE_API_PARAMS {
+    NvHandle hApiHandle;
+} NV5080_CTRL_REMOVE_API_PARAMS;
+
+/* _ctrl5080_h_ */
+
diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl83de.h b/src/common/sdk/nvidia/inc/ctrl/ctrl83de.h
new file mode 100644
index 000000000..12d792a2a
--- /dev/null
+++ b/src/common/sdk/nvidia/inc/ctrl/ctrl83de.h
@@ -0,0 +1,36 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl83de.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl83de/ctrl83dedebug.h" + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h b/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h new file mode 100644 index 000000000..f54d5a6f2 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83debase.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl83de/ctrl83debase.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* GT200_DEBUG control commands and parameters */ + +#define NV83DE_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x83DEU, NV83DE_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV83DE_CTRL_RESERVED (0x00) +#define NV83DE_CTRL_GR (0x01) +#define NV83DE_CTRL_FIFO (0x02) +#define NV83DE_CTRL_DEBUG (0x03) +#define NV83DE_CTRL_INTERNAL (0x04) + + +/* + * NV83DE_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
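// A small aside on the command encoding used throughout these headers: the evaluated
// values (for example NV83DE_CTRL_CMD_NULL == 0x83de0000 and, further below,
// NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG == 0x83de0307) are consistent with the class
// in bits 31:16, the 6-bit category/interface ID in bits 13:8 and the message index in
// bits 7:0. The real composition is done by NVXXXX_CTRL_CMD in ctrlxxxx.h, which is not
// part of this diff, so the helper below is only an illustrative sketch of that layout.
static inline NvU32 nv83deCtrlCmdSketch(NvU32 category, NvU32 index)
{
    return (0x83DEu << 16) | ((category & 0x3Fu) << 8) | (index & 0xFFu);
}
// nv83deCtrlCmdSketch(NV83DE_CTRL_RESERVED, 0x0) == NV83DE_CTRL_CMD_NULL
// nv83deCtrlCmdSketch(NV83DE_CTRL_DEBUG,    0x7) == NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG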
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV83DE_CTRL_CMD_NULL (0x83de0000) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl83debase_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h b/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h new file mode 100644 index 000000000..cf07d7235 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl83de/ctrl83dedebug.h @@ -0,0 +1,1075 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl83de/ctrl83dedebug.finn +// + + + + +#include "ctrl/ctrl83de/ctrl83debase.h" +#include "nvstatus.h" + +#include "ctrl/ctrl2080/ctrl2080gpu.h" + + + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG + * + * This command sets the MMU DEBUG mode. This is Fermi-onwards feature. + * If the query is made on an incorrect platform (for example, pre-Fermi) + * the call will return with an NV_ERR_NOT_SUPPORTED error. + * + * action + * The possible action values are: + * - NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_ENABLE + * This enables the MMU debug mode if possible. If however, any another + * client has already disabled the mode (via NV83DE call) then this + * operation returns NV_ERR_STATE_IN_USE. + * + * - NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_DISABLE + * This disables the MMU debug mode if possible. If however, any another + * client has already enabled the mode (via NV83DE call) then this + * operation returns NV_ERR_STATE_IN_USE. + * + * - NV83DE_CTRL_CMD_DEBUG_RELEASE_MMU_DEBUG_REQUESTS + * This operation releases all the client's outstanding requests to enable + * or disable the MMU debug mode. 
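// A minimal usage sketch for the MMU debug mode control described above. The parameter
// structure and the _ENABLE/_DISABLE/_RELEASE action values are defined just below this
// comment block; the client handles and the commented-out dispatch call are assumptions,
// not something this header defines.
static void sketchEnableMmuDebugMode(NvHandle hClient, NvHandle hDebugger)
{
    NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS mmuDbg = { 0 };
    mmuDbg.action = NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_ENABLE;
    // status = NvRmControl(hClient, hDebugger, NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG,
    //                      &mmuDbg, sizeof(mmuDbg));
    // NV_ERR_STATE_IN_USE here means another client holds an outstanding request for the
    // opposite mode; a client can drop its own requests with the
    // _RELEASE_MMU_DEBUG_REQUESTS action.
}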
+ * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG (0x83de0307) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS { + NvU32 action; +} NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS; + +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_ENABLE (0x00000001) +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_MMU_DEBUG_DISABLE (0x00000002) +#define NV83DE_CTRL_CMD_DEBUG_RELEASE_MMU_DEBUG_REQUESTS (0x00000003) + +/* + * NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_DEBUG + * + * This command gets the value of currently configured MMU DEBUG mode. + * This is Fermi-onwards feature. If the query is made on an incorrect + * platform (for example, pre-Fermi) the call will return with an + * NV_ERR_NOT_SUPPORTED error. + * + * value + * This parameter returns the configured value. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_DEBUG (0x83de0308) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS { + NvU32 value; +} NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS; + +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_DEBUG_ENABLED (0x00000001) +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_MMU_DEBUG_DISABLED (0x00000002) + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_EXCEPTION_MASK + * + * This command allows the caller to filter events (which are also referred to + * as "notifications", not to be confused with true notifiers), in the RM, + * fairly close to the source of the events. In other words, depending on the + * value of the exceptionMask, some events may not be raised. + * + * The original reason for creating this command is that the CUDA driver needs + * to place the RM and the GPU(s) into SM debug mode, for some GPUs, in order to + * activate various features and HW bug WARs. Being in SM debug mode has the + * side effect of exposing the caller to debug events, which are generally + * undesirable for the CUDA driver, but desirable for the CUDA debugger. This + * command allows each client to receive only the events that it is + * specifically interested in. + * + * If this command is never invoked, then the RM will behave as if + * exceptionMask==NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_ALL. + * + * As with many of the debugger features, this is Fermi-onwards feature. If this + * API call is issued on an earlier platform, it will return an + * NV_ERR_NOT_SUPPORTED error. + * + * exceptionMask + * This identifies the category of notifications that the debug client + * is interested in. + * + * Here are the allowed values for exceptionMask: + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_FATAL + * This means that the caller wishes to receive events for any exceptions + * that are classified as fatal. For example, + * HWW_WARP_ESR_ERROR_PC_OVERFLOW is one such exception. 
+ * + * If any debug object, in any channel, has registered to receive events + * for _FATAL exceptions, then RC recovery will be deferred if such an + * exception occurs. + * + * Also, if a client is registered for fatal exceptions, RC error recovery + * will be deferred. If not registered for fatal exceptions, then fatal + * errors will (as usual) cause RC recovery to run immediately. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_TRAP + * This means that an event will be raised when an SM executes a bpt.pause + * instruction. Note that on Fermi, the SM raises HWW when bpt.trap is + * executed as well, so this event will also be raised in that situation. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_SINGLE_STEP + * This means that an event will be raised a single step completion + * interrupt is received. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_INT + * This means that an event will be raised when an SM executes a bpt.int + * instruction. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_NONE + * This means that no debug events will be raised. + * + * - NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_ALL + * This means that an event will be raised for any and all debug + * exceptions. This is the default behavior. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_EXCEPTION_MASK (0x83de0309) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS { + NvU32 exceptionMask; +} NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS; + +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_FATAL (0x00000001) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_TRAP (0x00000002) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_SINGLE_STEP (0x00000004) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_INT (0x00000008) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_CILP (0x00000010) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PREEMPTION_STARTED (0x00000020) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_NONE (0x00000000) +#define NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_ALL (0x0000FFFF) + + + +/* + * NV83DE_CTRL_CMD_READ_SINGLE_SM_ERROR_STATE + * + * This command reads the SM error state of a single SM. The error state + * consists of several 32-bit values. + * + * Note that this acts upon the currently resident GR (graphics) context. It is + * up to the RM client to ensure that the desired GR context is resident, before + * making this API call. + * + * See also: NV83DE_CTRL_CMD_READ_ALL_SM_ERROR_STATES. + * + * This is a Fermi-and-later feature. If this API call is issued on an earlier + * platform, it will return an NV_ERR_NOT_SUPPORTED error. + * + * Parameters: + * + * hTargetChannel (input) + * This identifies the channel. + * + * smID (input) + * This identifies the SM. Allowed values are any valid SM ID. The RM + * grProgramSmIdNumbering_HAL() routines are a good place to look, in order + * to see how SM IDs are set up. The main idea is that the RM chooses a + * numbering scheme, and then informs the GPU hardware of that scheme, by + * actually recording each SM ID into the GPU, via a series of PRI (GPU + * register) writes. + * + * smErrorState.hwwGlobalEsr (output) + * Value of the Global Error Status Register. 
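// Referring back to NV83DE_CTRL_CMD_DEBUG_SET_EXCEPTION_MASK above: exceptionMask is an
// ordinary bitmask, so a debugger that wants breakpoint and single-step notifications but
// does not want fatal-exception handling (and the deferred RC recovery that comes with it)
// might combine the bits as sketched here. Handles and the commented dispatch call are
// assumptions.
static void sketchSetExceptionMask(NvHandle hClient, NvHandle hDebugger)
{
    NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS maskParams = { 0 };
    maskParams.exceptionMask = NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_TRAP |
                               NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_SINGLE_STEP |
                               NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_INT;
    // status = NvRmControl(hClient, hDebugger, NV83DE_CTRL_CMD_DEBUG_SET_EXCEPTION_MASK,
    //                      &maskParams, sizeof(maskParams));
    // Leaving _FATAL out means fatal SM exceptions run RC recovery immediately, as noted
    // above; the default when this control is never issued is _ALL.
}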
+ * + * smErrorState.hwwWarpEsr (output) + * Value of the Warp Error Status Register. + * + * smErrorState.hwwWarpEsrPc (output) : DEPRECATED for 64b PC below, will hold low 32b for now + * Value of the Warp Error Status Register Program Counter. + * + * smErrorState.hwwGlobalEsrReportMask (output) + * Value of the Global Error Status Register Report Mask. + * + * smErrorState.hwwWarpEsrReportMask (output) + * Value of the Error Status Register Report Mask. + * + * smErrorState.hwwWarpEsrPc64 (output) + * Value of the 64b Warp Error Status Register Program Counter. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_READ_SINGLE_SM_ERROR_STATE (0x83de030b) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_MESSAGE_ID" */ + +typedef struct NV83DE_SM_ERROR_STATE_REGISTERS { + NvU32 hwwGlobalEsr; + NvU32 hwwWarpEsr; + NvU32 hwwWarpEsrPc; + NvU32 hwwGlobalEsrReportMask; + NvU32 hwwWarpEsrReportMask; + NV_DECLARE_ALIGNED(NvU64 hwwEsrAddr, 8); + NV_DECLARE_ALIGNED(NvU64 hwwWarpEsrPc64, 8); + NvU32 hwwCgaEsr; + NvU32 hwwCgaEsrReportMask; +} NV83DE_SM_ERROR_STATE_REGISTERS; + +#define NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS { + NvHandle hTargetChannel; + NvU32 smID; + NV_DECLARE_ALIGNED(NV83DE_SM_ERROR_STATE_REGISTERS smErrorState, 8); +} NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS; + +/* + * NV83DE_CTRL_CMD_READ_ALL_SM_ERROR_STATES + * + * This command reads the SM error state of all SMs. + * + * Note that this acts upon the currently resident GR (graphics) context. It is + * up to the RM client to ensure that the desired GR context is resident, before + * making this API call. + * + * Parameters: + * + * hTargetChannel (input) + * This identifies the channel. + * + * numSMsToRead (input) + * This should be set to the number of SMs that the RM is supposed to read. + * It will typically be the total number of SMs in the GPU. For best + * results, you should not pass in a value that is greater than the number + * of SMs that the GPU actually contains. + * + * startingSM (input) + * This should be set to the starting index of the first SM to read. + * Clients may use this to read data from SMs beyond the maximum specified + * in NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL. + * + * smErrorStateArray (output) + * This is an array of NV83DE_SM_ERROR_STATE_REGISTERS structs. Please see + * the description of NV83DE_CTRL_CMD_READ_SINGLE_SM_ERROR_STATE, above, for + * a description of the individual fields. + * + * mmuFault.valid (output) + * This is NV_TRUE if an MMU fault occurred on the target channel since the last call to + * NV83DE_CTRL_CMD_CLEAR_ALL_SM_ERROR_STATES to this channel. + * + * mmuFault.faultInfo (output) + * This is the value of the first NV_PFIFO_INTR_MMU_FAULT_INFO that caused the MMU fault. + * + * mmuFaultInfo (output) + * Deprecated field, see mmuFault.faultInfo + * + * This is a Fermi-and-later feature. If this API call is issued on an earlier + * platform, it will return an NV_ERR_NOT_SUPPORTED error. 
+ * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_READ_ALL_SM_ERROR_STATES (0x83de030c) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL 100 + +typedef struct NV83DE_MMU_FAULT_INFO { + NvBool valid; + NvU32 faultInfo; +} NV83DE_MMU_FAULT_INFO; + +#define NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS { + NvHandle hTargetChannel; + NvU32 numSMsToRead; + NV_DECLARE_ALIGNED(NV83DE_SM_ERROR_STATE_REGISTERS smErrorStateArray[NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL], 8); + NvU32 mmuFaultInfo; // Deprecated, use mmuFault field instead + NV83DE_MMU_FAULT_INFO mmuFault; + NvU32 startingSM; +} NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS; + +/* + * NV83DE_CTRL_CMD_CLEAR_SINGLE_SM_ERROR_STATE + * + * This command clears the SM error state of a single SM. The error state + * consists of several 32-bit values. + * + * Note that this acts upon the currently resident GR (graphics) context. It is + * up to the RM client to ensure that the desired GR context is resident, before + * making this API call. + * + * See also: NV83DE_CTRL_CMD_CLEAR_ALL_SM_ERROR_STATES. + * + * This API call has a slightly different effect than what would occur as a + * result of issuing a read-modify-write via _READ_SINGLE_SM_ERROR_STATE and + * _WRITE_SINGLE_SM_ERROR_STATE. The difference arises due to the fact that RM + * is caching the error state, to compensate for the fact that the real GPU + * error state must be cleared very early on in the exception handling routine. + * + * In other words, the _READ data is stale by design, and cannot be used in a + * read-modify-write routine from user space. Therefore, in order to clear the + * SM error state, a separate RM API call is required. + * + * This is a Fermi-and-later feature. If this API call is issued on an earlier + * platform, it will return an NV_ERR_NOT_SUPPORTED error. + * + * Parameters: + * + * hTargetChannel (input) + * This identifies the channel. + * + * smID (input) + * This identifies the SM. Allowed values are any valid SM ID. Please see + * NV83DE_CTRL_CMD_READ_SINGLE_SM_ERROR_STATE for further details. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE (0x83de030f) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS { + NvHandle hTargetChannel; + NvU32 smID; +} NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS; + +/* + * NV83DE_CTRL_CMD_CLEAR_ALL_SM_ERROR_STATES + * + * This command clears the SM error state of all SMs. + * + * Note that this acts upon the currently resident GR (graphics) context. It is + * up to the RM client to ensure that the desired GR context is resident, before + * making this API call. + * + * Parameters: + * + * hTargetChannel (input) + * This identifies the channel. 
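// Because smErrorStateArray in the READ_ALL_SM_ERROR_STATES parameters above holds at most
// NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL (100) entries, covering every SM on a large GPU takes
// several calls that advance startingSM each time. A rough paging sketch; totalSmCount,
// the handles and the commented dispatch call are assumptions (the SM count would come
// from the NV2080 GR info controls or similar).
static void sketchReadAllSmErrorStates(NvHandle hClient, NvHandle hDebugger,
                                       NvHandle hTargetChannel, NvU32 totalSmCount)
{
    NvU32 smBase;
    for (smBase = 0; smBase < totalSmCount; smBase += NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL)
    {
        NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS readAll = { 0 };
        NvU32 remaining = totalSmCount - smBase;

        readAll.hTargetChannel = hTargetChannel;
        readAll.startingSM     = smBase;
        readAll.numSMsToRead   = (remaining < NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL) ?
                                 remaining : NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL;
        // status = NvRmControl(hClient, hDebugger,
        //                      NV83DE_CTRL_CMD_DEBUG_READ_ALL_SM_ERROR_STATES,
        //                      &readAll, sizeof(readAll));
        // smErrorStateArray[0 .. numSMsToRead-1] now holds the cached per-SM error state,
        // and readAll.mmuFault reports whether an MMU fault hit this channel since the
        // last clear.
    }
}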
+ * + * numSMsToClear (input) + * This should be set to the number of SMs that the RM is supposed to write. + * It will typically be the total number of SMs in the GPU. For best + * results, you should not pass in a value that is greater than the number + * of SMs that the GPU actually contains. + * + * Please see the description of + * NV83DE_CTRL_CMD_CLEAR_SINGLE_SM_ERROR_STATE, above, for a description of + * why these two _CLEAR API calls are required. + * + * This is a Fermi-and-later feature. If this API call is issued on an earlier + * platform, it will return an NV_ERR_NOT_SUPPORTED error. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_CLEAR_ALL_SM_ERROR_STATES (0x83de0310) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS { + NvHandle hTargetChannel; + NvU32 numSMsToClear; +} NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS; + + + +#define NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_PARAMS_DEFINED 1 +#define NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_HAS_RESIDENT_CHANNEL 1 +typedef struct NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_PARAMS { + NvU32 waitForEvent; + NvHandle hResidentChannel; +} NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_PARAMS; + + + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE + * + * This command set the type of action we want on RM encountering an error + * and issuing a STOP_TRIGGER. The action will be to either braodcast the + * STOP_TRIGGER to all SM's, or just send to the SM hitting an exception. + * + * stopTriggerType + * This identifies trigger type to initiate. + * + * Here are the allowed values for stopTriggerType: + * + * - NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_SINGLE_SM + * This means that we will issue STOP_TRIGGER to the single SM + * noted in the exception + * + * - NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_BROADCAST + * This means that we will issue STOP_TRIGGER to all SM's + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE (0x83de0313) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS_MESSAGE_ID (0x13U) + +typedef struct NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS { + NvU32 stopTriggerType; +} NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS; + +#define NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_SINGLE_SM (0x00000001) +#define NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_BROADCSAT (0x00000002) + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING + * + * This command sets the type of action we want on RM encountering a + * SINGLE_STEP exception while in CILP debug mode. In the normal case, + * non-pausing, we ignore these exceptions as on prior chips. When the + * user selects pausing, it will cause the exception to be treated just + * like we had seen an SM error or BPT_PAUSE. 
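// The stop-trigger selection above is a single enum-style field. Note that the broadcast
// value really is spelled NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_BROADCSAT in this
// header, so callers must use that exact identifier. Handles and the commented dispatch
// call are assumptions.
static void sketchBroadcastStopTrigger(NvHandle hClient, NvHandle hDebugger)
{
    NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS trig = { 0 };
    trig.stopTriggerType = NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_BROADCSAT; // all SMs
    // status = NvRmControl(hClient, hDebugger,
    //                      NV83DE_CTRL_CMD_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE,
    //                      &trig, sizeof(trig));
}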
+ * + * singleStepHandling + * This identifies single step handling type to use. + * + * Here are the allowed values for singleStepHandling: + * + * - NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_NONPAUSING + * Treat SINGLE_STEP exceptions while in debug mode as non-pausing, + * which is the default/normal mode in the interrupt pre-process + * function, where they are ignored. + * + * - NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PAUSING + * Treat SINGLE_STEP exceptions while in debug mode as pausing, + * which means in the interrupt pre-process function they will + * be treated like BPT_PAUSE and SM error exceptions + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING (0x83de0314) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS_MESSAGE_ID (0x14U) + +typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS { + NvU32 singleStepHandling; +} NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS; + +#define NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_NONPAUSING (0x00000001) +#define NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PAUSING (0x00000002) + +/* + * NV83DE_CTRL_CMD_DEBUG_READ_MEMORY + * + * This command reads a block of memory. + * This command is deprecated in favor of NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY + * + * hMemory [IN] + * The handle to the memory being accessed. If hMemory is not accessible + * from the caller's address space, NV_ERR_INSUFFICIENT_PERMISSIONS + * is returned. + * + * length [IN/OUT] + * Number of bytes to read, as well as the number of bytes actually read + * returned. + * + * offset [IN] + * The offset into the physical memory region given by the handle above. + * + * buffer [OUT] + * The data read is returned in this buffer. + * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ACCESS_TYPE + * NV_ERR_INSUFFICIENT_PERMISSIONS + * + */ +#define NV83DE_CTRL_CMD_DEBUG_READ_MEMORY (0x83de0315) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS_MESSAGE_ID (0x15U) + +typedef struct NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS { + NvU32 hMemory; + NvU32 length; + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvP64 buffer, 8); +} NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS; + +/* + * NV83DE_CTRL_CMD_DEBUG_WRITE_MEMORY + * + * This command writes a block of memory. + * This command is deprecated in favor of NV83DE_CTRL_CMD_DEBUG_WRITE_BATCH_MEMORY + * + * hMemory [IN] + * The handle to the memory being accessed. If hMemory is not accessible + * from the caller's address space, NV_ERR_INSUFFICIENT_PERMISSIONS + * is returned. + * + * length [IN/OUT] + * Number of bytes to write, as well as the number of bytes actually + * written. + * + * offset [IN] + * The offset into the physical memory region given by the handle above. + * + * buffer [IN] + * The data to be written is sent in this buffer. 
+ * + * Possible status values returned are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ACCESS_TYPE + * NV_ERR_INSUFFICIENT_PERMISSIONS + */ +#define NV83DE_CTRL_CMD_DEBUG_WRITE_MEMORY (0x83de0316) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS_MESSAGE_ID (0x16U) + +typedef struct NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS { + NvU32 hMemory; + NvU32 length; + NV_DECLARE_ALIGNED(NvU64 offset, 8); + NV_DECLARE_ALIGNED(NvP64 buffer, 8); +} NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS; + +/* + * NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT + * + * This command suspends a SM context associated with the debugger object. + * + * When the suspend call returns, context associated with the debugger object + * should not be actively executing any code on any SM. The channel will have + * been disabled if not resident on GR, or have its SM suspended if it was resident. + * + * This is a Fermi-and-later feature. If this API call is issued on an earlier + * platform, it will return an NV_ERR_NOT_SUPPORTED error. + * + * waitForEvent + * This return param indicates if the call had to issue a Preempt, + * therefore it is in process and user may need to wait for it. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_STATE + */ +#define NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT (0x83de0317) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS_MESSAGE_ID (0x17U) + +typedef NV83DE_CTRL_CMD_DEBUG_SUSPEND_ALL_CONTEXTS_FOR_CLIENT_PARAMS NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS; + +/* + * NV83DE_CTRL_CMD_DEBUG_RESUME_CONTEXT + * + * This command safely resumes the SM context associated with the debugger object. + * + * This is a Fermi-and-later feature. If this API call is issued on an earlier + * platform, it will return an NV_ERR_NOT_SUPPORTED error. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_CLIENT + * NV_ERR_OBJECT_NOT_FOUND + * NV_ERR_INVALID_STATE + */ +#define NV83DE_CTRL_CMD_DEBUG_RESUME_CONTEXT (0x83de0318) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x18" */ + +/* + * NV83DE_CTRL_CMD_DEBUG_GET_HANDLES + * + * This command returns relevant handles for the debug object + * This command is only available on debug and develop builds. + * + * Possible return values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_CLIENT + * NV_ERR_INVALID_OBJECT_HANDLE + * + */ +#define NV83DE_CTRL_CMD_DEBUG_GET_HANDLES (0x83de0319) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x19" */ + +typedef struct NV83DE_CTRL_DEBUG_GET_HANDLES_PARAMS { + NvHandle hChannel; + NvHandle hSubdevice; +} NV83DE_CTRL_DEBUG_GET_HANDLES_PARAMS; + +/* + * NV83DE_CTRL_CMD_READ_SURFACE + * + * This command allows the caller to copy the data from a specified gpuVA + * to a usermode buffer. Before copying, this command validates whether or + * not the virtual address (VA) range provided as input has valid and allocated + * pages mapped to it in its entirety. 
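// A sketch of the suspend/resume pairing defined above. SUSPEND_CONTEXT reports, through
// waitForEvent, whether a preempt had to be issued and is still in flight, and
// hResidentChannel names the channel that was resident at that point. The waiting
// mechanism itself and the commented dispatch calls are outside this header and are only
// assumptions here.
static void sketchSuspendResumeContext(NvHandle hClient, NvHandle hDebugger)
{
    NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS susp = { 0 };
    // status = NvRmControl(hClient, hDebugger, NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT,
    //                      &susp, sizeof(susp));
    if (susp.waitForEvent)
    {
        // A preempt is in progress; wait for the corresponding completion event before
        // touching SM state.
    }
    // ... inspect or modify state while the context is quiescent ...
    // RESUME_CONTEXT has no parameter structure of its own in this header:
    // status = NvRmControl(hClient, hDebugger, NV83DE_CTRL_CMD_DEBUG_RESUME_CONTEXT, NULL, 0);
}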
+ * + * This command's input is NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS which + * contains a buffer of NV83DE_CTRL_DEBUG_ACCESS_OPs + * + * Possible return values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_XLATE + */ +#define NV83DE_CTRL_CMD_READ_SURFACE (0x83de031a) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x1A" */ + +/* + * NV83DE_CTRL_CMD_WRITE_SURFACE + * + * This command allows the caller to copy the data from a provided usermode + * buffer to a specified GPU VA. Before copying, this command validates whether or + * not the virtual address (VA) range provided as input has valid and allocated + * pages mapped to it in its entirety. + * + * This command's input is NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS which + * contains a buffer of NV83DE_CTRL_DEBUG_ACCESS_OPs + * + * Possible return values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_XLATE + */ +#define NV83DE_CTRL_CMD_WRITE_SURFACE (0x83de031b) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x1B" */ + +#define MAX_ACCESS_OPS 64 + +typedef struct NV83DE_CTRL_DEBUG_ACCESS_OP { + NV_DECLARE_ALIGNED(NvU64 gpuVA, 8); // IN + NV_DECLARE_ALIGNED(NvP64 pCpuVA, 8); // IN/OUT Debugger CPU Pointer of buffer + NvU32 size; // IN Size in bytes + NvU32 valid; // OUT Whether the GpuVA is accessible +} NV83DE_CTRL_DEBUG_ACCESS_OP; + +typedef struct NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS { + NvU32 count; // IN Number of ops in this call + NV_DECLARE_ALIGNED(NV83DE_CTRL_DEBUG_ACCESS_OP opsBuffer[MAX_ACCESS_OPS], 8); +} NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS; + +/* + * NV83DE_CTRL_CMD_GET_MAPPINGS + * + * This command traverses through the virtual memory page hierarchy and + * fetches the valid virtual mappings and their sizes for a provided virtual + * address (VA) range. + * If a given VA range has more than MAX_GET_MAPPINGS_OPS valid mappings, + * hasMore is set to 1, and opsBuffer is still filled with MAX_GET_MAPPINGS_OPS + * valid mappings. In this case, this command should be called again with + * vaLo = opsBuffer[MAX_GET_MAPPINGS_OPS - 1].gpuVA + + * opsBuffer[MAX_GET_MAPPINGS_OPS - 1].size; + * and vaHi set to the next desired upper limit. + * + * This command's input is NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS which + * contains a buffer of NV83DE_CTRL_DEBUG_GET_MAPPINGS_OP + * + * Possible return values: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_XLATE + */ +#define NV83DE_CTRL_CMD_GET_MAPPINGS (0x83de031c) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x1C" */ + +#define MAX_GET_MAPPINGS_OPS 64 + +typedef struct NV83DE_CTRL_DEBUG_GET_MAPPINGS_OP { + NV_DECLARE_ALIGNED(NvU64 gpuVA, 8); // OUT Start of GPU VA for this mapping + NvU32 size; // OUT Size in bytes of this mapping +} NV83DE_CTRL_DEBUG_GET_MAPPINGS_OP; + +typedef struct NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS { + NV_DECLARE_ALIGNED(NvU64 vaLo, 8); // IN Lower VA range, inclusive + NV_DECLARE_ALIGNED(NvU64 vaHi, 8); // IN Upper VA range, inclusive + NvU32 count; // OUT Number of ops in this call + NvU32 hasMore; // OUT Whether there are more valid mappings in this range than MAX_GET_MAPPINGS_OPS + NV_DECLARE_ALIGNED(NV83DE_CTRL_DEBUG_GET_MAPPINGS_OP opsBuffer[MAX_GET_MAPPINGS_OPS], 8); +} NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS; + +/* + * NV83DE_CTRL_CMD_DEBUG_EXEC_REG_OPS + * + * This command is used to submit a buffer containing one or more + * NV2080_CTRL_GPU_REG_OP structures for processing. 
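// The GET_MAPPINGS continuation rule described above turns into a simple loop: whenever
// hasMore is set, re-issue the query starting just past the last mapping returned. The
// va range, handles and the commented dispatch call are assumptions.
static void sketchWalkMappings(NvHandle hClient, NvHandle hDebugger, NvU64 vaStart, NvU64 vaEnd)
{
    NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS maps = { 0 };
    NvU64 cursor = vaStart;

    do
    {
        maps.vaLo = cursor;
        maps.vaHi = vaEnd;
        // status = NvRmControl(hClient, hDebugger, NV83DE_CTRL_CMD_GET_MAPPINGS,
        //                      &maps, sizeof(maps));
        // ... consume maps.opsBuffer[0 .. maps.count-1], each a (gpuVA, size) pair ...
        if (maps.count > 0)
        {
            cursor = maps.opsBuffer[maps.count - 1].gpuVA +
                     maps.opsBuffer[maps.count - 1].size;
        }
    } while (maps.hasMore);
}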
Each entry in the + * buffer specifies a single read or write operation. Each entry is checked + * for validity in an initial pass over the buffer with the results for + * each operation stored in the corresponding regStatus field. Unless + * bNonTransactional flag is set to true, if any invalid entries are found + * during this initial pass then none of the operations are executed. Entries + * are processed in order within each regType with NV2080_CTRL_GPU_REG_OP_TYPE_GLOBAL + * entries processed first followed by NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX entries. + * + * [IN] bNonTransactional + * This field specifies if command is non-transactional i.e. if set to + * true, all the valid operations will be executed. + * + * [IN] regOpCount + * This field specifies the number of valid entries in the regops list. + * + * [IN/OUT] regOps + * This field is to be filled with the desired register information that is + * to be retrieved. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAM_STRUCT + */ +#define NV83DE_CTRL_CMD_DEBUG_EXEC_REG_OPS (0x83de031d) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_GPU_EXEC_REG_OPS_MAX_OPS 100 +#define NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS_MESSAGE_ID (0x1DU) + +typedef struct NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS { + NvBool bNonTransactional; + NvU32 regOpCount; + // C form: NV2080_CTRL_GPU_REG_OP regOps[NV2080_CTRL_GPU_EXEC_REG_OPS_MAX_OPS] + NV2080_CTRL_GPU_REG_OP regOps[NV83DE_CTRL_GPU_EXEC_REG_OPS_MAX_OPS]; +} NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS; + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR + * + * This command sets the Errbar Debug mode. This is Volta-onwards feature. + * If the query is made on an incorrect platform (for example, pre-Volta) + * the call will return with an NV_ERR_NOT_SUPPORTED error. + * + * action + * The possible action values are: + * - NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_ENABLE + * This enables the Errbar debug mode. + * + * - NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DISABLE + * This disables the Errbar debug mode. + * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_WRITE + * NV_ERR_INVALID_ARGUMENT + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG (0x83de031f) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS_MESSAGE_ID (0x1FU) + +typedef struct NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS { + NvU32 action; +} NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS; + +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG_DISABLE (0x00000000) +#define NV83DE_CTRL_CMD_DEBUG_SET_MODE_ERRBAR_DEBUG_ENABLE (0x00000001) + +/* + * NV83DE_CTRL_CMD_DEBUG_GET_MODE_ERRBAR + * + * This command gets the value of currently configured Errbar DEBUG mode. + * This is Volta-onwards feature. If the query is made on an incorrect + * platform (for example, pre-Volta) the call will return with an + * NV_ERR_NOT_SUPPORTED error. + * + * value + * This parameter returns the configured value. 
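// For NV83DE_CTRL_CMD_DEBUG_EXEC_REG_OPS above, the caller fills the first regOpCount
// entries of regOps[] with NV2080_CTRL_GPU_REG_OP descriptors from ctrl2080gpu.h (that
// structure's field and value names are not reproduced in this file, so the ones used in
// the comments below are assumptions). With bNonTransactional left NV_FALSE, a single
// invalid entry rejects the whole batch.
static void sketchExecRegOps(NvHandle hClient, NvHandle hDebugger)
{
    NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS ops = { 0 };

    ops.bNonTransactional = NV_FALSE;
    ops.regOpCount        = 1;
    // ops.regOps[0].regOp     = NV2080_CTRL_GPU_REG_OP_READ_32;     /* assumed names */
    // ops.regOps[0].regType   = NV2080_CTRL_GPU_REG_OP_TYPE_GR_CTX;
    // ops.regOps[0].regOffset = ...;                                /* register of interest */
    // status = NvRmControl(hClient, hDebugger, NV83DE_CTRL_CMD_DEBUG_EXEC_REG_OPS,
    //                      &ops, sizeof(ops));
    // Per-entry results come back in the corresponding regStatus fields.
}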
+ * + * Possible return values: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_READ + * NV_ERR_INVALID_ARGUMENT + */ +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_ERRBAR_DEBUG (0x83de0320) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS_MESSAGE_ID (0x20U) + +typedef struct NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS { + NvU32 value; +} NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS; + +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_ERRBAR_DEBUG_DISABLED (0x00000000) +#define NV83DE_CTRL_CMD_DEBUG_GET_MODE_ERRBAR_DEBUG_ENABLED (0x00000001) + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_SINGLE_STEP + * + * This command either enables or disables single step mode for the given SM. + * + * smID (input) + * This identifies the SM. + * bSingleStep (input) + * This indicates the single step mode. NV_TRUE for ENABLED. + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_SINGLE_STEP (0x83de0321) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS_MESSAGE_ID (0x21U) + +typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS { + NvU32 smID; + NvBool bSingleStep; +} NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS; + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_STOP_TRIGGER + * + * This command sets or clears the stop trigger for the given SM. + * + * smID (input) + * This identifies the SM. + * bStopTrigger (input) + * This indicates whether to set or clear the trigger. NV_TRUE for ENABLED. + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_STOP_TRIGGER (0x83de0322) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_BROADCAST ((NvU32)~0) + +#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS_MESSAGE_ID (0x22U) + +typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS { + NvU32 smID; + NvBool bStopTrigger; +} NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS; + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_RUN_TRIGGER + * + * This command sets or clears the run trigger for the given SM. + * + * smID (input) + * This identifies the SM. + * bRunTrigger (input) + * This indicates whether to set or clear the trigger. NV_TRUE for ENABLED. + */ +#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_RUN_TRIGGER (0x83de0323) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS_MESSAGE_ID (0x23U) + +typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS { + NvU32 smID; + NvBool bRunTrigger; +} NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS; + +/* + * NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT + * + * This command enables or disables skip idle warp detect for the given sm. + * + * smID (input) + * This identifies the SM. + * bSkipIdleWarpDetect (input) + * This indicates whether to enable or disable the mode. NV_TRUE for ENABLED. 
+ */ +#define NV83DE_CTRL_CMD_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT (0x83de0324) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS_MESSAGE_ID" */ + +#define NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS_MESSAGE_ID (0x24U) + +typedef struct NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS { + NvU32 smID; + NvBool bSkipIdleWarpDetect; +} NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS; + +/* + * NV83DE_CTRL_CMD_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS + * + * This command retrieves the debugger status states of the given SM. + * + * smID (input) + * This identifies the SM. + * bInTrapMode (output) + * This indicates whether the SM is in trap mode. + * bCrsFlushDone (output) + * Deprecated GK110+. Always 0 Volta+. + * bRunTriggerInProgress (output) + * Deprecated GM10X+. Always 0 Volta+. + * bComputeContext (output) + * Deprecated GM10X+. Always 0 Volta+. + * bLockedDown (output) + * This indicates whether the SM is locked down. + */ +#define NV83DE_CTRL_CMD_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS (0x83de0325) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS_MESSAGE_ID" */ + +typedef struct NV83DE_CTRL_DEBUG_SINGLE_SM_DEBUGGER_STATUS { + NvBool bInTrapMode; + NvBool bCrsFlushDone; + NvBool bRunTriggerInProgress; + NvBool bComputeContext; + NvBool bLockedDown; +} NV83DE_CTRL_DEBUG_SINGLE_SM_DEBUGGER_STATUS; + +#define NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS_MESSAGE_ID (0x25U) + +typedef struct NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS { + NvU32 smID; + NV83DE_CTRL_DEBUG_SINGLE_SM_DEBUGGER_STATUS smDebuggerStatus; +} NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS; + +/*! + * NV83DE_CTRL_CMD_DEBUG_ACCESS_MEMORY_ENTRY + * + * This struct represents a requet to read/write a block of memory. + * + * hMemory [IN] + * The handle to the memory being accessed. If hMemory is not accessible + * from the caller's address space, NV_ERR_INSUFFICIENT_PERMISSIONS + * is returned. + * + * length [IN] + * Number of bytes to read/write + * + * memOffset [IN] + * The offset into the physical memory region given by the handle above. + * + * dataOffset [IN] + * An offset into the usermode memory region provided by the enclosing + * params indicating where to read/write data from/to. + * + * status [OUT] + * The result status of the operation will be output. If NV_OK, even if + * command returned error status, the given operation was successful. If + * not NV_OK, it is guaranteed that the command will return error status. + */ +typedef struct NV83DE_CTRL_DEBUG_ACCESS_MEMORY_ENTRY { + NvHandle hMemory; + NvU32 length; + NV_DECLARE_ALIGNED(NvU64 memOffset, 8); + NvU32 dataOffset; + NV_STATUS status; +} NV83DE_CTRL_DEBUG_ACCESS_MEMORY_ENTRY; + +/*! + * NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY + * + * Execute a batch of read memory operations. + * + * count [IN] + * Number of read/write operations to perform. + * + * dataLength [IN] + * Length of the usermode buffer passed in, in bytes. + * + * pData [OUT] + * Usermode buffer to store the output of the read operations. Each + * operation is expected to provide an offset into this buffer. + * + * entries [IN] + * List of operations to perform. First `count` entries are used. + */ +#define NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY (0x83de0326) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x26" */ + +/*! 
+ * NV83DE_CTRL_CMD_DEBUG_WRITE_BATCH_MEMORY + * + * Execute a batch of write memory operations. + * + * count [IN] + * Number of read/write operations to perform. + * + * dataLength [IN] + * Length of the usermode buffer passed in, in bytes. + * + * pData [IN] + * Usermode buffer to store the input of the write operations. Each + * operation is expected to provide an offset into this buffer. + * + * entries [IN] + * List of operations to perform. First `count` entries are used. + */ +#define NV83DE_CTRL_CMD_DEBUG_WRITE_BATCH_MEMORY (0x83de0327) /* finn: Evaluated from "(FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID << 8) | 0x27" */ + +#define MAX_ACCESS_MEMORY_OPS 150 +typedef struct NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pData, 8); + NvU32 dataLength; + NvU32 count; + NV_DECLARE_ALIGNED(NV83DE_CTRL_DEBUG_ACCESS_MEMORY_ENTRY entries[MAX_ACCESS_MEMORY_OPS], 8); +} NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS; + +/* _ctrl83dedebug_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl9010.h b/src/common/sdk/nvidia/inc/ctrl/ctrl9010.h new file mode 100644 index 000000000..e4a622555 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl9010.h @@ -0,0 +1,86 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl9010.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#define NV9010_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x9010, NV9010_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV9010_CTRL_RESERVED (0x00) +#define NV9010_CTRL_VBLANK_CALLBACK (0x01) + + +/* + * NV9010_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV9010_CTRL_CMD_NULL (0x90100000) /* finn: Evaluated from "(FINN_NV9010_VBLANK_CALLBACK_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + +/* + * NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION + * + * This command is used to enable and disable vblank notifications. This + * is specially intended for cases where the RM client is calling from a + * high IRQL context, where other mechanisms to toggle vblank notification + * (such as freeing and reallocating the NV9010_VBLANK_CALLBACK object) + * would not be suitable. 
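// Referring back to the batch memory controls of ctrl83dedebug.h just above: both
// READ_BATCH_MEMORY and WRITE_BATCH_MEMORY take NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS,
// and every entry carves its own window out of the single user buffer through dataOffset.
// A two-entry read sketch; the memory handles, offsets and the commented dispatch call
// are assumptions, and NV_PTR_TO_NvP64 is the pointer-to-NvP64 helper expected from
// nvtypes.h.
static void sketchBatchRead(NvHandle hClient, NvHandle hDebugger, NvHandle hMem0, NvHandle hMem1)
{
    NvU8 scratch[256];
    NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS batch = { 0 };

    batch.pData      = NV_PTR_TO_NvP64(scratch);
    batch.dataLength = sizeof(scratch);
    batch.count      = 2;

    batch.entries[0].hMemory    = hMem0;
    batch.entries[0].memOffset  = 0x0;
    batch.entries[0].length     = 128;
    batch.entries[0].dataOffset = 0;    /* fills scratch[0..127] */

    batch.entries[1].hMemory    = hMem1;
    batch.entries[1].memOffset  = 0x1000;
    batch.entries[1].length     = 128;
    batch.entries[1].dataOffset = 128;  /* fills scratch[128..255] */

    // status = NvRmControl(hClient, hDebugger, NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY,
    //                      &batch, sizeof(batch));
    // Each entries[i].status then reports per-entry success, independently of the overall
    // command status.
}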
As this is being invoked at the high IRQL, + * locking can be bypassed, if the NVOS54_FLAGS_LOCK_BYPASS flag is set on + * the control call.Here the OS will take care of the synchronization. + * The Windows Display Driver for Cobalt requires this, for example. + * + * bSetVBlankNotifyEnable + * This parameter tell whether to enable or disable the Vblank notification + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION (0x90100101) /* finn: Evaluated from "(FINN_NV9010_VBLANK_CALLBACK_INTERFACE_ID << 8) | NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS_MESSAGE_ID (0x01U) + +typedef struct NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS { + NvBool bSetVBlankNotifyEnable; +} NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS; + +/* _ctrl9010_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl9067.h b/src/common/sdk/nvidia/inc/ctrl/ctrl9067.h new file mode 100644 index 000000000..491f7c8ae --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl9067.h @@ -0,0 +1,155 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl9067.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* Subcontext control commands and parameters */ +#define NV9067_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x9067, NV9067_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV9067_CTRL_RESERVED (0x00) +#define NV9067_CTRL_TPC_PARTITION (0x01) +#define NV9067_CTRL_CWD_WATERMARK (0x02) + +/*! + * Does nothing. + */ +#define NV9067_CTRL_CMD_NULL (0x90670000) /* finn: Evaluated from "(FINN_FERMI_CONTEXT_SHARE_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/*! 
+ * NV9067_CTRL_CMD_GET_TPC_PARTITION_TABLE + * This command gets the current partition table configuration of a subcontext + * + * NV9067_CTRL_CMD_SET_TPC_PARTITION_TABLE + * This command sets the partition table of a subcontext + * + * NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS + * This structure defines the parameters used for SET/GET per-subcontext TPC partitioning table configuration + * + * numUsedTpc [in/out] + * Specifies the number of TPCs used by the subcontext + * While querying the enabled TPCs, this is an output paramter + * While configuring the TPCs, this is an input parameter + * + * tpcList [in/out] + * Array containing the TPCs enabled for the subcontext. + * The first numUsedTpc in the array interpreted as the valid entries. + * + * Only applicable for STATIC and DYNAMIC modes. + * + * NV9067_CTRL_TPC_PARTITION_TABLE_MAX_TPC_COUNT + * Max TPC count supported by this ctrl call + * + * NV9067_CTRL_TPC_PARTITION_TABLE_TPC_INFO + * This structure defines the parameters for a TPC + * + * globalTpcIndex + * Global logical index of the enabled TPC + * + * lmemBlockIndex + * Block index of the Local memory backing store for the enabled TPC. + * For GET command, we will return the current lmem block assigment for STATIC & DYNAMIC modes. + * For SET command, this index is relevant only for STATIC mode. + * HW automatically assign it for other modes. So should be zeroed out for other modes. + * + */ +#define NV9067_CTRL_CMD_GET_TPC_PARTITION_TABLE (0x90670101) /* finn: Evaluated from "(FINN_FERMI_CONTEXT_SHARE_A_TPC_PARTITION_INTERFACE_ID << 8) | 0x1" */ + +#define NV9067_CTRL_CMD_SET_TPC_PARTITION_TABLE (0x90670102) /* finn: Evaluated from "(FINN_FERMI_CONTEXT_SHARE_A_TPC_PARTITION_INTERFACE_ID << 8) | NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV9067_CTRL_TPC_PARTITION_TABLE_TPC_COUNT_MAX 256 + +typedef struct NV9067_CTRL_TPC_PARTITION_TABLE_TPC_INFO { + NvU16 globalTpcIndex; + NvU16 lmemBlockIndex; +} NV9067_CTRL_TPC_PARTITION_TABLE_TPC_INFO; + +#define NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS { + NvU16 numUsedTpc; + NV9067_CTRL_TPC_PARTITION_TABLE_TPC_INFO tpcList[NV9067_CTRL_TPC_PARTITION_TABLE_TPC_COUNT_MAX]; +} NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS; + + +/*! + * NV9067_CTRL_CMD_GET_CWD_WATERMARK + * This command gets the cached watermark value for a subcontext + * + * NV9067_CTRL_CMD_SET_CWD_WATERMARK + * This command sets the watermark value for a subcontexts + * + * NV9067_CTRL_CWD_WATERMARK_PARAMS + * This structure defines the parameters used to SET/GET watermark value per-subcontext. + * + * watermarkValue [in/out] + * Value of watermark per-subcontext + * Acts as a output parameter to get the current value of watermark for a subcontext. + * Acts as a input parameter to set the current value of watermark for a subcontexts. 
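// For the TPC partition table controls above, SET consumes the first numUsedTpc entries of
// tpcList[]; lmemBlockIndex is only meaningful for STATIC mode and should be left zero
// otherwise, as the comment block notes. A small sketch restricting a subcontext to two
// TPCs; the indices, the context-share handle and the commented dispatch call are
// assumptions.
static void sketchSetTpcPartition(NvHandle hClient, NvHandle hContextShare)
{
    NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS tpcTable = { 0 };

    tpcTable.numUsedTpc = 2;
    tpcTable.tpcList[0].globalTpcIndex = 0;
    tpcTable.tpcList[0].lmemBlockIndex = 0;
    tpcTable.tpcList[1].globalTpcIndex = 4;
    tpcTable.tpcList[1].lmemBlockIndex = 1;
    // status = NvRmControl(hClient, hContextShare, NV9067_CTRL_CMD_SET_TPC_PARTITION_TABLE,
    //                      &tpcTable, sizeof(tpcTable));
    // A GET with the same structure returns the current assignment in the same fields.
}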
+ * + * NV9067_CTRL_CWD_WATERMARK_VALUE_MIN + * Minimum value of watermark for any subcontext + * RM will throw an error if any value less than this value is specified + * + * NV9067_CTRL_CWD_WATERMARK_VALUE_DEFAULT + * Default value of watermark for any subcontext + * RM will set watermark of a subcontext to this value when the subcontext + * is created/initialized for the first time + * + * NV9067_CTRL_CWD_WATERMARK_VALUE_MAX + * Maximum value of watermark for any subcontext + * RM will throw an error if any value more than this value is specified + * + */ + + + +#define NV9067_CTRL_CMD_GET_CWD_WATERMARK (0x90670201) /* finn: Evaluated from "(FINN_FERMI_CONTEXT_SHARE_A_CWD_WATERMARK_INTERFACE_ID << 8) | 0x1" */ + +#define NV9067_CTRL_CMD_SET_CWD_WATERMARK (0x90670202) /* finn: Evaluated from "(FINN_FERMI_CONTEXT_SHARE_A_CWD_WATERMARK_INTERFACE_ID << 8) | 0x2" */ + +#define NV9067_CTRL_CWD_WATERMARK_VALUE_MIN 1 +#define NV9067_CTRL_CWD_WATERMARK_VALUE_DEFAULT 2 +#define NV9067_CTRL_CWD_WATERMARK_VALUE_MAX 256 + +typedef struct NV9067_CTRL_CWD_WATERMARK_PARAMS { + NvU32 watermarkValue; +} NV9067_CTRL_CWD_WATERMARK_PARAMS; + + +/* _ctrl9067_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl906f.h b/src/common/sdk/nvidia/inc/ctrl/ctrl906f.h new file mode 100644 index 000000000..a491cfd62 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl906f.h @@ -0,0 +1,265 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl906f.finn +// + + + + +/* GF100_GPFIFO control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#define NV906F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x906F, NV906F_CTRL_##cat, idx) + +/* GF100_GPFIFO command categories (6bits) */ +#define NV906F_CTRL_RESERVED (0x00) +#define NV906F_CTRL_GPFIFO (0x01) +#define NV906F_CTRL_EVENT (0x02) + + +/* + * NV906F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
+ * + * Possible status values returned are: + * NV_OK + */ +#define NV906F_CTRL_CMD_NULL (0x906f0000) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + + +/* + * NV906F_CTRL_GET_CLASS_ENGINEID + * + * Takes an object handle as input and returns + * the Class and Engine that this object uses. + * + * hObject + * Handle to an object created. For example a + * handle to object of type FERMI_A created by + * the client. This is supplied by the client + * of this call. + * + * classEngineID + * A concatenation of class and engineid + * that the object with handle hObject + * belongs to. This is returned by RM. The internal + * format of this data structure is opaque to clients. + * + * classID + * ClassID for object represented by hObject + * + * engineID + * EngineID for object represented by hObject + * + * Possible status values returned are: + * NV_OK + * If the call was successful. + * + * NV_ERR_INVALID_OBJECT_HANDLE + * No object of handle hObject was found. + */ +#define NV906F_CTRL_GET_CLASS_ENGINEID (0x906f0101) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS_MESSAGE_ID" */ + +#define NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS { + NvHandle hObject; + NvU32 classEngineID; + NvU32 classID; + NvU32 engineID; +} NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +/* + * NV906F_CTRL_RESET_CHANNEL + * + * This command resets the channel corresponding to specified engine and also + * resets the specified engine. + * + * Takes an engine ID as input. + * + * engineID + * This parameter specifies the engine to be reset. See the description of the + * NV2080_ENGINE_TYPE values in cl2080.h for more information. + * subdeviceInstance + * This parameter specifies the subdevice to be reset when in SLI. + * resetReason + * Specifies reason to reset a channel. + * + * Possible status values returned are: + * NV_OK + */ +#define NV906F_CTRL_CMD_RESET_CHANNEL_REASON_DEFAULT 0 +#define NV906F_CTRL_CMD_RESET_CHANNEL_REASON_VERIF 1 +#define NV906F_CTRL_CMD_RESET_CHANNEL_REASON_MMU_FLT 2 +#define NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX 3 +/* + * Internal values for NV906F_CTRL_CMD_RESET_REASON. External values will be + * checked and enforced to be < NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX + */ +#define NV906F_CTRL_CMD_INTERNAL_RESET_CHANNEL_REASON_FAKE_ERROR (0x4) /* finn: Evaluated from "NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX + 1" */ + + +#define NV906F_CTRL_CMD_RESET_CHANNEL (0x906f0102) /* finn: Evaluated from "((FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_MESSAGE_ID)" */ + +#define NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS { + NvU32 engineID; + NvU32 subdeviceInstance; + NvU32 resetReason; +} NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +/* + * NV906F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * This command sets event notification state for the associated channel. + * This command requires that an instance of NV01_EVENT has been previously + * bound to the associated channel object. + * + * event + * This parameter specifies the type of event to which the specified + * action is to be applied. This parameter must specify a valid + * NV906F_NOTIFIERS value (see cl906f.h for more details) and should + * not exceed one less NV906F_NOTIFIERS_MAXCOUNT. 
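// For NV906F_CTRL_CMD_RESET_CHANNEL above: external callers pick one of the reason values
// below _REASON_ENUM_MAX (the _FAKE_ERROR value is internal-only), and engineID comes from
// the NV2080_ENGINE_TYPE list in cl2080.h. The engine value, handles and the commented
// dispatch call in this sketch are assumptions.
static void sketchResetChannel(NvHandle hClient, NvHandle hChannel)
{
    NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS reset = { 0 };

    reset.engineID          = NV2080_ENGINE_TYPE_GR0;  /* assumed engine value from cl2080.h */
    reset.subdeviceInstance = 0;                       /* only meaningful with SLI */
    reset.resetReason       = NV906F_CTRL_CMD_RESET_CHANNEL_REASON_MMU_FLT;
    // status = NvRmControl(hClient, hChannel, NV906F_CTRL_CMD_RESET_CHANNEL,
    //                      &reset, sizeof(reset));
}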
+ * action + * This parameter specifies the desired event notification action. + * Valid notification actions include: + * NV906F_CTRL_SET_EVENT_NOTIFICATION_ACTION_DISABLE + * This action disables event notification for the specified + * event for the associated channel object. + * NV906F_CTRL_SET_EVENT_NOTIFICATION_ACTION_SINGLE + * This action enables single-shot event notification for the + * specified event for the associated channel object. + * NV906F_CTRL_SET_EVENT_NOTIFICATION_ACTION_REPEAT + * This action enables repeated event notification for the specified + * event for the associated channel object. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ + + +#define NV906F_CTRL_CMD_EVENT_SET_NOTIFICATION (0x906f0203) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_EVENT_INTERFACE_ID << 8) | NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 event; + NvU32 action; +} NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000) +#define NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001) +#define NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) + +/* + * NV906F_CTRL_CMD_GET_DEFER_RC_STATE + * + * If SM Debugger is attached then on a MMU fault, RM defers the RC error + * recovery and keeps a flag indicating that RC is deferred. This command + * checks whether or not deferred RC is pending in RM for the associated + * channel. + * + * bDeferRCPending + * The output are TRUE and FALSE. + * + * Possible status values returned are: + * NV_OK + */ + + +#define NV906F_CTRL_CMD_GET_DEFER_RC_STATE (0x906f0105) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS_MESSAGE_ID" */ + +#define NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS { + NvBool bDeferRCPending; +} NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS; + +#define NV906F_CTRL_CMD_GET_MMU_FAULT_INFO (0x906f0106) /* finn: Evaluated from "(FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID << 8) | NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS_MESSAGE_ID" */ + +/* + * Shader types supported by MMU fault info + * The types before compute shader refer to NV9097_SET_PIPELINE_SHADER_TYPE + */ +#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_VERTEX_CULL_BEFORE_FETCH 0x00000000 +#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_VERTEX 0x00000001 +#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_TESSELLATION_INIT 0x00000002 +#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_TESSELLATION 0x00000003 +#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_GEOMETRY 0x00000004 +#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_PIXEL 0x00000005 +#define NV906F_CTRL_MMU_FAULT_SHADER_TYPE_COMPUTE 0x00000006 +#define NV906F_CTRL_MMU_FAULT_SHADER_TYPES 7 + +/* + * NV906F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * This command returns MMU fault information for a given channel. The MMU + * fault information will be cleared once this command is executed. + * + * addrHi - [out] + * Upper 32 bits of faulting address + * addrLo [out] + * Lower 32 bits of faulting address + * faultType [out] + * MMU fault type. Please see NV_PFIFO_INTR_MMU_FAULT_INFO_TYPE_* in + * dev_fifo.h for details about MMU fault type. 
+ * faultString [out] + * String indicating the MMU fault type + * shaderProgramVA [out] + * an array of shader program virtual addresses to indicate faulted shaders in the pipeline + * + * Possible status values returned are: + * NV_OK + */ +#define NV906F_CTRL_MMU_FAULT_STRING_LEN 32 +#define NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS { + NvU32 addrHi; + NvU32 addrLo; + NvU32 faultType; + char faultString[NV906F_CTRL_MMU_FAULT_STRING_LEN]; + NV_DECLARE_ALIGNED(NvU64 shaderProgramVA[NV906F_CTRL_MMU_FAULT_SHADER_TYPES], 8); +} NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + + +/* _ctrl906f.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl9072.h b/src/common/sdk/nvidia/inc/ctrl/ctrl9072.h new file mode 100644 index 000000000..75d9c1b80 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl9072.h @@ -0,0 +1,89 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl9072.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#define NV9072_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x9072, NV9072_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV9072_CTRL_RESERVED (0x00) +#define NV9072_CTRL_DISP_SW (0x01) + + +/* + * NV9072_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV9072_CTRL_CMD_NULL (0x90720000) /* finn: Evaluated from "(FINN_GF100_DISP_SW_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NV9072_CTRL_CMD_NOTIFY_ON_VBLANK + * + * This command implements an out-of-band version of the + * GF100_DISP_SW class's NV9072_NOTIFY_ON_VBLANK method. + * + * Parameters: + * + * data + * Valid data accepted by the NV9072_NOTIFY_ON_VBLANK method. + * bHeadDisabled + * Specifies whether head is active while adding vblank + * callback. 
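+ *
+ * As a rough usage sketch (in the same pseudocode shorthand used by the
+ * NV9074_CTRL_CMD_FLUSH description later in this change; the value passed
+ * in 'data' is only a placeholder):
+ *
+ *     NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS params = { 0 };
+ *     params.data          = notifyData;   // data accepted by the NV9072_NOTIFY_ON_VBLANK method
+ *     params.bHeadDisabled = NV_FALSE;     // head is active when the callback is added
+ *     NvRmControl(NV9072_CTRL_CMD_NOTIFY_ON_VBLANK, params);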
+ * + * Possible status values returned are: + * NV_OK + * NVOS_STATUS_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_ARGUMENT + */ +#define NV9072_CTRL_CMD_NOTIFY_ON_VBLANK (0x90720101) /* finn: Evaluated from "(FINN_GF100_DISP_SW_DISP_SW_INTERFACE_ID << 8) | NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS_MESSAGE_ID" */ + +#define NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS { + NvU32 data; + NvBool bHeadDisabled; +} NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS; + +/* _ctrl9072.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl9074.h b/src/common/sdk/nvidia/inc/ctrl/ctrl9074.h new file mode 100644 index 000000000..d14fb3c20 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl9074.h @@ -0,0 +1,151 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl9074.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#define NV9074_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x9074, NV9074_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV9074_CTRL_RESERVED (0x00) +#define NV9074_CTRL_SEM (0x01) + +/* + * NV9074_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV9074_CTRL_CMD_NULL (0x90740000) /* finn: Evaluated from "(FINN_GF100_TIMED_SEMAPHORE_SW_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NV9074_CTRL_CMD_FLUSH + * + * This command is intended to aid in idling/flushing a channel containing + * methods invoked against a specific NV9074 object. + * + * This control manipulates an NV9074 object's "flushing" state. + * + * Typical usage is: + * NvRmControl(NV9074_CTRL_CMD_FLUSH, {TRUE, flushDelay}); + * NvRmIdleChannel(channel_containing_the_9074_object); + * NvRmControl(NV9074_CTRL_CMD_FLUSH, {FALSE, 0}); + * + * When an NV9074 object is placed into the flushing state, a snaphot of the + * current timer value is taken, and "maxFlushTime" is added to this. This + * value is the "flush limit timestamp". 
Any previously or newly invoked + * NV9074_SEMAPHORE_SCHED methods and NV9074_CTRL_CMD_RELEASE requests that + * specify a release timestamp at or after this "flush limit timestamp" will + * immediately release the specified semaphore, without waiting for the + * specified timestamp, and write a DONE_FORCED value to the specified notifier. + */ +#define NV9074_CTRL_CMD_FLUSH (0x90740101) /* finn: Evaluated from "(FINN_GF100_TIMED_SEMAPHORE_SW_SEM_INTERFACE_ID << 8) | NV9074_CTRL_CMD_FLUSH_PARAMS_MESSAGE_ID" */ + +#define NV9074_CTRL_CMD_FLUSH_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV9074_CTRL_CMD_FLUSH_PARAMS { + NvU32 isFlushing; + NV_DECLARE_ALIGNED(NvU64 maxFlushTime, 8); +} NV9074_CTRL_CMD_FLUSH_PARAMS; + +/* + * NV9074_CTRL_CMD_GET_TIME + * + * Retrieve the current time value. + */ +#define NV9074_CTRL_CMD_GET_TIME (0x90740102) /* finn: Evaluated from "(FINN_GF100_TIMED_SEMAPHORE_SW_SEM_INTERFACE_ID << 8) | NV9074_CTRL_CMD_GET_TIME_PARAMS_MESSAGE_ID" */ + +#define NV9074_CTRL_CMD_GET_TIME_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV9074_CTRL_CMD_GET_TIME_PARAMS { + NV_DECLARE_ALIGNED(NvU64 currentTime, 8); +} NV9074_CTRL_CMD_GET_TIME_PARAMS; + +/* + * NV9074_CTRL_CMD_RELEASE + * + * This command adds a timed semaphore release request. When the desired time is + * reached, the semaphore is written with the release value, the notifier is + * filled with status and timestamp, and optionally an event is sent to all the + * client waiting on it. + * + * notifierGPUVA + * This parameter specifies the GPU VA of the notifier to receive the status + * for this particular release. + * + * semaphoreGPUVA + * This parameter specifies the GPU VA of the semaphore to release. + * + * waitTimestamp + * This parameter specifies the timestamp at which to release the semaphore. + * + * releaseValue + * This parameter specifies the semaphore value to release. + * + * releaseFlags + * This parameter specifies the flags: + * _NOTIFY wake client or not. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ADDRESS + * NV_ERR_INVALID_EVENT + * NV_ERR_INVALID_STATE + * + */ +#define NV9074_CTRL_CMD_RELEASE (0x90740103) /* finn: Evaluated from "(FINN_GF100_TIMED_SEMAPHORE_SW_SEM_INTERFACE_ID << 8) | NV9074_CTRL_CMD_RELEASE_PARAMS_MESSAGE_ID" */ + +#define NV9074_CTRL_CMD_RELEASE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV9074_CTRL_CMD_RELEASE_PARAMS { + NV_DECLARE_ALIGNED(NvU64 notifierGPUVA, 8); + NV_DECLARE_ALIGNED(NvU64 semaphoreGPUVA, 8); + NV_DECLARE_ALIGNED(NvU64 waitTimestamp, 8); + NvU32 releaseValue; + NvU32 releaseFlags; +} NV9074_CTRL_CMD_RELEASE_PARAMS; + +#define NV9074_CTRL_CMD_RELEASE_FLAGS +#define NV9074_CTRL_CMD_RELEASE_FLAGS_NOTIFY 1:0 +#define NV9074_CTRL_CMD_RELEASE_FLAGS_NOTIFY_WRITE_ONLY (0x00000000) +#define NV9074_CTRL_CMD_RELEASE_FLAGS_NOTIFY_WRITE_THEN_AWAKEN (0x00000001) + +/* _ctrl9074.h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl9096.h b/src/common/sdk/nvidia/inc/ctrl/ctrl9096.h new file mode 100644 index 000000000..67bd155bd --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl9096.h @@ -0,0 +1,449 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl9096.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#define NV9096_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x9096, NV9096_CTRL_##cat, idx) + +/* NV9096 command categories (6bits) */ +#define NV9096_CTRL_RESERVED (0x00U) +#define NV9096_CTRL_ZBC (0x01U) + + +/* + * NV9096_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV9096_CTRL_CMD_NULL (0x90960000U) /* finn: Evaluated from "(FINN_GF100_ZBC_CLEAR_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* + * NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_ + * These are various possible CtFormats which + * the Client passes down to RM to set in the DS + * Color Table. 
+ */ + + + +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_INVALID 0x00000000U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_ZERO 0x00000001U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_UNORM_ONE 0x00000002U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_RF32_GF32_BF32_AF32 0x00000004U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_R16_G16_B16_A16 0x00000008U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_RN16_GN16_BN16_AN16 0x0000000cU +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_RS16_GS16_BS16_AS16 0x00000010U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_RU16_GU16_BU16_AU16 0x00000014U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_RF16_GF16_BF16_AF16 0x00000016U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_A8R8G8B8 0x00000018U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_A8RL8GL8BL8 0x0000001cU +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_A2B10G10R10 0x00000020U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_AU2BU10GU10RU10 0x00000024U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_A8B8G8R8 0x00000028U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_A8BL8GL8RL8 0x0000002cU +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_AN8BN8GN8RN8 0x00000030U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_AS8BS8GS8RS8 0x00000034U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_AU8BU8GU8RU8 0x00000038U +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_A2R10G10B10 0x0000003cU +#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_BF10GF11RF11 0x00000040U + +/* + * NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR + * + * This command attempts to add a new entry to Color ZBC Tables. + * + * colorFB + * This field sets the raw framebuffer values for the ZBC table entries. Prior to + * GA10x, these values are written into the "L2" table. + * From GA10x and later, these values are written in "CROP" table. + * colorDS + * This field sets format-independent values for ZBC table entries. Prior to GA10X, + * these values are written in the "DS" table and matched with the format-independent + * clear color sent in the 3D class. These values are ignored on GA10X and later + * format + * This field specifies color format for ZBC table entries and should be one of the + * NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT* enums. Prior to GA10X, these values + * are written in the format field of the hardware ZBC table entry and are matched + * against the color format sent in the 3D class. These values are ignored on GA10X and later. + * bSkipL2Table + * This parameter specifies if the L2 ZBC table should be updated or not. If + * this parameter is set to a non-zero value, the L2 ZBC table will not be + * updated. If this parameter is set to 0, the L2 ZBC table will be updated. This + * parameter will skip programming DS table values in Pre-GA10x. From GA10x and later + * this parameter will skip programming CROP table entries. + * Note: This parameter will only be supported in verification platforms. 
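+ *
+ * A hypothetical calling sketch (same pseudocode shorthand as the
+ * NV9074_CTRL_CMD_FLUSH description earlier in this change; rawClearColor and
+ * fmtClearColor are placeholder arrays holding the desired clear color):
+ *
+ *     NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS params = { 0 };
+ *     NvU32 i;
+ *     for (i = 0; i < NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE; i++) {
+ *         params.colorFB[i] = rawClearColor[i];  // raw framebuffer encoding (L2/CROP table)
+ *         params.colorDS[i] = fmtClearColor[i];  // format-independent value (pre-GA10x DS table)
+ *     }
+ *     params.format       = NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL_RF32_GF32_BF32_AF32;
+ *     params.bSkipL2Table = NV_FALSE;            // 0/NV_FALSE: do not skip table programming
+ *     NvRmControl(NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR, params);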
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_STATUS_INSUFFICIENT_RESOURCES
+ */
+
+
+#define NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR (0x90960101U) /* finn: Evaluated from "(FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID << 8) | NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_MESSAGE_ID" */
+
+#define NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE 4U
+#define NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS_MESSAGE_ID (0x1U)
+
+typedef struct NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS {
+    NvU32  colorFB[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE];
+    NvU32  colorDS[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE];
+    NvU32  format;
+    NvBool bSkipL2Table;
+} NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS;
+
+
+/*
+ * NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR_FMT_VAL
+ *   These are the possible formats which the client passes down to RM to set in the ZBC clear
+ *   table.
+ *
+ *   Although the "Depth" data written to both the DS and L2 versions of the depth table is
+ *   always in FP32 format (the only available format), format support is still exposed through
+ *   _CTRL_CMD_SET_ZBC_DEPTH_CLEAR because the format will later be used to disable/remove an
+ *   entry from the table. In the future this field will also become significant for
+ *   "Depth" entries.
+ */
+
+#define NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR_FMT_VAL_INVALID 0x00000000U
+// Fix me: Fix the name to FMT_VAL_FP32
+#define NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR_FMT_FP32 0x00000001U
+
+/*
+ * NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR
+ *
+ * This command attempts to add a new entry to Depth ZBC Tables.
+ *
+ *   depth
+ *     This field specifies the ZBC depth clear value to be set.
+ *   format
+ *     This field specifies the Depth format for the data sent in by the client.
+ *   bSkipL2Table
+ *     This parameter specifies if the L2 ZBC table should be updated or not. If
+ *     this parameter is set to a non-zero value, the L2 ZBC table will not be
+ *     updated. If this parameter is set to 0, the L2 ZBC table will be updated.
+ *     Note: This parameter will only be supported in verification platforms.
+ *
+ * Possible status values returned are:
+ *   NV_OK
+ *   NVOS_STATUS_INSUFFICIENT_RESOURCES
+ */
+#define NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR (0x90960102U) /* finn: Evaluated from "(FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID << 8) | NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_MESSAGE_ID" */
+
+#define NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS_MESSAGE_ID (0x2U)
+
+typedef struct NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS {
+    NvU32  depth;
+    NvU32  format;
+    NvBool bSkipL2Table;
+} NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS;
+
+/*
+ * NV9096_CTRL_CMD_GET_ZBC_CLEAR_TABLE
+ *
+ * This command is used to get the ZBC Clear Color/Depth/Stencil table data, i.e. the color
+ * values and the use status ("Use-status") of each value in the table, from RM.
+ *
+ *   colorFB
+ *     This value returns raw framebuffer values for color ZBC table entries. Pre-GA10x, these values
+ *     are taken from the "L2" table. From GA10x+, these values are taken from the CROP table. This value is
+ *     set only when valType is chosen as "NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_COLOR".
+ *   colorDS
+ *     Pre-GA10x, returns the DS color value set for ZBC. From GA10x+, returns zeroes since this format is
+ *     deprecated in HW. This value is set only when valType is chosen as "NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_COLOR".
+ *   depth
+ *     This field returns the ZBC depth clear value set, when valType is chosen as "NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_DEPTH".
+ * stencil + * This field returns the ZBC stencil clear value set, when valType is chosen as "NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_STENCIL" + * format + * This field returns the format of color, depth, or stencil ZBC table entries, using the + * NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT, + * NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR_FMT, + * NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR_FMT* enums depending on the table identified by valType. + * On GA10X and later, color ZBC table entries do not have a format in hardware and this query + * returns a format of "INVALID". + * valType + * This filed specifies the type of the Table data to be fetched. + * Possible Types are : + * NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_COLOR + * NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_DEPTH + * NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_STENCIL + * indexUsed + * This boolean parameter indicates if a particular index of the table is valid or not. + * + * Note: The following parameters are deprecated after Tegra interface is also changed. Clients need + * to use NV9096_CTRL_CMD_GET_ZBC_CLEAR_TABLE_SIZE to query the (start, end) indexes of respective ZBC tables + * + * indexSize + * This parameter is used to fetch the table size when "valType" parameter + * is specified as " 0 ".(INVALID TYPE). It is also used to pass in the + * index of the ZBC table for which we want the COLOR/DEPTH info. + * indexStart + * This parameter is used to return the valid starting index of ZBC table, when + * "valType" parameter is specified as "INVALID_TYPE". It will also be used + * as input index to query the ZBC table for COLOR/QUERY/STENCIL Info. + * indexEnd + * This parameter is used to return the valid ending index of ZBC table, when + * "valType" parameter is specified as "INVALID_TYPE". + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV9096_CTRL_CMD_GET_ZBC_CLEAR_TABLE (0x90960103U) /* finn: Evaluated from "(FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID << 8) | NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_INVALID 0U +#define NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_COLOR 1U +#define NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_DEPTH 2U +#define NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_STENCIL 3U +#define NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS { + struct { + NvU32 colorFB[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE]; + NvU32 colorDS[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE]; + NvU32 depth; + NvU32 stencil; + } value; + NvU32 indexSize; + NvU32 indexStart; + NvU32 indexEnd; + NvU32 indexUsed; /* TODO: Change to NvBool, need to fix the tegra interface too */ + NvU32 format; + NvU32 valType; +} NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS; + +/* + * Note: This ctrl call is deprecated. To program ZBC table entries, please use + * NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR + * NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR + * NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR + * which will update a single entry in single table at a time. + * + * + * NV9096_CTRL_CMD_SET_ZBC_CLEAR_TABLE + * + * This command is used to set the ZBC Clear Color/Depth/Stencil table data at a specified + * index. The parameters to this command are described below. + * + * colorFB + * This array field specifies the L2 color value to be written to the ZBC table. + * colorDS + * This array field specifies the DS color value to be written to the ZBC table. + * colorFormat + * This field specifies the ZBC color format to be set. 
This field must be set + * to one of the valid NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL* defines. + * depth + * This field specifies the ZBC depth clear value to be set. + * depthFormat + * This field specifies the ZBC depth format to be set. This field must be set + * to one of the valid NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR_FMT_VAL* defines. + * stencil + * This field specifies the ZBC stencil clear value to be set. + * stencilFormat + * This field specifies the ZBC stencil format to be set. This field must be set + * to one of the valid NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR_FMT_VAL* defines. + * index + * This field specifies the index at which the color/depth data is to be + * written into the ZBC table. Legal values for this field must lie between + * 1 and the value returned in the indexSize parameter of the + * NV9096_CTRL_CMD_GET_ZBC_CLEAR_TABLE control call when called with the + * parameter valType set to NV9096_CTRL_ZBC_CLEAR_OBJECT_TYPE_INVALID. + * bSkipL2Table + * This parameter specifies if the L2 ZBC table should be updated or not. If + * this parameter is set to a non-zero value, the L2 ZBC table will not be + * updated. If this parameter is set to 0, the L2 ZBC table will be updated. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INSUFFICIENT_RESOURCES + */ +#define NV9096_CTRL_CMD_SET_ZBC_CLEAR_TABLE (0x90960104U) /* finn: Evaluated from "(FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID << 8) | NV9096_CTRL_SET_ZBC_CLEAR_TABLE_PARAMS_MESSAGE_ID" */ + +#define NV9096_CTRL_SET_ZBC_CLEAR_TABLE_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV9096_CTRL_SET_ZBC_CLEAR_TABLE_PARAMS { + NvU32 colorFB[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE]; + NvU32 colorDS[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE]; + NvU32 colorFormat; + NvU32 depth; + NvU32 depthFormat; + NvU32 stencil; + NvU32 stencilFormat; + NvU32 index; + NvBool bSkipL2Table; +} NV9096_CTRL_SET_ZBC_CLEAR_TABLE_PARAMS; + +/* + * NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR_FMT_VAL_ + * These are various possible Formats which the Client passes down to RM to set in the + * ZBC clear(DSS) Table. + */ + +#define NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR_FMT_VAL_INVALID 0x00000000U +// Fix me: Change it to CLEAR_FMT_VAL_U8 +#define NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR_FMT_U8 0x00000001U + +/* + * NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR + * + * This command attempts to add a new entry to Stencil ZBC Tables. + * + * stencil + * This field specifies the ZBC stencil clear value to be set. + * format + * This field specifies the stencil format for the data send in by the client. + * bSkipL2Table + * This parameter specifies if the L2 ZBC table should be updated or not. If + * this parameter is set to a non-zero value, the L2 ZBC table will not be + * updated. If this parameter is set to 0, the L2 ZBC table will be updated. + * Note: This parameter will only be supported in verification platforms. 
+ * Possible status values returned are: + * NV_OK + * NVOS_STATUS_INSUFFICIENT_RESOURCES + */ +#define NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR (0x90960105U) /* finn: Evaluated from "(FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID << 8) | NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_MESSAGE_ID" */ + +#define NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS { + NvU32 stencil; + NvU32 format; + NvBool bSkipL2Table; +} NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS; + +/* + * NV9096_CTRL_CMD_GET_ZBC_CLEAR_TABLE_SIZE + * This command returns the range of valid indices in the color, depth, or stencil ZBC tables. + * + * indexStart + * This parameter is used to return the first valid index in the color, depth, or stencil ZBC table, + * depending on the value passed in the tableType + * indexEnd + * This parameter is used to return the last valid index in the color, depth, or stencil ZBC table, + * depending on the value passed in the tableType + * tableType + * This field specifies the type of the Table data to be fetched. + * Possible Types are : + * NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COLOR + * NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_DEPTH + * NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_STENCIL + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +typedef enum NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE { + NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_INVALID = 0, + NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COLOR = 1, + NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_DEPTH = 2, + NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_STENCIL = 3, + NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COUNT = 4, +} NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE; + +#define NV9096_CTRL_CMD_GET_ZBC_CLEAR_TABLE_SIZE (0x90960106U) /* finn: Evaluated from "(FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID << 8) | NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS_MESSAGE_ID" */ + +#define NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS { + NvU32 indexStart; + NvU32 indexEnd; + NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE tableType; +} NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS; + +/* + * NV9096_CTRL_CMD_GET_ZBC_CLEAR_TABLE_ENTRY + * This command returns the ZBC entry stored in the color, depth or stencil ZBC tables + * + * colorFB[out] + * This value returns raw framebuffer values for color ZBC table entries. Pre-GA10x, these values + * are taken from the "L2" table. From GA10x+, these values are taken from CROP table. This value is + * set only when valType is chosen as "NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COLOR" + * colorDS[out] + * Pre-GA10x, returns the DS color value set for ZBC. From GA10x+, returns zeroes since this format is + * deprecated in HW. This value is set only when valType is chosen as "NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COLOR" + * depth[out] + * This field specifies the ZBC depth clear value set, when valType is chosen as "NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_DEPTH". + * stencil[out] + * This field specifies the ZBC stencil clear value set, when valType is chosen as "NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_STENCIL" + * format + * This field returns the format of color, depth, or stencil ZBC table entries, using the + * NV9096_CTRL_CMD_SET_ZBC_COLOR_CLEAR_FMT_VAL*, + * NV9096_CTRL_CMD_SET_ZBC_DEPTH_CLEAR_FMT*, + * NV9096_CTRL_CMD_SET_ZBC_STENCIL_CLEAR_FMT* enums depending on the table identified by valType. + * On GA10X and later, color ZBC table entries do not have a format in hardware and this query + * returns a format of "INVALID". 
+ * index[in] + * This field specifies table index for which the ZBC entry information needs to be fetched. + * bIndexValid[out] + * This field specifies whether the entry is valid or not. + * tableType[in] + * This field specifies the type of the Table data to be fetched. + * Possible Types are : + * NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_COLOR + * NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_DEPTH + * NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE_STENCIL + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NV9096_CTRL_CMD_GET_ZBC_CLEAR_TABLE_ENTRY (0x90960107U) /* finn: Evaluated from "(FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID << 8) | NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_MESSAGE_ID" */ + +#define NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS { + struct { + NvU32 colorFB[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE]; + NvU32 colorDS[NV9096_CTRL_SET_ZBC_COLOR_CLEAR_VALUE_SIZE]; + NvU32 depth; + NvU32 stencil; + } value; + NvU32 format; + NvU32 index; + NvBool bIndexValid; + NV9096_CTRL_ZBC_CLEAR_TABLE_TYPE tableType; +} NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS; + +/* _ctrl9096_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90cc.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90cc.h new file mode 100644 index 000000000..fa02e7532 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90cc.h @@ -0,0 +1,455 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl90cc.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl90cc/ctrl90ccbase.h" + +#include "ctrl90cc/ctrl90cchwpm.h" + + +#include "ctrl90cc/ctrl90ccpower.h" + + +/* + * NV90CC_CTRL_CMD_PROFILER_RESERVE_HWPM + * + * This command is deprecated; please use NV90CC_CTRL_CMD_HWPM_RESERVE. + * + * This command attempts to reserve the perfmon for use by the calling client. + * If this object was allocated as a child of a subdevice, then the + * reservation will be global among all contexts on that subdevice. If this + * object was allocated as a child of a channel group or a channel, then the + * reservation will only be for the hardware context of that channel group or + * channel. 
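+ *
+ * A typical calling pattern, sketched in the pseudocode shorthand used
+ * elsewhere in this change (neither command takes a parameter structure, and
+ * new code should prefer the NV90CC_CTRL_CMD_HWPM_* names):
+ *
+ *     NvRmControl(NV90CC_CTRL_CMD_HWPM_RESERVE, {});
+ *     // ... program and read the perfmon ...
+ *     NvRmControl(NV90CC_CTRL_CMD_HWPM_RELEASE, {});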
+ * + * If the global reservation is held on a subdevice by another client, then + * this command will fail, regardless of the parent class. + * + * If one or more per-context reservations are held by other clients, then + * this command will fail if the parent object is a subdevice or another + * client already holds the perfmon reservation for the parent context. + * + * This command will return NV_ERR_STATE_IN_USE for all of the failure + * cases described above. A return status of NV_OK guarantees + * that the client holds the perfmon reservation of the appropriate scope. + * + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_STATE_IN_USE + */ +#define NV90CC_CTRL_CMD_PROFILER_RESERVE_HWPM NV90CC_CTRL_CMD_HWPM_RESERVE + +/* + * NV90CC_CTRL_CMD_PROFILER_RELEASE_HWPM + * + * This command is deprecated; please use NV90CC_CTRL_CMD_HWPM_RELEASE. + * + * This command releases an existing reservation of the perfmon for the + * calling client. If the calling client does not currently have the perfmon + * reservation as acquired by NV90CC_CTRL_CMD_PROFILER_RESERVE_HWPM, this + * command will return NV_ERR_INVALID_REQUEST. + * + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_REQUEST + */ +#define NV90CC_CTRL_CMD_PROFILER_RELEASE_HWPM NV90CC_CTRL_CMD_HWPM_RELEASE + + + +/* + * NV90CC_CTRL_CMD_PROFILER_REQUEST_CG_CONTROLS + * + * This command is deprecated; please use + * NV90CC_CTRL_CMD_POWER_REQUEST_POWER_FEATURES. + * + * This command attempts to enable or disable various clock-gating features of + * the GPU on behalf of the profiler. If this command is unable to set the + * clock-gating feature state of any of the requested features, this command + * will fail and none of the requested features will be modified. If this + * command fails because one or more clock-gating feature requests were + * rejected, it will return NV_ERR_STATE_IN_USE in the globalStatus + * parameter and the fields in the statusMask parameter for the features for + * which the requests were rejected will have the value + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_REQUEST_REJECTED. + * If a given feature is not supported on the GPU, the field for that clock- + * gating feature will have the value + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_REQUEST_NOT_SUPPORTED in the + * statusMask parameter, but this condition by itself will not cause the + * command to fail. Even if this command fails, the field for clock-gating + * features which would have successfully changed will have the value + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_FULFILLED in the statusMask + * parameter. + * + * Each of the clock-gating features is reference-counted individually, so + * that multiple GF100_PROFILER objects may request and rely on the same + * settings for the features simultaneously. Each clock-gating feature is + * locked to the requested state until the GF100_PROFILER object is freed or + * the NV90CC_CTRL_CMD_PROFILER_RELEASE_CG_CONTROLS command is called for that + * feature. + * + * Currently, only GF100_PROFILER requests for power features using this + * command are reference counted. Changes to the power feature settings made + * either by other control commands or the RM itself may interfere with the + * settings requested by GF100_PROFILER instances. + * + * This command will always return NV_OK when given valid + * parameters. 
If there is any other failure that prevents the clock-gating + * features from being set appropriately, the globalStatus parameter will + * indicate this and the statusMask parameter will indicate which clock-gating + * feature requests failed and why. + * + * controlMask + * This parameter indicates which clock-gating features the request should + * apply to. This parameter has the following fields: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG + * The value of this field indicates whether this request should apply + * to engine-level clock-gating of the GR engine. Valid values for + * this field are: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_IGNORE + * This value indicates that the GR engine-level clock-gating + * should be ignored. This will not affect the reference count for + * this feature. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_DISABLE + * This value indicates that the GR engine-level clock-gating + * should be disabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_ENABLE + * This value indicates that the GR engine-level clock-gating + * should be enabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG + * The value of this field indicates whether this request should apply + * to block-level clock-gating. Valid values for this field are: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_IGNORE + * This value indicates that block-level clock-gating should be + * ignored. This will not affect the reference count for this + * feature. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_DISABLE + * This value indicates that block-level clock-gating should be + * disabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_ENABLE + * This value indicates that block-level clock-gating should be + * enabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG + * The value of this field indicates whether this request should apply + * to second-level clock-gating. Valid values for this field are: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_IGNORE + * This value indicates that second-level clock-gating should be + * ignored. This will not affect the reference count for this + * feature. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_DISABLE + * This value indicates that second-level clock-gating should be + * disabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_ENABLE + * This value indicates that second-level clock-gating should be + * enabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG + * The value of this field indicates whether this request should apply + * to GR engine-level power-gating. Valid values for this field are: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_IGNORE + * This value indicates that engine-level power-gating should be + * ignored. This will not affect the reference count for this + * feature. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_DISABLE + * This value indicates that engine-level power-gating should be + * disabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_ENABLE + * This value indicates that engine-level power-gating should be + * enabled. + * Note that this field is only temporary to allow reference counting + * restricted to GF100_PROFILER instances, until the time when the + * existing controls for this power feature can be updated to support + * reference counting across all clients and the RM. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN + * The value of this field indicates whether this request should apply + * to SM idle slowdown. 
Valid values for this field are: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_IGNORE + * This value indicates that SM idle slowdown should be ignored. + * This will not affect the reference count for this feature. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_DISABLE + * This value indicates that SM idle slowdown should be disabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_ENABLE + * This value indicates that SM idle slowdown should be enabled. + * Note that this field is only temporary to allow reference counting + * restricted to GF100_PROFILER instances, until the time when the + * existing controls for this power feature can be updated to support + * reference counting across all clients and the RM. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT + * The value of this field indicates whether this request should apply + * to VAT. Valid values for this field are: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_IGNORE + * This value indicates that VAT should be ignored. + * This will not affect the reference count for this feature. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_DISABLE + * This value indicates that VAT should be disabled. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_ENABLE + * This value indicates that VAT should be enabled. + * Note that this field is only temporary to allow reference counting + * restricted to GF100_PROFILER instances, until the time when the + * existing controls for this power feature can be updated to support + * reference counting across all clients and the RM. + * globalStatus + * This parameter returns the overall status of the requests for all + * clock-gating controls. If the value of this parameter is not + * NV_OK, none of the clock-gating controls will be set as + * requested. Possible values for this parameter are: + * NV_OK + * This value indicates that all of the clock-gating control requests + * were either fulfilled or not supported on the hardware. + * NV_ERR_INVALID_REQUEST + * This value indicates that at least one of the clock-gating control + * requests were invalid given the GF100_PROFILER instance's + * outstanding requests. + * NV_ERR_STATE_IN_USE + * This value indicates that at least one of the clock-gating controls + * has already been locked to a conflicting state by another + * GF100_PROFILER instance or the RM itself. + * statusMask + * This parameter returns the status of the request to set each clock- + * gating control specified by the controlMask parameter. The fields are + * identical to those of the controlMask parameter. For each field for + * which the corresponding field in the controlMask parameter has the + * value NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_IGNORE, the value is + * undefined. For each field for which the corresponding field in the + * controlMask parameter has the value + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_REQUEST, the value will be + * one of the following: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_REQUEST_FULFILLED + * This value indicates that the clock-gating feature corresponding to + * the field in question was enabled or disabled according to the + * controlMask parameter, and the reference count for the feature was + * incremented accordingly. 
+ * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_REQUEST_REJECTED + * This value indicates that the clock-gating feature corresponding to + * the field in question was not set to the expected state according + * to the controlMask parameter because another conflicting request is + * currently outstanding for the clock-gating feature. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_REQUEST_FAILED + * This value indicates that the clock-gating feature corresponding to + * the field in question was not set to the expected state according + * to the controlMask parameter because the attempt to do so failed + * with an error other than a conflicting request. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_REQUEST_NOT_SUPPORTED + * This value indicates that the clock-gating feature corresponding to + * the field in question is not supported on this GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV90CC_CTRL_CMD_PROFILER_REQUEST_CG_CONTROLS NV90CC_CTRL_CMD_POWER_REQUEST_FEATURES + +typedef NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS NV90CC_CTRL_PROFILER_REQUEST_CG_CONTROLS_PARAMS; + +/* valid fields for the controlMask and statusMask parameters */ +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG 1:0 +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG 3:2 +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG 5:4 + +/* + * The following are temporary fields for the controlMask and statusMask + * parameters. They are required to reference count their respective features + * until the existing RM controls can be safely updated, and the definitions + * for these features will be removed soon after that. + */ +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG 7:6 +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN 9:8 +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT 11:10 + +/* valid values for fields in the controlMask parameter */ +#define NV90CC_CTRL_PROFILER_CG_CONTROL_IGNORE (0x00000000) +#define NV90CC_CTRL_PROFILER_CG_CONTROL_DISABLE (0x00000001) +#define NV90CC_CTRL_PROFILER_CG_CONTROL_ENABLE (0x00000002) + +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_IGNORE NV90CC_CTRL_PROFILER_CG_CONTROL_IGNORE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_DISABLE NV90CC_CTRL_PROFILER_CG_CONTROL_DISABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_ENABLE NV90CC_CTRL_PROFILER_CG_CONTROL_ENABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_IGNORE NV90CC_CTRL_PROFILER_CG_CONTROL_IGNORE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_DISABLE NV90CC_CTRL_PROFILER_CG_CONTROL_DISABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_ENABLE NV90CC_CTRL_PROFILER_CG_CONTROL_ENABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_IGNORE NV90CC_CTRL_PROFILER_CG_CONTROL_IGNORE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_DISABLE NV90CC_CTRL_PROFILER_CG_CONTROL_DISABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_ENABLE NV90CC_CTRL_PROFILER_CG_CONTROL_ENABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_IGNORE NV90CC_CTRL_PROFILER_CG_CONTROL_IGNORE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_DISABLE NV90CC_CTRL_PROFILER_CG_CONTROL_DISABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_ENABLE NV90CC_CTRL_PROFILER_CG_CONTROL_ENABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_IGNORE NV90CC_CTRL_PROFILER_CG_CONTROL_IGNORE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_DISABLE NV90CC_CTRL_PROFILER_CG_CONTROL_DISABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_ENABLE 
NV90CC_CTRL_PROFILER_CG_CONTROL_ENABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_IGNORE NV90CC_CTRL_PROFILER_CG_CONTROL_IGNORE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_DISABLE NV90CC_CTRL_PROFILER_CG_CONTROL_DISABLE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_ENABLE NV90CC_CTRL_PROFILER_CG_CONTROL_ENABLE +/* possible values for fields in the statusMask parameter */ +#define NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FULFILLED (0x00000000) +#define NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_REJECTED (0x00000001) +#define NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_NOT_SUPPORTED (0x00000002) +#define NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FAILED (0x00000003) + +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_REQUEST_FULFILLED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FULFILLED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_REQUEST_REJECTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_REJECTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_REQUEST_NOT_SUPPORTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_REQUEST_FAILED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FAILED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_REQUEST_FULFILLED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FULFILLED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_REQUEST_REJECTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_REJECTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_REQUEST_NOT_SUPPORTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_REQUEST_FAILED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FAILED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_REQUEST_FULFILLED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FULFILLED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_REQUEST_REJECTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_REJECTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_REQUEST_NOT_SUPPORTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_REQUEST_FAILED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FAILED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_REQUEST_FULFILLED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FULFILLED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_REQUEST_REJECTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_REJECTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_REQUEST_NOT_SUPPORTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_REQUEST_FAILED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FAILED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_REQUEST_FULFILLED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FULFILLED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_REQUEST_REJECTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_REJECTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_REQUEST_NOT_SUPPORTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_REQUEST_FAILED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FAILED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_REQUEST_FULFILLED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FULFILLED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_REQUEST_REJECTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_REJECTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_REQUEST_NOT_SUPPORTED NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_REQUEST_FAILED 
NV90CC_CTRL_PROFILER_CG_CONTROL_REQUEST_FAILED + +/* utility masks for the controlMask parameter for all controls */ +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ALL \ + DRF_SHIFTMASK(NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG) | \ + DRF_SHIFTMASK(NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG) | \ + DRF_SHIFTMASK(NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG) | \ + DRF_SHIFTMASK(NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG) | \ + DRF_SHIFTMASK(NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN) | \ + DRF_SHIFTMASK(NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT) +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ALL_IGNORE \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _ELCG, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _BLCG, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _SLCG, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _ELPG, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _IDLE_SLOWDOWN, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _VAT, _IGNORE) +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ALL_DISABLE \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _ELCG, _DISABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _BLCG, _DISABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _SLCG, _DISABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _ELPG, _DISABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _IDLE_SLOWDOWN, _DISABLE)| \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _VAT, _DISABLE) +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ALL_ENABLE \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _ELCG, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _BLCG, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _SLCG, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _ELPG, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _IDLE_SLOWDOWN, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _VAT, _ENABLE) + +/* + * NV90CC_CTRL_CMD_PROFILER_RELEASE_CG_CONTROLS + * + * This command is deprecated; please us + * NV90CC_CTRL_CMD_POWER_RELEASE_FEATURES. + * + * This command releases the GF100_PROFILER's request for the given clock- + * gating features that was previously created by the + * NV90CC_CTRL_CMD_PROFILER_REQUEST_CG_CONTROLS command. If the GF100_PROFILER + * object does not have an outstanding request to one or more of the given + * clock-gating features, those features will be ignored while the other + * feature requests will be released. + * + * After calling this command, the calling client may not rely on the current + * value of any of the released clock-gating features to remain, even if + * several identical requests for the given clock-gating features were made + * using NV90CC_CTRL_CMD_PROFILER_REQUEST_CG_CONTROLS. The RM only reference- + * counts the settings requested by GF100_PROFILER instances - it does not + * reference-count multiple identical requests made using the same + * GF100_PROFILER instance. + * + * All outstanding requests made using this GF100_PROFILER object are + * implicitly released when the GF100_PROFILER is freed. + * + * controlMask + * This parameter indicates which clock-gating features the RM should + * release the GF100_PROFILER's reference to. See + * NV90CC_CTRL_CMD_PROFILER_REQUEST_CG_CONTROLS for valid fields. 
Valid + * values for each field are: + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_IGNORE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_IGNORE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_IGNORE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_IGNORE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_IGNORE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_IGNORE + * This value indicates that the clock-gating feature associated with + * the field should not be released, even if the GF100_PROFILER has an + * outstanding request for it. This will not affect the reference + * count for the feature. + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_RELEASE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_RELEASE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_RELEASE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_RELEASE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_RELEASE + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_RELEASE + * This value indicates that the GF100_PROFILER's outstanding request + * for the clock-gating feature associated with the field should be + * released. This will decrement the reference count for the feature + * if the GF100_PROFILER has an outstanding request for it. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV90CC_CTRL_CMD_PROFILER_RELEASE_CG_CONTROLS NV90CC_CTRL_CMD_POWER_RELEASE_FEATURES + +typedef NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS NV90CC_CTRL_PROFILER_RELEASE_CG_CONTROLS_PARAMS; + +/* + * valid values for the controlMask parameter in addition to + * NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_*_IGNORE + */ +#define NV90CC_CTRL_PROFILER_CG_CONTROL_RELEASE (0x00000003) + +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELCG_RELEASE NV90CC_CTRL_PROFILER_CG_CONTROL_RELEASE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_BLCG_RELEASE NV90CC_CTRL_PROFILER_CG_CONTROL_RELEASE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_SLCG_RELEASE NV90CC_CTRL_PROFILER_CG_CONTROL_RELEASE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ELPG_RELEASE NV90CC_CTRL_PROFILER_CG_CONTROL_RELEASE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_IDLE_SLOWDOWN_RELEASE NV90CC_CTRL_PROFILER_CG_CONTROL_RELEASE +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_VAT_RELEASE NV90CC_CTRL_PROFILER_CG_CONTROL_RELEASE + +/* utility mask for the controlMask parameter for all fields */ +#define NV90CC_CTRL_PROFILER_CG_CONTROL_MASK_ALL_RELEASE \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _ELCG, _RELEASE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _BLCG, _RELEASE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _SLCG, _RELEASE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _ELPG, _RELEASE) | \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _IDLE_SLOWDOWN, _RELEASE)| \ + DRF_DEF(90CC, _CTRL_PROFILER_CG_CONTROL_MASK, _VAT, _RELEASE) + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90ccbase.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90ccbase.h new file mode 100644 index 000000000..f0874a39e --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90ccbase.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl90cc/ctrl90ccbase.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* GF100_PROFILER control commands and parameters */ + +#define NV90CC_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90CC, NV90CC_CTRL_##cat, idx) + +/* GF100_PROFILER command categories (6 bits) */ +#define NV90CC_CTRL_RESERVED (0x00) +#define NV90CC_CTRL_HWPM (0x01) + + +#define NV90CC_CTRL_POWER (0x03) + +/* + * NV90CC_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV90CC_CTRL_CMD_NULL (0x90cc0000) /* finn: Evaluated from "(FINN_GF100_PROFILER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrl90ccbase_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90cchwpm.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90cchwpm.h new file mode 100644 index 000000000..e8971fcb3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90cchwpm.h @@ -0,0 +1,138 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl90cc/ctrl90cchwpm.finn +// + + + + +#include "ctrl/ctrl90cc/ctrl90ccbase.h" + +/* GF100_PROFILER HWPM control commands and parameters */ + +/* + * NV90CC_CTRL_CMD_HWPM_RESERVE + * + * This command attempts to reserve the perfmon for use by the calling client. + * If this object was allocated as a child of a subdevice, then the + * reservation will be global among all contexts on that subdevice. If this + * object was allocated as a child of a channel group or a channel, then the + * reservation will only be for the hardware context of that channel group or + * channel. + * + * If the global reservation is held on a subdevice by another client, then + * this command will fail, regardless of the parent class. + * + * If one or more per-context reservations are held by other clients, then + * this command will fail if the parent object is a subdevice or another + * client already holds the perfmon reservation for the parent context. + * + * This command will return NV_ERR_STATE_IN_USE for all of the failure + * cases described above. A return status of NV_OK guarantees + * that the client holds the perfmon reservation of the appropriate scope. + * + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_STATE_IN_USE + */ +#define NV90CC_CTRL_CMD_HWPM_RESERVE (0x90cc0101) /* finn: Evaluated from "(FINN_GF100_PROFILER_HWPM_INTERFACE_ID << 8) | 0x1" */ + +/* + * NV90CC_CTRL_CMD_HWPM_RELEASE + * + * This command releases an existing reservation of the perfmon for the + * calling client. If the calling client does not currently have the perfmon + * reservation as acquired by NV90CC_CTRL_CMD_PROFILER_RESERVE_HWPM, this + * command will return NV_ERR_INVALID_REQUEST. + * + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_REQUEST + */ +#define NV90CC_CTRL_CMD_HWPM_RELEASE (0x90cc0102) /* finn: Evaluated from "(FINN_GF100_PROFILER_HWPM_INTERFACE_ID << 8) | 0x2" */ + +/* + * NV90CC_CTRL_CMD_HWPM_GET_RESERVATION_INFO + * + * This command returns information about any outstanding perfmon + * reservations. If this object was allocated as a child of a subdevice, then + * this command will return information about all reservations on the + * subdevice (global or per-context). If this object was allocated as a child + * of a channel group or channel, then this command will only return + * information about the per-context reservation for that context or the + * global reservation, if one exists. + * + * reservationCount + * This parameter returns the number of outstanding perfmon reservations + * in the applicable scope. If the value of the bGlobal parameter is + * NV_TRUE, then this parameter will have a value of 1. If this object was + * allocated as a child of a channel group or channel, then this parameter + * will have a value of either 0 or 1. If this object was allocated as a + * child of a subdevice and the bGlobal parameter is NV_FALSE, then this + * parameter will return the number of per-context reservations on the + * subdevice. + * pid + * This parameter returns the PID of a process that holds a reservation in + * the applicable scope. If the value of the bGlobal parameter is NV_TRUE, + * then this parameter will be the PID of the process holding the global + * perfmon reservation on the subdevice. 
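+ *
+ * Example of the reserve/release lifecycle (editorial sketch, not part of
+ * the original header): it assumes a hypothetical RmControl(hClient,
+ * hObject, cmd, pParams, size) wrapper over the resource-manager control
+ * interface and an already-allocated GF100_PROFILER handle hProfiler; the
+ * reservation scope (global vs. per-context) is fixed by the parent under
+ * which hProfiler was allocated.
+ *
+ *     // Neither command takes a parameter structure.
+ *     NV_STATUS status = RmControl(hClient, hProfiler,
+ *                                  NV90CC_CTRL_CMD_HWPM_RESERVE, NULL, 0);
+ *     if (status == NV_ERR_STATE_IN_USE)
+ *     {
+ *         // A conflicting reservation is held elsewhere; back off, or use
+ *         // NV90CC_CTRL_CMD_HWPM_GET_RESERVATION_INFO to see who holds it.
+ *     }
+ *     else if (status == NV_OK)
+ *     {
+ *         // ... program the perfmon and collect data ...
+ *
+ *         status = RmControl(hClient, hProfiler,
+ *                            NV90CC_CTRL_CMD_HWPM_RELEASE, NULL, 0);
+ *     }
+ *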
Otherwise, if the value of + * reservationCount is greater than 0, the value of this parameter will be + * the PID of one of the process that holds the per-context lock in the + * applicable scope. If the value of the reservationCount parameter is 0, + * the value of this parameter is undefined. + * bGlobal + * This parameter returns whether the outstanding perfmon reservation held + * by any client is global or per-context. If the value of this parameter + * is NV_TRUE, then the value of the reservationCount parameter should be 1 + * and the value of the pid parameter should be the pid of the process + * that holds the global perfmon reservation. The value of this parameter + * will be NV_FALSE when there is no global perfmon reservation. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV90CC_CTRL_CMD_HWPM_GET_RESERVATION_INFO (0x90cc0103) /* finn: Evaluated from "(FINN_GF100_PROFILER_HWPM_INTERFACE_ID << 8) | NV90CC_CTRL_HWPM_GET_RESERVATION_INFO_PARAMS_MESSAGE_ID" */ + +#define NV90CC_CTRL_HWPM_GET_RESERVATION_INFO_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV90CC_CTRL_HWPM_GET_RESERVATION_INFO_PARAMS { + NvU32 reservationCount; + NvU32 pid; + NvBool bGlobal; +} NV90CC_CTRL_HWPM_GET_RESERVATION_INFO_PARAMS; + + + +/* _ctrl90cchwpm_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90ccpower.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90ccpower.h new file mode 100644 index 000000000..9717fd349 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90cc/ctrl90ccpower.h @@ -0,0 +1,406 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl90cc/ctrl90ccpower.finn +// + + + + +#include "ctrl/ctrl90cc/ctrl90ccbase.h" + +/* GF100_PROFILER GPU control commands and parameters */ +/* + * NV90CC_CTRL_CMD_POWER_REQUEST_FEATURES + * + * This command attempts to enable or disable various clock-gating features of + * the GPU on behalf of the profiler. If this command is unable to set the + * clock-gating feature state of any of the requested features, this command + * will fail and none of the requested features will be modified. 
If this + * command fails because one or more clock-gating feature requests were + * rejected, it will return NV_ERR_STATE_IN_USE in the globalStatus + * parameter and the fields in the statusMask parameter for the features for + * which the requests were rejected will have the value + * NV90CC_CTRL_POWER_FEATURE_MASK_*_REQUEST_REJECTED. + * If a given feature is not supported on the GPU, the field for that clock- + * gating feature will have the value + * NV90CC_CTRL_POWER_FEATURE_MASK_*_REQUEST_NOT_SUPPORTED in the + * statusMask parameter, but this condition by itself will not cause the + * command to fail. Even if this command fails, the field for clock-gating + * features which would have successfully changed will have the value + * NV90CC_CTRL_POWER_FEATURE_MASK_*_FULFILLED in the statusMask + * parameter. + * + * Each of the clock-gating features is reference-counted individually, so + * that multiple GF100_PROFILER objects may request and rely on the same + * settings for the features simultaneously. Each clock-gating feature is + * locked to the requested state until the GF100_PROFILER object is freed or + * the NV90CC_CTRL_CMD_POWER_RELEASE_FEATURES command is called for that + * feature. + * + * Currently, only GF100_PROFILER requests for power features using this + * command are reference counted. Changes to the power feature settings made + * either by other control commands or the RM itself may interfere with the + * settings requested by GF100_PROFILER instances. + * + * This command will always return NV_OK when given valid + * parameters. If there is any other failure that prevents the clock-gating + * features from being set appropriately, the globalStatus parameter will + * indicate this and the statusMask parameter will indicate which clock-gating + * feature requests failed and why. + * + * controlMask + * This parameter indicates which clock-gating features the request should + * apply to. This parameter has the following fields: + * NV90CC_CTRL_POWER_FEATURE_MASK_ELCG + * The value of this field indicates whether this request should apply + * to engine-level clock-gating of the GR engine. Valid values for + * this field are: + * NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_IGNORE + * This value indicates that the GR engine-level clock-gating + * should be ignored. This will not affect the reference count for + * this feature. + * NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_DISABLE + * This value indicates that the GR engine-level clock-gating + * should be disabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_ENABLE + * This value indicates that the GR engine-level clock-gating + * should be enabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_BLCG + * The value of this field indicates whether this request should apply + * to block-level clock-gating. Valid values for this field are: + * NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_IGNORE + * This value indicates that block-level clock-gating should be + * ignored. This will not affect the reference count for this + * feature. + * NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_DISABLE + * This value indicates that block-level clock-gating should be + * disabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_ENABLE + * This value indicates that block-level clock-gating should be + * enabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_SLCG + * The value of this field indicates whether this request should apply + * to second-level clock-gating. 
Valid values for this field are: + * NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_IGNORE + * This value indicates that second-level clock-gating should be + * ignored. This will not affect the reference count for this + * feature. + * NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_DISABLE + * This value indicates that second-level clock-gating should be + * disabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_ENABLE + * This value indicates that second-level clock-gating should be + * enabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_ELPG + * The value of this field indicates whether this request should apply + * to GR engine-level power-gating. Valid values for this field are: + * NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_IGNORE + * This value indicates that engine-level power-gating should be + * ignored. This will not affect the reference count for this + * feature. + * NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_DISABLE + * This value indicates that engine-level power-gating should be + * disabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_ENABLE + * This value indicates that engine-level power-gating should be + * enabled. + * Note that this field is only temporary to allow reference counting + * restricted to GF100_PROFILER instances, until the time when the + * existing controls for this power feature can be updated to support + * reference counting across all clients and the RM. + * NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN + * The value of this field indicates whether this request should apply + * to SM idle slowdown. Valid values for this field are: + * NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_IGNORE + * This value indicates that SM idle slowdown should be ignored. + * This will not affect the reference count for this feature. + * NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_DISABLE + * This value indicates that SM idle slowdown should be disabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_ENABLE + * This value indicates that SM idle slowdown should be enabled. + * Note that this field is only temporary to allow reference counting + * restricted to GF100_PROFILER instances, until the time when the + * existing controls for this power feature can be updated to support + * reference counting across all clients and the RM. + * NV90CC_CTRL_POWER_FEATURE_MASK_VAT + * The value of this field indicates whether this request should apply + * to VAT. Valid values for this field are: + * NV90CC_CTRL_POWER_FEATURE_MASK_VAT_IGNORE + * This value indicates that VAT should be ignored. + * This will not affect the reference count for this feature. + * NV90CC_CTRL_POWER_FEATURE_MASK_VAT_DISABLE + * This value indicates that VAT should be disabled. + * NV90CC_CTRL_POWER_FEATURE_MASK_VAT_ENABLE + * This value indicates that VAT should be enabled. + * Note that this field is only temporary to allow reference counting + * restricted to GF100_PROFILER instances, until the time when the + * existing controls for this power feature can be updated to support + * reference counting across all clients and the RM. + * globalStatus + * This parameter returns the overall status of the requests for all + * clock-gating controls. If the value of this parameter is not + * NV_OK, none of the clock-gating controls will be set as + * requested. Possible values for this parameter are: + * NV_OK + * This value indicates that all of the clock-gating control requests + * were either fulfilled or not supported on the hardware. 
+ * NV_ERR_INVALID_REQUEST + * This value indicates that at least one of the clock-gating control + * requests were invalid given the GF100_PROFILER instance's + * outstanding requests. + * NV_ERR_STATE_IN_USE + * This value indicates that at least one of the clock-gating controls + * has already been locked to a conflicting state by another + * GF100_PROFILER instance or the RM itself. + * statusMask + * This parameter returns the status of the request to set each clock- + * gating control specified by the controlMask parameter. The fields are + * identical to those of the controlMask parameter. For each field for + * which the corresponding field in the controlMask parameter has the + * value NV90CC_CTRL_POWER_FEATURE_MASK_*_IGNORE, the value is + * undefined. For each field for which the corresponding field in the + * controlMask parameter has the value + * NV90CC_CTRL_POWER_FEATURE_MASK_*_REQUEST, the value will be + * one of the following: + * NV90CC_CTRL_POWER_FEATURE_MASK_*_REQUEST_FULFILLED + * This value indicates that the clock-gating feature corresponding to + * the field in question was enabled or disabled according to the + * controlMask parameter, and the reference count for the feature was + * incremented accordingly. + * NV90CC_CTRL_POWER_FEATURE_MASK_*_REQUEST_REJECTED + * This value indicates that the clock-gating feature corresponding to + * the field in question was not set to the expected state according + * to the controlMask parameter because another conflicting request is + * currently outstanding for the clock-gating feature. + * NV90CC_CTRL_POWER_FEATURE_MASK_*_REQUEST_FAILED + * This value indicates that the clock-gating feature corresponding to + * the field in question was not set to the expected state according + * to the controlMask parameter because the attempt to do so failed + * with an error other than a conflicting request. + * NV90CC_CTRL_POWER_FEATURE_MASK_*_REQUEST_NOT_SUPPORTED + * This value indicates that the clock-gating feature corresponding to + * the field in question is not supported on this GPU. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV90CC_CTRL_CMD_POWER_REQUEST_FEATURES (0x90cc0301) /* finn: Evaluated from "(FINN_GF100_PROFILER_POWER_INTERFACE_ID << 8) | NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS_MESSAGE_ID" */ + +#define NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS { + NvU32 globalStatus; + NvU32 controlMask; + NvU32 statusMask; +} NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS; + +/* valid fields for the controlMask and statusMask parameters */ +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG 1:0 +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG 3:2 +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG 5:4 + +/* + * The following are temporary fields for the controlMask and statusMask + * parameters. They are required to reference count their respective features + * until the existing RM controls can be safely updated, and the definitions + * for these features will be removed soon after that. 
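+ *
+ * Example request (editorial sketch, not part of the original header): it
+ * assumes a hypothetical RmControl(hClient, hObject, cmd, pParams, size)
+ * wrapper over the resource-manager control interface, an already-allocated
+ * GF100_PROFILER handle hProfiler, and the DRF_* field helpers from
+ * nvmisc.h (DRF_DEF/DRF_VAL), the same helpers already used by the mask
+ * definitions in this header.
+ *
+ *     NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS req = { 0 };
+ *
+ *     // _IGNORE is 0, so only the fields set here take part in the request;
+ *     // ask for ELCG, BLCG and SLCG to be disabled for stable profiling.
+ *     req.controlMask =
+ *         DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELCG, _DISABLE) |
+ *         DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _BLCG, _DISABLE) |
+ *         DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _SLCG, _DISABLE);
+ *
+ *     NV_STATUS status = RmControl(hClient, hProfiler,
+ *                                  NV90CC_CTRL_CMD_POWER_REQUEST_FEATURES,
+ *                                  &req, sizeof(req));
+ *
+ *     if (status == NV_OK && req.globalStatus != NV_OK)
+ *     {
+ *         // The request is all-or-nothing; inspect the per-feature result,
+ *         // e.g. for ELCG:
+ *         NvU32 elcg = DRF_VAL(90CC, _CTRL_POWER_FEATURE_MASK, _ELCG,
+ *                              req.statusMask);
+ *         // elcg is one of the *_REQUEST_{FULFILLED,REJECTED,FAILED,
+ *         // NOT_SUPPORTED} values defined below.
+ *     }
+ *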
+ */ +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG 7:6 +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN 9:8 +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT 11:10 + +/* valid values for fields in the controlMask parameter */ +#define NV90CC_CTRL_POWER_FEATURE_IGNORE (0x00000000) +#define NV90CC_CTRL_POWER_FEATURE_DISABLE (0x00000001) +#define NV90CC_CTRL_POWER_FEATURE_ENABLE (0x00000002) + +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_IGNORE NV90CC_CTRL_POWER_FEATURE_IGNORE +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_DISABLE NV90CC_CTRL_POWER_FEATURE_DISABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_ENABLE NV90CC_CTRL_POWER_FEATURE_ENABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_IGNORE NV90CC_CTRL_POWER_FEATURE_IGNORE +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_DISABLE NV90CC_CTRL_POWER_FEATURE_DISABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_ENABLE NV90CC_CTRL_POWER_FEATURE_ENABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_IGNORE NV90CC_CTRL_POWER_FEATURE_IGNORE +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_DISABLE NV90CC_CTRL_POWER_FEATURE_DISABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_ENABLE NV90CC_CTRL_POWER_FEATURE_ENABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_IGNORE NV90CC_CTRL_POWER_FEATURE_IGNORE +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_DISABLE NV90CC_CTRL_POWER_FEATURE_DISABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_ENABLE NV90CC_CTRL_POWER_FEATURE_ENABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_IGNORE NV90CC_CTRL_POWER_FEATURE_IGNORE +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_DISABLE NV90CC_CTRL_POWER_FEATURE_DISABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_ENABLE NV90CC_CTRL_POWER_FEATURE_ENABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT_IGNORE NV90CC_CTRL_POWER_FEATURE_IGNORE +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT_DISABLE NV90CC_CTRL_POWER_FEATURE_DISABLE +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT_ENABLE NV90CC_CTRL_POWER_FEATURE_ENABLE +/* possible values for fields in the statusMask parameter */ +#define NV90CC_CTRL_POWER_FEATURE_REQUEST_FULFILLED (0x00000000) +#define NV90CC_CTRL_POWER_FEATURE_REQUEST_REJECTED (0x00000001) +#define NV90CC_CTRL_POWER_FEATURE_REQUEST_NOT_SUPPORTED (0x00000002) +#define NV90CC_CTRL_POWER_FEATURE_REQUEST_FAILED (0x00000003) + +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_REQUEST_FULFILLED NV90CC_CTRL_POWER_FEATURE_REQUEST_FULFILLED +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_REQUEST_REJECTED NV90CC_CTRL_POWER_FEATURE_REQUEST_REJECTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_REQUEST_NOT_SUPPORTED NV90CC_CTRL_POWER_FEATURE_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_REQUEST_FAILED NV90CC_CTRL_POWER_FEATURE_REQUEST_FAILED +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_REQUEST_FULFILLED NV90CC_CTRL_POWER_FEATURE_REQUEST_FULFILLED +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_REQUEST_REJECTED NV90CC_CTRL_POWER_FEATURE_REQUEST_REJECTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_REQUEST_NOT_SUPPORTED NV90CC_CTRL_POWER_FEATURE_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_REQUEST_FAILED NV90CC_CTRL_POWER_FEATURE_REQUEST_FAILED +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_REQUEST_FULFILLED NV90CC_CTRL_POWER_FEATURE_REQUEST_FULFILLED +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_REQUEST_REJECTED NV90CC_CTRL_POWER_FEATURE_REQUEST_REJECTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_REQUEST_NOT_SUPPORTED NV90CC_CTRL_POWER_FEATURE_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_REQUEST_FAILED 
NV90CC_CTRL_POWER_FEATURE_REQUEST_FAILED +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_REQUEST_FULFILLED NV90CC_CTRL_POWER_FEATURE_REQUEST_FULFILLED +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_REQUEST_REJECTED NV90CC_CTRL_POWER_FEATURE_REQUEST_REJECTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_REQUEST_NOT_SUPPORTED NV90CC_CTRL_POWER_FEATURE_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_REQUEST_FAILED NV90CC_CTRL_POWER_FEATURE_REQUEST_FAILED +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_REQUEST_FULFILLED NV90CC_CTRL_POWER_FEATURE_REQUEST_FULFILLED +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_REQUEST_REJECTED NV90CC_CTRL_POWER_FEATURE_REQUEST_REJECTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_REQUEST_NOT_SUPPORTED NV90CC_CTRL_POWER_FEATURE_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_REQUEST_FAILED NV90CC_CTRL_POWER_FEATURE_REQUEST_FAILED +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT_REQUEST_FULFILLED NV90CC_CTRL_POWER_FEATURE_REQUEST_FULFILLED +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT_REQUEST_REJECTED NV90CC_CTRL_POWER_FEATURE_REQUEST_REJECTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT_REQUEST_NOT_SUPPORTED NV90CC_CTRL_POWER_FEATURE_REQUEST_NOT_SUPPORTED +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT_REQUEST_FAILED NV90CC_CTRL_POWER_FEATURE_REQUEST_FAILED + +/* utility masks for the controlMask parameter for all controls */ +#define NV90CC_CTRL_POWER_FEATURE_MASK_ALL \ + DRF_SHIFTMASK(NV90CC_CTRL_POWER_FEATURE_MASK_ELCG) | \ + DRF_SHIFTMASK(NV90CC_CTRL_POWER_FEATURE_MASK_BLCG) | \ + DRF_SHIFTMASK(NV90CC_CTRL_POWER_FEATURE_MASK_SLCG) | \ + DRF_SHIFTMASK(NV90CC_CTRL_POWER_FEATURE_MASK_ELPG) | \ + DRF_SHIFTMASK(NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN) | \ + DRF_SHIFTMASK(NV90CC_CTRL_POWER_FEATURE_MASK_VAT) +#define NV90CC_CTRL_POWER_FEATURE_MASK_ALL_IGNORE \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELCG, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _BLCG, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _SLCG, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELPG, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _IDLE_SLOWDOWN, _IGNORE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _VAT, _IGNORE) +#define NV90CC_CTRL_POWER_FEATURE_MASK_ALL_DISABLE \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELCG, _DISABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _BLCG, _DISABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _SLCG, _DISABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELPG, _DISABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _IDLE_SLOWDOWN, _DISABLE)| \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _VAT, _DISABLE) +#define NV90CC_CTRL_POWER_FEATURE_MASK_ALL_ENABLE \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELCG, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _BLCG, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _SLCG, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELPG, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _IDLE_SLOWDOWN, _ENABLE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _VAT, _ENABLE) + +/* + * NV90CC_CTRL_CMD_POWER_RELEASE_FEATUERS + * + * This command releases the GF100_PROFILER's request for the given clock- + * gating features that was previously created by the + * NV90CC_CTRL_CMD_POWER_REQUEST_FEATURES command. 
If the GF100_PROFILER + * object does not have an outstanding request to one or more of the given + * clock-gating features, those features will be ignored while the other + * feature requests will be released. + * + * After calling this command, the calling client may not rely on the current + * value of any of the released clock-gating features to remain, even if + * several identical requests for the given clock-gating features were made + * using NV90CC_CTRL_CMD_POWER_REQUEST_FEATURES. The RM only reference- + * counts the settings requested by GF100_PROFILER instances - it does not + * reference-count multiple identical requests made using the same + * GF100_PROFILER instance. + * + * All outstanding requests made using this GF100_PROFILER object are + * implicitly released when the GF100_PROFILER is freed. + * + * controlMask + * This parameter indicates which clock-gating features the RM should + * release the GF100_PROFILER's reference to. See + * NV90CC_CTRL_CMD_POWER_REQUEST_FEATURES for valid fields. Valid + * values for each field are: + * NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_IGNORE + * NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_IGNORE + * NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_IGNORE + * NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_IGNORE + * NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_IGNORE + * NV90CC_CTRL_POWER_FEATURE_MASK_VAT_IGNORE + * This value indicates that the clock-gating feature associated with + * the field should not be released, even if the GF100_PROFILER has an + * outstanding request for it. This will not affect the reference + * count for the feature. + * NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_RELEASE + * NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_RELEASE + * NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_RELEASE + * NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_RELEASE + * NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_RELEASE + * NV90CC_CTRL_POWER_FEATURE_MASK_VAT_RELEASE + * This value indicates that the GF100_PROFILER's outstanding request + * for the clock-gating feature associated with the field should be + * released. This will decrement the reference count for the feature + * if the GF100_PROFILER has an outstanding request for it. 
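+ *
+ * Example release (editorial sketch, not part of the original header; it
+ * assumes the same hypothetical RmControl() wrapper and GF100_PROFILER
+ * handle hProfiler as the request example above, and that the features
+ * named below were previously requested):
+ *
+ *     NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS rel = { 0 };
+ *
+ *     // Drop only the ELCG and BLCG references; every other field stays at
+ *     // _IGNORE (0) and its reference count is untouched.
+ *     rel.controlMask =
+ *         DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELCG, _RELEASE) |
+ *         DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _BLCG, _RELEASE);
+ *
+ *     NV_STATUS status = RmControl(hClient, hProfiler,
+ *                                  NV90CC_CTRL_CMD_POWER_RELEASE_FEATURES,
+ *                                  &rel, sizeof(rel));
+ *
+ *     // NV90CC_CTRL_POWER_FEATURE_MASK_ALL_RELEASE (defined below) releases
+ *     // everything at once, although freeing the GF100_PROFILER does the
+ *     // same implicitly.
+ *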
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NV90CC_CTRL_CMD_POWER_RELEASE_FEATURES (0x90cc0302) /* finn: Evaluated from "(FINN_GF100_PROFILER_POWER_INTERFACE_ID << 8) | NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS_MESSAGE_ID" */ + +#define NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS { + NvU32 controlMask; +} NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS; + +/* + * valid values for the controlMask parameter in addition to + * NV90CC_CTRL_POWER_FEATURE_MASK_*_IGNORE + */ +#define NV90CC_CTRL_POWER_FEATURE_RELEASE (0x00000003) + +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELCG_RELEASE NV90CC_CTRL_POWER_FEATURE_RELEASE +#define NV90CC_CTRL_POWER_FEATURE_MASK_BLCG_RELEASE NV90CC_CTRL_POWER_FEATURE_RELEASE +#define NV90CC_CTRL_POWER_FEATURE_MASK_SLCG_RELEASE NV90CC_CTRL_POWER_FEATURE_RELEASE +#define NV90CC_CTRL_POWER_FEATURE_MASK_ELPG_RELEASE NV90CC_CTRL_POWER_FEATURE_RELEASE +#define NV90CC_CTRL_POWER_FEATURE_MASK_IDLE_SLOWDOWN_RELEASE NV90CC_CTRL_POWER_FEATURE_RELEASE +#define NV90CC_CTRL_POWER_FEATURE_MASK_VAT_RELEASE NV90CC_CTRL_POWER_FEATURE_RELEASE + + +/* _ctrl90ccpower_h_ */ + +/* utility mask for the controlMask parameter for all fields */ +#define NV90CC_CTRL_POWER_FEATURE_MASK_ALL_RELEASE \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELCG, _RELEASE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _BLCG, _RELEASE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _SLCG, _RELEASE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _ELPG, _RELEASE) | \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _IDLE_SLOWDOWN, _RELEASE)| \ + DRF_DEF(90CC, _CTRL_POWER_FEATURE_MASK, _VAT, _RELEASE) diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h new file mode 100644 index 000000000..6e031bd35 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90cd.h @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl90cd.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NV_EVENT_BUFFER control commands and parameters */ + +#define NV_EVENT_BUFFER_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90CD, NV90CD_CTRL_##cat, idx) + +#define NV90CD_CTRL_RESERVED (0x00) +#define NV90CD_CTRL_EVENT (0x01) + +/* +* NV_EVENT_BUFFER_CTRL_CMD_NULL +* +* This command does nothing. +* This command does not take any parameters. +* +* Possible status values returned are: +* NV_OK +*/ +#define NV_EVENT_BUFFER_CTRL_CMD_NULL (0x90cd0000) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* +* NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS +* This interface enables all the events that are associated to the event buffer +*/ +#define NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS (0x90cd0101) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x1" */ + +#define NV_EVENT_BUFFER_FLAG 0:32 + +/* +* NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY +* This flag defines the kernel behavior when the buffer is full +* +* DEFAULT/DISABLED: By default kernel doesn't assume any policy. To enable events +* an overflow policy has to be set to retain older or newer events +* +* KEEP_OLDEST: kernel would retain older events and drop newer events if the buffer is full +* +* KEEP_NEWEST: kernel would retain newer events and drop older events if the buffer is full +* +*/ +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY 0:1 +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DISABLED 0 +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST 1 +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST 2 +#define NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DEFAULT NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_DISABLED + +/* +* NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS +* +* enable [IN] +* This field is used to enable or disable events +* +* flags[IN] +* This field sets NV_EVENT_BUFFER_FLAG parameter used to configure event buffer overflow options +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT +*/ +typedef struct NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS { + NvBool enable; + NvU32 flags; +} NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS; + +/* +* NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET +* This interface allows the user to update get pointers. +* This call is useful in the KEEP_OLDEST policy to update free space available in the buffer. +* In keep oldest policy, kernel adds new entries in the buffer only if there is free space. +* The full/empty decision is made as follows: +* - when GET==PUT, the fifo is empty +* - when GET==PUT+1, the fifo is full +*/ +#define NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET (0x90cd0102) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x2" */ + +/* +* NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS +* +* recordBufferGet [IN] +* Value to be used to update the get offset of record buffer +* +* varDataBufferGet[IN] +* This is the buffer offset up to which user has consumed the vardataBuffer +* +* Possible status values returned are: +* NV_OK +* NV_ERR_INVALID_ARGUMENT: if any of the get offsets is greater than respective bufferSize. 
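+*
+* Example usage (editorial sketch, not part of the original header): it
+* assumes a hypothetical RmControl(hClient, hObject, cmd, pParams, size)
+* wrapper over the resource-manager control interface, an already-allocated
+* NV_EVENT_BUFFER handle hEventBuffer, and hypothetical consumedRecordOffset /
+* consumedVarDataOffset values tracked by the consumer.
+*
+*     NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS enableParams = { 0 };
+*
+*     enableParams.enable = NV_TRUE;
+*     // Select the KEEP_OLDEST overflow policy: the kernel keeps the records
+*     // already in the buffer and drops new ones once the FIFO is full.
+*     enableParams.flags  = NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST;
+*
+*     NV_STATUS status = RmControl(hClient, hEventBuffer,
+*                                  NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS,
+*                                  &enableParams, sizeof(enableParams));
+*
+*     // With KEEP_OLDEST the consumer must hand free space back by advancing
+*     // the get pointers after it has read records out of the buffer:
+*     NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS getParams = { 0 };
+*     getParams.recordBufferGet  = consumedRecordOffset;    // consumer-tracked
+*     getParams.varDataBufferGet = consumedVarDataOffset;   // consumer-tracked
+*     status = RmControl(hClient, hEventBuffer,
+*                        NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET,
+*                        &getParams, sizeof(getParams));
+*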
+*/ +typedef struct NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS { + NvU32 recordBufferGet; + NvU32 varDataBufferGet; +} NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS; + +/* + * Send a test event-buffer notification (verification-only) + */ +#define NV_EVENT_BUFFER_CTRL_CMD_VERIF_NOTIFY (0x90cd0103) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x3" */ + +/* + * Synchronous flush + */ +#define NV_EVENT_BUFFER_CTRL_CMD_FLUSH (0x90cd0104) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x4" */ + +/* + * post event + */ +#define NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT (0x90cd0105) /* finn: Evaluated from "(FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID << 8) | 0x5" */ + + /* + * NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS + * + * eventType [IN] + * the NvTelemetry event type. + * typeVersion [IN] + * the version of the event structure + * eventData [IN] + * an array of 256 bytes used to hold the event data. + * eventDataSz [IN] + * the amount of valid data in the eventData buffer. + * varData [IN] + * an array of 256 bytes used to hold the var data. + * varDataSz [IN] + * the amount of valid data in the varData buffer. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +typedef struct NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS { + NvU32 eventType; + NvU16 typeVersion; + NvU8 eventData[256]; + NvU16 eventDataSz; + NvU8 varData[256]; + NvU16 varDataSz; +} NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS; + +/* _ctr l90cd_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90e6.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90e6.h new file mode 100644 index 000000000..544f5613c --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90e6.h @@ -0,0 +1,140 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl90e6.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#define NV90E6_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0x90E6, NV90E6_CTRL_##cat, idx) + + +/* NV90E6 command categories (6bits) */ +#define NV90E6_CTRL_RESERVED (0x00) +#define NV90E6_CTRL_MASTER (0x01) + + +/* + * NV90E6_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
+ * + * Possible status values returned are: + * NV_OK + */ + +#define NV90E6_CTRL_CMD_NULL (0x90e60000) /* finn: Evaluated from "(FINN_GF100_SUBDEVICE_MASTER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NV90E6_CTRL_CMD_MASTER_GET_ERROR_INTR_OFFSET_MASK + * + * This command is used to query the offset and mask within the object mapping + * that can be used to query for ECC and NVLINK interrupts. + * + * If a read of the given offset+mask is non-zero then it is possible an ECC or + * an NVLINK error has been reported and not yet handled. If this is true then + * the caller must either wait until the read returns zero or call into the + * corresponding count reporting APIs to get updated counts. + * + * offset + * The offset into a GF100_SUBDEVICE_MASTSER's mapping where the top level + * interrupt register can be found. + * mask + * Compatibility field that contains the same bits as eccMask. This field is + * deprecated and will be removed. + * eccMask + * The mask to AND with the value found at offset to determine if any ECC + * interrupts are pending. + * nvlinkMask + * The mask to AND with the value found at offset to determine if any NVLINK + * interrupts are pending. + * + * Possible return status values are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV90E6_CTRL_CMD_MASTER_GET_ERROR_INTR_OFFSET_MASK (0x90e60101) /* finn: Evaluated from "(FINN_GF100_SUBDEVICE_MASTER_MASTER_INTERFACE_ID << 8) | NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS_MESSAGE_ID" */ + +#define NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS { + NvU32 offset; + NvU32 mask; // TODO: remove after all users have switched to use eccMask + NvU32 eccMask; + NvU32 nvlinkMask; +} NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS; + +// TODO: remove once users of this interface have switched to the new name. +#define NV90E6_CTRL_CMD_MASTER_GET_ECC_INTR_OFFSET_MASK NV90E6_CTRL_CMD_MASTER_GET_ERROR_INTR_OFFSET_MASK + +typedef NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS NV90E6_CTRL_MASTER_GET_ECC_INTR_OFFSET_MASK_PARAMS; + +/* + * NV90E6_CTRL_CMD_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK + * + * This command is used to query the mask within the fastpath register + * (VIRTUAL_FUNCTION_ERR_CONT) that can be used to query for ECC and NVLINK interrupts. + * + * If a read of the given mask is non-zero then it is possible an ECC or + * an NVLINK error has been reported and not yet handled. If this is true then + * the caller must either wait until the read returns zero or call into the + * corresponding count reporting APIs to get updated counts. + * + * [out] eccMask + * The mask to AND with the value found at offset to determine if any ECC + * interrupts are possibly pending. + * [out] nvlinkMask + * The mask to AND with the value found at offset to determine if any NVLINK + * interrupts are possibly pending. 
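+ *
+ * Example usage of the offset/mask pair returned by
+ * NV90E6_CTRL_CMD_MASTER_GET_ERROR_INTR_OFFSET_MASK (editorial sketch, not
+ * part of the original header): it assumes a hypothetical RmControl(hClient,
+ * hObject, cmd, pParams, size) wrapper, an already-allocated
+ * GF100_SUBDEVICE_MASTER handle hSubdevMaster, and a hypothetical pMasterMap
+ * pointer to a CPU mapping of that object obtained elsewhere. The
+ * VIRTUAL_FUNCTION variant below is used in the same way against the per-VF
+ * fastpath register.
+ *
+ *     NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS info = { 0 };
+ *
+ *     NV_STATUS status = RmControl(hClient, hSubdevMaster,
+ *                                  NV90E6_CTRL_CMD_MASTER_GET_ERROR_INTR_OFFSET_MASK,
+ *                                  &info, sizeof(info));
+ *
+ *     // Poll the top-level interrupt word through the mapping rather than
+ *     // issuing a control call on every poll.
+ *     NvU32 intr = *(const volatile NvU32 *)((const NvU8 *)pMasterMap + info.offset);
+ *
+ *     if (intr & info.eccMask)
+ *     {
+ *         // An ECC error may be pending; query the ECC count APIs.
+ *     }
+ *     if (intr & info.nvlinkMask)
+ *     {
+ *         // An NVLINK error may be pending; query the NVLINK error APIs.
+ *     }
+ *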
+ * + * Possible return status values are + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ +#define NV90E6_CTRL_CMD_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK (0x90e60102) /* finn: Evaluated from "(FINN_GF100_SUBDEVICE_MASTER_MASTER_INTERFACE_ID << 8) | NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS_MESSAGE_ID" */ + +#define NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS { + NvU32 eccMask; + NvU32 nvlinkMask; +} NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS; + +/* _ctrl90e6_h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h new file mode 100644 index 000000000..59fe63a32 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90ec.h @@ -0,0 +1,127 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrl90ec.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* GK104 HDACODEC control commands and parameters */ + +#define NV90EC_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90EC, NV90EC_CTRL_##cat, idx) + +/* NV04_DISPLAY_COMMON command categories (6bits) */ +#define NV90EC_CTRL_RESERVED (0x00) +#define NV90EC_CTRL_HDACODEC (0x01) + +/* + * NV90EC_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NV90EC_CTRL_CMD_NULL (0x90ec0000) /* finn: Evaluated from "(FINN_GF100_HDACODEC_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE + * + * This command sets the CP_READY bit. It basically informs RM whether + * the DD has worked upon the HDCP request requested by the Audio driver + * or not. DD asks RM to enable CP_READY bit (by setting CpReadyEnable to NV_TRUE) + * once it is done honouring/dishonouring the request. + * + * subDeviceInstance + * This parameter specifies the subdevice instance within the + * NV04_DISPLAY_COMMON parent device to which the operation should be + * directed. This parameter must specify a value between zero and the + * total number of subdevices within the parent device. 
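+ *
+ * Example usage (editorial sketch, not part of the original header): it
+ * assumes a hypothetical RmControl(hClient, hObject, cmd, pParams, size)
+ * wrapper, an already-allocated GK104 HDACODEC handle hHdaCodec, and a
+ * hypothetical dfpDisplayId identifying the DFP in question; the parameter
+ * structure is defined further below.
+ *
+ *     NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS cpReady = { 0 };
+ *
+ *     cpReady.subDeviceInstance = 0;            // default subdevice
+ *     cpReady.displayId         = dfpDisplayId; // must be a DFP display
+ *     cpReady.bCpReadyEnable    = NV_TRUE;      // HDCP request has been serviced
+ *
+ *     NV_STATUS status = RmControl(hClient, hHdaCodec,
+ *                                  NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE,
+ *                                  &cpReady, sizeof(cpReady));
+ *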
This parameter + * should be set to zero for default behavior. + * displayId + * This parameter specifies the ID of the display for which the cp ready + * bit should be enabled. The display ID must a dfp display. + * If the displayId is not a dfp, this call will return + * NV_ERR_INVALID_ARGUMENT. + * CpReadyEnable + * This parameter specifies whether to enable (NV_TRUE) or not. If CpReady + * is enabled then AudioCodec can send more HDCP requests. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + * + */ +#define NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE (0x90ec0101) /* finn: Evaluated from "(FINN_GF100_HDACODEC_HDACODEC_INTERFACE_ID << 8) | NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS_MESSAGE_ID" */ + +#define NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS { + NvU32 subDeviceInstance; + NvU32 displayId; + NvBool bCpReadyEnable; +} NV90EC_CTRL_CMD_HDACODEC_SET_CP_READY_ENABLE_PARAMS; + +/* + * NV90EC_CTRL_CMD_HDACODEC_NOTIFY_AUDIO_EVENT + * + * This command notifies Audio of any events to audio + * like notification of PD bit being set. + * + * audioEvent + * This parameter specifies the event type. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + * + */ +#define NV90EC_CTRL_CMD_HDACODEC_NOTIFY_AUDIO_EVENT (0x90ec0102) /* finn: Evaluated from "(FINN_GF100_HDACODEC_HDACODEC_INTERFACE_ID << 8) | NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS_MESSAGE_ID" */ + +#define NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS { + NvU32 audioEvent; +} NV90EC_CTRL_HDACODEC_NOTIFY_AUDIO_EVENT_PARAMS; + +/* + * This command notifies audio driver that PD bit is set by DD, by writing to scratch register + */ +#define NV90EC_CTRL_HDACODEC_AUDIOEVENT_PD_BIT_SET (0x00000001) + +/* _ctrl90ec_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h new file mode 100644 index 000000000..5b647f412 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrl90f1.h @@ -0,0 +1,312 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrl90f1.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "mmu_fmt_types.h" + +#define GMMU_FMT_MAX_LEVELS 6 + +/* Fermi+ GPU VASpace control commands and parameters */ +#define NV90F1_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0x90F1, NV90F1_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NV90F1_CTRL_RESERVED (0x00) +#define NV90F1_CTRL_VASPACE (0x01) + +/*! + * Does nothing. + */ +#define NV90F1_CTRL_CMD_NULL (0x90f10000) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/*! + * Get VAS GPU MMU format. + */ +#define NV90F1_CTRL_CMD_VASPACE_GET_GMMU_FORMAT (0x90f10101) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS_MESSAGE_ID" */ + +#define NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [out] GMMU format struct. This is of RM-internal type "struct GMMU_FMT*" + * which can only be accessed by kernel builds since this is a kernel + * only API. + */ + NV_DECLARE_ALIGNED(NvP64 pFmt, 8); +} NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS; + +/*! + * Get VAS page level information. + */ +#define NV90F1_CTRL_CMD_VASPACE_GET_PAGE_LEVEL_INFO (0x90f10102) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | 0x2" */ + +typedef struct NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [in] GPU virtual address to query. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddress, 8); + + /*! + * [in] Page size to query. + */ + NV_DECLARE_ALIGNED(NvU64 pageSize, 8); + + /*! + * [out] Number of levels populated. + */ + NvU32 numLevels; + + /*! + * [out] Per-level information. + */ + struct { + /*! + * Format of this level. + */ + NV_DECLARE_ALIGNED(struct MMU_FMT_LEVEL *pFmt, 8); + + /*! + * Level/Sublevel Formats flattened + */ + NV_DECLARE_ALIGNED(MMU_FMT_LEVEL levelFmt, 8); + NV_DECLARE_ALIGNED(MMU_FMT_LEVEL sublevelFmt[MMU_FMT_MAX_SUB_LEVELS], 8); + + /*! + * Physical address of this page level instance. + */ + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + + /*! + * Aperture in which this page level instance resides. + */ + NvU32 aperture; + + /*! + * Size in bytes allocated for this level instance. + */ + NV_DECLARE_ALIGNED(NvU64 size, 8); + } levels[GMMU_FMT_MAX_LEVELS]; +} NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS; + +/*! + * Reserve (allocate and bind) page directory/table entries up to + * a given level of the MMU format. Also referred to as "lock-down". + * + * Each range that has been reserved must be released + * eventually with @ref NV90F1_CTRL_CMD_VASPACE_RELEASE_ENTRIES. + * A particular VA range and level (page size) combination may only be + * locked down once at a given time, but each level is independent. 
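+ *
+ * Example (editorial sketch, not part of the original header): it assumes a
+ * hypothetical RmControl(hClient, hObject, cmd, pParams, size) wrapper, an
+ * already-allocated FERMI_VASPACE_A handle hVASpace, and a hypothetical,
+ * 64 KB-aligned base address vaBase; the parameter structure is defined just
+ * below.
+ *
+ *     // Lock down the entries backing a 2 MB range at the 64 KB level;
+ *     // virtAddrLo and (virtAddrHi + 1) must be pageSize-aligned.
+ *     NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS reserve = { 0 };
+ *
+ *     reserve.hSubDevice  = 0;           // unicast selected via subDeviceId
+ *     reserve.subDeviceId = 0;
+ *     reserve.pageSize    = 0x10000;     // 64 KB
+ *     reserve.virtAddrLo  = vaBase;
+ *     reserve.virtAddrHi  = vaBase + (2 << 20) - 1;
+ *
+ *     NV_STATUS status = RmControl(hClient, hVASpace,
+ *                                  NV90F1_CTRL_CMD_VASPACE_RESERVE_ENTRIES,
+ *                                  &reserve, sizeof(reserve));
+ *
+ *     // ... map and unmap freely within the locked-down range ...
+ *
+ *     // The same values are later passed to
+ *     // NV90F1_CTRL_CMD_VASPACE_RELEASE_ENTRIES to undo the reservation.
+ *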
+ */ +#define NV90F1_CTRL_CMD_VASPACE_RESERVE_ENTRIES (0x90f10103) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS_MESSAGE_ID" */ + +#define NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [in] Page size (VA coverage) of the level to reserve. + * This need not be a leaf (page table) page size - it can be + * the coverage of an arbitrary level (including root page directory). + */ + NV_DECLARE_ALIGNED(NvU64 pageSize, 8); + + /*! + * [in] First GPU virtual address of the range to reserve. + * This must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8); + + /*! + * [in] Last GPU virtual address of the range to reserve. + * This (+1) must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8); +} NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS; + +/*! + * Release (unbind and free) page directory/table entries up to + * a given level of the MMU format that has been reserved through a call to + * @ref NV90F1_CTRL_CMD_VASPACE_RESERVE_ENTRIES. Also referred to as "unlock". + */ +#define NV90F1_CTRL_CMD_VASPACE_RELEASE_ENTRIES (0x90f10104) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS_MESSAGE_ID" */ + +#define NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! + * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [in] Page size (VA coverage) of the level to release. + * This need not be a leaf (page table) page size - it can be + * the coverage of an arbitrary level (including root page directory). + */ + NV_DECLARE_ALIGNED(NvU64 pageSize, 8); + + /*! + * [in] First GPU virtual address of the range to release. + * This must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8); + + /*! + * [in] Last GPU virtual address of the range to release. + * This (+1) must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8); +} NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS; + +/*! + * Get VAS page level information without kernel priviledge. This will internally call + * NV90F1_CTRL_CMD_VASPACE_GET_PAGE_LEVEL_INFO. + */ +#define NV90F1_CTRL_CMD_VASPACE_GET_PAGE_LEVEL_INFO_VERIF (0x90f10105) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | 0x5" */ + +/*! + * Pin PDEs for a given VA range on the server RM and then mirror the client's page + * directory/tables in the server. + * + * @ref + */ +#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */ + +#define NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS { + /*! + * [in] GPU sub-device handle - this API only supports unicast. + * Pass 0 to use subDeviceId instead. + */ + NvHandle hSubDevice; + + /*! 
+ * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero. + */ + NvU32 subDeviceId; + + /*! + * [in] Page size (VA coverage) of the level to reserve. + * This need not be a leaf (page table) page size - it can be + * the coverage of an arbitrary level (including root page directory). + */ + NV_DECLARE_ALIGNED(NvU64 pageSize, 8); + + /*! + * [in] First GPU virtual address of the range to reserve. + * This must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8); + + /*! + * [in] Last GPU virtual address of the range to reserve. + * This (+1) must be aligned to pageSize. + */ + NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8); + + /*! + * [in] Number of PDE levels to copy. + */ + NvU32 numLevelsToCopy; + + /*! + * [in] Per-level information. + */ + struct { + /*! + * Physical address of this page level instance. + */ + NV_DECLARE_ALIGNED(NvU64 physAddress, 8); + + /*! + * Size in bytes allocated for this level instance. + */ + NV_DECLARE_ALIGNED(NvU64 size, 8); + + /*! + * Aperture in which this page level instance resides. + */ + NvU32 aperture; + + /*! + * Page shift corresponding to the level + */ + NvU8 pageShift; + } levels[GMMU_FMT_MAX_LEVELS]; +} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS; + +/* _ctrl90f1_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06c.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06c.h new file mode 100644 index 000000000..775ce948b --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06c.h @@ -0,0 +1,421 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla06c.finn +// + + + + +/* + * KEPLER_CHANNEL_GROUP_A control commands and parameters + * + * A channel group is a collection of channels which are executed in a specific + * order by hardware. Hardware will not move from one channel in the group + * until it is finished executing all its work or gives up control to the next + * channel. + * + * Channels are added to the group by allocating them as a child of the + * KEPLER_CHANNEL_GROUP_A object. + * + * A channel in a channel group may be restricted with respect to what objects + * can be allocated on it and what APIs are allowed to operate on the channel. 
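+ *
+ * A minimal scheduling sketch (editorial addition, not part of the original
+ * header): it assumes a hypothetical RmControl(hClient, hObject, cmd,
+ * pParams, size) wrapper, an already-allocated KEPLER_CHANNEL_GROUP_A handle
+ * hChannelGroup whose channels were allocated as its children, and that
+ * NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS carries a single bEnable field (an
+ * assumption; see ctrla06f.h for the authoritative definition).
+ *
+ *     NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS sched = { 0 };
+ *
+ *     sched.bEnable = NV_TRUE;   // assumed field: add the group to the runlist
+ *
+ *     NV_STATUS status = RmControl(hClient, hChannelGroup,
+ *                                  NVA06C_CTRL_CMD_GPFIFO_SCHEDULE,
+ *                                  &sched, sizeof(sched));
+ *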
+ * For more details on specifics of how groups behave see the Software and + * Hardware documetation for the specific architecture in which you are + * interested. + * + */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrla06f.h" /* A06C is partially derived from A06F */ + +#include "ctrl/ctrl2080/ctrl2080internal.h" /* For NV2080_CTRL_INTERNAL_MEMDESC_INFO */ +#include "ctrl/ctrlc36f.h" /* For NVC36F_CTRL_CMD_GPFIFO_FAULT_METHOD_BUFFER_MAX_RUNQUEUES */ + +#define NVA06C_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xA06C, NVA06C_CTRL_##cat, idx) + +/* KEPLER_CHANNEL_GROUP_A command categories (6bits) */ +#define NVA06C_CTRL_RESERVED (0x00) +#define NVA06C_CTRL_GPFIFO (0x01) +#define NVA06C_CTRL_INTERNAL (0x02) + +/* + * NVA06C_CTRL_CMD_NULL + * + * Please see description of NV906D_CTRL_CMD_NULL. + * + */ +#define NVA06C_CTRL_CMD_NULL (0xa06c0000) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NVA06C_CTRL_CMD_GPFIFO_SCHEDULE + * + * This command schedules a channel group in hardware. This command should be + * called after objects have been allocated on the channel or a call to + * NVA06C_CTRL_CMD_BIND has been made. + * + * See NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for parameter definitions. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_OPERATION + * + */ +#define NVA06C_CTRL_CMD_GPFIFO_SCHEDULE (0xa06c0101) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | 0x1" */ +typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA06C_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVA06C_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS { + NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS params; +} NVA06C_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS; + + + + +/* + * NVA06C_CTRL_CMD_BIND + * + * This command uses the given engine to configure the group for scheduling. + * It alleviates the need to call NVA06C_CTRL_CMD_GPFIFO_SCHEDULE after objects + * have been allocated. However, it requires that the caller know which engine + * they want to be able to execute in the group. Once this has been called + * only objects that can be allocated on the specified engine or other engines + * allowed to coexist in the group will be allowed. See + * NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST to determine which engines can + * share a parent. + * + * See NVA06F_CTRL_CMD_BIND for parameter definitions. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NVA06C_CTRL_CMD_BIND (0xa06c0102) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | NVA06C_CTRL_BIND_PARAMS_MESSAGE_ID" */ + +#define NVA06C_CTRL_BIND_PARAMS_MESSAGE_ID (0x2U) + +typedef NVA06F_CTRL_BIND_PARAMS NVA06C_CTRL_BIND_PARAMS; + +/* + * NVA06C_CTRL_CMD_SET_TIMESLICE + * + * This command modifies the timeslice for a channel group. Hardware may not + * support all possible values, so the input will be rounded down to a valid + * hardware value. + * + * This takes effect immediately, meaning the channel may be forced off of the + * GPU so the change can be made. 
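+ *
+ * A minimal usage sketch (rmControl() is a hypothetical stand-in for the
+ * client's RM control dispatch; hClient and hChannelGroup are assumed to be
+ * existing handles, and the parameter structure is the
+ * NVA06C_CTRL_TIMESLICE_PARAMS type defined below):
+ *
+ *     NVA06C_CTRL_TIMESLICE_PARAMS tsParams = { 0 };
+ *     tsParams.timesliceUs = 2000;   // request a 2 millisecond timeslice
+ *     rmControl(hClient, hChannelGroup, NVA06C_CTRL_CMD_SET_TIMESLICE,
+ *               &tsParams, sizeof(tsParams));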
+ *
+ * timesliceUs
+ * This parameter is an input containing the desired timeslice in microseconds.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_STATE
+ */
+#define NVA06C_CTRL_CMD_SET_TIMESLICE (0xa06c0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | 0x3" */
+
+/*
+ * NVA06C_CTRL_CMD_GET_TIMESLICE
+ *
+ * This command returns the timeslice for a channel group. If
+ * NVA06C_CTRL_CMD_SET_TIMESLICE has been used this call will return the exact
+ * value passed to that function, which may not be exactly what is programmed
+ * to hardware (see NVA06C_CTRL_CMD_SET_TIMESLICE for more details).
+ *
+ * timesliceUs
+ * This parameter is an output containing the current timeslice in microseconds.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_STATE
+ */
+#define NVA06C_CTRL_CMD_GET_TIMESLICE (0xa06c0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | 0x4" */
+
+typedef struct NVA06C_CTRL_TIMESLICE_PARAMS {
+ NV_DECLARE_ALIGNED(NvU64 timesliceUs, 8);
+} NVA06C_CTRL_TIMESLICE_PARAMS;
+
+/*
+ * NVA06C_CTRL_CMD_PREEMPT
+ *
+ * This command preempts a channel group. It optionally waits for the preempt to
+ * complete before returning.
+ *
+ * bWait
+ * If bWait evaluates to NV_TRUE then this control call waits until the preempt
+ * completes, else it returns immediately after issuing the preempt. Calling
+ * this control call multiple times with bWait = NV_FALSE without waiting for
+ * the preempt to complete by some other means can lead to undefined results.
+ * bManualTimeout
+ * If bManualTimeout evaluates to NV_TRUE then timeoutUs will be used as the
+ * timeout value.
+ * timeoutUs
+ * The desired timeout in microseconds. Only used if bManualTimeout evaluates
+ * to NV_TRUE. The maximum value of timeoutUs is
+ * NVA06C_CTRL_CMD_PREEMPT_MAX_MANUAL_TIMEOUT_US.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_ARGUMENT
+ * NV_ERR_INVALID_STATE
+ *
+ */
+#define NVA06C_CTRL_CMD_PREEMPT (0xa06c0105) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | NVA06C_CTRL_PREEMPT_PARAMS_MESSAGE_ID" */
+
+#define NVA06C_CTRL_PREEMPT_PARAMS_MESSAGE_ID (0x5U)
+
+typedef struct NVA06C_CTRL_PREEMPT_PARAMS {
+ NvBool bWait;
+ NvBool bManualTimeout;
+ NvU32 timeoutUs;
+} NVA06C_CTRL_PREEMPT_PARAMS;
+
+#define NVA06C_CTRL_CMD_PREEMPT_MAX_MANUAL_TIMEOUT_US (1000000) // 1 second
+
+/*
+ * NVA06C_CTRL_CMD_GET_INFO
+ *
+ * This command returns information about the channel group.
+ *
+ * tsgID
+ * Output parameter containing the hardware TSG ID for this channel group.
+ *
+ * Possible status values returned are:
+ * NV_OK
+ * NV_ERR_INVALID_OBJECT
+ * NV_ERR_INVALID_ARGUMENT
+ *
+ */
+#define NVA06C_CTRL_CMD_GET_INFO (0xa06c0106) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | NVA06C_CTRL_GET_INFO_PARAMS_MESSAGE_ID" */
+
+#define NVA06C_CTRL_GET_INFO_PARAMS_MESSAGE_ID (0x6U)
+
+typedef struct NVA06C_CTRL_GET_INFO_PARAMS {
+ NvU32 tsgID;
+} NVA06C_CTRL_GET_INFO_PARAMS;
+
+/*
+ * NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL
+ *
+ * Used to change the target TSG's interleave level in the runlist. 
The + * interleave level indicates how often channels in the TSG appear in the + * runlist according to the following policy: + * + * - LOW: Appear once + * - MEDIUM: If L > 0, appear L times + * Else, appear once + * - HIGH: If L > 0, appear (M + 1) * L times + * Else if M > 0, appear M times + * Else, appear once + * where + * - L = # of LOW interleave level TSGs + * - M = # of MEDIUM interleave level TSGs + * + * For safety reasons, setting this property requires PRIVILEGED user level. + * + * tsgInterleaveLevel + * Input parameter. One of: + * - NVA06C_CTRL_INTERLEAVE_LEVEL_LOW + * - NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM + * - NVA06C_CTRL_INTERLEAVE_LEVEL_HIGH + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + * NV_ERR_INSUFFICIENT_PERMISSIONS + */ +#define NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL (0xa06c0107) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | 0x7" */ + +/* + * NVA06C_CTRL_CMD_GET_INTERLEAVE_LEVEL + * + * Returns the target TSG's interleave level. + * + * tsgInterleaveLevel + * Output parameter. One of: + * - NVA06C_CTRL_INTERLEAVE_LEVEL_LOW + * - NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM + * - NVA06C_CTRL_INTERLEAVE_LEVEL_HIGH + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NVA06C_CTRL_CMD_GET_INTERLEAVE_LEVEL (0xa06c0108) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | 0x8" */ + +typedef struct NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS { + NvU32 tsgInterleaveLevel; +} NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS; + +#define NVA06C_CTRL_INTERLEAVE_LEVEL_LOW (0x00000000) +#define NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM (0x00000001) +#define NVA06C_CTRL_INTERLEAVE_LEVEL_HIGH (0x00000002) + +/* + * NVA06C_CTRL_CMD_PROGRAM_VIDMEM_PROMOTE + * This ctrl call is deprecated in favor of NV0090_CTRL_CMD_PROGRAM_VIDMEM_PROMOTE. + * @see NV0090_CTRL_CMD_PROGRAM_VIDMEM_PROMOTE + */ +#define NVA06C_CTRL_CMD_PROGRAM_VIDMEM_PROMOTE (0xa06c0109) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS_MESSAGE_ID" */ + +typedef enum NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE { + NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE_NONE = 0, + NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE_64B = 1, + NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE_128B = 2, +} NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE; + +typedef struct NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_FIELD { + NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_SIZE size; +} NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_FIELD; + +#define NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS { + NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_FIELD l1; + NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_FIELD t1; +} NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS; + +/*! + * NVA06C_CTRL_CMD_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS + * + * This control call is used to + * - set the fault method buffer addresses in vGpu host / GSP after the fault + * method buffers are allocated by vGpu guest / GSP_CLIENT during channel + * group allocation + * - destroy the method buffer memory descriptors during channel group free + * + * methodBufferMemdesc [input] + * Details about the memory allocated for method buffer for each runqueue. + * If the size of the memory region is zero, the descriptor will be destroyed. 
+ * + * bar2Addr [input] + * CPU invisible BAR2 address that the method buffer is mapped to for each + * runqueue + * + * numValidEntries [input] + * The number of valid entries upto + * NVC36F_CTRL_CMD_GPFIFO_FAULT_METHOD_BUFFER_MAX_RUNQUEUES actually filled in + * the control call parameters. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ +#define NVA06C_CTRL_CMD_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS (0xa06c010a) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8 | NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_MESSAGE_ID)" */ + +#define NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES (NVC36F_CTRL_CMD_GPFIFO_FAULT_METHOD_BUFFER_MAX_RUNQUEUES) + +#define NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS_MESSAGE_ID (0xaU) + +typedef struct NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS { + NV_DECLARE_ALIGNED(NV2080_CTRL_INTERNAL_MEMDESC_INFO methodBufferMemdesc[NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES], 8); + NV_DECLARE_ALIGNED(NvU64 bar2Addr[NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES], 8); + NvU32 numValidEntries; +} NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS; + +/* + * NVA06C_CTRL_CMD_MAKE_REALTIME + * + * Promote the specified TSG to realtime, or demote to non-realtime, + * depending on the given bRealtime parameter. A realtime TSG will have the + * highest interleave level when the scheduling policy is CHANNEL_INTERLEAVED, + * and will also precede any non-realtime channel/TSG in the order channels are + * added to the corresponding runlist. + * + * Whenever a realtime TSG is added to a runlist, all non-realtime channels/TSGs + * are made preemptible by setting a COMPUTE preemption mode to CTA. + * + * Preemption modes for realtime TSGs remain WFI. + * + * A client must then issue a NVA06F_CTRL_CMD_RESTART_RUNLIST control call when + * kicking off work on a realtime TSG to preempt non-realtime channels and + * restart the runlist such that the realtime TSG will start executing next. + * + * bRealtime + * Whether the TSG is to be promoted to/demoted from realtime. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_PARAMETER + * NV_ERR_INVALID_OBJECT + * NV_ERR_INVALID_STATE + * NV_ERR_NOT_SUPPORTED + */ +#define NVA06C_CTRL_CMD_MAKE_REALTIME (0xa06c0110) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID << 8) | NVA06C_CTRL_MAKE_REALTIME_PARAMS_MESSAGE_ID" */ + +#define NVA06C_CTRL_MAKE_REALTIME_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NVA06C_CTRL_MAKE_REALTIME_PARAMS { + NvBool bRealtime; +} NVA06C_CTRL_MAKE_REALTIME_PARAMS; + + + +/* + * NVA06C_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE + * + * This command is an internal command sent from Kernel RM to Physical RM + * to schedule a channel group in hardware. + * + * Please see description of NVA06C_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVA06C_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE (0xa06c0201) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_INTERNAL_INTERFACE_ID << 8) | 0x1" */ + +/* + * NVA06C_CTRL_CMD_INTERNAL_SET_TIMESLICE + * + * This command is an internal command sent from Kernel RM to Physical RM + * to modify the timeslice for a channel group in hardware. + * + * Please see description of NVA06C_CTRL_CMD_SET_TIMESLICE for more information. 
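+ *
+ * As a rough sketch of the split (assumptions: the physical RM_API is
+ * obtained with GPU_GET_PHYSICAL_RMAPI() and the same
+ * NVA06C_CTRL_TIMESLICE_PARAMS structure is reused for the internal call;
+ * neither is guaranteed by this header), Kernel RM handles the client-facing
+ * command and forwards it along these lines:
+ *
+ *     RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);  // assumed accessor
+ *     pRmApi->Control(pRmApi, hClient, hChannelGroup,
+ *                     NVA06C_CTRL_CMD_INTERNAL_SET_TIMESLICE,
+ *                     &tsParams, sizeof(tsParams));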
+ * + */ +#define NVA06C_CTRL_CMD_INTERNAL_SET_TIMESLICE (0xa06c0202) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GROUP_A_INTERNAL_INTERFACE_ID << 8) | 0x2" */ + +/* _ctrla06c.h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f.h new file mode 100644 index 000000000..b768326f5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla06f.finn +// + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrla06f/ctrla06fbase.h" +#include "ctrl/ctrla06f/ctrla06fgpfifo.h" +#include "ctrl/ctrla06f/ctrla06fevent.h" +#include "ctrl/ctrla06f/ctrla06finternal.h" diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h new file mode 100644 index 000000000..39c977a5a --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fbase.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla06f/ctrla06fbase.finn +// + + + + +/* GK100_GPFIFO control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl906f.h" /* A06F is partially derived from 906F */ + +#define NVA06F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xA06F, NVA06F_CTRL_##cat, idx) + +/* GK100_GPFIFO command categories (6bits) */ +#define NVA06F_CTRL_RESERVED (0x00) +#define NVA06F_CTRL_GPFIFO (0x01) +#define NVA06F_CTRL_EVENT (0x02) +#define NVA06F_CTRL_INTERNAL (0x03) + +/* + * NVA06F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + * + */ +#define NVA06F_CTRL_CMD_NULL (0xa06f0000) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/* _ctrla06fbase_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fevent.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fevent.h new file mode 100644 index 000000000..7a7b7670d --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fevent.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla06f/ctrla06fevent.finn +// + + + +#include "ctrl/ctrla06f/ctrla06fbase.h" + +/* + * NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * This command sets event notification state for the associated channel. + * This command requires that an instance of NV01_EVENT has been previously + * bound to the associated channel object. + * + * event + * This parameter specifies the type of event to which the specified + * action is to be applied. This parameter must specify a valid + * NVA06F_NOTIFIERS value (see cla06f.h for more details) and should + * not exceed one less NVA06F_NOTIFIERS_MAXCOUNT. + * action + * This parameter specifies the desired event notification action. + * Valid notification actions include: + * NVA06F_CTRL_SET_EVENT_NOTIFICATION_ACTION_DISABLE + * This action disables event notification for the specified + * event for the associated channel object. 
+ * NVA06F_CTRL_SET_EVENT_NOTIFICATION_ACTION_SINGLE + * This action enables single-shot event notification for the + * specified event for the associated channel object. + * NVA06F_CTRL_SET_EVENT_NOTIFICATION_ACTION_REPEAT + * This action enables repeated event notification for the specified + * event for the associated channel object. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION (0xa06f0205) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID << 8) | NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS { + NvU32 event; + NvU32 action; +} NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000) +#define NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE (0x00000001) +#define NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT (0x00000002) + +/* + * NVA06F_CTRL_CMD_EVENT_SET_TRIGGER + * + * This command triggers a software event for the associated channel. + * This command accepts no parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVA06F_CTRL_CMD_EVENT_SET_TRIGGER (0xa06f0206) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID << 8) | 0x6" */ + + +/* _ctrla06fevent_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h new file mode 100644 index 000000000..326bafbbc --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h @@ -0,0 +1,291 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla06f/ctrla06fgpfifo.finn +// + + + + +#include "ctrl/ctrla06f/ctrla06fbase.h" + +/* + * NVA06F_CTRL_GET_CLASS_ENGINEID + * + * Please see description of NV906F_CTRL_GET_CLASS_ENGINEID for more information. 
+ * + */ +#define NVA06F_CTRL_GET_CLASS_ENGINEID (0xa06f0101) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x1" */ + +typedef NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS NVA06F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVA06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS { + NVA06F_CTRL_GET_CLASS_ENGINEID_PARAMS params; +} NVA06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS; + + + +/* + * NVA06F_CTRL_RESET_CHANNEL + * + * Please see description of NV906F_CTRL_RESET_CHANNEL for more information. + */ +#define NVA06F_CTRL_CMD_RESET_CHANNEL (0xa06f0102) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x2" */ + +typedef NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVA06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS { + NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS params; +} NVA06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS; + + + +/* + * NVA06F_CTRL_CMD_GPFIFO_SCHEDULE + * + * This command schedules a channel in hardware. This command should be called + * after objects have been allocated on the channel or a call to + * NVA06F_CTRL_CMD_BIND has been made. + * + * bEnable + * This parameter indicates whether or not the channel should be scheduled in hardware. + * When set, the channel will be enabled in addition to being added to the appropriate runlist. + * When not set, the channel will be disabled and removed from runlist. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_STATE + * NV_ERR_INVALID_OPERATION + * + */ +#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */ +#define NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS { + NvBool bEnable; + NvBool bSkipSubmit; +} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +/* + * NVA06F_CTRL_CMD_BIND + * + * This command uses the given engine to configure the channel for scheduling. + * It alleviates the need to call NVA06F_CTRL_CMD_GPFIFO_SCHEDULE after objects + * have been allocated. However, it requires that the caller know which engine + * they want to be able to execute on the channel. Once this has been called + * only objects that can be allocated on the specified engine or other engines + * allowed to coexist on the channel will be allowed. See + * NV2080_CTRL_CMD_GPU_GET_ENGINE_PARTNERLIST to determine which engines can + * share a parent. + * + * engineType + * This parameter specifies an NV2080_ENGINE_TYPE value indicating the + * engine to which this channel should be bound. 
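+ *
+ * A minimal usage sketch (rmControl() is a hypothetical placeholder for the
+ * client's RM control dispatch; hClient and hChannel are assumed to be
+ * existing handles, and NV2080_ENGINE_TYPE_GRAPHICS is just one example
+ * engine value):
+ *
+ *     NVA06F_CTRL_BIND_PARAMS bindParams = { 0 };
+ *     bindParams.engineType = NV2080_ENGINE_TYPE_GRAPHICS;
+ *     rmControl(hClient, hChannel, NVA06F_CTRL_CMD_BIND,
+ *               &bindParams, sizeof(bindParams));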
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */ + +#define NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVA06F_CTRL_BIND_PARAMS { + NvU32 engineType; +} NVA06F_CTRL_BIND_PARAMS; + + + +/* + * NVA06F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * Please see description of NV906F_CTRL_CMD_GET_MMU_FAULT_INFO for more information. + * + */ +#define NVA06F_CTRL_CMD_GET_MMU_FAULT_INFO (0xa06f0107) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x7" */ + +typedef NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS NVA06F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NVA06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS { + NV_DECLARE_ALIGNED(NVA06F_CTRL_GET_MMU_FAULT_INFO_PARAMS params, 8); +} NVA06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS; + + + +/* + * NVA06F_CTRL_CMD_SET_ERROR_NOTIFIER + * + * This command sets the channel error notifier of the target channel. + * bNotifyEachChannelInTSG + * When true, the error notifier will be set on every channel in + * the TSG that contains the channel. + * + * Possible status values returned are: + * NV_OK + */ +#define NVA06F_CTRL_CMD_SET_ERROR_NOTIFIER (0xa06f0108) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS_MESSAGE_ID" */ + +#define NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS { + NvBool bNotifyEachChannelInTSG; +} NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS; + +/* + * NVA06F_CTRL_CMD_SET_INTERLEAVE_LEVEL + * + * Symmetric to NVA06C_CTRL_CMD_SET_INTERLEAVE_LEVEL, applied to the individual + * target channel. + * + * When belonging to a TSG, same interleave level will be set to every channel + * in the TSG. + * + * channelInterleaveLevel + * Input parameter. One of: + * - NVA06C_CTRL_INTERLEAVE_LEVEL_LOW + * - NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM + * - NVA06C_CTRL_INTERLEAVE_LEVEL_HIGH + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + * NV_ERR_INSUFFICIENT_PERMISSIONS + */ +#define NVA06F_CTRL_CMD_SET_INTERLEAVE_LEVEL (0xa06f0109) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x9" */ + +/* + * NVA06F_CTRL_CMD_GET_INTERLEAVE_LEVEL + * + * Returns the target channel's interleave level. + * + * channelInterleaveLevel + * Output parameter. One of: + * - NVA06C_CTRL_INTERLEAVE_LEVEL_LOW + * - NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM + * - NVA06C_CTRL_INTERLEAVE_LEVEL_HIGH + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NVA06F_CTRL_CMD_GET_INTERLEAVE_LEVEL (0xa06f0110) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x10" */ + +typedef struct NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS { + NvU32 channelInterleaveLevel; +} NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS; + +/* + * NVA06F_CTRL_CMD_RESTART_RUNLIST + * + * This command expires the current timeslice and restarts the runlist the given + * channel belongs to. 
This effectively preempts the current channel on the + * corresponding engine. + * + * This is useful for clients to trigger preemption manually and reduce start + * latency for higher priority channels as they are added first to the runlist + * if NV0080_CTRL_FIFO_RUNLIST_SCHED_POLICY_CHANNEL_INTERLEAVED policy is + * configured. + * + * This command interacts with the scheduler and may cause certain low priority + * channels to starve under certain circumstances. Therefore, it is only + * available to privileged clients. + * + * bForceRestart + * Input parameter. If NV_FALSE, the runlist restart will be skipped + * whenever the given channel (or its group) is already running on the + * corresponding engine. + * + * bBypassWait + * Input parameter. If NV_TRUE, the command will return immediately after + * issuing the hardware preemption request, without actually waiting for the + * context switch to complete. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NVA06F_CTRL_CMD_RESTART_RUNLIST (0xa06f0111) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_RESTART_RUNLIST_PARAMS_MESSAGE_ID" */ + +#define NVA06F_CTRL_RESTART_RUNLIST_PARAMS_MESSAGE_ID (0x11U) + +typedef struct NVA06F_CTRL_RESTART_RUNLIST_PARAMS { + NvBool bForceRestart; + NvBool bBypassWait; +} NVA06F_CTRL_RESTART_RUNLIST_PARAMS; + +/* + * NVA06F_CTRL_CMD_STOP_CHANNEL + * + * This command is used to stop the channel + * + * Stopping the channel here means disabling and unbinding the channel and removing it from runlist. + * So, if the channel needs to run again, it has to be scheduled, bound and enabled again. + * If we fail to preempt channel or remove it from runlist, then we RC the channel. + * Also set an error notifier to notify user space that channel is stopped. + * + * bImmediate + * Input parameter. If NV_FALSE, we will wait for default RM timeout + * for channel to idle. If NV_TRUE, we don't wait for channel to idle. + * If channel is not idle, we forcefully preempt it off the runlist. + * If the preempt times out, we will RC the channel. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_INVALID_STATE + */ +#define NVA06F_CTRL_CMD_STOP_CHANNEL (0xa06f0112) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_STOP_CHANNEL_PARAMS_MESSAGE_ID" */ + +#define NVA06F_CTRL_STOP_CHANNEL_PARAMS_MESSAGE_ID (0x12U) + +typedef struct NVA06F_CTRL_STOP_CHANNEL_PARAMS { + NvBool bImmediate; +} NVA06F_CTRL_STOP_CHANNEL_PARAMS; + +/* _ctrla06fgpfifo_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h new file mode 100644 index 000000000..058f47a1f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06finternal.h @@ -0,0 +1,69 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla06f/ctrla06finternal.finn +// + + + +#include "ctrl/ctrla06f/ctrla06fbase.h" +#include "ctrl/ctrla06f/ctrla06fgpfifo.h" + +/* + * NVA06F_CTRL_CMD_INTERNAL_STOP_CHANNEL + * + * This command is an internal command sent from Kernel RM to Physical RM + * to stop the channel in hardware + * + * Please see description of NVA06F_CTRL_CMD_STOP_CHANNEL for more information. + * + */ +#define NVA06F_CTRL_CMD_INTERNAL_STOP_CHANNEL (0xa06f0301) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID << 8) | 0x1" */ + +/* + * NVA06F_CTRL_CMD_INTERNAL_RESET_CHANNEL + * + * This command is an internal command sent from Kernel RM to Physical RM + * to perform the channel reset operations in hardware + * + * Please see description of NV906F_CTRL_CMD_RESET_CHANNEL for more information. + * + */ +#define NVA06F_CTRL_CMD_INTERNAL_RESET_CHANNEL (0xa06f0302) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID << 8) | 0x2" */ + +/* + * NVA06F_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE + * + * This command is an internal command sent from Kernel RM to Physical RM + * to schedule the channel in hardware + * + * Please see description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVA06F_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE (0xa06f0303) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID << 8) | 0x3" */ + +/* ctrla06finternal_h */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla16f.h b/src/common/sdk/nvidia/inc/ctrl/ctrla16f.h new file mode 100644 index 000000000..a9df5fdd4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla16f.h @@ -0,0 +1,202 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla16f.finn +// + + + + +/* GK100_GPFIFO control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrla06f.h" /* A16F is partially derived from A06F */ +#include "ctrl/ctrl906f.h" /* A16F is partially derived from 906F */ +#define NVA16F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xA16F, NVA16F_CTRL_##cat, idx) + +/* GK100_GPFIFO command categories (6bits) */ +#define NVA16F_CTRL_RESERVED (0x00) +#define NVA16F_CTRL_GPFIFO (0x01) +#define NVA16F_CTRL_EVENT (0x02) + +/* + * NVA16F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVA16F_CTRL_CMD_NULL (0xa16f0000) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_B_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NVA16F_CTRL_GET_CLASS_ENGINEID + * + * Please see description of NVA06F_CTRL_GET_CLASS_ENGINEID for more information. + * + */ +#define NVA16F_CTRL_GET_CLASS_ENGINEID (0xa16f0101) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_B_GPFIFO_INTERFACE_ID << 8) | 0x1" */ + +typedef NVA06F_CTRL_GET_CLASS_ENGINEID_PARAMS NVA16F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA16F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVA16F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS { + NVA16F_CTRL_GET_CLASS_ENGINEID_PARAMS params; +} NVA16F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS; + + + +/* + * NVA16F_CTRL_RESET_CHANNEL + * + * Please see description of NVA06F_CTRL_RESET_CHANNEL for more information. + * + */ +#define NVA16F_CTRL_CMD_RESET_CHANNEL (0xa16f0102) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_B_GPFIFO_INTERFACE_ID << 8) | 0x2" */ +typedef NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS NVA16F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! 
+#define NVA16F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVA16F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS { + NVA16F_CTRL_CMD_RESET_CHANNEL_PARAMS params; +} NVA16F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS; + + + +/* + * NVA16F_CTRL_CMD_GPFIFO_SCHEDULE + * + * Please see description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVA16F_CTRL_CMD_GPFIFO_SCHEDULE (0xa16f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_B_GPFIFO_INTERFACE_ID << 8) | 0x3" */ +typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVA16F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA16F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVA16F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS { + NVA16F_CTRL_GPFIFO_SCHEDULE_PARAMS params; +} NVA16F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS; + + + +/* + * NVA16F_CTRL_CMD_BIND + * + * Please see description of NVA06F_CTRL_CMD_BIND for more information. + */ +#define NVA16F_CTRL_CMD_BIND (0xa16f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_B_GPFIFO_INTERFACE_ID << 8) | 0x4" */ + +typedef NVA06F_CTRL_BIND_PARAMS NVA16F_CTRL_BIND_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA16F_CTRL_CMD_BIND_FINN_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVA16F_CTRL_CMD_BIND_FINN_PARAMS { + NVA16F_CTRL_BIND_PARAMS params; +} NVA16F_CTRL_CMD_BIND_FINN_PARAMS; + + + +/* + * NVA16F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION for more information. +*/ + + +#define NVA16F_CTRL_CMD_EVENT_SET_NOTIFICATION (0xa16f0205) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_B_EVENT_INTERFACE_ID << 8) | 0x5" */ + +typedef NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS NVA16F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA16F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NVA16F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS { + NVA16F_CTRL_EVENT_SET_NOTIFICATION_PARAMS params; +} NVA16F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS; + + + +/* valid action values */ +#define NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* + * NVA16F_CTRL_CMD_EVENT_SET_TRIGGER + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_TRIGGER for more information. + */ +#define NVA16F_CTRL_CMD_EVENT_SET_TRIGGER (0xa16f0206) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_B_EVENT_INTERFACE_ID << 8) | 0x6" */ + + + + + +/* + * NVA16F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * Please see description of NV906F_CTRL_CMD_GET_MMU_FAULT_INFO for more information. 
+ * + */ +#define NVA16F_CTRL_CMD_GET_MMU_FAULT_INFO (0xa16f0107) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_B_GPFIFO_INTERFACE_ID << 8) | 0x7" */ + +typedef NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS NVA16F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA16F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NVA16F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS { + NV_DECLARE_ALIGNED(NVA16F_CTRL_GET_MMU_FAULT_INFO_PARAMS params, 8); +} NVA16F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS; + + + +/* _ctrla16f.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrla26f.h b/src/common/sdk/nvidia/inc/ctrl/ctrla26f.h new file mode 100644 index 000000000..cb5169f04 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrla26f.h @@ -0,0 +1,202 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrla26f.finn +// + + + + +/* GK20A_GPFIFO control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrla06f.h" /* A26F is partially derived from A06F */ +#include "ctrl/ctrl906f.h" /* A26F is partially derived from 906F */ +#define NVA26F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xA26F, NVA26F_CTRL_##cat, idx) + +/* GK20A_GPFIFO command categories (6bits) */ +#define NVA26F_CTRL_RESERVED (0x00) +#define NVA26F_CTRL_GPFIFO (0x01) +#define NVA26F_CTRL_EVENT (0x02) + +/* + * NVA26F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVA26F_CTRL_CMD_NULL (0xa26f0000) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_C_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NVA26F_CTRL_GET_CLASS_ENGINEID + * + * Please see description of NV906F_CTRL_GET_CLASS_ENGINEID for more information. 
+ * + */ +#define NVA26F_CTRL_GET_CLASS_ENGINEID (0xa26f0101) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_C_GPFIFO_INTERFACE_ID << 8) | 0x1" */ + +typedef NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS NVA26F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA26F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVA26F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS { + NVA26F_CTRL_GET_CLASS_ENGINEID_PARAMS params; +} NVA26F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS; + + + +/* + * NVA26F_CTRL_RESET_CHANNEL + * + * Please see description of NVA06F_CTRL_CMD_RESET_CHANNEL for more information. + * + */ +#define NVA26F_CTRL_CMD_RESET_CHANNEL (0xa26f0102) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_C_GPFIFO_INTERFACE_ID << 8) | 0x2" */ +typedef NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS NVA26F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA26F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVA26F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS { + NVA26F_CTRL_CMD_RESET_CHANNEL_PARAMS params; +} NVA26F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS; + + + +/* + * NVA26F_CTRL_CMD_GPFIFO_SCHEDULE + * + * Please see description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVA26F_CTRL_CMD_GPFIFO_SCHEDULE (0xa26f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_C_GPFIFO_INTERFACE_ID << 8) | 0x3" */ +typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVA26F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA26F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVA26F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS { + NVA26F_CTRL_GPFIFO_SCHEDULE_PARAMS params; +} NVA26F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS; + + + +/* + * NVA26F_CTRL_CMD_BIND + * + * Please see description of NVA06F_CTRL_CMD_BIND for more information. + */ +#define NVA26F_CTRL_CMD_BIND (0xa26f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_C_GPFIFO_INTERFACE_ID << 8) | 0x4" */ + +typedef NVA06F_CTRL_BIND_PARAMS NVA26F_CTRL_BIND_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA26F_CTRL_CMD_BIND_FINN_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVA26F_CTRL_CMD_BIND_FINN_PARAMS { + NVA26F_CTRL_BIND_PARAMS params; +} NVA26F_CTRL_CMD_BIND_FINN_PARAMS; + + + +/* + * NVA26F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION for more information. +*/ + + +#define NVA26F_CTRL_CMD_EVENT_SET_NOTIFICATION (0xa26f0205) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_C_EVENT_INTERFACE_ID << 8) | 0x5" */ + +typedef NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS NVA26F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! 
+#define NVA26F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NVA26F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS { + NVA26F_CTRL_EVENT_SET_NOTIFICATION_PARAMS params; +} NVA26F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS; + + + +/* valid action values */ +#define NVA26F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVA26F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVA26F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* + * NVA26F_CTRL_CMD_EVENT_SET_TRIGGER + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_TRIGGER for more information. + */ +#define NVA26F_CTRL_CMD_EVENT_SET_TRIGGER (0xa26f0206) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_C_EVENT_INTERFACE_ID << 8) | 0x6" */ + + + + + +/* + * NVA26F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * Please see description of NV906F_CTRL_CMD_GET_MMU_FAULT_INFO for more information. + * + */ +#define NVA26F_CTRL_CMD_GET_MMU_FAULT_INFO (0xa26f0107) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_C_GPFIFO_INTERFACE_ID << 8) | 0x7" */ + +typedef NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS NVA26F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVA26F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NVA26F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS { + NV_DECLARE_ALIGNED(NVA26F_CTRL_GET_MMU_FAULT_INFO_PARAMS params, 8); +} NVA26F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS; + + + +/* _ctrla26f.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlb069.h b/src/common/sdk/nvidia/inc/ctrl/ctrlb069.h new file mode 100644 index 000000000..8d4a65435 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlb069.h @@ -0,0 +1,183 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrlb069.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* MAXWELL_FAULT_BUFFER_A control commands and parameters */ + +#define NVB069_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xB069, NVB069_CTRL_##cat, idx) + +/* MAXWELL_FAULT_BUFFER_A command categories (6bits) */ +#define NVB069_CTRL_RESERVED (0x00) +#define NVB069_CTRL_FAULTBUFFER (0x01) + +/* + * NVB069_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVB069_CTRL_CMD_NULL (0xb0690000) /* finn: Evaluated from "(FINN_MAXWELL_FAULT_BUFFER_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + + +/* + * NVB069_CTRL_CMD_FAULTBUFFER_READ_GET + * + * This command returns the current HW GET pointer for the requested type fault buffer + * + * faultBufferGetOffset + * Value of current HW GET pointer + * faultBufferType + * Type of fault buffer. FAULT_BUFFER_REPLAYABLE or FAULT_BUFFER_NON_REPLAYABLE + */ +#define NVB069_CTRL_CMD_FAULTBUFFER_READ_GET (0xb0690101) /* finn: Evaluated from "(FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER_INTERFACE_ID << 8) | NVB069_CTRL_FAULTBUFFER_READ_GET_PARAMS_MESSAGE_ID" */ + +#define NVB069_CTRL_FAULTBUFFER_READ_GET_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVB069_CTRL_FAULTBUFFER_READ_GET_PARAMS { + NvU32 faultBufferGetOffset; + NvU32 faultBufferType; +} NVB069_CTRL_FAULTBUFFER_READ_GET_PARAMS; + +// +// Valid Fault buffer Types +// NON_REPLAYABLE is only supported in Volta+ GPUs. +// +#define NVB069_CTRL_FAULT_BUFFER_NON_REPLAYABLE (0x00000000) +#define NVB069_CTRL_FAULT_BUFFER_REPLAYABLE (0x00000001) + +/* + * NVB069_CTRL_CMD_FAULTBUFFER_WRITE_GET + * + * This command writes the HW GET pointer for the requested type of fault buffer + * + * NOTE: The caller must issue a write barrier before this function to + * ensure modifications to the current buffer entry are committed before + * the GET pointer is updated. + * + * faultBufferGetOffset + * Value to be written to HW GET pointer + * faultBufferType + * Type of fault buffer. FAULT_BUFFER_REPLAYABLE or FAULT_BUFFER_NON_REPLAYABLE + */ +#define NVB069_CTRL_CMD_FAULTBUFFER_WRITE_GET (0xb0690102) /* finn: Evaluated from "(FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER_INTERFACE_ID << 8) | NVB069_CTRL_FAULTBUFFER_WRITE_GET_PARAMS_MESSAGE_ID" */ + +#define NVB069_CTRL_FAULTBUFFER_WRITE_GET_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVB069_CTRL_FAULTBUFFER_WRITE_GET_PARAMS { + NvU32 faultBufferGetOffset; + NvU32 faultBufferType; +} NVB069_CTRL_FAULTBUFFER_WRITE_GET_PARAMS; + +/* + * NVB069_CTRL_CMD_FAULTBUFFER_READ_PUT + * + * This command returns the current HW PUT pointer for the requested type fault buffer + * + * faultBufferGetOffset + * Value of current HW PUT pointer + * faultBufferType + * Type of fault buffer. 
FAULT_BUFFER_REPLAYABLE or FAULT_BUFFER_NON_REPLAYABLE + */ +#define NVB069_CTRL_CMD_FAULTBUFFER_READ_PUT (0xb0690103) /* finn: Evaluated from "(FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER_INTERFACE_ID << 8) | NVB069_CTRL_FAULTBUFFER_READ_PUT_PARAMS_MESSAGE_ID" */ + +#define NVB069_CTRL_FAULTBUFFER_READ_PUT_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVB069_CTRL_FAULTBUFFER_READ_PUT_PARAMS { + NvU32 faultBufferPutOffset; + NvU32 faultBufferType; +} NVB069_CTRL_FAULTBUFFER_READ_PUT_PARAMS; + +#define NVB069_CTRL_CMD_FAULTBUFFER_ENABLE_NOTIFICATION (0xb0690104) /* finn: Evaluated from "(FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER_INTERFACE_ID << 8) | NVB069_CTRL_FAULTBUFFER_ENABLE_NOTIFICATION_PARAMS_MESSAGE_ID" */ + +#define NVB069_CTRL_FAULTBUFFER_ENABLE_NOTIFICATION_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVB069_CTRL_FAULTBUFFER_ENABLE_NOTIFICATION_PARAMS { + NvBool Enable; +} NVB069_CTRL_FAULTBUFFER_ENABLE_NOTIFICATION_PARAMS; + +#define NVB069_CTRL_CMD_FAULTBUFFER_GET_SIZE (0xb0690105) /* finn: Evaluated from "(FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER_INTERFACE_ID << 8) | NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS_MESSAGE_ID" */ + +#define NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS { + NvU32 faultBufferSize; +} NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS; + + +/* + * NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS + * + * This command provides kernel mapping to a few registers. + * These mappings are needed by UVM driver to handle non fatal gpu faults + * + * pFaultBufferGet + * Mapping for fault buffer's get pointer (NV_PFIFO_REPLAYABLE_FAULT_BUFFER_GET) + * pFaultBufferPut + * Mapping for fault buffer's put pointer (NV_PFIFO_REPLAYABLE_FAULT_BUFFER_PUT) + * pFaultBufferInfo + * Mapping for fault buffer's Info pointer (NV_PFIFO_REPLAYABLE_FAULT_BUFFER_INFO) + * Note: this variable is deprecated since buffer overflow is not a seperate register from Volta + * pPmcIntr + * Mapping for PMC intr register (NV_PMC_INTR(0)) + * pPmcIntrEnSet + * Mapping for PMC intr set register - used to enable an intr (NV_PMC_INTR_EN_SET(0)) + * pPmcIntrEnClear + * Mapping for PMC intr clear register - used to disable an intr (NV_PMC_INTR_EN_CLEAR(0)) + * replayableFaultMask + * Mask for the replayable fault bit(NV_PMC_INTR_REPLAYABLE_FAULT) + */ +#define NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS (0xb0690106) /* finn: Evaluated from "(FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER_INTERFACE_ID << 8) | NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS_MESSAGE_ID" */ + +#define NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pFaultBufferGet, 8); + NV_DECLARE_ALIGNED(NvP64 pFaultBufferPut, 8); + NV_DECLARE_ALIGNED(NvP64 pFaultBufferInfo, 8); + NV_DECLARE_ALIGNED(NvP64 pPmcIntr, 8); + NV_DECLARE_ALIGNED(NvP64 pPmcIntrEnSet, 8); + NV_DECLARE_ALIGNED(NvP64 pPmcIntrEnClear, 8); + NvU32 replayableFaultMask; + NV_DECLARE_ALIGNED(NvP64 pPrefetchCtrl, 8); +} NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS; + +/* _ctrlb069_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h b/src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h new file mode 100644 index 000000000..dbe489fb3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlb06f.h @@ -0,0 +1,469 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlb06f.finn +// + + + + +/* MAXWELL_CHANNEL_GPFIFO_A control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrla06f.h" /* B06F is partially derived from A06F */ +#include "ctrl/ctrl906f.h" /* B06F is partially derived from 906F */ +#define NVB06F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xB06F, NVB06F_CTRL_##cat, idx) + +/* MAXWELL_CHANNEL_GPFIFO_A command categories (6bits) */ +#define NVB06F_CTRL_RESERVED (0x00) +#define NVB06F_CTRL_GPFIFO (0x01) +#define NVB06F_CTRL_EVENT (0x02) + +/* + * NVB06F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVB06F_CTRL_CMD_NULL (0xb06f0000) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NVB06F_CTRL_GET_CLASS_ENGINEID + * + * Please see description of NV906F_CTRL_GET_CLASS_ENGINEID for more information. + * + */ +#define NVB06F_CTRL_GET_CLASS_ENGINEID (0xb06f0101) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x1" */ + +typedef NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS NVB06F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVB06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVB06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS { + NVB06F_CTRL_GET_CLASS_ENGINEID_PARAMS params; +} NVB06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS; + + + +/* + * NVB06F_CTRL_RESET_CHANNEL + * + * Please see description of NVA06F_CTRL_RESET_CHANNEL for more information. + * + */ +#define NVB06F_CTRL_CMD_RESET_CHANNEL (0xb06f0102) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x2" */ + +typedef NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS NVB06F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! 
+#define NVB06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVB06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS { + NVB06F_CTRL_CMD_RESET_CHANNEL_PARAMS params; +} NVB06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS; + + + +/* + * NVB06F_CTRL_CMD_GPFIFO_SCHEDULE + * + * Please see description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVB06F_CTRL_CMD_GPFIFO_SCHEDULE (0xb06f0103) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x3" */ + +typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVB06F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVB06F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVB06F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS { + NVB06F_CTRL_GPFIFO_SCHEDULE_PARAMS params; +} NVB06F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS; + + + +/* + * NVB06F_CTRL_CMD_BIND + * + * Please see description of NVA06F_CTRL_CMD_BIND for more information. + */ +#define NVB06F_CTRL_CMD_BIND (0xb06f0104) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x4" */ + +typedef NVA06F_CTRL_BIND_PARAMS NVB06F_CTRL_BIND_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVB06F_CTRL_CMD_BIND_FINN_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVB06F_CTRL_CMD_BIND_FINN_PARAMS { + NVB06F_CTRL_BIND_PARAMS params; +} NVB06F_CTRL_CMD_BIND_FINN_PARAMS; + + + +/* + * NVB06F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION for more information. +*/ + + +#define NVB06F_CTRL_CMD_EVENT_SET_NOTIFICATION (0xb06f0205) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID << 8) | 0x5" */ + +typedef NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS NVB06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVB06F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NVB06F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS { + NVB06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS params; +} NVB06F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS; + + + +/* valid action values */ +#define NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* + * NVB06F_CTRL_CMD_EVENT_SET_TRIGGER + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_TRIGGER for more information. + */ +#define NVB06F_CTRL_CMD_EVENT_SET_TRIGGER (0xb06f0206) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID << 8) | 0x6" */ + + + + + +/* + * NVB06F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * Please see description of NV906F_CTRL_CMD_GET_MMU_FAULT_INFO for more information. 
+ * + */ +#define NVB06F_CTRL_CMD_GET_MMU_FAULT_INFO (0xb06f0107) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x7" */ + +typedef NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS NVB06F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVB06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NVB06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS { + NV_DECLARE_ALIGNED(NVB06F_CTRL_GET_MMU_FAULT_INFO_PARAMS params, 8); +} NVB06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS; + + + +/* + * NVB06F_CTRL_CMD_GET_ENGINE_CTX_SIZE + * + * This command returns the size of the engine context. + * + * engineID + * This parameter specifies the engine context size to be retrieved. + * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for more + * information + * + * size + * This parameter returns the size of the engine context + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NVB06F_CTRL_CMD_GET_ENGINE_CTX_SIZE (0xb06f010b) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS { + NvU32 engineID; + NvU32 size; +} NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS; + +/* + * NVB06F_CTRL_CMD_GET_ENGINE_CTX_DATA + * + * This command returns the context buffer data for the given engine for vGPU motion. + * + * engineID + * This parameter specifies the engine context to be retrieved. + * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for + * more information + * + * size + * This parameter specifies the size of the context buffer. + * + * pEngineCtxBuff + * This parameter specifies the context buffer for motion operation to be filled in. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NVB06F_CTRL_CMD_GET_ENGINE_CTX_DATA (0xb06f010c) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS_MESSAGE_ID" */ + +typedef struct SW_OBJECT_ENGINE_CTX { + NvU32 hObject; + NvU32 subCh; +} SW_OBJECT_ENGINE_CTX; + +#define NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS_MESSAGE_ID (0xCU) + +typedef struct NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS { + NvU32 engineID; + NvU32 size; + NV_DECLARE_ALIGNED(NvP64 pEngineCtxBuff, 8); +} NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS; + +/* + * NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA + * + * This command restores the context buffer for the given engine for vGPU motion. + * + * engineID + * This parameter specifies the engine context to be restored. + * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for + * more information + * + * size + * This parameter specifies the size of the context buffer. + * + * pEngineCtxBuff + * This parameter specifies the context buffer for motion operation restore. 
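+ *
+ * Illustrative save/restore flow (sketch only; the rmControl() wrapper is a
+ * hypothetical stand-in for the client's RM control entry point): query the
+ * buffer size with NVB06F_CTRL_CMD_GET_ENGINE_CTX_SIZE, allocate a buffer of
+ * that size, then:
+ *
+ *     NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS p = { 0 };
+ *     p.engineID       = NV2080_ENGINE_TYPE_GRAPHICS;  // example engine
+ *     p.size           = ctxSize;                      // from GET_ENGINE_CTX_SIZE
+ *     p.pEngineCtxBuff = buf;                          // NvP64 of the allocated buffer
+ *     rmControl(hClient, hChannel, NVB06F_CTRL_CMD_GET_ENGINE_CTX_DATA, &p, sizeof(p));
+ *     // ... transport the buffer to the target, then on the target channel:
+ *     rmControl(hClient, hChannel, NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA, &p, sizeof(p));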
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA (0xb06f010d) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS_MESSAGE_ID" */ + +typedef NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS { + NV_DECLARE_ALIGNED(NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS params, 8); +} NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS; + + + +/* + * NVB06F_CTRL_CMD_GET_ENGINE_CTX_STATE + * + * This command returns the context buffer state of the given engine for vGPU motion. + * + * engineID + * This input parameter specifies the engine context to be restored. + * + * See the description of the NV2080_ENGINE_TYPE values in cl2080.h for + * more information + * + * hObject + * This parameter specifies the channel object that is running on the SW engine. + * + * engineCtxState + * This parameter specifies the engine context state. For SW engine, the only meaningful + * field is INVALID, INITIALIZED and the subch. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ + +#define NVB06F_CTRL_CMD_GET_ENGINE_CTX_STATE (0xb06f010e) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS_MESSAGE_ID (0xEU) + +typedef struct NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS { + NvU32 engineID; + NvU32 hObject; + NvU32 engineCtxState; +} NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS; + +#define NVB06F_CTRL_GET_ENGINE_CTX_VIRTUAL_CONTEXT 0:0 +#define NVB06F_CTRL_GET_ENGINE_CTX_VIRTUAL_CONTEXT_DISABLED (0x00000000) +#define NVB06F_CTRL_GET_ENGINE_CTX_VIRTUAL_CONTEXT_ENABLED (0x00000001) +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE 2:1 +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_INVALID (0x00000000) +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_INITIALIZED (0x00000001) +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_PROMOTED (0x00000002) +#define NVB06F_CTRL_GET_ENGINE_CTX_STATE_EVICTED (0x00000003) +#define NVB06F_CTRL_GET_ENGINE_CTX_SUBCH 6:4 + +/* + * NVB06F_CTRL_CMD_GET_CHANNEL_HW_STATE + * + * This command returns the channel HW state. + * + * state + * This parameter stores single bit-fields corresponding to the following + * channel HW states: + * NEXT + * A value of NV_TRUE indicates that this channel should be scheduled + * first when GPU Host chooses this TSG to run next on the runlist. + * + * CTX_RELOAD + * A value of NV_TRUE indicates that this channel's context was + * preempted and needs to be reloaded. + * + * PENDING + * A value of NV_TRUE indicates that this channel is not loaded on the + * PBDMA but methods still remain. This includes the completion of + * semaphores acquires and WFI methods. This field is deprecated + * from Volta onwards, and can be ignored. + * + * ENG_FAULTED + * A value of NV_TRUE indicates that the channel's engine has faulted, + * and the channel will not be rescheduled until the fault has been + * cleared. This bit should only be set as part of migration, and will + * not necessarily cause the channel to be prevented from being + * scheduled. 
+ * + * PBDMA_FAULTED + * A value of NV_TRUE indicates that the channel's PBDMA has faulted, + * and the channel will not be rescheduled until the fault has been + * cleared. This bit should only be set as part of migration, and will + * not necessarily cause the channel to be prevented from being + * scheduled. + * + * ACQUIRE_FAIL + * A value of NV_TRUE indicates that the engine scheduler failed to + * acquire a semaphore for this channel. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ +#define NVB06F_CTRL_CMD_GET_CHANNEL_HW_STATE (0xb06f010f) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS_MESSAGE_ID" */ + +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_NEXT 0:0 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_CTX_RELOAD 1:1 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_PENDING 2:2 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_ENG_FAULTED 3:3 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_PBDMA_FAULTED 4:4 +#define NVB06F_CTRL_CMD_CHANNEL_HW_STATE_ACQUIRE_FAIL 5:5 + +#define NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS_MESSAGE_ID (0xFU) + +typedef struct NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS { + NvU32 state; +} NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS; + +/* + * NVB06F_CTRL_CMD_SET_CHANNEL_HW_STATE + * + * This command restores the channel HW state. + * + * state + * This parameter stores single bit-fields corresponding to the following + * channel HW states: + * NEXT + * A value of NV_TRUE indicates that this channel should be scheduled + * first when GPU Host chooses this TSG to run next on the runlist. + * + * CTX_RELOAD + * A value of NV_TRUE indicates that this channel's context was + * preempted and needs to be reloaded. + * + * PENDING + * A value of NV_TRUE indicates that this channel is not loaded on the + * PBDMA but methods still remain. This includes the completion of + * semaphores acquires and WFI methods. This field is deprecated + * from Volta onwards, and can be ignored. + * + * ENG_FAULTED + * A value of NV_TRUE indicates that the channel's engine has faulted, + * and the channel will not be rescheduled until the fault has been + * cleared. This bit should only be set as part of migration, and will + * not necessarily cause the channel to be prevented from being + * scheduled. + * + * PBDMA_FAULTED + * A value of NV_TRUE indicates that the channel's PBDMA has faulted, + * and the channel will not be rescheduled until the fault has been + * cleared. This bit should only be set as part of migration, and will + * not necessarily cause the channel to be prevented from being + * scheduled. + * + * ACQUIRE_FAIL + * A value of NV_TRUE indicates that the engine scheduler failed to + * acquire a semaphore for this channel. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_STATE + */ +#define NVB06F_CTRL_CMD_SET_CHANNEL_HW_STATE (0xb06f0110) /* finn: Evaluated from "(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x10" */ + +typedef NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! 
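+
+// Illustrative decode of the packed state word (sketch only; DRF_VAL() is the
+// generic field-extraction helper from nvmisc.h, and rmControl() is a
+// hypothetical stand-in for the client's RM control entry point):
+//
+//     NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS hw = { 0 };
+//     rmControl(hClient, hChannel, NVB06F_CTRL_CMD_GET_CHANNEL_HW_STATE, &hw, sizeof(hw));
+//     NvBool bNext      = DRF_VAL(B06F, _CTRL_CMD_CHANNEL_HW_STATE, _NEXT, hw.state);
+//     NvBool bCtxReload = DRF_VAL(B06F, _CTRL_CMD_CHANNEL_HW_STATE, _CTX_RELOAD, hw.state);
+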
+#define NVB06F_CTRL_CMD_SET_CHANNEL_HW_STATE_FINN_PARAMS_MESSAGE_ID (0x10U) + +typedef struct NVB06F_CTRL_CMD_SET_CHANNEL_HW_STATE_FINN_PARAMS { + NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS params; +} NVB06F_CTRL_CMD_SET_CHANNEL_HW_STATE_FINN_PARAMS; + + + +/* _ctrlb06f.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc.h b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc.h new file mode 100644 index 000000000..decc79af6 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlb0cc.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" // NV2080_CTRL_GPU_REG_OP +#include "ctrlb0cc/ctrlb0ccprofiler.h" +#include "ctrlb0cc/ctrlb0ccinternal.h" diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccbase.h b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccbase.h new file mode 100644 index 000000000..a16ae91ca --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccbase.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlb0cc/ctrlb0ccbase.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" // NV2080_CTRL_GPU_REG_OP +#define NVB0CC_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xB0CC, NVB0CC_CTRL_##cat, idx) + +/* MAXWELL_PROFILER command categories (6 bits) */ +#define NVB0CC_CTRL_RESERVED (0x00) +#define NVB0CC_CTRL_PROFILER (0x01) +#define NVB0CC_CTRL_INTERNAL (0x02) + +/*! + * NVB0CC_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVB0CC_CTRL_CMD_NULL (0xb0cc0000) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + +/* _ctrlb0ccbase_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccinternal.h b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccinternal.h new file mode 100644 index 000000000..f98f2c88f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccinternal.h @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlb0cc/ctrlb0ccinternal.finn +// + + + + +#include "ctrl/ctrlb0cc/ctrlb0ccbase.h" + +#include "ctrl/ctrlb0cc/ctrlb0ccprofiler.h" /* Some controls derivative of b0ccprofiler */ +/*! + * @ref NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM + */ +#define NVB0CC_CTRL_CMD_INTERNAL_ALLOC_PMA_STREAM (0xb0cc0200) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | NVB0CC_CTRL_CMD_INTERNAL_ALLOC_PMA_STREAM_FINN_PARAMS_MESSAGE_ID" */ + + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVB0CC_CTRL_CMD_INTERNAL_ALLOC_PMA_STREAM_FINN_PARAMS_MESSAGE_ID (0x0U) + +typedef struct NVB0CC_CTRL_CMD_INTERNAL_ALLOC_PMA_STREAM_FINN_PARAMS { + NV_DECLARE_ALIGNED(NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS params, 8); +} NVB0CC_CTRL_CMD_INTERNAL_ALLOC_PMA_STREAM_FINN_PARAMS; + + + +/*! 
+ * NVB0CC_CTRL_CMD_INTERNAL_PERMISSIONS_INIT + * + * This command is used to convey the client permissions for appropriate + * profiling capability initialization + */ +#define NVB0CC_CTRL_CMD_INTERNAL_PERMISSIONS_INIT (0xb0cc0203) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID << 8) | NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS_MESSAGE_ID" */ + +#define NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS { + /*! + * [in] Is Admin profiling permitted + */ + NvBool bAdminProfilingPermitted; + /*! + * [in] Is Device profiling permitted + */ + NvBool bDevProfilingPermitted; + + /*! + * [in] Is Memory profiling permitted + */ + NvBool bMemoryProfilingPermitted; +} NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS; + +/* _ctrlb0ccinternal_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccprofiler.h b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccprofiler.h new file mode 100644 index 000000000..eb0c71db5 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlb0cc/ctrlb0ccprofiler.h @@ -0,0 +1,480 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlb0cc/ctrlb0ccprofiler.finn +// + + + + +#include "ctrl/ctrlb0cc/ctrlb0ccbase.h" + + +/*! + * NVB0CC_CTRL_CMD_RESERVE_HWPM_LEGACY + * + * Reserves the HWPM legacy PM system for use by the calling client. + * This PM system will only be accessible if this reservation is + * taken. + * + * If a device level reservation is held by another client, then this command + * will fail regardless of reservation scope. + * + * This reservation can be released with @ref NVB0CC_CTRL_CMD_RELEASE_HWPM_LEGACY. + * + */ + + +#define NVB0CC_CTRL_CMD_RESERVE_HWPM_LEGACY (0xb0cc0101) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_MESSAGE_ID" */ + +#define NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS { + /*! + * [in] Enable ctxsw for HWPM. + */ + NvBool ctxsw; +} NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS; + +/*! + * NVB0CC_CTRL_CMD_RELEASE_HWPM_LEGACY + * + * Releases the reservation taken with @ref NVB0CC_CTRL_CMD_RESERVE_HWPM_LEGACY. 
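+ *
+ * Typical pairing with the reserve call (sketch only; rmControl() is a
+ * hypothetical stand-in for the client's RM control entry point, and
+ * hProfiler is an assumed handle to the MAXWELL_PROFILER object):
+ *
+ *     NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS reserve = { 0 };
+ *     reserve.ctxsw = NV_FALSE;   // example choice: no ctxsw of the PM state
+ *     rmControl(hClient, hProfiler, NVB0CC_CTRL_CMD_RESERVE_HWPM_LEGACY,
+ *               &reserve, sizeof(reserve));
+ *     // ... configure and collect counters ...
+ *     rmControl(hClient, hProfiler, NVB0CC_CTRL_CMD_RELEASE_HWPM_LEGACY, NULL, 0);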
+ *
+ * This command does not take any parameters.
+ *
+ */
+#define NVB0CC_CTRL_CMD_RELEASE_HWPM_LEGACY (0xb0cc0102) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0x2" */
+
+/*!
+ * NVB0CC_CTRL_CMD_RESERVE_PM_AREA_SMPC
+ *
+ * Reserves the SMPC PM system for use by the calling client.
+ * This PM system will only be accessible if this reservation is
+ * taken.
+ *
+ * Reservation scope and rules are the same as for @ref NVB0CC_CTRL_CMD_RESERVE_HWPM_LEGACY.
+ *
+ * This reservation can be released with @ref NVB0CC_CTRL_CMD_RELEASE_PM_AREA_SMPC.
+ *
+ */
+#define NVB0CC_CTRL_CMD_RESERVE_PM_AREA_SMPC (0xb0cc0103) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_MESSAGE_ID" */

+#define NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS_MESSAGE_ID (0x3U)
+
+typedef struct NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS {
+    /*!
+     * [in] Enable ctxsw for SMPC.
+     */
+    NvBool ctxsw;
+} NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS;
+
+/*!
+ * NVB0CC_CTRL_CMD_RELEASE_PM_AREA_SMPC
+ *
+ * Releases the reservation taken with @ref NVB0CC_CTRL_CMD_RESERVE_PM_AREA_SMPC.
+ *
+ * This command does not take any parameters.
+ *
+ */
+#define NVB0CC_CTRL_CMD_RELEASE_PM_AREA_SMPC (0xb0cc0104) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0x4" */
+
+/*!
+ * NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM
+ *
+ * Allocates the PMA VA and maps it to the buffers for streaming records and
+ * for streaming the updated bytes available in the buffer.
+ *
+ */
+#define NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM (0xb0cc0105) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0x5" */
+
+/*!
+ * Defines the maximum size of PMA buffer for streamout. It can be up to 4GB minus one page
+ * reserved for streaming mem_bytes (see @ref NVB0CC_PMA_BYTES_AVAILABLE_SIZE).
+ */
+#define NVB0CC_PMA_BUFFER_SIZE_MAX (0xffe00000ULL) /* finn: Evaluated from "(4 * 1024 * 1024 * 1024 - 2 * 1024 * 1024)" */
+#define NVB0CC_PMA_BYTES_AVAILABLE_SIZE (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+typedef struct NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS {
+    /*!
+     * [in] Memory handle (RW memory) for streaming records.
+     * Size of this must be >= @ref pmaBufferOffset + @ref pmaBufferSize.
+     */
+    NvHandle hMemPmaBuffer;
+
+    /*!
+     * [in] Start offset of PMA buffer (offset in @ref hMemPmaBuffer).
+     */
+    NV_DECLARE_ALIGNED(NvU64 pmaBufferOffset, 8);
+
+    /*!
+     * [in] size of the buffer. This must be <= NVB0CC_PMA_BUFFER_SIZE_MAX.
+     */
+    NV_DECLARE_ALIGNED(NvU64 pmaBufferSize, 8);
+
+    /*!
+     * [in] Memory handle (RO memory) for streaming number of bytes available.
+     * Size of this must be at least @ref pmaBytesAvailableOffset +
+     * @ref NVB0CC_PMA_BYTES_AVAILABLE_SIZE.
+     */
+    NvHandle hMemPmaBytesAvailable;
+
+    /*!
+     * [in] Start offset of PMA bytes available buffer (offset in @ref hMemPmaBytesAvailable).
+     */
+    NV_DECLARE_ALIGNED(NvU64 pmaBytesAvailableOffset, 8);
+
+    /*!
+     * [in] Enable ctxsw for PMA stream.
+     */
+    NvBool ctxsw;
+
+    /*!
+     * [out] The PMA Channel Index associated with a given PMA stream.
+     */
+    NvU32 pmaChannelIdx;
+
+    /*!
+     * [out] PMA buffer VA. Note that this is a HWPM Virtual address.
+     */
+    NV_DECLARE_ALIGNED(NvU64 pmaBufferVA, 8);
+} NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS;
+
+/*!
+ * NVB0CC_CTRL_CMD_FREE_PMA_STREAM
+ *
+ * Releases (unmaps and frees) the PMA stream allocated through
+ * @ref NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM.
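+ *
+ * Typical pairing with the allocation call (sketch only; rmControl() is a
+ * hypothetical stand-in for the client's RM control entry point, and the
+ * memory handles are assumed to have been allocated beforehand):
+ *
+ *     NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS alloc = { 0 };
+ *     alloc.hMemPmaBuffer         = hRecordsMem;     // RW memory for records
+ *     alloc.pmaBufferSize         = recordsSize;     // <= NVB0CC_PMA_BUFFER_SIZE_MAX
+ *     alloc.hMemPmaBytesAvailable = hBytesAvailMem;  // RO memory for mem_bytes
+ *     alloc.ctxsw                 = NV_FALSE;
+ *     rmControl(hClient, hProfiler, NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM, &alloc, sizeof(alloc));
+ *
+ *     NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS freeParams = { 0 };
+ *     freeParams.pmaChannelIdx = alloc.pmaChannelIdx;
+ *     rmControl(hClient, hProfiler, NVB0CC_CTRL_CMD_FREE_PMA_STREAM, &freeParams, sizeof(freeParams));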
+ * + */ +#define NVB0CC_CTRL_CMD_FREE_PMA_STREAM (0xb0cc0106) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_MESSAGE_ID" */ + +#define NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS { + /*! + * [in] The PMA channel index associated with a given PMA stream. + */ + NvU32 pmaChannelIdx; +} NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS; + +/*! + * NVB0CC_CTRL_CMD_BIND_PM_RESOURCES + * + * Binds all PM resources reserved through @ref NVB0CC_CTRL_CMD_RESERVE_* + * and with @ref NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM with PMA engine. + * After this call, interface is ready for programming a collection + * of counters. + * @Note: Any new PM resource reservation via NVB0CC_CTRL_CMD_RESERVE_* or + * @ref NVB0CC_CTRL_CMD_ALLOC_PMA_STREAM request after this call will fail, + * clients need to unbind (see @ref NVB0CC_CTRL_CMD_UNBIND_PM_RESOURCES) to + * reserve more resources. + * + * This can be unbound with @ref NVB0CC_CTRL_CMD_UNBIND_PM_RESOURCES. + * + */ +#define NVB0CC_CTRL_CMD_BIND_PM_RESOURCES (0xb0cc0107) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0x7" */ + +/*! + * NVB0CC_CTRL_CMD_UNBIND_PM_RESOURCES + * + * Unbinds PM resources that were bound with @ref NVB0CC_CTRL_CMD_BIND_PM_RESOURCES + * + */ +#define NVB0CC_CTRL_CMD_UNBIND_PM_RESOURCES (0xb0cc0108) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0x8" */ + +/*! + * NVB0CC_CTRL_CMD_PMA_STREAM_UPDATE_GET_PUT + * + * This command updates bytes consumed by the SW and optionally gets the + * current available bytes in the buffer. + * + */ +#define NVB0CC_CTRL_CMD_PMA_STREAM_UPDATE_GET_PUT (0xb0cc0109) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_MESSAGE_ID" */ + +#define NVB0CC_AVAILABLE_BYTES_DEFAULT_VALUE 0xFFFFFFFF +#define NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS { + /*! + * [in] Total bytes consumed by SW since last update. + */ + NV_DECLARE_ALIGNED(NvU64 bytesConsumed, 8); + + /*! + * [in] Initiate streaming of the bytes available (see @ref hMemPmaBytesAvailable). + * RM will set the memory for streaming (see @ref hMemPmaBytesAvailable) to NVB0CC_AVAILABLE_BYTES_DEFAULT_VALUE and + * client can optionally wait (see @ref bWait) for it to change from this value. + */ + NvBool bUpdateAvailableBytes; + + /*! + * [in] Waits for available bytes to get updated + */ + NvBool bWait; + + /*! + * [out] Bytes available in the PMA buffer (see @ref hMemPmaBuffer) for SW to consume. + * This will only be populated if both bUpdateAvailableBytes and bWait are set + * to TRUE. + */ + NV_DECLARE_ALIGNED(NvU64 bytesAvailable, 8); + + /*! + * [in] If set to TRUE, current put pointer will be returned in @ref putPtr. + */ + NvBool bReturnPut; + + /*! + * [out] Current PUT pointer (MEM_HEAD). + * This will only be populated if bReturnPut is set to TRUE. + */ + NV_DECLARE_ALIGNED(NvU64 putPtr, 8); + + /*! + * [in] The PMA Channel Index associated with a given PMA stream. + */ + NvU32 pmaChannelIdx; +} NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS; + +/*! + * Maximum number of register operations allowed in a single request. + * @NOTE: @ref NVB0CC_REGOPS_MAX_COUNT is chosen to keep struct size + * of @ref NVB0CC_CTRL_EXEC_REG_OPS_PARAMS under 4KB. + */ +#define NVB0CC_REGOPS_MAX_COUNT (124) + +/*! 
+ * NVB0CC_CTRL_CMD_EXEC_REG_OPS
+ *
+ * This command is used to submit an array containing one or more
+ * register operations for processing. Each entry in the
+ * array specifies a single read or write operation. Each entry is
+ * checked for validity in the initial pass: only registers from the PM area
+ * are allowed through this interface, and only registers from PM systems for
+ * which the user has a valid reservation are allowed (see @ref NVB0CC_CTRL_CMD_RESERVE_*).
+ * Operation type (@ref NV2080_CTRL_GPU_REG_OP_TYPE_*) is not required to be passed in.
+ */
+#define NVB0CC_CTRL_CMD_EXEC_REG_OPS (0xb0cc010a) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_MESSAGE_ID" */
+
+/*!
+ * Structure definition for register operation. See @ref NV2080_CTRL_GPU_REG_OP.
+ */
+typedef NV2080_CTRL_GPU_REG_OP NVB0CC_GPU_REG_OP;
+
+/*!
+ * Enumeration of different REG_OPS modes. This mode determines how a failure
+ * of a regop is handled in a batch of regops.
+ */
+typedef enum NVB0CC_REGOPS_MODE {
+
+    /*!
+     * Either all regops will be executed or none of them will be executed.
+     * Failing regop will have the appropriate status (see @ref NVB0CC_GPU_REG_OP::regStatus).
+     */
+    NVB0CC_REGOPS_MODE_ALL_OR_NONE = 0,
+    /*!
+     * All regops will be attempted and the ones that failed will have the
+     * appropriate status (see @ref NVB0CC_GPU_REG_OP::regStatus).
+     */
+    NVB0CC_REGOPS_MODE_CONTINUE_ON_ERROR = 1,
+} NVB0CC_REGOPS_MODE;
+
+#define NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_MESSAGE_ID (0xAU)
+
+typedef struct NVB0CC_CTRL_EXEC_REG_OPS_PARAMS {
+    /*!
+     * [in] Number of valid entries in the regOps array. This value cannot
+     * exceed NVB0CC_REGOPS_MAX_COUNT.
+     */
+    NvU32 regOpCount;
+
+    /*!
+     * [in] Specifies the mode for the entire operation; see @ref NVB0CC_REGOPS_MODE.
+     */
+    NVB0CC_REGOPS_MODE mode;
+
+    /*!
+     * [out] Provides status for the entire operation. This is only valid for
+     * mode @ref NVB0CC_REGOPS_MODE_CONTINUE_ON_ERROR.
+     */
+    NvBool bPassed;
+
+    /*!
+     * [out] This is currently not populated.
+     */
+    NvBool bDirect;
+
+    /*!
+     * [in/out] An array (of fixed size NVB0CC_REGOPS_MAX_COUNT) of register read or write
+     * operations (see @ref NVB0CC_GPU_REG_OP)
+     *
+     */
+    NVB0CC_GPU_REG_OP regOps[NVB0CC_REGOPS_MAX_COUNT];
+} NVB0CC_CTRL_EXEC_REG_OPS_PARAMS;
+
+/*!
+ * NVB0CC_CTRL_CMD_RESERVE_PM_AREA_PC_SAMPLER
+ *
+ * Reserves the PC sampler system for use by the calling client.
+ *
+ * This reservation can be released with @ref NVB0CC_CTRL_CMD_RELEASE_PM_AREA_PC_SAMPLER.
+ *
+ * This command does not take any parameters.
+ *
+ * PC sampler is always context switched with a GR context, so reservation scope is
+ * always context. This requires that the profiler object is instantiated with a valid GR
+ * context. See @ref NVB2CC_ALLOC_PARAMETERS.
+ */
+
+#define NVB0CC_CTRL_CMD_RESERVE_PM_AREA_PC_SAMPLER (0xb0cc010b) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0xB" */
+
+/*!
+ * NVB0CC_CTRL_CMD_RELEASE_PM_AREA_PC_SAMPLER
+ *
+ * Releases the reservation taken with @ref NVB0CC_CTRL_CMD_RESERVE_PM_AREA_PC_SAMPLER.
+ *
+ * This command does not take any parameters.
+ *
+ */
+#define NVB0CC_CTRL_CMD_RELEASE_PM_AREA_PC_SAMPLER (0xb0cc010c) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0xC" */
+
+/*!
+ * NVB0CC_CTRL_CMD_GET_TOTAL_HS_CREDITS
+ *
+ * Gets the total high speed streaming credits available for the client.
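+ *
+ * Sketch (rmControl() is a hypothetical stand-in for the client's RM control
+ * entry point):
+ *
+ *     NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS credits = { 0 };
+ *     rmControl(hClient, hProfiler, NVB0CC_CTRL_CMD_GET_TOTAL_HS_CREDITS,
+ *               &credits, sizeof(credits));
+ *     // credits.numCredits now holds the total high speed streaming credits.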
+ * + * This command can only be performed after a bind using NVB0CC_CTRL_CMD_BIND_PM_RESOURCES. + * + */ +#define NVB0CC_CTRL_CMD_GET_TOTAL_HS_CREDITS (0xb0cc010d) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_MESSAGE_ID" */ + +#define NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS_MESSAGE_ID (0xDU) + +typedef struct NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS { + NvU32 numCredits; +} NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS; + +/*! + * NVB0CC_CTRL_CMD_SET_HS_CREDITS_CHIPLET + * + * Sets per chiplet (pmm router) credits for high speed streaming for a pma channel. + * + * @note: This command resets the current credits to 0 before setting the new values also + * if programming fails, it will reset credits to 0 for all the chiplets. + * + */ +#define NVB0CC_CTRL_CMD_SET_HS_CREDITS (0xb0cc010e) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0xE" */ + +typedef enum NVB0CC_CHIPLET_TYPE { + NVB0CC_CHIPLET_TYPE_INVALID = 0, + NVB0CC_CHIPLET_TYPE_FBP = 1, + NVB0CC_CHIPLET_TYPE_GPC = 2, + NVB0CC_CHIPLET_TYPE_SYS = 3, +} NVB0CC_CHIPLET_TYPE; + +typedef enum NVB0CC_HS_CREDITS_CMD_STATUS { + NVB0CC_HS_CREDITS_CMD_STATUS_OK = 0, + /*! + * More credits are requested than the total credits. Total credits can be queried using @ref NVB0CC_CTRL_CMD_GET_TOTAL_HS_CREDITS + */ + NVB0CC_HS_CREDITS_CMD_STATUS_INVALID_CREDITS = 1, + /*! + * Chiplet index is invalid. + */ + NVB0CC_HS_CREDITS_CMD_STATUS_INVALID_CHIPLET = 2, +} NVB0CC_HS_CREDITS_CMD_STATUS; + +typedef struct NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_INFO { + /*! + * Specifies the chiplet type @ref NVB0CC_CHIPLET_TYPE. + */ + NvU8 chipletType; + + /*! + * Specifies the logical index of the chiplet. + */ + NvU8 chipletIndex; + + /*! + * Specifies the number of credits for the chiplet. + */ + NvU16 numCredits; +} NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_INFO; + +typedef struct NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS { + /*! + * Status for the command @ref NVB0CC_HS_CREDITS_CMD_STATUS. + */ + NvU8 status; + + /*! + * Index of the failing @ref NVB0CC_CTRL_SET_HS_CREDITS_PARAMS::creditInfo entry. This + * is only relevant if status is NVB0CC_HS_CREDITS_CMD_STATUS_INVALID_CHIPLET. + */ + NvU8 entryIndex; +} NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS; + +#define NVB0CC_MAX_CREDIT_INFO_ENTRIES (63) + +typedef struct NVB0CC_CTRL_SET_HS_CREDITS_PARAMS { + /*! + * [in] The PMA Channel Index associated with a given PMA stream. + */ + NvU8 pmaChannelIdx; + + /*! + * [in] Number of valid entries in creditInfo. + */ + NvU8 numEntries; + + /*! + * [out] Provides status for the entire operation. + */ + NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_STATUS statusInfo; + + /*! + * [in] Credit programming per chiplet + */ + NVB0CC_CTRL_PMA_STREAM_HS_CREDITS_INFO creditInfo[NVB0CC_MAX_CREDIT_INFO_ENTRIES]; +} NVB0CC_CTRL_SET_HS_CREDITS_PARAMS; + +/*! + * NVB0CC_CTRL_CMD_GET_HS_CREDITS + * + * Gets per chiplet (pmm router) high speed streaming credits for a pma channel. 
+ * + */ +#define NVB0CC_CTRL_CMD_GET_HS_CREDITS (0xb0cc010f) /* finn: Evaluated from "(FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID << 8) | 0xF" */ + +typedef NVB0CC_CTRL_SET_HS_CREDITS_PARAMS NVB0CC_CTRL_GET_HS_CREDITS_PARAMS; + + + +/* _ctrlb0ccprofiler_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc06f.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc06f.h new file mode 100644 index 000000000..61fe390b4 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc06f.h @@ -0,0 +1,202 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc06f.finn +// + + + + +/* PASCAL_CHANNEL_GPFIFO_A control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrla06f.h" /* C06F is partially derived from A06F */ +#include "ctrl/ctrl906f.h" /* C06F is partially derived from 906F */ +#define NVC06F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xC06F, NVC06F_CTRL_##cat, idx) + +/* PASCAL_CHANNEL_GPFIFO_B command categories (6bits) */ +#define NVC06F_CTRL_RESERVED (0x00) +#define NVC06F_CTRL_GPFIFO (0x01) +#define NVC06F_CTRL_EVENT (0x02) + +/* + * NVC06F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVC06F_CTRL_CMD_NULL (0xc06f0000) /* finn: Evaluated from "(FINN_PASCAL_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + +/* + * NVC06F_CTRL_GET_CLASS_ENGINEID + * + * Please see description of NV906F_CTRL_GET_CLASS_ENGINEID for more information. + * + */ +#define NVC06F_CTRL_GET_CLASS_ENGINEID (0xc06f0101) /* finn: Evaluated from "(FINN_PASCAL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x1" */ + +typedef NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS NVC06F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS { + NVC06F_CTRL_GET_CLASS_ENGINEID_PARAMS params; +} NVC06F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS; + + + +/* + * NVC06F_CTRL_RESET_CHANNEL + * + * Please see description of NVA06F_CTRL_RESET_CHANNEL for more information. 
+ * + */ +#define NVC06F_CTRL_CMD_RESET_CHANNEL (0xc06f0102) /* finn: Evaluated from "(FINN_PASCAL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x2" */ +typedef NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS NVC06F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS { + NVC06F_CTRL_CMD_RESET_CHANNEL_PARAMS params; +} NVC06F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS; + + + +/* + * NVC06F_CTRL_CMD_GPFIFO_SCHEDULE + * + * Please see description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVC06F_CTRL_CMD_GPFIFO_SCHEDULE (0xc06f0103) /* finn: Evaluated from "(FINN_PASCAL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x3" */ +typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVC06F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC06F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC06F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS { + NVC06F_CTRL_GPFIFO_SCHEDULE_PARAMS params; +} NVC06F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS; + + + +/* + * NVC06F_CTRL_CMD_BIND + * + * Please see description of NVA06F_CTRL_CMD_BIND for more information. + */ +#define NVC06F_CTRL_CMD_BIND (0xc06f0104) /* finn: Evaluated from "(FINN_PASCAL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x4" */ + +typedef NVA06F_CTRL_BIND_PARAMS NVC06F_CTRL_BIND_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC06F_CTRL_CMD_BIND_FINN_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC06F_CTRL_CMD_BIND_FINN_PARAMS { + NVC06F_CTRL_BIND_PARAMS params; +} NVC06F_CTRL_CMD_BIND_FINN_PARAMS; + + + +/* + * NVC06F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION for more information. +*/ + + +#define NVC06F_CTRL_CMD_EVENT_SET_NOTIFICATION (0xc06f0205) /* finn: Evaluated from "(FINN_PASCAL_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID << 8) | 0x5" */ + +typedef NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS NVC06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC06F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NVC06F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS { + NVC06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS params; +} NVC06F_CTRL_CMD_EVENT_SET_NOTIFICATION_FINN_PARAMS; + + + +/* valid action values */ +#define NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* + * NVC06F_CTRL_CMD_EVENT_SET_TRIGGER + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_TRIGGER for more information. 
+ */ +#define NVC06F_CTRL_CMD_EVENT_SET_TRIGGER (0xc06f0206) /* finn: Evaluated from "(FINN_PASCAL_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID << 8) | 0x6" */ + + + + + +/* + * NVC06F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * Please see description of NV906F_CTRL_CMD_GET_MMU_FAULT_INFO for more information. + * + */ +#define NVC06F_CTRL_CMD_GET_MMU_FAULT_INFO (0xc06f0107) /* finn: Evaluated from "(FINN_PASCAL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x7" */ + +typedef NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS NVC06F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NVC06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS { + NV_DECLARE_ALIGNED(NVC06F_CTRL_GET_MMU_FAULT_INFO_PARAMS params, 8); +} NVC06F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS; + + + +/* _ctrlc06f.h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc365.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc365.h new file mode 100644 index 000000000..9829b381a --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc365.h @@ -0,0 +1,334 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc365.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +#define NVC365_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xC365, NVC365_CTRL_##cat, idx) + + +#define NVC365_CTRL_RESERVED (0x00) +#define NVC365_CTRL_ACCESS_CNTR_BUFFER (0x01) + + +/* + * NVC365_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
+ * + * Possible status values returned are: + * NV_OK + */ +#define NVC365_CTRL_CMD_NULL (0xc3650000) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_READ_GET + * + * This command provides the value of the GET register + * + * accessCntrBufferGetOffset [OUT] + * This parameter returns the value of the GET register + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_READ_GET (0xc3650101) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS { + NvU32 accessCntrBufferGetOffset; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS; + + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_WRITE_GET + * + * This command writes a value into the GET register + * + * accessCntrBufferGetValue [IN] + * This parameter specifies the new value of the GET register + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_WRITE_GET (0xc3650102) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS { + NvU32 accessCntrBufferGetValue; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS; + + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_READ_PUT + * + * This command provides the value of the PUT register + * + * accessCntrBufferPutOffset [OUT] + * This parameter returns the value of the PUT register + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_READ_PUT (0xc3650103) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS { + NvU32 accessCntrBufferPutOffset; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS; + + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_ENABLE + * + * This command enables/disables the access counters + * It also sets up RM to either service or ignore the Access Counter interrupts. 
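+ *
+ * Sketch (rmControl() is a hypothetical stand-in for the client's RM control
+ * entry point): enable the counters and leave interrupt servicing to RM:
+ *
+ *     NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS p = { 0 };
+ *     p.intrOwnership = NVC365_CTRL_ACCESS_COUNTER_INTERRUPT_OWNERSHIP_RM;
+ *     p.enable        = NV_TRUE;
+ *     rmControl(hClient, hAccessCntrBuffer,
+ *               NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_ENABLE, &p, sizeof(p));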
+ * + * intrOwnership [IN] + * This parameter specifies whether RM should own the interrupt upon return + * enable [IN] + * NV_TRUE = Access counters will be enabled + * NV_FALSE = Access counters will be disabled + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_ENABLE (0xc3650104) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS { + NvU32 intrOwnership; + NvBool enable; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS; + +#define NVC365_CTRL_ACCESS_COUNTER_INTERRUPT_OWNERSHIP_NO_CHANGE (0x0) +#define NVC365_CTRL_ACCESS_COUNTER_INTERRUPT_OWNERSHIP_RM (0x1) +#define NVC365_CTRL_ACCESS_COUNTER_INTERRUPT_OWNERSHIP_NOT_RM (0x2) + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_GET_SIZE + * + * This command provides the size of the notification buffer + * + * accessCntrBufferSize [OUT] + * This parameter returns the size of the notification buffer + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_GET_SIZE (0xc3650105) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS_MESSAGE_ID (0x5U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS { + NvU32 accessCntrBufferSize; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS; + + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS + * + * This command provides the access counter register mappings + * + * pAccessCntrBufferGet [OUT] + * This parameter returns the pointer to the GET register + * pAccessCntrBufferPut [OUT] + * This parameter returns the pointer to the PUT register + * pAccessCntrlBufferFull [OUT] + * This parameter returns the pointer to the FULL register + * pHubIntr [OUT] + * This parameter returns the pointer to the hub interrupt register + * pHubIntrEnSet [OUT] + * This parameter returns the pointer to the set register + * pHubIntrEnClear [OUT] + * This parameter returns the pointer to the clear register + * accessCntrMask [OUT] + * This parameter returns the interrupt mask + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS (0xc3650106) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS_MESSAGE_ID (0x6U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pAccessCntrBufferGet, 8); + NV_DECLARE_ALIGNED(NvP64 pAccessCntrBufferPut, 8); + NV_DECLARE_ALIGNED(NvP64 pAccessCntrBufferFull, 8); + NV_DECLARE_ALIGNED(NvP64 pHubIntr, 8); + NV_DECLARE_ALIGNED(NvP64 pHubIntrEnSet, 8); + NV_DECLARE_ALIGNED(NvP64 pHubIntrEnClear, 8); + NvU32 accessCntrMask; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS; + + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_GET_FULL_INFO + * + * This command gives information whether the buffer is full + * + * fullFlag [OUT] + * This parameter specifies whether the buffer is full + * + * Possible status values returned are: 
+ * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_GET_FULL_INFO (0xc3650107) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS { + NvBool fullFlag; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS; + + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_RESET_COUNTERS + * + * This command resets access counters of specified type + * + * resetFlag [OUT] + * This parameter specifies that counters have been reset + * counterType [IN] + * This parameter specifies the type of counters that should be reset (MIMC, MOMC or ALL) + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_RESET_COUNTERS (0xc3650108) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS { + NvBool resetFlag; + NvU32 counterType; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS; + +#define NVC365_CTRL_ACCESS_COUNTER_TYPE_MIMC (0x0) +#define NVC365_CTRL_ACCESS_COUNTER_TYPE_MOMC (0x1) +#define NVC365_CTRL_ACCESS_COUNTER_TYPE_ALL (0x2) + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_SET_CONFIG + * + * This command configures the access counters + * + * mimcGranularity [IN] + * This parameter specifies the desired granularity for mimc (64K, 2M, 16M, 16G) + * momcGranularity [IN] + * This parameter specifies the desired granularity for momc (64K, 2M, 16M, 16G) + * mimcLimit [IN] + * This parameter specifies mimc limit (none, qtr, half, full) + * momcLimit [IN] + * This parameter specifies momc limit (none, qtr, half, full) + * threshold [IN] + * This parameter specifies the threshold + * flag [IN] + * This parameter is a bitmask denoting what configurations should be made + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_SET_CONFIG (0xc3650109) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS { + NvU32 mimcGranularity; + NvU32 momcGranularity; + NvU32 mimcLimit; + NvU32 momcLimit; + NvU32 threshold; + NvU32 cmd; +} NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS; + +#define NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_64K (0x0) +#define NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_2M (0x1) +#define NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16M (0x2) +#define NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16G (0x3) + +#define NVC365_CTRL_ACCESS_COUNTER_MIMC_LIMIT (0x0) +#define NVC365_CTRL_ACCESS_COUNTER_MOMC_LIMIT (0x1) + +#define NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_NONE (0x0) +#define NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_QTR (0x1) +#define NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_HALF (0x2) +#define NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_FULL (0x3) + +#define NVC365_CTRL_ACCESS_COUNTER_SET_MIMC_GRANULARITY (0x1) +#define NVC365_CTRL_ACCESS_COUNTER_SET_MOMC_GRANULARITY (0x2) +#define NVC365_CTRL_ACCESS_COUNTER_SET_MIMC_LIMIT (0x4) +#define NVC365_CTRL_ACCESS_COUNTER_SET_MOMC_LIMIT (0x8) +#define 
NVC365_CTRL_ACCESS_COUNTER_SET_THRESHOLD (0x10) + +/* + * NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_ENABLE_INTR + * + * This command enables the access counters interrupts + * + * enable [OUT] + * This parameter specifies that the access counters interrupts are enabled + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_ENABLE_INTR (0xc365010b) /* finn: Evaluated from "(FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID << 8) | NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS_MESSAGE_ID" */ + +#define NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS_MESSAGE_ID (0xBU) + +typedef struct NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS { + NvBool enable; +} NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS; +/* _ctrlc365_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc369.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc369.h new file mode 100644 index 000000000..c4edd6afd --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc369.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc369.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* MMU_FAULT_BUFFER control commands and parameters */ + +#define NVC369_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xC369, NVC369_CTRL_##cat, idx) + +/* MMU_FAULT_BUFFER command categories (6bits) */ +#define NVC369_CTRL_RESERVED (0x00) +#define NVC369_CTRL_MMU_FAULT_BUFFER (0x01) + +/* + * NVC369_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. 
+ * + * Possible status values returned are: + * NV_OK + */ +#define NVC369_CTRL_CMD_NULL (0xc3690000) /* finn: Evaluated from "(FINN_MMU_FAULT_BUFFER_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + + +/* + * NVC369_CTRL_CMD_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUFFER + * + * This call creates and registers a client buffer for the non replayable faults + * + * pShadowBuffer [OUT] + * This parameter represents the pointer to the shadow buffer + * + * pShadowBufferContext [OUT] + * Execution context for pShadowBuffer queue + * + * bufferSize [OUT] + * Size in bytes of the shadow buffer for non replayable faults + * + * Possible status values returned are: + * NV_OK + */ + +#define NVC369_CTRL_CMD_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF (0xc3690101) /* finn: Evaluated from "(FINN_MMU_FAULT_BUFFER_MMU_FAULT_BUFFER_INTERFACE_ID << 8) | NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS_MESSAGE_ID" */ + +#define NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pShadowBuffer, 8); + NV_DECLARE_ALIGNED(NvP64 pShadowBufferContext, 8); + NvU32 bufferSize; +} NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS; + + +/* + * NVC369_CTRL_CMD_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUFFER + * + * This call unregisters and destroys a client buffer for the non replayable + * faults + * + * pShadowBuffer [IN] + * This parameter represents the pointer to the shadow buffer + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + */ + +#define NVC369_CTRL_CMD_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF (0xc3690102) /* finn: Evaluated from "(FINN_MMU_FAULT_BUFFER_MMU_FAULT_BUFFER_INTERFACE_ID << 8) | NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS_MESSAGE_ID" */ + +#define NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS { + NV_DECLARE_ALIGNED(NvP64 pShadowBuffer, 8); +} NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS; + +/* _ctrlc369_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h new file mode 100644 index 000000000..a4d405787 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc36f.h @@ -0,0 +1,277 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc36f.finn +// + + + + +/* VOLTA_CHANNEL_GPFIFO_A control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl906f.h" /* C36F is partially derived from 906F */ +#include "ctrl/ctrla06f.h" /* C36F is partially derived from a06F */ +#define NVC36F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xC36F, NVC36F_CTRL_##cat, idx) + +/* VOLTA_CHANNEL_GPFIFO_A command categories (6bits) */ +#define NVC36F_CTRL_RESERVED (0x00) +#define NVC36F_CTRL_GPFIFO (0x01) +#define NVC36F_CTRL_EVENT (0x02) + +/* + * NVC36F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned is: NV_OK +*/ +#define NVC36F_CTRL_CMD_NULL (0xc36f0000) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + + + + + +/* + * NVC36F_CTRL_GET_CLASS_ENGINEID + * + * Please see description of NV906F_CTRL_GET_CLASS_ENGINEID for more information. + * + */ +#define NVC36F_CTRL_GET_CLASS_ENGINEID (0xc36f0101) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x1" */ + +typedef NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS NVC36F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC36F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC36F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS { + NVC36F_CTRL_GET_CLASS_ENGINEID_PARAMS params; +} NVC36F_CTRL_GET_CLASS_ENGINEID_FINN_PARAMS; + + + +/* + * NVC36F_CTRL_RESET_CHANNEL + * + * Please see description of NV906F_CTRL_CMD_RESET_CHANNEL for more information. + * + */ +#define NVC36F_CTRL_CMD_RESET_CHANNEL (0xc36f0102) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x2" */ + +typedef NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS NVC36F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC36F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC36F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS { + NVC36F_CTRL_CMD_RESET_CHANNEL_PARAMS params; +} NVC36F_CTRL_CMD_RESET_CHANNEL_FINN_PARAMS; + + + +/* + * NVC36F_CTRL_CMD_GPFIFO_SCHEDULE + * + * Please see description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVC36F_CTRL_CMD_GPFIFO_SCHEDULE (0xc36f0103) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x3" */ + +typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVC36F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC36F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC36F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS { + NVC36F_CTRL_GPFIFO_SCHEDULE_PARAMS params; +} NVC36F_CTRL_CMD_GPFIFO_SCHEDULE_FINN_PARAMS; + + + +/* + * NVC36F_CTRL_CMD_BIND + * + * Please see description of NVA06F_CTRL_CMD_BIND for more information. 
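+ *
+ * Illustrative sketch (editorial example, not part of the original interface
+ * description): NVC36F_CTRL_CMD_BIND reuses the NVA06F parameter layout, so a
+ * caller fills the aliased struct and passes the NVC36F command code. The
+ * dispatch routine myRmControl and the handles hClient/hChannel are
+ * hypothetical stand-ins for the client environment's RM control entry point
+ * and previously allocated objects.
+ *
+ *     NVC36F_CTRL_BIND_PARAMS bindParams = { 0 };
+ *     bindParams.engineType = engineType;   // an NV2080_ENGINE_TYPE_* value chosen by the caller
+ *     NV_STATUS status = myRmControl(hClient, hChannel,
+ *                                    NVC36F_CTRL_CMD_BIND,
+ *                                    &bindParams, sizeof(bindParams));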
+ */ +#define NVC36F_CTRL_CMD_BIND (0xc36f0104) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x4" */ + +typedef NVA06F_CTRL_BIND_PARAMS NVC36F_CTRL_BIND_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC36F_CTRL_CMD_BIND_FINN_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC36F_CTRL_CMD_BIND_FINN_PARAMS { + NVC36F_CTRL_BIND_PARAMS params; +} NVC36F_CTRL_CMD_BIND_FINN_PARAMS; + + + +/* + * NVC36F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION for more information. +*/ + + +#define NVC36F_CTRL_CMD_EVENT_SET_NOTIFICATION (0xc36f0205) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID << 8) | 0x5" */ + +typedef NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS NVC36F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* + * NVC36F_CTRL_CMD_EVENT_SET_TRIGGER + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_TRIGGER for more information. + */ +#define NVC36F_CTRL_CMD_EVENT_SET_TRIGGER (0xc36f0206) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID << 8) | 0x6" */ + + + + + +/* + * NVC36F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * Please see description of NV906F_CTRL_CMD_GET_MMU_FAULT_INFO for more information. + * + */ +#define NVC36F_CTRL_CMD_GET_MMU_FAULT_INFO (0xc36f0107) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | 0x7" */ + +typedef NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS NVC36F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC36F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS_MESSAGE_ID (0x7U) + +typedef struct NVC36F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS { + NV_DECLARE_ALIGNED(NVC36F_CTRL_GET_MMU_FAULT_INFO_PARAMS params, 8); +} NVC36F_CTRL_CMD_GET_MMU_FAULT_INFO_FINN_PARAMS; + + + +/* + * NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN + * + * This command returns an opaque work submit token to the caller which + * can be used to write to doorbell register to finish submitting work. If + * the client has provided an error context handle during channel allocation, + * and the error context is large enough to write the doorbell token, a + * notification at index NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN + * will be sent with the work submit token value. This index may be modified + * by NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX. 
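+ *
+ * A minimal usage sketch (editorial example; myRmControl and the handles are
+ * hypothetical stand-ins for the client's RM control entry point and objects):
+ *
+ *     NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS tokenParams = { 0 };
+ *     NV_STATUS status = myRmControl(hClient, hChannel,
+ *                                    NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN,
+ *                                    &tokenParams, sizeof(tokenParams));
+ *     // On NV_OK, tokenParams.workSubmitToken is the value the client writes
+ *     // to the doorbell register to submit work on this channel.
+ *
+ * The parameter itself is documented below.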
+ * + * workSubmitToken The 32-bit work submit token + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN (0xc36f0108) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_MESSAGE_ID" */ + +#define NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS_MESSAGE_ID (0x8U) + +typedef struct NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS { + NvU32 workSubmitToken; +} NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS; + +/** + * NVC36F_CTRL_CMD_GPFIFO_UPDATE_FAULT_METHOD_BUFFER + * + * This command updates the HOST CE Fault method buffer + * data structure of Virtual channel created for SR-IOV guest. It will also + * update the fault method buffer address in the instance block of the channel + * + * bar2Addr[] + * Array contains the Virtual BAR2 address mapped by the Guest during channel + * creation. + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_INSUFFICIENT_RESOURCES + * + */ +#define NVC36F_CTRL_CMD_GPFIFO_FAULT_METHOD_BUFFER_MAX_RUNQUEUES 0x2 +#define NVC36F_CTRL_CMD_GPFIFO_UPDATE_FAULT_METHOD_BUFFER (0xc36f0109) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS_MESSAGE_ID" */ + +#define NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS_MESSAGE_ID (0x9U) + +typedef struct NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS { + NV_DECLARE_ALIGNED(NvU64 bar2Addr[NVC36F_CTRL_CMD_GPFIFO_FAULT_METHOD_BUFFER_MAX_RUNQUEUES], 8); +} NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS; + +/* + * NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX + * + * This command updates the notifier index within the error context notifier + * to write the doorbell token to. This interface cannot be used to cause the + * token to overwrite the RC notification slot. The notification slot + * referred to by the passed index must be within the bounds of the error + * context notifier object. In the case that multiple channels share the same + * error context, it is not illegal for the client to set the same index for + * multiple channels (however it is not recommended). + * + * [IN] index + * Notification slot to write the doorbell token. The initial value of this + * index is NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_ARGUMENT + */ + +#define NVC36F_CTRL_CMD_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX (0xc36f010a) /* finn: Evaluated from "(FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_MESSAGE_ID" */ + +#define NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS_MESSAGE_ID (0xAU) + +typedef struct NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS { + NvU32 index; +} NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS; + +/* _ctrlc36f.h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h new file mode 100644 index 000000000..42a3ac982 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370base.h @@ -0,0 +1,70 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370base.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NVC370_DISPLAY control commands and parameters */ + +#define NVC370_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0XC370, NVC370_CTRL_##cat, idx) + +/* NVC370_DISPLAY command categories (6bits) */ +#define NVC370_CTRL_RESERVED (0x00) +#define NVC370_CTRL_CHNCTL (0x01) +#define NVC370_CTRL_RG (0x02) +#define NVC370_CTRL_SEQ (0x03) +#define NVC370_CTRL_OR (0x04) +#define NVC370_CTRL_INST (0x05) +#define NVC370_CTRL_VERIF (0x06) +#define NVC370_CTRL_SYSTEM (0x07) +#define NVC370_CTRL_EVENT (0x09) + +// This struct must be the first member of all C370 control calls +typedef struct NVC370_CTRL_CMD_BASE_PARAMS { + NvU32 subdeviceIndex; +} NVC370_CTRL_CMD_BASE_PARAMS; + + +/* + * NVC370_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVC370_CTRL_CMD_NULL (0xc3700000) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + +/* _ctrlc370base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h new file mode 100644 index 000000000..355e93481 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370chnc.h @@ -0,0 +1,302 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370chnc.finn +// + + + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070chnc.h" + + + + +#define NVC370_CTRL_CMD_CHANNEL_STATE_IDLE NVBIT(0) +#define NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT1 NVBIT(2) +#define NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT2 NVBIT(3) +#define NVC370_CTRL_CMD_CHANNEL_STATE_BUSY NVBIT(6) +#define NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC NVBIT(7) +#define NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO NVBIT(8) +#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT1 NVBIT(11) +#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT2 NVBIT(12) +#define NVC370_CTRL_CMD_CHANNEL_STATE_EFI_OPERATION NVBIT(13) +#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT1 NVBIT(14) +#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT2 NVBIT(15) +#define NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_OPERATION NVBIT(16) +#define NVC370_CTRL_CMD_CHANNEL_STATE_UNCONNECTED NVBIT(17) +#define NVC370_CTRL_CMD_CHANNEL_STATE_INIT1 NVBIT(18) +#define NVC370_CTRL_CMD_CHANNEL_STATE_INIT2 NVBIT(19) +#define NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 NVBIT(20) +#define NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 NVBIT(21) + +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE 1 +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW 32 +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM 32 +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK 8 +#define NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR 8 + +/* + * NVC370_CTRL_CMD_IDLE_CHANNEL + * + * This command tries to wait or forces the desired channel state. + * + * channelClass + * This field indicates the hw class number (0xC378 - 0xC37E). + * It's defined in the h/w header (i.e. clc37d.h, etc.). + * + * channelInstance + * This field indicates which instance of the channelClass the cmd is + * meant for. (zero-based) + * + * desiredChannelStateMask + * This field indicates the desired channel states. When more than + * one bit is set, RM will return whenever it finds hardware on one + * of the states in the bistmask. + * Normal options are IDLE, QUIESCENT1 and QUIESCENT2. + * Verif only option includes BUSY as well. + * Note: + * (1) When QUIESCENT1 or QUIESCENT2 is chosen only one bit should + * be set in the bitmask. RM will ignore any other state. + * (2) Accelerators should not be required for QUIESCENT states as + * RM tries to ensure QUIESCENT forcibly on it's own. + * + * accelerators + * What accelerator bits should be used if RM timesout trying to + * wait for the desired state. This is not yet implemented since it + * should normally not be required to use these. Usage of accelerators + * should be restricted and be done very carefully as they may have + * undesirable effects. + * NOTE: accelerators should not be used directly in production code. + * + * timeout + * Timeout to use when waiting for the desired state. This is also for + * future expansion and not yet implemented. 
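+ *
+ * Illustrative sketch (editorial example; myRmControl and the handles are
+ * hypothetical stand-ins for the client's RM control entry point and objects,
+ * and the class number is only an example from the documented 0xC378-0xC37E
+ * range):
+ *
+ *     NVC370_CTRL_IDLE_CHANNEL_PARAMS idleParams = { 0 };
+ *     idleParams.base.subdeviceIndex     = 0;
+ *     idleParams.channelClass            = 0xC37D;   // e.g. the core channel class
+ *     idleParams.channelInstance         = 0;
+ *     idleParams.desiredChannelStateMask = NVC370_CTRL_IDLE_CHANNEL_STATE_IDLE;
+ *     NV_STATUS status = myRmControl(hClient, hDisplay,
+ *                                    NVC370_CTRL_CMD_IDLE_CHANNEL,
+ *                                    &idleParams, sizeof(idleParams));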
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_TIMEOUT + */ +#define NVC370_CTRL_CMD_IDLE_CHANNEL (0xc3700101) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_CORE NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WINDOW NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_WRITEBACK NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK +#define NVC370_CTRL_IDLE_CHANNEL_MAX_INSTANCE_CURSOR NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR + +#define NVC370_CTRL_IDLE_CHANNEL_STATE_IDLE NVC370_CTRL_CMD_CHANNEL_STATE_IDLE +#define NVC370_CTRL_IDLE_CHANNEL_STATE_QUIESCENT1 NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT1 +#define NVC370_CTRL_IDLE_CHANNEL_STATE_QUIESCENT2 NVC370_CTRL_CMD_CHANNEL_STATE_QUIESCENT2 + +#define NVC370_CTRL_IDLE_CHANNEL_STATE_BUSY NVC370_CTRL_CMD_CHANNEL_STATE_BUSY + +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_NONE (0x00000000) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI (NVBIT(0)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF (NVBIT(1)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA (NVBIT(2)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK (NVBIT(3)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK (NVBIT(4)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY (NVBIT(5)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT (NVBIT(6)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SYNCPOINT (NVBIT(7)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_TIMESTAMP (NVBIT(8)) +#define NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_MGI (NVBIT(9)) + +#define NVC370_CTRL_IDLE_CHANNEL_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC370_CTRL_IDLE_CHANNEL_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + NvU32 desiredChannelStateMask; + NvU32 accelerators; // For future expansion. Not yet implemented + NvU32 timeout; // For future expansion. Not yet implemented + NvBool restoreDebugMode; +} NVC370_CTRL_IDLE_CHANNEL_PARAMS; + +/* + * NVC370_CTRL_CMD_SET_ACCL + * + * This command turns accelerators on and off. The use of this command + * should be restricted as it may have undesirable effects. It's + * purpose is to provide a mechanism for clients to use the + * accelerator bits to get into states that are either not detectable + * by the RM or may take longer to reach than we think is reasonable + * to wait in the RM. + * + * NVC370_CTRL_CMD_GET_ACCL + * + * This command queries the current state of the accelerators. + * + * channelClass + * This field indicates the hw class number (0xC378 - 0xC37E). + * It's defined in the h/w header (i.e. clc37d.h, etc.). + * + * channelInstance + * This field indicates which instance of the channelClass the cmd is + * meant for. (zero-based) + * + * accelerators + * Accelerators to be set in the SET_ACCEL command. Returns the + * currently set accelerators on the GET_ACCEL command. + */ + + +/* + * + * accelMask + * A mask to specify which accelerators to change with the + * SET_ACCEL command. This field does nothing in the GET_ACCEL + * command. 
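+ *
+ * Illustrative sketch (editorial example; myRmControl and the handles are
+ * hypothetical stand-ins for the client's RM control entry point and objects):
+ *
+ *     NVC370_CTRL_SET_ACCL_PARAMS acclParams = { 0 };
+ *     acclParams.base.subdeviceIndex = 0;
+ *     acclParams.channelClass        = 0xC37D;   // e.g. the core channel class
+ *     acclParams.channelInstance     = 0;
+ *     acclParams.accelerators        = NVC370_CTRL_ACCL_IGNORE_PI;
+ *     acclParams.accelMask           = NVC370_CTRL_ACCL_IGNORE_PI;  // change only this bit
+ *     NV_STATUS status = myRmControl(hClient, hDisplay,
+ *                                    NVC370_CTRL_CMD_SET_ACCL,
+ *                                    &acclParams, sizeof(acclParams));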
+ * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_CHANNEL + * NV_ERR_INVALID_OWNER + * NV_ERR_GENERIC + * + */ + +#define NVC370_CTRL_CMD_SET_ACCL (0xc3700102) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_SET_ACCL_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_CMD_GET_ACCL (0xc3700103) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | 0x3" */ + +#define NVC370_CTRL_ACCL_MAX_INSTANCE_CORE NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE +#define NVC370_CTRL_ACCL_MAX_INSTANCE_WINDOW NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW +#define NVC370_CTRL_ACCL_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM +#define NVC370_CTRL_ACCL_MAX_INSTANCE_WRITEBACK NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK +#define NVC370_CTRL_ACCL_MAX_INSTANCE_CURSOR NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR + +#define NVC370_CTRL_ACCL_NONE NVC370_CTRL_IDLE_CHANNEL_ACCL_NONE +#define NVC370_CTRL_ACCL_IGNORE_PI NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_PI +#define NVC370_CTRL_ACCL_SKIP_NOTIF NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_NOTIF +#define NVC370_CTRL_ACCL_SKIP_SEMA NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SEMA +#define NVC370_CTRL_ACCL_IGNORE_INTERLOCK NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_INTERLOCK +#define NVC370_CTRL_ACCL_IGNORE_FLIPLOCK NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_FLIPLOCK +#define NVC370_CTRL_ACCL_TRASH_ONLY NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_ONLY +#define NVC370_CTRL_ACCL_TRASH_AND_ABORT NVC370_CTRL_IDLE_CHANNEL_ACCL_TRASH_AND_ABORT +#define NVC370_CTRL_ACCL_SKIP_SYNCPOINT NVC370_CTRL_IDLE_CHANNEL_ACCL_SKIP_SYNCPOINT +#define NVC370_CTRL_ACCL_IGNORE_TIMESTAMP NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_TIMESTAMP +#define NVC370_CTRL_ACCL_IGNORE_MGI NVC370_CTRL_IDLE_CHANNEL_ACCL_IGNORE_MGI +#define NVC370_CTRL_SET_ACCL_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC370_CTRL_SET_ACCL_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + NvU32 accelerators; + NvU32 accelMask; +} NVC370_CTRL_SET_ACCL_PARAMS; +typedef NVC370_CTRL_SET_ACCL_PARAMS NVC370_CTRL_GET_ACCL_PARAMS; + +// FINN PORT: The below type was generated by the FINN port to +// ensure that all API's have a unique structure associated +// with them! +#define NVC370_CTRL_CMD_GET_ACCL_FINN_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC370_CTRL_CMD_GET_ACCL_FINN_PARAMS { + NVC370_CTRL_GET_ACCL_PARAMS params; +} NVC370_CTRL_CMD_GET_ACCL_FINN_PARAMS; + + + + +/* + * NVC370_CTRL_CMD_GET_CHANNEL_INFO + * + * This command returns the current channel state. + * + * channelClass + * This field indicates the hw class number (0xC378 - 0xC37E). + * It's defined in the h/w header (i.e. clc37d.h, etc.). + * + * channelInstance + * This field indicates which instance of the channelClass the cmd is + * meant for. (zero-based) + * + * channelState + * This field indicates the desired channel state in a mask form that + * is compatible with NVC370_CTRL_CMD_IDLE_CHANNEL. A mask format + * allows clients to check for one from a group of states. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + * + * Display driver uses this call to ensure that all it's methods have + * propagated through hardware's internal fifo + * (NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING) before it calls + * RM to check whether or not the mode it set up in Assembly State Cache will + * be possible. 
Note that display driver can not use completion notifier in + * this case because completion notifier is associated with Update and Update + * will propagate the state from Assembly to Armed and when checking the + * possibility of a mode, display driver wouldn't want Armed state to be + * affected. + */ + + + +#define NVC370_CTRL_CMD_GET_CHANNEL_INFO (0xc3700104) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID << 8) | NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_CORE NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CORE +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WINDOW NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WINDOW_IMM NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WINDOW_IMM +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_WRITEBACK NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_WRITEBACK +#define NVC370_CTRL_GET_CHANNEL_INFO_MAX_INSTANCE_CURSOR NVC370_CTRL_CMD_MAX_CHANNEL_INSTANCE_CURSOR + +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE NVC370_CTRL_CMD_CHANNEL_STATE_IDLE +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_BUSY NVC370_CTRL_CMD_CHANNEL_STATE_BUSY +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_DEALLOC_LIMBO NVC370_CTRL_CMD_CHANNEL_STATE_DEALLOC_LIMBO +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_INIT1 NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT1 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_INIT2 NVC370_CTRL_CMD_CHANNEL_STATE_EFI_INIT2 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_EFI_OPERATION NVC370_CTRL_CMD_CHANNEL_STATE_EFI_OPERATION +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_INIT1 NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT1 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_INIT2 NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_INIT2 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_VBIOS_OPERATION NVC370_CTRL_CMD_CHANNEL_STATE_VBIOS_OPERATION +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED NVC370_CTRL_CMD_CHANNEL_STATE_UNCONNECTED +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_INIT1 NVC370_CTRL_CMD_CHANNEL_STATE_INIT1 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_INIT2 NVC370_CTRL_CMD_CHANNEL_STATE_INIT2 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN1 NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN1 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_SHUTDOWN2 NVC370_CTRL_CMD_CHANNEL_STATE_SHUTDOWN2 +#define NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE +#define NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 channelClass; + NvU32 channelInstance; + NvBool IsChannelInDebugMode; + NvU32 channelState; +} NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS; + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h new file mode 100644 index 000000000..23a26a381 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370event.h @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370event.finn +// + + + + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070event.h" + + + + + +/* valid action values */ +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* _ctrlc370event_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h new file mode 100644 index 000000000..6f7e5f21c --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370rg.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrlc370/ctrlc370rg.finn +// + + + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070rg.h" + + + + +/* + * NVC370_CTRL_CMD_GET_LOCKPINS_CAPS + * + * This command returns lockpins for the specified pinset, + * as well as lockpins' HW capabilities. + * + * pinset [in] + * This parameter takes the pinset whose corresponding + * lockpin numbers need to be determined. This only affects + * the return value for the RaterLock and FlipLock pins. + * + * frameLockPin [out] + * This parameter returns the FrameLock pin index. + * + * rasterLockPin [out] + * This parameter returns the RasterLock pin index. + * + * flipLockPin [out] + * This parameter returns the FlipLock pin index. + * + * stereoPin [out] + * This parameter returns the Stereo pin index. + * + * numScanLockPins [out] + * This parameter returns the HW capability of ScanLock pins. + * + * numFlipLockPins [out] + * This parameter returns the HW capability of FlipLock pins. + * + * numStereoPins [out] + * This parameter returns the HW capability of Stereo pins. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_PARAM_STRUCT + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC370_CTRL_CMD_GET_LOCKPINS_CAPS (0xc3700201) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RG_INTERFACE_ID << 8) | NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_GET_LOCKPINS_CAPS_FRAME_LOCK_PIN_NONE (0xffffffff) +#define NVC370_CTRL_GET_LOCKPINS_CAPS_RASTER_LOCK_PIN_NONE (0xffffffff) +#define NVC370_CTRL_GET_LOCKPINS_CAPS_FLIP_LOCK_PIN_NONE (0xffffffff) +#define NVC370_CTRL_GET_LOCKPINS_CAPS_STEREO_PIN_NONE (0xffffffff) +#define NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 pinset; + NvU32 frameLockPin; + NvU32 rasterLockPin; + NvU32 flipLockPin; + NvU32 stereoPin; + NvU32 numScanLockPins; + NvU32 numFlipLockPins; + NvU32 numStereoPins; +} NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS; + +/* + * NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR + * + * This command switches SWAP_READY_OUT GPIO between SW + * and HW control to WAR bug 200374184 + * + * bEnable [in]: + * This parameter indicates enable/disable external fliplock + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_ERR_GENERIC + */ + +#define NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR (0xc3700202) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_RG_INTERFACE_ID << 8) | NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvBool bEnable; +} NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS; + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h new file mode 100644 index 000000000..9520eb55c --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc370/ctrlc370verif.h @@ -0,0 +1,135 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc370/ctrlc370verif.finn +// + + + +#include "ctrl/ctrlc370/ctrlc370base.h" +/* C370 is partially derived from 5070 */ +#include "ctrl/ctrl5070/ctrl5070verif.h" + + + + +#define NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES (0xc3700601) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_VERIF_INTERFACE_ID << 8) | NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 headInstance; + NvU32 modesetValue; +} NVC370_CTRL_CMD_GET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS; + +/* + * NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES + * + * This command sets modeswitch flags, prior to a modeset. The flags will be + * automatically cleared at the end of each modeset, so this function must be + * called for each modeset where overrides are desired. 
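+ *
+ * A minimal sketch follows (editorial example; the parameters are documented
+ * below, and myRmControl and the handles are hypothetical stand-ins for the
+ * client's RM control entry point and objects). It marks the FORCE_BLANK bit
+ * valid in the mask and sets it in the value for head 0:
+ *
+ *     NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS ovr = { 0 };
+ *     ovr.base.subdeviceIndex = 0;
+ *     ovr.headInstance        = 0;
+ *     ovr.modesetMaskValid    = 0x00000001;   // ..._MASK_VALID_FORCE_BLANK_VALID (bit 0)
+ *     ovr.modesetValue        = 0x00000001;   // ..._VALUE_FORCE_BLANK_YES (bit 0)
+ *     NV_STATUS status = myRmControl(hClient, hDisplay,
+ *                                    NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES,
+ *                                    &ovr, sizeof(ovr));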
+ * + * headInstance + * this field specifies the head for which modeswitch flags will be overridden + * + * modesetMaskValid + * this field specifies the maskset at which modeswitch flags will be overridden + * + * modesetValue + * this field specifies the override value + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * + */ +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES (0xc3700602) /* finn: Evaluated from "(FINN_NVC370_DISPLAY_VERIF_INTERFACE_ID << 8) | NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID" */ + +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS { + NVC370_CTRL_CMD_BASE_PARAMS base; + NvU32 headInstance; + NvU32 modesetMaskValid; + NvU32 modesetValue; +} NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS; + +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK 0:0 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_BLANK_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK 1:1 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN 2:2 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_SHUTDOWN_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN 3:3 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOSHUTDOWN_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL 4:4 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_CHANGE_VPLL_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL 5:5 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOCHANGE_VPLL_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP 6:6 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_WAKEUP_INVALID 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN 7:7 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN_VALID 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_MASK_VALID_FORCE_NOBLANK_SHUTDOWN_INVALID 0x00000000 + + +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK 0:0 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_BLANK_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK 1:1 +#define 
NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN 2:2 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_SHUTDOWN_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN 3:3 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOSHUTDOWN_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL 4:4 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_CHANGE_VPLL_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL 5:5 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOCHANGE_VPLL_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP 6:6 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_WAKEUP_NO 0x00000000 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN 7:7 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN_YES 0x00000001 +#define NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_VALUE_FORCE_NOBLANK_SHUTDOWN_NO 0x00000000 + + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h new file mode 100644 index 000000000..50b017e6f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372base.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrlc372/ctrlc372base.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* NVC372_DISPLAY_SW control commands and parameters */ + +#define NVC372_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0XC372, NVC372_CTRL_##cat, idx) + +/* NVC372_DISPLAY_SW command categories (6 bits) */ +#define NVC372_CTRL_RESERVED (0x00) +#define NVC372_CTRL_CHNCTL (0x01) +#define NVC372_CTRL_VERIF (0x02) + +/* + * NVC372_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVC372_CTRL_CMD_NULL (0xc3720000) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_RESERVED_INTERFACE_ID << 8) | 0x0" */ + + +// This struct must be the first member of all +// C372 control calls +typedef struct NVC372_CTRL_CMD_BASE_PARAMS { + NvU32 subdeviceIndex; +} NVC372_CTRL_CMD_BASE_PARAMS; + +/* _ctrlc372base_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h new file mode 100644 index 000000000..b4bc93435 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc372/ctrlc372chnc.h @@ -0,0 +1,683 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc372/ctrlc372chnc.finn +// + + + + +#include "nvdisptypes.h" +#include "ctrl/ctrlc372/ctrlc372base.h" + +#define NVC372_CTRL_MAX_POSSIBLE_HEADS 8 +#define NVC372_CTRL_MAX_POSSIBLE_WINDOWS 32 + +#define NVC372_CTRL_CMD_IS_MODE_POSSIBLE (0xc3720101) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID" */ + +/* + * NVC372_CTRL_CMD_IS_MODE_POSSIBLE + * + * This command tells whether or not the specified display config is possible. + * A config might not be possible if the display requirements exceed the GPU + * capabilities. Display requirements will be higher with more display + * surfaces, higher resolutions, higher downscaling factors, etc. GPU + * capabilities are determined by clock frequencies, the width of data pipes, + * amount of mempool available, number of thread groups available, etc. + * + * Inputs: + * head.headIndex + * This is the hardware index number for the head. Only active heads + * should be included in the input structure. 
+ * + * head.maxPixelClkKHz + * This parameter specifies the pixel scanout rate (in KHz). + * + * head.rasterSize + * This structure specifies the total width and height of the raster that + * is sent to the display. (The width and height are also referred to as + * HTotal and VTotal, respectively.) + * + * head.rasterBlankStart + * X specifies the pixel column where horizontal blanking begins; + * Y specifies the pixel row where vertical blanking begins. + * + * head.rasterBlankEnd + * X specifies the pixel column where horizontal blanking ends; + * Y specifies the pixel row where vertical blanking ends. + * + * head.rasterVertBlank2 + * X and Y specify the pixel column/row where horizontal/vertical blanking + * ends on the second field of every pair for an interlaced raster. This + * field is not used when the raster is progressive. + * + * head.control.masterLockMode + * head.control.masterLockPin + * head.control.slaveLockMode + * head.control.slaveLockPin + * Heads that are raster locked or frame locked together will have + * synchronized timing. For example, vblank will occur at the same time on + * all of the heads that are locked together. + * + * "LockMode" tells if a head is raster locked, frame locked, or not locked. + * + * "LockPin" tells which heads are in a group of locked heads. There + * should be one master per group, and all slave heads that are locked to + * that master should have the same slaveLockPin number as the master's + * masterLockPin number. + * + * Note: The LockModes and LockPins are used only if the min v-pstate is + * required (i.e., if NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE + * is set). + * + * head.maxDownscaleFactorH + * head.maxDownscaleFactorV + * maxDownscaleFactorH and maxDownscaleFactorV represent the maximum amount + * by which the the composited image can be reduced in size, horizontally + * and vertically, respectively, multiplied by 0x400. For example, if the + * scaler input width is 1024, and the scaler output width is 2048, the + * downscale factor would be 1024 / 2048 = 0.5, and multiplying by 0x400 + * would give 512. + * + * head.outputScalerVerticalTaps + * scalerVerticalTaps indicates the maximum number of vertical taps + * allowed in the output scaler. + * + * Note that there are no #defines for tap values; the parameter is simply + * the number of taps (e.g., "2" for 2 taps). + * + * head.bUpscalingAllowedV + * bUpscalingAllowed indicates whether or not the composited image can be + * increased in size, vertically. + * + * head.bOverfetchEnabled + * bOverfetchEnabled indicates whether or not the vertical overfetch is + * enabled in postcomp scaler. + * + * head.minFrameIdle.leadingRasterLines + * leadingRasterLines defines the number of lines between the start of the + * frame (vsync) and the start of the active region. This includes Vsync, + * Vertical Back Porch, and the top part of the overscan border. The + * minimum value is 2 because vsync and VBP must be at least 1 line each. + * + * head.minFrameIdle.trailingRasterLines + * trailingRasterLines defines the number of lines between the end of the + * active region and the end of the frame. This includes the bottom part + * of the overscan border and the Vertical Front Porch. + * + * head.lut + * This parameter specifies whether or not the output LUT is enabled, and + * the size of the LUT. The parameter should be an + * NVC372_CTRL_IMP_LUT_USAGE_xxx value. + * + * head.cursorSize32p + * This parameter specifies the width of the cursor, in units of 32 pixels. 
+ * So, for example, "8" would mean 8 * 32 = 256, for a 256x256 cursor. Zero + * means the cursor is disabled. + * + * head.bEnableDsc + * bEnableDsc indicates whether or not DSC is enabled + * + * head.bYUV420Format + * This parameter indicates output format is YUV420. + * Refer to NVD_YUV420_Output_Functional_Description.docx for more details. + * + * head.bIs2Head1Or + * This parameter specifies if the head operates in 2Head1Or mode. + * Refer to NVD_2_Heads_Driving_1_OR_Functional_Description.docx for more details. + * + * head.bDisableMidFrameAndDWCFWatermark + * WAR for bug 200508242. + * In linux it is possible that there will be no fullscreen window visible + * for a head. For these cases we would not hit dwcf or midframe watermarks + * leading to fbflcn timing out waiting on ok_to_switch and forcing mclk + * switch. This could lead to underflows. So if that scenario is caught (by + * Display Driver) bDisableMidFrameAndDWCFWatermark will be set to true and + * IMP will exclude dwcf and midframe contribution from the "is mclk switch + * guaranteed" calculation for the bandwidth clients of that head. + * + * window.windowIndex + * This is the hardware index number for the window. Only active windows + * should be included in the input structure. + * + * window.owningHead + * This is the hardware index of the head that will receive the window's + * output. + * + * window.formatUsageBound + * This parameter is a bitmask of all possible non-rotated mode data + * formats (NVC372_CTRL_FORMAT_xxx values). + * + * window.rotatedFormatUsageBound + * This parameter is a bitmask of all possible rotated mode data formats + * (NVC372_CTRL_FORMAT_xxx values). + * + * window.maxPixelsFetchedPerLine + * This parameter defines the maximum number of pixels that may need to be + * fetched in a single line for this window. Often, this can be set to the + * viewportSizeIn.Width. But if the window is known to be clipped, such + * that an entire line will never be fetched, then this parameter can be + * set to the clipped size (to improve the chances of the mode being + * possible, or possible at a lower v-pstate). + * + * In some cases, the value of this parameter must be increased by a few + * pixels in order to account for scaling overfetch, input chroma overfetch + * (420/422->444), and/or chroma output low pass filter overfetch + * (444->422/420). This value is chip dependent; refer to the + * MaxPixelsFetchedPerLine parameter in nvdClass_01.mfs for the exact + * value. In no case does the maxPixelsFetchedPerLine value need to exceed + * the surface width. + * + * window.maxDownscaleFactorH + * window.maxDownscaleFactorV + * maxDownscaleFactorH and maxDownscaleFactorV represent the maximum amount + * by which the the window image can be reduced in size, horizontally and + * vertically, respectively, multiplied by + * NVC372_CTRL_SCALING_FACTOR_MULTIPLIER. For example, + * if the scaler input width is 1024, and the scaler output width is 2048, + * the downscale factor would be 1024 / 2048 = 0.5, and multiplying by + * NVC372_CTRL_SCALING_FACTOR_MULTIPLIER if 0x400 would give 512. + * + * window.inputScalerVerticalTaps + * scalerVerticalTaps indicates the maximum number of vertical taps + * allowed in the input scaler. + * + * Note that there are no #defines for tap values; the parameter is simply + * the number of taps (e.g., "2" for 2 taps). + * + * window.bUpscalingAllowedV + * bUpscalingAllowed indicates whether or not the composited image can be + * increased in size, vertically. 
+ * + * window.bOverfetchEnabled + * bOverfetchEnabled indicates whether or not the vertical overfetch is + * enabled in precomp scaler. + * + * window.lut + * This parameter specifies whether or not the input LUT is enabled, and + * the size of the LUT. The parameter should be an + * NVC372_CTRL_IMP_LUT_USAGE_xxx value. + * + * window.tmoLut + * This parameter specifies whether or not the tmo LUT is enabled, and + * the size of the LUT. This lut is used for HDR. The parameter should be + * an NVC372_CTRL_IMP_LUT_USAGE_xxx value. + * + * numHeads + * This is the number of heads in the "head" array of the + * NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS struct. Only active heads should be + * included in the struct. + * + * numWindows + * This is the number of windows in the "window" array of the + * NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS struct. Only active windows should + * be included in the struct. + * + * options + * This parameter specifies a bitmask for options. + * + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN + * tells IMP to calculate worstCaseMargin and worstCaseDomain. + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE + * tells IMP to calculate and report the minimum v-pstate at which the + * mode is possible. + * + * bUseCachedPerfState + * Indicates that RM should use cached values for the fastest + * available perf level (v-pstate for PStates 3.0 or pstate for + * PStates 2.0) and dispclk. This feature allows the query call to + * execute faster, and is intended to be used, for example, during + * mode enumeration, when many IMP query calls are made in close + * succession, and perf conditions are not expected to change between + * query calls. When IMP has not been queried recently, it is + * recommended to NOT use cached values, in case perf conditions have + * changed and the cached values no longer reflect the current + * conditions. + * + * testMclkFreqKHz + * This is the mclk frequency specified by the client, in KHz. RM will + * use this value to compare with the minimum dramclk required by the + * given mode. The parameter will have value 0 if the client doesn't want + * IMP query to consider this. This input is valid only on Tegra and only + * for verification purposes on internal builds. + * For this input to work, client must set + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE in the + * "options" field. + * + * Outputs: + * bIsPossible + * This output tells if the specified mode can be supported. + * + * minImpVPState + * minImpVPState returns the minimum v-pstate at which the mode is possible + * (assuming bIsPossible is TRUE). This output is valid only on dGPU, and + * only if NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in + * the "options" field. + * + * If the minimum v-pstate is required for a multi-head config, then + * masterLockMode, masterLockPin, slaveLockMode, and slaveLockPin must all + * be initialized. + * minPState + * minPState returns the pstate value corresponding to minImpVPState. It + * is returned as the numeric value of the pstate (P0 -> 0, P1 -> 1, etc.). + * This output is valid only on dGPU, and only if + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set + * in the "options" field. + * + * Note that the pstate returned by minPstateForGlitchless is not + * necessarily sufficient to meet IMP requirements. 
The pstate corresponds + * to the vpstate returned by minImpVPState, and this vpstate represents + * clocks that are sufficient for IMP requirements, but the pstate + * typically covers a range of frequencies (depending on the clock), and it + * is possible that only part of the range is sufficient for IMP. + * + * minRequiredBandwidthKBPS + * minRequiredBandwidthKBPS returns the minimum bandwidth that must be + * allocated to display in order to make the mode possible (assuming + * bIsPossible is TRUE). This output is valid only on Tegra, and only if + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the + * "options" field. + * + * floorBandwidthKBPS + * floorBandwidthKBPS returns the minimum mclk frequency that can support + * the mode, and allow glitchless mclk switch, multiplied by the width of + * the data pipe. (This is an approximation of the bandwidth that can be + * provided by the min required mclk frequency, ignoring overhead.) If the + * mode is possible, but glitchless mclk switch is not, floorBandwidthKBPS + * will be calculated based on the maximum possible mclk frequency. This + * output is valid only on Tegra, and only if + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the + * "options" field. + * + * minRequiredHubclkKHz + * minRequiredHubclkKHz returns the minimum hubclk frequency that can + * support the mode. This output is valid only on Tegra, and only if + * NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE was set in the + * "options" field. + * + * worstCaseMargin + * worstCaseMargin returns the ratio of available bandwidth to required + * bandwidth, multiplied by NV5070_CTRL_IMP_MARGIN_MULTIPLIER. Available + * bandwidth is calculated in the worst case bandwidth domain, i.e., the + * domain with the least available margin. Bandwidth domains include the + * IMP-relevant clock domains, and possibly other virtual bandwidth + * domains such as AWP. + * + * Note that IMP checks additional parameters besides the bandwidth margins + * but only the bandwidth margin is reported here, so it is possible for a + * mode to have a more restrictive domain that is not reflected in the + * reported margin result. + * + * This result is not guaranteed to be valid if the mode is not possible. + * + * Note also that the result is generally calculated for the highest + * v-pstate possible (usually P0). But if the _NEED_MIN_VPSTATE is + * specified, the result will be calculated for the min possible v-pstate + * (or the highest possible v-pstate, if the mode is not possible). + * + * The result is valid only if + * NV5070_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN is set in "options". + * + * dispClkKHz + * This is the dispclk frequency selected by IMP for this mode. For dGPU, + * it will be one of the fixed frequencies from the list of frequencies + * supported by the vbios. + * + * worstCaseDomain + * Returns a short text string naming the domain for the margin returned in + * "worstCaseMargin". See "worstCaseMargin" for more information. 
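+ *
+ * As an illustrative sketch only (not an exhaustive list of the fields a
+ * real client must program), a query for a single 1920x1080 head driving
+ * one full-screen window might be filled in roughly as follows, where
+ * "params" is assumed to be a zero-initialized
+ * NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS passed to this control call:
+ *
+ *   params.numHeads                   = 1;
+ *   params.head[0].headIndex          = 0;
+ *   params.head[0].maxPixelClkKHz     = 148500;   // 148.5 MHz
+ *   params.head[0].rasterSize.width   = 2200;     // HTotal
+ *   params.head[0].rasterSize.height  = 1125;     // VTotal
+ *
+ *   params.numWindows                        = 1;
+ *   params.window[0].windowIndex             = 0;
+ *   params.window[0].owningHead              = 0;
+ *   params.window[0].formatUsageBound        = NVC372_CTRL_FORMAT_RGB_PACKED_4_BPP;
+ *   params.window[0].maxPixelsFetchedPerLine = 1920;  // viewport width
+ *
+ *   params.options = NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN;
+ *
+ * On return, bIsPossible reports whether the mode can be supported, and,
+ * because _GET_MARGIN was requested, worstCaseMargin can be interpreted
+ * against the margin multiplier (0x400); e.g., a value of 0x500 means
+ * 0x500 / 0x400 = 1.25, i.e., 25% of bandwidth headroom in the most
+ * constrained domain (named in worstCaseDomain).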
+ * + * Possible status values returned are: + * NVOS_STATUS_SUCCESS + * NVOS_STATUS_ERROR_GENERIC + */ +#define NVC372_CTRL_IMP_LUT_USAGE_NONE 0 +#define NVC372_CTRL_IMP_LUT_USAGE_257 1 +#define NVC372_CTRL_IMP_LUT_USAGE_1025 2 + +typedef struct NVC372_CTRL_IMP_HEAD { + NvU8 headIndex; + + NvU32 maxPixelClkKHz; + + struct { + NvU32 width; + NvU32 height; + } rasterSize; + + struct { + NvU32 X; + NvU32 Y; + } rasterBlankStart; + + struct { + NvU32 X; + NvU32 Y; + } rasterBlankEnd; + + struct { + NvU32 yStart; + NvU32 yEnd; + } rasterVertBlank2; + + struct { + NV_DISP_LOCK_MODE masterLockMode; + NV_DISP_LOCK_PIN masterLockPin; + NV_DISP_LOCK_MODE slaveLockMode; + NV_DISP_LOCK_PIN slaveLockPin; + } control; + + NvU32 maxDownscaleFactorH; + NvU32 maxDownscaleFactorV; + NvU8 outputScalerVerticalTaps; + NvBool bUpscalingAllowedV; + NvBool bOverfetchEnabled; + + struct { + NvU16 leadingRasterLines; + NvU16 trailingRasterLines; + } minFrameIdle; + + NvU8 lut; + NvU8 cursorSize32p; + + NvBool bEnableDsc; + + NvBool bYUV420Format; + + NvBool bIs2Head1Or; + + NvBool bDisableMidFrameAndDWCFWatermark; +} NVC372_CTRL_IMP_HEAD; +typedef struct NVC372_CTRL_IMP_HEAD *PNVC372_CTRL_IMP_HEAD; + +typedef struct NVC372_CTRL_IMP_WINDOW { + NvU32 windowIndex; + NvU32 owningHead; + NvU32 formatUsageBound; + NvU32 rotatedFormatUsageBound; + NvU32 maxPixelsFetchedPerLine; + NvU32 maxDownscaleFactorH; + NvU32 maxDownscaleFactorV; + NvU8 inputScalerVerticalTaps; + NvBool bUpscalingAllowedV; + NvBool bOverfetchEnabled; + NvU8 lut; + NvU8 tmoLut; +} NVC372_CTRL_IMP_WINDOW; +typedef struct NVC372_CTRL_IMP_WINDOW *PNVC372_CTRL_IMP_WINDOW; + +#define NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_GET_MARGIN (0x00000001) +#define NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE (0x00000002) + +#define NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS { + NVC372_CTRL_CMD_BASE_PARAMS base; + + NvU8 numHeads; + NvU8 numWindows; + + NVC372_CTRL_IMP_HEAD head[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + // C form: NVC372_CTRL_IMP_WINDOW window[NVC372_CTRL_MAX_POSSIBLE_WINDOWS]; + NVC372_CTRL_IMP_WINDOW window[NVC372_CTRL_MAX_POSSIBLE_WINDOWS]; + + NvU32 options; + + NvU32 testMclkFreqKHz; + + NvBool bIsPossible; + + NvU32 minImpVPState; + + NvU32 minPState; + + NvU32 minRequiredBandwidthKBPS; + + NvU32 floorBandwidthKBPS; + + NvU32 minRequiredHubclkKHz; + + NvU32 worstCaseMargin; + + NvU32 dispClkKHz; + + char worstCaseDomain[8]; + + NvBool bUseCachedPerfState; +} NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS; +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *PNVC372_CTRL_IS_MODE_POSSIBLE_PARAMS; + +/* valid format values */ +#define NVC372_CTRL_FORMAT_RGB_PACKED_1_BPP (0x00000001) +#define NVC372_CTRL_FORMAT_RGB_PACKED_2_BPP (0x00000002) +#define NVC372_CTRL_FORMAT_RGB_PACKED_4_BPP (0x00000004) +#define NVC372_CTRL_FORMAT_RGB_PACKED_8_BPP (0x00000008) +#define NVC372_CTRL_FORMAT_YUV_PACKED_422 (0x00000010) +#define NVC372_CTRL_FORMAT_YUV_PLANAR_420 (0x00000020) +#define NVC372_CTRL_FORMAT_YUV_PLANAR_444 (0x00000040) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_420 (0x00000080) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422 (0x00000100) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422R (0x00000200) +#define NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_444 (0x00000400) +#define NVC372_CTRL_FORMAT_EXT_YUV_PLANAR_420 (0x00000800) +#define NVC372_CTRL_FORMAT_EXT_YUV_PLANAR_444 (0x00001000) +#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_420 (0x00002000) +#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422 
(0x00004000)
+#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422R (0x00008000)
+#define NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_444 (0x00010000)
+
+/* valid impResult values */
+#define NVC372_CTRL_IMP_MODE_POSSIBLE 0
+#define NVC372_CTRL_IMP_NOT_ENOUGH_MEMPOOL 1
+#define NVC372_CTRL_IMP_REQ_LIMIT_TOO_HIGH 2
+#define NVC372_CTRL_IMP_VBLANK_TOO_SMALL 3
+#define NVC372_CTRL_IMP_HUBCLK_TOO_LOW 4
+#define NVC372_CTRL_IMP_INSUFFICIENT_BANDWIDTH 5
+#define NVC372_CTRL_IMP_DISPCLK_TOO_LOW 6
+#define NVC372_CTRL_IMP_ELV_START_TOO_HIGH 7
+#define NVC372_CTRL_IMP_INSUFFICIENT_THREAD_GROUPS 8
+#define NVC372_CTRL_IMP_INVALID_PARAMETER 9
+#define NVC372_CTRL_IMP_UNRECOGNIZED_FORMAT 10
+#define NVC372_CTRL_IMP_UNSPECIFIED 11
+
+/*
+ * The calculated margin is multiplied by a constant, so that it can be
+ * represented as an integer with reasonable precision. "0x400" was chosen
+ * because it is a power of two, which might allow some compilers/CPUs to
+ * simplify the calculation by doing a shift instead of a multiply/divide.
+ * (And 0x400 is 1024, which is close to 1000, so that may simplify visual
+ * interpretation of the raw margin value.)
+ */
+#define NVC372_CTRL_IMP_MARGIN_MULTIPLIER (0x00000400)
+
+/* scaling factor */
+#define NVC372_CTRL_SCALING_FACTOR_MULTIPLIER (0x00000400)
+
+#define NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD 2
+#define NVC372_CTRL_CMD_MAX_SORS 4
+
+#define NVC372_CTRL_CMD_IS_MODE_POSSIBLE_OR_SETTINGS (0xc3720102) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS_MESSAGE_ID" */
+
+/*
+ * NVC372_CTRL_CMD_IS_MODE_POSSIBLE_OR_SETTINGS
+ *
+ * This command reports whether the output resource pixel clocks requested by
+ * the client are possible. Note that this is not used for the DisplayPort
+ * SOR, as that case is handled by the DisplayPort library.
+ *
+ * Inputs:
+ * numHeads
+ * This is the number of heads in the "head" array of the
+ * NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS struct. Only active heads
+ * should be included in the struct.
+ *
+ * head.headIndex
+ * This is the hardware index number for the head. Only an active head
+ * should be included in the input structure.
+ *
+ * head.maxPixelClkKHz
+ * This parameter specifies the pixel scanout rate (in KHz).
+ *
+ * head.displayId
+ * Array of displayIds associated with the head. This is limited by
+ * NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD.
+ *
+ * sor.ownerMask
+ * Consists of a mask of all heads that drive this SOR.
+ *
+ * sor.protocol
+ * Defines the protocol of the SOR in question.
+ *
+ * sor.pixelReplicateMode
+ * Defines which pixel replication mode is requested. This can be off,
+ * X2, or X4 mode.
+ *
+ * Outputs:
+ * bIsPossible
+ * This indicates whether the requested pixel clock can be supported.
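+ *
+ * As a minimal illustration only (values are arbitrary, and other fields,
+ * such as head.displayId and head.outputResourcePixelDepthBPP, would also
+ * be programmed by a real client), a single head driving SOR 0 over
+ * single-link TMDS with no pixel replication might be described as:
+ *
+ *   params.numHeads               = 1;
+ *   params.head[0].headIndex      = 0;
+ *   params.head[0].maxPixelClkKhz = 148500;
+ *
+ *   params.sor[0].ownerMask          = NVC372_CTRL_CMD_SOR_OWNER_MASK_HEAD(0);
+ *   params.sor[0].protocol           = NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A;
+ *   params.sor[0].pixelReplicateMode = NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF;
+ *
+ * bIsPossible then reports whether this output resource configuration can
+ * support the requested pixel clock.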
+ */ + + +#define NVC372_CTRL_IS_MODE_POSSIBLE_DISPLAY_ID_SKIP_IMP_OUTPUT_CHECK (0xAAAAAAAA) + +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_HEAD { + NvU8 headIndex; + NvU32 maxPixelClkKhz; + + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP outputResourcePixelDepthBPP; + + NvU32 displayId[NVC372_CTRL_CMD_NUM_DISPLAY_ID_DWORDS_PER_HEAD]; +} NVC372_CTRL_IMP_OR_SETTINGS_HEAD; +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_HEAD *PNVC372_CTRL_IMP_OR_SETTINGS_HEAD; + +#define NVC372_CTRL_CMD_SOR_OWNER_MASK_NONE (0x00000000) +#define NVC372_CTRL_CMD_SOR_OWNER_MASK_HEAD(i) (1 << i) + +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A (0x00000000) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B (0x00000001) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS (0x00000002) +#define NVC372_CTRL_CMD_SOR_PROTOCOL_SUPPORTED (0xFFFFFFFF) + +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF (0x00000000) +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X2 (0x00000001) +#define NVC372_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_X4 (0x00000002) + +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_SOR { + NvU32 ownerMask; + NvU32 protocol; + NvU32 pixelReplicateMode; +} NVC372_CTRL_IMP_OR_SETTINGS_SOR; +typedef struct NVC372_CTRL_IMP_OR_SETTINGS_SOR *PNVC372_CTRL_IMP_OR_SETTINGS_SOR; + +#define NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS { + NVC372_CTRL_CMD_BASE_PARAMS base; + + NvU32 numHeads; + + NVC372_CTRL_IMP_OR_SETTINGS_HEAD head[NVC372_CTRL_MAX_POSSIBLE_HEADS]; + + NVC372_CTRL_IMP_OR_SETTINGS_SOR sor[NVC372_CTRL_CMD_MAX_SORS]; + + NvBool bIsPossible; +} NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS; +typedef struct NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *PNVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS; + +#define NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE (0xc3720103) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS_MESSAGE_ID" */ + +/* + * NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE + * + * This control call is used by clients to inform RM about video adaptive refresh rate enable/disable. + * Based on the state, RM will enable/disable supported low power features. + * + * Inputs: + * displayID + * displayId of panel on which video adaptive refresh rate is enabled/disabled. + * + * bEnable + * NV_TRUE to enable video adaptive refresh rate mode. + * NV_FALSE to disable video adaptive refresh rate mode. + * + * Outputs: + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS { + NvU32 displayID; + NvBool bEnable; +} NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS; +typedef struct NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *PNVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS; + + +#define NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN (0xc3720104) /* finn: Evaluated from "(FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID << 8) | NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS_MESSAGE_ID" */ + +/* + * NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN + * + * This control call is used by clients to query the active viewport for the + * provided window precalculated at the beginning of each frame. + * + * Inputs: + * windowIndex + * Index of the window to be queried. Must be connected to an active head. 
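+ *
+ * For example (illustrative only), a client interested in hardware window
+ * index 2 would set:
+ *
+ *   params.windowIndex = 2;
+ *
+ * and, after the call succeeds, read the precalculated viewport point from
+ * params.activeViewportPointIn.x / .y (described under "Outputs" below).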
+ * + * Outputs: + * activeViewportPointIn + * X and Y coordinates of the active viewport on the provided window for + * the most recent frame. + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_ARGUMENT if the window index is invalid + * NV_ERR_INVALID_STATE if the window index isn't connected to a head + * NV_ERR_NOT_SUPPORTED + */ +#define NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS { + NVC372_CTRL_CMD_BASE_PARAMS base; + + NvU32 windowIndex; + + struct { + NvU32 x; + NvU32 y; + } activeViewportPointIn; +} NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS; +typedef struct NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *PNVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS; + +/* _ctrlc372chnc_h_ */ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc46f.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc46f.h new file mode 100644 index 000000000..142818d2f --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc46f.h @@ -0,0 +1,164 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc46f.finn +// + + + + +/* TURING_CHANNEL_GPFIFO_A control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl906f.h" /* C36F is partially derived from 906F */ +#include "ctrl/ctrla06f.h" /* C36F is partially derived from a06F */ +#include "ctrl/ctrlc36f.h" // This control call interface is an ALIAS of C36F + +#define NVC46F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xC36F, NVC46F_CTRL_##cat, idx) + +/* TURING_CHANNEL_GPFIFO_A command categories (6bits) */ +#define NVC46F_CTRL_RESERVED (0x00) +#define NVC46F_CTRL_GPFIFO (0x01) +#define NVC46F_CTRL_EVENT (0x02) + +/* + * NVC46F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned is: NV_OK +*/ +#define NVC46F_CTRL_CMD_NULL (NVC36F_CTRL_CMD_NULL) + + + + + + +/* + * NVC46F_CTRL_GET_CLASS_ENGINEID + * + * Please see description of NV906F_CTRL_GET_CLASS_ENGINEID for more information. 
+ * + */ +#define NVC46F_CTRL_GET_CLASS_ENGINEID (NVC36F_CTRL_GET_CLASS_ENGINEID) + +typedef NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS NVC46F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +/* + * NVC46F_CTRL_RESET_CHANNEL + * + * Please see description of NV906F_CTRL_CMD_RESET_CHANNEL for more information. + * + */ +#define NVC46F_CTRL_CMD_RESET_CHANNEL (NVC36F_CTRL_CMD_RESET_CHANNEL) + +typedef NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS NVC46F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +/* + * NVC46F_CTRL_CMD_GPFIFO_SCHEDULE + * + * Please see description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVC46F_CTRL_CMD_GPFIFO_SCHEDULE (NVC36F_CTRL_CMD_GPFIFO_SCHEDULE) + +typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVC46F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +/* + * NVC46F_CTRL_CMD_BIND + * + * Please see description of NVA06F_CTRL_CMD_BIND for more information. + */ +#define NVC46F_CTRL_CMD_BIND (NVC36F_CTRL_CMD_BIND) + +typedef NVA06F_CTRL_BIND_PARAMS NVC46F_CTRL_BIND_PARAMS; + +/* + * NVC46F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION for more information. +*/ + + +#define NVC46F_CTRL_CMD_EVENT_SET_NOTIFICATION (NVC36F_CTRL_CMD_EVENT_SET_NOTIFICATION) + +typedef NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS NVC46F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* + * NVC46F_CTRL_CMD_EVENT_SET_TRIGGER + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_TRIGGER for more information. + */ +#define NVC46F_CTRL_CMD_EVENT_SET_TRIGGER (NVC36F_CTRL_CMD_EVENT_SET_TRIGGER) + + + + + +/* + * NVC46F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * Please see description of NV906F_CTRL_CMD_GET_MMU_FAULT_INFO for more information. + * + */ +#define NVC46F_CTRL_CMD_GET_MMU_FAULT_INFO (NVC36F_CTRL_CMD_GET_MMU_FAULT_INFO) + +typedef NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS NVC46F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + +/* + * NVC46F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN + * + * This command returns an opaque work submit token to the caller which + * can be used to write to doorbell register to finish submitting work. + * + * workSubmitToken The 32-bit work submit token + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NVC46F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN (NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN) + +typedef struct NVC46F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS { + NvU32 workSubmitToken; +} NVC46F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS; + +/* _ctrlc46f.h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc56f.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc56f.h new file mode 100644 index 000000000..fa9090481 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc56f.h @@ -0,0 +1,168 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc56f.finn +// + + + + +/* AMPERE_CHANNEL_GPFIFO_A control commands and parameters */ + +#include "ctrl/ctrlxxxx.h" +#include "ctrl/ctrl906f.h" /* C36F is partially derived from 906F */ +#include "ctrl/ctrla06f.h" /* C36F is partially derived from a06F */ +#include "ctrl/ctrlc36f.h" // This control call interface is an ALIAS of C36F + + + +#define NVC56F_CTRL_CMD(cat,idx) \ + NVXXXX_CTRL_CMD(0xC36F, NVC56F_CTRL_##cat, idx) + +/* AMPERE_CHANNEL_GPFIFO_A command categories (6bits) */ +#define NVC56F_CTRL_RESERVED (0x00) +#define NVC56F_CTRL_GPFIFO (0x01) +#define NVC56F_CTRL_EVENT (0x02) + +/* + * NVC56F_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned is: NV_OK +*/ +#define NVC56F_CTRL_CMD_NULL (NVC36F_CTRL_CMD_NULL) + + + + + + +/* + * NVC56F_CTRL_GET_CLASS_ENGINEID + * + * Please see description of NV906F_CTRL_GET_CLASS_ENGINEID for more information. + * + */ +#define NVC56F_CTRL_GET_CLASS_ENGINEID (NVC36F_CTRL_GET_CLASS_ENGINEID) + +typedef NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS NVC56F_CTRL_GET_CLASS_ENGINEID_PARAMS; + +/* + * NVC56F_CTRL_RESET_CHANNEL + * + * Please see description of NV906F_CTRL_CMD_RESET_CHANNEL for more information. + * + */ +#define NVC56F_CTRL_CMD_RESET_CHANNEL (NVC36F_CTRL_CMD_RESET_CHANNEL) + +typedef NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS NVC56F_CTRL_CMD_RESET_CHANNEL_PARAMS; + +/* + * NVC56F_CTRL_CMD_GPFIFO_SCHEDULE + * + * Please see description of NVA06F_CTRL_CMD_GPFIFO_SCHEDULE for more information. + * + */ +#define NVC56F_CTRL_CMD_GPFIFO_SCHEDULE (NVC36F_CTRL_CMD_GPFIFO_SCHEDULE) + +typedef NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS NVC56F_CTRL_GPFIFO_SCHEDULE_PARAMS; + +/* + * NVC56F_CTRL_CMD_BIND + * + * Please see description of NVA06F_CTRL_CMD_BIND for more information. + */ +#define NVC56F_CTRL_CMD_BIND (NVC36F_CTRL_CMD_BIND) + +typedef NVA06F_CTRL_BIND_PARAMS NVC56F_CTRL_BIND_PARAMS; + +/* + * NVC56F_CTRL_CMD_EVENT_SET_NOTIFICATION + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_NOTIFICATION for more information. 
+*/ + + +#define NVC56F_CTRL_CMD_EVENT_SET_NOTIFICATION (NVC36F_CTRL_CMD_EVENT_SET_NOTIFICATION) + +typedef NVA06F_CTRL_EVENT_SET_NOTIFICATION_PARAMS NVC56F_CTRL_EVENT_SET_NOTIFICATION_PARAMS; + +/* valid action values */ +#define NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE +#define NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE +#define NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT + +/* + * NVC56F_CTRL_CMD_EVENT_SET_TRIGGER + * + * Please see description of NVA06F_CTRL_CMD_EVENT_SET_TRIGGER for more information. + */ +#define NVC56F_CTRL_CMD_EVENT_SET_TRIGGER (NVC36F_CTRL_CMD_EVENT_SET_TRIGGER) + + + + + +/* + * NVC56F_CTRL_CMD_GET_MMU_FAULT_INFO + * + * Please see description of NV906F_CTRL_CMD_GET_MMU_FAULT_INFO for more information. + * + */ +#define NVC56F_CTRL_CMD_GET_MMU_FAULT_INFO (NVC36F_CTRL_CMD_GET_MMU_FAULT_INFO) + +typedef NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS NVC56F_CTRL_GET_MMU_FAULT_INFO_PARAMS; + +/* + * NVC56F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN + * + * This command returns an opaque work submit token to the caller which + * can be used to write to doorbell register to finish submitting work. + * + * workSubmitToken The 32-bit work submit token + * + * Possible status values returned are: + * NV_OK + * NV_ERR_INVALID_OBJECT_HANDLE + * NV_ERR_INVALID_ARGUMENT + * + */ + +#define NVC56F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN (NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN) + +typedef struct NVC56F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS { + NvU32 workSubmitToken; +} NVC56F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS; + + + +/* _ctrlc56f.h_ */ + diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc637.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc637.h new file mode 100644 index 000000000..d5cd13803 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc637.h @@ -0,0 +1,295 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: ctrl/ctrlc637.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* AMPERE_SMC_PARTITION_REF commands and parameters */ + +#define NVC637_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xC637, NVC637_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NVC637_CTRL_RESERVED (0x00) +#define NVC637_CTRL_EXEC_PARTITIONS (0x01) + + +/*! + * NVC637_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVC637_CTRL_CMD_NULL (0xc6370000) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/*! + * NVC637_CTRL_EXEC_PARTITIONS_INFO + * + * This structure specifies resources in an execution partition + * + * GpcCount[IN/OUT] + * - Number of GPCs in this partition + * + * VeidCount[OUT] + * - Number of VEIDs available in this partition. + * + * ceCount[IN/OUT] + * - Copy Engines in this partition + * + * nvEncCount[IN/OUT] + * - Encoder Engines in this partition + * + * nvDecCount[IN/OUT] + * - Decoder Engines in this partition + * + * nvJpgCount[IN/OUT] + * - Jpg Engines in this partition + * + * nvOfaCount[IN/OUT] + * - Ofa engines in this partition + * + * sharedEngFlags[IN/OUT] + * - Flags determining whether above engines are shared with other execution partitions + * + * veidStartOffset[OUT] + * - VEID start offset within GPU partition + */ +typedef struct NVC637_CTRL_EXEC_PARTITIONS_INFO { + NvU32 gpcCount; + NvU32 veidCount; + NvU32 ceCount; + NvU32 nvEncCount; + NvU32 nvDecCount; + NvU32 nvJpgCount; + NvU32 ofaCount; + NvU32 sharedEngFlag; + NvU32 veidStartOffset; +} NVC637_CTRL_EXEC_PARTITIONS_INFO; + +#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG 31:0 +#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NONE 0x0 +#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_CE NVBIT(0) +#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVDEC NVBIT(1) +#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVENC NVBIT(2) +#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_OFA NVBIT(3) +#define NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVJPG NVBIT(4) + +#define NVC637_CTRL_MAX_EXEC_PARTITIONS 8 +#define NVC637_CTRL_EXEC_PARTITIONS_ID_INVALID 0xFFFFFFFF + +/*! + * NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS + * + * This command will create requested execution partitions under the subscribed + * memory partition. The memory partition is expected to be configured before + * execution partition creation. + * + * bQuery[IN] + * - If NV_TRUE, execution partitions will not be created, but return + * status of NV_OK will indicate that the request is valid and can + * currently be fulfilled + * flag [IN] + * REQUEST_WITH_PART_ID + * - If set, RM will try to assign execution partition id requested by clients. + * This flag is only supported on vGPU enabled RM build and will be removed + * when vgpu plugin implements virtualized execution partition ID support. 
+ * (bug 2938187) + * + * execPartCount[IN] + * - Number of execution partitions requested + * + * execPartInfo[IN] + * - Requested execution partition resources for each requested partition + * + * execPartId[OUT] + * - ID of each requested execution partition + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_WITH_PART_ID 0:0 +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_WITH_PART_ID_FALSE (0x00000000) +#define NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_WITH_PART_ID_TRUE (0x00000001) + + + + + +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_CREATE (0xc6370101) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_MESSAGE_ID" */ + +/*! + * NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS + * + * This command will delete requested execution partitions. + * + * execPartCount[IN] + * - Number of execution partitions to delete. + * + * execPartId[IN] + * - Execution partition IDs to delete + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_OBJECT_NOT_FOUND + */ +#define NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS { + NvBool bQuery; + NvU32 flags; + NvU32 execPartCount; + // C form: NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + // C form: NvU32 execPartId[NVC637_CTRL_MAX_EXECUTION_PARTITIONS]; + NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS]; +} NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS; +#define NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_MESSAGE_ID (0x2U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS { + NvU32 execPartCount; + NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS]; +} NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS; + +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_DELETE (0xc6370102) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS_MESSAGE_ID" */ + +/*! + * NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS + * + * This command will return information about execution partitions which + * currently exist within the subscribed memory partition. + * + * execPartCount[OUT] + * - Number of existing execution partitions + * + * execPartId[OUT] + * - ID of existing execution partitions + * + * execPartInfo[OUT] + * - Resources within each existing execution partition + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ +#define NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS_MESSAGE_ID (0x3U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS { + NvU32 execPartCount; + // C form: NvU32 execPartId[NVC637_CTRL_MAX_EXECUTION_PARTITIONS]; + NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + // C form: NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + NVC637_CTRL_EXEC_PARTITIONS_INFO execPartInfo[NVC637_CTRL_MAX_EXEC_PARTITIONS]; +} NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS; + +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_GET (0xc6370103) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS_MESSAGE_ID" */ + +/*! 
+ * NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS + * + * This command will return IDs of all active execution partitions in a memory + * partition + * + * execPartCount[OUT] + * - Number of existing execution partitions + * + * execPartId[OUT] + * - ID of existing execution partitions + * + * execPartUuid[OUT] + * - ASCII UUID string of existing execution partitions + * + * Possible status values returned are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + */ + +/* 'M' 'I' 'G' '-'(x5), '\0x0', extra = 9 */ +#define NVC637_UUID_LEN 16 +#define NVC637_UUID_STR_LEN (0x29) /* finn: Evaluated from "((NVC637_UUID_LEN << 1) + 9)" */ + +typedef struct NVC637_EXEC_PARTITION_UUID { + // C form: char str[NVC638_UUID_STR_LEN]; + char str[NVC637_UUID_STR_LEN]; +} NVC637_EXEC_PARTITION_UUID; + +#define NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS_MESSAGE_ID (0x4U) + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS { + NvU32 execPartCount; + + // C form: NvU32 execPartId[NVC637_CTRL_MAX_EXECUTION_PARTITIONS]; + NvU32 execPartId[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + + // C form: NVC637_EXEC_PARTITION_UUID execPartUuid[NVC637_CTRL_MAX_EXEC_PARTITIONS]; + NVC637_EXEC_PARTITION_UUID execPartUuid[NVC637_CTRL_MAX_EXEC_PARTITIONS]; +} NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS; + +#define NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS (0xc6370104) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS_MESSAGE_ID" */ + +/* + * NVC637_CTRL_CMD_EXEC_PARTITIONS_EXPORT + * + * Export the resource and placement information about an exec partition such + * that a similar partition can be recreated from scratch in the same position. + */ +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_EXPORT (0xc6370105) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | 0x5" */ + +/* + * NVC637_CTRL_CMD_EXEC_PARTITIONS_IMPORT + * + * Create an exec partition resembling the exported partition info. The imported + * partition should behave identically with respect to fragmentation. + */ +#define NVC637_CTRL_CMD_EXEC_PARTITIONS_IMPORT (0xc6370106) /* finn: Evaluated from "(FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID << 8) | 0x6" */ + +#define NVC637_CTRL_EXEC_PARTITIONS_EXPORT_MAX_ENGINES_MASK_SIZE 4 +typedef struct NVC637_CTRL_EXEC_PARTITIONS_EXPORTED_INFO { + NV_DECLARE_ALIGNED(NvU64 enginesMask[NVC637_CTRL_EXEC_PARTITIONS_EXPORT_MAX_ENGINES_MASK_SIZE], 8); + NvU8 uuid[NVC637_UUID_LEN]; + NvU32 sharedEngFlags; + NvU32 gpcMask; + NvU32 veidOffset; + NvU32 veidCount; +} NVC637_CTRL_EXEC_PARTITIONS_EXPORTED_INFO; + +typedef struct NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS { + NvU32 id; + NV_DECLARE_ALIGNED(NVC637_CTRL_EXEC_PARTITIONS_EXPORTED_INFO info, 8); +} NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS; + +// _ctrlc637_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlc638.h b/src/common/sdk/nvidia/inc/ctrl/ctrlc638.h new file mode 100644 index 000000000..47429b0a7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlc638.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlc638.finn +// + + + + +#include "ctrl/ctrlxxxx.h" +/* AMPERE_SMC_EXEC_PARTITION_REF commands and parameters */ + +#define NVC638_CTRL_CMD(cat,idx) NVXXXX_CTRL_CMD(0xC638, NVC638_CTRL_##cat, idx) + +/* Command categories (6bits) */ +#define NVC638_CTRL_RESERVED (0x00) +#define NVC638_CTRL_EXEC_PARTITION (0x01) + +/*! + * NVC638_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * + * Possible status values returned are: + * NV_OK + */ +#define NVC638_CTRL_CMD_NULL (0xc6380000) /* finn: Evaluated from "(FINN_AMPERE_SMC_EXEC_PARTITION_REF_RESERVED_INTERFACE_ID << 8) | 0x0" */ + +/*! + * NVC638_CTRL_CMD_GET_UUID + * + * This command returns SHA1 ASCII UUID string as well as the binary UUID for + * the execution partition. The ASCII string format is, + * "MIG-%16x-%08x-%08x-%08x-%024x" (the canonical format of a UUID) + * + * uuid[OUT] + * - Raw UUID bytes + * + * uuidStr[OUT] + * - ASCII UUID string + * + * Possible status return values are: + * NV_OK + * NV_ERR_NOT_SUPPORTED + * NV_ERR_INVALID_STATE + */ + +#define NVC638_UUID_LEN 16 + +/* 'M' 'I' 'G' '-'(x5), '\0x0', extra = 9 */ +#define NVC638_UUID_STR_LEN (0x29) /* finn: Evaluated from "((NVC638_UUID_LEN << 1) + 9)" */ + + + + + +#define NVC638_CTRL_CMD_GET_UUID (0xc6380101) /* finn: Evaluated from "(FINN_AMPERE_SMC_EXEC_PARTITION_REF_EXEC_PARTITION_INTERFACE_ID << 8) | NVC638_CTRL_GET_UUID_PARAMS_MESSAGE_ID" */ + + + +#define NVC638_CTRL_GET_UUID_PARAMS_MESSAGE_ID (0x1U) + +typedef struct NVC638_CTRL_GET_UUID_PARAMS { + // C form: NvU8 uuid[NVC638_UUID_LEN]; + NvU8 uuid[NVC638_UUID_LEN]; + + // C form: char uuidStr[NVC638_UUID_STR_LEN]; + char uuidStr[NVC638_UUID_STR_LEN]; +} NVC638_CTRL_GET_UUID_PARAMS;// _ctrlc638_h_ diff --git a/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h b/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h new file mode 100644 index 000000000..7a5eaad23 --- /dev/null +++ b/src/common/sdk/nvidia/inc/ctrl/ctrlxxxx.h @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2008,2013,2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: ctrl/ctrlxxxx.finn +// + + + +#include "nvtypes.h" + +/* definitions shared by all CTRL interfaces */ + +/* Basic command format: +* cmd_class [31:16], +* cmd_reserved [15:15], +* cmd_reserved [14:14], +* cmd_category [13:8], +* cmd_index [7:0] +*/ + +#define NVXXXX_CTRL_CMD_CLASS 31:16 + +#define NVXXXX_CTRL_CMD_CATEGORY 13:8 +#define NVXXXX_CTRL_CMD_INDEX 7:0 + +/* don't use DRF_NUM - not always available */ +# define NVXXXX_CTRL_CMD(cls,cat,idx) \ + (((cls) << 16) | ((0) << 15) | ((0) << 14) \ + | ((cat) << 8) | ((idx) & 0xFF)) +/* + * NVXXXX_CTRL_CMD_NULL + * + * This command does nothing. + * This command does not take any parameters. + * This command is valid for all classes. + * + * Possible status values returned are: + * NV_OK + */ +#define NVXXXX_CTRL_CMD_NULL (0x00000000) + +#define NVxxxx_CTRL_LEGACY_PRIVILEGED (0xC0) +#define NVxxxx_CTRL_LEGACY_NON_PRIVILEGED (0x80) diff --git a/src/common/sdk/nvidia/inc/dpringbuffertypes.h b/src/common/sdk/nvidia/inc/dpringbuffertypes.h new file mode 100644 index 000000000..b61abaff7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/dpringbuffertypes.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef INCLUDED_DPRINGBUFFERTYPES_H +#define INCLUDED_DPRINGBUFFERTYPES_H + +#define MAX_MESSAGE_LEN 100 +#define MAX_RECORD_COUNT 15 + +typedef enum _DP_RECORD_TYPE +{ + ASSERT_HIT = 135, + LOG_CALL = 136, +} DP_RECORD_TYPE; + +typedef struct _DpAssertHitRecord +{ + NvU64 breakpointAddr; +} DpAssertHitRecord, *PDpAssertHitRecord; + +typedef struct _DpLogCallRecord +{ + char msg[MAX_MESSAGE_LEN]; + NvU64 addr; +} DpLogCallRecord, *PDpLogCallRecord; + +typedef union _DpLogRecord +{ + DpAssertHitRecord dpAssertHitRecord; + DpLogCallRecord dpLogCallRecord; +} DpLogRecord, *PDpLogRecord; + +typedef enum _DpLogQueryType +{ + DpLogQueryTypeAssert = 1, + DpLogQueryTypeCallLog = 2, +} DpLogQueryType, *PDpLogQueryType; + +#endif //INCLUDED_DPRINGBUFFERTYPES_H diff --git a/src/common/sdk/nvidia/inc/finn_rm_api.h b/src/common/sdk/nvidia/inc/finn_rm_api.h new file mode 100644 index 000000000..e3aeb0f45 --- /dev/null +++ b/src/common/sdk/nvidia/inc/finn_rm_api.h @@ -0,0 +1,1028 @@ +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// + + +#include + + +#include + +#define FINN_INTERFACE_ID(T) (T ## _INTERFACE_ID) +#define FINN_MESSAGE_ID(T) (T ## _MESSAGE_ID) + +#if (defined(__cplusplus) && __cplusplus >= 201103L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) +#define FINN_OFFSETOF(T,f) (offsetof(T, f)) +#else +#define FINN_OFFSETOF(T,f) ((NvU64)&((T*)0)->f) +#endif + +#if !defined(_MSC_VER) && (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) && !defined(__arm) +#define FINN_PACK_COMPOSITE(b) b __attribute__ ((packed)) +#else +#define FINN_PACK_COMPOSITE(b) b +#endif + +/* + * FINN serialization version. A version mismatch indicates incompatibility + * between the serializer and the deserializer. + * + * WARNING: Current serialization version is 0. This is a pre-release version of + * serialization and is only intended to be used in a driver and client compiled + * together. DO NOT use this in firmware or versioned clients. + */ +#define FINN_SERIALIZATION_VERSION 0 + +/* + * FINN compiler version + */ +#define FINN_VERSION_MAJOR 1 +#define FINN_VERSION_MINOR 11 +#define FINN_VERSION_PATCH 0 + +typedef struct FINN_RM_API +{ + NV_DECLARE_ALIGNED(NvU64 version, 8); + NV_DECLARE_ALIGNED(NvU64 payloadSize, 8); + NV_DECLARE_ALIGNED(NvU64 interface, 8); + NV_DECLARE_ALIGNED(NvU64 message, 8); + NV_DECLARE_ALIGNED(NvU64 fieldMask[0], 8); + } FINN_RM_API; + +/*! + * @brief Serializes an RM API control params struct and copies it into the + * serialization buffer as a FINN message. + * + * @note FinnRmApiSerializeDown is for serializing down the call stack. + * + * FinnRmApiSerializeUp is for serializing up the call stack. It + * frees memory allocated by FinnRmApiDeserializeDown. Use only + * when handling RM API control requests. + * + * @warning One of these may be unimplemented depending on platform. If both + * are implemented, misuse causes memory corruption and memory leaks. + * + * @param[in] interface FINN interface ID of the param struct. + * @param[in] message FINN message ID of the param struct. + * @param[in] src Pointer to the source param struct from which to + * copy the data. 
+ * @param[in, out] dst Double pointer to the destination buffer into + * which to copy the data. *dst will be set to + * *dst + serialized size on success and to the + * location of failed serialization on failure. + * @param[in] dst_size Maximum size of the destination buffer measured in + * `NvU8` units. + * + * @retval NV_OK Serialization successful. + * @retval NV_ERR_INVALID_ARGUMENT Bad function arguments, invalid union + * selector, or invalid enum value. + * @retval NV_ERR_NOT_SUPPORTED Unserializable or nonexistent ID. + * @retval NV_ERR_NOT_COMPATIBLE Container count too large. + * @retval NV_ERR_OUT_OF_RANGE Ranged field exceeded bounds. + * @retval NV_ERR_BUFFER_TOO_SMALL Destination buffer size too small. + */ +/**@{*/ +NV_STATUS FinnRmApiSerializeUp(NvU64 interface, NvU64 message, const void *src, NvU8 **dst, NvLength dst_size); +NV_STATUS FinnRmApiSerializeDown(NvU64 interface, NvU64 message, const void *src, NvU8 **dst, NvLength dst_size); +/**@}*/ + +/*! + * @brief The following APIs deserialize a FINN message from the serialization + * buffer and copy it into an RM API control params struct. + * + * @note FinnRmApiDeserializeDown is for deserializing down the call stack. It + * allocates deep buffers for primitive pointers in the serialization + * buffer, assuming that it remains in memory. Use only when handling + * RM API control requests. + * + * FinnRmApiDeserializeUp is for deserializing up the call stack. It + * copies deep buffers of primitive pointers into the params struct, + * assuming that memory is already allocated for them. Use only when + * receiving RM API control results. + * + * @warning One of these may be unimplemented depending on platform. If both + * are implemented, misuse causes memory corruption and memory leaks. + * + * @param[in, out] src Double pointer to the source buffer from which to + * copy the data. *src is set to *src + serialized + * size on success and to the location of failed + * deserialization on failure. + * @param[in] src_size Maximum size of the source buffer measured in + * `NvU8` units. + * @param[in, out] dst Pointer to the destination param struct into which + * to copy the data. + * @param[in] dst_size Size of the destination param struct measured in + * `char` units per `sizeof` operator. + * + * @retval NV_OK Deserialization successful. + * @retval NV_ERR_INVALID_ARGUMENT Bad function arguments, invalid union + * selector, invalid enum value, or + * mismatch between expected and actual + * serialized size. + * @retval NV_ERR_NOT_SUPPORTED Unserializable or nonexistent ID. + * @retval NV_ERR_OUT_OF_RANGE Ranged field exceeded bounds. + * @retval NV_ERR_BUFFER_TOO_SMALL Source/destination buffer too small. + * @retval NV_ERR_LIB_RM_VERSION_MISMATCH Version mismatch. + */ +/**@{*/ +NV_STATUS FinnRmApiDeserializeDown(NvU8 **src, NvLength src_size, void *dst, NvLength dst_size); +NV_STATUS FinnRmApiDeserializeUp(NvU8 * const *src, NvLength src_size, void *dst, NvLength dst_size); +/**@}*/ + +/*! + * @brief Calculates the serialized size of an RM API param struct. + * + * @param[in] interface FINN interface ID of the param struct. + * @param[in] message FINN message ID of the param struct. + * @param[in] src Pointer to the param struct. + * + * @retval Non-zero serialized size of param struct on success. + * @retval 0 if the API is unsupported by serialization or src is NULL. + */ +NvU64 FinnRmApiGetSerializedSize(NvU64 interface, NvU64 message, const NvP64 src); + +/*! 
+ * @brief Fetches the unserialized size of an API param struct. + * + * @note The size is measured in `char` units like the `sizeof` operator. + * + * @param[in] interface FINN interface ID of the param struct. + * @param[in] message FINN message ID of the param struct. + * + * @retval Non-zero sizeof param struct on success. + * @retval 0 if the API is unsupported by serialization. + */ +NvU64 FinnRmApiGetUnserializedSize(NvU64 interface, NvU64 message); + + + +#define FINN_NV01_ROOT_RESERVED_INTERFACE_ID (0x0U) +typedef FINN_RM_API FINN_NV01_ROOT_RESERVED; + + +#define FINN_NV01_ROOT_CLIENT_INTERFACE_ID (0xdU) +typedef FINN_RM_API FINN_NV01_ROOT_CLIENT; + + +#define FINN_NV01_ROOT_DIAG_INTERFACE_ID (0x4U) +typedef FINN_RM_API FINN_NV01_ROOT_DIAG; + + +#define FINN_NV01_ROOT_EVENT_INTERFACE_ID (0x5U) +typedef FINN_RM_API FINN_NV01_ROOT_EVENT; + + +#define FINN_NV01_ROOT_GPU_INTERFACE_ID (0x2U) +typedef FINN_RM_API FINN_NV01_ROOT_GPU; + + +#define FINN_NV01_ROOT_GPUACCT_INTERFACE_ID (0xbU) +typedef FINN_RM_API FINN_NV01_ROOT_GPUACCT; + + + +#define FINN_NV01_ROOT_GSYNC_INTERFACE_ID (0x3U) +typedef FINN_RM_API FINN_NV01_ROOT_GSYNC; + + +#define FINN_NV01_ROOT_NVD_INTERFACE_ID (0x6U) +typedef FINN_RM_API FINN_NV01_ROOT_NVD; + + +#define FINN_NV01_ROOT_PROC_INTERFACE_ID (0x9U) +typedef FINN_RM_API FINN_NV01_ROOT_PROC; + + + +#define FINN_NV01_ROOT_SYNC_GPU_BOOST_INTERFACE_ID (0xaU) +typedef FINN_RM_API FINN_NV01_ROOT_SYNC_GPU_BOOST; + + +#define FINN_NV01_ROOT_SYSTEM_INTERFACE_ID (0x1U) +typedef FINN_RM_API FINN_NV01_ROOT_SYSTEM; + + +#define FINN_NV01_ROOT_OS_UNIX_INTERFACE_ID (0x3dU) +typedef FINN_RM_API FINN_NV01_ROOT_OS_UNIX; + + + +#define FINN_NV01_ROOT_OS_WINDOWS_INTERFACE_ID (0x3fU) +typedef FINN_RM_API FINN_NV01_ROOT_OS_WINDOWS; + + +#define FINN_NV01_CONTEXT_DMA_RESERVED_INTERFACE_ID (0x200U) +typedef FINN_RM_API FINN_NV01_CONTEXT_DMA_RESERVED; +#define FINN_NV01_CONTEXT_DMA_DMA_INTERFACE_ID (0x201U) +typedef FINN_RM_API FINN_NV01_CONTEXT_DMA_DMA; + + +#define FINN_NV01_TIMER_RESERVED_INTERFACE_ID (0x400U) +typedef FINN_RM_API FINN_NV01_TIMER_RESERVED; +#define FINN_NV01_TIMER_TMR_INTERFACE_ID (0x401U) +typedef FINN_RM_API FINN_NV01_TIMER_TMR; + + +#define FINN_FABRIC_MANAGER_SESSION_RESERVED_INTERFACE_ID (0xf00U) +typedef FINN_RM_API FINN_FABRIC_MANAGER_SESSION_RESERVED; +#define FINN_FABRIC_MANAGER_SESSION_FM_INTERFACE_ID (0xf01U) +typedef FINN_RM_API FINN_FABRIC_MANAGER_SESSION_FM; + + +#define FINN_FABRIC_MANAGER_SESSION_IMEX_INTERFACE_ID (0xf02U) +typedef FINN_RM_API FINN_FABRIC_MANAGER_SESSION_IMEX; + + +#define FINN_NV0020_GPU_MANAGEMENT_RESERVED_INTERFACE_ID (0x2000U) +typedef FINN_RM_API FINN_NV0020_GPU_MANAGEMENT_RESERVED; +#define FINN_NV0020_GPU_MANAGEMENT_GPU_MGMT_INTERFACE_ID (0x2001U) +typedef FINN_RM_API FINN_NV0020_GPU_MANAGEMENT_GPU_MGMT; + + +#define FINN_NV01_MEMORY_SYSTEM_RESERVED_INTERFACE_ID (0x3e00U) +typedef FINN_RM_API FINN_NV01_MEMORY_SYSTEM_RESERVED; +#define FINN_NV01_MEMORY_SYSTEM_MEMORY_INTERFACE_ID (0x3e01U) +typedef FINN_RM_API FINN_NV01_MEMORY_SYSTEM_MEMORY; + + +#define FINN_NV01_ROOT_USER_RESERVED_INTERFACE_ID (0x4100U) +typedef FINN_RM_API FINN_NV01_ROOT_USER_RESERVED; +#define FINN_NV01_ROOT_USER_MEMORY_INTERFACE_ID (0x4101U) +typedef FINN_RM_API FINN_NV01_ROOT_USER_MEMORY; + + +#define FINN_NV04_DISPLAY_COMMON_RESERVED_INTERFACE_ID (0x7300U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_RESERVED; + + +#define FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID (0x7311U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_DFP; + + +#define 
FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID (0x7313U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_DP; + + + +#define FINN_NV04_DISPLAY_COMMON_INTERNAL_INTERFACE_ID (0x7304U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_INTERNAL; + + + +#define FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID (0x7302U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_SPECIFIC; + + +#define FINN_NV04_DISPLAY_COMMON_STEREO_INTERFACE_ID (0x7317U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_STEREO; + + + +#define FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID (0x7301U) +typedef FINN_RM_API FINN_NV04_DISPLAY_COMMON_SYSTEM; + + +#define FINN_NV01_DEVICE_0_RESERVED_INTERFACE_ID (0x8000U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_RESERVED; + + +#define FINN_NV01_DEVICE_0_BIF_INTERFACE_ID (0x8001U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_BIF; + + +#define FINN_NV01_DEVICE_0_BSP_INTERFACE_ID (0x801cU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_BSP; + + + +#define FINN_NV01_DEVICE_0_DMA_INTERFACE_ID (0x8018U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_DMA; + + +#define FINN_NV01_DEVICE_0_FB_INTERFACE_ID (0x8013U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_FB; + + +#define FINN_NV01_DEVICE_0_FIFO_INTERFACE_ID (0x8017U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_FIFO; + + +#define FINN_NV01_DEVICE_0_GPU_INTERFACE_ID (0x8002U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_GPU; + + +#define FINN_NV01_DEVICE_0_GR_INTERFACE_ID (0x8011U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_GR; + + +#define FINN_NV01_DEVICE_0_HOST_INTERFACE_ID (0x8014U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_HOST; + + +#define FINN_NV01_DEVICE_0_INTERNAL_INTERFACE_ID (0x8020U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_INTERNAL; + + +#define FINN_NV01_DEVICE_0_MSENC_INTERFACE_ID (0x801bU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_MSENC; + + +#define FINN_NV01_DEVICE_0_NVJPG_INTERFACE_ID (0x801fU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_NVJPG; + + +#define FINN_NV01_DEVICE_0_PERF_INTERFACE_ID (0x8019U) +typedef FINN_RM_API FINN_NV01_DEVICE_0_PERF; + + + +#define FINN_NV01_DEVICE_0_RC_INTERFACE_ID (0x801dU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_RC; + + +#define FINN_NV01_DEVICE_0_OS_UNIX_INTERFACE_ID (0x801eU) +typedef FINN_RM_API FINN_NV01_DEVICE_0_OS_UNIX; + + + +#define FINN_NV0090_KERNEL_GRAPHICS_CONTEXT_INTERFACE_ID (0x9001U) +typedef FINN_RM_API FINN_NV0090_KERNEL_GRAPHICS_CONTEXT; + + +#define FINN_NV01_MEMORY_FABRIC_EXPORT_RESERVED_INTERFACE_ID (0xf400U) +typedef FINN_RM_API FINN_NV01_MEMORY_FABRIC_EXPORT_RESERVED; +#define FINN_NV01_MEMORY_FABRIC_EXPORT_EXPORT_INTERFACE_ID (0xf401U) +typedef FINN_RM_API FINN_NV01_MEMORY_FABRIC_EXPORT_EXPORT; + + +#define FINN_NV01_MEMORY_FABRIC_IMPORT_RESERVED_INTERFACE_ID (0xf500U) +typedef FINN_RM_API FINN_NV01_MEMORY_FABRIC_IMPORT_RESERVED; +#define FINN_NV01_MEMORY_FABRIC_IMPORT_IMPORT_INTERFACE_ID (0xf501U) +typedef FINN_RM_API FINN_NV01_MEMORY_FABRIC_IMPORT_IMPORT; + + +#define FINN_NV_MEMORY_FABRIC_RESERVED_INTERFACE_ID (0xf800U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_RESERVED; +#define FINN_NV_MEMORY_FABRIC_FABRIC_INTERFACE_ID (0xf801U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_FABRIC; + + +#define FINN_NV_MEMORY_FABRIC_IMPORT_V2_RESERVED_INTERFACE_ID (0xf900U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_IMPORT_V2_RESERVED; +#define FINN_NV_MEMORY_FABRIC_IMPORT_V2_IMPORT_INTERFACE_ID (0xf901U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_IMPORT_V2_IMPORT; + + +#define FINN_NV_MEMORY_FABRIC_EXPORTED_REF_RESERVED_INTERFACE_ID (0xfa00U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_EXPORTED_REF_RESERVED; +#define 
FINN_NV_MEMORY_FABRIC_EXPORTED_REF_EXPORT_REF_INTERFACE_ID (0xfa01U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_EXPORTED_REF_EXPORT_REF; + + +#define FINN_NV_MEMORY_FABRIC_IMPORTED_REF_RESERVED_INTERFACE_ID (0xfb00U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_IMPORTED_REF_RESERVED; +#define FINN_NV_MEMORY_FABRIC_IMPORTED_REF_IMPORT_REF_INTERFACE_ID (0xfb01U) +typedef FINN_RM_API FINN_NV_MEMORY_FABRIC_IMPORTED_REF_IMPORT_REF; + + + +#define FINN_NV20_SUBDEVICE_0_RESERVED_INTERFACE_ID (0x208000U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_RESERVED; + + +#define FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID (0x208008U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_BIOS; + + +#define FINN_NV20_SUBDEVICE_0_BUS_INTERFACE_ID (0x208018U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_BUS; + + +#define FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID (0x20802aU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_CE; + + + + + +#define FINN_NV20_SUBDEVICE_0_CLK_INTERFACE_ID (0x208010U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_CLK; + + + +#define FINN_NV20_SUBDEVICE_0_DMA_INTERFACE_ID (0x208025U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_DMA; + + +#define FINN_NV20_SUBDEVICE_0_DMABUF_INTERFACE_ID (0x20803AU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_DMABUF; + + +#define FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID (0x208034U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_ECC; + + + +#define FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID (0x208003U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_EVENT; + + + +#define FINN_NV20_SUBDEVICE_0_FB_INTERFACE_ID (0x208013U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FB; + + +#define FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID (0x208011U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FIFO; + + +#define FINN_NV20_SUBDEVICE_0_FLA_INTERFACE_ID (0x208035U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FLA; + + +#define FINN_NV20_SUBDEVICE_0_FLCN_INTERFACE_ID (0x208031U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FLCN; + + +#define FINN_NV20_SUBDEVICE_0_FUSE_INTERFACE_ID (0x208002U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_FUSE; + + + +#define FINN_NV20_SUBDEVICE_0_GPIO_INTERFACE_ID (0x208023U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GPIO; + + + +#define FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID (0x208001U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GPU; + + + +#define FINN_NV20_SUBDEVICE_0_GR_INTERFACE_ID (0x208012U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GR; + + +#define FINN_NV20_SUBDEVICE_0_GRMGR_INTERFACE_ID (0x208038U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GRMGR; + + +#define FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID (0x208036U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_GSP; + + +#define FINN_NV20_SUBDEVICE_0_HSHUB_INTERFACE_ID (0x208041U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_HSHUB; + + +#define FINN_NV20_SUBDEVICE_0_I2C_INTERFACE_ID (0x208006U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_I2C; + + + +#define FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID (0x20800aU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_INTERNAL; + + +#define FINN_NV20_SUBDEVICE_0_LPWR_INTERFACE_ID (0x208028U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_LPWR; + + + +#define FINN_NV20_SUBDEVICE_0_MC_INTERFACE_ID (0x208017U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_MC; + + + +#define FINN_NV20_SUBDEVICE_0_NVD_INTERFACE_ID (0x208024U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_NVD; + + + +#define FINN_NV20_SUBDEVICE_0_NVLINK_INTERFACE_ID (0x208030U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_NVLINK; + + + + + + +#define FINN_NV20_SUBDEVICE_0_PERF_INTERFACE_ID (0x208020U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_PERF; + + + +#define 
FINN_NV20_SUBDEVICE_0_RC_INTERFACE_ID (0x208022U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_RC; + + + +#define FINN_NV20_SUBDEVICE_0_TIMER_INTERFACE_ID (0x208004U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_TIMER; + + + +#define FINN_NV20_SUBDEVICE_0_OS_UNIX_INTERFACE_ID (0x20803dU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_OS_UNIX; + + +#define FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL_INTERFACE_ID (0x208040U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_VGPU_MGR_INTERNAL; + + +#define FINN_NV20_SUBDEVICE_0_VOLT_INTERFACE_ID (0x208032U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_0_VOLT; + + + +#define FINN_NV2081_BINAPI_RESERVED_INTERFACE_ID (0x208100U) +typedef FINN_RM_API FINN_NV2081_BINAPI_RESERVED; +#define FINN_NV2081_BINAPI_INTERFACE_ID (0x208101U) +typedef FINN_RM_API FINN_NV2081_BINAPI; + + +#define FINN_NV2082_BINAPI_RESERVED_INTERFACE_ID (0x208200U) +typedef FINN_RM_API FINN_NV2082_BINAPI_RESERVED; +#define FINN_NV2082_BINAPI_INTERFACE_ID (0x208201U) +typedef FINN_RM_API FINN_NV2082_BINAPI; + + +#define FINN_NV20_SUBDEVICE_DIAG_RESERVED_INTERFACE_ID (0x208f00U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_RESERVED; + + +#define FINN_NV20_SUBDEVICE_DIAG_BIF_INTERFACE_ID (0x208f07U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_BIF; + + +#define FINN_NV20_SUBDEVICE_DIAG_BUS_INTERFACE_ID (0x208f18U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_BUS; + + + +#define FINN_NV20_SUBDEVICE_DIAG_DMA_INTERFACE_ID (0x208f14U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_DMA; + + +#define FINN_NV20_SUBDEVICE_DIAG_EVENT_INTERFACE_ID (0x208f10U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_EVENT; + + +#define FINN_NV20_SUBDEVICE_DIAG_FB_INTERFACE_ID (0x208f05U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_FB; + + +#define FINN_NV20_SUBDEVICE_DIAG_FBIO_INTERFACE_ID (0x208f0aU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_FBIO; + + +#define FINN_NV20_SUBDEVICE_DIAG_FIFO_INTERFACE_ID (0x208f04U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_FIFO; + + +#define FINN_NV20_SUBDEVICE_DIAG_GPU_INTERFACE_ID (0x208f11U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_GPU; + + +#define FINN_NV20_SUBDEVICE_DIAG_GR_INTERFACE_ID (0x208f12U) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_GR; + + + +#define FINN_NV20_SUBDEVICE_DIAG_MMU_INTERFACE_ID (0x208f0bU) +typedef FINN_RM_API FINN_NV20_SUBDEVICE_DIAG_MMU; + + + +#define FINN_NV30_GSYNC_RESERVED_INTERFACE_ID (0x30f100U) +typedef FINN_RM_API FINN_NV30_GSYNC_RESERVED; +#define FINN_NV30_GSYNC_GSYNC_INTERFACE_ID (0x30f101U) +typedef FINN_RM_API FINN_NV30_GSYNC_GSYNC; + + +#define FINN_NV40_I2C_RESERVED_INTERFACE_ID (0x402c00U) +typedef FINN_RM_API FINN_NV40_I2C_RESERVED; +#define FINN_NV40_I2C_I2C_INTERFACE_ID (0x402c01U) +typedef FINN_RM_API FINN_NV40_I2C_I2C; + + +#define FINN_NV50_THIRD_PARTY_P2P_P2P_INTERFACE_ID (0x503c01U) +typedef FINN_RM_API FINN_NV50_THIRD_PARTY_P2P_P2P; + + +#define FINN_NV50_THIRD_PARTY_P2P_RESERVED_INTERFACE_ID (0x503c00U) +typedef FINN_RM_API FINN_NV50_THIRD_PARTY_P2P_RESERVED; + + +#define FINN_NV50_CHANNEL_GPFIFO_RESERVED_INTERFACE_ID (0x506f00U) +typedef FINN_RM_API FINN_NV50_CHANNEL_GPFIFO_RESERVED; +#define FINN_NV50_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID (0x506f01U) +typedef FINN_RM_API FINN_NV50_CHANNEL_GPFIFO_GPFIFO; + + +#define FINN_NV50_DISPLAY_RESERVED_INTERFACE_ID (0x507000U) +typedef FINN_RM_API FINN_NV50_DISPLAY_RESERVED; + + +#define FINN_NV50_DISPLAY_CHNCTL_INTERFACE_ID (0x507001U) +typedef FINN_RM_API FINN_NV50_DISPLAY_CHNCTL; + + +#define FINN_NV50_DISPLAY_EVENT_INTERFACE_ID (0x507009U) +typedef FINN_RM_API 
FINN_NV50_DISPLAY_EVENT; + + +#define FINN_NV50_DISPLAY_OR_INTERFACE_ID (0x507004U) +typedef FINN_RM_API FINN_NV50_DISPLAY_OR; + + +#define FINN_NV50_DISPLAY_RG_INTERFACE_ID (0x507002U) +typedef FINN_RM_API FINN_NV50_DISPLAY_RG; + + +#define FINN_NV50_DISPLAY_SEQ_INTERFACE_ID (0x507003U) +typedef FINN_RM_API FINN_NV50_DISPLAY_SEQ; + + +#define FINN_NV50_DISPLAY_SYSTEM_INTERFACE_ID (0x507007U) +typedef FINN_RM_API FINN_NV50_DISPLAY_SYSTEM; + + + +#define FINN_NV50_DEFERRED_API_CLASS_RESERVED_INTERFACE_ID (0x508000U) +typedef FINN_RM_API FINN_NV50_DEFERRED_API_CLASS_RESERVED; +#define FINN_NV50_DEFERRED_API_CLASS_DEFERRED_INTERFACE_ID (0x508001U) +typedef FINN_RM_API FINN_NV50_DEFERRED_API_CLASS_DEFERRED; + + +#define FINN_GT200_DEBUGGER_RESERVED_INTERFACE_ID (0x83de00U) +typedef FINN_RM_API FINN_GT200_DEBUGGER_RESERVED; + + +#define FINN_GT200_DEBUGGER_DEBUG_INTERFACE_ID (0x83de03U) +typedef FINN_RM_API FINN_GT200_DEBUGGER_DEBUG; + + +#define FINN_GT200_DEBUGGER_FIFO_INTERFACE_ID (0x83de02U) +typedef FINN_RM_API FINN_GT200_DEBUGGER_FIFO; + + +#define FINN_GT200_DEBUGGER_INTERNAL_INTERFACE_ID (0x83de04U) +typedef FINN_RM_API FINN_GT200_DEBUGGER_INTERNAL; + + + +#define FINN_NV9010_VBLANK_CALLBACK_RESERVED_INTERFACE_ID (0x901000U) +typedef FINN_RM_API FINN_NV9010_VBLANK_CALLBACK_RESERVED; +#define FINN_NV9010_VBLANK_CALLBACK_INTERFACE_ID (0x901001U) +typedef FINN_RM_API FINN_NV9010_VBLANK_CALLBACK; + + +#define FINN_FERMI_CONTEXT_SHARE_A_RESERVED_INTERFACE_ID (0x906700U) +typedef FINN_RM_API FINN_FERMI_CONTEXT_SHARE_A_RESERVED; +#define FINN_FERMI_CONTEXT_SHARE_A_TPC_PARTITION_INTERFACE_ID (0x906701U) +typedef FINN_RM_API FINN_FERMI_CONTEXT_SHARE_A_TPC_PARTITION; +#define FINN_FERMI_CONTEXT_SHARE_A_CWD_WATERMARK_INTERFACE_ID (0x906702U) +typedef FINN_RM_API FINN_FERMI_CONTEXT_SHARE_A_CWD_WATERMARK; + + +#define FINN_GF100_CHANNEL_GPFIFO_RESERVED_INTERFACE_ID (0x906f00U) +typedef FINN_RM_API FINN_GF100_CHANNEL_GPFIFO_RESERVED; +#define FINN_GF100_CHANNEL_GPFIFO_GPFIFO_INTERFACE_ID (0x906f01U) +typedef FINN_RM_API FINN_GF100_CHANNEL_GPFIFO_GPFIFO; +#define FINN_GF100_CHANNEL_GPFIFO_EVENT_INTERFACE_ID (0x906f02U) +typedef FINN_RM_API FINN_GF100_CHANNEL_GPFIFO_EVENT; + + +#define FINN_GF100_DISP_SW_RESERVED_INTERFACE_ID (0x907200U) +typedef FINN_RM_API FINN_GF100_DISP_SW_RESERVED; +#define FINN_GF100_DISP_SW_DISP_SW_INTERFACE_ID (0x907201U) +typedef FINN_RM_API FINN_GF100_DISP_SW_DISP_SW; + + +#define FINN_GF100_TIMED_SEMAPHORE_SW_RESERVED_INTERFACE_ID (0x907400U) +typedef FINN_RM_API FINN_GF100_TIMED_SEMAPHORE_SW_RESERVED; +#define FINN_GF100_TIMED_SEMAPHORE_SW_SEM_INTERFACE_ID (0x907401U) +typedef FINN_RM_API FINN_GF100_TIMED_SEMAPHORE_SW_SEM; + + +#define FINN_GF100_REMAPPER_RESERVED_INTERFACE_ID (0x907f00U) +typedef FINN_RM_API FINN_GF100_REMAPPER_RESERVED; +#define FINN_GF100_REMAPPER_REMAPPER_INTERFACE_ID (0x907f01U) +typedef FINN_RM_API FINN_GF100_REMAPPER_REMAPPER; + + +#define FINN_GF100_ZBC_CLEAR_RESERVED_INTERFACE_ID (0x909600U) +typedef FINN_RM_API FINN_GF100_ZBC_CLEAR_RESERVED; +#define FINN_GF100_ZBC_CLEAR_ZBC_INTERFACE_ID (0x909601U) +typedef FINN_RM_API FINN_GF100_ZBC_CLEAR_ZBC; + + +#define FINN_GF100_PROFILER_RESERVED_INTERFACE_ID (0x90cc00U) +typedef FINN_RM_API FINN_GF100_PROFILER_RESERVED; + + +#define FINN_GF100_PROFILER_HWPM_INTERFACE_ID (0x90cc01U) +typedef FINN_RM_API FINN_GF100_PROFILER_HWPM; + + +#define FINN_GF100_PROFILER_NVLINK_INTERFACE_ID (0x90cc02U) +typedef FINN_RM_API FINN_GF100_PROFILER_NVLINK; + + +#define FINN_GF100_PROFILER_POWER_INTERFACE_ID (0x90cc03U) 
+typedef FINN_RM_API FINN_GF100_PROFILER_POWER; + + +#define FINN_NV_EVENT_BUFFER_RESERVED_INTERFACE_ID (0x90cd00U) +typedef FINN_RM_API FINN_NV_EVENT_BUFFER_RESERVED; +#define FINN_NV_EVENT_BUFFER_EVENT_INTERFACE_ID (0x90cd01U) +typedef FINN_RM_API FINN_NV_EVENT_BUFFER_EVENT; + + +#define FINN_GF100_SUBDEVICE_GRAPHICS_RESERVED_INTERFACE_ID (0x90e000U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_GRAPHICS_RESERVED; +#define FINN_GF100_SUBDEVICE_GRAPHICS_GRAPHICS_INTERFACE_ID (0x90e001U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_GRAPHICS_GRAPHICS; + + +#define FINN_GF100_SUBDEVICE_FB_RESERVED_INTERFACE_ID (0x90e100U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_FB_RESERVED; +#define FINN_GF100_SUBDEVICE_FB_FB_INTERFACE_ID (0x90e101U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_FB_FB; + + +#define FINN_GF100_SUBDEVICE_MASTER_RESERVED_INTERFACE_ID (0x90e600U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_MASTER_RESERVED; +#define FINN_GF100_SUBDEVICE_MASTER_MASTER_INTERFACE_ID (0x90e601U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_MASTER_MASTER; + + +#define FINN_GF100_SUBDEVICE_INFOROM_RESERVED_INTERFACE_ID (0x90e700U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_INFOROM_RESERVED; +#define FINN_GF100_SUBDEVICE_INFOROM_BBX_INTERFACE_ID (0x90e701U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_INFOROM_BBX; +#define FINN_GF100_SUBDEVICE_INFOROM_RPR_INTERFACE_ID (0x90e702U) +typedef FINN_RM_API FINN_GF100_SUBDEVICE_INFOROM_RPR; + + + +#define FINN_GF100_HDACODEC_RESERVED_INTERFACE_ID (0x90ec00U) +typedef FINN_RM_API FINN_GF100_HDACODEC_RESERVED; +#define FINN_GF100_HDACODEC_HDACODEC_INTERFACE_ID (0x90ec01U) +typedef FINN_RM_API FINN_GF100_HDACODEC_HDACODEC; + + +#define FINN_FERMI_VASPACE_A_RESERVED_INTERFACE_ID (0x90f100U) +typedef FINN_RM_API FINN_FERMI_VASPACE_A_RESERVED; +#define FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID (0x90f101U) +typedef FINN_RM_API FINN_FERMI_VASPACE_A_VASPACE; + + +#define FINN_KEPLER_CHANNEL_GROUP_A_RESERVED_INTERFACE_ID (0xa06c00U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GROUP_A_RESERVED; +#define FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO_INTERFACE_ID (0xa06c01U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GROUP_A_GPFIFO; +#define FINN_KEPLER_CHANNEL_GROUP_A_INTERNAL_INTERFACE_ID (0xa06c02U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GROUP_A_INTERNAL; + + +#define FINN_KEPLER_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xa06f00U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_A_RESERVED; + + +#define FINN_KEPLER_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID (0xa06f02U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_A_EVENT; + + + +#define FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xa06f01U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO; + + +#define FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL_INTERFACE_ID (0xa06f03U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_A_INTERNAL; + + +#define FINN_KEPLER_DEVICE_VGPU_VGPU_DISPLAY_INTERFACE_ID (0xa08001U) +typedef FINN_RM_API FINN_KEPLER_DEVICE_VGPU_VGPU_DISPLAY; +#define FINN_KEPLER_DEVICE_VGPU_VGPU_MEMORY_INTERFACE_ID (0xa08002U) +typedef FINN_RM_API FINN_KEPLER_DEVICE_VGPU_VGPU_MEMORY; +#define FINN_KEPLER_DEVICE_VGPU_VGPU_OTHERS_INTERFACE_ID (0xa08003U) +typedef FINN_RM_API FINN_KEPLER_DEVICE_VGPU_VGPU_OTHERS; + + + +#define FINN_NVA083_GRID_DISPLAYLESS_RESERVED_INTERFACE_ID (0xa08300U) +typedef FINN_RM_API FINN_NVA083_GRID_DISPLAYLESS_RESERVED; +#define FINN_NVA083_GRID_DISPLAYLESS_VIRTUAL_DISPLAY_INTERFACE_ID (0xa08301U) +typedef FINN_RM_API FINN_NVA083_GRID_DISPLAYLESS_VIRTUAL_DISPLAY; + + +#define 
FINN_NVA084_HOST_VGPU_DEVICE_KERNEL_HOST_VGPU_DEVICE_KERNEL_INTERFACE_ID (0xa08401U) +typedef FINN_RM_API FINN_NVA084_HOST_VGPU_DEVICE_KERNEL_HOST_VGPU_DEVICE_KERNEL; + + +#define FINN_NVENC_SW_SESSION_NVENC_SW_SESSION_INTERFACE_ID (0xa0bc01U) +typedef FINN_RM_API FINN_NVENC_SW_SESSION_NVENC_SW_SESSION; + + +#define FINN_NVFBC_SW_SESSION_NVFBC_SW_SESSION_INTERFACE_ID (0xa0bd01U) +typedef FINN_RM_API FINN_NVFBC_SW_SESSION_NVFBC_SW_SESSION; + + +#define FINN_GK110_SUBDEVICE_GRAPHICS_RESERVED_INTERFACE_ID (0xa0e000U) +typedef FINN_RM_API FINN_GK110_SUBDEVICE_GRAPHICS_RESERVED; +#define FINN_GK110_SUBDEVICE_GRAPHICS_GRAPHICS_INTERFACE_ID (0xa0e001U) +typedef FINN_RM_API FINN_GK110_SUBDEVICE_GRAPHICS_GRAPHICS; + + +#define FINN_GK110_SUBDEVICE_FB_RESERVED_INTERFACE_ID (0xa0e100U) +typedef FINN_RM_API FINN_GK110_SUBDEVICE_FB_RESERVED; +#define FINN_GK110_SUBDEVICE_FB_FB_INTERFACE_ID (0xa0e101U) +typedef FINN_RM_API FINN_GK110_SUBDEVICE_FB_FB; + + +#define FINN_KEPLER_CHANNEL_GPFIFO_B_RESERVED_INTERFACE_ID (0xa16f00U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_B_RESERVED; +#define FINN_KEPLER_CHANNEL_GPFIFO_B_GPFIFO_INTERFACE_ID (0xa16f01U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_B_GPFIFO; +#define FINN_KEPLER_CHANNEL_GPFIFO_B_EVENT_INTERFACE_ID (0xa16f02U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_B_EVENT; + + +#define FINN_KEPLER_CHANNEL_GPFIFO_C_RESERVED_INTERFACE_ID (0xa26f00U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_C_RESERVED; +#define FINN_KEPLER_CHANNEL_GPFIFO_C_GPFIFO_INTERFACE_ID (0xa26f01U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_C_GPFIFO; +#define FINN_KEPLER_CHANNEL_GPFIFO_C_EVENT_INTERFACE_ID (0xa26f02U) +typedef FINN_RM_API FINN_KEPLER_CHANNEL_GPFIFO_C_EVENT; + + +#define FINN_MAXWELL_FAULT_BUFFER_A_RESERVED_INTERFACE_ID (0xb06900U) +typedef FINN_RM_API FINN_MAXWELL_FAULT_BUFFER_A_RESERVED; +#define FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER_INTERFACE_ID (0xb06901U) +typedef FINN_RM_API FINN_MAXWELL_FAULT_BUFFER_A_FAULTBUFFER; + + +#define FINN_MAXWELL_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xb06f00U) +typedef FINN_RM_API FINN_MAXWELL_CHANNEL_GPFIFO_A_RESERVED; +#define FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xb06f01U) +typedef FINN_RM_API FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO; +#define FINN_MAXWELL_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID (0xb06f02U) +typedef FINN_RM_API FINN_MAXWELL_CHANNEL_GPFIFO_A_EVENT; + + +#define FINN_MAXWELL_PROFILER_RESERVED_INTERFACE_ID (0xb0cc00U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_RESERVED; + + +#define FINN_MAXWELL_PROFILER_INTERNAL_INTERFACE_ID (0xb0cc02U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_INTERNAL; + + +#define FINN_MAXWELL_PROFILER_PROFILER_INTERFACE_ID (0xb0cc01U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_PROFILER; + + +#define FINN_MAXWELL_PROFILER_CONTEXT_RESERVED_INTERFACE_ID (0xb1cc00U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_CONTEXT_RESERVED; +#define FINN_MAXWELL_PROFILER_CONTEXT_PROFILER_INTERFACE_ID (0xb1cc01U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_CONTEXT_PROFILER; + + +#define FINN_MAXWELL_PROFILER_DEVICE_RESERVED_INTERFACE_ID (0xb2cc00U) +typedef FINN_RM_API FINN_MAXWELL_PROFILER_DEVICE_RESERVED; + + + +#define FINN_MAXWELL_SEC2_SEC2_INTERFACE_ID (0xb6b901U) +typedef FINN_RM_API FINN_MAXWELL_SEC2_SEC2; + + +#define FINN_PASCAL_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xc06f00U) +typedef FINN_RM_API FINN_PASCAL_CHANNEL_GPFIFO_A_RESERVED; +#define FINN_PASCAL_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xc06f01U) +typedef FINN_RM_API FINN_PASCAL_CHANNEL_GPFIFO_A_GPFIFO; +#define 
FINN_PASCAL_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID (0xc06f02U) +typedef FINN_RM_API FINN_PASCAL_CHANNEL_GPFIFO_A_EVENT; + + +#define FINN_GP100_SUBDEVICE_GRAPHICS_RESERVED_INTERFACE_ID (0xc0e000U) +typedef FINN_RM_API FINN_GP100_SUBDEVICE_GRAPHICS_RESERVED; +#define FINN_GP100_SUBDEVICE_GRAPHICS_GRAPHICS_INTERFACE_ID (0xc0e001U) +typedef FINN_RM_API FINN_GP100_SUBDEVICE_GRAPHICS_GRAPHICS; + + +#define FINN_GP100_SUBDEVICE_FB_RESERVED_INTERFACE_ID (0xc0e100U) +typedef FINN_RM_API FINN_GP100_SUBDEVICE_FB_RESERVED; +#define FINN_GP100_SUBDEVICE_FB_FB_INTERFACE_ID (0xc0e101U) +typedef FINN_RM_API FINN_GP100_SUBDEVICE_FB_FB; + + +#define FINN_VOLTA_GSP_GSP_INTERFACE_ID (0xc31001U) +typedef FINN_RM_API FINN_VOLTA_GSP_GSP; + + +#define FINN_ACCESS_COUNTER_NOTIFY_BUFFER_RESERVED_INTERFACE_ID (0xc36500U) +typedef FINN_RM_API FINN_ACCESS_COUNTER_NOTIFY_BUFFER_RESERVED; +#define FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER_INTERFACE_ID (0xc36501U) +typedef FINN_RM_API FINN_ACCESS_COUNTER_NOTIFY_BUFFER_ACCESS_CNTR_BUFFER; + + +#define FINN_MMU_FAULT_BUFFER_RESERVED_INTERFACE_ID (0xc36900U) +typedef FINN_RM_API FINN_MMU_FAULT_BUFFER_RESERVED; +#define FINN_MMU_FAULT_BUFFER_MMU_FAULT_BUFFER_INTERFACE_ID (0xc36901U) +typedef FINN_RM_API FINN_MMU_FAULT_BUFFER_MMU_FAULT_BUFFER; + + +#define FINN_VOLTA_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xc36f00U) +typedef FINN_RM_API FINN_VOLTA_CHANNEL_GPFIFO_A_RESERVED; +#define FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xc36f01U) +typedef FINN_RM_API FINN_VOLTA_CHANNEL_GPFIFO_A_GPFIFO; +#define FINN_VOLTA_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID (0xc36f02U) +typedef FINN_RM_API FINN_VOLTA_CHANNEL_GPFIFO_A_EVENT; + + +#define FINN_NVC370_DISPLAY_RESERVED_INTERFACE_ID (0xc37000U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_RESERVED; + + +#define FINN_NVC370_DISPLAY_CHNCTL_INTERFACE_ID (0xc37001U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_CHNCTL; + + +#define FINN_NVC370_DISPLAY_EVENT_INTERFACE_ID (0xc37009U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_EVENT; + + +#define FINN_NVC370_DISPLAY_RG_INTERFACE_ID (0xc37002U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_RG; + + +#define FINN_NVC370_DISPLAY_VERIF_INTERFACE_ID (0xc37006U) +typedef FINN_RM_API FINN_NVC370_DISPLAY_VERIF; + + +#define FINN_NVC372_DISPLAY_SW_RESERVED_INTERFACE_ID (0xc37200U) +typedef FINN_RM_API FINN_NVC372_DISPLAY_SW_RESERVED; + + +#define FINN_NVC372_DISPLAY_SW_CHNCTL_INTERFACE_ID (0xc37201U) +typedef FINN_RM_API FINN_NVC372_DISPLAY_SW_CHNCTL; + + + +#define FINN_GV100_SUBDEVICE_GRAPHICS_RESERVED_INTERFACE_ID (0xc3e000U) +typedef FINN_RM_API FINN_GV100_SUBDEVICE_GRAPHICS_RESERVED; +#define FINN_GV100_SUBDEVICE_GRAPHICS_GRAPHICS_INTERFACE_ID (0xc3e001U) +typedef FINN_RM_API FINN_GV100_SUBDEVICE_GRAPHICS_GRAPHICS; + + +#define FINN_GV100_SUBDEVICE_FB_RESERVED_INTERFACE_ID (0xc3e100U) +typedef FINN_RM_API FINN_GV100_SUBDEVICE_FB_RESERVED; +#define FINN_GV100_SUBDEVICE_FB_FB_INTERFACE_ID (0xc3e101U) +typedef FINN_RM_API FINN_GV100_SUBDEVICE_FB_FB; + + +#define FINN_TURING_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xc46f00U) +typedef FINN_RM_API FINN_TURING_CHANNEL_GPFIFO_A_RESERVED; +#define FINN_TURING_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xc46f01U) +typedef FINN_RM_API FINN_TURING_CHANNEL_GPFIFO_A_GPFIFO; +#define FINN_TURING_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID (0xc46f02U) +typedef FINN_RM_API FINN_TURING_CHANNEL_GPFIFO_A_EVENT; + + +#define FINN_AMPERE_CHANNEL_GPFIFO_A_RESERVED_INTERFACE_ID (0xc56f00U) +typedef FINN_RM_API FINN_AMPERE_CHANNEL_GPFIFO_A_RESERVED; +#define 
FINN_AMPERE_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID (0xc56f01U) +typedef FINN_RM_API FINN_AMPERE_CHANNEL_GPFIFO_A_GPFIFO; +#define FINN_AMPERE_CHANNEL_GPFIFO_A_EVENT_INTERFACE_ID (0xc56f02U) +typedef FINN_RM_API FINN_AMPERE_CHANNEL_GPFIFO_A_EVENT; + + +#define FINN_AMPERE_SMC_PARTITION_REF_RESERVED_INTERFACE_ID (0xc63700U) +typedef FINN_RM_API FINN_AMPERE_SMC_PARTITION_REF_RESERVED; +#define FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS_INTERFACE_ID (0xc63701U) +typedef FINN_RM_API FINN_AMPERE_SMC_PARTITION_REF_EXEC_PARTITIONS; + + +#define FINN_AMPERE_SMC_EXEC_PARTITION_REF_RESERVED_INTERFACE_ID (0xc63800U) +typedef FINN_RM_API FINN_AMPERE_SMC_EXEC_PARTITION_REF_RESERVED; +#define FINN_AMPERE_SMC_EXEC_PARTITION_REF_EXEC_PARTITION_INTERFACE_ID (0xc63801U) +typedef FINN_RM_API FINN_AMPERE_SMC_EXEC_PARTITION_REF_EXEC_PARTITION; + + +#define FINN_MMU_VIDMEM_ACCESS_BIT_BUFFER_RESERVED_INTERFACE_ID (0xc76300U) +typedef FINN_RM_API FINN_MMU_VIDMEM_ACCESS_BIT_BUFFER_RESERVED; +#define FINN_MMU_VIDMEM_ACCESS_BIT_BUFFER_VIDMEM_ACCESS_BIT_BUFFER_INTERFACE_ID (0xc76301U) +typedef FINN_RM_API FINN_MMU_VIDMEM_ACCESS_BIT_BUFFER_VIDMEM_ACCESS_BIT_BUFFER; + + + +#define FINN_NVE2_SYNCPOINT_BASE_RESERVED_INTERFACE_ID (0xe2ad00U) +typedef FINN_RM_API FINN_NVE2_SYNCPOINT_BASE_RESERVED; +#define FINN_NVE2_SYNCPOINT_BASE_SYNCPOINT_BASE_INTERFACE_ID (0xe2ad01U) +typedef FINN_RM_API FINN_NVE2_SYNCPOINT_BASE_SYNCPOINT_BASE; diff --git a/src/common/sdk/nvidia/inc/mmu_fmt_types.h b/src/common/sdk/nvidia/inc/mmu_fmt_types.h new file mode 100644 index 000000000..5a81c3d67 --- /dev/null +++ b/src/common/sdk/nvidia/inc/mmu_fmt_types.h @@ -0,0 +1,131 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: mmu_fmt_types.finn +// + + + + +/*! + * @file mmu_fmt_types.h + * + * @brief Types used to describre MMU HW formats. + */ +#include "nvtypes.h" + +// Forward declarations. + + +/*! + * Generic MMU page directory/table level format description. + * + * Since the terminology of page directories and tables varies, + * the following describes the interpretation assumed here. + * + * Each level of virtual address translation is described by a range of + * virtual address bits. 
+ * These bits index into a contiguous range of physical memory referred to + * generally as a "page level." + * Page level memory is interpreted as an array of entries, with each entry + * describing the next step of virtual to physical translation. + * + * Each entry in a given level may be interpreted as either a PDE or PTE. + * 1. A PDE (page directory entry) points to one or more "sub-levels" that + * continue the VA translation recursively. + * 2. A PTE (page table entry) is the base case, pointing to a physical page. + * + * The decision to treat an entry as a PDE or PTE may be static for a level. + * Levels that only contain PDEs are referred to as page directories. + * Levels that only contain PTEs are referred to as page tables. + * + * However, some formats have levels that may contain a mix of PDEs and PTEs, + * with the intpretation based on a "cutoff" bit in each entry (e.g. PTE valid bit). + * Such levels are referred to as "polymorphic page levels" since they can be + * viewed as both a page directory and a page table. + */ +typedef struct MMU_FMT_LEVEL { + /*! + * First virtual address bit that this page level covers. + */ + NvU8 virtAddrBitLo; + + /*! + * Last virtual address bit that this page level covers. + */ + NvU8 virtAddrBitHi; + + /*! + * Size in bytes of each entry within a level instance. + */ + NvU8 entrySize; + + /*! + * Indicates if this level can contain PTEs. + */ + NvBool bPageTable; + + /*! + * Number of sub-levels pointed to by PDEs in this level in + * range [0, MMU_FMT_MAX_SUB_LEVELS]. + * 0 indicates this level cannot contain PDEs. + */ + NvU8 numSubLevels; + + /*! + * Array of sub-level formats of length numSubLevels. + * + * @warning This array results in a circular reference to MMU_FMT_LEVEL. + * This can present an issue for FINN serialization and may have to + * be refactored before MMU_FMT_LEVEL can be serialized. + */ + NV_DECLARE_ALIGNED(struct MMU_FMT_LEVEL *subLevels, 8); +} MMU_FMT_LEVEL; + +/*! + * Maximum number of pointers to sub-levels within a page directory entry. + * + * Standard page directory entries (PDEs) point to a single sub-level, + * either the next page directory level in the topology or a leaf page table. + * + * However, some formats contain PDEs that point to more than one sub-level. + * These sub-levels are translated by HW in parallel to support multiple + * page sizes at a higher granularity (e.g. for migration between + * 4K system memory pages and big video memory pages for GPU MMU). + * + * The current supported formats have a maximum of 2 parallel sub-levels, + * often referred to as "dual PDE" or "dual page table" support. + * + * Example for Fermi GPU HW: + * Sub-level 0 corresponds to big page table pointer. + * Sub-level 1 corresponds to small page table pointer. + * + * This number is very unlikely to change, but it is defined to + * simplify SW handling, encouraging loops over "dual copy-paste." + */ +#define MMU_FMT_MAX_SUB_LEVELS 2 diff --git a/src/common/sdk/nvidia/inc/nv-hypervisor.h b/src/common/sdk/nvidia/inc/nv-hypervisor.h new file mode 100644 index 000000000..ddc6a9134 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nv-hypervisor.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_HYPERVISOR_H_ +#define _NV_HYPERVISOR_H_ + +#include + +// Enums for supported hypervisor types. +// New hypervisor type should be added before OS_HYPERVISOR_CUSTOM_FORCED +typedef enum _HYPERVISOR_TYPE +{ + OS_HYPERVISOR_XEN = 0, + OS_HYPERVISOR_VMWARE, + OS_HYPERVISOR_HYPERV, + OS_HYPERVISOR_KVM, + OS_HYPERVISOR_PARALLELS, + OS_HYPERVISOR_CUSTOM_FORCED, + OS_HYPERVISOR_UNKNOWN +} HYPERVISOR_TYPE; + +#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE 0 +#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1 +#define CMD_VGPU_VFIO_REGISTER_MDEV 2 +#define CMD_VGPU_VFIO_PRESENT 3 + +#define MAX_VF_COUNT_PER_GPU 64 + +typedef enum _VGPU_TYPE_INFO +{ + VGPU_TYPE_NAME = 0, + VGPU_TYPE_DESCRIPTION, + VGPU_TYPE_INSTANCES, +} VGPU_TYPE_INFO; + +typedef struct +{ + void *vgpuVfioRef; + void *waitQueue; + void *nv; + NvU32 *vgpuTypeIds; + NvU32 numVgpuTypes; + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU8 function; + NvBool is_virtfn; +} vgpu_vfio_info; + +typedef struct +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU8 function; + NvBool isNvidiaAttached; + NvBool isMdevAttached; +} vgpu_vf_pci_info; + +typedef enum VGPU_CMD_PROCESS_VF_INFO_E +{ + NV_VGPU_SAVE_VF_INFO = 0, + NV_VGPU_REMOVE_VF_PCI_INFO = 1, + NV_VGPU_REMOVE_VF_MDEV_INFO = 2, + NV_VGPU_GET_VF_INFO = 3 +} VGPU_CMD_PROCESS_VF_INFO; + +typedef enum VGPU_DEVICE_STATE_E +{ + NV_VGPU_DEV_UNUSED = 0, + NV_VGPU_DEV_OPENED = 1, + NV_VGPU_DEV_IN_USE = 2 +} VGPU_DEVICE_STATE; + +typedef enum _VMBUS_CMD_TYPE +{ + VMBUS_CMD_TYPE_INVALID = 0, + VMBUS_CMD_TYPE_SETUP = 1, + VMBUS_CMD_TYPE_SENDPACKET = 2, + VMBUS_CMD_TYPE_CLEANUP = 3, +} VMBUS_CMD_TYPE; + +typedef struct +{ + NvU32 request_id; + NvU32 page_count; + NvU64 *pPfns; + void *buffer; + NvU32 bufferlen; +} vmbus_send_packet_cmd_params; + + +typedef struct +{ + NvU32 override_sint; + NvU8 *nv_guid; +} vmbus_setup_cmd_params; + +/* + * Function prototypes + */ + +HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void); + +#endif // _NV_HYPERVISOR_H_ diff --git a/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h b/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h new file mode 100644 index 000000000..183f9b431 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nv-kernel-interface-api.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_KERNEL_INTERFACE_API_H +#define _NV_KERNEL_INTERFACE_API_H +/************************************************************************************************************** +* +* File: nv-kernel-interface-api.h +* +* Description: +* Defines the NV API related macros. +* +**************************************************************************************************************/ + +#if NVOS_IS_UNIX && NVCPU_IS_X86_64 && defined(__use_altstack__) +#define NV_API_CALL __attribute__((altstack(0))) +#else +#define NV_API_CALL +#endif + +#endif /* _NV_KERNEL_INTERFACE_API_H */ diff --git a/src/common/sdk/nvidia/inc/nv_stdarg.h b/src/common/sdk/nvidia/inc/nv_stdarg.h new file mode 100644 index 000000000..b23f7f7b8 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nv_stdarg.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef _NV_STDARG_H_ +#define _NV_STDARG_H_ + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) + #include "conftest.h" + #if defined(NV_LINUX_STDARG_H_PRESENT) + #include + #else + #include + #endif +#else + #include +#endif + +#endif // _NV_STDARG_H_ diff --git a/src/common/sdk/nvidia/inc/nv_vgpu_types.h b/src/common/sdk/nvidia/inc/nv_vgpu_types.h new file mode 100644 index 000000000..388625a52 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nv_vgpu_types.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: nv_vgpu_types.finn +// + + + + +/* XAPIGEN - this file is not suitable for (nor needed by) xapigen. */ +/* Rather than #ifdef out every such include in every sdk */ +/* file, punt here. */ +#include "nvtypes.h" + /* ! XAPIGEN */ + +#define VM_UUID_SIZE 16 +#define INVALID_VGPU_DEV_INST 0xFFFFFFFF +#define MAX_VGPU_DEVICES_PER_VM 16 + +/* This enum represents the current state of guest dependent fields */ +typedef enum GUEST_VM_INFO_STATE { + GUEST_VM_INFO_STATE_UNINITIALIZED = 0, + GUEST_VM_INFO_STATE_INITIALIZED = 1, +} GUEST_VM_INFO_STATE; + +/* This enum represents types of VM identifiers */ +typedef enum VM_ID_TYPE { + VM_ID_DOMAIN_ID = 0, + VM_ID_UUID = 1, +} VM_ID_TYPE; + +/* This structure represents VM identifier */ +typedef union VM_ID { + NvU8 vmUuid[VM_UUID_SIZE]; + NV_DECLARE_ALIGNED(NvU64 vmId, 8); +} VM_ID; diff --git a/src/common/sdk/nvidia/inc/nvcd.h b/src/common/sdk/nvidia/inc/nvcd.h new file mode 100644 index 000000000..6c462eb26 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvcd.h @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2002 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVCD_H +#define NVCD_H + +//****************************************************************************** +// +// Module Name: NVCD.H +// +// This file contains structures and constants that define the NV specific +// data to be returned by the miniport's new VideoBugCheckCallback. The callback +// can return up to 4k bytes of data that will be appended to the dump file. +// The bugcheck callback is currently only invoked for bugcheck 0xEA failures. +// The buffer returned contains a top level header, followed by a variable +// number of data records. The top level header contains an ASCII signature +// that can be located with a search as well as a GUID for unique identification +// of the crash dump layout, i.e. future bugcheck callbacks can define a new +// GUID to redefine the entire crash dump layout. A checksum and crash dump +// size values are also included to insure crash dump data integrity. The +// data records each contain a header indicating what group the data belongs to +// as well as the actual record type and size. This flexibility allows groups +// to define and extend the information in their records without adversely +// affecting the code in the debugger extension that has to parse and display +// this information. The structures for these individual data records are +// contained in separate header files for each group. 
+// +//****************************************************************************** +#include "nvtypes.h" + +// Define the GUID type for non-Windows OSes + +#ifndef GUID_DEFINED +#define GUID_DEFINED +typedef struct _GUID { + NvU32 Data1; + NvU16 Data2; + NvU16 Data3; + NvU8 Data4[8]; +} GUID, *LPGUID; +#endif + +// Define the crash dump ASCII tag value and the dump format GUIDs +#define NVCD_SIGNATURE 0x4443564E /* ASCII crash dump signature "NVCD" */ + +#define GUID_NVCD_DUMP_V1 { /* e3d5dc6e-db7d-4e28-b09e-f59a942f4a24 */ \ + 0xe3d5dc6e, 0xdb7d, 0x4e28, \ + {0xb0, 0x9e, 0xf5, 0x9a, 0x94, 0x2f, 0x4a, 0x24}\ +}; +#define GUID_NVCD_DUMP_V2 { /* cd978ac1-3aa1-494b-bb5b-e93daf2b0536 */ \ + 0xcd978ac1, 0x3aa1, 0x494b, \ + {0xbb, 0x5b, 0xe9, 0x3d, 0xaf, 0x2b, 0x05, 0x36}\ +}; +#define GUID_NVCDMP_RSVD1 { /* 391fc656-a37c-4574-8d57-b29a562f909b */ \ + 0x391fc656, 0xa37c, 0x4574, \ + {0x8d, 0x57, 0xb2, 0x9a, 0x56, 0x2f, 0x90, 0x9b}\ +}; +#define GUID_NVCDMP_RSVD2 { /* c6d9982d-1ba9-4f80-badd-3dc992d41b46 */ \ + 0xc6d9982d, 0x1ba9, 0x4f80, \ + {0xba, 0xdd, 0x3d, 0xc9, 0x92, 0xd4, 0x1b, 0x46}\ +}; + +// RC 2.0 NVCD (NV crash dump) GUID +#define GUID_NVCD_RC2_V1 { /* d3793533-a4a6-46d3-97f2-1446cfdc1ee7 */ \ + 0xd3793533, 0xa4a6, 0x46d3, \ + {0x97, 0xf2, 0x14, 0x46, 0xcf, 0xdc, 0x1e, 0xe7}\ +}; + + +// Define NVIDIA crash dump header structure (First data block in crash dump) +typedef struct +{ + NvU32 dwSignature; // ASCII crash dump signature "NVCD" + GUID gVersion; // GUID for crashdump file (Version) + NvU32 dwSize; // Size of the crash dump data + NvU8 cCheckSum; // Crash dump checksum (Zero = ignore) + NvU8 cFiller[3]; // Filler (Possible CRC value) +} NVCD_HEADER; +typedef NVCD_HEADER *PNVCD_HEADER; + +// Define the crash dump record groups +typedef enum +{ + NvcdGroup = 0, // NVIDIA crash dump group (System NVCD records) + RmGroup = 1, // Resource manager group (RM records) + DriverGroup = 2, // Driver group (Driver/miniport records) + HardwareGroup = 3, // Hardware group (Hardware records) + InstrumentationGroup = 4, // Instrumentation group (Special records) +} NVCD_GROUP_TYPE; + +// Define the crash dump group record types (Single end of data record type) +typedef enum +{ + EndOfData = 0, // End of crash dump data record + CompressedDataHuffman = 1, // Compressed huffman data +} NVCD_RECORD_TYPE; + +// Define the crash dump data record header +typedef struct +{ + NvU8 cRecordGroup; // Data record group (NVCD_GROUP_TYPE) + NvU8 cRecordType; // Data record type (See group header) + NvU16 wRecordSize; // Size of the data record in bytes +} NVCD_RECORD; +typedef NVCD_RECORD *PNVCD_RECORD; + +// Define the EndOfData record structure +typedef struct +{ + NVCD_RECORD Header; // End of data record header +} EndOfData_RECORD; +typedef EndOfData_RECORD *PEndOfData_RECORD; + +// +// Generic mini-record type (keep the size at 64bits) +// +typedef struct +{ + NVCD_RECORD Header; // header for mini record + NvU32 Payload; // 32 bit payload value +} NVCDMiniRecord; +typedef NVCDMiniRecord *PNVCDMiniRecord; + +// +// Generic record collection type +// +typedef struct +{ + NVCD_RECORD Header; // generic header to binary type this in OCA buffer + // size is actual size of this struct + all items in collection + NvU32 NumRecords; // number of records this collection contain + NVCD_RECORD FirstRecord; // first record, its data follow +} NVCDRecordCollection; +typedef NVCDRecordCollection *PNVCDRecordCollection; + +#define COLL_HEADER_SIZEOF (sizeof(NVCDRecordCollection) - sizeof(NVCD_RECORD)) + + +#endif 
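The banner comment in nvcd.h above describes a flat layout: an NVCD_HEADER carrying the "NVCD" ASCII signature, a version GUID, the dump size, and a checksum, followed by a stream of records that each begin with an NVCD_RECORD giving their group, type, and size. As a rough, hypothetical sketch of how a consumer might walk that stream (it is not part of the header, and it assumes wRecordSize counts the whole record including its NVCD_RECORD header, which the comments above do not guarantee):

#include "nvtypes.h"
#include "nvcd.h"

/* Illustrative only: walk the record stream of an NVCD crash dump buffer. */
static void walkNvcdRecords(const NvU8 *pBuf, NvU32 bufSize)
{
    const NVCD_HEADER *pHdr = (const NVCD_HEADER *)pBuf;
    NvU32 offset = sizeof(NVCD_HEADER);

    /* A valid dump starts with the ASCII signature "NVCD". */
    if ((bufSize < sizeof(NVCD_HEADER)) || (pHdr->dwSignature != NVCD_SIGNATURE))
        return;

    while ((offset + sizeof(NVCD_RECORD)) <= bufSize)
    {
        const NVCD_RECORD *pRec = (const NVCD_RECORD *)(pBuf + offset);

        if (pRec->cRecordType == EndOfData)
            break;                        /* end-of-data record terminates the stream */

        /* Dispatch on (pRec->cRecordGroup, pRec->cRecordType) here. */

        if (pRec->wRecordSize < sizeof(NVCD_RECORD))
            break;                        /* defensive: malformed record, stop walking */
        offset += pRec->wRecordSize;      /* assumed to include the NVCD_RECORD header */
    }
}

Decoding the per-group payloads (RmGroup, DriverGroup, HardwareGroup, ...) is left out; the loop only shows how the record headers chain the entries together.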
// NVCD_H diff --git a/src/common/sdk/nvidia/inc/nvdeprecated.h b/src/common/sdk/nvidia/inc/nvdeprecated.h new file mode 100644 index 000000000..75d99ccf7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvdeprecated.h @@ -0,0 +1,133 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef NV_DEPRECATED_H +#define NV_DEPRECATED_H + +/*! + * @file nvdeprecated.h + * + * @brief Deprecation in the NVIDIA SDK + * + * Why deprecate: + * Code is deprecated when you want to remove a feature entirely, but cannot + * do so immediately, nor in a single step, due to a requirement to remain + * backward compatible (keep older clients working). + * + * Backwards compatibility: + * Deprecated symbols and features may be supported for an unknown amount of + * time. "Deprecated" means that we want that time interval to be small, but + * that may not be under our control. + * + * This file provides the following ways to support deprecated features: + * + * 1) Defining NV_STRICT_SDK before including a SDK headers. This will + * remove *all* deprecated APIs from the NV SDK, for example: + * + * #define NV_STRICT_SDK + * #include "sdk/foo.h" + * + * 2) Defining the per-feature compatibility setting before including the + * SDK, for example: + * + * #define NV_DEPRECATED_NVOS_STATUS 0 // enable compatibility mode + * #include "sdk/foo.h" + * + * How to deprecate a feature in the SDK: + * + * 1) Define the deprecated feature in this file. Often, you'll want to + * start with SDK compatibility enabled by default, for example: + * + * #ifndef NV_DEPRECATED_FEATURE_NAME + * #define NV_DEPRECATED_FEATURE_NAME 0 + * #endif + * + * 2) Wrap SDK definitions with compatibility #ifdefs: + * + * #if NV_DEPRECATED_COMPAT(FEATURE_NAME) + * ...legacy definitions... + * #endif + * + * 3) In the API implementation, consider stubbing or wrapping the new API. + * + * 4) Update older clients: file bugs to track this effort. Bug numbers + * should be placed in comments near the deprecated features that RM is + * supporting. That way, people reading the code can easily find the + * bug(s) that show the status of completely removing the deprecated + * feature. + * + * 5) Once all the client (calling) code has been upgraded, change the + * macro to "compatibility off". 
This is a little more cautious and + * conservative than jumping directly to step (6), because it allows you + * to recover from a test failure (remember, there are extended, offline + * tests that are not, unfortunately, run in DVS, nor per-CL checkin) + * with a tiny change in code. + * + * 6) Once the code base has migrated, remove all definitions from the SDK. + */ + +/* + * \defgroup Deprecated SDK Features + * + * 0 = Compatibility on by default (i.e.: defines present in SDK) + * 1 = Compatibility off by default (i.e.: defines NOT in SDK) + * + * @{ + */ + +/*! + * RM Config Get/Set API is deprecated and RmControl should be used instead. + * Bugs: XXXXXX, XXXXXX, etc + */ +#ifndef NV_DEPRECATED_RM_CONFIG_GET_SET +#define NV_DEPRECATED_RM_CONFIG_GET_SET 0 +#endif + +#ifndef NV_DEPRECATED_NVOS_STATUS +/* NVOS_STATUS codes is deprecated. NV_STATUS to be used instead */ +#define NV_DEPRECATED_NVOS_STATUS 0 +#endif + +#ifndef NV_DEPRECATED_RM_STATUS +/* RM_STATUS codes is deprecated. NV_STATUS to be used instead */ +#define NV_DEPRECATED_RM_STATUS 0 +#endif + +#ifndef NV_DEPRECATED_UNSAFE_HANDLES +/* Using NvU32 for handles is deprecated. NvHandle to be used instead */ +#define NV_DEPRECATED_UNSAFE_HANDLES 0 +#endif + +/**@}*/ + +/*! + * Utility Macros + */ + +#ifdef NV_STRICT_SDK +// In strict mode, all obsolete features are unavailable in the SDK. +#define NV_DEPRECATED_COMPAT(feature) 0 +#else +#define NV_DEPRECATED_COMPAT(feature) (!NV_DEPRECATED_##feature) +#endif + +#endif diff --git a/src/common/sdk/nvidia/inc/nvdisptypes.h b/src/common/sdk/nvidia/inc/nvdisptypes.h new file mode 100644 index 000000000..90d8ac015 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvdisptypes.h @@ -0,0 +1,92 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /***************************************************************************\ +|* *| +|* NV Display Common Types *| +|* *| +|* defines the common display types. *| +|* *| + \***************************************************************************/ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. 
+// Source file: nvdisptypes.finn +// + + + + +#include "nvtypes.h" + + + +typedef enum NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP { + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_DEFAULT = 0, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_16_422 = 1, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444 = 2, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_20_422 = 3, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_422 = 4, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444 = 5, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444 = 6, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_32_422 = 7, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_36_444 = 8, + NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_48_444 = 9, +} NV_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP; + + + +typedef NvU32 NV_DISP_LOCK_PIN; + +#define NV_DISP_LOCK_PIN_0 0x0 +#define NV_DISP_LOCK_PIN_1 0x1 +#define NV_DISP_LOCK_PIN_2 0x2 +#define NV_DISP_LOCK_PIN_3 0x3 +#define NV_DISP_LOCK_PIN_4 0x4 +#define NV_DISP_LOCK_PIN_5 0x5 +#define NV_DISP_LOCK_PIN_6 0x6 +#define NV_DISP_LOCK_PIN_7 0x7 +#define NV_DISP_LOCK_PIN_8 0x8 +#define NV_DISP_LOCK_PIN_9 0x9 +#define NV_DISP_LOCK_PIN_A 0xA +#define NV_DISP_LOCK_PIN_B 0xB +#define NV_DISP_LOCK_PIN_C 0xC +#define NV_DISP_LOCK_PIN_D 0xD +#define NV_DISP_LOCK_PIN_E 0xE +#define NV_DISP_LOCK_PIN_F 0xF + +// Value used solely for HW initialization +#define NV_DISP_LOCK_PIN_UNSPECIFIED 0x10 + + + +typedef NvU32 NV_DISP_LOCK_MODE; + +#define NV_DISP_LOCK_MODE_NO_LOCK 0x0 +#define NV_DISP_LOCK_MODE_FRAME_LOCK 0x1 +#define NV_DISP_LOCK_MODE_RASTER_LOCK 0x3 + diff --git a/src/common/sdk/nvidia/inc/nvdump.h b/src/common/sdk/nvidia/inc/nvdump.h new file mode 100644 index 000000000..32c6c76d3 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvdump.h @@ -0,0 +1,163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//////////////////////////////////////////////////////////////////////////////// +// +// Module: nvDump.h +// Shared definitions for HW/SW dumping facility residing in resman/rc. 
+// +// ************************************************************************** + +#ifndef _NVDUMP_H_ +#define _NVDUMP_H_ + +#include "nvtypes.h" + +//------------------------------------------------------------------------- +// DEFINTIONS +//------------------------------------------------------------------------- + +#define NVDUMP_SUB_ALLOC_NOT_ENCODED 0x01 +#define NVDUMP_SUB_ALLOC_VALID 0x02 +#define NVDUMP_SUB_ALLOC_HAS_MORE 0x04 + +#define NVDUMP_DEBUG_BUFFER_MAX_SIZE 4096 // max of 4K per buffer +#define NVDUMP_DEBUG_BUFFER_MAX_SUBALLOCATIONS 256 + +#define NVDUMP_CONFIG_SIGNATURE "NVDUMPCONFIGSIG" // multiple of 8 bytes + +typedef enum +{ + // The following components are GPU instance specific: + NVDUMP_COMPONENT_DEBUG_BUFFERS = 0, + NVDUMP_COMPONENT_ENG_MC, + NVDUMP_COMPONENT_ENG_FIFO, + NVDUMP_COMPONENT_ENG_GRAPHICS, + NVDUMP_COMPONENT_ENG_FB, + NVDUMP_COMPONENT_ENG_DISP, + NVDUMP_COMPONENT_ENG_FAN, + NVDUMP_COMPONENT_ENG_THERMAL, + NVDUMP_COMPONENT_ENG_FUSE, + NVDUMP_COMPONENT_ENG_VBIOS, + NVDUMP_COMPONENT_ENG_PERF, + NVDUMP_COMPONENT_ENG_BUS, + NVDUMP_COMPONENT_ENG_PMU, + NVDUMP_COMPONENT_ENG_ALL, + NVDUMP_COMPONENT_ENG_CE, + NVDUMP_COMPONENT_ENG_GPU, + NVDUMP_COMPONENT_ENG_LPWR, + NVDUMP_COMPONENT_ENG_NVD, + NVDUMP_COMPONENT_ENG_VOLT, + NVDUMP_COMPONENT_ENG_CLK, + NVDUMP_COMPONENT_ENG_SEC2, + NVDUMP_COMPONENT_ENG_NVLINK, + NVDUMP_COMPONENT_ENG_BSP, + NVDUMP_COMPONENT_ENG_DPU, + NVDUMP_COMPONENT_ENG_FBFLCN, + NVDUMP_COMPONENT_ENG_HDA, + NVDUMP_COMPONENT_ENG_MSENC, + NVDUMP_COMPONENT_ENG_GSP, + NVDUMP_COMPONENT_ENG_INFOROM, + NVDUMP_COMPONENT_ENG_GCX, + // The following components are global to the system: + NVDUMP_COMPONENT_SYS_RCDB = 0x400, + NVDUMP_COMPONENT_SYS_SYSINFO, + NVDUMP_COMPONENT_SYS_ALL, + // The following components are nvlog related. + NVDUMP_COMPONENT_NVLOG_RM = 0x800, + NVDUMP_COMPONENT_NVLOG_ALL, + // Reserved + NVDUMP_COMPONENT_NVLOG_RESERVED = 0xB00, +} NVDUMP_COMPONENT; + +#define NVDUMP_IS_GPU_COMPONENT(c) ((c) < NVDUMP_COMPONENT_SYS_RCDB) +#define NVDUMP_IS_SYS_COMPONENT(c) (((c) >= NVDUMP_COMPONENT_SYS_RCDB) && \ + ((c) < NVDUMP_COMPONENT_NVLOG_RM)) +#define NVDUMP_IS_NVLOG_COMPONENT(c) (((c) >= NVDUMP_COMPONENT_NVLOG_RM) && \ + ((c) < NVDUMP_COMPONENT_NVLOG_RESERVED)) + +typedef enum +{ + NVDUMP_BUFFER_PROVIDED = 0, // Dump buffer provided by caller + NVDUMP_BUFFER_ALLOCATE, // Dump buffer to be allocated + NVDUMP_BUFFER_COUNT, // Just count, no buffer needed +} NVDUMP_BUFFER_POLICY; + +typedef enum +{ + NVDUMP_STATUS_IDLE, + NVDUMP_STATUS_ERROR, + NVDUMP_STATUS_COUNT_REQUESTED, + NVDUMP_STATUS_COUNT_COMPLETE, + NVDUMP_STATUS_DUMP_REQUESTED, + NVDUMP_STATUS_DUMP_BUFFER_FULL, + NVDUMP_STATUS_DUMP_END_OF_MSG, + NVDUMP_STATUS_DUMP_COMPLETE, +} NVDUMP_STATUS; + +// +// The following structures require that all elements are precisely sized +// and aligned on natural boundaries. 
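//
// Illustrative sketch only -- not part of this header. The NVDUMP_IS_*_COMPONENT()
// macros above partition NVDUMP_COMPONENT values by range, which is how a caller
// can tell whether a requested component is per-GPU, system-global, or nvlog
// related. The helper name below is hypothetical.
//
static const char *nvdumpComponentClass(NVDUMP_COMPONENT component)
{
    if (NVDUMP_IS_GPU_COMPONENT(component))
        return "per-GPU engine component";   // below NVDUMP_COMPONENT_SYS_RCDB (0x400)
    if (NVDUMP_IS_SYS_COMPONENT(component))
        return "system-global component";    // NVDUMP_COMPONENT_SYS_RCDB up to NVDUMP_COMPONENT_NVLOG_RM
    if (NVDUMP_IS_NVLOG_COMPONENT(component))
        return "nvlog component";            // NVDUMP_COMPONENT_NVLOG_RM up to the reserved range
    return "reserved or unknown component";
}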
+// + +typedef struct +{ + NvP64 address NV_ALIGN_BYTES(8); + NvU32 size; + NvU32 curNumBytes; +} NVDUMP_BUFFER; + +// Max number of bytes that can be returned in a dump buffer +#define NVDUMP_MAX_DUMP_SIZE (1 << 20) // 1 MB + +typedef struct +{ + NvU8 sigHead[sizeof(NVDUMP_CONFIG_SIGNATURE)] NV_ALIGN_BYTES(8); + + NvP64 debuggerControlFuncAddr NV_ALIGN_BYTES(8); + NVDUMP_BUFFER buffer; + NvU32 gpuSelect; + NvU32 component; + NvU32 dumpStatus; + NvU32 rmStatus; + + NvU8 sigTail[sizeof(NVDUMP_CONFIG_SIGNATURE)] NV_ALIGN_BYTES(8); +} NVDUMP_CONFIG; + +typedef struct +{ + NvU16 length; + NvU16 start; + NvU16 end; + NvU16 flags; + NvU16 tag; +} NVDUMP_SUB_ALLOC_HEADER; + +// +// Export is needed to allow remote kernel debuggers without symbols +// to find global NVDUMP_CONFIG variables in driver export address table. +// +#define NVDUMP_EXPORT + +#endif // _NVDUMP_H_ + diff --git a/src/common/sdk/nvidia/inc/nverror.h b/src/common/sdk/nvidia/inc/nverror.h new file mode 100644 index 000000000..0d32daae7 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nverror.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVERROR_H +#define NVERROR_H +/****************************************************************************** +* +* File: nverror.h +* +* Description: +* This file contains the error codes set when the error notifier +* is signaled. 
+* +******************************************************************************/ + +#define ROBUST_CHANNEL_GR_EXCEPTION (13) +#define ROBUST_CHANNEL_GR_ERROR_SW_NOTIFY (13) +#define ROBUST_CHANNEL_FAKE_ERROR (14) +#define ROBUST_CHANNEL_DISP_MISSED_NOTIFIER (19) +#define ROBUST_CHANNEL_MPEG_ERROR_SW_METHOD (20) +#define ROBUST_CHANNEL_ME_ERROR_SW_METHOD (21) +#define ROBUST_CHANNEL_VP_ERROR_SW_METHOD (22) +#define ROBUST_CHANNEL_RC_LOGGING_ENABLED (23) +#define ROBUST_CHANNEL_VP_ERROR (27) +#define ROBUST_CHANNEL_VP2_ERROR (28) +#define ROBUST_CHANNEL_BSP_ERROR (29) +#define ROBUST_CHANNEL_BAD_ADDR_ACCESS (30) +#define ROBUST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT (31) +#define ROBUST_CHANNEL_PBDMA_ERROR (32) +#define ROBUST_CHANNEL_SEC_ERROR (33) +#define ROBUST_CHANNEL_MSVLD_ERROR (34) +#define ROBUST_CHANNEL_MSPDEC_ERROR (35) +#define ROBUST_CHANNEL_MSPPP_ERROR (36) +#define ROBUST_CHANNEL_CE0_ERROR (39) +#define ROBUST_CHANNEL_CE1_ERROR (40) +#define ROBUST_CHANNEL_CE2_ERROR (41) +#define ROBUST_CHANNEL_VIC_ERROR (42) +#define ROBUST_CHANNEL_RESETCHANNEL_VERIF_ERROR (43) +#define ROBUST_CHANNEL_GR_FAULT_DURING_CTXSW (44) +#define ROBUST_CHANNEL_PREEMPTIVE_REMOVAL (45) +#define ROBUST_CHANNEL_NVENC0_ERROR (47) +#define ROBUST_CHANNEL_GPU_ECC_DBE (48) +#define PMU_ERROR (59) +#define ROBUST_CHANNEL_SEC2_ERROR (60) +#define PMU_BREAKPOINT (61) +#define PMU_HALT_ERROR (62) +#define INFOROM_PAGE_RETIREMENT_EVENT (63) +#define INFOROM_PAGE_RETIREMENT_FAILURE (64) +#define INFOROM_DRAM_RETIREMENT_EVENT (63) +#define INFOROM_DRAM_RETIREMENT_FAILURE (64) +#define ROBUST_CHANNEL_NVENC1_ERROR (65) +#define ROBUST_CHANNEL_NVDEC0_ERROR (68) +#define ROBUST_CHANNEL_GR_CLASS_ERROR (69) +#define ROBUST_CHANNEL_CE3_ERROR (70) +#define ROBUST_CHANNEL_CE4_ERROR (71) +#define ROBUST_CHANNEL_CE5_ERROR (72) +#define ROBUST_CHANNEL_NVENC2_ERROR (73) +#define NVLINK_ERROR (74) +#define ROBUST_CHANNEL_CE6_ERROR (75) +#define ROBUST_CHANNEL_CE7_ERROR (76) +#define ROBUST_CHANNEL_CE8_ERROR (77) +#define VGPU_START_ERROR (78) +#define ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS (79) +#define PBDMA_PUSHBUFFER_CRC_MISMATCH (80) +#define ROBUST_CHANNEL_VGA_SUBSYSTEM_ERROR (81) +#define ROBUST_CHANNEL_NVJPG0_ERROR (82) +#define ROBUST_CHANNEL_NVDEC1_ERROR (83) +#define ROBUST_CHANNEL_NVDEC2_ERROR (84) +#define ROBUST_CHANNEL_CE9_ERROR (85) +#define ROBUST_CHANNEL_OFA0_ERROR (86) +#define NVTELEMETRY_DRIVER_REPORT (87) +#define ROBUST_CHANNEL_NVDEC3_ERROR (88) +#define ROBUST_CHANNEL_NVDEC4_ERROR (89) +#define LTC_ERROR (90) +#define RESERVED_XID (91) +#define EXCESSIVE_SBE_INTERRUPTS (92) +#define INFOROM_ERASE_LIMIT_EXCEEDED (93) +#define ROBUST_CHANNEL_CONTAINED_ERROR (94) +#define ROBUST_CHANNEL_UNCONTAINED_ERROR (95) +#define GSP_RPC_TIMEOUT (119) +#define GSP_ERROR (120) +#define C2C_ERROR (121) +#define ROBUST_CHANNEL_LAST_ERROR (C2C_ERROR) + + +// Indexed CE reference +#define ROBUST_CHANNEL_CE_ERROR(x) \ + (x < 3 ? ROBUST_CHANNEL_CE0_ERROR + (x) : \ + ((x < 6) ? (ROBUST_CHANNEL_CE3_ERROR + (x - 3)) : \ + ((x < 9) ? 
(ROBUST_CHANNEL_CE6_ERROR + (x - 6)) : \ + ROBUST_CHANNEL_CE9_ERROR))) + +#define ROBUST_CHANNEL_IS_CE_ERROR(x) \ + ((x == ROBUST_CHANNEL_CE0_ERROR) || (x == ROBUST_CHANNEL_CE1_ERROR) || \ + (x == ROBUST_CHANNEL_CE2_ERROR) || (x == ROBUST_CHANNEL_CE3_ERROR) || \ + (x == ROBUST_CHANNEL_CE4_ERROR) || (x == ROBUST_CHANNEL_CE5_ERROR) || \ + (x == ROBUST_CHANNEL_CE6_ERROR) || (x == ROBUST_CHANNEL_CE7_ERROR) || \ + (x == ROBUST_CHANNEL_CE8_ERROR) || (x == ROBUST_CHANNEL_CE9_ERROR)) + +#define ROBUST_CHANNEL_CE_ERROR_IDX(x) \ + (((x >= ROBUST_CHANNEL_CE0_ERROR) && (x <= ROBUST_CHANNEL_CE2_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE0_ERROR) : \ + (((x >= ROBUST_CHANNEL_CE3_ERROR) && \ + (x <= ROBUST_CHANNEL_CE5_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE3_ERROR) : \ + (((x >= ROBUST_CHANNEL_CE6_ERROR) && \ + (x <= ROBUST_CHANNEL_CE8_ERROR)) ? \ + (x - ROBUST_CHANNEL_CE6_ERROR) : \ + (x - ROBUST_CHANNEL_CE9_ERROR)))) + +// Indexed NVDEC reference +#define ROBUST_CHANNEL_NVDEC_ERROR(x) \ + ((x == 0) ? \ + (ROBUST_CHANNEL_NVDEC0_ERROR) : \ + (((x >= 1) && (x <= 2)) ? (ROBUST_CHANNEL_NVDEC1_ERROR + x - 1) : \ + (ROBUST_CHANNEL_NVDEC3_ERROR + x - 3))) + +#define ROBUST_CHANNEL_IS_NVDEC_ERROR(x) \ + ((x == ROBUST_CHANNEL_NVDEC0_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC1_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC2_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC3_ERROR) || \ + (x == ROBUST_CHANNEL_NVDEC4_ERROR)) + +#define ROBUST_CHANNEL_NVDEC_ERROR_IDX(x) \ + (((x == ROBUST_CHANNEL_NVDEC0_ERROR)) ? \ + (x - ROBUST_CHANNEL_NVDEC0_ERROR) : \ + (((x >= ROBUST_CHANNEL_NVDEC1_ERROR) && \ + (x <= ROBUST_CHANNEL_NVDEC2_ERROR)) ? \ + (x - ROBUST_CHANNEL_NVDEC1_ERROR + 1) : \ + (x - ROBUST_CHANNEL_NVDEC3_ERROR + 3))) + +// Indexed NVENC reference +#define ROBUST_CHANNEL_NVENC_ERROR(x) \ + ((x == 0) ? (ROBUST_CHANNEL_NVENC0_ERROR) : \ + ((x == 1) ? (ROBUST_CHANNEL_NVENC1_ERROR) : \ + (ROBUST_CHANNEL_NVENC2_ERROR))) + +#define ROBUST_CHANNEL_IS_NVENC_ERROR(x) \ + ((x == ROBUST_CHANNEL_NVENC0_ERROR) || \ + (x == ROBUST_CHANNEL_NVENC1_ERROR) || \ + (x == ROBUST_CHANNEL_NVENC2_ERROR)) + +#define ROBUST_CHANNEL_NVENC_ERROR_IDX(x) \ + (((x == ROBUST_CHANNEL_NVENC0_ERROR)) ? \ + (x - ROBUST_CHANNEL_NVENC0_ERROR) : \ + (((x == ROBUST_CHANNEL_NVENC1_ERROR)) ? 
\ + (x - ROBUST_CHANNEL_NVENC1_ERROR + 1) : \ + (x - ROBUST_CHANNEL_NVENC2_ERROR + 2))) + +// Error Levels +#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_INFO (0) +#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_NON_FATAL (1) +#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_FATAL (2) + +#define ROBUST_CHANNEL_ERROR_STR_PUBLIC_PUBLISHED \ + {"Unknown Error", \ + "DMA Engine Error (FIFO Error 1)", \ + "DMA Engine Error (FIFO Error 2)", \ + "DMA Engine Error (FIFO Error 3)", \ + "DMA Engine Error (FIFO Error 4)", \ + "DMA Engine Error (FIFO Error 5)", \ + "DMA Engine Error (FIFO Error 6)", \ + "DMA Engine Error (FIFO Error 7)", \ + "DMA Engine Error (FIFO Error 8)", \ + "Graphics Engine Error (GR Error 1)", \ + "Graphics Engine Error (GR Error 2)", \ + "Graphics Engine Error (GR Error 3)", \ + "Graphics Engine Error (GR Error 4)", \ + "Graphics Engine Error (GR Exception Error)",\ + "Fake Error", \ + "Display Engine Error (CRTC Error 1)", \ + "Display Engine Error (CRTC Error 2)", \ + "Display Engine Error (CRTC Error 3)", \ + "Bus Interface Error (BIF Error)", \ + "Client Reported Error", \ + "Video Engine Error (MPEG Error)", \ + "Video Engine Error (ME Error)", \ + "Video Engine Error (VP Error 1)", \ + "Error Reporting Enabled", \ + "Graphics Engine Error (GR Error 6)", \ + "Graphics Engine Error (GR Error 7)", \ + "DMA Engine Error (FIFO Error 9)", \ + "Video Engine Error (VP Error 2)", \ + "Video Engine Error (VP2 Error)", \ + "Video Engine Error (BSP Error)", \ + "Access Violation Error (MMU Error 1)", \ + "Access Violation Error (MMU Error 2)", \ + "DMA Engine Error (PBDMA Error)", \ + "Security Engine Error (SEC Error)", \ + "Video Engine Error (MSVLD Error)", \ + "Video Engine Error (MSPDEC Error)", \ + "Video Engine Error (MSPPP Error)", \ + "Graphics Engine Error (FECS Error 1)", \ + "Graphics Engine Error (FECS Error 2)", \ + "DMA Engine Error (CE Error 1)", \ + "DMA Engine Error (CE Error 2)", \ + "DMA Engine Error (CE Error 3)", \ + "Video Engine Error (VIC Error)", \ + "Verification Error", \ + "Access Violation Error (MMU Error 3)", \ + "Operating System Error (OS Error 1)", \ + "Operating System Error (OS Error 2)", \ + "Video Engine Error (MSENC/NVENC0 Error)",\ + "ECC Error (DBE Error)", \ + "Power State Locked", \ + "Power State Event (RC Error)", \ + "Power State Event (Stress Test Error)", \ + "Power State Event (Thermal Event 1)", \ + "Power State Event (Thermal Event 2)", \ + "Power State Event (Power Event)", \ + "Power State Event (Thermal Event 3)", \ + "Display Engine Error (EVO Error)", \ + "FB Interface Error (FBPA Error 1)", \ + "FB Interface Error (FBPA Error 2)", \ + "PMU error", \ + "SEC2 error", \ + "PMU Breakpoint (non-fatal)", \ + "PMU Halt Error", \ + "INFOROM Page Retirement Event", \ + "INFOROM Page Retirement Failure", \ + "Video Engine Error (NVENC1 Error)", \ + "Graphics Engine Error (FECS Error 3)", \ + "Graphics Engine Error (FECS Error 4)", \ + "Video Engine Error (NVDEC0 Error)", \ + "Graphics Engine Error (GR Class Error)",\ + "DMA Engine Error (CE Error 4)", \ + "DMA Engine Error (CE Error 5)", \ + "DMA Engine Error (CE Error 6)", \ + "Video Engine Error (NVENC2 Error)", \ + "NVLink Error", \ + "DMA Engine Error (CE Error 6)", \ + "DMA Engine Error (CE Error 7)", \ + "DMA Engine Error (CE Error 8)", \ + "vGPU device cannot be started", \ + "GPU has fallen off the bus", \ + "DMA Engine Error (Pushbuffer CRC mismatch)",\ + "VGA Subsystem Error", \ + "Video JPEG Engine Error (NVJPG Error)", \ + "Video Engine Error (NVDEC1 Error)", \ + "Video Engine Error 
(NVDEC2 Error)", \ + "DMA Engine Error (CE Error 9)", \ + "Video OFA Engine Error (OFA0 Error)", \ + "NvTelemetry Driver Reoprt", \ + "Video Engine Error (NVDEC3 Error)", \ + "Video Engine Error (NVDEC4 Error)", \ + "FB Interface Error (FBPA Error 3)", \ + "Reserved Xid", \ + "Excessive SBE interrupts", \ + "INFOROM Erase Limit Exceeded", \ + "Contained error", \ + "Uncontained error" + +#define ROBUST_CHANNEL_ERROR_STR_PUBLIC \ + ROBUST_CHANNEL_ERROR_STR_PUBLIC_PUBLISHED} + +#endif // NVERROR_H diff --git a/src/common/sdk/nvidia/inc/nvfixedtypes.h b/src/common/sdk/nvidia/inc/nvfixedtypes.h new file mode 100644 index 000000000..53b6882ce --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvfixedtypes.h @@ -0,0 +1,379 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVFIXEDTYPES_INCLUDED +#define NVFIXEDTYPES_INCLUDED + +#include "nvtypes.h" + +/*! + * Fixed-point master data types. + * + * These are master-types represent the total number of bits contained within + * the FXP type. All FXP types below should be based on one of these master + * types. + */ +typedef NvS16 NvSFXP16; +typedef NvS32 NvSFXP32; +typedef NvS64 NvSFXP64; +typedef NvU16 NvUFXP16; +typedef NvU32 NvUFXP32; +typedef NvU64 NvUFXP64; + + +/*! + * Fixed-point data types. + * + * These are all integer types with precision indicated in the naming of the + * form: NvFXP_. The actual + * size of the data type is calculated as num_bits_above_radix + + * num_bit_below_radix. + * + * All of these FXP types should be based on one of the master types above. 
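 *
 * Worked example (illustrative): NvSFXP20_12 is a signed 32-bit value with 20
 * integer bits above the radix point and 12 fractional bits below it, so the
 * raw value 0x00003800 (14336) represents 14336 / 2^12 = 3.5, and 0xFFFFC800
 * represents -3.5.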
+ */ +typedef NvSFXP16 NvSFXP11_5; +typedef NvSFXP16 NvSFXP4_12; +typedef NvSFXP16 NvSFXP8_8; +typedef NvSFXP32 NvSFXP8_24; +typedef NvSFXP32 NvSFXP10_22; +typedef NvSFXP32 NvSFXP16_16; +typedef NvSFXP32 NvSFXP18_14; +typedef NvSFXP32 NvSFXP20_12; +typedef NvSFXP32 NvSFXP24_8; +typedef NvSFXP32 NvSFXP27_5; +typedef NvSFXP32 NvSFXP28_4; +typedef NvSFXP32 NvSFXP29_3; +typedef NvSFXP32 NvSFXP31_1; +typedef NvSFXP64 NvSFXP52_12; + +typedef NvUFXP16 NvUFXP0_16; +typedef NvUFXP16 NvUFXP4_12; +typedef NvUFXP16 NvUFXP8_8; +typedef NvUFXP32 NvUFXP3_29; +typedef NvUFXP32 NvUFXP4_28; +typedef NvUFXP32 NvUFXP7_25; +typedef NvUFXP32 NvUFXP8_24; +typedef NvUFXP32 NvUFXP9_23; +typedef NvUFXP32 NvUFXP10_22; +typedef NvUFXP32 NvUFXP15_17; +typedef NvUFXP32 NvUFXP16_16; +typedef NvUFXP32 NvUFXP18_14; +typedef NvUFXP32 NvUFXP20_12; +typedef NvUFXP32 NvUFXP24_8; +typedef NvUFXP32 NvUFXP25_7; +typedef NvUFXP32 NvUFXP26_6; +typedef NvUFXP32 NvUFXP28_4; + +typedef NvUFXP64 NvUFXP40_24; +typedef NvUFXP64 NvUFXP48_16; +typedef NvUFXP64 NvUFXP52_12; + +/*! + * Utility macros used in converting between signed integers and fixed-point + * notation. + * + * - COMMON - These are used by both signed and unsigned. + */ +#define NV_TYPES_FXP_INTEGER(x, y) ((x)+(y)-1):(y) +#define NV_TYPES_FXP_FRACTIONAL(x, y) ((y)-1):0 +#define NV_TYPES_FXP_FRACTIONAL_MSB(x, y) ((y)-1):((y)-1) +#define NV_TYPES_FXP_FRACTIONAL_MSB_ONE 0x00000001 +#define NV_TYPES_FXP_FRACTIONAL_MSB_ZERO 0x00000000 +#define NV_TYPES_FXP_ZERO (0) + +/*! + * - UNSIGNED - These are only used for unsigned. + */ +#define NV_TYPES_UFXP_INTEGER_MAX(x, y) (~(NVBIT((y))-1U)) +#define NV_TYPES_UFXP_INTEGER_MIN(x, y) (0U) + +/*! + * - SIGNED - These are only used for signed. + */ +#define NV_TYPES_SFXP_INTEGER_SIGN(x, y) ((x)+(y)-1):((x)+(y)-1) +#define NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE 0x00000001 +#define NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE 0x00000000 +#define NV_TYPES_SFXP_S32_SIGN_EXTENSION(x, y) 31:(x) +#define NV_TYPES_SFXP_S32_SIGN_EXTENSION_POSITIVE(x, y) 0x00000000 +#define NV_TYPES_SFXP_S32_SIGN_EXTENSION_NEGATIVE(x, y) (NVBIT(32-(x))-1U) +#define NV_TYPES_SFXP_INTEGER_MAX(x, y) (NVBIT((x))-1U) +#define NV_TYPES_SFXP_INTEGER_MIN(x, y) (~(NVBIT((x))-1U)) +#define NV_TYPES_SFXP_S64_SIGN_EXTENSION(x, y) 63:(x) +#define NV_TYPES_SFXP_S64_SIGN_EXTENSION_POSITIVE(x, y) 0x0000000000000000 +#define NV_TYPES_SFXP_S64_SIGN_EXTENSION_NEGATIVE(x, y) (NVBIT64(64-(x))-1U) +#define NV_TYPES_SFXP_S64_INTEGER_MAX(x, y) (NVBIT64((x)-1)-1U) +#define NV_TYPES_SFXP_S64_INTEGER_MIN(x, y) (~(NVBIT64((x)-1)-1U)) + +/*! + * Conversion macros used for converting between integer and fixed point + * representations. Both signed and unsigned variants. + * + * Warning: + * Note that most of the macros below can overflow if applied on values that can + * not fit the destination type. It's caller responsibility to ensure that such + * situations will not occur. + * + * Some conversions perform some commonly performed tasks other than just + * bit-shifting: + * + * - _SCALED: + * For integer -> fixed-point we add handling divisors to represent + * non-integer values. + * + * - _ROUNDED: + * For fixed-point -> integer we add rounding to integer values. 
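 *
 * Worked example (illustrative, using the UFXP 16.16 variants defined below):
 * NV_TYPES_U32_TO_UFXP_X_Y_SCALED(16, 16, 2500, 1000) evaluates to 0x00028000,
 * i.e. 2500/1000 = 2.5 in 16.16 notation, and
 * NV_TYPES_UFXP_X_Y_TO_U32_ROUNDED(16, 16, 0x00028000) evaluates to 3, because
 * the fractional MSB is set so the value rounds up.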
+ */ + +// 32-bit Unsigned FXP: +#define NV_TYPES_U32_TO_UFXP_X_Y(x, y, integer) \ + ((NvUFXP##x##_##y) (((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_U32_TO_UFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvUFXP##x##_##y) ((((((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) / \ + (scale)) + \ + ((((((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y)))) % \ + (scale)) > ((scale) >> 1)) ? 1U : 0U))) + +#define NV_TYPES_UFXP_X_Y_TO_U32(x, y, fxp) \ + ((NvU32) (DRF_VAL(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvUFXP##x##_##y) (fxp))))) + +#define NV_TYPES_UFXP_X_Y_TO_U32_ROUNDED(x, y, fxp) \ + (NV_TYPES_UFXP_X_Y_TO_U32(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvUFXP##x##_##y) (fxp))) ? \ + 1U : 0U)) + +// 64-bit Unsigned FXP +#define NV_TYPES_U64_TO_UFXP_X_Y(x, y, integer) \ + ((NvUFXP##x##_##y) (((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_U64_TO_UFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvUFXP##x##_##y) (((((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_UFXP_X_Y_TO_U64(x, y, fxp) \ + ((NvU64) (DRF_VAL64(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvUFXP##x##_##y) (fxp))))) + +#define NV_TYPES_UFXP_X_Y_TO_U64_ROUNDED(x, y, fxp) \ + (NV_TYPES_UFXP_X_Y_TO_U64(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM64(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvUFXP##x##_##y) (fxp))) ? \ + 1U : 0U)) + +// +// 32-bit Signed FXP: +// Some compilers do not support left shift negative values +// so typecast integer to NvU32 instead of NvS32 +// +// Note that there is an issue with the rounding in +// NV_TYPES_S32_TO_SFXP_X_Y_SCALED. In particular, when the signs of the +// numerator and denominator don't match, the rounding is done towards positive +// infinity, rather than away from 0. This will need to be fixed in a follow-up +// change. +// +#define NV_TYPES_S32_TO_SFXP_X_Y(x, y, integer) \ + ((NvSFXP##x##_##y) (((NvU32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_S32_TO_SFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvSFXP##x##_##y) (((((NvS32) (integer)) << \ + DRF_SHIFT(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_SFXP_X_Y_TO_S32(x, y, fxp) \ + ((NvS32) ((DRF_VAL(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvSFXP##x##_##y) (fxp)))) | \ + ((DRF_VAL(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE) ? \ + DRF_NUM(_TYPES, _SFXP, _S32_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S32_SIGN_EXTENSION_NEGATIVE((x), (y))) : \ + DRF_NUM(_TYPES, _SFXP, _S32_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S32_SIGN_EXTENSION_POSITIVE((x), (y)))))) + +/*! + * Note: The rounding action for signed numbers should ideally round away from + * 0 in both the positive and the negative regions. + * For positive numbers, we add 1 if the fractional MSb is 1. + * For negative numbers, we add -1 (equivalent to subtracting 1) if the + * fractional MSb is 1. + */ +#define NV_TYPES_SFXP_X_Y_TO_S32_ROUNDED(x, y, fxp) \ + (NV_TYPES_SFXP_X_Y_TO_S32(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvSFXP##x##_##y) (fxp))) ? \ + ((DRF_VAL(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE) ? 
1 : -1) : 0)) + +#define NV_TYPES_SFXP_X_Y_TO_FLOAT32(x, y, fxp) \ + ((NvF32) NV_TYPES_SFXP_X_Y_TO_S32(x, y, (fxp)) + \ + ((NvF32) DRF_NUM(_TYPES, _FXP, _FRACTIONAL((x), (y)), \ + ((NvSFXP##x##_##y) (fxp))) / (NvF32) (1 << (y)))) + +// +// 64-bit Signed FXP: +// Some compilers do not support left shift negative values +// so typecast integer to NvU64 instead of NvS64 +// +// Note that there is an issue with the rounding in +// NV_TYPES_S64_TO_SFXP_X_Y_SCALED. In particular, when the signs of the +// numerator and denominator don't match, the rounding is done towards positive +// infinity, rather than away from 0. This will need to be fixed in a follow-up +// change. +// +#define NV_TYPES_S64_TO_SFXP_X_Y(x, y, integer) \ + ((NvSFXP##x##_##y) (((NvU64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y))))) + +#define NV_TYPES_S64_TO_SFXP_X_Y_SCALED(x, y, integer, scale) \ + ((NvSFXP##x##_##y) (((((NvS64) (integer)) << \ + DRF_SHIFT64(NV_TYPES_FXP_INTEGER((x), (y)))) + \ + ((scale) >> 1)) / \ + (scale))) + +#define NV_TYPES_SFXP_X_Y_TO_S64(x, y, fxp) \ + ((NvS64) ((DRF_VAL64(_TYPES, _FXP, _INTEGER((x), (y)), \ + ((NvSFXP##x##_##y) (fxp)))) | \ + ((DRF_VAL64(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_NEGATIVE) ? \ + DRF_NUM64(_TYPES, _SFXP, _S64_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S64_SIGN_EXTENSION_NEGATIVE((x), (y))) : \ + DRF_NUM64(_TYPES, _SFXP, _S64_SIGN_EXTENSION((x), (y)), \ + NV_TYPES_SFXP_S64_SIGN_EXTENSION_POSITIVE((x), (y)))))) + +/*! + * Note: The rounding action for signed numbers should ideally round away from + * 0 in both the positive and the negative regions. + * For positive numbers, we add 1 if the fractional MSb is 1. + * For negative numbers, we add -1 (equivalent to subtracting 1) if the + * fractional MSb is 1. + */ +#define NV_TYPES_SFXP_X_Y_TO_S64_ROUNDED(x, y, fxp) \ + (NV_TYPES_SFXP_X_Y_TO_S64(x, y, (fxp)) + \ + (FLD_TEST_DRF_NUM64(_TYPES, _FXP, _FRACTIONAL_MSB((x), (y)), \ + NV_TYPES_FXP_FRACTIONAL_MSB_ONE, ((NvSFXP##x##_##y) (fxp))) ? \ + ((DRF_VAL64(_TYPES, _SFXP, _INTEGER_SIGN((x), (y)), (fxp)) == \ + NV_TYPES_SFXP_INTEGER_SIGN_POSITIVE) ? 1 : -1) : 0)) + +/*! + * Macros representing the single-precision IEEE 754 floating point format for + * "binary32", also known as "single" and "float". + * + * Single precision floating point format wiki [1] + * + * _SIGN + * Single bit representing the sign of the number. + * _EXPONENT + * Unsigned 8-bit number representing the exponent value by which to scale + * the mantissa. + * _BIAS - The value by which to offset the exponent to account for sign. + * _MANTISSA + * Explicit 23-bit significand of the value. When exponent != 0, this is an + * implicitly 24-bit number with a leading 1 prepended. This 24-bit number + * can be conceptualized as FXP 9.23. + * + * With these definitions, the value of a floating point number can be + * calculated as: + * (-1)^(_SIGN) * + * 2^(_EXPONENT - _EXPONENT_BIAS) * + * (1 + _MANTISSA / (1 << 23)) + */ +// [1] : http://en.wikipedia.org/wiki/Single_precision_floating-point_format +#define NV_TYPES_SINGLE_SIGN 31:31 +#define NV_TYPES_SINGLE_SIGN_POSITIVE 0x00000000 +#define NV_TYPES_SINGLE_SIGN_NEGATIVE 0x00000001 +#define NV_TYPES_SINGLE_EXPONENT 30:23 +#define NV_TYPES_SINGLE_EXPONENT_ZERO 0x00000000 +#define NV_TYPES_SINGLE_EXPONENT_BIAS 0x0000007F +#define NV_TYPES_SINGLE_MANTISSA 22:0 + + +/*! + * Helper macro to return a IEEE 754 single-precision value's mantissa as an + * unsigned FXP 9.23 value. 
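 *
 * Worked example (illustrative): 2.5f is encoded as 0x40200000 (sign 0, biased
 * exponent 0x80, mantissa 0x200000); since the exponent is nonzero, this macro
 * returns the implied leading 1 plus the mantissa as UFXP 9.23, i.e.
 * 0x00800000 + 0x00200000 = 0x00A00000, which is 1.25.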
+ * + * @param[in] single IEEE 754 single-precision value to manipulate. + * + * @return IEEE 754 single-precision values mantissa represented as an unsigned + * FXP 9.23 value. + */ +#define NV_TYPES_SINGLE_MANTISSA_TO_UFXP9_23(single) \ + ((NvUFXP9_23)(FLD_TEST_DRF(_TYPES, _SINGLE, _EXPONENT, _ZERO, single) ? \ + NV_TYPES_U32_TO_UFXP_X_Y(9, 23, 0) : \ + (NV_TYPES_U32_TO_UFXP_X_Y(9, 23, 1) + \ + DRF_VAL(_TYPES, _SINGLE, _MANTISSA, single)))) + +/*! + * Helper macro to return an IEEE 754 single-precision value's exponent, + * including the bias. + * + * @param[in] single IEEE 754 single-precision value to manipulate. + * + * @return Signed exponent value for IEEE 754 single-precision. + */ +#define NV_TYPES_SINGLE_EXPONENT_BIASED(single) \ + ((NvS32)(DRF_VAL(_TYPES, _SINGLE, _EXPONENT, single) - \ + NV_TYPES_SINGLE_EXPONENT_BIAS)) + +/*! + * NvTemp - temperature data type introduced to avoid bugs in conversion between + * various existing notations. + */ +typedef NvSFXP24_8 NvTemp; + +/*! + * Macros for NvType <-> Celsius temperature conversion. + */ +#define NV_TYPES_CELSIUS_TO_NV_TEMP(cel) \ + NV_TYPES_S32_TO_SFXP_X_Y(24,8,(cel)) +#define NV_TYPES_NV_TEMP_TO_CELSIUS_TRUNCED(nvt) \ + NV_TYPES_SFXP_X_Y_TO_S32(24,8,(nvt)) +#define NV_TYPES_NV_TEMP_TO_CELSIUS_ROUNDED(nvt) \ + NV_TYPES_SFXP_X_Y_TO_S32_ROUNDED(24,8,(nvt)) +#define NV_TYPES_NV_TEMP_TO_CELSIUS_FLOAT(nvt) \ + NV_TYPES_SFXP_X_Y_TO_FLOAT32(24,8,(nvt)) + +/*! + * Macro for NvType -> number of bits conversion + */ +#define NV_NBITS_IN_TYPE(type) (8 * sizeof(type)) + +/*! + * Macro to convert SFXP 11.5 to NvTemp. + */ +#define NV_TYPES_NVSFXP11_5_TO_NV_TEMP(x) ((NvTemp)(x) << 3) + +/*! + * Macro to convert UFXP11.5 Watts to NvU32 milli-Watts. + */ +#define NV_TYPES_NVUFXP11_5_WATTS_TO_NVU32_MILLI_WATTS(x) ((((NvU32)(x)) * ((NvU32)1000)) >> 5) + +#endif /* NVFIXEDTYPES_INCLUDED */ diff --git a/src/common/sdk/nvidia/inc/nvgputypes.h b/src/common/sdk/nvidia/inc/nvgputypes.h new file mode 100644 index 000000000..59ba45b3d --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvgputypes.h @@ -0,0 +1,179 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2006 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + /***************************************************************************\ +|* *| +|* NV GPU Types *| +|* *| +|* This header contains definitions describing NVIDIA's GPU hardware state. 
*| +|* *| + \***************************************************************************/ + + +#ifndef NVGPUTYPES_INCLUDED +#define NVGPUTYPES_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + + /***************************************************************************\ +|* NvNotification *| + \***************************************************************************/ + +/***** NvNotification Structure *****/ +/* + * NV objects return information about method completion to clients via an + * array of notification structures in main memory. + * + * The client sets the status field to NV???_NOTIFICATION_STATUS_IN_PROGRESS. + * NV fills in the NvNotification[] data structure in the following order: + * timeStamp, otherInfo32, otherInfo16, and then status. + */ + +/* memory data structures */ +typedef volatile struct NvNotificationRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvV32 info32; /* info returned depends on method 0008-000b*/ + NvV16 info16; /* info returned depends on method 000c-000d*/ + NvV16 status; /* user sets bit 15, NV sets status 000e-000f*/ +} NvNotification; + + /***************************************************************************\ +|* NvGpuSemaphore *| + \***************************************************************************/ + +/***** NvGpuSemaphore Structure *****/ +/* + * NvGpuSemaphore objects are used by the GPU to synchronize multiple + * command-streams. + * + * Please refer to class documentation for details regarding the content of + * the data[] field. + */ + +/* memory data structures */ +typedef volatile struct NvGpuSemaphoreRec { + NvV32 data[2]; /* Payload/Report data 0000-0007*/ + struct { /* 0008- */ + NvV32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 8- f*/ + } timeStamp; /* -000f*/ +} NvGpuSemaphore; + + /***************************************************************************\ +|* NvGetReport *| + \***************************************************************************/ + +/* + * NV objects, starting with Kelvin, return information such as pixel counts to + * the user via the NV*_GET_REPORT method. + * + * The client fills in the "zero" field to any nonzero value and waits until it + * becomes zero. NV fills in the timeStamp, value, and zero fields. + */ +typedef volatile struct NVGetReportRec { + struct { /* 0000- */ + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 value; /* info returned depends on method 0008-000b*/ + NvU32 zero; /* always written to zero 000c-000f*/ +} NvGetReport; + + /***************************************************************************\ +|* NvRcNotification *| + \***************************************************************************/ + +/* + * NV robust channel notification information is reported to clients via + * standard NV01_EVENT objects bound to instance of the NV*_CHANNEL_DMA and + * NV*_CHANNEL_GPFIFO objects. + */ +typedef struct NvRcNotificationRec { + struct { + NvU32 nanoseconds[2]; /* nanoseconds since Jan. 
1, 1970 0- 7*/ + } timeStamp; /* -0007*/ + NvU32 exceptLevel; /* exception level 000c-000f*/ + NvU32 exceptType; /* exception type 0010-0013*/ +} NvRcNotification; + + /***************************************************************************\ +|* NvSyncPointFence *| + \***************************************************************************/ + +/***** NvSyncPointFence Structure *****/ +/* + * NvSyncPointFence objects represent a syncpoint event. The syncPointID + * identifies the syncpoint register and the value is the value that the + * register will contain right after the event occurs. + * + * If syncPointID contains NV_INVALID_SYNCPOINT_ID then this is an invalid + * event. This is often used to indicate an event in the past (i.e. no need to + * wait). + * + * For more info on syncpoints refer to Mobile channel and syncpoint + * documentation. + */ +typedef struct NvSyncPointFenceRec { + NvU32 syncPointID; + NvU32 value; +} NvSyncPointFence; + +#define NV_INVALID_SYNCPOINT_ID ((NvU32)-1) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +#if !defined(XAPIGEN) /* NvOffset is XAPIGEN builtin type, so skip typedef */ +typedef NvU64 NvOffset; /* GPU address */ +#endif + +#define NvOffset_HI32(n) ((NvU32)(((NvU64)(n)) >> 32)) +#define NvOffset_LO32(n) ((NvU32)((NvU64)(n))) + +/* +* There are two types of GPU-UUIDs available: +* +* (1) a SHA-256 based 32 byte ID, formatted as a 64 character +* hexadecimal string as "GPU-%16x-%08x-%08x-%08x-%024x"; this is +* deprecated. +* +* (2) a SHA-1 based 16 byte ID, formatted as a 32 character +* hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the +* canonical format of a UUID); this is the default. +*/ +#define NV_GPU_UUID_SHA1_LEN (16) +#define NV_GPU_UUID_SHA256_LEN (32) +#define NV_GPU_UUID_LEN NV_GPU_UUID_SHA1_LEN + +#ifdef __cplusplus +}; +#endif + +#endif /* NVGPUTYPES_INCLUDED */ diff --git a/src/common/sdk/nvidia/inc/nvi2c.h b/src/common/sdk/nvidia/inc/nvi2c.h new file mode 100644 index 000000000..28c1ba52f --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvi2c.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _NV_I2C_H_ +#define _NV_I2C_H_ + +#define NV_I2C_MSG_WR 0x0000 +#define NV_I2C_MSG_RD 0x0001 + +typedef struct nv_i2c_msg_s +{ + NvU16 addr; + NvU16 flags; + NvU16 len; + NvU8* buf; +} nv_i2c_msg_t; + +#endif diff --git a/src/common/sdk/nvidia/inc/nvimpshared.h b/src/common/sdk/nvidia/inc/nvimpshared.h new file mode 100644 index 000000000..eb4dc7258 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvimpshared.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************************************************************\ +* * +* Description: * +* Accommodates sharing of IMP-related structures between kernel interface * +* files and core RM. * +* * +\******************************************************************************/ + +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: nvimpshared.finn +// + + + + +// +// There are only a small number of discrete dramclk frequencies available on +// the system. This structure contains IMP-relevant information associated +// with a specific dramclk frequency. +// +typedef struct DRAM_CLK_INSTANCE { + NvU32 dram_clk_freq_khz; + + NvU32 mchub_clk_khz; + + NvU32 mc_clk_khz; + + NvU32 max_iso_bw_kbps; + + // + // switch_latency_ns is the maximum time required to switch the dramclk + // frequency to the frequency specified in dram_clk_freq_khz. + // + NvU32 switch_latency_ns; +} DRAM_CLK_INSTANCE; + +// +// This table is used to collect information from other modules that is needed +// for RM IMP calculations. (Used on Tegra only.) +// +#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_UNKNOWN 0U +#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR4 1U +#define TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR5 2U + +typedef struct TEGRA_IMP_IMPORT_DATA { + // + // max_iso_bw_kbps stores the maximum possible ISO bandwidth available to + // display, assuming display is the only active ISO client. (Note that ISO + // bandwidth will typically be allocated to multiple clients, so display + // will generally not have access to the maximum possible bandwidth.) + // + NvU32 max_iso_bw_kbps; + + NvU32 dram_type; + // On Orin, each dram channel is 16 bits wide. 
+ NvU32 num_dram_channels; + + // + // dram_clk_instance stores entries for all possible dramclk frequencies, + // sorted by dramclk frequency in increasing order. + // + // "24" is expected to be larger than the actual number of required entries + // (which is provided by a BPMP API), but it can be increased if necessary. + // + // num_dram_clk_entries is filled in with the actual number of distinct + // dramclk entries. + // + NvU32 num_dram_clk_entries; + DRAM_CLK_INSTANCE dram_clk_instance[24]; +} TEGRA_IMP_IMPORT_DATA; diff --git a/src/common/sdk/nvidia/inc/nvlimits.h b/src/common/sdk/nvidia/inc/nvlimits.h new file mode 100644 index 000000000..a896adf00 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvlimits.h @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: nvlimits.finn +// + + + + +/* + * This is the maximum number of GPUs supported in a single system. + */ +#define NV_MAX_DEVICES 32 + +/* + * This is the maximum number of subdevices within a single device. + */ +#define NV_MAX_SUBDEVICES 8 + +/* + * This is the maximum length of the process name string. + */ +#define NV_PROC_NAME_MAX_LENGTH 100U + +/* + * This is the maximum number of heads per GPU. + */ +#define NV_MAX_HEADS 4 diff --git a/src/common/sdk/nvidia/inc/nvmisc.h b/src/common/sdk/nvidia/inc/nvmisc.h new file mode 100644 index 000000000..210e23798 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvmisc.h @@ -0,0 +1,915 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * nvmisc.h + */ +#ifndef __NV_MISC_H +#define __NV_MISC_H + +#ifdef __cplusplus +extern "C" { +#endif //__cplusplus + +#include "nvtypes.h" + +#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS) +// +// Miscellaneous macros useful for bit field manipulations +// +// STUPID HACK FOR CL 19434692. Will revert when fix CL is delivered bfm -> chips_a. +#ifndef BIT +#define BIT(b) (1U<<(b)) +#endif +#ifndef BIT32 +#define BIT32(b) ((NvU32)1U<<(b)) +#endif +#ifndef BIT64 +#define BIT64(b) ((NvU64)1U<<(b)) +#endif + +#endif + +// +// It is recommended to use the following bit macros to avoid macro name +// collisions with other src code bases. +// +#ifndef NVBIT +#define NVBIT(b) (1U<<(b)) +#endif +#ifndef NVBIT_TYPE +#define NVBIT_TYPE(b, t) (((t)1U)<<(b)) +#endif +#ifndef NVBIT32 +#define NVBIT32(b) NVBIT_TYPE(b, NvU32) +#endif +#ifndef NVBIT64 +#define NVBIT64(b) NVBIT_TYPE(b, NvU64) +#endif + +// Helper macro's for 32 bit bitmasks +#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3) +#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5) +#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F)) +#define NV_BITMASK32_SET(pChannelMask, chId) \ + (pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId)) +#define NV_BITMASK32_GET(pChannelMask, chId) \ + ((pChannelMask)[NV_BITMASK32_IDX(chId)] & NVBIT(NV_BITMASK32_OFFSET(chId))) + + +// Index of the 'on' bit (assuming that there is only one). +// Even if multiple bits are 'on', result is in range of 0-31. +#define BIT_IDX_32(n) \ + (((((n) & 0xFFFF0000U) != 0U) ? 0x10U: 0U) | \ + ((((n) & 0xFF00FF00U) != 0U) ? 0x08U: 0U) | \ + ((((n) & 0xF0F0F0F0U) != 0U) ? 0x04U: 0U) | \ + ((((n) & 0xCCCCCCCCU) != 0U) ? 0x02U: 0U) | \ + ((((n) & 0xAAAAAAAAU) != 0U) ? 0x01U: 0U) ) + +// Index of the 'on' bit (assuming that there is only one). +// Even if multiple bits are 'on', result is in range of 0-63. +#define BIT_IDX_64(n) \ + (((((n) & 0xFFFFFFFF00000000ULL) != 0U) ? 0x20U: 0U) | \ + ((((n) & 0xFFFF0000FFFF0000ULL) != 0U) ? 0x10U: 0U) | \ + ((((n) & 0xFF00FF00FF00FF00ULL) != 0U) ? 0x08U: 0U) | \ + ((((n) & 0xF0F0F0F0F0F0F0F0ULL) != 0U) ? 0x04U: 0U) | \ + ((((n) & 0xCCCCCCCCCCCCCCCCULL) != 0U) ? 0x02U: 0U) | \ + ((((n) & 0xAAAAAAAAAAAAAAAAULL) != 0U) ? 0x01U: 0U) ) + +/*! 
+ * DRF MACRO README: + * + * Glossary: + * DRF: Device, Register, Field + * FLD: Field + * REF: Reference + * + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA 0xDEADBEEF + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_GAMMA 27:0 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA 31:28 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ZERO 0x00000000 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ONE 0x00000001 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_TWO 0x00000002 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_THREE 0x00000003 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FOUR 0x00000004 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FIVE 0x00000005 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SIX 0x00000006 + * #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SEVEN 0x00000007 + * + * + * Device = _DEVICE_OMEGA + * This is the common "base" that a group of registers in a manual share + * + * Register = _REGISTER_ALPHA + * Register for a given block of defines is the common root for one or more fields and constants + * + * Field(s) = _FIELD_GAMMA, _FIELD_ZETA + * These are the bit ranges for a given field within the register + * Fields are not required to have defined constant values (enumerations) + * + * Constant(s) = _ZERO, _ONE, _TWO, ... + * These are named values (enums) a field can contain; the width of the constants should not be larger than the field width + * + * MACROS: + * + * DRF_SHIFT: + * Bit index of the lower bound of a field + * DRF_SHIFT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 28 + * + * DRF_SHIFT_RT: + * Bit index of the higher bound of a field + * DRF_SHIFT_RT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 31 + * + * DRF_MASK: + * Produces a mask of 1-s equal to the width of a field + * DRF_MASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF (four 1s starting at bit 0) + * + * DRF_SHIFTMASK: + * Produces a mask of 1s equal to the width of a field at the location of the field + * DRF_SHIFTMASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF0000000 + * + * DRF_DEF: + * Shifts a field constant's value to the correct field offset + * DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE) == 0x30000000 + * + * DRF_NUM: + * Shifts a number to the location of a particular field + * DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 3) == 0x30000000 + * NOTE: If the value passed in is wider than the field, the value's high bits will be truncated + * + * DRF_SIZE: + * Provides the width of the field in bits + * DRF_SIZE(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 4 + * + * DRF_VAL: + * Provides the value of an input within the field specified + * DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xABCD1234) == 0xA + * This is sort of like the inverse of DRF_NUM + * + * DRF_IDX...: + * These macros are similar to the above but for fields that accept an index argumment + * + * FLD_SET_DRF: + * Set the field bits in a given value with the given field constant + * NvU32 x = 0x00001234; + * x = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, x); + * x == 0x30001234; + * + * FLD_SET_DRF_NUM: + * Same as FLD_SET_DRF but instead of using a field constant a literal/variable is passed in + * NvU32 x = 0x00001234; + * x = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xF, x); + * x == 0xF0001234; + * + * FLD_IDX...: + * These macros are similar to the above but for fields that accept an index argumment + * + * FLD_TEST_DRF: + * Test if location specified by drf in 'v' has the same 
value as NV_drfc + * FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE + * + * FLD_TEST_DRF_NUM: + * Test if locations specified by drf in 'v' have the same value as n + * FLD_TEST_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0x3, 0x3000ABCD) == NV_TRUE + * + * REF_DEF: + * Like DRF_DEF but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_DEF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE) == 0x30000000 + * + * REF_VAL: + * Like DRF_VAL but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_VAL(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xABCD1234) == 0xA + * + * REF_NUM: + * Like DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xA) == 0xA00000000 + * + * FLD_SET_REF_NUM: + * Like FLD_SET_DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * NvU32 x = 0x00001234; + * x = FLD_SET_REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xF, x); + * x == 0xF0001234; + * + * FLD_TEST_REF: + * Like FLD_TEST_DRF but maintains full symbol name (use in cases where "NV" is not prefixed to the field) + * FLD_TEST_REF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE + * + * Other macros: + * There a plethora of other macros below that extend the above (notably Multi-Word (MW), 64-bit, and some + * reg read/write variations). I hope these are self explanatory. If you have a need to use them, you + * probably have some knowledge of how they work. + */ + +// tegra mobile uses nvmisc_macros.h and can't access nvmisc.h... and sometimes both get included. +#ifndef _NVMISC_MACROS_H +// Use Coverity Annotation to mark issues as false positives/ignore when using single bit defines. +#define DRF_ISBIT(bitval,drf) \ + ( /* coverity[identical_branches] */ \ + (bitval != 0) ? drf ) +#define DEVICE_BASE(d) (0?d) // what's up with this name? totally non-parallel to the macros below +#define DEVICE_EXTENT(d) (1?d) // what's up with this name? totally non-parallel to the macros below +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +#ifdef MISRA_14_3 +#define DRF_BASE(drf) (drf##_LOW_FIELD) +#define DRF_EXTENT(drf) (drf##_HIGH_FIELD) +#define DRF_SHIFT(drf) ((drf##_LOW_FIELD) % 32U) +#define DRF_SHIFT_RT(drf) ((drf##_HIGH_FIELD) % 32U) +#define DRF_MASK(drf) (0xFFFFFFFFU >> (31U - ((drf##_HIGH_FIELD) % 32U) + ((drf##_LOW_FIELD) % 32U))) +#else +#define DRF_BASE(drf) (NV_FALSE?drf) // much better +#define DRF_EXTENT(drf) (NV_TRUE?drf) // much better +#define DRF_SHIFT(drf) (((NvU32)DRF_BASE(drf)) % 32U) +#define DRF_SHIFT_RT(drf) (((NvU32)DRF_EXTENT(drf)) % 32U) +#define DRF_MASK(drf) (0xFFFFFFFFU>>(31U - DRF_SHIFT_RT(drf) + DRF_SHIFT(drf))) +#endif +#define DRF_DEF(d,r,f,c) (((NvU32)(NV ## d ## r ## f ## c))<>(31-((DRF_ISBIT(1,drf)) % 32)+((DRF_ISBIT(0,drf)) % 32))) +#define DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f)) +#endif + +// Signed version of DRF_VAL, which takes care of extending sign bit. 
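// For example (illustrative): for a 4-bit field, a raw field value of 0xE is
// sign-extended to -2, since (0xE ^ NVBIT(3)) - NVBIT(3) == 6 - 8 == -2.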
+#define DRF_VAL_SIGNED(d,r,f,v) (((DRF_VAL(d,r,f,(v)) ^ (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))) - (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U))) +#define DRF_IDX_DEF(d,r,f,i,c) ((NV ## d ## r ## f ## c)<>DRF_SHIFT(NV##d##r##f(i)))&DRF_MASK(NV##d##r##f(i))) +#define DRF_IDX_OFFSET_VAL(d,r,f,i,o,v) (((v)>>DRF_SHIFT(NV##d##r##f(i,o)))&DRF_MASK(NV##d##r##f(i,o))) +// Fractional version of DRF_VAL which reads Fx.y fixed point number (x.y)*z +#define DRF_VAL_FRAC(d,r,x,y,v,z) ((DRF_VAL(d,r,x,(v))*z) + ((DRF_VAL(d,r,y,v)*z) / (1<>(63-((DRF_ISBIT(1,drf)) % 64)+((DRF_ISBIT(0,drf)) % 64))) +#define DRF_SHIFTMASK64(drf) (DRF_MASK64(drf)<<(DRF_SHIFT64(drf))) + +#define DRF_DEF64(d,r,f,c) (((NvU64)(NV ## d ## r ## f ## c))<>DRF_SHIFT64(NV ## d ## r ## f))&DRF_MASK64(NV ## d ## r ## f)) + +#define DRF_VAL_SIGNED64(d,r,f,v) (((DRF_VAL64(d,r,f,(v)) ^ (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1))) +#define DRF_IDX_DEF64(d,r,f,i,c) (((NvU64)(NV ## d ## r ## f ## c))<>DRF_SHIFT64(NV##d##r##f(i)))&DRF_MASK64(NV##d##r##f(i))) +#define DRF_IDX_OFFSET_VAL64(d,r,f,i,o,v) (((NvU64)(v)>>DRF_SHIFT64(NV##d##r##f(i,o)))&DRF_MASK64(NV##d##r##f(i,o))) + +#define FLD_SET_DRF64(d,r,f,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c)) +#define FLD_SET_DRF_NUM64(d,r,f,n,v) ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_NUM64(d,r,f,n)) +#define FLD_IDX_SET_DRF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c)) +#define FLD_IDX_OFFSET_SET_DRF64(d,r,f,i,o,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c)) +#define FLD_IDX_SET_DRF_DEF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c)) +#define FLD_IDX_SET_DRF_NUM64(d,r,f,i,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_NUM64(d,r,f,i,n)) +#define FLD_SET_DRF_IDX64(d,r,f,c,i,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c(i))) + +#define FLD_TEST_DRF64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) == NV##d##r##f##c) +#define FLD_TEST_DRF_AND64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) & NV##d##r##f##c) +#define FLD_TEST_DRF_NUM64(d,r,f,n,v) (DRF_VAL64(d, r, f, (v)) == (n)) +#define FLD_IDX_TEST_DRF64(d,r,f,i,c,v) (DRF_IDX_VAL64(d, r, f, i, (v)) == NV##d##r##f##c) +#define FLD_IDX_OFFSET_TEST_DRF64(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL64(d, r, f, i, o, (v)) == NV##d##r##f##c) + +#define REF_DEF64(drf,d) (((drf ## d)&DRF_MASK64(drf))<>DRF_SHIFT64(drf))&DRF_MASK64(drf)) +#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3) +#define REF_NUM64(drf,n) (((NvU64)(n)&(0xFFFFFFFFFFFFFFFFU>>(63U-((drf##_HIGH_FIELD) % 63U)+((drf##_LOW_FIELD) % 63U)))) << ((drf##_LOW_FIELD) % 63U)) +#else +#define REF_NUM64(drf,n) (((NvU64)(n)&DRF_MASK64(drf))<>DRF_SHIFT(drf))&DRF_MASK(drf)) +#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3) +#define REF_NUM(drf,n) (((n)&(0xFFFFFFFFU>>(31U-((drf##_HIGH_FIELD) % 32U)+((drf##_LOW_FIELD) % 32U)))) << ((drf##_LOW_FIELD) % 32U)) +#else +#define REF_NUM(drf,n) (((n)&DRF_MASK(drf))<>DRF_SHIFT(CR ## d ## r ## f))&DRF_MASK(CR ## d ## r ## f)) + +// Multi-word (MW) field manipulations. For multi-word structures (e.g., Fermi SPH), +// fields may have bit numbers beyond 32. To avoid errors using "classic" multi-word macros, +// all the field extents are defined as "MW(X)". For example, MW(127:96) means +// the field is in bits 0-31 of word number 3 of the structure. 
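// For example (illustrative): for a field defined as MW(103:96), DRF_WORD_MW
// evaluates to 3, DRF_BASE_MW to 0, DRF_EXTENT_MW to 7 and DRF_SIZE_MW to 8,
// i.e. the field occupies the low byte of word 3.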
+// +// DRF_VAL_MW() macro is meant to be used for native endian 32-bit aligned 32-bit word data, +// not for byte stream data. +// +// DRF_VAL_BS() macro is for byte stream data used in fbQueryBIOS_XXX(). +// +#define DRF_EXPAND_MW(drf) drf // used to turn "MW(a:b)" into "a:b" +#define DRF_PICK_MW(drf,v) ((v)? DRF_EXPAND_##drf) // picks low or high bits +#define DRF_WORD_MW(drf) (DRF_PICK_MW(drf,0)/32) // which word in a multi-word array +#define DRF_BASE_MW(drf) (DRF_PICK_MW(drf,0)%32) // which start bit in the selected word? +#define DRF_EXTENT_MW(drf) (DRF_PICK_MW(drf,1)%32) // which end bit in the selected word +#define DRF_SHIFT_MW(drf) (DRF_PICK_MW(drf,0)%32) +#define DRF_MASK_MW(drf) (0xFFFFFFFFU>>((31-(DRF_EXTENT_MW(drf))+(DRF_BASE_MW(drf)))%32)) +#define DRF_SHIFTMASK_MW(drf) ((DRF_MASK_MW(drf))<<(DRF_SHIFT_MW(drf))) +#define DRF_SIZE_MW(drf) (DRF_EXTENT_MW(drf)-DRF_BASE_MW(drf)+1) + +#define DRF_DEF_MW(d,r,f,c) ((NV##d##r##f##c) << DRF_SHIFT_MW(NV##d##r##f)) +#define DRF_NUM_MW(d,r,f,n) (((n)&DRF_MASK_MW(NV##d##r##f))<>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f)) +#define DRF_SPANS(drf) ((DRF_PICK_MW(drf,0)/32) != (DRF_PICK_MW(drf,1)/32)) +#define DRF_WORD_MW_LOW(drf) (DRF_PICK_MW(drf,0)/32) +#define DRF_WORD_MW_HIGH(drf) (DRF_PICK_MW(drf,1)/32) +#define DRF_MASK_MW_LOW(drf) (0xFFFFFFFFU) +#define DRF_MASK_MW_HIGH(drf) (0xFFFFFFFFU>>(31-(DRF_EXTENT_MW(drf)))) +#define DRF_SHIFT_MW_LOW(drf) (DRF_PICK_MW(drf,0)%32) +#define DRF_SHIFT_MW_HIGH(drf) (0) +#define DRF_MERGE_SHIFT(drf) ((32-((DRF_PICK_MW(drf,0)%32)))%32) +#define DRF_VAL_MW_2WORD(d,r,f,v) (((((v)[DRF_WORD_MW_LOW(NV##d##r##f)])>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \ + (((((v)[DRF_WORD_MW_HIGH(NV##d##r##f)])>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f))) +#define DRF_VAL_MW(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_MW_2WORD(d,r,f,v) : DRF_VAL_MW_1WORD(d,r,f,v) ) + +#define DRF_IDX_DEF_MW(d,r,f,i,c) ((NV##d##r##f##c)<>DRF_SHIFT_MW(NV##d##r##f(i)))&DRF_MASK_MW(NV##d##r##f(i))) + +// +// Logically OR all DRF_DEF constants indexed from zero to s (semiinclusive). +// Caution: Target variable v must be pre-initialized. 
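// Illustrative usage (the field and count suffix names here are hypothetical):
//   NvU32 mask = 0;
//   FLD_IDX_OR_DRF_DEF(_PFOO, _BAR, _ENABLE, _TRUE, __SIZE_1, mask);
// ORs the shifted _TRUE constant into every indexed _ENABLE field position,
// for idx in [0, NV_PFOO_BAR_ENABLE__SIZE_1).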
+// +#define FLD_IDX_OR_DRF_DEF(d,r,f,c,s,v) \ +do \ +{ NvU32 idx; \ + for (idx = 0; idx < (NV ## d ## r ## f ## s); ++idx)\ + { \ + v |= DRF_IDX_DEF(d,r,f,idx,c); \ + } \ +} while(0) + + +#define FLD_MERGE_MW(drf,n,v) (((v)[DRF_WORD_MW(drf)] & ~DRF_SHIFTMASK_MW(drf)) | n) +#define FLD_ASSIGN_MW(drf,n,v) ((v)[DRF_WORD_MW(drf)] = FLD_MERGE_MW(drf, n, v)) +#define FLD_IDX_MERGE_MW(drf,i,n,v) (((v)[DRF_WORD_MW(drf(i))] & ~DRF_SHIFTMASK_MW(drf(i))) | n) +#define FLD_IDX_ASSIGN_MW(drf,i,n,v) ((v)[DRF_WORD_MW(drf(i))] = FLD_MERGE_MW(drf(i), n, v)) + +#define FLD_SET_DRF_MW(d,r,f,c,v) FLD_MERGE_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v) +#define FLD_SET_DRF_NUM_MW(d,r,f,n,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_NUM_MW(d,r,f,n), v) +#define FLD_SET_DRF_DEF_MW(d,r,f,c,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v) +#define FLD_IDX_SET_DRF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v) +#define FLD_IDX_SET_DRF_DEF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v) +#define FLD_IDX_SET_DRF_NUM_MW(d,r,f,i,n,v) FLD_IDX_ASSIGN_MW(NV##d##r##f, i, DRF_IDX_NUM_MW(d,r,f,i,n), v) + +#define FLD_TEST_DRF_MW(d,r,f,c,v) ((DRF_VAL_MW(d, r, f, (v)) == NV##d##r##f##c)) +#define FLD_TEST_DRF_NUM_MW(d,r,f,n,v) ((DRF_VAL_MW(d, r, f, (v)) == n)) +#define FLD_IDX_TEST_DRF_MW(d,r,f,i,c,v) ((DRF_IDX_VAL_MW(d, r, f, i, (v)) == NV##d##r##f##c)) + +#define DRF_VAL_BS(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_BS_2WORD(d,r,f,(v)) : DRF_VAL_BS_1WORD(d,r,f,(v)) ) + +//------------------------------------------------------------------------// +// // +// Common defines for engine register reference wrappers // +// // +// New engine addressing can be created like: // +// \#define ENG_REG_PMC(o,d,r) NV##d##r // +// \#define ENG_IDX_REG_CE(o,d,i,r) CE_MAP(o,r,i) // +// // +// See FB_FBPA* for more examples // +//------------------------------------------------------------------------// + +#define ENG_RD_REG(g,o,d,r) GPU_REG_RD32(g, ENG_REG##d(o,d,r)) +#define ENG_WR_REG(g,o,d,r,v) GPU_REG_WR32(g, ENG_REG##d(o,d,r), (v)) +#define ENG_RD_DRF(g,o,d,r,f) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define ENG_WR_DRF_DEF(g,o,d,r,f,c) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define ENG_TEST_IDX_DRF_DEF(g,o,d,r,f,c,i) (ENG_RD_IDX_DRF(g, o, d, r, f, (i)) == NV##d##r##f##c) + +#define ENG_IDX_RD_REG(g,o,d,i,r) GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r)) +#define ENG_IDX_WR_REG(g,o,d,i,r,v) GPU_REG_WR32(g, ENG_IDX_REG##d(o,d,i,r), (v)) + +#define ENG_IDX_RD_DRF(g,o,d,i,r,f) ((GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) + +// +// DRF_READ_1WORD_BS() and DRF_READ_1WORD_BS_HIGH() do not read beyond the bytes that contain +// the requested value. Reading beyond the actual data causes a page fault panic when the +// immediately following page happened to be protected or not mapped. 
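// For example (illustrative): a byte-stream field whose extent within its word
// is bit 11 (below 16) is fetched with DRF_READ_2BYTE_BS(), so only the two
// bytes that actually contain the field are dereferenced.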
+// +#define DRF_VAL_BS_1WORD(d,r,f,v) ((DRF_READ_1WORD_BS(d,r,f,v)>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f)) +#define DRF_VAL_BS_2WORD(d,r,f,v) (((DRF_READ_4BYTE_BS(NV##d##r##f,v)>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \ + (((DRF_READ_1WORD_BS_HIGH(d,r,f,v)>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f))) + +#define DRF_READ_1BYTE_BS(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4])) +#define DRF_READ_2BYTE_BS(drf,v) (DRF_READ_1BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+1])<<8)) +#define DRF_READ_3BYTE_BS(drf,v) (DRF_READ_2BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+2])<<16)) +#define DRF_READ_4BYTE_BS(drf,v) (DRF_READ_3BYTE_BS(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+3])<<24)) + +#define DRF_READ_1BYTE_BS_HIGH(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4])) +#define DRF_READ_2BYTE_BS_HIGH(drf,v) (DRF_READ_1BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+1])<<8)) +#define DRF_READ_3BYTE_BS_HIGH(drf,v) (DRF_READ_2BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+2])<<16)) +#define DRF_READ_4BYTE_BS_HIGH(drf,v) (DRF_READ_3BYTE_BS_HIGH(drf,v)| \ + ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+3])<<24)) + +// Calculate 2^n - 1 and avoid shift counter overflow +// +// On Windows amd64, 64 << 64 => 1 +// +#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1) + +#define DRF_READ_1WORD_BS(d,r,f,v) \ + ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS(NV##d##r##f,(v)): \ + DRF_READ_4BYTE_BS(NV##d##r##f,(v))))) + +#define DRF_READ_1WORD_BS_HIGH(d,r,f,v) \ + ((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS_HIGH(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS_HIGH(NV##d##r##f,(v)): \ + ((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS_HIGH(NV##d##r##f,(v)): \ + DRF_READ_4BYTE_BS_HIGH(NV##d##r##f,(v))))) + +#define LOWESTBIT(x) ( (x) & (((x) - 1U) ^ (x)) ) +// Destructive operation on n32 +#define HIGHESTBIT(n32) \ +{ \ + HIGHESTBITIDX_32(n32); \ + n32 = NVBIT(n32); \ +} +#define ONEBITSET(x) ( ((x) != 0U) && (((x) & ((x) - 1U)) == 0U) ) + +// Destructive operation on n32 +#define NUMSETBITS_32(n32) \ +{ \ + n32 = n32 - ((n32 >> 1) & 0x55555555); \ + n32 = (n32 & 0x33333333) + ((n32 >> 2) & 0x33333333); \ + n32 = (((n32 + (n32 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; \ +} + +/*! + * Calculate number of bits set in a 32-bit unsigned integer. + * Pure typesafe alternative to @ref NUMSETBITS_32. + */ +static NV_FORCEINLINE NvU32 +nvPopCount32(const NvU32 x) +{ + NvU32 temp = x; + temp = temp - ((temp >> 1) & 0x55555555U); + temp = (temp & 0x33333333U) + ((temp >> 2) & 0x33333333U); + temp = (((temp + (temp >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24; + return temp; +} + +/*! + * Calculate number of bits set in a 64-bit unsigned integer. + */ +static NV_FORCEINLINE NvU32 +nvPopCount64(const NvU64 x) +{ + NvU64 temp = x; + temp = temp - ((temp >> 1) & 0x5555555555555555ULL); + temp = (temp & 0x3333333333333333ULL) + ((temp >> 2) & 0x3333333333333333ULL); + temp = (temp + (temp >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + temp = (temp * 0x0101010101010101ULL) >> 56; + return (NvU32)temp; +} + +/*! + * Determine how many bits are set below a bit index within a mask. 
+ * This assigns a dense ordering to the set bits in the mask. + * + * For example the mask 0xCD contains 5 set bits: + * nvMaskPos32(0xCD, 0) == 0 + * nvMaskPos32(0xCD, 2) == 1 + * nvMaskPos32(0xCD, 3) == 2 + * nvMaskPos32(0xCD, 6) == 3 + * nvMaskPos32(0xCD, 7) == 4 + */ +static NV_FORCEINLINE NvU32 +nvMaskPos32(const NvU32 mask, const NvU32 bitIdx) +{ + return nvPopCount32(mask & (NVBIT32(bitIdx) - 1U)); +} + +// Destructive operation on n32 +#define LOWESTBITIDX_32(n32) \ +{ \ + n32 = BIT_IDX_32(LOWESTBIT(n32));\ +} + +// Destructive operation on n32 +#define HIGHESTBITIDX_32(n32) \ +{ \ + NvU32 count = 0; \ + while (n32 >>= 1) \ + { \ + count++; \ + } \ + n32 = count; \ +} + +// Destructive operation on n32 +#define ROUNDUP_POW2(n32) \ +{ \ + n32--; \ + n32 |= n32 >> 1; \ + n32 |= n32 >> 2; \ + n32 |= n32 >> 4; \ + n32 |= n32 >> 8; \ + n32 |= n32 >> 16; \ + n32++; \ +} + +/*! + * Round up a 32-bit unsigned integer to the next power of 2. + * Pure typesafe alternative to @ref ROUNDUP_POW2. + * + * param[in] x must be in range [0, 2^31] to avoid overflow. + */ +static NV_FORCEINLINE NvU32 +nvNextPow2_U32(const NvU32 x) +{ + NvU32 y = x; + y--; + y |= y >> 1; + y |= y >> 2; + y |= y >> 4; + y |= y >> 8; + y |= y >> 16; + y++; + return y; +} + + +static NV_FORCEINLINE NvU32 +nvPrevPow2_U32(const NvU32 x ) +{ + NvU32 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + return y - (y >> 1); +} + +static NV_FORCEINLINE NvU64 +nvPrevPow2_U64(const NvU64 x ) +{ + NvU64 y = x; + y |= (y >> 1); + y |= (y >> 2); + y |= (y >> 4); + y |= (y >> 8); + y |= (y >> 16); + y |= (y >> 32); + return y - (y >> 1); +} + +// Destructive operation on n64 +#define ROUNDUP_POW2_U64(n64) \ +{ \ + n64--; \ + n64 |= n64 >> 1; \ + n64 |= n64 >> 2; \ + n64 |= n64 >> 4; \ + n64 |= n64 >> 8; \ + n64 |= n64 >> 16; \ + n64 |= n64 >> 32; \ + n64++; \ +} + +#define NV_SWAP_U8(a,b) \ +{ \ + NvU8 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +#define NV_SWAP_U32(a,b) \ +{ \ + NvU32 temp; \ + temp = a; \ + a = b; \ + b = temp; \ +} + +/*! + * @brief Macros allowing simple iteration over bits set in a given mask. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * + * @param[in,out] index lvalue that is used as a bit index in the loop + * (can be declared as any NvU* or NvS* variable) + * @param[in] mask expression, loop will iterate over set bits only + */ +#define FOR_EACH_INDEX_IN_MASK(maskWidth,index,mask) \ +{ \ + NvU##maskWidth lclMsk = (NvU##maskWidth)(mask); \ + for ((index) = 0U; lclMsk != 0U; (index)++, lclMsk >>= 1U)\ + { \ + if (((NvU##maskWidth)NVBIT64(0) & lclMsk) == 0U) \ + { \ + continue; \ + } +#define FOR_EACH_INDEX_IN_MASK_END \ + } \ +} + +// +// Size to use when declaring variable-sized arrays +// +#define NV_ANYSIZE_ARRAY 1 + +// +// Returns ceil(a/b) +// +#define NV_CEIL(a,b) (((a)+(b)-1)/(b)) + +// Clearer name for NV_CEIL +#ifndef NV_DIV_AND_CEIL +#define NV_DIV_AND_CEIL(a, b) NV_CEIL(a,b) +#endif + +#ifndef NV_MIN +#define NV_MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + +#ifndef NV_MAX +#define NV_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + +// +// Returns absolute value of provided integer expression +// +#define NV_ABS(a) ((a)>=0?(a):(-(a))) + +// +// Returns 1 if input number is positive, 0 if 0 and -1 if negative. Avoid +// macro parameter as function call which will have side effects. +// +#define NV_SIGN(s) ((NvS8)(((s) > 0) - ((s) < 0))) + +// +// Returns 1 if input number is >= 0 or -1 otherwise. 
This assumes 0 has a +// positive sign. +// +#define NV_ZERO_SIGN(s) ((NvS8)((((s) >= 0) * 2) - 1)) + +// Returns the offset (in bytes) of 'member' in struct 'type'. +#ifndef NV_OFFSETOF + #if defined(__GNUC__) && (__GNUC__ > 3) + #define NV_OFFSETOF(type, member) ((NvU32)__builtin_offsetof(type, member)) + #else + #define NV_OFFSETOF(type, member) ((NvU32)(NvU64)&(((type *)0)->member)) // shouldn't we use PtrToUlong? But will need to include windows header. + #endif +#endif + +// +// Performs a rounded division of b into a (unsigned). For SIGNED version of +// NV_ROUNDED_DIV() macro check the comments in bug 769777. +// +#define NV_UNSIGNED_ROUNDED_DIV(a,b) (((a) + ((b) / 2U)) / (b)) + +/*! + * Performs a ceiling division of b into a (unsigned). A "ceiling" division is + * a division is one with rounds up result up if a % b != 0. + * + * @param[in] a Numerator + * @param[in] b Denominator + * + * @return a / b + a % b != 0 ? 1 : 0. + */ +#define NV_UNSIGNED_DIV_CEIL(a, b) (((a) + (b - 1)) / (b)) + +/*! + * Performs subtraction where a negative difference is raised to zero. + * Can be used to avoid underflowing an unsigned subtraction. + * + * @param[in] a Minuend + * @param[in] b Subtrahend + * + * @return a > b ? a - b : 0. + */ +#define NV_SUBTRACT_NO_UNDERFLOW(a, b) ((a)>(b) ? (a)-(b) : 0) + +/*! + * Performs a rounded right-shift of 32-bit unsigned value "a" by "shift" bits. + * Will round result away from zero. + * + * @param[in] a 32-bit unsigned value to shift. + * @param[in] shift Number of bits by which to shift. + * + * @return Resulting shifted value rounded away from zero. + */ +#define NV_RIGHT_SHIFT_ROUNDED(a, shift) \ + (((a) >> (shift)) + !!((NVBIT((shift) - 1) & (a)) == NVBIT((shift) - 1))) + +// +// Power of 2 alignment. +// (Will give unexpected results if 'gran' is not a power of 2.) +// +#ifndef NV_ALIGN_DOWN +// +// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type. +// Otherwise, if aligning a NVU64 with NVU32 granularity, the top 4 bytes get zeroed. +// +#define NV_ALIGN_DOWN(v, gran) ((v) & ~((v) - (v) + (gran) - 1)) +#endif + +#ifndef NV_ALIGN_UP +// +// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type. +// Otherwise, if aligning a NVU64 with NVU32 granularity, the top 4 bytes get zeroed. 
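// [Editor's note: illustrative example; not part of the original header.]
//    NvU64 addr = 0x100001234ULL;
//    NvU32 gran = 0x1000;
//    NV_ALIGN_DOWN(addr, gran)   // -> 0x100001000ULL
//    NV_ALIGN_UP(addr, gran)     // -> 0x100002000ULL  (macro defined just below)
// Without the (v) - (v) + (gran) promotion, ~((NvU32)gran - 1U) would be only 32 bits wide and
// the upper half of addr would be masked away, yielding 0x1000 / 0x2000 instead.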
+// +#define NV_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((v) - (v) + (gran) - 1)) +#endif + +#ifndef NV_ALIGN_DOWN64 +#define NV_ALIGN_DOWN64(v, gran) ((v) & ~(((NvU64)gran) - 1)) +#endif + +#ifndef NV_ALIGN_UP64 +#define NV_ALIGN_UP64(v, gran) (((v) + ((gran) - 1)) & ~(((NvU64)gran)-1)) +#endif + +#ifndef NV_IS_ALIGNED +#define NV_IS_ALIGNED(v, gran) (0U == ((v) & ((gran) - 1U))) +#endif + +#ifndef NV_IS_ALIGNED64 +#define NV_IS_ALIGNED64(v, gran) (0U == ((v) & (((NvU64)gran) - 1U))) +#endif + +#ifndef NVMISC_MEMSET +static NV_FORCEINLINE void *NVMISC_MEMSET(void *s, NvU8 c, NvLength n) +{ + NvU8 *b = (NvU8 *) s; + NvLength i; + + for (i = 0; i < n; i++) + { + b[i] = c; + } + + return s; +} +#endif + +#ifndef NVMISC_MEMCPY +static NV_FORCEINLINE void *NVMISC_MEMCPY(void *dest, const void *src, NvLength n) +{ + NvU8 *destByte = (NvU8 *) dest; + const NvU8 *srcByte = (const NvU8 *) src; + NvLength i; + + for (i = 0; i < n; i++) + { + destByte[i] = srcByte[i]; + } + + return dest; +} +#endif + +static NV_FORCEINLINE char *NVMISC_STRNCPY(char *dest, const char *src, NvLength n) +{ + NvLength i; + + for (i = 0; i < n; i++) + { + dest[i] = src[i]; + if (src[i] == '\0') + { + break; + } + } + + for (; i < n; i++) + { + dest[i] = '\0'; + } + + return dest; +} + +/*! + * Convert a void* to an NvUPtr. This is used when MISRA forbids us from doing a direct cast. + * + * @param[in] ptr Pointer to be converted + * + * @return Resulting NvUPtr + */ +static NV_FORCEINLINE NvUPtr NV_PTR_TO_NVUPTR(void *ptr) +{ + union + { + NvUPtr v; + void *p; + } uAddr; + + uAddr.p = ptr; + return uAddr.v; +} + +/*! + * Convert an NvUPtr to a void*. This is used when MISRA forbids us from doing a direct cast. + * + * @param[in] ptr Pointer to be converted + * + * @return Resulting void * + */ +static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address) +{ + union + { + NvUPtr v; + void *p; + } uAddr; + + uAddr.v = address; + return uAddr.p; +} + +#ifdef __cplusplus +} +#endif //__cplusplus + +#endif // __NV_MISC_H + diff --git a/src/common/sdk/nvidia/inc/nvos.h b/src/common/sdk/nvidia/inc/nvos.h new file mode 100644 index 000000000..2b127116b --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvos.h @@ -0,0 +1,3200 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + /***************************************************************************\ +|* *| +|* NV Architecture Interface *| +|* *| +|* defines the Operating System function and ioctl interfaces to *| +|* NVIDIA's Unified Media Architecture (TM). *| +|* *| + \***************************************************************************/ + +#ifndef NVOS_INCLUDED +#define NVOS_INCLUDED +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvstatus.h" + +#include "nvgputypes.h" +#include "rs_access.h" + +/* local defines here */ +#define FILE_DEVICE_NV 0x00008000 +#define NV_IOCTL_FCT_BASE 0x00000800 + +// This is the maximum number of subdevices supported in an SLI +// configuration. +#define NVOS_MAX_SUBDEVICES 8 + +/* Define to indicate the use of Unified status codes - bug 200043705*/ +#define UNIFIED_NV_STATUS 1 + + /***************************************************************************\ +|* NV OS Functions *| + \***************************************************************************/ + +/* + Result codes for RM APIs, shared for all the APIs + + *** IMPORTANT *** + + Ensure that no NVOS_STATUS value has the highest bit set. That bit + is used to passthrough the NVOS_STATUS on code expecting an RM_STATUS. +*/ +#define NVOS_STATUS NV_STATUS + +#define NVOS_STATUS_SUCCESS NV_OK +#define NVOS_STATUS_ERROR_CARD_NOT_PRESENT NV_ERR_CARD_NOT_PRESENT +#define NVOS_STATUS_ERROR_DUAL_LINK_INUSE NV_ERR_DUAL_LINK_INUSE +#define NVOS_STATUS_ERROR_GENERIC NV_ERR_GENERIC +#define NVOS_STATUS_ERROR_GPU_NOT_FULL_POWER NV_ERR_GPU_NOT_FULL_POWER +#define NVOS_STATUS_ERROR_ILLEGAL_ACTION NV_ERR_ILLEGAL_ACTION +#define NVOS_STATUS_ERROR_IN_USE NV_ERR_STATE_IN_USE +#define NVOS_STATUS_ERROR_INSUFFICIENT_RESOURCES NV_ERR_INSUFFICIENT_RESOURCES +#define NVOS_STATUS_ERROR_INVALID_ACCESS_TYPE NV_ERR_INVALID_ACCESS_TYPE +#define NVOS_STATUS_ERROR_INVALID_ARGUMENT NV_ERR_INVALID_ARGUMENT +#define NVOS_STATUS_ERROR_INVALID_BASE NV_ERR_INVALID_BASE +#define NVOS_STATUS_ERROR_INVALID_CHANNEL NV_ERR_INVALID_CHANNEL +#define NVOS_STATUS_ERROR_INVALID_CLASS NV_ERR_INVALID_CLASS +#define NVOS_STATUS_ERROR_INVALID_CLIENT NV_ERR_INVALID_CLIENT +#define NVOS_STATUS_ERROR_INVALID_COMMAND NV_ERR_INVALID_COMMAND +#define NVOS_STATUS_ERROR_INVALID_DATA NV_ERR_INVALID_DATA +#define NVOS_STATUS_ERROR_INVALID_DEVICE NV_ERR_INVALID_DEVICE +#define NVOS_STATUS_ERROR_INVALID_DMA_SPECIFIER NV_ERR_INVALID_DMA_SPECIFIER +#define NVOS_STATUS_ERROR_INVALID_EVENT NV_ERR_INVALID_EVENT +#define NVOS_STATUS_ERROR_INVALID_FLAGS NV_ERR_INVALID_FLAGS +#define NVOS_STATUS_ERROR_INVALID_FUNCTION NV_ERR_INVALID_FUNCTION +#define NVOS_STATUS_ERROR_INVALID_HEAP NV_ERR_INVALID_HEAP +#define NVOS_STATUS_ERROR_INVALID_INDEX NV_ERR_INVALID_INDEX +#define NVOS_STATUS_ERROR_INVALID_LIMIT NV_ERR_INVALID_LIMIT +#define NVOS_STATUS_ERROR_INVALID_METHOD NV_ERR_INVALID_METHOD +#define NVOS_STATUS_ERROR_INVALID_OBJECT_BUFFER NV_ERR_BUFFER_TOO_SMALL +#define NVOS_STATUS_ERROR_INVALID_OBJECT_ERROR NV_ERR_INVALID_OBJECT +#define NVOS_STATUS_ERROR_INVALID_OBJECT_HANDLE NV_ERR_INVALID_OBJECT_HANDLE +#define NVOS_STATUS_ERROR_INVALID_OBJECT_NEW NV_ERR_INVALID_OBJECT_NEW +#define NVOS_STATUS_ERROR_INVALID_OBJECT_OLD NV_ERR_INVALID_OBJECT_OLD +#define NVOS_STATUS_ERROR_INVALID_OBJECT_PARENT NV_ERR_INVALID_OBJECT_PARENT +#define NVOS_STATUS_ERROR_INVALID_OFFSET NV_ERR_INVALID_OFFSET +#define NVOS_STATUS_ERROR_INVALID_OWNER NV_ERR_INVALID_OWNER +#define NVOS_STATUS_ERROR_INVALID_PARAM_STRUCT NV_ERR_INVALID_PARAM_STRUCT +#define NVOS_STATUS_ERROR_INVALID_PARAMETER 
NV_ERR_INVALID_PARAMETER +#define NVOS_STATUS_ERROR_INVALID_POINTER NV_ERR_INVALID_POINTER +#define NVOS_STATUS_ERROR_INVALID_REGISTRY_KEY NV_ERR_INVALID_REGISTRY_KEY +#define NVOS_STATUS_ERROR_INVALID_STATE NV_ERR_INVALID_STATE +#define NVOS_STATUS_ERROR_INVALID_STRING_LENGTH NV_ERR_INVALID_STRING_LENGTH +#define NVOS_STATUS_ERROR_INVALID_XLATE NV_ERR_INVALID_XLATE +#define NVOS_STATUS_ERROR_IRQ_NOT_FIRING NV_ERR_IRQ_NOT_FIRING +#define NVOS_STATUS_ERROR_MULTIPLE_MEMORY_TYPES NV_ERR_MULTIPLE_MEMORY_TYPES +#define NVOS_STATUS_ERROR_NOT_SUPPORTED NV_ERR_NOT_SUPPORTED +#define NVOS_STATUS_ERROR_OPERATING_SYSTEM NV_ERR_OPERATING_SYSTEM +#define NVOS_STATUS_ERROR_LIB_RM_VERSION_MISMATCH NV_ERR_LIB_RM_VERSION_MISMATCH +#define NVOS_STATUS_ERROR_PROTECTION_FAULT NV_ERR_PROTECTION_FAULT +#define NVOS_STATUS_ERROR_TIMEOUT NV_ERR_TIMEOUT +#define NVOS_STATUS_ERROR_TOO_MANY_PRIMARIES NV_ERR_TOO_MANY_PRIMARIES +#define NVOS_STATUS_ERROR_IRQ_EDGE_TRIGGERED NV_ERR_IRQ_EDGE_TRIGGERED +#define NVOS_STATUS_ERROR_INVALID_OPERATION NV_ERR_INVALID_OPERATION +#define NVOS_STATUS_ERROR_NOT_COMPATIBLE NV_ERR_NOT_COMPATIBLE +#define NVOS_STATUS_ERROR_MORE_PROCESSING_REQUIRED NV_WARN_MORE_PROCESSING_REQUIRED +#define NVOS_STATUS_ERROR_INSUFFICIENT_PERMISSIONS NV_ERR_INSUFFICIENT_PERMISSIONS +#define NVOS_STATUS_ERROR_TIMEOUT_RETRY NV_ERR_TIMEOUT_RETRY +#define NVOS_STATUS_ERROR_NOT_READY NV_ERR_NOT_READY +#define NVOS_STATUS_ERROR_GPU_IS_LOST NV_ERR_GPU_IS_LOST +#define NVOS_STATUS_ERROR_IN_FULLCHIP_RESET NV_ERR_GPU_IN_FULLCHIP_RESET +#define NVOS_STATUS_ERROR_INVALID_LOCK_STATE NV_ERR_INVALID_LOCK_STATE +#define NVOS_STATUS_ERROR_INVALID_ADDRESS NV_ERR_INVALID_ADDRESS +#define NVOS_STATUS_ERROR_INVALID_IRQ_LEVEL NV_ERR_INVALID_IRQ_LEVEL +#define NVOS_STATUS_ERROR_MEMORY_TRAINING_FAILED NV_ERR_MEMORY_TRAINING_FAILED +#define NVOS_STATUS_ERROR_BUSY_RETRY NV_ERR_BUSY_RETRY +#define NVOS_STATUS_ERROR_INSUFFICIENT_POWER NV_ERR_INSUFFICIENT_POWER +#define NVOS_STATUS_ERROR_OBJECT_NOT_FOUND NV_ERR_OBJECT_NOT_FOUND +#define NVOS_STATUS_ERROR_RESOURCE_LOST NV_ERR_RESOURCE_LOST +#define NVOS_STATUS_ERROR_BUFFER_TOO_SMALL NV_ERR_BUFFER_TOO_SMALL +#define NVOS_STATUS_ERROR_RESET_REQUIRED NV_ERR_RESET_REQUIRED +#define NVOS_STATUS_ERROR_INVALID_REQUEST NV_ERR_INVALID_REQUEST + +#define NVOS_STATUS_ERROR_PRIV_SEC_VIOLATION NV_ERR_PRIV_SEC_VIOLATION +#define NVOS_STATUS_ERROR_GPU_IN_DEBUG_MODE NV_ERR_GPU_IN_DEBUG_MODE + +/* + Note: + This version of the architecture has been changed to allow the + RM to return a client handle that will subsequently used to + identify the client. NvAllocRoot() returns the handle. All + other functions must specify this client handle. + +*/ +/* macro NV01_FREE */ +#define NV01_FREE (0x00000000) + +/* NT ioctl data structure */ +typedef struct +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +} NVOS00_PARAMETERS; + +/* valid hClass values. */ +#define NV01_ROOT (0x00000000) +// +// Redefining it here to maintain consistency with current code +// This is also defined in class cl0001.h +// +#define NV01_ROOT_NON_PRIV (0x00000001) + +// Deprecated, please use NV01_ROOT_CLIENT +#define NV01_ROOT_USER NV01_ROOT_CLIENT + +// +// This will eventually replace NV01_ROOT_USER in RM client code. Please use this +// RM client object type for any new RM client object allocations that are being +// added. 
+// +#define NV01_ROOT_CLIENT (0x00000041) + +/* macro NV01_ALLOC_MEMORY */ +#define NV01_ALLOC_MEMORY (0x00000002) + +/* parameter values */ +#define NVOS02_FLAGS_PHYSICALITY 7:4 +#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS (0x00000000) +#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS (0x00000001) +#define NVOS02_FLAGS_LOCATION 11:8 +#define NVOS02_FLAGS_LOCATION_PCI (0x00000000) +#define NVOS02_FLAGS_LOCATION_AGP (0x00000001) +#define NVOS02_FLAGS_LOCATION_VIDMEM (0x00000002) +#define NVOS02_FLAGS_COHERENCY 15:12 +#define NVOS02_FLAGS_COHERENCY_UNCACHED (0x00000000) +#define NVOS02_FLAGS_COHERENCY_CACHED (0x00000001) +#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE (0x00000002) +#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH (0x00000003) +#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT (0x00000004) +#define NVOS02_FLAGS_COHERENCY_WRITE_BACK (0x00000005) +#define NVOS02_FLAGS_ALLOC 17:16 +#define NVOS02_FLAGS_ALLOC_NONE (0x00000001) +#define NVOS02_FLAGS_GPU_CACHEABLE 18:18 +#define NVOS02_FLAGS_GPU_CACHEABLE_NO (0x00000000) +#define NVOS02_FLAGS_GPU_CACHEABLE_YES (0x00000001) +// If requested, RM will create a kernel mapping of this memory. +// Default is no map. +#define NVOS02_FLAGS_KERNEL_MAPPING 19:19 +#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP (0x00000000) +#define NVOS02_FLAGS_KERNEL_MAPPING_MAP (0x00000001) +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY 20:20 +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES (0x00000001) + +// +// If the flag is set, the RM will only allow read-only CPU user mappings to the +// allocation. +// +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY 21:21 +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES (0x00000001) + +// +// If the flag is set, the RM will only allow read-only DMA mappings to the +// allocation. +// +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY 22:22 +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO (0x00000000) +#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES (0x00000001) + +// +// If the flag is set, the IO memory allocation can be registered with the RM if +// the RM regkey peerMappingOverride is set or the client is privileged. +// +// See Bug 1630288 "[PeerSync] threat related to GPU.." for more details. +// +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE 23:23 +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT (0x00000000) +#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED (0x00000001) + +// If the flag is set RM will assume the memory pages are of type syncpoint. +#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT 24:24 +#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE (0x00000001) + +// +// If _NO_MAP is requested, the RM in supported platforms will not map the +// allocated system or IO memory into user space. The client can later map +// memory through the RmMapMemory() interface. 
+// If _NEVER_MAP is requested, the RM will never map the allocated system or +// IO memory into user space +// +#define NVOS02_FLAGS_MAPPING 31:30 +#define NVOS02_FLAGS_MAPPING_DEFAULT (0x00000000) +#define NVOS02_FLAGS_MAPPING_NO_MAP (0x00000001) +#define NVOS02_FLAGS_MAPPING_NEVER_MAP (0x00000002) + +// ------------------------------------------------------------------------------------- + +/* parameters */ +typedef struct +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvV32 flags; + NvP64 pMemory NV_ALIGN_BYTES(8); + NvU64 limit NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS02_PARAMETERS; + +/* parameter values */ +#define NVOS03_FLAGS_ACCESS 1:0 +#define NVOS03_FLAGS_ACCESS_READ_WRITE (0x00000000) +#define NVOS03_FLAGS_ACCESS_READ_ONLY (0x00000001) +#define NVOS03_FLAGS_ACCESS_WRITE_ONLY (0x00000002) + +#define NVOS03_FLAGS_PREALLOCATE 2:2 +#define NVOS03_FLAGS_PREALLOCATE_DISABLE (0x00000000) +#define NVOS03_FLAGS_PREALLOCATE_ENABLE (0x00000001) + +#define NVOS03_FLAGS_GPU_MAPPABLE 15:15 +#define NVOS03_FLAGS_GPU_MAPPABLE_DISABLE (0x00000000) +#define NVOS03_FLAGS_GPU_MAPPABLE_ENABLE (0x00000001) + +// ------------------------------------------------------------------------------------ +// This flag is required for a hack to be placed inside DD that allows it to +// access a dummy ctxdma as a block linear surface. Refer bug 1562766 for details. +// +// This flag is deprecated, use NVOS03_FLAGS_PTE_KIND. +// +#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE 16:16 +#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE_FALSE (0x00000000) +#define NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE_TRUE (0x00000001) + +/* + * This field allows to specify the page kind. If the page kind + * is not specified then the page kind associated with the memory will be used. + * + * In tegra display driver stack, the page kind remains unknown at the time + * of memory allocation/import, the page kind can only be known when display + * driver client creates a framebuffer from allocated/imported memory. + * + * This field compatible with NVOS03_FLAGS_PTE_KIND_BL_OVERRIDE flag. + */ +#define NVOS03_FLAGS_PTE_KIND 17:16 +#define NVOS03_FLAGS_PTE_KIND_NONE (0x00000000) +#define NVOS03_FLAGS_PTE_KIND_BL (0x00000001) +#define NVOS03_FLAGS_PTE_KIND_PITCH (0x00000002) + +#define NVOS03_FLAGS_TYPE 23:20 +#define NVOS03_FLAGS_TYPE_NOTIFIER (0x00000001) + +/* + * This is an alias into the LSB of the TYPE field which + * actually indicates if a Kernel Mapping should be created. + * If the RM should have access to the memory then Enable this + * flag. + * + * Note that the NV_OS03_FLAGS_MAPPING is an alias to + * the LSB of the NV_OS03_FLAGS_TYPE. And in fact if + * type is NV_OS03_FLAGS_TYPE_NOTIFIER (bit 20 set) + * then it implicitly means that NV_OS03_FLAGS_MAPPING + * is _MAPPING_KERNEL. If the client wants to have a + * Kernel Mapping, it should use the _MAPPING_KERNEL + * flag set and the _TYPE_NOTIFIER should be used only + * with NOTIFIERS. + */ + +#define NVOS03_FLAGS_MAPPING 20:20 +#define NVOS03_FLAGS_MAPPING_NONE (0x00000000) +#define NVOS03_FLAGS_MAPPING_KERNEL (0x00000001) + +#define NVOS03_FLAGS_CACHE_SNOOP 28:28 +#define NVOS03_FLAGS_CACHE_SNOOP_ENABLE (0x00000000) +#define NVOS03_FLAGS_CACHE_SNOOP_DISABLE (0x00000001) + +// HASH_TABLE:ENABLE means that the context DMA is automatically bound into all +// channels in the client. This can lead to excessive hash table usage. 
+// HASH_TABLE:DISABLE means that the context DMA must be explicitly bound into +// any channel that needs to use it via NvRmBindContextDma. +// HASH_TABLE:ENABLE is not supported on NV50 and up, and HASH_TABLE:DISABLE should +// be preferred for all new code. +#define NVOS03_FLAGS_HASH_TABLE 29:29 +#define NVOS03_FLAGS_HASH_TABLE_ENABLE (0x00000000) +#define NVOS03_FLAGS_HASH_TABLE_DISABLE (0x00000001) + +/* macro NV01_ALLOC_OBJECT */ +#define NV01_ALLOC_OBJECT (0x00000005) + +/* parameters */ +typedef struct +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvV32 status; +} NVOS05_PARAMETERS; + +/* Valid values for hClass in Nv01AllocEvent */ +/* Note that NV01_EVENT_OS_EVENT is same as NV01_EVENT_WIN32_EVENT */ +/* TODO: delete the WIN32 name */ +#define NV01_EVENT_KERNEL_CALLBACK (0x00000078) +#define NV01_EVENT_OS_EVENT (0x00000079) +#define NV01_EVENT_WIN32_EVENT NV01_EVENT_OS_EVENT +#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007E) + +/* NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. Please use NV01_EVENT_KERNEL_CALLBACK_EX. */ +/* For use with NV01_EVENT_KERNEL_CALLBACK. */ +/* NVOS10_EVENT_KERNEL_CALLBACK data structure storage needs to be retained by the caller. */ +typedef void (*Callback1ArgVoidReturn)(void *arg); +typedef void (*Callback5ArgVoidReturn)(void *arg1, void *arg2, NvHandle hEvent, NvU32 data, NvU32 status); + +/* NOTE: the 'void* arg' below is ok (but unfortunate) since this interface + can only be used by other kernel drivers which must share the same ptr-size */ +typedef struct +{ + Callback1ArgVoidReturn func; + void *arg; +} NVOS10_EVENT_KERNEL_CALLBACK; + +/* For use with NV01_EVENT_KERNEL_CALLBACK_EX. */ +/* NVOS10_EVENT_KERNEL_CALLBACK_EX data structure storage needs to be retained by the caller. */ +/* NOTE: the 'void* arg' below is ok (but unfortunate) since this interface + can only be used by other kernel drivers which must share the same ptr-size */ +typedef struct +{ + Callback5ArgVoidReturn func; + void *arg; +} NVOS10_EVENT_KERNEL_CALLBACK_EX; + +/* Setting this bit in index will set the Event to a Broadcast type */ +/* i.e. each subdevice under a device needs to see the Event before it's signaled */ +#define NV01_EVENT_BROADCAST (0x80000000) + +/* allow non-root resman client to create NV01_EVENT_KERNEL_CALLBACK events */ +/* -- this works in debug/develop drivers only (for security reasons)*/ +#define NV01_EVENT_PERMIT_NON_ROOT_EVENT_KERNEL_CALLBACK_CREATION (0x40000000) + +/* RM event should be triggered only by the specified subdevice; see cl0005.h + * for details re: how to specify subdevice. 
*/ +#define NV01_EVENT_SUBDEVICE_SPECIFIC (0x20000000) + +/* RM should trigger the event but shouldn't do the book-keeping of data + * associated with that event */ +#define NV01_EVENT_WITHOUT_EVENT_DATA (0x10000000) + +/* RM event should be triggered only by the non-stall interrupt */ +#define NV01_EVENT_NONSTALL_INTR (0x08000000) + +/* RM event was allocated from client RM, post events back to client RM */ +#define NV01_EVENT_CLIENT_RM (0x04000000) + +/* function OS19 */ +#define NV04_I2C_ACCESS (0x00000013) + +#define NVOS_I2C_ACCESS_MAX_BUFFER_SIZE 2048 + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 paramSize; + NvP64 paramStructPtr NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS_I2C_ACCESS_PARAMS; + +/* current values for command */ +#define NVOS20_COMMAND_unused0001 0x0001 +#define NVOS20_COMMAND_unused0002 0x0002 +#define NVOS20_COMMAND_STRING_PRINT 0x0003 + +/* function OS21 */ +#define NV04_ALLOC (0x00000015) + +/* parameters */ +typedef struct +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS21_PARAMETERS; + +/* New struct with rights requested */ +typedef struct +{ + NvHandle hRoot; // [IN] client handle + NvHandle hObjectParent; // [IN] parent handle of new object + NvHandle hObjectNew; // [INOUT] new object handle, 0 to generate + NvV32 hClass; // [in] class num of new object + NvP64 pAllocParms NV_ALIGN_BYTES(8); // [IN] class-specific alloc parameters + NvP64 pRightsRequested NV_ALIGN_BYTES(8); // [IN] RS_ACCESS_MASK to request rights, or NULL + NvV32 status; // [OUT] status +} NVOS64_PARAMETERS; + +/* RM Alloc header + * + * Replacement for NVOS21/64_PARAMETERS where embedded pointers are not allowed. + * Input layout for RM Alloc user space calls should be + * + * +--- NVOS62_PARAMETERS ---+--- RM Alloc parameters ---+ + * +--- NVOS65_PARAMETERS ---+--- Rights Requested ---+--- RM Alloc parameters ---+ + * + * NVOS62_PARAMETERS::paramsSize is the size of RM Alloc parameters + * If NVOS65_PARAMETERS::maskSize is 0, Rights Requested will not be present in memory. 
+ * + */ +typedef struct +{ + NvHandle hRoot; // [IN] client handle + NvHandle hObjectParent; // [IN] parent handle of the new object + NvHandle hObjectNew; // [IN] new object handle + NvV32 hClass; // [IN] class num of the new object + NvU32 paramSize; // [IN] size in bytes of the RM alloc parameters + NvV32 status; // [OUT] status +} NVOS62_PARAMETERS; + +#define NVOS65_PARAMETERS_VERSION_MAGIC 0x77FEF81E + +typedef struct +{ + NvHandle hRoot; // [IN] client handle + NvHandle hObjectParent; // [IN] parent handle of the new object + NvHandle hObjectNew; // [INOUT] new object handle, 0 to generate + NvV32 hClass; // [IN] class num of the new object + NvU32 paramSize; // [IN] size in bytes of the RM alloc parameters + NvU32 versionMagic; // [IN] NVOS65_PARAMETERS_VERISON_MAGIC + NvU32 maskSize; // [IN] size in bytes of access mask, or 0 if NULL + NvV32 status; // [OUT] status +} NVOS65_PARAMETERS; + +/* function OS30 */ +#define NV04_IDLE_CHANNELS (0x0000001E) + +/* parameter values */ +#define NVOS30_FLAGS_BEHAVIOR 3:0 +#define NVOS30_FLAGS_BEHAVIOR_SPIN (0x00000000) +#define NVOS30_FLAGS_BEHAVIOR_SLEEP (0x00000001) +#define NVOS30_FLAGS_BEHAVIOR_QUERY (0x00000002) +#define NVOS30_FLAGS_BEHAVIOR_FORCE_BUSY_CHECK (0x00000003) +#define NVOS30_FLAGS_CHANNEL 7:4 +#define NVOS30_FLAGS_CHANNEL_LIST (0x00000000) +#define NVOS30_FLAGS_CHANNEL_SINGLE (0x00000001) +#define NVOS30_FLAGS_IDLE 30:8 +#define NVOS30_FLAGS_IDLE_PUSH_BUFFER (0x00000001) +#define NVOS30_FLAGS_IDLE_CACHE1 (0x00000002) +#define NVOS30_FLAGS_IDLE_GRAPHICS (0x00000004) +#define NVOS30_FLAGS_IDLE_MPEG (0x00000008) +#define NVOS30_FLAGS_IDLE_MOTION_ESTIMATION (0x00000010) +#define NVOS30_FLAGS_IDLE_VIDEO_PROCESSOR (0x00000020) +#define NVOS30_FLAGS_IDLE_MSPDEC (0x00000020) +#define NVOS30_FLAGS_IDLE_BITSTREAM_PROCESSOR (0x00000040) +#define NVOS30_FLAGS_IDLE_MSVLD (0x00000040) +#define NVOS30_FLAGS_IDLE_NVDEC0 NVOS30_FLAGS_IDLE_MSVLD +#define NVOS30_FLAGS_IDLE_CIPHER_DMA (0x00000080) +#define NVOS30_FLAGS_IDLE_SEC (0x00000080) +#define NVOS30_FLAGS_IDLE_CALLBACKS (0x00000100) +#define NVOS30_FLAGS_IDLE_MSPPP (0x00000200) +#define NVOS30_FLAGS_IDLE_CE0 (0x00000400) +#define NVOS30_FLAGS_IDLE_CE1 (0x00000800) +#define NVOS30_FLAGS_IDLE_CE2 (0x00001000) +#define NVOS30_FLAGS_IDLE_CE3 (0x00002000) +#define NVOS30_FLAGS_IDLE_CE4 (0x00004000) +#define NVOS30_FLAGS_IDLE_CE5 (0x00008000) +#define NVOS30_FLAGS_IDLE_VIC (0x00010000) +#define NVOS30_FLAGS_IDLE_MSENC (0x00020000) +#define NVOS30_FLAGS_IDLE_NVENC0 NVOS30_FLAGS_IDLE_MSENC +#define NVOS30_FLAGS_IDLE_NVENC1 (0x00040000) +#define NVOS30_FLAGS_IDLE_NVENC2 (0x00080000) +#define NVOS30_FLAGS_IDLE_NVJPG (0x00100000) +#define NVOS30_FLAGS_IDLE_NVDEC1 (0x00200000) +#define NVOS30_FLAGS_IDLE_NVDEC2 (0x00400000) +#define NVOS30_FLAGS_IDLE_ACTIVECHANNELS (0x00800000) +#define NVOS30_FLAGS_IDLE_ALL_ENGINES (NVOS30_FLAGS_IDLE_GRAPHICS | \ + NVOS30_FLAGS_IDLE_MPEG | \ + NVOS30_FLAGS_IDLE_MOTION_ESTIMATION | \ + NVOS30_FLAGS_IDLE_VIDEO_PROCESSOR | \ + NVOS30_FLAGS_IDLE_BITSTREAM_PROCESSOR | \ + NVOS30_FLAGS_IDLE_CIPHER_DMA | \ + NVOS30_FLAGS_IDLE_MSPDEC | \ + NVOS30_FLAGS_IDLE_NVDEC0 | \ + NVOS30_FLAGS_IDLE_SEC | \ + NVOS30_FLAGS_IDLE_MSPPP | \ + NVOS30_FLAGS_IDLE_CE0 | \ + NVOS30_FLAGS_IDLE_CE1 | \ + NVOS30_FLAGS_IDLE_CE2 | \ + NVOS30_FLAGS_IDLE_CE3 | \ + NVOS30_FLAGS_IDLE_CE4 | \ + NVOS30_FLAGS_IDLE_CE5 | \ + NVOS30_FLAGS_IDLE_NVENC0 | \ + NVOS30_FLAGS_IDLE_NVENC1 | \ + NVOS30_FLAGS_IDLE_NVENC2 | \ + NVOS30_FLAGS_IDLE_VIC | \ + NVOS30_FLAGS_IDLE_NVJPG | \ + NVOS30_FLAGS_IDLE_NVDEC1 | \ + 
NVOS30_FLAGS_IDLE_NVDEC2) +#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON 31:31 +#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON_NO (0x00000000) +#define NVOS30_FLAGS_WAIT_FOR_ELPG_ON_YES (0x00000001) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hChannel; + NvV32 numChannels; + + NvP64 phClients NV_ALIGN_BYTES(8); + NvP64 phDevices NV_ALIGN_BYTES(8); + NvP64 phChannels NV_ALIGN_BYTES(8); + + NvV32 flags; + NvV32 timeout; + NvV32 status; +} NVOS30_PARAMETERS; + +/* function OS32 */ +typedef void (*BindResultFunc)(void * pVoid, NvU32 gpuMask, NvU32 bState, NvU32 bResult); + +#define NV04_VID_HEAP_CONTROL (0x00000020) +/************************************************************************* +************************ New Heap Interface ****************************** +*************************************************************************/ +// NVOS32 Descriptor types +// +// NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR: The dma-buf object +// pointer, provided by the linux kernel buffer sharing sub-system. +// This descriptor can only be used by kernel space rm-clients. +// +#define NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS 0 +#define NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY 1 +#define NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY 2 +#define NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR 3 +#define NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE 4 +#define NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR 5 +#define NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR 6 +#define NVOS32_DESCRIPTOR_TYPE_KERNEL_VIRTUAL_ADDRESS 7 +// NVOS32 function +#define NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT 1 +#define NVOS32_FUNCTION_ALLOC_SIZE 2 +#define NVOS32_FUNCTION_FREE 3 +// #define NVOS32_FUNCTION_HEAP_PURGE 4 +#define NVOS32_FUNCTION_INFO 5 +#define NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT 6 +// #define NVOS32_FUNCTION_DESTROY 7 +// #define NVOS32_FUNCTION_RETAIN 9 +// #define NVOS32_FUNCTION_REALLOC 10 +#define NVOS32_FUNCTION_DUMP 11 +// #define NVOS32_FUNCTION_INFO_TYPE_ALLOC_BLOCKS 12 +#define NVOS32_FUNCTION_ALLOC_SIZE_RANGE 14 +#define NVOS32_FUNCTION_REACQUIRE_COMPR 15 +#define NVOS32_FUNCTION_RELEASE_COMPR 16 +// #define NVOS32_FUNCTION_MODIFY_DEFERRED_TILES 17 +#define NVOS32_FUNCTION_GET_MEM_ALIGNMENT 18 +#define NVOS32_FUNCTION_HW_ALLOC 19 +#define NVOS32_FUNCTION_HW_FREE 20 +// #define NVOS32_FUNCTION_SET_OFFSET 21 +// #define NVOS32_FUNCTION_IS_TILED 22 +// #define NVOS32_FUNCTION_ENABLE_RESOURCE 23 +// #define NVOS32_FUNCTION_BIND_COMPR 24 +#define NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR 27 + +typedef struct +{ + NvP64 sgt NV_ALIGN_BYTES(8); + NvP64 gem NV_ALIGN_BYTES(8); +} NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS; + +#define NVOS32_FLAGS_BLOCKINFO_VISIBILITY_CPU (0x00000001) +typedef struct +{ + NvU64 startOffset NV_ALIGN_BYTES(8); + NvU64 size NV_ALIGN_BYTES(8); + NvU32 flags; +} NVOS32_BLOCKINFO; + +// NVOS32 IVC-heap number delimiting value +#define NVOS32_IVC_HEAP_NUMBER_DONT_ALLOCATE_ON_IVC_HEAP 0 // When IVC heaps are present, + // IVC-heap number specified + // as part of 'NVOS32_PARAMETERS' + // which is less or equal to this + // constant indicates that allocation + // should not be done on IVC heap. + // Explanation of IVC-heap number is + // under 'AllocSize' structure below. 
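// [Editor's note: illustrative sketch; not part of the original header. It shows one plausible
//  way a kernel-mode RM client could fill NVOS32_PARAMETERS (defined just below) for an
//  NVOS32_FUNCTION_ALLOC_SIZE request. hClient/hDevice and the "SMPL" owner tag are
//  placeholders; DRF_DEF() is the standard single-word field macro from nvmisc.h, and the
//  NVOS32_TYPE_* / NVOS32_ATTR_* constants are defined further below in this file.]
//
//    NVOS32_PARAMETERS p = {0};
//    p.hRoot         = hClient;                       // client handle (NV01_ROOT_CLIENT object)
//    p.hObjectParent = hDevice;                       // device handle
//    p.function      = NVOS32_FUNCTION_ALLOC_SIZE;
//    p.data.AllocSize.owner = 0x534D504C;             // placeholder owner tag ("SMPL")
//    p.data.AllocSize.type  = NVOS32_TYPE_IMAGE;
//    p.data.AllocSize.size  = 4 * 1024 * 1024;        // 4 MB
//    p.data.AllocSize.attr  = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) |
//                             DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS) |
//                             DRF_DEF(OS32, _ATTR, _FORMAT, _PITCH);
//    // Issue the NV04_VID_HEAP_CONTROL call, then check p.status and read back
//    // p.data.AllocSize.hMemory, size, offset and the updated attr/attr2 fields.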
+ +typedef struct +{ + NvHandle hRoot; // [IN] - root object handle + NvHandle hObjectParent; // [IN] - device handle + NvU32 function; // [IN] - heap function, see below FUNCTION* defines + NvHandle hVASpace; // [IN] - VASpace handle + NvS16 ivcHeapNumber; // [IN] - When IVC heaps are present: either 1) number of the IVC heap + // shared between two VMs or 2) number indicating that allocation + // should not be done on an IVC heap. Values greater than constant + // 'NVOS32_IVC_HEAP_NUMBER_DONT_ALLOCATE_ON_IVC_HEAP' define set 1) + // and values less or equal to that constant define set 2). + // When IVC heaps are present, correct IVC-heap number must be specified. + // When IVC heaps are absent, IVC-heap number is diregarded. + // RM provides for each VM a bitmask of heaps with each bit + // specifying the other peer that can use the partition. + // Each bit set to one can be enumerated, such that the bit + // with lowest significance is enumerated with one. + // 'ivcHeapNumber' parameter specifies this enumeration value. + // This value is used to uniquely identify a heap shared between + // two particular VMs. + // Illustration: + // bitmask: 1 1 0 1 0 = 0x1A + // possible 'ivcHeapNumber' values: 3, 2, 1 + NvV32 status; // [OUT] - returned NVOS32* status code, see below STATUS* defines + NvU64 total NV_ALIGN_BYTES(8); // [OUT] - returned total size of heap + NvU64 free NV_ALIGN_BYTES(8); // [OUT] - returned free space available in heap + + union + { + // NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 depth; // [IN] - depth of surface in bits + NvU32 width; // [IN] - width of surface in pixels + NvU32 height; // [IN] - height of surface in pixels + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + NvU32 partitionStride; // [IN/OUT] - 0 means "RM" chooses + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvP64 address NV_ALIGN_BYTES(8);// [OUT] - returned address + NvU64 rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeEnd NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive. 
+ NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + } AllocDepthWidthHeight; + + // NVOS32_FUNCTION_ALLOC_SIZE + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + NvU32 partitionStride; // [IN/OUT] - 0 means "RM" chooses + NvU32 width; // [IN] - width "hint" used for zcull region allocations + NvU32 height; // [IN] - height "hint" used for zcull region allocations + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvP64 address NV_ALIGN_BYTES(8);// [OUT] - returned address + NvU64 rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeEnd NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive. + NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + } AllocSize; + + // NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 height; // [IN] - height of surface in pixels + NvS32 pitch; // [IN/OUT] - desired pitch AND returned actual pitch allocated + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 width; //[IN] - width of surface in pixels + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + NvU32 partitionStride; // [IN/OUT] - 0 means "RM" chooses + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvP64 address NV_ALIGN_BYTES(8);// [OUT] - returned address + NvU64 rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeEnd NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive. 
+ NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + } AllocTiledPitchHeight; + + // NVOS32_FUNCTION_FREE + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN] - unique memory handle + NvU32 flags; // [IN] - heap free flags (must be NVOS32_FREE_FLAGS_MEMORY_HANDLE_PROVIDED) + } Free; + + // NVOS32_FUNCTION_RELEASE_COMPR + struct + { + NvU32 owner; // [IN] - memory owner ID + NvU32 flags; // [IN] - must be NVOS32_RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED + NvHandle hMemory; // [IN] - unique memory handle (valid if _RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED + } ReleaseCompr; + + // NVOS32_FUNCTION_REACQUIRE_COMPR + struct + { + NvU32 owner; // [IN] - memory owner ID + NvU32 flags; // [IN] - must be NVOS32_REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED + NvHandle hMemory; // [IN] - unique memory handle (valid if _REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED + } ReacquireCompr; + + // NVOS32_FUNCTION_INFO + struct + { + NvU32 attr; // [IN] - memory heap attributes requested + NvU64 offset NV_ALIGN_BYTES(8); // [OUT] - base of largest free block + NvU64 size NV_ALIGN_BYTES(8); // [OUT] - size of largest free block + NvU64 base NV_ALIGN_BYTES(8); // [OUT] - returned heap phys base + } Info; + + // NVOS32_FUNCTION_DUMP + struct + { + NvU32 flags; // [IN] - see _DUMP_FLAGS + // [IN] - if NULL, numBlocks is the returned number of blocks in + // heap, else returns all blocks in eHeap + // if non-NULL points to a buffer that is at least numBlocks + // * sizeof(NVOS32_HEAP_DUMP_BLOCK) bytes. + NvP64 pBuffer NV_ALIGN_BYTES(8); + // [IN/OUT] - if pBuffer is NULL, will number of blocks in heap + // if pBuffer is non-NULL, is input containing the size of + // pBuffer in units of NVOS32_HEAP_DUMP_BLOCK. This must + // be greater than or equal to the number of blocks in the + // heap. + NvU32 numBlocks; + } Dump; + + // NVOS32_FUNCTION_DESTROY - no extra parameters needed + + // NVOS32_FUNCTION_ALLOC_SIZE_RANGE + struct + { + NvU32 owner; // [IN] - memory owner ID + NvHandle hMemory; // [IN] - unique memory handle + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + NvU32 partitionStride; // [IN/OUT] - 0 means "RM" chooses + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvU64 rangeBegin NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeEnd NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive. 
+ NvP64 address NV_ALIGN_BYTES(8);// [OUT] - returned address + NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + } AllocSizeRange; + + // additions for Longhorn +#define NVAL_MAX_BANKS (4) +#define NVAL_MAP_DIRECTION 0:0 +#define NVAL_MAP_DIRECTION_DOWN 0x00000000 +#define NVAL_MAP_DIRECTION_UP 0x00000001 + + // NVOS32_FUNCTION_GET_MEM_ALIGNMENT + struct + { + NvU32 alignType; // Input + NvU32 alignAttr; + NvU32 alignInputFlags; + NvU64 alignSize NV_ALIGN_BYTES(8); + NvU32 alignHeight; + NvU32 alignWidth; + NvU32 alignPitch; + NvU32 alignPad; + NvU32 alignMask; + NvU32 alignOutputFlags[NVAL_MAX_BANKS]; // We could compress this information but it is probably not that big of a deal + NvU32 alignBank[NVAL_MAX_BANKS]; + NvU32 alignKind; + NvU32 alignAdjust; // Output -- If non-zero the amount we need to adjust the offset + NvU32 alignAttr2; + } AllocHintAlignment; + + struct + { + NvU32 allocOwner; // [IN] - memory owner ID + NvHandle allochMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 flags; + NvU32 allocType; // Input + NvU32 allocAttr; + NvU32 allocInputFlags; + NvU64 allocSize NV_ALIGN_BYTES(8); + NvU32 allocHeight; + NvU32 allocWidth; + NvU32 allocPitch; + NvU32 allocMask; + NvU32 allocComprCovg; + NvU32 allocZcullCovg; + NvP64 bindResultFunc NV_ALIGN_BYTES(8); // BindResultFunc + NvP64 pHandle NV_ALIGN_BYTES(8); + NvHandle hResourceHandle; // Handle to RM container + NvU32 retAttr; // Output Indicates the resources that we allocated + NvU32 kind; + NvU64 osDeviceHandle NV_ALIGN_BYTES(8); + NvU32 allocAttr2; + NvU32 retAttr2; // Output Indicates the resources that we allocated + NvU64 allocAddr NV_ALIGN_BYTES(8); + // [out] from GMMU_COMPR_INFO in drivers/common/shared/inc/mmu/gmmu_fmt.h + struct + { + NvU32 compPageShift; + NvU32 compressedKind; + NvU32 compTagLineMin; + NvU32 compPageIndexLo; + NvU32 compPageIndexHi; + NvU32 compTagLineMultiplier; + } comprInfo; + // [out] fallback uncompressed kind. + NvU32 uncompressedKind; + } HwAlloc; + + // NVOS32_FUNCTION_HW_FREE + struct + { + NvHandle hResourceHandle; // Handle to RM Resource Info + NvU32 flags; // Indicate if HW Resources and/or Memory + } HwFree; +// Updated interface check. +#define NV_RM_OS32_ALLOC_OS_DESCRIPTOR_WITH_OS32_ATTR 1 + + // NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR + struct + { + NvHandle hMemory; // [IN/OUT] - unique memory handle - IN only if MEMORY_HANDLE_PROVIDED is set (otherwise generated) + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 attr; // [IN] - attributes for memory placement/properties, see below + NvU32 attr2; // [IN] - attributes GPU_CACHEABLE + NvP64 descriptor NV_ALIGN_BYTES(8); // [IN] - descriptor address + NvU64 limit NV_ALIGN_BYTES(8); // [IN] - allocated size -1 + NvU32 descriptorType; // [IN] - descriptor type(Virtual | nvmap Handle) + } AllocOsDesc; + + } data; +} NVOS32_PARAMETERS; + +typedef struct +{ + NvU32 owner; // owner id - NVOS32_BLOCK_TYPE_FREE or defined by client during heap_alloc + NvU32 format; // arch specific format/kind + NvU64 begin NV_ALIGN_BYTES(8); // start of allocated memory block + NvU64 align NV_ALIGN_BYTES(8); // actual start of usable memory, aligned to chip specific boundary + NvU64 end NV_ALIGN_BYTES(8); // end of usable memory. 
end - align + 1 = size of block +} NVOS32_HEAP_DUMP_BLOCK; + + +#define NVOS32_DELETE_RESOURCES_ALL 0 + +// type field +#define NVOS32_TYPE_IMAGE 0 +#define NVOS32_TYPE_DEPTH 1 +#define NVOS32_TYPE_TEXTURE 2 +#define NVOS32_TYPE_VIDEO 3 +#define NVOS32_TYPE_FONT 4 +#define NVOS32_TYPE_CURSOR 5 +#define NVOS32_TYPE_DMA 6 +#define NVOS32_TYPE_INSTANCE 7 +#define NVOS32_TYPE_PRIMARY 8 +#define NVOS32_TYPE_ZCULL 9 +#define NVOS32_TYPE_UNUSED 10 +#define NVOS32_TYPE_SHADER_PROGRAM 11 +#define NVOS32_TYPE_OWNER_RM 12 +#define NVOS32_TYPE_NOTIFIER 13 +#define NVOS32_TYPE_RESERVED 14 +#define NVOS32_TYPE_PMA 15 +#define NVOS32_TYPE_STENCIL 16 +#define NVOS32_NUM_MEM_TYPES 17 + +// Surface attribute field - bitmask of requested attributes the surface +// should have. +// This value is updated to reflect what was actually allocated, and so this +// field must be checked after every allocation to determine what was +// allocated. Pass in the ANY tags to indicate that RM should fall back but +// still succeed the alloc. +// for example, if tiled_any is passed in, but no tile ranges are available, +// RM will allocate normal memory and indicate that in the returned attr field. +// Each returned attribute will have the REQUIRED field set if that attribute +// applies to the allocated surface. + +#define NVOS32_ATTR_NONE 0x00000000 + +#define NVOS32_ATTR_DEPTH 2:0 +#define NVOS32_ATTR_DEPTH_UNKNOWN 0x00000000 +#define NVOS32_ATTR_DEPTH_8 0x00000001 +#define NVOS32_ATTR_DEPTH_16 0x00000002 +#define NVOS32_ATTR_DEPTH_24 0x00000003 +#define NVOS32_ATTR_DEPTH_32 0x00000004 +#define NVOS32_ATTR_DEPTH_64 0x00000005 +#define NVOS32_ATTR_DEPTH_128 0x00000006 + +#define NVOS32_ATTR_COMPR_COVG 3:3 +#define NVOS32_ATTR_COMPR_COVG_DEFAULT 0x00000000 +#define NVOS32_ATTR_COMPR_COVG_PROVIDED 0x00000001 + +// Surface description - number of AA samples +// This number should only reflect AA done in hardware, not in software. For +// example, OpenGL's 8x AA mode is a mix of 2x hardware multisample and 2x2 +// software supersample. +// OpenGL should specify ATTR_AA_SAMPLES of 2 in this case, not 8, because +// the hardware will be programmed to run in 2x AA mode. +// Note that X_VIRTUAL_Y means X real samples with Y samples total (i.e. Y +// does not indicate the number of virtual samples). For instance, what +// arch and HW describe as NV_PGRAPH_ANTI_ALIAS_SAMPLES_MODE_2X2_VC_12 +// corresponds to NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16 here. + +#define NVOS32_ATTR_AA_SAMPLES 7:4 +#define NVOS32_ATTR_AA_SAMPLES_1 0x00000000 +#define NVOS32_ATTR_AA_SAMPLES_2 0x00000001 +#define NVOS32_ATTR_AA_SAMPLES_4 0x00000002 +#define NVOS32_ATTR_AA_SAMPLES_4_ROTATED 0x00000003 +#define NVOS32_ATTR_AA_SAMPLES_6 0x00000004 +#define NVOS32_ATTR_AA_SAMPLES_8 0x00000005 +#define NVOS32_ATTR_AA_SAMPLES_16 0x00000006 +#define NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_8 0x00000007 +#define NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16 0x00000008 +#define NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_16 0x00000009 +#define NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_32 0x0000000A + +// Tiled region +#define NVOS32_ATTR_TILED 9:8 +#define NVOS32_ATTR_TILED_NONE 0x00000000 +#define NVOS32_ATTR_TILED_REQUIRED 0x00000001 +#define NVOS32_ATTR_TILED_ANY 0x00000002 +#define NVOS32_ATTR_TILED_DEFERRED 0x00000003 + +// Zcull region (NV40 and up) +// If ATTR_ZCULL is REQUIRED or ANY and ATTR_DEPTH is UNKNOWN, the +// allocation will fail. +// If ATTR_DEPTH or ATTR_AA_SAMPLES is not accurate, erroneous rendering +// may result. 
+#define NVOS32_ATTR_ZCULL 11:10 +#define NVOS32_ATTR_ZCULL_NONE 0x00000000 +#define NVOS32_ATTR_ZCULL_REQUIRED 0x00000001 +#define NVOS32_ATTR_ZCULL_ANY 0x00000002 +#define NVOS32_ATTR_ZCULL_SHARED 0x00000003 + +// Compression (NV20 and up) +// If ATTR_COMPR is REQUIRED or ANY and ATTR_DEPTH is UNKNOWN, the +// allocation will fail. +// If ATTR_DEPTH or ATTR_AA_SAMPLES is not accurate, performance will +// suffer heavily +#define NVOS32_ATTR_COMPR 13:12 +#define NVOS32_ATTR_COMPR_NONE 0x00000000 +#define NVOS32_ATTR_COMPR_REQUIRED 0x00000001 +#define NVOS32_ATTR_COMPR_ANY 0x00000002 +#define NVOS32_ATTR_COMPR_PLC_REQUIRED NVOS32_ATTR_COMPR_REQUIRED +#define NVOS32_ATTR_COMPR_PLC_ANY NVOS32_ATTR_COMPR_ANY +#define NVOS32_ATTR_COMPR_DISABLE_PLC_ANY 0x00000003 + +// Format +// _BLOCK_LINEAR is only available for nv50+. +#define NVOS32_ATTR_FORMAT 17:16 +// Macros representing the low/high bits of NVOS32_ATTR_FORMAT +// bit range. These provide direct access to the range limits +// without needing to split the low:high representation via +// ternary operator, thereby avoiding MISRA 14.3 violation. +#define NVOS32_ATTR_FORMAT_LOW_FIELD 16 +#define NVOS32_ATTR_FORMAT_HIGH_FIELD 17 +#define NVOS32_ATTR_FORMAT_PITCH 0x00000000 +#define NVOS32_ATTR_FORMAT_SWIZZLED 0x00000001 +#define NVOS32_ATTR_FORMAT_BLOCK_LINEAR 0x00000002 + +#define NVOS32_ATTR_Z_TYPE 18:18 +#define NVOS32_ATTR_Z_TYPE_FIXED 0x00000000 +#define NVOS32_ATTR_Z_TYPE_FLOAT 0x00000001 + +#define NVOS32_ATTR_ZS_PACKING 21:19 +#define NVOS32_ATTR_ZS_PACKING_S8 0x00000000 // Z24S8 and S8 share definition +#define NVOS32_ATTR_ZS_PACKING_Z24S8 0x00000000 +#define NVOS32_ATTR_ZS_PACKING_S8Z24 0x00000001 +#define NVOS32_ATTR_ZS_PACKING_Z32 0x00000002 +#define NVOS32_ATTR_ZS_PACKING_Z24X8 0x00000003 +#define NVOS32_ATTR_ZS_PACKING_X8Z24 0x00000004 +#define NVOS32_ATTR_ZS_PACKING_Z32_X24S8 0x00000005 +#define NVOS32_ATTR_ZS_PACKING_X8Z24_X24S8 0x00000006 +#define NVOS32_ATTR_ZS_PACKING_Z16 0x00000007 +// NOTE: ZS packing and color packing fields are overlaid +#define NVOS32_ATTR_COLOR_PACKING NVOS32_ATTR_ZS_PACKING +#define NVOS32_ATTR_COLOR_PACKING_A8R8G8B8 0x00000000 +#define NVOS32_ATTR_COLOR_PACKING_X8R8G8B8 0x00000001 + + + +// +// For virtual allocs to choose page size for the region. Specifying +// _DEFAULT will select a virtual page size that allows for a surface +// to be mixed between video and system memory and allow the surface +// to be migrated between video and system memory. For tesla chips, +// 4KB will be used. For fermi chips with dual page tables, a virtual +// address with both page tables will be used. +// +// For physical allocation on chips with page swizzle this field is +// used to select the page swizzle. This later also sets the virtual +// page size, but does not have influence over selecting a migratable +// virtual address. That must be selected when mapping the physical +// memory. +// +// BIG_PAGE = 64 KB on PASCAL +// = 64 KB or 128 KB on pre_PASCAL chips +// +// HUGE_PAGE = 2 MB on PASCAL+ +// = 2 MB or 512 MB on AMPERE+ +// = not supported on pre_PASCAL chips. +// +// To request for a HUGE page size, +// set NVOS32_ATTR_PAGE_SIZE to _HUGE and NVOS32_ATTR2_PAGE_SIZE_HUGE to +// the desired size. 
+// +#define NVOS32_ATTR_PAGE_SIZE 24:23 +#define NVOS32_ATTR_PAGE_SIZE_DEFAULT 0x00000000 +#define NVOS32_ATTR_PAGE_SIZE_4KB 0x00000001 +#define NVOS32_ATTR_PAGE_SIZE_BIG 0x00000002 +#define NVOS32_ATTR_PAGE_SIZE_HUGE 0x00000003 + +#define NVOS32_ATTR_LOCATION 26:25 +#define NVOS32_ATTR_LOCATION_VIDMEM 0x00000000 +#define NVOS32_ATTR_LOCATION_PCI 0x00000001 +#define NVOS32_ATTR_LOCATION_AGP 0x00000002 +#define NVOS32_ATTR_LOCATION_ANY 0x00000003 + +// +// _DEFAULT implies _CONTIGUOUS for video memory currently, but +// may be changed to imply _NONCONTIGUOUS in the future. +// _ALLOW_NONCONTIGUOUS enables falling back to the noncontiguous +// vidmem allocator if contig allocation fails. +// +#define NVOS32_ATTR_PHYSICALITY 28:27 +#define NVOS32_ATTR_PHYSICALITY_DEFAULT 0x00000000 +#define NVOS32_ATTR_PHYSICALITY_NONCONTIGUOUS 0x00000001 +#define NVOS32_ATTR_PHYSICALITY_CONTIGUOUS 0x00000002 +#define NVOS32_ATTR_PHYSICALITY_ALLOW_NONCONTIGUOUS 0x00000003 + +#define NVOS32_ATTR_COHERENCY 31:29 +#define NVOS32_ATTR_COHERENCY_UNCACHED 0x00000000 +#define NVOS32_ATTR_COHERENCY_CACHED 0x00000001 +#define NVOS32_ATTR_COHERENCY_WRITE_COMBINE 0x00000002 +#define NVOS32_ATTR_COHERENCY_WRITE_THROUGH 0x00000003 +#define NVOS32_ATTR_COHERENCY_WRITE_PROTECT 0x00000004 +#define NVOS32_ATTR_COHERENCY_WRITE_BACK 0x00000005 + +// ATTR2 fields +#define NVOS32_ATTR2_NONE 0x00000000 + +// +// DEFAULT - Let lower level drivers pick optimal page kind. +// PREFER_NO_ZBC - Prefer other types of compression over ZBC when +// selecting page kind. +// PREFER_ZBC - Prefer ZBC over other types of compression when +// selecting page kind. +// REQUIRE_ONLY_ZBC - Require a page kind that enables ZBC but disables +// other types of compression (i.e. 2C page kind). +// INVALID - Aliases REQUIRE_ONLY_ZBC, which is not supported +// by all RM implementations. +// +#define NVOS32_ATTR2_ZBC 1:0 +#define NVOS32_ATTR2_ZBC_DEFAULT 0x00000000 +#define NVOS32_ATTR2_ZBC_PREFER_NO_ZBC 0x00000001 +#define NVOS32_ATTR2_ZBC_PREFER_ZBC 0x00000002 +#define NVOS32_ATTR2_ZBC_REQUIRE_ONLY_ZBC 0x00000003 +#define NVOS32_ATTR2_ZBC_INVALID 0x00000003 + +// +// DEFAULT - Highest performance cache policy that is coherent with the highest +// performance CPU mapping. Typically this is gpu cached for video +// memory and gpu uncached for system memory. +// YES - Enable gpu caching if supported on this surface type. For system +// memory this will not be coherent with direct CPU mappings. +// NO - Disable gpu caching if supported on this surface type. +// INVALID - Clients should never set YES and NO simultaneously. +// +#define NVOS32_ATTR2_GPU_CACHEABLE 3:2 +#define NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT 0x00000000 +#define NVOS32_ATTR2_GPU_CACHEABLE_YES 0x00000001 +#define NVOS32_ATTR2_GPU_CACHEABLE_NO 0x00000002 +#define NVOS32_ATTR2_GPU_CACHEABLE_INVALID 0x00000003 + +// +// DEFAULT - GPU-dependent cache policy +// YES - Enable gpu caching for p2p mem +// NO - Disable gpu caching for p2p mem +// +#define NVOS32_ATTR2_P2P_GPU_CACHEABLE 5:4 +#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_DEFAULT 0x00000000 +#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_YES 0x00000001 +#define NVOS32_ATTR2_P2P_GPU_CACHEABLE_NO 0x00000002 + +// This applies to virtual allocs only. See NVOS46_FLAGS_32BIT_POINTER. 
+#define NVOS32_ATTR2_32BIT_POINTER 6:6 +#define NVOS32_ATTR2_32BIT_POINTER_DISABLE 0x00000000 +#define NVOS32_ATTR2_32BIT_POINTER_ENABLE 0x00000001 + +// +// Indicates address conversion to be used, which affects what +// pitch alignment needs to be used +// +#define NVOS32_ATTR2_TILED_TYPE 7:7 +#define NVOS32_ATTR2_TILED_TYPE_LINEAR 0x00000000 +#define NVOS32_ATTR2_TILED_TYPE_XY 0x00000001 + +// +// Force SMMU mapping on GPU physical allocation in Tegra +// SMMU mapping for GPU physical allocation decided internally by RM +// This attribute provide an override to RM policy for verification purposes. +// +#define NVOS32_ATTR2_SMMU_ON_GPU 10:8 +#define NVOS32_ATTR2_SMMU_ON_GPU_DEFAULT 0x00000000 +#define NVOS32_ATTR2_SMMU_ON_GPU_DISABLE 0x00000001 +#define NVOS32_ATTR2_SMMU_ON_GPU_ENABLE 0x00000002 + +// +// Make comptag allocation aligned to compression cacheline size. +// Specifying this attribute will make RM allocate comptags worth an entire +// comp cacheline. The allocation will be offset aligned to number of comptags/comp cacheline. +// +#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN 11:11 +#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_OFF 0x0 +#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_ON 0x1 +#define NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_DEFAULT \ + NVOS32_ATTR2_ALLOC_COMPCACHELINE_ALIGN_OFF + +// Allocation preferred in high or low priority memory +#define NVOS32_ATTR2_PRIORITY 13:12 +#define NVOS32_ATTR2_PRIORITY_DEFAULT 0x0 +#define NVOS32_ATTR2_PRIORITY_HIGH 0x1 +#define NVOS32_ATTR2_PRIORITY_LOW 0x2 + +// PMA: Allocation is an RM internal allocation (RM-only) +#define NVOS32_ATTR2_INTERNAL 14:14 +#define NVOS32_ATTR2_INTERNAL_NO 0x0 +#define NVOS32_ATTR2_INTERNAL_YES 0x1 + +// Allocate 2C instead of 2CZ +#define NVOS32_ATTR2_PREFER_2C 15:15 +#define NVOS32_ATTR2_PREFER_2C_NO 0x00000000 +#define NVOS32_ATTR2_PREFER_2C_YES 0x00000001 + +// Allocation used by display engine; RM verifies display engine has enough +// address bits or remapper available. +#define NVOS32_ATTR2_NISO_DISPLAY 16:16 +#define NVOS32_ATTR2_NISO_DISPLAY_NO 0x00000000 +#define NVOS32_ATTR2_NISO_DISPLAY_YES 0x00000001 + +// +// !!WARNING!!! +// +// This flag is introduced as a temporary WAR to enable color compression +// without ZBC. +// +// This dangerous flag can be used by UMDs to instruct RM to skip the zbc +// table refcounting that RM does today, when the chosen PTE kind has ZBC +// support. +// +// Currently we do not have a safe per process zbc slot management and +// refcounting mechanism between RM and UMD and hence, any process can +// access any other process's zbc entry in the global zbc table (without mask) +// Inorder to flush the ZBC table for slot reuse RM cannot track which +// process is using which zbc slot. Hence RM has a global refcount for the +// zbc table to flush and reuse the entries if the PTE kind supports zbc. +// +// This scheme poses a problem if there are apps that are persistent such as +// the desktop components that can have color compression enabled which will +// always keep the refcount active. Since these apps can live without +// ZBC, UMD can disable ZBC using masks. +// +// In such a case, if UMD so chooses to disable ZBC, this flag should be used +// to skip refcounting as by default RM would refcount the ZBC table. +// +// NOTE: There is no way for RM to enforce/police this, and we totally rely +// on UMD to use a zbc mask in the pushbuffer method to prevent apps from +// accessing the ZBC table. 
+//
+#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT                  17:17
+#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT_NO               0x00000000
+#define NVOS32_ATTR2_ZBC_SKIP_ZBCREFCOUNT_YES              0x00000001
+
+// Allocation requires ISO bandwidth guarantees
+#define NVOS32_ATTR2_ISO                                   18:18
+#define NVOS32_ATTR2_ISO_NO                                0x00000000
+#define NVOS32_ATTR2_ISO_YES                               0x00000001
+
+//
+// Turn off the blacklist feature for video memory allocation.
+// This attribute should be used only by a kernel client (KMD) to mask
+// the blacklisted pages for the allocation. This is done so that the client
+// will manage the masked blacklisted pages after the allocation; they are
+// returned to RM's pool after the allocation is freed. RmVidHeapCtrl returns
+// NV_ERR_INSUFFICIENT_PERMISSIONS if it is called by non-kernel clients.
+//
+
+// TODO: Project ReLingo - This term is marked for deletion. Use PAGE_OFFLINING.
+#define NVOS32_ATTR2_BLACKLIST                             19:19
+#define NVOS32_ATTR2_BLACKLIST_ON                          0x00000000
+#define NVOS32_ATTR2_BLACKLIST_OFF                         0x00000001
+#define NVOS32_ATTR2_PAGE_OFFLINING                        19:19
+#define NVOS32_ATTR2_PAGE_OFFLINING_ON                     0x00000000
+#define NVOS32_ATTR2_PAGE_OFFLINING_OFF                    0x00000001
+
+//
+// For virtual allocs to choose the HUGE page size for the region.
+// NVOS32_ATTR_PAGE_SIZE must be set to _HUGE to use this.
+// Currently, the default huge page is 2MB, so a request with _DEFAULT
+// set will always be interpreted as 2MB.
+// Not supported on pre_AMPERE chips.
+//
+#define NVOS32_ATTR2_PAGE_SIZE_HUGE                        21:20
+#define NVOS32_ATTR2_PAGE_SIZE_HUGE_DEFAULT                0x00000000
+#define NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB                    0x00000001
+#define NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB                  0x00000002
+
+// Allow read-only or read-write user CPU mappings
+#define NVOS32_ATTR2_PROTECTION_USER                       22:22
+#define NVOS32_ATTR2_PROTECTION_USER_READ_WRITE            0x00000000
+#define NVOS32_ATTR2_PROTECTION_USER_READ_ONLY             0x00000001
+
+// Allow read-only or read-write device mappings
+#define NVOS32_ATTR2_PROTECTION_DEVICE                     23:23
+#define NVOS32_ATTR2_PROTECTION_DEVICE_READ_WRITE          0x00000000
+#define NVOS32_ATTR2_PROTECTION_DEVICE_READ_ONLY           0x00000001
+
+//
+// Force the allocation to go to guest subheap.
+// This flag is used by vmiop plugin to allocate from GPA
+//
+#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP                 27:27
+#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP_NO              0x00000000
+#define NVOS32_ATTR2_ALLOCATE_FROM_SUBHEAP_YES             0x00000001
+
+/**
+ * NVOS32 ALLOC_FLAGS
+ *
+ * NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT
+ *
+ * NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP
+ *
+ * NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN
+ *
+ * NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE
+ *
+ * NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE
+ *
+ * NVOS32_ALLOC_FLAGS_BANK_HINT
+ *
+ * NVOS32_ALLOC_FLAGS_BANK_FORCE
+ *
+ * NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT
+ *
+ * NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE
+ *
+ * NVOS32_ALLOC_FLAGS_BANK_GROW_UP
+ *     Only relevant if bank_hint or bank_force are set
+ *
+ * NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN
+ *     Only relevant if bank_hint or bank_force are set
+ *
+ * NVOS32_ALLOC_FLAGS_LAZY
+ *     Lazy allocation (deferred pde, pagetable creation)
+ *
+ * NVOS32_ALLOC_FLAGS_NO_SCANOUT
+ *     Set if surface will never be scanned out
+ *
+ * NVOS32_ALLOC_FLAGS_PITCH_FORCE
+ *     Fail alloc if supplied pitch is not aligned
+ *
+ * NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED
+ *     Memory handle provided to be associated with this allocation
+ *
+ * NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED
+ *     By default memory is mapped into the CPU address space
+ *
+ * NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM
+ *     Allocate persistent video memory
+ *
+ * NVOS32_ALLOC_FLAGS_USE_BEGIN_END
+ *     Use rangeBegin & rangeEnd fields in allocs other than size/range
+ *
+ * NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED
+ *     Allocate TurboCipher encrypted region
+ *
+ * NVOS32_ALLOC_FLAGS_VIRTUAL
+ *     Allocate virtual memory address space
+ *
+ * NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX
+ *     Force allocation internal index
+ *
+ * NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED
+ *     This flag is deprecated and allocations will fail.
+ *
+ * NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED
+ *     Must be used with NVOS32_ALLOC_FLAGS_VIRTUAL.
+ *     Page tables for this allocation will be managed outside of RM.
+ *
+ * NVOS32_ALLOC_FLAGS_FORCE_DEDICATED_PDE
+ *
+ * NVOS32_ALLOC_FLAGS_PROTECTED
+ *     Allocate in a protected memory region if available
+ *
+ * NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP
+ *     Map kernel os descriptor
+ *
+ * NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE
+ *     On WDDM all address spaces are created with MINIMIZE_PTETABLE_SIZE
+ *     to reduce the overhead of private address spaces per application,
+ *     at the cost of holes in the virtual address space.
+ *
+ *     Shaders have short pointers that are required to be within a
+ *     GPU dependent 32b range.
+ *
+ *     MAXIMIZE_ADDRESS_SPACE will reverse the MINIMIZE_PTETABLE_SIZE
+ *     flag with certain restrictions:
+ *     - This flag only has an effect when the allocation has the side
+ *       effect of creating a new PDE. It does not affect existing PDEs.
+ *     - The first few PDEs of the address space are kept minimum to allow
+ *       small applications to use fewer resources.
+ *     - By default this operates on the 0-4GB address range.
+ *     - If USE_BEGIN_END is specified the setting will apply to the
+ *       specified range instead of the first 4GB.
+ *
+ * NVOS32_ALLOC_FLAGS_SPARSE
+ *     Denote that a virtual address range is "sparse". Must be used with
+ *     NVOS32_ALLOC_FLAGS_VIRTUAL. Creation of a "sparse" virtual address range
+ *     denotes that an unmapped virtual address range should "not" fault but simply
+ *     return 0's.
+ *
+ * NVOS32_ALLOC_FLAGS_ALLOCATE_KERNEL_PRIVILEGED
+ *     This is a special flag that can be used only by kernel (root) clients
+ *     to allocate memory out of a protected region of the address space.
+ *     If this flag is set by non-kernel clients then the allocation will
+ *     fail.
+ *
+ * NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC
+ *
+ * NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY
+ *     If new pagetables need to be allocated, prefer them in sysmem (if supported by the gpu)
+ *
+ * NVOS32_ALLOC_FLAGS_SKIP_ALIGN_PAD
+ *     As per KMD request to eliminate extra allocation
+ *
+ * NVOS32_ALLOC_FLAGS_WPR1
+ *     Allocate in a WPR1 region if available
+ *
+ * NVOS32_ALLOC_FLAGS_ZCULL_DONT_ALLOCATE_SHARED_1X
+ *     If using zcull sharing and this surface is fsaa, then don't allocate an additional non-FSAA region.
+ *
+ * NVOS32_ALLOC_FLAGS_WPR2
+ *     Allocate in a WPR2 region if available
+ */
+#define NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT            0x00000001
+#define NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP               0x00000002
+#define NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN             0x00000004
+#define NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE            0x00000008
+#define NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE           0x00000010
+#define NVOS32_ALLOC_FLAGS_BANK_HINT                        0x00000020
+#define NVOS32_ALLOC_FLAGS_BANK_FORCE                       0x00000040
+#define NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT                   0x00000080
+#define NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE                  0x00000100
+#define NVOS32_ALLOC_FLAGS_BANK_GROW_UP                     0x00000000
+#define NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN                   0x00000200
+#define NVOS32_ALLOC_FLAGS_LAZY                             0x00000400
+// unused                                                   0x00000800
+#define NVOS32_ALLOC_FLAGS_NO_SCANOUT                       0x00001000
+#define NVOS32_ALLOC_FLAGS_PITCH_FORCE                      0x00002000
+#define NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED           0x00004000
+#define NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED                 0x00008000
+#define NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM                0x00010000
+#define NVOS32_ALLOC_FLAGS_USE_BEGIN_END                    0x00020000
+#define NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED           0x00040000
+#define NVOS32_ALLOC_FLAGS_VIRTUAL                          0x00080000
+#define NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX             0x00100000
+#define NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED             0x00200000
+#define NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED               0x00400000
+#define NVOS32_ALLOC_FLAGS_FORCE_DEDICATED_PDE              0x00800000
+#define NVOS32_ALLOC_FLAGS_PROTECTED                        0x01000000
+#define NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP               0x02000000 // TODO BUG 2488679: fix alloc flag aliasing
+#define NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE           0x02000000
+#define NVOS32_ALLOC_FLAGS_SPARSE                           0x04000000
+#define NVOS32_ALLOC_FLAGS_USER_READ_ONLY                   0x04000000 // TODO BUG 2488682: remove this after KMD transition
+#define NVOS32_ALLOC_FLAGS_DEVICE_READ_ONLY                 0x08000000 // TODO BUG 2488682: remove this after KMD transition
+#define NVOS32_ALLOC_FLAGS_ALLOCATE_KERNEL_PRIVILEGED       0x08000000
+#define NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC              0x10000000
+#define NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY         0x20000000
+#define NVOS32_ALLOC_FLAGS_SKIP_ALIGN_PAD                   0x40000000
+#define NVOS32_ALLOC_FLAGS_WPR1                             0x40000000 // TODO BUG 2488672: fix alloc flag aliasing
+#define NVOS32_ALLOC_FLAGS_ZCULL_DONT_ALLOCATE_SHARED_1X    0x80000000
+#define NVOS32_ALLOC_FLAGS_WPR2                             0x80000000 // TODO BUG 2488672: fix alloc flag aliasing
+
+// Internal flags used for RM's allocation paths
+#define NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC             0x00000001 // RM internal flags - not sure if this should be exposed even. Keeping it here.
+#define NVOS32_ALLOC_INTERNAL_FLAGS_SKIP_SCRUB              0x00000004 // RM internal flags - not sure if this should be exposed even. Keeping it here.
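/*
 * [Editorial illustration -- not part of the original diff.]
 * A rough sketch of how the flags/attr/attr2 words above are combined.
 * The first snippet requests contiguous video memory backed by 2 MB huge
 * pages (NVOS32_ATTR_PAGE_SIZE set to _HUGE and NVOS32_ATTR2_PAGE_SIZE_HUGE
 * set to _2MB, as described earlier); the second requests a sparse virtual
 * range, which is only legal together with NVOS32_ALLOC_FLAGS_VIRTUAL.
 * Shift values mirror the bit ranges documented above; a real client would
 * normally use the DRF helper macros, and the helper names are hypothetical.
 */
static inline void nvos32BuildHugePageVidmemExample(NvU32 *pFlags, NvU32 *pAttr, NvU32 *pAttr2)
{
    *pFlags = NVOS32_ALLOC_FLAGS_NO_SCANOUT;
    *pAttr  = (NVOS32_ATTR_PAGE_SIZE_HUGE         << 23)  /* NVOS32_ATTR_PAGE_SIZE       is bits 24:23 */
            | (NVOS32_ATTR_LOCATION_VIDMEM        << 25)  /* NVOS32_ATTR_LOCATION        is bits 26:25 */
            | (NVOS32_ATTR_PHYSICALITY_CONTIGUOUS << 27); /* NVOS32_ATTR_PHYSICALITY     is bits 28:27 */
    *pAttr2 = (NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB    << 20); /* NVOS32_ATTR2_PAGE_SIZE_HUGE is bits 21:20 */
}

static inline NvU32 nvos32BuildSparseVirtualFlagsExample(void)
{
    /* Sparse ranges must also set _VIRTUAL (see NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY below). */
    return NVOS32_ALLOC_FLAGS_VIRTUAL | NVOS32_ALLOC_FLAGS_SPARSE;
}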
+#define NVOS32_ALLOC_FLAGS_MAXIMIZE_4GB_ADDRESS_SPACE NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE // Legacy name + +// +// Bitmask of flags that are only valid for virtual allocations. +// +#define NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY ( \ + NVOS32_ALLOC_FLAGS_VIRTUAL | \ + NVOS32_ALLOC_FLAGS_LAZY | \ + NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED | \ + NVOS32_ALLOC_FLAGS_SPARSE | \ + NVOS32_ALLOC_FLAGS_MAXIMIZE_ADDRESS_SPACE | \ + NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY ) + +// COMPR_COVG_* allows for specification of what compression resources +// are required (_MIN) and necessary (_MAX). Default behavior is for +// RM to provide as much as possible, including none if _ANY is allowed. +// Values for min/max are (0-100, a %) * _COVG_SCALE (so max value is +// 100*100==10000). _START is used to specify the % offset into the +// region to begin the requested coverage. +// _COVG_BITS allows specification of the number of comptags per ROP tile. +// A value of 0 is default and allows RM to choose based upon MMU/FB rules. +// All other values for _COVG_BITS are arch-specific. +// Note: NVOS32_ATTR_COMPR_COVG_PROVIDED must be set for this feature +// to be available (verif-only). +#define NVOS32_ALLOC_COMPR_COVG_SCALE 10 +#define NVOS32_ALLOC_COMPR_COVG_BITS 1:0 +#define NVOS32_ALLOC_COMPR_COVG_BITS_DEFAULT 0x00000000 +#define NVOS32_ALLOC_COMPR_COVG_BITS_1 0x00000001 +#define NVOS32_ALLOC_COMPR_COVG_BITS_2 0x00000002 +#define NVOS32_ALLOC_COMPR_COVG_BITS_4 0x00000003 +#define NVOS32_ALLOC_COMPR_COVG_MAX 11:2 +#define NVOS32_ALLOC_COMPR_COVG_MIN 21:12 +#define NVOS32_ALLOC_COMPR_COVG_START 31:22 + + +// Note: NVOS32_ALLOC_FLAGS_ZCULL_COVG_SPECIFIED must be set for this feature +// to be enabled. +// If FALLBACK_ALLOW is set, a fallback from LOW_RES_Z or LOW_RES_ZS +// to HIGH_RES_Z is allowed if the surface can't be fully covered. +#define NVOS32_ALLOC_ZCULL_COVG_FORMAT 3:0 +#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_LOW_RES_Z 0x00000000 +#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_HIGH_RES_Z 0x00000002 +#define NVOS32_ALLOC_ZCULL_COVG_FORMAT_LOW_RES_ZS 0x00000003 +#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK 4:4 +#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK_DISALLOW 0x00000000 +#define NVOS32_ALLOC_ZCULL_COVG_FALLBACK_ALLOW 0x00000001 + + +// _ALLOC_COMPTAG_OFFSET allows the caller to specify the starting +// offset for the comptags for a given surface, primarily for test only. +// To specify an offset, set _USAGE_FIXED or _USAGE_MIN in conjunction +// with _START. +// +// _USAGE_FIXED sets a surface's comptagline to start at the given +// starting value. If the offset has already been assigned, then +// the alloc call fails. +// +// _USAGE_MIN sets a surface's comptagline to start at the given +// starting value or higher, depending on comptagline availability. +// In this case, if the offset has already been assigned, the next +// available comptagline (in increasing order) will be assigned. +// +// For Fermi, up to 2^17 comptags may be allowed, but the actual, +// usable limit depends on the size of the compbit backing store. +// +// For Pascal, up to 2 ^ 18 comptags may be allowed +// From Turing. up to 2 ^ 20 comptags may be allowed +// +// See also field ctagOffset in struct NVOS32_PARAMETERS. 
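/*
 * [Editorial illustration -- not part of the original diff.]
 * Sketch of encoding a compression-coverage request using the _COMPR_COVG_*
 * fields defined above (verif-only; NVOS32_ATTR_COMPR_COVG_PROVIDED must be
 * set in attr, as noted there). Percentages are scaled by
 * NVOS32_ALLOC_COMPR_COVG_SCALE and packed into the _MIN/_MAX/_START bit
 * ranges; the helper name is hypothetical. The comptag-offset fields
 * described just above are defined immediately after this aside.
 */
static inline NvU32 nvos32BuildComprCovgExample(NvU32 minPercent, NvU32 maxPercent, NvU32 startPercent)
{
    return (NVOS32_ALLOC_COMPR_COVG_BITS_DEFAULT            << 0)   /* _COVG_BITS  is bits 1:0   */
         | ((maxPercent   * NVOS32_ALLOC_COMPR_COVG_SCALE)  << 2)   /* _COVG_MAX   is bits 11:2  */
         | ((minPercent   * NVOS32_ALLOC_COMPR_COVG_SCALE)  << 12)  /* _COVG_MIN   is bits 21:12 */
         | ((startPercent * NVOS32_ALLOC_COMPR_COVG_SCALE)  << 22); /* _COVG_START is bits 31:22 */
}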
+#define NVOS32_ALLOC_COMPTAG_OFFSET_START 19:0 +#define NVOS32_ALLOC_COMPTAG_OFFSET_START_DEFAULT 0x00000000 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE 31:30 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_DEFAULT 0x00000000 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_OFF 0x00000000 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_FIXED 0x00000001 +#define NVOS32_ALLOC_COMPTAG_OFFSET_USAGE_MIN 0x00000002 + + +// REALLOC flags field +#define NVOS32_REALLOC_FLAGS_GROW_ALLOCATION 0x00000000 +#define NVOS32_REALLOC_FLAGS_SHRINK_ALLOCATION 0x00000001 +#define NVOS32_REALLOC_FLAGS_REALLOC_UP 0x00000000 // towards/from high memory addresses +#define NVOS32_REALLOC_FLAGS_REALLOC_DOWN 0x00000002 // towards/from memory address 0 + +// RELEASE_COMPR, REACQUIRE_COMPR flags field +#define NVOS32_RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED 0x000000001 + +#define NVOS32_REACQUIRE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED 0x000000001 + + +// FREE flags field +#define NVOS32_FREE_FLAGS_MEMORY_HANDLE_PROVIDED 0x00000001 + +// DUMP flags field +#define NVOS32_DUMP_FLAGS_TYPE 1:0 +#define NVOS32_DUMP_FLAGS_TYPE_FB 0x00000000 +#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_PD 0x00000001 +#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_VA 0x00000002 +#define NVOS32_DUMP_FLAGS_TYPE_CLIENT_VAPTE 0x00000003 + +#define NVOS32_BLOCK_TYPE_FREE 0xFFFFFFFF +#define NVOS32_INVALID_BLOCK_FREE_OFFSET 0xFFFFFFFF + +#define NVOS32_MEM_TAG_NONE 0x00000000 + +/* + * NV_CONTEXT_DMA_ALLOCATION_PARAMS - Allocation params to create context dma + through NvRmAlloc. + */ +typedef struct +{ + NvHandle hSubDevice; + NvV32 flags; + NvHandle hMemory; + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 limit NV_ALIGN_BYTES(8); +} NV_CONTEXT_DMA_ALLOCATION_PARAMS; + +/* + * NV_MEMORY_ALLOCATION_PARAMS - Allocation params to create memory through + * NvRmAlloc. Flags are populated with NVOS32_ defines. + */ +typedef struct +{ + NvU32 owner; // [IN] - memory owner ID + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + + NvU32 width; // [IN] - width of surface in pixels + NvU32 height; // [IN] - height of surface in pixels + NvS32 pitch; // [IN/OUT] - desired pitch AND returned actual pitch allocated + + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + + NvU32 format; // [IN/OUT] - format requested, and format allocated + NvU32 comprCovg; // [IN/OUT] - compr covg requested, and allocated + NvU32 zcullCovg; // [OUT] - zcull covg allocated + + NvU64 rangeLo NV_ALIGN_BYTES(8); // [IN] - allocated memory will be limited to the range + NvU64 rangeHi NV_ALIGN_BYTES(8); // [IN] - from rangeBegin to rangeEnd, inclusive. + + NvU64 size NV_ALIGN_BYTES(8); // [IN/OUT] - size of allocation - also returns the actual size allocated + NvU64 alignment NV_ALIGN_BYTES(8); // [IN] - requested alignment - NVOS32_ALLOC_FLAGS_ALIGNMENT* must be on + NvU64 offset NV_ALIGN_BYTES(8); // [IN/OUT] - desired offset if NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE is on AND returned offset + NvU64 limit NV_ALIGN_BYTES(8); // [OUT] - returned surface limit + NvP64 address NV_ALIGN_BYTES(8); // [OUT] - returned address + + NvU32 ctagOffset; // [IN] - comptag offset for this surface (see NVOS32_ALLOC_COMPTAG_OFFSET) + NvHandle hVASpace; // [IN] - VASpace handle. Used when flag is VIRTUAL. 
+ + NvU32 internalflags; // [IN] - internal flags to change allocation behaviors from internal paths + + NvU32 tag; // [IN] - memory tag used for debugging +} NV_MEMORY_ALLOCATION_PARAMS; + +/* + * NV_OS_DESC_MEMORY_ALLOCATION_PARAMS - Allocation params to create OS + * described memory through NvRmAlloc. Flags are populated with NVOS32_ defines. + */ +typedef struct +{ + NvU32 type; // [IN] - surface type, see below TYPE* defines + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 attr; // [IN] - attributes for memory placement/properties, see below + NvU32 attr2; // [IN] - attributes GPU_CACHEABLE + NvP64 descriptor NV_ALIGN_BYTES(8); // [IN] - descriptor address + NvU64 limit NV_ALIGN_BYTES(8); // [IN] - allocated size -1 + NvU32 descriptorType; // [IN] - descriptor type(Virtual | nvmap Handle) + NvU32 tag; // [IN] - memory tag used for debugging +} NV_OS_DESC_MEMORY_ALLOCATION_PARAMS; + +/* + * NV_USER_LOCAL_DESC_MEMORY_ALLOCATION_PARAMS - Allocation params to create a memory + * object from user allocated video memory. Flags are populated with NVOS32_* + * defines. + */ +typedef struct +{ + NvU32 flags; // [IN] - allocation modifier flags, see NVOS02_FLAGS* defines + NvU64 physAddr NV_ALIGN_BYTES(8); // [IN] - physical address + NvU64 size NV_ALIGN_BYTES(8); // [IN] - mem size + NvU32 tag; // [IN] - memory tag used for debugging + NvBool bGuestAllocated; // [IN] - Set if memory is guest allocated (mapped by VMMU) +} NV_USER_LOCAL_DESC_MEMORY_ALLOCATION_PARAMS; + +/* + * NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS - Allocation params to create + * memory HW resources through NvRmAlloc. Flags are populated with NVOS32_ + * defines. + */ +typedef struct +{ + NvU32 owner; // [IN] - memory owner ID + NvU32 flags; // [IN] - allocation modifier flags, see below ALLOC_FLAGS* defines + NvU32 type; // [IN] - surface type, see below TYPE* defines + + NvU32 attr; // [IN/OUT] - surface attributes requested, and surface attributes allocated + NvU32 attr2; // [IN/OUT] - surface attributes requested, and surface attributes allocated + + NvU32 height; + NvU32 width; + NvU32 pitch; + NvU32 alignment; + NvU32 comprCovg; + NvU32 zcullCovg; + + NvU32 kind; + + NvP64 bindResultFunc NV_ALIGN_BYTES(8); // BindResultFunc + NvP64 pHandle NV_ALIGN_BYTES(8); + NvU64 osDeviceHandle NV_ALIGN_BYTES(8); + NvU64 size NV_ALIGN_BYTES(8); + NvU64 allocAddr NV_ALIGN_BYTES(8); + + // [out] from GMMU_COMPR_INFO in drivers/common/shared/inc/mmu/gmmu_fmt.h + NvU32 compPageShift; + NvU32 compressedKind; + NvU32 compTagLineMin; + NvU32 compPageIndexLo; + NvU32 compPageIndexHi; + NvU32 compTagLineMultiplier; + + // [out] fallback uncompressed kind. + NvU32 uncompressedKind; + + NvU32 tag; // [IN] - memory tag used for debugging +} NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS; + +/* function OS33 */ +#define NV04_MAP_MEMORY (0x00000021) + +// Legacy map and unmap memory flags that don't use DRF_DEF scheme +#define NV04_MAP_MEMORY_FLAGS_NONE (0x00000000) +#define NV04_MAP_MEMORY_FLAGS_USER (0x00004000) + +// New map and unmap memory flags. These flags are used for both NvRmMapMemory +// and for NvRmUnmapMemory. + +// Mappings can have restricted permissions (read-only, write-only). Some +// RM implementations may choose to ignore these flags, or they may work +// only for certain memory spaces (system, AGP, video memory); in such cases, +// you may get a read/write mapping even if you asked for a read-only or +// write-only mapping. 
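/*
 * [Editorial illustration -- not part of the original diff.]
 * Minimal sketch of filling NV_MEMORY_ALLOCATION_PARAMS for a 2 MB,
 * 4 KB-page, contiguous video memory allocation. The params block would then
 * be handed to the allocation entry point (per the struct comment, NvRmAlloc);
 * the exact memory class id is outside this header, so it is only assumed
 * here, and the owner value is an arbitrary example. The NVOS33 map flags
 * defined next control how such an allocation is later mapped for the CPU.
 */
static inline NV_MEMORY_ALLOCATION_PARAMS nvMemAllocParamsExample(void)
{
    NV_MEMORY_ALLOCATION_PARAMS p = { 0 };

    p.owner = 0xABCD0000;                                /* arbitrary owner ID for this example           */
    p.type  = NVOS32_TYPE_IMAGE;
    p.flags = NVOS32_ALLOC_FLAGS_NO_SCANOUT;
    p.attr  = (NVOS32_ATTR_PAGE_SIZE_4KB          << 23) /* NVOS32_ATTR_PAGE_SIZE    is bits 24:23        */
            | (NVOS32_ATTR_LOCATION_VIDMEM        << 25) /* NVOS32_ATTR_LOCATION     is bits 26:25        */
            | (NVOS32_ATTR_PHYSICALITY_CONTIGUOUS << 27);/* NVOS32_ATTR_PHYSICALITY  is bits 28:27        */
    p.attr2 = NVOS32_ATTR2_NONE;
    p.size  = 2 * 1024 * 1024;                           /* 2 MB; RM returns the size actually allocated  */
    p.alignment = 0;                                     /* no NVOS32_ALLOC_FLAGS_ALIGNMENT_* flag is set */

    return p;
}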
+#define NVOS33_FLAGS_ACCESS 1:0 +#define NVOS33_FLAGS_ACCESS_READ_WRITE (0x00000000) +#define NVOS33_FLAGS_ACCESS_READ_ONLY (0x00000001) +#define NVOS33_FLAGS_ACCESS_WRITE_ONLY (0x00000002) + +// Persistent mappings are no longer supported +#define NVOS33_FLAGS_PERSISTENT 4:4 +#define NVOS33_FLAGS_PERSISTENT_DISABLE (0x00000000) +#define NVOS33_FLAGS_PERSISTENT_ENABLE (0x00000001) + +// This flag is a hack to work around bug 150889. It disables the error +// checking in the RM that verifies that the client is not trying to map +// memory past the end of the memory object. This error checking needs to +// be shut off in some cases for a PAE bug workaround in certain kernels. +#define NVOS33_FLAGS_SKIP_SIZE_CHECK 8:8 +#define NVOS33_FLAGS_SKIP_SIZE_CHECK_DISABLE (0x00000000) +#define NVOS33_FLAGS_SKIP_SIZE_CHECK_ENABLE (0x00000001) + +// Normally, a mapping is created in the same memory space as the client -- in +// kernel space for a kernel RM client, or in user space for a user RM client. +// However, a kernel RM client can specify MEM_SPACE:USER to create a user-space +// mapping in the current RM client. +#define NVOS33_FLAGS_MEM_SPACE 14:14 +#define NVOS33_FLAGS_MEM_SPACE_CLIENT (0x00000000) +#define NVOS33_FLAGS_MEM_SPACE_USER (0x00000001) + +// The client can ask for direct memory mapping (i.e. no BAR1) if remappers and +// blocklinear are not required. RM can do direct mapping in this case if +// carveout is available. +// DEFAULT: Use direct mapping if available and no address/data translation +// is necessary; reflected otherwise +// DIRECT: Use direct mapping if available, even if some translation is +// necessary (the client is responsible for translation) +// REFLECTED: Always use reflected mapping +#define NVOS33_FLAGS_MAPPING 16:15 +#define NVOS33_FLAGS_MAPPING_DEFAULT (0x00000000) +#define NVOS33_FLAGS_MAPPING_DIRECT (0x00000001) +#define NVOS33_FLAGS_MAPPING_REFLECTED (0x00000002) + +// The client requests a fifo mapping but doesn't know the offset or length +// DEFAULT: Do error check length and offset +// ENABLE: Don't error check length and offset but have the RM fill them in +#define NVOS33_FLAGS_FIFO_MAPPING 17:17 +#define NVOS33_FLAGS_FIFO_MAPPING_DEFAULT (0x00000000) +#define NVOS33_FLAGS_FIFO_MAPPING_ENABLE (0x00000001) + +// The client can require that the CPU mapping be to a specific CPU address +// (akin to MAP_FIXED for mmap). +// DISABLED: RM will map the allocation at a CPU VA that RM selects. +// ENABLED: RM will map the allocation at the CPU VA specified by the address +// pass-back parameter to NvRmMapMemory +// NOTES: +// - Used for controlling CPU addresses in CUDA's unified CPU+GPU virtual +// address space +// - Only valid on NvRmMapMemory +// - Only implemented on Linux +#define NVOS33_FLAGS_MAP_FIXED 18:18 +#define NVOS33_FLAGS_MAP_FIXED_DISABLE (0x00000000) +#define NVOS33_FLAGS_MAP_FIXED_ENABLE (0x00000001) + +// The client can specify to the RM that the CPU virtual address range for an +// allocation should remain reserved after the allocation is unmapped. +// DISABLE: When this mapping is destroyed, RM will unmap the CPU virtual +// address space used by this allocation. On Linux this corresponds +// to calling munmap on the CPU VA region. +// ENABLE: When the map object is freed, RM will leave the CPU virtual +// address space used by allocation reserved. On Linux this means +// that RM will overwrite the previous mapping with an anonymous +// mapping of instead calling munmap. 
+// NOTES: +// - When combined with MAP_FIXED, this allows the client to exert +// significant control over the CPU heap +// - Used in CUDA's unified CPU+GPU virtual address space +// - Only valid on NvRmMapMemory (specifies RM's behavior whenever the +// mapping is destroyed, regardless of mechanism) +// - Only implemented on Linux +#define NVOS33_FLAGS_RESERVE_ON_UNMAP 19:19 +#define NVOS33_FLAGS_RESERVE_ON_UNMAP_DISABLE (0x00000000) +#define NVOS33_FLAGS_RESERVE_ON_UNMAP_ENABLE (0x00000001) + +// Systems with a coherent NVLINK2 connection between the CPU and GPU +// have the option of directly mapping video memory over that connection. +// During mapping you may specify a preference. +// +#define NVOS33_FLAGS_BUS 21:20 +#define NVOS33_FLAGS_BUS_ANY 0 +#define NVOS33_FLAGS_BUS_NVLINK_COHERENT 1 +#define NVOS33_FLAGS_BUS_PCIE 2 + +// Internal use only +#define NVOS33_FLAGS_OS_DESCRIPTOR 22:22 +#define NVOS33_FLAGS_OS_DESCRIPTOR_DISABLE (0x00000000) +#define NVOS33_FLAGS_OS_DESCRIPTOR_ENABLE (0x00000001) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; // device or sub-device handle + NvHandle hMemory; // handle to memory object if provided -- NULL if not + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 length NV_ALIGN_BYTES(8); + NvP64 pLinearAddress NV_ALIGN_BYTES(8); // pointer for returned address + NvU32 status; + NvU32 flags; +} NVOS33_PARAMETERS; + + +/* function OS34 */ +#define NV04_UNMAP_MEMORY (0x00000022) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress NV_ALIGN_BYTES(8); // ptr to virtual address of mapped memory + NvU32 status; + NvU32 flags; +} NVOS34_PARAMETERS; + +/* function OS37 */ +#define NV04_UPDATE_CONTEXT_DMA (0x00000025) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hDma; + NvHandle hDmaPteArray; // ctx dma for pte's + NvV32 dmaFirstPage; // first page in "real" context dma to update + NvV32 pteArrayOffset; // first pte to use from input pte array + NvV32 pteCount; // count of PTE entries to update + NvHandle hResourceHandle; // bind data handle + NvV32 status; +} NVOS37_PARAMETERS; + +/* function OS38 */ +#define NV04_ACCESS_REGISTRY (0x00000026) + +/* parameter values */ +#define NVOS38_ACCESS_TYPE_READ_DWORD 1 +#define NVOS38_ACCESS_TYPE_WRITE_DWORD 2 +#define NVOS38_ACCESS_TYPE_READ_BINARY 6 +#define NVOS38_ACCESS_TYPE_WRITE_BINARY 7 + +#define NVOS38_MAX_REGISTRY_STRING_LENGTH 256 +#define NVOS38_MAX_REGISTRY_BINARY_LENGTH 256 + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hObject; + NvV32 AccessType; + + NvV32 DevNodeLength; + NvP64 pDevNode NV_ALIGN_BYTES(8); + + NvV32 ParmStrLength; + NvP64 pParmStr NV_ALIGN_BYTES(8); + + NvV32 BinaryDataLength; + NvP64 pBinaryData NV_ALIGN_BYTES(8); + + NvV32 Data; + NvV32 Entry; + NvV32 status; +} NVOS38_PARAMETERS; + +#define NV04_ALLOC_CONTEXT_DMA (0x00000027) + +/* parameter values are the same as NVOS03 -- not repeated here */ + +/* parameters */ +typedef struct +{ + NvHandle hObjectParent; + NvHandle hSubDevice; + NvHandle hObjectNew; + NvV32 hClass; + NvV32 flags; + NvU32 selector; + NvHandle hMemory; + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 limit NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS39_PARAMETERS; + + +#define NV04_GET_EVENT_DATA (0x00000028) + +typedef struct +{ + NvHandle hObject; + NvV32 NotifyIndex; + + // + // Holds same information as that of nvgputypes.h::NvNotification's + // info32 and info16. 
+    //
+    NvV32 info32;
+    NvU16 info16;
+} NvUnixEvent;
+
+/* parameters */
+typedef struct
+{
+    NvP64 pEvent NV_ALIGN_BYTES(8);
+    NvV32 MoreEvents;
+    NvV32 status;
+} NVOS41_PARAMETERS;
+
+/* function NVOS43 -- deleted 4/09 */
+/* #define NV04_UNIFIED_FREE                              (0x0000002B) */
+
+
+#define NVSIM01_BUS_XACT                                 (0x0000002C)
+
+/* parameters */
+typedef struct
+{
+    NvHandle hClient;   // n/a currently
+    NvHandle hDevice;   // n/a currently
+    NvU32    offset;    // phy bus offset
+    NvU32    bar;       // ~0 := phy addr, {0..2} specify gpu bar
+    NvU32    bytes;     // # of bytes
+    NvU32    write;     // 0 := read request
+    NvU32    data;      // in/out based upon 'write'
+    NvU32    status;
+} NVOS2C_PARAMETERS;
+
+/* function NVOS2D -- deleted 4/09 */
+/* #define NVSIM01_BUS_GET_IFACES                         (0x0000002D) */
+
+
+/* function OS46 */
+#define NV04_MAP_MEMORY_DMA                              (0x0000002E)
+
+/* parameter values */
+#define NVOS46_FLAGS_ACCESS                                1:0
+#define NVOS46_FLAGS_ACCESS_READ_WRITE                     (0x00000000)
+#define NVOS46_FLAGS_ACCESS_READ_ONLY                      (0x00000001)
+#define NVOS46_FLAGS_ACCESS_WRITE_ONLY                     (0x00000002)
+
+//
+// Compute shaders support both 32b and 64b pointers. This allows mappings
+// to be restricted to the bottom 4GB of the address space. How _DISABLE
+// is handled is chip specific and may force a pointer above 4GB.
+//
+#define NVOS46_FLAGS_32BIT_POINTER                         2:2
+#define NVOS46_FLAGS_32BIT_POINTER_DISABLE                 (0x00000000)
+#define NVOS46_FLAGS_32BIT_POINTER_ENABLE                  (0x00000001)
+
+#define NVOS46_FLAGS_PAGE_KIND                             3:3
+#define NVOS46_FLAGS_PAGE_KIND_PHYSICAL                    (0x00000000)
+#define NVOS46_FLAGS_PAGE_KIND_VIRTUAL                     (0x00000001)
+
+#define NVOS46_FLAGS_CACHE_SNOOP                           4:4
+#define NVOS46_FLAGS_CACHE_SNOOP_DISABLE                   (0x00000000)
+#define NVOS46_FLAGS_CACHE_SNOOP_ENABLE                    (0x00000001)
+
+// The client requests a CPU kernel mapping so that SW class could use it
+// DEFAULT: Don't map CPU address
+// ENABLE:  Map CPU address
+#define NVOS46_FLAGS_KERNEL_MAPPING                        5:5
+#define NVOS46_FLAGS_KERNEL_MAPPING_NONE                   (0x00000000)
+#define NVOS46_FLAGS_KERNEL_MAPPING_ENABLE                 (0x00000001)
+
+//
+// Compute shader access control.
+// GPUs that support this feature set the NV0080_CTRL_DMA_CAPS_SHADER_ACCESS_SUPPORTED
+// property. These were first supported in Kepler. _DEFAULT will match the ACCESS field.
+//
+#define NVOS46_FLAGS_SHADER_ACCESS                         7:6
+#define NVOS46_FLAGS_SHADER_ACCESS_DEFAULT                 (0x00000000)
+#define NVOS46_FLAGS_SHADER_ACCESS_READ_ONLY               (0x00000001)
+#define NVOS46_FLAGS_SHADER_ACCESS_WRITE_ONLY              (0x00000002)
+#define NVOS46_FLAGS_SHADER_ACCESS_READ_WRITE              (0x00000003)
+
+//
+// How the PAGE_SIZE field is interpreted is architecture specific.
+//
+// On Curie chips it is ignored.
+//
+// On Tesla it is used to select which type of PDE
+// to use. By default the RM will select 4KB for system memory
+// and BIG (64KB) for video memory.  BOTH is not supported.
+//
+// Likewise on Fermi this is used to select the PDE type. Fermi cannot
+// mix page sizes within a single mapping, so the page size is determined
+// at surface allocation time. 4KB or BIG may be specified but they
+// must match the page size selected at allocation time.  DEFAULT
+// allows the RM to select either a single page size or both PDE,
+// while BOTH forces the RM to select a dual page size PDE.
+//
+// BIG_PAGE  = 64 KB on PASCAL
+//           = 64 KB or 128 KB on pre_PASCAL chips
+//
+// HUGE_PAGE = 2 MB on PASCAL
+//           = not supported on pre_PASCAL chips.
+// +#define NVOS46_FLAGS_PAGE_SIZE 11:8 +#define NVOS46_FLAGS_PAGE_SIZE_DEFAULT (0x00000000) +#define NVOS46_FLAGS_PAGE_SIZE_4KB (0x00000001) +#define NVOS46_FLAGS_PAGE_SIZE_BIG (0x00000002) +#define NVOS46_FLAGS_PAGE_SIZE_BOTH (0x00000003) +#define NVOS46_FLAGS_PAGE_SIZE_HUGE (0x00000004) + +// Some systems allow the device to use the system L3 cache when accessing the +// system memory. For example, the iGPU on T19X can allocate from the system L3 +// provided the SoC L3 cache is configured for device allocation. +// +// NVOS46_FLAGS_SYSTEM_L3_ALLOC_DEFAULT - Use the default L3 allocation +// policy. When using this policy, device memory access will be coherent with +// non-snooping devices such as the display on Tegra. +// +// NVOS46_FLAGS_SYSTEM_L3_ALLOC_ENABLE_HINT - Enable L3 allocation if possible. +// When L3 allocation is enabled, device memory access may be cached, and the +// memory access will be coherent only with other snoop-enabled access. This +// flag is a hint and will be ignored if the system does not support L3 +// allocation for the device. NVOS46_FLAGS_CACHE_SNOOP_ENABLE must also be set +// for this flag to be effective. +// +// Note: This flag is implemented only by rmapi_tegra. It is not implemented by +// Resman. +// +#define NVOS46_FLAGS_SYSTEM_L3_ALLOC 13:13 +#define NVOS46_FLAGS_SYSTEM_L3_ALLOC_DEFAULT (0x00000000) +#define NVOS46_FLAGS_SYSTEM_L3_ALLOC_ENABLE_HINT (0x00000001) + +#define NVOS46_FLAGS_DMA_OFFSET_GROWS 14:14 +#define NVOS46_FLAGS_DMA_OFFSET_GROWS_UP (0x00000000) +#define NVOS46_FLAGS_DMA_OFFSET_GROWS_DOWN (0x00000001) + +// +// DMA_OFFSET_FIXED is overloaded for two purposes. +// +// 1. For CTXDMA mappings that use DMA_UNICAST_REUSE_ALLOC_FALSE, +// DMA_OFFSET_FIXED_TRUE indicates to use the dmaOffset parameter +// for a fixed address allocation out of the VA space heap. +// DMA_OFFSET_FIXED_FALSE indicates dmaOffset input will be ignored. +// +// 2. For CTXDMA mappings that use DMA_UNICAST_REUSE_ALLOC_TRUE and +// for *ALL* non-CTXDMA mappings, DMA_OFFSET_FIXED_TRUE indicates +// to treat the input dmaOffset as an absolute virtual address +// instead of an offset relative to the virtual allocation being +// mapped into. Whether relative or absolute, the resulting +// virtual address *must* be contained within the specified +// virtual allocation. +// +// Internally, it is also required that the virtual address be aligned +// to the page size of the mapping (obviously cannot map sub-pages). +// For client flexibility the physical offset does not require page alignment. +// This is handled by adding the physical misalignment +// (internally called pteAdjust) to the returned virtual address. +// The *input* dmaOffset can account for this pteAdjust (or not), +// but the returned virtual address always will. 
+// +#define NVOS46_FLAGS_DMA_OFFSET_FIXED 15:15 +#define NVOS46_FLAGS_DMA_OFFSET_FIXED_FALSE (0x00000000) +#define NVOS46_FLAGS_DMA_OFFSET_FIXED_TRUE (0x00000001) + +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP 19:16 +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_DEFAULT (0x00000000) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_1 (0x00000001) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_2 (0x00000002) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_4 (0x00000003) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_8 (0x00000004) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_16 (0x00000005) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_32 (0x00000006) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_64 (0x00000007) +#define NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_128 (0x00000008) +#define NVOS46_FLAGS_P2P 27:20 + +#define NVOS46_FLAGS_P2P_ENABLE 21:20 +#define NVOS46_FLAGS_P2P_ENABLE_NO (0x00000000) +#define NVOS46_FLAGS_P2P_ENABLE_YES (0x00000001) +#define NVOS46_FLAGS_P2P_ENABLE_NONE NVOS46_FLAGS_P2P_ENABLE_NO +#define NVOS46_FLAGS_P2P_ENABLE_SLI NVOS46_FLAGS_P2P_ENABLE_YES +#define NVOS46_FLAGS_P2P_ENABLE_NOSLI (0x00000002) +// Subdevice ID. Reserved 3 bits for the possibility of 8-way SLI +#define NVOS46_FLAGS_P2P_SUBDEVICE_ID 24:22 +#define NVOS46_FLAGS_P2P_SUBDEV_ID_SRC NVOS46_FLAGS_P2P_SUBDEVICE_ID +#define NVOS46_FLAGS_P2P_SUBDEV_ID_TGT 27:25 +#define NVOS46_FLAGS_TLB_LOCK 28:28 +#define NVOS46_FLAGS_TLB_LOCK_DISABLE (0x00000000) +#define NVOS46_FLAGS_TLB_LOCK_ENABLE (0x00000001) +#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC 29:29 +#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC_FALSE (0x00000000) +#define NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC_TRUE (0x00000001) +#define NVOS46_FLAGS_DR_SURF 30:30 +#define NVOS46_FLAGS_DR_SURF_FALSE (0x00000000) +#define NVOS46_FLAGS_DR_SURF_TRUE (0x00000001) +// +// This flag must be used with caution. Improper use can leave stale entries in the TLB, +// and allow access to memory no longer owned by the RM client or cause page faults. +// Also see corresponding flag for NvUnmapMemoryDma. 
+// +#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION 31:31 +#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION_FALSE (0x00000000) +#define NVOS46_FLAGS_DEFER_TLB_INVALIDATION_TRUE (0x00000001) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hDevice; // [IN] device handle for mapping + NvHandle hDma; // [IN] dma handle for mapping + NvHandle hMemory; // [IN] memory handle for mapping + NvU64 offset NV_ALIGN_BYTES(8); // [IN] offset of region + NvU64 length NV_ALIGN_BYTES(8); // [IN] limit of region + NvV32 flags; // [IN] flags + NvU64 dmaOffset NV_ALIGN_BYTES(8); // [OUT] offset of mapping + // [IN] if FLAGS_DMA_OFFSET_FIXED_TRUE + // *OR* hDma is NOT a CTXDMA handle + // (see NVOS46_FLAGS_DMA_OFFSET_FIXED) + NvV32 status; // [OUT] status +} NVOS46_PARAMETERS; + + +/* function OS47 */ +#define NV04_UNMAP_MEMORY_DMA (0x0000002F) + +#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION 0:0 +#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION_FALSE (0x00000000) +#define NVOS47_FLAGS_DEFER_TLB_INVALIDATION_TRUE (0x00000001) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hDevice; // [IN] device handle for mapping + NvHandle hDma; // [IN] dma handle for mapping + NvHandle hMemory; // [IN] memory handle for mapping + NvV32 flags; // [IN] flags + NvU64 dmaOffset NV_ALIGN_BYTES(8); // [IN] dma offset from NV04_MAP_MEMORY_DMA + NvV32 status; // [OUT] status +} NVOS47_PARAMETERS; + + +#define NV04_BIND_CONTEXT_DMA (0x00000031) +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hChannel; // [IN] channel handle for binding + NvHandle hCtxDma; // [IN] ctx dma handle for binding + NvV32 status; // [OUT] status +} NVOS49_PARAMETERS; + + +/* function OS54 */ +#define NV04_CONTROL (0x00000036) + +#define NVOS54_FLAGS_NONE (0x00000000) +#define NVOS54_FLAGS_IRQL_RAISED (0x00000001) +#define NVOS54_FLAGS_LOCK_BYPASS (0x00000002) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hObject; + NvV32 cmd; + NvU32 flags; + NvP64 params NV_ALIGN_BYTES(8); + NvU32 paramsSize; + NvV32 status; +} NVOS54_PARAMETERS; + +/* RM Control header + * + * Replacement for NVOS54_PARAMETERS where embedded pointers are not allowed. + * Input layout for user space RM Control calls should be: + * + * +--- NVOS63_PARAMETERS ---+--- RM Control parameters ---+ + * + * NVOS63_PARAMETERS::paramsSize is the size of RM Control parameters + * + */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hObject; // [IN] object handle + NvV32 cmd; // [IN] control command ID + NvU32 paramsSize; // [IN] size in bytes of the RM Control parameters + NvV32 status; // [OUT] status +} NVOS63_PARAMETERS; + + +/* function OS55 */ +#define NV04_DUP_OBJECT (0x00000037) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] destination client handle + NvHandle hParent; // [IN] parent of new object + NvHandle hObject; // [INOUT] destination (new) object handle + NvHandle hClientSrc; // [IN] source client handle + NvHandle hObjectSrc; // [IN] source (old) object handle + NvU32 flags; // [IN] flags + NvU32 status; // [OUT] status +} NVOS55_PARAMETERS; + +#define NV04_DUP_HANDLE_FLAGS_NONE (0x00000000) +#define NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE (0x00000001) // If set, prevents an RM kernel client from duping unconditionally + // NOTE: Do not declare a NV04_DUP_HANDLE_FLAGS_* value of 0x00000008 + // until Bug 2859347 is resolved! 
This is due to conflicting usage + // of RS_RES_DUP_PARAMS_INTERNAL.flags to pass + // NVOS32_ALLOC_INTERNAL_FLAGS_FLA_MEMORY to an object constructor. + +/* function OS56 */ +#define NV04_UPDATE_DEVICE_MAPPING_INFO (0x00000038) + +/* parameters */ +typedef struct +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pOldCpuAddress NV_ALIGN_BYTES(8); + NvP64 pNewCpuAddress NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS56_PARAMETERS; + +/* function OS57 */ +#define NV04_SHARE (0x0000003E) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] owner client handle + NvHandle hObject; // [IN] resource to share + RS_SHARE_POLICY sharePolicy; // [IN] share policy entry + NvU32 status; // [OUT] status +} NVOS57_PARAMETERS; + +/* parameters */ +typedef struct +{ + NvU32 deviceReference; + NvU32 head; + NvU32 state; + NvU8 forceMonitorState; + NvU8 bForcePerfBiosLevel; + NvU8 bIsD3HotTransition; // [OUT] To tell client if it's a D3Hot transition + NvU32 fastBootPowerState; +} NVPOWERSTATE_PARAMETERS, *PNVPOWERSTATE_PARAMETERS; + + /***************************************************************************\ +|* Object Allocation Parameters *| + \***************************************************************************/ + +// GR engine creation parameters +typedef struct { + NvU32 version; // set to 0x2 + NvU32 flags; // input param from a rm client (no flags are currently defined) + NvU32 size; // sizeof(NV_GR_ALLOCATION_PARAMETERS) + NvU32 caps; // output param for a rm client - class dependent +} NV_GR_ALLOCATION_PARAMETERS; + +// +// NvAlloc parameters for NV03_DEVICE_XX class +// hClientShare +// For NV50+ this can be set to virtual address space for this +// device. On previous chips this field is ignored. There are +// three possible settings +// NV01_NULL_OBJECT - Use the default global VA space +// Handle to current client - Create a new private address space +// Handle to another client - Attach to other clients address space +// flags +// MAP_PTE_GLOBALLY Deprecated. +// MINIMIZE_PTETABLE_SIZE Pass hint to DMA HAL to use partial page +// tables. Depending on allocation pattern +// this may actually use more instance memory. +// RETRY_PTE_ALLOC_IN_SYS Fallback to PTEs allocation in sysmem. This +// is now enabled by default. +// VASPACE_SIZE Honor vaSpaceSize field. +// +// MAP_PTE Deprecated. +// +// VASPACE_IS_MIRRORED This flag will tell RM to create a mirrored +// kernel PDB for the address space associated +// with this device. When this flag is set +// the address space covered by the top PDE +// is restricted and cannot be allocated out of. +// +// +// VASPACE_BIG_PAGE_SIZE_64k ***Warning this flag will be deprecated do not use***** +// VASPACE_BIG_PAGE_SIZE_128k This flag will choose the big page size of the VASPace +// to 64K/128k if the system supports a configurable size. +// If the system does not support a configurable size then +// defaults will be chosen. +// If the user sets both these bits then this API will fail. +// +// SHARED_MANAGEMENT +// *** Warning: This will be deprecated - see NV_VASPACE_ALLOCATION_PARAMETERS. *** +// +// +// hTargetClient/hTargetDevice +// Deprecated. Can be deleted once client code has removed references. +// +// vaBase +// *** Warning: This will be deprecated - see NV_VASPACE_ALLOCATION_PARAMETERS. *** +// +// vaSpaceSize +// Set the size of the VA space used for this client if allocating +// a new private address space. Is expressed as a size such as +// (1<<32) for a 32b address space. 
Reducing the size of the address
+//          space allows the dma chip specific code to reduce the instance memory
+//          used for page tables.
+//
+//  vaMode
+//          The vaspace allocation mode. There are three modes supported:
+//          1. SINGLE_VASPACE
+//          An old abstraction that provides a single VA space under a
+//          device and it's allocated implicitly when an object requires a VA
+//          space. Typically, this VA space is also shared across clients.
+//
+//          2. OPTIONAL_MULTIPLE_VASPACES
+//          Global + multiple private va spaces. In this mode, the old abstraction,
+//          a single vaspace under a device that is allocated implicitly, is still
+//          supported. A private VA space is an entity under a device which
+//          cannot be shared with other clients, but multiple channels under the
+//          same device can still share a private VA space.
+//          Private VA spaces (class:90f1,FERMI_VASPACE_A) can be allocated as
+//          objects through RM APIs. This mode requires the users to know what they
+//          are doing in terms of using VA spaces. Page faults can easily occur if
+//          one is not careful with a mix of an implicit VA space and multiple
+//          VA spaces.
+//
+//          3. MULTIPLE_VASPACES
+//          In this mode, all VA spaces have to be allocated explicitly through RM
+//          APIs and users have to specify which VA space to use for each object.
+//          This mode prevents users from using context dmas, which are not supported
+//          and can be misleading if used. Therefore, it is more of a safeguard mode
+//          to prevent people from making mistakes that are hard to debug.
+//
+//  DEFAULT MODE: 2. OPTIONAL_MULTIPLE_VASPACES
+//
+// See NV0080_ALLOC_PARAMETERS for allocation parameter structure.
+//
+
+#define NV_DEVICE_ALLOCATION_SZNAME_MAXLEN                    128
+#define NV_DEVICE_ALLOCATION_FLAGS_NONE                       (0x00000000)
+#define NV_DEVICE_ALLOCATION_FLAGS_MAP_PTE_GLOBALLY           (0x00000001)
+#define NV_DEVICE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE     (0x00000002)
+#define NV_DEVICE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS     (0x00000004)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE               (0x00000008)
+#define NV_DEVICE_ALLOCATION_FLAGS_MAP_PTE                    (0x00000010)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_TARGET          (0x00000020)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SHARED_MANAGEMENT  (0x00000100)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k  (0x00000200)
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k (0x00000400)
+#define NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS (0x00000800)
+
+/*
+ * TODO: Delete this flag once CUDA moves to the ctrl call
+ */
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED        (0x00000040)
+
+// XXX NV_DEVICE_ALLOCATION_FLAGS_VASPACE_PTABLE_PMA_MANAGED should not
+// be exposed to clients. It should be the default RM behavior.
+//
+// Until it is made the default, certain clients such as OpenGL
+// might still need PTABLE allocations to go through PMA, so this
+// flag has been temporarily exposed.
+//
+// See bug 1880192
+#define NV_DEVICE_ALLOCATION_FLAGS_VASPACE_PTABLE_PMA_MANAGED (0x00001000)
+
+//
+// Indicates this device is being created by guest and requires a
+// HostVgpuDeviceKernel creation in client.
+//
+#define NV_DEVICE_ALLOCATION_FLAGS_HOST_VGPU_DEVICE           (0x00002000)
+
+//
+// Indicates this device is being created for VGPU plugin use.
+// Requires a HostVgpuDevice handle to indicate the guest on which
+// this plugin operates.
+// +#define NV_DEVICE_ALLOCATION_FLAGS_PLUGIN_CONTEXT (0x00004000) + +#define NV_DEVICE_ALLOCATION_VAMODE_OPTIONAL_MULTIPLE_VASPACES (0x00000000) +#define NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE (0x00000001) +#define NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES (0x00000002) + +/* + * NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS.flags values. + * + * These flags may apply to all channel types: PIO, DMA, and GPFIFO. + * They are also designed so that zero is always the correct default. + * + * NVOS04_FLAGS_CHANNEL_TYPE: + * This flag specifies the type of channel to allocate. Legal values + * for this flag include: + * + * NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL: + * This flag specifies that a physical channel is to be allocated. + * + * NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL: + * OBSOLETE - NOT SUPPORTED + * + * NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL: + * OBSOLETE - NOT SUPPORTED + */ + +/* valid NVOS04_FLAGS_CHANNEL_TYPE values */ +#define NVOS04_FLAGS_CHANNEL_TYPE 1:0 +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL 0x00000000 +#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL 0x00000001 // OBSOLETE +#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL 0x00000002 // OBSOLETE + +/* + * NVOS04_FLAGS_VPR: + * This flag specifies if channel is intended for work with + * Video Protected Regions (VPR) + * + * NVOS04_FLAGS_VPR_TRUE: + * The channel will only write to protected memory regions. + * + * NVOS04_FLAGS_VPR_FALSE: + * The channel will never read from protected memory regions. + */ +#define NVOS04_FLAGS_VPR 2:2 +#define NVOS04_FLAGS_VPR_FALSE 0x00000000 +#define NVOS04_FLAGS_VPR_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING: + * This flag specifies if the channel can skip refcounting of potentially + * accessed mappings on job kickoff. This flag is only meaningful for + * kernel drivers which perform refcounting of memory mappings. + * + * NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE: + * The channel cannot not skip refcounting of memory mappings + * + * NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE: + * The channel can skip refcounting of memory mappings + */ +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING 3:3 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE: + * This flag specifies which "runqueue" the allocated channel will be + * executed on in a TSG. Channels on different runqueues within a TSG + * may be able to feed methods into the engine simultaneously. + * Non-default values are only supported on GP10x and later and only for + * channels within a TSG. + */ +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE 4:4 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE 0x00000001 + +/* + * NVOS04_FLAGS_PRIVILEGED_CHANNEL: + * This flag tells RM whether to give the channel admin privilege. This + * flag will only take effect if the client is GSP-vGPU plugin. It is + * needed so that guest can update page tables in physical mode and do + * scrubbing. + */ +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL 5:5 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING: + * This flags tells RM not to schedule a newly created channel within a + * channel group immediately even if channel group is currently scheduled. + * Channel will not be scheduled until NVA06F_CTRL_GPFIFO_SCHEDULE is + * invoked. 
This is used eg. for CUDA which needs to do additional + * initialization before starting up a channel. + * Default is FALSE. + */ +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING 6:6 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE 0x00000000 +#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_DENY_PHYSICAL_MODE_CE: + * This flag specifies whether or not to deny access to the physical + * mode of CopyEngine regardless of whether or not the client handle + * is admin. If set to true, this channel allocation will always result + * in an unprivileged channel. If set to false, the privilege of the channel + * will depend on the privilege level of the client handle. + * This is primarily meant for vGPU since all client handles + * granted to guests are admin. + */ +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE 7:7 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE + * + * This flag specifies the channel offset in terms of within a page of + * USERD. For example, value 3 means the 4th channel within a USERD page. + * Given the USERD size is 512B, we will have 8 channels total, so 3 bits + * are reserved. + * + * When _USERD_INDEX_FIXED_TRUE is set but INDEX_PAGE_FIXED_FALSE is set, + * it will ask for a new USERD page. + * + */ +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE 10:8 + +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED 11:11 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE + * + * This flag specifies the channel offset in terms of USERD page. When + * this PAGE_FIXED_TRUE is set, the INDEX_FIXED_FALSE bit should also + * be set, otherwise INVALID_STATE will be returned. + * + * And the field _USERD_INDEX_VALUE will be used to request the specific + * offset within a USERD page. + */ + +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE 20:12 + +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED 21:21 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_DENY_AUTH_LEVEL_PRIV + * This flag specifies whether or not to deny access to the privileged + * host methods TLB_INVALIDATE and ACCESS_COUNTER_CLR + */ +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV 22:22 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER + * + * This flag specifies scrubbing should be skipped for any internal + * allocations made for this channel from PMA using ctx buf pools. + * Only kernel clients are allowed to use this setting. + */ +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER 23:23 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO + * + * This flag specifies that the client is expected to map USERD themselves + * and RM need not do so. 
+ */ +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO 24:24 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL + */ +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL 25:25 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT + * + * This flag specifies whether the channel calling context is from CPU + * VGPU plugin. + */ +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT 26:26 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE 0x00000001 + + /* + * NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT + * + * This flag specifies the channel PBDMA ACQUIRE timeout option. + * _FALSE to disable it, _TRUE to enable it. + * When this flag is enabled, if a host semaphore acquire does not + * complete in about 2 sec, it will time out and trigger a RC error. + */ +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT 27:27 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE 0x00000000 +#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE 0x00000001 + +/* + * NVOS04_FLAGS_GROUP_CHANNEL_THREAD: + * This flags specifies the thread id in which an allocated channel + * will be executed in a TSG. The relationship between the thread id + * in A TSG and respective definitions are implementation specific. + * Also, not all classes will be supported at thread > 0. + * This field cannot be used on non-TSG channels and must be set to + * the default value (0) in that case. If thread > 0 on a non-TSG + * channel, the allocation will fail + */ +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD 29:28 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT 0x00000000 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE 0x00000001 +#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO 0x00000002 + +#define NVOS04_FLAGS_MAP_CHANNEL 30:30 +#define NVOS04_FLAGS_MAP_CHANNEL_FALSE 0x00000000 +#define NVOS04_FLAGS_MAP_CHANNEL_TRUE 0x00000001 + +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC 31:31 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE 0x00000000 +#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE 0x00000001 + +typedef struct +{ + NvU64 base NV_ALIGN_BYTES(8); + NvU64 size NV_ALIGN_BYTES(8); + NvU32 addressSpace; + NvU32 cacheAttrib; +} NV_MEMORY_DESC_PARAMS; + +typedef struct +{ + NvHandle hObjectError; // error context DMA + NvHandle hObjectBuffer; // no longer used + NvU64 gpFifoOffset NV_ALIGN_BYTES(8); // offset to beginning of GP FIFO + NvU32 gpFifoEntries; // number of GP FIFO entries + NvU32 flags; + NvHandle hContextShare; // context share handle + NvHandle hVASpace; // VASpace for the channel + NvHandle hUserdMemory[NVOS_MAX_SUBDEVICES]; // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0 + NvU64 userdOffset[NVOS_MAX_SUBDEVICES] NV_ALIGN_BYTES(8); // offset to beginning of UserD within hUserdMemory[x] + NvU32 engineType; // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated + NvU32 cid; // Channel identifier that is unique for the duration of a RM session + NvU32 subDeviceId; // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods + NvHandle hObjectEccError; // ECC error context DMA + NV_MEMORY_DESC_PARAMS instanceMem; + NV_MEMORY_DESC_PARAMS userdMem; + NV_MEMORY_DESC_PARAMS ramfcMem; + NV_MEMORY_DESC_PARAMS mthdbufMem; + + NvHandle 
hPhysChannelGroup; // reserved + NvU32 internalFlags; // reserved + NV_MEMORY_DESC_PARAMS errorNotifierMem; // reserved + NV_MEMORY_DESC_PARAMS eccErrorNotifierMem; // reserved + NvU32 ProcessID; // reserved + NvU32 SubProcessID; // reserved +} NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS; + +#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR 0x00000000 +#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN 0x00000001 +#define NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1 2 +#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_VALUE 14:0 +#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS 15:15 +#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS_TRUE 0x1 +#define NV_CHANNELGPFIFO_NOTIFICATION_STATUS_IN_PROGRESS_FALSE 0x0 + +typedef struct +{ + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS gpfifoAllocationParams; + NvHandle hKernelChannel; +} NV_PHYSICALCHANNEL_ALLOC_PARAMS; + +typedef struct +{ + NvHandle hRunlistBase; // Handle to physmem runlist base + NvU32 engineID; // Engine associated with the runlist +} NV_CHANNELRUNLIST_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvV32 channelInstance; // One of the n channel instances of a given channel type. + // Note that core channel has only one instance + // while all others have two (one per head). + NvHandle hObjectBuffer; // ctx dma handle for DMA push buffer + NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications + NvU32 offset; // Initial offset for put/get, usually zero. + NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs + + NvU32 flags; +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB 1:1 +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES 0x00000000 +#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO 0x00000001 + +} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvV32 channelInstance; // One of the n channel instances of a given channel type. + // All PIO channels have two instances (one per head). + NvHandle hObjectNotify; // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors. + NvP64 pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel +} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS; + +// Used for allocating a channel group +typedef struct +{ + NvHandle hObjectError; // Error notifier for TSG + NvHandle hObjectEccError; // ECC Error notifier for TSG + NvHandle hVASpace; // VA space handle for TSG + NvU32 engineType; // Engine to which all channels in this TSG are associated with + NvBool bIsCallingContextVgpuPlugin; +} NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS; + +/* +* @params: +* @engineId : Engine to which the software runlist be associated with. +* @maxTSGs : Maximum number of TSG entries that will be submitted in this software runlist +* The size of the runlist buffer will be determined by +* 2 * // double buffer +* maxTSGs * // determined by KMD +* maxChannelPerTSG * // Determined by RM +* sizeof(RunlistEntry) // Determined by HW format +* @qosIntrEnableMask: QOS Interrupt bitmask that needs to be enabled for the SW runlist defined below. +*/ +typedef struct +{ + NvU32 engineId; //(IN) + NvU32 maxTSGs; //(IN) // Size of the RM could return error if the request cannot be accommodated. 
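    // Worked example of the sizing formula quoted above (all numbers are
    // hypothetical and purely illustrative): with maxTSGs = 32, an
    // RM-chosen maxChannelPerTSG of 8, and an 8-byte hardware runlist
    // entry, the double-buffered runlist buffer would need
    //     2 * 32 * 8 * 8 = 4096 bytes.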
+ NvU32 qosIntrEnableMask; //(IN) // Bitmask for QOS interrupts that needs to be enabled +} NV_SWRUNLIST_ALLOCATION_PARAMS; + +#define NV_SWRUNLIST_QOS_INTR_NONE 0x00000000 +#define NV_SWRUNLIST_QOS_INTR_RUNLIST_AND_ENG_IDLE_ENABLE NVBIT32(0) +#define NV_SWRUNLIST_QOS_INTR_RUNLIST_IDLE_ENABLE NVBIT32(1) +#define NV_SWRUNLIST_QOS_INTR_RUNLIST_ACQUIRE_ENABLE NVBIT32(2) +#define NV_SWRUNLIST_QOS_INTR_RUNLIST_ACQUIRE_AND_ENG_IDLE_ENABLE NVBIT32(3) + +typedef struct +{ + NvU32 size; + NvU32 caps; +} NV_ME_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; + NvU32 engineInstance; // Select NVDEC0 or NVDEC1 or NVDEC2 +} NV_BSP_ALLOCATION_PARAMETERS; + +// +// These are referenced by mdiag mods tests, but do not appear to be used during +// in the RM any longer +// +#define NV_VP_ALLOCATION_FLAGS_STANDARD_UCODE (0x00000000) +#define NV_VP_ALLOCATION_FLAGS_STATIC_UCODE (0x00000001) +#define NV_VP_ALLOCATION_FLAGS_DYNAMIC_UCODE (0x00000002) + +// +// NV_VP_ALLOCATION_PARAMETERS.flags +// +// NV_VP_ALLOCATION_FLAGS_AVP_CLIENT are used by Tegra to specify if +// the current allocation with be used by Video or Audio +// +#define NV_VP_ALLOCATION_FLAGS_AVP_CLIENT_VIDEO (0x00000000) +#define NV_VP_ALLOCATION_FLAGS_AVP_CLIENT_AUDIO (0x00000001) + +typedef struct +{ + NvU32 size; + NvU32 caps; + NvU32 flags; + NvU32 altUcode; + NvP64 rawUcode NV_ALIGN_BYTES(8); + NvU32 rawUcodeSize; + NvU32 numSubClasses; + NvU32 numSubSets; + NvP64 subClasses NV_ALIGN_BYTES(8); + NvU32 prohibitMultipleInstances; + NvP64 pControl NV_ALIGN_BYTES(8); // Used by Tegra to return a mapping to NvE276Control + NvHandle hMemoryCmdBuffer NV_ALIGN_BYTES(8); // Used by Tegra to specify cmd buffer + NvU64 offset NV_ALIGN_BYTES(8); // Used by Tegra to specify an offset into the cmd buffer + +} NV_VP_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; +} NV_PPP_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of MSENC? + NvU32 engineInstance; // Select MSENC/NVENC0 or NVENC1 or NVENC2 +} NV_MSENC_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of SEC2? +} NV_SEC2_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of NVJPG? + NvU32 engineInstance; +} NV_NVJPG_ALLOCATION_PARAMETERS; + +typedef struct +{ + NvU32 size; + NvU32 prohibitMultipleInstances; // Prohibit multiple allocations of OFA? 
+} NV_OFA_ALLOCATION_PARAMETERS; + +#define NV04_BIND_ARBITRARY_CONTEXT_DMA (0x00000039) + +/* parameters */ + +#define NV04_GET_MEMORY_INFO (0x0000003A) + +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hDevice; // [IN] device handle for mapping + NvHandle hMemory; // [IN] memory handle for mapping + NvU64 offset NV_ALIGN_BYTES(8); // [IN] offset of region + NvU64 physAddr NV_ALIGN_BYTES(8); // [OUT] Physical Addr + NvV32 status; // [OUT] status +} NVOS58_PARAMETERS; + +/* function OS59 */ +#define NV04_MAP_MEMORY_DMA_OFFSET (0x0000003B) + +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hDevice; // [IN] device handle for mapping + NvHandle hDma; // [IN] dma handle for mapping + NvU32 dmaFirstPage; // [IN] numPages + NvU32 numPages; // [IN] numPages + NvV32 flags; // [IN] flags + NvU64 offset NV_ALIGN_BYTES(8); // [IN] Dma Offset + NvHandle hDmaPteArray; // ctx dma for pte's + NvV32 status; // [OUT] status +} NVOS59_PARAMETERS; + +/* function OS60 */ +#define NV04_UNMAP_MEMORY_DMA_OFFSET (0x0000003C) +/* parameters */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hDevice; // [IN] device handle for mapping + NvHandle hDma; // [IN] dma handle for mapping + NvU32 numPages; // [IN] numPages + NvU64 dmaOffset NV_ALIGN_BYTES(8); // [IN] dmaOffset + NvV32 status; // [OUT] status +} NVOS60_PARAMETERS; + + +#define NV04_ADD_VBLANK_CALLBACK (0x0000003D) + +#include "class/cl9010.h" // for OSVBLANKCALLBACKPROC + +/* parameters */ +/* NOTE: the "void* pParm's" below are ok (but unfortunate) since this interface + can only be used by other kernel drivers which must share the same ptr-size */ +typedef struct +{ + NvHandle hClient; // [IN] client handle + NvHandle hDevice; // [IN] device handle for mapping + NvHandle hVblank; // [IN] Vblank handle for control + OSVBLANKCALLBACKPROC pProc; // Routine to call at vblank time + + NvV32 LogicalHead; // Logical Head + void *pParm1; + void *pParm2; + NvU32 bAdd; // Add or Delete + NvV32 status; // [OUT] status +} NVOS61_PARAMETERS; + +/** + * @brief NvAlloc parameters for VASPACE classes + * + * Used to create a new private virtual address space. + * + * index + * Tegra: With TEGRA_VASPACE_A, index specifies the IOMMU + * virtual address space to be created. Based on the + * index, RM/NVMEM will decide the HW ASID to be used with + * this VA Space. "index" takes values from the + * NVMEM_CLIENT_* defines in + * "drivers/common/inc/tegra/memory/ioctl.h". + * + * Big GPU: With FERMI_VASPACE_A, see NV_VASPACE_ALLOCATION_INDEX_GPU_*. + * + * flags + * MINIMIZE_PTETABLE_SIZE Pass hint to DMA HAL to use partial page tables. + * Depending on allocation pattern this may actually + * use more instance memory. + * + * RETRY_PTE_ALLOC_IN_SYS Fallback to PTEs allocation in sysmem. This is now + * enabled by default. + * + * SHARED_MANAGEMENT + * Indicates management of the VA space is shared with another + * component (e.g. driver layer, OS, etc.). + * + * The initial VA range from vaBase (inclusive) through vaSize (exclusive) + * is managed by RM. The range must be aligned to a top-level PDE's VA + * coverage since backing page table levels for this range are managed by RM. + * All normal RM virtual memory management APIs work within this range. + * + * An external component can manage the remaining VA ranges, + * from 0 (inclusive) to vaBase (exclusive) and from vaSize (inclusive) up to the + * maximum VA limit supported by HW. 
+ * Management of these ranges includes VA sub-allocation and the + * backing lower page table levels. + * + * The top-level page directory is special since it is a shared resource. + * Management of the page directory is as follows: + * 1. Initially RM allocates a page directory for RM-managed PDEs. + * 2. The external component may create a full page directory and commit it + * with NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY. + * This will copy the RM-managed PDEs from the RM-managed page directory + * into the external page directory and commit channels to the external page directory. + * After this point RM will update the external page directory directly for + * operations that modify RM-managed PDEs. + * 3. The external component may use NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY repeatedly + * if it needs to update the page directory again (e.g. to resize or migrate). + * This will copy the RM-managed PDEs from the old external page directory + * into the new external page directory and commit channels to the new page directory. + * 4. The external component may restore management of the page directory back to + * RM with NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY. + * This will copy the RM-managed PDEs from the external page directory + * into the RM-managed page directory and commit channels to the RM-managed page directory. + * After this point RM will update the RM-managed page directory for + * operations that modify RM-managed PDEs. + * Note that operations (2) and (4) are symmetric - the RM perspective of management is identical + * before and after a sequence of SET => ... => UNSET. + * + * IS_MIRRORED + * This flag will tell RM to create a mirrored + * kernel PDB for the address space associated + * with this device. When this flag is set + * the address space covered by the top PDE + * is restricted and cannot be allocated out of. + * ENABLE_PAGE_FAULTING + * Enable page faulting if the architecture supports it. + * As of now page faulting is only supported for compute on pascal+. + * IS_EXTERNALLY_OWNED + * This vaspace that has been allocated will be managed by + * an external driver. RM will not own the pagetables for this vaspace. + * + * ENABLE_NVLINK_ATS + * Enables VA translation for this address space using NVLINK ATS. + * Note, the GMMU page tables still exist and take priority over NVLINK ATS. + * VA space object creation will fail if: + * - hardware support is not available (NV_ERR_NOT_SUPPORTED) + * - incompatible options IS_MIRRORED or IS_EXTERNALLY_OWNED are set (NV_ERR_INVALID_ARGUMENT) + * IS_FLA + * Sets FLA flag for this VASPACE + * + * ALLOW_ZERO_ADDRESS + * Allows VASPACE Range to start from zero + * SKIP_SCRUB_MEMPOOL + * Skip scrubbing in MemPool + * + * vaBase [in, out] + * On input, the lowest usable base address of the VA space. + * If 0, RM will pick a default value - 0 is always reserved to respresent NULL pointers. + * The value must be aligned to the largest page size of the VA space. + * Larger values aid in debug since offsets added to NULL pointers will still fault. + * + * On output, the actual usable base address is returned. + * + * vaSize [in,out] + * On input, requested size of the virtual address space in bytes. + * Requesting a smaller size reduces the memory required for the initial + * page directory, but the VAS may be resized later (NV0080_CTRL_DMA_SET_VA_SPACE_SIZE). + * If 0, the default VA space size will be used. + * + * On output, the actual size of the VAS in bytes. 
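/*
 * Illustrative sketch, not part of the original header: one way a client
 * might populate the NV_VASPACE_ALLOCATION_PARAMETERS structure defined
 * just below, following the field descriptions in this comment.  The
 * filled structure is then passed to the RM allocation call together with
 * a VA space class such as FERMI_VASPACE_A; that call is outside the scope
 * of this header, and nvos.h is assumed to be where these defines live.
 */
#include <string.h>
#include "nvos.h"

static void FillExampleVaSpaceParams(NV_VASPACE_ALLOCATION_PARAMETERS *p)
{
    memset(p, 0, sizeof(*p));      /* zero selects the documented defaults       */
    p->index       = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
    p->flags       = NV_VASPACE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS;
    p->vaBase      = 0;            /* 0: let RM pick a non-NULL base (see above) */
    p->vaSize      = 0;            /* 0: default VA space size (see above)       */
    p->bigPageSize = 0;            /* 0: system default big page size            */
}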
+ * NOTE: This corresponds to the VA_LIMIT + 1, so the usable size is (vaSize - vaBase). + * + * bigPageSIze + * Set the size of the big page in this address space object. Current HW supports + * either 64k or 128k as the size of the big page. HW that support multiple big + * page size per address space will use this size. Hw that do not support this feature + * will override to the default big page size that is supported by the system. + * If the big page size value is set to ZERO then we will pick the default page size + * of the system. + **/ +typedef struct +{ + NvU32 index; + NvV32 flags; + NvU64 vaSize NV_ALIGN_BYTES(8); + NvU64 vaStartInternal NV_ALIGN_BYTES(8); + NvU64 vaLimitInternal NV_ALIGN_BYTES(8); + NvU32 bigPageSize; + NvU64 vaBase NV_ALIGN_BYTES(8); +} NV_VASPACE_ALLOCATION_PARAMETERS; + +#define NV_VASPACE_ALLOCATION_FLAGS_NONE (0x00000000) +#define NV_VASPACE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE BIT(0) +#define NV_VASPACE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS BIT(1) +#define NV_VASPACE_ALLOCATION_FLAGS_SHARED_MANAGEMENT BIT(2) +#define NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED BIT(3) +#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_NVLINK_ATS BIT(4) +#define NV_VASPACE_ALLOCATION_FLAGS_IS_MIRRORED BIT(5) +#define NV_VASPACE_ALLOCATION_FLAGS_ENABLE_PAGE_FAULTING BIT(6) +#define NV_VASPACE_ALLOCATION_FLAGS_VA_INTERNAL_LIMIT BIT(7) +#define NV_VASPACE_ALLOCATION_FLAGS_ALLOW_ZERO_ADDRESS BIT(8) +#define NV_VASPACE_ALLOCATION_FLAGS_IS_FLA BIT(9) +#define NV_VASPACE_ALLOCATION_FLAGS_SKIP_SCRUB_MEMPOOL BIT(10) +#define NV_VASPACE_ALLOCATION_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE BIT(11) + +#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW 0x00 // NV_STATUS_LEVEL_WARN + * to determine success v. failure of a call. + */ +#define NV_STATUS_LEVEL_WARN 1 + +/*! + * @def NV_STATUS_LEVEL_ERR + * @see NV_STATUS_LEVEL + * @brief Unrecoverable error condition + */ +#define NV_STATUS_LEVEL_ERR 3 + +/*! + * @def NV_STATUS_LEVEL + * @see NV_STATUS_LEVEL_OK + * @see NV_STATUS_LEVEL_WARN + * @see NV_STATUS_LEVEL_ERR + * @brief Level of the status code + * + * @warning IMPORTANT: When comparing NV_STATUS_LEVEL(_S) against one of + * these constants, it is important to use '<=' or '>' (rather + * than '<' or '>='). + * + * For example. do: + * if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN) + * rather than: + * if (NV_STATUS_LEVEL(status) < NV_STATUS_LEVEL_ERR) + * + * By being consistent in this manner, it is easier to systematically + * add additional level constants. New levels are likely to lower + * (rather than raise) the severity of _ERR codes. For example, + * if we were to add NV_STATUS_LEVEL_RETRY to indicate hardware + * failures that may be recoverable (e.g. RM_ERR_TIMEOUT_RETRY + * or RM_ERR_BUSY_RETRY), it would be less severe than + * NV_STATUS_LEVEL_ERR the level to which these status codes now + * belong. Using '<=' and '>' ensures your code is not broken in + * cases like this. + */ +#define NV_STATUS_LEVEL(_S) \ + ((_S) == NV_OK? NV_STATUS_LEVEL_OK: \ + ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? NV_STATUS_LEVEL_WARN: \ + NV_STATUS_LEVEL_ERR)) + +/*! + * @def NV_STATUS_LEVEL + * @see NV_STATUS_LEVEL_OK + * @see NV_STATUS_LEVEL_WARN + * @see NV_STATUS_LEVEL_ERR + * @brief Character representing status code level + */ +#define NV_STATUS_LEVEL_CHAR(_S) \ + ((_S) == NV_OK? '0': \ + ((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? 
'W': \ + 'E')) + +// Function definitions +const char *nvstatusToString(NV_STATUS nvStatusIn); + +#ifdef __cplusplus +} +#endif + +#endif // XAPIGEN + +#endif /* SDK_NVSTATUS_H */ diff --git a/src/common/sdk/nvidia/inc/nvstatuscodes.h b/src/common/sdk/nvidia/inc/nvstatuscodes.h new file mode 100644 index 000000000..09256bf70 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvstatuscodes.h @@ -0,0 +1,169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef SDK_NVSTATUSCODES_H +#define SDK_NVSTATUSCODES_H + +/* XAPIGEN - this file is not suitable for (nor needed by) xapigen. */ +/* Rather than #ifdef out every such include in every sdk */ +/* file, punt here. 
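/*
 * Illustrative sketch, not part of the original header: the
 * NV_STATUS_CODE() entries below form an X-macro table.  This file leaves
 * NV_STATUS_CODE undefined on purpose; each includer defines it first and
 * the list expands into whatever shape is needed (nvstatusToString(),
 * declared in nvstatus.h above, is a natural consumer of the same table).
 * A hypothetical standalone consumer could look like this:
 */
#include "nvtypes.h"

static const char *ExampleStatusToString(NvU32 status)
{
    switch (status)
    {
#define NV_STATUS_CODE(name, code, str) case (NvU32)(code): return (str);
#include "nvstatuscodes.h"
#undef NV_STATUS_CODE
        default:
            return "unrecognized NV_STATUS value";
    }
}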
*/ +#if !defined(XAPIGEN) /* rest of file */ + +NV_STATUS_CODE(NV_OK, 0x00000000, "Success") +NV_STATUS_CODE(NV_ERR_GENERIC, 0x0000FFFF, "Failure: Generic Error") + +NV_STATUS_CODE(NV_ERR_BROKEN_FB, 0x00000001, "Frame-Buffer broken") +NV_STATUS_CODE(NV_ERR_BUFFER_TOO_SMALL, 0x00000002, "Buffer passed in is too small") +NV_STATUS_CODE(NV_ERR_BUSY_RETRY, 0x00000003, "System is busy, retry later") +NV_STATUS_CODE(NV_ERR_CALLBACK_NOT_SCHEDULED, 0x00000004, "The requested callback API not scheduled") +NV_STATUS_CODE(NV_ERR_CARD_NOT_PRESENT, 0x00000005, "Card not detected") +NV_STATUS_CODE(NV_ERR_CYCLE_DETECTED, 0x00000006, "Call cycle detected") +NV_STATUS_CODE(NV_ERR_DMA_IN_USE, 0x00000007, "Requested DMA is in use") +NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_LOCKED, 0x00000008, "Requested DMA memory is not locked") +NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_UNLOCKED, 0x00000009, "Requested DMA memory is not unlocked") +NV_STATUS_CODE(NV_ERR_DUAL_LINK_INUSE, 0x0000000A, "Dual-Link is in use") +NV_STATUS_CODE(NV_ERR_ECC_ERROR, 0x0000000B, "Generic ECC error") +NV_STATUS_CODE(NV_ERR_FIFO_BAD_ACCESS, 0x0000000C, "FIFO: Invalid access") +NV_STATUS_CODE(NV_ERR_FREQ_NOT_SUPPORTED, 0x0000000D, "Requested frequency is not supported") +NV_STATUS_CODE(NV_ERR_GPU_DMA_NOT_INITIALIZED, 0x0000000E, "Requested DMA not initialized") +NV_STATUS_CODE(NV_ERR_GPU_IS_LOST, 0x0000000F, "GPU lost from the bus") +NV_STATUS_CODE(NV_ERR_GPU_IN_FULLCHIP_RESET, 0x00000010, "GPU currently in full-chip reset") +NV_STATUS_CODE(NV_ERR_GPU_NOT_FULL_POWER, 0x00000011, "GPU not in full power") +NV_STATUS_CODE(NV_ERR_GPU_UUID_NOT_FOUND, 0x00000012, "GPU UUID not found") +NV_STATUS_CODE(NV_ERR_HOT_SWITCH, 0x00000013, "System in hot switch") +NV_STATUS_CODE(NV_ERR_I2C_ERROR, 0x00000014, "I2C Error") +NV_STATUS_CODE(NV_ERR_I2C_SPEED_TOO_HIGH, 0x00000015, "I2C Error: Speed too high") +NV_STATUS_CODE(NV_ERR_ILLEGAL_ACTION, 0x00000016, "Current action is not allowed") +NV_STATUS_CODE(NV_ERR_IN_USE, 0x00000017, "Generic busy error") +NV_STATUS_CODE(NV_ERR_INFLATE_COMPRESSED_DATA_FAILED, 0x00000018, "Failed to inflate compressed data") +NV_STATUS_CODE(NV_ERR_INSERT_DUPLICATE_NAME, 0x00000019, "Found a duplicate entry in the requested btree") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_RESOURCES, 0x0000001A, "Ran out of a critical resource, other than memory") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_PERMISSIONS, 0x0000001B, "The requester does not have sufficient permissions") +NV_STATUS_CODE(NV_ERR_INSUFFICIENT_POWER, 0x0000001C, "Generic Error: Low power") +NV_STATUS_CODE(NV_ERR_INVALID_ACCESS_TYPE, 0x0000001D, "This type of access is not allowed") +NV_STATUS_CODE(NV_ERR_INVALID_ADDRESS, 0x0000001E, "Address not valid") +NV_STATUS_CODE(NV_ERR_INVALID_ARGUMENT, 0x0000001F, "Invalid argument to call") +NV_STATUS_CODE(NV_ERR_INVALID_BASE, 0x00000020, "Invalid base") +NV_STATUS_CODE(NV_ERR_INVALID_CHANNEL, 0x00000021, "Given channel-id not valid") +NV_STATUS_CODE(NV_ERR_INVALID_CLASS, 0x00000022, "Given class-id not valid") +NV_STATUS_CODE(NV_ERR_INVALID_CLIENT, 0x00000023, "Given client not valid") +NV_STATUS_CODE(NV_ERR_INVALID_COMMAND, 0x00000024, "Command passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_DATA, 0x00000025, "Invalid data passed") +NV_STATUS_CODE(NV_ERR_INVALID_DEVICE, 0x00000026, "Current device is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_DMA_SPECIFIER, 0x00000027, "The requested DMA specifier is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_EVENT, 0x00000028, "Invalid event occurred") +NV_STATUS_CODE(NV_ERR_INVALID_FLAGS, 0x00000029, "Invalid 
flags passed") +NV_STATUS_CODE(NV_ERR_INVALID_FUNCTION, 0x0000002A, "Called function is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_HEAP, 0x0000002B, "Heap corrupted") +NV_STATUS_CODE(NV_ERR_INVALID_INDEX, 0x0000002C, "Index invalid") +NV_STATUS_CODE(NV_ERR_INVALID_IRQ_LEVEL, 0x0000002D, "Requested IRQ level is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_LIMIT, 0x0000002E, "Generic Error: Invalid limit") +NV_STATUS_CODE(NV_ERR_INVALID_LOCK_STATE, 0x0000002F, "Requested lock state not valid") +NV_STATUS_CODE(NV_ERR_INVALID_METHOD, 0x00000030, "Requested method not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT, 0x00000031, "Object not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_BUFFER, 0x00000032, "Object buffer passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_HANDLE, 0x00000033, "Object handle is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_NEW, 0x00000034, "New object is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_OLD, 0x00000035, "Old object is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_PARENT, 0x00000036, "Object parent is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OFFSET, 0x00000037, "The offset passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OPERATION, 0x00000038, "Requested operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_OWNER, 0x00000039, "Owner not valid") +NV_STATUS_CODE(NV_ERR_INVALID_PARAM_STRUCT, 0x0000003A, "Invalid structure parameter") +NV_STATUS_CODE(NV_ERR_INVALID_PARAMETER, 0x0000003B, "At least one of the parameters passed is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_PATH, 0x0000003C, "The requested path is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_POINTER, 0x0000003D, "Pointer not valid") +NV_STATUS_CODE(NV_ERR_INVALID_REGISTRY_KEY, 0x0000003E, "Found an invalid registry key") +NV_STATUS_CODE(NV_ERR_INVALID_REQUEST, 0x0000003F, "Generic Error: Invalid request") +NV_STATUS_CODE(NV_ERR_INVALID_STATE, 0x00000040, "Generic Error: Invalid state") +NV_STATUS_CODE(NV_ERR_INVALID_STRING_LENGTH, 0x00000041, "The string length is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_READ, 0x00000042, "The requested read operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_WRITE, 0x00000043, "The requested write operation is not valid") +NV_STATUS_CODE(NV_ERR_INVALID_XLATE, 0x00000044, "The requested translate operation is not valid") +NV_STATUS_CODE(NV_ERR_IRQ_NOT_FIRING, 0x00000045, "Requested IRQ is not firing") +NV_STATUS_CODE(NV_ERR_IRQ_EDGE_TRIGGERED, 0x00000046, "IRQ is edge triggered") +NV_STATUS_CODE(NV_ERR_MEMORY_TRAINING_FAILED, 0x00000047, "Failed memory training sequence") +NV_STATUS_CODE(NV_ERR_MISMATCHED_SLAVE, 0x00000048, "Slave mismatch") +NV_STATUS_CODE(NV_ERR_MISMATCHED_TARGET, 0x00000049, "Target mismatch") +NV_STATUS_CODE(NV_ERR_MISSING_TABLE_ENTRY, 0x0000004A, "Requested entry missing not found in the table") +NV_STATUS_CODE(NV_ERR_MODULE_LOAD_FAILED, 0x0000004B, "Failed to load the requested module") +NV_STATUS_CODE(NV_ERR_MORE_DATA_AVAILABLE, 0x0000004C, "There is more data available") +NV_STATUS_CODE(NV_ERR_MORE_PROCESSING_REQUIRED, 0x0000004D, "More processing required for the given call") +NV_STATUS_CODE(NV_ERR_MULTIPLE_MEMORY_TYPES, 0x0000004E, "Multiple memory types found") +NV_STATUS_CODE(NV_ERR_NO_FREE_FIFOS, 0x0000004F, "No more free FIFOs found") +NV_STATUS_CODE(NV_ERR_NO_INTR_PENDING, 0x00000050, "No interrupt pending") +NV_STATUS_CODE(NV_ERR_NO_MEMORY, 0x00000051, "Out of memory") +NV_STATUS_CODE(NV_ERR_NO_SUCH_DOMAIN, 0x00000052, "Requested domain does not exist") +NV_STATUS_CODE(NV_ERR_NO_VALID_PATH, 
0x00000053, "Caller did not specify a valid path") +NV_STATUS_CODE(NV_ERR_NOT_COMPATIBLE, 0x00000054, "Generic Error: Incompatible types") +NV_STATUS_CODE(NV_ERR_NOT_READY, 0x00000055, "Generic Error: Not ready") +NV_STATUS_CODE(NV_ERR_NOT_SUPPORTED, 0x00000056, "Call not supported") +NV_STATUS_CODE(NV_ERR_OBJECT_NOT_FOUND, 0x00000057, "Requested object not found") +NV_STATUS_CODE(NV_ERR_OBJECT_TYPE_MISMATCH, 0x00000058, "Specified objects do not match") +NV_STATUS_CODE(NV_ERR_OPERATING_SYSTEM, 0x00000059, "Generic operating system error") +NV_STATUS_CODE(NV_ERR_OTHER_DEVICE_FOUND, 0x0000005A, "Found other device instead of the requested one") +NV_STATUS_CODE(NV_ERR_OUT_OF_RANGE, 0x0000005B, "The specified value is out of bounds") +NV_STATUS_CODE(NV_ERR_OVERLAPPING_UVM_COMMIT, 0x0000005C, "Overlapping unified virtual memory commit") +NV_STATUS_CODE(NV_ERR_PAGE_TABLE_NOT_AVAIL, 0x0000005D, "Requested page table not available") +NV_STATUS_CODE(NV_ERR_PID_NOT_FOUND, 0x0000005E, "Process-Id not found") +NV_STATUS_CODE(NV_ERR_PROTECTION_FAULT, 0x0000005F, "Protection fault") +NV_STATUS_CODE(NV_ERR_RC_ERROR, 0x00000060, "Generic RC error") +NV_STATUS_CODE(NV_ERR_REJECTED_VBIOS, 0x00000061, "Given Video BIOS rejected/invalid") +NV_STATUS_CODE(NV_ERR_RESET_REQUIRED, 0x00000062, "Reset required") +NV_STATUS_CODE(NV_ERR_STATE_IN_USE, 0x00000063, "State in use") +NV_STATUS_CODE(NV_ERR_SIGNAL_PENDING, 0x00000064, "Signal pending") +NV_STATUS_CODE(NV_ERR_TIMEOUT, 0x00000065, "Call timed out") +NV_STATUS_CODE(NV_ERR_TIMEOUT_RETRY, 0x00000066, "Call timed out, please retry later") +NV_STATUS_CODE(NV_ERR_TOO_MANY_PRIMARIES, 0x00000067, "Too many primaries") +NV_STATUS_CODE(NV_ERR_UVM_ADDRESS_IN_USE, 0x00000068, "Unified virtual memory requested address already in use") +NV_STATUS_CODE(NV_ERR_MAX_SESSION_LIMIT_REACHED, 0x00000069, "Maximum number of sessions reached") +NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH, 0x0000006A, "Library version doesn't match driver version") //Contained within the RMAPI library +NV_STATUS_CODE(NV_ERR_PRIV_SEC_VIOLATION, 0x0000006B, "Priv security violation") +NV_STATUS_CODE(NV_ERR_GPU_IN_DEBUG_MODE, 0x0000006C, "GPU currently in debug mode") +NV_STATUS_CODE(NV_ERR_FEATURE_NOT_ENABLED, 0x0000006D, "Requested Feature functionality is not enabled") +NV_STATUS_CODE(NV_ERR_RESOURCE_LOST, 0x0000006E, "Requested resource has been destroyed") +NV_STATUS_CODE(NV_ERR_PMU_NOT_READY, 0x0000006F, "PMU is not ready or has not yet been initialized") +NV_STATUS_CODE(NV_ERR_FLCN_ERROR, 0x00000070, "Generic falcon assert or halt") +NV_STATUS_CODE(NV_ERR_FATAL_ERROR, 0x00000071, "Fatal/unrecoverable error") +NV_STATUS_CODE(NV_ERR_MEMORY_ERROR, 0x00000072, "Generic memory error") +NV_STATUS_CODE(NV_ERR_INVALID_LICENSE, 0x00000073, "License provided is rejected or invalid") +NV_STATUS_CODE(NV_ERR_NVLINK_INIT_ERROR, 0x00000074, "Nvlink Init Error") +NV_STATUS_CODE(NV_ERR_NVLINK_MINION_ERROR, 0x00000075, "Nvlink Minion Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CLOCK_ERROR, 0x00000076, "Nvlink Clock Error") +NV_STATUS_CODE(NV_ERR_NVLINK_TRAINING_ERROR, 0x00000077, "Nvlink Training Error") +NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Configuration Error") +NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt") + +// Warnings: +NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch") +NV_STATUS_CODE(NV_WARN_INCORRECT_PERFMON_DATA, 0x00010002, "WARNING Incorrect performance monitor data") +NV_STATUS_CODE(NV_WARN_MISMATCHED_SLAVE, 0x00010003, 
"WARNING Slave mismatch") +NV_STATUS_CODE(NV_WARN_MISMATCHED_TARGET, 0x00010004, "WARNING Target mismatch") +NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED, 0x00010005, "WARNING More processing required for the call") +NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO, 0x00010006, "WARNING Nothing to do") +NV_STATUS_CODE(NV_WARN_NULL_OBJECT, 0x00010007, "WARNING NULL object found") +NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE, 0x00010008, "WARNING value out of range") + +#endif // XAPIGEN + +#endif /* SDK_NVSTATUSCODES_H */ diff --git a/src/common/sdk/nvidia/inc/nvtypes.h b/src/common/sdk/nvidia/inc/nvtypes.h new file mode 100644 index 000000000..53a60f916 --- /dev/null +++ b/src/common/sdk/nvidia/inc/nvtypes.h @@ -0,0 +1,625 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NVTYPES_INCLUDED +#define NVTYPES_INCLUDED + +#ifdef __cplusplus +extern "C" { +#endif + +#include "cpuopsys.h" + +#ifndef NVTYPES_USE_STDINT +#define NVTYPES_USE_STDINT 0 +#endif + +#if NVTYPES_USE_STDINT +#ifdef __cplusplus +#include +#include +#else +#include +#include +#endif // __cplusplus +#endif // NVTYPES_USE_STDINT + +#ifndef __cplusplus +// Header includes to make sure wchar_t is defined for C-file compilation +// (C++ is not affected as it is a fundamental type there) +// _MSC_VER is a hack to avoid failures for old setup of UEFI builds which are +// currently set to msvc100 but do not properly set the include paths +#endif // __cplusplus + +#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__) +// ensure or force 8-bytes alignment of NV 64-bit types +#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8))) +#else +// nothing needed +#define OPTIONAL_ALIGN8_ATTR +#endif // MAKE_NV64TYPES_8BYTES_ALIGNED && i386 + + /***************************************************************************\ +|* Typedefs *| + \***************************************************************************/ + +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +//Typedefs for MISRA COMPLIANCE +typedef unsigned long long UInt64; +typedef signed long long Int64; +typedef unsigned int UInt32; +typedef signed int Int32; +typedef unsigned short UInt16; +typedef signed short Int16; +typedef unsigned char UInt8 ; +typedef signed char Int8 ; + +typedef void Void; +typedef float float32_t; +typedef double float64_t; +#endif + + +// Floating point types +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef float32_t NvF32; /* IEEE Single Precision (S1E8M23) */ +typedef float64_t NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */ +#else +typedef float NvF32; /* IEEE Single Precision (S1E8M23) */ +typedef double NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */ +#endif + + +// 8-bit: 'char' is the only 8-bit in the C89 standard and after. +#if NVTYPES_USE_STDINT +typedef uint8_t NvV8; /* "void": enumerated or multiple fields */ +typedef uint8_t NvU8; /* 0 to 255 */ +typedef int8_t NvS8; /* -128 to 127 */ +#else +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt8 NvV8; /* "void": enumerated or multiple fields */ +typedef UInt8 NvU8; /* 0 to 255 */ +typedef Int8 NvS8; /* -128 to 127 */ +#else +typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ +typedef unsigned char NvU8; /* 0 to 255 */ +typedef signed char NvS8; /* -128 to 127 */ +#endif +#endif // NVTYPES_USE_STDINT + + +#if NVTYPES_USE_STDINT +typedef uint16_t NvV16; /* "void": enumerated or multiple fields */ +typedef uint16_t NvU16; /* 0 to 65535 */ +typedef int16_t NvS16; /* -32768 to 32767 */ +#else +// 16-bit: If the compiler tells us what we can use, then use it. 
+#ifdef __INT16_TYPE__ +typedef unsigned __INT16_TYPE__ NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned __INT16_TYPE__ NvU16; /* 0 to 65535 */ +typedef signed __INT16_TYPE__ NvS16; /* -32768 to 32767 */ + +// The minimal standard for C89 and after +#else // __INT16_TYPE__ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt16 NvV16; /* "void": enumerated or multiple fields */ +typedef UInt16 NvU16; /* 0 to 65535 */ +typedef Int16 NvS16; /* -32768 to 32767 */ +#else +typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ +typedef unsigned short NvU16; /* 0 to 65535 */ +typedef signed short NvS16; /* -32768 to 32767 */ +#endif +#endif // __INT16_TYPE__ +#endif // NVTYPES_USE_STDINT + +// wchar type (fixed size types consistent across Linux/Windows boundaries) +#if defined(NV_HAS_WCHAR_T_TYPEDEF) + typedef wchar_t NvWchar; +#else + typedef NvV16 NvWchar; +#endif + +// Macro to build an NvU32 from four bytes, listed from msb to lsb +#define NvU32_BUILD(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d)) + +#if NVTYPES_USE_STDINT +typedef uint32_t NvV32; /* "void": enumerated or multiple fields */ +typedef uint32_t NvU32; /* 0 to 4294967295 */ +typedef int32_t NvS32; /* -2147483648 to 2147483647 */ +#else +// 32-bit: If the compiler tells us what we can use, then use it. +#ifdef __INT32_TYPE__ +typedef unsigned __INT32_TYPE__ NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned __INT32_TYPE__ NvU32; /* 0 to 4294967295 */ +typedef signed __INT32_TYPE__ NvS32; /* -2147483648 to 2147483647 */ + +// Older compilers +#else // __INT32_TYPE__ + +// For historical reasons, NvU32/NvV32 are defined to different base intrinsic +// types than NvS32 on some platforms. +// Mainly for 64-bit linux, where long is 64 bits and win9x, where int is 16 bit. +#if (defined(NV_UNIX) || defined(vxworks) || defined(NV_WINDOWS_CE) || \ + defined(__arm) || defined(__IAR_SYSTEMS_ICC__) || defined(NV_QNX) || \ + defined(NV_INTEGRITY) || defined(NV_MODS) || \ + defined(__GNUC__) || defined(__clang__) || defined(NV_MACINTOSH_64)) && \ + (!defined(NV_MACINTOSH) || defined(NV_MACINTOSH_64)) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt32 NvV32; /* "void": enumerated or multiple fields */ +typedef UInt32 NvU32; /* 0 to 4294967295 */ +#else +typedef unsigned int NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned int NvU32; /* 0 to 4294967295 */ +#endif + +// The minimal standard for C89 and after +#else // (defined(NV_UNIX) || defined(vxworks) || ... +typedef unsigned long NvV32; /* "void": enumerated or multiple fields */ +typedef unsigned long NvU32; /* 0 to 4294967295 */ +#endif // (defined(NV_UNIX) || defined(vxworks) || ... 
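/*
 * Illustrative sketch, not part of the original header: the point of the
 * per-compiler selections in this file is that each NvUxx/NvSxx typedef
 * comes out at exactly the advertised width.  In a separate translation
 * unit that can be verified at compile time with the classic
 * negative-array-size trick (C89-compatible):
 */
#include "nvtypes.h"

typedef char NvU8_is_1_byte  [(sizeof(NvU8)  == 1) ? 1 : -1];
typedef char NvU16_is_2_bytes[(sizeof(NvU16) == 2) ? 1 : -1];
typedef char NvU32_is_4_bytes[(sizeof(NvU32) == 4) ? 1 : -1];
typedef char NvU64_is_8_bytes[(sizeof(NvU64) == 8) ? 1 : -1];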
+ +// Mac OS 32-bit still needs this +#if defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64) +typedef signed long NvS32; /* -2147483648 to 2147483647 */ +#else +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef Int32 NvS32; /* -2147483648 to 2147483647 */ +#else +typedef signed int NvS32; /* -2147483648 to 2147483647 */ +#endif +#endif // defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64) +#endif // __INT32_TYPE__ +#endif // NVTYPES_USE_STDINT + + + +#if NVTYPES_USE_STDINT +typedef uint64_t NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef int64_t NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ + +#define NvU64_fmtX PRIX64 +#define NvU64_fmtx PRIx64 +#define NvU64_fmtu PRIu64 +#define NvU64_fmto PRIo64 +#define NvS64_fmtd PRId64 +#define NvS64_fmti PRIi64 +#else +// 64-bit types for compilers that support them, plus some obsolete variants +#if defined(__GNUC__) || defined(__clang__) || defined(__arm) || \ + defined(__IAR_SYSTEMS_ICC__) || defined(__ghs__) || defined(_WIN64) || \ + defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined (__xlC__) +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +typedef UInt64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef Int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ +#else +typedef unsigned long long NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef long long NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ +#endif + +#define NvU64_fmtX "llX" +#define NvU64_fmtx "llx" +#define NvU64_fmtu "llu" +#define NvU64_fmto "llo" +#define NvS64_fmtd "lld" +#define NvS64_fmti "lli" + +// Microsoft since 2003 -- https://msdn.microsoft.com/en-us/library/29dh1w7z.aspx +#else +typedef unsigned __int64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */ +typedef __int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */ + +#define NvU64_fmtX "I64X" +#define NvU64_fmtx "I64x" +#define NvU64_fmtu "I64u" +#define NvU64_fmto "I64o" +#define NvS64_fmtd "I64d" +#define NvS64_fmti "I64i" + +#endif +#endif // NVTYPES_USE_STDINT + +#ifdef NV_TYPESAFE_HANDLES +/* + * Can't use opaque pointer as clients might be compiled with mismatched + * pointer sizes. TYPESAFE check will eventually be removed once all clients + * have transistioned safely to NvHandle. + * The plan is to then eventually scale up the handle to be 64-bits. + */ +typedef struct +{ + NvU32 val; +} NvHandle; +#else +/* + * For compatibility with modules that haven't moved typesafe handles. 
+ */ +typedef NvU32 NvHandle; +#endif // NV_TYPESAFE_HANDLES + +/* Boolean type */ +typedef NvU8 NvBool; +#define NV_TRUE ((NvBool)(0 == 0)) +#define NV_FALSE ((NvBool)(0 != 0)) + +/* Tristate type: NV_TRISTATE_FALSE, NV_TRISTATE_TRUE, NV_TRISTATE_INDETERMINATE */ +typedef NvU8 NvTristate; +#define NV_TRISTATE_FALSE ((NvTristate) 0) +#define NV_TRISTATE_TRUE ((NvTristate) 1) +#define NV_TRISTATE_INDETERMINATE ((NvTristate) 2) + +/* Macros to extract the low and high parts of a 64-bit unsigned integer */ +/* Also designed to work if someone happens to pass in a 32-bit integer */ +#ifdef NV_MISRA_COMPLIANCE_REQUIRED +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffffU)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffffU)) +#else +#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffff)) +#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffff)) +#endif +#define NvU40_HI32(n) ((NvU32)((((NvU64)(n)) >> 8) & 0xffffffffU)) +#define NvU40_HI24of32(n) ((NvU32)( (NvU64)(n) & 0xffffff00U)) + +/* Macros to get the MSB and LSB of a 32 bit unsigned number */ +#define NvU32_HI16(n) ((NvU16)((((NvU32)(n)) >> 16) & 0xffffU)) +#define NvU32_LO16(n) ((NvU16)(( (NvU32)(n)) & 0xffffU)) + + /***************************************************************************\ +|* *| +|* 64 bit type definitions for use in interface structures. *| +|* *| + \***************************************************************************/ + +#if defined(NV_64_BITS) + +typedef void* NvP64; /* 64 bit void pointer */ +typedef NvU64 NvUPtr; /* pointer sized unsigned int */ +typedef NvS64 NvSPtr; /* pointer sized signed int */ +typedef NvU64 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) (n) +#define NvP64_fmt "%p" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(v)) +#define NvP64_PLUS_OFFSET(p,o) (NvP64)((NvU64)(p) + (NvU64)(o)) + +#define NvUPtr_fmtX NvU64_fmtX +#define NvUPtr_fmtx NvU64_fmtx +#define NvUPtr_fmtu NvU64_fmtu +#define NvUPtr_fmto NvU64_fmto +#define NvSPtr_fmtd NvS64_fmtd +#define NvSPtr_fmti NvS64_fmti + +#else + +typedef NvU64 NvP64; /* 64 bit void pointer */ +typedef NvU32 NvUPtr; /* pointer sized unsigned int */ +typedef NvS32 NvSPtr; /* pointer sized signed int */ +typedef NvU32 NvLength; /* length to agree with sizeof */ + +#define NvP64_VALUE(n) ((void *)(NvUPtr)(n)) +#define NvP64_fmt "0x%llx" + +#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(NvUPtr)(v)) +#define NvP64_PLUS_OFFSET(p,o) ((p) + (NvU64)(o)) + +#define NvUPtr_fmtX "X" +#define NvUPtr_fmtx "x" +#define NvUPtr_fmtu "u" +#define NvUPtr_fmto "o" +#define NvSPtr_fmtd "d" +#define NvSPtr_fmti "i" + +#endif + +#define NvP64_NULL (NvP64)0 + +/*! + * Helper macro to pack an @ref NvU64_ALIGN32 structure from a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64_ALIGN32 structure to pack + * @param[in] pSrc Pointer to NvU64 with which to pack + */ +#define NvU64_ALIGN32_PACK(pDst, pSrc) \ +do { \ + (pDst)->lo = NvU64_LO32(*(pSrc)); \ + (pDst)->hi = NvU64_HI32(*(pSrc)); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure into a @ref NvU64. + * + * @param[out] pDst Pointer to NvU64 in which to unpack + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure from which to unpack + */ +#define NvU64_ALIGN32_UNPACK(pDst, pSrc) \ +do { \ + (*(pDst)) = NvU64_ALIGN32_VAL(pSrc); \ +} while (NV_FALSE) + +/*! + * Helper macro to unpack a @ref NvU64_ALIGN32 structure as a @ref NvU64. 
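/*
 * Illustrative sketch, not part of the original header: round-tripping a
 * 64-bit value through the 32-bit-aligned carrier structure with the
 * PACK/UNPACK helpers defined above, as one would when exchanging data
 * with a 32-bit microcontroller.
 */
#include "nvtypes.h"

static NvU64 AlignedRoundTripExample(NvU64 value)
{
    NvU64_ALIGN32 packed;
    NvU64         unpacked;

    NvU64_ALIGN32_PACK(&packed, &value);       /* packed.lo/.hi <- value       */
    NvU64_ALIGN32_UNPACK(&unpacked, &packed);  /* unpacked <- (hi << 32) | lo  */
    return unpacked;                           /* equal to the original value  */
}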
+ * + * @param[in] pSrc Pointer to NvU64_ALIGN32 structure to unpack + */ +#define NvU64_ALIGN32_VAL(pSrc) \ + ((NvU64) ((NvU64)((pSrc)->lo) | (((NvU64)(pSrc)->hi) << 32U))) + +/*! + * Helper macro to check whether the 32 bit aligned 64 bit number is zero. + * + * @param[in] _pU64 Pointer to NvU64_ALIGN32 structure. + * + * @return + * NV_TRUE _pU64 is zero. + * NV_FALSE otherwise. + */ +#define NvU64_ALIGN32_IS_ZERO(_pU64) \ + (((_pU64)->lo == 0U) && ((_pU64)->hi == 0U)) + +/*! + * Helper macro to sub two 32 aligned 64 bit numbers on 64 bit processor. + * + * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure. + * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure. + * @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure. + */ +#define NvU64_ALIGN32_ADD(pDst, pSrc1, pSrc2) \ +do { \ + NvU64 __dst, __src1, __scr2; \ + \ + NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \ + NvU64_ALIGN32_UNPACK(&__scr2, (pSrc2)); \ + __dst = __src1 + __scr2; \ + NvU64_ALIGN32_PACK((pDst), &__dst); \ +} while (NV_FALSE) + +/*! + * Helper macro to sub two 32 aligned 64 bit numbers on 64 bit processor. + * + * @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure. + * @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure. + * @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure. + */ +#define NvU64_ALIGN32_SUB(pDst, pSrc1, pSrc2) \ +do { \ + NvU64 __dst, __src1, __scr2; \ + \ + NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \ + NvU64_ALIGN32_UNPACK(&__scr2, (pSrc2)); \ + __dst = __src1 - __scr2; \ + NvU64_ALIGN32_PACK((pDst), &__dst); \ +} while (NV_FALSE) + +/*! + * Structure for representing 32 bit aligned NvU64 (64-bit unsigned integer) + * structures. This structure must be used because the 32 bit processor and + * 64 bit processor compilers will pack/align NvU64 differently. + * + * One use case is RM being 64 bit proc whereas PMU being 32 bit proc, this + * alignment difference will result in corrupted transactions between the RM + * and PMU. + * + * See the @ref NvU64_ALIGN32_PACK and @ref NvU64_ALIGN32_UNPACK macros for + * packing and unpacking these structures. + * + * @note The intention of this structure is to provide a datatype which will + * packed/aligned consistently and efficiently across all platforms. + * We don't want to use "NV_DECLARE_ALIGNED(NvU64, 8)" because that + * leads to memory waste on our 32-bit uprocessors (e.g. FALCONs) where + * DMEM efficiency is vital. + */ +typedef struct +{ + /*! + * Low 32 bits. + */ + NvU32 lo; + /*! + * High 32 bits. + */ + NvU32 hi; +} NvU64_ALIGN32; + +/* Useful macro to hide required double cast */ +#define NV_PTR_TO_NvP64(n) (NvP64)(NvUPtr)(n) +#define NV_SIGN_EXT_PTR_TO_NvP64(p) ((NvP64)(NvS64)(NvSPtr)(p)) +#define KERNEL_POINTER_TO_NvP64(p) ((NvP64)(uintptr_t)(p)) + + /***************************************************************************\ +|* *| +|* Limits for common types. *| +|* *| + \***************************************************************************/ + +/* Explanation of the current form of these limits: + * + * - Decimal is used, as hex values are by default positive. + * - Casts are not used, as usage in the preprocessor itself (#if) ends poorly. + * - The subtraction of 1 for some MIN values is used to get around the fact + * that the C syntax actually treats -x as NEGATE(x) instead of a distinct + * number. Since 214748648 isn't a valid positive 32-bit signed value, we + * take the largest valid positive signed number, negate it, and subtract 1. 
+ */ +#define NV_S8_MIN (-128) +#define NV_S8_MAX (+127) +#define NV_U8_MIN (0U) +#define NV_U8_MAX (+255U) +#define NV_S16_MIN (-32768) +#define NV_S16_MAX (+32767) +#define NV_U16_MIN (0U) +#define NV_U16_MAX (+65535U) +#define NV_S32_MIN (-2147483647 - 1) +#define NV_S32_MAX (+2147483647) +#define NV_U32_MIN (0U) +#define NV_U32_MAX (+4294967295U) +#define NV_S64_MIN (-9223372036854775807LL - 1LL) +#define NV_S64_MAX (+9223372036854775807LL) +#define NV_U64_MIN (0ULL) +#define NV_U64_MAX (+18446744073709551615ULL) + +/* Aligns fields in structs so they match up between 32 and 64 bit builds */ +#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX) +#define NV_ALIGN_BYTES(size) __attribute__ ((aligned (size))) +#elif defined(__arm) +#define NV_ALIGN_BYTES(size) __align(ALIGN) +#else +// XXX This is dangerously nonportable! We really shouldn't provide a default +// version of this that doesn't do anything. +#define NV_ALIGN_BYTES(size) +#endif + +// NV_DECLARE_ALIGNED() can be used on all platforms. +// This macro form accounts for the fact that __declspec on Windows is required +// before the variable type, +// and NV_ALIGN_BYTES is required after the variable name. +#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX) +#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) TYPE_VAR __attribute__ ((aligned (ALIGN))) +#elif defined(__arm) +#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __align(ALIGN) TYPE_VAR +#endif + + /***************************************************************************\ +|* Function Declaration Types *| + \***************************************************************************/ + +// stretching the meaning of "nvtypes", but this seems to least offensive +// place to re-locate these from nvos.h which cannot be included by a number +// of builds that need them + + #if defined(__GNUC__) + #if (__GNUC__ > 3) || \ + ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) && (__GNUC_PATCHLEVEL__ >= 1)) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + #elif defined(__clang__) + #if __has_attribute(noinline) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + #elif defined(__arm) && (__ARMCC_VERSION >= 300000) + #define NV_NOINLINE __attribute__((__noinline__)) + #elif (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) ||\ + (defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x590)) + #define NV_NOINLINE __attribute__((__noinline__)) + #elif defined (__INTEL_COMPILER) + #define NV_NOINLINE __attribute__((__noinline__)) + #endif + + #if !defined(NV_NOINLINE) + #define NV_NOINLINE + #endif + + /* GreenHills compiler defines __GNUC__, but doesn't support + * __inline__ keyword. */ + #if defined(__ghs__) + #define NV_INLINE inline + #elif defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_INLINE __inline__ + #elif defined (macintosh) || defined(__SUNPRO_C) || defined(__SUNPRO_CC) + #define NV_INLINE inline + #elif defined(__arm) + #define NV_INLINE __inline + #else + #define NV_INLINE + #endif + + /* Don't force inline on DEBUG builds -- it's annoying for debuggers. */ + #if !defined(DEBUG) + /* GreenHills compiler defines __GNUC__, but doesn't support + * __attribute__ or __inline__ keyword. */ + #if defined(__ghs__) + #define NV_FORCEINLINE inline + #elif defined(__GNUC__) + // GCC 3.1 and beyond support the always_inline function attribute. 
+ #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__clang__) + #if __has_attribute(always_inline) + #define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__ + #else + #define NV_FORCEINLINE __inline__ + #endif + #elif defined(__arm) && (__ARMCC_VERSION >= 220000) + // RVDS 2.2 also supports forceinline, but ADS 1.2 does not + #define NV_FORCEINLINE __forceinline + #else /* defined(__GNUC__) */ + #define NV_FORCEINLINE NV_INLINE + #endif + #else + #define NV_FORCEINLINE NV_INLINE + #endif + + #define NV_APIENTRY + #define NV_FASTCALL + #define NV_CDECLCALL + #define NV_STDCALL + + /* + * The 'warn_unused_result' function attribute prompts GCC to issue a + * warning if the result of a function tagged with this attribute + * is ignored by a caller. In combination with '-Werror', it can be + * used to enforce result checking in RM code; at this point, this + * is only done on UNIX. + */ + #if defined(__GNUC__) && defined(NV_UNIX) + #if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #elif defined(__clang__) + #if __has_attribute(warn_unused_result) + #define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__)) + #else + #define NV_FORCERESULTCHECK + #endif + #else /* defined(__GNUC__) */ + #define NV_FORCERESULTCHECK + #endif + + #if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER) + #define NV_ATTRIBUTE_UNUSED __attribute__((__unused__)) + #else + #define NV_ATTRIBUTE_UNUSED + #endif + + /* + * Functions decorated with NV_FORMAT_PRINTF(f, a) have a format string at + * parameter number 'f' and variadic arguments start at parameter number 'a'. + * (Note that for C++ methods, there is an implicit 'this' parameter so + * explicit parameters are numbered from 2.) + */ + #if defined(__GNUC__) + #define NV_FORMAT_PRINTF(_f, _a) __attribute__((format(printf, _f, _a))) + #else + #define NV_FORMAT_PRINTF(_f, _a) + #endif + +#ifdef __cplusplus +} +#endif + +#endif /* NVTYPES_INCLUDED */ diff --git a/src/common/sdk/nvidia/inc/rmcd.h b/src/common/sdk/nvidia/inc/rmcd.h new file mode 100644 index 000000000..b4d8abcef --- /dev/null +++ b/src/common/sdk/nvidia/inc/rmcd.h @@ -0,0 +1,407 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef RMCD_H +#define RMCD_H + +//****************************************************************************** +// +// Module Name: RMCD.H +// +// This file contains structures and constants that define the resource manager +// specific data for the crash dump file. The record definitions defined here +// are always stored after the crash dump file header. Each record defined here +// is preceded by the NVCD_RECORD structure. +// +//****************************************************************************** + +#if !defined(XAPIGEN) /* avoid duplicate xapi fns generated */ +#include "nvgputypes.h" +#include "nvcd.h" +#endif + +#define NV_RMCD_VERSION (20) + +// Define RC Reset Callback function type +#if !defined(XAPIGEN) /* xapigen doesn't do fn ptrs */ +#define RM_RC_CALLBACK_HANDLE_UPDATE 1 +#if RM_RC_CALLBACK_HANDLE_UPDATE +typedef NvU32 RC_RESET_CALLBACK(NvHandle hClient, NvHandle hDevice, NvHandle hFifo, NvHandle hChannel, + void *pContext, NvBool clearRc); +#else +typedef NvU32 RC_RESET_CALLBACK(NvHandle hClient, NvHandle hDevice, NvHandle hChannel, void *pContext, NvBool clearRc); +#endif +#endif + +#if !defined(XAPIGEN) /* not needed for xapigen */ +typedef struct _rc_error_context { + struct OBJGPU *pGpu; // GPU device + NvU32 ChId; // Channel ID of channel in RC recovery + NvU32 secChId; // Channel ID of secondary channel + NvHandle sechClient; // Client handle of secondary channel + NvU32 exceptType; // ROBUST_CHANNEL_* error identifier + NvU32 EngineId; // Engine ID of faulting engine + NvU32 subdeviceInstance; // This parameter returns the unicast subdevice instance number + // associated with the specified GPU. This value can be used to + // instantiate a unicast reference to the GPU using the NV20_SUBDEVICE + // classes. + + // FIFO_MMU_EXCEPTION_DATA + NvU32 addrLo; // MMU fault address (Lo) + NvU32 addrHi; // MMU fault address (Hi) + NvU32 faultType; // MMU fault code + const char *faultStr; // MMU fault string + +} RC_ERROR_CONTEXT, *PRC_ERROR_CONTEXT; + +#endif + +#define MAX_FBBAS 0x2 +#define FBBA_MUX_SEL_MAX 0xF +#define FBBA_RXB_STATUS_SEL_MAX 0x10 +#define RDSTAT_MAX 0x2 + +// Define the resource manager group record types +typedef enum +{ + RmCurrentStateInfo_V2 = 4, // Revised Version 2 RM current state info. record + RmMissedNotifierInfo = 9, // RM Driver Indicates Missed Notifier + RmGlobalInfo_V3 = 10, // Revised Version 3 of Global Info + + RmProtoBuf = 131, // ProtoBuf + RmProtoBuf_V2 = 132, // ProtoBuf + NvDump + RmDclMsg = 133, // One of the optional DlcMsg fields, encoded + RmJournalEngDump = 134, + DP_ASSERT_HIT = 135, + DP_LOG_CALL = 136, + RmPrbFullDump = 137, // Full NvDump protobuf message + + // Includes RmRCCommonJournal_RECORD structure. 
+ RmJournalBugCheck = 138, // Bugcheck record + RmRC2SwRmAssert_V3 = 139, // Retail logged RM_ASSERT info + RmRC2GpuTimeout_V3 = 140, // Retail logged RM_TIMEOUT events + RmRC2SwDbgBreakpoint_V3 = 141, // Uses same format as RmRC2SwRmAssert_V3 + RmBadRead_V2 = 142, // Record with Bad Read Information + RmSurpriseRemoval_V2 = 143, // Surprise Removal + RmPowerState = 144, // PowerState + RmPrbErrorInfo_V2 = 145, // Protobuf error record + RmPrbFullDump_V2 = 146, // Full NvDump protobuf message + RmRcDiagReport = 147, // Rc Diagnostic report message + RmNocatReport = 149, // Nocat reports. + RmDispState = 150, // Display error record + +} RMCD_RECORD_TYPE; + +typedef struct { + NVCD_RECORD Header; // Global information record header + NvU32 dwSize; // Total Protobuf Message Size +} RmProtoBuf_RECORD; +typedef RmProtoBuf_RECORD *PRmProtoBuf_RECORD; + +// +// Please include RmRCCommonJournal_RECORD structure at the top of all new and +// revised journal record structures. +// +#define NV_RM_JOURNAL_STATE_MASK_GC6_STATE 3:0 + +#define NV_RM_JOURNAL_STATE_MASK_IS_NOT_FULL_POWER 0x00000010 +#define NV_RM_JOURNAL_STATE_MASK_IS_NOT_CONNECTED 0x00000020 +#define NV_RM_JOURNAL_STATE_MASK_IS_IN_STANDBY 0x00000040 +#define NV_RM_JOURNAL_STATE_MASK_IS_IN_HIBERNATE 0x00000080 +#define NV_RM_JOURNAL_STATE_MASK_IS_IN_PM_CODEPATH 0x00000100 +#define NV_RM_JOURNAL_STATE_MASK_IS_IN_GC6_RESET 0x00000200 +#define NV_RM_JOURNAL_STATE_MASK_IS_IN_FULLCHIP_RESET 0x00000400 +#define NV_RM_JOURNAL_STATE_MASK_IS_IN_SEC_BUS_RESET 0x00000800 +#define NV_RM_JOURNAL_STATE_MASK_IS_IN_TIMEOUT_RECOVERY 0x00001000 +#define NV_RM_JOURNAL_STATE_MASK_IS_LOST 0x00002000 +#define NV_RM_JOURNAL_STATE_MASK_VIDMEM_FAILED_BAR0 0x00004000 +#define NV_RM_JOURNAL_STATE_MASK_VIDMEM_FAILED_BAR2 0x00008000 +#define NV_RM_JOURNAL_STATE_MASK_VIDMEM_FAILED_FBHUB 0x00010000 + + +typedef struct _RmRCCommonJournal_RECORD +{ + NVCD_RECORD Header; + NvU32 GPUTag; + NvU64 CPUTag; + NvU64 timeStamp NV_ALIGN_BYTES(8); + NvU64 stateMask; + NvU8 *pNext; // temporary link used for sorting. + // note using type NvU8 because Linux + // generated code does not properly + // resove the refeerence to the struct type. + // & balks about void * type. +} RmRCCommonJournal_RECORD; +typedef RmRCCommonJournal_RECORD *PRmRCCommonJournal_RECORD; + +// RM_ASSERT + DBG_BREAKPOINT info +typedef struct +{ + RmRCCommonJournal_RECORD common; + NvU64 lastTimeStamp NV_ALIGN_BYTES(8); + NvU64 breakpointAddrHint NV_ALIGN_BYTES(8); // address that can identify bp module + NvU64 callStack[10] NV_ALIGN_BYTES(8); // Call stack when the assert occurred. + NvU32 count; + NvU32 lineNum; + NvU32 lastReportedCount; // last count reported to NOCAT + NvU64 nextReportedTimeStamp; // time stamp to use as start time for the + // next NOCAT report. 
+} RmRCCommonAssert_RECORD; +typedef RmRCCommonAssert_RECORD *PRmRCCommonAssert_RECORD; +#define NV_RM_ASSERT_UNKNOWN_LINE_NUM 0 + +typedef RmRCCommonAssert_RECORD RmRC2GpuTimeout3_RECORD; +typedef RmRC2GpuTimeout3_RECORD *PRmRC2GpuTimeout3_RECORD; + +// How Serious is this RM_ASSERT/DBG_BREAKPOINT +// (1) Info -- This is unexpected but we should continue to run +// (2) Error -- +// (3) Fatal -- This is hopeless -- FBI Timeout, Bus Error Etc +#define NV_RM_ASSERT_TYPE 3:0 +#define NV_RM_ASSERT_TYPE_INFO 0x00000001 +#define NV_RM_ASSERT_TYPE_ERROR 0x00000002 +#define NV_RM_ASSERT_TYPE_FATAL 0x00000003 + +// HW Unit which is having the issue +#define NV_RM_ASSERT_HW_UNIT 15:8 +#define NV_RM_ASSERT_HW_UNIT_NULL (0x00) +#define NV_RM_ASSERT_HW_UNIT_GRAPHICS (0x01) +#define NV_RM_ASSERT_HW_UNIT_COPY0 (0x02) +#define NV_RM_ASSERT_HW_UNIT_COPY1 (0x03) +#define NV_RM_ASSERT_HW_UNIT_VP (0x04) +#define NV_RM_ASSERT_HW_UNIT_ME (0x05) +#define NV_RM_ASSERT_HW_UNIT_PPP (0x06) +#define NV_RM_ASSERT_HW_UNIT_BSP (0x07) +#define NV_RM_ASSERT_HW_UNIT_NVDEC0 NV_RM_ASSERT_HW_UNIT_BSP +#define NV_RM_ASSERT_HW_UNIT_MPEG (0x08) +#define NV_RM_ASSERT_HW_UNIT_SW (0x09) +#define NV_RM_ASSERT_HW_UNIT_CIPHER (0x0a) +#define NV_RM_ASSERT_HW_UNIT_VIC (0x0b) +#define NV_RM_ASSERT_HW_UNIT_MSENC (0x0c) +#define NV_RM_ASSERT_HW_UNIT_NVENC0 NV_RM_ASSERT_HW_UNIT_MSENC +#define NV_RM_ASSERT_HW_UNIT_NVENC1 (0x0d) +#define NV_RM_ASSERT_HW_UNIT_HOST (0x0e) +#define NV_RM_ASSERT_HW_UNIT_ROM (0x0f) +#define NV_RM_ASSERT_HW_UNIT_INSTMEM (0x10) +#define NV_RM_ASSERT_HW_UNIT_DISP (0x11) +#define NV_RM_ASSERT_HW_UNIT_NVENC2 (0x12) +#define NV_RM_ASSERT_HW_UNIT_NVDEC1 (0x13) +#define NV_RM_ASSERT_HW_UNIT_NVDEC2 (0x14) +#define NV_RM_ASSERT_HW_UNIT_NVDEC3 (0x15) +#define NV_RM_ASSERT_HW_UNIT_ALLENGINES (0xff) +// SW Module which generated the error +#define NV_RM_ASSERT_SW_MODULE 15:8 + +// This is a specific error number which we wish to follow regardless of builds +// We want to use this for backend processing +// This is also a compromise. Ideally, each event would have a unique id but +// instead of doing this we use EIP which is unique per load. If we subtracted off +// the Module Load Address then it would be unique per build, Using EIP allows us +// to use the debugger to lookup the source code that corresponds to the event. 
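The "msb:lsb" pairs used in these definitions (for example NV_RM_ASSERT_TYPE 3:0 and NV_RM_ASSERT_HW_UNIT 15:8) are bit-range definitions in the RM's DRF style; in-tree code packs and extracts them with helpers from nvmisc.h. A minimal standalone sketch using plain shifts and masks (the helper names here are hypothetical, not the RM API):

/*
 * Illustrative sketch only: compose/decode an assert "level" word from
 * the ranges defined above (TYPE in bits 3:0, HW_UNIT in bits 15:8).
 */
static NvU32 rmAssertPackLevel(NvU32 type, NvU32 hwUnit)
{
    return ((type   & 0xFu)  << 0) |    /* NV_RM_ASSERT_TYPE     3:0  */
           ((hwUnit & 0xFFu) << 8);     /* NV_RM_ASSERT_HW_UNIT  15:8 */
}

static NvU32 rmAssertLevelHwUnit(NvU32 level)
{
    return (level >> 8) & 0xFFu;        /* extract bits 15:8 */
}

/* Example: a fatal assert attributed to the HOST unit.               */
/*   NvU32 level = rmAssertPackLevel(NV_RM_ASSERT_TYPE_FATAL,         */
/*                                   NV_RM_ASSERT_HW_UNIT_HOST);      */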
+#define NV_RM_ASSERT_LEVEL_TAG 30:16 +// Host Errors +#define NV_RM_ASSERT_LEVEL_TAG_BAR1_PAGE_FAULT (0x0001) +#define NV_RM_ASSERT_LEVEL_TAG_IFB_PAGE_FAULT (0x0002) +#define NV_RM_ASSERT_LEVEL_TAG_PIO_ERROR (0x0003) +#define NV_RM_ASSERT_LEVEL_TAG_CHSW_SAVE_INVALID (0x0004) +#define NV_RM_ASSERT_LEVEL_TAG_CHSW_ERROR (0x0005) +#define NV_RM_ASSERT_LEVEL_TAG_FBIRD_TIMEOUT (0x0006) +#define NV_RM_ASSERT_LEVEL_TAG_CPUQ_FBIBUSY_TIMEOUT (0x0007) +#define NV_RM_ASSERT_LEVEL_TAG_CHSW_FSM_TIMEOUT (0x0008) +#define NV_RM_ASSERT_LEVEL_TAG_FB_FLUSH_TIMEOUT (0x0009) +#define NV_RM_ASSERT_LEVEL_TAG_P2PSTATE_TIMEOUT (0x000a) +#define NV_RM_ASSERT_LEVEL_TAG_VBIOS_CHECKSUM (0x000b) +#define NV_RM_ASSERT_LEVEL_TAG_DISP_SYNC (0x000c) + +// What is the generating Source -- GPU or SW +#define NV_RM_ASSERT_LEVEL_SOURCE 31:31 +#define NV_RM_ASSERT_LEVEL_SOURCE_SW 0x00000000 +#define NV_RM_ASSERT_LEVEL_SOURCE_HW 0x00000001 + + +// RM_ASSERT + DBG_BREAKPOINT info +typedef struct +{ + RmRCCommonAssert_RECORD commonAssert; + NvU32 level; +} RmRC2SwRmAssert3_RECORD; +typedef RmRC2SwRmAssert3_RECORD *PRmRC2SwRmAssert3_RECORD; + +// RM JOURNAL BUG CHECK +typedef struct +{ + RmRCCommonJournal_RECORD common; + NvU32 bugCheckCode; +} RmJournalBugcheck_RECORD, *PRmJournalBugcheck_RECORD; + +typedef struct +{ + NVCD_RECORD Header; + NvS32 GpuTemp; + NvS32 LocTemp; +} RmRC2TempReading_RECORD; +typedef RmRC2TempReading_RECORD *PRmRC2TempReading_RECORD; + +typedef enum { + MEMORY_BAR0 = 1, + MEMORY_FB, + MEMORY_INSTANCE, + MEMORY_PCI, +} RMCD_BAD_READ_SPACE; + +typedef struct +{ + RmRCCommonJournal_RECORD common; + NvU32 MemorySpace; // Which Memory Space + NvU32 Offset; // Offset in Memory Space + NvU32 Mask; // Mask used to detect bad read + NvU32 Value; // Value Return + NvU32 Reason; // Potential Reason why this might have happened +} RmRC2BadRead2_RECORD; + +typedef RmRC2BadRead2_RECORD *PRmRC2BadRead2_RECORD; + +typedef struct +{ + RmRCCommonJournal_RECORD common; + NvU32 hostEngTag; + NvU32 exceptType; + NvU32 exceptLevel; +} RmRCRecovery_RECORD; + +typedef RmRCRecovery_RECORD *PRmRCRecovery_RECORD; + +typedef struct +{ + RmRCCommonJournal_RECORD common; +} RmPrbInfo_RECORD_V2, *PRmPrbInfo_RECORD_V2; + +// Counters per type of RC Error. WIll only keep first n (10) types. +#define MAX_RC_ERROR_COUNTER 10 +#define RC_ERROR_COUNTER_OTHER_INDEX MAX_RC_ERROR_COUNTER-1 +#define RC_ERROR_COUNTER_TYPE_INVALID ((NvU32)(~0)) +#define RC_ERROR_COUNTER_OTHER_TYPE 1024 + +typedef struct +{ + NvU32 rcFaultType; + NvU32 rcErrorSrc; + NvU64 mmuFaultVA; + NvU32 mmuAccessType; +} rcExtraMMUInfo; + +typedef struct +{ + NvU32 rcGRErrType; + NvU32 rcGRExcptnSubTyp; +} rcExtraGRInfo; + +typedef struct +{ + NvU32 rcPBDMAID; + NvU64 rcPBDMAErrFlag; +} rcExtraPBDMAInfo; + +/* +* Union for storing extra data values for each kind of RC Error +* +* MMU +* ===== +* rcFaultType +* rcErrorSrc +* +* GR +* ======= +* rcGRErrType +* rcGRExcptnSubTyp +* +* PBDMA +* ========= +* rcPBDMAID +* rcPBDMAErrFlag +*/ + +// XAPIGEN: hack around union with no discriminant. 
+#if defined XAPIGEN +typedef struct +{ + rcExtraMMUInfo rcMMUData; + rcExtraGRInfo rcGRData; + rcExtraPBDMAInfo rcPBDMAData; +} rcExtraInfo; +#else +typedef union +{ + rcExtraMMUInfo rcMMUData; + rcExtraGRInfo rcGRData; + rcExtraPBDMAInfo rcPBDMAData; +} rcExtraInfo; +#endif + + +typedef struct +{ + NvU32 rcErrorType; + NvU32 rcErrorCount; + NvU32 rcLastCHID; + NvU64 rcLastTime; + rcExtraInfo rcExtrInfo; +} rcErrorCounterEntry; + +typedef struct +{ + NvU32 powerEvent; // The name of the point we are tracing + // Such as NVRM_OCA_PWR_EVENT_SET_POWER_ENTRY + NvU32 powerState; + NvU32 fastBootPowerState; // Tracking fast boot shutdown/hibernate states +} RmPowerState_RECORD, *PRmPowerState_RECORD; + +#define MAX_RCDB_RCDIAG_ENTRIES 200 +typedef struct +{ + NvU32 offset; + NvU32 tag; + NvU32 value; + NvU32 attribute; +} RmRcDiagRecordEntry; + +#define RCDB_RCDIAG_DEFAULT_OWNER 0 +typedef struct +{ + NvU16 idx; + NvU32 timeStamp; // time in seconds since 1/1/1970 + NvU16 type; + NvU32 flags; + NvU16 count; + NvU32 owner; // a handle indicating the ownership + NvU32 processId; // process Id of the originating process. + RmRcDiagRecordEntry data[MAX_RCDB_RCDIAG_ENTRIES]; +} RmRcDiag_RECORD, *PRmRcDiag_RECORD; + +#define MAX_RCDB_RCDIAG_WRAP_BUFF 32 +#define INVALID_RCDB_RCDIAG_INDEX 0xffffffff + +#define MAX_RCDB_NOCAT_WRAP_BUFF 100 +#define INVALID_RCDB_NOCAT_ID 0xffffffff + +#endif // RMCD_H diff --git a/src/common/sdk/nvidia/inc/rs_access.h b/src/common/sdk/nvidia/inc/rs_access.h new file mode 100644 index 000000000..7c71a7802 --- /dev/null +++ b/src/common/sdk/nvidia/inc/rs_access.h @@ -0,0 +1,272 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#pragma once + +#include + +// +// This file was generated with FINN, an NVIDIA coding tool. +// Source file: rs_access.finn +// + + + + +#include "nvtypes.h" +#include "nvmisc.h" + + +/****************************************************************************/ +/* Access right definitions */ +/****************************************************************************/ + +// +// The meaning of each access right is documented in +// resman/docs/rmapi/resource_server/rm_capabilities.adoc +// +// RS_ACCESS_COUNT is the number of access rights that have been defined +// and are in use. All integers in the range [0, RS_ACCESS_COUNT) should +// represent valid access rights. 
+// +// When adding a new access right, don't forget to update +// 1) The descriptions in the resman/docs/rmapi/resource_server/rm_capabilities.adoc +// 2) RS_ACCESS_COUNT, defined below +// 3) The declaration of g_rsAccessMetadata in rs_access_rights.c +// 4) The list of access rights in drivers/common/chip-config/Chipcontrols.pm +// 5) Any relevant access right callbacks +// + +#define RS_ACCESS_DUP_OBJECT 0U +#define RS_ACCESS_NICE 1U +#define RS_ACCESS_DEBUG 2U +#define RS_ACCESS_COUNT 3U + + +/****************************************************************************/ +/* Access right data structures */ +/****************************************************************************/ + +/*! + * @brief A type that can be used to represent any access right. + */ +typedef NvU16 RsAccessRight; + +/*! + * @brief An internal type used to represent one limb in an access right mask. + */ +typedef NvU32 RsAccessLimb; +#define SDK_RS_ACCESS_LIMB_BITS 32 + +/*! + * @brief The number of limbs in the RS_ACCESS_MASK struct. + */ +#define SDK_RS_ACCESS_MAX_LIMBS 1 + +/*! + * @brief The maximum number of possible access rights supported by the + * current data structure definition. + * + * You probably want RS_ACCESS_COUNT instead, which is the number of actual + * access rights defined. + */ +#define SDK_RS_ACCESS_MAX_COUNT (0x20) /* finn: Evaluated from "(SDK_RS_ACCESS_LIMB_BITS * SDK_RS_ACCESS_MAX_LIMBS)" */ + +/** + * @brief A struct representing a set of access rights. + * + * Note that the values of bit positions larger than RS_ACCESS_COUNT is + * undefined, and should not be assumed to be 0 (see RS_ACCESS_MASK_FILL). + */ +typedef struct RS_ACCESS_MASK { + RsAccessLimb limbs[SDK_RS_ACCESS_MAX_LIMBS]; +} RS_ACCESS_MASK; + +/** + * @brief A struct representing auxiliary information about each access right. + */ +typedef struct RS_ACCESS_INFO { + NvU32 flags; +} RS_ACCESS_INFO; + + +/****************************************************************************/ +/* Access right macros */ +/****************************************************************************/ + +#define SDK_RS_ACCESS_LIMB_INDEX(index) ((index) / SDK_RS_ACCESS_LIMB_BITS) +#define SDK_RS_ACCESS_LIMB_POS(index) ((index) % SDK_RS_ACCESS_LIMB_BITS) + +#define SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) \ + ((pAccessMask)->limbs[SDK_RS_ACCESS_LIMB_INDEX(index)]) +#define SDK_RS_ACCESS_OFFSET_MASK(index) \ + NVBIT_TYPE(SDK_RS_ACCESS_LIMB_POS(index), RsAccessLimb) + +/*! + * @brief Checks that accessRight represents a valid access right. + * + * The valid range of access rights is [0, RS_ACCESS_COUNT). + * + * @param[in] accessRight The access right value to check + * + * @return true if accessRight is valid + * @return false otherwise + */ +#define RS_ACCESS_BOUNDS_CHECK(accessRight) \ + (accessRight < RS_ACCESS_COUNT) + +/*! + * @brief Test whether an access right is present in a set + * + * @param[in] pAccessMask The set of access rights to read + * @param[in] index The access right to examine + * + * @return NV_TRUE if the access right specified by index was present in the set, + * and NV_FALSE otherwise + */ +#define RS_ACCESS_MASK_TEST(pAccessMask, index) \ + (RS_ACCESS_BOUNDS_CHECK(index) && \ + (SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) & SDK_RS_ACCESS_OFFSET_MASK(index)) != 0) + +/*! 
+ * @brief Add an access right to a mask + * + * @param[in] pAccessMask The set of access rights to modify + * @param[in] index The access right to set + */ +#define RS_ACCESS_MASK_ADD(pAccessMask, index) \ + do \ + { \ + if (RS_ACCESS_BOUNDS_CHECK(index)) { \ + SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) |= SDK_RS_ACCESS_OFFSET_MASK(index); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Remove an access right from a mask + * + * @param[in] pAccessMask The set of access rights to modify + * @param[in] index The access right to unset + */ +#define RS_ACCESS_MASK_REMOVE(pAccessMask, index) \ + do \ + { \ + if (RS_ACCESS_BOUNDS_CHECK(index)) { \ + SDK_RS_ACCESS_LIMB_ELT(pAccessMask, index) &= ~SDK_RS_ACCESS_OFFSET_MASK(index); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Performs an in-place union between two access right masks + * + * @param[in,out] pMaskOut The access rights mask to be updated + * @param[in] pMaskIn The set of access rights to be added to pMaskOut + */ +#define RS_ACCESS_MASK_UNION(pMaskOut, pMaskIn) \ + do \ + { \ + NvLength limb; \ + for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \ + { \ + SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) |= SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Performs an in-place subtract of one mask's rights from another + * + * @param[in,out] pMaskOut The access rights mask to be updated + * @param[in] pMaskIn The set of access rights to be removed from pMaskOut + */ +#define RS_ACCESS_MASK_SUBTRACT(pMaskOut, pMaskIn) \ + do \ + { \ + NvLength limb; \ + for (limb = 0; limb < SDK_RS_ACCESS_MAX_LIMBS; limb++) \ + { \ + SDK_RS_ACCESS_LIMB_ELT(pMaskOut, limb) &= ~SDK_RS_ACCESS_LIMB_ELT(pMaskIn, limb); \ + } \ + } while (NV_FALSE) + +/*! + * @brief Removes all rights from an access rights mask + * + * @param[in,out] pAccessMask The access rights mask to be updated + */ +#define RS_ACCESS_MASK_CLEAR(pAccessMask) \ + do \ + { \ + portMemSet(pAccessMask, 0, sizeof(*pAccessMask)); \ + } while (NV_FALSE) + +/*! + * @brief Adds all rights to an access rights mask + * + * @param[in,out] pAccessMask The access rights mask to be updated + */ +#define RS_ACCESS_MASK_FILL(pAccessMask) \ + do \ + { \ + portMemSet(pAccessMask, 0xff, sizeof(*pAccessMask)); \ + } while (NV_FALSE) + + +/****************************************************************************/ +/* Share definitions */ +/****************************************************************************/ + +// +// The usage of Share Policy and the meaning of each share type is documented in +// resman/docs/rmapi/resource_server/rm_capabilities.adoc +// +#define RS_SHARE_TYPE_NONE (0U) +#define RS_SHARE_TYPE_ALL (1U) +#define RS_SHARE_TYPE_OS_SECURITY_TOKEN (2U) +#define RS_SHARE_TYPE_CLIENT (3U) +#define RS_SHARE_TYPE_PID (4U) +#define RS_SHARE_TYPE_SMC_PARTITION (5U) +#define RS_SHARE_TYPE_GPU (6U) +#define RS_SHARE_TYPE_FM_CLIENT (7U) +// Must be last. Update when a new SHARE_TYPE is added +#define RS_SHARE_TYPE_MAX (8U) + + +// +// Use Revoke to remove an existing policy from the list. +// Allow is based on OR logic, Require is based on AND logic. +// To share a right, at least one Allow (non-Require) must match, and all Require must pass. +// If Compose is specified, policies will be added to the list. Otherwise, they will replace the list. 
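The mask macros above are the intended way to manipulate an RS_ACCESS_MASK. A short usage sketch (the function and its policy are hypothetical; zero-initialization stands in for RS_ACCESS_MASK_CLEAR, which depends on portMemSet):

/* Illustrative sketch: build a mask holding DUP_OBJECT and DEBUG, then
 * test for one right. RS_ACCESS_MASK_TEST bounds-checks the index and
 * evaluates to NV_FALSE for anything outside [0, RS_ACCESS_COUNT). */
static NvBool clientMayDebug(void)
{
    RS_ACCESS_MASK rightsMask = { { 0 } };

    RS_ACCESS_MASK_ADD(&rightsMask, RS_ACCESS_DUP_OBJECT);
    RS_ACCESS_MASK_ADD(&rightsMask, RS_ACCESS_DEBUG);

    return RS_ACCESS_MASK_TEST(&rightsMask, RS_ACCESS_DEBUG);
}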
+// +#define RS_SHARE_ACTION_FLAG_REVOKE NVBIT(0) +#define RS_SHARE_ACTION_FLAG_REQUIRE NVBIT(1) +#define RS_SHARE_ACTION_FLAG_COMPOSE NVBIT(2) + +/****************************************************************************/ +/* Share flag data structures */ +/****************************************************************************/ + +typedef struct RS_SHARE_POLICY { + NvU32 target; + RS_ACCESS_MASK accessMask; + NvU16 type; ///< RS_SHARE_TYPE_ + NvU8 action; ///< RS_SHARE_ACTION_ +} RS_SHARE_POLICY; diff --git a/src/common/shared/inc/compat.h b/src/common/shared/inc/compat.h new file mode 100644 index 000000000..655e40f79 --- /dev/null +++ b/src/common/shared/inc/compat.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __COMPAT_H__ +#define __COMPAT_H__ + +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvstatus.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(PORT_IS_KERNEL_BUILD) + +#include "utils/nvassert.h" +#include "nvport/nvport.h" + +#else + +#error NvPort must be enabled to use compat.h + +#endif + +#ifdef __cplusplus +} +#endif + +#endif //__COMPAT_H__ diff --git a/src/common/shared/msgq/inc/msgq/msgq.h b/src/common/shared/msgq/inc/msgq/msgq.h new file mode 100644 index 000000000..7d37fc13c --- /dev/null +++ b/src/common/shared/msgq/inc/msgq/msgq.h @@ -0,0 +1,220 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef MSGQ_H +#define MSGQ_H + +// Handle used to refer to queues. +typedef void *msgqHandle; + +// Minimal size of message +#define MSGQ_MSG_SIZE_MIN 16 + +// Minimal alignment of metadata, must be power of 2 +#define MSGQ_META_MIN_ALIGN 3U // 2^3 = 8 +#define MSGQ_META_MAX_ALIGN 12U // 4096. Used to sanity-check alignment + // parameters. Increase if needed. +// If set we swap read pointers (for bidirectional communication). +// That way each peers can have their "receive" channel mapped RO +#define MSGQ_FLAGS_SWAP_RX 1 + +#define FCN_FLAG_NOTIFY_MSG_WRITE 0 +#define FCN_FLAG_NOTIFY_MSG_READ 1 + +// msgqFcnBackendRw flags +#define FCN_FLAG_BACKEND_ACCESS_MASK 0x0001 +#define FCN_FLAG_BACKEND_ACCESS_READ 0x0000 +#define FCN_FLAG_BACKEND_ACCESS_WRITE 0x0001 + +#define FCN_FLAG_BACKEND_QUEUE_MASK 0x0100 +#define FCN_FLAG_BACKEND_QUEUE_RX 0x0000 // Access rx queue backing store +#define FCN_FLAG_BACKEND_QUEUE_TX 0x0100 // Access tx queue backing store + +/* + * Hook functions. In future it should be possible to replace them (as an + * option) with compile time macros. + */ + +// Notify other peer that queue state change. +// Should return 0 on success. +typedef int (*msgqFcnNotifyRemote)(int isRead, void *pArg); + +// Generic cache operation function (may be flush, zero, invalidate) +typedef void (*msgqFcnCacheOp)(const volatile void *pAddr, unsigned size); + +// Generic barrier +typedef void (*msgqFcnBarrier)(void); + +// Function to access backend memory (if it's not memory mapped). +// Keep in mind than when using it, pointers given by peek can't be trusted +// Should return 0 on success. +typedef int (*msgqFcnBackendRw)(void *pDest, const void *pSrc, unsigned size, + unsigned flags, void *pArg); + +/** + * @brief Return size of metadata (that must be allocated) + */ +unsigned msgqGetMetaSize(void); + +/** + * @brief Create queue object. + * @param handle Pointer to handle. + * @param metaBuf Pre-allocated buffer. It's size must be at least + * msgqGetMetaSize() bytes, and must be MSGQ_META_MIN_ALIGN aligned. + * @return 0 on success. + */ +int msgqInit(msgqHandle *pHandle, void *pBuffer); + +/* + * Queue configuration. Should be done after Init, before linking (unless you + * know what you're doing). + */ + +void msgqSetNotification(msgqHandle handle, msgqFcnNotifyRemote fcn, void *pArg); +void msgqSetBackendRw(msgqHandle handle, msgqFcnBackendRw fcn, void *pArg); +void msgqSetRxInvalidate(msgqHandle handle, msgqFcnCacheOp fcn); +void msgqSetTxFlush(msgqHandle handle, msgqFcnCacheOp fcn); +void msgqSetZero(msgqHandle handle, msgqFcnCacheOp fcn); +void msgqSetBarrier(msgqHandle handle, msgqFcnBarrier fcn); + +/** + * @brief Creates outgoing queue. That includes initializing of backend. + * @param handle queue (must be already initialized with msgqInit()) + * @param pBackingStore memory buffer (or cookie if backendRW is used). As a + * general rule it must be mapped RW to us. + * @param size Size of buffer (in bytes). + * @param msgSize Size of message (in bytes). + * @param hdrAlign Alignment of header (2^n). + * @param entryAlign Alignment of entry (2^n). + * @param flags For now only SWAP_RX is to be used. + * @return 0 on success. + * + * After TX was created, optional notification callback is executed. 
+ */ +int msgqTxCreate(msgqHandle handle, void *pBackingStore, unsigned size, unsigned msgSize, + unsigned hdrAlign, unsigned entryAlign, unsigned flags); + +/** + * @brief Links into RX buffer (initialized by third party). + * @param handle queue (must be already initialized with msgqInit()) + * @param pBackingStore memory buffer (or cookie if backendRW is used). As a + * general rule it must be mapped RW to us. With SWAP_RX it can be RO. + * @param size Size of buffer (in bytes) + * @param msgSize Size of message (in bytes) + * @return 0 on success + * Note that msgSize and size are there only to do sanity check. Backing store + * must be already initialized. + * + * After link is estabilished, optional notification callback is executed. + */ +int msgqRxLink(msgqHandle handle, const void *pBackingStore, unsigned size, + unsigned msgSize); + +/** + * @brief Get number of free out messages. + * @param handle + * @return 0 if outgoing queue is full or not initialized + * + * WARING: This function doesn NOT flush caches. It's fast, but in reality + * there may be more space than is reported. + */ +unsigned msgqTxGetFreeSpace(msgqHandle handle); + +/** + * @brief Get pointer to "out" message + * @param handle + * @param n number of message (0..msgqTxGetFreeSpace()-1) + * @return Writable memory + * + * WARNING: If memory cookie is used, returned pointer can't be accessed + * directly, wrappers must be used. + * Note that it should be not assumed that pointers returned for n=0 and n=1 + * are continuous. + * This function is cheap and should be used for batch submissions. + */ +void *msgqTxGetWriteBuffer(msgqHandle handle, unsigned n); + +/** + * @brief Submits message(s) + * @param handle + * @param n Number of messages to submit (0..msgqTxGetFreeSpace()-1) + * @return 0 on success + * + * This is basically "send" function. It submits buffers that were previously + * filled with msgqTxGetWriteBuffer(). Obviously it may send "trash", if + * we send more than we filled. + */ +int msgqTxSubmitBuffers(msgqHandle handle, unsigned n); + +/** + * @brief Synchronize TX channel. + * @param handle + * @return Number of free buffers. + * + * This function is similar to msgqTxGetFreeSpace(), except it invalidates cache + * to get latest read pointer. + */ +int msgqTxSync(msgqHandle handle); + +/** + * @brief Get number of unread messages + * @param handle + * @return Number of messages. 0 if queue is empty or not linked. + */ +unsigned msgqRxGetReadAvailable(msgqHandle handle); + +/** + * @brief Read messages + * @param handle + * @param n Number of message, (0..msgqRxGetReadAvailable()-1) + * @return Pointer to message payload or NULL. + * + * WARNING: If memory cookie is used, returned pointer can't be accessed + * directly, wrappers must be used. + * Note that it should be not assumed that pointers returned for n=0 and n=1 + * are continuous. + * This function is cheap and should be used for batch receive. + */ +const void *msgqRxGetReadBuffer(msgqHandle handle, unsigned n); + +/** + * @brief Mark messages as read + * @param handle + * @param n Number of messages to be read, (0..msgqRxGetReadAvailable()-1) + * @return 0 on success + * + * This function (may) notify other side that messages were consumed. + */ +int msgqRxMarkConsumed(msgqHandle handle, unsigned n); + +/** + * @brief Synchronize RX channel. + * @param handle + * @return Number of pending messages + * + * This function is similar to msgqRxGetReadAvailable(), except it invalidates + * cache to get latest write pointer. 
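Taken together, the functions above give a simple produce/consume flow. A hedged usage sketch of the transmit side (the function name, sizes, and error handling are illustrative assumptions, not from the source):

/* Illustrative TX-side sketch. pMetaBuf must be msgqGetMetaSize() bytes
 * and suitably aligned; pShmem is the shared backing store. */
static int sendOneMessage(void *pMetaBuf, void *pShmem, unsigned shmemSize)
{
    msgqHandle hQueue = NULL;
    void      *pSlot;

    if (msgqInit(&hQueue, pMetaBuf) != 0)
        return -1;

    /* 64-byte messages, 2^6-byte header/entry alignment, bidirectional. */
    if (msgqTxCreate(hQueue, pShmem, shmemSize, 64, 6, 6,
                     MSGQ_FLAGS_SWAP_RX) != 0)
        return -1;

    if (msgqTxGetFreeSpace(hQueue) == 0)
        return -1;                              /* peer has not caught up */

    pSlot = msgqTxGetWriteBuffer(hQueue, 0);    /* slot 0 of this batch */
    if (pSlot == NULL)
        return -1;

    /* ... fill pSlot with up to 64 bytes of payload ... */

    return msgqTxSubmitBuffers(hQueue, 1);      /* publish one message */
}

The receive side mirrors this flow with msgqRxLink(), msgqRxGetReadAvailable(), msgqRxGetReadBuffer(), and msgqRxMarkConsumed().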
+ */ +int msgqRxSync(msgqHandle handle); + +#endif // MSGQ_H diff --git a/src/common/shared/msgq/inc/msgq/msgq_priv.h b/src/common/shared/msgq/inc/msgq/msgq_priv.h new file mode 100644 index 000000000..840773f52 --- /dev/null +++ b/src/common/shared/msgq/inc/msgq/msgq_priv.h @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * Private header for message queues + * Should not be used except for (direct) higher layer of messaging (rm, rpc) + * Keep structures padded to 64bytes (to avoid cache issues) + */ + +#ifndef MSGQ_PRIV_H +#define MSGQ_PRIV_H + +#include + +#include "msgq.h" + +// Version, gets increased with incompatible changes. +#define MSGQ_VERSION 0 + +/* + * (Shared) queue Layout: + * TX header (padded to cache line) + * RX header (padded to cache line) + * Ring buffer of messages + * + */ + +// buffer metadata, written by source, at start of block +typedef struct +{ + NvU32 version; // queue version + NvU32 size; // bytes, page aligned + NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum + NvU32 msgCount; // number of entries in queue + NvU32 writePtr; // message id of next slot + NvU32 flags; // if set it means "i want to swap RX" + NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store. + NvU32 entryOff; // Offset of entries from start of backing store. +} msgqTxHeader; + +// buffer metadata, written by sink +typedef struct +{ + NvU32 readPtr; // message id of last message read +} msgqRxHeader; + +// Internal tracking structure (handle) +typedef struct +{ + // Those are also bases of buffers; our / their means belonging to our/their buffer + msgqTxHeader *pOurTxHdr; + volatile const msgqTxHeader *pTheirTxHdr; + msgqRxHeader *pOurRxHdr; // Can't set either RxHdr to volatile + msgqRxHeader *pTheirRxHdr; // const due to MSGQ_FLAGS_SWAP_RX. + + NvU8 *pOurEntries; // first tx entry + const NvU8 *pTheirEntries; // first rx entry + + // To simplify things - those elements are at *destination* surfaces + volatile const NvU32 *pReadIncoming; // rx we read, they write + volatile const NvU32 *pWriteIncoming; // tx we read, they write + NvU32 *pReadOutgoing; // rx we write they read + NvU32 *pWriteOutgoing; // tx we write they read + + // tx == our + msgqTxHeader tx; + NvU32 txReadPtr; // Local cache for pQueue->pReadIncoming. 
+ NvU32 txFree; // Cached copy of msgqTxGetFreeSpace. + NvBool txLinked; + + // rx == theirs + msgqTxHeader rx; + NvU32 rxReadPtr; // Local cache for pQueue->pReadOutgoing. + NvU32 rxAvail; // Cached copy of msgqRxGetReadAvailable. + NvBool rxLinked; + + // swap rx backing store + NvBool rxSwapped; + + // notifications + msgqFcnNotifyRemote fcnNotify; + void *fcnNotifyArg; + msgqFcnBackendRw fcnBackendRw; + void *fcnBackendRwArg; + msgqFcnCacheOp fcnInvalidate; + msgqFcnCacheOp fcnFlush; + msgqFcnCacheOp fcnZero; + msgqFcnBarrier fcnBarrier; +} msgqMetadata; + +#endif // MSGQ_PRIV_H diff --git a/src/common/shared/msgq/msgq.c b/src/common/shared/msgq/msgq.c new file mode 100644 index 000000000..4cf1a2c94 --- /dev/null +++ b/src/common/shared/msgq/msgq.c @@ -0,0 +1,716 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
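Before the implementation that follows, a worked example of how the msgq_priv.h header fields translate into a concrete layout may help (the parameters are assumptions, not taken from the source):

/*
 * Worked example: a 4 KiB backing store, 64-byte messages, and
 * hdrAlign = entryAlign = 6 (64-byte alignment) give:
 *
 *   rxHdrOff = ALIGN_UP(sizeof(msgqTxHeader), 64)            = 64
 *   entryOff = ALIGN_UP(rxHdrOff + sizeof(msgqRxHeader), 64) = 128
 *   msgCount = (4096 - 128) / 64                             = 62
 *
 * The ring keeps one slot empty to tell "full" from "empty", so at most
 * msgCount - 1 = 61 messages can be outstanding at once.
 */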
+ */ + +#include +#include +#include + +#if defined(UPROC_RISCV) && !defined(NVRM) +#include "msgq/msgq.h" +#include "msgq/msgq_priv.h" +#include +#include +#else // defined(UPROC_RISCV) && !defined(NVRM) +#include "inc/msgq/msgq.h" +#include "inc/msgq/msgq_priv.h" +/* This is because this code will be shared with CPU */ +#define sysSHARED_CODE +// MK TODO: we should have unified memset/memcpy interface at some point +#if PORT_MODULE_memory +#include "nvport/nvport.h" +#define memcpy(d,s,l) portMemCopy(d,l,s,l) +#define memset portMemSet +#else // PORT_MODULE_memory +#include +#endif // PORT_MODULE_memory +#endif // defined(UPROC_RISCV) && !defined(NVRM) + +sysSHARED_CODE unsigned +msgqGetMetaSize(void) +{ + return sizeof(msgqMetadata); +} + +sysSHARED_CODE void +msgqSetNotification(msgqHandle handle, msgqFcnNotifyRemote fcn, void *pArg) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + pQueue->fcnNotify = fcn; + pQueue->fcnNotifyArg = pArg; +} + +sysSHARED_CODE void +msgqSetBackendRw(msgqHandle handle, msgqFcnBackendRw fcn, void *pArg) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + pQueue->fcnBackendRw = fcn; + pQueue->fcnBackendRwArg = pArg; +} + +sysSHARED_CODE void +msgqSetRxInvalidate(msgqHandle handle, msgqFcnCacheOp fcn) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + pQueue->fcnInvalidate = fcn; +} + +sysSHARED_CODE void +msgqSetTxFlush(msgqHandle handle, msgqFcnCacheOp fcn) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + pQueue->fcnFlush = fcn; +} + +sysSHARED_CODE void +msgqSetZero(msgqHandle handle, msgqFcnCacheOp fcn) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + pQueue->fcnZero = fcn; +} + +sysSHARED_CODE void +msgqSetBarrier(msgqHandle handle, msgqFcnBarrier fcn) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + pQueue->fcnBarrier = fcn; +} + +/* + * Helper functions to access indirect backend. + */ + +sysSHARED_CODE static void +_backendRead32(msgqMetadata *pQueue, volatile const void *pAddr, NvU32 *pVal, unsigned flags) +{ + if (pQueue->fcnBackendRw != NULL) + { + pQueue->fcnBackendRw(pVal, (const void *)pAddr, sizeof(*pVal), + flags | FCN_FLAG_BACKEND_ACCESS_READ, + pQueue->fcnBackendRwArg); + } + else + { + *pVal = *(volatile const NvU32*)pAddr; + } +} + +sysSHARED_CODE static void +_backendWrite32(msgqMetadata *pQueue, volatile void *pAddr, NvU32 *pVal, unsigned flags) +{ + if (pQueue->fcnBackendRw != NULL) + { + pQueue->fcnBackendRw((void*)pAddr, pVal, sizeof(*pVal), + flags | FCN_FLAG_BACKEND_ACCESS_WRITE, + pQueue->fcnBackendRwArg); + } + else + { + *(volatile NvU32*)pAddr = *pVal; + } +} + +/** + * @brief Default barrier for (RISC-V) systems. 
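The setters above install the optional hooks declared in msgq.h. A hedged sketch of what client registration could look like (the callback bodies are placeholders; only the signatures and the msgqSet* calls mirror the API):

/* Hypothetical notify hook: signal the peer that queue state changed. */
static int myNotifyPeer(int isRead, void *pArg)
{
    (void)isRead;
    (void)pArg;
    /* e.g. ring a doorbell register here; return 0 on success */
    return 0;
}

/* Hypothetical cache-maintenance hook. */
static void myFlushRange(const volatile void *pAddr, unsigned size)
{
    (void)pAddr;
    (void)size;
    /* platform-specific cache flush would go here */
}

static void configureQueueHooks(msgqHandle hQueue)
{
    msgqSetNotification(hQueue, myNotifyPeer, NULL);
    msgqSetTxFlush(hQueue, myFlushRange);
}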
+ */ +#ifdef UPROC_RISCV +sysSHARED_CODE static void +msgqRiscvDefaultBarrier(void) +{ + asm volatile("fence iorw,iorw"); +} +#endif + +/* + * + * Init and linking code + * + */ + +sysSHARED_CODE int msgqInit(msgqHandle *pHandle, void *pBuffer) +{ + msgqMetadata *pQueue = pBuffer; + + if (pQueue == NULL) + { + return -1; + } + + memset(pQueue, 0, sizeof *pQueue); + +#ifdef UPROC_RISCV + pQueue->fcnBarrier = msgqRiscvDefaultBarrier; +#endif + + if (pHandle != NULL) + { + *pHandle = pQueue; + } + return 0; +} + +sysSHARED_CODE int +msgqTxCreate +( + msgqHandle handle, + void *pBackingStore, + unsigned size, + unsigned msgSize, + unsigned hdrAlign, + unsigned entryAlign, + unsigned flags +) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + msgqTxHeader *pTx; + + if ((pQueue == NULL) || pQueue->txLinked) + { + return -1; + } + + if ((msgSize < MSGQ_MSG_SIZE_MIN) || (msgSize > size)) + { + return -1; + } + + // Check that alignments are in range. + if ((hdrAlign < MSGQ_META_MIN_ALIGN) || + (hdrAlign > MSGQ_META_MAX_ALIGN)) + { + return -1; + } + + if ((entryAlign < MSGQ_META_MIN_ALIGN) || + (entryAlign > MSGQ_META_MAX_ALIGN)) + { + return -1; + } + + if (pBackingStore == NULL) + { + return -1; + } + + // Make sure backing store is aligned for hdrAlign. + if ((NvUPtr)pBackingStore & ((hdrAlign - 1))) + { + return -1; + } + + pQueue->tx.rxHdrOff = NV_ALIGN_UP(sizeof(msgqTxHeader), 1 << hdrAlign); + pQueue->tx.entryOff = NV_ALIGN_UP(pQueue->tx.rxHdrOff + sizeof(msgqRxHeader), + 1 << entryAlign); + + if (size < (pQueue->tx.entryOff + msgSize)) + { + return -1; + } + + // Fill in local copy of msgqTxHeader. + pQueue->tx.version = MSGQ_VERSION; + pQueue->tx.size = size; + pQueue->tx.msgSize = msgSize; + pQueue->tx.writePtr = 0; + pQueue->tx.flags = flags; + pQueue->tx.msgCount = (NvU32)((size - pQueue->tx.entryOff) / msgSize); + + // Write our tracking metadata + pQueue->pOurTxHdr = (msgqTxHeader*)pBackingStore; + pQueue->pOurRxHdr = (msgqRxHeader*)((NvU8*)pBackingStore + pQueue->tx.rxHdrOff); + pQueue->pOurEntries = (NvU8*)pBackingStore + pQueue->tx.entryOff; + pQueue->txLinked = NV_TRUE; + pQueue->rxAvail = 0; + + // Allow adding queue messages before rx is linked. 
+ pQueue->txFree = pQueue->tx.msgCount - 1; + + // Swap only if both sides agree on it + pQueue->rxSwapped = (flags & MSGQ_FLAGS_SWAP_RX) && + (pQueue->rx.flags & MSGQ_FLAGS_SWAP_RX); + + pQueue->pWriteOutgoing = &pQueue->pOurTxHdr->writePtr; + + // if set, other side is already linked + if (pQueue->rxSwapped) + { + pQueue->pReadOutgoing = &pQueue->pOurRxHdr->readPtr; + pQueue->pReadIncoming = &pQueue->pTheirRxHdr->readPtr; + } + else + { + pQueue->pReadIncoming = &pQueue->pOurRxHdr->readPtr; + if (pQueue->rxLinked) + { + pQueue->pReadOutgoing = &pQueue->pTheirRxHdr->readPtr; + } + } + + // write shared buffer (backend) + pTx = pQueue->pOurTxHdr; + + if (pQueue->fcnZero != NULL) + { + pQueue->fcnZero(pTx, sizeof *pTx); + } + + // Indirect access to backend + if (pQueue->fcnBackendRw != NULL) + { + pQueue->fcnBackendRw(pTx, &pQueue->tx, sizeof *pTx, + FCN_FLAG_BACKEND_ACCESS_WRITE | FCN_FLAG_BACKEND_QUEUE_TX, + pQueue->fcnBackendRwArg); + } else + { + memcpy(pTx, &pQueue->tx, sizeof *pTx); + } + + // Flush + if (pQueue->fcnFlush != NULL) + { + pQueue->fcnFlush(pTx, sizeof *pTx); + } + + // Barrier + if (pQueue->fcnBarrier != NULL) + { + pQueue->fcnBarrier(); + } + + // Notify that pQueue was created + if (pQueue->fcnNotify != NULL) + { + pQueue->fcnNotify(FCN_FLAG_NOTIFY_MSG_WRITE, pQueue->fcnNotifyArg); + } + + return 0; +} + +sysSHARED_CODE int +msgqRxLink(msgqHandle handle, const void *pBackingStore, unsigned size, unsigned msgSize) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + if ((pQueue == NULL) || pQueue->rxLinked) + { + return -1; + } + + if (msgSize < MSGQ_MSG_SIZE_MIN) + { + return -2; + } + + if (msgSize > size) + { + return -3; + } + + if (pBackingStore == NULL) + { + return -5; + } + + pQueue->pTheirTxHdr = (msgqTxHeader*)pBackingStore; + + // Invalidate + if (pQueue->fcnInvalidate != NULL) + { + pQueue->fcnInvalidate(pQueue->pTheirTxHdr, sizeof(msgqTxHeader)); + } + + // copy their metadata + if (pQueue->fcnBackendRw != NULL) + { + pQueue->fcnBackendRw(&pQueue->rx, (const void *)pQueue->pTheirTxHdr, + sizeof pQueue->rx, + FCN_FLAG_BACKEND_ACCESS_READ | FCN_FLAG_BACKEND_QUEUE_RX, + pQueue->fcnBackendRwArg); + } + else + { + memcpy(&pQueue->rx, (const void *)pQueue->pTheirTxHdr, sizeof pQueue->rx); + } + + if (size < (pQueue->rx.entryOff + msgSize)) + { + return -6; + } + + // Sanity check + if (pQueue->rx.size != size) + { + return -7; + } + if (pQueue->rx.msgSize != msgSize) + { + return -8; + } + if (pQueue->rx.version != MSGQ_VERSION) + { + return -9; + } + + // Also check the calculated fields, to make sure the header arrived intact. 
+ if ((pQueue->rx.rxHdrOff < sizeof(msgqTxHeader)) || + (pQueue->rx.entryOff < pQueue->tx.rxHdrOff + sizeof(msgqRxHeader)) || + (pQueue->rx.msgCount != (NvU32)((size - pQueue->rx.entryOff) / msgSize))) + { + return -10; + } + + pQueue->pTheirRxHdr = (msgqRxHeader*)((NvU8*)pBackingStore + pQueue->rx.rxHdrOff); + pQueue->pTheirEntries = (NvU8*)pBackingStore + pQueue->rx.entryOff; + + pQueue->rxLinked = NV_TRUE; + pQueue->rxSwapped = (pQueue->tx.flags & MSGQ_FLAGS_SWAP_RX) && + (pQueue->rx.flags & MSGQ_FLAGS_SWAP_RX); + pQueue->pWriteIncoming = &pQueue->pTheirTxHdr->writePtr; + + // if set, other side is always linked + if (pQueue->rxSwapped) + { + pQueue->pReadOutgoing = &pQueue->pOurRxHdr->readPtr; + pQueue->pReadIncoming = &pQueue->pTheirRxHdr->readPtr; + } + else // may be unidir + { + pQueue->pReadOutgoing = &pQueue->pTheirRxHdr->readPtr; + if (pQueue->txLinked) + { + pQueue->pReadIncoming = &pQueue->pOurRxHdr->readPtr; + } + } + + if (pQueue->fcnZero != NULL) + { + pQueue->fcnZero(pQueue->pReadOutgoing, sizeof(NvU32)); + } + + pQueue->rxReadPtr = 0; + _backendWrite32(pQueue, pQueue->pReadOutgoing, &pQueue->rxReadPtr, + pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_TX : FCN_FLAG_BACKEND_QUEUE_RX); + if (pQueue->fcnFlush != NULL) + { + pQueue->fcnFlush(pQueue->pReadOutgoing, sizeof(NvU32)); + } + + // Barrier, notify + if (pQueue->fcnBarrier != NULL) + { + pQueue->fcnBarrier(); + } + + // Notify that pQueue was created + if (pQueue->fcnNotify != NULL) + { + pQueue->fcnNotify(FCN_FLAG_NOTIFY_MSG_READ, pQueue->fcnNotifyArg); + } + + return 0; +} + +/* + * + * Send code (outgoing messages) + * + */ + +sysSHARED_CODE unsigned +msgqTxGetFreeSpace(msgqHandle handle) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + if ((pQueue == NULL) || !pQueue->txLinked) + { + return 0; + } + + _backendRead32(pQueue, pQueue->pReadIncoming, &pQueue->txReadPtr, + pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_RX : FCN_FLAG_BACKEND_QUEUE_TX); + if (pQueue->txReadPtr >= pQueue->tx.msgCount) + { + return 0; + } + + pQueue->txFree = pQueue->txReadPtr + pQueue->tx.msgCount - pQueue->tx.writePtr - 1; + + // Avoid % operator due to performance issues on RISC-V. + if (pQueue->txFree >= pQueue->tx.msgCount) + { + pQueue->txFree -= pQueue->tx.msgCount; + } + + return pQueue->txFree; +} + +sysSHARED_CODE void * +msgqTxGetWriteBuffer(msgqHandle handle, unsigned n) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + NvU32 wp; + + if ((pQueue == NULL) || !pQueue->txLinked) + { + return NULL; + } + + // + // Look at the cached free space first. If the cached value shows enough + // remaining free space from last time, there is no reason to read and + // calculate the free space again. Depending on the location of txReadPtr, + // msgqTxGetFreeSpace can be a very costly operation. 
+ // + if ((n >= pQueue->txFree) && + (n >= msgqTxGetFreeSpace(handle))) + { + return NULL; + } + + wp = pQueue->tx.writePtr + n; + if (wp >= pQueue->tx.msgCount) + { + wp -= pQueue->tx.msgCount; + } + + return pQueue->pOurEntries + (wp * pQueue->tx.msgSize); +} + +sysSHARED_CODE int +msgqTxSubmitBuffers(msgqHandle handle, unsigned n) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + if ((pQueue == NULL) || !pQueue->txLinked) + { + return -1; + } + + if ((n > pQueue->txFree) && + (n > msgqTxGetFreeSpace(handle))) + { + return -1; + } + + // flush queues - TODO: make it more precise possibly + if (pQueue->fcnFlush != NULL) + { + pQueue->fcnFlush(pQueue->pOurEntries, + pQueue->tx.msgCount * pQueue->tx.msgSize); + } + + // write pointer + pQueue->tx.writePtr += n; + if (pQueue->tx.writePtr >= pQueue->tx.msgCount) + { + pQueue->tx.writePtr -= pQueue->tx.msgCount; + } + + _backendWrite32(pQueue, pQueue->pWriteOutgoing, + &pQueue->tx.writePtr, FCN_FLAG_BACKEND_QUEUE_TX); + + // Adjust cached value for number of free elements. + pQueue->txFree -= n; + + // flush tx header + if (pQueue->fcnFlush != NULL) + { + pQueue->fcnFlush(pQueue->pWriteOutgoing, sizeof(NvU32)); + } + + // barrier + if (pQueue->fcnBarrier != NULL) + { + pQueue->fcnBarrier(); + } + + // Send notification + if (pQueue->fcnNotify != NULL) + { + pQueue->fcnNotify(FCN_FLAG_NOTIFY_MSG_WRITE, pQueue->fcnNotifyArg); + } + + return 0; +} + +sysSHARED_CODE int +msgqTxSync(msgqHandle handle) // "transmit" +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + if ((pQueue == NULL) || !pQueue->txLinked) + { + return -1; + } + + if (pQueue->fcnInvalidate != NULL) + { + // Invalidate caches for read / write pointers + pQueue->fcnInvalidate((void*)pQueue->pReadIncoming, sizeof(NvU32)); + } + + return msgqTxGetFreeSpace(handle); +} + +/* + * + * Receive code (incoming messages) + * + */ + +sysSHARED_CODE unsigned +msgqRxGetReadAvailable(msgqHandle handle) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + if ((pQueue == NULL) || !pQueue->rxLinked) + { + return 0; + } + + _backendRead32(pQueue, pQueue->pWriteIncoming, &pQueue->rx.writePtr, FCN_FLAG_BACKEND_QUEUE_RX); + if (pQueue->rx.writePtr >= pQueue->rx.msgCount) + { + return 0; + } + + pQueue->rxAvail = pQueue->rx.writePtr + pQueue->rx.msgCount - pQueue->rxReadPtr; + + // Avoid % operator due to performance issues on RISC-V. + if (pQueue->rxAvail >= pQueue->rx.msgCount) + { + pQueue->rxAvail -= pQueue->rx.msgCount; + } + + return pQueue->rxAvail; +} + +sysSHARED_CODE const void * +msgqRxGetReadBuffer(msgqHandle handle, unsigned n) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + NvU32 rp; + + if ((pQueue == NULL) || !pQueue->rxLinked) + { + return NULL; + } + + // + // Look at the cached elements available first. If the cached value shows + // enough elements available from last time, there is no reason to read and + // calculate the elements available again. Depending on the location of + // rx.writePtr, msgqRxGetReadAvailable can be a very costly operation. 
+ // + if ((n >= pQueue->rxAvail) && + (n >= msgqRxGetReadAvailable(handle))) + { + return NULL; + } + + rp = pQueue->rxReadPtr + n; + if (rp >= pQueue->rx.msgCount) + { + rp -= pQueue->rx.msgCount; + } + + return pQueue->pTheirEntries + (rp * pQueue->rx.msgSize); +} + +sysSHARED_CODE int +msgqRxMarkConsumed(msgqHandle handle, unsigned n) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + if ((pQueue == NULL) || !pQueue->rxLinked) + { + return -1; + } + + if ((n > pQueue->rxAvail) && + (n > msgqRxGetReadAvailable(handle))) + { + return -1; + } + + // read pointer + pQueue->rxReadPtr += n; + if (pQueue->rxReadPtr >= pQueue->rx.msgCount) + { + pQueue->rxReadPtr -= pQueue->rx.msgCount; + } + + // Copy to backend + _backendWrite32(pQueue, pQueue->pReadOutgoing, &pQueue->rxReadPtr, + pQueue->rxSwapped ? FCN_FLAG_BACKEND_QUEUE_TX : FCN_FLAG_BACKEND_QUEUE_RX); + + // Adjust cached value for number of available elements. + pQueue->rxAvail -= n; + + // flush rx header + if (pQueue->fcnFlush != NULL) + { + pQueue->fcnFlush(pQueue->pReadOutgoing, sizeof(NvU32)); + } + + // barrier + if (pQueue->fcnBarrier != NULL) + { + pQueue->fcnBarrier(); + } + + // Send notification + if (pQueue->fcnNotify != NULL) + { + pQueue->fcnNotify(FCN_FLAG_NOTIFY_MSG_READ, pQueue->fcnNotifyArg); + } + + return 0; +} + +sysSHARED_CODE int +msgqRxSync(msgqHandle handle) +{ + msgqMetadata *pQueue = (msgqMetadata*)handle; + + if ((pQueue == NULL) || !pQueue->rxLinked) + { + return -1; + } + + // flush queues - TODO: make it more precise :) + if (pQueue->fcnInvalidate != NULL) + { + pQueue->fcnInvalidate(pQueue->pTheirEntries, + pQueue->rx.msgCount * pQueue->rx.msgSize); + // Invalidate caches for read / write pointers + pQueue->fcnInvalidate((void*)pQueue->pWriteIncoming, sizeof(NvU32)); + } + + return msgqRxGetReadAvailable(handle); +} diff --git a/src/common/shared/nvstatus/nvstatus.c b/src/common/shared/nvstatus/nvstatus.c new file mode 100644 index 000000000..e377dd9f9 --- /dev/null +++ b/src/common/shared/nvstatus/nvstatus.c @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
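The file that follows builds its status-string table by re-including nvstatuscodes.h with two different definitions of NV_STATUS_CODE. A minimal standalone sketch of that include-twice (X-macro) pattern, using a local list macro and made-up codes instead of the real header (assumes nvtypes.h for NvU32):

/* Hypothetical code list; the real entries live in nvstatuscodes.h. */
#define MY_STATUS_CODES                                         \
    NV_STATUS_CODE(MY_OK,      0x00000000, "Success")           \
    NV_STATUS_CODE(MY_GENERIC, 0x0000ffff, "Generic failure")

/* Pass 1: emit one string constant per code. */
#define NV_STATUS_CODE(name, code, string) \
    static const char pvt_##name##_str[] = string " [" #name "]";
MY_STATUS_CODES
#undef NV_STATUS_CODE

/* Pass 2: emit the lookup table that points at those strings. */
#define NV_STATUS_CODE(name, code, string) { code, pvt_##name##_str },
static const struct { NvU32 statusCode; const char *statusString; }
myCodeList[] = {
    MY_STATUS_CODES
    { 0xffffffff, "Unknown error code!" }
};
#undef NV_STATUS_CODE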
+ */ + +#include "nvstatus.h" + +#if !defined(NV_PRINTF_STRING_SECTION) +#if defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION __attribute__ ((section (".logging"))) +#else // defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION +#endif // defined(NVRM) && NVCPU_IS_RISCV64 +#endif // !defined(NV_PRINTF_STRING_SECTION) + +/* + * Include nvstatuscodes.h twice. Once for creating constant strings in the + * the NV_PRINTF_STRING_SECTION section of the ececutable, and once to build + * the g_StatusCodeList table. + */ +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) static NV_PRINTF_STRING_SECTION \ + const char rm_pvt_##name##_str[] = string " [" #name "]"; +#include "nvstatuscodes.h" + +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) { name, rm_pvt_##name##_str }, +static struct NvStatusCodeString +{ + NV_STATUS statusCode; + const char *statusString; +} g_StatusCodeList[] = { + #include "nvstatuscodes.h" + { 0xffffffff, "Unknown error code!" } // Some compilers don't like the trailing ',' +}; +#undef NV_STATUS_CODE + +/*! + * @brief Given an NV_STATUS code, returns the corresponding status string. + * + * @param[in] nvStatusIn NV_STATUS code for which the string is required + * + * @returns Corresponding status string from the nvstatuscodes.h + * + * TODO: Bug 200025711: convert this to an array-indexed lookup, instead of a linear search + * +*/ +const char *nvstatusToString(NV_STATUS nvStatusIn) +{ + static NV_PRINTF_STRING_SECTION const char rm_pvt_UNKNOWN_str[] = "Unknown error code!"; + NvU32 i; + NvU32 n = ((NvU32)(sizeof(g_StatusCodeList))/(NvU32)(sizeof(g_StatusCodeList[0]))); + for (i = 0; i < n; i++) + { + if (g_StatusCodeList[i].statusCode == nvStatusIn) + { + return g_StatusCodeList[i].statusString; + } + } + + return rm_pvt_UNKNOWN_str; +} diff --git a/src/common/softfloat/COPYING.txt b/src/common/softfloat/COPYING.txt new file mode 100644 index 000000000..b577946b3 --- /dev/null +++ b/src/common/softfloat/COPYING.txt @@ -0,0 +1,37 @@ + +License for Berkeley SoftFloat Release 3d + +John R. Hauser +2017 August 10 + +The following applies to the whole of SoftFloat Release 3d as well as to +each source file individually. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/src/common/softfloat/nvidia/nv-softfloat.h b/src/common/softfloat/nvidia/nv-softfloat.h new file mode 100644 index 000000000..51680ab30 --- /dev/null +++ b/src/common/softfloat/nvidia/nv-softfloat.h @@ -0,0 +1,163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_SOFTFLOAT_H__ +#define __NV_SOFTFLOAT_H__ + +/* + * This header file provides utility code built on top of the softfloat floating + * point emulation library. + */ + +#include "softfloat.h" +#include "nvtypes.h" +#include "platform.h" + +/* + * float32_t stores the bit pattern for a 32-bit single-precision IEEE floating + * point value in a structure containing an uint32_t: + * + * typedef struct { uint32_t v; } float32_t; + * + * In some cases, clients pass in a 32-bit single-precision IEEE floating + * point value in an NvU32. + * + * Define functions to change the "view" between an NvU32 and a float32_t. + */ +INLINE float32_t NvU32viewAsF32(NvU32 u) +{ + float32_t f = { .v = u }; + return f; +} + +INLINE NvU32 F32viewAsNvU32(float32_t f) +{ + return f.v; +} + +/* + * Convert the value of a float32_t to an NvU16. + * + * The conversion requires several steps: + * + * - Clamp the float32_t value to the [0,NV_U16_MAX] range of NvU16. + * + * - Use softfloat to convert the float32_t to ui32, with appropriate rounding. + * + * - Due to the clamping and rounding above, the value in the ui32 should be in + * the range of NvU16 and can be safely returned as NvU16. + */ +INLINE NvU16 F32toNvU16(float32_t f) +{ + const float32_t minF32 = NvU32viewAsF32(0); + const float32_t maxF32 = ui32_to_f32(NV_U16_MAX); + NvU32 u; + + /* clamp to zero: f = (f < minF32) ? minF32 : f */ + f = f32_lt(f, minF32) ? minF32 : f; + + /* clamp to NV_U16_MAX: f = (maxF32 < f) ? maxF32 : f */ + f = f32_lt(maxF32, f) ? 
maxF32 : f; + + /* + * The "_r_minMag" in "f32_to_ui32_r_minMag" means round "to minimum + * magnitude" (i.e., round towards zero). + * + * The "exact = FALSE" argument means do not raise the inexact exception + * flag, even if the conversion is inexact. + * + * For more on f32_to_ui32_r_minMag() semantics, see + * drivers/common/softfloat/doc/SoftFloat.html + */ + u = f32_to_ui32_r_minMag(f, NV_FALSE /* exact */); + nvAssert(u <= NV_U16_MAX); + + return (NvU16) u; +} + +/* + * Perform the following with float32_t: (a * b) + (c * d) + e + */ +INLINE float32_t F32_AxB_plus_CxD_plus_E( + float32_t a, + float32_t b, + float32_t c, + float32_t d, + float32_t e) +{ + const float32_t tmpA = f32_mul(a, b); + const float32_t tmpB = f32_mul(c, d); + const float32_t tmpC = f32_add(tmpA, tmpB); + + return f32_add(tmpC, e); +} + +/* + * Perform the following with float32_t: (a * b) - (c * d) + */ +INLINE float32_t F32_AxB_minus_CxD( + float32_t a, + float32_t b, + float32_t c, + float32_t d) +{ + const float32_t tmpA = f32_mul(a, b); + const float32_t tmpB = f32_mul(c, d); + + return f32_sub(tmpA, tmpB); +} + +/* + * Perform the following with float64_t: a * -1 + */ +INLINE float64_t F64_negate(float64_t a) +{ + const float64_t negOneF64 = i32_to_f64(-1); + return f64_mul(negOneF64, a); +} + +INLINE float16_t nvUnormToFp16(NvU16 unorm, float32_t maxf) +{ + const float32_t unormf = ui32_to_f32(unorm); + const float32_t normf = f32_div(unormf, maxf); + + return f32_to_f16(normf); +} + +INLINE float16_t nvUnorm10ToFp16(NvU16 unorm10) +{ + const float32_t maxf = NvU32viewAsF32(0x44800000U); // 1024.0f + return nvUnormToFp16(unorm10, maxf); +} + +INLINE float32_t f32_min(float32_t a, float32_t b) +{ + return (f32_lt(a, b)) ? a : b; +} + +INLINE float32_t f32_max(float32_t a, float32_t b) +{ + return (f32_lt(a, b)) ? b : a; +} + +#endif /* __NV_SOFTFLOAT_H__ */ diff --git a/src/common/softfloat/nvidia/platform.h b/src/common/softfloat/nvidia/platform.h new file mode 100644 index 000000000..f6db38315 --- /dev/null +++ b/src/common/softfloat/nvidia/platform.h @@ -0,0 +1,56 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef nvidia_softfloat_platform_h +#define nvidia_softfloat_platform_h 1 + +#include "nvtypes.h" + +/* + * Build softfloat for little endian CPUs: all NVIDIA target platforms are + * little endian. 
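+ * (Within this tree, LITTLEENDIAN is what selects the v0/v64 field order of
+ * struct commonNaN in specialize.h, for example.)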
+ */ +#define LITTLEENDIAN 1 + +/* + * "INLINE" is used by softfloat like this: + * + * INLINE uint32_t softfloat_foo(...) + * { + * ... + * } + */ +#define INLINE static NV_INLINE + +#if !defined(nvAssert) +#define nvAssert(x) +#endif + +/* + * softfloat will use THREAD_LOCAL to tag variables that should be per-thread; + * it could be set to, e.g., gcc's "__thread" keyword. If THREAD_LOCAL is left + * undefined, these variables will default to being ordinary global variables. + */ +#undef THREAD_LOCAL + +#endif /* nvidia_softfloat_platform_h */ diff --git a/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c b/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c new file mode 100644 index 000000000..cc7383345 --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. 
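+| (The result is always a quiet NaN: the sign goes to bit 15, 0x7E00 sets the
+| all-ones exponent plus the quiet bit, and `aPtr->v64>>54' keeps the top 10
+| payload bits.)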
+*----------------------------------------------------------------------------*/ +uint_fast16_t softfloat_commonNaNToF16UI( const struct commonNaN *aPtr ) +{ + + return (uint_fast16_t) aPtr->sign<<15 | 0x7E00 | aPtr->v64>>54; + +} + diff --git a/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c b/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c new file mode 100644 index 000000000..278cdcf14 --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +uint_fast32_t softfloat_commonNaNToF32UI( const struct commonNaN *aPtr ) +{ + + return (uint_fast32_t) aPtr->sign<<31 | 0x7FC00000 | aPtr->v64>>41; + +} + diff --git a/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c b/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c new file mode 100644 index 000000000..2346b0654 --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c @@ -0,0 +1,53 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +uint_fast64_t softfloat_commonNaNToF64UI( const struct commonNaN *aPtr ) +{ + + return + (uint_fast64_t) aPtr->sign<<63 | UINT64_C( 0x7FF8000000000000 ) + | aPtr->v64>>12; + +} + diff --git a/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c b/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c new file mode 100644 index 000000000..0c6e6105e --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +void softfloat_f32UIToCommonNaN( uint_fast32_t uiA, struct commonNaN *zPtr ) +{ + + if ( softfloat_isSigNaNF32UI( uiA ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + zPtr->sign = uiA>>31; + zPtr->v64 = (uint_fast64_t) uiA<<41; + zPtr->v0 = 0; + +} + diff --git a/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c b/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c new file mode 100644 index 000000000..c81dfa956 --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 64-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +void softfloat_f64UIToCommonNaN( uint_fast64_t uiA, struct commonNaN *zPtr ) +{ + + if ( softfloat_isSigNaNF64UI( uiA ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + zPtr->sign = uiA>>63; + zPtr->v64 = uiA<<12; + zPtr->v0 = 0; + +} + diff --git a/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c b/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c new file mode 100644 index 000000000..daaa31d54 --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c @@ -0,0 +1,63 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast32_t + softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB ) +{ + bool isSigNaNA; + + isSigNaNA = softfloat_isSigNaNF32UI( uiA ); + if ( isSigNaNA || softfloat_isSigNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + if ( isSigNaNA ) return uiA | 0x00400000; + } + return (isNaNF32UI( uiA ) ? uiA : uiB) | 0x00400000; + +} + diff --git a/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c b/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c new file mode 100644 index 000000000..78a29dafa --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c @@ -0,0 +1,63 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. 
If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast64_t + softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB ) +{ + bool isSigNaNA; + + isSigNaNA = softfloat_isSigNaNF64UI( uiA ); + if ( isSigNaNA || softfloat_isSigNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + if ( isSigNaNA ) return uiA | UINT64_C( 0x0008000000000000 ); + } + return (isNaNF64UI( uiA ) ? uiA : uiB) | UINT64_C( 0x0008000000000000 ); + +} + diff --git a/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c b/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c new file mode 100644 index 000000000..f2c25adee --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/softfloat_raiseFlags.c @@ -0,0 +1,52 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include "platform.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Raises the exceptions specified by `flags'. Floating-point traps can be +| defined here if desired. It is currently not possible for such a trap +| to substitute a result value. If traps are not implemented, this routine +| should be simply `softfloat_exceptionFlags |= flags;'. 
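+| (No trap handler is installed in this build, so the implementation below does
+| exactly that: it only accumulates the raised flags in softfloat_exceptionFlags.)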
+*----------------------------------------------------------------------------*/ +void softfloat_raiseFlags( uint_fast8_t flags ) +{ + + softfloat_exceptionFlags |= flags; + +} + diff --git a/src/common/softfloat/source/8086-SSE/specialize.h b/src/common/softfloat/source/8086-SSE/specialize.h new file mode 100644 index 000000000..235442c92 --- /dev/null +++ b/src/common/softfloat/source/8086-SSE/specialize.h @@ -0,0 +1,208 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef specialize_h +#define specialize_h 1 + +#include +#include +#include "softfloat_types.h" + +/*---------------------------------------------------------------------------- +| Default value for `softfloat_detectTininess'. +*----------------------------------------------------------------------------*/ +#define init_detectTininess softfloat_tininess_afterRounding + +/*---------------------------------------------------------------------------- +| The values to return on conversions to 32-bit integer formats that raise an +| invalid exception. +*----------------------------------------------------------------------------*/ +#define ui32_fromPosOverflow 0xFFFFFFFF +#define ui32_fromNegOverflow 0 +#define ui32_fromNaN 0xFFFFFFFF +#define i32_fromPosOverflow 0x7FFFFFFF +#define i32_fromNegOverflow (-0x7FFFFFFF - 1) +#define i32_fromNaN 0x7FFFFFFF + +/*---------------------------------------------------------------------------- +| The values to return on conversions to 64-bit integer formats that raise an +| invalid exception. 
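+| (These parallel the 32-bit values above: all-ones for unsigned overflow and
+| NaN, INT64_MAX/INT64_MIN for signed overflow, and INT64_MAX for NaN.)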
+*----------------------------------------------------------------------------*/ +#define ui64_fromPosOverflow UINT64_C( 0xFFFFFFFFFFFFFFFF ) +#define ui64_fromNegOverflow 0 +#define ui64_fromNaN UINT64_C( 0xFFFFFFFFFFFFFFFF ) +#define i64_fromPosOverflow UINT64_C( 0x7FFFFFFFFFFFFFFF ) +#define i64_fromNegOverflow (-UINT64_C( 0x7FFFFFFFFFFFFFFF ) - 1) +#define i64_fromNaN UINT64_C( 0x7FFFFFFFFFFFFFFF ) + +/*---------------------------------------------------------------------------- +| "Common NaN" structure, used to transfer NaN representations from one format +| to another. +*----------------------------------------------------------------------------*/ +struct commonNaN { + bool sign; +#ifdef LITTLEENDIAN + uint64_t v0, v64; +#else + uint64_t v64, v0; +#endif +}; + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 16-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF16UI 0xFE00 + +/*---------------------------------------------------------------------------- +| Returns true when 16-bit unsigned integer `uiA' has the bit pattern of a +| 16-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF16UI( uiA ) ((((uiA) & 0x7E00) == 0x7C00) && ((uiA) & 0x01FF)) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +uint_fast16_t softfloat_commonNaNToF16UI( const struct commonNaN *aPtr ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 32-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF32UI 0xFFC00000 + +/*---------------------------------------------------------------------------- +| Returns true when 32-bit unsigned integer `uiA' has the bit pattern of a +| 32-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF32UI( uiA ) ((((uiA) & 0x7FC00000) == 0x7F800000) && ((uiA) & 0x003FFFFF)) + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +void softfloat_f32UIToCommonNaN( uint_fast32_t uiA, struct commonNaN *zPtr ); + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. 
+*----------------------------------------------------------------------------*/ +uint_fast32_t softfloat_commonNaNToF32UI( const struct commonNaN *aPtr ); + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast32_t + softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 64-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF64UI UINT64_C( 0xFFF8000000000000 ) + +/*---------------------------------------------------------------------------- +| Returns true when 64-bit unsigned integer `uiA' has the bit pattern of a +| 64-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF64UI( uiA ) ((((uiA) & UINT64_C( 0x7FF8000000000000 )) == UINT64_C( 0x7FF0000000000000 )) && ((uiA) & UINT64_C( 0x0007FFFFFFFFFFFF ))) + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 64-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +void softfloat_f64UIToCommonNaN( uint_fast64_t uiA, struct commonNaN *zPtr ); + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +uint_fast64_t softfloat_commonNaNToF64UI( const struct commonNaN *aPtr ); + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast64_t + softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 80-bit extended floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNExtF80UI64 0xFFFF +#define defaultNaNExtF80UI0 UINT64_C( 0xC000000000000000 ) + +/*---------------------------------------------------------------------------- +| Returns true when the 80-bit unsigned integer formed from concatenating +| 16-bit `uiA64' and 64-bit `uiA0' has the bit pattern of an 80-bit extended +| floating-point signaling NaN. +| Note: This macro evaluates its arguments more than once. 
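+| (In the 80-bit extended format the NaN exponent field is all ones, 0x7FFF;
+| bit 62 of the significand is the quiet bit, so a signaling NaN has that bit
+| clear and a nonzero payload in bits 61..0.)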
+*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNExtF80UI( uiA64, uiA0 ) ((((uiA64) & 0x7FFF) == 0x7FFF) && ! ((uiA0) & UINT64_C( 0x4000000000000000 )) && ((uiA0) & UINT64_C( 0x3FFFFFFFFFFFFFFF ))) + + +/*---------------------------------------------------------------------------- +| The following functions are needed only when `SOFTFLOAT_FAST_INT64' is +| defined. +*----------------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 128-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF128UI64 UINT64_C( 0xFFFF800000000000 ) +#define defaultNaNF128UI0 UINT64_C( 0 ) + +/*---------------------------------------------------------------------------- +| Returns true when the 128-bit unsigned integer formed from concatenating +| 64-bit `uiA64' and 64-bit `uiA0' has the bit pattern of a 128-bit floating- +| point signaling NaN. +| Note: This macro evaluates its arguments more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF128UI( uiA64, uiA0 ) ((((uiA64) & UINT64_C( 0x7FFF800000000000 )) == UINT64_C( 0x7FFF000000000000 )) && ((uiA0) || ((uiA64) & UINT64_C( 0x00007FFFFFFFFFFF )))) + + +#endif + diff --git a/src/common/softfloat/source/f32_add.c b/src/common/softfloat/source/f32_add.c new file mode 100644 index 000000000..314c76ee8 --- /dev/null +++ b/src/common/softfloat/source/f32_add.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_add( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( signF32UI( uiA ^ uiB ) ) { + return softfloat_subMagsF32( uiA, uiB ); + } else { + return softfloat_addMagsF32( uiA, uiB ); + } + +} + diff --git a/src/common/softfloat/source/f32_div.c b/src/common/softfloat/source/f32_div.c new file mode 100644 index 000000000..d817bc0e2 --- /dev/null +++ b/src/common/softfloat/source/f32_div.c @@ -0,0 +1,176 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_div( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signZ; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; +#ifdef SOFTFLOAT_FAST_DIV64TO32 + uint_fast64_t sig64A; + uint_fast32_t sigZ; +#else + uint_fast32_t sigZ; + uint_fast64_t rem; +#endif + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) { + if ( ! (expA | sigA) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0x7E; + sigA |= 0x00800000; + sigB |= 0x00800000; +#ifdef SOFTFLOAT_FAST_DIV64TO32 + if ( sigA < sigB ) { + --expZ; + sig64A = (uint_fast64_t) sigA<<31; + } else { + sig64A = (uint_fast64_t) sigA<<30; + } + sigZ = sig64A / sigB; + if ( ! 
(sigZ & 0x3F) ) sigZ |= ((uint_fast64_t) sigB * sigZ != sig64A); +#else + if ( sigA < sigB ) { + --expZ; + sigA <<= 8; + } else { + sigA <<= 7; + } + sigB <<= 8; + sigZ = ((uint_fast64_t) sigA * softfloat_approxRecip32_1( sigB ))>>32; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigZ += 2; + if ( (sigZ & 0x3F) < 2 ) { + sigZ &= ~3; + rem = ((uint_fast64_t) sigA<<31) - (uint_fast64_t) sigZ * sigB; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + sigZ -= 4; + } else { + if ( rem ) sigZ |= 1; + } + } +#endif + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ = packToF32UI( signZ, 0xFF, 0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF32UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f32_eq.c b/src/common/softfloat/source/f32_eq.c new file mode 100644 index 000000000..5f07eee30 --- /dev/null +++ b/src/common/softfloat/source/f32_eq.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_eq( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1); + +} + diff --git a/src/common/softfloat/source/f32_eq_signaling.c b/src/common/softfloat/source/f32_eq_signaling.c new file mode 100644 index 000000000..f5fcc8242 --- /dev/null +++ b/src/common/softfloat/source/f32_eq_signaling.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_eq_signaling( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1); + +} + diff --git a/src/common/softfloat/source/f32_isSignalingNaN.c b/src/common/softfloat/source/f32_isSignalingNaN.c new file mode 100644 index 000000000..5004a5aae --- /dev/null +++ b/src/common/softfloat/source/f32_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_isSignalingNaN( float32_t a ) +{ + union ui32_f32 uA; + + uA.f = a; + return softfloat_isSigNaNF32UI( uA.ui ); + +} + diff --git a/src/common/softfloat/source/f32_le.c b/src/common/softfloat/source/f32_le.c new file mode 100644 index 000000000..77595fbbc --- /dev/null +++ b/src/common/softfloat/source/f32_le.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_le( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/src/common/softfloat/source/f32_le_quiet.c b/src/common/softfloat/source/f32_le_quiet.c new file mode 100644 index 000000000..1ec910107 --- /dev/null +++ b/src/common/softfloat/source/f32_le_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_le_quiet( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/src/common/softfloat/source/f32_lt.c b/src/common/softfloat/source/f32_lt.c new file mode 100644 index 000000000..9e12843fb --- /dev/null +++ b/src/common/softfloat/source/f32_lt.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
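For reference, a minimal standalone sketch (not from this commit) of the idiom the comparison routines above rely on: each float32_t is reinterpreted as raw bits (here with memcpy instead of union ui32_f32), and the test ! (uint32_t) ((uiA | uiB)<<1) discards the sign bit so that +0.0 and -0.0 compare equal.

    /* Sketch only: the signed-zero handling used by f32_eq_signaling, f32_le,
     * and f32_lt above.  +0 is 0x00000000 and -0 is 0x80000000 in binary32;
     * shifting the OR of the two encodings left by one drops the sign bit,
     * so the result is zero exactly when both operands are zeros. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t f32_bits( float f )
    {
        uint32_t u;
        memcpy( &u, &f, sizeof u );   /* reinterpret, like union ui32_f32 */
        return u;
    }

    int main( void )
    {
        uint32_t pz = f32_bits( 0.0f ), nz = f32_bits( -0.0f ), one = f32_bits( 1.0f );
        printf( "+0 == -0 : %d\n", (pz == nz)  || ! (uint32_t) ((pz | nz)<<1) );  /* 1 */
        printf( "+0 ==  1 : %d\n", (pz == one) || ! (uint32_t) ((pz | one)<<1) ); /* 0 */
        return 0;
    }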
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_lt( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/src/common/softfloat/source/f32_lt_quiet.c b/src/common/softfloat/source/f32_lt_quiet.c new file mode 100644 index 000000000..9f83b8105 --- /dev/null +++ b/src/common/softfloat/source/f32_lt_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_lt_quiet( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? 
signA && ((uint32_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/src/common/softfloat/source/f32_mul.c b/src/common/softfloat/source/f32_mul.c new file mode 100644 index 000000000..a2a673f1c --- /dev/null +++ b/src/common/softfloat/source/f32_mul.c @@ -0,0 +1,137 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
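Similarly, a standalone sketch (not from this commit) of the NaN encodings that the quiet and signaling comparison variants above distinguish: a binary32 NaN has all exponent bits set and a nonzero fraction, and it is signaling when the top fraction bit is clear, which is what softfloat_isSigNaNF32UI checks.

    /* Sketch only: quiet vs. signaling NaN classification for binary32. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool is_nan32( uint32_t ui )
        { return ((ui & 0x7F800000) == 0x7F800000) && (ui & 0x007FFFFF); }
    static bool is_snan32( uint32_t ui )
        { return ((ui & 0x7FC00000) == 0x7F800000) && (ui & 0x003FFFFF); }

    int main( void )
    {
        uint32_t qnan = 0x7FC00000, snan = 0x7F800001, inf = 0x7F800000;
        printf( "qNaN: nan=%d snan=%d\n", is_nan32( qnan ), is_snan32( qnan ) ); /* 1 0 */
        printf( "sNaN: nan=%d snan=%d\n", is_nan32( snan ), is_snan32( snan ) ); /* 1 1 */
        printf( "+inf: nan=%d snan=%d\n", is_nan32( inf ),  is_snan32( inf ) );  /* 0 0 */
        return 0;
    }

As the code above shows, the quiet comparisons (f32_le_quiet, f32_lt_quiet) raise the invalid exception only for signaling NaN operands, while f32_le and f32_lt raise it for any NaN operand; both families return false in the NaN case.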
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_mul( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signZ; + uint_fast32_t magBits; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ, uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x7F; + sigA = (sigA | 0x00800000)<<7; + sigB = (sigB | 0x00800000)<<8; + sigZ = softfloat_shortShiftRightJam64( (uint_fast64_t) sigA * sigB, 32 ); + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + } else { + uiZ = packToF32UI( signZ, 0xFF, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF32UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f32_mulAdd.c b/src/common/softfloat/source/f32_mulAdd.c new file mode 100644 index 000000000..e98021b75 --- /dev/null +++ b/src/common/softfloat/source/f32_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
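A small standalone check (not from this commit) of the exponent arithmetic in f32_mul above: for normal operands, the product's biased exponent is expA + expB - 0x7F, plus one when the significand product carries into [2, 4); 6.0f * 0.5f = 3.0f lands on the first case.

    /* Sketch only: biased-exponent arithmetic behind f32_mul. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static unsigned exp_bits( float f )
    {
        uint32_t u;
        memcpy( &u, &f, sizeof u );
        return (u>>23) & 0xFF;              /* like expF32UI */
    }

    int main( void )
    {
        float a = 6.0f, b = 0.5f;
        unsigned expA = exp_bits( a ), expB = exp_bits( b );
        printf( "expA + expB - 0x7F = 0x%02X\n", expA + expB - 0x7F ); /* 0x80 */
        printf( "exponent of a * b  = 0x%02X\n", exp_bits( a * b ) );  /* 0x80 */
        return 0;
    }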
+ +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_mulAdd( float32_t a, float32_t b, float32_t c ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + union ui32_f32 uC; + uint_fast32_t uiC; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + uC.f = c; + uiC = uC.ui; + return softfloat_mulAddF32( uiA, uiB, uiC, 0 ); + +} + diff --git a/src/common/softfloat/source/f32_rem.c b/src/common/softfloat/source/f32_rem.c new file mode 100644 index 000000000..771b1b94c --- /dev/null +++ b/src/common/softfloat/source/f32_rem.c @@ -0,0 +1,168 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_rem( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + int_fast16_t expB; + uint_fast32_t sigB; + struct exp16_sig32 normExpSig; + uint32_t rem; + int_fast16_t expDiff; + uint32_t q, recip32, altRem, meanRem; + bool signRem; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | 0x00800000; + sigB |= 0x00800000; + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 6; + if ( expDiff ) { + rem <<= 5; + q = 0; + } else { + rem <<= 6; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( sigB<<8 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 7; + expDiff -= 31; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | which is believed to be the maximum possible. 
+ *--------------------------------------------------------------------*/ + sigB <<= 6; + for (;;) { + q = (rem * (uint_fast64_t) recip32)>>32; + if ( expDiff < 0 ) break; + rem = -(q * (uint32_t) sigB); + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -30 here.) + *--------------------------------------------------------------------*/ + q >>= ~expDiff & 31; + rem = (rem<<(expDiff + 30)) - q * (uint32_t) sigB; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & 0x80000000) ); + meanRem = rem + altRem; + if ( (meanRem & 0x80000000) || (! meanRem && (q & 1)) ) rem = altRem; + signRem = signA; + if ( 0x80000000 <= rem ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF32( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f32_roundToInt.c b/src/common/softfloat/source/f32_roundToInt.c new file mode 100644 index 000000000..84e3c62dd --- /dev/null +++ b/src/common/softfloat/source/f32_roundToInt.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
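f32_rem above computes the IEEE-754 remainder, in which the quotient is rounded to the nearest integer with ties to even, rather than the truncated quotient used by C's fmod. A standalone comparison (not from this commit), assuming a C99 libm:

    /* Sketch only: IEEE remainder vs. truncating fmod. */
    #include <math.h>
    #include <stdio.h>

    int main( void )
    {
        /* 5/3 rounds to 2, so the IEEE remainder is 5 - 2*3 = -1 */
        printf( "remainderf( 5, 3 ) = %g\n", remainderf( 5.0f, 3.0f ) ); /* -1 */
        printf( "fmodf( 5, 3 )      = %g\n", fmodf( 5.0f, 3.0f ) );      /*  2 */
        return 0;
    }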
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_roundToInt( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t uiZ, lastBitMask, roundBitsMask; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0x7E ) { + if ( ! (uint32_t) (uiA<<1) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF32UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF32UI( uiA ) ) break; + /* fall through */ + case softfloat_round_near_maxMag: + if ( exp == 0x7E ) uiZ |= packToF32UI( 0, 0x7F, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF32UI( 1, 0x7F, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF32UI( 0, 0x7F, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x96 <= exp ) { + if ( (exp == 0xFF) && fracF32UI( uiA ) ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast32_t) 1<<(0x96 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF32UI( uiZ ) ? softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f32_sqrt.c b/src/common/softfloat/source/f32_sqrt.c new file mode 100644 index 000000000..5ef659e4f --- /dev/null +++ b/src/common/softfloat/source/f32_sqrt.c @@ -0,0 +1,121 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_sqrt( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA, uiZ; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ, shiftedSigZ; + uint32_t negRem; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! 
sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x7F)>>1) + 0x7E; + expA &= 1; + sigA = (sigA | 0x00800000)<<8; + sigZ = + ((uint_fast64_t) sigA * softfloat_approxRecipSqrt32_1( expA, sigA )) + >>32; + if ( expA ) sigZ >>= 1; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigZ += 2; + if ( (sigZ & 0x3F) < 2 ) { + shiftedSigZ = sigZ>>2; + negRem = shiftedSigZ * shiftedSigZ; + sigZ &= ~3; + if ( negRem & 0x80000000 ) { + sigZ |= 1; + } else { + if ( negRem ) --sigZ; + } + } + return softfloat_roundPackToF32( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f32_sub.c b/src/common/softfloat/source/f32_sub.c new file mode 100644 index 000000000..604d3bd94 --- /dev/null +++ b/src/common/softfloat/source/f32_sub.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
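A standalone sketch (not from this commit) of the lastBitMask/roundBitsMask device used by f32_roundToInt earlier in this diff: for a biased exponent exp in the mid range, the low 0x96 - exp fraction bits sit below the binary point, so adding half of lastBitMask and then clearing roundBitsMask rounds the encoding to the nearest integer, with the extra clear of lastBitMask supplying ties-to-even.

    /* Sketch only: round-to-nearest-even of 2.5f on the raw encoding. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main( void )
    {
        float a = 2.5f;
        uint32_t ui;
        memcpy( &ui, &a, sizeof ui );
        int exp = (ui>>23) & 0xFF;                          /* 0x80 for 2.5f */
        uint32_t lastBitMask   = (uint32_t) 1<<(0x96 - exp);
        uint32_t roundBitsMask = lastBitMask - 1;
        uint32_t z = ui + (lastBitMask>>1);
        if ( ! (z & roundBitsMask) ) z &= ~lastBitMask;     /* tie: round to even */
        z &= ~roundBitsMask;
        float r;
        memcpy( &r, &z, sizeof r );
        printf( "nearest-even rounding of 2.5f is %g\n", r ); /* 2 */
        return 0;
    }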
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_sub( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( signF32UI( uiA ^ uiB ) ) { + return softfloat_addMagsF32( uiA, uiB ); + } else { + return softfloat_subMagsF32( uiA, uiB ); + } + +} + diff --git a/src/common/softfloat/source/f32_to_f16.c b/src/common/softfloat/source/f32_to_f16.c new file mode 100644 index 000000000..7a9715899 --- /dev/null +++ b/src/common/softfloat/source/f32_to_f16.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f32_to_f16( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + uint_fast16_t uiZ, frac16; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF16UI( &commonNaN ); + } else { + uiZ = packToF16UI( sign, 0x1F, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac16 = frac>>9 | ((frac & 0x1FF) != 0); + if ( ! (exp | frac16) ) { + uiZ = packToF16UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF16( sign, exp - 0x71, frac16 | 0x4000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f32_to_f64.c b/src/common/softfloat/source/f32_to_f64.c new file mode 100644 index 000000000..f9e02f227 --- /dev/null +++ b/src/common/softfloat/source/f32_to_f64.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
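A standalone sketch (not from this commit) of the narrowing step in f32_to_f16 above, restricted to values such as 1.0f that are exactly representable in half precision: keep the top 10 fraction bits, fold the discarded 9 bits into a sticky bit, and rebias the exponent from 127 to 15 (a shift of 0x70).

    /* Sketch only: exact float32 -> float16 narrowing for 1.0f. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main( void )
    {
        float a = 1.0f;
        uint32_t ui;
        memcpy( &ui, &a, sizeof ui );
        unsigned sign   = ui>>31;
        unsigned exp    = (ui>>23) & 0xFF;
        uint32_t frac   = ui & 0x007FFFFF;
        uint32_t frac16 = frac>>9 | ((frac & 0x1FF) != 0);   /* sticky bit */
        uint16_t half   = (uint16_t) ((sign<<15) | ((exp - 0x70)<<10) | frac16);
        printf( "half encoding of 1.0f: 0x%04X\n", half );    /* 0x3C00 */
        return 0;
    }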
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f32_to_f64( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + uint_fast64_t uiZ; + struct exp16_sig32 normExpSig; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF64UI( &commonNaN ); + } else { + uiZ = packToF64UI( sign, 0x7FF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ = packToF64UI( sign, 0, 0 ); + goto uiZ; + } + normExpSig = softfloat_normSubnormalF32Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = packToF64UI( sign, exp + 0x380, (uint_fast64_t) frac<<29 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f32_to_i32.c b/src/common/softfloat/source/f32_to_i32.c new file mode 100644 index 000000000..c9f2cf9b3 --- /dev/null +++ b/src/common/softfloat/source/f32_to_i32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
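The widening in f32_to_f64 above goes the other way for normal values: rebias the exponent by 0x380 (1023 - 127) and shift the 23-bit fraction up by 29 into the 52-bit double fraction. A standalone check (not from this commit):

    /* Sketch only: normal-value float32 -> float64 widening. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main( void )
    {
        float a = 1.5f;
        uint32_t ui;
        memcpy( &ui, &a, sizeof ui );
        uint64_t sign = ui>>31;
        uint64_t exp  = (ui>>23) & 0xFF;
        uint64_t frac = ui & 0x007FFFFF;
        uint64_t wide = sign<<63 | (exp + 0x380)<<52 | frac<<29;
        double d;
        memcpy( &d, &wide, sizeof d );
        printf( "%g\n", d );    /* 1.5 */
        return 0;
    }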
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f32_to_i32( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + uint_fast64_t sig64; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0xFF) && sig ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<32; + shiftDist = 0xAA - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/src/common/softfloat/source/f32_to_i32_r_minMag.c b/src/common/softfloat/source/f32_to_i32_r_minMag.c new file mode 100644 index 000000000..1a94dcc68 --- /dev/null +++ b/src/common/softfloat/source/f32_to_i32_r_minMag.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f32_to_i32_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + int_fast32_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x9E - exp; + if ( 32 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( shiftDist <= 0 ) { + if ( uiA == packToF32UI( 1, 0x9E, 0 ) ) return -0x7FFFFFFF - 1; + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? i32_fromNaN + : sign ? i32_fromNegOverflow : i32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig | 0x00800000)<<8; + absZ = sig>>shiftDist; + if ( exact && ((uint_fast32_t) absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f32_to_i64( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + uint_fast64_t sig64, extra; + struct uint64_extra sig64Extra; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( shiftDist < 0 ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? i64_fromNaN + : sign ? 
i64_fromNegOverflow : i64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + extra = 0; + if ( shiftDist ) { + sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist ); + sig64 = sig64Extra.v; + extra = sig64Extra.extra; + } + return softfloat_roundToI64( sign, sig64, extra, roundingMode, exact ); + +} + diff --git a/src/common/softfloat/source/f32_to_i64_r_minMag.c b/src/common/softfloat/source/f32_to_i64_r_minMag.c new file mode 100644 index 000000000..7d336a47b --- /dev/null +++ b/src/common/softfloat/source/f32_to_i64_r_minMag.c @@ -0,0 +1,94 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
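A standalone sketch (not from this commit) of the shift-distance logic shared by the *_r_minMag conversions above, restricted to positive values in [1, 2^31): 0x9E is the biased exponent of 2^31, so shiftDist = 0x9E - exp says how far the 24-bit significand, once shifted to the top of a 32-bit word, must move right to land on its truncated integer value.

    /* Sketch only: truncating float -> int32 conversion for positive,
     * in-range values, mirroring the main path of f32_to_i32_r_minMag. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int32_t to_i32_trunc( float a )
    {
        uint32_t ui;
        memcpy( &ui, &a, sizeof ui );
        int exp = (ui>>23) & 0xFF;
        uint32_t sig = (ui & 0x007FFFFF) | 0x00800000;   /* restore hidden bit */
        int shiftDist = 0x9E - exp;
        return (int32_t) ((sig<<8)>>shiftDist);
    }

    int main( void )
    {
        printf( "%d\n", to_i32_trunc( 123.75f ) );   /* 123 */
        printf( "%d\n", to_i32_trunc( 1.0f ) );      /* 1 */
        return 0;
    }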
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f32_to_i64_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t sig64; + int_fast64_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( 64 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( shiftDist <= 0 ) { + if ( uiA == packToF32UI( 1, 0xBE, 0 ) ) { + return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + absZ = sig64>>shiftDist; + shiftDist = 40 - shiftDist; + if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sign ? -absZ : absZ; + +} + diff --git a/src/common/softfloat/source/f32_to_ui32.c b/src/common/softfloat/source/f32_to_ui32.c new file mode 100644 index 000000000..5ec279ba6 --- /dev/null +++ b/src/common/softfloat/source/f32_to_ui32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f32_to_ui32( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + uint_fast64_t sig64; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow) + if ( (exp == 0xFF) && sig ) { +#if (ui32_fromNaN == ui32_fromPosOverflow) + sign = 0; +#elif (ui32_fromNaN == ui32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<32; + shiftDist = 0xAA - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToUI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/src/common/softfloat/source/f32_to_ui32_r_minMag.c b/src/common/softfloat/source/f32_to_ui32_r_minMag.c new file mode 100644 index 000000000..12f72619b --- /dev/null +++ b/src/common/softfloat/source/f32_to_ui32_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f32_to_ui32_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x9E - exp; + if ( 32 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( sign || (shiftDist < 0) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? ui32_fromNaN + : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig | 0x00800000)<<8; + z = sig>>shiftDist; + if ( exact && (z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f32_to_ui64( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + uint_fast64_t sig64, extra; + struct uint64_extra sig64Extra; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( shiftDist < 0 ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? ui64_fromNaN + : sign ? 
ui64_fromNegOverflow : ui64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + extra = 0; + if ( shiftDist ) { + sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist ); + sig64 = sig64Extra.v; + extra = sig64Extra.extra; + } + return softfloat_roundToUI64( sign, sig64, extra, roundingMode, exact ); + +} + diff --git a/src/common/softfloat/source/f32_to_ui64_r_minMag.c b/src/common/softfloat/source/f32_to_ui64_r_minMag.c new file mode 100644 index 000000000..f96f3e1fe --- /dev/null +++ b/src/common/softfloat/source/f32_to_ui64_r_minMag.c @@ -0,0 +1,90 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f32_to_ui64_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t sig64, z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( 64 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( sign || (shiftDist < 0) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? ui64_fromNaN + : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + z = sig64>>shiftDist; + shiftDist = 40 - shiftDist; + if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + +} + diff --git a/src/common/softfloat/source/f64_add.c b/src/common/softfloat/source/f64_add.c new file mode 100644 index 000000000..b1969cad4 --- /dev/null +++ b/src/common/softfloat/source/f64_add.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_add( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + if ( signA == signB ) { + return softfloat_addMagsF64( uiA, uiB, signA ); + } else { + return softfloat_subMagsF64( uiA, uiB, signA ); + } + +} + diff --git a/src/common/softfloat/source/f64_div.c b/src/common/softfloat/source/f64_div.c new file mode 100644 index 000000000..c5a2d4fe3 --- /dev/null +++ b/src/common/softfloat/source/f64_div.c @@ -0,0 +1,172 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_div( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t recip32, sig32Z, doubleTerm; + uint_fast64_t rem; + uint32_t q; + uint_fast64_t sigZ; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) { + if ( ! (expA | sigA) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
sigA ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0x3FE; + sigA |= UINT64_C( 0x0010000000000000 ); + sigB |= UINT64_C( 0x0010000000000000 ); + if ( sigA < sigB ) { + --expZ; + sigA <<= 11; + } else { + sigA <<= 10; + } + sigB <<= 11; + recip32 = softfloat_approxRecip32_1( sigB>>32 ) - 2; + sig32Z = ((uint32_t) (sigA>>32) * (uint_fast64_t) recip32)>>32; + doubleTerm = sig32Z<<1; + rem = + ((sigA - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28) + - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4); + q = (((uint32_t) (rem>>32) * (uint_fast64_t) recip32)>>32) + 4; + sigZ = ((uint_fast64_t) sig32Z<<32) + ((uint_fast64_t) q<<4); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (sigZ & 0x1FF) < 4<<4 ) { + q &= ~7; + sigZ &= ~(uint_fast64_t) 0x7F; + doubleTerm = q<<1; + rem = + ((rem - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28) + - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4); + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + sigZ -= 1<<7; + } else { + if ( rem ) sigZ |= 1; + } + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF64UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f64_eq.c b/src/common/softfloat/source/f64_eq.c new file mode 100644 index 000000000..ccb602a38 --- /dev/null +++ b/src/common/softfloat/source/f64_eq.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_eq( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )); + +} + diff --git a/src/common/softfloat/source/f64_eq_signaling.c b/src/common/softfloat/source/f64_eq_signaling.c new file mode 100644 index 000000000..ee5a4414f --- /dev/null +++ b/src/common/softfloat/source/f64_eq_signaling.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_eq_signaling( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )); + +} + diff --git a/src/common/softfloat/source/f64_isSignalingNaN.c b/src/common/softfloat/source/f64_isSignalingNaN.c new file mode 100644 index 000000000..f55acb4a0 --- /dev/null +++ b/src/common/softfloat/source/f64_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_isSignalingNaN( float64_t a ) +{ + union ui64_f64 uA; + + uA.f = a; + return softfloat_isSigNaNF64UI( uA.ui ); + +} + diff --git a/src/common/softfloat/source/f64_le.c b/src/common/softfloat/source/f64_le.c new file mode 100644 index 000000000..91fc994ab --- /dev/null +++ b/src/common/softfloat/source/f64_le.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_le( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/src/common/softfloat/source/f64_le_quiet.c b/src/common/softfloat/source/f64_le_quiet.c new file mode 100644 index 000000000..a5d332a5b --- /dev/null +++ b/src/common/softfloat/source/f64_le_quiet.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_le_quiet( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/src/common/softfloat/source/f64_lt.c b/src/common/softfloat/source/f64_lt.c new file mode 100644 index 000000000..abf62fd3d --- /dev/null +++ b/src/common/softfloat/source/f64_lt.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_lt( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/src/common/softfloat/source/f64_lt_quiet.c b/src/common/softfloat/source/f64_lt_quiet.c new file mode 100644 index 000000000..6531f577e --- /dev/null +++ b/src/common/softfloat/source/f64_lt_quiet.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_lt_quiet( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/src/common/softfloat/source/f64_mul.c b/src/common/softfloat/source/f64_mul.c new file mode 100644 index 000000000..caac42424 --- /dev/null +++ b/src/common/softfloat/source/f64_mul.c @@ -0,0 +1,139 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_mul( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signZ; + uint_fast64_t magBits; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + struct uint128 sig128Z; + uint_fast64_t sigZ, uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FF; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<11; + sig128Z = softfloat_mul64To128( sigA, sigB ); + sigZ = sig128Z.v64 | (sig128Z.v0 != 0); + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! 
magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + } else { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF64UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f64_mulAdd.c b/src/common/softfloat/source/f64_mulAdd.c new file mode 100644 index 000000000..67fc44d38 --- /dev/null +++ b/src/common/softfloat/source/f64_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_mulAdd( float64_t a, float64_t b, float64_t c ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + union ui64_f64 uC; + uint_fast64_t uiC; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + uC.f = c; + uiC = uC.ui; + return softfloat_mulAddF64( uiA, uiB, uiC, 0 ); + +} + diff --git a/src/common/softfloat/source/f64_rem.c b/src/common/softfloat/source/f64_rem.c new file mode 100644 index 000000000..79d41058c --- /dev/null +++ b/src/common/softfloat/source/f64_rem.c @@ -0,0 +1,185 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_rem( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + int_fast16_t expB; + uint_fast64_t sigB; + struct exp16_sig64 normExpSig; + uint64_t rem; + int_fast16_t expDiff; + uint32_t q, recip32; + uint_fast64_t q64; + uint64_t altRem, meanRem; + bool signRem; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA < expB - 1 ) return a; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | UINT64_C( 0x0010000000000000 ); + sigB |= UINT64_C( 0x0010000000000000 ); + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 9; + if ( expDiff ) { + rem <<= 8; + q = 0; + } else { + rem <<= 9; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( sigB>>21 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 9; + expDiff -= 30; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | the maximum possible. + *--------------------------------------------------------------------*/ + sigB <<= 9; + for (;;) { + q64 = (uint32_t) (rem>>32) * (uint_fast64_t) recip32; + if ( expDiff < 0 ) break; + q = (q64 + 0x80000000)>>32; + rem <<= 29; + rem -= q * (uint64_t) sigB; + if ( rem & UINT64_C( 0x8000000000000000 ) ) rem += sigB; + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -29 here.) + *--------------------------------------------------------------------*/ + q = (uint32_t) (q64>>32)>>(~expDiff & 31); + rem = (rem<<(expDiff + 30)) - q * (uint64_t) sigB; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + altRem = rem + sigB; + goto selectRem; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & UINT64_C( 0x8000000000000000 )) ); + selectRem: + meanRem = rem + altRem; + if ( + (meanRem & UINT64_C( 0x8000000000000000 )) || (! meanRem && (q & 1)) + ) { + rem = altRem; + } + signRem = signA; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF64( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f64_roundToInt.c b/src/common/softfloat/source/f64_roundToInt.c new file mode 100644 index 000000000..3129a5552 --- /dev/null +++ b/src/common/softfloat/source/f64_roundToInt.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_roundToInt( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t uiZ, lastBitMask, roundBitsMask; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0x3FE ) { + if ( ! (uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF64UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF64UI( uiA ) ) break; + /* fall through */ + case softfloat_round_near_maxMag: + if ( exp == 0x3FE ) uiZ |= packToF64UI( 0, 0x3FF, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF64UI( 1, 0x3FF, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF64UI( 0, 0x3FF, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x433 <= exp ) { + if ( (exp == 0x7FF) && fracF64UI( uiA ) ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast64_t) 1<<(0x433 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF64UI( uiZ ) ? 
softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f64_sqrt.c b/src/common/softfloat/source/f64_sqrt.c new file mode 100644 index 000000000..9a06cfad3 --- /dev/null +++ b/src/common/softfloat/source/f64_sqrt.c @@ -0,0 +1,133 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_sqrt( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t sig32A, recipSqrt32, sig32Z; + uint_fast64_t rem; + uint32_t q; + uint_fast64_t sigZ, shiftedSigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! 
(expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + | (`sig32Z' is guaranteed to be a lower bound on the square root of + | `sig32A', which makes `sig32Z' also a lower bound on the square root of + | `sigA'.) + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x3FF)>>1) + 0x3FE; + expA &= 1; + sigA |= UINT64_C( 0x0010000000000000 ); + sig32A = sigA>>21; + recipSqrt32 = softfloat_approxRecipSqrt32_1( expA, sig32A ); + sig32Z = ((uint_fast64_t) sig32A * recipSqrt32)>>32; + if ( expA ) { + sigA <<= 8; + sig32Z >>= 1; + } else { + sigA <<= 9; + } + rem = sigA - (uint_fast64_t) sig32Z * sig32Z; + q = ((uint32_t) (rem>>2) * (uint_fast64_t) recipSqrt32)>>32; + sigZ = ((uint_fast64_t) sig32Z<<32 | 1<<5) + ((uint_fast64_t) q<<3); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (sigZ & 0x1FF) < 0x22 ) { + sigZ &= ~(uint_fast64_t) 0x3F; + shiftedSigZ = sigZ>>6; + rem = (sigA<<52) - shiftedSigZ * shiftedSigZ; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + --sigZ; + } else { + if ( rem ) sigZ |= 1; + } + } + return softfloat_roundPackToF64( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f64_sub.c b/src/common/softfloat/source/f64_sub.c new file mode 100644 index 000000000..14ea57575 --- /dev/null +++ b/src/common/softfloat/source/f64_sub.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_sub( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + if ( signA == signB ) { + return softfloat_subMagsF64( uiA, uiB, signA ); + } else { + return softfloat_addMagsF64( uiA, uiB, signA ); + } + +} + diff --git a/src/common/softfloat/source/f64_to_f32.c b/src/common/softfloat/source/f64_to_f32.c new file mode 100644 index 000000000..99b13dda4 --- /dev/null +++ b/src/common/softfloat/source/f64_to_f32.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f64_to_f32( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t frac; + struct commonNaN commonNaN; + uint_fast32_t uiZ, frac32; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + frac = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FF ) { + if ( frac ) { + softfloat_f64UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF32UI( &commonNaN ); + } else { + uiZ = packToF32UI( sign, 0xFF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac32 = softfloat_shortShiftRightJam64( frac, 22 ); + if ( ! (exp | frac32) ) { + uiZ = packToF32UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF32( sign, exp - 0x381, frac32 | 0x40000000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/f64_to_i32.c b/src/common/softfloat/source/f64_to_i32.c new file mode 100644 index 000000000..8712c0ac5 --- /dev/null +++ b/src/common/softfloat/source/f64_to_i32.c @@ -0,0 +1,82 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f64_to_i32( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0x7FF) && sig ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x427 - exp; + if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist ); + return softfloat_roundToI32( sign, sig, roundingMode, exact ); + +} + diff --git a/src/common/softfloat/source/f64_to_i32_r_minMag.c b/src/common/softfloat/source/f64_to_i32_r_minMag.c new file mode 100644 index 000000000..b7e1e0305 --- /dev/null +++ b/src/common/softfloat/source/f64_to_i32_r_minMag.c @@ -0,0 +1,96 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f64_to_i32_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + int_fast32_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( shiftDist < 22 ) { + if ( + sign && (exp == 0x41E) && (sig < UINT64_C( 0x0000000000200000 )) + ) { + if ( exact && sig ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return -0x7FFFFFFF - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? i32_fromNaN + : sign ? 
i32_fromNegOverflow : i32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig>>shiftDist; + if ( exact && ((uint_fast64_t) (uint_fast32_t) absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f64_to_i64( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + struct uint64_extra sigExtra; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x433 - exp; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sigExtra.v = sig<<-shiftDist; + sigExtra.extra = 0; + } else { + sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist ); + } + return + softfloat_roundToI64( + sign, sigExtra.v, sigExtra.extra, roundingMode, exact ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && fracF64UI( uiA ) ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/src/common/softfloat/source/f64_to_i64_r_minMag.c b/src/common/softfloat/source/f64_to_i64_r_minMag.c new file mode 100644 index 000000000..3822606db --- /dev/null +++ b/src/common/softfloat/source/f64_to_i64_r_minMag.c @@ -0,0 +1,100 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
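A small sketch of the exp == 0x41E special case in f64_to_i32_r_minMag above (not upstream code): 0x41E is the biased exponent of magnitudes in [2^31, 2^32), so that branch lets exactly -2^31 convert without raising the invalid flag. It again assumes an IEEE-754 host double:

#include <stdio.h>
#include <string.h>
#include "softfloat.h"

int main( void )
{
    double d = -2147483648.0;            /* exactly -2^31; its exponent field is 0x41E */
    float64_t a;
    memcpy( &a.v, &d, sizeof a.v );
    printf( "%ld\n", (long) f64_to_i32_r_minMag( a, true ) );   /* -2147483648 */
    printf( "%u\n", (unsigned) softfloat_exceptionFlags );      /* 0: conversion was exact, no flags raised */
    return 0;
}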
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f64_to_i64_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + int_fast64_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( shiftDist <= 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -10 ) { + if ( uiA == packToF64UI( 1, 0x43E, 0 ) ) { + return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig<<-shiftDist; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig>>shiftDist; + if ( exact && (absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f64_to_ui32( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow) + if ( (exp == 0x7FF) && sig ) { +#if (ui32_fromNaN == ui32_fromPosOverflow) + sign = 0; +#elif (ui32_fromNaN == ui32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( 
softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x427 - exp; + if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist ); + return softfloat_roundToUI32( sign, sig, roundingMode, exact ); + +} + diff --git a/src/common/softfloat/source/f64_to_ui32_r_minMag.c b/src/common/softfloat/source/f64_to_ui32_r_minMag.c new file mode 100644 index 000000000..11f0b0501 --- /dev/null +++ b/src/common/softfloat/source/f64_to_ui32_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
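A usage sketch for f64_to_ui32 above (not upstream code): after the jamming shift, softfloat_roundToUI32 honours the requested rounding mode and, when the 'exact' argument is true, records any inexactness in softfloat_exceptionFlags. It assumes an IEEE-754 host double:

#include <stdio.h>
#include <string.h>
#include "softfloat.h"

int main( void )
{
    double d = 2.5;
    float64_t a;
    memcpy( &a.v, &d, sizeof a.v );
    softfloat_exceptionFlags = 0;
    printf( "%u\n", (unsigned) f64_to_ui32( a, softfloat_round_near_even, true ) ); /* 2 (ties to even) */
    printf( "%u\n", (unsigned) f64_to_ui32( a, softfloat_round_max, true ) );       /* 3 (toward +infinity) */
    printf( "%d\n", (softfloat_exceptionFlags & softfloat_flag_inexact) != 0 );     /* 1: inexact was raised */
    return 0;
}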
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f64_to_ui32_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( sign || (shiftDist < 21) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? ui32_fromNaN + : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + z = sig>>shiftDist; + if ( exact && ((uint_fast64_t) z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f64_to_ui64( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + struct uint64_extra sigExtra; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x433 - exp; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sigExtra.v = sig<<-shiftDist; + sigExtra.extra = 0; + } else { + sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist ); + } + return + softfloat_roundToUI64( + sign, sigExtra.v, sigExtra.extra, roundingMode, exact ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && fracF64UI( uiA ) ? ui64_fromNaN + : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/src/common/softfloat/source/f64_to_ui64_r_minMag.c b/src/common/softfloat/source/f64_to_ui64_r_minMag.c new file mode 100644 index 000000000..25918c486 --- /dev/null +++ b/src/common/softfloat/source/f64_to_ui64_r_minMag.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
+ +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f64_to_ui64_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( sign ) goto invalid; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + z = (sig | UINT64_C( 0x0010000000000000 ))<<-shiftDist; + } else { + sig |= UINT64_C( 0x0010000000000000 ); + z = sig>>shiftDist; + if ( exact && (uint64_t) (sig<<(-shiftDist & 63)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? ui64_fromNaN + : sign ? 
ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/src/common/softfloat/source/i32_to_f32.c b/src/common/softfloat/source/i32_to_f32.c new file mode 100644 index 000000000..b1aedbacf --- /dev/null +++ b/src/common/softfloat/source/i32_to_f32.c @@ -0,0 +1,58 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t i32_to_f32( int32_t a ) +{ + bool sign; + union ui32_f32 uZ; + uint_fast32_t absA; + + sign = (a < 0); + if ( ! (a & 0x7FFFFFFF) ) { + uZ.ui = sign ? packToF32UI( 1, 0x9E, 0 ) : 0; + return uZ.f; + } + absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a; + return softfloat_normRoundPackToF32( sign, 0x9C, absA ); + +} + diff --git a/src/common/softfloat/source/i32_to_f64.c b/src/common/softfloat/source/i32_to_f64.c new file mode 100644 index 000000000..d3901eb4a --- /dev/null +++ b/src/common/softfloat/source/i32_to_f64.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t i32_to_f64( int32_t a ) +{ + uint_fast64_t uiZ; + bool sign; + uint_fast32_t absA; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + if ( ! a ) { + uiZ = 0; + } else { + sign = (a < 0); + absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a; + shiftDist = softfloat_countLeadingZeros32( absA ) + 21; + uiZ = + packToF64UI( + sign, 0x432 - shiftDist, (uint_fast64_t) absA< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t i64_to_f32( int64_t a ) +{ + bool sign; + uint_fast64_t absA; + int_fast8_t shiftDist; + union ui32_f32 u; + uint_fast32_t sig; + + sign = (a < 0); + absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a; + shiftDist = softfloat_countLeadingZeros64( absA ) - 40; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF32UI( + sign, 0x95 - shiftDist, (uint_fast32_t) absA< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t i64_to_f64( int64_t a ) +{ + bool sign; + union ui64_f64 uZ; + uint_fast64_t absA; + + sign = (a < 0); + if ( ! (a & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) { + uZ.ui = sign ? packToF64UI( 1, 0x43E, 0 ) : 0; + return uZ.f; + } + absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a; + return softfloat_normRoundPackToF64( sign, 0x43C, absA ); + +} + diff --git a/src/common/softfloat/source/include/internals.h b/src/common/softfloat/source/include/internals.h new file mode 100644 index 000000000..af1580045 --- /dev/null +++ b/src/common/softfloat/source/include/internals.h @@ -0,0 +1,144 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef internals_h +#define internals_h 1 + +#include +#include +#include "primitives.h" +#include "softfloat_types.h" + +union ui16_f16 { uint16_t ui; float16_t f; }; +union ui32_f32 { uint32_t ui; float32_t f; }; +union ui64_f64 { uint64_t ui; float64_t f; }; + +union extF80M_extF80 { struct extFloat80M fM; extFloat80_t f; }; +union ui128_f128 { struct uint128 ui; float128_t f; }; + +enum { + softfloat_mulAdd_subC = 1, + softfloat_mulAdd_subProd = 2 +}; + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +uint_fast32_t softfloat_roundToUI32( bool, uint_fast64_t, uint_fast8_t, bool ); + +uint_fast64_t + softfloat_roundToUI64( + bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool ); + +int_fast32_t softfloat_roundToI32( bool, uint_fast64_t, uint_fast8_t, bool ); + +int_fast64_t + softfloat_roundToI64( + bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF16UI( a ) ((bool) ((uint16_t) (a)>>15)) +#define expF16UI( a ) ((int_fast8_t) ((a)>>10) & 0x1F) +#define fracF16UI( a ) ((a) & 0x03FF) +#define packToF16UI( sign, exp, sig ) (((uint16_t) (sign)<<15) + ((uint16_t) (exp)<<10) + (sig)) + +#define isNaNF16UI( a ) (((~(a) & 0x7C00) == 0) && ((a) & 0x03FF)) + +float16_t softfloat_roundPackToF16( bool, int_fast16_t, uint_fast16_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF32UI( a ) ((bool) ((uint32_t) (a)>>31)) +#define expF32UI( a ) ((int_fast16_t) ((a)>>23) & 0xFF) +#define fracF32UI( a ) ((a) & 0x007FFFFF) +#define packToF32UI( sign, exp, sig ) (((uint32_t) (sign)<<31) + ((uint32_t) (exp)<<23) + (sig)) + +#define isNaNF32UI( a ) (((~(a) & 0x7F800000) == 0) && ((a) & 0x007FFFFF)) + +struct exp16_sig32 { int_fast16_t exp; uint_fast32_t sig; }; +struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t ); + +float32_t softfloat_roundPackToF32( bool, int_fast16_t, uint_fast32_t ); +float32_t softfloat_normRoundPackToF32( bool, int_fast16_t, 
uint_fast32_t ); + +float32_t softfloat_addMagsF32( uint_fast32_t, uint_fast32_t ); +float32_t softfloat_subMagsF32( uint_fast32_t, uint_fast32_t ); +float32_t + softfloat_mulAddF32( + uint_fast32_t, uint_fast32_t, uint_fast32_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF64UI( a ) ((bool) ((uint64_t) (a)>>63)) +#define expF64UI( a ) ((int_fast16_t) ((a)>>52) & 0x7FF) +#define fracF64UI( a ) ((a) & UINT64_C( 0x000FFFFFFFFFFFFF )) +#define packToF64UI( sign, exp, sig ) ((uint64_t) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<52) + (sig))) + +#define isNaNF64UI( a ) (((~(a) & UINT64_C( 0x7FF0000000000000 )) == 0) && ((a) & UINT64_C( 0x000FFFFFFFFFFFFF ))) + +struct exp16_sig64 { int_fast16_t exp; uint_fast64_t sig; }; +struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t ); + +float64_t softfloat_roundPackToF64( bool, int_fast16_t, uint_fast64_t ); +float64_t softfloat_normRoundPackToF64( bool, int_fast16_t, uint_fast64_t ); + +float64_t softfloat_addMagsF64( uint_fast64_t, uint_fast64_t, bool ); +float64_t softfloat_subMagsF64( uint_fast64_t, uint_fast64_t, bool ); +float64_t + softfloat_mulAddF64( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signExtF80UI64( a64 ) ((bool) ((uint16_t) (a64)>>15)) +#define expExtF80UI64( a64 ) ((a64) & 0x7FFF) +#define packToExtF80UI64( sign, exp ) ((uint_fast16_t) (sign)<<15 | (exp)) + +#define isNaNExtF80UI( a64, a0 ) ((((a64) & 0x7FFF) == 0x7FFF) && ((a0) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))) + + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF128UI64( a64 ) ((bool) ((uint64_t) (a64)>>63)) +#define expF128UI64( a64 ) ((int_fast32_t) ((a64)>>48) & 0x7FFF) +#define fracF128UI64( a64 ) ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF )) +#define packToF128UI64( sign, exp, sig64 ) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<48) + (sig64)) + +#define isNaNF128UI( a64, a0 ) (((~(a64) & UINT64_C( 0x7FFF000000000000 )) == 0) && (a0 || ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF )))) + + +#endif + diff --git a/src/common/softfloat/source/include/primitiveTypes.h b/src/common/softfloat/source/include/primitiveTypes.h new file mode 100644 index 000000000..781d82fcd --- /dev/null +++ b/src/common/softfloat/source/include/primitiveTypes.h @@ -0,0 +1,83 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef primitiveTypes_h +#define primitiveTypes_h 1 + +#include + + +#ifdef LITTLEENDIAN +struct uint128 { uint64_t v0, v64; }; +struct uint64_extra { uint64_t extra, v; }; +struct uint128_extra { uint64_t extra; struct uint128 v; }; +#else +struct uint128 { uint64_t v64, v0; }; +struct uint64_extra { uint64_t v, extra; }; +struct uint128_extra { struct uint128 v; uint64_t extra; }; +#endif + + +/*---------------------------------------------------------------------------- +| These macros are used to isolate the differences in word order between big- +| endian and little-endian platforms. +*----------------------------------------------------------------------------*/ +#ifdef LITTLEENDIAN +#define wordIncr 1 +#define indexWord( total, n ) (n) +#define indexWordHi( total ) ((total) - 1) +#define indexWordLo( total ) 0 +#define indexMultiword( total, m, n ) (n) +#define indexMultiwordHi( total, n ) ((total) - (n)) +#define indexMultiwordLo( total, n ) 0 +#define indexMultiwordHiBut( total, n ) (n) +#define indexMultiwordLoBut( total, n ) 0 +#define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 } +#else +#define wordIncr -1 +#define indexWord( total, n ) ((total) - 1 - (n)) +#define indexWordHi( total ) 0 +#define indexWordLo( total ) ((total) - 1) +#define indexMultiword( total, m, n ) ((total) - 1 - (m)) +#define indexMultiwordHi( total, n ) 0 +#define indexMultiwordLo( total, n ) ((total) - (n)) +#define indexMultiwordHiBut( total, n ) 0 +#define indexMultiwordLoBut( total, n ) (n) +#define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 } +#endif + +#endif + diff --git a/src/common/softfloat/source/include/primitives.h b/src/common/softfloat/source/include/primitives.h new file mode 100644 index 000000000..a0fcfd824 --- /dev/null +++ b/src/common/softfloat/source/include/primitives.h @@ -0,0 +1,282 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef primitives_h +#define primitives_h 1 + +#include +#include +#include "primitiveTypes.h" + +/*---------------------------------------------------------------------------- +| Shifts 'a' right by the number of bits given in 'dist', which must be in +| the range 1 to 63. If any nonzero bits are shifted off, they are "jammed" +| into the least-significant bit of the shifted value by setting the least- +| significant bit to 1. This shifted-and-jammed value is returned. +*----------------------------------------------------------------------------*/ +INLINE +uint64_t softfloat_shortShiftRightJam64( uint64_t a, uint_fast8_t dist ) + { return a>>dist | ((a & (((uint_fast64_t) 1<>dist | ((uint32_t) (a<<(-dist & 31)) != 0) : (a != 0); +} + +/*---------------------------------------------------------------------------- +| Shifts 'a' right by the number of bits given in 'dist', which must not +| be zero. If any nonzero bits are shifted off, they are "jammed" into the +| least-significant bit of the shifted value by setting the least-significant +| bit to 1. This shifted-and-jammed value is returned. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than 64, the result will be either 0 or 1, depending on whether 'a' +| is zero or nonzero. +*----------------------------------------------------------------------------*/ +INLINE uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist ) +{ + return + (dist < 63) ? a>>dist | ((uint64_t) (a<<(-dist & 63)) != 0) : (a != 0); +} + +/*---------------------------------------------------------------------------- +| A constant table that translates an 8-bit unsigned integer (the array index) +| into the number of leading 0 bits before the most-significant 1 of that +| integer. For integer zero (index 0), the corresponding table element is 8. +*----------------------------------------------------------------------------*/ +extern const uint_least8_t softfloat_countLeadingZeros8[256]; + +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 32 is returned. 
+*----------------------------------------------------------------------------*/ +INLINE uint_fast8_t softfloat_countLeadingZeros32( uint32_t a ) +{ + uint_fast8_t count = 0; + if ( a < 0x10000 ) { + count = 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + count += 8; + a <<= 8; + } + count += softfloat_countLeadingZeros8[a>>24]; + return count; +} + +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 64 is returned. +*----------------------------------------------------------------------------*/ +uint_fast8_t softfloat_countLeadingZeros64( uint64_t a ); + +extern const uint16_t softfloat_approxRecip_1k0s[16]; +extern const uint16_t softfloat_approxRecip_1k1s[16]; + +/*---------------------------------------------------------------------------- +| Returns an approximation to the reciprocal of the number represented by 'a', +| where 'a' is interpreted as an unsigned fixed-point number with one integer +| bit and 31 fraction bits. The 'a' input must be "normalized", meaning that +| its most-significant bit (bit 31) must be 1. Thus, if A is the value of +| the fixed-point interpretation of 'a', then 1 <= A < 2. The returned value +| is interpreted as a pure unsigned fraction, having no integer bits and 32 +| fraction bits. The approximation returned is never greater than the true +| reciprocal 1/A, and it differs from the true reciprocal by at most 2.006 ulp +| (units in the last place). +*----------------------------------------------------------------------------*/ +#ifdef SOFTFLOAT_FAST_DIV64TO32 +#define softfloat_approxRecip32_1( a ) ((uint32_t) (UINT64_C( 0x7FFFFFFFFFFFFFFF ) / (uint32_t) (a))) +#endif + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[16]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[16]; + +/*---------------------------------------------------------------------------- +| Returns an approximation to the reciprocal of the square root of the number +| represented by 'a', where 'a' is interpreted as an unsigned fixed-point +| number either with one integer bit and 31 fraction bits or with two integer +| bits and 30 fraction bits. The format of 'a' is determined by 'oddExpA', +| which must be either 0 or 1. If 'oddExpA' is 1, 'a' is interpreted as +| having one integer bit, and if 'oddExpA' is 0, 'a' is interpreted as having +| two integer bits. The 'a' input must be "normalized", meaning that its +| most-significant bit (bit 31) must be 1. Thus, if A is the value of the +| fixed-point interpretation of 'a', it follows that 1 <= A < 2 when 'oddExpA' +| is 1, and 2 <= A < 4 when 'oddExpA' is 0. +| The returned value is interpreted as a pure unsigned fraction, having +| no integer bits and 32 fraction bits. The approximation returned is never +| greater than the true reciprocal 1/sqrt(A), and it differs from the true +| reciprocal by at most 2.06 ulp (units in the last place). The approximation +| returned is also always within the range 0.5 to 1; thus, the most- +| significant bit of the result is always set. +*----------------------------------------------------------------------------*/ +uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a ); + + +/*---------------------------------------------------------------------------- +| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is +| defined. 
+*----------------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' left by the +| number of bits given in 'dist', which must be in the range 1 to 63. +*----------------------------------------------------------------------------*/ +INLINE +struct uint128 + softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, uint_fast8_t dist ) +{ + struct uint128 z; + z.v64 = a64<>(-dist & 63); + z.v0 = a0<>dist; + z.v0 = + a64<<(negDist & 63) | a0>>dist + | ((uint64_t) (a0<<(negDist & 63)) != 0); + return z; +} + +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a' and 'extra' right by 64 +| _plus_ the number of bits given in 'dist', which must not be zero. This +| shifted value is at most 64 nonzero bits and is returned in the 'v' field +| of the 'struct uint64_extra' result. The 64-bit 'extra' field of the result +| contains a value formed as follows from the bits that were shifted off: The +| _last_ bit shifted off is the most-significant bit of the 'extra' field, and +| the other 63 bits of the 'extra' field are all zero if and only if _all_but_ +| _the_last_ bits shifted off were all zero. +| (This function makes more sense if 'a' and 'extra' are considered to form +| an unsigned fixed-point number with binary point between 'a' and 'extra'. +| This fixed-point value is shifted right by the number of bits given in +| 'dist', and the integer part of this shifted value is returned in the 'v' +| field of the result. The fractional part of the shifted value is modified +| as described above and returned in the 'extra' field of the result.) +*----------------------------------------------------------------------------*/ +INLINE +struct uint64_extra + softfloat_shiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast32_t dist ) +{ + struct uint64_extra z; + if ( dist < 64 ) { + z.v = a>>dist; + z.extra = a<<(-dist & 63); + } else { + z.v = 0; + z.extra = (dist == 64) ? a : (a != 0); + } + z.extra |= (extra != 0); + return z; +} + +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the +| number of bits given in 'dist', which must not be zero. If any nonzero bits +| are shifted off, they are "jammed" into the least-significant bit of the +| shifted value by setting the least-significant bit to 1. This shifted-and- +| jammed value is returned. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than 128, the result will be either 0 or 1, depending on whether the +| original 128 bits are all zeros. +*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist ); + +/*---------------------------------------------------------------------------- +| Returns the sum of the 128-bit integer formed by concatenating 'a64' and +| 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. The +| addition is modulo 2^128, so any carry out is lost. 
+*----------------------------------------------------------------------------*/ +INLINE +struct uint128 + softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 + b0; + z.v64 = a64 + b64 + (z.v0 < a0); + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the difference of the 128-bit integer formed by concatenating 'a64' +| and 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. +| The subtraction is modulo 2^128, so any borrow out (carry out) is lost. +*----------------------------------------------------------------------------*/ +INLINE +struct uint128 + softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 - b0; + z.v64 = a64 - b64; + z.v64 -= (a0 < b0); + return z; +} + +/*---------------------------------------------------------------------------- +| Returns the 128-bit product of 'a' and 'b'. +*----------------------------------------------------------------------------*/ +struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b ); + + +#endif + diff --git a/src/common/softfloat/source/include/softfloat.h b/src/common/softfloat/source/include/softfloat.h new file mode 100644 index 000000000..9e28a575c --- /dev/null +++ b/src/common/softfloat/source/include/softfloat.h @@ -0,0 +1,167 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
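A standalone restatement of the carry-propagation idiom used by softfloat_add128 in primitives.h above (not upstream code): the comparison (v0 < a0) is 1 exactly when the low-word addition wrapped, and that bit is the carry into the high word.

#include <stdint.h>
#include <stdio.h>

int main( void )
{
    uint64_t a64 = 1, a0 = UINT64_C( 0xFFFFFFFFFFFFFFFF );
    uint64_t b64 = 2, b0 = 1;
    uint64_t v0  = a0 + b0;                  /* wraps to 0 */
    uint64_t v64 = a64 + b64 + (v0 < a0);    /* carry detected by the wrap */
    printf( "%llu %llu\n", (unsigned long long) v64, (unsigned long long) v0 );  /* prints "4 0" */
    return 0;
}

softfloat_sub128 uses the mirrored comparison (a0 < b0) to propagate the borrow out of the low word.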
+ +=============================================================================*/ + + +/*============================================================================ +| Note: If SoftFloat is made available as a general library for programs to +| use, it is strongly recommended that a platform-specific version of this +| header, "softfloat.h", be created that folds in "softfloat_types.h" and that +| eliminates all dependencies on compile-time macros. +*============================================================================*/ + + +#ifndef softfloat_h +#define softfloat_h 1 + +#include +#include +#include "softfloat_types.h" + +#ifndef THREAD_LOCAL +#define THREAD_LOCAL +#endif + +/*---------------------------------------------------------------------------- +| Software floating-point underflow tininess-detection mode. +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_detectTininess; +enum { + softfloat_tininess_beforeRounding = 0, + softfloat_tininess_afterRounding = 1 +}; + +/*---------------------------------------------------------------------------- +| Software floating-point rounding mode. (Mode "odd" is supported only if +| SoftFloat is compiled with macro 'SOFTFLOAT_ROUND_ODD' defined.) +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_roundingMode; +enum { + softfloat_round_near_even = 0, + softfloat_round_minMag = 1, + softfloat_round_min = 2, + softfloat_round_max = 3, + softfloat_round_near_maxMag = 4, + softfloat_round_odd = 5 +}; + +/*---------------------------------------------------------------------------- +| Software floating-point exception flags. +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags; +enum { + softfloat_flag_inexact = 1, + softfloat_flag_underflow = 2, + softfloat_flag_overflow = 4, + softfloat_flag_infinite = 8, + softfloat_flag_invalid = 16 +}; + +/*---------------------------------------------------------------------------- +| Routine to raise any or all of the software floating-point exception flags. +*----------------------------------------------------------------------------*/ +void softfloat_raiseFlags( uint_fast8_t ); + +/*---------------------------------------------------------------------------- +| Integer-to-floating-point conversion routines. +*----------------------------------------------------------------------------*/ +float32_t ui32_to_f32( uint32_t ); +float64_t ui32_to_f64( uint32_t ); +float32_t ui64_to_f32( uint64_t ); +float64_t ui64_to_f64( uint64_t ); +float32_t i32_to_f32( int32_t ); +float64_t i32_to_f64( int32_t ); +float32_t i64_to_f32( int64_t ); +float64_t i64_to_f64( int64_t ); + +/*---------------------------------------------------------------------------- +| 32-bit (single-precision) floating-point operations. 
+*----------------------------------------------------------------------------*/ +uint_fast32_t f32_to_ui32( float32_t, uint_fast8_t, bool ); +uint_fast64_t f32_to_ui64( float32_t, uint_fast8_t, bool ); +int_fast32_t f32_to_i32( float32_t, uint_fast8_t, bool ); +int_fast64_t f32_to_i64( float32_t, uint_fast8_t, bool ); +uint_fast32_t f32_to_ui32_r_minMag( float32_t, bool ); +uint_fast64_t f32_to_ui64_r_minMag( float32_t, bool ); +int_fast32_t f32_to_i32_r_minMag( float32_t, bool ); +int_fast64_t f32_to_i64_r_minMag( float32_t, bool ); +float16_t f32_to_f16( float32_t ); +float64_t f32_to_f64( float32_t ); +float32_t f32_roundToInt( float32_t, uint_fast8_t, bool ); +float32_t f32_add( float32_t, float32_t ); +float32_t f32_sub( float32_t, float32_t ); +float32_t f32_mul( float32_t, float32_t ); +float32_t f32_mulAdd( float32_t, float32_t, float32_t ); +float32_t f32_div( float32_t, float32_t ); +float32_t f32_rem( float32_t, float32_t ); +float32_t f32_sqrt( float32_t ); +bool f32_eq( float32_t, float32_t ); +bool f32_le( float32_t, float32_t ); +bool f32_lt( float32_t, float32_t ); +bool f32_eq_signaling( float32_t, float32_t ); +bool f32_le_quiet( float32_t, float32_t ); +bool f32_lt_quiet( float32_t, float32_t ); +bool f32_isSignalingNaN( float32_t ); + +/*---------------------------------------------------------------------------- +| 64-bit (double-precision) floating-point operations. +*----------------------------------------------------------------------------*/ +uint_fast32_t f64_to_ui32( float64_t, uint_fast8_t, bool ); +uint_fast64_t f64_to_ui64( float64_t, uint_fast8_t, bool ); +int_fast32_t f64_to_i32( float64_t, uint_fast8_t, bool ); +int_fast64_t f64_to_i64( float64_t, uint_fast8_t, bool ); +uint_fast32_t f64_to_ui32_r_minMag( float64_t, bool ); +uint_fast64_t f64_to_ui64_r_minMag( float64_t, bool ); +int_fast32_t f64_to_i32_r_minMag( float64_t, bool ); +int_fast64_t f64_to_i64_r_minMag( float64_t, bool ); +float32_t f64_to_f32( float64_t ); +float64_t f64_roundToInt( float64_t, uint_fast8_t, bool ); +float64_t f64_add( float64_t, float64_t ); +float64_t f64_sub( float64_t, float64_t ); +float64_t f64_mul( float64_t, float64_t ); +float64_t f64_mulAdd( float64_t, float64_t, float64_t ); +float64_t f64_div( float64_t, float64_t ); +float64_t f64_rem( float64_t, float64_t ); +float64_t f64_sqrt( float64_t ); +bool f64_eq( float64_t, float64_t ); +bool f64_le( float64_t, float64_t ); +bool f64_lt( float64_t, float64_t ); +bool f64_eq_signaling( float64_t, float64_t ); +bool f64_le_quiet( float64_t, float64_t ); +bool f64_lt_quiet( float64_t, float64_t ); +bool f64_isSignalingNaN( float64_t ); + +#endif + diff --git a/src/common/softfloat/source/include/softfloat_types.h b/src/common/softfloat/source/include/softfloat_types.h new file mode 100644 index 000000000..af1888f9b --- /dev/null +++ b/src/common/softfloat/source/include/softfloat_types.h @@ -0,0 +1,81 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef softfloat_types_h +#define softfloat_types_h 1 + +#include + +/*---------------------------------------------------------------------------- +| Types used to pass 16-bit, 32-bit, 64-bit, and 128-bit floating-point +| arguments and results to/from functions. These types must be exactly +| 16 bits, 32 bits, 64 bits, and 128 bits in size, respectively. Where a +| platform has "native" support for IEEE-Standard floating-point formats, +| the types below may, if desired, be defined as aliases for the native types +| (typically 'float' and 'double', and possibly 'long double'). +*----------------------------------------------------------------------------*/ +typedef struct { uint16_t v; } float16_t; +typedef struct { uint32_t v; } float32_t; +typedef struct { uint64_t v; } float64_t; +typedef struct { uint64_t v[2]; } float128_t; + +/*---------------------------------------------------------------------------- +| The format of an 80-bit extended floating-point number in memory. This +| structure must contain a 16-bit field named 'signExp' and a 64-bit field +| named 'signif'. +*----------------------------------------------------------------------------*/ +#ifdef LITTLEENDIAN +struct extFloat80M { uint64_t signif; uint16_t signExp; }; +#else +struct extFloat80M { uint16_t signExp; uint64_t signif; }; +#endif + +/*---------------------------------------------------------------------------- +| The type used to pass 80-bit extended floating-point arguments and +| results to/from functions. This type must have size identical to +| 'struct extFloat80M'. Type 'extFloat80_t' can be defined as an alias for +| 'struct extFloat80M'. Alternatively, if a platform has "native" support +| for IEEE-Standard 80-bit extended floating-point, it may be possible, +| if desired, to define 'extFloat80_t' as an alias for the native type +| (presumably either 'long double' or a nonstandard compiler-intrinsic type). +| In that case, the 'signif' and 'signExp' fields of 'struct extFloat80M' +| must align exactly with the locations in memory of the sign, exponent, and +| significand of the native type. 
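The float16_t/float32_t/float64_t types above are plain structs carrying raw IEEE bit patterns, so they interoperate with native floating-point only by copying bits. An illustrative sketch, not part of the patch, assuming the host 'float' is IEEE-754 binary32 (which softfloat_types.h itself does not guarantee):

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "softfloat.h"

static float32_t from_native(float x)
{
    float32_t z;
    memcpy(&z.v, &x, sizeof z.v);   /* reinterpret the native bits */
    return z;
}

static float to_native(float32_t x)
{
    float z;
    memcpy(&z, &x.v, sizeof z);
    return z;
}

int main(void)
{
    float32_t a = from_native(1.5f);
    float32_t b = from_native(2.0f);
    printf("%g\n", (double) to_native(f32_mul(a, b)));   /* prints 3 */
    return 0;
}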
+*----------------------------------------------------------------------------*/ +typedef struct extFloat80M extFloat80_t; + +#endif + diff --git a/src/common/softfloat/source/s_addMagsF32.c b/src/common/softfloat/source/s_addMagsF32.c new file mode 100644 index 000000000..ba647814d --- /dev/null +++ b/src/common/softfloat/source/s_addMagsF32.c @@ -0,0 +1,126 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" + +float32_t softfloat_addMagsF32( uint_fast32_t uiA, uint_fast32_t uiB ) +{ + int_fast16_t expA; + uint_fast32_t sigA; + int_fast16_t expB; + uint_fast32_t sigB; + int_fast16_t expDiff; + uint_fast32_t uiZ; + bool signZ; + int_fast16_t expZ; + uint_fast32_t sigZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! expA ) { + uiZ = uiA + sigB; + goto uiZ; + } + if ( expA == 0xFF ) { + if ( sigA | sigB ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + signZ = signF32UI( uiA ); + expZ = expA; + sigZ = 0x01000000 + sigA + sigB; + if ( ! 
(sigZ & 1) && (expZ < 0xFE) ) { + uiZ = packToF32UI( signZ, expZ, sigZ>>1 ); + goto uiZ; + } + sigZ <<= 6; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + signZ = signF32UI( uiA ); + sigA <<= 6; + sigB <<= 6; + if ( expDiff < 0 ) { + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF32UI( signZ, 0xFF, 0 ); + goto uiZ; + } + expZ = expB; + sigA += expA ? 0x20000000 : sigA; + sigA = softfloat_shiftRightJam32( sigA, -expDiff ); + } else { + if ( expA == 0xFF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + sigB += expB ? 0x20000000 : sigB; + sigB = softfloat_shiftRightJam32( sigB, expDiff ); + } + sigZ = 0x20000000 + sigA + sigB; + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + } + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/s_addMagsF64.c b/src/common/softfloat/source/s_addMagsF64.c new file mode 100644 index 000000000..63e1afe9d --- /dev/null +++ b/src/common/softfloat/source/s_addMagsF64.c @@ -0,0 +1,128 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
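softfloat_addMagsF32 above aligns the smaller operand with softfloat_shiftRightJam32, a right shift that ORs a sticky 1 into the result whenever nonzero bits are discarded, so the later rounding step can still tell the sum was inexact. A standalone sketch of that jam shift, not part of the SoftFloat sources:

#include <stdint.h>
#include <stdio.h>

static uint32_t shift_right_jam32(uint32_t a, unsigned dist)
{
    if (dist == 0) return a;
    if (dist >= 32) return a != 0;                   /* everything collapses into the sticky bit */
    return (a >> dist) | ((a << (32 - dist)) != 0);  /* sticky 1 if any bit was shifted out */
}

int main(void)
{
    /* A plain shift silently drops the 0x80 that falls off the end; the jam
     * shift keeps a sticky 1 so rounding can see the lost precision. */
    printf("plain=0x%x jam=0x%x\n", 0x480u >> 8, shift_right_jam32(0x480u, 8));
    return 0;
}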
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" + +float64_t + softfloat_addMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ ) +{ + int_fast16_t expA; + uint_fast64_t sigA; + int_fast16_t expB; + uint_fast64_t sigB; + int_fast16_t expDiff; + uint_fast64_t uiZ; + int_fast16_t expZ; + uint_fast64_t sigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! expA ) { + uiZ = uiA + sigB; + goto uiZ; + } + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + sigZ = UINT64_C( 0x0020000000000000 ) + sigA + sigB; + sigZ <<= 9; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sigA <<= 9; + sigB <<= 9; + if ( expDiff < 0 ) { + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + } + expZ = expB; + if ( expA ) { + sigA += UINT64_C( 0x2000000000000000 ); + } else { + sigA <<= 1; + } + sigA = softfloat_shiftRightJam64( sigA, -expDiff ); + } else { + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + if ( expB ) { + sigB += UINT64_C( 0x2000000000000000 ); + } else { + sigB <<= 1; + } + sigB = softfloat_shiftRightJam64( sigB, expDiff ); + } + sigZ = UINT64_C( 0x2000000000000000 ) + sigA + sigB; + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/s_approxRecipSqrt32_1.c b/src/common/softfloat/source/s_approxRecipSqrt32_1.c new file mode 100644 index 000000000..2695f7fea --- /dev/null +++ b/src/common/softfloat/source/s_approxRecipSqrt32_1.c @@ -0,0 +1,74 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitives.h" + +#ifndef softfloat_approxRecipSqrt32_1 + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[]; + +uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a ) +{ + int index; + uint16_t eps, r0; + uint_fast32_t ESqrR0; + uint32_t sigma0; + uint_fast32_t r; + uint32_t sqrSigma0; + + index = (a>>27 & 0xE) + oddExpA; + eps = (uint16_t) (a>>12); + r0 = softfloat_approxRecipSqrt_1k0s[index] + - ((softfloat_approxRecipSqrt_1k1s[index] * (uint_fast32_t) eps) + >>20); + ESqrR0 = (uint_fast32_t) r0 * r0; + if ( ! oddExpA ) ESqrR0 <<= 1; + sigma0 = ~(uint_fast32_t) (((uint32_t) ESqrR0 * (uint_fast64_t) a)>>23); + r = ((uint_fast32_t) r0<<16) + ((r0 * (uint_fast64_t) sigma0)>>25); + sqrSigma0 = ((uint_fast64_t) sigma0 * sigma0)>>32; + r += ((uint32_t) ((r>>1) + (r>>3) - ((uint_fast32_t) r0<<14)) + * (uint_fast64_t) sqrSigma0) + >>48; + if ( ! (r & 0x80000000) ) r = 0x80000000; + return r; + +} + +#endif + diff --git a/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c b/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c new file mode 100644 index 000000000..a60cf8255 --- /dev/null +++ b/src/common/softfloat/source/s_approxRecipSqrt_1Ks.c @@ -0,0 +1,49 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitives.h" + +const uint16_t softfloat_approxRecipSqrt_1k0s[16] = { + 0xB4C9, 0xFFAB, 0xAA7D, 0xF11C, 0xA1C5, 0xE4C7, 0x9A43, 0xDA29, + 0x93B5, 0xD0E5, 0x8DED, 0xC8B7, 0x88C6, 0xC16D, 0x8424, 0xBAE1 +}; +const uint16_t softfloat_approxRecipSqrt_1k1s[16] = { + 0xA5A5, 0xEA42, 0x8C21, 0xC62D, 0x788F, 0xAA7F, 0x6928, 0x94B6, + 0x5CC7, 0x8335, 0x52A6, 0x74E2, 0x4A3E, 0x68FE, 0x432B, 0x5EFD +}; + diff --git a/src/common/softfloat/source/s_countLeadingZeros64.c b/src/common/softfloat/source/s_countLeadingZeros64.c new file mode 100644 index 000000000..00457418b --- /dev/null +++ b/src/common/softfloat/source/s_countLeadingZeros64.c @@ -0,0 +1,73 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_countLeadingZeros64 + +#define softfloat_countLeadingZeros64 softfloat_countLeadingZeros64 +#include "primitives.h" + +uint_fast8_t softfloat_countLeadingZeros64( uint64_t a ) +{ + uint_fast8_t count; + uint32_t a32; + + count = 0; + a32 = a>>32; + if ( ! a32 ) { + count = 32; + a32 = a; + } + /*------------------------------------------------------------------------ + | From here, result is current count + count leading zeros of `a32'. + *------------------------------------------------------------------------*/ + if ( a32 < 0x10000 ) { + count += 16; + a32 <<= 16; + } + if ( a32 < 0x1000000 ) { + count += 8; + a32 <<= 8; + } + count += softfloat_countLeadingZeros8[a32>>24]; + return count; + +} + +#endif + diff --git a/src/common/softfloat/source/s_countLeadingZeros8.c b/src/common/softfloat/source/s_countLeadingZeros8.c new file mode 100644 index 000000000..1158d01c3 --- /dev/null +++ b/src/common/softfloat/source/s_countLeadingZeros8.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
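softfloat_countLeadingZeros64 above narrows the input to 32 bits, shifts in 16- and 8-bit steps, and finishes with the 256-entry byte table. A sketch, not part of the patch, cross-checking a straightforward bit-by-bit count against the GCC/Clang __builtin_clzll extension (a toolchain assumption; the builtin is undefined for zero, so only nonzero samples are used):

#include <stdint.h>
#include <stdio.h>

static unsigned clz64_portable(uint64_t a)
{
    unsigned n = 0;
    if (a == 0) return 64;
    while (!(a & UINT64_C(0x8000000000000000))) { a <<= 1; ++n; }
    return n;
}

int main(void)
{
    uint64_t samples[] = {
        1, 0x10, UINT64_C(0x0000000100000000), UINT64_C(0x8000000000000000)
    };
    for (int i = 0; i < (int) (sizeof samples / sizeof samples[0]); ++i) {
        uint64_t a = samples[i];
        printf("%016llx -> %u %u\n", (unsigned long long) a,
               clz64_portable(a), (unsigned) __builtin_clzll(a));
    }
    return 0;
}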
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitives.h" + +const uint_least8_t softfloat_countLeadingZeros8[256] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + diff --git a/src/common/softfloat/source/s_mul64To128.c b/src/common/softfloat/source/s_mul64To128.c new file mode 100644 index 000000000..3b0fb9653 --- /dev/null +++ b/src/common/softfloat/source/s_mul64To128.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" +#include "primitives.h" + +#ifndef softfloat_mul64To128 + +struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b ) +{ + uint32_t a32, a0, b32, b0; + struct uint128 z; + uint64_t mid1, mid; + + a32 = a>>32; + a0 = a; + b32 = b>>32; + b0 = b; + z.v0 = (uint_fast64_t) a0 * b0; + mid1 = (uint_fast64_t) a32 * b0; + mid = mid1 + (uint_fast64_t) a0 * b32; + z.v64 = (uint_fast64_t) a32 * b32; + z.v64 += (uint_fast64_t) (mid < mid1)<<32 | mid>>32; + mid <<= 32; + z.v0 += mid; + z.v64 += (z.v0 < mid); + return z; + +} + +#endif + diff --git a/src/common/softfloat/source/s_mulAddF32.c b/src/common/softfloat/source/s_mulAddF32.c new file mode 100644 index 000000000..d163ea02e --- /dev/null +++ b/src/common/softfloat/source/s_mulAddF32.c @@ -0,0 +1,224 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
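softfloat_mul64To128 above forms the full 128-bit product from four 32x32-bit partial products, folding the two middle terms into the high and low words with explicit carries. A standalone sketch of the same scheme, not part of the patch, cross-checked against the unsigned __int128 type that GCC and Clang provide on 64-bit targets (a toolchain assumption):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t hi, lo; } u128;

static u128 mul64to128(uint64_t a, uint64_t b)
{
    uint64_t a32 = a >> 32, a0 = (uint32_t) a;
    uint64_t b32 = b >> 32, b0 = (uint32_t) b;
    uint64_t mid1 = a32 * b0;
    uint64_t mid  = mid1 + a0 * b32;                 /* may wrap; carry recorded below */
    u128 z;
    z.lo  = a0 * b0;
    z.hi  = a32 * b32 + ((uint64_t) (mid < mid1) << 32) + (mid >> 32);
    mid <<= 32;
    z.lo += mid;
    z.hi += (z.lo < mid);                            /* carry from adding the middle part */
    return z;
}

int main(void)
{
    uint64_t a = UINT64_C(0xFEDCBA9876543210), b = UINT64_C(0x0F1E2D3C4B5A6978);
    u128 z = mul64to128(a, b);
    unsigned __int128 ref = (unsigned __int128) a * b;
    printf("match=%d\n", z.hi == (uint64_t) (ref >> 64) && z.lo == (uint64_t) ref);
    return 0;
}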
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t + softfloat_mulAddF32( + uint_fast32_t uiA, uint_fast32_t uiB, uint_fast32_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signC; + int_fast16_t expC; + uint_fast32_t sigC; + bool signProd; + uint_fast32_t magBits, uiZ; + struct exp16_sig32 normExpSig; + int_fast16_t expProd; + uint_fast64_t sigProd; + bool signZ; + int_fast16_t expZ; + uint_fast32_t sigZ; + int_fast16_t expDiff; + uint_fast64_t sig64Z, sig64C; + int_fast8_t shiftDist; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signC = signF32UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF32UI( uiC ); + sigC = fracF32UI( uiC ); + signProd = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0xFF ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expProd = expA + expB - 0x7E; + sigA = (sigA | 0x00800000)<<7; + sigB = (sigB | 0x00800000)<<7; + sigProd = (uint_fast64_t) sigA * sigB; + if ( sigProd < UINT64_C( 0x2000000000000000 ) ) { + --expProd; + sigProd <<= 1; + } + signZ = signProd; + if ( ! expC ) { + if ( ! 
sigC ) { + expZ = expProd - 1; + sigZ = softfloat_shortShiftRightJam64( sigProd, 31 ); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF32Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | 0x00800000)<<6; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expProd - expC; + if ( signProd == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + expZ = expC; + sigZ = sigC + softfloat_shiftRightJam64( sigProd, 32 - expDiff ); + } else { + expZ = expProd; + sig64Z = + sigProd + + softfloat_shiftRightJam64( + (uint_fast64_t) sigC<<32, expDiff ); + sigZ = softfloat_shortShiftRightJam64( sig64Z, 32 ); + } + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64C = (uint_fast64_t) sigC<<32; + if ( expDiff < 0 ) { + signZ = signC; + expZ = expC; + sig64Z = sig64C - softfloat_shiftRightJam64( sigProd, -expDiff ); + } else if ( ! expDiff ) { + expZ = expProd; + sig64Z = sigProd - sig64C; + if ( ! sig64Z ) goto completeCancellation; + if ( sig64Z & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + sig64Z = -sig64Z; + } + } else { + expZ = expProd; + sig64Z = sigProd - softfloat_shiftRightJam64( sig64C, expDiff ); + } + shiftDist = softfloat_countLeadingZeros64( sig64Z ) - 1; + expZ -= shiftDist; + shiftDist -= 32; + if ( shiftDist < 0 ) { + sigZ = softfloat_shortShiftRightJam64( sig64Z, -shiftDist ); + } else { + sigZ = (uint_fast32_t) sig64Z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + + +float64_t + softfloat_mulAddF64( + uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signC; + int_fast16_t expC; + uint_fast64_t sigC; + bool signZ; + uint_fast64_t magBits, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + struct uint128 sig128Z; + uint_fast64_t sigZ; + int_fast16_t expDiff; + struct uint128 sig128C; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signC = signF64UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF64UI( uiC ); + sigC = fracF64UI( uiC ); + signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0x7FF ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + /*------------------------------------------------------------------------ + 
*------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FE; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<10; + sig128Z = softfloat_mul64To128( sigA, sigB ); + if ( sig128Z.v64 < UINT64_C( 0x2000000000000000 ) ) { + --expZ; + sig128Z = + softfloat_add128( + sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 ); + } + if ( ! expC ) { + if ( ! sigC ) { + --expZ; + sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF64Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | UINT64_C( 0x0010000000000000 ))<<9; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expZ - expC; + if ( expDiff < 0 ) { + expZ = expC; + if ( (signZ == signC) || (expDiff < -1) ) { + sig128Z.v64 = softfloat_shiftRightJam64( sig128Z.v64, -expDiff ); + } else { + sig128Z = + softfloat_shortShiftRightJam128( sig128Z.v64, sig128Z.v0, 1 ); + } + } else if ( expDiff ) { + sig128C = softfloat_shiftRightJam128( sigC, 0, expDiff ); + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signZ == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + sigZ = (sigC + sig128Z.v64) | (sig128Z.v0 != 0); + } else { + sig128Z = + softfloat_add128( + sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 ); + sigZ = sig128Z.v64 | (sig128Z.v0 != 0); + } + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff < 0 ) { + signZ = signC; + sig128Z = softfloat_sub128( sigC, 0, sig128Z.v64, sig128Z.v0 ); + } else if ( ! expDiff ) { + sig128Z.v64 = sig128Z.v64 - sigC; + if ( ! (sig128Z.v64 | sig128Z.v0) ) goto completeCancellation; + if ( sig128Z.v64 & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + sig128Z = softfloat_sub128( 0, 0, sig128Z.v64, sig128Z.v0 ); + } + } else { + sig128Z = + softfloat_sub128( + sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 ); + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! 
sig128Z.v64 ) { + expZ -= 64; + sig128Z.v64 = sig128Z.v0; + sig128Z.v0 = 0; + } + shiftDist = softfloat_countLeadingZeros64( sig128Z.v64 ) - 1; + expZ -= shiftDist; + if ( shiftDist < 0 ) { + sigZ = softfloat_shortShiftRightJam64( sig128Z.v64, -shiftDist ); + } else { + sig128Z = + softfloat_shortShiftLeft128( + sig128Z.v64, sig128Z.v0, shiftDist ); + sigZ = sig128Z.v64; + } + sigZ |= (sig128Z.v0 != 0); + } + roundPack: + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN_ABC: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto propagateNaN_ZC; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infProdArg: + if ( magBits ) { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + if ( expC != 0x7FF ) goto uiZ; + if ( sigC ) goto propagateNaN_ZC; + if ( signZ == signC ) goto uiZ; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + propagateNaN_ZC: + uiZ = softfloat_propagateNaNF64UI( uiZ, uiC ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zeroProd: + uiZ = uiC; + if ( ! (expC | sigC) && (signZ != signC) ) { + completeCancellation: + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + + diff --git a/src/common/softfloat/source/s_normRoundPackToF32.c b/src/common/softfloat/source/s_normRoundPackToF32.c new file mode 100644 index 000000000..14e08116b --- /dev/null +++ b/src/common/softfloat/source/s_normRoundPackToF32.c @@ -0,0 +1,58 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
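softfloat_mulAddF64 above keeps the full 128-bit product of a and b and rounds only once after adding c, which is what distinguishes f64_mulAdd from a separate multiply followed by an add. A usage sketch, not part of the patch; building float64_t values from native doubles via memcpy assumes the host 'double' is IEEE-754 binary64, and for these operands the exact product carries a low-order term that the standalone multiply discards, so the two bit patterns differ:

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include "softfloat.h"

static float64_t d(double x) { float64_t z; memcpy(&z.v, &x, sizeof z.v); return z; }

int main(void)
{
    float64_t a = d(1.0 + 0x1p-30), b = d(1.0 + 0x1p-30), c = d(-1.0);
    float64_t fused   = f64_mulAdd(a, b, c);           /* rounds a*b + c once */
    float64_t twostep = f64_add(f64_mul(a, b), c);     /* rounds twice */
    printf("fused=0x%016llx twostep=0x%016llx\n",
           (unsigned long long) fused.v, (unsigned long long) twostep.v);
    return 0;
}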
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" + +float32_t + softfloat_normRoundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig ) +{ + int_fast8_t shiftDist; + union ui32_f32 uZ; + + shiftDist = softfloat_countLeadingZeros32( sig ) - 1; + exp -= shiftDist; + if ( (7 <= shiftDist) && ((unsigned int) exp < 0xFD) ) { + uZ.ui = packToF32UI( sign, sig ? exp : 0, sig<<(shiftDist - 7) ); + return uZ.f; + } else { + return softfloat_roundPackToF32( sign, exp, sig< +#include +#include "platform.h" +#include "internals.h" + +float64_t + softfloat_normRoundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig ) +{ + int_fast8_t shiftDist; + union ui64_f64 uZ; + + shiftDist = softfloat_countLeadingZeros64( sig ) - 1; + exp -= shiftDist; + if ( (10 <= shiftDist) && ((unsigned int) exp < 0x7FD) ) { + uZ.ui = packToF64UI( sign, sig ? exp : 0, sig<<(shiftDist - 10) ); + return uZ.f; + } else { + return softfloat_roundPackToF64( sign, exp, sig< +#include "platform.h" +#include "internals.h" + +struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t sig ) +{ + int_fast8_t shiftDist; + struct exp16_sig32 z; + + shiftDist = softfloat_countLeadingZeros32( sig ) - 8; + z.exp = 1 - shiftDist; + z.sig = sig< +#include "platform.h" +#include "internals.h" + +struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t sig ) +{ + int_fast8_t shiftDist; + struct exp16_sig64 z; + + shiftDist = softfloat_countLeadingZeros64( sig ) - 11; + z.exp = 1 - shiftDist; + z.sig = sig< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t + softfloat_roundPackToF16( bool sign, int_fast16_t exp, uint_fast16_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + bool isTiny; + uint_fast16_t uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x8; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 
0xF + : 0; + } + roundBits = sig & 0xF; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x1D <= (unsigned int) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) || (sig + roundIncrement < 0x8000); + sig = softfloat_shiftRightJam32( sig, -exp ); + exp = 0; + roundBits = sig & 0xF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( (0x1D < exp) || (0x8000 <= sig + roundIncrement) ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF16UI( sign, 0x1F, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>4; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast16_t) (! (roundBits ^ 8) & roundNearEven); + if ( ! sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF16UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/s_roundPackToF32.c b/src/common/softfloat/source/s_roundPackToF32.c new file mode 100644 index 000000000..cc3450857 --- /dev/null +++ b/src/common/softfloat/source/s_roundPackToF32.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t + softfloat_roundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + bool isTiny; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x40; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0x7F + : 0; + } + roundBits = sig & 0x7F; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0xFD <= (unsigned int) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) || (sig + roundIncrement < 0x80000000); + sig = softfloat_shiftRightJam32( sig, -exp ); + exp = 0; + roundBits = sig & 0x7F; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( (0xFD < exp) || (0x80000000 <= sig + roundIncrement) ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF32UI( sign, 0xFF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>7; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast32_t) (! (roundBits ^ 0x40) & roundNearEven); + if ( ! 
sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF32UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/s_roundPackToF64.c b/src/common/softfloat/source/s_roundPackToF64.c new file mode 100644 index 000000000..aaff008c1 --- /dev/null +++ b/src/common/softfloat/source/s_roundPackToF64.c @@ -0,0 +1,117 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t + softfloat_roundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + bool isTiny; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x200; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 
0x3FF + : 0; + } + roundBits = sig & 0x3FF; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x7FD <= (uint16_t) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) + || (sig + roundIncrement < UINT64_C( 0x8000000000000000 )); + sig = softfloat_shiftRightJam64( sig, -exp ); + exp = 0; + roundBits = sig & 0x3FF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( + (0x7FD < exp) + || (UINT64_C( 0x8000000000000000 ) <= sig + roundIncrement) + ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF64UI( sign, 0x7FF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>10; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast64_t) (! (roundBits ^ 0x200) & roundNearEven); + if ( ! sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF64UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/src/common/softfloat/source/s_roundToI32.c b/src/common/softfloat/source/s_roundToI32.c new file mode 100644 index 000000000..20a3ff4f6 --- /dev/null +++ b/src/common/softfloat/source/s_roundToI32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t + softfloat_roundToI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + uint_fast32_t sig32; + union { uint32_t ui; int32_t i; } uZ; + int_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x800; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0xFFF + : 0; + } + roundBits = sig & 0xFFF; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid; + sig32 = sig>>12; + sig32 &= ~(uint_fast32_t) (! (roundBits ^ 0x800) & roundNearEven); + uZ.ui = sign ? -sig32 : sig32; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i32_fromNegOverflow : i32_fromPosOverflow; + +} + diff --git a/src/common/softfloat/source/s_roundToI64.c b/src/common/softfloat/source/s_roundToI64.c new file mode 100644 index 000000000..fcddbc278 --- /dev/null +++ b/src/common/softfloat/source/s_roundToI64.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
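softfloat_roundToI32 above treats 'sig' as an unsigned fixed-point value with 12 fraction bits; the float-to-integer conversions shift the significand into that form before calling it. The fragment below is a stripped-down sketch of the unsigned round-to-nearest-even case only, without the sign handling and flag raising of the real helper.

#include <stdint.h>
#include <stdio.h>

/* Round a 12-fraction-bit fixed-point value to a 32-bit unsigned integer,
 * nearest, ties to even.  Returns nonzero if the result would not fit. */
static int round_fixed12_to_u32( uint64_t sig, uint32_t *out )
{
    uint32_t roundBits = sig & 0xFFF;
    sig += 0x800;                                           /* add one half */
    if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) return 1;   /* too large    */
    uint32_t z = (uint32_t) (sig>>12);
    if ( roundBits == 0x800 ) z &= ~1u;                     /* tie: round to even */
    *out = z;
    return 0;
}

int main( void )
{
    uint32_t z;
    round_fixed12_to_u32( (5ULL<<12) | 0x800, &z ); printf( "%u\n", z ); /* 5.5 -> 6 */
    round_fixed12_to_u32( (4ULL<<12) | 0x800, &z ); printf( "%u\n", z ); /* 4.5 -> 4 */
    return 0;
}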
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundToI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + union { uint64_t ui; int64_t i; } uZ; + int_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + uZ.ui = sign ? -sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/src/common/softfloat/source/s_roundToUI32.c b/src/common/softfloat/source/s_roundToUI32.c new file mode 100644 index 000000000..180899bd8 --- /dev/null +++ b/src/common/softfloat/source/s_roundToUI32.c @@ -0,0 +1,80 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
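softfloat_roundToI32 and softfloat_roundToI64 detect signed overflow with the same trick: negate the rounded magnitude in unsigned arithmetic, reinterpret the bits as a signed integer, and then require the result's sign to agree with the requested sign ("if ( z && ((z < 0) ^ sign) ) goto invalid"). A small, hypothetical illustration of that check in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Return true and store the signed result if 'magnitude' with the given sign
 * fits in an int32_t.  INT32_MIN is the one magnitude of 2^31 that fits. */
static bool fits_in_i32( bool sign, uint32_t magnitude, int32_t *out )
{
    union { uint32_t ui; int32_t i; } u;
    u.ui = sign ? 0u - magnitude : magnitude;
    if ( u.i && ((u.i < 0) != sign) ) return false;
    *out = u.i;
    return true;
}

int main( void )
{
    int32_t z;
    printf( "%d\n", fits_in_i32( true,  0x80000000u, &z ) ); /* 1: -2^31 is representable */
    printf( "%d\n", fits_in_i32( false, 0x80000000u, &z ) ); /* 0: +2^31 is not           */
    return 0;
}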
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t + softfloat_roundToUI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x800; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0xFFF + : 0; + } + roundBits = sig & 0xFFF; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid; + z = sig>>12; + z &= ~(uint_fast32_t) (! (roundBits ^ 0x800) & roundNearEven); + if ( sign && z ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + +} + diff --git a/src/common/softfloat/source/s_roundToUI64.c b/src/common/softfloat/source/s_roundToUI64.c new file mode 100644 index 000000000..de35b5eb0 --- /dev/null +++ b/src/common/softfloat/source/s_roundToUI64.c @@ -0,0 +1,85 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundToUI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/src/common/softfloat/source/s_shiftRightJam128.c b/src/common/softfloat/source/s_shiftRightJam128.c new file mode 100644 index 000000000..7f3d4c88b --- /dev/null +++ b/src/common/softfloat/source/s_shiftRightJam128.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
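softfloat_roundToI64 and softfloat_roundToUI64 keep the fraction in a separate 64-bit word, sigExtra, rather than in low-order bits of sig. The sketch below shows only the round-to-nearest-even decision for the unsigned case; the real helpers additionally handle the other rounding modes, the sign, and the invalid/inexact flags.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 'sig' is the integer part and 'sigExtra' holds 64 fraction bits.  Round to
 * nearest, ties to even; fail if the increment wraps past 2^64 - 1. */
static bool round_u64_frac64( uint64_t sig, uint64_t sigExtra, uint64_t *out )
{
    if ( UINT64_C( 0x8000000000000000 ) <= sigExtra ) {   /* fraction >= 1/2 */
        if ( ! ++sig ) return false;                      /* overflow        */
        if ( ! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) {
            sig &= ~(uint64_t) 1;                         /* exact tie: even */
        }
    }
    *out = sig;
    return true;
}

int main( void )
{
    uint64_t z;
    round_u64_frac64( 7, UINT64_C( 0x8000000000000000 ), &z );
    printf( "%llu\n", (unsigned long long) z );   /* 7.5 -> 8 */
    round_u64_frac64( 8, UINT64_C( 0x8000000000000000 ), &z );
    printf( "%llu\n", (unsigned long long) z );   /* 8.5 -> 8 */
    return 0;
}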
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" +#include "primitives.h" + +#ifndef softfloat_shiftRightJam128 + +struct uint128 + softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist ) +{ + uint_fast8_t u8NegDist; + struct uint128 z; + + if ( dist < 64 ) { + u8NegDist = -dist; + z.v64 = a64>>dist; + z.v0 = + a64<<(u8NegDist & 63) | a0>>dist + | ((uint64_t) (a0<<(u8NegDist & 63)) != 0); + } else { + z.v64 = 0; + z.v0 = + (dist < 127) + ? a64>>(dist & 63) + | (((a64 & (((uint_fast64_t) 1<<(dist & 63)) - 1)) | a0) + != 0) + : ((a64 | a0) != 0); + } + return z; + +} + +#endif + diff --git a/src/common/softfloat/source/s_subMagsF32.c b/src/common/softfloat/source/s_subMagsF32.c new file mode 100644 index 000000000..86e89f2ec --- /dev/null +++ b/src/common/softfloat/source/s_subMagsF32.c @@ -0,0 +1,143 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
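softfloat_shiftRightJam128 above implements a "jamming" right shift: instead of discarding the bits shifted out, it ORs a 1 into the least significant bit of the result whenever any of them were nonzero, so a later rounding step can still see that the value was inexact. The single-word sketch below shows the idea; the 128-bit version applies the same rule across two words, and the library's own 64-bit helper uses a shift trick rather than an explicit mask.

#include <stdint.h>
#include <stdio.h>

/* Shift 'a' right by 'dist' bits, forcing the result's LSB to 1 if any of
 * the discarded bits were nonzero (the "sticky" or "jam" bit). */
static uint64_t shift_right_jam64( uint64_t a, unsigned int dist )
{
    if ( 64 <= dist ) return (a != 0);
    return a>>dist | ((a & ((UINT64_C( 1 )<<dist) - 1)) != 0);
}

int main( void )
{
    printf( "0x%llx\n", (unsigned long long) shift_right_jam64( 0x8001, 4 ) ); /* 0x801: inexact, jammed  */
    printf( "0x%llx\n", (unsigned long long) shift_right_jam64( 0x8000, 4 ) ); /* 0x800: exact, unchanged */
    return 0;
}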
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t softfloat_subMagsF32( uint_fast32_t uiA, uint_fast32_t uiB ) +{ + int_fast16_t expA; + uint_fast32_t sigA; + int_fast16_t expB; + uint_fast32_t sigB; + int_fast16_t expDiff; + uint_fast32_t uiZ; + int_fast32_t sigDiff; + bool signZ; + int_fast8_t shiftDist; + int_fast16_t expZ; + uint_fast32_t sigX, sigY; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + goto uiZ; + } + sigDiff = sigA - sigB; + if ( ! sigDiff ) { + uiZ = + packToF32UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + goto uiZ; + } + if ( expA ) --expA; + signZ = signF32UI( uiA ); + if ( sigDiff < 0 ) { + signZ = ! signZ; + sigDiff = -sigDiff; + } + shiftDist = softfloat_countLeadingZeros32( sigDiff ) - 8; + expZ = expA - shiftDist; + if ( expZ < 0 ) { + shiftDist = expA; + expZ = 0; + } + uiZ = packToF32UI( signZ, expZ, sigDiff< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t + softfloat_subMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ ) +{ + int_fast16_t expA; + uint_fast64_t sigA; + int_fast16_t expB; + uint_fast64_t sigB; + int_fast16_t expDiff; + uint_fast64_t uiZ; + int_fast64_t sigDiff; + int_fast8_t shiftDist; + int_fast16_t expZ; + uint_fast64_t sigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + goto uiZ; + } + sigDiff = sigA - sigB; + if ( ! sigDiff ) { + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + goto uiZ; + } + if ( expA ) --expA; + if ( sigDiff < 0 ) { + signZ = ! 
signZ; + sigDiff = -sigDiff; + } + shiftDist = softfloat_countLeadingZeros64( sigDiff ) - 11; + expZ = expA - shiftDist; + if ( expZ < 0 ) { + shiftDist = expA; + expZ = 0; + } + uiZ = packToF64UI( signZ, expZ, sigDiff< +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +#ifndef THREAD_LOCAL +#define THREAD_LOCAL +#endif + +THREAD_LOCAL uint_fast8_t softfloat_roundingMode = softfloat_round_near_even; +THREAD_LOCAL uint_fast8_t softfloat_detectTininess = init_detectTininess; +THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags = 0; diff --git a/src/common/softfloat/source/ui32_to_f32.c b/src/common/softfloat/source/ui32_to_f32.c new file mode 100644 index 000000000..7e5ece68a --- /dev/null +++ b/src/common/softfloat/source/ui32_to_f32.c @@ -0,0 +1,57 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t ui32_to_f32( uint32_t a ) +{ + union ui32_f32 uZ; + + if ( ! a ) { + uZ.ui = 0; + return uZ.f; + } + if ( a & 0x80000000 ) { + return softfloat_roundPackToF32( 0, 0x9D, a>>1 | (a & 1) ); + } else { + return softfloat_normRoundPackToF32( 0, 0x9C, a ); + } + +} + diff --git a/src/common/softfloat/source/ui32_to_f64.c b/src/common/softfloat/source/ui32_to_f64.c new file mode 100644 index 000000000..5e5f843af --- /dev/null +++ b/src/common/softfloat/source/ui32_to_f64.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. 
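softfloat_state.c above holds the library's control and status state: the rounding mode, the tininess-detection mode, and the accumulated exception flags (all optionally thread-local via THREAD_LOCAL). A caller selects a mode, clears the flags, and polls them after an operation, along the lines of the sketch below. The sketch assumes the default softfloat.h typedefs, in which float32_t wraps the raw IEEE-754 bits in a field named 'v'.

#include <stdio.h>
#include "softfloat.h"

int main( void )
{
    softfloat_roundingMode   = softfloat_round_near_even;
    softfloat_exceptionFlags = 0;

    /* 2^32 - 1 needs 32 significand bits and cannot be represented exactly
     * in binary32, so this conversion must round and raise "inexact". */
    float32_t z = ui32_to_f32( 0xFFFFFFFFu );

    if ( softfloat_exceptionFlags & softfloat_flag_inexact ) {
        printf( "inexact conversion, raw bits 0x%08x\n", (unsigned int) z.v );
    }
    return 0;
}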
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t ui32_to_f64( uint32_t a ) +{ + uint_fast64_t uiZ; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + if ( ! a ) { + uiZ = 0; + } else { + shiftDist = softfloat_countLeadingZeros32( a ) + 21; + uiZ = + packToF64UI( 0, 0x432 - shiftDist, (uint_fast64_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t ui64_to_f32( uint64_t a ) +{ + int_fast8_t shiftDist; + union ui32_f32 u; + uint_fast32_t sig; + + shiftDist = softfloat_countLeadingZeros64( a ) - 40; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF32UI( + 0, 0x95 - shiftDist, (uint_fast32_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t ui64_to_f64( uint64_t a ) +{ + union ui64_f64 uZ; + + if ( ! a ) { + uZ.ui = 0; + return uZ.f; + } + if ( a & UINT64_C( 0x8000000000000000 ) ) { + return + softfloat_roundPackToF64( + 0, 0x43D, softfloat_shortShiftRightJam64( a, 1 ) ); + } else { + return softfloat_normRoundPackToF64( 0, 0x43C, a ); + } + +} + diff --git a/src/common/src/nvSha256.c b/src/common/src/nvSha256.c new file mode 100644 index 000000000..3b8a6d293 --- /dev/null +++ b/src/common/src/nvSha256.c @@ -0,0 +1,342 @@ +/* + * FIPS 180-2 SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/******************************************************************************** + * Modified from the original source by Steve Motto, Atmel Corp. * + * Changes: * + * 1. Removed all code / data which supported SHA224, SHA384, SHA512 * + * 2. Moved stack variables wv and v to zram (needs 288 bytes of RAM) * + * 3. Moved SHA256 constants to flash segment * + ********************************************************************************/ +/** \file SHA256.c + * \brief Implements SHA256 Algorithm + * \author SEM + * \date Sept 16, 2009 +*/ +#if 0 +#define UNROLL_LOOPS /* Enable loops unrolling */ +#endif + +#include "nvSha256.h" +#include "nvmisc.h" + +#define SHFR(x, n) (x >> n) +#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) +#define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n))) +#define CH(x, y, z) ((x & y) ^ (~x & z)) +#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) + +#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) +#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) +#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) +#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10)) + +#define UNPACK32(x, str) \ +{ \ + *((str) + 3) = (NvU8) ((x) ); \ + *((str) + 2) = (NvU8) ((x) >> 8); \ + *((str) + 1) = (NvU8) ((x) >> 16); \ + *((str) + 0) = (NvU8) ((x) >> 24); \ +} + +#define PACK32(str, x) \ +{ \ + *(x) = ((NvU32) *((str) + 3) ) \ + | ((NvU32) *((str) + 2) << 8) \ + | ((NvU32) *((str) + 1) << 16) \ + | ((NvU32) *((str) + 0) << 24); \ +} + + +/* Macros used for loops unrolling */ + +#define SHA256_SCR(i) \ +{ \ + w[i] = SHA256_F4(w[i - 2]) + w[i - 7] \ + + SHA256_F3(w[i - 15]) + w[i - 16]; \ +} + +#define SHA256_EXP(a, b, c, d, e, f, g, h, j) \ +{ \ + t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) \ + + sha256_k[j] + w[j]; \ + t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]); \ + wv[d] += t1; \ + wv[h] = t1 + t2; \ +} + +static const NvU32 sha256_h0[8] = + {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; + +static const NvU32 sha256_k[64] = + {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 
0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2}; + +/* SHA-256 functions */ + +static void sha256_transf(nv_sha256_ctx *ctx, const NvU8 *message, NvU32 block_nb) +{ + NvU32 w[64]; + NvU32 wv[8]; + + NvU32 t1, t2; + const NvU8 *sub_block; + int i; + +#ifndef UNROLL_LOOPS + int j; +#endif + + for (i = 0; i < (int) block_nb; i++) { + sub_block = message + (i << 6); + +#ifndef UNROLL_LOOPS + for (j = 0; j < 16; j++) { + PACK32(&sub_block[j << 2], &w[j]); + } + + for (j = 16; j < 64; j++) { + SHA256_SCR(j); + } + + for (j = 0; j < 8; j++) { + wv[j] = ctx->h[j]; + } + + for (j = 0; j < 64; j++) { + t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + + sha256_k[j] + w[j]; + t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]); + wv[7] = wv[6]; + wv[6] = wv[5]; + wv[5] = wv[4]; + wv[4] = wv[3] + t1; + wv[3] = wv[2]; + wv[2] = wv[1]; + wv[1] = wv[0]; + wv[0] = t1 + t2; + } + + for (j = 0; j < 8; j++) { + ctx->h[j] += wv[j]; + } +#else + PACK32(&sub_block[ 0], &w[ 0]); PACK32(&sub_block[ 4], &w[ 1]); + PACK32(&sub_block[ 8], &w[ 2]); PACK32(&sub_block[12], &w[ 3]); + PACK32(&sub_block[16], &w[ 4]); PACK32(&sub_block[20], &w[ 5]); + PACK32(&sub_block[24], &w[ 6]); PACK32(&sub_block[28], &w[ 7]); + PACK32(&sub_block[32], &w[ 8]); PACK32(&sub_block[36], &w[ 9]); + PACK32(&sub_block[40], &w[10]); PACK32(&sub_block[44], &w[11]); + PACK32(&sub_block[48], &w[12]); PACK32(&sub_block[52], &w[13]); + PACK32(&sub_block[56], &w[14]); PACK32(&sub_block[60], &w[15]); + + SHA256_SCR(16); SHA256_SCR(17); SHA256_SCR(18); SHA256_SCR(19); + SHA256_SCR(20); SHA256_SCR(21); SHA256_SCR(22); SHA256_SCR(23); + SHA256_SCR(24); SHA256_SCR(25); SHA256_SCR(26); SHA256_SCR(27); + SHA256_SCR(28); SHA256_SCR(29); SHA256_SCR(30); SHA256_SCR(31); + SHA256_SCR(32); SHA256_SCR(33); SHA256_SCR(34); SHA256_SCR(35); + SHA256_SCR(36); SHA256_SCR(37); SHA256_SCR(38); SHA256_SCR(39); + SHA256_SCR(40); SHA256_SCR(41); SHA256_SCR(42); SHA256_SCR(43); + SHA256_SCR(44); SHA256_SCR(45); SHA256_SCR(46); SHA256_SCR(47); + SHA256_SCR(48); SHA256_SCR(49); SHA256_SCR(50); SHA256_SCR(51); + SHA256_SCR(52); SHA256_SCR(53); SHA256_SCR(54); SHA256_SCR(55); + SHA256_SCR(56); SHA256_SCR(57); SHA256_SCR(58); SHA256_SCR(59); + SHA256_SCR(60); SHA256_SCR(61); SHA256_SCR(62); SHA256_SCR(63); + + wv[0] = ctx->h[0]; wv[1] = ctx->h[1]; + wv[2] = ctx->h[2]; wv[3] = ctx->h[3]; + wv[4] = ctx->h[4]; wv[5] = ctx->h[5]; + wv[6] = ctx->h[6]; wv[7] = ctx->h[7]; + + SHA256_EXP(0,1,2,3,4,5,6,7, 0); SHA256_EXP(7,0,1,2,3,4,5,6, 1); + SHA256_EXP(6,7,0,1,2,3,4,5, 2); SHA256_EXP(5,6,7,0,1,2,3,4, 3); + SHA256_EXP(4,5,6,7,0,1,2,3, 4); SHA256_EXP(3,4,5,6,7,0,1,2, 5); + SHA256_EXP(2,3,4,5,6,7,0,1, 6); SHA256_EXP(1,2,3,4,5,6,7,0, 7); + SHA256_EXP(0,1,2,3,4,5,6,7, 8); SHA256_EXP(7,0,1,2,3,4,5,6, 9); + SHA256_EXP(6,7,0,1,2,3,4,5,10); SHA256_EXP(5,6,7,0,1,2,3,4,11); + SHA256_EXP(4,5,6,7,0,1,2,3,12); SHA256_EXP(3,4,5,6,7,0,1,2,13); + SHA256_EXP(2,3,4,5,6,7,0,1,14); SHA256_EXP(1,2,3,4,5,6,7,0,15); + SHA256_EXP(0,1,2,3,4,5,6,7,16); SHA256_EXP(7,0,1,2,3,4,5,6,17); + SHA256_EXP(6,7,0,1,2,3,4,5,18); SHA256_EXP(5,6,7,0,1,2,3,4,19); + SHA256_EXP(4,5,6,7,0,1,2,3,20); SHA256_EXP(3,4,5,6,7,0,1,2,21); + SHA256_EXP(2,3,4,5,6,7,0,1,22); SHA256_EXP(1,2,3,4,5,6,7,0,23); + SHA256_EXP(0,1,2,3,4,5,6,7,24); 
SHA256_EXP(7,0,1,2,3,4,5,6,25); + SHA256_EXP(6,7,0,1,2,3,4,5,26); SHA256_EXP(5,6,7,0,1,2,3,4,27); + SHA256_EXP(4,5,6,7,0,1,2,3,28); SHA256_EXP(3,4,5,6,7,0,1,2,29); + SHA256_EXP(2,3,4,5,6,7,0,1,30); SHA256_EXP(1,2,3,4,5,6,7,0,31); + SHA256_EXP(0,1,2,3,4,5,6,7,32); SHA256_EXP(7,0,1,2,3,4,5,6,33); + SHA256_EXP(6,7,0,1,2,3,4,5,34); SHA256_EXP(5,6,7,0,1,2,3,4,35); + SHA256_EXP(4,5,6,7,0,1,2,3,36); SHA256_EXP(3,4,5,6,7,0,1,2,37); + SHA256_EXP(2,3,4,5,6,7,0,1,38); SHA256_EXP(1,2,3,4,5,6,7,0,39); + SHA256_EXP(0,1,2,3,4,5,6,7,40); SHA256_EXP(7,0,1,2,3,4,5,6,41); + SHA256_EXP(6,7,0,1,2,3,4,5,42); SHA256_EXP(5,6,7,0,1,2,3,4,43); + SHA256_EXP(4,5,6,7,0,1,2,3,44); SHA256_EXP(3,4,5,6,7,0,1,2,45); + SHA256_EXP(2,3,4,5,6,7,0,1,46); SHA256_EXP(1,2,3,4,5,6,7,0,47); + SHA256_EXP(0,1,2,3,4,5,6,7,48); SHA256_EXP(7,0,1,2,3,4,5,6,49); + SHA256_EXP(6,7,0,1,2,3,4,5,50); SHA256_EXP(5,6,7,0,1,2,3,4,51); + SHA256_EXP(4,5,6,7,0,1,2,3,52); SHA256_EXP(3,4,5,6,7,0,1,2,53); + SHA256_EXP(2,3,4,5,6,7,0,1,54); SHA256_EXP(1,2,3,4,5,6,7,0,55); + SHA256_EXP(0,1,2,3,4,5,6,7,56); SHA256_EXP(7,0,1,2,3,4,5,6,57); + SHA256_EXP(6,7,0,1,2,3,4,5,58); SHA256_EXP(5,6,7,0,1,2,3,4,59); + SHA256_EXP(4,5,6,7,0,1,2,3,60); SHA256_EXP(3,4,5,6,7,0,1,2,61); + SHA256_EXP(2,3,4,5,6,7,0,1,62); SHA256_EXP(1,2,3,4,5,6,7,0,63); + + ctx->h[0] += wv[0]; ctx->h[1] += wv[1]; + ctx->h[2] += wv[2]; ctx->h[3] += wv[3]; + ctx->h[4] += wv[4]; ctx->h[5] += wv[5]; + ctx->h[6] += wv[6]; ctx->h[7] += wv[7]; +#endif /* !UNROLL_LOOPS */ + } +} + +void nv_sha256(const NvU8 *message, NvU32 len, NvU8 *digest) +{ + nv_sha256_ctx ctx; + + nv_sha256_init(&ctx); + nv_sha256_update(&ctx, message, len); + nv_sha256_final(&ctx, digest); +} + +void nv_sha256_init(nv_sha256_ctx *ctx) +{ +#ifndef UNROLL_LOOPS + int i; + for (i = 0; i < 8; i++) { + ctx->h[i] = sha256_h0[i]; + } +#else + ctx->h[0] = sha256_h0[0]; ctx->h[1] = sha256_h0[1]; + ctx->h[2] = sha256_h0[2]; ctx->h[3] = sha256_h0[3]; + ctx->h[4] = sha256_h0[4]; ctx->h[5] = sha256_h0[5]; + ctx->h[6] = sha256_h0[6]; ctx->h[7] = sha256_h0[7]; +#endif /* !UNROLL_LOOPS */ + + ctx->len = 0; + ctx->tot_len = 0; +} + +void nv_sha256_update(nv_sha256_ctx *ctx, const NvU8 *message, + NvU32 len) +{ + NvU32 block_nb; + NvU32 new_len, rem_len, tmp_len; + const NvU8 *shifted_message; + + tmp_len = NV_SHA256_BLOCK_SIZE - ctx->len; + rem_len = len < tmp_len ? 
len : tmp_len; + + NVMISC_MEMCPY(&ctx->block[ctx->len], message, rem_len); + + if (ctx->len + len < NV_SHA256_BLOCK_SIZE) { + ctx->len += len; + return; + } + + new_len = len - rem_len; + block_nb = new_len / NV_SHA256_BLOCK_SIZE; + + shifted_message = message + rem_len; + + sha256_transf(ctx, ctx->block, 1); + sha256_transf(ctx, shifted_message, block_nb); + + rem_len = new_len % NV_SHA256_BLOCK_SIZE; + + NVMISC_MEMCPY(ctx->block, &shifted_message[block_nb << 6], rem_len); + + ctx->len = rem_len; + ctx->tot_len += (block_nb + 1) << 6; +} + +void nv_sha256_final(nv_sha256_ctx *ctx, NvU8 *digest) +{ + NvU32 block_nb; + NvU32 pm_len; + NvU32 len_b; + +#ifndef UNROLL_LOOPS + int i; +#endif + + block_nb = (1 + ((NV_SHA256_BLOCK_SIZE - 9) + < (ctx->len % NV_SHA256_BLOCK_SIZE))); + + len_b = (ctx->tot_len + ctx->len) << 3; + pm_len = block_nb << 6; + + NVMISC_MEMSET(ctx->block + ctx->len, 0, pm_len - ctx->len); + ctx->block[ctx->len] = 0x80; + UNPACK32(len_b, ctx->block + pm_len - 4); + + sha256_transf(ctx, ctx->block, block_nb); + +#ifndef UNROLL_LOOPS + for (i = 0 ; i < 8; i++) { + UNPACK32(ctx->h[i], &digest[i << 2]); + } +#else + UNPACK32(ctx->h[0], &digest[ 0]); + UNPACK32(ctx->h[1], &digest[ 4]); + UNPACK32(ctx->h[2], &digest[ 8]); + UNPACK32(ctx->h[3], &digest[12]); + UNPACK32(ctx->h[4], &digest[16]); + UNPACK32(ctx->h[5], &digest[20]); + UNPACK32(ctx->h[6], &digest[24]); + UNPACK32(ctx->h[7], &digest[28]); +#endif /* !UNROLL_LOOPS */ +} + +void nv_sha256_noPad(nv_sha256_ctx *ctx, NvU8 *digest) +{ + int i; + + for (i = 0 ; i < 8; i++) { + UNPACK32(ctx->h[i], &digest[i << 2]); + } +} diff --git a/src/common/unix/common/inc/nv-float.h b/src/common/unix/common/inc/nv-float.h new file mode 100644 index 000000000..95fc71941 --- /dev/null +++ b/src/common/unix/common/inc/nv-float.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
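nvSha256.c exposes both the incremental nv_sha256_init / nv_sha256_update / nv_sha256_final interface and a one-shot wrapper, nv_sha256, that hashes a complete buffer into a 32-byte digest. A host-side usage sketch (printf is only for illustration; for the well-known test vector "abc" the digest begins ba7816bf...):

#include <stdio.h>
#include "nvSha256.h"

int main(void)
{
    static const NvU8 message[] = "abc";
    NvU8 digest[32];
    int i;

    /* One-shot: equivalent to init, a single update, then final. */
    nv_sha256(message, sizeof(message) - 1, digest);

    for (i = 0; i < 32; i++) {
        printf("%02x", digest[i]);
    }
    printf("\n");
    return 0;
}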
+ */ + +#if !defined(NV_FLOAT_H) +#define NV_FLOAT_H + +/* Floating-point constants, expressed as integer constants */ + +#define NV_FLOAT_NEG_ONE 0xBF800000 /* -1.00f */ +#define NV_FLOAT_NEG_QUARTER 0xBE800000 /* -0.25f */ +#define NV_FLOAT_ZERO 0x00000000 /* 0.00f */ +#define NV_FLOAT_QUARTER 0x3E800000 /* 0.25f */ +#define NV_FLOAT_HALF 0x3F000000 /* 0.50f */ +#define NV_FLOAT_ONE 0x3F800000 /* 1.00f */ +#define NV_FLOAT_TWO 0x40000000 /* 2.00f */ +#define NV_FLOAT_255 0x437F0000 /* 255.00f */ +#define NV_FLOAT_1024 0x44800000 /* 1024.00f */ +#define NV_FLOAT_65536 0x47800000 /* 65536.00f */ + +#endif diff --git a/src/common/unix/common/inc/nv_assert.h b/src/common/unix/common/inc/nv_assert.h new file mode 100644 index 000000000..8c62ef527 --- /dev/null +++ b/src/common/unix/common/inc/nv_assert.h @@ -0,0 +1,82 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_ASSERT_H__ +#define __NV_ASSERT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * nvAssert() has three possible configurations: __COVERITY__, DEBUG, + * and non-DEBUG. In DEBUG builds, the includer should provide an + * implementation of nvDebugAssert(). + */ + +#if defined(__COVERITY__) + /* + * Coverity assert handling -- basically inform coverity that the + * condition is verified independently and coverity can assume that + * it is true. + */ + void __coverity_panic__(void); + + #define nvAssert(exp) \ + do { \ + if (exp) { \ + } else { \ + __coverity_panic__(); \ + } \ + } while (0) + +#elif defined(DEBUG) + + void nvDebugAssert(const char *expString, const char *filenameString, + const char *funcString, const unsigned int lineNumber); + + /* + * Assert that (exp) is TRUE. We use 'if (exp) { } else { fail }' + * instead of 'if (!(exp)) { fail }' to cause warnings when people + * accidentally write nvAssert(foo = bar) instead of nvAssert(foo == + * bar). 
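The DEBUG-build nvAssert defined just below is deliberately written as "if (exp) { } else { fail }" rather than "if (!(exp)) { fail }": in the former, an accidental assignment such as nvAssert(foo = bar) becomes the controlling expression of an if and typical compilers warn about it, whereas wrapping it in !(...) would silence that warning. A hypothetical stand-alone illustration (in a real build, DEBUG comes from the build system and nvDebugAssert() is supplied by the including component):

#define DEBUG 1                 /* normally set by the build, not in source */
#include <stdio.h>
#include "nv_assert.h"

/* Minimal nvDebugAssert() stub, for this sketch only. */
void nvDebugAssert(const char *expString, const char *filenameString,
                   const char *funcString, const unsigned int lineNumber)
{
    fprintf(stderr, "assertion '%s' failed at %s:%u in %s()\n",
            expString, filenameString, lineNumber, funcString);
}

int main(void)
{
    int channels = 2;

    nvAssert(channels == 2);    /* passes silently                           */
    /* nvAssert(channels = 3);     would draw an "assignment used as truth
                                   value" style warning, which is the point
                                   of the if/else form of the macro.         */
    return 0;
}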
+ */ + #define nvAssert(exp) \ + do { \ + if (exp) { \ + } else { \ + nvDebugAssert(#exp, __FILE__, __FUNCTION__, __LINE__); \ + } \ + } while (0) + +#else + + #define nvAssert(exp) {} + +#endif + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_ASSERT_H__ */ diff --git a/src/common/unix/common/inc/nv_common_utils.h b/src/common/unix/common/inc/nv_common_utils.h new file mode 100644 index 000000000..a7701d1ac --- /dev/null +++ b/src/common/unix/common/inc/nv_common_utils.h @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_COMMON_UTILS_H__ +#define __NV_COMMON_UTILS_H__ + +#include "nvtypes.h" + +#if !defined(TRUE) +#define TRUE NV_TRUE +#endif + +#if !defined(FALSE) +#define FALSE NV_FALSE +#endif + +#define NV_IS_UNSIGNED(x) ((__typeof__(x))-1 > 0) + +/* Get the length of a statically-sized array. */ +#define ARRAY_LEN(_arr) (sizeof(_arr) / sizeof(_arr[0])) + +#define NV_INVALID_HEAD 0xFFFFFFFF + +#define NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION (~0) + +#if !defined(NV_MIN) +# define NV_MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +#define NV_MIN3(a,b,c) NV_MIN(NV_MIN(a, b), c) +#define NV_MIN4(a,b,c,d) NV_MIN3(NV_MIN(a,b),c,d) + +#if !defined(NV_MAX) +# define NV_MAX(a,b) (((a)>(b))?(a):(b)) +#endif + +#define NV_MAX3(a,b,c) NV_MAX(NV_MAX(a, b), c) +#define NV_MAX4(a,b,c,d) NV_MAX3(NV_MAX(a,b),c,d) + +static inline int NV_LIMIT_VAL_TO_MIN_MAX(int val, int min, int max) +{ + if (val < min) { + return min; + } + if (val > max) { + return max; + } + return val; +} + +#define NV_ROUNDUP_DIV(x,y) ((x) / (y) + (((x) % (y)) ? 1 : 0)) + +/* + * Macros used for computing palette entries: + * + * NV_UNDER_REPLICATE(val, source_size, result_size) expands a value + * of source_size bits into a value of target_size bits by shifting + * the source value into the high bits and replicating the high bits + * of the value into the low bits of the result. + * + * PALETTE_DEPTH_SHIFT(val, w) maps a colormap entry for a component + * that has w bits to an appropriate entry in a LUT of 256 entries. 
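A few worked values for the helpers in this header, including the bit replication described above (its definition follows immediately below): replicating the high bits into the low bits means a full-scale 5-bit value of 0x1F expands to 0xFF rather than 0xF8, so maximum input still maps to maximum output.

#include <stdio.h>
#include "nv_common_utils.h"

int main(void)
{
    int widths[] = { 640, 800, 1024, 1920 };

    printf("%zu\n", ARRAY_LEN(widths));                             /* 4: element count of a real array */
    printf("%d\n",  NV_ROUNDUP_DIV(1080, 64));                      /* 17: ceiling division, no FP      */
    printf("%d\n",  NV_LIMIT_VAL_TO_MIN_MAX(300, 0, 255));          /* 255: clamp into [min, max]       */

    printf("0x%02x\n", NV_UNDER_REPLICATE(0x1F, 5, 8));             /* 0xFF: (0x1F << 3) | (0x1F >> 2)  */
    printf("0x%02x\n", (unsigned int) PALETTE_DEPTH_SHIFT(0x2A, 6)); /* 0xAA: 6-bit value widened to 8  */
    return 0;
}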
+ */ +static inline unsigned int NV_UNDER_REPLICATE(unsigned short val, + int source_size, + int result_size) +{ + return (val << (result_size - source_size)) | + (val >> ((source_size << 1) - result_size)); +} + + +static inline unsigned short PALETTE_DEPTH_SHIFT(unsigned short val, int depth) +{ + return NV_UNDER_REPLICATE(val, depth, 8); +} + +#endif /* __NV_COMMON_UTILS_H__ */ diff --git a/src/common/unix/common/inc/nv_dpy_id.h b/src/common/unix/common/inc/nv_dpy_id.h new file mode 100644 index 000000000..7c50e9767 --- /dev/null +++ b/src/common/unix/common/inc/nv_dpy_id.h @@ -0,0 +1,369 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * This header file defines the types NVDpyId and NVDpyIdList, as well + * as inline functions to manipulate these types. NVDpyId and + * NVDpyIdList should be treated as opaque by includers of this header + * file. + */ + +#ifndef __NV_DPY_ID_H__ +#define __NV_DPY_ID_H__ + +#include "nvtypes.h" +#include "nvmisc.h" +#include /* NV_MAX_SUBDEVICES */ + +typedef struct { + NvU32 opaqueDpyId; +} NVDpyId; + +typedef struct { + NvU32 opaqueDpyIdList; +} NVDpyIdList; + +#define NV_DPY_ID_MAX_SUBDEVICES NV_MAX_SUBDEVICES +#define NV_DPY_ID_MAX_DPYS_IN_LIST 32 + +/* + * For use in combination with nvDpyIdToPrintFormat(); e.g., + * + * printf("dpy id: " NV_DPY_ID_PRINT_FORMAT "\n", + * nvDpyIdToPrintFormat(dpyId)); + * + * The includer should not make assumptions about the return type of + * nvDpyIdToPrintFormat(). 
+ */ +#define NV_DPY_ID_PRINT_FORMAT "0x%08x" + +/* functions to return an invalid DpyId and empty DpyIdList */ + +static inline NVDpyId nvInvalidDpyId(void) +{ + NVDpyId dpyId = { 0 }; + return dpyId; +} + +static inline NVDpyIdList nvEmptyDpyIdList(void) +{ + NVDpyIdList dpyIdList = { 0 }; + return dpyIdList; +} + +static inline NVDpyIdList nvAllDpyIdList(void) +{ + NVDpyIdList dpyIdList = { ~0U }; + return dpyIdList; +} + +static inline void +nvEmptyDpyIdListSubDeviceArray(NVDpyIdList dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex; + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + dpyIdList[dispIndex] = nvEmptyDpyIdList(); + } +} + +/* set operations on DpyIds and DpyIdLists: Add, Subtract, Intersect, Xor */ + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdToDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList | + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +/* Passing an invalid display ID makes this function return an empty list. */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdToEmptyDpyIdList(NVDpyId dpyId) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvAddDpyIdListToDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListB.opaqueDpyIdList | + dpyIdListA.opaqueDpyIdList; + return tmpDpyIdList; +} + +/* Returns: dpyIdList - dpyId */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvDpyIdListMinusDpyId(NVDpyIdList dpyIdList, NVDpyId dpyId) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList & + (~dpyId.opaqueDpyId); + return tmpDpyIdList; +} + +/* Returns: dpyIdListA - dpyIdListB */ +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvDpyIdListMinusDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList & + (~dpyIdListB.opaqueDpyIdList); + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvIntersectDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList & + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvIntersectDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList & + dpyIdListB.opaqueDpyIdList; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvXorDpyIdAndDpyIdList(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdList.opaqueDpyIdList ^ + dpyId.opaqueDpyId; + return tmpDpyIdList; +} + +static inline __attribute__ ((warn_unused_result)) +NVDpyIdList nvXorDpyIdListAndDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList tmpDpyIdList; + tmpDpyIdList.opaqueDpyIdList = dpyIdListA.opaqueDpyIdList ^ + dpyIdListB.opaqueDpyIdList; + return tmpDpyIdList; +} + + +/* boolean checks */ + +static inline NvBool nvDpyIdIsInDpyIdList(NVDpyId dpyId, + NVDpyIdList dpyIdList) +{ + return !!(dpyIdList.opaqueDpyIdList & 
dpyId.opaqueDpyId); +} + +static inline NvBool nvDpyIdIsInvalid(NVDpyId dpyId) +{ + return (dpyId.opaqueDpyId == 0); +} + +static inline NvBool nvDpyIdListIsEmpty(NVDpyIdList dpyIdList) +{ + return (dpyIdList.opaqueDpyIdList == 0); +} + +static inline NvBool +nvDpyIdListSubDeviceArrayIsEmpty(NVDpyIdList + dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex; + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + if (!nvDpyIdListIsEmpty(dpyIdList[dispIndex])) { + return NV_FALSE; + } + } + return NV_TRUE; +} + + +static inline NvBool nvDpyIdsAreEqual(NVDpyId dpyIdA, NVDpyId dpyIdB) +{ + return (dpyIdA.opaqueDpyId == dpyIdB.opaqueDpyId); +} + +static inline NvBool nvDpyIdListsAreEqual(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + return (dpyIdListA.opaqueDpyIdList == dpyIdListB.opaqueDpyIdList); +} + +static inline NvBool nvDpyIdListIsASubSetofDpyIdList(NVDpyIdList dpyIdListA, + NVDpyIdList dpyIdListB) +{ + NVDpyIdList intersectedDpyIdList = + nvIntersectDpyIdListAndDpyIdList(dpyIdListA, dpyIdListB); + + return nvDpyIdListsAreEqual(intersectedDpyIdList, dpyIdListA); +} + + +/* + * retrieve the individual dpyIds from dpyIdList; if dpyId is invalid, + * start at the beginning of the list; otherwise, start at the dpyId + * after the specified dpyId + */ + +static inline __attribute__ ((warn_unused_result)) +NVDpyId nvNextDpyIdInDpyIdListUnsorted(NVDpyId dpyId, NVDpyIdList dpyIdList) +{ + if (nvDpyIdIsInvalid(dpyId)) { + dpyId.opaqueDpyId = 1; + } else { + dpyId.opaqueDpyId <<= 1; + } + + while (dpyId.opaqueDpyId) { + + if (nvDpyIdIsInDpyIdList(dpyId, dpyIdList)) { + return dpyId; + } + + dpyId.opaqueDpyId <<= 1; + } + + /* no dpyIds left in dpyIdlist; return the invalid dpyId */ + + return nvInvalidDpyId(); +} + +#define FOR_ALL_DPY_IDS(_dpyId, _dpyIdList) \ + for ((_dpyId) = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), \ + (_dpyIdList)); \ + !nvDpyIdIsInvalid(_dpyId); \ + (_dpyId) = nvNextDpyIdInDpyIdListUnsorted((_dpyId), \ + (_dpyIdList))) + +/* report how many dpyIds are in the dpyIdList */ + +static inline int nvCountDpyIdsInDpyIdList(NVDpyIdList dpyIdList) +{ + return nvPopCount32(dpyIdList.opaqueDpyIdList); +} + +static inline int +nvCountDpyIdsInDpyIdListSubDeviceArray(NVDpyIdList + dpyIdList[NV_DPY_ID_MAX_SUBDEVICES]) +{ + int dispIndex, n = 0; + + for (dispIndex = 0; dispIndex < NV_DPY_ID_MAX_SUBDEVICES; dispIndex++) { + n += nvCountDpyIdsInDpyIdList(dpyIdList[dispIndex]); + } + + return n; +} + +/* convert between dpyId/dpyIdList and NV-CONTROL values */ + +static inline int nvDpyIdToNvControlVal(NVDpyId dpyId) +{ + return (int) dpyId.opaqueDpyId; +} + +static inline int nvDpyIdListToNvControlVal(NVDpyIdList dpyIdList) +{ + return (int) dpyIdList.opaqueDpyIdList; +} + +static inline NVDpyId nvNvControlValToDpyId(int val) +{ + NVDpyId dpyId; + dpyId.opaqueDpyId = (val == 0) ? 0 : 1 << (__builtin_ffs(val)-1); + return dpyId; +} + +static inline NVDpyIdList nvNvControlValToDpyIdList(int val) +{ + NVDpyIdList dpyIdList; + dpyIdList.opaqueDpyIdList = val; + return dpyIdList; +} + + +/* convert between dpyId and NvU32 */ + +static inline NVDpyId nvNvU32ToDpyId(NvU32 val) +{ + NVDpyId dpyId; + dpyId.opaqueDpyId = (val == 0) ? 
0 : 1 << (__builtin_ffs(val)-1); + return dpyId; +} + +static inline NVDpyIdList nvNvU32ToDpyIdList(NvU32 val) +{ + NVDpyIdList dpyIdList; + dpyIdList.opaqueDpyIdList = val; + return dpyIdList; +} + +static inline NvU32 nvDpyIdToNvU32(NVDpyId dpyId) +{ + return dpyId.opaqueDpyId; +} + +static inline NvU32 nvDpyIdListToNvU32(NVDpyIdList dpyIdList) +{ + return dpyIdList.opaqueDpyIdList; +} + +/* Return the bit position of dpyId: a number in the range [0..31]. */ +static inline NvU32 nvDpyIdToIndex(NVDpyId dpyId) +{ + return __builtin_ffs(dpyId.opaqueDpyId) - 1; +} + +/* Return a display ID that is not in the list passed in. */ + +static inline NVDpyId nvNewDpyId(NVDpyIdList excludeList) +{ + NVDpyId dpyId; + if (~excludeList.opaqueDpyIdList == 0) { + return nvInvalidDpyId(); + } + dpyId.opaqueDpyId = + 1U << (__builtin_ffs(~excludeList.opaqueDpyIdList) - 1); + return dpyId; +} + +/* See comment for NV_DPY_ID_PRINT_FORMAT. */ +static inline NvU32 nvDpyIdToPrintFormat(NVDpyId dpyId) +{ + return nvDpyIdToNvU32(dpyId); +} + +/* Prevent usage of opaque values. */ +#define opaqueDpyId __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H +#define opaqueDpyIdList __ERROR_ACCESS_ME_VIA_NV_DPY_ID_H + +#endif /* __NV_DPY_ID_H__ */ diff --git a/src/common/unix/common/inc/nv_mode_timings.h b/src/common/unix/common/inc/nv_mode_timings.h new file mode 100644 index 000000000..7854d8ae1 --- /dev/null +++ b/src/common/unix/common/inc/nv_mode_timings.h @@ -0,0 +1,163 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_MODE_TIMINGS_H__ +#define __NV_MODE_TIMINGS_H__ + +#include "nvtypes.h" + +/* + * NvModeTimings: hardware-independent modetiming structure. + * + * For interlaced modes, the vertical values are stored in frame size, + * rather than field size (e.g., for 1080i modes, vVisible is 1080, + * not 540); similarly, for doublescan modes, the vertical values are + * stored in normal framesize (not doubled vertically). + * + * RRx1k should be field rate for interlaced modes, and should be + * frame rate for doubleScan modes; e.g., 1920x1080@60i and + * 640x480@60d, not 1920x1080@30i or 640x480@120d. + * + * RRx1k is also the "real" refresh rate (time spent displaying one eye) + * for HDMI 3D frame packed modes, e.g. 47940 (2x24hz) for 1920x1080@24 + * HDMI 3D mode. 
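The NVDpyId / NVDpyIdList helpers above represent a set of display IDs as bits in a 32-bit mask, with all manipulation going through the inline functions rather than the opaque fields (which the #defines at the end of the header deliberately poison). A typical usage pattern, assuming the usual NVIDIA SDK headers are on the include path:

#include <stdio.h>
#include "nv_dpy_id.h"

int main(void)
{
    NVDpyIdList list = nvEmptyDpyIdList();
    NVDpyId dpyId;
    int i;

    /* Allocate three IDs that are not yet in the list and add them. */
    for (i = 0; i < 3; i++) {
        NVDpyId newId = nvNewDpyId(list);
        list = nvAddDpyIdToDpyIdList(newId, list);
    }

    FOR_ALL_DPY_IDS(dpyId, list) {
        printf("dpy id: " NV_DPY_ID_PRINT_FORMAT "\n", nvDpyIdToPrintFormat(dpyId));
    }

    printf("count: %d\n", nvCountDpyIdsInDpyIdList(list));    /* 3 */
    return 0;
}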
This needs to be halved again for all user-visible reported + * refresh rates (which needs to report time spent between each vblank, or + * each pair of eyes). + * + * pixelClock is doubled for doubleScan and HDMI 3D frame packed modes. + * + * The intent is that this structure match the X configuration file + * ModeLine. + * + * hdmi3D reflects whether this mode is a HDMI 3D frame packed mode. True only + * if the user selected HDMI 3D stereo mode and the GPU supports it. If true, + * then pixelClock is doubled. + * + * yuv420Mode reflects whether this mode requires YUV 4:2:0 decimation into a + * half-width output through headsurface (SW YUV420) or >=nvdisplay 4.0 HW CSC + * (HW YUV420). + * + * If a mode requires SW YUV 4:2:0 emulation, the pixelClock and width values + * in NvModeTimings will still be the full width values specified by the mode + * parsed from the EDID (e.g. 3840x2160@60), but the pixelClock and width values + * in NVHwModeTimingsEvo will be the "real" half width values programmed in HW + * and rendered to through a headSurface transform (e.g. 1920x2160@60). If a + * mode requires HW YUV 4:2:0 CSC, the pixelClock and width values in both + * NvModeTimings and NVHwModeTimingsEvo will be full width, and the decimation + * to the half width scanout surface is performed in HW. In both cases, only + * the full width values should ever be reported to the client. + */ + +enum NvYuv420Mode { + NV_YUV420_MODE_NONE = 0, + NV_YUV420_MODE_SW, + NV_YUV420_MODE_HW, +}; + +typedef struct _NvModeTimings { + NvU32 RRx1k; + NvU32 pixelClockHz; /* in Hz units */ + NvU16 hVisible; + NvU16 hSyncStart; + NvU16 hSyncEnd; + NvU16 hTotal; + NvU16 hSkew; /* Just placeholder for XRRModeInfo.hSkew */ + NvU16 vVisible; + NvU16 vSyncStart; + NvU16 vSyncEnd; + NvU16 vTotal; + struct { + NvU16 w; + NvU16 h; + } sizeMM; + NvBool interlaced; + NvBool doubleScan; + /* + * Note: hSyncPos and vSyncPos are ignored, and the polarity is positive if + * [hv]SyncNeg is false. However, X.Org has separate flags for each, and + * treats modes with positive, negative, both, and neither as separate + * modes. + */ + NvBool hSyncPos; + NvBool hSyncNeg; + NvBool vSyncPos; + NvBool vSyncNeg; + NvBool hdmi3D; + enum NvYuv420Mode yuv420Mode; +} NvModeTimings, *NvModeTimingsPtr; + +static inline NvBool NvModeTimingsMatch(const NvModeTimings *pA, + const NvModeTimings *pB, + NvBool ignoreSizeMM, + NvBool ignoreRRx1k) +{ + /* + * Ignore sizeMM and/or RRx1k, if requested. The sizeMM and RRx1k fields + * don't impact hardware modetiming values, so it is reasonable that some + * callers may choose to ignore them when comparing NvModeTimings. 
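+ *
+ * A minimal usage sketch (illustrative only; pExisting and pRequested are
+ * hypothetical NvModeTimings pointers, not part of this header): a caller
+ * interested only in whether two modes program the same raster can pass
+ * NV_TRUE for both ignore flags:
+ *
+ *   NvBool sameRaster = NvModeTimingsMatch(pExisting, pRequested,
+ *                                          NV_TRUE, NV_TRUE);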
+ */ + NvBool sizeMMmatches = ignoreSizeMM || ((pA->sizeMM.w == pB->sizeMM.w) && + (pA->sizeMM.h == pB->sizeMM.h)); + + NvBool rrx1kMatches = ignoreRRx1k || (pA->RRx1k == pB->RRx1k); + + return (sizeMMmatches && rrx1kMatches && + (pA->pixelClockHz == pB->pixelClockHz) && + (pA->hVisible == pB->hVisible) && + (pA->hSyncStart == pB->hSyncStart) && + (pA->hSyncEnd == pB->hSyncEnd) && + (pA->hTotal == pB->hTotal) && + (pA->hSkew == pB->hSkew) && + (pA->vVisible == pB->vVisible) && + (pA->vSyncStart == pB->vSyncStart) && + (pA->vSyncEnd == pB->vSyncEnd) && + (pA->vTotal == pB->vTotal) && + (pA->interlaced == pB->interlaced) && + (pA->doubleScan == pB->doubleScan) && + (pA->hSyncPos == pB->hSyncPos) && + (pA->hSyncNeg == pB->hSyncNeg) && + (pA->vSyncPos == pB->vSyncPos) && + (pA->vSyncNeg == pB->vSyncNeg) && + (pA->hdmi3D == pB->hdmi3D) && + (pA->yuv420Mode == pB->yuv420Mode)); +} + +/* + * Convert between Hz and kHz. + * + * Note that Hz ==> kHz ==> Hz is lossy. + * + * We do +500 before /1000 in order to round, rather than truncate. + */ +static inline NvU32 HzToKHz(NvU32 hz) +{ + return (hz + 500) / 1000; +} + +static inline NvU32 KHzToHz(NvU32 kHz) +{ + return kHz * 1000; +} + + +#endif /* __NV_MODE_TIMINGS_H__ */ diff --git a/src/common/unix/common/utils/interface/nv_memory_tracker.h b/src/common/unix/common/utils/interface/nv_memory_tracker.h new file mode 100644 index 000000000..75f0c03fe --- /dev/null +++ b/src/common/unix/common/utils/interface/nv_memory_tracker.h @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_MEMORY_TRACKER_H__ +#define __NV_MEMORY_TRACKER_H__ + +#include "nv_list.h" + +#include /* size_t */ + +/* + * The following functions allocate and free memory, and track the + * allocations in a linked list, such that the includer can call + * nvMemoryTrackerPrintUnfreedAllocations() to print any leaked + * allocations. 
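+ *
+ * A minimal usage sketch (an assumption, not part of this interface): the
+ * includer keeps one list and wraps the tracked calls in macros so that
+ * __FILE__ and __LINE__ are captured automatically.  "myAllocList",
+ * "MY_ALLOC" and "MY_FREE" are hypothetical names, and the list is assumed
+ * to be initialized with the list-initialization helper from nv_list.h:
+ *
+ *   static NVListRec myAllocList;
+ *
+ *   #define MY_ALLOC(size) \
+ *       nvMemoryTrackerTrackedAlloc(&myAllocList, (size), __LINE__, __FILE__)
+ *   #define MY_FREE(ptr)   nvMemoryTrackerTrackedFree(ptr)
+ *
+ * At teardown, calling nvMemoryTrackerPrintUnfreedAllocations(&myAllocList)
+ * reports anything still outstanding.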
+ */ + +void *nvMemoryTrackerTrackedAlloc(NVListPtr list, size_t size, + int line, const char *file); + +void *nvMemoryTrackerTrackedCalloc(NVListPtr list, size_t nmemb, size_t size, + int line, const char *file); + +void *nvMemoryTrackerTrackedRealloc(NVListPtr list, void *ptr, size_t size, + int line, const char *file); + +void nvMemoryTrackerTrackedFree(void *ptr); + +void nvMemoryTrackerPrintUnfreedAllocations(NVListPtr list); + +/* + * Users of nv_memory_tracker must provide implementations of the + * following helper functions. + */ +void *nvMemoryTrackerAlloc(size_t size); +void nvMemoryTrackerFree(void *ptr, size_t size); +void nvMemoryTrackerPrintf(const char *format, ...) + __attribute__((format (printf, 1, 2))); +void nvMemoryTrackerMemset(void *s, int c, size_t n); +void nvMemoryTrackerMemcpy(void *dest, const void *src, size_t n); + +#endif /* __NV_MEMORY_TRACKER_H__ */ diff --git a/src/common/unix/common/utils/interface/nv_mode_timings_utils.h b/src/common/unix/common/utils/interface/nv_mode_timings_utils.h new file mode 100644 index 000000000..72deb14f6 --- /dev/null +++ b/src/common/unix/common/utils/interface/nv_mode_timings_utils.h @@ -0,0 +1,135 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_MODE_TIMINGS_UTILS_H__ +#define __NV_MODE_TIMINGS_UTILS_H__ + +/* + * Utility code to operate on NvModeTimings and NVT_TIMINGs. + */ + +#include "nvtypes.h" + +#include "nv_mode_timings.h" +#include "timing/nvtiming.h" + +#include /* size_t */ + +/* + * Macros used for printing values divided by 1000 without floating + * point division and printing. + * + * Example: + * printf("pclk is %.2f MHz\n", pclk_khz / 1000.0f); + * becomes: + * printf("pclk is " NV_FMT_DIV_1000_POINT_2 " MHz\n", + * NV_VA_DIV_1000_POINT_2(pclk_khz)); + * + * Different precision controls the number of digits printed after the + * decimal point. Bias is added for correct rounding. + */ +#define NV_FMT_DIV_1000_POINT_1 "%d.%d" +#define NV_FMT_DIV_1000_POINT_2 "%d.%02d" +#define NV_VA_DIV_1000_POINT_1(x) \ + ((x) + 49) / 1000, (((x) + 49) % 1000) / 100 +#define NV_VA_DIV_1000_POINT_2(x) \ + ((x) + 4) / 1000, (((x) + 4) % 1000) / 10 + +/* + * macro to use integer math to convert an NvU32 kHz value to Hz; we + * add 500 Hz before dividing by 1000 to round rather than truncate. 
+ */ + +#define NV_U32_KHZ_TO_HZ(_x) (((_x) + 500) / 1000) + +/* + * NVT_TIMING stores HVisible multiplied by the horizontal replication + * factor (e.g., a 720 mode with hrep=2 has HVisible of 1440). For + * reporting purposes, divide HVisible by hrep. + */ +static inline NvU16 NV_NVT_TIMING_HVISIBLE(const NVT_TIMING *pTiming) +{ + if (pTiming->etc.rep > 1) { + return pTiming->HVisible / pTiming->etc.rep; + } else { + return pTiming->HVisible; + } +} + +/* + * NVT_TIMING stores VVisible as half height when interlaced (e.g., + * 1920x1080i has VVisible 540). + */ +static inline NvU16 NV_NVT_TIMING_VVISIBLE(const NVT_TIMING *pTiming) +{ + return pTiming->VVisible * (pTiming->interlaced ? 2 : 1); +} + +/* + * When non-zero, NVT_TIMING::etc::aspect contains bytes 12, 13, and + * 14 from the Detailed Timing Definition of the EDID. This contains + * a packed width and height. The width and height is either an + * aspect ratio (16:9 or 4:3), or a physical image size in + * millimeters. See Table 3.21, and the subsequent notes, in the + * E-EDID 1.4 specification. + */ +static inline NvU16 NV_NVT_TIMING_IMAGE_SIZE_WIDTH(const NVT_TIMING *pTiming) +{ + return (pTiming->etc.aspect >> 16) & 0xFFFF; +} + +static inline NvU16 NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(const NVT_TIMING *pTiming) +{ + return pTiming->etc.aspect & 0xFFFF; +} + +static inline NvBool NV_NVT_TIMING_HAS_ASPECT_RATIO(const NVT_TIMING *pTiming) +{ + NvU16 w = NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming); + NvU16 h = NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming); + + return (((w == 16) && (h == 9)) || + ((w == 4) && (h == 3))); +} + +static inline NvBool NV_NVT_TIMING_HAS_IMAGE_SIZE(const NVT_TIMING *pTiming) +{ + return ((pTiming->etc.aspect != 0) && + !NV_NVT_TIMING_HAS_ASPECT_RATIO(pTiming)); +} + +NvBool IsEdid640x480_60_NVT_TIMING(const NVT_TIMING *pTiming); + +void NVT_TIMINGtoNvModeTimings(const NVT_TIMING *pTiming, + NvModeTimingsPtr pModeTimings); + +void nvBuildModeName(NvU16 width, NvU16 height, char *name, size_t nameLen); + +/* + * Users of nvBuildModeName() should provide an implementation of + * nvBuildModeNameSnprintf(). + */ +int nvBuildModeNameSnprintf(char *str, size_t size, const char *format, ...) + __attribute__((format (printf, 3, 4))); + +#endif /* __NV_MODE_TIMINGS_UTILS_H__ */ diff --git a/src/common/unix/common/utils/interface/nv_vasprintf.h b/src/common/unix/common/utils/interface/nv_vasprintf.h new file mode 100644 index 000000000..ec94beb51 --- /dev/null +++ b/src/common/unix/common/utils/interface/nv_vasprintf.h @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_VASPRINTF_H__ +#define __NV_VASPRINTF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/* + * nv_vasprintf() depends on nv_vasprintf_{alloc,free,vsnprintf}(). + * Those functions should be implemented by the user of + * nv_vasprintf(). + */ +void *nv_vasprintf_alloc(size_t size); +void nv_vasprintf_free(void *ptr); +int nv_vasprintf_vsnprintf(char *str, size_t size, + const char *format, va_list ap); + +char* nv_vasprintf(const char *f, va_list ap); + +/* + * NV_VSNPRINTF(): macro that assigns b using nv_vasprintf(); intended to + * be used by vararg printing functions. + * + * This macro allocates memory for b; the caller should free the + * memory when done. + */ + +#define NV_VSNPRINTF(b, f) do { \ + va_list ap; \ + va_start(ap, f); \ + (b) = nv_vasprintf(f, ap); \ + va_end(ap); \ +} while(0) + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_VASPRINTF_H__ */ diff --git a/src/common/unix/common/utils/interface/unix_rm_handle.h b/src/common/unix/common/utils/interface/unix_rm_handle.h new file mode 100644 index 000000000..d795fdac1 --- /dev/null +++ b/src/common/unix/common/utils/interface/unix_rm_handle.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __UNIX_RM_HANDLE_H__ +#define __UNIX_RM_HANDLE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +#define NV_UNIX_RM_HANDLE_INITIAL_HANDLES 512 +#define NV_UNIX_RM_HANDLE_BITMAP_SIZE(_numHandles) ((_numHandles) >> 5) + +#if defined(DEBUG) +typedef struct _nv_unix_rm_handle_allocation *NVUnixRmHandleAllocationPtr; + +typedef struct _nv_unix_rm_handle_allocation { + const char *file; + int line; +} NVUnixRmHandleAllocationRec; +#endif + +typedef struct _nv_unix_rm_handle_allocator *NVUnixRmHandleAllocatorPtr; + +typedef struct _nv_unix_rm_handle_allocator { + NvU32 rmClient; + NvU32 clientData; + + NvU32 *bitmap; + NvU32 maxHandles; + +#if defined(DEBUG) + NVUnixRmHandleAllocationRec *allocationTable; +#endif +} NVUnixRmHandleAllocatorRec; + +NvBool nvInitUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 rmClient, NvU32 clientData); + +NvU32 nvGenerateUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator); +void nvFreeUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 UnixRmHandle); + +void nvTearDownUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator); + +#if defined(DEBUG) + +#define NV_UNIX_RM_HANDLE_DEBUG_ERROR 0 +#define NV_UNIX_RM_HANDLE_DEBUG_MSG 1 +#define NV_UNIX_RM_HANDLE_DEBUG_VERBOSE 2 + +/* + * Users of the handle generator need to provide implementations + * of nvUnixRmHandleDebugAssert() and nvUnixRmHandleLogMsg() + * in builds where DEBUG is defined. + */ +void nvUnixRmHandleDebugAssert(const char *expString, + const char *filenameString, + const char *funcString, + const unsigned lineNumber); +#define nvUnixRmHandleAssert(_exp) \ + do { \ + if (_exp) { \ + } else { \ + nvUnixRmHandleDebugAssert(#_exp, __FILE__, __FUNCTION__, __LINE__); \ + } \ + } while (0) + +void nvUnixRmHandleLogMsg(NvU32 level, const char *fmt, ...) __attribute__((format (printf, 2, 3))); + +NvU32 nvDebugGenerateUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, + const char *file, int line); +#define nvGenerateUnixRmHandle(s) \ + nvDebugGenerateUnixRmHandle((s), __FILE__, __LINE__) + +void nvDebugFreeUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle); +#define nvFreeUnixRmHandle(n,s) nvDebugFreeUnixRmHandle((n), (s)) + +#else + +#define nvUnixRmHandleAssert(_exp) do {} while(0) +#define nvUnixRmHandleLogMsg(__fmt, ...) do {} while(0) + +#define nvGenerateUnixRmHandle(s) nvGenerateUnixRmHandleInternal((s)) +#define nvFreeUnixRmHandle(n, s) nvFreeUnixRmHandleInternal((n), (s)) + +#endif /* DEBUG */ + +/* + * Users of the handle generator always need to provide implementations + * of nvUnixRmHandleReallocMem(), and nvUnixRmHandleFreeMem(). + */ +void *nvUnixRmHandleReallocMem(void *oldPtr, NvLength newSize); +void nvUnixRmHandleFreeMem(void *ptr); + +#ifdef __cplusplus +}; +#endif + +#endif diff --git a/src/common/unix/common/utils/nv_memory_tracker.c b/src/common/unix/common/utils/nv_memory_tracker.c new file mode 100644 index 000000000..7b467d9ad --- /dev/null +++ b/src/common/unix/common/utils/nv_memory_tracker.c @@ -0,0 +1,230 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if defined(DEBUG) + +#include "nv_memory_tracker.h" + +/* + * Define NV_MEMORY_TRACKER_BACKTRACES in the including makefile to enable + * backtrace capture/reporting for memory leaks. E.g., + * NV_DEFINES += NV_MEMORY_TRACKER_BACKTRACES + * Note that this probably only works with glibc (backtrace() and friends are + * GNU extensions). + */ + +#if defined(NV_MEMORY_TRACKER_BACKTRACES) + #include /* backtrace() and backtrace_symbols() */ + #include /* free(3) */ + #define MAX_BACKTRACE_DEPTH 30 +#endif + + +typedef union { + struct { + NVListRec entry; + const char *file; + int line; + size_t size; +#if defined(NV_MEMORY_TRACKER_BACKTRACES) + void *backtrace[MAX_BACKTRACE_DEPTH]; + int backtraceSize; +#endif + } header; + /* + * Unused. For alignment purposes only. Guarantee alignment to + * twice pointer size. That is the alignment guaranteed by glibc: + * http://www.gnu.org/software/libc/manual/html_node/Aligned-Memory-Blocks.html + * which seems reasonable to match here. + */ + NvU8 align __attribute__((aligned(sizeof(void*) * 2))); +} NvMemoryAllocation; + + +static void PrintAllocationBacktrace(const NvMemoryAllocation *alloc) +{ +#if defined(NV_MEMORY_TRACKER_BACKTRACES) + char **symbols; + const int numSymbols = alloc->header.backtraceSize; + int j; + + symbols = backtrace_symbols(alloc->header.backtrace, numSymbols); + + if (symbols == NULL) { + return; + } + + nvMemoryTrackerPrintf("Allocation context:"); + + for (j = 0; j < numSymbols; j++) { + if (symbols[j] == NULL) { + continue; + } + + nvMemoryTrackerPrintf("#%-2d %s", j, symbols[j]); + } + free(symbols); +#endif +} + + +static void RegisterAllocation(NVListPtr list, NvMemoryAllocation *alloc, + const char *file, int line, size_t size) +{ + nvListAdd(&alloc->header.entry, list); + + alloc->header.file = file; + alloc->header.line = line; + alloc->header.size = size; + +#if defined(NV_MEMORY_TRACKER_BACKTRACES) + /* Record the backtrace at this point (only addresses, not symbols) */ + alloc->header.backtraceSize = + backtrace(alloc->header.backtrace, MAX_BACKTRACE_DEPTH); +#endif +} + + +static NvBool IsAllocationSane(NvMemoryAllocation *alloc) +{ + NVListPtr entry = &alloc->header.entry; + if (entry->prev->next != entry || entry->next->prev != entry) { + /* + * This will likely have already crashed, but we might as well + * report it if we can. 
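+ *
+ * The list is doubly linked, so a registered entry's neighbors must both
+ * point back at it; if they do not, the pointer was never tracked on this
+ * list (or its header has been overwritten), and we report rather than
+ * corrupt the list further.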
+ */ + nvMemoryTrackerPrintf("Attempted to free untracked memory %p!", + alloc + 1); + return NV_FALSE; + } + return NV_TRUE; +} + + +static void UnregisterAllocation(NvMemoryAllocation *alloc) +{ + if (!IsAllocationSane(alloc)) { + return; + } + + nvListDel(&alloc->header.entry); +} + + +void *nvMemoryTrackerTrackedAlloc(NVListPtr list, size_t size, + int line, const char *file) +{ + NvMemoryAllocation *alloc = nvMemoryTrackerAlloc(sizeof(*alloc) + size); + + if (alloc == NULL) { + return NULL; + } + + RegisterAllocation(list, alloc, file, line, size); + + return alloc + 1; +} + + +void *nvMemoryTrackerTrackedCalloc(NVListPtr list, size_t nmemb, size_t size, + int line, const char *file) +{ + size_t totalSize = size * nmemb; + void *ptr = nvMemoryTrackerTrackedAlloc(list, totalSize, line, file); + + if (ptr == NULL) { + return NULL; + } + + nvMemoryTrackerMemset(ptr, 0, totalSize); + + return ptr; +} + + +void *nvMemoryTrackerTrackedRealloc(NVListPtr list, void *ptr, size_t size, + int line, const char *file) +{ + NvMemoryAllocation *oldAlloc = NULL; + void *newptr; + + if (ptr == NULL) { + /* realloc with a ptr of NULL is equivalent to malloc. */ + return nvMemoryTrackerTrackedAlloc(list, size, line, file); + } + + if (size == 0) { + /* realloc with a size of 0 is equivalent to free. */ + nvMemoryTrackerTrackedFree(ptr); + return NULL; + } + + oldAlloc = ((NvMemoryAllocation *) ptr) - 1; + newptr = nvMemoryTrackerTrackedAlloc(list, size, line, file); + + if (newptr != NULL) { + nvMemoryTrackerMemcpy(newptr, ptr, NV_MIN(size, oldAlloc->header.size)); + nvMemoryTrackerTrackedFree(ptr); + } + + return newptr; +} + + +void nvMemoryTrackerTrackedFree(void *ptr) +{ + NvMemoryAllocation *alloc; + size_t size; + + if (ptr == NULL) { + return; + } + + alloc = ((NvMemoryAllocation *) ptr) - 1; + + UnregisterAllocation(alloc); + + size = alloc->header.size + sizeof(NvMemoryAllocation); + + /* Poison the memory. */ + nvMemoryTrackerMemset(alloc, 0x55, size); + + nvMemoryTrackerFree(alloc, size); +} + + +void nvMemoryTrackerPrintUnfreedAllocations(NVListPtr list) +{ + NvMemoryAllocation *iter; + + nvListForEachEntry(iter, list, header.entry) { + nvMemoryTrackerPrintf("Unfreed allocation: %18p (size: %5u) (%s:%d)", + iter + 1, + (unsigned int) iter->header.size, + iter->header.file, + iter->header.line); + PrintAllocationBacktrace(iter); + } +} + +#endif /* defined(DEBUG) */ diff --git a/src/common/unix/common/utils/nv_mode_timings_utils.c b/src/common/unix/common/utils/nv_mode_timings_utils.c new file mode 100644 index 000000000..26c0197d7 --- /dev/null +++ b/src/common/unix/common/utils/nv_mode_timings_utils.c @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv_mode_timings_utils.h" + +/* + * Check if this NVT_TIMING is the 640x480@60Hz Industry standard + * mode; but due to the lack of precision of the pclk field in the + * NVT_TIMING data structure, is not quite correct: pclk should be + * 2517.5, and rrx1k should be 59940. See bug 263631. + * + * Note that we check if rrx1k is either 60000 or 59940 because we may + * use this function immediately after receiving the NVT_TIMINGs from + * the EDID and patch rrx1k, or we may use this function later from + * NVT_TIMINGtoNvModeTimings(), at which point we'll have already + * patched rrx1k. + */ + +NvBool IsEdid640x480_60_NVT_TIMING(const NVT_TIMING *pTiming) +{ + return ((pTiming->pclk == 2518) && + (pTiming->HVisible == 640) && + (pTiming->VVisible == 480) && + (pTiming->HTotal == 800) && + (pTiming->HFrontPorch == 16) && + (pTiming->HSyncWidth == 96) && + (pTiming->VTotal == 525) && + (pTiming->VFrontPorch == 10) && + (pTiming->VSyncWidth == 2) && + (pTiming->HBorder == 0) && + (pTiming->VBorder == 0) && + (pTiming->HSyncPol == NVT_H_SYNC_NEGATIVE) && + (pTiming->VSyncPol == NVT_V_SYNC_NEGATIVE) && + (pTiming->interlaced == 0) && + ((pTiming->etc.flag & + NVT_FLAG_NV_DOUBLE_SCAN_TIMING) == 0) && + ((pTiming->etc.rrx1k == 60000) || + (pTiming->etc.rrx1k == 59940))); +} + +/* + * Convert from NVT_TIMING to NvModeTimings; this is a safe operation + * to perform because NvModeTimings has higher precision (pixelclockHz + * in Hz, and vertical values doubled for interlaced) than NVT_TIMING + */ + +void NVT_TIMINGtoNvModeTimings(const NVT_TIMING *pTiming, + NvModeTimingsPtr pModeTimings) +{ + char *bytePtr = (char *)pModeTimings; + size_t i; + + for (i = 0; i < sizeof(*pModeTimings); i++) { + bytePtr[i] = 0; + } + + pModeTimings->RRx1k = pTiming->etc.rrx1k; + + /* pTiming->pclk is in 10*kHz; pModeTimings->pixelClockHz is in Hz */ + + pModeTimings->pixelClockHz = KHzToHz(pTiming->pclk) * 10; + + pModeTimings->hVisible = pTiming->HVisible; + pModeTimings->hSyncStart = pTiming->HFrontPorch + pTiming->HVisible; + pModeTimings->hSyncEnd = + pTiming->HFrontPorch + pTiming->HVisible + pTiming->HSyncWidth; + pModeTimings->hTotal = pTiming->HTotal; + + pModeTimings->vVisible = pTiming->VVisible; + pModeTimings->vSyncStart = pTiming->VFrontPorch + pTiming->VVisible; + pModeTimings->vSyncEnd = + pTiming->VFrontPorch + pTiming->VVisible + pTiming->VSyncWidth; + pModeTimings->vTotal = pTiming->VTotal; + + pModeTimings->interlaced = pTiming->interlaced; + pModeTimings->doubleScan = + !!(pTiming->etc.flag & NVT_FLAG_NV_DOUBLE_SCAN_TIMING); + + /* + * pTiming stores vertical values divided by two when interlaced; so + * double the vertical values in pModeTimings + */ + + if (pModeTimings->interlaced) { + pModeTimings->vVisible *= 2; + pModeTimings->vSyncStart *= 2; + pModeTimings->vSyncEnd *= 2; + pModeTimings->vTotal *= 2; + } + + /* + * pTiming: 0 is positive, 1 is negative + * pModeTimings: FALSE is positive, TRUE is negative + */ + + if (pTiming->HSyncPol == NVT_H_SYNC_POSITIVE) { + pModeTimings->hSyncNeg = 
NV_FALSE; + } else { + pModeTimings->hSyncNeg = NV_TRUE; + } + + if (pTiming->VSyncPol == NVT_V_SYNC_POSITIVE) { + pModeTimings->vSyncNeg = NV_FALSE; + } else { + pModeTimings->vSyncNeg = NV_TRUE; + } + + pModeTimings->hSyncPos = !pModeTimings->hSyncNeg; + pModeTimings->vSyncPos = !pModeTimings->vSyncNeg; + + /* + * Save any physical size information for this mode from the + * Detailed Timing Definition of the EDID. + */ + if (NV_NVT_TIMING_HAS_IMAGE_SIZE(pTiming)) { + pModeTimings->sizeMM.w = NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming); + pModeTimings->sizeMM.h = NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming); + } + + /* + * XXX work around lack of precision in NVT_TIMING: catch the + * 640x480@60Hz EDID mode and patch pixelClockHz and RRx1k. + */ + + if (IsEdid640x480_60_NVT_TIMING(pTiming)) { + pModeTimings->RRx1k = 59940; + pModeTimings->pixelClockHz = 25175000; + } +} + + +/*! + * Build a mode name, of the format 'WIDTHxHEIGHT'. + */ +void nvBuildModeName(NvU16 width, NvU16 height, char *name, size_t nameLen) +{ + nvBuildModeNameSnprintf(name, nameLen, "%dx%d", width, height); + name[nameLen - 1] = '\0'; +} diff --git a/src/common/unix/common/utils/nv_vasprintf.c b/src/common/unix/common/utils/nv_vasprintf.c new file mode 100644 index 000000000..390ad9f45 --- /dev/null +++ b/src/common/unix/common/utils/nv_vasprintf.c @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2003-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv_vasprintf.h" + +/* + * nv_vasprintf(): function that returns a string using vsnprintf(); intended to + * be used by vararg printing functions. This is supposedly correct + * for differing semantics of vsnprintf() in different versions of + * glibc: + * + * different semantics of the return value from (v)snprintf: + * + * -1 when the buffer is not long enough (glibc < 2.1) + * + * or + * + * the length the string would have been if the buffer had been large + * enough (glibc >= 2.1) + * + * This function allocates memory for the returned string; the caller should use + * free() the memory when done. + * + * The includer should implement nv_vasprintf_{alloc,free,vsnprintf}. 
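+ *
+ * A minimal sketch of the three hooks for a plain libc environment (an
+ * assumption, with <stdlib.h> and <stdio.h> included; real includers may
+ * route these through their own allocator and vsnprintf):
+ *
+ *   void *nv_vasprintf_alloc(size_t size)  { return malloc(size); }
+ *   void  nv_vasprintf_free(void *ptr)     { free(ptr); }
+ *   int   nv_vasprintf_vsnprintf(char *str, size_t size,
+ *                                const char *format, va_list ap)
+ *   {
+ *       return vsnprintf(str, size, format, ap);
+ *   }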
+ */ + +#define __NV_VASPRINTF_LEN 64 + +char* nv_vasprintf(const char *f, va_list ap) +{ + int len, current_len = __NV_VASPRINTF_LEN; + char *b = (char *)nv_vasprintf_alloc(current_len); + + while (b) { + va_list tmp_ap; + + va_copy(tmp_ap, ap); + len = nv_vasprintf_vsnprintf(b, current_len, f, tmp_ap); + va_end(tmp_ap); + + if ((len > -1) && (len < current_len)) { + break; + } else if (len > -1) { + current_len = len + 1; + } else { + current_len += __NV_VASPRINTF_LEN; + } + + nv_vasprintf_free(b); + b = (char *)nv_vasprintf_alloc(current_len); + } + + return b; +} diff --git a/src/common/unix/common/utils/unix_rm_handle.c b/src/common/unix/common/utils/unix_rm_handle.c new file mode 100644 index 000000000..ed6ebbfb5 --- /dev/null +++ b/src/common/unix/common/utils/unix_rm_handle.c @@ -0,0 +1,385 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains functions for dealing with dynamic allocation and + * management of resource handles. + * + * Note that dynamic handles are not suitable for all use cases. If a + * handle is placed in the pushbuffer, and the pushbuffer will be + * replayed during channel recovery, the handle value must be kept + * constant. For such handles, use an invariant handle value. + * + * We keep a bitmap of which handles we've used. 
+ * + * Composition of an object handle: + * [31:16] Client data + * [15:00] Handle constant + */ + +#include + +#include "unix_rm_handle.h" + +#define INVALID_HANDLE 0 +#define UNIX_RM_HANDLE_CLIENT_DATA_SHIFT 16 + +/* Mask to AND only client data */ +#define CLIENT_DATA_MASK ((~(NvU32)0) << UNIX_RM_HANDLE_CLIENT_DATA_SHIFT) +/* Mask to AND off client data */ +#define HANDLE_MASK (~(CLIENT_DATA_MASK)) +/* Handle 0 is reserved, so subtract one from a handle to get its index */ +#define HANDLE_INDEX(_handle) (((_handle) - 1) & HANDLE_MASK) + +/* Bits to OR in for client data */ +#define GET_CLIENT_DATA_BITS(_data) \ + (((_data) << UNIX_RM_HANDLE_CLIENT_DATA_SHIFT)) + +#define DWORD_FROM_HANDLE(_handle) (HANDLE_INDEX(_handle) >> 5) +#define BIT_FROM_HANDLE(_handle) (HANDLE_INDEX(_handle) & 0x1f) + +/* Check if a handle is used */ +#define USED(_bitmap, _handle) \ + ((_bitmap)[DWORD_FROM_HANDLE(_handle)] & (1U << BIT_FROM_HANDLE(_handle))) +/* Reserve a handle in the bitmap */ +#define RESERVE(_bitmap, _handle) \ + ((_bitmap)[DWORD_FROM_HANDLE(_handle)] |= (1U << BIT_FROM_HANDLE(_handle))) +/* Unreserve a handle in the bitmap */ +#define UNRESERVE(_bitmap, _handle) \ + ((_bitmap)[DWORD_FROM_HANDLE(_handle)] &= (~(1U << BIT_FROM_HANDLE(_handle)))) + +#if defined(DEBUG) +static void +nvReportUnfreedUnixRmHandleAllocations(NVUnixRmHandleAllocatorPtr pAllocator); +#endif + + +static void UnixRmHandleMemset(void *ptr, char data, NvLength size) +{ + char *byte = (char *)ptr; + NvLength i; + + for (i = 0; i < size; i++) { + byte[i] = data; + } +} + +static NvBool UnixRmHandleReallocBitmap(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 newMaxHandles) +{ + NvU32 *newBitmap; +#if defined(DEBUG) + NVUnixRmHandleAllocationPtr newAllocationTable; +#endif /* defined(DEBUG) */ + const NvLength newMemSize = NV_UNIX_RM_HANDLE_BITMAP_SIZE(newMaxHandles) * + sizeof(*newBitmap); + const NvU32 oldBitmapSize = + NV_UNIX_RM_HANDLE_BITMAP_SIZE(pAllocator->maxHandles); + + /* New handle limit must fit in the bitmask */ + if (newMaxHandles > GET_CLIENT_DATA_BITS(1)) { + return NV_FALSE; + } + + /* New handle limit must be a power of 2 */ + nvUnixRmHandleAssert(!(newMaxHandles & (newMaxHandles - 1))); + + newBitmap = (NvU32 *)nvUnixRmHandleReallocMem(pAllocator->bitmap, newMemSize); + + if (!newBitmap) { + return NV_FALSE; + } + + UnixRmHandleMemset(&newBitmap[oldBitmapSize], 0, + newMemSize - (oldBitmapSize * sizeof(*newBitmap))); + pAllocator->bitmap = newBitmap; + +#if defined(DEBUG) + newAllocationTable = + (NVUnixRmHandleAllocationPtr) + nvUnixRmHandleReallocMem(pAllocator->allocationTable, + newMaxHandles * + sizeof(*pAllocator->allocationTable)); + + if (!newAllocationTable) { + /* + * Leave the new bitmap allocation in place. If that realloc + * succeeded, the old bitmap allocation is gone, and it is at + * least big enough to hold the old pAllocator->maxHandles, + * since a shrinking of the allocation table shouldn't have + * failed, and maxHandles currently never decreases anyway. 
+ */ + nvUnixRmHandleAssert(newMaxHandles >= pAllocator->maxHandles); + + return NV_FALSE; + } + + pAllocator->allocationTable = newAllocationTable; +#endif /* defined(DEBUG) */ + + pAllocator->maxHandles = newMaxHandles; + + return NV_TRUE; +} + +NvBool nvInitUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 rmClient, NvU32 clientData) +{ + nvUnixRmHandleAssert(pAllocator != NULL && + rmClient != 0 && clientData != 0); + nvUnixRmHandleAssert((clientData & 0x0000FFFF) == clientData); + + UnixRmHandleMemset(pAllocator, 0, sizeof(*pAllocator)); + + pAllocator->rmClient = rmClient; + pAllocator->clientData = clientData; + + if (!UnixRmHandleReallocBitmap(pAllocator, + NV_UNIX_RM_HANDLE_INITIAL_HANDLES)) { + nvUnixRmHandleAssert(!"Failed to init RM handle allocator bitmap"); + nvTearDownUnixRmHandleAllocator(pAllocator); + + return NV_FALSE; + } + + /* + * If the RM-provided client handle falls within the allocator range + * then reserve it up-front. + */ + if ((pAllocator->rmClient & CLIENT_DATA_MASK) == + GET_CLIENT_DATA_BITS(pAllocator->clientData)) { + NvU32 handleId = pAllocator->rmClient & HANDLE_MASK; + + if ((handleId <= pAllocator->maxHandles) && + (handleId != INVALID_HANDLE)) { + RESERVE(pAllocator->bitmap, handleId); + } + } + + return NV_TRUE; +} + +/* + * nvGenerateUnixRmHandleInternal() + * Return a unique, random handle. Be sure to free the handle + * when you're done with it! Returns 0 if we run out of handles. + */ +NvU32 nvGenerateUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator) +{ + NvU32 handleId; + NvU32 handle; + + nvUnixRmHandleAssert(pAllocator != NULL && + pAllocator->rmClient != 0 && + pAllocator->clientData != 0); + + /* Find free handle */ + handleId = 1; + while (USED(pAllocator->bitmap, handleId) && + (handleId <= pAllocator->maxHandles)) { + handleId++; + } + + if (handleId > pAllocator->maxHandles) { + if (!UnixRmHandleReallocBitmap(pAllocator, pAllocator->maxHandles * 2)) { + nvUnixRmHandleAssert(!"Failed to grow RM handle allocator bitmap"); + return INVALID_HANDLE; + } + } + + nvUnixRmHandleAssert(!USED(pAllocator->bitmap, handleId)); + + RESERVE(pAllocator->bitmap, handleId); + + handle = GET_CLIENT_DATA_BITS(pAllocator->clientData) | handleId; + + nvUnixRmHandleAssert(handle != pAllocator->rmClient); + + return handle; +} + +/* + * nvFreeUnixRmHandleInternal() + * Mark the handle passed in as free in the bitmap. + */ +void nvFreeUnixRmHandleInternal(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 unixHandle) +{ + NvU32 handle = unixHandle & HANDLE_MASK; + + if (!unixHandle) { + return; + } + + nvUnixRmHandleAssert(pAllocator != NULL && + pAllocator->rmClient != 0 && pAllocator->clientData != 0); + + nvUnixRmHandleAssert(USED(pAllocator->bitmap, handle)); + + UNRESERVE(pAllocator->bitmap, handle); +} + +/* + * This function just makes sure we freed all of the handles we allocated, for + * debugging purposes. + */ +void nvTearDownUnixRmHandleAllocator(NVUnixRmHandleAllocatorPtr pAllocator) +{ + if (pAllocator == NULL) { + return; + } + + /* + * If the RM-provided client handle falls within the allocator range, + * then it is reserved up-front. so make sure that it is get unreserved + * before teardown. 
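+ *
+ * Typical allocator lifecycle, sketched here for reference (hClient and
+ * clientData are hypothetical values supplied by the caller; error
+ * handling omitted):
+ *
+ *   NVUnixRmHandleAllocatorRec allocator;
+ *   nvInitUnixRmHandleAllocator(&allocator, hClient, clientData);
+ *   NvU32 hObject = nvGenerateUnixRmHandle(&allocator);
+ *   ... use hObject for RM object allocation ...
+ *   nvFreeUnixRmHandle(&allocator, hObject);
+ *   nvTearDownUnixRmHandleAllocator(&allocator);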
+ */ + if ((pAllocator->rmClient & CLIENT_DATA_MASK) == + GET_CLIENT_DATA_BITS(pAllocator->clientData)) { + NvU32 handleId = pAllocator->rmClient & HANDLE_MASK; + + if ((handleId <= pAllocator->maxHandles) && + (handleId != INVALID_HANDLE)) { + UNRESERVE(pAllocator->bitmap, handleId); + } + } + +#if defined(DEBUG) + nvReportUnfreedUnixRmHandleAllocations(pAllocator); + nvUnixRmHandleFreeMem(pAllocator->allocationTable); +#endif + + nvUnixRmHandleFreeMem(pAllocator->bitmap); + + UnixRmHandleMemset(pAllocator, 0, sizeof(*pAllocator)); +} + +/* + * Handle allocation tracking code; in a debug build, the below + * functions wrap the actual allocation functions above. + */ + +#if defined(DEBUG) + +#define UNIX_RM_HANDLE_ALLOC_LABEL "NVIDIA UNIX RM HANDLE TRACKER: " + +static NVUnixRmHandleAllocationPtr +FindUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle) +{ + if (((handle & HANDLE_MASK) == INVALID_HANDLE) || + ((handle & HANDLE_MASK) > pAllocator->maxHandles)) { + return NULL; + } + + return &pAllocator->allocationTable[HANDLE_INDEX(handle)]; +} + +static void RecordUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 handle, const char *file, int line) +{ + /* Find a free allocation table slot. */ + NVUnixRmHandleAllocationPtr alloc = FindUnixRmHandleAllocation(pAllocator, handle); + + if (!alloc) { + nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_ERROR, + UNIX_RM_HANDLE_ALLOC_LABEL + "NVUnixRmHandleAllocator is corrupted." + "(table entry not found for handle)"); + return; + } + + nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_VERBOSE, + UNIX_RM_HANDLE_ALLOC_LABEL + "Recording handle allocation: 0x%08x (%s:%d)", + handle, file, line); + + alloc->file = file; + alloc->line = line; +} + +static void FreeUnixRmHandleAllocation(NVUnixRmHandleAllocatorPtr pAllocator, + NvU32 handle) +{ + NVUnixRmHandleAllocationPtr alloc = + FindUnixRmHandleAllocation(pAllocator, handle); + + if (!alloc) { + return; + } + + nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_VERBOSE, + UNIX_RM_HANDLE_ALLOC_LABEL + "Freeing handle allocation: 0x%08x (%s:%d)", + handle, alloc->file, alloc->line); + + UnixRmHandleMemset(alloc, 0, sizeof(*alloc)); +} + + +NvU32 +nvDebugGenerateUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, + const char *file, int line) +{ + NvU32 handle = nvGenerateUnixRmHandleInternal(pAllocator); + + RecordUnixRmHandleAllocation(pAllocator, handle, file, line); + return handle; +} + +void nvDebugFreeUnixRmHandle(NVUnixRmHandleAllocatorPtr pAllocator, NvU32 handle) +{ + if (!handle) { + return; + } + + FreeUnixRmHandleAllocation(pAllocator, handle); + + nvFreeUnixRmHandleInternal(pAllocator, handle); +} + +void nvReportUnfreedUnixRmHandleAllocations(NVUnixRmHandleAllocatorPtr pAllocator) +{ + NvU32 handleId; + + for (handleId = 1; handleId <= pAllocator->maxHandles; handleId++) { + if (USED(pAllocator->bitmap, handleId)) { + + NVUnixRmHandleAllocationPtr alloc = + FindUnixRmHandleAllocation(pAllocator, handleId); + + if (alloc == NULL) { + continue; + } + + nvUnixRmHandleLogMsg(NV_UNIX_RM_HANDLE_DEBUG_MSG, + UNIX_RM_HANDLE_ALLOC_LABEL + "Unfreed handle ID allocation: 0x%08x (%s:%d)", + handleId, + alloc->file, + alloc->line); + } + } +} + +#endif /* DEBUG */ + diff --git a/src/common/uproc/os/libos-v2.0.0/debug/elf.c b/src/common/uproc/os/libos-v2.0.0/debug/elf.c new file mode 100644 index 000000000..451053c84 --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/debug/elf.c @@ -0,0 +1,187 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifdef NVRM + +# include +# define memcpy(d, s, l) portMemCopy(d, l, s, l) +# define strcmp(str1, str2) portStringCompare(str1, str2, 0x1000) + +#else // NVRM + +# include +# include +# include + +#endif // NVRM + +#include "elf.h" +#include "nvtypes.h" + +/** + * + * @brief Find the start and end of the ELF section in memory + * from the section name. + * + * @param[in] elf + * @param[in] sectionName + * The section to find such as .text or .data + * @param[out] start, end + * The start and end of the section in the loaded ELF file + * e.g validPtr >= start && validPtr < end + * @param[out] va_baase + * The virtual address this section is loaded at + */ +NvBool +libosElfFindSectionByName(elf64_header *elf, const char *targetName, NvU8 **start, NvU8 **end, NvU64 *va_base) +{ + elf64_shdr *shdr = (elf64_shdr *)(((char *)elf) + elf->shoff); + const char *string_base = ((char *)elf) + shdr[elf->shstrndx].offset; + NvU32 i; + + for (i = 0; i < elf->shnum; i++, shdr++) + { + const char *name = string_base + shdr->name; + if (strcmp(name, targetName) == 0) + { + *start = (NvU8 *)elf + shdr->offset; + *end = (NvU8 *)elf + shdr->offset + shdr->size; + *va_base = shdr->addr; + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/** + * + * @brief Find the start and end of the ELF section in memory + * from the section name. + * + * @param[in] elf + * @param[in] sectionName + * The section to find such as .text or .data + * @param[out] start, end + * The start and end of the section in the loaded ELF file + * e.g validPtr >= start && validPtr < end + * @param[out] va_base + * The virtual address this section is loaded at + */ +NvBool +libosElfFindSectionByAddress(elf64_header *elf, NvU64 address, NvU8 **start, NvU8 **end, NvU64 *va_base) +{ + elf64_shdr *shdr = (elf64_shdr *)(((char *)elf) + elf->shoff); + NvU32 i; + + for (i = 0; i < elf->shnum; i++, shdr++) + { + if (address >= shdr->addr && address < (shdr->addr + shdr->size)) + { + *start = (NvU8 *)elf + shdr->offset; + *end = (NvU8 *)elf + shdr->offset + shdr->size; + *va_base = shdr->addr; + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/** + * + * @brief Reads an arbitrary sized block of memory by loaded VA through + * the ELF. This can be used to read data from the perspective + * of a processor who has loaded the ELF. 
+ * + * @param[in] elf + * @param[in] address + * The absolute virtual address to read + * @param[out] size + * The number of bytes that must be valid. + * @returns + * The pointer to the data in question, or NULL if the operation failed) + */ +void *libosElfReadVirtual(elf64_header *elf, NvU64 address, NvU64 size) +{ + NvU8 *start, *end; + NvU64 va_base; + NvU64 section_offset; + NvU64 section_offset_tail; + + // @todo This really should be using the PHDR as theoretically section headers + // might be stripped + if (!libosElfFindSectionByAddress(elf, address, &start, &end, &va_base)) + return 0; + + section_offset = address - va_base; + + // Compute section offset (overflow check) + section_offset_tail = section_offset + size; + if (section_offset_tail < section_offset) + return 0; + + // Bounds check the tail + if (section_offset_tail > (NvLength)(end - start)) + return 0; + + return (address - va_base) + start; +} + +/** + * + * @brief Reads an arbitrary length string by loaded VA through + * the ELF. This can be used to read data from the perspective + * of a processor who has loaded the ELF. + * + * @param[in] elf + * @param[in] address + * The absolute virtual address to read + * @returns + * The pointer to the data in question, or NULL if the operation failed) + * Ensures that all bytes of the string lie within the ELF same section. + */ +const char *libosElfReadStringVirtual(elf64_header *elf, NvU64 address) +{ + NvU8 *start, *end; + NvU64 base; + NvU8 *region; + NvU8 *i; + + // @todo This really should be using the PHDR as theoretically section headers + // might be stripped + if (!libosElfFindSectionByAddress(elf, address, &start, &end, &base)) + return 0; + + region = (address - base) + start; + i = region; + + while (i >= start && i < end) + { + if (!*i) + return (const char *)region; + i++; + } + + return 0; +} diff --git a/src/common/uproc/os/libos-v2.0.0/debug/elf.h b/src/common/uproc/os/libos-v2.0.0/debug/elf.h new file mode 100644 index 000000000..58261c5a8 --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/debug/elf.h @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef GSP_ELF_H_ +#define GSP_ELF_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// Structures for Loader +typedef struct +{ + NvU8 ident[16]; + NvU16 type; + NvU16 machine; + NvU32 version; + NvU64 entry; + NvU64 phoff; + NvU64 shoff; + NvU32 flags; + NvU16 ehsize; + NvU16 phentsize; + NvU16 phnum; + NvU16 shentsize; + NvU16 shnum; + NvU16 shstrndx; +} elf64_header; + +typedef struct +{ + NvU32 type; + NvU32 flags; + NvU64 offset; + NvU64 vaddr; + NvU64 paddr; + NvU64 filesz; + NvU64 memsz; + NvU64 align; +} elf64_phdr; + +#define PF_X 1 +#define PF_W 2 +#define PF_R 4 + +#define PT_LOAD 1 + +// Structures for finding Symbols +typedef struct +{ + NvU32 name; + NvU32 type; + NvU64 flags; + NvU64 addr; + NvU64 offset; + NvU64 size; + NvU32 link; + NvU32 info; + NvU64 addralign; + NvU64 entsize; +} elf64_shdr; + +typedef struct +{ + NvU32 name; + NvU8 info; + NvU8 other; + NvU16 shndx; + NvU64 value; + NvU64 size; +} elf64_sym; + +// Core ELF API +NvBool libosElfFindSectionByName( + elf64_header *elf, const char *sectionName, NvU8 **start, NvU8 **end, NvU64 *va_base); +NvBool +libosElfFindSectionByAddress(elf64_header *elf, NvU64 address, NvU8 **start, NvU8 **end, NvU64 *va_base); +void *libosElfReadVirtual(elf64_header *elf, NvU64 address, NvU64 size); +const char *libosElfReadStringVirtual(elf64_header *elf, NvU64 address); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/common/uproc/os/libos-v2.0.0/debug/lines.c b/src/common/uproc/os/libos-v2.0.0/debug/lines.c new file mode 100644 index 000000000..3a43f3230 --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/debug/lines.c @@ -0,0 +1,885 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifdef NVRM + +# include +# include +# define printf(fmt, ...) 
nv_printf(LEVEL_ERROR, fmt, ##__VA_ARGS__) + +#else // NVRM + +# include +# include +# include +# include + +# define portMemCopy(d, l, s, m) memcpy(d, s, l) +# define portMemSet(d, v, l) memset(d, v, l) +# define portStringCompare(str1, str2, l) strcmp(str1, str2) +# define portMemAllocNonPaged(l) malloc(l); +# define portMemFree(p) free(p); + +#endif // NVRM + +#include "nvtypes.h" +#include "lines.h" + +typedef struct +{ + const NvU8 *buffer; + NvU64 offset; + NvU64 size; +} DwarfStream; + +static void libosDwarfBuildTables(libosDebugResolver *pThis); +static void dwarfBuildARangeTable(libosDebugResolver *pThis); +static void dwarfSetARangeTableLineUnit(libosDebugResolver *pThis, DwarfStream unit, NvU64 address); + +// http://www.dwarfstd.org/doc/dwarf-2.0.0.pdf + +// Debug Line information related structures +// (for branch history and call stacks) +enum +{ + DW_LNS_extended_operation = 0, + DW_LNS_copy = 1, + DW_LNS_advance_pc = 2, + DW_LNS_advance_line = 3, + DW_LNS_set_file = 4, + DW_LNS_set_column = 5, + DW_LNS_negate_stmt = 6, + DW_LNS_set_basic_block = 7, + DW_LNS_const_add_pc = 8, + DW_LNS_fixed_advance_pc = 9, + DW_LNS_set_prologue_end = 10, + DW_LNS_set_epilogue_begin = 11, +}; + +enum +{ + DW_LNE_end_sequence = 1, + DW_LNE_set_address = 2, + DW_LNE_define_file = 3, + DW_LNE_set_discriminator = 4, +}; + +/** + * + * @brief Creates a resolver object for a given ELF. + * No resources or memory are retained by this call. + * + * @param[in] elf + * An elf containing .debug_line and or .symtab data + * @param[in] pThis + * An uninitialized resolver object. + */ +void libosDebugResolverConstruct(libosDebugResolver *pThis, elf64_header *elf) +{ + NvU64 vabase; + libosElfFindSectionByName(elf, ".debug_line", &pThis->debugLineStart, &pThis->debugLineEnd, &vabase); + libosElfFindSectionByName( + elf, ".debug_aranges", &pThis->debugARangesStart, &pThis->debugARangesEnd, &vabase); + libosElfFindSectionByName(elf, ".symtab", &pThis->symtabStart, &pThis->symtabEnd, &vabase); + libosElfFindSectionByName(elf, ".strtab", &pThis->strtabStart, &pThis->strtabEnd, &vabase); + libosDwarfBuildTables(pThis); +} + +void libosDebugResolverDestroy(libosDebugResolver *pThis) +{ + if (pThis->arangeTable != NULL) + { + portMemFree(pThis->arangeTable); + pThis->arangeTable = NULL; + } +} + +/** + * + * @brief Helper method to resolve symbol name to VA + * + * @param[in] pThis + * An initialized resolver object. + * @param[in] symbolName + * Name of a symbol (code or data) for lookup. + * @param[out] address + * The virtual address for the given symbol.\ + */ +NvBool libosDebugResolveSymbolToVA(libosDebugResolver *pThis, const char *symbolName, NvU64 *address) +{ + elf64_sym *i = (elf64_sym *)pThis->symtabStart; + + NvU64 count = (pThis->symtabEnd - pThis->symtabStart) / sizeof(elf64_sym); + while (count--) + { + if (i->name != 0) + { + if (!portStringCompare(i->name + (const char *)pThis->strtabStart, symbolName, 0x1000)) + { + *address = i->value; + return NV_TRUE; + } + } + i++; + } + return NV_FALSE; +} + +/** + * + * @brief Helper method to resolve symbol VA back to name + * @note This will match on addresses within a sized symbol (!) + * + * @param[in] pThis + * An initialized resolver object. + * @param[in] symbolAddress + * An address for which we want either + * (1) The symbol that exactly resolves to this address + * (2) The symbol that contains this address + * @param[out] offset + * Offsets are returned if the match is within a symbol. 
+ * @param[out] name + * The symbol name containing or matching the search address + */ +NvBool libosDebugResolveSymbolToName( + libosDebugResolver *pThis, NvU64 symbolAddress, const char **name, NvU64 *offset) +{ + elf64_sym *i = (elf64_sym *)pThis->symtabStart; + NvU64 count = (pThis->symtabEnd - pThis->symtabStart) / sizeof(elf64_sym); + while (count--) + { + if (symbolAddress == i->value || (symbolAddress >= i->value && symbolAddress < i->value + i->size)) + { + if (i->name) + *name = i->name + (const char *)pThis->strtabStart; + else + *name = ""; + *offset = symbolAddress - i->value; + return NV_TRUE; + } + i++; + } + return NV_FALSE; +} + +/** + * + * @brief Helper method to look up the symbol for a VA and return the VA range + * that symbol covers. + * @note This will match on addresses within a sized symbol (!) + * + * @param[in] pThis + * An initialized resolver object. + * @param[in] symbolAddress + * An address for which we want either + * (1) The symbol that exactly resolves to this address + * (2) The symbol that contains this address + * @param[out] symStart + * First address covered by the symbol.. + * @param[out] symEnd + * One past the last address covered by the symbol. + */ +NvBool libosDebugGetSymbolRange( + libosDebugResolver *pThis, NvU64 symbolAddress, NvU64 *symStart, NvU64 *symEnd) +{ + elf64_sym *i = (elf64_sym *)pThis->symtabStart; + elf64_sym *symtabEnd = (elf64_sym *)pThis->symtabEnd; + + for (; i < symtabEnd; i++) + { + if (symbolAddress == i->value || + ((symbolAddress >= i->value) && (symbolAddress < i->value + i->size))) + { + *symStart = i->value; + *symEnd = i->value + i->size; + return NV_TRUE; + } + } + return NV_FALSE; +} + +static NvBool libosDwarfReadRaw(DwarfStream *stream, void *buffer, NvU64 size) +{ + NvU64 newOffset = stream->offset + size; + if (newOffset > stream->size) + return NV_FALSE; + portMemCopy(buffer, (size_t)size, stream->buffer + stream->offset, size); + stream->offset = newOffset; + return NV_TRUE; +} + +static NvBool libosDwarfExtractString(DwarfStream *stream, const char **string) +{ + NvU64 offsetEnd = stream->offset; + while (1) + { + if (offsetEnd >= stream->size) + return NV_FALSE; + if (!stream->buffer[offsetEnd]) + break; + offsetEnd++; + } + + *string = (const char *)stream->buffer + stream->offset; + stream->offset = offsetEnd + 1; + + return NV_TRUE; +} + +#define DWARF_READ(stream, ptr) libosDwarfReadRaw(stream, ptr, sizeof(*ptr)) + +static NvBool dwarfReadLeb128Generic(DwarfStream *stream, NvU64 *presult, NvBool sextension) +{ + NvU8 byte; + NvU64 bitsRead = 0, result = 0; + while (bitsRead < 64 && DWARF_READ(stream, &byte)) + { + // Read 7 bits + result |= (NvU64)(byte & 0x7f) << bitsRead; + bitsRead += 7; + + // Was this the last byte? 
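        // LEB128 packs a value 7 bits per byte, least-significant group first,
        // with bit 7 of each byte acting as a continuation flag. As an
        // illustrative example (values chosen for clarity, not taken from a
        // real binary): the unsigned value 300 (0x12C) is encoded as the bytes
        // 0xAC 0x02 -- the first byte contributes 0x2C and sets the
        // continuation bit, the second contributes 0x02 << 7 = 0x100 and ends
        // the sequence. SLEB128 additionally sign-extends from bit 6 of the
        // final byte, which is what the sextension path below does.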
+ if (!(byte & 0x80)) + { + // Sign extend based on the top bit we just read + if (sextension && bitsRead < 64 && (byte & 0x40)) + result |= 0xFFFFFFFFFFFFFFFFULL << bitsRead; + + // Return result + *presult = result; + return NV_TRUE; + } + } + return NV_FALSE; +} + +static NvBool dwarfReadSleb128(DwarfStream *stream, NvS64 *result) +{ + return dwarfReadLeb128Generic(stream, (NvU64 *)result, NV_TRUE /* sign extend */); +} + +static NvBool dwarfReadUleb128(DwarfStream *stream, NvU64 *result) +{ + return dwarfReadLeb128Generic(stream, result, NV_FALSE); +} + +static NvBool dwarfReadFilename(DwarfStream *names, const char **directory, const char **filename, NvU64 file) +{ + // Skip the directory names stream + DwarfStream dirnames = *names; + const char *name; + + NvU64 directoryEntryId = 0; + NvU64 i; + + *directory = ""; + *filename = ""; + + if (!file) + return NV_FALSE; + + do + { + if (!libosDwarfExtractString(names, &name)) + return (NV_FALSE); + } while (*name); + + // Walk through the file entries + for (i = 0; i < file; i++) + { + NvU64 mtime, size; + if (!libosDwarfExtractString(names, filename) || !dwarfReadUleb128(names, &directoryEntryId) || + !dwarfReadUleb128(names, &mtime) || !dwarfReadUleb128(names, &size)) + return NV_FALSE; + } + + // Walk the directory table up until the required point + for (i = 0; i < directoryEntryId; i++) + if (!libosDwarfExtractString(&dirnames, directory)) + return NV_FALSE; + + return NV_TRUE; +} + +/** + * + * @brief DWARF-2 Virtual machine interpreter for debug data + * + * @param[in] pThis + * An initialized resolver object. + * @param[in/out] unit + * A dwarf stream object initialized to the start of a unit in the + * .debug_lines elf section. + * @param[in] pc + * Virtual address of the code to resolve. + * @param[out] directory + * The source file directory for this line of code. Requires a .debug_line section. + * @param[out] filename + * The source file for this line of code. Requires a .debug_line section. + * @param[out] outputLine + * The source line for address. Requires a .debug_line section. + * @param[out] outputColumn + * The source column for this address. Requires additional debug info -g3. + * @param[in] matchedAddress + * Returns the virtual address to the start of the matched line/col. + * @param[in] bBuildTable + * Set to true when building the aranges index table. 
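 *
 * A rough sketch of how a row is produced (the numbers are illustrative and
 * not taken from a real compilation unit): with opcodeBase = 10,
 * lineRange = 14, line_base = -5 and minimumInstructionLength = 1, the
 * special opcode byte 75 decodes as normalizedOpcode = 75 - 10 = 65, so the
 * address advances by 65 / 14 = 4 and the line by -5 + (65 % 14) = 4, after
 * which a row {address, file, line, column} is emitted. Rows are emitted
 * until one passes the requested pc; the previous row then supplies the
 * directory/file/line/column result.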
+ */ +static NvBool dwarfParseUnitLines( + libosDebugResolver *pThis, DwarfStream unit, NvU64 pc, const char **directory, const char **filename, + NvU64 *outputLine, NvU64 *outputColumn, NvU64 *matchedAddress, NvBool bBuildTable) +{ + NvU16 version; + NvU32 prologueLength; + + DwarfStream saveUnit = unit; + + NvU64 headerOffset; + NvU8 minimumInstructionLength, defaultIsStmt, lineRange, opcodeBase; + NvS8 line_base; + NvU64 i; + + DwarfStream names; + + // Initial state of virtual machine + NvU64 previousAddress = 0, previousLine = 0, previousColumn = 0, previousFile = 0; + NvU64 address = 0, file = 1, line = 1, column = 0; + NvU8 isStmt; + //NvBool basicBlock = NV_FALSE + NvBool postEmitResetState = NV_FALSE; + //NvBool prologueEnd = NV_FALSE; + //NvBool epilogueBegin = NV_FALSE; + NvBool postEmitResetStateIsStmt = NV_FALSE; + //NvBool endSequence = NV_FALSE; + NvU8 opcode; + + if (!DWARF_READ(&unit, &version) || version > 2 || !DWARF_READ(&unit, &prologueLength)) + { + return NV_FALSE; + } + + headerOffset = unit.offset; + if (!DWARF_READ(&unit, &minimumInstructionLength) || !DWARF_READ(&unit, &defaultIsStmt) || + !DWARF_READ(&unit, &line_base) || !DWARF_READ(&unit, &lineRange) || !DWARF_READ(&unit, &opcodeBase)) + { + return NV_FALSE; + } + + // Skip over the opcode lengths + for (i = 1; i < opcodeBase; i++) + { + NvU64 dummy; + if (!dwarfReadUleb128(&unit, &dummy)) + { + return NV_FALSE; + } + } + + // Names section starts here inside after the prologue + names = unit; + + // Skip prologue + unit.offset = headerOffset; + if (unit.size - unit.offset < prologueLength) + { + //printf("Bailing 3...\n"); + return NV_FALSE; + } + unit.offset += prologueLength; + + isStmt = defaultIsStmt; + + // Run the line number information program for this unit + while (NV_TRUE) + { + NvBool emit_row = NV_FALSE; + + //NvU64 offset = unit.offset; + if (!DWARF_READ(&unit, &opcode)) + break; + //NvBool reset_basic_block = NV_FALSE; + + // 6.2.5.1 Special Opcodes + if (opcode >= opcodeBase) + { + NvU8 normalizedOpcode = opcode - opcodeBase; + address += (normalizedOpcode / lineRange) * minimumInstructionLength; + line += line_base + normalizedOpcode % lineRange; + //reset_basic_block = NV_TRUE; + emit_row = NV_TRUE; + //printf(" [0x%08llx] Special opcode\n", offset); + } + // 6.2.5.3 Extended Opcodes + else if (opcode == DW_LNS_extended_operation) + { + NvU64 extBaseOffset = unit.offset, extraSize; + DwarfStream extra; + + if (!dwarfReadUleb128(&unit, &extraSize)) + return NV_FALSE; + + if (unit.size - unit.offset < extraSize) + return NV_FALSE; + extra.buffer = unit.buffer + extBaseOffset; + extra.offset = unit.offset - extBaseOffset; + extra.size = extraSize + unit.offset - extBaseOffset; + unit.offset += extraSize; + + if (!DWARF_READ(&extra, &opcode)) + return NV_FALSE; + + switch (opcode) + { + case DW_LNE_end_sequence: + emit_row = NV_TRUE; + postEmitResetStateIsStmt = isStmt; + postEmitResetState = NV_TRUE; + //printf(" [0x%08llx] Extended opcode 1: End of Sequence\n", offset); + break; + + case DW_LNE_set_address: + switch (extra.size - extra.offset) + { + case 8: + if (!DWARF_READ(&extra, &address)) + return NV_FALSE; + break; + + case 4: { + NvU32 address32; + if (!DWARF_READ(&extra, &address32)) + return NV_FALSE; + address = address32; + break; + } + + default: + //printf("unexpected address length: %zu", extra.size - extra.offset); + return NV_FALSE; + } + //printf(" [0x%08llx] Extended opcode 2: set Address to 0x%llx\n", offset, address); + break; + + case DW_LNE_define_file: { + const char 
*fname = ""; + NvU64 dir, time, size; + libosDwarfExtractString(&unit, &fname); + //printf(" [0x%08x] Define file: %s\n", fname, offset); + dwarfReadUleb128(&unit, &dir); + dwarfReadUleb128(&unit, &time); + dwarfReadUleb128(&unit, &size); + } + break; + + case DW_LNE_set_discriminator: // For profilers, how many code paths pass through this insn + { + NvU64 discriminator; + dwarfReadUleb128(&extra, &discriminator); + //printf(" [0x%08llx] Extended opcode 4: set Discriminator to %lld\n", offset, discriminator); + break; + } + + default: + //printf(" [0x%08llx] unknown extended opcode: %d\n", offset, opcode); + return NV_FALSE; + } + } + else + { + /* "Standard" opcodes. */ + switch (opcode) + { + case DW_LNS_copy: + emit_row = NV_TRUE; + //reset_basic_block = NV_TRUE; + //printf(" [0x%08llx] Copy\n", offset); + break; + + case DW_LNS_advance_pc: { + NvU64 delta; + if (!dwarfReadUleb128(&unit, &delta)) + return NV_FALSE; + address += delta * minimumInstructionLength; + //printf(" [0x%08llx] Advance PC by %lld to 0x%llx\n", offset, delta, address); + break; + } + + case DW_LNS_fixed_advance_pc: { + NvU16 delta = 0; + DWARF_READ(&unit, &delta); + address += delta * minimumInstructionLength; + //printf(" [0x%08llx] Advance PC by fixed size amount %d to 0x%llx\n",offset , delta, address); + break; + } + + case DW_LNS_advance_line: { + NvS64 delta; + if (!dwarfReadSleb128(&unit, &delta)) + return NV_FALSE; + line += delta; + //printf(" [0x%08llx] Advance Line by %lld to %lld\n", offset, delta, line); + break; + } + + case DW_LNS_set_file: + if (!dwarfReadUleb128(&unit, &file)) + return NV_FALSE; + //printf(" [0x%08llx] Set File Name to entry %lld in the File Name Table\n", offset, file); + break; + + case DW_LNS_set_column: + if (!dwarfReadUleb128(&unit, &column)) + return NV_FALSE; + //printf(" [0x%08llx] Set column to %lld\n", offset, column); + break; + + case DW_LNS_negate_stmt: + isStmt = !isStmt; + //printf(" [0x%08llx] Set isStmt to %d\n", offset, isStmt); + break; + + case DW_LNS_set_basic_block: + //basicBlock = NV_TRUE; + //printf(" [0x%08llx] Set basic block\n", offset); + break; + + case DW_LNS_const_add_pc: { + NvU64 delta = ((255 - opcodeBase) / lineRange) * minimumInstructionLength; + address += delta; + //printf(" [0x%08llx] Add pc by %lld to %08llx\n", offset, delta, address); + break; + } + + case DW_LNS_set_prologue_end: + //printf(" [0x%08llx] Set prologue end\n", offset); + //prologueEnd = NV_TRUE; + break; + + case DW_LNS_set_epilogue_begin: + //printf(" [0x%08llx] Set epilogie begin\n", offset); + //epilogueBegin = NV_TRUE; + break; + + default: + //printf(" [0x%08llx] unknown standard opcode: %d\n", offset, opcode); + return NV_FALSE; + } + } + + if (emit_row) + { + if (bBuildTable) + { + dwarfSetARangeTableLineUnit(pThis, saveUnit, address); + return NV_FALSE; + } + + if ((previousAddress && (pc >= previousAddress && pc < address))) + { + dwarfReadFilename(&names, directory, filename, previousFile); + *outputLine = previousLine; + *outputColumn = previousColumn; + *matchedAddress = pc; + return NV_TRUE; + } + + previousAddress = address; + previousFile = file; + previousLine = line; + previousColumn = column; + } + + if (postEmitResetState) + { + address = 0; + file = 1; + line = 1; + column = 0; + isStmt = postEmitResetStateIsStmt; + //basicBlock = NV_FALSE; + postEmitResetState = NV_FALSE; + //endSequence = NV_FALSE; + //prologueEnd = NV_FALSE; + //epilogueBegin = NV_FALSE; + } + + //if (reset_basic_block) + // basicBlock = NV_FALSE; + } + return NV_FALSE; +} + +/** + * 
+ * @brief Resolve an address to source file and line location for DWARF-2 + * + * @param[in] pThis + * An initialized resolver object. + * @param[in] address + * Virtual address of the code to resolve. + * @param[out] filename + * The source file for this line of code. Requires a .debug_line section. + * @param[out] outputLine + * The source line for address. Requires a .debug_line section. + * @param[out] outputColumn + * The source column for this address. Requires additional debug info -g3. + * @param[in] matchedAddress + * Returns the virtual address to the start of the matched line/col. + */ +NvBool libosDwarfResolveLine( + libosDebugResolver *pThis, NvU64 address, const char **directory, const char **filename, + NvU64 *outputLine, NvU64 *outputColumn, NvU64 *matchedAddress) +{ + ARangeTupple *pFoundARange = NULL; + DwarfStream unit; + NvU32 i; + + // Find entry in aranges table + for (i = 0; i < pThis->nARangeEntries; i++) + { + if ((address >= pThis->arangeTable[i].address) && + (address < pThis->arangeTable[i].address + pThis->arangeTable[i].length)) + { + pFoundARange = &pThis->arangeTable[i]; + + if (pFoundARange->lineUnitBuffer != NULL) + { + // Found cached line into in ARange table. + unit.buffer = pFoundARange->lineUnitBuffer; + unit.offset = 0; + unit.size = pFoundARange->lineUnitSize; + + if (dwarfParseUnitLines( + pThis, unit, address, directory, filename, outputLine, outputColumn, matchedAddress, + NV_FALSE)) + { + return NV_TRUE; + } + } + + break; + } + } + + return NV_FALSE; +} + +/** + * + * @brief Create a table indexing the units in .debug_line elf section + * + * + * @param[in/out] pThis + * An initialized resolver object. + */ +static void libosDwarfBuildTables(libosDebugResolver *pThis) +{ + NvU32 tableSize; + DwarfStream debugLines; + DwarfStream unit; + NvU32 currentUnit = 1; + NvU32 unitSize; + + pThis->arangeTable = NULL; + pThis->nARangeEntries = 0; + + // Run through the .debug_aranges elf section to get a count of consolidated ranges. + dwarfBuildARangeTable(pThis); + + tableSize = (pThis->nARangeEntries + 1) * sizeof(ARangeTupple); + + // Allocate the table. + pThis->arangeTable = portMemAllocNonPaged(tableSize); + portMemSet(pThis->arangeTable, 0, tableSize); + + // Run through the .debug_aranges elf section again to populate the table. + dwarfBuildARangeTable(pThis); + + debugLines.buffer = pThis->debugLineStart; debugLines.offset = 0; debugLines.size = pThis->debugLineEnd - pThis->debugLineStart; + + // Run through the .debug_line elf section to match units to the arange table. + for (currentUnit = 1;; currentUnit++) + { + if (!DWARF_READ(&debugLines, &unitSize) || unitSize >= 0xfffffff0) + { + break; + } + + if (debugLines.size - debugLines.offset < unitSize) + { + break; + } + + unit.buffer = debugLines.buffer + debugLines.offset; + unit.offset = 0; + unit.size = unitSize; + + dwarfParseUnitLines(pThis, unit, 0, NULL, NULL, NULL, NULL, NULL, NV_TRUE); + debugLines.offset += unitSize; + } +} + +/** + * + * @brief Walk the .debug_aranges elf section, consolidate adjacent ranges, + * and create a table with an entry for each range. + * + * @param[in/out] pThis + * An initialized resolver object. 
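 *
 * Each .debug_aranges unit is read as: a 4-byte unit length, a 2-byte version
 * (must be 2), a 4-byte .debug_info offset, an address size (must be 8) and a
 * selector size (must be 0), padding up to a 16-byte boundary, then
 * {address, length} pairs terminated by a {0, 0} pair. Adjacent pairs are
 * merged as they are read; for example (illustrative values) {0x1000, 0x200}
 * followed by {0x1200, 0x80} collapses into one entry covering
 * [0x1000, 0x1280). The function is deliberately run twice: first with
 * arangeTable still NULL purely to count entries, then again after the
 * caller has allocated the table, to fill it in.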
+ */ +static void dwarfBuildARangeTable(libosDebugResolver *pThis) +{ + DwarfStream debugARanges = { + pThis->debugARangesStart, 0, pThis->debugARangesEnd - pThis->debugARangesStart}; + DwarfStream unit; + NvU32 nUnit = 0; + NvU32 nARangeEntries = 0; + + for (nUnit = 1;; nUnit++) + { + NvU32 unit_size = 0xffffffff; + NvU32 debugInfoOffset = 0xffffffff; + NvU16 version = 0xffff; + NvU8 addressSize = 0xff; + NvU8 selectorSize = 0xff; + + NvU32 nUnitTupples; + NvU64 combAddress = 0; + NvU64 combLength = 0; + + if (!DWARF_READ(&debugARanges, &unit_size) || unit_size >= 0xfffffff0) + { + break; + } + + if (debugARanges.size - debugARanges.offset < unit_size) + { + break; + } + + unit.buffer = debugARanges.buffer + debugARanges.offset - sizeof unit_size; + unit.offset = sizeof unit_size; + unit.size = unit_size + sizeof unit_size; + debugARanges.offset += unit_size; + + if (!DWARF_READ(&unit, &version) || version != 2 || !DWARF_READ(&unit, &debugInfoOffset) || + !DWARF_READ(&unit, &addressSize) || !DWARF_READ(&unit, &selectorSize) || addressSize != 8 || + selectorSize != 0) + { + break; + } + + // Pad to natural alignment + unit.offset = (unit.offset + 15) & ~15; + + for (nUnitTupples = 0;; nUnitTupples++) + { + NvU64 address; + NvU64 length; + + if (!DWARF_READ(&unit, &address) || !DWARF_READ(&unit, &length)) + { + address = 0; + length = 0; + } + + if (address == combAddress + combLength) + { + combLength += length; + } + else + { + if (combAddress != 0 && combLength != 0) + { + if (pThis->arangeTable != NULL && nARangeEntries < pThis->nARangeEntries) + { + // Table has been allocated -- fill it in. + ARangeTupple *pEntry = &pThis->arangeTable[nARangeEntries]; + pEntry->address = combAddress; + pEntry->length = (NvU32)combLength; + pEntry->arangeUnit = nUnit; + } + nARangeEntries++; + } + + combAddress = address; + combLength = length; + } + + if (address == 0 && length == 0) + break; + } + } + + pThis->nARangeEntries = nARangeEntries; +} + +/** + * + * @brief Set a .debug_line reference in the table of consolidated aranges. + * + * @param[in] pThis + * An initialized resolver object. + * @param[in/out] unit + * A dwarf stream object initialized to the start of a unit in the + * .debug_lines elf section. + * @param[in] address + * Any virtual address contained in teh above .debug_lines unit. + */ +static void dwarfSetARangeTableLineUnit(libosDebugResolver *pThis, DwarfStream unit, NvU64 address) +{ + NvU32 foundUnit = 0; + NvU32 i; + + // Find entry in aranges table + for (i = 0; i < pThis->nARangeEntries; i++) + { + if ((address >= pThis->arangeTable[i].address) && + (address < pThis->arangeTable[i].address + pThis->arangeTable[i].length)) + { + foundUnit = pThis->arangeTable[i].arangeUnit; + break; + } + } + + if (foundUnit == 0) + { + return; + } + + // + // Walk backwards to first table entry with the same areange unit. + // Just in case -- I think i should always point to the first unit entry. + // + while ((i > 0) && (foundUnit == pThis->arangeTable[i - 1].arangeUnit)) + { + i--; + } + + // Walk forwards setting the line unit info for all entries with the same arange unit. 
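    // A single compilation unit may contribute several disjoint address
    // ranges (and therefore several consolidated table entries), so every
    // entry that originated from the same aranges unit receives the same
    // cached .debug_line unit pointer.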
+ for (; (foundUnit == pThis->arangeTable[i].arangeUnit); i++) + { + pThis->arangeTable[i].lineUnitBuffer = unit.buffer; + pThis->arangeTable[i].lineUnitSize = (NvU32)unit.size; + } +} diff --git a/src/common/uproc/os/libos-v2.0.0/debug/lines.h b/src/common/uproc/os/libos-v2.0.0/debug/lines.h new file mode 100644 index 000000000..0aabcb59e --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/debug/lines.h @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LINES_H_ +#define LINES_H_ + +#include "elf.h" + +typedef struct +{ + NvU64 address; + NvU32 length; + NvU32 arangeUnit; + const NvU8 *lineUnitBuffer; + NvU32 lineUnitSize; + +} ARangeTupple; + +typedef struct +{ + NvU8 *debugLineStart, *debugLineEnd; + NvU8 *debugARangesStart, *debugARangesEnd; + NvU8 *symtabStart, *symtabEnd; + NvU8 *strtabStart, *strtabEnd; + ARangeTupple *arangeTable; + NvU32 nARangeEntries; +} libosDebugResolver; + +void libosDebugResolverConstruct(libosDebugResolver *pThis, elf64_header *elf); +void libosDebugResolverDestroy(libosDebugResolver *pThis); + +// @note Optimized for single lookup (no search structures are created) +NvBool libosDebugResolveSymbolToVA(libosDebugResolver *pThis, const char *symbol, NvU64 *address); +NvBool libosDebugResolveSymbolToName( + libosDebugResolver *pThis, NvU64 symbolAddress, const char **name, NvU64 *offset); +NvBool libosDwarfResolveLine( + libosDebugResolver *pThis, NvU64 address, const char **directory, const char **filename, + NvU64 *outputLine, NvU64 *outputColumn, NvU64 *matchedAddress); +NvBool libosDebugGetSymbolRange( + libosDebugResolver *pThis, NvU64 symbolAddress, NvU64 *symStart, NvU64 *symEnd); + +#endif diff --git a/src/common/uproc/os/libos-v2.0.0/debug/logdecode.c b/src/common/uproc/os/libos-v2.0.0/debug/logdecode.c new file mode 100644 index 000000000..f898adadb --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/debug/logdecode.c @@ -0,0 +1,1390 @@ +/* + * ---------------------------------------------------------------------- + * Copyright (c) 2005-2014 Rich Felker, et al. + * Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * ---------------------------------------------------------------------- + */ + +#ifdef NVRM + +# include +# include // size_t + +# define printf(fmt, ...) portDbgExPrintfLevel(LEVEL_ERROR, fmt, ##__VA_ARGS__) +# define snprintf nvDbgSnprintf + +#else // NVRM + +# include +# include +# include +# include + +# define portStringCopy(d, ld, s, ls) strncpy(d, s, ld) +# define portStringLength(s) strlen(s) +# define portStringLengthSafe(s, l) strlen(s) + +# define portMemCopy(d, ld, s, ls) memcpy(d, s, ld) +# define portMemSet(d, v, l) memset(d, v, l) +# define portMemCmp(d, s, l) memcmp(d, s, l) +# define portMemAllocNonPaged(l) malloc(l) +# define portMemFree(p) free(p) + +# if defined(PROTODMP_BUILD) +int logPrintf(const char *, ...); // Forward declaration. TODO: allow libos code to #include protodmp headers? +# define printf(fmt, ...) logPrintf(fmt, ##__VA_ARGS__) +# elif defined(NVSYM_STANDALONE) +# define printf(fmt, ...) fprintf(logDecode->fout, fmt, ##__VA_ARGS__) +# elif defined(NVWATCH) +# pragma warning(push) +# pragma warning(disable : 4200) +# pragma warning(disable : 4244) +# pragma warning(disable : 4267) +# define snprintf _snprintf +# define printf(fmt, ...) do { logDecode->dest += sprintf(logDecode->dest, fmt, ##__VA_ARGS__); } while (NV_FALSE) +# endif + +#endif // NVRM + +#include "nvtypes.h" +#include "logdecode.h" + +#if LIBOS_LOG_DECODE_ENABLE + +# define SYM_DECODED_LINE_MAX_SIZE 1024 + +// These defines assume RISCV with -mabi=lp64/lp64f/lp64d +# define LOG_INT_MAX NV_S32_MAX +# define LOG_UINT_MAX NV_U32_MAX +# define LOG_LONG_MAX NV_S64_MAX +# define LOG_ULONG_MAX NV_U64_MAX +# define LOG_INTMAX_MAX NV_S64_MAX +# define LOG_UINTMAX_MAX NV_U64_MAX +# define LOG_LLONG_MAX NV_S64_MAX +# define LOG_ULLONG_MAX NV_U64_MAX +# define LOG_SIZE_MAX NV_U64_MAX +# define NL_ARGMAX 32 + +/* Some useful macros */ +# define MAX(a, b) ((int)(a) > (int)(b) ? (int)(a) : (int)(b)) +# define MIN(a, b) ((int)(a) < (int)(b) ? (int)(a) : (int)(b)) + +# define IS_DIGIT(c) (((c) >= '0') && ((c) <= '9')) + +/* Convenient bit representation for modifier flags, which all fall + * within 31 codepoints of the space character. 
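 *
 * For example, '0' sits 16 codepoints above ' ', so ZERO_PAD is bit 16
 * (1U << 16); a candidate flag character c is then tested with a single
 * FLAGMASK & (1U << (c - ' ')) check in the parser below.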
*/ +# define ALT_FORM (1U << ('#' - ' ')) +# define ZERO_PAD (1U << ('0' - ' ')) +# define LEFT_ADJ (1U << ('-' - ' ')) +# define PAD_POS (1U << (' ' - ' ')) +# define MARK_POS (1U << ('+' - ' ')) +# define GROUPED (1U << ('\'' - ' ')) +# define FLAGMASK (ALT_FORM | ZERO_PAD | LEFT_ADJ | PAD_POS | MARK_POS | GROUPED) +# if LOG_UINT_MAX == LOG_ULONG_MAX +# define LOG_LONG_IS_INT +# endif +# if LOG_SIZE_MAX != LOG_ULONG_MAX || LOG_UINTMAX_MAX != LOG_ULLONG_MAX +# define LOG_ODD_TYPES +# endif + +#if defined(LOG_LONG_IS_INT) || defined(LOG_ODD_TYPES) +#error "Type sizes don't match RISCV lp64 ABI!" +#endif // defined(LOG_LONG_IS_INT) || defined(LOG_ODD_TYPES) + +/* State machine to accept length modifiers + conversion specifiers. + * Result is 0 on failure, or an argument type to pop on success. */ +enum +{ + LOG_BARE, + LOG_LPRE, + LOG_LLPRE, + LOG_HPRE, + LOG_HHPRE, + LOG_BIGLPRE, + LOG_ZTPRE, + LOG_JPRE, + LOG_STOP, + LOG_PTR, + LOG_INT, + LOG_UINT, + LOG_ULLONG, +# ifndef LOG_LONG_IS_INT + LOG_LONG, + LOG_ULONG, +# else +# define LOG_LONG LOG_INT +# define LOG_ULONG LOG_UINT +# endif + LOG_SHORT, + LOG_USHORT, + LOG_CHAR, + LOG_UCHAR, +# ifdef LOG_ODD_TYPES + LOG_LLONG, + LOG_SIZET, + LOG_IMAX, + LOG_UMAX, + LOG_PDIFF, + LOG_UIPTR, +# else +# define LOG_LLONG LOG_ULLONG +# define LOG_SIZET LOG_ULONG +# define LOG_IMAX LOG_LLONG +# define LOG_UMAX LOG_ULLONG +# define LOG_PDIFF LOG_LONG +# define LOG_UIPTR LOG_ULONG +# endif + LOG_NOARG, + LOG_MAXSTATE +}; +# define S(i,x) states[i][(x) - 'A'] +static unsigned char states[]['z' - 'A' + 1] = { + { + 0, + }, + { + 0, + }, + { + 0, + }, + { + 0, + }, + { + 0, + }, + { + 0, + }, + { + 0, + }, + { + 0, + },}; +# define OOB(x) ((unsigned)(x) - 'A' > 'z' - 'A') +union arg +{ + NvU64 i; + void *p; +}; + +/** + * @brief Initialize the print state struct. 
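 *
 * The states[][] table is a small state machine over length modifiers and
 * conversion specifiers: row 0 is the bare state and each prefix character
 * selects the next row ('l' moves to row 1, a second 'l' to row 2, 'h' to
 * row 3, and so on). Parsing "%llx", for instance, walks
 * S(0,'l') = LOG_LPRE, S(1,'l') = LOG_LLPRE, S(2,'x') = LOG_ULLONG; the final
 * value tells the argument-popping code how to interpret the 64-bit value
 * pulled from the log record.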
+ */ +static void states_init(void) +{ + { + /* 0: bare types */ + S(0,'d') = LOG_INT, S(0,'i') = LOG_INT, S(0,'o') = LOG_UINT, S(0,'u') = LOG_UINT, S(0,'x') = LOG_UINT, + S(0,'X') = LOG_UINT, S(0,'c') = LOG_CHAR, S(0,'C') = LOG_INT, S(0,'s') = LOG_PTR, S(0,'S') = LOG_PTR, + S(0,'p') = LOG_UIPTR, S(0,'n') = LOG_PTR, S(0,'a') = LOG_UIPTR, /* NVIDIA decoded address extension */ + S(0,'m') = LOG_NOARG, S(0,'l') = LOG_LPRE, S(0,'h') = LOG_HPRE, S(0,'L') = LOG_BIGLPRE, S(0,'z') = LOG_ZTPRE, + S(0,'j') = LOG_JPRE, S(0,'t') = LOG_ZTPRE; + } + { + /* 1: l-prefixed */ + S(1,'d') = LOG_LONG, + S(1,'i') = LOG_LONG, + S(1,'o') = LOG_ULONG, + S(1,'u') = LOG_ULONG, + S(1,'x') = LOG_ULONG, + S(1,'X') = LOG_ULONG, + S(1,'c') = LOG_INT, + S(1,'s') = LOG_PTR, + S(1,'n') = LOG_PTR, + S(1,'l') = LOG_LLPRE; + } + { + /* 2: ll-prefixed */ + S(2,'d') = LOG_LLONG, + S(2,'i') = LOG_LLONG, + S(2,'o') = LOG_ULLONG, + S(2,'u') = LOG_ULLONG, + S(2,'x') = LOG_ULLONG, + S(2,'X') = LOG_ULLONG, + S(2,'n') = LOG_PTR; + } + { + /* 3: h-prefixed */ + S(3,'d') = LOG_SHORT, + S(3,'i') = LOG_SHORT, + S(3,'o') = LOG_USHORT, + S(3,'u') = LOG_USHORT, + S(3,'x') = LOG_USHORT, + S(3,'X') = LOG_USHORT, + S(3,'n') = LOG_PTR, + S(3,'h') = LOG_HHPRE; + } + { + /* 4: hh-prefixed */ + S(4,'d') = LOG_CHAR, + S(4,'i') = LOG_CHAR, + S(4,'o') = LOG_UCHAR, + S(4,'u') = LOG_UCHAR, + S(4,'x') = LOG_UCHAR, + S(4,'X') = LOG_UCHAR, + S(4,'n') = LOG_PTR; + } + { + /* 5: L-prefixed */ + S(5,'n') = LOG_PTR; + } + { + /* 6: z- or t-prefixed (assumed to be same size) */ + S(6,'d') = LOG_PDIFF, + S(6,'i') = LOG_PDIFF, + S(6,'o') = LOG_SIZET, + S(6,'u') = LOG_SIZET, + S(6,'x') = LOG_SIZET, + S(6,'X') = LOG_SIZET, + S(6,'n') = LOG_PTR; + } + { + /* 7: j-prefixed */ + S(7,'d') = LOG_IMAX, + S(7,'i') = LOG_IMAX, + S(7,'o') = LOG_UMAX, + S(7,'u') = LOG_UMAX, + S(7,'x') = LOG_UMAX, + S(7,'X') = LOG_UMAX, + S(7,'n') = LOG_PTR; + } +} + +/** + * + * @brief Print out the line buffer and reset the current line buffer pointer. + * + * @param[in/out] logDecode + * Structure used to decode log. Contains both data set at init and working fields. + */ +static void flush_line_buffer(LIBOS_LOG_DECODE *logDecode) +{ + if (logDecode->curLineBufPtr != logDecode->lineBuffer) + { + /* Make sure line is NULL terminated */ + *logDecode->curLineBufPtr = 0; + printf("%s", logDecode->lineBuffer); + logDecode->curLineBufPtr = logDecode->lineBuffer; + } +} + +/** + * + * @brief Copy string to the line buffer. + * + * Copy string until 0 encountered up to maximum length l. + * Flush the line buffer if it gets full. + * + * @param[in] s + * String to copy. May be zero-terminated. + * @param[in] l + * Maximum length to copy, if zero is not encountered first. + * @param[in/out] logDecode + * Structure used to decode log. Contains both data set at init and working fields. 
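 *
 * If the line buffer fills up before a terminating newline is copied,
 * flush_line_buffer() prints the partial line and copying resumes at the
 * start of the buffer, so a single logical log line may be emitted across
 * more than one printf call.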
+ */ +static void emit_string(const char *s, int l, LIBOS_LOG_DECODE *logDecode) +{ + char *line_buffer_end = logDecode->lineBuffer + LIBOS_LOG_LINE_BUFFER_SIZE - 1; + for (; (l > 0) && (*s != 0); s++) + { + if (logDecode->curLineBufPtr >= line_buffer_end) + flush_line_buffer(logDecode); + + *logDecode->curLineBufPtr++ = *s; + l--; + } +} + +static void s_getSymbolDataStr(LIBOS_LOG_DECODE *logDecode, char *decodedLine, NvLength decodedLineSize, NvUPtr addr) +{ + const char *directory; + const char *filename; + const char *name; + NvU64 offset; + NvU64 outputLine; + NvU64 outputColumn; + NvU64 matchedAddress; + if (!libosDebugResolveSymbolToName(&logDecode->resolver, addr, &name, &offset)) + { + name = 0; + } + decodedLine[decodedLineSize - 1U] = '\0'; + + if (libosDwarfResolveLine( + &logDecode->resolver, addr, &directory, &filename, &outputLine, &outputColumn, + &matchedAddress)) + { + if (name) + { + snprintf( + decodedLine, decodedLineSize - 1, "%s+%lld (%s:%lld)", name, offset, filename, + outputLine); + } + else + { + snprintf(decodedLine, decodedLineSize - 1, "??? (%s:%lld)", filename, outputLine); + } + } + else + { + snprintf(decodedLine, decodedLineSize - 1, "%s+%lld", name, offset); + } +} + +/** + * + * @brief Pad a field with ' ' or '0'. + * + * This routine is called with different options for left-justified and + * right-justified fields. + * + * @param[in] c + * Pad with this character. Usually '0' or ' '. + * @param[in] w + * Desired width after padding. + * @param[in] l + * Length of field so far. Pad for w - l. + * @param[in] fl + * Modifier flags. See FLAGMASK above. + * @param[in/out] logDecode + * Structure used to decode log. Contains both data set at init and working fields. + */ +static void pad(char c, int w, int l, int fl, LIBOS_LOG_DECODE *logDecode) +{ + char *line_buffer_end = logDecode->lineBuffer + LIBOS_LOG_LINE_BUFFER_SIZE - 1; + if (fl & (LEFT_ADJ | ZERO_PAD) || l >= w) + return; + l = w - l; + for (; l > 0; l--) + { + if (logDecode->curLineBufPtr >= line_buffer_end) + flush_line_buffer(logDecode); + *logDecode->curLineBufPtr++ = c; + } +} +static char *fmt_x(NvU64 x, char *s, int lower) +{ + static const char xdigits[16] = {"0123456789ABCDEF"}; + for (; x; x >>= 4) + *--s = xdigits[(x & 15)] | lower; + return s; +} +static char *fmt_o(NvU64 x, char *s) +{ + for (; x; x >>= 3) + *--s = '0' + (x & 7); + return s; +} +static char *fmt_u(NvU64 x, char *s) +{ + unsigned long y; + for (; x > LOG_ULONG_MAX; x /= 10) + *--s = '0' + x % 10; + for (y = x; y; y /= 10) + *--s = '0' + y % 10; + return s; +} +static int getint(char **s) +{ + int i; + for (i = 0; IS_DIGIT(**s); (*s)++) + i = 10 * i + (**s - '0'); + return i; +} +static int libos_printf_a( + LIBOS_LOG_DECODE *logDecode, LIBOS_LOG_DECODE_RECORD *pRec, const char *fmt, const char *filename) +{ + NvU64 *args = pRec->args; + NvU64 arg_count = pRec->meta->argumentCount; + union arg nl_arg[NL_ARGMAX + 1] = {{0}}; + char *a, *z, *s = (char *)fmt; + unsigned l10n = 0, fl; + int w, p; + union arg arg = {0}; + int argpos; + unsigned st, ps; + int cnt = 0, l = 0; + char buf[sizeof(NvU64) * 3 + 3]; + const char *prefix; + int t, pl; + NvU64 arg_index = 0; + char wc[2]; + char *line_buffer_end = logDecode->lineBuffer + LIBOS_LOG_LINE_BUFFER_SIZE - 1; + NvBool bResolvePtrVal = NV_FALSE; + + for (;;) + { + /* Update output count, end loop when fmt is exhausted */ + if (cnt >= 0) + { + if (l > LOG_INT_MAX - cnt) + { + cnt = -1; + } + else + cnt += l; + } + +# if defined(NVRM) + if (logDecode->curLineBufPtr == 
logDecode->lineBuffer) + { + // Prefix every line with NVRM GPUn Ucode-task: filename(lineNumber): + snprintf( + logDecode->curLineBufPtr, LIBOS_LOG_LINE_BUFFER_SIZE - 1, + "NVRM GPU%u %s-%s: %s(%u): ", pRec->log->gpuInstance, + logDecode->sourceName, pRec->log->taskPrefix, filename, pRec->meta->lineNumber); + logDecode->curLineBufPtr += portStringLength(logDecode->curLineBufPtr); + } +# else + if (logDecode->curLineBufPtr == logDecode->lineBuffer) + { + // Prefix every line with GPUn Ucode-task: filename(lineNumber): + snprintf( + logDecode->curLineBufPtr, LIBOS_LOG_LINE_BUFFER_SIZE - 1, + "T:%llu GPU%u %s-%s: %s(%u): ", pRec->timeStamp, + pRec->log->gpuInstance, logDecode->sourceName, pRec->log->taskPrefix, + filename, pRec->meta->lineNumber); + logDecode->curLineBufPtr += portStringLength(logDecode->curLineBufPtr); + } +# endif + + /* Handle literal text and %% format specifiers */ + for (; *s; s++) + { + if (logDecode->curLineBufPtr >= line_buffer_end) + flush_line_buffer(logDecode); + + if (*s == '%') + { + if (s[1] == '%') + s++; + else + break; + } + + *logDecode->curLineBufPtr++ = *s; + + if (*s == '\n') + break; + } + + if (*s == '\n') + { + flush_line_buffer(logDecode); + s++; + + if (!*s) + break; + else + continue; + } + if (!*s) + break; + + a = s; + z = s; + l = z - a; + if (IS_DIGIT(s[1]) && s[2] == '$') + { + l10n = 1; + argpos = s[1] - '0'; + s += 3; + } + else + { + argpos = -1; + s++; + } + /* Read modifier flags */ + for (fl = 0; (unsigned)*s - ' ' < 32 && (FLAGMASK & (1U << (*s - ' '))); s++) + fl |= 1U << (*s - ' '); + /* Read field width */ + if (*s == '*') + { + if (IS_DIGIT(s[1]) && s[2] == '$') + { + l10n = 1; + w = nl_arg[s[1] - '0'].i; + s += 3; + } + else if (!l10n) + { + if (arg_index >= arg_count) + return 0; + w = args[arg_index++]; + s++; + } + else + return -1; + if (w < 0) + fl |= LEFT_ADJ, w = -w; + } + else if ((w = getint(&s)) < 0) + return -1; + /* Read precision */ + if (*s == '.' && s[1] == '*') + { + if (IS_DIGIT(s[2]) && s[3] == '$') + { + p = nl_arg[s[2] - '0'].i; + s += 4; + } + else if (!l10n) + { + if (arg_index >= arg_count) + return 0; + p = args[arg_index++]; + s += 2; + } + else + return -1; + } + else if (*s == '.') + { + s++; + p = getint(&s); + } + else + p = -1; + /* Format specifier state machine */ + st = 0; + do + { + if (OOB(*s)) + return -1; + ps = st; + st = S(st, *s++); + } while (st - 1 < LOG_STOP); + if (!st) + return -1; + /* Check validity of argument type (nl/normal) */ + if (st == LOG_NOARG) + { + if (argpos >= 0) + return -1; + } + else + { + if (argpos < 0) + { + if (arg_index >= arg_count) + return 0; + arg.i = args[arg_index++]; + } + } + z = buf + sizeof(buf); + prefix = "-+ 0X0x"; + pl = 0; + t = s[-1]; + /* Transform ls,lc -> S,C */ + if (ps && (t & 15) == 3) + t &= ~32; + /* - and 0 flags are mutually exclusive */ + if (fl & LEFT_ADJ) + fl &= ~ZERO_PAD; + + bResolvePtrVal = NV_FALSE; + switch (t) + { + case 'n': +#if !LIBOS_LOG_DECODE_ENABLE + // We can't support %n when decoding, these pointers do not exist here! 
+ switch (ps) + { + case LOG_BARE: + *(int *)arg.p = cnt; + break; + case LOG_LPRE: + *(long *)arg.p = cnt; + break; + case LOG_LLPRE: + *(long long *)arg.p = cnt; + break; + case LOG_HPRE: + *(unsigned short *)arg.p = cnt; + break; + case LOG_HHPRE: + *(unsigned char *)arg.p = cnt; + break; + case LOG_ZTPRE: + *(size_t *)arg.p = cnt; + break; + case LOG_JPRE: + *(NvU64 *)arg.p = cnt; + break; + } +#endif // !LIBOS_LOG_DECODE_ENABLE + continue; + case 'p': + t = 'x'; + fl |= ALT_FORM; + + if (logDecode->bPtrSymbolResolve) + { + bResolvePtrVal = NV_TRUE; + } + case 'x': + case 'X': + a = fmt_x(arg.i, z, t & 32); + if (arg.i && (fl & ALT_FORM)) + prefix += (t >> 4), pl = 2; + if (0) + { + case 'o': + a = fmt_o(arg.i, z); + if ((fl & ALT_FORM) && arg.i) + prefix += 5, pl = 1; + } + if (0) + { + case 'd': + case 'i': + pl = 1; + if (arg.i > LOG_INTMAX_MAX) + { + arg.i = -(NvS64)arg.i; + } + else if (fl & MARK_POS) + { + prefix++; + } + else if (fl & PAD_POS) + { + prefix += 2; + } + else + pl = 0; + case 'u': + a = fmt_u(arg.i, z); + } + if (p >= 0) + fl &= ~ZERO_PAD; + if (!arg.i && !p) + { + a = z; + break; + } + p = MAX(p, z - a + !arg.i); + break; + case 'c': + *(a = z - (p = 1)) = arg.i; + fl &= ~ZERO_PAD; + break; + case 'a': + { + static char symDecodedLine[SYM_DECODED_LINE_MAX_SIZE]; + + s_getSymbolDataStr(logDecode, symDecodedLine, sizeof(symDecodedLine), (NvUPtr)arg.i); + + // Set common vars + a = &symDecodedLine[0]; + z = &symDecodedLine[sizeof(symDecodedLine) - 1]; + } + goto print_string; + case 's': + a = (char *)libosElfReadStringVirtual(logDecode->elf, (NvUPtr)arg.p); + if (!a) + a = (char *)"(bad-pointer)"; + print_string: + p = portStringLengthSafe(a, p); + z = a + p; + fl &= ~ZERO_PAD; + break; + case 'C': + wc[0] = arg.i; + wc[1] = 0; + arg.p = wc; + p = -1; + } + if (p < z - a) + p = z - a; + if (w < pl + p) + w = pl + p; + pad(' ', w, pl + p, fl, logDecode); + emit_string(prefix, pl, logDecode); + pad('0', w, pl + p, fl ^ ZERO_PAD, logDecode); + pad('0', p, z - a, 0, logDecode); + emit_string(a, z - a, logDecode); + + if (bResolvePtrVal) + { + // Append symbol info to ptr addr value in the following format: 0x123 + + static char symDecodedLine[SYM_DECODED_LINE_MAX_SIZE]; + NvLength symDecodedLineLen; + NvU32 prefixLen = 0; + NvU32 suffixLen = 0; + + portMemSet(symDecodedLine, 0, SYM_DECODED_LINE_MAX_SIZE); + + symDecodedLine[prefixLen++] = ' '; + +#ifdef NVWATCH + // Windbg nvwatch uses DML, so < becomes < + prefixLen += sprintf(symDecodedLine + prefixLen, "<"); +#else // NVWATCH + symDecodedLine[prefixLen++] = '<'; +#endif // NVWATCH + + s_getSymbolDataStr(logDecode, symDecodedLine + prefixLen, sizeof(symDecodedLine) - prefixLen, (NvUPtr)arg.i); + + symDecodedLineLen = portStringLength(symDecodedLine); + symDecodedLineLen = MIN(symDecodedLineLen, sizeof(symDecodedLine) - 1); // just in case + +#ifdef NVWATCH + // Windbg nvwatch uses DML, so > becomes > + suffixLen += sprintf(symDecodedLine + symDecodedLineLen + suffixLen, ">"); +#else // NVWATCH + symDecodedLine[symDecodedLineLen + (suffixLen++)] = '>'; +#endif // NVWATCH + + symDecodedLine[symDecodedLineLen + suffixLen] = '\0'; + + // Set common vars + a = &symDecodedLine[0]; + z = &symDecodedLine[sizeof(symDecodedLine) - 1]; + + emit_string(a, z - a, logDecode); + } + + pad(' ', w, pl + p, fl ^ LEFT_ADJ, logDecode); + l = w; + } + return cnt; +} + +/** + * + * @brief Print log records from scratch buffer. + * + * Prints log records from the scratch buffer scanning forwards. 
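 *
 * Each record in the scratch buffer is a LIBOS_LOG_DECODE_RECORD laid out as
 * {log pointer, metadata pointer, timestamp, args[argumentCount]}, so the
 * scan advances by meta->argumentCount + LIBOS_LOG_DECODE_RECORD_BASE 64-bit
 * words per record.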
+ * + * @param[in/out] logDecode + * Pointer to LIBOS_LOG_DECODE structure. + * @param[in] scratchBuffer + * Pointer to first byte in scratch buffer that contains valid data. + * @param[int] valid_elements + * Number of valid elements in the scratch buffer. + */ +static void libosPrintLogRecords(LIBOS_LOG_DECODE *logDecode, NvU64 *scratchBuffer, NvU64 valid_elements) +{ + NvU64 index; + NvU64 i; + + if (valid_elements == 0) + return; + + for (index = 0; index < valid_elements;) + { + LIBOS_LOG_DECODE_RECORD *pRec = (LIBOS_LOG_DECODE_RECORD *)&scratchBuffer[index]; + const char *format; + const char *filename; + + if (pRec->meta == NULL) + { + printf( + "**** Bad metadata. Lost %lld entries from %s ****\n", valid_elements - index, + logDecode->sourceName); + return; + } + + // Locate format string + format = libosElfReadStringVirtual(logDecode->elf, (NvU64)(NvUPtr)pRec->meta->format); + if (!format) + break; + + // Locate filename + filename = libosElfReadStringVirtual(logDecode->elf, (NvU64)(NvUPtr)pRec->meta->filename); + if (!filename || !filename[0]) + filename = "unknown"; + + // Strip off path + for (i = portStringLength(filename) - 1; i > 0; i--) + { + if (filename[i] == '/') + { + i++; + break; + } + } + filename = &filename[i]; + + // Format + libos_printf_a(logDecode, pRec, format, filename); + + // Advance + index += pRec->meta->argumentCount + LIBOS_LOG_DECODE_RECORD_BASE; + } +} + +# define LIBOS_LOG_TIMESTAMP_END 0 +# define LIBOS_LOG_TIMESTAMP_MAX NV_U64_MAX + +/** + * + * @brief Extract a single log record from one log buffer. + * + * This routine is designed to scan backwards from the put pointer. It changes + * the order of the parameters from backward scanning order {args, meta data, + * timestamp} to forward scanning order {pLog, meta data, timestamp args}. + * It also decodes meta data into a pointer. + * + * pLog->putIter points to the start of the entry last successfully extracted. + * + * @param[in/out] logDecode + * Pointer to LIBOS_LOG_DECODE structure. + * @param[in/out] pLog + * Pointer to LIBOS_LOG_DECODE_LOG structure for the log to extract from. + * + * timeStamp is set to LIBOS_LOG_TIMESTAMP_END (0) when there is an error or we + * run out of records. + */ +static void libosExtractLog_ReadRecord(LIBOS_LOG_DECODE *logDecode, LIBOS_LOG_DECODE_LOG *pLog) +{ + NvU64 log_entries = pLog->logBufferSize / sizeof(NvU64) - 1 /* -1 for PUT pointer */; + NvU64 previousPut = pLog->previousPut; + NvU64 i = pLog->putIter; + NvU64 argCount; + NvU64 j; + + if (pLog->putIter == pLog->previousPut) + { + pLog->record.timeStamp = LIBOS_LOG_TIMESTAMP_END; + return; + } + + // If we wrapped, adjust local copy of previousPut. + if (previousPut + log_entries < pLog->putCopy) + previousPut = pLog->putCopy - log_entries; + + pLog->record.log = pLog; + + if (logDecode->bSynchronousBuffer) + { + // Fake timestamp for sync buffer, marked as different from the "end" timestamp + pLog->record.timeStamp = LIBOS_LOG_TIMESTAMP_MAX; + } + else + { + // Check whether record goes past previous put (buffer wrapped). + if (i < previousPut + 1) + goto buffer_wrapped; + + pLog->record.timeStamp = pLog->physicLogBuffer[1 + (--i % log_entries)]; + } + + // Check whether record goes past previous put (buffer wrapped). + if (i < previousPut + 1) + goto buffer_wrapped; + + pLog->record.meta = (libosLogMetadata *)libosElfReadVirtual( + logDecode->elf, pLog->physicLogBuffer[1 + (--i % log_entries)], sizeof(libosLogMetadata)); + + // Sanity check meta data. 
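    // The metadata pointer above was translated through libosElfReadVirtual,
    // so NULL means the logged value did not fall inside any loadable ELF
    // segment; an argumentCount larger than LIBOS_LOG_MAX_ARGS likewise
    // indicates the word just read was not actually a metadata pointer (for
    // example, because the record was overwritten while it was being read).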
+ if (pLog->record.meta == NULL || pLog->record.meta->argumentCount > LIBOS_LOG_MAX_ARGS) + { + printf( + "**** Bad metadata. Lost %lld entries from %s-%s ****\n", pLog->putIter - previousPut, + logDecode->sourceName, pLog->taskPrefix); + goto error_ret; + } + + argCount = pLog->record.meta->argumentCount; + + // Check whether record goes past previous put (buffer wrapped). + if (i < previousPut + argCount) + goto buffer_wrapped; + + for (j = argCount; j > 0; j--) + { + pLog->record.args[j - 1] = pLog->physicLogBuffer[1 + (--i % log_entries)]; + } + + pLog->putIter = i; + return; + +buffer_wrapped: + // Put pointer wrapped and caught up to us. This means we lost entries. + printf( + "**** Buffer wrapped. Lost %lld entries from %s-%s ****\n", pLog->putIter - pLog->previousPut, + logDecode->sourceName, pLog->taskPrefix); + +error_ret: + pLog->record.timeStamp = LIBOS_LOG_TIMESTAMP_END; + return; +} + +/** + * + * @brief Extract all log records from all log buffers. + * + * Copy log records from all buffers to the scratch buffer in order of time stamp. + * + * @param[in/out] logDecode + * Pointer to LIBOS_LOG_DECODE structure. + */ +static void libosExtractLogs_decode(LIBOS_LOG_DECODE *logDecode) +{ + LIBOS_LOG_DECODE_LOG *pLog; + LIBOS_LOG_DECODE_RECORD *pPrevRec = NULL; + NvU64 timeStamp; + NvU64 scratchSize = logDecode->scratchBufferSize / sizeof(NvU64); + NvU64 dst = scratchSize; + NvU64 recSize; + NvU64 i; + + // Initialize iterators and prime the pump. + for (i = 0; i < logDecode->numLogBuffers; i++) + { + pLog = &logDecode->log[i]; + + if (!pLog->physicLogBuffer) + { + printf("logDecode->physicLogBuffer is NULL\n"); + return; + } + + pLog->putCopy = pLog->physicLogBuffer[0]; + pLog->putIter = pLog->putCopy; + libosExtractLog_ReadRecord(logDecode, pLog); + } + + // Copy records in order of highest time stamp. + for (;;) + { + timeStamp = LIBOS_LOG_TIMESTAMP_END; + pLog = NULL; // for debugging. + + // Find log with the highest timestamp. + for (i = 0; i < logDecode->numLogBuffers; i++) + { + if (timeStamp < logDecode->log[i].record.timeStamp) + { + pLog = &logDecode->log[i]; + timeStamp = pLog->record.timeStamp; + } + } + + if (timeStamp == LIBOS_LOG_TIMESTAMP_END) + break; + + // Copy records with highest timestamp. + recSize = pLog->record.meta->argumentCount + LIBOS_LOG_DECODE_RECORD_BASE; + + // Skip duplicate records. The same record can be in both wrap and nowrap buffers. + if ((pPrevRec == NULL) || + (pPrevRec->log->gpuInstance != pLog->gpuInstance) || + (portMemCmp(&pPrevRec->meta, &pLog->record.meta, (recSize - 1) * sizeof(NvU64)) != 0)) + { + // Record is not identical to previous record. + if (dst < recSize) + { + printf("**** scratch buffer overflow. lost entries ****\n"); + break; + } + + dst -= recSize; + portMemCopy( + &logDecode->scratchBuffer[dst], recSize * sizeof(NvU64), &pLog->record, + recSize * sizeof(NvU64)); + + pPrevRec = (LIBOS_LOG_DECODE_RECORD *)&logDecode->scratchBuffer[dst]; + } + + // Read in the next record from the log we just copied. + libosExtractLog_ReadRecord(logDecode, pLog); + } + + // Update the previous put pointers. + for (i = 0; i < logDecode->numLogBuffers; i++) + logDecode->log[i].previousPut = logDecode->log[i].putCopy; + + // Print out the copied records. 
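    // Records were packed from the end of the scratch buffer toward the
    // front, so scratchBuffer[dst] is the oldest record and scratchSize - dst
    // is the number of valid 64-bit words handed to the forward-scanning
    // printer.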
+ if (dst != scratchSize) + libosPrintLogRecords(logDecode, &logDecode->scratchBuffer[dst], scratchSize - dst); +} + +#endif // LIBOS_LOG_DECODE_ENABLE + +#if LIBOS_LOG_TO_NVLOG + +# define LIBOS_LOG_NVLOG_BUFFER_TAG(_name, _i) NvU32_BUILD((_name)[2], (_name)[1], (_name)[0], (NvU8)('1' + _i)) + +static NvBool libosCopyLogToNvlog_nowrap(LIBOS_LOG_DECODE_LOG *pLog) +{ + NVLOG_BUFFER *pNvLogBuffer = NvLogLogger.pBuffers[pLog->hNvLogNoWrap]; + NV_ASSERT_OR_RETURN((pLog->hNvLogNoWrap != 0) && (pNvLogBuffer != NULL), NV_FALSE); + + LIBOS_LOG_NVLOG_BUFFER *pNoWrapBuf = (LIBOS_LOG_NVLOG_BUFFER *)pNvLogBuffer->data; + NvU64 putCopy = pLog->physicLogBuffer[0]; + NvU64 putOffset = putCopy * sizeof(NvU64) + sizeof(NvU64); + + if (putOffset == pNvLogBuffer->pos) + { + // No new data + return NV_TRUE; + } + + if (putOffset > pNvLogBuffer->size - NV_OFFSETOF(LIBOS_LOG_NVLOG_BUFFER, data) - sizeof(NvU64)) + { + // Are we done filling nowrap? + return NV_FALSE; + } + + NvU64 len = putOffset - pNvLogBuffer->pos; + NvU8 *pSrc = ((NvU8 *)pLog->physicLogBuffer) + pNvLogBuffer->pos; + NvU8 *pDst = pNoWrapBuf->data + pNvLogBuffer->pos; + portMemCopy(pDst, len, pSrc, len); + pNvLogBuffer->pos = putOffset; // TODO: usage of NVLOG_BUFFER::pos is sus here, reconsider? + *(NvU64 *)(pNoWrapBuf->data) = putCopy; + return NV_TRUE; +} + +static NvBool libosCopyLogToNvlog_wrap(LIBOS_LOG_DECODE_LOG *pLog) +{ + NVLOG_BUFFER *pNvLogBuffer = NvLogLogger.pBuffers[pLog->hNvLogWrap]; + NV_ASSERT_OR_RETURN((pLog->hNvLogWrap != 0) && (pNvLogBuffer != NULL), NV_FALSE); + + LIBOS_LOG_NVLOG_BUFFER *pWrapBuf = (LIBOS_LOG_NVLOG_BUFFER *)pNvLogBuffer->data; + NvU64 putCopy = pLog->physicLogBuffer[0]; + NvU64 putOffset = putCopy * sizeof(NvU64) + sizeof(NvU64); + + portMemCopy(pWrapBuf->data, pLog->logBufferSize, (void *)pLog->physicLogBuffer, pLog->logBufferSize); + pNvLogBuffer->pos = putOffset; // TODO: usage of NVLOG_BUFFER::pos is sus here, reconsider? + return NV_TRUE; +} + +static void libosExtractLogs_nvlog(LIBOS_LOG_DECODE *logDecode, NvBool bSyncNvLog) +{ + NvU64 i; + for (i = 0; i < logDecode->numLogBuffers; i++) + { + LIBOS_LOG_DECODE_LOG *pLog = &logDecode->log[i]; + + if (pLog->bNvLogNoWrap) + { + pLog->bNvLogNoWrap = libosCopyLogToNvlog_nowrap(pLog); + } + + if (bSyncNvLog) + { + libosCopyLogToNvlog_wrap(pLog); + } + } +} + +#endif // LIBOS_LOG_TO_NVLOG + +/** + * + * @brief Helper functions for creating and destroying log buffers. + * + * Call the functions in this order: + * libosLogCreate - Just clears the LIBOS_LOG_DECODE structure. + * libosLogAddLog - Call once for each log buffer. + * libosLogInit - Sizes/allocates scratch buffer, and inits resolver. + * ... + * libosLogDestroy - Destroys resolver and frees scratch buffer. 
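 *
 * A minimal usage sketch for the kernel (NVRM) build, where libosLogCreate
 * takes only the decode structure; the buffer, size and ELF image names here
 * are placeholders for illustration, not values defined in this file:
 *
 *   LIBOS_LOG_DECODE logDecode;
 *   libosLogCreate(&logDecode);
 *   libosLogAddLog(&logDecode, pTaskLogBuffer, taskLogBufferSize, 0, "INIT");
 *   libosLogInit(&logDecode, pGspElfImage);
 *   // ... periodically, or on demand:
 *   libosExtractLogs(&logDecode, NV_FALSE);
 *   // ... at teardown:
 *   libosLogDestroy(&logDecode);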
* + */ + +static void libosLogCreateGeneric(LIBOS_LOG_DECODE *logDecode) +{ +#if LIBOS_LOG_DECODE_ENABLE + states_init(); +#endif // LIBOS_LOG_DECODE_ENABLE + + portMemSet(logDecode, 0, sizeof *logDecode); + + // Default name value: GSP + portStringCopy(logDecode->sourceName, sizeof(logDecode->sourceName), "GSP", sizeof(logDecode->sourceName)); +} +static void libosLogCreateExGeneric(LIBOS_LOG_DECODE *logDecode, const char *pSourceName) +{ +#if LIBOS_LOG_DECODE_ENABLE + states_init(); +#endif // LIBOS_LOG_DECODE_ENABLE + + portMemSet(logDecode, 0, sizeof *logDecode); + + // Extended args - set name value + portStringCopy(logDecode->sourceName, sizeof(logDecode->sourceName), pSourceName, sizeof(logDecode->sourceName)); +} + +#if defined(NVSYM_STANDALONE) && !defined(PROTODMP_BUILD) +void libosLogCreate(LIBOS_LOG_DECODE *logDecode, FILE *fout) +{ + libosLogCreateGeneric(logDecode); + logDecode->fout = fout; +} +void libosLogCreateEx(LIBOS_LOG_DECODE *logDecode, const char *pSourceName, FILE *fout) +{ + libosLogCreateExGeneric(logDecode, pSourceName); + logDecode->fout = fout; +} +#elif defined(NVWATCH) +void libosLogCreate(LIBOS_LOG_DECODE *logDecode, char *dest) +{ + libosLogCreateGeneric(logDecode); + logDecode->dest = dest; +} +void libosLogCreateEx(LIBOS_LOG_DECODE *logDecode, const char *pSourceName, char *dest) +{ + libosLogCreateExGeneric(logDecode, pSourceName); + logDecode->dest = dest; +} +#else +void libosLogCreate(LIBOS_LOG_DECODE *logDecode) +{ + libosLogCreateGeneric(logDecode); +} +void libosLogCreateEx(LIBOS_LOG_DECODE *logDecode, const char *pSourceName) +{ + libosLogCreateExGeneric(logDecode, pSourceName); +} +#endif + +void libosLogAddLogEx(LIBOS_LOG_DECODE *logDecode, void *buffer, NvU64 bufferSize, NvU32 gpuInstance, NvU32 gpuArch, NvU32 gpuImpl, const char *name) +{ + NvU32 i; + LIBOS_LOG_DECODE_LOG *pLog; + + if (logDecode->numLogBuffers >= LIBOS_LOG_MAX_LOGS) + { + printf("LIBOS_LOG_DECODE::log array is too small. 
Increase LIBOS_LOG_MAX_LOGS.\n"); + return; + } + + i = logDecode->numLogBuffers++; + pLog = &logDecode->log[i]; + pLog->physicLogBuffer = (volatile NvU64 *)buffer; + pLog->logBufferSize = bufferSize; + pLog->previousPut = 0; + pLog->putCopy = 0; + pLog->putIter = 0; + + pLog->gpuInstance = gpuInstance; + + if (name) + portStringCopy(pLog->taskPrefix, sizeof(pLog->taskPrefix), name, sizeof(pLog->taskPrefix)); + + +#if LIBOS_LOG_TO_NVLOG + NV_STATUS status; + + const NvU32 libosNoWrapBufferFlags = + DRF_DEF(LOG, _BUFFER_FLAGS, _DISABLED, _NO) | DRF_DEF(LOG, _BUFFER_FLAGS, _TYPE, _NOWRAP) | + DRF_DEF(LOG, _BUFFER_FLAGS, _EXPANDABLE, _NO) | DRF_DEF(LOG, _BUFFER_FLAGS, _NONPAGED, _YES) | + DRF_DEF(LOG, _BUFFER_FLAGS, _LOCKING, _NONE) | DRF_DEF(LOG, _BUFFER_FLAGS, _OCA, _YES) | + DRF_DEF(LOG, _BUFFER_FLAGS, _FORMAT, _LIBOS_LOG) | + DRF_NUM(LOG, _BUFFER_FLAGS, _GPU_INSTANCE, gpuInstance); + + const NvU32 libosWrapBufferFlags = + DRF_DEF(LOG, _BUFFER_FLAGS, _DISABLED, _NO) | DRF_DEF(LOG, _BUFFER_FLAGS, _TYPE, _RING) | + DRF_DEF(LOG, _BUFFER_FLAGS, _EXPANDABLE, _NO) | DRF_DEF(LOG, _BUFFER_FLAGS, _NONPAGED, _YES) | + DRF_DEF(LOG, _BUFFER_FLAGS, _LOCKING, _NONE) | DRF_DEF(LOG, _BUFFER_FLAGS, _OCA, _YES) | + DRF_DEF(LOG, _BUFFER_FLAGS, _FORMAT, _LIBOS_LOG) | + DRF_NUM(LOG, _BUFFER_FLAGS, _GPU_INSTANCE, gpuInstance); + + pLog->hNvLogNoWrap = 0; + pLog->hNvLogWrap = 0; + pLog->bNvLogNoWrap = NV_FALSE; + + LIBOS_LOG_NVLOG_BUFFER *pNoWrapBuf; + + status = nvlogAllocBuffer( + bufferSize + NV_OFFSETOF(LIBOS_LOG_NVLOG_BUFFER, data), libosNoWrapBufferFlags, + LIBOS_LOG_NVLOG_BUFFER_TAG(logDecode->sourceName, i * 2), + &pLog->hNvLogNoWrap); + + if (status == NV_OK) + { + pNoWrapBuf = (LIBOS_LOG_NVLOG_BUFFER *)NvLogLogger.pBuffers[pLog->hNvLogNoWrap]->data; + if (name) + { + portStringCopy( + pNoWrapBuf->taskPrefix, sizeof pNoWrapBuf->taskPrefix, name, sizeof pNoWrapBuf->taskPrefix); + } + + pNoWrapBuf->gpuArch = gpuArch; + pNoWrapBuf->gpuImpl = gpuImpl; + + NvLogLogger.pBuffers[pLog->hNvLogNoWrap]->pos = sizeof(NvU64); // offset to account for put pointer + pLog->bNvLogNoWrap = NV_TRUE; + } + else + { + printf("nvlogAllocBuffer nowrap failed\n"); + } + + LIBOS_LOG_NVLOG_BUFFER *pWrapBuf; + + status = nvlogAllocBuffer( + bufferSize + NV_OFFSETOF(LIBOS_LOG_NVLOG_BUFFER, data), libosWrapBufferFlags, + LIBOS_LOG_NVLOG_BUFFER_TAG(logDecode->sourceName, i * 2 + 1), + &pLog->hNvLogWrap); + + if (status == NV_OK) + { + pWrapBuf = (LIBOS_LOG_NVLOG_BUFFER *)NvLogLogger.pBuffers[pLog->hNvLogWrap]->data; + if (name) + { + portStringCopy( + pWrapBuf->taskPrefix, sizeof pWrapBuf->taskPrefix, name, sizeof pWrapBuf->taskPrefix); + } + + pWrapBuf->gpuArch = gpuArch; + pWrapBuf->gpuImpl = gpuImpl; + } + else + { + printf("nvlogAllocBuffer wrap failed\n"); + } +#endif // LIBOS_LOG_TO_NVLOG +} + +void libosLogAddLog(LIBOS_LOG_DECODE *logDecode, void *buffer, NvU64 bufferSize, NvU32 gpuInstance, const char *name) +{ + // Use defaults for gpuArch and gpuImpl + libosLogAddLogEx(logDecode, buffer, bufferSize, gpuInstance, 0, 0, name); +} + +#if LIBOS_LOG_DECODE_ENABLE + +void libosLogInit(LIBOS_LOG_DECODE *logDecode, elf64_header *elf) +{ + NvU64 scratchBufferSize = 0; + NvU64 i; + + // + // The scratch buffer holds the sorted records in flight from all logs. + // If we overflow it, we lose records. + // + + // + // First, calculate the smallest possible length (in 64-bit words) + // of a log buffer entry for the current log (0 args). + // This will allow us to calculate for max possible number of log entries, + // i.e. 
if none of them have args and are thus the smallest size possible. + // + NvU64 minLogBufferEntryLength = 0; + minLogBufferEntryLength++; // account for metadata pointer + if (!logDecode->bSynchronousBuffer) + { + minLogBufferEntryLength++; // account for timestamp + } + + for (i = 0; i < logDecode->numLogBuffers; i++) + { + scratchBufferSize += logDecode->log[i].logBufferSize; + } + + // The scratch buffer is sized to handle worst-case overhead + scratchBufferSize = (scratchBufferSize * LIBOS_LOG_DECODE_RECORD_BASE) / minLogBufferEntryLength; + + + logDecode->elf = elf; + logDecode->scratchBuffer = portMemAllocNonPaged(scratchBufferSize); + logDecode->scratchBufferSize = scratchBufferSize; + logDecode->curLineBufPtr = logDecode->lineBuffer; + + if (elf != NULL) + { + libosDebugResolverConstruct(&logDecode->resolver, elf); + } +} + +void libosLogInitEx( + LIBOS_LOG_DECODE *logDecode, elf64_header *elf, NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve) +{ + // Set extended config + logDecode->bSynchronousBuffer = bSynchronousBuffer; + logDecode->bPtrSymbolResolve = bPtrSymbolResolve; + + // Complete init + libosLogInit(logDecode, elf); +} + +#else // LIBOS_LOG_DECODE_ENABLE + +void libosLogInit(LIBOS_LOG_DECODE *logDecode, void *elf) {} + +void libosLogInitEx( + LIBOS_LOG_DECODE *logDecode, void *elf, + NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve) +{ + // No extended config to set when decode is disabled +} + +#endif // LIBOS_LOG_DECODE_ENABLE + +void libosLogDestroy(LIBOS_LOG_DECODE *logDecode) +{ +#if LIBOS_LOG_TO_NVLOG + NvU64 i; + for (i = 0; i < logDecode->numLogBuffers; i++) + { + LIBOS_LOG_DECODE_LOG *pLog = &logDecode->log[i]; + + if (pLog->hNvLogNoWrap != 0) + { + nvlogDeallocBuffer(pLog->hNvLogNoWrap); + pLog->hNvLogNoWrap = 0; + } + + if (pLog->hNvLogWrap != 0) + { + nvlogDeallocBuffer(pLog->hNvLogWrap); + pLog->hNvLogWrap = 0; + } + } +#endif // LIBOS_LOG_TO_NVLOG + +#if LIBOS_LOG_DECODE_ENABLE + libosDebugResolverDestroy(&logDecode->resolver); + + if (logDecode->scratchBuffer) + { + portMemFree(logDecode->scratchBuffer); + logDecode->scratchBuffer = NULL; + } +#endif // LIBOS_LOG_DECODE_ENABLE +} + +void libosExtractLogs(LIBOS_LOG_DECODE *logDecode, NvBool bSyncNvLog) +{ +#if LIBOS_LOG_DECODE_ENABLE + if (logDecode->elf != NULL) + libosExtractLogs_decode(logDecode); +#endif + +#if LIBOS_LOG_TO_NVLOG + libosExtractLogs_nvlog(logDecode, bSyncNvLog); +#endif +} + + +#if defined(NVWATCH) +# pragma warning(pop) +#endif // defined(NVWATCH) diff --git a/src/common/uproc/os/libos-v2.0.0/debug/logdecode.h b/src/common/uproc/os/libos-v2.0.0/debug/logdecode.h new file mode 100644 index 000000000..48e6ea048 --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/debug/logdecode.h @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LOGDECODE_H_ +#define LOGDECODE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef NVRM + +# define LIBOS_LOG_DECODE_ENABLE 1 + +# define LIBOS_LOG_TO_NVLOG 1 + +# define LIBOS_LOG_MAX_LOGS 5 // Max logs per GPU + +#else // NVRM +# include + +# define LIBOS_LOG_DECODE_ENABLE 1 +# define LIBOS_LOG_TO_NVLOG 0 + +# define LIBOS_LOG_MAX_LOGS 160 // Max logs for all GPUs for offline decoder + +#endif // NVRM + +#define LIBOS_LOG_ENABLE (LIBOS_LOG_TO_NVLOG || LIBOS_LOG_DECODE_ENABLE) + +#if LIBOS_LOG_DECODE_ENABLE +# include "../include/libos_log.h" +# include "lines.h" +#endif + +// Forward declarations. +struct LIBOS_LOG_DECODE_LOG; +typedef struct LIBOS_LOG_DECODE_LOG LIBOS_LOG_DECODE_LOG; + +#define LIBOS_LOG_LINE_BUFFER_SIZE 128 +#define LIBOS_LOG_MAX_ARGS 20 + +#if LIBOS_LOG_DECODE_ENABLE + +# include "nvctassert.h" + +typedef struct +{ + NV_DECLARE_ALIGNED(LIBOS_LOG_DECODE_LOG *log, 8); + NV_DECLARE_ALIGNED(libosLogMetadata *meta, 8); + NV_DECLARE_ALIGNED(NvU64 timeStamp, 8); + NvU64 args[LIBOS_LOG_MAX_ARGS]; +} LIBOS_LOG_DECODE_RECORD; + +// Size of LIBOS_LOG_DECODE_RECORD without args, in number of NvU64 entries. +# define LIBOS_LOG_DECODE_RECORD_BASE 3 + +// Ensure that the size matches up (no padding in the struct) +ct_assert((LIBOS_LOG_DECODE_RECORD_BASE * sizeof(NvU64)) == (sizeof(LIBOS_LOG_DECODE_RECORD) - sizeof(((LIBOS_LOG_DECODE_RECORD*)NULL)->args))); + +#endif // LIBOS_LOG_DECODE_ENABLE + +#define TASK_NAME_MAX_LENGTH (8) +#define SOURCE_NAME_MAX_LENGTH (4) + +// NvLog buffer +typedef struct +{ + NvU32 gpuArch; + NvU32 gpuImpl; + NvU32 rsvd1; + NvU32 rsvd2; + char taskPrefix[TASK_NAME_MAX_LENGTH]; // Prefix string printed before each line. + NvU8 data[0]; +} LIBOS_LOG_NVLOG_BUFFER; + +struct LIBOS_LOG_DECODE_LOG +{ + volatile NvU64 *physicLogBuffer; + NvU64 logBufferSize; // Includes put pointer located in first 8 bytes. + NvU64 previousPut; // Keeps track of records already printed. + NvU64 putCopy; // End pointer for this batch. + NvU64 putIter; // Iterator for this batch. + NvU32 gpuInstance; // GPU that this log is associated with. + char taskPrefix[TASK_NAME_MAX_LENGTH]; // Prefix string printed before each line. + +#if LIBOS_LOG_TO_NVLOG + NvU32 hNvLogNoWrap; // No wrap buffer captures first records. + NvU32 hNvLogWrap; // Wrap buffer captures last records. + NvBool bNvLogNoWrap; // NV_TRUE if no wrap buffer not full. +#endif + +#if LIBOS_LOG_DECODE_ENABLE + LIBOS_LOG_DECODE_RECORD record; +#endif +}; + +typedef struct +{ + char sourceName[SOURCE_NAME_MAX_LENGTH]; // GSP, PMU etc + + NvU64 numLogBuffers; + LIBOS_LOG_DECODE_LOG log[LIBOS_LOG_MAX_LOGS]; + +#if LIBOS_LOG_DECODE_ENABLE + elf64_header *elf; + libosDebugResolver resolver; + NvU64 *scratchBuffer; // Sorted by timestamp. + NvU64 scratchBufferSize; // Sum of logBufferSize. + char *curLineBufPtr; // Current position in lineBuffer. + // Decodes into lineBuffer, then prints as a string. 
+ char lineBuffer[LIBOS_LOG_LINE_BUFFER_SIZE]; + NvBool bSynchronousBuffer; + NvBool bPtrSymbolResolve; +#endif // LIBOS_LOG_DECODE_ENABLE + +#if defined(NVSYM_STANDALONE) && !defined(PROTODMP_BUILD) + FILE *fout; // nvlog_decoder outputs to FILE descriptors +#elif defined(NVWATCH) + char *dest; // nvwatch dumps data into a string +#endif + +} LIBOS_LOG_DECODE; + +#if defined(NVSYM_STANDALONE) && !defined(PROTODMP_BUILD) +void libosLogCreate(LIBOS_LOG_DECODE *logDecode, FILE *fout); +void libosLogCreateEx(LIBOS_LOG_DECODE *logDecode, const char *pSourceName, FILE *fout); +#elif defined(NVWATCH) +void libosLogCreate(LIBOS_LOG_DECODE *logDecode, char *dest); +void libosLogCreateEx(LIBOS_LOG_DECODE *logDecode, const char *pSourceName, char *dest); +#else +void libosLogCreate(LIBOS_LOG_DECODE *logDecode); +void libosLogCreateEx(LIBOS_LOG_DECODE *logDecode, const char *pSourceName); +#endif + +void libosLogAddLogEx(LIBOS_LOG_DECODE *logDecode, void *buffer, NvU64 bufferSize, NvU32 gpuInstance, NvU32 gpuArch, NvU32 gpuImpl, const char *name); +void libosLogAddLog(LIBOS_LOG_DECODE *logDecode, void *buffer, NvU64 bufferSize, NvU32 gpuInstance, const char *name); + +#if LIBOS_LOG_DECODE_ENABLE +void libosLogInit(LIBOS_LOG_DECODE *logDecode, elf64_header *elf); +void libosLogInitEx( + LIBOS_LOG_DECODE *logDecode, elf64_header *elf, NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve); +#else +void libosLogInit(LIBOS_LOG_DECODE *logDecode, void *elf); +void libosLogInitEx( + LIBOS_LOG_DECODE *logDecode, void *elf, NvBool bSynchronousBuffer, NvBool bPtrSymbolResolve); +#endif // LIBOS_LOG_DECODE_ENABLE + +void libosLogDestroy(LIBOS_LOG_DECODE *logDecode); + +void libosExtractLogs(LIBOS_LOG_DECODE *logDecode, NvBool bSyncNvLog); + +#ifdef __cplusplus +} +#endif + +#endif // LOGDECODE_H_ diff --git a/src/common/uproc/os/libos-v2.0.0/include/gsp_fw_wpr_meta.h b/src/common/uproc/os/libos-v2.0.0/include/gsp_fw_wpr_meta.h new file mode 100644 index 000000000..6aee28648 --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/include/gsp_fw_wpr_meta.h @@ -0,0 +1,154 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// TODO: This file is is not libos specific, so it really does not belong under +// uproc/os/libos-v2.0.0. It is used by booter, bootloader, libos, GSP-RM, +// and kernel RM. 
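Taken together, the declarations above form the decoder's public surface. A hypothetical call sequence for the kernel-RM flavor of the API (the variant without a FILE * or char * sink argument) is sketched below; the buffer parameters and the "GSP"/"INIT"/"RM" tags are illustrative only. Logs are registered before libosLogInit so that the scratch buffer can be sized from their combined length.

```c
static void decodeGspLogsOnce(elf64_header *pGspElf,
                              void *pInitLogBuffer, NvU64 initLogSize,
                              void *pRmLogBuffer,   NvU64 rmLogSize)
{
    LIBOS_LOG_DECODE logDecode;

    libosLogCreateEx(&logDecode, "GSP");

    /* Register the raw buffers first; libosLogInit sums their sizes. */
    libosLogAddLog(&logDecode, pInitLogBuffer, initLogSize, 0 /* gpuInstance */, "INIT");
    libosLogAddLog(&logDecode, pRmLogBuffer,   rmLogSize,   0 /* gpuInstance */, "RM");

    /* Construct the resolver from the logging ELF and allocate scratch space. */
    libosLogInit(&logDecode, pGspElf);

    /* Decode and print whatever has accumulated in the registered buffers. */
    libosExtractLogs(&logDecode, NV_FALSE /* bSyncNvLog */);

    libosLogDestroy(&logDecode);
}
```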
An appropriate location might be uproc/common/inc, but +// that directory is not currently synced by DVS for driver builds. + +#pragma once + +#ifndef GSP_FW_WPR_META_H_ +#define GSP_FW_WPR_META_H_ + +/*! + * GSP firmware WPR metadata + * + * Initialized by CPU-RM and DMA'd to FB, at the end of what will be WPR2. + * Verified, and locked in WPR2 by Booter. + * + * Firmware scrubs the last 256mb of FB, no memory outside of this region + * may be used until the FW RM has scrubbed the remainder of memory. + * + * ---------------------------- <- fbSize (end of FB, 1M aligned) + * | VGA WORKSPACE | + * ---------------------------- <- vbiosReservedOffset (64K? aligned) + * | (potential align. gap) | + * ---------------------------- <- gspFwWprEnd (128K aligned) + * | FRTS data | (frtsSize is 0 on GA100) + * | ------------------------ | <- frtsOffset + * | BOOT BIN (e.g. SK + BL) | + * ---------------------------- <- bootBinOffset + * | GSP FW ELF | + * ---------------------------- <- gspFwOffset + * | GSP FW (WPR) HEAP | + * ---------------------------- <- gspFwHeapOffset + * | Booter-placed metadata | + * | (struct GspFwWprMeta) | + * ---------------------------- <- gspFwWprStart (128K aligned) + * | GSP FW (non-WPR) HEAP | + * ---------------------------- <- nonWprHeapOffset, gspFwRsvdStart + * (GSP_CARVEOUT_SIZE bytes from end of FB) + */ +typedef struct +{ + // Magic + // BL to use for verification (i.e. Booter locked it in WPR2) + NvU64 magic; // = 0xdc3aae21371a60b3; + + // Revision number of Booter-BL-Sequencer handoff interface + // Bumped up when we change this interface so it is not backward compatible. + // Bumped up when we revoke GSP-RM ucode + NvU64 revision; // = 1; + + // ---- Members regarding data in SYSMEM ---------------------------- + // Consumed by Booter for DMA + + NvU64 sysmemAddrOfRadix3Elf; + NvU64 sizeOfRadix3Elf; + + NvU64 sysmemAddrOfBootloader; + NvU64 sizeOfBootloader; + + // Offsets inside bootloader image needed by Booter + NvU64 bootloaderCodeOffset; + NvU64 bootloaderDataOffset; + NvU64 bootloaderManifestOffset; + + NvU64 sysmemAddrOfSignature; + NvU64 sizeOfSignature; + + // ---- Members describing FB layout -------------------------------- + NvU64 gspFwRsvdStart; + + NvU64 nonWprHeapOffset; + NvU64 nonWprHeapSize; + + NvU64 gspFwWprStart; + + // GSP-RM to use to setup heap. + NvU64 gspFwHeapOffset; + NvU64 gspFwHeapSize; + + // BL to use to find ELF for jump + NvU64 gspFwOffset; + // Size is sizeOfRadix3Elf above. + + NvU64 bootBinOffset; + // Size is sizeOfBootloader above. + + NvU64 frtsOffset; + NvU64 frtsSize; + + NvU64 gspFwWprEnd; + + // GSP-RM to use for fbRegionInfo? + NvU64 fbSize; + + // ---- Other members ----------------------------------------------- + + // GSP-RM to use for fbRegionInfo? + NvU64 vgaWorkspaceOffset; + NvU64 vgaWorkspaceSize; + + // Boot count. Used to determine whether to load the firmware image. + NvU64 bootCount; + + // TODO: the partitionRpc* fields below do not really belong in this + // structure. The values are patched in by the partition bootstrapper + // when GSP-RM is booted in a partition, and this structure was a + // convenient place for the bootstrapper to access them. These should + // be moved to a different comm. mechanism between the bootstrapper + // and the GSP-RM tasks. + + // Shared partition RPC memory (physical address) + NvU64 partitionRpcAddr; + + // Offsets relative to partitionRpcAddr + NvU16 partitionRpcRequestOffset; + NvU16 partitionRpcReplyOffset; + + // Pad structure to exactly 256 bytes. 
Can replace padding with additional + // fields without incrementing revision. Padding initialized to 0. + NvU32 padding[7]; + + // BL to use for verification (i.e. Booter says OK to boot) + NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified + +} GspFwWprMeta; + +#define GSP_FW_WPR_META_VERIFIED 0xa0a0a0a0a0a0a0a0ULL +#define GSP_FW_WPR_META_REVISION 1 +#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL + +#endif // GSP_FW_WPR_META_H_ diff --git a/src/common/uproc/os/libos-v2.0.0/include/libos_init_args.h b/src/common/uproc/os/libos-v2.0.0/include/libos_init_args.h new file mode 100644 index 000000000..fed3bf6a1 --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/include/libos_init_args.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#pragma once + +// @todo: Remove this once chips_a/drivers/resman/arch/nvalloc/common/inc/riscvifriscv.h has been +// switched to directly include this file. +#if !defined(RISCVIFRISCV_H) && !defined(LIBOS_INIT_H_) +#define LIBOS_INIT_H_ + +#define LIBOS_MEMORY_REGION_INIT_ARGUMENTS_MAX 4096 + +typedef NvU64 LibosAddress; + +typedef enum { + LIBOS_MEMORY_REGION_NONE, + LIBOS_MEMORY_REGION_CONTIGUOUS, + LIBOS_MEMORY_REGION_RADIX3 +} LibosMemoryRegionKind; + +typedef enum { + LIBOS_MEMORY_REGION_LOC_NONE, + LIBOS_MEMORY_REGION_LOC_SYSMEM, + LIBOS_MEMORY_REGION_LOC_FB +} LibosMemoryRegionLoc; + +#define LIBOS_MEMORY_REGION_RADIX_PAGE_SIZE 4096 +#define LIBOS_MEMORY_REGION_RADIX_PAGE_LOG2 12 +typedef struct +{ + LibosAddress id8; // Id tag. + LibosAddress pa; // Physical address. + LibosAddress size; // Size of memory area. + NvU8 kind; // See LibosMemoryRegionKind above. + NvU8 loc; // See LibosMemoryRegionLoc above. +} LibosMemoryRegionInitArgument; + +#endif diff --git a/src/common/uproc/os/libos-v2.0.0/include/libos_log.h b/src/common/uproc/os/libos-v2.0.0/include/libos_log.h new file mode 100644 index 000000000..890161fcf --- /dev/null +++ b/src/common/uproc/os/libos-v2.0.0/include/libos_log.h @@ -0,0 +1,195 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LIBOS_LOGGER_H_ +#define LIBOS_LOGGER_H_ + +#include "nvtypes.h" + +/** + * @brief The log metadata structures and format strings are stripped + * These structures are emitted into the .logging section + * which is stripped from the image as the final build step. + * + */ +typedef struct +{ + NV_DECLARE_ALIGNED(const char *filename, 8); + NV_DECLARE_ALIGNED(const char *format, 8); + NV_DECLARE_ALIGNED(NvU32 lineNumber, 8); + NvU8 argumentCount; //! Count of arguments not including format string. + NvU8 printLevel; +} libosLogMetadata; + +/*! + * Count arguments + * + */ +#define LIBOS_MACRO_GET_COUNT(...) \ + LIBOS_MACRO_GET_19TH(__VA_ARGS__, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define LIBOS_MACRO_GET_19TH( \ + _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, N, ...) \ + N + +/*! + * Utility + * + */ +#define LIBOS_MACRO_PASTE(fmt, b) fmt##b +#define LIBOS_MACRO_PASTE_EVAL(fmt, b) LIBOS_MACRO_PASTE(fmt, b) +#define LIBOS_MACRO_FIRST(format_string, ...) format_string + +/*! 
+ * Cast remaining log arguments to integers for storage + * + */ +#define LIBOS_LOG_BUILD_ARG(a) (NvU64)(a), +#define LIBOS_LOG_BUILD_1(fmt) +#define LIBOS_LOG_BUILD_2(fmt, b) LIBOS_LOG_BUILD_ARG(b) +#define LIBOS_LOG_BUILD_3(fmt, b, c) LIBOS_LOG_BUILD_ARG(b) LIBOS_LOG_BUILD_ARG(c) +#define LIBOS_LOG_BUILD_4(fmt, b, c, d) LIBOS_LOG_BUILD_ARG(b) LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) +#define LIBOS_LOG_BUILD_5(fmt, b, c, d, e) \ + LIBOS_LOG_BUILD_ARG(b) LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) +#define LIBOS_LOG_BUILD_6(fmt, b, c, d, e, f) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) +#define LIBOS_LOG_BUILD_7(fmt, b, c, d, e, f, g) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) +#define LIBOS_LOG_BUILD_8(fmt, b, c, d, e, f, g, h) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) +#define LIBOS_LOG_BUILD_9(fmt, b, c, d, e, f, g, h, i) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) +#define LIBOS_LOG_BUILD_10(fmt, b, c, d, e, f, g, h, i, j) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) +#define LIBOS_LOG_BUILD_11(fmt, b, c, d, e, f, g, h, i, j, k) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) \ + LIBOS_LOG_BUILD_ARG(k) +#define LIBOS_LOG_BUILD_12(fmt, b, c, d, e, f, g, h, i, j, k, l) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) \ + LIBOS_LOG_BUILD_ARG(k) LIBOS_LOG_BUILD_ARG(l) +#define LIBOS_LOG_BUILD_13(fmt, b, c, d, e, f, g, h, i, j, k, l, m) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) \ + LIBOS_LOG_BUILD_ARG(k) LIBOS_LOG_BUILD_ARG(l) LIBOS_LOG_BUILD_ARG(m) +#define LIBOS_LOG_BUILD_14(fmt, b, c, d, e, f, g, h, i, j, k, l, m, n) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) \ + LIBOS_LOG_BUILD_ARG(k) LIBOS_LOG_BUILD_ARG(l) LIBOS_LOG_BUILD_ARG(m) LIBOS_LOG_BUILD_ARG(n) +#define LIBOS_LOG_BUILD_15(fmt, b, c, d, e, f, g, h, i, j, k, l, m, n, o) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) \ + LIBOS_LOG_BUILD_ARG(k) LIBOS_LOG_BUILD_ARG(l) LIBOS_LOG_BUILD_ARG(m) LIBOS_LOG_BUILD_ARG(n) \ + LIBOS_LOG_BUILD_ARG(o) +#define LIBOS_LOG_BUILD_16(fmt, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \ + 
LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) \ + LIBOS_LOG_BUILD_ARG(k) LIBOS_LOG_BUILD_ARG(l) LIBOS_LOG_BUILD_ARG(m) LIBOS_LOG_BUILD_ARG(n) \ + LIBOS_LOG_BUILD_ARG(o) LIBOS_LOG_BUILD_ARG(p) +#define LIBOS_LOG_BUILD_17(fmt, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) \ + LIBOS_LOG_BUILD_ARG(k) LIBOS_LOG_BUILD_ARG(l) LIBOS_LOG_BUILD_ARG(m) LIBOS_LOG_BUILD_ARG(n) \ + LIBOS_LOG_BUILD_ARG(o) LIBOS_LOG_BUILD_ARG(p) LIBOS_LOG_BUILD_ARG(q) +#define LIBOS_LOG_BUILD_18(fmt, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r) \ + LIBOS_LOG_BUILD_ARG(b) \ + LIBOS_LOG_BUILD_ARG(c) LIBOS_LOG_BUILD_ARG(d) LIBOS_LOG_BUILD_ARG(e) LIBOS_LOG_BUILD_ARG(f) \ + LIBOS_LOG_BUILD_ARG(g) LIBOS_LOG_BUILD_ARG(h) LIBOS_LOG_BUILD_ARG(i) LIBOS_LOG_BUILD_ARG(j) \ + LIBOS_LOG_BUILD_ARG(k) LIBOS_LOG_BUILD_ARG(l) LIBOS_LOG_BUILD_ARG(m) LIBOS_LOG_BUILD_ARG(n) \ + LIBOS_LOG_BUILD_ARG(o) LIBOS_LOG_BUILD_ARG(p) LIBOS_LOG_BUILD_ARG(q) LIBOS_LOG_BUILD_ARG(r) + +#define LIBOS_LOG_BUILD_APPLY(F, ...) F(__VA_ARGS__) +#define APPLY_REMAINDER(...) \ + LIBOS_LOG_BUILD_APPLY( \ + LIBOS_MACRO_PASTE_EVAL(LIBOS_LOG_BUILD_, LIBOS_MACRO_GET_COUNT(__VA_ARGS__)), __VA_ARGS__) + +#define LOG_LEVEL_INFO 0 +#define LOG_LEVEL_ERROR 1 + +# define LIBOS_SECTION_LOGGING __attribute__((section(".logging"))) + +#ifdef LIBOS_LOGGING_METADATA_SPLIT +/*! + * When the METADATA_SPLIT feature is enabled, libos print data is split between 4 input sections (all of which + * must be dropped from the final image). The default .logging is used for str literals and custom strings which + * are directly referenced by pointer (for %s substitution); .logging_const is used for format strings and the + * aux metadata; and .logging_metadata is used for metadata vars. + * + * The idea is to have a split where metadata-only changes (i.e. changing what data gets printed, line number changes + * in file with prints, file renames) will usually not affect the main image, and can be bundled together and + * relied upon to trigger custom ucode build release behavior. + * The only exception is renaming headers with prints inside static inline functions; since these don't overlap + * with basenames, and we can't reliably get a nonvolatile version of a header file name here, such name changes + * alone won't be able to trigger ucode releases. + */ +# define LIBOS_SECTION_LOGGING_CONST __attribute__((section(".logging_const"))) +# define LIBOS_SECTION_LOGGING_METADATA __attribute__((section(".logging_metadata"))) +# define LIBOS_LOGGING_AUX_METADATA_DUMP \ + static const LIBOS_SECTION_LOGGING_CONST int libos_dummy_line[] LIBOS_ATTR_USED = {__LINE__}; +#else // LIBOS_LOGGING_VOLATILE_METADATA_SPLIT +# define LIBOS_SECTION_LOGGING_CONST LIBOS_SECTION_LOGGING +# define LIBOS_SECTION_LOGGING_METADATA LIBOS_SECTION_LOGGING +# define LIBOS_LOGGING_AUX_METADATA_DUMP +#endif // LIBOS_LOGGING_VOLATILE_METADATA_SPLIT + +/*! + * Used for log variables which we want to dump; clients may want to pick these up to check for metadata changes + */ +#define LIBOS_ATTR_USED __attribute__((used)) + +/*! + * Cast remaining log arguments to integers for storage + */ +#define LIBOS_LOG_INTERNAL(dispatcher, level, ...) 
\ + do \ + { \ + static const LIBOS_SECTION_LOGGING_CONST char libos_pvt_format[] = {LIBOS_MACRO_FIRST(__VA_ARGS__)}; \ + static const LIBOS_SECTION_LOGGING_CONST char libos_pvt_file[] = {__FILE__}; \ + LIBOS_LOGGING_AUX_METADATA_DUMP; \ + static const LIBOS_SECTION_LOGGING_METADATA libosLogMetadata libos_pvt_meta = { \ + .filename = &libos_pvt_file[0], \ + .format = &libos_pvt_format[0], \ + .lineNumber = __LINE__, \ + .argumentCount = LIBOS_MACRO_GET_COUNT(__VA_ARGS__) - 1, \ + .printLevel = level}; \ + const NvU64 tokens[] = {APPLY_REMAINDER(__VA_ARGS__)(NvU64) & libos_pvt_meta}; \ + dispatcher(sizeof(tokens) / sizeof(*tokens), &tokens[0]); \ + } while (0) + +#endif diff --git a/src/nvidia-modeset/Makefile b/src/nvidia-modeset/Makefile new file mode 100644 index 000000000..c63b86bcd --- /dev/null +++ b/src/nvidia-modeset/Makefile @@ -0,0 +1,143 @@ +########################################################################### +# Makefile for nv-modeset-kernel.o +########################################################################### + +NV_MODULE_LOGGING_NAME ?= nvidia-modeset + +VERSION_MK_DIR = ../../ +include ../../utils.mk + +include srcs.mk + +# The source files for nv-modeset-kernel.o are all SRCS and SRCS_CXX defined in +# srcs.mk, and the NVIDIA ID string +ALL_SRCS = $(SRCS) $(SRCS_CXX) +ALL_SRCS += $(NVIDSTRING) + +SRC_COMMON = ../common + +CFLAGS += -include $(SRC_COMMON)/sdk/nvidia/inc/cpuopsys.h + +CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc +CFLAGS += -I $(SRC_COMMON)/shared/inc +CFLAGS += -I $(SRC_COMMON)/inc +CFLAGS += -I $(SRC_COMMON)/softfloat/nvidia +CFLAGS += -I $(SRC_COMMON)/softfloat/source/include +CFLAGS += -I $(SRC_COMMON)/softfloat/source/8086-SSE +CFLAGS += -I $(SRC_COMMON)/unix/common/utils/interface +CFLAGS += -I $(SRC_COMMON)/unix/common/inc +CFLAGS += -I $(SRC_COMMON)/modeset +CFLAGS += -I os-interface/include +CFLAGS += -I kapi/interface +CFLAGS += -I ../nvidia/arch/nvalloc/unix/include +CFLAGS += -I interface +CFLAGS += -I include +CFLAGS += -I kapi/include +CFLAGS += -I generated +CFLAGS += -I $(SRC_COMMON)/displayport/inc +CFLAGS += -I $(SRC_COMMON)/displayport/inc/dptestutil +CFLAGS += -I $(SRC_COMMON)/inc/displayport + +CFLAGS += -DNDEBUG +CFLAGS += -D_LANGUAGE_C +CFLAGS += -D__NO_CTYPE + +CFLAGS += -DNV_CPU_INTRINSICS_KERNEL +CFLAGS += -DNVHDMIPKT_RM_CALLS_INTERNAL=0 + +# XXX it would be nice to only define these for appropriate files... 
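To make the token layout of LIBOS_LOG_INTERNAL concrete, here is a hand-written approximation of what a call such as LIBOS_LOG_INTERNAL(dispatch, LOG_LEVEL_INFO, "x=%d y=%d\n", x, y) boils down to, with the section attributes, the __FILE__/__LINE__ plumbing, and the aux-metadata dump omitted, and with an assumed dispatcher signature: each argument is cast to NvU64 and a pointer to the stripped metadata record is appended as the final token, so the dispatcher receives argumentCount + 1 words.

```c
/* Approximate expansion; the real macro places these in the .logging* sections. */
static const char pvt_format[] = "x=%d y=%d\n";
static const char pvt_file[]   = "example.c";

static const libosLogMetadata pvt_meta = {
    .filename      = pvt_file,
    .format        = pvt_format,
    .lineNumber    = 42,
    .argumentCount = 2,              /* the format string itself is not counted */
    .printLevel    = LOG_LEVEL_INFO,
};

static void logExample(void (*dispatch)(NvU64 count, const NvU64 *tokens),
                       int x, int y)
{
    const NvU64 tokens[] = { (NvU64)x, (NvU64)y, (NvU64)&pvt_meta };

    dispatch(sizeof(tokens) / sizeof(*tokens), &tokens[0]);
}
```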
+CFLAGS += -DSOFTFLOAT_ROUND_ODD +CFLAGS += -DSOFTFLOAT_FAST_DIV32TO16 +CFLAGS += -DSOFTFLOAT_FAST_DIV64TO32 + +# Tell nvtiming to use nvkms import functions +CFLAGS += -DNVT_USE_NVKMS + +CFLAGS += -Wformat +CFLAGS += -Wreturn-type +CFLAGS += -Wswitch +CFLAGS += -Wunused-local-typedefs +CFLAGS += -Wchar-subscripts +CFLAGS += -Wparentheses +CFLAGS += -Wpointer-arith +CFLAGS += -Wcast-qual +CFLAGS += -Wall +CFLAGS += -Wextra +CFLAGS += -Wno-sign-compare +CFLAGS += -Wno-unused-parameter +CFLAGS += -Wno-missing-field-initializers +CFLAGS += -Wno-format-zero-length +CFLAGS += -Wmissing-declarations +CFLAGS += -Wno-cast-qual + +CFLAGS += -O2 + +ifeq ($(TARGET_ARCH),x86_64) + CFLAGS += -msoft-float + CFLAGS += -mno-red-zone + CFLAGS += -mcmodel=kernel + CFLAGS += -mno-mmx + CFLAGS += -mno-sse + CFLAGS += -mno-sse2 + CFLAGS += -mno-3dnow +endif + +ifeq ($(TARGET_ARCH),aarch64) + CFLAGS += -mgeneral-regs-only + CFLAGS += -march=armv8-a +endif + +CFLAGS += -fno-pic +CFLAGS += -fno-common +CFLAGS += -fomit-frame-pointer +CFLAGS += -fno-strict-aliasing +CFLAGS += -ffunction-sections +CFLAGS += -fdata-sections +CFLAGS += -ffreestanding + +CONDITIONAL_CFLAGS := $(call TEST_CC_ARG, -fcf-protection=none) +CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-overflow=2) +CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -Wformat-truncation=1) +ifeq ($(TARGET_ARCH),x86_64) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register) +endif + +CFLAGS += $(CONDITIONAL_CFLAGS) + +CC_ONLY_CFLAGS += -Wimplicit +CC_ONLY_CFLAGS += -Wstrict-prototypes +CC_ONLY_CFLAGS += -Wmissing-prototypes +CC_ONLY_CFLAGS += -std=gnu11 + +CXX_ONLY_CFLAGS += -std=gnu++11 +CXX_ONLY_CFLAGS += -fno-operator-names +CXX_ONLY_CFLAGS += -fno-rtti +CXX_ONLY_CFLAGS += -fno-exceptions +CXX_ONLY_CFLAGS += -fcheck-new + +SHADER_OBJS = + + CFLAGS += -DNVKMS_INCLUDE_HEADSURFACE=0 + +OBJS = $(call BUILD_OBJECT_LIST,$(ALL_SRCS)) +OBJS += $(SHADER_OBJS) + +# Define how to generate the NVIDIA ID string +$(eval $(call GENERATE_NVIDSTRING, \ + NV_KMS_ID, \ + UNIX Open Kernel Mode Setting Driver, $(OBJS))) + +# Define how to build each object file from the corresponding source file. +$(foreach src, $(ALL_SRCS), $(eval $(call DEFINE_OBJECT_RULE,TARGET,$(src)))) + +NV_MODESET_KERNEL_O = $(OUTPUTDIR)/nv-modeset-kernel.o + +.PNONY: all clean +all: $(NV_MODESET_KERNEL_O) + +$(NV_MODESET_KERNEL_O): $(OBJS) + $(call quiet_cmd,LD) -r -o $(NV_MODESET_KERNEL_O) $(OBJS) + +clean: + $(RM) -rf $(OUTPUTDIR) diff --git a/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h b/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h new file mode 100644 index 000000000..03ce91c80 --- /dev/null +++ b/src/nvidia-modeset/include/dp/nvdp-connector-event-sink.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ +#define __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head, + NvBool enable); +void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo); +NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_CONNECTOR_EVENT_SINK_H__ */ + diff --git a/src/nvidia-modeset/include/dp/nvdp-connector.h b/src/nvidia-modeset/include/dp/nvdp-connector.h new file mode 100644 index 000000000..4e2823f5a --- /dev/null +++ b/src/nvidia-modeset/include/dp/nvdp-connector.h @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_DP_NVDP_CONNECTOR_H__ +#define __NVKMS_DP_NVDP_CONNECTOR_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +NVDPLibConnectorPtr nvDPCreateConnector(NVConnectorEvoPtr pConnectorEvo); + +void nvDPNotifyLongPulse(NVConnectorEvoPtr pConnectorEvo, + NvBool connected); + +void nvDPNotifyShortPulse(NVDPLibConnectorPtr pNVDpLibConnector); + +void nvDPDestroyConnector(NVDPLibConnectorPtr pNVDpLibConnector); + +NvBool nvDPIsLinkAwaitingTransition(NVConnectorEvoPtr pConnectorEvo); + +NVDPLibModesetStatePtr nvDPLibCreateModesetState( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVDpyIdList dpyIdList, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + NVHwModeTimingsEvo *pTimings); + +void nvDPLibFreeModesetState(NVDPLibModesetStatePtr pDpLibModesetState); + +void nvDPBeginValidation(NVDispEvoPtr pDispEvo); + +NvBool nvDPLibValidateTimings( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVDpyIdList dpyIdList, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const struct NvKmsModeValidationParams *pModeValidationParams, + NVHwModeTimingsEvo *pTimings); + +NvBool nvDPEndValidation(NVDispEvoPtr pDispEvo); + +NvBool nvDPValidateModeForDpyEvo( + const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const struct NvKmsModeValidationParams *pModeValidationParams, + NVHwModeTimingsEvo *pTimings); + +void nvDPPreSetMode(NVDPLibConnectorPtr pDpLibConnector, + const NVEvoModesetUpdateState *pModesetUpdateState); + +void nvDPPostSetMode(NVDPLibConnectorPtr pDpLibConnector); + +void nvDPPause(NVDPLibConnectorPtr pNVDpLibConnector); + +NvBool nvDPResume(NVDPLibConnectorPtr pNVDpLibConnector, NvBool plugged); + +void nvDPSetAllowMultiStreamingOneConnector( + NVDPLibConnectorPtr pDpLibConnector, + NvBool allowMST); + +void nvDPSetAllowMultiStreaming(NVDevEvoPtr pDevEvo, NvBool allowMST); + +enum NVDpLinkMode { + NV_DP_LINK_MODE_OFF, + NV_DP_LINK_MODE_SST, + NV_DP_LINK_MODE_MST, +}; + +enum NVDpLinkMode nvDPGetActiveLinkMode(NVDPLibConnectorPtr pDpLibConnector); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_CONNECTOR_H__ */ diff --git a/src/nvidia-modeset/include/dp/nvdp-device.h b/src/nvidia-modeset/include/dp/nvdp-device.h new file mode 100644 index 000000000..93d8a1c30 --- /dev/null +++ b/src/nvidia-modeset/include/dp/nvdp-device.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
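The begin/validate/end trio above suggests a bracketed flow in which per-head nvDPLibValidateTimings calls happen between nvDPBeginValidation and nvDPEndValidation for a given pDispEvo. A hypothetical caller is sketched below; the exact contract (for example, what nvDPEndValidation's return value folds in) is defined by the implementation, not by these prototypes.

```c
static NvBool validateOneDpHead(
    NVDispEvoPtr pDispEvo,
    NvU32 head,
    NvU32 displayId,
    NVDpyIdList dpyIdList,
    enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace,
    const struct NvKmsModeValidationParams *pModeValidationParams,
    NVHwModeTimingsEvo *pTimings)
{
    NvBool timingsOk;

    nvDPBeginValidation(pDispEvo);

    timingsOk = nvDPLibValidateTimings(pDispEvo, head, displayId, dpyIdList,
                                       colorSpace, pModeValidationParams,
                                       pTimings);

    /* Presumably reports whether the accumulated DP configuration is acceptable. */
    return nvDPEndValidation(pDispEvo) && timingsOk;
}
```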
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_DEVICE_H__ +#define __NVKMS_DP_NVDP_DEVICE_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDPDeviceSetPowerState(NVDpyEvoPtr pDpyEvo, NvBool on); +unsigned int nvDPGetEDIDSize(const NVDpyEvoRec *pDpyEvo); +NvBool nvDPGetEDID(const NVDpyEvoRec *pDpyEvo, void *buffer, unsigned int size); +void nvDPGetDpyGUID(NVDpyEvoPtr pDpyEvo); +void nvDPDpyFree(NVDpyEvoPtr pDpyEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_DEVICE_H__ */ diff --git a/src/nvidia-modeset/include/dp/nvdp-timer.h b/src/nvidia-modeset/include/dp/nvdp-timer.h new file mode 100644 index 000000000..1126eff2c --- /dev/null +++ b/src/nvidia-modeset/include/dp/nvdp-timer.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_DP_NVDP_TIMER_H__ +#define __NVKMS_DP_NVDP_TIMER_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvBool nvDPTimersPending(void); +NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo); +void nvDPFreeTimer(NVDPLibTimerPtr pTimer); +void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DP_NVDP_TIMER_H__ */ diff --git a/src/nvidia-modeset/include/g_nvkms-evo-states.h b/src/nvidia-modeset/include/g_nvkms-evo-states.h new file mode 100644 index 000000000..c3b5ce2e4 --- /dev/null +++ b/src/nvidia-modeset/include/g_nvkms-evo-states.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
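The two EDID prototypes above imply the usual query-size-then-fetch pattern. In the sketch below, allocBytes and freeBytes are stand-ins for whatever allocator the caller uses; they are not NVKMS functions.

```c
#include <stddef.h>

extern void *allocBytes(unsigned int size);   /* stand-in allocator */
extern void freeBytes(void *ptr);             /* stand-in for the matching free */

static NvU8 *readDpEdid(const NVDpyEvoRec *pDpyEvo, unsigned int *pSizeOut)
{
    unsigned int size = nvDPGetEDIDSize(pDpyEvo);
    NvU8 *buffer;

    if (size == 0) {
        return NULL;
    }

    buffer = allocBytes(size);
    if (buffer == NULL) {
        return NULL;
    }

    if (!nvDPGetEDID(pDpyEvo, buffer, size)) {
        freeBytes(buffer);
        return NULL;
    }

    *pSizeOut = size;
    return buffer;
}
```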
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __EVO_STATE_H__ +#define __EVO_STATE_H__ + + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvEvoStateStartNoLock(NVEvoSubDevPtr); + +#ifdef __cplusplus +}; +#endif + +#endif /* __EVO_STATE_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-3dvision.h b/src/nvidia-modeset/include/nvkms-3dvision.h new file mode 100644 index 000000000..1dae05621 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-3dvision.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_3DVISION_H__ +#define __NVKMS_3DVISION_H__ + +#include "nvkms-types.h" + +void nv3DVisionAuthenticationEvo(NVDispEvoRec *pDispEvo, const NvU32 head); + +void nvDpyCheck3DVisionCapsEvo(NVDpyEvoPtr pDpyEvo); +NvBool +nvPatch3DVisionModeTimingsEvo(NVT_TIMING *pTiming, NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString); +void nvDisable3DVisionAegis(const NVDpyEvoRec *pDpyEvo); +void nvSendHwModeTimingsToAegisEvo(const NVDispEvoRec *pDispEvo, + const NvU32 head); + +#endif /* __NVKMS_3DVISION_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-attributes.h b/src/nvidia-modeset/include/nvkms-attributes.h new file mode 100644 index 000000000..d2cea1dbf --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-attributes.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_ATTRIBUTES_H__ +#define __NVKMS_ATTRIBUTES_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvS64 nvRMLaneCountToNvKms(NvU32 rmLaneCount); + +NvBool nvSetDpyAttributeEvo(NVDpyEvoPtr pDpyEvo, + struct NvKmsSetDpyAttributeParams *pParams); + +NvBool nvGetDpyAttributeEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeParams *pParams); + +NvBool nvGetDpyAttributeValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeValidValuesParams *pParams); + +NvBool nvDpyValidateColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 value); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_ATTRIBUTES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-console-restore.h b/src/nvidia-modeset/include/nvkms-console-restore.h new file mode 100644 index 000000000..5b5abebee --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-console-restore.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_CONSOLE_RESTORE_H__ +#define __NVKMS_CONSOLE_RESTORE_H__ + +#include "nvkms-types.h" + +NvBool nvEvoRestoreConsole(NVDevEvoPtr pDevEvo, const NvBool allowMST); + +#endif // __NVKMS_CONSOLE_RESTORE_H__ diff --git a/src/nvidia-modeset/include/nvkms-cursor.h b/src/nvidia-modeset/include/nvkms-cursor.h new file mode 100644 index 000000000..f8b235817 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-cursor.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_CURSOR_H__ +#define __NVKMS_CURSOR_H__ + +#include "nvkms-types.h" + +NvBool nvGetCursorImageSurfaces( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsSetCursorImageCommonParams *pParams, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]); + +NvBool nvSetCursorImage( + NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvU32 head, + const struct NvKmsSetCursorImageCommonParams *pParams); + +void nvEvoMoveCursorInternal(NVDispEvoPtr pDispEvo, + NvU32 head, NvS16 x, NvS16 y); + +void nvEvoMoveCursor(NVDispEvoPtr pDispEvo, NvU32 head, + const struct NvKmsMoveCursorCommonParams *pParams); + +NvBool nvAllocCursorEvo(NVDevEvoPtr pDevEvo); +void nvFreeCursorEvo(NVDevEvoPtr pDevEvo); + +enum NvKmsAllocDeviceStatus nvInitDispHalCursorEvo(NVDevEvoPtr pDevEvo); + +#endif /* __NVKMS_CURSOR_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-dma.h b/src/nvidia-modeset/include/nvkms-dma.h new file mode 100644 index 000000000..b1802301b --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-dma.h @@ -0,0 +1,286 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/* this file contains dma push buffer inlined routines */ + +#ifndef __NVKMS_DMA_H__ +#define __NVKMS_DMA_H__ + +#include + +#include "nvkms-types.h" +#include "nvkms-utils.h" + +#include "class/cl917d.h" + +/* declare prototypes: */ +void nvDmaKickoffEvo(NVEvoChannelPtr); + +void nvEvoMakeRoom(NVEvoChannelPtr pChannel, NvU32 count); +void nvWriteEvoCoreNotifier(const NVDispEvoRec *, NvU32 offset, NvU32 value); + +NvBool nvEvoIsCoreNotifierComplete(NVDispEvoPtr pDispEvo, + NvU32 offset, NvU32 done_base_bit, + NvU32 done_extent_bit, + NvU32 done_false_value); +void nvEvoWaitForCoreNotifier(const NVDispEvoRec *pDispEvo, NvU32 offset, + NvU32 done_base_bit, + NvU32 done_extent_bit, NvU32 done_false_value); +void nvEvoSetSubdeviceMask(NVEvoChannelPtr pChannel, NvU32 mask); + +NvU32 nvEvoReadCRC32Notifier(volatile NvU32 *pCRC32Notifier, + NvU32 entry_stride, + NvU32 entry_count, + NvU32 status_offset, + NvU32 field_count, + NvU32 flag_count, + const CRC32NotifierEntryRec *field_info, + const CRC32NotifierEntryFlags *flag_info); +void nvEvoResetCRC32Notifier(volatile NvU32 *pCRC32Notifier, + NvU32 offset, + NvU32 reset_base_bit, + NvU32 reset_value); +NvBool nvEvoWaitForCRC32Notifier(volatile NvU32 *pCRC32Notifier, + NvU32 offset, + NvU32 done_base_bit, + NvU32 done_extent_bit, + NvU32 done_value); + +#define SUBDEVICE_MASK_ALL DRF_MASK(NV917D_DMA_SET_SUBDEVICE_MASK_VALUE) + +static inline void nvDmaStorePioMethod( + void *pBase, NvU32 offset, NvU32 value) +{ + NvU32 *ptr = ((NvU32 *)pBase) + (offset/sizeof(NvU32)); + + /* + * Use gcc built-in atomic store to ensure the write happens exactly once + * and to ensure ordering. We can use the weaker "relaxed" model because we + * separately use appropriate fencing on anything that needs to preceed this + * write. + */ + __atomic_store_n(ptr, value, __ATOMIC_RELAXED); +} + +static inline NvU32 nvDmaLoadPioMethod( + const void *pBase, NvU32 offset) +{ + const NvU32 *ptr = ((const NvU32 *)pBase) + (offset/sizeof(NvU32)); + + /* + * Use gcc built-in atomic load to ensure the read happens exactly once and + * to ensure ordering. We use the "acquire" model to ensure anything after + * this read doesn't get reordered earlier than this read. (E.g., we don't + * want any writes to the pushbuffer that are waiting on GET to advance to + * get reordered before this read, potentially clobbering the pushbuffer + * before it's been read.) + */ + return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); +} + +static inline NvBool nvDmaSubDevMaskMatchesCurrent( + const NVEvoChannel *pChannel, + const NvU32 subDevMask) +{ + const NvU32 allSubDevices = (1 << pChannel->pb.num_channels) - 1; + + return (subDevMask & allSubDevices) == + (pChannel->pb.currentSubDevMask & allSubDevices); +} + +static inline void nvDmaSetEvoMethodData( + NVEvoChannelPtr pChannel, + const NvU32 data) +{ + *(pChannel->pb.buffer) = data; + pChannel->pb.buffer++; +} + +static inline void nvDmaSetEvoMethodDataU64( + NVEvoChannelPtr pChannel, + const NvU64 data) +{ + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(data)); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(data)); +} + + +/* Get the SDM for a given pDisp */ +static inline NvU32 nvDispSubDevMaskEvo(const NVDispEvoRec *pDispEvo) +{ + return NVBIT(pDispEvo->displayOwner); +} + +/* Initialize the EVO SDM stack */ +static inline void nvInitEvoSubDevMask(NVDevEvoPtr pDevEvo) { + pDevEvo->subDevMaskStackDepth = 0; + pDevEvo->subDevMaskStack[0] = SUBDEVICE_MASK_ALL; +} + +/* Return the SDM at the top of the stack (i.e. 
the currently active one) */ +static inline NvU32 nvPeekEvoSubDevMask(NVDevEvoPtr pDevEvo) { + return pDevEvo->subDevMaskStack[pDevEvo->subDevMaskStackDepth]; +} + +/* Push the given mask onto the stack and set it. */ +static inline void nvPushEvoSubDevMask(NVDevEvoPtr pDevEvo, NvU32 mask) { + pDevEvo->subDevMaskStackDepth++; + + nvAssert(pDevEvo->subDevMaskStackDepth < NV_EVO_SUBDEV_STACK_SIZE); + + pDevEvo->subDevMaskStack[pDevEvo->subDevMaskStackDepth] = mask; +} + +/* Automagically push the SDM for broadcast to disp. */ +static inline void nvPushEvoSubDevMaskDisp(const NVDispEvoRec *pDispEvo) { + NvU32 mask = nvDispSubDevMaskEvo(pDispEvo); + + nvPushEvoSubDevMask(pDispEvo->pDevEvo, mask); +} + +/* Pop the last entry on the stack */ +static inline void nvPopEvoSubDevMask(NVDevEvoPtr pDevEvo) { + pDevEvo->subDevMaskStackDepth--; +} + +/* + * Update the state tracked in updateState to indicate that pChannel has + * pending methods and requires an update/kickoff. + */ +static inline void nvUpdateUpdateState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].channelMask |= pChannel->channelMask; + } + } +} + +/* + * Update the state tracked in updateState to indicate that pChannel has + * pending WindowImmediate methods. + */ +static inline void nvWinImmChannelUpdateState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].winImmChannelMask |= pChannel->channelMask; + } + } +} + +/* + * Update the state tracked in updateState to prevent pChannel from + * interlocking with the core channel on the next UPDATE. + */ +static inline +void nvDisableCoreInterlockUpdateState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].noCoreInterlockMask |= + pChannel->channelMask; + } + } +} + +// These macros verify that the values used in the methods fit +// into the defined ranges. +#define ASSERT_DRF_NUM(d, r, f, n) \ + nvAssert(!(~DRF_MASK(NV ## d ## r ## f) & (n))) + +// From resman nv50/dev_disp.h +#define NV_UDISP_DMA_OPCODE 31:29 /* RWXUF */ +#define NV_UDISP_DMA_OPCODE_METHOD 0x00000000 /* RW--V */ +#define NV_UDISP_DMA_METHOD_COUNT 27:18 /* RWXUF */ +// Technically, the METHOD_OFFSET field is 13:2 for nvdisplay (classes c3*), +// and only 11:2 for older display classes. But, the higher bits were +// unused in the older classes, and we should never push any methods of that +// size on them anyway, so we always use the wider definition here. +#define NV_UDISP_DMA_METHOD_OFFSET 13:2 /* RWXUF */ + +// Start an EVO method. +static inline void nvDmaSetStartEvoMethod( + NVEvoChannelPtr pChannel, + NvU32 method, + NvU32 count) +{ + NVDmaBufferEvoPtr p = &pChannel->pb; + const NvU32 sdMask = nvPeekEvoSubDevMask(p->pDevEvo); + + // We add 1 to the count for the method header. 
+ const NvU32 countPlusHeader = count + 1; + + const NvU32 methodDwords = method >> 2; + + nvAssert((method & 0x3) == 0); + + ASSERT_DRF_NUM(_UDISP, _DMA, _METHOD_COUNT, count); + ASSERT_DRF_NUM(_UDISP, _DMA, _METHOD_OFFSET, methodDwords); + + if (!nvDmaSubDevMaskMatchesCurrent(pChannel, sdMask)) { + if (p->num_channels > 1) { + nvEvoSetSubdeviceMask(pChannel, sdMask); + } + } + + if (p->fifo_free_count <= countPlusHeader) { + nvEvoMakeRoom(pChannel, countPlusHeader); + } + + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(_UDISP, _DMA, _OPCODE, _METHOD) | + DRF_NUM(_UDISP, _DMA, _METHOD_COUNT, count) | + DRF_NUM(_UDISP, _DMA, _METHOD_OFFSET, methodDwords)); + + p->fifo_free_count -= countPlusHeader; +} + +static inline NvBool nvIsUpdateStateEmpty(const NVDevEvoRec *pDevEvo, + const NVEvoUpdateState *updateState) +{ + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (updateState->subdev[sd].channelMask != 0x0) { + return FALSE; + } + } + return TRUE; +} + +#endif /* __NVKMS_DMA_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-dpy.h b/src/nvidia-modeset/include/nvkms-dpy.h new file mode 100644 index 000000000..65dfadfd1 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-dpy.h @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
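The subdevice-mask helpers above are intended to be used as push/pop pairs around method emission; nvDmaSetStartEvoMethod then picks up the mask at the top of the stack via nvPeekEvoSubDevMask. A small illustrative helper follows; the method offset 0x0400 and the data value are placeholders, not real display-class methods.

```c
static void emitPlaceholderMethodToOneDisp(NVDispEvoPtr pDispEvo,
                                           NVEvoChannelPtr pChannel)
{
    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;

    /* Limit the following methods to this pDispEvo's subdevice. */
    nvPushEvoSubDevMaskDisp(pDispEvo);

    nvDmaSetStartEvoMethod(pChannel, 0x0400 /* placeholder offset */, 1);
    nvDmaSetEvoMethodData(pChannel, 0 /* placeholder data */);

    /* Restore whatever mask the caller had active. */
    nvPopEvoSubDevMask(pDevEvo);
}
```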
+ */ + +#ifndef __NVKMS_DPY_H__ +#define __NVKMS_DPY_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo); +void nvDpySetValidSyncsEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsModeValidationValidSyncs *pValidSyncs); +NVDpyEvoPtr nvAllocDpyEvo(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NVDpyId dpyId, const char *dpAddress); +void nvFreeDpyEvo(NVDispEvoPtr pDispEvo, NVDpyEvoPtr pDpyEvo); +NVConnectorEvoPtr nvGetConnectorFromDisp(NVDispEvoPtr pDispEvo, NVDpyId dpyId); + +void nvUpdateInfoFrames(const NVDispEvoRec *pDispEvo, const NvU32 head); + +NvBool nvDpyRequiresDualLinkEvo(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings); + +NVHwModeTimingsEvoPtr +nvGetCurrentModeTimingsForDpyEvo(const NVDpyEvoRec *pDpyEvo); + +NVDpyEvoPtr nvGetDpyEvoFromDispEvo(const NVDispEvoRec *pDispEvo, NVDpyId dpyId); + +NVDpyEvoPtr nvGetDPMSTDpyEvo(NVConnectorEvoPtr pConnectorEvo, + const char *address, NvBool *pDynamicDpyCreated); + +typedef enum { + NVKMS_EDID_READ_MODE_DEFAULT, + NVKMS_EDID_READ_MODE_ACPI, +} NvKmsEdidReadMode; + +NvBool nvDpyReadAndParseEdidEvo( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsQueryDpyDynamicDataRequest *pRequest, + NvKmsEdidReadMode readMode, + NVEdidRec *pEdid, + NVParsedEdidEvoPtr *ppParsedEdid, + NVEvoInfoStringPtr pInfoString); + +char *nvGetDpyIdListStringEvo(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList); + +NvBool nvDpyGetDynamicData( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams); + +void nvDpyUpdateCurrentAttributes(NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyIsAdaptiveSync(const NVDpyEvoRec *pDpyEvo); + +NvBool nvDpyIsAdaptiveSyncDefaultlisted(const NVParsedEdidEvoRec *pParsedEdid); + +enum NvKmsDpyAttributeDigitalSignalValue +nvGetDefaultDpyAttributeDigitalSignalValue(const NVConnectorEvoRec *pConnectorEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_DPY_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-event.h b/src/nvidia-modeset/include/nvkms-event.h new file mode 100644 index 000000000..087476e28 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-event.h @@ -0,0 +1,32 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_EVENT_H__ +#define __NVKMS_EVENT_H__ + +#include "nvkms.h" + +void nvHandleHotplugEventDeferredWork(void *dataPtr, NvU32 dataU32); +void nvHandleDPIRQEventDeferredWork(void *dataPtr, NvU32 dataU32); + +#endif /* __NVKMS_EVENT_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-evo-states.h b/src/nvidia-modeset/include/nvkms-evo-states.h new file mode 100644 index 000000000..dcde326d7 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-evo-states.h @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_STATES_H__ +#define __NVKMS_STATES_H__ + +#include "nvkms-types.h" + +#include "g_nvkms-evo-states.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum NVEvoLockSignal { + NV_EVO_LOCK_SIGNAL_FLIP_LOCK, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK, + NV_EVO_LOCK_SIGNAL_STEREO, +} NVEvoLockSignal; + +typedef enum NVEvoLockAction { + NV_EVO_LOCK_HEADS, + NV_EVO_UNLOCK_HEADS, + NV_EVO_ADD_FRAME_LOCK_SERVER, + NV_EVO_REM_FRAME_LOCK_SERVER, + NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC, + NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC, + NV_EVO_ADD_FRAME_LOCK_CLIENT, + NV_EVO_REM_FRAME_LOCK_CLIENT, + NV_EVO_ENABLE_VRR, + NV_EVO_DISABLE_VRR, + NV_EVO_ADD_FRAME_LOCK_REF, + NV_EVO_REM_FRAME_LOCK_REF, + NV_EVO_ADD_SLI_SECONDARY, + NV_EVO_ADD_SLI_LAST_SECONDARY, + NV_EVO_ADD_SLI_PRIMARY, + NV_EVO_REM_SLI, +} NVEvoLockAction; + +/* nv_evo.c */ + +NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *, + NVEvoSubDevPtr, + NVEvoLockSignal); +NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads); +NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads); + +/* nvkms-hw-states.c */ + +NvBool nvEvoLockHWStateNoLock(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool 
nvEvoLockHWStateFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondary(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, const NvU32 *pHeads); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_STATES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-evo.h b/src/nvidia-modeset/include/nvkms-evo.h new file mode 100644 index 000000000..744db402f --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-evo.h @@ -0,0 +1,297 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_H__ +#define __NVKMS_H__ + +#include "nvkms-types.h" +#include "nvkms-modeset-types.h" +#include "nvkms-api.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern NVEvoInfoStringRec dummyInfoString; + +NVDevEvoPtr nvFindDevEvoByDeviceId(NvU32 deviceId); +NvU8 nvGetGpuLogIndex(void); +void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head, + NVEvoModesetUpdateState *pModesetUpdateState); +void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo, + const NvU32 head, + NVDPLibModesetStatePtr pDpLibModesetState, + NVEvoModesetUpdateState *pModesetUpdateState); +void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync, + NVEvoUpdateState *updateState, NvBool releaseElv); +void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo, + NVEvoUpdateState *updateState); +void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvBool isVrr, + NvBool enable); + +void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *updateState); +void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NvU16 x, + NvU16 y, + NVEvoUpdateState *updateState); +void +nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings, + NvModeTimingsPtr pModeTimings); +void nvEvoSetTimings(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState); +NvBool nvGetDfpProtocol(const NVDpyEvoRec *pDpyEvo, + NVHwModeTimingsEvoPtr pTimings); +void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsScalingUsageBounds *pScaling); +NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps, + struct NvKmsScalingUsageBounds *out); +NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NvBool doubleScan, + NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut); +NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString); +void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo, + NVHwModeViewPortEvo *pViewPort); +struct NvKmsUsageBounds nvUnionUsageBounds(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b); +NvBool UsageBoundsEqual(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b); +NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound( + const enum NvKmsSurfaceMemoryFormat format, + const NvU64 supportedFormatsCapMask); +void nvCancelLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo); +void nvScheduleLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo); +void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo); +void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo, NvU32 *dispNeedsEarlyUpdate, + NVEvoUpdateState *updateState); +void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock); +NvBool nvSetUsageBoundsEvo( + NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState); +void 
nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState); + +void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *pUpdateState); + +void nvSetDitheringEvo( + NVDispEvoPtr pDispEvo, const NvU32 head, + enum NvKmsDpyAttributeRequestedDitheringValue configState, + const enum NvKmsDpyAttributeRequestedDitheringDepthValue configDepth, + const enum NvKmsDpyAttributeRequestedDitheringModeValue configMode, + NVEvoUpdateState *pUpdateState); + +NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo); +NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo); +NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val); +NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value); +NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue); +NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value); +NvBool nvSetStereoEvo(const NVDispEvoRec *pDispEvo, + const NvU32 head, NvBool enable); +NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head); +NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo); +void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo); + +void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo); + +void nvSetDVCEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvS32 dvc, + NVEvoUpdateState *updateState); +void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 value, NVEvoUpdateState *updateState); + +NvBool nvLayerSetPositionEvo( + NVDevEvoPtr pDevEvo, + const struct NvKmsSetLayerPositionRequest *pRequest); + +NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams + *pParams, + NVEvoInfoStringPtr pInfoString); + +NvBool nvConstructHwModeTimingsImpCheckEvo( + const NVConnectorEvoRec *pConnectorEvo, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + const int head); + +NvBool nvDowngradeHwModeTimingsDpPixelDepthEvo( + NVHwModeTimingsEvoPtr pTimings, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace); + +NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams *pParams); + +NvBool nvEvoUpdateHwModeTimingsViewPort( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pModeValidationParams, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvo *pTimings); + +typedef struct _NVValidateImpOneDispHeadParamsRec +{ + const NVConnectorEvoRec *pConnectorEvo; + const struct NvKmsUsageBounds *pUsage; + NvU32 activeRmId; + NVHwModeTimingsEvoPtr pTimings; +} NVValidateImpOneDispHeadParamsRec; + +NvBool nvValidateImpOneDisp( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 *pMinIsoBandwidthKBPS, + NvU32 *pMinDramFloorKBPS); + +NvBool nvAllocateDisplayBandwidth( + NVDispEvoPtr pDispEvo, + NvU32 newIsoBandwidthKBPS, + NvU32 newDramFloorKBPS); + +NvBool nvValidateImpOneDispDowngrade( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 downgradePossibleHeadsBitMask); + 
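As a rough illustration of the IMP-check interface declared just above, a caller fills one NVValidateImpOneDispHeadParamsRec entry per active head (leaving the remaining entries zeroed) and passes the array to nvValidateImpOneDisp(). The wrapper below is a hedged sketch, not code from this change; it handles only a single active head and simply passes the bandwidth reallocation mode through.

static NvBool CheckImpForSingleHead(NVDispEvoPtr pDispEvo,
                                    const NVConnectorEvoRec *pConnectorEvo,
                                    const struct NvKmsUsageBounds *pUsage,
                                    NvU32 activeRmId,
                                    NVHwModeTimingsEvoPtr pTimings,
                                    NVEvoReallocateBandwidthMode reallocBandwidth)
{
    NVValidateImpOneDispHeadParamsRec
        timingsParams[NVKMS_MAX_HEADS_PER_DISP] = { };
    NvU32 minIsoBandwidthKBPS = 0;
    NvU32 minDramFloorKBPS = 0;

    /* Only head 0 is active in this sketch. */
    timingsParams[0].pConnectorEvo = pConnectorEvo;
    timingsParams[0].pUsage = pUsage;
    timingsParams[0].activeRmId = activeRmId;
    timingsParams[0].pTimings = pTimings;

    return nvValidateImpOneDisp(pDispEvo, timingsParams,
                                FALSE /* requireBootClocks */,
                                reallocBandwidth,
                                &minIsoBandwidthKBPS,
                                &minDramFloorKBPS);
}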
+NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo); +NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo); + +void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 head, NvBool kickoff, + const struct NvKmsSetLutCommonParams *pParams); +NvBool nvValidateSetLutCommonParams( + const NVDevEvoRec *pDevEvo, + const struct NvKmsSetLutCommonParams *pParams); + +void nvChooseCurrentColorSpaceAndRangeEvo( + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace, + const enum NvKmsDpyAttributeColorRangeValue requestedColorRange, + enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace, + enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange); + +void nvUpdateCurrentHardwareColorSpaceAndRangeEvo( + NVDispEvoPtr pDispEvo, + const NvU32 head, + NVEvoUpdateState *pUpdateState); + +void nvSetColorSpaceAndRangeEvo( + NVDispEvoPtr pDispEvo, const NvU32 head, + const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace, + const enum NvKmsDpyAttributeColorRangeValue requestedColorRange, + NVEvoUpdateState *pUpdateState); + +NvBool nvAssignSOREvo(NVConnectorEvoPtr pConnectorEvo, NvU32 sorExcludeMask); +void nvRestoreSORAssigmentsEvo(NVDevEvoRec *pDevEvo); + +void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo, + NvBool enable, NvBool isPre); + +void nvUnbloatHwModeTimingsEvo(NVHwModeTimingsEvoPtr pTimings, NvU32 factor); + +NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head, + CRC32NotifierCrcOut *crcOut /* out */); + +NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo); +NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest, + enum NvKmsAllocDeviceStatus *pStatus); +NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo); +NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU32 *val, NvBool set, + NvBool *needsEarlyUpdate, + NVEvoUpdateState *updateState); + +void nvEvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, + NvU32 head, NvBool kickOff); + +NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo, + const NvU32 sd, + NVEvoChannelPtr pChannel, + NvU64 *pStartTime, + const NvU32 timeout); + +static inline void nvAssertSameSemaphoreSurface( + const NVFlipChannelEvoHwState *pHwState) +{ + + /*! + * pHwState->syncObject contains separate fields to track the semaphore + * surface used for acquire, and the semaphore surface used for release. + * Prior to NvDisplay 4.0, display HW only supports using a single semaphore + * surface for both acquire and release. As such, assert that the semaphore + * surfaces in pHwState->syncObject are the same, and that we're also not + * using syncpoints. This is enforced during flip validation. 
+ */ + + nvAssert(pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == + pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo); + + nvAssert(!pHwState->syncObject.usingSyncpt); +} + +void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo); + +NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask, + NvBool server); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-evo1.h b/src/nvidia-modeset/include/nvkms-evo1.h new file mode 100644 index 000000000..8b74629cb --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-evo1.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_EVO_1_H__ +#define __NVKMS_EVO_1_H__ + +#include "nvkms-types.h" + +NvBool nvEvo1IsChannelIdle(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); +NvBool nvEvo1IsChannelMethodPending(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + +void nvEvo1IsModePossible(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput); +void nvEvo1PrePostIMP(NVDispEvoPtr pDispEvo, NvBool isPre); + +void nvEvo1SetDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings); + +NVEvoChannel* nvEvo1AllocateCoreChannel(NVDevEvoRec *pDevEvo); +void nvEvo1FreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel); + +static inline NvU16 nvEvo1GetColorSpaceFlag(NVDevEvoPtr pDevEvo, + const NvBool colorSpaceOverride) +{ + NvU16 colorSpaceFlag = 0; + + if (colorSpaceOverride) { + nvAssert(pDevEvo->caps.supportsDP13); + colorSpaceFlag = 1 << 11; + } + + return colorSpaceFlag; +} + +#endif /* __NVKMS_EVO_1_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-flip-workarea.h b/src/nvidia-modeset/include/nvkms-flip-workarea.h new file mode 100644 index 000000000..a2bb25b82 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-flip-workarea.h @@ -0,0 +1,51 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_FLIP_WORKAREA_H__ +#define __NVKMS_FLIP_WORKAREA_H__ + +#include "nvkms-types.h" + +struct NvKmsFlipWorkArea { + struct { + NvBool changed; + struct { + /* + * Pre flip usage bounds are the union of current and new + * usable usage bounds: the unioned usage bounds have to + * allow both the current state and the state being flipped to. + * This field is set and used by PreFlipIMP() and its + * helper functions. + */ + struct NvKmsUsageBounds preFlipUsage; + + NVFlipEvoHwState newState; + NVFlipEvoHwState oldState; + + NvU32 oldAccelerators; + NvBool accelerated; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } sd[NVKMS_MAX_SUBDEVICES]; +}; + +#endif /* __NVKMS_FLIP_WORKAREA_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-flip.h b/src/nvidia-modeset/include/nvkms-flip.h new file mode 100644 index 000000000..e02f7f69d --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-flip.h @@ -0,0 +1,92 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_FLIP_H__ +#define __NVKMS_FLIP_H__ + + +#include "nvkms-types.h" + +void nvClearFlipEvoHwState( + NVFlipEvoHwState *pFlipState); + +void nvInitFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + NVFlipEvoHwState *pFlipState); + +NvBool nvUpdateFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState, + NvBool allowVrr, + const struct NvKmsUsageBounds *pPossibleUsage); + +NvBool nvValidateFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings, + const NVFlipEvoHwState *pFlipState); + +void +nvUpdateSurfacesFlipRefCount( + NVDevEvoPtr pDevEvo, + const NvU32 head, + NVFlipEvoHwState *pFlipState, + NvBool increase); + +void nvFlipEvoOneHead( + NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const NVFlipEvoHwState *pFlipState, + NvBool allowFlipLock, + NVEvoUpdateState *updateState); + +void nvEvoCancelPostFlipIMPTimer( + NVDevEvoPtr pDevEvo); + +NvBool nvHandleSyncptRegistration( + NVDevEvoRec *pDevEvo, + NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState); + +void nvFillPostSyncptReplyOneChannel( + NVEvoChannel *pChannel, + enum NvKmsSyncptType postType, + struct NvKmsSyncpt *postSyncpt, + const NVFlipSyncObjectEvoHwState *pHwSyncObject); + +NvBool nvFlipEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsFlipRequest *request, + struct NvKmsFlipReply *reply, + NvBool skipUpdate, + NvBool allowFlipLock); + +#endif /* __NVKMS_FLIP_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-framelock.h b/src/nvidia-modeset/include/nvkms-framelock.h new file mode 100644 index 000000000..9579fb0db --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-framelock.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
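Taken together, the nvkms-flip.h entry points above imply a per-head sequence: clear and initialize an NVFlipEvoHwState, fold in the client's NvKmsFlipCommonParams, validate the result against the head's timings, and only then program the flip. The following sketch shows that sequence under simplifying assumptions (no VRR, no flip lock, surface reference counting omitted); the wrapper name TryFlipOneHead is illustrative and not part of this change.

static NvBool TryFlipOneHead(const struct NvKmsPerOpenDev *pOpenDev,
                             NVDevEvoPtr pDevEvo,
                             NvU32 sd,
                             NvU32 head,
                             const struct NvKmsFlipCommonParams *pParams,
                             const NVHwModeTimingsEvo *pTimings,
                             const struct NvKmsUsageBounds *pPossibleUsage,
                             NVEvoUpdateState *updateState)
{
    NVFlipEvoHwState flipState;

    /* Start from the current hardware state for this (sd, head). */
    nvClearFlipEvoHwState(&flipState);
    nvInitFlipEvoHwState(pDevEvo, sd, head, &flipState);

    /* Apply the client's requested changes. */
    if (!nvUpdateFlipEvoHwState(pOpenDev, pDevEvo, sd, head, pParams,
                                &flipState, FALSE /* allowVrr */,
                                pPossibleUsage)) {
        return FALSE;
    }

    /* Reject the request if it is not achievable with these timings. */
    if (!nvValidateFlipEvoHwState(pDevEvo, head, pTimings, &flipState)) {
        return FALSE;
    }

    /* Program the flip; the caller kicks off updateState later. */
    nvFlipEvoOneHead(pDevEvo, sd, head, &flipState,
                     FALSE /* allowFlipLock */, updateState);
    return TRUE;
}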
+ */ + +#ifndef __NVKMS_FRAMELOCK_H__ +#define __NVKMS_FRAMELOCK_H__ + +#include "nvkms-types.h" + +void nvAllocFrameLocksEvo(NVDevEvoPtr pDevEvo); +void nvFreeFrameLocksEvo(NVDevEvoPtr pDevEvo); + +NvBool nvFrameLockSetUseHouseSyncEvo(NVFrameLockEvoPtr, NvU32); +NvBool nvFrameLockGetStatusEvo(const NVFrameLockEvoRec *, + enum NvKmsFrameLockAttribute attribute, + NvS64*); + +NvBool nvSetFrameLockDisplayConfigEvo(NVDpyEvoRec *pDpyEvo, NvS64 val); +NvBool nvGetFrameLockDisplayConfigEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val); +NvBool nvGetFrameLockDisplayConfigValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + +NvBool nvSetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsSetDispAttributeParams *pParams); + +NvBool nvGetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsGetDispAttributeParams *pParams); + +NvBool nvGetDispAttributeValidValuesEvo( + const NVDispEvoRec *pDispEvo, + struct NvKmsGetDispAttributeValidValuesParams *pParams); + +NvBool nvSetFrameLockAttributeEvo( + NVFrameLockEvoRec *pFrameLockEvo, + const struct NvKmsSetFrameLockAttributeParams *pParams); + +NvBool nvGetFrameLockAttributeEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeParams *pParams); + +NvBool nvGetFrameLockAttributeValidValuesEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeValidValuesParams *pParams); + +NvU32 nvGetFramelockServerHead(const NVDispEvoRec *pDispEvo); +NvU32 nvGetFramelockClientHeadsMask(const NVDispEvoRec *pDispEvo); + +static inline NvBool +nvIsFramelockableHead(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + return (head != NV_INVALID_HEAD) && + ((head == nvGetFramelockServerHead(pDispEvo)) || + ((NVBIT(head) & nvGetFramelockClientHeadsMask(pDispEvo)) != 0x0)); +} + +void nvUpdateGLSFramelock(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvBool enable, const NvBool server); + +#endif /* __NVKMS_FRAMELOCK_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-hal.h b/src/nvidia-modeset/include/nvkms-hal.h new file mode 100644 index 000000000..6675a0b87 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-hal.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_HAL_H__ +#define __NVKMS_HAL_H__ + +#include "nvkms-types.h" + +enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo); + +#endif /* __NVKMS_HAL_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-hdmi.h b/src/nvidia-modeset/include/nvkms-hdmi.h new file mode 100644 index 000000000..69f6cd958 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-hdmi.h @@ -0,0 +1,78 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_HDMI_H__ +#define __NVKMS_HDMI_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +void nvUpdateHdmiInfoFrames(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVAttributesSetEvoRec *pAttributesSet, + const NvBool hdTimings, + const NVT_VIDEO_INFOFRAME_CTRL *pCtrl, + NVDpyEvoRec *pDpyEvo); + +void nvDpyUpdateHdmiPreModesetEvo(NVDpyEvoPtr pDpyEvo); +void nvDpyUpdateHdmiVRRCaps(NVDpyEvoPtr pDpyEvo); +void nvUpdateHdmiCaps(NVDpyEvoPtr pDpyEvo); + +void nvLogEdidCea861InfoEvo(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString); +NvBool nvDpyIsHdmiEvo(const NVDpyEvoRec *pDpyEvo); + +NvBool nvHdmi204k60HzRGB444Allowed(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming); + +void nvHdmiDpEnableDisableAudio(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable); + +void nvRemoveUnusedHdmiDpAudioDevice(const NVDispEvoRec *pDispEvo); + +void nvHdmiSetVRR(NVDispEvoPtr pDispEvo, NvU32 head, NvBool enable); + +NvBool nvInitHdmiLibrary(NVDevEvoRec *pDevEvo); +void nvTeardownHdmiLibrary(NVDevEvoRec *pDevEvo); + +NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo); +NvBool nvHdmiFrlQueryConfig(const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + NVHwModeTimingsEvo *pTimings, + const struct NvKmsModeValidationParams *pParams); +void nvHdmiFrlClearConfig(NVDispEvoRec *pDispEvo, NvU32 activeRmId); +void nvHdmiFrlSetConfig(NVDispEvoRec *pDispEvo, NvU32 head); + +void nvHdmiDpConstructHeadAudioState(const NvU32 displayId, + const NVDpyEvoRec *pDpyEvo, + NVDispHeadAudioStateEvoRec *pAudioState); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_HDMI_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-lut.h b/src/nvidia-modeset/include/nvkms-lut.h new file mode 100644 index 000000000..a9462ce4b --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-lut.h @@ 
-0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_LUT_H__ +#define __NVKMS_LUT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" + +NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo); + +void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo); + +void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo, + const NVEvoLutDataRec *pLUTBuffer, + NVDispEvoPtr pDispEvo); + +static inline void nvCancelLutUpdateEvo( + const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + nvkms_free_timer(pDevEvo->lut.head[head].disp[pDispEvo->displayOwner].updateTimer); + pDevEvo->lut.head[head].disp[pDispEvo->displayOwner].updateTimer = NULL; +} + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_LUT_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-modepool.h b/src/nvidia-modeset/include/nvkms-modepool.h new file mode 100644 index 000000000..3ac3e3e9e --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-modepool.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_MODEPOOL_H__ +#define __NVKMS_MODEPOOL_H__ + +#include "nvkms-types.h" +#include "nvkms-utils.h" /* NVEvoLogType */ + +#ifdef __cplusplus +extern "C" { +#endif + +void +nvValidateModeIndex(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeIndexRequest *pRequest, + struct NvKmsValidateModeIndexReply *pReply); +void +nvValidateModeEvo(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeRequest *pRequest, + struct NvKmsValidateModeReply *pReply); + +void nvEvoLogModeValidationModeTimings(NVEvoInfoStringPtr + pInfoString, + const NvModeTimings *pModeTimings); + +NvBool nvValidateModeForModeset(NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvo *pTimingsEvo); + +const NVT_TIMING *nvFindEdidNVT_TIMING( + const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsModeValidationParams *pParams); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_MODEPOOL_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-modeset-types.h b/src/nvidia-modeset/include/nvkms-modeset-types.h new file mode 100644 index 000000000..35bede3b0 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-modeset-types.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODESET_TYPES_H__ +#define __NVKMS_MODESET_TYPES_H__ + +/* This header file defines types used internally by the modeset path. 
*/ + +#include "nvkms-types.h" + +typedef struct { + NVHwModeTimingsEvo timings; + NVDpyIdList dpyIdList; + NVConnectorEvoRec *pConnectorEvo; + NvU32 activeRmId; + struct NvKmsSetLutCommonParams lut; + NvU8 allowFlipLockGroup; + enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace; + enum NvKmsDpyAttributeColorRangeValue colorRange; + struct NvKmsModeValidationParams modeValidationParams; + NvBool changed : 1; + NvBool allowGsync : 1; + NvBool hs10bpcHint : 1; + enum NvKmsAllowAdaptiveSync allowAdaptiveSync; + NvU32 vrrOverrideMinRefreshRate; + NVDPLibModesetStatePtr pDpLibModesetState; + NVDispHeadAudioStateEvoRec audio; +} NVProposedModeSetHwStateOneHead; + +typedef struct { + NVProposedModeSetHwStateOneHead head[NVKMS_MAX_HEADS_PER_DISP]; +} NVProposedModeSetHwStateOneDisp; + +typedef struct { + struct { + NVFlipEvoHwState flip; + } head[NVKMS_MAX_HEADS_PER_DISP]; +} NVProposedModeSetHwStateOneSubDev; + +typedef struct { + NVProposedModeSetHwStateOneDisp disp[NVKMS_MAX_SUBDEVICES]; + NVProposedModeSetHwStateOneSubDev sd[NVKMS_MAX_SUBDEVICES]; + NvBool allowHeadSurfaceInNvKms : 1; +} NVProposedModeSetHwState; + +struct _NVEvoModesetUpdateState { + NVEvoUpdateState updateState; + NVDpyIdList connectorIds; + const NVDPLibModesetStateRec + *pDpLibModesetState[NVKMS_MAX_HEADS_PER_DISP]; + NvBool windowMappingChanged; +}; + +#endif /* __NVKMS_MODESET_TYPES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-modeset-workarea.h b/src/nvidia-modeset/include/nvkms-modeset-workarea.h new file mode 100644 index 000000000..9890b8725 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-modeset-workarea.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODESET_WORKAREA_H__ +#define __NVKMS_MODESET_WORKAREA_H__ + +typedef struct { + struct { + struct { + NVFlipEvoHwState newState; + NVFlipEvoHwState oldState; + NvU32 oldActiveRmId; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + NVDpyIdList changedDpyIdList; + + NVDpyIdList sorAssignedConnectorsList; + NvU32 assignedSorMask; + + } sd[NVKMS_MAX_SUBDEVICES]; + NVEvoUpdateState earlyUpdateState; + NVEvoModesetUpdateState modesetUpdateState; + + /* + * The display bandwidth values that NVKMS needs to allocate after the + * modeset is complete. 
+ */ + NvU32 postModesetIsoBandwidthKBPS; + NvU32 postModesetDramFloorKBPS; +} NVModeSetWorkArea; + +struct NvKmsVrrTimings { + struct { + struct { + NVHwModeTimingsEvo timings; + NvBool adjusted; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +#endif /* __NVKMS_MODESET_WORKAREA_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-modeset.h b/src/nvidia-modeset/include/nvkms-modeset.h new file mode 100644 index 000000000..3b63b235d --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-modeset.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_MODESET_H__ +#define __NVKMS_MODESET_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + NvBool bypassComposition, + NvBool doRasterLock); + +typedef NvBool (*NVShutDownHeadsTestFunc)( + const NVDispEvoRec *pDispEvo, + const NvU32 head); + +void nvShutDownHeads(NVDevEvoPtr pDevEvo, NVShutDownHeadsTestFunc pTestFunc); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_MODESET_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-prealloc-types.h b/src/nvidia-modeset/include/nvkms-prealloc-types.h new file mode 100644 index 000000000..947a2a55d --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-prealloc-types.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
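nvShutDownHeads() in nvkms-modeset.h above takes a per-head predicate of type NVShutDownHeadsTestFunc to decide which heads should be shut down. A trivial, illustrative callback that selects every head might look like the sketch below; whether NVKMS itself uses exactly this pattern internally is an assumption of the sketch.

static NvBool ShutDownEveryHeadTest(const NVDispEvoRec *pDispEvo,
                                    const NvU32 head)
{
    /* Select every head, regardless of disp or head index. */
    return TRUE;
}

static void ShutDownEveryHead(NVDevEvoPtr pDevEvo)
{
    nvShutDownHeads(pDevEvo, ShutDownEveryHeadTest);
}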
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_PREALLOC_TYPES_H__ +#define __NVKMS_PREALLOC_TYPES_H__ + +#include "nvtypes.h" + +enum NVPreallocType { + PREALLOC_TYPE_IMP_PARAMS, + PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE, + PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE, + PREALLOC_TYPE_MODE_SET_WORK_AREA, + PREALLOC_TYPE_FLIP_WORK_AREA, + PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE, + PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE, + PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS, + PREALLOC_TYPE_MAX +}; + +struct NVDevPreallocRec { + void *ptr[PREALLOC_TYPE_MAX]; + NvU8 used[(PREALLOC_TYPE_MAX + 7) / 8]; +}; + +#endif /* __NVKMS_PREALLOC_TYPES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-prealloc.h b/src/nvidia-modeset/include/nvkms-prealloc.h new file mode 100644 index 000000000..2616dce00 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-prealloc.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_PREALLOC_H__ +#define __NVKMS_PREALLOC_H__ + +#include "nvkms-types.h" +#include "nvkms-prealloc-types.h" + +void *nvPreallocGet(NVDevEvoPtr pDevEvo, enum NVPreallocType type, size_t sizeCheck); +void nvPreallocRelease(NVDevEvoPtr pDevEvo, enum NVPreallocType type); + +NvBool nvPreallocAlloc(NVDevEvoPtr pDevEvo); +void nvPreallocFree(NVDevEvoPtr pDevEvo); + +#endif /* __NVKMS_PREALLOC_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-private.h b/src/nvidia-modeset/include/nvkms-private.h new file mode 100644 index 000000000..cfb5eb9e5 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-private.h @@ -0,0 +1,81 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
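The prealloc helpers declared above follow a simple get/release discipline: nvPreallocGet() hands back the buffer reserved for a given PREALLOC_TYPE_* slot (the sizeCheck argument presumably guards against a mismatch with the size set up by nvPreallocAlloc()), and nvPreallocRelease() marks the slot available again. A hedged usage sketch, using the flip work area type from nvkms-flip-workarea.h:

static NvBool WithFlipWorkArea(NVDevEvoPtr pDevEvo)
{
    struct NvKmsFlipWorkArea *pWorkArea =
        nvPreallocGet(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA,
                      sizeof(*pWorkArea));
    NvBool ret = FALSE;

    if (pWorkArea != NULL) {
        /* ... use pWorkArea for the duration of the operation ... */
        ret = TRUE;

        /* Return the slot so the next caller can reuse it. */
        nvPreallocRelease(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA);
    }

    return ret;
}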
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_KMS_PRIVATE_H__ +#define __NV_KMS_PRIVATE_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, + NVDevEvoPtr pDevEvo, NvBool isPrivileged); + +void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev); + +void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType); + +void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttribute attribute, + const NvS64 value); + +void nvSendFrameLockAttributeChangedEventEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + const enum NvKmsFrameLockAttribute attribute, + const NvS64 value); + +void nvSendFlipOccurredEventEvo( + const NVDevEvoRec *pDevEvo, + NVEvoChannelMask channelMask); + +void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen); + +void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen); + +#if defined(DEBUG) +NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo); +#endif + +const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev); + +const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev); + +NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev( + struct NvKmsPerOpenDev *pOpenDev); +const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst( + const struct NvKmsPerOpenDev *pOpenDev); +NVDevEvoPtr nvGetDevEvoFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev); + +void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NV_KMS_PRIVATE_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-rm.h b/src/nvidia-modeset/include/nvkms-rm.h new file mode 100644 index 000000000..a0e2e901c --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-rm.h @@ -0,0 +1,152 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_RM_H__ +#define __NVKMS_RM_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvkms-types.h" +#include /* NV0092_REGISTER_RG_LINE_CALLBACK_FN */ +#include /* OSVBLANKCALLBACKPROC */ + +NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo, + NvU32 dpcdAddr, + NvU8 dpcdData); + +NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo, + NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + struct nvkms_ref_ptr *ref_ptr, + NvU32 parentHandle, + NvU32 eventHandle, + Callback5ArgVoidReturn func, + NvU32 event); + +enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo); +void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo); +enum NvKmsBeginEndModeset { + BEGIN_MODESET, + END_MODESET +}; +void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo, enum NvKmsBeginEndModeset, NvU32 mask); +NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList); +void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 dpyId); +void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly); +NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo, + NVDpyIdList dpyIdList); +NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo); +void nvRmPauseDP(NVDevEvoPtr pDevEvo); +NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value); +NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + NvU32 *ctxDmaFlags, void **ppBase, NvU64 size, + NvKmsMemoryIsoType isoType); +NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo); +NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo); +void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo); +void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo); +NvBool nvRMSyncEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 errorToken); +NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd, + NvBool *stoppedBase); +NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID); +NvU32 nvRmEvoBindDispContextDMA( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 hCtxDma); +NvU32 nvRmEvoAllocateAndBindDispContextDMA( + NVDevEvoPtr pDevEvo, + NvU32 hMemory, + const enum NvKmsSurfaceMemoryLayout layout, + NvU64 limit); +NvBool nvRmEvoAllocAndBindSyncpt( + NVDevEvoRec *pDevEvo, + 
NVEvoChannel *pChannel, + NvU32 id, + NvU32 *pSyncptHandle, + NvU32 *pSyncptCtxDmaHandle); +void nvRmEvoFreePreSyncpt(NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel); +NvBool nvRmGarbageCollectSyncpts( + NVDevEvoRec *pDevEvo); +void nvRmEvoFreeSyncpt(NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pEvoSyncpt); +void nvRmEvoFreeDispContextDMA(NVDevEvoPtr pDevEvo, + NvU32 *hDispCtxDma); +void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]); +NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, NvU64 size, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES], + NvU32 subDeviceMask); +NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsAllocDeviceRequest *pRequest); +void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo); +NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList); +NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd); +NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo); +void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo); +NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU64 limit, + NvU32 ctxDmaFlags, + NvU32 subDeviceMask); +void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma); +NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue); +NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo); +NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed); +NvU32 nvRmAddRgLine1Callback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + NV0092_REGISTER_RG_LINE_CALLBACK_FN pCallback); +void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle); +NvU32 nvRmAddVBlankCallback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + OSVBLANKCALLBACKPROC pCallback); +void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle); +void nvRmMuxInit(NVDevEvoPtr pDevEvo); +NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state); +NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo); + +void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo); +void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_RM_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-rmapi.h b/src/nvidia-modeset/include/nvkms-rmapi.h new file mode 100644 index 000000000..a4f5cf677 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-rmapi.h @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_RMAPI_H__ + +#define __NVKMS_RMAPI_H__ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NvU32 nvRmApiAlloc( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams); + +NvU32 nvRmApiAllocMemory64( + NvU32 hClient, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit); + +NvU32 nvRmApiControl( + NvU32 hClient, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize); + +NvU32 nvRmApiDupObject( + NvU32 hClient, + NvU32 hParent, + NvU32 hObjectDest, + NvU32 hClientSrc, + NvU32 hObjectSrc, + NvU32 flags); + +NvU32 nvRmApiFree( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject); + +NvU32 nvRmApiVidHeapControl( + void *pVidHeapControlParams); + +NvU32 nvRmApiMapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + void **ppLinearAddress, + NvU32 flags); + +NvU32 nvRmApiUnmapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + const void *pLinearAddress, + NvU32 flags); + +NvU32 nvRmApiMapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset); + +NvU32 nvRmApiUnmapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_RMAPI_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-softfloat.h b/src/nvidia-modeset/include/nvkms-softfloat.h new file mode 100644 index 000000000..43f9fa5e8 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-softfloat.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_SOFTFLOAT_H__ +#define __NVKMS_SOFTFLOAT_H__ + +/* + * This header file provides utility code built on top of the softfloat floating + * point emulation library. + */ + +#include "nv-softfloat.h" +#include "nvkms-api-types.h" + +/* + * A 3x3 row-major matrix of float32_t's. 
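+ *
+ * Entries are indexed as m[row][column], matching the loops in
+ * NvKmsMatrixToNvKmsMatrixF32() and nvMultiply3x4Matrix() below; for
+ * example, m[1][2] is the element in row 1, column 2 (zero-based).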
+ */ +struct NvKmsMatrixF32 { + float32_t m[3][3]; +}; + +/* + * A 3x4 row-major matrix of float32_t's. + */ +struct NvKms3x4MatrixF32 { + float32_t m[3][4]; +}; + +/* + * Convert from an NvKmsMatrix (stores floating point values in NvU32s) to an + * NvKmsMatrixF32 (stores floating point values in float32_t). + */ +static inline struct NvKmsMatrixF32 NvKmsMatrixToNvKmsMatrixF32( + const struct NvKmsMatrix in) +{ + struct NvKmsMatrixF32 out = { }; + int i, j; + + for (j = 0; j < 3; j++) { + for (i = 0; i < 3; i++) { + out.m[i][j] = NvU32viewAsF32(in.m[i][j]); + } + } + + return out; +} + +/* + * Compute the matrix product A * B, where A is a 3x3 matrix and B is a 3x4 matrix, + * and return the resulting 3x4 matrix. + */ +static inline struct NvKms3x4MatrixF32 nvMultiply3x4Matrix(const struct NvKmsMatrixF32 *A, + const struct NvKms3x4MatrixF32 *B) +{ + struct NvKms3x4MatrixF32 C = { }; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 4; ++j) { + for (int k = 0; k < 3; ++k) { + C.m[i][j] = f32_mulAdd(A->m[i][k], B->m[k][j], C.m[i][j]); + } + } + } + + return C; +} + +/* return x**y */ +float64_t nvKmsPow(float64_t x, float64_t y); + +#endif /* __NVKMS_SOFTFLOAT_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-surface.h b/src/nvidia-modeset/include/nvkms-surface.h new file mode 100644 index 000000000..89dd4e5e7 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-surface.h @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVKMS_SURFACE_H__ +#define __NVKMS_SURFACE_H__ + +#include "nvkms-types.h" + +void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsRegisterSurfaceParams *pParams, + enum NvHsMapPermissions hsMapPermissions); + +void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle, + NvBool skipUpdate); +void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle); + +void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NVEvoApiHandlesRec *pOpenDevSurfaceHandles); + +void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo); +void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo); + +void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo); +void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo); + +NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo); + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NVEvoChannelMask channelMask); + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoCtxDmaOk( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvKmsSurfaceHandle surfaceHandle); + +NVDeferredRequestFifoRec *nvEvoRegisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); + +void nvEvoUnregisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo); + +static inline NvBool nvEvoIsSurfaceOwner(const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle) +{ + return ((pSurfaceEvo->owner.pOpenDev == pOpenDev) && + (pSurfaceEvo->owner.surfaceHandle == surfaceHandle)); +} + +#define ASSERT_EYES_MATCH(_arr, _field) \ + nvAssert((_arr)[NVKMS_RIGHT] == NULL || \ + (_arr)[NVKMS_LEFT]->_field == (_arr)[NVKMS_RIGHT]->_field); + +ct_assert((NVKMS_RIGHT - NVKMS_LEFT) == 1); + +#define FOR_ALL_EYES(_eye) \ + for ((_eye) = NVKMS_LEFT; (_eye) <= NVKMS_RIGHT; (_eye)++) + +#define FOR_ALL_VALID_PLANES(_planeIndex, _pSurface) \ + for ((_planeIndex) = 0; \ + (_planeIndex) < \ + (nvKmsGetSurfaceMemoryFormatInfo((_pSurface)->format))->numPlanes; \ + (_planeIndex)++) + +#endif /* __NVKMS_SURFACE_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-types.h b/src/nvidia-modeset/include/nvkms-types.h new file mode 100644 index 000000000..11338146e --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-types.h @@ -0,0 +1,2638 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_TYPES_H__ +#define __NVKMS_TYPES_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvidia-modeset-os-interface.h" + +#include "nvctassert.h" +#include "nv_list.h" + +#include /* NV0073_CTRL_SPECIFIC_OR_PROTOCOL_* */ +#include /* NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE */ +#include /* NV0000_CTRL_GPU_MAX_ATTACHED_GPUS */ +#include /* NV0080_CTRL_OS_UNIX_VT_SWITCH_FB_INFO */ +#include /* NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_* */ +#include /* NV30F1_CTRL_MAX_GPUS_PER_GSYNC */ +#include /* NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE */ +#include + +#include "nvkms-api.h" +#include "nvkms-prealloc-types.h" + +#include "nvos.h" + +#include "nv_common_utils.h" +#include "nv_assert.h" +#include "unix_rm_handle.h" + +#include "nvmisc.h" + +#include "timing/nvtiming.h" +#include "timing/nvt_dsc_pps.h" +#include "hdmipacket/nvhdmi_frlInterface.h" // HDMI_{SRC,SINK}_CAPS + +#include + +#if defined(DEBUG) || defined(DEVELOP) +#define NVKMS_PROCFS_ENABLE 1 +#else +#define NVKMS_PROCFS_ENABLE 0 +#endif + +#define NV_DMA_EVO_PUSH_BUFFER_SIZE (4 * 1024) +#define NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE (4 * 12) +#define NV_DMA_EVO_NOTIFIER_SIZE 4096 + +#define NV_NUM_EVO_LUT_ENTRIES 1025 +/* + * Size of the nvdisplay 3 LUT variable segment size header, in LUT entries + * (which are 8 bytes each). + */ +#define NV_LUT_VSS_HEADER_SIZE 4 + +#define NV_EVO_SUBDEV_STACK_SIZE 10 + +#define NV_DP_READ_EDID_RETRIES 18 +#define NV_DP_REREAD_EDID_DELAY_USEC 500 /* in microseconds */ + +#define NV_EVO_SURFACE_ALIGNMENT 0x1000 + +/* + * Prior to nvdisplay 4.0, the final address for all scanout surfaces must be + * 256B-aligned. + * + * For nvdisplay 4.0, the final address for all scanout surfaces must be + * 512B-aligned for GPU, and 1KB-aligned for Tegra. + * + * NVKMS already uses NV_EVO_SURFACE_ALIGNMENT to force 4KB-alignment for the + * base address of each scanout surface. As such, we're forcing 1KB-alignment + * for the corresponding ctxdma offsets in order to be compatible with all + * display architectures. + */ +#define NV_SURFACE_OFFSET_ALIGNMENT_SHIFT 10 + +#define NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH 6U /* 64 bytes (2^6) */ +#define NVKMS_BLOCK_LINEAR_GOB_WIDTH ((NvU32)1 << NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH) + +#define NV_INVALID_OR 0xFFFFFFFF + +#define NVKMS_RM_HEAP_ID 0xDCBA + +#define NVKMS_MAX_WINDOWS_PER_DISP 32 + +#define NV_SYNCPT_GLOBAL_TABLE_LENGTH 1024 + +#define HEAD_MASK_QUERY(_mask, _head) (!!((_mask) & (1 << (_head)))) +#define HEAD_MASK_SET(_mask, _head) ((_mask) | (1 << (_head))) +#define HEAD_MASK_UNSET(_mask, _head) ((_mask) & ~(1 << (_head))) + +#define NVKMS_COMPOSITION_FOR_MATCH_BITS(__colorKeySelect, __match) \ + for ((__match) = (((__colorKeySelect) == \ + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) ? 
1 : 0); \ + (__match) <= 1; (__match)++) + +typedef struct _NVEvoApiHandlesRec *NVEvoApiHandlesPtr; +typedef struct _NVEvoSubDeviceRec *NVSubDeviceEvoPtr; +typedef struct _NVEvoDevRec *NVDevEvoPtr; +typedef struct _NVDmaBufferEvoRec *NVDmaBufferEvoPtr; +typedef struct _NVEvoChannel *NVEvoChannelPtr; +typedef struct _NVEvoHeadControl *NVEvoHeadControlPtr; +typedef struct _NVEvoCapabilities *NVEvoCapabilitiesPtr; +typedef struct _NVEvoSubDevHeadStateRec *NVEvoSubDevHeadStatePtr; +typedef struct _NVEvoSubDevRec *NVEvoSubDevPtr; +typedef struct _NVEvoColorRec *NVEvoColorPtr; +typedef struct _NVHwModeViewPortEvo *NVHwModeViewPortEvoPtr; +typedef struct _NVHwModeTimingsEvo *NVHwModeTimingsEvoPtr; +typedef struct _NVConnectorEvoRec *NVConnectorEvoPtr; +typedef struct _NVVblankSyncObjectRec *NVVblankSyncObjectPtr; +typedef struct _NVDispHeadStateEvoRec *NVDispHeadStateEvoPtr; +typedef struct _NVDispEvoRec *NVDispEvoPtr; +typedef struct _NVParsedEdidEvoRec *NVParsedEdidEvoPtr; +typedef struct _NVDpyEvoRec *NVDpyEvoPtr; +typedef struct _NVLutSurfaceEvo *NVLutSurfaceEvoPtr; +typedef struct _NVFrameLockEvo *NVFrameLockEvoPtr; +typedef struct _NVEvoInfoString *NVEvoInfoStringPtr; +typedef struct _NVSurfaceEvoRec NVSurfaceEvoRec, *NVSurfaceEvoPtr; +typedef struct _NVDeferredRequestFifoRec *NVDeferredRequestFifoPtr; +typedef struct _NVSwapGroupRec *NVSwapGroupPtr; + +/* + * _NVHs*EvoRec are defined in nvkms-headsurface-priv.h; they are intentionally + * opaque outside of the nvkms-headsurface code. + */ +typedef struct _NVHsDeviceEvoRec *NVHsDeviceEvoPtr; +typedef struct _NVHsChannelEvoRec *NVHsChannelEvoPtr; +typedef struct _NVHsSurfaceRec *NVHsSurfacePtr; + +/* _nv_dplibXXX are defined in dp/nvdp-connector-event-sink.h */ +typedef struct _nv_dplibconnector NVDPLibConnectorRec, *NVDPLibConnectorPtr; +typedef struct _nv_dplibdevice NVDPLibDeviceRec, *NVDPLibDevicePtr; +typedef struct __nv_dplibmodesetstate NVDPLibModesetStateRec, *NVDPLibModesetStatePtr; + +/* _nv_dplibtimer is defined in nvdp-timer.hpp */ +typedef struct _nv_dplibtimer NVDPLibTimerRec, *NVDPLibTimerPtr; + +/* _NVEvoModesetUpdateState defined in nvkms-modeset-types.h */ +typedef struct _NVEvoModesetUpdateState NVEvoModesetUpdateState; + +typedef struct _NVEvoApiHandlesRec { + void **pointers; /* Dynamically allocated array of pointers. */ + NvU32 numPointers; /* Number of elements in pointers array. */ + NvU32 defaultSize; +} NVEvoApiHandlesRec; + +typedef struct _NVEvoDma +{ + NvU32 memoryHandle; + NvU32 ctxHandle; + + NvU64 limit; + + /* Whether this is sysmem, or vidmem accessed through a BAR1 mapping. 
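+ * (When TRUE, the subDeviceAddress[] CPU mappings below reference video
+ * memory through the GPU's BAR1 aperture; when FALSE, they reference
+ * system memory directly.)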
*/ + NvBool isBar1Mapping; + + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]; +} NVEvoDma, *NVEvoDmaPtr; + +typedef struct _NVDmaBufferEvoRec +{ + NVEvoDma dma; + + NvU32 channel_handle; // handles + NvU32 num_channels; + void *control[NVKMS_MAX_SUBDEVICES]; + NvU32 *base; // Push buffer start pointer + NvU32 *buffer;// Push buffer current pointer + NvU32 *end; // Push buffer end pointer + NvU32 offset_max; // Push buffer max offset (in bytes) + NvU32 put_offset; // Push buffer last kicked off offset + NvU32 fifo_free_count; // fifo free space (in words) + NvU32 currentSubDevMask; + NVDevEvoPtr pDevEvo; +} NVDmaBufferEvoRec; + +/* EVO capabilities */ +typedef struct { + NvBool flipLock; + NvBool stereo; + NvBool scanLock; +} NVEvoLockPinCaps; +#define NV_EVO_NUM_LOCK_PIN_CAPS 16 + +typedef struct { + NvBool supportsInterlaced; + NvBool supportsSemiPlanar; + NvBool supportsPlanar; + NvBool supportsHVFlip; + NvBool supportsDSI; +} NVEvoMiscCaps; + +static inline NvU8 NVEvoScalerTapsToNum(NVEvoScalerTaps taps) +{ + NvU8 numTaps = 1; + + switch (taps) { + case NV_EVO_SCALER_8TAPS: + numTaps = 8; + break; + case NV_EVO_SCALER_5TAPS: + numTaps = 5; + break; + case NV_EVO_SCALER_3TAPS: + numTaps = 3; + break; + case NV_EVO_SCALER_2TAPS: + numTaps = 2; + break; + case NV_EVO_SCALER_1TAP: + numTaps = 1; + break; + } + + return numTaps; +} + +#define NV_EVO_SCALE_FACTOR_1X (1 << 10) +#define NV_EVO_SCALE_FACTOR_2X (2 << 10) +#define NV_EVO_SCALE_FACTOR_3X (3 << 10) +#define NV_EVO_SCALE_FACTOR_4X (4 << 10) + +typedef struct { + NvU32 maxPixelsVTaps; + NvU16 maxVDownscaleFactor; /* Scaled by 1024 */ + NvU16 maxHDownscaleFactor; /* Scaled by 1024 */ +} NVEvoScalerTapsCaps; + +typedef struct { + NvBool present; + NVEvoScalerTapsCaps taps[NV_EVO_SCALER_TAPS_MAX + 1]; +} NVEvoScalerCaps; + +typedef struct { + NvBool usable; + NvBool supportsHDMIYUV420HW; + NVEvoScalerCaps scalerCaps; +} NVEvoHeadCaps; +#define NV_EVO_NUM_HEAD_CAPS 8 + +typedef struct { + NvBool dualTMDS; + NvU32 maxTMDSClkKHz; +} NVEvoSorCaps; +#define NV_EVO_NUM_SOR_CAPS 8 + +typedef struct { +} NVEvoPiorCaps; +#define NV_EVO_NUM_PIOR_CAPS 4 + +typedef struct { + NvBool usable; + NvBool csc0MatricesPresent; + NvBool cscLUTsPresent; + NvBool csc1MatricesPresent; + NVEvoScalerCaps scalerCaps; +} NVEvoWindowCaps; +#define NV_EVO_NUM_WINDOW_CAPS 32 + +typedef NvU64 NVEvoChannelMask; + +#define NV_EVO_CHANNEL_MASK_CORE 0:0 +#define NV_EVO_CHANNEL_MASK_CORE_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_CORE_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_WINDOW_FIELD 32:1 +#define NV_EVO_CHANNEL_MASK_WINDOW(_n) (1+(_n)):(1+(_n)) +#define NV_EVO_CHANNEL_MASK_WINDOW__SIZE 32 +#define NV_EVO_CHANNEL_MASK_WINDOW_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_WINDOW_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_CURSOR_FIELD 40:33 +#define NV_EVO_CHANNEL_MASK_CURSOR(_n) (33+(_n)):(33+(_n)) +#define NV_EVO_CHANNEL_MASK_CURSOR__SIZE 8 +#define NV_EVO_CHANNEL_MASK_CURSOR_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_CURSOR_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_BASE_FIELD 44:41 +#define NV_EVO_CHANNEL_MASK_BASE(_n) (41+(_n)):(41+(_n)) +#define NV_EVO_CHANNEL_MASK_BASE__SIZE 4 +#define NV_EVO_CHANNEL_MASK_BASE_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_BASE_DISABLE 0 +#define NV_EVO_CHANNEL_MASK_OVERLAY_FIELD 48:45 +#define NV_EVO_CHANNEL_MASK_OVERLAY(_n) (45+(_n)):(45+(_n)) +#define NV_EVO_CHANNEL_MASK_OVERLAY__SIZE 4 +#define NV_EVO_CHANNEL_MASK_OVERLAY_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_OVERLAY_DISABLE 0 +/* Window Immediate channels get only one bit. 
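+ *
+ * For reference, the NVEvoChannelMask bit layout built up by the
+ * definitions above (plus the window-immediate bit just below) is:
+ *   bit   0      core channel
+ *   bits  1..32  window channels 0..31
+ *   bits 33..40  cursor channels 0..7
+ *   bits 41..44  base channels 0..3
+ *   bits 45..48  overlay channels 0..3
+ *   bit  49      window immediate (one shared bit)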
*/ +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM 49:49 +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM_ENABLE 1 +#define NV_EVO_CHANNEL_MASK_WINDOW_IMM_DISABLE 0 + +#define NV_EVO_CHANNEL_MASK_WINDOW_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_WINDOW_FIELD) +#define NV_EVO_CHANNEL_MASK_CURSOR_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_CURSOR_FIELD) +#define NV_EVO_CHANNEL_MASK_BASE_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_BASE_FIELD) +#define NV_EVO_CHANNEL_MASK_OVERLAY_ALL \ + DRF_SHIFTMASK64(NV_EVO_CHANNEL_MASK_OVERLAY_FIELD) + +static inline NvU32 NV_EVO_CHANNEL_MASK_POPCOUNT(NvU64 mask) +{ + // It's tempting to use __builtin_popcountll here, but that depends on + // intrinsics not available to nvkms in the kernel. + return nvPopCount64(mask); +} + +static inline NvU32 NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _BASE_FIELD, mask)); +} +static inline NvU32 NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _OVERLAY_FIELD, mask)); +} +static inline NvU32 NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(NvU64 mask) +{ + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(mask) == 1); + return BIT_IDX_64(DRF_VAL64(_EVO, _CHANNEL_MASK, _WINDOW_FIELD, mask)); +} + +/* EVO structures */ + +typedef struct { + struct { + NVEvoChannelMask channelMask; + NVEvoChannelMask noCoreInterlockMask; + /* Each channel in this mask was programmed with a "flip lock + * qualifying" flip. */ + NVEvoChannelMask flipLockQualifyingMask; + /* Channels set here are transitioning from NULL ctxdma to non-NULL + * ctxdma or vice-versa on this update. Only necessary/valid on Turing + * (class C5*). */ + NVEvoChannelMask flipTransitionWAR; + + struct { + NvBool vrrTearing; + } base[NVKMS_MAX_HEADS_PER_DISP]; + + /* + * Window immediate channels with pending methods are represented + * here by NV_EVO_CHANNEL_MASK_WINDOW(n) for window immediate + * channel n. + */ + NVEvoChannelMask winImmChannelMask; + + /* + * Each window channel NV_EVO_CHANNEL_MASK_WINDOW(n) needs to + * be interlocked with its corresponding window immediate channel n. + */ + NVEvoChannelMask winImmInterlockMask; + + } subdev[NVKMS_MAX_SUBDEVICES]; + +} NVEvoUpdateState; + +typedef struct { + struct { + NVEvoChannelMask channelMask; + } subdev[NVKMS_MAX_SUBDEVICES]; +} NVEvoIdleChannelState; + +typedef struct { + NvU8 validTimeStampBits; + NvU8 legacyNotifierFormatSizeBytes; + NvBool tearingFlips :1; + NvBool vrrTearingFlips :1; + NvBool perEyeStereoFlips :1; +} NVEvoChannelCaps; + +enum NVEvoImmChannel { + NV_EVO_IMM_CHANNEL_NONE, + NV_EVO_IMM_CHANNEL_PIO, + NV_EVO_IMM_CHANNEL_DMA, +}; + +typedef struct { + NvU32 handle; + void *control[NVKMS_MAX_SUBDEVICES]; +} NVEvoPioChannel; + +/*! basic syncpt structure used for pre and post syncpt usage */ +typedef struct _NVEvoSyncpt { + /*! syncpt id (only useful for post-syncpt) */ + NvU32 id; + /*! bitmask of channels using this syncpt */ + NVEvoChannelMask channelMask; + /*! handle of context dma allocated to this syncpt */ + NvU32 hCtxDma; + /*! handle of syncpt object */ + NvU32 hSyncpt; +} NVEvoSyncpt; + +/* Tracks internal state of a vblank sync object. */ +typedef struct _NVVblankSyncObjectRec { + /* Whether the vblank sync object is currently in use by some client. */ + NvBool inUse; + + /* Whether the vblank sync object is enabled or disabled. 
*/ + NvBool enabled; + + /* + * The index of this Rec inside of the HeadState's vblankSyncObjects array. + * Also corresponds with the index of the sync object in hardware. + */ + NvU32 index; + + /* + * This syncpoint object should be created as part of + * nvRmSetupEvoCoreChannel(). + */ + NVEvoSyncpt evoSyncpt; +} NVVblankSyncObjectRec; + +/* EVO channel, encompassing multiple subdevices and a single pushbuf */ +typedef struct _NVEvoChannel { + /* Pointer to array of per subdev notifier dma structs */ + NVEvoDmaPtr notifiersDma; + + NvU32 hwclass; + NvU32 instance; + NVEvoChannelMask channelMask; /* only one bit should be set */ + + NVDmaBufferEvoRec pb; + + NVOS10_EVENT_KERNEL_CALLBACK_EX completionNotifierEventCallback; + NvU32 completionNotifierEventHandle; + struct nvkms_ref_ptr *ref_ptr; + + /* + * GV100 timestamped flips need a duplicate update which only changes + * TIMESTAMP_MODE and MIN_PRESENT_INTERVAL fields in SET_PRESENT_CONTROL; + * to allow updating these fields without changing anything else in + * SET_PRESENT_CONTROL, normal updates to SET_PRESENT_CONTROL are cached + * here. (bug 1990958) + */ + NvU32 oldPresentControl; + + // On Turing, RM wants to be notified when the tearing mode changes. + NvBool oldTearingMode; + + struct { + enum NVEvoImmChannel type; + union { + NVEvoPioChannel *pio; + struct _NVEvoChannel *dma; + } u; + } imm; + + NVEvoChannelCaps caps; + + NVEvoSyncpt postSyncpt; +} NVEvoChannel; + +typedef enum { + NV_EVO_NO_LOCK, + NV_EVO_FRAME_LOCK, + NV_EVO_RASTER_LOCK, +} NVEvoLockMode; + +typedef enum { + NV_EVO_LOCK_PIN_ERROR = -1, + NV_EVO_LOCK_PIN_INTERNAL_0 = 0, + NV_EVO_LOCK_PIN_0 = 0x20, +} NVEvoLockPin; + +typedef struct _NVEvoHeadControl { + NvBool interlaced; + NVEvoLockMode clientLock; + NVEvoLockPin clientLockPin; + int clientLockoutWindow; + NVEvoLockMode serverLock; + NVEvoLockPin serverLockPin; + NvBool flipLock; + NVEvoLockPin flipLockPin; + NVEvoLockPin stereoPin; + + /* + * Whether or not this GPU is stereo locked. True if all heads are either + * frame or raster locked, and all heads are driving non-interlaced modes. + */ + NvBool stereoLocked; + + /* + * Whether or not this head is driving a HDMI 3D frame packed mode. Used + * in headcontrol only on >=GV100. + */ + NvBool hdmi3D; + + /* + * Whether or not this head is driving a mode requiring the HW YUV420 + * packer. Used in headcontrol only on >=nvdisplay 4.0. 
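+ * (See the yuv420Mode comment in NVHwModeTimingsEvo, later in this file,
+ * for the distinction between SW and HW YUV 4:2:0 handling.)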
+ */ + NvBool hwYuv420; + + /* This isn't actually part of HeadControl, but it's convenient */ + NvU32 lockChainPosition; +} NVEvoHeadControl; + +typedef struct _NVEvoCapabilities { + NVEvoLockPinCaps pin[NV_EVO_NUM_LOCK_PIN_CAPS]; + NVEvoMiscCaps misc; + NVEvoHeadCaps head[NV_EVO_NUM_HEAD_CAPS]; + NVEvoSorCaps sor[NV_EVO_NUM_SOR_CAPS]; + NVEvoPiorCaps pior[NV_EVO_NUM_PIOR_CAPS]; + NVEvoWindowCaps window[NV_EVO_NUM_WINDOW_CAPS]; +} NVEvoCapabilities; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo; + enum NvKmsNIsoFormat format; + NvU16 offsetInWords; +} NVFlipNIsoSurfaceEvoHwState; + +typedef struct { + NVFlipNIsoSurfaceEvoHwState surface; + NvBool awaken; +} NVFlipCompletionNotifierEvoHwState; + +typedef struct { + NvBool usingSyncpt; + union { + struct { + NVFlipNIsoSurfaceEvoHwState acquireSurface; + NvU32 acquireValue; + NVFlipNIsoSurfaceEvoHwState releaseSurface; + NvU32 releaseValue; + } semaphores; + struct { + NvU32 preCtxDma; + NvU32 preValue; + NvU32 postCtxDma; + NvU32 postValue; + } syncpts; + } u; +} NVFlipSyncObjectEvoHwState; + +typedef struct { + NVLutSurfaceEvoPtr pLutSurfaceEvo; +} NVFlipLutHwState; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo; + NvS16 x, y; + + struct NvKmsCompositionParams cursorCompParams; +} NVFlipCursorEvoHwState; + +typedef struct { + NVSurfaceEvoPtr pSurfaceEvo[NVKMS_MAX_EYES]; + NVFlipCompletionNotifierEvoHwState completionNotifier; + NVFlipSyncObjectEvoHwState syncObject; + + // Non-zero timeStamp value is only allowed if the channel's + // 'timeStampFlipBits' capability is > 0. + NvU64 timeStamp; + NvU8 minPresentInterval; + // True means immediate or tearing flip. False means flip-at-vblank. + NvBool tearing; + // The tearing mode passed to RM's VRR code via + // NV_VRR_TRAP_ARGUMENT_MAX_FPS_TEARING. + NvBool vrrTearing; + NvBool perEyeStereoFlip; + + struct NvKmsSize sizeIn; + struct NvKmsSize sizeOut; + struct NvKmsSignedPoint outputPosition; + + NVEvoScalerTaps hTaps; + NVEvoScalerTaps vTaps; + + struct NvKmsCscMatrix cscMatrix; + + NVFlipLutHwState inputLut; + + struct NvKmsRRParams rrParams; + + struct NvKmsCompositionParams composition; +} NVFlipChannelEvoHwState; + +typedef struct { + struct NvKmsPoint viewPortPointIn; + NVFlipCursorEvoHwState cursor; + NVFlipChannelEvoHwState layer[NVKMS_MAX_LAYERS_PER_HEAD]; + struct NvKmsUsageBounds usage; + NvBool disableMidFrameAndDWCFWatermark; + struct { + NvBool viewPortPointIn : 1; + NvBool cursorSurface : 1; + NvBool cursorPosition : 1; + + NvBool layerPosition[NVKMS_MAX_LAYERS_PER_HEAD]; + NvBool layer[NVKMS_MAX_LAYERS_PER_HEAD]; + } dirty; +} NVFlipEvoHwState; + +/*! + * State requested through the NVKMS API. This may differ from + * the current hardware state (e.g., if the head has been + * momentarily blanked during DP link training). + */ +typedef struct _NVEvoSubDevHeadStateRec { + struct NvKmsPoint viewPortPointIn; + NVFlipCursorEvoHwState cursor; + NVFlipChannelEvoHwState layer[NVKMS_MAX_LAYERS_PER_HEAD]; + // Current usage bounds programmed into the hardware. + struct NvKmsUsageBounds usage; + // Usage bounds required after the last scheduled flip completes. + struct NvKmsUsageBounds targetUsage; + // Preallocated usage bounds that will be required for upcoming flips. + struct NvKmsUsageBounds preallocatedUsage; + + // Current state of MidFrameAndDWCFWatermark programmed into the hardware. + NvBool disableMidFrameAndDWCFWatermark; + // + // State of MidFrameAndDWCFWatermark required after the last scheduled + // flip completes. 
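+ // (This mirrors the usage/targetUsage pairing above: the plain field is
+ // what is currently programmed in hardware, while the "target" field is
+ // what the pending flips will require.)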
+ // + NvBool targetDisableMidFrameAndDWCFWatermark; +} NVEvoSubDevHeadStateRec; + +#define NVKMS_HEAD_SURFACE_MAX_BUFFERS 2 + +/* + * HeadSurface state that applies to a single head, but spans across + * all subdevices. + */ +typedef struct { + /* + * The size of the headSurfaces for this head, across all subdevices. + * headSurface might only use a subset of the surfaces on one or more + * subdevices in SLI Mosaic. + */ + struct NvKmsSize size; + struct NvKmsSize stagingSize; + + /* + * The surfaces allocated for use by headSurface on this head. + * Surface allocations are broadcast across subdevices, though + * headSurface may unicast its rendering to the headSurface + * surface allocations on specific subdevices. + */ + struct { + NVHsSurfacePtr pSurface; + NVHsSurfacePtr pStagingSurface; + } surfaces[NVKMS_MAX_EYES][NVKMS_HEAD_SURFACE_MAX_BUFFERS]; + + /* + * The number of surfaces in the NVKMS_HEAD_SURFACE_MAX_BUFFERS dimension of + * the surfaces[][] array. Elements [0,surfaceCount-1] in the surfaces[][] + * array will be populated. + */ + NvU32 surfaceCount; +} NVHsStateOneHeadAllDisps; + +/* Subdevice-specific, channel-independent state */ +typedef struct _NVEvoSubDevRec { + NvU32 subDeviceInstance; + + NVEvoCapabilities capabilities; + + NVDispEvoPtr pDispEvo; + + NvU32 setSwSpareA[NVKMS_MAX_HEADS_PER_DISP]; + + NVEvoSubDevHeadStateRec headState[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoHeadControl headControl[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoHeadControl headControlAssy[NVKMS_MAX_HEADS_PER_DISP]; + void *cursorPio[NVKMS_MAX_HEADS_PER_DISP]; + NvBool (*scanLockState)(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NvU32 action, + /* NV_INVALID_HEAD-terminated + * array of head indices */ + const NvU32 *pHeads); + + /* + * EVO state machine refcounter for the number of SLI or proxy framelock + * clients that are connected to this server. + */ + NvU32 frameLockSliProxyClients; + + /* + * Since we add all active heads as framelock clients whenever we enable + * the second head as a framelock client, there's no need for EVO state + * transitions for heads 3 and more. Instead of those state transitions, + * we use the frameLockExtraClients ref counter to keep track of heads + * 3 and greater being added as framelock clients. + * + * XXX The state machine currently will naively framelock heads 3 and + * greater during this state transition, even if they're not capable + * of being framelocked (for example, when they have very different + * refresh rates). Bug 976532 + */ + NvU32 frameLockExtraClients; + + /* + * All of the following except the "armed" versions are set by the EVO + * state machine to the desired HW configuration given the current locking + * state. + * The "armed" versions represent the current hardware configuration, used + * to avoid excess hardware updates. 
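+ * (Sketch of the intended flow, inferred from this comment: an update
+ * assembles a new "Assy" value, programs hardware only if it differs from
+ * the corresponding "Armed" value, then copies Assy into Armed.)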
+ */ + NvU32 frameLockServerMaskArmed; + NvU32 frameLockServerMaskAssy; + NvU32 frameLockClientMaskArmed; + NvU32 frameLockClientMaskAssy; + NvU32 frameLockExtRefClkMaskArmed; + NvU32 frameLockExtRefClkMaskAssy; + NvBool frameLockHouseSync; + + NvU8 flipLockPinSetForFrameLockHeadMask; + NvU8 flipLockEnabledForFrameLockHeadMask; + NvU8 flipLockPinSetForSliHeadMask; + NvU8 flipLockEnabledForSliHeadMask; + + NvU32 flipLockProhibitedHeadMask; + + NvU32 sliRasterLockServerMask; + NvU32 sliRasterLockClientMask; + + NVEvoLockPin sliServerLockPin; + NVEvoLockPin sliClientLockPin; + NvBool forceZeroClientLockoutWindow; +} NVEvoSubDevRec; + +typedef struct _NVEvoColorRec { + NvU16 red; + NvU16 green; + NvU16 blue; +} NVEvoColorRec; + +typedef struct { + NvU16 Red; + NvU16 Green; + NvU16 Blue; + NvU16 Unused; +} NVEvoLutEntryRec; + +typedef struct { + NVEvoLutEntryRec base[NV_LUT_VSS_HEADER_SIZE + NV_NUM_EVO_LUT_ENTRIES]; + // The output LUT requires 8-bit alignment. + NVEvoLutEntryRec output[NV_LUT_VSS_HEADER_SIZE + NV_NUM_EVO_LUT_ENTRIES] + __attribute__((aligned(0x100))); +} NVEvoLutDataRec; + +typedef struct { + NvBool supportsDP13 :1; + NvBool supportsInbandStereoSignaling :1; + NvBool supportsHDMI20 :1; + NvBool inputLutAppliesToBase :1; + NvU8 validNIsoFormatMask; + NvU8 genericPageKind; + NvU32 maxPitchValue; + int maxWidthInBytes; + int maxWidthInPixels; + int maxHeight; + NvU32 maxRasterWidth; + NvU32 maxRasterHeight; + struct NvKmsCompositionCapabilities cursorCompositionCaps; + NvU16 validLayerRRTransforms; + struct NvKmsLayerCapabilities layerCaps[NVKMS_MAX_LAYERS_PER_HEAD]; +} NVEvoCapsRec; + +typedef struct { + NvU32 coreChannelClass; + size_t dmaArmedSize; + NvU32 dmaArmedOffset; +} NVEvoCoreChannelDmaRec; + + +typedef struct _NVEvoSubDeviceRec { + NvU32 handle; + NvU32 gpuId; +#define NV_INVALID_GPU_LOG_INDEX 0xFF + NvU8 gpuLogIndex; + char gpuString[NVKMS_GPU_STRING_SIZE]; + + NvU32 numEngines; + NvU32 *supportedEngines; + + /* Core channel memory mapping for ARM values */ + void *pCoreDma; + + /* ISO ctxdma programmed by EVO2 hal, into the overlay channel */ + NvU32 overlayContextDmaIso[NVKMS_MAX_HEADS_PER_DISP]; + enum NvKmsSurfaceMemoryFormat overlaySurfFormat[NVKMS_MAX_HEADS_PER_DISP]; + + /* Per head surface programmed into the core channel */ + const NVSurfaceEvoRec *pCoreChannelSurface[NVKMS_MAX_HEADS_PER_DISP]; + + /* EVO2 only, TRUE if a valid base surface passed to ->Flip() */ + NvBool isBaseSurfSpecified[NVKMS_MAX_HEADS_PER_DISP]; + enum NvKmsSurfaceMemoryFormat baseSurfFormat[NVKMS_MAX_HEADS_PER_DISP]; + + /* Composition parameters considered for hardware programming by EVO2 hal */ + struct { + NvBool initialized; + enum NvKmsCompositionColorKeySelect colorKeySelect; + NVColorKey colorKey; + } baseComp[NVKMS_MAX_HEADS_PER_DISP], overlayComp[NVKMS_MAX_HEADS_PER_DISP]; + +} NVEvoSubDeviceRec; + +/* Device-specific EVO state (subdevice- and channel-independent) */ +typedef struct _NVEvoDevRec { + + NvU8 gpuLogIndex; + NvU32 allocRefCnt; /* number of ALLOC_DEVICE calls */ + NVListRec devListEntry; + + /* array of gpuIds opened with nvkms_open_gpu() */ + NvU32 openedGpuIds[NV0000_CTRL_GPU_MAX_ATTACHED_GPUS]; + + NVUnixRmHandleAllocatorRec handleAllocator; + NvU32 deviceId; + + NvU32 deviceHandle; + struct NvKmsPerOpenDev *pNvKmsOpenDev; + + + /* SLI Info */ + struct { + NvBool mosaic; + struct { + NvBool present :1; + + /* Current hardware state */ + NvBool powered :1; + + /* Software state tracking needs from hardware */ + NvBool powerNeededForRasterLock :1; + } bridge; + } 
sli; + + NvU32 numHeads; + NvU32 numWindows; /* NVDisplay only. */ + + NvU32 displayHandle; + + /*! + * modesetOwner points to the pOpenDev of the client that called + * NVKMS_IOCTL_GRAB_OWNERSHIP. + */ + const struct NvKmsPerOpenDev *modesetOwner; + + /*! + * The first modeset after a modeset ownership transition should not inherit + * state from the previous modeset that we don't want inherited: LUTs or + * heads not specified in the new modeset request. + */ + NvBool modesetOwnerChanged; + + /*! + * NVEvoDevRec::numSubDevices is the number of GPUs in the SLI + * device. This is the number of NVEvoSubDevPtrs in + * NVEvoDevRec::gpus[] and the number of NVSubDeviceEvoPtr in + * NVEvoDevRec::pSubDevices. + * + * The data structure organization is summarized by the following table: + * + * NVDevEvoRec::numSubDevices (# of pSubDevs) + * | NVDevEvoRec::nDispEvo (# of pDispEvos) + * | | NVDispEvoRec::numSubDevices (# of sd per disp) + * | | | + * no SLI 1 1 1 + * SLI Mosaic N N 1 + */ + NvU32 numSubDevices; + NVSubDeviceEvoPtr pSubDevices[NVKMS_MAX_SUBDEVICES]; + + NvU32 dispClass; + NvU32 displayCommonHandle; + NvU32 rmCtrlHandle; + + unsigned int nDispEvo; + NVDispEvoPtr pDispEvo[NVKMS_MAX_SUBDEVICES]; + + NVEvoChannelPtr base[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoChannelPtr core; + NVEvoChannelPtr overlay[NVKMS_MAX_HEADS_PER_DISP]; + NVEvoChannelPtr window[NVKMS_MAX_WINDOWS_PER_DISP]; + + /* NVDisplay head<->window mapping */ + NvU32 headForWindow[NVKMS_MAX_WINDOWS_PER_DISP]; + + struct { + NVEvoChannelPtr layer[NVKMS_MAX_LAYERS_PER_HEAD]; + NvU32 numLayers; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + /* Pointer to array of subdev structs */ + NVEvoSubDevPtr gpus; + + NvU32 subDevMaskStack[NV_EVO_SUBDEV_STACK_SIZE]; + NvU32 subDevMaskStackDepth; + + NvU32 cursorHandle[NVKMS_MAX_HEADS_PER_DISP]; + + NVDPLibTimerPtr dpTimer; + + NvU8 capsBits[NV5070_CTRL_SYSTEM_CAPS_TBL_SIZE]; + NvU8 commonCapsBits[NV0073_CTRL_SYSTEM_CAPS_TBL_SIZE]; + + NVEvoCapsRec caps; + + NVEvoCoreChannelDmaRec coreChannelDma; + + NvBool mobile : 1; + NvBool usesTegraDevice : 1; + + /* + * IO coherency modes that display supports for ISO and NISO memory + * allocations, respectively. + */ + NvKmsDispIOCoherencyModes isoIOCoherencyModes; + NvKmsDispIOCoherencyModes nisoIOCoherencyModes; + + /* + * Indicates whether the init_no_update methods that were pushed by the + * hardware during core channel allocation are still pending. + */ + NvBool coreInitMethodsPending : 1; + /* + * Indicates that NVKMS restored the console and freeing the core channel + * should leave the display configuration alone. + * + * This should be set to FALSE whenever an update is sent that flips away + * from the framebuffer console. + * + * TODO: Remove this in favor of passing a parameter explicitly to the + * functions that use it. + */ + NvBool skipConsoleRestore : 1; + /* + * Indicates that hotplug events that occur while NVKMS is the modeset owner + * should trigger console restore modesets. + */ + NvBool handleConsoleHotplugs : 1; + /* + * Cached from NvKmsSetModeRequest::allowHeadSurfaceInNvKms when the + * modeset owner does a modeset. This is needed so that when non-modeset + * owners do a partial modeset they don't override this value. + */ + NvBool allowHeadSurfaceInNvKms : 1; + + NvBool gc6Allowed : 1; + + /* + * Indicates whether NVKMS is driving an SOC display device, or an external + * dGPU device. + */ + NvBool isSOCDisplay : 1; + + /* + * Indicates whether NVKMS is supporting syncpts. 
+ */ + NvBool supportsSyncpts : 1; + + /* + * Indicates whether the display device that NVKMS is driving requires all + * memory allocations that display will access to come from sysmem. + * + * For SOC display devices, this should be set to TRUE since the only + * memory aperture that they support is sysmem. + */ + NvBool requiresAllAllocationsInSysmem : 1; + /* + * Indicates whether the device that NVKMS is driving supports headSurface + * composition. + * + * For SOC display devices (e.g., Orin), this should be set to FALSE since + * there's currently zero nvgpu support, and no Tegra clients should be + * using headSurface right now. + */ + NvBool isHeadSurfaceSupported : 1; + + NvU32 validResamplingMethodMask; + + nvkms_timer_handle_t *postFlipIMPTimer; + nvkms_timer_handle_t *consoleRestoreTimer; + + nvkms_timer_handle_t *lowerDispBandwidthTimer; + + NvU32 simulationType; + + NvU32 numClasses; + NvU32 *supportedClasses; + + struct { + /* name[0] == '\0' for unused registryKeys[] array elements. */ + char name[NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN]; + NvU32 value; + } registryKeys[NVKMS_MAX_DEVICE_REGISTRY_KEYS]; + + /* Returns true if the Quadro Sync card connected to this GPU has + * a firmware version incompatible with this GPU. + */ + NvBool badFramelockFirmware; + + const struct _nv_evo_hal *hal; + const struct _nv_evo_cursor_hal *cursorHal; + + /*! + * ref_ptr to the structure. + * + * nvkms_timer_handle_t objects refer to the pDevEvo via references to this, + * so that timers that fire after the pDevEvo has been freed can detect that + * case and do nothing. + */ + struct nvkms_ref_ptr *ref_ptr; + + struct { + void *handle; + } hdmiLib; + + struct { + NvU32 semaphoreHandle; + void *pSemaphores; + NvBool enabled; + NvBool active; + NvU32 flipCounter; + } vrr; + + /* + * Information about the framebuffer console returned by + * NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO. + */ + NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS vtFbInfo; + + /* + * Handle referencing the memory reserved by RM that is used by the kernel + * as the framebuffer console surface. + */ + NvKmsSurfaceHandle fbConsoleSurfaceHandle; + + NVHsDeviceEvoPtr pHsDevice; + + /* The current headSurface configuration. */ + NVHsStateOneHeadAllDisps headSurfaceAllDisps[NVKMS_MAX_HEADS_PER_DISP]; + + struct NVDevPreallocRec prealloc; + + struct { + NvU32 handle; + NVOS10_EVENT_KERNEL_CALLBACK_EX callback; + } nonStallInterrupt; + + /* + * Track the LUT with per-head, per-pDisp scope. The LUT itself + * is triple buffered. + * + * RM surface allocations are broadcast in SLI, so LUT is allocated with + * per-device scope. However, writes into the LUT are unicast with + * per-pDisp scope. + * + * The LUT surface in the core channel contains both the base and output + * LUTs. + */ + struct { + struct { + NVLutSurfaceEvoPtr LUT[3]; + struct { + NvBool waitForPreviousUpdate; + NvBool curBaseLutEnabled; + NvBool curOutputLutEnabled; + NvU8 curLUTIndex; + nvkms_timer_handle_t *updateTimer; + } disp[NVKMS_MAX_SUBDEVICES]; + } head[NVKMS_MAX_HEADS_PER_DISP]; + NVLutSurfaceEvoPtr defaultLut; + } lut; + + /*! 
stores pre-syncpts */ + NVEvoSyncpt *preSyncptTable; + NvBool *pAllSyncptUsedInCurrentFlip; + +} NVDevEvoRec; + +/* + * The NVHwModeTimingsEvo structure stores all the values necessary to + * perform a modeset with EVO + */ + +typedef struct _NVHwModeViewPortEvo { + struct { + /* + * note that EVO centers ViewPortOut within the active raster, + * so xAdjust,yAdjust are signed; to position ViewPortOut at + * 0,0 within active raster: + * + * viewPortOut.xAdjust = (activeRaster.w - viewPortOut.w)/2 * -1; + * viewPortOut.yAdjust = (activeRaster.h - viewPortOut.h)/2 * -1; + */ + NvS16 xAdjust; + NvS16 yAdjust; + NvU16 width; + NvU16 height; + } out; + + struct { + NvU16 width; + NvU16 height; + } in; + + NVEvoScalerTaps hTaps; + NVEvoScalerTaps vTaps; + + // These are the window features that may be possible if the required ISO + // bw is available at the time that the feature needs to be enabled. By + // default possibleUsage is set considering that everything is supported + // by the HW and for dGPU, IMP will scale it as needed. + struct NvKmsUsageBounds possibleUsage; + + // Guaranteed usage bounds allowed by IMP. These are never assigned to + // NVDpyEvoRec::usage or the hardware directly, but rather are used to + // validate usage bound change requests. + struct NvKmsUsageBounds guaranteedUsage; +} NVHwModeViewPortEvo; + +enum nvKmsPixelDepth { + NVKMS_PIXEL_DEPTH_18_444, + NVKMS_PIXEL_DEPTH_24_444, + NVKMS_PIXEL_DEPTH_30_444, +}; + +enum nvKmsTimingsProtocol { + NVKMS_PROTOCOL_DAC_RGB, + + NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A, + NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B, + NVKMS_PROTOCOL_SOR_DUAL_TMDS, + NVKMS_PROTOCOL_SOR_DP_A, + NVKMS_PROTOCOL_SOR_DP_B, + NVKMS_PROTOCOL_SOR_LVDS_CUSTOM, + NVKMS_PROTOCOL_SOR_HDMI_FRL, + + NVKMS_PROTOCOL_DSI, + + NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC, +}; + +/* + * This structure defines all of the values necessary to program mode timings + * on EVO hardware. + * NOTE: if you add anything to this, consider adding it to + * RasterLockPossible() in nvkms-evo.c + */ +typedef struct _NVHwModeTimingsEvo { + struct NvKmsPoint rasterSize; + struct NvKmsPoint rasterSyncEnd; + struct NvKmsPoint rasterBlankEnd; + struct NvKmsPoint rasterBlankStart; + NvU32 rasterVertBlank2Start; + NvU32 rasterVertBlank2End; + + NvU32 pixelClock; /* in kHz */ + enum nvKmsTimingsProtocol protocol; + enum nvKmsPixelDepth pixelDepth; + /* + * yuv420Mode reflects whether this mode requires YUV 4:2:0 decimation into + * a half-width output through headsurface (SW YUV420) or >=nvdisplay 4.0 HW + * CSC (HW YUV420). + * + * If a mode requires SW YUV 4:2:0 emulation, the pixelClock and width + * values in NvModeTimings will still be the full width values specified by + * the mode parsed from the EDID (e.g. 3840x2160@60), but the pixelClock + * and width values in NVHwModeTimingsEvo will be the "real" half width + * values programmed in HW and rendered to through a headSurface transform + * (e.g. 1920x2160@60). If a mode requires HW YUV 4:2:0 CSC, the + * pixelClock and width values in both NvModeTimings and NVHwModeTimingsEvo + * will be full width, and the decimation to the half width scanout surface + * is performed in HW. In both cases, only the full width values should + * ever be reported to the client. + */ + enum NvYuv420Mode yuv420Mode; + /* *SyncPol is TRUE if negative */ + NvBool hSyncPol : 1; + NvBool vSyncPol : 1; + NvBool interlaced : 1; + NvBool doubleScan : 1; + /* + * hdmi3D reflects whether this mode is a HDMI 3D frame packed mode. 
True + * only if the user selected HDMI 3D stereo mode and the GPU supports it. + * If true, then pixelClock is doubled. + */ + NvBool hdmi3D : 1; + + struct { + /* The vrr type for which this mode is adjusted. */ + enum NvKmsDpyVRRType type; + } vrr; + + /* DisplayPort Display Stream Compression */ + struct { + NvBool enable; + + /* + * The DSC target bits per pixel (bpp) rate value multiplied by 16 that + * is being used by the DSC encoder. + * + * It maps respectively to {pps4[1:0], pps5[7:0]}. + */ + NvU32 bitsPerPixelX16; + + /* + * The DSC picture parameter set (PPS), which the DSC encoder must + * communicate to the decoder. + */ + NvU32 pps[DSC_MAX_PPS_SIZE_DWORD]; + } dpDsc; + + HDMI_FRL_CONFIG hdmiFrlConfig; + + NVHwModeViewPortEvo viewPort; + + NVT_VIDEO_INFOFRAME_CTRL infoFrameCtrl; + + struct { + enum NvKmsStereoMode mode; + NvBool isAegis; + } stereo; +} NVHwModeTimingsEvo; + +static inline NvU64 nvEvoFrametimeUsFromTimings(const NVHwModeTimingsEvo *pTimings) +{ + NvU64 pixelsPerFrame = pTimings->rasterSize.x * pTimings->rasterSize.y; + NvU64 pixelsPerSecond = KHzToHz(pTimings->pixelClock); + NvU64 framesPerSecond = pixelsPerSecond / pixelsPerFrame; + + return 1000000ULL / framesPerSecond; +} + +static inline NvU16 nvEvoVisibleWidth(const NVHwModeTimingsEvo *pTimings) +{ + return pTimings->rasterBlankStart.x - pTimings->rasterBlankEnd.x; +} + +static inline NvU16 nvEvoVisibleHeight(const NVHwModeTimingsEvo *pTimings) +{ + /* rasterVertBlank2{Start,End} should only be != 0 for interlaced modes. */ + nvAssert(pTimings->interlaced || + ((pTimings->rasterVertBlank2Start == 0) && + (pTimings->rasterVertBlank2End == 0))); + + return pTimings->rasterBlankStart.y - pTimings->rasterBlankEnd.y + + pTimings->rasterVertBlank2Start - pTimings->rasterVertBlank2End; +} + +/* + * Calculate BackendSizeHeight, based on this HD or SD quality is + * defined. + */ +static inline NvBool nvEvoIsHDQualityVideoTimings( + const NVHwModeTimingsEvo *pTimings) +{ + NvU32 height = nvEvoVisibleHeight(pTimings); + + // as per windows code, nvva uses < 720. + if (height <= 576) { + // SD quality: 240, 288, 480, 576 + return FALSE; + } + + // HD quality: 720, 1080 + return TRUE; +} + +static inline struct NvKmsRect nvEvoViewPortOutHwView( + const NVHwModeTimingsEvo *pTimings) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + const NvU16 hVisible = nvEvoVisibleWidth(pTimings); + const NvU16 vVisible = nvEvoVisibleHeight(pTimings); + struct NvKmsRect viewPortOut = { 0 }; + + viewPortOut.width = pViewPort->out.width; + viewPortOut.height = pViewPort->out.height; + viewPortOut.x = pViewPort->out.xAdjust + + (hVisible - pViewPort->out.width) / 2; + viewPortOut.y = (pViewPort->out.yAdjust + + (vVisible - pViewPort->out.height) / 2); + + return viewPortOut; +} + +static inline struct NvKmsRect nvEvoViewPortOutClientView( + const NVHwModeTimingsEvo *pTimings) +{ + struct NvKmsRect viewPortOut = nvEvoViewPortOutHwView(pTimings); + + if (pTimings->doubleScan) { + + nvAssert((viewPortOut.x % 2) == 0); + viewPortOut.x /= 2; + + nvAssert((viewPortOut.height % 2) == 0); + viewPortOut.height /= 2; + } + + return viewPortOut; +} + +/* OR indices are per OR-type. The maximum OR index for each type + * on each GPU is: + * + * Pre-GV10X : 8 SORs, 4 PIORs and 4 Dacs; + * GV10X : 8 SORs, 4 PIORs; + * TU10X+ : 8 SORs; + */ +#define NV_EVO_MAX_ORS 8 + +/* + * The scoping of heads, ORs, and dpys relative to connectors can be + * complicated. 
Here is how objects are scoped for various configurations: + * + * #heads #ORs #dpys #NVConnectorEvoRecs + * DP 1.1 1 1 1 1 + * DP-MST n 1 n 1 + * DP cloning: 1 1 n 1 + * 2-Heads-1-OR: 2 2 1 1 + */ +typedef struct _NVConnectorEvoRec { + char name[NVKMS_DPY_NAME_SIZE]; + + NVDispEvoPtr pDispEvo; + + NVListRec connectorListEntry; + + NvBool detectComplete; /* For sync'ing dpy detection w/ DP lib */ + NVDPLibConnectorPtr pDpLibConnector; // DP Lib + NvBool dpSerializerEnabled; + + struct { + NvU8 maxLinkBW; + NvU8 maxLaneCount; + NvBool supportsMST; + } dpSerializerCaps; + + NVDpyId displayId; // RM Display ID + NvKmsConnectorSignalFormat signalFormat; + NvKmsConnectorType type; + NvU32 typeIndex; + NvU32 legacyType; /* NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_ */ + NvU32 legacyTypeIndex; + NvU32 physicalIndex; + NvU32 physicalLocation; + NvU32 validHeadMask; + + NvU32 dfpInfo; /* DFP info query through NV0073_CTRL_CMD_DFP_GET_INFO */ + + NVDpyIdList ddcPartnerDpyIdsList; + + struct { + NvU32 type; + NvU32 protocol; // NV0073_CTRL_SPECIFIC_OR_PROTOCOL_* + NvU32 location; // NV0073_CTRL_SPECIFIC_OR_LOCATION_* + NvU32 ditherType; + NvU32 ditherAlgo; + /* Hardware heads attached to assigned OR */ + NvU32 ownerHeadMask[NV_EVO_MAX_ORS]; + /* ORs mask assigned to this connector */ + NvU32 mask; + } or; + + struct { + NvBool ycbcr422Capable; + NvBool ycbcr444Capable; + } colorSpaceCaps; +} NVConnectorEvoRec; + +static inline NvU32 nvConnectorGetAttachedHeadMaskEvo( + const NVConnectorEvoRec *pConnectorEvo) +{ + NvU32 headMask = 0x0; + NvU32 orIndex; + + FOR_EACH_INDEX_IN_MASK(32, orIndex, pConnectorEvo->or.mask) { + headMask |= pConnectorEvo->or.ownerHeadMask[orIndex]; + } FOR_EACH_INDEX_IN_MASK_END; + + return headMask; +} + +static inline +NvBool nvIsConnectorActiveEvo(const NVConnectorEvoRec *pConnectorEvo) +{ + NvU32 orIndex; + + FOR_EACH_INDEX_IN_MASK(32, orIndex, pConnectorEvo->or.mask) { + if (pConnectorEvo->or.ownerHeadMask[orIndex] != 0x0) { + return TRUE; + } + } FOR_EACH_INDEX_IN_MASK_END; + + return FALSE; +} + +/* + * In case of 2-Heads-1-OR: NV0073_CTRL_CMD_DFP_ASSIGN_SOR assigns 2 SORs, + * lowest SOR index is for primary head. + */ +static inline NvU32 nvEvoConnectorGetPrimaryOr( + const NVConnectorEvoRec *pConnectorEvo) +{ + return (pConnectorEvo->or.mask == 0x0 ? + NV_INVALID_OR : + BIT_IDX_32(LOWESTBIT(pConnectorEvo->or.mask))); +} + +typedef struct __NVAttributesSetEvoRec { + +#define NV_EVO_DVC_MIN (-1024) +#define NV_EVO_DVC_MAX 1023 +#define NV_EVO_DVC_DEFAULT 0 + + NvS32 dvc; + + /* + * For both colorSpace and colorRange, the value for + * NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_{SPACE,RANGE} sent by the client is + * stored in NVDpyEvoRec::requestedColor(Space, Range}. The structure stores + * the actual color space and color range in use. + * + * Since YUV444 mode only allows limited color range, changes to the + * current color space may trigger changes to the current color + * range (see nvSetColorSpaceAndRangeEvo()). + * + * For SW YUV420 mode, these values are ignored in + * HEAD_SET_PROCAMP and applied in the headSurface composite shader. 
+ */ + enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace; + enum NvKmsDpyAttributeColorRangeValue colorRange; + + struct { + NvBool enabled; + enum NvKmsDpyAttributeCurrentDitheringDepthValue depth; + enum NvKmsDpyAttributeCurrentDitheringModeValue mode; + } dithering; + +#define NV_EVO_IMAGE_SHARPENING_MIN 0 +#define NV_EVO_IMAGE_SHARPENING_MAX 255 +#define NV_EVO_IMAGE_SHARPENING_DEFAULT 127 + + struct { + NvBool available; + NvU32 value; + } imageSharpening; + + enum NvKmsDpyAttributeDigitalSignalValue digitalSignal; +} NVAttributesSetEvoRec; + +#define NV_EVO_DEFAULT_ATTRIBUTES_SET \ + (NVAttributesSetEvoRec) { \ + .dvc = NV_EVO_DVC_DEFAULT, \ + .colorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB, \ + .colorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL, \ + .dithering = { \ + .enabled = FALSE, \ + .mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE, \ + .depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE, \ + }, \ + .imageSharpening = { \ + .value = NV_EVO_IMAGE_SHARPENING_DEFAULT, \ + }, \ + } + + +typedef struct _NVEldEvoRec { + NvU32 size; + NvU8 buffer[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER]; +} NVEldEvoRec; + +/* + * This structure stores information about the active per-head audio state. + */ +typedef struct _NVDispHeadAudioStateEvoRec { + NvU32 maxFreqSupported; + NVEldEvoRec eld; + + NvBool isAudioOverHdmi : 1; + NvBool supported : 1; + NvBool enabled : 1; +} NVDispHeadAudioStateEvoRec; + +/* + * This structure stores information about the active per-head display state. + */ +typedef struct _NVDispHeadStateEvoRec { + + NVAttributesSetEvoRec attributes; + + NVEvoScalerTaps hTaps; + NVEvoScalerTaps vTaps; + + /*! Cached, to preserve across modesets. */ + struct NvKmsModeValidationParams modeValidationParams; + + /* + * Heads on the same NVDevEvoRec with the same non-zero + * NVDispHeadStateEvoRec::allowFlipLockGroup value are eligible to + * be flipLocked, from an NVKMS client point of view, if the + * hardware requirements for flipLock are met. + */ + NvU8 allowFlipLockGroup; + + /* + * For Turing and newer, enable display composition bypass mode. + * + * This is intended to be used by console restore to avoid bug 2168873. + */ + NvBool bypassComposition : 1; + NvBool hs10bpcHint : 1; + + struct { + NVT_COLOR_FORMAT colorFormat; + NVT_COLORIMETRY colorimetry; + NVT_COLOR_RANGE colorRange; + NvU32 satCos; + } procAmp; + + /* + * The activeRmId is the identifier that we use to talk to RM + * about the display device(s) on this head. It is zero except + * when a mode is being driven by this head. For DP MST, it is the + * identifier of the displayport library group to which the driven + * DP device belongs. Otherwise, it is the identifier of the connector + * driven by the head. + */ + NvU32 activeRmId; + + NVHwModeTimingsEvo timings; + NVConnectorEvoRec *pConnectorEvo; /* NULL if the head is not active */ + NVDpyIdList activeDpys; /* empty if the head is not active */ + + /* + * Each head can have up to NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD + * programmable Core semaphores. + * + * The numVblankSyncObjectsCreated will ideally always be equal to + * NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD, but could be lower if errors + * occured during syncpt allocation in nvRMSetupEvoCoreChannel(). 
+ */ + NvU8 numVblankSyncObjectsCreated; + NVVblankSyncObjectRec vblankSyncObjects[NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD]; + NVDispHeadAudioStateEvoRec audio; +} NVDispHeadStateEvoRec; + +typedef struct _NVDispEvoRec { + NvU8 gpuLogIndex; + NVDevEvoPtr pDevEvo; + NvU32 hotplugEventHandle; + NvU32 DPIRQEventHandle; + NVOS10_EVENT_KERNEL_CALLBACK_EX rmHotplugCallback; + NVOS10_EVENT_KERNEL_CALLBACK_EX rmDPIRQCallback; + + NVDispHeadStateEvoRec headState[NVKMS_MAX_HEADS_PER_DISP]; + + NVDpyIdList vbiosDpyConfig[NVKMS_MAX_HEADS_PER_DISP]; + + NvU32 isoBandwidthKBPS; + NvU32 dramFloorKBPS; + + /* + * The list of physical connector display IDs. This is the union + * of pConnectorEvo->displayId values, which is also the union of + * pDpyEvo->id values for non-MST pDpys. + */ + NVDpyIdList connectorIds; + + NVListRec connectorList; + + NvU32 displayOwner; + + NVListRec dpyList; + + NVDpyIdList bootDisplays; + NVDpyIdList validDisplays; + NVDpyIdList connectedDisplays; + + /* + * displayPortMSTIds is a superset of dynamicDpyIds because not all DP MST + * dpys are dynamic dpys. For example, the DP MST dpys that are driven by + * a DP serializer connector are part of a fixed topology, and are static in + * nature. + */ + NVDpyIdList displayPortMSTIds; /* DP MST dpys */ + NVDpyIdList dynamicDpyIds; + + NVDpyIdList muxDisplays; + + struct { + nvkms_timer_handle_t *unstallTimer; + } vrr; + + NVFrameLockEvoPtr pFrameLockEvo; + struct { + NVDpyId server; + NVDpyIdList clients; + NvBool syncEnabled; /* GPU is syncing to framelock */ + NvU32 connectorIndex;/* NV30F1_GSYNC_CONNECTOR_* */ + NvU32 currentServerHead; /* used for disabling */ + NvU32 currentClientHeadsMask; /* used for disabling */ + NvBool currentHouseSync; /* if state machine thinks house sync + is enabled -- used for disabling */ + + /* Framelock event-related data */ +#define NV_FRAMELOCK_SYNC_LOSS 0 +#define NV_FRAMELOCK_SYNC_GAIN 1 +#define NV_FRAMELOCK_NUM_EVENTS 2 + + struct { + NvU32 handle; + NVOS10_EVENT_KERNEL_CALLBACK_EX callback; + } gsyncEvent[NV_FRAMELOCK_NUM_EVENTS]; + + } framelock; + + NVHsChannelEvoPtr pHsChannel[NVKMS_MAX_HEADS_PER_DISP]; + + NVSwapGroupPtr pSwapGroup[NVKMS_MAX_HEADS_PER_DISP]; + + /*! + * ref_ptr to the structure. + * + * nvkms_timer_handle_t objects refer to the pDispEvo via references to + * this, so that timers that fire after the pDispEvo has been freed can + * detect that case and do nothing. + */ + struct nvkms_ref_ptr *ref_ptr; + + /* + * Indicates that NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG has been queried at + * least once on this device. If set, nvRmDestroyDisplays() will flush any + * remaining AUX log messages to the system log. + */ + NvBool dpAuxLoggingEnabled; + + struct nvkms_backlight_device *backlightDevice; +} NVDispEvoRec; + +typedef enum { + NV_EVO_PASSIVE_DP_DONGLE_UNUSED, + NV_EVO_PASSIVE_DP_DONGLE_DP2DVI, + NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_1, + NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_2, +} NVEvoPassiveDpDongleType; + +typedef struct NVEdidRec { + NvU8 *buffer; + size_t length; +} NVEdidRec, *NVEdidPtr; + +typedef struct _NVParsedEdidEvoRec { + NvBool valid; + NVT_EDID_INFO info; + NVT_EDID_RANGE_LIMIT limits; + char monitorName[NVT_EDID_MONITOR_NAME_STRING_LENGTH]; + char serialNumberString[NVT_EDID_LDD_PAYLOAD_SIZE+1]; +} NVParsedEdidEvoRec; + +typedef struct _NVDpyEvoRec { + NVListRec dpyListEntry; + NVDpyId id; + + char name[NVKMS_DPY_NAME_SIZE]; + + /* The hardware head to use with this dpy. 
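A minimal sketch (invented helper; nvDpyIdIsInDpyIdList() is assumed to be the dpy-ID list membership test declared elsewhere in nvkms) of how the per-disp NVDpyIdList sets above are typically consulted:

static NvBool ExampleDpyIsBootDisplay(const NVDpyEvoRec *pDpyEvo)
{
    const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo;

    /* bootDisplays is one of the per-disp dpy-ID lists shown above. */
    return nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->bootDisplays);
}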
*/ + NvU32 head; + + struct _NVDispEvoRec *pDispEvo; + NVConnectorEvoPtr pConnectorEvo; + + NvBool hasBacklightBrightness : 1; + NvBool internal : 1; + NvBool allowDVISpecPClkOverride : 1; + + /* whether the connected dpy is HDMI capable */ + NvBool hdmiCapable : 1; + NvBool isVrHmd : 1; + + /* + * Maximum single link and total allowed pixel clock. This is first + * reported by RM through DpyProbeMaxPixelClock, and then potentially + * overridden by the EVO SOR capabilities for HDMI and DVI through + * UpdateMaxPixelClock. + */ + NvU32 maxPixelClockKHz; + NvU32 maxSingleLinkPixelClockKHz; + + NVEdidRec edid; + NVParsedEdidEvoRec parsedEdid; + + struct { + enum NvKmsDpyAttributeRequestedDitheringValue state; + enum NvKmsDpyAttributeRequestedDitheringDepthValue depth; + enum NvKmsDpyAttributeRequestedDitheringModeValue mode; + } requestedDithering; + + enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace; + enum NvKmsDpyAttributeColorRangeValue requestedColorRange; + + NVAttributesSetEvoRec currentAttributes; + + struct { + char *addressString; + NVDPLibDevicePtr pDpLibDevice; // DP Lib's notion of the device. + NvBool inbandStereoSignaling; + + NvU8 laneCount; // NV0073_CTRL_DP_DATA_SET_LANE_COUNT + NvU8 linkRate; // NV0073_CTRL_DP_DATA_SET_LINK_BW + enum NvKmsDpyAttributeDisplayportConnectorTypeValue connectorType; + NvBool sinkIsAudioCapable; + + struct { + NvBool valid; + NvU8 buffer[NVKMS_GUID_SIZE]; + char str[NVKMS_GUID_STRING_SIZE]; + } guid; + } dp; + + struct { + HDMI_SRC_CAPS srcCaps; + HDMI_SINK_CAPS sinkCaps; + } hdmi; + + struct { + NvBool ycbcr422Capable; + NvBool ycbcr444Capable; + } colorSpaceCaps; + + struct { + NvBool supported : 1; + NvBool requiresModetimingPatching : 1; + NvBool isDLP : 1; + NvBool isAegis : 1; + NvBool requiresVbiAdjustment : 1; + NvU32 subType; + int indexInOverrideTimings; + } stereo3DVision; + + struct { + enum NvKmsDpyVRRType type; + } vrr; + +} NVDpyEvoRec; + +static inline NvBool nvDpyEvoIsDPMST(const NVDpyEvoRec *pDpyEvo) +{ + return nvDpyIdIsInDpyIdList(pDpyEvo->id, + pDpyEvo->pDispEvo->displayPortMSTIds); +} + +// Return a pDpy's connector's display ID +static inline NvU32 nvDpyEvoGetConnectorId(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 rmDpyId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + + // This function shouldn't be used for DP MST dynamic devices. + nvAssert(!nvDpyEvoIsDPMST(pDpyEvo)); + nvAssert(ONEBITSET(rmDpyId)); + + return rmDpyId; +} + +static inline +NvBool nvConnectorIsInternal(const NVConnectorEvoRec *pConnectorEvo) +{ + /* For mobile GPUs check for LVDS or embedded DisplayPort signal flag. + * If found, DFP is internal*/ + return (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) && + (((pConnectorEvo->pDispEvo->pDevEvo->mobile) && + (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _SIGNAL, _LVDS, + pConnectorEvo->dfpInfo))) || + (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _SIGNAL, _DSI, + pConnectorEvo->dfpInfo)) || + (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _EMBEDDED_DISPLAYPORT, _TRUE, + pConnectorEvo->dfpInfo))); +} + +static inline NvU32 NV_EVO_LOCK_PIN(NvU32 n) +{ + return NV_EVO_LOCK_PIN_0 + n; +} + +static inline NvU32 NV_EVO_LOCK_PIN_INTERNAL(NvU32 n) +{ + return NV_EVO_LOCK_PIN_INTERNAL_0 + n; +} + +static inline NvBool NV_EVO_LOCK_PIN_IS_INTERNAL(NvU32 n) +{ + ct_assert(NV_IS_UNSIGNED(n) && NV_EVO_LOCK_PIN_INTERNAL_0 == 0); + return n < NV_EVO_LOCK_PIN_0; +} + + +/* + * Utility macro for looping over all the pConnectorsEvo on a pDispEvo. 
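A small sanity sketch (illustrative only, assuming NV_EVO_LOCK_PIN_0 is non-zero) of the lock-pin numbering convention established by the helpers above: internal pins are numbered from 0, so NV_EVO_LOCK_PIN_IS_INTERNAL() reduces to a range check against NV_EVO_LOCK_PIN_0.

static void ExampleLockPinNumbering(void)
{
    /* Internal pin 0 maps to index 0, which is below NV_EVO_LOCK_PIN_0. */
    nvAssert(NV_EVO_LOCK_PIN_IS_INTERNAL(NV_EVO_LOCK_PIN_INTERNAL(0)));

    /* External pin 0 maps to NV_EVO_LOCK_PIN_0 itself, so it is not internal. */
    nvAssert(!NV_EVO_LOCK_PIN_IS_INTERNAL(NV_EVO_LOCK_PIN(0)));
}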
+ */ +#define FOR_ALL_EVO_CONNECTORS(_pConnectorEvo, _pDispEvo) \ + nvListForEachEntry((_pConnectorEvo), \ + &(_pDispEvo)->connectorList, connectorListEntry) + +/* + * Utility macro for declaring a for loop to walk over all the + * pDispEvos on a particular pDevEvo. + */ +#define FOR_ALL_EVO_DISPLAYS(_pDispEvo, _i, _pDevEvo) \ + for ((_i) = 0, \ + (_pDispEvo) = (_pDevEvo)->pDispEvo[0]; \ + (_pDispEvo); \ + (_i)++, (_pDispEvo) = ((_i) < (_pDevEvo)->nDispEvo) ? \ + (_pDevEvo)->pDispEvo[(_i)] : NULL) + +#define FOR_ALL_EVO_DPYS(_pDpyEvo, _dpyIdList, _pDispEvo) \ + nvListForEachEntry((_pDpyEvo), &(_pDispEvo)->dpyList, dpyListEntry) \ + if (nvDpyIdIsInDpyIdList((_pDpyEvo)->id, (_dpyIdList))) + +#define FOR_ALL_EVO_FRAMELOCKS(_pFrameLockEvo) \ + nvListForEachEntry(_pFrameLockEvo, &nvEvoGlobal.frameLockList, \ + frameLockListEntry) + +#define FOR_ALL_EVO_DEVS(_pDevEvo) \ + nvListForEachEntry(_pDevEvo, &nvEvoGlobal.devList, devListEntry) + +#define FOR_ALL_DEFERRED_REQUEST_FIFOS_IN_SWAP_GROUP( \ + _pSwapGroup, _pDeferredRequestFifo) \ + nvListForEachEntry((_pDeferredRequestFifo), \ + &(_pSwapGroup)->deferredRequestFifoList, \ + swapGroup.deferredRequestFifoListEntry) + +#define FOR_EACH_SUBDEV_IN_MASK(_sd, _mask) \ + FOR_EACH_INDEX_IN_MASK(32, _sd, _mask) + +#define FOR_EACH_SUBDEV_IN_MASK_END \ + FOR_EACH_INDEX_IN_MASK_END + +static inline NVDpyEvoPtr nvGetOneArbitraryDpyEvo(NVDpyIdList dpyIdList, + const NVDispEvoRec *pDispEvo) +{ + NVDpyEvoPtr pDpyEvo; + + nvAssert(nvDpyIdListIsASubSetofDpyIdList(dpyIdList, + pDispEvo->validDisplays)); + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + return pDpyEvo; + } + + return NULL; +} + + +/* + * Return whether or not the display devices on the connector should + * be handled by the DP library. + */ +static inline NvBool nvConnectorUsesDPLib(const NVConnectorEvoRec + *pConnectorEvo) +{ + return (pConnectorEvo->pDpLibConnector != NULL); +} + +static inline +NvBool nvConnectorIsDPSerializer(const NVConnectorEvoRec *pConnectorEvo) +{ + return (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_DP_SERIALIZER); +} + +/* + * Return whether or not the display device given is handled by the DP + * library. + */ +static inline NvBool nvDpyUsesDPLib(const NVDpyEvoRec *pDpyEvo) +{ + return nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo); +} + +/* + * Return whether this dpy is active. The dpy is active if it is + * driven by a head. + */ +static inline NvBool nvDpyEvoIsActive(const NVDpyEvoRec *pDpyEvo) +{ + return (pDpyEvo->head != NV_INVALID_HEAD); +} + +/* + * Return true if this dpy reports an EDID supporting HDMI 3D and + * isn't connected via active DisplayPort. + */ +static inline NvBool nvDpyEvoSupportsHdmi3D(const NVDpyEvoRec *pDpyEvo) +{ + return (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.HDMI3DSupported && + !((pDpyEvo->pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) || + (pDpyEvo->pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B))); + +} + +static inline NvBool nvHeadIsActive(const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + return (head < ARRAY_LEN(pDispEvo->headState)) && + (pDispEvo->headState[head].pConnectorEvo != NULL); +} + +/*! + * Return the mask of active heads on this pDispEvo. 
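To illustrate the iteration macros and DP-library helpers defined above, a hypothetical sketch (helper name invented) that counts the connectors on a disp managed by the DisplayPort library:

static NvU32 ExampleCountDpLibConnectors(NVDispEvoPtr pDispEvo)
{
    NVConnectorEvoPtr pConnectorEvo;
    NvU32 count = 0;

    FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) {
        if (nvConnectorUsesDPLib(pConnectorEvo)) {
            count++;
        }
    }

    return count;
}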
+ */ +static inline NvU32 nvGetActiveHeadMask(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + NvU32 headMask = 0; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + headMask |= 1 << head; + } + } + + return headMask; +} + +static inline NvBool nvAllHeadsInactive(const NVDevEvoRec *pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvU32 head; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + return FALSE; + } + } + } + + return TRUE; +} + +/* + * Return the list of dpys that are currently active on the given disp. + */ +static inline NVDpyIdList nvActiveDpysOnDispEvo(const NVDispEvoRec *pDispEvo) +{ + NVDpyIdList dpyIdList = nvEmptyDpyIdList(); + NvU32 head; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + dpyIdList = nvAddDpyIdListToDpyIdList(dpyIdList, + pHeadState->activeDpys); + } + + return dpyIdList; +} + +static inline NvU32 nvGpuIdOfDispEvo(const NVDispEvoRec *pDispEvo) +{ + nvAssert(pDispEvo->displayOwner < pDispEvo->pDevEvo->numSubDevices); + return pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner]->gpuId; +} + +static inline NvBool nvIsEmulationEvo(const NVDevEvoRec *pDevEvo) +{ + return pDevEvo->simulationType != + NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE; +} + +static inline NvBool nvIs3DVisionStereoEvo(const enum NvKmsStereoMode stereo) +{ + return (stereo == NVKMS_STEREO_NVIDIA_3D_VISION || + stereo == NVKMS_STEREO_NVIDIA_3D_VISION_PRO); +} + +/* + * Utility macro for iterating over all head bits set in a head bit mask + */ +#define FOR_ALL_HEADS(_head, _headMask) \ + for((_head) = 0; \ + (_headMask) >> (_head); \ + (_head)++) \ + if ((_headMask) & (1 << (_head))) + +typedef struct _NVLutSurfaceEvo { + NVDevEvoPtr pDevEvo; + + NvU32 handle; + NvU32 size; + + NvU32 dispCtxDma; + + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]; +} NVLutSurfaceEvoRec; + +typedef struct _NVFrameLockEvo { + NVListRec frameLockListEntry; + + /* array of subdev GPU IDs */ + NvU32 nGpuIds; + NvU32 gpuIds[NV30F1_CTRL_MAX_GPUS_PER_GSYNC]; + + NvU32 gsyncId; + NvU32 device; /* RM device handle for this object */ + + int fpgaIdAndRevision; /* FPGA revId (including firmware version + * and board ID) */ + + int firmwareMajorVersion; /* FPGA firmware major version */ + int firmwareMinorVersion; /* FPGA firmware minor version */ + NvU32 boardId; /* NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_* */ + NvU32 caps; /* Various capabilities flags */ + + NvU32 maxSyncSkew; /* Max sync skew increment */ + NvU32 syncSkewResolution; /* In nanoseconds */ + NvU32 maxSyncInterval; /* Max sync interval */ + + NvU32 houseSyncUseable; + + /* House sync mode requested by user */ + enum NvKmsFrameLockAttributeHouseSyncModeValue houseSyncMode; + NvU32 houseSyncModeValidValues; + + NvBool houseSyncAssy; /* Current desired state */ + NvBool houseSyncArmed; /* Current hardware state */ + + NvU8 connectedGpuMask; /* bitmask of GPUs that are connected */ + NvU8 syncReadyGpuMask; /* bitmask of GPUs that are syncReady */ + + NvBool syncReadyLast; /* Previous NV_CTRL_FRAMELOCK_SYNC_READY + * value changed either from nvctrl or + * the RM, used to avoid resending events + * since RM doesn't trigger a SYNC_READY + * event on framelock disable */ + + NvBool videoModeReadOnly; /* If video mode is read-only */ + + /* Current device state */ + enum NvKmsFrameLockAttributePolarityValue polarity; + 
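A hypothetical sketch (helper name invented) combining nvGetActiveHeadMask() with the FOR_ALL_HEADS iterator defined above:

static NvU32 ExampleCountActiveHeads(NVDispEvoPtr pDispEvo)
{
    const NvU32 activeHeadMask = nvGetActiveHeadMask(pDispEvo);
    NvU32 head;
    NvU32 count = 0;

    FOR_ALL_HEADS(head, activeHeadMask) {
        count++;
    }

    return count;
}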
NvU32 syncDelay; + NvU32 syncInterval; + enum NvKmsFrameLockAttributeVideoModeValue videoMode; + NvBool testMode; + +} NVFrameLockEvoRec; + +/*! + * The buffer that accumulates a string with information returned to + * the client. + */ +typedef struct _NVEvoInfoString { + NvU16 length; /*! strlen(s); excludes the nul terminator */ + NvU16 totalLength; /*! number of bytes in the buffer pointed to by 's' */ + char *s; /*! pointer to the buffer to be written to */ +} NVEvoInfoStringRec; + +enum NvHsMapPermissions { + NvHsMapPermissionsNone, + NvHsMapPermissionsReadOnly, + NvHsMapPermissionsReadWrite, +}; + +#define NV_HS_BAD_GPU_ADDRESS ((NvU64) -1) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatI8)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatA1R5G5B5) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX1R5G5B5) | \ + NVBIT64(NvKmsSurfaceMemoryFormatR5G6B5)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatA8R8G8B8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX8R8G8B8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatA2B10G10R10) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX2B10G10R10) | \ + NVBIT64(NvKmsSurfaceMemoryFormatA8B8G8R8) | \ + NVBIT64(NvKmsSurfaceMemoryFormatX8B8G8R8)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP ( \ + NVBIT64(NvKmsSurfaceMemoryFormatRF16GF16BF16AF16) | \ + NVBIT64(NvKmsSurfaceMemoryFormatR16G16B16A16)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N420)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8V8_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___V8U8_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N420) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N420)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N422) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N422)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___U10V10_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY10___V10U10_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___U12V12_N444) | \ + NVBIT64(NvKmsSurfaceMemoryFormatY12___V12U12_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8___V8_N444)) + +#define NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 ( \ + NVBIT64(NvKmsSurfaceMemoryFormatY8___U8___V8_N420)) + +struct _NVSurfaceEvoRec { + /* + * By default, all NVSurfaceEvoRecs will have requireCtxDma == TRUE, and + * have a ctxDma allocated and placed in the display engine hash table for + * each plane. + * + * But, if the client specified the noDisplayHardwareAccess flag, + * requireCtxDma will be FALSE, and ctxDma will be 0 for all planes. 
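The surface-memory-format class bitmasks above are keyed by NvKmsSurfaceMemoryFormat enum values; a hedged sketch (helper name invented) of testing class membership:

static NvBool ExampleIsPacked4BppRgbFormat(enum NvKmsSurfaceMemoryFormat format)
{
    /* Each class mask is the bitwise OR of NVBIT64(format) values. */
    return (NVBIT64(format) &
            NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) != 0;
}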
+ * + * requireCtxDma is used to remember what the client requested, so that + * we correctly honor noDisplayHardwareAccess across + * FreeSurfaceCtxDmasForAllOpens() / + * AllocSurfaceCtxDmasForAllOpens() cycles. + */ + NvBool requireCtxDma; + + struct { + NvU32 rmHandle; + NvU32 ctxDma; + NvU32 pitch; + NvU64 offset; + NvU64 rmObjectSizeInBytes; + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + struct { + const struct NvKmsPerOpenDev *pOpenDev; + NvKmsSurfaceHandle surfaceHandle; + } owner; + + NvU32 widthInPixels; + NvU32 heightInPixels; + + NvU32 log2GobsPerBlockY; + + /* + * HeadSurface needs a CPU mapping of surfaces containing semaphores. + */ + void *cpuAddress[NVKMS_MAX_SUBDEVICES]; + + enum NvKmsSurfaceMemoryLayout layout; + enum NvKmsSurfaceMemoryFormat format; + + NvKmsMemoryIsoType isoType; + + /* + * A surface has two reference counts: + * + * - rmRefCnt indicates how many uses of the surface reference + * NVSurfaceEvoRec::planes[]::rmHandle (the surface owner who registered + * the surface, EVO currently displaying the surface, an open + * surface grant file descriptor). + * + * - structRefCnt indicates how many uses of the surface reference + * the NVSurfaceEvoRec. In addition to the rmRefCnt uses, this + * will also count NVKMS clients who acquired the surface + * through GRANT_SURFACE/ACQUIRE_SURFACE. + * + * When a client registers a surface, both reference counts will + * be initialized to 1. The RM surface for each plane will be unduped when + * rmRefCnt reaches zero. The NVSurfaceEvoRec structure will be + * freed when structRefCnt reaches zero. + * + * In most cases, one of the following will be true: + * (rmRefCnt == 0) && (structRefCnt == 0) + * (rmRefCnt != 0) && (structRefCnt != 0) + * The only exception is when the owner of the surface unregisters it while + * other clients still have references to it; in that case, the rmRefCnt + * can drop to zero while structRefCnt is still non-zero. + * + * If rmRefCnt reaches zero before structRefCnt, the surface is + * "orphaned": it still exists in ACQUIRE_SURFACE clients' handle + * namespaces and/or granted FDs, but is not usable in subsequent API + * requests (e.g., to flip, specify cursor image, etc). + * + * Described in a table: + * + * ACTION rmRefCnt structRefCnt + * a) NVKMS_IOCTL_REGISTER_SURFACE =1 =1 + * b) flip to surface +1 +1 + * c) NVKMS_IOCTL_GRANT_SURFACE(grantFd) n/a +1 + * d) NVKMS_IOCTL_ACQUIRE_SURFACE n/a +1 + * e) NVKMS_IOCTL_UNREGISTER_SURFACE -1 -1 + * f) flip away from surface -1 -1 + * g) close(grantFd) n/a -1 + * h) NVKMS_IOCTL_RELEASE_SURFACE n/a -1 + * i) ..._REGISTER_DEFERRED_REQUEST_FIFO +1 +1 + * j) ..._UNREGISTER_DEFERRED_REQUEST_FIFO -1 -1 + * + * (e) complements (a) + * (f) complements (b) + * (g) complements (c) + * (h) complements (d) + * (j) complements (i) + */ + NvU64 rmRefCnt; + NvU64 structRefCnt; + +#if NVKMS_PROCFS_ENABLE + NvBool procFsFlag; +#endif + +}; + +typedef struct _NVDeferredRequestFifoRec { + NVSurfaceEvoPtr pSurfaceEvo; + struct NvKmsDeferredRequestFifo *fifo; + + /* A deferred request fifo may be joined to a swapGroup. 
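A worked illustration (hypothetical sequence, not real driver code) of the reference-count table above: register a surface, grant it to another client, then unregister it.

static void ExampleSurfaceRefCountLifecycle(NVSurfaceEvoRec *pSurfaceEvo)
{
    /* (a) NVKMS_IOCTL_REGISTER_SURFACE: both counts start at 1. */
    pSurfaceEvo->rmRefCnt = 1;
    pSurfaceEvo->structRefCnt = 1;

    /* (c) NVKMS_IOCTL_GRANT_SURFACE(grantFd): only structRefCnt grows. */
    pSurfaceEvo->structRefCnt++;

    /* (e) NVKMS_IOCTL_UNREGISTER_SURFACE: both counts drop. */
    pSurfaceEvo->rmRefCnt--;
    pSurfaceEvo->structRefCnt--;

    /* The surface is now "orphaned": the RM handles have been unduped, but
     * the NVSurfaceEvoRec stays allocated until close(grantFd), step (g). */
    nvAssert(pSurfaceEvo->rmRefCnt == 0);
    nvAssert(pSurfaceEvo->structRefCnt == 1);
}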
*/ + struct { + NVSwapGroupPtr pSwapGroup; + NVListRec deferredRequestFifoListEntry; + NvBool ready; + NvBool perEyeStereo; + NvBool pendingJoined; + NvBool pendingReady; + NvU32 semaphoreIndex; + struct NvKmsPerOpen *pOpenUnicastEvent; + } swapGroup; +} NVDeferredRequestFifoRec; + +typedef struct _NVSwapGroupRec { + NVListRec deferredRequestFifoList; + NvBool zombie; + NvBool pendingFlip; + NvU32 nMembers; + NvU32 nMembersReady; + NvU32 nMembersPendingJoined; + + NvU16 nClips; + struct NvKmsRect *pClipList; + NvBool swapGroupIsFullscreen; + + NvU64 refCnt; +} NVSwapGroupRec; + +typedef struct { + NvU32 clientHandle; + + NVListRec devList; + NVListRec frameLockList; + +#if defined(DEBUG) + NVListRec debugMemoryAllocationList; +#endif + + struct NvKmsPerOpen *nvKmsPerOpen; + +} NVEvoGlobal; + +extern NVEvoGlobal nvEvoGlobal; + +/* + * These enums are used during IMP validation: + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE means that no changes will be made to + * the current display bandwidth values. + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE means that NVKMS will increase the + * current display bandwidth values if required by IMP. This is typically + * specified pre-modeset/flip. + * - NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST means that NVKMS may potentially + * decrease the current display bandwidth values to match the current display + * configuration. This is typically specified post-modeset/flip. + */ +typedef enum { + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE = 0, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE = 1, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST = 2, +} NVEvoReallocateBandwidthMode; + +typedef struct { + struct { + /* pTimings == NULL => this head is disabled */ + const NVHwModeTimingsEvo *pTimings; + const struct NvKmsUsageBounds *pUsage; + NvU32 displayId; + NvU32 orIndex; + NvU8 orType; /* NV0073_CTRL_SPECIFIC_OR_TYPE_* */ + } head[NVKMS_MAX_HEADS_PER_DISP]; + + NvBool requireBootClocks; + NVEvoReallocateBandwidthMode reallocBandwidth; +} NVEvoIsModePossibleDispInput; + +typedef struct { + NvBool possible; + NvU32 minRequiredBandwidthKBPS; + NvU32 floorBandwidthKBPS; +} NVEvoIsModePossibleDispOutput; + +/* CRC-query specific defines */ +/*! + * Structure that defines information about where a single variable is stored in + * the CRC32NotifierEntry structure + */ +typedef struct _CRC32NotifierEntryRec { + NvU32 field_offset; /* Var's offset from start of CRC32Notifier Struct */ + NvU32 field_base_bit; /* LSB bit index for variable in entry */ + NvU32 field_extent_bit; /* MSB bit index for variable in entry */ + struct NvKmsDpyCRC32 *field_frame_values; /* Array to store read field values across frames */ +} CRC32NotifierEntryRec; + +/*! + * Internally identifies flag read from CRC32Notifier's Status for error-checking + */ +enum CRC32NotifierFlagType { + NVEvoCrc32NotifierFlagCount, + NVEvoCrc32NotifierFlagCrcOverflow, +}; + +/*! + * Structure that defines information about where a single flag is stored in + * the Status of the CRC32NotifierEntry structure + */ +typedef struct _CRC32NotifierEntryFlags { + NvU32 flag_base_bit; /* LSB bit index for flag in entry */ + NvU32 flag_extent_bit; /* MSB bit index for flag in entry */ + enum CRC32NotifierFlagType flag_type; /* Type of error-checking to perform on flag */ +} CRC32NotifierEntryFlags; + +/*! + * Internal Crc32NotifierRead structure used to collect multiple frames of CRC + * data from a QueryCRC32 call. Arrays should be allocated to match + * entry_count frames. + */ +typedef struct _CRC32NotifierCrcOut { + /*! 
+ * Array of CRCs generated from the Compositor hardware + */ + struct NvKmsDpyCRC32 *compositorCrc32; + + /*! + * CRCs generated from the RG hardware, if head is driving RG/SF. + */ + struct NvKmsDpyCRC32 *rasterGeneratorCrc32; + + /*! + * Crc values generated from the target SF/OR depending on connector's OR type + */ + struct NvKmsDpyCRC32 *outputCrc32; + +} CRC32NotifierCrcOut; + + +typedef const struct _nv_evo_hal { + void (*SetRasterParams) (NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState); + void (*SetProcAmp) (NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState); + void (*SetHeadControl) (NVDevEvoPtr, int sd, int head, + NVEvoUpdateState *updateState); + void (*SetHeadRefClk) (NVDevEvoPtr, int head, NvBool external, + NVEvoUpdateState *updateState); + void (*HeadSetControlOR) (NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState); + void (*ORSetControl) (NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState); + void (*HeadSetDisplayId) (NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState); + NvBool (*SetUsageBounds) (NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState); + void (*Update) (NVDevEvoPtr, + const NVEvoUpdateState *updateState, + NvBool releaseElv); + void (*IsModePossible) (NVDispEvoPtr, + const NVEvoIsModePossibleDispInput *, + NVEvoIsModePossibleDispOutput *); + void (*PrePostIMP) (NVDispEvoPtr, NvBool isPre); + void (*SetNotifier) (NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState); + NvBool (*GetCapabilities) (NVDevEvoPtr); + void (*Flip) (NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + void (*FlipTransitionWAR) (NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState); + void (*FillLUTSurface) (NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth); + void (*SetLUTContextDma) (const NVDispEvoRec *pDispEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + void (*SetOutputScaler) (const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState); + void (*SetViewportPointIn) (NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState); + void (*SetViewportInOut) (NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortOutMin, + const NVHwModeViewPortEvo *pViewPortOut, + const NVHwModeViewPortEvo *pViewPortOutMax, + NVEvoUpdateState *updateState); + void (*SetCursorImage) (NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams); + NvBool (*ValidateCursorSurface)(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo); + NvBool (*ValidateWindowFormat)(const enum 
NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut); + void (*InitCompNotifier) (const NVDispEvoRec *pDispEvo, int idx); + NvBool (*IsCompNotifierComplete) (NVDispEvoPtr pDispEvo, int idx); + void (*WaitForCompNotifier) (const NVDispEvoRec *pDispEvo, int idx); + void (*SetDither) (NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState); + void (*SetStallLock) (NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState); + void (*SetDisplayRate) (NVDispEvoPtr pDispEvo, const int head, + NvBool enable, + NVEvoUpdateState *updateState, + NvU32 timeoutMicroseconds); + void (*InitChannel) (NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel); + void (*InitDefaultLut) (NVDevEvoPtr pDevEvo); + void (*InitWindowMapping) (const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState); + NvBool (*IsChannelIdle) (NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + NvBool (*IsChannelMethodPending)(NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NvBool *result); + NvBool (*ForceIdleSatelliteChannel)(NVDevEvoPtr, + const NVEvoIdleChannelState *idleChannelState); + NvBool (*ForceIdleSatelliteChannelIgnoreLock)(NVDevEvoPtr, + const NVEvoIdleChannelState *idleChannelState); + + void (*AccelerateChannel)(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + NvU32 *pOldAccelerators); + + void (*ResetChannelAccelerators)(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + NvU32 oldAccelerators); + + NvBool (*AllocRmCtrlObject) (NVDevEvoPtr); + void (*FreeRmCtrlObject) (NVDevEvoPtr); + void (*SetImmPointOut) (NVDevEvoPtr, NVEvoChannelPtr, NvU32 sd, + NVEvoUpdateState *updateState, + NvU16 x, NvU16 y); + void (*StartCRC32Capture) (NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NVConnectorEvoPtr pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + NvU32 head, + NvU32 sd, + NVEvoUpdateState *updateState /* out */); + void (*StopCRC32Capture) (NVDevEvoPtr pDevEvo, + NvU32 head, + NVEvoUpdateState *updateState /* out */); + NvBool (*QueryCRC32) (NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32 /* out */, + NvU32 *numCRC32 /* out */); + void (*GetScanLine) (const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod); + void (*ConfigureVblankSyncObject) (NVDevEvoPtr pDevEvo, + NvU16 rasterLine, + NvU32 head, + NvU32 semaphoreIndex, + NvU32 hCtxDma, + NVEvoUpdateState* pUpdateState); + + void (*SetDscParams) (const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings); + + void (*EnableMidFrameAndDWCFWatermark)(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState); + + NvU32 (*GetActiveViewportOffset)(NVDispEvoRec *pDispEvo, NvU32 head); + + void (*ClearSurfaceUsage) (NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo); + + NvBool (*ComputeWindowScalingTaps)(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState); + + const NVEvoScalerCaps* (*GetWindowScalingCaps)(const NVDevEvoRec *pDevEvo); + + struct { + NvU32 supportsNonInterlockedUsageBoundsUpdate :1; + NvU32 supportsDisplayRate :1; + NvU32 supportsFlipLockRGStatus :1; + NvU32 needDefaultLutSurface :1; + NvU32 hasUnorm16OLUT :1; + NvU32 supportsDigitalVibrance :1; + NvU32 supportsImageSharpening :1; + NvU32 supportsHDMIVRR :1; + NvU32 
supportsCoreChannelSurface :1; + NvU32 supportsHDMIFRL :1; + NvU32 supportsSetStorageMemoryLayout :1; + NvU32 supportsIndependentAcqRelSemaphore :1; + NvU32 supportsCoreLut :1; + NvU32 supportsSynchronizedOverlayPositionUpdate :1; + NvU32 supportsVblankSyncObjects :1; + NvU32 requiresScalingTapsInBothDimensions :1; + + NvU32 supportedDitheringModes; + size_t impStructSize; + NVEvoScalerTaps minScalerTaps; + } caps; +} NVEvoHAL, *NVEvoHALPtr; + +typedef const struct _nv_evo_cursor_hal { + NvU32 klass; + + void (*MoveCursor) (NVDevEvoPtr, NvU32 sd, NvU32 head, + NvS16 x, NvS16 y); + void (*ReleaseElv) (NVDevEvoPtr, NvU32 sd, NvU32 head); + + struct { + NvU16 maxSize; + } caps; +} NVEvoCursorHAL, *NVEvoCursorHALPtr; + +NvU32 nvEvoGetHeadSetStoragePitchValue(const NVDevEvoRec *pDevEvo, + enum NvKmsSurfaceMemoryLayout layout, + NvU32 pitch); + +NvBool nvEvoGetHeadSetControlCursorValue90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue); + +static inline NvBool nvEvoScalingUsageBoundsEqual( + const struct NvKmsScalingUsageBounds *a, + const struct NvKmsScalingUsageBounds *b) +{ + return (a->maxVDownscaleFactor == b->maxVDownscaleFactor) && + (a->maxHDownscaleFactor == b->maxHDownscaleFactor) && + (a->vTaps == b->vTaps) && + (a->vUpscalingAllowed == b->vUpscalingAllowed); +} + +static inline NvBool +nvEvoLayerUsageBoundsEqual(const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b, + const NvU32 layer) +{ + return (a->layer[layer].usable == b->layer[layer].usable) && + (a->layer[layer].supportedSurfaceMemoryFormats == + b->layer[layer].supportedSurfaceMemoryFormats) && + nvEvoScalingUsageBoundsEqual(&a->layer[layer].scaling, + &b->layer[layer].scaling); +} + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_TYPES_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-utils.h b/src/nvidia-modeset/include/nvkms-utils.h new file mode 100644 index 000000000..e44394bb0 --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-utils.h @@ -0,0 +1,273 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_UTILS_H__ +#define __NVKMS_UTILS_H__ + +#include "nvkms-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvidia-modeset-os-interface.h" + +/*! + * Subtract B from A, and handle wrap around. 
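A hypothetical sketch (helper name invented) built on the per-layer comparison helpers above: two heads' usage bounds are interchangeable only if every layer compares equal.

static NvBool ExampleUsageBoundsEqualForAllLayers(
    const struct NvKmsUsageBounds *a,
    const struct NvKmsUsageBounds *b)
{
    NvU32 layer;

    for (layer = 0; layer < NVKMS_MAX_LAYERS_PER_HEAD; layer++) {
        if (!nvEvoLayerUsageBoundsEqual(a, b, layer)) {
            return FALSE;
        }
    }

    return TRUE;
}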
+ * + * This is useful for cases where A is a number that is incremented and wrapped; + * e.g., + * + * a = (a + 1) % max; + * + * and we want to subtract some amount from A to get one of its previous values. + */ +static inline NvU8 A_minus_b_with_wrap_U8(NvU8 a, NvU8 b, NvU8 max) +{ + return (a + max - b) % max; +} + +/*! + * Return whether (A + B) > C, avoiding integer overflow in the addition. + */ +static inline NvBool A_plus_B_greater_than_C_U16(NvU16 a, NvU16 b, NvU16 c) +{ + return (NV_U16_MAX - a < b) || ((a + b) > c); +} + +static inline NvS32 clamp_S32(NvS32 val, NvS32 lo, NvS32 hi) +{ + if (val < lo) { + return lo; + } else if (val > hi) { + return hi; + } else { + return val; + } +} + +/*! + * Return whether the bitmask contains bits greater than or equal to + * the maximum. + */ +static inline NvBool nvHasBitAboveMax(NvU32 bitmask, NvU8 max) +{ + nvAssert(max <= 32); + if (max == 32) { + return FALSE; + } + return (bitmask & ~((1 << max) - 1)) != 0; +} + +/*! + * Check if a timeout is exceeded. + * + * This is intended to be used when busy waiting in a loop, like this: + * + * NvU64 startTime = 0; + * + * do { + * if (SOME-CONDITION) { + * break; + * } + * + * if (nvExceedsTimeoutUSec(&startTime, TIMEOUT-IN-USEC)) { + * break; + * } + * + * nvkms_yield(); + * + * } while (TRUE); + * + * The caller should zero-initialize startTime, and nvExceedsTimeoutUSec() will + * set startTime to the starting time on the first call. This is structured + * this way to avoid the nvkms_get_usec() call in the common case where + * SOME-CONDITION is true on the first iteration (nvkms_get_usec() is not + * expected to be a large penalty, but it still seems nice to avoid it when not + * needed). + */ +static inline NvBool nvExceedsTimeoutUSec( + NvU64 *pStartTime, + NvU64 timeoutPeriod) +{ + const NvU64 currentTime = nvkms_get_usec(); + + if (*pStartTime == 0) { + *pStartTime = currentTime; + return FALSE; + } + + if (currentTime < *pStartTime) { /* wraparound?! */ + return TRUE; + } + + return (currentTime - *pStartTime) > timeoutPeriod; +} + +/*! + * Return a non-NULL string. + * + * The first argument, stringMightBeNull, could be NULL. In which + * case, return the second argument, safeString, which the caller + * should ensure is not NULL (e.g., by providing a literal). + * + * This is intended as a convenience for situations like this: + * + * char *s = FunctionThatMightReturnNull(); + * printf("%s\n", nvSafeString(s, "stringLiteral")); + */ +static inline const char *nvSafeString(char *stringMightBeNull, + const char *safeString) +{ + return (stringMightBeNull != NULL) ? 
stringMightBeNull : safeString; +} + +static inline NvU64 nvCtxDmaOffsetFromBytes(NvU64 ctxDmaOffset) +{ + nvAssert((ctxDmaOffset & ((1 << NV_SURFACE_OFFSET_ALIGNMENT_SHIFT) - 1)) + == 0); + + return (ctxDmaOffset >> 8); +} + +NvU8 nvPixelDepthToBitsPerComponent(enum nvKmsPixelDepth pixelDepth); + +typedef enum { + EVO_LOG_WARN, + EVO_LOG_ERROR, + EVO_LOG_INFO, +} NVEvoLogType; + +void *nvInternalAlloc(size_t size, NvBool zero); +void *nvInternalRealloc(void *ptr, size_t size); +void nvInternalFree(void *ptr); +char *nvInternalStrDup(const char *str); +NvBool nvGetRegkeyValue(const NVDevEvoRec *pDevEvo, + const char *key, NvU32 *val); + +#if defined(DEBUG) + +void nvReportUnfreedAllocations(void); + +void *nvDebugAlloc(size_t size, int line, const char *file); +void *nvDebugCalloc(size_t nmemb, size_t size, int line, const char *file); +void *nvDebugRealloc(void *ptr, size_t size, int line, const char *file); +void nvDebugFree(void *ptr); +char *nvDebugStrDup(const char *str, int line, const char *file); + +#define nvAlloc(s) nvDebugAlloc((s), __LINE__, __FILE__) +#define nvCalloc(n,s) nvDebugCalloc((n), (s), __LINE__, __FILE__) +#define nvFree(p) nvDebugFree(p) +#define nvRealloc(p,s) nvDebugRealloc((p), (s), __LINE__, __FILE__) +#define nvStrDup(s) nvDebugStrDup((s), __LINE__, __FILE__) + +#else + +#define nvAlloc(s) nvInternalAlloc((s), FALSE) +#define nvCalloc(n,s) nvInternalAlloc((n)*(s), TRUE) +#define nvRealloc(p,s) nvInternalRealloc((p),(s)) +#define nvFree(s) nvInternalFree(s) +#define nvStrDup(s) nvInternalStrDup(s) + +#endif + +void nvVEvoLog(NVEvoLogType logType, NvU8 gpuLogIndex, + const char *fmt, va_list ap); + +void nvEvoLogDev(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLogDisp(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLog(NVEvoLogType logType, const char *fmt, ...) + __attribute__((format (printf, 2, 3))); + + + +#if defined(DEBUG) + +void nvEvoLogDebug(NVEvoLogType logType, const char *fmt, ...) + __attribute__((format (printf, 2, 3))); + +void nvEvoLogDevDebug(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +void nvEvoLogDispDebug(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) + __attribute__((format (printf, 3, 4))); + +#else + +# define nvEvoLogDebug(...) +# define nvEvoLogDevDebug(pDevEvo, ...) +# define nvEvoLogDispDebug(pDispEvo, ...) + +#endif /* DEBUG */ + +void nvInitInfoString(NVEvoInfoStringPtr pInfoString, + char *s, NvU16 totalLength); + +void nvEvoLogInfoStringRaw(NVEvoInfoStringPtr pInfoString, + const char *format, ...) + __attribute__((format (printf, 2, 3))); +void nvEvoLogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, ...) 
+ __attribute__((format (printf, 2, 3))); + + +typedef NvU32 NvKmsGenericHandle; + +NvBool nvEvoApiHandlePointerIsPresent(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer); +NvKmsGenericHandle nvEvoCreateApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer); +void *nvEvoGetPointerFromApiHandle(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle handle); +void *nvEvoGetPointerFromApiHandleNext(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle *pHandle); +void nvEvoDestroyApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + NvKmsGenericHandle handle); +NvBool nvEvoInitApiHandles(NVEvoApiHandlesPtr pEvoApiHandles, + NvU32 defaultSize); +void nvEvoDestroyApiHandles(NVEvoApiHandlesPtr pEvoApiHandles); + +#define FOR_ALL_POINTERS_IN_EVO_API_HANDLES(_pEvoApiHandles, \ + _pointer, _handle) \ + for ((_handle) = 0, \ + (_pointer) = nvEvoGetPointerFromApiHandleNext(_pEvoApiHandles, \ + &(_handle)); \ + (_pointer) != NULL; \ + (_pointer) = nvEvoGetPointerFromApiHandleNext(_pEvoApiHandles, \ + &(_handle))) + + + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_UTILS_H__ */ diff --git a/src/nvidia-modeset/include/nvkms-vrr.h b/src/nvidia-modeset/include/nvkms-vrr.h new file mode 100644 index 000000000..91c375a8c --- /dev/null +++ b/src/nvidia-modeset/include/nvkms-vrr.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
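Returning to the generic handle interface declared in nvkms-utils.h above, a hedged round-trip sketch; the helper name and the zero-means-failure convention are assumptions for illustration only.

static NvBool ExampleApiHandleRoundTrip(NVEvoApiHandlesPtr pEvoApiHandles,
                                        void *pointer)
{
    NvKmsGenericHandle handle =
        nvEvoCreateApiHandle(pEvoApiHandles, pointer);

    if (handle == 0) {
        /* Assumed failure convention for this sketch. */
        return FALSE;
    }

    /* The handle maps back to the pointer it was created for. */
    nvAssert(nvEvoGetPointerFromApiHandle(pEvoApiHandles, handle) == pointer);

    nvEvoDestroyApiHandle(pEvoApiHandles, handle);

    return TRUE;
}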
+ */ + +#ifndef __NVKMS_VRR_H__ +#define __NVKMS_VRR_H__ + +#include "nvkms-types.h" +#include "nvkms-modeset-types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void nvAllocVrrEvo(NVDevEvoPtr pDevEvo); +void nvFreeVrrEvo(NVDevEvoPtr pDevEvo); +void nvDisableVrr(NVDevEvoPtr pDevEvo); +void nvEnableVrr(NVDevEvoPtr pDevEvo, + const struct NvKmsSetModeRequest *pRequest); +void nvCancelVrrFrameReleaseTimers(NVDevEvoPtr pDevEvo); +void nvSetVrrActive(NVDevEvoPtr pDevEvo, NvBool active); +void nvApplyVrrBaseFlipOverrides(const NVDispEvoRec *pDispEvo, NvU32 head, + const NVFlipChannelEvoHwState *pOld, + NVFlipChannelEvoHwState *pNew); +void nvSetNextVrrFlipTypeAndIndex(NVDevEvoPtr pDevEvo, + struct NvKmsFlipReply *reply); +void nvTriggerVrrUnstallMoveCursor(NVDispEvoPtr pDispEvo); +void nvTriggerVrrUnstallSetCursorImage(NVDispEvoPtr pDispEvo, + NvBool ctxDmaChanged); +void nvGetDpyMinRefreshRateValidValues( + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyVRRType vrrType, + const NvU32 edidTimeoutMicroseconds, + NvU32 *minMinRefreshRate, + NvU32 *maxMinRefreshRate); + +NvBool nvDispSupportsVrr(const NVDispEvoRec *pDispEvo); + +NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd); + +#ifdef __cplusplus +}; +#endif + +#endif /* __NVKMS_VRR_H__ */ diff --git a/src/nvidia-modeset/interface/nvkms-api-types.h b/src/nvidia-modeset/interface/nvkms-api-types.h new file mode 100644 index 000000000..0f59c8304 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-api-types.h @@ -0,0 +1,533 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(NVKMS_API_TYPES_H) +#define NVKMS_API_TYPES_H + +#include +#include +#include + +#define NVKMS_MAX_SUBDEVICES NV_MAX_SUBDEVICES + +#define NVKMS_LEFT 0 +#define NVKMS_RIGHT 1 +#define NVKMS_MAX_EYES 2 + +#define NVKMS_MAIN_LAYER 0 +#define NVKMS_OVERLAY_LAYER 1 +#define NVKMS_MAX_LAYERS_PER_HEAD 8 + +#define NVKMS_MAX_PLANES_PER_SURFACE 3 + +#define NVKMS_DP_ADDRESS_STRING_LENGTH 64 + +#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff + +typedef NvU32 NvKmsDeviceHandle; +typedef NvU32 NvKmsDispHandle; +typedef NvU32 NvKmsConnectorHandle; +typedef NvU32 NvKmsSurfaceHandle; +typedef NvU32 NvKmsFrameLockHandle; +typedef NvU32 NvKmsDeferredRequestFifoHandle; +typedef NvU32 NvKmsSwapGroupHandle; +typedef NvU32 NvKmsVblankSyncObjectHandle; + +struct NvKmsSize { + NvU16 width; + NvU16 height; +}; + +struct NvKmsPoint { + NvU16 x; + NvU16 y; +}; + +struct NvKmsSignedPoint { + NvS16 x; + NvS16 y; +}; + +struct NvKmsRect { + NvU16 x; + NvU16 y; + NvU16 width; + NvU16 height; +}; + +/* + * A 3x3 row-major matrix. + * + * The elements are 32-bit single-precision IEEE floating point values. The + * floating point bit pattern should be stored in NvU32s to be passed into the + * kernel. + */ +struct NvKmsMatrix { + NvU32 m[3][3]; +}; + +typedef enum { + NVKMS_CONNECTOR_TYPE_DP = 0, + NVKMS_CONNECTOR_TYPE_VGA = 1, + NVKMS_CONNECTOR_TYPE_DVI_I = 2, + NVKMS_CONNECTOR_TYPE_DVI_D = 3, + NVKMS_CONNECTOR_TYPE_ADC = 4, + NVKMS_CONNECTOR_TYPE_LVDS = 5, + NVKMS_CONNECTOR_TYPE_HDMI = 6, + NVKMS_CONNECTOR_TYPE_USBC = 7, + NVKMS_CONNECTOR_TYPE_DSI = 8, + NVKMS_CONNECTOR_TYPE_DP_SERIALIZER = 9, + NVKMS_CONNECTOR_TYPE_UNKNOWN = 10, + NVKMS_CONNECTOR_TYPE_MAX = NVKMS_CONNECTOR_TYPE_UNKNOWN, +} NvKmsConnectorType; + +static inline +const char *NvKmsConnectorTypeString(const NvKmsConnectorType connectorType) +{ + switch (connectorType) { + case NVKMS_CONNECTOR_TYPE_DP: return "DP"; + case NVKMS_CONNECTOR_TYPE_VGA: return "VGA"; + case NVKMS_CONNECTOR_TYPE_DVI_I: return "DVI-I"; + case NVKMS_CONNECTOR_TYPE_DVI_D: return "DVI-D"; + case NVKMS_CONNECTOR_TYPE_ADC: return "ADC"; + case NVKMS_CONNECTOR_TYPE_LVDS: return "LVDS"; + case NVKMS_CONNECTOR_TYPE_HDMI: return "HDMI"; + case NVKMS_CONNECTOR_TYPE_USBC: return "USB-C"; + case NVKMS_CONNECTOR_TYPE_DSI: return "DSI"; + case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: return "DP-SERIALIZER"; + default: break; + } + return "Unknown"; +} + +typedef enum { + NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA = 0, + NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS = 1, + NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS = 2, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DP = 3, + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI = 4, + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN = 5, + NVKMS_CONNECTOR_SIGNAL_FORMAT_MAX = + NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN, +} NvKmsConnectorSignalFormat; + +/*! + * Description of Notifiers and Semaphores (Non-isochronous (NISO) surfaces). + * + * When flipping, the client can optionally specify a notifier and/or + * a semaphore to use with the flip. The surfaces used for these + * should be registered with NVKMS to get an NvKmsSurfaceHandle. + * + * NvKmsNIsoSurface::offsetInWords indicates the starting location, in + * 32-bit words, within the surface where EVO should write the + * notifier or semaphore. Note that only the first 4096 bytes of a + * surface can be used by semaphores or notifiers; offsetInWords must + * allow for the semaphore or notifier to be written within the first + * 4096 bytes of the surface. 
I.e., this must be satisfied: + * + * ((offsetInWords * 4) + elementSizeInBytes) <= 4096 + * + * Where elementSizeInBytes is: + * + * if NISO_FORMAT_FOUR_WORD*, elementSizeInBytes = 16 + * if NISO_FORMAT_LEGACY, + * if overlay && notifier, elementSizeInBytes = 16 + * else, elementSizeInBytes = 4 + * + * Note that different GPUs support different semaphore and notifier formats. + * Check NvKmsAllocDeviceReply::validNIsoFormatMask to determine which are + * valid for the given device. + * + * Note also that FOUR_WORD and FOUR_WORD_NVDISPLAY are the same size, but + * FOUR_WORD uses a format compatible with display class 907[ce], and + * FOUR_WORD_NVDISPLAY uses a format compatible with c37e (actually defined by + * the NV_DISP_NOTIFIER definition in clc37d.h). + */ +enum NvKmsNIsoFormat { + NVKMS_NISO_FORMAT_LEGACY, + NVKMS_NISO_FORMAT_FOUR_WORD, + NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY, +}; + +enum NvKmsEventType { + NVKMS_EVENT_TYPE_DPY_CHANGED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED, + NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED, + NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED, + NVKMS_EVENT_TYPE_FLIP_OCCURRED, +}; + +typedef enum { + NV_EVO_SCALER_1TAP = 0, + NV_EVO_SCALER_2TAPS = 1, + NV_EVO_SCALER_3TAPS = 2, + NV_EVO_SCALER_5TAPS = 3, + NV_EVO_SCALER_8TAPS = 4, + NV_EVO_SCALER_TAPS_MIN = NV_EVO_SCALER_1TAP, + NV_EVO_SCALER_TAPS_MAX = NV_EVO_SCALER_8TAPS, +} NVEvoScalerTaps; + +/* This structure describes the scaling bounds for a given layer. */ +struct NvKmsScalingUsageBounds { + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 maxVDownscaleFactor; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for maxVDownscaleFactor. + */ + NvU16 maxHDownscaleFactor; + + /* Maximum vertical taps allowed */ + NVEvoScalerTaps vTaps; + + /* Whether vertical upscaling is allowed */ + NvBool vUpscalingAllowed; +}; + +struct NvKmsUsageBounds { + struct { + NvBool usable; + struct NvKmsScalingUsageBounds scaling; + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +/* + * A 3x4 row-major colorspace conversion matrix. + * + * The output color C' is the CSC matrix M times the column vector + * [ R, G, B, 1 ]. + * + * Each entry in the matrix is a signed 2's-complement fixed-point number with + * 3 integer bits and 16 fractional bits. + */ +struct NvKmsCscMatrix { + NvS32 m[3][4]; +}; + +#define NVKMS_IDENTITY_CSC_MATRIX \ + (struct NvKmsCscMatrix){{ \ + { 0x10000, 0, 0, 0 }, \ + { 0, 0x10000, 0, 0 }, \ + { 0, 0, 0x10000, 0 } \ + }} + +/*! + * A color key match bit used in the blend equations and one can select the src + * or dst Color Key when blending. Assert key bit means match, de-assert key + * bit means nomatch. + * + * The src Color Key means using the key bit from the current layer, the dst + * Color Key means using key bit from the previous layer composition stage. The + * src or dst key bit will be inherited by blended pixel for the preparation of + * next blending, as dst Color Key. + * + * src: Forward the color key match bit from the current layer pixel to next layer + * composition stage. + * + * dst: Forward the color key match bit from the previous composition stage + * pixel to next layer composition stage. + * + * disable: Forward “1” to the next layer composition stage as the color key. 
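A hedged sketch of the "scaled by 1024" convention used by NvKmsScalingUsageBounds above; the helper name and round-up choice are illustrative, not the driver's actual check.

static NvBool ExampleVDownscaleAllowed(
    const struct NvKmsScalingUsageBounds *pBounds,
    NvU16 inHeight, NvU16 outHeight)
{
    NvU32 factorTimes1024;

    nvAssert(outHeight != 0);

    /* Downscale factor, scaled by 1024 and rounded up; e.g. a 1.5x
     * downscale yields 1536, matching the example above. */
    factorTimes1024 = ((NvU32)inHeight * 1024 + outHeight - 1) / outHeight;

    return factorTimes1024 <= pBounds->maxVDownscaleFactor;
}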
+ */ +enum NvKmsCompositionColorKeySelect { + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE = 0, + NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC, + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST, +}; + +#define NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS 3 + +/*! + * Composition modes used for surfaces in general. + * The various types of composition are: + * + * Opaque: source pixels are opaque regardless of alpha, + * and will occlude the destination pixel. + * + * Alpha blending: aka opacity, which could be specified + * for a surface in its entirety, or on a per-pixel basis. + * + * Non-premultiplied: alpha value applies to source pixel, + * and also counter-weighs the destination pixel. + * Premultiplied: alpha already applied to source pixel, + * so it only counter-weighs the destination pixel. + * + * Color keying: use a color key structure to decide + * the criteria for matching and compositing. + * (See NVColorKey below.) + */ +enum NvKmsCompositionBlendingMode { + /*! + * Modes that use no other parameters. + */ + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE, + + /*! + * Mode that ignores both per-pixel alpha provided + * by client and the surfaceAlpha, makes source pixel + * totally transparent. + */ + NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT, + + /*! + * Modes that use per-pixel alpha provided by client, + * and the surfaceAlpha must be set to 0. + */ + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA, + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA, + + /*! + * These use both the surface-wide and per-pixel alpha values. + * surfaceAlpha is treated as numerator ranging from 0 to 255 + * of a fraction whose denominator is 255. + */ + NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA, + NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA, +}; + +static inline NvBool +NvKmsIsCompositionModeUseAlpha(enum NvKmsCompositionBlendingMode mode) +{ + return mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA || + mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA; +} + +/*! + * Abstract description of a color key. + * + * a, r, g, and b are component values in the same width as the framebuffer + * values being scanned out. + * + * match[ARGB] defines whether that component is considered when matching the + * color key -- TRUE means that the value of the corresponding component must + * match the given value for the given pixel to be considered a 'key match'; + * FALSE means that the value of that component is not a key match criterion. + */ +typedef struct { + NvU16 a, r, g, b; + NvBool matchA, matchR, matchG, matchB; +} NVColorKey; + +/*! + * Describes the composition parameters for the single layer. + */ +struct NvKmsCompositionParams { + enum NvKmsCompositionColorKeySelect colorKeySelect; + NVColorKey colorKey; + /* + * It is possible to assign different blending mode for match pixels and + * nomatch pixels. blendingMode[0] is used to blend a pixel with the color key + * match bit "0", and blendingMode[1] is used to blend a pixel with the color + * key match bit "1". + * + * But because of the hardware restrictions match and nomatch pixels can + * not use blending mode PREMULT_ALPHA, NON_PREMULT_ALPHA, + * PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA at once. + */ + enum NvKmsCompositionBlendingMode blendingMode[2]; + NvU8 surfaceAlpha; /* Applies to all pixels of entire surface */ + /* + * Defines the composition order. 
A smaller value moves the layer closer to + * the top (away from the background). No need to pick consecutive values, + * requirements are that the value should be different for each of the + * layers owned by the head and the value for the main layer should be + * the greatest one. + * + * Cursor always remains at the top of all other layers, this parameter + * has no effect on cursor. NVKMS assigns default depth to each of the + * supported layers, by default depth of the layer is calculated as + * (NVKMS_MAX_LAYERS_PER_HEAD - index of the layer). If depth is set to + * '0' then default depth value will get used. + */ + NvU8 depth; +}; + +/*! + * Describes the composition capabilities supported by the hardware for + * cursor or layer. It describes supported the color key selects and for each + * of the supported color key selects it describes supported blending modes + * for match and nomatch pixles. + */ +struct NvKmsCompositionCapabilities { + + struct { + /* + * A bitmask of the supported blending modes for match and nomatch + * pixels. It should be the bitwise 'or' of one or more + * NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_*) values. + */ + NvU32 supportedBlendModes[2]; + } colorKeySelect[NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS]; + + /* + * A bitmask of the supported color key selects. + * + * It should be the bitwise 'or' of one or more + * NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_*) + * values. + */ + NvU32 supportedColorKeySelects; +}; + +struct NvKmsLayerCapabilities { + /*! + * Whether Layer supports the window mode. If window mode is supported, + * then clients can set the layer's dimensions so that they're smaller than + * the viewport, and can also change the output position of the layer to a + * non-(0, 0) position. + * + * NOTE: Dimension changes are currently unsupported for the main layer, + * and output position changes for the main layer are currently only + * supported via IOCTL_SET_LAYER_POSITION but not via flips. Support for + * these is coming soon, via changes to flip code. + */ + NvBool supportsWindowMode :1; + + /*! + * Whether layer supports HDR pipe. + */ + NvBool supportsHDR :1; + + + /*! + * Describes the supported Color Key selects and blending modes for + * match and nomatch layer pixels. + */ + struct NvKmsCompositionCapabilities composition; + + /*! + * Which NvKmsSurfaceMemoryFormat enum values are supported by the NVKMS + * device on the given scanout surface layer. + * + * Iff a particular enum NvKmsSurfaceMemoryFormat 'value' is supported, + * then (1 << value) will be set in the appropriate bitmask. + * + * Note that these bitmasks just report the static SW/HW capabilities, + * and are a superset of the formats that IMP may allow. Clients are + * still expected to honor the NvKmsUsageBounds for each head. + */ + NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8); +}; + +/*! + * Surface layouts. + * + * BlockLinear is the NVIDIA GPU native tiling format, arranging pixels into + * blocks or tiles for better locality during common GPU operations. + * + * Pitch is the naive "linear" surface layout with pixels laid out sequentially + * in memory line-by-line, optionally with some padding at the end of each line + * for alignment purposes. 
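Referring back to the NVColorKey description above, a minimal sketch (helper invented) of the per-component match rule: a component participates in matching only when its match flag is TRUE.

static NvBool ExampleColorKeyMatches(const NVColorKey *pKey,
                                     NvU16 a, NvU16 r, NvU16 g, NvU16 b)
{
    return (!pKey->matchA || (a == pKey->a)) &&
           (!pKey->matchR || (r == pKey->r)) &&
           (!pKey->matchG || (g == pKey->g)) &&
           (!pKey->matchB || (b == pKey->b));
}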
+ */ +enum NvKmsSurfaceMemoryLayout { + NvKmsSurfaceMemoryLayoutBlockLinear = 0, + NvKmsSurfaceMemoryLayoutPitch = 1, +}; + +static inline const char *NvKmsSurfaceMemoryLayoutToString( + enum NvKmsSurfaceMemoryLayout layout) +{ + switch (layout) { + default: + return "Unknown"; + case NvKmsSurfaceMemoryLayoutBlockLinear: + return "BlockLinear"; + case NvKmsSurfaceMemoryLayoutPitch: + return "Pitch"; + } +} + +typedef enum { + MUX_STATE_GET = 0, + MUX_STATE_INTEGRATED = 1, + MUX_STATE_DISCRETE = 2, + MUX_STATE_UNKNOWN = 3, +} NvMuxState; + +enum NvKmsRotation { + NVKMS_ROTATION_0 = 0, + NVKMS_ROTATION_90 = 1, + NVKMS_ROTATION_180 = 2, + NVKMS_ROTATION_270 = 3, + NVKMS_ROTATION_MIN = NVKMS_ROTATION_0, + NVKMS_ROTATION_MAX = NVKMS_ROTATION_270, +}; + +struct NvKmsRRParams { + enum NvKmsRotation rotation; + NvBool reflectionX; + NvBool reflectionY; +}; + +/*! + * Convert each possible NvKmsRRParams to a unique integer [0..15], + * so that we can describe possible NvKmsRRParams with an NvU16 bitmask. + * + * E.g. + * rotation = 0, reflectionX = F, reflectionY = F == 0|0|0 == 0 + * ... + * rotation = 270, reflectionX = T, reflectionY = T == 3|4|8 == 15 + */ +static inline NvU8 NvKmsRRParamsToCapBit(const struct NvKmsRRParams *rrParams) +{ + NvU8 bitPosition = (NvU8)rrParams->rotation; + if (rrParams->reflectionX) { + bitPosition |= NVBIT(2); + } + if (rrParams->reflectionY) { + bitPosition |= NVBIT(3); + } + return bitPosition; +} + +/* + * NVKMS_MEMORY_ISO is used to tag surface memory that will be accessed via + * display's isochronous interface. Examples of this type of memory are pixel + * data and LUT entries. + * + * NVKMS_MEMORY_NISO is used to tag surface memory that will be accessed via + * display's non-isochronous interface. Examples of this type of memory are + * semaphores and notifiers. + */ +typedef enum { + NVKMS_MEMORY_ISO = 0, + NVKMS_MEMORY_NISO = 1, +} NvKmsMemoryIsoType; + +typedef struct { + NvBool coherent; + NvBool noncoherent; +} NvKmsDispIOCoherencyModes; + +#endif /* NVKMS_API_TYPES_H */ diff --git a/src/nvidia-modeset/interface/nvkms-api.h b/src/nvidia-modeset/interface/nvkms-api.h new file mode 100644 index 000000000..dcae93ee8 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-api.h @@ -0,0 +1,4063 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(NVKMS_API_H) +#define NVKMS_API_H + +/* + * NVKMS API + * + * + * All file operations described in this header file go through a + * single device file that has system-wide scope. The individual + * ioctl request data structures specify the objects to which the + * request is targeted. + * + * + * OBJECTS + * + * The NVKMS API is organized into several objects: + * + * - A device, which corresponds to an RM device. This can either be + * a single GPU, or multiple GPUs linked into SLI. Each GPU is + * called a "subdevice". The subdevices used by an NVKMS device are + * reported in NvKmsAllocDeviceReply::subDeviceMask. + * + * A device is specified by a deviceHandle, returned by + * NVKMS_IOCTL_ALLOC_DEVICE. + * + * - A disp, which represents an individually programmable display + * engine of a GPU. In SLI Mosaic, there is one disp per physical + * GPU. In all other configurations there is one disp for the + * entire device. A disp is specified by a (deviceHandle, + * dispHandle) duple. A dispHandle is only unique within a single + * device: multiple devices may have disps with the same dispHandle + * value. + * + * A disp contains one or more subdevices, as reported by + * NvKmsQueryDispReply::subDeviceMask. A disp will only have + * multiple subdevices in cases where the device only has a single + * disp. Any subdevice specified in + * NvKmsQueryDispReply::subDeviceMask will also be in + * NvKmsAllocDeviceReply::subDeviceMask. + * + * - A connector, which represents an electrical connection to the + * GPU. E.g., a physical DVI-I connector has two NVKMS connector + * objects (a VGA NVKMS connector and a TMDS NVKMS connector). + * However, a physical DisplayPort connector has one NVKMS connector + * object, even if there is a tree of DisplayPort1.2 Multistream + * monitors connected to it. + * + * Connectors are associated with a specific disp. A connector is + * specified by a (deviceHandle, dispHandle, connectorHandle) + * triplet. A connectorHandle is only unique within a single disp: + * multiple disps may have connectors with the same connectorHandle + * value. + * + * - A dpy, which represents a connection of a display device to the + * system. Multiple dpys can map to the same connector in the case + * of DisplayPort1.2 MultiStream. A dpy is specified by a + * (deviceHandle, dispHandle, dpyId) triplet. A dpyId is only + * unique within a single disp: multiple disps may have dpys with + * the same dpyId value. + * + * - A surface, which represents memory to be scanned out. Surfaces + * should be allocated by resman, and then registered and + * unregistered with NVKMS. The NvKmsSurfaceHandle value of 0 is + * reserved to mean no surface. + * + * NVKMS clients should treat the device, disp, connector, and surface + * handles as opaque values. They are specific to the file descriptor + * through which a client allocated and queried them. Dpys should + * also be treated as opaque, though they can be passed between + * clients. + * + * NVKMS clients initialize NVKMS by allocating an NVKMS device. The + * device can either be a single GPU, or an SLI group. It is expected + * that the client has already attached/linked the GPUs through + * resman and created a resman device. + * + * NVKMS device allocation returns a device handle, the disp handles, + * and capabilities of the device. 
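+ *
+ * Pseudocode sketch of a typical start-up sequence (illustrative only;
+ * error handling omitted, and 'deviceId' is assumed to come from the
+ * client's own resman setup):
+ *
+ *     struct NvKmsAllocDeviceParams alloc;
+ *     memset(&alloc);
+ *     strcpy(alloc.request.versionString, NV_VERSION_STRING);
+ *     alloc.request.deviceId = deviceId;
+ *     ioctl(&alloc);                       // NVKMS_IOCTL_ALLOC_DEVICE
+ *
+ *     for (i = 0; i < alloc.reply.numDisps; i++) {
+ *         struct NvKmsQueryDispParams query;
+ *         memset(&query);
+ *         query.request.deviceHandle = alloc.reply.deviceHandle;
+ *         query.request.dispHandle = alloc.reply.dispHandles[i];
+ *         ioctl(&query);                   // NVKMS_IOCTL_QUERY_DISP
+ *         // query.reply lists this disp's connectors and valid dpys
+ *     }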
+ * + * + * MODE VALIDATION + * + * When a client requests to set a mode via NVKMS_IOCTL_SET_MODE, + * NVKMS will validate the mode at that point in time, honoring the + * NvKmsModeValidationParams specified as part of the request. + * + * Clients can use NVKMS_IOCTL_VALIDATE_MODE to test if a mode is valid. + * + * Clients can use NVKMS_IOCTL_VALIDATE_MODE_INDEX to get the list of + * modes that NVKMS currently considers valid for the dpy (modes from + * the EDID, etc). + * + * IMPLEMENTATION NOTE: the same mode validation common code will be + * used in each of NVKMS_IOCTL_SET_MODE, NVKMS_IOCTL_VALIDATE_MODE, + * and NVKMS_IOCTL_VALIDATE_MODE_INDEX, but NVKMS won't generally maintain + * a "mode pool" with an exhaustive list of the allowable modes for a + * dpy. + * + * + * DYNAMIC DPY HANDLING + * + * Dynamic dpys (namely, DisplayPort multistream dpys) share the NVDpyId + * namespace with non-dynamic dpys on the same disp. However, dynamic dpys will + * not be listed in NvKmsQueryDispReply::validDpys. Instead, dynamic dpys are + * added and removed from the system dynamically. + * + * When a dynamic dpy is first connected, NVKMS will allocate a new NVDpyId for + * it and generate an NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED event. When the + * dynamic dpy is disconnected, NVKMS will generate an + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED event. Whether the corresponding + * NVDpyId is immediately freed and made available for subsequent dynamic dpys + * depends on client behavior. + * + * Clients may require that a dynamic NVDpyId persist even after the dynamic dpy + * is disconnected. Clients who require this can use + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST. NVKMS will retain the NVDpyId + * until the dynamic dpy is disconnected and there are no clients who have + * declared "interest" on the particular dynamic dpy. While the NVDpyId + * persists, it will be used for any monitor that is connected at the same + * dynamic dpy address (i.e., port address, in the case of DP MST). + * + * + * FILE DESCRIPTOR HANDLING + * + * With the exception of NVDpyIds, all handles should be assumed to be + * specific to the current file descriptor on which the ioctls are + * performed. + * + * Multiple devices can be allocated on the same file descriptor. + * E.g., to drive the display of multiple GPUs. + * + * If a file descriptor is closed prematurely, either explicitly by + * the client or implicitly by the operating system because the client + * process was terminated, NVKMS will perform an + * NVKMS_IOCTL_FREE_DEVICE for any devices currently allocated by the + * client on the closed file descriptor. + * + * NVKMS file descriptors are normally used as the first argument of + * ioctl(2). However, NVKMS file descriptors are also used for + * granting surfaces (see NVKMS_IOCTL_GRANT_SURFACE) or permissions + * (see NVKMS_IOCTL_GRANT_PERMISSIONS). Any given NVKMS file + * descriptor can only be used for one of these uses. + * + * QUESTIONS: + * + * - Is there any reason for errors to be returned through a status field + * in the Param structures, rather than the ioctl(2) return value? + * + * - Is it too asymmetric that NVKMS_IOCTL_SET_MODE can set a + * mode across heads/disps, but other requests (e.g., + * NVKMS_IOCTL_SET_CURSOR_IMAGE) operate on a single head? + * + * + * IOCTL PARAMETER ORGANIZATION + * + * For table-driven processing of ioctls, it is useful for all ioctl + * parameters to follow the same convention: + * + * struct NvKmsFooRequest { + * (...) 
+ * }; + * + * struct NvKmsFooReply { + * (...) + * }; + * + * struct NvKmsFooParams { + * struct NvKmsFooRequest request; //! in + * struct NvKmsFooReply reply; //! out + * }; + * + * I.e., all ioctl parameter structures NvKmsFooParams should have + * "request" and "reply" fields, with types "struct NvKmsFooRequest" + * and "struct NvKmsFooReply". C doesn't technically support empty + * structures, so the convention is to place a "padding" NvU32 in + * request or reply structures that would otherwise be empty. + */ + +#include "nvtypes.h" +#include "nvlimits.h" +#include "nv_dpy_id.h" +#include "nv_mode_timings.h" +#include "nvkms-api-types.h" +#include "nvgputypes.h" /* NvGpuSemaphore */ +#include "nvkms-format.h" + +/* + * The NVKMS ioctl commands. See the ioctl parameter declarations + * later in this header file for an explanation of each ioctl command. + */ +enum NvKmsIoctlCommand { + NVKMS_IOCTL_ALLOC_DEVICE, + NVKMS_IOCTL_FREE_DEVICE, + NVKMS_IOCTL_QUERY_DISP, + NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, + NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, + NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, + NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, + NVKMS_IOCTL_VALIDATE_MODE_INDEX, + NVKMS_IOCTL_VALIDATE_MODE, + NVKMS_IOCTL_SET_MODE, + NVKMS_IOCTL_SET_CURSOR_IMAGE, + NVKMS_IOCTL_MOVE_CURSOR, + NVKMS_IOCTL_SET_LUT, + NVKMS_IOCTL_IDLE_BASE_CHANNEL, + NVKMS_IOCTL_FLIP, + NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, + NVKMS_IOCTL_REGISTER_SURFACE, + NVKMS_IOCTL_UNREGISTER_SURFACE, + NVKMS_IOCTL_GRANT_SURFACE, + NVKMS_IOCTL_ACQUIRE_SURFACE, + NVKMS_IOCTL_RELEASE_SURFACE, + NVKMS_IOCTL_SET_DPY_ATTRIBUTE, + NVKMS_IOCTL_GET_DPY_ATTRIBUTE, + NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_SET_DISP_ATTRIBUTE, + NVKMS_IOCTL_GET_DISP_ATTRIBUTE, + NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_QUERY_FRAMELOCK, + NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, + NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, + NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES, + NVKMS_IOCTL_GET_NEXT_EVENT, + NVKMS_IOCTL_DECLARE_EVENT_INTEREST, + NVKMS_IOCTL_CLEAR_UNICAST_EVENT, + NVKMS_IOCTL_GET_3DVISION_DONGLE_PARAM_BYTES, + NVKMS_IOCTL_SET_3DVISION_AEGIS_PARAMS, + NVKMS_IOCTL_SET_LAYER_POSITION, + NVKMS_IOCTL_GRAB_OWNERSHIP, + NVKMS_IOCTL_RELEASE_OWNERSHIP, + NVKMS_IOCTL_GRANT_PERMISSIONS, + NVKMS_IOCTL_ACQUIRE_PERMISSIONS, + NVKMS_IOCTL_REVOKE_PERMISSIONS, + NVKMS_IOCTL_QUERY_DPY_CRC32, + NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO, + NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO, + NVKMS_IOCTL_ALLOC_SWAP_GROUP, + NVKMS_IOCTL_FREE_SWAP_GROUP, + NVKMS_IOCTL_JOIN_SWAP_GROUP, + NVKMS_IOCTL_LEAVE_SWAP_GROUP, + NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST, + NVKMS_IOCTL_GRANT_SWAP_GROUP, + NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, + NVKMS_IOCTL_RELEASE_SWAP_GROUP, + NVKMS_IOCTL_SWITCH_MUX, + NVKMS_IOCTL_GET_MUX_STATE, + NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, + NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, + NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, +}; + + +#define NVKMS_NVIDIA_DRIVER_VERSION_STRING_LENGTH 32 +#define NVKMS_MAX_CONNECTORS_PER_DISP 16 +#define NVKMS_MAX_HEADS_PER_DISP 4 +#define NVKMS_MAX_GPUS_PER_FRAMELOCK 4 +#define NVKMS_MAX_DEVICE_REGISTRY_KEYS 16 +#define NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN 32 +#define NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD 6 + + +/* + * There can be at most one SwapGroup per-head, per-disp (and, + * in the extreme, there is one disp per-GPU). 
+ */ +#define NVKMS_MAX_SWAPGROUPS (NVKMS_MAX_HEADS_PER_DISP * NV_MAX_DEVICES) + +#define NVKMS_MAX_VALID_SYNC_RANGES 8 + +#define NVKMS_DPY_NAME_SIZE 128 +#define NVKMS_GUID_SIZE 16 +#define NVKMS_3DVISION_DONGLE_PARAM_BYTES 20 +#define NVKMS_GPU_STRING_SIZE 80 + +#define NVKMS_LOG2_LUT_ARRAY_SIZE 10 +#define NVKMS_LUT_ARRAY_SIZE (1 << NVKMS_LOG2_LUT_ARRAY_SIZE) +#define NVKMS_VRR_SEMAPHORE_SURFACE_SIZE 1024 + +/* + * The GUID string has the form: + * XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX + * Two Xs per byte, plus four dashes and a NUL byte. + */ +#define NVKMS_GUID_STRING_SIZE ((NVKMS_GUID_SIZE * 2) + 5) + +#define NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH 2048 +#define NVKMS_EDID_INFO_STRING_LENGTH (32 * 1024) + +/*! + * A base EDID is 128 bytes, with 128 bytes per extension block. 2048 + * should be large enough for any EDID we see. + */ +#define NVKMS_EDID_BUFFER_SIZE 2048 + +/*! + * Description of modetimings. + * + * YUV420 modes require special care since some GPUs do not support YUV420 + * scanout in hardware. When timings::yuv420Mode is NV_YUV420_SW, NVKMS will + * set a mode with horizontal values that are half of what are described in + * NvKmsMode, and not enable any color space conversion. When clients allocate + * a surface and populate it with content, the region of interest within the + * surface should be half the width of the NvKmsMode, and the surface content + * should be RGB->YUV color space converted, and decimated from 4:4:4 to 4:2:0. + * + * The NvKmsMode and viewPortOut, specified by the NVKMS client, + * should be in "full" horizontal space, but the surface and + * viewPortIn should be in "half" horizontal space. + */ +struct NvKmsMode { + NvModeTimings timings; + char name[32]; +}; + +/*! + * Mode validation override bit flags, for use in + * NvKmsModeValidationParams::overrides. + */ +enum NvKmsModeValidationOverrides { + NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK = (1 << 0), + NVKMS_MODE_VALIDATION_NO_EDID_MAX_PCLK_CHECK = (1 << 1), + NVKMS_MODE_VALIDATION_NO_HORIZ_SYNC_CHECK = (1 << 2), + NVKMS_MODE_VALIDATION_NO_VERT_REFRESH_CHECK = (1 << 3), + NVKMS_MODE_VALIDATION_NO_EDID_DFP_MAX_SIZE_CHECK = (1 << 4), + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK = (1 << 5), + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS = (1 << 6), + NVKMS_MODE_VALIDATION_NO_TOTAL_SIZE_CHECK = (1 << 7), + NVKMS_MODE_VALIDATION_NO_DUAL_LINK_DVI_CHECK = (1 << 8), + NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK = (1 << 9), + NVKMS_MODE_VALIDATION_ALLOW_NON_3DVISION_MODES = (1 << 10), + NVKMS_MODE_VALIDATION_ALLOW_NON_EDID_MODES = (1 << 11), + NVKMS_MODE_VALIDATION_ALLOW_NON_HDMI3D_MODES = (1 << 12), + NVKMS_MODE_VALIDATION_NO_MAX_SIZE_CHECK = (1 << 13), + NVKMS_MODE_VALIDATION_NO_HDMI2_CHECK = (1 << 14), + NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK = (1 << 15), + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS = (1 << 16), + NVKMS_MODE_VALIDATION_ALLOW_DP_INTERLACED = (1 << 17), + NVKMS_MODE_VALIDATION_NO_INTERLACED_MODES = (1 << 18), +}; + +/*! + * Frequency information used during mode validation (for HorizSync + * and VertRefresh) can come from several possible sources. NVKMS + * selects the frequency information by prioritizing the input sources + * and then reports the selected source. + * + * Without client input, NVKMS will use frequency ranges from the + * EDID, if available. If there is no EDID, NVKMS will fall back to + * builtin conservative defaults. 
+ * + * The client can specify frequency ranges that are used instead of + * anything in the EDID (_CLIENT_BEFORE_EDID), or frequency ranges + * that are used only if no EDID-reported ranges are available + * (_CLIENT_AFTER_EDID). + */ +enum NvKmsModeValidationFrequencyRangesSource { + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_NONE = 0, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID = 1, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID = 2, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID = 3, + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CONSERVATIVE_DEFAULTS = 4, +}; + + +/*! + * Mode validation parameters. + */ +struct NvKmsModeValidationFrequencyRanges { + enum NvKmsModeValidationFrequencyRangesSource source; + NvU32 numRanges; + struct { + NvU32 high; + NvU32 low; + } range[NVKMS_MAX_VALID_SYNC_RANGES]; +}; + +struct NvKmsModeValidationValidSyncs { + + /*! If TRUE, ignore frequency information from the EDID. */ + NvBool ignoreEdidSource; + + /*! values are in Hz */ + struct NvKmsModeValidationFrequencyRanges horizSyncHz; + + /*! values are in 1/1000 Hz */ + struct NvKmsModeValidationFrequencyRanges vertRefreshHz1k; +}; + +enum NvKmsStereoMode { + NVKMS_STEREO_DISABLED = 0, + NVKMS_STEREO_NVIDIA_3D_VISION, + NVKMS_STEREO_NVIDIA_3D_VISION_PRO, + NVKMS_STEREO_HDMI_3D, + NVKMS_STEREO_OTHER, +}; + +struct NvKmsModeValidationParams { + NvBool verboseModeValidation; + NvBool moreVerboseModeValidation; + + /*! + * Normally, if a mode supports both YUV 4:2:0 and RGB 4:4:4, + * NVKMS will prefer RGB 4:4:4 if both the monitor and the GPU + * support it. Use preferYUV420 to override that preference. + */ + NvBool preferYUV420; + + enum NvKmsStereoMode stereoMode; + NvU32 overrides; + + struct NvKmsModeValidationValidSyncs validSyncs; + + /*! + * Normally, NVKMS will determine on its own whether to use Display + * Stream Compression (DSC). Use forceDsc to force NVKMS to use DSC, + * when the GPU supports it. + */ + NvBool forceDsc; + + /*! + * When enabled, Display Stream Compression (DSC) has an + * associated bits/pixel rate, which NVKMS normally computes. + * Use dscOverrideBitsPerPixelX16 to override the DSC bits/pixel rate. + * This is in units of 1/16 of a bit per pixel. + * + * This target bits/pixel rate should be >= 8.0 and <= 32.0, i.e. the valid + * bits/pixel values are members of the sequence 8.0, 8.0625, 8.125, ..., + * 31.9375, 32.0. You can convert bits/pixel value to + * the dscOverrideBitsPerPixelX16 as follow: + * + * +------------------+--------------------------------------------+ + * | bits_per_pixel | dscBitsPerPixelX16 = bits_per_pixel * 16 | + * +------------------+--------------------------------------------+ + * | 8.0 | 128 | + * | 8.0625 | 129 | + * | . | . | + * | . | . | + * | . | . | + * | 31.9375 | 511 | + * | 32.0 | 512 | + * +------------------+--------------------------------------------+ + * + * If the specified dscOverrideBitsPerPixelX16 is out of range, + * then mode validation may fail. + * + * When dscOverrideBitsPerPixelX16 is 0, NVKMS compute the rate itself. + */ + NvU32 dscOverrideBitsPerPixelX16; +}; + +/*! + * The list of pixelShift modes. + */ +enum NvKmsPixelShiftMode { + NVKMS_PIXEL_SHIFT_NONE = 0, + NVKMS_PIXEL_SHIFT_4K_TOP_LEFT, + NVKMS_PIXEL_SHIFT_4K_BOTTOM_RIGHT, + NVKMS_PIXEL_SHIFT_8K, +}; + +/*! + * The available resampling methods used when viewport scaling is requested. 
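+ * (Viewport scaling comes into play when, for example, a head's
+ * viewPortSizeIn differs from the size of its viewPortOut.)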
+ */ +enum NvKmsResamplingMethod { + NVKMS_RESAMPLING_METHOD_BILINEAR = 0, + NVKMS_RESAMPLING_METHOD_BICUBIC_TRIANGULAR, + NVKMS_RESAMPLING_METHOD_BICUBIC_BELL_SHAPED, + NVKMS_RESAMPLING_METHOD_BICUBIC_BSPLINE, + NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_TRIANGULAR, + NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BELL_SHAPED, + NVKMS_RESAMPLING_METHOD_BICUBIC_ADAPTIVE_BSPLINE, + NVKMS_RESAMPLING_METHOD_NEAREST, + NVKMS_RESAMPLING_METHOD_DEFAULT = NVKMS_RESAMPLING_METHOD_BILINEAR, +}; + +enum NvKmsWarpMeshDataType { + NVKMS_WARP_MESH_DATA_TYPE_TRIANGLES_XYUVRQ, + NVKMS_WARP_MESH_DATA_TYPE_TRIANGLE_STRIP_XYUVRQ, +}; + +/*! + * Description of a cursor image on a single head; this is used by any + * NVKMS request that needs to specify the cursor image. + */ +struct NvKmsSetCursorImageCommonParams { + /*! The surface to display in the cursor. */ + NvKmsSurfaceHandle surfaceHandle[NVKMS_MAX_EYES]; + /*! + * The cursor composition parameters gets read and applied only if the + * specified cursor surface is not null. + */ + struct NvKmsCompositionParams cursorCompParams; +}; + + +/*! + * Description of the cursor position on a single head; this is used + * by any NVKMS request that needs to specify the cursor position. + * + * x,y are relative to the current viewPortIn configured on the head. + */ +struct NvKmsMoveCursorCommonParams { + NvS16 x; /*! in */ + NvS16 y; /*! in */ +}; + +/*! + * Per-component arrays of NvU16s describing the LUT; used for both the input + * LUT and output LUT. + */ +struct NvKmsLutRamps { + NvU16 red[NVKMS_LUT_ARRAY_SIZE]; /*! in */ + NvU16 green[NVKMS_LUT_ARRAY_SIZE]; /*! in */ + NvU16 blue[NVKMS_LUT_ARRAY_SIZE]; /*! in */ +}; + +/*! + * Description of the main layer LUT on a single head; this is used by any NVKMS + * request that needs to specify the LUT. + */ +struct NvKmsSetInputLutParams { + NvBool specified; + NvU32 depth; /*! used bits per pixel (8, 15, 16, 24, 30) */ + + /*! + * The first and last elements (inclusive) in the color arrays to + * use. Valid values are in the range [0,N], where N is a + * function of depth: + * + * Depth N + * 8 256 + * 15 32 + * 16 64 + * 24 256 + * 30 1024 + * + * 'start' is the first element in the color arrays to use. + */ + NvU32 start; + + /*! + * 'end' is the last element (inclusive) in the color arrays to + * use. If end == 0, this command will disable the HW LUT for + * this head. + * + * The other fields in this structure, besides 'specified', are ignored if + * end == 0. + */ + NvU32 end; + + /*! + * Pointer to struct NvKmsLutRamps describing the LUT. + * Elements [start,end] will be used. + * + * Each entry in the input LUT has valid values in the range [0, 65535]. + * However, on pre-Turing GPUs only 11 bits are significant; NVKMS will + * convert values in this range into the appropriate internal format. + * + * Use nvKmsPointerToNvU64() to assign pRamps. + */ + NvU64 pRamps NV_ALIGN_BYTES(8); +}; + + +/*! + * Description of the output LUT on a single head; this is used by any NVKMS + * request that needs to specify the LUT. + * + * Unlike the input LUT: + * - specifying the output LUT updates all values at once. + * + * Each entry in the output LUT has valid values in the range [0, 65535]. + * However, only 11 bits are significant; NVKMS will convert values in this + * range into the appropriate internal format. + */ +struct NvKmsSetOutputLutParams { + NvBool specified; + NvBool enabled; + + /*! + * Pointer to struct NvKmsLutRamps containing the actual LUT data, if + * required. 
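+     *
+     * For example (illustrative; 'ramps' and 'lut' are hypothetical client
+     * variables):
+     *
+     *     static struct NvKmsLutRamps ramps;
+     *     // ... fill ramps.red[], ramps.green[], and ramps.blue[] ...
+     *     lut.output.specified = NV_TRUE;
+     *     lut.output.enabled = NV_TRUE;
+     *     lut.output.pRamps = nvKmsPointerToNvU64(&ramps);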
+ * Use nvKmsPointerToNvU64() to assign pRamps. + */ + NvU64 pRamps NV_ALIGN_BYTES(8); +}; + +/*! + * Description of the LUT on a single head; this is used by any NVKMS + * request that needs to specify the LUT. + */ +struct NvKmsSetLutCommonParams { + struct NvKmsSetInputLutParams input; + struct NvKmsSetOutputLutParams output; + + NvBool synchronous; /*! block until the LUT update is complete */ +}; + +struct NvKmsNIsoSurface { + NvKmsSurfaceHandle surfaceHandle; + enum NvKmsNIsoFormat format; + NvU16 offsetInWords; +}; + +struct NvKmsCompletionNotifierDescription { + struct NvKmsNIsoSurface surface; + NvBool awaken; +}; + +struct NvKmsSemaphore { + struct NvKmsNIsoSurface surface; + NvU32 value; +}; + +enum NvKmsSyncptType { + NVKMS_SYNCPT_TYPE_NONE, + NVKMS_SYNCPT_TYPE_RAW, + NVKMS_SYNCPT_TYPE_FD, +}; + +struct NvKmsSyncpt { + enum NvKmsSyncptType type; + union { + int fd; + struct { + NvU32 id; + NvU32 value; + } raw; + } u; +}; + +struct NvKmsChannelSyncObjects { + /* + * If useSyncpt is set to FALSE, clients can provide an acquisition and/or + * release semaphore via the 'syncObjects.semaphores' struct. + * + * If NvKmsAllocDeviceReply::supportsIndependentAcqRelSemaphore is + * FALSE, then 'syncObjects.semaphores.acquire.surface' must be the same + * as 'syncObjects.semaphores.release.surface'. In other words, the same + * exact semaphore surface must be used for both acquire and release. + * + * If NvKmsAllocDeviceReply::supportsIndependentAcqRelSemaphore is + * TRUE, then the client is allowed to provide different semaphore + * surfaces for acquire and release. + * + * If useSyncpt is set to TRUE, clients can provide a pre-syncpt that they + * want the display engine to wait on before scanning out from the given + * buffer, and can specify that they want NVKMS to return a post-syncpt + * that they can wait on, via the 'syncObjects.syncpts' struct. + * + * The post-syncpt that NVKMS returns will be signaled once the + * buffer that was activated by this flip is displaced. As a typical + * example: + * - Client flips buffer A, and requests a post-syncpt PS. + * - Buffer A becomes active at the next frame boundary, and display + * starts scanning out buffer A. + * - Client flips buffer B. + * - Once the UPDATE for the buffer B flip is processed and display + * has finished sending the last pixel of buffer A to precomp for + * the current frame, post-syncpt PS will get signaled. + * + * Clients can use this option iff + * NvKmsAllocDeviceReply::supportsSyncpts is TRUE. + */ + NvBool useSyncpt; + + union { + struct { + struct NvKmsSemaphore acquire; + struct NvKmsSemaphore release; + } semaphores; + + struct { + struct NvKmsSyncpt pre; + enum NvKmsSyncptType requestedPostType; + } syncpts; + } u; +}; + +enum NvKmsOutputTf { + /* + * NVKMS itself won't apply any OETF (clients are still + * free to provide a custom OLUT) + */ + NVKMS_OUTPUT_TF_NONE = 0, + NVKMS_OUTPUT_TF_PQ = 1, +}; + +/*! + * HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec. + * This is expected to match exactly with the spec. + */ +struct NvKmsHDRStaticMetadata { + /*! + * Color primaries of the data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. + */ + struct { + NvU16 x, y; + } displayPrimaries[3]; + + /*! + * White point of colorspace data. + * These are coded as unsigned 16-bit values in units of 0.00002, + * where 0x0000 represents zero and 0xC350 represents 1.0000. 
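+     *
+     * For example, the D65 white point (x = 0.3127, y = 0.3290) would be
+     * coded as x = 0.3127 / 0.00002 = 15635 (0x3D13) and
+     * y = 0.3290 / 0.00002 = 16450 (0x4042).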
+ */ + struct { + NvU16 x, y; + } whitePoint; + + /** + * Maximum mastering display luminance. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxDisplayMasteringLuminance; + + /*! + * Minimum mastering display luminance. + * This value is coded as an unsigned 16-bit value in units of + * 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF + * represents 6.5535 cd/m2. + */ + NvU16 minDisplayMasteringLuminance; + + /*! + * Maximum content light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxCLL; + + /*! + * Maximum frame-average light level. + * This value is coded as an unsigned 16-bit value in units of 1 cd/m2, + * where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2. + */ + NvU16 maxFALL; +}; + +enum NvKmsInputColorSpace { + /* Unknown colorspace; no de-gamma will be applied */ + NVKMS_SURFACE_COLORSPACE_NONE = 0, +}; + +/*! + * Description of how to flip on a single head. + * + * viewPortIn::point describes the position of the viewPortIn that + * should be scaled to the viewPortOut of the head. The + * viewPortSizeIn is specified by NvKmsSetModeOneHeadRequest. Note + * that viewPortIn::point is in desktop coordinate space, and + * therefore applies across all layers. + * + * For YUV420 modes, the surfaces and position should be in "half" + * horizontal space. See the explanation in NvKmsMode. + * + * If 'specified' is FALSE for any of the layers, then the current + * hardware value is used. + */ +struct NvKmsFlipCommonParams { + + struct { + NvBool specified; + struct NvKmsPoint point; + } viewPortIn; + + struct { + struct NvKmsSetCursorImageCommonParams image; + NvBool imageSpecified; + + struct NvKmsMoveCursorCommonParams position; + NvBool positionSpecified; + } cursor; + + /* + * Set the output transfer function. + * + * If output transfer function is HDR and staticMetadata is disabled + * for all the layers, flip request will be rejected. + * + * If output transfer function is HDR and staticMetadata is enabled + * for any of the layers, HDR output will be enabled. In this case, + * output lut values specified during modeset will be ignored and + * output lut will be set with the specified HDR transfer function. + * + * If output transfer function is SDR and staticMetadata is enabled, + * HDR content for that layer will be tonemapped to the SDR output + * range. + */ + struct { + enum NvKmsOutputTf val; + NvBool specified; + } tf; + + struct { + struct { + NvKmsSurfaceHandle handle[NVKMS_MAX_EYES]; + struct NvKmsRRParams rrParams; + NvBool specified; + } surface; + + /* + * sizeIn/sizeOut can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE. + */ + struct { + struct NvKmsSize val; + NvBool specified; + } sizeIn; + + struct { + struct NvKmsSize val; + NvBool specified; + } sizeOut; + + /* + * Set the position of the layer, relative to the upper left + * corner of the surface. This controls the same state as + * NVKMS_IOCTL_SET_LAYER_POSITION. + * + * This field can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE. 
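+         *
+         * For example (illustrative values): to present a 640x480 layer with
+         * its upper-left corner placed at (100, 50), a client could set
+         * sizeIn and sizeOut to 640x480 and outputPosition to (100, 50),
+         * marking each of those fields as specified.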
+ */ + struct { + struct NvKmsSignedPoint val; + NvBool specified; + } outputPosition; + + struct { + struct NvKmsCompletionNotifierDescription val; + NvBool specified; + } completionNotifier; + + struct { + struct NvKmsChannelSyncObjects val; + + /* If 'specified' is FALSE, then the current hardware value is used. */ + NvBool specified; + } syncObjects; + + /* + * If 'maxDownscaleFactors::specified' is true, nvkms will set the + * max H/V downscale usage bounds to the values specified in + * 'maxDownscaleFactors::horizontal' and 'maxDownscaleFactors::vertical'. + * + * If the 'maxDownscaleFactors::specified' values are within the bounds + * of 'NvKmsSetModeOneHeadReply::guaranteedUsage', then clients can expect + * the flip to succeed. If the 'maxDownscaleFactors::specified' values are + * beyond the bounds of 'NvKmsSetModeOneHeadReply::guaranteedUsage' but + * within 'NvKmsSetModeOneHeadReply::possibleUsage', then the request may + * legitimately fail due to insufficient display bandwidth and clients + * need to be prepared to handle that flip request failure. + * + * If 'maxDownscaleFactors::specified' is false, nvkms will calculate max + * H/V downscale factor by quantizing the range. E.g., max H/V downscale + * factor supported by HW is 4x for 5-tap and 2x for 2-tap mode. If + * 5-tap mode is required, the target usage bound that nvkms will + * attempt to program will either allow up to 2x downscaling, or up to + * 4x downscaling. If 2-tap mode is required, the target usage bound + * that NVKMS will attempt to program will allow up to 2x downscaling. + * Example: to downscale from 4096x2160 -> 2731x864 in 5-tap mode, + * NVKMS would specify up to 2x for the H downscale bound (required is + * 1.5x), and up to 4x for the V downscale bound (required is 2.5x). + */ + struct { + /* + * Maximum vertical downscale factor (scaled by 1024) + * + * For example, if the downscale factor is 1.5, then maxVDownscaleFactor + * would be 1.5 x 1024 = 1536. + */ + NvU16 vertical; + + /* + * Maximum horizontal downscale factor (scaled by 1024) + * + * See the example above for vertical. + */ + NvU16 horizontal; + + NvBool specified; + } maxDownscaleFactors; + + NvBool tearing; + + /* + * When true, we will flip to this buffer whenever the current eye is + * finished scanning out. Otherwise, this flip will only execute after + * both eyes have finished scanout. + * + * Note that if this is FALSE and a vsynced stereo flip is requested, + * the buffers in this flip will be displayed for minPresentInterval*2 + * vblanks, one for each eye. + * + * This flag cannot be used for the overlay layer. + */ + NvBool perEyeStereoFlip; + + /* When non-zero, block the flip until PTIMER >= timeStamp. */ + NvU64 timeStamp NV_ALIGN_BYTES(8); + NvU8 minPresentInterval; + + /* This field cannot be used for the main layer right now. */ + struct { + struct NvKmsCompositionParams val; + NvBool specified; + } compositionParams; + + /* + * Color-space conversion matrix applied to the layer before + * compositing. + * + * If csc::specified is TRUE and csc::useMain is TRUE, then the CSC + * matrix specified in the main layer is used instead of the one here. + * If csc::specified is FALSE, then the CSC matrix used from the previous + * flip is used. csc::useMain must be set to FALSE for the main layer. + */ + struct { + NvBool specified; + NvBool useMain; + struct NvKmsCscMatrix matrix; + } csc; + + /* + * When true, all pending flips and synchronization operations get + * ignored, and channel flips to given buffer. 
Notifier and semaphore + * should not be specified if this flag is true. This flag does + * nothing if set true for NVKMS_IOCTL_SET_MODE ioctl. + * + * This flag allows client to remove stalled flips and unblock + * the channel. + * + * This flag cannot be used for the overlay layer. + */ + NvBool skipPendingFlips; + + /* + * This field can be used when + * NvKmsAllocDeviceReply::layerCaps[layer].supportsHDR = TRUE. + * + * If staticMetadata is enabled for multiple layers, flip request + * will be rejected. + */ + struct { + NvBool specified; + /*! + * If TRUE, enable HDR static metadata. If FALSE, disable it. + * + * Note that “specified” serves to mark the field as being changed + * in this flip request, rather than as specified for this frame. + * So to disable HDR static metadata, set hdr.specified = TRUE and + * hdr.staticMetadata.enabled = FALSE. + */ + NvBool enabled; + struct NvKmsHDRStaticMetadata staticMetadata; + } hdr; + + struct { + enum NvKmsInputColorSpace val; + NvBool specified; + } colorspace; + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +struct NvKmsFlipCommonReplyOneHead { + struct { + struct NvKmsSyncpt postSyncpt; + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; +}; + +/*! + * NVKMS_IOCTL_ALLOC_DEVICE: Allocate an NVKMS device object. + * + * This has the scope of a resman SLI device. + * + * Multiple clients can allocate devices (DRM-KMS, multiple X + * servers). Clients should configure SLI before initializing NVKMS. + * NVKMS will query resman for the current SLI topology. + * + * The SLI configuration (both the linked SLI device, and the sliMosaic + * boolean below) will be latched when the specified GPU transitions + * from zero NVKMS devices allocated to one NVKMS device allocated. + * + * The returned information will remain static until the NVKMS device + * object is freed. + */ + +struct NvKmsAllocDeviceRequest { + /*! + * Clients should populate versionString with the value of + * NV_VERSION_STRING from nvUnixVersion.h. This is used for a + * version handshake. + */ + char versionString[NVKMS_NVIDIA_DRIVER_VERSION_STRING_LENGTH]; + + /*! + * The (primary) GPU for this device; this is used as the value + * for NV0080_ALLOC_PARAMETERS::deviceId. + */ + NvU32 deviceId; + + /*! + * Whether SLI Mosaic is requested: i.e., multiple disps, one + * per physical GPU, for the SLI device. + */ + NvBool sliMosaic; + + /*! + * When tryInferSliMosaicFromExistingDevice=TRUE, then the above + * 'sliMosaic' field is ignored and the ALLOC_DEVICE request will + * inherit the current sliMosaic state of the existing device + * identified by deviceId. If there is not an existing device for + * deviceId, then the ALLOC_DEVICE request will proceed normally, honoring + * the requested sliMosaic state. + */ + NvBool tryInferSliMosaicFromExistingDevice; + + /*! + * NVKMS will use the 3D engine for headSurface. If clients want to avoid + * the use of the 3D engine, set no3d = TRUE. Note this will cause modesets + * that require headSurface to fail. + * + * This flag is only honored when there is not already an existing device + * for the deviceId. + */ + NvBool no3d; + + /*! + * When enableConsoleHotplugHandling is TRUE, NVKMS will start handling + * hotplug events at the console when no modeset owner is present. + * + * If FALSE, console hotplug handling behavior is not changed. + * + * This should be set to TRUE for clients that intend to allocate the device + * but don't intend to become the modeset owner right away. 
It should be set + * to FALSE for clients that may take modeset ownership immediately, in + * order to suppress hotplug handling between the NVKMS_IOCTL_ALLOC_DEVICE + * and NVKMS_IOCTL_GRAB_OWNERSHIP calls when the calling client is the first + * to allocate the device. + * + * Note that NVKMS_IOCTL_RELEASE_OWNERSHIP also enables console hotplug + * handling. Once enabled, console hotplug handling remains enabled until + * the last client frees the device. + */ + NvBool enableConsoleHotplugHandling; + + struct { + /* name[0] == '\0' for unused registryKeys[] array elements. */ + char name[NVKMS_MAX_DEVICE_REGISTRY_KEYNAME_LEN]; + NvU32 value; + } registryKeys[NVKMS_MAX_DEVICE_REGISTRY_KEYS]; +}; + +enum NvKmsAllocDeviceStatus { + NVKMS_ALLOC_DEVICE_STATUS_SUCCESS, + NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH, + NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST, + NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR, + NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE, + NVKMS_ALLOC_DEVICE_STATUS_CORE_CHANNEL_ALLOC_FAILED, +}; + + +struct NvKmsAllocDeviceReply { + + enum NvKmsAllocDeviceStatus status; + + /*! + * The handle to use when identifying this NVKMS device in + * subsequent calls. + */ + NvKmsDeviceHandle deviceHandle; + + /*! + * A bitmask, indicating the GPUs, one per bit, contained by this + * device. + */ + NvU32 subDeviceMask; + + /*! The number of heads on each disp. */ + NvU32 numHeads; + + /*! The number of disps. */ + NvU32 numDisps; + + /*! The handle to identify each disp, in dispHandles[0..numDisps). */ + NvKmsDispHandle dispHandles[NVKMS_MAX_SUBDEVICES]; + + /*! + * Device-wide Capabilities: of the display engine. + * + * IMPLEMENTATION NOTE: this is the portion of DispHalRec::caps + * that can vary between EVO classes. + */ + NvBool supportsInbandStereoSignaling; + NvBool requiresVrrSemaphores; + NvBool inputLutAppliesToBase; + + /*! + * Whether the client can allocate and manipulate SwapGroup objects via + * NVKMS_IOCTL_ALLOC_SWAP_GROUP and friends. + */ + NvBool supportsSwapGroups; + + /*! + * Whether the NVKMS SwapGroup implementation supports Warp and Blend on + * this device. + */ + NvBool supportsWarpAndBlend; + + /*! + * When nIsoSurfacesInVidmemOnly=TRUE, then only video memory + * surfaces can be used for the surface in + * NvKmsCompletionNotifierDescription or NvKmsSemaphore. + */ + NvBool nIsoSurfacesInVidmemOnly; + + /* + * When requiresAllAllocationsInSysmem=TRUE, then all memory allocations + * that will be accessed by display must come from sysmem. + */ + NvBool requiresAllAllocationsInSysmem; + + /* + * Whether the device that NVKMS is driving supports headSurface GPU + * composition. + */ + NvBool supportsHeadSurface; + + /*! + * The display engine supports a "legacy" format for notifiers and + * semaphores (one word for semaphores and base channel notifiers; + * two words for overlay notifiers). On newer GPUs, the display + * engine also supports a similar four word semaphore and notifier + * format used by graphics. + * + * This describes which values are valid for NvKmsNIsoFormat. + * + * Iff a particular enum NvKmsNIsoFormat 'value' is supported, + * then (1 << value) will be set in validNIsoFormatMask. + */ + NvU8 validNIsoFormatMask; + + /*! + * Which NvKmsResamplingMethod enum values are supported by the NVKMS + * device. + * + * Iff a particular enum NvKmsResamplingMethod 'value' is supported, then (1 + * << value) will be set in validResamplingMethodMask. 
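+     *
+     * For example (illustrative check; 'reply' is a hypothetical pointer to
+     * the NvKmsAllocDeviceReply):
+     *
+     *     if (reply->validResamplingMethodMask &
+     *         NVBIT(NVKMS_RESAMPLING_METHOD_BICUBIC_BSPLINE)) {
+     *         // bicubic B-spline resampling may be requested
+     *     }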
+ */ + NvU32 validResamplingMethodMask; + + NvU32 surfaceAlignment; + NvU32 maxWidthInBytes; + NvU32 maxWidthInPixels; + NvU32 maxHeightInPixels; + NvU32 maxCursorSize; + + /*! + * The page kind used by the GPU's MMU for uncompressed block-linear color + * formats. + */ + NvU8 genericPageKind; + + /*! + * Describes the supported Color Key selects and blending modes for match + * and nomatch cursor pixels. + */ + struct NvKmsCompositionCapabilities cursorCompositionCaps; + + /*! The number of layers attached to each head. */ + NvU32 numLayers[NVKMS_MAX_HEADS_PER_DISP]; + + /*! + * Describes supported functionalities for each layer. + */ + struct NvKmsLayerCapabilities layerCaps[NVKMS_MAX_LAYERS_PER_HEAD]; + + /*! + * This bitmask specifies all of the (rotation, reflectionX, reflectionY) + * combinations that are supported for the main and overlay layers. + * Each bit in this bitmask is mapped to one combination per the scheme + * in NvKmsRRParamsToCapBit(). + */ + NvU16 validLayerRRTransforms; + + /*! + * IO coherency modes that display supports for ISO and NISO memory + * allocations, respectively. + */ + NvKmsDispIOCoherencyModes isoIOCoherencyModes; + NvKmsDispIOCoherencyModes nisoIOCoherencyModes; + + /*! + * 'supportsSyncpts' indicates whether NVKMS supports the use of syncpts + * for synchronization. + */ + NvBool supportsSyncpts; + + /*! + * 'supportsIndependentAcqRelSemaphore' indicates whether HW supports + * configuring different semaphores for acquire and release for a buffer + * flip on a given layer. + */ + NvBool supportsIndependentAcqRelSemaphore; + + /*! + * 'supportsVblankSyncObjects' indicates whether HW supports raster + * generator sync objects that signal at vblank. + */ + NvBool supportsVblankSyncObjects; +}; + +struct NvKmsAllocDeviceParams { + struct NvKmsAllocDeviceRequest request; /*! in */ + struct NvKmsAllocDeviceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_FREE_DEVICE: Free the NVKMS device object specified by + * deviceHandle. + * + * The underlying device is not actually freed until all callers of + * NVKMS_IOCTL_ALLOC_DEVICE have freed their reference to the device. + * + * When a client calls FREE_DEVICE, any configuration specified by + * that client will be removed: + * - Any EDID overrides. + * - Any interest declared on dynamic dpys. + * - Any cursor image on any head. + * - Any custom LUT contents. + * - Any interest declared on any events. + * + * XXX define how FREE_DEVICE interacts with: + * - concurrent X servers on different VTs + * - console restore + */ + +struct NvKmsFreeDeviceRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsFreeDeviceReply { + NvU32 padding; +}; + +struct NvKmsFreeDeviceParams { + struct NvKmsFreeDeviceRequest request; /*! in */ + struct NvKmsFreeDeviceReply reply; /*!out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_DISP: Query information about the NVKMS disp + * object specified by the tuple (deviceHandle, dispHandle). + * + * The returned information will remain static until the NVKMS device + * object is freed. + */ + +struct NvKmsQueryDispRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; +}; + +struct NvKmsQueryDispReply { + /*! + * The instance of the subdevice that owns this disp. + * NVBIT(displayOwner) will be present in subDeviceMask. + */ + NvU32 displayOwner; + + /*! A bitmask of the device's subdevices used by this disp. */ + NvU32 subDeviceMask; + + /*! The possible dpys for this disp, excluding any dynamic dpys. */ + NVDpyIdList validDpys; + + /*! 
The dpys that were driven at boot-time, if any. */ + NVDpyIdList bootDpys; + + /*! The dpys that are capable of dynamic mux switching, if any. */ + NVDpyIdList muxDpys; + + /*! The framelock device, if any, connected to this disp. */ + NvKmsFrameLockHandle frameLockHandle; + + /*! The number of connectors on this disp. */ + NvU32 numConnectors; + + /*! + * The handle to identify each connector, in + * connectorHandles[0..numConnectors) + */ + NvKmsConnectorHandle connectorHandles[NVKMS_MAX_CONNECTORS_PER_DISP]; + + /*! + * A string describing one of the the GPUs used by this disp. The + * NVKMS log will also print this string to the kernel log. Users + * should be able to correlate GPUs between NVKMS and NVKMS + * clients using this string. + */ + char gpuString[NVKMS_GPU_STRING_SIZE]; +}; + +struct NvKmsQueryDispParams { + struct NvKmsQueryDispRequest request; /*! in */ + struct NvKmsQueryDispReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA: Query information about the NVKMS + * connector object specified by the triplet (deviceHandle, dispHandle, + * connectorHandle). + * + * The returned information will remain static until the NVKMS device + * object is freed. + */ + +struct NvKmsQueryConnectorStaticDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvKmsConnectorHandle connectorHandle; +}; + +struct NvKmsQueryConnectorStaticDataReply { + NVDpyId dpyId; + NvBool isDP; + NvBool isLvds; + NvBool locationOnChip; + NvU32 legacyTypeIndex; + NvKmsConnectorType type; + NvU32 typeIndex; + NvKmsConnectorSignalFormat signalFormat; + NvU32 physicalIndex; + NvU32 physicalLocation; + + /* Bitmask of valid heads to drive dpy(s) on this connector. */ + NvU32 headMask; +}; + +struct NvKmsQueryConnectorStaticDataParams { + struct NvKmsQueryConnectorStaticDataRequest request; /*! in */ + struct NvKmsQueryConnectorStaticDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA: Query dynamic information about the + * NVKMS connector object specified by the triplet (deviceHandle, dispHandle, + * connectorHandle). + */ + +struct NvKmsQueryConnectorDynamicDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvKmsConnectorHandle connectorHandle; +}; + +struct NvKmsQueryConnectorDynamicDataReply { +#define NVKMS_DP_DETECT_COMPLETE_POLL_INTERVAL_USEC 100000 /* in microseconds */ +#define NVKMS_DP_DETECT_COMPLETE_TIMEOUT_USEC 10000000 /* in microseconds */ + + /* + * For DisplayPort devices, indicates whether the DisplayPort library is + * finished detecting devices on this connector. This is set to TRUE for + * other devices because NVKMS knows as soon as ALLOC_DEVICE is complete + * whether the device is connected or not. + */ + NvBool detectComplete; + /* + * Contains the list of display IDs for dynamic dpys detected on this + * connector. + */ + NVDpyIdList dynamicDpyIdList; +}; + +struct NvKmsQueryConnectorDynamicDataParams { + struct NvKmsQueryConnectorDynamicDataRequest request; /*! in */ + struct NvKmsQueryConnectorDynamicDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_DPY_STATIC_DATA: Query static information about + * the NVKMS dpy object specified by the triplet (deviceHandle, + * dispHandle, dpyId). This information should remain static for the + * lifetime of the dpy. 
+ */ + +struct NvKmsQueryDpyStaticDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + +struct NvKmsQueryDpyStaticDataReply { + NvKmsConnectorHandle connectorHandle; /*! The connector driving this dpy. */ + NvU32 type; /*! NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_ */ + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + NvBool mobileInternal; + NvBool isDpMST; +}; + +struct NvKmsQueryDpyStaticDataParams { + struct NvKmsQueryDpyStaticDataRequest request; /*! in */ + struct NvKmsQueryDpyStaticDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA: Query dynamic information about + * the NVKMS dpy object specified by the triplet (deviceHandle, + * dispHandle, dpyId). + * + * This information should be re-queried after an + * NVKMS_EVENT_TYPE_DPY_CHANGED event. + */ + +struct NvKmsQueryDpyDynamicDataRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + + NvBool forceConnected; + NvBool forceDisconnected; + NvBool overrideEdid; + NvBool ignoreEdid; + NvBool ignoreEdidChecksum; + NvBool allowDVISpecPClkOverride; + NvBool dpInbandStereoSignaling; + NvBool disableACPIBrightnessHotkeys; + + /* + * If overrideEdid is TRUE, then edid::buffer[] contains an EDID + * to override anything detected. + */ + struct { + NvU16 bufferSize; + NvU8 buffer[NVKMS_EDID_BUFFER_SIZE]; + } edid; +}; + +enum NvKmsDpyVRRType { + NVKMS_DPY_VRR_TYPE_NONE, + NVKMS_DPY_VRR_TYPE_GSYNC, + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_DEFAULTLISTED, + NVKMS_DPY_VRR_TYPE_ADAPTIVE_SYNC_NON_DEFAULTLISTED, +}; + +struct NvKmsQueryDpyDynamicDataReply { + char name[NVKMS_DPY_NAME_SIZE]; + + NvU32 maxPixelClockKHz; + NvBool connected; + NvBool isVirtualRealityHeadMountedDisplay; + + struct { + NvU8 heightInCM; /* vertical screen size */ + NvU8 widthInCM; /* horizontal screen size */ + } physicalDimensions; + + /*! + * Which VRR type has been selected for this display, either true + * G-SYNC, Adaptive-Sync defaultlisted, or Adaptive-Sync non-defaultlisted. + */ + enum NvKmsDpyVRRType vrrType; + + struct { + NvBool supported; + NvBool isDLP; + NvBool isAegis; + NvU32 subType; /*! STEREO_PLUG_AND_PLAY_ from nvStereoDisplayDef.h */ + } stereo3DVision; + + struct { + struct { + NvBool valid; + NvU8 buffer[NVKMS_GUID_SIZE]; + char str[NVKMS_GUID_STRING_SIZE]; + } guid; + } dp; + + struct { + /*! + * The size of the EDID in buffer[], or 0 if there is no EDID + * available in buffer[]. + */ + NvU16 bufferSize; + + /*! + * Whether NVKMS determined that the EDID is valid. If the + * EDID is not valid, there may still be information available + * in infoString: the infoString will describe why the EDID + * was deemed invalid. + */ + NvBool valid; + + /*! + * The raw EDID bytes. + */ + NvU8 buffer[NVKMS_EDID_BUFFER_SIZE]; + + /*! + * Parsed information from the EDID. For the raw EDID bytes, + * see NvKmsQueryDpyDynamicDataParams::edid::buffer[]. + */ + char infoString[NVKMS_EDID_INFO_STRING_LENGTH]; + } edid; +}; + +struct NvKmsQueryDpyDynamicDataParams { + struct NvKmsQueryDpyDynamicDataRequest request; /*! in */ + struct NvKmsQueryDpyDynamicDataReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_VALIDATE_MODE_INDEX: Validate a particular mode from a + * dpy's candidate modes. + * + * NVKMS can consider modes from a dpy's EDID, as well as a + * variety of builtin modes. + * + * This ioctl identifies one of those candidate modes by index. NVKMS + * will attempt to validate that candidate mode for the dpy, using the + * specified mode validation parameters. 
+ * + * If the mode index is larger than the list of candidate modes, + * reply::end will be TRUE. Otherwise, reply::end will be FALSE, and + * reply::mode will contain the candidate mode. + * + * If the mode is valid, then reply::valid will be TRUE. Otherwise, + * reply::valid will be FALSE. In either case, request::pInfoString[] + * will contain a description of what happened during mode validation. + * + * To query the full modepool, clients should repeatedly call + * NVKMS_IOCTL_VALIDATE_MODE_INDEX with increasing mode index values, + * until NVKMS reports end==TRUE. + * + * Note that the candidate mode list can change when the dpy changes + * (reported by the NVKMS_EVENT_TYPE_DPY_CHANGED event). The client + * should restart its modepool querying if it receives a DPY_CHANGED + * event. The candidate mode list can also change based on the + * parameters in request::modeValidation. Clients should not change + * request::modeValidation while looping over candidate mode indices. + * + * Pseudocode example usage pattern: + * + * struct NvKmsModeValidationParams modeValidation = Initialize(); + * + * retry: + * NvU32 modeIndex = 0; + * + * while (1) { + * char infoString[INFO_STRING_LENGTH]; + * memset(¶ms); + * params.request.dpyId = dpyId; + * params.request.modeIndex = modeIndex++; + * params.request.modeValidation = modeValidation; + * params.request.pInfoString = nvKmsPointerToNvU64(infoString); + * params.request.infoStringLength = sizeof(infoString); + * + * ioctl(¶ms); + * + * if (params.reply.end) break; + * + * print(infoString); + * + * if (params.reply.valid) { + * AddToModePool(params.reply.mode); + * } + * } + * + * if (dpyChanged) goto retry; + * + */ + +struct NvKmsValidateModeIndexRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + struct NvKmsModeValidationParams modeValidation; + NvU32 modeIndex; + + /* + * Pointer to a string of size 'infoStringSize'. + * Use nvKmsPointerToNvU64() to assign pInfoString. + * The maximum size allowed is + * NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH. + */ + NvU32 infoStringSize; + NvU64 pInfoString NV_ALIGN_BYTES(8); +}; + +struct NvKmsValidateModeIndexReply { + NvBool end; + NvBool valid; + + struct NvKmsMode mode; + + /*! The validSyncs used by NVKMS when validating the mode. */ + struct NvKmsModeValidationValidSyncs validSyncs; + + /*! Whether this mode is marked as "preferred" by the EDID. */ + NvBool preferredMode; + + /*! A text description of the mode. */ + char description[64]; + + /*! Where the mode came from. */ + enum NvKmsModeSource { + NvKmsModeSourceUnknown = 0, + NvKmsModeSourceEdid = 1, + NvKmsModeSourceVesa = 2, + } source; + + /* The number of bytes written to 'pInfoString' (from the request) */ + NvU32 infoStringLenWritten; + + /*! + * These are the usage bounds that may be possible with this mode, + * assuming that only one head is active. For actual usage bounds, + * see guaranteedUsage and possibleUsage returned in + * NvKmsSetModeOneHeadReply. + */ + struct NvKmsUsageBounds modeUsage; +}; + +struct NvKmsValidateModeIndexParams { + struct NvKmsValidateModeIndexRequest request; /*! in */ + struct NvKmsValidateModeIndexReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_VALIDATE_MODE: Validate an individual mode for the + * specified dpy. + * + * Given the validation parameters, NVKMS will test whether the given + * mode is currently valid for the specified dpy. + * + * If the mode is valid, then reply::valid will be TRUE. Otherwise, + * reply::valid will be FALSE. 
In either case, reply::infoString[] + * will contain a description of what happened during mode validation. + */ + +struct NvKmsValidateModeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + struct NvKmsModeValidationParams modeValidation; + struct NvKmsMode mode; + + /* + * Pointer to a string of size 'infoStringSize'. + * Use nvKmsPointerToNvU64() to assign pInfoString. + * The maximum size allowed is + * NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH. + */ + NvU32 infoStringSize; + NvU64 pInfoString NV_ALIGN_BYTES(8); +}; + +struct NvKmsValidateModeReply { + NvBool valid; + + /*! The validSyncs used by NVKMS when validating the mode. */ + struct NvKmsModeValidationValidSyncs validSyncs; + + /* The number of bytes written to 'pInfoString' (from the request) */ + NvU32 infoStringLenWritten; + + /*! + * These are the usage bounds that may be possible with this mode, + * assuming that only one head is active. For actual usage bounds, + * see guaranteedUsage and possibleUsage returned in + * NvKmsSetModeOneHeadReply. + */ + struct NvKmsUsageBounds modeUsage; +}; + +struct NvKmsValidateModeParams { + struct NvKmsValidateModeRequest request; /*! in */ + struct NvKmsValidateModeReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_MODE: Perform a modeset. + * + * NvKmsSetModeRequest can describe the modetiming configuration + * across all heads of all disps within the SLI device. + * + * The elements in NvKmsSetModeRequest::disp[] correspond to the disps + * returned in NvKmsAllocDeviceReply::dispHandles[]. + * + * To only touch certain heads and disps, use the + * requestedHeadsBitMask and requestedDispsBitMask fields to limit + * which array elements are honored. + * + * If the request is invalid, one or more of the + * NvKmsSetMode{,OneDisp,OneHead}Reply::status fields will have a + * non-SUCCESS value. If the mode set completed successfully, then + * all {NvKmsSetMode{,OneDisp,OneHead}Reply::status fields should be + * SUCCESS. + */ + +struct NvKmsSetModeHeadSurfaceParams { + NvBool forceCompositionPipeline; + NvBool forceFullCompositionPipeline; + NvBool fakeOverlay; + NvBool blendAfterWarp; + NvBool transformSpecified; + + /* Reflect the image along the X axis. */ + NvBool reflectionX; + + /* Reflect the image along the Y axis. */ + NvBool reflectionY; + + /* + * Rotate the image counter-clockwise in 90 degree increments. + * + * Reflection (specified above by ::reflection[XY]) is applied + * before rotation. This matches the semantics of RandR. From: + * + * https://cgit.freedesktop.org/xorg/proto/randrproto/tree/randrproto.txt + * + * "Rotation and reflection and how they interact can be confusing. In + * Randr, the coordinate system is rotated in a counter-clockwise direction + * relative to the normal orientation. Reflection is along the window system + * coordinate system, not the physical screen X and Y axis, so that rotation + * and reflection do not interact. The other way to consider reflection is + * to is specified in the 'normal' orientation, before rotation, if you find + * the other way confusing." + */ + enum NvKmsRotation rotation; + enum NvKmsPixelShiftMode pixelShift; + enum NvKmsResamplingMethod resamplingMethod; + struct NvKmsMatrix transform; /* Only honored if transformSpecified. */ + + NvKmsSurfaceHandle blendTexSurfaceHandle; + NvKmsSurfaceHandle offsetTexSurfaceHandle; + + /* + * When warpMesh::surfaceHandle is non-zero, it indicates a surface + * containing warp mesh vertex data. 
The surface should: + * + * - Have a width multiple of 1024 pixels. + * - Have a depth of 32. + * - Contain a binary representation of a list of six-component + * vertices. Each of these components is a 32-bit floating point value. + * + * The X, Y components should contain normalized vertex coordinates, to be + * rendered as a triangle list or strip. The X and Y components' [0,1] + * range map to the head's ViewportOut X and Y, respectively. + * + * The U, V, R, and Q components should contain normalized, projective + * texture coordinates: + * + * U, V: 2D texture coordinate. U and V components' [0,1] range maps to the + * display's MetaMode ViewportIn X and Y, respectively. + * + * R: unused + * + * Q: Used for interpolation purposes. This is typically the third + * component of the result of a multiplication by a 3x3 projective transform + * matrix. + * + * warpMesh::vertexCount should contain the amount of vertices stored in the + * surface. + * + * warpMesh::dataType indicates if the vertices describe a triangle list or + * a triangle strip. A triangle list must have a vertexCount that is a + * multiple of 3. + */ + struct { + NvKmsSurfaceHandle surfaceHandle; + NvU32 vertexCount; + enum NvKmsWarpMeshDataType dataType; + } warpMesh; +}; + +#define NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE 10 // 10hz + +enum NvKmsAllowAdaptiveSync { + NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED = 0, + NVKMS_ALLOW_ADAPTIVE_SYNC_DEFAULTLISTED_ONLY, + NVKMS_ALLOW_ADAPTIVE_SYNC_ALL, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE attribute. */ +enum NvKmsDpyAttributeRequestedColorSpaceValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422 = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444 = 2, +}; + +/*! + * Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE and + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE attributes. + */ +enum NvKmsDpyAttributeColorRangeValue { + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL = 0, + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED = 1, +}; + +struct NvKmsSetModeOneHeadRequest { + /*! + * The list of dpys to drive with this head; or, empty to disable + * the head. + */ + NVDpyIdList dpyIdList; + + /*! The modetimings to set on the head. */ + struct NvKmsMode mode; + + /*! The above mode will be validated, using these validation parameters. */ + struct NvKmsModeValidationParams modeValidationParams; + + /*! + * The region within the raster timings that should contain an image. + * This is only used when viewPortOutSpecified is TRUE. Otherwise, the + * viewPortOut is inferred from the raster timings. + * + * For YUV420 modes, the viewPortOut should be in "full" + * horizontal space. See the explanation in NvKmsMode. + */ + struct NvKmsRect viewPortOut; + + /*! + * The size, in pixels, that the head will fetch from any surface + * it scans from. The viewPortPointIn is specified in + * NvKmsFlipCommonParams. + * + * For YUV420 modes, the viewPortSizeIn should be in "half" + * horizontal space. See the explanation in NvKmsMode. + */ + struct NvKmsSize viewPortSizeIn; + + /*! + * Describe the LUT to be used with the modeset. + */ + struct NvKmsSetLutCommonParams lut; + + /*! + * Describe the surfaces to present on this head. + */ + struct NvKmsFlipCommonParams flip; + + /*! + * The headSurface configuration requested, if any. + */ + struct NvKmsSetModeHeadSurfaceParams headSurface; + + NvBool viewPortOutSpecified; /*! Whether to use viewPortOut. */ + + /*! 
+ * Allow this head to be flipLocked to any other heads, set as + * part of this NVKMS_IOCTL_SET_MODE, who also have allowFlipLock + * set. FlipLock will only be enabled if additional criteria, + * such as identical modetimings, are also met. + */ + NvBool allowFlipLock; + + /*! + * Allow G-SYNC to be enabled on this head if it is supported by the GPU + * and monitor. + */ + NvBool allowGsync; + + /*! + * Whether to allow Adaptive-Sync to be enabled on this head if it is + * supported by the GPU: + * + * NVKMS_ALLOW_ADAPTIVE_SYNC_ALL: + * VRR is enabled as long as this monitor supports Adaptive-Sync. + * + * NVKMS_ALLOW_ADAPTIVE_SYNC_DEFAULTLISTED_ONLY: + * VRR is only enabled on this head if the monitor is on the + * Adaptive-Sync defaultlist. + * + * NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED: + * VRR is forced to be disabled if this is an Adaptive-Sync monitor. + */ + enum NvKmsAllowAdaptiveSync allowAdaptiveSync; + + /*! + * Override the minimum refresh rate for VRR monitors specified by the + * EDID (0 to not override the EDID-provided value). Clamped at modeset + * time to within NVKMS_VRR_MIN_REFRESH_RATE_MAX_VARIANCE of the + * EDID-specified minimum refresh rate, as long as the minimum is no + * lower than 1hz and the maximum does not exceed the maximum refresh rate + * defined by the mode timings. The current minimum refresh rate and this + * valid range are exposed through + * NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE. + * + * Does not affect G-SYNC monitors, which do not have a minimum refresh + * rate. + */ + NvU32 vrrOverrideMinRefreshRate; + + /*! + * Output colorspace. Valid only when colorSpaceSpecified is true. + */ + enum NvKmsDpyAttributeRequestedColorSpaceValue colorSpace; + NvBool colorSpaceSpecified; + + /*! + * Output color range. Valid only when colorRangeSpecified is true. + */ + enum NvKmsDpyAttributeColorRangeValue colorRange; + NvBool colorRangeSpecified; +}; + +struct NvKmsSetModeOneDispRequest { + /*! + * The bit mask of which head[] elements to look at on this disp; + * any other head will use its existing configuration. + */ + NvU32 requestedHeadsBitMask; + struct NvKmsSetModeOneHeadRequest head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsSetModeRequest { + NvKmsDeviceHandle deviceHandle; + + /*! + * When a modeset request is made, NVKMS will first perform + * validation to confirm whether the request can be satisfied. If + * the requested configuration cannot be fulfilled, the request + * returns FALSE. + * + * Only the modeset owner can issue a modeset with commit set to TRUE. + * + * If 'commit' is FALSE, then the status of validation will be returned. + * + * If 'commit' is TRUE, and validation passes, then NVKMS will + * apply the requested configuration. + */ + NvBool commit; + + /*! + * The bitmask of which indices within disp[] describe requested + * configuration changes. Any other disps will use their existing + * configuration. + */ + NvU32 requestedDispsBitMask; + + /* + * disp[n] corresponds to the disp named by + * NvKmsAllocDeviceReply::dispHandles[n]. + */ + struct NvKmsSetModeOneDispRequest disp[NVKMS_MAX_SUBDEVICES]; + + /*! + * Whether to use NVKMS's builtin headSurface support when necessary. + * + * XXX NVKMS HEADSURFACE TODO: Make this the default and remove this field. 
+ */ + NvBool allowHeadSurfaceInNvKms; +}; + +enum NvKmsSetModeOneHeadStatus { + NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE = 1, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY = 2, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_CURSOR_IMAGE = 3, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_CURSOR_POSITION = 4, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_LUT = 5, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP = 6, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS = 7, + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_HEAD_SURFACE = 8, + NVKMS_SET_MODE_ONE_HEAD_STATUS_UNSUPPORTED_HEAD_SURFACE_COMBO = 9, + NVKMS_SET_MODE_ONE_HEAD_STATUS_UNSUPPORTED_HEAD_SURFACE_FEATURE = 10, +}; + +struct NvKmsSetModeOneHeadReply { + /*! + * When the NVKMS_IOCTL_SET_MODE succeeds, then this will be SUCCESS. + * Otherwise, 'status' will be a non-SUCCESS value for one or more + * heads and/or one or more disps. + * + * Note that a failure could occur for a preexisting head + * configuration, so this status could be != SUCCESS for a head + * not listed in NvKmsSetModeOneDispRequest::requestedHeadsBitMask. + */ + enum NvKmsSetModeOneHeadStatus status; + + /*! + * The identifier that we use to talk to RM about the display + * device(s) driven by this head. For DP MST, it is the identifier + * of the DisplayPort library group to which the MST device belongs. + * Otherwise, it is the identifier of the connector. + */ + NvU32 activeRmId; + + /*! + * The usage bounds that may be possible on this head based on the ISO + * BW at that point. + * + * If a flip request is within the bounds of NvKmsSetModeOneHeadReply:: + * guaranteedUsage, then clients can expect the flip to succeed. + * If a flip request is beyond the bounds of NvKmsSetModeOneHeadReply:: + * guaranteedUsage but within NvKmsSetModeOneHeadReply::possibleUsage, + * then the request may legitimately fail due to insufficient display + * bandwidth and clients need to be prepared to handle that flip + * request failure. + */ + struct NvKmsUsageBounds possibleUsage; + + /*! + * The guaranteed usage bounds usable on this head. + */ + struct NvKmsUsageBounds guaranteedUsage; + + /*! + * Whether NVKMS chose to use headSurface on this head. + */ + NvBool usingHeadSurface; + + /*! + * Whether NVKMS enabled VRR on this head. + */ + NvBool vrrEnabled; + + /*! + * Contains the 'postSyncObject' that the client requested via + * NvKmsSetModeOneHeadRequest::flip. + */ + struct NvKmsFlipCommonReplyOneHead flipReply; +}; + +enum NvKmsSetModeOneDispStatus { + NVKMS_SET_MODE_ONE_DISP_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK = 1, + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_EXTENDED_GPU_CAPABILITIES_CHECK = 2, + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_DISPLAY_PORT_BANDWIDTH_CHECK = 3, + NVKMS_SET_MODE_ONE_DISP_STATUS_INCOMPATIBLE_DPYS = 4, + NVKMS_SET_MODE_ONE_DISP_STATUS_DUPLICATE_DPYS = 5, +}; + +struct NvKmsSetModeOneDispReply { + /*! + * When the NVKMS_IOCTL_SET_MODE succeeds, then this will be SUCCESS. + * Otherwise, 'status' will be a non-SUCCESS value for one or more + * heads and/or one or more disps. + * + * Note that a failure could occur for a preexisting disp + * configuration, so this status could be != SUCCESS for a disp + * not listed in NvKmsSetModeRequest::requestedDispsBitMask. 
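/*
 * Illustrative sketch (not part of the NVKMS API) of an NVKMS_IOCTL_SET_MODE
 * request, first issued with commit == FALSE to check validation status and
 * then re-issued with commit == TRUE to apply it, per the 'commit' semantics
 * described above.  It drives a single dpy on head 0 of disp 0 with a mode
 * previously returned by NVKMS_IOCTL_VALIDATE_MODE_INDEX.  The nvKmsIoctl()
 * wrapper and the MakeSingleDpyIdList() helper (building an NVDpyIdList that
 * contains only 'dpyId') are assumptions for the example; the remaining
 * per-head fields (viewPortSizeIn, flip, etc.) and error handling are
 * omitted.
 *
 *   struct NvKmsSetModeParams params;
 *   struct NvKmsSetModeOneHeadRequest *head;
 *
 *   memset(&params, 0, sizeof(params));
 *   params.request.deviceHandle = deviceHandle;
 *   params.request.requestedDispsBitMask = NVBIT(0);
 *   params.request.disp[0].requestedHeadsBitMask = NVBIT(0);
 *
 *   head = &params.request.disp[0].head[0];
 *   head->dpyIdList = MakeSingleDpyIdList(dpyId);
 *   head->mode = validatedMode;
 *   head->allowAdaptiveSync = NVKMS_ALLOW_ADAPTIVE_SYNC_DEFAULTLISTED_ONLY;
 *
 *   params.request.commit = NV_FALSE;                 // validate only
 *   nvKmsIoctl(fd, NVKMS_IOCTL_SET_MODE, &params);
 *
 *   if (params.reply.status == NVKMS_SET_MODE_STATUS_SUCCESS &&
 *       params.reply.disp[0].status ==
 *           NVKMS_SET_MODE_ONE_DISP_STATUS_SUCCESS &&
 *       params.reply.disp[0].head[0].status ==
 *           NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS) {
 *       params.request.commit = NV_TRUE;              // apply it
 *       nvKmsIoctl(fd, NVKMS_IOCTL_SET_MODE, &params);
 *   }
 */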
+ */ + enum NvKmsSetModeOneDispStatus status; + struct NvKmsSetModeOneHeadReply head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +enum NvKmsSetModeStatus { + NVKMS_SET_MODE_STATUS_SUCCESS = 0, + NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK = 1, + NVKMS_SET_MODE_STATUS_NOT_MODESET_OWNER = 2, +}; + +struct NvKmsSetModeReply { + enum NvKmsSetModeStatus status; + struct NvKmsSetModeOneDispReply disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsSetModeParams { + struct NvKmsSetModeRequest request; /*! in */ + struct NvKmsSetModeReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_CURSOR_IMAGE: Set the cursor image for the + * specified head. + */ + +struct NvKmsSetCursorImageRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsSetCursorImageCommonParams common; +}; + +struct NvKmsSetCursorImageReply { + NvU32 padding; +}; + +struct NvKmsSetCursorImageParams { + struct NvKmsSetCursorImageRequest request; /*! in */ + struct NvKmsSetCursorImageReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_MOVE_CURSOR: Set the cursor position for the specified + * head. + * + * x,y are relative to the current viewPortIn configured on the head. + */ + +struct NvKmsMoveCursorRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsMoveCursorCommonParams common; +}; + +struct NvKmsMoveCursorReply { + NvU32 padding; +}; + +struct NvKmsMoveCursorParams { + struct NvKmsMoveCursorRequest request; /*! in */ + struct NvKmsMoveCursorReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_LUT: Set the LUT contents for the specified head. + */ + +struct NvKmsSetLutRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + struct NvKmsSetLutCommonParams common; +}; + +struct NvKmsSetLutReply { + NvU32 padding; +}; + +struct NvKmsSetLutParams { + struct NvKmsSetLutRequest request; /*! in */ + struct NvKmsSetLutReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_IDLE_BASE_CHANNEL: Wait for the base channel to be idle on + * the requested heads on the requested subdevices of a device. + * + * Each (head,sd) pair to be idled is described by: + * + * subDevicesPerHead[head] |= NVBIT(sd) + */ + +struct NvKmsIdleBaseChannelRequest { + NvKmsDeviceHandle deviceHandle; + NvU32 subDevicesPerHead[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsIdleBaseChannelReply { + /*! + * If stopping the base channel is necessary due to a timeout, (head,sd) + * pairs will be described with: + * + * stopSubDevicesPerHead[head] |= NVBIT(sd) + * + * indicating that semaphore releases from the stalled channels may not have + * occurred. + */ + NvU32 stopSubDevicesPerHead[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsIdleBaseChannelParams { + struct NvKmsIdleBaseChannelRequest request; /*! in */ + struct NvKmsIdleBaseChannelReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_FLIP: Flip one or more heads on the subdevices of a device. + * + * At least one head on one subdevice must be specified in a flip request. + */ + +struct NvKmsFlipRequestOneSubDevice { + /*! + * The bit mask of which head[] elements to look at on this disp. + */ + NvU32 requestedHeadsBitMask; + struct NvKmsFlipCommonParams head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsFlipRequest { + NvKmsDeviceHandle deviceHandle; + + /* + * sd[n] corresponds to bit N in NvKmsQueryDispReply::subDeviceMask and + * NvKmsAllocDeviceReply::subDeviceMask. + */ + struct NvKmsFlipRequestOneSubDevice sd[NVKMS_MAX_SUBDEVICES]; + + /*! 
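/*
 * Illustrative sketch (not part of the NVKMS API) of an NVKMS_IOCTL_FLIP
 * request for head 0 of subdevice 0.  The nvKmsIoctl() wrapper and the
 * FillFlipParamsForHead() helper (which would populate the per-head
 * NvKmsFlipCommonParams -- layer surfaces, sync objects, etc., defined
 * elsewhere in this header) are assumptions for the example; error handling
 * is omitted.
 *
 *   struct NvKmsFlipParams params;
 *
 *   memset(&params, 0, sizeof(params));
 *   params.request.deviceHandle = deviceHandle;
 *   params.request.sd[0].requestedHeadsBitMask = NVBIT(0);
 *   FillFlipParamsForHead(&params.request.sd[0].head[0]);
 *   params.request.commit = NV_TRUE;
 *   params.request.allowVrr = NV_FALSE;
 *
 *   nvKmsIoctl(fd, NVKMS_IOCTL_FLIP, &params);
 *
 *   // On success, params.reply.sd[0].head[0] holds the per-head reply
 *   // (e.g. any requested post-flip sync object information).
 */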
+ * When a flip request is made, NVKMS will first perform + * validation to confirm whether the request can be satisfied. If + * the requested configuration cannot be fulfilled, the request + * returns FALSE. + * + * If 'commit' is FALSE, then the status of validation will be returned. + * + * If 'commit' is TRUE, and validation passes, then NVKMS will + * apply the requested configuration. + */ + NvBool commit; + + /*! + * When set, indicates that the client is capable of releasing the VRR + * semaphore to indicate when the flip is ready. Setting this to FALSE + * disables VRR. + */ + NvBool allowVrr; +}; + +enum NvKmsVrrFlipType { + NV_KMS_VRR_FLIP_NON_VRR = 0, + NV_KMS_VRR_FLIP_GSYNC, + NV_KMS_VRR_FLIP_ADAPTIVE_SYNC, +}; + +struct NvKmsFlipReplyOneSubDevice { + struct NvKmsFlipCommonReplyOneHead head[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsFlipReply { + /*! + * If vrrFlipType != NV_KMS_VRR_FLIP_NON_VRR, then VRR was used for the + * requested flip. In this case, vrrSemaphoreIndex indicates the index + * into the VRR semaphore surface that the client should release to + * trigger the flip. + * + * A value of -1 indicates that no VRR semaphore release is needed. + */ + NvS32 vrrSemaphoreIndex; + + /*! + * Indicates whether the flip was non-VRR, was a VRR flip on one or more + * G-SYNC displays, or was a VRR flip exclusively on Adaptive-Sync + * displays. + */ + enum NvKmsVrrFlipType vrrFlipType; + + /*! + * sd[n] corresponds to bit N in NvKmsQueryDispReply::subDeviceMask and + * NvKmsAllocDeviceReply::subDeviceMask. + */ + struct NvKmsFlipReplyOneSubDevice sd[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsFlipParams { + struct NvKmsFlipRequest request; /*! in */ + struct NvKmsFlipReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST: "Dynamic dpy" reference + * counting. + * + * Most dpys have a lifetime equal to the NVKMS device. However, some + * dpys are dynamic and are created and destroyed in response to + * getting connected or disconnected. DisplayPort MST dpys are dynamic dpys. + * + * When a dynamic dpy is disconnected, its NVDpyId will be freed and + * made available for use by dynamic dpys connected later, unless any + * client has declared "interest" in the NVDpyId. The dynamic NVDpyId + * will persist as long as a client has declared interest on it, and + * will be reused for newly connected monitors at the same dynamic dpy + * address (port address, in the case of DP MST dynamic dpys). + * + * The 'interest' field selects interest in the dynamic dpy. + * + * If the dynamic dpy has already been disconnected (and therefore + * removed) before the client has declared interest in it, this ioctl + * will fail. + * + * The recommended usage pattern is: + * + * - Declare interest in the event types: + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED + * + * - When a DYNAMIC_DPY_CONNECTED event is received, call + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST + * to declare interest on the dpy. Be sure to check the return + * value, in case the dynamic dpy was already removed. Update any + * client bookkeeping, to start tracking the dpy. + * + * - When a DYNAMIC_DPY_DISCONNECTED event is received, update any + * client bookkeeping, to stop tracking this dynamic dpy. Call + * NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST + * to remove interest on the dpy. 
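/*
 * Illustrative sketch (not part of the NVKMS API) of the recommended dynamic
 * dpy pattern described above, using the NvKmsDeclareDynamicDpyInterestParams
 * structure defined below.  The nvKmsIoctl() wrapper is an assumption for the
 * example and is assumed to report failure (e.g. the dynamic dpy was already
 * removed) through its return value.
 *
 *   // On NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED (event data 'e'):
 *   struct NvKmsDeclareDynamicDpyInterestParams params;
 *   memset(&params, 0, sizeof(params));
 *   params.request.deviceHandle = e->deviceHandle;
 *   params.request.dispHandle = e->dispHandle;
 *   params.request.dpyId = e->dpyId;
 *   params.request.interest = NV_TRUE;
 *   if (nvKmsIoctl(fd, NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, &params)) {
 *       // Start tracking the dpy in client bookkeeping.
 *   } else {
 *       // The dynamic dpy was already removed; do not start tracking it.
 *   }
 *
 *   // On NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED: stop tracking the dpy,
 *   // then issue the same request with params.request.interest = NV_FALSE.
 */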
+ */ + +struct NvKmsDeclareDynamicDpyInterestRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + NvBool interest; +}; + +struct NvKmsDeclareDynamicDpyInterestReply { + NvU32 padding; +}; + +struct NvKmsDeclareDynamicDpyInterestParams { + struct NvKmsDeclareDynamicDpyInterestRequest request; /*! in */ + struct NvKmsDeclareDynamicDpyInterestReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_{,UN}REGISTER_SURFACE: Register and unregister an + * RM-allocated surface with NVKMS. + * + * A surface must be registered with NVKMS before NVKMS can display + * it. Note that NVKMS will create its own RM object for the registered + * surface. The surface will not be freed by resman until the surface + * is unregistered by the client. + */ + +struct NvKmsRegisterSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + + /*! + * Surfaces can be specified either by file descriptor or by + * (rmClient, rmObject) tuple. useFd indicates which is specified + * in this request. Userspace clients are required to specify surface by + * file descriptor. + */ + NvBool useFd; + + /*! + * The RM client handle that was used to allocate the surface. + * NVKMS will use this as the hClientSrc argument to + * NvRmDupObject(). Only used when useFd is FALSE. + */ + NvU32 rmClient; + + /* + * For multi-plane formats, clients are free to use one memory allocation + * for all planes, or a separate memory allocation per plane: + * - For the first usecase, 'rmObject'/'fd' and 'rmObjectSizeInBytes' + * should be the same for all planes, and each plane should have a + * different 'offset'. + * - For the second usecase, 'rmObject'/'fd' should be different for each + * plane. + * + * The 'planes' array is indexed as follows: + * - For RGB and YUV packed formats, 'planes[0]' refers to the single plane + * that's used for these formats. + * - For YUV semi-planar formats, 'planes[0]' refers to the Y-plane and + * 'planes[1]' refers to the UV-plane. + * - For YUV planar formats, 'planes[0]' refers to the Y-plane, 'planes[1]' + * refers to the U plane, and 'planes[2]' refers to the V plane. + */ + struct { + + union { + NvU32 rmObject; /* RM memory handle */ + NvS32 fd; /* file descriptor describing memory */ + } u; + + /* + * This byte offset will be added to the base address of the RM memory + * allocation, and determines the starting address of this plane within + * that allocation. This offset must be 1KB-aligned. + */ + NvU64 offset NV_ALIGN_BYTES(8); + + /* + * If the surface layout is NvKmsSurfaceMemoryLayoutPitch, then + * 'pitch' should be the pitch of this plane in bytes, and must + * have an alignment of 256 bytes. If the surface layout is + * NvKmsSurfaceMemoryLayoutBlockLinear, then 'pitch' should be the + * pitch of this plane in _blocks_. Blocks are always 64 bytes + * wide. + */ + NvU32 pitch; + + /* + * This is the size of the entire RM memory allocation pointed to by + * rmObject or fd prior to taking the offset into account. This is + * _not_ always the size of this plane since a single RM memory + * allocation can contain multiple planes, and we're also not taking + * the offset into account. + */ + NvU64 rmObjectSizeInBytes NV_ALIGN_BYTES(8); + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + NvU32 widthInPixels; + NvU32 heightInPixels; + + enum NvKmsSurfaceMemoryLayout layout; + enum NvKmsSurfaceMemoryFormat format; + + NvBool noDisplayHardwareAccess; + + /* + * If isoType == NVKMS_MEMORY_NISO, NVKMS will create CPU and GPU mappings + * for the surface memory. 
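/*
 * Illustrative sketch (not part of the NVKMS API) of registering a
 * single-plane, pitch-linear surface by file descriptor, as described above.
 * 'dmaBufFd', 'width', 'height', 'pitchBytes' (256-byte aligned) and
 * 'allocSizeBytes' are assumed client-side values, 'surfaceFormat' is a value
 * from enum NvKmsSurfaceMemoryFormat, and nvKmsIoctl() is an assumed wrapper;
 * error handling is omitted.
 *
 *   struct NvKmsRegisterSurfaceParams params;
 *
 *   memset(&params, 0, sizeof(params));
 *   params.request.deviceHandle = deviceHandle;
 *   params.request.useFd = NV_TRUE;
 *   params.request.planes[0].u.fd = dmaBufFd;
 *   params.request.planes[0].offset = 0;
 *   params.request.planes[0].pitch = pitchBytes;
 *   params.request.planes[0].rmObjectSizeInBytes = allocSizeBytes;
 *   params.request.widthInPixels = width;
 *   params.request.heightInPixels = height;
 *   params.request.layout = NvKmsSurfaceMemoryLayoutPitch;
 *   params.request.format = surfaceFormat;
 *
 *   nvKmsIoctl(fd, NVKMS_IOCTL_REGISTER_SURFACE, &params);
 *
 *   // params.reply.surfaceHandle can now be used in flip and modeset
 *   // requests, and must eventually be unregistered with
 *   // NVKMS_IOCTL_UNREGISTER_SURFACE.
 */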
+ */ + NvKmsMemoryIsoType isoType; + + NvU32 log2GobsPerBlockY; +}; + +struct NvKmsRegisterSurfaceReply { + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsRegisterSurfaceParams { + struct NvKmsRegisterSurfaceRequest request; /*! in */ + struct NvKmsRegisterSurfaceReply reply; /*! out */ +}; + +struct NvKmsUnregisterSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsUnregisterSurfaceReply { + NvU32 padding; +}; + +struct NvKmsUnregisterSurfaceParams { + struct NvKmsUnregisterSurfaceRequest request; /*! in */ + struct NvKmsUnregisterSurfaceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_SURFACE: + * NVKMS_IOCTL_ACQUIRE_SURFACE: + * NVKMS_IOCTL_RELEASE_SURFACE: + * + * An NVKMS client can "grant" a registered surface to another NVKMS + * client through the following steps: + * + * - The granting NVKMS client should open /dev/nvidia-modeset, and + * call NVKMS_IOCTL_GRANT_SURFACE to associate an NvKmsSurfaceHandle + * with the file descriptor. + * + * - The granting NVKMS client should pass the file descriptor over a + * UNIX domain socket to one or more clients who should acquire the + * surface. + * + * - The granting NVKMS client can optionally close the file + * descriptor now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_ACQUIRE_SURFACE, + * and pass in the file descriptor it received. This returns an + * NvKmsSurfaceHandle that the acquiring client can use to refer to + * the surface in any other NVKMS API call that takes an + * NvKmsSurfaceHandle. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_RELEASE_SURFACE to + * release it when they are done with the surface. + * + * - When the granting client unregisters the surface, it is + * "orphaned": NVKMS will flip away from the surface if necessary, + * the RM surface allocation is unduped, and the surface is + * unregistered from EVO. But, the acquiring clients will continue + * to hold a reference to this orphaned surface until they release + * it. + * + * Notes: + * + * - It is an error to call NVKMS_IOCTL_GRANT_SURFACE more than once + * on a /dev/nvidia-modeset file descriptor, or to use a file + * descriptor other than one created by opening /dev/nvidia-modeset, + * or to use a file descriptor that was previously used as the first + * argument to ioctl(2). + * + * - The special handling of surfaces when the granting client + * unregisters the surface might be a little asymmetric. However, + * this strikes a balance between: + * + * (a) Making sure modesetting NVKMS clients can free memory when + * they intend to. + * + * (b) Making sure acquiring clients don't get a stale view of their + * surface handle namespace: if the surface were completely + * unregistered out from under them, the surface handle could be + * recycled without them knowing. If they later attempted to + * release the original surface, they could inadvertently release a + * different surface that happened to have the recycled handle. + * + * - Do we need an NVKMS_IOCTL_REVOKE_SURFACE? Or is the + * automatic-unregistration-in-acquiring-clients behavior + * sufficient? + */ + +struct NvKmsGrantSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; + int fd; +}; + +struct NvKmsGrantSurfaceReply { + NvU32 padding; +}; + +struct NvKmsGrantSurfaceParams { + struct NvKmsGrantSurfaceRequest request; /*! in */ + struct NvKmsGrantSurfaceReply reply; /*! 
out */ +}; + +struct NvKmsAcquireSurfaceRequest { + int fd; +}; + +struct NvKmsAcquireSurfaceReply { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsAcquireSurfaceParams { + struct NvKmsAcquireSurfaceRequest request; /*! in */ + struct NvKmsAcquireSurfaceReply reply; /*! out */ +}; + +struct NvKmsReleaseSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsReleaseSurfaceReply { + NvU32 padding; +}; + +struct NvKmsReleaseSurfaceParams { + struct NvKmsReleaseSurfaceRequest request; /*! in */ + struct NvKmsReleaseSurfaceReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_DPY_ATTRIBUTE: + * NVKMS_IOCTL_GET_DPY_ATTRIBUTE: + * NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES: + * + * Dpys have several attributes that can be queried and set. + * + * An attribute has a type (defined by NvKmsAttributeType), read/write + * permissions, and potentially other descriptions of its valid + * values. Use NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES to get the + * valid values of an attribute. + */ + +enum NvKmsAttributeType { + NV_KMS_ATTRIBUTE_TYPE_INTEGER = 0, + NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + NV_KMS_ATTRIBUTE_TYPE_INTBITS, + NV_KMS_ATTRIBUTE_TYPE_RANGE, + NV_KMS_ATTRIBUTE_TYPE_BITMASK, + NV_KMS_ATTRIBUTE_TYPE_DPY_ID, + NV_KMS_ATTRIBUTE_TYPE_DPY_ID_LIST, +}; + +enum NvKmsDpyAttribute { + NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS = 0, + NV_KMS_DPY_ATTRIBUTE_SCANLINE, + NV_KMS_DPY_ATTRIBUTE_HEAD, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_DEFAULT, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG, + /* + * XXX NVKMS TODO: Delete UPDATE_GLS_FRAMELOCK; this event-only + * attribute is a kludge to tell GLS about a change in framelock + * configuration made by NVKMS. Eventually, NVKMS should manage + * framelock itself and GLS shouldn't need to be notified. + * + * Note that the event data reports two boolean values: enable + * (bit 0) and server (bit 1). + */ + NV_KMS_DPY_ATTRIBUTE_UPDATE_GLS_FRAMELOCK, + NV_KMS_DPY_ATTRIBUTE_RASTER_LOCK, + NV_KMS_DPY_ATTRIBUTE_UPDATE_FLIPLOCK, + NV_KMS_DPY_ATTRIBUTE_UPDATE_STEREO, + NV_KMS_DPY_ATTRIBUTE_DPMS, + NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE, + + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_IS_MULTISTREAM, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING attribute. */ +enum NvKmsDpyAttributeRequestedDitheringValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE attribute. 
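/*
 * Illustrative sketch (not part of the NVKMS API) of the surface granting
 * flow described earlier (NVKMS_IOCTL_GRANT_SURFACE followed by
 * NVKMS_IOCTL_ACQUIRE_SURFACE in another client).  The nvKmsIoctl() wrapper
 * and the NVKMS file descriptors 'nvKmsFd' (granting client) and 'clientFd'
 * (acquiring client) are assumptions for the example; error handling and the
 * UNIX domain socket plumbing are omitted.
 *
 *   // Granting client:
 *   int grantFd = open("/dev/nvidia-modeset", O_RDWR);
 *   struct NvKmsGrantSurfaceParams grant;
 *   memset(&grant, 0, sizeof(grant));
 *   grant.request.deviceHandle = deviceHandle;
 *   grant.request.surfaceHandle = surfaceHandle;
 *   grant.request.fd = grantFd;
 *   nvKmsIoctl(nvKmsFd, NVKMS_IOCTL_GRANT_SURFACE, &grant);
 *   // Pass grantFd to the acquiring client over a UNIX domain socket
 *   // (SCM_RIGHTS); the granting client may then close grantFd.
 *
 *   // Acquiring client ('receivedFd' is the fd received via SCM_RIGHTS):
 *   struct NvKmsAcquireSurfaceParams acquire;
 *   memset(&acquire, 0, sizeof(acquire));
 *   acquire.request.fd = receivedFd;
 *   nvKmsIoctl(clientFd, NVKMS_IOCTL_ACQUIRE_SURFACE, &acquire);
 *   // acquire.reply.surfaceHandle names the surface in this client's handle
 *   // namespace; release it later with NVKMS_IOCTL_RELEASE_SURFACE.
 */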
*/ +enum NvKmsDpyAttributeRequestedDitheringModeValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2 = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2 = 2, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE attribute. */ +enum NvKmsDpyAttributeCurrentDitheringModeValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2 = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2 = 2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH attribute. */ +enum NvKmsDpyAttributeRequestedDitheringDepthValue { + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO = 0, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS = 1, + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH attribute. */ +enum NvKmsDpyAttributeCurrentDitheringDepthValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE attribute. */ +enum NvKmsDpyAttributeCurrentColorSpaceValue { + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB = 0, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 = 1, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 = 2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL attribute. */ +enum NvKmsDpyAttributeDigitalSignalValue { + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS = 0, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS = 1, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DISPLAYPORT = 2, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_HDMI_FRL = 3, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DSI = 4, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE attribute. */ +enum NvKmsDpyAttributeDigitalLinkTypeValue { + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE = 0, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL = 1, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_QUAD = 3, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG attribute. */ +enum NvKmsDpyAttributeFrameLockDisplayConfigValue { + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED = 0, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT = 1, + NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER = 2, +}; + +/*! Values for the NV_KMS_DPY_ATTRIBUTE_DPMS attribute. */ +enum NvKmsDpyAttributeDpmsValue { + NV_KMS_DPY_ATTRIBUTE_DPMS_ON, + NV_KMS_DPY_ATTRIBUTE_DPMS_STANDBY, + NV_KMS_DPY_ATTRIBUTE_DPMS_SUSPEND, + NV_KMS_DPY_ATTRIBUTE_DPMS_OFF, +}; + +/*! 
Values for the NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE attribute */ +enum NvKmsDpyAttributeDisplayportConnectorTypeValue { + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN = 0, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DISPLAYPORT, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_HDMI, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DVI, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_VGA, +}; + +struct NvKmsSetDpyAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetDpyAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetDpyAttributeParams { + struct NvKmsSetDpyAttributeRequest request; /*! in */ + struct NvKmsSetDpyAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetDpyAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; +}; + +struct NvKmsGetDpyAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetDpyAttributeParams { + struct NvKmsGetDpyAttributeRequest request; /*! in */ + struct NvKmsGetDpyAttributeReply reply; /*! out */ +}; + + +struct NvKmsAttributeValidValuesCommonReply { + NvBool readable; + NvBool writable; + enum NvKmsAttributeType type; + union { + struct { + NvS64 min NV_ALIGN_BYTES(8); + NvS64 max NV_ALIGN_BYTES(8); + } range; /*! Used when type == NV_KMS_ATTRIBUTE_TYPE_RANGE. */ + struct { + NvU32 ints; + } bits; /*! Used when type == NV_KMS_ATTRIBUTE_TYPE_INTBITS. */ + } u; +}; + +struct NvKmsGetDpyAttributeValidValuesRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; +}; + +struct NvKmsGetDpyAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + + +struct NvKmsGetDpyAttributeValidValuesParams { + struct NvKmsGetDpyAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetDpyAttributeValidValuesReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_DISP_ATTRIBUTE: + * NVKMS_IOCTL_GET_DISP_ATTRIBUTE: + * NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES: + */ + + +enum NvKmsDispAttribute { + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK = 0, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SYNC, + NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TEST_SIGNAL, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_RESET, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SET_SWAP_BARRIER, + NV_KMS_DISP_ATTRIBUTE_ALLOW_FLIPLOCK, + NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG, +}; + + +struct NvKmsSetDispAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetDispAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetDispAttributeParams { + struct NvKmsSetDispAttributeRequest request; /*! in */ + struct NvKmsSetDispAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetDispAttributeRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; +}; + +struct NvKmsGetDispAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetDispAttributeParams { + struct NvKmsGetDispAttributeRequest request; /*! in */ + struct NvKmsGetDispAttributeReply reply; /*! 
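/*
 * Illustrative sketch (not part of the NVKMS API) of the dpy attribute flow
 * described earlier: query the valid values for an attribute with
 * NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES, then set it with
 * NVKMS_IOCTL_SET_DPY_ATTRIBUTE.  The nvKmsIoctl() wrapper and the handle
 * values are assumptions for the example; error handling is omitted.
 *
 *   struct NvKmsGetDpyAttributeValidValuesParams valid;
 *   struct NvKmsSetDpyAttributeParams set;
 *
 *   memset(&valid, 0, sizeof(valid));
 *   valid.request.deviceHandle = deviceHandle;
 *   valid.request.dispHandle = dispHandle;
 *   valid.request.dpyId = dpyId;
 *   valid.request.attribute = NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING;
 *   nvKmsIoctl(fd, NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES, &valid);
 *
 *   if (valid.reply.common.writable) {
 *       memset(&set, 0, sizeof(set));
 *       set.request.deviceHandle = deviceHandle;
 *       set.request.dispHandle = dispHandle;
 *       set.request.dpyId = dpyId;
 *       set.request.attribute = NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING;
 *       set.request.value =
 *           NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED;
 *       nvKmsIoctl(fd, NVKMS_IOCTL_SET_DPY_ATTRIBUTE, &set);
 *   }
 */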
out */ +}; + + +struct NvKmsGetDispAttributeValidValuesRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + enum NvKmsDispAttribute attribute; +}; + +struct NvKmsGetDispAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + +struct NvKmsGetDispAttributeValidValuesParams { + struct NvKmsGetDispAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetDispAttributeValidValuesReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_FRAMELOCK: Query information about a framelock + * device. + */ + +struct NvKmsQueryFrameLockRequest { + NvKmsFrameLockHandle frameLockHandle; +}; + +struct NvKmsQueryFrameLockReply { + NvU32 gpuIds[NVKMS_MAX_GPUS_PER_FRAMELOCK]; +}; + +struct NvKmsQueryFrameLockParams { + struct NvKmsQueryFrameLockRequest request; /*! in */ + struct NvKmsQueryFrameLockReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE: + * NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE: + * NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES: + */ + +enum NvKmsFrameLockAttribute { + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_INTERVAL, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE, + NV_KMS_FRAMELOCK_ATTRIBUTE_FPGA_REVISION, + NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MAJOR_VERSION, + NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MINOR_VERSION, + NV_KMS_FRAMELOCK_ATTRIBUTE_BOARD_ID, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY_RESOLUTION, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4, + NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY attribute. */ +enum NvKmsFrameLockAttributePolarityValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE = 0x2, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES = 0x3, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE attribute. */ +enum NvKmsFrameLockAttributeHouseSyncModeValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT = 0x2, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED attribute. */ +enum NvKmsFrameLockAttributeEthernetDetectedValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_NONE = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT0 = 0x1, + NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT1 = 0x2, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE attribute. */ +enum NvKmsFrameLockAttributeVideoModeValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_TTL = 1, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_BI_LEVEL = 2, + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL = 3, +}; + +/*! Values for the NV_KMS_FRAMELOCK_ATTRIBUTE_PORT[01]_STATUS attributes. 
*/ +enum NvKmsFrameLockAttributePortStatusValue { + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT = 0, + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT = 1, +}; + +struct NvKmsSetFrameLockAttributeRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetFrameLockAttributeReply { + NvU32 padding; +}; + +struct NvKmsSetFrameLockAttributeParams { + struct NvKmsSetFrameLockAttributeRequest request; /*! in */ + struct NvKmsSetFrameLockAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetFrameLockAttributeRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; +}; + +struct NvKmsGetFrameLockAttributeReply { + NvS64 value NV_ALIGN_BYTES(8); +}; + +struct NvKmsGetFrameLockAttributeParams { + struct NvKmsGetFrameLockAttributeRequest request; /*! in */ + struct NvKmsGetFrameLockAttributeReply reply; /*! out */ +}; + + +struct NvKmsGetFrameLockAttributeValidValuesRequest { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; +}; + +struct NvKmsGetFrameLockAttributeValidValuesReply { + struct NvKmsAttributeValidValuesCommonReply common; +}; + +struct NvKmsGetFrameLockAttributeValidValuesParams { + struct NvKmsGetFrameLockAttributeValidValuesRequest request; /*! in */ + struct NvKmsGetFrameLockAttributeValidValuesReply reply; /*! out */ +}; + + + +/*! + * NVKMS_IOCTL_GET_NEXT_EVENT, NVKMS_IOCTL_DECLARE_EVENT_INTEREST: + * Event handling. + * + * Clients should call NVKMS_IOCTL_DECLARE_EVENT_INTEREST to indicate + * the events in which they are interested. Then, block on poll(2) or + * select(2) until there are events available to read on the file + * descriptor. + * + * When events are available, the client should call + * NVKMS_IOCTL_GET_NEXT_EVENT to get an NvKmsEvent structure, and + * interpret the union based on eventType. + * + * Clients can remove interest for events by calling + * NVKMS_IOCTL_DECLARE_EVENT_INTEREST again, specifying a new + * interestMask. + * + * Note that there may still be events queued for the client when the + * client calls NVKMS_IOCTL_DECLARE_EVENT_INTEREST to change its + * interestMask. So, clients should be prepared to ignore unexpected + * events after calling NVKMS_IOCTL_DECLARE_EVENT_INTEREST. + */ + + + +/*! + * NVKMS_EVENT_TYPE_DPY_CHANGED + * + * When a dpy changes, this event will be generated. The client + * should call NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA to get an updated + * NvKmsQueryDpyDynamicDataReply. + */ + +struct NvKmsEventDpyChanged { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED + * + * When a dynamic dpy is connected, this event will be generated. + */ + +struct NvKmsEventDynamicDpyConnected { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED + * + * When a dynamic dpy is disconnected, this event will be generated. + */ + +struct NvKmsEventDynamicDpyDisconnected { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + + +/*! + * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED + * + * When a dpy attribute changes, this event will be generated. + */ + +struct NvKmsEventDpyAttributeChanged { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsDpyAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + + +/*! 
+ * NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED + * + * When a framelock attribute changes, this event will be generated. + */ + +struct NvKmsEventFrameLockAttributeChanged { + NvKmsFrameLockHandle frameLockHandle; + enum NvKmsFrameLockAttribute attribute; + NvS64 value NV_ALIGN_BYTES(8); +}; + + +/*! + * NVKMS_EVENT_TYPE_FLIP_OCCURRED + * + * When a client requests a flip and specifies a completion notifier + * with NvKmsCompletionNotifierDescription::awaken == TRUE, this event + * will be generated. This event is only delivered to clients with + * flipping permission. + */ + +struct NvKmsEventFlipOccurred { + NvKmsDeviceHandle deviceHandle; + /* XXX NVKMS TODO: the dispHandle is currently hard-coded to 0. */ + NvKmsDispHandle dispHandle; + NvU32 head; + NvU32 layer; +}; + + +struct NvKmsEvent { + enum NvKmsEventType eventType; + union { + struct NvKmsEventDpyChanged dpyChanged; + struct NvKmsEventDynamicDpyConnected dynamicDpyConnected; + struct NvKmsEventDynamicDpyDisconnected dynamicDpyDisconnected; + struct NvKmsEventDpyAttributeChanged dpyAttributeChanged; + struct NvKmsEventFrameLockAttributeChanged frameLockAttributeChanged; + struct NvKmsEventFlipOccurred flipOccurred; + } u; +}; + + +struct NvKmsGetNextEventRequest { + NvU32 padding; +}; + +struct NvKmsGetNextEventReply { + /*! + * If an event is available, valid = TRUE and the NvKmsEvent + * contains the event. If no event is available, valid = FALSE. + */ + NvBool valid; + struct NvKmsEvent event; +}; + +struct NvKmsGetNextEventParams { + struct NvKmsGetNextEventRequest request; /*! in */ + struct NvKmsGetNextEventReply reply; /*! out */ +}; + + +struct NvKmsDeclareEventInterestRequest { + /*! + * Mask of event types, where each event type is indicated by (1 + * << NVKMS_EVENT_TYPE_). + */ + NvU32 interestMask; +}; + +struct NvKmsDeclareEventInterestReply { + NvU32 padding; +}; + +struct NvKmsDeclareEventInterestParams { + struct NvKmsDeclareEventInterestRequest request; /*! in */ + struct NvKmsDeclareEventInterestReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_CLEAR_UNICAST_EVENT + * + * The events generated through NVKMS_IOCTL_DECLARE_EVENT_INTEREST and + * NVKMS_IOCTL_GET_NEXT_EVENT are most useful for system-wide events which + * multiple clients may be interested in. Clients declare their interest in a + * collection of event types, and when they are notified that some number of + * events arrived, they have to query the events from the event queue. + * + * In contrast, "Unicast Events" are for use in cases where a client is only + * interested in a particular type of event on a particular object. + * + * To use a Unicast Event: + * + * - Create an fd through nvKmsOpen(). + * + * - Do _not_ use the fd for anything else (the first argument to ioctl(2), the + * fd in any of the granting APIs such as NvKmsGrantSurfaceParams::request:fd, + * etc). + * + * - Pass the fd into an API that allows a unicast event. E.g., + * NvKmsJoinSwapGroupParams::request::member::unicastEvent::fd + * + * - Clear the unicast event with NVKMS_IOCTL_CLEAR_UNICAST_EVENT. + * + * - Check if the event arrived; if it hasn't, then wait for the event through + * poll(2) or select(2). + */ + +struct NvKmsClearUnicastEventRequest { + int unicastEventFd; +}; + +struct NvKmsClearUnicastEventReply { + NvU32 padding; +}; + +struct NvKmsClearUnicastEventParams { + struct NvKmsClearUnicastEventRequest request; /*! in */ + struct NvKmsClearUnicastEventReply reply; /*! out */ +}; + + +/*! 
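/*
 * Illustrative sketch (not part of the NVKMS API) of the event handling flow
 * described earlier: declare interest in a set of event types, block in
 * poll(2) on the NVKMS file descriptor, and drain the queue with
 * NVKMS_IOCTL_GET_NEXT_EVENT.  The nvKmsIoctl() wrapper, the use of POLLIN,
 * and the HandleDpyChanged() handler are assumptions for the example; error
 * handling is omitted.
 *
 *   struct NvKmsDeclareEventInterestParams interest;
 *   struct NvKmsGetNextEventParams next;
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *   memset(&interest, 0, sizeof(interest));
 *   interest.request.interestMask =
 *       (1 << NVKMS_EVENT_TYPE_DPY_CHANGED) |
 *       (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) |
 *       (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED);
 *   nvKmsIoctl(fd, NVKMS_IOCTL_DECLARE_EVENT_INTEREST, &interest);
 *
 *   while (1) {
 *       poll(&pfd, 1, -1);
 *
 *       for (;;) {
 *           memset(&next, 0, sizeof(next));
 *           nvKmsIoctl(fd, NVKMS_IOCTL_GET_NEXT_EVENT, &next);
 *           if (!next.reply.valid) {
 *               break;
 *           }
 *           switch (next.reply.event.eventType) {
 *           case NVKMS_EVENT_TYPE_DPY_CHANGED:
 *               // e.g. re-query via NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA
 *               HandleDpyChanged(&next.reply.event.u.dpyChanged);
 *               break;
 *           default:
 *               break;
 *           }
 *       }
 *   }
 */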
+ * NVKMS_IOCTL_SET_LAYER_POSITION: Set the position of the layer + * for the specified heads on the specified disps. The layer + * position is in "desktop coordinate space", i.e., relative to the + * upper left corner of the input viewport. + * + * Note that this is only valid if + * NvKmsAllocDeviceReply::layerCaps[layer].supportsWindowMode is TRUE. + */ +struct NvKmsSetLayerPositionRequest { + NvKmsDeviceHandle deviceHandle; + + /*! + * The bitmask of which indices within disp[] describe requested + * configuration changes. Any other disps will use their existing + * configuration. + */ + NvU32 requestedDispsBitMask; + + struct { + /*! + * The bitmask of which head[] elements to look at on this + * disp; any other head will use its existing configuration. + */ + NvU32 requestedHeadsBitMask; + + struct { + struct NvKmsSignedPoint layerPosition[NVKMS_MAX_LAYERS_PER_HEAD]; + /*! + * The bitmask of which layerPosition[] elements to look at on this + * head; any other layer will use its existing configuration. + */ + NvU32 requestedLayerBitMask; + } head[NVKMS_MAX_HEADS_PER_DISP]; + + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsSetLayerPositionReply { + NvU32 padding; +}; + +struct NvKmsSetLayerPositionParams { + struct NvKmsSetLayerPositionRequest request; /*! in */ + struct NvKmsSetLayerPositionReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRAB_OWNERSHIP: + * NVKMS_IOCTL_RELEASE_OWNERSHIP: + * + * NVKMS_IOCTL_GRAB_OWNERSHIP notifies NVKMS that the calling client wants to + * control modesets on the device, and NVKMS_IOCTL_RELEASE_OWNERSHIP indicates + * that the modeset ownership should be released and the VT console mode + * restored. + * + * It is not necessary to call NVKMS_IOCTL_RELEASE_OWNERSHIP during shutdown; + * NVKMS will implicitly clear modeset ownership in nvKmsClose(). + * + * Releasing modeset ownership enables console hotplug handling. See the + * explanation in the comment for enableConsoleHotplugHandling above. + */ + +struct NvKmsGrabOwnershipRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsGrabOwnershipReply { + NvU32 padding; +}; + +struct NvKmsGrabOwnershipParams { + struct NvKmsGrabOwnershipRequest request; /*! in */ + struct NvKmsGrabOwnershipReply reply; /*! out */ +}; + +struct NvKmsReleaseOwnershipRequest { + NvKmsDeviceHandle deviceHandle; +}; + +struct NvKmsReleaseOwnershipReply { + NvU32 padding; +}; + +struct NvKmsReleaseOwnershipParams { + struct NvKmsReleaseOwnershipRequest request; /*! in */ + struct NvKmsReleaseOwnershipReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_PERMISSIONS: + * NVKMS_IOCTL_ACQUIRE_PERMISSIONS: + * NVKMS_IOCTL_REVOKE_PERMISSIONS: + * + * By default, only the modeset owning NVKMS client (the one who + * successfully called NVKMS_IOCTL_GRAB_OWNERSHIP) is allowed to flip + * or set modes. + * + * However, the modeset owner can grant various permissions to other + * clients through the following steps: + * + * - The modeset owner should open /dev/nvidia-modeset, and call + * NVKMS_IOCTL_GRANT_PERMISSIONS to define a set of permissions + * associated with the file descriptor. + * + * - The modeset owner should pass the file descriptor over a UNIX + * domain socket to one or more clients who should acquire these + * permissions. + * + * - The modeset owner can optionally close the file descriptor now or + * later. 
+ * + * - The acquiring clients should call NVKMS_IOCTL_ACQUIRE_PERMISSIONS + * and pass in the file descriptor they received, to update their + * client connection to include the permissions specified by the modeset + * owner in the first bullet. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. + * + * - From this point forward, both the modeset owner and the clients + * are allowed to perform the actions allowed by the granted + * permissions. + * + * - The modeset owner can optionally revoke any previously granted + * permissions with NVKMS_IOCTL_REVOKE_PERMISSIONS. + * + * Notes: + * + * - NVKMS_IOCTL_REVOKE_PERMISSIONS has device-scope. It could be + * made finer-grained (e.g., take the file descriptor that was used + * to grant permissions) if that were needed. + * + * - NvKmsPermissions::disp[n] corresponds to the disp named by + * NvKmsAllocDeviceReply::dispHandles[n]. + * + * - It is an error to call NVKMS_IOCTL_GRANT_PERMISSIONS more than + * once on a /dev/nvidia-modeset file descriptor, or to use a file + * descriptor other than one created by opening /dev/nvidia-modeset, + * or to use a file descriptor that was previously used as the first + * argument to ioctl(2). + * + * - Calling NVKMS_IOCTL_ACQUIRE_PERMISSIONS more than once on the + * same NVKMS client will cause the new permissions for that client + * to be the union of the previous permissions and the latest + * permissions being acquired. + */ + +enum NvKmsPermissionsType { + NV_KMS_PERMISSIONS_TYPE_FLIPPING = 1, + NV_KMS_PERMISSIONS_TYPE_MODESET = 2, +}; + +struct NvKmsFlipPermissions { + struct { + struct { + /* + * Bitmask of flippable layers, where each layer is + * indicated by '1 << layer'. It is an error for bits + * above NVKMS_MAX_LAYERS_PER_HEAD to be set. + * + * Only applicable when type==FLIPPING. + */ + NvU8 layerMask; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsModesetPermissions { + struct { + struct { + /* + * A list of dpys which a particular NVKMS client is + * allowed to use when performing a modeset on this head. + * + * If the NVKMS client is not allowed to set a mode on + * this head, this list will be empty. + * + * If an NVKMS client can drive the head without + * restrictions, this will be nvAllDpyIdList(). + * + * Only applicable when type==MODESET. + */ + NVDpyIdList dpyIdList; + } head[NVKMS_MAX_HEADS_PER_DISP]; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsPermissions { + enum NvKmsPermissionsType type; + union { + struct NvKmsFlipPermissions flip; + struct NvKmsModesetPermissions modeset; + }; +}; + +struct NvKmsGrantPermissionsRequest { + int fd; + NvKmsDeviceHandle deviceHandle; + struct NvKmsPermissions permissions; +}; + +struct NvKmsGrantPermissionsReply { + NvU32 padding; +}; + +struct NvKmsGrantPermissionsParams { + struct NvKmsGrantPermissionsRequest request; /*! in */ + struct NvKmsGrantPermissionsReply reply; /*! out */ +}; + +struct NvKmsAcquirePermissionsRequest { + int fd; +}; + +struct NvKmsAcquirePermissionsReply { + /*! This client's handle for the device which acquired new permissions */ + NvKmsDeviceHandle deviceHandle; + + /*! + * The acquired permissions. + * + * If permissions::type == FLIPPING, the new combined flipping + * permissions of the calling client on this device, including + * prior permissions and permissions added by this operation. 
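/*
 * Illustrative sketch (not part of the NVKMS API) of the permission granting
 * flow described above, with the modeset owner granting flipping permission
 * for layer 0 of head 0 on disp 0.  The nvKmsIoctl() wrapper and the NVKMS
 * file descriptors 'ownerFd' (modeset owner) and 'clientFd' (acquiring
 * client) are assumptions for the example; error handling and the UNIX
 * domain socket plumbing are omitted.
 *
 *   // Modeset owner:
 *   int permFd = open("/dev/nvidia-modeset", O_RDWR);
 *   struct NvKmsGrantPermissionsParams grant;
 *   memset(&grant, 0, sizeof(grant));
 *   grant.request.fd = permFd;
 *   grant.request.deviceHandle = deviceHandle;
 *   grant.request.permissions.type = NV_KMS_PERMISSIONS_TYPE_FLIPPING;
 *   grant.request.permissions.flip.disp[0].head[0].layerMask = NVBIT(0);
 *   nvKmsIoctl(ownerFd, NVKMS_IOCTL_GRANT_PERMISSIONS, &grant);
 *   // Pass permFd to the other client over a UNIX domain socket
 *   // (SCM_RIGHTS); the owner may then close permFd.
 *
 *   // Acquiring client ('receivedFd' is the fd received via SCM_RIGHTS):
 *   struct NvKmsAcquirePermissionsParams acquire;
 *   memset(&acquire, 0, sizeof(acquire));
 *   acquire.request.fd = receivedFd;
 *   nvKmsIoctl(clientFd, NVKMS_IOCTL_ACQUIRE_PERMISSIONS, &acquire);
 *   // This client may now flip layer 0 of head 0 on disp 0 of the device
 *   // named by acquire.reply.deviceHandle.
 */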
+ */ + struct NvKmsPermissions permissions; +}; + +struct NvKmsAcquirePermissionsParams { + struct NvKmsAcquirePermissionsRequest request; /*! in */ + struct NvKmsAcquirePermissionsReply reply; /*! out */ +}; + +struct NvKmsRevokePermissionsRequest { + NvKmsDeviceHandle deviceHandle; + + /* + * A bitmask of permission types to be revoked for this device. + * It should be the bitwise 'or' of one or more + * NVBIT(NV_KMS_PERMISSIONS_TYPE_*) values. + */ + NvU32 permissionsTypeBitmask; +}; + +struct NvKmsRevokePermissionsReply { + NvU32 padding; +}; + +struct NvKmsRevokePermissionsParams { + struct NvKmsRevokePermissionsRequest request; /*! in */ + struct NvKmsRevokePermissionsReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_QUERY_DPY_CRC32 + * + * Query last CRC32 value from the NVKMS disp head specified by the triplet + * (deviceHandle, dispHandle, head). + */ + +struct NvKmsQueryDpyCRC32Request { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; +}; + +/*! + * Generic CRC-structure type to represent CRC value obtained and if + * hardware architecture supports collection of the CRC type. If + * the CRC is not supported by hardware, its value is undefined. + */ +struct NvKmsDpyCRC32 { + /*! + * Value of the CRC. If it is not supported, value is undefined. + */ + NvU32 value; + + /*! + * Boolean which represents if hardware supports CRC collection + * If this boolean is FALSE, CRC hardware collection is not supported. + */ + NvBool supported; +}; + +/*! + * Reply structure that contains CRC32 values returned from hardware. + * Supported CRCs obtained are represented by supported boolean in crc struct + * Note- Crcs that are not supported will not be updated and will remain at 0 + */ +struct NvKmsQueryDpyCRC32Reply { + /*! + * CRC generated from the Compositor hardware + */ + struct NvKmsDpyCRC32 compositorCrc32; + + /*! + * CRC generated from the RG hardware, if head is driving RG/SF. + * Note that if Dithering is enabled, this CRC will vary across reads + * from the same frame. + */ + struct NvKmsDpyCRC32 rasterGeneratorCrc32; + + /*! + * Crc value generated from the target SF/OR depending on connector's OR type + * Note that if Dithering is enabled, this CRC will vary across reads + * from the same frame. + */ + struct NvKmsDpyCRC32 outputCrc32; + +}; + +struct NvKmsQueryDpyCRC32Params { + struct NvKmsQueryDpyCRC32Request request; /*! in */ + struct NvKmsQueryDpyCRC32Reply reply; /*! out */ +}; + +/*! + * User-space pointers are always passed to NVKMS in an NvU64. + * This user-space address is eventually passed into the platform's + * copyin/copyout functions, in a void* argument. + * + * This utility function converts from a pointer to an NvU64. + */ + +static inline NvU64 nvKmsPointerToNvU64(const void *ptr) +{ + return (NvU64)(NvUPtr)ptr; +} + + +/*! + * NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO: + * NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO: + * + * To make a request that is deferred until after a specific point in a client's + * graphics channel, a client should register a surface with NVKMS as a + * "deferred request fifo". The surface is interpreted as having the layout of + * struct NvKmsDeferredRequestFifo. + * + * To make deferred requests, the client should: + * + * - Write the NVKMS_DEFERRED_REQUEST_OPCODE for the desired operation to + * NvKmsDeferredRequestFifo::request[i], where 'i' is the next available + * element in the request[] array. Repeat as necessary. 
+ * + * - Push NV906F_SEMAPHORE[ABCD] methods in its graphics channel to write + * '(i + 1) % NVKMS_MAX_DEFERRED_REQUESTS' to + * NvKmsDeferredRequestFifo::put. + * + * - Push an NV906F_NON_STALL_INTERRUPT method in its graphics channel. + * + * NVKMS will be notified of the non-stall interrupt, and scan all clients' + * deferred request fifos for put != get. NVKMS will then perform the requests + * specified in request[get] through request[put-1]. Finally, NVKMS will update + * get to indicate how much of the fifo it consumed. + * + * Wrapping behaves as expected. In pseudo code: + * + * while (get != put) { + * do(request[get]); + * get = (get + 1) % NVKMS_MAX_DEFERRED_REQUESTS; + * } + * + * The only time it is safe for clients to write to get is when get == put and + * there are no outstanding semaphore releases to gpuPut. + * + * The surface used for the deferred request fifo must be: + * + * - In system memory (NVKMS will create one device-scoped mapping, not one per + * subdevice, as would be needed if the surface were in video memory). + * + * - At least as large as sizeof(NvKmsDeferredRequestFifo). + * + * Some NVKMS_DEFERRED_REQUESTs may need to write to a semaphore after some + * operation is performed (e.g., to indicate that a SwapGroup is ready, or that + * we've reached vblank). The NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX field + * within the request specifies a semaphore within the + * NvKmsDeferredRequestFifo::semaphore[] array. The semantics of that semaphore + * index are opcode-specific. + * + * The opcode and semaphore index are in the low 16-bits of the request. The + * upper 16-bits are opcode-specific. + */ + +#define NVKMS_MAX_DEFERRED_REQUESTS 128 + +#define NVKMS_DEFERRED_REQUEST_OPCODE 7:0 + +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX 15:8 + +#define NVKMS_DEFERRED_REQUEST_OPCODE_NOP 0 + +/* + * The SWAP_GROUP_READY request means that this NvKmsDeferredRequestFifo is + * ready for the next swap of the SwapGroup (see NVKMS_IOCTL_JOIN_SWAP_GROUP, + * below). NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX should specify an element in + * the semaphore[] array which will be released to + * + * NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY + * + * when the SwapGroup actually swaps. + */ +#define NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY 1 +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_NOT_READY 0x00000000 +#define NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY 0xFFFFFFFF + + +/* + * The SWAP_GROUP_READY_PER_EYE_STEREO field indicates whether this deferred + * request fifo wants the SwapGroup to present new content at every eye boundary + * (PER_EYE), or present new content only when transitioning from the right eye + * to the left eye (PER_PAIR). + */ +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO 16:16 +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR 0 +#define NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE 1 + + +struct NvKmsDeferredRequestFifo { + NvU32 put; + NvU32 get; + NvU32 request[NVKMS_MAX_DEFERRED_REQUESTS]; + NvGpuSemaphore semaphore[NVKMS_MAX_DEFERRED_REQUESTS]; +}; + +struct NvKmsRegisterDeferredRequestFifoRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSurfaceHandle surfaceHandle; +}; + +struct NvKmsRegisterDeferredRequestFifoReply { + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle; +}; + +struct NvKmsRegisterDeferredRequestFifoParams { + struct NvKmsRegisterDeferredRequestFifoRequest request; /*! 
in */ + struct NvKmsRegisterDeferredRequestFifoReply reply; /*! out */ +}; + +struct NvKmsUnregisterDeferredRequestFifoRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle; +}; + +struct NvKmsUnregisterDeferredRequestFifoReply { + NvU32 padding; +}; + +struct NvKmsUnregisterDeferredRequestFifoParams { + struct NvKmsUnregisterDeferredRequestFifoRequest request; /*! in */ + struct NvKmsUnregisterDeferredRequestFifoReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_ALLOC_SWAP_GROUP + * NVKMS_IOCTL_FREE_SWAP_GROUP + * + * An NVKMS client creates a SwapGroup by calling NVKMS_IOCTL_ALLOC_SWAP_GROUP + * and specifying the heads in the SwapGroup with + * NvKmsAllocSwapGroupRequest::disp[]::headMask. + * + * The SwapGroup can be shared with clients through + * NVKMS_IOCTL_GRANT_SWAP_GROUP, and it is destroyed once all clients that have + * acquired the swap group through NVKMS_IOCTL_ACQUIRE_SWAP_GROUP have released + * it through NVKMS_IOCTL_RELEASE_SWAP_GROUP and when the client that created + * the swap group has called NVKMS_IOCTL_FREE_SWAP_GROUP or freed the device. + * + * The SwapGroup allocation is expected to have a long lifetime (e.g., the X + * driver might call ALLOC_SWAP_GROUP from ScreenInit and FREE_SWAP_GROUP from + * CloseScreen). The point of these requests is to define the head topology of + * the SwapGroup (for X driver purposes, presumably all the heads that are + * assigned to the X screen). + * + * As such: + * + * - Not all heads described in the ALLOC_SWAP_GROUP request need to be active + * (they can come and go with different modesets). + * + * - The SwapGroup persists across modesets. + * + * - SwapGroup allocation is expected to be lightweight: the heavyweight + * operations like allocating and freeing headSurface resources are done when + * the number of SwapGroup members (see {JOIN,LEAVE}_SWAP_GROUP below) + * transitions between 0 and 1. + * + * Only an NVKMS modeset owner can alloc or free a SwapGroup. + */ + +struct NvKmsSwapGroupConfig { + struct { + NvU32 headMask; + } disp[NVKMS_MAX_SUBDEVICES]; +}; + +struct NvKmsAllocSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + struct NvKmsSwapGroupConfig config; +}; + +struct NvKmsAllocSwapGroupReply { + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsAllocSwapGroupParams { + struct NvKmsAllocSwapGroupRequest request; /*! in */ + struct NvKmsAllocSwapGroupReply reply; /*! out */ +}; + +struct NvKmsFreeSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsFreeSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsFreeSwapGroupParams { + struct NvKmsFreeSwapGroupRequest request; /*! in */ + struct NvKmsFreeSwapGroupReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_JOIN_SWAP_GROUP + * NVKMS_IOCTL_LEAVE_SWAP_GROUP + * + * Clients can join NvKmsDeferredRequestFifos to SwapGroups using + * NVKMS_IOCTL_JOIN_SWAP_GROUP, and remove NvKmsDeferredRequestFifos from + * SwapGroups using NVKMS_IOCTL_LEAVE_SWAP_GROUP (or freeing the + * NvKmsDeferredRequestFifo, or freeing the device). + * + * Once an NvKmsDeferredRequestFifo is joined to a SwapGroup, the SwapGroup will + * not become ready again until the SwapGroup member sends the + * NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY request through their + * NvKmsDeferredRequestFifo. 
The NVKMS_DEFERRED_REQUEST_SEMAPHORE_INDEX + * specified as part of the request indicates an index into + * NvKmsDeferredRequestFifo::semaphore[] where NVKMS will write + * + * NVKMS_DEFERRED_REQUEST_SEMAPHORE_VALUE_SWAP_GROUP_READY + * + * when the SwapGroup becomes ready. + * + * If unicastEvent::specified is TRUE, then unicastEvent::fd will be interpreted + * as a unicast event file descriptor. See NVKMS_IOCTL_CLEAR_UNICAST_EVENT for + * details. Whenever SWAP_GROUP_READY is written to a semaphore within + * NvKmsDeferredRequestFifo, the unicastEvent fd will be notified. + * + * An NvKmsDeferredRequestFifo can be joined to at most one SwapGroup at a time. + * + * If one client uses multiple NvKmsDeferredRequestFifos joined to multiple + * SwapGroups and wants to synchronize swaps between these fifos, it should + * bundle all of the (deviceHandle, swapGroupHandle, deferredRequestFifoHandle) + * tuples into a single join/leave request. + * + * If any client joins multiple NvKmsDeferredRequestFifos to multiple + * SwapGroups, all NVKMS_IOCTL_JOIN_SWAP_GROUP requests must specify the same + * set of SwapGroups. + */ + +struct NvKmsJoinSwapGroupRequestOneMember { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle; + + struct { + int fd; + NvBool specified; + } unicastEvent; +}; + +struct NvKmsJoinSwapGroupRequest { + NvU32 numMembers; + struct NvKmsJoinSwapGroupRequestOneMember member[NVKMS_MAX_SWAPGROUPS]; +}; + +struct NvKmsJoinSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsJoinSwapGroupParams { + struct NvKmsJoinSwapGroupRequest request; /*! in */ + struct NvKmsJoinSwapGroupReply reply; /*! out */ +}; + +struct NvKmsLeaveSwapGroupRequestOneMember { + NvKmsDeviceHandle deviceHandle; + NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle; +}; + +struct NvKmsLeaveSwapGroupRequest { + NvU32 numMembers; + struct NvKmsLeaveSwapGroupRequestOneMember member[NVKMS_MAX_SWAPGROUPS]; +}; + +struct NvKmsLeaveSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsLeaveSwapGroupParams { + struct NvKmsLeaveSwapGroupRequest request; /*! in */ + struct NvKmsLeaveSwapGroupReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_SET_SWAP_GROUP_CLIP_LIST + * + * The X driver needs to define which pixels on-screen are owned by the + * SwapGroup. NVKMS will use this to prevent those pixels from updating until + * all SwapGroup members indicate that they are ready. + * + * The clip list is interpreted by NVKMS as relative to the surface specified + * during a flip or modeset. The clip list is intersected with the ViewPortIn + * of the head, described by + * + * NvKmsFlipCommonParams::viewPortIn::point + * + * and + * + * NvKmsSetModeOneHeadRequest::viewPortSizeIn + * + * The clip list is exclusive. I.e., each NvKmsRect is a region outside of the + * SwapGroup. One surface-sized NvKmsRect would mean that there are no + * SwapGroup-owned pixels. + * + * When no clip list is specified, NVKMS behaves as if there were no + * SwapGroup-owned pixels. + * + * Only an NVKMS modeset owner can set the clip list of a SwapGroup. + */ + +struct NvKmsSetSwapGroupClipListRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; + + /*! The number of struct NvKmsRects pointed to by pClipList. */ + NvU16 nClips; + + /*! + * Pointer to an array of struct NvKmsRects describing the inclusive clip + * list for the SwapGroup. The NvKmsRects are in desktop coordinate space. 
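+ *
+ *   As an illustrative sketch (not part of the original comment), with a
+ *   hypothetical caller-owned array 'rects[]' holding 'n' rectangles:
+ *
+ *       struct NvKmsSetSwapGroupClipListParams params = { };
+ *
+ *       params.request.deviceHandle = deviceHandle;
+ *       params.request.swapGroupHandle = swapGroupHandle;
+ *       params.request.nClips = n;
+ *       params.request.pClipList = nvKmsPointerToNvU64(rects);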
+ * + * Use nvKmsPointerToNvU64() to assign pClipList. + */ + NvU64 pClipList NV_ALIGN_BYTES(8); +}; + +struct NvKmsSetSwapGroupClipListReply { + NvU32 padding; +}; + +struct NvKmsSetSwapGroupClipListParams { + struct NvKmsSetSwapGroupClipListRequest request; /*! in */ + struct NvKmsSetSwapGroupClipListReply reply; /*! out */ +}; + + +/*! + * NVKMS_IOCTL_GRANT_SWAP_GROUP: + * NVKMS_IOCTL_ACQUIRE_SWAP_GROUP: + * NVKMS_IOCTL_RELEASE_SWAP_GROUP: + * + * An NVKMS client can "grant" a swap group that it has allocated through + * NVKMS_IOCTL_ALLOC_SWAP_GROUP to another NVKMS client through the following + * steps: + * + * - The granting NVKMS client should open /dev/nvidia-modeset, and call + * NVKMS_IOCTL_GRANT_SWAP_GROUP to associate an NvKmsSwapGroupHandle + * with the file descriptor. + * + * - The granting NVKMS client should pass the file descriptor over a + * UNIX domain socket to one or more clients who should acquire the + * swap group. + * + * - The granting NVKMS client can optionally close the file + * descriptor now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_ACQUIRE_SWAP_GROUP, + * and pass in the file descriptor it received. This returns an + * NvKmsSwapGroupHandle that the acquiring client can use to refer to + * the swap group in any other NVKMS API call that takes an + * NvKmsSwapGroupHandle. + * + * - The acquiring clients can optionally close the file descriptor + * now or later. + * + * - Each acquiring client should call NVKMS_IOCTL_RELEASE_SWAP_GROUP to + * release it when they are done with the swap group. + */ + +struct NvKmsGrantSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; + int fd; +}; + +struct NvKmsGrantSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsGrantSwapGroupParams { + struct NvKmsGrantSwapGroupRequest request; /*! in */ + struct NvKmsGrantSwapGroupReply reply; /*! out */ +}; + +struct NvKmsAcquireSwapGroupRequest { + int fd; +}; + +struct NvKmsAcquireSwapGroupReply { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsAcquireSwapGroupParams { + struct NvKmsAcquireSwapGroupRequest request; /*! in */ + struct NvKmsAcquireSwapGroupReply reply; /*! out */ +}; + +struct NvKmsReleaseSwapGroupRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsSwapGroupHandle swapGroupHandle; +}; + +struct NvKmsReleaseSwapGroupReply { + NvU32 padding; +}; + +struct NvKmsReleaseSwapGroupParams { + struct NvKmsReleaseSwapGroupRequest request; /*! in */ + struct NvKmsReleaseSwapGroupReply reply; /*! out */ +}; + +/*! + * NVKMS_IOCTL_SWITCH_MUX: + * + * Switch the mux for the given Dpy in the given direction. The mux switch is + * performed in three stages. + */ + +enum NvKmsMuxOperation { + NVKMS_SWITCH_MUX_PRE, + NVKMS_SWITCH_MUX, + NVKMS_SWITCH_MUX_POST, +}; + +struct NvKmsSwitchMuxRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; + enum NvKmsMuxOperation operation; + NvMuxState state; +}; + +struct NvKmsSwitchMuxReply { + NvU32 padding; +}; + +struct NvKmsSwitchMuxParams { + struct NvKmsSwitchMuxRequest request; + struct NvKmsSwitchMuxReply reply; +}; + +struct NvKmsGetMuxStateRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NVDpyId dpyId; +}; + +struct NvKmsGetMuxStateReply { + NvMuxState state; +}; + +struct NvKmsGetMuxStateParams { + struct NvKmsGetMuxStateRequest request; + struct NvKmsGetMuxStateReply reply; +}; + +/*! 
+ * NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE: + * + * Export the VRR semaphore surface onto the provided RM 'memFd'. + * The RM memory FD should be "empty". An empty FD can be allocated by calling + * NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD with 'EMPTY_FD' set. + */ + +struct NvKmsExportVrrSemaphoreSurfaceRequest { + NvKmsDeviceHandle deviceHandle; + int memFd; +}; + +struct NvKmsExportVrrSemaphoreSurfaceReply { + NvU32 padding; +}; + +struct NvKmsExportVrrSemaphoreSurfaceParams { + struct NvKmsExportVrrSemaphoreSurfaceRequest request; + struct NvKmsExportVrrSemaphoreSurfaceReply reply; +}; + +/*! + * NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT: + * NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT: + * + * The NVKMS client can use NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT to request a + * vblank syncpt that continuously triggers each time the raster generator + * reaches the start of vblank. NVKMS will return the syncpt id in + * 'NvKmsEnableVblankSyncObjectReply::syncptId'. + * + * The NVKMS client can use NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT to disable + * the vblank syncpt. + * + * If a vblank syncpt is currently enabled on a head, and a modeset request is + * issued to reconfigure that head with a new set of mode timings, NVKMS will + * automatically reenable the vblank syncpt so it continues to trigger with the + * new mode timings. + * + * Clients can use these IOCTLs only if both NvKmsAllocDeviceReply:: + * supportsVblankSyncObjects and NvKmsAllocDeviceReply::supportsSyncpts are + * TRUE. + */ + +struct NvKmsEnableVblankSyncObjectRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; +}; + +struct NvKmsEnableVblankSyncObjectReply { + /* + * Clients should explicitly disable the vblank sync object to consume the + * handle. + */ + NvKmsVblankSyncObjectHandle vblankHandle; + + NvU32 syncptId; +}; + +struct NvKmsEnableVblankSyncObjectParams { + struct NvKmsEnableVblankSyncObjectRequest request; /*! in */ + struct NvKmsEnableVblankSyncObjectReply reply; /*! out */ +}; + +struct NvKmsDisableVblankSyncObjectRequest { + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + NvU32 head; + + /* This handle is received in NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT. */ + NvKmsVblankSyncObjectHandle vblankHandle; +}; + +struct NvKmsDisableVblankSyncObjectReply { + NvU32 padding; +}; + +struct NvKmsDisableVblankSyncObjectParams { + struct NvKmsDisableVblankSyncObjectRequest request; /*! in */ + struct NvKmsDisableVblankSyncObjectReply reply; /*! out */ +}; + +#endif /* NVKMS_API_H */ diff --git a/src/nvidia-modeset/interface/nvkms-format.h b/src/nvidia-modeset/interface/nvkms-format.h new file mode 100644 index 000000000..d1483f875 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-format.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_FORMAT_H) +#define NVKMS_FORMAT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" + +/* + * In order to interpret these pixel format namings, please take note of these + * conventions: + * - The Y8_U8__Y8_V8_N422 and U8_Y8__V8_Y8_N422 formats are both packed formats + * that have an interleaved chroma component across every two pixels. The + * double-underscore is a separator between these two pixel groups. + * - The triple-underscore is a separator between planes. + * - The 'N' suffix is a delimiter for the chroma decimation factor. + * + * As examples of the above rules: + * - The Y8_U8__Y8_V8_N422 format has one 8-bit luma component (Y8) and one + * 8-bit chroma component (U8) in pixel N, and one 8-bit luma component (Y8) + * and one 8-bit chroma component (V8) in pixel (N + 1). This format is + * 422-decimated since the U and V chroma samples are shared between each + * pair of adjacent pixels per line. + * - The Y10___U10V10_N444 format has one plane of 10-bit luma (Y10) components, + * and another plane of 10-bit chroma components (U10V10). This format has no + * chroma decimation since the luma and chroma components are sampled at the + * same rate. + */ +enum NvKmsSurfaceMemoryFormat { + NvKmsSurfaceMemoryFormatI8 = 0, + NvKmsSurfaceMemoryFormatA1R5G5B5 = 1, + NvKmsSurfaceMemoryFormatX1R5G5B5 = 2, + NvKmsSurfaceMemoryFormatR5G6B5 = 3, + NvKmsSurfaceMemoryFormatA8R8G8B8 = 4, + NvKmsSurfaceMemoryFormatX8R8G8B8 = 5, + NvKmsSurfaceMemoryFormatA2B10G10R10 = 6, + NvKmsSurfaceMemoryFormatX2B10G10R10 = 7, + NvKmsSurfaceMemoryFormatA8B8G8R8 = 8, + NvKmsSurfaceMemoryFormatX8B8G8R8 = 9, + NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 = 10, + NvKmsSurfaceMemoryFormatR16G16B16A16 = 11, + NvKmsSurfaceMemoryFormatRF32GF32BF32AF32 = 12, + NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422 = 13, + NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422 = 14, + NvKmsSurfaceMemoryFormatY8___U8V8_N444 = 15, + NvKmsSurfaceMemoryFormatY8___V8U8_N444 = 16, + NvKmsSurfaceMemoryFormatY8___U8V8_N422 = 17, + NvKmsSurfaceMemoryFormatY8___V8U8_N422 = 18, + NvKmsSurfaceMemoryFormatY8___U8V8_N420 = 19, + NvKmsSurfaceMemoryFormatY8___V8U8_N420 = 20, + NvKmsSurfaceMemoryFormatY10___U10V10_N444 = 21, + NvKmsSurfaceMemoryFormatY10___V10U10_N444 = 22, + NvKmsSurfaceMemoryFormatY10___U10V10_N422 = 23, + NvKmsSurfaceMemoryFormatY10___V10U10_N422 = 24, + NvKmsSurfaceMemoryFormatY10___U10V10_N420 = 25, + NvKmsSurfaceMemoryFormatY10___V10U10_N420 = 26, + NvKmsSurfaceMemoryFormatY12___U12V12_N444 = 27, + NvKmsSurfaceMemoryFormatY12___V12U12_N444 = 28, + NvKmsSurfaceMemoryFormatY12___U12V12_N422 = 29, + NvKmsSurfaceMemoryFormatY12___V12U12_N422 = 30, + NvKmsSurfaceMemoryFormatY12___U12V12_N420 = 31, + NvKmsSurfaceMemoryFormatY12___V12U12_N420 = 32, + NvKmsSurfaceMemoryFormatY8___U8___V8_N444 = 33, + NvKmsSurfaceMemoryFormatY8___U8___V8_N420 = 34, + NvKmsSurfaceMemoryFormatMin = NvKmsSurfaceMemoryFormatI8, + NvKmsSurfaceMemoryFormatMax = NvKmsSurfaceMemoryFormatY8___U8___V8_N420, +}; + +typedef struct 
NvKmsSurfaceMemoryFormatInfo { + enum NvKmsSurfaceMemoryFormat format; + const char *name; + NvU8 depth; + NvBool isYUV; + NvU8 numPlanes; + + union { + struct { + NvU8 bytesPerPixel; + NvU8 bitsPerPixel; + } rgb; + + struct { + NvU8 depthPerComponent; + NvU8 storageBitsPerComponent; + NvU8 horizChromaDecimationFactor; + NvU8 vertChromaDecimationFactor; + } yuv; + }; +} NvKmsSurfaceMemoryFormatInfo; + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format); + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_FORMAT_H */ diff --git a/src/nvidia-modeset/interface/nvkms-ioctl.h b/src/nvidia-modeset/interface/nvkms-ioctl.h new file mode 100644 index 000000000..cb2757313 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-ioctl.h @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_IOCTL_H) +#define NVKMS_IOCTL_H + +#include "nvtypes.h" + +/*! + * Some of the NVKMS ioctl parameter data structures are quite large + * and would exceed the parameter size constraints on at least SunOS. + * + * Redirect ioctls through a level of indirection: user-space assigns + * NvKmsIoctlParams with the real command, size, and pointer, and + * passes the NvKmsIoctlParams through the ioctl. + */ + +struct NvKmsIoctlParams { + NvU32 cmd; + NvU32 size; + NvU64 address NV_ALIGN_BYTES(8); +}; + +#define NVKMS_IOCTL_MAGIC 'm' +#define NVKMS_IOCTL_CMD 0 + +#define NVKMS_IOCTL_IOWR \ + _IOWR(NVKMS_IOCTL_MAGIC, NVKMS_IOCTL_CMD, struct NvKmsIoctlParams) + +/*! + * User-space pointers are always passed to NVKMS in an NvU64. + * This user-space address is eventually passed into the platform's + * copyin/copyout functions, in a void* argument. + * + * This utility function converts from an NvU64 to a pointer. + */ + +static inline void *nvKmsNvU64ToPointer(NvU64 value) +{ + return (void *)(NvUPtr)value; +} + +/*! + * Before casting the NvU64 to a void*, check that casting to a pointer + * size within the kernel does not lose any precision in the current + * environment. 
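+ *
+ * For example (an illustrative sketch, not part of the original comment),
+ * code unpacking an NvKmsIoctlParams might do:
+ *
+ *     if (!nvKmsNvU64AddressIsSafe(params.address)) {
+ *         // Reject the request rather than silently truncate the address.
+ *     }
+ *     userPtr = nvKmsNvU64ToPointer(params.address);
+ *     // ... hand userPtr to the platform's copyin/copyout routine ...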
+ */ +static inline NvBool nvKmsNvU64AddressIsSafe(NvU64 address) +{ + return address == (NvU64)(NvUPtr)address; +} + +#endif /* NVKMS_IOCTL_H */ diff --git a/src/nvidia-modeset/interface/nvkms-sync.h b/src/nvidia-modeset/interface/nvkms-sync.h new file mode 100644 index 000000000..4f4e1ddf2 --- /dev/null +++ b/src/nvidia-modeset/interface/nvkms-sync.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(NVKMS_SYNC_H) +#define NVKMS_SYNC_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvkms-api-types.h" + +/* These functions are implemented in nvkms-lib. */ + +enum nvKmsNotifierStatus { + NVKMS_NOTIFIER_STATUS_NOT_BEGUN, + NVKMS_NOTIFIER_STATUS_BEGUN, + NVKMS_NOTIFIER_STATUS_FINISHED, +}; + +struct nvKmsParsedNotifier { + NvU64 timeStamp; + NvBool timeStampValid; + enum nvKmsNotifierStatus status; + NvU8 presentCount; +}; + +static inline NvU32 nvKmsSizeOfNotifier(enum NvKmsNIsoFormat format, + NvBool overlay) { + switch (format) { + default: + case NVKMS_NISO_FORMAT_LEGACY: + return overlay ? 
16 : 4; + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return 16; + } +} + +void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base); + +void nvKmsParseNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, const void *base, + struct nvKmsParsedNotifier *out); + +struct nvKmsParsedSemaphore { + NvU32 payload; +}; + +static inline NvU32 nvKmsSizeOfSemaphore(enum NvKmsNIsoFormat format) { + switch (format) { + default: + case NVKMS_NISO_FORMAT_LEGACY: + return 4; + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return 16; + } +} + +NvU32 nvKmsSemaphorePayloadOffset(enum NvKmsNIsoFormat format); + +void nvKmsResetSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, void *base, + NvU32 payload); + +void nvKmsParseSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, const void *base, + struct nvKmsParsedSemaphore *out); + +#ifdef __cplusplus +}; +#endif + +#endif /* NVKMS_SYNC_H */ diff --git a/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h b/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h new file mode 100644 index 000000000..e37cb9598 --- /dev/null +++ b/src/nvidia-modeset/kapi/include/nvkms-kapi-internal.h @@ -0,0 +1,181 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_KAPI_INTERNAL_H__ + +#define __NVKMS_KAPI_INTERNAL_H__ + +#include "unix_rm_handle.h" + +#include "nvkms-utils.h" +#include "nvkms-kapi-private.h" + +//XXX Decouple functions like nvEvoLog used for logging from NVKMS + +#define nvKmsKapiLogDebug(__format...) \ + nvEvoLogDebug(EVO_LOG_INFO, "[kapi] "__format) + +#define nvKmsKapiLogDeviceDebug(__device, __format, ...) 
\ + nvEvoLogDebug(EVO_LOG_INFO, "[kapi][GPU Id 0x%08x] "__format, \ + device->gpuId, ##__VA_ARGS__) + +struct NvKmsKapiDevice { + + NvU32 gpuId; + + nvkms_sema_handle_t *pSema; + + /* RM handles */ + + NvU32 hRmClient; + NvU32 hRmDevice, hRmSubDevice; + NvU32 deviceInstance; + + NVUnixRmHandleAllocatorRec handleAllocator; + + /* NVKMS handles */ + + struct nvkms_per_open *pKmsOpen; + + NvKmsDeviceHandle hKmsDevice; + NvKmsDispHandle hKmsDisp; + NvU32 dispIdx; + + NvU32 subDeviceMask; + + NvBool isSOC; + NvKmsDispIOCoherencyModes isoIOCoherencyModes; + NvKmsDispIOCoherencyModes nisoIOCoherencyModes; + NvBool supportsSyncpts; + + /* Device capabilities */ + + struct { + struct NvKmsCompositionCapabilities cursorCompositionCaps; + struct NvKmsCompositionCapabilities overlayCompositionCaps; + + NvU16 validLayerRRTransforms; + + NvU32 maxWidthInPixels; + NvU32 maxHeightInPixels; + NvU32 maxCursorSizeInPixels; + + NvU8 genericPageKind; + } caps; + + NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX]; + + NvU32 numHeads; + NvU32 numLayers[NVKMS_KAPI_MAX_HEADS]; + + struct { + NvU32 hRmHandle; + NvKmsSurfaceHandle hKmsHandle; + + NvBool mapped; + void *pLinearAddress; + + enum NvKmsNIsoFormat format; + } notifier; + + struct { + NvU32 currFlipNotifierIndex; + } layerState[NVKMS_KAPI_MAX_HEADS][NVKMS_MAX_LAYERS_PER_HEAD]; + + void *privateData; + + void (*eventCallback)(const struct NvKmsKapiEvent *event); +}; + +struct NvKmsKapiMemory { + NvU32 hRmHandle; + NvU64 size; + + struct NvKmsKapiPrivSurfaceParams surfaceParams; +}; + +struct NvKmsKapiSurface { + NvKmsSurfaceHandle hKmsHandle; +}; + + +enum NvKmsKapiAllocationType { + NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT = 0, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER = 1, +}; + +static inline void *nvKmsKapiCalloc(size_t nmem, size_t size) +{ + return nvInternalAlloc(nmem * size, NV_TRUE); +} + +static inline void nvKmsKapiFree(void *ptr) +{ + return nvInternalFree(ptr); +} + +static inline NvU32 nvKmsKapiGenerateRmHandle(struct NvKmsKapiDevice *device) +{ + NvU32 handle; + + nvkms_sema_down(device->pSema); + handle = nvGenerateUnixRmHandle(&device->handleAllocator); + nvkms_sema_up(device->pSema); + + return handle; +} + +static inline void nvKmsKapiFreeRmHandle(struct NvKmsKapiDevice *device, + NvU32 handle) +{ + nvkms_sema_down(device->pSema); + nvFreeUnixRmHandle(&device->handleAllocator, handle); + nvkms_sema_up(device->pSema); +} + +NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible); + +NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible); + +struct NvKmsKapiChannelEvent* +nvKmsKapiAllocateChannelEvent(struct NvKmsKapiDevice *device, + NvKmsChannelEventProc *proc, + void *data, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize); + +void +nvKmsKapiFreeChannelEvent(struct NvKmsKapiDevice *device, + struct NvKmsKapiChannelEvent *cb); + +#endif /* __NVKMS_KAPI_INTERNAL_H__ */ diff --git a/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h b/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h new file mode 100644 index 000000000..13fc8d955 --- /dev/null +++ b/src/nvidia-modeset/kapi/include/nvkms-kapi-notifiers.h @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVKMS_KAPI_NOTIFIERS_H__ + +#define __NVKMS_KAPI_NOTIFIERS_H__ + +#include "nvkms-kapi-internal.h" + +#define NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER 0x2 +#define NVKMS_KAPI_NOTIFIER_SIZE 0x10 + +static inline NvU32 NVKMS_KAPI_INC_NOTIFIER_INDEX(const NvU32 index) +{ + return (index + 1) % NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; +} + +static inline NvU32 NVKMS_KAPI_DEC_NOTIFIER_INDEX(const NvU32 index) +{ + if (index == 0) { + /* + * Wrap "backwards" to the largest allowed notifier index. + */ + return NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER - 1; + } + + return index - 1; +} + +static inline NvU32 NVKMS_KAPI_NOTIFIER_INDEX(NvU32 head, NvU32 layer, + NvU32 index) +{ + NvU64 notifierIndex = 0; + + notifierIndex = head * + NVKMS_MAX_LAYERS_PER_HEAD * + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; + + notifierIndex += layer * + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; + + notifierIndex += index; + + return notifierIndex; +} + +static inline NvU32 NVKMS_KAPI_NOTIFIER_OFFSET(NvU32 head, + NvU32 layer, NvU32 index) +{ + return NVKMS_KAPI_NOTIFIER_INDEX(head, layer, index) * + NVKMS_KAPI_NOTIFIER_SIZE; +} + +NvBool nvKmsKapiAllocateNotifiers(struct NvKmsKapiDevice *device, NvBool inVideoMemory); + +void nvKmsKapiFreeNotifiers(struct NvKmsKapiDevice *device); + +NvBool nvKmsKapiIsNotifierFinish(const struct NvKmsKapiDevice *device, + const NvU32 head, const NvU32 layer, + const NvU32 index); + +void nvKmsKapiNotifierSetNotBegun(struct NvKmsKapiDevice *device, + NvU32 head, NvU32 layer, NvU32 index); + +#endif /* __NVKMS_KAPI_NOTIFIERS_H__ */ diff --git a/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h b/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h new file mode 100644 index 000000000..cd32ac87c --- /dev/null +++ b/src/nvidia-modeset/kapi/interface/nvkms-kapi-private.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(__NVKMS_KAPI_PRIVATE_H__) +#define __NVKMS_KAPI_PRIVATE_H__ + +#include "nvtypes.h" +#include "nvkms-api.h" + +struct NvKmsKapiPrivAllocateChannelEventParams { + NvU32 hClient; + NvU32 hChannel; +}; + +struct NvKmsKapiPrivSurfaceParams { + enum NvKmsSurfaceMemoryLayout layout; + + struct { + struct { + NvU32 x; + NvU32 y; + NvU32 z; + } log2GobsPerBlock; + + NvU32 pitchInBlocks; + NvBool genericMemory; + } blockLinear; +}; + +struct NvKmsKapiPrivImportMemoryParams { + int memFd; + struct NvKmsKapiPrivSurfaceParams surfaceParams; +}; + +struct NvKmsKapiPrivExportMemoryParams { + int memFd; +}; + +#endif /* !defined(__NVKMS_KAPI_PRIVATE_H__) */ diff --git a/src/nvidia-modeset/kapi/interface/nvkms-kapi.h b/src/nvidia-modeset/kapi/interface/nvkms-kapi.h new file mode 100644 index 000000000..b9fdd1b7c --- /dev/null +++ b/src/nvidia-modeset/kapi/interface/nvkms-kapi.h @@ -0,0 +1,1061 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(__NVKMS_KAPI_H__) + +#include "nvtypes.h" + +#include "nv-gpu-info.h" +#include "nvkms-api-types.h" +#include "nvkms-format.h" + +#define __NVKMS_KAPI_H__ + +#define NVKMS_KAPI_MAX_HEADS 4 + +#define NVKMS_KAPI_MAX_CONNECTORS 16 +#define NVKMS_KAPI_MAX_CLONE_DISPLAYS 16 + +#define NVKMS_KAPI_EDID_BUFFER_SIZE 2048 + +#define NVKMS_KAPI_MODE_NAME_LEN 32 + +/** + * \defgroup Objects + * @{ + */ + +struct NvKmsKapiDevice; +struct NvKmsKapiMemory; +struct NvKmsKapiSurface; +struct NvKmsKapiChannelEvent; + +typedef NvU32 NvKmsKapiConnector; +typedef NvU32 NvKmsKapiDisplay; + +/** @} */ + +/** + * \defgroup FuncPtrs + * @{ + */ + +/* + * Note: The channel event proc should not call back into NVKMS-KAPI driver. + * The callback into NVKMS-KAPI from the channel event proc, may cause + * deadlock. + */ +typedef void NvKmsChannelEventProc(void *dataPtr, NvU32 dataU32); + +/** @} */ + +/** + * \defgroup Structs + * @{ + */ + +struct NvKmsKapiDisplayModeTimings { + + NvU32 refreshRate; + NvU32 pixelClockHz; + NvU32 hVisible; + NvU32 hSyncStart; + NvU32 hSyncEnd; + NvU32 hTotal; + NvU32 hSkew; + NvU32 vVisible; + NvU32 vSyncStart; + NvU32 vSyncEnd; + NvU32 vTotal; + + struct { + + NvU32 interlaced : 1; + NvU32 doubleScan : 1; + NvU32 hSyncPos : 1; + NvU32 hSyncNeg : 1; + NvU32 vSyncPos : 1; + NvU32 vSyncNeg : 1; + + } flags; + + NvU32 widthMM; + NvU32 heightMM; + +}; + +struct NvKmsKapiDisplayMode { + struct NvKmsKapiDisplayModeTimings timings; + char name[NVKMS_KAPI_MODE_NAME_LEN]; +}; + +#define NVKMS_KAPI_LAYER_MAX 8 + +#define NVKMS_KAPI_LAYER_INVALID_IDX 0xff +#define NVKMS_KAPI_LAYER_PRIMARY_IDX 0 + +struct NvKmsKapiDeviceResourcesInfo { + + NvU32 numHeads; + NvU32 numLayers[NVKMS_KAPI_MAX_HEADS]; + + NvU32 numConnectors; + NvKmsKapiConnector connectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + + struct { + NvU32 validCursorCompositionModes; + NvU64 supportedCursorSurfaceMemoryFormats; + + struct { + NvU16 validRRTransforms; + NvU32 validCompositionModes; + } layer[NVKMS_KAPI_LAYER_MAX]; + + NvU32 minWidthInPixels; + NvU32 maxWidthInPixels; + + NvU32 minHeightInPixels; + NvU32 maxHeightInPixels; + + NvU32 maxCursorSizeInPixels; + + NvU32 pitchAlignment; + + NvU32 hasVideoMemory; + + NvU8 genericPageKind; + + NvBool supportsSyncpts; + } caps; + + NvU64 supportedSurfaceMemoryFormats[NVKMS_KAPI_LAYER_MAX]; +}; + +#define NVKMS_KAPI_LAYER_MASK(layerType) (1 << (layerType)) + +typedef enum NvKmsKapiMappingTypeRec { + NVKMS_KAPI_MAPPING_TYPE_USER = 1, + NVKMS_KAPI_MAPPING_TYPE_KERNEL = 2, +} NvKmsKapiMappingType; + +struct NvKmsKapiConnectorInfo { + + NvKmsKapiConnector handle; + + NvU32 physicalIndex; + + NvU32 headMask; + + NvKmsConnectorSignalFormat signalFormat; + NvKmsConnectorType type; + + /* + * List of connectors, not possible to serve together with this connector + * because they are competing for same resources. + */ + NvU32 numIncompatibleConnectors; + NvKmsKapiConnector incompatibleConnectorHandles[NVKMS_KAPI_MAX_CONNECTORS]; + +}; + +struct NvKmsKapiStaticDisplayInfo { + + NvKmsKapiDisplay handle; + + NvKmsKapiConnector connectorHandle; + + /* Set for DisplayPort MST displays (dynamic displays) */ + char dpAddress[NVKMS_DP_ADDRESS_STRING_LENGTH]; + + NvBool internal; + + /* List of potential sibling display for cloning */ + NvU32 numPossibleClones; + NvKmsKapiDisplay possibleCloneHandles[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + +}; + +struct NvKmsKapiSyncpt { + + /*! + * Possible syncpt use case in kapi. 
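+ *
+ * As an illustrative sketch (not part of the original comment), a layer
+ * that must wait for a pre-syncpt, with hypothetical 'id' and 'value',
+ * might set in its NvKmsKapiLayerConfig::syncptParams:
+ *
+ *     syncptParams.preSyncptSpecified = NV_TRUE;
+ *     syncptParams.preSyncptId = id;
+ *     syncptParams.preSyncptValue = value;
+ *     syncptParams.postSyncptRequested = NV_FALSE;
+ *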
+ * For pre-syncpt, use only id and value + * and for post-syncpt, use only fd. + */ + NvBool preSyncptSpecified; + NvU32 preSyncptId; + NvU32 preSyncptValue; + + NvBool postSyncptRequested; +}; + +struct NvKmsKapiLayerConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + struct NvKmsRRParams rrParams; + struct NvKmsKapiSyncpt syncptParams; + + NvU8 minPresentInterval; + NvBool tearing; + + NvU16 srcX, srcY; + NvU16 srcWidth, srcHeight; + + NvS16 dstX, dstY; + NvU16 dstWidth, dstHeight; +}; + +struct NvKmsKapiLayerRequestedConfig { + struct NvKmsKapiLayerConfig config; + struct { + NvBool surfaceChanged : 1; + NvBool srcXYChanged : 1; + NvBool srcWHChanged : 1; + NvBool dstXYChanged : 1; + NvBool dstWHChanged : 1; + } flags; +}; + +struct NvKmsKapiCursorRequestedConfig { + struct NvKmsKapiSurface *surface; + struct { + enum NvKmsCompositionBlendingMode compMode; + NvU8 surfaceAlpha; + } compParams; + + NvS16 dstX, dstY; + + struct { + NvBool surfaceChanged : 1; + NvBool dstXYChanged : 1; + } flags; +}; + +struct NvKmsKapiHeadModeSetConfig { + /* + * DRM distinguishes between the head state "enabled" (the specified + * configuration for the head is valid, its resources are allocated, + * etc, but the head may not necessarily be currently driving pixels + * to its output resource) and the head state "active" (the head is + * "enabled" _and_ the head is actively driving pixels to its output + * resource). + * + * This distinction is for DPMS: + * + * DPMS On : enabled=true, active=true + * DPMS Off : enabled=true, active=false + * + * "Enabled" state is indicated by numDisplays != 0. + * "Active" state is indicated by bActive == true. + */ + NvBool bActive; + + NvU32 numDisplays; + NvKmsKapiDisplay displays[NVKMS_KAPI_MAX_CLONE_DISPLAYS]; + + struct NvKmsKapiDisplayMode mode; +}; + +struct NvKmsKapiHeadRequestedConfig { + struct NvKmsKapiHeadModeSetConfig modeSetConfig; + struct { + NvBool activeChanged : 1; + NvBool displaysChanged : 1; + NvBool modeChanged : 1; + } flags; + + struct NvKmsKapiCursorRequestedConfig cursorRequestedConfig; + + struct NvKmsKapiLayerRequestedConfig + layerRequestedConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiRequestedModeSetConfig { + NvU32 headsMask; + struct NvKmsKapiHeadRequestedConfig + headRequestedConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiLayerReplyConfig { + int postSyncptFd; +}; + +struct NvKmsKapiHeadReplyConfig { + struct NvKmsKapiLayerReplyConfig + layerReplyConfig[NVKMS_KAPI_LAYER_MAX]; +}; + +struct NvKmsKapiModeSetReplyConfig { + struct NvKmsKapiHeadReplyConfig + headReplyConfig[NVKMS_KAPI_MAX_HEADS]; +}; + +struct NvKmsKapiEventDisplayChanged { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventDynamicDisplayConnected { + NvKmsKapiDisplay display; +}; + +struct NvKmsKapiEventFlipOccurred { + NvU32 head; + NvU32 layer; +}; + +struct NvKmsKapiDpyCRC32 { + NvU32 value; + NvBool supported; +}; + +struct NvKmsKapiCrcs { + struct NvKmsKapiDpyCRC32 compositorCrc32; + struct NvKmsKapiDpyCRC32 rasterGeneratorCrc32; + struct NvKmsKapiDpyCRC32 outputCrc32; +}; + +struct NvKmsKapiEvent { + enum NvKmsEventType type; + + struct NvKmsKapiDevice *device; + + void *privateData; + + union { + struct NvKmsKapiEventDisplayChanged displayChanged; + struct NvKmsKapiEventDynamicDisplayConnected dynamicDisplayConnected; + struct NvKmsKapiEventFlipOccurred flipOccurred; + } u; +}; + +struct NvKmsKapiAllocateDeviceParams { + /* [IN] GPU ID obtained from 
enumerateGpus() */ + NvU32 gpuId; + + /* [IN] Private data of device allocator */ + void *privateData; + /* [IN] Event callback */ + void (*eventCallback)(const struct NvKmsKapiEvent *event); +}; + +struct NvKmsKapiDynamicDisplayParams { + /* [IN] Display Handle returned by getDisplays() */ + NvKmsKapiDisplay handle; + + /* [OUT] Connection status */ + NvU32 connected; + + /* [IN/OUT] EDID of connected monitor/ Input to override EDID */ + struct { + NvU16 bufferSize; + NvU8 buffer[NVKMS_KAPI_EDID_BUFFER_SIZE]; + } edid; + + /* [IN] Set true to override EDID */ + NvBool overrideEdid; + + /* [IN] Set true to force connected status */ + NvBool forceConnected; + + /* [IN] Set true to force disconnect status */ + NvBool forceDisconnected; +}; + +struct NvKmsKapiCreateSurfaceParams { + + /* [IN] Parameter of each plane */ + struct { + /* [IN] Memory allocated for plane, using allocateMemory() */ + struct NvKmsKapiMemory *memory; + /* [IN] Offsets within the memory object */ + NvU32 offset; + /* [IN] Byte pitch of plane */ + NvU32 pitch; + } planes[NVKMS_MAX_PLANES_PER_SURFACE]; + + /* [IN] Width of the surface, in pixels */ + NvU32 width; + /* [IN] Height of the surface, in pixels */ + NvU32 height; + + /* [IN] The format describing number of planes and their content */ + enum NvKmsSurfaceMemoryFormat format; + + /* [IN] Whether to override the surface objects memory layout parameters + * with those provided here. */ + NvBool explicit_layout; + /* [IN] Whether the surface layout is block-linear or pitch. Used only + * if explicit_layout is NV_TRUE */ + enum NvKmsSurfaceMemoryLayout layout; + /* [IN] block-linear block height of surface. Used only when + * explicit_layout is NV_TRUE and layout is + * NvKmsSurfaceMemoryLayoutBlockLinear */ + NvU8 log2GobsPerBlockY; +}; + +struct NvKmsKapiFunctionsTable { + + /*! + * NVIDIA Driver version string. + */ + const char *versionString; + + /*! + * System Information. + */ + struct { + /* Availability of write combining support for video memory */ + NvBool bAllowWriteCombining; + } systemInfo; + + /*! + * Enumerate the available physical GPUs that can be used with NVKMS. + * + * \param [out] gpuInfo The information of the enumerated GPUs. + * It is an array of NVIDIA_MAX_GPUS elements. + * + * \return Count of enumerated gpus. + */ + NvU32 (*enumerateGpus)(nv_gpu_info_t *gpuInfo); + + /*! + * Allocate an NVK device using which you can query/allocate resources on + * GPU and do modeset. + * + * \param [in] params Parameters required for device allocation. + * + * \return An valid device handle on success, NULL on failure. + */ + struct NvKmsKapiDevice* (*allocateDevice) + ( + const struct NvKmsKapiAllocateDeviceParams *params + ); + + /*! + * Frees a device allocated by allocateDevice() and all its resources. + * + * \param [in] device A device returned by allocateDevice(). + * This function is a no-op if device is not valid. + */ + void (*freeDevice)(struct NvKmsKapiDevice *device); + + /*! + * Grab ownership of device, ownership is required to do modeset. + * + * \param [in] device A device returned by allocateDevice(). + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*grabOwnership)(struct NvKmsKapiDevice *device); + + /*! + * Release ownership of device. + * + * \param [in] device A device returned by allocateDevice(). + */ + void (*releaseOwnership)(struct NvKmsKapiDevice *device); + + /*! + * Registers for notification, via + * NvKmsKapiAllocateDeviceParams::eventCallback, of the events specified + * in interestMask. 
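+ *
+ * A minimal sketch (illustrative only; 'funcs' is assumed to be the
+ * struct NvKmsKapiFunctionsTable filled in by nvKmsKapiGetFunctionsTable(),
+ * and the NVKMS_EVENT_TYPE_* enumerant names are assumed from nvkms-api.h):
+ *
+ *     struct NvKmsKapiAllocateDeviceParams params = { };
+ *
+ *     params.gpuId = gpuId;
+ *     params.privateData = myDriverData;
+ *     params.eventCallback = MyEventCallback;
+ *
+ *     device = funcs->allocateDevice(&params);
+ *
+ *     funcs->declareEventInterest(
+ *         device,
+ *         NVBIT(NVKMS_EVENT_TYPE_DPY_CHANGED) |
+ *         NVBIT(NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED));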
+ * + * This call does nothing if eventCallback is NULL when NvKmsKapiDevice + * is allocated. + * + * Supported events are DPY_CHANGED and DYNAMIC_DPY_CONNECTED. + * + * \param [in] device A device returned by allocateDevice(). + * + * \param [in] interestMask A mask of events requested to listen. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*declareEventInterest) + ( + const struct NvKmsKapiDevice *device, + const NvU32 interestMask + ); + + /*! + * Retrieve various static resources like connector, head etc. present on + * device and capacities. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in/out] info A pointer to an NvKmsKapiDeviceResourcesInfo + * struct that the call will fill out with number + * of resources and their handles. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getDeviceResourcesInfo) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDeviceResourcesInfo *info + ); + + /*! + * Retrieve the number of displays on a device and an array of handles to + * those displays. + * + * \param [in] device A device allocated using + * allocateDevice(). + * + * \param [in/out] displayCount The caller should set this to the size + * of the displayHandles array it passed + * in. The function will set it to the + * number of displays returned, or the + * total number of displays on the device + * if displayHandles is NULL or array size + * of less than number of number of displays. + * + * \param [out] displayHandles An array of display handles with + * displayCount entries. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getDisplays) + ( + struct NvKmsKapiDevice *device, + NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles + ); + + /*! + * Retrieve information about a specified connector. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] connector Which connector to query, handle return by + * getDeviceResourcesInfo(). + * + * \param [out] info A pointer to an NvKmsKapiConnectorInfo struct + * that the call will fill out with information + * about connector. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getConnectorInfo) + ( + struct NvKmsKapiDevice *device, + NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info + ); + + /*! + * Retrieve information about a specified display. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] display Which connector to query, handle return by + * getDisplays(). + * + * \param [out] info A pointer to an NvKmsKapiStaticDisplayInfo struct + * that the call will fill out with information + * about display. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getStaticDisplayInfo) + ( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info + ); + + /*! + * Detect/force connection status/EDID of display. + * + * \param [in/out] params Parameters containing display + * handle, EDID and flags to force connection + * status. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getDynamicDisplayInfo) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDynamicDisplayParams *params + ); + + /*! + * Allocate some unformatted video memory of the specified size. + * + * This function allocates video memory on the specified GPU. + * It should be suitable for mapping on the CPU as a pitch + * linear or block-linear surface. 
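+ *
+ * As an illustrative sketch (not part of the original comment), requesting
+ * a block-linear allocation with no compression backing store:
+ *
+ *     NvU8 compressible = 0;
+ *     struct NvKmsKapiMemory *memory =
+ *         funcs->allocateVideoMemory(device,
+ *                                    NvKmsSurfaceMemoryLayoutBlockLinear,
+ *                                    size, &compressible);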
+ * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] layout BlockLinear or Pitch. + * + * \param [in] size Size, in bytes, of the memory to allocate. + * + * \param [in/out] compressible For input, non-zero if compression + * backing store should be allocated for + * the memory, for output, non-zero if + * compression backing store was + * allocated for the memory. + * + * \return An valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*allocateVideoMemory) + ( + struct NvKmsKapiDevice *device, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + NvU8 *compressible + ); + + /*! + * Allocate some unformatted system memory of the specified size. + * + * This function allocates system memory . It should be suitable + * for mapping on the CPU as a pitch linear or block-linear surface. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] layout BlockLinear or Pitch. + * + * \param [in] size Size, in bytes, of the memory to allocate. + * + * \param [in/out] compressible For input, non-zero if compression + * backing store should be allocated for + * the memory, for output, non-zero if + * compression backing store was + * allocated for the memory. + * + * \return An valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*allocateSystemMemory) + ( + struct NvKmsKapiDevice *device, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + NvU8 *compressible + ); + + /*! + * Import some unformatted memory of the specified size. + * + * This function accepts a driver-specific parameter structure representing + * memory allocated elsewhere and imports it to a NVKMS KAPI memory object + * of the specified size. + * + * \param [in] device A device allocated using allocateDevice(). The + * memory being imported must have been allocated + * against the same physical device this device object + * represents. + * + * \param [in] size Size, in bytes, of the memory being imported. + * + * \param [in] nvKmsParamsUser Userspace pointer to driver-specific + * parameters describing the memory object being + * imported. + * + * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct. + * + * \return A valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*importMemory) + ( + struct NvKmsKapiDevice *device, NvU64 size, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize + ); + + /*! + * Duplicate an existing NVKMS KAPI memory object, taking a reference on the + * underlying memory. + * + * \param [in] device A device allocated using allocateDevice(). The + * memory being imported need not have been allocated + * against the same physical device this device object + * represents. + * + * \param [in] srcDevice The device associated with srcMemory. + * + * \param [in] srcMemory The memory object to duplicate. + * + * \return A valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* (*dupMemory) + ( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiDevice *srcDevice, + const struct NvKmsKapiMemory *srcMemory + ); + + /*! + * Export the specified memory object to a userspace object handle. + * + * This function accepts a driver-specific parameter structure representing + * a new handle to be assigned to an existing NVKMS KAPI memory object. + * + * \param [in] device A device allocated using allocateDevice(). 
The + * memory being exported must have been created against + * or imported to the same device object, and the + * destination object handle must be valid for this + * device as well. + * + * \param [in] memory The memory object to export. + * + * \param [in] nvKmsParamsUser Userspace pointer to driver-specific + * parameters specifying a handle to add to the + * memory object being exported. + * + * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*exportMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize + ); + + /*! + * Free memory allocated using allocateMemory() + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory(). + * + * \return NV_TRUE on success, NV_FALSE if memory is in use. + */ + void (*freeMemory) + ( + struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory + ); + + /*! + * Create MMIO mappings for a memory object allocated using + * allocateMemory(). + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory() + * + * \param [in] type Userspace or kernelspace mapping + * + * \param [out] ppLinearAddress The MMIO address where memory object is + * mapped. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*mapMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + void **ppLinearAddress + ); + + /*! + * Destroy MMIO mappings created for a memory object allocated using + * allocateMemory(). + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory Memory allocated using allocateMemory() + * + * \param [in] type Userspace or kernelspace mapping + * + * \param [in] pLinearAddress The MMIO address return by mapMemory() + */ + void (*unmapMemory) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + const void *pLinearAddress + ); + + /*! + * Create a formatted surface from an NvKmsKapiMemory object. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] params Parameters to the surface creation. + * + * \return An valid surface handle on success. NULL on failure. + */ + struct NvKmsKapiSurface* (*createSurface) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiCreateSurfaceParams *params + ); + + /*! + * Destroy a surface created by createSurface(). + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] surface A surface created using createSurface() + */ + void (*destroySurface) + ( + struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface + ); + + /*! + * Enumerate the mode timings available on a given display. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] display A display handle returned by getDisplays(). + * + * \param [in] modeIndex A mode index (Any integer >= 0). + * + * \param [out] mode A pointer to an NvKmsKapiDisplayMode struct that + * the call will fill out with mode-timings of mode + * at index modeIndex. + * + * \param [out] valid Returns TRUE in this param if mode-timings of + * mode at index modeIndex are valid on display. 
+ * + * \param [out] preferredMode Returns TRUE if this mode is marked as + * "preferred" by the EDID. + * + * \return Value >= 1 if more modes are available, 0 if no more modes are + * available, and Value < 0 on failure. + */ + int (*getDisplayMode) + ( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, NvU32 modeIndex, + struct NvKmsKapiDisplayMode *mode, NvBool *valid, + NvBool *preferredMode + ); + + /*! + * Validate given mode timings available on a given display. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] display A display handle returned by getDisplays(). + * + * \param [in] mode A pointer to an NvKmsKapiDisplayMode struct that + * filled with mode-timings to validate. + * + * \return NV_TRUE if mode-timings are valid, NV_FALSE on failure. + */ + NvBool (*validateDisplayMode) + ( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode + ); + + /*! + * Apply a mode configuration to the device. + * + * Client can describe damaged part of configuration but still it is must + * to describe entire configuration. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] requestedConfig Parameters describing a device-wide + * display configuration. + * + * \param [in] commit If set to 0 them call will only validate + * mode configuration, will not apply it. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*applyModeSetConfig) + ( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsKapiModeSetReplyConfig *replyConfig, + const NvBool commit + ); + + /*! + * Return status of flip. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] head A head returned by getDeviceResourcesInfo(). + * + * \param [in] layer A layer index. + * + * \param [out] pending Return TRUE if head has pending flip for + * given layer. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getFlipPendingStatus) + ( + const struct NvKmsKapiDevice *device, + const NvU32 head, + const NvU32 layer, + NvBool *pending + ); + + /*! + * Allocate an event callback. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] proc Function pointer to call when triggered. + * + * \param [in] data Argument to pass into function. + * + * \param [in] nvKmsParamsUser Userspace pointer to driver-specific + * parameters describing the event callback + * being created. + * + * \param [in] nvKmsParamsSize Size of the driver-specific parameter struct. + * + * \return struct NvKmsKapiChannelEvent* on success, NULL on failure. + */ + struct NvKmsKapiChannelEvent* (*allocateChannelEvent) + ( + struct NvKmsKapiDevice *device, + NvKmsChannelEventProc *proc, + void *data, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize + ); + + /*! + * Free an event callback. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] cb struct NvKmsKapiChannelEvent* returned from + * allocateChannelEvent() + */ + void (*freeChannelEvent) + ( + struct NvKmsKapiDevice *device, + struct NvKmsKapiChannelEvent *cb + ); + + /*! + * Get 32-bit CRC value for the last contents presented on the specified + * head. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] head A head returned by getDeviceResourcesInfo(). 
+ * + * \param [out] crc32 The CRC32 generated from the content currently + * presented onto the given head + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getCRC32) + ( + struct NvKmsKapiDevice *device, + NvU32 head, + struct NvKmsKapiCrcs *crc32 + ); + + /*! + * Get the list allocation pages corresponding to the specified memory object. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] memory The memory object for which we need to find the + * list of allocation pages and number of pages. + * + * \param [out] pPages A pointer to the list of NvU64 pointers. Caller + * should free pPages on success using freeMemoryPages(). + * + * \param [out] pNumPages It gives the total number of NvU64 pointers + * returned in pPages. + * + * \return NV_TRUE on success, NV_FALSE on failure. + */ + NvBool (*getMemoryPages) + ( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 **pPages, + NvU32 *pNumPages + ); + + /*! + * Free the list of allocation pages returned by getMemoryPages() + * + * \param [in] pPages A list of NvU64 pointers allocated by getMemoryPages(). + * + */ + void (*freeMemoryPages) + ( + NvU64 *pPages + ); + + /* + * Import SGT as a memory handle. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] sgt SGT pointer. + * \param [in] gem GEM pointer that pinned SGT, to be refcounted. + * + * \param [in] limit Size, in bytes, of the memory backed by the SGT. + * + * \return A valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* + (*getSystemMemoryHandleFromSgt)(struct NvKmsKapiDevice *device, + NvP64 sgt, + NvP64 gem, + NvU32 limit); + + /* + * Import dma-buf in the memory handle. + * + * \param [in] device A device allocated using allocateDevice(). + * + * \param [in] dmaBuf DMA-BUF pointer. + * + * \param [in] limit Size, in bytes, of the dma-buf. + * + * \return An valid memory handle on success, NULL on failure. + */ + struct NvKmsKapiMemory* + (*getSystemMemoryHandleFromDmaBuf)(struct NvKmsKapiDevice *device, + NvP64 dmaBuf, + NvU32 limit); + +}; + +/** @} */ + +/** + * \defgroup Functions + * @{ + */ + +NvBool nvKmsKapiGetFunctionsTable +( + struct NvKmsKapiFunctionsTable *funcsTable +); + +/** @} */ + +#endif /* defined(__NVKMS_KAPI_H__) */ diff --git a/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c b/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c new file mode 100644 index 000000000..3ca110d0e --- /dev/null +++ b/src/nvidia-modeset/kapi/src/nvkms-kapi-channelevent.c @@ -0,0 +1,150 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
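/*
 * A minimal usage sketch for the getMemoryPages()/freeMemoryPages() pair
 * documented in the function table above. It assumes the caller has already
 * filled a struct NvKmsKapiFunctionsTable via nvKmsKapiGetFunctionsTable()
 * and holds device/memory objects from the corresponding allocation entry
 * points; the helper name is hypothetical.
 */
static NvBool MemoryPagesSketch(const struct NvKmsKapiFunctionsTable *funcs,
                                const struct NvKmsKapiDevice *device,
                                const struct NvKmsKapiMemory *memory)
{
    NvU64 *pPages = NULL;
    NvU32 numPages = 0;

    if (!funcs->getMemoryPages(device, memory, &pPages, &numPages)) {
        return NV_FALSE;
    }

    /* pPages[0] .. pPages[numPages - 1] describe the allocation's pages. */

    /* The caller owns the list and must release it explicitly. */
    funcs->freeMemoryPages(pPages);

    return NV_TRUE;
}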
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-rmapi.h" + +#include "nvkms-kapi.h" +#include "nvkms-kapi-private.h" +#include "nvkms-kapi-internal.h" + +#include "class/cl0005.h" + +struct NvKmsKapiChannelEvent { + struct NvKmsKapiDevice *device; + + NvKmsChannelEventProc *proc; + void *data; + + struct NvKmsKapiPrivAllocateChannelEventParams nvKmsParams; + + NvHandle hCallback; + NVOS10_EVENT_KERNEL_CALLBACK_EX rmCallback; +}; + +static void ChannelEventHandler(void *arg1, void *arg2, NvHandle hEvent, + NvU32 data, NvU32 status) +{ + struct NvKmsKapiChannelEvent *cb = arg1; + cb->proc(cb->data, 0); +} + +struct NvKmsKapiChannelEvent* nvKmsKapiAllocateChannelEvent +( + struct NvKmsKapiDevice *device, + NvKmsChannelEventProc *proc, + void *data, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize +) +{ + int status; + NvU32 ret; + + struct NvKmsKapiChannelEvent *cb = NULL; + NV0005_ALLOC_PARAMETERS eventParams = { }; + + if (device == NULL || proc == NULL) { + goto fail; + } + + cb = nvKmsKapiCalloc(1, sizeof(*cb)); + if (cb == NULL) { + goto fail; + } + + /* Verify the driver-private params size and copy it in from userspace */ + + if (nvKmsParamsSize != sizeof(cb->nvKmsParams)) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameter size mismatch - " + "expected: 0x%llx, caller specified: 0x%llx", + (NvU64)sizeof(cb->nvKmsParams), nvKmsParamsSize); + goto fail; + } + + status = nvkms_copyin(&cb->nvKmsParams, + nvKmsParamsUser, sizeof(cb->nvKmsParams)); + if (status != 0) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameters could not be read from " + "userspace"); + goto fail; + } + + cb->device = device; + + cb->proc = proc; + cb->data = data; + + cb->rmCallback.func = ChannelEventHandler; + cb->rmCallback.arg = cb; + + cb->hCallback = nvGenerateUnixRmHandle(&device->handleAllocator); + if (cb->hCallback == 0x0) { + nvKmsKapiLogDeviceDebug(device, + "Failed to allocate event callback handle"); + goto fail; + } + + eventParams.hParentClient = cb->nvKmsParams.hClient; + eventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + eventParams.notifyIndex = 0; + eventParams.data = NV_PTR_TO_NvP64(&cb->rmCallback); + + ret = nvRmApiAlloc(device->hRmClient, + cb->nvKmsParams.hChannel, + cb->hCallback, + NV01_EVENT_KERNEL_CALLBACK_EX, + &eventParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug(device, "Failed to allocate event callback"); + nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback); + goto fail; + } + + return cb; +fail: + nvKmsKapiFree(cb); + return NULL; +} + +void nvKmsKapiFreeChannelEvent +( + struct NvKmsKapiDevice *device, + struct NvKmsKapiChannelEvent *cb +) +{ + if (device == NULL || cb == NULL) { + return; + } + + nvRmApiFree(device->hRmClient, + device->hRmClient, + cb->hCallback); + + nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback); + + nvKmsKapiFree(cb); +} diff --git a/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c b/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c new file mode 100644 index 000000000..ef44285c8 
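/*
 * A hypothetical client-side callback, illustrating how the trampoline in
 * nvkms-kapi-channelevent.c above dispatches events: RM invokes
 * ChannelEventHandler() with the NvKmsKapiChannelEvent as arg1, and that
 * trampoline forwards to the proc/data pair given to allocateChannelEvent(),
 * matching the call site cb->proc(cb->data, 0). The context type and handler
 * below are illustrations only; a real client passes a function compatible
 * with the NvKmsChannelEventProc typedef from the KAPI headers.
 */
struct HypotheticalEventContext {
    NvBool eventSeen;
};

static void HypotheticalChannelEventHandler(void *data, NvU32 unused)
{
    struct HypotheticalEventContext *ctx = data;

    /*
     * This runs from RM's callback path, so a real client would typically
     * just record the event or queue deferred work here rather than doing
     * heavy processing.
     */
    ctx->eventSeen = NV_TRUE;
}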
--- /dev/null +++ b/src/nvidia-modeset/kapi/src/nvkms-kapi-notifiers.c @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-api.h" +#include "nvkms-sync.h" +#include "nvkms-rmapi.h" +#include "nvkms-kapi-notifiers.h" + +#define NVKMS_KAPI_MAX_NOTIFIERS \ + (NVKMS_KAPI_MAX_HEADS * \ + NVKMS_MAX_LAYERS_PER_HEAD * \ + NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER) + +void nvKmsKapiFreeNotifiers(struct NvKmsKapiDevice *device) +{ + if (device->notifier.hKmsHandle != 0) { + struct NvKmsUnregisterSurfaceParams paramsUnreg = { }; + NvBool status; + + paramsUnreg.request.deviceHandle = device->hKmsDevice; + paramsUnreg.request.surfaceHandle = device->notifier.hKmsHandle; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_UNREGISTER_SURFACE, + ¶msUnreg, sizeof(paramsUnreg)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_UNREGISTER_SURFACE failed"); + } + + device->notifier.hKmsHandle = 0; + } + + if (device->notifier.mapped) { + NV_STATUS status; + + status = nvRmApiUnmapMemory(device->hRmClient, + device->hRmSubDevice, + device->notifier.hRmHandle, + device->notifier.pLinearAddress, + 0); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "UnmapMemory failed with error code 0x%08x", + status); + } + + device->notifier.mapped = NV_FALSE; + } + + if (device->notifier.hRmHandle != 0) { + NvU32 status; + + status = nvRmApiFree(device->hRmClient, + device->hRmDevice, + device->notifier.hRmHandle); + + if (status != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "RmFree failed with error code 0x%08x", + status); + } + + nvFreeUnixRmHandle(&device->handleAllocator, device->notifier.hRmHandle); + device->notifier.hRmHandle = 0; + } +} + +static void InitNotifier(struct NvKmsKapiDevice *device, + NvU32 head, NvU32 layer, NvU32 index) +{ + nvKmsResetNotifier(device->notifier.format, + (layer == NVKMS_OVERLAY_LAYER), + NVKMS_KAPI_NOTIFIER_INDEX(head, layer, index), + device->notifier.pLinearAddress); +} + +#define NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE 0x1000 + +NvBool nvKmsKapiAllocateNotifiers(struct NvKmsKapiDevice *device, + NvBool inVideoMemory) +{ + struct NvKmsRegisterSurfaceParams surfParams = {}; + NV_STATUS status = 0; + NvU8 compressible = 0; + NvBool ret; + + ct_assert((NVKMS_KAPI_MAX_NOTIFIERS * 
NVKMS_KAPI_NOTIFIER_SIZE) <= + (NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE)); + + ct_assert(NVKMS_KAPI_NOTIFIER_SIZE >= sizeof(NvNotification)); + nvAssert(NVKMS_KAPI_NOTIFIER_SIZE >= + nvKmsSizeOfNotifier(device->notifier.format, TRUE /* overlay */)); + nvAssert(NVKMS_KAPI_NOTIFIER_SIZE >= + nvKmsSizeOfNotifier(device->notifier.format, FALSE /* overlay */)); + + device->notifier.hRmHandle = + nvGenerateUnixRmHandle(&device->handleAllocator); + + if (device->notifier.hRmHandle == 0x0) { + nvKmsKapiLogDeviceDebug( + device, + "nvGenerateUnixRmHandle() failed"); + return NV_FALSE; + } + + if (inVideoMemory) { + ret = nvKmsKapiAllocateVideoMemory(device, + device->notifier.hRmHandle, + NvKmsSurfaceMemoryLayoutPitch, + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER, + &compressible); + } else { + ret = nvKmsKapiAllocateSystemMemory(device, + device->notifier.hRmHandle, + NvKmsSurfaceMemoryLayoutPitch, + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE, + NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER, + &compressible); + } + + if (!ret) { + nvFreeUnixRmHandle(&device->handleAllocator, device->notifier.hRmHandle); + device->notifier.hRmHandle = 0x0; + goto failed; + } + + status = nvRmApiMapMemory(device->hRmClient, + device->hRmSubDevice, + device->notifier.hRmHandle, + 0, + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE, + &device->notifier.pLinearAddress, + 0); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "MapMemory failed with error code 0x%08x", + status); + goto failed; + } + + device->notifier.mapped = NV_TRUE; + + surfParams.request.deviceHandle = device->hKmsDevice; + surfParams.request.useFd = FALSE; + surfParams.request.rmClient = device->hRmClient; + + surfParams.request.widthInPixels = NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE; + surfParams.request.heightInPixels = 1; + surfParams.request.layout = NvKmsSurfaceMemoryLayoutPitch; + surfParams.request.format = NvKmsSurfaceMemoryFormatI8; + surfParams.request.log2GobsPerBlockY = 0; + surfParams.request.isoType = NVKMS_MEMORY_NISO; + + surfParams.request.planes[0].u.rmObject = device->notifier.hRmHandle; + surfParams.request.planes[0].pitch = NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE; + surfParams.request.planes[0].rmObjectSizeInBytes = + NVKMS_KAPI_NOTIFIERS_SURFACE_SIZE; + + if (!nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_REGISTER_SURFACE, + &surfParams, sizeof(surfParams))) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_REGISTER_SURFACE failed"); + goto failed; + } + + device->notifier.hKmsHandle = surfParams.reply.surfaceHandle; + + /* Init Notifiers */ + + { + NvU32 head; + + for (head = 0; head < device->numHeads; head++) { + NvU32 layer; + + for (layer = 0; layer < NVKMS_MAX_LAYERS_PER_HEAD; layer++) { + NvU32 index; + + for (index = 0; + index < NVKMS_KAPI_MAX_NOTIFERS_PER_LAYER; index++) { + InitNotifier(device, head, layer, index); + } + } + } + } + + return NV_TRUE; + +failed: + + nvKmsKapiFreeNotifiers(device); + + return NV_FALSE; +} diff --git a/src/nvidia-modeset/kapi/src/nvkms-kapi.c b/src/nvidia-modeset/kapi/src/nvkms-kapi.c new file mode 100644 index 000000000..4c6100cc9 --- /dev/null +++ b/src/nvidia-modeset/kapi/src/nvkms-kapi.c @@ -0,0 +1,3092 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
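/*
 * Illustrative arithmetic only: the notifier surface allocated by
 * nvKmsKapiAllocateNotifiers() above is a single 0x1000-byte allocation that
 * must hold one notifier per (head, layer, per-layer slot) combination; that
 * is what its ct_assert() checks at build time. The constants below are
 * hypothetical example values, not the driver's real NVKMS_KAPI_* limits,
 * and the typedef is simply a portable compile-time assertion.
 */
#define EXAMPLE_MAX_HEADS                4
#define EXAMPLE_MAX_LAYERS_PER_HEAD      8
#define EXAMPLE_MAX_NOTIFIERS_PER_LAYER  2
#define EXAMPLE_NOTIFIER_SIZE           16      /* bytes per notifier */
#define EXAMPLE_SURFACE_SIZE        0x1000      /* bytes in the surface */

typedef char ExampleNotifiersFitInSurface[
    (EXAMPLE_MAX_HEADS *
     EXAMPLE_MAX_LAYERS_PER_HEAD *
     EXAMPLE_MAX_NOTIFIERS_PER_LAYER *
     EXAMPLE_NOTIFIER_SIZE) <= EXAMPLE_SURFACE_SIZE ? 1 : -1];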
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvUnixVersion.h" + +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-api.h" +#include "nvkms-rmapi.h" + +#include "nvkms-kapi.h" +#include "nvkms-kapi-private.h" +#include "nvkms-kapi-internal.h" +#include "nvkms-kapi-notifiers.h" + +#include /* NV01_ROOT/NV01_NULL_OBJECT */ +#include /* NV01_MEMORY_SYSTEM */ +#include /* NV01_DEVICE */ +#include /* NV01_MEMORY_LOCAL_USER */ +#include /* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */ +#include /* NV20_SUBDEVICE_0 */ + +#include /* NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2 */ +#include /* NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD */ +#include /* NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES */ +#include /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */ + +#include "ctrl/ctrl003e.h" /* NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES */ + + +ct_assert(NVKMS_KAPI_LAYER_PRIMARY_IDX == NVKMS_MAIN_LAYER); +ct_assert(NVKMS_KAPI_LAYER_MAX == NVKMS_MAX_LAYERS_PER_HEAD); + +/* XXX Move to NVKMS */ +#define NV_EVO_PITCH_ALIGNMENT 0x100 + +#define NVKMS_KAPI_SUPPORTED_EVENTS_MASK \ + ((1 << NVKMS_EVENT_TYPE_DPY_CHANGED) | \ + (1 << NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED) | \ + (1 << NVKMS_EVENT_TYPE_FLIP_OCCURRED)) + +static NvU32 EnumerateGpus(nv_gpu_info_t *gpuInfo) +{ + return nvkms_enumerate_gpus(gpuInfo); +} + +/* + * Helper function to free RM objects allocated for NvKmsKapiDevice. + */ +static void RmFreeDevice(struct NvKmsKapiDevice *device) +{ + if (device->hRmSubDevice != 0x0) { + nvRmApiFree(device->hRmClient, + device->hRmDevice, + device->hRmSubDevice); + nvKmsKapiFreeRmHandle(device, device->hRmSubDevice); + device->hRmSubDevice = 0x0; + } + + /* Free RM device object */ + + if (device->hRmDevice != 0x0) { + nvRmApiFree(device->hRmClient, + device->hRmClient, + device->hRmDevice); + nvKmsKapiFreeRmHandle(device, device->hRmDevice); + + device->hRmDevice = 0x0; + } + + nvTearDownUnixRmHandleAllocator(&device->handleAllocator); + + device->deviceInstance = 0; + + /* Free RM client */ + + if (device->hRmClient != 0x0) { + nvRmApiFree(device->hRmClient, + device->hRmClient, + device->hRmClient); + + device->hRmClient = 0x0; + } +} + +/* + * Helper function to allocate RM objects for NvKmsKapiDevice. 
+ */ +static NvBool RmAllocateDevice(struct NvKmsKapiDevice *device) +{ + NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 }; + NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS idInfoParams = { }; + NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 }; + NV0080_ALLOC_PARAMETERS allocParams = { }; + + NvU32 hRmDevice, hRmSubDevice; + NvU32 ret; + + /* Allocate RM client */ + + ret = nvRmApiAlloc(NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &device->hRmClient); + + if (ret != NVOS_STATUS_SUCCESS || device->hRmClient == 0x0) { + nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM client"); + goto failed; + } + + /* Query device instance */ + + idInfoParams.gpuId = device->gpuId; + + ret = nvRmApiControl(device->hRmClient, + device->hRmClient, + NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2, + &idInfoParams, + sizeof(idInfoParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug(device, "Failed to query device instance"); + goto failed; + } + + device->deviceInstance = idInfoParams.deviceInstance; + device->isSOC = + FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE, + idInfoParams.gpuFlags); + + /* Initialize RM handle allocator */ + + if (!nvInitUnixRmHandleAllocator(&device->handleAllocator, + device->hRmClient, + device->deviceInstance + 1)) { + nvKmsKapiLogDeviceDebug(device, "Failed to initialize RM handle allocator"); + goto failed; + } + + /* Allocate RM device object */ + + hRmDevice = nvKmsKapiGenerateRmHandle(device); + + if (hRmDevice == 0x0) { + nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle"); + goto failed; + } + + allocParams.deviceId = device->deviceInstance; + + allocParams.hClientShare = device->hRmClient; + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmClient, + hRmDevice, + NV01_DEVICE_0, + &allocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM device object"); + nvKmsKapiFreeRmHandle(device, hRmDevice); + goto failed; + } + + device->hRmDevice = hRmDevice; + + ret = nvRmApiControl(device->hRmClient, + device->hRmDevice, + NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES, + &getNumSubDevicesParams, + sizeof(getNumSubDevicesParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug(device, "Failed to determine number of GPUs"); + goto failed; + } + + if (getNumSubDevicesParams.numSubDevices != 1) { + nvKmsKapiLogDeviceDebug( + device, + "Unsupported number of GPUs: %d", + getNumSubDevicesParams.numSubDevices); + goto failed; + } + + hRmSubDevice = nvKmsKapiGenerateRmHandle(device); + + if (hRmDevice == 0x0) { + nvKmsKapiLogDeviceDebug(device, "Failed to allocate RM handle"); + goto failed; + } + + subdevAllocParams.subDeviceId = 0; + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmSubDevice, + NV20_SUBDEVICE_0, + &subdevAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug(device, "Failed to initialize subDevice"); + nvKmsKapiFreeRmHandle(device, hRmSubDevice); + goto failed; + } + + device->hRmSubDevice = hRmSubDevice; + + return NV_TRUE; + +failed: + + RmFreeDevice(device); + + return NV_FALSE; +} + +/* + * Helper function to free NVKMS objects allocated for NvKmsKapiDevice. 
+ */ +static void KmsFreeDevice(struct NvKmsKapiDevice *device) +{ + /* Free notifier memory */ + + nvKmsKapiFreeNotifiers(device); + + /* Free NVKMS device */ + + if (device->hKmsDevice != 0x0) { + struct NvKmsFreeDeviceParams paramsFree = { }; + + paramsFree.request.deviceHandle = device->hKmsDevice; + + nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_FREE_DEVICE, + ¶msFree, sizeof(paramsFree)); + + device->hKmsDevice = device->hKmsDisp = 0x0; + } + + /* Close NVKMS */ + + if (device->pKmsOpen != NULL) { + nvkms_close_from_kapi(device->pKmsOpen); + device->pKmsOpen = NULL; + } +} + +/* + * Helper function to allocate NVKMS objects for NvKmsKapiDevice. + */ +static NvBool KmsAllocateDevice(struct NvKmsKapiDevice *device) +{ + struct NvKmsAllocDeviceParams *paramsAlloc; + NvBool status; + NvBool inVideoMemory = FALSE; + NvU32 head; + NvBool ret = FALSE; + NvU32 layer; + + paramsAlloc = nvKmsKapiCalloc(1, sizeof(*paramsAlloc)); + if (paramsAlloc == NULL) { + return FALSE; + } + + /* Open NVKMS */ + + device->pKmsOpen = nvkms_open_from_kapi(device); + + if (device->pKmsOpen == NULL) { + nvKmsKapiLogDeviceDebug(device, "Failed to Open NVKMS"); + goto done; + } + + /* Allocate NVKMS device */ + + nvkms_strncpy( + paramsAlloc->request.versionString, + NV_VERSION_STRING, + sizeof(paramsAlloc->request.versionString)); + + if (device->isSOC) { + paramsAlloc->request.deviceId = NVKMS_DEVICE_ID_TEGRA; + } else { + paramsAlloc->request.deviceId = device->deviceInstance; + } + paramsAlloc->request.sliMosaic = NV_FALSE; + paramsAlloc->request.enableConsoleHotplugHandling = NV_TRUE; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_ALLOC_DEVICE, + paramsAlloc, sizeof(*paramsAlloc)); + + if (!status || + paramsAlloc->reply.status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + + if (paramsAlloc->reply.status == + NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE) { + nvKmsKapiLogDeviceDebug( + device, + "Display hardware is not available; falling back to " + "displayless mode"); + + ret = TRUE; + goto done; + } + + nvKmsKapiLogDeviceDebug( + device, + "Failed to NVKM device %u(%u): %d %d\n", + device->gpuId, + paramsAlloc->request.deviceId, + status, + paramsAlloc->reply.status); + + goto done; + } + + device->hKmsDevice = paramsAlloc->reply.deviceHandle; + + device->caps.cursorCompositionCaps = + paramsAlloc->reply.cursorCompositionCaps; + + device->caps.overlayCompositionCaps = + paramsAlloc->reply.layerCaps[NVKMS_OVERLAY_LAYER].composition; + + device->caps.validLayerRRTransforms = + paramsAlloc->reply.validLayerRRTransforms; + + device->caps.maxWidthInPixels = paramsAlloc->reply.maxWidthInPixels; + device->caps.maxHeightInPixels = paramsAlloc->reply.maxHeightInPixels; + device->caps.maxCursorSizeInPixels = paramsAlloc->reply.maxCursorSize; + device->caps.genericPageKind = paramsAlloc->reply.genericPageKind; + + /* XXX Add LUT support */ + + device->numHeads = paramsAlloc->reply.numHeads; + + for (head = 0; head < device->numHeads; head++) { + if (paramsAlloc->reply.numLayers[head] < 2) { + goto done; + } + device->numLayers[head] = paramsAlloc->reply.numLayers[head]; + } + + for (layer = 0; layer < NVKMS_KAPI_LAYER_MAX; layer++) { + device->supportedSurfaceMemoryFormats[layer] = + paramsAlloc->reply.layerCaps[layer].supportedSurfaceMemoryFormats; + } + + if (paramsAlloc->reply.validNIsoFormatMask & + (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY)) { + device->notifier.format = NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY; + } else if (paramsAlloc->reply.validNIsoFormatMask & + (1 << 
NVKMS_NISO_FORMAT_FOUR_WORD)) { + device->notifier.format = NVKMS_NISO_FORMAT_FOUR_WORD; + } else { + nvAssert(paramsAlloc->reply.validNIsoFormatMask & + (1 << NVKMS_NISO_FORMAT_LEGACY)); + device->notifier.format = NVKMS_NISO_FORMAT_LEGACY; + } + + /* XXX Add support for SLI/multiple display engines per device */ + if (paramsAlloc->reply.numDisps != 1) + { + nvKmsKapiLogDeviceDebug(device, "Found unsupported SLI configuration"); + goto done; + } + + device->hKmsDisp = paramsAlloc->reply.dispHandles[0]; + device->dispIdx = 0; + + device->subDeviceMask = paramsAlloc->reply.subDeviceMask; + + device->isoIOCoherencyModes = paramsAlloc->reply.isoIOCoherencyModes; + device->nisoIOCoherencyModes = paramsAlloc->reply.nisoIOCoherencyModes; + + device->supportsSyncpts = paramsAlloc->reply.supportsSyncpts; + + if (paramsAlloc->reply.nIsoSurfacesInVidmemOnly) { + inVideoMemory = TRUE; + } + + /* Allocate notifier memory */ + if (!nvKmsKapiAllocateNotifiers(device, inVideoMemory)) { + nvKmsKapiLogDebug( + "Failed to allocate Notifier objects for GPU ID 0x%08x", + device->gpuId); + goto done; + } + + ret = NV_TRUE; + +done: + if (!ret) { + KmsFreeDevice(device); + } + + nvKmsKapiFree(paramsAlloc); + + return ret; +} + +static void FreeDevice(struct NvKmsKapiDevice *device) +{ + /* Free NVKMS objects allocated for NvKmsKapiDevice */ + + KmsFreeDevice(device); + + /* Free RM objects allocated for NvKmsKapiDevice */ + + RmFreeDevice(device); + + /* Lower the reference count of gpu. */ + + nvkms_close_gpu(device->gpuId); + + if (device->pSema != NULL) { + nvkms_sema_free(device->pSema); + } + + nvKmsKapiFree(device); +} + +NvBool nvKmsKapiAllocateSystemMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible) +{ + NvU32 ret; + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + const NvKmsDispIOCoherencyModes *pIOCoherencyModes; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.size = size; + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, + memAllocParams.attr); + if (*compressible) { + /* + * RM will choose a compressed page kind and hence allocate + * comptags for color surfaces >= 32bpp. The actual kind + * chosen isn't important, as it can be overridden by creating + * a virtual alloc with a different kind when mapping the + * memory into the GPU. + */ + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32, + memAllocParams.attr); + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY, + memAllocParams.attr); + } else { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN, + memAllocParams.attr); + } + break; + + case NvKmsSurfaceMemoryLayoutPitch: + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH, + memAllocParams.attr); + break; + + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout"); + return NV_FALSE; + } + + switch (type) { + case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT: + /* XXX Note compression and scanout do not work together on + * any current GPUs. However, some use cases do involve scanning + * out a compression-capable surface: + * + * 1) Mapping the compressible surface as non-compressed when + * generating its content. + * + * 2) Using decompress-in-place to decompress the surface content + * before scanning it out. + * + * Hence creating compressed allocations of TYPE_SCANOUT is allowed. 
+ */ + + pIOCoherencyModes = &device->isoIOCoherencyModes; + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER: + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + nvKmsKapiLogDeviceDebug(device, + "Attempting creation of BlockLinear notifier memory"); + return NV_FALSE; + } + + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY, + _YES, memAllocParams.attr2); + + pIOCoherencyModes = &device->nisoIOCoherencyModes; + + break; + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type"); + return NV_FALSE; + } + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, + memAllocParams.attr); + memAllocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + memAllocParams.attr2); + + if (!pIOCoherencyModes->coherent) { + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, + _WRITE_COMBINE, memAllocParams.attr); + } else { + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, + _WRITE_BACK, memAllocParams.attr); + } + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, + memAllocParams.attr); + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "nvRmApiAlloc failed with error code 0x%08x", + ret); + + return NV_FALSE; + } + + if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, + memAllocParams.attr)) { + *compressible = 0; + } else { + *compressible = 1; + } + + return TRUE; +} + +NvBool nvKmsKapiAllocateVideoMemory(struct NvKmsKapiDevice *device, + NvU32 hRmHandle, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + enum NvKmsKapiAllocationType type, + NvU8 *compressible) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + NvU32 ret; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.size = size; + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, + memAllocParams.attr); + + if (*compressible) { + /* + * RM will choose a compressed page kind and hence allocate + * comptags for color surfaces >= 32bpp. The actual kind + * chosen isn't important, as it can be overridden by creating + * a virtual alloc with a different kind when mapping the + * memory into the GPU. + */ + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32, + memAllocParams.attr); + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COMPR, _ANY, + memAllocParams.attr); + } else { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _DEPTH, _UNKNOWN, + memAllocParams.attr); + } + break; + + case NvKmsSurfaceMemoryLayoutPitch: + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH, + memAllocParams.attr); + break; + + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Memory Layout"); + return NV_FALSE; + } + + + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, + memAllocParams.attr); + memAllocParams.attr2 = + FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + memAllocParams.attr2); + + switch (type) { + case NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT: + /* XXX [JRJ] Not quite right. This can also be used to allocate + * cursor images. The stuff RM does with this field is kind of + * black magic, and I can't tell if it actually matters. 
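/*
 * A minimal sketch of the in/out contract of the `compressible` argument
 * used by nvKmsKapiAllocateSystemMemory() above (and by the video-memory
 * variant): the caller sets it nonzero to ask for a compression-capable
 * block-linear allocation, and on success it is rewritten to report whether
 * RM actually granted compression. The handle handling mirrors the
 * AllocateSystemMemory()/AllocateVideoMemory() helpers later in this file;
 * the function name below is hypothetical.
 */
static NvBool CompressibleScanoutSketch(struct NvKmsKapiDevice *device,
                                        NvU64 size)
{
    NvU8 compressible = 1;  /* request compression if RM can provide it */
    NvU32 hRmHandle = nvKmsKapiGenerateRmHandle(device);

    if (hRmHandle == 0x0) {
        return NV_FALSE;
    }

    if (!nvKmsKapiAllocateSystemMemory(device,
                                       hRmHandle,
                                       NvKmsSurfaceMemoryLayoutBlockLinear,
                                       size,
                                       NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT,
                                       &compressible)) {
        nvKmsKapiFreeRmHandle(device, hRmHandle);
        return NV_FALSE;
    }

    /* compressible is now 1 only if RM chose a compressed page kind. */

    /*
     * A real caller would keep hRmHandle (e.g. in a memory object) and later
     * release it with nvRmApiFree() plus nvKmsKapiFreeRmHandle(), as
     * FreeMemory() does.
     */
    return NV_TRUE;
}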
+ */ + memAllocParams.type = NVOS32_TYPE_PRIMARY; + + memAllocParams.alignment = NV_EVO_SURFACE_ALIGNMENT; + memAllocParams.flags |= + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE | /* Pick up above EVO alignment */ + NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP; /* X sets this for cursors */ + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, + memAllocParams.attr); + + /* XXX [JRJ] Note compression and scanout do not work together on + * any current GPUs. However, some use cases do involve scanning + * out a compression-capable surface: + * + * 1) Mapping the compressible surface as non-compressed when + * generating its content. + * + * 2) Using decompress-in-place to decompress the surface content + * before scanning it out. + * + * Hence creating compressed allocations of TYPE_SCANOUT is allowed. + */ + + break; + case NVKMS_KAPI_ALLOCATION_TYPE_NOTIFIER: + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + nvKmsKapiLogDeviceDebug(device, + "Attempting creation of BlockLinear notifier memory"); + return NV_FALSE; + } + + memAllocParams.type = NVOS32_TYPE_DMA; + + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, + memAllocParams.attr); + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, + memAllocParams.attr); + + break; + default: + nvKmsKapiLogDeviceDebug(device, "Unknown Allocation Type"); + return NV_FALSE; + } + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmHandle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "VidHeapControl failed with error code 0x%08x", + ret); + + return NV_FALSE; + } + + if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, + memAllocParams.attr)) { + *compressible = 0; + } else { + *compressible = 1; + } + + return NV_TRUE; +} + +static struct NvKmsKapiDevice* AllocateDevice +( + const struct NvKmsKapiAllocateDeviceParams *params +) +{ + struct NvKmsKapiDevice *device = NULL; + + device = nvKmsKapiCalloc(1, sizeof(*device)); + + if (device == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate memory for NvKmsKapiDevice of GPU ID 0x%08x", + params->gpuId); + goto failed; + } + + device->pSema = nvkms_sema_alloc(); + + if (device->pSema == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate semaphore for NvKmsKapiDevice of GPU ID 0x%08x", + params->gpuId); + goto failed; + } + + /* Raise the reference count of gpu. 
*/ + + if (!nvkms_open_gpu(params->gpuId)) { + nvKmsKapiLogDebug("Failed to open GPU ID 0x%08x", params->gpuId); + goto failed; + } + + device->gpuId = params->gpuId; + + nvKmsKapiLogDebug( + "Allocating NvKmsKapiDevice 0x%p for GPU ID 0x%08x", + device, + device->gpuId); + + /* Allocate RM object for NvKmsKapiDevice */ + + if (!RmAllocateDevice(device)) { + nvKmsKapiLogDebug( + "Failed to allocate RM objects for GPU ID 0x%08x", + device->gpuId); + goto failed; + } + + /* Allocate NVKMS objects for NvKmsKapiDevice */ + + if (!KmsAllocateDevice(device)) { + nvKmsKapiLogDebug( + "Failed to allocate NVKMS objects for GPU ID 0x%08x", + device->gpuId); + goto failed; + } + + device->privateData = params->privateData; + device->eventCallback = params->eventCallback; + + return device; + +failed: + + FreeDevice(device); + + return NULL; +} + +static NvBool GrabOwnership(struct NvKmsKapiDevice *device) +{ + struct NvKmsGrabOwnershipParams paramsGrab = { }; + + if (device->hKmsDevice == 0x0) { + return NV_TRUE; + } + + paramsGrab.request.deviceHandle = device->hKmsDevice; + + return nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_GRAB_OWNERSHIP, + ¶msGrab, sizeof(paramsGrab)); + +} + +static void ReleaseOwnership(struct NvKmsKapiDevice *device) +{ + struct NvKmsReleaseOwnershipParams paramsRelease = { }; + + if (device->hKmsDevice == 0x0) { + return; + } + + paramsRelease.request.deviceHandle = device->hKmsDevice; + + nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_RELEASE_OWNERSHIP, + ¶msRelease, sizeof(paramsRelease)); +} + +static NvBool DeclareEventInterest +( + const struct NvKmsKapiDevice *device, + const NvU32 interestMask +) +{ + struct NvKmsDeclareEventInterestParams kmsEventParams = { }; + + if (device->hKmsDevice == 0x0 || device->eventCallback == NULL) { + return NV_TRUE; + } + + kmsEventParams.request.interestMask = + interestMask & NVKMS_KAPI_SUPPORTED_EVENTS_MASK; + + return nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_DECLARE_EVENT_INTEREST, + &kmsEventParams, sizeof(kmsEventParams)); +} + +static NvBool GetDeviceResourcesInfo +( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDeviceResourcesInfo *info +) +{ + struct NvKmsQueryDispParams paramsDisp = { }; + NvBool status = NV_FALSE; + + NvU32 i; + + nvkms_memset(info, 0, sizeof(*info)); + + info->caps.hasVideoMemory = !device->isSOC; + + if (device->hKmsDevice == 0x0) { + info->caps.pitchAlignment = 0x1; + return NV_TRUE; + } + + paramsDisp.request.deviceHandle = device->hKmsDevice; + paramsDisp.request.dispHandle = device->hKmsDisp; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DISP, + ¶msDisp, sizeof(paramsDisp)); + + if (!status) + { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query display engine information"); + + goto done; + } + + info->numHeads = device->numHeads; + + ct_assert(sizeof(info->numLayers) == sizeof(device->numLayers)); + nvkms_memcpy(info->numLayers, device->numLayers, sizeof(device->numLayers)); + + ct_assert(ARRAY_LEN(info->connectorHandles) >= + ARRAY_LEN(paramsDisp.reply.connectorHandles)); + + info->numConnectors = paramsDisp.reply.numConnectors; + + for (i = 0; i < paramsDisp.reply.numConnectors; i++) { + info->connectorHandles[i] = paramsDisp.reply.connectorHandles[i]; + } + + { + const struct NvKmsCompositionCapabilities *pCaps = + &device->caps.cursorCompositionCaps; + + info->caps.validCursorCompositionModes = + pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE]. 
+ supportedBlendModes[1]; + } + + for (i = 0; i < NVKMS_KAPI_LAYER_MAX; i++) { + if (i == NVKMS_KAPI_LAYER_PRIMARY_IDX) { + info->caps.layer[i].validCompositionModes = + NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE); + } else { + const struct NvKmsCompositionCapabilities *pCaps = + &device->caps.overlayCompositionCaps; + + info->caps.layer[i].validCompositionModes = + pCaps->colorKeySelect[NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE]. + supportedBlendModes[1]; + } + } + + for (i = 0; i < NVKMS_KAPI_LAYER_MAX; i++) { + info->caps.layer[i].validRRTransforms = + device->caps.validLayerRRTransforms; + } + + info->caps.maxWidthInPixels = device->caps.maxWidthInPixels; + info->caps.maxHeightInPixels = device->caps.maxHeightInPixels; + info->caps.maxCursorSizeInPixels = device->caps.maxCursorSizeInPixels; + info->caps.genericPageKind = device->caps.genericPageKind; + + info->caps.pitchAlignment = NV_EVO_PITCH_ALIGNMENT; + + info->caps.supportsSyncpts = device->supportsSyncpts; + + info->caps.supportedCursorSurfaceMemoryFormats = + NVBIT(NvKmsSurfaceMemoryFormatA8R8G8B8); + + ct_assert(sizeof(info->supportedSurfaceMemoryFormats) == + sizeof(device->supportedSurfaceMemoryFormats)); + + nvkms_memcpy(info->supportedSurfaceMemoryFormats, + device->supportedSurfaceMemoryFormats, + sizeof(device->supportedSurfaceMemoryFormats)); +done: + + return status; +} + +/* + * XXX Make it per-connector, query valid dpyId list as dynamic data of + * connector. + */ +static NvBool GetDisplays +( + struct NvKmsKapiDevice *device, + NvU32 *numDisplays, NvKmsKapiDisplay *displayHandles +) +{ + struct NvKmsQueryDispParams paramsDisp = { }; + NvBool status = NV_FALSE; + + NVDpyId dpyId; + + if (device->hKmsDevice == 0x0) { + *numDisplays = 0; + return NV_TRUE; + } + + paramsDisp.request.deviceHandle = device->hKmsDevice; + paramsDisp.request.dispHandle = device->hKmsDisp; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DISP, + ¶msDisp, sizeof(paramsDisp)); + + if (!status) + { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query display engine information"); + + return NV_FALSE; + } + + if (*numDisplays == 0) { + goto done; + } + + if (*numDisplays < nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys)) { + nvKmsKapiLogDebug( + "Size of display handle array is less than number of displays"); + goto done; + } + + FOR_ALL_DPY_IDS(dpyId, paramsDisp.reply.validDpys) { + *(displayHandles++) = nvDpyIdToNvU32(dpyId); + } + +done: + + *numDisplays = nvCountDpyIdsInDpyIdList(paramsDisp.reply.validDpys); + + return NV_TRUE; +} + +static NvBool GetConnectorInfo +( + struct NvKmsKapiDevice *device, + NvKmsKapiConnector connector, struct NvKmsKapiConnectorInfo *info +) +{ + struct NvKmsQueryConnectorStaticDataParams paramsConnector = { }; + NvBool status = NV_FALSE; + + if (device == NULL || info == NULL) { + goto done; + } + + paramsConnector.request.deviceHandle = device->hKmsDevice; + paramsConnector.request.dispHandle = device->hKmsDisp; + paramsConnector.request.connectorHandle = connector; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, + ¶msConnector, sizeof(paramsConnector)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query static data of connector 0x%08x", + connector); + + goto done; + } + + info->handle = connector; + + info->physicalIndex = paramsConnector.reply.physicalIndex; + + info->headMask = paramsConnector.reply.headMask; + + info->signalFormat = paramsConnector.reply.signalFormat; + + info->type = 
paramsConnector.reply.type; + +done: + + return status; +} + +static NvBool GetStaticDisplayInfo +( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, struct NvKmsKapiStaticDisplayInfo *info +) +{ + struct NvKmsQueryDpyStaticDataParams paramsDpyStatic = { }; + NvBool status = NV_FALSE; + + if (device == NULL || info == NULL) { + goto done; + } + + /* Query static data of display */ + + paramsDpyStatic.request.deviceHandle = device->hKmsDevice; + paramsDpyStatic.request.dispHandle = device->hKmsDisp; + + paramsDpyStatic.request.dpyId = nvNvU32ToDpyId(display); + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, + ¶msDpyStatic, sizeof(paramsDpyStatic)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query static data of dpy 0x%08x", + display); + + goto done; + } + + info->handle = display; + + info->connectorHandle = paramsDpyStatic.reply.connectorHandle; + + ct_assert(sizeof(info->dpAddress) == + sizeof(paramsDpyStatic.reply.dpAddress)); + + nvkms_memcpy(info->dpAddress, + paramsDpyStatic.reply.dpAddress, + sizeof(paramsDpyStatic.reply.dpAddress)); + info->dpAddress[sizeof(paramsDpyStatic.reply.dpAddress) - 1] = '\0'; + + info->internal = paramsDpyStatic.reply.mobileInternal; + +done: + + return status; +} + +static NvBool GetDynamicDisplayInfo( + struct NvKmsKapiDevice *device, + struct NvKmsKapiDynamicDisplayParams *params) +{ + struct NvKmsQueryDpyDynamicDataParams *pParamsDpyDynamic = NULL; + NvBool status = NV_FALSE; + + if (device == NULL || params == NULL) { + goto done; + } + + pParamsDpyDynamic = nvKmsKapiCalloc(1, sizeof(*pParamsDpyDynamic)); + + if (pParamsDpyDynamic == NULL) { + goto done; + } + + pParamsDpyDynamic->request.deviceHandle = device->hKmsDevice; + pParamsDpyDynamic->request.dispHandle = device->hKmsDisp; + + pParamsDpyDynamic->request.dpyId = nvNvU32ToDpyId(params->handle); + + if (params->overrideEdid) { + ct_assert(sizeof(params->edid.buffer) == + sizeof(pParamsDpyDynamic->reply.edid.buffer)); + nvkms_memcpy( + pParamsDpyDynamic->request.edid.buffer, + params->edid.buffer, + sizeof(pParamsDpyDynamic->request.edid.buffer)); + + pParamsDpyDynamic->request.edid.bufferSize = params->edid.bufferSize; + + pParamsDpyDynamic->request.overrideEdid = NV_TRUE; + } + + pParamsDpyDynamic->request.forceConnected = params->forceConnected; + + pParamsDpyDynamic->request.forceDisconnected = params->forceDisconnected; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, + pParamsDpyDynamic, sizeof(*pParamsDpyDynamic)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to query dynamic data of dpy 0x%08x", + params->handle); + + goto done; + } + + params->connected = pParamsDpyDynamic->reply.connected; + + if (pParamsDpyDynamic->reply.connected && !params->overrideEdid) { + + nvkms_memcpy( + params->edid.buffer, + pParamsDpyDynamic->reply.edid.buffer, + sizeof(params->edid.buffer)); + + params->edid.bufferSize = pParamsDpyDynamic->reply.edid.bufferSize; + } + +done: + + if (pParamsDpyDynamic != NULL) { + nvKmsKapiFree(pParamsDpyDynamic); + } + + return status; +} + +static void FreeMemory +( + struct NvKmsKapiDevice *device, struct NvKmsKapiMemory *memory +) +{ + if (device == NULL || memory == NULL) { + return; + } + + if (memory->hRmHandle != 0x0) { + NvU32 ret; + + ret = nvRmApiFree(device->hRmClient, + device->hRmDevice, + memory->hRmHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to free RM memory object 
0x%08x allocated for " + "NvKmsKapiMemory 0x%p", + memory->hRmHandle, memory); + } + + nvKmsKapiFreeRmHandle(device, memory->hRmHandle); + } + + nvKmsKapiFree(memory); +} + +static struct NvKmsKapiMemory *AllocMemoryObjectAndHandle( + struct NvKmsKapiDevice *device, + NvU32 *handleOut +) +{ + struct NvKmsKapiMemory *memory; + + /* Allocate the container object */ + + memory = nvKmsKapiCalloc(1, sizeof(*memory)); + + if (memory == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate memory for NVKMS memory object on " + "NvKmsKapiDevice 0x%p", + device); + return NULL; + } + + /* Generate RM handle for memory object */ + + *handleOut = nvKmsKapiGenerateRmHandle(device); + + if (*handleOut == 0x0) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to allocate RM handle for memory"); + nvKmsKapiFree(memory); + return NULL; + } + + return memory; +} + +static struct NvKmsKapiMemory* AllocateVideoMemory +( + struct NvKmsKapiDevice *device, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + NvU8 *compressible +) +{ + struct NvKmsKapiMemory *memory = NULL; + NvU32 hRmHandle; + + memory = AllocMemoryObjectAndHandle(device, &hRmHandle); + + if (!memory) { + return NULL; + } + + if (!nvKmsKapiAllocateVideoMemory(device, + hRmHandle, + layout, + size, + NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT, + compressible)) { + nvKmsKapiFreeRmHandle(device, hRmHandle); + FreeMemory(device, memory); + return NULL; + } + + memory->hRmHandle = hRmHandle; + memory->size = size; + memory->surfaceParams.layout = layout; + + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + memory->surfaceParams.blockLinear.genericMemory = NV_TRUE; + } + + return memory; +} + +static struct NvKmsKapiMemory* AllocateSystemMemory +( + struct NvKmsKapiDevice *device, + enum NvKmsSurfaceMemoryLayout layout, + NvU64 size, + NvU8 *compressible +) +{ + struct NvKmsKapiMemory *memory = NULL; + NvU32 hRmHandle; + + memory = AllocMemoryObjectAndHandle(device, &hRmHandle); + + if (!memory) { + return NULL; + } + + if (!nvKmsKapiAllocateSystemMemory(device, + hRmHandle, + layout, + size, + NVKMS_KAPI_ALLOCATION_TYPE_SCANOUT, + compressible)) { + nvKmsKapiFreeRmHandle(device, hRmHandle); + FreeMemory(device, memory); + return NULL; + } + + memory->hRmHandle = hRmHandle; + memory->size = size; + memory->surfaceParams.layout = layout; + + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + memory->surfaceParams.blockLinear.genericMemory = NV_TRUE; + } + + return memory; +} + +static struct NvKmsKapiMemory* ImportMemory +( + struct NvKmsKapiDevice *device, + NvU64 memorySize, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize +) +{ + struct NvKmsKapiPrivImportMemoryParams nvKmsParams, *pNvKmsParams = NULL; + NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { }; + struct NvKmsKapiMemory *memory = NULL; + NvU32 hMemory; + NvU32 ret; + int status; + + /* Verify the driver-private params size and copy it in from userspace */ + + if (nvKmsParamsSize != sizeof(nvKmsParams)) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameter size mismatch - " + "expected: 0x%llx, caller specified: 0x%llx", + (NvU64)sizeof(nvKmsParams), nvKmsParamsSize); + return NULL; + } + + /* + * Use a heap allocation as the destination pointer passed to + * nvkms_copyin; stack allocations created within core NVKMS may not + * be recognizable to the Linux kernel's CONFIG_HARDENED_USERCOPY + * checker, triggering false errors. 
But then save the result to a + * variable on the stack, so that we can free the heap memory + * immediately and not worry about its lifetime. + */ + + pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams)); + + if (pNvKmsParams == NULL) { + nvKmsKapiLogDebug("Failed to allocate memory for ImportMemory"); + return NULL; + } + + status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams)); + + nvKmsParams = *pNvKmsParams; + + nvKmsKapiFree(pNvKmsParams); + + if (status != 0) { + nvKmsKapiLogDebug( + "NVKMS private memory import parameters could not be read from " + "userspace"); + return NULL; + } + + memory = AllocMemoryObjectAndHandle(device, &hMemory); + + if (!memory) { + return NULL; + } + + importParams.fd = nvKmsParams.memFd; + importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM; + importParams.object.data.rmObject.hDevice = device->hRmDevice; + importParams.object.data.rmObject.hParent = device->hRmDevice; + importParams.object.data.rmObject.hObject = hMemory; + + ret = nvRmApiControl(device->hRmClient, + device->hRmClient, + NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD, + &importParams, + sizeof(importParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to import RM memory object (%d) of size %llu bytes", + nvKmsParams.memFd, memorySize); + + nvKmsKapiFreeRmHandle(device, hMemory); + goto failed; + } + + memory->hRmHandle = hMemory; + memory->size = memorySize; + memory->surfaceParams = nvKmsParams.surfaceParams; + + return memory; + +failed: + + FreeMemory(device, memory); + + return NULL; +} + +static struct NvKmsKapiMemory* DupMemory +( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiDevice *srcDevice, + const struct NvKmsKapiMemory *srcMemory +) +{ + struct NvKmsKapiMemory *memory; + NvU32 hMemory; + NvU32 ret; + + memory = AllocMemoryObjectAndHandle(device, &hMemory); + + if (!memory) { + return NULL; + } + + ret = nvRmApiDupObject(device->hRmClient, + device->hRmDevice, + hMemory, + srcDevice->hRmClient, + srcMemory->hRmHandle, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to dup NVKMS memory object 0x%p (0x%08x, 0x%08x) " + "of size %llu bytes", + srcMemory, srcDevice->hRmClient, srcMemory->hRmHandle, + srcMemory->size); + + nvKmsKapiFreeRmHandle(device, hMemory); + goto failed; + } + + memory->hRmHandle = hMemory; + memory->size = srcMemory->size; + memory->surfaceParams = srcMemory->surfaceParams; + + return memory; + +failed: + FreeMemory(device, memory); + + return NULL; +} + +static NvBool ExportMemory +( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 nvKmsParamsUser, + NvU64 nvKmsParamsSize +) +{ + struct NvKmsKapiPrivExportMemoryParams nvKmsParams, *pNvKmsParams = NULL; + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { }; + int status; + NvU32 ret; + + if (device == NULL || memory == NULL) { + nvKmsKapiLogDebug( + "Invalid device or memory parameter while exporting memory"); + return NV_FALSE; + } + + /* Verify the driver-private params size and copy it in from userspace */ + + if (nvKmsParamsSize != sizeof(nvKmsParams)) { + nvKmsKapiLogDebug( + "NVKMS private memory export parameter size mismatch - " + "expected: 0x%llx, caller specified: 0x%llx", + (NvU64)sizeof(nvKmsParams), nvKmsParamsSize); + return NV_FALSE; + } + + /* + * Use a heap allocation as the destination pointer passed to + * nvkms_copyin; stack allocations created within core NVKMS may not + * be recognizable to the 
Linux kernel's CONFIG_HARDENED_USERCOPY + * checker, triggering false errors. But then save the result to a + * variable on the stack, so that we can free the heap memory + * immediately and not worry about its lifetime. + */ + + pNvKmsParams = nvKmsKapiCalloc(1, sizeof(*pNvKmsParams)); + + if (pNvKmsParams == NULL) { + nvKmsKapiLogDebug("Failed to allocate scratch memory for ExportMemory"); + return NV_FALSE; + } + + status = nvkms_copyin(pNvKmsParams, nvKmsParamsUser, sizeof(*pNvKmsParams)); + + nvKmsParams = *pNvKmsParams; + nvKmsKapiFree(pNvKmsParams); + + if (status != 0) { + nvKmsKapiLogDebug( + "NVKMS private memory export parameters could not be read from " + "userspace"); + return NV_FALSE; + } + + exportParams.fd = nvKmsParams.memFd; + exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM; + exportParams.object.data.rmObject.hDevice = device->hRmDevice; + exportParams.object.data.rmObject.hParent = device->hRmDevice; + exportParams.object.data.rmObject.hObject = memory->hRmHandle; + + ret = nvRmApiControl(device->hRmClient, + device->hRmClient, + NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD, + &exportParams, + sizeof(exportParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to export RM memory object of size %llu bytes " + "to (%d)", memory->size, nvKmsParams.memFd); + return NV_FALSE; + } + + return NV_TRUE; +} + +static struct NvKmsKapiMemory* +GetSystemMemoryHandleFromDmaBufSgtHelper(struct NvKmsKapiDevice *device, + NvU32 descriptorType, + NvP64 descriptor, + NvU32 limit) +{ + NvU32 ret; + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS memAllocParams = {0}; + struct NvKmsKapiMemory *memory = NULL; + NvU32 hRmHandle; + + memory = AllocMemoryObjectAndHandle(device, &hRmHandle); + + if (!memory) { + return NULL; + } + + memAllocParams.type = NVOS32_TYPE_PRIMARY; + memAllocParams.descriptorType = descriptorType; + memAllocParams.descriptor = descriptor; + memAllocParams.limit = limit; + + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, memAllocParams.attr); + + memAllocParams.attr2 = + FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, memAllocParams.attr2); + + /* dmabuf import is currently only used for ISO memory. 
*/ + if (!device->isoIOCoherencyModes.coherent) { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, + memAllocParams.attr); + } else { + memAllocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, + memAllocParams.attr); + } + + ret = nvRmApiAlloc(device->hRmClient, + device->hRmDevice, + hRmHandle, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + &memAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvKmsKapiLogDeviceDebug( + device, + "nvRmApiAlloc failed with error code 0x%08x", + ret); + nvKmsKapiFreeRmHandle(device, hRmHandle); + FreeMemory(device, memory); + return NULL; + } + + memory->hRmHandle = hRmHandle; + memory->size = limit + 1; + memory->surfaceParams.layout = NvKmsSurfaceMemoryLayoutPitch; + + return memory; +} + +static struct NvKmsKapiMemory* +GetSystemMemoryHandleFromSgt(struct NvKmsKapiDevice *device, + NvP64 sgt, + NvP64 gem, + NvU32 limit) +{ + NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS params = { + .sgt = sgt, + .gem = gem + }; + + return GetSystemMemoryHandleFromDmaBufSgtHelper( + device, NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR, ¶ms, limit); +} + +static struct NvKmsKapiMemory* +GetSystemMemoryHandleFromDmaBuf(struct NvKmsKapiDevice *device, + NvP64 dmaBuf, + NvU32 limit) +{ + return GetSystemMemoryHandleFromDmaBufSgtHelper( + device, NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR, dmaBuf, limit); +} + +static NvBool RmGc6BlockerRefCntAction(const struct NvKmsKapiDevice *device, + NvU32 action) +{ + NV_STATUS status; + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { 0 }; + + nvAssert((action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC) || + (action == NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC)); + + params.action = action; + + status = nvRmApiControl(device->hRmClient, + device->hRmSubDevice, + NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT, + ¶ms, + sizeof(params)); + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to modify GC6 blocker refcount for 0x%x, status: 0x%x", + device->hRmSubDevice, status); + return NV_FALSE; + } + + return NV_TRUE; +} + +static NvBool RmGc6BlockerRefCntInc(const struct NvKmsKapiDevice *device) +{ + return RmGc6BlockerRefCntAction( + device, + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC); +} + +static NvBool RmGc6BlockerRefCntDec(const struct NvKmsKapiDevice *device) +{ + return RmGc6BlockerRefCntAction( + device, + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC); +} + +static NvBool GetMemoryPages +( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, + NvU64 **pPages, + NvU32 *pNumPages +) +{ + NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS paramsGetNumPages = {}; + NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS paramsGetPages = {}; + NvU64 *pages; + NV_STATUS status; + + if (device == NULL || memory == NULL) { + return NV_FALSE; + } + + status = nvRmApiControl(device->hRmClient, + memory->hRmHandle, + NV003E_CTRL_CMD_GET_SURFACE_NUM_PHYS_PAGES, + ¶msGetNumPages, + sizeof(paramsGetNumPages)); + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug(device, + "Failed to get number of physical allocation pages for RM" + "memory object 0x%x", memory->hRmHandle); + return NV_FALSE; + } + + if (!paramsGetNumPages.numPages) { + return NV_FALSE; + } + + pages = nvKmsKapiCalloc(paramsGetNumPages.numPages, sizeof(pages)); + if (!pages) { + nvKmsKapiLogDeviceDebug(device, "Failed to allocate memory"); + return NV_FALSE; + } + + paramsGetPages.pPages = NV_PTR_TO_NvP64(pages); + paramsGetPages.numPages = paramsGetNumPages.numPages; + + status = nvRmApiControl(device->hRmClient, + 
memory->hRmHandle, + NV003E_CTRL_CMD_GET_SURFACE_PHYS_PAGES, + ¶msGetPages, + sizeof(paramsGetPages)); + if (status != NV_OK) { + nvKmsKapiFree(pages); + nvKmsKapiLogDeviceDebug(device, + "Failed to get physical allocation pages for RM" + "memory object 0x%x", memory->hRmHandle); + return NV_FALSE; + } + + nvAssert(paramsGetPages.numPages == paramsGetNumPages.numPages); + + *pPages = pages; + *pNumPages = paramsGetPages.numPages; + + return NV_TRUE; +} + +static void FreeMemoryPages +( + NvU64 *pPages +) +{ + nvKmsKapiFree(pPages); +} + +static NvBool MapMemory +( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + void **ppLinearAddress +) +{ + NV_STATUS status; + NvU32 flags = 0; + + if (device == NULL || memory == NULL) { + return NV_FALSE; + } + + switch (type) { + case NVKMS_KAPI_MAPPING_TYPE_USER: + /* + * Usermode clients can't be trusted not to access mappings while + * the GPU is in GC6. + * + * TODO: Revoke/restore mappings rather than blocking GC6 + */ + if (!RmGc6BlockerRefCntInc(device)) { + return NV_FALSE; + } + flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER); + break; + case NVKMS_KAPI_MAPPING_TYPE_KERNEL: + /* + * Kernel clients should ensure on their own that the GPU isn't in + * GC6 before making accesses to mapped vidmem surfaces. + */ + break; + } + + status = nvRmApiMapMemory( + device->hRmClient, + device->hRmSubDevice, + memory->hRmHandle, + 0, + memory->size, + ppLinearAddress, + flags); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to Map RM memory object 0x%x allocated for NVKMemory 0x%p", + memory->hRmHandle, memory); + if (type == NVKMS_KAPI_MAPPING_TYPE_USER) { + RmGc6BlockerRefCntDec(device); // XXX Can't handle failure. + } + return NV_FALSE; + } + + return NV_TRUE; +} + +static void UnmapMemory +( + const struct NvKmsKapiDevice *device, + const struct NvKmsKapiMemory *memory, NvKmsKapiMappingType type, + const void *pLinearAddress +) +{ + NV_STATUS status; + NvU32 flags = 0; + + if (device == NULL || memory == NULL) { + return; + } + + switch (type) { + case NVKMS_KAPI_MAPPING_TYPE_USER: + flags |= DRF_DEF(OS33, _FLAGS, _MEM_SPACE, _USER); + break; + case NVKMS_KAPI_MAPPING_TYPE_KERNEL: + break; + } + + status = + nvRmApiUnmapMemory(device->hRmClient, + device->hRmSubDevice, + memory->hRmHandle, + pLinearAddress, + flags); + + if (status != NV_OK) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to Ummap RM memory object 0x%x allocated for NVKMemory 0x%p", + memory->hRmHandle, memory); + } + + if (type == NVKMS_KAPI_MAPPING_TYPE_USER) { + RmGc6BlockerRefCntDec(device); // XXX Can't handle failure. 
+ } +} + +static NvBool GetSurfaceParams( + struct NvKmsKapiCreateSurfaceParams *params, + NvU32 *pNumPlanes, + enum NvKmsSurfaceMemoryLayout *pLayout, + NvU32 *pLog2GobsPerBlockY, + NvU32 pitch[]) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(params->format); + enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch; + NvU32 log2GobsPerBlockY = 0; + NvU32 i; + + if (pFormatInfo->numPlanes == 0) + { + nvKmsKapiLogDebug("Unknown surface format"); + return NV_FALSE; + } + + for (i = 0; i < pFormatInfo->numPlanes; i++) { + struct NvKmsKapiMemory *memory = + params->planes[i].memory; + + if (memory == NULL) { + return FALSE; + } + + pitch[i] = params->planes[i].pitch; + + if (i == 0) { + if (params->explicit_layout) { + layout = params->layout; + } else { + layout = memory->surfaceParams.layout; + } + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + if (params->explicit_layout) { + log2GobsPerBlockY = params->log2GobsPerBlockY; + } else { + log2GobsPerBlockY = + memory->surfaceParams.blockLinear.log2GobsPerBlock.y; + } + break; + + case NvKmsSurfaceMemoryLayoutPitch: + log2GobsPerBlockY = 0; + break; + + default: + nvKmsKapiLogDebug("Invalid surface layout: %u", layout); + return NV_FALSE; + } + } else { + if (!params->explicit_layout) { + if (layout != memory->surfaceParams.layout) { + nvKmsKapiLogDebug("All planes are not of same layout"); + return FALSE; + } + + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear && + log2GobsPerBlockY != + memory->surfaceParams.blockLinear.log2GobsPerBlock.y) { + + nvKmsKapiLogDebug( + "All planes do not have the same blocklinear parameters"); + return FALSE; + } + } + } + + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + if (params->explicit_layout) { + if (pitch[i] & 63) { + nvKmsKapiLogDebug( + "Invalid block-linear pitch alignment: %u", pitch[i]); + return NV_FALSE; + } + + pitch[i] = pitch[i] >> 6; + } else { + /* + * The caller (nvidia-drm) is not blocklinear-aware, so the + * passed-in pitch cannot accurately reflect block information. + * Override the pitch with what was specified when the surface + * was imported. 
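+             * (In the explicit-layout path above, the caller instead supplies
+             * a byte pitch, which must be a multiple of 64 bytes and is
+             * converted to units of 64-byte blocks by the >> 6 shift.)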
+ */ + pitch[i] = memory->surfaceParams.blockLinear.pitchInBlocks; + } + } else { + pitch[i] = params->planes[i].pitch; + } + + } + + *pNumPlanes = pFormatInfo->numPlanes; + *pLayout = layout; + *pLog2GobsPerBlockY = log2GobsPerBlockY; + + return NV_TRUE; +} +static struct NvKmsKapiSurface* CreateSurface +( + struct NvKmsKapiDevice *device, + struct NvKmsKapiCreateSurfaceParams *params +) +{ + struct NvKmsRegisterSurfaceParams paramsReg = { }; + NvBool status; + + struct NvKmsKapiSurface *surface = NULL; + + enum NvKmsSurfaceMemoryLayout layout = NvKmsSurfaceMemoryLayoutPitch; + NvU32 log2GobsPerBlockY = 0; + NvU32 numPlanes = 0; + NvU32 pitch[NVKMS_MAX_PLANES_PER_SURFACE] = { 0 }; + NvU32 i; + + if (!GetSurfaceParams(params, + &numPlanes, + &layout, + &log2GobsPerBlockY, + pitch)) + { + goto failed; + } + + surface = nvKmsKapiCalloc(1, sizeof(*surface)); + + if (surface == NULL) { + nvKmsKapiLogDebug( + "Failed to allocate memory for NVKMS surface object on " + "NvKmsKapiDevice 0x%p", + device); + goto failed; + } + + if (device->hKmsDevice == 0x0) { + goto done; + } + + /* Create NVKMS surface */ + + paramsReg.request.deviceHandle = device->hKmsDevice; + + paramsReg.request.useFd = FALSE; + paramsReg.request.rmClient = device->hRmClient; + + paramsReg.request.widthInPixels = params->width; + paramsReg.request.heightInPixels = params->height; + + paramsReg.request.format = params->format; + + paramsReg.request.layout = layout; + paramsReg.request.log2GobsPerBlockY = log2GobsPerBlockY; + + for (i = 0; i < numPlanes; i++) { + struct NvKmsKapiMemory *memory = + params->planes[i].memory; + + paramsReg.request.planes[i].u.rmObject = memory->hRmHandle; + paramsReg.request.planes[i].rmObjectSizeInBytes = memory->size; + paramsReg.request.planes[i].offset = params->planes[i].offset; + paramsReg.request.planes[i].pitch = pitch[i]; + } + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_REGISTER_SURFACE, + ¶msReg, sizeof(paramsReg)); + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to register NVKMS surface of dimensions %ux%u " + "and %s format", + params->width, + params->height, + nvKmsSurfaceMemoryFormatToString(params->format)); + + goto failed; + } + + surface->hKmsHandle = paramsReg.reply.surfaceHandle; + +done: + return surface; + +failed: + nvKmsKapiFree(surface); + + return NULL; +} + +static void DestroySurface +( + struct NvKmsKapiDevice *device, struct NvKmsKapiSurface *surface +) +{ + struct NvKmsUnregisterSurfaceParams paramsUnreg = { }; + NvBool status; + + if (device->hKmsDevice == 0x0) { + goto done; + } + + paramsUnreg.request.deviceHandle = device->hKmsDevice; + paramsUnreg.request.surfaceHandle = surface->hKmsHandle; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_UNREGISTER_SURFACE, + ¶msUnreg, sizeof(paramsUnreg)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to unregister NVKMS surface registered for " + "NvKmsKapiSurface 0x%p", + surface); + } + +done: + nvKmsKapiFree(surface); +} + +/* + * Helper function to convert NvKmsMode to NvKmsKapiDisplayMode. 
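+ * (NvModeTimings carries the refresh rate as RRx1k, i.e. Hz multiplied by
+ * 1000; it is copied into NvKmsKapiDisplayMode::timings.refreshRate in the
+ * same units.)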
+ */ +static void NvKmsModeToKapi +( + const struct NvKmsMode *kmsMode, + struct NvKmsKapiDisplayMode *mode +) +{ + const NvModeTimings *timings = &kmsMode->timings; + + nvkms_memset(mode, 0, sizeof(*mode)); + + mode->timings.refreshRate = timings->RRx1k; + mode->timings.pixelClockHz = timings->pixelClockHz; + mode->timings.hVisible = timings->hVisible; + mode->timings.hSyncStart = timings->hSyncStart; + mode->timings.hSyncEnd = timings->hSyncEnd; + mode->timings.hTotal = timings->hTotal; + mode->timings.hSkew = timings->hSkew; + mode->timings.vVisible = timings->vVisible; + mode->timings.vSyncStart = timings->vSyncStart; + mode->timings.vSyncEnd = timings->vSyncEnd; + mode->timings.vTotal = timings->vTotal; + + mode->timings.flags.interlaced = timings->interlaced; + mode->timings.flags.doubleScan = timings->doubleScan; + mode->timings.flags.hSyncPos = timings->hSyncPos; + mode->timings.flags.hSyncNeg = timings->hSyncNeg; + mode->timings.flags.vSyncPos = timings->vSyncPos; + mode->timings.flags.vSyncNeg = timings->vSyncNeg; + + mode->timings.widthMM = timings->sizeMM.w; + mode->timings.heightMM = timings->sizeMM.h; + + ct_assert(sizeof(mode->name) == sizeof(kmsMode->name)); + + nvkms_memcpy(mode->name, kmsMode->name, sizeof(mode->name)); +} + +static void InitNvKmsModeValidationParams( + const struct NvKmsKapiDevice *device, + struct NvKmsModeValidationParams *params) +{ + /* + * Mode timings structures of KAPI clients may not have field like + * RRx1k, it does not guarantee that computed RRx1k value during + * conversion from - + * KAPI client's mode-timings structure + * -> NvKmsKapiDisplayMode -> NvModeTimings + * is same as what we get from edid, this may cause mode-set to fail. + * + * The RRx1k filed don't impact hardware modetiming values, therefore + * override RRx1k check. + * + * XXX NVKMS TODO: Bug 200156338 is filed to delete NvModeTimings::RRx1k + * if possible. + */ + params->overrides = NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK; +} + +static int GetDisplayMode +( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, NvU32 modeIndex, + struct NvKmsKapiDisplayMode *mode, NvBool *valid, + NvBool *preferredMode +) +{ + struct NvKmsValidateModeIndexParams paramsValidate = { }; + NvBool status; + + if (device == NULL) { + return -1; + } + + paramsValidate.request.deviceHandle = device->hKmsDevice; + paramsValidate.request.dispHandle = device->hKmsDisp; + + paramsValidate.request.dpyId = nvNvU32ToDpyId(display); + + InitNvKmsModeValidationParams(device, + ¶msValidate.request.modeValidation); + + paramsValidate.request.modeIndex = modeIndex; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_VALIDATE_MODE_INDEX, + ¶msValidate, sizeof(paramsValidate)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to get validated mode index 0x%x for NvKmsKapiDisplay 0x%08x", + modeIndex, display); + return -1; + } + + if (mode != NULL) { + NvKmsModeToKapi(¶msValidate.reply.mode, mode); + } + + + if (valid != NULL) { + *valid = paramsValidate.reply.valid; + } + + if (preferredMode != NULL) { + *preferredMode = paramsValidate.reply.preferredMode; + } + + return paramsValidate.reply.end ? 0 : 1; +} + +/* + * Helper function to convert NvKmsKapiDisplayMode to NvKmsMode. 
+ */ +static void NvKmsKapiDisplayModeToKapi +( + const struct NvKmsKapiDisplayMode *mode, + struct NvKmsMode *kmsMode +) +{ + NvModeTimings *timings = &kmsMode->timings; + + nvkms_memset(kmsMode, 0, sizeof(*kmsMode)); + + nvkms_memcpy(kmsMode->name, mode->name, sizeof(mode->name)); + + timings->RRx1k = mode->timings.refreshRate; + timings->pixelClockHz = mode->timings.pixelClockHz; + timings->hVisible = mode->timings.hVisible; + timings->hSyncStart = mode->timings.hSyncStart; + timings->hSyncEnd = mode->timings.hSyncEnd; + timings->hTotal = mode->timings.hTotal; + timings->hSkew = mode->timings.hSkew; + timings->vVisible = mode->timings.vVisible; + timings->vSyncStart = mode->timings.vSyncStart; + timings->vSyncEnd = mode->timings.vSyncEnd; + timings->vTotal = mode->timings.vTotal; + + timings->interlaced = mode->timings.flags.interlaced; + timings->doubleScan = mode->timings.flags.doubleScan; + timings->hSyncPos = mode->timings.flags.hSyncPos; + timings->hSyncNeg = mode->timings.flags.hSyncNeg; + timings->vSyncPos = mode->timings.flags.vSyncPos; + timings->vSyncNeg = mode->timings.flags.vSyncNeg; + + timings->sizeMM.w = mode->timings.widthMM; + timings->sizeMM.h = mode->timings.heightMM; +} + +static NvBool ValidateDisplayMode +( + struct NvKmsKapiDevice *device, + NvKmsKapiDisplay display, const struct NvKmsKapiDisplayMode *mode +) +{ + struct NvKmsValidateModeParams paramsValidate; + NvBool status; + + if (device == NULL) { + return NV_FALSE; + } + + nvkms_memset(¶msValidate, 0, sizeof(paramsValidate)); + + paramsValidate.request.deviceHandle = device->hKmsDevice; + paramsValidate.request.dispHandle = device->hKmsDisp; + + paramsValidate.request.dpyId = nvNvU32ToDpyId(display); + + InitNvKmsModeValidationParams(device, + ¶msValidate.request.modeValidation); + + + NvKmsKapiDisplayModeToKapi(mode, ¶msValidate.request.mode); + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_VALIDATE_MODE, + ¶msValidate, sizeof(paramsValidate)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "Failed to get validated mode %ux%u@%uHz for NvKmsKapiDisplay 0x%08x of " + "NvKmsKapiDevice 0x%p", + mode->timings.hVisible, mode->timings.vVisible, + mode->timings.refreshRate/1000, display, + device); + return NV_FALSE; + } + + return paramsValidate.reply.valid; +} + +static NvBool AssignSyncObjectConfig( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiLayerConfig *pLayerConfig, + struct NvKmsChannelSyncObjects *pSyncObject, + NvBool bFromKmsSetMode) +{ + if (!device->supportsSyncpts || bFromKmsSetMode) { + if (pLayerConfig->syncptParams.preSyncptSpecified || + pLayerConfig->syncptParams.postSyncptRequested) { + return NV_FALSE; + } + } + + pSyncObject->useSyncpt = FALSE; + + if (pLayerConfig->syncptParams.preSyncptSpecified) { + pSyncObject->useSyncpt = TRUE; + + pSyncObject->u.syncpts.pre.type = NVKMS_SYNCPT_TYPE_RAW; + pSyncObject->u.syncpts.pre.u.raw.id = pLayerConfig->syncptParams.preSyncptId; + pSyncObject->u.syncpts.pre.u.raw.value = pLayerConfig->syncptParams.preSyncptValue; + } + + if (pLayerConfig->syncptParams.postSyncptRequested) { + pSyncObject->useSyncpt = TRUE; + + pSyncObject->u.syncpts.requestedPostType = NVKMS_SYNCPT_TYPE_FD; + } + return NV_TRUE; +} + +static void NvKmsKapiCursorConfigToKms( + const struct NvKmsKapiCursorRequestedConfig *requestedConfig, + struct NvKmsFlipCommonParams *params, + NvBool bFromKmsSetMode) +{ + if (requestedConfig->flags.surfaceChanged || bFromKmsSetMode) { + params->cursor.imageSpecified = NV_TRUE; + + if 
(requestedConfig->surface != NULL) { + params->cursor.image.surfaceHandle[NVKMS_LEFT] = + requestedConfig->surface->hKmsHandle; + } + + params->cursor.image.cursorCompParams.colorKeySelect = + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE; + params->cursor.image.cursorCompParams.blendingMode[1] = + requestedConfig->compParams.compMode; + params->cursor.image.cursorCompParams.surfaceAlpha = + requestedConfig->compParams.surfaceAlpha; + } + + if (requestedConfig->flags.dstXYChanged || bFromKmsSetMode) { + params->cursor.position.x = requestedConfig->dstX; + params->cursor.position.y = requestedConfig->dstY; + + params->cursor.positionSpecified = NV_TRUE; + } +} + +static NvBool NvKmsKapiOverlayLayerConfigToKms( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, + const NvU32 layer, + const NvU32 head, + struct NvKmsFlipCommonParams *params, + NvBool commit, + NvBool bFromKmsSetMode) +{ + NvBool ret = NV_FALSE; + const struct NvKmsKapiLayerConfig *layerConfig = + &layerRequestedConfig->config; + + if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) { + params->layer[layer].syncObjects.specified = NV_TRUE; + params->layer[layer].completionNotifier.specified = NV_TRUE; + params->layer[layer].surface.specified = NV_TRUE; + + if (layerConfig->surface != NULL) { + params->layer[layer].surface.handle[NVKMS_LEFT] = + layerConfig->surface->hKmsHandle; + } + + params->layer[layer].surface.rrParams = + layerConfig->rrParams; + + params->layer[layer].compositionParams.val.colorKeySelect = + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE; + params->layer[layer].compositionParams.val.blendingMode[1] = + layerConfig->compParams.compMode; + params->layer[layer].compositionParams.val.surfaceAlpha = + layerConfig->compParams.surfaceAlpha; + params->layer[layer].compositionParams.specified = TRUE; + params->layer[layer].minPresentInterval = + layerConfig->minPresentInterval; + } + + params->layer[layer].sizeIn.val.width = layerConfig->srcWidth; + params->layer[layer].sizeIn.val.height = layerConfig->srcHeight; + params->layer[layer].sizeIn.specified = TRUE; + + params->layer[layer].sizeOut.val.width = layerConfig->dstWidth; + params->layer[layer].sizeOut.val.height = layerConfig->dstHeight; + params->layer[layer].sizeOut.specified = TRUE; + + if (layerRequestedConfig->flags.dstXYChanged || bFromKmsSetMode) { + params->layer[layer].outputPosition.val.x = layerConfig->dstX; + params->layer[layer].outputPosition.val.y = layerConfig->dstY; + + params->layer[layer].outputPosition.specified = NV_TRUE; + } + + if (commit) { + NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX( + device->layerState[head][layer]. + currFlipNotifierIndex); + + if (layerConfig->surface != NULL) { + NvU32 nextIndexOffsetInBytes = + NVKMS_KAPI_NOTIFIER_OFFSET(head, + layer, nextIndex); + + params->layer[layer].completionNotifier.val. + surface.surfaceHandle = device->notifier.hKmsHandle; + + params->layer[layer].completionNotifier.val. + surface.format = device->notifier.format; + + params->layer[layer].completionNotifier.val. + surface.offsetInWords = nextIndexOffsetInBytes >> 2; + + params->layer[layer].completionNotifier.val.awaken = NV_TRUE; + } + + ret = AssignSyncObjectConfig(device, + layerConfig, + ¶ms->layer[layer].syncObjects.val, + bFromKmsSetMode); + if (ret == NV_FALSE) { + return ret; + } + + /* + * XXX Should this be done after commit? + * What if commit fail? 
+ * + * It is not expected to fail any commit in KAPI layer, + * only validated configuration is expected + * to commit. + */ + device->layerState[head][layer]. + currFlipNotifierIndex = nextIndex; + } + + return NV_TRUE; +} + +static NvBool NvKmsKapiPrimaryLayerConfigToKms( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, + const NvU32 head, + struct NvKmsFlipCommonParams *params, + NvBool commit, + NvBool bFromKmsSetMode) +{ + NvBool ret = NV_FALSE; + const struct NvKmsKapiLayerConfig *layerConfig = + &layerRequestedConfig->config; + + NvBool changed = FALSE; + + if (layerRequestedConfig->flags.surfaceChanged || bFromKmsSetMode) { + params->layer[NVKMS_MAIN_LAYER].surface.specified = NV_TRUE; + params->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = NV_TRUE; + params->layer[NVKMS_MAIN_LAYER].syncObjects.specified = NV_TRUE; + + + params->layer[NVKMS_MAIN_LAYER].minPresentInterval = + layerConfig->minPresentInterval; + params->layer[NVKMS_MAIN_LAYER].tearing = layerConfig->tearing; + params->layer[NVKMS_MAIN_LAYER].surface.rrParams = layerConfig->rrParams; + + if (layerConfig->surface != NULL) { + params->layer[NVKMS_MAIN_LAYER].surface.handle[0] = + layerConfig->surface->hKmsHandle; + + if (params->layer[NVKMS_MAIN_LAYER].surface.handle[0] != 0) { + params->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = layerConfig->srcWidth; + params->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = layerConfig->srcHeight; + params->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE; + + params->layer[NVKMS_MAIN_LAYER].sizeOut.val.width = layerConfig->dstWidth; + params->layer[NVKMS_MAIN_LAYER].sizeOut.val.height = layerConfig->dstHeight; + params->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE; + } + } + + changed = TRUE; + } + + if (layerRequestedConfig->flags.srcXYChanged || bFromKmsSetMode) { + params->viewPortIn.point.x = layerConfig->srcX; + params->viewPortIn.point.y = layerConfig->srcY; + params->viewPortIn.specified = NV_TRUE; + + changed = TRUE; + } + + if (commit && changed) { + NvU32 nextIndex = NVKMS_KAPI_INC_NOTIFIER_INDEX( + device->layerState[head][NVKMS_MAIN_LAYER]. + currFlipNotifierIndex); + + if (layerConfig->surface != NULL) { + NvU32 nextIndexOffsetInBytes = + NVKMS_KAPI_NOTIFIER_OFFSET(head, + NVKMS_MAIN_LAYER, nextIndex); + + params->layer[NVKMS_MAIN_LAYER].completionNotifier. + val.surface.surfaceHandle = device->notifier.hKmsHandle; + + params->layer[NVKMS_MAIN_LAYER].completionNotifier. + val.surface.format = device->notifier.format; + + params->layer[NVKMS_MAIN_LAYER].completionNotifier. + val.surface.offsetInWords = nextIndexOffsetInBytes >> 2; + + params->layer[NVKMS_MAIN_LAYER].completionNotifier.val.awaken = NV_TRUE; + } + + ret = AssignSyncObjectConfig(device, + layerConfig, + ¶ms->layer[NVKMS_MAIN_LAYER].syncObjects.val, + bFromKmsSetMode); + if (ret == NV_FALSE) { + return ret; + } + + /* + * XXX Should this be done after commit? + * What if commit fail? + * + * It is not expected to fail any commit in KAPI layer, + * only validated configuration is expected + * to commit. + */ + device->layerState[head][NVKMS_MAIN_LAYER]. 
+ currFlipNotifierIndex = nextIndex; + } + + return NV_TRUE; +} + +static NvBool NvKmsKapiLayerConfigToKms( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig, + const NvU32 layer, + const NvU32 head, + struct NvKmsFlipCommonParams *params, + NvBool commit, + NvBool bFromKmsSetMode) +{ + if (layer == NVKMS_KAPI_LAYER_PRIMARY_IDX) { + return NvKmsKapiPrimaryLayerConfigToKms(device, + layerRequestedConfig, + head, + params, + commit, + bFromKmsSetMode); + + } + + return NvKmsKapiOverlayLayerConfigToKms(device, + layerRequestedConfig, + layer, + head, + params, + commit, + bFromKmsSetMode); +} + +/* + * Helper function to convert NvKmsKapiRequestedModeSetConfig + * to NvKmsSetModeParams. + */ +static NvBool NvKmsKapiRequestedModeSetConfigToKms( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsSetModeParams *params, + NvBool commit) +{ + NvU32 dispIdx = device->dispIdx; + NvU32 head; + + nvkms_memset(params, 0, sizeof(*params)); + + params->request.commit = commit; + params->request.deviceHandle = device->hKmsDevice; + params->request.requestedDispsBitMask = 1 << dispIdx; + + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + struct NvKmsSetModeOneHeadRequest *paramsHead; + NvU32 layer; + NvU32 i; + + if ((requestedConfig->headsMask & (1 << head)) == 0x0) { + continue; + } + + params->request.disp[dispIdx].requestedHeadsBitMask |= 1 << head; + + if (headModeSetConfig->numDisplays == 0) { + continue; + } + + if (params->request.commit && !headModeSetConfig->bActive) { + continue; + } + + paramsHead = ¶ms->request.disp[dispIdx].head[head]; + + InitNvKmsModeValidationParams(device, + ¶msHead->modeValidationParams); + + for (i = 0; i < headModeSetConfig->numDisplays; i++) { + paramsHead->dpyIdList = nvAddDpyIdToDpyIdList( + nvNvU32ToDpyId(headModeSetConfig->displays[i]), + paramsHead->dpyIdList); + } + + NvKmsKapiDisplayModeToKapi(&headModeSetConfig->mode, ¶msHead->mode); + + NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig, + ¶msHead->flip, + NV_TRUE /* bFromKmsSetMode */); + for (layer = 0; + layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); + layer++) { + + const struct NvKmsKapiLayerRequestedConfig *layerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[layer]; + + if (!NvKmsKapiLayerConfigToKms(device, + layerRequestedConfig, + layer, + head, + ¶msHead->flip, + commit, + NV_TRUE /* bFromKmsSetMode */)) { + return NV_FALSE; + } + } + + paramsHead->viewPortSizeIn.width = + headModeSetConfig->mode.timings.hVisible; + paramsHead->viewPortSizeIn.height = + headModeSetConfig->mode.timings.vVisible; + } + + return NV_TRUE; +} + + +static NvBool KmsSetMode( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + const NvBool commit) +{ + struct NvKmsSetModeParams *params = NULL; + NvBool status = NV_FALSE; + + params = nvKmsKapiCalloc(1, sizeof(*params)); + + if (params == NULL) { + goto done; + } + + if (!NvKmsKapiRequestedModeSetConfigToKms(device, + requestedConfig, + params, + commit)) { + goto done; + } + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_SET_MODE, + params, sizeof(*params)); + + if (!status) { + 
nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_SET_MODE ioctl failed"); + goto done; + } + + if (params->reply.status != NVKMS_SET_MODE_STATUS_SUCCESS) + { + int i; + + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_SET_MODE failed! Status:\n"); + + nvKmsKapiLogDeviceDebug( + device, + " top-level status: %d\n", params->reply.status); + + nvKmsKapiLogDeviceDebug( + device, + " disp0 status: %d\n", params->reply.disp[0].status); + + for (i = 0; i < ARRAY_LEN(params->reply.disp[0].head); i++) + { + nvKmsKapiLogDeviceDebug( + device, + " head%d status: %d\n", + i, params->reply.disp[0].head[i].status); + } + + status = NV_FALSE; + } + +done: + + if (params != NULL) { + nvKmsKapiFree(params); + } + + return status; +} + +static NvBool IsHeadConfigValid( + const struct NvKmsFlipParams *params, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig, + NvU32 head) +{ + if ((requestedConfig->headsMask & (1 << head)) == 0x0) { + return NV_FALSE; + } + + if (headModeSetConfig->numDisplays == 0) { + return NV_FALSE; + } + + if (params->request.commit && !headModeSetConfig->bActive) { + return NV_FALSE; + } + return NV_TRUE; +} + +static NvBool KmsFlip( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsKapiModeSetReplyConfig *replyConfig, + const NvBool commit) +{ + NvBool bChanged = NV_FALSE; + struct NvKmsFlipParams *params = NULL; + NvBool status = NV_TRUE; + NvU32 i; + + params = nvKmsKapiCalloc(1, sizeof(*params)); + + if (params == NULL) { + return NV_FALSE; + } + + params->request.deviceHandle = device->hKmsDevice; + params->request.commit = commit; + + for (i = 0; i < ARRAY_LEN(params->request.sd); i++) { + struct NvKmsFlipRequestOneSubDevice *sdParams = ¶ms->request.sd[i]; + NvU32 head; + + if ((device->subDeviceMask & (1 << i)) == 0x0) { + continue; + } + + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + + struct NvKmsFlipCommonParams *flipParams = &sdParams->head[head]; + + NvU32 layer; + + if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, head)) { + continue; + } + + sdParams->requestedHeadsBitMask |= 1 << head; + + NvKmsKapiCursorConfigToKms(&headRequestedConfig->cursorRequestedConfig, + flipParams, + NV_FALSE /* bFromKmsSetMode */); + + for (layer = 0; + layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); + layer++) { + + const struct NvKmsKapiLayerRequestedConfig + *layerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[layer]; + + status = NvKmsKapiLayerConfigToKms(device, + layerRequestedConfig, + layer, + head, + flipParams, + commit, + NV_FALSE /* bFromKmsSetMode */); + + if (status != NV_TRUE) { + goto done; + } + + bChanged = NV_TRUE; + } + } + } + + if (!bChanged) { + goto done; + } + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_FLIP, + params, sizeof(*params)); + + if (!status) { + nvKmsKapiLogDeviceDebug( + device, + "NVKMS_IOCTL_FLIP ioctl failed"); + goto done; + } + + if (!bChanged || !commit) { + goto done; + } + + /*! 
fill back flip reply */ + for (i = 0; i < ARRAY_LEN(params->request.sd); i++) { + + struct NvKmsFlipReplyOneSubDevice *sdReplyParams = ¶ms->reply.sd[i]; + + NvU32 head; + + if ((device->subDeviceMask & (1 << i)) == 0x0) { + continue; + } + + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); + head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + + struct NvKmsKapiHeadReplyConfig *headReplyConfig = + &replyConfig->headReplyConfig[head]; + + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + + struct NvKmsFlipCommonReplyOneHead *flipParams = &sdReplyParams->head[head]; + + NvU32 layer; + + if (!IsHeadConfigValid(params, requestedConfig, headModeSetConfig, head)) { + continue; + } + + for (layer = 0; + layer < ARRAY_LEN(headRequestedConfig->layerRequestedConfig); + layer++) { + + const struct NvKmsKapiLayerConfig *layerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[layer].config; + + struct NvKmsKapiLayerReplyConfig *layerReplyConfig = + &headReplyConfig->layerReplyConfig[layer]; + + /*! initialize explicitly to -1 as 0 is valid file descriptor */ + layerReplyConfig->postSyncptFd = -1; + if (layerRequestedConfig->syncptParams.postSyncptRequested) { + layerReplyConfig->postSyncptFd = + flipParams->layer[layer].postSyncpt.u.fd; + } + } + } + } + +done: + + nvKmsKapiFree(params); + + return status; +} + +static NvBool ApplyModeSetConfig( + struct NvKmsKapiDevice *device, + const struct NvKmsKapiRequestedModeSetConfig *requestedConfig, + struct NvKmsKapiModeSetReplyConfig *replyConfig, + const NvBool commit) +{ + NvBool bRequiredModeset = NV_FALSE; + NvU32 head; + + if (device == NULL || requestedConfig == NULL) { + return NV_FALSE; + } + + for (head = 0; + head < ARRAY_LEN(requestedConfig->headRequestedConfig); head++) { + + const struct NvKmsKapiHeadRequestedConfig *headRequestedConfig = + &requestedConfig->headRequestedConfig[head]; + const struct NvKmsKapiHeadModeSetConfig *headModeSetConfig = + &headRequestedConfig->modeSetConfig; + + const struct NvKmsKapiLayerRequestedConfig *primaryLayerRequestedConfig = + &headRequestedConfig->layerRequestedConfig[NVKMS_KAPI_LAYER_PRIMARY_IDX]; + + if ((requestedConfig->headsMask & (1 << head)) == 0x0) { + continue; + } + + /* + * Source width/height of primary layer represents width/height of + * ViewPortIn. Destination X, Y, width and height of primary layer + * represents dimensions of ViewPortOut. To apply changes in + * width/height of ViewPortIn and/or changes dimensions of + * ViewPortOut requires full modeset. + */ + + bRequiredModeset = + primaryLayerRequestedConfig->flags.srcWHChanged || + primaryLayerRequestedConfig->flags.dstXYChanged || + primaryLayerRequestedConfig->flags.dstWHChanged || + headRequestedConfig->flags.activeChanged || + headRequestedConfig->flags.displaysChanged || + headRequestedConfig->flags.modeChanged; + + /* + * NVKMS flip ioctl could not validate flip configuration for an + * inactive head, therefore use modeset ioctl if configuration contain + * any such head. 
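+     * (Concretely: a head that still has displays assigned but has
+     * bActive == NV_FALSE forces bRequiredModeset below even for
+     * validation-only, non-commit requests.)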
+ */ + if (!commit && + headModeSetConfig->numDisplays != 0 && !headModeSetConfig->bActive) { + bRequiredModeset = TRUE; + } + + if (bRequiredModeset) { + break; + } + } + + if (bRequiredModeset) { + return KmsSetMode(device, requestedConfig, commit); + } + + return KmsFlip(device, requestedConfig, replyConfig, commit); +} + +void nvKmsKapiHandleEventQueueChange +( + struct NvKmsKapiDevice *device +) +{ + if (device == NULL) { + return; + } + + /* + * If the callback is NULL, event interest declaration should be + * rejected, and no events would be reported. + */ + nvAssert(device->eventCallback != NULL); + + do + { + struct NvKmsGetNextEventParams kmsEventParams = { }; + struct NvKmsKapiEvent kapiEvent = { }; + NvBool err = NV_FALSE; + + if (!nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_GET_NEXT_EVENT, + &kmsEventParams, sizeof(kmsEventParams))) { + break; + } + + if (!kmsEventParams.reply.valid) { + break; + } + + kapiEvent.type = kmsEventParams.reply.event.eventType; + + kapiEvent.device = device; + kapiEvent.privateData = device->privateData; + + switch (kmsEventParams.reply.event.eventType) { + case NVKMS_EVENT_TYPE_DPY_CHANGED: + kapiEvent.u.displayChanged.display = + nvDpyIdToNvU32(kmsEventParams. + reply.event.u.dpyChanged.dpyId); + break; + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: + kapiEvent.u.dynamicDisplayConnected.display = + nvDpyIdToNvU32(kmsEventParams. + reply.event.u.dynamicDpyConnected.dpyId); + break; + case NVKMS_EVENT_TYPE_FLIP_OCCURRED: + kapiEvent.u.flipOccurred.head = + kmsEventParams.reply.event.u.flipOccurred.head; + kapiEvent.u.flipOccurred.layer = + kmsEventParams.reply.event.u.flipOccurred.layer; + break; + default: + continue; + } + + if (err) { + nvKmsKapiLogDeviceDebug( + device, + "Error in conversion from " + "NvKmsGetNextEventParams to NvKmsKapiEvent"); + continue; + } + + device->eventCallback(&kapiEvent); + + } while(1); +} + +/* + * Helper function to convert NvKmsQueryDpyCRC32Reply to NvKmsKapiDpyCRC32. 
+ */ +static void NvKmsCrcsToKapi +( + const struct NvKmsQueryDpyCRC32Reply *crcs, + struct NvKmsKapiCrcs *kmsCrcs +) +{ + kmsCrcs->outputCrc32.value = crcs->outputCrc32.value; + kmsCrcs->outputCrc32.supported = crcs->outputCrc32.supported; + kmsCrcs->rasterGeneratorCrc32.value = crcs->rasterGeneratorCrc32.value; + kmsCrcs->rasterGeneratorCrc32.supported = crcs->rasterGeneratorCrc32.supported; + kmsCrcs->compositorCrc32.value = crcs->compositorCrc32.value; + kmsCrcs->compositorCrc32.supported = crcs->compositorCrc32.supported; +} + +static NvBool GetCRC32 +( + struct NvKmsKapiDevice *device, + NvU32 head, + struct NvKmsKapiCrcs *crc32 +) +{ + struct NvKmsQueryDpyCRC32Params params = { }; + NvBool status; + + if (device->hKmsDevice == 0x0) { + return NV_TRUE; + } + + params.request.deviceHandle = device->hKmsDevice; + params.request.dispHandle = device->hKmsDisp; + params.request.head = head; + + status = nvkms_ioctl_from_kapi(device->pKmsOpen, + NVKMS_IOCTL_QUERY_DPY_CRC32, + ¶ms, sizeof(params)); + + if (!status) { + nvKmsKapiLogDeviceDebug(device, "NVKMS QueryDpyCRC32Data failed."); + return NV_FALSE; + } + NvKmsCrcsToKapi(¶ms.reply, crc32); + return NV_TRUE; +} + +NvBool nvKmsKapiGetFunctionsTableInternal +( + struct NvKmsKapiFunctionsTable *funcsTable +) +{ + if (funcsTable == NULL) { + return NV_FALSE; + } + + if (nvkms_strcmp(funcsTable->versionString, NV_VERSION_STRING) != 0) { + funcsTable->versionString = NV_VERSION_STRING; + return NV_FALSE; + } + + funcsTable->systemInfo.bAllowWriteCombining = + nvkms_allow_write_combining(); + + funcsTable->enumerateGpus = EnumerateGpus; + + funcsTable->allocateDevice = AllocateDevice; + funcsTable->freeDevice = FreeDevice; + + funcsTable->grabOwnership = GrabOwnership; + funcsTable->releaseOwnership = ReleaseOwnership; + + funcsTable->declareEventInterest = DeclareEventInterest; + + funcsTable->getDeviceResourcesInfo = GetDeviceResourcesInfo; + funcsTable->getDisplays = GetDisplays; + funcsTable->getConnectorInfo = GetConnectorInfo; + + funcsTable->getStaticDisplayInfo = GetStaticDisplayInfo; + funcsTable->getDynamicDisplayInfo = GetDynamicDisplayInfo; + + funcsTable->allocateVideoMemory = AllocateVideoMemory; + funcsTable->allocateSystemMemory = AllocateSystemMemory; + funcsTable->importMemory = ImportMemory; + funcsTable->dupMemory = DupMemory; + funcsTable->exportMemory = ExportMemory; + funcsTable->freeMemory = FreeMemory; + funcsTable->getSystemMemoryHandleFromSgt = GetSystemMemoryHandleFromSgt; + funcsTable->getSystemMemoryHandleFromDmaBuf = + GetSystemMemoryHandleFromDmaBuf; + + funcsTable->mapMemory = MapMemory; + funcsTable->unmapMemory = UnmapMemory; + + funcsTable->createSurface = CreateSurface; + funcsTable->destroySurface = DestroySurface; + + funcsTable->getDisplayMode = GetDisplayMode; + funcsTable->validateDisplayMode = ValidateDisplayMode; + + funcsTable->applyModeSetConfig = ApplyModeSetConfig; + + funcsTable->allocateChannelEvent = nvKmsKapiAllocateChannelEvent; + funcsTable->freeChannelEvent = nvKmsKapiFreeChannelEvent; + + funcsTable->getCRC32 = GetCRC32; + + funcsTable->getMemoryPages = GetMemoryPages; + funcsTable->freeMemoryPages = FreeMemoryPages; + + return NV_TRUE; +} diff --git a/src/nvidia-modeset/lib/nvkms-format.c b/src/nvidia-modeset/lib/nvkms-format.c new file mode 100644 index 000000000..55901cf82 --- /dev/null +++ b/src/nvidia-modeset/lib/nvkms-format.c @@ -0,0 +1,132 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-format.h" +#include "nv_common_utils.h" +#include "nvctassert.h" + +#include + +#define RGB_ENTRY(_format, _depth, _bytesPerPixel) \ + [NvKmsSurfaceMemoryFormat##_format] = { \ + .format = NvKmsSurfaceMemoryFormat##_format, \ + .name = #_format, \ + .depth = _depth, \ + .isYUV = NV_FALSE, \ + .numPlanes = 1, \ + { \ + .rgb = { \ + .bytesPerPixel = _bytesPerPixel, \ + .bitsPerPixel = _bytesPerPixel * 8, \ + }, \ + }, \ + } + +#define YUV_ENTRY(_format, \ + _depth, \ + _numPlanes, \ + _depthPerComponent, \ + _storageBitsPerComponent, \ + _horizChromaDecimationFactor, \ + _vertChromaDecimationFactor) \ + [NvKmsSurfaceMemoryFormat##_format] = { \ + .format = NvKmsSurfaceMemoryFormat##_format, \ + .name = #_format, \ + .depth = _depth, \ + .isYUV = NV_TRUE, \ + .numPlanes = _numPlanes, \ + { \ + .yuv = { \ + .depthPerComponent = _depthPerComponent, \ + .storageBitsPerComponent = _storageBitsPerComponent, \ + .horizChromaDecimationFactor = _horizChromaDecimationFactor, \ + .vertChromaDecimationFactor = _vertChromaDecimationFactor, \ + }, \ + }, \ + } + +static const NvKmsSurfaceMemoryFormatInfo nvKmsEmptyFormatInfo; + +/* + * For 10/12-bit YUV formats, each component is packed in a 16-bit container in + * memory, and fetched by display HW as such. 
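+ * In the table below, the 'depth' of each YUV format works out to the
+ * average effective bits per pixel across planes: e.g. 8 + 16/4 = 12 for
+ * 8-bit 4:2:0, and 10 + 20/2 = 20 for 10-bit 4:2:2.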
+ */ +static const NvKmsSurfaceMemoryFormatInfo nvKmsSurfaceMemoryFormatInfo[] = { + RGB_ENTRY(I8, 8, 1), + RGB_ENTRY(A1R5G5B5, 16, 2), + RGB_ENTRY(X1R5G5B5, 15, 2), + RGB_ENTRY(R5G6B5, 16, 2), + RGB_ENTRY(A8R8G8B8, 32, 4), + RGB_ENTRY(X8R8G8B8, 24, 4), + RGB_ENTRY(A2B10G10R10, 32, 4), + RGB_ENTRY(X2B10G10R10, 30, 4), + RGB_ENTRY(A8B8G8R8, 32, 4), + RGB_ENTRY(X8B8G8R8, 24, 4), + RGB_ENTRY(RF16GF16BF16AF16, 64, 8), + RGB_ENTRY(R16G16B16A16, 64, 8), + RGB_ENTRY(RF32GF32BF32AF32, 128, 16), + YUV_ENTRY(Y8_U8__Y8_V8_N422, 16, 1, 8, 8, 2, 1), + YUV_ENTRY(U8_Y8__V8_Y8_N422, 16, 1, 8, 8, 2, 1), + YUV_ENTRY(Y8___U8V8_N444, 24, 2, 8, 8, 1, 1), + YUV_ENTRY(Y8___V8U8_N444, 24, 2, 8, 8, 1, 1), + YUV_ENTRY(Y8___U8V8_N422, 16, 2, 8, 8, 2, 1), + YUV_ENTRY(Y8___V8U8_N422, 16, 2, 8, 8, 2, 1), + YUV_ENTRY(Y8___U8V8_N420, 12, 2, 8, 8, 2, 2), + YUV_ENTRY(Y8___V8U8_N420, 12, 2, 8, 8, 2, 2), + YUV_ENTRY(Y10___U10V10_N444, 30, 2, 10, 16, 1, 1), + YUV_ENTRY(Y10___V10U10_N444, 30, 2, 10, 16, 1, 1), + YUV_ENTRY(Y10___U10V10_N422, 20, 2, 10, 16, 2, 1), + YUV_ENTRY(Y10___V10U10_N422, 20, 2, 10, 16, 2, 1), + YUV_ENTRY(Y10___U10V10_N420, 15, 2, 10, 16, 2, 2), + YUV_ENTRY(Y10___V10U10_N420, 15, 2, 10, 16, 2, 2), + YUV_ENTRY(Y12___U12V12_N444, 36, 2, 12, 16, 1, 1), + YUV_ENTRY(Y12___V12U12_N444, 36, 2, 12, 16, 1, 1), + YUV_ENTRY(Y12___U12V12_N422, 24, 2, 12, 16, 2, 1), + YUV_ENTRY(Y12___V12U12_N422, 24, 2, 12, 16, 2, 1), + YUV_ENTRY(Y12___U12V12_N420, 18, 2, 12, 16, 2, 2), + YUV_ENTRY(Y12___V12U12_N420, 18, 2, 12, 16, 2, 2), + YUV_ENTRY(Y8___U8___V8_N444, 24, 3, 8, 8, 1, 1), + YUV_ENTRY(Y8___U8___V8_N420, 12, 3, 8, 8, 2, 2), +}; + +ct_assert(ARRAY_LEN(nvKmsSurfaceMemoryFormatInfo) == + (NvKmsSurfaceMemoryFormatMax + 1)); + +const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo( + const enum NvKmsSurfaceMemoryFormat format) +{ + if (format >= ARRAY_LEN(nvKmsSurfaceMemoryFormatInfo)) { + return &nvKmsEmptyFormatInfo; + } + + return &nvKmsSurfaceMemoryFormatInfo[format]; +} + +const char *nvKmsSurfaceMemoryFormatToString( + const enum NvKmsSurfaceMemoryFormat format) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + return (pFormatInfo != NULL) ? pFormatInfo->name : NULL; +} diff --git a/src/nvidia-modeset/lib/nvkms-sync.c b/src/nvidia-modeset/lib/nvkms-sync.c new file mode 100644 index 000000000..928fde875 --- /dev/null +++ b/src/nvidia-modeset/lib/nvkms-sync.c @@ -0,0 +1,377 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include +#include /* NV_DISP_BASE_NOTIFIER_1, NV_DISP_NOTIFICATION_2 */ +#include /* NV_DISP_NOTIFIER */ + +/* + * HW will never write 1 to lower 32bits of timestamp + */ +#define NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID 1 + +/* + * Higher 32bits of timestamp will be 0 only during first ~4sec of + * boot. So for practical purposes, we can consider 0 as invalid. + */ +#define NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID 0 + +static void GetNotifierTimeStamp(volatile const NvU32 *notif, + NvU32 timeStampLoIdx, + NvU32 timeStampHiIdx, + struct nvKmsParsedNotifier *out) +{ + NvU32 lo, hi; + NvU32 pollCount = 0; + + /* + * Caller of ParseNotifier() is expected to poll for notifier + * status to become BEGUN/FINISHED for valid timestamp. + */ + if (out->status == NVKMS_NOTIFIER_STATUS_NOT_BEGUN) { + return; + } + + /* + * HW does 4B writes to notifier, so poll till both timestampLo + * and timestampHi bytes become valid. + */ + do { + lo = notif[timeStampLoIdx]; + hi = notif[timeStampHiIdx]; + + if ((lo != NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID) && + (hi != NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_HI_INVALID)) { + out->timeStamp = (NvU64)lo | ((NvU64)hi << 32); + out->timeStampValid = NV_TRUE; + break; + } + + if (++pollCount >= 100) { + break; + } + } while (1); +} + +static void ResetNotifierLegacy(NvBool overlay, volatile void *in) +{ + volatile NvU32 *notif = in; + + if (overlay) { + notif[NV_DISP_NOTIFICATION_2_INFO16_3] = + DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN); + + notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] = + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID; + } else { + notif[NV_DISP_BASE_NOTIFIER_1__0] = + DRF_DEF(_DISP, _BASE_NOTIFIER_1__0, _STATUS, _NOT_BEGUN); + } +} + +static void ResetNotifierFourWord(volatile void *in) +{ + volatile NvU32 *notif = in; + + notif[NV_DISP_NOTIFICATION_2_INFO16_3] = + DRF_DEF(_DISP, _NOTIFICATION_2__3, _STATUS, _NOT_BEGUN); + + notif[NV_DISP_NOTIFICATION_2_TIME_STAMP_0] = + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID; +} + +static void ResetNotifierFourWordNVDisplay(volatile void *in) +{ + volatile NvU32 *notif = in; + + notif[NV_DISP_NOTIFIER__0] = + DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _NOT_BEGUN); + + notif[NV_DISP_NOTIFIER__2] = + NVKMS_LIB_SYNC_NOTIFIER_TIMESTAMP_LO_INVALID; +} + +void nvKmsResetNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, void *base) +{ + const NvU32 sizeInBytes = nvKmsSizeOfNotifier(format, overlay); + void *notif = + (void *)((char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + ResetNotifierLegacy(overlay, notif); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + ResetNotifierFourWord(notif); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + ResetNotifierFourWordNVDisplay(notif); + break; + } +} + +static void ParseNotifierLegacy(NvBool overlay, volatile const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + + if (overlay) { + NvU32 notif3; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif3 = notif[NV_DISP_NOTIFICATION_2_INFO16_3]; + + switch(DRF_VAL(_DISP, _NOTIFICATION_2__3, _STATUS, notif3)) { + case NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN: + out->status = 
NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFICATION_2_INFO16_3, _PRESENT_COUNT, notif3); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFICATION_2_TIME_STAMP_0, + NV_DISP_NOTIFICATION_2_TIME_STAMP_1, + out); + } else { + NvU32 notif0; + + /* There's a timestamp available in this notifier, but it's a weird + * 14-bit "audit timestamp" that's not useful for us. */ + out->timeStampValid = NV_FALSE; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif0 = notif[NV_DISP_BASE_NOTIFIER_1__0]; + + switch(DRF_VAL(_DISP, _BASE_NOTIFIER_1__0, _STATUS, notif0)) { + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_BASE_NOTIFIER_1__0_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _BASE_NOTIFIER_1__0, _PRESENTATION_COUNT, notif0); + } +} + +static void ParseNotifierFourWord(const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + NvU32 notif3; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif3 = notif[NV_DISP_NOTIFICATION_2_INFO16_3]; + + switch(DRF_VAL(_DISP, _NOTIFICATION_2__3, _STATUS, notif3)) { + case NV_DISP_NOTIFICATION_2__3_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFICATION_2__3_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFICATION_2_INFO16_3, _PRESENT_COUNT, notif3); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFICATION_2_TIME_STAMP_0, + NV_DISP_NOTIFICATION_2_TIME_STAMP_1, + out); +} + +static void ParseNotifierFourWordNVDisplay(const void *in, + struct nvKmsParsedNotifier *out) +{ + volatile const NvU32 *notif = in; + NvU32 notif0; + + /* Read this once since it may be in video memory and we need multiple + * fields */ + notif0 = notif[NV_DISP_NOTIFIER__0]; + + switch(DRF_VAL(_DISP, _NOTIFIER__0, _STATUS, notif0)) { + case NV_DISP_NOTIFIER__0_STATUS_NOT_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_NOT_BEGUN; + break; + case NV_DISP_NOTIFIER__0_STATUS_BEGUN: + out->status = NVKMS_NOTIFIER_STATUS_BEGUN; + break; + case NV_DISP_NOTIFIER__0_STATUS_FINISHED: + out->status = NVKMS_NOTIFIER_STATUS_FINISHED; + break; + } + + out->presentCount = + DRF_VAL(_DISP, _NOTIFIER__0, _PRESENT_COUNT, notif0); + + GetNotifierTimeStamp(notif, + NV_DISP_NOTIFIER__2, + NV_DISP_NOTIFIER__3, + out); +} + +void nvKmsParseNotifier(enum NvKmsNIsoFormat format, NvBool overlay, + NvU32 index, const void *base, + struct nvKmsParsedNotifier *out) +{ + const NvU32 sizeInBytes = nvKmsSizeOfNotifier(format, overlay); + const void *notif = + (const void *)((const char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + ParseNotifierLegacy(overlay, notif, out); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + ParseNotifierFourWord(notif, out); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + ParseNotifierFourWordNVDisplay(notif, out); + 
break; + } +} + +NvU32 nvKmsSemaphorePayloadOffset(enum NvKmsNIsoFormat format) +{ + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + return 0; + case NVKMS_NISO_FORMAT_FOUR_WORD: + return NV_DISP_NOTIFICATION_2_INFO32_2; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + return NV_DISP_NOTIFIER__0; + } + + return 0; +} + +static void ResetSemaphoreLegacy(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + *sema = payload; +} + +static void ResetSemaphoreFourWord(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + sema[NV_DISP_NOTIFICATION_2_INFO32_2] = payload; +} + +static void ResetSemaphoreFourWordNVDisplay(volatile void *in, NvU32 payload) +{ + volatile NvU32 *sema = in; + + sema[NV_DISP_NOTIFIER__0] = payload; +} + +void nvKmsResetSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, void *base, + NvU32 payload) +{ + const NvU32 sizeInBytes = nvKmsSizeOfSemaphore(format); + void *sema = + (void *)((char *)base + (sizeInBytes * index)); + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + ResetSemaphoreLegacy(sema, payload); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + ResetSemaphoreFourWord(sema, payload); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + ResetSemaphoreFourWordNVDisplay(sema, payload); + break; + } +} + +static NvU32 ParseSemaphoreLegacy(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return *sema; +} + +static NvU32 ParseSemaphoreFourWord(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return sema[NV_DISP_NOTIFICATION_2_INFO32_2]; +} + +static NvU32 ParseSemaphoreFourWordNVDisplay(const volatile void *in) +{ + const volatile NvU32 *sema = in; + + return sema[NV_DISP_NOTIFIER__0]; +} + +void nvKmsParseSemaphore(enum NvKmsNIsoFormat format, + NvU32 index, const void *base, + struct nvKmsParsedSemaphore *out) +{ + const NvU32 sizeInBytes = nvKmsSizeOfSemaphore(format); + const void *sema = + (const void *)((const char *)base + (sizeInBytes * index)); + NvU32 payload = 0; + + switch (format) { + case NVKMS_NISO_FORMAT_LEGACY: + payload = ParseSemaphoreLegacy(sema); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD: + payload = ParseSemaphoreFourWord(sema); + break; + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + payload = ParseSemaphoreFourWordNVDisplay(sema); + break; + } + + out->payload = payload; +} diff --git a/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h b/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h new file mode 100644 index 000000000..36685a026 --- /dev/null +++ b/src/nvidia-modeset/os-interface/include/nvidia-modeset-os-interface.h @@ -0,0 +1,363 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Define the entry points which the NVKMS kernel interface layer + * provides to core NVKMS. + */ + +#if !defined(_NVIDIA_MODESET_OS_INTERFACE_H_) +#define _NVIDIA_MODESET_OS_INTERFACE_H_ + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) +#include /* size_t */ +#else +#include /* size_t */ +#endif +#include "nvtypes.h" /* NvU8 */ + +#include "nvkms.h" +#include "nv_stdarg.h" + +enum NvKmsSyncPtOp { + NVKMS_SYNCPT_OP_ALLOC, + NVKMS_SYNCPT_OP_GET, + NVKMS_SYNCPT_OP_PUT, + NVKMS_SYNCPT_OP_INCR_MAX, + NVKMS_SYNCPT_OP_CPU_INCR, + NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH, + NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD, + NVKMS_SYNCPT_OP_READ_MINVAL, + NVKMS_SYNCPT_OP_READ_MAXVAL, + NVKMS_SYNCPT_OP_SET_MIN_EQ_MAX, + NVKMS_SYNCPT_OP_SET_MAXVAL, +}; + +typedef struct { + + struct { + const char *syncpt_name; /* in */ + NvU32 id; /* out */ + } alloc; + + struct { + NvU32 id; /* in */ + } get; + + struct { + NvU32 id; /* in */ + } put; + + struct { + NvU32 id; /* in */ + NvU32 incr; /* in */ + NvU32 value; /* out */ + } incr_max; + + struct { + NvU32 id; /* in */ + } cpu_incr; + + struct { + NvS32 fd; /* in */ + NvU32 id; /* out */ + NvU32 thresh; /* out */ + } fd_to_id_and_thresh; + + struct { + NvU32 id; /* in */ + NvU32 thresh; /* in */ + NvS32 fd; /* out */ + } id_and_thresh_to_fd; + + struct { + NvU32 id; /* in */ + NvU32 minval; /* out */ + } read_minval; + + struct { + NvU32 id; /* in */ + NvU32 maxval; /* out */ + } read_maxval; + + struct { + NvU32 id; /* in */ + } set_min_eq_max; + + struct { + NvU32 id; /* in */ + NvU32 val; /* in */ + } set_maxval; +} NvKmsSyncPtOpParams; + + +void nvkms_call_rm (void *ops); +void* nvkms_alloc (size_t size, + NvBool zero); +void nvkms_free (void *ptr, + size_t size); +void* nvkms_memset (void *ptr, + NvU8 c, + size_t size); +void* nvkms_memcpy (void *dest, + const void *src, + size_t n); +void* nvkms_memmove (void *dest, + const void *src, + size_t n); +int nvkms_memcmp (const void *s1, + const void *s2, + size_t n); +size_t nvkms_strlen (const char *s); +int nvkms_strcmp (const char *s1, + const char *s2); +char* nvkms_strncpy (char *dest, + const char *src, + size_t n); +void nvkms_usleep (NvU64 usec); +NvU64 nvkms_get_usec (void); +int nvkms_copyin (void *kptr, + NvU64 uaddr, + size_t n); +int nvkms_copyout (NvU64 uaddr, + const void *kptr, + size_t n); +void nvkms_yield (void); +void nvkms_dump_stack (void); +NvBool nvkms_syncpt_op (enum NvKmsSyncPtOp op, + NvKmsSyncPtOpParams *params); +int nvkms_snprintf (char *str, + size_t size, + const char *format, ...) + __attribute__((format (printf, 3, 4))); + +int nvkms_vsnprintf (char *str, + size_t size, + const char *format, + va_list ap); + +#define NVKMS_LOG_LEVEL_INFO 0 +#define NVKMS_LOG_LEVEL_WARN 1 +#define NVKMS_LOG_LEVEL_ERROR 2 + +void nvkms_log (const int level, + const char *gpuPrefix, + const char *msg); + +/*! + * Refcounted pointer to an object that may be freed while references still + * exist. 
+ * + * This structure is intended to be used for nvkms timers to refer to objects + * that may be freed while timers with references to the object are still + * pending. + * + * When the owner of an nvkms_ref_ptr is freed, the teardown code should call + * nvkms_free_ref_ptr(). That marks the pointer as invalid so that later calls + * to nvkms_dec_ref() (i.e. from a workqueue callback) return NULL rather than + * the pointer originally passed to nvkms_alloc_ref_ptr(). + */ +struct nvkms_ref_ptr; + +/*! + * Allocate and initialize a ref_ptr. + * + * The pointer stored in the ref_ptr is initialized to ptr, and its refcount is + * initialized to 1. + */ +struct nvkms_ref_ptr* nvkms_alloc_ref_ptr(void *ptr); + +/*! + * Clear a ref_ptr. + * + * This function sets the pointer stored in the ref_ptr to NULL and drops the + * reference created by nvkms_alloc_ref_ptr(). This function should be called + * when the object pointed to by the ref_ptr is freed. + * + * A caller should make sure that no code that can call nvkms_inc_ref() can + * execute after nvkms_free_ref_ptr() is called. + */ +void nvkms_free_ref_ptr(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Increment the refcount of a ref_ptr. + * + * This function should be used when a pointer to the ref_ptr is stored + * somewhere. For example, when the ref_ptr is used as the argument to + * nvkms_alloc_timer. + * + * This may be called outside of the nvkms_lock, for example by an RM callback. + */ +void nvkms_inc_ref(struct nvkms_ref_ptr *ref_ptr); + +/*! + * Decrement the refcount of a ref_ptr and extract the embedded pointer. + * + * This should be used by code that needs to atomically determine whether the + * object pointed to by the ref_ptr still exists. To prevent the object from + * being destroyed while the current thread is executing, this should be called + * from inside the nvkms_lock. + */ +void* nvkms_dec_ref(struct nvkms_ref_ptr *ref_ptr); + +typedef void nvkms_timer_proc_t(void *dataPtr, NvU32 dataU32); +typedef struct nvkms_timer_t nvkms_timer_handle_t; + +/*! + * Schedule a callback function to be called in the future. + * + * The callback function 'proc' will be called with the arguments + * 'dataPtr' and 'dataU32' at 'usec' (or later) microseconds from now. + * If usec==0, the callback will be scheduled to be called as soon as + * possible. + * + * The callback function is guaranteed to be called back with the + * nvkms_lock held, and in process context. + * + * Returns an opaque handle, nvkms_timer_handle_t*, or NULL on + * failure. If non-NULL, the caller is responsible for caching the + * handle and eventually calling nvkms_free_timer() to free the + * memory. + * + * The nvkms_lock may be held when nvkms_alloc_timer() is called, but + * the nvkms_lock is not required. + */ +nvkms_timer_handle_t* nvkms_alloc_timer (nvkms_timer_proc_t *proc, + void *dataPtr, NvU32 dataU32, + NvU64 usec); + +/*! + * Schedule a callback function to be called in the future. + * + * This function is like nvkms_alloc_timer() except that instead of returning a + * pointer to a structure that the caller should free later, the timer will free + * itself after executing the callback function. This is only intended for + * cases where the caller cannot cache the nvkms_alloc_timer() return value. + */ +NvBool +nvkms_alloc_timer_with_ref_ptr(nvkms_timer_proc_t *proc, + struct nvkms_ref_ptr *ref_ptr, + NvU32 dataU32, NvU64 usec); + +/*! + * Free the nvkms_timer_t object. 
If the callback function has not + * yet been called, freeing the nvkms_timer_handle_t will guarantee + * that it is not called. + * + * The nvkms_lock must be held when calling nvkms_free_timer(). + */ +void nvkms_free_timer (nvkms_timer_handle_t *handle); + + + +/*! + * Notify the NVKMS kernel interface that the event queue has changed. + * + * \param[in] pOpenKernel This indicates the file descriptor + * ("per-open") of the client whose event queue + * has been updated. This is the pointer + * passed by the kernel interface to nvKmsOpen(). + * \param[in] eventsAvailable If TRUE, a new event has been added to the + * event queue. If FALSE, the last event has + * been removed from the event queue. + */ +void +nvkms_event_queue_changed(nvkms_per_open_handle_t *pOpenKernel, + NvBool eventsAvailable); + + +/*! + * Get the "per-open" data (the pointer returned by nvKmsOpen()) + * associated with this fd. + */ +void* nvkms_get_per_open_data(int fd); + + +/*! + * Raise and lower the reference count of the specified GPU. + */ +NvBool nvkms_open_gpu(NvU32 gpuId); +void nvkms_close_gpu(NvU32 gpuId); + + +/*! + * Enumerate nvidia gpus. + */ + +NvU32 nvkms_enumerate_gpus(nv_gpu_info_t *gpu_info); + +/*! + * Availability of write combining support for video memory. + */ + +NvBool nvkms_allow_write_combining(void); + +/*! + * Checks whether the fd is associated with an nvidia character device. + */ +NvBool nvkms_fd_is_nvidia_chardev(int fd); + +/*! + * NVKMS interface for kernel space NVKMS clients like KAPI + */ + +struct nvkms_per_open; + +struct nvkms_per_open* nvkms_open_from_kapi +( + struct NvKmsKapiDevice *device +); + +void nvkms_close_from_kapi(struct nvkms_per_open *popen); + +NvBool nvkms_ioctl_from_kapi +( + struct nvkms_per_open *popen, + NvU32 cmd, void *params_address, const size_t params_size +); + +/*! + * APIs for locking. + */ + +typedef struct nvkms_sema_t nvkms_sema_handle_t; + +nvkms_sema_handle_t* + nvkms_sema_alloc (void); +void nvkms_sema_free (nvkms_sema_handle_t *sema); +void nvkms_sema_down (nvkms_sema_handle_t *sema); +void nvkms_sema_up (nvkms_sema_handle_t *sema); + +/*! + * APIs to register/unregister backlight device. + */ +struct nvkms_backlight_device; + +struct nvkms_backlight_device* +nvkms_register_backlight(NvU32 gpu_id, NvU32 display_id, void *drv_priv, + NvU32 current_brightness); + +void nvkms_unregister_backlight(struct nvkms_backlight_device *nvkms_bd); + +#endif /* _NVIDIA_MODESET_OS_INTERFACE_H_ */ + diff --git a/src/nvidia-modeset/os-interface/include/nvkms.h b/src/nvidia-modeset/os-interface/include/nvkms.h new file mode 100644 index 000000000..1276186ce --- /dev/null +++ b/src/nvidia-modeset/os-interface/include/nvkms.h @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NV_KMS_H__ +#define __NV_KMS_H__ + +#include "nvtypes.h" +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) +#include /* size_t */ +#else +#include /* size_t */ +#endif + +#include "nvkms-kapi.h" + +typedef struct nvkms_per_open nvkms_per_open_handle_t; + +typedef void nvkms_procfs_out_string_func_t(void *data, + const char *str); + +typedef void nvkms_procfs_proc_t(void *data, + char *buffer, size_t size, + nvkms_procfs_out_string_func_t *outString); + +typedef struct { + const char *name; + nvkms_procfs_proc_t *func; +} nvkms_procfs_file_t; + +enum NvKmsClientType { + NVKMS_CLIENT_USER_SPACE, + NVKMS_CLIENT_KERNEL_SPACE, +}; + +NvBool nvKmsIoctl( + void *pOpenVoid, + NvU32 cmd, + NvU64 paramsAddress, + const size_t paramSize); + +void nvKmsClose(void *pOpenVoid); + +void* nvKmsOpen( + NvU32 pid, + enum NvKmsClientType clientType, + nvkms_per_open_handle_t *pOpenKernel); + +NvBool nvKmsModuleLoad(void); + +void nvKmsModuleUnload(void); + +void nvKmsSuspend(NvU32 gpuId); +void nvKmsResume(NvU32 gpuId); + +void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles); + +void nvKmsKapiHandleEventQueueChange +( + struct NvKmsKapiDevice *device +); + +NvBool nvKmsKapiGetFunctionsTableInternal +( + struct NvKmsKapiFunctionsTable *funcsTable +); + +NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness); +NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness); + +#endif /* __NV_KMS_H__ */ diff --git a/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp b/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp new file mode 100644 index 000000000..91368b663 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.cpp @@ -0,0 +1,546 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +// This file implements the event sink class, which the DisplayPort library +// uses to notify the driver of display devices being connected or +// disconnected. + +#include "dp/nvdp-connector-event-sink.h" + +#include "nvdp-connector-event-sink.hpp" + +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-utils.h" +#include "nvkms-vrr.h" + +#include "nvkms-attributes.h" +#include "nvkms-private.h" + +namespace nvkmsDisplayPort { + +ConnectorEventSink::ConnectorEventSink(NVConnectorEvoPtr pConnectorEvo) + : pConnectorEvo(pConnectorEvo) +{ +} + +static NVDpyEvoPtr FindDpyByDevice(NVConnectorEvoPtr pConnectorEvo, + DisplayPort::Device *device) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice && + pDpyEvo->dp.pDpLibDevice->device == device) { + return pDpyEvo; + } + } + } + return NULL; +} + +// Looks for a display that matches the given DP device from +// the list of disconnected dpys. +static NVDpyEvoPtr FindMatchingDisconnectedDpy(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NVDPLibDevicePtr pDpLibDevice) +{ + NVDpyEvoPtr pDpyEvo; + + // A match is simply that the display appears on the same connector. + // DP MST devices are matched by topology address in nvGetDPMSTDpy. + const NVDpyIdList dpyIdList = + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId); + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + if (!pDpyEvo->dp.pDpLibDevice || !pDpyEvo->dp.pDpLibDevice->isPlugged) { + return pDpyEvo; + } + } + return NULL; +} + +const char *nvDPGetDeviceGUIDStr(DisplayPort::Device *device) +{ + DisplayPort::GUID guid; + + if (!device) { + return NULL; + } + + guid = device->getGUID(); + if (!guid.isGuidZero()) { + static DisplayPort::GUID::StringBuffer sb; + guid.toString(sb); + return sb; + } + + return NULL; +} + +bool nvDPGetDeviceGUID(DisplayPort::Device *device, + NvU8 guidData[DPCD_GUID_SIZE]) +{ + DisplayPort::GUID guid; + + if (!device) { + return false; + } + + guid = device->getGUID(); + if (guid.isGuidZero()) { + return false; + } + + nvkms_memcpy((void*)guidData, (void*)guid.data, sizeof(guid.data)); + + return true; +} + + +static const char *DPGetDevicePortStr(DisplayPort::Device *device, + bool skipLeadingZero) +{ + DisplayPort::Address addr; + + if (!device) { + return NULL; + } + + addr = device->getTopologyAddress(); + if (addr.size() > 0) { + static DisplayPort::Address::StringBuffer sb; + addr.toString(sb, skipLeadingZero); + return sb; + } + + return NULL; +} + + +static void nvDPPrintDeviceInfo(NVConnectorEvoPtr pConnectorEvo, + DisplayPort::Device *device) +{ +#if defined(DEBUG) + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + const char *connectorType; + unsigned major, minor; + const char *tmp; + + device->getDpcdRevision(&major, &minor); + + switch (device->getConnectorType()) { + case DisplayPort::connectorDisplayPort: + connectorType = "DisplayPort"; + break; + + case DisplayPort::connectorHDMI: + connectorType = "HDMI"; + break; + + case DisplayPort::connectorDVI: + connectorType = "DVI"; + break; + + case DisplayPort::connectorVGA: + connectorType = "VGA"; + break; + + default: + connectorType = "unknown"; + break; + } + + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "%s-%d: new DisplayPort %d.%d device detected", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex, major, minor); + tmp = DPGetDevicePortStr(device, false /* 
skipLeadingZero */); + if (tmp) { + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Address: %s", tmp); + } + tmp = nvDPGetDeviceGUIDStr(device); + if (tmp) { + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " GUID: {%s}", tmp); + } + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Connector: %s", connectorType); + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Video: %s", device->isVideoSink() ? "yes" : "no"); + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + " Audio: %s", device->isAudioSink() ? "yes" : "no"); +#endif +} + +static void nvDPAddDeviceToActiveGroup(NVDpyEvoPtr pDpyEvo) +{ + const NVDPLibConnectorRec *pDpLibConnector = + pDpyEvo->pConnectorEvo->pDpLibConnector; + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + NvU32 head; + + // If the device is being driven by the firmware group, then we're just + // tracking it so that it can be shut down by the modeset path, and we + // don't have any timing information for it. + + if (pDpLibConnector->headInFirmware) { + return; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, + pDpLibConnector->dpyIdList[head])) { + pDpLibConnector->pGroup[head]->insert( + pDpyEvo->dp.pDpLibDevice->device); + break; + } + } +} + +// when we get this event, the DP lib has done link training and the +// EDID has been read (by the DP lib) +void ConnectorEventSink::newDevice(DisplayPort::Device *device) +{ + NVDPLibDevicePtr pDpLibDevice = NULL; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo = NULL; + NvBool dynamicDpyCreated = FALSE; + + // XXX [VM DP MST] Current POR requires we also check/handle: + // - More than 64 DP dpys on a connector = print error. + // - More than 127 dpys on a system = print error. + + nvDPPrintDeviceInfo(pConnectorEvo, device); + + // Only add video sink devices. + if (!device->isVideoSink()) { + return; + } + + // Protect against redundant newDevices() + pDpyEvo = FindDpyByDevice(pConnectorEvo, device); + if (pDpyEvo) { + nvAssert(!"Got (redundant) DP Lib newDevice() on known display, " + "ignoring."); + return; + } + + pDpLibDevice = (NVDPLibDevicePtr)nvCalloc(1, sizeof(*pDpLibDevice)); + if (!pDpLibDevice) { + goto fail; + } + + nvAssert(!device->getOwningGroup()); + + // XXX For DP MST, we'll want to handle dynamic display IDs. For now, + // use the connector's display ID. + pDpLibDevice->device = device; + + if (device->isMultistream()) { + // Get a dynamic pDpy for this device based on its bus topology path. + // This will create one if it doesn't exist. + pDpyEvo = nvGetDPMSTDpyEvo( + pConnectorEvo, + DPGetDevicePortStr(device, true /* skipLeadingZero */), + &dynamicDpyCreated); + + } else { + // Look for a (previously) disconnected pDpy that matches this device. + pDpyEvo = FindMatchingDisconnectedDpy(pDispEvo, pConnectorEvo, + pDpLibDevice); + } + + if (!pDpyEvo) { + goto fail; + } + + nvAssert(pDpyEvo->pConnectorEvo == pConnectorEvo); + + // At this point, the pDpy should no longer be tracking a DP lib device. + if (pDpyEvo->dp.pDpLibDevice) { + nvAssert(!"DP Lib should have already called lostDevice() for this DP " + "device"); + + // Call lost device ourselves, if the DP lib calls this again later, + // we'll ignore it then. + lostDevice(pDpyEvo->dp.pDpLibDevice->device); + } + + nvAssert(device->isPlugged()); + + pDpLibDevice->isPlugged = TRUE; + pDpyEvo->dp.pDpLibDevice = pDpLibDevice; + + // If there's an active group that this pDpy is supposed to be a member of, + // insert it now. 
+ nvDPAddDeviceToActiveGroup(pDpyEvo); + + if (dynamicDpyCreated) { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED); + } + + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + + return; + + fail: + nvAssert(pDpyEvo == NULL); + nvFree(pDpLibDevice); +} + +void ConnectorEventSink::lostDevice(DisplayPort::Device *device) +{ + NVDpyEvoPtr pDpyEvo; + + // Ignore non-video sink devices. + if (!device->isVideoSink()) { + return; + } + + pDpyEvo = FindDpyByDevice(pConnectorEvo, device); + if (!pDpyEvo) { + nvAssert(!"Got DP Lib lostDevice() on unknown display."); + return; + } + + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + nvAssert(pDpLibDevice != NULL); + + if (pDpyEvo->vrr.type != NVKMS_DPY_VRR_TYPE_NONE) { + device->resetVrrEnablement(); + pDpyEvo->vrr.type = NVKMS_DPY_VRR_TYPE_NONE; + } + + if (device->getOwningGroup()) { + device->getOwningGroup()->remove(device); + } + + if (pDpLibDevice->isPlugged) { + pDpLibDevice->isPlugged = FALSE; + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } + + if (device->isMultistream()) { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED); + } + + pDpyEvo->dp.pDpLibDevice = NULL; + nvFree(pDpLibDevice); +} + +void ConnectorEventSink::notifyMustDisconnect(DisplayPort::Group *grp) +{ +} + +// notifyDetectComplete() is called when DP Library has done a full detect on +// the topology. There is no one-to-one relationship between a long pulse to +// a detectCompleted. +void ConnectorEventSink::notifyDetectComplete() +{ + pConnectorEvo->detectComplete = TRUE; + + // XXX[DP MST] potentially use this call to notify NV-CONTROL of topology + // change; + + // issue: not as current as new/lostDevice and may pose sync issues, but + // less chatty. +} + +void ConnectorEventSink::bandwidthChangeNotification(DisplayPort::Device *dev, + bool isComplianceMode) +{ + nvDPLibUpdateDpyLinkConfiguration(FindDpyByDevice(pConnectorEvo, dev)); +} + +void ConnectorEventSink::notifyZombieStateChange(DisplayPort::Device *dev, + bool zombied) +{ + NVDpyEvoPtr pDpyEvo = FindDpyByDevice(pConnectorEvo, dev); + NvBool sendEvent = FALSE; + + if (pDpyEvo == NULL) { + return; + } + + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + if (zombied) { + dev->getOwningGroup()->remove(dev); + + if (pDpLibDevice->isPlugged && !dev->isPlugged()) { + pDpLibDevice->isPlugged = FALSE; + sendEvent = TRUE; + } + + } else { + if (!pDpLibDevice->isPlugged && dev->isPlugged()) { + pDpLibDevice->isPlugged = TRUE; + sendEvent = TRUE; + } + + nvDPAddDeviceToActiveGroup(pDpyEvo); + } + + if (sendEvent) { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } +} + +void ConnectorEventSink::notifyCableOkStateChange(DisplayPort::Device *dev, + bool cableOk) +{ +} + +void ConnectorEventSink::notifyHDCPCapDone(DisplayPort::Device *dev, + bool hdcpCap) +{ +} + +void ConnectorEventSink::notifyMCCSEvent(DisplayPort::Device *dev) +{ +} + +}; // namespace nvkmsDisplayPort + +// The functions below are exported to the rest of nvkms. Declare them outside +// of the 'nvkmsDisplayPort' namespace. Their prototypes in +// nvdp-connector-event-sink.h are declared as extern "C". 
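+//
+// For illustration only (the actual header contents may differ), such a
+// prototype is typically wrapped as:
+//
+//   #ifdef __cplusplus
+//   extern "C" {
+//   #endif
+//   NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo);
+//   #ifdef __cplusplus
+//   }
+//   #endif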
+ +NvBool nvDPLibDpyIsConnected(NVDpyEvoPtr pDpyEvo) +{ + nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)); + + return ((pDpyEvo->dp.pDpLibDevice != NULL) && + pDpyEvo->dp.pDpLibDevice->isPlugged); +} + +// Adaptive-Sync is enabled/disabled by setting the MSA_TIMING_PAR_IGNORE_EN +// bit in the DOWNSPREAD_CTRL register (DP spec 1.4a appendix K) +void nvDPLibSetAdaptiveSync(const NVDispEvoRec *pDispEvo, NvU32 head, + NvBool enable) +{ + const NVConnectorEvoRec *pConnectorEvo = + pDispEvo->headState[head].pConnectorEvo; + NVDPLibConnectorPtr pDpLibConnector = pConnectorEvo->pDpLibConnector; + DisplayPort::Group *pGroup = pDpLibConnector->pGroup[head]; + DisplayPort::Device *dev; + + for (dev = pGroup->enumDevices(0); dev != NULL; + dev = pGroup->enumDevices(dev)) { + dev->setIgnoreMSAEnable(enable); + } +} + +// Read the link configuration from the connector and stores it in the pDpy so +// it can be sent to clients via NV-CONTROL. Also generate events if the values +// change. +void nvDPLibUpdateDpyLinkConfiguration(NVDpyEvoPtr pDpyEvo) +{ + if (!pDpyEvo) { + return; + } + + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + DisplayPort::Device *dev = pDpLibDevice ? pDpLibDevice->device : NULL; + DisplayPort::Connector *connector = + pDpyEvo->pConnectorEvo->pDpLibConnector->connector; + unsigned laneCount; + NvU64 linkRate; + enum NvKmsDpyAttributeDisplayportConnectorTypeValue connectorType; + NvBool sinkIsAudioCapable; + + if (!dev || !pDpLibDevice->isPlugged) { + linkRate = 0; + laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1; + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN; + sinkIsAudioCapable = FALSE; + } else { + // XXX[AGP]: Can the path down to a single device have a different link + // configuration from the connector itself? + connector->getCurrentLinkConfig(laneCount, linkRate); + + // The DisplayPort library multiplies the link rate enum value by + // 27000000. Convert back to NV-CONTROL's defines. + linkRate /= 27000000; + + switch (pDpLibDevice->device->getConnectorType()) { + case DisplayPort::connectorDisplayPort: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DISPLAYPORT; + break; + case DisplayPort::connectorHDMI: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_HDMI; + break; + case DisplayPort::connectorDVI: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_DVI; + break; + case DisplayPort::connectorVGA: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_VGA; + break; + default: + connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN; + break; + } + + sinkIsAudioCapable = pDpLibDevice->device->isAudioSink(); + } + + // The DisplayPort library reports a disabled link as 0 lanes. NV-CONTROL, + // for historical reasons, uses a setting of "1 lane @ disabled" for a + // disabled link, so translate to that. + if (laneCount == 0) { + linkRate = 0; + laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1; + } + + // Update pDpy and send events if anything changed. 
+ if (laneCount != pDpyEvo->dp.laneCount) { + pDpyEvo->dp.laneCount = laneCount; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE, + nvRMLaneCountToNvKms(laneCount)); + } + + if (linkRate != pDpyEvo->dp.linkRate) { + pDpyEvo->dp.linkRate = linkRate; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE, + linkRate); + } + + if (connectorType != pDpyEvo->dp.connectorType) { + pDpyEvo->dp.connectorType = connectorType; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE, + connectorType); + } + + if (sinkIsAudioCapable != pDpyEvo->dp.sinkIsAudioCapable) { + pDpyEvo->dp.sinkIsAudioCapable = sinkIsAudioCapable; + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE, + sinkIsAudioCapable); + } +} diff --git a/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp b/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp new file mode 100644 index 000000000..167c2e898 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-connector-event-sink.hpp @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NVDP_CONNECTOR_EVENT_SINK_HPP__ +#define __NVDP_CONNECTOR_EVENT_SINK_HPP__ + +#include +#include + +#include "nvdp-evo-interface.hpp" + + +namespace nvkmsDisplayPort +{ + +class ConnectorEventSink : public DisplayPort::Object, + public DisplayPort::Connector::EventSink +{ +private: + const NVConnectorEvoPtr pConnectorEvo; + +public: + ConnectorEventSink(NVConnectorEvoPtr pConnectorEvo); + + // From DisplayPort::Connector::EventSink + virtual void newDevice(DisplayPort::Device *dev); + virtual void lostDevice(DisplayPort::Device *dev); + virtual void notifyMustDisconnect(DisplayPort::Group *grp); + virtual void notifyDetectComplete(); + virtual void bandwidthChangeNotification(DisplayPort::Device *dev, bool isComplianceMode); + virtual void notifyZombieStateChange(DisplayPort::Device *dev, bool zombied); + virtual void notifyCableOkStateChange(DisplayPort::Device *dev, bool cableOk); + virtual void notifyHDCPCapDone(DisplayPort::Device *dev, bool hdcpCap); + virtual void notifyMCCSEvent(DisplayPort::Device *dev); +}; + +const char *nvDPGetDeviceGUIDStr(DisplayPort::Device *device); +bool nvDPGetDeviceGUID(DisplayPort::Device *device, NvU8 guid[DPCD_GUID_SIZE]); + +}; // namespace nvkmsDisplayPort + +struct _nv_dplibconnector { + DisplayPort::Connector *connector; + nvkmsDisplayPort::EvoInterface *evoInterface; + nvkmsDisplayPort::ConnectorEventSink *evtSink; + DisplayPort::MainLink *mainLink; + DisplayPort::AuxBus *auxBus; + + NvBool isActive; + + // The VBIOS head is actively driving this connector. + bool headInFirmware; + NVConnectorEvoRec *pConnectorEvo; + // Per-head DpLib group, allocated at the time of connector creation: + // In case of multi-streaming, multiple heads can be attached to single + // DP connector driving distinct DP streams. + DisplayPort::Group *pGroup[NVKMS_MAX_HEADS_PER_DISP]; + NVDpyIdList dpyIdList[NVKMS_MAX_HEADS_PER_DISP]; + // Attached heads bitmask + NvU32 headMask; + + // Connection status plugged/unplugged; gets initialized by + // Connector::resume() and gets updated by + // Connector::notifyLongPulse(). + NvBool plugged; +}; + +struct _nv_dplibdevice { + DisplayPort::Device *device; + NvBool isPlugged; +}; + +struct __nv_dplibmodesetstate { + NVDpyIdList dpyIdList; + DisplayPort::DpModesetParams modesetParams; +}; + +#endif // __NVDP_CONNECTOR_EVENT_SINK_HPP__ diff --git a/src/nvidia-modeset/src/dp/nvdp-connector.cpp b/src/nvidia-modeset/src/dp/nvdp-connector.cpp new file mode 100644 index 000000000..47917361e --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-connector.cpp @@ -0,0 +1,1008 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "dp/nvdp-connector.h" +#include "nvdp-timer.hpp" +#include "nvdp-connector-event-sink.hpp" +#include "dp/nvdp-connector-event-sink.h" +#include "dp/nvdp-timer.h" + +#include "nvkms-evo.h" +#include "nvkms-types.h" +#include "nvkms-modeset.h" +#include "nvkms-modeset-types.h" +#include "nvkms-utils.h" +#include "nvkms-rmapi.h" + +#include + +// Loop over all display devices attached to a connector. +// Connector::enumDevices(NULL) returns the first device, and then +// enumDevices(previous) returns each subsequent device. +#define for_each_device(connector, dev) \ + for (DisplayPort::Device *(dev) = NULL; ((dev) = (connector)->enumDevices(dev)); ) + +NVDPLibConnectorPtr nvDPCreateConnector(NVConnectorEvoPtr pConnectorEvo) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + DisplayPort::Timer *pTimer = &pDevEvo->dpTimer->timer; + NVDPLibConnectorPtr pNVDpLibConnector = + (NVDPLibConnectorPtr) nvCalloc(1, sizeof(*pNVDpLibConnector)); + + if (!pNVDpLibConnector) { + return NULL; + } + + pNVDpLibConnector->pConnectorEvo = pConnectorEvo; + + // Create the EVO interface object. + pNVDpLibConnector->evoInterface = + new nvkmsDisplayPort::EvoInterface(pConnectorEvo); + if (!pNVDpLibConnector->evoInterface) { + goto fail; + } + + // Create the event sink object. + pNVDpLibConnector->evtSink = + new nvkmsDisplayPort::ConnectorEventSink(pConnectorEvo); + if (!pNVDpLibConnector->evtSink) { + goto fail; + } + + // Create the MainLink object. + pNVDpLibConnector->mainLink = + DisplayPort::MakeEvoMainLink(pNVDpLibConnector->evoInterface, pTimer); + if (!pNVDpLibConnector->mainLink) { + goto fail; + } + + // Create the AuxBus object. + pNVDpLibConnector->auxBus = + DisplayPort::MakeEvoAuxBus(pNVDpLibConnector->evoInterface, pTimer); + if (!pNVDpLibConnector->auxBus) { + goto fail; + } + + pNVDpLibConnector->connector = + DisplayPort::createConnector(pNVDpLibConnector->mainLink, + pNVDpLibConnector->auxBus, + pTimer, + pNVDpLibConnector->evtSink); + if (!pNVDpLibConnector->connector) { + goto fail; + } + + pNVDpLibConnector->connector->setPolicyAssessLinkSafely(TRUE); + + return pNVDpLibConnector; + + fail: + nvDPDestroyConnector(pNVDpLibConnector); + return NULL; +} + +void nvDPNotifyLongPulse(NVConnectorEvoPtr pConnectorEvo, + NvBool connected) +{ + NVDPLibConnectorPtr pNVDpLibConnector = pConnectorEvo->pDpLibConnector; + DisplayPort::Connector *c = pNVDpLibConnector->connector; + + pNVDpLibConnector->plugged = connected; + + if (connected && !nvAssignSOREvo(pConnectorEvo, 0 /* sorExcludeMask */)) { + // DPLib takes care of skipping LT on unassigned SOR Display. 
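+        // The empty body is intentional: notifyLongPulse() below is still
+        // issued, and (per the note above) the library skips link training
+        // while no SOR is assigned.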
+ } + + c->notifyLongPulse(connected); + +} + +void nvDPNotifyShortPulse(NVDPLibConnectorPtr pNVDpLibConnector) +{ + DisplayPort::Connector *c = pNVDpLibConnector->connector; + + c->notifyShortPulse(); +} + +void nvDPDestroyConnector(NVDPLibConnectorPtr pNVDpLibConnector) +{ + if (!pNVDpLibConnector) return; + + if (pNVDpLibConnector->connector) { + pNVDpLibConnector->connector->destroy(); + } + if (pNVDpLibConnector->auxBus) { + delete pNVDpLibConnector->auxBus; + } + if (pNVDpLibConnector->mainLink) { + delete pNVDpLibConnector->mainLink; + } + if (pNVDpLibConnector->evoInterface) { + delete pNVDpLibConnector->evoInterface; + } + if (pNVDpLibConnector->evtSink) { + delete pNVDpLibConnector->evtSink; + } + + nvFree(pNVDpLibConnector); +} + +NvBool nvDPIsLinkAwaitingTransition(NVConnectorEvoPtr pConnectorEvo) +{ + if (nvConnectorUsesDPLib(pConnectorEvo)) { + DisplayPort::Connector *c = pConnectorEvo->pDpLibConnector->connector; + return c->isLinkAwaitingTransition(); + } + + return FALSE; +} + +/* + * Start DisplayPort mode validation on all connectors on a disp. + */ +void nvDPBeginValidation(NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + pConnectorEvo->pDpLibConnector->connector->beginCompoundQuery(); + } + } +} + +/*! + * Create a new DisplayPort group and populate it with the devices specified by + * dpyIdList. For MST groups, this allocates a dynamic RM display ID. + * Otherwise, it uses the connector's display ID. + */ +static DisplayPort::Group* CreateGroup( + const NVDPLibConnectorRec *pDpLibConnector, + const NVDpyIdList dpyIdList) +{ + NVDpyEvoPtr pDpyEvo; + DisplayPort::Group *pGroup = NULL; + + pGroup = pDpLibConnector->connector->newGroup(); + if (pGroup == NULL) { + return NULL; + } + + // Populate the group + FOR_ALL_EVO_DPYS(pDpyEvo, + dpyIdList, pDpLibConnector->pConnectorEvo->pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice) { + pGroup->insert(pDpyEvo->dp.pDpLibDevice->device); + } + } + + return pGroup; +} + +/*! + * Returns the bits per pixel for the pixel depth value given + * + * \param[in] pixelDepth nvKmsPixelDepth value + * + * \return The pixel depth configured by this enum value + */ +static NvU32 GetSORBpp( + const enum nvKmsPixelDepth pixelDepth, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace) +{ + NvU32 bpc = nvPixelDepthToBitsPerComponent(pixelDepth); + if (bpc == 0) { + nvAssert(!"Unrecognized SOR pixel depth"); + /* XXX Assume lowest ? */ + bpc = 6; + } + + /* + * In YUV420, HW is programmed with RGB color space and full color range. + * The color space conversion and color range compression happen in a + * headSurface composite shader. + * + * XXX Add support for + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 over DP. + */ + nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB); + + /* For RGB/YCbCr444, each pixel is always 3 components. For YCbCr/YUV420, + * we currently always scan out from the headSurface as RGB. */ + return bpc * 3; +} + +/* XXX Instead of tracking pixelDepth, you should track bpc and calculate bpp + * from bpc + colorSpace. 
*/ +static NvU32 GetBpc( + const enum nvKmsPixelDepth pixelDepth, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace) +{ + NvU32 bpc = nvPixelDepthToBitsPerComponent(pixelDepth); + if (bpc == 0) { + nvAssert(!"Unrecognized SOR pixel depth"); + /* XXX Assume lowest ? */ + return 6; + } + + /* + * In YUV420, HW is programmed with RGB color space and full color range. + * The color space conversion and color range compression happen in a + * headSurface composite shader. + * + * XXX Add support for + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 over DP. + */ + nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB); + + return bpc; +} + +static void SetDPMSATiming(const NVDispEvoRec *pDispEvo, + const NvU32 displayId, + NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *msaParams, + const NVHwModeTimingsEvo *pTimings) +{ + nvkms_memset(msaParams, 0, sizeof(*msaParams)); + + /* + * Fill in displayId and subDeviceInstance unconditionally. + * From CL#27980662, dplib started passing the client provided displayId + * to RM for setting MSA properties. + * Default value of displayId is 0, leading to RMControl failure in + * the displayport library. + */ + msaParams->subDeviceInstance = pDispEvo->displayOwner; + msaParams->displayId = displayId; + + if ((pTimings->yuv420Mode == NV_YUV420_MODE_SW) && displayId != 0) { + NV0073_CTRL_DP_MSA_PROPERTIES_MASK *featureMask = &msaParams->featureMask; + NV0073_CTRL_DP_MSA_PROPERTIES_VALUES *featureValues = &msaParams->featureValues; + + msaParams->bEnableMSA = 1; + msaParams->bCacheMsaOverrideForNextModeset = 1; + featureMask->bRasterTotalHorizontal = true; + featureMask->bActiveStartHorizontal = true; + featureMask->bSurfaceTotalHorizontal = true; + featureMask->bSyncWidthHorizontal = true; + featureValues->rasterTotalHorizontal = 2 * pTimings->rasterSize.x; + featureValues->activeStartHorizontal = 2 * (pTimings->rasterBlankEnd.x + 1); + featureValues->surfaceTotalHorizontal = 2 * nvEvoVisibleWidth(pTimings); + featureValues->syncWidthHorizontal = 2 * (pTimings->rasterSyncEnd.x + 1); + } +} + +static void InitDpModesetParams( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + DisplayPort::DpModesetParams *pParams) +{ + pParams->modesetInfo.pixelClockHz = pTimings->pixelClock * 1000; + pParams->modesetInfo.rasterWidth = pTimings->rasterSize.x; + pParams->modesetInfo.rasterHeight = pTimings->rasterSize.y; + pParams->modesetInfo.rasterBlankStartX = pTimings->rasterBlankStart.x; + pParams->modesetInfo.rasterBlankEndX = pTimings->rasterBlankEnd.x; + pParams->modesetInfo.surfaceWidth = nvEvoVisibleWidth(pTimings); + pParams->modesetInfo.surfaceHeight = nvEvoVisibleHeight(pTimings); + + pParams->modesetInfo.depth = + GetSORBpp(pTimings->pixelDepth, colorSpace); + + pParams->modesetInfo.bitsPerComponent = + GetBpc(pTimings->pixelDepth, colorSpace); + + pParams->colorFormat = dpColorFormat_Unknown; + switch (colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + /* HW YUV420 mode is only supported for HDMI, not DP */ + nvAssert(pTimings->yuv420Mode == NV_YUV420_MODE_SW); + pParams->modesetInfo.pixelClockHz *= 2; + pParams->colorFormat = dpColorFormat_YCbCr420; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + pParams->colorFormat = 
dpColorFormat_YCbCr444; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + pParams->colorFormat = dpColorFormat_YCbCr422; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pParams->colorFormat = dpColorFormat_RGB; + break; + } + + pParams->headIndex = head; + + SetDPMSATiming(pDispEvo, displayId, &pParams->msaparams, pTimings); +} + +NVDPLibModesetStatePtr nvDPLibCreateModesetState( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVDpyIdList dpyIdList, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + NVHwModeTimingsEvo *pTimings) +{ + bool found = false; + const NVDPLibConnectorRec *pDpLibConnector = NULL; + const NVDpyEvoRec *pDpyEvo; + NVDPLibModesetStatePtr pDpLibModesetState = NULL; + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + if (!found) { + pDpLibConnector = pDpyEvo->pConnectorEvo->pDpLibConnector; + found = true; + } else if (pDpLibConnector != pDpyEvo->pConnectorEvo->pDpLibConnector) { + /* All Dpys must belongs to same DP connector */ + return NULL; + } + } + + /* Do nothing if any of the display is not DP */ + if (pDpLibConnector == NULL) { + return NULL; + } + + pDpLibModesetState = + (NVDPLibModesetStatePtr) nvCalloc(1, sizeof(*pDpLibModesetState)); + if (pDpLibModesetState == NULL) { + return NULL; + } + + InitDpModesetParams(pDispEvo, + head, + displayId, + pTimings, + colorSpace, + &pDpLibModesetState->modesetParams); + if (pTimings->dpDsc.enable) { + pDpLibModesetState->modesetParams.modesetInfo.bEnableDsc = true; + + /* + * If DSC is enabled then override normal pixel depth with + * target bpp rate of DSC encoder, the rate at which it is going to + * output compressed stream. + */ + pDpLibModesetState->modesetParams.modesetInfo.depth = + pTimings->dpDsc.bitsPerPixelX16; + } + pDpLibModesetState->dpyIdList = dpyIdList; + + return pDpLibModesetState; +} + +void nvDPLibFreeModesetState(NVDPLibModesetStatePtr pDpLibModesetState) +{ + nvFree(pDpLibModesetState); +} + +/* + * Validate the mode for a given NVHwModeTimingsEvo + dpyIdList. This + * function should be called for each head, and must be called between + * nvDPBeginValidation and nvDPEndValidation. + * + * If validation fails, this function returns FALSE. You must still call + * nvDPEndValidation even if an individual head fails. + * + * If validation succeeds, the DSC fields within pTimings are updated with what + * is returned by compoundQueryAttach(). 
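+ *
+ * For illustration, the expected per-disp call sequence is roughly (numHeads
+ * and ok are placeholder names):
+ *
+ *   nvDPBeginValidation(pDispEvo);
+ *   for (head = 0; head < numHeads; head++) {
+ *       ok = nvDPLibValidateTimings(pDispEvo, head, displayId, dpyIdList,
+ *                                   colorSpace, pModeValidationParams,
+ *                                   pTimings) && ok;
+ *   }
+ *   ok = nvDPEndValidation(pDispEvo) && ok;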
+ */ +NvBool nvDPLibValidateTimings( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NvU32 displayId, + const NVDpyIdList dpyIdList, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const struct NvKmsModeValidationParams *pModeValidationParams, + NVHwModeTimingsEvo *pTimings) +{ + const NVDpyEvoRec *pDpyEvo; + const NVDPLibConnectorRec *pDpLibConnector = NULL; + bool found = false; + + DisplayPort::Group *pGroup = NULL; + DisplayPort::DscOutParams *pDscOutParams = NULL; + DisplayPort::DpModesetParams *pModesetParams = NULL; + DisplayPort::DscParams dpDscParams; + NvBool ret = FALSE; + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + if (!found) { + pDpLibConnector = pDpyEvo->pConnectorEvo->pDpLibConnector; + found = true; + } else if (pDpLibConnector != pDpyEvo->pConnectorEvo->pDpLibConnector) { + /* All Dpys must belongs to same DP connector */ + return FALSE; + } + } + + /* Do nothing if any of the display is not DP */ + if (pDpLibConnector == NULL) { + return TRUE; + } + + pGroup = CreateGroup(pDpLibConnector, dpyIdList); + if (pGroup == NULL) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to create a DisplayPort group"); + goto done; + } + + pDscOutParams = + (DisplayPort::DscOutParams*) nvCalloc(1, sizeof(*pDscOutParams)); + if (pDscOutParams == NULL) { + goto done; + } + + + pModesetParams = + (DisplayPort::DpModesetParams*) nvCalloc(1, sizeof(*pModesetParams)); + if (pModesetParams == NULL) { + goto done; + } + + InitDpModesetParams(pDispEvo, + head, + displayId, + pTimings, + colorSpace, + pModesetParams); + + dpDscParams.bCheckWithDsc = true; + dpDscParams.forceDsc = pModeValidationParams->forceDsc ? + DisplayPort::DSC_FORCE_ENABLE : + DisplayPort::DSC_DEFAULT; + dpDscParams.bitsPerPixelX16 = + pModeValidationParams->dscOverrideBitsPerPixelX16; + dpDscParams.pDscOutParams = pDscOutParams; + + ret = pDpLibConnector->connector->compoundQueryAttach( + pGroup, *pModesetParams, + &dpDscParams); + + if (ret) { + pTimings->dpDsc.enable = dpDscParams.bEnableDsc; + pTimings->dpDsc.bitsPerPixelX16 = dpDscParams.bitsPerPixelX16; + + ct_assert(sizeof(pTimings->dpDsc.pps) == sizeof(pDscOutParams->PPS)); + + nvkms_memcpy(pTimings->dpDsc.pps, + pDscOutParams->PPS, sizeof(pTimings->dpDsc.pps)); + } + +done: + nvFree(pDscOutParams); + nvFree(pModesetParams); + if (pGroup != NULL) { + pGroup->destroy(); + } + return ret; +} + +/* + * Finishes DisplayPort mode validation. Returns TRUE if the complete + * configuration is possible, and FALSE if it can't be achieved. 
+ */ +NvBool nvDPEndValidation(NVDispEvoPtr pDispEvo) +{ + NvBool ret = TRUE; + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + DisplayPort::Connector *connector = + pConnectorEvo->pDpLibConnector->connector; + + /* endCompoundQuery() must be called for all dp connectors */ + ret = connector->endCompoundQuery() && ret; + } + } + + return ret; +} + +NvBool nvDPValidateModeForDpyEvo( + const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace, + const struct NvKmsModeValidationParams *pModeValidationParams, + NVHwModeTimingsEvo *pTimings) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + + nvAssert(nvConnectorUsesDPLib(pConnectorEvo)); + + DisplayPort::Connector *connector = + pConnectorEvo->pDpLibConnector->connector; + + connector->beginCompoundQuery(); + NvBool ret = nvDPLibValidateTimings(pDpyEvo->pDispEvo, + 0 /* head */, + 0 /* displayId */, + nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id), + colorSpace, + pModeValidationParams, + pTimings); + connector->endCompoundQuery(); + + return ret; +} + +/* + * Notify the DisplayPort library that a given mode is about to be set on a + * given head. The configuration for this head must have previously been + * validated by a call to nvDPLibValidateTimings. + */ +static +void NotifyAttachBegin(NVDPLibConnectorPtr pDpLibConnector, + const NvU32 head, + const NVDPLibModesetStateRec *pDpLibModesetState) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const DisplayPort::DpModesetParams *pParams = + &pDpLibModesetState->modesetParams; + const NVDpyEvoRec *pDpyEvo = NULL; + + /* Insert active dpys into group */ + pDpLibConnector->dpyIdList[head] = pDpLibModesetState->dpyIdList; + FOR_ALL_EVO_DPYS(pDpyEvo, pDpLibConnector->dpyIdList[head], pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice) { + pDpLibConnector->pGroup[head]->insert( + pDpyEvo->dp.pDpLibDevice->device); + } + } + + pDpLibConnector->connector->notifyAttachBegin( + pDpLibConnector->pGroup[head], + *pParams); +} + +/* + * Notify the DisplayPort library that a modeset on a head begun by + * nvDPNotifyAttachBegin is finished. + */ +static void NotifyAttachEnd(NVDPLibConnectorPtr pDpLibConnector, NvU32 head) +{ + pDpLibConnector->connector->notifyAttachEnd(false); + pDpLibConnector->headMask |= NVBIT(head); +} + +/* + * Notify the DisplayPort library that the given head driving displays on this + * connector is about to be shut down. + */ +static void NotifyDetachBegin(NVDPLibConnectorPtr pDpLibConnector, const NvU32 head) +{ + /* + * The firmware group is the VBIOS monitor group the DP Library manages + * internally. In notifyDetachBegin(NULL), the NULL defaults to firmware + * group. + */ + pDpLibConnector->connector->notifyDetachBegin( + pDpLibConnector->headInFirmware ? + NULL : pDpLibConnector->pGroup[head]); +} + +/* + * Notify the DisplayPort library that the driver has finished shutting down a + * head that was previously driving this connector. 
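+ *
+ * This balances a preceding NotifyDetachBegin() call for the same head:
+ * nvDPPreSetMode() issues the Begin notifications before the modeset update
+ * is committed, and nvDPPostSetMode() issues the matching End notifications.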
+ */ +static void NotifyDetachEnd(NVDPLibConnectorPtr pDpLibConnector, const NvU32 head) +{ + pDpLibConnector->connector->notifyDetachEnd(); + + if (!pDpLibConnector->headInFirmware) { + const NVConnectorEvoRec *pConnectorEvo = + pDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDpyEvoRec *pDpyEvo; + + + /* Empty inactive group */ + FOR_ALL_EVO_DPYS(pDpyEvo, pDpLibConnector->dpyIdList[head], pDispEvo) { + if (pDpyEvo->dp.pDpLibDevice) { + pDpLibConnector->pGroup[head]->remove( + pDpyEvo->dp.pDpLibDevice->device); + } + } + pDpLibConnector->dpyIdList[head] = nvEmptyDpyIdList(); + } else { + nvAssert(pDpLibConnector->pGroup[head]->enumDevices(0) == NULL); + pDpLibConnector->headInFirmware = false; + } + + pDpLibConnector->headMask &= ~NVBIT(head); +} + +/* + * Handles DP stream programming requires to be done before committing MODESET + * update. The function should be called for each of affected(change in + * head-connector attachment) DpLib connectors, before commit. + */ +void nvDPPreSetMode(NVDPLibConnectorPtr pDpLibConnector, + const NVEvoModesetUpdateState *pModesetUpdateState) +{ + const NVConnectorEvoRec *pConnectorEvo = + pDpLibConnector->pConnectorEvo; + NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NvU32 oldHeadMask = pDpLibConnector->headMask; + const NvU32 newHeadMask = + nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + + for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + + if ((newHeadMask & NVBIT(head)) != 0x0 && + (oldHeadMask & NVBIT(head)) == 0x0) { + + NotifyAttachBegin(pDpLibConnector, + head, + pModesetUpdateState->pDpLibModesetState[head]); + + } else if ((newHeadMask & NVBIT(head)) == 0x0 && + (oldHeadMask & NVBIT(head)) != 0x0) { + + NotifyDetachBegin(pDpLibConnector, head); + + } + } +} + +/* + * Handles DP stream programming requires to be done before committing MODESET + * update. The function should be called for each of affected(change in + * head-connector attachment) DpLib connectors, before commit. 
+ */ +void nvDPPostSetMode(NVDPLibConnectorPtr pDpLibConnector) +{ + const NVConnectorEvoRec *pConnectorEvo = + pDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NvU32 oldHeadMask = pDpLibConnector->headMask; + const NvU32 newHeadMask = + nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + + for (NvU32 head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + + if ((newHeadMask & NVBIT(head)) != 0x0 && + (oldHeadMask & NVBIT(head)) == 0x0) { + + NotifyAttachEnd(pDpLibConnector, head); + + } else if ((newHeadMask & NVBIT(head)) == 0x0 && + (oldHeadMask & NVBIT(head)) != 0x0) { + + NotifyDetachEnd(pDpLibConnector, head); + + } + } + + /* + * Update DisplayPort link information for all displays on DpLib connector + */ + if (newHeadMask != oldHeadMask) { + NVDpyEvoPtr pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pDpyEvo->pConnectorEvo->pDpLibConnector == pDpLibConnector) { + nvDPLibUpdateDpyLinkConfiguration(pDpyEvo); + } + } + } +} + +void nvDPPause(NVDPLibConnectorPtr pNVDpLibConnector) +{ + DisplayPort::Connector *connector = pNVDpLibConnector->connector; + const NVConnectorEvoRec *pConnectorEvo = pNVDpLibConnector->pConnectorEvo; + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + if (!pNVDpLibConnector->isActive) { + return; + } + + if (pDevEvo->skipConsoleRestore && pNVDpLibConnector->headMask != 0) { + /* Clear vbios DisplayPort RAD scratch registers, see bug 200471345 */ + + nvAssert(nvPopCount32(pNVDpLibConnector->headMask) == 1); + nvAssert(connector->isDp11ProtocolForced()); + + NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS params = {0}; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + nvAssert(pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A || + pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B); + + params.dpLink = pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ? 0 : 1; + params.sorIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + NvU32 ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug( + pDispEvo, + EVO_LOG_ERROR, + "NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG " + "failed, error code 0x%x", + ret); + } + } + + /* Before pausing DpLib, destroy group and clear head bitmask */ + for (NvU32 head = 0; head < ARRAY_LEN(pNVDpLibConnector->pGroup); head++) { + pNVDpLibConnector->pGroup[head]->destroy(); + } + pNVDpLibConnector->headMask = 0x0; + + connector->pause(); + + pNVDpLibConnector->isActive = false; +} + +/*! + * Determine which head, if any, is driving this connector. + */ +static NvU32 GetFirmwareHead(NVConnectorEvoPtr pConnectorEvo) +{ + NvU32 orIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + if (orIndex == NV_INVALID_OR || + pConnectorEvo->or.ownerHeadMask[orIndex] == 0) { + return NV_INVALID_HEAD; + } + + return BIT_IDX_32(pConnectorEvo->or.ownerHeadMask[orIndex]); +} + +/*! + * Determine whether an active connector shares an OR with this connector. 
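+ *
+ * nvDPResume() uses the result as the firmwareLinkHandsOff argument to
+ * DisplayPort::Connector::resume(), so that (as the name suggests) the
+ * library leaves a link shared with another active connector untouched.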
+ */ +static bool ConnectorIsSharedWithActiveOR(NVConnectorEvoPtr pConnectorEvo) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVConnectorEvoPtr pOtherConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pOtherConnectorEvo, pDispEvo) { + if (pOtherConnectorEvo != pConnectorEvo && + nvIsConnectorActiveEvo(pOtherConnectorEvo) && + (pOtherConnectorEvo->or.mask & pConnectorEvo->or.mask) != 0x0) { + return true; + } + } + + return false; +} + +NvBool nvDPResume(NVDPLibConnectorPtr pNVDpLibConnector, NvBool plugged) +{ + NVConnectorEvoRec *pConnectorEvo = + pNVDpLibConnector->pConnectorEvo; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + DisplayPort::Connector *c = pNVDpLibConnector->connector; + const unsigned int firmwareHead = GetFirmwareHead(pConnectorEvo); + const bool firmwareLinkHandsOff = ConnectorIsSharedWithActiveOR(pConnectorEvo); + bool dpyIdIsDynamic = false; + /* By default allow MST */ + bool allowMST = true; + + if (firmwareHead != NV_INVALID_HEAD) { + NVDpyId firmwareDpyId = nvInvalidDpyId(); + + pNVDpLibConnector->headInFirmware = true; + pNVDpLibConnector->headMask = NVBIT(firmwareHead); + + // Use the first displayId in the boot display list. + // + // TODO: What should we do if more than one dpy ID is listed for a boot + // display? + nvAssert(nvCountDpyIdsInDpyIdList(pDispEvo->vbiosDpyConfig[firmwareHead]) == 1); + firmwareDpyId = + nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), + pDispEvo->vbiosDpyConfig[firmwareHead]); + + dpyIdIsDynamic = !nvDpyIdsAreEqual(firmwareDpyId, + pConnectorEvo->displayId); + + /* Do not allow MST if firmware driving DP connector in SST mode */ + if (!dpyIdIsDynamic) { + allowMST = false; + } + } + + pConnectorEvo->detectComplete = FALSE; + + pNVDpLibConnector->plugged = plugged; + if (plugged && !pNVDpLibConnector->headInFirmware) { + NvBool ret = nvAssignSOREvo(pConnectorEvo, 0 /* sorExcludeMask */); + + nvAssert(ret); + if (!ret) { + // DP lib skips LT for unassigned SOR. + } + } + + c->resume(firmwareLinkHandsOff, + pNVDpLibConnector->headInFirmware, + plugged, + false /* isUefiSystem */, + firmwareHead, + dpyIdIsDynamic /* bFirmwareLinkUseMultistream */, + true /* bDisableVbiosScratchRegisterUpdate, bug 200471345 */, + allowMST); + + for (NvU32 head = 0; head < ARRAY_LEN(pNVDpLibConnector->pGroup); head++) { + pNVDpLibConnector->pGroup[head] = + pNVDpLibConnector->connector->newGroup(); + + if (pNVDpLibConnector->pGroup[head] == NULL) { + for (NvU32 i = 0; i < head; i++) { + pNVDpLibConnector->pGroup[i]->destroy(); + } + goto failed; + } + } + + pNVDpLibConnector->isActive = true; + return TRUE; + +failed: + pNVDpLibConnector->connector->pause(); + return FALSE; +} + +void nvDPSetAllowMultiStreamingOneConnector( + NVDPLibConnectorPtr pDpLibConnector, + NvBool allowMST) +{ + NVConnectorEvoRec *pConnectorEvo = + pDpLibConnector->pConnectorEvo; + + if (pDpLibConnector->connector->getAllowMultiStreaming() == allowMST) { + return; + } + + /* + * If there is change in MST capability and DPlib re-runs device detection + * routine for plugged sink. Reset 'pConnectorEvo->detectComplete' only for + * MST capable sinks, in order to track completion of that fresh detection + * routine. 
+ */ + if (pDpLibConnector->plugged && + pDpLibConnector->connector->getSinkMultiStreamCap()) { + pConnectorEvo->detectComplete = FALSE; + } + pDpLibConnector->connector->setAllowMultiStreaming(allowMST); +} + +static NvBool IsDpSinkMstCapableForceSst(const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + + if (pConnectorEvo == NULL || + pConnectorEvo->pDpLibConnector == NULL) { + return FALSE; + } + + DisplayPort::Connector *c = + pConnectorEvo->pDpLibConnector->connector; + + return (c->getSinkMultiStreamCap() && !c->getAllowMultiStreaming()); +} + +static NvBool IsDpLinkTransitionWaitingForHeadShutDown( + const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + return pHeadState->pConnectorEvo && + nvDPIsLinkAwaitingTransition(pHeadState->pConnectorEvo); +} + +void nvDPSetAllowMultiStreaming(NVDevEvoPtr pDevEvo, NvBool allowMST) +{ + NvBool needUpdate = FALSE; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NVDPLibConnectorPtr pDpLibConnector = + pConnectorEvo->pDpLibConnector; + if (pDpLibConnector && + pDpLibConnector->connector->getAllowMultiStreaming() + != allowMST) { + needUpdate = TRUE; + } + } + } + + if (!needUpdate) { + return; + } + + nvShutDownHeads(pDevEvo, IsDpSinkMstCapableForceSst); + + /* + * Heads driving MST capable sinks in force SST mode, are shut down. Now you + * can allow MST on all DisplayPort Connector, safely in compliance + * of DP 1.2 specification. + * + * The section 5.4 and table 2-75 (of section 2.9.3.1) of DisplayPort 1.2 + * specification, does not allow to enable/disable MST mode of sink while + * transmitting active stream (see description of CL#25551338). + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!pConnectorEvo->pDpLibConnector) { + continue; + } + nvDPSetAllowMultiStreamingOneConnector( + pConnectorEvo->pDpLibConnector, + allowMST); + } + } + + /* Shut down all DisplayPort heads that need to transition to/from SST. */ + nvShutDownHeads(pDevEvo, + IsDpLinkTransitionWaitingForHeadShutDown); + + /* + * Handle any pending timers the DP library scheduled to notify us + * about changes in the connected device list. + */ + nvDPFireExpiredTimers(pDevEvo); +} + +enum NVDpLinkMode nvDPGetActiveLinkMode(NVDPLibConnectorPtr pDpLibConnector) +{ + DisplayPort::LinkConfiguration linkConfig = + pDpLibConnector->connector->getActiveLinkConfig(); + if (linkConfig.lanes == 0) { + return NV_DP_LINK_MODE_OFF; + } + return linkConfig.multistream ? NV_DP_LINK_MODE_MST : + NV_DP_LINK_MODE_SST; +} diff --git a/src/nvidia-modeset/src/dp/nvdp-device.cpp b/src/nvidia-modeset/src/dp/nvdp-device.cpp new file mode 100644 index 000000000..e24e7a211 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-device.cpp @@ -0,0 +1,148 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "dp/nvdp-device.h" +#include "nvdp-connector-event-sink.hpp" +#include "dp/nvdp-connector-event-sink.h" + +#include "nvkms-types.h" +#include "nvkms-rm.h" +#include "nvkms-dpy.h" + +#include "nvctassert.h" + +void nvDPDeviceSetPowerState(NVDpyEvoPtr pDpyEvo, NvBool on) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!pDpyEvo->dp.pDpLibDevice) { + return; + } + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + DisplayPort::Device *device = pDpyEvo->dp.pDpLibDevice->device; + + nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + device->setPanelPowerParams(on, on); + + /* + * WAR: Some monitors clear the MSA_TIMING_PAR_IGNORE_EN bit in the + * DOWNSPREAD_CTRL DPCD register after changing power state, which will + * cause the monitor to fail to restore the image after powering back on + * while VRR flipping. To work around this, re-enable Adaptive-Sync + * immediately after powering on. 
(Bug 200488547) + */ + if (nvDpyIsAdaptiveSync(pDpyEvo) && on) { + NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDPLibConnectorPtr pDpLibConnector = pConnectorEvo->pDpLibConnector; + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, + pDpLibConnector->dpyIdList[head]) && + (pDispEvo->headState[head].timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE)) { + nvDPLibSetAdaptiveSync(pDispEvo, head, TRUE); + break; + } + } + } +} + +unsigned int nvDPGetEDIDSize(const NVDpyEvoRec *pDpyEvo) +{ + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + if (!pDpLibDevice) { + return 0; + } + + return pDpLibDevice->device->getEDIDSize(); +} + +NvBool nvDPGetEDID(const NVDpyEvoRec *pDpyEvo, void *buffer, unsigned int size) +{ + NVDPLibDevicePtr pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + + if (!pDpLibDevice) { + return FALSE; + } + + return pDpLibDevice->device->getEDID((char *)buffer, size); +} + +void nvDPGetDpyGUID(NVDpyEvoPtr pDpyEvo) +{ + NVDPLibDevicePtr pDpLibDevice; + const char *str; + + nvkms_memset(&pDpyEvo->dp.guid, 0, sizeof(pDpyEvo->dp.guid)); + + ct_assert(sizeof(pDpyEvo->dp.guid.buffer) == DPCD_GUID_SIZE); + + if (!nvDpyUsesDPLib(pDpyEvo)) { + return; + } + + pDpLibDevice = pDpyEvo->dp.pDpLibDevice; + if (!pDpLibDevice) { + return; + } + + pDpyEvo->dp.guid.valid = + nvkmsDisplayPort::nvDPGetDeviceGUID(pDpLibDevice->device, + pDpyEvo->dp.guid.buffer) == true; + if (!pDpyEvo->dp.guid.valid) { + return; + } + + str = nvkmsDisplayPort::nvDPGetDeviceGUIDStr(pDpLibDevice->device); + if (str != NULL) { + nvkms_strncpy(pDpyEvo->dp.guid.str, str, sizeof(pDpyEvo->dp.guid.str)); + } else { + pDpyEvo->dp.guid.valid = FALSE; + } +} + +// Perform a fake lostDevice during device teardown. This function is called by +// DpyFree before it deletes a pDpy. +void nvDPDpyFree(NVDpyEvoPtr pDpyEvo) +{ + if (!nvDpyUsesDPLib(pDpyEvo)) { + return; + } + + if (!pDpyEvo->dp.pDpLibDevice) { + return; + } + + DisplayPort::Device *device = pDpyEvo->dp.pDpLibDevice->device; + + pDpyEvo->pConnectorEvo->pDpLibConnector->evtSink->lostDevice(device); +} diff --git a/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp b/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp new file mode 100644 index 000000000..70dda08fc --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-evo-interface.cpp @@ -0,0 +1,149 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// This file implements the EVO RM interface used by the DisplayPort library. + +#include "nvkms-utils.h" + +#include "nvdp-evo-interface.hpp" + +#include "nvkms-rmapi.h" + +namespace nvkmsDisplayPort { + +EvoInterface::EvoInterface(NVConnectorEvoPtr pConnectorEvo) + : pConnectorEvo(pConnectorEvo) +{ +} + +NvU32 EvoInterface::rmControl0073(NvU32 command, void * params, + NvU32 paramSize) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + + return nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + command, + params, + paramSize); +} + +NvU32 EvoInterface::rmControl5070(NvU32 command, void * params, + NvU32 paramSize) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + + return nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + command, + params, + paramSize); +} + +/*! + * Look up the value of a particular key in the DisplayPort-specific registry + * corresponding to this connector. These values are provided at device + * allocation time, copied from the client request during nvAllocDevEvo(). + * + * \param[in] key The name of the key to look up. + * + * \return The unsigned 32-bit value set for the key, or 0 if the key is + * not set. + */ +NvU32 EvoInterface::getRegkeyValue(const char *key) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NvU32 val; + NvBool found = nvGetRegkeyValue(pDevEvo, key, &val); + + if (found) { + return val; + } else { + return 0; + } +} + +bool EvoInterface::isInbandStereoSignalingSupported() +{ + + return FALSE; +} + +NvU32 EvoInterface::getSubdeviceIndex() +{ + return pConnectorEvo->pDispEvo->displayOwner; +} + +NvU32 EvoInterface::getDisplayId() +{ + return nvDpyIdToNvU32(pConnectorEvo->displayId); +} + +NvU32 EvoInterface::getSorIndex() +{ + return nvEvoConnectorGetPrimaryOr(pConnectorEvo); +} + +NvU32 EvoInterface::getLinkIndex() +{ + switch (pConnectorEvo->or.protocol) { + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + return 0; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + return 1; + } + + nvAssert(!"Unrecognized DP protocol"); + return -1; +} + +NvU32 EvoInterface::monitorDenylistInfo( + NvU32 manufId, NvU32 productId, + DisplayPort::DpMonitorDenylistData *pDenylistData) +{ + // + // WAR for the Sharp internal (eDP) panel used in Toshiba/Dell systems: + // override the optimal link configuration to HBR2. + // + // HBR2 is required to drive 4K resolutions, which are supported by the + // DP 1.2 and later specifications. The panel advertises itself as DP 1.2 + // capable but does not implement the ESI address space, which violates + // the specification, so the DP library downgrades its DPCD revision to + // 1.1. With that downgrade of the DPCD revision, the link rate would + // also get downgraded to HBR.
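    //
    // As a rough sanity check of the override below (assuming the standard
    // DPCD encoding of the link rate in units of 0.27 Gbps per lane, and
    // 8b/10b channel coding):
    //
    //   linkRate 0x14   -> 20 * 0.27 Gbps = 5.4 Gbps per lane (HBR2)
    //   laneCount_4     -> 4 * 5.4 Gbps = 21.6 Gbps raw, or roughly
    //                      17.28 Gbps of payload after 8b/10b coding
    //   3840x2160 @ 60 Hz, 24 bpp -> roughly 12-14 Gbps depending on
    //                      blanking, so HBR2 x4 is sufficient, while
    //                      HBR x4 (~8.64 Gbps of payload) is not.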
+ // + if (manufId == 0x104d && + (productId == 0x1414 || productId == 0x1430)) { + + NvU32 warFlags = DisplayPort::DP_MONITOR_CAPABILITY_DP_OVERRIDE_OPTIMAL_LINK_CONFIG; + + pDenylistData->dpOverrideOptimalLinkConfig.linkRate = 0x14; // HBR2 + pDenylistData->dpOverrideOptimalLinkConfig.laneCount = laneCount_4; // 4 lanes + + return warFlags; + } + + return 0; +} + +}; // namespace nvkmsDisplayPort diff --git a/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp b/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp new file mode 100644 index 000000000..114aaa466 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-evo-interface.hpp @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVDP_EVO_INTERFACE_HPP__ +#define __NVDP_EVO_INTERFACE_HPP__ + +#include +#include +#include + +namespace nvkmsDisplayPort +{ + +class EvoInterface : public DisplayPort::Object, + public DisplayPort::EvoInterface +{ +public: + const NVConnectorEvoPtr pConnectorEvo; + + EvoInterface(NVConnectorEvoPtr pConnectorEvo); + + // Functions inherited from DisplayPort::EvoInterface + virtual NvU32 rmControl0073(NvU32 command, void * params, NvU32 paramSize); + virtual NvU32 rmControl5070(NvU32 command, void * params, NvU32 paramSize); + + virtual void disconnectHead(unsigned head) { + nvAssert(!"disconnectHead should never be called"); + } + virtual void reattachHead(unsigned head) { + nvAssert(!"reattachHead should never be called"); + } + + virtual NvU32 getSubdeviceIndex(); + virtual NvU32 getDisplayId(); + virtual NvU32 getSorIndex(); + virtual NvU32 getLinkIndex(); + virtual NvU32 getRegkeyValue(const char *key); + virtual bool isInbandStereoSignalingSupported(); + + virtual NvU32 monitorDenylistInfo( + NvU32 manufId, + NvU32 productId, + DisplayPort::DpMonitorDenylistData *pDenylistData); +}; + +}; // namespace nvkmsDisplayPort + +#endif // __NVDP_EVO_INTERFACE_HPP__ diff --git a/src/nvidia-modeset/src/dp/nvdp-host.cpp b/src/nvidia-modeset/src/dp/nvdp-host.cpp new file mode 100644 index 000000000..371483f0b --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-host.cpp @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* DisplayPort management routines */ + +#include + +#include "nvkms-utils.h" + +#include "dp_hostimp.h" + +void *dpMalloc(NvLength sz) +{ + return nvAlloc(sz); +} + +void dpFree(void *p) +{ + nvFree(p); +} + +void dpPrint(const char *format, ...) +{ + va_list ap; + va_start(ap, format); + nvVEvoLog(EVO_LOG_INFO, NV_INVALID_GPU_LOG_INDEX, format, ap); + va_end(ap); +} + +void dpDebugBreakpoint(void) +{ + nvAssert(!"DisplayPort library debug breakpoint"); +} + +#if NV_DP_ASSERT_ENABLED +void dpAssert(const char *expression, const char *file, + const char *function, int line) +{ + nvDebugAssert(expression, file, function, line); +} +#endif + +void dpTraceEvent(NV_DP_TRACING_EVENT event, + NV_DP_TRACING_PRIORITY priority, NvU32 numArgs, ...) +{ + // To support DPlib tracing, implement this function. +} + diff --git a/src/nvidia-modeset/src/dp/nvdp-timer.cpp b/src/nvidia-modeset/src/dp/nvdp-timer.cpp new file mode 100644 index 000000000..bc4d2e29d --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-timer.cpp @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// This file implements the timer callback mechanism for the DisplayPort +// library. 
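//
// To make the flow concrete, here is a minimal sketch (hypothetical caller,
// not taken from this change) of how a DisplayPort library callback moves
// through this mechanism: queueCallback() wraps the library's
// RawTimer::Callback in a Timer::Callback that schedules an nvkms timer and
// takes a reference on the pDevEvo; when the nvkms timer fires, the wrapper
// drops that reference, invokes expired() if the device is still alive, and
// then deletes itself.
//
//     class ExampleCallback : public DisplayPort::RawTimer::Callback
//     {
//     public:
//         virtual void expired()
//         {
//             // Runs from the nvkms timer handler roughly "ms" milliseconds
//             // after the callback was queued.
//         }
//     };
//
//     static ExampleCallback exampleCb;   // hypothetical, for illustration
//
//     // With an NVDevEvoPtr pDevEvo in scope:
//     pDevEvo->dpTimer->rawTimer.queueCallback(&exampleCb, 30 /* ms */);
//
// nvDPFireExpiredTimers() additionally lets nvkms run any already-expired
// callbacks synchronously, via Timer::fireExpiredTimers().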
+ +#include "nvkms-types.h" + +#include "dp/nvdp-timer.h" +#include "nvdp-timer.hpp" + +namespace nvkmsDisplayPort { + Timer::Callback::Callback(DisplayPort::List *pList, + NVDevEvoPtr pDevEvo, + DisplayPort::RawTimer::Callback *dpCallback, + int ms) + : dpCallback(dpCallback), + ref_ptr(pDevEvo->ref_ptr), + handle(nvkms_alloc_timer(onTimerFired, this, 0, ms * 1000)), + expireTimeUs(nvkms_get_usec() + ms * 1000) + { + if (!allocFailed()) { + pList->insertFront(this); + nvkms_inc_ref(ref_ptr); + } + } + + Timer::Callback::~Callback() + { + nvkms_free_timer(handle); + } + + bool Timer::Callback::allocFailed() const + { + return handle == NULL; + } + + bool Timer::Callback::isExpired(NvU64 timeNowUs) const + { + return timeNowUs >= expireTimeUs; + } + + void Timer::Callback::onTimerFired(void *data, NvU32 dataU32) + { + Timer::Callback *cb = static_cast<Timer::Callback *>(data); + cb->onTimerFired(); + } + + void Timer::Callback::onTimerFired() + { + if (nvkms_dec_ref(ref_ptr)) { + dpCallback->expired(); + } + delete this; + } + + void Timer::Callback::fireIfExpired(NvU64 timeNowUs) + { + if (isExpired(timeNowUs)) { + onTimerFired(); + } + } + + Timer::Timer(NVDevEvoPtr pDevEvo) + : pDevEvo(pDevEvo) + { + } + + void Timer::queueCallback(DisplayPort::RawTimer::Callback *dpCallback, int ms) + { + Callback *cb = new Callback(&timerList, pDevEvo, dpCallback, ms); + nvAssert(cb && !cb->allocFailed()); + if (!cb || cb->allocFailed()) { + delete cb; + return; + } + } + + NvU64 Timer::getTimeUs() + { + return nvkms_get_usec(); + } + + void Timer::sleep(int ms) + { + nvkms_usleep(ms * 1000); + } + + void Timer::fireExpiredTimers() + { + const NvU64 timeNowUs = getTimeUs(); + DisplayPort::ListElement *pElem = timerList.begin(); + DisplayPort::ListElement *pNext; + + while (pElem != timerList.end()) { + Callback *cb = static_cast<Callback *>(pElem); + pNext = pElem->next; + + cb->fireIfExpired(timeNowUs); + + pElem = pNext; + } + } + +}; // namespace nvkmsDisplayPort + +NvBool nvDPTimersPending(void) +{ + return FALSE; +} + +NVDPLibTimerPtr nvDPAllocTimer(NVDevEvoPtr pDevEvo) +{ + NVDPLibTimerPtr pTimer = new _nv_dplibtimer(pDevEvo); + return pTimer; +} + +void nvDPFreeTimer(NVDPLibTimerPtr pTimer) +{ + delete pTimer; +} + +void nvDPFireExpiredTimers(NVDevEvoPtr pDevEvo) +{ + pDevEvo->dpTimer->rawTimer.fireExpiredTimers(); +} diff --git a/src/nvidia-modeset/src/dp/nvdp-timer.hpp b/src/nvidia-modeset/src/dp/nvdp-timer.hpp new file mode 100644 index 000000000..125739e53 --- /dev/null +++ b/src/nvidia-modeset/src/dp/nvdp-timer.hpp @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVDP_TIMER_HPP__ +#define __NVDP_TIMER_HPP__ + +#include +#include +#include + +namespace nvkmsDisplayPort +{ + +class Timer : public DisplayPort::RawTimer +{ + NVDevEvoPtr pDevEvo; + DisplayPort::List timerList; + + class Callback : public DisplayPort::ListElement { + DisplayPort::RawTimer::Callback *dpCallback; + // ref_ptr to the pDevEvo + nvkms_ref_ptr *ref_ptr; + nvkms_timer_handle_t *handle; + NvU64 expireTimeUs; + + static void onTimerFired(void *data, NvU32 dataU32); + void onTimerFired(); + + public: + // Construct an NVKMS timer callback. Since exceptions cannot be used + // in NVKMS code, callers must call Callback::allocFailed() to query + // whether the constructor succeeded. + // + // Scheduling a callback bumps the refcount on the corresponding + // pDevEvo, so that a device isn't freed until all pending callbacks + // have fired. + Callback(DisplayPort::List *pList, + NVDevEvoPtr pDevEvo, + DisplayPort::RawTimer::Callback *dpCallback, + int ms); + ~Callback(); + + // Returns TRUE if the constructor failed. + bool allocFailed() const; + // Returns TRUE if the timer is ready to fire. + bool isExpired(NvU64 timeNowUs) const; + // Fire the timer if it's ready. + // NOTE: If the timer fires, this deletes it. + void fireIfExpired(NvU64 timeNowUs); + }; +public: + Timer(NVDevEvoPtr pDevEvo); + + virtual void queueCallback(DisplayPort::RawTimer::Callback *cb, int ms); + virtual NvU64 getTimeUs(); + virtual void sleep(int ms); + + void fireExpiredTimers(); +}; + +}; // namespace nvkmsDisplayPort + +struct _nv_dplibtimer : public DisplayPort::Object { + nvkmsDisplayPort::Timer rawTimer; + DisplayPort::Timer timer; + + _nv_dplibtimer(NVDevEvoPtr pDevEvo) + : rawTimer(pDevEvo), timer(&rawTimer) + { + } +}; + +#endif // __NVDP_TIMER_HPP__ diff --git a/src/nvidia-modeset/src/g_nvkms-evo-states.c b/src/nvidia-modeset/src/g_nvkms-evo-states.c new file mode 100644 index 000000000..e8ae3db22 --- /dev/null +++ b/src/nvidia-modeset/src/g_nvkms-evo-states.c @@ -0,0 +1,2818 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvkms-evo-states.h" + +static NvBool EvoLockStateFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateLockHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateNoLock(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool 
EvoLockStateSliLastSecondary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, 
const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondary(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); +static NvBool EvoLockStateVrr(NVDispEvoPtr, NVEvoSubDevPtr, NVEvoLockAction, const NvU32 *pHeads); + +static NvBool EvoLockStateFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockClientManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if 
(!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockClientPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServer( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef; + } + return 
TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerHouseSync( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + 
nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerHouseSyncPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServer; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, 
pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateFrameLockServerPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServer; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_ADD_SLI_PRIMARY: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads; + } + return TRUE; + + case NV_EVO_ADD_SLI_LAST_SECONDARY: + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeads; + } + return TRUE; + + case NV_EVO_UNLOCK_HEADS: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer; + } + return TRUE; + + case NV_EVO_ADD_SLI_SECONDARY: + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeads; + } + return TRUE; + + 
case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockClientPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + 
pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServer( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSync( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + 
pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, 
pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsFrameLockServerPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + 
pEvoSubDev->scanLockState = EvoLockStateLockHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServer; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateLockHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateNoLock( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockClient; + } + return TRUE; + + case NV_EVO_ADD_SLI_PRIMARY: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + case NV_EVO_ADD_SLI_LAST_SECONDARY: + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondary; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateFrameLockServer; + } + return TRUE; + + case NV_EVO_LOCK_HEADS: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + case NV_EVO_ENABLE_VRR: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateVrr; + } + return TRUE; + + case NV_EVO_ADD_SLI_SECONDARY: + if (!queryOnly) { + nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondary( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryFrameLockClient(pDispEvo, 
pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondaryFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondaryLockHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliLastSecondaryLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliLastSecondaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimary( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, 
pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClient; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockClientPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClient; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockServer( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr 
pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSync; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSync( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryFrameLockServerPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimary; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = 
EvoLockStateSliPrimaryFrameLockServer; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool 
EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads; + } else { + 
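+                /*
+                 * frameLockSliProxyClients counts additional frame lock
+                 * references beyond the first; the state machine only drops
+                 * back to the plain FrameLockClient state once the count
+                 * reaches zero, otherwise it just decrements the count.
+                 */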
pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServer( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync; + } 
else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSync; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case 
NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeads; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockClientPlusRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeads; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliPrimaryLockHeadsFrameLockServerPlusRef( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServerManyHeadsPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = 
EvoLockStateSliPrimaryLockHeadsFrameLockServerHouseSyncPlusRef; + } + return TRUE; + + case NV_EVO_ADD_FRAME_LOCK_REF: + if (!queryOnly) { + pEvoSubDev->frameLockSliProxyClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_SERVER: + if (!queryOnly) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockRef; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_REF: + if (!queryOnly) { + if (!pEvoSubDev->frameLockSliProxyClients) { + nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliPrimaryLockHeadsFrameLockServer; + } else { + pEvoSubDev->frameLockSliProxyClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondary( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondary; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryLockHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!nvEvoRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClient; + } + return TRUE; + + case NV_EVO_REM_SLI: + if (!queryOnly) { + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClient( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!nvEvoUnRefFrameLockSli(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + if (!queryOnly) { + nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads); + 
pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeads; + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateSliSecondaryLockHeadsFrameLockClientManyHeads( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_ADD_FRAME_LOCK_CLIENT: + if (!queryOnly) { + pEvoSubDev->frameLockExtraClients++; + } + return TRUE; + + case NV_EVO_REM_FRAME_LOCK_CLIENT: + if (!queryOnly) { + if (!pEvoSubDev->frameLockExtraClients) { + nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateSliSecondaryLockHeadsFrameLockClient; + } else { + pEvoSubDev->frameLockExtraClients--; + } + } + return TRUE; + + default: + return FALSE; + } +} + +static NvBool EvoLockStateVrr( + NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action, + const NvU32 *pHeads +) +{ + NvBool queryOnly = pHeads == NULL; + + switch (action) { + + case NV_EVO_DISABLE_VRR: + if (!queryOnly) { + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + pEvoSubDev->scanLockState = EvoLockStateNoLock; + } + return TRUE; + + default: + return FALSE; + } +} + +void nvEvoStateStartNoLock( + NVEvoSubDevPtr pEvoSubDev +) +{ + pEvoSubDev->scanLockState = EvoLockStateNoLock; +} + diff --git a/src/nvidia-modeset/src/nvkms-3dvision.c b/src/nvidia-modeset/src/nvkms-3dvision.c new file mode 100644 index 000000000..d38a184b7 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-3dvision.c @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "nvkms-types.h"
+#include "nvkms-3dvision.h"
+
+void nv3DVisionAuthenticationEvo(NVDispEvoRec *pDispEvo, const NvU32 head)
+{
+    return;
+}
+
+void nvDpyCheck3DVisionCapsEvo(NVDpyEvoPtr pDpyEvo)
+{
+    return;
+}
+
+NvBool
+nvPatch3DVisionModeTimingsEvo(NVT_TIMING *pTiming, NVDpyEvoPtr pDpyEvo,
+                              NVEvoInfoStringPtr pInfoString)
+{
+    return FALSE;
+}
+
+void nvDisable3DVisionAegis(const NVDpyEvoRec *pDpyEvo)
+{
+    return;
+}
+
+void nvSendHwModeTimingsToAegisEvo(const NVDispEvoRec *pDispEvo,
+                                   const NvU32 head)
+{
+    return;
+}
+
diff --git a/src/nvidia-modeset/src/nvkms-attributes.c b/src/nvidia-modeset/src/nvkms-attributes.c
new file mode 100644
index 000000000..ce736adaa
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-attributes.c
@@ -0,0 +1,1273 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nvkms-evo.h"
+#include "nvkms-types.h"
+#include "nvkms-attributes.h"
+#include "nvkms-dpy.h"
+#include "nvkms-framelock.h"
+#include "nvkms-vrr.h"
+#include "nvkms-rm.h"
+#include "nvkms-rmapi.h"
+#include "nvos.h"
+
+#include <ctrl/ctrl0073/ctrl0073dp.h> // NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_*
+
+/*!
+ * Set the current backlight brightness for the given pDpyEvo.
+ *
+ * \param[in]  pDpyEvo     The display device whose backlight brightness
+ *                         should be assigned.
+ * \param[in]  brightness  The backlight brightness value to program
+ *
+ * \return     TRUE if backlight brightness is available for this pDpyEvo,
+ *             otherwise FALSE.
+ */
+static NvBool DpySetBacklightBrightness(NVDpyEvoRec *pDpyEvo, NvS64 brightness)
+{
+    NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    if (!pDpyEvo->hasBacklightBrightness) {
+        return FALSE;
+    }
+
+    if (brightness > NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE) {
+        return FALSE;
+    }
+
+    if (brightness < NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE) {
+        return FALSE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyEvoGetConnectorId(pDpyEvo);
+    params.brightness = brightness;
+
+    ret = nvRmApiControl(
+            nvEvoGlobal.clientHandle,
+            pDevEvo->displayCommonHandle,
+            NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
+            &params, sizeof(params));
+
+    return (ret == NVOS_STATUS_SUCCESS);
+}
+
+/*!
+ * Query the current backlight brightness for the given pDpyEvo.
+ *
+ * \param[in]   pDpyEvo      The display device whose backlight brightness
+ *                           should be queried.
+ * \param[out]  pBrightness  The backlight brightness value
+ *
+ * \return      TRUE if backlight brightness is available for this pDpyEvo,
+ *              otherwise FALSE.
+ */
+static NvBool DpyGetBacklightBrightness(const NVDpyEvoRec *pDpyEvo,
+                                        NvS64 *pBrightness)
+{
+    NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyEvoGetConnectorId(pDpyEvo);
+
+    ret = nvRmApiControl(
+            nvEvoGlobal.clientHandle,
+            pDevEvo->displayCommonHandle,
+            NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
+            &params, sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        return FALSE;
+    }
+
+    nvAssert(params.brightness <= NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE);
+
+    *pBrightness = params.brightness;
+
+    return TRUE;
+}
+
+/*!
+ * Populate NvKmsAttributeValidValuesCommonReply for backlight brightness.
+ *
+ * \param[in]   pDpyEvo       The display device whose backlight brightness
+ *                            should be queried.
+ * \param[out]  pValidValues  The ValidValues structure to populate.
+ *
+ * \return      TRUE if backlight brightness is available for this pDpy,
+ *              otherwise FALSE.
+ */
+static NvBool DpyGetBacklightBrightnessValidValues(
+    const NVDpyEvoRec *pDpyEvo,
+    struct NvKmsAttributeValidValuesCommonReply *pValidValues)
+{
+    if (!pDpyEvo->hasBacklightBrightness) {
+        return FALSE;
+    }
+
+    pValidValues->type = NV_KMS_ATTRIBUTE_TYPE_RANGE;
+
+    pValidValues->u.range.min = NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MIN_VALUE;
+    pValidValues->u.range.max = NV0073_CTRL_BACKLIGHT_BRIGHTNESS_MAX_VALUE;
+
+    return TRUE;
+}
+
+/*!
+ * Query RM for the current scanline of the given pDpyEvo.
+ *
+ * \param[in]   pDpyEvo    The display device whose scanline
+ *                         should be queried.
+ * \param[out]  pScanLine  The scanline value.
+ *
+ * \return      TRUE if the scanline could be queried for this pDpyEvo,
+ *              otherwise FALSE.
+ */
+static NvBool GetScanLine(const NVDpyEvoRec *pDpyEvo, NvS64 *pScanLine)
+{
+    NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NvU32 ret;
+    const NvU32 head = pDpyEvo->head;
+
+    if (head == NV_INVALID_HEAD) {
+        return FALSE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.head = head;
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SYSTEM_GET_SCANLINE,
+                         &params, sizeof(params));
+
+    if (ret == NVOS_STATUS_SUCCESS) {
+        *pScanLine = params.currentScanline;
+        return TRUE;
+    }
+
+    return FALSE;
+}
+
+/*!
+ * Retrieve the current head of the given pDpyEvo.
+ *
+ * \param[in]   pDpyEvo  The display device whose head
+ *                       should be queried.
+ * \param[out]  pHead    The head value.
+ *
+ * \return      TRUE.
If there is no valid head pHead will + * return NV_INVALID_HEAD + */ +static NvBool GetHead(const NVDpyEvoRec *pDpyEvo, NvS64 *pHead) +{ + *pHead = (NvS64)pDpyEvo->head; + return TRUE; +} + +static NvBool DitherConfigurationAllowed(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + return pDevEvo->hal->caps.supportedDitheringModes != 0; +} + +static void SetDitheringCommon(NVDpyEvoPtr pDpyEvo) +{ + NVEvoUpdateState updateState = { }; + + if (pDpyEvo->head == NV_INVALID_HEAD) { + return; + } + + nvSetDitheringEvo(pDpyEvo->pDispEvo, + pDpyEvo->head, + pDpyEvo->requestedDithering.state, + pDpyEvo->requestedDithering.depth, + pDpyEvo->requestedDithering.mode, + &updateState); + + nvEvoUpdateAndKickOff(pDpyEvo->pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); +} + +/*! + * Assigns dithering on all dpys driven by pDpyEvo's head. + */ +static NvBool SetDithering(NVDpyEvoRec *pDpyEvo, NvS64 dithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + switch (dithering) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.state = dithering; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDithering(const NVDpyEvoRec *pDpyEvo, NvS64 *pDithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDithering = pDpyEvo->requestedDithering.state; + + return TRUE; +} + +static NvBool GetDitheringGenericValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + return DitherConfigurationAllowed(pDpyEvo); +} + +/*! + * Assigns ditheringMode on all dpys driven by pDpyEvo's head. + */ +static NvBool SetDitheringMode(NVDpyEvoRec *pDpyEvo, NvS64 ditheringMode) +{ + NVDevEvoPtr pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + NvU32 mask = (1 << ditheringMode); + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + if (!(mask & pDevEvo->hal->caps.supportedDitheringModes)) { + return FALSE; + } + + switch (ditheringMode) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.mode = ditheringMode; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDitheringMode(const NVDpyEvoRec *pDpyEvo, + NvS64 *pDitheringMode) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDitheringMode = pDpyEvo->requestedDithering.mode; + + return TRUE; +} + +static NvBool GetDitheringModeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = + pDevEvo->hal->caps.supportedDitheringModes; + + return TRUE; +} + +/*! + * Assigns ditheringDepth on all dpys driven by pDpyEvo's head. 
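+ * Only the AUTO, 6-bit, and 8-bit depths are accepted; any other value is
+ * rejected.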
+ */ +static NvBool SetDitheringDepth(NVDpyEvoRec *pDpyEvo, NvS64 ditheringDepth) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + switch (ditheringDepth) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS: + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS: + break; + default: + return FALSE; + } + + pDpyEvo->requestedDithering.depth = ditheringDepth; + + SetDitheringCommon(pDpyEvo); + + return TRUE; +} + +static NvBool GetDitheringDepth(const NVDpyEvoRec *pDpyEvo, + NvS64 *pDitheringDepth) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pDitheringDepth = pDpyEvo->requestedDithering.depth; + + return TRUE; +} + +static NvBool GetCurrentDithering(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDithering) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDithering = pDpyEvo->currentAttributes.dithering.enabled; + + return TRUE; +} + +static NvBool GetCurrentDitheringMode(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDitheringMode) +{ + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDitheringMode = + pDpyEvo->currentAttributes.dithering.mode; + + return TRUE; +} + +static NvBool GetCurrentDitheringDepth(const NVDpyEvoRec *pDpyEvo, + NvS64 *pCurrentDitheringDepth) +{ + + if (!DitherConfigurationAllowed(pDpyEvo)) { + return FALSE; + } + + *pCurrentDitheringDepth = + pDpyEvo->currentAttributes.dithering.depth; + + return TRUE; +} + +static NvBool DigitalVibranceAvailable(const NVDpyEvoRec *pDpyEvo) +{ + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + if (!nvDpyEvoIsActive(pDpyEvo)) { + return FALSE; + } + + if (!pDevEvo->hal->caps.supportsDigitalVibrance) { + return FALSE; + } + + return TRUE; +} + +/*! + * Assigns dvc on all dpys driven by pDpyEvo's head. + */ +static NvBool SetDigitalVibrance(NVDpyEvoRec *pDpyEvo, NvS64 dvc) +{ + NVEvoUpdateState updateState = { }; + + if (!DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + + dvc = NV_MAX(dvc, NV_EVO_DVC_MIN); + dvc = NV_MIN(dvc, NV_EVO_DVC_MAX); + + nvSetDVCEvo(pDpyEvo->pDispEvo, + pDpyEvo->head, dvc, &updateState); + + nvEvoUpdateAndKickOff(pDpyEvo->pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + + return TRUE; +} + +static NvBool GetDigitalVibrance(const NVDpyEvoRec *pDpyEvo, NvS64 *pDvc) +{ + if (!DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + + *pDvc = pDpyEvo->currentAttributes.dvc; + + return TRUE; +} + +static NvBool GetDigitalVibranceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalVibranceAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = NV_EVO_DVC_MIN; + pValidValues->u.range.max = NV_EVO_DVC_MAX; + + return TRUE; +} + +static NvBool ImageSharpeningAvailable(const NVDpyEvoRec *pDpyEvo) +{ + if (!pDpyEvo->pDispEvo->pDevEvo->hal->caps.supportsImageSharpening) { + return FALSE; + } + + if (!nvDpyEvoIsActive(pDpyEvo)) { + return FALSE; + } + + return pDpyEvo->currentAttributes.imageSharpening.available; +} + +/*! + * Assigns imageSharpening on all dpys driven by pDpyEvo's head. 
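+ * The requested value is clamped to the range
+ * [NV_EVO_IMAGE_SHARPENING_MIN, NV_EVO_IMAGE_SHARPENING_MAX] before being
+ * programmed.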
+ */ +static NvBool SetImageSharpening(NVDpyEvoRec *pDpyEvo, NvS64 imageSharpening) +{ + NVEvoUpdateState updateState = { }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + + if (!ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + + imageSharpening = NV_MAX(imageSharpening, NV_EVO_IMAGE_SHARPENING_MIN); + imageSharpening = NV_MIN(imageSharpening, NV_EVO_IMAGE_SHARPENING_MAX); + + nvSetImageSharpeningEvo(pDispEvo, + pDpyEvo->head, imageSharpening, &updateState); + + nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + + return TRUE; +} + +static NvBool GetImageSharpening(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpening) +{ + if (!ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + + *pImageSharpening = pDpyEvo->currentAttributes.imageSharpening.value; + + return TRUE; +} + +static NvBool GetImageSharpeningValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ImageSharpeningAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = NV_EVO_IMAGE_SHARPENING_MIN; + pValidValues->u.range.max = NV_EVO_IMAGE_SHARPENING_MAX; + + return TRUE; +} + +static NvBool GetImageSharpeningAvailable(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpeningAvailable) +{ + *pImageSharpeningAvailable = ImageSharpeningAvailable(pDpyEvo); + + return TRUE; +} + +static NvBool GetImageSharpeningDefault(const NVDpyEvoRec *pDpyEvo, + NvS64 *pImageSharpeningDefault) +{ + if (!nvDpyEvoIsActive(pDpyEvo)) { + return FALSE; + } + + *pImageSharpeningDefault = NV_EVO_IMAGE_SHARPENING_DEFAULT; + + return TRUE; +} + +static NvBool ColorSpaceAndRangeAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return ((pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) && + (pDpyEvo->pConnectorEvo->signalFormat != + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI)); +} + +/*! + * Send infoFrame with new color{Space,Range}. + */ +static void DpyPostColorSpaceOrRangeSetEvo(NVDpyEvoPtr pDpyEvo) +{ + NVEvoUpdateState updateState = { }; + + if (pDpyEvo->head == NV_INVALID_HEAD) { + return; + } + + /* + * Recompute the current ColorSpace and ColorRange, given updated requested + * values, and program any changes in EVO hardware. + */ + nvSetColorSpaceAndRangeEvo( + pDpyEvo->pDispEvo, + pDpyEvo->head, + pDpyEvo->requestedColorSpace, + pDpyEvo->requestedColorRange, + &updateState); + + /* Update InfoFrames as needed. */ + nvUpdateInfoFrames(pDpyEvo->pDispEvo, pDpyEvo->head); + + // Kick off + nvEvoUpdateAndKickOff(pDpyEvo->pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + + // XXX DisplayPort sets color format. 
+} + +static NvU32 DpyGetValidColorSpaces(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 val = (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB); + + if (pDpyEvo->pConnectorEvo->colorSpaceCaps.ycbcr422Capable && + pDpyEvo->colorSpaceCaps.ycbcr422Capable) { + val |= (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422); + } + + if (pDpyEvo->pConnectorEvo->colorSpaceCaps.ycbcr444Capable && + pDpyEvo->colorSpaceCaps.ycbcr444Capable) { + val |= (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444); + } + + return val; +} + +NvBool nvDpyValidateColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + NvU32 validMask = DpyGetValidColorSpaces(pDpyEvo); + + if (!ColorSpaceAndRangeAvailable(pDpyEvo) || !(validMask & (1 << value))) { + return FALSE; + } + + return TRUE; +} + +static NvBool SetRequestedColorSpace(NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + if (!nvDpyValidateColorSpace(pDpyEvo, value)) { + return FALSE; + } + + pDpyEvo->requestedColorSpace = value; + + DpyPostColorSpaceOrRangeSetEvo(pDpyEvo); + + return TRUE; +} + +static NvBool GetCurrentColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.colorSpace; + + return TRUE; +} + +static NvBool GetRequestedColorSpace(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->requestedColorSpace; + + return TRUE; +} + +static NvBool GetCurrentColorSpaceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = DpyGetValidColorSpaces(pDpyEvo); + + /* + * The current color space may be YUV420 depending on the current mode. + * Rather than determine whether this pDpy is capable of driving any + * YUV420 modes, just assume this is always a valid current color space. 
+ */ + pValidValues->u.bits.ints |= + (1 << NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420); + + return TRUE; +} + +static NvBool GetRequestedColorSpaceValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = DpyGetValidColorSpaces(pDpyEvo); + + return TRUE; +} + +static NvBool SetRequestedColorRange(NVDpyEvoRec *pDpyEvo, NvS64 value) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + pDpyEvo->requestedColorRange = value; + + DpyPostColorSpaceOrRangeSetEvo(pDpyEvo); + + return TRUE; +} + +static NvBool GetCurrentColorRange(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.colorRange; + + return TRUE; +} + +static NvBool GetRequestedColorRange(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->requestedColorRange; + + return TRUE; +} + +static NvBool GetColorRangeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!ColorSpaceAndRangeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + /* + * The preferred color range may always select between full or limited + * range, but the actual resulting color range depends on the current + * color space. Both color ranges are always valid values for both + * preferred and current color range attributes. + */ + pValidValues->u.bits.ints = (1 << NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL) | + (1 << NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED); + + return TRUE; +} + +static NvBool DigitalSignalAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP; +} + +static NvBool GetDigitalSignal(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DigitalSignalAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->currentAttributes.digitalSignal; + + return TRUE; +} + +static NvBool GetDigitalSignalValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalSignalAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool DigitalLinkTypeAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return (nvDpyEvoIsActive(pDpyEvo) && + (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP)); +} + +static NvBool GetDigitalLinkType(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DigitalLinkTypeAvailable(pDpyEvo)) { + return FALSE; + } + + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + *pValue = nvRMLaneCountToNvKms(pDpyEvo->dp.laneCount); + } else { + const NVHwModeTimingsEvo *pTimings = + nvGetCurrentModeTimingsForDpyEvo(pDpyEvo); + + if (pTimings == NULL) { + return FALSE; + } + + *pValue = nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings) ? 
+ NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL : + NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE; + } + + return TRUE; +} + +static NvBool GetDigitalLinkTypeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DigitalLinkTypeAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool DisplayportLinkRateAvailable(const NVDpyEvoRec *pDpyEvo) +{ + return ((pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) && + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)); +} + +static NvBool GetDisplayportLinkRate(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (!DisplayportLinkRateAvailable(pDpyEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.linkRate; + + return TRUE; +} + +static NvBool GetDisplayportLinkRateValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!DisplayportLinkRateAvailable(pDpyEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool GetDisplayportConnectorType(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.connectorType; + + return TRUE; +} + +static NvBool GetDisplayportConnectorTypeValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTEGER); + + return TRUE; +} + +static NvBool GetDisplayportIsMultistream(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = nvDpyEvoIsDPMST(pDpyEvo); + + return TRUE; +} + +static NvBool GetDisplayportIsMultistreamValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_BOOLEAN); + + return TRUE; +} + +static NvBool GetDisplayportSinkIsAudioCapable(const NVDpyEvoRec *pDpyEvo, + NvS64 *pValue) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + *pValue = pDpyEvo->dp.sinkIsAudioCapable; + + return TRUE; +} + +static NvBool GetDisplayportSinkIsAudioCapableValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_BOOLEAN); + + return TRUE; +} + +NvS64 nvRMLaneCountToNvKms(NvU32 rmLaneCount) +{ + switch (rmLaneCount) { + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_0: + // fallthrough + default: + nvAssert(!"Unexpected DisplayPort lane configuration!"); + // fallthrough + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_SINGLE; + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_2: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_DUAL; + case NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_4: + return NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE_QUAD; + } +} + +static NvBool SetStereoEvo(NVDpyEvoPtr pDpyEvo, NvS64 value) +{ + NvBool enable = !!value; + + if (pDpyEvo->head == NV_INVALID_HEAD) { + return FALSE; + } + + return 
nvSetStereoEvo(pDpyEvo->pDispEvo, pDpyEvo->head, enable); +} + +static NvBool GetStereoEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + if (pDpyEvo->head == NV_INVALID_HEAD) { + return FALSE; + } + + *pValue = !!nvGetStereoEvo(pDpyEvo->pDispEvo, pDpyEvo->head); + + return TRUE; +} + +static NvBool GetVrrMinRefreshRate(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + return FALSE; +} + +static NvBool GetVrrMinRefreshRateValidValues( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + return FALSE; +} + +static const struct { + NvBool (*set)(NVDpyEvoPtr pDpyEvo, NvS64 value); + NvBool (*get)(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue); + NvBool (*getValidValues)( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + enum NvKmsAttributeType type; +} DpyAttributesDispatchTable[] = { + [NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS] = { + .set = DpySetBacklightBrightness, + .get = DpyGetBacklightBrightness, + .getValidValues = DpyGetBacklightBrightnessValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_SCANLINE] = { + .set = NULL, + .get = GetScanLine, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_HEAD] = { + .set = NULL, + .get = GetHead, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING] = { + .set = SetDithering, + .get = GetDithering, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE] = { + .set = SetDitheringMode, + .get = GetDitheringMode, + .getValidValues = GetDitheringModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH] = { + .set = SetDitheringDepth, + .get = GetDitheringDepth, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING] = { + .set = NULL, + .get = GetCurrentDithering, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE] = { + .set = NULL, + .get = GetCurrentDitheringMode, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH] = { + .set = NULL, + .get = GetCurrentDitheringDepth, + .getValidValues = GetDitheringGenericValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_VIBRANCE] = { + .set = SetDigitalVibrance, + .get = GetDigitalVibrance, + .getValidValues = GetDigitalVibranceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING] = { + .set = SetImageSharpening, + .get = GetImageSharpening, + .getValidValues = GetImageSharpeningValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE] = { + .set = NULL, + .get = GetImageSharpeningAvailable, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_DEFAULT] = { + .set = NULL, + .get = GetImageSharpeningDefault, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE] = { + .set = SetRequestedColorSpace, + .get = GetRequestedColorSpace, + .getValidValues = GetRequestedColorSpaceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, 
+ [NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE] = { + .set = NULL, + .get = GetCurrentColorSpace, + .getValidValues = GetCurrentColorSpaceValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_RANGE] = { + .set = SetRequestedColorRange, + .get = GetRequestedColorRange, + .getValidValues = GetColorRangeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE] = { + .set = NULL, + .get = GetCurrentColorRange, + .getValidValues = GetColorRangeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL] = { + .set = NULL, + .get = GetDigitalSignal, + .getValidValues = GetDigitalSignalValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DIGITAL_LINK_TYPE] = { + .set = NULL, + .get = GetDigitalLinkType, + .getValidValues = GetDigitalLinkTypeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_LINK_RATE] = { + .set = NULL, + .get = GetDisplayportLinkRate, + .getValidValues = GetDisplayportLinkRateValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE] = { + .set = NULL, + .get = GetDisplayportConnectorType, + .getValidValues = GetDisplayportConnectorTypeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_IS_MULTISTREAM] = { + .set = NULL, + .get = GetDisplayportIsMultistream, + .getValidValues = GetDisplayportIsMultistreamValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_SINK_IS_AUDIO_CAPABLE] = { + .set = NULL, + .get = GetDisplayportSinkIsAudioCapable, + .getValidValues = GetDisplayportSinkIsAudioCapableValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG] = { + .set = nvSetFrameLockDisplayConfigEvo, + .get = nvGetFrameLockDisplayConfigEvo, + .getValidValues = nvGetFrameLockDisplayConfigValidValuesEvo, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_DPY_ATTRIBUTE_RASTER_LOCK] = { + .set = NULL, + .get = nvQueryRasterLockEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_UPDATE_FLIPLOCK] = { + .set = nvSetFlipLockEvo, + .get = nvGetFlipLockEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_UPDATE_STEREO] = { + .set = SetStereoEvo, + .get = GetStereoEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DPY_ATTRIBUTE_DPMS] = { + .set = nvRmSetDpmsEvo, + .get = NULL, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_DPY_ATTRIBUTE_VRR_MIN_REFRESH_RATE] = { + .set = NULL, + .get = GetVrrMinRefreshRate, + .getValidValues = GetVrrMinRefreshRateValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, +}; + +/*! + * Set pParams->attribute to pParams->value on the given dpy. 
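DpyAttributesDispatchTable is indexed directly by the attribute enum using C99 designated initializers; a NULL .set or .get entry is what makes an attribute read-only or write-only in the accessors that follow. A stripped-down sketch of the same lookup pattern, using hypothetical attribute names and plain C types rather than the real NVKMS structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ARRAY_LEN(a) (sizeof(a) / sizeof((a)[0]))

enum attr { ATTR_BRIGHTNESS, ATTR_SCANLINE, ATTR_COUNT };

struct attr_ops {
    bool (*set)(void *obj, int64_t value);        /* NULL => not writable */
    bool (*get)(const void *obj, int64_t *value); /* NULL => not readable */
};

static bool set_brightness(void *obj, int64_t value) { (void)obj; (void)value; return true; }
static bool get_brightness(const void *obj, int64_t *value) { (void)obj; *value = 100; return true; }
static bool get_scanline(const void *obj, int64_t *value) { (void)obj; *value = 0; return true; }

/* Designated initializers keep the table sparse and ordered by the enum. */
static const struct attr_ops dispatch[] = {
    [ATTR_BRIGHTNESS] = { .set = set_brightness, .get = get_brightness },
    [ATTR_SCANLINE]   = { .set = NULL,           .get = get_scanline   },
};

/* Same bounds-check + NULL-check sequence as the set path that follows. */
static bool set_attr(void *obj, uint32_t index, int64_t value)
{
    if (index >= ARRAY_LEN(dispatch) || dispatch[index].set == NULL) {
        return false;
    }
    return dispatch[index].set(obj, value);
}

The bounds check against ARRAY_LEN() matters because client-supplied attribute numbers index the table directly.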
+ */ +NvBool nvSetDpyAttributeEvo(NVDpyEvoPtr pDpyEvo, + struct NvKmsSetDpyAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + if (DpyAttributesDispatchTable[index].set == NULL) { + return FALSE; + } + + if (!DpyAttributesDispatchTable[index].set(pDpyEvo, + pParams->request.value)) { + return FALSE; + } + + if (pDpyEvo->head != NV_INVALID_HEAD) { + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDpyEvoRec *pClonedDpyEvo; + + /* + * The current attributes state should be consistent across all cloned + * dpys. + * + * XXX[2Heads1OR] Optimize this loop in follow on code change when + * apiHead -> pDpyEvo mapping will get implemented. + */ + FOR_ALL_EVO_DPYS(pClonedDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pClonedDpyEvo->head != pDpyEvo->head) { + continue; + } + nvDpyUpdateCurrentAttributes(pClonedDpyEvo); + } + } else { + nvDpyUpdateCurrentAttributes(pDpyEvo); + } + + return TRUE; +} + +/*! + * Get the value of pParams->attribute on the given dpy. + */ +NvBool nvGetDpyAttributeEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + if (DpyAttributesDispatchTable[index].get == NULL) { + return FALSE; + } + + return DpyAttributesDispatchTable[index].get(pDpyEvo, + &pParams->reply.value); +} + +/*! + * Get the valid values of pParams->attribute on the given dpy. + */ +NvBool nvGetDpyAttributeValidValuesEvo( + const NVDpyEvoRec *pDpyEvo, + struct NvKmsGetDpyAttributeValidValuesParams *pParams) +{ + NvU32 index = pParams->request.attribute; + struct NvKmsAttributeValidValuesCommonReply *pReply = + &pParams->reply.common; + + if (index >= ARRAY_LEN(DpyAttributesDispatchTable)) { + return FALSE; + } + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->readable = (DpyAttributesDispatchTable[index].get != NULL); + pReply->writable = (DpyAttributesDispatchTable[index].set != NULL); + + pReply->type = DpyAttributesDispatchTable[index].type; + + /* + * The getValidValues function provides three important things: + * - If type==Range, then assigns reply::u::range. + * - If type==IntBits, then assigns reply::u:bits::ints. + * - If the attribute is not currently available, returns FALSE. + * If the getValidValues function is NULL, assume the attribute is + * available. The type must not be something that requires assigning + * to reply::u. + */ + if (DpyAttributesDispatchTable[index].getValidValues == NULL) { + nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_INTBITS); + nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_RANGE); + return TRUE; + } + + return DpyAttributesDispatchTable[index].getValidValues(pDpyEvo, pReply); +} diff --git a/src/nvidia-modeset/src/nvkms-console-restore.c b/src/nvidia-modeset/src/nvkms-console-restore.c new file mode 100644 index 000000000..891fdbd61 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-console-restore.c @@ -0,0 +1,876 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-console-restore.h" +#include "nvkms-dpy.h" +#include "nvkms-flip.h" +#include "nvkms-modepool.h" +#include "nvkms-modeset.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-rm.h" +#include "nvkms-utils.h" + +#include "dp/nvdp-connector.h" + +/*! + * Find the first valid mode of given dimensions (width and height) that passes + * IMP at boot clocks. If input dimensions are not given then return first + * valid mode that passes IMP at boot clocks. + */ +static NvBool FindMode(NVDpyEvoPtr pDpyEvo, + const enum NvKmsSurfaceMemoryFormat format, + const NvU32 width, + const NvU32 height, + struct NvKmsMode *pModeOut) +{ + NvU32 index = 0; + + while (TRUE) { + struct NvKmsValidateModeIndexParams params = { }; + + params.request.dpyId = pDpyEvo->id; + params.request.modeIndex = index++; + params.request.modeValidation.overrides = NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS; + + nvValidateModeIndex(pDpyEvo, ¶ms.request, ¶ms.reply); + + if (params.reply.end) { + break; + } + + if (!params.reply.valid) { + continue; + } + + if (!(NVBIT64(format) & + params.reply.modeUsage.layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats)) { + continue; + } + + if (height != 0 && height != params.reply.mode.timings.vVisible) { + continue; + } + + if (width != 0 && width != params.reply.mode.timings.hVisible) { + continue; + } + + *pModeOut = params.reply.mode; + return TRUE; + } + + return FALSE; +} + +/*! + * Make sure pDispEvo->connectedDpys is up to date. + * + * Do this by querying the dpy dynamic data for all dpys. The results aren't + * actually important, but querying the dynamic data has the side effect of + * updating pDispEvo->connectedDpys. 
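FindMode() walks the dpy's mode list by index through nvValidateModeIndex(), stopping at reply.end and skipping modes that fail validation, that cannot scan out the console surface format, or that do not match the requested dimensions. A self-contained sketch of that enumeration pattern, with a hypothetical query callback standing in for nvValidateModeIndex() and plain C types (illustrative only, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for one reply of the validate-mode-by-index query. */
struct mode_reply {
    bool     end;                 /* no more modes                     */
    bool     valid;               /* mode passed validation            */
    uint64_t supported_formats;   /* bitmask, one bit per pixel format */
    uint32_t width, height;
};

/* Hypothetical enumeration callback. */
typedef void (*query_mode_fn)(uint32_t index, struct mode_reply *reply);

/* Return the index of the first valid mode that supports 'format' and,
 * when width/height are non-zero, matches those dimensions exactly. */
static bool find_mode(query_mode_fn query, unsigned format,
                      uint32_t width, uint32_t height, uint32_t *out_index)
{
    for (uint32_t index = 0; ; index++) {
        struct mode_reply reply = { 0 };

        query(index, &reply);

        if (reply.end) {
            return false;                       /* exhausted the mode list */
        }
        if (!reply.valid) {
            continue;
        }
        if (!(reply.supported_formats & ((uint64_t)1 << format))) {
            continue;                           /* format not scanout-able */
        }
        if (width != 0 && width != reply.width) {
            continue;
        }
        if (height != 0 && height != reply.height) {
            continue;
        }
        *out_index = index;
        return true;
    }
}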
+ */ +static NVDpyIdList UpdateConnectedDpys(NVDispEvoPtr pDispEvo) +{ + NVDpyEvoPtr pDpyEvo; + struct NvKmsQueryDpyDynamicDataParams *pParams = + nvCalloc(1, sizeof(*pParams)); + + if (!pParams) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN, + "Failed to allocate NvKmsQueryDpyDynamicDataParams"); + return pDispEvo->connectedDisplays; + } + + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + nvkms_memset(pParams, 0, sizeof(*pParams)); + nvDpyGetDynamicData(pDpyEvo, pParams); + } + + nvFree(pParams); + + return pDispEvo->connectedDisplays; +} + +static void FlipBaseToNull(NVDevEvoPtr pDevEvo) +{ + struct NvKmsFlipParams *pParams = nvCalloc(1, sizeof(*pParams)); + struct NvKmsFlipRequest *pRequest; + NvU32 sd; + NVDispEvoPtr pDispEvo; + NvBool ret = TRUE; + + if (!pParams) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Failed to allocate flip parameters for console restore base flip " + "to NULL"); + return; + } + + pRequest = &pParams->request; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + struct NvKmsFlipRequestOneSubDevice *pRequestSd = + &pRequest->sd[sd]; + NvU32 head; + for (head = 0; head < pDevEvo->numHeads; head++) { + struct NvKmsFlipCommonParams *pRequestHead = + &pRequestSd->head[head]; + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + pRequestSd->requestedHeadsBitMask |= NVBIT(head); + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + pRequestHead->layer[layer].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping NULL surface. + pRequestHead->layer[layer].compositionParams.specified = TRUE; + pRequestHead->layer[layer].completionNotifier.specified = TRUE; + pRequestHead->layer[layer].syncObjects.specified = TRUE; + } + + pRequest->commit = TRUE; + } + } + + // If no heads require changes, there's nothing to do. 
+ if (pRequest->commit) { + ret = nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, pRequest, + &pParams->reply, FALSE /* skipUpdate */, + FALSE /* allowFlipLock */); + } + nvFree(pParams); + + if (!ret) { + nvAssert(!"Console restore failed to flip base to NULL"); + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < pDevEvo->numHeads; head++) { + NvBool stoppedBase; + ret = nvRMIdleBaseChannel(pDevEvo, head, sd, &stoppedBase); + if (!ret) { + nvAssert(!"Console restore failed to idle base"); + } + } + } +} + +static NvBool InitModeOneHeadRequest( + NVDpyEvoRec *pDpyEvo, + NVSurfaceEvoPtr pSurfaceEvo, + const struct NvKmsMode *pOverrideMode, + const struct NvKmsSize *pOverrideViewPortSizeIn, + const struct NvKmsPoint *pOverrideViewPortPointIn, + const NvU32 head, + struct NvKmsSetModeOneHeadRequest *pRequestHead) +{ + + struct NvKmsFlipCommonParams *pFlip = &pRequestHead->flip; + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 layer; + + if (pOverrideMode != NULL) { + pRequestHead->mode = *pOverrideMode; + } else { + if (!FindMode(pDpyEvo, + pSurfaceEvo->format, + 0 /* Ignore mode width */, + 0 /* Ignore mode height */, + &pRequestHead->mode)) { + return FALSE; + } + } + + pRequestHead->dpyIdList = nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id); + pRequestHead->modeValidationParams.overrides = + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS; + if (pOverrideViewPortSizeIn != NULL) { + pRequestHead->viewPortSizeIn = *pOverrideViewPortSizeIn; + } else { + pRequestHead->viewPortSizeIn.width = pSurfaceEvo->widthInPixels; + pRequestHead->viewPortSizeIn.height = pSurfaceEvo->heightInPixels; + } + + pFlip->viewPortIn.specified = TRUE; + if (pOverrideViewPortPointIn != NULL) { + pFlip->viewPortIn.point = *pOverrideViewPortPointIn; + } + pFlip->layer[NVKMS_MAIN_LAYER].surface.handle[NVKMS_LEFT] = + pDevEvo->fbConsoleSurfaceHandle; + + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.specified = TRUE; + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val.width = pSurfaceEvo->widthInPixels; + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val.height = pSurfaceEvo->heightInPixels; + + pFlip->layer[NVKMS_MAIN_LAYER].sizeOut.specified = TRUE; + pFlip->layer[NVKMS_MAIN_LAYER].sizeOut.val = + pFlip->layer[NVKMS_MAIN_LAYER].sizeIn.val; + + /* Disable other layers except Main */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + + if (layer == NVKMS_MAIN_LAYER) { + pFlip->layer[layer].csc.matrix = NVKMS_IDENTITY_CSC_MATRIX; + pFlip->layer[layer].csc.specified = TRUE; + } + pFlip->layer[layer].surface.specified = TRUE; + + pFlip->layer[layer].completionNotifier.specified = TRUE; + pFlip->layer[layer].syncObjects.specified = TRUE; + pFlip->layer[layer].compositionParams.specified = TRUE; + } + + // Disable other features. 
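Both FlipBaseToNull() and InitModeOneHeadRequest() mark every layer as specified, even layers that are given no surface: in the flip-request convention used here, a specified-but-empty layer is explicitly torn down, while an unspecified one would keep its previous contents. A rough standalone illustration of building such a request (hypothetical structures, not the real NvKmsFlipCommonParams):

#include <stdbool.h>
#include <stdint.h>

#define MAX_LAYERS 8
#define MAIN_LAYER 0

struct layer_params {
    bool     surface_specified;   /* take 'surface_handle' into account at all */
    uint32_t surface_handle;      /* 0 => disable scanout for this layer       */
};

struct head_flip_params {
    struct layer_params layer[MAX_LAYERS];
};

/* Program only the main layer; explicitly clear every other layer rather
 * than leaving it untouched, so no stale state survives the request. */
static void build_console_flip(struct head_flip_params *p,
                               uint32_t console_surface_handle,
                               uint32_t num_layers)
{
    for (uint32_t layer = 0; layer < num_layers && layer < MAX_LAYERS; layer++) {
        p->layer[layer].surface_specified = true;
        p->layer[layer].surface_handle =
            (layer == MAIN_LAYER) ? console_surface_handle : 0;
    }
}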
+ pFlip->cursor.imageSpecified = TRUE; + pRequestHead->lut.input.specified = TRUE; + pRequestHead->lut.output.specified = TRUE; + pRequestHead->lut.synchronous = TRUE; + pRequestHead->allowGsync = FALSE; + pRequestHead->allowAdaptiveSync = + NVKMS_ALLOW_ADAPTIVE_SYNC_DISABLED; + + return TRUE; +} + +static NvBool +ConstructModeOneHeadRequestForOneDpy(NVDpyEvoRec *pDpyEvo, + NVSurfaceEvoPtr pSurfaceEvo, + struct NvKmsSetModeParams *pParams, + const NvU32 dispIndex, + NvU32 *pAvailableHeadsMask) +{ + NvBool ret = FALSE; + const NvU32 possibleHeads = *pAvailableHeadsMask & + pDpyEvo->pConnectorEvo->validHeadMask; + + if (possibleHeads == 0 || pDpyEvo->isVrHmd) { + goto done; + } + + const NvU32 head = BIT_IDX_32(LOWESTBIT(possibleHeads)); + + NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + struct NvKmsSetModeRequest *pRequest = &pParams->request; + struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + if (!InitModeOneHeadRequest(pDpyEvo, + pSurfaceEvo, + NULL /* Use default Mode */, + NULL /* Use default ViewPortSizeIn */, + NULL /* Use default ViewPortPointIn */, + head, + pRequestHead)) { + goto done; + } + + nvAssert(!pRequestHead->viewPortOutSpecified); + nvAssert(!pRequest->commit); + + while (!nvSetDispModeEvo(pDevEvo, + pDevEvo->pNvKmsOpenDev, + pRequest, + &pParams->reply, + TRUE /* bypassComposition */, + FALSE /* doRasterLock */)) { + /* + * If validation is failing even after disabling scaling then leave + * this dpy inactive. + */ + if (pRequestHead->viewPortOutSpecified) { + nvkms_memset(pRequestHead, 0, sizeof(*pRequestHead)); + goto done; + } + + /* Disable scaling and try again */ + pRequestHead->viewPortOut = (struct NvKmsRect) { + .height = pRequestHead->viewPortSizeIn.height, + .width = pRequestHead->viewPortSizeIn.width, + .x = 0, + .y = 0, + }; + pRequestHead->viewPortOutSpecified = TRUE; + } + + *pAvailableHeadsMask &= ~NVBIT(head); + + ret = TRUE; + +done: + + return ret; +} + +typedef struct _TiledDisplayInfo { + NVDpyIdList detectedDpysList; + NvBool isDetectComplete; + NvBool isCapToScaleSingleTile; +} TiledDisplayInfo; + +/* + * Detect Tiled-display of topology-id described in given pDisplayIdInfo. + * + * Loop over given all dpys from candidateConnectedDpys list, look for matching + * topology-id. Add dpys of matching topology-id into + * detectedTiledDisplayDpysList list. Mark Tiled-Display detect complete if all + * exact number of tiles are found. + */ +static NvBool DetectTiledDisplay(const NVDispEvoRec *pDispEvo, + const NVT_DISPLAYID_INFO *pDisplayIdInfo, + const NVDpyIdList candidateConnectedDpys, + TiledDisplayInfo *pTiledDisplayInfo) +{ + const NVT_TILEDDISPLAY_TOPOLOGY_ID nullTileDisplayTopoId = { 0 }; + const NVDpyEvoRec *pDpyEvo; + const NvU32 numTiles = pDisplayIdInfo->tile_topology.row * + pDisplayIdInfo->tile_topology.col; + const NvU32 numTilesMask = NVBIT(numTiles) - 1; + NvU32 detectedTilesCount = 0; + NvU32 detectedTilesMask = 0; + + NVDpyIdList detectedTiledDisplayDpysList = nvEmptyDpyIdList(); + + /* + * If parsed edid is valid and tile_topology_id is non-zero then the dpy + * is considered a valid tile of a tiled display. + * + * The 'tile_topology_id' is a triplet of ids consisting of vendor_id, + * product_id, and serial_number. 
The DisplayId specification does not + * clearly define an invalid 'tile_topology_id', but here the + * tile_topology_id is considered invalid only if all three ids are zero + * which is consistent with other protocols like RandR1.2 'The tile group + * identifier'. + */ + if (!nvkms_memcmp(&pDisplayIdInfo->tile_topology_id, + &nullTileDisplayTopoId, sizeof(nullTileDisplayTopoId))) { + return FALSE; + } + + /* + * Reject Tiled-Display consists of multiple physical display enclosures or + * requires to configure bezel. + */ + if (!pDisplayIdInfo->tile_capability.bSingleEnclosure || + pDisplayIdInfo->tile_capability.bHasBezelInfo) { + return FALSE; + } + + /* + * Reject Tiled-Display which has number of horizontal or vertical tiles + * greater than 4. + */ + if (pDisplayIdInfo->tile_topology.row <= 0 || + pDisplayIdInfo->tile_topology.col <= 0 || + pDisplayIdInfo->tile_topology.row > 4 || + pDisplayIdInfo->tile_topology.col > 4) { + return FALSE; + } + + FOR_ALL_EVO_DPYS(pDpyEvo, candidateConnectedDpys, pDispEvo) { + const NVT_EDID_INFO *pEdidInfo = &pDpyEvo->parsedEdid.info; + const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo = + &pEdidInfo->ext_displayid; + + if (!pDpyEvo->parsedEdid.valid) { + continue; + } + + if (nvkms_memcmp(&pDisplayIdInfo->tile_topology_id, + &pDpyDisplayIdInfo->tile_topology_id, + sizeof(&pDpyDisplayIdInfo->tile_topology_id))) { + continue; + } + + /* + * Tiled-Display Topology: + * + * |-----------col + * + * ___ +------------+------------+... + * | | (x=0,y=0) | (x=1,y=0) | + * | | | | + * | | | | + * | +------------+------------+ + * row | (x=0,y=1) | (x=1,y=1) | + * | | | + * | | | + * +------------+------------+ + * . + * . + * . + */ + if (pDpyDisplayIdInfo->tile_topology.row != + pDisplayIdInfo->tile_topology.row) { + continue; + } + + if (pDpyDisplayIdInfo->tile_topology.col != + pDisplayIdInfo->tile_topology.col) { + continue; + } + + if (pDpyDisplayIdInfo->tile_location.x >= + pDpyDisplayIdInfo->tile_topology.col) { + continue; + } + + if (pDpyDisplayIdInfo->tile_location.y >= + pDpyDisplayIdInfo->tile_topology.row) { + continue; + } + + nvAssert(pDpyDisplayIdInfo->tile_capability.single_tile_behavior == + pDisplayIdInfo->tile_capability.single_tile_behavior); + + detectedTiledDisplayDpysList = + nvAddDpyIdToDpyIdList(pDpyEvo->id, detectedTiledDisplayDpysList); + + detectedTilesMask |= NVBIT((pDpyDisplayIdInfo->tile_location.y * + pDpyDisplayIdInfo->tile_topology.col) + + (pDpyDisplayIdInfo->tile_location.x)); + detectedTilesCount++; + } + + pTiledDisplayInfo->detectedDpysList = detectedTiledDisplayDpysList; + + if (detectedTilesCount != numTiles || detectedTilesMask != numTilesMask) { + pTiledDisplayInfo->isDetectComplete = FALSE; + } else { + pTiledDisplayInfo->isDetectComplete = TRUE; + } + + pTiledDisplayInfo->isCapToScaleSingleTile = + pDisplayIdInfo->tile_capability.single_tile_behavior == + NVT_SINGLE_TILE_BEHAVIOR_SCALE; + + return TRUE; +} + +/* Construct modeset request for given Tiled-display */ +static NvBool +ConstructModeRequestForTiledDisplay(const NVDispEvoRec *pDispEvo, + NVSurfaceEvoPtr pSurfaceEvo, + struct NvKmsSetModeParams *pParams, + const NvU32 dispIndex, + NVDpyIdList tiledDisplayDpysList, + NvU32 *pAvailableHeadsMask) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + /* + * Get arbitrary dpy from tiledDisplayDpysList, + * to extract Tiled-Display information which should be same across all + * tiles. 
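DetectTiledDisplay() decides that a whole tiled display is present by setting one bit per reported tile position, computed row-major as (y * columns + x), and comparing both the tile count and the resulting bitmask against what the topology advertises. A small self-contained illustration of that bookkeeping with plain C types (not part of the patch):

#include <stdbool.h>
#include <stdint.h>

struct tile_location { uint32_t x, y; };

/* Accumulate one bit per reported tile position and check that every
 * position of a rows x cols topology was seen exactly once overall. */
static bool tiles_complete(const struct tile_location *tiles, uint32_t count,
                           uint32_t rows, uint32_t cols)
{
    const uint32_t num_tiles = rows * cols;
    const uint32_t expected_mask = (num_tiles == 0 || num_tiles >= 32)
                                   ? 0 : ((1u << num_tiles) - 1u);
    uint32_t seen_mask = 0;

    if (expected_mask == 0) {
        return false;                     /* empty or oversized topology */
    }

    for (uint32_t i = 0; i < count; i++) {
        if (tiles[i].x >= cols || tiles[i].y >= rows) {
            return false;                 /* location outside the topology */
        }
        seen_mask |= 1u << (tiles[i].y * cols + tiles[i].x);
    }

    /* Complete only when the count and the position bitmask both match. */
    return count == num_tiles && seen_mask == expected_mask;
}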
+ */ + NVDpyEvoRec *pArbitraryDpyEvo = + nvGetOneArbitraryDpyEvo(tiledDisplayDpysList, pDispEvo); + const NVT_DISPLAYID_INFO *pPrimaryDisplayIdInfo = + &pArbitraryDpyEvo->parsedEdid.info.ext_displayid; + const NvU32 numRows = pPrimaryDisplayIdInfo->tile_topology.row; + const NvU32 numColumns = pPrimaryDisplayIdInfo->tile_topology.col; + /* + * Split entire input viewport across all tiles of Tiled-Display. + */ + const struct NvKmsSize viewPortSizeIn = { + .width = (pSurfaceEvo->widthInPixels / numColumns), + .height = (pSurfaceEvo->heightInPixels / numRows), + }; + struct NvKmsSetModeRequest *pRequest = &pParams->request; + struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + NvU32 firstClaimedHead = NV_INVALID_HEAD; + NvU32 claimedHeadMask = 0x0; + NVDpyEvoRec *pDpyEvo; + NvU32 head; + + /* + * Return failure if not enough number of heads available to construct + * modeset request for Tiled-Display. + */ + if (nvPopCount32(*pAvailableHeadsMask) < + nvCountDpyIdsInDpyIdList(tiledDisplayDpysList)) { + return FALSE; + } + + /* + * Return failure if input viewport has not been split across + * tiles evenly. + */ + if ((pSurfaceEvo->widthInPixels % numRows != 0) || + (pSurfaceEvo->heightInPixels % numColumns != 0)) { + return FALSE; + } + + FOR_ALL_EVO_DPYS(pDpyEvo, tiledDisplayDpysList, pDispEvo) { + const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo = + &pDpyEvo->parsedEdid.info.ext_displayid; + const struct NvKmsPoint viewPortPointIn = { + .x = pDpyDisplayIdInfo->tile_location.x * viewPortSizeIn.width, + .y = pDpyDisplayIdInfo->tile_location.y * viewPortSizeIn.height + }; + const NvU32 possibleHeads = *pAvailableHeadsMask & + pDpyEvo->pConnectorEvo->validHeadMask & + ~claimedHeadMask; + + if (possibleHeads == 0 || pDpyEvo->isVrHmd) { + goto failed; + } + + const NvU32 head = BIT_IDX_32(LOWESTBIT(possibleHeads)); + struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + struct NvKmsMode mode; + + if (firstClaimedHead == NV_INVALID_HEAD) { + /* + * Find mode of native dimensions reported in Tiled-Display + * information. 
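ConstructModeRequestForTiledDisplay() splits the console surface evenly across the tile grid: every tile gets viewPortSizeIn = (surface width / columns, surface height / rows) and a viewPortPointIn scaled by its tile_location. The sketch below works through that arithmetic with hypothetical types; note that it checks divisibility against the same divisors it uses for the split, whereas the patch's modulo test pairs the width with the row count and the height with the column count.

#include <stdbool.h>
#include <stdint.h>

struct viewport { uint32_t x, y, width, height; };

/* Compute the input viewport for the tile at (tile_x, tile_y) in a
 * rows x cols tiled display scanning out one shared surface. Fails if
 * the surface does not divide evenly across the tiles. */
static bool tile_viewport(uint32_t surface_width, uint32_t surface_height,
                          uint32_t rows, uint32_t cols,
                          uint32_t tile_x, uint32_t tile_y,
                          struct viewport *out)
{
    if (rows == 0 || cols == 0 || tile_x >= cols || tile_y >= rows) {
        return false;
    }
    if ((surface_width % cols) != 0 || (surface_height % rows) != 0) {
        return false;                    /* uneven split: fall back to one head */
    }

    out->width  = surface_width  / cols;
    out->height = surface_height / rows;
    out->x      = tile_x * out->width;   /* left edge of this tile's slice */
    out->y      = tile_y * out->height;  /* top edge of this tile's slice  */
    return true;
}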
+ */ + if (!FindMode(pDpyEvo, + pSurfaceEvo->format, + pPrimaryDisplayIdInfo->native_resolution.width, + pPrimaryDisplayIdInfo->native_resolution.height, + &mode)) { + goto failed; + } + + firstClaimedHead = head; + } else { + /* All tiles should support same set of modes */ + mode = pRequestDisp->head[firstClaimedHead].mode; + } + + claimedHeadMask |= NVBIT(head); + + if (!InitModeOneHeadRequest(pDpyEvo, + pSurfaceEvo, + &mode, + &viewPortSizeIn, + &viewPortPointIn, + head, + pRequestHead)) { + goto failed; + } + } + + nvAssert(!pRequest->commit); + + if (!nvSetDispModeEvo(pDevEvo, + pDevEvo->pNvKmsOpenDev, + pRequest, + &pParams->reply, + TRUE /* bypassComposition */, + FALSE /* doRasterLock */)) { + goto failed; + } + *pAvailableHeadsMask &= ~claimedHeadMask; + + return TRUE; + +failed: + + for (head = 0; head < ARRAY_LEN(pRequestDisp->head); head++) { + if ((NVBIT(head) & claimedHeadMask) == 0x0) { + continue; + } + nvkms_memset(&pRequestDisp->head[head], + 0, + sizeof(pRequestDisp->head[head])); + } + + return FALSE; +} + +static NvBool isDpMSTModeActiveOnAnyConnector(NVDevEvoPtr pDevEvo) +{ + NvU32 i; + NVDispEvoPtr pDispEvo; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + + if ((pConnectorEvo != NULL) && + nvConnectorUsesDPLib(pConnectorEvo)) { + const enum NVDpLinkMode activeLinkMode = + nvDPGetActiveLinkMode(pConnectorEvo->pDpLibConnector); + + nvAssert(activeLinkMode != NV_DP_LINK_MODE_OFF); + + if (activeLinkMode == NV_DP_LINK_MODE_MST) { + return TRUE; + } + } + } + } + + return FALSE; +} + +/*! + * Attempt to restore the console. + * + * If a framebuffer console surface was successfully imported from RM, then use + * the core channel to set a mode that displays it. + * + * Enables as many heads as possible in a clone configuration. In first pass + * for connected boot dpys and in second pass for other remaining dpys: + * + * 1. Populate modeset request to enable given dpy. + * + * 2. Do modeset request validation, if fails then disable scaling. If + * modeset request validation fails even after disabling scaling then do not + * enable that dpy. + * + * If console restore succeeds, set pDevEvo->skipConsoleRestore to skip + * deallocating the core channel and triggering RM's console restore code. + */ +NvBool nvEvoRestoreConsole(NVDevEvoPtr pDevEvo, const NvBool allowMST) +{ + NvBool ret = FALSE; + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pDevEvo->pNvKmsOpenDev); + NVSurfaceEvoPtr pSurfaceEvo = + nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + pDevEvo->fbConsoleSurfaceHandle); + struct NvKmsSetModeParams *params; + + /* + * If this function fails to restore a console then NVKMS frees + * and reallocates the core channel, to attempt the console + * restore using Resman. The core channel reallocation also may + * fail and nvEvoRestoreConsole() again may get called from + * nvFreeDevEvo() when client frees the NVKMS device. + * + * If nvEvoRestoreConsole() gets called after the core channel + * allocation/reallocation failure then do nothing and return + * early. + */ + if (pDevEvo->displayHandle == 0x0) { + goto done; + } + + /* + * If any DP-MST mode is active on any connector of this device but + * DP-MST is disallowed then force console-restore. 
+ */ + if (pDevEvo->skipConsoleRestore && + !allowMST && isDpMSTModeActiveOnAnyConnector(pDevEvo)) { + pDevEvo->skipConsoleRestore = FALSE; + } + + if (pDevEvo->skipConsoleRestore) { + ret = TRUE; + goto done; + } + + if (!pSurfaceEvo) { + // No console surface to restore. + goto done; + } + + FlipBaseToNull(pDevEvo); + + params = nvPreallocGet(pDevEvo, PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE, + sizeof(*params)); + nvkms_memset(params, 0, sizeof(*params)); + + nvDPSetAllowMultiStreaming(pDevEvo, allowMST); + + // Construct the request. + // + // To start with, try to enable as many connected dpys as possible, + // preferring boot displays first. + struct NvKmsSetModeRequest *pRequest = ¶ms->request; + NvBool foundDpysConfigForConsoleRestore = FALSE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 availableHeadsMask = NVBIT(pDevEvo->numHeads) - 1; + NVDpyIdList connectedDpys = UpdateConnectedDpys(pDispEvo); + const NVDpyIdList connectedBootDpys = + nvIntersectDpyIdListAndDpyIdList(connectedDpys, + pDispEvo->bootDisplays); + struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + int pass; + + pRequest->requestedDispsBitMask |= NVBIT(dispIndex); + pRequestDisp->requestedHeadsBitMask = availableHeadsMask; + + // Only enable heads on the subdevice that actually contains the + // console. + if (dispIndex != pDevEvo->vtFbInfo.subDeviceInstance) { + continue; + } + + NVDpyIdList handledDpysList = nvEmptyDpyIdList(); + + for (pass = 0; pass < 2; pass++) { + NVDpyIdList candidateDpys; + NVDpyEvoPtr pDpyEvo; + + if (availableHeadsMask == 0) { + break; + } + + if (pass == 0) { + candidateDpys = connectedBootDpys; + } else { + candidateDpys = nvDpyIdListMinusDpyIdList(connectedDpys, + connectedBootDpys); + } + + FOR_ALL_EVO_DPYS(pDpyEvo, candidateDpys, pDispEvo) { + NvBool isTiledDisplayFound = FALSE; + TiledDisplayInfo tiledDisplayInfo = { 0 }; + NvBool isTiledDisplayEnable = FALSE; + const NVT_DISPLAYID_INFO *pDpyDisplayIdInfo = + pDpyEvo->parsedEdid.valid ? + &pDpyEvo->parsedEdid.info.ext_displayid : NULL; + NvBool done = FALSE; + + if (availableHeadsMask == 0) { + break; + } + + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, + handledDpysList)) { + continue; + } + + isTiledDisplayFound = + pDpyDisplayIdInfo != NULL && + DetectTiledDisplay(pDispEvo, + pDpyDisplayIdInfo, + nvDpyIdListMinusDpyIdList( + connectedDpys, handledDpysList), + &tiledDisplayInfo); + + /* + * Construct modeset request for Tiled-Display which don't have + * a capability to scale single tile input across entire + * display. If fails then fallback to construct modeset request + * for this single dpy. + */ + + if (isTiledDisplayFound && + tiledDisplayInfo.isDetectComplete && + !tiledDisplayInfo.isCapToScaleSingleTile) { + + done = ConstructModeRequestForTiledDisplay( + pDispEvo, + pSurfaceEvo, + params, + dispIndex, + tiledDisplayInfo.detectedDpysList, + &availableHeadsMask); + isTiledDisplayEnable = done; + } + + /* + * If Tiled-Display has capability to scale single tile input + * across entire display then for console restore it is + * sufficient to light up any single tile and ignore rest of + * remaining tiles. 
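The restore loop in nvEvoRestoreConsole() makes two passes per disp: pass 0 over the connected boot displays (connected intersected with boot), pass 1 over the remaining connected displays, with displays already claimed (for example as tiles of a handled tiled display) skipped via handledDpysList. The real code uses the NVDpyIdList helpers; the sketch below models the same set logic with plain bitmasks and a hypothetical per-display enable step:

#include <stdint.h>
#include <stdio.h>

/* Each bit represents one display; the real driver uses NVDpyIdList. */
typedef uint32_t dpy_mask_t;

static void restore_passes(dpy_mask_t connected, dpy_mask_t boot)
{
    dpy_mask_t handled = 0;

    for (int pass = 0; pass < 2; pass++) {
        /* Pass 0: connected boot displays; pass 1: the remaining ones. */
        dpy_mask_t candidates = (pass == 0) ? (connected & boot)
                                            : (connected & ~boot);

        for (int id = 0; id < 32; id++) {
            dpy_mask_t bit = (dpy_mask_t)1 << id;

            if (!(candidates & bit) || (handled & bit)) {
                continue;                /* not a candidate, or already claimed */
            }

            /* ... try to build a head request for this display ... */
            printf("pass %d: enabling dpy %d\n", pass, id);

            handled |= bit;              /* a tiled display would claim several bits */
        }
    }
}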
+ */ + + if (!done || + !isTiledDisplayFound || + !tiledDisplayInfo.isDetectComplete || + tiledDisplayInfo.isCapToScaleSingleTile) { + + done = ConstructModeOneHeadRequestForOneDpy( + pDpyEvo, + pSurfaceEvo, + params, + dispIndex, + &availableHeadsMask); + isTiledDisplayEnable = + done && tiledDisplayInfo.isCapToScaleSingleTile; + } + + handledDpysList = + nvAddDpyIdToDpyIdList(pDpyEvo->id, handledDpysList); + + if (isTiledDisplayEnable) { + handledDpysList = nvAddDpyIdListToDpyIdList( + tiledDisplayInfo.detectedDpysList, + handledDpysList); + } + + foundDpysConfigForConsoleRestore = + foundDpysConfigForConsoleRestore || done; + + } + } + } + + /* + * Disable all (flip/raster) locks, dirty locking state in hardware + * left behind by NVKMS console restore causes XID errors and engine hang + * on next modeset because the NVKMS doesn't get back existing display + * hardware state at the time of initialization. + */ + + if (foundDpysConfigForConsoleRestore) { + pRequest->commit = TRUE; + + ret = nvSetDispModeEvo(pDevEvo, + pDevEvo->pNvKmsOpenDev, + pRequest, + ¶ms->reply, + TRUE /* bypassComposition */, + FALSE /* doRasterLock */); + } + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE); + +done: + nvkms_free_timer(pDevEvo->consoleRestoreTimer); + pDevEvo->consoleRestoreTimer = NULL; + + /* If console restore failed then simply shut down all heads */ + if (!ret) { + nvShutDownHeads(pDevEvo, NULL /* pTestFunc, shut down all heads */); + } + + // If restoring the console from here succeeded, then skip triggering RM's + // console restore. + pDevEvo->skipConsoleRestore = ret; + return ret; +} diff --git a/src/nvidia-modeset/src/nvkms-cursor.c b/src/nvidia-modeset/src/nvkms-cursor.c new file mode 100644 index 000000000..7b23105f4 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-cursor.c @@ -0,0 +1,401 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* this source file contains routines for setting and moving the cursor. 
+ * NV50 specific */ + + + +#include "nvkms-cursor.h" +#include "nvkms-types.h" +#include "nvkms-dma.h" +#include "nvkms-utils.h" +#include "nvkms-rm.h" +#include "nvkms-evo.h" +#include "nvkms-vrr.h" +#include "nvkms-surface.h" +#include "nvkms-flip.h" + +#include "nvkms-rmapi.h" + +#include /* sizeof(GK104DispCursorControlPio) */ + +#include /* NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS */ + +/*! + * Get the NVSurfaceEvoPtrs described by NvKmsSetCursorImageCommonParams. + * + * Look up the surfaces described by NvKmsSetCursorImageCommonParams, + * and check that the surfaces are valid for use by cursor on the + * given pDevEvo. + * + * \param[in] pDevEvo The device on which the cursor image will be set. + * \param[in] pParams The parameter structure indicating the surfaces. + * \param[out] pSurfaceEvo The array of surfaces to be assigned. + * + * \return If the parameters are valid, return TRUE and assign + * pSurfaceEvo. Otherwise, return FALSE. + */ +NvBool nvGetCursorImageSurfaces( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsSetCursorImageCommonParams *pParams, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]) +{ + NvU32 eye; + + nvkms_memset(pSurfaceEvos, 0, sizeof(NVSurfaceEvoRec *) * NVKMS_MAX_EYES); + + /* XXX NVKMS TODO: add support for stereo cursor */ + nvAssert(pParams->surfaceHandle[NVKMS_RIGHT] == 0); + + for (eye = 0; eye < ARRAY_LEN(pParams->surfaceHandle); eye++) { + if (pParams->surfaceHandle[eye] != 0) { + NVSurfaceEvoPtr pSurfaceEvo = NULL; + pSurfaceEvo = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + pParams->surfaceHandle[eye], + NV_EVO_CHANNEL_MASK_CURSOR_ALL); + if ((pSurfaceEvo == NULL) || + (pSurfaceEvo->isoType != NVKMS_MEMORY_ISO)) { + return FALSE; + } + + pSurfaceEvos[eye] = pSurfaceEvo; + } + } + + return TRUE; +} + +static void +SetCursorImage(NVDispEvoPtr pDispEvo, + const NvU32 head, + NVSurfaceEvoRec *pSurfaceEvoNew, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoUpdateState updateState = { }; + const NvU32 sd = pDispEvo->displayOwner; + NvBool changed = FALSE; + + NVSurfaceEvoPtr pSurfaceEvoOld = + pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo; + + if (pSurfaceEvoNew != NULL && + nvkms_memcmp(pCursorCompParams, + &pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams, + sizeof(*pCursorCompParams)) != 0) { + pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams = + *pCursorCompParams; + changed = TRUE; + } + + if (pSurfaceEvoNew != pSurfaceEvoOld) { + + if (pSurfaceEvoNew != NULL) { + nvEvoIncrementSurfaceRefCnts(pSurfaceEvoNew); + } + + if (pSurfaceEvoOld) { + nvEvoDecrementSurfaceRefCnts(pSurfaceEvoOld); + } + + pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo = pSurfaceEvoNew; + changed = TRUE; + } + + if (changed) { + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetCursorImage( + pDevEvo, + head, + pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo, + &updateState, + &pDevEvo->gpus[sd].headState[head].cursor.cursorCompParams); + nvEvoUpdateAndKickOff(pDispEvo, FALSE, &updateState, + TRUE /* releaseElv */); + nvPopEvoSubDevMask(pDevEvo); + } + + if (pSurfaceEvoNew) { + nvTriggerVrrUnstallSetCursorImage(pDispEvo, changed); + } +} + +static NvBool +FlipCursorImage(NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + NvU32 head, + const struct NvKmsSetCursorImageCommonParams *pImageParams) +{ + const NvU32 sd = pDispEvo->displayOwner; + NvBool ret; + struct NvKmsFlipParams 
*pFlipParams; + struct NvKmsFlipRequest *pFlipRequest; + + pFlipParams = nvCalloc(1, sizeof(*pFlipParams)); + if (pFlipParams == NULL) { + return FALSE; + } + + pFlipRequest = &pFlipParams->request; + + pFlipRequest->sd[sd].head[head] = (struct NvKmsFlipCommonParams) { + .cursor = { + .image = *pImageParams, + .imageSpecified = TRUE, + }, + }; + + pFlipRequest->sd[sd].requestedHeadsBitMask = NVBIT(head); + + pFlipRequest->commit = TRUE; + + ret = nvFlipEvo(pDispEvo->pDevEvo, + pOpenDevice, + pFlipRequest, + &pFlipParams->reply, + FALSE /* skipUpdate */, + FALSE /* allowFlipLock */); + + nvFree(pFlipParams); + + return ret; +} + +NvBool nvSetCursorImage( + NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvU32 head, + const struct NvKmsSetCursorImageCommonParams *pParams) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]; + NVSurfaceEvoPtr pSurfaceEvoNew; + NvBool flipCursorImage = FALSE; + + if (!nvGetCursorImageSurfaces(pDevEvo, pOpenDevSurfaceHandles, + pParams, pSurfaceEvos)) { + return FALSE; + } + + pSurfaceEvoNew = pSurfaceEvos[NVKMS_LEFT]; + + /* + * Use flip to apply or remove workaround for hardware bug 2052012 + */ + if (NV5070_CTRL_SYSTEM_GET_CAP( + pDevEvo->capsBits, + NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH)) { + const NvU32 sd = pDispEvo->displayOwner; + + NVSurfaceEvoPtr pSurfaceEvoOld = + pDevEvo->gpus[sd].headState[head].cursor.pSurfaceEvo; + + if ((pSurfaceEvoOld != pSurfaceEvoNew) && + (pSurfaceEvoOld == NULL || pSurfaceEvoNew == NULL)) { + flipCursorImage = TRUE; + } + } + + if (flipCursorImage) { + return FlipCursorImage(pDispEvo, + pOpenDevice, head, pParams); + } + + SetCursorImage(pDispEvo, + head, + pSurfaceEvoNew, + &pParams->cursorCompParams); + return TRUE; +} + +void nvEvoMoveCursorInternal(NVDispEvoPtr pDispEvo, + NvU32 head, NvS16 x, NvS16 y) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + + pDevEvo->cursorHal->MoveCursor(pDevEvo, sd, head, x, y); + + /* If the cursor is visible, trigger VRR unstall to display the + * cursor at the new postion */ + if (pEvoSubDev->headState[head].cursor.pSurfaceEvo) { + nvTriggerVrrUnstallMoveCursor(pDispEvo); + } +} + +void nvEvoMoveCursor(NVDispEvoPtr pDispEvo, NvU32 head, + const struct NvKmsMoveCursorCommonParams *pParams) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + + /* XXX NVKMS TODO: validate x,y against current viewport in? 
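nvSetCursorImage() falls back to a full flip (FlipCursorImage()) only when the bug-2052012 workaround applies and the cursor is changing between hidden and visible, i.e. exactly one of the old and new surface pointers is NULL. Read in isolation, that condition is just:

#include <stdbool.h>
#include <stddef.h>

/* True when the cursor is being shown or hidden (NULL <-> non-NULL),
 * false when it stays hidden, stays visible, or is unchanged. */
static bool cursor_visibility_changes(const void *old_surface,
                                      const void *new_surface)
{
    return (old_surface != new_surface) &&
           (old_surface == NULL || new_surface == NULL);
}

Steady-state image changes, where both pointers are non-NULL, keep the cheaper SetCursorImage() path.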
*/ + + pDevEvo->gpus[sd].headState[head].cursor.x = pParams->x; + pDevEvo->gpus[sd].headState[head].cursor.y = pParams->y; + + nvEvoMoveCursorInternal(pDispEvo, + head, pParams->x, pParams->y); +} + +// Allocate and map cursor position PIO channels +NvBool nvAllocCursorEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS PioChannelAllocParams = { 0 }; + NVDispEvoPtr pDispEvo; + NvU32 sd; + + PioChannelAllocParams.channelInstance = head; + // No notifiers in cursor channel + PioChannelAllocParams.hObjectNotify = 0; + pDevEvo->cursorHandle[head] = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pDevEvo->cursorHandle[head], + pDevEvo->cursorHal->klass, + &PioChannelAllocParams) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate CURSOR PIO for head %d", + head); + nvFreeCursorEvo(pDevEvo); + return FALSE; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + void *pPioDisplayChannel; + NvU32 status; + + status = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pDevEvo->cursorHandle[head], + 0, + sizeof(GK104DispCursorControlPio), + &pPioDisplayChannel, + 0); + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to map CURSOR PIO for head %d", + head); + nvFreeCursorEvo(pDevEvo); + return FALSE; + } + pEvoSubDev->cursorPio[head] = pPioDisplayChannel; + } + } + + return TRUE; +} + +// Free and unmap Cursor PIO Channels +void nvFreeCursorEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispEvoPtr pDispEvo; + NvU32 sd; + NvU32 status; + + if (pDevEvo->cursorHandle[head] == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 status; + + if (pEvoSubDev->cursorPio[head] == NULL) { + continue; + } + + status = nvRmApiUnmapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pDevEvo->cursorHandle[head], + pEvoSubDev->cursorPio[head], + 0); + + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to unmap cursor channel memory"); + } + pEvoSubDev->cursorPio[head] = NULL; + } + + status = nvRmApiFree( + nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pDevEvo->cursorHandle[head]); + + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to tear down Cursor channel"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->cursorHandle[head]); + + pDevEvo->cursorHandle[head] = 0; + } +} + +extern NVEvoCursorHAL nvEvoCursor91; +extern NVEvoCursorHAL nvEvoCursorC3; +extern NVEvoCursorHAL nvEvoCursorC5; +extern NVEvoCursorHAL nvEvoCursorC6; + +enum NvKmsAllocDeviceStatus nvInitDispHalCursorEvo(NVDevEvoPtr pDevEvo) +{ + static const NVEvoCursorHALPtr cursorTable[] = { + &nvEvoCursor91, + &nvEvoCursorC3, + &nvEvoCursorC5, + &nvEvoCursorC6, + }; + + int i; + + for (i = 0; i < ARRAY_LEN(cursorTable); i++) { + if (nvRmEvoClassListCheck(pDevEvo, cursorTable[i]->klass)) { + + pDevEvo->cursorHal = cursorTable[i]; + + return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + } + } + + return NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; +} diff --git a/src/nvidia-modeset/src/nvkms-cursor2.c b/src/nvidia-modeset/src/nvkms-cursor2.c new file mode 100644 index 
000000000..30ba3a830 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-cursor2.c @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include + +static void MoveCursor90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + NvS16 x, NvS16 y) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + GK104DispCursorControlPio *pEvoCursorControl = + pEvoSubDev->cursorPio[head]; + + pEvoCursorControl->SetCursorHotSpotPointsOut[NVKMS_LEFT] = + DRF_NUM(917A, _SET_CURSOR_HOT_SPOT_POINTS_OUT, _X, x) | + DRF_NUM(917A, _SET_CURSOR_HOT_SPOT_POINTS_OUT, _Y, y); + + pEvoCursorControl->Update = + DRF_DEF(917A, _UPDATE, _INTERLOCK_WITH_CORE, _DISABLE); +} + +NVEvoCursorHAL nvEvoCursor91 = { + NV917A_CURSOR_CHANNEL_PIO, /* klass */ + MoveCursor90, /* MoveCursor */ + NULL, /* ReleaseElv */ + { /* caps */ + 256, /* maxSize */ + }, +}; diff --git a/src/nvidia-modeset/src/nvkms-cursor3.c b/src/nvidia-modeset/src/nvkms-cursor3.c new file mode 100644 index 000000000..354f13864 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-cursor3.c @@ -0,0 +1,114 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
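MoveCursor90() repositions the cursor with one PIO write that packs the X and Y hot-spot coordinates into a single 32-bit word via DRF_NUM(), followed by a write to the Update register to latch it. The actual field layout comes from the NV917A class header; the sketch below only illustrates the packing idea and assumes, purely for the sake of the example, a 16-bit X in the low half and Y in the high half:

#include <stdint.h>

/* Pack two signed 16-bit coordinates into one 32-bit register value.
 * The driver derives the real field positions from DRF_NUM() and the
 * class header rather than hard-coding them like this. */
static uint32_t pack_cursor_position(int16_t x, int16_t y)
{
    return ((uint32_t)(uint16_t)y << 16) | (uint32_t)(uint16_t)x;
}

A volatile pointer into the mapped PIO region would then be written with this value, followed by the Update register write that makes the new position take effect.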
+ */ + +#include +#include + +#include +#include +#include + +static void WaitForFreeSpace(NVDevEvoPtr pDevEvo, + NVC37ADispCursorImmControlPio *pEvoCursorControl) +{ + /* + * Wait for Free to be non-zero, indicating there is space to push a method. + * The only case where Free is expected to be zero is when display + * frontend (FE) hardware is processing a previous method. + * .1s should be more than enough time to wait for that. + */ + NvU64 startTime = 0; + const NvU64 timeout = 100000; /* 0.1 seconds */ + + do { + if (pEvoCursorControl->Free != 0) { + return; + } + + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + break; + } + + nvkms_yield(); + + } while (TRUE); + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Timed out waiting for cursor PIO space"); +} + +static void MoveCursorC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + NvS16 x, NvS16 y) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVC37ADispCursorImmControlPio *pEvoCursorControl = + pEvoSubDev->cursorPio[head]; + + WaitForFreeSpace(pDevEvo, pEvoCursorControl); + pEvoCursorControl->SetCursorHotSpotPointOut[0] = + DRF_NUM(C37A, _SET_CURSOR_HOT_SPOT_POINT_OUT, _X, x) | + DRF_NUM(C37A, _SET_CURSOR_HOT_SPOT_POINT_OUT, _Y, y); + + WaitForFreeSpace(pDevEvo, pEvoCursorControl); + pEvoCursorControl->Update = + DRF_DEF(C37A, _UPDATE, _FLIP_LOCK_PIN, _LOCK_PIN_NONE); +} + +static void ReleaseElvC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVC37ADispCursorImmControlPio *pEvoCursorControl = + pEvoSubDev->cursorPio[head]; + + WaitForFreeSpace(pDevEvo, pEvoCursorControl); + pEvoCursorControl->Update = + DRF_DEF(C37A, _UPDATE, _FLIP_LOCK_PIN, _LOCK_PIN_NONE) | + DRF_DEF(C37A, _UPDATE, _RELEASE_ELV, _TRUE); +} + +NVEvoCursorHAL nvEvoCursorC3 = { + NVC37A_CURSOR_IMM_CHANNEL_PIO, /* klass */ + MoveCursorC3, /* MoveCursor */ + ReleaseElvC3, /* ReleaseElv */ + { /* caps */ + 256, /* maxSize */ + }, +}; + +NVEvoCursorHAL nvEvoCursorC5 = { + NVC57A_CURSOR_IMM_CHANNEL_PIO, /* klass */ + MoveCursorC3, /* MoveCursor */ + ReleaseElvC3, /* ReleaseElv */ + { /* caps */ + 256, /* maxSize */ + }, +}; + +NVEvoCursorHAL nvEvoCursorC6 = { + NVC67A_CURSOR_IMM_CHANNEL_PIO, /* klass */ + MoveCursorC3, /* MoveCursor */ + ReleaseElvC3, /* ReleaseElv */ + { /* caps */ + 256, /* maxSize */ + }, +}; diff --git a/src/nvidia-modeset/src/nvkms-dma.c b/src/nvidia-modeset/src/nvkms-dma.c new file mode 100644 index 000000000..c6a3c1f14 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-dma.c @@ -0,0 +1,484 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include "nvkms-dma.h" +#include "nvkms-utils.h" +#include "nvkms-rmapi.h" +#include "class/cl917d.h" // NV917DDispControlDma, NV917D_DMA_* +#include // NV0080_CTRL_CMD_DMA_FLUSH +#include "nvos.h" + +#define NV_DMA_PUSHER_CHASE_PAD 5 +#define NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC 3000000 // 3 seconds + +static void EvoCoreKickoff(NVDmaBufferEvoPtr push_buffer, NvU32 putOffset); + +void nvDmaKickoffEvo(NVEvoChannelPtr pChannel) +{ + NVDmaBufferEvoPtr p = &pChannel->pb; + NvU32 putOffset = (NvU32)((char *)p->buffer - (char *)p->base); + + if (p->put_offset == putOffset) { + return; + } + + EvoCoreKickoff(p, putOffset); +} + +static void EvoCoreKickoff(NVDmaBufferEvoPtr push_buffer, NvU32 putOffset) +{ + NVEvoDmaPtr pDma = &push_buffer->dma; + int i; + + nvAssert(putOffset % 4 == 0); + nvAssert(putOffset <= push_buffer->offset_max); + + /* If needed, copy the chunk to be kicked off into each GPU's FB */ + if (pDma->isBar1Mapping) { + NVDevEvoPtr pDevEvo = push_buffer->pDevEvo; + int sd; + + NV0080_CTRL_DMA_FLUSH_PARAMS flushParams = { 0 }; + NvU32 ret; + + NvU32 *endAddress; + + if (putOffset < push_buffer->put_offset) { + /* If we've wrapped, copy to the end of the pushbuffer */ + nvAssert(putOffset == 0); + endAddress = push_buffer->base + push_buffer->offset_max / + sizeof(NvU32); + } else { + endAddress = push_buffer->buffer; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 startOffset = push_buffer->put_offset / sizeof(NvU32); + + NvU32 *src = push_buffer->base; + NvU32 *dst = pDma->subDeviceAddress[sd]; + + nvAssert(dst != NULL); + + src += startOffset; + dst += startOffset; + while (src < endAddress) { + *dst++ = *src++; + } + } + + /* + * Finally, tell RM to flush so that the data actually lands in FB + * before telling the GPU to fetch it. + */ + flushParams.targetUnit = DRF_DEF(0080_CTRL_DMA, _FLUSH_TARGET, + _UNIT_FB, _ENABLE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_DMA_FLUSH, + &flushParams, sizeof(flushParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0080_CTRL_CMD_DMA_FLUSH failed"); + } + } + +#if NVCPU_IS_X86_64 + __asm__ __volatile__ ("sfence\n\t" : : : "memory"); +#elif NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("dsb sy\n\t" : : : "memory"); +#endif + + /* Kick off all push buffers */ + push_buffer->put_offset = putOffset; + for (i = 0; i < push_buffer->num_channels; i++) { + void *pControl = push_buffer->control[i]; + nvDmaStorePioMethod(pControl, NV917D_PUT, putOffset); + } +} + +/* Read GET from an EVO core channel */ +static NvU32 EvoCoreReadGet(NVDmaBufferEvoPtr push_buffer, int sd) +{ + void *pControl = push_buffer->control[sd]; + return nvDmaLoadPioMethod(pControl, NV917D_GET); +} + +/* Read GET for all devices and return the minimum or maximum*/ +static NvU32 EvoReadGetOffset(NVDmaBufferEvoPtr push_buffer, NvBool minimum) +{ + int i; + NvU32 get, bestGet = 0; + NvS32 distanceToPut, minmaxDistanceToPut = (minimum ? 
+ 0 : + (push_buffer->dma.limit + 1)); + + if (push_buffer->num_channels <= 1) { + return EvoCoreReadGet(push_buffer, 0); + } + + for (i =0; i < push_buffer->num_channels; i++) { + get = EvoCoreReadGet(push_buffer, i); + + /* Compute distance to put, accounting for wraps */ + distanceToPut = push_buffer->put_offset - get; + if (distanceToPut < 0) + distanceToPut += push_buffer->dma.limit + 1; + + /* Accumulate the maximum distance to put and the corresponding get. */ + if ((minimum && (distanceToPut >= minmaxDistanceToPut)) || + (!minimum && (distanceToPut <= minmaxDistanceToPut))) { + minmaxDistanceToPut = distanceToPut; + bestGet = get; + } + } + return bestGet; +} + +void nvEvoMakeRoom(NVEvoChannelPtr pChannel, NvU32 count) +{ + NVDmaBufferEvoPtr push_buffer = &pChannel->pb; + NvU32 getOffset; + NvU32 putOffset; + NvU64 startTime = 0; + const NvU64 timeout = 5000000; /* 5 seconds */ + + putOffset = (NvU32) ((char *)push_buffer->buffer - + (char *)push_buffer->base); + + if (putOffset >= push_buffer->offset_max) { + *(push_buffer->buffer) = 0x20000000; + push_buffer->buffer = push_buffer->base; + nvDmaKickoffEvo(pChannel); + putOffset = 0; + } + + while (1) { + getOffset = EvoReadGetOffset(push_buffer, TRUE); + + if (putOffset >= getOffset) { + push_buffer->fifo_free_count = + (push_buffer->offset_max - putOffset) >> 2; + + if (push_buffer->fifo_free_count <= count) { + if (getOffset) { + *(push_buffer->buffer) = 0x20000000; + push_buffer->buffer = push_buffer->base; + nvDmaKickoffEvo(pChannel); + putOffset = 0; + } + else if (putOffset != push_buffer->put_offset) { + nvDmaKickoffEvo(pChannel); + // Put offset will have changed if a tail was inserted. + putOffset = push_buffer->put_offset; + } + } + } + else { + getOffset = (getOffset > push_buffer->offset_max) ? + push_buffer->offset_max : getOffset; + + if ((putOffset + (NV_DMA_PUSHER_CHASE_PAD * 4)) >= getOffset) + push_buffer->fifo_free_count = 0; + else + push_buffer->fifo_free_count = + ((getOffset - putOffset) >> 2) - 1; + } + if (push_buffer->fifo_free_count > count) { + break; + } + + /* + * If we have been waiting too long, print an error message. There + * isn't much we can do as currently structured, so just reset + * startTime. + */ + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + nvEvoLogDev(push_buffer->pDevEvo, EVO_LOG_ERROR, + "Error while waiting for GPU progress: " + "0x%08x:%d %d:%d:%d:%d", + pChannel->hwclass, pChannel->instance, + count, push_buffer->fifo_free_count, getOffset, putOffset); + startTime = 0; + } + + nvkms_yield(); + } +} + +static inline void EvoWriteNotifier(volatile NvU32 *pNotifier, NvU32 value) +{ + /* + * Note that we don't need to flush to vidmem here; any subsequent GPU + * write will always be triggered by kicking off pushbuffer methods, + * which will perform a general FB flush. This does assume that the + * pushbuffer and its associated notifier surfaces are either both in + * sysmem or both in vidmem, however. + */ + + *pNotifier = value; +} + +/* Write the EVO core notifier at the given offset to the given value. 
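+ * The offset is in units of NvU32 within the core channel's notifier
+ * surface for the disp's display owner subdevice.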
*/ +void nvWriteEvoCoreNotifier( + const NVDispEvoRec *pDispEvo, + NvU32 offset, + NvU32 value) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + NVEvoDmaPtr pSubChannel = &pDevEvo->core->notifiersDma[sd]; + volatile NvU32 *pNotifiers = pSubChannel->subDeviceAddress[sd]; + + EvoWriteNotifier(pNotifiers + offset, value); +} + +static NvBool EvoCheckNotifier(const NVDispEvoRec *pDispEvo, + NvU32 offset, NvU32 done_base_bit, + NvU32 done_extent_bit, NvU32 done_value, + NvBool wait) +{ + const NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoDmaPtr pSubChannel = &pDevEvo->core->notifiersDma[sd]; + NVDmaBufferEvoPtr p = &pDevEvo->core->pb; + volatile NvU32 *pNotifier; + NvU64 startTime = 0; + + pNotifier = pSubChannel->subDeviceAddress[sd]; + + nvAssert(pNotifier != NULL); + pNotifier += offset; + + // While the completion notifier is not set to done_true + do { + const NvU32 val = *pNotifier; + const NvU32 done_mask = DRF_SHIFTMASK(done_extent_bit:done_base_bit); + const NvU32 done_val = done_value << done_base_bit; + + if ((val & done_mask) == done_val) { + return TRUE; + } + + if (!wait) { + return FALSE; + } + + if (!nvIsEmulationEvo(pDevEvo) && + nvExceedsTimeoutUSec( + &startTime, + NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC) && + (p->put_offset == EvoCoreReadGet(p, sd))) + { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Lost display notification (%d:0x%08x); " + "continuing.", sd, val); + EvoWriteNotifier(pNotifier, done_value << done_base_bit); + return TRUE; + } + + nvkms_yield(); + } while (TRUE); +} + +/* + * Used by NV_EVO_WAIT_FOR_NOTIFIER() and NV_EVO_WAIT_FOR_CAPS_NOTIFIER() + */ +void nvEvoWaitForCoreNotifier(const NVDispEvoRec *pDispEvo, NvU32 offset, + NvU32 done_base_bit, NvU32 done_extent_bit, + NvU32 done_value) +{ + EvoCheckNotifier(pDispEvo, offset, + done_base_bit, done_extent_bit, done_value, TRUE); +} + +/* + * Used by the EVO HAL IsNotifierComplete functions. Returns TRUE if the + * notifier is complete. + */ +NvBool nvEvoIsCoreNotifierComplete(NVDispEvoPtr pDispEvo, NvU32 offset, + NvU32 done_base_bit, NvU32 done_extent_bit, + NvU32 done_value) +{ + return EvoCheckNotifier(pDispEvo, + offset, done_base_bit, done_extent_bit, + done_value, FALSE); +} + +void nvEvoSetSubdeviceMask(NVEvoChannelPtr pChannel, NvU32 mask) +{ + NVDmaBufferEvoPtr p = &pChannel->pb; + + nvAssert(!nvDmaSubDevMaskMatchesCurrent(pChannel, mask)); + + p->currentSubDevMask = mask; + + ASSERT_DRF_NUM(917D, _DMA, _SET_SUBDEVICE_MASK_VALUE, mask); + + if (p->fifo_free_count <= 1) { + nvEvoMakeRoom(pChannel, 1); + } + + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _DMA, _OPCODE, _SET_SUBDEVICE_MASK) | + DRF_NUM(917D, _DMA, _SET_SUBDEVICE_MASK_VALUE, mask)); + p->fifo_free_count -= 1; +} + +/*! + * Reads CRC values from the notifier. + * + * This function will attempt to read in the first 'entry_count' CRC notifier + * entries that HW generated. The actual number of entries that are read may + * be less. + * + * \param[in] pCRC32Notifier Pointer to the CRC notifier memory. + * \param[in] entry_stride Stride of a single CRC notifier entry + * \param[in] entry_count Expected count of notifier entries to read + * \param[in] status_offset Offset for Status flags header in CRC notifier + * \param[in] field_count Number of fields to read from each CRC notifier + * entry. + * \param[in] flag_count Number of flags to read from the Status Header + * \param[in out] field_info Specifies the offset/base/extent info for each field. 
+ * Each 'field_info' contains an output array for + * storing 'entry_count' field values. + * \param[in] flag_info Specifies the base/extent info for each flag. + * Each 'flag_info' contains a 'flag_type' for + * addressing error cases related to the flags. + * + * \return Returns the MIN(count, entry_count) of successfully + * read entries. + */ +NvU32 nvEvoReadCRC32Notifier(volatile NvU32 *pCRC32Notifier, + NvU32 entry_stride, + NvU32 entry_count, + NvU32 status_offset, + NvU32 field_count, + NvU32 flag_count, + const CRC32NotifierEntryRec *field_info, + const CRC32NotifierEntryFlags *flag_info) +{ + NvU32 count = 0; + NvU32 i, j, k; + + nvAssert(pCRC32Notifier != NULL); + + // Iterate over flags (unique at start of the CRC32Notifier Struct) + for (k = 0; k < flag_count; k++) { + CRC32NotifierEntryFlags info = flag_info[k]; + volatile NvU32 *pFlag = pCRC32Notifier + status_offset; + NvU32 flag_mask = + DRF_SHIFTMASK((info.flag_extent_bit):(info.flag_base_bit)); + NvU32 flag = (*pFlag & flag_mask) >> info.flag_base_bit; + + switch (info.flag_type) + { + case NVEvoCrc32NotifierFlagCount: + count = flag; + // entry_count is max of each field_frame_values[i] array + if (count > entry_count) { + nvEvoLog(EVO_LOG_WARN, "Too many CRC32 generated entries " + "(%d expected; %d found)", entry_count, count); + count = entry_count; + } + break; + + case NVEvoCrc32NotifierFlagCrcOverflow: + if (flag) { + count = 0; + nvEvoLog(EVO_LOG_ERROR, "CRC Overflow occured, " + "CRC value unable to be processed fast enough.\n" + "Failing flag index in status_info array: %d", + k); + + return count; + } + break; + } + } + + // Iterate over each collection of fields, for count pairs of values + for (i = 0; i < count; i++) { + for (j = 0; j < field_count; j++) { + CRC32NotifierEntryRec info = field_info[j]; + volatile NvU32 *pEntry = pCRC32Notifier + info.field_offset; + NvU32 field_mask = + DRF_SHIFTMASK((info.field_extent_bit):(info.field_base_bit)); + + info.field_frame_values[i].value = + (*pEntry & field_mask) >> info.field_base_bit; + info.field_frame_values[i].supported = TRUE; + } + pCRC32Notifier += entry_stride; + } + + return count; +} + +void nvEvoResetCRC32Notifier(volatile NvU32 *pCRC32Notifier, + NvU32 offset, + NvU32 reset_base_bit, + NvU32 reset_value) +{ + const NvU32 reset_val = reset_value << reset_base_bit; + + nvAssert(pCRC32Notifier != NULL); + pCRC32Notifier += offset; + + EvoWriteNotifier(pCRC32Notifier, reset_val); +} + +NvBool nvEvoWaitForCRC32Notifier(volatile NvU32 *pCRC32Notifier, + NvU32 offset, + NvU32 done_base_bit, + NvU32 done_extent_bit, + NvU32 done_value) +{ + const NvU32 done_mask = DRF_SHIFTMASK(done_extent_bit:done_base_bit); + const NvU32 done_val = done_value << done_base_bit; + NvU64 startTime = 0; + + nvAssert(pCRC32Notifier != NULL); + pCRC32Notifier += offset; + + do { + const NvU32 status = *pCRC32Notifier; + + if ((status & done_mask) == done_val) { + return TRUE; + } + + if (nvExceedsTimeoutUSec( + &startTime, + NV_EVO_NOTIFIER_SHORT_TIMEOUT_USEC)) { + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + return FALSE; +} diff --git a/src/nvidia-modeset/src/nvkms-dpy.c b/src/nvidia-modeset/src/nvkms-dpy.c new file mode 100644 index 000000000..9c0c2686f --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-dpy.c @@ -0,0 +1,2822 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "dp/nvdp-device.h" +#include "dp/nvdp-connector-event-sink.h" + +#include "nvkms-evo.h" +#include "nvkms-dpy.h" +#include "nvkms-hdmi.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-types.h" +#include "nvkms-attributes.h" +#include "nvkms-utils.h" +#include "nvkms-3dvision.h" + +#include "nv_mode_timings_utils.h" + +#include "nvkms-api.h" +#include "nvkms-private.h" + +#include "nvos.h" +#include "timing/dpsdp.h" + +#include // NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_* + +#define TMDS_SINGLE_LINK_PCLK_MAX 165000 +#define TMDS_DUAL_LINK_PCLK_MAX 330000 + +static void DpyGetDynamicDfpProperties( + NVDpyEvoPtr pDpyEvo, + const NvBool disableACPIBrightnessHotkeys); + +static NVEvoPassiveDpDongleType +DpyGetPassiveDpDongleType(const NVDpyEvoRec *pDpyEvo, + NvU32 *passiveDpDongleMaxPclkKHz); +static void +CreateParsedEdidFromNVT_TIMING(NVT_TIMING *pTimings, + NVParsedEdidEvoPtr pParsedEdid); + +static NvBool ReadEdidFromDP (const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid); +static NvBool ReadEdidFromResman (const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NvKmsEdidReadMode readMode); +static NvBool ValidateEdid (const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NVEvoInfoStringPtr pInfoString, + const NvBool ignoreEdidChecksum); +static void LogEdid (NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString); +static void ClearEdid (NVDpyEvoPtr pDpyEvo); +static void ClearCustomEdid (const NVDpyEvoRec *pDpyEvo); +static void WriteEdidToResman (const NVDpyEvoRec *pDpyEvo, + const NVEdidRec *pEdid); +static void PatchAndParseEdid (const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NVParsedEdidEvoPtr, + NVEvoInfoStringPtr pInfoString); +static void ReadAndApplyEdidEvo (NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams); +static NvBool GetFixedModeTimings (NVDpyEvoPtr pDpyEvo); +static NvBool ReadDSITimingsFromResman (const NVDpyEvoRec *pDpyEvo, + NVT_TIMING *pTimings); +static void AssignDpyEvoName (NVDpyEvoPtr pDpyEvo); + +static NvBool IsConnectorTMDS (NVConnectorEvoPtr); + + +static void DpyDisconnectEvo(NVDpyEvoPtr pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + + pDispEvo->connectedDisplays = + nvDpyIdListMinusDpyId(pDispEvo->connectedDisplays, pDpyEvo->id); + + ClearEdid(pDpyEvo); +} + +static NvBool DpyConnectEvo( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + 
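+    /*
+     * Mark the dpy as connected, then probe its dynamic DFP properties and
+     * either its fixed mode timings (DSI and DP serializer connectors) or
+     * its EDID.
+     */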
+ pDispEvo->connectedDisplays = + nvAddDpyIdToDpyIdList(pDpyEvo->id, pDispEvo->connectedDisplays); + + DpyGetDynamicDfpProperties(pDpyEvo, pParams->request.disableACPIBrightnessHotkeys); + nvDPGetDpyGUID(pDpyEvo); + + if ((pDpyEvo->pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) || + nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo)) { + return GetFixedModeTimings(pDpyEvo); + } else { + ReadAndApplyEdidEvo(pDpyEvo, pParams); + } + + if (pDpyEvo->head != NV_INVALID_HEAD) { + nvUpdateInfoFrames(pDpyEvo->pDispEvo, pDpyEvo->head); + } + + return TRUE; +} + +/* + * DpyAssignColorSpaceCaps() - parse both the CEA-861 extension block and + * the EDID 1.4 block to determine YCbCr422/444 capability. + */ +static void DpyAssignColorSpaceCaps(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString) +{ + NvBool ycbr422_cap = FALSE; + NvBool ycbr444_cap = FALSE; + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + + /* check for edid YCbCr422/YCbCr444 capability */ + if (pParsedEdid->valid) { + NvBool haveCEA861Block = + (pParsedEdid->info.ext861.revision != NVT_CEA861_REV_NONE); + if (haveCEA861Block) { + ycbr422_cap = !!(pParsedEdid->info.ext861.basic_caps & + NVT_CEA861_CAP_YCbCr_422); + ycbr444_cap = !!(pParsedEdid->info.ext861.basic_caps & + NVT_CEA861_CAP_YCbCr_444); + } + /* check EDID 1.4 base block */ + if (pParsedEdid->info.version == 0x104 && + pParsedEdid->info.input.isDigital) { + NvBool edid14_ycbr422 = + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_422; + NvBool edid14_ycbr444 = + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_444; + if (haveCEA861Block && ycbr422_cap != edid14_ycbr422) { + nvEvoLogInfoString(pInfoString, + "%s EDID inconsistency: the EDID 1.4 base block %s " + "YCbCr 4:2:2 support, but the CEA-861 extension block " + "%s. Assuming YCbCr 4:2:2 is supported.", + pDpyEvo->name, + edid14_ycbr422 ? "indicates" : "does not indicate", + ycbr422_cap ? "does" : "does not"); + } + if (edid14_ycbr422) { + ycbr422_cap = TRUE; + } + if (haveCEA861Block && ycbr444_cap != edid14_ycbr444) { + nvEvoLogInfoString(pInfoString, + "%s EDID inconsistency: the EDID 1.4 base block %s " + "YCbCr 4:4:4 support, but the CEA-861 extension block " + "%s. Assuming YCbCr 4:4:4 is supported.", + pDpyEvo->name, + edid14_ycbr444 ? "indicates" : "does not indicate", + ycbr444_cap ? "does" : "does not"); + } + if (edid14_ycbr444) { + ycbr444_cap = TRUE; + } + } + } + pDpyEvo->colorSpaceCaps.ycbcr422Capable = ycbr422_cap; + pDpyEvo->colorSpaceCaps.ycbcr444Capable = ycbr444_cap; +} + + + +static NvBool GetEdidOverride( + const struct NvKmsQueryDpyDynamicDataRequest *pRequest, + NVEdidRec *pEdid) +{ + if ((pRequest == NULL) || + !pRequest->overrideEdid || + (pRequest->edid.bufferSize == 0) || + (pRequest->edid.bufferSize > sizeof(pRequest->edid.buffer))) { + return FALSE; + } + + pEdid->buffer = nvAlloc(pRequest->edid.bufferSize); + + if (pEdid->buffer == NULL) { + return FALSE; + } + + nvkms_memcpy(pEdid->buffer, pRequest->edid.buffer, pRequest->edid.bufferSize); + + pEdid->length = pRequest->edid.bufferSize; + + return TRUE; +} + +/*! + * Query resman for the EDID for the pDpyEvo, then parse the EDID into usable + * data. Do not modify the pDpyEvoRec. 
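+ *
+ * On success, the caller is responsible for freeing any buffer returned in
+ * pEdid->buffer and any parsed EDID returned in *ppParsedEdid (both with
+ * nvFree()); on failure, this function frees the EDID buffer itself.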
+ */ + +NvBool nvDpyReadAndParseEdidEvo( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsQueryDpyDynamicDataRequest *pRequest, + NvKmsEdidReadMode readMode, + NVEdidRec *pEdid, + NVParsedEdidEvoPtr *ppParsedEdid, + NVEvoInfoStringPtr pInfoString) +{ + NvBool ignoreEdid = FALSE; + NvBool ignoreEdidChecksum = FALSE; + + if (pRequest != NULL) { + ignoreEdid = pRequest->ignoreEdid; + ignoreEdidChecksum = pRequest->ignoreEdidChecksum; + } + + nvkms_memset(pEdid, 0, sizeof(*pEdid)); + + /* Just return an empty EDID if requested. */ + if (ignoreEdid) { + return TRUE; + } + + /* Load any custom EDID, (or see if DP lib has EDID) */ + ClearCustomEdid(pDpyEvo); + + if ((pRequest && GetEdidOverride(pRequest, pEdid)) || + ReadEdidFromDP(pDpyEvo, pEdid)) { + /* XXX [VSM] Write, clear and re-read the EDID to/from RM here to make + * sure RM and X agree on the final EDID bits. Once RM no longer + * parses the EDID, we can avoid doing this for DP devices. + * + * If it's a DisplayPort 1.2 multistream device then don't bother trying + * to ping-pong the EDID through RM. + */ + if (nvDpyEvoIsDPMST(pDpyEvo)) { + goto validateEdid; + } + + WriteEdidToResman(pDpyEvo, pEdid); + + nvFree(pEdid->buffer); + pEdid->buffer = NULL; + pEdid->length = 0; + } + + if (!ReadEdidFromResman(pDpyEvo, pEdid, readMode)) { + goto fail; + } + +validateEdid: + /* Validate the EDID */ + if (!ValidateEdid(pDpyEvo, pEdid, pInfoString, ignoreEdidChecksum)) { + goto fail; + } + + *ppParsedEdid = nvCalloc(1, sizeof(**ppParsedEdid)); + if (*ppParsedEdid == NULL) { + goto fail; + } + /* Parse the EDID. Note this may *change* the EDID bytes. */ + PatchAndParseEdid(pDpyEvo, pEdid, *ppParsedEdid, pInfoString); + + return TRUE; + +fail: + + /* We failed to read a valid EDID. Free any EDID buffer allocated above. */ + nvFree(pEdid->buffer); + pEdid->buffer = NULL; + pEdid->length = 0; + + return FALSE; +} + +static void AssignIsVrHmd(NVDpyEvoRec *pDpyEvo) +{ + NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS params = { }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + pDpyEvo->isVrHmd = FALSE; + + if (!pDpyEvo->parsedEdid.valid) { + return; + } + + params.manufacturerID = pDpyEvo->parsedEdid.info.manuf_id; + params.productID = pDpyEvo->parsedEdid.info.product_id; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_IS_DIRECTMODE_DISPLAY, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to query VR headset for %s", pDpyEvo->name); + return; + } + + /* + * bIsDirectmode indicates any monitor that by default shouldn't be part of + * a desktop (VR headset, touch panel, etc). But, close enough for our + * usage of isVrHmd. 
+ */ + pDpyEvo->isVrHmd = params.bIsDirectmode; +} + +static NvBool EdidHasChanged( + const NVDpyEvoRec *pDpyEvo, + const NVEdidRec *pEdid, + const NVParsedEdidEvoRec *pParsedEdid) +{ + /* Compare EDID bytes */ + if (pEdid->length != pDpyEvo->edid.length || + nvkms_memcmp(pEdid->buffer, pDpyEvo->edid.buffer, pEdid->length) != 0) { + return TRUE; + } + + /* Compare parsed data */ + if (pParsedEdid != NULL) { + if (nvkms_memcmp(pParsedEdid, &pDpyEvo->parsedEdid, + sizeof(*pParsedEdid)) != 0) { + return TRUE; + } + } else if (pDpyEvo->parsedEdid.valid) { + return TRUE; + } + + return FALSE; +} + +static void ApplyNewEdid( + NVDpyEvoPtr pDpyEvo, + const NVEdidRec *pEdid, + const NVParsedEdidEvoRec *pParsedEdid, + NVEvoInfoStringPtr pInfoString) +{ + if (pDpyEvo->edid.buffer != NULL) { + nvFree(pDpyEvo->edid.buffer); + } + pDpyEvo->edid.buffer = pEdid->buffer; + pDpyEvo->edid.length = pEdid->length; + + if (pParsedEdid != NULL) { + nvkms_memcpy(&pDpyEvo->parsedEdid, pParsedEdid, + sizeof(pDpyEvo->parsedEdid)); + } else { + nvkms_memset(&pDpyEvo->parsedEdid, 0, sizeof(pDpyEvo->parsedEdid)); + } + + /* + * Regenerate the dpy's name, because the parsed EDID monitorName + * may have changed. + */ + AssignDpyEvoName(pDpyEvo); + + /* Write information about the parsed EDID to the infoString. */ + LogEdid(pDpyEvo, pInfoString); + + if (pDpyEvo->parsedEdid.valid) { + /* + * check 3D Vision capability + */ + nvDpyCheck3DVisionCapsEvo(pDpyEvo); + + /* + * Check HDMI VRR capability + */ + nvDpyUpdateHdmiVRRCaps(pDpyEvo); + } + + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + DpyAssignColorSpaceCaps(pDpyEvo, pInfoString); + } + + nvUpdateHdmiCaps(pDpyEvo); + + nvDpyProbeMaxPixelClock(pDpyEvo); + + AssignIsVrHmd(pDpyEvo); +} + +/* + * ReadDSITimingsFromResman() - Obtains modetimings for a DSI connector, + * passing it into pTimings + */ +static NvBool ReadDSITimingsFromResman( + const NVDpyEvoRec *pDpyEvo, + NVT_TIMING *pTimings) +{ + NvU32 ret; + NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS dsiModeTimingParams = { 0 }; + // Values are currently hardcoded while waiting for full RM support + NvU32 hFrontPorch = 8; + NvU32 vFrontPorch = 2; + NvU32 hBackPorch = 56; + NvU32 vBackPorch = 51; + NvU32 hSyncWidth = 96; + NvU32 vSyncWidth = 2; + + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + dsiModeTimingParams.subDeviceInstance = pDispEvo->displayOwner; + + /* + * Currently displayId must be hardcoded to 0 to receive timings from RM. + * Once the corresponding DCB support is added for DSI, this hack will be + * removed and NVKMS will use the actual displayId instead. 
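+     *
+     * The front porch, back porch, and sync width values above are
+     * similarly hardcoded until RM reports them.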
+ */ + dsiModeTimingParams.displayId = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING, + &dsiModeTimingParams, sizeof(dsiModeTimingParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read DSI mode timings for display device %s", + pDpyEvo->name); + return FALSE; + } + + // Converts refresh (Hz) into appropriate units for rr1k (units of 0.001Hz) + pTimings->etc.rrx1k = dsiModeTimingParams.refresh * 1000; + pTimings->HVisible = dsiModeTimingParams.hActive; + pTimings->HFrontPorch = hFrontPorch; + pTimings->HSyncWidth = hSyncWidth; + pTimings->HTotal = dsiModeTimingParams.hActive + + hFrontPorch + hSyncWidth + hBackPorch; + + pTimings->VVisible = dsiModeTimingParams.vActive; + pTimings->VFrontPorch = vFrontPorch; + pTimings->VSyncWidth = vSyncWidth; + pTimings->VTotal = dsiModeTimingParams.vActive + + vFrontPorch + vSyncWidth + vBackPorch; + + pTimings->pclk = HzToKHz(pTimings->VTotal * + pTimings->HTotal * + dsiModeTimingParams.refresh) / 10; + + return TRUE; +} + +static NvBool ReadDPSerializerTimings( + const NVDpyEvoRec *pDpyEvo, + NVT_TIMING *pTimings) +{ + /* + * TODO Add RM control call that will return the fixed timings + * that can be used with a given display. + */ + pTimings->HVisible = 1920; + pTimings->VVisible = 1080; + pTimings->HFrontPorch = 88; + pTimings->VFrontPorch = 4; + pTimings->HSyncWidth = 44; + pTimings->VSyncWidth = 5; + pTimings->HTotal = 2200; + pTimings->VTotal = 1125; + pTimings->HSyncPol = 0; + pTimings->VSyncPol = 0; + pTimings->interlaced = 0; + pTimings->pclk = 14850; + pTimings->etc.rrx1k = 60000; + + return TRUE; +} + +static NvBool GetFixedModeTimings( + NVDpyEvoPtr pDpyEvo) +{ + NVT_TIMING timings = { }; + NvBool ret = FALSE; + + if (pDpyEvo->pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + ret = ReadDSITimingsFromResman(pDpyEvo, &timings); + } else if (nvConnectorIsDPSerializer(pDpyEvo->pConnectorEvo)) { + ret = ReadDPSerializerTimings(pDpyEvo, &timings); + } + + if (!ret) { + return ret; + } + + CreateParsedEdidFromNVT_TIMING(&timings, &pDpyEvo->parsedEdid); + AssignDpyEvoName(pDpyEvo); + nvDpyProbeMaxPixelClock(pDpyEvo); + + return TRUE; +} + +static void ReadAndApplyEdidEvo( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams) +{ + const struct NvKmsQueryDpyDynamicDataRequest *pRequest = NULL; + NVEdidRec edid = {NULL, 0}; + NVParsedEdidEvoPtr pParsedEdid = NULL; + NVEvoInfoStringRec infoString; + NvBool readSuccess; + + if (pParams != NULL) { + nvInitInfoString(&infoString, pParams->reply.edid.infoString, + sizeof(pParams->reply.edid.infoString)); + pRequest = &pParams->request; + } else { + nvInitInfoString(&infoString, NULL, 0); + } + + readSuccess = nvDpyReadAndParseEdidEvo(pDpyEvo, pRequest, + NVKMS_EDID_READ_MODE_DEFAULT, + &edid, &pParsedEdid, &infoString); + + if (pParams != NULL) { + pParams->reply.edid.valid = readSuccess; + } + + if (EdidHasChanged(pDpyEvo, &edid, pParsedEdid)) { + /* + * Do not plumb pRequest into ApplyNewEdid(). This helps ensure that + * its operation is purely a function of the EDID and parsed EDID data, + * which means that if we get into this function again with the same + * EDID and parsed EDID data, we can safely skip ApplyNewEdid() without + * worrying that this request has different parameters (like CustomEdid + * or mode validation overrides). 
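+         *
+         * ApplyNewEdid() takes ownership of edid.buffer; in the unchanged
+         * case, the else branch below frees the newly read buffer instead.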
+ */ + ApplyNewEdid(pDpyEvo, &edid, pParsedEdid, &infoString); + } else { + nvFree(edid.buffer); + } + nvFree(pParsedEdid); +} + + +/*! + * Get the maximum allowed pixel clock for pDpyEvo. + * + * This depends on the following conditions: + * + * - The RM's returned value is sufficient for non-TMDS connectors + * - For HDMI, the SOR capabilities exceed the RM's returned value to allow + * for HDMI 1.4 modes that exceed 165MHz on a single link, or + * for HDMI 2.1 modes if the source and sink is capable of FRL + * - For DVI, the user is allowed to set an option to exceed the 165MHz + * per-TMDS limit if the SOR capabilities allow it + * - Contrary to the above, passive DP->DVI and DP->HDMI dongles have their + * own limits + */ +void nvDpyProbeMaxPixelClock(NVDpyEvoPtr pDpyEvo) +{ + NvU32 ret; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NvU32 displayOwner = pDispEvo->displayOwner; + NVEvoPassiveDpDongleType passiveDpDongleType; + NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS params = { 0 }; + NvU32 passiveDpDongleMaxPclkKHz; + + /* First, get the RM-reported value. */ + + params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + params.subDeviceInstance = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_PCLK_LIMIT, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failure reading maximum pixel clock value " + "for display device %s.", pDpyEvo->name); + pDpyEvo->maxPixelClockKHz = 100000; + pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz; + return; + } + + pDpyEvo->maxPixelClockKHz = params.orPclkLimit; + pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz; + + /* + * The RM's returned max pclk value is sufficient for non-TMDS + * connectors + */ + if (!IsConnectorTMDS(pConnectorEvo)) { + return; + } + + /* + * The RM returns a 165MHz max pclk for single link TMDS and 330MHz + * max pclk for dual link TMDS. We can exceed that in the + * following cases: + * + * - HDMI 1.4a 4Kx2K and 1080p60hz frame packed stereo modes + * require a 297MHz single TMDS link pixel clock, and HDMI 2.0 + * allows an even higher pixel clock. + * - While the DVI spec mandates a limit of 165MHz per TMDS link, + * since certain GPUs and certain displays support DVI + * connections at higher pixel clocks, we allow users to + * override this limit to allow validation of higher maximum + * pixel clocks over DVI. + */ + if (pDevEvo->gpus != NULL) { + + NVEvoSorCaps *sorCaps = pDevEvo->gpus[displayOwner].capabilities.sor; + NvU32 orIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + /* + * With the SOR crossbar, pConnectorEvo->or.mask is unknown, + * and may change at modeset time. Use the caps of SOR 0 + * for validation. 
+ */ + orIndex = 0; + } + + if (nvDpyIsHdmiEvo(pDpyEvo)) { + pDpyEvo->maxPixelClockKHz = + pDpyEvo->maxSingleLinkPixelClockKHz = + sorCaps[orIndex].maxTMDSClkKHz; + + nvkms_memset(&pDpyEvo->hdmi.srcCaps, 0, sizeof(pDpyEvo->hdmi.srcCaps)); + nvkms_memset(&pDpyEvo->hdmi.sinkCaps, 0, sizeof(pDpyEvo->hdmi.sinkCaps)); + + if (pDevEvo->hal->caps.supportsHDMIFRL) { + /* + * This function is called multiple times for each pDpyEvo: + * - Once when the dpy is created + * - Once when the dpy is connected + * - Once when the dpy is disconnected + * In the first and third cases, we don't yet have an EDID so + * we don't know if the sink supports HDMI FRL. Assume it + * doesn't, since if we try to set a mode anyway there won't be + * a sink to do link training with. + */ + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.hdmiForumInfo.max_FRL_Rate) { + /* + * An SOR needs to be assigned temporarily to do FRL + * training. + * Since the only other SORs in use at the moment (if any) + * are those driving heads, we don't need to exclude RM + * from selecting any SOR, so an sorExcludeMask of 0 is + * appropriate. + */ + if (nvAssignSOREvo(pConnectorEvo, 0)) { + if (nvHdmiFrlAssessLink(pDpyEvo)) { + + /* + * Note that although we "assessed" the link above, + * the maximum pixel clock set here doesn't take + * that into account -- it's the maximum the GPU + * hardware is capable of on the most capable link, + * mostly for reporting purposes. + * + * The calculation for if a given mode can fit in + * the assessed FRL configuration is complex and + * depends on things like the amount of blanking, + * rather than a simple pclk cutoff. So, we query + * the hdmi library when validating each individual + * mode, when we know actual timings. + */ + pDpyEvo->maxPixelClockKHz = + /* + * This comes from the Windows display driver: + * (4 lanes * 12Gb per lane * + * FRL encoding i.e 16/18) / 1K + */ + ((4 * 12 * 1000 * 1000 * 16) / 18); + } + } + } + } + } else { + /* + * Connector and SOR both must be capable to drive dual-TMDS + * resolutions. + */ + NvBool bDualTMDS = sorCaps[orIndex].dualTMDS && + FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _LINK, _DUAL, + pDpyEvo->pConnectorEvo->dfpInfo); + + pDpyEvo->maxPixelClockKHz = (bDualTMDS ? + TMDS_DUAL_LINK_PCLK_MAX : + TMDS_SINGLE_LINK_PCLK_MAX); + + pDpyEvo->maxSingleLinkPixelClockKHz = TMDS_SINGLE_LINK_PCLK_MAX; + + if (pDpyEvo->allowDVISpecPClkOverride) { + pDpyEvo->maxPixelClockKHz = sorCaps[orIndex].maxTMDSClkKHz * + (bDualTMDS ? 2 : 1); + pDpyEvo->maxSingleLinkPixelClockKHz = + sorCaps[orIndex].maxTMDSClkKHz; + } + } + } + + /* + * Passive DP->DVI and DP->HDMI dongles may have a limit more + * restrictive than the one described above. Check whether one of + * these dongles is in use, and override the limit accordingly. 
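+     *
+     * When a passive DP->DVI or DP->HDMI dongle is detected,
+     * DpyGetPassiveDpDongleType() also returns that dongle's pixel clock
+     * limit in passiveDpDongleMaxPclkKHz, which is used below to clamp the
+     * limits computed above.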
+ */ + passiveDpDongleType = DpyGetPassiveDpDongleType(pDpyEvo, + &passiveDpDongleMaxPclkKHz); + + if (passiveDpDongleType != NV_EVO_PASSIVE_DP_DONGLE_UNUSED) { + pDpyEvo->maxPixelClockKHz = NV_MIN(passiveDpDongleMaxPclkKHz, + pDpyEvo->maxPixelClockKHz); + pDpyEvo->maxSingleLinkPixelClockKHz = pDpyEvo->maxPixelClockKHz; + } +} + +static void DpyGetDynamicDfpProperties( + NVDpyEvoPtr pDpyEvo, + const NvBool disableACPIBrightnessHotkeys) +{ + if (disableACPIBrightnessHotkeys) { + return; + } + if (!disableACPIBrightnessHotkeys) { + struct NvKmsGetDpyAttributeParams params; + nvkms_memset(¶ms, 0, sizeof(params)); + params.request.attribute = NV_KMS_DPY_ATTRIBUTE_BACKLIGHT_BRIGHTNESS; + + pDpyEvo->hasBacklightBrightness = + nvGetDpyAttributeEvo(pDpyEvo, ¶ms); + } +} +/* + * DpyGetDfpProperties() - get DFP properties: reduced blanking flags + * and general DFP flags + */ + +static void DpyGetStaticDfpProperties(NVDpyEvoPtr pDpyEvo) +{ + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (pConnectorEvo->legacyType != NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + return; + } + + if (nvDpyEvoIsDPMST(pDpyEvo)) { + // None of this stuff can be queried directly for dynamic DP MST + // displays. + // XXX DP MST: Should we fill in these fields somehow anyway? + return; + } + + pDpyEvo->internal = FALSE; + pDpyEvo->hdmiCapable = FALSE; + + if (pConnectorEvo->dfpInfo == 0x0) { + return; + } + /* Check if the connected DFP is HDMI capable */ + + if (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS, _HDMI_CAPABLE, _TRUE, + pConnectorEvo->dfpInfo)) { + pDpyEvo->hdmiCapable = TRUE; + } + + pDpyEvo->internal = nvConnectorIsInternal(pDpyEvo->pConnectorEvo); +} + +/*! + * Return true if the connector is single or dual link TMDS (not CRT, not DP). + */ +static NvBool IsConnectorTMDS(NVConnectorEvoPtr pConnectorEvo) +{ + NvU32 protocol = pConnectorEvo->or.protocol; + return ((pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) && + ((protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A) || + (protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B) || + (protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS))); +} + +/*! + * Query RM for the passive Displayport dongle type; this can influence + * the maximum pixel clock allowed on that display. + */ +static NVEvoPassiveDpDongleType +DpyGetPassiveDpDongleType(const NVDpyEvoRec *pDpyEvo, + NvU32 *passiveDpDongleMaxPclkKHz) +{ + NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS params = { 0 }; + NvU32 ret; + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NVEvoPassiveDpDongleType passiveDpDongleType = + NV_EVO_PASSIVE_DP_DONGLE_UNUSED; + + // The rmcontrol below fails if we try querying the dongle info on + // non-TMDS connectors. 
+ if (!IsConnectorTMDS(pConnectorEvo)) { + return passiveDpDongleType; + } + + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + params.subDeviceInstance = pDispEvo->displayOwner; + params.flags = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_GET_DISPLAYPORT_DONGLE_INFO, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failure reading DP dongle info " + "for display device %s.", pDpyEvo->name); + return passiveDpDongleType; + } + + if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS, + _ATTACHED, _TRUE, params.flags)) + { + if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS, _TYPE, _DP2DVI, + params.flags)) { + + passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2DVI; + + if (passiveDpDongleMaxPclkKHz) { + *passiveDpDongleMaxPclkKHz = TMDS_SINGLE_LINK_PCLK_MAX; + } + } else if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS, _TYPE, _DP2HDMI, + params.flags)) { + if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE, _TYPE, _1, + params.flags)) { + + passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_1; + + if (passiveDpDongleMaxPclkKHz) { + *passiveDpDongleMaxPclkKHz = params.maxTmdsClkRateHz / 1000; + } + } else if (FLD_TEST_DRF(0073_CTRL_DFP, + _GET_DISPLAYPORT_DONGLE_INFO_FLAGS_DP2TMDS_DONGLE, _TYPE, _2, + params.flags)) { + + passiveDpDongleType = NV_EVO_PASSIVE_DP_DONGLE_DP2HDMI_TYPE_2; + + if (passiveDpDongleMaxPclkKHz) { + *passiveDpDongleMaxPclkKHz = params.maxTmdsClkRateHz / 1000; + } + } + // For other dongle types: LFH_DVI (DMS59-DVI) and LFH_VGA (DMS59-VGA) breakout dongles, + // We consider them as native connection, hence we don't track passiveDpDongleType here + } + } + + return passiveDpDongleType; +} + + +/*! + * Validate an NVKMS client-specified NvKmsModeValidationFrequencyRanges. + */ +static NvBool ValidateFrequencyRanges( + const struct NvKmsModeValidationFrequencyRanges *pRanges) +{ + NvU32 i; + + if (pRanges->numRanges >= ARRAY_LEN(pRanges->range)) { + return FALSE; + } + + for (i = 0; i < pRanges->numRanges; i++) { + if (pRanges->range[i].high < pRanges->range[i].low) { + return FALSE; + } + if (pRanges->range[i].high == 0) { + return FALSE; + } + } + + return TRUE; +} + + +static void DpySetValidSyncsHelper( + struct NvKmsModeValidationFrequencyRanges *pRanges, + const NVParsedEdidEvoRec *pParsedEdid, + NvBool isHorizSync, NvBool ignoreEdidSource) +{ + NvBool found = FALSE; + NvU32 edidMin = 0, edidMax = 0; + + if (pParsedEdid->valid) { + if (isHorizSync) { + edidMin = pParsedEdid->limits.min_h_rate_hz; + edidMax = pParsedEdid->limits.max_h_rate_hz; + } else { + edidMin = pParsedEdid->limits.min_v_rate_hzx1k; + edidMax = pParsedEdid->limits.max_v_rate_hzx1k; + } + } + + /* If the client-specified ranges are invalid, clear them. */ + + if ((pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID) || + (pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID)) { + + if (!ValidateFrequencyRanges(pRanges)) { + nvkms_memset(pRanges, 0, sizeof(*pRanges)); + pRanges->source = NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_NONE; + } + } + + /* Use CLIENT_BEFORE_EDID, if provided. */ + + if (pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_BEFORE_EDID) { + found = TRUE; + } + + /* + * Otherwise, if EDID-reported sync ranges are available, use + * those. 
+ */ + if (!found && + !ignoreEdidSource && + (edidMin != 0) && (edidMax != 0)) { + + pRanges->numRanges = 1; + pRanges->range[0].low = edidMin; + pRanges->range[0].high = edidMax; + pRanges->source = NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID; + found = TRUE; + } + + /* + * Otherwise, use CLIENT_AFTER_EDID, if available. + */ + if (!found && + (pRanges->source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CLIENT_AFTER_EDID)) { + found = TRUE; + } + + /* + * Finally, fall back to conservative defaults if we could not + * find anything else; this will validate 1024x768 @ 60Hz. + */ + if (!found) { + + pRanges->numRanges = 1; + + if (isHorizSync) { + pRanges->range[0].low = 28000; + pRanges->range[0].high = 55000; + } else { + pRanges->range[0].low = 43000; + pRanges->range[0].high = 72000; + } + + pRanges->source = + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_CONSERVATIVE_DEFAULTS; + } +} + + +/*! + * Assign NvKmsModeValidationValidSyncs + * + * Assign the HorizSync and VertRefresh ranges in + * NvKmsModeValidationValidSyncs. The priority order is: + * + * (1) Any HorizSync and VertRefresh provided by the client that + * overrides the EDID (CLIENT_BEFORE_EDID). + * (2) Valid range information from the EDID. + * (3) Any HorizSync and VertRefresh specified by the client as a + * fallback for the EDID (CLIENT_AFTER_EDID). + * (4) Conservative builtin defaults. + * + * HorizSync and VertRefresh can come from different sources. (1) and + * (3) are provided through pValidSyncs. (2) and (4) get written to + * pValidSyncs. + * + * \param[in] pDpy The dpy whose EDID will be used. + * \param[in,out] pValidSyncs This is initialized by the client, and + * will be updated based on the frequency + * range priority described above. + */ +void nvDpySetValidSyncsEvo(const NVDpyEvoRec *pDpyEvo, + struct NvKmsModeValidationValidSyncs *pValidSyncs) +{ + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + + DpySetValidSyncsHelper(&pValidSyncs->horizSyncHz, + pParsedEdid, + TRUE, /* isHorizSync */ + pValidSyncs->ignoreEdidSource); + + DpySetValidSyncsHelper(&pValidSyncs->vertRefreshHz1k, + pParsedEdid, + FALSE, /* isHorizSync */ + pValidSyncs->ignoreEdidSource); +} + + +/* + * ReadEdidFromDP() - query the EDID for the specified display device from the + * DP lib. 
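+ *
+ * Returns FALSE if the dpy is not driven through the DP library, or if the
+ * EDID cannot be fetched from it; the caller then falls back to reading the
+ * EDID through resman.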
+ */ + +static NvBool ReadEdidFromDP(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NvU8 *pNewEdid = NULL; + int newEdidLength; + + if (!nvDpyUsesDPLib(pDpyEvo)) { + return FALSE; + } + + /* get size and allocate space for the EDID data */ + newEdidLength = nvDPGetEDIDSize(pDpyEvo); + if (newEdidLength == 0) { + goto fail; + } + + pNewEdid = nvCalloc(newEdidLength, 1); + + if (pNewEdid == NULL) { + goto fail; + } + + if (!nvDPGetEDID(pDpyEvo, pNewEdid, newEdidLength)) { + goto fail; + } + + pEdid->buffer = pNewEdid; + pEdid->length = newEdidLength; + + return TRUE; + + fail: + + nvFree(pNewEdid); + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read EDID for display device %s", + pDpyEvo->name); + return FALSE; + +} // ReadEdidFromDP() + + + +/* + * ReadEdidFromResman() - query the EDID for the specified display device + */ + +static NvBool ReadEdidFromResman(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid, + NvKmsEdidReadMode readMode) +{ + NvU32 ret; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *getEdidParams; + int retryEdidReadCount = 0; + NvBool success = FALSE; + + if (nvDpyEvoIsDPMST(pDpyEvo)) { + // RM doesn't track this device, so leave the EDID alone. + return TRUE; + } + + getEdidParams = nvCalloc(sizeof(*getEdidParams), 1); + if (getEdidParams == NULL) { + goto done; + } + + query_edid: + + getEdidParams->subDeviceInstance = pDispEvo->displayOwner; + getEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + getEdidParams->flags = NV0073_CTRL_SPECIFIC_GET_EDID_FLAGS_COPY_CACHE_NO; + + if (readMode == NVKMS_EDID_READ_MODE_ACPI) { + getEdidParams->flags |= DRF_DEF(0073_CTRL_SPECIFIC, _GET_EDID_FLAGS, + _DISPMUX_READ_MODE, _ACPI); + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, + getEdidParams, sizeof(*getEdidParams)); + + if ((ret != NVOS_STATUS_SUCCESS) || (getEdidParams->bufferSize <= 0)) { + /* WAR for Bug 777646: retry reading the EDID on error for DP + * devices to avoid possible TDR assertion in the RM. + * + * XXX This should be moved to the DP library. + */ + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) && + (retryEdidReadCount < NV_DP_READ_EDID_RETRIES)) { + retryEdidReadCount++; + + nvkms_usleep(NV_DP_REREAD_EDID_DELAY_USEC); + + goto query_edid; + } + goto done; + } + + pEdid->buffer = nvCalloc(getEdidParams->bufferSize, 1); + + if (pEdid->buffer == NULL) { + goto done; + } + + nvkms_memcpy(pEdid->buffer, &getEdidParams->edidBuffer, + getEdidParams->bufferSize); + pEdid->length = getEdidParams->bufferSize; + + success = TRUE; + + done: + + nvFree(getEdidParams); + + if (!success) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to read EDID for display device %s", + pDpyEvo->name); + } + + return success; +} // ReadEdidFromResman() + + +/* + * Check if the EDID meets basic validation criteria. 
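+ *
+ * Each validation failure is described in pInfoString. Returns TRUE if the
+ * EDID is usable, i.e. if no errors remain after discarding the failures we
+ * tolerate (extension block DTD errors, and checksum errors when
+ * ignoreEdidChecksum is set).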
+ */ +static NvBool ValidateEdid(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid, + NVEvoInfoStringPtr pInfoString, + const NvBool ignoreEdidChecksum) +{ + NvU32 status, tmpStatus; + int errorCount = 0; + + status = NvTiming_EDIDValidationMask(pEdid->buffer, pEdid->length, TRUE); + tmpStatus = status; + + if (status == 0) { + return TRUE; + } + + nvEvoLogInfoString(pInfoString, + "The EDID read for display device %s is invalid:", + pDpyEvo->name); + + /* + * Warn about every error we know about, masking it out of tmpStatus, then + * warn about an unknown error if there are still any bits remaining in + * tmpStatus. + */ + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION)) { + nvEvoLogInfoString(pInfoString, + "- The EDID has an unrecognized version."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_VERSION); + errorCount++; + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE)) { + nvEvoLogInfoString(pInfoString, + "- The EDID is too short."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_SIZE); + errorCount++; + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM)) { + /* + * XXX NVKMS TODO: massage wording to not reference X + * configuration option. + */ + nvEvoLogInfoString(pInfoString, + "- The EDID has a bad checksum. %s", + ignoreEdidChecksum ? "This error will be ignored. Note " + "that an EDID with a bad checksum could indicate a " + "corrupt EDID. A corrupt EDID may have mode timings " + "beyond the capabilities of your display, and could " + "damage your hardware. Please use with care." : + "The \"IgnoreEDIDChecksum\" X configuration option may " + "be used to attempt using mode timings in this EDID in " + "spite of this error. A corrupt EDID may have mode " + "timings beyond the capabilities of your display, and " + "could damage your hardware. Please use with care."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM); + errorCount++; + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT)) { + nvEvoLogInfoString(pInfoString, + "- The EDID has a bad range limit."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_RANGE_LIMIT); + errorCount++; + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD)) { + nvEvoLogInfoString(pInfoString, + "- The EDID has a bad detailed timing descriptor."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_DTD); + errorCount++; + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD)) { + nvEvoLogInfoString(pInfoString, + "- The EDID has an extension block with a bad detailed " + "timing descriptor."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD); + errorCount++; + } + + if (status & + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT)) { + nvEvoLogInfoString(pInfoString, + "- The EDID extension block is invalid."); + tmpStatus &= ~NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT); + errorCount++; + } + + if (tmpStatus) { + nvEvoLogInfoString(pInfoString, + "- The EDID has an unrecognized error."); + errorCount++; + } + + /* + * Unset the bits for errors we don't care about (invalid DTDs in the + * extension block, or checksum errors if ignoreEdidChecksum is in use) + * then return true if there are any remaining errors we do care about. 
+ */ + if (ignoreEdidChecksum) { + status &= ~(NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_CHECKSUM)); + } + + if (status == + NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD)) { + /* + * If the only problem with the EDID is invalid DTDs in the extension + * block, don't reject the EDID; those timings can be safely skipped in + * NvTiming_ParseEDIDInfo()/parse861ExtDetailedTiming() + */ + nvEvoLogInfoString(pInfoString, + "This bad detailed timing descriptor will be ignored."); + } + + status &= ~(NVT_EDID_VALIDATION_ERR_MASK(NVT_EDID_VALIDATION_ERR_EXT_DTD)); + + return (status == 0); +} + +static const char *GetColorDepthBpc(NVT_COLORDEPTH colorDepth) +{ + static char buffer[32]; + NVEvoInfoStringRec infoString; + NvBool first = TRUE; + int i; + + struct { + NvBool val; + int bpc; + } table[] = { + { colorDepth.bpc.bpc6, 6 }, + { colorDepth.bpc.bpc8, 8 }, + { colorDepth.bpc.bpc10, 10 }, + { colorDepth.bpc.bpc12, 12 }, + { colorDepth.bpc.bpc14, 14 }, + { colorDepth.bpc.bpc16, 16 }, + }; + + nvInitInfoString(&infoString, buffer, sizeof(buffer)); + + buffer[0] = '\0'; + + for (i = 0; i < ARRAY_LEN(table); i++) { + if (table[i].val) { + nvEvoLogInfoStringRaw(&infoString, "%s%d", + first ? "" : ", ", + table[i].bpc); + first = FALSE; + } + } + + return buffer; +} + + +/* + * Log information about the EDID. + */ + +static void LogEdid(NVDpyEvoPtr pDpyEvo, NVEvoInfoStringPtr pInfoString) +{ + int k; + NVParsedEdidEvoPtr pParsedEdid; + + static const struct { + NVT_TIMING_TYPE type; + const char *name; + } mode_type_table[] = { + { NVT_TYPE_DMT, "Display Monitor Timings" }, + { NVT_TYPE_GTF, "Generalized Timing Formula Timings" }, + { NVT_TYPE_ASPR, "ASPR Timings"}, + { NVT_TYPE_NTSC_TV, "NTSC Timings" }, + { NVT_TYPE_PAL_TV, "PAL Timings" }, + { NVT_TYPE_CVT, "Coordinated Video Timings"}, + { NVT_TYPE_CVT_RB, "Reduced Blanking Coordinated Video Timings" }, + { NVT_TYPE_CUST, "Customized Timings" }, + { NVT_TYPE_EDID_STD, "Standard Timings" }, + { NVT_TYPE_EDID_DTD, "Detailed Timings" }, + { NVT_TYPE_EDID_CVT, "Coordinated Video Timings" }, + { NVT_TYPE_EDID_EST, "Established Timings"}, + { NVT_TYPE_EDID_861ST, "CEA-861B Timings" }, + { NVT_TYPE_NV_PREDEFINED, "Predefined Timings" }, + { NVT_TYPE_DMT_RB, "Reduced Blanking Display Monitor Timings" }, + { NVT_TYPE_EDID_EXT_DTD, "Extension Block Detailed Timings" }, + { NVT_TYPE_SDTV, "SDTV Timings "}, + { NVT_TYPE_HDTV, "HDTV Timings" }, + { NVT_TYPE_SMPTE, "SMPTE Timings" }, + { NVT_TYPE_EDID_VTB_EXT, "VTB Extension Timings" }, + { NVT_TYPE_EDID_VTB_EXT_STD, "VTB Extension Detailed Timings" }, + { NVT_TYPE_EDID_VTB_EXT_DTD, "VTB Extension Standard Timings" }, + { NVT_TYPE_EDID_VTB_EXT_CVT, "VTB Extension CVT Timings" }, + { NVT_TYPE_HDMI_STEREO, "HDMI Stereo Timings" }, + { NVT_TYPE_DISPLAYID_1, "DisplayID Type 1 Timings" }, + { NVT_TYPE_DISPLAYID_2, "DisplayID Type 2 Timings" }, + { NVT_TYPE_HDMI_EXT, "HDMI Extended Resolution Timings" }, + { NVT_TYPE_CUST_AUTO, "Customized Auto Timings" }, + { NVT_TYPE_CUST_MANUAL, "Customized Manual Timings" }, + { NVT_TYPE_CVT_RB_2,"Reduced Blanking Coordinated Video Timings, v2" }, + }; + + /* + * Trigger a warning if new NVT_TIMING_TYPE values are added + * without updating this function. + * + * If a warning is produced about unhandled enum in the below + * switch statement, please update both the switch statement and + * mode_type_table[], or contact the sw-nvkms email alias. 
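+     *
+     * (The switch below has no effect at run time; every case simply
+     * breaks. It exists only to provoke that compiler warning.)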
+ */ + if (pDpyEvo->parsedEdid.valid) { + for (k = 0; k < pDpyEvo->parsedEdid.info.total_timings; k++) { + NvU32 status = pDpyEvo->parsedEdid.info.timing[k].etc.status; + NVT_TIMING_TYPE type = NVT_GET_TIMING_STATUS_TYPE(status); + + switch (type) { + case NVT_TYPE_DMT: + case NVT_TYPE_GTF: + case NVT_TYPE_ASPR: + case NVT_TYPE_NTSC_TV: + case NVT_TYPE_PAL_TV: + case NVT_TYPE_CVT: + case NVT_TYPE_CVT_RB: + case NVT_TYPE_CUST: + case NVT_TYPE_EDID_DTD: + case NVT_TYPE_EDID_STD: + case NVT_TYPE_EDID_EST: + case NVT_TYPE_EDID_CVT: + case NVT_TYPE_EDID_861ST: + case NVT_TYPE_NV_PREDEFINED: + case NVT_TYPE_DMT_RB: + case NVT_TYPE_EDID_EXT_DTD: + case NVT_TYPE_SDTV: + case NVT_TYPE_HDTV: + case NVT_TYPE_SMPTE: + case NVT_TYPE_EDID_VTB_EXT: + case NVT_TYPE_EDID_VTB_EXT_STD: + case NVT_TYPE_EDID_VTB_EXT_DTD: + case NVT_TYPE_EDID_VTB_EXT_CVT: + case NVT_TYPE_HDMI_STEREO: + case NVT_TYPE_DISPLAYID_1: + case NVT_TYPE_DISPLAYID_2: + case NVT_TYPE_HDMI_EXT: + case NVT_TYPE_CUST_AUTO: + case NVT_TYPE_CUST_MANUAL: + case NVT_TYPE_CVT_RB_2: + default: + break; + } + break; + } + } + + nvEvoLogInfoString(pInfoString, ""); + nvEvoLogInfoString(pInfoString, + "--- EDID for %s ---", pDpyEvo->name); + + if (!pDpyEvo->parsedEdid.valid) { + nvEvoLogInfoString(pInfoString, ""); + nvEvoLogInfoString(pInfoString, "No EDID Available."); + nvEvoLogInfoString(pInfoString, ""); + goto done; + } + + pParsedEdid = &pDpyEvo->parsedEdid; + + nvEvoLogInfoString(pInfoString, + "EDID Version : %d.%d", + pParsedEdid->info.version >> 8, + pParsedEdid->info.version & 0xff); + + nvEvoLogInfoString(pInfoString, + "Manufacturer : %s", + pParsedEdid->info.manuf_name); + + nvEvoLogInfoString(pInfoString, + "Monitor Name : %s", + pParsedEdid->monitorName); + + nvEvoLogInfoString(pInfoString, + "Product ID : 0x%04x", + pParsedEdid->info.product_id); + + nvEvoLogInfoString(pInfoString, + "32-bit Serial Number : 0x%08x", + pParsedEdid->info.serial_number); + + nvEvoLogInfoString(pInfoString, + "Serial Number String : %s", + pParsedEdid->serialNumberString); + + nvEvoLogInfoString(pInfoString, + "Manufacture Date : %d, week %d", + pParsedEdid->info.year, + pParsedEdid->info.week); + + /* + * despite the name feature_ver_1_3, the below features are + * reported on all EDID versions + */ + nvEvoLogInfoString(pInfoString, + "DPMS Capabilities :%s%s%s", + pParsedEdid->info.u.feature_ver_1_3.support_standby ? + " Standby" : "", + pParsedEdid->info.u.feature_ver_1_3.support_suspend ? + " Suspend" : "", + pParsedEdid->info.u.feature_ver_1_3.support_active_off ? + " Active Off" : ""); + + nvEvoLogInfoString(pInfoString, + "Input Type : %s", + pParsedEdid->info.input.isDigital ? + "Digital" : "Analog"); + + nvEvoLogInfoString(pInfoString, + "Prefer first detailed timing : %s", + pParsedEdid->info.u.feature_ver_1_3.preferred_timing_is_native ? + "Yes" : "No"); + + if (pParsedEdid->info.version == NVT_EDID_VER_1_3) { + nvEvoLogInfoString(pInfoString, + "Supports GTF : %s", + pParsedEdid->info.u.feature_ver_1_3.support_gtf ? + "Yes" : "No"); + } + + if (pParsedEdid->info.version >= NVT_EDID_VER_1_4) { + NvBool continuousFrequency = FALSE; + if (pParsedEdid->info.input.isDigital) { + continuousFrequency = + pParsedEdid->info.u.feature_ver_1_4_digital.continuous_frequency; + } else { + continuousFrequency = + pParsedEdid->info.u.feature_ver_1_4_analog.continuous_frequency; + } + + nvEvoLogInfoString(pInfoString, + "Supports Continuous Frequency: %s", + continuousFrequency ? 
"Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + "EDID 1.4 YCbCr 422 support : %s", + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_422 + ? "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + "EDID 1.4 YCbCr 444 support : %s", + pParsedEdid->info.u.feature_ver_1_4_digital.support_ycrcb_444 + ? "Yes" : "No"); + } + + nvEvoLogInfoString(pInfoString, + "Maximum Image Size : %d mm x %d mm", + pParsedEdid->info.screen_size_x * 10, /* screen_size_* is in cm */ + pParsedEdid->info.screen_size_y * 10); + + nvEvoLogInfoString(pInfoString, + "Valid HSync Range : " + NV_FMT_DIV_1000_POINT_1 + " kHz - " NV_FMT_DIV_1000_POINT_1 " kHz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.min_h_rate_hz), + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_h_rate_hz)); + + nvEvoLogInfoString(pInfoString, + "Valid VRefresh Range : " + NV_FMT_DIV_1000_POINT_1 " Hz - " + NV_FMT_DIV_1000_POINT_1 " Hz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.min_v_rate_hzx1k), + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_v_rate_hzx1k)); + + nvEvoLogInfoString(pInfoString, + "EDID maximum pixel clock : " + NV_FMT_DIV_1000_POINT_1 " MHz", + NV_VA_DIV_1000_POINT_1(pParsedEdid->limits.max_pclk_10khz * 10)); + + if (pParsedEdid->info.nvdaVsdbInfo.valid) { + nvEvoLogInfoString(pInfoString, + "G-Sync capable : %s", + pParsedEdid->info.nvdaVsdbInfo.vrrData.v1.supportsVrr + ? "Yes" : "No"); + nvEvoLogInfoString(pInfoString, + "G-Sync minimum refresh rate : %d Hz", + pParsedEdid->info.nvdaVsdbInfo.vrrData.v1.minRefreshRate); + } + + nvLogEdidCea861InfoEvo(pDpyEvo, pInfoString); + + if (pParsedEdid->info.input.isDigital && + pParsedEdid->info.version >= NVT_EDID_VER_1_4) { + nvEvoLogInfoString(pInfoString, + "EDID bits per component : %d", + pParsedEdid->info.input.u.digital.bpc); + } + + /* print the tiled display extension block, if present */ + if (pParsedEdid->info.ext_displayid.tile_topology_id.vendor_id) { + const NVT_DISPLAYID_INFO *tile = &pParsedEdid->info.ext_displayid; + const char *tmp; + + nvEvoLogInfoString(pInfoString, + "Tiled display information :"); + nvEvoLogInfoString(pInfoString, + " Revision : %d", + tile->tiled_display_revision); + nvEvoLogInfoString(pInfoString, + " Single Enclosure : %s", + tile->tile_capability.bSingleEnclosure ? + "Yes" : "No"); + + tmp = "Unknown"; + switch (tile->tile_capability.multi_tile_behavior) { + case NVT_MULTI_TILE_BEHAVIOR_OTHER: + tmp = "Other"; + break; + case NVT_MULTI_TILE_BEHAVIOR_SOURCE_DRIVEN: + tmp = "Source-driven"; + break; + } + nvEvoLogInfoString(pInfoString, + " Multi-tile Behavior : %s", tmp); + + tmp = "Unknown"; + switch (tile->tile_capability.single_tile_behavior) { + case NVT_SINGLE_TILE_BEHAVIOR_OTHER: + tmp = "Other"; + break; + case NVT_SINGLE_TILE_BEHAVIOR_SOURCE_DRIVEN: + tmp = "Source-driven"; + break; + case NVT_SINGLE_TILE_BEHAVIOR_SCALE: + tmp = "Scale"; + break; + case NVT_SINGLE_TILE_BEHAVIOR_CLONE: + tmp = "Clone"; + break; + } + nvEvoLogInfoString(pInfoString, + " Single-tile Behavior : %s", tmp); + nvEvoLogInfoString(pInfoString, + " Topology : %d row%s, %d column%s", + tile->tile_topology.row, + (tile->tile_topology.row == 1) ? "" : "s", + tile->tile_topology.col, + (tile->tile_topology.col == 1) ? 
"" : "s"); + nvEvoLogInfoString(pInfoString, + " Location : (%d,%d)", + tile->tile_location.x, tile->tile_location.y); + nvEvoLogInfoString(pInfoString, + " Native Resolution : %dx%d", + tile->native_resolution.width, + tile->native_resolution.height); + if (tile->tile_capability.bHasBezelInfo) { + nvEvoLogInfoString(pInfoString, + " Bezel Information :"); + nvEvoLogInfoString(pInfoString, + " Pixel Density : %d", + tile->bezel_info.pixel_density); + nvEvoLogInfoString(pInfoString, + " Top : %d", + tile->bezel_info.top); + nvEvoLogInfoString(pInfoString, + " Bottom : %d", + tile->bezel_info.bottom); + nvEvoLogInfoString(pInfoString, + " Left : %d", + tile->bezel_info.right); + nvEvoLogInfoString(pInfoString, + " Right : %d", + tile->bezel_info.left); + } + nvEvoLogInfoString(pInfoString, + " Vendor ID : 0x%x", + tile->tile_topology_id.vendor_id); + nvEvoLogInfoString(pInfoString, + " Product ID : 0x%x", + tile->tile_topology_id.product_id); + nvEvoLogInfoString(pInfoString, + " Serial Number : 0x%x", + tile->tile_topology_id.serial_number); + } + + for (k = 0; k < ARRAY_LEN(mode_type_table); k++) { + + int i; + + /* scan through the ModeList to find a mode of the current type */ + + for (i = 0; i < pParsedEdid->info.total_timings; i++) { + NVT_TIMING *pTiming = &pParsedEdid->info.timing[i]; + if (mode_type_table[k].type == + NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status)) { + break; + } + } + + /* if there are none of this type, skip to the next mode type */ + + if (i == pParsedEdid->info.total_timings) { + continue; + } + + nvEvoLogInfoString(pInfoString, ""); + nvEvoLogInfoString(pInfoString, "%s:", mode_type_table[k].name); + + for (i = 0; i < pParsedEdid->info.total_timings; i++) { + + NVT_TIMING *pTiming = &pParsedEdid->info.timing[i]; + NVT_TIMING_TYPE type = + NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status); + int vScale = 1; + + if (mode_type_table[k].type != type) { + continue; + } + + if ((type == NVT_TYPE_EDID_EST) || + (type == NVT_TYPE_EDID_STD)) { + + nvEvoLogInfoString(pInfoString, + " %-4d x %-4d @ %d Hz", + NV_NVT_TIMING_HVISIBLE(pTiming), + NV_NVT_TIMING_VVISIBLE(pTiming), + pTiming->etc.rr); + continue; + } + + if (pTiming->interlaced) { + vScale = 2; + } + + nvEvoLogInfoString(pInfoString, + " %-4d x %-4d @ %d Hz", + NV_NVT_TIMING_HVISIBLE(pTiming), + NV_NVT_TIMING_VVISIBLE(pTiming), + pTiming->etc.rr); + + nvEvoLogInfoString(pInfoString, + " Pixel Clock : " + NV_FMT_DIV_1000_POINT_2 " MHz", + NV_VA_DIV_1000_POINT_2(pTiming->pclk + * 10)); + + nvEvoLogInfoString(pInfoString, + " HRes, HSyncStart : %d, %d", + pTiming->HVisible, + pTiming->HVisible + + pTiming->HFrontPorch); + + nvEvoLogInfoString(pInfoString, + " HSyncEnd, HTotal : %d, %d", + pTiming->HVisible + + pTiming->HFrontPorch + + pTiming->HSyncWidth, + pTiming->HTotal); + + nvEvoLogInfoString(pInfoString, + " VRes, VSyncStart : %d, %d", + pTiming->VVisible * vScale, + (pTiming->VVisible + + pTiming->VFrontPorch) * vScale); + + nvEvoLogInfoString(pInfoString, + " VSyncEnd, VTotal : %d, %d", + (pTiming->VVisible + + pTiming->VFrontPorch + + pTiming->VSyncWidth) * vScale, + pTiming->VTotal * vScale); + + nvEvoLogInfoString(pInfoString, + " H/V Polarity : %s/%s", + (pTiming->HSyncPol == NVT_H_SYNC_NEGATIVE) ? + "-" : "+", + (pTiming->VSyncPol == NVT_V_SYNC_NEGATIVE) ? 
+ "-" : "+"); + + if (pTiming->interlaced) { + nvEvoLogInfoString(pInfoString, + " Interlaced : yes"); + } + if (pTiming->etc.flag & NVT_FLAG_NV_DOUBLE_SCAN_TIMING) { + nvEvoLogInfoString(pInfoString, + " Double Scanned : yes"); + } + + if (type == NVT_TYPE_EDID_861ST) { + nvEvoLogInfoString(pInfoString, + " CEA Format : %d", + NVT_GET_CEA_FORMAT(pTiming->etc.status)); + } + + if (NV_NVT_TIMING_HAS_ASPECT_RATIO(pTiming)) { + nvEvoLogInfoString(pInfoString, + " Aspect Ratio : %d:%d", + NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming), + NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming)); + } + + if (NV_NVT_TIMING_HAS_IMAGE_SIZE(pTiming)) { + nvEvoLogInfoString(pInfoString, + " Image Size : %d mm x %d mm", + NV_NVT_TIMING_IMAGE_SIZE_WIDTH(pTiming), + NV_NVT_TIMING_IMAGE_SIZE_HEIGHT(pTiming)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs)) { + nvEvoLogInfoString(pInfoString, + " RGB 444 bpcs : %s", + GetColorDepthBpc(pTiming->etc.rgb444)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv444.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 444 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv444)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv422.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 422 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv422)); + } + + if (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv420.bpcs)) { + nvEvoLogInfoString(pInfoString, + " YUV 420 bpcs : %s", + GetColorDepthBpc(pTiming->etc.yuv420)); + } + } // i + } // k + + nvEvoLogInfoString(pInfoString, ""); + + done: + nvEvoLogInfoString(pInfoString, + "--- End of EDID for %s ---", pDpyEvo->name); + nvEvoLogInfoString(pInfoString, ""); +} + + + +/* + * Clear the EDID and related fields in the display device data + * structure. + */ + +static void ClearEdid(NVDpyEvoPtr pDpyEvo) +{ + NVEdidRec edid = { }; + NVEvoInfoStringRec infoString; + nvInitInfoString(&infoString, NULL, 0); + + if (EdidHasChanged(pDpyEvo, &edid, NULL)) { + ApplyNewEdid(pDpyEvo, &edid, NULL, &infoString); + } +} + + + +/* + * ClearCustomEdid() - send an empty custom EDID to RM; this is to + * clear out any stale state in RM about custom EDIDs that we may have + * told RM about previous runs of X. + */ + +static void ClearCustomEdid(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *setEdidParams; + + if (nvDpyEvoIsDPMST(pDpyEvo)) { + // RM doesn't track this device, so leave the EDID alone. + return; + } + + setEdidParams = nvCalloc(sizeof(*setEdidParams), 1); + if (setEdidParams == NULL) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Unable to clear custom EDID for display device %s", + pDpyEvo->name); + return; + } + + setEdidParams->subDeviceInstance = pDispEvo->displayOwner; + setEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + setEdidParams->bufferSize = 0; + + /* ignore the NvRmControl() return value */ + + (void) nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2, + setEdidParams, sizeof(*setEdidParams)); + + nvFree(setEdidParams); +} // ClearCustomEdid() + + + +/* + * WriteEdidToResman() - send a custom EDID to RM. 
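+ *
+ * The EDID bytes are copied into an NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS
+ * buffer and submitted with NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2; an EDID
+ * larger than the params buffer is rejected, and any failure is logged as a
+ * warning.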
+ */ + +static void WriteEdidToResman(const NVDpyEvoRec *pDpyEvo, + const NVEdidRec *pEdid) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *setEdidParams = NULL; + NvU32 status = NVOS_STATUS_ERROR_OPERATING_SYSTEM; + + if (pEdid->length > sizeof(setEdidParams->edidBuffer)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "EDID for display device %s is too long for NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2", + pDpyEvo->name); + goto done; + } + + setEdidParams = nvCalloc(sizeof(*setEdidParams), 1); + if (setEdidParams == NULL) { + goto done; + } + + setEdidParams->subDeviceInstance = pDispEvo->displayOwner; + setEdidParams->displayId = nvDpyEvoGetConnectorId(pDpyEvo); + nvkms_memcpy(&setEdidParams->edidBuffer, pEdid->buffer, pEdid->length); + setEdidParams->bufferSize = pEdid->length; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_EDID_V2, + setEdidParams, sizeof(*setEdidParams)); + +done: + if (status != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Failure processing EDID for display device " + "%s.", pDpyEvo->name); + } + + nvFree(setEdidParams); +} // WriteEdidToResman() + + +/* + * NvTiming_ParseEDIDInfo() will ignore some modes that are blatantly + * wrong, so we need to apply any patching to the EDID bytes before + * parsing the EDID. + */ +static void PrePatchEdid(const NVDpyEvoRec *pDpyEvo, NVEdidPtr pEdid, + NVEvoInfoStringPtr pInfoString) +{ + NvU8 *pEdidData = pEdid->buffer; + + if (pEdid->buffer == NULL || pEdid->length < 128) { + return; + } + + /* + * Work around bug 628240: some AUO flat panels have invalid + * native modes where HSyncEnd is larger than HTotal, putting the + * end of the sync pulse several columns into the active region of + * the next frame. 
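+ * The check below matches the exact 18-byte detailed timing descriptor at
+ * offset 0x36 (plus the block checksum at 0x7f), rewrites that descriptor in
+ * place, and updates the checksum byte accordingly.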
AUO confirmed these corrected timings: + * + * "1366x768" 69.30 1366 1398 1422 1432 768 771 775 806 -hsync -vsync + */ + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP && + pEdidData[0x36] == 0x26 && + pEdidData[0x37] == 0x1b && + pEdidData[0x38] == 0x56 && + pEdidData[0x39] == 0x47 && + pEdidData[0x3a] == 0x50 && + pEdidData[0x3b] == 0x00 && + pEdidData[0x3c] == 0x26 && + pEdidData[0x3d] == 0x30 && + pEdidData[0x3e] == 0x30 && + pEdidData[0x3f] == 0x20 && + pEdidData[0x40] == 0x34 && + pEdidData[0x41] == 0x00 && + pEdidData[0x42] == 0x58 && + pEdidData[0x43] == 0xc1 && + pEdidData[0x44] == 0x10 && + pEdidData[0x45] == 0x00 && + pEdidData[0x46] == 0x00 && + pEdidData[0x47] == 0x18 && + pEdidData[0x7f] == 0x2e) { + + pEdidData[0x36] = 0x12; + pEdidData[0x37] = 0x1b; + pEdidData[0x38] = 0x56; + pEdidData[0x39] = 0x42; + pEdidData[0x3a] = 0x50; + pEdidData[0x3b] = 0x00; + pEdidData[0x3c] = 0x26; + pEdidData[0x3d] = 0x30; + pEdidData[0x3e] = 0x20; + pEdidData[0x3f] = 0x18; + pEdidData[0x40] = 0x34; + pEdidData[0x41] = 0x00; + pEdidData[0x42] = 0x58; + pEdidData[0x43] = 0xc1; + pEdidData[0x44] = 0x10; + pEdidData[0x45] = 0x00; + pEdidData[0x46] = 0x00; + pEdidData[0x47] = 0x18; + pEdidData[0x7f] = 0x5f; + + nvEvoLogInfoString(pInfoString, "Fixed invalid mode for 1366x768"); + } +} + +/* + * CreateParsedEdidFromNVT_TIMING() - Puts modetiming data from RM into an EDID format + */ +static void CreateParsedEdidFromNVT_TIMING( + NVT_TIMING *pTimings, + NVParsedEdidEvoPtr pParsedEdid) +{ + nvkms_memset(pParsedEdid, 0, sizeof(*pParsedEdid)); + pParsedEdid->info.total_timings = 1; + nvkms_memcpy(&pParsedEdid->info.timing[0], pTimings, sizeof(*pTimings)); + pParsedEdid->info.timing[0].etc.status = NVT_STATUS_CUST; + pParsedEdid->info.u.feature_ver_1_4_digital.continuous_frequency = FALSE; + pParsedEdid->info.version = NVT_EDID_VER_1_4; + pParsedEdid->info.input.isDigital = TRUE; + pParsedEdid->limits.min_h_rate_hz = 1; + pParsedEdid->limits.min_v_rate_hzx1k = 1; + pParsedEdid->limits.max_h_rate_hz = NV_U32_MAX; + pParsedEdid->limits.max_v_rate_hzx1k = NV_U32_MAX; + pParsedEdid->valid = TRUE; +} + +/* + * PatchAndParseEdid() - use the nvtiming library to parse the EDID data. The + * EDID data provided in the 'pEdid' argument may be patched or modified. 
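+ *
+ * Beyond parsing, this also computes the HSync/VRefresh frequency limits,
+ * extracts the monitor name and serial number descriptor, invalidates
+ * timings that require pixel repetition, and may shrink the EDID buffer to
+ * the actual size reported by the parsed EDID.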
+ */ + +static void PatchAndParseEdid( + const NVDpyEvoRec *pDpyEvo, + NVEdidPtr pEdid, + NVParsedEdidEvoPtr pParsedEdid, + NVEvoInfoStringPtr pInfoString) +{ + int i; + NVT_STATUS status; + NvU32 edidSize; + + if (pEdid->buffer == NULL || pEdid->length == 0) { + return; + } + + nvkms_memset(pParsedEdid, 0, sizeof(*pParsedEdid)); + + PrePatchEdid(pDpyEvo, pEdid, pInfoString); + + /* parse the majority of information from the EDID */ + + status = NvTiming_ParseEDIDInfo(pEdid->buffer, pEdid->length, + &pParsedEdid->info); + + if (status != NVT_STATUS_SUCCESS) { + return; + } + + /* interpret the frequency range limits from the EDID */ + + NvTiming_CalculateEDIDLimits(&pParsedEdid->info, &pParsedEdid->limits); + + /* get the user-friendly monitor name */ + + NvTiming_GetMonitorName(&pParsedEdid->info, + (NvU8 *) &pParsedEdid->monitorName); + nvAssert(pParsedEdid->monitorName[0] != '\0'); + + /* find the serial number string */ + + pParsedEdid->serialNumberString[0] = '\0'; + + for (i = 0; i < NVT_EDID_MAX_LONG_DISPLAY_DESCRIPTOR; i++) { + if (pParsedEdid->info.ldd[i].tag == NVT_EDID_DISPLAY_DESCRIPTOR_DPSN) { + nvkms_strncpy( + pParsedEdid->serialNumberString, + (const char *)pParsedEdid->info.ldd[i].u.serial_number.str, + sizeof(pParsedEdid->serialNumberString)); + pParsedEdid->serialNumberString[ + sizeof(pParsedEdid->serialNumberString) - 1] = '\0'; + break; + } + } + + + for (i = 0; i < pParsedEdid->info.total_timings; i++) { + NVT_TIMING *pTiming = &pParsedEdid->info.timing[i]; + + /* patch up RRx1k for 640x480@60Hz */ + + if (IsEdid640x480_60_NVT_TIMING(pTiming)) { + pTiming->etc.rrx1k = 59940; + } + + /* + * Invalidate modes that require pixel repetition (i.e., modes + * that don't support Pixel Repetition 0). See bug 1459376. + */ + + nvAssert(pTiming->etc.rep != 0); + + if ((pTiming->etc.rep & NVBIT(0)) == 0) { + pTiming->etc.status = 0; + } + } + + pParsedEdid->valid = TRUE; + + /* resize the EDID buffer, if necessary */ + + edidSize = NVT_EDID_ACTUAL_SIZE(&pParsedEdid->info); + + if (edidSize < pEdid->length) { + NvU8 *pEdidData = nvAlloc(edidSize); + + if (pEdidData != NULL) { + nvkms_memcpy(pEdidData, pEdid->buffer, edidSize); + + nvFree(pEdid->buffer); + + pEdid->buffer = pEdidData; + pEdid->length = edidSize; + } + } +} + + +/*! + * Assign NVDpyEvoRec::name. 
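+ *
+ * For example (illustrative values only), a DP MST dpy with EDID name
+ * "DELL U2410" on connector DP-2 at topology address 1.8 would be named
+ * "DELL U2410 (DP-2.1.8)", combining the forms described below.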
+ * + * The name has the form: + * + * "edidName (typeName-N.dpAddress)" + * + * If edidName is unavailable, then it, and the parentheses are omitted: + * + * "typeName-N.dpAddress" + * "typeName-N" + * + * if dpAddress is unavailable, then the ".dpAddress" is omitted: + * + * "edidName (typeName-N)" + * "typeName-N" + */ +static void AssignDpyEvoName(NVDpyEvoPtr pDpyEvo) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + const char *edidName = ""; + const char *openParen = ""; + const char *closeParen = ""; + const char *dpAddress = ""; + const char *dpAddressSeparator = ""; + + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.monitorName[0] != '\0') { + edidName = pDpyEvo->parsedEdid.monitorName; + openParen = " ("; + closeParen = ")"; + } + + if (pDpyEvo->dp.addressString != NULL) { + dpAddress = pDpyEvo->dp.addressString; + dpAddressSeparator = "."; + } + + nvkms_snprintf(pDpyEvo->name, sizeof(pDpyEvo->name), + "%s%s%s%s%s%s", + edidName, + openParen, + pConnectorEvo->name, + dpAddressSeparator, + dpAddress, + closeParen); + + pDpyEvo->name[sizeof(pDpyEvo->name) - 1] = '\0'; +} + +enum NvKmsDpyAttributeDigitalSignalValue +nvGetDefaultDpyAttributeDigitalSignalValue(const NVConnectorEvoRec *pConnectorEvo) +{ + enum NvKmsDpyAttributeDigitalSignalValue signal = + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS; + + if (pConnectorEvo->legacyType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DISPLAYPORT; + } else { + nvAssert((pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) || + (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_DSI)); + + if (pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM) { + signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_LVDS; + } else if (pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI) { + signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_DSI; + } else { + // May be later changed to HDMI_FRL at modeset time. + signal = NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS; + } + } + } + + return signal; +} + +NVDpyEvoPtr nvAllocDpyEvo(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NVDpyId dpyId, const char *dpAddress) +{ + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = nvCalloc(1, sizeof(*pDpyEvo)); + + if (pDpyEvo == NULL) { + return NULL; + } + + pDpyEvo->pDispEvo = pDispEvo; + pDpyEvo->pConnectorEvo = pConnectorEvo; + pDpyEvo->head = NV_INVALID_HEAD; + pDpyEvo->id = dpyId; + + nvListAdd(&pDpyEvo->dpyListEntry, &pDispEvo->dpyList); + + if (dpAddress) { + pDpyEvo->dp.addressString = nvStrDup(dpAddress); + pDispEvo->displayPortMSTIds = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->displayPortMSTIds); + + if (!nvConnectorIsDPSerializer(pConnectorEvo)) { + pDispEvo->dynamicDpyIds = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->dynamicDpyIds); + } + } + + AssignDpyEvoName(pDpyEvo); + + nvDpyProbeMaxPixelClock(pDpyEvo); + + pDpyEvo->requestedDithering.state = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO; + pDpyEvo->requestedDithering.mode = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO; + pDpyEvo->requestedDithering.depth = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO; + + // Initialize DP link rate and lane count to sane values. + // This is normally done in nvDPLibUpdateDpyLinkConfiguration, + // but do it here as well in case we query flat panel properties for + // screenless DP devices. 
+ if (nvConnectorUsesDPLib(pConnectorEvo)) { + pDpyEvo->dp.linkRate = 0; + pDpyEvo->dp.laneCount = NV0073_CTRL_CMD_DP_GET_LINK_CONFIG_LANE_COUNT_1; + pDpyEvo->dp.connectorType = NV_KMS_DPY_ATTRIBUTE_DISPLAYPORT_CONNECTOR_TYPE_UNKNOWN; + pDpyEvo->dp.sinkIsAudioCapable = FALSE; + } + + pDpyEvo->requestedColorSpace = + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB; + pDpyEvo->requestedColorRange = + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + + pDpyEvo->currentAttributes = NV_EVO_DEFAULT_ATTRIBUTES_SET; + pDpyEvo->currentAttributes.digitalSignal = + nvGetDefaultDpyAttributeDigitalSignalValue(pConnectorEvo); + + DpyGetStaticDfpProperties(pDpyEvo); + + return pDpyEvo; +} + + +void nvFreeDpyEvo(NVDispEvoPtr pDispEvo, NVDpyEvoPtr pDpyEvo) +{ + DpyDisconnectEvo(pDpyEvo); + + // Let the DP library host implementation handle deleting a pDpy as if the + // library had notified it of a lost device. + nvDPDpyFree(pDpyEvo); + nvAssert(!pDpyEvo->dp.pDpLibDevice); + + pDispEvo->validDisplays = + nvDpyIdListMinusDpyId(pDispEvo->validDisplays, pDpyEvo->id); + + pDispEvo->displayPortMSTIds = + nvDpyIdListMinusDpyId(pDispEvo->displayPortMSTIds, pDpyEvo->id); + pDispEvo->dynamicDpyIds = + nvDpyIdListMinusDpyId(pDispEvo->dynamicDpyIds, pDpyEvo->id); + + nvListDel(&pDpyEvo->dpyListEntry); + + nvFree(pDpyEvo->dp.addressString); + nvFree(pDpyEvo); +} + + +/*! + * Return the pConnectorEvo associated with the given (static) display ID. + * + * XXX[DP] not valid for DP monitors, the connector will be known before + * initialization so this will not be needed. + * + * \param[in] pDisp The pDisp on which to search for the pConnector. + * \param[in] dpyId The ID of the connector to search for. + * + * \return The pConnectorEvo from pDisp that matches the ID, or NULL if + * no connector is found. + */ +NVConnectorEvoPtr nvGetConnectorFromDisp(NVDispEvoPtr pDispEvo, NVDpyId dpyId) +{ + NVConnectorEvoPtr pConnectorEvo; + + nvAssert(nvDpyIdIsInDpyIdList(dpyId, pDispEvo->connectorIds)); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvDpyIdsAreEqual(dpyId, pConnectorEvo->displayId)) { + return pConnectorEvo; + } + } + + nvAssert(!"Failed to find pDpy's connector!"); + return NULL; +} + +/* + * Construct the DP 1.3 YUV420 infoframe, and toggle it on or off based on + * whether or not YUV420 mode is in use. + */ +static void UpdateDpInfoFrames(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + + if (pHeadState->attributes.colorSpace == + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420) { + + // DPSDP_DP_VSC_SDP_DESCRIPTOR has a (dataSize, hb, db) layout, while + // NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS.aPacket needs to contain + // (hb, db) without dataSize, so this makes sdp->hb align with aPacket. 
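+        // The pointer math below places the descriptor so that sdp->hb
+        // coincides with aPacket[0] (verified by the nvAssert); dataSize,
+        // which precedes hb, falls just before aPacket and is never written.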
+ DPSDP_DP_VSC_SDP_DESCRIPTOR *sdp = + (DPSDP_DP_VSC_SDP_DESCRIPTOR *)(params.aPacket - + offsetof(DPSDP_DP_VSC_SDP_DESCRIPTOR, hb)); + + nvAssert((void *)&sdp->hb == (void *)params.aPacket); + + // Header + // Per DP1.3 spec + sdp->hb.hb0 = 0; + sdp->hb.hb1 = SDP_PACKET_TYPE_VSC; + sdp->hb.revisionNumber = SDP_VSC_REVNUM_STEREO_PSR2_COLOR; + sdp->hb.numValidDataBytes = SDP_VSC_VALID_DATA_BYTES_PSR2_COLOR; + + sdp->db.stereoInterface = 0; + sdp->db.psrState = 0; + sdp->db.contentType = SDP_VSC_CONTENT_TYPE_GRAPHICS; + sdp->db.pixEncoding = SDP_VSC_PIX_ENC_YCBCR420; + sdp->db.colorimetryFormat = SDP_VSC_COLOR_FMT_YCBCR_COLORIMETRY_ITU_R_BT709; + + switch (pTimings->pixelDepth) { + case NVKMS_PIXEL_DEPTH_30_444: + sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_10BPC; + break; + case NVKMS_PIXEL_DEPTH_24_444: + sdp->db.bitDepth = SDP_VSC_BIT_DEPTH_YCBCR_8BPC; + break; + default: + nvAssert(!"Invalid pixelDepth value"); + break; + } + + switch (pHeadState->attributes.colorRange) { + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL: + sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_VESA; + break; + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED: + sdp->db.dynamicRange = SDP_VSC_DYNAMIC_RANGE_CEA; + break; + default: + nvAssert(!"Invalid colorRange value"); + break; + } + + params.packetSize = sizeof(sdp->hb) + sdp->hb.numValidDataBytes; + + params.transmitControl = + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _YES) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _OTHER_FRAME, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _SINGLE_FRAME, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ON_HBLANK, _DISABLE); + } else { + params.transmitControl = + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _NO); + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET failed"); + } +} + +void nvUpdateInfoFrames(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pHeadState->activeDpys, pDispEvo); + + if (pDpyEvo == NULL) { + return; + } + + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + UpdateDpInfoFrames(pDispEvo, head); + } else { + nvUpdateHdmiInfoFrames(pDispEvo, + head, + &pHeadState->attributes, + nvEvoIsHDQualityVideoTimings(&pHeadState->timings), + &pHeadState->timings.infoFrameCtrl, + pDpyEvo); + } +} + +/*! + * nvDpyRequiresDualLinkEvo() - Returns whether or not the given mode exceeds + * the maximum single TMDS link pixel clock. + * + * \param[in] pDpyEvo display to check the maximum single link pixel clock + * + * \param[in] pTimings mode timings to check pixel clock + * + * \return TRUE if pixel clock exceeds display's maximum single link pixel + * clock + */ +NvBool nvDpyRequiresDualLinkEvo(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings) +{ + // Dual link HDMI is not possible. + nvAssert(!(nvDpyIsHdmiEvo(pDpyEvo) && + (pTimings->pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz))); + return (pTimings->pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz); +} + + +/*! + * Return EVO mode timings currently used with the given pDpyEvo. + * + * \param[in] pDpyEvo dpy whose mode timings should be returned. + * + * \return NULL if pDpyEvo is not active. 
Otherwise, return the mode + * timings programmed in the head that is driving the dpy. + */ +NVHwModeTimingsEvoPtr +nvGetCurrentModeTimingsForDpyEvo(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDispHeadStateEvoPtr pHeadState = NULL; + const NvU32 head = pDpyEvo->head; + + if (head == NV_INVALID_HEAD) { + return NULL; + } + + pHeadState = &pDispEvo->headState[head]; + + nvAssert(nvDpyIdIsInDpyIdList(pDpyEvo->id, pHeadState->activeDpys)); + + return &pHeadState->timings; +} + + +/*! + * Return the NVDpyEvoPtr that corresponds to the given dpyId, on the + * given NVDispEvoPtr, or NULL if no matching NVDpyEvoPtr can be + * found. + */ +NVDpyEvoPtr nvGetDpyEvoFromDispEvo(const NVDispEvoRec *pDispEvo, NVDpyId dpyId) +{ + NVDpyEvoPtr pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, nvAddDpyIdToEmptyDpyIdList(dpyId), pDispEvo) { + return pDpyEvo; + } + + return NULL; +} + +/* + * Find or create a pDpy with a given root connector and topology path. + */ +NVDpyEvoPtr nvGetDPMSTDpyEvo(NVConnectorEvoPtr pConnectorEvo, + const char *address, NvBool *pDynamicDpyCreated) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDpyEvoPtr pDpyEvo = NULL, pTmpDpyEvo; + NVDpyId dpyId; + + // Look for a pDpyEvo on pConnectorEvo whose dp address matches. + FOR_ALL_EVO_DPYS(pTmpDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pTmpDpyEvo->pConnectorEvo != pConnectorEvo) { + continue; + } + if (pTmpDpyEvo->dp.addressString == NULL) { + continue; + } + if (nvkms_strcmp(pTmpDpyEvo->dp.addressString, address) == 0) { + pDpyEvo = pTmpDpyEvo; + goto done; + } + } + + // Find a display ID that is not used on this GPU. + dpyId = nvNewDpyId(pDispEvo->validDisplays); + if (nvDpyIdIsInvalid(dpyId)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to allocate a display ID for device %s.%s", + pConnectorEvo->name, + address); + goto done; + } + + // Create a new pDpy for this address. + pDpyEvo = nvAllocDpyEvo(pDispEvo, pConnectorEvo, dpyId, address); + if (pDpyEvo == NULL) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to create a display device object for %s-%u.%s", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex, + address); + goto done; + } + + pDispEvo->validDisplays = + nvAddDpyIdToDpyIdList(dpyId, pDispEvo->validDisplays); + + *pDynamicDpyCreated = TRUE; + +done: + return pDpyEvo; +} + +/*! + * Return a string with a comma-separated list of dpy names, for all + * dpys in dpyIdList. + * + * If there are no dpys in the dpyIdList, return "none". + * + * The string is dynamically allocated and should be freed by the caller. + * + * Return NULL if an allocation failure occurs. 
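+ *
+ * The string is built in two passes: the first loop sizes the buffer
+ * (including the ", " separators and nul terminator) and the second fills
+ * it. A hypothetical result: "DELL U2410 (DP-2), HP Z27 (DP-4)".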
+ */ +char *nvGetDpyIdListStringEvo(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList) +{ + NVDpyEvoPtr pDpyEvo; + char *listString = NULL; + NvU32 lengths[NV_DPY_ID_MAX_DPYS_IN_LIST]; + NvU32 totalLength = 0; + NvU32 currentOffset; + NvU32 index; + + index = 0; + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + + nvAssert(index < ARRAY_LEN(lengths)); + + lengths[index] = nvkms_strlen(pDpyEvo->name); + + totalLength += lengths[index]; + + if (index != 0) { + totalLength += 2; /* nvkms_strlen(", ") */ + } + + index++; + } + + totalLength += 1; /* for nul terminator */ + + if (index == 0) { + return nvStrDup("none"); + } + + listString = nvAlloc(totalLength); + + if (listString == NULL) { + return NULL; + } + + index = 0; + currentOffset = 0; + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyIdList, pDispEvo) { + + if (index != 0) { + listString[currentOffset] = ','; + listString[currentOffset+1] = ' '; + currentOffset += 2; + } + + nvkms_memcpy(listString + currentOffset, pDpyEvo->name, lengths[index]); + + currentOffset += lengths[index]; + + index++; + } + + listString[currentOffset] = '\0'; + currentOffset += 1; + + nvAssert(currentOffset == totalLength); + + return listString; +} + +NvBool nvDpyGetDynamicData( + NVDpyEvoPtr pDpyEvo, + struct NvKmsQueryDpyDynamicDataParams *pParams) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + const struct NvKmsQueryDpyDynamicDataRequest *pRequest = &pParams->request; + struct NvKmsQueryDpyDynamicDataReply *pReply = &pParams->reply; + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NVDpyIdList connectedList; + NVDpyIdList oneDpyIdList = nvAddDpyIdToEmptyDpyIdList(pDpyEvo->id); + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + /* + * Check for the connection state of the dpy. + * + * For DP MST, we need to honor the current DPlib state; if a DP + * MST monitor is physically connected but forceDisconnected, its + * hotplug events won't get serviced and DPlib will complain + * loudly. This doesn't apply to DP serializer (which is not managed + * by DPLib) since we don't need to do any topology/branch detection, + * and we can honor force{Connected,Disconnected} in MST & SST mode. + * + * Otherwise, allow the client to override detection. + * + * Otherwise, honor the current DPlib state. + * + * If we're using a DP serializer connector in MST mode, don't expose any + * SST displays as connected. In all other cases, assume that everything + * is connected since the serializer connector has a fixed topology. + * + * Lastly, call RM to check if the dpy is connected. + */ + + if (nvDpyEvoIsDPMST(pDpyEvo) && + nvConnectorUsesDPLib(pConnectorEvo)) { + /* honor DP MST connectedness */ + connectedList = nvDPLibDpyIsConnected(pDpyEvo) ? + oneDpyIdList : nvEmptyDpyIdList(); + } else if (pRequest->forceConnected) { + connectedList = oneDpyIdList; + } else if (pRequest->forceDisconnected) { + connectedList = nvEmptyDpyIdList(); + } else if (nvConnectorUsesDPLib(pConnectorEvo)) { + connectedList = nvDPLibDpyIsConnected(pDpyEvo) ? 
+ oneDpyIdList : nvEmptyDpyIdList(); + } else if (nvConnectorIsDPSerializer(pConnectorEvo)) { + if (pConnectorEvo->dpSerializerCaps.supportsMST && + !nvDpyEvoIsDPMST(pDpyEvo)) { + connectedList = nvEmptyDpyIdList(); + } else { + connectedList = oneDpyIdList; + } + } else { + connectedList = nvRmGetConnectedDpys(pDispEvo, oneDpyIdList); + } + + pDpyEvo->dp.inbandStereoSignaling = pRequest->dpInbandStereoSignaling; + + /* + * XXX NVKMS TODO: once NVKMS is in the kernel and + * nvAllocCoreChannelEvo() is guaranteed to happen before + * nvDpyGetDynamicData(), pass allowDVISpecPClkOverride through to + * nvDpyProbeMaxPixelClock() rather than cache it. + */ + pDpyEvo->allowDVISpecPClkOverride = pRequest->allowDVISpecPClkOverride; + + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, connectedList)) { + if (!DpyConnectEvo(pDpyEvo, pParams)) { + return FALSE; + } + } else { + DpyDisconnectEvo(pDpyEvo); + } + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPLibUpdateDpyLinkConfiguration(pDpyEvo); + } + + ct_assert(sizeof(pDpyEvo->name) == sizeof(pReply->name)); + + nvkms_memcpy(pReply->name, pDpyEvo->name, sizeof(pDpyEvo->name)); + + if (pDpyEvo->parsedEdid.valid) { + pReply->physicalDimensions.heightInCM = + pDpyEvo->parsedEdid.info.screen_size_y; + pReply->physicalDimensions.widthInCM = + pDpyEvo->parsedEdid.info.screen_size_x; + } + + /* + * XXX NVKMS until NVKMS is in the kernel and + * nvAllocCoreChannelEvo() is guaranteed to happen before + * nvDpyGetDynamicData(), pDpyEvo->maxPixelClockKHz could change + * later after the assignment here. + */ + pReply->maxPixelClockKHz = pDpyEvo->maxPixelClockKHz; + + pReply->connected = + nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->connectedDisplays); + + pReply->isVirtualRealityHeadMountedDisplay = pDpyEvo->isVrHmd; + + pReply->vrrType = pDpyEvo->vrr.type; + + pReply->stereo3DVision.supported = pDpyEvo->stereo3DVision.supported; + pReply->stereo3DVision.isDLP = pDpyEvo->stereo3DVision.isDLP; + pReply->stereo3DVision.isAegis = pDpyEvo->stereo3DVision.isAegis; + pReply->stereo3DVision.subType = pDpyEvo->stereo3DVision.subType; + + pReply->dp.guid.valid = pDpyEvo->dp.guid.valid; + + ct_assert(sizeof(pReply->dp.guid.buffer) == + sizeof(pDpyEvo->dp.guid.buffer)); + nvkms_memcpy(pReply->dp.guid.buffer, pDpyEvo->dp.guid.buffer, + sizeof(pDpyEvo->dp.guid.buffer)); + + ct_assert(sizeof(pReply->dp.guid.str) == sizeof(pDpyEvo->dp.guid.str)); + nvkms_memcpy(pReply->dp.guid.str, pDpyEvo->dp.guid.str, + sizeof(pDpyEvo->dp.guid.str)); + + if (pDpyEvo->edid.length > sizeof(pReply->edid.buffer)) { + nvAssert(!"EDID larger than can be returned in NVKMS API"); + return FALSE; + } + + if (pDpyEvo->edid.length > 0) { + pReply->edid.bufferSize = pDpyEvo->edid.length; + nvkms_memcpy(pReply->edid.buffer, pDpyEvo->edid.buffer, pDpyEvo->edid.length); + } + + return TRUE; +} + +void nvDpyUpdateCurrentAttributes(NVDpyEvoRec *pDpyEvo) +{ + NVAttributesSetEvoRec newAttributes = pDpyEvo->currentAttributes; + + if (pDpyEvo->head != NV_INVALID_HEAD) { + newAttributes = + pDpyEvo->pDispEvo->headState[pDpyEvo->head].attributes; + } else { + newAttributes.dithering.enabled = FALSE; + newAttributes.dithering.depth = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE; + newAttributes.dithering.mode = NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE; + newAttributes.digitalSignal = + nvGetDefaultDpyAttributeDigitalSignalValue(pDpyEvo->pConnectorEvo); + } + + if (newAttributes.colorSpace != + pDpyEvo->currentAttributes.colorSpace) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + 
NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE, + newAttributes.colorSpace); + } + + if (newAttributes.colorRange != + pDpyEvo->currentAttributes.colorRange) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_RANGE, + newAttributes.colorRange); + } + + if (newAttributes.dithering.enabled != + pDpyEvo->currentAttributes.dithering.enabled) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING, + newAttributes.dithering.enabled); + } + + if (newAttributes.dithering.depth != + pDpyEvo->currentAttributes.dithering.depth) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH, + newAttributes.dithering.depth); + } + + if (newAttributes.dithering.mode != + pDpyEvo->currentAttributes.dithering.mode) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE, + newAttributes.dithering.mode); + } + + if (newAttributes.imageSharpening.available != + pDpyEvo->currentAttributes.imageSharpening.available) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_IMAGE_SHARPENING_AVAILABLE, + newAttributes.imageSharpening.available); + } + + if (newAttributes.digitalSignal != + pDpyEvo->currentAttributes.digitalSignal) { + nvSendDpyAttributeChangedEventEvo( + pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL, + newAttributes.digitalSignal); + } + + pDpyEvo->currentAttributes = newAttributes; +} + +// Returns TRUE if this display is capable of Adaptive-Sync +NvBool nvDpyIsAdaptiveSync(const NVDpyEvoRec *pDpyEvo) +{ + return FALSE; +} + +// Returns TRUE if this display is in the Adaptive-Sync defaultlist +NvBool nvDpyIsAdaptiveSyncDefaultlisted(const NVParsedEdidEvoRec *pParsedEdid) +{ + return FALSE; +} + diff --git a/src/nvidia-modeset/src/nvkms-event.c b/src/nvidia-modeset/src/nvkms-event.c new file mode 100644 index 000000000..a2285babc --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-event.c @@ -0,0 +1,207 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvos.h" +#include "dp/nvdp-connector.h" +#include "nvkms-event.h" +#include "nvkms-rm.h" +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" +#include "nvkms-private.h" +#include "nvkms-evo.h" + +/* + * Handle a display device hotplug event. 
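+ * This runs as deferred work; dataPtr carries the NVDispEvoPtr whose
+ * connectors changed state (dataU32 is unused here).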
+ * + * What "hotplug" means is unclear, but it could mean any of the following: + * - A display device is plugged in. + * - A display device is unlugged. + * - A display device was unplugged and then plugged back in. + * - A display device was plugged in and then unplugged. + * - An already connected display device is turned on. + * - An already connected display device is turned off. + * - A DisplayPort device needs its link status and RX Capabilities fields + * read and may need to be retrained ("long" hotplug event, > 2ms). + * + * DisplayPort "short" hotplug events, which are between 0.25ms and 2ms, are + * handled separately by nvHandleDPIRQEventDeferredWork below. + */ + +void +nvHandleHotplugEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS hotplugParams = { 0 }; + NvU32 ret; + NVDpyIdList hotplugged, unplugged, tmpUnplugged, changed; + NVDpyIdList connectedDisplays; + NVDpyEvoPtr pDpyEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + // Get the hotplug state. + hotplugParams.subDeviceInstance = pDispEvo->displayOwner; + + if ((ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE, + &hotplugParams, + sizeof(hotplugParams))) + != NVOS_STATUS_SUCCESS) { + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, "Failed to determine which " + "devices were hotplugged: 0x%x\n", ret); + return; + } + + /* + * Work around an RM bug in hotplug notification when the GPU is in + * GC6. In this case, the RM will notify us of a hotplug event, but + * NV0073_CTRL_CMD_SYSTEM_GET_HOTPLUG_UNPLUG_STATE returns both + * hotPlugMask and hotUnplugMask as 0. + * Bug 200528641 tracks finding a root cause. Until that bug is + * fixed, call NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE to get the + * full list of connected dpys and construct hotplugged and + * unplugged lists from that if we encounter this case. + */ + if ((hotplugParams.hotPlugMask == 0) && + (hotplugParams.hotUnplugMask == 0)) { + const NVDpyIdList updatedDisplayList = nvRmGetConnectedDpys(pDispEvo, + pDispEvo->connectorIds); + hotplugged = nvDpyIdListMinusDpyIdList(updatedDisplayList, + pDispEvo->connectedDisplays); + unplugged = nvDpyIdListMinusDpyIdList(pDispEvo->connectedDisplays, + updatedDisplayList); + } else { + hotplugged = nvNvU32ToDpyIdList(hotplugParams.hotPlugMask); + unplugged = nvNvU32ToDpyIdList(hotplugParams.hotUnplugMask); + } + + // The RM only reports the latest plug/unplug status of each dpy. + nvAssert(nvDpyIdListIsEmpty(nvIntersectDpyIdListAndDpyIdList(hotplugged, + unplugged))); + nvAssert(nvDpyIdListIsASubSetofDpyIdList(hotplugged, + pDispEvo->connectorIds)); + nvAssert(nvDpyIdListIsASubSetofDpyIdList(unplugged, + pDispEvo->connectorIds)); + + connectedDisplays = pDispEvo->connectedDisplays; + + // Ignore non-DP devices that were reported as unplugged while already + // disconnected. + tmpUnplugged = nvEmptyDpyIdList(); + FOR_ALL_EVO_DPYS(pDpyEvo, unplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo) || + nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedDisplays)) { + + tmpUnplugged = + nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, tmpUnplugged); + } + } + unplugged = tmpUnplugged; + + // Non-DP devices that were disconnected and connected again should generate an + // unplug / plug pair. 
+ FOR_ALL_EVO_DPYS(pDpyEvo, hotplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (!nvConnectorUsesDPLib(pConnectorEvo) && + nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedDisplays)) { + + unplugged = nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, unplugged); + } + } + +#if defined(DEBUG) + if (!nvDpyIdListIsEmpty(hotplugged)) { + char *str = nvGetDpyIdListStringEvo(pDispEvo, hotplugged); + nvEvoLogDispDebug(pDispEvo, EVO_LOG_INFO, + "Received display hotplug event: %s", + nvSafeString(str, "unknown")); + nvFree(str); + } + if (!nvDpyIdListIsEmpty(unplugged)) { + char *str = nvGetDpyIdListStringEvo(pDispEvo, unplugged); + nvEvoLogDispDebug(pDispEvo, EVO_LOG_INFO, + "Received display unplug event: %s", + nvSafeString(str, "unknown")); + nvFree(str); + } +#endif /* DEBUG */ + + // First, the OR configuration of the connector should not change, but + // re-query it to make sure. + changed = nvAddDpyIdListToDpyIdList(hotplugged, unplugged); + FOR_ALL_EVO_DPYS(pDpyEvo, changed, pDispEvo) { + nvRmGetConnectorORInfo(pDpyEvo->pConnectorEvo, TRUE); + } + + // Next, disconnect devices that are in the unplug mask. + FOR_ALL_EVO_DPYS(pDpyEvo, unplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyLongPulse(pConnectorEvo, FALSE); + } else { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } + } + + // Finally, connect devices that are in the plug mask. + FOR_ALL_EVO_DPYS(pDpyEvo, hotplugged, pDispEvo) { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyLongPulse(pConnectorEvo, TRUE); + } else { + nvSendDpyEventEvo(pDpyEvo, NVKMS_EVENT_TYPE_DPY_CHANGED); + } + } +} + +void +nvHandleDPIRQEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + + // XXX[AGP]: ReceiveDPIRQEvent throws away the DisplayID of the device that + // caused the event, so for now we have to poll all of the connected DP + // devices to see which ones need attention. When RM is fixed, this can be + // improved. + + NVConnectorEvoPtr pConnectorEvo; + + // Notify all connectors which are using DP lib. For DP Serializer connector, + // HPD_IRQ indicates loss of clock/sync, so re-train the link. + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPNotifyShortPulse(pConnectorEvo->pDpLibConnector); + } else if (nvConnectorIsDPSerializer(pConnectorEvo)) { + nvDPSerializerHandleDPIRQ(pDispEvo, pConnectorEvo); + } + } +} diff --git a/src/nvidia-modeset/src/nvkms-evo.c b/src/nvidia-modeset/src/nvkms-evo.c new file mode 100644 index 000000000..055d5c431 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-evo.c @@ -0,0 +1,7352 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-types.h" + +#include "nvkms-evo-states.h" +#include "dp/nvdp-connector.h" +#include "nvkms-console-restore.h" +#include "nvkms-rm.h" +#include "nvkms-dpy.h" +#include "nvkms-cursor.h" +#include "nvkms-hal.h" +#include "nvkms-hdmi.h" +#include "nvkms-modepool.h" +#include "nvkms-evo.h" +#include "nvkms-flip.h" +#include "nvkms-dma.h" +#include "nvkms-framelock.h" +#include "nvkms-utils.h" +#include "nvkms-lut.h" +#include "nvkms-modeset.h" +#include "nvkms-prealloc.h" +#include "nvkms-rmapi.h" +#include "nvkms-surface.h" +#include "nvkms-vrr.h" +#include "nvkms-ioctl.h" + +#include "nvctassert.h" + +#include // NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS +#include // NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH +#include // NV0080_CTRL_CMD_GPU_* +#include // NV0080_CTRL_OS_UNIX_VT_SWITCH_* +#include // NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_* +#include // NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS +#include // NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2 +#include // NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE +#include // NV0073_CTRL_DP_CTRL + +#include "nvkms.h" +#include "nvkms-private.h" +#include "nvos.h" + +#include "displayport/dpcd.h" + +#define EVO_RASTER_LOCK 1 +#define EVO_FLIP_LOCK 2 + +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_HEAD 7:0 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT 8:8 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_DISABLE 0 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_BASE_LUT_ENABLE 1 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT 9:9 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_DISABLE 0 +#define NVUPDATE_LUT_TIMER_NVKMS_DATAU32_OUTPUT_LUT_ENABLE 1 + +/* + * This struct is used to describe a single set of GPUs to lock together by + * GetRasterLockTopologies(). + * It is initialized to pDispEvoOrder[i] == NULL, and when filled in NULL is + * used as a terminator. 
+ */ +typedef struct { + NVDispEvoPtr pDispEvoOrder[NVKMS_MAX_SUBDEVICES]; +} RasterLockTopology; + + +static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, NvU32 head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState); +static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0, + NVDispEvoPtr pDispEvo1, NvU32 head1, + NVEvoLockPin *serverPin, NVEvoLockPin *clientPin); +static NvBool EvoWaitForLock(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvU32 type); +static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head, + NVEvoUpdateState *updateState); + +static void SetRefClk(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvBool external, + NVEvoUpdateState *updateState); +static void UnlockRasterLockGroup(NVDevEvoPtr pDevEvo); +static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action); +static void FinishModesetOneTopology(RasterLockTopology *topo); + +static void SyncEvoLockState(void); +static void UpdateEvoLockState(void); + +static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo, + const NvU32 head, const NvU32 data, + const NvU64 usec); + +NVEvoGlobal nvEvoGlobal = { + .clientHandle = 0, + .frameLockList = NV_LIST_INIT(&nvEvoGlobal.frameLockList), + .devList = NV_LIST_INIT(&nvEvoGlobal.devList), +#if defined(DEBUG) + .debugMemoryAllocationList = + NV_LIST_INIT(&nvEvoGlobal.debugMemoryAllocationList), +#endif /* DEBUG */ +}; + +/* + * The dummy infoString should be used in paths that take an + * NVEvoInfoStringPtr where we don't need to log to a + * string. By setting the 's' field to NULL, nothing will be printed + * to the infoString buffer. + */ +NVEvoInfoStringRec dummyInfoString = { + .length = 0, + .totalLength = 0, + .s = NULL, +}; + +/*! + * Return the NVDevEvoPtr, if any, that matches deviceId. + */ +NVDevEvoPtr nvFindDevEvoByDeviceId(NvU32 deviceId) +{ + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + if (pDevEvo->usesTegraDevice && + (deviceId == NVKMS_DEVICE_ID_TEGRA)) { + return pDevEvo; + } else if (pDevEvo->deviceId == deviceId) { + return pDevEvo; + } + }; + + return NULL; +} + +/*! + * Find the first unused gpuLogIndex. + */ +NvU8 nvGetGpuLogIndex(void) +{ + NVDevEvoPtr pDevEvo; + NvU8 gpuLogIndex = 0; + + tryAgain: + FOR_ALL_EVO_DEVS(pDevEvo) { + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pDevEvo->pSubDevices[sd] == NULL) { + continue; + } + if (gpuLogIndex == pDevEvo->pSubDevices[sd]->gpuLogIndex) { + gpuLogIndex++; + if (gpuLogIndex == 0xFF) { + nvAssert(!"Too many GPUs"); + return NV_INVALID_GPU_LOG_INDEX; + } + goto tryAgain; + } + } + } + + return gpuLogIndex; +} + +/*! + * Return whether there are active heads on this pDispEvo. + */ +static NvBool HasActiveHeads(NVDispEvoPtr pDispEvo) +{ + return nvGetActiveHeadMask(pDispEvo) != 0; +} + +static void BlankHeadEvo(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + struct NvKmsCompositionParams emptyCursorCompParams = { }; + + /* + * If core channel surface is supported, ->SetSurface() + * disables Lut along with core channel surface. Otherwise need to disable + * Lut explicitly. 
+ */ + if (!pDevEvo->hal->caps.supportsCoreChannelSurface) { + pDevEvo->hal->SetLUTContextDma(pDispEvo, + head, + NULL /* pSurfEvo */, + FALSE /* baseLutEnabled */, + FALSE /* outputLutEnabled */, + updateState, + pHeadState->bypassComposition); + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + + pDevEvo->hal->SetCursorImage(pDevEvo, + head, + NULL /* pSurfaceEvo */, + updateState, + &emptyCursorCompParams); + + { + NVFlipChannelEvoHwState hwState = { { 0 } }; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + pDevEvo->hal->Flip(pDevEvo, + pDevEvo->head[head].layer[layer], + &hwState, + updateState, + FALSE /* bypassComposition */); + } + } + + nvPopEvoSubDevMask(pDevEvo); +} + +void nvEvoDetachConnector(NVConnectorEvoRec *pConnectorEvo, const NvU32 head, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 orIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + nvAssert(orIndex != NV_INVALID_OR); + nvAssert(pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head)); + + pConnectorEvo->or.ownerHeadMask[orIndex] &= ~NVBIT(head); + + /* Disable the palette, cursor, and ISO ctxDma on this head. */ + BlankHeadEvo(pDispEvo, head, updateState); + + // Only tear down the actual output for SLI primary. + nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner); + + pDevEvo->hal->ORSetControl(pDevEvo, + pConnectorEvo, + pTimings->protocol, + orIndex, + pConnectorEvo->or.ownerHeadMask[orIndex], + updateState); + + /* + * Tell RM that there is no DisplayID is associated with this head anymore. + */ + pDevEvo->hal->HeadSetDisplayId(pDevEvo, head, 0x0, updateState); + + nvPopEvoSubDevMask(pDevEvo); + + pModesetUpdateState->connectorIds = + nvAddDpyIdToDpyIdList(pHeadState->pConnectorEvo->displayId, + pModesetUpdateState->connectorIds); +} + +void nvEvoAttachConnector(NVConnectorEvoRec *pConnectorEvo, + const NvU32 head, + NVDPLibModesetStatePtr pDpLibModesetState, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 orIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + NvU32 i; + + nvAssert(orIndex != NV_INVALID_OR); + nvAssert(!(pConnectorEvo->or.ownerHeadMask[orIndex] & NVBIT(head))); + nvAssert(pHeadState->activeRmId != 0); + + FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.ownerHeadMask[orIndex]) { + nvAssert(pTimings->protocol == + pDispEvo->headState[i].timings.protocol); + } FOR_EACH_INDEX_IN_MASK_END; + + pConnectorEvo->or.ownerHeadMask[orIndex] |= NVBIT(head); + + // Only set up the actual output for SLI primary. + nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner); + + pDevEvo->hal->ORSetControl(pDevEvo, + pConnectorEvo, + pTimings->protocol, + orIndex, + pConnectorEvo->or.ownerHeadMask[orIndex], + updateState); + + + /* Tell RM which DisplayID is associated with the head. 
*/ + pDevEvo->hal->HeadSetDisplayId(pDevEvo, + head, pHeadState->activeRmId, + updateState); + + nvPopEvoSubDevMask(pDevEvo); + + pModesetUpdateState->connectorIds = + nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, + pModesetUpdateState->connectorIds); + pModesetUpdateState->pDpLibModesetState[head] = pDpLibModesetState; +} + +void nvSetViewPortPointInEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NvU16 x, + NvU16 y, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[pDispEvo->displayOwner].headState[head]; + + pSdHeadState->viewPortPointIn.x = x; + pSdHeadState->viewPortPointIn.y = y; + + EvoSetViewportPointIn(pDispEvo, head, x, y, updateState); +} + +// +// Sets the Update method which makes all the other methods in the PB to take effect. +// +static void EvoUpdateAndKickOffWithNotifier( + const NVDispEvoRec *pDispEvo, + NvBool notify, + NvBool sync, int notifier, + NVEvoUpdateState *updateState, + NvBool releaseElv) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + // Calling code should reject operations that send updates while the console + // is active. + nvAssert(!pDevEvo->coreInitMethodsPending); + + // It doesn't make sense to request sync without requesting a notifier. + nvAssert(!sync || notify); + + if (notify) { + // Clear the completion notifier. + pDevEvo->hal->InitCompNotifier(pDispEvo, notifier); + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetNotifier(pDevEvo, notify, sync, notifier, + updateState); + pDevEvo->hal->Update(pDevEvo, updateState, releaseElv); + nvPopEvoSubDevMask(pDevEvo); + + // Wait for completion. + if (sync) { + pDevEvo->hal->WaitForCompNotifier(pDispEvo, notifier); + } + + if (notify) { + const NVDispEvoRec *pDispEvoTmp; + NVEvoUpdateState coreUpdateState = { }; + NvU32 sd; + + // To work around HW bug 1945716 and to prevent subsequent core updates + // from triggering unwanted notifier writes, set the core channel + // completion notifier control and context DMA disables when + // notification is not requested. + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetNotifier(pDevEvo, + FALSE /* notify */, + FALSE /* awaken */, + 0 /* notifier */, + &coreUpdateState); + nvPopEvoSubDevMask(pDevEvo); + + // SetCoreNotifier is only expected to push core channel methods. + FOR_ALL_EVO_DISPLAYS(pDispEvoTmp, sd, pDevEvo) { + if (pDispEvoTmp == pDispEvo) { + nvAssert(coreUpdateState.subdev[sd].channelMask == + DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE)); + } else { + nvAssert(coreUpdateState.subdev[sd].channelMask == 0x0); + } + } + + // We don't really need to kick off here, but might as well to keep the + // state cache up to date. Note that we intentionally don't use + // pDevEvo->hal->Update since we don't want another Update. + nvDmaKickoffEvo(pDevEvo->core); + } + + return; +} + +void nvEvoUpdateAndKickOff(const NVDispEvoRec *pDispEvo, NvBool sync, + NVEvoUpdateState *updateState, NvBool releaseElv) +{ + EvoUpdateAndKickOffWithNotifier(pDispEvo, sync, sync, 0, updateState, + releaseElv); +} + +void nvDoIMPUpdateEvo(NVDispEvoPtr pDispEvo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + // IMP pre-modeset + pDevEvo->hal->PrePostIMP(pDispEvo, TRUE /* isPre */); + + // Do the update + nvEvoUpdateAndKickOff(pDispEvo, TRUE, updateState, TRUE /* releaseElv */); + + // IMP post-modeset + pDevEvo->hal->PrePostIMP(pDispEvo, FALSE /* isPre */); +} + +/*! 
+ * Tell RM not to expect anything other than a stall lock change during the next + * update. + */ +void nvEvoArmLightweightSupervisor(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvBool isVrr, + NvBool enable) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS params = { }; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + nvAssert(!pTimings->interlaced && !pTimings->doubleScan); + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.bArmLWSV = enable; + params.bVrrState = isVrr; + params.vActive = nvEvoVisibleHeight(pTimings); + params.vfp = pTimings->rasterSize.y - + pTimings->rasterBlankStart.y; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR, + ¶ms, sizeof(params)) + != NVOS_STATUS_SUCCESS) { + nvAssert(!"ARM_LIGHTWEIGHT_SUPERVISOR failed"); + } +} + +/* + * Convert from NVHwModeTimingsEvoPtr to NvModeTimingsPtr. + * + * Note that converting from NvModeTimingsPtr to + * NVHwModeTimingsEvoPtr (via + * ConstructHwModeTimingsFromNvModeTimings()) and converting back from + * NVHwModeTimingsEvoPtr to NvModeTimingsPtr (via + * nvConstructNvModeTimingsFromHwModeTimings()) can lose precision in + * the case of interlaced modes due to the division by 2. This + * function should only be used for reporting purposes. + */ + +void +nvConstructNvModeTimingsFromHwModeTimings(const NVHwModeTimingsEvo *pTimings, + NvModeTimingsPtr pModeTimings) +{ + NvU32 rasterBlankEndY, rasterSyncEndY; + + if (!pTimings || !pModeTimings) { + nvAssert(!"Null params"); + return; + } + + pModeTimings->pixelClockHz = KHzToHz(pTimings->pixelClock); + pModeTimings->hVisible = nvEvoVisibleWidth(pTimings); + pModeTimings->hSyncStart = pTimings->rasterSize.x - + pTimings->rasterBlankEnd.x - 1; + pModeTimings->hSyncEnd = pTimings->rasterSize.x - + pTimings->rasterBlankEnd.x + + pTimings->rasterSyncEnd.x; + pModeTimings->hTotal = pTimings->rasterSize.x; + pModeTimings->vVisible = nvEvoVisibleHeight(pTimings); + rasterBlankEndY = pTimings->rasterBlankEnd.y + 1; + rasterSyncEndY = pTimings->rasterSyncEnd.y + 1; + + if (pTimings->interlaced) { + rasterBlankEndY *= 2; + rasterSyncEndY *= 2; + } + + /* + * The real pixel clock and width values for modes using YUV 420 emulation + * are half of the incoming values parsed from the EDID. This conversion is + * performed here, so NvModeTimings will have the user-visible (full width) + * values, and NVHwModeTimingsEvo will have the real (half width) values. 
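+ * (For example, a 3840x2160 YUV 4:2:0 SW-emulated mode carries 1920-wide
+ * horizontal values in NVHwModeTimingsEvo; the doubling below restores the
+ * user-visible 3840-wide values.)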
+ */ + if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) { + pModeTimings->pixelClockHz *= 2; + pModeTimings->hVisible *= 2; + pModeTimings->hSyncStart *= 2; + pModeTimings->hSyncEnd *= 2; + pModeTimings->hTotal *= 2; + } + + pModeTimings->vSyncStart = pTimings->rasterSize.y - rasterBlankEndY; + pModeTimings->vSyncEnd = pTimings->rasterSize.y - rasterBlankEndY + + rasterSyncEndY; + pModeTimings->vTotal = pTimings->rasterSize.y; + pModeTimings->interlaced = pTimings->interlaced; + pModeTimings->doubleScan = pTimings->doubleScan; + pModeTimings->hSyncNeg = pTimings->hSyncPol; + pModeTimings->hSyncPos = !pTimings->hSyncPol; + pModeTimings->vSyncNeg = pTimings->vSyncPol; + pModeTimings->vSyncPos = !pTimings->vSyncPol; + pModeTimings->RRx1k = (pModeTimings->pixelClockHz / + (pModeTimings->hTotal * + pModeTimings->vTotal)); + + if (pModeTimings->doubleScan) { + pModeTimings->vVisible /= 2; + pModeTimings->vSyncStart /= 2; + pModeTimings->vSyncEnd /= 2; + pModeTimings->vTotal /= 2; + } + + pModeTimings->hdmi3D = pTimings->hdmi3D; + pModeTimings->yuv420Mode = pTimings->yuv420Mode; +} + + + +/* + * Tweak pTimings to be compatible with gsync. + */ + +static void TweakTimingsForGsync(const NVDpyEvoRec *pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString, + const enum NvKmsStereoMode stereo) +{ + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PARAMS gsyncOptTimingParams = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NvModeTimings modeTimings; + NvU32 ret; + + /* + * if 3D Vision Stereo is enabled, do not actually + * tweak the modetimings; WAR for bug 692266 + */ + + if (nvIs3DVisionStereoEvo(stereo)) { + + nvEvoLogInfoString(pInfoString, + "Not adjusting mode timings of %s for Quadro Sync " + "compatibility since 3D Vision Stereo is enabled.", + pDpyEvo->name); + return; + } + + gsyncOptTimingParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + + gsyncOptTimingParams.output = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_SOR; + gsyncOptTimingParams.adjust = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_DFP; + + } else if (pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + + gsyncOptTimingParams.output = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_OUTPUT_DAC; + gsyncOptTimingParams.adjust = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_ADJUST_DEFAULT_CRT; + } + + gsyncOptTimingParams.pixelClockHz = KHzToHz(pTimings->pixelClock); + + if (pTimings->interlaced) { + gsyncOptTimingParams.structure = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED; + } else { + gsyncOptTimingParams.structure = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_PROGRESSIVE; + } + + gsyncOptTimingParams.hDeltaStep = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_STEP_USE_DEFAULTS; + gsyncOptTimingParams.vDeltaStep = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_STEP_USE_DEFAULTS; + gsyncOptTimingParams.hDeltaMax = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_H_DELTA_MAX_USE_DEFAULTS; + gsyncOptTimingParams.vDeltaMax = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_V_DELTA_MAX_USE_DEFAULTS; + + gsyncOptTimingParams.hSyncEnd = pTimings->rasterSyncEnd.x + 1; + gsyncOptTimingParams.hBlankEnd = pTimings->rasterBlankEnd.x + 1; + gsyncOptTimingParams.hBlankStart = pTimings->rasterBlankStart.x + 1; + gsyncOptTimingParams.hTotal = pTimings->rasterSize.x; + + gsyncOptTimingParams.vSyncEnd = pTimings->rasterSyncEnd.y + 1; + gsyncOptTimingParams.vBlankEnd = 
pTimings->rasterBlankEnd.y + 1; + gsyncOptTimingParams.vBlankStart = pTimings->rasterBlankStart.y + 1; + gsyncOptTimingParams.vTotal = pTimings->rasterSize.y; + + gsyncOptTimingParams.vInterlacedBlankEnd = pTimings->rasterVertBlank2End; + gsyncOptTimingParams.vInterlacedBlankStart = + pTimings->rasterVertBlank2Start; + + switch (pTimings->protocol) { + case NVKMS_PROTOCOL_DAC_RGB: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_DAC_RGB_CRT; + break; + case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC: + nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle external TMDS."); + // fallthrough + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_DP_B; + break; + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_HDMI_FRL: + gsyncOptTimingParams.protocol = + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_PROTOCOL_SOR_HDMI_FRL; + break; + case NVKMS_PROTOCOL_DSI: + nvAssert(!"GSYNC_GET_OPTIMIZED_TIMING doesn't handle DSI."); + return; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDispEvo->pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_OPTIMIZED_TIMING, + &gsyncOptTimingParams, + sizeof(gsyncOptTimingParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to convert to Quadro Sync safe timing"); + /* do not apply the timings returned by RM if the call failed */ + return; + } + + nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings); + + nvEvoLogInfoString(pInfoString, + "Adjusting Mode Timings for Quadro Sync Compatibility"); + nvEvoLogInfoString(pInfoString, " Old Timings:"); + nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings); + + pTimings->rasterSyncEnd.x = gsyncOptTimingParams.hSyncEnd - 1; + pTimings->rasterSyncEnd.y = gsyncOptTimingParams.vSyncEnd - 1; + pTimings->rasterBlankEnd.x = gsyncOptTimingParams.hBlankEnd - 1; + pTimings->rasterBlankEnd.y = gsyncOptTimingParams.vBlankEnd - 1; + pTimings->rasterBlankStart.x = gsyncOptTimingParams.hBlankStart - 1; + pTimings->rasterBlankStart.y = gsyncOptTimingParams.vBlankStart - 1; + pTimings->rasterSize.x = gsyncOptTimingParams.hTotal; + pTimings->rasterSize.y = gsyncOptTimingParams.vTotal; + + if (gsyncOptTimingParams.structure == + NV30F1_CTRL_GSYNC_GET_OPTIMIZED_TIMING_STRUCTURE_INTERLACED) { + pTimings->rasterVertBlank2Start = + gsyncOptTimingParams.vInterlacedBlankStart; + pTimings->rasterVertBlank2End = + gsyncOptTimingParams.vInterlacedBlankEnd; + } + + pTimings->pixelClock = HzToKHz(gsyncOptTimingParams.pixelClockHz); // Hz to KHz + + nvConstructNvModeTimingsFromHwModeTimings(pTimings, &modeTimings); + + nvEvoLogInfoString(pInfoString, " New Timings:"); + nvEvoLogModeValidationModeTimings(pInfoString, &modeTimings); +} + + + +/*! + * Check whether rasterlock is possible between the two sets of rastertimings. 
+ * Note that we don't compare viewports, but I don't believe the viewport size + * affects whether it is possible to rasterlock. + */ + +static NvBool RasterLockPossible(const NVHwModeTimingsEvo *pTimings1, + const NVHwModeTimingsEvo *pTimings2) +{ + return ((pTimings1->rasterSize.x == pTimings2->rasterSize.x) && + (pTimings1->rasterSize.y == pTimings2->rasterSize.y) && + (pTimings1->rasterSyncEnd.x == pTimings2->rasterSyncEnd.x) && + (pTimings1->rasterSyncEnd.y == pTimings2->rasterSyncEnd.y) && + (pTimings1->rasterBlankEnd.x == pTimings2->rasterBlankEnd.x) && + (pTimings1->rasterBlankEnd.y == pTimings2->rasterBlankEnd.y) && + (pTimings1->rasterBlankStart.x == pTimings2->rasterBlankStart.x) && + (pTimings1->rasterBlankStart.y == pTimings2->rasterBlankStart.y) && + (pTimings1->rasterVertBlank2Start == + pTimings2->rasterVertBlank2Start) && + (pTimings1->rasterVertBlank2End == + pTimings2->rasterVertBlank2End) && + (pTimings1->pixelClock == pTimings2->pixelClock) && + (pTimings1->hSyncPol == pTimings2->hSyncPol) && + (pTimings1->vSyncPol == pTimings2->vSyncPol) && + (pTimings1->interlaced == pTimings2->interlaced) && + (pTimings1->doubleScan == pTimings2->doubleScan)); + +} + +/*! + * Fill the overscan color struct to be passed to SetRasterParams based on + * whether or not SW yuv420 is enabled. + * + * \param[out] pOverscanColor The overscan color struct to be filled + * \param[in] yuv420 Whether or not SW yuv420 is enabled + */ +static void SetOverscanColor(NVEvoColorPtr pOverscanColor, NvBool yuv420) +{ + // Black in RGB format. + // If we're using an emulated YUV 4:2:0 mode, set the equivalent in + // YUV ITU-R BT.709 (64/64/512). + if (yuv420) { + pOverscanColor->red = 64; + pOverscanColor->green = 64; + pOverscanColor->blue = 512; + } else { + pOverscanColor->red = 0; + pOverscanColor->green = 0; + pOverscanColor->blue = 0; + } + +#if defined(DEBUG) + // Override the overscan color to red in debug builds. + // XXX This will look different for YUV 4:2:0 + pOverscanColor->red = 1023; + pOverscanColor->green = 0; + pOverscanColor->blue = 0; +#endif +} + +/* + * Send the raster timings for the pDpyEvo to EVO. + */ +void nvEvoSetTimings(NVDispEvoPtr pDispEvo, + const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NVEvoColorRec overscanColor; + + nvPushEvoSubDevMaskDisp(pDispEvo); + SetOverscanColor(&overscanColor, (pTimings->yuv420Mode == + NV_YUV420_MODE_SW)); + + pDevEvo->hal->SetRasterParams(pDevEvo, head, + pTimings, &overscanColor, updateState); + + // Set the head parameters + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].interlaced = + pTimings->interlaced; + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hdmi3D = + pTimings->hdmi3D; + + /* + * Current HW does not support the combination of HW YUV420 and DSC. + * HW YUV420 is currently only supported with HDMI, so we should never see + * the combination of DP DSC and HW YUV420. + * The combination of HDMI FRL DSC and HW YUV420 should be disallowed by + * the HDMI library. 
+ */ + nvAssert(!((pTimings->yuv420Mode == NV_YUV420_MODE_HW) && + (pTimings->dpDsc.enable || + pTimings->hdmiFrlConfig.dscInfo.bEnableDSC))); + + pDevEvo->gpus[pDispEvo->displayOwner].headControl[head].hwYuv420 = + (pTimings->yuv420Mode == NV_YUV420_MODE_HW); + + EvoUpdateHeadParams(pDispEvo, head, updateState); + + pDevEvo->hal->SetDscParams(pDispEvo, head, pTimings); + + nvPopEvoSubDevMask(pDevEvo); +} + +/* + * growTopologies() - Increase the size of the provided raster lock topology by + * 1. + * + * This involves incrementing *numTopologies, reallocating the topos array, and + * initializing the new entry. + */ +static RasterLockTopology *growTopologies(RasterLockTopology *topos, + unsigned int *numTopologies) +{ + RasterLockTopology *newTopos, *topo; + unsigned int i, numTopos; + + numTopos = *numTopologies; + + numTopos++; + newTopos = nvRealloc(topos, numTopos * sizeof(RasterLockTopology)); + if (!newTopos) { + nvFree(topos); + return NULL; + } + + topo = &newTopos[numTopos - 1]; + + for (i = 0; i < NVKMS_MAX_SUBDEVICES; i++) { + topo->pDispEvoOrder[i] = NULL; + } + + *numTopologies = numTopos; + + return newTopos; + +} /* growTopologies() */ + +/* + * GetRasterLockTopologies() - Determine which GPUs to consider for locking (or + * unlocking) displays. This is one of the following: + * 1. SLI video bridge order, if SLI is enabled; + * 2. A single GPU, + * in that order. + * + * Note that we still go through the same codepaths for the last degenerate + * case, in order to potentially lock heads on the same GPU together. + */ +static RasterLockTopology *GetRasterLockTopologies(NVDevEvoPtr pDevEvo, + unsigned int *numTopologies) +{ + unsigned int i; + RasterLockTopology *topos = NULL; + + *numTopologies = 0; + + if (pDevEvo->numSubDevices > 1 && pDevEvo->sli.bridge.present) { + NV0080_CTRL_GPU_GET_VIDLINK_ORDER_PARAMS params = { 0 }; + NvU32 ret; + + /* In SLI, with a video bridge. Get the video bridge order from RM. */ + + if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_VIDLINK_ORDER, + ¶ms, sizeof(params))) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "NvRmControl(GET_VIDLINK_ORDER) failed; " + "ret: %d\n", ret); + return NULL; + } + + if (params.ConnectionCount > 0) { + RasterLockTopology *topo; + topos = growTopologies(topos, numTopologies); + + if (!topos) { + return NULL; + } + + topo = &topos[*numTopologies - 1]; + + /* + * For some reason this interface returns a mask instead of an + * index, so we have to convert + */ + for (i = 0; i < pDevEvo->numSubDevices; i++) { + NvU32 subDeviceMask = params.Order[i]; + NvU32 sd = 0; + + nvAssert(nvPopCount32(subDeviceMask) == 1); + + if (!subDeviceMask) continue; + + while (!(subDeviceMask & (1 << sd))) sd++; + + nvAssert(sd < NVKMS_MAX_SUBDEVICES); + nvAssert(pDevEvo->pDispEvo[sd] != NULL); + + /* SLI Mosaic. */ + topo->pDispEvoOrder[i] = pDevEvo->pDispEvo[sd]; + } + } + } else { + /* Single GPU or bridgeless SLI */ + + NVDispEvoPtr pDispEvo; + unsigned int sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + RasterLockTopology *topo; + topos = growTopologies(topos, numTopologies); + + if (!topos) { + return NULL; + } + + topo = &topos[*numTopologies - 1]; + + topo->pDispEvoOrder[0] = pDispEvo; + } + } + + return topos; + +} // GetRasterLockTopologies() + +/* + * ApplyLockActionIfPossible() - Check if the given action is a valid + * transition for this pEvoSubDev's state, and apply it if so. 
+ * Return TRUE if any hardware state needs to be updated, FALSE o.w. + */ +static NvBool ApplyLockActionIfPossible(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockAction action) +{ + NvBool changed = FALSE; + NvU32 head; + + if (!pEvoSubDev) { + return FALSE; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + action, NULL)) { + NvU32 otherHead; + unsigned int i = 0; + NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1]; + + pHeads[i++] = head; + for (otherHead = 0; otherHead < NVKMS_MAX_HEADS_PER_DISP; + otherHead++) { + if (!nvHeadIsActive(pDispEvo, otherHead)) { + continue; + } + if (otherHead == head) { + continue; + } + + pHeads[i++] = otherHead; + } + nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP); + pHeads[i] = NV_INVALID_HEAD; + + pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads); + + /* + * scanLockState transitions (such as nvEvoLockHWStateLockHeads) + * will update headControlAssy values for all heads, so we should + * update flipLock and flipLockPin for all heads as well. + */ + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[pHeads[i]]; + /* + * Reset the fliplock pin, if it's not in use for framelock, + * and unregister our use of the fliplock pin + */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, + pHeads[i])) { + pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + } + pEvoSubDev->flipLockPinSetForSliHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockPinSetForSliHeadMask, + pHeads[i]); + + /* + * Disable fliplock, if it's not in use for framelock, and + * unregister our need for fliplock to be enabled + */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, + pHeads[i])) { + pHC->flipLock = FALSE; + } + pEvoSubDev->flipLockEnabledForSliHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForSliHeadMask, + pHeads[i]); + } + + changed = TRUE; + } + } + + return changed; + +} // ApplyLockActionIfPossible() + + +/* + * UnlockRasterLockGroup() - Unlock all GPUs in the rasterlock group associated + * with the given device. + */ + +static void UnlockRasterLockGroup(NVDevEvoPtr pDevEvo) { + RasterLockTopology *topos, *topo; + unsigned int numTopos; + NvBool changed = FALSE; + + topos = GetRasterLockTopologies(pDevEvo, &numTopos); + if (!topos) { + return; + } + + for (topo = topos; topo < topos + numTopos; topo++) { + int maxDisps = 0, i; + + for (i = 0; i < NVKMS_MAX_SUBDEVICES && topo->pDispEvoOrder[i]; i++) { + maxDisps = i; + } + + for (i = maxDisps; i >= 0; i--) { + NVDispEvoPtr pDispEvo = topo->pDispEvoOrder[i]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 sd = pDispEvo->displayOwner; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* We want to evaluate all of these, so don't use || */ + changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, + NV_EVO_DISABLE_VRR); + changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, + NV_EVO_REM_SLI); + changed |= ApplyLockActionIfPossible(pDispEvo, pEvoSubDev, + NV_EVO_UNLOCK_HEADS); + + /* Finally, update the hardware if anything has changed */ + if (changed) { + UpdateEvoLockState(); + changed = FALSE; + } + + pEvoSubDev->flipLockProhibitedHeadMask = 0x0; + } + } + + /* Disable any SLI video bridge features we may have enabled for locking. 
*/ + pDevEvo->sli.bridge.powerNeededForRasterLock = FALSE; + nvEvoUpdateSliVideoBridge(pDevEvo); + + nvFree(topos); + +} // UnlockRasterLockGroup() + +void nvAssertAllDpysAreInactive(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + nvAssert(!nvHeadIsActive(pDispEvo, head)); + } + } +} + +/*! + * Disable locking-related state. + */ +static void DisableLockState(NVDevEvoPtr pDevEvo, + NvU32 *dispNeedsUpdate, + NVEvoUpdateState *updateState) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + *dispNeedsUpdate = 0; + + /* Disable flip lock. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + NvU32 flipLockEnable = 0; + NvBool needsUpdate; + + if (!nvUpdateFlipLockEvoOneHead(pDispEvo, head, + &flipLockEnable, TRUE /* set */, + &needsUpdate, + updateState)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Unable to update fliplock"); + } + + if (needsUpdate) { + *dispNeedsUpdate |= (1 << dispIndex); + } + } + } + + /* Disable raster lock. */ + + UnlockRasterLockGroup(pDevEvo); + + /* Reset the EVO locking state machine. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvEvoStateStartNoLock(&pDevEvo->gpus[pDispEvo->displayOwner]); + } +} + +void nvEvoLockStatePreModeset(NVDevEvoPtr pDevEvo, NvU32 *dispNeedsEarlyUpdate, + NVEvoUpdateState *updateState) +{ + DisableLockState(pDevEvo, dispNeedsEarlyUpdate, updateState); +} + +/*! + * Set up raster lock between GPUs, if applicable. + */ +void nvEvoLockStatePostModeset(NVDevEvoPtr pDevEvo, const NvBool doRasterLock) +{ + RasterLockTopology *topos, *topo; + unsigned int numTopos; + + /* + * Always unlock everything on this rasterlock group to begin with a clean + * slate. We'll relock below, if possible. + */ + + UnlockRasterLockGroup(pDevEvo); + + if (!doRasterLock) { + return; + } + + topos = GetRasterLockTopologies(pDevEvo, &numTopos); + if (!topos) { + return; + } + + for (topo = topos; topo < topos + numTopos; topo++) { + FinishModesetOneTopology(topo); + } + + nvFree(topos); + +} + +static NvBool EnableVrr(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NvBool ret; + + SyncEvoLockState(); + + ret = pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, NV_EVO_ENABLE_VRR, + pHeads); + if (!ret) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to enable VRR frame lock"); + return FALSE; + } + + UpdateEvoLockState(); + + return TRUE; +} + +/*! + * Updates the hardware based on software needs tracked in pDevEvo->sli.bridge. + * Call this function after changing any of those needs variables. + */ +void nvEvoUpdateSliVideoBridge(NVDevEvoPtr pDevEvo) +{ + NV0080_CTRL_GPU_SET_VIDLINK_PARAMS params = { 0 }; + const NvBool enable = pDevEvo->sli.bridge.powerNeededForRasterLock; + NvU32 status; + + if (pDevEvo->sli.bridge.powered == enable) { + return; + } + + if (enable) { + /* SLI should be prohibited earlier if no bridge is present. */ + nvAssert(pDevEvo->sli.bridge.present); + } + + params.enable = enable ? 
+ NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_TRUE : + NV0080_CTRL_GPU_SET_VIDLINK_ENABLE_FALSE; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_SET_VIDLINK, + ¶ms, sizeof(params)); + if (status != NV_OK) { + nvAssert(!"NV0080_CTRL_CMD_GPU_SET_VIDLINK failed"); + } + + pDevEvo->sli.bridge.powered = enable; +} + +/* + * FinishModesetOneTopology() - Set up raster lock between GPUs, if applicable, + * for one RasterLockTopology. Called in a loop from nvFinishModesetEvo(). + */ + +static void FinishModesetOneTopology(RasterLockTopology *topo) +{ + NVDispEvoPtr *pDispEvoOrder = topo->pDispEvoOrder; + NvU32 numUsedGpus = 0; + const NVHwModeTimingsEvo *pPrevTimings = NULL; + NvBool headInUse[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]; + NvBool lockPossible = TRUE, foundUnused = FALSE; + NvBool vrrInUse = FALSE; + NvBool flipLockPossible = TRUE; + unsigned int i, j; + NvU8 allowFlipLockGroup = 0; + + /* + * First, look for devices with VRR enabled. If we find any, go into the + * special VRR framelock mode and don't try to rasterlock any other heads. + */ + for (i = 0; i < NVKMS_MAX_SUBDEVICES && pDispEvoOrder[i]; i++) { + NVDispEvoPtr pDispEvo = pDispEvoOrder[i]; + NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 vrrHeads[NVKMS_MAX_HEADS_PER_DISP + 1]; + unsigned int numVrrHeads = 0; + NvU32 head; + + if (!pDevEvo->gpus || !pDevEvo->vrr.enabled) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head) && + (pDispEvo->headState[head].timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE)) { + vrrHeads[numVrrHeads++] = head; + } + } + + if (numVrrHeads > 0) { + vrrHeads[numVrrHeads] = NV_INVALID_HEAD; + if (EnableVrr(pDispEvo, &pDevEvo->gpus[sd], vrrHeads)) { + vrrInUse = TRUE; + } + } + } + + if (vrrInUse) { + return; + } + + nvkms_memset(headInUse, 0, sizeof(headInUse)); + + /* + * Next, figure out if we can perform locking and which GPUs/heads we can + * use. For now, only attempt locking if all heads on the device have + * compatible timings and consecutive in the video bridge order. + */ + for (i = 0; i < NVKMS_MAX_SUBDEVICES && pDispEvoOrder[i]; i++) { + NVDispEvoPtr pDispEvo = pDispEvoOrder[i]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + + /* + * We can't lock if there is an unused GPU between two used GPUs on the + * video bridge chain. + * We much check if pDevEvo->gpus is NULL in case we haven't been + * through AllocDeviceObject for this pDev (yet?). + */ + if (!HasActiveHeads(pDispEvo) || + !pDevEvo->gpus) { + foundUnused = TRUE; + continue; + } else { + if (foundUnused) { + lockPossible = FALSE; + break; + } + + numUsedGpus++; + } + + /* + * Compare modetimings for each active display with the previous one we + * looked at. If any of them don't match, punt on locking. + */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + /* + * Only flip lock if all of the heads are in the same + * allowFlipLockGroup. 
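+             * Heads in different allowFlipLockGroups can still be
+             * rasterlocked below; only fliplock is skipped in that case.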
+ */ + if (allowFlipLockGroup == 0) { + allowFlipLockGroup = pHeadState->allowFlipLockGroup; + } else if (allowFlipLockGroup != pHeadState->allowFlipLockGroup) { + flipLockPossible = FALSE; + } + + if (pPrevTimings && + !RasterLockPossible(pTimings, pPrevTimings)) { + lockPossible = FALSE; + goto exitHeadLoop; + } + + headInUse[i][head] = TRUE; + + pPrevTimings = pTimings; + } + +exitHeadLoop: + if (!lockPossible) { + break; + } + } + + if (!lockPossible) { + return; + } + + /* + * Finally, actually set up locking: go through the video bridge order + * setting it up. + */ + for (i = 0; i < NVKMS_MAX_SUBDEVICES && pDispEvoOrder[i]; i++) { + NVDispEvoPtr pDispEvo = pDispEvoOrder[i]; + NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head[NVKMS_MAX_HEADS_PER_DISP + 1]; + unsigned int usedHeads = 0; + NvBool headsLocked = FALSE, gpusLocked = FALSE; + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* If we're past the end of the chain, we're done. */ + if (i == numUsedGpus) { + break; + } + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i][j]) { + + head[usedHeads] = j; + + usedHeads++; + } + } + head[usedHeads] = NV_INVALID_HEAD; + + nvAssert(head[0] != NV_INVALID_HEAD); + + /* First lock the heads together, if we have enough heads */ + if (usedHeads > 1) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + + if (!pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_HEADS, + head)) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Unable to lock heads"); + } else { + headsLocked = TRUE; + } + } + + /* Then set up cross-GPU locking, if we have enough active GPUs */ + if (numUsedGpus > 1) { + NVEvoLockAction action; + NVEvoLockPin *pServerPin = &pDevEvo->gpus[sd].sliServerLockPin; + NVEvoLockPin *pClientPin = &pDevEvo->gpus[sd].sliClientLockPin; + + *pServerPin = NV_EVO_LOCK_PIN_ERROR; + *pClientPin = NV_EVO_LOCK_PIN_ERROR; + + if (i == 0) { + action = NV_EVO_ADD_SLI_PRIMARY; + } else { + if (i == (numUsedGpus - 1)) { + action = NV_EVO_ADD_SLI_LAST_SECONDARY; + } else { + action = NV_EVO_ADD_SLI_SECONDARY; + } + } + + if (action == NV_EVO_ADD_SLI_PRIMARY || + action == NV_EVO_ADD_SLI_SECONDARY) { + /* Find pin for server to next */ + NVDispEvoPtr pDispEvoNext = pDispEvoOrder[i + 1]; + NvU32 headNext = 0; + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i + 1][j]) { + headNext = j; + break; + } + } + + GetRasterLockPin(pDispEvo, head[0], + pDispEvoNext, headNext, + pServerPin, NULL); + } + + if (action == NV_EVO_ADD_SLI_SECONDARY || + action == NV_EVO_ADD_SLI_LAST_SECONDARY) { + + /* Find pin for client to prev */ + NVDispEvoPtr pDispEvoPrev = pDispEvoOrder[i - 1]; + NvU32 headPrev = 0; + + for (j = 0; j < NVKMS_MAX_HEADS_PER_DISP; j++) { + if (headInUse[i - 1][j]) { + headPrev = j; + break; + } + } + + GetRasterLockPin(pDispEvo, head[0], + pDispEvoPrev, headPrev, + NULL, pClientPin); + } + + /* + * Normally, the scanlock state machine can determine the client + * lockout window most appropriate for the given configuration. + * However, if we are driving pixels over the DR bus (rather than + * driving a monitor directly via an OR), then the RM programs the + * VPLL with a multiplier that is double the rate of the DR primary. + * This can be inexact, so we may need to crash lock more often than + * when the VPLL settings are identical; not doing so may cause + * rasterlock to fail. 
Frequent crash locking when driving pixels + * over the DR bus is okay, since they are cleaned up before being + * sent to a non-DR OR. + */ + pDevEvo->gpus[sd].forceZeroClientLockoutWindow = + (sd != pDispEvo->displayOwner); + + if (!pDevEvo->gpus[sd].scanLockState(pDispEvo, &pDevEvo->gpus[sd], + action, head)) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Unable to set up SLI locking"); + } else { + gpusLocked = TRUE; + } + } + + /* + * On certain GPUs, we need to enable the video bridge (MIO pads) when + * enabling rasterlock. Note that we don't disable in this function, + * so if gpusLocked is true for any iteration of these loops, this bit + * will be on. + */ + if (gpusLocked && NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_RASTER_LOCK_NEEDS_MIO_POWER)) { + pDevEvo->sli.bridge.powerNeededForRasterLock = TRUE; + nvEvoUpdateSliVideoBridge(pDevEvo); + } + + /* If anything changed, update the hardware */ + if (headsLocked || gpusLocked) { + + UpdateEvoLockState(); + + /* + * Enable fliplock, if we can + * + * XXX this should arguably be done in the state machine proper. + * However, in order to guarantee that we get rasterlock before + * attempting fliplock (and to be symmetric with framelock, which + * turns on and off fliplock from GLS), do it here for now. + */ + if (gpusLocked && flipLockPossible) { + NVEvoUpdateState updateState = { }; + + /* + * Before turning on flip lock, we're supposed to wait for + * raster lock sync. The update performed in + * UpdateEvoLockState() to kick off and apply the rasterlock + * params must be synchronous as EVO reports lock success if + * locking isn't enabled, so we could race through the + * WaitForLock check below otherwise. + */ + + for (j = 0; j < usedHeads; j++) { + NvU32 tmpHead = head[j]; + + NVEvoLockPin pin = + nvEvoGetPinForSignal(pDispEvo, &pDevEvo->gpus[sd], + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + /* Wait for the raster lock to sync in.. */ + if (pin == NV_EVO_LOCK_PIN_ERROR || + !EvoWaitForLock(pDevEvo, sd, tmpHead, EVO_RASTER_LOCK)) { + flipLockPossible = FALSE; + break; + } + + /* + * Enable fliplock, and register that we've enabled + * fliplock for SLI to ensure it doesn't get disabled + * later. + */ + pDevEvo->gpus[sd].headControl[tmpHead].flipLockPin = pin; + pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask = + HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockPinSetForSliHeadMask, tmpHead); + + pDevEvo->gpus[sd].headControl[tmpHead].flipLock = TRUE; + pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask = + HEAD_MASK_SET(pDevEvo->gpus[sd].flipLockEnabledForSliHeadMask, tmpHead); + + EvoUpdateHeadParams(pDispEvo, tmpHead, &updateState); + } + + /* + * This must be synchronous as EVO reports lock success if + * locking isn't enabled, so we could race through the + * WaitForLock check below otherwise. + */ + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + + /* + * Wait for flip lock sync. I'm not sure this is really + * necessary, but the docs say to do this before attempting any + * flips in the base channel. 
+ */ + for (j = 0; j < usedHeads; j++) { + if (flipLockPossible && + !EvoWaitForLock(pDevEvo, sd, head[j], EVO_FLIP_LOCK)) { + flipLockPossible = FALSE; + break; + } + } + } + } + } + +} /* FinishModesetOneTopology() */ + +NvBool nvSetUsageBoundsEvo( + NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + NvBool needCoreUpdate; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + needCoreUpdate = pDevEvo->hal->SetUsageBounds(pDevEvo, sd, head, pUsage, + updateState); + + nvPopEvoSubDevMask(pDevEvo); + + pDevEvo->gpus[sd].headState[head].usage = *pUsage; + + return needCoreUpdate; +} + +void nvEnableMidFrameAndDWCFWatermark(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState) +{ + pDevEvo->gpus[sd].headState[head]. + disableMidFrameAndDWCFWatermark = !enable; + + if (pDevEvo->hal->EnableMidFrameAndDWCFWatermark == NULL) { + nvEvoLogDev(pDevEvo, + EVO_LOG_ERROR, + "EnableMidFrameAndDWCFWatermark() is not defined"); + return; + } + + pDevEvo->hal->EnableMidFrameAndDWCFWatermark(pDevEvo, + sd, + head, + enable, + pUpdateState); +} + + +/*! + * Choose current colorSpace and colorRange based on the current mode timings + * and the requested color space and range, and notify clients of any changes. + * + * This needs to be called during a modeset when YUV420 mode may have been + * enabled or disabled, as well as when the requested color space or range have + * changed. + */ +void nvChooseCurrentColorSpaceAndRangeEvo( + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace, + const enum NvKmsDpyAttributeColorRangeValue requestedColorRange, + enum NvKmsDpyAttributeCurrentColorSpaceValue *pCurrentColorSpace, + enum NvKmsDpyAttributeColorRangeValue *pCurrentColorRange) +{ + enum NvKmsDpyAttributeCurrentColorSpaceValue newColorSpace = + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB; + enum NvKmsDpyAttributeColorRangeValue newColorRange = + NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL; + + /* At depth 18 only RGB and full range are allowed */ + if (pTimings && pTimings->pixelDepth == NVKMS_PIXEL_DEPTH_18_444) { + goto done; + } + + /* + * If the current mode timing requires YUV420 compression, we override the + * requested color space with YUV420. + */ + if (pTimings && (pTimings->yuv420Mode != NV_YUV420_MODE_NONE)) { + newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420; + } else { + /* + * Note this is an assignment between different enum types. Checking the + * value of requested colorSpace and then assigning the value to current + * colorSpace, to avoid warnings about cross-enum assignment. + */ + switch (requestedColorSpace) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB: + newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422: + newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422; + break; + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444: + newColorSpace = NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444; + break; + default: + nvAssert(!"Invalid Requested ColorSpace"); + } + } + + /* Only limited color range is allowed in YUV colorimetry. 
*/ + if ((newColorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) || + (newColorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) || + (newColorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) { + newColorRange = NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED; + } else { + newColorRange = requestedColorRange; + } + +done: + *pCurrentColorSpace = newColorSpace; + *pCurrentColorRange = newColorRange; +} + +void nvUpdateCurrentHardwareColorSpaceAndRangeEvo( + NVDispEvoPtr pDispEvo, + const NvU32 head, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace = + pHeadState->attributes.colorSpace; + const enum NvKmsDpyAttributeColorRangeValue colorRange = + pHeadState->attributes.colorRange; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + + nvAssert(pConnectorEvo != NULL); + + // In SW YUV420 mode, HW is programmed with RGB color space and full color + // range. The color space conversion and color range compression happen + // in a headSurface composite shader. + if ((pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) && + (colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420)) { + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB; + pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL; + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB; + } else { + + // Set default colorimetry to RGB and default color range to full + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB; + pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL; + + // Set color format + switch (colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr444; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr422; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + pHeadState->procAmp.colorFormat = NVT_COLOR_FORMAT_YCbCr420; + break; + default: + nvAssert(!"unrecognized colorSpace"); + } + + switch (pConnectorEvo->legacyType) { + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP: + // program HW with RGB/YCbCr + switch (colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + if (nvEvoIsHDQualityVideoTimings(&pHeadState->timings)) { + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_709; + } else { + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_YUV_601; + } + break; + default: + nvAssert(!"unrecognized colorSpace"); + } + break; + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT: + // colorSpace isn't used for DEVICE_TYPE_CRT and + // hence should be set to the "unchanged" value + // (i.e. the default - RGB) + nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB); + + // program HW with RGB only + pHeadState->procAmp.colorimetry = NVT_COLORIMETRY_RGB; + break; + default: + nvAssert(!"ERROR: invalid pDpyEvo->type"); + } + + // Only advertise YCbCr444 or YCbCr422 when the corresponding + // colorSpaceCaps is TRUE. 
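+            // The asserts below are sanity checks: by this point the chosen
+            // color space should already have been validated against the
+            // connector's capabilities.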
+ if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444) && + !pConnectorEvo->colorSpaceCaps.ycbcr444Capable) { + nvAssert(!"!pConnectorEvo->colorSpaceCaps.ycbcr444Capable"); + } + + if ((colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422) && + !pConnectorEvo->colorSpaceCaps.ycbcr422Capable) { + nvAssert(!"!pConnectorEvo->colorSpaceCaps.ycbcr422Capable"); + } + + switch (colorRange) { + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL: + pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_FULL; + break; + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED: + pHeadState->procAmp.colorRange = NVT_COLOR_RANGE_LIMITED; + break; + default: + nvAssert(!"Invalid colorRange"); + break; + } + } + + // In YUV colorimetry, only limited color range is allowed. + nvAssert(!((pHeadState->procAmp.colorimetry != NVT_COLORIMETRY_RGB) && + (pHeadState->procAmp.colorRange != NVT_COLOR_RANGE_LIMITED))); + + // Limited color range is not allowed with 18bpp mode + nvAssert(!((pHeadState->timings.pixelDepth == NVKMS_PIXEL_DEPTH_18_444) && + (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED))); + + nvPushEvoSubDevMaskDisp(pDispEvo); + + // Set the procamp head method + pDevEvo->hal->SetProcAmp(pDispEvo, head, pUpdateState); + + // Clean up + nvPopEvoSubDevMask(pDevEvo); +} + +/*! + * nvSetColorSpaceAndRangeEvo() - Select the colorimetry and color range + * values and program into EVO HW based on the currently selected + * color space and color range values and DISPLAY_DEVICE_TYPE. + * + * RGB/YUV would be selected for DFP, only RGB would be selected for CRT and + * only YUV would be selected for TV. + * + * If SW YUV420 mode is enabled, EVO HW is programmed with default (RGB color + * space, FULL color range) values, and the real values are used in a + * headSurface composite shader. + */ +void nvSetColorSpaceAndRangeEvo( + NVDispEvoPtr pDispEvo, const NvU32 head, + const enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace, + const enum NvKmsDpyAttributeColorRangeValue requestedColorRange, + NVEvoUpdateState *pUpdateState) +{ + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + + /* + * Choose current colorSpace and colorRange based on the current mode + * timings and the requested color space and range. + */ + nvChooseCurrentColorSpaceAndRangeEvo(pTimings, + requestedColorSpace, + requestedColorRange, + &pHeadState->attributes.colorSpace, + &pHeadState->attributes.colorRange); + + /* Update hardware's current colorSpace and colorRange */ + nvUpdateCurrentHardwareColorSpaceAndRangeEvo(pDispEvo, head, pUpdateState); +} + +void nvEvoHeadSetControlOR(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NvBool colorSpaceOverride = FALSE; + + /* + * Determine whether or not this dpy will need its color space + * overridden. + * + * This is currently only used for DP 1.3 YUV420 mode, where the + * HW's normal support for carrying color space information + * together with the frame is insufficient. + */ + if ((pTimings->yuv420Mode == NV_YUV420_MODE_SW) && + nvConnectorUsesDPLib(pHeadState->pConnectorEvo)) { + + nvAssert(pDispEvo->pDevEvo->caps.supportsDP13); + colorSpaceOverride = TRUE; + } + + // Only set up the actual output for SLI primary. 
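+    // Restricting the subdevice mask to the display owner keeps the
+    // HeadSetControlOR method from being broadcast to SLI secondaries.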
+ nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner); + + pDevEvo->hal->HeadSetControlOR(pDevEvo, head, pTimings, + colorSpaceOverride, + pUpdateState); + + nvPopEvoSubDevMask(pDevEvo); +} + +/*! + * Set dithering based on the values in input config and + * NVConnectorEvo::or::dither. + */ +void nvSetDitheringEvo( + NVDispEvoPtr pDispEvo, const NvU32 head, + enum NvKmsDpyAttributeRequestedDitheringValue configState, + const enum NvKmsDpyAttributeRequestedDitheringDepthValue configDepth, + const enum NvKmsDpyAttributeRequestedDitheringModeValue configMode, + NVEvoUpdateState *pUpdateState) +{ + static const struct { + NvU32 algo; + enum NvKmsDpyAttributeCurrentDitheringModeValue nvKmsDitherMode; + } ditherModeTable[] = { + { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_DYNAMIC_2X2 }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_STATIC_2X2 }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_TEMPORAL }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_MODE_NONE } + }; + static const struct { + NvU32 type; + enum NvKmsDpyAttributeCurrentDitheringDepthValue nvKmsDitherDepth; + } ditherDepthTable[] = { + { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_6_BITS }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_8_BITS }, + { NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF, + NV_KMS_DPY_ATTRIBUTE_CURRENT_DITHERING_DEPTH_NONE } + }; + int i; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvBool enabled = FALSE; + NvU32 algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN; + NvU32 type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + + if (pConnectorEvo != NULL) { + type = pConnectorEvo->or.ditherType; + algo = pConnectorEvo->or.ditherAlgo; + } + enabled = (type != NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF); + + /* + * Make sure algo is a recognizable value that we will be able to program + * in hardware. + */ + if (algo == NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN) { + algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2; + } + + switch (configState) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_ENABLED: + enabled = TRUE; + break; + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED: + enabled = FALSE; + break; + default: + nvAssert(!"Unknown Dithering configuration"); + // Fall through + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO: + /* + * Left it initialized + * based on value NVDpyEvoRec::or::dither::init::enabled. + */ + break; + } + + switch (configDepth) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_6_BITS: + type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS; + break; + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_8_BITS: + type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS; + break; + default: + nvAssert(!"Unknown Dithering Depth"); + // Fall through + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO: + /* + * Left it initialized + * based on value NVDpyEvoRec::or::dither::init::type. 
+ */ + break; + } + + + if (pConnectorEvo != NULL && nvConnectorUsesDPLib(pConnectorEvo) && + configState != NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DISABLED) { + NvU32 lutBits = 11; + + /* If we are using DisplayPort panel with bandwidth constraints + * which lowers the color depth, consider that while applying + * dithering effects. + */ + NvU32 dpBits = nvPixelDepthToBitsPerComponent(pHeadState->timings.pixelDepth); + if (dpBits == 0) { + nvAssert(!"Unknown dpBits"); + dpBits = 8; + } + + /* + * If fewer than 8 DP bits are available, dither. Ideally we'd + * dither from lutBits > 10 to 10 bpc, but EVO doesn't have an + * option for that. + * + * XXX TODO: nvdisplay can dither to 10 bpc. + */ + if (dpBits <= 8 && lutBits > dpBits) { + if (configState == NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_AUTO) { + enabled = TRUE; + } + } + + if (configDepth == NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_DEPTH_AUTO) { + if (dpBits <= 6) { + type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS; + } else if (dpBits <= 8) { + type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS; + } + } + } + + if (enabled) { + switch (configMode) { + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL: + algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL; + break; + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2: + algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2; + break; + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2: + algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2; + break; + default: + nvAssert(!"Unknown Dithering Mode"); + // Fall through + case NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO: + /* + * Left it initialized + * based on value NVDpyEvoRec::or::dither::init::algo. + */ + break; + } + } else { + algo = NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN; + type = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF; + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + + pDevEvo->hal->SetDither(pDispEvo, head, enabled, type, algo, pUpdateState); + + nvPopEvoSubDevMask(pDevEvo); + + pHeadState->attributes.dithering.enabled = enabled; + + for (i = 0; i < ARRAY_LEN(ditherDepthTable); i++) { + if (type == ditherDepthTable[i].type) { + pHeadState->attributes.dithering.depth = + ditherDepthTable[i].nvKmsDitherDepth; + break; + } + } + nvAssert(i < ARRAY_LEN(ditherModeTable)); + + for (i = 0; i < ARRAY_LEN(ditherModeTable); i++) { + if (algo == ditherModeTable[i].algo) { + pHeadState->attributes.dithering.mode = + ditherModeTable[i].nvKmsDitherMode; + break; + } + } + nvAssert(i < ARRAY_LEN(ditherModeTable)); +} + +/* + * HeadCanStereoLock() - Return whether or not this head can use stereo lock + * mode. This can only be called from UpdateEvoLockState, when the pending + * interlaced/locked values are still in the head control assembly structure. + */ +static NvBool HeadCanStereoLock(NVDevEvoPtr pDevEvo, int sd, int head) +{ + NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head]; + + return (!pHC->interlaced && + ((pHC->serverLock != NV_EVO_NO_LOCK) || + (pHC->clientLock != NV_EVO_NO_LOCK))); +} + +/* + * SetStereoLockMode() - For stereo lock mode, we need to notify + * the gsync board that this GPU requires stereo lock mode. 
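+ * This is only meaningful for P2060/P2061 framelock boards; for any other
+ * configuration the function is a no-op that returns TRUE.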
+ */ +static NvBool SetStereoLockMode(NVDispEvoPtr pDispEvo, NvBool stereoLocked) +{ + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE_PARAMS + statusParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + + if (!pFrameLockEvo || + ((pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2060) && + (pFrameLockEvo->boardId != NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061))) { + return TRUE; + } + + statusParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + statusParams.enable = stereoLocked ? 1 : 0; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_STEREO_LOCK_MODE, + &statusParams, + sizeof(statusParams)) != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to set stereo lock mode"); + return FALSE; + } + + return TRUE; +} + +/* + * SyncEvoLockState() + * + * Set the Assembly state based on the current Armed state. This should be + * called before transitioning between states in the EVO state machine. + */ +static void SyncEvoLockState(void) +{ + NVDispEvoPtr pDispEvo; + unsigned int sd; + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 updateHeadMask = nvGetActiveHeadMask(pDispEvo); + unsigned int head; + + /* Update the cached HEAD_SET_CONTROL EVO method state */ + FOR_ALL_HEADS(head, updateHeadMask) { + pEvoSubDev->headControlAssy[head] = + pEvoSubDev->headControl[head]; + + /* + * The following are probably not necessary, since no other + * code touches them (as opposed to headControl above which + * is updated beyond the scope of the state machine). But + * update them here anyway to be consistent. + */ + pEvoSubDev->frameLockClientMaskAssy = + pEvoSubDev->frameLockClientMaskArmed; + pEvoSubDev->frameLockServerMaskAssy = + pEvoSubDev->frameLockServerMaskArmed; + pEvoSubDev->frameLockExtRefClkMaskAssy = + pEvoSubDev->frameLockExtRefClkMaskArmed; + } + } + } +} + +/* + * Determine a unique index for the given (pDevEvo, sd) tuple. + * This is used to index into an array of size NV_MAX_DEVICES. + * + * It would be more straightforward to use a two-dimensional array of + * NV_MAX_DEVICES x NV_MAX_SUBDEVICES and index by (devIndex, sd), but + * that makes the array too large to fit on the stack. This is safe because + * we should only ever have at most NV_MAX_DEVICES GPUs in the system + * total, although at any given time they may be split into many single-GPU + * device or a small number of many-GPU SLI devices. + */ +static NvU32 GpuIndex(const NVDevEvoRec *pDevEvo, NvU32 sd) +{ + const NVDevEvoRec *pDevEvoIter; + NvU32 index = 0; + + nvAssert(sd < pDevEvo->numSubDevices); + + FOR_ALL_EVO_DEVS(pDevEvoIter) { + if (pDevEvoIter == pDevEvo) { + index += sd; + nvAssert(index < NV_MAX_DEVICES); + return index; + } + index += pDevEvo->numSubDevices; + } + + nvAssert(!"Failed to look up GPU index"); + return 0; +} + +/*! + * Get the current refresh rate for the heads in headMask, in 0.0001 Hz units. + * All heads in headMask are expected to have the same refresh rate. 
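+ *
+ * For example, a 1080p60 mode with a 148500 kHz pixel clock and a 2200x1125
+ * raster gives 148500000 / (2200 * 1125) = 60 Hz, which is returned here as
+ * 600000.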
+ */ +static NvU32 GetRefreshRate10kHz(const NVDispEvoRec *pDispEvo, NvU32 headMask) +{ + const NVHwModeTimingsEvo *pTimings = NULL; + NvU32 head; + + FOR_ALL_HEADS(head, headMask) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + + if (head >= pDispEvo->pDevEvo->numHeads && + pHeadState->activeRmId == 0x0) { + continue; + } + + if (pTimings == NULL) { + pTimings = &pHeadState->timings; + } else { + nvAssert(pTimings->rasterSize.x == + pHeadState->timings.rasterSize.x); + nvAssert(pTimings->rasterSize.y == + pHeadState->timings.rasterSize.y); + nvAssert(pTimings->doubleScan == pHeadState->timings.doubleScan); + nvAssert(pTimings->interlaced == pHeadState->timings.interlaced); + nvAssert(pTimings->pixelClock == pHeadState->timings.pixelClock); + } + } + + if (pTimings == NULL) { + return 0; + } + + /* + * pTimings->pixelClock is in 1000/s + * we want 0.0001/s + * factor = 1000/0.0001 = 10000000. + */ + NvU32 factor = 10000000; + NvU32 totalPixels = pTimings->rasterSize.x * pTimings->rasterSize.y; + + if (pTimings->doubleScan) factor /= 2; + if (pTimings->interlaced) factor *= 2; + + return axb_div_c(pTimings->pixelClock, factor, totalPixels); +} + +/*! + * Return a the mask of RmIds from the heads mask. + */ +static NvU32 HeadMaskToActiveRmIdMask(const NVDispEvoRec *pDispEvo, + const NvU32 headMask) +{ + NvU32 head; + NvU32 rmDisplayMask = 0; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if ((NVBIT(head) & headMask) != 0x0) { + rmDisplayMask |= + pDispEvo->headState[head].activeRmId; + } + } + + return rmDisplayMask; +} + +static NvBool FramelockSetControlSync(NVDispEvoPtr pDispEvo, const NvU32 headMask, + NvBool server) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_PARAMS gsyncSetControlSyncParams = { 0 }; + NvU32 ret; + + /* There can only be one server. */ + + nvAssert(!server || (nvPopCount32(headMask) == 1)); + + gsyncSetControlSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncSetControlSyncParams.master = server; + gsyncSetControlSyncParams.displays = + HeadMaskToActiveRmIdMask(pDispEvo, headMask); + + if (gsyncSetControlSyncParams.displays == 0x0) { + return FALSE; + } + + gsyncSetControlSyncParams.refresh = GetRefreshRate10kHz(pDispEvo, headMask); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SYNC, + &gsyncSetControlSyncParams, + sizeof(gsyncSetControlSyncParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + return TRUE; +} + +NvBool nvFramelockSetControlUnsyncEvo(NVDispEvoPtr pDispEvo, const NvU32 headMask, + NvBool server) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NV30F1_CTRL_GSYNC_SET_CONTROL_UNSYNC_PARAMS + gsyncSetControlUnsyncParams = { 0 }; + NvU32 ret; + + gsyncSetControlUnsyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncSetControlUnsyncParams.master = server; + gsyncSetControlUnsyncParams.displays = + HeadMaskToActiveRmIdMask(pDispEvo, headMask); + + if (gsyncSetControlUnsyncParams.displays == 0x0) { + return FALSE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_UNSYNC, + &gsyncSetControlUnsyncParams, + sizeof(gsyncSetControlUnsyncParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + return TRUE; +} + +/* + * UpdateEvoLockState() + * + * Update the hardware based on the Assembly state, if it is different from the + * current Armed state. 
This should be called after transitioning through + * states in the EVO state machine to propagate all of the necessary values to + * HW. + */ +static void UpdateEvoLockState(void) +{ + NVDispEvoPtr pDispEvo; + NVFrameLockEvoPtr pFrameLockEvo; + unsigned int sd; + NVDevEvoPtr pDevEvo; + NvBool ret; + enum { + FIRST_ITERATION, + DISABLE_UNNEEDED_CLIENTS = FIRST_ITERATION, + DISABLE_UNNEEDED_SERVER, + COMPUTE_HOUSE_SYNC, + UPDATE_HOUSE_SYNC, + ENABLE_SERVER, + ENABLE_CLIENTS, + LAST_ITERATION = ENABLE_CLIENTS, + } iteration; + struct { + unsigned char disableServer:1; + unsigned char disableClient:1; + unsigned char enableServer:1; + unsigned char enableClient:1; + } cache[NV_MAX_DEVICES][NVKMS_MAX_HEADS_PER_DISP]; + + nvkms_memset(cache, 0, sizeof(cache)); + + /* XXX NVKMS TODO: idle base channel, first? */ + + /* + * Stereo lock mode is enabled if all heads are either raster locked or + * frame locked, and if all heads are not using interlaced mode. + */ + FOR_ALL_EVO_DEVS(pDevEvo) { + if (!pDevEvo->gpus) { + continue; + } + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvBool gpuCanStereoLock = TRUE; + NvBool testedOneHead = FALSE; + + /* + * If at least one head is not locked or driving an interlaced + * mode, then no heads on this GPU will use stereo lock mode. + */ + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoHeadControlPtr pHC = &pDevEvo->gpus[sd].headControlAssy[head]; + + if (!nvHeadIsActive(pDispEvo, head) || + ((pHC->serverLock == NV_EVO_NO_LOCK) && + (pHC->clientLock == NV_EVO_NO_LOCK))) { + /* + * If the heads aren't scan locked then we should skip + * them as if they aren't connected. NOTE this + * conservative approach means that we won't disable + * StereoLockMode when frameLock is turned off. This + * should be harmless. + */ + continue; + } + testedOneHead = TRUE; + if (!HeadCanStereoLock(pDevEvo, sd, head)) { + gpuCanStereoLock = FALSE; + } + } + /* + * Don't set StereoLockMode for screenless GPUs. As above we'll also + * count heads that can't stereoLock as unconnected. + */ + if (!testedOneHead) { + continue; + } + + /* + * Notify the framelock board whether or not we we will use stereo + * lock mode. If it failed, then don't enable stereo lock mode on + * the GPU. + */ + if (!SetStereoLockMode(pDispEvo, gpuCanStereoLock)) { + gpuCanStereoLock = FALSE; + } + + /* + * Cache whether or not we can use stereo lock mode, so we know + * whether or not to enable stereo lock mode on the GPU during + * SetHeadControl + */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + pEvoSubDev->headControlAssy[head].stereoLocked = + gpuCanStereoLock; + } + } + } + } + + /* + * Go through every GPU on the system, making its framelock state match the + * assembly state that we've saved. + * + * We do this in six steps, in order to keep the overall system state sane + * throughout: + * 1. Disable any clients we no longer need + * 2. Disable server we no longer need + * 3. Compute which framelock devices need house sync + * 4. Update framelock devices with new house sync info + * 5. Enable new server + * 6. 
Enable new clients + */ + for (iteration = FIRST_ITERATION; + iteration <= LAST_ITERATION; + iteration++) { + + if (iteration == COMPUTE_HOUSE_SYNC) { + /* First, clear assy state */ + FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) { + pFrameLockEvo->houseSyncAssy = FALSE; + } + } + + if (iteration == UPDATE_HOUSE_SYNC) { + FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) { + /* + * Since nvFrameLockSetUseHouseSyncEvo sets house sync + * output mode in addition to house sync input mode and + * input polarity, this needs to be done unconditionally, + * even if a house sync state transition hasn't occurred. + */ + if (!nvFrameLockSetUseHouseSyncEvo( + pFrameLockEvo, pFrameLockEvo->houseSyncAssy)) { + nvAssert(!"Setting house sync failed"); + } else { + pFrameLockEvo->houseSyncArmed = + pFrameLockEvo->houseSyncAssy; + } + } + + continue; + } + + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + /* + * This may happen during init, when setting initial modes on + * one device while other devices have not yet been allocated. + * Skip these devices for now; we'll come back later when + * they've been brought up. + */ + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvBool server = FALSE; + NvU32 needsEnableMask = 0, needsDisableMask = 0; + unsigned int head; + + switch (iteration) { + case COMPUTE_HOUSE_SYNC: + /* Accumulate house sync across pDisps */ + if (pEvoSubDev->frameLockHouseSync) { + pDispEvo->pFrameLockEvo->houseSyncAssy = TRUE; + } + break; + case DISABLE_UNNEEDED_CLIENTS: + needsDisableMask = pEvoSubDev->frameLockClientMaskArmed & + ~pEvoSubDev->frameLockClientMaskAssy; + server = FALSE; + break; + case DISABLE_UNNEEDED_SERVER: + needsDisableMask = pEvoSubDev->frameLockServerMaskArmed & + ~pEvoSubDev->frameLockServerMaskAssy; + server = TRUE; + break; + case ENABLE_SERVER: + needsEnableMask = pEvoSubDev->frameLockServerMaskAssy & + ~pEvoSubDev->frameLockServerMaskArmed; + server = TRUE; + break; + case ENABLE_CLIENTS: + needsEnableMask = pEvoSubDev->frameLockClientMaskAssy & + ~pEvoSubDev->frameLockClientMaskArmed; + server = FALSE; + break; + case UPDATE_HOUSE_SYNC: + nvAssert(!"Shouldn't reach here"); + break; + } + + if (needsDisableMask) { + ret = nvFramelockSetControlUnsyncEvo(pDispEvo, + needsDisableMask, + server); + nvAssert(ret); + + if (ret) { + if (server) { + pEvoSubDev->frameLockServerMaskArmed &= + ~needsDisableMask; + + FOR_ALL_HEADS(head, needsDisableMask) { + cache[GpuIndex(pDevEvo, sd)][head].disableServer = TRUE; + } + } else { + pEvoSubDev->frameLockClientMaskArmed &= + ~needsDisableMask; + + FOR_ALL_HEADS(head, needsDisableMask) { + cache[GpuIndex(pDevEvo, sd)][head].disableClient = TRUE; + } + } + } + } + if (needsEnableMask) { + ret = FramelockSetControlSync(pDispEvo, + needsEnableMask, + server); + + nvAssert(ret); + + if (ret) { + if (server) { + pEvoSubDev->frameLockServerMaskArmed |= + needsEnableMask; + + FOR_ALL_HEADS(head, needsEnableMask) { + cache[GpuIndex(pDevEvo, sd)][head].enableServer = TRUE; + } + } else { + pEvoSubDev->frameLockClientMaskArmed |= + needsEnableMask; + + FOR_ALL_HEADS(head, needsEnableMask) { + cache[GpuIndex(pDevEvo, sd)][head].enableClient = TRUE; + } + } + } + } + + /* After the above process, we should have "promoted" assy + * to armed */ + if (iteration == LAST_ITERATION) { + nvAssert(pEvoSubDev->frameLockServerMaskArmed == + pEvoSubDev->frameLockServerMaskAssy); + nvAssert(pEvoSubDev->frameLockClientMaskArmed == + 
pEvoSubDev->frameLockClientMaskAssy); + } + } + } + } + + /* + * Update the EVO HW state. Make this a separate set of loops to not + * confuse the one above + */ + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 extRefClkMaskAssy, extRefClkUpdateMask; + NvU32 possibleHeadMask; + NvBool refClkChanged[NVKMS_MAX_HEADS_PER_DISP] = { FALSE }; + unsigned int head; + + extRefClkMaskAssy = pEvoSubDev->frameLockExtRefClkMaskAssy; + + /* Set the external reference clock, if different */ + extRefClkUpdateMask = extRefClkMaskAssy ^ + pEvoSubDev->frameLockExtRefClkMaskArmed; + + FOR_ALL_HEADS(head, extRefClkUpdateMask) { + NvBool extRefClkNeeded = + !!(extRefClkMaskAssy & (1 << head)); + + SetRefClk(pDevEvo, sd, head, extRefClkNeeded, &updateState); + refClkChanged[head] = TRUE; + + /* Update armed state for this head */ + pEvoSubDev->frameLockExtRefClkMaskArmed = + (pEvoSubDev->frameLockExtRefClkMaskArmed & + (~(1 << head))) | + (extRefClkMaskAssy & (1 << head)); + } + /* After the above process, the armed state should match + * assembly state */ + nvAssert(extRefClkMaskAssy == + pEvoSubDev->frameLockExtRefClkMaskArmed); + + /* Update the HEAD_SET_CONTROL EVO method state */ + + possibleHeadMask = nvGetActiveHeadMask(pDispEvo); + + FOR_ALL_HEADS(head, possibleHeadMask) { + if (nvkms_memcmp(&pEvoSubDev->headControl[head], + &pEvoSubDev->headControlAssy[head], + sizeof(NVEvoHeadControl))) { + + nvPushEvoSubDevMask(pDevEvo, 1 << sd); + + pEvoSubDev->headControl[head] = + pEvoSubDev->headControlAssy[head]; + pDevEvo->hal->SetHeadControl(pDevEvo, sd, head, + &updateState); + needUpdate = TRUE; + + nvPopEvoSubDevMask(pDevEvo); + } else if (refClkChanged[head]) { + needUpdate = TRUE; + } + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + } + } + + /* + * Inform GLS of framelock changes. It uses this information to do things + * like enable fake stereo to get stereo sync when stereo apps start + * without flickering the displays. 
+ */ + for (iteration = FIRST_ITERATION; + iteration <= LAST_ITERATION; + iteration++) { + + FOR_ALL_EVO_DEVS(pDevEvo) { + + if (!pDevEvo->gpus) { + continue; + } + + if (pDevEvo->displayHandle == 0) { + continue; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NvBool sendEvent = FALSE; + NvBool enable = FALSE, server = FALSE; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + switch (iteration) { + case DISABLE_UNNEEDED_CLIENTS: + if (cache[GpuIndex(pDevEvo, sd)][head].disableClient) { + enable = FALSE; + server = FALSE; + sendEvent = TRUE; + } + break; + case DISABLE_UNNEEDED_SERVER: + if (cache[GpuIndex(pDevEvo, sd)][head].disableServer) { + enable = FALSE; + server = TRUE; + sendEvent = TRUE; + } + break; + case ENABLE_SERVER: + if (cache[GpuIndex(pDevEvo, sd)][head].enableServer) { + enable = TRUE; + server = TRUE; + sendEvent = TRUE; + } + break; + case ENABLE_CLIENTS: + if (cache[GpuIndex(pDevEvo, sd)][head].enableClient) { + enable = TRUE; + server = FALSE; + sendEvent = TRUE; + } + break; + case UPDATE_HOUSE_SYNC: + case COMPUTE_HOUSE_SYNC: + sendEvent = FALSE; + break; + } + + if (sendEvent) { + nvUpdateGLSFramelock(pDispEvo, head, enable, server); + } + } + } + } + } +} + +/* + * For every head in the headMask on pDispEvo, construct a prioritized + * list of heads and call into the EVO locking state machine to + * perform the given transition. + * + * Return the list of heads that actually succeeded. + */ +static NvU32 applyActionForHeads(NVDispEvoPtr pDispEvo, + const NvU32 headMask, + NVEvoLockAction action) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NvU32 appliedHeadMask = 0; + NvU32 head; + + FOR_ALL_HEADS(head, headMask) { + NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1]; + unsigned int i = 0; + NvU32 tmpHead, usedHeadMask = 0; + + /* Fill in the array starting with this head, then with the others in + * the list, and finally any other active heads */ + pHeads[i++] = head; + usedHeadMask |= (1 << head); + + FOR_ALL_HEADS(tmpHead, headMask) { + if (usedHeadMask & (1 << tmpHead)) { + continue; + } + pHeads[i++] = tmpHead; + usedHeadMask |= (1 << tmpHead); + } + + for (tmpHead = 0; tmpHead < NVKMS_MAX_HEADS_PER_DISP; tmpHead++) { + if (!nvHeadIsActive(pDispEvo, tmpHead)) { + continue; + } + if (usedHeadMask & (1 << tmpHead)) { + continue; + } + pHeads[i++] = tmpHead; + usedHeadMask |= (1 << tmpHead); + } + + nvAssert(i <= NVKMS_MAX_HEADS_PER_DISP); + pHeads[i] = NV_INVALID_HEAD; + + if (pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, pHeads)) { + appliedHeadMask |= (1 << head); + } + } + + return appliedHeadMask; +} + +// +// Set up raster lock and frame lock for external frame lock +// + +NvBool nvEnableFrameLockEvo(NVDispEvoPtr pDispEvo) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 serverHead = nvGetFramelockServerHead(pDispEvo); + NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo); + NvU32 appliedHeadMask; + NvU32 activeClientHeadsMask; + NvBool useHouseSync = FALSE; + NvU32 head; + + nvAssert(pDispEvo->framelock.currentServerHead == NV_INVALID_HEAD); + nvAssert(pDispEvo->framelock.currentClientHeadsMask == 0x0); + + if (serverHead != NV_INVALID_HEAD && + (pFrameLockEvo->houseSyncMode == + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT)) { + + NvS64 houseSync; + + /* + * Only use house sync if present. + * XXX what happens when house sync is unplugged? 
why not enable it + * now and let the FPGA decide? + */ + if (!nvFrameLockGetStatusEvo(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + &houseSync)) { + return FALSE; + } + + useHouseSync = (houseSync != 0); + } + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* Enable the server */ + if ((serverHead != NV_INVALID_HEAD) && + nvHeadIsActive(pDispEvo, serverHead)) { + NvU32 serverHeadMask; + + serverHeadMask = (1 << serverHead); + appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_ADD_FRAME_LOCK_SERVER); + + nvAssert(appliedHeadMask == serverHeadMask); + pDispEvo->framelock.currentServerHead = serverHead; + + /* Enable house sync, if requested */ + if (useHouseSync) { + appliedHeadMask = + applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_ADD_FRAME_LOCK_HOUSE_SYNC); + + if (appliedHeadMask == serverHeadMask) { + pDispEvo->framelock.currentHouseSync = TRUE; + } + } + } + + /* Enable the clients */ + activeClientHeadsMask = 0; + FOR_ALL_HEADS(head, clientHeadsMask) { + if (nvHeadIsActive(pDispEvo, head)) { + activeClientHeadsMask |= (1 << head); + } + } + appliedHeadMask = applyActionForHeads(pDispEvo, activeClientHeadsMask, + NV_EVO_ADD_FRAME_LOCK_CLIENT); + + nvAssert(appliedHeadMask == activeClientHeadsMask); + pDispEvo->framelock.currentClientHeadsMask = activeClientHeadsMask; + + /* Finally, update the hardware */ + UpdateEvoLockState(); + + return TRUE; +} + +// +// Disable raster lock and frame lock +// + +NvBool nvDisableFrameLockEvo(NVDispEvoPtr pDispEvo) +{ + NvU32 serverHead = nvGetFramelockServerHead(pDispEvo); + NvU32 clientHeadsMask = nvGetFramelockClientHeadsMask(pDispEvo); + NvU32 activeClientHeadsMask; + NvU32 appliedHeadMask; + NvU32 head; + + /* Initialize the assembly state */ + SyncEvoLockState(); + + /* Disable the clients */ + activeClientHeadsMask = 0; + FOR_ALL_HEADS(head, clientHeadsMask) { + if (nvHeadIsActive(pDispEvo, head)) { + activeClientHeadsMask |= (1 << head); + } + } + appliedHeadMask = applyActionForHeads(pDispEvo, + activeClientHeadsMask, + NV_EVO_REM_FRAME_LOCK_CLIENT); + + nvAssert(appliedHeadMask == activeClientHeadsMask); + pDispEvo->framelock.currentClientHeadsMask &= ~activeClientHeadsMask; + + /* Disable house sync */ + if (serverHead != NV_INVALID_HEAD && + nvHeadIsActive(pDispEvo, serverHead)) { + NvU32 serverHeadMask = (1 << serverHead); + + if (pDispEvo->framelock.currentHouseSync) { + appliedHeadMask = + applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_REM_FRAME_LOCK_HOUSE_SYNC); + + nvAssert(appliedHeadMask == serverHeadMask); + pDispEvo->framelock.currentHouseSync = FALSE; + } + + /* Disable the server */ + appliedHeadMask = applyActionForHeads(pDispEvo, serverHeadMask, + NV_EVO_REM_FRAME_LOCK_SERVER); + nvAssert(appliedHeadMask == serverHeadMask); + if (appliedHeadMask == serverHeadMask) { + pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD; + } + } + + /* Finally, update the hardware */ + UpdateEvoLockState(); + + return TRUE; +} + +// +// Enable/Disable External Reference Clock Sync +// +// This function is used by frame lock to make the GPU sync to +// the external device's reference clock. 
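+// 'external' selects the reference: TRUE switches the head to the external
+// (frame lock) reference clock, FALSE restores the internal reference.
+// (Note added for clarity; the caller derives this per head from the
+// frameLockExtRefClkMaskAssy state.)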
+// +static void SetRefClk(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvBool external, + NVEvoUpdateState *updateState) +{ + nvPushEvoSubDevMask(pDevEvo, 1 << sd); + + pDevEvo->hal->SetHeadRefClk(pDevEvo, head, external, updateState); + + nvPopEvoSubDevMask(pDevEvo); +} + + +// +// Query raster lock state +// + +NvBool nvQueryRasterLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev; + const NvU32 head = pDpyEvo->head; + NVEvoHeadControlPtr pHC; + + if ((head == NV_INVALID_HEAD) || (pDevEvo->gpus == NULL)) { + return FALSE; + } + + pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + pHC = &pEvoSubDev->headControl[head]; + + *val = pHC->serverLock == NV_EVO_RASTER_LOCK || + pHC->clientLock == NV_EVO_RASTER_LOCK; + + return TRUE; +} + +/* + * Return the surface format usage bounds that NVKMS will program for the + * requested format. + * + * For an RGB XBPP format, this function will return a bitmask of all RGB YBPP + * formats, where Y <= X. + * + * For a YUV format, this function will return a bitmask of all YUV formats + * that: + * - Have the same number of planes as the requested format + * - Have the same chroma decimation factors as the requested format + * - Have the same or lower effective fetch bpp as the requested format + * + * For example, if the requested format is YUV420 12-bit SP, this function will + * include all YUV420 8/10/12-bit SP formats. + */ +NvU64 nvEvoGetFormatsWithEqualOrLowerUsageBound( + const enum NvKmsSurfaceMemoryFormat format, + NvU64 supportedFormatsCapMask) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + NvU64 supportedFormatsUsageBound = 0; + NvU8 formatIdx; + + FOR_EACH_INDEX_IN_MASK(64, formatIdx, supportedFormatsCapMask) { + + const NvKmsSurfaceMemoryFormatInfo *pOtherFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(formatIdx); + + if ((pFormatInfo->isYUV != pOtherFormatInfo->isYUV) || + (pFormatInfo->numPlanes != pOtherFormatInfo->numPlanes)) { + continue; + } + + if (pFormatInfo->isYUV) { + if ((pFormatInfo->yuv.horizChromaDecimationFactor != + pOtherFormatInfo->yuv.horizChromaDecimationFactor) || + (pFormatInfo->yuv.vertChromaDecimationFactor != + pOtherFormatInfo->yuv.vertChromaDecimationFactor) || + (pFormatInfo->yuv.depthPerComponent < + pOtherFormatInfo->yuv.depthPerComponent)) { + continue; + } + } else { + if (pFormatInfo->rgb.bitsPerPixel < + pOtherFormatInfo->rgb.bitsPerPixel) { + continue; + } + } + + supportedFormatsUsageBound |= NVBIT64(formatIdx); + + } FOR_EACH_INDEX_IN_MASK_END; + + return supportedFormatsUsageBound; +} + +// +// Enable or disable flip lock (or query state) +// + +NvBool nvUpdateFlipLockEvoOneHead(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU32 *val, NvBool set, + NvBool *needsEarlyUpdate, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (needsEarlyUpdate) { + *needsEarlyUpdate = FALSE; + } + + if (set) { + // make sure we're dealing with a bool + NvBool setVal = !!*val; + + if (setVal ^ pHC->flipLock) { + NvBool isMethodPending; + + if (!pDevEvo->hal-> + IsChannelMethodPending(pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + pDispEvo->displayOwner, + &isMethodPending) || + isMethodPending) { + nvAssert(!"Base channel not idle"); + return FALSE; + } + + if (setVal) { + // make 
sure flip lock is not prohibited and raster lock is enabled + if ((pHC->serverLock == NV_EVO_NO_LOCK && + pHC->clientLock == NV_EVO_NO_LOCK) || + HEAD_MASK_QUERY(pEvoSubDev->flipLockProhibitedHeadMask, + head)) { + return FALSE; + } + pHC->flipLock = TRUE; + } else { + /* Only actually disable fliplock if it's not needed for SLI */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask, + head)) { + pHC->flipLock = FALSE; + + /* + * When disabling fliplock during a modeset, the core + * channel needs to be updated before issuing further + * base flips. Notify the caller that fliplock has + * been disabled in the core channel's assembly state, + * and needs to be committed before issuing non-fliplocked + * base flips. + */ + if (needsEarlyUpdate) { + *needsEarlyUpdate = TRUE; + } + } + } + + EvoUpdateHeadParams(pDispEvo, head, updateState); + } + + /* Remember if we currently need fliplock enabled for framelock */ + pEvoSubDev->flipLockEnabledForFrameLockHeadMask = + setVal ? + HEAD_MASK_SET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head) : + HEAD_MASK_UNSET(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, head); + } + + /* + * XXX should the query return the cached "enabled for framelock" state + * instead? + */ + *val = pHC->flipLock; + + + return TRUE; +} + + +static NvBool UpdateFlipLock50(const NVDpyEvoRec *pDpyEvo, + NvU32 *val, NvBool set) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + const NvU32 head = pDpyEvo->head; + NVEvoUpdateState updateState = { }; + NvBool ret; + + if (head == NV_INVALID_HEAD) { + return FALSE; + } + + ret = nvUpdateFlipLockEvoOneHead(pDispEvo, head, val, set, + NULL /* needsEarlyUpdate */, + &updateState); + + if (set && ret) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + + return ret; +} + +NvBool nvSetFlipLockEvo(NVDpyEvoPtr pDpyEvo, NvS64 value) +{ + NvU32 val32 = !!value; + return UpdateFlipLock50(pDpyEvo, &val32, TRUE /* set */); +} + +NvBool nvGetFlipLockEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *pValue) +{ + NvBool ret; + NvU32 val32 = 0; + ret = UpdateFlipLock50(pDpyEvo, &val32, FALSE /* set */); + + if (ret) { + *pValue = !!val32; + } + + return ret; +} + +static void ProhibitFlipLock50(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoHeadControlPtr pHC = NULL; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if (HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForFrameLockHeadMask, + head)) { + nvAssert(!"Can not prohibit flip lock " + "because it is already enabled for frame lock"); + continue; + } + + pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock) { + needUpdate = TRUE; + + pHC->flipLock = FALSE; + EvoUpdateHeadParams(pDispEvo, head, &updateState); + } + + pEvoSubDev->flipLockProhibitedHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockProhibitedHeadMask, head); + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } +} + +static void AllowFlipLock50(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + NvBool needUpdate = FALSE; + NVEvoUpdateState updateState = { }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoHeadControlPtr pHC = NULL; + + if 
(!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + pHC = &pEvoSubDev->headControl[head]; + + if (!pHC->flipLock && + HEAD_MASK_QUERY(pEvoSubDev->flipLockEnabledForSliHeadMask, + head)) { + needUpdate = TRUE; + + nvAssert(pHC->serverLock != NV_EVO_NO_LOCK || + pHC->clientLock != NV_EVO_NO_LOCK); + + pHC->flipLock = TRUE; + EvoUpdateHeadParams(pDispEvo, head, &updateState); + } + + pEvoSubDev->flipLockProhibitedHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockProhibitedHeadMask, head); + } + + if (needUpdate) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } +} + +NvBool nvAllowFlipLockEvo(NVDispEvoPtr pDispEvo, NvS64 value) +{ + if (value == 0) { + ProhibitFlipLock50(pDispEvo); + } else { + AllowFlipLock50(pDispEvo); + } + return TRUE; +} + +/*! + * Enable or disable stereo. + * + * XXX SLI+Stereo For now, just set stereo on the display owner. + */ +NvBool nvSetStereoEvo( + const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvBool enable) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC; + NVEvoLockPin pin; + + nvAssert(head != NV_INVALID_HEAD); + + pHC = &pEvoSubDev->headControl[head]; + pin = NV_EVO_LOCK_PIN_INTERNAL(head); + + // make sure we're dealing with a bool + NvBool stereo = !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin); + + if (enable ^ stereo) { + NVEvoUpdateState updateState = { }; + + if (enable) { + NvU32 otherHead; + NvU32 signalPin; + + // If any other head is already driving stereo, fail + for (otherHead = 0; otherHead < NVKMS_MAX_HEADS_PER_DISP; + otherHead++) { + if (!nvHeadIsActive(pDispEvo, otherHead)) { + continue; + } + if (head == otherHead) { + continue; + } + + const NVEvoHeadControl *pOtherHC = + &pEvoSubDev->headControl[otherHead]; + + if (!NV_EVO_LOCK_PIN_IS_INTERNAL(pOtherHC->stereoPin)) { + return FALSE; + } + } + + signalPin = nvEvoGetPinForSignal(pDispEvo, + pEvoSubDev, + NV_EVO_LOCK_SIGNAL_STEREO); + if (signalPin != NV_EVO_LOCK_PIN_ERROR) { + pin = signalPin; + } + } + + pHC->stereoPin = pin; + + EvoUpdateHeadParams(pDispEvo, head, &updateState); + + // Make method take effect. + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, + TRUE /* releaseElv */); + } + + return TRUE; +} + +/*! + * Query stereo state. + * + * XXX SLI+Stereo For now, just get stereo on the display owner. + */ +NvBool nvGetStereoEvo(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + NVEvoHeadControlPtr pHC; + + nvAssert(head != NV_INVALID_HEAD); + + pHC = &pEvoSubDev->headControl[head]; + + return !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin); +} + +void nvSetViewPortsEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeViewPortEvo *pViewPort = &pHeadState->timings.viewPort; + + // Image sharpening is available when scaling is enabled. + pHeadState->attributes.imageSharpening.available = + (pViewPort->out.width != pViewPort->in.width) || + (pViewPort->out.height != pViewPort->in.height); + + // cache HEAD_SET_CONTROL_OUTPUT_SCALER H/V taps for use in + // SetOutputScaler(). This is needed because SetOutputScaler may be called + // from nvSetImageSharpeningEvo where the NVHwModeViewPortEvo + // isn't as easily accessible. 
+ pHeadState->hTaps = pViewPort->hTaps; + pHeadState->vTaps = pViewPort->vTaps; + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetViewportInOut(pDevEvo, head, + pViewPort, pViewPort, pViewPort, + updateState); + nvPopEvoSubDevMask(pDevEvo); + + /* + * Specify safe default values of 0 for viewPortPointIn x and y; these + * may be changed when panning out of band of a modeset. + */ + EvoSetViewportPointIn(pDispEvo, head, 0 /* x */, 0 /* y */, updateState); +} + + + +static void EvoSetViewportPointIn(NVDispEvoPtr pDispEvo, const NvU32 head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetViewportPointIn(pDevEvo, head, x, y, updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +static inline NvU32 LUTNotifierForHead(const NvU32 head) +{ + nvAssert(head != NV_INVALID_HEAD); + return 1 + head; +} + +//****************************************************************************** +// +// Function: EvoUpdateCurrentPalette +// +// Description: Setting the palette +// +// Arguments: +// +// Return Value: None. +// +//****************************************************************************** +void nvEvoUpdateCurrentPalette(NVDispEvoPtr pDispEvo, + NvU32 head, NvBool kickOff) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const int dispIndex = pDispEvo->displayOwner; + NvU8 lutIndex = pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex; + NVEvoUpdateState updateState = { }; + + pDevEvo->hal->SetLUTContextDma( + pDispEvo, + head, + pDevEvo->lut.head[head].LUT[lutIndex], + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled, + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled, + &updateState, + pHeadState->bypassComposition); + + /* + * EVO2 does not set LUT context DMA if the core channel + * doesn't have a scanout surface set, in that case there is no update + * state to kickoff. + */ + if (kickOff && !nvIsUpdateStateEmpty(pDevEvo, &updateState)) { + // Clear the completion notifier and kick off an update. Wait for it + // here if NV_CTRL_SYNCHRONOUS_PALETTE_UPDATES is enabled. Otherwise, + // don't wait for the notifier -- it'll be checked the next time a LUT + // change request comes in. + EvoUpdateAndKickOffWithNotifier(pDispEvo, + TRUE, /* notify */ + FALSE, /* sync */ + LUTNotifierForHead(head), + &updateState, + TRUE /* releaseElv */); + pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate = TRUE; + } +} + +static void UpdateMaxPixelClock(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NVDpyEvoPtr pDpyEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + nvDpyProbeMaxPixelClock(pDpyEvo); + } + } +} + +static NvBool AllocEvoSubDevs(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 sd; + + pDevEvo->gpus = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoSubDevRec)); + + if (pDevEvo->gpus == NULL) { + return FALSE; + } + + /* Assign the pDispEvo for each evoSubDevice */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + pDevEvo->gpus[sd].pDispEvo = pDispEvo; + } + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + nvAssert(pDevEvo->gpus[sd].pDispEvo != NULL); + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 head; + + pDevEvo->gpus[sd].subDeviceInstance = sd; + // Initialize the lock state. 
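+        // (nvEvoStateStartNoLock() places the subdevice's scan-lock state
+        // machine in its initial, unlocked state; the per-layer CSC matrices
+        // below are then defaulted to identity.)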
+ nvEvoStateStartNoLock(pEvoSubDev); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pSdHeadState->layer); i++) { + pSdHeadState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX; + } + } + } + + return TRUE; +} + +static NvBool ValidateConnectorTypes(const NVDevEvoRec *pDevEvo) +{ + const NVDispEvoRec *pDispEvo; + const NVConnectorEvoRec *pConnectorEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + const NVEvoSubDevRec *pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + const NVEvoCapabilities *pEvoCaps = &pEvoSubDev->capabilities; + const NVEvoMiscCaps *pMiscCaps = &pEvoCaps->misc; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!pMiscCaps->supportsDSI && + pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "DSI connectors are unsupported!"); + return FALSE; + } + } + } + return TRUE; +} + +/*! + * Allocate the EVO core channel. + * + * This function trivially succeeds if the core channel is already allocated. + */ +NvBool nvAllocCoreChannelEvo(NVDevEvoPtr pDevEvo) +{ + NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { }; + NvU32 ret; + NvBool bRet; + NVDispEvoRec *pDispEvo; + NvU32 dispIndex; + NvU32 head; + + /* Do nothing if the display was already allocated */ + if (pDevEvo->displayHandle != 0) { + return TRUE; + } + + if (!AllocEvoSubDevs(pDevEvo)) { + goto failed; + } + + // Disallow GC6 in anticipation of touching GPU/displays. + if (!nvRmSetGc6Allowed(pDevEvo, FALSE)) { + goto failed; + } + + /* Query console FB info, and save the result into pDevEvo->vtFbInfo. + * This is done at device allocation time. + * nvRmImportFbConsoleMemory will import the surface for console restore by + * nvEvoRestoreConsole if the surface format is compatible. + * Else, console restore will cause core channel realloc, telling RM to + * restore the console via nvRmVTSwitch. + */ + if (!nvRmGetVTFBInfo(pDevEvo)) { + goto failed; + } + + if (!nvRmVTSwitch(pDevEvo, + NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE)) { + goto failed; + } + + /* Evo object (parent of all other NV50 display stuff) */ + nvAssert(nvRmEvoClassListCheck(pDevEvo, pDevEvo->dispClass)); + pDevEvo->displayHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayHandle, + pDevEvo->dispClass, + NULL); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to initialize display engine: 0x%x (%s)", + ret, nvstatusToString(ret)); + goto failed; + } + + /* Get the display caps bits */ + + ct_assert(sizeof(pDevEvo->capsBits) == sizeof(capsParams.capsTbl)); + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SYSTEM_GET_CAPS_V2, + &capsParams, sizeof(capsParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine display capabilities"); + goto failed; + } + nvkms_memcpy(pDevEvo->capsBits, capsParams.capsTbl, + sizeof(pDevEvo->capsBits)); + + // Evo core channel. 
Allocated once, shared per GPU + if (!nvRMSetupEvoCoreChannel(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine core DMA push buffer"); + goto failed; + } + + pDevEvo->coreInitMethodsPending = TRUE; + + bRet = pDevEvo->hal->GetCapabilities(pDevEvo); + + if (!bRet) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query display engine capability bits."); + goto failed; + } + + /* + * XXX NVKMS TODO: if the EVO core channel is allocated (and + * capability notifier queried) before any nvDpyConnectEvo(), then + * we won't need to update the pixelClock here. + */ + UpdateMaxPixelClock(pDevEvo); + + if (pDevEvo->numWindows > 0) { + int win; + + if (!nvRMAllocateWindowChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine window channels"); + goto failed; + } + + for (win = 0; win < pDevEvo->numWindows; win++) { + const NvU32 head = pDevEvo->headForWindow[win]; + + if (head == NV_INVALID_HEAD) { + continue; + } + + pDevEvo->head[head].layer[pDevEvo->head[head].numLayers] = + pDevEvo->window[win]; + pDevEvo->head[head].numLayers++; + } + } else { + // Allocate the base channels + if (!nvRMAllocateBaseChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine base channels"); + goto failed; + } + + // Allocate the overlay channels + if (!nvRMAllocateOverlayChannels(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate display engine overlay channels"); + goto failed; + } + + /* Map base and overlay channels onto main and overlay layers. */ + for (head = 0; head < pDevEvo->numHeads; head++) { + nvAssert(pDevEvo->base[head] != NULL && pDevEvo->overlay[head] != NULL); + + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER] = pDevEvo->base[head]; + pDevEvo->head[head].layer[NVKMS_OVERLAY_LAYER] = pDevEvo->overlay[head]; + pDevEvo->head[head].numLayers = 2; + } + } + + // Allocate and map the cursor controls for all heads + bRet = nvAllocCursorEvo(pDevEvo); + if (!bRet) { + goto failed; + } + + if (!nvAllocLutSurfacesEvo(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate memory for the display color lookup table."); + goto failed; + } + + // Resume the DisplayPort library's control of the device. + if (!nvRmResumeDP(pDevEvo)) { + nvEvoLogDev( + pDevEvo, + EVO_LOG_ERROR, + "Failed to initialize DisplayPort sub-system."); + goto failed; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvRmRegisterBacklight(pDispEvo); + } + + // Allow GC6 if no heads are active. + if (nvAllHeadsInactive(pDevEvo)) { + if (!nvRmSetGc6Allowed(pDevEvo, TRUE)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "No head is active, but failed to allow GC6"); + } + } + + return TRUE; + +failed: + nvFreeCoreChannelEvo(pDevEvo); + + return FALSE; +} + +/*! + * Clear the pConnectorEvo->or.mask tracking. + */ +static void ClearSORAssignmentsOneDisp(NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo; + + nvAssert(NV0073_CTRL_SYSTEM_GET_CAP(pDispEvo->pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + continue; + } + + pConnectorEvo->or.mask = 0x0; + } +} + +/*! + * Update pConnectorEvo->or.mask from the list given to us by RM. 
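+ *
+ * Illustrative example (values assumed, not from a real query): if RM's
+ * sorAssignList[2] contains this connector's displayId bit, the loop below
+ * records that assignment as
+ *
+ *     pConnectorEvo->or.mask |= NVBIT(2);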
+ */ +static void RefreshSORAssignments(NVDispEvoPtr pDispEvo, + const NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams) +{ + NVConnectorEvoPtr pConnectorEvo; + + ClearSORAssignmentsOneDisp(pDispEvo); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + NvU32 sorIndex; + + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + continue; + } + + for (sorIndex = 0; + sorIndex < ARRAY_LEN(pParams->sorAssignList) && + sorIndex < ARRAY_LEN(pConnectorEvo->or.ownerHeadMask); + sorIndex++) { + if ((pParams->sorAssignList[sorIndex] & displayId) == displayId) { + pConnectorEvo->or.mask |= NVBIT(sorIndex); + } + } + } +} + +/* + * Ask RM to assign an SOR for the given connector. + * + * Note that this assignment may be temporary. This function will always call + * RM, and unless the connector is currently in use (i.e., being driven by a + * head), a previously-assigned SOR may be reused. + * + * The RM will either: + * a) return an SOR that's already assigned/attached to this connector, or + * b) pick a new "unused" SOR, assign and attach it to this connector, and + * return that -- where "unused" means both not being actively driven by a + * head and not in the "exclude mask" argument. + * The "exclude mask" is useful if we need to assign multiple SORs up front + * before activating heads to drive them. + * + * For example, if head 0 is currently actively scanning out to SOR 0 and we + * are doing a modeset to activate currently-inactive heads 1 and 2: + * 1. nvkms calls RM for nvAssignSOREvo(pConnectorForHead1, 0); + * RM returns any SOR other than 0 (say 3) + * 2. nvkms calls RM for nvAssignSOREvo(pConnectorForHead2, (1 << 3)); + * RM returns any SOR other than 0 and 3 (say 1) + * 3. At this point nvkms can push methods and UPDATE to enable heads 1 and 2 + * to drive SORs 3 and 1. + * In the example above, the sorExcludeMask == (1 << 3) at step 2 is important + * to ensure that RM doesn't reuse the SOR 3 from step 1. It won't reuse SOR 0 + * because it's in use by head 0. + * + * If an SOR is only needed temporarily (e.g., to do link training to "assess" + * a DisplayPort or HDMI FRL link), then sorExcludeMask should be 0 -- any SOR + * that's not actively used by a head can be used, and as soon as nvkms + * finishes the "assessment", the SOR is again eligible for reuse. + * + * Because of the potential for SOR reuse, nvAssignSOREvo() will always call + * RefreshSORAssignments() to update pConnectorEvo->or.mask on *every* + * connector after calling NV0073_CTRL_CMD_DFP_ASSIGN_SOR for *any* connector. + */ +NvBool nvAssignSOREvo(NVConnectorEvoPtr pConnectorEvo, NvU32 sorExcludeMask) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NvU32 displayId = 0x0; + + NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = { 0 }; + NvU32 ret; + + /* + * Skip assigning an SOR for non-SOR connectors or if an SOR is already + * assigned. 
+ */ + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + return TRUE; + } + + if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + return TRUE; + } + + /* Mode-set is not possible without SOR */ + nvAssert(!nvIsConnectorActiveEvo(pConnectorEvo)); + + displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = displayId; + params.sorExcludeMask = sorExcludeMask; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_ASSIGN_SOR, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + RefreshSORAssignments(pDispEvo, ¶ms); + nvAssert(pConnectorEvo->or.mask != 0); + + return TRUE; +} + +void nvRestoreSORAssigmentsEvo(NVDevEvoRec *pDevEvo) +{ + NVDispEvoRec *pDispEvo; + NvU32 dispIndex; + + if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + return; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + const NVConnectorEvoRec * + sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS] = { }; + const NVConnectorEvoRec *pConnectorEvo; + NvU32 sorIndex; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NvU32 i; + + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + continue; + } + + FOR_EACH_INDEX_IN_MASK(32, i, pConnectorEvo->or.mask) { + /* + * RM populates same sor index into more than one connectors if + * they are are DCC partners, this checks make sure SOR + * assignment happens only for a single connector. The sor + * assignment call before modeset/dp-link-training makes sure + * assignment happens for the correct connector. + */ + if (sorAssignList[i] != NULL) { + continue; + } + sorAssignList[i] = pConnectorEvo; + } FOR_EACH_INDEX_IN_MASK_END + } + + for (sorIndex = 0; sorIndex < ARRAY_LEN(sorAssignList); sorIndex++) { + if (sorAssignList[sorIndex] == NULL) { + continue; + } + + NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS params = { + .subDeviceInstance = pDispEvo->displayOwner, + .displayId = nvDpyIdToNvU32(sorAssignList[sorIndex]->displayId), + .sorExcludeMask = ~NVBIT(sorIndex), + }; + NvU32 ret; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_ASSIGN_SOR, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, + EVO_LOG_ERROR, + "Failed to restore SOR-%u -> %s assigment.", + sorIndex, sorAssignList[sorIndex]->name); + } else { + RefreshSORAssignments(pDispEvo, ¶ms); + } + } + } +} + +/*! + * Free the EVO core channel. + * + * This function does nothing if the core channel was already free. + */ +void nvFreeCoreChannelEvo(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvU32 head; + + nvEvoCancelPostFlipIMPTimer(pDevEvo); + nvCancelVrrFrameReleaseTimers(pDevEvo); + + nvCancelLowerDispBandwidthTimer(pDevEvo); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + nvRmUnregisterBacklight(pDispEvo); + + nvAssert(pDevEvo->skipConsoleRestore || + nvDpyIdListIsEmpty(nvActiveDpysOnDispEvo(pDispEvo))); + } + + // Pause the DisplayPort library's control of the device. + nvRmPauseDP(pDevEvo); + + nvFreeLutSurfacesEvo(pDevEvo); + + // Unmap and free the cursor controls for all heads + nvFreeCursorEvo(pDevEvo); + + // TODO: Unregister all surfaces registered with this device. 
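+
+    // Teardown below proceeds roughly in the reverse order of
+    // nvAllocCoreChannelEvo(): clear the per-head layer pointers, free the
+    // window/overlay/base channels and then the core channel, free the disp
+    // object, and finally restore the VT state (unless skipConsoleRestore
+    // is set) and re-allow GC6.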
+ + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + nvRmEvoFreePreSyncpt(pDevEvo, pDevEvo->head[head].layer[layer]); + pDevEvo->head[head].layer[layer] = NULL; + } + pDevEvo->head[head].numLayers = 0; + } + + nvRMFreeWindowChannels(pDevEvo); + nvRMFreeOverlayChannels(pDevEvo); + nvRMFreeBaseChannels(pDevEvo); + + nvRMFreeEvoCoreChannel(pDevEvo); + + if (pDevEvo->displayHandle != 0) { + if (nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayHandle) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to tear down Disp"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->displayHandle); + pDevEvo->displayHandle = 0; + + if (!pDevEvo->skipConsoleRestore) { + nvRmVTSwitch(pDevEvo, + NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_RESTORE_VT_STATE); + } else { + nvRmVTSwitch(pDevEvo, + NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_CONSOLE_RESTORED); + } + } + + // No longer possible that NVKMS is driving any displays, allow GC6. + nvRmSetGc6Allowed(pDevEvo, TRUE); + + nvFree(pDevEvo->gpus); + pDevEvo->gpus = NULL; +} + + +#define ASSIGN_PIN(_pPin, _pin) \ + do { \ + ct_assert(NV_IS_UNSIGNED((_pin))); \ + if ((_pPin)) { \ + if ((_pin) >= NV_EVO_NUM_LOCK_PIN_CAPS) { \ + return FALSE; \ + } \ + *(_pPin) = (_pin); \ + } \ + } while (0) + +static NvBool QueryFrameLockHeaderPins(const NVDispEvoRec *pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NvU32 *pFrameLockPin, + NvU32 *pRasterLockPin, + NvU32 *pFlipLockPin) +{ + NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS params = { }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS, + ¶ms, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to query framelock header pins"); + return FALSE; + } + + ASSIGN_PIN(pFrameLockPin, params.frameLockPin); + ASSIGN_PIN(pRasterLockPin, params.rasterLockPin); + ASSIGN_PIN(pFlipLockPin, params.flipLockPin); + + return TRUE; +} + +// Gets the lock pin dedicated for a given signal and returns the corresponding method +NVEvoLockPin nvEvoGetPinForSignal(const NVDispEvoRec *pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoLockSignal signal) +{ + NVEvoLockPinCaps *caps = pEvoSubDev->capabilities.pin; + NvU32 pin; + + switch (signal) { + + case NV_EVO_LOCK_SIGNAL_RASTER_LOCK: + if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev, + NULL, &pin, NULL)) { + break; + } + + if (!caps[pin].scanLock) break; + + return NV_EVO_LOCK_PIN_0 + pin; + + case NV_EVO_LOCK_SIGNAL_FRAME_LOCK: + if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev, + &pin, NULL, NULL)) { + break; + } + + if (!caps[pin].scanLock) break; + + return NV_EVO_LOCK_PIN_0 + pin; + + case NV_EVO_LOCK_SIGNAL_FLIP_LOCK: + if (!QueryFrameLockHeaderPins(pDispEvo, pEvoSubDev, + NULL, NULL, &pin) || + !caps[pin].flipLock) { + // If the query from RM fails (or returns a bogus pin), fall + // back to an alternate mechanism. This may happen on boards + // with no framelock header. Look in the capabilities for the + // pin that has the requested capability. 
+ for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) { + if (caps[pin].flipLock) + break; + } + + if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) { + // Not found + break; + } + } + + if (!caps[pin].flipLock) { + break; + } + + return NV_EVO_LOCK_PIN_0 + pin; + + case NV_EVO_LOCK_SIGNAL_STEREO: + // Look in the capabilities for the pin that has the requested capability + for (pin = 0; pin < NV_EVO_NUM_LOCK_PIN_CAPS; pin++) { + if (caps[pin].stereo) + break; + } + + if (pin == NV_EVO_NUM_LOCK_PIN_CAPS) break; + + return NV_EVO_LOCK_PIN_0 + pin; + + default: + nvAssert(!"Unknown signal type"); + break; + } + + // Pin not found + return NV_EVO_LOCK_PIN_ERROR; +} + +void nvSetDVCEvo(NVDispEvoPtr pDispEvo, + const NvU32 head, + NvS32 dvc, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + pHeadState->attributes.dvc = dvc; + + nvAssert(dvc >= NV_EVO_DVC_MIN); + nvAssert(dvc <= NV_EVO_DVC_MAX); + + // HW range is from -2048 to + 2047 + // Negative values, are not used they distort the colors + // Values from 1023 to 0 are greying the colors out. + // We use 0 to 2047 with 1024 as default. + dvc += 1024; + nvAssert(dvc >= 0); + pHeadState->procAmp.satCos = dvc; + + // In SW YUV420 mode, HW is programmed with default DVC. The DVC is handled + // in a headSurface composite shader. + if (pHeadState->timings.yuv420Mode == NV_YUV420_MODE_SW) { + pHeadState->procAmp.satCos = 1024; + } + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetProcAmp(pDispEvo, head, updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +void nvSetImageSharpeningEvo(NVDispEvoRec *pDispEvo, const NvU32 head, + NvU32 value, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + pHeadState->attributes.imageSharpening.value = value; + + /* + * Evo values are from -128 to 127, with a default of 0. + * Negative values sharpen. + * Control panel values from 0 (less sharp) to 255 + */ + value = 127 - value; + + nvPushEvoSubDevMaskDisp(pDispEvo); + pDevEvo->hal->SetOutputScaler(pDispEvo, head, value, updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +NvBool nvLayerSetPositionEvo( + NVDevEvoPtr pDevEvo, + const struct NvKmsSetLayerPositionRequest *pRequest) +{ + NVDispEvoPtr pDispEvo; + NvU32 sd; + + /* + * We need this call to not modify any state if it will fail, so we + * first verify that all relevant layers support output positioning, + * then go back through the layers to actually modify the relevant + * state. + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + + if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NvU32 layer; + + if ((pRequest->disp[sd].requestedHeadsBitMask & + NVBIT(head)) == 0) { + continue; + } + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NvS16 x = pRequest->disp[sd].head[head].layerPosition[layer].x; + const NvS16 y = pRequest->disp[sd].head[head].layerPosition[layer].y; + + if ((pRequest->disp[sd].head[head].requestedLayerBitMask & + NVBIT(layer)) == 0x0) { + continue; + } + + /* + * Error out if a requested layer does not support position + * updates and the requested position is not (0, 0). 
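+                 * (A requested position of (0, 0) is always accepted, even
+                 * for layers without window-mode support, so callers can
+                 * include every layer in the request uniformly.)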
+ */ + if (!pDevEvo->caps.layerCaps[layer].supportsWindowMode && + (x != 0 || y != 0)) { + nvEvoLogDebug(EVO_LOG_ERROR, "Layer %d does not support " + "position updates.", layer); + return FALSE; + } + } + } + } + + /* Checks in above block passed, so make the requested changes. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + + if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NVEvoUpdateState updateState = { }; + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvU32 layer; + + if ((pRequest->disp[sd].requestedHeadsBitMask & + NVBIT(head)) == 0) { + continue; + } + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NvS16 x = pRequest->disp[sd].head[head].layerPosition[layer].x; + const NvS16 y = pRequest->disp[sd].head[head].layerPosition[layer].y; + + if ((pRequest->disp[sd].head[head].requestedLayerBitMask & + NVBIT(layer)) == 0x0) { + continue; + } + + if ((pSdHeadState->layer[layer].outputPosition.x != x) || + (pSdHeadState->layer[layer].outputPosition.y != y)) { + + NVEvoChannelPtr pChannel = pDevEvo->head[head].layer[layer]; + + pSdHeadState->layer[layer].outputPosition.x = x; + pSdHeadState->layer[layer].outputPosition.y = y; + + pDevEvo->hal->SetImmPointOut(pDevEvo, pChannel, sd, + &updateState, x, y); + } + } + + pDevEvo->hal->Update(pDevEvo, &updateState, TRUE /* releaseElv */); + } + } + + return TRUE; +} + +/* + * nvConstructHwModeTimingsImpCheckEvo() - perform an IMP check on the + * given raster timings and viewport during the + * nvConstructHwModeTimingsEvo path. If IMP fails, we try multiple + * times, each time scaling back the usage bounds until we find a + * configuration IMP will accept, or until we can't scale back any + * further. If this fails, mark the viewport as invalid. + */ + +NvBool nvConstructHwModeTimingsImpCheckEvo( + const NVConnectorEvoRec *pConnectorEvo, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + const int head) +{ + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP]; + NvBool requireBootClocks = !!(pParams->overrides & + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS); + NvU32 ret; + + /* bypass this checking if the user disabled IMP */ + + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK) != 0) { + return TRUE; + } + + nvkms_memset(&timingsParams, 0, sizeof(timingsParams)); + + timingsParams[head].pConnectorEvo = pConnectorEvo; + timingsParams[head].activeRmId = + nvRmAllocDisplayId( + pConnectorEvo->pDispEvo, + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId)); + if (timingsParams[head].activeRmId == 0x0) { + return FALSE; + } + timingsParams[head].pTimings = pTimings; + timingsParams[head].pUsage = &pTimings->viewPort.guaranteedUsage; + + ret = nvValidateImpOneDispDowngrade(pConnectorEvo->pDispEvo, timingsParams, + requireBootClocks, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE, + /* downgradePossibleHeadsBitMask */ + (NVBIT(NVKMS_MAX_HEADS_PER_DISP) - 1UL)); + if (!ret) { + nvEvoLogInfoString(pInfoString, + "ViewPort %dx%d exceeds hardware capabilities.", + pTimings->viewPort.out.width, + pTimings->viewPort.out.height); + } + + nvRmFreeDisplayId(pConnectorEvo->pDispEvo, timingsParams[head].activeRmId); + + return ret; +} + +/* + * Convert from NvModeTimings values to NVHwModeTimingsEvo. 
+ */ + +static void +ConstructHwModeTimingsFromNvModeTimings(const NvModeTimings *pModeTimings, + NVHwModeTimingsEvoPtr pTimings) +{ + NvU32 hBlankStart; + NvU32 vBlankStart; + NvU32 hBlankEnd; + NvU32 vBlankEnd; + NvU32 hSyncWidth; + NvU32 vSyncWidth; + NvU32 vTotalAdjustment = 0; + + NvModeTimings modeTimings; + + modeTimings = *pModeTimings; + + if (modeTimings.doubleScan) { + modeTimings.vVisible *= 2; + modeTimings.vSyncStart *= 2; + modeTimings.vSyncEnd *= 2; + modeTimings.vTotal *= 2; + } + + /* + * The real pixel clock and width values for modes using YUV 420 emulation + * are half of the incoming values parsed from the EDID. This conversion is + * performed here, so NvModeTimings will have the user-visible (full width) + * values, and NVHwModeTimingsEvo will have the real (half width) values. + * + * HW YUV 420 requires setting the full width mode timings, which are then + * converted in HW. RM will recognize YUV420 mode is in use and halve + * these values for IMP. + * + * In either case, only modes with even width are allowed in YUV 420 mode. + */ + if (modeTimings.yuv420Mode != NV_YUV420_MODE_NONE) { + nvAssert(((modeTimings.pixelClockHz & 1) == 0) && + ((modeTimings.hVisible & 1) == 0) && + ((modeTimings.hSyncStart & 1) == 0) && + ((modeTimings.hSyncEnd & 1) == 0) && + ((modeTimings.hTotal & 1) == 0) && + ((modeTimings.vVisible & 1) == 0)); + if (modeTimings.yuv420Mode == NV_YUV420_MODE_SW) { + modeTimings.pixelClockHz /= 2; + modeTimings.hVisible /= 2; + modeTimings.hSyncStart /= 2; + modeTimings.hSyncEnd /= 2; + modeTimings.hTotal /= 2; + } + } + + pTimings->hSyncPol = modeTimings.hSyncNeg; + pTimings->vSyncPol = modeTimings.vSyncNeg; + pTimings->interlaced = modeTimings.interlaced; + pTimings->doubleScan = modeTimings.doubleScan; + + /* pTimings->pixelClock are in KHz but modeTimings.pixelClock are in Hz */ + + pTimings->pixelClock = HzToKHz(modeTimings.pixelClockHz); + + /* + * assign total width, height; note that when the rastertimings + * are interlaced, we need to make sure SetRasterSize.Height is + * odd, per EVO's mfs file + */ + + if (pTimings->interlaced) vTotalAdjustment = 1; + + pTimings->rasterSize.x = modeTimings.hTotal; + pTimings->rasterSize.y = modeTimings.vTotal | vTotalAdjustment; + + /* + * A bit of EVO quirkiness: The hw increases the blank/sync values + * by one. So we need to offset by subtracting one. + * + * In other words, the h/w inserts one extra sync line/pixel thus + * incrementing the raster params by one. The number of blank + * lines/pixels we get is true to what we ask for. Note the hw + * does not increase the TotalImageSize by one so we don't need to + * adjust SetRasterSize. + * + * This is slightly unintuitive. Per Evo's specs, the blankEnd + * comes before blankStart as defined below: BlankStart: The last + * pixel/line at the end of the h/v active area. BlankEnd: The + * last pixel/line at the end of the h/v blanking. + * + * Also: note that in the below computations, we divide by two for + * interlaced modes *before* subtracting; see bug 263622. 
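+     *
+     * Worked example (standard CEA-861 1920x1080@60Hz timings, shown only
+     * for illustration): hVisible 1920, hSyncStart 2008, hSyncEnd 2052,
+     * hTotal 2200 gives
+     *
+     *   hBlankStart = 1920 + (2200 - 2008) = 2112 -> rasterBlankStart.x = 2111
+     *   hBlankEnd   =        (2200 - 2008) =  192 -> rasterBlankEnd.x   =  191
+     *   hSyncWidth  =        (2052 - 2008) =   44 -> rasterSyncEnd.x    =   43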
+ */ + + hBlankStart = modeTimings.hVisible + + (modeTimings.hTotal - modeTimings.hSyncStart); + + vBlankStart = modeTimings.vVisible + + (modeTimings.vTotal - modeTimings.vSyncStart); + + hBlankEnd = (modeTimings.hTotal - modeTimings.hSyncStart); + vBlankEnd = (modeTimings.vTotal - modeTimings.vSyncStart); + + hSyncWidth = (modeTimings.hSyncEnd - modeTimings.hSyncStart); + vSyncWidth = (modeTimings.vSyncEnd - modeTimings.vSyncStart); + + if (pTimings->interlaced) { + vBlankStart /= 2; + vBlankEnd /= 2; + vSyncWidth /= 2; + } + + pTimings->rasterSyncEnd.x = hSyncWidth - 1; + pTimings->rasterSyncEnd.y = vSyncWidth - 1; + pTimings->rasterBlankStart.x = hBlankStart - 1; + pTimings->rasterBlankStart.y = vBlankStart - 1; + pTimings->rasterBlankEnd.x = hBlankEnd - 1; + pTimings->rasterBlankEnd.y = vBlankEnd - 1; + + /* assign rasterVertBlank2 */ + + if (pTimings->interlaced) { + const NvU32 firstFieldHeight = modeTimings.vTotal / 2; + + pTimings->rasterVertBlank2Start = firstFieldHeight + vBlankStart - 1; + pTimings->rasterVertBlank2End = firstFieldHeight + vBlankEnd - 1; + } else { + pTimings->rasterVertBlank2Start = 0; + pTimings->rasterVertBlank2End = 0; + } + + pTimings->hdmi3D = modeTimings.hdmi3D; + pTimings->yuv420Mode = modeTimings.yuv420Mode; +} + + + +/* + * Adjust the HwModeTimings as necessary to meet dual link dvi + * requirements; returns TRUE if the timings were successfully + * modified; returns FALSE if the timings cannot be made valid for + * dual link dvi. + */ +static NvBool ApplyDualLinkRequirements(const NVDpyEvoRec *pDpyEvo, + const struct + NvKmsModeValidationParams *pParams, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + int adjust; + + nvAssert(pDpyEvo->pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP); + + if (pTimings->protocol != NVKMS_PROTOCOL_SOR_DUAL_TMDS) { + return TRUE; + } + + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_DUAL_LINK_DVI_CHECK) != 0) { + return TRUE; + } + + /* extract the fields we will need below */ + + /* + * hTotal must be even for dual link dvi; we won't try to patch + * the htotal size; just give up if it isn't even + */ + + if ((pTimings->rasterSize.x % 2) != 0) { + nvEvoLogInfoString(pInfoString, + "Horizontal Total (%d) must be even for dual link DVI mode timings.", + pTimings->rasterSize.x); + return FALSE; + } + + /* + * RASTER_BLANK_END_X must be odd, so that the active region + * starts on the following (even) pixel; if it is odd, we are + * already done + */ + + if ((pTimings->rasterBlankEnd.x % 2) == 1) return TRUE; + + /* + * RASTER_BLANK_END_X is even, so we need to adjust both + * RASTER_BLANK_END_X and RASTER_BLANK_START_X by one; we'll first + * try to subtract one pixel from both + */ + + adjust = -1; + + /* + * if RASTER_BLANK_END_X cannot be made smaller (would collide + * with hSyncEnd), see if it would be safe to instead add one to + * RASTER_BLANK_END_X and RASTER_BLANK_START_X + */ + + if (pTimings->rasterBlankEnd.x <= pTimings->rasterSyncEnd.x + 1) { + if (pTimings->rasterBlankStart.x + 1 >= pTimings->rasterSize.x) { + nvEvoLogInfoString(pInfoString, + "Cannot adjust mode timings for dual link DVI requirements."); + return FALSE; + } + adjust = 1; + } + + pTimings->rasterBlankEnd.x += adjust; + pTimings->rasterBlankStart.x += adjust; + + nvEvoLogInfoString(pInfoString, + "Adjusted mode timings for dual link DVI requirements."); + + return TRUE; +} + +/* Query the HDMI 2.1 FRL configuration, if applicable. 
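+ *
+ * nvHdmiFrlQueryConfig() is expected to fill in pTimings->hdmiFrlConfig;
+ * when the resulting frlRate is HDMI_FRL_DATA_RATE_NONE, nvGetDfpProtocol()
+ * below falls back to the ordinary TMDS protocols for this mode.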
*/ +static NvBool QueryHdmiFrlConfig(const NVDpyEvoRec *pDpyEvo, + const struct + NvKmsModeValidationParams *pParams, + const NvModeTimings *pModeTimings, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + /* TODO: apply any overrides from ModeValidationParams. */ + if (!nvHdmiFrlQueryConfig(pDpyEvo, + pModeTimings, + pTimings, + pParams)) { + nvEvoLogInfoString(pInfoString, + "Unable to determine HDMI 2.1 Fixed Rate Link configuration."); + return FALSE; + } + + return TRUE; +} + +void nvInitScalingUsageBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsScalingUsageBounds *pScaling) +{ + pScaling->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + pScaling->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + pScaling->vTaps = pDevEvo->hal->caps.minScalerTaps; + pScaling->vUpscalingAllowed = FALSE; +} + +/* + * Check if the provided number of vertical taps is possible based on the + * capabilities: the lineStore (the smaller of inWidth and outWidth) must + * not exceed the maximum pixels for the desired taps; see bug 241014 + */ +static NvBool IsVTapsPossible(const NVEvoScalerCaps *pScalerCaps, + NvU32 inWidth, NvU32 outWidth, + NVEvoScalerTaps nTaps) +{ + const NvU32 lineStore = NV_MIN(inWidth, outWidth); + NvU32 maxPixels = pScalerCaps->taps[nTaps].maxPixelsVTaps; + + return lineStore <= maxPixels; +} + +/*! + * Compute the scale factor and check against the maximum. + * + * param[in] max Max scale factor to check against (* 1024) + * param[in] in Input width or height + * param[in] out Output width or height + * param[out] pFactor Output scale factor (* 1024) + */ +static NvBool ComputeScalingFactor(NvU32 max, + NvU16 in, NvU16 out, + NvU16 *pFactor) +{ + /* Use a 32-bit temporary to prevent overflow */ + NvU32 tmp; + + /* Add (out - 1) to round up */ + tmp = ((in * 1024) + (out - 1)) / out; + + /* Check against scaling limits. */ + if (tmp > max) { + return FALSE; + } + + *pFactor = tmp; + return TRUE; +} + +/*! + * Compute scaling factors based on in/out dimensions. + * Used by IMP and when programming viewport and window parameters in HW. 
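+ *
+ * Scale factors are fixed point with 1024 representing 1.0, as defined
+ * below. For example (illustrative sizes only), downscaling 3840 -> 1920
+ * yields a maxHDownscaleFactor of 2048, and 1920 -> 1280 yields 1536.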
+ * + * The 'maxScaleFactor' values are defined by nvdClass_01.mfs as: + * SizeIn/SizeOut * 1024 + */ +NvBool nvComputeScalingUsageBounds(const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NVEvoScalerTaps hTaps, NVEvoScalerTaps vTaps, + struct NvKmsScalingUsageBounds *out) +{ + const NVEvoScalerTapsCaps *pTapsCaps = NULL; + + out->vTaps = vTaps; + + /* Start with default values (1.0) */ + out->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + out->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_1X; + + if (outHeight > inHeight) { + out->vUpscalingAllowed = TRUE; + } else if (outHeight < inHeight) { + out->vUpscalingAllowed = FALSE; + + pTapsCaps = &pScalerCaps->taps[vTaps]; + if (!ComputeScalingFactor(pTapsCaps->maxVDownscaleFactor, + inHeight, outHeight, + &out->maxVDownscaleFactor)) { + return FALSE; + } + } + + if (outWidth < inWidth) { + pTapsCaps = &pScalerCaps->taps[hTaps]; + if (!ComputeScalingFactor(pTapsCaps->maxHDownscaleFactor, + inWidth, outWidth, + &out->maxHDownscaleFactor)) { + return FALSE; + } + } + + return TRUE; +} + +NvBool nvAssignScalerTaps(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + const NvU32 inWidth, const NvU32 inHeight, + const NvU32 outWidth, const NvU32 outHeight, + NvBool doubleScan, + NVEvoScalerTaps *hTapsOut, NVEvoScalerTaps *vTapsOut) +{ + NVEvoScalerTaps hTaps, vTaps; + NvBool setHTaps = (outWidth != inWidth); + NvBool setVTaps = (outHeight != inHeight); + + /* + * Select the taps filtering; we select the highest taps allowed with our + * scaling configuration. + * + * Note if requiresScalingTapsInBothDimensions is true and if we are + * scaling in *either* dimension, then we need to program > 1 taps + * in *both* dimensions. + */ + if ((setHTaps || setVTaps) && + pDevEvo->hal->caps.requiresScalingTapsInBothDimensions) { + setHTaps = TRUE; + setVTaps = TRUE; + } + + /* + * Horizontal taps: if not scaling, then no filtering; otherwise, set the + * maximum filtering, because htaps shouldn't have any constraints (unlike + * vtaps... see below). + */ + if (setHTaps) { + /* + * XXX dispClass_01.mfs says: "For text and desktop scaling, the 2 tap + * bilinear frequently looks better than the 8 tap filter which is more + * optimized for video type scaling." Once we determine how best to + * expose configuration of taps, we should choose how to indicate that 8 + * or 5 taps is the maximum. + * + * For now, we'll start with 2 taps as the default, but may end up + * picking a higher taps value if the required H downscaling factor + * isn't possible with 2 taps. + */ + NvBool hTapsFound = FALSE; + + for (hTaps = NV_EVO_SCALER_2TAPS; + hTaps <= NV_EVO_SCALER_TAPS_MAX; + hTaps++) { + NvU16 hFactor; + + if (!ComputeScalingFactor( + pScalerCaps->taps[hTaps].maxHDownscaleFactor, + inWidth, outWidth, + &hFactor)) { + continue; + } + + hTapsFound = TRUE; + break; + } + + if (!hTapsFound) { + return FALSE; + } + } else { + hTaps = pDevEvo->hal->caps.minScalerTaps; + } + + /* + * Vertical taps: if scaling, set the maximum valid filtering, otherwise, no + * filtering. + */ + if (setVTaps) { + /* + * Select the maximum vertical taps based on the capabilities. + * + * For doublescan modes, limit to 2 taps to reduce blurriness. We really + * want plain old line doubling, but EVO doesn't support that. 
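+         *
+         * In practice the chain below tries 5 taps, then 3, then 2, and
+         * fails if even 2-tap vertical scaling cannot fit the line store.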
+ */ + if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_5TAPS) && + !doubleScan) { + vTaps = NV_EVO_SCALER_5TAPS; + } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_3TAPS) && + !doubleScan) { + vTaps = NV_EVO_SCALER_3TAPS; + } else if (IsVTapsPossible(pScalerCaps, inWidth, outWidth, NV_EVO_SCALER_2TAPS)) { + vTaps = NV_EVO_SCALER_2TAPS; + } else { + return FALSE; + } + } else { + vTaps = pDevEvo->hal->caps.minScalerTaps; + } + + *hTapsOut = hTaps; + *vTapsOut = vTaps; + + return TRUE; +} + +/* + * Check that ViewPortIn does not exceed hardware limits and compute vTaps and + * hTaps based on configured ViewPortIn/Out scaling if possible given scaler + * capabilities. + */ +NvBool nvValidateHwModeTimingsViewPort(const NVDevEvoRec *pDevEvo, + const NVEvoScalerCaps *pScalerCaps, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort; + const NvU32 inWidth = pViewPort->in.width; + const NvU32 inHeight = pViewPort->in.height; + const NvU32 outWidth = pViewPort->out.width; + const NvU32 outHeight = pViewPort->out.height; + const NvBool scaling = (outWidth != inWidth) || (outHeight != inHeight); + NVEvoScalerTaps hTaps, vTaps; + + /* + * As per the MFS, there is a restriction for the width and height + * of ViewPortIn and ViewPortOut + */ + if (inWidth > 8192 || inHeight > 8192 || + outWidth > 8192 || outHeight > 8192) { + nvEvoLogInfoString(pInfoString, + "Viewport dimensions exceed hardware capabilities"); + return FALSE; + } + + if (!nvAssignScalerTaps(pDevEvo, pScalerCaps, inWidth, inHeight, outWidth, outHeight, + pTimings->doubleScan, &hTaps, &vTaps)) { + nvEvoLogInfoString(pInfoString, + "Unable to configure scaling from %dx%d to %dx%d (exceeds filtering capabilities)", + inWidth, inHeight, + outWidth, outHeight); + return FALSE; + } + + /* + * If this is an interlaced mode but we don't have scaling + * configured, check that the width will fit in the 2-tap vertical + * LineStoreSize; this is an EVO requirement for interlaced + * rasters + */ + if (pTimings->interlaced && !scaling) { + /* !scaling means widths should be same */ + nvAssert(outWidth == inWidth); + + if (outWidth > pScalerCaps->taps[NV_EVO_SCALER_2TAPS].maxPixelsVTaps) { + nvEvoLogInfoString(pInfoString, + "Interlaced mode requires filtering, but line width (%d) exceeds filtering capabilities", + outWidth); + return FALSE; + } + + /* hTaps and vTaps should have been set to minScalerTaps above */ + nvAssert(hTaps == pDevEvo->hal->caps.minScalerTaps); + nvAssert(vTaps == pDevEvo->hal->caps.minScalerTaps); + } + + pViewPort->hTaps = hTaps; + pViewPort->vTaps = vTaps; + return TRUE; +} + +static void AssignGuaranteedSOCBounds(const NVDevEvoRec *pDevEvo, + struct NvKmsUsageBounds *pGuaranteed) +{ + NvU32 layer; + + pGuaranteed->layer[NVKMS_MAIN_LAYER].usable = TRUE; + pGuaranteed->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats = + nvEvoGetFormatsWithEqualOrLowerUsageBound( + NvKmsSurfaceMemoryFormatA8R8G8B8, + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats); + nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[NVKMS_MAIN_LAYER].scaling); + + for (layer = 1; layer < ARRAY_LEN(pGuaranteed->layer); layer++) { + pGuaranteed->layer[layer].usable = FALSE; + nvInitScalingUsageBounds(pDevEvo, &pGuaranteed->layer[layer].scaling); + } +} + +/* + * Initialize the given NvKmsUsageBounds. Ask for everything supported by the HW + * by default. 
Later, based on what IMP says, we will scale back as needed. + */ +void nvAssignDefaultUsageBounds(const NVDispEvoRec *pDispEvo, + NVHwModeViewPortEvo *pViewPort) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + struct NvKmsUsageBounds *pPossible = &pViewPort->possibleUsage; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pPossible->layer); i++) { + struct NvKmsScalingUsageBounds *pScaling = &pPossible->layer[i].scaling; + + pPossible->layer[i].usable = TRUE; + pPossible->layer[i].supportedSurfaceMemoryFormats = + pDevEvo->caps.layerCaps[i].supportedSurfaceMemoryFormats; + nvInitScalingUsageBounds(pDevEvo, pScaling); + + /* Scaling is not currently supported for the main layer. Bug 3488083 */ + if (i != NVKMS_MAIN_LAYER && pDevEvo->hal->GetWindowScalingCaps) { + const NVEvoScalerCaps *pScalerCaps = + pDevEvo->hal->GetWindowScalingCaps(pDevEvo); + int j; + + for (j = NV_EVO_SCALER_TAPS_MAX; j >= NV_EVO_SCALER_TAPS_MIN; j--) { + const NVEvoScalerTapsCaps *pTapsCaps = &pScalerCaps->taps[j]; + + if ((pTapsCaps->maxVDownscaleFactor == 0) && + (pTapsCaps->maxHDownscaleFactor == 0)) { + continue; + } + + pScaling->maxVDownscaleFactor = pTapsCaps->maxVDownscaleFactor; + pScaling->maxHDownscaleFactor = pTapsCaps->maxHDownscaleFactor; + pScaling->vTaps = j; + pScaling->vUpscalingAllowed = (pTapsCaps->maxPixelsVTaps > 0); + break; + } + } + } + + if (pDevEvo->isSOCDisplay) { + AssignGuaranteedSOCBounds(pDevEvo, &pViewPort->guaranteedUsage); + } else { + pViewPort->guaranteedUsage = *pPossible; + } +} + +/* + * ConstructHwModeTimingsViewPort() - determine the ViewPortOut size + * + * ViewPortIn (specified by inWidth, inHeight) selects the pixels to + * extract from the scanout surface; ViewPortOut positions those + * pixels within the raster timings. + * + * If the configuration is not possible, pViewPort->valid will be set + * to false; otherwise, pViewPort->valid will be set to true. + */ + +static NvBool +ConstructHwModeTimingsViewPort(const NVDispEvoRec *pDispEvo, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut) +{ + NVHwModeViewPortEvoPtr pViewPort = &pTimings->viewPort; + NvU32 outWidth, outHeight; + const NvU32 hVisible = nvEvoVisibleWidth(pTimings); + const NvU32 vVisible = nvEvoVisibleHeight(pTimings); + + /* the ViewPortOut should default to the raster size */ + + outWidth = hVisible; + outHeight = vVisible; + + pViewPort->out.xAdjust = 0; + pViewPort->out.yAdjust = 0; + pViewPort->out.width = outWidth; + pViewPort->out.height = outHeight; + + /* + * If custom viewPortOut or viewPortIn were specified, do basic + * validation and then assign them to pViewPort. We'll do more + * extensive checking of these values as part of IMP. Note that + * pViewPort->out.[xy]Adjust are relative to viewPortOut centered + * within the raster timings, but pViewPortOut->[xy]1 are relative + * to 0,0. + */ + if (pViewPortOut) { + NvS16 offset; + struct NvKmsRect viewPortOut = *pViewPortOut; + + /* + * When converting from user viewport out to hardware raster timings, + * double in the vertical dimension + */ + if (pTimings->doubleScan) { + viewPortOut.y *= 2; + viewPortOut.height *= 2; + } + + /* + * The client-specified viewPortOut is in "full" horizontal space for + * SW YUV420 modes. Convert to "half" horizontal space (matching + * NVHwModeTimingsEvo and viewPortIn). 
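+ *
+ * Worked example (illustrative numbers only, assuming a 3840-pixel-wide
+ * SW YUV420 raster): a client viewPortOut of x = 0, width = 3840 becomes
+ * x = 0, width = 1920 after the division below, matching the halved
+ * horizontal values stored in NVHwModeTimingsEvo for such modes.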
+ */ + if (pTimings->yuv420Mode == NV_YUV420_MODE_SW) { + viewPortOut.x /= 2; + viewPortOut.width /= 2; + } + + if (A_plus_B_greater_than_C_U16(viewPortOut.x, + viewPortOut.width, + hVisible)) { + return FALSE; + } + + if (A_plus_B_greater_than_C_U16(viewPortOut.y, + viewPortOut.height, + vVisible)) { + return FALSE; + } + + offset = (hVisible - viewPortOut.width) / 2 * -1; + pViewPort->out.xAdjust = offset + viewPortOut.x; + + offset = (vVisible - viewPortOut.height) / 2 * -1; + pViewPort->out.yAdjust = offset + viewPortOut.y; + + pViewPort->out.width = viewPortOut.width; + pViewPort->out.height = viewPortOut.height; + } + + if (pViewPortSizeIn) { + if (pViewPortSizeIn->width <= 0) { + return FALSE; + } + if (pViewPortSizeIn->height <= 0) { + return FALSE; + } + + pViewPort->in.width = pViewPortSizeIn->width; + pViewPort->in.height = pViewPortSizeIn->height; + } else { + pViewPort->in.width = pViewPort->out.width; + pViewPort->in.height = pViewPort->out.height; + + /* When deriving viewportIn from viewportOut, halve the height for + * doubleScan */ + if (pTimings->doubleScan) { + pViewPort->in.height /= 2; + } + } + + nvAssignDefaultUsageBounds(pDispEvo, &pTimings->viewPort); + + return TRUE; +} + + + +/* + * nvGetDfpProtocol()- determine the protocol to use on the given pDpy + * with the given pTimings; assigns pTimings->protocol. + */ + +NvBool nvGetDfpProtocol(const NVDpyEvoRec *pDpyEvo, + NVHwModeTimingsEvoPtr pTimings) +{ + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + const NvU32 rmProtocol = pConnectorEvo->or.protocol; + enum nvKmsTimingsProtocol timingsProtocol; + + nvAssert(pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP); + + if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + /* Override protocol if this mode requires HDMI FRL. */ + if (pTimings->hdmiFrlConfig.frlRate != HDMI_FRL_DATA_RATE_NONE) { + nvAssert(nvDpyIsHdmiEvo(pDpyEvo)); + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A || + rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B); + timingsProtocol = NVKMS_PROTOCOL_SOR_HDMI_FRL; + } else { + switch (rmProtocol) { + default: + nvAssert(!"unrecognized SOR RM protocol"); + return FALSE; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A: + if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) { + return FALSE; + } + timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B: + if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) { + return FALSE; + } + timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS: + /* + * Override dual/single link TMDS protocol if necessary. + * XXX might be nice to give a way for users to override the + * SingleLink/DualLink decision. 
+ * + * TMDS_A: "use A side of the link" + * TMDS_B: "use B side of the link" + */ + if (nvDpyRequiresDualLinkEvo(pDpyEvo, pTimings)) { + timingsProtocol = NVKMS_PROTOCOL_SOR_DUAL_TMDS; + } else { + timingsProtocol = NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A; + } + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A: + timingsProtocol = NVKMS_PROTOCOL_SOR_DP_A; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B: + timingsProtocol = NVKMS_PROTOCOL_SOR_DP_B; + break; + case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM: + timingsProtocol = NVKMS_PROTOCOL_SOR_LVDS_CUSTOM; + break; + } + } + } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) { + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC); + timingsProtocol = NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC; + } else if (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_DSI) { + nvAssert(rmProtocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI); + timingsProtocol = NVKMS_PROTOCOL_DSI; + } else { + nvAssert(!"Unknown OR type"); + return FALSE; + } + + pTimings->protocol = timingsProtocol; + + return TRUE; + +} + + + +/* + * ConstructHwModeTimingsEvoCrt() - construct EVO hardware timings to + * drive a CRT, given the mode timings in pMt + */ + +static NvBool +ConstructHwModeTimingsEvoCrt(const NVConnectorEvoRec *pConnectorEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + NVEvoInfoStringPtr pInfoString) +{ + ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings); + + /* assign the protocol; we expect DACs to have RGB protocol */ + + nvAssert(pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT); + + pTimings->protocol = NVKMS_PROTOCOL_DAC_RGB; + + /* assign scaling fields */ + + return ConstructHwModeTimingsViewPort(pConnectorEvo->pDispEvo, pTimings, + pInfoString, pViewPortSizeIn, + pViewPortOut); +} + + +/*! + * Construct EVO hardware timings to drive a digital protocol (TMDS, + * DP, etc). + * + * \param[in] pDpy The display device for which to build timings. + * \param[in] pModeTimings The hw-neutral description of the timings. + * \param[out] pTimings The EVO-specific modetimings. + * + * \return TRUE if the EVO modetimings could be built; FALSE if failure. + */ +static NvBool ConstructHwModeTimingsEvoDfp(const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + const struct + NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString) +{ + NvBool ret; + + ConstructHwModeTimingsFromNvModeTimings(pModeTimings, pTimings); + + ret = QueryHdmiFrlConfig(pDpyEvo, pParams, + pModeTimings, pTimings, + pInfoString); + + if (!ret) { + return FALSE; + } + + ret = nvGetDfpProtocol(pDpyEvo, pTimings); + + if (!ret) { + return FALSE; + } + + ret = ApplyDualLinkRequirements(pDpyEvo, pParams, pTimings, pInfoString); + + if (!ret) { + return FALSE; + } + + return ConstructHwModeTimingsViewPort(pDpyEvo->pDispEvo, pTimings, + pInfoString, pViewPortSizeIn, + pViewPortOut); +} + +NvBool nvDowngradeHwModeTimingsDpPixelDepthEvo( + NVHwModeTimingsEvoPtr pTimings, + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace) +{ + /* + * In YUV420, HW is programmed with RGB color space and full color range. + * The color space conversion and color range compression happen in a + * headSurface composite shader. 
+ * + * XXX Add support for + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 over DP. + */ + nvAssert(colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444 || + colorSpace == NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB); + + switch (pTimings->pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + /* Cannot downgrade pixelDepth further. */ + return FALSE; + + case NVKMS_PIXEL_DEPTH_24_444: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_18_444; + break; + case NVKMS_PIXEL_DEPTH_30_444: + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_24_444; + break; + } + + return TRUE; +} + +/* + * nvDPValidateModeEvo() - For DP devices handled by the DP lib, check DP + * bandwidth and pick the best possible/supported pixel depth to use for + * the given mode timings. + */ + +NvBool nvDPValidateModeEvo(NVDpyEvoPtr pDpyEvo, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams *pParams) +{ + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + /* XXX Add support for + * NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422 over DP. */ + const enum NvKmsDpyAttributeCurrentColorSpaceValue colorSpace = + (pTimings->yuv420Mode != NV_YUV420_MODE_NONE) ? + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420 : + NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB; + + /* Only do this for DP devices. */ + if (!nvConnectorUsesDPLib(pConnectorEvo)) { + return TRUE; + } + + if ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK) != 0) { + return TRUE; + } + + nvAssert(nvDpyUsesDPLib(pDpyEvo)); + nvAssert(pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR); + + nvAssert(pTimings->pixelDepth == NVKMS_PIXEL_DEPTH_30_444 || + pTimings->pixelDepth == NVKMS_PIXEL_DEPTH_24_444 || + pTimings->pixelDepth == NVKMS_PIXEL_DEPTH_18_444); + + tryAgain: + + if (!nvDPValidateModeForDpyEvo(pDpyEvo, colorSpace, pParams, pTimings)) { + if (nvDowngradeHwModeTimingsDpPixelDepthEvo(pTimings, colorSpace)) { + goto tryAgain; + } + /* + * Cannot downgrade pixelDepth further -- + * this mode is not possible on this DP link, so fail. 
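+ *
+ * Illustrative sequence, assuming the mode starts at 30 bpp: each failed
+ * nvDPValidateModeForDpyEvo() call downgrades pixelDepth one step,
+ *
+ *   NVKMS_PIXEL_DEPTH_30_444 -> NVKMS_PIXEL_DEPTH_24_444 ->
+ *   NVKMS_PIXEL_DEPTH_18_444 -> fail,
+ *
+ * so at most three bandwidth checks are attempted per mode.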
+ */ + + return FALSE; + } + + return TRUE; +} + + + +/* + * Construct the hardware values to program EVO for the specified + * NVModeTimings + */ + +NvBool nvConstructHwModeTimingsEvo(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvoPtr pTimings, + const struct NvKmsModeValidationParams + *pParams, + NVEvoInfoStringPtr pInfoString) +{ + const NVConnectorEvoRec *pConnectorEvo = pDpyEvo->pConnectorEvo; + NvBool ret; + + /* assign the pTimings values */ + + if (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + ret = ConstructHwModeTimingsEvoDfp(pDpyEvo, + &pKmsMode->timings, + pViewPortSizeIn, pViewPortOut, + pTimings, pParams, pInfoString); + } else if (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + ret = ConstructHwModeTimingsEvoCrt(pConnectorEvo, + &pKmsMode->timings, + pViewPortSizeIn, pViewPortOut, + pTimings, pInfoString); + } else { + nvAssert(!"Invalid pDpyEvo->type"); + return FALSE; + } + + if (!ret) return FALSE; + + /* tweak the raster timings for gsync */ + + if (pDpyEvo->pDispEvo->pFrameLockEvo) { + // if this fails, the timing remains untweaked, which just means + // that the mode may not work well with frame lock + TweakTimingsForGsync(pDpyEvo, pTimings, pInfoString, pParams->stereoMode); + } + + /* Defaults, should match EVO displayClass_02.mfs values for _DEFAULT */ + if (pConnectorEvo->legacyType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_30_444; + } else if (pConnectorEvo->legacyType == + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP) { + + /* TMDS default, also acceptable for DSI */ + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_24_444; + + /* Pick displayport pixel depths for raster timings */ + + if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + + // Start off picking best possible depth based on monitor caps + // If the monitor doesn't have an EDID version 1.4 or higher, assume + // it's 8. + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.input.isDigital && + pDpyEvo->parsedEdid.info.version >= NVT_EDID_VER_1_4) { + if (pDpyEvo->parsedEdid.info.input.u.digital.bpc >= 10) { + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_30_444; + } else if (pDpyEvo->parsedEdid.info.input.u.digital.bpc < 8) { + pTimings->pixelDepth = NVKMS_PIXEL_DEPTH_18_444; + } + } + } + } + + /* + * Clear the NVT_VIDEO_INFOFRAME_CTRL; it will be populated when + * considering EDID modes for the modepool, and used when sending + * the infoframe to an HDMI monitor. 
+ */ + nvkms_memset(&pTimings->infoFrameCtrl, + NVT_INFOFRAME_CTRL_DONTCARE, sizeof(pTimings->infoFrameCtrl)); + + pTimings->stereo.mode = pParams->stereoMode; + pTimings->stereo.isAegis = pDpyEvo->stereo3DVision.isAegis; + + return TRUE; +} + +static NvBool DowngradeViewPortTaps(const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NVEvoScalerTaps srcTaps, + NVEvoScalerTaps dstTaps, + NvBool isVert, + NVEvoScalerTaps *pTaps) +{ + const NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps; + NvBool dstPossible; + + if (isVert) { + dstPossible = IsVTapsPossible(pScalerCaps, pViewPort->in.width, + pViewPort->out.width, dstTaps); + } else { + dstPossible = pScalerCaps->taps[dstTaps].maxHDownscaleFactor > 0; + } + + if (*pTaps >= srcTaps && dstPossible) { + *pTaps = dstTaps; + return TRUE; + } + + return FALSE; +} + +/* Downgrade the htaps from 8 to 5 */ +static NvBool DowngradeViewPortHTaps8(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_8TAPS, + NV_EVO_SCALER_5TAPS, + FALSE /* isVert */, + &pViewPort->hTaps); +} + +/* Downgrade the htaps from 5 to 2 */ +static NvBool DowngradeViewPortHTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_5TAPS, + NV_EVO_SCALER_2TAPS, + FALSE /* isVert */, + &pViewPort->hTaps); +} + +/* Downgrade the vtaps from 5 to 3 */ +static NvBool DowngradeViewPortVTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_5TAPS, + NV_EVO_SCALER_3TAPS, + TRUE /* isVert */, + &pViewPort->vTaps); +} + +/* Downgrade the vtaps from 3 to 2 */ +static NvBool DowngradeViewPortVTaps3(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + return DowngradeViewPortTaps(pHeadCaps, + pViewPort, + NV_EVO_SCALER_3TAPS, + NV_EVO_SCALER_2TAPS, + TRUE /* isVert */, + &pViewPort->vTaps); +} + +static NvBool +DowngradeLayerDownscaleFactor(NVHwModeViewPortEvoPtr pViewPort, + const NvU32 layer, + NvU16 srcFactor, + NvU16 dstFactor, + NvU16 *pFactor) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + + if (!pUsage->layer[layer].usable) { + return FALSE; + } + + if (*pFactor == srcFactor) { + *pFactor = dstFactor; + return TRUE; + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor4X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_4X, + NV_EVO_SCALE_FACTOR_3X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor3X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds 
*pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_3X, + NV_EVO_SCALE_FACTOR_2X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerVDownscaleFactor2X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_2X, + NV_EVO_SCALE_FACTOR_1X, + &pScaling->maxVDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool +DowngradeLayerHDownscaleFactor4X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_4X, + NV_EVO_SCALE_FACTOR_3X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerHDownscaleFactor3X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_3X, + NV_EVO_SCALE_FACTOR_2X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerHDownscaleFactor2X(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pViewPort->guaranteedUsage.layer[layer].scaling; + + if (DowngradeLayerDownscaleFactor(pViewPort, + layer, + NV_EVO_SCALE_FACTOR_2X, + NV_EVO_SCALE_FACTOR_1X, + &pScaling->maxHDownscaleFactor)) { + return TRUE; + } + } + + return FALSE; +} + +/* Downgrade the vtaps from 5 to 2 */ +static NvBool DowngradeLayerVTaps5(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pUsage->layer[layer].scaling; + + if (!pUsage->layer[layer].usable) { + continue; + } + + if (pScaling->vTaps == NV_EVO_SCALER_5TAPS) { + pScaling->vTaps = NV_EVO_SCALER_2TAPS; + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeLayerVUpscaling(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 unused) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + struct NvKmsScalingUsageBounds *pScaling = + &pUsage->layer[layer].scaling; + + if 
(!pUsage->layer[layer].usable) { + continue; + } + + if (pScaling->vUpscalingAllowed) { + pScaling->vUpscalingAllowed = FALSE; + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeViewPortOverlayFormats( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (layer == NVKMS_MAIN_LAYER || !pUsage->layer[layer].usable) { + continue; + } + + if (pUsage->layer[layer].supportedSurfaceMemoryFormats & + removeSurfaceMemoryFormats) { + pUsage->layer[layer].supportedSurfaceMemoryFormats &= + ~removeSurfaceMemoryFormats; + if (pUsage->layer[layer].supportedSurfaceMemoryFormats == 0) { + pUsage->layer[layer].usable = FALSE; + } + + return TRUE; + } + } + + return FALSE; +} + +static NvBool DowngradeViewPortBaseFormats( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats) +{ + struct NvKmsUsageBounds *pUsage = &pViewPort->guaranteedUsage; + + if (!pUsage->layer[NVKMS_MAIN_LAYER].usable) { + return FALSE; + } + + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + removeSurfaceMemoryFormats) { + pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats &= + ~removeSurfaceMemoryFormats; + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats == 0) { + pUsage->layer[NVKMS_MAIN_LAYER].usable = FALSE; + } + + return TRUE; + } + + return FALSE; +} + +typedef NvBool (*DowngradeViewPortFuncPtr)(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVEvoHeadCaps *pHeadCaps, + NVHwModeViewPortEvoPtr pViewPort, + NvU64 removeSurfaceMemoryFormats); + +/* + * Try to downgrade the usage bounds of the viewports, keeping the + * viewports roughly equal in capability; we do this from + * ValidateMetaMode50() when IMP rejects the mode. Return TRUE if we + * were able to downgrade something; return FALSE if there was nothing + * left to downgrade. 
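+ *
+ * Reading the table below: entries are ordered roughly from the least to
+ * the most painful downgrade (large downscale factors and planar YUV
+ * formats first, packed RGB formats and upscaling last), and each call
+ * applies only the first entry that still succeeds on one of the heads in
+ * downgradePossibleHeadsBitMask, so IMP is retried after every single
+ * downgrade step.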
+ */ + +static NvBool DownGradeMetaModeUsageBounds( + const NVDevEvoRec *pDevEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvU32 downgradePossibleHeadsBitMask) +{ + static const struct { + DowngradeViewPortFuncPtr downgradeFunc; + NvU64 removeSurfaceMemoryFormats; + } downgradeFuncs[] = { + { DowngradeLayerVDownscaleFactor4X, + 0 }, + { DowngradeLayerHDownscaleFactor4X, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP }, + { DowngradeLayerVDownscaleFactor3X, + 0 }, + { DowngradeLayerHDownscaleFactor3X, + 0 }, + { DowngradeViewPortVTaps5, + 0 }, + { DowngradeViewPortVTaps3, + 0 }, + { DowngradeViewPortHTaps8, + 0 }, + { DowngradeViewPortHTaps5, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP }, + { DowngradeLayerVTaps5, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP }, + { DowngradeLayerVDownscaleFactor2X, + 0 }, + { DowngradeLayerHDownscaleFactor2X, + 0 }, + { DowngradeLayerVUpscaling, + 0 }, + { DowngradeViewPortOverlayFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP }, + { DowngradeViewPortBaseFormats, + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP }, + }; + int i; + + // XXX assume the heads have equal capabilities + // XXX assume the gpus have equal capabilities + + const NVEvoHeadCaps *pHeadCaps = + &pDevEvo->gpus[0].capabilities.head[0]; + + + for (i = 0; i < ARRAY_LEN(downgradeFuncs); i++) { + int head; + FOR_ALL_HEADS(head, downgradePossibleHeadsBitMask) { + if (timingsParams[head].pTimings == NULL) { + continue; + } + + if (downgradeFuncs[i].downgradeFunc( + pDevEvo, + head, + pHeadCaps, + &timingsParams[head].pTimings->viewPort, + downgradeFuncs[i].removeSurfaceMemoryFormats)) { + return TRUE; + } + } + } + + /* 
Nothing else to downgrade */ + return FALSE; +} + +NvBool nvAllocateDisplayBandwidth( + NVDispEvoPtr pDispEvo, + NvU32 newIsoBandwidthKBPS, + NvU32 newDramFloorKBPS) +{ + NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS params = { }; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (!pDevEvo->isSOCDisplay) { + return TRUE; + } + + params.subDeviceInstance = 0; + params.averageBandwidthKBPS = newIsoBandwidthKBPS; + params.floorBandwidthKBPS = newDramFloorKBPS; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH, + ¶ms, sizeof(params)); + if (ret != NV_OK) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate %u KBPS Iso and %u KBPS Dram", + newIsoBandwidthKBPS, newDramFloorKBPS); + return FALSE; + } + + pDispEvo->isoBandwidthKBPS = newIsoBandwidthKBPS; + pDispEvo->dramFloorKBPS = newDramFloorKBPS; + + return TRUE; +} + +static void AssignNVEvoIsModePossibleDispInput( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NVEvoIsModePossibleDispInput *pImpInput) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + NvU32 nextSorIndex = 0; + + nvkms_memset(pImpInput, 0, sizeof(*pImpInput)); + + pImpInput->requireBootClocks = requireBootClocks; + pImpInput->reallocBandwidth = reallocBandwidth; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVConnectorEvoRec *pConnectorEvo = + timingsParams[head].pConnectorEvo; + NvU32 otherHead = 0; + + nvAssert((timingsParams[head].pTimings == NULL) == + (timingsParams[head].pConnectorEvo == NULL)); + + pImpInput->head[head].orIndex = NV_INVALID_OR; + + if (timingsParams[head].pTimings == NULL) { + continue; + } + + pImpInput->head[head].pTimings = timingsParams[head].pTimings; + pImpInput->head[head].displayId = timingsParams[head].activeRmId; + pImpInput->head[head].orType = pConnectorEvo->or.type; + pImpInput->head[head].pUsage = timingsParams[head].pUsage; + + if (!NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) || + pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + + nvAssert(pConnectorEvo->or.mask != 0x0); + + pImpInput->head[head].orIndex = + nvEvoConnectorGetPrimaryOr(pConnectorEvo); + continue; + } + + /* + * If more than one head is attached to the same connector, then make + * sure that all of them use the same SOR index. + */ + for (otherHead = 0; otherHead < head; otherHead++) { + if (timingsParams[otherHead].pConnectorEvo == pConnectorEvo) { + pImpInput->head[head].orIndex = pImpInput->head[otherHead].orIndex; + break; + } + } + + /* + * On GPUs with a full crossbar, the SORs are equally capable, so just + * use next unused SOR. + * + * We assume there are as many SORs as there are heads. + */ + if (pImpInput->head[head].orIndex == NV_INVALID_OR) { + pImpInput->head[head].orIndex = nextSorIndex; + nextSorIndex++; + } + } +} + +/*! + * Validate the described disp configuration through IMP. + + * \param[in] pDispEvo The disp of the dpyIdList. + * + * \param[in.out] timingsParams[] The proposed configuration to use on each head + * includes - + * + * pConnectorEvo - + * The proposed connector to drive on each head. + * + * activeRmId - + * The display ID that we use to talk to RM + * about the dpy(s) on each head. 
+ * + * pTimings - + * The proposed timings to use on each head; + * note the usage bounds within pTimings + * may be altered by this function. + * + * depth - + * The depth of the buffer to be displayed on + * each head. + * \param[in] requireBootClocks + * Only validate modes that will work at P8 + * clocks. + * + * \param[in] reallocBandwidth + * Try to allocate the required display + * bandwidth if IMP passes. + * + * \param[out] pMinIsoBandwidthKBPS + * The ISO bandwidth that's required for the + * proposed disp configuration only. This value + * doesn't take the current display state into + * account. + * + * \param[out] pMinDramFloorKBPS + * The DRAM floor that's required for the + * proposed disp configuration only. This value + * doesn't take the current display state into + * account. + * + * \return Return TRUE if the proposed disp configuration is + * considered valid for IMP purposes. + */ +NvBool nvValidateImpOneDisp( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 *pMinIsoBandwidthKBPS, + NvU32 *pMinDramFloorKBPS) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoIsModePossibleDispInput impInput = { }; + NVEvoIsModePossibleDispOutput impOutput = { }; + NvU32 newIsoBandwidthKBPS, newDramFloorKBPS; + NvBool needToRealloc = FALSE; + + AssignNVEvoIsModePossibleDispInput(pDispEvo, + timingsParams, requireBootClocks, + reallocBandwidth, + &impInput); + + pDevEvo->hal->IsModePossible(pDispEvo, &impInput, &impOutput); + if (!impOutput.possible) { + return FALSE; + } + + switch (reallocBandwidth) { + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE: + needToRealloc = (impOutput.minRequiredBandwidthKBPS > pDispEvo->isoBandwidthKBPS) || + (impOutput.floorBandwidthKBPS > pDispEvo->dramFloorKBPS); + newIsoBandwidthKBPS = + NV_MAX(pDispEvo->isoBandwidthKBPS, impOutput.minRequiredBandwidthKBPS); + newDramFloorKBPS = + NV_MAX(pDispEvo->dramFloorKBPS, impOutput.floorBandwidthKBPS); + + break; + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST: + needToRealloc = (impOutput.minRequiredBandwidthKBPS != pDispEvo->isoBandwidthKBPS) || + (impOutput.floorBandwidthKBPS != pDispEvo->dramFloorKBPS); + newIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS; + newDramFloorKBPS = impOutput.floorBandwidthKBPS; + + break; + case NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE: + default: + break; + } + + if (needToRealloc) { + if (!nvAllocateDisplayBandwidth(pDispEvo, + newIsoBandwidthKBPS, + newDramFloorKBPS)) { + return FALSE; + } + } + + if (pMinIsoBandwidthKBPS != NULL) { + *pMinIsoBandwidthKBPS = impOutput.minRequiredBandwidthKBPS; + } + + if (pMinDramFloorKBPS != NULL) { + *pMinDramFloorKBPS = impOutput.floorBandwidthKBPS; + } + + return TRUE; +} + +NvBool nvValidateImpOneDispDowngrade( + NVDispEvoPtr pDispEvo, + const NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP], + NvBool requireBootClocks, + NVEvoReallocateBandwidthMode reallocBandwidth, + NvU32 downgradePossibleHeadsBitMask) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvBool impPassed = FALSE; + + do { + impPassed = nvValidateImpOneDisp(pDispEvo, + timingsParams, + requireBootClocks, + reallocBandwidth, + NULL /* pMinIsoBandwidthKBPS */, + NULL /* pMinDramFloorKBPS */); + if (impPassed) { + break; + } + } while (DownGradeMetaModeUsageBounds(pDevEvo, timingsParams, + downgradePossibleHeadsBitMask)); + + if (impPassed && !pDevEvo->isSOCDisplay) { + NvU32 head; + + for (head = 0; head < 
pDevEvo->numHeads; head++) { + if (timingsParams[head].pTimings != NULL) { + timingsParams[head].pTimings->viewPort.possibleUsage = + timingsParams[head].pTimings->viewPort.guaranteedUsage; + } + } + } + + return impPassed; +} + +/* + * Return TRUE iff this display can be configured as a framelock + * server given the current modetimings/framelock configuration, FALSE + * o.w. + */ + +NvBool nvFrameLockServerPossibleEvo(const NVDpyEvoRec *pDpyEvo) +{ + + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + NV_EVO_ADD_FRAME_LOCK_SERVER, + NULL); +} + +/* + * Return TRUE iff this display can be configured as a framelock client + * given the current modetimings/framelock configuration, FALSE o.w. + */ + +NvBool nvFrameLockClientPossibleEvo(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, + NV_EVO_ADD_FRAME_LOCK_CLIENT, + NULL); +} + + +/* + * FrameLockSli() - Helper function for nvEvoRefFrameLockSli() and + * nvEvoUnRefFrameLockSli(), which are hooked into the EVO locking state + * machine via custom rules. This function will find the GPU acting as the + * given GPU's SLI primary and perform the NV_EVO_{ADD,REM}_FRAMELOCK_REF action + * to increment or decrement the refcount on that GPU. + * If queryOnly, it also figures out which displays to pass into the EVO state + * machine; otherwise, it passes NULLs to perform a query without affecting + * state. + */ + +static NvBool FrameLockSli(NVDevEvoPtr pDevEvo, + NvU32 action, + NvBool queryOnly) +{ + RasterLockTopology *topos; + NVEvoSubDevPtr pEvoSubDev; + NVDispEvoPtr pDispEvo; + unsigned int numTopos; + + topos = GetRasterLockTopologies(pDevEvo, &numTopos); + if (!topos) { + return FALSE; + } + + nvAssert(numTopos == 1); + if (numTopos != 1) { + nvFree(topos); + return FALSE; + } + + /* Want to be framelock server */ + pDispEvo = topos[0].pDispEvoOrder[0]; + + nvFree(topos); + + if (!pDispEvo) { + return FALSE; + } + + nvAssert(pDevEvo == pDispEvo->pDevEvo); + + pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + + if (queryOnly) { + return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, NULL); + } else { + NvU32 pHeads[NVKMS_MAX_HEADS_PER_DISP + 1]; + NvU32 i = 0; + NvU32 head; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (nvHeadIsActive(pDispEvo, head)) { + pHeads[i++] = head; + } + } + nvAssert(i > 0 && i <= NVKMS_MAX_HEADS_PER_DISP); + pHeads[i] = NV_INVALID_HEAD; + + return pEvoSubDev->scanLockState(pDispEvo, pEvoSubDev, action, + pHeads); + } +} + + +/* + * nvEvoRefFrameLockSli() - Attempt to set up framelock on the GPU's SLI + * primary. Hooked into EVO state machine via custom rules. + * If pHeads is NULL, only perform a query. + */ + +NvBool nvEvoRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_ADD_FRAME_LOCK_REF, + pHeads == NULL); + +} /* nvEvoRefFrameLockSli */ + + +/* + * nvEvoUnRefFrameLockSli() - Attempt to clean up framelock on the GPU's SLI + * primary. Hooked into EVO state machine via custom rules. + * If pHeads is NULL, only perform a query. 
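+ *
+ * As with nvEvoRefFrameLockSli() above, a NULL pHeads selects the
+ * queryOnly path in FrameLockSli(), so the scanLockState() state machine
+ * is consulted with a NULL head list and no locking state is modified.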
+ */ + +NvBool nvEvoUnRefFrameLockSli(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + return FrameLockSli(pDispEvo->pDevEvo, NV_EVO_REM_FRAME_LOCK_REF, + pHeads == NULL); + +} /* nvEvoUnRefFrameLockSli */ + + +/* + * GetRasterLockPin() - Ask RM which lockpin to use in order to configure GPU0 + * be a server or client of GPU1, where GPUn is represented by the duple + * (pDispn, headn) (or NV_EVO_LOCK_PIN_ERROR if the two cannot be locked). + */ +static void GetRasterLockPin(NVDispEvoPtr pDispEvo0, NvU32 head0, + NVDispEvoPtr pDispEvo1, NvU32 head1, + NVEvoLockPin *serverPin, NVEvoLockPin *clientPin) +{ + NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS params = { }; + NvU32 displayHandle0 = pDispEvo0->pDevEvo->displayHandle; + NvU32 displayHandle1 = pDispEvo1->pDevEvo->displayHandle; + NvU32 ret; + + params.base.subdeviceIndex = pDispEvo0->displayOwner; + params.head = head0; + + params.peer.hDisplay = displayHandle1; + params.peer.subdeviceIndex = pDispEvo1->displayOwner; + params.peer.head = head1; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + displayHandle0, + NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo0, EVO_LOG_ERROR, + "stateless lockpin query failed; ret: 0x%x", ret); + if (serverPin) *serverPin = NV_EVO_LOCK_PIN_ERROR; + if (clientPin) *clientPin = NV_EVO_LOCK_PIN_ERROR; + return; + } + + if (serverPin) { + if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + _MASTER_SCAN_LOCK_CONNECTED, _NO, + params.masterScanLock)) { + *serverPin = NV_EVO_LOCK_PIN_ERROR; + } else { + int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + _MASTER_SCAN_LOCK_PIN, + params.masterScanLock); + *serverPin = NV_EVO_LOCK_PIN_0 + pin; + } + } + + if (clientPin) { + if (FLD_TEST_DRF(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + _SLAVE_SCAN_LOCK_CONNECTED, _NO, + params.slaveScanLock)) { + *clientPin = NV_EVO_LOCK_PIN_ERROR; + } else { + int pin = DRF_VAL(5070, _CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_STATELESS, + _SLAVE_SCAN_LOCK_PIN, + params.slaveScanLock); + *clientPin = NV_EVO_LOCK_PIN_0 + pin; + } + } +} /* GetRasterLockPin */ + +static NvU32 +UpdateLUTTimer(NVDispEvoPtr pDispEvo, const NvU32 head, NvBool baseLutEnabled, + NvBool outputLutEnabled) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + const int numLUTs = ARRAY_LEN(pDevEvo->lut.head[head].LUT); + + if (!pDevEvo->hal->IsCompNotifierComplete(pDispEvo, + LUTNotifierForHead(head))) { + // If the notifier is still pending, then the previous update is still + // pending and further LUT changes should continue to go into the third + // buffer. Reschedule the timer for another 10 ms. + return 10; + } + + // Update the current LUT index and kick off an update. + pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex++; + pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex %= numLUTs; + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = baseLutEnabled; + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = outputLutEnabled; + + nvEvoUpdateCurrentPalette(pDispEvo, head, TRUE); + + // Return 0 to cancel the timer. 
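+ // (The caller, UpdateLUTTimerNVKMS(), treats a non-zero return value as
+ // "reschedule after this many milliseconds" and converts it to
+ // microseconds before calling ScheduleLutUpdate().)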
+ return 0; +} + +static void UpdateLUTTimerNVKMS(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + const NvU32 head = DRF_VAL(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, + dataU32); + const NvBool baseLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, + _BASE_LUT, _ENABLE, dataU32); + const NvBool outputLutEnabled = FLD_TEST_DRF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, + _OUTPUT_LUT, _ENABLE, dataU32); + NvU32 ret = UpdateLUTTimer(pDispEvo, head, baseLutEnabled, + outputLutEnabled); + + if (ret != 0) { + ScheduleLutUpdate(pDispEvo, head, dataU32, ret * 1000); + } +} + +static void ScheduleLutUpdate(NVDispEvoRec *pDispEvo, + const NvU32 head, const NvU32 data, + const NvU64 usec) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + /* Cancel previous update */ + nvCancelLutUpdateEvo(pDispEvo, head); + + /* schedule a new timer */ + pDevEvo->lut.head[head].disp[pDispEvo->displayOwner].updateTimer = + nvkms_alloc_timer(UpdateLUTTimerNVKMS, + pDispEvo, data, + usec); +} + +/* + * The gamma ramp, if specified, has a 16-bit range. Convert it to EVO's 14-bit + * shifted range and zero out the low 3 bits for bug 813188. + */ +static inline NvU16 GammaToEvo(NvU16 gamma) +{ + return ((gamma >> 2) & ~7) + 24576; +} + +static NVEvoLutDataRec *GetNewLutBuffer( + const NVDispEvoRec *pDispEvo, + NvU32 head, + const struct NvKmsSetLutCommonParams *pParams) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoLutDataRec *pLUTBuffer = NULL; + + // XXX NVKMS TODO: If only input or output are specified and the other one + // is enabled in the hardware, this will zero out the one not specified. In + // practice it isn't a problem today because the X driver always specifies + // both, but we should fix this once we start always using the base channel, + // where we have a separate base LUT ctxdma. + // + // This is also a problem if a partial update of the input LUT is attempted + // (i.e. start != 0 or end != numberOfLutEntries-1). + // + // Filed bug: 2042919 to track removing this TODO. + + pLUTBuffer = nvCalloc(1, sizeof(*pLUTBuffer)); + + if (pLUTBuffer == NULL) { + goto done; + } + + if (pParams->input.specified && pParams->input.end != 0) { + const struct NvKmsLutRamps *pRamps = + nvKmsNvU64ToPointer(pParams->input.pRamps); + const NvU16 *red = pRamps->red; + const NvU16 *green = pRamps->green; + const NvU16 *blue = pRamps->blue; + + nvAssert(pRamps != NULL); + + // Update our shadow copy of the LUT. + pDevEvo->hal->FillLUTSurface(pLUTBuffer->base, + red, green, blue, + pParams->input.end + 1, + pParams->input.depth); + } + + if (pParams->output.specified && pParams->output.enabled) { + const struct NvKmsLutRamps *pRamps = + nvKmsNvU64ToPointer(pParams->output.pRamps); + int i; + + nvAssert(pRamps != NULL); + + if (pDevEvo->hal->caps.hasUnorm16OLUT) { + for (i = 0; i < 1024; i++) { + // Copy the client's 16-bit ramp directly to the LUT buffer. + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Red = pRamps->red[i]; + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Green = pRamps->green[i]; + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + i].Blue = pRamps->blue[i]; + } + + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1024] = + pLUTBuffer->output[NV_LUT_VSS_HEADER_SIZE + 1023]; + } else { + for (i = 0; i < 1024; i++) { + // Convert from the client's 16-bit range to the EVO 14-bit shifted + // range. 
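+ // As a worked example of GammaToEvo() above:
+ //   0x0000 -> ((0x0000 >> 2) & ~7) + 24576 = 24576 (0x6000)
+ //   0xFFFF -> ((0xFFFF >> 2) & ~7) + 24576 = 40952 (0x9FF8)
+ // i.e. the 16-bit ramp lands in the shifted range [0x6000, 0x9FF8]
+ // with the low 3 bits cleared per bug 813188.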
+ pLUTBuffer->output[i].Red = GammaToEvo(pRamps->red[i]); + pLUTBuffer->output[i].Green = GammaToEvo(pRamps->green[i]); + pLUTBuffer->output[i].Blue = GammaToEvo(pRamps->blue[i]); + } + + pLUTBuffer->output[1024] = pLUTBuffer->output[1023]; + } + } + + /* fall through */ + +done: + return pLUTBuffer; +} + + +/* + * Update the head's LUT with the given colors. + * + * The color LUT is triple-buffered. + * + * curLUTIndex indicates the buffer currently being updated. What the other + * two buffers are used for depends on whether the previous update has + * completed. If not (case 1): + * curLUTIndex + 1 (mod 3): currently being displayed + * curLUTIndex + 2 (mod 3): will be displayed at next vblank + * If so (case 2): + * curLUTIndex + 1 (mod 3): unused + * curLUTIndex + 2 (mod 3): currently being displayed + * + * In case 1, just update the current buffer and kick off a timer to submit the + * update from i+2 to i. If more LUT changes come in before the first update + * happens, kill the timer and start a new one. + * + * In case 2, kill the timer if it still hasn't gone off, update buffer i, and + * kick off an update. No new timer needs to be scheduled. + */ + +void nvEvoSetLut(NVDispEvoPtr pDispEvo, NvU32 head, NvBool kickoff, + const struct NvKmsSetLutCommonParams *pParams) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const int dispIndex = pDispEvo->displayOwner; + const int curLUT = pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex; + const NvBool waitForPreviousUpdate = + pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate; + const int numLUTs = ARRAY_LEN(pDevEvo->lut.head[head].LUT); + const int lutToFill = (curLUT + 1) % numLUTs; + NVLutSurfaceEvoPtr pSurfEvo = pDevEvo->lut.head[head].LUT[lutToFill]; + NvBool baseLutEnabled = + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled ; + NvBool outputLutEnabled = + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled; + + if (!pParams->input.specified && !pParams->output.specified) { + return; + } + + if (pParams->input.specified) { + baseLutEnabled = (pParams->input.end != 0); + } + + if (pParams->output.specified) { + outputLutEnabled = pParams->output.enabled; + } + + nvAssert(pSurfEvo != NULL); + + if ((pParams->input.specified && pParams->input.end != 0) || + (pParams->output.specified && pParams->output.enabled)) { + NVEvoLutDataRec *pLUTBuffer = GetNewLutBuffer(pDispEvo, head, pParams); + + if (pLUTBuffer == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "LUT Allocation failure; skipping LUT update"); + return; + } + + // Fill in the new LUT buffer. + nvUploadDataToLutSurfaceEvo(pSurfEvo, pLUTBuffer, pDispEvo); + + nvFree(pLUTBuffer); + } + + /* Kill a pending timer */ + nvCancelLutUpdateEvo(pDispEvo, head); + + if (!kickoff) { + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = baseLutEnabled; + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = outputLutEnabled; + pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex = lutToFill; + return; + } + + // See if we can just fill the next LUT buffer and kick off an update now. + // We can do that if this is the very first update, or if the previous + // update is complete, or if we need to guarantee that this update + // is synchronous. + NvBool previousUpdateComplete = + pDevEvo->hal->IsCompNotifierComplete(pDispEvo, + LUTNotifierForHead(head)); + if (!waitForPreviousUpdate || previousUpdateComplete || + pParams->synchronous) { + // Kick off an update now. 
+ pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex = lutToFill; + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = baseLutEnabled; + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = outputLutEnabled; + nvEvoUpdateCurrentPalette(pDispEvo, head, TRUE); + + // If this LUT update is synchronous, then sync before returning. + if (pParams->synchronous && + pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate) { + + pDevEvo->hal->WaitForCompNotifier(pDispEvo, + LUTNotifierForHead(head)); + pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate = + FALSE; + } + } else { + // Schedule a timer to kick off an update later. + // XXX 5 ms is a guess. We could probably look at this pDpy's refresh + // rate to come up with a more reasonable estimate. + NvU32 dataU32 = DRF_NUM(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _HEAD, head); + + nvAssert((head & ~0xff) == 0); + + if (baseLutEnabled) { + dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _BASE_LUT, + _ENABLE); + } + + if (outputLutEnabled) { + dataU32 |= DRF_DEF(UPDATE_LUT_TIMER_NVKMS, _DATAU32, _OUTPUT_LUT, + _ENABLE); + } + + ScheduleLutUpdate(pDispEvo, head, dataU32, 5 * 1000); + } +} + +NvBool nvValidateSetLutCommonParams( + const NVDevEvoRec *pDevEvo, + const struct NvKmsSetLutCommonParams *pParams) +{ + NvU32 maxSize = 0; + + if (pParams->output.specified && pParams->output.enabled) { + if (pParams->output.pRamps == 0) { + return FALSE; + } + } + + if (!pParams->input.specified || pParams->input.end == 0) { + return TRUE; + } + + if (pParams->input.pRamps == 0) { + return FALSE; + } + + switch (pParams->input.depth) { + case 8: maxSize = 256; break; + case 15: maxSize = 32; break; + case 16: maxSize = 64; break; + case 24: maxSize = 256; break; + case 30: maxSize = 1024; break; + default: return FALSE; + } + + nvAssert(maxSize <= NVKMS_LUT_ARRAY_SIZE); + nvAssert(maxSize <= NVKMS_LUT_ARRAY_SIZE); + nvAssert(maxSize <= NVKMS_LUT_ARRAY_SIZE); + + /* Currently, the implementation assumes start==0. */ + if (pParams->input.start != 0) { + return FALSE; + } + + if (pParams->input.end >= maxSize) { + return FALSE; + } + + return TRUE; +} + +static NvU32 GetSwapLockoutWindowUs(NVDispEvoPtr pDispEvo) +{ + NV30F1_CTRL_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW_PARAMS params = { 0 }; + NvU32 ret; + + nvAssert(pDispEvo->pFrameLockEvo != NULL); + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDispEvo->pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SWAP_LOCK_WINDOW failed"); + } + + return params.tSwapRdyHi; +} + +static NvU32 CalculateSwapLockoutStartP2060(NVDispEvoPtr pDispEvo, + const NvU32 head, + const NvU32 tSwapRdyHiUs) +{ + const NVHwModeTimingsEvo *pTimings; + + nvAssert(head != NV_INVALID_HEAD); + nvAssert(nvHeadIsActive(pDispEvo, head)); + + pTimings = &pDispEvo->headState[head].timings; + + /* + * SWAP_LOCKOUT_START = Vtotal * TswapRdyHi * Refresh_Rate + * + * = Vtotal * TswapRdyHi * (pclk / Refresh_Rate) + * = Vtotal * TswapRdyHi * (pclk / (Votal * Htotal)) + * = Vtotal * TswapRdyHi * (pclk / (Votal * Htotal)) + * = TswapRdyHi * (pclk / Htotal) + * = TswapRdyHiUs * 1e-6 * pclk / Htotal + * = TswapRdyHiUs * pclk / (Htotal * 1000000) + * = TswapRdyHiUs * (pclkKhz * 1000) / (Htotal * 1000000) + * = TswapRdyHiUs * pclkKhz / (Htotal * 1000) + * + * Since SWAP_LOCKOUT_START must be higher than LSR_MIN_TIME, round this + * result up to the nearest integer. 
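+ *
+ * Worked example (illustrative numbers only): for a 1920x1080@60 raster
+ * with Htotal (rasterSize.x) = 2200 and pclk = 148500 kHz, a tSwapRdyHiUs
+ * of 250 us gives
+ *
+ *   250 * 148500 / (2200 * 1000) = 16.875  ->  17 after rounding up,
+ *
+ * i.e. a SWAP_LOCKOUT_START of 17 scanlines.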
+ */ + + return NV_ROUNDUP_DIV(tSwapRdyHiUs * pTimings->pixelClock, + pTimings->rasterSize.x * 1000); +} + +/** + * Override the swap lockout start value on heads on this pDisp, or restore the + * default value. + * + * This is called before (with isPre == TRUE) and after (with isPre == FALSE) + * swap barriers are enabled on the G-Sync board. In order to satisfy certain + * timing criteria, we need to set a special value for SWAP_LOCKOUT_START for + * the duration of swap barriers being enabled. + */ +void nvSetSwapBarrierNotifyEvo(NVDispEvoPtr pDispEvo, + NvBool enable, NvBool isPre) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 tSwapRdyHiUs = 0; + NvU32 head; + + if ((isPre && !enable) || (!isPre && enable)) { + return; + } + + if (enable) { + tSwapRdyHiUs = GetSwapLockoutWindowUs(pDispEvo); + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS params = { }; + NvU32 ret; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + params.maxSwapLockoutSkew = + NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_MAX_SWAP_LOCKOUT_SKEW_INIT; + + if (enable) { + params.swapLockoutStart = + CalculateSwapLockoutStartP2060(pDispEvo, head, tSwapRdyHiUs); + } else { + params.swapLockoutStart = + NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_SWAP_LOCKOUT_START_INIT; + } + + params.head = head; + + params.base.subdeviceIndex = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP failed"); + } + } +} + +/*! + * Release a reference to a pDevEvo + * + * If the refcount of the device drops to 0, this frees the device. + * + * \return TRUE if the device was freed, FALSE otherwise. + */ +NvBool nvFreeDevEvo(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo == NULL) { + return FALSE; + } + + pDevEvo->allocRefCnt--; + + if (pDevEvo->allocRefCnt > 0) { + return FALSE; + } + + if (pDevEvo->pNvKmsOpenDev != NULL) { + /* + * DP-MST allows to attach more than one heads/stream to single DP + * connector, and there is no way to convey that DP-MST configuration to + * next driver load; therefore disallow DP-MST. 
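+ *
+ * (nvAllocDevEvo() below re-enables MST via
+ * nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */) when a device
+ * is allocated again.)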
+ */ + nvEvoRestoreConsole(pDevEvo, FALSE /* allowMST */); + + nvEvoUnregisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, + pDevEvo->fbConsoleSurfaceHandle, + TRUE /* skipUpdate */); + pDevEvo->fbConsoleSurfaceHandle = 0; + } + + nvFreeCoreChannelEvo(pDevEvo); + + nvTeardownHdmiLibrary(pDevEvo); + + nvFreePerOpenDev(nvEvoGlobal.nvKmsPerOpen, pDevEvo->pNvKmsOpenDev); + + nvFreeFrameLocksEvo(pDevEvo); + + if (pDevEvo->hal) { + pDevEvo->hal->FreeRmCtrlObject(pDevEvo); + } + + nvRmDestroyDisplays(pDevEvo); + + nvkms_free_timer(pDevEvo->consoleRestoreTimer); + pDevEvo->consoleRestoreTimer = NULL; + + nvPreallocFree(pDevEvo); + + nvRmFreeDeviceEvo(pDevEvo); + + nvListDel(&pDevEvo->devListEntry); + + nvkms_free_ref_ptr(pDevEvo->ref_ptr); + + nvFree(pDevEvo); + return TRUE; +} + +NVDevEvoPtr nvAllocDevEvo(const struct NvKmsAllocDeviceRequest *pRequest, + enum NvKmsAllocDeviceStatus *pStatus) +{ + NVDevEvoPtr pDevEvo = NULL; + enum NvKmsAllocDeviceStatus status = + NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + NvU32 i; + + nvAssert(nvFindDevEvoByDeviceId(pRequest->deviceId) == NULL); + + pDevEvo = nvCalloc(1, sizeof(*pDevEvo)); + + if (pDevEvo == NULL) { + goto done; + } + + pDevEvo->allocRefCnt = 1; + + pDevEvo->gpuLogIndex = NV_INVALID_GPU_LOG_INDEX; + + pDevEvo->gc6Allowed = TRUE; + + nvListAppend(&pDevEvo->devListEntry, &nvEvoGlobal.devList); + + pDevEvo->ref_ptr = nvkms_alloc_ref_ptr(pDevEvo); + if (!pDevEvo->ref_ptr) { + goto done; + } + + for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) { + pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + } + + for (i = 0; i < ARRAY_LEN(pDevEvo->headForWindow); i++) { + pDevEvo->headForWindow[i] = NV_INVALID_HEAD; + } + + if (!nvRmAllocDeviceEvo(pDevEvo, pRequest)) { + goto done; + } + + status = nvAssignEvoCaps(pDevEvo); + + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + goto done; + } + + if (!nvPreallocAlloc(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + /* + * Copy the registry keys from the alloc device request to the device. + * + * This needs to be set before nvRmAllocDisplays, because nvRmAllocDisplays + * will initialize DP lib which may read registry keys that we want to + * allow clients to override. + */ + ct_assert(ARRAY_LEN(pRequest->registryKeys) == + ARRAY_LEN(pDevEvo->registryKeys)); + ct_assert(ARRAY_LEN(pRequest->registryKeys[0].name) == + ARRAY_LEN(pDevEvo->registryKeys[0].name)); + + for (i = 0; i < ARRAY_LEN(pRequest->registryKeys); i++) { + const size_t nameLen = sizeof(pDevEvo->registryKeys[i].name); + nvkms_memcpy(pDevEvo->registryKeys[i].name, + pRequest->registryKeys[i].name, + nameLen); + pDevEvo->registryKeys[i].name[nameLen - 1] = '\0'; + pDevEvo->registryKeys[i].value = pRequest->registryKeys[i].value; + } + + status = nvRmAllocDisplays(pDevEvo); + + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + goto done; + } + + nvAllocFrameLocksEvo(pDevEvo); + + if (!pDevEvo->hal->AllocRmCtrlObject(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + if (!nvAllocCoreChannelEvo(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_CORE_CHANNEL_ALLOC_FAILED; + goto done; + } + + pDevEvo->pNvKmsOpenDev = nvAllocPerOpenDev(nvEvoGlobal.nvKmsPerOpen, + pDevEvo, TRUE /* isPrivileged */); + if (!pDevEvo->pNvKmsOpenDev) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + + /* + * Import the framebuffer console, if there is one, + * as a surface we can flip to. 
+ */ + nvRmImportFbConsoleMemory(pDevEvo); + + /* + * This check must be placed after nvAllocCoreChannelEvo() since it depends + * on the HW capabilities that are read in that function. + */ + if (!ValidateConnectorTypes(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + if (!nvInitHdmiLibrary(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + goto done; + } + + nvRmMuxInit(pDevEvo); + + status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + + /* fall through */ + +done: + if (status != NVKMS_ALLOC_DEVICE_STATUS_SUCCESS) { + nvFreeDevEvo(pDevEvo); + pDevEvo = NULL; + } + + *pStatus = status; + + return pDevEvo; +} + + +// How long before we time out waiting for lock? +// In microseconds. +#define LOCK_TIMEOUT 5000000 + +// +// EvoWaitForLock() +// Wait for raster or flip lock to complete +// Note that we use pDev and subdevice here instead of pDisp since this is used +// per-subdev in SLI (including the pDispEvo->numSubDevices > 1 case). +// +static NvBool EvoWaitForLock(NVDevEvoPtr pDevEvo, + NvU32 sd, NvU32 head, NvU32 type) +{ + NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS status = { }; + NvU32 ret; + NvU64 startTime = 0; + + nvAssert(type == EVO_RASTER_LOCK || type == EVO_FLIP_LOCK); + + if ((type == EVO_FLIP_LOCK) && + !pDevEvo->hal->caps.supportsFlipLockRGStatus) { + return TRUE; + } + + status.head = head; + status.base.subdeviceIndex = sd; + status.scanLocked = NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_NO; + status.flipLocked = NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_NO; + + // Just keep looping until we get what we want. + do { + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_RG_STATUS, + &status, + sizeof(status)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to read SLI lock status"); + return FALSE; + } + + if ((type == EVO_RASTER_LOCK) && + (status.scanLocked == + NV5070_CTRL_CMD_GET_RG_STATUS_SCANLOCKED_YES)) { + break; + } + if ((type == EVO_FLIP_LOCK) && + (status.flipLocked == + NV5070_CTRL_CMD_GET_RG_STATUS_FLIPLOCKED_YES)) { + break; + } + + if (nvExceedsTimeoutUSec(&startTime, LOCK_TIMEOUT)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "SLI lock timeout exceeded (type %d)", type); + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + // Once we've exited from the various loops above, we should be locked + // as requested. + return TRUE; +} + +// +// EvoUpdateHeadParams() +// Send GPUs HeadParams updates; accounts for SLI. 
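+// It wraps the SetHeadControl call in nvPushEvoSubDevMaskDisp() /
+// nvPopEvoSubDevMask(), so the core channel methods are sent with this
+// pDisp's subdevice mask.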
+// +static void EvoUpdateHeadParams(const NVDispEvoRec *pDispEvo, NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + nvPushEvoSubDevMaskDisp(pDispEvo); + + pDevEvo->hal->SetHeadControl(pDevEvo, pDispEvo->displayOwner, head, updateState); + + nvPopEvoSubDevMask(pDevEvo); +} + +// +// nvReadCRC32Evo() +// Returns the last CRC32 value +NvBool nvReadCRC32Evo(NVDispEvoPtr pDispEvo, NvU32 head, + CRC32NotifierCrcOut *crcOut) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVHwModeTimingsEvo *pTimings = &pHeadState->timings; + NVEvoDmaPtr dma = NULL; + NVConnectorEvoPtr pConnectorEvo = NULL; + NVEvoUpdateState updateState = { }; + NvU32 numCRC32 = 0; + NvBool res = TRUE; + NvBool found = FALSE; + NvU32 ret; + + // Look up the head connector + nvListForEachEntry(pConnectorEvo, + &pDispEvo->connectorList, + connectorListEntry) { + NvU32 activeHeadMask = + nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + if (activeHeadMask & NVBIT(head)) { + found = TRUE; + break; + } + } + + if (!found) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to find active connector for head %d", head); + return FALSE; + } + + // Allocate a temporary DMA notifier + dma = nvCalloc(1, sizeof(NVEvoDma)); + if ((dma == NULL) || + !nvRmAllocEvoDma(pDevEvo, + dma, + NV_DMA_EVO_NOTIFIER_SIZE - 1, + DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER), + 1 << pDispEvo->displayOwner)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "CRC32 notifier DMA allocation failed"); + nvFree(dma); + return FALSE; + } + + // Bind the CRC32 notifier ctxDma + ret = nvRmEvoBindDispContextDMA(pDevEvo, pDevEvo->core, dma->ctxHandle); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to bind display engine CRC32 notify context " + "DMA: 0x%x (%s)", ret, nvstatusToString(ret)); + res = FALSE; + goto done; + } + + // Only set up the actual output for SLI primary. + nvPushEvoSubDevMask(pDevEvo, 1 << pDispEvo->displayOwner); + + /* CRC notifiers are similar to completion notifiers, but work slightly + * different: + * + * 1. In order to start CRC generation for a head, we need to: + * + * - Point an EVO head at a block of memory with + * HEAD_SET_CONTEXT_DMA_CRC(head) + * + * - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to select + * what output we want to capture CRC values from, and kicking off a + * core channel update (this already generates a CRC value for the + * last scanout buffer) + * + * ----> hal->StartCRC32Capture() + * + * 2. From 1) on, a new CRC value is generated per vblank and written to + * an incrementing entry in the CRC notifier. With pre-nvdisplay chips, + * a CRC notifier can hold up to 256 entries. Once filled up, new CRC + * values are discarded. Either case, we are only interested in the + * last CRC32 value. + * + * 3. In order to stop CRC generation, we need to perform the inverse + * operation of 1): + * + * - Program the CRC control with HEAD_SET_CRC_CONTROL(head) to + * unselect all outputs we were capturing CRC values from. + * + * - Unset the CRC context DMA with HEAD_SET_CONTEXT_DMA_CRC(head) + * + * ----> hal->StopCRC32Capture() + * + * 4. From 3) on, it is safe to wait for the CRC notifier and query all + * entries. 
+ * + * ----> hal->QueryCRC32() + */ + pDevEvo->hal->StartCRC32Capture(pDevEvo, + dma, + pConnectorEvo, + pTimings->protocol, + nvEvoConnectorGetPrimaryOr(pConnectorEvo), + head, + pDispEvo->displayOwner, + &updateState); + + // This update should generate one CRC value. + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */); + + pDevEvo->hal->StopCRC32Capture(pDevEvo, + head, + &updateState); + + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE /* releaseElv */); + + if (!pDevEvo->hal->QueryCRC32(pDevEvo, + dma, + pDispEvo->displayOwner, + 1, + crcOut, + &numCRC32) || + (numCRC32 == 0)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query all CRC32 values"); + } + + nvPopEvoSubDevMask(pDevEvo); + +done: + // Clean-up + nvRmFreeEvoDma(pDevEvo, dma); + nvFree(dma); + + return res; +} + +NvU32 nvGetActiveSorMask(const NVDispEvoRec *pDispEvo) +{ + NvU32 activeSorMask = 0; + NvU32 head; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVConnectorEvoPtr pConnectorEvo = + pDispEvo->headState[head].pConnectorEvo; + + if (pConnectorEvo != NULL && + pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + nvAssert(pConnectorEvo->or.mask != 0x0); + + activeSorMask |= pConnectorEvo->or.mask; + } + } + + return activeSorMask; +} + +NvBool nvEvoPollForNoMethodPending(NVDevEvoPtr pDevEvo, + const NvU32 sd, + NVEvoChannelPtr pChannel, + NvU64 *pStartTime, + const NvU32 timeout) +{ + do + { + NvBool isMethodPending = TRUE; + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pChannel, + sd, + &isMethodPending) && !isMethodPending) { + break; + } + + if (nvExceedsTimeoutUSec(pStartTime, timeout)) { + return FALSE; + } + + nvkms_yield(); + } while (TRUE); + + return TRUE; +} + +static NvU32 SetSORFlushMode(NVDevEvoPtr pDevEvo, + NvU32 sorNumber, + NvU32 headMask, + NvBool enable) +{ + NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS params = { }; + + params.base.subdeviceIndex = 0; + params.sorNumber = sorNumber; + params.headMask = headMask; + params.bEnable = enable; + + return nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_SOR_FLUSH_MODE, + ¶ms, sizeof(params)); +} + +static void DPSerializerLinkTrain(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo, + NvBool enableLink, + NvBool reTrain) +{ + const NvU32 displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + const NvU32 sorNumber = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + NvBool force = NV_FALSE; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + /* + * The NV0073_CTRL_DP_DATA_SET_{LANE_COUNT, LINK_BW} defines are the same + * as the actual DPCD values. As such, we can directly assign the + * dpSerializerCaps here. 
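+ *
+ * For example, a serializer reporting maxLinkBW = 0x14 (5.4 Gbps per lane in
+ * DPCD encoding) and maxLaneCount = 4 can be written into _SET_LINK_BW and
+ * _SET_LANE_COUNT below without any translation.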
+ */ + NvBool isMST = pConnectorEvo->dpSerializerCaps.supportsMST; + NvU32 linkBW = pConnectorEvo->dpSerializerCaps.maxLinkBW; + NvU32 laneCount = pConnectorEvo->dpSerializerCaps.maxLaneCount; + + nvAssert(nvConnectorIsDPSerializer(pConnectorEvo)); + + if (sorNumber == NV_INVALID_OR) { + return; + } + + if (reTrain) { + if (!pConnectorEvo->dpSerializerEnabled) { + nvEvoLogDev(pDevEvo, EVO_LOG_INFO, + "Received expected HPD_IRQ during serializer shutdown"); + return; + } + } else if (enableLink) { + pConnectorEvo->dpSerializerEnabled = NV_TRUE; + } else { + linkBW = 0; + laneCount = NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0; + pConnectorEvo->dpSerializerEnabled = NV_FALSE; + } + + if (isMST) { + NvU32 dpcdData = 0; + + dpcdData = FLD_SET_DRF(_DPCD, _MSTM_CTRL, _EN, _YES, dpcdData); + dpcdData = + FLD_SET_DRF(_DPCD, _MSTM_CTRL, _UPSTREAM_IS_SRC, _YES, dpcdData); + if (!nvWriteDPCDReg(pConnectorEvo, NV_DPCD_MSTM_CTRL, dpcdData)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to enable MST DPCD"); + return; + } + } + + /* + * We cannot perform link training while the OR has an attached head + * since we would be changing the OR clocks and link frequency while + * it's actively encoding pixels, and this could lead to FIFO overflow/ + * underflow issues. Instead, the recommended, safe sequence is to enter + * flush mode first, re-train the link, and exit flush mode after. + */ + if (reTrain) { + if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_TRUE) != + NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to enter flush mode"); + return; + } + } + + do { + NvU32 dpCtrlData = 0; + NvU32 dpCtrlCmd = 0; + NV0073_CTRL_DP_CTRL_PARAMS dpCtrlParams = { }; + + dpCtrlCmd = DRF_DEF(0073_CTRL, _DP_CMD, _SET_LANE_COUNT, _TRUE) | + DRF_DEF(0073_CTRL, _DP_CMD, _SET_LINK_BW, _TRUE) | + DRF_DEF(0073_CTRL, _DP_CMD, _SET_ENHANCED_FRAMING, _TRUE); + + if (isMST) { + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _SET_FORMAT_MODE, _MULTI_STREAM); + } + + if (force) { + dpCtrlCmd |= DRF_DEF(0073_CTRL, _DP_CMD, _FAKE_LINK_TRAINING, _DONOT_TOGGLE_TRANSMISSION); + } + + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LINK_BW, + linkBW, dpCtrlData); + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _SET_LANE_COUNT, + laneCount, dpCtrlData); + dpCtrlData = FLD_SET_DRF_NUM(0073_CTRL, _DP_DATA, _TARGET, + NV0073_CTRL_DP_DATA_TARGET_SINK, + dpCtrlData); + + dpCtrlParams.subDeviceInstance = pDispEvo->displayOwner; + dpCtrlParams.displayId = displayId; + dpCtrlParams.cmd = dpCtrlCmd; + dpCtrlParams.data = dpCtrlData; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_CTRL, + &dpCtrlParams, sizeof(dpCtrlParams)) == NVOS_STATUS_SUCCESS) { + break; + } + + if (force) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Fake link training failed"); + break; + } + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Link training failed"); + + /* + * XXX Force the link config on the GPU side to avoid hanging the display + * pipe during modeset. Eventually, we need to figure out how to deal + * with/report these kinds of LT failures. 
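+ *
+ * With 'force' set, the next (and final) pass through this loop adds
+ * _FAKE_LINK_TRAINING _DONOT_TOGGLE_TRANSMISSION to the control command, so
+ * the GPU-side link configuration is still programmed even though the sink
+ * never trained; the loop then exits either way.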
+ */ + force = NV_TRUE; + + } while (NV_TRUE); + + if (reTrain) { + if (SetSORFlushMode(pDevEvo, sorNumber, headMask, NV_FALSE) != + NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to exit flush mode"); + } + } +} + +void nvDPSerializerHandleDPIRQ(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo) +{ + DPSerializerLinkTrain(pDispEvo, pConnectorEvo, + NV_TRUE /* enableLink */, + NV_TRUE /* reTrain */); +} + +void nvDPSerializerPreSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo) +{ + const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + + if (!pConnectorEvo->dpSerializerEnabled && (headMask != 0)) { + DPSerializerLinkTrain(pDispEvo, pConnectorEvo, + NV_TRUE /* enableLink */, + NV_FALSE /* reTrain */); + } +} + +void nvDPSerializerPostSetMode(NVDispEvoPtr pDispEvo, + NVConnectorEvoPtr pConnectorEvo) +{ + const NvU32 headMask = nvConnectorGetAttachedHeadMaskEvo(pConnectorEvo); + + if (pConnectorEvo->dpSerializerEnabled && (headMask == 0)) { + DPSerializerLinkTrain(pDispEvo, pConnectorEvo, + NV_FALSE /* enableLink */, + NV_FALSE /* reTrain */); + } +} diff --git a/src/nvidia-modeset/src/nvkms-evo1.c b/src/nvidia-modeset/src/nvkms-evo1.c new file mode 100644 index 000000000..3037e5494 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-evo1.c @@ -0,0 +1,539 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains implementations of the EVO HAL methods for display class + * 1.x, found in the Tesla and Fermi 1 (GF10x) chips. + */ + +#include "nvkms-types.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-evo1.h" +#include "nvkms-prealloc.h" +#include "nvkms-utils.h" + +#include // NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS + +/*! + * Initialize head-independent IMP param fields. + * + * Initializes an NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS structure. + * IMP users should call this once, followed by per-head calls to + * AssignPerHeadImpParams(). + * + * \param pImp[in] A pointer to a param structure. + */ +static void InitImpParams(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp) +{ + int i; + + nvkms_memset(pImp, 0, sizeof(*pImp)); + + /* Initialize to not possible. */ + pImp->IsPossible = NV5070_CTRL_CMD_IS_MODE_POSSIBLE_IS_POSSIBLE_NO; + + /* Set all heads to inactive. 
*/ + for (i = 0; i < NV5070_CTRL_CMD_MAX_HEADS; i++) { + pImp->Head[i].HeadActive = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_HEAD_ACTIVE_NO; + } + + /* Set all ORs to no owner. */ + for (i = 0; i < NV5070_CTRL_CMD_MAX_DACS; i++) { + pImp->Dac[i].owner = NV5070_CTRL_CMD_OR_OWNER_NONE; + } + + pImp->bUseSorOwnerMask = TRUE; + for (i = 0; i < NV5070_CTRL_CMD_MAX_SORS; i++) { + pImp->Sor[i].ownerMask = NV5070_CTRL_CMD_SOR_OWNER_MASK_NONE; + } + + for (i = 0; i < NV5070_CTRL_CMD_MAX_PIORS; i++) { + pImp->Pior[i].owner = NV5070_CTRL_CMD_OR_OWNER_NONE; + } +} + +/*! + * Initialize head-specific IMP param fields. + * + * Initialize the portion of the NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS + * structure that applies to a specific head, and the OR driven by + * that head. + * + * The param structure should be initialized by InitImpParams() + * before calling this per-head function. + * + * \param[out] pImp The param structure to initialize. + * \param[in] pTimings The rastering timings and viewport configuration. + * \param[in] pUsage The usage bounds that will be used for this head. + * \param[in] head The number of the head that will be driven. + * \param[in] orNumber The number of the OR driven by the head. + * \param[in] orType The type of the OR driven by the head. + */ +static void AssignPerHeadImpParams(const NVDevEvoRec *pDevEvo, + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp, + const NVHwModeTimingsEvo *pTimings, + const struct NvKmsUsageBounds *pUsage, + const int head, + const int orNumber, + const int orType) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + NvU64 overlayFormats = 0; + NvU32 protocol; + + nvkms_memset(&pImp->Head[head], 0, sizeof(pImp->Head[head])); + + nvAssert(head < NV5070_CTRL_CMD_MAX_HEADS); + pImp->Head[head].HeadActive = TRUE; + + nvAssert(orType == NV0073_CTRL_SPECIFIC_OR_TYPE_NONE || + orNumber != NV_INVALID_OR); + + /* raster timings */ + + pImp->Head[head].PixelClock.Frequency = pTimings->pixelClock; + + pImp->Head[head].PixelClock.Adj1000Div1001 = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PIXEL_CLOCK_ADJ1000DIV1001_NO; + + pImp->Head[head].RasterSize.Width = pTimings->rasterSize.x; + pImp->Head[head].RasterSize.Height = pTimings->rasterSize.y; + pImp->Head[head].RasterBlankStart.X = pTimings->rasterBlankStart.x; + pImp->Head[head].RasterBlankStart.Y = pTimings->rasterBlankStart.y; + pImp->Head[head].RasterBlankEnd.X = pTimings->rasterBlankEnd.x; + pImp->Head[head].RasterBlankEnd.Y = pTimings->rasterBlankEnd.y; + pImp->Head[head].RasterVertBlank2.YStart = pTimings->rasterVertBlank2Start; + pImp->Head[head].RasterVertBlank2.YEnd = pTimings->rasterVertBlank2End; + pImp->Head[head].Control.Structure = + pTimings->interlaced ? 
+ NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_INTERLACED : + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_CONTROL_STRUCTURE_PROGRESSIVE; + + if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_DAC) { + nvAssert(orNumber < ARRAY_LEN(pImp->Dac)); + nvAssert(pImp->Dac[orNumber].owner == NV5070_CTRL_CMD_OR_OWNER_NONE); + pImp->Dac[orNumber].owner = NV5070_CTRL_CMD_OR_OWNER_HEAD(head); + nvAssert(pTimings->protocol == NVKMS_PROTOCOL_DAC_RGB); + pImp->Dac[orNumber].protocol = NV5070_CTRL_CMD_DAC_PROTOCOL_RGB_CRT; + } else if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + nvAssert(orNumber < ARRAY_LEN(pImp->Sor)); + pImp->Sor[orNumber].ownerMask |= NV5070_CTRL_CMD_SOR_OWNER_MASK_HEAD(head); + switch (pTimings->protocol) { + default: + nvAssert(!"Unknown protocol"); + /* fall through */ + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + protocol = NV5070_CTRL_CMD_SOR_PROTOCOL_DP_B; + break; + } + pImp->Sor[orNumber].protocol = protocol; + pImp->Sor[orNumber].pixelReplicateMode = + NV5070_CTRL_IS_MODE_POSSIBLE_PIXEL_REPLICATE_MODE_OFF; + } else if (orType == NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR) { + nvAssert(orNumber < ARRAY_LEN(pImp->Pior)); + nvAssert(pImp->Pior[orNumber].owner == NV5070_CTRL_CMD_OR_OWNER_NONE); + pImp->Pior[orNumber].owner = NV5070_CTRL_CMD_OR_OWNER_HEAD(head); + switch (pTimings->protocol) { + default: + nvAssert(!"Unknown protocol"); + /* fall through */ + case NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC: + protocol = NV5070_CTRL_CMD_PIOR_PROTOCOL_EXT_TMDS_ENC; + break; + } + pImp->Pior[orNumber].protocol = protocol; + } else { + nvAssert(orType == NV0073_CTRL_SPECIFIC_OR_TYPE_NONE); + } + + /* viewport out */ + + pImp->Head[head].OutputScaler.VerticalTaps = + NVEvoScalerTapsToNum(pViewPort->vTaps); + + pImp->Head[head].OutputScaler.HorizontalTaps = + NVEvoScalerTapsToNum(pViewPort->hTaps); + + pImp->Head[head].ViewportSizeOut.Width = pViewPort->out.width; + pImp->Head[head].ViewportSizeOut.Height = pViewPort->out.height; + + pImp->Head[head].ViewportSizeOutMin.Width = + pImp->Head[head].ViewportSizeOut.Width; + + pImp->Head[head].ViewportSizeOutMin.Height = + pImp->Head[head].ViewportSizeOut.Height; + + pImp->Head[head].ViewportSizeOutMax.Width = + pImp->Head[head].ViewportSizeOut.Width; + + pImp->Head[head].ViewportSizeOutMax.Height = + pImp->Head[head].ViewportSizeOut.Height; + + /* viewport in */ + + pImp->Head[head].ViewportSizeIn.Width = pViewPort->in.width; + pImp->Head[head].ViewportSizeIn.Height = pViewPort->in.height; + + /* + * The actual format doesn't really matter, since RM just + * converts it back to bits per pixel for its IMP calculation anyway. The + * hardware doesn't have a "usage bound" for core -- changing the format + * of the core surface will always incur a supervisor interrupt and rerun + * IMP (XXX if we change the core surface as part of a flip to one of a + * different depth, should we force the pre/post IMP update path?). + * + * EVO2 hal uses surfaces of the same format in the core and base channels, + * see needToReprogramCoreSurface() in nvkms-evo2.c. 
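+ *
+ * The selection below simply picks the deepest packed RGB format the main
+ * layer claims to support (64, 32, 16, or 8 bpp) and falls back to A8R8G8B8
+ * when the main layer is not usable, since only the implied bits per pixel
+ * matter to IMP.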
+ */ + if (pUsage->layer[NVKMS_MAIN_LAYER].usable) { + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_R5G6B5; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_I8; + } else { /* default to RGB 4BPP */ + nvAssert(!"Unknown core format"); + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } + } else { + pImp->Head[head].Params.Format = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_FORMAT_A8R8G8B8; + } + + pImp->Head[head].Params.SuperSample = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS_SUPER_SAMPLE_X1AA; + + /* base usage bounds */ + + if (pUsage->layer[NVKMS_MAIN_LAYER].usable) { + pImp->Head[head].BaseUsageBounds.Usable = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_YES; + + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_32; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_16; + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_8; + } else { /* default to RGB 8BPP */ + nvAssert(!"Unknown base channel usage bound format"); + pImp->Head[head].BaseUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_PIXEL_DEPTH_64; + } + + pImp->Head[head].BaseUsageBounds.SuperSample = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_SUPER_SAMPLE_X1AA; + } else { + pImp->Head[head].BaseUsageBounds.Usable = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_BASE_USAGE_BOUNDS_USABLE_NO; + } + + /* overlay usage bounds */ + + pImp->Head[head].OverlayUsageBounds.Usable = + pUsage->layer[NVKMS_OVERLAY_LAYER].usable + ? NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_YES + : NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_USABLE_NO; + + overlayFormats = pUsage->layer[NVKMS_OVERLAY_LAYER].usable ? 
+ pUsage->layer[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats : + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP; + + if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32; + } else if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_16; + } else { + nvAssert(!"Unknown overlay channel usage bound format"); + pImp->Head[head].OverlayUsageBounds.PixelDepth = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_OVERLAY_USAGE_BOUNDS_PIXEL_DEPTH_32; + } + + /* pixel depth */ + + switch (pTimings->pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + break; + case NVKMS_PIXEL_DEPTH_24_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + break; + case NVKMS_PIXEL_DEPTH_30_444: + pImp->Head[head].outputResourcePixelDepthBPP = + NV5070_CTRL_IS_MODE_POSSIBLE_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + break; + } +} + +void nvEvo1IsModePossible(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp)); + NvBool result = FALSE; + NvU32 head; + NvU32 ret; + + InitImpParams(pImp); + + pImp->RequestedOperation = + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_QUERY; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (pInput->head[head].pTimings == NULL) { + continue; + } + + AssignPerHeadImpParams(pDevEvo, pImp, + pInput->head[head].pTimings, + pInput->head[head].pUsage, + head, + pInput->head[head].orIndex, + pInput->head[head].orType); + } + + pImp->base.subdeviceIndex = pDispEvo->displayOwner; + + if (pInput->requireBootClocks) { + // XXX TODO: IMP requires lock pin information if pstate information is + // requested. For now, just assume no locking. + pImp->MinPState = NV5070_CTRL_IS_MODE_POSSIBLE_NEED_MIN_PSTATE; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + pImp->Head[head].displayId[0] = pInput->head[head].displayId; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_IS_MODE_POSSIBLE, + pImp, sizeof(*pImp)); + + if (ret != NV_OK || !pImp->IsPossible || + (pInput->requireBootClocks && + // P8 = "boot clocks" + (pImp->MinPState < NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_P8 && + // XXX TODO: With PStates 3.0, only a "v-pstate" is returned in + // impParams.minPerfLevel. We need to correlate that with "boot + // clocks" somehow. 
+ pImp->MinPState != NV5070_CTRL_IS_MODE_POSSIBLE_PSTATES_UNDEFINED))) { + goto done; + } + + result = TRUE; + +done: + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS); + pOutput->possible = result; +} + +void nvEvo1PrePostIMP(NVDispEvoPtr pDispEvo, NvBool isPre) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS *pImp = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp)); + NvU32 ret; + + if (isPre) { + /* + * Sync the core channel for pre-modeset IMP to ensure that the state + * cache reflects all of the methods we've pushed + */ + ret = nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + if (!ret) { + nvAssert(!"nvRMSyncEvoChannel failed during PreModesetIMP"); + } + } + + nvkms_memset(pImp, 0, sizeof(*pImp)); + + pImp->RequestedOperation = isPre ? + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_PRE_MODESET_USE_SC : + NV5070_CTRL_CMD_IS_MODE_POSSIBLE_REQUESTED_OPERATION_POST_MODESET_USE_SC; + + pImp->base.subdeviceIndex = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_IS_MODE_POSSIBLE, + pImp, sizeof(*pImp)); + if ((ret != NVOS_STATUS_SUCCESS) || !pImp->IsPossible) { + nvAssert(!"NV5070_CTRL_CMD_IS_MODE_POSSIBLE failed"); + } + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS); +} + +/*! + * Return the value to use for HEAD_SET_STORAGE_PITCH. + * + * Per dispClass_02.mfs, the HEAD_SET_STORAGE_PITCH "units are blocks + * if the layout is BLOCKLINEAR, the units are multiples of 256 bytes + * if the layout is PITCH." + * + * \return Returns 0 if the pitch is invalid. Otherwise returns the + * HEAD_SET_STORAGE_PITCH value. + */ +NvU32 nvEvoGetHeadSetStoragePitchValue(const NVDevEvoRec *pDevEvo, + enum NvKmsSurfaceMemoryLayout layout, + NvU32 pitch) +{ + if (layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + /* pitch is already in units of blocks; nothing else needed. */ + } else { + /* pitch is in units of bytes, and must be aligned to 0x100. */ + if ((pitch & 0xFF) != 0) { + return 0; + } + + pitch >>= 8; + } + + if (pitch > pDevEvo->caps.maxPitchValue) { + return 0; + } + + return pitch; +} + +static NvBool GetChannelState(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvU32 *result) +{ + NV5070_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { }; + NvU32 ret; + + info.base.subdeviceIndex = sd; + info.channelClass = pChan->hwclass; + info.channelInstance = pChan->instance; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_CHANNEL_INFO, + &info, sizeof(info)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query display engine channel state: 0x%08x:%d:%d:0x%08x", + pChan->hwclass, pChan->instance, sd, ret); + return FALSE; + } + + *result = info.channelState; + + return TRUE; +} + +NvBool nvEvo1IsChannelIdle(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvU32 channelState; + + if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) { + return FALSE; + } + + *result = (channelState == NV5070_CTRL_GET_CHANNEL_INFO_STATE_IDLE); + + return TRUE; +} + +/* + * Result is false if an EVO channel is either one of NO_METHOD_PENDING or + * UNCONNECTED, true o.w. + * + * NO_METHOD_PENDING is a mask for EMPTY | WRTIDLE | IDLE. + * + * If NVKMS hasn't grabbed the channel, it can be seen as UNCONNECTED. 
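+ *
+ * For example, a channel reported as IDLE (part of the NO_METHOD_PENDING
+ * mask) yields *result == FALSE, i.e. no methods pending; a state outside
+ * both masks yields *result == TRUE.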
+ */ +NvBool nvEvo1IsChannelMethodPending(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvU32 channelState; + + if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) { + return FALSE; + } + + *result = !(channelState & + (NV5070_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING | + NV5070_CTRL_GET_CHANNEL_INFO_STATE_UNCONNECTED)); + + return TRUE; +} + +void nvEvo1SetDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings) +{ + nvAssert(!pTimings->dpDsc.enable); +} diff --git a/src/nvidia-modeset/src/nvkms-evo2.c b/src/nvidia-modeset/src/nvkms-evo2.c new file mode 100644 index 000000000..3be05733d --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-evo2.c @@ -0,0 +1,3849 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains implementations of the EVO HAL methods for display class + * 2.x. 
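+ *
+ * That is, the 917C base, 917D core, and 917E overlay channel classes (and
+ * the 927D/947D/977D core channel variants) included below.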
+ */ + +#include "nvkms-dma.h" +#include "nvkms-types.h" +#include "nvkms-rmapi.h" +#include "nvkms-surface.h" + +#include "nvkms-evo.h" +#include "nvkms-evo1.h" + +#include + +#include // NV5070_NOTIFICATION_STATUS + +#include // NV917C_BASE_CHANNEL_DMA +#include // GK104DispOverlayImmControlPio +#include // NV917C_SET_PRESENT_CONTROL_STEREO_FLIP_MODE +#include // NV917D_HEAD_SET_HDMI_CTRL +#include // NV917E_OVERLAY_CHANNEL_DMA +#include // NV917C_SET_SPARE_{PRE,POST}_UPDATE_TRAP + +#include // NV917D_CORE_CHANNEL_DMA +#include // NV917D_NOTIFIER_CRC +#include // NV927D_CORE_CHANNEL_DMA +#include // NV977D_CORE_CHANNEL_DMA +#include // NV947D_CORE_CHANNEL_DMA +#include + +#include // NV5070_CTRL_CMD_STOP_BASE_PARAMS + +ct_assert(NV_EVO_LOCK_PIN_0 > + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1); +ct_assert(NV_EVO_LOCK_PIN_0 > + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_FLIP_LOCK__SIZE_1); + +/** Number of CRCs supported by hardware on NV917D hardware (Comp and SF/SOR) */ +#define NV_EVO2_NUM_CRC_FIELDS 2 + +/** Flags read from CRCNotifier on NV917D hardware (Comp, SF/SOR Ovf and count) */ +#define NV_EVO2_NUM_CRC_FLAGS 3 + +#define NV_EVO2_SUPPORTED_DITHERING_MODES \ + ((1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL)) + +#define NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES \ + ((1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA)) + +static void +EvoSetCursorImage(NVDevEvoPtr pDevEvo, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams); + +static void +EvoPushSetLUTContextDmaMethodsForOneSd(NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 ctxdma, + NvBool enableBaseLut, + const NvBool enableOutputLut, + NVEvoUpdateState *updateState); +static void +EvoPushUpdateComposition(NVDevEvoPtr pDevEvo, + const int head, + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + +static void InitChannelCaps90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel) +{ + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) { + static const NVEvoChannelCaps OverlayCaps = { + /* + * Overlay supports timestamp flips on class 9x7e, but error checks + * that it doesn't exceed 61 bits. + */ + .validTimeStampBits = 61, + /* The size of the legacy overlay notifier format. */ + .legacyNotifierFormatSizeBytes = NV_DISP_NOTIFICATION_2_SIZEOF, + /* Overlay does not support tearing/immediate flips. */ + .tearingFlips = FALSE, + .vrrTearingFlips = FALSE, + /* Overlay does not support per-eye stereo flips. */ + .perEyeStereoFlips = FALSE, + }; + + pChannel->caps = OverlayCaps; + } + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) { + static const NVEvoChannelCaps BaseCaps = { + /* + * Base supports timestamp flips on class 9x7c, but error checks + * that it doesn't exceed 61 bits. + */ + .validTimeStampBits = 61, + /* The size of the legacy base format. */ + .legacyNotifierFormatSizeBytes = NV_DISP_BASE_NOTIFIER_1_SIZEOF, + /* Base supports tearing/immediate flips. */ + .tearingFlips = TRUE, + /* Some 9x7c classes support VRR; may be overridden at runtime. 
*/ + .vrrTearingFlips = FALSE, + /* Base supports per-eye stereo flips. */ + .perEyeStereoFlips = TRUE, + }; + + pChannel->caps = BaseCaps; + + /* Base supports VRR tearing flips for class 917c and up. */ + if (pChannel->hwclass >= NV917C_BASE_CHANNEL_DMA) { + pChannel->caps.vrrTearingFlips = TRUE; + } + } +} + +static void EvoInitChannel90(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + + InitChannelCaps90(pDevEvo, pChannel); + + /* Set up core channel state. */ + if (isCore) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DEFAULT_BASE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _RED, 0) | + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _GREEN, 0) | + DRF_NUM(917D, _HEAD_SET_DEFAULT_BASE_COLOR, _BLUE, 0)); + } + } + + /* Set up base channel state. */ + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) { + NvU32 head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd]->baseComp[head].initialized = FALSE; + } + + // For now we only support USE_CORE_LUT mode, but sending this method every + // flip causes an error check to fire for tearing flips even if the LUT mode + // isn't changing. So instead, program it here. ApplyBaseFlipOverrides() + // will force the first flip to be non-tearing. + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_BASE_LUT_LO, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_BASE_LUT_LO, _ENABLE, + _USE_CORE_LUT)); + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_OUTPUT_LUT_LO, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_OUTPUT_LUT_LO, _ENABLE, + _USE_CORE_LUT)); + } + + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) { + NvU32 head = NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(pChannel->channelMask); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd]->overlayComp[head].initialized = FALSE; + } + } +} + +static void EvoInitWindowMapping90(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + /* Fixed window mapping on EVO 2 -- nothing to do. */ +} + +/* + * These values are the same between all overlay + * (7E_SURFACE_SET_PARAMS_FORMAT_) EVO classes. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
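+ *
+ * 0 is also returned for recognized formats the overlay channel cannot scan
+ * out (I8, R5G6B5, and the YUV/semi-planar formats), so a zero return
+ * effectively means "not usable in the overlay".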
+ */ +static NvU32 EvoOverlayFormatFromKmsFormat91(enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return 0; + case NvKmsSurfaceMemoryFormatR5G6B5: + return 0; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NV917E_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + return NV917E_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NV917E_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +static void EvoSetRasterParams90(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 overscanColor = + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _RED, pOverscanColor->red) | + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _GRN, pOverscanColor->green) | + DRF_NUM(917D, _HEAD_SET_OVERSCAN_COLOR, _BLU, pOverscanColor->blue); + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX[AGP]: These methods are sequential and could use an incrementing + // method, but it's not clear if there's a bug in EVO that causes corruption + // sometimes. Play it safe and send methods with count=1. 
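+ // (For illustration only: the incrementing form would presumably be a single
+ // nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_SIZE(head), 2)
+ // followed by two nvDmaSetEvoMethodData() calls covering RASTER_SIZE and
+ // RASTER_SYNC_END, relying on the method offsets being adjacent as noted
+ // above.)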
+ + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OVERSCAN_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, overscanColor); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_SIZE, _WIDTH, pTimings->rasterSize.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_SIZE, _HEIGHT, pTimings->rasterSize.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_SYNC_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_SYNC_END, _X, pTimings->rasterSyncEnd.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_SYNC_END, _Y, pTimings->rasterSyncEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_BLANK_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_END, _X, pTimings->rasterBlankEnd.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_END, _Y, pTimings->rasterBlankEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_BLANK_START(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_START, _X, pTimings->rasterBlankStart.x) | + DRF_NUM(917D, _HEAD_SET_RASTER_BLANK_START, _Y, pTimings->rasterBlankStart.y)); + + if (pTimings->interlaced) { + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_RASTER_VERT_BLANK2(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_RASTER_VERT_BLANK2, _YSTART, + pTimings->rasterVertBlank2Start) | + DRF_NUM(917D, _HEAD_SET_RASTER_VERT_BLANK2, _YEND, + pTimings->rasterVertBlank2End)); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _HERTZ, + pTimings->pixelClock * 1000) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _MODE, _CLK_CUSTOM) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _NOT_DRIVER, _FALSE) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _ENABLE_HOPPING, _FALSE) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING_MODE, _VBLANK)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _HERTZ, + pTimings->pixelClock * 1000) | + DRF_DEF(917D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _ADJ1000DIV1001,_FALSE)); +} + +/* + * Wrapper for EvoSetRasterParams90 which additionally sends the HDMI 3D + * control methods. 
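+ *
+ * Concretely, it selects _VIDEO_FORMAT _STEREO3D vs. _NORMAL in
+ * HEAD_SET_HDMI_CTRL based on pTimings->hdmi3D, and also programs
+ * HEAD_SET_VACTIVE_SPACE_COLOR (non-black in DEBUG builds, presumably to make
+ * the frame-packed vertical active space visible).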
+ */ +static void EvoSetRasterParams91(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hdmiStereoCtrl = + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _STEREO3D_STRUCTURE, _FRAME_PACKED) | + DRF_NUM(917D, _HEAD_SET_HDMI_CTRL, _HDMI_VIC, 0); + + EvoSetRasterParams90(pDevEvo, head, + pTimings, + pOverscanColor, updateState); + + if (pTimings->hdmi3D) { + hdmiStereoCtrl |= + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _STEREO3D); + } else { + hdmiStereoCtrl |= + DRF_DEF(917D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _NORMAL); + } + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_VACTIVE_SPACE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _RED_CR, 0) | +#if defined(DEBUG) + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _GRN_Y, 512) | +#else + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _GRN_Y, 0) | +#endif + DRF_NUM(917D, _HEAD_SET_VACTIVE_SPACE_COLOR, _BLU_CB, 0)); + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_HDMI_CTRL(head), 1); + nvDmaSetEvoMethodData(pChannel, hdmiStereoCtrl); +} + +static void EvoSetProcAmp90(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 dynRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // These NVT defines match the HEAD_SET_PROCAMP ones. + ct_assert(NVT_COLORIMETRY_RGB == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB); + ct_assert(NVT_COLORIMETRY_YUV_601 == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601); + ct_assert(NVT_COLORIMETRY_YUV_709 == NV917D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709); + ct_assert(NVT_COLOR_RANGE_FULL == NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE); + ct_assert(NVT_COLOR_RANGE_LIMITED == NV917D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE); + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(917D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(917D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PROCAMP, _COLOR_SPACE, + pHeadState->procAmp.colorimetry) | + DRF_DEF(917D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _AUTO) | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _SAT_COS, + pHeadState->procAmp.satCos) | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _SAT_SINE, 0) | + dynRange | + DRF_NUM(917D, _HEAD_SET_PROCAMP, _RANGE_COMPRESSION, + pHeadState->procAmp.colorRange)); +} + +static void EvoSetHeadControl90(NVDevEvoPtr pDevEvo, int sd, int head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + /* + * NOTE: This function should only push state to the hardware based on data + * in the pHC. If not, then we may miss updates due to the memcmp of the + * HeadControl structure in UpdateEvoLockState(). 
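+ *
+ * In other words, if this function consumed state from anywhere other than
+ * pHC, two calls with identical pHC contents could be skipped by that memcmp
+ * even though the hardware state we want to program had changed.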
+ */ + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + NvU32 data = 0, pin; + NvU32 serverLockMode, clientLockMode; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pHC->serverLock) { + case NV_EVO_NO_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + serverLockMode = NV917D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid server lock mode"); + return; + } + + switch (pHC->clientLock) { + case NV_EVO_NO_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + clientLockMode = NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid client lock mode"); + return; + } + + // Convert head control state to EVO method values. + if (pHC->interlaced) { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _STRUCTURE, _INTERLACED); + } else { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _STRUCTURE, _PROGRESSIVE); + } + + nvAssert(pHC->serverLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->clientLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->flipLockPin != NV_EVO_LOCK_PIN_ERROR); + + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->serverLockPin)) { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + /* + * dispClass_02.mfs says: + * "master lock pin, if internal, must be set to the corresponding + * internal pin for that head" (error check #12) + * (Note that this is only enforced when scanlock master is enabled) + */ + nvAssert(pHC->serverLock == NV_EVO_NO_LOCK || pin == head); + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _MASTER_LOCK_MODE, serverLockMode); + + if (clientLockMode == NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK) { + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_UNSPECIFIED); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->clientLockPin)) { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCK_MODE, clientLockMode); + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _SLAVE_LOCKOUT_WINDOW, + pHC->clientLockoutWindow); + + /* + * Interlaced with stereo lock mode is not supported. + * + * We always enable stereo lock when it's available and either framelock + * or rasterlock is in use. 
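+ *
+ * Note that below, the MASTER/SLAVE_STEREO_LOCK_MODE bits are only set on the
+ * side (server or client) whose scan lock is actually enabled.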
+ */ + if (pHC->stereoLocked) { + nvAssert(!pHC->interlaced); + + if (pHC->serverLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(927D, _HEAD_SET_CONTROL, _MASTER_STEREO_LOCK_MODE, + NV927D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE); + } + if (pHC->clientLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(927D, _HEAD_SET_CONTROL, _SLAVE_STEREO_LOCK_MODE, + NV927D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE); + } + } + + /* + * Changing the flip lock pin induces a head shutdown. We want to avoid + * this in two cases: + * + * 1) When transitioning from the UEFI console, the flip lock pin is + * currently set to UNSPECIFIED, so we want to preserve that setting + * if possible to avoid an unnecessary flicker. + * + * 2) While framelock is enabled, we need to avoid head shutdown when + * transitioning to and from fliplock to guarantee no loss of stereo + * sync. + * + * To guarantee stereo sync while also avoiding unnecessary flicker when + * transitioning from UEFI, we'll set the flip lock pin to UNSPECIFIED + * unless fliplock, frame lock, or raster lock are enabled. Enabling + * framelock may induce one head shutdown when transitioning away from + * UNSPECIFIED, but then enabling/disabling fliplock after that will + * have no effect on the fliplock pin. + */ + if (!pHC->flipLock && + (pHC->serverLock == NV_EVO_NO_LOCK) && + (pHC->clientLock == NV_EVO_NO_LOCK)) { + + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_UNSPECIFIED); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->flipLockPin)) { + pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK(pin)); + } else { + pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _FLIP_LOCK_PIN, + NV917D_HEAD_SET_CONTROL_FLIP_LOCK_PIN_LOCK_PIN(pin)); + } + if (pHC->flipLock) { + data |= DRF_DEF(917D, _HEAD_SET_CONTROL, _FLIP_LOCK, _ENABLE); + } + + nvAssert(pHC->stereoPin != NV_EVO_LOCK_PIN_ERROR); + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin)) { + /* + * dispClass_02.mfs says: + * "stereo pin, if internal, must be set to the corresponding internal + * pin for that head" (error check #14) + * So just ignore which pin we selected; no sense in wasting cycles + * keeping track of it + */ + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _STEREO_PIN, + NV917D_HEAD_SET_CONTROL_STEREO_PIN_INTERNAL_SCAN_LOCK(head)); + } else { + pin = pHC->stereoPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(917D, _HEAD_SET_CONTROL, _STEREO_PIN, + NV917D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(pin)); + } + + // Send the method. + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, data); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_LOCK_CHAIN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917D, _HEAD_SET_LOCK_CHAIN, _POSITION, + pHC->lockChainPosition)); +} + +static void EvoSetHeadRefClk90(NVDevEvoPtr pDevEvo, int head, NvBool external, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, external ? 
+ DRF_DEF(907D, _HEAD_SET_SW_SPARE_A_CODE, _VPLL_REF, _GSYNC) : + DRF_DEF(907D, _HEAD_SET_SW_SPARE_A_CODE, _VPLL_REF, _NO_PREF)); +} + +static void EvoDACSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_DAC_RGB); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_DAC_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _DAC_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(917D, _DAC_SET_CONTROL, _PROTOCOL, _RGB_CRT)); +} + +static void EvoSORSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hwProtocol = 0; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(orIndex != NV_INVALID_OR); + + if (headMask != 0) { + switch (protocol) { + default: + nvAssert(!"unexpected protocol"); + /* fallthrough */ + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + hwProtocol = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B; + break; + } + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_SOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_NUM(917D, _SOR_SET_CONTROL, _PROTOCOL, hwProtocol) | + DRF_DEF(917D, _SOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE) | + DRF_DEF(917D, _SOR_SET_CONTROL, _PIXEL_REPLICATE_MODE, _OFF)); +} + +static void EvoPIORSetControl90(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_PIOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _PIOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(917D, _PIOR_SET_CONTROL, _PROTOCOL, _EXT_TMDS_ENC) | + DRF_DEF(917D, _PIOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE)); +} + +static NvU32 EvoGetPixelDepth90(const enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + case NVKMS_PIXEL_DEPTH_24_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + case NVKMS_PIXEL_DEPTH_30_444: + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + 
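+ /* No default case: an unhandled pixel depth value falls through to the
+ * assert and the _BPP_24_444 fallback below. */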
} + nvAssert(!"Unexpected pixel depth"); + return NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; +} + +static void EvoHeadSetControlOR90(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + const NvU32 hwPixelDepth = EvoGetPixelDepth90(pTimings->pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _ACTIVE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + (colorSpaceOverride ? + (DRF_DEF(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(977D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE)) | + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth)); +} + +static void EvoORSetControl90(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + EvoDACSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + EvoSORSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + EvoPIORSetControl90(pConnectorEvo, protocol, orIndex, headMask); + break; + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } +} + +static void EvoHeadSetDisplayId90(NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DISPLAY_ID(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, displayId); +} + +static NvBool EvoSetUsageBounds90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVEvoSubDevHeadStateRec *pCurrentFlipState = + &pDevEvo->gpus[sd].headState[head]; + const struct NvKmsUsageBounds *pCurrentUsage = + &pCurrentFlipState->usage; + NvU64 overlayFormats = 0; + NvU32 baseUsage = 0, overlayUsage = 0; + const NVSurfaceEvoRec *pCurrentBaseSurf = + pCurrentFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + const NVSurfaceEvoRec *pCurrentOverlaySurf = 
+ pCurrentFlipState->layer[NVKMS_OVERLAY_LAYER].pSurfaceEvo[NVKMS_LEFT]; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (UsageBoundsEqual(pCurrentUsage, pUsage)) { + return FALSE; + } + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* + * Make sure to interlock usage bounds update with the base and overlay + * channel updates, otherwise it ends up violating hardware error check for + * the base/overlay channel blocking. + * + * // check for blocking violations + * for (vlt_index = 0; vlt_index < NV_CHIP_DISP_TOTAL_HEADS_PRESENT_por; vlt_index++) { + * if ((wir_BlockBase[vlt_index] == TRUE) + * && (wir_BaseQuiescent[vlt_index] == FALSE) + * && ((ecv_GlobalHeadConnected[vlt_index] == TRUE) || (pri_ErrcheckWhenDisconnected == TRUE))) + * throw (vlt_index << NV_DISP_CORE_STATE_ERROR_HEAD_INDEX_SHIFT) | NV_DISP_CORE_STATE_ERROR_001; + * } + * + * for (vlt_index = 0; vlt_index < NV_CHIP_DISP_TOTAL_HEADS_PRESENT_por; vlt_index++) { + * if ((wir_BlockOverlay[vlt_index] == TRUE) + * && (wir_OverlayQuiescent[vlt_index] == FALSE) + * && ((ecv_GlobalHeadConnected[vlt_index] == TRUE) || (pri_ErrcheckWhenDisconnected == TRUE))) + * throw (vlt_index << NV_DISP_CORE_STATE_ERROR_HEAD_INDEX_SHIFT) | NV_DISP_CORE_STATE_ERROR_002; + */ + + if (pCurrentBaseSurf != NULL && + !nvEvoLayerUsageBoundsEqual(pUsage, pCurrentUsage, NVKMS_MAIN_LAYER)) { + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->base[head]); + } + + if (pCurrentOverlaySurf != NULL && + !nvEvoLayerUsageBoundsEqual(pUsage, pCurrentUsage, NVKMS_OVERLAY_LAYER)) { + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->overlay[head]); + } + + + if (pUsage->layer[NVKMS_MAIN_LAYER].usable) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, _USABLE, + _TRUE); + + if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_64); + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_32); + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_16); + } else if (pUsage->layer[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats & + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_8); + } else { + nvAssert(!"Unexpected base pixel depth"); + return FALSE; + } + + baseUsage |= DRF_DEF(917D, _HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS, + _SUPER_SAMPLE, _X1_AA); + } + + overlayUsage |= pUsage->layer[NVKMS_OVERLAY_LAYER].usable ? + DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, _USABLE, _TRUE) : + DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, _USABLE, _FALSE); + + overlayFormats = pUsage->layer[NVKMS_OVERLAY_LAYER].usable ? 
+ pUsage->layer[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats : + NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP; + + if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_32); + } else if (overlayFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_16); + } else { + nvAssert(!"Unsupported overlay depth"); + overlayUsage |= DRF_DEF(917D, _HEAD_SET_OVERLAY_USAGE_BOUNDS, + _PIXEL_DEPTH, _BPP_16); + } + + nvDmaSetStartEvoMethod(pChannel, + NV917D_HEAD_SET_BASE_CHANNEL_USAGE_BOUNDS(head), 2); + nvDmaSetEvoMethodData(pChannel, baseUsage); + nvDmaSetEvoMethodData(pChannel, overlayUsage); + + return TRUE; +} + +static void EvoSetNotifierMethods90(NVDevEvoRec *pDevEvo, + NVEvoChannelPtr pChannel, + NvBool notify, + NvBool awaken, + NvU32 notifier) +{ + ASSERT_DRF_NUM(917D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier); + + if (notify) { + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDmaSetStartEvoMethod(pChannel, + NV917D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, + _SET_CONTEXT_DMA_NOTIFIER, + _HANDLE, + pDevEvo->core->notifiersDma[sd].ctxHandle)); + nvPopEvoSubDevMask(pDevEvo); + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, + NV917D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SET_CONTEXT_DMA_NOTIFIER, _HANDLE, 0)); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier) | + (awaken ? + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE_AWAKEN) : + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE)) | + (notify ? 
+ DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _NOTIFY, _ENABLE) : + DRF_DEF(917D, _SET_NOTIFIER_CONTROL, _NOTIFY, _DISABLE))); +} + +static void UpdateCore9x(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask) +{ + NvU32 head, value = 0; + + ct_assert(NV_EVO_CHANNEL_MASK_BASE__SIZE == + NV_EVO_CHANNEL_MASK_OVERLAY__SIZE); + for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE, + interlockChannelMask)) { + value |= DRF_IDX_DEF(917D, _UPDATE, + _INTERLOCK_WITH_BASE, head, _ENABLE); + } + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE, + interlockChannelMask)) { + value |= DRF_IDX_DEF(917D, _UPDATE, + _INTERLOCK_WITH_OVERLAY, head, _ENABLE); + } + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateBase91(NVEvoChannelPtr pChannel, + NvBool interlockWithCore, + NvBool vrrTearing) +{ + NvU32 updateValue = 0; + NvU32 trapParam = 0; + + if (interlockWithCore) { + updateValue |= DRF_DEF(917C, _UPDATE, _INTERLOCK_WITH_CORE, _ENABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SPARE_PRE_UPDATE_TRAP, 1); + nvDmaSetEvoMethodData(pChannel, trapParam); + + nvDmaSetStartEvoMethod(pChannel, NV917C_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, updateValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SPARE_POST_UPDATE_TRAP, 1); + nvDmaSetEvoMethodData(pChannel, trapParam); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateOverlay9x(NVEvoChannelPtr pChannel, + NvBool interlockWithCore) +{ + NvU32 value = 0; + + if (interlockWithCore) { + value |= DRF_DEF(917E, _UPDATE, _INTERLOCK_WITH_CORE, _ENABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NV917E_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, value); + + nvDmaKickoffEvo(pChannel); +} + +static void EvoUpdate91(NVDevEvoPtr pDevEvo, + const NVEvoUpdateState *updateState, + NvBool releaseElv) +{ + NvU32 sd; + NVEvoChannelMask fliplockedBaseChannels[NVKMS_MAX_SUBDEVICES] = { }; + NvBool updateAllFliplockedBaseChannels = FALSE; + + /* + * Multiple 'base + core channel interlocked' updates can create deadlock + * if heads are flip locked. + * + * For example - if head-0 and head-1 are flip locked and you initiate two + * 'base + core channel interlocked' updates separately for each of + * the head then that creates deadlock: + * + * + * +--------+ +--------+ +--------+ + * | BASE-0 | | CORE | | BASE-1 | + * +--------+ +--------+ +--------+ + * | | | | | | + * | | | | | | + * +--------+------+--------+ | | + * | INTERLOCKED | | | + * | UPDATE-0 | | | + * +--------+------+--------+ | | + * | Base | | Core | | | + * <...| update |<.... | Update | | | + * : | for | | for | | | + * : | head-0 | | head-0 | | | + * : +--------+------+--------+ | | + * : | | | ^ | | | + * : | | | : | | | + * : +--------+ | : | | | + * : | : | | | + * : +---(----+------+--------+ + * : | : INTERLOCKED | + * : | : UPDATE-1 | + * : +--------+------+--------+ + * V | Core | | Base | + * : | update |<.... | Update | + * : | for | | for |<... + * : | head-1 | | head-1 | : + * : +--------+------+--------+ : + * : | | | | ^ + * : +--------+ +--------+ : + * : : + * V...................>............................> + * + * ^ + * | + * | + * [ BASE-0 and BASE-1 are fliplocked ] + * + * Here you can follow the dotted arrow line and see how deadlock + * has been formed. The dotted arrow line indicates the execution + * dependency of the one update onto another, e.g. 
the core update + * for head-1 can't get executed unless the core update for head-0 + * gets executed. + * + * To prevent this deadlock, initiate the base channel updates for all flip + * locked heads if update state contains 'base + core channel interlocked' + * for the flip locked head. + */ + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoChannelMask updateChannelMask = updateState->subdev[sd].channelMask; + NVEvoChannelMask interlockChannelMask = + updateChannelMask & ~updateState->subdev[sd].noCoreInterlockMask; + NvU32 head; + + for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) { + NVEvoChannelMask thisMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE); + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock) { + fliplockedBaseChannels[sd] |= thisMask; + } + + /* + * If this update is updating only one base channel without any core + * interlock, in that case, we don't need to also update all flip + * locked base channels. + */ + if (NV_EVO_CHANNEL_MASK_POPCOUNT(interlockChannelMask) <= 1 && + !FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateChannelMask)) { + continue; + } + + if ((updateChannelMask & thisMask) != 0x0 && pHC->flipLock) { + updateAllFliplockedBaseChannels = TRUE; + } + } + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoChannelMask updateChannelMask = updateState->subdev[sd].channelMask | + (updateAllFliplockedBaseChannels ? fliplockedBaseChannels[sd] : 0x0); + NVEvoChannelMask interlockChannelMask = + updateChannelMask & ~updateState->subdev[sd].noCoreInterlockMask; + NvBool interlockWithCore = FALSE; + const NvU32 subDeviceMask = (1 << sd); + NvU32 head; + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + if (NV_EVO_CHANNEL_MASK_POPCOUNT(interlockChannelMask) > 1) { + /* We can only interlock updates if core is included. 
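+             * When that is the case, the code below pulls the core channel
+             * into both the update and interlock masks so the per-channel
+             * kickoffs are interlocked through core.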
*/ + nvAssert(!FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateState->subdev[sd].noCoreInterlockMask)); + updateChannelMask |= DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE); + interlockChannelMask |= + DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE); + interlockWithCore = TRUE; + } + + if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateChannelMask)) { + UpdateCore9x(pDevEvo->core, updateChannelMask); + } + + for (head = 0; head < NV_EVO_CHANNEL_MASK_OVERLAY__SIZE; head++) { + NVEvoChannelMask thisMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE); + if (updateChannelMask & thisMask) { + NvBool thisInterlockWithCore = interlockWithCore && + (interlockChannelMask & thisMask); + UpdateOverlay9x(pDevEvo->overlay[head], + thisInterlockWithCore); + } + } + + for (head = 0; head < NV_EVO_CHANNEL_MASK_BASE__SIZE; head++) { + NVEvoChannelMask thisMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE); + if (updateChannelMask & thisMask) { + NvBool thisInterlockWithCore = interlockWithCore && + (interlockChannelMask & thisMask); + NvBool vrrTearing = + updateState->subdev[sd].base[head].vrrTearing; + + UpdateBase91(pDevEvo->base[head], + thisInterlockWithCore, vrrTearing); + } + } + + nvPopEvoSubDevMask(pDevEvo); + } +} + +static void EvoSetNotifier90(NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + EvoSetNotifierMethods90(pDevEvo, pDevEvo->core, notify, awaken, notifier); +} + +/* + * Returns the data for the SET_STORAGE method. The method data + * format is the same between classes 90[CDE]. + */ +static NvU32 EvoComputeSetStorage90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + NvU32 setStorage; + + NvU32 pitch = nvEvoGetHeadSetStoragePitchValue( + pDevEvo, + pSurfaceEvo->layout, + pSurfaceEvo->planes[0].pitch); + nvAssert(pitch != 0); + + if (pSurfaceEvo->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + // 1 block = 1 * X Gobs; 1 Gob = 64B * 4Lines; X = 1 << + // blockHeightLog2Gobs + NvU32 blockHeight = pSurfaceEvo->log2GobsPerBlockY; + + setStorage = DRF_NUM(917D, _HEAD_SET_STORAGE, _BLOCK_HEIGHT, blockHeight) | + DRF_DEF(917D, _HEAD_SET_STORAGE, _MEMORY_LAYOUT, _BLOCKLINEAR); + } else { + setStorage = DRF_DEF(917D, _HEAD_SET_STORAGE, _MEMORY_LAYOUT, _PITCH); + } + + ASSERT_DRF_NUM(917D, _HEAD_SET_STORAGE, _PITCH, pitch); + setStorage |= DRF_NUM(917D, _HEAD_SET_STORAGE, _PITCH, pitch); + + return setStorage; +} + +static void SetCscMatrix(NVEvoChannelPtr pChannel, NvU32 method, + const struct NvKmsCscMatrix *matrix, + NvU32 extraFirstWordBits) +{ + int y; + + // The _COEFF fields are the same across all of the methods on all + // channels. + ct_assert(DRF_SHIFTMASK(NV917C_SET_CSC_RED2RED_COEFF) == + DRF_SHIFTMASK(NV917D_HEAD_SET_CSC_RED2RED_COEFF)); + ct_assert(DRF_SHIFTMASK(NV917C_SET_CSC_RED2RED_COEFF) == + DRF_SHIFTMASK(NV917E_SET_CSC_RED2RED_COEFF)); + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. 
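+            // Each coefficient occupies one 32-bit method; "method += 4"
+            // below advances to the next coefficient register, walking the
+            // 3x4 matrix row by row.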
+ NvU32 val = DRF_NUM(917C, _SET_CSC_RED2RED, _COEFF, + matrix->m[y][x]); + + if (x == 0 && y == 0) { + val |= extraFirstWordBits; + } + + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, val); + + method += 4; + } + } +} + +/* + * These values are the same between all base + * (_SURFACE_SET_PARAMS_FORMAT_) and core (_HEAD_SET_PARAMS_FORMAT_) + * EVO classes. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. + */ +static NvU32 nvHwFormatFromKmsFormat90( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return NV917D_HEAD_SET_PARAMS_FORMAT_I8; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NV917D_HEAD_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatR5G6B5: + return NV917D_HEAD_SET_PARAMS_FORMAT_R5G6B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NV917D_HEAD_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + return NV917D_HEAD_SET_PARAMS_FORMAT_A8B8G8R8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NV917D_HEAD_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + return NV917D_HEAD_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NV917D_HEAD_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +static void EvoSetSurface(NVDevEvoPtr pDevEvo, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsCscMatrix *pCscMatrix, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 sd; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) { + /* + * The EVO2 ->SetCursorImage() function programs cursor image surface + * only if NVEvoSubDeviceRec::pCoreChannelSurface is non-null. + */ + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] = pSurfaceEvo; + } FOR_EACH_SUBDEV_IN_MASK_END + + if (!pSurfaceEvo) { + // Disable surface scanout on this head. It will scan out the default + // base color instead. 
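+        // A zero handle detaches the ISO context DMA; the early return
+        // below skips the offset/size/storage/params methods used in the
+        // non-NULL case.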
+ nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMAS_ISO(head), 1); + nvDmaSetEvoMethodData(pChannel, 0); + return; + } + + nvAssert(pSurfaceEvo->planes[0].ctxDma); + + // XXX[AGP]: These methods are sequential, but sending them with a single + // count=7 method header sometimes causes EVO to throw an IsoViolation + // exception. + + // Set the surface parameters. + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OFFSET(head), 1); + nvDmaSetEvoMethodData(pChannel, + nvCtxDmaOffsetFromBytes(pSurfaceEvo->planes[0].offset)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_SIZE, _WIDTH, pSurfaceEvo->widthInPixels) | + DRF_NUM(917D, _HEAD_SET_SIZE, _HEIGHT, pSurfaceEvo->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_STORAGE(head), 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pSurfaceEvo)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PARAMS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_PARAMS, _FORMAT, + nvHwFormatFromKmsFormat90(pSurfaceEvo->format)) | + DRF_DEF(917D, _HEAD_SET_PARAMS, _SUPER_SAMPLE, _X1_AA) | + DRF_DEF(917D, _HEAD_SET_PARAMS, _GAMMA, _LINEAR)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMAS_ISO(head), 1); + nvDmaSetEvoMethodData(pChannel, pSurfaceEvo->planes[0].ctxDma); + + /* NULL => don't change the CSC. */ + if (pCscMatrix) { + SetCscMatrix(pChannel, NV917D_HEAD_SET_CSC_RED2RED(head), pCscMatrix, 0); + } +} + +static void +EvoPushSetCoreSurfaceMethodsForOneSd(NVDevEvoRec *pDevEvo, + const NvU32 sd, + const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsCscMatrix *pCscMatrix, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + const NVFlipCursorEvoHwState *pSdCursorState = &pSdHeadState->cursor; + + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const int dispIndex = pDispEvo->displayOwner; + NvU8 curLutIndex = pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex; + + NvBool enableOutputLut = + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled; + NvBool enableBaseLut = + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled; + + NVLutSurfaceEvoPtr pCurLutSurfEvo = + pDevEvo->lut.head[head].LUT[curLutIndex]; + NvU32 lutCtxdma = pCurLutSurfEvo != NULL ? + pCurLutSurfEvo->dispCtxDma : 0x0; + + if (pSurfaceEvo == NULL || pCurLutSurfEvo == NULL) { + enableOutputLut = FALSE; + enableBaseLut = FALSE; + lutCtxdma = 0x0; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + EvoSetSurface(pDevEvo, head, pSurfaceEvo, pCscMatrix, updateState); + + EvoSetCursorImage(pDevEvo, + head, + pSurfaceEvo != NULL ? + pSdCursorState->pSurfaceEvo : NULL, + updateState, + &pSdCursorState->cursorCompParams); + + /* + * EvoPushSetLUTContextDmaMethodsForOneSd() force enables base + * Lut if core scanout surface depth is 8. 
+ */ + EvoPushSetLUTContextDmaMethodsForOneSd( + pDevEvo, sd, head, lutCtxdma, enableBaseLut, enableOutputLut, + updateState); + + nvPopEvoSubDevMask(pDevEvo); +} + +static void +FlipBase90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + int eye; + + /* program notifier */ + + if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->completionNotifier.surface; + NvU32 value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917C, _SET_NOTIFIER_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917C, _SET_NOTIFIER_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917C, _SET_NOTIFIER_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + value = FLD_SET_DRF_NUM(917C, _SET_NOTIFIER_CONTROL, _MODE, + pHwState->completionNotifier.awaken ? + NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN : + NV917C_SET_NOTIFIER_CONTROL_MODE_WRITE, value); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + /* program semaphore */ + nvAssertSameSemaphoreSurface(pHwState); + + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->syncObject.u.semaphores.acquireSurface; + NvU32 value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917C, _SET_SEMAPHORE_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917C, _SET_SEMAPHORE_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917C, _SET_SEMAPHORE_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.acquireValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.releaseValue); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) { + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT]); + + // Disable base on this head. 
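+        // Clear both eyes' ISO context DMAs and present control, and hand
+        // CSC ownership back to the core channel before returning early.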
+ nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(0), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(1), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CSC_RED2RED, 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(917C, _SET_CSC_RED2RED, _OWNER, _CORE)); + return; + } + + NvU32 presentControl = + DRF_NUM(917C, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL, + pHwState->minPresentInterval); + + if (pHwState->tearing) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _IMMEDIATE, presentControl); + /* + * This avoids an invalid state exception: + * + * if ((SetPresentControl.BeginMode != NON_TEARING) && + * (SetPresentControl.BeginMode != AT_FRAME) + * && (wir_InterlockWithCore == ENABLE)) + * throw NV_DISP_BASE_STATE_ERROR_001; + */ + nvDisableCoreInterlockUpdateState(pDevEvo, updateState, pChannel); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _NON_TEARING, presentControl); + } + + if (pHwState->pSurfaceEvo[NVKMS_RIGHT]) { + if (pHwState->perEyeStereoFlip) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _STEREO_FLIP_MODE, _AT_ANY_FRAME, + presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _STEREO_FLIP_MODE, _PAIR_FLIP, + presentControl); + } + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _MODE, _STEREO, presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _MODE, _MONO, presentControl); + } + + // If we have a non-zero timestamp we need to enable timestamp mode + if (pHwState->timeStamp == 0) { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, _DISABLE, presentControl); + } else { + presentControl = FLD_SET_DRF(917C, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, _ENABLE, presentControl); + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_TIMESTAMP_ORIGIN_LO, 2); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, presentControl); + + SetCscMatrix(pChannel, NV917C_SET_CSC_RED2RED, &pHwState->cscMatrix, + DRF_DEF(917C, _SET_CSC_RED2RED, _OWNER, _BASE)); + + // Set the surface parameters. 
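+    // Offset and ISO context DMA are programmed per eye; size, storage and
+    // format are taken from the LEFT surface (the per-eye values are
+    // asserted to match below).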
+ FOR_ALL_EYES(eye) { + NvU32 ctxdma = 0; + NvU64 offset = 0; + + if (pHwState->pSurfaceEvo[eye]) { + ctxdma = pHwState->pSurfaceEvo[eye]->planes[0].ctxDma; + offset = pHwState->pSurfaceEvo[eye]->planes[0].offset; + } + + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_OFFSET(0, eye), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_OFFSET, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + + nvDmaSetStartEvoMethod(pChannel, NV917C_SET_CONTEXT_DMAS_ISO(eye), 1); + nvDmaSetEvoMethodData(pChannel, ctxdma); + } + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, widthInPixels); + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, heightInPixels); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_SIZE(0), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_SIZE, _WIDTH, + pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels) | + DRF_NUM(917C, _SURFACE_SET_SIZE, _HEIGHT, + pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels)); + + nvAssert(pHwState->sizeIn.width == pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels); + nvAssert(pHwState->sizeIn.height == pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels); + nvAssert(pHwState->sizeIn.width == pHwState->sizeOut.width); + nvAssert(pHwState->sizeIn.height == pHwState->sizeOut.height); + + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT] || + (EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_LEFT]) == + EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_RIGHT]))); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_STORAGE(0), 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pHwState->pSurfaceEvo[NVKMS_LEFT])); + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, format); + nvDmaSetStartEvoMethod(pChannel, NV917C_SURFACE_SET_PARAMS(0), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917C, _SURFACE_SET_PARAMS, _FORMAT, + nvHwFormatFromKmsFormat90(pHwState->pSurfaceEvo[NVKMS_LEFT]->format)) | + DRF_DEF(917C, _SURFACE_SET_PARAMS, _SUPER_SAMPLE, _X1_AA) | + DRF_DEF(917C, _SURFACE_SET_PARAMS, _GAMMA, _LINEAR)); + + nvAssert(pHwState->inputLut.pLutSurfaceEvo == NULL); +} + +static void +FlipOverlay90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NvBool *pInterlockwithCore) +{ + const NvU32 head = + NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask); + const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT]; + NvU32 value; + NvU32 sd; + + /* Overlay class 917E can't do stereo */ + nvAssert(!pHwState->pSurfaceEvo[NVKMS_RIGHT]); + + /* + * The NVKMS driver enforces these conditions on its clients: 1) enable a + * core-surface before enabling an overlay-surface, 2) disable an + * overlay-surface before disabling a core-surface. + * + * Updates to enable/disable a core and an overlay surface execute + * separately and are not interlocked. To avoid a race condition between a + * core and an overlay channel, detect an overlay channel update which is + * enabling/disabling an overlay-surface and interlock that update with a + * core channel update. + * + * This makes sure that an update to disable an overlay-surface interlocked + * with a core channel and a follow-on update to disable the core-surface + * will wait for the previous overlay flip to complete. It also makes sure + * that an update to enable an overlay-surface will wait for the previous + * core channel flip to complete. 
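+     * The transition is detected below by comparing the previously
+     * programmed overlay ISO context DMA against the new surface: any
+     * change between NULL and non-NULL sets *pInterlockwithCore.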
+ */ + + FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) { + NvBool prevCtxDmaIso = + pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head]; + + if ((prevCtxDmaIso != 0x0 && pSurfaceEvo == NULL) || + (prevCtxDmaIso == 0x0 && pSurfaceEvo != NULL)) { + *pInterlockwithCore = TRUE; + } + + if (pSurfaceEvo != NULL) { + pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head] = + pSurfaceEvo->planes[0].ctxDma; + pDevEvo->pSubDevices[sd]->overlaySurfFormat[head] = pSurfaceEvo->format; + + } else { + pDevEvo->pSubDevices[sd]->overlayContextDmaIso[head] = 0x0; + } + } FOR_EACH_SUBDEV_IN_MASK_END + + /* program notifier */ + + if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->completionNotifier.surface; + value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917E, _SET_NOTIFIER_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917E, _SET_NOTIFIER_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917E, _SET_NOTIFIER_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + value = FLD_SET_DRF_NUM(917E, _SET_NOTIFIER_CONTROL, _MODE, + pHwState->completionNotifier.awaken ? + NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE_AWAKEN : + NV917E_SET_NOTIFIER_CONTROL_MODE_WRITE, value); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + /* program semaphore */ + nvAssertSameSemaphoreSurface(pHwState); + + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->syncObject.u.semaphores.acquireSurface; + value = 0; + + if (pNIso->format == NVKMS_NISO_FORMAT_LEGACY) { + value = FLD_SET_DRF(917E, _SET_SEMAPHORE_CONTROL, _FORMAT, + _LEGACY, value); + } else { + value = FLD_SET_DRF(917E, _SET_SEMAPHORE_CONTROL, _FORMAT, + _FOUR_WORD, value); + } + + value = FLD_SET_DRF_NUM(917E, _SET_SEMAPHORE_CONTROL, _OFFSET, + pNIso->offsetInWords, value); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.acquireValue); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.releaseValue); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_TIMESTAMP_ORIGIN_LO, 2); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, 
NvU64_HI32(pHwState->timeStamp)); + + if (pHwState->timeStamp == 0) { + value = NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_ASAP; + } else { + value = NV917E_SET_PRESENT_CONTROL_BEGIN_MODE_TIMESTAMP; + } + nvAssert(!pHwState->tearing); + nvAssert(!pHwState->vrrTearing); + nvAssert(!pHwState->perEyeStereoFlip); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SET_PRESENT_CONTROL, _BEGIN_MODE, value) | + DRF_NUM(917E, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL, + pHwState->minPresentInterval)); + + if (!pSurfaceEvo) { + // Disable overlay on this head. + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMAS_ISO(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, 0); + return; + } + + nvAssert(pSurfaceEvo->planes[0].ctxDma); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SIZE_IN, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SET_SIZE_IN, _WIDTH, pHwState->sizeIn.width) | + DRF_NUM(917E, _SET_SIZE_IN, _HEIGHT, pHwState->sizeIn.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_SIZE_OUT, 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917E, _SET_SIZE_OUT, _WIDTH, + pHwState->sizeOut.width)); + + // Set the surface parameters. + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_OFFSET(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_OFFSET, _ORIGIN, + nvCtxDmaOffsetFromBytes(pSurfaceEvo->planes[0].offset))); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_SIZE, _WIDTH, pSurfaceEvo->widthInPixels) | + DRF_NUM(917E, _SURFACE_SET_SIZE, _HEIGHT, pSurfaceEvo->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_STORAGE, 1); + nvDmaSetEvoMethodData(pChannel, EvoComputeSetStorage90(pDevEvo, pSurfaceEvo)); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SURFACE_SET_PARAMS, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917E, _SURFACE_SET_PARAMS, _FORMAT, + EvoOverlayFormatFromKmsFormat91(pSurfaceEvo->format)) | + DRF_DEF(917E, _SURFACE_SET_PARAMS, _COLOR_SPACE, _RGB)); + + SetCscMatrix(pChannel, NV917E_SET_CSC_RED2RED, &pHwState->cscMatrix, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_CONTEXT_DMAS_ISO(NVKMS_LEFT), 1); + nvDmaSetEvoMethodData(pChannel, pSurfaceEvo->planes[0].ctxDma); + + nvAssert(pHwState->inputLut.pLutSurfaceEvo == NULL); +} + +static NvBool +needToReprogramCoreSurface(NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const NVSurfaceEvoRec *pNewSurfaceEvo) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const int dispIndex = pDispEvo->displayOwner; + NvBool enableBaseLut = + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled; + + const NVSurfaceEvoRec *pCurrCoreSurfaceEvo = + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head]; + const NvBool currIsBaseSurfSpecified = + pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head]; + const NvU32 currHeightInPixels = pCurrCoreSurfaceEvo != NULL ? + pCurrCoreSurfaceEvo->heightInPixels : 0; + const NvU32 currWidthInPixels = pCurrCoreSurfaceEvo != NULL ? + pCurrCoreSurfaceEvo->widthInPixels : 0; + const enum NvKmsSurfaceMemoryFormat currFormat = + pCurrCoreSurfaceEvo != NULL ? + pCurrCoreSurfaceEvo->format : NvKmsSurfaceMemoryFormatI8; + + const NvBool newIsBaseSurfSpecified = pNewSurfaceEvo != NULL; + const NvU32 newHeightInPixels = pNewSurfaceEvo != NULL ? + pNewSurfaceEvo->heightInPixels : 0; + const NvU32 newWidthInPixels = pNewSurfaceEvo != NULL ? 
+        pNewSurfaceEvo->widthInPixels : 0;
+    const enum NvKmsSurfaceMemoryFormat newFormat = pNewSurfaceEvo != NULL ?
+        pNewSurfaceEvo->format : NvKmsSurfaceMemoryFormatI8;
+
+    /* If base channel flips from NULL to non-NULL surface or vice-versa */
+    if (currIsBaseSurfSpecified != newIsBaseSurfSpecified) {
+        return TRUE;
+    }
+
+    /*
+     * Reprogram the core surface if the current and new base surfaces have
+     * different size or format. The format check is needed to enable/disable
+     * the input LUT when it is not explicitly enabled/disabled by the client
+     * and the base surface is flipping to or away from the I8 format.
+     */
+    if (newIsBaseSurfSpecified) {
+
+        if (newWidthInPixels != currWidthInPixels ||
+            newHeightInPixels != currHeightInPixels) {
+            return TRUE;
+        }
+
+        if (!enableBaseLut &&
+            newFormat != currFormat &&
+            (currFormat == NvKmsSurfaceMemoryFormatI8 ||
+             newFormat == NvKmsSurfaceMemoryFormatI8)) {
+            return TRUE;
+        }
+    }
+
+    return !currIsBaseSurfSpecified;
+}
+
+static void
+EvoPushUpdateCompositionIfNeeded(NVDevEvoPtr pDevEvo,
+                                 const NvU32 sd,
+                                 NVEvoChannelPtr pChannel,
+                                 const NVFlipChannelEvoHwState *pHwState,
+                                 NVEvoUpdateState *updateState,
+                                 NvBool bypassComposition)
+{
+    const NVSurfaceEvoRec *pNewSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT];
+    NvBool updateComposition = FALSE;
+    const NVFlipChannelEvoHwState *pBaseHwState = NULL;
+    const NVFlipChannelEvoHwState *pOverlayHwState = NULL;
+    NvU32 head = NV_INVALID_HEAD;
+
+    if (pNewSurfaceEvo == NULL) {
+        return;
+    }
+
+    /*
+     * Re-program the composition parameters if this is the first layer
+     * update, if the color key selection method has changed, or if the
+     * layer is using source color keying and the color key has changed.
+     */
+
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) {
+        head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask);
+        pOverlayHwState =
+            &pDevEvo->gpus[sd].headState[head].layer[NVKMS_OVERLAY_LAYER];
+        pBaseHwState = pHwState;
+
+        if ((!pDevEvo->pSubDevices[sd]->baseComp[head].initialized) ||
+
+            (pHwState->composition.colorKeySelect !=
+             pDevEvo->pSubDevices[sd]->baseComp[head].colorKeySelect) ||
+
+            ((pHwState->composition.colorKeySelect ==
+              NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) &&
+             (pNewSurfaceEvo->format !=
+              pDevEvo->pSubDevices[sd]->baseSurfFormat[head] ||
+              nvkms_memcmp(&pHwState->composition.colorKey,
+                           &pDevEvo->pSubDevices[sd]->baseComp[head].colorKey,
+                           sizeof(pHwState->composition.colorKey)) != 0))) {
+
+            pDevEvo->pSubDevices[sd]->baseComp[head].initialized = TRUE;
+            pDevEvo->pSubDevices[sd]->baseComp[head].colorKeySelect =
+                pHwState->composition.colorKeySelect;
+            pDevEvo->pSubDevices[sd]->baseComp[head].colorKey =
+                pHwState->composition.colorKey;
+            updateComposition = TRUE;
+        }
+    }
+
+    if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) {
+        head = NV_EVO_CHANNEL_MASK_OVERLAY_HEAD_NUMBER(pChannel->channelMask);
+        pBaseHwState =
+            &pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER];
+        pOverlayHwState = pHwState;
+
+        if ((!pDevEvo->pSubDevices[sd]->overlayComp[head].initialized) ||
+
+            (pHwState->composition.colorKeySelect !=
+             pDevEvo->pSubDevices[sd]->overlayComp[head].colorKeySelect) ||
+
+            ((pHwState->composition.colorKeySelect ==
+              NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) &&
+             (pNewSurfaceEvo->format !=
+              pDevEvo->pSubDevices[sd]->overlaySurfFormat[head] ||
+              nvkms_memcmp(&pHwState->composition.colorKey,
+                           &pDevEvo->pSubDevices[sd]->overlayComp[head].colorKey,
+                           sizeof(pHwState->composition.colorKey)) != 0))) {
+
+
pDevEvo->pSubDevices[sd]->overlayComp[head].initialized = TRUE; + pDevEvo->pSubDevices[sd]->overlayComp[head].colorKeySelect = + pHwState->composition.colorKeySelect; + pDevEvo->pSubDevices[sd]->overlayComp[head].colorKey = + pHwState->composition.colorKey; + updateComposition = TRUE; + } + } + + if (updateComposition) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoPushUpdateComposition(pDevEvo, head, pBaseHwState, pOverlayHwState, + updateState, bypassComposition); + nvPopEvoSubDevMask(pDevEvo); + } +} + +static void EvoFlip90(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvU32 sd; + + FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) { + EvoPushUpdateCompositionIfNeeded(pDevEvo, sd, pChannel, pHwState, + updateState, bypassComposition); + } FOR_EACH_SUBDEV_IN_MASK_END + + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0) { + const NvU32 head = + NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask); + + FOR_EACH_SUBDEV_IN_MASK(sd, nvPeekEvoSubDevMask(pDevEvo)) { + if (needToReprogramCoreSurface( + pDevEvo, + sd, + head, + pHwState->pSurfaceEvo[NVKMS_LEFT])) { + const struct NvKmsCscMatrix zeroCscMatrix = { 0 }; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoPushSetCoreSurfaceMethodsForOneSd(pDevEvo, sd, head, + pHwState->pSurfaceEvo[NVKMS_LEFT], + &zeroCscMatrix, updateState); + nvPopEvoSubDevMask(pDevEvo); + } + + if (pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) { + pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head] = TRUE; + pDevEvo->pSubDevices[sd]->baseSurfFormat[head] = + pHwState->pSurfaceEvo[NVKMS_LEFT]->format; + } else { + pDevEvo->pSubDevices[sd]->isBaseSurfSpecified[head] = FALSE; + } + } FOR_EACH_SUBDEV_IN_MASK_END + + FlipBase90(pDevEvo, pChannel, pHwState, updateState); + + if (pHwState->vrrTearing) { + int head = NV_EVO_CHANNEL_MASK_BASE_HEAD_NUMBER(pChannel->channelMask); + NvU32 sd, subDeviceMask = nvPeekEvoSubDevMask(pDevEvo); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDeviceMask & (1 << sd)) { + updateState->subdev[sd].base[head].vrrTearing = TRUE; + } + } + } + } else if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0) { + NvBool interlockWithCore = FALSE; + + FlipOverlay90(pDevEvo, pChannel, pHwState, &interlockWithCore); + + if (interlockWithCore) { + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + } + } else { + nvAssert(!"Unknown channel mask in EvoFlip90"); + } + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); +} + +static void EvoFlipTransitionWAR90(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState) +{ + /* Nothing to do pre-Turing */ +} + +/*! + * Pack the given abstract color key into a key and mask as required + * by the display engine. 
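+ * Only the components whose match flags are set contribute to the mask;
+ * for the 8 bpc and 10 bpc formats the hardware keys on a single alpha
+ * bit, which is packed into bit 31.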
+ * + * \param[in] format NVKMS format for the input surface + * \param[in] key NVKMS representation of a color key + * \param[out] pValue NV857E_SET_KEY_COLOR_COLOR value + * \param[out] pMask NV857E_SET_KEY_COLOR_MASK value + */ +static void EvoPackColorKey91(enum NvKmsSurfaceMemoryFormat format, + const NVColorKey key, + NvU32 *pValue, NvU32 *pMask) +{ + NvU32 value = 0, mask = 0; + switch (format) { + case NvKmsSurfaceMemoryFormatR5G6B5: + if (key.matchR) { + mask |= 0x1f << 11; + value |= (key.r & 0x1f) << 11; + } + if (key.matchG) { + mask |= 0x3f << 5; + value |= (key.g & 0x3f) << 5; + } + if (key.matchB) { + mask |= 0x1f << 0; + value |= (key.b & 0x1f) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + if (key.matchA) { + mask |= 0x1 << 15; + value |= (key.a & 0x1) << 15; + } + if (key.matchR) { + mask |= 0x1f << 10; + value |= (key.r & 0x1f) << 10; + } + if (key.matchG) { + mask |= 0x1f << 5; + value |= (key.g & 0x1f) << 5; + } + if (key.matchB) { + mask |= 0x1f << 0; + value |= (key.b & 0x1f) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + if (key.matchA) { + /* Only one bit of alpha is handled by the hw. */ + mask |= 0x1 << 31; + value |= (key.a ? 1:0) << 31; + } + if (key.matchR) { + mask |= 0xff << 16; + value |= (key.r & 0xff) << 16; + } + if (key.matchG) { + mask |= 0xff << 8; + value |= (key.g & 0xff) << 8; + } + if (key.matchB) { + mask |= 0xff << 0; + value |= (key.b & 0xff) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + if (key.matchA) { + /* Only one bit of alpha is handled by the hw. */ + mask |= 0x1 << 31; + value |= (key.a ? 1:0) << 31; + } + if (key.matchB) { + mask |= 0xff << 16; + value |= (key.b & 0xff) << 16; + } + if (key.matchG) { + mask |= 0xff << 8; + value |= (key.g & 0xff) << 8; + } + if (key.matchR) { + mask |= 0xff << 0; + value |= (key.r & 0xff) << 0; + } + break; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + if (key.matchA) { + /* Only one bit of alpha is handled by the hw. */ + mask |= 0x1 << 31; + value |= (key.a ? 
1:0) << 31; + } + if (key.matchB) { + mask |= 0x3ff << 20; + value |= (key.b & 0x3ff) << 20; + } + if (key.matchG) { + mask |= 0x3ff << 10; + value |= (key.g & 0x3ff) << 10; + } + if (key.matchR) { + mask |= 0x3ff << 0; + value |= (key.r & 0x3ff) << 0; + } + break; + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + nvAssert(!"Unhandled format in nvEvo1PackColorKey"); + break; + } + + *pMask = mask; + *pValue = value; +} + +static NvBool EvoOverlayCompositionControlFromNvKmsCompositionParams( + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NvU32 *pMode, + NvU32 *pColorKeyValue, + NvU32 *pColorKeyMask) +{ + const struct NvKmsCompositionParams *pBaseCompParams = + &pBaseHwState->composition; + const struct NvKmsCompositionParams *pOverlayCompParams = + &pOverlayHwState->composition; + + switch (pOverlayCompParams->colorKeySelect) { + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE: + if (pOverlayCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) { + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE; + *pColorKeyValue = *pColorKeyMask = 0; + } else { + return FALSE; + } + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC: + if ((pOverlayCompParams->blendingMode[0] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT)) { + + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_SOURCE_COLOR_VALUE_KEYING; + + if (pOverlayHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) { + EvoPackColorKey91(pOverlayHwState->pSurfaceEvo[NVKMS_LEFT]->format, + pOverlayCompParams->colorKey, + pColorKeyValue, + pColorKeyMask); + } else { + *pColorKeyValue = *pColorKeyMask = 0; + } + + } else { + return FALSE; + } + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST: + if ((pBaseCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE)) { + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_OPAQUE; + *pColorKeyValue = *pColorKeyMask = 0; + } else if ((pBaseCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) && + (pOverlayCompParams->blendingMode[1] == + NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) && + (pOverlayCompParams->blendingMode[0] == + 
NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT)) { + + *pMode = NV917E_SET_COMPOSITION_CONTROL_MODE_DESTINATION_COLOR_VALUE_KEYING; + + if (pBaseHwState->pSurfaceEvo[NVKMS_LEFT] != NULL) { + EvoPackColorKey91(pBaseHwState->pSurfaceEvo[NVKMS_LEFT]->format, + pBaseCompParams->colorKey, + pColorKeyValue, + pColorKeyMask); + } else { + *pColorKeyValue = *pColorKeyMask = 0; + } + + } else { + return FALSE; + } + break; + default: + return FALSE; + } + + return TRUE; +} + +static void +EvoPushUpdateComposition(NVDevEvoPtr pDevEvo, + const int head, + const NVFlipChannelEvoHwState *pBaseHwState, + const NVFlipChannelEvoHwState *pOverlayHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + + /* Composition is always programmed through the overlay channel. */ + NVEvoChannelPtr pChannel = pDevEvo->overlay[head]; + NvU32 colorKeyValue = 0, colorKeyMask = 0; + NvU32 compositionModeValue = 0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (!EvoOverlayCompositionControlFromNvKmsCompositionParams( + pBaseHwState, pOverlayHwState, + &compositionModeValue, + &colorKeyValue, + &colorKeyMask)) { + /* + * composition mode is validated during + * nvUpdateFlipEvoHwState(), so it should always be valid when + * we get here. + */ + nvAssert(!"Invalid composition params"); + return; + } + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, compositionModeValue); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_KEY_COLOR_LO, 2); + nvDmaSetEvoMethodData(pChannel, colorKeyValue); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917E_SET_KEY_MASK_LO, 2); + nvDmaSetEvoMethodData(pChannel, colorKeyMask); + nvDmaSetEvoMethodData(pChannel, 0); +} + +/* + * The LUT entries in INDEX_1025_UNITY_RANGE have 16 bits, with the + * black value at 24576, and the white at 49151. Since the effective + * range is 16384, we treat this as a 14-bit LUT. However, we need to + * clear the low 3 bits to WAR hardware bug 813188. This gives us + * 14-bit LUT values, but only 11 bits of precision. + */ +static inline NvU16 ColorToLUTEntry(NvU16 val) +{ + const NvU16 val14bit = val >> 2; + return (val14bit & ~7) + 24576; +} + +/* In INDEX_1025_UNITY_RANGE, the LUT indices for color depths with less + * than 10 bpc are the indices you'd have in 257-entry mode multiplied + * by four. So, you under-replicate all but the two least significant bits. + * Since when is EVO supposed to make sense? 
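+ * For example, at depth 8 or 24 below, palette index i maps to LUT entry
+ * (i << 2), so index 255 lands at entry 1020 of the 1025-entry table.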
+ */ +static void +EvoFillLUTSurface90(NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth) +{ + int i, lutIndex; + + switch (depth) { + case 15: + for (i = 0; i < nColorMapEntries; i++) { + lutIndex = PALETTE_DEPTH_SHIFT(i, 5) << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + break; + case 16: + for (i = 0; i < nColorMapEntries; i++) { + pLUTBuffer[PALETTE_DEPTH_SHIFT(i, 6) << 2].Green = ColorToLUTEntry(green[i]); + if (i < 32) { + lutIndex = PALETTE_DEPTH_SHIFT(i, 5) << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + } + break; + case 8: + case 24: + for (i = 0; i < nColorMapEntries; i++) { + lutIndex = i << 2; + pLUTBuffer[lutIndex].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[lutIndex].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[lutIndex].Blue = ColorToLUTEntry(blue[i]); + } + break; + case 30: + for (i = 0; i < nColorMapEntries; i++) { + pLUTBuffer[i].Red = ColorToLUTEntry(red[i]); + pLUTBuffer[i].Green = ColorToLUTEntry(green[i]); + pLUTBuffer[i].Blue = ColorToLUTEntry(blue[i]); + } + break; + default: + nvAssert(!"invalid depth"); + return; + } +} + +static void +EvoPushSetLUTContextDmaMethodsForOneSd(NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 ctxdma, + NvBool enableBaseLut, + const NvBool enableOutputLut, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU64 offset; + const NVSurfaceEvoRec *pCoreSurfaceEvo = + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head]; + const NvBool surfaceDepth8 = (pCoreSurfaceEvo != NULL) ? + (pCoreSurfaceEvo->format == NvKmsSurfaceMemoryFormatI8) : FALSE; + + nvAssert(nvPeekEvoSubDevMask(pDevEvo) == NVBIT(sd)); + + // Depth 8 requires the base LUT to be enabled. + if (ctxdma && !enableBaseLut && surfaceDepth8) { + // TODO: Is this still required? Callers should specify the LUT at + // modeset time now. + enableBaseLut = TRUE; + } + + nvAssert(ctxdma || (!enableBaseLut && !enableOutputLut)); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Program the base LUT */ + + offset = offsetof(NVEvoLutDataRec, base); + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_LO(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _MODE, _INDEX_1025_UNITY_RANGE) | + (enableBaseLut ? DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _ENABLE) : + DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _ENABLE, _DISABLE)) | + DRF_DEF(917D, _HEAD_SET_BASE_LUT_LO, _NEVER_YIELD_TO_BASE, _DISABLE)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_BASE_LUT_HI(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_BASE_LUT_HI, _ORIGIN, offset >> 8)); + + /* Program the output LUT */ + + offset = offsetof(NVEvoLutDataRec, output); + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OUTPUT_LUT_LO(head), 1); + nvDmaSetEvoMethodData(pChannel, + (enableOutputLut ? 
DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _ENABLE, _ENABLE) : + DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _ENABLE, _DISABLE)) | + DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _MODE, _INTERPOLATE_1025_UNITY_RANGE) | + DRF_DEF(917D, _HEAD_SET_OUTPUT_LUT_LO, _NEVER_YIELD_TO_BASE, _DISABLE)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OUTPUT_LUT_HI(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_OUTPUT_LUT_HI, _ORIGIN, offset >> 8)); + + /* Set the ctxdma that's used by both LUTs */ + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMA_LUT, _HANDLE, ctxdma)); + + /* + * Use this backdoor to disable "wide pipe" underreplication during + * expansion of color components into the display pipe. Underreplication + * of a non-zero 8-bit color to more than 8 bits causes lookups to fall + * between LUT entries in a 256-entry LUT, which we don't want. See bug + * 734919 for details. + * The "wide pipe" may also cause scanout of 8-bit data to an 8-bit OR to + * not be a straight passthrough (bug 895401). + */ + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _WIDE_PIPE_CRC, _DISABLE)); +} + +static void EvoSetLUTContextDma90(const NVDispEvoRec *pDispEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + + const NvBool coreChannelCtxDmaNonNull = + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] != NULL; + const NvU32 ctxdma = (pLutSurfEvo != NULL) ? pLutSurfEvo->dispCtxDma : 0; + + /* + * If the core channel doesn't have a scanout surface set, then setting the + * LUT context DMA will cause an exception. 
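+ * In that case the LUT programming is skipped entirely (see the early + * return below) rather than triggering that exception.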
+ */ + if (!coreChannelCtxDmaNonNull && ctxdma) { + return; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoPushSetLUTContextDmaMethodsForOneSd( + pDevEvo, sd, head, ctxdma, enableBaseLut, enableOutputLut, + updateState); + nvPopEvoSubDevMask(pDevEvo); +} + +#define NV_EVO2_CAP_GET_PIN(cl, n, pEvoCaps, word, name, idx, pCaps) \ + (pEvoCaps)->pin[(idx)].flipLock = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _FLIP_LOCK, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); \ + (pEvoCaps)->pin[(idx)].stereo = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _STEREO, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); \ + (pEvoCaps)->pin[(idx)].scanLock = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_##word, \ + _LOCK_PIN##name##USAGE, _SCAN_LOCK, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_##word]); + +/* Take the max of MAX_PIXELS_t_TAP422 and MAX_PIXELS_t_TAP444 */ +#define NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, x, t, pCaps) \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_##t##TAPS].maxPixelsVTaps = \ + NV_MAX(REF_VAL(NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x##_MAX_PIXELS##t##TAP422, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x]), \ + REF_VAL(NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x##_MAX_PIXELS##t##TAP444, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_HEAD##i##_##x])) + +#define NV_EVO2_CAP_GET_HEAD(cl, n, pEvoCaps, i, x, y, z, pCaps) \ + (pEvoCaps)->head[(i)].usable = TRUE; \ + (pEvoCaps)->head[(i)].scalerCaps.present = TRUE; \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, x, 5, pCaps); \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, y, 3, pCaps); \ + NV_EVO2_CAP_GET_HEAD_MAX_PIXELS(cl, n, pEvoCaps, i, z, 2, pCaps); \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_8TAPS].maxHDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_5TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_3TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_2TAPS].maxVDownscaleFactor = NV_U16_MAX; \ + (pEvoCaps)->head[(i)].scalerCaps.taps[NV_EVO_SCALER_2TAPS].maxHDownscaleFactor = NV_U16_MAX; + +#define NV_EVO2_CAP_GET_SOR(cl, n, pEvoCaps, i, x, y, pCaps) \ + (pEvoCaps)->sor[(i)].dualTMDS = \ + FLD_TEST_DRF(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_CAP_SOR##i##_##x, \ + _DUAL_TMDS, _TRUE, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_SOR##i##_##x]); \ + (pEvoCaps)->sor[(i)].maxTMDSClkKHz = \ + DRF_VAL(cl##_CORE_NOTIFIER_##n, _CAPABILITIES_CAP_SOR##i##_##y, _TMDS_LVDS_CLK_MAX, \ + (pCaps)[NV##cl##_CORE_NOTIFIER_##n##_CAPABILITIES_CAP_SOR##i##_##y]) * 10000; + +static void EvoParseCapabilityNotifier3(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + // Lock pins + // These magic numbers (5, 6, _A, etc.) are token-pasted into the + // NV917D_CORE_NOTIFIER_3_* macros and can't be autogenerated by the + // preprocessor. Architecture appears to have no plans to ever fix this. 
+ NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 0, 0x0, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 1, 0x1, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 2, 0x2, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 3, 0x3, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 4, 0x4, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 5, 0x5, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 6, 0x6, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 5, 7, 0x7, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, 8, 0x8, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, 9, 0x9, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _A, 0xa, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _B, 0xb, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _C, 0xc, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _D, 0xd, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _E, 0xe, pCaps); + NV_EVO2_CAP_GET_PIN(917D, 3, pEvoCaps, 6, _F, 0xf, pCaps); + + // Miscellaneous capabilities + pEvoCaps->misc.supportsInterlaced = TRUE; + pEvoCaps->misc.supportsSemiPlanar = FALSE; + pEvoCaps->misc.supportsPlanar = FALSE; + pEvoCaps->misc.supportsDSI = FALSE; + + // Heads + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 0, 53, 54, 55, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 1, 61, 62, 63, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 2, 69, 70, 71, pCaps); + NV_EVO2_CAP_GET_HEAD(917D, 3, pEvoCaps, 3, 77, 78, 79, pCaps); + + // SORs + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 0, 20, 21, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 1, 22, 23, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 2, 24, 25, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 3, 26, 27, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 4, 28, 29, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 5, 30, 31, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 6, 32, 33, pCaps); + NV_EVO2_CAP_GET_SOR(917D, 3, pEvoCaps, 7, 34, 35, pCaps); + + // Don't need any PIOR caps currently. +} + +static NvBool EvoGetCapabilities90(NVDevEvoPtr pDevEvo) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVDispEvoPtr pDispEvo; + unsigned int i, sd; + struct NvKmsRRParams rrParams = { NVKMS_ROTATION_0, FALSE, FALSE }; + NvU8 layer; + + nvAssert(nvPeekEvoSubDevMask(pDevEvo) == SUBDEVICE_MASK_ALL); + + /* Main layer position and size updates are not supported on EVO. */ + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + pDevEvo->caps.layerCaps[layer].supportsWindowMode = + (layer != NVKMS_MAIN_LAYER); + } + + pDevEvo->caps.cursorCompositionCaps = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES, + }, + }, + } + }; + + /* Base doesn't support any composition with underlying layers. 
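+ * (Hence the capability structure below advertises only OPAQUE blending + * for the main layer, with optional source color keying.)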
*/ + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].composition = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + }, + }; + + pDevEvo->caps.layerCaps[NVKMS_OVERLAY_LAYER].composition = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT), + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST] = { + .supportedBlendModes = { + [0] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT), + [1] = NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE), + }, + }, + }, + }; + + pDevEvo->caps.validLayerRRTransforms |= + NVBIT(NvKmsRRParamsToCapBit(&rrParams)); + + for (i = NvKmsSurfaceMemoryFormatMin; + i <= NvKmsSurfaceMemoryFormatMax; + i++) { + if (nvHwFormatFromKmsFormat90(i) != 0) { + pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + + if (EvoOverlayFormatFromKmsFormat91(i) != 0) { + pDevEvo->caps.layerCaps[NVKMS_OVERLAY_LAYER].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + } + + EvoSetNotifierMethods90(pDevEvo, + pChannel, + TRUE /* notify */, + TRUE /* awaken */, + 0 /* notifier */); + + /* Initialize the capability notifiers. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + nvWriteEvoCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_CAPABILITIES_4, + DRF_DEF(917D_CORE_NOTIFIER_3, _CAPABILITIES_4, _DONE, _FALSE)); + } + + /* Tell the hardware to fill in the notifier. 
*/ + nvDmaSetStartEvoMethod(pChannel, NV917D_GET_CAPABILITIES, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaKickoffEvo(pChannel); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NVEvoSubDevPtr pEvoSubDev; + volatile NvU32 *pCaps; + + nvEvoWaitForCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_CAPABILITIES_4, + DRF_BASE(NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE), + NV917D_CORE_NOTIFIER_3_CAPABILITIES_4_DONE_TRUE); + + pEvoSubDev = &pDevEvo->gpus[sd]; + pCaps = pDevEvo->core->notifiersDma[sd].subDeviceAddress[sd]; + + nvkms_memset(&pEvoSubDev->capabilities, 0, + sizeof(pEvoSubDev->capabilities)); + EvoParseCapabilityNotifier3(&pEvoSubDev->capabilities, pCaps); + } + + /* Reset notifier state so it isn't on for future updates */ + EvoSetNotifierMethods90(pDevEvo, + pChannel, + FALSE /* notify */, + FALSE /* awaken */, + 0 /* notifier */); + nvDmaKickoffEvo(pChannel); + + return TRUE; +} + +static void EvoSetViewportPointIn90(NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // Set the input viewport point + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_POINT_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_IN, _X, x) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_IN, _Y, y)); +} + +static void EvoSetOutputScaler90(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 setControlOutputScaler = 0; + NvU32 vTapsHw = 0, hTapsHw = 0; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + switch (pHeadState->vTaps) { + case NV_EVO_SCALER_5TAPS: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5; + break; + case NV_EVO_SCALER_3TAPS: + // XXX TAPS_3_ADAPTIVE instead? 
--> I think only allowed with interlaced + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_3; + break; + case NV_EVO_SCALER_2TAPS: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2; + break; + case NV_EVO_SCALER_8TAPS: + nvAssert(!"Unknown pHeadState->vTaps"); + // fall through + case NV_EVO_SCALER_1TAP: + vTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_1; + break; + } + switch (pHeadState->hTaps) { + case NV_EVO_SCALER_8TAPS: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_8; + break; + case NV_EVO_SCALER_2TAPS: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + break; + case NV_EVO_SCALER_5TAPS: + case NV_EVO_SCALER_3TAPS: + nvAssert(!"Unknown pHeadState->hTaps"); + // fall through + case NV_EVO_SCALER_1TAP: + hTapsHw = NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_1; + break; + } + setControlOutputScaler = + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _HORIZONTAL_TAPS, + hTapsHw) | + DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _VERTICAL_TAPS, + vTapsHw); + + if (pHeadState->attributes.imageSharpening.available) { + setControlOutputScaler = + FLD_SET_DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, + _HRESPONSE_BIAS, imageSharpeningValue, + setControlOutputScaler); + + setControlOutputScaler = + FLD_SET_DRF_NUM(917D, _HEAD_SET_CONTROL_OUTPUT_SCALER, + _VRESPONSE_BIAS, imageSharpeningValue, + setControlOutputScaler); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_OUTPUT_SCALER(head), 1); + nvDmaSetEvoMethodData(pChannel, setControlOutputScaler); +} + +static void EvoSetViewportInOut90(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* The input viewport shouldn't vary. 
*/ + nvAssert(pViewPortMin->in.width == pViewPort->in.width); + nvAssert(pViewPortMax->in.width == pViewPort->in.width); + nvAssert(pViewPortMin->in.height == pViewPort->in.height); + nvAssert(pViewPortMax->in.height == pViewPort->in.height); + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_IN, _WIDTH, pViewPort->in.width) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_IN, _HEIGHT, pViewPort->in.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_X, pViewPort->out.xAdjust) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_Y, pViewPort->out.yAdjust)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT, _WIDTH, pViewPort->out.width) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT, _HEIGHT, pViewPort->out.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MIN(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MIN, _WIDTH, pViewPortMin->out.width) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MIN, _HEIGHT, pViewPortMin->out.height)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_VIEWPORT_SIZE_OUT_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MAX, _WIDTH, pViewPortMax->out.width) | + DRF_NUM(917D, _HEAD_SET_VIEWPORT_SIZE_OUT_MAX, _HEIGHT, pViewPortMax->out.height)); + +} + + +/*! + * Compute the 917D_HEAD_SET_CONTROL_CURSOR method value. + * + * This function also validates that the given NVSurfaceEvoRec can be + * used as a cursor image. + * + * Pre-nvdisplay core channel classes have the same layout of the + * *7D_HEAD_SET_CONTROL_CURSOR method value. + + * + * \param[in] pDevEvo The device on which the cursor will be programmed. + * \param[in] pSurfaceEvo The surface to be used as the cursor image. + * \param[out] pValue The 917D_HEAD_SET_CONTROL_CURSOR method value. + + * \return If TRUE, the surface can be used as a cursor image, and + * pValue contains the method value. If FALSE, the surface + * cannot be used as a cursor image. + */ +NvBool nvEvoGetHeadSetControlCursorValue90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue) +{ + NvU32 value = 0; + + if (pSurfaceEvo == NULL) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _DISABLE); + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + goto done; + } else { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _ENABLE); + } + + /* The cursor must always be pitch. */ + + if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + return FALSE; + } + + /* + * The only supported cursor image memory format is A8R8G8B8. + */ + if (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA8R8G8B8) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + } else { + return FALSE; + } + + /* + * The cursor only supports a few image sizes. 
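+ * (32x32 and 64x64 are always accepted; 128x128 and 256x256 are accepted + * only when the cursor hardware reports a large enough maxSize -- see the + * checks below.)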
+ */ + if ((pSurfaceEvo->widthInPixels == 32) && + (pSurfaceEvo->heightInPixels == 32)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W32_H32); + } else if ((pSurfaceEvo->widthInPixels == 64) && + (pSurfaceEvo->heightInPixels == 64)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W64_H64); + } else if ((pDevEvo->cursorHal->caps.maxSize >= 128) && + (pSurfaceEvo->widthInPixels == 128) && + (pSurfaceEvo->heightInPixels == 128)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W128_H128); + } else if ((pDevEvo->cursorHal->caps.maxSize >= 256) && + (pSurfaceEvo->widthInPixels == 256) && + (pSurfaceEvo->heightInPixels == 256)) { + value |= DRF_DEF(927D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W256_H256); + } else { + return FALSE; + } + + /* + * Hard code the cursor hotspot. + */ + value |= DRF_NUM(927D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_Y, 0); + value |= DRF_NUM(927D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_X, 0); + +done: + + if (pValue != NULL) { + *pValue = value; + } + + return TRUE; +} + +static void EvoSetCursorImage(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 ctxdma = pSurfaceEvo ? pSurfaceEvo->planes[0].ctxDma : 0; + const NvU64 offset = pSurfaceEvo ? pSurfaceEvo->planes[0].offset : 0; + NvU32 headSetControlCursorValue = 0; + NvBool ret; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(pCursorCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE); + nvAssert(NVBIT(pCursorCompParams->blendingMode[1]) & + NV_EVO2_SUPPORTED_CURSOR_COMP_BLEND_MODES); + nvAssert(!pSurfaceEvo || ctxdma); + + ret = nvEvoGetHeadSetControlCursorValue90(pDevEvo, pSurfaceEvo, + &headSetControlCursorValue); + /* + * The caller should have already validated the surface, so there + * shouldn't be a failure. + */ + if (!ret) { + nvAssert(!"Could not construct HEAD_SET_CONTROL_CURSOR value"); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_PRESENT_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_PRESENT_CONTROL_CURSOR, _MODE, _MONO)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_OFFSETS_CURSOR(head, 0), 4); + // The cursor has its own context DMA. + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_OFFSETS_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_OFFSETS_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMAS_CURSOR, _HANDLE, ctxdma)); + // Always set the right cursor context DMA. + // HW will just ignore this if it is not in stereo cursor mode. 
+ nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMAS_CURSOR, _HANDLE, ctxdma)); + + switch (pCursorCompParams->blendingMode[1]) { + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + headSetControlCursorValue |= + DRF_DEF(917D, _HEAD_SET_CONTROL_CURSOR, _COMPOSITION, _ALPHA_BLEND); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + headSetControlCursorValue |= + DRF_DEF(917D, _HEAD_SET_CONTROL_CURSOR, _COMPOSITION, _PREMULT_ALPHA_BLEND); + break; + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "%s: composition mode %d not supported for cursor", + __func__, pCursorCompParams->blendingMode[1]); + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, headSetControlCursorValue); +} + +static void EvoSetCursorImage91(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!((nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)))) { + continue; + } + + /* + * Set up the cursor surface: a cursor surface is allowed only if + * there's a non-NULL ISO ctxdma. + */ + if (pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head] == NULL && + pSurfaceEvo != NULL) { + continue; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoSetCursorImage(pDevEvo, + head, + pSurfaceEvo, + updateState, + pCursorCompParams); + nvPopEvoSubDevMask(pDevEvo); + } +} + +static NvBool EvoValidateCursorSurface90(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + return nvEvoGetHeadSetControlCursorValue90(pDevEvo, pSurfaceEvo, NULL); +} + +/* + * The 'sourceFetchRect' parameter is ignored by this function because there are + * no format-dependent restrictions for the source fetch rectangle on EVO. 
+ */ +static NvBool EvoValidateWindowFormat90( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + const NvU32 hwFormat = nvHwFormatFromKmsFormat90(format); + + if (hwFormat == 0) { + return FALSE; + } + + if (hwFormatOut != NULL) { + *hwFormatOut = hwFormat; + } + + return TRUE; +} + +static void EvoInitCompNotifier3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvWriteEvoCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_DEF(917D_CORE_NOTIFIER_3, _COMPLETION_0, _DONE, _FALSE)); +} + +static NvBool EvoIsCompNotifierComplete3(NVDispEvoPtr pDispEvo, int idx) { + return nvEvoIsCoreNotifierComplete(pDispEvo, + NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_BASE(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE); +} + +static void EvoWaitForCompNotifier3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvEvoWaitForCoreNotifier(pDispEvo, NV917D_CORE_NOTIFIER_3_COMPLETION_0 + idx, + DRF_BASE(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + DRF_EXTENT(NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE), + NV917D_CORE_NOTIFIER_3_COMPLETION_0_DONE_TRUE); +} + +static void EvoSetDither91(NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 ditherControl; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enabled) { + ditherControl = DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _ENABLE); + + switch (type) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _BITS, _DITHER_TO_6_BITS); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _BITS, _DITHER_TO_8_BITS); + break; + default: + nvAssert(!"Unknown ditherType"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF: + ditherControl = NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE; + break; + } + + } else { + ditherControl = DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _DISABLE); + } + + switch (algo) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_ERR_ACC); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _TEMPORAL); + break; + default: + nvAssert(!"Unknown DitherAlgo"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN: + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC: + ditherControl |= + DRF_DEF(917D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_ERR_ACC); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_DITHER_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, ditherControl); +} + +static void EvoSetStallLock94(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if 
(enable) { + nvDmaSetStartEvoMethod(pChannel, NV947D_HEAD_SET_STALL_LOCK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _ENABLE, _TRUE) | + DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _MODE, _ONE_SHOT) | + DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, _UNSPECIFIED) | + DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _UNSTALL_MODE, _LINE_LOCK)); + } else { + nvDmaSetStartEvoMethod(pChannel, NV947D_HEAD_SET_STALL_LOCK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(947D, _HEAD_SET_STALL_LOCK, _ENABLE, _FALSE)); + } +} + +static NvBool ForceIdleBaseChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd) +{ + NV5070_CTRL_CMD_STOP_BASE_PARAMS stopParams = { }; + NvNotification *pNotifyData = pChannel->notifiersDma[sd].subDeviceAddress[sd]; + NvU64 startTime = 0; + const NvU32 timeout = 2000000; // 2 seconds + NvU32 ret; + + nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0); + + pNotifyData->status = NV5070_NOTIFICATION_STATUS_IN_PROGRESS; + + stopParams.base.subdeviceIndex = sd; + stopParams.channelInstance = pChannel->instance; + stopParams.notifyMode = NV5070_CTRL_CMD_STOP_BASE_NOTIFY_MODE_WRITE; + stopParams.hNotifierCtxDma = pChannel->notifiersDma[sd].ctxHandle; + stopParams.offset = 0; + stopParams.hEvent = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_STOP_BASE, + &stopParams, sizeof(stopParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"STOP_BASE failed"); + return FALSE; + } + + do { + if (pNotifyData->status == NV5070_NOTIFICATION_STATUS_DONE_SUCCESS) { + break; + } + + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + nvAssert(!"STOP_BASE timed out"); + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + +static NvBool ForceIdleOverlayChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd) +{ + NV5070_CTRL_CMD_STOP_OVERLAY_PARAMS stopParams = { }; + NvNotification *pNotifyData = pChannel->notifiersDma[sd].subDeviceAddress[sd]; + NvU64 startTime = 0; + const NvU32 timeout = 2000000; // 2 seconds + NvU32 ret; + + nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0); + + pNotifyData->status = NV5070_NOTIFICATION_STATUS_IN_PROGRESS; + + stopParams.base.subdeviceIndex = sd; + stopParams.channelInstance = pChannel->instance; + stopParams.notifyMode = NV5070_CTRL_CMD_STOP_OVERLAY_NOTIFY_MODE_WRITE; + stopParams.hNotifierCtxDma = pChannel->notifiersDma[sd].ctxHandle; + stopParams.offset = 0; + stopParams.hEvent = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_STOP_OVERLAY, + &stopParams, sizeof(stopParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"STOP_OVERLAY failed"); + return FALSE; + } + + do { + if (pNotifyData->status == NV5070_NOTIFICATION_STATUS_DONE_SUCCESS) { + break; + } + + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + nvAssert(!"STOP_OVERLAY timed out"); + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + +static NvBool EvoForceIdleSatelliteChannel90( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState) +{ + NvU32 head, sd; + NvBool ret = TRUE; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + /* + * Forcing a channel to be idle is currently only implemented for + * base. 
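+ * (Overlay channels are handled here as well; see the channel mask check + * and the ForceIdleOverlayChannel() call below.)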
+ */ + if ((idleChannelState->subdev[sd].channelMask & + ~(NV_EVO_CHANNEL_MASK_BASE_ALL | + NV_EVO_CHANNEL_MASK_OVERLAY_ALL)) != 0) { + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Forcing channel idle only implemented for base and overlay"); + return FALSE; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVEvoChannelMask thisBaseMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE); + const NVEvoChannelMask thisOverlayMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE); + + if (idleChannelState->subdev[sd].channelMask & + thisBaseMask) { + + NVEvoChannelPtr pBaseChannel = pDevEvo->base[head]; + + if (!ForceIdleBaseChannel(pDevEvo, pBaseChannel, sd)) { + ret = FALSE; + } + } + + if (idleChannelState->subdev[sd].channelMask & + thisOverlayMask) { + + NVEvoChannelPtr pOverlayChannel = pDevEvo->overlay[head]; + + if (!ForceIdleOverlayChannel(pDevEvo, pOverlayChannel, sd)) { + ret = FALSE; + } + } + } + } + + return ret; +} + +static NvBool EvoAllocRmCtrlObject90(NVDevEvoPtr pDevEvo) +{ + /* Nothing to do for pre-nvdisplay */ + return TRUE; +} + +static void EvoFreeRmCtrlObject90(NVDevEvoPtr pDevEvo) +{ + /* Nothing to do for pre-nvdisplay */ +} + +static void EvoSetImmPointOut91(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NVEvoUpdateState *updateState, + NvU16 x, NvU16 y) +{ + GK104DispOverlayImmControlPio *pOverlayImm = + pChannel->imm.u.pio->control[sd]; + + /* The only immediate channel we have is overlay. */ + nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_OVERLAY_ALL) != 0); + nvAssert(pChannel->imm.type == NV_EVO_IMM_CHANNEL_PIO); + nvAssert(pOverlayImm != NULL); + + /* Left eye */ + pOverlayImm->SetPointsOut[0] = + DRF_NUM(917B, _SET_POINTS_OUT, _X, x) | + DRF_NUM(917B, _SET_POINTS_OUT, _Y, y); + + pOverlayImm->Update = + DRF_DEF(917B, _UPDATE, _INTERLOCK_WITH_CORE, _DISABLE); +} + +static void EvoStartHeadCRC32Capture90(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NVConnectorEvoPtr pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + NvU32 head, + NvU32 sd, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 dmaCtx = pDma->ctxHandle; + NvU32 orOutput = 0; + + /* These method should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + orOutput = + NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_DAC(orIndex); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + if (protocol == NVKMS_PROTOCOL_SOR_DP_A || + protocol == NVKMS_PROTOCOL_SOR_DP_B) { + orOutput = + NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SF(head); + } else { + orOutput = + NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_SOR(orIndex); + } + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + orOutput = + NV917D_HEAD_SET_CRC_CONTROL_PRIMARY_OUTPUT_PIOR(orIndex); + break; + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_CRC(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CONTEXT_DMA_CRC, _HANDLE, dmaCtx)); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, orOutput) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, _CORE) | + DRF_DEF(917D, 
_HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _TIMESTAMP_MODE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _FLIPLOCK_MODE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE)); + + /* Reset the CRC notifier */ + nvEvoResetCRC32Notifier(pDma->subDeviceAddress[sd], + NV917D_NOTIFIER_CRC_1_STATUS_0, + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE), + NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE); +} + +static void EvoStopHeadCRC32Capture90(NVDevEvoPtr pDevEvo, + NvU32 head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These method should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CONTEXT_DMA_CRC(head), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NV917D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _PRIMARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _SECONDARY_OUTPUT, _NONE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, _CORE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _TIMESTAMP_MODE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _FLIPLOCK_MODE, _FALSE) | + DRF_DEF(917D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE)); +} + +/*! + * Queries the current head's CRC Notifier and returns values if successful + * + * First waits for hardware to finish writing to the CRC32Notifier, + * and performs a read of the Compositor and SF/OR CRCs in numCRC32 frames + * Crc fields in input array crc32 should be calloc'd to 0s. 
+ * + * \param[in] pDevEvo NVKMS device pointer + * \param[in] pDma Pointer to DMA-mapped memory + * \param[in] sd Subdevice index + * \param[in] entry_count Number of independent frames to read CRCs from + * \param[out] crc32 Contains pointers to CRC output arrays + * \param[out] numCRC32 Number of CRC frames successfully read from DMA + * + * \return Returns TRUE if was able to successfully read CRCs from DMA, + * otherwise FALSE + */ +static NvBool EvoQueryHeadCRC32_90(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32, + NvU32 *numCRC32) +{ + volatile NvU32 *pCRC32Notifier = pDma->subDeviceAddress[sd]; + const NvU32 entry_stride = + NV917D_NOTIFIER_CRC_1_CRC_ENTRY1_8 - NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4; + // Define how many/which variables to read from each CRCNotifierEntry struct + const CRC32NotifierEntryRec field_info[NV_EVO2_NUM_CRC_FIELDS] = { + { + .field_offset = NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3, + .field_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC), + .field_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_3_COMPOSITOR_CRC), + .field_frame_values = crc32->compositorCrc32 + }, + { + .field_offset = NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4, + .field_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC), + .field_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_CRC_ENTRY0_4_PRIMARY_OUTPUT_CRC), + .field_frame_values = crc32->outputCrc32 + } + }; + const CRC32NotifierEntryFlags flag_info[NV_EVO2_NUM_CRC_FLAGS] = { + { + .flag_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT), + .flag_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_COUNT), + .flag_type = NVEvoCrc32NotifierFlagCount + }, + { + .flag_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + }, + { + .flag_base_bit = + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + } + }; + + if (!nvEvoWaitForCRC32Notifier(pCRC32Notifier, + NV917D_NOTIFIER_CRC_1_STATUS_0, + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE), + DRF_EXTENT(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE), + NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_TRUE)) { + return FALSE; + } + + *numCRC32 = nvEvoReadCRC32Notifier(pCRC32Notifier, + entry_stride, + entry_count, + NV917D_NOTIFIER_CRC_1_STATUS_0, /* Status offset */ + NV_EVO2_NUM_CRC_FIELDS, + NV_EVO2_NUM_CRC_FLAGS, + field_info, + flag_info); + + + nvEvoResetCRC32Notifier(pCRC32Notifier, + NV917D_NOTIFIER_CRC_1_STATUS_0, + DRF_BASE(NV917D_NOTIFIER_CRC_1_STATUS_0_DONE), + NV917D_NOTIFIER_CRC_1_STATUS_0_DONE_FALSE); + + return TRUE; +} + +static void EvoGetScanLine90(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + const void *pDma = pDevEvo->base[head]->pb.control[sd]; + NvU32 scanLine = nvDmaLoadPioMethod(pDma, NV917C_GET_SCANLINE); + + /* + * This method immediately returns the value of the scanline currently being + * read by the DMI. This method is a channel method so it operates + * completely asynchronously from the processing of methods in the + * pushbuffer. A negative value indicate that the DMI is in vertical + * blanking. 
Note that this is a PIO method that executes immediately. The + * coding of this value is as follows: + * If Line[15] == 0 (positive value) + * then Line[14:0] is the post-aa resolved line currently being read by + * the DMI. + * If Line[15] == 1 (negative value) + * then Line[14:0] is the number of microseconds remaining in the vertical + * blanking interval. + * Examples: + * Line = 0x0192 - DMI is reading line 402 of the current buffer. + * Line = 0x8023 - DMI is 35 uS from the end of vertical blanking. + */ + + if ((scanLine & NVBIT(15)) == 0) { + *pInBlankingPeriod = FALSE; + *pScanLine = scanLine & DRF_MASK(14:0); + } else { + *pInBlankingPeriod = TRUE; + } +} + +static NvU32 EvoGetActiveViewportOffset94(NVDispEvoRec *pDispEvo, NvU32 head) +{ + NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE_PARAMS params = {0}; + NvU32 ret; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.base.subdeviceIndex = pDispEvo->displayOwner; + params.head = head; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_ACTIVE_VIEWPORT_BASE, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to query active viewport offset"); + } + + return params.activeViewportBase; +} + +static void +EvoClearSurfaceUsage91(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo) +{ + NvU32 sd; + NvBool kickOff = FALSE; + NVEvoUpdateState updateState = { 0 }; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + const struct NvKmsCscMatrix zeroCscMatrix = { 0 }; + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + + /* + * In background, if the given surface is used for the core surface + * programming to satisfy the EVO hardware constraints then clear + * that usage. Reuse the client specified base surface for the core + * channel programming. + */ + if (pSurfaceEvo != + pDevEvo->pSubDevices[sd]->pCoreChannelSurface[head]) { + continue; + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + EvoPushSetCoreSurfaceMethodsForOneSd(pDevEvo, sd, head, + pSdHeadState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT], + &zeroCscMatrix, &updateState); + nvPopEvoSubDevMask(pDevEvo); + kickOff = TRUE; + } + } + + if (kickOff) { + EvoUpdate91(pDevEvo, &updateState, TRUE /* releaseElv */); + } +} + +static NvBool EvoComputeWindowScalingTaps91(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState) +{ + /* Window scaling isn't supported on EVO. 
*/ + if ((pHwState->sizeIn.width != pHwState->sizeOut.width) || + (pHwState->sizeIn.height != pHwState->sizeOut.height)) + { + return FALSE; + } + + pHwState->hTaps = NV_EVO_SCALER_1TAP; + pHwState->vTaps = NV_EVO_SCALER_1TAP; + + return TRUE; +} + +static NvU32 GetAccelerators( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd) +{ + NV5070_CTRL_GET_ACCL_PARAMS params = { }; + NvU32 ret; + + params.base.subdeviceIndex = sd; + params.channelClass = pChannel->hwclass; + nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL); + params.channelInstance = pChannel->instance; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_GET_ACCL, + &params, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to retrieve accelerators"); + return 0; + } + + return params.accelerators; +} + +static NvBool SetAccelerators( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NvU32 accelerators, + NvU32 accelMask) +{ + NV5070_CTRL_SET_ACCL_PARAMS params = { }; + NvU32 ret; + + params.base.subdeviceIndex = sd; + params.channelClass = pChannel->hwclass; + nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL); + params.channelInstance = pChannel->instance; + params.accelerators = accelerators; + params.accelMask = accelMask; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_ACCL, + &params, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set accelerators"); + return FALSE; + } + + return TRUE; +} + +static void EvoAccelerateChannel91(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + NvU32 *pOldAccelerators) +{ + /* Start with a conservative set of accelerators; may need to add more + * later. */ + const NvU32 accelMask = + NV5070_CTRL_ACCL_IGNORE_PI | + NV5070_CTRL_ACCL_SKIP_SEMA | + NV5070_CTRL_ACCL_IGNORE_FLIPLOCK; + + *pOldAccelerators = GetAccelerators(pDevEvo, pChannel, sd); + + /* Accelerate window channel. */ + if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +static void EvoResetChannelAccelerators91(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + NvU32 oldAccelerators) +{ + /* Start with a conservative set of accelerators; may need to add more + * later. */ + const NvU32 accelMask = + NV5070_CTRL_ACCL_IGNORE_PI | + NV5070_CTRL_ACCL_SKIP_SEMA | + NV5070_CTRL_ACCL_IGNORE_FLIPLOCK; + + /* Accelerate window channel. 
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccelerators, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +NVEvoHAL nvEvo94 = { + EvoSetRasterParams91, /* SetRasterParams */ + EvoSetProcAmp90, /* SetProcAmp */ + EvoSetHeadControl90, /* SetHeadControl */ + EvoSetHeadRefClk90, /* SetHeadRefClk */ + EvoHeadSetControlOR90, /* HeadSetControlOR */ + EvoORSetControl90, /* ORSetControl */ + EvoHeadSetDisplayId90, /* HeadSetDisplayId */ + EvoSetUsageBounds90, /* SetUsageBounds */ + EvoUpdate91, /* Update */ + nvEvo1IsModePossible, /* IsModePossible */ + nvEvo1PrePostIMP, /* PrePostIMP */ + EvoSetNotifier90, /* SetNotifier */ + EvoGetCapabilities90, /* GetCapabilities */ + EvoFlip90, /* Flip */ + EvoFlipTransitionWAR90, /* FlipTransitionWAR */ + EvoFillLUTSurface90, /* FillLUTSurface */ + EvoSetLUTContextDma90, /* SetLUTContextDma */ + EvoSetOutputScaler90, /* SetOutputScaler */ + EvoSetViewportPointIn90, /* SetViewportPointIn */ + EvoSetViewportInOut90, /* SetViewportInOut */ + EvoSetCursorImage91, /* SetCursorImage */ + EvoValidateCursorSurface90, /* ValidateCursorSurface */ + EvoValidateWindowFormat90, /* ValidateWindowFormat */ + EvoInitCompNotifier3, /* InitCompNotifier */ + EvoIsCompNotifierComplete3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifier3, /* WaitForCompNotifier */ + EvoSetDither91, /* SetDither */ + EvoSetStallLock94, /* SetStallLock */ + NULL, /* SetDisplayRate */ + EvoInitChannel90, /* InitChannel */ + NULL, /* InitDefaultLut */ + EvoInitWindowMapping90, /* InitWindowMapping */ + nvEvo1IsChannelIdle, /* IsChannelIdle */ + nvEvo1IsChannelMethodPending, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannel90, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannel91, /* AccelerateChannel */ + EvoResetChannelAccelerators91, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObject90, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObject90, /* FreeRmCtrlObject */ + EvoSetImmPointOut91, /* SetImmPointOut */ + EvoStartHeadCRC32Capture90, /* StartCRC32Capture */ + EvoStopHeadCRC32Capture90, /* StopCRC32Capture */ + EvoQueryHeadCRC32_90, /* QueryCRC32 */ + EvoGetScanLine90, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + nvEvo1SetDscParams, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffset94, /* GetActiveViewportOffset */ + EvoClearSurfaceUsage91, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTaps91, /* ComputeWindowScalingTaps */ + NULL, /* GetWindowScalingCaps */ + { /* caps */ + FALSE, /* supportsNonInterlockedUsageBoundsUpdate */ + FALSE, /* supportsDisplayRate */ + TRUE, /* supportsFlipLockRGStatus */ + FALSE, /* needDefaultLutSurface */ + FALSE, /* hasUnorm10OLUT */ + TRUE, /* supportsDigitalVibrance */ + TRUE, /* supportsImageSharpening */ + FALSE, /* supportsHDMIVRR */ + TRUE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + TRUE, /* supportsCoreLut */ + FALSE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + TRUE, /* requiresScalingTapsInBothDimensions */ + NV_EVO2_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NV5070_CTRL_CMD_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_1TAP, /* minScalerTaps */ + }, +}; diff --git a/src/nvidia-modeset/src/nvkms-evo3.c b/src/nvidia-modeset/src/nvkms-evo3.c new file mode 100644 index 
000000000..76458f06d --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-evo3.c @@ -0,0 +1,6965 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file contains implementations of the EVO HAL methods for display class + * 3.x (also known as "nvdisplay"). + */ + +#include "nvkms-dma.h" +#include "nvkms-types.h" +#include "nvkms-rmapi.h" +#include "nvkms-surface.h" +#include "nvkms-softfloat.h" +#include "nvkms-evo.h" +#include "nvkms-evo1.h" +#include "nvkms-modeset-types.h" +#include "nvkms-prealloc.h" +#include "nv-float.h" + +#include + +#include // NVC372_DISPLAY_SW +#include // NVC373_DISP_CAPABILITIES +#include // NVC37B_WINDOW_IMM_CHANNEL_DMA +#include // NVC37D_CORE_CHANNEL_DMA +#include // NVC37D_NOTIFIER_CRC +#include // NVC37D_HEAD_SET_SW_SPARE_* +#include // NVC37E_WINDOW_CHANNEL_DMA +#include // NVC573_DISP_CAPABILITIES +#include // NVC57D_CORE_CHANNEL_DMA +#include // NVC57E_WINDOW_CHANNEL_DMA +#include +#include // NVC673_DISP_CAPABILITIES +#include // NVC67D_CORE_CHANNEL_DMA +#include // NVC67E_WINDOW_CHANNEL_DMA + +#include +#include +#include + +/** Number of CRCs supported by hardware on NVC37D hardware (SF/SOR, Comp, RG) */ +#define NV_EVO3_NUM_CRC_FIELDS 3 + +/** Number of CRCs supported by hardware on NVC37D hardware SF/SOR, Comp, RG Ovf and Count */ +#define NV_EVO3_NUM_CRC_FLAGS 4 + +static NvBool EvoIsChannelIdleC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result); + +static void SetCsc00MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix); +static void SetCsc11MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix); +static void +UpdateCompositionC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState); +static void +UpdateCompositionC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + +ct_assert(NV_EVO_LOCK_PIN_0 > + NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK__SIZE_1); + +/* nvdisplay has a maximum of 2 eyes and 3 planes per surface */ +ct_assert((NVKMS_MAX_EYES * NVKMS_MAX_PLANES_PER_SURFACE) == 6); + +#define NV_EVO3_SUPPORTED_DITHERING_MODES \ + ((1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_AUTO) | \ + 
(1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_DYNAMIC_2X2) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_STATIC_2X2) | \ + (1 << NV_KMS_DPY_ATTRIBUTE_REQUESTED_DITHERING_MODE_TEMPORAL)) + +#define NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES \ + ((1 << NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA)) + +/* Windows support all composition modes. */ +#define NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES \ + ((1 << NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA) | \ + (1 << NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA)) + +#define NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3 \ + (DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_LUT, _USAGE_1025) | \ + DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | \ + DRF_DEF(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE)) + +#define NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5 \ + (DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _ILUT_ALLOWED, _TRUE) | \ + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _TMO_LUT_ALLOWED, _FALSE)) + +static inline NvU8 EyeAndPlaneToCtxDmaIdx(const NvU8 eye, const NvU8 plane) +{ + /* + * See the definition of the SetContextDmaIso and SetOffset methods in the + * relevant nvdClass_01.mfs file to see how these method array indices are + * mapped. + */ + nvAssert((eye < NVKMS_MAX_EYES) && (plane < NVKMS_MAX_PLANES_PER_SURFACE)); + + return eye + (plane << 1); +} + +static void InitChannelCapsC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel) +{ + if ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0) { + static const NVEvoChannelCaps WindowCaps = { + /* + * Window classes always support timestamp flips, and allow full + * use of the 64-bit timestamp value. + */ + .validTimeStampBits = 64, + /* Window classes always support tearing flips. */ + .tearingFlips = TRUE, + .vrrTearingFlips = TRUE, + /* Window classes support per-eye stereo flips. */ + .perEyeStereoFlips = TRUE, + }; + + pChannel->caps = WindowCaps; + } +} + +// HW supports ratio = 1, 2 (downscaling), 4 (downscaling) +#define NUM_SCALER_RATIOS 3 + +// There are 16 phases stored in matrix, but HW can derive the values of phase +// +16 and -16 from phase 0. Therefore, SW loads phase +16/-16 in phase 0 coeff +// values. +// coeff values in phase 0. +#define NUM_TAPS5_COEFF_PHASES 16 + +// There are 5 coefficient values per phase (or matrix row), but SW doesn't need +// to upload c2. So, the value here is set to 4. +#define NUM_TAPS5_COEFF_VALUES 4 + +// The coefficient values are obtained from bug 1953108 comment 10 +// Per MFS: However since all 5 coefficients have to add up to 1.0, only 4 need to be specified, and +// HW can derive the missing one. The center coefficient is the one that is left out, so +// if the 5 taps need weights (c0, c1, c2, c3, c4) then only (c0, c1, c3, c4) are stored, +// and c2 is calculated by HW. +// Phase 0 is the center phase and the corresponding filter kernel is symmetrical: +// c0=c4, c1=c3 --> only c0 and c1 need to be stored. 
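+// (Illustrative check: the phase-0 kernels each sum to 256 -- e.g. ratio=2 is +// 3 + 60 + 130 + 60 + 3 -- so 256 appears to represent 1.0 in the fixed-point +// coefficient format.)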
+// Phase 16 (and -16) is the edge phase and the corresponding filter kernels are: +// (0, c0, c1, c1, c0) for phase +16 +// (c0, c1, c1, c0, 0) for phase -16 +// The difference between +16 and -16 is automatically handled by HW. The table only needs +// to store c0 and c1 for either case. +// Therefore, based on MFS above, the matrix below contains the values loaded to HW. +// Real Phase 0 is commented for easy reference. +// Also, phase 16 values (last row) are commented, but its C0,C1 values are loaded in row 0/phase 0. +static const NvU32 scalerTaps5Coeff[NUM_SCALER_RATIOS][NUM_TAPS5_COEFF_PHASES][NUM_TAPS5_COEFF_VALUES] = +{ + // ratio = 1 + {{ 0 , 0 , -16 , 144}, // real phase 0:{ 0, 0, /*256,*/ 0, 0 }, + { 0 , -5 , /*255,*/ 5 , 0}, + { 0 , -9 , /*254,*/ 11 , 0}, + { -1 , -12 , /*251,*/ 18 , -1}, + { -1 , -15 , /*248,*/ 25 , -1}, + { -1 , -18 , /*243,*/ 33 , -2}, + { -2 , -20 , /*238,*/ 42 , -3}, + { -2 , -21 , /*232,*/ 51 , -3}, + { -3 , -22 , /*225,*/ 60 , -5}, + { -3 , -22 , /*217,*/ 70 , -6}, + { -4 , -22 , /*208,*/ 81 , -7}, + { -4 , -22 , /*199,*/ 91 , -9}, + { -5 , -21 , /*190,*/ 102 , -10}, + { -5 , -20 , /*180,*/ 113 , -12}, + { -5 , -19 , /*169,*/ 125 , -13}, + { -6 , -18 , /*158,*/ 136 , -15} + // real phase 16: { 0 , -16 , 144, 144 , -16 } + }, + // ratio = 2 + {{ 3, 60 , 20 , 108 }, // real phase 0: {3 , 60 , 130 , 60 , 3 }, + { 3 , 57 , /*130,*/ 63 , 4 }, + { 2 , 54 , /*130,*/ 66 , 4 }, + { 2 , 51 , /*129,*/ 69 , 5 }, + { 2 , 48 , /*128,*/ 72 , 6 }, + { 1 , 45 , /*128,*/ 75 , 7 }, + { 1 , 43 , /*127,*/ 78 , 7 }, + { 1 , 40 , /*125,*/ 81 , 8 }, + { 1 , 37 , /*124,*/ 84 , 9 }, + { 0 , 35 , /*122,*/ 88 , 10 }, + { 0 , 33 , /*121,*/ 91 , 12 }, + { 0 , 30 , /*119,*/ 94 , 13 }, + { 0 , 28 , /*117,*/ 97 , 14 }, + { 0 , 26 , /*115,*/ 99 , 16 }, + { 0 , 24 , /*112,*/ 102 , 17 }, + { 0 , 22 , /*110,*/ 105 , 19 }, + // real phase 16:{0 , 20 , 108 , 108 , 20 }, + }, + // ratio = 4 + {{ 4 , 62 , 23 , 105 }, // real phase 0: {4 , 62 , 124 , 62 , 4 , + { 4 , 59 , /*124,*/ 64 , 5 }, + { 3 , 56 , /*124,*/ 67 , 6 }, + { 3 , 53 , /*123,*/ 70 , 7 }, + { 2 , 51 , /*123,*/ 73 , 8 }, + { 2 , 48 , /*122,*/ 76 , 8 }, + { 2 , 45 , /*121,*/ 79 , 9 }, + { 1 , 43 , /*120,*/ 81 , 10 }, + { 1 , 40 , /*119,*/ 84 , 12 }, + { 1 , 38 , /*117,*/ 87 , 13 }, + { 1 , 36 , /*116,*/ 90 , 14 }, + { 0 , 34 , /*114,*/ 92 , 15 }, + { 0 , 31 , /*113,*/ 95 , 17 }, + { 0 , 29 , /*111,*/ 97 , 18 }, + { 0 , 27 , /*109,*/ 100 , 20 }, + { 0 , 25 , /*107,*/ 102 , 22 }, + // real phase 16: {0 , 23 , 105 , 105 , 23 }, + } +}; + +static void InitScalerCoefficientsPrecomp5(NVEvoChannelPtr pChannel, + NvU32 coeff, NvU32 index) +{ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_INPUT_SCALER_COEFF_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_INPUT_SCALER_COEFF_VALUE, _DATA, coeff) | + DRF_NUM(C57E, _SET_INPUT_SCALER_COEFF_VALUE, _INDEX, index)); +} + +static void InitScalerCoefficientsPostcomp5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 coeff, NvU32 index) +{ + NvU32 h; + + for (h = 0; h < pDevEvo->numHeads; h++) { + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_OUTPUT_SCALER_COEFF_VALUE(h), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_OUTPUT_SCALER_COEFF_VALUE, _DATA, coeff) | + DRF_NUM(C57D, _HEAD_SET_OUTPUT_SCALER_COEFF_VALUE, _INDEX, index)); + } +} + +static void InitTaps5ScalerCoefficientsC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvBool isPrecomp) +{ + NvU8 ratio; + + if (isPrecomp) { + const NVEvoWindowCaps *pWinCaps = + 
&pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + const NVEvoScalerCaps *pScalerCaps = &pWinCaps->scalerCaps; + + if (!pScalerCaps->present) { + return; + } + } + + for (ratio = 0; ratio < NUM_SCALER_RATIOS; ratio++) { + NvU8 phase; + for (phase = 0; phase < NUM_TAPS5_COEFF_PHASES; phase++) { + NvU8 coeffIdx; + for (coeffIdx = 0; coeffIdx < NUM_TAPS5_COEFF_VALUES; coeffIdx++) { + NvU32 coeff = scalerTaps5Coeff[ratio][phase][coeffIdx]; + NvU32 index = ratio << 6 | phase << 2 | coeffIdx; + + if (isPrecomp) { + InitScalerCoefficientsPrecomp5(pChannel, coeff, index); + } else { + InitScalerCoefficientsPostcomp5(pDevEvo, + pChannel, coeff, index); + } + } + } + } +} + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix Rec709RGBToLMS = {{ + { 0x4bb8, 0x9f84, 0x14c8, 0 }, + { 0x27fc, 0xba2c, 0x1dd4, 0 }, + { 0x8fc, 0x2818, 0xcef0, 0 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix LMSToRec709RGB = {{ + { 0x62c48, 0x1aadf4, 0x25a8, 0 }, + { 0x1ead18, 0x28f64, 0x1fc390, 0 }, + { 0x1ffd00, 0x1fbc34, 0x146c4, 0 }, +}}; + +/* + * This is a 3x4 matrix with S5.14 coefficients (truncated from S5.16 + * SW-specified values). + */ +static const struct NvKmsCscMatrix LMSToRec2020RGB = {{ + { 0x36fc0, 0x1d7e54, 0x11e0, 0 }, + { 0x1f3584, 0x1fbc8, 0x1fcebc, 0 }, + { 0x1ff964, 0x1fe6a4, 0x11ff4, 0 }, +}}; + +/* + * The two arrays below specify the PQ OETF transfer function that's used to + * convert from linear LMS FP16 to PQ encoded L'M'S' fixed-point. + */ +static const NvU32 OetfPQ512SegSizesLog2[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, + 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, + 5, +}; + +static const NvU16 OetfPQ512Entries[] = { + 0x0000, 0x000C, 0x0014, 0x001C, 0x0028, 0x003C, 0x005C, 0x008C, 0x00D0, 0x0134, 0x0184, 0x01C8, 0x0238, 0x029C, 0x033C, 0x03C4, + 0x043C, 0x04A4, 0x0504, 0x0560, 0x0600, 0x0690, 0x0714, 0x078C, 0x07FC, 0x0864, 0x08C8, 0x0924, 0x0980, 0x09D4, 0x0A24, 0x0A70, + 0x0B04, 0x0B90, 0x0C10, 0x0C88, 0x0CFC, 0x0D68, 0x0DD4, 0x0E38, 0x0EF4, 0x0FA4, 0x1048, 0x10E4, 0x1174, 0x1200, 0x1284, 0x1304, + 0x13F4, 0x14D0, 0x159C, 0x165C, 0x1714, 0x17C0, 0x1864, 0x1900, 0x1A28, 0x1B34, 0x1C30, 0x1D1C, 0x1DFC, 0x1ECC, 0x1F94, 0x2050, + 0x2104, 0x21B0, 0x2258, 0x22F8, 0x2390, 0x2424, 0x24B4, 0x2540, 0x25C4, 0x2648, 0x26C4, 0x2740, 0x27B8, 0x282C, 0x289C, 0x290C, + 0x29E0, 0x2AAC, 0x2B70, 0x2C2C, 0x2CE0, 0x2D90, 0x2E38, 0x2ED8, 0x2F74, 0x300C, 0x30A0, 0x3130, 0x31BC, 0x3244, 0x32C8, 0x3348, + 0x3440, 0x352C, 0x360C, 0x36E4, 0x37B4, 0x387C, 0x393C, 0x39F8, 0x3AA8, 0x3B58, 0x3C00, 0x3CA4, 0x3D44, 0x3DDC, 0x3E74, 0x3F04, + 0x401C, 0x4128, 0x4228, 0x431C, 0x4408, 0x44E8, 0x45C4, 0x4694, 0x475C, 0x4820, 0x48DC, 0x4994, 0x4A48, 0x4AF4, 0x4B9C, 0x4C3C, + 0x4D78, 0x4EA0, 0x4FBC, 0x50CC, 0x51D0, 0x52CC, 0x53BC, 0x54A0, 0x5580, 0x5658, 0x5728, 0x57F0, 0x58B4, 0x5974, 0x5A2C, 0x5ADC, + 0x5C34, 0x5D7C, 0x5EB4, 0x5FDC, 0x60F4, 0x6204, 0x630C, 0x6404, 0x64F8, 0x65E0, 0x66C4, 0x679C, 0x6870, 0x693C, 0x6A04, 0x6AC4, + 0x6C38, 0x6D94, 0x6EE4, 0x7020, 0x7150, 0x7274, 0x738C, 0x7498, 0x7598, 0x7694, 0x7784, 0x786C, 0x794C, 0x7A24, 0x7AF8, 0x7BC4, + 0x7D50, 0x7EC4, 0x8024, 0x8174, 0x82B4, 0x83E8, 0x850C, 0x8628, 0x8738, 0x883C, 0x8938, 0x8A2C, 0x8B18, 0x8BFC, 0x8CD8, 0x8DB0, + 0x8F4C, 0x90D0, 0x9240, 0x939C, 0x94EC, 0x962C, 0x975C, 0x9880, 0x999C, 0x9AAC, 0x9BB0, 0x9CAC, 0x9DA0, 0x9E8C, 0x9F70, 
0xA04C, + 0xA1F4, 0xA384, 0xA500, 0xA664, 0xA7BC, 0xA904, 0xAA3C, 0xAB6C, 0xAC8C, 0xADA0, 0xAEAC, 0xAFAC, 0xB0A4, 0xB194, 0xB27C, 0xB360, + 0xB510, 0xB6A4, 0xB824, 0xB994, 0xBAF0, 0xBC3C, 0xBD78, 0xBEA8, 0xBFCC, 0xC0E4, 0xC1F0, 0xC2F4, 0xC3F0, 0xC4E4, 0xC5CC, 0xC6B0, + 0xC78C, 0xC860, 0xC930, 0xC9F8, 0xCABC, 0xCB7C, 0xCC38, 0xCCEC, 0xCD9C, 0xCE48, 0xCEF0, 0xCF94, 0xD034, 0xD0D4, 0xD16C, 0xD200, + 0xD294, 0xD324, 0xD3B4, 0xD43C, 0xD4C4, 0xD54C, 0xD5CC, 0xD650, 0xD6CC, 0xD748, 0xD7C4, 0xD83C, 0xD8B0, 0xD924, 0xD994, 0xDA08, + 0xDAE0, 0xDBB4, 0xDC84, 0xDD4C, 0xDE10, 0xDECC, 0xDF84, 0xE038, 0xE0E8, 0xE194, 0xE238, 0xE2DC, 0xE37C, 0xE418, 0xE4B0, 0xE544, + 0xE5D4, 0xE664, 0xE6F0, 0xE778, 0xE800, 0xE884, 0xE904, 0xE984, 0xEA00, 0xEA7C, 0xEAF4, 0xEB68, 0xEBDC, 0xEC50, 0xECC0, 0xED30, + 0xEE08, 0xEED8, 0xEFA4, 0xF068, 0xF128, 0xF1E4, 0xF298, 0xF348, 0xF3F4, 0xF49C, 0xF540, 0xF5E0, 0xF67C, 0xF714, 0xF7A8, 0xF83C, + 0xF8CC, 0xF958, 0xF9E0, 0xFA68, 0xFAEC, 0xFB6C, 0xFBE8, 0xFC64, 0xFCE0, 0xFD58, 0xFDCC, 0xFE40, 0xFEB4, 0xFF24, 0xFF90, 0xFFFC, +}; + +/* + * The two arrays below specify the PQ EOTF transfer function that's used to + * convert from PQ encoded L'M'S' fixed-point to linear LMS FP16. This transfer + * function is the inverse of the OETF curve. + */ +static const NvU32 EotfPQ512SegSizesLog2[] = { + 6, 6, 4, 4, 4, 3, 4, 3, 3, 3, 2, 2, 2, 3, 3, 2, + 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 6, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1, 2, + 2, 1, 1, 2, 2, 2, 2, 1, 2, 1, 1, 2, 1, 4, 2, 2, +}; + +static const NvU16 EotfPQ512Entries[] = { + 0x0000, 0x0001, 0x0003, 0x0005, 0x0008, 0x000C, 0x0011, 0x0016, 0x001B, 0x0022, 0x0028, 0x002F, 0x0037, 0x003F, 0x0048, 0x0051, + 0x005A, 0x0064, 0x006F, 0x007A, 0x0085, 0x0091, 0x009E, 0x00AB, 0x00B8, 0x00C6, 0x00D4, 0x00E3, 0x00F3, 0x0102, 0x0113, 0x0123, + 0x0135, 0x0146, 0x0158, 0x016B, 0x017E, 0x0192, 0x01A6, 0x01BB, 0x01D0, 0x01E5, 0x01FC, 0x0212, 0x0229, 0x0241, 0x0259, 0x0272, + 0x028B, 0x02A4, 0x02BE, 0x02D9, 0x02F4, 0x0310, 0x032C, 0x0349, 0x0366, 0x0384, 0x03A2, 0x03C1, 0x03E0, 0x0400, 0x0421, 0x0442, + 0x0463, 0x0485, 0x04A8, 0x04CB, 0x04EF, 0x0513, 0x0538, 0x055D, 0x0583, 0x05AA, 0x05D1, 0x05F9, 0x0621, 0x064A, 0x0673, 0x069D, + 0x06C7, 0x06F3, 0x071E, 0x074B, 0x0777, 0x07A5, 0x07D3, 0x0801, 0x0819, 0x0830, 0x0849, 0x0861, 0x087A, 0x0893, 0x08AD, 0x08C7, + 0x08E1, 0x08FB, 0x0916, 0x0931, 0x094C, 0x0968, 0x0984, 0x09A0, 0x09BD, 0x09DA, 0x09F7, 0x0A15, 0x0A33, 0x0A51, 0x0A70, 0x0A8F, + 0x0AAE, 0x0ACE, 0x0AEE, 0x0B0E, 0x0B2F, 0x0B50, 0x0B71, 0x0B93, 0x0BB5, 0x0BD7, 0x0BFA, 0x0C0F, 0x0C20, 0x0C32, 0x0C44, 0x0C56, + 0x0C69, 0x0CB5, 0x0D03, 0x0D55, 0x0DA9, 0x0E01, 0x0E5B, 0x0EB9, 0x0F1B, 0x0F7F, 0x0FE7, 0x1029, 0x1061, 0x109A, 0x10D5, 0x1111, + 0x1150, 0x1190, 0x11D3, 0x1217, 0x125E, 0x12A6, 0x12F0, 0x133D, 0x138B, 0x13DC, 0x1417, 0x1442, 0x146D, 0x149A, 0x14C8, 0x14F7, + 0x1527, 0x1558, 0x158B, 0x15BF, 0x15F4, 0x162A, 0x1662, 0x169B, 0x16D5, 0x1711, 0x174E, 0x178C, 0x17CC, 0x1806, 0x1828, 0x184A, + 0x186D, 0x18B4, 0x18FF, 0x194D, 0x199E, 0x19F3, 0x1A4B, 0x1AA7, 0x1B06, 0x1B37, 0x1B69, 0x1B9B, 0x1BCF, 0x1C02, 0x1C1D, 0x1C38, + 0x1C54, 0x1C70, 0x1C8D, 0x1CAB, 0x1CC9, 0x1CE7, 0x1D06, 0x1D26, 0x1D46, 0x1D88, 0x1DCC, 0x1E13, 0x1E5C, 0x1EA8, 0x1EF6, 0x1F47, + 0x1F9A, 0x1FF1, 0x2025, 0x2053, 0x2082, 0x20B3, 0x20E6, 0x211A, 0x214F, 0x2187, 0x21C0, 0x21FA, 0x2237, 0x2275, 0x22B5, 0x22F7, + 0x233B, 0x23C9, 0x2430, 0x247F, 0x24D3, 0x252B, 0x2589, 0x25EB, 0x2653, 0x26C1, 0x2734, 0x27AD, 0x2817, 0x2838, 0x285A, 0x287C, + 0x28A0, 0x28C5, 0x28EA, 0x2911, 
0x2938, 0x2960, 0x298A, 0x29B4, 0x29DF, 0x2A0C, 0x2A39, 0x2A68, 0x2A98, 0x2AFA, 0x2B62, 0x2BCE, + 0x2C20, 0x2C5B, 0x2C99, 0x2CDA, 0x2D1E, 0x2D65, 0x2DB0, 0x2DFD, 0x2E4E, 0x2EA3, 0x2EFC, 0x2F58, 0x2FB8, 0x300E, 0x3043, 0x307A, + 0x30B3, 0x30D0, 0x30EE, 0x310D, 0x312C, 0x314C, 0x316D, 0x318E, 0x31B0, 0x31D3, 0x31F6, 0x321A, 0x323F, 0x3265, 0x328B, 0x32B2, + 0x32DA, 0x332D, 0x3383, 0x33DC, 0x341D, 0x344D, 0x347F, 0x34B4, 0x34EA, 0x3523, 0x355E, 0x359B, 0x35DB, 0x361D, 0x3662, 0x36A9, + 0x36F3, 0x3740, 0x3791, 0x37E4, 0x381D, 0x384A, 0x3879, 0x38A9, 0x38DB, 0x3910, 0x3946, 0x397E, 0x39B8, 0x39F5, 0x3A34, 0x3A75, + 0x3AB9, 0x3AFF, 0x3B48, 0x3B94, 0x3BE2, 0x3C1A, 0x3C44, 0x3C70, 0x3C9D, 0x3CA0, 0x3CA3, 0x3CA6, 0x3CA9, 0x3CAC, 0x3CAF, 0x3CB1, + 0x3CB4, 0x3CB7, 0x3CBA, 0x3CBD, 0x3CC0, 0x3CC3, 0x3CC6, 0x3CC9, 0x3CCC, 0x3CCF, 0x3CD2, 0x3CD5, 0x3CD8, 0x3CDB, 0x3CDE, 0x3CE1, + 0x3CE4, 0x3CE7, 0x3CEA, 0x3CEE, 0x3CF1, 0x3CF4, 0x3CF7, 0x3CFA, 0x3CFD, 0x3D00, 0x3D03, 0x3D06, 0x3D09, 0x3D0D, 0x3D10, 0x3D13, + 0x3D16, 0x3D19, 0x3D1C, 0x3D20, 0x3D23, 0x3D26, 0x3D29, 0x3D2C, 0x3D30, 0x3D33, 0x3D36, 0x3D39, 0x3D3D, 0x3D40, 0x3D43, 0x3D46, + 0x3D4A, 0x3D4D, 0x3D50, 0x3D54, 0x3D57, 0x3D5A, 0x3D5D, 0x3D61, 0x3D64, 0x3D9B, 0x3DD3, 0x3E0D, 0x3E4A, 0x3E89, 0x3ECA, 0x3F0E, + 0x3F54, 0x3F9C, 0x3FE8, 0x401B, 0x4043, 0x406D, 0x4099, 0x40C6, 0x40F4, 0x4124, 0x4156, 0x418A, 0x41C0, 0x41F8, 0x4232, 0x426D, + 0x42AB, 0x42EB, 0x432E, 0x4373, 0x43BA, 0x4428, 0x4479, 0x44D0, 0x452D, 0x4591, 0x45FC, 0x466F, 0x46EB, 0x472C, 0x476F, 0x47B5, + 0x47FE, 0x4824, 0x484B, 0x4874, 0x489D, 0x48F5, 0x4954, 0x4986, 0x49B9, 0x49EF, 0x4A26, 0x4A5F, 0x4A9B, 0x4AD9, 0x4B19, 0x4B9F, + 0x4C18, 0x4C66, 0x4CBA, 0x4CE6, 0x4D13, 0x4D43, 0x4D74, 0x4DA7, 0x4DDC, 0x4E12, 0x4E4B, 0x4E86, 0x4EC3, 0x4F02, 0x4F44, 0x4F88, + 0x4FCE, 0x500C, 0x5032, 0x5082, 0x50D8, 0x5106, 0x5135, 0x5166, 0x5199, 0x5205, 0x5278, 0x52F5, 0x537C, 0x53C3, 0x5406, 0x542D, + 0x5454, 0x54A9, 0x5503, 0x550F, 0x551B, 0x5527, 0x5533, 0x5540, 0x554C, 0x5559, 0x5565, 0x5572, 0x557F, 0x558C, 0x5599, 0x55A7, + 0x55B4, 0x55C1, 0x55CF, 0x5607, 0x5641, 0x567E, 0x56BC, 0x56FE, 0x5741, 0x5788, 0x57D1, +}; + +static void InitCsc0LUT(NVEvoChannelPtr pChannel, + const NvU32 *pSegmentSizes, NvU32 numSegmentSizes, + const NvU16 *pLUTEntries, NvU32 numEntries) +{ + NvU32 i; + + for (i = 0; i < numSegmentSizes; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_SEGMENT_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC0LUT_SEGMENT_SIZE, _IDX, i) | + DRF_NUM(C57E, _SET_CSC0LUT_SEGMENT_SIZE, _VALUE, pSegmentSizes[i])); + } + + for (i = 0; i < numEntries; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_ENTRY, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC0LUT_ENTRY, _IDX, i) | + DRF_NUM(C57E, _SET_CSC0LUT_ENTRY, _VALUE, pLUTEntries[i])); + } +} + +static void InitCsc1LUT(NVEvoChannelPtr pChannel, + const NvU32 *pSegmentSizes, NvU32 numSegmentSizes, + const NvU16 *pLUTEntries, NvU32 numEntries) +{ + NvU32 i; + + for (i = 0; i < numSegmentSizes; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_SEGMENT_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC1LUT_SEGMENT_SIZE, _IDX, i) | + DRF_NUM(C57E, _SET_CSC1LUT_SEGMENT_SIZE, _VALUE, pSegmentSizes[i])); + } + + for (i = 0; i < numEntries; i++) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_ENTRY, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CSC1LUT_ENTRY, _IDX, i) | + DRF_NUM(C57E, _SET_CSC1LUT_ENTRY, _VALUE, pLUTEntries[i])); + } +} + +static void 
ConfigureCsc0C5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvBool enable) +{ + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + struct NvKmsCscMatrix matrix = { }; + NvU32 lutData = 0; + NvU32 csc01Data = 0; + + if (!pWinCaps->csc0MatricesPresent) { + return; + } + + if (enable) { + matrix = Rec709RGBToLMS; + + lutData |= DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _ENABLE, _ENABLE); + + csc01Data |= DRF_DEF(C57E, _SET_CSC01CONTROL, _ENABLE, _ENABLE); + } else { + matrix = NVKMS_IDENTITY_CSC_MATRIX; + + lutData |= DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _INTERPOLATE, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC0LUT_CONTROL, _ENABLE, _DISABLE); + + csc01Data |= DRF_DEF(C57E, _SET_CSC01CONTROL, _ENABLE, _DISABLE); + } + + /* Linear RGB FP16 -> Linear LMS FP16 */ + SetCsc00MatrixC5(pChannel, &matrix); + + /* Linear LMS FP16 -> PQ encoded L'M'S' fixed-point */ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC0LUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, lutData); + + /* + * PQ encoded L'M'S' fixed-point -> ICtCp + * + * Note that we're converting between fixed colorspaces, so the default HW + * coefficients are sufficient. + */ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC01CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, csc01Data); +} + +static void ConfigureCsc1C5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvBool enable) +{ + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + struct NvKmsCscMatrix matrix = { }; + NvU32 lutData = 0; + NvU32 csc10Data = 0; + const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + const NvU32 head = pDevEvo->headForWindow[win]; + + if (!pWinCaps->csc1MatricesPresent || (head == NV_INVALID_HEAD)) { + return; + } + + if (enable) { + const NvU32 sdMask = nvPeekEvoSubDevMask(pDevEvo); + const NvU32 sd = (sdMask == 0) ? 0 : __builtin_ffs(sdMask) - 1; + NVDispHeadStateEvoRec *pHeadState; + + /* + * All callers of this path should push a single sd on the stack, + * so that ffs(sdMask) is safe. + */ + nvAssert(nvPopCount32(sdMask) == 1); + + pHeadState = &pDevEvo->pDispEvo[sd]->headState[head]; + + if ((pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020RGB) || + (pHeadState->procAmp.colorimetry == NVT_COLORIMETRY_BT2020YCC)) { + matrix = LMSToRec2020RGB; + } else { + matrix = LMSToRec709RGB; + } + + lutData |= DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _ENABLE, _ENABLE); + + csc10Data |= DRF_DEF(C57E, _SET_CSC10CONTROL, _ENABLE, _ENABLE); + } else { + matrix = NVKMS_IDENTITY_CSC_MATRIX; + + lutData |= DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _INTERPOLATE, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_CSC1LUT_CONTROL, _ENABLE, _DISABLE); + + csc10Data |= DRF_DEF(C57E, _SET_CSC10CONTROL, _ENABLE, _DISABLE); + } + + /* + * ICtCp -> PQ encoded L'M'S' fixed-point + * + * Note that we're converting between fixed colorspaces, so the default HW + * coefficients are sufficient. 
+ */ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC10CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, csc10Data); + + /* PQ encoded L'M'S' fixed-point -> Linear LMS FP16 */ + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CSC1LUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, lutData); + + /* Linear LMS FP16 -> Linear RGB FP16 */ + SetCsc11MatrixC5(pChannel, &matrix); +} + +static void InitDesktopColorC3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DESKTOP_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _RED, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _GREEN, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _BLUE, 0) | + DRF_NUM(C37D, _HEAD_SET_DESKTOP_COLOR, _ALPHA, 255)); + } +} + +static void InitDesktopColorC5(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DESKTOP_COLOR_ALPHA_RED(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_ALPHA_RED, _ALPHA, 255) | + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_ALPHA_RED, _RED, 0)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DESKTOP_COLOR_GREEN_BLUE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_GREEN_BLUE, _GREEN, 0) | + DRF_NUM(C57D, _HEAD_SET_DESKTOP_COLOR_GREEN_BLUE, _BLUE, 0)); + } +} + +static void EvoInitChannel3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + InitChannelCapsC3(pDevEvo, pChannel); +} + +static void EvoInitChannelC3(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + + EvoInitChannel3(pDevEvo, pChannel); + + if (isCore) { + InitDesktopColorC3(pDevEvo, pChannel); + } +} + +static void EvoInitChannelC5(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + const NvBool isWindow = + ((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0); + + EvoInitChannel3(pDevEvo, pChannel); + + if (isCore) { + InitTaps5ScalerCoefficientsC5(pDevEvo, pChannel, FALSE); + InitDesktopColorC5(pDevEvo, pChannel); + } else if (isWindow) { + NVEvoWindowCaps *pWinCaps = + &pDevEvo->gpus[0].capabilities.window[pChannel->instance]; + NvU32 csc0SizesLen = ARRAY_LEN(OetfPQ512SegSizesLog2); + NvU32 csc0EntriesLen = ARRAY_LEN(OetfPQ512Entries); + NvU32 csc1SizesLen = ARRAY_LEN(EotfPQ512SegSizesLog2); + NvU32 csc1EntriesLen = ARRAY_LEN(EotfPQ512Entries); + + InitTaps5ScalerCoefficientsC5(pDevEvo, pChannel, TRUE); + + if (pWinCaps->cscLUTsPresent) { + InitCsc0LUT(pChannel, + OetfPQ512SegSizesLog2, csc0SizesLen, + OetfPQ512Entries, csc0EntriesLen); + InitCsc1LUT(pChannel, + EotfPQ512SegSizesLog2, csc1SizesLen, + EotfPQ512Entries, csc1EntriesLen); + } + } +} + +static const NvU32 IdentityFMTMatrix[12] = { + 0x00010000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00010000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00010000, 0x00000000 +}; + +/* + * TODO: The full set of FMT matrices needs to be generated for each RGB and YUV + * encoding. For now, I'm using the matrix below to convert all YUV input + * formats to pipe native. 
+ */ +static const NvU32 YCbCrRec709_8bpcFMTMatrix[12] = { + 0x0001ccb7, 0x00012b3c, 0x00000000, 0x001f06f1, + 0x001f770c, 0x00012b3c, 0x001fc933, 0x00004d2d, + 0x00000000, 0x00012b3c, 0x00021edd, 0x001eddde +}; + +static const NvU32* EvoGetFMTMatrixC5( + const enum NvKmsSurfaceMemoryFormat format) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + if (pFormatInfo->isYUV) { + return YCbCrRec709_8bpcFMTMatrix; + } else { + return IdentityFMTMatrix; + } +} + +static void EvoSetFMTMatrixC5( + NVEvoChannelPtr pChannel, const enum NvKmsSurfaceMemoryFormat format) +{ + const NvU32 *matrix = EvoGetFMTMatrixC5(format); + NvU32 method = NVC57E_SET_FMT_COEFFICIENT_C00; + int i; + + for (i = 0; i < 12; i++) { + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, matrix[i]); + + method += 4; + } +} + +static void EvoInitDefaultLutC5(NVDevEvoPtr pDevEvo) +{ + NVLutSurfaceEvoPtr pLut = pDevEvo->lut.defaultLut; + NvU16 sd; + + nvAssert(pLut); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoLutDataRec *pData = pLut->subDeviceAddress[sd]; + NvU16 i; + + ct_assert(NV_NUM_EVO_LUT_ENTRIES == 1025); + for (i = 0; i < 1024; i++) { + // nvdisplay 3 uses FP16 entries in the ILUT. + pData->base[NV_LUT_VSS_HEADER_SIZE + i].Red = + pData->base[NV_LUT_VSS_HEADER_SIZE + i].Green = + pData->base[NV_LUT_VSS_HEADER_SIZE + i].Blue = nvUnorm10ToFp16(i).v; + + // nvdisplay 3 uses 16-bit fixed-point entries in the OLUT. + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Red = + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Green = + pData->output[NV_LUT_VSS_HEADER_SIZE + i].Blue = (i << (16 - 10)); + } + + pData->base[NV_LUT_VSS_HEADER_SIZE + 1024] = pData->base[NV_LUT_VSS_HEADER_SIZE + 1023]; + pData->output[NV_LUT_VSS_HEADER_SIZE + 1024] = pData->output[NV_LUT_VSS_HEADER_SIZE + 1023]; + } +} + +static void EvoInitWindowMapping3(NVDevEvoPtr pDevEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win, sd; + + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Bind each window to a head. On GV100, there is a fixed mapping. */ + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 head = pDevEvo->headForWindow[win]; + + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_CONTROL(win), 1); + if ((head == NV_INVALID_HEAD) || (head >= pDevEvo->numHeads)) { + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _WINDOW_SET_CONTROL, _OWNER, + NVC37D_WINDOW_SET_CONTROL_OWNER_NONE)); + } else { + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _WINDOW_SET_CONTROL, _OWNER, head)); + } + } + + pModesetUpdateState->windowMappingChanged = FALSE; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + void *pCoreDma = pDevEvo->pSubDevices[sd]->pCoreDma; + /* + * Short timeout (100ms) because we don't expect display to be very + * busy at this point (it should at most be processing methods from + * InitChannel()). 
+ */ + const NvU32 timeout = 100000; + NvU64 startTime = 0; + + if (!((nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)))) { + continue; + } + + /* This core channel must be idle before reading state cache */ + do { + NvBool isIdle = NV_FALSE; + if (!EvoIsChannelIdleC3(pDevEvo, pChannel, sd, &isIdle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, "EvoIsChannelIdleC3() failed!"); + } + if (isIdle) { + break; + } + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Timed out waiting for core channel idle."); + break; + } + } while (TRUE); + + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 data = nvDmaLoadPioMethod(pCoreDma, NVC37D_WINDOW_SET_CONTROL(win)); + + if (DRF_VAL(C37D, + _WINDOW_SET_CONTROL, _OWNER, data) != + pDevEvo->headForWindow[win]) { + + pModesetUpdateState->windowMappingChanged = TRUE; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDisableCoreInterlockUpdateState(pDevEvo, + updateState, + pDevEvo->window[win]); + nvPopEvoSubDevMask(pDevEvo); + } + } + } +} + +static void EvoInitWindowMappingC3(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win; + + nvPushEvoSubDevMaskDisp(pDispEvo); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + EvoInitWindowMapping3(pDevEvo, + pModesetUpdateState); + + // Set window usage bounds + for (win = 0; win < pDevEvo->numWindows; win++) { + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + /* XXXnvdisplay: window scaling */ + nvDmaSetEvoMethodData(pChannel, NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3); + } + nvPopEvoSubDevMask(pDevEvo); +} + +static void EvoInitWindowMappingC5(const NVDispEvoRec *pDispEvo, + NVEvoModesetUpdateState *pModesetUpdateState) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 win; + + nvPushEvoSubDevMaskDisp(pDispEvo); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + EvoInitWindowMapping3(pDevEvo, + pModesetUpdateState); + + // Set window usage bounds + for (win = 0; win < pDevEvo->numWindows; win++) { + NvU32 bounds = NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5; + + bounds |= + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + nvDmaSetEvoMethodData(pChannel, bounds); + } + nvPopEvoSubDevMask(pDevEvo); +} + +static NvBool ComputeMinFrameIdle( + const NVHwModeTimingsEvo *pTimings, + NvU16 *pLeadingRasterLines, + NvU16 *pTrailingRasterLines) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + + /* + * leadingRasterLines defines the number of lines between the start of the + * frame (vsync) and the start of the active region. This includes Vsync, + * Vertical Back Porch, and the top part of the overscan border. The + * minimum value is 2 because vsync and VBP must be at least 1 line each. + * + * trailingRasterLines defines the number of lines between the end of the + * active region and the end of the frame. This includes the bottom part + * of the overscan border and the Vertical Front Porch. 
+ */ + const NvU32 activeHeight = (pTimings->rasterBlankStart.y - + pTimings->rasterBlankEnd.y); + /* This is how it's done in dispClassNVD20CoreUpdateErrorChecks_hls.c */ + const NvU32 overscan = (activeHeight / 2) - (pViewPort->out.height / 2); + + /* + * The +1 is justified by this comment in the error check: + * + * If the value is 1, that means there are 2 lines of vblank (lines 0 and + * 1) before active. That is why the uLeadingBorder equation needs +1; + */ + const NvU32 leadingRasterLines = + pTimings->rasterBlankEnd.y + overscan + pViewPort->out.yAdjust + 1; + const NvU32 trailingRasterLines = + pTimings->rasterSize.y - (leadingRasterLines + pViewPort->out.height); + + /* nvdClass_01.mfs says: "The minimum value is 2 because vsync and VBP must + * be at least 1 line each." */ + if (leadingRasterLines < 2) { + return FALSE; + } + + *pLeadingRasterLines = leadingRasterLines; + *pTrailingRasterLines = trailingRasterLines; + + return TRUE; +} + +static void EvoSetRasterParamsC3(NVDevEvoPtr pDevEvo, int head, + const NVHwModeTimingsEvo *pTimings, + const NVEvoColorRec *pOverscanColor, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + /* XXXnvdisplay: Convert these for YCbCr, as necessary */ + NvU32 overscanColor = + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _RED_CR, pOverscanColor->red) | + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _GREEN_Y, pOverscanColor->green) | + DRF_NUM(C37D, _HEAD_SET_OVERSCAN_COLOR, _BLUE_CB, pOverscanColor->blue); + NvU32 hdmiStereoCtrl; + NvU16 minFrameIdleLeadingRasterLines, minFrameIdleTrailingRasterLines; + NvBool ret; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // XXX[AGP]: These methods are sequential and could use an incrementing + // method, but it's not clear if there's a bug in EVO that causes corruption + // sometimes. Play it safe and send methods with count=1. + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_OVERSCAN_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, overscanColor); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_SIZE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_SIZE, _WIDTH, pTimings->rasterSize.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_SIZE, _HEIGHT, pTimings->rasterSize.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_SYNC_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_SYNC_END, _X, pTimings->rasterSyncEnd.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_SYNC_END, _Y, pTimings->rasterSyncEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_BLANK_END(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_END, _X, pTimings->rasterBlankEnd.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_END, _Y, pTimings->rasterBlankEnd.y)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_RASTER_BLANK_START(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_START, _X, pTimings->rasterBlankStart.x) | + DRF_NUM(C37D, _HEAD_SET_RASTER_BLANK_START, _Y, pTimings->rasterBlankStart.y)); + + ret = ComputeMinFrameIdle(pTimings, + &minFrameIdleLeadingRasterLines, + &minFrameIdleTrailingRasterLines); + if (!ret) { + /* This should have been ensured by IMP in AssignPerHeadImpParams. */ + nvAssert(ret); + /* In case a mode validation override was used to skip IMP, program the + * default values. This may still cause a hardware exception. 
*/ + minFrameIdleLeadingRasterLines = 2; + minFrameIdleTrailingRasterLines = 1; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_MIN_FRAME_IDLE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_MIN_FRAME_IDLE, _LEADING_RASTER_LINES, + minFrameIdleLeadingRasterLines) | + DRF_NUM(C37D, _HEAD_SET_MIN_FRAME_IDLE, _TRAILING_RASTER_LINES, + minFrameIdleTrailingRasterLines)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _HERTZ, + pTimings->pixelClock * 1000) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_CONFIGURATION(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _NOT_DRIVER, _FALSE) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING, _DISABLE) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_CONFIGURATION, _HOPPING_MODE, _VBLANK)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _HERTZ, + pTimings->pixelClock * 1000) | + DRF_DEF(C37D, _HEAD_SET_PIXEL_CLOCK_FREQUENCY_MAX, _ADJ1000DIV1001,_FALSE)); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_FRAME_PACKED_VACTIVE_COLOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _RED_CR, 0) | +#if defined(DEBUG) + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _GREEN_Y, 512) | +#else + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _GREEN_Y, 0) | +#endif + DRF_NUM(C37D, _HEAD_SET_FRAME_PACKED_VACTIVE_COLOR, _BLUE_CB, 0)); + + hdmiStereoCtrl = DRF_NUM(C37D, _HEAD_SET_HDMI_CTRL, _HDMI_VIC, 0); + if (pTimings->hdmi3D) { + hdmiStereoCtrl = + FLD_SET_DRF(C37D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _STEREO3D, hdmiStereoCtrl); + } else { + hdmiStereoCtrl = + FLD_SET_DRF(C37D, _HEAD_SET_HDMI_CTRL, _VIDEO_FORMAT, _NORMAL, hdmiStereoCtrl); + } + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_HDMI_CTRL(head), 1); + nvDmaSetEvoMethodData(pChannel, hdmiStereoCtrl); +} + +static void EvoSetProcAmpC3(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 dynRange; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // These NVT defines match the HEAD_SET_PROCAMP ones. 
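+    // They are asserted below so that procAmp.colorimetry and
+    // procAmp.colorRange can be programmed into the _COLOR_SPACE and
+    // _RANGE_COMPRESSION fields directly via DRF_NUM, with no translation step.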
+ ct_assert(NVT_COLORIMETRY_RGB == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_RGB); + ct_assert(NVT_COLORIMETRY_YUV_601 == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_601); + ct_assert(NVT_COLORIMETRY_YUV_709 == NVC37D_HEAD_SET_PROCAMP_COLOR_SPACE_YUV_709); + /* XXXnvdisplay add REC2020 */ + ct_assert(NVT_COLOR_RANGE_FULL == NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_DISABLE); + ct_assert(NVT_COLOR_RANGE_LIMITED == NVC37D_HEAD_SET_PROCAMP_RANGE_COMPRESSION_ENABLE); + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(C37D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(C37D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _COLOR_SPACE, + pHeadState->procAmp.colorimetry) | + DRF_DEF(C37D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _DISABLE) | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _SAT_COS, + pHeadState->procAmp.satCos) | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _SAT_SINE, 0) | + dynRange | + DRF_NUM(C37D, _HEAD_SET_PROCAMP, _RANGE_COMPRESSION, + pHeadState->procAmp.colorRange) | + DRF_DEF(C37D, _HEAD_SET_PROCAMP, _BLACK_LEVEL, _GRAPHICS)); +} + +static const struct NvKmsCscMatrix RGBToFullRangeYCbCrRec709Matrix = {{ + { 0x8000, 0x1f8bbc, 0x1ff444, 0x8000 }, + { 0x366c, 0xb718, 0x127c, 0 }, + { 0x1fe2ac, 0x1f9d54, 0x8000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToFullRangeYCbCrRec601Matrix = {{ + { 0x8000, 0x1f94d0, 0x1feb30, 0x8000 }, + { 0x4c8c, 0x9644, 0x1d30, 0 }, + { 0x1fd4cc, 0x1fab34, 0x8000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec2020Matrix = {{ + { 0x7000, 0x1f9900, 0x1ff700, 0x8000 }, + { 0x3988, 0x947c, 0xcfc, 0x1000 }, + { 0x1fe0b8, 0x1faf44, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec709Matrix = {{ + { 0x7000, 0x1f9a44, 0x1ff5bc, 0x8000 }, + { 0x2e90, 0x9ca4, 0xfd0, 0x1000 }, + { 0x1fe654, 0x1fa9a8, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeYCbCrRec601Matrix = {{ + { 0x7000, 0x1fa234, 0x1fedc8, 0x8000 }, + { 0x417c, 0x8090, 0x18f8, 0x1000 }, + { 0x1fda34, 0x1fb5cc, 0x7000, 0x8000 }, +}}; +static const struct NvKmsCscMatrix RGBToLimitedRangeRGB = {{ + { 0xdb04, 0, 0, 0x1000 }, + { 0, 0xdb04, 0, 0x1000 }, + { 0, 0, 0xdb04, 0x1000 }, +}}; + +/*! + * Return the appropriate OCSC1 matrix for the requested color range and + * colorimetry, or NULL if the OCSC1 should be disabled. + */ +static const struct NvKmsCscMatrix* EvoGetOCsc1MatrixC5(const NVDispHeadStateEvoRec *pHeadState) +{ + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + switch (pHeadState->procAmp.colorimetry) { + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + // No OCSC1 needed. 
+ return NULL; + case NVT_COLORIMETRY_YUV_601: + return &RGBToFullRangeYCbCrRec601Matrix; + case NVT_COLORIMETRY_YUV_709: + return &RGBToFullRangeYCbCrRec709Matrix; + default: + nvAssert(!"Unexpected colorimetry"); + return NULL; + } + } else { + switch (pHeadState->procAmp.colorimetry) { + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + return &RGBToLimitedRangeRGB; + case NVT_COLORIMETRY_YUV_601: + return &RGBToLimitedRangeYCbCrRec601Matrix; + case NVT_COLORIMETRY_YUV_709: + return &RGBToLimitedRangeYCbCrRec709Matrix; + case NVT_COLORIMETRY_BT2020YCC: + return &RGBToLimitedRangeYCbCrRec2020Matrix; + default: + nvAssert(!"Unexpected colorimetry"); + return NULL; + } + } +} + +struct EvoClampRangeC5 { + NvU32 green, red_blue; +}; + +/*! + * Return the output clamping ranges for the requested color range and + * colorimetry. + */ +static struct EvoClampRangeC5 +EvoGetOCsc1ClampRange(const NVDispHeadStateEvoRec *pHeadState) +{ + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x0) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xFFF), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x0) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xFFF), + }; + } else { + switch (pHeadState->procAmp.colorimetry) { + default: + nvAssert(!"Unexpected colorimetry"); + // fall through + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xEB0), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xEB0), + }; + case NVT_COLORIMETRY_YUV_601: + case NVT_COLORIMETRY_YUV_709: + case NVT_COLORIMETRY_BT2020YCC: + return (struct EvoClampRangeC5) { + .green = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_GREEN, _HIGH, 0xEB0), + .red_blue = DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _LOW, 0x100) | + DRF_NUM(C57D, _HEAD_SET_CLAMP_RANGE_RED_BLUE, _HIGH, 0xF00), + }; + } + } +} + + +static void EvoSetOCsc1C5(NVDispEvoPtr pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const struct NvKmsCscMatrix *matrix = EvoGetOCsc1MatrixC5(pHeadState); + struct EvoClampRangeC5 clamp = EvoGetOCsc1ClampRange(pHeadState); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CLAMP_RANGE_GREEN(head), 1); + nvDmaSetEvoMethodData(pChannel, clamp.green); + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CLAMP_RANGE_RED_BLUE(head), 1); + nvDmaSetEvoMethodData(pChannel, clamp.red_blue); + + if (matrix) { + int x, y; + NvU32 method = NVC57D_HEAD_SET_OCSC1COEFFICIENT_C00(head); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC1CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_OCSC1CONTROL, _ENABLE, _ENABLE)); + + for (y = 0; y < 3; y++) { + for (x = 0; x < 4; x++) { + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, matrix->m[y][x]); + + method += 4; + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC1CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_OCSC1CONTROL, _ENABLE, _DISABLE)); + } +} + +/* + * 1.402 
1.0 0.0 + * -0.714136 1.0 -0.344136 + * 0.0 1.0 1.772 + */ +static const struct NvKmsMatrix CrYCb601toRGBMatrix = { { + { 0x3fb374bc, 0x3f800000, 0x00000000 }, + { 0xbf36d19e, 0x3f800000, 0xbeb03298 }, + { 0x00000000, 0x3f800000, 0x3fe2d0e5 } +} }; + +/* + * 1.5748 1.0 0.0 + * -0.468124 1.0 -0.187324 + * 0.0 1.0 1.8556 + */ +static const struct NvKmsMatrix CrYCb709toRGBMatrix = { { + { 0x3fc9930c, 0x3f800000, 0x00000000 }, + { 0xbeefadf3, 0x3f800000, 0xbe3fd1dd }, + { 0x00000000, 0x3f800000, 0x3fed844d } +} }; + +/* + * 0.5 -0.418688 -0.081312 + * 0.299 0.587 0.114 + * -0.168736 -0.331264 0.5 + */ +static const struct NvKmsMatrix RGBtoCrYCb601Matrix = { { + { 0x3f000000, 0xbed65e46, 0xbda686e8 }, + { 0x3e991687, 0x3f1645a2, 0x3de978d5 }, + { 0xbe2cc921, 0xbea99b6f, 0x3f000000 } +} }; + +/* + * 0.5 -0.45415 -0.04585 + * 0.21260 0.71520 0.07220 + * -0.11457 -0.38543 0.5 + */ +static const struct NvKmsMatrix RGBtoCrYCb709Matrix = { { + { 0x3f000000, 0xbee88659, 0xbd3bcd36 }, + { 0x3e59b3d0, 0x3f371759, 0x3d93dd98 }, + { 0xbdeaa3ad, 0xbec55715, 0x3f000000 } +} }; + +/* + * Converts FP32 to fixed point S5.14 coefficient format + */ +static inline NvU32 cscCoefConvertS514(float32_t x) +{ + /* more concisely, (NvS32)floor(x * 65536.0 + 2.0) */ + const NvS32 y = f32_to_i32(f32_mulAdd(x, + NvU32viewAsF32(NV_FLOAT_65536), + NvU32viewAsF32(NV_FLOAT_TWO)), + softfloat_round_min, FALSE); + return (NvU32)(0x001ffffc & clamp_S32(y, -0x100000, 0xfffff)); +} + +/* + * Sets up the OCSC0 matrix coefficients, used to perform saturation + * adjustment. + * + * The pipeline operates in FP16 RGB, however this adjustment must be + * performed in CrYCb. Therefore, we multiply the saturation + * adjustment matrix by the appropriate color space conversion + * matrix. The specific color space used depends on the colorimetry of + * the final output. Then we multiply by its inverse to convert back + * to RGB. Finally, we convert the coefficients to S5.14 fixed point + * format. + * + * The OCSC0 matrix will be enabled later in EvoSetLUTContextDmaC5 if + * and only if we also enable the OLUT as required by the + * specification. 
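+ *
+ * In other words, the matrix programmed below is effectively
+ * (CrYCbtoRGB x satHue x RGBtoCrYCb), applied right to left to each RGB pixel.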
+ */ +static void EvoSetOCsc0C5(NVDispEvoPtr pDispEvo, const NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + + const float32_t zeroF32 = NvU32viewAsF32(NV_FLOAT_ZERO); + const float32_t oneF32 = NvU32viewAsF32(NV_FLOAT_ONE); + /* divide satCos by the default setting of 1024 */ + const float32_t satCos = f32_div(i32_to_f32(pHeadState->procAmp.satCos), + NvU32viewAsF32(NV_FLOAT_1024)); + const struct NvKmsMatrixF32 satHueMatrix = { { + { satCos, zeroF32, zeroF32 }, + { zeroF32, oneF32, zeroF32 }, + { zeroF32, zeroF32, satCos } + } }; + struct NvKms3x4MatrixF32 ocsc0Matrix = { { + { oneF32, zeroF32, zeroF32, zeroF32 }, + { zeroF32, oneF32, zeroF32, zeroF32 }, + { zeroF32, zeroF32, oneF32, zeroF32 } + } }; + + struct NvKmsMatrixF32 CrYCbtoRGBMatrix; + struct NvKmsMatrixF32 RGBtoCrYCbMatrix; + switch (pHeadState->procAmp.colorimetry) { + default: + nvAssert(!"Unexpected colorimetry"); + /* fallthrough */ + case NVT_COLORIMETRY_RGB: + /* fallthrough; for RGB output, perform saturation adjustment in YUV709 */ + case NVT_COLORIMETRY_YUV_709: + CrYCbtoRGBMatrix = NvKmsMatrixToNvKmsMatrixF32(CrYCb709toRGBMatrix); + RGBtoCrYCbMatrix = NvKmsMatrixToNvKmsMatrixF32(RGBtoCrYCb709Matrix); + break; + case NVT_COLORIMETRY_YUV_601: + CrYCbtoRGBMatrix = NvKmsMatrixToNvKmsMatrixF32(CrYCb601toRGBMatrix); + RGBtoCrYCbMatrix = NvKmsMatrixToNvKmsMatrixF32(RGBtoCrYCb601Matrix); + break; + } + + ocsc0Matrix = nvMultiply3x4Matrix(&RGBtoCrYCbMatrix, &ocsc0Matrix); + ocsc0Matrix = nvMultiply3x4Matrix(&satHueMatrix, &ocsc0Matrix); + ocsc0Matrix = nvMultiply3x4Matrix(&CrYCbtoRGBMatrix, &ocsc0Matrix); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0COEFFICIENT_C00(head), 12); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C00, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[0][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C01, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[0][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C02, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[0][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C03, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[0][3]))); + + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C10, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[1][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C11, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[1][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C12, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[1][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C13, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[1][3]))); + + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C20, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[2][0]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C21, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[2][1]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C22, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[2][2]))); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C57D, _HEAD_SET_OCSC0COEFFICIENT_C23, _VALUE, cscCoefConvertS514(ocsc0Matrix.m[2][3]))); +} + +static void EvoSetProcAmpC5(NVDispEvoPtr pDispEvo, const NvU32 head, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + 
NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvU32 dynRange, chromaLpf, chromaDownV; + NvU32 colorimetry; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pHeadState->procAmp.colorimetry) { + default: + nvAssert(!"Unrecognized colorimetry"); + // fall through + case NVT_COLORIMETRY_BT2020RGB: + // fall through + case NVT_COLORIMETRY_RGB: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _RGB); + break; + case NVT_COLORIMETRY_YUV_601: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_601); + break; + case NVT_COLORIMETRY_YUV_709: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_709); + break; + case NVT_COLORIMETRY_BT2020YCC: + colorimetry = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _COLOR_SPACE, _YUV_2020); + break; + } + + if (pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_FULL) { + dynRange = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _VESA); + } else { + nvAssert(pHeadState->procAmp.colorRange == NVT_COLOR_RANGE_LIMITED); + dynRange = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _DYNAMIC_RANGE, _CEA); + } + + /* + * NVC67D_HEAD_SET_PROCAMP_CHROMA_DOWN_V is only defined in NVC67D, but + * it is an unused bit in NVC57D_HEAD_SET_PROCAMP, and YUV420 should only + * be set on >=nvdisplay 4.0, so it's okay to set it here. + */ + if (pHeadState->procAmp.colorFormat == NVT_COLOR_FORMAT_YCbCr420) { + chromaLpf = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _ENABLE); + chromaDownV = DRF_DEF(C67D, _HEAD_SET_PROCAMP, _CHROMA_DOWN_V, _ENABLE); + } else { + chromaLpf = DRF_DEF(C57D, _HEAD_SET_PROCAMP, _CHROMA_LPF, _DISABLE); + chromaDownV = DRF_DEF(C67D, _HEAD_SET_PROCAMP, _CHROMA_DOWN_V, _DISABLE); + } + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_PROCAMP(head), 1); + nvDmaSetEvoMethodData(pChannel, + colorimetry | dynRange | chromaLpf | chromaDownV); + + EvoSetOCsc0C5(pDispEvo, head); + EvoSetOCsc1C5(pDispEvo, head); +} + +/* + * With nvdisplay, external fliplock pins are controlled via a headless + * SetControl method, unlike previous EVO display implementations which + * specified this information in the per-head HeadSetControl method. This + * function loops over all of the core nvkms HeadControl data structures to + * determine which pins should be enabled in the SetControl method. It should + * be called any time the HeadControl data structures are updated. + */ +static void SetControl(NVDevEvoPtr pDevEvo, int sd) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 data = 0; + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + if (pHC->flipLock && !NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->flipLockPin)) { + NvU32 pin = pHC->flipLockPin - NV_EVO_LOCK_PIN_0; + data = FLD_IDX_SET_DRF(C37D, _SET_CONTROL, _FLIP_LOCK_PIN, + pin, _ENABLE, data); + } + } + + /* + * GV100 HW bug 2062029 WAR + * + * GV100 always holds the external fliplock line low as if + * NVC37D_SET_CONTROL_FLIP_LOCK_PIN was enabled. To work around this, + * the GV100 VBIOS initializes the fliplock GPIOs to be software + * controlled (forced off). The following rmctrl needs to be called to + * switch HW control of the fliplock GPIOs back on whenever external + * fliplock is enabled. 
+ */ + { + NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS params = { }; + + params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance; + params.bEnable = (data != 0); + + if (nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_SET_SWAPRDY_GPIO_WAR, + ¶ms, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, "Failed to override fliplock GPIO"); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, data); +} + +static void EvoSetHeadControlC3(NVDevEvoPtr pDevEvo, int sd, int head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + /* + * NOTE: This function should only push state to the hardware based on data + * in the pHC. If not, then we may miss updates due to the memcmp of the + * HeadControl structure in UpdateEvoLockState(). + */ + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + NvU32 data = 0, pin; + NvU32 serverLockMode, clientLockMode; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pHC->serverLock) { + case NV_EVO_NO_LOCK: + serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + serverLockMode = NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid server lock mode"); + return; + } + + switch (pHC->clientLock) { + case NV_EVO_NO_LOCK: + clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK; + break; + case NV_EVO_FRAME_LOCK: + clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_FRAME_LOCK; + break; + case NV_EVO_RASTER_LOCK: + clientLockMode = NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_RASTER_LOCK; + break; + default: + nvAssert(!"Invalid client lock mode"); + return; + } + + // Convert head control state to EVO method values. 
+ nvAssert(!pHC->interlaced); + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STRUCTURE, _PROGRESSIVE); + + nvAssert(pHC->serverLockPin != NV_EVO_LOCK_PIN_ERROR); + nvAssert(pHC->clientLockPin != NV_EVO_LOCK_PIN_ERROR); + + if (serverLockMode == NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_MODE_NO_LOCK) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->serverLockPin)) { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + /* + * nvdClass_01.mfs says: + * "master lock pin, if internal, must be set to the corresponding + * internal pin for that head" (error check #12) + */ + nvAssert(pin == head); + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->serverLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_MASTER_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_LOCK_MODE, serverLockMode); + + if (clientLockMode == NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_MODE_NO_LOCK) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, _LOCK_PIN_NONE); + } else if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->clientLockPin)) { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_INTERNAL_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_INTERNAL_SCAN_LOCK(pin)); + } else { + pin = pHC->clientLockPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_PIN, + NVC37D_HEAD_SET_CONTROL_SLAVE_LOCK_PIN_LOCK_PIN(pin)); + } + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCK_MODE, clientLockMode); + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_LOCKOUT_WINDOW, + pHC->clientLockoutWindow); + + /* + * We always enable stereo lock when it's available and either framelock + * or rasterlock is in use. + */ + if (pHC->stereoLocked) { + if (pHC->serverLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _MASTER_STEREO_LOCK_MODE, + NVC37D_HEAD_SET_CONTROL_MASTER_STEREO_LOCK_MODE_ENABLE); + } + if (pHC->clientLock != NV_EVO_NO_LOCK) { + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _SLAVE_STEREO_LOCK_MODE, + NVC37D_HEAD_SET_CONTROL_SLAVE_STEREO_LOCK_MODE_ENABLE); + } + } + + nvAssert(pHC->stereoPin != NV_EVO_LOCK_PIN_ERROR); + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pHC->stereoPin)) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO_PIN, _LOCK_PIN_NONE); + } else { + pin = pHC->stereoPin - NV_EVO_LOCK_PIN_0; + data |= DRF_NUM(C37D, _HEAD_SET_CONTROL, _STEREO_PIN, + NVC37D_HEAD_SET_CONTROL_STEREO_PIN_LOCK_PIN(pin)); + } + + if (pHC->hdmi3D) { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO3D_STRUCTURE, _FRAME_PACKED); + } else { + data |= DRF_DEF(C37D, _HEAD_SET_CONTROL, _STEREO3D_STRUCTURE, _NORMAL); + } + + /* + * NVC67D_HEAD_SET_CONTROL_YUV420PACKER is only defined in NVC67D, but + * it is an unused bit in NVC37D_HEAD_SET_CONTROL, and YUV420 should only + * be set on >=nvdisplay 4.0, so it's okay to set it here. + */ + if (pHC->hwYuv420) { + data |= DRF_DEF(C67D, _HEAD_SET_CONTROL, _YUV420PACKER, _ENABLE); + } else { + data |= DRF_DEF(C67D, _HEAD_SET_CONTROL, _YUV420PACKER, _DISABLE); + } + + // Send the HeadSetControl method. 
+ nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, data); + + SetControl(pDevEvo, sd); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_LOCK_CHAIN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _HEAD_SET_LOCK_CHAIN, _POSITION, + pHC->lockChainPosition)); +} + +static void EvoSetHeadRefClkC3(NVDevEvoPtr pDevEvo, int head, NvBool external, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 sd; + + /* These methods should only apply to a single subdevice */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + if (external) { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A_CODE, + _VPLL_REF, + _QSYNC, + pDevEvo->gpus[sd].setSwSpareA[head]); + } else { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A_CODE, + _VPLL_REF, + _NO_PREF, + pDevEvo->gpus[sd].setSwSpareA[head]); + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, pDevEvo->gpus[sd].setSwSpareA[head]); + nvPopEvoSubDevMask(pDevEvo); + } + } +} + +static void EvoSORSetControlC3(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 hwProtocol = 0; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + nvAssert(orIndex != NV_INVALID_OR); + + if (headMask != 0) { + switch (protocol) { + default: + nvAssert(!"Unknown SOR protocol"); + /* Fall through */ + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_A: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A; + break; + case NVKMS_PROTOCOL_SOR_SINGLE_TMDS_B: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B; + break; + case NVKMS_PROTOCOL_SOR_DUAL_TMDS: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS; + break; + case NVKMS_PROTOCOL_SOR_DP_A: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_A; + break; + case NVKMS_PROTOCOL_SOR_DP_B: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_DP_B; + break; + case NVKMS_PROTOCOL_SOR_LVDS_CUSTOM: + hwProtocol = NVC37D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM; + break; + case NVKMS_PROTOCOL_SOR_HDMI_FRL: + hwProtocol = NVC67D_SOR_SET_CONTROL_PROTOCOL_HDMI_FRL; + break; + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_NUM(C37D, _SOR_SET_CONTROL, _PROTOCOL, hwProtocol) | + DRF_DEF(C37D, _SOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE) | + DRF_DEF(C37D, _SOR_SET_CONTROL, _PIXEL_REPLICATE_MODE, _OFF)); +} + +static NvU32 EvoGetPixelDepthC3(const enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_18_444; + case NVKMS_PIXEL_DEPTH_24_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; + case NVKMS_PIXEL_DEPTH_30_444: + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_30_444; + } + nvAssert(!"Unexpected pixel depth"); + return NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_PIXEL_DEPTH_BPP_24_444; +} + +static void 
EvoPIORSetControlC3(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_PIOR_EXT_TMDS_ENC); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_PIOR_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _PIOR_SET_CONTROL, _OWNER_MASK, headMask) | + DRF_DEF(C37D, _PIOR_SET_CONTROL, _PROTOCOL, _EXT_TMDS_ENC) | + DRF_DEF(C37D, _PIOR_SET_CONTROL, _DE_SYNC_POLARITY, _POSITIVE_TRUE)); +} + +static void EvoDSISetControlC6(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + /* Only Head 0 can be used to drive DSI output on Orin */ + nvAssert((headMask == 0x0) || (headMask == 0x1)); + /* Only one DSI engine exists on Orin */ + nvAssert(orIndex == 0); + + if (headMask != 0) { + nvAssert(protocol == NVKMS_PROTOCOL_DSI); + } + + nvDmaSetStartEvoMethod(pChannel, NVC67D_DSI_SET_CONTROL(orIndex), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _DSI_SET_CONTROL, _OWNER_MASK, headMask)); +} + +static void EvoORSetControlC3Helper(const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask) +{ + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + EvoSORSetControlC3(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + EvoPIORSetControlC3(pConnectorEvo, protocol, orIndex, headMask); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + /* No DAC support on nvdisplay. Fall through. 
*/ + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } +} + +static void EvoORSetControlC3(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + EvoORSetControlC3Helper(pConnectorEvo, protocol, orIndex, headMask); +} + +static void EvoORSetControlC6(NVDevEvoPtr pDevEvo, + const NVConnectorEvoRec *pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + const NvU32 headMask, + NVEvoUpdateState *updateState) +{ + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pDevEvo->core); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI: + EvoDSISetControlC6(pConnectorEvo, protocol, orIndex, headMask); + break; + default: + EvoORSetControlC3Helper(pConnectorEvo, protocol, orIndex, headMask); + break; + } +} + +static void EvoHeadSetControlORC3(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 hwPixelDepth = EvoGetPixelDepthC3(pTimings->pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _COMPLETE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth) | + (colorSpaceOverride ? + (DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE))); +} + +static void EvoHeadSetControlORC5(NVDevEvoPtr pDevEvo, + const int head, + const NVHwModeTimingsEvo *pTimings, + const NvBool colorSpaceOverride, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 hwPixelDepth = EvoGetPixelDepthC3(pTimings->pixelDepth); + const NvU16 colorSpaceFlag = nvEvo1GetColorSpaceFlag(pDevEvo, + colorSpaceOverride); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CONTROL_OUTPUT_RESOURCE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _CRC_MODE, _COMPLETE_RASTER) | + (pTimings->hSyncPol ? + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _HSYNC_POLARITY, _POSITIVE_TRUE)) | + (pTimings->vSyncPol ? 
+ DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _NEGATIVE_TRUE) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _VSYNC_POLARITY, _POSITIVE_TRUE)) | + DRF_NUM(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _PIXEL_DEPTH, hwPixelDepth) | + (colorSpaceOverride ? + (DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _ENABLE) | + DRF_NUM(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_FLAG, colorSpaceFlag)) : + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _COLOR_SPACE_OVERRIDE, _DISABLE)) | + DRF_DEF(C57D, _HEAD_SET_CONTROL_OUTPUT_RESOURCE, _EXT_PACKET_WIN, _NONE)); +} + +static void EvoHeadSetDisplayIdC3(NVDevEvoPtr pDevEvo, + const NvU32 head, const NvU32 displayId, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_ID(head, 0), 1); + nvDmaSetEvoMethodData(pChannel, displayId); +} + +static void SetFormatUsageBoundsOneWindow3(NVDevEvoPtr pDevEvo, NvU32 window, + const NvU64 supportedFormats, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 value = 0; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED1BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED2BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED4BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _RGB_PACKED8BPP, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_PACKED422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR420, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_SEMI_PLANAR444, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR420, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR422, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _EXT_YUV_SEMI_PLANAR444, _TRUE, value); + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_PLANAR444, _TRUE, value); + } + if (supportedFormats & 
NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420) { + value = FLD_SET_DRF(C37D, _WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS, + _YUV_PLANAR420, _TRUE, value); + } + + if (supportedFormats != 0 && value == 0) { + nvAssert(!"Unknown depth in SetFormatUsageBoundsOneWindow"); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_FORMAT_USAGE_BOUNDS(window), 1); + nvDmaSetEvoMethodData(pChannel, value); +} + +static inline NvU32 GetMaxPixelsFetchedPerLine(NvU16 inWidth, + NvU16 maxHDownscaleFactor) +{ + /* + * Volta should be: + * (((SetViewportSizeIn.Width + 6) * SetMaxInputScaleFactor.Horizontal + 1023 ) >> 10 ) + 6 + * + * Turing should be: + * (((SetViewportSizeIn.Width + 6) * SetMaxInputScaleFactor.Horizontal + 1023 ) >> 10 ) + 8 + * + * Ampere, which adds "overfetch" to have tiled displays / 2-head-1-OR use cases without + * visual artefacts at head boundaries: + * (((SetViewportSizeIn.Width + 14) * SetMaxInputScaleFactor.Horizontal + 1023) >> 10) + 8 + * + * We don't have to be super-precise when programming maxPixelsFetchedPerLine, + * so return realistic worst-case value. + */ + return (((inWidth + 14) * maxHDownscaleFactor + 1023) >> 10) + 8; +} + +static void SetScalingUsageBoundsOneWindow5( + NVDevEvoPtr pDevEvo, NvU32 window, + const struct NvKmsScalingUsageBounds *pScaling, + const NVHwModeViewPortEvo *pViewPort, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 setWindowUsageBounds = NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, + NVC57D_WINDOW_SET_MAX_INPUT_SCALE_FACTOR(window), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _WINDOW_SET_MAX_INPUT_SCALE_FACTOR, _HORIZONTAL, + pScaling->maxHDownscaleFactor) | + DRF_NUM(C57D, _WINDOW_SET_MAX_INPUT_SCALE_FACTOR, _VERTICAL, + pScaling->maxVDownscaleFactor)); + + setWindowUsageBounds |= + (DRF_NUM(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _MAX_PIXELS_FETCHED_PER_LINE, + GetMaxPixelsFetchedPerLine(pViewPort->in.width, + pScaling->maxHDownscaleFactor))) | + (pScaling->vTaps >= NV_EVO_SCALER_5TAPS ? + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_5) : + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2)) | + (pScaling->vUpscalingAllowed ? + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) : + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE)); + nvDmaSetStartEvoMethod(pChannel, + NVC57D_WINDOW_SET_WINDOW_USAGE_BOUNDS(window), 1); + nvDmaSetEvoMethodData(pChannel, setWindowUsageBounds); +} + +static NvBool EvoSetUsageBounds3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + const struct NvKmsUsageBounds *pCurrentUsage = + &pDevEvo->gpus[sd].headState[head].usage; + /* Return FALSE if a core channel UPDATE isn't actually needed. 
*/ + NvBool needCoreUpdate = FALSE; + NvU32 layer; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NvU64 currentFormats = 0; + NvU64 targetFormats = 0; + + if (pCurrentUsage->layer[layer].usable) { + currentFormats = + pCurrentUsage->layer[layer].supportedSurfaceMemoryFormats; + } + + if (pUsage->layer[layer].usable) { + targetFormats = pUsage->layer[layer].supportedSurfaceMemoryFormats; + } + + if (targetFormats == currentFormats) { + continue; + } + + SetFormatUsageBoundsOneWindow3(pDevEvo, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + targetFormats, + updateState); + needCoreUpdate = TRUE; + } + + return needCoreUpdate; +} + +static NvBool EvoSetUsageBoundsC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + return EvoSetUsageBounds3(pDevEvo, sd, head, pUsage, updateState); +} + +static NvBool EvoSetUsageBoundsC5(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const struct NvKmsUsageBounds *pUsage, + NVEvoUpdateState *updateState) +{ + const struct NvKmsUsageBounds *pCurrentUsage = + &pDevEvo->gpus[sd].headState[head].usage; + NvBool needCoreUpdate; + NvU32 layer; + + needCoreUpdate = EvoSetUsageBounds3(pDevEvo, sd, head, pUsage, updateState); + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!nvEvoScalingUsageBoundsEqual(&pCurrentUsage->layer[layer].scaling, + &pUsage->layer[layer].scaling)) { + const NVHwModeViewPortEvo *pViewPort = + &pDevEvo->gpus[sd].pDispEvo->headState[head].timings.viewPort; + + SetScalingUsageBoundsOneWindow5( + pDevEvo, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + &pUsage->layer[layer].scaling, + pViewPort, + updateState); + needCoreUpdate = TRUE; + } + } + + return needCoreUpdate; +} + +static void EvoSetNotifierC3(NVDevEvoRec *pDevEvo, + const NvBool notify, + const NvBool awaken, + const NvU32 notifier, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + // To work around HW BUG 1945716, set the core channel completion notifier + // context DMA to 0 when notification is not requested. + if (notify) { + NvU32 sd; + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + nvDmaSetStartEvoMethod(pChannel, + NVC37D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, + _SET_CONTEXT_DMA_NOTIFIER, + _HANDLE, + pDevEvo->core->notifiersDma[sd].ctxHandle)); + nvPopEvoSubDevMask(pDevEvo); + } + } + } else { + nvDmaSetStartEvoMethod(pChannel, + NVC37D_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SET_CONTEXT_DMA_NOTIFIER, _HANDLE, 0)); + } + + /* + * XXXnvdisplay: Note that nvdClass_01.mfs says: + * "The units of the offset are 16 bytes.", while dispClass_02.mfs says: + * "The units of the offset are 32 bit words." + * The "legacy" 32-bit notifier format is no longer supported. This will + * have to be exposed to upper layers. + */ + ASSERT_DRF_NUM(C37D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _SET_NOTIFIER_CONTROL, _OFFSET, notifier) | + (awaken ? 
+ DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE_AWAKEN) : + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _MODE, _WRITE)) | + (notify ? + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _NOTIFY, _ENABLE) : + DRF_DEF(C37D, _SET_NOTIFIER_CONTROL, _NOTIFY, _DISABLE))); +} + +static void UpdateCoreC3(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask, + NvU32 flipLockPin, + NvBool releaseElv) +{ + NvU32 head, interlockFlags = 0; + NvU32 window, windowInterlockFlags = 0; + NvU32 update = DRF_NUM(C37D, _UPDATE, _FLIP_LOCK_PIN, flipLockPin); + + update |= releaseElv ? DRF_DEF(C37D, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + for (head = 0; head < NV_EVO_CHANNEL_MASK_CURSOR__SIZE; head++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _CURSOR, head, _ENABLE, + interlockChannelMask)) { + interlockFlags |= + DRF_IDX_DEF(C37D, _SET_INTERLOCK_FLAGS, + _INTERLOCK_WITH_CURSOR, head, _ENABLE); + } + } + + for (window = 0; window < NV_EVO_CHANNEL_MASK_WINDOW__SIZE; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + interlockChannelMask)) { + windowInterlockFlags |= + DRF_IDX_DEF(C37D, _SET_WINDOW_INTERLOCK_FLAGS, + _INTERLOCK_WITH_WINDOW, window, _ENABLE); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, interlockFlags); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_SET_WINDOW_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, windowInterlockFlags); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, update); + + nvDmaKickoffEvo(pChannel); +} + +static void UpdateWindowIMM(NVEvoChannelPtr pChannel, + NVEvoChannelMask winImmChannelMask, + NVEvoChannelMask winImmInterlockMask, + NvBool releaseElv) +{ + nvAssert((winImmChannelMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + nvAssert((winImmInterlockMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + + if ((winImmChannelMask & pChannel->channelMask) != 0) { + NvU32 updateImm = 0; + + if ((winImmInterlockMask & pChannel->channelMask) != 0) { + updateImm |= DRF_DEF(C37B, _UPDATE, _INTERLOCK_WITH_WINDOW, _ENABLE); + } else { + updateImm |= DRF_DEF(C37B, _UPDATE, _INTERLOCK_WITH_WINDOW, _DISABLE); + } + updateImm |= releaseElv ? DRF_DEF(C37B, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + nvDmaSetStartEvoMethod(pChannel->imm.u.dma, NVC37B_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel->imm.u.dma, updateImm); + nvDmaKickoffEvo(pChannel->imm.u.dma); + } +} + +static void UpdateWindowC3(NVEvoChannelPtr pChannel, + NVEvoChannelMask interlockChannelMask, + NVEvoChannelMask winImmChannelMask, + NVEvoChannelMask winImmInterlockMask, + NvBool transitionWAR, + NvU32 flipLockPin, + NvBool releaseElv) +{ + NvU32 head, interlockFlags = 0; + NvU32 window, windowInterlockFlags = 0; + NvU32 update = DRF_NUM(C37E, _UPDATE, _FLIP_LOCK_PIN, flipLockPin); + + update |= releaseElv ? DRF_DEF(C37E, _UPDATE, _RELEASE_ELV, _TRUE) : 0; + + if ((winImmInterlockMask & pChannel->channelMask) != 0) { + /* + * We expect winImmChannelMask to always be a superset of + * winImmInterlockMask. We should never interlock with a window + * immediate channel if we're not also going to kick off that + * window immediate channel. + */ + nvAssert((winImmChannelMask & pChannel->channelMask) != 0); + + update |= DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, _ENABLE); + } else { + update |= DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, _DISABLE); + } + + // Nothing currently requires updating a window channel without releasing + // ELV. 
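+    // (The GV100 timestamp WAR in InsertAdditionalTimestampFlip() pushes its
+    // non-ReleaseElv UPDATE directly, without going through this function.)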
+ nvAssert(releaseElv); + + if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + interlockChannelMask)) { + interlockFlags |= + DRF_DEF(C37E, _SET_INTERLOCK_FLAGS, _INTERLOCK_WITH_CORE, _ENABLE); + } + + for (head = 0; head < NV_EVO_CHANNEL_MASK_CURSOR__SIZE; head++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _CURSOR, head, _ENABLE, + interlockChannelMask)) { + interlockFlags |= + DRF_IDX_DEF(C37E, _SET_INTERLOCK_FLAGS, + _INTERLOCK_WITH_CURSOR, head, _ENABLE); + } + } + + for (window = 0; window < NV_EVO_CHANNEL_MASK_WINDOW__SIZE; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + interlockChannelMask)) { + windowInterlockFlags |= + DRF_IDX_DEF(C37E, _SET_WINDOW_INTERLOCK_FLAGS, + _INTERLOCK_WITH_WINDOW, window, _ENABLE); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, interlockFlags); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_WINDOW_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, windowInterlockFlags); + + /* + * If we determined that this update will transition from NULL to non-NULL + * ctxdma or vice-versa, bookend this update method with software methods + * to notify RM to apply a workaround for hardware bug 2193096. + */ + if (transitionWAR) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SW_SET_MCLK_SWITCH, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SW_SET_MCLK_SWITCH, _ENABLE, _FALSE)); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, update); + + if (transitionWAR) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SW_SET_MCLK_SWITCH, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SW_SET_MCLK_SWITCH, _ENABLE, _TRUE)); + } + + UpdateWindowIMM(pChannel, winImmChannelMask, + winImmInterlockMask, releaseElv); + + nvDmaKickoffEvo(pChannel); +} + +/*! + * This function finds any fliplocked channels in the current update and pushes + * flips for them setting the appropriate fliplock pin and interlock masks. + * + * All of this complexity is here to support the case where multiple heads on a + * single GPU are fliplocked together, but flip requests come in for only a + * subset of those heads at a time (e.g., separate X screens on a single GPU). + * Unlike previous hardware, we're required to interlock all channels which are + * part of a fliplock update, instead of just using fliplock across heads. + */ +/* + * There are two scenarios: + * a) All fliplocked channels on this GPU are already part of this update. In + * that case we just need to set the appropriate fliplock pin for each, and + * we're done -- they're already interlocked. + * b) Some fliplocked channels are not part of this update. We still need to + * set them in the interlock mask, but it's dangerous to interlock with any + * channels *not* in the fliplock group; as an example: + * With two separate X screens on a single GPU, each driving one monitor, + * fliplocked together, if we get a flip request for screen 0/head 0 that + * interlocks core and base, then a second flip request for screen 1/head1 + * that interlocks core and base, we would end up programming one flip on + * the window on head 0, one flip on the window on head 1, and two flips in + * the core channel. The second core channel flip would never complete + * since it would be waiting for an interlock with the other window + * channels. 
+ * + * To handle this case we pull the fliplocked channels out of this update + * and update them interlocked with all fliplocked channels (including those + * that aren't in this update), then proceed with a normal interlocked + * update excluding the fliplocked channels. + * + * \return Channel mask of channels which were handled by this function. + * Channels in this mask should be considered done and have no + * further updates pushed. No other channels should be + * interlocked with them. + */ +static NVEvoChannelMask ProcessFlipLockUpdates( + NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 *pFlipLockPin, + const NVEvoUpdateState *updateState) +{ + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 head, window; + /* Channels that are part of this update which need to be fliplocked. */ + NVEvoChannelMask flipLockUpdateMask = 0; + /* All channels on this subdevice which are fliplocked. */ + NVEvoChannelMask flipLockAllMask = 0; + /* Channels which this function has handled and do not need further + * processing. */ + NVEvoChannelMask handledMask = 0; + NVEvoLockPin pin = NV_EVO_LOCK_PIN_ERROR; + NvU32 hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + + /* First check if any of the fliplock-qualifying channels are actually + * fliplocked, and determine which pin they're using. */ + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock) { + /* Convert the head index to a window index (two windows per head, + * one "base" and one "overlay"; we only fliplock "base") */ + NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, head * 2, _ENABLE); + if (updateState->subdev[sd].flipLockQualifyingMask & windowMask) { + if (flipLockUpdateMask == 0) { + pin = pHC->flipLockPin; + } else { + /* For now, we only support kicking off a single fliplock + * group as part of a single update call. */ + nvAssert(pin == pHC->flipLockPin); + } + flipLockUpdateMask |= windowMask; + } + } + } + + /* If we don't have any fliplocked updates, then we're done. */ + if (flipLockUpdateMask == 0) { + goto done; + } + + /* + * Gather all of the channels on this GPU which are part of this fliplock + * group (some of which may not be part of this update). + */ + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControl[head]; + + if (pHC->flipLock && (pHC->flipLockPin == pin)) { + NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, head * 2, _ENABLE); + flipLockAllMask |= windowMask; + } + } + + /* Convert the pin to a hardware enum. */ + if (NV_EVO_LOCK_PIN_IS_INTERNAL(pin)) { + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_INTERNAL_FLIP_LOCK_0 + + (pin - NV_EVO_LOCK_PIN_INTERNAL_0); + } else { + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN(pin - NV_EVO_LOCK_PIN_0); + } + + /* If we're updating all of the fliplocked channels in this update, we can + * interlock with other channels as normal. */ + if (flipLockUpdateMask == flipLockAllMask) { + goto done; + } + + /* + * Kick off each of our update channels, using the full fliplock mask and + * hwPin calculated above. 
+ */ + nvAssert((flipLockUpdateMask & ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) == 0); + for (window = 0; window < pDevEvo->numWindows; window++) { + const NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE); + NVEvoChannelMask winImmChannelMask = + updateState->subdev[sd].winImmChannelMask; + NVEvoChannelMask winImmInterlockMask = + updateState->subdev[sd].winImmInterlockMask; + if (flipLockUpdateMask & windowMask) { + const NvBool transitionWAR = + (updateState->subdev[sd].flipTransitionWAR & windowMask) != 0; + UpdateWindowC3(pDevEvo->window[window], + flipLockAllMask, + winImmChannelMask, + winImmInterlockMask, + transitionWAR, + hwPin, TRUE /* releaseElv */); + } else { + UpdateWindowIMM(pDevEvo->window[window], winImmChannelMask, + winImmInterlockMask, TRUE /* releaseElv */); + } + } + handledMask = flipLockUpdateMask; + hwPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + +done: + *pFlipLockPin = hwPin; + return handledMask; +} + +static void EvoUpdateC3(NVDevEvoPtr pDevEvo, + const NVEvoUpdateState *updateState, + NvBool releaseElv) +{ + NvU32 sd, window; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoChannelMask updateChannelMask = + updateState->subdev[sd].channelMask; + const NVEvoChannelMask noCoreInterlockMask = + updateState->subdev[sd].noCoreInterlockMask; + NVEvoChannelMask coreInterlockMask = + updateChannelMask & ~noCoreInterlockMask; + const NvU32 subDeviceMask = (1 << sd); + NvU32 flipLockPin = NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE; + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + if (updateState->subdev[sd].flipLockQualifyingMask) { + NVEvoChannelMask handledChannels = 0; + + nvAssert((updateState->subdev[sd].flipLockQualifyingMask & + ~updateChannelMask) == 0); + nvAssert((updateState->subdev[sd].flipLockQualifyingMask & + updateState->subdev[sd].noCoreInterlockMask) == 0); + + handledChannels = + ProcessFlipLockUpdates(pDevEvo, sd, &flipLockPin, updateState); + + updateChannelMask &= ~handledChannels; + coreInterlockMask &= ~handledChannels; + } + + if (FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + updateChannelMask)) { + const NVEvoChannelMask thisInterlockMask = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + coreInterlockMask) ? coreInterlockMask : 0; + UpdateCoreC3(pDevEvo->core, thisInterlockMask, flipLockPin, + releaseElv); + } + + for (window = 0; window < pDevEvo->numWindows; window++) { + const NVEvoChannelMask windowMask = + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE); + NVEvoChannelMask winImmChannelMask = + updateState->subdev[sd].winImmChannelMask; + NVEvoChannelMask winImmInterlockMask = + updateState->subdev[sd].winImmInterlockMask; + if (updateChannelMask & windowMask) { + const NvBool transitionWAR = + (updateState->subdev[sd].flipTransitionWAR & windowMask) != 0; + NVEvoChannelMask thisInterlockMask = + FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + coreInterlockMask) ? coreInterlockMask : 0; + UpdateWindowC3(pDevEvo->window[window], + thisInterlockMask, + winImmChannelMask, + winImmInterlockMask, + transitionWAR, + flipLockPin, + releaseElv); + } else { + UpdateWindowIMM(pDevEvo->window[window], winImmChannelMask, + winImmInterlockMask, releaseElv); + } + } + + nvPopEvoSubDevMask(pDevEvo); + } +} + +/*! + * Initialize head-specific IMP param fields. + * + * Initialize the NVC372_CTRL_IMP_HEAD for the specific head. + * + * \param[out] pImpHead The param structure to initialize. 
+ * \param[in] pTimings The rastering timings and viewport configuration. + * \param[in] head The number of the head that will be driven. + * + * \return FALSE iff the parameters aren't even legal for HW. + */ +static NvBool AssignPerHeadImpParams(NVC372_CTRL_IMP_HEAD *pImpHead, + const NVHwModeTimingsEvo *pTimings, + const int head, + const NVEvoScalerCaps *pScalerCaps) +{ + const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort; + struct NvKmsScalingUsageBounds scalingUsageBounds = { }; + + pImpHead->headIndex = head; + + /* raster timings */ + + pImpHead->maxPixelClkKHz = pTimings->pixelClock; + + pImpHead->rasterSize.width = pTimings->rasterSize.x; + pImpHead->rasterSize.height = pTimings->rasterSize.y; + pImpHead->rasterBlankStart.X = pTimings->rasterBlankStart.x; + pImpHead->rasterBlankStart.Y = pTimings->rasterBlankStart.y; + pImpHead->rasterBlankEnd.X = pTimings->rasterBlankEnd.x; + pImpHead->rasterBlankEnd.Y = pTimings->rasterBlankEnd.y; + pImpHead->rasterVertBlank2.yStart = pTimings->rasterVertBlank2Start; + pImpHead->rasterVertBlank2.yEnd = pTimings->rasterVertBlank2End; + + /* XXX TODO: Fill in correct scanlock information (only needed for + * MIN_VPSTATE). */ + pImpHead->control.masterLockMode = NV_DISP_LOCK_MODE_NO_LOCK; + pImpHead->control.masterLockPin = NV_DISP_LOCK_PIN_UNSPECIFIED; + pImpHead->control.slaveLockMode = NV_DISP_LOCK_MODE_NO_LOCK; + pImpHead->control.slaveLockPin = NV_DISP_LOCK_PIN_UNSPECIFIED; + + if (!nvComputeScalingUsageBounds(pScalerCaps, + pViewPort->in.width, pViewPort->in.height, + pViewPort->out.width, pViewPort->out.height, + pViewPort->hTaps, pViewPort->vTaps, + &scalingUsageBounds)) { + return FALSE; + } + pImpHead->bUpscalingAllowedV = scalingUsageBounds.vUpscalingAllowed; + pImpHead->maxDownscaleFactorV = scalingUsageBounds.maxVDownscaleFactor; + pImpHead->maxDownscaleFactorH = scalingUsageBounds.maxHDownscaleFactor; + pImpHead->outputScalerVerticalTaps = + NVEvoScalerTapsToNum(scalingUsageBounds.vTaps); + + if (!ComputeMinFrameIdle(pTimings, + &pImpHead->minFrameIdle.leadingRasterLines, + &pImpHead->minFrameIdle.trailingRasterLines)) { + return FALSE; + } + + /* Assume we'll need the full 1025-entry output LUT. */ + pImpHead->lut = NVC372_CTRL_IMP_LUT_USAGE_1025; + + /* Cursor width, in units of 32 pixels. Assume we use the maximum size. */ + pImpHead->cursorSize32p = 256 / 32; + + pImpHead->bEnableDsc = pTimings->hdmiFrlConfig.dscInfo.bEnableDSC || + pTimings->dpDsc.enable; + + return TRUE; +} + +/*! + * Initialize window-specific IMP param fields. + * + * Initialize the NVC372_CTRL_IMP_WINDOW for the specific window. + * + * \param[out] pImpWindow The param structure to initialize. + * \param[in] pViewPort The viewport configuration for the head that + * the window is bound to. + * \param[in] supportedFormats The surface memory formats that can be + * supported on this window. + * \param[in] window The number of the window. + * \param[in] head The number of the head that the window is + * bound to. 
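+ * \param[in]  pScaling          The scaling usage bounds for the layer
+ *                               that this window is assigned to.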
+ */ +static void AssignPerWindowImpParams(NVC372_CTRL_IMP_WINDOW *pImpWindow, + const NVHwModeViewPortEvo *pViewPort, + const NvU64 supportedFormats, + const struct NvKmsScalingUsageBounds *pScaling, + const int window, + const int head) +{ + pImpWindow->windowIndex = window; + pImpWindow->owningHead = head; + + pImpWindow->formatUsageBound = 0; + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED1BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_1_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED2BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_2_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED4BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_4_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_RGB_PACKED8BPP) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_RGB_PACKED_8_BPP; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PACKED422) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_PACKED_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP420) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_420; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP422) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_SP444) { + pImpWindow->formatUsageBound |= NVC372_CTRL_FORMAT_YUV_SEMI_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP420) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_420; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP422) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_422; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_EXT_YUV_SP444) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_EXT_YUV_SEMI_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR444) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_YUV_PLANAR_444; + } + if (supportedFormats & NVKMS_SURFACE_MEMORY_FORMATS_YUV_PLANAR420) { + pImpWindow->formatUsageBound |= + NVC372_CTRL_FORMAT_YUV_PLANAR_420; + } + + if (pImpWindow->formatUsageBound == 0) { + nvAssert(!"Unknown format in AssignPerWindowImpParams"); + } + + pImpWindow->maxPixelsFetchedPerLine = + GetMaxPixelsFetchedPerLine(pViewPort->in.width, + pScaling->maxHDownscaleFactor); + + pImpWindow->maxDownscaleFactorH = pScaling->maxHDownscaleFactor; + pImpWindow->maxDownscaleFactorV = pScaling->maxVDownscaleFactor; + pImpWindow->bUpscalingAllowedV = pScaling->vUpscalingAllowed; + pImpWindow->inputScalerVerticalTaps = + NVEvoScalerTapsToNum(pScaling->vTaps); + + /* Assume we need a full 1025-entry window (input) LUT and no tone-mapping + * output (TMO) LUT. 
*/ + pImpWindow->lut = NVC372_CTRL_IMP_LUT_USAGE_1025; + pImpWindow->tmoLut = NVC372_CTRL_IMP_LUT_USAGE_NONE; +} + +static void +EvoIsModePossibleC3(NVDispEvoPtr pDispEvo, + const NVEvoIsModePossibleDispInput *pInput, + NVEvoIsModePossibleDispOutput *pOutput) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NVEvoCapabilitiesPtr pEvoCaps = &pDevEvo->gpus[0].capabilities; + NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pImp = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_IMP_PARAMS, sizeof(*pImp)); + NvBool result = FALSE; + NvU32 head; + NvU32 ret; + + nvkms_memset(pImp, 0, sizeof(*pImp)); + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVHwModeTimingsEvo *pTimings = pInput->head[head].pTimings; + const struct NvKmsUsageBounds *pUsage = pInput->head[head].pUsage; + const NVHwModeViewPortEvo *pViewPort; + NvU8 impHeadIndex; + NvU32 layer; + + if (pTimings == NULL) { + continue; + } + + pViewPort = &pTimings->viewPort; + + impHeadIndex = pImp->numHeads; + pImp->numHeads++; + nvAssert(impHeadIndex < NVC372_CTRL_MAX_POSSIBLE_HEADS); + + if (!AssignPerHeadImpParams(&pImp->head[impHeadIndex], + pTimings, + head, + &pEvoCaps->head[head].scalerCaps)) { + goto done; + } + + /* XXXnvdisplay: This assumes a fixed window<->head mapping */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pUsage->layer[layer].usable) { + continue; + } + + nvAssert(pImp->numWindows < NVC372_CTRL_MAX_POSSIBLE_WINDOWS); + + AssignPerWindowImpParams( + &pImp->window[pImp->numWindows], + pViewPort, + pUsage->layer[layer].supportedSurfaceMemoryFormats, + &pUsage->layer[layer].scaling, + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask), + head); + + pImp->numWindows++; + } + } + + pImp->base.subdeviceIndex = pDispEvo->displayOwner; + + /* XXXnvdisplay: Set bUseCachedPerfState? */ + + /* + * Set NEED_MIN_VPSTATE if reallocBandwidth != NONE. RM-IMP will only + * output the min required display bandwidth values if NEED_MIN_VPSTATE + * is set. + */ + if (pInput->requireBootClocks || + (pInput->reallocBandwidth != NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE)) { + // XXX TODO: IMP requires lock pin information if pstate information is + // requested. For now, just assume no locking. + pImp->options = NVC372_CTRL_IS_MODE_POSSIBLE_OPTIONS_NEED_MIN_VPSTATE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->rmCtrlHandle, + NVC372_CTRL_CMD_IS_MODE_POSSIBLE, + pImp, sizeof(*pImp)); + + // XXXnvdisplay TODO: check pImp->minImpVPState if + // pInput->requireBootClocks is true? + if (ret != NV_OK || !pImp->bIsPossible) { + goto done; + } + + result = TRUE; + +done: + pOutput->possible = result; + if (pOutput->possible) { + pOutput->minRequiredBandwidthKBPS = pImp->minRequiredBandwidthKBPS; + pOutput->floorBandwidthKBPS = pImp->floorBandwidthKBPS; + } + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_IMP_PARAMS); +} + +static void EvoPrePostIMPC3(NVDispEvoPtr pDispEvo, NvBool isPre) +{ + /* Nothing to do on nvdisplay -- pre/post IMP calls are not required. 
*/ +} + +static void +EvoFlipC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition); + +static NvBool IsCscMatrixIdentity(const struct NvKmsCscMatrix *matrix) +{ + const struct NvKmsCscMatrix identity = NVKMS_IDENTITY_CSC_MATRIX; + + int y; + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + if (matrix->m[y][x] != identity.m[y][x]) { + return FALSE; + } + } + } + + return TRUE; +} + +/* + * Returns TRUE iff the CSC should be enabled (i.e., the matrix is not the + * identity matrix). + */ +static NvBool SetCscMatrixC3(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + NvU32 method = NVC37E_SET_CSC_RED2RED; + int y; + + if (IsCscMatrixIdentity(matrix)) { + return FALSE; + } + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. + NvU32 val = DRF_NUM(C37E, _SET_CSC_RED2RED, _COEFF, + matrix->m[y][x]); + + nvDmaSetStartEvoMethod(pChannel, method, 1); + nvDmaSetEvoMethodData(pChannel, val); + + method += 4; + } + } + + return TRUE; +} + +static void SetCscMatrixC5Wrapper(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix, + NvU32 coeffMethod, NvU32 controlMethod, + NvU32 enableMethodData, + NvU32 disableMethodData) +{ + int y; + + if (IsCscMatrixIdentity(matrix)) { + nvDmaSetStartEvoMethod(pChannel, controlMethod, 1); + nvDmaSetEvoMethodData(pChannel, disableMethodData); + return; + } + + nvDmaSetStartEvoMethod(pChannel, controlMethod, 1); + nvDmaSetEvoMethodData(pChannel, enableMethodData); + + for (y = 0; y < 3; y++) { + int x; + + for (x = 0; x < 4; x++) { + // Use DRF_NUM to truncate client-supplied values that are out of + // range. + // + // Note that it doesn't matter whether we use the CSC00 or CSC11 + // methods to truncate since they're identical. + NvU32 val = DRF_NUM(C57E, _SET_CSC00COEFFICIENT_C00, _VALUE, + matrix->m[y][x]); + + nvDmaSetStartEvoMethod(pChannel, coeffMethod, 1); + nvDmaSetEvoMethodData(pChannel, val); + + coeffMethod += 4; + } + } +} + +static void SetCsc00MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + SetCscMatrixC5Wrapper(pChannel, + matrix, + NVC57E_SET_CSC00COEFFICIENT_C00, NVC57E_SET_CSC00CONTROL, + DRF_DEF(C57E, _SET_CSC00CONTROL, _ENABLE, _ENABLE), + DRF_DEF(C57E, _SET_CSC00CONTROL, _ENABLE, _DISABLE)); +} + +static void SetCsc11MatrixC5(NVEvoChannelPtr pChannel, + const struct NvKmsCscMatrix *matrix) +{ + SetCscMatrixC5Wrapper(pChannel, + matrix, + NVC57E_SET_CSC11COEFFICIENT_C00, NVC57E_SET_CSC11CONTROL, + DRF_DEF(C57E, _SET_CSC11CONTROL, _ENABLE, _ENABLE), + DRF_DEF(C57E, _SET_CSC11CONTROL, _ENABLE, _DISABLE)); +} + +/* + * WAR for GV100 HW bug 1978592: + * + * Timestamped flips allow SW to specify the earliest time that the next UPDATE + * will complete. Due to a HW bug, GV100 waits for the timestamp in the ARMED + * state (i.e. the timestamps that were pushed in the previous UPDATE) instead + * of the timestamp in the ASSEMBLY state (the time we want to postpone this + * flip until). + * + * This WAR inserts an additional UPDATE to push the timestamp from ASSEMBLY to + * ARMED while changing no other state, so the following normal UPDATE can + * wait for the correct timestamp. 
+ * + * This update needs to have the following characteristics: + * + * - MIN_PRESENT_INTERVAL 0 + * - TIMESTAMP_MODE _ENABLE + * - All other SET_PRESENT_CONTROL fields unmodified from previous UPDATE + * - SET_UPDATE_TIMESTAMP (target timestamp) + * - RELEASE_ELV _FALSE + * - Non-interlocked + * - Non-fliplocked + */ +static void +InsertAdditionalTimestampFlip(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + NvU32 presentControl = pChannel->oldPresentControl; + + /* This hardware bug is only present on GV100 which uses window + * class C37E. */ + nvAssert(pChannel->hwclass == NVC37E_WINDOW_CHANNEL_DMA); + + nvAssert(pHwState->timeStamp != 0); + + /* + * Update the necessary fields in SET_PRESENT_CONTROL without modifying + * the existing values by using the cached SET_PRESENT_CONTROL values + * from the previous update. + * + * Note that BEGIN_MODE must not be changed here; even though BEGIN_MODE + * may currently be NON_TEARING, a NON_TEARING + MIN_PRESENT_INTERVAL 0 + * flip will be correctly collapsed with the surrounding + * MIN_PRESENT_INTERVAL 1 flips. If we were to change BEGIN_MODE to + * IMMEDIATE, this would cause an additional delay due to the transition + * from NON_TEARING to IMMEDIATE. + */ + presentControl = FLD_SET_DRF_NUM(C37E, _SET_PRESENT_CONTROL, + _MIN_PRESENT_INTERVAL, + 0, presentControl); + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, + _TIMESTAMP_MODE, + _ENABLE, presentControl); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, presentControl); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + // Issue non-interlocked, non-fliplocked, non-ReleaseElv UPDATE + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_INTERLOCK_FLAGS, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, + NVC37E_SET_WINDOW_INTERLOCK_FLAGS, + 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_UPDATE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37E, _UPDATE, _RELEASE_ELV, _FALSE) | + DRF_NUM(C37E, _UPDATE, _FLIP_LOCK_PIN, + NVC37E_UPDATE_FLIP_LOCK_PIN_LOCK_PIN_NONE) | + DRF_DEF(C37E, _UPDATE, _INTERLOCK_WITH_WIN_IMM, + _DISABLE)); +} + +static void +EvoProgramSemaphore3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState) +{ + nvAssertSameSemaphoreSurface(pHwState); + + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo == NULL) { + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->syncObject.u.semaphores.acquireSurface; + + nvAssert(pNIso->format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY); + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pNIso->offsetInWords % 4) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + 
nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_ACQUIRE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.acquireValue); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + pHwState->syncObject.u.semaphores.releaseValue); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37E, _SET_SEMAPHORE_CONTROL, _OFFSET, + pNIso->offsetInWords / 4)); + } +} + +/*! + * On Tegra, syncpts are used for synchronization between SW and HW, + * and also across HW engines. Since NvDisplay 4.0 only natively + * understands semaphores, there's a SHIM layer in the memory subsystem + * that will convert semaphore acquires/releases into corresponding + * syncpoint reads/writes. As such, each syncpoint is mapped to an + * underlying 'dummy' semaphore surface, and the methods for these surfaces + * need to be programmed as if they were real memory-backed semaphores. + */ + +static void +EvoProgramSemaphore6(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState) +{ + NvU32 hCtxDma, offset, acqMode, relMode, value; + const NVFlipNIsoSurfaceEvoHwState *pNIso; + + /*! Program Acq-only semaphore */ + hCtxDma = offset = acqMode = relMode = value = 0; + if (pHwState->syncObject.usingSyncpt) { + hCtxDma = pHwState->syncObject.u.syncpts.preCtxDma; + offset = 0; + acqMode = DRF_DEF(C67E, _SET_ACQ_SEMAPHORE_CONTROL, _ACQ_MODE, _CGEQ); + value = pHwState->syncObject.u.syncpts.preValue; + } else { + if (pHwState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo != NULL) { + pNIso = &pHwState->syncObject.u.semaphores.acquireSurface; + hCtxDma = pNIso->pSurfaceEvo->planes[0].ctxDma; + offset = pNIso->offsetInWords / 4; + acqMode = DRF_DEF(C67E, _SET_ACQ_SEMAPHORE_CONTROL, _ACQ_MODE, _EQ); + value = pHwState->syncObject.u.semaphores.acquireValue; + } + } + + /*! set ctx dma handle */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_CONTEXT_DMA_ACQ_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_CONTEXT_DMA_ACQ, _SEMAPHORE_HANDLE, hCtxDma)); + /*! set semaphore control and acq mode */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_ACQ_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, offset | acqMode); + /*! set semaphore value */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_ACQ_SEMAPHORE_VALUE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_ACQ_SEMAPHORE_VALUE, _VALUE, value)); + + /*! Program Rel-only semaphore */ + hCtxDma = offset = acqMode = relMode = value = 0; + if (pHwState->syncObject.usingSyncpt) { + hCtxDma = pHwState->syncObject.u.syncpts.postCtxDma; + offset = 0; + acqMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _SKIP_ACQ, _TRUE); + relMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _REL_MODE, _WRITE); + value = pHwState->syncObject.u.syncpts.postValue; + /*! 
increase value in host1x hardware as well */ + if (hCtxDma != 0) { + NvKmsSyncPtOpParams params = { }; + params.incr_max.id = pChannel->postSyncpt.id; + params.incr_max.incr = 1; + nvkms_syncpt_op(NVKMS_SYNCPT_OP_INCR_MAX, ¶ms); + } + } else { + if (pHwState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) { + pNIso = &pHwState->syncObject.u.semaphores.releaseSurface; + hCtxDma = pNIso->pSurfaceEvo->planes[0].ctxDma; + offset = pNIso->offsetInWords / 4; + acqMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _SKIP_ACQ, _TRUE); + relMode = DRF_DEF(C67E, _SET_SEMAPHORE_CONTROL, _REL_MODE, _WRITE); + value = pHwState->syncObject.u.semaphores.releaseValue; + } + } + + /*! set ctx dma handle */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_CONTEXT_DMA_SEMAPHORE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_CONTEXT_DMA_SEMAPHORE, _HANDLE, hCtxDma)); + /*! set semaphore control and acq-rel mode */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SEMAPHORE_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, offset | acqMode | relMode); + /*! set semaphore value */ + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SEMAPHORE_RELEASE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67E, _SET_SEMAPHORE_RELEASE, _VALUE, value)); +} + +static NvBool +EvoFlipC3Common(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo; + NvU32 presentControl, eye; + NvU32 storage; + NvU8 planeIndex; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* program notifier */ + + if (pHwState->completionNotifier.surface.pSurfaceEvo == NULL) { + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, 0); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, 0); + + } else { + const NVFlipNIsoSurfaceEvoHwState *pNIso = + &pHwState->completionNotifier.surface; + NvU32 value = 0; + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_NOTIFIER, 1); + nvDmaSetEvoMethodData(pChannel, pNIso->pSurfaceEvo->planes[0].ctxDma); + + nvAssert(pNIso->format == NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY); + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pNIso->offsetInWords % 4) == 0); + + value = FLD_SET_DRF_NUM(C37E, _SET_NOTIFIER_CONTROL, _OFFSET, + pNIso->offsetInWords / 4, value); + + if (pHwState->completionNotifier.awaken) { + value = FLD_SET_DRF(C37E, _SET_NOTIFIER_CONTROL, _MODE, + _WRITE_AWAKEN, value); + } else { + value = FLD_SET_DRF(C37E, _SET_NOTIFIER_CONTROL, _MODE, + _WRITE, value); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_NOTIFIER_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, value); + } + + if (!pHwState->pSurfaceEvo[NVKMS_LEFT]) { + // Disable this window, and set all its ctxdma entries to NULL. 
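+        // EyeAndPlaneToCtxDmaIdx() maps each (eye, plane) pair to the
+        // corresponding SET_CONTEXT_DMA_ISO method index.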
+ for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + for (planeIndex = 0; + planeIndex < NVKMS_MAX_PLANES_PER_SURFACE; + planeIndex++) { + const NvU8 ctxDmaIdx = EyeAndPlaneToCtxDmaIdx(eye, planeIndex); + nvDmaSetStartEvoMethod(pChannel, + NVC37E_SET_CONTEXT_DMA_ISO(ctxDmaIdx), + 1); + nvDmaSetEvoMethodData(pChannel, 0); + } + } + + return FALSE; + } + + presentControl = DRF_NUM(C37E, _SET_PRESENT_CONTROL, _MIN_PRESENT_INTERVAL, + pHwState->minPresentInterval); + + if (pHwState->timeStamp != 0) { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _TIMESTAMP_MODE, + _ENABLE, presentControl); + } else { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _TIMESTAMP_MODE, + _DISABLE, presentControl); + } + + if (pHwState->tearing) { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _IMMEDIATE, presentControl); + } else { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _BEGIN_MODE, + _NON_TEARING, presentControl); + } + + if (pHwState->pSurfaceEvo[NVKMS_RIGHT]) { + if (pHwState->perEyeStereoFlip) { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE, + _AT_ANY_FRAME, presentControl); + } else { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE, + _PAIR_FLIP, presentControl); + } + } else { + presentControl = FLD_SET_DRF(C37E, _SET_PRESENT_CONTROL, _STEREO_MODE, + _MONO, presentControl); + } + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PRESENT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, presentControl); + + /* + * GV100 timestamped flips need a duplicate update which only changes + * TIMESTAMP_MODE and MIN_PRESENT_INTERVAL fields in SET_PRESENT_CONTROL; + * to allow updating these fields without changing anything else in + * SET_PRESENT_CONTROL, cache the values we sent in previous flips here. + * (bug 1990958) + */ + pChannel->oldPresentControl = presentControl; + + /* Set the surface parameters. 
*/ + FOR_ALL_EYES(eye) { + const NVSurfaceEvoRec *pSurfaceEvoPerEye = pHwState->pSurfaceEvo[eye]; + NvU8 numSurfacePlanes = 0; + + if (pSurfaceEvoPerEye != NULL) { + pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvoPerEye->format); + numSurfacePlanes = pFormatInfo->numPlanes; + } + + for (planeIndex = 0; + planeIndex < NVKMS_MAX_PLANES_PER_SURFACE; + planeIndex++) { + NvU32 ctxdma = 0; + NvU64 offset = 0; + const NvU8 ctxDmaIdx = EyeAndPlaneToCtxDmaIdx(eye, planeIndex); + + if (planeIndex < numSurfacePlanes) { + ctxdma = pSurfaceEvoPerEye->planes[planeIndex].ctxDma; + offset = pSurfaceEvoPerEye->planes[planeIndex].offset; + } + + nvDmaSetStartEvoMethod(pChannel, + NVC37E_SET_CONTEXT_DMA_ISO(ctxDmaIdx), 1); + nvDmaSetEvoMethodData(pChannel, ctxdma); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_OFFSET(ctxDmaIdx), 1); + nvDmaSetEvoMethodData(pChannel, nvCtxDmaOffsetFromBytes(offset)); + } + } + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE, _WIDTH, pHwState->pSurfaceEvo[NVKMS_LEFT]->widthInPixels) | + DRF_NUM(C37E, _SET_SIZE, _HEIGHT, pHwState->pSurfaceEvo[NVKMS_LEFT]->heightInPixels)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE_IN, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE_IN, _WIDTH, pHwState->sizeIn.width) | + DRF_NUM(C37E, _SET_SIZE_IN, _HEIGHT, pHwState->sizeIn.height)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_SIZE_OUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_SIZE_OUT, _WIDTH, pHwState->sizeOut.width) | + DRF_NUM(C37E, _SET_SIZE_OUT, _HEIGHT, pHwState->sizeOut.height)); + + /* XXX nvdisplay: enforce pitch/BL layout are consistent between eyes at a + * higher level */ + + storage = 0; + if (pHwState->pSurfaceEvo[NVKMS_LEFT]->layout == + NvKmsSurfaceMemoryLayoutBlockLinear) { + const NvU32 blockHeight = pHwState->pSurfaceEvo[NVKMS_LEFT]->log2GobsPerBlockY; + storage |= DRF_NUM(C37E, _SET_STORAGE, _BLOCK_HEIGHT, blockHeight); + if (pDevEvo->hal->caps.supportsSetStorageMemoryLayout) { + storage |= DRF_DEF(C37E, _SET_STORAGE, _MEMORY_LAYOUT, _BLOCKLINEAR); + } + } else if (pDevEvo->hal->caps.supportsSetStorageMemoryLayout) { + storage |= DRF_DEF(C37E, _SET_STORAGE, _MEMORY_LAYOUT, _PITCH); + } + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_STORAGE, 1); + nvDmaSetEvoMethodData(pChannel, storage); + + pFormatInfo = nvKmsGetSurfaceMemoryFormatInfo( + pHwState->pSurfaceEvo[NVKMS_LEFT]->format); + + for (planeIndex = 0; + planeIndex < NVKMS_MAX_PLANES_PER_SURFACE; + planeIndex++) { + NvU32 pitch; + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PLANAR_STORAGE(planeIndex), + 1); + + if (planeIndex >= pFormatInfo->numPlanes) { + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, 0)); + continue; + } + + /* + * Per nvdClass_01.mfs, the HEAD_SET_STORAGE_PITCH "units are blocks + * if the layout is BLOCKLINEAR, the units are multiples of 64 bytes + * if the layout is PITCH." + */ + pitch = pHwState->pSurfaceEvo[NVKMS_LEFT]->planes[planeIndex].pitch; + if (pHwState->pSurfaceEvo[NVKMS_LEFT]->layout == + NvKmsSurfaceMemoryLayoutBlockLinear) { + /* pitch is already in units of blocks; no conversion needed. 
*/ + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, pitch)); + } else { + /* XXX nvdisplay: enforce this at a higher level */ + nvAssert((pitch & 63) == 0); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_PLANAR_STORAGE, _PITCH, pitch >> 6)); + } + } + + ASSERT_EYES_MATCH(pHwState->pSurfaceEvo, format); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_UPDATE_TIMESTAMP_LO, 2); + nvDmaSetEvoMethodData(pChannel, NvU64_LO32(pHwState->timeStamp)); + nvDmaSetEvoMethodData(pChannel, NvU64_HI32(pHwState->timeStamp)); + + return TRUE; +} + +/* + * This function returns TRUE if precomp needs to swap the U and V components to + * support the given input surface format. For all such formats, + * SetParams.SwapUV needs to be enabled. + * + * Due to the "feature" described in bug 1640117, there's a mismatch in the + * ihub<->precomp interface: + * - For all Yx___UxVx_N444 and Yx___UxVx_N422 formats, ihub will fetch and send + * the V sample as the first chroma byte, and the U sample as the second byte. + * However, precomp expects the U sample as the first byte, and the V sample + * as the second byte. + * - For all Yx___VxUx_N420 formats, ihub will fetch and send the U sample as + * the first chroma byte, and the V sample as the second byte. + * However, precomp expects the V sample as the first byte, and the U sample + * as the second byte. + * + * In the above explanation, note that ihub simply fetches and sends the chroma + * bytes in the same order that they're packed in memory. + */ +static NvBool IsSurfaceFormatUVSwapped( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + return TRUE; + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return FALSE; + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + case NvKmsSurfaceMemoryFormatR5G6B5: + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + return FALSE; + } + + return FALSE; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C370 (Volta) NVDISPLAY class. 
+ * + * Volta supports YUV422 packed, but this function excludes the corresponding + * mappings because the required programming support hasn't been added to NVKMS + * yet. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. + */ +static NvU32 nvHwFormatFromKmsFormatC3( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatI8: + return NVC37E_SET_PARAMS_FORMAT_I8; + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + return NVC37E_SET_PARAMS_FORMAT_A1R5G5B5; + case NvKmsSurfaceMemoryFormatR5G6B5: + return NVC37E_SET_PARAMS_FORMAT_R5G6B5; + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + return NVC37E_SET_PARAMS_FORMAT_A8R8G8B8; + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + return NVC37E_SET_PARAMS_FORMAT_A8B8G8R8; + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + return NVC37E_SET_PARAMS_FORMAT_A2B10G10R10; + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + return NVC37E_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16; + case NvKmsSurfaceMemoryFormatR16G16B16A16: + return NVC37E_SET_PARAMS_FORMAT_R16_G16_B16_A16; + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return 0; + } + + return 0; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C570 (Turing) NVDISPLAY class. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
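+ *
+ * Formats that the C370 class already handles are delegated to
+ * nvHwFormatFromKmsFormatC3().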
+ */ +static NvU32 nvHwFormatFromKmsFormatC5( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + return NVC57E_SET_PARAMS_FORMAT_Y8_U8__Y8_V8_N422; + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + return NVC57E_SET_PARAMS_FORMAT_U8_Y8__V8_Y8_N422; + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + return NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N444; + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + return NVC57E_SET_PARAMS_FORMAT_Y8___U8V8_N422; + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + return NVC57E_SET_PARAMS_FORMAT_Y8___V8U8_N420; + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + return NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N444; + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + return NVC57E_SET_PARAMS_FORMAT_Y10___U10V10_N422; + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + return NVC57E_SET_PARAMS_FORMAT_Y10___V10U10_N420; + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + return NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N444; + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + return NVC57E_SET_PARAMS_FORMAT_Y12___U12V12_N422; + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + return NVC57E_SET_PARAMS_FORMAT_Y12___V12U12_N420; + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + case NvKmsSurfaceMemoryFormatR5G6B5: + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + return nvHwFormatFromKmsFormatC3(format); + } + + return 0; +} + +/* + * Map the given NvKmsSurfaceMemoryFormat to its corresponding HW format for the + * C670 (Orin and Ampere) NVDISPLAY class. + * + * Return 0 in the case of an unrecognized NvKmsSurfaceMemoryFormat. 
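+ *
+ * Formats that are not specific to this class are delegated to
+ * nvHwFormatFromKmsFormatC5().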
+ */ +static NvU32 nvHwFormatFromKmsFormatC6( + const enum NvKmsSurfaceMemoryFormat format) +{ + switch (format) { + case NvKmsSurfaceMemoryFormatY8___U8___V8_N444: + return NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N444; + case NvKmsSurfaceMemoryFormatY8___U8___V8_N420: + return NVC67E_SET_PARAMS_FORMAT_Y8___U8___V8_N420; + case NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422: + case NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N444: + case NvKmsSurfaceMemoryFormatY8___V8U8_N444: + case NvKmsSurfaceMemoryFormatY8___U8V8_N422: + case NvKmsSurfaceMemoryFormatY8___V8U8_N422: + case NvKmsSurfaceMemoryFormatY8___U8V8_N420: + case NvKmsSurfaceMemoryFormatY8___V8U8_N420: + case NvKmsSurfaceMemoryFormatY10___U10V10_N444: + case NvKmsSurfaceMemoryFormatY10___V10U10_N444: + case NvKmsSurfaceMemoryFormatY10___U10V10_N422: + case NvKmsSurfaceMemoryFormatY10___V10U10_N422: + case NvKmsSurfaceMemoryFormatY10___U10V10_N420: + case NvKmsSurfaceMemoryFormatY10___V10U10_N420: + case NvKmsSurfaceMemoryFormatY12___U12V12_N444: + case NvKmsSurfaceMemoryFormatY12___V12U12_N444: + case NvKmsSurfaceMemoryFormatY12___U12V12_N422: + case NvKmsSurfaceMemoryFormatY12___V12U12_N422: + case NvKmsSurfaceMemoryFormatY12___U12V12_N420: + case NvKmsSurfaceMemoryFormatY12___V12U12_N420: + case NvKmsSurfaceMemoryFormatI8: + case NvKmsSurfaceMemoryFormatA1R5G5B5: + case NvKmsSurfaceMemoryFormatX1R5G5B5: + case NvKmsSurfaceMemoryFormatR5G6B5: + case NvKmsSurfaceMemoryFormatA8R8G8B8: + case NvKmsSurfaceMemoryFormatX8R8G8B8: + case NvKmsSurfaceMemoryFormatA8B8G8R8: + case NvKmsSurfaceMemoryFormatX8B8G8R8: + case NvKmsSurfaceMemoryFormatA2B10G10R10: + case NvKmsSurfaceMemoryFormatX2B10G10R10: + case NvKmsSurfaceMemoryFormatRF16GF16BF16AF16: + case NvKmsSurfaceMemoryFormatR16G16B16A16: + case NvKmsSurfaceMemoryFormatRF32GF32BF32AF32: + return nvHwFormatFromKmsFormatC5(format); + } + + return 0; +} + +static +NVLutSurfaceEvoPtr EvoGetLutSurface3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState) +{ + NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + NvU32 head = pDevEvo->headForWindow[win]; + NvBool found = FALSE; + NvU32 dispIndex = 0; + NvU32 sd; + + if ((pHwState->pSurfaceEvo[NVKMS_LEFT] == NULL) || + (head == NV_INVALID_HEAD)) { + return NULL; + } + + /* Input Lut is explicitly enabled by client */ + if (pHwState->inputLut.pLutSurfaceEvo != NULL) { + return pHwState->inputLut.pLutSurfaceEvo; + } + + /* + * For everything but I8 surfaces, we can just use the specified + * LUT, even if it's NULL. + * For I8 surfaces, we can only use the specified surface if it's + * non-NULL (an input LUT is required). + */ + if (pHwState->pSurfaceEvo[NVKMS_LEFT]->format != + NvKmsSurfaceMemoryFormatI8) { + return NULL; + } + + /* + * The rest of the function is to handle the I8 case where no input + * LUT was specified: look up the LUT to use from the device. + */ + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + if (found) { + nvAssert(dispIndex == pDevEvo->gpus[sd].pDispEvo->displayOwner); + } else { + dispIndex = pDevEvo->gpus[sd].pDispEvo->displayOwner; + found = TRUE; + } + } + } + + nvAssert(found); + + /* + * It is not allowed to change the input LUT on immediate flips. The + * higher-level code should makes sure to disable tearing if there is change + * in the surface format and curLUTIndex does not change until next + * EvoSetLUTContextDma3() call which also makes sure to disable tearing. 
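+     *
+     * Consequently, the LUT[curLUTIndex] entry read below is the LUT surface
+     * currently programmed for this head on this disp.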
+ */ + const NvU32 lutIndex = + pDevEvo->lut.head[head].disp[dispIndex].curLUTIndex; + + return pDevEvo->lut.head[head].LUT[lutIndex]; +} + +static void +EvoFlipC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvBool enableCSC, swapUV, flip3Return; + enum NvKmsSurfaceMemoryFormat format; + NVLutSurfaceEvoPtr pLutSurfaceEvo = + EvoGetLutSurface3(pDevEvo, pChannel, pHwState); + + if (pHwState->timeStamp != 0) { + InsertAdditionalTimestampFlip(pDevEvo, pChannel, pHwState, + updateState); + } + + flip3Return = EvoFlipC3Common(pDevEvo, pChannel, pHwState, updateState); + + /* program semaphore */ + EvoProgramSemaphore3(pDevEvo, pChannel, pHwState); + + if (!flip3Return) { + return; + } + + format = pHwState->pSurfaceEvo[NVKMS_LEFT]->format; + + enableCSC = SetCscMatrixC3(pChannel, &pHwState->cscMatrix); + swapUV = IsSurfaceFormatUVSwapped(format); + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_PARAMS, 1); + nvDmaSetEvoMethodData(pChannel, + (enableCSC ? DRF_DEF(C37E, _SET_PARAMS, _CSC, _ENABLE) : + DRF_DEF(C37E, _SET_PARAMS, _CSC, _DISABLE)) | + DRF_NUM(C37E, _SET_PARAMS, _FORMAT, nvHwFormatFromKmsFormatC3(format)) | + (swapUV ? DRF_DEF(C37E, _SET_PARAMS, _SWAP_UV, _ENABLE) : + DRF_DEF(C37E, _SET_PARAMS, _SWAP_UV, _DISABLE)) | + DRF_DEF(C37E, _SET_PARAMS, _UNDERREPLICATE, _DISABLE)); + + if (pLutSurfaceEvo) { + const NvU32 ctxDma = pLutSurfaceEvo->dispCtxDma; + const NvU32 origin = offsetof(NVEvoLutDataRec, base); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTROL_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _SIZE, _SIZE_1025) | + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _RANGE, _UNITY) | + DRF_DEF(C37E, _SET_CONTROL_INPUT_LUT, _OUTPUT_MODE, _INDEX)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_OFFSET_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_OFFSET_INPUT_LUT, _ORIGIN, origin)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_CONTEXT_DMA_INPUT_LUT, _HANDLE, ctxDma)); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_CONTEXT_DMA_INPUT_LUT, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } + + UpdateCompositionC3(pDevEvo, pChannel, + &pHwState->composition, updateState); +} + +static void +EvoFlipC5Common(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + enum NvKmsSurfaceMemoryFormat format; + NvBool swapUV; + NvU32 hTaps, vTaps; + NvBool scaling = FALSE; + NVLutSurfaceEvoPtr pLutSurfaceEvo = + EvoGetLutSurface3(pDevEvo, pChannel, pHwState); + + if (!EvoFlipC3Common(pDevEvo, pChannel, pHwState, updateState)) { + return; + } + + format = pHwState->pSurfaceEvo[NVKMS_LEFT]->format; + + swapUV = IsSurfaceFormatUVSwapped(format); + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_PARAMS, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_PARAMS, _FORMAT, nvHwFormatFromKmsFormatC6(format)) | + (swapUV ? DRF_DEF(C57E, _SET_PARAMS, _SWAP_UV, _ENABLE) : + DRF_DEF(C57E, _SET_PARAMS, _SWAP_UV, _DISABLE))); + + /* + * In nvdisplay 2, there was a fixed-function block in the precomp FMT + * module that was responsible for YUV->RGB conversion. + * + * In nvdisplay 3, that fixed-function block no longer exists. 
+ * In its place, there's a generic 3x4 S5.16 coefficient matrix that SW must + * explicitly configure to convert the input surface format to the internal + * RGB pipe native format. + */ + EvoSetFMTMatrixC5(pChannel, format); + + vTaps = (pHwState->vTaps >= NV_EVO_SCALER_5TAPS) ? + NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_5 : + NVC57E_SET_CONTROL_INPUT_SCALER_VERTICAL_TAPS_TAPS_2; + hTaps = (pHwState->hTaps >= NV_EVO_SCALER_5TAPS) ? + NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 : + NVC57E_SET_CONTROL_INPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTROL_INPUT_SCALER, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CONTROL_INPUT_SCALER, _VERTICAL_TAPS, vTaps) | + DRF_NUM(C57E, _SET_CONTROL_INPUT_SCALER, _HORIZONTAL_TAPS, hTaps)); + + scaling = (pHwState->sizeIn.width != pHwState->sizeOut.width) || + (pHwState->sizeIn.height != pHwState->sizeOut.height); + nvAssert(!(scaling && bypassComposition)); + + /* + * If scaling, enable the CSC0 and CSC1 pipelines so that we can scale in + * the non-linear ICtCp domain. + * + * If no scaling, just use CSC11 to convert from the input gamut to the + * output (panel) gamut, and disable everything else. + */ + if (scaling) { + ConfigureCsc0C5(pDevEvo, pChannel, TRUE); + ConfigureCsc1C5(pDevEvo, pChannel, TRUE); + } else { + ConfigureCsc0C5(pDevEvo, pChannel, FALSE); + ConfigureCsc1C5(pDevEvo, pChannel, FALSE); + + SetCsc11MatrixC5(pChannel, &pHwState->cscMatrix); + } + + // In nvdisplay 3, an ILUT is required to convert the input surface to FP16, + // unless the surface being displayed is already FP16 to begin with. + if (format == NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 || bypassComposition) { + pLutSurfaceEvo = NULL; + } else if (!pLutSurfaceEvo) { + pLutSurfaceEvo = pDevEvo->lut.defaultLut; + } + + if (pLutSurfaceEvo) { + const NvU32 ctxDma = pLutSurfaceEvo->dispCtxDma; + const NvU32 origin = offsetof(NVEvoLutDataRec, base); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_ILUT_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SET_ILUT_CONTROL, _INTERPOLATE, _DISABLE) | + DRF_DEF(C57E, _SET_ILUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57E, _SET_ILUT_CONTROL, _MODE, _DIRECT10) | + DRF_NUM(C57E, _SET_ILUT_CONTROL, _SIZE, NV_LUT_VSS_HEADER_SIZE + + NV_NUM_EVO_LUT_ENTRIES)); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_OFFSET_ILUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_OFFSET_ILUT, _ORIGIN, origin)); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTEXT_DMA_ILUT, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57E, _SET_CONTEXT_DMA_ILUT, _HANDLE, ctxDma)); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_CONTEXT_DMA_ILUT, 1); + nvDmaSetEvoMethodData(pChannel, 0); + } + + UpdateCompositionC5(pDevEvo, pChannel, + &pHwState->composition, updateState, + bypassComposition); +} + +static void +EvoFlipC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + EvoFlipC5Common(pDevEvo, pChannel, pHwState, updateState, bypassComposition); + + /* Work around bug 2117571: whenever the tearing mode is changing, send a + * software method to notify RM. 
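+     * The NVC57E_WINDOWS_NOTIFY_RM method sent below carries the associated
+     * head and the new VSYNC state (tearing enabled implies VSYNC off).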
*/ + if (pHwState->tearing != pChannel->oldTearingMode) { + NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + NvU32 head = pDevEvo->headForWindow[win]; + + if (head != NV_INVALID_HEAD) { + nvDmaSetStartEvoMethod(pChannel, NVC57E_WINDOWS_NOTIFY_RM, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE_CHANGE, _TRUE) | + DRF_NUM(C57E, _WINDOWS_NOTIFY_RM, _ASSOCIATED_HEAD, head) | + (pHwState->tearing ? + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE, _OFF) : + DRF_DEF(C57E, _WINDOWS_NOTIFY_RM, _VSYNC_STATE, _ON))); + } + + pChannel->oldTearingMode = pHwState->tearing; + } + + /* program semaphore */ + EvoProgramSemaphore3(pDevEvo, pChannel, pHwState); +} + +static void +EvoFlipC6(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NVFlipChannelEvoHwState *pHwState, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvBool fromTop = TRUE; + NvBool fromLeft = TRUE; + + NvU32 vDirVal = 0; + NvU32 hDirVal = 0; + + switch (pHwState->rrParams.rotation) { + case NVKMS_ROTATION_90: + case NVKMS_ROTATION_270: + nvAssert(!"Invalid rotation requested."); + /* Fall-through */ + case NVKMS_ROTATION_0: + break; + case NVKMS_ROTATION_180: + fromTop = FALSE; + fromLeft = FALSE; + break; + } + + if (pHwState->rrParams.reflectionX) { + fromLeft = !fromLeft; + } + if (pHwState->rrParams.reflectionY) { + fromTop = !fromTop; + } + + vDirVal = (fromTop ? + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _VERTICAL_DIRECTION, _FROM_TOP) : + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _VERTICAL_DIRECTION, _FROM_BOTTOM)); + hDirVal = (fromLeft ? + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _HORIZONTAL_DIRECTION, _FROM_LEFT) : + DRF_DEF(C67E, _SET_SCAN_DIRECTION, _HORIZONTAL_DIRECTION, _FROM_RIGHT)); + + nvDmaSetStartEvoMethod(pChannel, NVC67E_SET_SCAN_DIRECTION, 1); + nvDmaSetEvoMethodData(pChannel, vDirVal | hDirVal); + + EvoFlipC5Common(pDevEvo, pChannel, pHwState, updateState, bypassComposition); + + /* program semaphore */ + EvoProgramSemaphore6(pDevEvo, pChannel, pHwState); +} + +static void UpdateComposition(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + /* smaller => closer to front */ + NvU32 depth, + NvU32 colorKeySelect, + NvU32 constantAlpha, + NvU32 compositionFactorSelect, + const NVColorKey key, + NVEvoUpdateState *updateState) +{ + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_COMPOSITION_CONTROL, _COLOR_KEY_SELECT, colorKeySelect) | + DRF_NUM(C37E, _SET_COMPOSITION_CONTROL, _DEPTH, depth)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_CONSTANT_ALPHA, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37E, _SET_COMPOSITION_CONSTANT_ALPHA, _K1, constantAlpha)); + + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_COMPOSITION_FACTOR_SELECT, 1); + nvDmaSetEvoMethodData(pChannel, compositionFactorSelect); + +#define UPDATE_COMPONENT(_COMP, _C, _c) \ + nvDmaSetStartEvoMethod(pChannel, NVC37E_SET_KEY_##_COMP, 1); \ + if (key.match##_C) { \ + nvDmaSetEvoMethodData(pChannel, \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MIN, key._c) | \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MAX, key._c)); \ + } else { \ + nvDmaSetEvoMethodData(pChannel, \ + DRF_NUM(C37E, _SET_KEY_##_COMP, _MIN, 0) | \ + DRF_SHIFTMASK(NVC37E_SET_KEY_##_COMP##_MAX)); \ + } + + if (colorKeySelect != + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE) { + UPDATE_COMPONENT(ALPHA, A, a); + UPDATE_COMPONENT(RED_CR, R, r); + UPDATE_COMPONENT(GREEN_Y, G, g); + 
UPDATE_COMPONENT(BLUE_CB, B, b); + } + +#undef UPDATE_COMPONENT +} + +static void EvoFlipTransitionWARC3(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState) +{ + /* Nothing to do for Volta */ +} + +/* + * Hardware bug 2193096 requires that we send special software methods around + * a window channel update that transitions from NULL ctxdma to non-NULL or + * vice versa. Below we compare the current hardware state in pSdHeadState + * against the state to be pushed in this update in pFlipState, and add any + * window(s) that qualify to the 'flipTransitionWAR' mask in the updateState. + */ +static void EvoFlipTransitionWARC5(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NvBool enabledPrev = + pSdHeadState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL; + const NvBool enabledNext = + pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL; + + if (enabledPrev != enabledNext) { + /* XXX TODO: dynamic window assignment */ + const NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER( + pDevEvo->head[head].layer[layer]->channelMask); + updateState->subdev[sd].flipTransitionWAR |= + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, win, _ENABLE); + + nvAssert(pFlipState->dirty.layer[layer]); + } + } +} + +static void EvoFlipTransitionWARC6(NVDevEvoPtr pDevEvo, NvU32 sd, NvU32 head, + const NVEvoSubDevHeadStateRec *pSdHeadState, + const NVFlipEvoHwState *pFlipState, + NVEvoUpdateState *updateState) +{ + /* Nothing to do for Orin/Ampere for now */ +} + +static void +UpdateCompositionC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState) +{ + NvU32 colorKeySelect; + NvU32 compositionFactorSelect = 0; + NvU32 constantAlpha = 0; + NvU32 match; + + switch (pCompParams->colorKeySelect) { + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE: + colorKeySelect = + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DISABLE; + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC: + colorKeySelect = + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_SRC; + + break; + case NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST: + colorKeySelect = + NVC37E_SET_COMPOSITION_CONTROL_COLOR_KEY_SELECT_DST; + + break; + default: + nvAssert(!"Invalid color key select"); + return; + } + + /* Match and nomatch pixels should not use alpha blending mode at once. */ + nvAssert((colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) || + (!NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[0])) || + (!NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[1]))); + + /* + * Match and nomatch pixels should not use blending mode PREMULT_ALPHA, + * NON_PREMULT_ALPHA, PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA + * at once. 
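+     * In other words, at least one of the match/no-match modes must be
+     * OPAQUE or TRANSPARENT, since both alpha-based modes would otherwise
+     * have to share the single K1 constant alpha programmed below.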
+ */ + nvAssert(pCompParams->blendingMode[0] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE || + pCompParams->blendingMode[0] == NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT || + pCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE || + pCompParams->blendingMode[1] == NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT); + + for (match = 0; match <= 1; match++) { + switch (pCompParams->blendingMode[match]) { + case NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE: + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _ONE) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _ZERO); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _ONE) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _ZERO); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT: + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _ZERO) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _ONE); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _ZERO) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _ONE); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + constantAlpha = 255; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + constantAlpha = 255; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA: + constantAlpha = pCompParams->surfaceAlpha; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA: + constantAlpha = pCompParams->surfaceAlpha; + if (match == 1) { + compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } else { + 
compositionFactorSelect |= + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _SRC_COLOR_FACTOR_NO_MATCH_SELECT, _K1) | + DRF_DEF(C37E, _SET_COMPOSITION_FACTOR_SELECT, _DST_COLOR_FACTOR_NO_MATCH_SELECT, _NEG_K1_TIMES_SRC); + } + break; + default: + nvAssert(!"Invalid blend mode"); + return; + } + } + + UpdateComposition(pDevEvo, + pChannel, + pCompParams->depth, + colorKeySelect, + constantAlpha, + compositionFactorSelect, + pCompParams->colorKey, + updateState); +} + +static void EvoBypassCompositionC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NVEvoUpdateState *updateState) +{ + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC57E_SET_COMPOSITION_CONTROL, 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57E, _SET_COMPOSITION_CONTROL, _BYPASS, _ENABLE)); +} + +static void +UpdateCompositionC5(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const struct NvKmsCompositionParams *pCompParams, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + if (bypassComposition) { + EvoBypassCompositionC5(pDevEvo, pChannel, updateState); + } else { + UpdateCompositionC3(pDevEvo, pChannel, pCompParams, updateState); + } +} + +/* + * The LUT entries in INDEX_1025_UNITY_RANGE have 16 bits, with the + * black value at 24576, and the white at 49151. Since the effective + * range is 16384, we treat this as a 14-bit LUT. However, we need to + * clear the low 3 bits to WAR hardware bug 813188. This gives us + * 14-bit LUT values, but only 11 bits of precision. + * XXXnvdisplay: Bug 813188 is supposed to be fixed on NVDisplay; can we expose + * more precision? + */ +static inline NvU16 ColorToLUTEntry(NvU16 val) +{ + const NvU16 val14bit = val >> 2; + return (val14bit & ~7) + 24576; +} + +/* + * Unlike earlier EVO implementations, the INDEX mode of the input LUT on + * NVDisplay is straightforward: the value of the input component is expanded + * to the LUT size by simply shifting left by the difference between the LUT + * index width and the component width. We do the same, here, to select the + * right LUT entry to fill. 
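+ *
+ * For example, with an 8-bit component and the 10-bit LUT index used here,
+ * color map entry i lands at LUT index (i << 2), so component value 255
+ * fills LUT entry 1020.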
+ */ +static inline NvU32 GetLUTIndex(int i, int componentSize) +{ + return i << (10 - componentSize); +} + +static void +EvoFillLUTSurfaceC3(NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth) +{ + int i; + NvU32 rSize, gSize, bSize; + + switch (depth) { + case 15: + rSize = gSize = bSize = 5; + break; + case 16: + rSize = bSize = 5; + gSize = 6; + break; + case 8: + case 24: + rSize = gSize = bSize = 8; + break; + case 30: + rSize = gSize = bSize = 10; + break; + default: + nvAssert(!"invalid depth"); + return; + } + + for (i = 0; i < nColorMapEntries; i++) { + if (i < (1 << rSize)) { + pLUTBuffer[GetLUTIndex(i, rSize)].Red = ColorToLUTEntry(red[i]); + } + if (i < (1 << gSize)) { + pLUTBuffer[GetLUTIndex(i, gSize)].Green = ColorToLUTEntry(green[i]); + } + if (i < (1 << bSize)) { + pLUTBuffer[GetLUTIndex(i, bSize)].Blue = ColorToLUTEntry(blue[i]); + } + } +} + +static inline float16_t ColorToFp16(NvU16 val, float32_t maxf) +{ + return nvUnormToFp16(val, maxf); +} + +static void +EvoFillLUTSurfaceC5(NVEvoLutEntryRec *pLUTBuffer, + const NvU16 *red, + const NvU16 *green, + const NvU16 *blue, + int nColorMapEntries, int depth) +{ + int i; + NvU32 rSize, gSize, bSize; + const float32_t maxf = ui32_to_f32(0xffff); + + switch (depth) { + case 15: + rSize = gSize = bSize = 5; + break; + case 16: + rSize = bSize = 5; + gSize = 6; + break; + case 8: + case 24: + rSize = gSize = bSize = 8; + break; + case 30: + rSize = gSize = bSize = 10; + break; + default: + nvAssert(!"invalid depth"); + return; + } + + // Skip the VSS header + pLUTBuffer += NV_LUT_VSS_HEADER_SIZE; + + for (i = 0; i < nColorMapEntries; i++) { + if (i < (1 << rSize)) { + pLUTBuffer[GetLUTIndex(i, rSize)].Red = + ColorToFp16(red[i], maxf).v; + } + if (i < (1 << gSize)) { + pLUTBuffer[GetLUTIndex(i, gSize)].Green = + ColorToFp16(green[i], maxf).v; + } + if (i < (1 << bSize)) { + pLUTBuffer[GetLUTIndex(i, bSize)].Blue = + ColorToFp16(blue[i], maxf).v; + } + } +} + +static void EvoSetLUTContextDma3(NVDevEvoPtr pDevEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 sd; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* + * For Window semaphores and Notifiers, the general rule of thumb is that + * the current semaphore/notifier will be released if the address for the + * semaphore/notifier changes (via context DMA change or offset change). + * This allows SW to push updates in the window channel that change other + * methods, but do not cause the semaphore or notifier to be released. This + * make it possible to reprogram the window channel with new input Lut + * without releasing semaphore. + * + * Note that higher-level code will use core channel notifiers to + * synchronize these LUT updates, but that's okay because EvoUpdateC3() + * will interlock the core and window channel(s) updates together. + */ + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (nvPeekEvoSubDevMask(pDevEvo) & (1 << sd)) { + NVEvoChannelPtr pChannel = pDevEvo->window[head << 1]; + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + NVFlipChannelEvoHwState *pMainFlipState = + &pSdHeadState->layer[NVKMS_MAIN_LAYER]; + NVLutSurfaceEvoPtr pInputLutSurfEvo = enableBaseLut ? 
+ pLutSurfEvo : NULL; + + if (pMainFlipState->inputLut.pLutSurfaceEvo == pInputLutSurfEvo) { + continue; + } + + pMainFlipState->inputLut.pLutSurfaceEvo = pInputLutSurfEvo; + /* It is not allowed to change the input LUT on immediate flips. */ + pMainFlipState->tearing = FALSE; + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + pDevEvo->hal->Flip(pDevEvo, pChannel, pMainFlipState, updateState, + bypassComposition); + + nvPopEvoSubDevMask(pDevEvo); + } + } +} + +static void EvoSetLUTContextDmaC3(const NVDispEvoRec *pDispEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvU32 ctxdma = (pLutSurfEvo != NULL) ? pLutSurfEvo->dispCtxDma : 0; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU64 offset; + + nvAssert(ctxdma || (!enableBaseLut && !enableOutputLut)); + + nvPushEvoSubDevMaskDisp(pDispEvo); + + EvoSetLUTContextDma3(pDevEvo, + head, + pLutSurfEvo, + enableBaseLut, + updateState, + bypassComposition); + + /* Program the output LUT */ + + offset = offsetof(NVEvoLutDataRec, output); + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _SIZE, _SIZE_1025) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _RANGE, _UNITY) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_OUTPUT_LUT, _OUTPUT_MODE, _INTERPOLATE)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_OFFSET_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_OUTPUT_LUT, _ORIGIN, offset >> 8)); + + /* Set the ctxdma for the output LUT */ + + if (!enableOutputLut) { + /* Class C37D has no separate enable flag. */ + ctxdma = 0; + } + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_OUTPUT_LUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_OUTPUT_LUT, _HANDLE, ctxdma)); + + nvPopEvoSubDevMask(pDevEvo); +} + +static void EvoSetLUTContextDmaC5(const NVDispEvoRec *pDispEvo, + const int head, + NVLutSurfaceEvoPtr pLutSurfEvo, + NvBool enableBaseLut, + NvBool enableOutputLut, + NVEvoUpdateState *updateState, + NvBool bypassComposition) +{ + NvU32 ctxdma = (pLutSurfEvo != NULL) ? 
pLutSurfEvo->dispCtxDma : 0; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU64 offset; + + nvAssert(ctxdma || (!enableBaseLut && !enableOutputLut)); + + nvPushEvoSubDevMaskDisp(pDispEvo); + + EvoSetLUTContextDma3(pDevEvo, + head, + pLutSurfEvo, + enableBaseLut, + updateState, + bypassComposition); + + /* Program the output LUT */ + + offset = offsetof(NVEvoLutDataRec, output); + nvAssert((offset & 0xff) == 0); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OLUT_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _INTERPOLATE, _ENABLE) | + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _MIRROR, _DISABLE) | + DRF_DEF(C57D, _HEAD_SET_OLUT_CONTROL, _MODE, _DIRECT10) | + DRF_NUM(C57D, _HEAD_SET_OLUT_CONTROL, _SIZE, NV_LUT_VSS_HEADER_SIZE + + NV_NUM_EVO_LUT_ENTRIES)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OFFSET_OLUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_OFFSET_OLUT, _ORIGIN, offset >> 8)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OLUT_FP_NORM_SCALE(head), 1); + nvDmaSetEvoMethodData(pChannel, 0xffffffff); + + /* Set the ctxdma for the output LUT */ + + if (bypassComposition) { + ctxdma = 0; + + /* if we're not enabling the OLUT, OCSC0 also needs to be disabled */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(C57D, _HEAD_SET_OCSC0CONTROL, _ENABLE, _DISABLE)); + } else if (!enableOutputLut) { + /* Use the default OLUT if the client didn't provide one */ + ctxdma = pDevEvo->lut.defaultLut->dispCtxDma; + } + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_CONTEXT_DMA_OLUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_CONTEXT_DMA_OLUT, _HANDLE, ctxdma)); + + if (!bypassComposition) { + /* only enable OCSC0 after enabling the OLUT */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_OCSC0CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_DEF(C57D, _HEAD_SET_OCSC0CONTROL, _ENABLE, _ENABLE)); + } + + nvPopEvoSubDevMask(pDevEvo); +} + +static inline NvU32 ReadCapReg(volatile const NvU32 *pCaps, NvU32 offset) +{ + /* Offsets are in bytes, but the array has dword-sized elements. 
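+     * For example, a byte offset of 0x14 reads pCaps[5].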
*/ + return pCaps[offset / sizeof(NvU32)]; +} + +static NvBool QueryStereoPinC3(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + NvU32 *pStereoPin) +{ + NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS params = { }; + + params.base.subdeviceIndex = pEvoSubDev->subDeviceInstance; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_GET_LOCKPINS_CAPS, + ¶ms, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to query stereo pin"); + return FALSE; + } + + if ((params.stereoPin >= NV_EVO_NUM_LOCK_PIN_CAPS) || + (params.stereoPin == NVC370_CTRL_GET_LOCKPINS_CAPS_STEREO_PIN_NONE)) { + return FALSE; + } else { + *pStereoPin = params.stereoPin; + return TRUE; + } +} + +static void EvoParseCapabilityNotifier3(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + const NvU32 sysCap = ReadCapReg(pCaps, NVC373_SYS_CAP); + const NvU32 sysCapB = ReadCapReg(pCaps, NVC373_SYS_CAPB); + NvU32 i, stereoPin; + NvU32 layer; + + pDevEvo->caps.cursorCompositionCaps = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES, + }, + }, + } + }; + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); layer++) { + pDevEvo->caps.layerCaps[layer].composition = + (struct NvKmsCompositionCapabilities) { + .supportedColorKeySelects = + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) | + NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST), + + .colorKeySelect = { + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE] = { + .supportedBlendModes = { + [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC] = { + .supportedBlendModes = { + [0] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + }, + }, + + [NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST] = { + .supportedBlendModes = { + [0] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + [1] = NV_EVO3_SUPPORTED_WINDOW_COMP_BLEND_MODES, + }, + }, + }, + }; + } + + /* + * Previous EVO display implementations exposed capabilities for lock pins, + * detailing which pin(s) could be used for which functions. The idea was + * that it didn't make sense to try to drive a stereo pin with a fliplock + * signal (for example), so the pin associated with the stereo function was + * marked as stereo-capable but not any other function; attempting to use a + * non-stereo-capable pin for stereo or vice-versa would result in an error. + * + * With nvdisplay, the meaning of lock pins was changed such that they no + * longer have a shared namespace. So stereo lockpin 0 is not the same as + * fliplock lockpin 0 and neither is the same as scanlock lockpin 0. With + * this scheme, there is no way to specify a pin that is incapable of a + * given function, so the entire capabilities mechanism was removed. + * + * However, the pins chosen for HEAD_SET_CONTROL still need to match the + * pins selected for each function in the VBIOS DCB. Fliplock and scanlock + * query this information through + * NV5070_CTRL_CMD_GET_FRAMELOCK_HEADER_LOCKPINS. Stereo is handled + * here, using NVC370_CTRL_CMD_GET_LOCKPINS_CAPS. 
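+     *
+     * Accordingly, the loop below marks every pin as fliplock- and
+     * scanlock-capable, and only the stereo capability is gated on the pin
+     * reported by QueryStereoPinC3().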
+ */ + + for (i = 0; i < NV_EVO_NUM_LOCK_PIN_CAPS; i++) { + pEvoCaps->pin[i].flipLock = TRUE; + pEvoCaps->pin[i].scanLock = TRUE; + } + + if (QueryStereoPinC3(pDevEvo, pEvoSubDev, &stereoPin)) { + pEvoCaps->pin[stereoPin].stereo = TRUE; + } + + // Miscellaneous capabilities + // NVDisplay does not support interlaced modes. + pEvoCaps->misc.supportsInterlaced = FALSE; + + // Heads + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC373_HEAD_CAPA__SIZE_1); + for (i = 0; i < NVC373_HEAD_CAPA__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + pHeadCaps->usable = + FLD_IDX_TEST_DRF(C373, _SYS_CAP, _HEAD_EXISTS, i, _YES, sysCap); + } + + // SORs + ct_assert(ARRAY_LEN(pEvoCaps->sor) >= NVC373_SOR_CAP__SIZE_1); + for (i = 0; i < NVC373_SOR_CAP__SIZE_1; i++) { + NVEvoSorCaps *pSorCaps = &pEvoCaps->sor[i]; + + NvBool sorUsable = + FLD_IDX_TEST_DRF(C373, _SYS_CAP, _SOR_EXISTS, i, _YES, sysCap); + + /* XXXnvdisplay: add SOR caps: max DP clk, ... */ + if (sorUsable) { + const NvU32 sorCap = ReadCapReg(pCaps, NVC373_SOR_CAP(i)); + pSorCaps->dualTMDS = + FLD_TEST_DRF(C373, _SOR_CAP, _DUAL_TMDS, _TRUE, sorCap); + + /* + * Assume that all SORs are equally capable, and that all SORs + * support HDMI FRL if the display class supports it. (If this + * assert fires, we may need to rework SOR assignment for such HDMI + * sinks.) + * + * Although HDMI_FRL is only defined for class C6, classes C3 and + * C5 don't use that bit in the SOR_CAP register so it should + * always be 0 on those chips. + */ + nvAssert(!!FLD_TEST_DRF(C673, _SOR_CAP, _HDMI_FRL, _TRUE, sorCap) == + !!pDevEvo->hal->caps.supportsHDMIFRL); + + pSorCaps->maxTMDSClkKHz = + DRF_VAL(C373, _SOR_CLK_CAP, _TMDS_MAX, + ReadCapReg(pCaps, NVC373_SOR_CLK_CAP(i))) * 10000; + } + } + + // Don't need any PIOR caps currently. + + // Windows + ct_assert(ARRAY_LEN(pEvoCaps->window) >= NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1); + for (i = 0; i < NVC373_SYS_CAPB_WINDOW_EXISTS__SIZE_1; i++) { + NVEvoWindowCaps *pWinCaps = &pEvoCaps->window[i]; + + pWinCaps->usable = + FLD_IDX_TEST_DRF(C373, _SYS_CAPB, _WINDOW_EXISTS, i, _YES, sysCapB); + } +} + +static void EvoParseCapabilityNotifierC3(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NvU32 i; + + // Miscellaneous capabilities + pEvoCaps->misc.supportsSemiPlanar = FALSE; + pEvoCaps->misc.supportsPlanar = FALSE; + pEvoCaps->misc.supportsDSI = FALSE; + + // Heads + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC373_HEAD_CAPA__SIZE_1); + for (i = 0; i < NVC373_HEAD_CAPA__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + /* XXXnvdisplay: add caps for hsat, ocsc, lut */ + if (pHeadCaps->usable) { + NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps; + + pScalerCaps->present = + FLD_TEST_DRF(C373, _HEAD_CAPA, _SCALER, _TRUE, + ReadCapReg(pCaps, NVC373_HEAD_CAPA(i))); + if (pScalerCaps->present) { + NVEvoScalerTapsCaps *pTapsCaps; + NvU32 tmp; + + /* + * Note that some of these may be zero (e.g., only 2-tap 444 + * mode is supported on GV100, so the rest are all zero. + * + * Downscaling by more than 2x in either direction is not + * allowed by state error check for both horizontal and + * vertical 2-tap scaling. + * + * Downscaling by more than 4x in either direction is not + * allowed by argument error check (and state error check) for + * 5-tap scaling. 
+ * + * 5-tap scaling is not implemented on GV100, though, so we + * should never see numTaps == 5 on GV100, and we can just use a + * max of 2 here all the time. + */ + + /* 2-tap capabilities */ + tmp = ReadCapReg(pCaps, NVC373_HEAD_CAPD(i)); + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS]; + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxPixelsVTaps = + NV_MAX(DRF_VAL(C373, _HEAD_CAPD, _MAX_PIXELS_2TAP422, tmp), + DRF_VAL(C373, _HEAD_CAPD, _MAX_PIXELS_2TAP444, tmp)); + + /* + * Note that there is a capability register for 1TAP, but there + * doesn't appear to be a way to select 1-tap scaling in the + * channel methods, so don't bother reading it for now. + */ + } + } + } +} + +static void EvoParsePrecompScalerCaps5(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + int i; + + for (i = 0; i < NVC573_SYS_CAPB_WINDOW_EXISTS__SIZE_1; i++) { + NVEvoWindowCaps *pWinCaps = &pEvoCaps->window[i]; + NVEvoScalerCaps *pScalerCaps = &pWinCaps->scalerCaps; + NVEvoScalerTapsCaps *pTapsCaps; + NvU32 capA = ReadCapReg(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPA(i)); + NvU32 capD, capF; + NvBool csc00Present = FALSE, csc01Present = FALSE; + NvBool csc0LUTPresent = FALSE, csc1LUTPresent = FALSE; + NvBool csc10Present = FALSE, csc11Present = FALSE; + + csc00Present = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC00_PRESENT, _TRUE, capA); + csc01Present = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC01_PRESENT, _TRUE, capA); + pWinCaps->csc0MatricesPresent = (csc00Present && csc01Present); + + csc0LUTPresent = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC0LUT_PRESENT, _TRUE, capA); + csc1LUTPresent = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC1LUT_PRESENT, _TRUE, capA); + pWinCaps->cscLUTsPresent = (csc0LUTPresent && csc1LUTPresent); + + csc10Present = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC10_PRESENT, _TRUE, capA); + csc11Present = FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, + _CSC11_PRESENT, _TRUE, capA); + pWinCaps->csc1MatricesPresent = (csc10Present && csc11Present); + + pScalerCaps->present = + FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPA, _SCLR_PRESENT, + _TRUE, capA); + if (pScalerCaps->present) { + capD = ReadCapReg(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPD(i)); + capF = ReadCapReg(pCaps, NVC573_PRECOMP_WIN_PIPE_HDR_CAPF(i)); + + /* 5-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_5TAPS]; + if (FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPD, + _SCLR_VS_MAX_SCALE_FACTOR, _4X, capD)) { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + if (FLD_TEST_DRF(C573, _PRECOMP_WIN_PIPE_HDR_CAPD, + _SCLR_HS_MAX_SCALE_FACTOR, _4X, capD)) { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _PRECOMP_WIN_PIPE_HDR_CAPF, + _VSCLR_MAX_PIXELS_5TAP, capF); + + /* 2-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS]; + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _PRECOMP_WIN_PIPE_HDR_CAPF, _VSCLR_MAX_PIXELS_2TAP, + capF); + } + } +} + +static void EvoParseCapabilityNotifierC5C6Common(NVEvoCapabilitiesPtr pEvoCaps, + volatile const NvU32 *pCaps) +{ + NvU32 i; + NvBool postcompScalingSupported = FALSE; + 
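+    /*
+     * Parse the per-head postcomp scaler capabilities first; if no usable
+     * head reports a postcomp scaler, fall back to the per-window precomp
+     * scaler capabilities at the end of this function.
+     */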
+ // Heads + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1); + for (i = 0; i < NVC573_SYS_CAP_HEAD_EXISTS__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + if (pHeadCaps->usable) { + NVEvoScalerCaps *pScalerCaps = &pHeadCaps->scalerCaps; + NVEvoScalerTapsCaps *pTapsCaps; + NvU32 capA = ReadCapReg(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPA(i)); + NvU32 capC, capD; + + pScalerCaps->present = + FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPA, _SCLR_PRESENT, + _TRUE, capA); + if (pScalerCaps->present) { + postcompScalingSupported = TRUE; + + capC = ReadCapReg(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPC(i)); + capD = ReadCapReg(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPD(i)); + + /* + * Note that some of these may be zero. + * + * XXXnvdisplay: what about POSTCOMP_HEAD_HDR_CAPC_SCLR_*? + */ + + /* 5-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_5TAPS]; + if (FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPC, + _SCLR_VS_MAX_SCALE_FACTOR, _4X, capC)) { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + if (FLD_TEST_DRF(C573, _POSTCOMP_HEAD_HDR_CAPC, + _SCLR_HS_MAX_SCALE_FACTOR, _4X, capC)) { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_4X; + } else { + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + } + + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPD, + _VSCLR_MAX_PIXELS_5TAP, capD); + + /* 2-tap capabilities */ + pTapsCaps = &pScalerCaps->taps[NV_EVO_SCALER_2TAPS]; + pTapsCaps->maxVDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxHDownscaleFactor = NV_EVO_SCALE_FACTOR_2X; + pTapsCaps->maxPixelsVTaps = + DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPD, + _VSCLR_MAX_PIXELS_2TAP, capD); + } + +#if defined(NV_DEBUG) + NvU32 capA = ReadCapReg(pCaps, NVC573_POSTCOMP_HEAD_HDR_CAPA(i)); + NvU32 unitWidth = DRF_VAL(C573, _POSTCOMP_HEAD_HDR_CAPA, _UNIT_WIDTH, capA); + + // EvoInitChannelC5 assumes 16-bit fixed-point. + nvAssert(unitWidth == 16); +#endif + } + } + + /* + * To keep the design simple, NVKMS will expose support for precomp scaling + * iff postcomp scaling isn't supported. This means that on chips which have + * both precomp and postcomp scalers (e.g., Turing), NVKMS will only report + * that postcomp scaling is supported. + */ + if (!postcompScalingSupported) { + EvoParsePrecompScalerCaps5(pEvoCaps, pCaps); + } + + // XXXnvdisplay3: add SOR caps for DP over USB +} + +static void EvoParseCapabilityNotifierC5(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + + // Miscellaneous capabilities + + /* + * On Turing, the NVC573_IHUB_COMMON_CAPA_SUPPORT_PLANAR bit actually + * reports whether IHUB supports YUV _semi-planar_ formats. 
+ */ + pEvoCaps->misc.supportsSemiPlanar = + FLD_TEST_DRF(C573, _IHUB_COMMON_CAPA, _SUPPORT_PLANAR, _TRUE, + ReadCapReg(pCaps, NVC573_IHUB_COMMON_CAPA)); + pEvoCaps->misc.supportsDSI = FALSE; + + EvoParseCapabilityNotifierC5C6Common(pEvoCaps, pCaps); +} + +static void EvoParseCapabilityNotifierC6(NVDevEvoPtr pDevEvo, + NVEvoSubDevPtr pEvoSubDev, + volatile const NvU32 *pCaps) +{ + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NvU32 capC = ReadCapReg(pCaps, NVC673_IHUB_COMMON_CAPC); + NvU32 i; + + // Miscellaneous capabilities + + pEvoCaps->misc.supportsPlanar = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPA, _SUPPORT_PLANAR, _TRUE, + ReadCapReg(pCaps, NVC673_IHUB_COMMON_CAPA)); + + pEvoCaps->misc.supportsSemiPlanar = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPC, _SUPPORT_SEMI_PLANAR, _TRUE, capC); + + pEvoCaps->misc.supportsHVFlip = + FLD_TEST_DRF(C673, _IHUB_COMMON_CAPC, _SUPPORT_HOR_VER_FLIP, _TRUE, capC); + + ct_assert(ARRAY_LEN(pEvoCaps->head) >= NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1); + + // DSI is currently supported on just Orin, which has only 1 DSI engine (DSI0). + pEvoCaps->misc.supportsDSI = + FLD_TEST_DRF(C673, _SYS_CAP, _DSI0_EXISTS, _YES, + ReadCapReg(pCaps, NVC673_SYS_CAP)); + + for (i = 0; i < NVC673_SYS_CAP_HEAD_EXISTS__SIZE_1; i++) { + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[i]; + + if (pHeadCaps->usable) { + NvU32 capA = ReadCapReg(pCaps, NVC673_POSTCOMP_HEAD_HDR_CAPA(i)); + NvBool hclpfPresent = + FLD_TEST_DRF(C673, _POSTCOMP_HEAD_HDR_CAPA, _HCLPF_PRESENT, + _TRUE, capA); + NvBool vfilterPresent = + FLD_TEST_DRF(C673, _POSTCOMP_HEAD_HDR_CAPA, _VFILTER_PRESENT, + _TRUE, capA); + + pHeadCaps->supportsHDMIYUV420HW = hclpfPresent && vfilterPresent; + } + } + + EvoParseCapabilityNotifierC5C6Common(pEvoCaps, pCaps); +} + +static NvU32 UsableWindowCount(const NVEvoCapabilities *pEvoCaps) +{ + NvU32 i, count = 0; + NvBool foundUnusable = FALSE; + + for (i = 0; i < ARRAY_LEN(pEvoCaps->window); i++) { + if (pEvoCaps->window[i].usable) { + count++; + /* Assert that usable windows are contiguous starting from 0. */ + if (foundUnusable) { + nvAssert(!foundUnusable); + } + } else { + foundUnusable = TRUE; + } + } + + return count; +} + +typedef typeof(EvoParseCapabilityNotifierC3) parse_caps_t; +typedef typeof(nvHwFormatFromKmsFormatC3) get_hw_fmt_t; + +static NvBool EvoGetCapabilities3(NVDevEvoPtr pDevEvo, + parse_caps_t *pParse, + get_hw_fmt_t *pGetHwFmt, + NvU32 hwclass, + size_t length) +{ + NvU32 capsHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + NVDispEvoPtr pDispEvo; + NvU32 sd; + NvU32 status; + NvBool ret = FALSE; + NvBool first = TRUE; + NvBool supportsSemiPlanar = TRUE; + NvBool supportsPlanar = TRUE; + NvBool supportsHVFlip = TRUE; + unsigned int i; + enum NvKmsRotation curRotation; + NvBool reflectionX; + NvBool reflectionY; + NvU32 win; + NvU8 layer; + + /* With nvdisplay, capabilities are exposed in a separate object. 
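+     * The code below allocates that object, maps it on each subdevice to
+     * parse the capability registers, and then unmaps and frees it again.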
*/ + status = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + capsHandle, + hwclass, NULL); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to allocate caps object"); + goto free_handle; + } + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + pDevEvo->caps.layerCaps[layer].supportsWindowMode = TRUE; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + void *ptr; + + status = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + capsHandle, + 0, + length, + &ptr, + 0); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to map caps memory"); + goto free_object; + } + + nvkms_memset(&pEvoSubDev->capabilities, 0, + sizeof(pEvoSubDev->capabilities)); + + EvoParseCapabilityNotifier3(pDevEvo, pEvoSubDev, ptr); + pParse(pDevEvo, pEvoSubDev, ptr); + + status = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + capsHandle, ptr, 0); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to unmap caps memory"); + } + + if (first) { + pDevEvo->numWindows = + UsableWindowCount(&pEvoSubDev->capabilities); + first = FALSE; + } else { + /* Assert that each subdevice has the same number of windows. */ + nvAssert(pDevEvo->numWindows == + UsableWindowCount(&pEvoSubDev->capabilities)); + } + + /* + * Expose YUV semi-planar iff all of the disps belonging to pDevEvo + * support it. + */ + supportsSemiPlanar &= + pEvoSubDev->capabilities.misc.supportsSemiPlanar; + + /* + * Expose YUV planar iff all of the disps belonging to pDevEvo + * support it. + */ + supportsPlanar &= + pEvoSubDev->capabilities.misc.supportsPlanar; + + supportsHVFlip &= + pEvoSubDev->capabilities.misc.supportsHVFlip; + } + + /* + * On Volta, only WINDOWs (2N) and (2N + 1) can be attached to HEAD N. + * This is a HW restriction that's documented in the MFS. + * However, starting Turing, display HW supports flexible window + * mapping, which means that SW can freely attach any window to any + * head. + * + * On Orin VDK R4, for example, there's currently one usable head + * (HEAD 0), and 8 total usable windows (WINDOWs 0-7). + * This configuration is currently causing issues since this function + * assumes that the available number of heads and windows strictly + * abides by the fixed mapping enforced on Volta. + * + * This window mapping init sequence will eventually be updated to + * support flexible window mapping. But, until that happens, there + * should at least be a sanity check to make sure that the head that the + * current window maps to - per the Volta restriction - is actually + * available. 
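+     *
+     * Per that fixed Volta-style mapping, window W is attached to head
+     * (W / 2); for example, windows 4 and 5 both map to head 2.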
+ */ + + for (win = 0; win < pDevEvo->numWindows; win++) { + pDevEvo->headForWindow[win] = NVC37D_WINDOW_MAPPED_TO_HEAD(win); + nvAssert(pDevEvo->headForWindow[win] < pDevEvo->numHeads); + } + + for (i = NvKmsSurfaceMemoryFormatMin; + i <= NvKmsSurfaceMemoryFormatMax; + i++) { + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(i); + + if ((pFormatInfo->numPlanes == 2 && !supportsSemiPlanar) || + (pFormatInfo->numPlanes == 3 && !supportsPlanar)) { + continue; + } + + if (pGetHwFmt(i) != 0) { + NvU8 layer; + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + pDevEvo->caps.layerCaps[layer].supportedSurfaceMemoryFormats |= + NVBIT64(i); + } + } + } + + for (reflectionX = FALSE; + reflectionX <= TRUE; + reflectionX++) { + + for (reflectionY = FALSE; + reflectionY <= TRUE; + reflectionY++) { + + for (curRotation = NVKMS_ROTATION_MIN; + curRotation <= NVKMS_ROTATION_MAX; + curRotation++) { + struct NvKmsRRParams rrParams = { curRotation, + reflectionX, + reflectionY }; + NvU8 bitPosition; + + if ((reflectionX || reflectionY) && !supportsHVFlip) { + continue; + } + + if (curRotation == NVKMS_ROTATION_180 && !supportsHVFlip) { + continue; + } + + /* + * Skipping over rotations by 90 and 270 degrees + * because these rotations require support for + * SCAN_COLUMN rotation, which hasn't been added + * to NVKMS yet. + */ + if (curRotation == NVKMS_ROTATION_90 || + curRotation == NVKMS_ROTATION_270) { + continue; + } + + bitPosition = NvKmsRRParamsToCapBit(&rrParams); + pDevEvo->caps.validLayerRRTransforms |= NVBIT(bitPosition); + } + } + } + + ret = TRUE; + +free_object: + status = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + capsHandle); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to free caps object"); + } + +free_handle: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, capsHandle); + + return ret; +} + +static NvBool EvoGetCapabilitiesC3(NVDevEvoPtr pDevEvo) +{ + return EvoGetCapabilities3(pDevEvo, EvoParseCapabilityNotifierC3, + nvHwFormatFromKmsFormatC3, + NVC373_DISP_CAPABILITIES, + sizeof(_NvC373DispCapabilities)); +} + +static NvBool EvoGetCapabilitiesC5(NVDevEvoPtr pDevEvo) +{ + return EvoGetCapabilities3(pDevEvo, EvoParseCapabilityNotifierC5, + nvHwFormatFromKmsFormatC5, + NVC573_DISP_CAPABILITIES, + sizeof(_NvC573DispCapabilities)); +} + +static NvBool EvoGetCapabilitiesC6(NVDevEvoPtr pDevEvo) +{ + return EvoGetCapabilities3(pDevEvo, EvoParseCapabilityNotifierC6, + nvHwFormatFromKmsFormatC6, + NVC673_DISP_CAPABILITIES, + sizeof(_NvC673DispCapabilities)); +} + +static void EvoSetViewportPointInC3(NVDevEvoPtr pDevEvo, const int head, + NvU16 x, NvU16 y, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* Set the input viewport point */ + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_POINT_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_IN, _X, x) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_IN, _Y, y)); + /* XXXnvdisplay set ViewportValidPointIn to configure overfetch */ +} + +static void EvoSetOutputScalerC3(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvU32 imageSharpeningValue, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + const NVDispHeadStateEvoRec *pHeadState = 
&pDispEvo->headState[head]; + + /* These methods should only apply to a single pDpyEvo */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + NvU32 vTaps = pHeadState->vTaps > NV_EVO_SCALER_2TAPS ? + NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_5 : + NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_VERTICAL_TAPS_TAPS_2; + NvU32 hTaps = pHeadState->hTaps > NV_EVO_SCALER_2TAPS ? + NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_5 : + NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER_HORIZONTAL_TAPS_TAPS_2; + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_OUTPUT_SCALER(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _VERTICAL_TAPS, vTaps) | + DRF_NUM(C37D, _HEAD_SET_CONTROL_OUTPUT_SCALER, _HORIZONTAL_TAPS, hTaps)); +} + +static NvBool EvoSetViewportInOut3(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState, + NvU32 setWindowUsageBounds) +{ + const NVEvoCapabilitiesPtr pEvoCaps = &pDevEvo->gpus[0].capabilities; + NVEvoChannelPtr pChannel = pDevEvo->core; + struct NvKmsScalingUsageBounds scalingUsageBounds = { }; + NvU32 win; + + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + /* The input viewport shouldn't vary. */ + nvAssert(pViewPortMin->in.width == pViewPort->in.width); + nvAssert(pViewPortMax->in.width == pViewPort->in.width); + nvAssert(pViewPortMin->in.height == pViewPort->in.height); + nvAssert(pViewPortMax->in.height == pViewPort->in.height); + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_SIZE_IN(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_IN, _WIDTH, pViewPort->in.width) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_IN, _HEIGHT, pViewPort->in.height)); + /* XXXnvdisplay set ViewportValidSizeIn to configure overfetch */ + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_POINT_OUT_ADJUST(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_X, pViewPort->out.xAdjust) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_POINT_OUT, _ADJUST_Y, pViewPort->out.yAdjust)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_VIEWPORT_SIZE_OUT(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_OUT, _WIDTH, pViewPort->out.width) | + DRF_NUM(C37D, _HEAD_SET_VIEWPORT_SIZE_OUT, _HEIGHT, pViewPort->out.height)); + + /* XXXnvdisplay deal with pViewPortMin, pViewPortMax */ + + if (!nvComputeScalingUsageBounds(&pEvoCaps->head[head].scalerCaps, + pViewPort->in.width, pViewPort->in.height, + pViewPort->out.width, pViewPort->out.height, + pViewPort->hTaps, pViewPort->vTaps, + &scalingUsageBounds)) { + /* Should have been rejected by validation */ + nvAssert(!"Attempt to program invalid viewport"); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_MAX_OUTPUT_SCALE_FACTOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_MAX_OUTPUT_SCALE_FACTOR, _HORIZONTAL, + scalingUsageBounds.maxHDownscaleFactor) | + DRF_NUM(C37D, _HEAD_SET_MAX_OUTPUT_SCALE_FACTOR, _VERTICAL, + scalingUsageBounds.maxVDownscaleFactor)); + + /* + * Program MAX_PIXELS_FETCHED_PER_LINE window usage bounds + * for each window that’s attached to the head. 
+ * + * Precomp will clip the post-scaled window to the input viewport, reverse-scale + * this cropped size back to the input surface domain, and isohub will fetch + * this cropped size. This function assumes that there's no window scaling yet, + * so the MAX_PIXELS_FETCHED_PER_LINE will be bounded by the input viewport + * width. SetScalingUsageBoundsOneWindow5() will take care of updating + * MAX_PIXELS_FETCHED_PER_LINE, if window scaling is enabled later. + */ + setWindowUsageBounds |= + DRF_NUM(C37D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _MAX_PIXELS_FETCHED_PER_LINE, + GetMaxPixelsFetchedPerLine(pViewPort->in.width, + NV_EVO_SCALE_FACTOR_1X)); + + for (win = 0; win < pDevEvo->numWindows; win++) { + if (head != pDevEvo->headForWindow[win]) { + continue; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_WINDOW_SET_WINDOW_USAGE_BOUNDS(win), 1); + nvDmaSetEvoMethodData(pChannel, setWindowUsageBounds); + } + + return scalingUsageBounds.vUpscalingAllowed; +} + +static void EvoSetViewportInOutC3(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvBool verticalUpscalingAllowed = + EvoSetViewportInOut3(pDevEvo, head, pViewPortMin, pViewPort, + pViewPortMax, updateState, + NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C3); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_HEAD_USAGE_BOUNDS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _CURSOR, _USAGE_W256_H256) | + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_LUT, _USAGE_1025) | + (verticalUpscalingAllowed ? + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) : + DRF_DEF(C37D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE))); +} + +static void EvoSetViewportInOutC5(NVDevEvoPtr pDevEvo, const int head, + const NVHwModeViewPortEvo *pViewPortMin, + const NVHwModeViewPortEvo *pViewPort, + const NVHwModeViewPortEvo *pViewPortMax, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 setWindowUsageBounds = + (NV_EVO3_DEFAULT_WINDOW_USAGE_BOUNDS_C5 | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _INPUT_SCALER_TAPS, _TAPS_2) | + DRF_DEF(C57D, _WINDOW_SET_WINDOW_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE)); + NvU32 verticalUpscalingAllowed = + EvoSetViewportInOut3(pDevEvo, head, pViewPortMin, pViewPort, + pViewPortMax, updateState, setWindowUsageBounds); + + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_HEAD_USAGE_BOUNDS(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _CURSOR, _USAGE_W256_H256) | + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OLUT_ALLOWED, _TRUE) | + /* Despite the generic name of this field, it's specific to vertical taps. */ + (pViewPort->vTaps > NV_EVO_SCALER_2TAPS ? + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_SCALER_TAPS, _TAPS_5) : + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _OUTPUT_SCALER_TAPS, _TAPS_2)) | + (verticalUpscalingAllowed ? + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _TRUE) : + DRF_DEF(C57D, _HEAD_SET_HEAD_USAGE_BOUNDS, _UPSCALING_ALLOWED, _FALSE))); +} + +/*! + * Compute the C37D_HEAD_SET_CONTROL_CURSOR method value. + * + * This function also validates that the given NVSurfaceEvoRec can be + * used as a cursor image. + + * + * \param[in] pDevEvo The device on which the cursor will be programmed. 
+ * \param[in] pSurfaceEvo The surface to be used as the cursor image. + * \param[out] pValue The C37D_HEAD_SET_CONTROL_CURSOR method value. + + * \return If TRUE, the surface can be used as a cursor image, and + * pValue contains the method value. If FALSE, the surface + * cannot be used as a cursor image. + */ +static NvBool EvoGetHeadSetControlCursorValueC3(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + NvU32 *pValue) +{ + NvU32 value = 0; + + if (pSurfaceEvo == NULL) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _DISABLE); + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + goto done; + } else { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _ENABLE, _ENABLE); + } + + /* The cursor must always be pitch. */ + + if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + return FALSE; + } + + /* + * The only supported cursor image memory format is A8R8G8B8. + */ + if (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA8R8G8B8) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _FORMAT, _A8R8G8B8); + } else { + return FALSE; + } + + /* + * The cursor only supports a few image sizes. + */ + if ((pSurfaceEvo->widthInPixels == 32) && + (pSurfaceEvo->heightInPixels == 32)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W32_H32); + } else if ((pSurfaceEvo->widthInPixels == 64) && + (pSurfaceEvo->heightInPixels == 64)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W64_H64); + } else if ((pDevEvo->cursorHal->caps.maxSize >= 128) && + (pSurfaceEvo->widthInPixels == 128) && + (pSurfaceEvo->heightInPixels == 128)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W128_H128); + } else if ((pDevEvo->cursorHal->caps.maxSize >= 256) && + (pSurfaceEvo->widthInPixels == 256) && + (pSurfaceEvo->heightInPixels == 256)) { + value |= DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR, _SIZE, _W256_H256); + } else { + return FALSE; + } + + /* + * Hard code the cursor hotspot. + */ + value |= DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_Y, 0); + value |= DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR, _HOT_SPOT_X, 0); + + // XXXnvdisplay: Add support for cursor de-gamma. + +done: + + if (pValue != NULL) { + *pValue = value; + } + + return TRUE; +} + +static void EvoSetCursorImageC3(NVDevEvoPtr pDevEvo, const int head, + const NVSurfaceEvoRec *pSurfaceEvo, + NVEvoUpdateState *updateState, + const struct NvKmsCompositionParams *pCursorCompParams) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + const NvU32 ctxdma = pSurfaceEvo ? pSurfaceEvo->planes[0].ctxDma : 0; + const NvU64 offset = pSurfaceEvo ? pSurfaceEvo->planes[0].offset : 0; + NvU32 headSetControlCursorValue = 0; + NvBool ret; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + nvAssert(pCursorCompParams->colorKeySelect == + NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE); + nvAssert(NVBIT(pCursorCompParams->blendingMode[1]) & + NV_EVO3_SUPPORTED_CURSOR_COMP_BLEND_MODES); + /* These methods should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvAssert(!pSurfaceEvo || ctxdma); + + ret = EvoGetHeadSetControlCursorValueC3(pDevEvo, pSurfaceEvo, + &headSetControlCursorValue); + /* + * The caller should have already validated the surface, so there + * shouldn't be a failure. 
+ */ + if (!ret) { + nvAssert(!"Could not construct HEAD_SET_CONTROL_CURSOR value"); + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_PRESENT_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_PRESENT_CONTROL_CURSOR, _MODE, _MONO)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_CURSOR(head, 0), 4); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CURSOR, _HANDLE, ctxdma)); + // Always set the right cursor context DMA. + // HW will just ignore this if it is not in stereo cursor mode. + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CURSOR, _HANDLE, ctxdma)); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_OFFSET_CURSOR, _ORIGIN, + nvCtxDmaOffsetFromBytes(offset))); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTROL_CURSOR(head), 1); + nvDmaSetEvoMethodData(pChannel, headSetControlCursorValue); + + nvDmaSetStartEvoMethod(pChannel, + NVC37D_HEAD_SET_CONTROL_CURSOR_COMPOSITION(head), 1); + switch (pCursorCompParams->blendingMode[1]) { + case NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _ZERO) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, 255) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, + pCursorCompParams->surfaceAlpha) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA: + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _K1, + pCursorCompParams->surfaceAlpha) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _CURSOR_COLOR_FACTOR_SELECT, _K1) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, + _VIEWPORT_COLOR_FACTOR_SELECT, _NEG_K1_TIMES_SRC) | + DRF_DEF(C37D, _HEAD_SET_CONTROL_CURSOR_COMPOSITION, _MODE, _BLEND)); + break; + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "%s: composition mode %d not supported for 
cursor", + __func__, pCursorCompParams->blendingMode[1]); + break; + } +} + +static NvBool EvoValidateCursorSurfaceC3(const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + return EvoGetHeadSetControlCursorValueC3(pDevEvo, pSurfaceEvo, NULL); +} + +static NvBool ValidateWindowFormatSourceRectC3( + const struct NvKmsRect *sourceFetchRect, + const enum NvKmsSurfaceMemoryFormat format) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(format); + + /* + * sourceFetchRect represents the dimensions of the source fetch rectangle. + * If YUV crop and scaler overfetch are supported, it is up to the caller to + * provide the correct dimensions (e.g., ValidSizeIn/ValidPointIn vs. + * SizeIn/PointIn). + * + * For all YUV formats, the position and size of the fetch rectangle must be + * even in the horizontal direction. + * + * For YUV420 formats, there is an additional restriction that the position + * and size of the fetch rectangle must be even in the vertical direction as + * well. + */ + if (pFormatInfo->isYUV) { + if (((sourceFetchRect->x & 1) != 0) || + (sourceFetchRect->width & 1) != 0) { + return FALSE; + } + + if (pFormatInfo->yuv.vertChromaDecimationFactor > 1) { + if (((sourceFetchRect->y & 1) != 0) || + (sourceFetchRect->height & 1) != 0) { + return FALSE; + } + } + } + + return TRUE; +} + +typedef typeof(ValidateWindowFormatSourceRectC3) val_src_rect_t; + +static NvBool EvoValidateWindowFormatWrapper( + const enum NvKmsSurfaceMemoryFormat format, + get_hw_fmt_t *pGetHwFmt, + const struct NvKmsRect *sourceFetchRect, + val_src_rect_t *pValSrcRect, + NvU32 *hwFormatOut) +{ + const NvU32 hwFormat = pGetHwFmt(format); + + if (hwFormat == 0) { + return FALSE; + } + + if (hwFormatOut != NULL) { + *hwFormatOut = hwFormat; + } + + /* + * If sourceFetchRect is NULL, this function is only responsible for + * verifying whether the given NvKmsSurfaceMemoryFormat has a corresponding + * HW format. + */ + if (sourceFetchRect == NULL) { + return TRUE; + } + + return pValSrcRect(sourceFetchRect, format); +} + +static NvBool EvoValidateWindowFormatC3( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC3, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +static NvBool EvoValidateWindowFormatC5( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC5, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +static NvBool EvoValidateWindowFormatC6( + const enum NvKmsSurfaceMemoryFormat format, + const struct NvKmsRect *sourceFetchRect, + NvU32 *hwFormatOut) +{ + return EvoValidateWindowFormatWrapper( + format, + nvHwFormatFromKmsFormatC6, + sourceFetchRect, + ValidateWindowFormatSourceRectC3, + hwFormatOut); +} + +static NvU32 OffsetForNotifier(int idx) +{ + /* NVDisplay notifiers are always the 16-byte variety. We only care about + * the NV_DISP_NOTIFIER__0 dword which contains the status. 
*/ + NvU32 base = idx * (NV_DISP_NOTIFIER_SIZEOF / sizeof(NvU32)); + return base + NV_DISP_NOTIFIER__0; +} + +static void EvoInitCompNotifierC3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvWriteEvoCoreNotifier(pDispEvo, OffsetForNotifier(idx), + DRF_DEF(_DISP, _NOTIFIER__0, _STATUS, _NOT_BEGUN)); +} + +static NvBool EvoIsCompNotifierCompleteC3(NVDispEvoPtr pDispEvo, int idx) { + return nvEvoIsCoreNotifierComplete(pDispEvo, OffsetForNotifier(idx), + DRF_BASE(NV_DISP_NOTIFIER__0_STATUS), + DRF_EXTENT(NV_DISP_NOTIFIER__0_STATUS), + NV_DISP_NOTIFIER__0_STATUS_FINISHED); +} + +static void EvoWaitForCompNotifierC3(const NVDispEvoRec *pDispEvo, int idx) +{ + nvEvoWaitForCoreNotifier(pDispEvo, OffsetForNotifier(idx), + DRF_BASE(NV_DISP_NOTIFIER__0_STATUS), + DRF_EXTENT(NV_DISP_NOTIFIER__0_STATUS), + NV_DISP_NOTIFIER__0_STATUS_FINISHED); +} + +static void EvoSetDitherC3(NVDispEvoPtr pDispEvo, const int head, + const NvBool enabled, const NvU32 type, + const NvU32 algo, + NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 ditherControl; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enabled) { + ditherControl = DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _ENABLE); + + switch (type) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_6_BITS: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _BITS, _TO_6_BITS); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_8_BITS: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _BITS, _TO_8_BITS); + break; + /* XXXnvdisplay: Support DITHER_TO_{10,12}_BITS (see also bug 1729668). */ + default: + nvAssert(!"Unknown ditherType"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF: + ditherControl = NVC37D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE; + break; + } + + } else { + ditherControl = DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _ENABLE, _DISABLE); + } + + switch (algo) { + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_ERR_ACC: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_ERR_ACC); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_2X2: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_STATIC_2X2: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _STATIC_2X2); + break; + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_TEMPORAL: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _TEMPORAL); + break; + default: + nvAssert(!"Unknown DitherAlgo"); + // Fall through + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN: + case NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_DYNAMIC_ERR_ACC: + ditherControl |= + DRF_DEF(C37D, _HEAD_SET_DITHER_CONTROL, _MODE, _DYNAMIC_ERR_ACC); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DITHER_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, ditherControl); +} + +static void EvoSetDisplayRateC3(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, + NVEvoUpdateState *updateState, + NvU32 timeoutMicroseconds) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enable) { + timeoutMicroseconds = + NV_MIN(timeoutMicroseconds, + DRF_MASK(NVC37D_HEAD_SET_DISPLAY_RATE_MIN_REFRESH_INTERVAL)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_RATE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _RUN_MODE, 
_ONE_SHOT) | + DRF_NUM(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH_INTERVAL, + timeoutMicroseconds) | + (timeoutMicroseconds == 0 ? + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH, _DISABLE) : + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _MIN_REFRESH, _ENABLE))); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_DISPLAY_RATE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_DISPLAY_RATE, _RUN_MODE, _CONTINUOUS)); + } +} + +static void EvoSetStallLockC3(NVDispEvoPtr pDispEvo, const int head, + NvBool enable, NVEvoUpdateState *updateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVEvoChannelPtr pChannel = pDevEvo->core; + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + if (enable) { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_STALL_LOCK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _ENABLE, _TRUE) | + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _MODE, _ONE_SHOT) | + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _LOCK_PIN, _LOCK_PIN_NONE) | + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _UNSTALL_MODE, _LINE_LOCK)); + } else { + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_STALL_LOCK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_STALL_LOCK, _ENABLE, _FALSE)); + } +} + +static NvBool GetChannelState(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvU32 *result) +{ + NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS info = { }; + NvU32 ret; + + info.base.subdeviceIndex = sd; + info.channelClass = pChan->hwclass; + info.channelInstance = pChan->instance; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_GET_CHANNEL_INFO, + &info, sizeof(info)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query display engine channel state: 0x%08x:%d:%d:0x%08x", + pChan->hwclass, pChan->instance, sd, ret); + return FALSE; + } + + *result = info.channelState; + + return TRUE; +} + +static NvBool EvoIsChannelIdleC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvU32 channelState; + + if (!GetChannelState(pDevEvo, pChan, sd, &channelState)) { + return FALSE; + } + + *result = (channelState == NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE); + + return TRUE; +} + +static NvBool EvoIsChannelMethodPendingC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvBool *result) +{ + NvBool tmpResult; + + /* With C370, Idle and NoMethodPending are equivalent. */ + ct_assert(NVC370_CTRL_GET_CHANNEL_INFO_STATE_IDLE == + NVC370_CTRL_GET_CHANNEL_INFO_STATE_NO_METHOD_PENDING); + + if (!EvoIsChannelIdleC3(pDevEvo, pChan, sd, &tmpResult)) { + return FALSE; + } + + *result = !tmpResult; + + return TRUE; +} + +static NvBool EvoAllocRmCtrlObjectC3(NVDevEvoPtr pDevEvo) +{ + const NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + /* Note that this object is not at all related to the GF100_DISP_SW (9072) + * or NV50_DISPLAY_SW (5072) objects, despite their similarity in name. 
*/ + NvU32 status = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + handle, + NVC372_DISPLAY_SW, NULL); + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to allocate nvdisplay rmctrl object"); + goto fail; + } + + pDevEvo->rmCtrlHandle = handle; + + return TRUE; + +fail: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + return FALSE; +} + +static NvU32 GetAccelerators( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd) +{ + NVC370_CTRL_GET_ACCL_PARAMS params = { }; + NvU32 ret; + + params.base.subdeviceIndex = sd; + params.channelClass = pChannel->hwclass; + nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL); + params.channelInstance = + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_GET_ACCL, + ¶ms, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to retrieve accelerators"); + return 0; + } + + return params.accelerators; +} + +static NvBool SetAccelerators( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NvU32 accelerators, + NvU32 accelMask) +{ + NVC370_CTRL_SET_ACCL_PARAMS params = { }; + NvU32 ret; + + params.base.subdeviceIndex = sd; + params.channelClass = pChannel->hwclass; + nvAssert(pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL); + params.channelInstance = + NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + params.accelerators = accelerators; + params.accelMask = accelMask; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NVC370_CTRL_CMD_SET_ACCL, + ¶ms, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set accelerators"); + return FALSE; + } + + return TRUE; +} + +static void EvoAccelerateChannelC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + NvU32 *pOldAccelerators) +{ + /* Start with a conservative set of accelerators; may need to add more + * later. */ + const NvU32 accelMask = + NVC370_CTRL_ACCL_IGNORE_PI | + NVC370_CTRL_ACCL_SKIP_SEMA; + + *pOldAccelerators = GetAccelerators(pDevEvo, pChannel, sd); + + /* Accelerate window channel. */ + if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +static void EvoResetChannelAcceleratorsC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + const NvU32 sd, + NvU32 oldAccelerators) +{ + /* Start with a conservative set of accelerators; may need to add more + * later. */ + const NvU32 accelMask = + NVC370_CTRL_ACCL_IGNORE_PI | + NVC370_CTRL_ACCL_SKIP_SEMA; + + /* Accelerate window channel. 
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccelerators, accelMask)) { + nvAssert(!"Failed to set accelerators"); + } +} + +static void ForceFlipToNull( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NVEvoUpdateState *updateState) +{ + NVFlipChannelEvoHwState hwState = { }; + const NvU32 subDeviceMask = (1 << sd); + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + + pDevEvo->hal->Flip(pDevEvo, pChannel, &hwState, updateState, + FALSE /* bypassComposition */); + + nvPopEvoSubDevMask(pDevEvo); +} + +static NvBool PollForChannelIdle( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd) +{ + const NvU32 timeout = 2000000; // 2 seconds + NvU64 startTime = 0; + NvBool isMethodPending = TRUE; + + do { + if (!EvoIsChannelMethodPendingC3(pDevEvo, pChannel, sd, + &isMethodPending)) { + break; + } + + if (!isMethodPending) { + break; + } + + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + return FALSE; + } + + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + +/*! + * This function emulates the behavior of the STOP_BASE/STOP_OVERLAY RM control + * calls for pre-EVO hardware. + * + * STOP_BASE/STOP_OVERLAY will apply hardware channel accelerators, push + * methods via the debug interface to NULL context DMAs, and wait for the + * channel to go idle (which means the surface programmed into the core channel + * will become visible). + * + * If we asked RM to do the same thing for the window channel that is emulating + * the base channel on nvdisplay, the display would just go black: there's no + * surface in the core channel, so NULLing the context DMA in the window + * channel will disable both "core" and "base". + * + * So instead, similar functionality is implemented here: we apply + * accelerators, push methods to flip to core, and wait for the channel to + * idle. + */ +typedef struct { + struct { + NvU32 accelerators; + NvBool overridden; + } window[NVKMS_MAX_WINDOWS_PER_DISP]; +} EvoIdleChannelAcceleratorState; + +static NvBool EvoForceIdleSatelliteChannelsWithAccel( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState, + const NvU32 accelMask) +{ + NvU32 sd, window; + NVEvoUpdateState updateState = { }; + NvBool ret = FALSE; + + EvoIdleChannelAcceleratorState *pAcceleratorState = nvCalloc( + pDevEvo->numSubDevices, sizeof(EvoIdleChannelAcceleratorState)); + + if (!pAcceleratorState) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to alloc accelerator state"); + return FALSE; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + /* + * Forcing a channel to be idle is currently only implemented for window + * channels. + */ + if ((idleChannelState->subdev[sd].channelMask & + ~NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0) { + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Forcing non-window channel idle not implemented"); + goto done; + } + + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, + _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + /* Save old window channel accelerators. */ + NvU32 oldAccel = GetAccelerators(pDevEvo, pChannel, sd); + + pAcceleratorState[sd].window[window].accelerators = + oldAccel; + + /* Accelerate window channel. 
*/ + if (!SetAccelerators(pDevEvo, pChannel, sd, accelMask, + accelMask)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set accelerators"); + goto done; + } + pAcceleratorState[sd].window[window].overridden = TRUE; + + /* Push a flip to null in this channel. */ + ForceFlipToNull(pDevEvo, pChannel, sd, &updateState); + } + } + } + + /* Push one update for all of the flips programmed above. */ + EvoUpdateC3(pDevEvo, &updateState, TRUE /* releaseElv */); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + /* Wait for the flips to complete. */ + if (!PollForChannelIdle(pDevEvo, pChannel, sd)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Timed out while idling base channel"); + goto done; + } + } + } + } + + ret = TRUE; + +done: + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + for (window = 0; window < pDevEvo->numWindows; window++) { + if (FLD_IDX_TEST_DRF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE, + idleChannelState->subdev[sd].channelMask)) { + NVEvoChannelPtr pChannel = pDevEvo->window[window]; + + const NvU32 oldAccel = + pAcceleratorState[sd].window[window].accelerators; + + if (!pAcceleratorState[sd].window[window].overridden) { + continue; + } + + /* Restore window channel accelerators. */ + if (!SetAccelerators(pDevEvo, pChannel, sd, oldAccel, + accelMask)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to restore accelerators"); + } + } + } + } + + nvFree(pAcceleratorState); + return ret; +} + +static NvBool EvoForceIdleSatelliteChannelC3( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState) +{ + /* Start with a conservative set of accelerators; may need to add more + * later. 
*/ + const NvU32 accelMask = + NVC370_CTRL_ACCL_IGNORE_PI | + NVC370_CTRL_ACCL_SKIP_SEMA; + + return EvoForceIdleSatelliteChannelsWithAccel(pDevEvo, + idleChannelState, + accelMask); +} + +static NvBool EvoForceIdleSatelliteChannelIgnoreLockC3( + NVDevEvoPtr pDevEvo, + const NVEvoIdleChannelState *idleChannelState) +{ + const NvU32 accelMask = + NVC370_CTRL_ACCL_IGNORE_PI | + NVC370_CTRL_ACCL_SKIP_SEMA | + NVC370_CTRL_ACCL_IGNORE_FLIPLOCK | + NVC370_CTRL_ACCL_IGNORE_INTERLOCK; + + return EvoForceIdleSatelliteChannelsWithAccel(pDevEvo, + idleChannelState, + accelMask); +} + +static void EvoFreeRmCtrlObjectC3(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->rmCtrlHandle) { + NvU32 status; + + status = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->rmCtrlHandle); + + if (status != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to free nvdisplay rmctrl object"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->rmCtrlHandle); + pDevEvo->rmCtrlHandle = 0; + } +} + +static void EvoSetImmPointOutC3(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 sd, + NVEvoUpdateState *updateState, + NvU16 x, NvU16 y) +{ + NVEvoChannelPtr pImmChannel = pChannel->imm.u.dma; + + nvAssert((pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0); + nvAssert(pChannel->imm.type == NV_EVO_IMM_CHANNEL_DMA); + + nvDmaSetStartEvoMethod(pImmChannel, + NVC37B_SET_POINT_OUT(0 /* Left eye */), 1); + + nvDmaSetEvoMethodData(pImmChannel, + DRF_NUM(C37B, _SET_POINT_OUT, _X, x) | + DRF_NUM(C37B, _SET_POINT_OUT, _Y, y)); + + nvWinImmChannelUpdateState(pDevEvo, updateState, pChannel); +} + +static void EvoStartHeadCRC32CaptureC3(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NVConnectorEvoPtr pConnectorEvo, + const enum nvKmsTimingsProtocol protocol, + const NvU32 orIndex, + NvU32 head, + NvU32 sd, + NVEvoUpdateState *updateState) +{ + const NvU32 winChannel = head << 1; + NVEvoChannelPtr pChannel = pDevEvo->core; + NvU32 dmaCtx = pDma->ctxHandle; + NvU32 orOutput = 0; + + /* These method should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + /* The window channel should fit in + * NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL */ + nvAssert(winChannel < DRF_MASK(NVC37D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL)); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + switch (pConnectorEvo->or.type) { + case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR: + if (protocol == NVKMS_PROTOCOL_SOR_DP_A || + protocol == NVKMS_PROTOCOL_SOR_DP_B) { + orOutput = NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF; + } else { + orOutput = + NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(orIndex); + } + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR: + orOutput = + NVC37D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_PIOR(orIndex); + break; + case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC: + /* No DAC support on nvdisplay. Fall through. 
*/ + default: + nvAssert(!"Invalid pConnectorEvo->or.type"); + break; + } + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_CRC(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CONTEXT_DMA_CRC, _HANDLE, dmaCtx)); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _PRIMARY_CRC, orOutput) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _SECONDARY_CRC, _NONE) | + DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, winChannel) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE)); + + /* Reset the CRC notifier */ + nvEvoResetCRC32Notifier(pDma->subDeviceAddress[sd], + NVC37D_NOTIFIER_CRC_STATUS_0, + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE); +} + +static void EvoStopHeadCRC32CaptureC3(NVDevEvoPtr pDevEvo, + NvU32 head, + NVEvoUpdateState *updateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* These method should only apply to a single pDpy */ + nvAssert(pDevEvo->subDevMaskStackDepth > 0); + + nvUpdateUpdateState(pDevEvo, updateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CONTEXT_DMA_CRC(head), 1); + nvDmaSetEvoMethodData(pChannel, 0); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_CRC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _PRIMARY_CRC, _NONE) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _SECONDARY_CRC, _NONE) | + DRF_NUM(C37D, _HEAD_SET_CRC_CONTROL, _CONTROLLING_CHANNEL, 0) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _EXPECT_BUFFER_COLLAPSE, _FALSE) | + DRF_DEF(C37D, _HEAD_SET_CRC_CONTROL, _CRC_DURING_SNOOZE, _DISABLE)); +} + +/*! + * Queries the current head's CRC Notifier and returns values if successful + * + * First waits for hardware to finish writing to the CRC32Notifier, + * and performs a read of the Compositor, SF/OR CRCs, + * and the RG CRC in numCRC32 frames. + * Crc fields in input array crc32 should be calloc'd to 0s. 
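+ *
+ * As a usage sketch (a hypothetical caller, reusing the nvCalloc helper seen
+ * elsewhere in this file), each output array is expected to hold entry_count
+ * zero-initialized NvU32 values:
+ *
+ *     CRC32NotifierCrcOut crcOut = { };
+ *     crcOut.compositorCrc32      = nvCalloc(entry_count, sizeof(NvU32));
+ *     crcOut.rasterGeneratorCrc32 = nvCalloc(entry_count, sizeof(NvU32));
+ *     crcOut.outputCrc32          = nvCalloc(entry_count, sizeof(NvU32));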
+ * + * \param[in] pDevEvo NVKMS device pointer + * \param[in] pDma Pointer to DMA-mapped memory + * \param[in] sd Subdevice index + * \param[in] entry_count Number of independent frames to read CRCs from + * \param[out] crc32 Contains pointers to CRC output arrays + * \param[out] numCRC32 Number of CRC frames successfully read from DMA + * + * \return Returns TRUE if was able to successfully read CRCs from DMA, + * otherwise FALSE + */ +static NvBool EvoQueryHeadCRC32_C3(NVDevEvoPtr pDevEvo, + NVEvoDmaPtr pDma, + NvU32 sd, + NvU32 entry_count, + CRC32NotifierCrcOut *crc32, + NvU32 *numCRC32) +{ + volatile NvU32 *pCRC32Notifier = pDma->subDeviceAddress[sd]; + const NvU32 entry_stride = + NVC37D_NOTIFIER_CRC_CRC_ENTRY1_21 - NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13; + // Define how many/which variables to read from each CRCNotifierEntry struct + const CRC32NotifierEntryRec field_info[NV_EVO3_NUM_CRC_FIELDS] = { + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_11_COMPOSITOR_CRC), + .field_frame_values = crc32->compositorCrc32, + }, + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_12_RG_CRC), + .field_frame_values = crc32->rasterGeneratorCrc32, + }, + { + .field_offset = NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13, + .field_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC), + .field_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_CRC_ENTRY0_13_PRIMARY_OUTPUT_CRC), + .field_frame_values = crc32->outputCrc32 + } + }; + + const CRC32NotifierEntryFlags flag_info[NV_EVO3_NUM_CRC_FLAGS] = { + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_COUNT), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_COUNT), + .flag_type = NVEvoCrc32NotifierFlagCount + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_COMPOSITOR_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_RG_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + }, + { + .flag_base_bit = + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_extent_bit = + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_PRIMARY_OUTPUT_OVERFLOW), + .flag_type = NVEvoCrc32NotifierFlagCrcOverflow + } + }; + + if (!nvEvoWaitForCRC32Notifier(pCRC32Notifier, + NVC37D_NOTIFIER_CRC_STATUS_0, + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + DRF_EXTENT(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + NVC37D_NOTIFIER_CRC_STATUS_0_DONE_TRUE)) { + return FALSE; + } + + *numCRC32 = nvEvoReadCRC32Notifier(pCRC32Notifier, + entry_stride, + entry_count, + NVC37D_NOTIFIER_CRC_STATUS_0, /* Status offset */ + NV_EVO3_NUM_CRC_FIELDS, + NV_EVO3_NUM_CRC_FLAGS, + field_info, + flag_info); + + nvEvoResetCRC32Notifier(pCRC32Notifier, + NVC37D_NOTIFIER_CRC_STATUS_0, + DRF_BASE(NVC37D_NOTIFIER_CRC_STATUS_0_DONE), + NVC37D_NOTIFIER_CRC_STATUS_0_DONE_FALSE); + + return TRUE; +} + +static void EvoGetScanLineC3(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NvU16 *pScanLine, + NvBool *pInBlankingPeriod) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + 
const NvU32 window = head << 1; + void *pDma = pDevEvo->window[window]->pb.control[sd]; + const NvU32 scanLine = nvDmaLoadPioMethod(pDma, NVC37E_GET_LINE); + + if ((scanLine & NVBIT(15)) == 0) { + *pInBlankingPeriod = FALSE; + *pScanLine = scanLine & DRF_MASK(14:0); + } else { + *pInBlankingPeriod = TRUE; + } +} + +/* + * This method configures and programs the RG Core Semaphores. Default behavior + * is to continuously trigger on the specified rasterline when enabled. + */ +static void +EvoConfigureVblankSyncObjectC6(const NVDevEvoPtr pDevEvo, + const NvU16 rasterLine, + const NvU32 head, + const NvU32 semaphoreIndex, + const NvU32 hCtxDma, + NVEvoUpdateState* pUpdateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + /* + * Populate the NVEvoUpdateState for the caller. The Update State contains + * a mask of which display channels need to be updated. + */ + nvUpdateUpdateState(pDevEvo, pUpdateState, pChannel); + + /* + * Tell HW what ctxdma entry to use to look up actual RG semaphore surface. + * If hCtxDma is 0, HW will disable the semaphore. + */ + nvDmaSetStartEvoMethod(pChannel, + NVC67D_HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE(head, semaphoreIndex), + 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_CONTEXT_DMA_RG_REL_SEMAPHORE, _HANDLE, hCtxDma)); + + if (hCtxDma == 0) { + /* Disabling semaphore so no configuration necessary. */ + return; + } + + /* + * Configure the semaphore with the following: + * Set OFFSET to 0 (default). + * Set PAYLOAD_SIZE to 32bits (default). + * Set REL_MODE to WRITE (default). + * Set RUN_MODE to CONTINUOUS. + * Set RASTER_LINE to start of Vblank: Vsync + Vbp + Vactive. + * + * Note that all these options together fit in 32bits, and that all 32 bits + * must be written each time any given option changes. + * + * The actual payload value doesn't currently matter since this RG + * semaphore will be mapped to a syncpt for now. Each HW-issued payload + * write is converted to a single syncpt increment irrespective of what the + * actual semaphore payload value is. 
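+     *
+     * As a worked example (hypothetical timings): with Vsync = 5, Vbp = 36
+     * and Vactive = 2160 lines, the first Vblank line is
+     * rasterLine = 5 + 36 + 2160 = 2201, so in CONTINUOUS mode the semaphore
+     * payload is written once per frame when the RG reaches line 2201.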
+ */ + nvDmaSetStartEvoMethod(pChannel, + NVC67D_HEAD_SET_RG_REL_SEMAPHORE_CONTROL(head, semaphoreIndex), + 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _OFFSET, 0) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _PAYLOAD_SIZE, + _PAYLOAD_32BIT) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _REL_MODE, + _WRITE) | + DRF_DEF(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _RUN_MODE, + _CONTINUOUS) | + DRF_NUM(C67D, _HEAD_SET_RG_REL_SEMAPHORE_CONTROL, _RASTER_LINE, + rasterLine)); +} + +static void EvoSetHdmiFrlDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings) +{ + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + const HDMI_FRL_CONFIG *pFrl = &pTimings->hdmiFrlConfig; + NvU32 bpc, flatnessDetThresh; + NvU32 i; + + nvAssert(pDispEvo->pDevEvo->hal->caps.supportsHDMIFRL && + pFrl->frlRate != HDMI_FRL_DATA_RATE_NONE && + pFrl->dscInfo.bEnableDSC); + + bpc = nvPixelDepthToBitsPerComponent(pTimings->pixelDepth); + if (bpc < 8) { + nvAssert(bpc >= 8); + bpc = 8; + } + flatnessDetThresh = (2 << (bpc - 8)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _MODE, _SINGLE) | /* TODO DUAL for 2Head1OR */ + DRF_NUM(C67D, _HEAD_SET_DSC_CONTROL, _FLATNESS_DET_THRESH, flatnessDetThresh) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _FULL_ICH_ERR_PRECISION, _ENABLE) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _AUTO_RESET, _ENABLE) | + DRF_DEF(C67D, _HEAD_SET_DSC_CONTROL, _FORCE_ICH_RESET, _FALSE)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _LOCATION, _VBLANK) | + DRF_DEF(C67D, _HEAD_SET_DSC_PPS_CONTROL, _FREQUENCY, _EVERY_FRAME) | + /* MFS says "For FRL DSC CVTEM, it should be 0x21 (136bytes)." */ + DRF_NUM(C67D, _HEAD_SET_DSC_PPS_CONTROL, _SIZE, 0x21)); + + /* The loop below assumes the methods are tightly packed. 
*/ + ct_assert(ARRAY_LEN(pFrl->dscInfo.pps) == 32); + ct_assert((NVC67D_HEAD_SET_DSC_PPS_DATA1(0) - NVC67D_HEAD_SET_DSC_PPS_DATA0(0)) == 4); + ct_assert((NVC67D_HEAD_SET_DSC_PPS_DATA31(0) - NVC67D_HEAD_SET_DSC_PPS_DATA0(0)) == (31 * 4)); + for (i = 0; i < ARRAY_LEN(pFrl->dscInfo.pps); i++) { + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_DATA0(head) + (i * 4), 1); + nvDmaSetEvoMethodData(pChannel, pFrl->dscInfo.pps[i]); + } + + /* Byte 0 must be 0x7f, the rest are don't care (will be filled in by HW) */ + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_DSC_PPS_HEAD(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_DSC_PPS_HEAD, _BYTE0, 0x7f)); + + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_HDMI_DSC_HCACTIVE(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCACTIVE, _BYTES, pFrl->dscInfo.dscHActiveBytes) | + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCACTIVE, _TRI_BYTES, pFrl->dscInfo.dscHActiveTriBytes)); + nvDmaSetStartEvoMethod(pChannel, NVC67D_HEAD_SET_HDMI_DSC_HCBLANK(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C67D, _HEAD_SET_HDMI_DSC_HCBLANK, _WIDTH, pFrl->dscInfo.dscHBlankTriBytes)); +} + +static void EvoSetDpDscParams(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings) +{ + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + NvU32 flatnessDetThresh; + NvU32 i; + + nvAssert(pTimings->dpDsc.enable); + + // XXX: I'm pretty sure that this is wrong. + // BitsPerPixelx16 is something like (24 * 16) = 384, and 2 << (384 - 8) is + // an insanely large number. + flatnessDetThresh = (2 << (pTimings->dpDsc.bitsPerPixelX16 - 8)); /* ??? */ + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _MODE, _SINGLE) | /* TODO DUAL for 2Head1OR */ + DRF_NUM(C57D, _HEAD_SET_DSC_CONTROL, _FLATNESS_DET_THRESH, flatnessDetThresh) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _FULL_ICH_ERR_PRECISION, _ENABLE) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _AUTO_RESET, _DISABLE) | + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _FORCE_ICH_RESET, _TRUE)); + + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _TRUE) | + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _LOCATION, _VSYNC) | + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _FREQUENCY, _EVERY_FRAME) | + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_CONTROL, _SIZE, 0x1F /* 32 PPS Dwords - 1 = 31 */)); + + +#define NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS \ + (((NVC57D_HEAD_SET_DSC_PPS_DATA31(0) - NVC57D_HEAD_SET_DSC_PPS_DATA0(0)) / 4) + 1) + + ct_assert(NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS <= ARRAY_LEN(pTimings->dpDsc.pps)); + + for (i = 0; i < NV_EVO5_NUM_HEAD_SET_DSC_PPS_DATA_DWORDS; i++) { + nvDmaSetStartEvoMethod(pChannel,(NVC57D_HEAD_SET_DSC_PPS_DATA0(head) + (i * 4)), 1); + nvDmaSetEvoMethodData(pChannel, pTimings->dpDsc.pps[i]); + } + + /* + * In case of DP, PPS is sent using the SDP over the Main-Link + * during the vertical blanking interval. The PPS SDP header is defined + * in DP 1.4 specification under section 2.2.5.9.1. 
+ */ + + nvDmaSetStartEvoMethod(pChannel, + NVC57D_HEAD_SET_DSC_PPS_HEAD(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE0, 0x00) | /* SDP ID = 0x0 */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE1, 0x10) | /* SDP Type = 0x10 */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE2, 0x7f) | /* Number of payload data bytes - 1 = 0x7F */ + DRF_NUM(C57D, _HEAD_SET_DSC_PPS_HEAD, _BYTE3, 0x00)); /* Reserved */ +} + +static void EvoSetDscParamsC5(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings) +{ + + if (pTimings->hdmiFrlConfig.frlRate != HDMI_FRL_DATA_RATE_NONE) { + if (pTimings->hdmiFrlConfig.dscInfo.bEnableDSC) { + EvoSetHdmiFrlDscParams(pDispEvo, head, pTimings); + } + } else if (pTimings->dpDsc.enable) { + EvoSetDpDscParams(pDispEvo, head, pTimings); + } else { + NVEvoChannelPtr pChannel = pDispEvo->pDevEvo->core; + + /* Disable DSC function */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_CONTROL, _ENABLE, _FALSE)); + + /* Disable PPS SDP (Secondary-Data Packet), DP won't send out PPS SDP */ + nvDmaSetStartEvoMethod(pChannel, NVC57D_HEAD_SET_DSC_PPS_CONTROL(head), 1); + nvDmaSetEvoMethodData(pChannel, + DRF_DEF(C57D, _HEAD_SET_DSC_PPS_CONTROL, _ENABLE, _FALSE)); + } + +} + +static void +EvoEnableMidFrameAndDWCFWatermarkC5(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NvBool enable, + NVEvoUpdateState *pUpdateState) +{ + NVEvoChannelPtr pChannel = pDevEvo->core; + + if (enable) { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A, + _DISABLE_MID_FRAME_AND_DWCF_WATERMARK, + _FALSE, + pDevEvo->gpus[sd].setSwSpareA[head]); + } else { + pDevEvo->gpus[sd].setSwSpareA[head] = + FLD_SET_DRF(C37D, + _HEAD_SET_SW_SPARE_A, + _DISABLE_MID_FRAME_AND_DWCF_WATERMARK, + _TRUE, + pDevEvo->gpus[sd].setSwSpareA[head]); + } + + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + + nvUpdateUpdateState(pDevEvo, pUpdateState, pChannel); + + nvDmaSetStartEvoMethod(pChannel, NVC37D_HEAD_SET_SW_SPARE_A(head), 1); + nvDmaSetEvoMethodData(pChannel, pDevEvo->gpus[sd].setSwSpareA[head]); + + nvPopEvoSubDevMask(pDevEvo); +} + +static NvU32 EvoGetActiveViewportOffsetC3(NVDispEvoRec *pDispEvo, NvU32 head) +{ + NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS params = {0}; + NvU32 ret; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.base.subdeviceIndex = pDispEvo->displayOwner; + params.windowIndex = head << 1; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->rmCtrlHandle, + NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to query active viewport offset"); + } + + return params.activeViewportPointIn.y; +} + +static NvBool EvoComputeWindowScalingTapsC3(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + NVFlipChannelEvoHwState *pHwState) +{ + NvU32 win = NV_EVO_CHANNEL_MASK_WINDOW_NUMBER(pChannel->channelMask); + const NVEvoScalerCaps *pScalerCaps = + &pDevEvo->gpus[0].capabilities.window[win].scalerCaps; + + if (!nvAssignScalerTaps(pDevEvo, + pScalerCaps, + pHwState->sizeIn.width, pHwState->sizeIn.height, + pHwState->sizeOut.width, pHwState->sizeOut.height, + FALSE /* doubleScan */, + &pHwState->hTaps, &pHwState->vTaps)) { + return FALSE; + } + + return TRUE; +} + +static NvBool EvoComputeWindowScalingTapsC5(const NVDevEvoRec *pDevEvo, + const NVEvoChannel *pChannel, + 
NVFlipChannelEvoHwState *pHwState) +{ + if (!EvoComputeWindowScalingTapsC3(pDevEvo, pChannel, pHwState)) { + return FALSE; + } + + /* + * If scaling is enabled, CSC11 will be used by NVKMS to convert from + * linear FP16 LMS to linear FP16 RGB. As such, the user-supplied precomp + * CSC can't be programmed into CSC11 in this case. + */ + if ((pHwState->sizeIn.width != pHwState->sizeOut.width) || + (pHwState->sizeIn.height != pHwState->sizeOut.height)) { + if (!IsCscMatrixIdentity(&pHwState->cscMatrix)) { + return FALSE; + } + } + + return TRUE; +} + +static inline const NVEvoScalerCaps* +EvoGetWindowScalingCapsC3(const NVDevEvoRec *pDevEvo) +{ + /* + * Use window 0 by default. This should be fine for now since precomp + * scaling will only be enabled on Orin, and all windows have the same + * capabilities on Orin. + * + * The mapping in this function can be updated if/when precomp scaling + * support is extended to other display architectures. + */ + return &pDevEvo->gpus[0].capabilities.window[0].scalerCaps; +} + + +NVEvoHAL nvEvoC3 = { + EvoSetRasterParamsC3, /* SetRasterParams */ + EvoSetProcAmpC3, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC3, /* HeadSetControlOR */ + EvoORSetControlC3, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + EvoSetUsageBoundsC3, /* SetUsageBounds */ + EvoUpdateC3, /* Update */ + EvoIsModePossibleC3, /* IsModePossible */ + EvoPrePostIMPC3, /* PrePostIMP */ + EvoSetNotifierC3, /* SetNotifier */ + EvoGetCapabilitiesC3, /* GetCapabilities */ + EvoFlipC3, /* Flip */ + EvoFlipTransitionWARC3, /* FlipTransitionWAR */ + EvoFillLUTSurfaceC3, /* FillLUTSurface */ + EvoSetLUTContextDmaC3, /* SetLUTContextDma */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC3, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + EvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + EvoValidateWindowFormatC3, /* ValidateWindowFormat */ + EvoInitCompNotifierC3, /* InitCompNotifier */ + EvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC3, /* InitChannel */ + NULL, /* InitDefaultLut */ + EvoInitWindowMappingC3, /* InitWindowMapping */ + EvoIsChannelIdleC3, /* IsChannelIdle */ + EvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannelC3, /* AccelerateChannel */ + EvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + EvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + EvoQueryHeadCRC32_C3, /* QueryCRC32 */ + EvoGetScanLineC3, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + nvEvo1SetDscParams, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTapsC3, /* ComputeWindowScalingTaps */ + EvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + { /* caps */ + TRUE, /* 
supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + FALSE, /* needDefaultLutSurface */ + FALSE, /* hasUnorm10OLUT */ + TRUE, /* supportsDigitalVibrance */ + FALSE, /* supportsImageSharpening */ + FALSE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + }, +}; + +NVEvoHAL nvEvoC5 = { + EvoSetRasterParamsC3, /* SetRasterParams */ + EvoSetProcAmpC5, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC5, /* HeadSetControlOR */ + EvoORSetControlC3, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + EvoSetUsageBoundsC5, /* SetUsageBounds */ + EvoUpdateC3, /* Update */ + EvoIsModePossibleC3, /* IsModePossible */ + EvoPrePostIMPC3, /* PrePostIMP */ + EvoSetNotifierC3, /* SetNotifier */ + EvoGetCapabilitiesC5, /* GetCapabilities */ + EvoFlipC5, /* Flip */ + EvoFlipTransitionWARC5, /* FlipTransitionWAR */ + EvoFillLUTSurfaceC5, /* FillLUTSurface */ + EvoSetLUTContextDmaC5, /* SetLUTContextDma */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC5, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + EvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + EvoValidateWindowFormatC5, /* ValidateWindowFormat */ + EvoInitCompNotifierC3, /* InitCompNotifier */ + EvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC5, /* InitChannel */ + EvoInitDefaultLutC5, /* InitDefaultLut */ + EvoInitWindowMappingC5, /* InitWindowMapping */ + EvoIsChannelIdleC3, /* IsChannelIdle */ + EvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannelC3, /* AccelerateChannel */ + EvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + EvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + EvoQueryHeadCRC32_C3, /* QueryCRC32 */ + EvoGetScanLineC3, /* GetScanLine */ + NULL, /* ConfigureVblankSyncObject */ + EvoSetDscParamsC5, /* SetDscParams */ + EvoEnableMidFrameAndDWCFWatermarkC5, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTapsC5, /* ComputeWindowScalingTaps */ + EvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + { /* caps */ + TRUE, /* supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + TRUE, /* needDefaultLutSurface */ + TRUE, /* hasUnorm10OLUT */ + TRUE, /* 
supportsDigitalVibrance */ + FALSE, /* supportsImageSharpening */ + TRUE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + FALSE, /* supportsHDMIFRL */ + TRUE, /* supportsSetStorageMemoryLayout */ + FALSE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + FALSE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + }, +}; + +NVEvoHAL nvEvoC6 = { + EvoSetRasterParamsC3, /* SetRasterParams */ + EvoSetProcAmpC5, /* SetProcAmp */ + EvoSetHeadControlC3, /* SetHeadControl */ + EvoSetHeadRefClkC3, /* SetHeadRefClk */ + EvoHeadSetControlORC5, /* HeadSetControlOR */ + EvoORSetControlC6, /* ORSetControl */ + EvoHeadSetDisplayIdC3, /* HeadSetDisplayId */ + EvoSetUsageBoundsC5, /* SetUsageBounds */ + EvoUpdateC3, /* Update */ + EvoIsModePossibleC3, /* IsModePossible */ + EvoPrePostIMPC3, /* PrePostIMP */ + EvoSetNotifierC3, /* SetNotifier */ + EvoGetCapabilitiesC6, /* GetCapabilities */ + EvoFlipC6, /* Flip */ + EvoFlipTransitionWARC6, /* FlipTransitionWAR */ + EvoFillLUTSurfaceC5, /* FillLUTSurface */ + EvoSetLUTContextDmaC5, /* SetLUTContextDma */ + EvoSetOutputScalerC3, /* SetOutputScaler */ + EvoSetViewportPointInC3, /* SetViewportPointIn */ + EvoSetViewportInOutC5, /* SetViewportInOut */ + EvoSetCursorImageC3, /* SetCursorImage */ + EvoValidateCursorSurfaceC3, /* ValidateCursorSurface */ + EvoValidateWindowFormatC6, /* ValidateWindowFormat */ + EvoInitCompNotifierC3, /* InitCompNotifier */ + EvoIsCompNotifierCompleteC3, /* IsCompNotifierComplete */ + EvoWaitForCompNotifierC3, /* WaitForCompNotifier */ + EvoSetDitherC3, /* SetDither */ + EvoSetStallLockC3, /* SetStallLock */ + EvoSetDisplayRateC3, /* SetDisplayRate */ + EvoInitChannelC5, /* InitChannel */ + EvoInitDefaultLutC5, /* InitDefaultLut */ + EvoInitWindowMappingC5, /* InitWindowMapping */ + EvoIsChannelIdleC3, /* IsChannelIdle */ + EvoIsChannelMethodPendingC3, /* IsChannelMethodPending */ + EvoForceIdleSatelliteChannelC3, /* ForceIdleSatelliteChannel */ + EvoForceIdleSatelliteChannelIgnoreLockC3, /* ForceIdleSatelliteChannelIgnoreLock */ + EvoAccelerateChannelC3, /* AccelerateChannel */ + EvoResetChannelAcceleratorsC3, /* ResetChannelAccelerators */ + EvoAllocRmCtrlObjectC3, /* AllocRmCtrlObject */ + EvoFreeRmCtrlObjectC3, /* FreeRmCtrlObject */ + EvoSetImmPointOutC3, /* SetImmPointOut */ + EvoStartHeadCRC32CaptureC3, /* StartCRC32Capture */ + EvoStopHeadCRC32CaptureC3, /* StopCRC32Capture */ + EvoQueryHeadCRC32_C3, /* QueryCRC32 */ + EvoGetScanLineC3, /* GetScanLine */ + EvoConfigureVblankSyncObjectC6, /* ConfigureVblankSyncObject */ + EvoSetDscParamsC5, /* SetDscParams */ + NULL, /* EnableMidFrameAndDWCFWatermark */ + EvoGetActiveViewportOffsetC3, /* GetActiveViewportOffset */ + NULL, /* ClearSurfaceUsage */ + EvoComputeWindowScalingTapsC5, /* ComputeWindowScalingTaps */ + EvoGetWindowScalingCapsC3, /* GetWindowScalingCaps */ + { /* caps */ + TRUE, /* supportsNonInterlockedUsageBoundsUpdate */ + TRUE, /* supportsDisplayRate */ + FALSE, /* supportsFlipLockRGStatus */ + TRUE, /* needDefaultLutSurface */ + TRUE, /* hasUnorm10OLUT */ + TRUE, /* supportsDigitalVibrance */ + FALSE, /* supportsImageSharpening */ + TRUE, /* supportsHDMIVRR */ + FALSE, /* supportsCoreChannelSurface */ + TRUE, /* supportsHDMIFRL */ + FALSE, /* 
supportsSetStorageMemoryLayout */ + TRUE, /* supportsIndependentAcqRelSemaphore */ + FALSE, /* supportsCoreLut */ + TRUE, /* supportsSynchronizedOverlayPositionUpdate */ + TRUE, /* supportsVblankSyncObjects */ + FALSE, /* requiresScalingTapsInBothDimensions */ + NV_EVO3_SUPPORTED_DITHERING_MODES, /* supportedDitheringModes */ + sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), /* impStructSize */ + NV_EVO_SCALER_2TAPS, /* minScalerTaps */ + }, +}; diff --git a/src/nvidia-modeset/src/nvkms-flip.c b/src/nvidia-modeset/src/nvkms-flip.c new file mode 100644 index 000000000..1f23151a9 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-flip.c @@ -0,0 +1,2899 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-dma.h" +#include "nvkms-evo.h" +#include "nvkms-flip.h" +#include "nvkms-flip-workarea.h" +#include "nvkms-surface.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-rm.h" +#include "nvkms-vrr.h" +#include "nvkms-cursor.h" +#include "nvkms-types.h" + +#include "nvkms-sync.h" + +static void SchedulePostFlipIMPTimer(NVDevEvoPtr pDevEvo); + +// The EVO .mfs file defines the maximum minPresentInterval to be 8. +#define NV_MAX_SWAP_INTERVAL 8 + +/*! + * Assign the elements in an NVSurfaceEvoPtr[NVKMS_MAX_EYES] array. + * + * Use NVEvoApiHandlesRec to translate an + * NvKmsSurfaceHandle[NVKMS_MAX_EYES] array into an an + * NVSurfaceEvoPtr[NVKMS_MAX_EYES] array. + * + * \param[in] pOpenDevSurfaceHandles The client's surfaces. + * \param[in] surfaceHandles The handles naming surfaces. + * \param[out] pSurfaceEvos The surface pointers. + * + * \return Return TRUE if all surfaceHandles could be successfully + * translated into pSurfaceEvos. Otherwise, return FALSE. 
+ */ +static NvBool AssignSurfaceArray( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandles[NVKMS_MAX_EYES], + const NVEvoChannelMask channelMask, + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES]) +{ + NvU32 eye; + + nvkms_memset(pSurfaceEvos, 0, sizeof(NVSurfaceEvoRec *) * NVKMS_MAX_EYES); + + for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + if (surfaceHandles[eye] != 0) { + pSurfaceEvos[eye] = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandles[eye], + channelMask); + if ((pSurfaceEvos[eye] == NULL) || + (pSurfaceEvos[eye]->isoType != NVKMS_MEMORY_ISO)) { + return FALSE; + } + } + } + return TRUE; +} + + +/*! + * Assign the NVFlipNIsoSurfaceEvoHwState. + * + * Use the given NvKmsNIsoSurface to populate the + * NVFlipNIsoSurfaceEvoHwState. Validate that NvKmsNIsoSurface + * description is legitimate. + * + * \param[in] pDevEvo The device where the surface will be used. + * \param[in] pOpenDevSurfaceHandles The client's surfaces. + * \param[in] pParamsNIso The client's description of the NISO surface. + * \param[in] notifier Whether the NISO surface is a notifier. + * \param[in] pChannel The channel where the surface will be used. + * \param[out] pNIsoState The NVKMS presentation of the NISO surface. + * + * \return Return TRUE if the NVFlipNIsoSurfaceEvoHwState could be + * assigned and validated. Otherwise, return FALSE and leave + * the NVFlipNIsoSurfaceEvoHwState untouched. + */ +static NvBool AssignNIsoEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsNIsoSurface *pParamsNIso, + const NvBool notifier, /* TRUE=notifier; FALSE=semaphore */ + const NVEvoChannel *pChannel, + NVFlipNIsoSurfaceEvoHwState *pNIsoState) +{ + NVSurfaceEvoPtr pSurfaceEvo; + NvU32 elementSizeInBytes = 0, offsetInBytes, maxBytes; + + nvAssert(pParamsNIso->surfaceHandle != 0); + + pSurfaceEvo = + nvEvoGetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + pParamsNIso->surfaceHandle, + pChannel->channelMask); + if (pSurfaceEvo == NULL) { + return FALSE; + } + + /* Attempt to validate the surface: */ + + /* Only pitch surfaces can be used */ + if (pSurfaceEvo->layout != NvKmsSurfaceMemoryLayoutPitch) { + return FALSE; + } + + if (pSurfaceEvo->isoType != NVKMS_MEMORY_NISO) { + return FALSE; + } + + if ((pParamsNIso->format != NVKMS_NISO_FORMAT_FOUR_WORD) && + (pParamsNIso->format != NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY) && + (pParamsNIso->format != NVKMS_NISO_FORMAT_LEGACY)) { + return FALSE; + } + + if ((pDevEvo->caps.validNIsoFormatMask & + (1 << pParamsNIso->format)) == 0) { + return FALSE; + } + + /* Check that the item fits within the surface. */ + switch (pParamsNIso->format) { + case NVKMS_NISO_FORMAT_FOUR_WORD: + case NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY: + elementSizeInBytes = 16; + break; + case NVKMS_NISO_FORMAT_LEGACY: + if (notifier) { + /* Legacy notifier size depends on the channel. */ + elementSizeInBytes = pChannel->caps.legacyNotifierFormatSizeBytes; + } else { + /* Legacy semaphores are always 4 bytes. */ + elementSizeInBytes = 4; + } + break; + } + +#if defined(DEBUG) + /* Assert that the size calculated by nvkms-sync library is the same as the + * one we derived from channel caps above. 
*/ + if (notifier) { + NvBool overlay = !!(pChannel->channelMask & + NV_EVO_CHANNEL_MASK_OVERLAY_ALL); + NvU32 libSize = nvKmsSizeOfNotifier(pParamsNIso->format, overlay); + nvAssert(libSize == elementSizeInBytes); + } else { + nvAssert(nvKmsSizeOfSemaphore(pParamsNIso->format) == elementSizeInBytes); + } +#endif + /* + * offsetInWords is an NvU16 and offsetInBytes is an NvU32, so + * neither of the expressions: + * offsetInWords * 4 + * offsetInBytes + elementSizeInBytes + * should ever overflow. + */ + + ct_assert(sizeof(pParamsNIso->offsetInWords) == 2); + + offsetInBytes = ((NvU32)pParamsNIso->offsetInWords) * 4; + + /* + * Compute the upper extent of the NISO element within the surface. + */ + + maxBytes = offsetInBytes + elementSizeInBytes; + + if (maxBytes > pSurfaceEvo->planes[0].rmObjectSizeInBytes) { + return FALSE; + } + + /* EVO expects the NISO element to fit within a 4k page. */ + + if (maxBytes > 4096) { + return FALSE; + } + + /* + * XXX NVKMS TODO: Check that the surface is in vidmem if + * NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY + */ + + pNIsoState->pSurfaceEvo = pSurfaceEvo; + pNIsoState->format = pParamsNIso->format; + pNIsoState->offsetInWords = pParamsNIso->offsetInWords; + + return TRUE; +} + + +static NvBool AssignCompletionNotifierEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const struct NvKmsCompletionNotifierDescription *pParamsNotif, + const NVEvoChannel *pChannel, + NVFlipCompletionNotifierEvoHwState *pNotif) +{ + NvBool ret; + + nvkms_memset(pNotif, 0, sizeof(*pNotif)); + + /* If no surface is specified, we should not use a notifier. */ + if (pParamsNotif->surface.surfaceHandle == 0) { + return TRUE; + } + + ret = AssignNIsoEvoHwState(pDevEvo, + pOpenDevSurfaceHandles, + &pParamsNotif->surface, + TRUE, /* notifier */ + pChannel, + &pNotif->surface); + if (ret) { + pNotif->awaken = pParamsNotif->awaken; + } + + return ret; +} + +static NvBool AssignSemaphoreEvoHwState( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NVEvoChannel *pChannel, + const NvU32 sd, + const struct NvKmsChannelSyncObjects *pChannelSyncObjects, + NVFlipSyncObjectEvoHwState *pFlipSyncObject) +{ + NvBool ret; + + nvAssert(!pChannelSyncObjects->useSyncpt); + + nvkms_memset(pFlipSyncObject, 0, sizeof(*pFlipSyncObject)); + + if (!pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore) { + /*! acquire and release sema surface needs to be same */ + if (pChannelSyncObjects->u.semaphores.acquire.surface.surfaceHandle != + pChannelSyncObjects->u.semaphores.release.surface.surfaceHandle) { + return FALSE; + } + if (pChannelSyncObjects->u.semaphores.acquire.surface.format != + pChannelSyncObjects->u.semaphores.release.surface.format) { + return FALSE; + } + if (pChannelSyncObjects->u.semaphores.acquire.surface.offsetInWords != + pChannelSyncObjects->u.semaphores.release.surface.offsetInWords) { + return FALSE; + } + } + + /*! If no surface is specified, we should not use a semaphore.*/ + if (pChannelSyncObjects->u.semaphores.acquire.surface.surfaceHandle != 0) { + + ret = AssignNIsoEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pChannelSyncObjects->u.semaphores.acquire.surface, + FALSE, /* notifier */ + pChannel, + &pFlipSyncObject->u.semaphores.acquireSurface); + if (ret) { + pFlipSyncObject->u.semaphores.acquireValue = + pChannelSyncObjects->u.semaphores.acquire.value; + } else { + return ret; + } + } + + /*! 
If no surface is specified, we should not use a semaphore.*/ + if (pChannelSyncObjects->u.semaphores.release.surface.surfaceHandle != 0) { + + ret = AssignNIsoEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pChannelSyncObjects->u.semaphores.release.surface, + FALSE, /* notifier */ + pChannel, + &pFlipSyncObject->u.semaphores.releaseSurface); + if (ret) { + pFlipSyncObject->u.semaphores.releaseValue = + pChannelSyncObjects->u.semaphores.release.value; + } else { + return ret; + } + } + + return TRUE; +} + +static NvBool AssignPreSyncptEvoHwState( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, + const struct NvKmsChannelSyncObjects *pChannelSyncObjects, + NVFlipSyncObjectEvoHwState *pFlipSyncObject) +{ + NvBool ret, bFound = FALSE; + NvU32 id = 0; + NvU32 hSyncptCtxDma, hSyncpt; + NvU32 value; + enum NvKmsSyncptType preType; + + nvAssert(pDevEvo->pAllSyncptUsedInCurrentFlip != NULL); + + nvAssert(pChannelSyncObjects->useSyncpt); + + preType = pChannelSyncObjects->u.syncpts.pre.type; + + if (preType == NVKMS_SYNCPT_TYPE_NONE) { + return TRUE; + } + + if (preType == NVKMS_SYNCPT_TYPE_FD) { + /*! Get id from fd using nvhost API */ + NvKmsSyncPtOpParams params = { }; + params.fd_to_id_and_thresh.fd = + pChannelSyncObjects->u.syncpts.pre.u.fd; + ret = nvkms_syncpt_op(NVKMS_SYNCPT_OP_FD_TO_ID_AND_THRESH, + ¶ms); + if (!ret) { + return FALSE; + } + id = params.fd_to_id_and_thresh.id; + value = params.fd_to_id_and_thresh.thresh; + } else { + id = pChannelSyncObjects->u.syncpts.pre.u.raw.id; + value = pChannelSyncObjects->u.syncpts.pre.u.raw.value; + } + if (id >= NV_SYNCPT_GLOBAL_TABLE_LENGTH) { + return FALSE; + } + /*! use id value to check the global table */ + bFound = (pDevEvo->preSyncptTable[id].hCtxDma != 0); + if (bFound == FALSE) { + /*! Register - allocate and bind ctxdma to syncpt*/ + ret = nvRmEvoAllocAndBindSyncpt(pDevEvo, + pChannel, + id, + &hSyncpt, + &hSyncptCtxDma); + if (!ret) { + nvAssert(!"Failed to register pre-syncpt"); + return FALSE; + } + + /*! Fill the Entry in Global Table */ + pDevEvo->preSyncptTable[id].hCtxDma = hSyncptCtxDma; + pDevEvo->preSyncptTable[id].hSyncpt = hSyncpt; + pDevEvo->preSyncptTable[id].channelMask |= pChannel->channelMask; + pDevEvo->pAllSyncptUsedInCurrentFlip[id] = NV_TRUE; + pDevEvo->preSyncptTable[id].id = id; + } else { + /*! + * syncpt found, just bind the context dma of this syncpt + * to the window if it is not already. + */ + if ((pDevEvo->preSyncptTable[id].channelMask & + pChannel->channelMask) == 0) { + + ret = nvRmEvoBindDispContextDMA(pDevEvo, + pChannel, + pDevEvo->preSyncptTable[id].hCtxDma); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to bind pre-syncpt with ctxdma"); + return ret; + } + pDevEvo->preSyncptTable[id].channelMask |= pChannel->channelMask; + pDevEvo->pAllSyncptUsedInCurrentFlip[id] = NV_TRUE; + /*! hSyncpt already allocated for id*/ + } + } + /*! 
Fill pre-syncpt related information in hardware state */ + pFlipSyncObject->u.syncpts.preCtxDma = pDevEvo->preSyncptTable[id].hCtxDma; + pFlipSyncObject->u.syncpts.preValue = value; + pFlipSyncObject->usingSyncpt = TRUE; + + return TRUE; +} + +static NvBool AssignPostSyncptEvoHwState( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, + const struct NvKmsChannelSyncObjects *pChannelSyncObjects, + NVFlipSyncObjectEvoHwState *pFlipSyncObject) +{ + enum NvKmsSyncptType postType; + NvU32 threshold; + NvKmsSyncPtOpParams params = { }; + NvBool ret = TRUE; + + nvAssert(pChannelSyncObjects->useSyncpt); + + postType = pChannelSyncObjects->u.syncpts.requestedPostType; + + /*! + * It is possible that syncpt is mentioned but post-syncpt + * is not specified (case where only pre-syncpt used) + */ + if (postType == NVKMS_SYNCPT_TYPE_NONE) { + return TRUE; + } + + + /*! read max value of current syncpt id */ + params.read_maxval.id = pChannel->postSyncpt.id; + ret = nvkms_syncpt_op(NVKMS_SYNCPT_OP_READ_MAXVAL, ¶ms); + if (!ret) { + nvAssert(!"Failed syncpt op READ_MAXVAL"); + return FALSE; + } + + /*! return threshold to caller but increase only when programming hw */ + threshold = params.read_maxval.maxval + 1; + + /*! each channel associated with one post-syncpt */ + pFlipSyncObject->u.syncpts.postCtxDma = pChannel->postSyncpt.hCtxDma; + pFlipSyncObject->u.syncpts.postValue = threshold; + + pFlipSyncObject->usingSyncpt = TRUE; + + return TRUE; +} + +void nvFillPostSyncptReplyOneChannel( + NVEvoChannel *pChannel, + enum NvKmsSyncptType postType, + struct NvKmsSyncpt *postSyncpt, + const NVFlipSyncObjectEvoHwState *pHwSyncObject) +{ + if (postType == NVKMS_SYNCPT_TYPE_RAW) { + postSyncpt->u.raw.id = pChannel->postSyncpt.id; + postSyncpt->u.raw.value = pHwSyncObject->u.syncpts.postValue; + postSyncpt->type = NVKMS_SYNCPT_TYPE_RAW; + } else if (postType == NVKMS_SYNCPT_TYPE_FD) { + NvBool ret = TRUE; + NvKmsSyncPtOpParams params = { }; + params.id_and_thresh_to_fd.id = pChannel->postSyncpt.id; + params.id_and_thresh_to_fd.thresh = + pHwSyncObject->u.syncpts.postValue; + + ret = nvkms_syncpt_op(NVKMS_SYNCPT_OP_ID_AND_THRESH_TO_FD, ¶ms); + if (!ret) { + nvAssert(!"Failed syncpt op ID_AND_THRESH_TO_FD"); + return; + } + postSyncpt->u.fd = params.id_and_thresh_to_fd.fd; + postSyncpt->type = NVKMS_SYNCPT_TYPE_FD; + } +} + +static void FillPostSyncptReply( + NVDevEvoRec *pDevEvo, + NvU32 sd, + const struct NvKmsFlipRequestOneSubDevice *pRequestOneSubDevice, + struct NvKmsFlipReplyOneSubDevice *pReplyOneSubDevice, + const struct NvKmsFlipWorkArea *pWorkArea) +{ + NvU32 head; + + /*! 
check for valid config */ + if (!pDevEvo->supportsSyncpts) { + return; + } + + for (head = 0; head < ARRAY_LEN(pRequestOneSubDevice->head); head++) { + const struct NvKmsFlipCommonParams *pRequestParams = + &pRequestOneSubDevice->head[head]; + struct NvKmsFlipCommonReplyOneHead *pReplyParams = + &pReplyOneSubDevice->head[head]; + const NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pRequestParams->layer[layer].syncObjects.specified || + !pRequestParams->layer[layer].syncObjects.val.useSyncpt) { + continue; + } + + nvFillPostSyncptReplyOneChannel( + pDevEvo->head[head].layer[layer], + pRequestParams->layer[layer].syncObjects.val.u.syncpts.requestedPostType, + &pReplyParams->layer[layer].postSyncpt, + &pFlipState->layer[layer].syncObject); + } + } +} + +NvBool nvHandleSyncptRegistration( + NVDevEvoRec *pDevEvo, + NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + NvBool ret = TRUE; + NvU32 layer; + + if (!pDevEvo->supportsSyncpts) { + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pParams->layer[layer].syncObjects.specified && + pParams->layer[layer].syncObjects.val.useSyncpt) { + return FALSE; + } + } + + return TRUE; + } + + pDevEvo->pAllSyncptUsedInCurrentFlip = + nvCalloc(1, sizeof(NvBool) * NV_SYNCPT_GLOBAL_TABLE_LENGTH); + if (pDevEvo->pAllSyncptUsedInCurrentFlip == NULL) { + return FALSE; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pParams->layer[layer].syncObjects.specified || + !pParams->layer[layer].syncObjects.val.useSyncpt) { + continue; + } + + nvkms_memset(&pFlipState->layer[layer].syncObject, + 0, + sizeof(pFlipState->layer[layer].syncObject)); + + ret = AssignPreSyncptEvoHwState(pDevEvo, + pDevEvo->head[head].layer[layer], + &pParams->layer[layer].syncObjects.val, + &pFlipState->layer[layer].syncObject); + if (!ret) { + nvAssert(!"Failed to store hw state for layer pre-syncpt"); + goto done; + } + + ret = AssignPostSyncptEvoHwState(pDevEvo, + pDevEvo->head[head].layer[layer], + &pParams->layer[layer].syncObjects.val, + &pFlipState->layer[layer].syncObject); + if (!ret) { + nvAssert(!"Failed to store hw state for layer post-syncpt"); + goto done; + } + } + +done: + nvFree(pDevEvo->pAllSyncptUsedInCurrentFlip); + pDevEvo->pAllSyncptUsedInCurrentFlip = NULL; + return ret; +} + + +void nvClearFlipEvoHwState( + NVFlipEvoHwState *pFlipState) +{ + NvU32 i; + + nvkms_memset(pFlipState, 0, sizeof(*pFlipState)); + + for (i = 0; i < ARRAY_LEN(pFlipState->layer); i++) { + pFlipState->layer[i].cscMatrix = NVKMS_IDENTITY_CSC_MATRIX; + } +} + +/*! + * Initialize NVFlipEvoHwState with a current snapshot from headState. 
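+ *
+ * A single-head flip is processed in roughly this order (a sketch of the
+ * sequence described by the comments on the functions in this file):
+ *
+ *   nvInitFlipEvoHwState(pDevEvo, sd, head, pFlipState);
+ *   nvUpdateFlipEvoHwState(pOpenDev, pDevEvo, sd, head, pParams,
+ *                          pFlipState, allowVrr, pPossibleUsage);
+ *   nvValidateFlipEvoHwState(pDevEvo, head, pTimings, pFlipState);
+ *   nvFlipEvoOneHead(pDevEvo, sd, head, pFlipState, allowFlipLock,
+ *                    pUpdateState);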
+ */ +void nvInitFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + NVFlipEvoHwState *pFlipState) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const NVEvoSubDevHeadStateRec *pSdHeadState; + NvU32 i; + + nvClearFlipEvoHwState(pFlipState); + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + pSdHeadState = &pDevEvo->gpus[sd].headState[head]; + + pFlipState->viewPortPointIn = pSdHeadState->viewPortPointIn; + pFlipState->cursor = pSdHeadState->cursor; + + ct_assert(ARRAY_LEN(pFlipState->layer) == ARRAY_LEN(pSdHeadState->layer)); + + for (i = 0; i < ARRAY_LEN(pFlipState->layer); i++) { + pFlipState->layer[i] = pSdHeadState->layer[i]; + } + + // pFlipState->usage describes the usage bounds that will be necessary after + // this flip is complete. Initialize it using pSdHeadState->targetUsage, + // which describes the usage bounds that will be required just before this + // flip occurs, rather than pSdHeadState->usage, which describes the usage + // bounds currently programmed into the hardware. + // + // pSdHeadState->usage may have higher bounds than pSdHeadState->targetUsage + // if TryLoweringUsageBounds has not yet noticed that a satellite channel is + // no longer in use, or a flip to NULL in a satellite channel is pending but + // has not yet occurred. + pFlipState->usage = pSdHeadState->targetUsage; + + pFlipState->disableMidFrameAndDWCFWatermark = + pSdHeadState->targetDisableMidFrameAndDWCFWatermark; +} + + +static NvBool IsLayerDirty(const struct NvKmsFlipCommonParams *pParams, + const NvU32 layer) +{ + return pParams->layer[layer].surface.specified || + pParams->layer[layer].sizeIn.specified || + pParams->layer[layer].sizeOut.specified || + pParams->layer[layer].outputPosition.specified || + pParams->layer[layer].completionNotifier.specified || + pParams->layer[layer].syncObjects.specified || + pParams->layer[layer].compositionParams.specified || + pParams->layer[layer].csc.specified; +} + +/*! + * Check whether the flipPermissions for pOpenDev allow the flipping + * requested by NvKmsFlipCommonParams. + */ +static NvBool CheckFlipPermissions( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams) +{ + const int dispIndex = pDevEvo->gpus[sd].pDispEvo->displayOwner; + const struct NvKmsFlipPermissions *pFlipPermissions = + nvGetFlipPermissionsFromOpenDev(pOpenDev); + const struct NvKmsModesetPermissions *pModesetPermissions = + nvGetModesetPermissionsFromOpenDev(pOpenDev); + const NvU8 allLayersMask = NVBIT(pDevEvo->head[head].numLayers) - 1; + NvU8 layerMask = 0; + NvU32 layer; + + nvAssert(pOpenDev != NULL); + nvAssert(pFlipPermissions != NULL); + nvAssert(pModesetPermissions != NULL); + + layerMask = pFlipPermissions->disp[dispIndex].head[head].layerMask; + + /* + * If the client has modeset permissions for this disp+head, allow + * the client to also perform flips on any layer. + */ + if (!nvDpyIdListIsEmpty(pModesetPermissions->disp[dispIndex]. + head[head].dpyIdList)) { + layerMask = allLayersMask; + } + + /* Changing viewPortIn requires permission to alter all layers. */ + + if (pParams->viewPortIn.specified && (layerMask != allLayersMask)) { + return FALSE; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (IsLayerDirty(pParams, layer) && ((layerMask & NVBIT(layer)) == 0)) { + return FALSE; + } + } + + return TRUE; +} + +/*! 
+ * Determine whether a base channel flip requires a non-tearing present mode. + * + * EVO requires a non-tearing flip when certain parameters are changing. See + * NV_DISP_BASE_STATE_ERROR_052 in dispClass024XBaseUpdateErrorChecks.mfs. + */ +static NvBool FlipRequiresNonTearingMode( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVFlipChannelEvoHwState *pOld, + const NVFlipChannelEvoHwState *pNew) +{ + // TODO: Do we need to care about the right eye here? The error check + // doesn't. + const NVSurfaceEvoRec *pOldSurf = pOld->pSurfaceEvo[NVKMS_LEFT]; + const NVSurfaceEvoRec *pNewSurf = pNew->pSurfaceEvo[NVKMS_LEFT]; + NvU32 oldHwFormat = 0, newHwFormat = 0; + + if (pOldSurf == NULL || pNewSurf == NULL) { + return TRUE; + } + + // If these functions actually return FALSE at this point, then something is + // really wrong... + if (!pDevEvo->hal->ValidateWindowFormat( + pOldSurf->format, NULL, &oldHwFormat)) { + nvAssert(FALSE); + } + + if (!pDevEvo->hal->ValidateWindowFormat( + pNewSurf->format, NULL, &newHwFormat)) { + nvAssert(FALSE); + } + + // Commented entries are things checked in the .mfs that are not yet + // supported in NVKMS. + return // SuperSample + oldHwFormat != newHwFormat || + // Gamma + // Layout (i.e. frame, field1, or field2) + pOldSurf->widthInPixels != pNewSurf->widthInPixels || + pOldSurf->heightInPixels != pNewSurf->heightInPixels || + pOldSurf->layout != pNewSurf->layout; + // UseGainOfs + // NewBaseLut -- USE_CORE_LUT is programmed in InitChannel* + // NewOutputLut +} + + +/*! + * Apply flip overrides if necessary. + * + * 1. Override swap intervals for VRR. + * 2. If the flip is changing certain parameters, override the tearing mode. + */ +static NvBool ApplyBaseFlipOverrides( + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + NVFlipChannelEvoHwState *pNew, + NvBool allowVrr) +{ + const NVDispEvoRec *pDispEvo = pDevEvo->gpus[sd].pDispEvo; + const NVFlipChannelEvoHwState *pOld = + &pDevEvo->gpus[sd].headState[head].layer[NVKMS_MAIN_LAYER]; + + // Apply VRR swap interval overrides. + // + // Note that this applies the overrides whenever the client requests VRR and + // VRR is enabled, regardless of whether actually activating it later + // succeeds. + if (allowVrr) { + if (!nvHeadIsActive(pDispEvo, head)) { + // + // XXX If VRR is allowed then modeset should have happened before + // base channel flip, currently we don't know how to do modeset + // and program base channel for VRR at same time. This should be + // revisited as part of bug 1731279. + // + return FALSE; + } + nvApplyVrrBaseFlipOverrides(pDevEvo->gpus[sd].pDispEvo, head, + pOld, pNew); + } + + if (!nvHeadIsActive(pDispEvo, head)) { + // + // This is possible when modeset and base flip happening at same time, + // tearing parameter does not make sense in that case, + // it should is disabled. + // + pNew->tearing = FALSE; + } else { + // Force non-tearing mode if EVO requires it. + if (FlipRequiresNonTearingMode(pDevEvo, head, pOld, pNew)) { + pNew->tearing = FALSE; + } + } + + return TRUE; +} + +static NvBool ValidateScalingUsageBounds( + const struct NvKmsScalingUsageBounds *pS, + const struct NvKmsScalingUsageBounds *pMaxS) +{ + return (pS->maxVDownscaleFactor <= pMaxS->maxVDownscaleFactor) && + (pS->maxHDownscaleFactor <= pMaxS->maxHDownscaleFactor) && + (pS->vTaps <= pMaxS->vTaps) && + (!pS->vUpscalingAllowed || pMaxS->vUpscalingAllowed); +} + +/*! + * Validate the requested usage bounds against the specified maximums. 
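+ *
+ * Concretely, for each layer: the layer may be marked usable only if the
+ * guaranteed bounds mark it usable, its surface memory formats must be a
+ * subset of the guaranteed formats, and its scaling bounds must not
+ * exceed the guaranteed scaling bounds.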
+ */ +static NvBool ValidateUsageBounds( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const struct NvKmsUsageBounds *pUsage, + const struct NvKmsUsageBounds *pGuaranteedUsage) +{ + NvU32 i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + const NvU64 supportedSurfaceFormatsUnion = + pUsage->layer[i].supportedSurfaceMemoryFormats | + pGuaranteedUsage->layer[i].supportedSurfaceMemoryFormats; + + if ((pUsage->layer[i].usable && !pGuaranteedUsage->layer[i].usable) || + (supportedSurfaceFormatsUnion != + pGuaranteedUsage->layer[i].supportedSurfaceMemoryFormats) || + !ValidateScalingUsageBounds(&pUsage->layer[i].scaling, + &pGuaranteedUsage->layer[i].scaling)) { + return FALSE; + } + } + + return TRUE; +} + +/*! + * Assign pFlipState->usage. + */ +static NvBool AssignUsageBounds( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + NVFlipEvoHwState *pFlipState) +{ + struct NvKmsUsageBounds *pUsage = &pFlipState->usage; + int i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + const NVFlipChannelEvoHwState *pLayerFlipState = &pFlipState->layer[i]; + + nvInitScalingUsageBounds(pDevEvo, &pUsage->layer[i].scaling); + + if (pLayerFlipState->pSurfaceEvo[NVKMS_LEFT]) { + pUsage->layer[i].usable = TRUE; + pUsage->layer[i].supportedSurfaceMemoryFormats = + nvEvoGetFormatsWithEqualOrLowerUsageBound( + pLayerFlipState->pSurfaceEvo[NVKMS_LEFT]->format, + pDevEvo->caps.layerCaps[i].supportedSurfaceMemoryFormats); + + /* Scaling is not currently supported for the main layer. Bug 3488083 */ + if (i != NVKMS_MAIN_LAYER && pDevEvo->hal->GetWindowScalingCaps) { + const NVEvoScalerCaps *pScalerCaps = + pDevEvo->hal->GetWindowScalingCaps(pDevEvo); + + if (!nvComputeScalingUsageBounds(pScalerCaps, + pLayerFlipState->sizeIn.width, + pLayerFlipState->sizeIn.height, + pLayerFlipState->sizeOut.width, + pLayerFlipState->sizeOut.height, + pLayerFlipState->hTaps, + pLayerFlipState->vTaps, + &pUsage->layer[i].scaling)) { + return FALSE; + } + } + } else { + pUsage->layer[i].usable = FALSE; + pUsage->layer[i].supportedSurfaceMemoryFormats = 0; + } + } + + return TRUE; +} + +static NvBool OverrideUsageBounds(const NVDevEvoRec *pDevEvo, + NVFlipEvoHwState *pFlipState, + const struct NvKmsFlipCommonParams *pParams, + NvU32 sd, + NvU32 head, + const struct NvKmsUsageBounds *pPossibleUsage) +{ + NvU32 i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + const struct NvKmsScalingUsageBounds *pPossibleScaling = + &pPossibleUsage->layer[i].scaling; + struct NvKmsScalingUsageBounds *pTargetScaling = + &pFlipState->usage.layer[i].scaling; + NvU16 possibleV = pPossibleScaling->maxVDownscaleFactor; + NvU16 possibleH = pPossibleScaling->maxHDownscaleFactor; + NvU16 targetV = pTargetScaling->maxVDownscaleFactor; + NvU16 targetH = pTargetScaling->maxHDownscaleFactor; + + if (!pFlipState->usage.layer[i].usable) { + continue; + } + + if (pParams->layer[i].maxDownscaleFactors.specified) { + NvU16 requestedV = pParams->layer[i].maxDownscaleFactors.vertical; + NvU16 requestedH = pParams->layer[i].maxDownscaleFactors.horizontal; + + if ((requestedV < targetV) || (requestedH < targetH)) { + return FALSE; + } + + if ((requestedV > possibleV) || (requestedH > possibleH)) { + return FALSE; + } + + pTargetScaling->maxVDownscaleFactor = requestedV; + pTargetScaling->maxHDownscaleFactor = requestedH; + } else { + /* + * Calculate max H/V downscale factor by quantizing the range. + * + * E.g., + * max H/V downscale factor supported by HW is 4x for 5-tap and 2x + * for 2-tap mode. 
If 5-tap mode is required, the target usage bound + * that nvkms will attempt to program will either allow up to 2x + * downscaling, or up to 4x downscaling. If 2-tap mode is required, + * the target usage bound that NVKMS will attempt to program will + * allow up to 2x downscaling. Example: to downscale from 4096x2160 + * -> 2731x864 in 5-tap mode, NVKMS would specify up to 2x for the + * H downscale bound (required is 1.5x), and up to 4x for the V + * downscale bound (required is 2.5x). + */ + if (targetV > NV_EVO_SCALE_FACTOR_1X) { + const NvU16 possibleMid = + NV_EVO_SCALE_FACTOR_1X + ((possibleV - NV_EVO_SCALE_FACTOR_1X) / 2); + + if (targetV <= possibleMid) { + pTargetScaling->maxVDownscaleFactor = possibleMid; + } else { + pTargetScaling->maxVDownscaleFactor = possibleV; + } + } + + if (targetH > NV_EVO_SCALE_FACTOR_1X) { + const NvU16 possibleMid = + NV_EVO_SCALE_FACTOR_1X + ((possibleH - NV_EVO_SCALE_FACTOR_1X) / 2); + + if (targetH <= possibleMid) { + pTargetScaling->maxHDownscaleFactor = possibleMid; + } else { + pTargetScaling->maxHDownscaleFactor = possibleH; + } + } + } + + pTargetScaling->vTaps = pPossibleScaling->vTaps; + pTargetScaling->vUpscalingAllowed = pPossibleScaling->vUpscalingAllowed; + } + + return TRUE; +} + +static NvBool FlipTimeStampValidForChannel( + const NVEvoChannel *pChannel, + NvU64 timeStamp) +{ + if (pChannel->caps.validTimeStampBits < 64) { + const NvU64 validTimeStampMask = + NVBIT64(pChannel->caps.validTimeStampBits) - 1; + if ((timeStamp & ~validTimeStampMask) != 0) { + return FALSE; + } + } + return TRUE; +} + +static NvBool ValidatePerLayerCompParams( + const struct NvKmsCompositionParams *pCompParams, + const struct NvKmsCompositionCapabilities *pCaps, + NVSurfaceEvoPtr pSurfaceEvo) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = (pSurfaceEvo != NULL) ? + nvKmsGetSurfaceMemoryFormatInfo(pSurfaceEvo->format) : NULL; + const enum NvKmsCompositionColorKeySelect colorKeySelect = + pCompParams->colorKeySelect; + NvU32 match; + + if ((pCaps->supportedColorKeySelects & NVBIT(colorKeySelect)) == 0x0) { + return FALSE; + } + + NVKMS_COMPOSITION_FOR_MATCH_BITS(colorKeySelect, match) { + if ((pCaps->colorKeySelect[colorKeySelect].supportedBlendModes[match] & + NVBIT(pCompParams->blendingMode[match])) == 0x0) { + return FALSE; + } + + switch (pCompParams->blendingMode[match]) { + case NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA: + case NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA: + if (pCompParams->surfaceAlpha != 0) { + return FALSE; + } + break; + default: + break; + } + } + + /* Match and nomatch pixels should not use alpha blending mode at once. */ + if ((colorKeySelect != NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE) && + (NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[0])) && + (NvKmsIsCompositionModeUseAlpha(pCompParams->blendingMode[1]))) { + return FALSE; + } + + /* + * If surface is NULL, no further validation required. The composition + * parameters do not take effect if surface is NULL. + */ + if (pFormatInfo == NULL || pFormatInfo->isYUV) { + return TRUE; + } + + /* Disable color keying for 8 Bpp surfaces. 
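+     * (Here "8 Bpp" means 8 bytes per pixel: the check below rejects
+     * source and destination color keying for any RGB format wider than
+     * 4 bytes per pixel.)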
*/ + if ((colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC) || + (colorKeySelect == NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST)) { + + if (pFormatInfo->rgb.bytesPerPixel > 4) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool UpdateLayerFlipEvoHwStateCommon( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 layer, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + const NVEvoChannel *pChannel = pDevEvo->head[head].layer[layer]; + NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[layer]; + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pOpenDev); + NvBool ret; + + if (pParams->layer[layer].surface.specified) { + ret = AssignSurfaceArray(pDevEvo, + pOpenDevSurfaceHandles, + pParams->layer[layer].surface.handle, + pChannel->channelMask, + pHwState->pSurfaceEvo); + if (!ret) { + return FALSE; + } + + /* + * Verify the (rotation, reflectionX, reflectionY) is a + * combination currently supported. + */ + if ((NVBIT(NvKmsRRParamsToCapBit(&pParams->layer[layer].surface.rrParams)) & + pDevEvo->caps.validLayerRRTransforms) == 0) { + return FALSE; + } + pHwState->rrParams = pParams->layer[layer].surface.rrParams; + + } + + /* Verify the timestamp is in the valid range for this channel. */ + if (!FlipTimeStampValidForChannel(pChannel, + pParams->layer[layer].timeStamp)) { + return FALSE; + } + pHwState->timeStamp = pParams->layer[layer].timeStamp; + + /*! + * The NVKMS_SYNCPT_TYPE* types are handled earlier in the flip path (in + * the function nvHandleSyncptRegistration) + */ + if (pParams->layer[layer].syncObjects.specified && + !pParams->layer[layer].syncObjects.val.useSyncpt) { + + if (pParams->layer[layer].syncObjects.val.u.semaphores.acquire.surface.surfaceHandle != 0 || + pParams->layer[layer].syncObjects.val.u.semaphores.release.surface.surfaceHandle != 0) { + if (pParams->layer[layer].skipPendingFlips) { + return FALSE; + } + } + + ret = AssignSemaphoreEvoHwState(pDevEvo, + pOpenDevSurfaceHandles, + pChannel, + sd, + &pParams->layer[layer].syncObjects.val, + &pHwState->syncObject); + if (!ret) { + return FALSE; + } + } + + if (pHwState->pSurfaceEvo[NVKMS_LEFT]) { + pHwState->minPresentInterval = + pParams->layer[layer].minPresentInterval; + } else { + /* The hardware requires that MPI be 0 when disabled. */ + pHwState->minPresentInterval = 0; + } + + if (pParams->layer[layer].sizeIn.specified) { + pHwState->sizeIn = pParams->layer[layer].sizeIn.val; + } + + if (pParams->layer[layer].sizeOut.specified) { + pHwState->sizeOut = pParams->layer[layer].sizeOut.val; + } + + /* + * If supportsWindowMode = TRUE, the sizeIn/sizeOut dimensions can be + * any arbitrary (valid) values. + * + * If supportsWindowMode = FALSE (legacy EVO main layer), the sizeIn + * /sizeOut dimensions must match the size of the surface for that layer. + * + * Note that if sizeIn/Out dimensions are invalid i.e. with a width or + * height of zero, this will be rejected by a call to + * ValidateFlipChannelEvoHwState() later in the code path. + * + * Note that if scaling is unsupported, i.e. that sizeIn cannot differ + * from sizeOut, then any unsupported configurations will be caught by the + * ComputeWindowScalingTaps() call later on in this function. 
+ */ + if (!pDevEvo->caps.layerCaps[layer].supportsWindowMode && + (pHwState->pSurfaceEvo[NVKMS_LEFT] != NULL)) { + const NVSurfaceEvoRec *pSurfaceEvo = + pHwState->pSurfaceEvo[NVKMS_LEFT]; + + if ((pHwState->sizeIn.width != pSurfaceEvo->widthInPixels) || + (pHwState->sizeIn.height != pSurfaceEvo->heightInPixels)) { + return FALSE; + } + + if ((pHwState->sizeOut.width != pSurfaceEvo->widthInPixels) || + (pHwState->sizeOut.height != pSurfaceEvo->heightInPixels)) { + return FALSE; + } + } + + /* + * Allow the client to specify non-origin outputPosition only if the + * layer supports window mode. + * + * If window mode is unsupported but the client specifies non-origin + * outputPosition, return FALSE. + */ + if (pDevEvo->caps.layerCaps[layer].supportsWindowMode) { + if (pParams->layer[layer].outputPosition.specified) { + const NvS16 x = pParams->layer[layer].outputPosition.val.x; + const NvS16 y = pParams->layer[layer].outputPosition.val.y; + if ((pHwState->outputPosition.x != x) || + (pHwState->outputPosition.y != y)) { + pHwState->outputPosition.x = x; + pHwState->outputPosition.y = y; + pFlipState->dirty.layerPosition[layer] = TRUE; + } + } + } else if (pParams->layer[layer].outputPosition.specified && + ((pParams->layer[layer].outputPosition.val.x != 0) || + (pParams->layer[layer].outputPosition.val.y != 0))) { + return FALSE; + } + + if (pParams->layer[layer].compositionParams.specified) { + pHwState->composition = + pParams->layer[layer].compositionParams.val; + } + + if (pHwState->composition.depth == 0) { + pHwState->composition.depth = + NVKMS_MAX_LAYERS_PER_HEAD - layer; + } + + /* XXX Move ValidatePerLayerCompParams() call to nvValidateFlipEvoHwState() */ + if (!ValidatePerLayerCompParams( + &pHwState->composition, + &pDevEvo->caps.layerCaps[layer].composition, + pHwState->pSurfaceEvo[NVKMS_LEFT])) { + return FALSE; + } + + if (!pDevEvo->hal->ComputeWindowScalingTaps(pDevEvo, + pChannel, + pHwState)) { + return FALSE; + } + + if (pParams->layer[layer].completionNotifier.specified) { + ret = AssignCompletionNotifierEvoHwState( + pDevEvo, + pOpenDevSurfaceHandles, + &pParams->layer[layer].completionNotifier.val, + pChannel, + &pFlipState->layer[layer].completionNotifier); + if (!ret) { + return FALSE; + } + } + + pFlipState->dirty.layer[layer] = TRUE; + + return TRUE; +} + +static NvBool UpdateMainLayerFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState, + NvBool allowVrr) +{ + const NVEvoChannel *pChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[NVKMS_MAIN_LAYER]; + + if (!IsLayerDirty(pParams, NVKMS_MAIN_LAYER)) { + return TRUE; + } + + if (!UpdateLayerFlipEvoHwStateCommon(pOpenDev, pDevEvo, sd, head, + NVKMS_MAIN_LAYER, + pParams, pFlipState)) { + return FALSE; + } + + if (pParams->layer[NVKMS_MAIN_LAYER].csc.specified) { + if (pParams->layer[NVKMS_MAIN_LAYER].csc.useMain) { + return FALSE; + } else { + pHwState->cscMatrix = pParams->layer[NVKMS_MAIN_LAYER].csc.matrix; + } + } + + if (pParams->layer[NVKMS_MAIN_LAYER].surface.specified) { + if (pParams->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip && + !pChannel->caps.perEyeStereoFlips) { + return FALSE; + } + + pHwState->perEyeStereoFlip = + pParams->layer[NVKMS_MAIN_LAYER].perEyeStereoFlip; + } + + if (pParams->layer[NVKMS_MAIN_LAYER].tearing && !pChannel->caps.tearingFlips) { + return FALSE; + } + + // EVO will throw 
an invalid argument exception if + // minPresentInterval is too large, or if tearing is enabled and + // it's not zero. + if (pParams->layer[NVKMS_MAIN_LAYER].minPresentInterval > NV_MAX_SWAP_INTERVAL || + (pParams->layer[NVKMS_MAIN_LAYER].tearing && + pParams->layer[NVKMS_MAIN_LAYER].minPresentInterval != 0)) { + return FALSE; + } + + pHwState->tearing = pParams->layer[NVKMS_MAIN_LAYER].tearing; + + if (!ApplyBaseFlipOverrides(pDevEvo, + sd, head, &pFlipState->layer[NVKMS_MAIN_LAYER], + allowVrr)) { + return FALSE; + } + + return TRUE; +} + +static NvBool UpdateCursorLayerFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDevConst(pOpenDev); + + if (pParams->cursor.imageSpecified) { + NVSurfaceEvoPtr pSurfaceEvos[NVKMS_MAX_EYES] = { }; + + if (!nvGetCursorImageSurfaces(pDevEvo, + pOpenDevSurfaceHandles, + &pParams->cursor.image, + pSurfaceEvos)) { + return FALSE; + } + + /* XXX NVKMS TODO: add support for stereo cursor */ + if (pSurfaceEvos[NVKMS_RIGHT] != NULL) { + return FALSE; + } + + pFlipState->cursor.pSurfaceEvo = pSurfaceEvos[NVKMS_LEFT]; + + if (pFlipState->cursor.pSurfaceEvo != NULL) { + if (!ValidatePerLayerCompParams(&pParams->cursor.image.cursorCompParams, + &pDevEvo->caps.cursorCompositionCaps, + pFlipState->cursor.pSurfaceEvo)) { + return FALSE; + } + + pFlipState->cursor.cursorCompParams = + pParams->cursor.image.cursorCompParams; + } + + pFlipState->dirty.cursorSurface = TRUE; + } + + if (pParams->cursor.positionSpecified) { + pFlipState->cursor.x = pParams->cursor.position.x; + pFlipState->cursor.y = pParams->cursor.position.y; + + pFlipState->dirty.cursorPosition = TRUE; + } + + return TRUE; +} + +static NvBool UpdateOverlayLayerFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const NvU32 layer, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + NVFlipChannelEvoHwState *pHwState = &pFlipState->layer[layer]; + + nvAssert(layer != NVKMS_MAIN_LAYER); + + if (!IsLayerDirty(pParams, layer)) { + return TRUE; + } + + if (pParams->layer[layer].skipPendingFlips || + pParams->layer[layer].perEyeStereoFlip) { + return FALSE; + } + + if (!UpdateLayerFlipEvoHwStateCommon(pOpenDev, pDevEvo, sd, head, layer, + pParams, pFlipState)) { + return FALSE; + } + + if (pParams->layer[layer].csc.specified) { + if (pParams->layer[layer].csc.useMain) { + if (pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]) { + pHwState->cscMatrix = + pFlipState->layer[NVKMS_MAIN_LAYER].cscMatrix; + } + } else { + pHwState->cscMatrix = pParams->layer[layer].csc.matrix; + } + } + + return TRUE; +} + +/*! + * Update the NVFlipEvoHwState, using NvKmsFlipCommonParams. + * + * Propagate the requested configuration from NvKmsFlipCommonParams to + * NVFlipEvoHwState, performing steps such as translating from + * NvKmsSurfaceHandle to NVSurfaceEvoRecs. Validate the NvKmsFlipCommonParams + * parameters, but defer more general validation of the resulting + * NVFlipEvoHwState until nvValidateFlipEvoHwState(), which callers must call + * separately. + * + * The NVFlipEvoHwState should first be initialized by calling + * nvInitFlipEvoHwState(). 
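+ *
+ * Parameters are applied selectively: a field is generally only
+ * propagated when the client marked it as specified, e.g. (a sketch of
+ * the viewPortIn handling below):
+ *
+ *   if (pParams->viewPortIn.specified) {
+ *       pFlipState->dirty.viewPortPointIn = TRUE;
+ *       pFlipState->viewPortPointIn = pParams->viewPortIn.point;
+ *   }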
+ * + * No NVKMS hardware or software state should be altered here, because + * this function is used before we have decided to commit the proposed + * NVFlipEvoHwState to hardware. + * + * \param[in] pOpenDev The pOpenDev of the client doing the flip. + * \param[in] pDevEvo The device on which the surface image will be set. + * \param[in] sd The subdevice for the flip, as specified by the + * client. + * \param[in] head The head for the flip, as specified by the client. + * \param[in] pParams The requested flip, NvKmsFlipCommonParams. + * \param[in,out] pFlipState The resulting NVFlipEvoHwState. + * \param[in] allowVrr Whether VRR flipping should be allowed. + * \param[in] pPossibleUsage Possible usage. + * + * \return If pFlipState could be updated, return TRUE. + * Otherwise, return FALSE. + */ +NvBool nvUpdateFlipEvoHwState( + const struct NvKmsPerOpenDev *pOpenDev, + const NVDevEvoRec *pDevEvo, + const NvU32 sd, + const NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState, + NvBool allowVrr, + const struct NvKmsUsageBounds *pPossibleUsage) +{ + NvU32 layer; + + if (!CheckFlipPermissions(pOpenDev, pDevEvo, sd, head, pParams)) { + return FALSE; + } + + if (pParams->viewPortIn.specified) { + pFlipState->dirty.viewPortPointIn = TRUE; + pFlipState->viewPortPointIn = pParams->viewPortIn.point; + } + + if (!UpdateCursorLayerFlipEvoHwState(pOpenDev, pDevEvo, pParams, + pFlipState)) { + return FALSE; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (layer == NVKMS_MAIN_LAYER) { + if (!UpdateMainLayerFlipEvoHwState(pOpenDev, pDevEvo, sd, head, + pParams, pFlipState, allowVrr)) { + return FALSE; + } + continue; + } + + if (!UpdateOverlayLayerFlipEvoHwState(pOpenDev, pDevEvo, sd, head, + layer, pParams, pFlipState)) { + return FALSE; + } + } + + if (!AssignUsageBounds(pDevEvo, head, pFlipState)) { + return FALSE; + } + + if (!OverrideUsageBounds(pDevEvo, pFlipState, pParams, sd, head, + pPossibleUsage)) { + return FALSE; + } + + + /* + * If there is active cursor/cropped-window(overlay) without full screen + * window(base/core) then NVKMS is supposed to disable MidFrame/DWCF + * watermark. + */ + + pFlipState->disableMidFrameAndDWCFWatermark = FALSE; + + if (NV5070_CTRL_SYSTEM_GET_CAP( + pDevEvo->capsBits, + NV5070_CTRL_SYSTEM_CAPS_BUG_2052012_GLITCHY_MCLK_SWITCH) && + !pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]) { + + if (pFlipState->cursor.pSurfaceEvo != NULL) { + pFlipState->disableMidFrameAndDWCFWatermark = TRUE; + } else { + NvU32 layer; + + /* + * XXX TODO: Check the output size of the overlay in order + * to determine if it will be fullscreen or not. + */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (layer != NVKMS_MAIN_LAYER && + pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL) { + pFlipState->disableMidFrameAndDWCFWatermark = TRUE; + break; + } + } + } + } + + return TRUE; +} + +/* + * Checks that if the surface is NULL (i.e. no image will be shown), various + * other elements must be NULL as well. If the surface is not NULL, verifies + * that the sizeIn/Out have nonzero values. + */ +inline static NvBool ValidateFlipChannelEvoHwState( + const NVFlipChannelEvoHwState *pState) +{ + if (pState->pSurfaceEvo[NVKMS_LEFT] != NULL) { + /* Verify sizes are valid. 
*/ + if ((pState->sizeIn.width == 0) || (pState->sizeIn.height == 0) || + (pState->sizeOut.width == 0) || (pState->sizeOut.height == 0)) { + return FALSE; + } + + return TRUE; + } + + if (pState->completionNotifier.surface.pSurfaceEvo != NULL) { + return FALSE; + } + + if (!pState->syncObject.usingSyncpt) { + if (pState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo != NULL) { + return FALSE; + } + + if (pState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo != NULL) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool ValidateSurfaceSize( + const NVDevEvoRec *pDevEvo, + const NVSurfaceEvoRec *pSurfaceEvo, + const struct NvKmsRect *sourceFetchRect) +{ + NvU8 planeIndex; + + if ((pSurfaceEvo->widthInPixels > pDevEvo->caps.maxWidthInPixels) || + (pSurfaceEvo->heightInPixels > pDevEvo->caps.maxHeight)) { + return FALSE; + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + NvU64 planePitch = pSurfaceEvo->planes[planeIndex].pitch; + + /* + * Convert planePitch to units of bytes if it's currently specified in + * units of blocks. Each block is 64-bytes wide. + */ + if (pSurfaceEvo->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + planePitch <<= NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH; + } + + if (planePitch > pDevEvo->caps.maxWidthInBytes) { + return FALSE; + } + } + + if (!pDevEvo->hal->ValidateWindowFormat(pSurfaceEvo->format, + sourceFetchRect, + NULL)) { + return FALSE; + } + + return TRUE; +} + +static NvBool +ValidateMainFlipChannelEvoHwState(const NVDevEvoRec *pDevEvo, + const NVFlipChannelEvoHwState *pHwState, + const NVHwModeTimingsEvo *pTimings, + struct NvKmsPoint viewPortPointIn) +{ + NvU32 eye; + const NVSurfaceEvoRec *pFirstSurfaceEvo = NULL; + + /* + * This struct represents the source fetch rectangle for a given surface, + * and will be populated later as such. This function doesn't explicitly set + * sourceFetchRect.{x,y} because NVKMS currently doesn't support programming + * source fetch offsets, so the init value of 0 should be fine for both of + * these fields. + */ + struct NvKmsRect sourceFetchRect = {0}; + + if (!ValidateFlipChannelEvoHwState(pHwState)) { + return FALSE; + } + + for (eye = 0; eye < NVKMS_MAX_EYES; eye++) { + const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[eye]; + + if (pSurfaceEvo == NULL) { + continue; + } + + if (pFirstSurfaceEvo == NULL) { + pFirstSurfaceEvo = pSurfaceEvo; + } else if (pSurfaceEvo->widthInPixels != + pFirstSurfaceEvo->widthInPixels || + pSurfaceEvo->heightInPixels != + pFirstSurfaceEvo->heightInPixels) { + return FALSE; + } + + sourceFetchRect.width = pHwState->sizeIn.width; + sourceFetchRect.height = pHwState->sizeIn.height; + + if (!ValidateSurfaceSize(pDevEvo, pSurfaceEvo, &sourceFetchRect)) { + return FALSE; + } + + /* The use of A_plus_B_greater_than_C_U16 is only valid if these + * fit in a U16 */ + nvAssert(pSurfaceEvo->widthInPixels <= NV_U16_MAX); + nvAssert(pSurfaceEvo->heightInPixels <= NV_U16_MAX); + /* And the checks above in ValidateSurfaceSize should have + * guaranteed that. */ + nvAssert(pDevEvo->caps.maxWidthInPixels <= NV_U16_MAX); + nvAssert(pDevEvo->caps.maxHeight <= NV_U16_MAX); + + /* + * Validate that the requested viewport parameters fit within the + * specified surface, unless the main layer is allowed to be smaller + * than the viewport. 
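+ *
+ * In other words, when the main layer cannot be smaller than the
+ * viewport (no window mode), require (a sketch of the checks below):
+ *
+ *   viewPortPointIn.x + viewPort.in.width  <= pSurfaceEvo->widthInPixels
+ *   viewPortPointIn.y + viewPort.in.height <= pSurfaceEvo->heightInPixels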
+ */ + if (!pDevEvo->caps.layerCaps[NVKMS_MAIN_LAYER].supportsWindowMode) { + if (A_plus_B_greater_than_C_U16(viewPortPointIn.x, + pTimings->viewPort.in.width, + pSurfaceEvo->widthInPixels)) { + return FALSE; + } + + if (A_plus_B_greater_than_C_U16(viewPortPointIn.y, + pTimings->viewPort.in.height, + pSurfaceEvo->heightInPixels)) { + return FALSE; + } + } + } + + return TRUE; +} + +static NvBool +ValidateOverlayFlipChannelEvoHwState(const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NvU32 layer, + const NVFlipChannelEvoHwState *pHwState) +{ + const NVSurfaceEvoRec *pSurfaceEvo = pHwState->pSurfaceEvo[NVKMS_LEFT]; + + /* + * This struct represents the source fetch rectangle for a given surface, + * and will be populated later as such. This function doesn't explicitly set + * sourceFetchRect.{x,y} because NVKMS currently doesn't support programming + * source fetch offsets, so the init value of 0 should be fine for both of + * these fields. + */ + struct NvKmsRect sourceFetchRect = {0}; + + nvAssert(layer != NVKMS_MAIN_LAYER); + + if (!ValidateFlipChannelEvoHwState(pHwState)) { + return FALSE; + } + + if (pSurfaceEvo == NULL) { + return TRUE; + } + + sourceFetchRect.width = pHwState->sizeIn.width; + sourceFetchRect.height = pHwState->sizeIn.height; + + if (!ValidateSurfaceSize(pDevEvo, pSurfaceEvo, &sourceFetchRect)) { + return FALSE; + } + + /* Validate input size against surface size. */ + if (pHwState->sizeIn.width > pSurfaceEvo->widthInPixels || + pHwState->sizeIn.height > pSurfaceEvo->heightInPixels) { + return FALSE; + } + + return TRUE; +} + +/*! + * Perform validation of the the given NVFlipEvoHwState. + */ +NvBool nvValidateFlipEvoHwState( + const NVDevEvoRec *pDevEvo, + const NvU32 head, + const NVHwModeTimingsEvo *pTimings, + const NVFlipEvoHwState *pFlipState) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + + if (pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL) { + NvU32 tmpLayer; + + /* Depth should be different for each of the layers owned by the head */ + for (tmpLayer = 0; tmpLayer < pDevEvo->head[head].numLayers; tmpLayer++) { + if (pFlipState->layer[tmpLayer].pSurfaceEvo[NVKMS_LEFT] == NULL) { + continue; + } + + if ((tmpLayer != layer) && + (pFlipState->layer[tmpLayer].composition.depth == + pFlipState->layer[layer].composition.depth)) { + return FALSE; + } + } + + /* Depth of the main layer should be the greatest one */ + if ((pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT] != NULL) && + (pFlipState->layer[layer].composition.depth > + pFlipState->layer[NVKMS_MAIN_LAYER].composition.depth)) { + return FALSE; + } + } + + if (layer == NVKMS_MAIN_LAYER) { + if (!ValidateMainFlipChannelEvoHwState(pDevEvo, + &pFlipState->layer[layer], + pTimings, + pFlipState->viewPortPointIn)) { + return FALSE; + } + continue; + } + + if (pFlipState->dirty.layer[layer] && + !ValidateOverlayFlipChannelEvoHwState(pDevEvo, + head, + layer, + &pFlipState->layer[layer])) { + return FALSE; + } + } + + /* XXX NVKMS TODO: validate cursor x,y against current viewport in? */ + + return ValidateUsageBounds(pDevEvo, + head, + &pFlipState->usage, + &pTimings->viewPort.possibleUsage); +} + + +/*! + * Validate overlay should be enabled only with valid core scanout surface. 
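+ *
+ * That is, for every head, reject the request if any overlay layer has a
+ * surface while the main layer scanout surface (either the current one or
+ * the one this flip switches to) is NULL.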
+ */ +static NvBool ValidatePerDispState( + const NVDevEvoRec *pDevEvo, + const struct NvKmsFlipWorkArea *pWorkArea) +{ + const NVDispEvoRec *pDispEvo; + NvU32 sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[pDispEvo->displayOwner].headState[head]; + const NVSurfaceEvoRec *pMainScanoutSurface = + pSdHeadState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + const NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + NvU32 layer; + + if (pFlipState->dirty.layer[NVKMS_MAIN_LAYER]) { + pMainScanoutSurface = + pFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (layer == NVKMS_MAIN_LAYER) { + continue; + } + + if (pFlipState->layer[layer].pSurfaceEvo[NVKMS_LEFT] != NULL && + pMainScanoutSurface == NULL) { + return FALSE; + } + } + } + } + + return TRUE; +} + +/* + * Record in the updateState that the given channel needs interlocked + * window immediate updates. + */ +static void UpdateWinImmInterlockState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].winImmInterlockMask |= + pChannel->channelMask; + } + } +} + +/*! + * Record in the updateState that the given channel's method are eligible for + * flip locking. + */ +static void UpdateUpdateFlipLockState(NVDevEvoPtr pDevEvo, + NVEvoUpdateState *updateState, + const NVEvoChannel *pChannel) +{ + const NvU32 subDevMask = nvPeekEvoSubDevMask(pDevEvo); + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDevMask & (1 << sd)) { + updateState->subdev[sd].flipLockQualifyingMask |= + pChannel->channelMask; + } + } +} + +/*! + * Program a flip on all requested layers on the specified head. + * + * This also updates pDispEvo->headState[head], caching what was programmed. + + * \param[in,out] pDispEvo The disp on which the flip should be performed. + * \param[in] head The head on which the flip should be performed. + * \param[in] pFlipState The description of how to update each layer. + * \param[in,out] updateState Indicates which channels require UPDATEs + */ +void nvFlipEvoOneHead( + NVDevEvoPtr pDevEvo, + const NvU32 sd, + const NvU32 head, + const NVFlipEvoHwState *pFlipState, + NvBool allowFlipLock, + NVEvoUpdateState *updateState) +{ + const NvU32 subDeviceMask = NVBIT(sd); + const NVDispHeadStateEvoRec *pHeadState = + &pDevEvo->gpus[sd].pDispEvo->headState[head]; + NvBool bypassComposition = pHeadState->bypassComposition; + NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvU32 layer; + + /* + * Provide the pre-update hardware state (in pSdHeadState) and the new + * target state (pFlipState) to the HAL implementation so that it has the + * information it needs to implement the workaround for hardware bug + * 2193096, which requires special logic on transitions between NULL and + * non-NULL ctxdmas (and vice versa). + */ + pDevEvo->hal->FlipTransitionWAR(pDevEvo, sd, head, + pSdHeadState, pFlipState, + updateState); + + /* + * Promote the software state first, such that the hardware programming + * paths below see the new state atomically. 
+ */ + if (pFlipState->dirty.viewPortPointIn) { + pSdHeadState->viewPortPointIn = pFlipState->viewPortPointIn; + } + + if (pFlipState->dirty.cursorSurface || pFlipState->dirty.cursorPosition) { + pSdHeadState->cursor = pFlipState->cursor; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pFlipState->dirty.layer[layer]) { + pSdHeadState->layer[layer] = pFlipState->layer[layer]; + } + } + + if (pFlipState->dirty.viewPortPointIn) { + nvSetViewPortPointInEvo(pDevEvo->gpus[sd].pDispEvo, + head, + pFlipState->viewPortPointIn.x, + pFlipState->viewPortPointIn.y, + updateState); + } + + if (pFlipState->dirty.cursorSurface) { + nvPushEvoSubDevMask(pDevEvo, NVBIT(sd)); + pDevEvo->hal->SetCursorImage(pDevEvo, + head, + pSdHeadState->cursor.pSurfaceEvo, + updateState, + &pSdHeadState->cursor.cursorCompParams); + nvPopEvoSubDevMask(pDevEvo); + } + + if (pFlipState->dirty.cursorPosition) { + nvEvoMoveCursorInternal(pDevEvo->gpus[sd].pDispEvo, + head, + pFlipState->cursor.x, + pFlipState->cursor.y); + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!pFlipState->dirty.layer[layer]) { + continue; + } + + if (pFlipState->dirty.layerPosition[layer]) { + /* Ensure position updates are supported on this layer. */ + nvAssert(pDevEvo->caps.layerCaps[layer].supportsWindowMode); + + pDevEvo->hal->SetImmPointOut(pDevEvo, + pDevEvo->head[head].layer[layer], + sd, + updateState, + pFlipState->layer[layer].outputPosition.x, + pFlipState->layer[layer].outputPosition.y); + + if (pDevEvo->hal->caps.supportsSynchronizedOverlayPositionUpdate) { + UpdateWinImmInterlockState(pDevEvo, updateState, + pDevEvo->head[head].layer[layer]); + } + } + + nvPushEvoSubDevMask(pDevEvo, subDeviceMask); + pDevEvo->hal->Flip(pDevEvo, + pDevEvo->head[head].layer[layer], + &pFlipState->layer[layer], + updateState, + bypassComposition); + if (layer == NVKMS_MAIN_LAYER && allowFlipLock) { + UpdateUpdateFlipLockState(pDevEvo, updateState, + pDevEvo->head[head].layer[layer]); + } + nvPopEvoSubDevMask(pDevEvo); + } + + pSdHeadState->targetUsage = pFlipState->usage; + + pSdHeadState->targetDisableMidFrameAndDWCFWatermark = + pFlipState->disableMidFrameAndDWCFWatermark; +} + +static void ChangeSurfaceFlipRefCount( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo, + NvBool increase) +{ + if (pSurfaceEvo != NULL) { + if (increase) { + nvEvoIncrementSurfaceRefCnts(pSurfaceEvo); + } else { + nvEvoDecrementSurfaceRefCnts(pSurfaceEvo); + } + } +} + +void nvUpdateSurfacesFlipRefCount( + NVDevEvoPtr pDevEvo, + const NvU32 head, + NVFlipEvoHwState *pFlipState, + NvBool increase) +{ + NvU32 i; + + ChangeSurfaceFlipRefCount( + pDevEvo, + pFlipState->cursor.pSurfaceEvo, + increase); + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + NVFlipChannelEvoHwState *pLayerFlipState = &pFlipState->layer[i]; + + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->pSurfaceEvo[NVKMS_LEFT], + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT], + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->completionNotifier.surface.pSurfaceEvo, + increase); + + if (!pLayerFlipState->syncObject.usingSyncpt) { + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo, + increase); + ChangeSurfaceFlipRefCount( + pDevEvo, + pLayerFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo, + increase); + } + } +} + +static void UnionScalingUsageBounds( + const struct NvKmsScalingUsageBounds *a, + 
const struct NvKmsScalingUsageBounds *b, + struct NvKmsScalingUsageBounds *ret) +{ + ret->maxVDownscaleFactor = NV_MAX(a->maxVDownscaleFactor, + b->maxVDownscaleFactor); + ret->maxHDownscaleFactor = NV_MAX(a->maxHDownscaleFactor, + b->maxHDownscaleFactor); + ret->vTaps = NV_MAX(a->vTaps, b->vTaps); + ret->vUpscalingAllowed = a->vUpscalingAllowed || b->vUpscalingAllowed; +} + +struct NvKmsUsageBounds nvUnionUsageBounds( + const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b) +{ + struct NvKmsUsageBounds ret; + NvU32 i; + + for (i = 0; i < ARRAY_LEN(a->layer); i++) { + nvAssert(a->layer[i].usable == + !!a->layer[i].supportedSurfaceMemoryFormats); + nvAssert(b->layer[i].usable == + !!b->layer[i].supportedSurfaceMemoryFormats); + + ret.layer[i].usable = a->layer[i].usable || b->layer[i].usable; + + ret.layer[i].supportedSurfaceMemoryFormats = + a->layer[i].supportedSurfaceMemoryFormats | + b->layer[i].supportedSurfaceMemoryFormats; + + UnionScalingUsageBounds(&a->layer[i].scaling, + &b->layer[i].scaling, + &ret.layer[i].scaling); + } + + return ret; +} + +NvBool UsageBoundsEqual( + const struct NvKmsUsageBounds *a, + const struct NvKmsUsageBounds *b) +{ + NvU32 layer; + + for (layer = 0; layer < ARRAY_LEN(a->layer); layer++) { + if (!nvEvoLayerUsageBoundsEqual(a, b, layer)) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool AllocatePreFlipBandwidth(NVDevEvoPtr pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea) +{ + NVValidateImpOneDispHeadParamsRec *timingsParams = NULL; + struct NvKmsUsageBounds *currentAndNew = NULL; + struct NvKmsUsageBounds *guaranteedAndCurrent = NULL; + NVDispEvoPtr pDispEvo; + NvU32 head; + NvBool recheckIMP = FALSE; + NvBool ret = TRUE; + + if (!pDevEvo->isSOCDisplay) { + return TRUE; + } + + timingsParams = + nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*timingsParams)); + if (timingsParams == NULL) { + return FALSE; + } + + currentAndNew = + nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*currentAndNew)); + if (currentAndNew == NULL) { + nvFree(timingsParams); + return FALSE; + } + + guaranteedAndCurrent = + nvCalloc(NVKMS_MAX_HEADS_PER_DISP, sizeof(*guaranteedAndCurrent)); + if (guaranteedAndCurrent == NULL) { + nvFree(timingsParams); + nvFree(currentAndNew); + return FALSE; + } + + pDispEvo = pDevEvo->pDispEvo[0]; + + // SOC Display never has more than one disp + nvAssert(pDevEvo->nDispEvo == 1); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const struct NvKmsUsageBounds *pCurrent = + &pDevEvo->gpus[0].headState[head].preallocatedUsage; + const struct NvKmsUsageBounds *pNew = + &pWorkArea->sd[0].head[head].newState.usage; + + if (pHeadState->activeRmId == 0) { + continue; + } + + timingsParams[head].pConnectorEvo = pHeadState->pConnectorEvo; + timingsParams[head].activeRmId = pHeadState->activeRmId; + timingsParams[head].pTimings = &pHeadState->timings; + + currentAndNew[head] = nvUnionUsageBounds(pCurrent, pNew); + guaranteedAndCurrent[head] = nvUnionUsageBounds( + &pHeadState->timings.viewPort.guaranteedUsage, + pCurrent); + + if (!ValidateUsageBounds(pDevEvo, + head, + pNew, + &guaranteedAndCurrent[head])) { + recheckIMP = TRUE; + } + + guaranteedAndCurrent[head] = + nvUnionUsageBounds(&guaranteedAndCurrent[head], pNew); + timingsParams[head].pUsage = &guaranteedAndCurrent[head]; + } + + if (recheckIMP) { + ret = nvValidateImpOneDisp(pDispEvo, timingsParams, + FALSE /* requireBootClocks */, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE, + NULL /* pMinIsoBandwidthKBPS */, + 
NULL /* pMinDramFloorKBPS */); + if (ret) { + for (head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->gpus[0].headState[head].preallocatedUsage = + currentAndNew[head]; + } + } + } + + nvFree(timingsParams); + nvFree(currentAndNew); + nvFree(guaranteedAndCurrent); + + if (ret) { + nvScheduleLowerDispBandwidthTimer(pDevEvo); + } + + return ret; +} + +/*! + * If the satellite channel is active then pre-NVDisplay hardware does not allow + * to change its usage bounds in non-interlock update. The nvSetUsageBoundsEvo() + * code path for pre-NVDisplay hardware, interlocks the satellite channels with + * the usage bounds update. This makes it essential to poll for + * NO_METHOD_PENDING state of the satellite channels, otherwise blocking + * pre-flip IMP update will also get stuck. + * + * It is not possible to interlock flip-locked satellite channels with the core + * channel usage bounds update; in that case, reject the flip. Do not allow + * client to make any change in surface usage bounds parameters without + * deactivating channel first, if channel is flip-locked. + */ +static NvBool PrepareToDoPreFlipIMP(NVDevEvoPtr pDevEvo, + struct NvKmsFlipWorkArea *pWorkArea) +{ + NvU64 startTime = 0; + NvU32 timeout = 2000000; /* 2 seconds */ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[sd]; + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVEvoHeadControlPtr pHC = + &pEvoSubDev->headControl[head]; + const NVEvoSubDevHeadStateRec *pCurrentFlipState = + &pDevEvo->gpus[sd].headState[head]; + const NVSurfaceEvoRec *pCurrentBaseSurf = + pCurrentFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + const struct NvKmsUsageBounds *pCurrentUsage = + &pCurrentFlipState->usage; + + NVFlipEvoHwState *pNewFlipState = + &pWorkArea->sd[sd].head[head].newState; + const NVSurfaceEvoRec *pNewBaseSurf = + pNewFlipState->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + struct NvKmsUsageBounds *pNewUsage = + &pNewFlipState->usage; + + struct NvKmsUsageBounds *pPreFlipUsage = + &pWorkArea->sd[sd].head[head].preFlipUsage; + + NvU32 layer; + + *pPreFlipUsage = nvUnionUsageBounds(pNewUsage, + pCurrentUsage); + + if (pDevEvo->hal->caps.supportsNonInterlockedUsageBoundsUpdate) { + /* + * NVDisplay does not interlock the satellite channel + * with its usage bounds update. + */ + continue; + } + + /* + * If head is flip-locked then do not change usage + * bounds while base channel is active. + */ + if (pHC->flipLock && + /* If the base channel is active before and after flip then + * current and new base usage bounds should be same. */ + ((pNewBaseSurf != NULL && + pCurrentBaseSurf != NULL && + !nvEvoLayerUsageBoundsEqual(pCurrentUsage, + pNewUsage, NVKMS_MAIN_LAYER)) || + /* If the base channel is active before flip then current and + * preflip base usage bounds should be same. */ + (pCurrentBaseSurf != NULL && + !nvEvoLayerUsageBoundsEqual(pCurrentUsage, + pPreFlipUsage, NVKMS_MAIN_LAYER)))) { + return FALSE; + } + + /* + * Poll for NO_METHOD_PENDING state if usage + * bounds of the channel are changed. + */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!nvEvoLayerUsageBoundsEqual(pCurrentUsage, + pPreFlipUsage, layer) && + !nvEvoPollForNoMethodPending(pDevEvo, + sd, + pDevEvo->head[head].layer[layer], + &startTime, + timeout)) { + return FALSE; + } + } + } + } + + return TRUE; +} + +/*! + * Tasks need to perform before triggering flip, they all should be done here. 
+ * + * If necessary, raise usage bounds and/or disable MidFrameAndDWCFWatermark + * (bug 200508242) in the core channel and do an IMP update. + * + * Note that this function only raises usage bounds and/or disables + * MidFrameAndDWCFWatermark, never lowers usage bounds and/or enables + * MidFrameAndDWCFWatermark. This allows it to run before queuing a flip even + * if there are still pending flips in a base channel. + */ +static void PreFlipIMP(NVDevEvoPtr pDevEvo, + const struct NvKmsFlipWorkArea *pWorkArea) +{ + NvU32 head, sd; + NVDispEvoPtr pDispEvo; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NVEvoUpdateState updateState = { }; + NvBool update = FALSE; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + const NVFlipEvoHwState *pNewState = + &pWorkArea->sd[sd].head[head].newState; + const struct NvKmsUsageBounds *pPreFlipUsage = + &pWorkArea->sd[sd].head[head].preFlipUsage; + struct NvKmsUsageBounds *pCurrentUsage = + &pDevEvo->gpus[sd].headState[head].usage; + + if (!UsageBoundsEqual(pCurrentUsage, pPreFlipUsage)) { + update |= nvSetUsageBoundsEvo(pDevEvo, sd, head, + pPreFlipUsage, &updateState); + } + + if (!pDevEvo->gpus[sd]. + headState[head].disableMidFrameAndDWCFWatermark && + pNewState->disableMidFrameAndDWCFWatermark) { + + nvEnableMidFrameAndDWCFWatermark(pDevEvo, + sd, + head, + FALSE /* enable */, + &updateState); + update = TRUE; + } + } + + if (update) { + nvDoIMPUpdateEvo(pDispEvo, &updateState); + } + } +} + +static void LowerDispBandwidth(void *dataPtr, NvU32 dataU32) +{ + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP]; + struct NvKmsUsageBounds *guaranteedAndCurrent; + NVDevEvoPtr pDevEvo = dataPtr; + NVDispEvoPtr pDispEvo; + NvU32 head; + NvBool ret; + + guaranteedAndCurrent = + nvCalloc(1, sizeof(*guaranteedAndCurrent) * NVKMS_MAX_HEADS_PER_DISP); + if (guaranteedAndCurrent == NULL) { + nvAssert(guaranteedAndCurrent != NULL); + return; + } + + nvkms_memset(&timingsParams, 0, sizeof(timingsParams)); + + pDispEvo = pDevEvo->pDispEvo[0]; + + // SOC Display never has more than one disp + nvAssert(pDevEvo->nDispEvo == 1); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + const struct NvKmsUsageBounds *pGuaranteed = + &pHeadState->timings.viewPort.guaranteedUsage; + const struct NvKmsUsageBounds *pCurrent = + &pDevEvo->gpus[0].headState[head].usage; + + if (pHeadState->activeRmId == 0) { + continue; + } + + timingsParams[head].pConnectorEvo = pHeadState->pConnectorEvo; + timingsParams[head].activeRmId = pHeadState->activeRmId; + timingsParams[head].pTimings = &pHeadState->timings; + + guaranteedAndCurrent[head] = nvUnionUsageBounds(pGuaranteed, pCurrent); + timingsParams[head].pUsage = &guaranteedAndCurrent[head]; + } + + ret = nvValidateImpOneDisp(pDispEvo, timingsParams, + FALSE /* requireBootClocks */, + NV_EVO_REALLOCATE_BANDWIDTH_MODE_POST, + NULL /* pMinIsoBandwidthKBPS */, + NULL /* pMinDramFloorKBPS */); + if (ret) { + for (head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->gpus[0].headState[head].preallocatedUsage = + pDevEvo->gpus[0].headState[head].usage; + } + } + + nvAssert(ret); + + nvFree(guaranteedAndCurrent); +} + +void nvCancelLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo) +{ + nvkms_free_timer(pDevEvo->lowerDispBandwidthTimer); + pDevEvo->lowerDispBandwidthTimer = NULL; +} + +void nvScheduleLowerDispBandwidthTimer(NVDevEvoPtr pDevEvo) +{ + nvAssert(pDevEvo->isSOCDisplay); + + nvCancelLowerDispBandwidthTimer(pDevEvo); + + 
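+    /*
+     * Re-arm the 30-second countdown; when it fires, LowerDispBandwidth()
+     * re-validates IMP against the guaranteed-plus-current usage bounds and,
+     * on success, lowers the preallocated usage to what is actually in use.
+     */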
pDevEvo->lowerDispBandwidthTimer = + nvkms_alloc_timer(LowerDispBandwidth, + pDevEvo, + 0, /* dataU32 */ + 30000000 /* 30 seconds */); +} + +/*! + * Check whether the core, base, and overlay channels are idle (i.e. no methods + * pending in the corresponding pushbuffer) and lower the usage bounds if + * possible. + */ +static NvBool TryLoweringUsageBoundsOneHead(NVDevEvoPtr pDevEvo, NvU32 sd, + NvU32 head, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pHeadState = + &pDevEvo->gpus[sd].headState[head]; + const struct NvKmsUsageBounds *pCurrent = &pHeadState->usage; + const struct NvKmsUsageBounds *pTarget = &pHeadState->targetUsage; + struct NvKmsUsageBounds newUsage = *pCurrent; + NvBool changed = FALSE; + NvBool scheduleLater = FALSE; + int i; + + for (i = 0; i < pDevEvo->head[head].numLayers; i++) { + if (pCurrent->layer[i].usable && !pTarget->layer[i].usable) { + NvBool isMethodPending; + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pDevEvo->head[head].layer[i], + sd, + &isMethodPending) && !isMethodPending) { + newUsage.layer[i] = pTarget->layer[i]; + changed = TRUE; + } else { + scheduleLater = TRUE; + } + } else if ((pCurrent->layer[i].usable && pTarget->layer[i].usable) && + ((pCurrent->layer[i].supportedSurfaceMemoryFormats != + pTarget->layer[i].supportedSurfaceMemoryFormats) || + (!nvEvoScalingUsageBoundsEqual(&pCurrent->layer[i].scaling, + &pTarget->layer[i].scaling)))) { + NvBool isMethodPending; + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, + pDevEvo->head[head].layer[i], + sd, + &isMethodPending) && !isMethodPending) { + newUsage.layer[i] = pTarget->layer[i]; + changed = TRUE; + } else { + scheduleLater = TRUE; + } + } + } + + if (scheduleLater) { + SchedulePostFlipIMPTimer(pDevEvo); + } + + if (changed) { + changed = nvSetUsageBoundsEvo(pDevEvo, sd, head, &newUsage, + updateState); + } + + return changed; +} + +static NvBool +TryEnablingMidFrameAndDWCFWatermarkOneHead(NVDevEvoPtr pDevEvo, + NvU32 sd, + NvU32 head, + NVEvoUpdateState *updateState) +{ + const NVEvoSubDevHeadStateRec *pHeadState = + &pDevEvo->gpus[sd].headState[head]; + NvBool changed = FALSE; + + if (pHeadState->disableMidFrameAndDWCFWatermark && + !pHeadState->targetDisableMidFrameAndDWCFWatermark) { + + NvBool isIdle; + + if (pDevEvo->hal->IsChannelIdle(pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + &isIdle) && isIdle) { + nvEnableMidFrameAndDWCFWatermark(pDevEvo, + sd, + head, + TRUE /* enable */, + updateState); + changed = TRUE; + } else { + // Schedule another timer to try again later. 
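+            // (The 10-second timer runs TryToDoPostFlipIMP(), which keeps
+            // rescheduling itself through this path until the main layer
+            // goes idle.)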
+            SchedulePostFlipIMPTimer(pDevEvo);
+        }
+    }
+
+    return changed;
+}
+
+static void
+TryToDoPostFlipIMP(void *dataPtr, NvU32 dataU32)
+{
+    NVDevEvoPtr pDevEvo = dataPtr;
+    NvU32 head, sd;
+    NVDispEvoPtr pDispEvo;
+
+    pDevEvo->postFlipIMPTimer = NULL;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        NVEvoUpdateState updateState = { };
+        NvBool update = FALSE;
+
+        for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+            if (TryLoweringUsageBoundsOneHead(pDevEvo, sd, head,
+                                              &updateState)) {
+                update = TRUE;
+            }
+
+            if (TryEnablingMidFrameAndDWCFWatermarkOneHead(
+                    pDevEvo,
+                    sd,
+                    head,
+                    &updateState)) {
+                update = TRUE;
+            }
+        }
+
+        if (update) {
+            nvDoIMPUpdateEvo(pDispEvo, &updateState);
+        }
+    }
+}
+
+static void SchedulePostFlipIMPTimer(NVDevEvoPtr pDevEvo)
+{
+    if (!pDevEvo->postFlipIMPTimer) {
+        pDevEvo->postFlipIMPTimer =
+            nvkms_alloc_timer(
+                TryToDoPostFlipIMP,
+                pDevEvo,
+                0, /* dataU32 */
+                10000000 /* 10 seconds */);
+    }
+}
+
+void nvEvoCancelPostFlipIMPTimer(NVDevEvoPtr pDevEvo)
+{
+    nvkms_free_timer(pDevEvo->postFlipIMPTimer);
+    pDevEvo->postFlipIMPTimer = NULL;
+}
+
+/*!
+ * If necessary, schedule a timer to see if usage bounds can be lowered.
+ */
+static void SchedulePostFlipIMP(NVDevEvoPtr pDevEvo)
+{
+    NvU32 head, sd;
+    NVDispEvoPtr pDispEvo;
+
+    // If a timer is already scheduled, do nothing.
+    if (pDevEvo->postFlipIMPTimer) {
+        return;
+    }
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) {
+            const NVEvoSubDevHeadStateRec *pHeadState =
+                &pDevEvo->gpus[sd].headState[head];
+
+            if (!UsageBoundsEqual(&pHeadState->usage,
+                                  &pHeadState->targetUsage) ||
+                (pHeadState->disableMidFrameAndDWCFWatermark !=
+                 pHeadState->targetDisableMidFrameAndDWCFWatermark)) {
+
+                SchedulePostFlipIMPTimer(pDevEvo);
+                return;
+            }
+        }
+    }
+}
+
+/*!
+ * Program a flip on all requested layers on all requested heads on
+ * all requested disps in NvKmsFlipRequest.
+ *
+ * \param[in]  skipUpdate     Update software state tracking, but don't kick
+ *                            off or perform an UPDATE.
+ *
+ *                            Note that this should be used only when the
+ *                            satellite channels (including the cursor) are
+ *                            disabled -- only the core channel should be
+ *                            displaying anything, and only the core surface
+ *                            should be specified in a skipUpdate flip.
+ * \param[in]  allowFlipLock  Whether this update should use fliplocked base
+ *                            flips. This is used on nvdisplay to set the
+ *                            interlock mask to include all fliplocked
+ *                            channels if necessary. This should currently
+ *                            only be set when this flip was initiated
+ *                            through NVKMS_IOCTL_FLIP.
+ */
+NvBool nvFlipEvo(NVDevEvoPtr pDevEvo,
+                 const struct NvKmsPerOpenDev *pOpenDev,
+                 const struct NvKmsFlipRequest *request,
+                 struct NvKmsFlipReply *reply,
+                 NvBool skipUpdate,
+                 NvBool allowFlipLock)
+{
+    NvU32 head, sd;
+    NvU32 requestedHeadCount, activeHeadCount, dirtyBaseChannelCount;
+    NvBool ret = FALSE;
+    NvBool changed = FALSE;
+    NvBool allowVrr = request->allowVrr;
+    NVDispEvoPtr pDispEvo;
+    struct NvKmsFlipWorkArea *pWorkArea =
+        nvPreallocGet(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA,
+                      sizeof(*pWorkArea));
+
+    nvkms_memset(pWorkArea, 0, sizeof(*pWorkArea));
+
+    /*
+     * Do not execute NVKMS_IOCTL_FLIP if the display channel has not yet
+     * been transitioned from vbios to driver. A modeset is required to
+     * transition the display channel from vbios to driver.
+     *
+     * The NVKMS client should do a modeset before initiating
+     * NVKMS_IOCTL_FLIP requests.
+ */ + if (pDevEvo->coreInitMethodsPending) { + goto done; + } + + /* + * Initialize the work area. Note we take two snapshots of the + * current headState: newState and oldState. newState will + * describe the new configuration. After that is applied, we will + * refer to oldState to identify any surfaces that are no longer + * in use. + */ + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + for (head = 0; head < ARRAY_LEN(pWorkArea->sd[sd].head); head++) { + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].newState); + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].oldState); + } + } + + + /*! + * Count active and requested heads so we can make a decision about VRR + * and register syncpts if specified. + */ + requestedHeadCount = activeHeadCount = dirtyBaseChannelCount = 0; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + const struct NvKmsFlipRequestOneSubDevice *pRequestOneSubDevice = + &request->sd[sd]; + + for (head = 0; head < ARRAY_LEN(pRequestOneSubDevice->head); head++) { + const NvBool headActive = nvHeadIsActive(pDispEvo, head); + + if (headActive) { + activeHeadCount++; + } + + if (NVBIT(head) & pRequestOneSubDevice->requestedHeadsBitMask) { + requestedHeadCount++; + } + + if (headActive) { + if (!nvHandleSyncptRegistration( + pDevEvo, + head, + &pRequestOneSubDevice->head[head], + &pWorkArea->sd[sd].head[head].newState)) { + goto done; + } + } + } + } + + /* Deactivate VRR if only a subset of the heads are requested */ + if (requestedHeadCount != activeHeadCount) { + allowVrr = FALSE; + } + + /* Validate the flip parameters and update the work area. */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + const struct NvKmsFlipRequestOneSubDevice *pRequestOneSubDevice = + &request->sd[sd]; + + for (head = 0; head < ARRAY_LEN(pRequestOneSubDevice->head); head++) { + const NVDispHeadStateEvoRec *pHeadState; + const NvBool headActive = nvHeadIsActive(pDispEvo, head); + + if (!(NVBIT(head) & pRequestOneSubDevice->requestedHeadsBitMask)) { + continue; + } + + if (!headActive) { + goto done; + } + + pHeadState = &pDispEvo->headState[head]; + + if (!nvUpdateFlipEvoHwState( + pOpenDev, + pDevEvo, + sd, + head, + &pRequestOneSubDevice->head[head], + &pWorkArea->sd[sd].head[head].newState, + allowVrr, + &pHeadState->timings.viewPort.possibleUsage)) { + goto done; + } + + if (pWorkArea->sd[sd].head[head].newState.dirty.layer[NVKMS_MAIN_LAYER]) { + dirtyBaseChannelCount++; + } + + if (!nvValidateFlipEvoHwState( + pDevEvo, + head, + &pHeadState->timings, + &pWorkArea->sd[sd].head[head].newState)) { + goto done; + } + + pWorkArea->sd[sd].changed = TRUE; + changed = TRUE; + } + } + + /* Deactivate VRR if only a subset of the heads are being flipped */ + if (dirtyBaseChannelCount != activeHeadCount) { + allowVrr = FALSE; + } + + if (!ValidatePerDispState(pDevEvo, pWorkArea)) { + goto done; + } + + /* If nothing changed, fail. */ + + if (!changed) { + goto done; + } + + ret = AllocatePreFlipBandwidth(pDevEvo, pWorkArea); + if (!ret) { + goto done; + } + + if (!request->commit) { + ret = NV_TRUE; + goto done; + } + + if (!PrepareToDoPreFlipIMP(pDevEvo, pWorkArea)) { + goto done; + } + + /* + * At this point, something changed on at least one head of one + * subdevice, and has been validated. Apply the request to our + * hardware and software state. We must not fail beyond this + * point. 
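+     * (Request validation, pre-flip bandwidth allocation and the pre-flip
+     * usage bounds checks above are the only operations that can fail.)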
+ */ + + ret = TRUE; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (!pWorkArea->sd[sd].changed) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + // Increase refCnt of surfaces used AFTER flip + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].newState, + NV_TRUE); + } + } + + PreFlipIMP(pDevEvo, pWorkArea); + + /* Apply NvKmsFlipRequest::allowVrr only if a base channel has become dirty */ + if (dirtyBaseChannelCount > 0) { + nvSetVrrActive(pDevEvo, allowVrr); + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + const struct NvKmsFlipRequestOneSubDevice *pRequestOneSubDevice = + &request->sd[sd]; + + NVEvoUpdateState updateState = { }; + + if (!pWorkArea->sd[sd].changed) { + continue; + } + + pDispEvo = pDevEvo->gpus[sd].pDispEvo; + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + const NVFlipEvoHwState *pFlipState = + &pWorkArea->sd[sd].head[head].newState; + const struct NvKmsFlipCommonParams *pParams = + &pRequestOneSubDevice->head[head]; + + if (pParams->layer[NVKMS_MAIN_LAYER].skipPendingFlips && + pFlipState->dirty.layer[NVKMS_MAIN_LAYER] && + !skipUpdate) { + pDevEvo->hal->AccelerateChannel( + pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + &pWorkArea->sd[sd].head[head].oldAccelerators); + pWorkArea->sd[sd].head[head].accelerated = TRUE; + } + + nvFlipEvoOneHead(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].newState, + allowFlipLock, + &updateState); + } + + if (!skipUpdate) { + pDevEvo->hal->Update(pDevEvo, &updateState, TRUE /* releaseElv */); + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + // Decrease refCnt of surfaces used BEFORE the flip + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].oldState, + NV_FALSE); + } + + FillPostSyncptReply(pDevEvo, + sd, + &request->sd[sd], + &reply->sd[sd], + pWorkArea); + + } + + { + NvU64 startTime = 0; + const NvU32 timeout = 2000000; /* 2 seconds */ + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!pWorkArea->sd[sd].changed) { + continue; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + if (!pWorkArea->sd[sd].head[head].accelerated) { + continue; + } + + if (!nvEvoPollForNoMethodPending(pDevEvo, + sd, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + &startTime, + timeout)) { + nvAssert(!"Timed out while idling base channel"); + } + + pDevEvo->hal->ResetChannelAccelerators( + pDevEvo, + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER], + sd, + pWorkArea->sd[sd].head[head].oldAccelerators); + } + } + } + + if (dirtyBaseChannelCount > 0) { + nvSetNextVrrFlipTypeAndIndex(pDevEvo, reply); + } else { + // TODO Schedule vrr unstall; per-disp/per-device? + } + + if (!skipUpdate) { + // Note that usage bounds are not lowered here, because the flip + // queued by this function may not occur until later. Instead, schedule + // a timer for later to check if the usage bounds can be lowered. + SchedulePostFlipIMP(pDevEvo); + + pDevEvo->skipConsoleRestore = FALSE; + } + + /* fall through */ + +done: + + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_FLIP_WORK_AREA); + + return ret; +} diff --git a/src/nvidia-modeset/src/nvkms-framelock.c b/src/nvidia-modeset/src/nvkms-framelock.c new file mode 100644 index 000000000..cfb00e25f --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-framelock.c @@ -0,0 +1,2210 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-framelock.h" +#include "nvkms-dpy.h" +#include "nvkms-utils.h" +#include "nvkms-evo.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" + +#include "nvkms-private.h" /* nvSendDpyAttributeChangedEventEvo() */ + +#include +#include /* NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS */ +#include +#include "nvos.h" + +static NvBool FrameLockUseHouseSyncGetSupport(NVFrameLockEvoPtr pFrameLockEvo, + NvU32 *val); +static NvBool FrameLockSetPolarity( + NVFrameLockEvoPtr pFrameLockEvo, + enum NvKmsFrameLockAttributePolarityValue val); +static NvBool HouseSyncOutputModeUsable(const NVFrameLockEvoRec *pFrameLockEvo); + +/*! + * Handle framelock sync gain/loss events triggered from resman. + * + * When RM sends an event notification that's handled by FrameLockEvent, + * that function schedules a timer to service that event notification. + * These timers are serviced out of order, though; we may receive a + * SYNC_LOSS event followed by a SYNC_GAIN event, but our scheduled + * callbacks may be called in the reverse order. + * + * Since we can't trust that events were serviced in order, this function + * responds to every sync gain or loss event by querying the actual + * sync status across all GPUs from RM and updating our cached sync status + * and notifying clients if necessary. + */ +static void +FrameLockHandleSyncEvent(void *dataPtr, NvU32 dataU32) +{ + NVDispEvoPtr pDispEvo = dataPtr; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + NvBool syncReadyCurrent = FALSE; + NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS statusParams = { 0 }; + + statusParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC, + &statusParams, + sizeof(statusParams)) != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed to query gsync status after event"); + } else { + if (statusParams.bTiming && statusParams.bSyncReady) { + syncReadyCurrent = TRUE; + } + } + + // Update syncReadyGpuMask for consistency with non-NVKMS path, although + // it is currently unused. 
+ if (syncReadyCurrent) { + pFrameLockEvo->syncReadyGpuMask |= (1 << connectorIndex); + } else { + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + } + + if (syncReadyCurrent != pFrameLockEvo->syncReadyLast) { + pFrameLockEvo->syncReadyLast = syncReadyCurrent; + nvSendFrameLockAttributeChangedEventEvo( + pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + pFrameLockEvo->syncReadyLast); + } +} + +/*! + * Receive framelock events from resman. + * + * This function is registered as a kernel callback function from + * resman. + * + * However, it is called with resman's context (alternate stack, + * resman locks held, etc). Schedule deferred work, so that we can + * process the event without resman's encumbrances. + */ +static void FrameLockEvent(void *arg, void *pEventDataVoid, + NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + static nvkms_timer_proc_t *callbackTable[] = { + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(0)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(1)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(2)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(3)] = FrameLockHandleSyncEvent, + + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(0)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(1)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(2)] = FrameLockHandleSyncEvent, + [NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(3)] = FrameLockHandleSyncEvent, + }; + + const NvNotification *pNotifyData = pEventDataVoid; + NvU32 notifyIndex; + + /* callbackTable[] assumes at most four connectors per gsync */ + ct_assert(NV30F1_GSYNC_CONNECTOR_COUNT == 4); + + if (pNotifyData == NULL) { + nvAssert(!"Invalid pNotifyData from resman"); + return; + } + + notifyIndex = pNotifyData->info32; + + if ((notifyIndex >= ARRAY_LEN(callbackTable)) || + (callbackTable[notifyIndex] == NULL)) { + nvAssert(!"Invalid notifyIndex from resman"); + return; + } + + (void) nvkms_alloc_timer_with_ref_ptr( + callbackTable[notifyIndex], /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* unused */ + 0); /* timeout (i.e., service as soon as possible) */ +} + +/*! + * Free all events and handles allocated in FrameLockCreateEvents(). + */ +static void FrameLockDestroyEvents(NVDispEvoPtr pDispEvo) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + unsigned int i; + + if (pFrameLockEvo == NULL) { + return; + } + + for (i = 0; i < NV_FRAMELOCK_NUM_EVENTS; i++) { + if (pDispEvo->framelock.gsyncEvent[i].handle) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + pDispEvo->framelock.gsyncEvent[i].handle); + nvFreeUnixRmHandle(&pDispEvo->pDevEvo->handleAllocator, + pDispEvo->framelock.gsyncEvent[i].handle); + pDispEvo->framelock.gsyncEvent[i].handle = 0; + } + } +} + +/*! + * Allocate and configure all events and handles associated with them. 
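+ * Returns TRUE on success (or if no framelock device is bound to this disp);
+ * on failure, any partially registered events are destroyed and FALSE is
+ * returned.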
+ */ +static NvBool FrameLockCreateEvents(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + const NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + unsigned int i; + + if (pDispEvo->pFrameLockEvo == NULL) { + return TRUE; + } + + nvAssert(connectorIndex < NV30F1_GSYNC_CONNECTOR_COUNT); + + /* We should only get here on hardware that has per-connector events */ + nvAssert(!(pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_PRIMARY_CONNECTOR_EVENT)); + + for (i = 0; i < NV_FRAMELOCK_NUM_EVENTS; i++) { + NvU32 notifier; + NvBool ret; + + switch (i) { + case NV_FRAMELOCK_SYNC_LOSS: + notifier = NV30F1_GSYNC_NOTIFIERS_SYNC_LOSS(connectorIndex); + break; + case NV_FRAMELOCK_SYNC_GAIN: + notifier = NV30F1_GSYNC_NOTIFIERS_SYNC_GAIN(connectorIndex); + break; + default: + nvAssert(!"Unknown gsync event index"); + continue; + } + + pDispEvo->framelock.gsyncEvent[i].handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ret = TRUE; + + if (!nvRmRegisterCallback(pDevEvo, + &pDispEvo->framelock.gsyncEvent[i].callback, + pDispEvo->ref_ptr, + pFrameLockEvo->device, + pDispEvo->framelock.gsyncEvent[i].handle, + FrameLockEvent, + notifier)) { + ret = FALSE; + } + + if (!ret) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Failed to register for framelock event %d", i); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDispEvo->framelock.gsyncEvent[i].handle); + pDispEvo->framelock.gsyncEvent[i].handle = 0; + goto noEvents; + } + } + + return TRUE; + +noEvents: + + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Failed to register for framelock events"); + + FrameLockDestroyEvents(pDispEvo); + + return FALSE; +} + +/*! + * Bind a pSubDev to a pFrameLock. + */ +static void BindGpuToFrameLock(NVDevEvoPtr pDevEvo, + const NvU32 gpuId, + NVFrameLockEvoPtr pFrameLockEvo, + NvU32 connectorIndex) +{ + NVDispEvoPtr pDispEvo; + unsigned int dispIndex; + + if (pFrameLockEvo->nGpuIds >= ARRAY_LEN(pFrameLockEvo->gpuIds)) { + return; + } + + pFrameLockEvo->gpuIds[pFrameLockEvo->nGpuIds] = gpuId; + pFrameLockEvo->nGpuIds++; + + /* + * If a disp exists for this subdevice, wire it up. + * Note that this should not happen for SLI non-display-owners. + */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + if (nvGpuIdOfDispEvo(pDispEvo) != gpuId) { + continue; + } + + pDispEvo->pFrameLockEvo = pFrameLockEvo; + + pDispEvo->framelock.connectorIndex = connectorIndex; + + pFrameLockEvo->connectedGpuMask |= (1 << connectorIndex); + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + + /* Set up stereo synchronization events */ + FrameLockCreateEvents(pDispEvo); + } +} + +/*! + * Break the binding of pSubDev and pDisp to pFrameLock that we + * created in BindGpuToFrameLock(). 
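+ * Note that the gpuIds[] array is compacted after the entry is removed,
+ * which is why UnBindFrameLockFromDevEvo() iterates over it from highest
+ * index to lowest.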
+ */ +static void UnbindGpuFromFrameLock(NVDevEvoPtr pDevEvo, + const NvU32 gpuId, + NVFrameLockEvoPtr pFrameLockEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int dispIndex; + unsigned int gpu, j; + + for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) { + if (pFrameLockEvo->gpuIds[gpu] == gpuId) { + break; + } + } + + if (gpu == pFrameLockEvo->nGpuIds) { + return; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + const NvU32 connectorIndex = pDispEvo->framelock.connectorIndex; + + if (nvGpuIdOfDispEvo(pDispEvo) != gpuId) { + continue; + } + + FrameLockDestroyEvents(pDispEvo); + + pFrameLockEvo->connectedGpuMask &= ~(1 << connectorIndex); + pFrameLockEvo->syncReadyGpuMask &= ~(1 << connectorIndex); + + pDispEvo->framelock.connectorIndex = 0; + + pDispEvo->pFrameLockEvo = NULL; + } + + for (j = gpu; j < (pFrameLockEvo->nGpuIds - 1); j++) { + pFrameLockEvo->gpuIds[j] = pFrameLockEvo->gpuIds[j+1]; + } + + pFrameLockEvo->nGpuIds--; +} + +/*! + * Find the NVFrameLockEvoPtr with the specified gsyncId. + */ +static NVFrameLockEvoPtr FindFrameLock(NvU32 gsyncId) +{ + NVFrameLockEvoPtr pFrameLockEvo; + + FOR_ALL_EVO_FRAMELOCKS(pFrameLockEvo) { + if (pFrameLockEvo->gsyncId == gsyncId) { + return pFrameLockEvo; + } + } + + return NULL; +} + +/*! + * Return whether the NVDevEvoPtr contains a GPU with the specified gpuId. + */ +static NvBool GpuIdInDevEvo(NVDevEvoPtr pDevEvo, NvU32 gpuId) +{ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pDevEvo->pSubDevices[sd]->gpuId == gpuId) { + return TRUE; + } + } + + return FALSE; +} + +/*! + * Free the pFrameLock object. + */ +static void FreeFrameLockEvo(NVDevEvoPtr pDevEvo, + NVFrameLockEvoPtr pFrameLockEvo) +{ + if (pFrameLockEvo == NULL) { + return; + } + + if (pFrameLockEvo->device != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pFrameLockEvo->device); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pFrameLockEvo->device); + pFrameLockEvo->device = 0; + } + + nvAssert(pFrameLockEvo->nGpuIds == 0); + + nvListDel(&pFrameLockEvo->frameLockListEntry); + + nvFree(pFrameLockEvo); +} + +/*! + * Allocate and initialize a new pFrameLock object. 
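+ * Returns NULL if the RM gsync object cannot be allocated, its capabilities
+ * cannot be queried, or the board reports a firmware revision mismatch.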
+ */ +static NVFrameLockEvoPtr AllocFrameLockEvo(NVDevEvoPtr pDevEvo, + int instance, NvU32 gsyncId) +{ + NV30F1_ALLOC_PARAMETERS gsyncAllocParams = { 0 }; + NV30F1_CTRL_GSYNC_GET_CAPS_PARAMS gsyncGetCapsParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo; + + nvAssert(FindFrameLock(gsyncId) == NULL); + + pFrameLockEvo = nvCalloc(1, sizeof(NVFrameLockEvoRec)); + + if (pFrameLockEvo == NULL) { + return NULL; + } + + nvListInit(&pFrameLockEvo->frameLockListEntry); + + pFrameLockEvo->device = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + gsyncAllocParams.gsyncInstance = instance; + + /* allocate a framelock object for the framelock device */ + if (nvRmApiAlloc(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30_GSYNC, + &gsyncAllocParams) != NVOS_STATUS_SUCCESS) { + pFrameLockEvo->device = 0; + goto fail; + } + + /* Store unique frame lock device ID */ + pFrameLockEvo->gsyncId = gsyncId; + pFrameLockEvo->houseSyncUseable = 0; + pFrameLockEvo->nGpuIds = 0; + + /* Initialize the state for the framelock board */ + pFrameLockEvo->polarity = NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE; + pFrameLockEvo->syncDelay = 0; + pFrameLockEvo->syncInterval = 0; + pFrameLockEvo->videoMode = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO; + pFrameLockEvo->testMode = FALSE; + pFrameLockEvo->houseSyncMode = + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED; + + /* Query the framelock revision information */ + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_CAPS, + &gsyncGetCapsParams, + sizeof(gsyncGetCapsParams)) + != NVOS_STATUS_SUCCESS) { + goto fail; + } + + /* Check if the Quadro Sync card has a firmware + * version compatible with the GPUs connected to it. + */ + pDevEvo->badFramelockFirmware = gsyncGetCapsParams.isFirmwareRevMismatch; + if (gsyncGetCapsParams.isFirmwareRevMismatch) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "The firmware on this Quadro Sync " + "card is not compatible with the GPUs connected to it." + " Please visit " + " " + "for instructions on installing the correct firmware."); + goto fail; + } + + /* gsyncGetCapsParams.revId has the framelock board id in the high 4 bits + * and the FPGA revision in the low 4 bits. This is preserved here for + * legacy clients, but we expose the full board ID (e.g. 0x358, 0x2060, + * 0x2061) and firmware version individually, so clients can more easily + * distinguish P2061 ("Quadro Sync II") from P2060 and P358 + * ("Quadro Sync"). 
+ */ + + pFrameLockEvo->fpgaIdAndRevision = gsyncGetCapsParams.revId; + pFrameLockEvo->boardId = gsyncGetCapsParams.boardId; + pFrameLockEvo->firmwareMajorVersion = gsyncGetCapsParams.revision; + pFrameLockEvo->firmwareMinorVersion = gsyncGetCapsParams.extendedRevision; + pFrameLockEvo->caps = gsyncGetCapsParams.capFlags; + pFrameLockEvo->maxSyncSkew = gsyncGetCapsParams.maxSyncSkew; + pFrameLockEvo->syncSkewResolution = gsyncGetCapsParams.syncSkewResolution; + pFrameLockEvo->maxSyncInterval = gsyncGetCapsParams.maxSyncInterval; + pFrameLockEvo->videoModeReadOnly = !!(gsyncGetCapsParams.capFlags & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_ONLY_GET_VIDEO_MODE); + + /* Determine if house sync is selectable on this frame lock device */ + if (!FrameLockUseHouseSyncGetSupport(pFrameLockEvo, + &pFrameLockEvo->houseSyncUseable)) { + pFrameLockEvo->houseSyncUseable = FALSE; + } + + pFrameLockEvo->houseSyncModeValidValues = + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_DISABLED); + + if (pFrameLockEvo->houseSyncUseable) { + pFrameLockEvo->houseSyncModeValidValues |= + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_INPUT); + } + + if (HouseSyncOutputModeUsable(pFrameLockEvo)) { + pFrameLockEvo->houseSyncModeValidValues |= + (1 << NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT); + } + + /* Add frame lock device to global list. */ + nvListAppend(&pFrameLockEvo->frameLockListEntry, &nvEvoGlobal.frameLockList); + + return pFrameLockEvo; + +fail: + + FreeFrameLockEvo(pDevEvo, pFrameLockEvo); + return NULL; +} + + +static void BindFrameLockToDevEvo(NVFrameLockEvoPtr pFrameLockEvo, + NVDevEvoPtr pDevEvo) +{ + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_PARAMS gsyncTopologyParams = { }; + int i; + + /* find out which gpus are attached to which connectors */ + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GET_GSYNC_GPU_TOPOLOGY, + &gsyncTopologyParams, + sizeof(gsyncTopologyParams)) + != NVOS_STATUS_SUCCESS) { + return; + } + + /* Bind corresponding GPUs to the Frame Lock device */ + for (i = 0; i < ARRAY_LEN(gsyncTopologyParams.gpus); i++) { + + NvU32 connectorIndex; + const NvU32 gpuId = gsyncTopologyParams.gpus[i].gpuId; + + if (gpuId == NV30F1_CTRL_GPU_INVALID_ID) { + continue; + } + + if (!GpuIdInDevEvo(pDevEvo, gpuId)) { + continue; + } + + /* + * Connector type of _NONE means we sync through a proxy GPU, + * which we do not support. + */ + if (gsyncTopologyParams.gpus[i].connector == + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_NONE) { + continue; + } + /* + * gsyncTopologyParams.gpus[i].connector is an enumerated + * type; convert it to a 0-based index + */ + nvAssert(gsyncTopologyParams.gpus[i].connector < + (NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE + + NV30F1_GSYNC_CONNECTOR_COUNT)); + connectorIndex = gsyncTopologyParams.gpus[i].connector - + NV30F1_CTRL_GET_GSYNC_GPU_TOPOLOGY_ONE; + + BindGpuToFrameLock(pDevEvo, gpuId, pFrameLockEvo, connectorIndex); + } +} + +static void UnBindFrameLockFromDevEvo(NVFrameLockEvoPtr pFrameLockEvo, + NVDevEvoPtr pDevEvo) +{ + int i; + + /* + * Loop through GPUs from highest to lowest, because + * UnbindGpuFromFrameLock() may remove gpuIds[i]. + */ + for (i = pFrameLockEvo->nGpuIds - 1; i >= 0; i--) { + const NvU32 gpuId = pFrameLockEvo->gpuIds[i]; + + if (!GpuIdInDevEvo(pDevEvo, gpuId)) { + continue; + } + + UnbindGpuFromFrameLock(pDevEvo, gpuId, pFrameLockEvo); + } +} + + +/*! + * Find all of the available framelock devices. 
+ * + * Framelock devices can only be recognized by resman after an RM + * client has attached a GPU that the framelock device is connected + * to. So, subsequent calls to this function may find additional + * framelock devices. + * + * Allocate framelock objects for all the newly found framelock devices. + */ +void nvAllocFrameLocksEvo(NVDevEvoPtr pDevEvo) +{ + NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS attachedGsyncParams = { }; + int i; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GSYNC_GET_ATTACHED_IDS, + &attachedGsyncParams, sizeof(attachedGsyncParams)) + != NVOS_STATUS_SUCCESS) { + return; + } + + for (i = 0; i < ARRAY_LEN(attachedGsyncParams.gsyncIds); i++) { + + NVFrameLockEvoPtr pFrameLockEvo; + + if (attachedGsyncParams.gsyncIds[i] == NV0000_CTRL_GSYNC_INVALID_ID) { + continue; + } + + pFrameLockEvo = FindFrameLock(attachedGsyncParams.gsyncIds[i]); + + if (pFrameLockEvo == NULL) { + pFrameLockEvo = AllocFrameLockEvo(pDevEvo, i, + attachedGsyncParams.gsyncIds[i]); + } + + if (pFrameLockEvo == NULL) { + continue; + } + + BindFrameLockToDevEvo(pFrameLockEvo, pDevEvo); + } +} + +/*! + * Free any framelock devices connected to any GPU on this pDevEvo. + */ + +void nvFreeFrameLocksEvo(NVDevEvoPtr pDevEvo) +{ + NVFrameLockEvoPtr pFrameLockEvo, pFrameLockEvoTmp; + + /* Destroy the pFrameLockEvos */ + nvListForEachEntry_safe(pFrameLockEvo, pFrameLockEvoTmp, + &nvEvoGlobal.frameLockList, frameLockListEntry) { + + UnBindFrameLockFromDevEvo(pFrameLockEvo, pDevEvo); + + if (pFrameLockEvo->nGpuIds == 0) { + FreeFrameLockEvo(pDevEvo, pFrameLockEvo); + } + } +} + +/*! + * Determine if this framelock device supports user selection of house + * sync. assign val appropriately. Returns TRUE if the attribute was + * successfully queried. + */ +static NvBool FrameLockUseHouseSyncGetSupport(NVFrameLockEvoPtr pFrameLockEvo, + NvU32 *val) +{ + NV30F1_CTRL_GSYNC_GET_CONTROL_PARAMS_PARAMS + gsyncGetControlParamsParams = { 0 }; + NvU32 ret; + + if (!val) return FALSE; + + gsyncGetControlParamsParams.which = + NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_USE_HOUSE; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_PARAMS, + &gsyncGetControlParamsParams, + sizeof(gsyncGetControlParamsParams)); + + /* If we can query Use House Sync, then it is available */ + *val = (ret == NVOS_STATUS_SUCCESS) ? TRUE : FALSE; + + return *val; +} + + +/*! + * Return whether or not this framelock device supports house sync mode. + * + * House sync mode is currently only available on P2061 (Quadro Sync II). + */ +static NvBool HouseSyncOutputModeUsable(const NVFrameLockEvoRec *pFrameLockEvo) +{ + return (pFrameLockEvo->houseSyncUseable && + (pFrameLockEvo->boardId == + NV30F1_CTRL_GSYNC_GET_CAPS_BOARD_ID_P2061)); +} + + +/*! + * Enable or disable house sync output mode in the framelock board. + */ +static NvBool FrameLockSetHouseSyncOutputMode(NVFrameLockEvoPtr pFrameLockEvo, + NvBool enable) +{ + NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_PARAMS + gsyncSetHouseSyncModeParams = { 0 }; + NvU32 ret; + NvU8 houseSyncMode = enable ? 
NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_OUTPUT : + NV30F1_CTRL_GSYNC_HOUSE_SYNC_MODE_INPUT; + + nvAssert(HouseSyncOutputModeUsable(pFrameLockEvo)); + + gsyncSetHouseSyncModeParams.houseSyncMode = houseSyncMode; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_HOUSE_SYNC_MODE, + &gsyncSetHouseSyncModeParams, + sizeof(gsyncSetHouseSyncModeParams)); + + return (ret == NVOS_STATUS_SUCCESS); +} + + +/*! + * Set the framelock to use the house sync if val is TRUE, otherwise + * set the framelock to use external sync. Returns FALSE if the + * assignment failed. + */ +NvBool nvFrameLockSetUseHouseSyncEvo(NVFrameLockEvoPtr pFrameLockEvo, NvU32 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + NvBool houseSyncOutputMode = FALSE; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_USE_HOUSE; + + gsyncSetControlParamsParams.useHouseSync = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + if (HouseSyncOutputModeUsable(pFrameLockEvo)) { + + NvS64 houseSyncInputPresent; + NvBool allowHouseSyncOutput = FALSE; + + if (nvFrameLockGetStatusEvo(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS, + &houseSyncInputPresent)) { + if (houseSyncInputPresent == 0) { + allowHouseSyncOutput = TRUE; + } + } + + if (!val && allowHouseSyncOutput && + (pFrameLockEvo->houseSyncMode == + NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE_OUTPUT)) { + + houseSyncOutputMode = TRUE; + } + + if (!FrameLockSetHouseSyncOutputMode(pFrameLockEvo, houseSyncOutputMode)) { + return FALSE; + } + } + + /* + * House sync polarity is required to be rising edge if house sync is not + * in use. + * + * In addition, house sync polarity has no effect when house sync output + * mode is in use. + */ + if (val && !houseSyncOutputMode) { + return FrameLockSetPolarity(pFrameLockEvo, pFrameLockEvo->polarity); + } else { + return FrameLockSetPolarity(pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE); + } +} + +/*! + * Set the polarity according to val; val is interpreted as an + * NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY value. Returns FALSE if the + * assignment failed. + */ +static NvBool FrameLockSetPolarity( + NVFrameLockEvoPtr pFrameLockEvo, + enum NvKmsFrameLockAttributePolarityValue val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + NvU32 polarity; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY; + + switch (val) { + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_RISING_EDGE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_FALLING_EDGE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES: + polarity = NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_POLARITY_BOTH_EDGES; + break; + + default: + return FALSE; + } + + gsyncSetControlParamsParams.syncPolarity = polarity; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + return TRUE; +} + +/*! 
+ * Set the sync delay to the value given in val. Returns FALSE if the + * assignment failed. Assigns pFrameLockEvo->syncDelay upon success. + */ +static NvBool FrameLockSetSyncDelay(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + if (val > pFrameLockEvo->maxSyncSkew) return FALSE; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_SYNC_SKEW; + + gsyncSetControlParamsParams.syncSkew = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->syncDelay = val; + + return TRUE; +} + +/*! + * Set the sync interval to the value given in val. Returns FALSE if + * the assignment failed. Assigns pFrameLockEvo->syncInterval upon + * success. + */ +static NvBool FrameLockSetSyncInterval(NVFrameLockEvoPtr pFrameLockEvo, + NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_NSYNC; + + gsyncSetControlParamsParams.nSync = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->syncInterval = val; + + return TRUE; +} + +/*! + * Query the status of the values that are acquired through the + * GET_STATUS_SYNC command, and assign the value to val. Returns + * FALSE if the query failed or if attr is not one of the currently + * handled attributes. + */ +static NvBool FrameLockGetStatusSync(const NVDispEvoRec *pDispEvo, NvS64 *val, + enum NvKmsDispAttribute nvKmsAttribute) +{ + NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_PARAMS gsyncGetStatusSyncParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + + gsyncGetStatusSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS_SYNC, + &gsyncGetStatusSyncParams, + sizeof(gsyncGetStatusSyncParams)) + != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + switch (nvKmsAttribute) + { + + case NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC: + *val = (gsyncGetStatusSyncParams.bTiming && + gsyncGetStatusSyncParams.bStereoSync && + gsyncGetStatusSyncParams.bSyncReady); + break; + + case NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING: + *val = gsyncGetStatusSyncParams.bTiming ? TRUE : FALSE; + break; + + default: + return FALSE; + } + + return TRUE; +} + +/*! + * Return the sync rate. + */ +static NvS64 FrameLockInterpretSyncRate(const NVFrameLockEvoRec *pFrameLockEvo, + NvS64 val) +{ + /* Only show decimal places if they are accurate. The queried + value provides 4 decimal places */ + if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_2DPS) { + // only two are valid + val -= (val % 100); + } else if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_3DPS) { + // only three are valid + val -= (val % 10); + } else if (pFrameLockEvo->caps & + NV30F1_CTRL_GSYNC_GET_CAPS_CAP_FLAGS_FREQ_ACCURACY_4DPS) { + // all four are valid, nothing to do + } + return val; +} + +/*! 
+ * Query the status of one of the values that are acquired through the + * GET_STATUS command, and assign the value to val. Returns FALSE if + * the query failed or if attr is not one of the currently handled + * attributes. + */ +NvBool nvFrameLockGetStatusEvo(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + NV30F1_CTRL_GSYNC_GET_STATUS_PARAMS gsyncGetStatusParams = { 0 }; + + switch (attribute) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_INPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_INPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE: + gsyncGetStatusParams.which = + NV30F1_CTRL_GSYNC_GET_STATUS_HOUSE_SYNC_INCOMING; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_SYNC_READY; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED: + gsyncGetStatusParams.which = + NV30F1_CTRL_GSYNC_GET_STATUS_PORT0_ETHERNET | + NV30F1_CTRL_GSYNC_GET_STATUS_PORT1_ETHERNET; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE: + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4: + gsyncGetStatusParams.which = NV30F1_CTRL_GSYNC_GET_STATUS_REFRESH; + break; + + default: + return FALSE; + } + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_STATUS, + &gsyncGetStatusParams, + sizeof(gsyncGetStatusParams)) + != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + switch (attribute) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS: + *val = gsyncGetStatusParams.bPort0Input ? + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT : + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS: + *val = gsyncGetStatusParams.bPort1Input ? + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_INPUT : + NV_KMS_FRAMELOCK_ATTRIBUTE_PORT_STATUS_OUTPUT; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS: + *val = gsyncGetStatusParams.bHouseSync; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY: + *val = gsyncGetStatusParams.bSyncReady; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED: + *val = 0x0; + if (gsyncGetStatusParams.bPort0Ethernet) + *val |= NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT0; + if (gsyncGetStatusParams.bPort1Ethernet) + *val |= NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED_PORT1; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE: + *val = + FrameLockInterpretSyncRate(pFrameLockEvo, + gsyncGetStatusParams.houseSyncIncoming); + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE: + case NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4: + *val = FrameLockInterpretSyncRate(pFrameLockEvo, + gsyncGetStatusParams.refresh); + if (attribute == NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE) { + /* _STATUS_REFRESH is in Hz/10000, _SYNC_RATE is Hz/1000 */ + *val /= 10; + } + break; + + default: + return FALSE; + } + + return TRUE; +} + +/*! + * [en|dis]able syncing of the GPU to the FrameLock board for the + * display mask associated with that gpu. val controls whether we are + * enabling or disabling. 
+ */ +static NvBool FrameLockSetEnable(NVDispEvoPtr pDispEvo, NvS64 val) +{ + if (val) { + + /* XXX NVKMS TODO: address the following: + + In Xinerama a single app has a channel on each gpu. Before + framelock is enabled the first time per X server, vblanks + are not synchronized, so if a swap groupped app is started + before framelock is enabled the channels get unstalled at + different times, and it's likely that one display will be + armed while the other is not. When framelock is enabled in + this state, we'll deadlock because suddenly the armed display + is waiting on the unarmed display to unstall, and the unarmed + display cannot arm. Prevent this by idling all channels */ + + return nvEnableFrameLockEvo(pDispEvo); + } else { + return nvDisableFrameLockEvo(pDispEvo); + } +} + +static NvBool FrameLockSetWatchdog(NVFrameLockEvoPtr pFrameLockEvo, NvU32 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_WATCHDOG_PARAMS + gsyncSetControlWatchdogParams = { 0 }; + NvU32 ret; + + gsyncSetControlWatchdogParams.enable = val; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_WATCHDOG, + &gsyncSetControlWatchdogParams, + sizeof(gsyncSetControlWatchdogParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + return TRUE; +} + + +/*! + * For the given display, determine if it can be set as a frame lock + * server + */ +static NvBool FrameLockDpyCanBeServer(const NVDpyEvoRec *pDpyEvo) +{ + NV30F1_CTRL_GSYNC_GET_CONTROL_SYNC_PARAMS gsyncGetControlSyncParams = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + const NvU32 head = pDpyEvo->head; + const NVDispHeadStateEvoRec *pHeadState; + NvU32 ret; + + nvAssert(head != NV_INVALID_HEAD); + nvAssert(pDispEvo); + nvAssert(pDispEvo->pFrameLockEvo); + + pHeadState = &pDispEvo->headState[head]; + nvAssert(pHeadState->activeRmId); + + /* If already a server, assume it can be a server. */ + if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) { + return TRUE; + } + + gsyncGetControlSyncParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncGetControlSyncParams.displays = pHeadState->activeRmId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_GET_CONTROL_SYNC, + &gsyncGetControlSyncParams, + sizeof(gsyncGetControlSyncParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + if (gsyncGetControlSyncParams.master && + nvFrameLockServerPossibleEvo(pDpyEvo)) { + return TRUE; + } + + return FALSE; +} + + +/*! + * For the given display, determine if it can be set as a frame lock + * client. + */ +static NvBool FrameLockDpyCanBeClient(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo; + + nvAssert(pDpyEvo->pDispEvo); + nvAssert(pDpyEvo->pDispEvo->pFrameLockEvo); + nvAssert(nvDpyEvoIsActive(pDpyEvo)); + + pDispEvo = pDpyEvo->pDispEvo; + + /* If already a client, assume it can be a client. */ + if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) { + return TRUE; + } + + /* Otherwise, see if we can make it a client. */ + return nvFrameLockClientPossibleEvo(pDpyEvo); +} + + +/*! + * [en|dis]able test mode (based on the value of val). Returns FALSE + * if changing the test mode failed. Assigns pFrameLockEvo->testMode + * upon success. 
+ */ +static NvBool FrameLockSetTestMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_TESTING_PARAMS + gsyncSetControlTestingParams = { 0 }; + NvU32 ret; + + gsyncSetControlTestingParams.bEmitTestSignal = (val == TRUE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_TESTING, + &gsyncSetControlTestingParams, + sizeof(gsyncSetControlTestingParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->testMode = val; + + return TRUE; +} + + +/*! + * Set the video mode according to val; returns FALSE if the + * assignment failed. Assigns pFrameLockEvo->videoMode upon success. + */ +static NvBool FrameLockSetVideoMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_PARAMS_PARAMS + gsyncSetControlParamsParams = { 0 }; + NvU32 ret; + + gsyncSetControlParamsParams.which = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE; + + switch (val) { + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NONE; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_TTL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_TTL; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_BI_LEVEL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_NTSCPALSECAM; + break; + + case NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL: + gsyncSetControlParamsParams.syncVideoMode = + NV30F1_CTRL_GSYNC_SET_CONTROL_VIDEO_MODE_HDTV; + break; + + default: + return FALSE; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_PARAMS, + &gsyncSetControlParamsParams, + sizeof(gsyncSetControlParamsParams)); + + if (ret != NVOS_STATUS_SUCCESS) return FALSE; + + pFrameLockEvo->videoMode = val; + + return TRUE; +} + + +/*! + * Enable or disable the swap ready connection through the gsync + * connector. This should be called when we bind the swap barrier. + */ +static NvBool SetSwapBarrier(NVDispEvoPtr pDispEvo, NvS64 val) +{ + NV30F1_CTRL_GSYNC_SET_CONTROL_SWAP_BARRIER_PARAMS + gsyncSetSwapBarrierParams = { 0 }; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 ret; + NvBool enable = !!val; + + if (!pFrameLockEvo) return FALSE; + + nvSetSwapBarrierNotifyEvo(pDispEvo, enable, TRUE /* isPre */); + + gsyncSetSwapBarrierParams.gpuId = nvGpuIdOfDispEvo(pDispEvo); + gsyncSetSwapBarrierParams.enable = enable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pFrameLockEvo->device, + NV30F1_CTRL_CMD_GSYNC_SET_CONTROL_SWAP_BARRIER, + &gsyncSetSwapBarrierParams, + sizeof(gsyncSetSwapBarrierParams)); + + nvSetSwapBarrierNotifyEvo(pDispEvo, enable, FALSE /* isPre */); + + return (ret == NVOS_STATUS_SUCCESS); +} + + +/*! + * Flush all of our known framelock SW state out to the HW, to make + * sure both are in sync. This should be called any time we get the + * HW back from outside control (e.g., starting X or coming back from + * a VT switch). 
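+ * Returns FALSE if any piece of framelock state could not be re-applied.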
+ */ +static NvBool ResetHardwareOneDisp(NVDispEvoPtr pDispEvo, NvS64 value) +{ + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvU32 activeHeadsMask; + NvBool ret = TRUE; + + if (!pDispEvo->pFrameLockEvo || !value) { + /* Nothing to do */ + return TRUE; + } + + /* We should never get here when framelock is enabled */ + if (pDispEvo->framelock.syncEnabled) { + nvAssert(!"Attempted to reset framelock HW while framelock is enabled"); + return FALSE; + } + + /* (Re-)set the HW state to match the SW state */ + if (!nvFrameLockSetUseHouseSyncEvo(pFrameLockEvo, + pFrameLockEvo->houseSyncArmed)) { + ret = FALSE; + } + if (!FrameLockSetSyncDelay(pFrameLockEvo, pFrameLockEvo->syncDelay)) { + ret = FALSE; + } + if (!FrameLockSetSyncInterval(pFrameLockEvo, pFrameLockEvo->syncInterval)) { + ret = FALSE; + } + if (!FrameLockSetVideoMode(pFrameLockEvo, pFrameLockEvo->videoMode)) { + ret = FALSE; + } + if (!FrameLockSetTestMode(pFrameLockEvo, pFrameLockEvo->testMode)) { + ret = FALSE; + } + + /* Since (we think) sync is disabled, these should always be disabled */ + if (!FrameLockSetWatchdog(pFrameLockEvo, FALSE)) { + ret = FALSE; + } + if (!SetSwapBarrier(pDispEvo, FALSE)) { + ret = FALSE; + } + + /* Disable both server and client lock for all heads */ + activeHeadsMask = nvGetActiveHeadMask(pDispEvo); + + if (!nvFramelockSetControlUnsyncEvo(pDispEvo, activeHeadsMask, TRUE)) { + ret = FALSE; + } + if (!nvFramelockSetControlUnsyncEvo(pDispEvo, activeHeadsMask, FALSE)) { + ret = FALSE; + } + + return ret; +} + + +/*! + * Returns the allowable configurations for the given display device. + * The device must be enabled to advertise server/client + * configuration. + */ +static unsigned int FrameLockGetValidDpyConfig(const NVDpyEvoRec *pDpyEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int valid = + (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED)); + + if (!pDpyEvo || !nvDpyEvoIsActive(pDpyEvo)) { + goto done; + } + + pDispEvo = pDpyEvo->pDispEvo; + + if (!pDispEvo || !pDispEvo->pFrameLockEvo) { + goto done; + } + + /* Check if display can be a server */ + + if (FrameLockDpyCanBeServer(pDpyEvo)) { + valid |= (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER)); + } + + /* Check if display can be a client */ + + if (FrameLockDpyCanBeClient(pDpyEvo)) { + valid |= (1 << (NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT)); + } + + done: + + return valid; +} + +static NvBool GetFrameLock(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + *val = (pDispEvo->pFrameLockEvo) ? 
1 : 0; + return TRUE; +} + +static NvBool SetFrameLockPolarity(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if ((val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_RISING_EDGE) && + (val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_FALLING_EDGE) && + (val != NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY_BOTH_EDGES)) { + return FALSE; + } + + pFrameLockEvo->polarity = val; + + return TRUE; +} + +static NvBool GetFrameLockPolarity(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->polarity; + + return TRUE; +} + +static NvBool GetFrameLockSyncDelay(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->syncDelay; + + return TRUE; +} + +static NvBool GetFrameLockSyncDelayValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = 0; + pValidValues->u.range.max = pFrameLockEvo->maxSyncSkew; + + return TRUE; +} + +static NvBool SetHouseSyncMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if ((val < 0) || (val > 31)) { + return FALSE; + } + + if ((pFrameLockEvo->houseSyncModeValidValues & NVBIT(val)) == 0) { + return FALSE; + } + + pFrameLockEvo->houseSyncMode = val; + + return TRUE; +} + +static NvBool GetHouseSyncMode(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + if (!pFrameLockEvo->houseSyncUseable) return FALSE; + + *val = pFrameLockEvo->houseSyncMode; + + return TRUE; +} + +static NvBool GetHouseSyncModeValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + if (!pFrameLockEvo->houseSyncUseable) { + return FALSE; + } + + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS); + + pValidValues->u.bits.ints = pFrameLockEvo->houseSyncModeValidValues; + + return TRUE; +} + +static NvBool GetFrameLockSyncInterval(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->syncInterval; + + return TRUE; +} + +static NvBool GetFrameLockSyncIntervalValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = 0; + pValidValues->u.range.max = pFrameLockEvo->maxSyncInterval; + + return TRUE; +} + +static NvBool SetFrameLockSync(NVDispEvoRec *pDispEvo, NvS64 val) +{ + NvBool a, b; + + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* If we are already enabled or already disabled, we're done. */ + if (val == pDispEvo->framelock.syncEnabled) return TRUE; + + /* Something must be set to enable/disable */ + if (nvDpyIdIsInvalid(pDispEvo->framelock.server) && + nvDpyIdListIsEmpty(pDispEvo->framelock.clients)) return FALSE; + + /* If we're disabling and test mode is currently enabled, disable it */ + if (!val && + !nvDpyIdIsInvalid(pDispEvo->framelock.server) && + pDispEvo->pFrameLockEvo->testMode) { + + FrameLockSetTestMode(pDispEvo->pFrameLockEvo, FALSE); + } + + /* + * It is important to set syncEnabled before calling FrameLockSetEnable. + * FrameLockSetEnable may call into GLS which may call back into the + * driver to query if framelock is enabled, which checks this field. 
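+     * If the field were only updated after FrameLockSetEnable() returned,
+     * such a re-entrant query would still observe the stale enable state.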
+ */ + pDispEvo->framelock.syncEnabled = val; + + a = FrameLockSetEnable(pDispEvo, val); + b = FrameLockSetWatchdog(pDispEvo->pFrameLockEvo, val); + + /* + * Since RM doesn't send a SYNC_READY event on sync disable through nvctrl, + * send it here. + */ + if (!val && a && b) { + nvSendFrameLockAttributeChangedEventEvo( + pDispEvo->pFrameLockEvo, + NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY, + FALSE); + pDispEvo->pFrameLockEvo->syncReadyLast = val; + } + + return (a && b); +} + +static NvBool GetFrameLockSync(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* return the cached state */ + + *val = ((pDispEvo->framelock.currentServerHead != NV_INVALID_HEAD) || + (pDispEvo->framelock.currentClientHeadsMask != 0x0)); + + return TRUE; +} + +static NvBool GetFrameLockSyncReady(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + /* return the cached state */ + + *val = pFrameLockEvo->syncReadyLast; + + return TRUE; +} + +static NvBool GetFrameLockStereoSync(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + return FrameLockGetStatusSync(pDispEvo, val, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC); +} + +static NvBool GetFrameLockTiming(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + return FrameLockGetStatusSync(pDispEvo, val, + NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING); +} + +static NvBool SetFrameLockTestSignal(NVDispEvoRec *pDispEvo, NvS64 val) +{ + if (!pDispEvo->pFrameLockEvo) return FALSE; + + /* The test signal can only be emitted if the GPU is the server + * and framelock is enabled. + */ + + if (!nvDpyIdIsInvalid(pDispEvo->framelock.server) && + pDispEvo->framelock.syncEnabled) { + return FrameLockSetTestMode(pDispEvo->pFrameLockEvo, val); + } + + return FALSE; +} + +static NvBool GetFrameLockTestSignal(NVDispEvoPtr pDispEvo, NvS64 *val) +{ + if (!pDispEvo->pFrameLockEvo || + nvDpyIdIsInvalid(pDispEvo->framelock.server)) { + return FALSE; + } + + *val = pDispEvo->pFrameLockEvo->testMode; + + return TRUE; +} + +static NvBool SetFrameLockVideoMode(NVFrameLockEvoPtr pFrameLockEvo, NvS64 val) +{ + if (pFrameLockEvo->videoModeReadOnly) { + return FALSE; + } + + return FrameLockSetVideoMode(pFrameLockEvo, val); +} + +static NvBool GetFrameLockVideoMode(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->videoMode; + + return TRUE; +} + +static NvBool GetFrameLockVideoModeValidValues( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues) +{ + nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_RANGE); + + pValidValues->u.range.min = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_AUTO; + pValidValues->u.range.max = + NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE_COMPOSITE_TRI_LEVEL; + + if (pFrameLockEvo->videoModeReadOnly) { + pValidValues->writable = FALSE; + } + + return TRUE; +} + +static NvBool GetFrameLockFpgaRevision(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->fpgaIdAndRevision; + + return TRUE; +} + +static NvBool GetFrameLockFirmwareMajorVersion( + const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, + NvS64 *val) +{ + *val = pFrameLockEvo->firmwareMajorVersion; + + return TRUE; +} + +static NvBool GetFrameLockFirmwareMinorVersion( + const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute 
attribute,
+                                  NvS64 *val)
+{
+    *val = pFrameLockEvo->firmwareMinorVersion;
+
+    return TRUE;
+}
+
+static NvBool GetFrameLockBoardId(const NVFrameLockEvoRec *pFrameLockEvo,
+                                  enum NvKmsFrameLockAttribute attribute,
+                                  NvS64 *val)
+{
+    *val = pFrameLockEvo->boardId;
+
+    return TRUE;
+}
+
+static NvBool GetFrameLockFpgaRevisionUnsupported(
+    NVDispEvoPtr pDispEvo,
+    NvS64 *val)
+{
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    *val = pDevEvo->badFramelockFirmware;
+
+    return TRUE;
+}
+
+static NvBool GetFrameLockSyncDelayResolution(
+    const NVFrameLockEvoRec *pFrameLockEvo,
+    enum NvKmsFrameLockAttribute attribute,
+    NvS64 *val)
+{
+    *val = pFrameLockEvo->syncSkewResolution;
+
+    return TRUE;
+}
+
+NvBool nvSetFrameLockDisplayConfigEvo(NVDpyEvoRec *pDpyEvo, NvS64 val)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+    unsigned int valid;
+    NvBool removeFromClients = FALSE;
+    NvBool removeFromServer = FALSE;
+
+    if (!pDispEvo || !pDispEvo->pFrameLockEvo) {
+        return FALSE;
+    }
+
+    /* Only set the config when framelock is disabled */
+
+    if (pDispEvo->framelock.syncEnabled) {
+        return FALSE;
+    }
+
+    valid = FrameLockGetValidDpyConfig(pDpyEvo);
+
+    /* Display device cannot be set as such */
+    if (!((1 << val) & valid)) {
+        return FALSE;
+    }
+
+    switch (val) {
+
+    case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER:
+        if (!nvDpyIdIsInvalid(pDispEvo->framelock.server) &&
+            !nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) {
+            NVDpyEvoPtr pOtherDpyEvo;
+
+            pOtherDpyEvo =
+                nvGetDpyEvoFromDispEvo(pDispEvo, pDispEvo->framelock.server);
+            if (pOtherDpyEvo) {
+                nvSendDpyAttributeChangedEventEvo(
+                    pOtherDpyEvo,
+                    NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG,
+                    NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED);
+            }
+        }
+        pDispEvo->framelock.server = pDpyEvo->id;
+        removeFromClients = TRUE;
+        break;
+
+    case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT:
+        pDispEvo->framelock.clients =
+            nvAddDpyIdToDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients);
+        removeFromServer = TRUE;
+        break;
+
+    case NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED:
+        removeFromClients = TRUE;
+        removeFromServer = TRUE;
+        break;
+
+    default:
+        return FALSE;
+    }
+
+    if (removeFromClients) {
+        if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) {
+            pDispEvo->framelock.clients =
+                nvDpyIdListMinusDpyId(pDispEvo->framelock.clients, pDpyEvo->id);
+        }
+    }
+
+    if (removeFromServer) {
+        if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) {
+            pDispEvo->framelock.server = nvInvalidDpyId();
+        }
+    }
+
+    return TRUE;
+}
+
+NvBool nvGetFrameLockDisplayConfigEvo(const NVDpyEvoRec *pDpyEvo, NvS64 *val)
+{
+    NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo;
+
+    if (!pDispEvo || !pDispEvo->pFrameLockEvo) {
+        return FALSE;
+    }
+
+    if (nvDpyIdsAreEqual(pDispEvo->framelock.server, pDpyEvo->id)) {
+        *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_SERVER;
+    } else if (nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->framelock.clients)) {
+        *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_CLIENT;
+    } else {
+        *val = NV_KMS_DPY_ATTRIBUTE_FRAMELOCK_DISPLAY_CONFIG_DISABLED;
+    }
+
+    return TRUE;
+}
+
+NvBool nvGetFrameLockDisplayConfigValidValuesEvo(
+    const NVDpyEvoRec *pDpyEvo,
+    struct NvKmsAttributeValidValuesCommonReply *pValidValues)
+{
+    if (pDpyEvo->pDispEvo->pFrameLockEvo == NULL) {
+        return FALSE;
+    }
+
+    nvAssert(pValidValues->type == NV_KMS_ATTRIBUTE_TYPE_INTBITS);
+
+    pValidValues->u.bits.ints = FrameLockGetValidDpyConfig(pDpyEvo);
+
+    return TRUE;
+}
+
+static const struct {
+    NvBool (*set)(NVDispEvoPtr pDispEvo, NvS64 value);
+    NvBool (*get)(NVDispEvoPtr pDispEvo, NvS64 *pValue);
+    enum NvKmsAttributeType type;
+}
DispAttributesDispatchTable[] = { + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK] = { + .set = NULL, + .get = GetFrameLock, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SYNC] = { + .set = SetFrameLockSync, + .get = GetFrameLockSync, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED] = { + .set = NULL, + .get = GetFrameLockFpgaRevisionUnsupported, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_STEREO_SYNC] = { + .set = NULL, + .get = GetFrameLockStereoSync, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TIMING] = { + .set = NULL, + .get = GetFrameLockTiming, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_TEST_SIGNAL] = { + .set = SetFrameLockTestSignal, + .get = GetFrameLockTestSignal, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_RESET] = { + .set = ResetHardwareOneDisp, + .get = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_FRAMELOCK_SET_SWAP_BARRIER] = { + .set = SetSwapBarrier, + .get = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_ALLOW_FLIPLOCK] = { + .set = nvAllowFlipLockEvo, + .get = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_DISP_ATTRIBUTE_QUERY_DP_AUX_LOG] = { + .set = NULL, + .get = nvRmQueryDpAuxLog, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, +}; + + +/*! + * Set pParams->attribute to pParams->value on the given disp. + */ +NvBool nvSetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsSetDispAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + if (DispAttributesDispatchTable[index].set == NULL) { + return FALSE; + } + + return DispAttributesDispatchTable[index].set(pDispEvo, + pParams->request.value); +} + + +/*! + * Get the value of pParams->attribute on the given disp. + */ +NvBool nvGetDispAttributeEvo(NVDispEvoPtr pDispEvo, + struct NvKmsGetDispAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + if (DispAttributesDispatchTable[index].get == NULL) { + return FALSE; + } + + return DispAttributesDispatchTable[index].get(pDispEvo, + &pParams->reply.value); +} + + +/*! + * Get the valid values of pParams->attribute on the given disp. + */ +NvBool nvGetDispAttributeValidValuesEvo( + const NVDispEvoRec *pDispEvo, + struct NvKmsGetDispAttributeValidValuesParams *pParams) +{ + struct NvKmsAttributeValidValuesCommonReply *pReply = + &pParams->reply.common; + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(DispAttributesDispatchTable)) { + return FALSE; + } + + /* + * FRAMELOCK and GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED + * can be queried without a pFrameLockEvo; all other + * attributes require a pFrameLockEvo. 
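+     *
+     * For example, querying valid values for FRAMELOCK_SYNC on a disp
+     * without a framelock device fails here, while FRAMELOCK itself still
+     * reports readable = TRUE and writable = FALSE from the dispatch table.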
+ */ + if (((pParams->request.attribute != NV_KMS_DISP_ATTRIBUTE_FRAMELOCK) && + (pParams->request.attribute != + NV_KMS_DISP_ATTRIBUTE_GPU_FRAMELOCK_FPGA_REVISION_UNSUPPORTED)) && + (pDispEvo->pFrameLockEvo == NULL)) { + return FALSE; + } + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->readable = (DispAttributesDispatchTable[index].get != NULL); + pReply->writable = (DispAttributesDispatchTable[index].set != NULL); + + pReply->type = DispAttributesDispatchTable[index].type; + + return TRUE; +} + + +static const struct { + NvBool (*set)(NVFrameLockEvoPtr pFrameLockEvo, NvS64 value); + NvBool (*get)(const NVFrameLockEvoRec *pFrameLockEvo, + enum NvKmsFrameLockAttribute attribute, NvS64 *pValue); + NvBool (*getValidValues)( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsAttributeValidValuesCommonReply *pValidValues); + enum NvKmsAttributeType type; +} FrameLockAttributesDispatchTable[] = { + [NV_KMS_FRAMELOCK_ATTRIBUTE_POLARITY] = { + .set = SetFrameLockPolarity, + .get = GetFrameLockPolarity, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BITMASK, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY] = { + .set = FrameLockSetSyncDelay, + .get = GetFrameLockSyncDelay, + .getValidValues = GetFrameLockSyncDelayValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_SYNC_MODE] = { + .set = SetHouseSyncMode, + .get = GetHouseSyncMode, + .getValidValues = GetHouseSyncModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_INTBITS, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_INTERVAL] = { + .set = FrameLockSetSyncInterval, + .get = GetFrameLockSyncInterval, + .getValidValues = GetFrameLockSyncIntervalValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_READY] = { + .set = NULL, + .get = GetFrameLockSyncReady, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_VIDEO_MODE] = { + .set = SetFrameLockVideoMode, + .get = GetFrameLockVideoMode, + .getValidValues = GetFrameLockVideoModeValidValues, + .type = NV_KMS_ATTRIBUTE_TYPE_RANGE, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FPGA_REVISION] = { + .set = NULL, + .get = GetFrameLockFpgaRevision, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MAJOR_VERSION] = { + .set = NULL, + .get = GetFrameLockFirmwareMajorVersion, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_FIRMWARE_MINOR_VERSION] = { + .set = NULL, + .get = GetFrameLockFirmwareMinorVersion, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_BOARD_ID] = { + .set = NULL, + .get = GetFrameLockBoardId, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_DELAY_RESOLUTION] = { + .set = NULL, + .get = GetFrameLockSyncDelayResolution, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_PORT0_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_PORT1_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_HOUSE_STATUS] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BOOLEAN, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_ETHERNET_DETECTED] = { + 
.set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_BITMASK, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_SYNC_RATE_4] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, + [NV_KMS_FRAMELOCK_ATTRIBUTE_INCOMING_HOUSE_SYNC_RATE] = { + .set = NULL, + .get = nvFrameLockGetStatusEvo, + .getValidValues = NULL, + .type = NV_KMS_ATTRIBUTE_TYPE_INTEGER, + }, +}; + +NvBool nvSetFrameLockAttributeEvo( + NVFrameLockEvoRec *pFrameLockEvo, + const struct NvKmsSetFrameLockAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) { + return FALSE; + } + + if (FrameLockAttributesDispatchTable[index].set == NULL) { + return FALSE; + } + + if ((FrameLockAttributesDispatchTable[index].type == + NV_KMS_ATTRIBUTE_TYPE_BOOLEAN) && + (pParams->request.value != TRUE) && + (pParams->request.value != FALSE)) { + return FALSE; + } + + return FrameLockAttributesDispatchTable[index].set(pFrameLockEvo, + pParams->request.value); +} + +NvBool nvGetFrameLockAttributeEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeParams *pParams) +{ + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) { + return FALSE; + } + + if (FrameLockAttributesDispatchTable[index].get == NULL) { + return FALSE; + } + + return FrameLockAttributesDispatchTable[index].get(pFrameLockEvo, + pParams->request.attribute, + &pParams->reply.value); +} + +NvBool nvGetFrameLockAttributeValidValuesEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + struct NvKmsGetFrameLockAttributeValidValuesParams *pParams) +{ + struct NvKmsAttributeValidValuesCommonReply *pReply = + &pParams->reply.common; + NvU32 index = pParams->request.attribute; + + if (index >= ARRAY_LEN(FrameLockAttributesDispatchTable)) { + return FALSE; + } + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->readable = (FrameLockAttributesDispatchTable[index].get != NULL); + pReply->writable = (FrameLockAttributesDispatchTable[index].set != NULL); + + pReply->type = FrameLockAttributesDispatchTable[index].type; + + /* + * The getValidValues function provides two important things: + * - If type==Range, then assigns reply::u::range. + * - If the attribute is not currently available, returns FALSE. + * If the getValidValues function is NULL, assume the attribute is + * available. The type must not be something requires assigning + * to reply::u. + */ + if (FrameLockAttributesDispatchTable[index].getValidValues == NULL) { + nvAssert(pReply->type != NV_KMS_ATTRIBUTE_TYPE_RANGE); + return TRUE; + } + + return FrameLockAttributesDispatchTable[index].getValidValues( + pFrameLockEvo, pReply); +} + +NvU32 nvGetFramelockServerHead(const NVDispEvoRec *pDispEvo) +{ + const NVDpyEvoRec *pDpyEvo = + nvGetDpyEvoFromDispEvo(pDispEvo, pDispEvo->framelock.server); + return (pDpyEvo != NULL) ? 
pDpyEvo->head : NV_INVALID_HEAD; +} + +NvU32 nvGetFramelockClientHeadsMask(const NVDispEvoRec *pDispEvo) +{ + NvU32 headsMask = 0x0; + const NVDpyEvoRec *pDpyEvo; + + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->framelock.clients, pDispEvo) { + if (pDpyEvo->head == NV_INVALID_HEAD) { + continue; + } + headsMask |= NVBIT(pDpyEvo->head); + } + return headsMask; +} + +void nvUpdateGLSFramelock(const NVDispEvoRec *pDispEvo, const NvU32 head, + const NvBool enable, const NvBool server) +{ + NVDpyEvoRec *pDpyEvo; + NvS64 value = enable | (server << 1); + + /* + * XXX[2Heads1OR] Optimize this loop in follow on code change when + * apiHead -> pDpyEvo mapping will get implemented. + */ + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->validDisplays, pDispEvo) { + if (pDpyEvo->head != head) { + continue; + } + + nvSendDpyAttributeChangedEventEvo(pDpyEvo, + NV_KMS_DPY_ATTRIBUTE_UPDATE_GLS_FRAMELOCK, + value); + } +} diff --git a/src/nvidia-modeset/src/nvkms-hal.c b/src/nvidia-modeset/src/nvkms-hal.c new file mode 100644 index 000000000..9b7934954 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-hal.c @@ -0,0 +1,212 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#include "nvkms-types.h" +#include "nvkms-cursor.h" +#include "nvkms-hal.h" +#include "nvkms-rm.h" + +#include "class/cl9470.h" // NV9470_DISPLAY +#include "class/cl9570.h" // NV9570_DISPLAY +#include "class/cl9770.h" // NV9770_DISPLAY +#include "class/cl9870.h" // NV9870_DISPLAY +#include "class/clc370.h" // NVC370_DISPLAY +#include "class/clc570.h" // NVC570_DISPLAY +#include "class/clc670.h" // NVC670_DISPLAY + +#include "class/cl947d.h" // NV947D_CORE_CHANNEL_DMA +#include "class/cl957d.h" // NV957D_CORE_CHANNEL_DMA +#include "class/cl977d.h" // NV977D_CORE_CHANNEL_DMA +#include "class/cl987d.h" // NV987D_CORE_CHANNEL_DMA +#include "class/clc37d.h" // NVC37D_CORE_CHANNEL_DMA +#include "class/clc37e.h" // NVC37E_WINDOW_CHANNEL_DMA +#include "class/clc57d.h" // NVC57D_CORE_CHANNEL_DMA +#include "class/clc57e.h" // NVC57E_WINDOW_CHANNEL_DMA +#include "class/clc67d.h" // NVC67D_CORE_CHANNEL_DMA +#include "class/clc67e.h" // NVC67E_WINDOW_CHANNEL_DMA + +extern NVEvoHAL nvEvo94; +extern NVEvoHAL nvEvoC3; +extern NVEvoHAL nvEvoC5; +extern NVEvoHAL nvEvoC6; + +enum NvKmsAllocDeviceStatus nvAssignEvoCaps(NVDevEvoPtr pDevEvo) +{ +#define ENTRY(_classPrefix, \ + _pEvoHal, \ + _supportsInbandStereoSignaling, \ + _supportsDP13, \ + _supportsHDMI20, \ + _inputLutAppliesToBase, \ + _genericPageKind, \ + _validNIsoFormatMask, \ + _maxPitch, \ + _maxWidthInBytes, \ + _maxWidthInPixels, \ + _maxHeight, \ + _coreChannelDmaArmedOffset, \ + _dmaArmedSize) \ + { \ + .class = NV ## _classPrefix ## 70_DISPLAY, \ + .pEvoHal = _pEvoHal, \ + .coreChannelDma = { \ + .coreChannelClass = \ + NV ## _classPrefix ## 7D_CORE_CHANNEL_DMA, \ + .dmaArmedSize = _dmaArmedSize, \ + .dmaArmedOffset = \ + _coreChannelDmaArmedOffset, \ + }, \ + .evoCaps = { \ + .supportsDP13 = _supportsDP13, \ + .supportsInbandStereoSignaling = \ + _supportsInbandStereoSignaling, \ + .supportsHDMI20 = _supportsHDMI20, \ + .validNIsoFormatMask = _validNIsoFormatMask, \ + .inputLutAppliesToBase = _inputLutAppliesToBase, \ + .maxPitchValue = _maxPitch, \ + .maxWidthInBytes = _maxWidthInBytes, \ + .maxWidthInPixels = _maxWidthInPixels, \ + .maxHeight = _maxHeight, \ + .genericPageKind = _genericPageKind, \ + .maxRasterWidth = DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_RASTER_SIZE_WIDTH), \ + .maxRasterHeight = DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_RASTER_SIZE_HEIGHT),\ + } \ + } + +#define EVO_CORE_CHANNEL_DMA_ARMED_OFFSET 0x0 + +#define EVO_CORE_CHANNEL_DMA_ARMED_SIZE 0x1000 + + +/* Pre-NVDisplay EVO entries */ +#define ENTRY_EVO(_classPrefix, ...) \ + ENTRY(_classPrefix, __VA_ARGS__, \ + ((1 << NVKMS_NISO_FORMAT_LEGACY) | \ + (1 << NVKMS_NISO_FORMAT_FOUR_WORD)), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_STORAGE_PITCH), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_STORAGE_PITCH) * \ + NVKMS_BLOCK_LINEAR_GOB_WIDTH, \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_SIZE_WIDTH), \ + DRF_MASK(NV ## _classPrefix ## 7D_HEAD_SET_SIZE_HEIGHT), \ + EVO_CORE_CHANNEL_DMA_ARMED_OFFSET, \ + EVO_CORE_CHANNEL_DMA_ARMED_SIZE) + + +/* + * The file + * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_display_withoffset.ref.txt + * defines: + * + * #define NV_UDISP_FE_CHN_ASSY_BASEADR_CORE 0x00680000 + * #define NV_UDISP_FE_CHN_ARMED_BASEADR_CORE (0x00680000+32768) + * + * The NVD_CORE_CHANNEL_DMA_ARMED_OFFSET is calculated as + * (NV_UDISP_FE_CHN_ARMED_BASEADR_CORE - NV_UDISP_FE_CHN_ASSY_BASEADR_CORE). 
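+ *
+ * That is: (0x00680000 + 32768) - 0x00680000 = 32768 = 0x8000, the value
+ * defined below.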
+ */ +#define NVD_CORE_CHANNEL_DMA_ARMED_OFFSET 0x8000 + +/* + * From the above in dev_display_withoffset.ref.txt, ARMED is the upper + * 32k of the core channel's 64k space. + */ +#define NVD_CORE_CHANNEL_DMA_ARMED_SIZE 0x8000 + + +/* + * The file + * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/turing/tu104/dev_mmu.ref.txt + * defines: + * + * #define NV_MMU_PTE_KIND_GENERIC_MEMORY 0x06 + * + * The file + * https://github.com/NVIDIA/open-gpu-doc/blob/master/manuals/volta/gv100/dev_mmu.ref.txt + * defines: + * + * #define NV_MMU_PTE_KIND_GENERIC_16BX2 0xfe + * + * Which correspond to the "generic" page kind used for non-compressed single- + * sample blocklinear color images on Turing+ and pre-Turing GPUs respectively. + * This is the only blocklinear memory layout display ever cares about. + */ +#define TURING_GENERIC_KIND 0x06 +#define FERMI_GENERIC_KIND 0xfe + + +/* NVDisplay and later entries */ +#define ENTRY_NVD(_classPrefix, ...) \ + ENTRY(_classPrefix, __VA_ARGS__, \ + (1 << NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY), \ + DRF_MASK(NV ## _classPrefix ## 7E_SET_PLANAR_STORAGE_PITCH), \ + DRF_MASK(NV ## _classPrefix ## 7E_SET_PLANAR_STORAGE_PITCH) * \ + NVKMS_BLOCK_LINEAR_GOB_WIDTH, \ + DRF_MASK(NV ## _classPrefix ## 7E_SET_SIZE_IN_WIDTH), \ + DRF_MASK(NV ## _classPrefix ## 7E_SET_SIZE_IN_WIDTH), \ + NVD_CORE_CHANNEL_DMA_ARMED_OFFSET, \ + NVD_CORE_CHANNEL_DMA_ARMED_SIZE) + + static const struct { + NvU32 class; + const NVEvoHAL *pEvoHal; + const NVEvoCoreChannelDmaRec coreChannelDma; + const NVEvoCapsRec evoCaps; + } dispTable[] = { + /* + * genericPageKind--------------------+ + * inputLutAppliesToBase --------+ | + * supportsHDMI20 ------------+ | | + * supportsDP13 -----------+ | | | + * inbandStereoSignaling+ | | | | + * pEvoHal ----------+ | | | | | + * classPrefix | | | | | | + * | | | | | | | + */ + ENTRY_NVD(C6, &nvEvoC6, 1, 1, 1, 0, TURING_GENERIC_KIND), + ENTRY_NVD(C5, &nvEvoC5, 1, 1, 1, 0, TURING_GENERIC_KIND), + ENTRY_NVD(C3, &nvEvoC3, 1, 1, 1, 0, FERMI_GENERIC_KIND), + ENTRY_EVO(98, &nvEvo94, 1, 1, 1, 1, FERMI_GENERIC_KIND), + ENTRY_EVO(97, &nvEvo94, 1, 1, 1, 1, FERMI_GENERIC_KIND), + ENTRY_EVO(95, &nvEvo94, 1, 0, 1, 1, FERMI_GENERIC_KIND), + ENTRY_EVO(94, &nvEvo94, 1, 0, 0, 1, FERMI_GENERIC_KIND), + }; + + int i; + + for (i = 0; i < ARRAY_LEN(dispTable); i++) { + if (nvRmEvoClassListCheck(pDevEvo, dispTable[i].class)) { + pDevEvo->hal = dispTable[i].pEvoHal; + pDevEvo->dispClass = dispTable[i].class; + pDevEvo->caps = dispTable[i].evoCaps; + + pDevEvo->coreChannelDma = dispTable[i].coreChannelDma; + nvAssert(nvRmEvoClassListCheck( + pDevEvo, + pDevEvo->coreChannelDma.coreChannelClass)); + + return nvInitDispHalCursorEvo(pDevEvo); + } + } + + return NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; +} diff --git a/src/nvidia-modeset/src/nvkms-hdmi.c b/src/nvidia-modeset/src/nvkms-hdmi.c new file mode 100644 index 000000000..a67b78001 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-hdmi.c @@ -0,0 +1,2123 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This source file contains codes for enabling HDMI audio. + */ + + +#include "nvkms-dpy.h" +#include "nvkms-hdmi.h" +#include "nvkms-evo.h" +#include "nvkms-modepool.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" +#include "nvkms-vrr.h" +#include "dp/nvdp-connector.h" + +#include "hdmi_spec.h" +#include "nvos.h" + +#include // NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS +#include // NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM +#include // NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS +#include // NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER + +#include + +#define CAP_HDMI_SUPPORT_GPU 0x00000001 +#define CAP_HDMI_SUPPORT_MONITOR 0x00000002 + +static inline const NVT_EDID_CEA861_INFO *GetExt861(const NVParsedEdidEvoRec *pParsedEdid, + int extIndex) +{ + if (!pParsedEdid->valid || extIndex > 1) { + return NULL; + } + + return (extIndex == 0) ? &pParsedEdid->info.ext861 : + &pParsedEdid->info.ext861_2; +} + +/* + * CalculateVideoInfoFrameColorFormat() - calculate colorspace, + * colorimetry and colorrange for video infoframe. + */ +static void CalculateVideoInfoFrameColorFormat( + const NVAttributesSetEvoRec *pAttributesSet, + const NvU32 hdTimings, + NVT_VIDEO_INFOFRAME_CTRL *pCtrl) +{ + // sets video infoframe colorspace (RGB/YUV). + switch (pAttributesSet->colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + pCtrl->color_space = NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr420; + break; + default: + nvAssert(!"Invalid colorSpace value"); + break; + } + + // sets video infoframe colorimetry. 
+ switch (pAttributesSet->colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + pCtrl->colorimetry = NVT_COLORIMETRY_RGB; + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + if (hdTimings) { + pCtrl->colorimetry = NVT_COLORIMETRY_YUV_709; + } else { + pCtrl->colorimetry = NVT_COLORIMETRY_YUV_601; + } + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr420: + pCtrl->colorimetry = NVT_COLORIMETRY_YUV_709; + break; + default: + nvAssert(!"Invalid colorSpace value"); + break; + } + + // sets video infoframe colorrange. + switch (pAttributesSet->colorRange) { + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_FULL: + pCtrl->rgb_quantization_range = + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_FULL_RANGE; + break; + case NV_KMS_DPY_ATTRIBUTE_COLOR_RANGE_LIMITED: + pCtrl->rgb_quantization_range = + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE; + break; + default: + nvAssert(!"Invalid colorRange value"); + break; + } + + // Only limited color range is allowed with YUV444 or YUV422 color spaces + nvAssert(!(((pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr422) || + (pCtrl->color_space == NVT_VIDEO_INFOFRAME_BYTE1_Y1Y0_YCbCr444)) && + (pCtrl->rgb_quantization_range != + NVT_VIDEO_INFOFRAME_BYTE3_Q1Q0_LIMITED_RANGE))); +} + +/* + * GetHDMISupportCap() - find the HDMI capabilities of + * the gpu and the display device. + */ + +static NvU32 GetHDMISupportCap(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 hdmiCap = 0; + int extIndex; + + if (pDpyEvo->hdmiCapable) { + hdmiCap |= CAP_HDMI_SUPPORT_GPU; + } + + for (extIndex = 0; TRUE; extIndex++) { + + int vsdbIndex; + const NVT_EDID_CEA861_INFO *pExt861 = + GetExt861(&pDpyEvo->parsedEdid, extIndex); + + if (pExt861 == NULL) { + break; + } + + if (pExt861->revision <= NVT_CEA861_REV_ORIGINAL) { + continue; + } + + for (vsdbIndex = 0; vsdbIndex < pExt861->total_vsdb; vsdbIndex++) { + if (pExt861->vsdb[vsdbIndex].ieee_id == NVT_CEA861_HDMI_IEEE_ID) { + hdmiCap |= CAP_HDMI_SUPPORT_MONITOR; + return hdmiCap; + } + } + } + + return hdmiCap; +} + +/*! + * Return whether the GPU supports HDMI and the display is connected + * via HDMI. + */ +NvBool nvDpyIsHdmiEvo(const NVDpyEvoRec *pDpyEvo) +{ + NvU32 hdmiCap; + + hdmiCap = GetHDMISupportCap(pDpyEvo); + + return ((hdmiCap & CAP_HDMI_SUPPORT_GPU) && + (hdmiCap & CAP_HDMI_SUPPORT_MONITOR)); +} + +/*! + * Updates the display's HDMI 2.0 capabilities to the RM. + */ +void nvUpdateHdmiCaps(NVDpyEvoPtr pDpyEvo) +{ + NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS params = { 0 }; + NVParsedEdidEvoPtr pParsedEdid = &pDpyEvo->parsedEdid; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (!pDevEvo->caps.supportsHDMI20 || + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + return; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.caps = 0; + + /* + * nvUpdateHdmiCaps() gets called on dpy's connect/disconnect events + * to set/clear capabilities, clear capabilities if parsed edid + * is not valid. 
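+     * When the EDID is not valid, params.caps simply remains 0, so the
+     * RM control call below clears the sink capabilities.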
+ */ + if (pParsedEdid->valid) { + const NVT_HDMI_FORUM_INFO *pHdmiInfo = &pParsedEdid->info.hdmiForumInfo; + if (pHdmiInfo->scdc_present) { + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _SCDC_SUPPORTED, _TRUE); + } + + if (pHdmiInfo->max_TMDS_char_rate > 0) { + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _GT_340MHZ_CLOCK_SUPPORTED, _TRUE); + } + + if (pHdmiInfo->lte_340Mcsc_scramble) { + if (!pHdmiInfo->scdc_present) { + nvEvoLogDisp(pDispEvo, + EVO_LOG_WARN, + "EDID inconsistency: SCDC is not present in EDID, but EDID requests 340Mcsc scrambling."); + } + + params.caps |= DRF_DEF(0073, _CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + _LTE_340MHZ_SCRAMBLING_SUPPORTED, _TRUE); + } + + /* HDMI Fixed-rate link information */ + if (pDevEvo->hal->caps.supportsHDMIFRL) { + nvAssert((pHdmiInfo->max_FRL_Rate & + ~DRF_MASK(NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED)) == 0); + params.caps |= DRF_NUM(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _MAX_FRL_RATE_SUPPORTED, + pHdmiInfo->max_FRL_Rate); + + if (pHdmiInfo->dsc_1p2) { + nvAssert((pHdmiInfo->dsc_1p2 & + ~DRF_MASK(NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED)) == 0); + params.caps |= DRF_NUM(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _DSC_MAX_FRL_RATE_SUPPORTED, + pHdmiInfo->dsc_1p2); + params.caps |= DRF_DEF(0073_CTRL_CMD_SPECIFIC, _SET_HDMI_SINK_CAPS, _DSC_12_SUPPORTED, _TRUE); + } + } + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS failed"); + } +} + +/* + * HdmiSendEnable() - Used to signal RM to enable various hdmi components + * such as audio engine. + */ + +static void HdmiSendEnable(NVDpyEvoPtr pDpyEvo, NvBool hdmiEnable) +{ + NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDpyEvo->pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + params.enable = hdmiEnable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE failed"); + } +} + +/*! + * Disable sending the vendor specific infoframe on this display. + */ +static void DisableVendorSpecificInfoFrame( + const NVDispEvoRec *pDispEvo, + const NvU32 head) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.type = pktType_VendorSpecInfoFrame; + params.transmitControl = DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_CTRL_TRANSMIT_CONTROL, _ENABLE, _NO); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET_CTRL failed"); + } +} + +/* + * SendInfoFrame() - Send infoframe to the hardware through the hdmipkt + * library. 
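+ *
+ * For reference, CEA-861 defines the infoframe checksum byte such that the
+ * header, checksum, and payload bytes sum to 0 (mod 256); e.g., if the other
+ * bytes sum to 0x1A3 (low byte 0xA3), the checksum byte is 0x100 - 0xA3 = 0x5D.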
+ */ + +static void SendInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + NVHDMIPKT_TC transmitControl, + NVT_INFOFRAME_HEADER *pInfoFrameHeader, + NvU32 infoframeSize) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVHDMIPKT_TYPE hdmiLibType; + NVHDMIPKT_RESULT ret; + NvU8 *infoframe = NULL; + NvU8 hdmiPacketType, checksum; + NvU32 i; + NvU8 *pPayload; + size_t headerSize; + NvBool needChecksum = + (transmitControl & DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _EN)); + + /* + * The 'type' the timing library writes into the NVT_INFOFRAME_HEADER + * structure is not the same as the protocol values that hardware expects + * to see in the real packet header; those are defined in the + * HDMI_PACKET_TYPE enums (hdmi_pktType_*) from hdmi_spec.h; use those + * to fill in the first byte of the packet. It's *also* not the type that + * the HDMI library expects to see in its NvHdmiPkt_PacketWrite call; those + * are NVHDMIPKT_TYPE_*. Determine both below. + */ + switch (pInfoFrameHeader->type) { + default: + nvAssert(0); + return; + case NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET: + hdmiLibType = NVHDMIPKT_TYPE_GENERIC; + hdmiPacketType = hdmi_pktType_ExtendedMetadata; + break; + case NVT_INFOFRAME_TYPE_VIDEO: + hdmiLibType = NVHDMIPKT_TYPE_AVI_INFOFRAME; + hdmiPacketType = hdmi_pktType_AviInfoFrame; + break; + case NVT_INFOFRAME_TYPE_VENDOR_SPECIFIC: + hdmiLibType = NVHDMIPKT_TYPE_VENDOR_SPECIFIC_INFOFRAME; + hdmiPacketType = hdmi_pktType_VendorSpecInfoFrame; + break; + } + + /* + * These structures are weird. The NVT_VIDEO_INFOFRAME, + * NVT_VENDOR_SPECIFIC_INFOFRAME, NVT_EXTENDED_METADATA_PACKET_INFOFRAME, + * etc structures are *kind of* what we want to send to the hdmipkt library, + * except the type in the header is different, and a single checksum byte + * may need to be inserted *between* the header and the payload (requiring + * us to allocate a buffer one byte larger). + */ + infoframe = nvAlloc(infoframeSize + (needChecksum ? sizeof(checksum) : 0)); + if (infoframe == NULL) { + return; + } + + /* + * The fields and size of NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER + * match with those of NVT_INFOFRAME_HEADER at the time of writing, but + * nvtiming.h declares them separately. To be safe, special case + * NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET. + */ + if (pInfoFrameHeader->type == NVT_INFOFRAME_TYPE_EXTENDED_METADATA_PACKET) { + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *pExtMetadataHeader = + (NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER *) pInfoFrameHeader; + + pPayload = (NvU8 *)(pExtMetadataHeader + 1); + headerSize = sizeof(NVT_EXTENDED_METADATA_PACKET_INFOFRAME_HEADER); + } else { + pPayload = (NvU8 *)(pInfoFrameHeader + 1); + headerSize = sizeof(NVT_INFOFRAME_HEADER); + } + + infoframe[0] = hdmiPacketType; + nvkms_memcpy(&infoframe[1], &((NvU8*) pInfoFrameHeader)[1], headerSize - 1); + + if (needChecksum) { + /* PB0: checksum */ + checksum = 0; + infoframe[headerSize] = 0; + for (i = 0; i < infoframeSize + sizeof(checksum); i++) { + checksum += infoframe[i]; + } + infoframe[headerSize] = ~checksum + 1; + } + + /* copy the payload, starting after the 3-byte header and checksum */ + nvkms_memcpy(&infoframe[headerSize + (needChecksum ? 
sizeof(checksum) : 0)], + pPayload, infoframeSize - headerSize /* payload size */); + + ret = NvHdmiPkt_PacketWrite(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + pHeadState->activeRmId, + head, + hdmiLibType, + transmitControl, + infoframeSize, + infoframe); + + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + } + + nvFree(infoframe); +} + +/* + * SendVideoInfoFrame() - Construct video infoframe using provided EDID and call + * SendInfoFrame() to send it to RM. + */ +static void SendVideoInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVAttributesSetEvoRec *pAttributesSet, + const NvBool hdTimings, + const NVT_VIDEO_INFOFRAME_CTRL *pCtrl, + NVT_EDID_INFO *pEdidInfo) +{ + NVT_VIDEO_INFOFRAME_CTRL videoCtrl = *pCtrl; + NVT_VIDEO_INFOFRAME VideoInfoFrame; + NVT_STATUS status; + + + CalculateVideoInfoFrameColorFormat(pAttributesSet, hdTimings, &videoCtrl); + + status = NvTiming_ConstructVideoInfoframe(pEdidInfo, + &videoCtrl, + NULL, &VideoInfoFrame); + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Video InfoFrame"); + return; + } + + SendInfoFrame(pDispEvo, + head, + NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME, + (NVT_INFOFRAME_HEADER *) &VideoInfoFrame, + sizeof(VideoInfoFrame)); +} + +/* + * SendHDMI3DVendorSpecificInfoFrame() - Construct vendor specific infoframe + * using provided EDID and call SendInfoFrame() to send it to RM. Currently + * hardcoded to send the infoframe necessary for HDMI 3D. + */ + +static void +SendHDMI3DVendorSpecificInfoFrame(const NVDispEvoRec *pDispEvo, + const NvU32 head, NVT_EDID_INFO *pEdidInfo) +{ + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + NVT_VENDOR_SPECIFIC_INFOFRAME_CTRL vendorCtrl = { + .Enable = 1, + .HDMIFormat = NVT_HDMI_VS_BYTE4_HDMI_VID_FMT_3D, + .HDMI_VIC = NVT_HDMI_VS_BYTE5_HDMI_VIC_NA, + .ThreeDStruc = NVT_HDMI_VS_BYTE5_HDMI_3DS_FRAMEPACK, + .ThreeDDetail = NVT_HDMI_VS_BYTE_OPT1_HDMI_3DEX_NA, + .MetadataPresent = 0, + .MetadataType = NVT_HDMI_VS_BYTE_OPT2_HDMI_METADATA_TYPE_NA, + }; + NVT_VENDOR_SPECIFIC_INFOFRAME vendorInfoFrame; + NVT_STATUS status; + + if (!pEdidInfo->HDMI3DSupported) { + // Only send the HDMI 3D infoframe if the display supports HDMI 3D + return; + } + + // Send the infoframe with HDMI 3D configured if we're setting an HDMI 3D + // mode. + if (!pHeadState->timings.hdmi3D) { + DisableVendorSpecificInfoFrame(pDispEvo, head); + return; + } + + status = NvTiming_ConstructVendorSpecificInfoframe(pEdidInfo, + &vendorCtrl, + &vendorInfoFrame); + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Vendor Specific InfoFrame"); + return; + } + + SendInfoFrame(pDispEvo, + head, + NVHDMIPKT_TRANSMIT_CONTROL_ENABLE_EVERY_FRAME, + &vendorInfoFrame.Header, + sizeof(vendorInfoFrame)); +} + +/* + * Send video and 3D InfoFrames for HDMI. 
+ */ +void nvUpdateHdmiInfoFrames(const NVDispEvoRec *pDispEvo, + const NvU32 head, + const NVAttributesSetEvoRec *pAttributesSet, + const NvBool hdTimings, + const NVT_VIDEO_INFOFRAME_CTRL *pCtrl, + NVDpyEvoRec *pDpyEvo) +{ + if (!nvDpyIsHdmiEvo(pDpyEvo)) { + return; + } + + if (!pDpyEvo->parsedEdid.valid) { + nvEvoLogDispDebug( + pDispEvo, EVO_LOG_WARN, + "No EDID: cannot construct video/vendor-specific info frame"); + return; + } + + SendVideoInfoFrame(pDispEvo, + head, + pAttributesSet, + hdTimings, + pCtrl, + &pDpyEvo->parsedEdid.info); + + SendHDMI3DVendorSpecificInfoFrame(pDispEvo, + head, + &pDpyEvo->parsedEdid.info); +} + +static void SetDpAudioMute(const NVDispEvoRec *pDispEvo, + const NvU32 displayId, const NvBool mute) +{ + NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = displayId; + params.mute = mute; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "NvRmControl" + "(NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM) failed" + "return status = %d...", ret); + } +} + +static void SetDpAudioEnable(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + /* Mute audio stream before disabling it */ + if (!enable) { + SetDpAudioMute(pDispEvo, pHeadState->activeRmId, TRUE); + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.enable = enable; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "%s: Failed to %s DisplayPort audio stream-%u", + pConnectorEvo->name, + enable ? 
"enable" : "disable", + head); + } + + /* Unmute audio stream after enabling it */ + if (enable) { + SetDpAudioMute(pDispEvo, pHeadState->activeRmId, FALSE); + } +} + +static void EnableHdmiAudio(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable) +{ + static const NvU8 InfoframeMutePacket[] = { + pktType_GeneralControl, 0, 0, HDMI_GENCTRL_PACKET_MUTE_ENABLE, 0, 0, 0, 0, + 0, 0 + }; + static const NvU8 InfoframeUnMutePacket[] = { + pktType_GeneralControl, 0, 0, HDMI_GENCTRL_PACKET_MUTE_DISABLE, 0, 0, 0, 0, + 0, 0 + }; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = pHeadState->activeRmId; + params.transmitControl = + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ENABLE, _YES) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _OTHER_FRAME, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _SINGLE_FRAME, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _ON_HBLANK, _DISABLE) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _VIDEO_FMT, _SW_CONTROLLED) | + DRF_DEF(0073_CTRL_SPECIFIC, _SET_OD_PACKET_TRANSMIT_CONTROL, _RESERVED_LEGACY_MODE, _NO); + + params.packetSize = sizeof(InfoframeMutePacket); + + nvAssert(sizeof(InfoframeMutePacket) == sizeof(InfoframeUnMutePacket)); + + nvkms_memcpy(params.aPacket, + enable ? InfoframeUnMutePacket : InfoframeMutePacket, + params.packetSize); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET failed"); + } +} + +static const NVT_EDID_CEA861_INFO *GetMaxSampleRateExtBlock( + const NVParsedEdidEvoRec *pParsedEdid, + NvU32 *pMaxFreqSupported) +{ + const NVT_EDID_CEA861_INFO *pExt861 = NULL; + int extIndex; + int i; + + *pMaxFreqSupported = 0; + + for (extIndex = 0; TRUE; extIndex++) { + + NvU8 sampleRateMask = 0; + const NVT_EDID_CEA861_INFO *pTmpExt861 = + GetExt861(pParsedEdid, extIndex); + NvU32 maxFreqSupported = 0; + + if (pTmpExt861 == NULL) { + break; + } + + if (pTmpExt861->revision == NVT_CEA861_REV_NONE) { + continue; + } + + /* loop through all SAD to find out the max supported rate */ + for (i = 0; i < NVT_CEA861_AUDIO_MAX_DESCRIPTOR; i++) { + + const NvU8 byte1 = pTmpExt861->audio[i].byte1; + const NvU8 byte2 = pTmpExt861->audio[i].byte2; + + if (byte1 == 0) { + break; + } + + if ((byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK) > sampleRateMask) { + sampleRateMask = byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK; + } + } + + if (sampleRateMask != 0) { + /* get the highest bit index */ + for (i = 7; i >= 1; i--) { + if ((1<<(i-1)) & sampleRateMask) { + maxFreqSupported = i; + break; + } + } + } else if (pTmpExt861->basic_caps & NVT_CEA861_CAP_BASIC_AUDIO) { + /* + * no short audio descriptor found, try the basic cap + * Uncompressed, two channel, digital audio. Exact parameters are + * determined by the interface specification used with CEA-861-D + * (e.g., 2 channel IEC 60958 LPCM, 32, 44.1, and 48 kHz + * sampling rates, 16 bits/sample). 
+ */ + maxFreqSupported = + NV0073_CTRL_DFP_ELD_AUDIO_CAPS_MAX_FREQ_SUPPORTED_0480KHZ; + } + + if (maxFreqSupported > *pMaxFreqSupported) { + *pMaxFreqSupported = maxFreqSupported; + pExt861 = pTmpExt861; + } + } + + return pExt861; +} + +/* + * Search a CEA-861 block for a Vendor Specific Data Block + * with an IEEE "HDMI Licensing, LLC" OUI. + * + * If found, returns VSDB_DATA * to Vendor Specific Data Block + * If !found, returns NULL + */ +static const VSDB_DATA *GetVsdb(const NVT_EDID_CEA861_INFO *pExt861) +{ + const VSDB_DATA *pVsdb = NULL; + + for (int i = 0; i < pExt861->total_vsdb; i++) { + if (pExt861->vsdb[i].ieee_id == NVT_CEA861_HDMI_IEEE_ID) { + pVsdb = &pExt861->vsdb[i]; + break; + } + } + return pVsdb; +} + +static NvBool FillELDBuffer(const NvU32 displayId, + const NvBool isDisplayPort, + const NVParsedEdidEvoRec *pParsedEdid, + NVEldEvoRec *pEld, + NvU32 *pMaxFreqSupported) +{ + const NVT_EDID_CEA861_INFO *pExt861; + NvU32 SADCount, monitorNameLen; + NvU8 name[NVT_EDID_LDD_PAYLOAD_SIZE + 1]; + NVT_STATUS status; + NvU32 i; + NvU8 EldSAI = 0; + NvU8 EldAudSynchDelay = 0; + const VSDB_DATA *pVsdb; + + pExt861 = GetMaxSampleRateExtBlock(pParsedEdid, pMaxFreqSupported); + + if (pExt861 == NULL) { + return FALSE; + } + + /* ELD header block: offset 0: ELD_Ver */ + pEld->buffer[0] = NVT_ELD_VER_2 << 3; + + /* Baseline block: offset 4: CEA_EDID_Ver */ + pEld->buffer[4] = pExt861->revision << 5; + + /* offset 5: SAD_Count */ + SADCount = 0; + while (SADCount < NVT_CEA861_AUDIO_MAX_DESCRIPTOR && + pExt861->audio[SADCount].byte1 != 0) { + SADCount++; + } + pEld->buffer[5] = SADCount << 4; + + /* offset 5: Conn_Type */ + if (isDisplayPort) { + pEld->buffer[5] |= NVT_ELD_CONN_TYPE_DP << 2; + } else { + pEld->buffer[5] |= NVT_ELD_CONN_TYPE_HDMI << 2; + } + + /* offset 5 b0: HDCP; always 0 for now */ + + pVsdb = GetVsdb(pExt861); + /* offset 5 b1=1 if Supports_AI; always 0 for DP */ + if ((!isDisplayPort) && + (pVsdb != NULL) && + (pVsdb->vendor_data_size > 2)) { + EldSAI = pVsdb->vendor_data[2]; + EldSAI >>= 7; + } + pEld->buffer[5] |= EldSAI << 1; + + /* offset 6: Aud_Synch_delay in units of 2 msec */ + if ((pVsdb != NULL) && + (pVsdb->vendor_data_size > 6)) { + EldAudSynchDelay = pVsdb->vendor_data[6]; + } + pEld->buffer[6] = EldAudSynchDelay; + + /* offset 7: speaker allocation multiple allocation is not supported in ELD */ + pEld->buffer[7] = pExt861->speaker[0].byte1; + + /* + * offset 8 ~ 15: port ID; nobody knows what port ID is, so far DD/RM/Audio + * all agree to fill it with display Id. + */ + pEld->buffer[8] = displayId & 0xff; + pEld->buffer[9] = (displayId >> 8) & 0xff; + pEld->buffer[10] = (displayId >> 16) & 0xff; + pEld->buffer[11] = (displayId >> 24) & 0xff; + + /* offset 16 ~ 17: manufacturer name */ + pEld->buffer[16] = pParsedEdid->info.manuf_id & 0xff; + pEld->buffer[17] = pParsedEdid->info.manuf_id >> 8; + /* offset 18 ~ 19: product code */ + pEld->buffer[18] = pParsedEdid->info.product_id & 0xff; + pEld->buffer[19] = (pParsedEdid->info.product_id >> 8) & 0xff; + + /* + * offset 20 ~ 20 + MNL - 1: monitor name string (MNL - Monitor Name + * Length) + */ + + monitorNameLen = 0; + + status = NvTiming_GetProductName(&pParsedEdid->info, name, sizeof(name)); + + if (status == NVT_STATUS_SUCCESS) { + /* + * NvTiming_GetProductName() returns a nul-terminated string, but the + * string in the EDID is terminated with 0x0A and padded with 0x20. + * Put back these special characters. 
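+         * For example, a (hypothetical) name "NVKMS" comes back as
+         * 'N','V','K','M','S','\0',...; the loop below turns it into
+         * 'N','V','K','M','S',0x0A,0x20,... so the EDID-style terminator
+         * and padding are restored.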
+ */ + NvBool pastTerminator = FALSE; + NvU32 i; + + for (i = 0; i < NVT_EDID_LDD_PAYLOAD_SIZE; i++) { + if (pastTerminator) { + name[i] = 0x20; + } + if (name[i] == '\0') { + name[i] = 0x0A; + pastTerminator = TRUE; + } + } + + monitorNameLen = NVT_EDID_LDD_PAYLOAD_SIZE; + pEld->buffer[4] |= NVT_EDID_LDD_PAYLOAD_SIZE; + nvkms_memcpy(&pEld->buffer[20], name, + NVT_EDID_LDD_PAYLOAD_SIZE); + } + + /* offset 20 + MNL ~ 20 + MNL + (3 * SAD_Count) - 1 : CEA_SADs */ + if (SADCount) { + const size_t sadSize = SADCount * sizeof(NVT_3BYTES); + const size_t bufferSize = sizeof(pEld->buffer) - monitorNameLen - 20; + const size_t copySize = NV_MIN(bufferSize, sadSize); + nvAssert(copySize == sadSize); + + nvkms_memcpy(&pEld->buffer[20 + monitorNameLen], + &pExt861->audio[0], copySize); + } + + /* + * The reserved section is not used yet. + * offset 20 + MNL + (3 * SAD_Count) ~ 4 + Baseline_ELD_Len * 4 - 1; + */ + + /* Baseline block size in DWORD */ + i = (16 + monitorNameLen + SADCount * sizeof(NVT_3BYTES) + + sizeof(NvU32) - 1) / sizeof(NvU32); + pEld->buffer[2] = (NvU8)i; + + /* Update the entire ELD space */ + pEld->size = NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER; + + return TRUE; +} + +void nvHdmiDpConstructHeadAudioState(const NvU32 displayId, + const NVDpyEvoRec *pDpyEvo, + NVDispHeadAudioStateEvoRec *pAudioState) +{ + nvkms_memset(pAudioState, 0, sizeof(*pAudioState)); + + /* + * CRT and the DSI digital flat panel does not support audio. If (supported + * == FALSE) the nvHdmiDpEnableDisableAudio does nothing. + */ + if (pDpyEvo->pConnectorEvo->legacyType != + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP || + pDpyEvo->pConnectorEvo->signalFormat == + NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + return; + } + + /* + * The DP/TMDS digital flat panels supports audio, but do not enable audio + * on the eDP and DVI displays. Some eDP panels goes blank when audio is + * enabled, and DVI monitors do not support audio. + * + * If (supported == TRUE) and (enabled == FALSE) then + * nvHdmiDpEnableDisableAudio() makes sure to keep audio disabled for + * a given head. + */ + pAudioState->supported = TRUE; + + if ((nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) && + pDpyEvo->internal) || + (!nvDpyIsHdmiEvo(pDpyEvo) && + !nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo))) { + return; + } + + if (FillELDBuffer(displayId, + nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo), + &pDpyEvo->parsedEdid, + &pAudioState->eld, + &pAudioState->maxFreqSupported)) { + pAudioState->isAudioOverHdmi = nvDpyIsHdmiEvo(pDpyEvo); + pAudioState->enabled = TRUE; + } +} + +#define MAX_AUDIO_DEVICE_ENTRIES \ + (NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_3 + 1) + +/* + * Returns audio device entry of connector, which should + * be attached to given head. Returns NONE if head is inactive. + * + * Each connector(SOR) supports four audio device entries, from 0 to 3, + * which can drive four independent audio streams. Any head can be attached to + * any audio device entry. + * + * Before audio-over-dp-mst support, by default the 0th device entry was + * used when a given head was driving a DP-SST/HDMI/DVI display. This + * function preserves that behavior. In the case of DP-MST, multiple heads + * are attached to a single connector. In that case this functions returns + * a device entry equal to the given head index. 
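+ *
+ * For example, heads 1 and 2 driving two MST streams on the same connector
+ * use audio device entries 1 and 2, while a head driving an HDMI or DP-SST
+ * display always uses entry 0.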
+ */ +static NvU32 GetAudioDeviceEntry(const NVDispEvoRec *pDispEvo, const NvU32 head) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = + pHeadState->pConnectorEvo; + + if (pConnectorEvo == NULL) { + return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE; + } + + ct_assert(MAX_AUDIO_DEVICE_ENTRIES == NVKMS_MAX_HEADS_PER_DISP); + + if (nvConnectorUsesDPLib(pConnectorEvo) && + (nvDPGetActiveLinkMode(pConnectorEvo->pDpLibConnector) == + NV_DP_LINK_MODE_MST)) { + return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0 + head; + } + + return NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_0; +} + +static NvBool IsAudioDeviceEntryActive( + const NVConnectorEvoRec *pConnectorEvo, const NvU32 deviceEntry) +{ + NvU32 primaryOrIndex; + NvU32 head, headsCount = 0; + NvBool isInMSTMode, isConnectorActive; + + if ((pConnectorEvo->or.mask == 0x0) || + (deviceEntry >= MAX_AUDIO_DEVICE_ENTRIES)) { + return FALSE; + } + + primaryOrIndex = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + isInMSTMode = FALSE; + isConnectorActive = FALSE; + + FOR_EACH_INDEX_IN_MASK( + 32, + head, + pConnectorEvo->or.ownerHeadMask[primaryOrIndex]) { + + const NVDispEvoRec *pDispEvo = pConnectorEvo->pDispEvo; + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NVDpyEvoRec *pDpyEvo = nvGetOneArbitraryDpyEvo( + pHeadState->activeDpys, pDispEvo); + + if (headsCount == 0) { + isInMSTMode = nvDpyEvoIsDPMST(pDpyEvo); + } else { + nvAssert(isInMSTMode == nvDpyEvoIsDPMST(pDpyEvo)); + } + headsCount++; + isConnectorActive = TRUE; + } FOR_EACH_INDEX_IN_MASK_END + + if (!isConnectorActive) { + return FALSE; + } + + nvAssert(isInMSTMode || headsCount == 1); + + if (isInMSTMode) { + return (NVBIT(deviceEntry) & + pConnectorEvo->or.ownerHeadMask[primaryOrIndex]) ? TRUE : FALSE; + } else if (!isInMSTMode && deviceEntry == 0) { + return TRUE; + } + + return FALSE; +} + +/*! + * Send EDID-Like-Data (ELD) to RM. + * + * The ELD contains a subset of the digital display device's EDID + * information related to audio capabilities. The GPU driver sends the + * ELD to hardware and the audio driver reads it by issuing the ELD + * command verb. The ELD should be updated under the following + * situations: + * + * 1. Power on reset + * 2. Pre modeset + * 3. HotPlug / Post modeset + * + * Apart from ELD, also update the following control flags: + * + * isPD - Present Detect, indicates if the monitor is attached + * isELDV - indicates if the ELD is Valid + * + * The values of iSPD and isELDV should be: + * + * NV_ELD_POWER_ON_RESET : isPD = 0, isELDV = 0 + * NV_ELD_PRE_MODESET : isPD = 1, isELDV = 0 + * NV_ELD_POST_MODESET : isPD = 1, isELDV = 1 + * + * \param[in] pDispEvo The disp of the displayId + * \param[in] displayId The display device whose ELD should be updated. + * This should be NVDispHeadStateEvoRec::activeRmId + * in case of NV_ELD_PRE_MODESET and + * NV_ELD_POST_MODESET, otherwise it should be + * NVConnectorEvoRec::displayId. + * \param[in] deviceEntry The device entry of connector. + * \param[in[ isDP The DisplayPort display device. + * \param[in] pParsedEdid The parsed edid from which ELD should be + * extracted. + * \param[in] eldCase The condition that requires updating the ELD. 
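+ * \param[in]  maxFreqSupported The maximum audio sample rate to report;
+ *                              only used when the ELD is marked valid
+ *                              (NV_ELD_POST_MODESET).
+ * \param[in]  pEld             The ELD buffer to program; only used when
+ *                              the ELD is marked valid.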
+ */ +typedef enum { + NV_ELD_POWER_ON_RESET, + NV_ELD_PRE_MODESET, + NV_ELD_POST_MODESET, +} NvEldCase; + +static void RmSetELDAudioCaps( + const NVDispEvoRec *pDispEvo, const NvU32 displayId, + const NvU32 deviceEntry, + const NvU32 maxFreqSupported, const NVEldEvoRec *pEld, + const NvEldCase eldCase) +{ + NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS params = { 0 }; + NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS audio_power_params = { 0 }; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvBool isPD, isELDV; + NvU32 ret; + + /* setup the ctrl flag */ + switch(eldCase) { + case NV_ELD_POWER_ON_RESET : + isPD = isELDV = FALSE; + break; + case NV_ELD_PRE_MODESET : + isPD = TRUE; + isELDV = FALSE; + break; + case NV_ELD_POST_MODESET : + isPD = isELDV = TRUE; + break; + default : + return; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.deviceEntry = deviceEntry; + params.displayId = displayId; + + if (isELDV) { + if (pEld->size == 0) { + isPD = isELDV = FALSE; + } else { + ct_assert(sizeof(params.bufferELD) == sizeof(pEld->buffer)); + + nvkms_memcpy(params.bufferELD, pEld->buffer, sizeof(pEld->buffer)); + params.numELDSize = pEld->size; + + params.maxFreqSupported = maxFreqSupported; + } + } else { + params.numELDSize = 0; + } + + params.ctrl = + DRF_NUM(0073_CTRL, _DFP_ELD_AUDIO_CAPS, _CTRL_PD, isPD)| + DRF_NUM(0073_CTRL, _DFP_ELD_AUDIO_CAPS, _CTRL_ELDV, isELDV); + + /* + * ELD information won't be populated to GPU HDA controller driver if + * HDA controller is in suspended state. + * Issue NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER RM control call for + * bringing the HDA controller in active state before writing ELD. Once ELD + * data is written, then HDA controller can again go into suspended state. + */ + audio_power_params.bEnter = FALSE; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle, + NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER, + &audio_power_params, sizeof(audio_power_params)); + + if (ret != NVOS_STATUS_SUCCESS) + nvAssert(!"NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER failed"); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, + ¶ms, + sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, "NvRmControl" + "(NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS) failed" + "return status = %d...", ret); + } + + audio_power_params.bEnter = TRUE; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle, + NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER, + &audio_power_params, sizeof(audio_power_params)); + + if (ret != NVOS_STATUS_SUCCESS) + nvAssert(!"NV2080_CTRL_CMD_OS_UNIX_AUDIO_DYNAMIC_POWER failed"); + +} + +void nvHdmiDpEnableDisableAudio(const NVDispEvoRec *pDispEvo, + const NvU32 head, const NvBool enable) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + const NVConnectorEvoRec *pConnectorEvo = pHeadState->pConnectorEvo; + const NvU32 deviceEntry = GetAudioDeviceEntry(pDispEvo, head); + + /* + * We should only reach this function for active heads, and therefore + * pConnectorEvo and deviceEntry are valid. 
+ */ + nvAssert((pHeadState->pConnectorEvo != NULL) && + (deviceEntry != NV0073_CTRL_DFP_ELD_AUDIO_CAPS_DEVICE_ENTRY_NONE)); + + if (!pHeadState->audio.supported) { + return; + } + + if (!pHeadState->audio.enabled) { + + if (enable) { + /* Make sure to remove corresponding audio device */ + RmSetELDAudioCaps(pDispEvo, + nvDpyIdToNvU32(pConnectorEvo->displayId), + deviceEntry, + 0 /* maxFreqSupported */, + NULL /* pEld */, + NV_ELD_POWER_ON_RESET); + } else { + /* Do nothing. The audio device is already in the disabled state. */ + } + + return; + } + + /* Invalidate ELD buffer before disabling audio */ + if (!enable) { + RmSetELDAudioCaps(pDispEvo, + pHeadState->activeRmId, + deviceEntry, + 0 /* maxFreqSupported */, + NULL /* pEld */, + NV_ELD_PRE_MODESET); + } + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + SetDpAudioEnable(pDispEvo, head, enable); + } + + if (pHeadState->audio.isAudioOverHdmi) { + EnableHdmiAudio(pDispEvo, head, enable); + } + + /* Populate ELD buffer after enabling audio */ + if (enable) { + RmSetELDAudioCaps(pDispEvo, + pHeadState->activeRmId, + deviceEntry, + pHeadState->audio.maxFreqSupported, + &pHeadState->audio.eld, + NV_ELD_POST_MODESET); + } +} + +/* + * Report HDMI capabilities to RM before modeset. + */ +void nvDpyUpdateHdmiPreModesetEvo(NVDpyEvoPtr pDpyEvo) +{ + if (!nvDpyIsHdmiEvo(pDpyEvo)) { + return; + } + + HdmiSendEnable(pDpyEvo, TRUE); +} + +/* + * Parse HDMI 2.1 VRR capabilities from the EDID and GPU. + */ +void nvDpyUpdateHdmiVRRCaps(NVDpyEvoPtr pDpyEvo) +{ +} + +void nvRemoveUnusedHdmiDpAudioDevice(const NVDispEvoRec *pDispEvo) +{ + const NVConnectorEvoRec *pConnectorEvo; + const NvU32 activeSorMask = nvGetActiveSorMask(pDispEvo); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NvU32 deviceEntry; + + // Only connectors with assigned SORs can have audio. + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR || + pConnectorEvo->or.mask == 0x0) { + continue; + } + + // Check whether an active pConnectorEvo shares an SOR with this one. + // + // This is a workaround for the fact that + // NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS takes a displayId rather than + // an SOR index. See bug 1953489. + if (nvIsConnectorActiveEvo(pConnectorEvo) && + (pConnectorEvo->or.mask & activeSorMask) != 0x0) { + continue; + } + + for (deviceEntry = 0; + deviceEntry < MAX_AUDIO_DEVICE_ENTRIES; + deviceEntry++) { + + if (IsAudioDeviceEntryActive(pConnectorEvo, deviceEntry)) { + continue; + } + + RmSetELDAudioCaps(pDispEvo, + nvDpyIdToNvU32(pConnectorEvo->displayId), + deviceEntry, + 0 /* maxFreqSupported */, + NULL /* pEld */, + NV_ELD_POWER_ON_RESET); + } + } +} + +/* + * Find the name of the given audio format, as described in the + * CEA-861 specification's description of byte 1 in the Audio + * Descriptor Block. hasSampleSize and hasMaxBitRate (i.e., how to + * interpret byte 3 of the Audio Descriptor Block) are a function of + * audio format, so set them as a side effect of interpreting the + * audio format. + * + * Note the return value is a const char * and should not be freed. 
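+ *
+ * For example, NVT_CEA861_AUDIO_FORMAT_LINEAR_PCM maps to "PCM" with
+ * hasSampleSize set, while NVT_CEA861_AUDIO_FORMAT_AC3 maps to "AC-3"
+ * with hasMaxBitRate set.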
+ */ +static const char *GetCea861AudioFormatInfo(NvU8 format, + NvBool *hasSampleSize, + NvBool *hasMaxBitRate) +{ + static const struct { + NvU8 format; + NvBool hasSampleSize : 1; + NvBool hasMaxBitRate : 1; + const char *name; + } audioFormatTable[] = { + { NVT_CEA861_AUDIO_FORMAT_LINEAR_PCM, TRUE, FALSE, "PCM" }, + { NVT_CEA861_AUDIO_FORMAT_AC3, FALSE, TRUE, "AC-3" }, + { NVT_CEA861_AUDIO_FORMAT_MPEG1, FALSE, TRUE, "MPEG-1" }, + { NVT_CEA861_AUDIO_FORMAT_MP3, FALSE, TRUE, "MP3" }, + { NVT_CEA861_AUDIO_FORMAT_MPEG2, FALSE, TRUE, "MPEG-2" }, + { NVT_CEA861_AUDIO_FORMAT_AAC, FALSE, TRUE, "AAC" }, + { NVT_CEA861_AUDIO_FORMAT_DTS, FALSE, TRUE, "DTS" }, + { NVT_CEA861_AUDIO_FORMAT_ATRAC, FALSE, TRUE, "ATRAC" }, + { NVT_CEA861_AUDIO_FORMAT_ONE_BIT, FALSE, FALSE, "DSD" }, + { NVT_CEA861_AUDIO_FORMAT_DDP, FALSE, FALSE, "E-AC-3" }, + { NVT_CEA861_AUDIO_FORMAT_DTS_HD, FALSE, FALSE, "DTS-HD" }, + { NVT_CEA861_AUDIO_FORMAT_MAT, FALSE, FALSE, "MLP" }, + { NVT_CEA861_AUDIO_FORMAT_DST, FALSE, FALSE, "DSP" }, + { NVT_CEA861_AUDIO_FORMAT_WMA_PRO, FALSE, FALSE, "WMA Pro" }, + }; + + int i; + + *hasSampleSize = FALSE; + *hasMaxBitRate = FALSE; + + for (i = 0; i < ARRAY_LEN(audioFormatTable); i++) { + if (format != audioFormatTable[i].format) { + continue; + } + + *hasSampleSize = audioFormatTable[i].hasSampleSize; + *hasMaxBitRate = audioFormatTable[i].hasMaxBitRate; + + return audioFormatTable[i].name; + } + + return ""; +} + + +/* + * Build a string description of the list of sample Rates, as + * described in the CEA-861 specification's description of byte 2 in + * the Audio Descriptor Block. + * + * Note the return value is a static char * and will be overwritten in + * subsequent calls to this function. + */ +static const char *GetCea861AudioSampleRateString(NvU8 sampleRates) +{ + static const struct { + NvU8 rate; + const char *name; + } sampleRateTable[] = { + { NVT_CEA861_AUDIO_SAMPLE_RATE_32KHZ, "32KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_44KHZ, "44KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_48KHZ, "48KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_88KHZ, "88KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_96KHZ, "96KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_176KHZ,"176KHz" }, + { NVT_CEA861_AUDIO_SAMPLE_RATE_192KHZ,"192KHz" }, + }; + + static char sampleRateString[64]; + + NvBool first = TRUE; + int i; + char *s; + int ret, bytesLeft = sizeof(sampleRateString); + + sampleRateString[0] = '\0'; + s = sampleRateString; + + for (i = 0; i < ARRAY_LEN(sampleRateTable); i++) { + if (sampleRates & sampleRateTable[i].rate) { + if (first) { + first = FALSE; + } else { + ret = nvkms_snprintf(s, bytesLeft, ", "); + s += ret; + bytesLeft -= ret; + } + ret = nvkms_snprintf(s, bytesLeft, "%s", sampleRateTable[i].name); + s += ret; + bytesLeft -= ret; + } + } + + nvAssert(bytesLeft >= 0); + + return sampleRateString; +} + + +/* + * Build a string description of the list of sample sizes, as + * described in the CEA-861 specification's description of byte 3 in + * the Audio Descriptor Block. + * + * Note the return value is a static char * and will be overwritten in + * subsequent calls to this function. 
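+ *
+ * For example, a Short Audio Descriptor advertising 16-bit and 24-bit
+ * samples yields "16-bits, 24-bits".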
+ */ +static const char *GetCea861AudioSampleSizeString(NvU8 sampleSizes) +{ + static const struct { + NvU8 bit; + const char *name; + } sampleSizeTable[] = { + { NVT_CEA861_AUDIO_SAMPLE_SIZE_16BIT, "16-bits" }, + { NVT_CEA861_AUDIO_SAMPLE_SIZE_20BIT, "20-bits" }, + { NVT_CEA861_AUDIO_SAMPLE_SIZE_24BIT, "24-bits" }, + }; + + static char sampleSizeString[64]; + + NvBool first = TRUE; + int i; + char *s; + int ret, bytesLeft = sizeof(sampleSizeString); + + sampleSizeString[0] = '\0'; + s = sampleSizeString; + + for (i = 0; i < ARRAY_LEN(sampleSizeTable); i++) { + if (sampleSizes & sampleSizeTable[i].bit) { + if (first) { + first = FALSE; + } else { + ret = nvkms_snprintf(s, bytesLeft, ", "); + s += ret; + bytesLeft -= ret; + } + ret = nvkms_snprintf(s, bytesLeft, "%s", sampleSizeTable[i].name); + s += ret; + bytesLeft -= ret; + } + } + + nvAssert(bytesLeft >= 0); + + return sampleSizeString; +} + + +/* + * Log the speaker allocation data block, as described in the CEA-861 + * specification. + */ +static void LogEdidCea861SpeakerAllocationData(NVEvoInfoStringPtr pInfoString, + NvU8 speaker) +{ + if ((speaker & NVT_CEA861_SPEAKER_ALLOC_MASK) == 0) { + return; + } + + nvEvoLogInfoString(pInfoString, + " Speaker Allocation Data :"); + + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FL_FR) { + nvEvoLogInfoString(pInfoString, + " Front Left + Front Right"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_LFE) { + nvEvoLogInfoString(pInfoString, + " Low Frequency Effect"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FC) { + nvEvoLogInfoString(pInfoString, + " Front Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RL_RR) { + nvEvoLogInfoString(pInfoString, + " Rear Left + Rear Right"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RC) { + nvEvoLogInfoString(pInfoString, + " Rear Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_FLC_FRC) { + nvEvoLogInfoString(pInfoString, + " Front Left Center + Front Right Center"); + } + if (speaker & NVT_CEA861_SPEAKER_ALLOC_RLC_RRC) { + nvEvoLogInfoString(pInfoString, + " Rear Left Center + Rear Right Center"); + } +} + + +static void LogEdidCea861Info(NVEvoInfoStringPtr pInfoString, + const NVT_EDID_CEA861_INFO *pExt861) +{ + int vsdbIndex; + int audioIndex; + + nvEvoLogInfoString(pInfoString, + " CEA-861 revision : %d\n", + pExt861->revision); + + /* + * IEEE vendor registration IDs are tracked here: + * http://standards.ieee.org/develop/regauth/oui/oui.txt + */ + for (vsdbIndex = 0; vsdbIndex < pExt861->total_vsdb; vsdbIndex++) { + const NvU32 ieeeId = pExt861->vsdb[vsdbIndex].ieee_id; + nvEvoLogInfoString(pInfoString, + " IEEE Vendor Registration ID: %02x-%02x-%02x", + (ieeeId >> 16) & 0xFF, + (ieeeId >> 8) & 0xFF, + ieeeId & 0xFF); + } + + nvEvoLogInfoString(pInfoString, + " Supports YCbCr 4:4:4 : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_YCbCr_444) ? + "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + " Supports YCbCr 4:2:2 : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_YCbCr_422) ? + "Yes" : "No"); + + nvEvoLogInfoString(pInfoString, + " Supports Basic Audio : %s", + (pExt861->basic_caps & NVT_CEA861_CAP_BASIC_AUDIO) ? 
+ "Yes" : "No"); + + for (audioIndex = 0; audioIndex < ARRAY_LEN(pExt861->audio); audioIndex++) { + + NvU32 byte1, byte2, byte3; + NvU8 format; + NvU8 maxChannels; + NvU8 sampleRates; + const char *formatString; + NvBool hasSampleSize; + NvBool hasMaxBitRate; + + byte1 = pExt861->audio[audioIndex].byte1; + byte2 = pExt861->audio[audioIndex].byte2; + byte3 = pExt861->audio[audioIndex].byte3; + + if (byte1 == 0) { + break; + } + + nvEvoLogInfoString(pInfoString, + " Audio Descriptor : %d", audioIndex); + + /* + * byte 1 contains the Audio Format and the maximum number + * of channels + */ + + format = ((byte1 & NVT_CEA861_AUDIO_FORMAT_MASK) >> + NVT_CEA861_AUDIO_FORMAT_SHIFT); + + formatString = GetCea861AudioFormatInfo(format, + &hasSampleSize, + &hasMaxBitRate); + + maxChannels = (byte1 & NVT_CEA861_AUDIO_MAX_CHANNEL_MASK) + 1; + + /* byte 2 contains the sample rates */ + + sampleRates = (byte2 & NVT_CEA861_AUDIO_SAMPLE_RATE_MASK); + + /* + * byte 3 varies, depending on Audio Format; interpret + * using hasSampleSize and hasMaxBitRate + */ + + nvEvoLogInfoString(pInfoString, + " Audio Format : %s", formatString); + nvEvoLogInfoString(pInfoString, + " Maximum Channels : %d", maxChannels); + nvEvoLogInfoString(pInfoString, + " Sample Rates : %s", + GetCea861AudioSampleRateString(sampleRates)); + if (hasSampleSize) { + nvEvoLogInfoString(pInfoString, + " Sample Sizes : %s", + GetCea861AudioSampleSizeString(byte3)); + } + if (hasMaxBitRate) { + nvEvoLogInfoString(pInfoString, + " Maximum Bit Rate : %d kHz", + byte3 * 8); + } + } + + LogEdidCea861SpeakerAllocationData(pInfoString, pExt861->speaker[0].byte1); +} + +void nvLogEdidCea861InfoEvo(NVDpyEvoPtr pDpyEvo, + NVEvoInfoStringPtr pInfoString) +{ + int extIndex; + + for (extIndex = 0; TRUE; extIndex++) { + const NVT_EDID_CEA861_INFO *pExt861 = + GetExt861(&pDpyEvo->parsedEdid, extIndex); + + if (pExt861 == NULL) { + break; + } + + if (pExt861->revision == NVT_CEA861_REV_NONE) { + continue; + } + + nvEvoLogInfoString(pInfoString, + "CEA-861 extension block # : %d\n", extIndex); + + LogEdidCea861Info(pInfoString, pExt861); + } +} + +/* + * HDMI 2.0 4K@60hz uncompressed RGB 4:4:4 (6G mode) is allowed if: + * + * - The GPU supports it. + * - The EDID and NVT_TIMING indicate the monitor supports it, or + * this check is overridden. + */ +NvBool nvHdmi204k60HzRGB444Allowed(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + const NVParsedEdidEvoRec *pParsedEdid = &pDpyEvo->parsedEdid; + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + const NvBool gpuSupports444 = pDevEvo->caps.supportsHDMI20; + + const NvBool overrideMonitorCheck = ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_HDMI2_CHECK) != 0); + + const NvBool monitorSupports444 = + (IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs) && + (pParsedEdid->info.hdmiForumInfo.max_TMDS_char_rate > 0)); + + nvAssert(pParsedEdid->valid); + + return (gpuSupports444 && + (overrideMonitorCheck || monitorSupports444)); +} + +/* + * Enable or disable HDMI 2.1 VRR infoframes. The HDMI 2.1 VRR infoframe must + * be enabled before the first extended vblank after enabling VRR, or the + * display will blank. 
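+ *
+ * The VRR infoframe is carried in an HDMI Extended Metadata Packet (EMP),
+ * constructed below via NvTiming_ConstructExtendedMetadataPacketInfoframe().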
+ */ +void nvHdmiSetVRR(NVDispEvoPtr pDispEvo, NvU32 head, NvBool enable) +{ + NVT_EXTENDED_METADATA_PACKET_INFOFRAME empInfoFrame; + NVT_EXTENDED_METADATA_PACKET_INFOFRAME_CTRL empCtrl; + NVHDMIPKT_TC transmitControl; + NVT_STATUS status; + + nvkms_memset(&empCtrl, NVT_INFOFRAME_CTRL_DONTCARE, + sizeof(empCtrl)); + + empCtrl.EnableVRR = enable; + + status = NvTiming_ConstructExtendedMetadataPacketInfoframe(&empCtrl, + &empInfoFrame); + + if (status != NVT_STATUS_SUCCESS) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_ERROR, + "Error in constructing Extended Metadata Packet InfoFrame"); + return; + } + + transmitControl = + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _ENABLE, _EN) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _OTHER, _DIS) | + DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _CHKSUM_HW, _DIS); + + // Transmit the enable packet every frame, but only transmit the + // disable packet once. + if (enable) { + transmitControl |= DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _DIS); + } else { + transmitControl |= DRF_DEF(_HDMI_PKT, _TRANSMIT_CTRL, _SINGLE, _EN); + } + + SendInfoFrame(pDispEvo, + head, + transmitControl, + (NVT_INFOFRAME_HEADER *) &empInfoFrame, + sizeof(empInfoFrame)); +} + +/* + * The HDMI library calls this function during initialization to ask the + * implementation to allocate and map a NV*71_DISP_SF_USER object. The + * appropriate class, mapping size, and subdevice ID are provided. A handle is + * generated here and passed back to the library; the same handle is provided + * in the symmetric HdmiLibRmFreeMemoryMap() function so we don't have to save + * a copy of it in nvkms's data structures. + */ +static NvBool HdmiLibRmGetMemoryMap( + NvHdmiPkt_CBHandle handle, + NvU32 dispSfUserClassId, + NvU32 dispSfUserSize, + NvU32 sd, + NvU32 *pDispSfHandle, + void **pPtr) +{ + NVDevEvoRec *pDevEvo = handle; + void *ptr = NULL; + NvU32 ret; + NvU32 dispSfHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (dispSfHandle == 0) { + return FALSE; + } + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + dispSfUserClassId, + NULL); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + return FALSE; + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + 0, + dispSfUserSize, + &ptr, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + return FALSE; + } + + *pDispSfHandle = dispSfHandle; + *pPtr = ptr; + + return TRUE; +} + +static void HdmiLibRmFreeMemoryMap( + NvHdmiPkt_CBHandle handle, + NvU32 sd, + NvU32 dispSfHandle, + void *ptr) +{ + NVDevEvoRec *pDevEvo = handle; + NvU32 ret; + + if (ptr != NULL) { + nvAssert(dispSfHandle != 0); + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle, + ptr, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(ret == NVOS_STATUS_SUCCESS); + } + } + + if (dispSfHandle) { + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + dispSfHandle); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(ret == NVOS_STATUS_SUCCESS); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + dispSfHandle); + } +} + +/* Wrapper around RmControl for 0073 (NV04_DISPLAY_COMMON) object. 
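+ * Registered as the HDMI library's rmDispControl2 callback; the subDevice
+ * argument is unused here because the NV04_DISPLAY_COMMON object is
+ * per-device.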
*/ +static NvBool HdmiLibRmDispControl( + NvHdmiPkt_CBHandle handle, + NvU32 subDevice, + NvU32 cmd, + void *pParams, + NvU32 paramSize) +{ + NVDevEvoRec *pDevEvo = handle; + NvU32 ret; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + cmd, + pParams, + paramSize); + + return ret == NVOS_STATUS_SUCCESS; +} + +static void HdmiLibAcquireMutex( + NvHdmiPkt_CBHandle handle) +{ + /* The HDMI library only executes when nvkms calls it, and nvkms will only + * call it while holding the nvkms lock. So there is no concurrency to + * protect against with this mutex. */ +} + +static void HdmiLibReleaseMutex( + NvHdmiPkt_CBHandle handle) +{ +} + +static void *HdmiLibMalloc(NvHdmiPkt_CBHandle handle, NvLength len) +{ + return nvAlloc(len); +} + +static void HdmiLibFree(NvHdmiPkt_CBHandle handle, void *p) +{ + nvFree(p); +} + +static void HdmiLibPrint( + NvHdmiPkt_CBHandle handle, + const char *format, ...) +{ + NVDevEvoRec *pDevEvo = handle; + + va_list ap; + va_start(ap, format); + /* The HDMI library doesn't have log levels, but currently only logs in + * debug builds. It's pretty chatty (e.g., it prints "Initialize Success" + * when it inits), so hardcode it to INFO level for now. */ + nvVEvoLog(EVO_LOG_INFO, pDevEvo->gpuLogIndex, format, ap); + va_end(ap); +} + +static void HdmiLibAssert( + NvHdmiPkt_CBHandle handle, + NvBool expr) +{ + /* + * This interface isn't the best... I hope you have a kernel debugger if + * this fires, because the file and line number will always be this one. + */ + nvAssert(expr); +} + +static const NVHDMIPKT_CALLBACK HdmiLibCallbacks = +{ + .rmGetMemoryMap = HdmiLibRmGetMemoryMap, + .rmFreeMemoryMap = HdmiLibRmFreeMemoryMap, + .rmDispControl2 = HdmiLibRmDispControl, + .acquireMutex = HdmiLibAcquireMutex, + .releaseMutex = HdmiLibReleaseMutex, + .setTimeout = NULL, /* optional */ + .checkTimeout = NULL, /* optional */ + .malloc = HdmiLibMalloc, + .free = HdmiLibFree, + .print = HdmiLibPrint, + .assert = HdmiLibAssert, +}; + +void nvTeardownHdmiLibrary(NVDevEvoRec *pDevEvo) +{ + NvHdmiPkt_DestroyLibrary(pDevEvo->hdmiLib.handle); +} + +NvBool nvInitHdmiLibrary(NVDevEvoRec *pDevEvo) +{ + pDevEvo->hdmiLib.handle = + NvHdmiPkt_InitializeLibrary(pDevEvo->dispClass, + pDevEvo->numSubDevices, + pDevEvo, // callback handle + &HdmiLibCallbacks, + 0, // not used because we set + NULL); // NVHDMIPKT_RM_CALLS_INTERNAL=0 + + if (pDevEvo->hdmiLib.handle == NVHDMIPKT_INVALID_HANDLE) { + pDevEvo->hdmiLib.handle = NULL; + return FALSE; + } + + return TRUE; +} + +/* + * Call the HDMI library to "assess" the link. This basically does link + * training to see what the maximum lane configuration is. We do this when the + * monitor is connected after reading the EDID, so we can validate modes + * against the link capabilities. + * + * Returns true if the link was assessed to be capable of FRL, and false + * otherwise. + */ +NvBool nvHdmiFrlAssessLink(NVDpyEvoPtr pDpyEvo) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVHDMIPKT_RESULT ret; + const NvU32 displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId); + + /* HDMI dpys not dynamic dpy so its connector should have a dpyId. 
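+      * (That is, HDMI dpys are never dynamic dpys, so the connector's
+      * displayId is expected to be non-zero; the assert below checks this.)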
*/ + nvAssert(displayId != 0); + nvAssert(pDpyEvo->parsedEdid.valid); + + ret = NvHdmi_AssessLinkCapabilities(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + displayId, + &pDpyEvo->parsedEdid.info, + &pDpyEvo->hdmi.srcCaps, + &pDpyEvo->hdmi.sinkCaps); + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + return FALSE; + } + + return pDpyEvo->hdmi.sinkCaps.linkMaxFRLRate != HDMI_FRL_DATA_RATE_NONE; +} + +/* Determine if HDMI FRL is needed to drive the given timings on the given dpy. */ +static NvBool TimingsNeedFRL(const NVDpyEvoRec *pDpyEvo, + const NVHwModeTimingsEvo *pTimings) +{ + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + /* Can't use FRL if the display hardware doesn't support it */ + if (!pDevEvo->hal->caps.supportsHDMIFRL) { + return FALSE; + } + + /* Can only use FRL for HDMI devices. */ + if (!nvDpyIsHdmiEvo(pDpyEvo)) { + return FALSE; + } + + /* Can only use FRL if the HDMI sink supports it. */ + if (!pDpyEvo->parsedEdid.valid || + !pDpyEvo->parsedEdid.info.hdmiForumInfo.max_FRL_Rate) { + return FALSE; + } + + /* + * For HDMI, maxSingleLinkPixelClockKHz is the maximum non-FRL rate. + * If the rate is higher than that, try to use FRL for the mode. + */ + return pTimings->pixelClock > pDpyEvo->maxSingleLinkPixelClockKHz; +} + +NvBool nvHdmiFrlQueryConfig( + const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + NVHwModeTimingsEvo *pHwTimings, + const struct NvKmsModeValidationParams *pValidationParams) +{ + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + HDMI_VIDEO_TRANSPORT_INFO videoTransportInfo = { }; + HDMI_QUERY_FRL_CLIENT_CONTROL clientControl = { }; + const NVT_TIMING *pNvtTiming; + NVT_TIMING nvtTiming = { }; + NVHDMIPKT_RESULT ret; + + if (!TimingsNeedFRL(pDpyEvo, pHwTimings)) { + return TRUE; + } + + /* See if we can find an NVT_TIMING for this mode from the EDID. */ + pNvtTiming = nvFindEdidNVT_TIMING(pDpyEvo, pModeTimings, pValidationParams); + + if (pNvtTiming == NULL) { + /* + * No luck finding this mode in the EDID. + * + * Construct enough of an NVT_TIMING for the hdmi library, based on the + * pHwTimings mode. + * + * The HDMI library's hdmiQueryFRLConfigC671 uses: + * - pVidTransInfo->pTiming->pclk + * - pVidTransInfo->pTiming->HTotal + * - pVidTransInfo->pTiming->HVisible + * - pVidTransInfo->pTiming->VVisible + * + * This is also used, although we don't have a CEA format so we just + * set it to 0: + * - NVT_GET_CEA_FORMAT(pVidTransInfo->pTiming->etc.status) + */ + + /* Convert from KHz to 10KHz; round up for the purposes of determining a + * minimum FRL rate. */ + nvtTiming.pclk = (pHwTimings->pixelClock + 9) / 10; + nvtTiming.HVisible = pHwTimings->rasterBlankStart.x - + pHwTimings->rasterBlankEnd.x; + nvtTiming.HTotal = pHwTimings->rasterSize.x; + nvtTiming.VVisible = pHwTimings->rasterBlankStart.y - + pHwTimings->rasterBlankEnd.y; + nvtTiming.etc.status = 0; + + pNvtTiming = &nvtTiming; + } + + videoTransportInfo.pTiming = pNvtTiming; + /* + * pTimings->pixelDepth isn't assigned yet at this point in mode + * validation, so we can't use that. + * This matches the non-DP default assigned later in + * nvConstructHwModeTimingsEvo(). + * + * TODO: we should select a higher depth by default and downgrade if not + * possible. + */ + videoTransportInfo.bpc = HDMI_BPC8; + /* TODO: support YUV/YCbCr 444 and 422 packing modes. 
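+      * For now, the switch below keeps RGB packing for non-YUV420 modes,
+      * uses YCbCr 4:2:0 packing for HW YUV420 modes, and rejects SW YUV420
+      * in combination with FRL.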
*/ + switch (pModeTimings->yuv420Mode) { + case NV_YUV420_MODE_NONE: + videoTransportInfo.packing = HDMI_PIXEL_PACKING_RGB; + break; + case NV_YUV420_MODE_SW: + /* + * Don't bother implementing this with FRL. + * HDMI FRL and HW YUV420 support were both added in nvdisplay 4.0 + * hardware, so if the hardware supports FRL it should support + * YUV420_MODE_HW. + */ + return FALSE; + case NV_YUV420_MODE_HW: + videoTransportInfo.packing = HDMI_PIXEL_PACKING_YCbCr420; + break; + } + /* TODO: implement 2head1or+FRL */ + videoTransportInfo.bDualHeadMode = FALSE; + + clientControl.option = HDMI_QUERY_FRL_HIGHEST_PIXEL_QUALITY; + + if (pValidationParams->forceDsc) { + clientControl.enableDSC = TRUE; + } + if (pValidationParams->dscOverrideBitsPerPixelX16 != 0) { + clientControl.forceBppx16 = TRUE; + clientControl.bitsPerPixelX16 = + pValidationParams->dscOverrideBitsPerPixelX16; + } + + ret = NvHdmi_QueryFRLConfig(pDevEvo->hdmiLib.handle, + &videoTransportInfo, + &clientControl, + &pDpyEvo->hdmi.srcCaps, + &pDpyEvo->hdmi.sinkCaps, + &pHwTimings->hdmiFrlConfig); + + return ret == NVHDMIPKT_SUCCESS; +} + +void nvHdmiFrlClearConfig(NVDispEvoRec *pDispEvo, NvU32 activeRmId) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + NVHDMIPKT_RESULT ret; + ret = NvHdmi_ClearFRLConfig(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, activeRmId); + if (ret != NVHDMIPKT_SUCCESS) { + nvAssert(ret == NVHDMIPKT_SUCCESS); + } +} + +void nvHdmiFrlSetConfig(NVDispEvoRec *pDispEvo, NvU32 head) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + HDMI_FRL_CONFIG *pFrlConfig = &pHeadState->timings.hdmiFrlConfig; + NVHDMIPKT_RESULT ret; + NvU32 retries = 0; + const NvU32 MAX_RETRIES = 5; + + if (pFrlConfig->frlRate == HDMI_FRL_DATA_RATE_NONE) { + return; + } + + nvAssert(pHeadState->activeRmId != 0); + + do { + ret = NvHdmi_SetFRLConfig(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + pHeadState->activeRmId, + NV_FALSE /* bFakeLt */, + pFrlConfig); + } while (ret != NVHDMIPKT_SUCCESS && retries++ < MAX_RETRIES); + + if (ret != NVHDMIPKT_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI FRL link training failed."); + + /* + * Link training failed even after retrying. Since link training + * happens after we've already committed to a modeset and failing is + * not an option, try one last time with the 'bFakeLt' parameter + * set, which should enable enough of the display hardware to + * prevent hangs when we attempt to drive the OR with + * PROTOCOL_HDMI_FRL. + */ + ret = NvHdmi_SetFRLConfig(pDevEvo->hdmiLib.handle, + pDispEvo->displayOwner, + pHeadState->activeRmId, + NV_TRUE /* bFakeLt */, + pFrlConfig); + + if (ret != NVHDMIPKT_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "HDMI FRL fallback link training failed."); + } + } + + if (retries != 0) { + nvEvoLogDispDebug(pDispEvo, EVO_LOG_WARN, + "HDMI FRL link training retried %d times.", + retries); + } + + nvAssert(pHeadState->attributes.digitalSignal == + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_TMDS); + + pHeadState->attributes.digitalSignal = + NV_KMS_DPY_ATTRIBUTE_DIGITAL_SIGNAL_HDMI_FRL; +} diff --git a/src/nvidia-modeset/src/nvkms-hw-states.c b/src/nvidia-modeset/src/nvkms-hw-states.c new file mode 100644 index 000000000..184d7ce4d --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-hw-states.c @@ -0,0 +1,1125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * nvkms-hw-states.c - Defines how to set up EVO hardware for the given usage. + * Used by the EVO state machines in nv_evo_states.c. + */ + +#include "nvkms-types.h" +#include "nvkms-framelock.h" +#include "nvkms-evo-states.h" + +/* + * Listed below are the different locking topologies for scan lock + * + * ------ (raster lock) + * ====== (frame lock) + * + * Config NoLock: No locking relationship between the heads + * + * +--------+ +------------+ + * | Head A | | Heads B... | + * +--------+ +------------+ + * + * + * Config LockHeads: Supports raster lock across two or more heads. + * + * +--------+ +--------+ + * | Head A | -- Internal -+--> | Head B | + * +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Config FrameLockClient: Supports frame lock clients across GPUs/systems, + * one head per GPU + * + * +--------+ +-------------+ + * | Gsync | ==============> | Head A/B... | + * +--------+ +-------------+ + * ^ | + * +-------- External ----------+ + * + * + * Config FrameLockServer: Same as above, but generates timing for the + * frame lock network + * + * +--------+ +-------------+ + * | Gsync | | Head A/B... | + * +--------+ +-------------+ + * ^ | + * +-------------- External ---+ + * + * + * Config FrameLockClientManyHeads: Support frame lock across GPUs/systems, + * two or more heads per GPU + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | =====> | Head A | == Internal =+==> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +==> | Head C | + * | +--------+ + * . ... + * + * Config FrameLockServerManyHeads: Same as above, only this head + * is driving timing for the frame lock network. + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | | Head A | == Internal =+==> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +==> | Head C | + * | +--------+ + * . ... + * + * Config LockHeadsFrameLockClient: Frame lock enabled on one head of a + * GPU where two or more heads are raster-locked. + * Config LockHeadsFrameLockClientManyHeads: Same, but two or more heads are + * enabled. 
+ * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | =====> | Head A | -- Internal -+--> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Config LockHeadsFrameLockServer: Frame lock enabled on one head of a GPU + * where two or more heads are raster-locked, and this head is driving timing + * for the frame lock network. + * Config LockHeadsFrameLockServerManyHeads: Same, but one head is frame + * lock server and the others are frame lock clients. + * + * +-------- External ---------------------------+ + * V | + * +--------+ +--------+ +--------+ + * | Gsync | | Head A | -- Internal -+--> | Head B | + * +--------+ +--------+ | +--------+ + * | + * | +--------+ + * +--> | Head C | + * | +--------+ + * . ... + * + * Configs SliPrimary, SliSecondary, SliLastSecondary: Supports SLI. + * + * +-----------------+ + * +--- | Head A, subdev0 | + * | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config LockHeadsSli{Primary,Secondary,LastSecondary}: Supports SLI with two + * or more heads rasterlocked (primary or any secondary, independently). + * + * +-----------------+ +---------------------+ + * +--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * The SliSecondary states also come in a FrameLockClient variant; this means that + * they have framelock enabled in the RM (for reporting purposes; they still + * get their sync from the SLI primary). + * + * + * Config SliPrimaryFrameLockClient: Supports frame lock across GPU + * groups/systems with SLI + * + * + * +===============================+ + * I V + * +-------+ +-----------------+ + * | Gsync | <-----------+--- | Head A, subdev0 | + * +-------+ | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config SliPrimaryFrameLockServer: Same as above, only this SLI head drives + * timing for the frame lock network. + * + * +-------+ +-----------------+ + * | Gsync | <-----------+--- | Head A, subdev0 | + * +-------+ | +-----------------+ + * External + * | +-----------------+ + * +--> | Head A, subdev1 | + * | +-----------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config SliPrimaryLockHeadsFrameLockClient: Supports frame lock across GPU + * groups/systems with SLI, with two or more heads on a GPU rasterlocked + * together. 
+ * + * + * +======================+ + * I V + * +-------+ +-----------------+ +---------------------+ + * | Gsync | <--+--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * +-------+ | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Config SliPrimaryLockHeadsFrameLockServer: Same as above, only this SLI head + * drives timing for the frame lock network. + * + * +-------+ +-----------------+ +---------------------+ + * | Gsync | <--+--- | Head A, subdev0 | -- Internal --> | Heads B..., subdev0 | + * +-------+ | +-----------------+ +---------------------+ + * External + * | +-----------------+ +---------------------+ + * +--> | Head A, subdev1 | -- Internal --> | Heads B..., subdev1 | + * | +-----------------+ +---------------------+ + * | +-----------------+ + * +--> | Head A, subdev2 | + * | +-----------------+ + * . ... + * + * + * Note that for the SLI and framelock topologies we set the external fliplock + * pin. Changing the pin causes a raster reset for some reason, so we want to + * change the pin here, prior to enabling flip lock. + */ + +NvBool nvEvoLockHWStateNoLock(NVDispEvoPtr pDispEvo, NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Disable scan lock on this head */ + pHC->serverLock = NV_EVO_NO_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + pHC->clientLock = NV_EVO_NO_LOCK; + pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + pHC->clientLockoutWindow = 0; + + /* Reset the flip lock pin to internal, if not needed for SLI */ + if (!HEAD_MASK_QUERY(pEvoSubDev->flipLockPinSetForSliHeadMask, head)) { + pHC->flipLockPin = NV_EVO_LOCK_PIN_INTERNAL(0); + } + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_UNSET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + /* Disable framelock */ + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockExtRefClkMaskAssy &= ~(1 << head); + + /* Reset SLI state */ + pEvoSubDev->sliRasterLockServerMask &= ~(1 << head); + pEvoSubDev->sliRasterLockClientMask &= ~(1 << head); + + pHC->lockChainPosition = 0; + } + + pEvoSubDev->frameLockHouseSync = FALSE; + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i, serverHead = 0; + + nvAssert(pHeads != NULL && + pHeads[0] != NV_INVALID_HEAD && + pHeads[1] != NV_INVALID_HEAD); + + /* First, disable all scan locking */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Make the first head a raster lock server on the internal pin */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head); + serverHead = head; + } else { + /* Make all the other heads raster lock clients on the internal pin */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = 
NV_EVO_LOCK_PIN_INTERNAL(serverHead); + pHC->clientLockoutWindow = 2; + } + } + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) + +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Set up for the FRAME_LOCK_SERVER state */ + if (!nvEvoLockHWStateFrameLockServer(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + /* Additionally enable the first head as a frame lock client */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* disable all scan locking */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads != NULL && pHeads[0] != NV_INVALID_HEAD); + + /* Enable the first head as a raster lock server on the external pin */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + + /* Set up the first head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockClientManyHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Set up as a frame lock server with two heads */ + if (!nvEvoLockHWStateFrameLockServerManyHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + /* Additionally enable the first head as a frame lock client */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateFrameLockServerManyHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = 
nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i, serverHead = 0; + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Disable all scan lock */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Make the first head a frame lock server on the internal pin. + * The first head is guaranteed to be framelock server or one of + * the requested framelock clients here + */ + nvAssert(nvIsFramelockableHead(pDispEvo, head)); + + pHC->serverLock = NV_EVO_FRAME_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head); + serverHead = head; + + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + /* If two or more heads are framelocked, but at least one head + * cannot be framelocked with the others, that head will be in + * the list of pDpys, but must not be framelocked, so skip it. + */ + + if (!nvIsFramelockableHead(pDispEvo, head)) { + continue; + } + if (i == 1) { + /* Make the second head a raster lock server on the external pin */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + } + /* Make all nonzero heads a frame lock client on the internal pin */ + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(serverHead); + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + /* Set up all heads to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + } + + return TRUE; +} +NvBool nvEvoLockHWStateFrameLockServerHouseSyncManyHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateFrameLockClientManyHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeadsFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_RASTER_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i, serverHead = 0; + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Disable all scan lock */ + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Make the first head a raster lock server on the internal pin */ + if (i == 0) { + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = NV_EVO_LOCK_PIN_INTERNAL(head); + serverHead = head; + + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + if (i == 1) { + /* Make the second head a raster lock server on the external pin */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + } + + /* Make all nonzero 
heads raster lock clients on the internal pin */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = NV_EVO_LOCK_PIN_INTERNAL(serverHead); + pHC->clientLockoutWindow = 2; + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + /* Set up all heads to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + } + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeadsFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* Set up for the LOCK_HEADS_FRAME_LOCK_SERVER state */ + if (!nvEvoLockHWStateLockHeadsFrameLockServer(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + /* Additionally, enable the first head as a frame lock client */ + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + + pEvoSubDev->frameLockServerMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +static void SetLockChainPosition(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + NVEvoHeadControlPtr pHC) +{ + if (pDispEvo->displayOwner == pEvoSubDev->subDeviceInstance) { + /* + * When we own display (even if subDeviceInstance != 0), set + * lockChainPosition of 0, since we are actually scanning out pixels + * (this is the case for all SLI Mosaic and non-Mosaic display owners). 
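+         *
+         * For example, in a three-GPU SLI group where subdevice 0 owns the
+         * display, subdevice 0 programs lockChainPosition 0 while subdevices
+         * 1 and 2 program positions 1 and 2 (see the else branch below).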
+ */ + pHC->lockChainPosition = 0; + } else { + /* + * If we don't own display, just assume the video bridge chain is + * linear + */ + pHC->lockChainPosition = pEvoSubDev->subDeviceInstance; + } +} + +NvBool nvEvoLockHWStateSliPrimary(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = pEvoSubDev->sliServerLockPin; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + + pEvoSubDev->sliRasterLockServerMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = pEvoSubDev->sliServerLockPin; + unsigned int i; + + if (pin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + /* LockHeads sets up server lock on the first head, client lock on the rest */ + nvEvoLockHWStateLockHeads(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads != NULL && + pHeads[0] != NV_INVALID_HEAD && + pHeads[1] != NV_INVALID_HEAD); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* + * The first head is configured as rasterlock server on its + * internal pin. It serves as the server for everything else on + * this GPU, as well as (indirectly though another head) everything + * in the SLI group. + */ + pEvoSubDev->sliRasterLockServerMask |= 1 << head; + } else { + if (i == 1) { + /* + * The first rasterlock client on this GPU also serves as server + * for the rest of the SLI device + */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = pin; + } + + /* All of these heads should inherit extrefclk from the server */ + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + } + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondary(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin serverPin = pEvoSubDev->sliServerLockPin; + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + NvU32 clientLockoutWindow = pEvoSubDev->forceZeroClientLockoutWindow ? 
0 : 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || + serverPin == NV_EVO_LOCK_PIN_ERROR || + flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Server lock to be consumed by GPUs further down the chain */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = serverPin; + + /* Client lock to sync to GPUs further up the chain */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + if (!nvEvoLockHWStateSliSecondary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[0]; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondary(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + NvU32 clientLockoutWindow = pEvoSubDev->forceZeroClientLockoutWindow ? 0 : 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* Only set up client lock; no more GPUs to consume server lock */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + if (!nvEvoLockHWStateSliLastSecondary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + nvAssert(pHeads[0] != NV_INVALID_HEAD); + nvAssert(pHeads[1] == NV_INVALID_HEAD); + + const int head = pHeads[0]; + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin serverPin = pEvoSubDev->sliServerLockPin; + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + NvU32 clientLockoutWindow = pEvoSubDev->forceZeroClientLockoutWindow ? 
0 : 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || serverPin == NV_EVO_LOCK_PIN_ERROR || + flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* + * first head (chosen arbitrarily): server lock to be consumed by + * GPUs further down the chain + */ + pHC->serverLock = NV_EVO_RASTER_LOCK; + pHC->serverLockPin = serverPin; + } + + /* + * Client lock all heads to the external SLI pin. Note that we cannot + * client lock one head and set up internal locking for the other + * because of bug 405996. + */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliSecondaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + if (!nvEvoLockHWStateSliSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[i]; + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryLockHeads(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin clientPin = pEvoSubDev->sliClientLockPin; + NvU32 clientLockoutWindow = pEvoSubDev->forceZeroClientLockoutWindow ? 0 : 2; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (clientPin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + nvEvoLockHWStateNoLock(pDispEvo, pEvoSubDev, pHeads); + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + /* + * Client lock all heads to the external SLI pin. Note that we cannot + * client lock one head and set up internal locking for the other + * because of bug 405996. 
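+ * As a result, each head on this GPU client-locks directly to the incoming
+ * rasterlock signal from the GPU further up the chain, rather than to
+ * another head on the same GPU.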
+ */ + pHC->clientLock = NV_EVO_RASTER_LOCK; + pHC->clientLockPin = clientPin; + pHC->clientLockoutWindow = clientLockoutWindow; + + pEvoSubDev->sliRasterLockClientMask |= 1 << head; + + SetLockChainPosition(pDispEvo, pEvoSubDev, pHC); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliLastSecondaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + + if (!nvEvoLockHWStateSliLastSecondaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + pEvoSubDev->frameLockClientMaskAssy |= 1 << pHeads[i]; + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + unsigned int i; + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + + if (flPin == NV_EVO_LOCK_PIN_ERROR || + !nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateSliPrimaryFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + if (!nvEvoLockHWStateSliPrimary(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Enable first head as framelock client */ + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + } + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServer(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (flPin == 
NV_EVO_LOCK_PIN_ERROR || + !nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + } else { + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + } + + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockServerHouseSync(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + /* As far as EVO is concerned, House Sync means FL client */ + if (!nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + const int head = pHeads[0]; + pEvoSubDev->frameLockServerMaskAssy |= 1 << head; + pEvoSubDev->frameLockClientMaskAssy &= ~(1 << head); + pEvoSubDev->frameLockHouseSync = TRUE; + + return TRUE; +} + +NvBool nvEvoLockHWStateSliPrimaryLockHeadsFrameLockClient(NVDispEvoPtr pDispEvo, + NVEvoSubDevPtr pEvoSubDev, + const NvU32 *pHeads) +{ + NVEvoLockPin pin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FRAME_LOCK); + NVEvoLockPin flPin = nvEvoGetPinForSignal(pDispEvo, pEvoSubDev, + NV_EVO_LOCK_SIGNAL_FLIP_LOCK); + unsigned int i; + + if (pin == NV_EVO_LOCK_PIN_ERROR || flPin == NV_EVO_LOCK_PIN_ERROR) { + return FALSE; + } + + if (!nvEvoLockHWStateSliPrimaryLockHeads(pDispEvo, pEvoSubDev, pHeads)) { + return FALSE; + } + + for (i = 0; pHeads[i] != NV_INVALID_HEAD; i++) { + const int head = pHeads[i]; + NVEvoHeadControlPtr pHC = &pEvoSubDev->headControlAssy[head]; + + if (i == 0) { + /* Enable first head as framelock client */ + pHC->clientLock = NV_EVO_FRAME_LOCK; + pHC->clientLockPin = pin; + } + + pEvoSubDev->frameLockClientMaskAssy |= 1 << head; + pEvoSubDev->frameLockExtRefClkMaskAssy |= 1 << head; + + /* Set up this head to use the external flip lock pin */ + pHC->flipLockPin = flPin; + pEvoSubDev->flipLockPinSetForFrameLockHeadMask = + HEAD_MASK_SET(pEvoSubDev->flipLockPinSetForFrameLockHeadMask, head); + } + + return TRUE; +} diff --git a/src/nvidia-modeset/src/nvkms-lut.c b/src/nvidia-modeset/src/nvkms-lut.c new file mode 100644 index 000000000..3e623a66d --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-lut.c @@ -0,0 +1,391 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-lut.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-dma.h" +#include "nvkms-utils.h" +#include "nvos.h" + +#include /* NV01_MEMORY_LOCAL_USER */ + +static void FreeLutSurfaceEvoInVidmem(NVLutSurfaceEvoPtr pSurfEvo) +{ + NVDevEvoPtr pDevEvo; + + if (pSurfEvo == NULL) { + return; + } + + pDevEvo = pSurfEvo->pDevEvo; + + nvRmEvoUnMapVideoMemory(pDevEvo, pSurfEvo->handle, + pSurfEvo->subDeviceAddress); + + /* Free display context dmas for the surface, if any */ + nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma); + + /* Free the surface */ + if (pSurfEvo->handle) { + NvU32 result; + + result = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, pSurfEvo->handle); + if (result != NVOS_STATUS_SUCCESS) { + nvAssert(!"Freeing LUT surface failed"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pSurfEvo->handle); + pSurfEvo->handle = 0; + } + + nvFree(pSurfEvo); +} + +static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + NvU32 ret = NVOS_STATUS_ERROR_GENERIC; + NvU32 attr = 0, attr2 = 0; + NvU32 allocFlags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN | + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + NvU64 size = 0, alignment = 4096; + + NVLutSurfaceEvoPtr pSurfEvo; + + pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo)); + if (pSurfEvo == NULL) { + return NULL; + } + + pSurfEvo->pDevEvo = pDevEvo; + + size = (sizeof(NVEvoLutDataRec) + 63) & ~63; + + pSurfEvo->size = size; + + pSurfEvo->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (pSurfEvo->handle == 0) { + goto fail; + } + + attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr); + attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, attr2); + + alignment = NV_MAX(alignment, NV_EVO_SURFACE_ALIGNMENT); + if (alignment != 0) { + allocFlags |= NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + } + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = size; + memAllocParams.attr = attr; + memAllocParams.attr2 = attr2; + memAllocParams.flags = allocFlags; + memAllocParams.alignment = alignment; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfEvo->handle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + /* If we failed the allocation above, abort */ + if (ret != NVOS_STATUS_SUCCESS) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle); + pSurfEvo->handle = 0; + + goto fail; + } + + /* Allocate a display context dma */ + pSurfEvo->dispCtxDma = + nvRmEvoAllocateAndBindDispContextDMA(pDevEvo, + pSurfEvo->handle, + NvKmsSurfaceMemoryLayoutPitch, + pSurfEvo->size - 1); + + if (!pSurfEvo->dispCtxDma) { + goto fail; + } + + /* Map the surface for the CPU */ + if (!nvRmEvoMapVideoMemory(pSurfEvo->pDevEvo, + pSurfEvo->handle, pSurfEvo->size, + pSurfEvo->subDeviceAddress, + SUBDEVICE_MASK_ALL)) { + goto fail; + } + + return pSurfEvo; + + fail: + /* An error occurred -- free the surface */ + FreeLutSurfaceEvoInVidmem(pSurfEvo); + + return NULL; + +} + +static void FreeLutSurfaceEvoInSysmem(NVLutSurfaceEvoPtr pSurfEvo) +{ + NVDevEvoPtr pDevEvo; + + if (pSurfEvo == NULL) { + return; + } + + pDevEvo = pSurfEvo->pDevEvo; + + /* Free display context 
dmas for the surface, if any */ + nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma); + + /* Free the surface */ + if (pSurfEvo->handle) { + NvU32 result; + + if (pSurfEvo->subDeviceAddress[0] != NULL) { + /* + * SOC display devices should only have one subdevice + * (and therefore it is safe to unmap only subDeviceAddress[0]) + * for reasons described in AllocLutSurfaceEvoInSysmem + */ + nvAssert(pDevEvo->numSubDevices == 1); + + result = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfEvo->handle, + pSurfEvo->subDeviceAddress[0], + 0); + if (result != NVOS_STATUS_SUCCESS) { + nvAssert(!"Unmapping LUT surface failed"); + } + pSurfEvo->subDeviceAddress[0] = NULL; + } + + result = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, pSurfEvo->handle); + if (result != NVOS_STATUS_SUCCESS) { + nvAssert(!"Freeing LUT surface failed"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle); + } + + nvFree(pSurfEvo); +} + +static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo) +{ + NvU32 memoryHandle = 0; + void *pBase = NULL; + NvU64 size = 0; + NVLutSurfaceEvoPtr pSurfEvo; + + pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo)); + if (pSurfEvo == NULL) { + return NULL; + } + + pSurfEvo->pDevEvo = pDevEvo; + + size = (sizeof(NVEvoLutDataRec) + 63) & ~63; + + pSurfEvo->size = size; + + memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + if (memoryHandle == 0) { + goto fail; + } + + /* Allocate the LUT memory from sysmem */ + if (!nvRmAllocSysmem(pDevEvo, memoryHandle, NULL, &pBase, size, + NVKMS_MEMORY_ISO)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to allocate LUT memory from sysmem"); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + goto fail; + } + + pSurfEvo->handle = memoryHandle; + + /* Allocate and bind a display context dma */ + pSurfEvo->dispCtxDma = + nvRmEvoAllocateAndBindDispContextDMA(pDevEvo, + pSurfEvo->handle, + NvKmsSurfaceMemoryLayoutPitch, + pSurfEvo->size - 1); + if (!pSurfEvo->dispCtxDma) { + goto fail; + } + + /* + * AllocLutSurfaceEvoInSysmem() will only be called if + * pDevEvo->requiresAllAllocationsInSysmem is TRUE. NVKMS will only set this + * cap bit for SOC display devices, and these devices should only have one + * subdevice. 
+ */ + nvAssert(pDevEvo->numSubDevices == 1); + pSurfEvo->subDeviceAddress[0] = pBase; + + return pSurfEvo; + + fail: + /* An error occurred -- free the surface */ + FreeLutSurfaceEvoInSysmem(pSurfEvo); + + return NULL; +} + +static void FreeLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo) +{ + NVDevEvoPtr pDevEvo; + + if (pSurfEvo == NULL) { + return; + } + + pDevEvo = pSurfEvo->pDevEvo; + + if (pDevEvo->requiresAllAllocationsInSysmem) { + FreeLutSurfaceEvoInSysmem(pSurfEvo); + } else { + FreeLutSurfaceEvoInVidmem(pSurfEvo); + } +} + +static NVLutSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->requiresAllAllocationsInSysmem) { + return AllocLutSurfaceEvoInSysmem(pDevEvo); + } else { + return AllocLutSurfaceEvoInVidmem(pDevEvo); + } +} + +NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, dispIndex, i; + + for (head = 0; head < pDevEvo->numHeads; head++) { + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.head[head].LUT); i++) { + pDevEvo->lut.head[head].LUT[i] = AllocLutSurfaceEvo(pDevEvo); + + if (pDevEvo->lut.head[head].LUT[i] == NULL) { + nvFreeLutSurfacesEvo(pDevEvo); + return FALSE; + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + // No palette has been loaded yet, so disable the LUT. + pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate = FALSE; + pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = FALSE; + pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = FALSE; + } + } + + if (pDevEvo->hal->caps.needDefaultLutSurface) { + pDevEvo->lut.defaultLut = AllocLutSurfaceEvo(pDevEvo); + if (pDevEvo->lut.defaultLut == NULL) { + nvFreeLutSurfacesEvo(pDevEvo); + return FALSE; + } + + pDevEvo->hal->InitDefaultLut(pDevEvo); + } + + return TRUE; +} + +void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 head, i, dispIndex; + NVDispEvoPtr pDispEvo; + + /* Cancel any queued LUT update timers */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + nvCancelLutUpdateEvo(pDispEvo, head); + } + } + + /* wait for any outstanding LUT updates before freeing the surface */ + if (pDevEvo->core) { + nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + } + + if (pDevEvo->lut.defaultLut != NULL) { + FreeLutSurfaceEvo(pDevEvo->lut.defaultLut); + pDevEvo->lut.defaultLut = NULL; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + for (i = 0; i < ARRAY_LEN(pDevEvo->lut.head[head].LUT); i++) { + if (pDevEvo->lut.head[head].LUT[i] != NULL) { + FreeLutSurfaceEvo(pDevEvo->lut.head[head].LUT[i]); + pDevEvo->lut.head[head].LUT[i] = NULL; + } + } + } +} + +void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo, + const NVEvoLutDataRec *pLUTBuffer, + NVDispEvoPtr pDispEvo) +{ + const NvU32* data = (const NvU32*)pLUTBuffer; + size_t size = sizeof(*pLUTBuffer); + const int sd = pDispEvo->displayOwner; + NvU32 *dst; + const NvU32 *src; + int dword; + + if (pSurfEvo == NULL) { + nvAssert(pSurfEvo); + return; + } + + nvAssert(pSurfEvo->subDeviceAddress[sd]); + + /* The size to copy should not be larger than the surface. */ + nvAssert(size <= pSurfEvo->size); + + /* The source, destination, and size should be 4-byte aligned. 
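+ * The copy loop below moves the data one 32-bit word at a time through the
+ * CPU mapping, so a size that is not a multiple of 4 would leave trailing
+ * bytes uncopied.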
*/ + nvAssert((((NvUPtr)data) & 0x3) == 0); + nvAssert((((NvUPtr)pSurfEvo->subDeviceAddress[sd]) & 0x3) == 0); + nvAssert((size % 4) == 0); + + src = data; + dst = (NvU32*)pSurfEvo->subDeviceAddress[sd]; + + for (dword = 0; dword < (size/4); dword++) { + *(dst++) = *(src++); + } +} diff --git a/src/nvidia-modeset/src/nvkms-modepool.c b/src/nvidia-modeset/src/nvkms-modepool.c new file mode 100644 index 000000000..17e5a6e8c --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-modepool.c @@ -0,0 +1,1965 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-modepool.h" +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-hdmi.h" +#include "nvkms-utils.h" +#include "nvkms-3dvision.h" +#include "nvkms-evo.h" +#include "nvkms-ioctl.h" + +#include "nv_mode_timings_utils.h" +#include "nv_vasprintf.h" + +#include "nvkms-prealloc.h" + +#include "nvkms-api.h" + +typedef struct { + enum NvKmsModeSource source; + NvBool patchedStereoTimings; +} EvoValidateModeFlags; + +static NvBool +ValidateModeIndexEdid(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex); +static NvBool +ValidateModeIndexVesa(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex); + +static void LogModeValidationEnd(const NVDispEvoRec *pDispEvo, + NVEvoInfoStringPtr pInfoString, + const char *failureReasonFormat, ...) 
+ __attribute__ ((format (printf, 3, 4))); + +static NvBool ConstructModeTimingsMetaData( + NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsMode *pKmsMode, + EvoValidateModeFlags *pFlags, + NVT_VIDEO_INFOFRAME_CTRL *pInfoFrameCtrl); + +static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs, + struct NvKmsUsageBounds *pModeUsage); + +#define NV_MAX_MODE_NAME_LEN 64 +#define NV_MAX_MODE_DESCRIPTION_LEN 128 + +/* A single frequency, at its longest, will have the format: "aaa.bbb" */ +#define NV_MAX_FREQUENCY_STRING_LEN 8 + +/* A range element, at its longest, will have the format: "aaa.bbb-ccc.ddd, " */ +#define NV_MAX_RANGE_ELEMENT_STRING_LEN 18 +#define NV_MAX_RANGE_STRING_LEN \ + (NV_MAX_RANGE_ELEMENT_STRING_LEN * NVKMS_MAX_VALID_SYNC_RANGES) + + +void +nvValidateModeIndex(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeIndexRequest *pRequest, + struct NvKmsValidateModeIndexReply *pReply) +{ + const struct NvKmsModeValidationParams *pParams = &pRequest->modeValidation; + const NvU32 requestedModeIndex = pRequest->modeIndex; + NVEvoInfoStringRec infoString; + NvU32 currentModeIndex = 0; + NvBool done; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + nvInitInfoString(&infoString, nvKmsNvU64ToPointer(pRequest->pInfoString), + pRequest->infoStringSize); + + done = ValidateModeIndexEdid(pDpyEvo, pParams, pReply, &infoString, + requestedModeIndex, ¤tModeIndex); + if (done) { + goto out; + } + + done = ValidateModeIndexVesa(pDpyEvo, pParams, pReply, &infoString, + requestedModeIndex, ¤tModeIndex); + if (done) { + goto out; + } + + pReply->end = 1; + return; + +out: + if (pRequest->infoStringSize > 0) { + /* Add 1 for the final '\0' */ + nvAssert((infoString.length + 1) <= pRequest->infoStringSize); + pReply->infoStringLenWritten = infoString.length + 1; + } +} + + +void +nvValidateModeEvo(NVDpyEvoPtr pDpyEvo, + const struct NvKmsValidateModeRequest *pRequest, + struct NvKmsValidateModeReply *pReply) +{ + NVEvoInfoStringRec infoString; + struct NvKmsMode kmsMode = { + .timings = pRequest->mode.timings, + }; + EvoValidateModeFlags evoFlags; + NVT_VIDEO_INFOFRAME_CTRL dummyInfoFrameCtrl; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + if (!ConstructModeTimingsMetaData(pDpyEvo, + &pRequest->modeValidation, + &kmsMode, + &evoFlags, + &dummyInfoFrameCtrl)) { + pReply->valid = FALSE; + return; + } + + nvInitInfoString(&infoString, nvKmsNvU64ToPointer(pRequest->pInfoString), + pRequest->infoStringSize); + + pReply->valid = ValidateMode(pDpyEvo, + &kmsMode, + &evoFlags, + &pRequest->modeValidation, + &infoString, + &pReply->validSyncs, + &pReply->modeUsage); + + if (infoString.length > 0) { + /* Add 1 for the final '\0' */ + nvAssert((infoString.length + 1) <= pRequest->infoStringSize); + pReply->infoStringLenWritten = infoString.length + 1; + } +} + + +/*! + * Determine whether this mode is HDMI 3D by checking the HDMI 3D + * support map parsed from the CEA-861 EDID extension. + * + * Currently only frame packed 3D modes are supported, as we rely on + * Kepler's HW support for this mode. + */ +static NvBool GetHdmi3DValue(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + /* This should only be used in paths where we have a valid parsed EDID. 
*/ + + nvAssert(pDpyEvo->parsedEdid.valid); + + if ((pParams->stereoMode == NVKMS_STEREO_HDMI_3D) && + (NVT_GET_TIMING_STATUS_TYPE(pTiming->etc.status) == + NVT_TYPE_EDID_861ST) && + nvDpyEvoSupportsHdmi3D(pDpyEvo)) { + + const NVT_EDID_INFO *pInfo = &pDpyEvo->parsedEdid.info; + int i; + + for (i = 0; i < pInfo->Hdmi3Dsupport.total; i++) { + HDMI3DDETAILS hdmi3DMap = pInfo->Hdmi3Dsupport.map[i]; + NvU32 vic = NVT_GET_TIMING_STATUS_SEQ(pTiming->etc.status); + if ((vic == hdmi3DMap.Vic) && + (hdmi3DMap.StereoStructureMask & + NVT_HDMI_3D_SUPPORTED_FRAMEPACK_MASK)) { + return TRUE; + } + } + } + + return FALSE; +} + +/* + * For Kepler HW HDMI 1.4 frame packed stereo, HW combines two flips + * into a single top-down double-height frame, and it needs a + * doubled refresh rate to accommodate this. + */ +static void UpdateNvModeTimingsForHdmi3D(NvModeTimings *pModeTimings, + NvBool enableHdmi3D) +{ + if (enableHdmi3D) { + pModeTimings->pixelClockHz *= 2; + pModeTimings->RRx1k *= 2; + } else { + nvAssert((pModeTimings->pixelClockHz % 2) == 0); + pModeTimings->pixelClockHz /= 2; + + nvAssert((pModeTimings->RRx1k % 2) == 0); + pModeTimings->RRx1k /= 2; + } +} + +/* + * DP 1.3 decimated YUV 4:2:0 mode is required if: + * + * - The GPU and monitor both support it. + * - Either the monitor doesn't support RGB 4:4:4 scanout of this mode, or + * the user prefers YUV 4:2:0 scanout when possible. + */ +static NvBool DpYuv420Required(const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + const NVDevEvoRec *pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + const NvBool monitorSupports444 = + IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.rgb444.bpcs); + + if (!pDevEvo->caps.supportsDP13) { + // The GPU doesn't support YUV420. + return FALSE; + } + + if (monitorSupports444) { + // The GPU and monitor both support YUV420 and RGB444; use RGB444 + // by default, but allow the user to prefer YUV420 mode in this + // decision. + return pParams->preferYUV420; + } else { + // The GPU and monitor both support YUV420, and the monitor doesn't + // support RGB444, so we have to fall back to YUV420. + return TRUE; + } +} + +/* + * Return whether this mode requires SW, HW, or no YUV 4:2:0 compression given + * this GPU, display, connector type, and user preference. + */ +static enum NvYuv420Mode GetYUV420Value( + const NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const NVT_TIMING *pTiming) +{ + if (!IS_BPC_SUPPORTED_COLORFORMAT(pTiming->etc.yuv420.bpcs) || + ((pTiming->HSyncWidth & 1) != 0) || + ((pTiming->HFrontPorch & 1) != 0) || + ((pTiming->HVisible & 1) != 0) || + ((pTiming->HTotal & 1) != 0) || + ((pTiming->VVisible & 1) != 0)) { + // If this mode doesn't support YUV420, then the GPU caps or + // user preference doesn't matter. + return NV_YUV420_MODE_NONE; + } else if (nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo)) { + if (DpYuv420Required(pDpyEvo, pParams, pTiming)) { + return NV_YUV420_MODE_SW; + } else { + return NV_YUV420_MODE_NONE; + } + } else if (nvDpyIsHdmiEvo(pDpyEvo)) { + /* + * YUV 4:2:0 compression is necessary for HDMI 2.0 4K@60hz modes + * unless the GPU and display both support HDMI 2.0 4K@60hz + * uncompressed RGB 4:4:4 (6G mode). A mode validation override + * may be used to allow RGB 4:4:4 mode if the GPU supports it + * even if the display doesn't claim support in the EDID. 
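+ * When compression is needed, hardware YUV 4:2:0 decimation is used if the
+ * head supports it; otherwise the software (emulated) path is chosen, whose
+ * halved horizontal timings and pixel clock are accounted for later during
+ * validation.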
+ */ + if (!nvHdmi204k60HzRGB444Allowed(pDpyEvo, pParams, pTiming) || + pParams->preferYUV420) { + + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + // XXX assume the heads have equal capabilities + // XXX assume the gpus have equal capabilities + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[0]; + NVEvoCapabilitiesPtr pEvoCaps = &pEvoSubDev->capabilities; + NVEvoHeadCaps *pHeadCaps = &pEvoCaps->head[0]; + + if (pHeadCaps->supportsHDMIYUV420HW) { + return NV_YUV420_MODE_HW; + } else { + return NV_YUV420_MODE_SW; + } + } else { + return NV_YUV420_MODE_NONE; + } + } else { + return NV_YUV420_MODE_NONE; + } +} + + +/*! + * Scan through the EDID-specified modes, counting each one. If the + * count reaches requestedModeIndex, then validate that mode. + * + * \param[in] pDpyEvo The dpy whose EDID's modes are considered. + * \param[in] pParams The NvKmsModeValidationParams. + * \param[out] pReply The NvKmsValidateModeIndexReply; if we found + * requestedModeIndex, pReply->valid will store if + * the mode was valid. + * \param[in] requestedModeIndex The index of the mode we are looking for. + * \param[in,out] pCurrentModeIndex A running total of the number of modes + * we have considered. This will be incremented + * by the number of modes considered. + * + * \return If we found the mode with index == requestedModeIndex, + * return TRUE. Otherwise, return FALSE. + */ +static NvBool +ValidateModeIndexEdid(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex) +{ + const char *description; + int i; + NvBool is3DVisionStereo = nvIs3DVisionStereoEvo(pParams->stereoMode); + + /* if no EDID, we have nothing to do here */ + + if (!pDpyEvo->parsedEdid.valid) { + return FALSE; + } + + /* Scan through all EDID modes. */ + + for (i = 0; i < pDpyEvo->parsedEdid.info.total_timings; i++) { + + NVT_TIMING timing = pDpyEvo->parsedEdid.info.timing[i]; + EvoValidateModeFlags flags; + struct NvKmsMode kmsMode = { }; + + /* Skip this mode if it was marked invalid by nvtiming. */ + + if (timing.etc.status == 0) { + continue; + } + + /* + * If *pCurrentModeIndex matches requestedModeIndex, then + * validate the mode. Otherwise, go on to the next mode. + */ + if (*pCurrentModeIndex != requestedModeIndex) { + (*pCurrentModeIndex)++; + continue; + } + + nvkms_memset(&flags, 0, sizeof(flags)); + flags.source = NvKmsModeSourceEdid; + + /* patch the mode for 3DVision */ + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + nvPatch3DVisionModeTimingsEvo(&timing, pDpyEvo, pInfoString)) { + flags.patchedStereoTimings = TRUE; + } + + if ((NVT_GET_TIMING_STATUS_TYPE(timing.etc.status) == + NVT_TYPE_EDID_861ST) && + (NVT_GET_CEA_FORMAT(timing.etc.status) > 0) && + (timing.etc.name[0] != '\0')) { + description = (const char *) timing.etc.name; + } else { + description = NULL; + } + + /* convert from the EDID's NVT_TIMING to NvModeTimings */ + + NVT_TIMINGtoNvModeTimings(&timing, &kmsMode.timings); + + /* + * Determine whether this mode is a HDMI 3D by checking the HDMI 3D + * support map parsed from the CEA-861 EDID extension. + * + * Currently only frame packed 3D modes are supported, as we rely on + * Kepler's HW support for this mode. 
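+ * Frame packing stacks the left and right eye images into a single
+ * double-height frame, which is why UpdateNvModeTimingsForHdmi3D() below
+ * doubles the pixel clock and refresh rate for such modes.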
+ */ + kmsMode.timings.hdmi3D = GetHdmi3DValue(pDpyEvo, pParams, &timing); + + if (kmsMode.timings.hdmi3D) { + UpdateNvModeTimingsForHdmi3D(&kmsMode.timings, TRUE); + } + + kmsMode.timings.yuv420Mode = GetYUV420Value(pDpyEvo, pParams, &timing); + + /* validate the mode */ + + pReply->valid = ValidateMode(pDpyEvo, + &kmsMode, + &flags, + pParams, + pInfoString, + &pReply->validSyncs, + &pReply->modeUsage); + /* + * if this is a detailed timing, then flag it as such; this + * will be used later when searching for the AutoSelect mode + */ + + if (NVT_GET_TIMING_STATUS_TYPE(timing.etc.status) == + NVT_TYPE_EDID_DTD) { + + /* + * if the EDID indicates that the first detailed timing is + * preferred, then flag it is as such; this will be used + * later when searching for the AutoSelect mode + * + * Note that the sequence number counts from 1 + */ + + if ((pDpyEvo->parsedEdid.info.u.feature_ver_1_3.preferred_timing_is_native) && + NVT_GET_TIMING_STATUS_SEQ(timing.etc.status) == 1) { + + pReply->preferredMode = TRUE; + } + } + + /* + * If the NVT_TIMING was patched for 3DVision above, then the + * NvModeTimings generated from it, when passed to + * nvFindEdidNVT_TIMING() during nvValidateModeForModeset(), + * won't match the original EDID NVT_TIMING. Rebuild + * NvModeTimings based on the original (non-3DVision-patched) + * NVT_TIMING from the EDID, and return that to the client. + * When the NvModeTimings is passed to + * nvValidateModeForModeset(), the 3DVision patching will be + * performed again. + */ + if (flags.patchedStereoTimings) { + enum NvYuv420Mode yuv420Mode = kmsMode.timings.yuv420Mode; + NvBool hdmi3D = kmsMode.timings.hdmi3D; + + NVT_TIMINGtoNvModeTimings(&pDpyEvo->parsedEdid.info.timing[i], + &kmsMode.timings); + kmsMode.timings.yuv420Mode = yuv420Mode; + kmsMode.timings.hdmi3D = hdmi3D; + + if (hdmi3D) { + UpdateNvModeTimingsForHdmi3D(&kmsMode.timings, TRUE); + } + } + + pReply->mode.timings = kmsMode.timings; + pReply->source = NvKmsModeSourceEdid; + + if (description != NULL) { + nvAssert(nvkms_strlen(description) < sizeof(pReply->description)); + nvkms_strncpy(pReply->description, description, + sizeof(pReply->description)); + pReply->description[sizeof(pReply->description) - 1] = '\0'; + } + + nvBuildModeName(kmsMode.timings.hVisible, kmsMode.timings.vVisible, + pReply->mode.name, sizeof(pReply->mode.name)); + return TRUE; + } + + /* No matching mode found. 
*/ + return FALSE; +} + + +// NOTE: does not include timings for 848x480, 1280x768, 1360x768, +// 1400x1050, 1440x900, 1680x1050, 1920x1200 + +static const NvModeTimings VesaModesTable[] = { + /* + * { RRx1k, PClkHz; + * hVisible, hSyncStart, hSyncEnd, hTotal, + * hSkew, + * vVisible, vSyncStart, vSyncEnd, vTotal, + * { widthMM, heightMM }, + * interlaced, doubleScan, + * hSyncPos, hSyncNeg, vSyncPos, vSyncNeg, hdmi3D, yuv420 }, + */ + + // VESA Standard 640x350 @ 85Hz + { 85080, 31500000, + 640, 672, 736, 832, + 0, + 350, 382, 385, 445, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 640x400 @ 85Hz + { 85080, 31500000, + 640, 672, 736, 832, + 0, + 400, 401, 404, 445, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 720x400 @ 85Hz + { 85039, 35500000, + 720, 756, 828, 936, + 0, + 400, 401, 404, 446, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // Industry Standard 640x480 @ 60Hz + { 59940, 25175000, + 640, 656, 752, 800, + 0, + 480, 490, 492, 525, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 640x480 @ 72Hz + { 72809, 31500000, + 640, 664, 704, 832, + 0, + 480, 489, 492, 520, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 640x480 @ 75Hz + { 75000, 31500000, + 640, 656, 720, 840, + 0, + 480, 481, 484, 500, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 640x480 @ 85Hz + { 85008, 36000000, + 640, 696, 752, 832, + 0, + 480, 481, 484, 509, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 56Hz + { 56250, 36000000, + 800, 824, 896, 1024, + 0, + 600, 601, 603, 625, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 60Hz + { 60317, 40000000, + 800, 840, 968, 1056, + 0, + 600, 601, 605, 628, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 72Hz + { 72188, 50000000, + 800, 856, 976, 1040, + 0, + 600, 637, 643, 666, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 75Hz + { 75000, 49500000, + 800, 816, 896, 1056, + 0, + 600, 601, 604, 625, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 800x600 @ 85Hz + { 85137, 56300000, + 800, 832, 896, 1048, + 0, + 600, 601, 604, 631, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1024x768i @ 87Hz + { 86958, 44900000, + 1024, 1032, 1208, 1264, + 0, + 768, 768, 776, 817, + { 0, 0 }, + TRUE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1024x768 @ 60Hz + { 60004, 65000000, + 1024, 1048, 1184, 1344, + 0, + 768, 771, 777, 806, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 1024x768 @ 70Hz + { 70069, 75000000, + 1024, 1048, 1184, 1328, + 0, + 768, 771, 777, 806, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, FALSE, TRUE, FALSE, FALSE }, + + // VESA Standard 1024x768 @ 75Hz + { 75029, 78750000, + 1024, 1040, 1136, 1312, + 0, + 768, 769, 772, 800, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1024x768 @ 85Hz + { 84997, 94500000, + 1024, 1072, 1168, 1376, + 0, + 768, 769, 772, 808, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1152x864 @ 75Hz + { 75000, 
108000000, + 1152, 1216, 1344, 1600, + 0, + 864, 865, 868, 900, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x960 @ 60Hz + { 60000, 108000000, + 1280, 1376, 1488, 1800, + 0, + 960, 961, 964, 1000, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x960 @ 85Hz + { 85002, 148500000, + 1280, 1344, 1504, 1728, + 0, + 960, 961, 964, 1011, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x1024 @ 60Hz + { 60020, 108000000, + 1280, 1328, 1440, 1688, + 0, + 1024, 1025, 1028, 1066, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x1024 @ 75Hz + { 75025, 135000000, + 1280, 1296, 1440, 1688, + 0, + 1024, 1025, 1028, 1066, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1280x1024 @ 85Hz + { 85024, 157500000, + 1280, 1344, 1504, 1728, + 0, + 1024, 1025, 1028, 1072, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 60Hz + { 60000, 162000000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 65Hz + { 65000, 175500000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 70Hz + { 70000, 189000000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 75Hz + { 75000, 202500000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1600x1200 @ 85Hz + { 85000, 229500000, + 1600, 1664, 1856, 2160, + 0, + 1200, 1201, 1204, 1250, + { 0, 0 }, + FALSE, FALSE, + TRUE, FALSE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1792x1344 @ 60Hz + { 60014, 204800000, + 1792, 1920, 2120, 2448, + 0, + 1344, 1345, 1348, 1394, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1792x1344 @ 75Hz + { 74997, 261000000, + 1792, 1888, 2104, 2456, + 0, + 1344, 1345, 1348, 1417, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1856x1392 @ 60Hz + { 60009, 218300000, + 1856, 1952, 2176, 2528, + 0, + 1392, 1393, 1396, 1439, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1856x1392 @ 75Hz + { 75000, 288000000, + 1856, 1984, 2208, 2560, + 0, + 1392, 1393, 1396, 1500, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1920x1440 @ 60Hz + { 60000, 234000000, + 1920, 2048, 2256, 2600, + 0, + 1440, 1441, 1444, 1500, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, + + // VESA Standard 1920x1440 @ 75Hz + { 75000, 297000000, + 1920, 2064, 2288, 2640, + 0, + 1440, 1441, 1444, 1500, + { 0, 0 }, + FALSE, FALSE, + FALSE, TRUE, TRUE, FALSE, FALSE, FALSE }, +}; + + +/*! + * Scan through the VESA Standard modes, counting each one. If the + * count reaches requestedModeIndex, then validate that mode. + * + * \param[in] pDpyEvo The dpy for whom the modes are considered. + * \param[in] pParams The NvKmsModeValidationParams. 
+ * \param[out] pReply The NvKmsValidateModeIndexReply; if we found + * requestedModeIndex, pReply->valid will store if + * the mode was valid. + * \param[in] requestedModeIndex The index of the mode we are looking for. + * \param[in,out] pCurrentModeIndex A running total of the number of modes + * we have considered. This will be incremented + * by the number of modes considered. + * + * \return If we found the mode with index == requestedModeIndex, + * return TRUE. Otherwise, return FALSE. + */ +static NvBool +ValidateModeIndexVesa(NVDpyEvoPtr pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsValidateModeIndexReply *pReply, + NVEvoInfoStringPtr pInfoString, + const NvU32 requestedModeIndex, + NvU32 *pCurrentModeIndex) +{ + int i; + + for (i = 0; i < ARRAY_LEN(VesaModesTable); i++) { + struct NvKmsMode kmsMode = { }; + EvoValidateModeFlags flags; + + /* + * If *pCurrentModeIndex matches requestedModeIndex, then + * validate the mode. Otherwise, go on to the next mode. + */ + if (*pCurrentModeIndex != requestedModeIndex) { + (*pCurrentModeIndex)++; + continue; + } + + kmsMode.timings = VesaModesTable[i]; + + nvkms_memset(&flags, 0, sizeof(flags)); + flags.source = NvKmsModeSourceVesa; + + /* is this mode valid? */ + pReply->valid = ValidateMode(pDpyEvo, + &kmsMode, + &flags, + pParams, + pInfoString, + &pReply->validSyncs, + &pReply->modeUsage); + + pReply->mode.timings = kmsMode.timings; + pReply->source = NvKmsModeSourceVesa; + + nvBuildModeName(VesaModesTable[i].hVisible, + VesaModesTable[i].vVisible, + pReply->mode.name, sizeof(pReply->mode.name)); + return TRUE; + } + + /* No matching mode found. */ + return FALSE; +} + + +/*! + * Return if the given NvModeTimings match any entry in VesaModesTable[]. + */ +static NvBool IsVesaMode(const NvModeTimings *pModeTimings, + const struct NvKmsModeValidationParams *pParams) +{ + int i; + + for (i = 0; i < ARRAY_LEN(VesaModesTable); i++) { + if (NvModeTimingsMatch(&VesaModesTable[i], pModeTimings, + TRUE /* ignoreSizeMM */, + ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK) != 0x0) + /* ignoreRRx1k */)) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Write to pInfoString with information about the current mode that + * we are validating; called from the beginning of ValidateMode(); + * LogModeValidationEnd() should be called at the end of + * ValidateMode() to report whether the mode was validated. + */ + +static void LogModeValidationBegin(NVEvoInfoStringPtr pInfoString, + const NvModeTimings *pModeTimings, + const char *modeName) +{ + nvEvoLogInfoString(pInfoString, "%d x %d @ %d Hz%s", + pModeTimings->hVisible, + pModeTimings->vVisible, + NV_U32_KHZ_TO_HZ(pModeTimings->RRx1k), + pModeTimings->hdmi3D ? " (HDMI 3D)" : ""); + + nvEvoLogModeValidationModeTimings(pInfoString, pModeTimings); +} + + +/*! + * Append to pInfoString with any mode validation failure. + */ +static void LogModeValidationEnd(const NVDispEvoRec *pDispEvo, + NVEvoInfoStringPtr pInfoString, + const char *failureReasonFormat, ...) +{ + /* expand any varargs, and print the mode validation result */ + + if (failureReasonFormat) { + char *buf; + NV_VSNPRINTF(buf, failureReasonFormat); + nvEvoLogInfoString(pInfoString, + "Mode is rejected: %s.", + buf ? buf : "Unknown failure"); + nvFree(buf); + } +} + + +/*! + * Print mode timings to the NVEvoInfoStringPtr. 
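+ * For HDMI 3D modes the pixel clock is reported halved (its value before
+ * frame-packing doubling) and annotated with "(HDMI 3D)".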
+ */ +void nvEvoLogModeValidationModeTimings(NVEvoInfoStringPtr + pInfoString, + const NvModeTimings *pModeTimings) +{ + const char *extra; + NvU32 hdmi3DPixelClock = HzToKHz(pModeTimings->pixelClockHz); + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + nvEvoLogInfoString(pInfoString, " Pixel Clock : " + NV_FMT_DIV_1000_POINT_2 " MHz%s", + NV_VA_DIV_1000_POINT_2(hdmi3DPixelClock), + pModeTimings->hdmi3D ? " (HDMI 3D)" : ""); + + nvEvoLogInfoString(pInfoString, " HRes, HSyncStart : %4d, %4d", + pModeTimings->hVisible, + pModeTimings->hSyncStart); + + nvEvoLogInfoString(pInfoString, " HSyncEnd, HTotal : %4d, %4d", + pModeTimings->hSyncEnd, + pModeTimings->hTotal); + + nvEvoLogInfoString(pInfoString, " VRes, VSyncStart : %4d, %4d", + pModeTimings->vVisible, + pModeTimings->vSyncStart); + + nvEvoLogInfoString(pInfoString, " VSyncEnd, VTotal : %4d, %4d", + pModeTimings->vSyncEnd, + pModeTimings->vTotal); + + nvEvoLogInfoString(pInfoString, " Sync Polarity : %s%s%s%s", + pModeTimings->hSyncPos ? "+H " : "", + pModeTimings->hSyncNeg ? "-H " : "", + pModeTimings->vSyncPos ? "+V " : "", + pModeTimings->vSyncNeg ? "-V " : ""); + + if (pModeTimings->interlaced && pModeTimings->doubleScan) { + extra = "Interlace DoubleScan"; + } else if (pModeTimings->interlaced) { + extra = "Interlace"; + } else if (pModeTimings->doubleScan) { + extra = "DoubleScan"; + } else { + extra = NULL; + } + + if (extra) { + nvEvoLogInfoString(pInfoString, " Extra : %s", extra); + } +} + + +/*! + * Adjust the given value by the given percentage, using integer math. + * + * The 'percentage' argument is multiplied by 100 by the caller. E.g., + * + * percentage=50 ==> 50% + * percentage=110 ==> 110% + * + * So, divide by 100.0: + * + * value * percentage / 100 + */ +static NvU32 Percentage(const NvU32 value, const NvU32 percentage) +{ + return axb_div_c(value, percentage, 100); +} + +/*! + * Write the given frequency to the given buffer. + * + * The frequency value is assumed to have been multiplied by 1000, + * such that 'value % 1000' gives the fractional part, and value/1000 + * gives the integer part. + * + * The buffer is assumed to be (at least) NV_MAX_FREQUENCY_STRING_LEN + * bytes long. + * + * Note that to meet the size assumptions made in the + * NV_MAX_FREQUENCY_STRING_LEN definition, the integer portion of the + * frquency value is clamped to 3 digits. + */ +static int +FrequencyToString(const NvU32 value, char *buffer) +{ + int n = nvkms_snprintf(buffer, NV_MAX_FREQUENCY_STRING_LEN, + "%d.%03d", + /* mod 1000, to limit to 3 digits */ + (value / 1000) % 1000, + value % 1000); + + buffer[NV_MAX_FREQUENCY_STRING_LEN - 1] = '\0'; + + return n; +} + +/*! + * Write the given NvKmsModeValidationFrequencyRanges to the given buffer. 
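+ * For example, a 50.000-75.000 range followed by a single 85.000 value is
+ * written as "50.000-75.000, 85.000"; ranges whose low and high bounds match
+ * collapse to a single frequency.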
+ */ +static void +RangesToString(const struct NvKmsModeValidationFrequencyRanges *pRanges, + char buffer[NV_MAX_RANGE_STRING_LEN]) +{ + char *s; + int i, n; + + s = buffer; + + for (i = 0; i < pRanges->numRanges; i++) { + if (pRanges->range[i].high == pRanges->range[i].low) { + s += FrequencyToString(pRanges->range[i].high, s); + } else { + char highString[NV_MAX_FREQUENCY_STRING_LEN]; + char lowString[NV_MAX_FREQUENCY_STRING_LEN]; + + FrequencyToString(pRanges->range[i].high, highString); + FrequencyToString(pRanges->range[i].low, lowString); + + n = buffer + NV_MAX_RANGE_STRING_LEN - s; + s += nvkms_snprintf(s, n, "%s-%s", lowString, highString); + } + + if (i < (pRanges->numRanges - 1)) { + n = buffer + NV_MAX_RANGE_STRING_LEN - s; + s += nvkms_snprintf(s, n, ", "); + } + } + + buffer[NV_MAX_RANGE_STRING_LEN - 1] = '\0'; +} + +static NvBool ValidateModeTimings( + NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs) +{ + int i; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 overrides = pParams->overrides; + const NvBool is3DVisionStereo = nvIs3DVisionStereoEvo(pParams->stereoMode); + const char *modeName = pKmsMode->name; + const NvModeTimings *pModeTimings = &pKmsMode->timings; + char localModeName[NV_MAX_MODE_NAME_LEN]; + + if (modeName[0] == '\0') { + nvBuildModeName(pModeTimings->hVisible, pModeTimings->vVisible, + localModeName, sizeof(localModeName)); + modeName = localModeName; + } + + /* Compute the validSyncs to use during validation. */ + + *pValidSyncs = pParams->validSyncs; + nvDpySetValidSyncsEvo(pDpyEvo, pValidSyncs); + + if (pModeTimings->interlaced) { + NVEvoSubDevPtr pEvoSubDev = &pDevEvo->gpus[pDispEvo->displayOwner]; + if (!pEvoSubDev->capabilities.misc.supportsInterlaced) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Interlaced modes are not supported on this GPU"); + return FALSE; + } + } + + if ((flags->source != NvKmsModeSourceEdid) && + (overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_EDID_MODES) == 0) { + + NvBool continuousFrequency = TRUE; + + /* + * EDID 1.3 defines the "GTF Supported" flag like this: + * + * If this bit is set to 1, the display supports timings based + * on the GTF standard. + * + * We interpret this to mean that if the bit is not set, then + * the display device only supports modes listed in the EDID. + */ + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.info.version == NVT_EDID_VER_1_3)) { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_3.support_gtf; + } + + /* + * EDID 1.4 Release A, Revision 2; Note 5 in section 3.6.4: + * + * If bit 0 is set to 0, then the display is non-continuous + * frequency (multi-mode) and is only specified to accept the + * video timing formats that are listed in BASE EDID and + * certain EXTENSION Blocks. 
+ */ + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.info.version >= NVT_EDID_VER_1_4)) { + if (pDpyEvo->parsedEdid.info.input.isDigital) { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_4_digital.continuous_frequency; + } else { + continuousFrequency = + pDpyEvo->parsedEdid.info.u.feature_ver_1_4_analog.continuous_frequency; + } + } + + if (!continuousFrequency) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Only EDID-provided modes are allowed on %s (continuous frequency modes not allowed)", + pDpyEvo->name); + return FALSE; + } + + /* + * By default, we only allow EDID modes when driving digital + * protocol. + */ + if (pDpyEvo->parsedEdid.valid && + pDpyEvo->parsedEdid.info.input.isDigital) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Only EDID-provided modes are allowed on %s", + pDpyEvo->name); + return FALSE; + } + } + + /* Throw out modes that will break downstream assumptions */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_TOTAL_SIZE_CHECK) == 0) { + + if (pModeTimings->hVisible > pModeTimings->hSyncStart) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's visible horizontal size (%d) exceeds the horizontal sync start (%d)", + pModeTimings->hVisible, + pModeTimings->hSyncStart); + return FALSE; + } + + if (pModeTimings->hSyncStart > pModeTimings->hSyncEnd) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's horizontal sync start (%d) exceeds the horizontal sync end (%d)", + pModeTimings->hSyncStart, + pModeTimings->hSyncEnd); + return FALSE; + } + + if (pModeTimings->hSyncEnd > pModeTimings->hTotal) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's horizontal sync end (%d) exceeds the horizontal total size (%d)", + pModeTimings->hSyncEnd, + pModeTimings->hTotal); + return FALSE; + } + + if (pModeTimings->vVisible > pModeTimings->vSyncStart) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's visible vertical size (%d) exceeds the vertical sync start (%d)", + pModeTimings->vVisible, + pModeTimings->vSyncStart); + return FALSE; + } + + if (pModeTimings->vSyncStart > pModeTimings->vSyncEnd) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's vertical sync start (%d) exceeds the vertical sync end (%d)", + pModeTimings->vSyncStart, + pModeTimings->vSyncEnd); + return FALSE; + } + + if (pModeTimings->vSyncEnd > pModeTimings->vTotal) { + LogModeValidationEnd(pDispEvo, pInfoString, + "This mode's vertical sync end (%d) exceeds the vertical total size (%d)", + pModeTimings->vSyncEnd, + pModeTimings->vTotal); + return FALSE; + } + } + + /* reject modes with too high pclk */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_PCLK_CHECK) == 0) { + + NvU32 maxPixelClockKHz = pDpyEvo->maxPixelClockKHz; + NvU32 realPixelClock = HzToKHz(pModeTimings->pixelClockHz); + if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) { + realPixelClock /= 2; + } + + if (realPixelClock > maxPixelClockKHz) { + NvU32 hdmi3DPixelClock = realPixelClock; + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + (realPixelClock - maxPixelClockKHz < 5000)) { + + nvAssert(!pModeTimings->hdmi3D); + + nvEvoLogInfoString(pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) is slightly higher than Display Device maximum (" NV_FMT_DIV_1000_POINT_1 " MHz), but is within tolerance for 3D Vision Stereo.", + NV_VA_DIV_1000_POINT_1(realPixelClock), + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + + } else { + + LogModeValidationEnd(pDispEvo, 
pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s) too high for Display Device (Max: " NV_FMT_DIV_1000_POINT_1 " MHz)", + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + return FALSE; + } + } + } + + /* check against the EDID's max pclk */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_EDID_MAX_PCLK_CHECK) == 0) { + + NvU32 realPixelClock = HzToKHz(pModeTimings->pixelClockHz); + if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) { + realPixelClock /= 2; + } + + if (pDpyEvo->parsedEdid.valid && + (pDpyEvo->parsedEdid.limits.max_pclk_10khz != 0) && + (realPixelClock > + (pDpyEvo->parsedEdid.limits.max_pclk_10khz * 10))) { + + NvU32 hdmi3DPixelClock = realPixelClock; + NvU32 maxPixelClockKHz = pDpyEvo->parsedEdid.limits.max_pclk_10khz * 10; + + if (pModeTimings->hdmi3D) { + hdmi3DPixelClock /= 2; + } + + /* + * If this mode came from the EDID, then something is odd + * (see bug 336963); print a warning, but continue + */ + + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + (realPixelClock - maxPixelClockKHz < 5000)) { + + nvAssert(!pModeTimings->hdmi3D); + + nvEvoLogInfoString(pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) is slightly higher than EDID specified maximum (" NV_FMT_DIV_1000_POINT_1 " MHz), but is within tolerance for 3D Vision Stereo.", + NV_VA_DIV_1000_POINT_1(realPixelClock), + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + + } else if ((flags->source == NvKmsModeSourceEdid) && + ((overrides & + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) { + nvEvoLogInfoString(pInfoString, + "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; " + "however, the EDID's reported maximum PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz) would exclude this mode's PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s); " + "ignoring EDID maximum PixelClock check for mode \"%s\".", + pDpyEvo->name, modeName, + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz), + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + modeName); + } else { + + LogModeValidationEnd(pDispEvo, pInfoString, + "PixelClock (" NV_FMT_DIV_1000_POINT_1 " MHz%s) too high for EDID (EDID Max: " NV_FMT_DIV_1000_POINT_1" MHz)", + NV_VA_DIV_1000_POINT_1(hdmi3DPixelClock), + pModeTimings->hdmi3D ? + ", doubled for HDMI 3D" : "", + NV_VA_DIV_1000_POINT_1(maxPixelClockKHz)); + return FALSE; + } + } + } + + /* check the mode against the max size */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_MAX_SIZE_CHECK) == 0) { + + const NvU32 maxHeight = pDevEvo->caps.maxRasterHeight; + const NvU32 maxWidth = pDevEvo->caps.maxRasterWidth; + + NvU16 realHTotal = pModeTimings->hTotal; + if (pModeTimings->yuv420Mode == NV_YUV420_MODE_SW) { + realHTotal /= 2; + } + + // With YUV420 modes, we want to use the real half-width hTotal + // for validation, but report the full-width value in the log. 
+ if ((realHTotal > maxWidth) || + (pModeTimings->vTotal > maxHeight)) { + + LogModeValidationEnd(pDispEvo, pInfoString, + "Mode total size (%u x %u), with visible size (%u x %u), larger than maximum size (%u x %u)", + pModeTimings->hTotal, + pModeTimings->vTotal, + pModeTimings->hVisible, + pModeTimings->vVisible, + maxWidth, maxHeight); + return FALSE; + } + } + + /* check against the frequency information */ + + if ((overrides & NVKMS_MODE_VALIDATION_NO_HORIZ_SYNC_CHECK) == 0) { + if (pValidSyncs->horizSyncHz.numRanges > 0) { + NvU32 hSync = axb_div_c(pModeTimings->pixelClockHz, 1, + pModeTimings->hTotal); + + for (i = 0; i < pValidSyncs->horizSyncHz.numRanges; i++) { + NvU32 low = pValidSyncs->horizSyncHz.range[i].low; + NvU32 high = pValidSyncs->horizSyncHz.range[i].high; + if ((hSync > Percentage(low, 99)) && + (hSync < Percentage(high, 101))) { + break; + } + } + + /* + * Now see whether we ran out of sync ranges without + * finding a match + */ + + if (i == pValidSyncs->horizSyncHz.numRanges) { + + char rangeString[NV_MAX_RANGE_STRING_LEN]; + char hSyncString[NV_MAX_FREQUENCY_STRING_LEN]; + + RangesToString(&pValidSyncs->horizSyncHz, rangeString); + FrequencyToString(hSync, hSyncString); + + /* + * If this mode came from the EDID and the valid + * HorizSync ranges (which excluded this timing) also + * came from the EDID, then something is odd (see bug + * 336963); print a warning, but continue. + */ + + if ((flags->source == NvKmsModeSourceEdid) && + (pValidSyncs->horizSyncHz.source == + NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID) && + ((overrides & + NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) { + + nvEvoLogInfoString(pInfoString, + "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; " + "however, the EDID's valid HorizSync range (%s kHz) would exclude this mode's HorizSync (%s kHz); " + "ignoring HorizSync check for mode \"%s\".", + pDpyEvo->name, modeName, + rangeString, hSyncString, modeName); + } else { + + LogModeValidationEnd(pDispEvo, pInfoString, + "HorizSync (%s kHz) out of range (%s kHz)", hSyncString, rangeString); + return FALSE; + } + } + } + } + + if ((overrides & NVKMS_MODE_VALIDATION_NO_VERT_REFRESH_CHECK) == 0) { + + if (pValidSyncs->vertRefreshHz1k.numRanges > 0) { + + /* + * note: we expect RRx1k to be field rate for interlaced + * modes, (undoubled) frame rate for doublescan modes, and + * (doubled) frame rate for HDMI 3D modes. + */ + NvU32 vRefresh = pModeTimings->RRx1k; + + for (i = 0; i < pValidSyncs->vertRefreshHz1k.numRanges; i++) { + NvU32 low = pValidSyncs->vertRefreshHz1k.range[i].low; + NvU32 high = pValidSyncs->vertRefreshHz1k.range[i].high; + + if ((vRefresh > Percentage(low, 99)) && + (vRefresh < Percentage(high, 101))) { + break; + } + } + + /* + * Now see whether we ran out of refresh ranges without + * finding a match + */ + + if (i == pValidSyncs->vertRefreshHz1k.numRanges) { + + char rangeString[NV_MAX_RANGE_STRING_LEN]; + char vRefreshString[NV_MAX_FREQUENCY_STRING_LEN]; + + if (pModeTimings->hdmi3D) { + vRefresh /= 2; + } + + RangesToString(&pValidSyncs->vertRefreshHz1k, + rangeString); + FrequencyToString(vRefresh, vRefreshString); + + /* + * If this mode came from the EDID and the valid + * VertRefresh ranges (which excluded this timing) + * also came from the EDID, then something is odd (see + * bug 336963); print a warning, but continue. 
+ */
+
+ if ((flags->source == NvKmsModeSourceEdid) &&
+ (pValidSyncs->vertRefreshHz1k.source ==
+ NVKMS_MODE_VALIDATION_FREQUENCY_RANGE_SOURCE_EDID) &&
+ ((overrides &
+ NVKMS_MODE_VALIDATION_OBEY_EDID_CONTRADICTIONS) == 0)) {
+
+ nvEvoLogInfoString(pInfoString,
+ "The EDID for %s contradicts itself: mode \"%s\" is specified in the EDID; "
+ "however, the EDID's valid VertRefresh range (%s Hz) would exclude this mode's VertRefresh (%s Hz%s); "
+ "ignoring VertRefresh check for mode \"%s\".",
+ pDpyEvo->name, modeName,
+ rangeString, vRefreshString,
+ pModeTimings->hdmi3D ? ", doubled for HDMI 3D" : "",
+ modeName);
+ } else {
+
+ LogModeValidationEnd(pDispEvo, pInfoString,
+ "VertRefresh (%s Hz%s) out of range (%s Hz)", vRefreshString,
+ pModeTimings->hdmi3D ? ", doubled for HDMI 3D" : "",
+ rangeString);
+ return FALSE;
+ }
+ }
+ }
+ }
+
+ /*
+ * If 3D Vision Stereo is enabled, and the pDpy requires patched
+ * stereo modetimings, and these modetimings are not patched, then
+ * reject the mode, unless the mode validation override "AllowNon3DVModes"
+ * has been set.
+ */
+
+ if ((overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_3DVISION_MODES) == 0) {
+ if (is3DVisionStereo &&
+ pDpyEvo->stereo3DVision.requiresModetimingPatching &&
+ !flags->patchedStereoTimings) {
+ LogModeValidationEnd(pDispEvo, pInfoString,
+ "Mode not compatible with 3D Vision Stereo");
+ return FALSE;
+ }
+ }
+
+ /*
+ * If HDMI 3D is enabled and supported, reject non-HDMI 3D modes unless the
+ * mode validation override "AllowNonHDMI3DModes" has been set.
+ */
+ if (((overrides & NVKMS_MODE_VALIDATION_ALLOW_NON_HDMI3D_MODES) == 0) &&
+ (pParams->stereoMode == NVKMS_STEREO_HDMI_3D) &&
+ nvDpyEvoSupportsHdmi3D(pDpyEvo) &&
+ !pModeTimings->hdmi3D) {
+ LogModeValidationEnd(pDispEvo, pInfoString,
+ "Mode not compatible with HDMI 3D");
+ return FALSE;
+ }
+
+ if (pModeTimings->hdmi3D && pModeTimings->interlaced) {
+ LogModeValidationEnd(pDispEvo, pInfoString,
+ "Interlaced frame packed HDMI 3D modes are not supported.");
+ return FALSE;
+ }
+
+ if (pModeTimings->interlaced &&
+ nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo) &&
+ (overrides & NVKMS_MODE_VALIDATION_ALLOW_DP_INTERLACED) == 0) {
+ LogModeValidationEnd(pDispEvo, pInfoString,
+ "Interlaced modes are not supported over DisplayPort");
+ return FALSE;
+ }
+
+ if (pModeTimings->interlaced &&
+ (overrides & NVKMS_MODE_VALIDATION_NO_INTERLACED_MODES)) {
+ LogModeValidationEnd(pDispEvo, pInfoString,
+ "Interlaced modes are not allowed");
+ return FALSE;
+ }
+
+ if (pModeTimings->interlaced &&
+ pParams->stereoMode != NVKMS_STEREO_DISABLED) {
+ LogModeValidationEnd(pDispEvo, pInfoString,
+ "Interlaced modes are not allowed with stereo");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/*
+ * Log to the InfoString with information about this
+ * particular ViewPort.
+ */
+
+static void LogViewPort(NVEvoInfoStringPtr pInfoString,
+ const NVHwModeTimingsEvo *pTimings)
+{
+ const NVHwModeViewPortEvo *pViewPort = &pTimings->viewPort;
+ const struct NvKmsRect viewPortOut = nvEvoViewPortOutClientView(pTimings);
+
+ /* print the viewport name, size, and taps */
+
+ nvEvoLogInfoString(pInfoString,
+ "Viewport %dx%d+%d+%d",
+ viewPortOut.width,
+ viewPortOut.height,
+ viewPortOut.x,
+ viewPortOut.y);
+
+ nvEvoLogInfoString(pInfoString,
+ " Horizontal Taps %d",
+ NVEvoScalerTapsToNum(pViewPort->hTaps));
+
+ nvEvoLogInfoString(pInfoString,
+ " Vertical Taps %d",
+ NVEvoScalerTapsToNum(pViewPort->vTaps));
+}
+
+/*
+ * Validate pModeTimings for use on pDpy.
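+ * (For orientation, validation in ValidateMode() below proceeds
+ * roughly as: ValidateModeTimings(), nvConstructHwModeTimingsEvo(),
+ * nvDPValidateModeEvo(), nvValidateHwModeTimingsViewPort() and
+ * nvConstructHwModeTimingsImpCheckEvo(); any failure is reported via
+ * LogModeValidationEnd() and the mode is rejected.)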
If the mode is valid, use + * pDev->disp.ConstructHwModeTimings() to assign pHwModeTimings and + * return TRUE. + */ +static NvBool ValidateMode(NVDpyEvoPtr pDpyEvo, + const struct NvKmsMode *pKmsMode, + const EvoValidateModeFlags *flags, + const struct NvKmsModeValidationParams *pParams, + NVEvoInfoStringPtr pInfoString, + struct NvKmsModeValidationValidSyncs *pValidSyncs, + struct NvKmsUsageBounds *pModeUsage) +{ + const char *modeName = pKmsMode->name; + const NvModeTimings *pModeTimings = &pKmsMode->timings; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + char localModeName[NV_MAX_MODE_NAME_LEN]; + + NVHwModeTimingsEvo *pTimingsEvo = + nvPreallocGet(pDevEvo, + PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS, + sizeof(*pTimingsEvo)); + + NvBool ret = FALSE; + + if (modeName[0] == '\0') { + nvBuildModeName(pModeTimings->hVisible, pModeTimings->vVisible, + localModeName, sizeof(localModeName)); + modeName = localModeName; + } + + /* Initialize the EVO hwModeTimings structure */ + + nvkms_memset(pTimingsEvo, 0, sizeof(*pTimingsEvo)); + + /* begin logging of ModeValidation for this mode */ + + LogModeValidationBegin(pInfoString, pModeTimings, modeName); + + if (!ValidateModeTimings(pDpyEvo, pKmsMode, flags, pParams, + pInfoString, pValidSyncs)) { + goto done; + } + + /* + * we made it past the rest of mode validation; now construct the + * hw modetimings to use for this mode; we do this here so that we + * can report any failures as part of the mode validation + * reporting. + * + * XXX For certain modes like doublescan, interlaced, and YUV 4:2:0 + * emulated mode, the timings stored in the pTimingsEvo constructed + * here are different than the timings in pModeTimings used for validation + * earlier in this function. + * + * In certain cases (like pclk validation for YUV 4:2:0 modes, which store + * a doubled pclk in pModeTimings and the real pclk in pTimingsEvo) we + * want to use the pTimingsEvo value for validation in this function. + * It may make sense to restructure this function so pTimingsEvo + * construction happens earlier, then the pTimingsEvo values are used + * for the remaining validation. + */ + + if (!nvConstructHwModeTimingsEvo(pDpyEvo, + pKmsMode, + NULL, /* pViewPortSizeIn */ + NULL, /* pViewPortOut */ + pTimingsEvo, + pParams, + pInfoString)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "Unable to construct hardware-specific mode " + "timings"); + goto done; + } + + if (!nvDPValidateModeEvo(pDpyEvo, pTimingsEvo, pParams)) { + LogModeValidationEnd(pDispEvo, + pInfoString, "DP Bandwidth check failed"); + goto done; + } + + /* + * Check ViewPortIn dimensions and ensure valid h/vTaps can be assigned. + */ + if (!nvValidateHwModeTimingsViewPort(pDevEvo, + /* XXX assume the gpus have equal capabilities */ + &pDevEvo->gpus[0].capabilities.head[0].scalerCaps, + pTimingsEvo, pInfoString)) { + goto done; + } + + + /* Run the raster timings through IMP checking. */ + + if (!nvConstructHwModeTimingsImpCheckEvo(pDpyEvo->pConnectorEvo, + pTimingsEvo, pParams, pInfoString, + 0 /* head */)) { + LogModeValidationEnd(pDispEvo, pInfoString, + "GPU extended capability check failed"); + goto done; + } + + /* Log modevalidation information about the viewport. */ + + LogViewPort(pInfoString, pTimingsEvo); + + /* Copy out the usage bounds that passed validation */ + + nvkms_memcpy(pModeUsage, &pTimingsEvo->viewPort.possibleUsage, sizeof(*pModeUsage)); + + /* Whew, if we got this far, the mode is valid. 
*/ + + LogModeValidationEnd(pDispEvo, pInfoString, NULL); + + ret = TRUE; + +done: + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS); + + return ret; +} + + +/*! + * Return whether the given NVT_TIMING and NvModeTimings match. + */ +static NvBool NVT_TIMINGmatchesNvModeTimings +( + const NVT_TIMING *pTiming, + const NvModeTimings *pModeTimings, + const struct NvKmsModeValidationParams *pParams +) +{ + NvModeTimings tmpModeTimings; + + NVT_TIMINGtoNvModeTimings(pTiming, &tmpModeTimings); + + return NvModeTimingsMatch(&tmpModeTimings, pModeTimings, + TRUE /* ignoreSizeMM */, + ((pParams->overrides & + NVKMS_MODE_VALIDATION_NO_RRX1K_CHECK) != 0x0) + /* ignoreRRx1k */); +} + + +/*! + * Find the NVT_TIMING from the dpy's EDID that matches the pModeTimings. + */ +const NVT_TIMING *nvFindEdidNVT_TIMING +( + const NVDpyEvoRec *pDpyEvo, + const NvModeTimings *pModeTimings, + const struct NvKmsModeValidationParams *pParams +) +{ + NvModeTimings tmpModeTimings; + int i; + + if (!pDpyEvo->parsedEdid.valid) { + return NULL; + } + + tmpModeTimings = *pModeTimings; + + /* + * Revert any modeTimings modifications that were done for hdmi3D + * in ValidateModeIndexEdid(), so that the modeTimings can be + * compared with the NVT_TIMINGs in the parsed EDID. + */ + if (tmpModeTimings.hdmi3D) { + UpdateNvModeTimingsForHdmi3D(&tmpModeTimings, FALSE); + } + + /* + * The NVT_TIMINGs we compare against below won't have hdmi3D or + * yuv420 set; clear those flags in tmpModeTimings so that we can + * do a more meaningful comparison. + */ + tmpModeTimings.hdmi3D = FALSE; + tmpModeTimings.yuv420Mode = NV_YUV420_MODE_NONE; + + for (i = 0; i < pDpyEvo->parsedEdid.info.total_timings; i++) { + const NVT_TIMING *pTiming = &pDpyEvo->parsedEdid.info.timing[i]; + if (NVT_TIMINGmatchesNvModeTimings(pTiming, &tmpModeTimings, pParams) && + /* + * Only consider the mode a match if the yuv420 + * configuration of pTiming would match pModeTimings. + */ + (pModeTimings->yuv420Mode == + GetYUV420Value(pDpyEvo, pParams, pTiming))) { + return pTiming; + } + } + + return NULL; +} + +/*! + * Construct mode-timing's meta data required for mode validation + * logic. This meta data involves EvoValidateModeFlags, patched stereo + * vision timings, etc. + * + * \param[in] pDpyEvo The dpy for whom the mode is considered. + * \param[in] pParams The NvKmsModeValidationParams. + * \param[in/out] pKmsMode The NVKMS mode to be considered. + * \param[out] pFlags The EvoValidateModeFlags + * \param[out] pInfoFrameCtrl InfoFrame control + * + * \return Return TRUE on success with patched mode timings, + * EvoValidateModeFlags and infoFrame controls etc.; otherwise + * returns FALSE. + */ +static NvBool ConstructModeTimingsMetaData( + NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + struct NvKmsMode *pKmsMode, + EvoValidateModeFlags *pFlags, + NVT_VIDEO_INFOFRAME_CTRL *pInfoFrameCtrl) +{ + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + EvoValidateModeFlags flags = { 0 }; + NVT_VIDEO_INFOFRAME_CTRL infoFrameCtrl; + NvModeTimings modeTimings = pKmsMode->timings; + const NVT_TIMING *pTiming; + + nvkms_memset(&infoFrameCtrl, NVT_INFOFRAME_CTRL_DONTCARE, + sizeof(infoFrameCtrl)); + + flags.source = NvKmsModeSourceUnknown; + + /* Is this an EDID mode? 
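+ *
+ * nvFindEdidNVT_TIMING() (defined above) reverts the hdmi3D
+ * adjustments and clears the hdmi3D/yuv420 flags before comparing, so
+ * a client mode that was modified for HDMI 3D or YUV 4:2:0 is still
+ * recognized as coming from the EDID.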
*/ + pTiming = nvFindEdidNVT_TIMING(pDpyEvo, &modeTimings, pParams); + + if (pTiming != NULL) { + NVT_TIMING timing = *pTiming; + const NvBool is3DVisionStereo = + nvIs3DVisionStereoEvo(pParams->stereoMode); + + flags.source = NvKmsModeSourceEdid; + + /* Patch the mode for 3DVision. */ + if (is3DVisionStereo && + pDpyEvo->stereo3DVision.requiresModetimingPatching && + nvPatch3DVisionModeTimingsEvo(&timing, pDpyEvo, + &dummyInfoString)) { + flags.patchedStereoTimings = TRUE; + + /* + * Replace the client's modeTimings with the version + * patched for 3DVision stereo. + */ + NVT_TIMINGtoNvModeTimings(&timing, &modeTimings); + + /* Restore the yuv420 and hdmi3D flags from the client's mode. */ + modeTimings.yuv420Mode = pKmsMode->timings.yuv420Mode; + modeTimings.hdmi3D = pKmsMode->timings.hdmi3D; + + /* Re-apply adjustments for hdmi3D. */ + if (modeTimings.hdmi3D) { + UpdateNvModeTimingsForHdmi3D(&modeTimings, TRUE); + } + + } + + /* Validate yuv420. */ + if (modeTimings.yuv420Mode != + GetYUV420Value(pDpyEvo, pParams, &timing)) { + return FALSE; + } + + /* Validate hdmi3D. */ + if (modeTimings.hdmi3D != GetHdmi3DValue(pDpyEvo, pParams, &timing)) { + return FALSE; + } + + if (pParams->stereoMode == NVKMS_STEREO_HDMI_3D) { + if (!nvDpyEvoSupportsHdmi3D(pDpyEvo)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI 3D mode is selected, but " + "HDMI 3D is not supported by %s; HDMI 3D may not function " + "properly. This might happen if no EDID is available for " + "%s, if the display is not connected over HDMI, or if the " + "display does not support HDMI 3D.", pDpyEvo->name, + pDpyEvo->name); + } else if (!modeTimings.hdmi3D) { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "HDMI 3D mode is selected, but the " + "currently selected mode is incompatible with HDMI 3D. " + "HDMI 3D will be disabled."); + } + } + + /* + * Compute the infoFrame control; this will be assigned to + * pTimingsEvo after ValidateMode has written to it. + */ + if (nvDpyIsHdmiEvo(pDpyEvo)) { + NvTiming_ConstructVideoInfoframeCtrl(&timing, &infoFrameCtrl); + } + + goto done; + } + + /* Otherwise, is this a VESA mode? */ + + if (IsVesaMode(&modeTimings, pParams)) { + flags.source = NvKmsModeSourceVesa; + goto done; + } + + /* + * Otherwise, this must be a user-specified mode; no metadata changes + * are needed. + */ + +done: + *pFlags = flags; + *pInfoFrameCtrl = infoFrameCtrl; + pKmsMode->timings = modeTimings; + + return TRUE; +} + +/*! + * Validate the NvKmsMode. + * + * \param[in] pDpyEvo The dpy for whom the mode is considered. + * \param[in] pParams The NvKmsModeValidationParams. + * \param[in] pKmsMode The mode to be considered. + * \param[out] pTimingsEvo The EVO mode timings to be programmed in hardware. + * + * \return If the mode is valid, return TRUE and populate pTimingsEvo. + * If the mode is not valid, return FALSE. 
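+ *
+ * A minimal call sketch (illustrative only; the request variables are
+ * assumptions -- see GetHwModeTimings() in nvkms-modeset.c for the
+ * real caller):
+ *
+ *     NVHwModeTimingsEvo timings;
+ *     if (nvValidateModeForModeset(pDpyEvo,
+ *                                  &pRequestHead->modeValidationParams,
+ *                                  &pRequestHead->mode,
+ *                                  &pRequestHead->viewPortSizeIn,
+ *                                  NULL,        // pViewPortOut
+ *                                  &timings)) {
+ *         // timings may now be programmed for pDpyEvo
+ *     }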
+ */ +NvBool nvValidateModeForModeset(NVDpyEvoRec *pDpyEvo, + const struct NvKmsModeValidationParams *pParams, + const struct NvKmsMode *pKmsMode, + const struct NvKmsSize *pViewPortSizeIn, + const struct NvKmsRect *pViewPortOut, + NVHwModeTimingsEvo *pTimingsEvo) +{ + EvoValidateModeFlags flags; + struct NvKmsMode kmsMode = *pKmsMode; + NVT_VIDEO_INFOFRAME_CTRL infoFrameCtrl; + struct NvKmsModeValidationValidSyncs dummyValidSyncs; + + nvkms_memset(pTimingsEvo, 0, sizeof(*pTimingsEvo)); + + if (!ConstructModeTimingsMetaData(pDpyEvo, + pParams, + &kmsMode, + &flags, + &infoFrameCtrl)) { + return FALSE; + } + + if (!ValidateModeTimings(pDpyEvo, + pKmsMode, + &flags, + pParams, + &dummyInfoString, + &dummyValidSyncs)) { + return FALSE; + } + + if (!nvConstructHwModeTimingsEvo(pDpyEvo, + &kmsMode, + pViewPortSizeIn, + pViewPortOut, + pTimingsEvo, + pParams, + &dummyInfoString)) { + return FALSE; + } + + pTimingsEvo->infoFrameCtrl = infoFrameCtrl; + + return TRUE; +} diff --git a/src/nvidia-modeset/src/nvkms-modeset.c b/src/nvidia-modeset/src/nvkms-modeset.c new file mode 100644 index 000000000..2c956537b --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-modeset.c @@ -0,0 +1,2703 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * The EVO modeset sequence is structured to minimize changes to the + * hardware from one modeset to the next, and to minimize the number + * of UPDATE methods that are programmed. + * + * Software state is tracked in three different structures: + * + * (1) NVDispEvo::headState - This is the NVKMS record of what has + * been programmed in the hardware, for all heads on the disp. + * + * (2) NvKmsSetModeRequest - This is the NVKMS client's description of + * what changes are requested. Note that clients can just request to + * change specific heads on specific disps. Other heads/disps should + * retain their current configuration across the modeset. + * + * (3) NVProposedModeSetHwState - This describes the hardware state + * that is desired at the end of the modeset. It is assigned by + * considering the current state (NVDispEvo::headState) and applying + * any client-requested changes (NvKmsSetModeRequest). + * + * The intended flow is: + * + * - Assign NVProposedModeSetHwState, given NVDispEvo::headState and + * NvKmsSetModeRequest, noting which heads are changing. 
+ * - Check whether the proposed state is valid, and fail the modeset + * if anything about the proposed configuration is invalid. + * + * NOTE: Nothing before this point in the sequence should alter NVKMS + * software state, or program hardware. Also, to the extent + * possible, we should avoid failing the modeset after this point in + * the sequence, because this is when we start altering software and + * hardware state. + * + * - Notify RM that the modeset is starting. + * - Reset the EVO locking state machine. + * - For each disp: + * - For each head: + * - Shut down newly unused heads + * - For each head: + * - Apply the requested changes. + * - Send evo UPDATE method + * - For each head: + * - Perform post-UPDATE work + * - Update the EVO locking state machine. + * - Notify RM that the modeset is complete. + * - Populate the reply structure returned to the NVKMS client. + * + * + * TODO: + * - Would it be worthwhile to centralize SOR (re)assignment, disp-wide, + * in ApplyProposedModeSetHwStateOneDisp() between the calls to + * ApplyProposedModeSetHwStateOneHeadShutDown() and + * ApplyProposedModeSetHwStateOneHeadPreUpdate()? + */ + +#include "nvkms-evo.h" +#include "nvkms-types.h" +#include "nvkms-dpy.h" +#include "nvkms-rm.h" +#include "nvkms-hdmi.h" +#include "nvkms-flip.h" +#include "nvkms-3dvision.h" +#include "nvkms-modepool.h" +#include "nvkms-prealloc.h" +#include "nvkms-private.h" +#include "nvkms-vrr.h" +#include "nvkms-lut.h" + +#include "dp/nvdp-connector.h" + +#include "nvkms-api.h" + +#include "nvkms-modeset.h" +#include "nvkms-modeset-types.h" +#include "nvkms-modeset-workarea.h" +#include "nvkms-attributes.h" + +/*! + * Get an allowFlipLockGroup value that is not yet used by pProposed. + * + * Scan through pProposed to find all currently used + * allowFlipLockGroup values, then pick the first allowFlipLockGroup + * value that is not used. + */ +static NvU8 GetAvailableAllowFlipLockGroupValue( + const NVProposedModeSetHwState *pProposed) +{ + NvU32 dispIndex; + NvU64 allowFlipLockGroupMask = 0; + NvU8 allowFlipLockGroup; + + /* + * Find all of the used allowFlipLockGroup values. Note that + * allowFlipLockGroup is 1-based (so that 0 can represent no + * assigned allowFlipLockGroup). Shift to 0-based, to store in + * allowFlipLockGroupMask. + */ + for (dispIndex = 0; dispIndex < ARRAY_LEN(pProposed->disp); dispIndex++) { + NvU32 head; + for (head = 0; + head < ARRAY_LEN(pProposed->disp[dispIndex].head); + head++) { + + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposed->disp[dispIndex].head[head]; + + if (pProposedHead->allowFlipLockGroup != 0) { + nvAssert(pProposedHead->allowFlipLockGroup <= 64); + allowFlipLockGroupMask |= + NVBIT64(pProposedHead->allowFlipLockGroup - 1); + } + } + } + + /* Find the first available allowFlipLockGroup values. */ + allowFlipLockGroupMask = ~allowFlipLockGroupMask; + if (allowFlipLockGroupMask == 0) { + /* + * For this to be zero, the pProposed would need to already + * have 64 unique allowFlipLockGroup values; 64 unique + * flipLock groups is highly unlikely. + */ + nvAssert(!"allowFlipLockGroupMask is too small"); + return 0; + } + + allowFlipLockGroup = BIT_IDX_64(LOWESTBIT(allowFlipLockGroupMask)); + + /* Shift allowFlipLockGroup back to 1-based. */ + + return allowFlipLockGroup + 1; +} + + +/*! + * Get the NVHwModeTimingsEvo for the mode requested by the client. 
+ * + * NvKmsSetModeOneHeadRequest::mode specifies mode timings in a + * hardware-neutral format, along with mode validation parameters and + * the dpyIdList on which to set the mode. Validate the requested + * mode and compute NVHwModeTimingsEvo for it. + * + * \param[in] pDispEvo The disp of the dpyIdList and head. + * \param[in] pRequestHead The mode, mode validation parameters, dpyIdList, + * and head requested by the client. + * \param[out] pTimings The mode timings to program in the hardware. + * + * \return Return TRUE if the requested mode is valid and pTimings + * could be assigned. Otherwise, return FALSE. + */ +static NvBool +GetHwModeTimings(const NVDispEvoRec *pDispEvo, + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + NVHwModeTimingsEvo *pTimings) +{ + NVDpyEvoPtr pDpyEvo; + + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + return TRUE; + } + + pDpyEvo = nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo); + + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvValidateModeForModeset(pDpyEvo, + &pRequestHead->modeValidationParams, + &pRequestHead->mode, + &pRequestHead->viewPortSizeIn, + pRequestHead->viewPortOutSpecified ? + &pRequestHead->viewPortOut : NULL, + pTimings); +} + +static NvBool ApplySyncptRegistration( + NVDevEvoRec *pDevEvo, + NvU32 head, + const struct NvKmsFlipCommonParams *pParams, + NVFlipEvoHwState *pFlipState) +{ + NvU32 layer; + + if (!pDevEvo->supportsSyncpts) { + return TRUE; + } + + /*! + * Modeset path should not request pre-syncpt as it will + * not progress because this will update all of the Core and + * Window method state together, and wait for the Core + * completion notifier to signal. If any of the Window + * channels is waiting for a semaphore acquire, then this + * will stall the Core notifier as well since the Core and + * Window channels are interlocked. + */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pParams->layer[layer].syncObjects.specified && + pParams->layer[layer].syncObjects.val.useSyncpt && + pParams->layer[layer].syncObjects.val.u.syncpts.pre.type != + NVKMS_SYNCPT_TYPE_NONE) { + nvAssert(!"Failing as pre-syncpt requested in modeset!"); + return FALSE; + } + } + + return nvHandleSyncptRegistration(pDevEvo, + head, + pParams, + pFlipState); +} + +static NvBool +GetColorSpaceAndColorRange( + const NVDispEvoPtr pDispEvo, + const struct NvKmsSetModeOneHeadRequest *pRequestHead, + NVProposedModeSetHwStateOneHead *pProposedHead) +{ + enum NvKmsDpyAttributeColorRangeValue requestedColorRange; + enum NvKmsDpyAttributeRequestedColorSpaceValue requestedColorSpace; + NVDpyEvoRec *pOneArbitraryDpyEvo = + nvGetOneArbitraryDpyEvo(pRequestHead->dpyIdList, pDispEvo); + + if (pRequestHead->colorSpaceSpecified) { + const NVDpyEvoRec *pDpyEvo; + + /* + * There could be multiple DPYs driven by this head. For each DPY, + * validate that the requested colorspace and color range is valid. + */ + FOR_ALL_EVO_DPYS(pDpyEvo, pRequestHead->dpyIdList, pDispEvo) { + if (!nvDpyValidateColorSpace(pDpyEvo, pRequestHead->colorSpace)) { + return FALSE; + } + } + + requestedColorSpace = pRequestHead->colorSpace; + } else { + requestedColorSpace = pOneArbitraryDpyEvo->requestedColorSpace; + } + + if (pRequestHead->colorRangeSpecified) { + requestedColorRange = pRequestHead->colorRange; + } else { + requestedColorRange = pOneArbitraryDpyEvo->requestedColorRange; + } + + /* + * Choose current colorSpace and colorRange based on the current mode + * timings and the requested color space and range. 
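+ *
+ * Note that the switch statement below only accepts RGB, YCbCr422 and
+ * YCbCr444 results: if the chosen current colorSpace is anything else
+ * (e.g. a YCbCr420 value), an explicitly requested colorSpace falls
+ * through to the default case and the modeset request is rejected.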
+ */ + nvChooseCurrentColorSpaceAndRangeEvo(&pProposedHead->timings, + requestedColorSpace, + requestedColorRange, + &pProposedHead->colorSpace, + &pProposedHead->colorRange); + /* + * When colorspace is specified in modeset request, it should + * match the proposed colorspace. + */ + if (pRequestHead->colorSpaceSpecified) { + NvBool ret = FALSE; + switch (pProposedHead->colorSpace) { + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_RGB: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_RGB); + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr422: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr422); + break; + case NV_KMS_DPY_ATTRIBUTE_CURRENT_COLOR_SPACE_YCbCr444: + ret = (pRequestHead->colorSpace == + NV_KMS_DPY_ATTRIBUTE_REQUESTED_COLOR_SPACE_YCbCr444); + break; + default: + break; + } + if (!ret) { + return ret; + } + } + + /* + * When color range is specified in modeset request, it should + * match the proposed color range. + */ + if (pRequestHead->colorRangeSpecified && + (pProposedHead->colorRange != pRequestHead->colorRange)) { + return FALSE; + } + + return TRUE; +} + +/*! + * Assign the NVProposedModeSetHwState structure. + * + * Use the current hardware state, and the requested changes in + * pRequest, to determine what the desired resulting hardware + * configuration for the device should be. + * + * \param[in] pDevEvo The device whose hardware state is to be changed. + * \param[in] pOpenDev The pOpenDev of the client doing the modeset. + * \param[in] pRequest The requested changes to apply to the hardware state. + * \param[out] pReply The reply structure for the client; if we cannot + * apply some portion of pRequest, set the + * corresponding status field in pReply to a + * non-SUCCESS value. + * \param[out] pProposed The proposed resulting hardware state. + * + * \return If the requested changes could be applied to pProposed, + * return TRUE. If the requested changes could not be applied + * to pProposed, set the corresponding status field in pReply + * to a non-SUCCESS value and return FALSE. + */ +static NvBool +AssignProposedModeSetHwState(NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + NVProposedModeSetHwState *pProposed, + NvBool modesetOwnerChanged) +{ + NvU32 sd; + NVDispEvoPtr pDispEvo; + NvBool ret = TRUE; + NvU8 allowFlipLockGroup = 0; + + /* Initialize pProposed with the current hardware configuration. */ + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < pDevEvo->numHeads; head++) { + + const NVDispHeadStateEvoRec *pHeadState; + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposed->disp[sd].head[head]; + + /* + * Case of invalid hardware head is handled inside + * nvInitFlipEvoHwState(). 
+ */ + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pProposed->sd[sd].head[head].flip); + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + pHeadState = &pDispEvo->headState[head]; + + pProposedHead->timings = pHeadState->timings; + pProposedHead->dpyIdList = pHeadState->activeDpys; + pProposedHead->pConnectorEvo = pHeadState->pConnectorEvo; + pProposedHead->activeRmId = pHeadState->activeRmId; + pProposedHead->allowFlipLockGroup = pHeadState->allowFlipLockGroup; + pProposedHead->modeValidationParams = + pHeadState->modeValidationParams; + pProposedHead->colorSpace = pHeadState->attributes.colorSpace; + pProposedHead->colorRange = pHeadState->attributes.colorRange; + pProposedHead->changed = FALSE; + pProposedHead->hs10bpcHint = pHeadState->hs10bpcHint; + pProposedHead->audio = pHeadState->audio; + } + } + + /* Update pProposed with the requested changes from the client. */ + + if (pOpenDev == pDevEvo->modesetOwner || pOpenDev == pDevEvo->pNvKmsOpenDev) { + pProposed->allowHeadSurfaceInNvKms = pRequest->allowHeadSurfaceInNvKms; + } else { + pProposed->allowHeadSurfaceInNvKms = pDevEvo->allowHeadSurfaceInNvKms; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[sd]; + NvBool shutDownAllHeads = FALSE; + NvU32 head; + + if ((pRequest->requestedDispsBitMask & (1 << sd)) == 0) { + if (modesetOwnerChanged) { + shutDownAllHeads = TRUE; + } else { + continue; + } + } + + NVProposedModeSetHwStateOneDisp *pProposedDisp = + &pProposed->disp[sd]; + + pDispEvo = pDevEvo->pDispEvo[sd]; + + for (head = 0; head < pDevEvo->numHeads; head++) { + + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + NVDpyIdList newDpyIdList; + NvBool clearAndContinue = FALSE; + + if ((pRequestDisp->requestedHeadsBitMask & (1 << head)) == 0 || + shutDownAllHeads) { + if (modesetOwnerChanged) { + /* + * If the modeset owner is changing, implicitly shut down + * other heads not included in requestedHeadsBitMask. + */ + newDpyIdList = nvEmptyDpyIdList(); + } else { + /* + * Otherwise, just leave the head alone so it keeps its + * current configuration. + */ + continue; + } + } else { + newDpyIdList = pRequestHead->dpyIdList; + } + + /* + * If newDpyIdList is empty or do not find the valid dpy in + * newDpyIdList, then the head should be disabled. + * Clear the pProposedHead, so that no state leaks to the new + * configuration. + */ + if (nvDpyIdListIsEmpty(newDpyIdList)) { + clearAndContinue = TRUE; + } else { + const NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(newDpyIdList, pDispEvo); + if (pDpyEvo != NULL) { + pProposedHead->pConnectorEvo = pDpyEvo->pConnectorEvo; + pProposedHead->changed = TRUE; + } else { + clearAndContinue = TRUE; + } + } + + + if (clearAndContinue) { + nvkms_memset(pProposedHead, 0, sizeof(*pProposedHead)); + pProposedHead->changed = TRUE; + continue; + } + + pProposedHead->dpyIdList = newDpyIdList; + pProposedHead->activeRmId = + nvRmAllocDisplayId(pDispEvo, pProposedHead->dpyIdList); + if (pProposedHead->activeRmId == 0x0) { + /* XXX Need separate error code? */ + pReply->disp[sd].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY; + ret = FALSE; + continue; + } + + /* Verify that the requested dpys are valid on this head. 
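+ *
+ * validHeadMask is a bitmask of the heads that can drive this
+ * connector, so the NVBIT(head) test below fails the request with
+ * NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY when the requested head
+ * cannot reach the dpy's connector.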
*/ + if ((pProposedHead->pConnectorEvo->validHeadMask & NVBIT(head)) == 0) { + pReply->disp[sd].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY; + ret = FALSE; + continue; + } + + /* + * Get the requested modetimings for this head. If that + * fails, record in the reply that getting the mode + * failed. In the case of failure, continue to the next + * head so that if additional heads fail, we can report + * more complete failure information to the client. + */ + if (!GetHwModeTimings(pDispEvo, pRequestHead, &pProposedHead->timings)) { + pReply->disp[sd].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE; + ret = FALSE; + continue; + } + + pProposedHead->allowFlipLockGroup = 0; + pProposedHead->modeValidationParams = + pRequestHead->modeValidationParams; + pProposedHead->allowGsync = pRequestHead->allowGsync; + pProposedHead->allowAdaptiveSync = pRequestHead->allowAdaptiveSync; + pProposedHead->vrrOverrideMinRefreshRate = + pRequestHead->vrrOverrideMinRefreshRate; + + if (!GetColorSpaceAndColorRange(pDispEvo, pRequestHead, pProposedHead)) { + pReply->disp[sd].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE; + ret = FALSE; + continue; + } + + /* + * modesetOwnerChanged implies that there was a modeset + * ownership change since the last modeset. If input/output lut not + * specified by the new modeset owner then keep them disabled by + * default. + */ + if (modesetOwnerChanged) { + pProposedHead->lut = pRequestHead->lut; + + if (!pRequestHead->lut.input.specified) { + pProposedHead->lut.input.specified = TRUE; + pProposedHead->lut.input.end = 0; + } + + if (!pRequestHead->lut.output.specified) { + pProposedHead->lut.output.specified = TRUE; + pProposedHead->lut.output.enabled = FALSE; + } + } else if (pRequestHead->lut.input.specified) { + pProposedHead->lut = pRequestHead->lut; + } else { + pProposedHead->lut.input.specified = FALSE; + } + + NVFlipEvoHwState *pFlip = + &pProposed->sd[sd].head[head].flip; + + /* + * Clear the flipStates of all layers: + * + * The current flipState of main layer may still contain + * old surfaces (e.g., headSurface) that are no longer + * desirable or compatible with the new modeset + * configuration. + * + * Function ApplyProposedModeSetHwStateOneHeadShutDown() clears + * pSdHeadState and disables all layers. It is not possible to + * re-apply the existing flipstates because hardware releases + * sempahores when layers get disabled; this results in a stuck + * channel if you re-apply the existing flipstate which has + * the old semaphore values. + */ + + nvClearFlipEvoHwState(pFlip); + + if (pRequest->commit) { + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + pFlip->dirty.layer[layer] = TRUE; + } + } + + if (!ApplySyncptRegistration( + pDevEvo, + head, + &pRequest->disp[sd].head[head].flip, + pFlip)) { + pReply->disp[sd].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP; + ret = FALSE; + continue; /* next head */ + } + if (!nvUpdateFlipEvoHwState(pOpenDev, + pDevEvo, + sd, + head, + &pRequestHead->flip, + pFlip, + FALSE /* allowVrr */, + &pProposedHead->timings.viewPort.possibleUsage)) { + pReply->disp[sd].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP; + ret = FALSE; + continue; /* next head */ + } + + /* + * If the modeset is flipping to a depth 30 surface, record this as + * a hint to headSurface, so it can also allocate its surfaces at + * depth 30. 
+ */ + { + const NVSurfaceEvoRec *pSurfaceEvo = + pFlip->layer[NVKMS_MAIN_LAYER].pSurfaceEvo[NVKMS_LEFT]; + + pProposedHead->hs10bpcHint = + (pSurfaceEvo != NULL) && + (pSurfaceEvo->format == NvKmsSurfaceMemoryFormatA2B10G10R10 || + pSurfaceEvo->format == NvKmsSurfaceMemoryFormatX2B10G10R10); + } + + /* + * EVO3 hal simulates USE_CORE_LUT behavior. + * NVDisplay window channel does allow to change the input LUT + * on immediate flips, therefore force disable tearing + * if LUT is specified. + * + * XXX NVKMS TODO: Implement separate input programming for + * base and overlay layers and remove code block. + */ + if ((pRequestHead->lut.input.specified || + pRequestHead->lut.output.specified) && + !pDevEvo->hal->caps.supportsCoreLut) { + pFlip->layer[NVKMS_MAIN_LAYER].tearing = FALSE; + } + + /* Construct audio state */ + { + NVDpyEvoRec *pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedHead->dpyIdList, + pDispEvo); + + nvHdmiDpConstructHeadAudioState(pProposedHead->activeRmId, + pDpyEvo, &pProposedHead->audio); + } + } /* head */ + } /* pDispEvo */ + + /* Assign allowFlipLockGroup for the heads specified in the request. */ + + allowFlipLockGroup = GetAvailableAllowFlipLockGroupValue(pProposed); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NvU32 head; + + if ((pRequest->requestedDispsBitMask & NVBIT(sd)) == 0) { + continue; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequest->disp[sd].head[head]; + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposed->disp[sd].head[head]; + + if ((pRequest->disp[sd].requestedHeadsBitMask & + NVBIT(head)) == 0) { + continue; + } + + if (pRequestHead->allowFlipLock) { + pProposedHead->allowFlipLockGroup = allowFlipLockGroup; + } + } + } + + return ret; +} + + +/*! + * Validate the proposed configuration on the specified disp using IMP. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposed The requested configuration. + * \param[in] pProposedDisp The requested configuration for this disp. + * \param[out] pWorkArea The scratch space for the current modeset request. + * + * \return If pProposedDisp passes IMP, return TRUE. Otherwise, + * return FALSE. + */ +static NvBool +ValidateProposedModeSetHwStateOneDispImp(NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState + *pProposed, + NVProposedModeSetHwStateOneDisp + *pProposedDisp, + NVModeSetWorkArea *pWorkArea) +{ + NVValidateImpOneDispHeadParamsRec timingsParams[NVKMS_MAX_HEADS_PER_DISP]; + NvBool skipImpCheck = TRUE, requireBootClocks = FALSE; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head, downgradePossibleHeadsBitMask = 0; + NVEvoReallocateBandwidthMode reallocBandwidth = pDevEvo->isSOCDisplay ? + NV_EVO_REALLOCATE_BANDWIDTH_MODE_PRE : + NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE; + + nvkms_memset(&timingsParams, 0, sizeof(timingsParams)); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + const NvBool skipImpCheckThisHead = + (pProposedHead->modeValidationParams.overrides & + NVKMS_MODE_VALIDATION_NO_EXTENDED_GPU_CAPABILITIES_CHECK) != 0; + + const NvBool requireBootClocksThisHead = + (pProposedHead->modeValidationParams.overrides & + NVKMS_MODE_VALIDATION_REQUIRE_BOOT_CLOCKS) != 0; + + /* + * Don't try to downgrade heads which are not marked as changed. 
+ * This could lead to unchanged/not-requested heads hogging all + * the disp bandwidth and preventing otherwise possible modesets, + * but it fixes the cases where we could have downgraded unchanged/ + * not-requested heads without NVKMS clients knowing about it. + * Even if we add some mechanism through the modeset reply to notify + * clients about such a change, not all clients might be in a position + * to handle it. This seems to be a fair trade-off for Orin, as by + * default all heads are initialized with minimal usage bounds. + */ + if (pProposedHead->changed) { + downgradePossibleHeadsBitMask |= NVBIT(head); + } + + if (pProposedHead->pConnectorEvo == NULL) { + continue; + } + + timingsParams[head].pConnectorEvo = pProposedHead->pConnectorEvo; + timingsParams[head].activeRmId = pProposedHead->activeRmId; + timingsParams[head].pTimings = &pProposedHead->timings; + timingsParams[head].pUsage = + &pProposedHead->timings.viewPort.guaranteedUsage; + + skipImpCheck = skipImpCheck && skipImpCheckThisHead; + requireBootClocks = requireBootClocks || requireBootClocksThisHead; + } + + if (skipImpCheck && + reallocBandwidth == NV_EVO_REALLOCATE_BANDWIDTH_MODE_NONE) { + return TRUE; + } + + if (!nvValidateImpOneDispDowngrade(pDispEvo, timingsParams, + requireBootClocks, + reallocBandwidth, + downgradePossibleHeadsBitMask)) { + return FALSE; + } + + if (pDevEvo->isSOCDisplay) { + NvBool ret; + struct NvKmsUsageBounds *guaranteedAndProposed = + nvCalloc(1, sizeof(*guaranteedAndProposed) * + NVKMS_MAX_HEADS_PER_DISP); + if (guaranteedAndProposed == NULL) { + return FALSE; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + const struct NvKmsUsageBounds *pProposedUsage; + + if (pProposedHead->pConnectorEvo == NULL) { + continue; + } + + if (pProposedHead->changed) { + pProposedUsage = &pProposed->sd[0].head[head].flip.usage; + } else { + pProposedUsage = + &pDevEvo->gpus[0].headState[head].preallocatedUsage; + } + + guaranteedAndProposed[head] = nvUnionUsageBounds( + &pProposedHead->timings.viewPort.guaranteedUsage, + pProposedUsage); + timingsParams[head].pUsage = &guaranteedAndProposed[head]; + } + + ret = nvValidateImpOneDisp(pDispEvo, timingsParams, + requireBootClocks, + reallocBandwidth, + &pWorkArea->postModesetIsoBandwidthKBPS, + &pWorkArea->postModesetDramFloorKBPS); + + nvFree(guaranteedAndProposed); + + if (!ret) { + return FALSE; + } + + nvScheduleLowerDispBandwidthTimer(pDevEvo); + } + + return TRUE; +} + +static NvBool SkipDisplayPortBandwidthCheck( + const NVProposedModeSetHwStateOneHead *pProposedHead) +{ + return (pProposedHead->modeValidationParams.overrides & + NVKMS_MODE_VALIDATION_NO_DISPLAYPORT_BANDWIDTH_CHECK) != 0; +} + +static NvBool DowngradeDpPixelDepth( + NVDispEvoPtr pDispEvo, + NVProposedModeSetHwStateOneDisp *pProposedDisp, + const NVConnectorEvoRec *pConnectorEvo) +{ + NvU32 head; + + /* + * In DP-MST case, many heads can share same connector and dp-bandwidth + * therefore its necessary to validate and downgrade dp-pixel-depth across + * all head which are sharing same connector before retry. 
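+ *
+ * ValidateProposedModeSetHwStateOneDispDPlib() below drives this: when
+ * nvDPLibValidateTimings() fails for a head, one head sharing the
+ * failing connector is downgraded a step here and validation of the
+ * whole disp is retried, until it passes or nothing more can be
+ * downgraded.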
+ */ + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + NVHwModeTimingsEvoPtr pTimings = &pProposedHead->timings; + + if (SkipDisplayPortBandwidthCheck(pProposedHead)) { + continue; + } + + if ((pProposedHead->pConnectorEvo == pConnectorEvo) && + nvDowngradeHwModeTimingsDpPixelDepthEvo( + pTimings, + pProposedHead->colorSpace)) { + return TRUE; + } + } + + return FALSE; +} + +/*! + * Validate the DisplayPort bandwidth of the proposed disp configuration. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposedDisp The requested configuration. + * + * \return If pProposedDisp passes the DP bandwidth check, return + * TRUE. Otherwise, return FALSE. + */ +static NvBool ValidateProposedModeSetHwStateOneDispDPlib( + NVDispEvoPtr pDispEvo, + NVProposedModeSetHwStateOneDisp *pProposedDisp) +{ + NvU32 head; + NvBool bResult = TRUE, bTryAgain = FALSE; + + +tryAgain: + + bTryAgain = FALSE; + bResult = TRUE; + + nvDPBeginValidation(pDispEvo); + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + NVHwModeTimingsEvoPtr pTimings = &pProposedHead->timings; + + if ((pProposedHead->pConnectorEvo == NULL) || + SkipDisplayPortBandwidthCheck(pProposedHead)) { + continue; + } + + bResult = !!nvDPLibValidateTimings(pDispEvo, + head, + pProposedHead->activeRmId, + pProposedHead->dpyIdList, + pProposedHead->colorSpace, + &pProposedHead->modeValidationParams, + pTimings); + + if (!bResult) { + if (DowngradeDpPixelDepth(pDispEvo, + pProposedDisp, + pProposedHead->pConnectorEvo)) { + bTryAgain = TRUE; + } + + /* + * Cannot downgrade pixelDepth further -- + * This proposed mode-set is not possible on this DP link, so fail. + */ + + break; + } + } + + bResult = !!nvDPEndValidation(pDispEvo) && bResult; + + if (bTryAgain) { + goto tryAgain; + } + + if (bResult) { + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + if ((pProposedHead->pConnectorEvo == NULL) || + !nvConnectorUsesDPLib(pProposedHead->pConnectorEvo)) { + pProposedHead->pDpLibModesetState = NULL; + continue; + } + + pProposedHead->pDpLibModesetState = + nvDPLibCreateModesetState(pDispEvo, + head, + pProposedHead->activeRmId, + pProposedHead->dpyIdList, + pProposedHead->colorSpace, + &pProposedHead->timings); + if (pProposedHead->pDpLibModesetState == NULL) { + return FALSE; + } + } + } + + return bResult; +} + + + +/*! + * Validate the proposed configuration on the specified disp. + * + * \param[in] pDispEvo The disp to which pProposedDisp is to be applied. + * \param[in] pProposedDisp The requested configuration. + * \param[out] pReplyDisp The reply structure for the client. + * \param[out] pWorkArea The scratch space for the current modeset request. + * + * \return If pProposedDisp is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReplyDisp to non-SUCCESS, + * and return FALSE. 
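+ *
+ * The checks below run in this order: the requested connectors can be
+ * driven simultaneously, no dpy is assigned to more than one head, the
+ * flip state of each changed head is valid, ViewPortIn and the scaler
+ * taps are achievable, the DisplayPort bandwidth check passes, and
+ * finally the configuration passes IMP.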
+ */ +static NvBool +ValidateProposedModeSetHwStateOneDisp( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + NVProposedModeSetHwStateOneDisp *pProposedDisp, + struct NvKmsSetModeOneDispReply *pReplyDisp, + NVModeSetWorkArea *pWorkArea) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVDpyIdList dpyIdList; + NvU32 head; + + /* + * Check that the requested configuration of connectors can be + * driven simultaneously. + */ + dpyIdList = nvEmptyDpyIdList(); + for (head = 0; head < pDevEvo->numHeads; head++) { + NVDpyEvoPtr pDpyEvo; + FOR_ALL_EVO_DPYS(pDpyEvo, + pProposedDisp->head[head].dpyIdList, pDispEvo) { + dpyIdList = nvAddDpyIdToDpyIdList(pDpyEvo->pConnectorEvo->displayId, + dpyIdList); + } + } + + if (!nvRmIsPossibleToActivateDpyIdList(pDispEvo, dpyIdList)) { + pReplyDisp->status = NVKMS_SET_MODE_ONE_DISP_STATUS_INCOMPATIBLE_DPYS; + return FALSE; + } + + /* + * Check that no dpyId is used by multiple heads. + */ + dpyIdList = nvEmptyDpyIdList(); + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + /* + * Intersect the proposed dpys for this head with the + * accumulated list of dpys for this disp; if the intersection + * is not empty, a dpy is proposed to be used on multiple + * heads. + */ + NVDpyIdList proposedDpyIdList = + pProposedDisp->head[head].dpyIdList; + NVDpyIdList intersectedDpyIdList = + nvIntersectDpyIdListAndDpyIdList(dpyIdList, proposedDpyIdList); + + if (!nvDpyIdListIsEmpty(intersectedDpyIdList)) { + pReplyDisp->status = NVKMS_SET_MODE_ONE_DISP_STATUS_DUPLICATE_DPYS; + return FALSE; + } + + dpyIdList = nvAddDpyIdListToDpyIdList(dpyIdList, proposedDpyIdList); + } + + /* + * Check that the requested flipping state is valid. + */ + + for (head = 0; head < pDevEvo->numHeads; head++) { + + if (!pProposedDisp->head[head].changed) { + continue; + } + + if (nvDpyIdListIsEmpty(pProposedDisp->head[head].dpyIdList)) { + continue; + } + + if (!nvValidateFlipEvoHwState( + pDevEvo, + head, + &pProposedDisp->head[head].timings, + &pProposed->sd[pDispEvo->displayOwner].head[head].flip)) { + pReplyDisp->head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_FLIP; + return FALSE; + } + } + + /* + * Check ViewPortIn dimensions and ensure valid h/vTaps can be assigned. + */ + for (head = 0; head < pDevEvo->numHeads; head++) { + + /* XXX assume the gpus have equal capabilities */ + const NVEvoScalerCaps *pScalerCaps = + &pDevEvo->gpus[0].capabilities.head[head].scalerCaps; + const NVHwModeTimingsEvoPtr pTimings = &pProposedDisp->head[head].timings; + + if (!nvValidateHwModeTimingsViewPort(pDevEvo, pScalerCaps, pTimings, + &dummyInfoString)) { + pReplyDisp->head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_MODE; + return FALSE; + } + } + + /* + * Check that the configuration fits DisplayPort bandwidth constraints. + */ + if (!ValidateProposedModeSetHwStateOneDispDPlib(pDispEvo, pProposedDisp)) { + pReplyDisp->status = + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_DISPLAY_PORT_BANDWIDTH_CHECK; + return FALSE; + } + + /* + * Check that the configuration passes IMP. + */ + if (!ValidateProposedModeSetHwStateOneDispImp(pDispEvo, pProposed, + pProposedDisp, pWorkArea)) { + pReplyDisp->status = + NVKMS_SET_MODE_ONE_DISP_STATUS_FAILED_EXTENDED_GPU_CAPABILITIES_CHECK; + return FALSE; + } + + return TRUE; +} + +/*! + * Validate the proposed configuration. + * + * \param[in] pDevEvo The device to which pProposed is to be applied. + * \param[in] pProposed The requested configuration. + * \param[out] pReply The reply structure for the client. 
+ * \param[out] pWorkArea The scratch space for the current modeset request. + * + * \return If pProposed is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReply to non-SUCCESS, + * and return FALSE. + */ +static NvBool +ValidateProposedModeSetHwState(NVDevEvoPtr pDevEvo, + NVProposedModeSetHwState *pProposed, + struct NvKmsSetModeReply *pReply, + NVModeSetWorkArea *pWorkArea) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + NvBool ret = FALSE; + NVProposedModeSetHwState *pActual = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE, + sizeof(*pActual)); + + /* + * Copy the proposed modeset to a scratch area. During the process below, + * we may modify some parts of the timings. If all of validation succeeds, + * then we'll copy the modified version back out; if not, we don't want to + * touch the input. + */ + nvkms_memcpy(pActual, pProposed, sizeof(*pProposed)); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + NVProposedModeSetHwStateOneDisp *pProposedDisp = + &pActual->disp[dispIndex]; + struct NvKmsSetModeOneDispReply *pReplyDisp; + + pReplyDisp = &pReply->disp[dispIndex]; + + if (!ValidateProposedModeSetHwStateOneDisp(pDispEvo, + pActual, + pProposedDisp, + pReplyDisp, + pWorkArea)) { + goto done; + } + } + + nvkms_memcpy(pProposed, pActual, sizeof(*pProposed)); + ret = TRUE; + +done: + nvPreallocRelease(pDevEvo, PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE); + + return ret; +} + +/*! + * Ensure there is an SOR assigned for this pConnectorEvo, for use by + * the pending modeset. + * + * In DP-MST, multiple heads may use the same pConnectorEvo, and they + * should use the same SOR. + * + * When we call nvAssignSOREvo(), we have to tell RM which SORs have + * already been assigned and need to be excluded from consideration for + * the new SOR assignment request. + */ +static void AssignSor(NVModeSetWorkArea *pWorkArea, + NVConnectorEvoPtr pConnectorEvo) +{ + const NvU32 sd = pConnectorEvo->pDispEvo->displayOwner; + + if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + // Nothing to do! + return; + } + + /* If an OR has already been assigned for this connector, we are done. */ + if (nvDpyIdIsInDpyIdList( + pConnectorEvo->displayId, + pWorkArea->sd[sd].sorAssignedConnectorsList)) { + nvAssert(pConnectorEvo->or.mask != 0x0); + return; + } + + /* + * We keep a record all the SORs assigned for this modeset, so that + * it can be used as the sorExcludeMask argument to + * nvAssignSOREvo(). 
+ */ + if (nvAssignSOREvo( + pConnectorEvo, + pWorkArea->sd[sd].assignedSorMask /* sorExcludeMask */)) { + nvAssert(pConnectorEvo->or.mask != 0x0); + + pWorkArea->sd[sd].sorAssignedConnectorsList = + nvAddDpyIdToDpyIdList( + pConnectorEvo->displayId, + pWorkArea->sd[sd].sorAssignedConnectorsList); + pWorkArea->sd[sd].assignedSorMask |= pConnectorEvo->or.mask; + } else { + nvAssert(!"Failed to assign SOR, this failure might cause hang!"); + } +} + +static void AssignProposedUsageOneHead( + NVDevEvoPtr pDevEvo, + const NVProposedModeSetHwState *pProposed, + NvU32 head) +{ + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposed->disp[0].head[head]; + const NVProposedModeSetHwStateOneSubDev *pProposedSd = + &pProposed->sd[0]; + + if (!pDevEvo->isSOCDisplay || (pProposedHead->pConnectorEvo == NULL)) { + return; + } + + pDevEvo->gpus[0].headState[head].preallocatedUsage = + pProposedSd->head[head].flip.usage; +} + +static NvBool IsProposedModeSetHwStateOneDispIncompatibleWithDpy +( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + const NVConnectorEvoRec *pConnectorEvo +) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + if (!pProposedHead->changed) { + continue; + } + + /* + * DDC partners incompatible with each other, only one should be active + * at a time. + */ + if ((pProposedHead->pConnectorEvo != NULL) && + nvDpyIdIsInDpyIdList(pProposedHead->pConnectorEvo->displayId, + pConnectorEvo->ddcPartnerDpyIdsList)) { + return TRUE; + } + } + + return FALSE; +} + +static void +KickoffModesetUpdateState( + NVDispEvoPtr pDispEvo, + NVEvoModesetUpdateState *modesetUpdateState) +{ + if (!nvDpyIdListIsEmpty(modesetUpdateState->connectorIds)) { + NVConnectorEvoRec *pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, + modesetUpdateState->connectorIds)) { + continue; + } + + if (pConnectorEvo->pDpLibConnector != NULL) { + nvDPPreSetMode(pConnectorEvo->pDpLibConnector, + modesetUpdateState); + } else if (nvConnectorIsDPSerializer(pConnectorEvo)) { + nvDPSerializerPreSetMode(pDispEvo, pConnectorEvo); + } + } + } + + nvDoIMPUpdateEvo(pDispEvo, + &modesetUpdateState->updateState); + + if (!nvDpyIdListIsEmpty(modesetUpdateState->connectorIds)) { + NVConnectorEvoRec *pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, + modesetUpdateState->connectorIds)) { + continue; + } + + if (pConnectorEvo->pDpLibConnector != NULL) { + nvDPPostSetMode(pConnectorEvo->pDpLibConnector); + } else if (nvConnectorIsDPSerializer(pConnectorEvo)) { + nvDPSerializerPostSetMode(pDispEvo, pConnectorEvo); + } + } + } + + *modesetUpdateState = + (NVEvoModesetUpdateState) { }; +} + +/*! + * Determine if display devices driven by head are incompatible with newly + * activated display devices. + */ +static NvBool +IsProposedModeSetHwStateOneHeadIncompatible( + NVDispEvoPtr pDispEvo, + NvU32 head, + const + NVProposedModeSetHwStateOneDisp *pProposedDisp) +{ + const NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[head]; + NvBool isIncompatible; + + /* + * DisplayPort devices require an EVO update when detaching the head + * from the SOR, because DPlib performs link-training to powerdown + * the link. So, always consider DisplayPort as incompatible. 
+ */ + + isIncompatible = + nvConnectorUsesDPLib(pHeadState->pConnectorEvo) || + IsProposedModeSetHwStateOneDispIncompatibleWithDpy(pDispEvo, + pProposedDisp, + pHeadState->pConnectorEvo); + + return isIncompatible; +} + +static void DisableActiveCoreRGSyncObjects(NVDevEvoPtr pDevEvo, + NVDispHeadStateEvoPtr pHeadState, + NvU32 head, + NVEvoUpdateState *pUpdateState) +{ + for (int i = 0; i < pHeadState->numVblankSyncObjectsCreated; i++) { + if (pHeadState->vblankSyncObjects[i].enabled) { + /* hCtxDma of 0 indicates Disable. */ + pDevEvo->hal->ConfigureVblankSyncObject( + pDevEvo, + 0, /* rasterLine */ + head, + i, + 0, /* hCtxDma */ + pUpdateState); + pHeadState->vblankSyncObjects[i].enabled = FALSE; + } + } +} + +/*! + * Send methods to shut down a head + * + * \param[in,out] pDispEvo The disp of the head. + * \param[in] head The head to consider. + * \param[in] pProposedDisp The requested configuration of the display + * \param[in/out] modesetUpdateState Structure tracking channels which need to + * be updated/kicked off + */ +static void +ApplyProposedModeSetHwStateOneHeadShutDown( + NVDispEvoPtr pDispEvo, + NvU32 head, + const + NVProposedModeSetHwStateOneDisp + *pProposedDisp, + NVModeSetWorkArea *pWorkArea) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDispHeadStateEvoPtr pHeadState; + NVDpyEvoPtr pDpyEvo; + const NvU32 sd = pDispEvo->displayOwner; + + /* + * If nothing changed about this head's configuration, then we + * should not shut it down. + */ + if (!pProposedDisp->head[head].changed) { + return; + } + + /* + * Otherwise, go through the shutdown process for any head that + * changed. If NVProposedModeSetHwStateOneHead::dpyIdList is + * empty, then we'll leave it shut down. If it is non-empty, then + * ApplyProposedModeSetHwStateOneHead{Pre,Post}Update() will + * update the head with its new configuration. + */ + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + pHeadState = &pDispEvo->headState[head]; + pDpyEvo = nvGetOneArbitraryDpyEvo(pHeadState->activeDpys, pDispEvo); + + /* + * Identify and disable any active core RG sync objects. + * + * Note: the disable occurs at the hardware level; this intentionally does + * not clear the software state tracking the existence of these sync + * objects, which will be re-enabled at the hardware level in + * ApplyProposedModeSetHwStateOneHeadPreUpdate(), if the given head will be + * active after the modeset. + */ + DisableActiveCoreRGSyncObjects(pDevEvo, pHeadState, head, + &pWorkArea->modesetUpdateState.updateState); + + nvDisable3DVisionAegis(pDpyEvo); + + nvHdmiDpEnableDisableAudio(pDispEvo, head, FALSE /* enable */); + + /* Cancel any pending LUT updates. */ + nvCancelLutUpdateEvo(pDispEvo, head); + + nvEvoDetachConnector(pHeadState->pConnectorEvo, head, &pWorkArea->modesetUpdateState); + + /* Clear software shadow state. 
*/ + + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList = + nvAddDpyIdListToDpyIdList( + pHeadState->activeDpys, + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList); + pHeadState->activeDpys = nvEmptyDpyIdList(); + pHeadState->pConnectorEvo = NULL; + + pHeadState->bypassComposition = FALSE; + nvkms_memset(&pHeadState->timings, 0, sizeof(pHeadState->timings)); + + /* Track old activeRmId and free it after end modeset */ + pWorkArea->sd[pDispEvo->displayOwner].head[head].oldActiveRmId = + pHeadState->activeRmId; + pHeadState->activeRmId = 0; + + pHeadState->allowFlipLockGroup = 0; + + nvkms_memset(&pHeadState->audio, 0, sizeof(pHeadState->audio)); + + nvkms_memset(&pHeadState->modeValidationParams, 0, + sizeof(pHeadState->modeValidationParams)); + + nvkms_memset(&pDevEvo->gpus[sd].headState[head], 0, + sizeof(pDevEvo->gpus[sd].headState[head])); + + pDpyEvo->head = NV_INVALID_HEAD; +} + +static void +ApplyProposedModeSetHwStateOneDispFlip( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVEvoUpdateState *pUpdateState) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + const NvU32 sd = pDispEvo->displayOwner; + + /* + * If nothing changed about this head's configuration, then there + * is nothing to do. + */ + if (!pProposedHead->changed) { + continue; + } + + /* Check for disabled heads. */ + if (pProposedHead->pConnectorEvo == NULL) { + continue; + } + + nvSetUsageBoundsEvo(pDevEvo, sd, head, + &pProposed->sd[sd].head[head].flip.usage, + pUpdateState); + + nvFlipEvoOneHead(pDevEvo, sd, head, + &pProposed->sd[sd].head[head].flip, + FALSE /* allowFlipLock */, + pUpdateState); + } +} + +static void ReenableActiveCoreRGSyncObjects(NVDevEvoPtr pDevEvo, + NVDispHeadStateEvoPtr pHeadState, + NvU32 head, + NVEvoUpdateState *pUpdateState) +{ + for (int i = 0; i < pHeadState->numVblankSyncObjectsCreated; i++) { + if (pHeadState->vblankSyncObjects[i].inUse) { + pDevEvo->hal->ConfigureVblankSyncObject( + pDevEvo, + pHeadState->timings.rasterBlankStart.y, + head, + i, + pHeadState->vblankSyncObjects[i].evoSyncpt.hCtxDma, + pUpdateState); + + pHeadState->vblankSyncObjects[i].enabled = TRUE; + } + } +} + +/*! + * Update the heads to be modified on this disp. + * + * This should update the ASSY state of the head, but not trigger an + * UPDATE method. + * + * \param[in,out] pDispEvo The disp of the head. + * \param[in] head The head to consider. + * \param[in] pProposedHead The requested configuration of the head. + * \param[in,out] updateState Indicates which channels require UPDATEs + * \param[in] bypassComposition + * On Turing and newer, enable display + * composition pipeline bypass mode. 
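+ *
+ * (See the sequence overview at the top of this file:
+ * ApplyProposedModeSetHwStateOneHeadShutDown() has already run for the
+ * changed heads, this function then programs the new ASSY state, a
+ * single UPDATE is sent, and
+ * ApplyProposedModeSetHwStateOneHeadPostUpdate() performs the
+ * remaining per-head work afterwards.)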
+ */ +static void +ApplyProposedModeSetHwStateOneHeadPreUpdate( + NVDispEvoPtr pDispEvo, + NvU32 head, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVModeSetWorkArea *pWorkArea, + NvBool bypassComposition) +{ + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + NVEvoModesetUpdateState *pModesetUpdateState = &pWorkArea->modesetUpdateState; + NVEvoUpdateState *updateState = &pModesetUpdateState->updateState; + NVDispHeadStateEvoPtr pHeadState; + NVDpyEvoPtr pDpyEvo = + nvGetOneArbitraryDpyEvo(pProposedHead->dpyIdList, pDispEvo); + + /* + * If nothing changed about this head's configuration, then there + * is nothing to do. + */ + if (!pProposedHead->changed) { + return; + } + + /* Check for disabled heads. */ + + if (pProposedHead->pConnectorEvo == NULL) { + /* + * ApplyProposedModeSetHwStateOneHeadShutDown() should have + * already been called for this head. + */ + nvAssert(!nvHeadIsActive(pDispEvo, head)); + return; + } + + if (pDpyEvo == NULL) { + nvAssert(!"Invalid pDpyEvo"); + return; + } + + pDpyEvo->head = head; + + AssignSor(pWorkArea, pProposedHead->pConnectorEvo); + + nvDpyUpdateHdmiPreModesetEvo(pDpyEvo); + + pHeadState = &pDispEvo->headState[head]; + + pHeadState->bypassComposition = bypassComposition; + + pHeadState->activeRmId = pProposedHead->activeRmId; + + /* + * Cache the list of active pDpys for this head, as well as the + * mode timings. + */ + pHeadState->activeDpys = pProposedHead->dpyIdList; + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList = + nvAddDpyIdListToDpyIdList( + pHeadState->activeDpys, + pWorkArea->sd[pDispEvo->displayOwner].changedDpyIdList); + + nvAssert(pDpyEvo->pConnectorEvo == pProposedHead->pConnectorEvo); + pHeadState->pConnectorEvo = pProposedHead->pConnectorEvo; + + pHeadState->timings = pProposedHead->timings; + + pHeadState->audio = pProposedHead->audio; + + AssignProposedUsageOneHead(pDispEvo->pDevEvo, pProposed, head); + + nvSendHwModeTimingsToAegisEvo(pDispEvo, head); + + /* Set LUT settings */ + nvEvoSetLut(pDispEvo, head, FALSE /* kickoff */, &pProposedHead->lut); + + /* Update current LUT to hardware */ + nvEvoUpdateCurrentPalette(pDispEvo, head, FALSE /* kickoff */); + + nvEvoSetTimings(pDispEvo, head, updateState); + + // Set the dither type & mode + nvSetDitheringEvo(pDispEvo, head, + pDpyEvo->requestedDithering.state, + pDpyEvo->requestedDithering.depth, + pDpyEvo->requestedDithering.mode, + updateState); + + nvEvoHeadSetControlOR(pDispEvo, head, updateState); + + /* Update hardware's current colorSpace and colorRange */ + pHeadState->attributes.colorSpace = pProposedHead->colorSpace; + pHeadState->attributes.colorRange = pProposedHead->colorRange; + nvUpdateCurrentHardwareColorSpaceAndRangeEvo(pDispEvo, head, updateState); + + nvEvoAttachConnector(pProposedHead->pConnectorEvo, + head, + pProposedHead->pDpLibModesetState, + pModesetUpdateState); + + nvSetViewPortsEvo(pDispEvo, head, updateState); + + nvSetImageSharpeningEvo( + pDispEvo, + head, + pDpyEvo->currentAttributes.imageSharpening.value, + updateState); + + + nvSetDVCEvo(pDispEvo, head, + pDpyEvo->currentAttributes.dvc, + updateState); + + pHeadState->attributes.digitalSignal = + nvGetDefaultDpyAttributeDigitalSignalValue(pDpyEvo->pConnectorEvo); + + /* If required, nvHdmiFrlSetConfig() overrides attributes.digitalSignal */ + nvHdmiFrlSetConfig(pDispEvo, head); + + /* + * Re-enable any active sync objects, configuring them in accordance with + * the new timings. 
+ */ + ReenableActiveCoreRGSyncObjects(pDispEvo->pDevEvo, pHeadState, head, + updateState); +} + + +/*! + * Update the heads to be modified on this disp. + * + * PreUpdate() will have already been called on this head, and an + * UPDATE method sent. + * + * \param[in,out] pDispEvo The disp of the head. + * \param[in] head The head to consider. + * \param[in] pProposedHead The requested configuration of the head. + */ +static void +ApplyProposedModeSetHwStateOneHeadPostUpdate(NVDispEvoPtr pDispEvo, + NvU32 head, + const + NVProposedModeSetHwStateOneHead + *pProposedHead) +{ + NVDispHeadStateEvoRec *pHeadState; + + /* + * If nothing changed about this head's configuration, then there + * is nothing to do. + */ + if (!pProposedHead->changed) { + return; + } + + if (!nvHeadIsActive(pDispEvo, head)) { + return; + } + + /* + * Cache configuration state in the headState, so that + * AssignProposedModeSetHwState() can preserve the configuration + * if this head is left alone in the next NvKmsSetModeRequest. + */ + pHeadState = &pDispEvo->headState[head]; + pHeadState->allowFlipLockGroup = pProposedHead->allowFlipLockGroup; + pHeadState->modeValidationParams = pProposedHead->modeValidationParams; + pHeadState->hs10bpcHint = pProposedHead->hs10bpcHint; + + nvUpdateInfoFrames(pDispEvo, head); + + /* Perform 3D vision authentication */ + nv3DVisionAuthenticationEvo(pDispEvo, head); + + nvHdmiDpEnableDisableAudio(pDispEvo, head, TRUE /* enable */); +} + +/* + * Shut down all heads that are incompatible with pProposedDisp. This + * requires doing an update immediately. + */ +static void +KickoffProposedModeSetHwStateIncompatibleHeadsShutDown( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVModeSetWorkArea *pWorkArea) +{ + NvU32 head; + NvBool foundIncompatibleHead = FALSE; + NvU32 clearHdmiFrlActiveRmId[NVKMS_MAX_HEADS_PER_DISP] = { }; + NVDpyIdList proposedActiveConnectorsList = nvEmptyDpyIdList(); + NVDpyIdList currActiveConnectorsList = nvEmptyDpyIdList(); + NVDpyIdList proposedInactiveConnectorList, unionOfActiveConnectorList; + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NVDpyId activeConnectorId = + (pDispEvo->headState[head].pConnectorEvo != NULL) ? + pDispEvo->headState[head].pConnectorEvo->displayId : + nvInvalidDpyId(); + NVDpyId proposedConnectorId = + (pProposedDisp->head[head].pConnectorEvo != NULL) ? + pProposedDisp->head[head].pConnectorEvo->displayId : + nvInvalidDpyId(); + + currActiveConnectorsList = + nvAddDpyIdToDpyIdList(activeConnectorId, + currActiveConnectorsList); + + proposedActiveConnectorsList = + nvAddDpyIdToDpyIdList(proposedConnectorId, + proposedActiveConnectorsList); + } + + proposedInactiveConnectorList = + nvDpyIdListMinusDpyIdList(currActiveConnectorsList, + proposedActiveConnectorsList); + unionOfActiveConnectorList = + nvAddDpyIdListToDpyIdList(proposedActiveConnectorsList, + currActiveConnectorsList); + + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + NvBool thisHeadIncompatible = FALSE; + const NVConnectorEvoRec *pCurrConnectorEvo = + pDispEvo->headState[head].pConnectorEvo; + + if (!pProposedDisp->head[head].changed || !nvHeadIsActive(pDispEvo, head)) { + continue; + } + + /* + * If the number of current active connectors + proposed active + * connectors is greater than number of heads then modeset is under + * risk to run out of SORs. This is because the number of connectors > + * the number of SORs >= the number of heads. 
+ * + * The sor assignment failure during modeset causes display engine + * and/or kernel panics. + * + * In this situation, all the connectors which are not going active + * after modeset, mark them incompatible and shut down them before + * triggering modeset on all the active connectors. + */ + if (nvCountDpyIdsInDpyIdList(unionOfActiveConnectorList) > + pDispEvo->pDevEvo->numHeads && + nvDpyIdIsInDpyIdList(pCurrConnectorEvo->displayId, + proposedInactiveConnectorList)) { + thisHeadIncompatible = TRUE; + } + + /* if the *new* timings are FRL, then we need to shut down the head. */ + if (pProposedDisp->head[head].timings.hdmiFrlConfig.frlRate != + HDMI_FRL_DATA_RATE_NONE) { + thisHeadIncompatible = TRUE; + } + + /* if the *old* timings are FRL, then we need to shut down the head and + * clear the FRL config. */ + if (pDispEvo->headState[head].timings.hdmiFrlConfig.frlRate != + HDMI_FRL_DATA_RATE_NONE) { + thisHeadIncompatible = TRUE; + /* cache the activeRmId since it will be cleared below, but + * we don't want to actually call into the HDMI library until + * afterwards. */ + clearHdmiFrlActiveRmId[head] = pDispEvo->headState[head].activeRmId; + } + + if (IsProposedModeSetHwStateOneHeadIncompatible(pDispEvo, + head, + pProposedDisp)) { + thisHeadIncompatible = TRUE; + } + + if (!thisHeadIncompatible) { + continue; + } + + ApplyProposedModeSetHwStateOneHeadShutDown( + pDispEvo, + head, + pProposedDisp, + pWorkArea); + + foundIncompatibleHead = TRUE; + } + + /* Submit UPDATE method and kick off, to shut down incompatible heads. */ + if (foundIncompatibleHead) { + KickoffModesetUpdateState(pDispEvo, &pWorkArea->modesetUpdateState); + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + if (clearHdmiFrlActiveRmId[head] == 0) { + continue; + } + nvHdmiFrlClearConfig(pDispEvo, clearHdmiFrlActiveRmId[head]); + } + } +} + +static void +KickoffProposedModeSetHwState( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + const NvBool bypassComposition, + NVModeSetWorkArea *pWorkArea) +{ + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NVEvoModesetUpdateState *pModesetUpdateState = &pWorkArea->modesetUpdateState; + /* + * If there is a change in window ownership, decouple window channel flips + * and the core channel update that performs a modeset. + * + * This allows window channel flips to be instead interlocked with the core + * channel update that sets the window usage bounds, avoiding window + * invalid usage exceptions. + * + * See comment about NVDisplay error code 37, in + * function EvoInitWindowMapping3(). + */ + const NvBool decoupleFlipUpdates = + pModesetUpdateState->windowMappingChanged; + NvU32 head; + + /* Send methods to shut down any other unused heads, but don't update yet. */ + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + ApplyProposedModeSetHwStateOneHeadShutDown( + pDispEvo, + head, + pProposedDisp, + pWorkArea); + } + + /* Apply pre-UPDATE modifications for any enabled heads. */ + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + ApplyProposedModeSetHwStateOneHeadPreUpdate( + pDispEvo, + head, + pProposed, + pProposedDisp, + pWorkArea, + bypassComposition); + } + + if (!decoupleFlipUpdates) { + /* Merge modeset and flip state updates together */ + ApplyProposedModeSetHwStateOneDispFlip( + pDispEvo, + pProposed, + pProposedDisp, + &pModesetUpdateState->updateState); + } + + /* Submit UPDATE method and kick off. 
*/ + KickoffModesetUpdateState(pDispEvo, + pModesetUpdateState); + + if (decoupleFlipUpdates) { + NVEvoUpdateState flipUpdateState = { }; + + ApplyProposedModeSetHwStateOneDispFlip( + pDispEvo, + pProposed, + pProposedDisp, + &flipUpdateState); + + pDevEvo->hal->Update(pDevEvo, + &flipUpdateState, + TRUE /* releaseElv */); + } + + nvRemoveUnusedHdmiDpAudioDevice(pDispEvo); + + /* Apply post-UPDATE modifications for any enabled heads. */ + for (head = 0; head < pDispEvo->pDevEvo->numHeads; head++) { + + const NVProposedModeSetHwStateOneHead *pProposedHead = + &pProposedDisp->head[head]; + + ApplyProposedModeSetHwStateOneHeadPostUpdate( + pDispEvo, + head, + pProposedHead); + } +} + +static void AllocatePostModesetDispBandwidth(NVDispEvoPtr pDispEvo, + NVModeSetWorkArea *pWorkArea) +{ + const NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + NvU64 maxFrameTimeUsec = 0ULL; + NvU32 head; + + if (!pDevEvo->isSOCDisplay) { + return; + } + + if ((pDispEvo->isoBandwidthKBPS == pWorkArea->postModesetIsoBandwidthKBPS) && + (pDispEvo->dramFloorKBPS == pWorkArea->postModesetDramFloorKBPS)) { + return; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU64 curFrameTimeUsec = 0ULL; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + curFrameTimeUsec = nvEvoFrametimeUsFromTimings(&pDispEvo->headState[head].timings); + maxFrameTimeUsec = NV_MAX(maxFrameTimeUsec, curFrameTimeUsec); + } + + nvkms_usleep(maxFrameTimeUsec * 2); + + if (!nvAllocateDisplayBandwidth(pDispEvo, + pWorkArea->postModesetIsoBandwidthKBPS, + pWorkArea->postModesetDramFloorKBPS)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Unexpectedly failed to program post-modeset bandwidth!"); + } +} + +/*! + * Update the disp with the modifications described in pProposedDisp. + * + * \param[in] pDispEvo The disp to be modified. + * \param[in] pProposedDisp The requested configuration of the disp. + * \param[in] pWorkArea Preallocated scratch memory. + * \param[in] updateCoreFirst If true, avoid interlock with core: kick off + * the core channel and wait for a notifier + * before the rest of the channels for this update. + * \param[in] bypassComposition + * On Turing and newer, enable display composition + * pipeline bypass mode. + * + * This function is not allowed to fail. + */ +static void +ApplyProposedModeSetHwStateOneDisp( + NVDispEvoPtr pDispEvo, + const NVProposedModeSetHwState *pProposed, + const NVProposedModeSetHwStateOneDisp *pProposedDisp, + NVModeSetWorkArea *pWorkArea, + NvBool updateCoreFirst, + NvBool bypassComposition) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 head; + const NvU32 sd = pDispEvo->displayOwner; + + nvkms_memset(&pWorkArea->modesetUpdateState, 0, + sizeof(pWorkArea->modesetUpdateState)); + + /* Record the current flip state. */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + nvInitFlipEvoHwState(pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].oldState); + } + + if (updateCoreFirst) { + /* If this is the first core update, initialize the window -> head + * mapping. + * + * Hal ->InitWindowMapping() sets + * NVModesetUpdateState::windowMappingChanged true, if there is + * any change in window ownerships/assignment. This is necessary on + * GV100+ because of a series of unfortunate requirements. + * + * NVDisplay has two requirements that we need to honor: + * + * 1. You can't move a window from one head to another while the head + * is active. + * 2. You can't change window assignments in an update that's + * interlocked with the corresponding window channel. 
+ * + * In addition, GV100 has an additional requirement: + * + * 3. You can't change window assignment for a head while it is + * active, but it's okay to assign windows in the same update that + * activates a head. + * + * If there is a change in window assignment, the task of not + * interlocking core and respective window channels will be handled by + * NVEvoUpdateState::subdev[]::noCoreInterlockMask. + * ->InitWindowMapping() will set 'noCoreInterlockMask' and ->Update() + * will take care not to interlock window channels specified in mask + * with core channel. + * + * The GOP driver and NVKMS assign window channels in the same way. The + * window channels channels 2n and 2n+1 are guaranteed to get assigned + * to head n. + */ + pDevEvo->hal->InitWindowMapping(pDispEvo, &pWorkArea->modesetUpdateState); + } + + /* + * Temporarily lock to the max DRAM frequency to prevent mclk switch events + * from being requested. Display can't tolerate mclk switch events during + * modeset transitions. This max DRAM floor will be released after the Core + * notifier signals post-modeset in the AllocatePostModesetDispBandwidth() + * call below. This only needs to be done for Orin SOC display. + */ + if (!nvAllocateDisplayBandwidth(pDispEvo, + pDispEvo->isoBandwidthKBPS, + NV_U32_MAX)) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Unexpectedly failed to lock to max DRAM pre-modeset!"); + } + + KickoffProposedModeSetHwStateIncompatibleHeadsShutDown( + pDispEvo, + pProposedDisp, + pWorkArea); + + KickoffProposedModeSetHwState( + pDispEvo, + pProposed, + pProposedDisp, + bypassComposition, + pWorkArea); + + /* + * This function waits for 2 frames to make sure that the final IMP + * arbitration settings have been programmed by the post-SV3 worker thread + * in RM. Once these settings have taken effect, it's safe to release the + * max DRAM floor that was previously requested, and to program the ISO + * bandwidth that's required for the new mode. This only needs to be done + * for Orin SOC display. + */ + AllocatePostModesetDispBandwidth(pDispEvo, pWorkArea); + + /* + * Record the new flip state, then generate any flip events, and update + * surface reference counts. + */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + nvInitFlipEvoHwState( + pDevEvo, sd, head, + &pWorkArea->sd[sd].head[head].newState); + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].newState, + NV_TRUE); + nvUpdateSurfacesFlipRefCount( + pDevEvo, + head, + &pWorkArea->sd[sd].head[head].oldState, + NV_FALSE); + } +} + + +/*! + * Initialize the pReply structure. + * + * Mark all of the heads and disps as successful. During the process + * of assigning and validating the proposed configuration, heads with + * invalid requested configuration will have their reply status field + * changed to a non-success value. + * + * \param[in] pRequest The client's requested configuration. This + * indicates which heads on which disps the + * client requested changes on. + * \param[out] pReply The reply to the client. 
+ */ +static void +InitializeReply(const NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + pReply->status = NVKMS_SET_MODE_STATUS_SUCCESS; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + NvU32 head; + + pReply->disp[dispIndex].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_SUCCESS; + + for (head = 0; head < pDevEvo->numHeads; head++) { + + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS; + } + } +} + + +/*! + * Validate the client-provided NvKmsSetModeRequest. + * + * Check basic validity of NvKmsSetModeRequest: e.g., that + * requestedDispsBitMask and requestedHeadsBitMask do not exceed the + * disps or heads of the pDevEvo. + * + * \param[in] pDevEvo The device that is to be modified. + * \param[in] pOpenDev The pOpenDev of the client doing the modeset. + * \param[in] pRequest The client's requested configuration. This + * indicates which heads on which disps the + * client requested changes on. + * \param[out] pReply The reply to the client. + + * \return If pRequest is valid, return TRUE. Otherwise, set the + * appropriate status fields in pReply to non-SUCCESS, + * and return FALSE. + */ +static NvBool +ValidateRequest(const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply) +{ + NvU32 dispIndex, head; + NvBool ret = TRUE; + + const struct NvKmsModesetPermissions *pPermissions = + nvGetModesetPermissionsFromOpenDev(pOpenDev); + + nvAssert(pOpenDev != NULL); + nvAssert(pPermissions != NULL); + + /* Check for invalid disps in requestedDispsBitMask. */ + if (nvHasBitAboveMax(pRequest->requestedDispsBitMask, + NVKMS_MAX_SUBDEVICES)) { + pReply->status = NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK; + ret = FALSE; + } + + for (dispIndex = 0; dispIndex < NVKMS_MAX_SUBDEVICES; dispIndex++) { + + if ((pRequest->requestedDispsBitMask & (1 << dispIndex)) == 0) { + continue; + } + + if (dispIndex >= pDevEvo->nDispEvo) { + pReply->status = + NVKMS_SET_MODE_STATUS_INVALID_REQUESTED_DISPS_BITMASK; + ret = FALSE; + continue; + } + + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + + /* Check for invalid heads in requestedHeadsBitMask. */ + if (nvHasBitAboveMax(pRequestDisp->requestedHeadsBitMask, + NVKMS_MAX_HEADS_PER_DISP)) { + pReply->disp[dispIndex].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK; + ret = FALSE; + } + + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + + if ((pRequestDisp->requestedHeadsBitMask & (1 << head)) == 0) { + continue; + } + + if (head >= pDevEvo->numHeads) { + pReply->disp[dispIndex].status = + NVKMS_SET_MODE_ONE_DISP_STATUS_INVALID_REQUESTED_HEADS_BITMASK; + ret = FALSE; + continue; + } + + const NVDpyIdList permDpyIdList = + pPermissions->disp[dispIndex].head[head].dpyIdList; + + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + + /* + * Does the client have permission to touch this head at + * all? + */ + if (pRequest->commit && nvDpyIdListIsEmpty(permDpyIdList)) { + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS; + ret = FALSE; + continue; + } + + /* + * pRequestHead->dpyIdList == EMPTY means the head is + * being shut down: no more to do for validation. 
+ */ + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + continue; + } + + /* + * Does the client have permission to drive this dpyIdList + * with this head? + */ + if (pRequest->commit && + !nvDpyIdListIsASubSetofDpyIdList(pRequestHead->dpyIdList, + permDpyIdList)) { + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_PERMISSIONS; + ret = FALSE; + continue; + } + + /* + * Are all requested dpys in the list of valid dpys for this disp? + */ + if (!nvDpyIdListIsASubSetofDpyIdList( + pRequestHead->dpyIdList, + pDevEvo->pDispEvo[dispIndex]->validDisplays)) { + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_DPY; + ret = FALSE; + continue; + } + + if (!nvValidateSetLutCommonParams(pDevEvo, &pRequestHead->lut)) { + pReply->disp[dispIndex].head[head].status = + NVKMS_SET_MODE_ONE_HEAD_STATUS_INVALID_LUT; + ret = FALSE; + continue; + } + } + } + + return ret; +} + +static void FillPostSyncptReplyForModeset( + const NVDevEvoRec *pDevEvo, + NvU32 head, + const struct NvKmsFlipCommonParams *pFlipRequest, + struct NvKmsFlipCommonReplyOneHead *pFlipReply, + const NVFlipEvoHwState *pFlipState) +{ + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pFlipRequest->layer[layer].syncObjects.specified && + pFlipRequest->layer[layer].syncObjects.val.useSyncpt) { + nvFillPostSyncptReplyOneChannel( + pDevEvo->head[head].layer[layer], + pFlipRequest->layer[layer].syncObjects.val.u.syncpts.requestedPostType, + &pFlipReply->layer[layer].postSyncpt, + &pFlipState->layer[layer].syncObject); + } + } +} + +/*! + * Assign the NvKmsSetModeReply structure. + * + * After a modeset was successfully completed, update the pReply with + * information about the modeset that the client may need. + * + * \param[in] pDevEvo The device that was modified. + * \param[in] pRequest The client's requested configuration. This + * indicates which heads on which disps the + * client requested changes on. + * \param[out] pReply The reply to the client. 
+ */ +static void +AssignReplySuccess(const NVDevEvoRec *pDevEvo, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + const NVModeSetWorkArea *pWorkArea) +{ + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + + nvkms_memset(pReply, 0, sizeof(*pReply)); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + NvU32 head; + const struct NvKmsSetModeOneDispRequest *pRequestDisp = + &pRequest->disp[dispIndex]; + + if ((pRequest->requestedDispsBitMask & (1 << dispIndex)) == 0) { + continue; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + const struct NvKmsSetModeOneHeadRequest *pRequestHead = + &pRequestDisp->head[head]; + struct NvKmsSetModeOneHeadReply *pReplyHead = + &pReply->disp[dispIndex].head[head]; + + if ((pRequestDisp->requestedHeadsBitMask & (1 << head)) == 0) { + continue; + } + + pReplyHead->status = NVKMS_SET_MODE_ONE_HEAD_STATUS_SUCCESS; + + if (nvDpyIdListIsEmpty(pRequestHead->dpyIdList)) { + pReplyHead->activeRmId = 0; + } else { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + pReplyHead->activeRmId = pHeadState->activeRmId; + pReplyHead->possibleUsage = pHeadState->timings.viewPort.possibleUsage; + pReplyHead->guaranteedUsage = pHeadState->timings.viewPort.guaranteedUsage; + pReplyHead->usingHeadSurface = + (pDispEvo->pHsChannel[head] != NULL); + pReplyHead->vrrEnabled = + (pDispEvo->headState[head].timings.vrr.type != + NVKMS_DPY_VRR_TYPE_NONE); + } + FillPostSyncptReplyForModeset( + pDevEvo, + head, + &pRequestHead->flip, + &pReplyHead->flipReply, + &pWorkArea->sd[dispIndex].head[head].newState); + } + } +} + + +/*! + * Call RM to notify that a modeset is impending, or that the modeset has + * completed. + * + * \param[in] pDevEvo The device to modify. + * \param[in] pProposed The proposed resulting hardware state. + * \param[in] beginOrEnd Whether this is a begin call or an end call. + */ +static void +BeginEndModeset(NVDevEvoPtr pDevEvo, + const NVProposedModeSetHwState *pProposed, + enum NvKmsBeginEndModeset beginOrEnd) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 head, dpyMask = 0; + + /* Compute dpyMask: take all the dpyIds on this dispIndex. */ + for (head = 0; head < NVKMS_MAX_HEADS_PER_DISP; head++) { + dpyMask |= + nvDpyIdListToNvU32(pProposed->disp[dispIndex].head[head].dpyIdList); + } + + nvRmBeginEndModeset(pDispEvo, beginOrEnd, dpyMask); + } +} + +/*! + * Idle all of the satellite channels. + * + * XXX NVKMS: use interlocked UPDATEs, instead, so that we don't + * have to busy-wait on the CPU. + * + * XXX NVKMS: we should idle all channels, not just base. + */ +static NvBool IdleAllSatelliteChannels(NVDevEvoRec *pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + for (head = 0; head < pDevEvo->numHeads; head++) { + NvBool unused; + if (!nvRMIdleBaseChannel(pDevEvo, head, sd, &unused)) { + return FALSE; + } + } + } + + return TRUE; +} + +/*! + * Helper function to validate the proposed mode + */ +static NvBool +IsProposedModeSetValid(NVDevEvoPtr pDevEvo, + struct NvKmsSetModeReply *pReply, + const struct NvKmsPerOpenDev *pOpenDev, + NVProposedModeSetHwState *pProposed, + const struct NvKmsSetModeRequest *pRequest, + NVModeSetWorkArea *pWorkArea) +{ + return ValidateProposedModeSetHwState(pDevEvo, pProposed, pReply, + pWorkArea); +} + +/*! + * Perform a modeset across the heads on the disps of the device. 
+ * + * See the comments at the top of this source file for a description + * of the flow performed by this function. + * + * \param[in,out] pDevEvo The device to be modified. + * \param[in] pOpenDev The pOpenDev of the client doing the modeset. + * \param[in] pOpenDevSurfaceHandles + * The table mapping client handles to surfaces. + * \param[in] pRequest The client's requested configuration changes. + * \param[out] pReply The reply to the client. + * \param[in] bypassComposition + * On Turing and higher, enable composition pipeline + * bypass mode. + * \param[in] doRasterLock + * Rasterlock heads in the post-modeset routine. + * + * \return Return TRUE if the modeset was successful. Otherwise, + * return FALSE. If the modeset was not successful, + * the state of the hardware and software should not + * have been changed. + */ +NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsSetModeRequest *pRequest, + struct NvKmsSetModeReply *pReply, + NvBool bypassComposition, + NvBool doRasterLock) +{ + NvBool ret = FALSE; + NVProposedModeSetHwState *pProposed = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE, + sizeof(*pProposed)); + NvU32 dispIndex; + NVDispEvoPtr pDispEvo; + NvU32 dispNeedsEarlyUpdate; + NvBool updateCoreFirst = FALSE; + + /* + * We should shutdown unused heads and do not inherit the previous modeset + * state as part of this modeset if: + * - The requesting client is the internal NVKMS client (i.e., + * this is a console restore modeset), or + * - 'modesetOwnerChanged' is recorded in the device; + * i.e., there was a modeset ownership change since the last + * modeset. + */ + const NvBool modesetOwnerChanged = + (pOpenDev == pDevEvo->pNvKmsOpenDev) ? TRUE : + pDevEvo->modesetOwnerChanged; + + NVModeSetWorkArea *pWorkArea = + nvPreallocGet(pDevEvo, PREALLOC_TYPE_MODE_SET_WORK_AREA, + sizeof(*pWorkArea)); + + nvkms_memset(pProposed, 0, sizeof(*pProposed)); + nvkms_memset(pWorkArea, 0, sizeof(*pWorkArea)); + + nvAssert(pOpenDev != NULL); + + InitializeReply(pDevEvo, pRequest, pReply); + + if (!ValidateRequest(pDevEvo, pOpenDev, pRequest, pReply)) { + goto done; + } + + /* Disallow GC6 in anticipation of touching GPU/displays. */ + if (!nvRmSetGc6Allowed(pDevEvo, FALSE)) { + goto done; + } + + if (!AssignProposedModeSetHwState(pDevEvo, pOpenDev, + pRequest, pReply, pProposed, + modesetOwnerChanged)) { + goto done; + } + + if (!IsProposedModeSetValid(pDevEvo, pReply, pOpenDev, pProposed, + pRequest, pWorkArea)) { + goto done; + } + + /* The requested configuration is valid. */ + + ret = TRUE; + + if (!pRequest->commit) { + goto done; + } + + /* All satellite channels must be idle. */ + + if (!IdleAllSatelliteChannels(pDevEvo)) { + ret = FALSE; + goto done; + } + + /* From this point, we should not fail. */ + + /* + * Disable stereo pin during console restore or modeset owner changes. 
+ */ + if (modesetOwnerChanged) { + NvU32 sd; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NvU32 head; + for (head = 0; head < pDevEvo->numHeads; head++) { + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + nvSetStereoEvo(pDispEvo, head, FALSE); + } + } + } + + nvEvoCancelPostFlipIMPTimer(pDevEvo); + + BeginEndModeset(pDevEvo, pProposed, BEGIN_MODESET); + + nvEvoLockStatePreModeset(pDevEvo, &dispNeedsEarlyUpdate, &pWorkArea->earlyUpdateState); + + nvDisableVrr(pDevEvo); + + updateCoreFirst = pDevEvo->coreInitMethodsPending; + pDevEvo->coreInitMethodsPending = FALSE; + + /* + * If the core channel has assembly state we need to be committed + * before proceeding through the rest of the modeset, kickoff here. + * This is used to disable fliplock before issuing base flips + * in ApplyProposedModeSetHwStateOneDisp. + * + * XXX This violates the assumption (guarded by + * pDevEvo->coreInitMethodsPending) that we aren't kicking + * off until after the assembly core channel state (which we don't + * want to commit) has already been overwritten below and made safe + * for kickoff. Because of this, needsEarlyUpdate should only be set + * when it is safe to kickoff the existing core channel assembly + * state immediately. Currently it is only set when the call + * to nvEvoLockStatePreModeset() above disabled fliplock, at which + * point there should be no invalid state remaining in the + * core channel assembly. + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + if (dispNeedsEarlyUpdate & (1 << dispIndex)) { + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &pWorkArea->earlyUpdateState, + TRUE /* releaseElv */); + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + ApplyProposedModeSetHwStateOneDisp(pDispEvo, + pProposed, + &pProposed->disp[dispIndex], + pWorkArea, + updateCoreFirst, + bypassComposition); + } + + nvEnableVrr(pDevEvo, pRequest); + + /* + * Cache whether HS in NVKMS is allowed, so we can make consistent + * decisions for future partial updates from non-modeset owners. + */ + pDevEvo->allowHeadSurfaceInNvKms = pProposed->allowHeadSurfaceInNvKms; + + nvEvoLockStatePostModeset(pDevEvo, doRasterLock); + + BeginEndModeset(pDevEvo, pProposed, END_MODESET); + + AssignReplySuccess(pDevEvo, pRequest, pReply, pWorkArea); + + pDevEvo->skipConsoleRestore = FALSE; + + /* + * If this was a pNvKmsOpenDev-initiated modeset, force the next modeset to + * shut down all unused heads and not to inherit any state from this + * modeset. That will prevent a regular client from inheriting + * pNvKmsOpenDev modeset state. + */ + pDevEvo->modesetOwnerChanged = + (pOpenDev == pDevEvo->pNvKmsOpenDev) ? TRUE : FALSE; + + /* fall through */ +done: + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + NvU32 head; + + /* + * In case of successful commit, update current attribute values and + * free old display IDs. 
+         */
+        if (pRequest->commit && ret) {
+            NVDpyEvoRec *pDpyEvo;
+
+            FOR_ALL_EVO_DPYS(pDpyEvo,
+                             pWorkArea->sd[dispIndex].changedDpyIdList,
+                             pDispEvo) {
+                nvDpyUpdateCurrentAttributes(pDpyEvo);
+            }
+
+            for (head = 0; head < pDevEvo->numHeads; head++) {
+                if (pWorkArea->sd[dispIndex].head[head].oldActiveRmId != 0x0) {
+                    nvRmFreeDisplayId(
+                        pDispEvo,
+                        pWorkArea->sd[dispIndex].head[head].oldActiveRmId);
+                }
+            }
+        } else {
+            /* Otherwise, free newly allocated RM display IDs for changed heads */
+            for (head = 0; head < pDevEvo->numHeads; head++) {
+                NVProposedModeSetHwStateOneHead *pProposedHead =
+                    &pProposed->disp[dispIndex].head[head];
+
+                if (!pProposedHead->changed || pProposedHead->activeRmId == 0x0) {
+                    continue;
+                }
+                nvRmFreeDisplayId(pDispEvo, pProposedHead->activeRmId);
+            }
+        }
+
+        for (head = 0; head < pDevEvo->numHeads; head++) {
+            NVProposedModeSetHwStateOneHead *pProposedHead =
+                &pProposed->disp[dispIndex].head[head];
+            nvDPLibFreeModesetState(pProposedHead->pDpLibModesetState);
+        }
+    }
+
+    /* If all heads are shut down, allow GC6. */
+    if (nvAllHeadsInactive(pDevEvo)) {
+        nvRmSetGc6Allowed(pDevEvo, TRUE);
+    }
+
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_MODE_SET_WORK_AREA);
+    nvPreallocRelease(pDevEvo, PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE);
+    return ret;
+}
+
+/*!
+ * Perform a modeset that disables some or all heads.
+ *
+ * \param[in]  pDevEvo    The device to shut down.
+ * \param[in]  pTestFunc  The pointer to a test function, identifying heads
+ *                        targeted to shut down. If NULL, then shut down
+ *                        all heads.
+ */
+void nvShutDownHeads(NVDevEvoPtr pDevEvo, NVShutDownHeadsTestFunc pTestFunc)
+{
+    if (pDevEvo->displayHandle != 0) {
+        struct NvKmsSetModeParams *params =
+            nvPreallocGet(pDevEvo, PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE,
+                          sizeof(*params));
+        struct NvKmsSetModeRequest *req = NULL;
+        NVDispEvoPtr pDispEvo;
+        NvU32 dispIndex;
+        NvBool dirty = FALSE;
+
+        nvkms_memset(params, 0, sizeof(*params));
+        req = &params->request;
+
+        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+            NvU32 head;
+
+            req->requestedDispsBitMask |= NVBIT(dispIndex);
+            for (head = 0; head < pDevEvo->numHeads; head++) {
+                /*
+                 * XXX pTestFunc isn't honored by nvSetDispModeEvo()'s
+                 * modesetOwnerChanged logic.
+                 */
+                if (pTestFunc && !pTestFunc(pDispEvo, head)) {
+                    continue;
+                }
+
+                dirty = TRUE;
+                req->disp[dispIndex].requestedHeadsBitMask |= NVBIT(head);
+            }
+        }
+
+        if (dirty) {
+            req->commit = TRUE;
+
+            /*
+             * XXX TODO: The coreInitMethodsPending flag indicates that the
+             * init_no_update methods which were pushed by the hardware during
+             * core channel allocation are still pending; this means it is the
+             * first modeset after boot and the boot display/heads are still
+             * active. In theory, we could shut down only the heads which
+             * satisfy the pTestFunc() test, but this fails because other
+             * heads active at boot do not have mode timing information
+             * populated during MarkConnectorBootHeadActive(), so
+             * nvSetDispModeEvo() tries to program invalid modes on those
+             * heads.
+             *
+             * For now, just shut down all heads if any head satisfies the
+             * pTestFunc() test.
+             */
+            if (pDevEvo->coreInitMethodsPending) {
+                FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
+                    req->disp[dispIndex].requestedHeadsBitMask |=
+                        NVBIT(pDevEvo->numHeads) - 1;
+                }
+            }
+
+            nvSetDispModeEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, req,
+                             &params->reply, FALSE /* bypassComposition */,
+                             TRUE /* doRasterLock */);
+        }
+
+        nvPreallocRelease(pDevEvo, PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE);
+    }
+
+    if (pTestFunc == NULL) {
+        nvAssertAllDpysAreInactive(pDevEvo);
+    }
+}
diff --git a/src/nvidia-modeset/src/nvkms-prealloc.c b/src/nvidia-modeset/src/nvkms-prealloc.c
new file mode 100644
index 000000000..7d3a1f741
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-prealloc.c
@@ -0,0 +1,146 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "nvkms-types.h" +#include "nvkms-flip-workarea.h" +#include "nvkms-modeset-types.h" +#include "nvkms-modeset-workarea.h" +#include "nvkms-prealloc.h" +#include "nvkms-utils.h" + +#include "nvkms-api.h" + +#include + +static size_t GetSizeForType(NVDevEvoPtr pDevEvo, enum NVPreallocType type) +{ + switch (type) { + case PREALLOC_TYPE_IMP_PARAMS: + return pDevEvo->hal->caps.impStructSize; + case PREALLOC_TYPE_SHUT_DOWN_HEADS_SET_MODE: /* fall through */ + case PREALLOC_TYPE_RESTORE_CONSOLE_SET_MODE: + return sizeof(struct NvKmsSetModeParams); + case PREALLOC_TYPE_MODE_SET_WORK_AREA: + return sizeof(NVModeSetWorkArea); + case PREALLOC_TYPE_FLIP_WORK_AREA: + return sizeof(struct NvKmsFlipWorkArea); + case PREALLOC_TYPE_PROPOSED_MODESET_HW_STATE: /* fallthrough */ + case PREALLOC_TYPE_VALIDATE_PROPOSED_MODESET_HW_STATE: + return sizeof(NVProposedModeSetHwState); + case PREALLOC_TYPE_VALIDATE_MODE_HW_MODE_TIMINGS: + return sizeof(NVHwModeTimingsEvo); + case PREALLOC_TYPE_MAX: + /* Not a real option, but added for -Wswitch-enum */ + break; + } + + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Unknown prealloc type %d in GetSizeForType.", type); + + return 0; +} + +void *nvPreallocGet( + NVDevEvoPtr pDevEvo, + enum NVPreallocType type, + size_t sizeCheck) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + size_t size = GetSizeForType(pDevEvo, type); + + if (size != sizeCheck) { + nvAssert(size == sizeCheck); + return NULL; + } + + if ((pPrealloc->used[type / 8] & NVBIT(type % 8)) != 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d already used in nvPreallocGet.", type); + return NULL; + } + + /* Since these are preallocated, they should not be NULL. */ + if (pPrealloc->ptr[type] == NULL) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d NULL in nvPreallocGet.", type); + } + + pPrealloc->used[type / 8] |= NVBIT(type % 8); + + return pPrealloc->ptr[type]; +} + +void nvPreallocRelease( + NVDevEvoPtr pDevEvo, + enum NVPreallocType type) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + + if ((pPrealloc->used[type / 8] & NVBIT(type % 8)) == 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d not used in nvPreallocRelease.", type); + } + + pPrealloc->used[type / 8] &= ~(NvU8)NVBIT(type % 8); +} + +NvBool nvPreallocAlloc(NVDevEvoPtr pDevEvo) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + NvU32 type; + + for (type = 0; type < PREALLOC_TYPE_MAX; type++) { + size_t size = GetSizeForType(pDevEvo, type); + if (size == 0) { + goto fail; + } + pPrealloc->ptr[type] = nvAlloc(size); + if (pPrealloc->ptr[type] == NULL) { + goto fail; + } + } + + nvkms_memset(pPrealloc->used, 0, sizeof(pPrealloc->used)); + + return TRUE; + +fail: + nvPreallocFree(pDevEvo); + return FALSE; +} + +void nvPreallocFree(NVDevEvoPtr pDevEvo) +{ + struct NVDevPreallocRec *pPrealloc = &pDevEvo->prealloc; + NvU32 type; + + for (type = 0; type < PREALLOC_TYPE_MAX; type++) { + if ((pDevEvo->prealloc.used[type / 8] & NVBIT(type % 8)) != 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Prealloc type %d still used in nvPreallocFree.", type); + } + + nvFree(pPrealloc->ptr[type]); + pPrealloc->ptr[type] = NULL; + } +} diff --git a/src/nvidia-modeset/src/nvkms-rm.c b/src/nvidia-modeset/src/nvkms-rm.c new file mode 100644 index 000000000..2f1036536 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-rm.c @@ -0,0 +1,5324 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + +#include "dp/nvdp-connector.h" +#include "dp/nvdp-timer.h" +#include "dp/nvdp-device.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "g_nvkms-evo-states.h" +#include "nvkms-event.h" +#include "nvkms-dpy.h" +#include "nvkms-types.h" +#include "nvkms-evo.h" +#include "nvkms-dma.h" +#include "nvkms-utils.h" +#include "nvkms-private.h" +#include "nvkms-modeset.h" +#include "nvkms-surface.h" +#include "nvkms-vrr.h" + +#include "class/cl0002.h" /* NV01_CONTEXT_DMA */ +#include "class/cl0005.h" /* NV01_EVENT */ + +#include /* NV04_DISPLAY_COMMON */ +#include /* NV01_MEMORY_SYSTEM */ +#include /* NV01_MEMORY_FRAMEBUFFER_CONSOLE */ +#include /* NV01_DEVICE_0 */ +#include /* NV01_MEMORY_LOCAL_USER */ +#include /* NV20_SUBDEVICE_0 */ + +#include "class/clc37b.h" /* NVC37B_WINDOW_IMM_CHANNEL_DMA */ +#include "class/clc37e.h" /* NVC37E_WINDOW_CHANNEL_DMA */ +#include "class/clc57b.h" /* NVC57B_WINDOW_IMM_CHANNEL_DMA */ +#include "class/clc57e.h" /* NVC57E_WINDOW_CHANNEL_DMA */ +#include "class/clc67b.h" /* NVC67B_WINDOW_IMM_CHANNEL_DMA */ +#include "class/clc67e.h" /* NVC67E_WINDOW_CHANNEL_DMA */ + +#include "class/cl917b.h" /* NV917B_OVERLAY_IMM_CHANNEL_PIO */ + +#include "class/cl927c.h" /* NV927C_BASE_CHANNEL_DMA */ + +#include "class/cl917e.h" /* NV917E_OVERLAY_CHANNEL_DMA */ + +#include /* NV0000_CTRL_GPU_* */ +#include /* NV0073_CTRL_SYSTEM_GET_VRR_CONFIG */ +#include /* NV0002_CTRL_CMD_BIND_CONTEXTDMA */ +#include /* NV0073_CTRL_CMD_DFP_GET_INFO */ +#include /* NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID */ +#include /* NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO */ +#include /* NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED */ +#include /* NV0080_CTRL_CMD_GPU_SET_DISPLAY_OWNER */ +#include /* NV0080_CTRL_CMD_GR_GET_CAPS_V2 */ +#include /* NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH */ +#include /* NV2080_CTRL_CMD_BIOS_GET_NBSI */ +#include /* NV2080_CTRL_CMD_BUS_GET_INFO */ +#include /* NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION */ +#include /* NV2080_CTRL_CMD_GPU_GET_SW_FEATURES */ +#include /* NV2080_CTRL_CMD_TIMER_GET_TIME */ +#include /* NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT */ +#include /* NV5070_CTRL_CMD_SET_RMFREE_FLAGS */ +#include /* NV5070_CTRL_CMD_SET_DAC_PWR */ + +#include "nvos.h" + +#include "displayport/dpcd.h" + +static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId); + +static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel); + 
+static NvBool QueryGpuCapabilities(NVDevEvoPtr pDevEvo) +{ + NvBool ctxDmaCoherentAllowedDev = FALSE; + NvBool ctxDmaNonCoherentAllowedDev = FALSE; + NvU32 ret, sd; + + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS idInfoParams = { 0 }; + + pDevEvo->isHeadSurfaceSupported = FALSE; + + pDevEvo->validResamplingMethodMask = + NVBIT(NVKMS_RESAMPLING_METHOD_BILINEAR) | + NVBIT(NVKMS_RESAMPLING_METHOD_NEAREST); + + /* ctxDma{,Non}CoherentAllowed */ + + /* simulationType */ + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS simParams = { 0 }; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO, + &simParams, + sizeof(simParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + simParams.type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE; + } + if (sd == 0) { + pDevEvo->simulationType = simParams.type; + } + nvAssert(pDevEvo->simulationType == simParams.type); + } + + /* mobile */ + + idInfoParams.gpuId = pDevEvo->pSubDevices[0]->gpuId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ID_INFO, + &idInfoParams, sizeof(idInfoParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + pDevEvo->mobile = FALSE; + pDevEvo->isSOCDisplay = FALSE; + } else { + pDevEvo->mobile = + FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _MOBILE, _TRUE, + idInfoParams.gpuFlags); + + pDevEvo->isSOCDisplay = + FLD_TEST_DRF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE, + idInfoParams.gpuFlags); + } + + /* TODO: This cap bit should be queried from RM */ + pDevEvo->requiresAllAllocationsInSysmem = pDevEvo->isSOCDisplay; + + /* ctxDma{,Non}CoherentAllowed */ + + if (!pDevEvo->isSOCDisplay) { + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NV2080_CTRL_BUS_GET_INFO_PARAMS busParams = { 0 }; + struct { + NV2080_CTRL_BUS_INFO coherentFlags; + NV2080_CTRL_BUS_INFO nonCoherentFlags; + } busInfoList = { { 0 } }; + + NvBool ctxDmaCoherentAllowed; + NvBool ctxDmaNonCoherentAllowed; + + busInfoList.coherentFlags.index = + NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS; + busInfoList.nonCoherentFlags.index = + NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS; + + busParams.busInfoListSize = + sizeof(busInfoList) / sizeof(busInfoList.coherentFlags); + busParams.busInfoList = NV_PTR_TO_NvP64(&busInfoList); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_BUS_GET_INFO, + &busParams, sizeof(busParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + ctxDmaCoherentAllowed = + FLD_TEST_DRF(2080_CTRL_BUS_INFO, _COHERENT_DMA_FLAGS, + _CTXDMA, _TRUE, busInfoList.coherentFlags.data); + + ctxDmaNonCoherentAllowed = + FLD_TEST_DRF(2080_CTRL_BUS_INFO, _NONCOHERENT_DMA_FLAGS, + _CTXDMA, _TRUE, busInfoList.nonCoherentFlags.data); + + if (sd == 0) { + ctxDmaCoherentAllowedDev = ctxDmaCoherentAllowed; + ctxDmaNonCoherentAllowedDev = ctxDmaNonCoherentAllowed; + } else { + ctxDmaCoherentAllowedDev = + ctxDmaCoherentAllowedDev && ctxDmaCoherentAllowed; + ctxDmaNonCoherentAllowedDev = + ctxDmaNonCoherentAllowedDev && ctxDmaNonCoherentAllowed; + } + } + nvAssert(ctxDmaCoherentAllowedDev || ctxDmaNonCoherentAllowedDev); + + if (ctxDmaCoherentAllowedDev) { + pDevEvo->isoIOCoherencyModes.coherent = TRUE; + pDevEvo->nisoIOCoherencyModes.coherent = TRUE; + } + + if (ctxDmaNonCoherentAllowedDev) { + pDevEvo->isoIOCoherencyModes.noncoherent = TRUE; + pDevEvo->nisoIOCoherencyModes.noncoherent = TRUE; + } + } else { + /* + * On SOC display, NISO 
requests are IO-coherent and ISO + * requests are non-coherent. + */ + pDevEvo->isoIOCoherencyModes.noncoherent = TRUE; + pDevEvo->nisoIOCoherencyModes.coherent = TRUE; + } + + pDevEvo->supportsSyncpts = + FALSE; + + return TRUE; +} + + +static void FreeDisplay(NVDispEvoPtr pDispEvo) +{ + NvU32 head; + + if (pDispEvo == NULL) { + return; + } + + for (head = 0; head < ARRAY_LEN(pDispEvo->pSwapGroup); head++) { + nvAssert(pDispEvo->pSwapGroup[head] == NULL); + } + + nvAssert(nvListIsEmpty(&pDispEvo->dpyList)); + + nvkms_free_ref_ptr(pDispEvo->ref_ptr); + + nvFree(pDispEvo); +} + + +static inline NVDispEvoPtr AllocDisplay(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + NVDispEvoPtr pDispEvo = nvCalloc(1, sizeof(NVDispEvoRec)); + + if (pDispEvo == NULL) { + goto fail; + } + + pDispEvo->pDevEvo = pDevEvo; + + nvListInit(&pDispEvo->dpyList); + nvListInit(&pDispEvo->connectorList); + + pDispEvo->framelock.server = nvInvalidDpyId(); + pDispEvo->framelock.clients = nvEmptyDpyIdList(); + pDispEvo->framelock.currentServerHead = NV_INVALID_HEAD; + + for (head = 0; head < ARRAY_LEN(pDispEvo->headState); head++) { + pDispEvo->headState[head].activeDpys = nvEmptyDpyIdList(); + pDispEvo->headState[head].attributes = NV_EVO_DEFAULT_ATTRIBUTES_SET; + } + + pDispEvo->ref_ptr = nvkms_alloc_ref_ptr(pDispEvo); + if (!pDispEvo->ref_ptr) { + goto fail; + } + + return pDispEvo; + +fail: + FreeDisplay(pDispEvo); + + return NULL; +} + + +static void FreeDisplays(NVDevEvoPtr pDevEvo) +{ + unsigned int sd; + + for (sd = 0; sd < pDevEvo->nDispEvo; sd++) { + FreeDisplay(pDevEvo->pDispEvo[sd]); + pDevEvo->pDispEvo[sd] = NULL; + } + pDevEvo->nDispEvo = 0; +} + + +/*! + * Allocate the NVDispRecs for the given pDev. + * + * \param[in,out] pDev The device for which to allocate Displays. + */ +static NvBool AllocDisplays(NVDevEvoPtr pDevEvo) +{ + unsigned int sd; + + nvAssert(pDevEvo->nDispEvo == 0); + + pDevEvo->nDispEvo = pDevEvo->numSubDevices; + + for (sd = 0; sd < pDevEvo->nDispEvo; sd++) { + NVDispEvoPtr pDispEvo = AllocDisplay(pDevEvo); + + if (pDispEvo == NULL) { + goto fail; + } + + pDevEvo->pDispEvo[sd] = pDispEvo; + + pDispEvo->displayOwner = sd; + + pDispEvo->gpuLogIndex = pDevEvo->pSubDevices[sd]->gpuLogIndex; + } + + return TRUE; + +fail: + FreeDisplays(pDevEvo); + return FALSE; +} + +/* + * Get the (id) list of all supported display devices for this pDisp. + */ +static NvBool ProbeValidDisplays(NVDispEvoPtr pDispEvo) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS getSupportedParams = { 0 }; + NvU32 ret; + + pDispEvo->connectorIds = nvEmptyDpyIdList(); + pDispEvo->displayPortMSTIds = nvEmptyDpyIdList(); + pDispEvo->dynamicDpyIds = nvEmptyDpyIdList(); + pDispEvo->validDisplays = nvEmptyDpyIdList(); + + getSupportedParams.subDeviceInstance = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, + &getSupportedParams, sizeof(getSupportedParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get supported display device(s)"); + } else { + NVDpyIdList dpyIdList; + NVDpyId dpyId; + + // Grab only the static ids from the list. Dynamic ids are + // used to communicate with devices that are connected to + // a connector that has a static id. 
+        dpyIdList = nvNvU32ToDpyIdList(getSupportedParams.displayMask);
+
+        FOR_ALL_DPY_IDS(dpyId, dpyIdList) {
+            NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS getOrInfoParams = { 0 };
+            getOrInfoParams.subDeviceInstance = pDispEvo->displayOwner;
+            getOrInfoParams.displayId = nvDpyIdToNvU32(dpyId);
+
+            ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                                 pDevEvo->displayCommonHandle,
+                                 NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+                                 &getOrInfoParams,
+                                 sizeof(getOrInfoParams));
+            if (ret != NVOS_STATUS_SUCCESS) {
+                nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
+                            "Failed to get supported display device(s)");
+            } else {
+                if (!getOrInfoParams.bIsDispDynamic) {
+                    pDispEvo->connectorIds =
+                        nvAddDpyIdToDpyIdList(dpyId, pDispEvo->connectorIds);
+                }
+            }
+        }
+    }
+
+    pDispEvo->validDisplays = pDispEvo->connectorIds;
+
+    return TRUE;
+}
+
+/*!
+ * Return TRUE if every pDispEvo on this pDevEvo has an empty validDisplays.
+ */
+static NvBool NoValidDisplays(NVDevEvoPtr pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    unsigned int sd;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) {
+        if (!nvDpyIdListIsEmpty(pDispEvo->validDisplays)) {
+            return FALSE;
+        }
+    }
+
+    return TRUE;
+}
+
+
+/*
+ * Find the NvKmsConnectorSignalFormat for the pConnectorEvo.
+ */
+static NvKmsConnectorSignalFormat
+GetSignalFormat(const NVConnectorEvoRec *pConnectorEvo)
+{
+    // SignalFormat represents a weird combination of our OR type and protocol.
+    switch (pConnectorEvo->or.type) {
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_DAC:
+        switch (pConnectorEvo->or.protocol) {
+        default:
+            nvAssert(!"Unexpected OR protocol for DAC");
+            // fall through
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA;
+        }
+
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+        switch (pConnectorEvo->or.protocol) {
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS;
+
+        default:
+            nvAssert(!"Unexpected OR protocol for SOR");
+            // fall through
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS;
+
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_DP;
+        }
+
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR:
+        switch (pConnectorEvo->or.protocol) {
+        default:
+            nvAssert(!"Unexpected OR protocol for PIOR");
+            // fall through
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS;
+        }
+
+    case NV0073_CTRL_SPECIFIC_OR_TYPE_DSI:
+        switch (pConnectorEvo->or.protocol) {
+        default:
+            nvAssert(!"Unexpected OR protocol for DSI");
+            // fall through
+        case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI:
+            return NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI;
+        }
+
+    default:
+        nvAssert(!"Unexpected OR type");
+        return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN;
+    }
+
+    return NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN;
+}
+
+
+static NvU32 GetDfpInfo(const NVConnectorEvoRec *pConnectorEvo)
+{
+    NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+
+    NV0073_CTRL_DFP_GET_INFO_PARAMS params = { 0 };
+    NvU32 ret;
+
+    if (pConnectorEvo->or.type != NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) {
+        return 0x0;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_GET_INFO,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, "Failed to query DFP info");
+        return 0x0;
+    }
+
+    return params.flags;
+}
+
+typedef struct _AllocConnectorDispDataRec {
+    NvU32 dfpIndex;
+    NvU32 crtIndex;
+    NvU32 typeIndices[NVKMS_CONNECTOR_TYPE_MAX + 1];
+} AllocConnectorDispDataRec;
+
+/*!
+ * Query and setup information for a connector.
+ */
+static NvBool AllocConnector(
+    NVDispEvoPtr pDispEvo,
+    NVDpyId dpyId,
+    AllocConnectorDispDataRec *pAllocConnectorDispData)
+{
+    NVConnectorEvoPtr pConnectorEvo = NULL;
+    NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS params = { 0 };
+    NvU32 ret;
+    NvBool isDP;
+
+    pConnectorEvo = nvCalloc(1, sizeof(*pConnectorEvo));
+
+    if (pConnectorEvo == NULL) {
+        return FALSE;
+    }
+
+    pConnectorEvo->pDispEvo = pDispEvo;
+    pConnectorEvo->displayId = dpyId;
+    pConnectorEvo->type = NVKMS_CONNECTOR_TYPE_UNKNOWN;
+    pConnectorEvo->physicalIndex = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
+    pConnectorEvo->physicalLocation = NV_INVALID_CONNECTOR_PHYSICAL_INFORMATION;
+    /* Query the output resource configuration */
+    nvRmGetConnectorORInfo(pConnectorEvo, FALSE);
+
+    isDP =
+        (pConnectorEvo->or.type ==
+         NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) &&
+        (pConnectorEvo->or.protocol ==
+         NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A ||
+         pConnectorEvo->or.protocol ==
+         NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B);
+
+    /* Determine the connector type. */
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(dpyId);
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA,
+                         &params,
+                         sizeof(params));
+
+    if (ret != NVOS_STATUS_SUCCESS) {
+        nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR,
+                     "Failed to determine connector type for connector "
+                     NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId));
+        goto fail;
+    } else {
+
+        static const struct {
+            NvU32 type0073;
+            NvKmsConnectorType typeNvKms;
+        } connectorTypeTable[] = {
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_EXT,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_USB_C,
+              NVKMS_CONNECTOR_TYPE_USBC },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_INT,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_MINI_EXT,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_1,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DP_2,
+              NVKMS_CONNECTOR_TYPE_DP },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VGA_15_PIN,
+              NVKMS_CONNECTOR_TYPE_VGA },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_SVIDEO,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I_TV_COMPOSITE,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_I,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DVI_D,
+              NVKMS_CONNECTOR_TYPE_DVI_D },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_ADC,
+              NVKMS_CONNECTOR_TYPE_ADC },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_1,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_LFH_DVI_I_2,
+              NVKMS_CONNECTOR_TYPE_DVI_I },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_SPWG,
+              NVKMS_CONNECTOR_TYPE_LVDS },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_OEM,
+              NVKMS_CONNECTOR_TYPE_LVDS },
+            { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_A,
+              NVKMS_CONNECTOR_TYPE_HDMI },
+            {
NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_HDMI_C_MINI, + NVKMS_CONNECTOR_TYPE_HDMI }, + { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_VIRTUAL_WFD, + NVKMS_CONNECTOR_TYPE_UNKNOWN }, + { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DSI, + NVKMS_CONNECTOR_TYPE_DSI }, + { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_DP_SERIALIZER, + NVKMS_CONNECTOR_TYPE_DP_SERIALIZER }, + { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_STEREO_3PIN_DIN, + NVKMS_CONNECTOR_TYPE_UNKNOWN }, + { NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_UNKNOWN, + NVKMS_CONNECTOR_TYPE_UNKNOWN }, + }; + + int i, j; + + for (i = 0; i < params.count; i++) { + for (j = 0; j < ARRAY_LEN(connectorTypeTable); j++) { + if (connectorTypeTable[j].type0073 == params.data[i].type) { + if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) { + pConnectorEvo->type = connectorTypeTable[j].typeNvKms; + } else { + /* + * The only cases where we should see + * params.count > 1 (and thus attempt to + * assign pConnectorEvo->type multiple times) + * should be where all the + * NV0073_CTRL_SPECIFIC_CONNECTOR_DATA_TYPE_* + * values map to the same NvKmsConnectorType; + */ + nvAssert(pConnectorEvo->type == + connectorTypeTable[j].typeNvKms); + } + break; + } + } + if (j == ARRAY_LEN(connectorTypeTable)) { + nvAssert(!"Unhandled connector type!"); + } + + if (i == 0) { + pConnectorEvo->physicalIndex = params.data[i].index; + pConnectorEvo->physicalLocation = params.data[i].location; + } else { + nvAssert(pConnectorEvo->physicalIndex == params.data[i].index); + nvAssert(pConnectorEvo->physicalLocation == + params.data[i].location); + } + } + + pConnectorEvo->ddcPartnerDpyIdsList = nvNvU32ToDpyIdList(params.DDCPartners); + } + + /* If the connector type is unknown, ignore this connector. */ + if (pConnectorEvo->type == NVKMS_CONNECTOR_TYPE_UNKNOWN) { + nvFree(pConnectorEvo); + return TRUE; + } + + /* + * Ignore connectors that use DP protocol, but don't have a + * DP-compatible type. + */ + if (isDP && + ((pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_DP) && + !nvConnectorIsDPSerializer(pConnectorEvo) && + (pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_USBC))) { + nvFree(pConnectorEvo); + return TRUE; + } + + /* + * Bind connector to the DP lib if DP capable. Serializer + * connector is not managed by DP lib. + */ + if (isDP && + !nvConnectorIsDPSerializer(pConnectorEvo)) { + pConnectorEvo->pDpLibConnector = nvDPCreateConnector(pConnectorEvo); + if (!pConnectorEvo->pDpLibConnector) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to initialize DisplayPort support for " + NV_DPY_ID_PRINT_FORMAT, nvDpyIdToPrintFormat(dpyId)); + goto fail; + } + } + + pConnectorEvo->signalFormat = GetSignalFormat(pConnectorEvo); + + pConnectorEvo->dfpInfo = GetDfpInfo(pConnectorEvo); + + /* + * Change-list 6909651 has disabled YCbCr* color space for DisplayPort, it + * says - + * "Disable anything other than RGB for DisplayPort; on FERMI at least, + * there are problems YCbCr* on DP (but not HDMI), since the limited range + * must be implemented by EVO HW, and not the LUT (per EVO error checks)." + * + * TODO Investigate if YCbCr* color space for DisplayPort allowed on Kepler + * onward (also take DP-MST into consideration). 
+ */ + if (!nvConnectorUsesDPLib(pConnectorEvo)) { + /* check for color space (YCbCr422, YCbCr444) capability of GPU */ + if (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS_FORMAT, _YCBCR422_CAPABLE, _TRUE, + pConnectorEvo->dfpInfo)) { + pConnectorEvo->colorSpaceCaps.ycbcr422Capable = TRUE; + } + + if (FLD_TEST_DRF(0073, _CTRL_DFP_FLAGS_FORMAT, _YCBCR444_CAPABLE, _TRUE, + pConnectorEvo->dfpInfo)) { + pConnectorEvo->colorSpaceCaps.ycbcr444Capable = TRUE; + } + } else { + pConnectorEvo->colorSpaceCaps.ycbcr422Capable = FALSE; + pConnectorEvo->colorSpaceCaps.ycbcr444Capable = FALSE; + } + + if (pConnectorEvo->signalFormat == NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI) { + nvAssert(pDevEvo->numHeads >= 1); + // DSI supports only HEAD0 assignment + pConnectorEvo->validHeadMask = 0x1; + + if (pConnectorEvo->type != NVKMS_CONNECTOR_TYPE_DSI) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Mismatch between connector type and signal format for DSI!"); + goto fail; + } + } else { + pConnectorEvo->validHeadMask = (1 << pDevEvo->numHeads) - 1; + } + + /* Assign connector indices. */ + + pConnectorEvo->legacyType = + GetLegacyConnectorType(pDispEvo, pConnectorEvo->displayId); + + switch (pConnectorEvo->legacyType) { + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT: + pConnectorEvo->legacyTypeIndex = + pAllocConnectorDispData->crtIndex++; + break; + case NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP: + pConnectorEvo->legacyTypeIndex = + pAllocConnectorDispData->dfpIndex++; + break; + default: + nvAssert(!"Unknown connector type"); + break; + } + + nvAssert(pConnectorEvo->type < + ARRAY_LEN(pAllocConnectorDispData->typeIndices)); + pConnectorEvo->typeIndex = + pAllocConnectorDispData->typeIndices[pConnectorEvo->type]++; + + nvListAppend(&pConnectorEvo->connectorListEntry, &pDispEvo->connectorList); + + nvkms_snprintf(pConnectorEvo->name, sizeof(pConnectorEvo->name), "%s-%u", + NvKmsConnectorTypeString(pConnectorEvo->type), + pConnectorEvo->typeIndex); + + return TRUE; + +fail: + nvFree(pConnectorEvo); + return FALSE; +} + + +static void FreeConnectors(NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo, pConnectorEvoNext; + + nvListForEachEntry_safe(pConnectorEvo, pConnectorEvoNext, + &pDispEvo->connectorList, connectorListEntry) { + // Unbind DP lib from the connector + nvDPDestroyConnector(pConnectorEvo->pDpLibConnector); + pConnectorEvo->pDpLibConnector = NULL; + nvListDel(&pConnectorEvo->connectorListEntry); + nvFree(pConnectorEvo); + } +} + + +/*! + * Allocate and initialize the connector structs for the given pDisp. + * + * NOTE: Each Display ID in pDispEvo->connectorIds (aka the + * NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED mask) is a possible display + * connection to the GPU which is static after boot. + */ +static NvBool AllocConnectors(NVDispEvoPtr pDispEvo) +{ + NVDpyId dpyId; + NVConnectorEvoPtr pConnectorEvo; + AllocConnectorDispDataRec allocConnectorDispData = { }; + + nvAssert(nvListIsEmpty(&pDispEvo->connectorList)); + + if (nvDpyIdListIsEmpty(pDispEvo->connectorIds)) { + /* Allow boards with no connectors */ + return TRUE; + } + + /* Allocate the connectors */ + FOR_ALL_DPY_IDS(dpyId, pDispEvo->connectorIds) { + if (!AllocConnector(pDispEvo, dpyId, &allocConnectorDispData)) { + goto fail; + } + } + + /* + * Reassign pDispEvo->connectorIds, to exclude any connectors ignored above: + * AllocConnector() may return TRUE but not actually create a pConnectorEvo + * for some connectors reported by resman. 
+ */ + pDispEvo->connectorIds = nvEmptyDpyIdList(); + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + pDispEvo->connectorIds = + nvAddDpyIdToDpyIdList(pConnectorEvo->displayId, + pDispEvo->connectorIds); + } + + pDispEvo->validDisplays = pDispEvo->connectorIds; + + return TRUE; + + fail: + FreeConnectors(pDispEvo); + return FALSE; +} + + +/*! + * Query the number of heads and save the result in pDevEvo->numHeads. + * + * Query the number of heads on each pDisp of the pDev and limit to + * the minimum across all pDisps. Query the headMask on each pDisp + * and take the intersection across pDisps. Limit the number of heads + * to the number of bits in the headMask. + * + * \param[in,out] pDev This is the device pointer; the pDisps within + * it are used to query per-GPU information. + * The result is written to pDevEvo->numHeads. + * + * \return Return TRUE if numHeads could be correctly assigned; + * return FALSE if numHeads could not be queried. + */ +static NvBool ProbeHeadCount(NVDevEvoPtr pDevEvo) +{ + NvU32 numHeads = 0, headMask = 0; + int sd, head, numBits; + NVDispEvoPtr pDispEvo; + NvU32 ret; + + pDevEvo->numHeads = 0; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS numHeadsParams = { 0 }; + NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS headMaskParams = { 0 }; + + numHeadsParams.subDeviceInstance = sd; + numHeadsParams.flags = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, + &numHeadsParams, sizeof(numHeadsParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get the number of heads"); + return FALSE; + } + + if (numHeadsParams.numHeads == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No heads found on board!"); + return FALSE; + } + + if (numHeads == 0) { + numHeads = numHeadsParams.numHeads; + } else { + if (numHeads != numHeadsParams.numHeads) { + NvU32 minNumHeads = + NV_MIN(numHeads, numHeadsParams.numHeads); + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Unexpected numbers of heads " + "(%d, %d); clamping to %d", + numHeads, numHeadsParams.numHeads, minNumHeads); + numHeads = minNumHeads; + } + } + + headMaskParams.subDeviceInstance = sd; + + ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK, + &headMaskParams, sizeof(headMaskParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to get head configuration"); + return FALSE; + } + + if (headMask == 0) { + headMask = headMaskParams.headMask; + } else { + if (headMask != headMaskParams.headMask) { + NvU32 intersectedHeadMask = + headMask & headMaskParams.headMask; + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Unexpected head configurations " + "(0x%02x, 0x%02x); limiting to 0x%02x", + headMask, headMaskParams.headMask, + intersectedHeadMask); + headMask = intersectedHeadMask; + } + } + } + + /* clamp numHeads to the number of bits in headMask */ + + numBits = nvPopCount32(headMask); + + /* for now, we only support headMask when it is tightly packed at 0 */ + + for (head = 0; head < numBits; head++) { + if ((headMask & (1 << head)) == 0) { + NvU32 modifiedHeadMask = (1 << head) - 1; + + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "The head configuration (0x%02x) " + "is unexpected; limiting to 0x%02x", headMask, + modifiedHeadMask); + + headMask = modifiedHeadMask; + numBits = head; + break; + } + } + + /* headMask should never increase numHeads */ + + if (numBits 
> numHeads) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "The head configuration (0x%02x) " + "is inconsistent with the number of heads (%d)", + headMask, numHeads); + } else if (numBits < numHeads) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Correcting number of heads for " + "current head configuration (0x%02x)", headMask); + numHeads = numBits; + } + + pDevEvo->numHeads = numHeads; + + return TRUE; +} + +/*! + * Set a pConnectorEvo's software state based on the boot head assignment. + */ +static void MarkConnectorBootHeadActive(NVDispEvoPtr pDispEvo, NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NVDpyId displayId, rootPortId; + NVDpyEvoPtr pDpyEvo; + NVConnectorEvoPtr pConnectorEvo; + NVDispHeadStateEvoPtr pHeadState; + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 }; + NvU32 ret; + + // Use the first displayId in the boot display list. + // + // TODO: What should we do if more than one dpy ID is listed for a boot + // display? + nvAssert(nvCountDpyIdsInDpyIdList(pDispEvo->vbiosDpyConfig[head]) == 1); + displayId = nvNextDpyIdInDpyIdListUnsorted(nvInvalidDpyId(), + pDispEvo->vbiosDpyConfig[head]); + + // The displayId reported by RM could be a dynamic one. Find the root port + // for this ID. + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(displayId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + &params, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + return; + } + + if (params.bIsDispDynamic) { + rootPortId = nvNvU32ToDpyId(params.rootPortId); + } else { + rootPortId = displayId; + } + + pConnectorEvo = nvGetConnectorFromDisp(pDispEvo, rootPortId); + if (!pConnectorEvo) { + return; + } + + if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED)) { + + nvAssert(params.index != NV_INVALID_OR); + if (params.index == NV_INVALID_OR) { + // If RM reported that a head is driving this dpyId, then there + // should be an SOR assigned. However, due to a bug in the way + // PDB_PROP_GPU_DISABLE_VGA_CONSOLE_RESTORATION_ON_RESUME is + // handled, RM can report an "active" head with no SOR assigned on + // certain specific GPUs. If that happens, just treat the head as + // disabled. See bug 1692425. + pDispEvo->vbiosDpyConfig[head] = nvEmptyDpyIdList(); + return; + } else { + // Track the SOR assignment for this connector. See the comment in + // nvRmGetConnectorORInfo() for why this is deferred until now. + nvAssert(pConnectorEvo->or.mask == 0x0); + pConnectorEvo->or.mask |= NVBIT(params.index); + } + } + nvAssert((pConnectorEvo->or.mask & NVBIT(params.index)) != 0x0); + + // Use the pDpyEvo for the connector, since we may not have one for + // display id if it's a dynamic one. + pDpyEvo = nvGetDpyEvoFromDispEvo(pDispEvo, pConnectorEvo->displayId); + + pHeadState = &pDispEvo->headState[head]; + + nvAssert(pDpyEvo->head == NV_INVALID_HEAD); + nvAssert(!nvHeadIsActive(pDispEvo, head)); + + pDpyEvo->head = head; + + pHeadState->activeDpys = + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId); + pHeadState->pConnectorEvo = pConnectorEvo; + pHeadState->activeRmId = nvDpyIdToNvU32(displayId); + + // Track the assigned head. + pConnectorEvo->or.ownerHeadMask[params.index] |= NVBIT(head); + + nvEvoStateStartNoLock(&pDispEvo->pDevEvo->gpus[pDispEvo->displayOwner]); +} + +/*! + * Query the vbios assignment of heads to display devices, and cache + * in pDispEvo->vbiosDpyConfig for later use by nvDPResume().
+ * + * \param[in,out] pDisp This is the GPU display pointer; the result is + * written to pDispEvo->vbiosDpyConfig + */ +static void GetVbiosHeadAssignmentOneDisp(NVDispEvoPtr pDispEvo) +{ + unsigned int head; + NvU32 ret = NVOS_STATUS_ERROR_GENERIC; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + nvkms_memset(&pDispEvo->vbiosDpyConfig, 0, + sizeof(pDispEvo->vbiosDpyConfig)); + + /* if there is no display, there is no origDpyConfig */ + + nvAssert(pDevEvo->displayCommonHandle != 0); + + /* + * get the vbios assignment of heads within the GPU, so that + * later when we do head assignment, we can try to preserve the + * existing assignment; see bug 208072 + */ + + for (head = 0; head < pDevEvo->numHeads; head++) { + NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS activeDpysParams = { 0 }; + + activeDpysParams.subDeviceInstance = pDispEvo->displayOwner; + activeDpysParams.head = head; + /* + * We want to check for active displays set by any low-level software + * such as VBIOS, not just those set by an RM client + */ + activeDpysParams.flags = + DRF_DEF(0073, _CTRL_SYSTEM_GET_ACTIVE_FLAGS, _CLIENT, _DISABLE); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, + &activeDpysParams, sizeof(activeDpysParams)); + + if (ret == NVOS_STATUS_SUCCESS) { + // XXX TODO: If this is a dynamic display ID, it's not necessarily + // correlated with the NVDpyId we'll assign to a dynamic pDpyEvo + // later. We should instead store this as an NvU32 and assign it as + // the activeRmId for a dynamic pDpyEvo that DPLib reports as being + // driven by the firmware group. See bug 1656584. + pDispEvo->vbiosDpyConfig[head] = + nvNvU32ToDpyIdList(activeDpysParams.displayId); + if (activeDpysParams.displayId != 0) { + MarkConnectorBootHeadActive(pDispEvo, head); + } + } + + nvAssert(ret == NVOS_STATUS_SUCCESS); + } +} + +static void GetVbiosHeadAssignment(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + NvU32 dispIndex; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + GetVbiosHeadAssignmentOneDisp(pDispEvo); + } +} + +/*! + * Query the boot display device(s). + */ +static void ProbeBootDisplays(NVDispEvoPtr pDispEvo) +{ + NvU32 ret; + NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS bootParams = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + pDispEvo->bootDisplays = nvEmptyDpyIdList(); + + bootParams.subDeviceInstance = pDispEvo->displayOwner; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_BOOT_DISPLAYS, + &bootParams, sizeof(bootParams)); + + if (ret == NVOS_STATUS_SUCCESS) { + pDispEvo->bootDisplays = + nvNvU32ToDpyIdList(bootParams.bootDisplayMask); + } +} + +/*! + * Query the 0073 display common object capabilities. 
+ */ +static NvBool ProbeDisplayCommonCaps(NVDevEvoPtr pDevEvo) +{ + NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS capsParams = { }; + NvU32 ret; + + ct_assert(sizeof(pDevEvo->commonCapsBits) == sizeof(capsParams.capsTbl)); + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_CAPS_V2, + &capsParams, sizeof(capsParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine display common capabilities"); + return FALSE; + } + nvkms_memcpy(pDevEvo->commonCapsBits, capsParams.capsTbl, + sizeof(pDevEvo->commonCapsBits)); + + return TRUE; +} + +static NvBool ReadDPCDReg(NVConnectorEvoPtr pConnectorEvo, + NvU32 dpcdAddr, + NvU8 *dpcdData) +{ + NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { }; + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + + params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX); + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _READ); + + params.addr = dpcdAddr; + + /* Requested size is 0-based */ + params.size = 0; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_AUXCH_CTRL, + &params, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "AUX read failed for DPCD addr 0x%x", + dpcdAddr); + return FALSE; + } + + if (params.size != 1U) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "AUX read returned 0 bytes for DPCD addr 0x%x", + dpcdAddr); + return FALSE; + } + + *dpcdData = params.data[0]; + + return TRUE; +} + +NvBool nvWriteDPCDReg(NVConnectorEvoPtr pConnectorEvo, + NvU32 dpcdAddr, + NvU8 dpcdData) +{ + NV0073_CTRL_DP_AUXCH_CTRL_PARAMS params = { }; + NVDevEvoPtr pDevEvo = pConnectorEvo->pDispEvo->pDevEvo; + + params.subDeviceInstance = pConnectorEvo->pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + params.cmd = DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_TYPE, _AUX); + params.cmd |= DRF_DEF(0073_CTRL, _DP, _AUXCH_CMD_REQ_TYPE, _WRITE); + + params.addr = dpcdAddr; + params.data[0] = dpcdData; + + /* Requested size is 0-based */ + params.size = 0; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_AUXCH_CTRL, + &params, sizeof(params)) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "AUX write failed for DPCD addr 0x%x", + dpcdAddr); + return FALSE; + } + + if (params.size != 1U) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Wrote 0 bytes for DPCD addr 0x%x", + dpcdAddr); + return FALSE; + } + + return TRUE; +} + +static NvBool ReadDPSerializerCaps(NVConnectorEvoPtr pConnectorEvo) +{ + NVDpyIdList oneDpyIdList = + nvAddDpyIdToEmptyDpyIdList(pConnectorEvo->displayId); + NVDpyIdList connectedList; + NvU8 dpcdData = 0; + + /* + * This call will not only confirm that the DP serializer is connected, but + * will also power on the corresponding DPAUX pads if the serializer is + * detected via NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE. The DPAUX pads + * need to be enabled for the DPCD reads below.
+ */ + connectedList = nvRmGetConnectedDpys(pConnectorEvo->pDispEvo, oneDpyIdList); + if (!nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedList)) { + nvEvoLogDev(pConnectorEvo->pDispEvo->pDevEvo, EVO_LOG_ERROR, + "Serializer connector %s is not currently connected!", + pConnectorEvo->name); + return FALSE; + } + + if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LINK_BANDWIDTH, &dpcdData)) { + return FALSE; + } + pConnectorEvo->dpSerializerCaps.maxLinkBW = + DRF_VAL(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, dpcdData); + + if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MAX_LANE_COUNT, &dpcdData)) { + return FALSE; + } + pConnectorEvo->dpSerializerCaps.maxLaneCount = + DRF_VAL(_DPCD, _MAX_LANE_COUNT, _LANE, dpcdData); + + if (!ReadDPCDReg(pConnectorEvo, NV_DPCD_MSTM, &dpcdData)) { + return FALSE; + } + pConnectorEvo->dpSerializerCaps.supportsMST = + FLD_TEST_DRF(_DPCD, _MSTM, _CAP, _YES, dpcdData); + + return TRUE; +} + +static NvBool AllocDPSerializerDpys(NVConnectorEvoPtr pConnectorEvo) +{ + NvBool supportsMST; + NvU32 numHeads; + NvU32 i; + + if (!nvConnectorIsDPSerializer(pConnectorEvo)) { + return TRUE; + } + + if (!ReadDPSerializerCaps(pConnectorEvo)) { + return FALSE; + } + + supportsMST = pConnectorEvo->dpSerializerCaps.supportsMST; + numHeads = pConnectorEvo->pDispEvo->pDevEvo->numHeads; + for (i = 0; i < numHeads && supportsMST; i++) { + NvBool dynamicDpyCreated = FALSE; + char address[5] = { }; + + nvkms_snprintf(address, sizeof(address), "0.%d", i + 1); + if ((nvGetDPMSTDpyEvo(pConnectorEvo, address, + &dynamicDpyCreated) == NULL) || + !dynamicDpyCreated) { + return FALSE; + } + } + + return TRUE; +} + +/*! + * + */ +static NvBool AllocDpys(NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo; + + // At this point, there should be no DisplayPort multistream devices. + nvAssert(nvDpyIdListsAreEqual(pDispEvo->validDisplays, + pDispEvo->connectorIds)); + nvAssert(nvDpyIdListIsEmpty(pDispEvo->displayPortMSTIds)); + nvAssert(nvDpyIdListIsEmpty(pDispEvo->dynamicDpyIds)); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = nvAllocDpyEvo(pDispEvo, pConnectorEvo, + pConnectorEvo->displayId, NULL); + + if (pDpyEvo == NULL) { + nvAssert(!"Failed to allocate pDpy"); + return FALSE; + } + + if (!AllocDPSerializerDpys(pConnectorEvo)) { + nvAssert(!"Failed to allocate non DPLib managed dpys"); + return FALSE; + } + } + + return TRUE; +} + +static void FreeDpys(NVDispEvoPtr pDispEvo) +{ + NVDpyEvoPtr pDpyEvo, pDpyEvoTmp; + + nvListForEachEntry_safe(pDpyEvo, pDpyEvoTmp, + &pDispEvo->dpyList, dpyListEntry) { + nvFreeDpyEvo(pDispEvo, pDpyEvo); + } +} + + +/*! + * Receive hotplug notification from resman. + * + * This function is registered as the kernel callback function from + * resman when an NV2080_NOTIFIERS_HOTPLUG event is generated. + * + * However, this function is called with resman's context (alternate + * stack, resman locks held, etc). Schedule deferred work, so that we + * can process the hotplug event without resman's encumbrances. + */ +static void ReceiveHotplugEvent(void *arg, void *pEventDataVoid, NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + (void) nvkms_alloc_timer_with_ref_ptr( + nvHandleHotplugEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* dataU32 */ + 0); +} + +static void ReceiveDPIRQEvent(void *arg, void *pEventDataVoid, NvU32 hEvent, + NvU32 Data, NV_STATUS Status) +{ + // XXX The displayId of the connector that generated the event should be + // available here somewhere. 
We should figure out how to find that and + // plumb it through to nvHandleDPIRQEventDeferredWork. + (void) nvkms_alloc_timer_with_ref_ptr( + nvHandleDPIRQEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pDispEvo) */ + 0, /* dataU32 */ + 0); +} + +NvBool nvRmRegisterCallback(const NVDevEvoRec *pDevEvo, + NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + struct nvkms_ref_ptr *ref_ptr, + NvU32 parentHandle, + NvU32 eventHandle, + Callback5ArgVoidReturn func, + NvU32 event) +{ + NV0005_ALLOC_PARAMETERS allocEventParams = { 0 }; + + cb->func = func; + cb->arg = ref_ptr; + + allocEventParams.hParentClient = nvEvoGlobal.clientHandle; + allocEventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + allocEventParams.notifyIndex = event; + allocEventParams.data = NV_PTR_TO_NvP64(cb); + + return nvRmApiAlloc(nvEvoGlobal.clientHandle, + parentHandle, + eventHandle, + NV01_EVENT_KERNEL_CALLBACK_EX, + &allocEventParams) + == NVOS_STATUS_SUCCESS; +} + +static NvBool RegisterDispCallback(NVOS10_EVENT_KERNEL_CALLBACK_EX *cb, + NVDispEvoPtr pDispEvo, + NvU32 handle, + Callback5ArgVoidReturn func, + NvU32 event) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle; + + return nvRmRegisterCallback(pDevEvo, cb, pDispEvo->ref_ptr, subDevice, + handle, func, event); +} + +enum NvKmsAllocDeviceStatus nvRmAllocDisplays(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + unsigned int sd; + enum NvKmsAllocDeviceStatus status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + NvU32 totalDispNumSubDevices = 0; + + pDevEvo->sli.bridge.present = FALSE; + + if (!QueryGpuCapabilities(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to query GPU capabilities"); + goto fail; + } + + if (pDevEvo->supportsSyncpts) { + pDevEvo->preSyncptTable = + nvCalloc(1, sizeof(NVEvoSyncpt) * NV_SYNCPT_GLOBAL_TABLE_LENGTH); + if (pDevEvo->preSyncptTable == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate memory for pre-syncpt table"); + goto fail; + } + } + + if (!AllocDisplays(pDevEvo)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate displays"); + goto fail; + } + + /* allocate the display common object for this device */ + + if (nvRmEvoClassListCheck(pDevEvo, NV04_DISPLAY_COMMON)) { + + pDevEvo->displayCommonHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayCommonHandle, + NV04_DISPLAY_COMMON, NULL) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to initialize the display " + "subsystem for the NVIDIA graphics device!"); + goto fail; + + } + } else { + /* + * Not supporting NV04_DISPLAY_COMMON is expected in some + * configurations: e.g., GF117 (an Optimus-only or "coproc" GPU), + * emulation netlists. Fail with "no hardware". 
+ */ + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + if (!ProbeDisplayCommonCaps(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + if (!ProbeHeadCount(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + if (!ProbeValidDisplays(pDispEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + /* Keep track of connectors per pDisp and bind to DP lib if capable */ + if (!AllocConnectors(pDispEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + } + + /* + * If there are no valid display devices, fail with "no hardware". + */ + if (NoValidDisplays(pDevEvo)) { + status = NVKMS_ALLOC_DEVICE_STATUS_NO_HARDWARE_AVAILABLE; + goto fail; + } + + /* + * The number of numSubDevices across disps should equal the + * device's numSubDevices. + */ + totalDispNumSubDevices = 0; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + totalDispNumSubDevices++; + } + + if (totalDispNumSubDevices != pDevEvo->numSubDevices) { + nvAssert(!"Number of disps' subdevices does not match device's"); + } + + /* + * Allocate an NV event for each pDispEvo on the corresponding + * subDevice, tied to the pDevEvo's OS event. + */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { }; + NvU32 subDevice, ret; + + subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle; + + pDispEvo->hotplugEventHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!RegisterDispCallback(&pDispEvo->rmHotplugCallback, pDispEvo, + pDispEvo->hotplugEventHandle, + ReceiveHotplugEvent, + NV2080_NOTIFIERS_HOTPLUG)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register display hotplug event"); + } + + // Enable hotplug notifications from this subdevice. + setEventParams.event = NV2080_NOTIFIERS_HOTPLUG; + setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle, + subDevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &setEventParams, + sizeof(setEventParams))) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register display hotplug " + "handler: 0x%x\n", ret); + } + } + + // Allocate a handler for the DisplayPort "IRQ" event, which is signaled + // when there's a short interruption in the hotplug detect line. + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams = { }; + NvU32 subDevice, ret; + + subDevice = pDevEvo->pSubDevices[pDispEvo->displayOwner]->handle; + + pDispEvo->DPIRQEventHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!RegisterDispCallback(&pDispEvo->rmDPIRQCallback, pDispEvo, + pDispEvo->DPIRQEventHandle, ReceiveDPIRQEvent, + NV2080_NOTIFIERS_DP_IRQ)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register DisplayPort interrupt event"); + } + + // Enable DP IRQ notifications from this subdevice. 
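+ // Like the hotplug event above, use the REPEAT action so notifications keep being delivered for every DP IRQ, not just the first one.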
+ setEventParams.event = NV2080_NOTIFIERS_DP_IRQ; + setEventParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + if ((ret = nvRmApiControl(nvEvoGlobal.clientHandle, + subDevice, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &setEventParams, + sizeof(setEventParams))) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to register DisplayPort interrupt " + "handler: 0x%x\n", ret); + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + ProbeBootDisplays(pDispEvo); + + if (!AllocDpys(pDispEvo)) { + goto fail; + } + + } + + nvAllocVrrEvo(pDevEvo); + + return NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + +fail: + nvRmDestroyDisplays(pDevEvo); + return status; +} + + +void nvRmDestroyDisplays(NVDevEvoPtr pDevEvo) +{ + NvU32 ret; + NVDispEvoPtr pDispEvo; + int dispIndex; + NvS64 tmp; + + nvFreeVrrEvo(pDevEvo); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + // Before freeing anything, dump anything left in the RM's DisplayPort + // AUX channel log. + if (pDispEvo->dpAuxLoggingEnabled) { + do { + ret = nvRmQueryDpAuxLog(pDispEvo, &tmp); + } while (ret && tmp); + } + + // Free the DisplayPort IRQ event. + if (pDispEvo->DPIRQEventHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDispEvo->DPIRQEventHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDispEvo->DPIRQEventHandle); + pDispEvo->DPIRQEventHandle = 0; + } + + // Free the hotplug event. + /* + * XXX I wish I could cancel anything scheduled by + * ReceiveHotplugEvent() and ReceiveDPIRQEvent() for this pDispEvo... + */ + if (pDispEvo->hotplugEventHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDispEvo->hotplugEventHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDispEvo->hotplugEventHandle); + pDispEvo->hotplugEventHandle = 0; + } + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + FreeDpys(pDispEvo); + FreeConnectors(pDispEvo); + } + + FreeDisplays(pDevEvo); + + if (pDevEvo->supportsSyncpts) { + nvFree(pDevEvo->preSyncptTable); + } + + if (pDevEvo->displayCommonHandle != 0) { + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->displayCommonHandle); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Free(displayCommonHandle) failed"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->displayCommonHandle); + pDevEvo->displayCommonHandle = 0; + } +} + + +/*! + * The Allocate a display ID that we use to talk to RM about the dpy(s) on + * head. + * + * \param[in] pDisp The display system on which to allocate the ID. + * \param[in] dpyList The list of dpys. + * + * \return The display ID, or 0 on failure. 
+ */ +NvU32 nvRmAllocDisplayId(const NVDispEvoRec *pDispEvo, const NVDpyIdList dpyList) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS params = { 0 }; + const NVDpyEvoRec *pDpyEvo; + const NVConnectorEvoRec *pConnectorEvo = NULL; + NvBool isDPMST = NV_FALSE; + NvU32 ret; + + FOR_ALL_EVO_DPYS(pDpyEvo, dpyList, pDispEvo) { + if (pConnectorEvo == NULL) { + /* First DPY from list, assign pConnectorEvo and isDPMST variable */ + pConnectorEvo = pDpyEvo->pConnectorEvo; + isDPMST = nvDpyEvoIsDPMST(pDpyEvo); + } + + if (pConnectorEvo != pDpyEvo->pConnectorEvo || + isDPMST != nvDpyEvoIsDPMST(pDpyEvo)) { + return 0; + } + } + + nvAssert(nvConnectorUsesDPLib(pConnectorEvo) || !isDPMST); + + if (!isDPMST) { + /* For non-MST dpy(s), simply return static display ID of connector */ + return nvDpyIdToNvU32(pConnectorEvo->displayId); + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID, + &params, sizeof(params)); + + if (ret == NVOS_STATUS_SUCCESS) { + return params.displayIdAssigned; + } else { + nvEvoLogDisp(pDispEvo, EVO_LOG_WARN, + "Failed to allocate display resource."); + } + + return 0; +} + + +/*! + * Send DISPLAY_CHANGE to resman. + * + * This should be called before and after each mode change, with the display + * mask describing the NEW display configuration. + */ +void nvRmBeginEndModeset(NVDispEvoPtr pDispEvo, + enum NvKmsBeginEndModeset beginOrEnd, + NvU32 mask) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS bracketParams = { }; + NvU32 ret; + + bracketParams.subDeviceInstance = pDispEvo->displayOwner; + bracketParams.newDevices = mask; + bracketParams.properties = 0; /* this is currently unused */ + switch (beginOrEnd) { + case BEGIN_MODESET: + bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_START; + break; + case END_MODESET: + bracketParams.enable = NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_END; + break; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE, + &bracketParams, + sizeof(bracketParams)); + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"Failed NV0073_CTRL_CMD_SPECIFIC_DISPLAY_CHANGE"); + } +} + + +/*! + * Free a RM display ID, if it was allocated dynamically. + * + * This function frees a display ID if it was allocated by + * nvRmAllocDisplayId. If the display ID is static, this function does + * nothing. + * + * From ctrl0073dp.h: You must not call this function while either the ARM + * or ASSEMBLY state cache refers to this display-id. The head must not be + * attached. + * + * \param[in] pDisp The display system on which to free the ID. + * \param[in] displayId The display ID to free. + */ +void nvRmFreeDisplayId(const NVDispEvoRec *pDispEvo, NvU32 displayId) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS params = { 0 }; + NVDpyId dpyId = nvNvU32ToDpyId(displayId); + NvU32 ret; + + /* Do nothing if display ID is static one!
*/ + if (nvDpyIdIsInDpyIdList(dpyId, pDispEvo->connectorIds)) { + return; + } + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = displayId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to relinquish display resource."); + } +} + + +/*! + * Query Resman for the (broad) display device type. + */ +static NvU32 GetLegacyConnectorType(NVDispEvoPtr pDispEvo, NVDpyId dpyId) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS params = { 0 }; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(dpyId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_TYPE, + &params, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failure getting specific display device type."); + return NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_UNKNOWN; + } + + nvAssert((params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) || + (params.displayType == NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_DFP)); + + return params.displayType; +} + + +/*! + * Query RM for the current OR properties of the given connector. + * + * If 'assertOnly' is TRUE, this function will only assert that the OR + * configuration has not changed. + */ +void nvRmGetConnectorORInfo(NVConnectorEvoPtr pConnectorEvo, NvBool assertOnly) +{ + NVDispEvoPtr pDispEvo = pConnectorEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS params = { 0 }; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyIdToNvU32(pConnectorEvo->displayId); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + &params, + sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed to determine output resource properties."); + + if (assertOnly) { + return; + } + pConnectorEvo->or.type = NV0073_CTRL_SPECIFIC_OR_TYPE_DAC; + pConnectorEvo->or.mask = 0; + pConnectorEvo->or.protocol = + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT; + pConnectorEvo->or.ditherType = NV0073_CTRL_SPECIFIC_OR_DITHER_TYPE_OFF; + pConnectorEvo->or.ditherAlgo = + NV0073_CTRL_SPECIFIC_OR_DITHER_ALGO_UNKNOWN; + pConnectorEvo->or.location = NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP; + + return; + } + + if (!assertOnly) { + pConnectorEvo->or.type = params.type; + if (NV0073_CTRL_SYSTEM_GET_CAP(pDevEvo->commonCapsBits, + NV0073_CTRL_SYSTEM_CAPS_CROSS_BAR_SUPPORTED) && + params.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) { + // For the SOR crossbar, RM may report that multiple displayIDs own + // the same SOR. For example, it may report SOR 2 for both the + // DisplayPort and TMDS halves of a physical connector even though + // they have separate displayIds. + // + // All we really need to know is which SOR is assigned to the boot + // display, so we defer the query to MarkConnectorBootHeadActive().
+ pConnectorEvo->or.mask = 0x0; + } else { + pConnectorEvo->or.mask = NVBIT(params.index); + } + pConnectorEvo->or.protocol = params.protocol; + pConnectorEvo->or.ditherType = params.ditherType; + pConnectorEvo->or.ditherAlgo = params.ditherAlgo; + pConnectorEvo->or.location = params.location; + } else { + nvAssert(pConnectorEvo->or.type == params.type); + nvAssert((pConnectorEvo->or.mask & NVBIT(params.index)) != 0x0); + nvAssert(pConnectorEvo->or.protocol == params.protocol); + nvAssert(pConnectorEvo->or.ditherType == params.ditherType); + nvAssert(pConnectorEvo->or.ditherAlgo == params.ditherAlgo); + nvAssert(pConnectorEvo->or.location == params.location); + } +} + +/*! + * Query connector state, and retry if necessary. + */ +NVDpyIdList nvRmGetConnectedDpys(const NVDispEvoRec *pDispEvo, + NVDpyIdList dpyIdList) +{ + NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS params = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayMask = nvDpyIdListToNvU32(dpyIdList); + params.flags = + (DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_METHOD,_DEFAULT) | + DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_DDC,_DEFAULT) | + DRF_DEF(0073_CTRL_SYSTEM,_GET_CONNECT_STATE_FLAGS,_LOAD,_DEFAULT)); + + do { + params.retryTimeMs = 0; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, + &params, + sizeof(params)); + + if (ret == NVOS_STATUS_ERROR_NOT_READY && + params.retryTimeMs == 0) { + // Work around bug 970351: RM returns a zero retry time on platforms + // where the display driver is in user space. Use a conservative + // default. This code can be removed once this call is fixed in RM. + params.retryTimeMs = 20; + } + + if (params.retryTimeMs > 0) { + nvkms_usleep(params.retryTimeMs * 1000); + } else { + nvkms_yield(); + } + } while(params.retryTimeMs > 0); + + if (ret == NVOS_STATUS_SUCCESS) { + return nvNvU32ToDpyIdList(params.displayMask); + } else { + nvEvoLogDisp(pDispEvo, EVO_LOG_ERROR, + "Failed detecting connected display devices"); + return nvEmptyDpyIdList(); + } +} + +/*! + * Notify the DP library that we are ready to proceed after a suspend/boot, and + * that it should initialize and start handling events. + */ +NvBool nvRmResumeDP(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NVConnectorEvoPtr pConnectorEvo; + NVDpyIdList connectedIdsList = + nvRmGetConnectedDpys(pDispEvo, pDispEvo->connectorIds); + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + NvBool plugged = + nvDpyIdIsInDpyIdList(pConnectorEvo->displayId, connectedIdsList); + + if (!pConnectorEvo->pDpLibConnector) { + continue; + } + + if (!nvDPResume(pConnectorEvo->pDpLibConnector, plugged)) { + goto failed; + } + } + } + + return TRUE; + +failed: + nvRmPauseDP(pDevEvo); + return FALSE; +} + + +void nvRmPauseDP(NVDevEvoPtr pDevEvo) +{ + NVDispEvoPtr pDispEvo; + int i; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) { + NVConnectorEvoPtr pConnectorEvo; + + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (nvConnectorUsesDPLib(pConnectorEvo)) { + nvDPPause(pConnectorEvo->pDpLibConnector); + } + } + } +} + + +/*! + * This function is called whenever the DPMS level changes; On a CRT, + * you set the DPMS level by (dis/en)abling the hsync and vsync + * signals: + * + * Hsync Vsync Mode + * ===== ===== ==== + * 1 1 Normal (on).
+ * 0 1 Standby -- RGB guns off, power supply on, tube filaments + * energized, (screen saver mode). + * 1 0 Suspend -- RGB guns off, power supply off, tube filaments + * energized. + * 0 0 Power off -- small auxiliary circuit stays on to monitor the + * hsync/vsync signals to know when to wake up. + */ +NvBool nvRmSetDpmsEvo(NVDpyEvoPtr pDpyEvo, NvS64 value) +{ + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (nvDpyUsesDPLib(pDpyEvo)) { + nvDPDeviceSetPowerState(pDpyEvo, + (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON)); + return TRUE; + } else if (pDpyEvo->pConnectorEvo->legacyType != + NV0073_CTRL_SPECIFIC_DISPLAY_TYPE_CRT) { + NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS powerParams = { 0 }; + + powerParams.subDeviceInstance = pDispEvo->displayOwner; + powerParams.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + + powerParams.powerState = (value == NV_KMS_DPY_ATTRIBUTE_DPMS_ON) ? + NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_ON : + NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_OFF; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_MONITOR_POWER, + &powerParams, + sizeof(powerParams)); + + return (ret == NVOS_STATUS_SUCCESS); + } else { + NVConnectorEvoPtr pConnectorEvo = pDpyEvo->pConnectorEvo; + NV5070_CTRL_CMD_SET_DAC_PWR_PARAMS powerParams = { { 0 }, 0 }; + + powerParams.base.subdeviceIndex = pDispEvo->displayOwner; + if (pConnectorEvo->or.mask == 0x0) { + nvAssert(pConnectorEvo->or.mask != 0x0); + return FALSE; + } + powerParams.orNumber = nvEvoConnectorGetPrimaryOr(pConnectorEvo); + + switch (value) { + case NV_KMS_DPY_ATTRIBUTE_DPMS_ON: + powerParams.normalHSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE); + powerParams.normalVSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE); + break; + case NV_KMS_DPY_ATTRIBUTE_DPMS_STANDBY: + powerParams.normalHSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO); + powerParams.normalVSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _ENABLE); + break; + case NV_KMS_DPY_ATTRIBUTE_DPMS_SUSPEND: + powerParams.normalHSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _ENABLE); + powerParams.normalVSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO); + break; + case NV_KMS_DPY_ATTRIBUTE_DPMS_OFF: + powerParams.normalHSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_HSYNC, _LO); + powerParams.normalVSync = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_VSYNC, _LO); + break; + default: + return FALSE; + } + // XXX These could probably be disabled too, in the DPMS_OFF case. 
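+ // Data and power stay in their normal (enabled/on) state for every DPMS level; only the hsync/vsync levels chosen above vary.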
+ powerParams.normalData = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_DATA, _ENABLE); + powerParams.normalPower = + DRF_DEF(5070, _CTRL_CMD_SET_DAC, _PWR_NORMAL_PWR, _ON); + + powerParams.flags = + DRF_DEF(5070, _CTRL_CMD_SET_DAC_PWR_FLAGS, _SPECIFIED_NORMAL, _YES); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_DAC_PWR, + &powerParams, + sizeof(powerParams)); + + return (ret == NVOS_STATUS_SUCCESS); + } +} + + +NvBool nvRmAllocSysmem(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + NvU32 *ctxDmaFlags, void **ppBase, NvU64 size, + NvKmsMemoryIsoType isoType) +{ + NvU32 ret; + NvBool bufferAllocated = FALSE; + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + const NvKmsDispIOCoherencyModes *pIOCoherencyModes; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + + memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + memAllocParams.size = size; + + if (isoType == NVKMS_MEMORY_NISO) { + memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _YES); + + pIOCoherencyModes = &pDevEvo->nisoIOCoherencyModes; + } else { + pIOCoherencyModes = &pDevEvo->isoIOCoherencyModes; + } + + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS) | + DRF_DEF(OS32, _ATTR, _FORMAT, _PITCH); + + if (pIOCoherencyModes->noncoherent) { + // Model (3) + // - allocate USWC system memory + // - allocate ctx dma with NVOS03_FLAGS_CACHE_SNOOP_DISABLE + // - to sync CPU and GPU, flush CPU WC buffer + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, + memAllocParams.attr); + + ret = nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + + if (ret == NVOS_STATUS_SUCCESS) { + bufferAllocated = TRUE; + if (ctxDmaFlags) { + *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _DISABLE); + } + } else { + bufferAllocated = FALSE; + } + + } + + if (!bufferAllocated && pIOCoherencyModes->coherent) { + // Model (2b): Similar to existing PCI model + // - allocate cached (or USWC) system memory + // - allocate ctx DMA with NVOS03_FLAGS_CACHE_SNOOP_ENABLE + // ... + + memAllocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, + memAllocParams.attr); + + ret = nvRmApiAlloc( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_SYSTEM, + &memAllocParams); + + if (ret == NVOS_STATUS_SUCCESS) { + bufferAllocated = TRUE; + if (ctxDmaFlags) { + *ctxDmaFlags |= DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _ENABLE); + } + } else { + bufferAllocated = FALSE; + } + } + + if (bufferAllocated) { + ret = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + 0, /* offset */ + size, + ppBase, + 0 /* flags */); + + if (ret != NVOS_STATUS_SUCCESS) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle); + + bufferAllocated = FALSE; + } + } + + return bufferAllocated; +} + + +/*****************************************************************************/ +/* Alloc memory and a context dma, following the rules dictated by the + DMA coherence flags. 
*/ +/*****************************************************************************/ + +NvBool nvRmAllocEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma, + NvU64 limit, NvU32 ctxDmaFlags, NvU32 subDeviceMask) +{ + NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxdmaParams = { }; + NvBool bufferAllocated = FALSE; + NvU32 memoryHandle = 0; + void *pBase = NULL; + + NvBool needBar1Mapping = FALSE; + + NvU32 ctxDmaHandle = 0; + NvU32 localCtxDmaFlags = ctxDmaFlags | + DRF_DEF(OS03, _FLAGS, _ACCESS, _READ_WRITE) | + DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + + NvU32 ret; + + nvkms_memset(pDma, 0, sizeof(*pDma)); + + memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + /* + * On certain GPUs (GF100, GF104) there exists a hardware bug that forces + * us to put display NISO surfaces (pushbuffer, semaphores, notifiers + * accessed by EVO) in vidmem instead of sysmem. See bug 632241 for + * details. + */ + if (NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits, + NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY)) { + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { }; + + memAllocParams.owner = NVKMS_RM_HEAP_ID; + memAllocParams.type = NVOS32_TYPE_DMA; + memAllocParams.size = limit + 1; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) | + DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + /* We can't fall back to any of the sysmem options below, due to + * the nature of the HW bug forcing us to use vidmem. */ + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unable to allocate video memory for display"); + return FALSE; + } + + limit = memAllocParams.size - 1; + + /* We'll access these surfaces through IFB */ + pBase = NULL; + + bufferAllocated = TRUE; + needBar1Mapping = TRUE; + } + + if (!bufferAllocated) { + /* + * Setting NVKMS_MEMORY_NISO since nvRmAllocEvoDma() is currently only + * called to allocate pushbuffer and notifier memory. + */ + bufferAllocated = nvRmAllocSysmem(pDevEvo, memoryHandle, + &localCtxDmaFlags, &pBase, limit + 1, + NVKMS_MEMORY_NISO); + } + + if (!bufferAllocated) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unable to allocate DMA memory"); + + return FALSE; + } + + ctxDmaHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + // Create a ctxdma for this allocation. 
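+ // The context DMA limit is an inclusive byte offset: a buffer of size N is described by limit = N - 1 (hence the size = limit + 1 allocations above).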
+ ctxdmaParams.hMemory = memoryHandle; + ctxdmaParams.flags = localCtxDmaFlags; + ctxdmaParams.offset = 0; + ctxdmaParams.limit = limit; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + ctxDmaHandle, + NV01_CONTEXT_DMA, + &ctxdmaParams); + + if (ret != NVOS_STATUS_SUCCESS) { + if (pBase != NULL) { + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + memoryHandle, + pBase, + 0); + } + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, memoryHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, ctxDmaHandle); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate a DMA context"); + + return FALSE; + } + + pDma->memoryHandle = memoryHandle; + + pDma->ctxHandle = ctxDmaHandle; + + pDma->limit = limit; + + if (needBar1Mapping) { + NvBool result; + + result = nvRmEvoMapVideoMemory(pDevEvo, memoryHandle, limit + 1, + pDma->subDeviceAddress, subDeviceMask); + + if (!result) { + nvRmFreeEvoDma(pDevEvo, pDma); + return FALSE; + } + } else { + int sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (((1 << sd) & subDeviceMask) == 0) { + continue; + } + + pDma->subDeviceAddress[sd] = pBase; + } + } + pDma->isBar1Mapping = needBar1Mapping; + + return TRUE; +} + +void nvRmFreeEvoDma(NVDevEvoPtr pDevEvo, NVEvoDmaPtr pDma) +{ + NvU32 ret; + + if (pDma->ctxHandle != 0) { + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, pDma->ctxHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to free DMA context"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDma->ctxHandle); + pDma->ctxHandle = 0; + } + + if (pDma->memoryHandle != 0) { + if (pDma->isBar1Mapping) { + nvRmEvoUnMapVideoMemory(pDevEvo, pDma->memoryHandle, + pDma->subDeviceAddress); + } else { + int sd = 0; + NvBool addressMapped = TRUE; + + /* If pDma->subDeviceAddress[sd] is non-NULL for multiple subdevices, + * assume they are the same. Unmap only one but set all of them to + * NULL. This matches the logic in nvRmAllocEvoDma(). 
+ */ + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (addressMapped && pDma->subDeviceAddress[sd] != NULL) { + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDma->memoryHandle, + pDma->subDeviceAddress[sd], + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to unmap memory"); + } + + addressMapped = FALSE; + } + + pDma->subDeviceAddress[sd] = NULL; + } + } + + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, pDma->memoryHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to free DMA memory"); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDma->memoryHandle); + pDma->memoryHandle = 0; + + pDma->limit = 0; + + nvkms_memset(pDma->subDeviceAddress, 0, sizeof(pDma->subDeviceAddress)); + } +} + +static void +CompletionNotifierEventDeferredWork(void *dataPtr, NvU32 dataU32) +{ + NVEvoChannelPtr pChannel = dataPtr; + + nvSendFlipOccurredEventEvo(pChannel->pb.pDevEvo, pChannel->channelMask); +} + +static void CompletionNotifierEvent(void *arg, void *pEventDataVoid, + NvU32 hEvent, NvU32 Data, NV_STATUS Status) +{ + (void) nvkms_alloc_timer_with_ref_ptr( + CompletionNotifierEventDeferredWork, /* callback */ + arg, /* argument (this is a ref_ptr to a pChannel) */ + 0, /* dataU32 */ + 0); /* timeout: schedule the work immediately */ +} + +/*****************************************************************************/ +/* RmAllocEvoChannel () + * Allocates the EVO channel and associated notifier surfaces and ctxdmas. + * Takes how big the DMA controls are (varies by class of channel) and which + * class to allocate. + */ +/*****************************************************************************/ +static NVEvoChannelPtr +RmAllocEvoChannel(NVDevEvoPtr pDevEvo, + NVEvoChannelMask channelMask, + NvV32 instance, NvU32 class) +{ + NVEvoChannelPtr pChannel = NULL; + NVDmaBufferEvoPtr buffer = NULL; + int sd; + NvU32 ret; + + /* One 4k page is enough to map PUT and GET */ + const NvU64 dmaControlLen = 0x1000; + + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(channelMask) == 1); + + /* Allocate the channel data structure */ + pChannel = nvCalloc(1, sizeof(*pChannel)); + + if (pChannel == NULL) { + goto fail; + } + + buffer = &pChannel->pb; + + pChannel->hwclass = class; + pChannel->instance = instance; + pChannel->channelMask = channelMask; + + pChannel->notifiersDma = nvCalloc(pDevEvo->numSubDevices, sizeof(NVEvoDma)); + + if (pChannel->notifiersDma == NULL) { + goto fail; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NVEvoDmaPtr pNotifiersDma = &pChannel->notifiersDma[sd]; + + void *pDmaDisplayChannel = NULL; + + // Allocation of the notifiers + if (!nvRmAllocEvoDma(pDevEvo, pNotifiersDma, + NV_DMA_EVO_NOTIFIER_SIZE - 1, + DRF_DEF(OS03, _FLAGS, _TYPE, _NOTIFIER), + 1 << sd)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Notifier DMA allocation failed"); + + goto fail; + } + + nvAssert(pNotifiersDma->subDeviceAddress[sd] != NULL); + + // Only allocate memory for one pushbuffer. 
+ // All subdevices will share (via subdevice mask) + if (sd == 0) { + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS ChannelAllocParams = { 0 }; + + NvU64 limit = NV_DMA_EVO_PUSH_BUFFER_SIZE - 1; + NVEvoDmaPtr pDma = &buffer->dma; + + // Allocation of the push buffer + if (!nvRmAllocEvoDma(pDevEvo, pDma, limit, 0, SUBDEVICE_MASK_ALL)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer DMA allocation failed"); + + goto fail; + } + + if (!pDma->isBar1Mapping) { + buffer->base = pDma->subDeviceAddress[0]; + } else { + /* + * Allocate memory for a shadow copy in sysmem that we'll copy + * to vidmem via BAR1 at kickoff time. + */ + buffer->base = nvCalloc(buffer->dma.limit + 1, 1); + if (buffer->base == NULL) { + goto fail; + } + } + + buffer->channel_handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + // Channel instance (always 0 for CORE - head number otherwise) + ChannelAllocParams.channelInstance = instance; + // PB CtxDMA Handle + ChannelAllocParams.hObjectBuffer = buffer->dma.ctxHandle; + // Initial offset within the PB + ChannelAllocParams.offset = 0; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + buffer->channel_handle, + class, + &ChannelAllocParams); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer channel allocation failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + buffer->channel_handle); + buffer->channel_handle = 0; + + goto fail; + } + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + buffer->channel_handle, + 0, + dmaControlLen, + &pDmaDisplayChannel, + 0); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Display engine push buffer DMA mapping failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + goto fail; + } + + buffer->control[sd] = pDmaDisplayChannel; + } + + /* Initialize the rest of the required push buffer information */ + buffer->buffer = buffer->base; + buffer->end = (NvU32 *)((char *)buffer->base + + NV_DMA_EVO_PUSH_BUFFER_SIZE - 8); + + /* + * Due to hardware bug 235044, we can not use the last 12 dwords of the + * core channel pushbuffer. Adjust offset_max appropriately. + * + * This bug is fixed in Volta and newer, so this workaround can be removed + * when Pascal support is dropped. See bug 3116066. 
+ */ + buffer->offset_max = NV_DMA_EVO_PUSH_BUFFER_SIZE - + NV_DMA_EVO_PUSH_BUFFER_PAD_SIZE; + buffer->fifo_free_count = (buffer->offset_max >> 2) - 2; + buffer->put_offset = 0; + buffer->num_channels = pDevEvo->numSubDevices; + buffer->pDevEvo = pDevEvo; + buffer->currentSubDevMask = SUBDEVICE_MASK_ALL; + + if (!FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, channelMask)) { + pChannel->ref_ptr = nvkms_alloc_ref_ptr(pChannel); + + if (pChannel->ref_ptr == NULL) { + goto fail; + } + + pChannel->completionNotifierEventHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!nvRmRegisterCallback(pDevEvo, + &pChannel->completionNotifierEventCallback, + pChannel->ref_ptr, + pChannel->pb.channel_handle, + pChannel->completionNotifierEventHandle, + CompletionNotifierEvent, + 0)) { + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->completionNotifierEventHandle); + pChannel->completionNotifierEventHandle = 0; + goto fail; + } + } + + pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE; + + pDevEvo->hal->InitChannel(pDevEvo, pChannel); + + return pChannel; + +fail: + + RmFreeEvoChannel(pDevEvo, pChannel); + + return NULL; +} + +static void FreeImmediateChannelPio(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NVEvoPioChannel *pPio = pChannel->imm.u.pio; + int sd; + + nvAssert(pPio != NULL); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (!pPio->control[sd]) { + continue; + } + + if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pPio->handle, + pPio->control[sd], + 0)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, + "Failed to unmap immediate channel"); + } + pPio->control[sd] = NULL; + } + + if (pPio->handle) { + if (nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pPio->handle)) { + nvEvoLogDev(pDevEvo, EVO_LOG_WARN, "Failed to free immediate channel"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pPio->handle); + pPio->handle = 0; + } + + nvFree(pPio); + pChannel->imm.u.pio = NULL; +} + +static void FreeImmediateChannelDma(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + NVEvoChannelPtr pImmChannel = pChannel->imm.u.dma; + + RmFreeEvoChannel(pDevEvo, pImmChannel); + pChannel->imm.u.dma = NULL; +} + +static void FreeImmediateChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + switch (pChannel->imm.type) { + case NV_EVO_IMM_CHANNEL_NONE: + return; + case NV_EVO_IMM_CHANNEL_PIO: + FreeImmediateChannelPio(pDevEvo, pChannel); + break; + case NV_EVO_IMM_CHANNEL_DMA: + FreeImmediateChannelDma(pDevEvo, pChannel); + break; + } + pChannel->imm.type = NV_EVO_IMM_CHANNEL_NONE; +} + +/*****************************************************************************/ +/* RmFreeEvoChannel () + * Frees all of the stuff allocated in RmAllocEvoChannel */ +/*****************************************************************************/ +static void RmFreeEvoChannel(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel) +{ + int sd; + + if (pChannel == NULL) { + return; + } + + FreeImmediateChannel(pDevEvo, pChannel); + + if (pChannel->completionNotifierEventHandle != 0) { + + nvRmApiFree(nvEvoGlobal.clientHandle, + pChannel->pb.channel_handle, + pChannel->completionNotifierEventHandle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->completionNotifierEventHandle); + + pChannel->completionNotifierEventHandle = 0; + } + + nvkms_free_ref_ptr(pChannel->ref_ptr); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pChannel->pb.control[sd]) { + if (nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + 
pDevEvo->pSubDevices[sd]->handle, + pChannel->pb.channel_handle, + pChannel->pb.control[sd], + 0) != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to unmap display engine channel memory"); + } + pChannel->pb.control[sd] = NULL; + } + } + + if (pChannel->pb.channel_handle != 0) { + // If NVKMS restored the console successfully, tell RM to leave the + // channels allocated to avoid shutting down the heads we just + // enabled. + // + // On EVO, only leave the core and base channels allocated. The + // other satellite channels shouldn't be active at the console. + // + // On nvdisplay, one or more window channels are also needed. Rather + // than try to figure out which ones are needed, just leave them all + // alone. + const NvBool isCore = + FLD_TEST_DRF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE, + pChannel->channelMask); + const NvBool isBase = + (pChannel->channelMask & NV_EVO_CHANNEL_MASK_BASE_ALL) != 0; + const NvBool isWindow = + (pChannel->channelMask & NV_EVO_CHANNEL_MASK_WINDOW_ALL) != 0; + if ((isCore || isBase || isWindow) && pDevEvo->skipConsoleRestore) { + NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS params = { }; + + params.base.subdeviceIndex = pDevEvo->vtFbInfo.subDeviceInstance; + params.flags = NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + NV5070_CTRL_CMD_SET_RMFREE_FLAGS, + ¶ms, sizeof(params)) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set the PRESERVE_HW flag"); + } + } + + if (nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + pChannel->pb.channel_handle) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to tear down display engine channel"); + } + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pChannel->pb.channel_handle); + pChannel->pb.channel_handle = 0; + } + + if (pChannel->pb.dma.isBar1Mapping) { + /* Pushbuffer is in vidmem. Free shadow copy. 
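+ * (This is the sysmem shadow allocated with nvCalloc() in
+ * RmAllocEvoChannel() when the pushbuffer required a BAR1 mapping.)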
*/ + nvFree(pChannel->pb.base); + pChannel->pb.base = NULL; + } + + nvRmFreeEvoDma(pDevEvo, &pChannel->pb.dma); + + if (pChannel->notifiersDma) { + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + nvRmFreeEvoDma(pDevEvo, &pChannel->notifiersDma[sd]); + } + } + + nvFree(pChannel->notifiersDma); + pChannel->notifiersDma = NULL; + + nvFree(pChannel); +} + +static NvBool +AllocImmediateChannelPio(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 class, + NvU32 instance, + NvU32 mapSize) +{ + NVEvoPioChannel *pPio = NULL; + NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS params = { 0 }; + NvU32 sd; + + pPio = nvCalloc(1, sizeof(*pPio)); + + if (!pPio) { + return FALSE; + } + + pChannel->imm.type = NV_EVO_IMM_CHANNEL_PIO; + pChannel->imm.u.pio = pPio; + + params.channelInstance = instance; + + if (nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayHandle, + handle, + class, + ¶ms) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate immediate channel %d", instance); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + return FALSE; + } + + pPio->handle = handle; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + void *pImm = NULL; + + if (nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pPio->handle, + 0, + mapSize, + &pImm, + 0) != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to map immediate channel %d/%d", + sd, instance); + return FALSE; + } + + pPio->control[sd] = pImm; + } + + return TRUE; +} + +static NvBool +AllocImmediateChannelDma(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 immClass) +{ + NVEvoChannelPtr pImmChannel = RmAllocEvoChannel( + pDevEvo, + DRF_DEF64(_EVO, _CHANNEL_MASK, _WINDOW_IMM, _ENABLE), + pChannel->instance, immClass); + + if (!pImmChannel) { + return FALSE; + } + + pChannel->imm.type = NV_EVO_IMM_CHANNEL_DMA; + pChannel->imm.u.dma = pImmChannel; + + return TRUE; +} + +NvBool nvRMAllocateBaseChannels(NVDevEvoPtr pDevEvo) +{ + int i; + NvU32 baseClass = 0; + NvU32 head; + + static const NvU32 baseChannelDmaClasses[] = { + NV927C_BASE_CHANNEL_DMA, + }; + + for (i = 0; i < ARRAY_LEN(baseChannelDmaClasses); i++) { + if (nvRmEvoClassListCheck(pDevEvo, baseChannelDmaClasses[i])) { + baseClass = baseChannelDmaClasses[i]; + break; + } + } + + if (!baseClass) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported base display class"); + return FALSE; + } + + for (head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->base[head] = RmAllocEvoChannel( + pDevEvo, + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _BASE, head, _ENABLE), + head, baseClass); + + if (!pDevEvo->base[head]) { + return FALSE; + } + } + + return TRUE; +} + +NvBool nvRMAllocateOverlayChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 immMapSize; + NvU32 head; + + if (!nvRmEvoClassListCheck(pDevEvo, + NV917E_OVERLAY_CHANNEL_DMA)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Unsupported overlay display class"); + return FALSE; + } + + nvAssert(nvRmEvoClassListCheck(pDevEvo, NV917B_OVERLAY_IMM_CHANNEL_PIO)); + + /* + * EvoSetImmPointOut91() will interpret the PIO mapping as a pointer + * to GK104DispOverlayImmControlPio and access the SetPointOut and + * Update fields, which is safe as long as SetPointOut and Update are + * at consistent offsets. 
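+ * The nvAsserts that follow verify those offsets, and immMapSize is sized
+ * to cover up to and including the larger of the two registers.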
+ */ + nvAssert(offsetof(GK104DispOverlayImmControlPio, SetPointsOut) == + NV917B_SET_POINTS_OUT(NVKMS_LEFT)); + nvAssert(offsetof(GK104DispOverlayImmControlPio, Update) == + NV917B_UPDATE); + immMapSize = + NV_MAX(NV917B_SET_POINTS_OUT(NVKMS_LEFT), NV917B_UPDATE) + sizeof(NvV32); + + for (head = 0; head < pDevEvo->numHeads; head++) { + pDevEvo->overlay[head] = RmAllocEvoChannel( + pDevEvo, + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _OVERLAY, head, _ENABLE), + head, NV917E_OVERLAY_CHANNEL_DMA); + + if (!pDevEvo->overlay[head]) { + return FALSE; + } + + if (!AllocImmediateChannelPio(pDevEvo, pDevEvo->overlay[head], + NV917B_OVERLAY_IMM_CHANNEL_PIO, head, immMapSize)) { + return FALSE; + } + } + + return TRUE; +} + +/*! + * This allocates a syncpt per channel. This syncpt is dedicated + * to this channel. As NVKMS only supports syncpoints for SOC devices, + * in which there's only one device/sub-device/disp, sd can be 0. + */ +static NvBool AllocSyncpt(NVDevEvoPtr pDevEvo, NVEvoChannelPtr pChannel, + NVEvoSyncpt *pEvoSyncptOut) +{ + NvU32 hSyncptCtxDma, hSyncpt, id; + NvKmsSyncPtOpParams params = { }; + NvBool result; + + if (!pDevEvo->supportsSyncpts) { + return FALSE; + } + + /* + * HW engine on Orin is called HOST1X, all syncpts are in internal RAM of + * HOST1X. + * OP_ALLOC calls into HOST1X driver and allocs a syncpt resource. + */ + params.alloc.syncpt_name = "nvkms-fence"; + result = nvkms_syncpt_op(NVKMS_SYNCPT_OP_ALLOC, ¶ms); + if (!result) { + return FALSE; + } + id = params.alloc.id; + result = nvRmEvoAllocAndBindSyncpt( + pDevEvo, pChannel, id, &hSyncpt, &hSyncptCtxDma); + if (!result) { + /*! put back syncpt as register failed */ + params.put.id = id; + nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, ¶ms); + return FALSE; + } + + /*! Populate syncpt values to return. 
*/ + pEvoSyncptOut->id = id; + pEvoSyncptOut->hCtxDma = hSyncptCtxDma; + pEvoSyncptOut->hSyncpt = hSyncpt; + pEvoSyncptOut->channelMask = pChannel->channelMask; + return TRUE; +} + +static NvBool AllocPostSyncptPerChannel(NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel) +{ + if (!pDevEvo->supportsSyncpts) { + return TRUE; + } + + return AllocSyncpt(pDevEvo, pChannel, &pChannel->postSyncpt); +} + +NvBool nvRMAllocateWindowChannels(NVDevEvoPtr pDevEvo) +{ + int index; + NvU32 window; + + static const struct { + NvU32 windowClass; + NvU32 immClass; + } windowChannelClasses[] = { + { NVC67E_WINDOW_CHANNEL_DMA, + NVC67B_WINDOW_IMM_CHANNEL_DMA }, + { NVC57E_WINDOW_CHANNEL_DMA, + NVC57B_WINDOW_IMM_CHANNEL_DMA }, + { NVC37E_WINDOW_CHANNEL_DMA, + NVC37B_WINDOW_IMM_CHANNEL_DMA }, + }, *c = NULL; + + for (index = 0; index < ARRAY_LEN(windowChannelClasses); index++) { + if (nvRmEvoClassListCheck(pDevEvo, + windowChannelClasses[index].windowClass)) { + + c = &windowChannelClasses[index]; + + nvAssert(nvRmEvoClassListCheck(pDevEvo, c->immClass)); + break; + } + } + + if (index >= ARRAY_LEN(windowChannelClasses)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported window display class"); + return FALSE; + } + + nvAssert(pDevEvo->numWindows <= ARRAY_LEN(pDevEvo->window)); + for (window = 0; window < pDevEvo->numWindows; window++) { + pDevEvo->window[window] = RmAllocEvoChannel( + pDevEvo, + DRF_IDX_DEF64(_EVO, _CHANNEL_MASK, _WINDOW, window, _ENABLE), + window, c->windowClass); + + if (!pDevEvo->window[window]) { + return FALSE; + } + + if (!AllocImmediateChannelDma(pDevEvo, pDevEvo->window[window], + c->immClass)) { + return FALSE; + } + + if (!AllocPostSyncptPerChannel(pDevEvo, + pDevEvo->window[window])) { + return FALSE; + } + } + + return TRUE; +} + +static void EvoFreeCoreChannel(NVDevEvoRec *pDevEvo, NVEvoChannel *pChannel) +{ + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 ret; + + if (!pDevEvo->pSubDevices[sd]->pCoreDma) { + continue; + } + + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pChannel->pb.channel_handle, + pDevEvo->pSubDevices[sd]->pCoreDma, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug( + pDevEvo, + EVO_LOG_ERROR, + "Failed to unmap NVDisplay core channel memory mapping for ARMed values"); + } + pDevEvo->pSubDevices[sd]->pCoreDma = NULL; + } + + RmFreeEvoChannel(pDevEvo, pChannel); +} + +static NVEvoChannel* EvoAllocateCoreChannel(NVDevEvoRec *pDevEvo) +{ + NVEvoChannel *pChannel; + NvU32 sd; + + pChannel = + RmAllocEvoChannel(pDevEvo, + DRF_DEF64(_EVO, _CHANNEL_MASK, _CORE, _ENABLE), + 0, + pDevEvo->coreChannelDma.coreChannelClass); + + if (pChannel == NULL) { + goto failed; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pChannel->pb.channel_handle, + pDevEvo->coreChannelDma.dmaArmedOffset, + pDevEvo->coreChannelDma.dmaArmedSize, + (void**)&pDevEvo->pSubDevices[sd]->pCoreDma, + DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev( + pDevEvo, + EVO_LOG_ERROR, + "Core channel memory mapping for ARMed values failed: 0x%x (%s)", + ret, nvstatusToString(ret)); + goto failed; + } + } + + return pChannel; + +failed: + if (pChannel != NULL) { + EvoFreeCoreChannel(pDevEvo, pChannel); + } + return NULL; +} + +/* Pre-allocate the vblank syncpts, store in NVDispHeadStateEvoRec. 
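+ * (One syncpt is requested per head for each vblank sync object index, up
+ * to NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD; allocation stops early once
+ * no more syncpts are available.)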
*/ +static void AllocCoreRGSyncpts(NVDevEvoPtr pDevEvo) +{ + + NVDispEvoPtr pDispEvo = NULL; + NvU32 syncptIdx = 0; + + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + return; + } + + /* If Syncpts are supported, we're on Orin, which only has one display. */ + nvAssert(pDevEvo->nDispEvo == 1); + pDispEvo = pDevEvo->pDispEvo[0]; + + /* Initialize all heads' vblank sync object counts to zero. */ + for (int i = 0; i < pDevEvo->numHeads; i++) { + pDispEvo->headState[i].numVblankSyncObjectsCreated = 0; + } + + /* For each core RG syncpt index: */ + for (syncptIdx = 0; syncptIdx < NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD; + syncptIdx++) { + /* For each Head: */ + for (int i = 0; i < pDevEvo->numHeads; i++) { + NvBool result = FALSE; + NVDispHeadStateEvoPtr pHeadState = &pDispEvo->headState[i]; + + result = + AllocSyncpt(pDevEvo, pDevEvo->core, + &pHeadState->vblankSyncObjects[syncptIdx].evoSyncpt); + if (!result) { + /* + * Stop trying to allocate more syncpts if none are + * available. + */ + nvEvoLogDisp(pDispEvo, EVO_LOG_INFO, + "Failed to allocate Core RG Syncpoint at index %d " + "on Head %d.", syncptIdx, i); + return; + } + + /* Populate the index of the syncpt in the NVVblankSyncObjectRec. */ + pHeadState->vblankSyncObjects[syncptIdx].index = syncptIdx; + /* Update the count. */ + pHeadState->numVblankSyncObjectsCreated = syncptIdx + 1; + } + } +} + +NvBool nvRMSetupEvoCoreChannel(NVDevEvoPtr pDevEvo) +{ + NvU32 sd; + + pDevEvo->core = EvoAllocateCoreChannel(pDevEvo); + if (!pDevEvo->core) { + return FALSE; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + // Bind the core notifier ctxDma + NvU32 ret = + nvRmEvoBindDispContextDMA(pDevEvo, pDevEvo->core, + pDevEvo->core->notifiersDma[sd].ctxHandle); + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to bind display engine notify context DMA: 0x%x (%s)", + ret, nvstatusToString(ret)); + nvRMFreeEvoCoreChannel(pDevEvo); + return FALSE; + } + } + + AllocCoreRGSyncpts(pDevEvo); + + nvInitEvoSubDevMask(pDevEvo); + + /* + * XXX NVKMS TODO: Enable core channel event generation; see bug + * 1671139. + */ + + // Query the VBIOS head assignments. Note that this has to happen after the + // core channel is allocated or else RM will return incorrect information + // about dynamic display IDs it allocates for the boot display on DP MST + // devices. + GetVbiosHeadAssignment(pDevEvo); + + return TRUE; +} + +void nvRMFreeBaseChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + RmFreeEvoChannel(pDevEvo, pDevEvo->base[head]); + pDevEvo->base[head] = NULL; + } +} + +void nvRMFreeOverlayChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 head; + + for (head = 0; head < pDevEvo->numHeads; head++) { + RmFreeEvoChannel(pDevEvo, pDevEvo->overlay[head]); + pDevEvo->overlay[head] = NULL; + } +} + +void nvRMFreeWindowChannels(NVDevEvoPtr pDevEvo) +{ + NvU32 window; + + for (window = 0; window < pDevEvo->numWindows; window++) { + nvRmEvoFreeSyncpt(pDevEvo, &pDevEvo->window[window]->postSyncpt); + RmFreeEvoChannel(pDevEvo, pDevEvo->window[window]); + pDevEvo->window[window] = NULL; + } +} + +/* Frees the Core RG Syncpts. */ +static void FreeCoreRGSyncpts(NVDevEvoPtr pDevEvo) +{ + + NVDispEvoPtr pDispEvo = NULL; + + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + return; + } + + /* If Syncpts are supported, we're on Orin, which only has one display. 
*/ + nvAssert(pDevEvo->nDispEvo == 1); + pDispEvo = pDevEvo->pDispEvo[0]; + /* For each Head: */ + for (int i = 0; i < pDevEvo->numHeads; i++) { + /* Free all core RG syncpts. */ + NVDispHeadStateEvoRec *pHeadState = &pDispEvo->headState[i]; + for (int j = 0; j < pHeadState->numVblankSyncObjectsCreated; j++) { + nvRmEvoFreeSyncpt(pDevEvo, + &pHeadState->vblankSyncObjects[j].evoSyncpt); + } + pHeadState->numVblankSyncObjectsCreated = 0; + } +} + +void nvRMFreeEvoCoreChannel(NVDevEvoPtr pDevEvo) +{ + FreeCoreRGSyncpts(pDevEvo); + + if (pDevEvo->core != NULL) { + EvoFreeCoreChannel(pDevEvo, pDevEvo->core); + pDevEvo->core = NULL; + } +} + +/* Poll for an EVO channel on a particular subdevice to process all its methods */ +static NvBool SyncOneEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChan, + NvU32 sd, + NvU32 errorToken) +{ + NvBool isMethodPending; + NvU64 startTime = 0; + const NvU32 timeout = 2000000; // microseconds + + do { + if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo, pChan, + sd, &isMethodPending)) { + return FALSE; + } + + if (!isMethodPending) { + break; + } + + if (!nvIsEmulationEvo(pDevEvo)) { + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Idling display engine timed out: 0x%08x:%d:%d:%d", + pChan->hwclass, pChan->instance, + sd, errorToken); + return FALSE; + } + } + + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + +/* Sync an EVO channel on all subdevices */ +NvBool nvRMSyncEvoChannel( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 errorToken) +{ + NvBool ret = TRUE; + + if (pChannel) { + NvU32 sd; + + nvDmaKickoffEvo(pChannel); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!SyncOneEvoChannel(pDevEvo, pChannel, sd, errorToken)) { + ret = FALSE; + } + } + } + + return ret; +} + + +/* + * Wait for the requested base channel to be idle (no methods pending), and + * call STOP_BASE if the wait times out. + * + * stoppedBase will be TRUE if calling STOP_BASE was necessary and + * successful. + */ +NvBool nvRMIdleBaseChannel(NVDevEvoPtr pDevEvo, NvU32 head, NvU32 sd, + NvBool *stoppedBase) +{ + NVEvoChannelPtr pMainLayerChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + NvU64 startTime = 0; + NvBool idleTimedOut = FALSE; + const NvU32 timeout = 2000000; // 2 seconds + NvBool isMethodPending = TRUE; + NvBool ret = TRUE; + + *stoppedBase = FALSE; + + do { + if (!pDevEvo->hal->IsChannelMethodPending(pDevEvo, + pMainLayerChannel, + sd, + &isMethodPending)) { + break; + } + + if (!isMethodPending) { + break; + } + + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + idleTimedOut = TRUE; + break; + } + + nvkms_yield(); + + } while (TRUE); + + if (idleTimedOut) { + NVEvoIdleChannelState idleChannelState = { }; + + idleChannelState.subdev[sd].channelMask |= pMainLayerChannel->channelMask; + ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo, &idleChannelState); + + *stoppedBase = ret; + } + + return ret; +} + + +NvBool nvRmEvoClassListCheck(const NVDevEvoRec *pDevEvo, NvU32 classID) +{ + const NvU32 *classes = pDevEvo->supportedClasses; + + int i; + + nvAssert(pDevEvo->numClasses > 0); + + for (i = 0; i < pDevEvo->numClasses; i++) { + if (classes[i] == classID) { + return TRUE; + } + } + + return FALSE; +} + +/*! + * This API used to register syncpt object with RM. + * It involves -> + * 1. Allocate a new NV01_MEMORY_SYNCPOINT syncpt object. + * 2. Allocate a new ctxdma descriptor for the syncpt object. + * 3. Bind the ctxdma entry to the channel. 
+ */ +NvBool nvRmEvoAllocAndBindSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel, + NvU32 id, + NvU32 *pSyncptHandle, + NvU32 *pSyncptCtxDmaHandle) +{ + return FALSE; +} + +static void FreeSyncptHandle( + NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pSyncpt) +{ + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSyncpt->hSyncpt); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pSyncpt->hSyncpt); + pSyncpt->hSyncpt = 0; + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSyncpt->hCtxDma); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pSyncpt->hCtxDma); + pSyncpt->hCtxDma = 0; +} + +/*! + * This API used to unregister syncpt object with given channel. + * It searches global table, and when finds that for given channel, syncpt + * is registered, then frees it. + */ +void nvRmEvoFreePreSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoChannel *pChannel) +{ + NvU32 i; + NvBool isChannelIdle = NV_FALSE; + + if (pChannel == NULL) { + return; + } + + if (!pDevEvo->supportsSyncpts) { + return; + } + + if (pChannel->channelMask == 0) { + return; + } + + pDevEvo->hal->IsChannelIdle( + pDevEvo, pChannel, 0, &isChannelIdle); + + if (isChannelIdle == NV_FALSE) { + return; + } + + /*! Find pre-syncpt and free it */ + for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) { + + pDevEvo->preSyncptTable[i].channelMask &= ~pChannel->channelMask; + if (pDevEvo->preSyncptTable[i].channelMask == 0 && + pDevEvo->preSyncptTable[i].hCtxDma != 0) { + + /*! Free handles */ + FreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]); + } + } +} + +static NvBool GarbageCollectSyncptHelperOneChannel( + NVDevEvoRec *pDevEvo, + NvU32 sd, + NVEvoChannel *pChannel, + NVEvoSyncpt *pSyncpt, + NVEvoChannelMask *pIdledChannelMask) +{ + NvBool isChannelIdle = FALSE; + + if ((pChannel->channelMask & pSyncpt->channelMask) == 0) { + return TRUE; + } + + if ((*pIdledChannelMask) & pChannel->channelMask) { + goto done; + } + + /*! Check whether channel is idle. */ + pDevEvo->hal->IsChannelIdle(pDevEvo, pChannel, sd, &isChannelIdle); + + if (!isChannelIdle) { + return FALSE; + } + + /*! record idle channel mask to use in next check */ + *pIdledChannelMask |= pChannel->channelMask; + +done: + pSyncpt->channelMask &= ~pChannel->channelMask; + return TRUE; +} + +static NvBool GarbageCollectSyncptHelperOneSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pSyncpt, + NVEvoChannelMask *pIdledChannelMask) +{ + NvBool ret = TRUE; + NvU32 head, sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + /*! + * If a given channel isn't idle, continue to check if this syncpt + * is used on other channels which may be idle. + */ + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (!GarbageCollectSyncptHelperOneChannel( + pDevEvo, + sd, + pDevEvo->head[head].layer[layer], + pSyncpt, + &pIdledChannelMask[sd])) { + ret = FALSE; + } + } + } + } + + return ret; +} + +/*! + * This API is used to unregister the given syncpt object. + */ +void nvRmEvoFreeSyncpt( + NVDevEvoRec *pDevEvo, + NVEvoSyncpt *pEvoSyncpt) +{ + if ((pEvoSyncpt == NULL) || !pDevEvo->supportsSyncpts) { + return; + } + + /*! Put reference of syncptid from nvhost */ + NvKmsSyncPtOpParams params = { }; + params.put.id = pEvoSyncpt->id; + nvkms_syncpt_op(NVKMS_SYNCPT_OP_PUT, ¶ms); + + /*! Free handles */ + FreeSyncptHandle(pDevEvo, pEvoSyncpt); +} + +/*! + * This API try to find free syncpt and then unregisters it. 
+ * It searches global table, and when finds that all channels using this + * syncpt are idle then frees it. It makes sure that syncpt is not part + * of current flip. + */ +NvBool nvRmGarbageCollectSyncpts( + NVDevEvoRec *pDevEvo) +{ + NvU32 i; + NvBool freedSyncpt = FALSE; + NVEvoChannelMask idledChannelMask[NVKMS_MAX_SUBDEVICES] = { 0 }; + + if (!pDevEvo->supportsSyncpts) { + return FALSE; + } + + for (i = 0; i < NV_SYNCPT_GLOBAL_TABLE_LENGTH; i++) { + + NvBool allLayersIdle = NV_TRUE; + + if (pDevEvo->pAllSyncptUsedInCurrentFlip != NULL) { + if (pDevEvo->pAllSyncptUsedInCurrentFlip[i]) { + /*! syncpt is part of current flip, so skip it */ + continue; + } + } + + if (pDevEvo->preSyncptTable[i].hCtxDma == 0) { + /*! syncpt isn't registered, so skip it */ + continue; + } + + allLayersIdle = GarbageCollectSyncptHelperOneSyncpt( + pDevEvo, + &pDevEvo->preSyncptTable[i], + idledChannelMask); + + if (allLayersIdle) { + /*! Free handles */ + FreeSyncptHandle(pDevEvo, &pDevEvo->preSyncptTable[i]); + freedSyncpt = TRUE; + } + } + + return freedSyncpt; +} + +NvU32 nvRmEvoBindDispContextDMA( + NVDevEvoPtr pDevEvo, + NVEvoChannelPtr pChannel, + NvU32 hCtxDma) +{ + NV0002_CTRL_BIND_CONTEXTDMA_PARAMS params = { }; + NvU32 ret; + NvBool retryOnlyOnce = TRUE; + + params.hChannel = pChannel->pb.channel_handle; + +retryOnce: + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + hCtxDma, + NV0002_CTRL_CMD_BIND_CONTEXTDMA, + ¶ms, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + /*! + * syncpts (lazily freed) occupy space in the disp ctxDma hash + * table, and therefore may cause bind ctxDma to fail. + * Free any unused syncpts and try again. + */ + if (retryOnlyOnce) { + /*! try to free syncpt only once */ + if (nvRmGarbageCollectSyncpts(pDevEvo)) { + retryOnlyOnce = FALSE; + goto retryOnce; + } + } + } + return ret; +} + + +NvU32 nvRmEvoAllocateAndBindDispContextDMA( + NVDevEvoPtr pDevEvo, + NvU32 hMemory, + const enum NvKmsSurfaceMemoryLayout layout, + NvU64 limit) +{ + NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxdmaParams = { }; + NvU32 hDispCtxDma; + NvU32 flags = DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + NvU32 ret; + int h; + + /* each surface to be displayed needs its own ctx dma. 
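+ * (The ctx dma allocated here is bound to the core channel and then to
+ * every layer channel on every head, below.)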
*/ + nvAssert(pDevEvo->displayHandle != 0); + + nvAssert(pDevEvo->core); + nvAssert(pDevEvo->core->pb.channel_handle); + + nvAssert(hMemory); + nvAssert(limit); + + switch (layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _BL); + break; + case NvKmsSurfaceMemoryLayoutPitch: + flags |= DRF_DEF(OS03, _FLAGS, _PTE_KIND, _PITCH); + break; + } + + hDispCtxDma = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + ctxdmaParams.hMemory = hMemory; + ctxdmaParams.flags = flags; + ctxdmaParams.offset = 0; + ctxdmaParams.limit = limit; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + hDispCtxDma, + NV01_CONTEXT_DMA, + &ctxdmaParams); + + if (ret != NVOS_STATUS_SUCCESS) { + goto cleanup_this_handle_and_fail; + } + + ret = nvRmEvoBindDispContextDMA(pDevEvo, pDevEvo->core, hDispCtxDma); + + if (ret != NVOS_STATUS_SUCCESS) { + goto free_this_handle_and_fail; + } + + for (h = 0; h < pDevEvo->numHeads; h++) { + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[h].numLayers; layer++) { + if (pDevEvo->head[h].layer[layer]) { + nvAssert(pDevEvo->head[h].layer[layer]->pb.channel_handle); + + ret = nvRmEvoBindDispContextDMA(pDevEvo, + pDevEvo->head[h].layer[layer], + hDispCtxDma); + + if (ret != NVOS_STATUS_SUCCESS) { + goto free_this_handle_and_fail; + } + } + } + } + + return hDispCtxDma; + +free_this_handle_and_fail: + + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, hDispCtxDma); + + /* Fall through */ +cleanup_this_handle_and_fail: + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hDispCtxDma); + + return 0; +} + +void nvRmEvoFreeDispContextDMA(NVDevEvoPtr pDevEvo, + NvU32 *hDispCtxDma) +{ + if (*hDispCtxDma) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, *hDispCtxDma); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, *hDispCtxDma); + *hDispCtxDma = 0; + } +} + +void nvRmEvoUnMapVideoMemory(NVDevEvoPtr pDevEvo, NvU32 memoryHandle, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES]) +{ + unsigned int sd; + NvU32 ret; + + if (memoryHandle == 0) { + return; + } + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (subDeviceAddress[sd] != NULL) { + ret = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + memoryHandle, + subDeviceAddress[sd], + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvAssert(!"UnmapMemory() failed"); + } + } + + subDeviceAddress[sd] = NULL; + } +} + +NvBool nvRmEvoMapVideoMemory(NVDevEvoPtr pDevEvo, + NvU32 memoryHandle, NvU64 size, + void *subDeviceAddress[NVKMS_MAX_SUBDEVICES], + NvU32 subDeviceMask) +{ + NvU32 ret; + + unsigned int sd; + + nvkms_memset(subDeviceAddress, 0, sizeof(void*) * NVKMS_MAX_SUBDEVICES); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + void *address = NULL; + + if (((1 << sd) & subDeviceMask) == 0) { + continue; + } + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + memoryHandle, + 0, + size, + &address, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvRmEvoUnMapVideoMemory(pDevEvo, memoryHandle, subDeviceAddress); + return FALSE; + } + subDeviceAddress[sd] = address; + } + return TRUE; +} + +static NvBool GetClassList(NVDevEvoPtr pDevEvo) +{ + NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS classListParams = { 0 }; + NvU32 ret; + + classListParams.numClasses = 0; + classListParams.classList = NvP64_NULL; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + &classListParams, sizeof(classListParams)); + + if (ret 
!= NVOS_STATUS_SUCCESS) { + return FALSE; + } + + pDevEvo->supportedClasses = + nvCalloc(classListParams.numClasses, sizeof(NvU32)); + + if (pDevEvo->supportedClasses == NULL) { + return FALSE; + } + + classListParams.classList = NV_PTR_TO_NvP64(pDevEvo->supportedClasses); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + &classListParams, sizeof(classListParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFree(pDevEvo->supportedClasses); + pDevEvo->supportedClasses = NULL; + return FALSE; + } + + pDevEvo->numClasses = classListParams.numClasses; + + return TRUE; +} + +static NvBool GetEngineListOneSubDevice(NVDevEvoPtr pDevEvo, NvU32 sd) +{ + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS engineListParams = { 0 }; + NvU32 ret; + NVSubDeviceEvoPtr pSubDevice = pDevEvo->pSubDevices[sd]; + size_t length; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_ENGINES_V2, + &engineListParams, sizeof(engineListParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + if (engineListParams.engineCount == 0) { + return TRUE; + } + + length = engineListParams.engineCount * sizeof(NvU32); + + pSubDevice->supportedEngines = nvAlloc(length); + + if (pSubDevice->supportedEngines == NULL) { + return FALSE; + } + + nvkms_memcpy(pSubDevice->supportedEngines, + engineListParams.engineList, + length); + pSubDevice->numEngines = engineListParams.engineCount; + + return TRUE; +} + +static NvBool GetEngineList(NVDevEvoPtr pDevEvo) +{ + int sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (!GetEngineListOneSubDevice(pDevEvo, sd)) { + return FALSE; + } + } + + return TRUE; +} + +static void FreeSubDevice(NVDevEvoPtr pDevEvo, NVSubDeviceEvoPtr pSubDevice) +{ + if (pSubDevice == NULL) { + return; + } + + if (pSubDevice->handle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSubDevice->handle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle); + } + + if (pSubDevice->gpuString[0] != '\0') { + nvEvoLogDebug(EVO_LOG_INFO, "Freed %s", pSubDevice->gpuString); + } + + nvFree(pSubDevice->supportedEngines); + + nvFree(pSubDevice); +} + +static NVSubDeviceEvoPtr AllocSubDevice(NVDevEvoPtr pDevEvo, const NvU32 sd) +{ + NV2080_ALLOC_PARAMETERS subdevAllocParams = { 0 }; + NV2080_CTRL_GPU_GET_ID_PARAMS getIdParams = { 0 }; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidParams = NULL; + NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS pciInfoParams = { 0 }; + NvU32 ret; + const char *uuid; + + NVSubDeviceEvoPtr pSubDevice = nvCalloc(1, sizeof(*pSubDevice)); + + if (pSubDevice == NULL) { + goto failure; + } + + pSubDevice->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + subdevAllocParams.subDeviceId = sd; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSubDevice->handle, + NV20_SUBDEVICE_0, + &subdevAllocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize subDevice"); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSubDevice->handle); + pSubDevice->handle = 0; + goto failure; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_ID, + &getIdParams, + sizeof(getIdParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to identify GPU"); + goto failure; + } + + pSubDevice->gpuId = getIdParams.gpuId; + + /* Query the UUID for the gpuString. 
*/ + + pGidParams = nvCalloc(1, sizeof(*pGidParams)); + + if (pGidParams == NULL) { + goto failure; + } + + pGidParams->flags = + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1); + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pSubDevice->handle, + NV2080_CTRL_CMD_GPU_GET_GID_INFO, + pGidParams, + sizeof(*pGidParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + /* If the query failed, make sure the UUID is cleared out. */ + nvkms_memset(pGidParams, 0, sizeof(*pGidParams)); + } + + /* Query the PCI bus address for the gpuString. */ + + pciInfoParams.gpuId = pSubDevice->gpuId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_PCI_INFO, + &pciInfoParams, sizeof(pciInfoParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + /* If the query failed, make sure the PCI bus address is cleared out. */ + nvkms_memset(&pciInfoParams, 0, sizeof(pciInfoParams)); + } + + pSubDevice->gpuLogIndex = nvGetGpuLogIndex(); + + /* + * Create the gpuString, using this example format: + * GPU:0 (GPU-af2422f5-2719-29de-567f-ac899cf458c4) @ PCI:0000:01:00.0 + */ + if ((pGidParams->data[0] == '\0') || (pGidParams->length == 0)) { + uuid = ""; + } else { + uuid = (const char *) pGidParams->data; + } + + nvkms_snprintf(pSubDevice->gpuString, sizeof(pSubDevice->gpuString), + "GPU:%d (%s) @ PCI:%04x:%02x:%02x.0", + pSubDevice->gpuLogIndex, uuid, + pciInfoParams.domain, + pciInfoParams.bus, + pciInfoParams.slot); + + pSubDevice->gpuString[sizeof(pSubDevice->gpuString) - 1] = '\0'; + + nvEvoLogDebug(EVO_LOG_INFO, "Allocated %s", pSubDevice->gpuString); + nvFree(pGidParams); + + return pSubDevice; + +failure: + FreeSubDevice(pDevEvo, pSubDevice); + nvFree(pGidParams); + + return NULL; +} + +static void CloseDevice(NVDevEvoPtr pDevEvo) +{ + NvU32 i; + + for (i = 0; i < ARRAY_LEN(pDevEvo->openedGpuIds); i++) { + const NvU32 gpuId = pDevEvo->openedGpuIds[i]; + + if (gpuId == NV0000_CTRL_GPU_INVALID_ID) { + break; + } + + nvkms_close_gpu(gpuId); + pDevEvo->openedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + } +} + +static NvBool OpenTegraDevice(NVDevEvoPtr pDevEvo) +{ + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 }; + nv_gpu_info_t *gpu_info = NULL; + NvU32 ret, gpu_count = 0; + + nvAssert(pDevEvo->deviceId == NVKMS_DEVICE_ID_TEGRA); + + gpu_info = nvAlloc(NV_MAX_GPUS * sizeof(*gpu_info)); + if (gpu_info == NULL) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to allocate GPU ids arrays"); + goto fail; + } + + gpu_count = nvkms_enumerate_gpus(gpu_info); + if (gpu_count == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "No NVIDIA GPUs found"); + goto fail; + } + + if (gpu_count != 1) { + // XXX If the system has both Tegra/iGPU and dGPU, it is not + // guaranteed to find the Tegra, so fail. 
+ nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "More than one NVIDIA GPU found " + "in a Tegra configuration where only Tegra is expected."); + goto fail; + } + + if (!nvkms_open_gpu(gpu_info[0].gpu_id)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU"); + goto fail; + } + + pDevEvo->openedGpuIds[0] = gpu_info[0].gpu_id; + params.gpuId = gpu_info[0].gpu_id; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ID_INFO, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID"); + goto fail; + } + + pDevEvo->deviceId = params.deviceInstance; + + nvFree(gpu_info); + return TRUE; + +fail: + nvFree(gpu_info); + CloseDevice(pDevEvo); + return FALSE; +} + +static NvBool OpenDevice(NVDevEvoPtr pDevEvo) +{ + NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS idParams = { }; + NvU32 ret, i, gpuIdIndex = 0; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ATTACHED_IDS, + &idParams, sizeof(idParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to query attached GPUs"); + goto fail; + } + + ct_assert(ARRAY_LEN(pDevEvo->openedGpuIds) >= ARRAY_LEN(idParams.gpuIds)); + + for (i = 0; i < ARRAY_LEN(idParams.gpuIds); i++) { + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS params = { 0 }; + const NvU32 gpuId = idParams.gpuIds[i]; + + if (gpuId == NV0000_CTRL_GPU_INVALID_ID) { + break; + } + + nvAssert(pDevEvo->openedGpuIds[gpuIdIndex] == + NV0000_CTRL_GPU_INVALID_ID); + + params.gpuId = gpuId; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_GPU_GET_ID_INFO, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to find GPU ID"); + goto fail; + } + + if (pDevEvo->deviceId != params.deviceInstance) { + continue; + } + + if (!nvkms_open_gpu(gpuId)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to open GPU"); + goto fail; + } + + pDevEvo->openedGpuIds[gpuIdIndex++] = gpuId; + } + + return TRUE; + +fail: + CloseDevice(pDevEvo); + return FALSE; +} + +static void NonStallInterruptCallback( + void *arg, + void *pEventDataVoid, + NvU32 hEvent, + NvU32 data, + NV_STATUS status) +{ + /* + * We are called within resman's altstack and locks. Schedule a separate + * callback to execute with the nvkms_lock. + * + * XXX It might be nice to use a lighter-weight lock here to check if any + * requests are pending in any NvKmsDeferredRequestFifo before scheduling + * nvKmsServiceNonStallInterrupt(). 
+ */ + + (void) nvkms_alloc_timer_with_ref_ptr( + nvKmsServiceNonStallInterrupt, /* callback */ + arg, /* argument (this is a ref_ptr to a pDevEvo) */ + 0, /* dataU32 */ + 0); /* usec */ +} + +static void UnregisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->nonStallInterrupt.handle != 0) { + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS + eventNotificationParams = { 0 }; + + eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD; + eventNotificationParams.action = + NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &eventNotificationParams, + sizeof(eventNotificationParams)); + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->nonStallInterrupt.handle); + } + + pDevEvo->nonStallInterrupt.handle = 0; +} + +static NvBool RegisterNonStallInterruptCallback(NVDevEvoPtr pDevEvo) +{ + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS eventNotificationParams = { 0 }; + + pDevEvo->nonStallInterrupt.handle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (!nvRmRegisterCallback(pDevEvo, + &pDevEvo->nonStallInterrupt.callback, + pDevEvo->ref_ptr, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle, + NonStallInterruptCallback, + NV2080_NOTIFIERS_FIFO_EVENT_MTHD | + NV01_EVENT_NONSTALL_INTR)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to register nonstall interrupt callback"); + goto failure_free_handle; + } + + // Setup event notification + eventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD; + eventNotificationParams.action = + NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + + if (nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &eventNotificationParams, + sizeof(eventNotificationParams)) + != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to set nonstall interrupt notification"); + goto failure_free_callback_and_handle; + } + + return TRUE; + +failure_free_callback_and_handle: + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[0]->handle, + pDevEvo->nonStallInterrupt.handle); +failure_free_handle: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->nonStallInterrupt.handle); + pDevEvo->nonStallInterrupt.handle = 0; + return FALSE; +} + +NvBool nvRmAllocDeviceEvo(NVDevEvoPtr pDevEvo, + const struct NvKmsAllocDeviceRequest *pRequest) +{ + NV0080_ALLOC_PARAMETERS allocParams = { 0 }; + NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS getNumSubDevicesParams = { 0 }; + NvU32 ret, sd; + + if (nvEvoGlobal.clientHandle == 0) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Client handle not initialized"); + goto failure; + } + + /* + * RM deviceIds should be within [0,NV_MAX_DEVICES); check + * that the client provided a value in range, and add one when + * using deviceId as the per-device unique identifier in the + * RM handle allocator: the identifier is expected to be != 0. 
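+ * For example, deviceId 0 becomes handle-allocator identifier 1 via the
+ * "pRequest->deviceId + 1" passed to nvInitUnixRmHandleAllocator() below.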
+ */ + + if ((pRequest->deviceId >= NV_MAX_DEVICES) && + (pRequest->deviceId != NVKMS_DEVICE_ID_TEGRA)) { + goto failure; + } + + pDevEvo->dpTimer = nvDPAllocTimer(pDevEvo); + if (!pDevEvo->dpTimer) { + goto failure; + } + + if (!nvInitUnixRmHandleAllocator(&pDevEvo->handleAllocator, + nvEvoGlobal.clientHandle, + pRequest->deviceId + 1)) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize handles"); + goto failure; + } + + pDevEvo->deviceHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + pDevEvo->deviceId = pRequest->deviceId; + pDevEvo->sli.mosaic = pRequest->sliMosaic; + + if (pRequest->deviceId == NVKMS_DEVICE_ID_TEGRA) { + /* + * On Tegra, NVKMS client is not desktop RM client, so + * enumerate and open first GPU. + */ + if (!OpenTegraDevice(pDevEvo)) { + goto failure; + } + + pDevEvo->usesTegraDevice = TRUE; + } else if (!OpenDevice(pDevEvo)) { + goto failure; + } + + allocParams.deviceId = pDevEvo->deviceId; + + /* Give NVKMS a private GPU virtual address space. */ + allocParams.hClientShare = nvEvoGlobal.clientHandle; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV01_DEVICE_0, + &allocParams); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Failed to initialize device"); + goto failure; + } + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES, + &getNumSubDevicesParams, + sizeof(getNumSubDevicesParams)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to determine number of GPUs"); + goto failure; + } + + ct_assert(NVKMS_MAX_SUBDEVICES == NV_MAX_SUBDEVICES); + if ((getNumSubDevicesParams.numSubDevices == 0) || + (getNumSubDevicesParams.numSubDevices > + ARRAY_LEN(pDevEvo->pSubDevices))) { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, "Unsupported number of GPUs: %d", + getNumSubDevicesParams.numSubDevices); + goto failure; + } + + pDevEvo->numSubDevices = getNumSubDevicesParams.numSubDevices; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + pDevEvo->pSubDevices[sd] = AllocSubDevice(pDevEvo, sd); + if (pDevEvo->pSubDevices[sd] == NULL) { + goto failure; + } + } + + pDevEvo->gpuLogIndex = pDevEvo->pSubDevices[0]->gpuLogIndex; + + if (!GetClassList(pDevEvo) || !GetEngineList(pDevEvo)) { + goto failure; + } + + if (!RegisterNonStallInterruptCallback(pDevEvo)) { + goto failure; + } + + return TRUE; + +failure: + nvRmFreeDeviceEvo(pDevEvo); + return FALSE; +} + +void nvRmFreeDeviceEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 sd; + + UnregisterNonStallInterruptCallback(pDevEvo); + + nvFree(pDevEvo->supportedClasses); + pDevEvo->supportedClasses = NULL; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + FreeSubDevice(pDevEvo, pDevEvo->pSubDevices[sd]); + pDevEvo->pSubDevices[sd] = NULL; + } + + if (pDevEvo->deviceHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pDevEvo->deviceHandle); + pDevEvo->deviceHandle = 0; + } + + nvTearDownUnixRmHandleAllocator(&pDevEvo->handleAllocator); + + nvDPFreeTimer(pDevEvo->dpTimer); + pDevEvo->dpTimer = NULL; + + CloseDevice(pDevEvo); +} + + +/*! + * Determine whether all the dpys in the dpyIdList can be activated together. + * + * \param[in] pDispEvo The disp on which we search for a head. + * \param[in] dpyIdList The connectors to test. + * + * \return Return TRUE if all dpys can be driven simultaneously. 
+ */ +NvBool nvRmIsPossibleToActivateDpyIdList(NVDispEvoPtr pDispEvo, + const NVDpyIdList dpyIdList) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS mapParams = { 0 }; + NvU32 ret = 0; + + /* Trivially accept an empty dpyIdList. */ + + if (nvDpyIdListIsEmpty(dpyIdList)) { + return TRUE; + } + + /* don't even try if EVO isn't initialized (e.g. during a VT switch) */ + + if (!pDevEvo->gpus) { + return FALSE; + } + + /* build a mask of all the displays to use */ + + mapParams.subDeviceInstance = pDispEvo->displayOwner; + + mapParams.displayMask = nvDpyIdListToNvU32(dpyIdList); + + /* ask RM for the head routing */ + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SYSTEM_GET_HEAD_ROUTING_MAP, + &mapParams, + sizeof(mapParams)); + + if ((ret != NVOS_STATUS_SUCCESS) || (mapParams.displayMask == 0)) { + char *dpyIdListStr = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "The requested configuration of display devices " + "(%s) is not supported on this GPU.", + nvSafeString(dpyIdListStr, "unknown")); + nvFree(dpyIdListStr); + + return FALSE; + } + + /* make sure we got everything we asked for */ + + if (mapParams.displayMask != nvDpyIdListToNvU32(dpyIdList)) { + char *requestedDpyIdListStr; + char *returnedDpyIdListStr; + + requestedDpyIdListStr = + nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + + returnedDpyIdListStr = + nvGetDpyIdListStringEvo(pDispEvo, + nvNvU32ToDpyIdList(mapParams.displayMask)); + + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "The requested configuration of display devices " + "(%s) is not supported on this GPU; " + "%s is recommended, instead.", + nvSafeString(requestedDpyIdListStr, "unknown"), + nvSafeString(returnedDpyIdListStr, "unknown")); + + nvFree(requestedDpyIdListStr); + nvFree(returnedDpyIdListStr); + + return FALSE; + } + + return TRUE; +} + + +/*! + * Tell the RM to save or restore the console VT state. + * + * \param[in] cmd indicate RM about the action. + * + * \return TRUE on success, FALSE on failure. + */ +NvBool nvRmVTSwitch(NVDevEvoPtr pDevEvo, NvU32 cmd) +{ + NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS params = { 0 }; + NvU32 ret; + + params.cmd = cmd; + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_OS_UNIX_VT_SWITCH, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + return TRUE; +} + +NvBool nvRmGetVTFBInfo(NVDevEvoPtr pDevEvo) +{ + NvU32 ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + NV0080_CTRL_CMD_OS_UNIX_VT_GET_FB_INFO, + &pDevEvo->vtFbInfo, sizeof(pDevEvo->vtFbInfo)); + + if (ret != NVOS_STATUS_SUCCESS) { + return FALSE; + } + + return TRUE; +} + +/*! + * Import the current framebuffer console memory, for later use with NVKMS-based + * console restore. + * + * Note this relies on pDevEvo->fbInfo populated by nvRmVTSwitch(). + * + * There are several cases in which NVKMS cannot perform console restore: + * + * - Anything other than linear frame buffer consoles (i.e., VGA text modes, + * Non-linear or paletted graphical modes, etc). For those, resman cannot + * query the framebuffer dimensions from the kernel, + * NV0080_CTRL_OS_UNIX_VT_SWITCH_CMD_SAVE_VT_STATE returns empty fbInfo + * params, and consequently pDevEvo->fbInfo.width == 0. + * + * - Linear frame buffer console with an unaligned pitch. 
In this case, + * nvEvoRegisterSurface() will fail: it has to ensure the surface registration + * satisfies the EVO method interface requirement that PITCH surfaces are + * multiples of 256 bytes. Consequently, pDevEvo->fbConsoleSurfaceHandle will + * be 0. + * + * - Depth 8 frame buffer consoles: these are color index, and cannot be + * supported by NVKMS console restore because they require the VGA palette, + * which exists in special RAM in the VGA core, so we can't name it with a + * ctxdma that we can feed into EVO's LUT. The pFbInfo->depth switch below + * will reject depth 8. + */ +void nvRmImportFbConsoleMemory(NVDevEvoPtr pDevEvo) +{ + NvU32 ret; + struct NvKmsRegisterSurfaceParams registration = { }; + const NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pFbInfo = &pDevEvo->vtFbInfo; + NvHandle hMemory; + + nvAssert(pDevEvo->fbConsoleSurfaceHandle == 0); + + if (pFbInfo->width == 0) { + // No console memory to map. + return; + } + + switch (pFbInfo->depth) { + case 15: + registration.request.format = NvKmsSurfaceMemoryFormatX1R5G5B5; + break; + case 16: + registration.request.format = NvKmsSurfaceMemoryFormatR5G6B5; + break; + case 32: + // That's a lie, it's really depth 24. Fall through. + case 24: + registration.request.format = NvKmsSurfaceMemoryFormatX8R8G8B8; + break; + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Unsupported framebuffer console depth %d", + pFbInfo->depth); + return; + } + + hMemory = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + if (hMemory == 0) { + return; + } + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + hMemory, + NV01_MEMORY_FRAMEBUFFER_CONSOLE, + NULL); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Failed to map framebuffer console memory"); + goto done; + } + + registration.request.useFd = FALSE; + registration.request.rmClient = nvEvoGlobal.clientHandle; + registration.request.widthInPixels = pFbInfo->width; + registration.request.heightInPixels = pFbInfo->height; + registration.request.layout = NvKmsSurfaceMemoryLayoutPitch; + + registration.request.planes[0].u.rmObject = hMemory; + registration.request.planes[0].pitch = pFbInfo->pitch; + registration.request.planes[0].rmObjectSizeInBytes = + (NvU64) pFbInfo->height * (NvU64) pFbInfo->pitch; + + nvEvoRegisterSurface(pDevEvo, pDevEvo->pNvKmsOpenDev, ®istration, + NvHsMapPermissionsNone); + + pDevEvo->fbConsoleSurfaceHandle = registration.reply.surfaceHandle; + + // nvEvoRegisterSurface dups the handle, so we can free the one we just + // imported. + nvRmApiFree(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + hMemory); +done: + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, hMemory); +} + +NvBool nvRmQueryDpAuxLog(NVDispEvoRec *pDispEvo, NvS64 *pValue) +{ + *pValue = FALSE; + return TRUE; +} + + +/*! + * Return the GPU's current PTIMER, or 0 if the query fails. 
+ */ +NvU64 nvRmGetGpuTime(NVDevEvoPtr pDevEvo) +{ + const NvU32 sd = 0; + NV2080_CTRL_TIMER_GET_TIME_PARAMS params; + + NvU32 ret; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_TIMER_GET_TIME, + ¶ms, sizeof(params)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDebug(EVO_LOG_ERROR, "Failed to query GPU time, ret = %d", ret); + return 0; + } + + return params.time_nsec; +} + +NvBool nvRmSetGc6Allowed(NVDevEvoPtr pDevEvo, NvBool allowed) +{ + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS params = { }; + NvU32 sd; + + if (allowed == pDevEvo->gc6Allowed) { + return TRUE; + } + + params.action = allowed ? NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_DEC : + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_INC; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + NvU32 ret = nvRmApiControl( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + NV2080_CTRL_CMD_OS_UNIX_GC6_BLOCKER_REFCNT, + ¶ms, sizeof(params)); + if (ret != NVOS_STATUS_SUCCESS) { + // XXX This is catastrophic, is there a good way to unravel? + nvEvoLogDevDebug( + pDevEvo, EVO_LOG_ERROR, + "Failed to modify GC6 blocker refcount, sd = %d, ret = %x", + sd, ret); + return FALSE; + } + } + + pDevEvo->gc6Allowed = allowed; + + /* + * If we are just now disallowing GC6, it's possible that we previously + * entered GC6 and invalidated display channel state. Re-initialize it here + * to ensure that future modesets are successful. + */ + if (!allowed && pDevEvo->core) { + NvU32 channelIdx; + + pDevEvo->hal->InitChannel(pDevEvo, pDevEvo->core); + pDevEvo->coreInitMethodsPending = TRUE; + + for (channelIdx = 0; channelIdx < pDevEvo->numHeads; channelIdx++) { + // XXX We should InitChannel() for all per-head channels when coming + // out of GC6. + pDevEvo->hal->InitChannel( + pDevEvo, pDevEvo->head[channelIdx].layer[NVKMS_MAIN_LAYER]); + } + } + + return TRUE; +} + +/*! + * Register an RM callback function for the RG line 1 interrupt. + * + * \param[in] pDispEvo The display on which to allocate the callback + * + * \param[in] head The head on which to allocate the callback + * + * \param[in] pCallback The callback function pointer to be registered + * + * \return Handle to callback object on success, 0 on failure. This same + * handle must be used to unregister the callback. + */ +NvU32 nvRmAddRgLine1Callback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + NV0092_REGISTER_RG_LINE_CALLBACK_FN pCallback) +{ + NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS rgLineParams = { 0 }; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + NvU32 ret; + + rgLineParams.subDeviceInstance = pDispEvo->displayOwner; + rgLineParams.head = head; + rgLineParams.rgLineNum = 1; + rgLineParams.pCallbkFn = pCallback; + + /* + * This object only takes a single pointer, but we want it to operate + * on a {pDispEvo,head} tuple, but we want to allocate the callback + * in NVKMS using a ref_ptr to allow for the pDispEvo being freed while + * callbacks are outstanding, so we bundle pDispEvo->ref_ptr and head + * into a single value here before passing it to RM, then decouple them + * in the RM callback function before allocating the NVKMS timer callback. + * + * This works because pDispEvo->ref_ptr will never have the lowest 2 + * bits set, and RM doesn't do anything with pCallbkParams aside from + * passing it back to the callback function. 
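+ *
+ * For example, with head 1 and an aligned ref_ptr value of 0x1000, the
+ * value handed to RM is 0x1001, from which the ref_ptr (value & ~3) and
+ * the head (value & 3) can both be recovered.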
+ */ + nvAssert(!((NvUPtr)pDispEvo->ref_ptr & head)); + rgLineParams.pCallbkParams = (void*)((NvUPtr)pDispEvo->ref_ptr | head); + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + handle, + NV0092_RG_LINE_CALLBACK, + &rgLineParams); + + if (ret == NVOS_STATUS_SUCCESS) { + return handle; + } else { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to enable RG line interrupt, ret: %d", ret); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + return 0; + } +} + +/*! + * Unregister an RM callback function previously registered with + * nvRmAddRgLine1Callback. + * + * \param[in] pDispEvo The display on which to unregister the + * callback + * + * \param[in] callbackObjectHandle Handle to the previously allocated + * callback object + */ +void nvRmRemoveRgLine1Callback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (callbackObjectHandle == 0) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to disable RG line interrupt, obj handle 0"); + return; + } + + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + callbackObjectHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to disable RG line interrupt, ret: %d", ret); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, callbackObjectHandle); +} + +/*! + * Register an RM callback function for the VBlankinterrupt. + * + * \param[in] pDispEvo The display on which to allocate the callback + * + * \param[in] head The head on which to allocate the callback + * + * \param[in] pCallback The callback function pointer to be registered + * + * \return Handle to callback object on success, 0 on failure. This same + * handle must be used to unregister the callback. + */ +NvU32 nvRmAddVBlankCallback( + const NVDispEvoRec *pDispEvo, + NvU32 head, + OSVBLANKCALLBACKPROC pCallback) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + const NvU32 sd = pDispEvo->displayOwner; + NvU32 ret; + NvU32 handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS params = { + .pProc = pCallback, + .LogicalHead = head, + .pParm1 = pDispEvo->ref_ptr, + + /* + * The callback's second argument is a pointer, which is large enough to + * store the head number. + */ + .pParm2 = (void *)(NvUPtr)head, + }; + + ret = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + handle, + NV9010_VBLANK_CALLBACK, + ¶ms); + + if (ret == NVOS_STATUS_SUCCESS) { + return handle; + } else { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to enable VBlank callback, ret: %d", ret); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + return 0; + } +} + +/*! + * Unregister an RM callback function previously registered with + * nvRmAddVBlankCallback. 
+ * + * \param[in] pDispEvo The display on which to unregister the + * callback + * + * \param[in] callbackObjectHandle Handle to the previously allocated + * callback object + */ +void nvRmRemoveVBlankCallback(const NVDispEvoRec *pDispEvo, + NvU32 callbackObjectHandle) +{ + const NvU32 sd = pDispEvo->displayOwner; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + NvU32 ret; + + if (callbackObjectHandle == 0) { + // already removed + return; + } + + ret = nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + callbackObjectHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_ERROR, + "Failed to disable VBlank callback, ret: %d", ret); + } + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, callbackObjectHandle); +} + +/*! + * Initialize the dynamic display mux on supported systems. + * + * \param[in] pDpyEvo The dpy on which to initialize the mux. + */ +static void MuxInit(const NVDpyEvoRec *pDpyEvo) +{ + NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS params = { 0 }; + NVDispEvoPtr pDispEvo = pDpyEvo->pDispEvo; + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + NvU32 ret; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = nvDpyEvoGetConnectorId(pDpyEvo); + + if (pDpyEvo->internal) { + /* Attempt to get the EDID from ACPI. This is required for internal + * displays only, as the internal mux initialization requires data + * from the internal panel's EDID, while the external mux can be + * initialized in the absence of a display, in which case there is + * obviously no EDID present. The EDID read is done via ACPI, in + * order to accommodate mux initialization while the internal panel + * is disconnected from the GPU. */ + + /* Map with hard-coded data for systems known to support dynamic mux + * switching. This is a poor-man's alternative to the WDDM driver's + * CDisplayMgr::NVInitializeACPIToDeviceMaskMap() */ + NV0073_CTRL_SPECIFIC_SET_ACPI_ID_MAPPING_PARAMS acpiMap = { + .mapTable = { + {.acpiId = 0x8001a420, .displayId = 0x1000, .dodIndex = 0}, + } + }; + NVEdidRec edid = { }; + NVParsedEdidEvoRec *pParsedEdid = NULL; + NVEvoInfoStringRec infoString; + + ret = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_ACPI_ID_MAPPING, + &acpiMap, sizeof(acpiMap)); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLogDebug(EVO_LOG_ERROR, "Failed to set ACPI ID map."); + return; + } + + nvInitInfoString(&infoString, NULL, 0); + + /* Retrieve the internal panel's EDID from ACPI */ + if (!nvDpyReadAndParseEdidEvo(pDpyEvo, NULL, + NVKMS_EDID_READ_MODE_ACPI, + &edid, &pParsedEdid, + &infoString)) { + /* EDID read is expected to fail on non-dynamic-mux systems. 
+             */
+            goto edid_done;
+        }
+
+        if (edid.length == 0 || pParsedEdid == NULL || !pParsedEdid->valid) {
+            goto edid_done;
+        }
+
+        params.manfId = pParsedEdid->info.manuf_id;
+        params.productId = pParsedEdid->info.product_id;
+
+edid_done:
+        nvFree(edid.buffer);
+        nvFree(pParsedEdid);
+
+        /* Internal mux initialization will fail without manfId/productId */
+        if (!params.manfId || !params.productId) {
+            return;
+        }
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_INIT_MUX_DATA,
+                         &params,
+                         sizeof(params));
+
+    if (ret == NVOS_STATUS_SUCCESS) {
+        pDispEvo->muxDisplays = nvAddDpyIdToDpyIdList(pDpyEvo->id,
+                                                      pDispEvo->muxDisplays);
+    } else {
+        nvEvoLogDebug(EVO_LOG_ERROR, "Failed to initialize mux on %s.",
+                      pDpyEvo->name);
+    }
+}
+
+static NVDpyIdList GetValidMuxDpys(NVDispEvoPtr pDispEvo)
+{
+    NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS params = { 0 };
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+
+    nvRmApiControl(nvEvoGlobal.clientHandle,
+                   pDispEvo->pDevEvo->displayCommonHandle,
+                   NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX,
+                   &params, sizeof(params));
+
+    return nvNvU32ToDpyIdList(params.muxDisplayMask);
+}
+
+void nvRmMuxInit(NVDevEvoPtr pDevEvo)
+{
+    NVDispEvoPtr pDispEvo;
+    int i;
+
+    FOR_ALL_EVO_DISPLAYS(pDispEvo, i, pDevEvo) {
+        NVDpyIdList validMuxDpys = GetValidMuxDpys(pDispEvo);
+        NVDpyEvoPtr pDpyEvo;
+
+        FOR_ALL_EVO_DPYS(pDpyEvo, validMuxDpys, pDispEvo) {
+            MuxInit(pDpyEvo);
+        }
+    }
+}
+
+/*!
+ * Perform mux pre-switch operations
+ *
+ * \param[in]  pDpyEvo  The Dpy of the target mux
+ * \param[in]  state    The target mux state
+ *
+ * \return  TRUE on success; FALSE on failure
+ */
+NvBool nvRmMuxPre(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
+{
+    NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo;
+    NVDevEvoPtr pDevEvo;
+    NvU32 ret;
+
+    pDispEvo = pDpyEvo->pDispEvo;
+    pDevEvo = pDispEvo->pDevEvo;
+
+    if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
+        return FALSE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+    params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO);
+
+    if (state == MUX_STATE_DISCRETE) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
+    } else if (state == MUX_STATE_INTEGRATED) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
+    } else {
+        return FALSE;
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS,
+                         &params, sizeof(params));
+
+    nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPre status %d", ret);
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+/*!
+ * Perform mux switch operation
+ *
+ * \param[in]  pDpyEvo  The Dpy of the target mux
+ * \param[in]  state    The target mux state
+ *
+ * \return  TRUE on success; FALSE on failure
+ */
+NvBool nvRmMuxSwitch(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
+{
+    NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo;
+    NVDevEvoPtr pDevEvo;
+    NvU32 ret;
+
+    pDispEvo = pDpyEvo->pDispEvo;
+    pDevEvo = pDispEvo->pDevEvo;
+
+    if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
+        return FALSE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+
+    if (state == MUX_STATE_DISCRETE) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
+    } else if (state == MUX_STATE_INTEGRATED) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
+    } else {
+        return FALSE;
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX,
+                         &params, sizeof(params));
+
+    nvEvoLogDebug(EVO_LOG_INFO, "RmMuxSwitch status %d", ret);
+
+    /*
+     * Force link training after waiting for the DP AUX link to settle.
+     * The delay duration comes from DFP_MUX_AUX_SETTLE_DELAY_MS_DEFAULT
+     * in drivers/resman/kernel/inc/dfpmux.h.
+     */
+    nvkms_usleep(100000);
+
+    if (pDpyEvo->internal && state == MUX_STATE_DISCRETE) {
+        nvAssert(nvConnectorUsesDPLib(pDpyEvo->pConnectorEvo));
+        nvDPNotifyShortPulse(pDpyEvo->pConnectorEvo->pDpLibConnector);
+        nvDPFireExpiredTimers(pDevEvo);
+    }
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+/*!
+ * Perform mux post-switch operations
+ *
+ * \param[in]  pDpyEvo  The Dpy of the target mux
+ * \param[in]  state    The target mux state
+ *
+ * \return  TRUE on success; FALSE on failure
+ */
+NvBool nvRmMuxPost(const NVDpyEvoRec *pDpyEvo, NvMuxState state)
+{
+    NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo;
+    NVDevEvoPtr pDevEvo;
+    NvU32 ret;
+
+    pDispEvo = pDpyEvo->pDispEvo;
+    pDevEvo = pDispEvo->pDevEvo;
+
+    if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
+        return FALSE;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+    params.flags = DRF_DEF(0073_CTRL_DFP, _DISP_MUX_FLAGS, _SR_ENTER_SKIP, _NO);
+
+    if (state == MUX_STATE_DISCRETE) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_IGPU_TO_DGPU;
+    } else if (state == MUX_STATE_INTEGRATED) {
+        params.flags = NV0073_CTRL_DFP_DISP_MUX_FLAGS_SWITCH_TYPE_DGPU_TO_IGPU;
+    } else {
+        return FALSE;
+    }
+
+    ret = nvRmApiControl(nvEvoGlobal.clientHandle,
+                         pDevEvo->displayCommonHandle,
+                         NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS,
+                         &params, sizeof(params));
+
+    nvEvoLogDebug(EVO_LOG_INFO, "RmMuxPost status %d", ret);
+
+    return ret == NVOS_STATUS_SUCCESS;
+}
+
+/*!
+ * Query the current state of a dynamic mux
+ *
+ * \param[in]  pDpyEvo  The Dpy of the target mux whose state is to be queried
+ *
+ * \return  Mux state (either MUX_STATE_INTEGRATED or MUX_STATE_DISCRETE) on
+ *          success; MUX_STATE_UNKNOWN on failure.
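+ *
+ * Note: none of this ordering is enforced here, but a hypothetical caller
+ * combining these helpers might look like the following sketch:
+ *
+ *     if (nvRmMuxState(pDpyEvo) != MUX_STATE_DISCRETE &&
+ *         nvRmMuxPre(pDpyEvo, MUX_STATE_DISCRETE) &&
+ *         nvRmMuxSwitch(pDpyEvo, MUX_STATE_DISCRETE)) {
+ *         nvRmMuxPost(pDpyEvo, MUX_STATE_DISCRETE);
+ *     }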
+ */
+NvMuxState nvRmMuxState(const NVDpyEvoRec *pDpyEvo)
+{
+    NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS params = { 0 };
+    NVDispEvoPtr pDispEvo;
+    NVDevEvoPtr pDevEvo;
+
+    pDispEvo = pDpyEvo->pDispEvo;
+    pDevEvo = pDispEvo->pDevEvo;
+
+    if (!nvDpyIdIsInDpyIdList(pDpyEvo->id, pDispEvo->muxDisplays)) {
+        return MUX_STATE_UNKNOWN;
+    }
+
+    params.subDeviceInstance = pDispEvo->displayOwner;
+    params.displayId = nvDpyIdToNvU32(pDpyEvo->pConnectorEvo->displayId);
+
+    if (NVOS_STATUS_SUCCESS == nvRmApiControl(nvEvoGlobal.clientHandle,
+                                              pDevEvo->displayCommonHandle,
+                                              NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS,
+                                              &params, sizeof(params))) {
+        if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _INTEGRATED_GPU,
+                         params.muxStatus)) {
+            return MUX_STATE_INTEGRATED;
+        }
+        if (FLD_TEST_DRF(0073_CTRL_DFP, _DISP_MUX, _STATE, _DISCRETE_GPU,
+                         params.muxStatus)) {
+            return MUX_STATE_DISCRETE;
+        }
+    }
+
+    return MUX_STATE_UNKNOWN;
+}
+
+void nvRmRegisterBacklight(NVDispEvoRec *pDispEvo)
+{
+    NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo;
+    NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS dispParams = { 0 };
+    NvU32 displayMask, displayId;
+    NvU32 brightness;
+
+    nvAssert(pDispEvo->backlightDevice == NULL);
+
+    dispParams.subDeviceInstance = pDispEvo->displayOwner;
+
+    if (nvRmApiControl(nvEvoGlobal.clientHandle,
+                       pDevEvo->displayCommonHandle,
+                       NV0073_CTRL_CMD_SYSTEM_GET_INTERNAL_DISPLAYS,
+                       &dispParams, sizeof(dispParams)) != NV_OK) {
+        return;
+    }
+
+    /* Find a display with a backlight */
+    displayMask = dispParams.availableInternalDisplaysMask;
+    for (; displayMask; displayMask &= ~LOWESTBIT(displayMask))
+    {
+        NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 };
+        NV_STATUS status;
+
+        displayId = LOWESTBIT(displayMask);
+
+        params.subDeviceInstance = pDispEvo->displayOwner;
+        params.displayId = displayId;
+
+        status = nvRmApiControl(nvEvoGlobal.clientHandle,
+                                pDevEvo->displayCommonHandle,
+                                NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
+                                &params, sizeof(params));
+
+        if (status == NV_OK)
+        {
+            brightness = params.brightness;
+            break;
+        }
+    }
+
+    if (displayMask == 0)
+    {
+        /* No internal display has backlight */
+        return;
+    }
+
+    pDispEvo->backlightDevice = nvkms_register_backlight(
+        pDevEvo->pSubDevices[pDispEvo->displayOwner]->gpuId,
+        displayId, pDispEvo,
+        brightness);
+}
+
+void nvRmUnregisterBacklight(NVDispEvoRec *pDispEvo)
+{
+    if (pDispEvo->backlightDevice != NULL) {
+        nvkms_unregister_backlight(pDispEvo->backlightDevice);
+    }
+    pDispEvo->backlightDevice = NULL;
+}
diff --git a/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c b/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
new file mode 100644
index 000000000..ab3a9f8a8
--- /dev/null
+++ b/src/nvidia-modeset/src/nvkms-rmapi-dgpu.c
@@ -0,0 +1,260 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nv-kernel-rmapi-ops.h" +#include "nvidia-modeset-os-interface.h" + +#include "nvkms-rmapi.h" + +NvU32 nvRmApiAlloc( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject, + NvU32 hClass, + void *pAllocParams) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_ALLOC; + + ops.params.alloc.hRoot = hClient; + ops.params.alloc.hObjectParent = hParent; + ops.params.alloc.hObjectNew = hObject; + ops.params.alloc.hClass = hClass; + ops.params.alloc.pAllocParms = NV_PTR_TO_NvP64(pAllocParams); + + nvkms_call_rm(&ops); + + return ops.params.alloc.status; +} + +NvU32 nvRmApiAllocMemory64( + NvU32 hClient, + NvU32 hParent, + NvU32 hMemory, + NvU32 hClass, + NvU32 flags, + void **ppAddress, + NvU64 *pLimit) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV01_ALLOC_MEMORY; + + ops.params.allocMemory64.hRoot = hClient; + ops.params.allocMemory64.hObjectParent = hParent; + ops.params.allocMemory64.hObjectNew = hMemory; + ops.params.allocMemory64.hClass = hClass; + ops.params.allocMemory64.flags = flags; + ops.params.allocMemory64.pMemory = NV_PTR_TO_NvP64(*ppAddress); + ops.params.allocMemory64.limit = *pLimit; + + nvkms_call_rm(&ops); + + *pLimit = ops.params.allocMemory64.limit; + *ppAddress = NvP64_VALUE(ops.params.allocMemory64.pMemory); + + return ops.params.allocMemory64.status; +} + +NvU32 nvRmApiControl( + NvU32 hClient, + NvU32 hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_CONTROL; + + ops.params.control.hClient = hClient; + ops.params.control.hObject = hObject; + ops.params.control.cmd = cmd; + ops.params.control.params = NV_PTR_TO_NvP64(pParams); + ops.params.control.paramsSize = paramsSize; + + nvkms_call_rm(&ops); + + return ops.params.control.status; +} + +NvU32 nvRmApiDupObject( + NvU32 hClient, + NvU32 hParent, + NvU32 hObjectDest, + NvU32 hClientSrc, + NvU32 hObjectSrc, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_DUP_OBJECT; + + ops.params.dupObject.hClient = hClient; + ops.params.dupObject.hParent = hParent; + ops.params.dupObject.hObject = hObjectDest; + ops.params.dupObject.hClientSrc = hClientSrc; + ops.params.dupObject.hObjectSrc = hObjectSrc; + ops.params.dupObject.flags = flags; + + nvkms_call_rm(&ops); + + return ops.params.dupObject.status; +} + +NvU32 nvRmApiFree( + NvU32 hClient, + NvU32 hParent, + NvU32 hObject) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV01_FREE; + + ops.params.free.hRoot = hClient; + ops.params.free.hObjectParent = hParent; + ops.params.free.hObjectOld = hObject; + + nvkms_call_rm(&ops); + + return ops.params.free.status; +} + +NvU32 nvRmApiVidHeapControl( + void *pVidHeapControlParams) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + NVOS32_PARAMETERS *pParams = pVidHeapControlParams; + + ops.op = NV04_VID_HEAP_CONTROL; + + ops.params.pVidHeapControl = pParams; + + nvkms_call_rm(&ops); + + return pParams->status; +} + +NvU32 nvRmApiMapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + NvU64 offset, + NvU64 
length, + void **ppLinearAddress, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_MAP_MEMORY; + + ops.params.mapMemory.hClient = hClient; + ops.params.mapMemory.hDevice = hDevice; + ops.params.mapMemory.hMemory = hMemory; + ops.params.mapMemory.offset = offset; + ops.params.mapMemory.length = length; + ops.params.mapMemory.flags = flags; + + nvkms_call_rm(&ops); + + *ppLinearAddress = NvP64_VALUE(ops.params.mapMemory.pLinearAddress); + + return ops.params.mapMemory.status; +} + +NvU32 nvRmApiUnmapMemory( + NvU32 hClient, + NvU32 hDevice, + NvU32 hMemory, + const void *pLinearAddress, + NvU32 flags) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_UNMAP_MEMORY; + + ops.params.unmapMemory.hClient = hClient; + ops.params.unmapMemory.hDevice = hDevice; + ops.params.unmapMemory.hMemory = hMemory; + ops.params.unmapMemory.pLinearAddress = NV_PTR_TO_NvP64(pLinearAddress); + ops.params.unmapMemory.flags = flags; + + nvkms_call_rm(&ops); + + return ops.params.unmapMemory.status; +} + +NvU32 nvRmApiMapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_MAP_MEMORY_DMA; + + ops.params.mapMemoryDma.hClient = hClient; + ops.params.mapMemoryDma.hDevice = hDevice; + ops.params.mapMemoryDma.hDma = hDma; + ops.params.mapMemoryDma.hMemory = hMemory; + ops.params.mapMemoryDma.offset = offset; + ops.params.mapMemoryDma.length = length; + ops.params.mapMemoryDma.flags = flags; + ops.params.mapMemoryDma.dmaOffset = *pDmaOffset; + + nvkms_call_rm(&ops); + + *pDmaOffset = ops.params.mapMemoryDma.dmaOffset; + + return ops.params.mapMemoryDma.status; +} + +NvU32 nvRmApiUnmapMemoryDma( + NvU32 hClient, + NvU32 hDevice, + NvU32 hDma, + NvU32 hMemory, + NvU32 flags, + NvU64 dmaOffset) +{ + nvidia_kernel_rmapi_ops_t ops = { 0 }; + + ops.op = NV04_UNMAP_MEMORY_DMA; + + ops.params.unmapMemoryDma.hClient = hClient; + ops.params.unmapMemoryDma.hDevice = hDevice; + ops.params.unmapMemoryDma.hDma = hDma; + ops.params.unmapMemoryDma.hMemory = hMemory; + ops.params.unmapMemoryDma.flags = flags; + ops.params.unmapMemoryDma.dmaOffset = dmaOffset; + + nvkms_call_rm(&ops); + + return ops.params.unmapMemoryDma.status; +} diff --git a/src/nvidia-modeset/src/nvkms-surface.c b/src/nvidia-modeset/src/nvkms-surface.c new file mode 100644 index 000000000..3d78bb200 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-surface.c @@ -0,0 +1,1259 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-surface.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-utils.h" +#include "nvkms-flip.h" +#include "nvkms-private.h" +#include "nvos.h" + +// NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD +#include "ctrl/ctrl0000/ctrl0000unix.h" + +/* NV01_MEMORY_SYSTEM_OS_DESCRIPTOR */ +#include "class/cl0071.h" + +static void FreeSurfaceEvoStruct(NVSurfaceEvoPtr pSurfaceEvo) +{ + if (pSurfaceEvo == NULL) { + return; + } + + nvAssert(!nvSurfaceEvoInAnyOpens(pSurfaceEvo)); + + nvAssert(pSurfaceEvo->structRefCnt == 0); + nvAssert(pSurfaceEvo->rmRefCnt == 0); + + nvFree(pSurfaceEvo); +} + +static void FreeSurfaceEvoRm(NVDevEvoPtr pDevEvo, NVSurfaceEvoPtr pSurfaceEvo) +{ + NvU64 structRefCnt; + NvU32 firstPlaneRmHandle; + NvU8 planeIndex; + + if ((pDevEvo == NULL) || (pSurfaceEvo == NULL)) { + return; + } + + nvAssert(pSurfaceEvo->rmRefCnt == 0); + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + nvRmEvoFreeDispContextDMA(pDevEvo, + &pSurfaceEvo->planes[planeIndex].ctxDma); + } + + firstPlaneRmHandle = pSurfaceEvo->planes[0].rmHandle; + + if (firstPlaneRmHandle != 0) { + + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + if (pSurfaceEvo->cpuAddress[sd] != NULL) { + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + firstPlaneRmHandle, + pSurfaceEvo->cpuAddress[sd], + 0); + pSurfaceEvo->cpuAddress[sd] = NULL; + } + } + + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + if (pSurfaceEvo->planes[planeIndex].rmHandle == 0) { + break; + } + + nvRmApiFree(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfaceEvo->planes[planeIndex].rmHandle); + + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pSurfaceEvo->planes[planeIndex].rmHandle); + + pSurfaceEvo->planes[planeIndex].rmHandle = 0; + } + + /* + * The surface is now an orphan: clear the pSurfaceEvo, for + * everything other than its structRefCnt. The only operation + * that can be done on it is unregistration. + */ + structRefCnt = pSurfaceEvo->structRefCnt; + nvkms_memset(pSurfaceEvo, 0, sizeof(*pSurfaceEvo)); + pSurfaceEvo->structRefCnt = structRefCnt; +} + +void nvEvoIncrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(!nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)); + + pSurfaceEvo->structRefCnt++; +} + +void nvEvoDecrementSurfaceStructRefCnt(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(pSurfaceEvo->structRefCnt >= 1); + pSurfaceEvo->structRefCnt--; + + if (pSurfaceEvo->structRefCnt == 0) { + FreeSurfaceEvoStruct(pSurfaceEvo); + } +} + +static NvBool ValidatePlaneProperties( + NVDevEvoPtr pDevEvo, + const struct NvKmsRegisterSurfaceRequest *pRequest) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(pRequest->format); + NvU8 planeIndex; + + /* + * Reject all registration requests for multi-planar NISO surfaces. + * This is a non-sensical request. 
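+     * (NISO, i.e. non-isochronous, surfaces hold data such as semaphores and
+     * notifiers rather than scanout pixels, so a multi-plane layout has no
+     * meaning for them; see the CPU-mapping handling in nvEvoRegisterSurface()
+     * below.)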
+ */ + if ((pRequest->isoType == NVKMS_MEMORY_NISO) && + (pFormatInfo->numPlanes > 1)) { + return FALSE; + } + + for (planeIndex = 0; planeIndex < pFormatInfo->numPlanes; planeIndex++) { + + const NvU64 planeOffset = pRequest->planes[planeIndex].offset; + NvU64 planePitch = pRequest->planes[planeIndex].pitch; + NvU64 rmObjectSizeInBytes = + pRequest->planes[planeIndex].rmObjectSizeInBytes; + NvU64 widthInBytes; + NvU64 planeSizeInBytes; + NvU32 planeEffectiveLines = pRequest->heightInPixels; + NvU32 widthInPixels = pRequest->widthInPixels; + + if ((planePitch == 0U) || (rmObjectSizeInBytes == 0U)) + { + nvEvoLog(EVO_LOG_ERROR, "Invalid request parameters, planePitch or rmObjectSizeInBytes, passed during surface registration"); + return FALSE; + } + + if ((pRequest->isoType == NVKMS_MEMORY_ISO) && + ((planeEffectiveLines == 0U) || (widthInPixels == 0U))) + { + nvEvoLog(EVO_LOG_ERROR, "Invalid request parameters, heightInPixels or widthInPixels, passed during surface registration for ISO surfaces"); + return FALSE; + } + + /* The offset must be 1KB-aligned. */ + if ((planeOffset & + ((1 << NV_SURFACE_OFFSET_ALIGNMENT_SHIFT) - 1)) != 0) { + return FALSE; + } + + /* + * Convert planePitch to units of bytes if it's currently specified in + * units of blocks. Each block is 64-bytes wide. + */ + if (pRequest->layout == NvKmsSurfaceMemoryLayoutBlockLinear) { + planePitch <<= NVKMS_BLOCK_LINEAR_LOG_GOB_WIDTH; + } + + /* + * Convert width to bytes. + */ + widthInBytes = widthInPixels; + + if (pFormatInfo->isYUV) { + NvU8 divisor = 1; + NvU8 bytesPerBlock = pFormatInfo->yuv.storageBitsPerComponent >> 3; + + switch (pFormatInfo->numPlanes) { + case 3: + /* planar */ + if (planeIndex > 0) { + divisor = pFormatInfo->yuv.horizChromaDecimationFactor; + } + break; + + case 2: + /* semi-planar */ + if (planeIndex > 0) { + divisor = pFormatInfo->yuv.horizChromaDecimationFactor; + bytesPerBlock *= 2; + } + break; + + case 1: + /* 4:2:2 packed */ + bytesPerBlock *= 2; + } + + widthInBytes *= bytesPerBlock; + /* Dimensions of decimated planes of odd-width YUV surfaces are + * supposed to be rounded up */ + widthInBytes = (widthInBytes + (divisor - 1)) / divisor; + } else { + widthInBytes *= pFormatInfo->rgb.bytesPerPixel; + } + + /* + * Check that an entire line of pixels will fit in the pitch value + * specified. + */ + if (widthInBytes > planePitch) { + return FALSE; + } + + /* + * Check that the entire memory region occupied by this plane falls + * within the size of the underlying memory allocation. + * + * Force planeEffectiveLines to be even before dividing by + * vertChromaDecimationFactor. The height of the source fetch rectangle + * must be even anyways if there's vertical decimation. 
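+         *
+         * Worked example with hypothetical numbers: for a 4:2:0 format with
+         * vertChromaDecimationFactor == 2 and heightInPixels == 1081, a
+         * chroma plane ends up with (1081 & ~1) / 2 == 540 effective lines,
+         * so its planeSizeInBytes below is 540 * planePitch.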
+ */ + if (planeIndex != 0 && pFormatInfo->isYUV && + pFormatInfo->yuv.vertChromaDecimationFactor > 1) { + planeEffectiveLines = planeEffectiveLines & ~(0x1); + planeEffectiveLines /= pFormatInfo->yuv.vertChromaDecimationFactor; + } + + planeSizeInBytes = planeEffectiveLines * planePitch; + + if ((pRequest->isoType == NVKMS_MEMORY_ISO) && + (planeSizeInBytes == 0U)) + { + nvEvoLog(EVO_LOG_ERROR, "Plane size calculated during ISO surface registration is 0"); + return FALSE; + } + + if ((planeSizeInBytes > rmObjectSizeInBytes) || + (planeOffset > (rmObjectSizeInBytes - planeSizeInBytes))) { + return FALSE; + } + } + + return TRUE; +} + +static NvBool ValidateRegisterSurfaceRequest( + NVDevEvoPtr pDevEvo, + const struct NvKmsRegisterSurfaceRequest *pRequest) +{ + const NvKmsSurfaceMemoryFormatInfo *pFormatInfo = + nvKmsGetSurfaceMemoryFormatInfo(pRequest->format); + + /* + * The purpose of this check is to make sure the given format is valid and not + * some garbage number. It exists to check for format validity in the case + * where noDisplayHardWareAccess is TRUE. + */ + if (pFormatInfo->depth == 0) { + return FALSE; + } + + /* + * NvKmsSurfaceMemoryFormat has a few formats that we will never display. + * Head surface has several formats it wants to texture from but we won't + * (and can't) display surfaces with those formats. We should reject any + * attempt to register a surface that is marked for display and uses one of + * those formats. + */ + if (!pRequest->noDisplayHardwareAccess) { + /* + * This isn't a perfect check since we can't predict which channel this + * surface will be used on, but we should definitely reject a format if + * it isn't usable on any channel. + */ + NvBool usableOnAnyChannel = FALSE; + NvU8 layer; + + for (layer = 0; + layer < ARRAY_LEN(pDevEvo->caps.layerCaps); + layer++) { + + if (NVBIT64(pRequest->format) & + pDevEvo->caps.layerCaps[layer].supportedSurfaceMemoryFormats) { + usableOnAnyChannel = TRUE; + break; + } + } + + if (!usableOnAnyChannel) { + return FALSE; + } + + if (!pDevEvo->hal->ValidateWindowFormat(pRequest->format, NULL, NULL)) { + return FALSE; + } + } + + if (!ValidatePlaneProperties(pDevEvo, pRequest)) { + return FALSE; + } + + /* XXX Validate surface properties. */ + + return TRUE; +} + + +void nvEvoRegisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsRegisterSurfaceParams *pParams, + enum NvHsMapPermissions hsMapPermissions) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + const struct NvKmsRegisterSurfaceRequest *pRequest = &pParams->request; + NVSurfaceEvoPtr pSurfaceEvo = NULL; + NvKmsSurfaceHandle surfaceHandle = 0; + NvU32 result; + NvU8 planeIndex; + NvBool nisoMemory = (pRequest->isoType == NVKMS_MEMORY_NISO); + + /* + * HeadSurface needs a CPU mapping of surfaces containing semaphores, in + * order to check, from the CPU, if a semaphore-interlocked flip is ready. 
+ */ + const NvBool needCpuMapping = nisoMemory && pDevEvo->isHeadSurfaceSupported; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + if (!ValidateRegisterSurfaceRequest(pDevEvo, pRequest)) { + goto fail; + } + + pSurfaceEvo = nvCalloc(1, sizeof(*pSurfaceEvo)); + + if (pSurfaceEvo == NULL) { + goto fail; + } + + pSurfaceEvo->format = pRequest->format; + + surfaceHandle = nvEvoCreateApiHandle(pOpenDevSurfaceHandles, pSurfaceEvo); + + if (surfaceHandle == 0) { + goto fail; + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + const NvU32 planeRmHandle = + nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (planeRmHandle == 0) { + goto fail; + } + + pSurfaceEvo->planes[planeIndex].rmHandle = planeRmHandle; + + if (pRequest->useFd) { + /* + * On T234, the 'fd' provided is allocated outside of RM whereas on + * dGPU it is allocated by RM. So we check whether the fd is associated + * with an nvidia character device, and if it is, then we consider that + * it belongs to RM. Based on whether it belongs to RM or not we need + * to call different mechanisms to import it. + */ + if (nvkms_fd_is_nvidia_chardev(pRequest->planes[planeIndex].u.fd)) { + NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS importParams = { }; + importParams.fd = pRequest->planes[planeIndex].u.fd; + importParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM; + importParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle; + importParams.object.data.rmObject.hParent = pDevEvo->deviceHandle; + importParams.object.data.rmObject.hObject = planeRmHandle; + + result = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_FROM_FD, + &importParams, + sizeof(importParams)); + } else { + /* + * If 'fd' doesn't belongs to resman assume that it is allocated by + * some other dmabuf allocator (like nvmap). + */ + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS allocParams = { }; + + allocParams.type = NVOS32_TYPE_IMAGE; + allocParams.descriptor = + (NvP64)(NvU64)(pRequest->planes[planeIndex].u.fd); + allocParams.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE; + allocParams.limit = pRequest->planes[planeIndex].rmObjectSizeInBytes - 1; + + allocParams.attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, + allocParams.attr); + allocParams.attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, + _NO, allocParams.attr2); + + /* + * The NVKMS client performing the import doesn't know what the original + * CPU cache attributes are, so assume WRITE_BACK since we only need RM to + * IOVA map the memory into display's address space and the CPU cache + * attributes shouldn't really matter in this case. + */ + allocParams.attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, + _WRITE_BACK, allocParams.attr); + allocParams.flags = NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED; + + switch (pRequest->layout) { + case NvKmsSurfaceMemoryLayoutBlockLinear: + allocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, + allocParams.attr); + break; + + case NvKmsSurfaceMemoryLayoutPitch: + allocParams.attr = + FLD_SET_DRF(OS32, _ATTR, _FORMAT, _PITCH, + allocParams.attr); + break; + + default: + nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Unknown layout"); + goto fail; + } + + if (nisoMemory) { + allocParams.attr2 = + FLD_SET_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, + allocParams.attr2); + } + + result = nvRmApiAlloc(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + planeRmHandle, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + &allocParams); + + /* + * Bug 200614156. 
RM doesn't support mapping osdesc objects into CPU’s + * address space. + */ + nvAssert(!needCpuMapping); + } + } else { + /* + * If 'useFd' is not specified, the (rmClient, rmObject) tuple from + * the request is an object in the caller's RM client space. + * Call RM to dup the memory into nvkms's RM client. + */ + result = nvRmApiDupObject(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + planeRmHandle, + pRequest->rmClient, + pRequest->planes[planeIndex].u.rmObject, + 0); + } + + if (result != NVOS_STATUS_SUCCESS) { + goto fail; + } + + /* XXX Validate sizeInBytes: can we query the surface size from RM? */ + + if (!pRequest->noDisplayHardwareAccess) { + + const NvU32 planeCtxDma = + nvRmEvoAllocateAndBindDispContextDMA( + pDevEvo, + planeRmHandle, + pRequest->layout, + pRequest->planes[planeIndex].rmObjectSizeInBytes - 1); + if (!planeCtxDma) { + goto fail; + } + + pSurfaceEvo->planes[planeIndex].ctxDma = planeCtxDma; + } + + pSurfaceEvo->planes[planeIndex].pitch = + pRequest->planes[planeIndex].pitch; + pSurfaceEvo->planes[planeIndex].offset = + pRequest->planes[planeIndex].offset; + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes = + pRequest->planes[planeIndex].rmObjectSizeInBytes; + } + + pSurfaceEvo->requireCtxDma = !pRequest->noDisplayHardwareAccess; + + /* + * Map the first plane of the surface only into the CPU's address space. + * This is the only valid plane since we would have already rejected + * multi-planar semaphore requests earlier. + */ + if (needCpuMapping) { + + NvU32 sd; + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + + result = nvRmApiMapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->pSubDevices[sd]->handle, + pSurfaceEvo->planes[0].rmHandle, + 0, + pRequest->planes[0].rmObjectSizeInBytes, + (void **) &pSurfaceEvo->cpuAddress[sd], + 0); + + if (result != NVOS_STATUS_SUCCESS) { + goto fail; + } + } + } + + pSurfaceEvo->widthInPixels = pRequest->widthInPixels; + pSurfaceEvo->heightInPixels = pRequest->heightInPixels; + pSurfaceEvo->layout = pRequest->layout; + pSurfaceEvo->log2GobsPerBlockY = pRequest->log2GobsPerBlockY; + pSurfaceEvo->isoType = pRequest->isoType; + + pSurfaceEvo->rmRefCnt = 1; + pSurfaceEvo->structRefCnt = 1; + + pSurfaceEvo->owner.pOpenDev = pOpenDev; + pSurfaceEvo->owner.surfaceHandle = surfaceHandle; + + pParams->reply.surfaceHandle = surfaceHandle; + + return; + +fail: + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo); + FreeSurfaceEvoStruct(pSurfaceEvo); +} + +/* Temporary storage used by ClearSurfaceUsage{Collect,Apply}. */ +struct ClearSurfaceUsageCache { + struct { + struct { + NvBool flipToNull : 1; + NvBool flipSemaphoreToNull : 1; + + NvBool needToIdle : 1; + } layer[NVKMS_MAX_LAYERS_PER_HEAD]; + + NvBool flipCursorToNull : 1; + } head[NVKMS_MAX_SUBDEVICES][NVKMS_MAX_HEADS_PER_DISP]; +}; + +/* + * Search for heads where the surfaces are used, and populate the structure + * pointed to by 'pCache' to indicate which channels need to be updated. 
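+ *
+ * The intended usage pattern is to Collect() the usage of one or more
+ * surfaces into a zero-initialized cache and then Apply() it once, e.g.
+ * (as done in nvEvoUnregisterSurface() below):
+ *
+ *     struct ClearSurfaceUsageCache cache = { };
+ *     ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache);
+ *     ClearSurfaceUsageApply(pDevEvo, &cache, skipUpdate);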
+ */ +static void +ClearSurfaceUsageCollect(NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo, + struct ClearSurfaceUsageCache *pCache) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, sd; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + const NVEvoSubDevHeadStateRec *pSdHeadState = + &pDevEvo->gpus[sd].headState[head]; + const NVFlipChannelEvoHwState *pMainFlipState = + &pSdHeadState->layer[NVKMS_MAIN_LAYER]; + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + /* + * XXX NVKMS TODO: flip across heads/subdevices for all scenarios + * that are flip locked. + */ + + if (!pMainFlipState->syncObject.usingSyncpt && + (pSurfaceEvo == pMainFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo || + pSurfaceEvo == pMainFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo)) { + pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipSemaphoreToNull = TRUE; + } + + if (pSurfaceEvo == pMainFlipState->pSurfaceEvo[NVKMS_LEFT] || + pSurfaceEvo == pMainFlipState->pSurfaceEvo[NVKMS_RIGHT] || + pSurfaceEvo == pMainFlipState->completionNotifier.surface.pSurfaceEvo) { + pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipToNull = TRUE; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + const NVFlipChannelEvoHwState *pLayerFlipState = + &pSdHeadState->layer[layer]; + + if (layer == NVKMS_MAIN_LAYER) { + continue; + } + + if (pSurfaceEvo == pLayerFlipState->pSurfaceEvo[NVKMS_LEFT] || + pSurfaceEvo == pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT] || + pSurfaceEvo == pLayerFlipState->completionNotifier.surface.pSurfaceEvo || + (!pLayerFlipState->syncObject.usingSyncpt && + (pSurfaceEvo == pLayerFlipState->syncObject.u.semaphores.acquireSurface.pSurfaceEvo || + pSurfaceEvo == pLayerFlipState->syncObject.u.semaphores.releaseSurface.pSurfaceEvo))) { + pCache->head[sd][head].layer[layer].flipToNull = TRUE; + } + + /* + * EVO requires that, when flipping the base channel (aka main layer) to + * NULL, overlay channel is also flipped to NULL. + */ + if (pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipToNull && + (pLayerFlipState->pSurfaceEvo[NVKMS_LEFT] != NULL || + pLayerFlipState->pSurfaceEvo[NVKMS_RIGHT] != NULL)) { + pCache->head[sd][head].layer[layer].flipToNull = TRUE; + } + } + + if (pSurfaceEvo == pSdHeadState->cursor.pSurfaceEvo) { + pCache->head[sd][head].flipCursorToNull = TRUE; + } + } + } +} + +/* + * Do the hard work to babysit the hardware to ensure that any channels which + * need clearing have actually done so before proceeding to free memory and + * remove ctxdmas from the hash table. + * + * This is achieved in several steps: + * 1. Issue a flip of any overlay layer to NULL -- these are processed + * separately since using one Flip request would interlock them, potentially + * exacerbating stuck channels by getting other channels stuck too. + * Pre-NVDisplay requires that, when flipping the core channel to NULL, + * all satellite channels are also flipped to NULL. The EVO2 hal takes care + * to enable/disable the core surface along with the base surface, + * therefore flip overlay to NULL before base. + * 2. Issue a flip of any main layer to NULL + * 3. Wait for any base/overlay layer that we expect to be idle to actually + * be idle. If they don't idle in a timely fashion, apply accelerators to + * forcibly idle any problematic channels. + * 4. Issue a flip of any core channels to NULL. 
+ */ +static void +ClearSurfaceUsageApply(NVDevEvoPtr pDevEvo, + struct ClearSurfaceUsageCache *pCache, + NvBool skipUpdate) +{ + NVDispEvoPtr pDispEvo; + NvU32 head, sd; + NvBool found = FALSE; + struct NvKmsFlipRequest *request = nvCalloc(1, sizeof(*request)); + + if (request == NULL) { + nvAssert(!"Failed to allocate memory"); + return; + } + + /* 1. Issue a flip of any overlay layer to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + struct NvKmsFlipCommonParams *pRequestOneHead = + &request->sd[sd].head[head]; + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + + if (layer == NVKMS_MAIN_LAYER) { + continue; + } + + if (pCache->head[sd][head].layer[layer].flipToNull) { + pRequestOneHead->layer[layer].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping NULL surface. + pRequestOneHead->layer[layer].compositionParams.specified = TRUE; + pRequestOneHead->layer[layer].syncObjects.specified = TRUE; + pRequestOneHead->layer[layer].completionNotifier.specified = TRUE; + + request->sd[sd].requestedHeadsBitMask |= NVBIT(head); + found = TRUE; + + pCache->head[sd][head].layer[layer].needToIdle = TRUE; + } + } + } + } + + if (found) { + request->commit = NV_TRUE; + + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, request, NULL, skipUpdate, + FALSE /* allowFlipLock */); + + nvkms_memset(request, 0, sizeof(*request)); + found = FALSE; + } + + /* + * No need to idle the overlay layer before flipping the main channel to + * NULL, because the FlipOverlay90() function in the EVO2 hal makes sure + * that the overlay's flip to NULL is always interlocked with the core + * channel and the base (main layer) channel's flip to NULL can proceed only + * after completion of the overlay's flip to NULL (the base channel's flip + * to NULL interlocks with the core channel's flip to NULL). + */ + + /* 2. Issue a flip of any main layer to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + struct NvKmsFlipCommonParams *pRequestOneHead = + &request->sd[sd].head[head]; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if (pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipToNull || + pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipSemaphoreToNull) { + + if (pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].flipToNull) { + pRequestOneHead->layer[NVKMS_MAIN_LAYER].surface.specified = TRUE; + // No need to specify sizeIn/sizeOut as we are flipping NULL surface. + pRequestOneHead->layer[NVKMS_MAIN_LAYER].completionNotifier.specified = TRUE; + + pCache->head[sd][head].layer[NVKMS_MAIN_LAYER].needToIdle = TRUE; + } + + /* XXX arguably we should also idle for this case, but we + * don't currently have a way to do so without also + * clearing the ISO surface */ + pRequestOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.val.useSyncpt = FALSE; + pRequestOneHead->layer[NVKMS_MAIN_LAYER].syncObjects.specified = TRUE; + + request->sd[sd].requestedHeadsBitMask |= NVBIT(head); + found = TRUE; + } + } + } + + if (found) { + request->commit = NV_TRUE; + + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, request, NULL, skipUpdate, + FALSE /* allowFlipLock */); + + nvkms_memset(request, 0, sizeof(*request)); + found = FALSE; + } + + /* + * 3. Wait for any base/overlay layer that we expect to be idle to actually + * be idle. 
If they don't idle in a timely fashion, apply accelerators to + * forcibly idle any problematic channels. + */ + if (!skipUpdate) { + NvU64 startTime = 0; + const NvU32 timeout = 500000; // .5 seconds + NvBool allIdle; + + do { + allIdle = TRUE; + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NvBool isMethodPending; + + if (!pCache->head[sd][head].layer[layer].needToIdle) { + continue; + } + + if (pDevEvo->hal->IsChannelMethodPending( + pDevEvo, pDevEvo->head[head].layer[layer], sd, + &isMethodPending) && + isMethodPending) { + + allIdle = FALSE; + } else { + /* This has been completed, no need to keep trying */ + pCache->head[sd][head].layer[layer].needToIdle = FALSE; + } + } + } + } + + if (!allIdle) { + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + break; + } + nvkms_yield(); + } + } while (!allIdle); + + /* If we timed out above, force things to be idle. */ + if (!allIdle) { + NVEvoIdleChannelState idleChannelState = { }; + NvBool tryToForceIdle = FALSE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pCache->head[sd][head].layer[layer].needToIdle) { + idleChannelState.subdev[sd].channelMask |= + pDevEvo->head[head].layer[layer]->channelMask; + tryToForceIdle = TRUE; + } + } + } + } + + if (tryToForceIdle) { + NvBool ret = pDevEvo->hal->ForceIdleSatelliteChannel(pDevEvo, &idleChannelState); + if (!ret) { + nvAssert(ret); + } + } + } + } + + /* 4. Issue a flip of any core channels to NULL */ + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + struct NvKmsFlipCommonParams *pRequestOneHead = + &request->sd[sd].head[head]; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if (pCache->head[sd][head].flipCursorToNull) { + pRequestOneHead->cursor.imageSpecified = TRUE; + request->sd[sd].requestedHeadsBitMask |= NVBIT(head); + found = TRUE; + } + } + } + + if (found) { + request->commit = NV_TRUE; + + nvFlipEvo(pDevEvo, pDevEvo->pNvKmsOpenDev, request, NULL, skipUpdate, + FALSE /* allowFlipLock */); + } + + nvFree(request); +} + +/* + * This function unregisters/releases all of the surface handles remaining for + * the given pOpenDev. + * + * It duplicates some functionality of nvEvoUnregisterSurface() and + * nvEvoReleaseSurface(), but with an important difference: it processes the + * "clear surface usage" step for all surfaces up front, and only once that is + * complete it proceeds with freeing the surfaces. + * + * In practice, this makes teardown much smoother than invoking those functions + * individually for each surface, particularly in the case that the hardware is + * stuck and needs accelerators. Consider the case where a client has + * registered several surfaces, and is flipping between two of them, and the + * hardware is stuck on a semaphore acquire that will never complete with + * several frames pending in the pushbuffer. 
If the first surface processed + * by nvEvoUnregisterSurface() happens to be the current "back buffer" (i.e., + * not the most recently pushed surface to be displayed), then + * nvEvoUnregisterSurface() will call ClearSurfaceUsage(), but it will find no + * channels to clear, and will proceed with nvEvoDecrementSurfaceRefCnts() + * which will call nvRMSyncEvoChannel() to drain any outstanding methods. Due + * to the stalled semaphore, nvRMSyncEvoChannel() will stall for 2 seconds, + * time out along with a nasty message to the kernel log, then we'll free the + * surface and remove its entry from the display hash table anyway. And that + * may happen several times until we finally call nvEvoUnregisterSurface() on + * the surface which is the most recently requested flip, where + * ClearSurfaceUsage() will finally get a chance to tear down the channel + * forcefully by using accelerators to skip the semaphore acquire. But, some + * of the methods which were outstanding and now get processed may reference a + * ctxdma which was already freed, triggering nasty Xid messages. + * + * By gathering up all the channels we can to find which ones to clear first, + * we have a much higher chance of avoiding these timeouts. + */ +void nvEvoFreeClientSurfaces(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NVEvoApiHandlesRec *pOpenDevSurfaceHandles) +{ + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + struct ClearSurfaceUsageCache cache = { }; + NvBool needApply = FALSE; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pOpenDevSurfaceHandles, + pSurfaceEvo, surfaceHandle) { + + if (nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + /* + * If something besides the owner has an rmRefCnt reference, + * the surface might be in use by EVO; flip to NULL to attempt + * to free it. + */ + if (pSurfaceEvo->rmRefCnt > 1) { + ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache); + needApply = TRUE; + } + } + } + + if (needApply) { + ClearSurfaceUsageApply(pDevEvo, &cache, FALSE); + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pOpenDevSurfaceHandles, + pSurfaceEvo, surfaceHandle) { + const NvBool isOwner = + nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle); + + /* Remove the handle from the calling client's namespace. */ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + if (isOwner) { + nvEvoDecrementSurfaceRefCnts(pSurfaceEvo); + } else { + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); + } + } + +} + +void nvEvoUnregisterSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle, + NvBool skipUpdate) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + NVSurfaceEvoPtr pSurfaceEvo; + + pSurfaceEvo = nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + surfaceHandle); + if (pSurfaceEvo == NULL) { + return; + } + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Surface unregister attempted by non-owner; " + "non-owners must release the surface."); + return; + } + + /* + * If something besides the owner has an rmRefCnt reference, + * the surface might be in use by EVO; flip to NULL to attempt + * to free it. + */ + if (pSurfaceEvo->rmRefCnt > 1) { + struct ClearSurfaceUsageCache cache = { }; + + ClearSurfaceUsageCollect(pDevEvo, pSurfaceEvo, &cache); + ClearSurfaceUsageApply(pDevEvo, &cache, skipUpdate); + } + + /* Remove the handle from the calling client's namespace. 
*/ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvEvoDecrementSurfaceRefCnts(pSurfaceEvo); +} + +void nvEvoReleaseSurface(NVDevEvoPtr pDevEvo, + struct NvKmsPerOpenDev *pOpenDev, + NvKmsSurfaceHandle surfaceHandle) +{ + NVEvoApiHandlesRec *pOpenDevSurfaceHandles = + nvGetSurfaceHandlesFromOpenDev(pOpenDev); + NVSurfaceEvoPtr pSurfaceEvo; + + pSurfaceEvo = nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, + surfaceHandle); + if (pSurfaceEvo == NULL) { + return; + } + + if (nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_WARN, + "Surface release attempted by owner; " + "owners must unregister the surface."); + return; + } + + /* Remove the handle from the calling client's namespace. */ + nvEvoDestroyApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); +} + +void nvEvoIncrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(!nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)); + + pSurfaceEvo->rmRefCnt++; + pSurfaceEvo->structRefCnt++; +} + +void nvEvoDecrementSurfaceRefCnts(NVSurfaceEvoPtr pSurfaceEvo) +{ + nvAssert(pSurfaceEvo->rmRefCnt >= 1); + pSurfaceEvo->rmRefCnt--; + + if (pSurfaceEvo->rmRefCnt == 0) { + NVDevEvoPtr pDevEvo = + nvGetDevEvoFromOpenDev(pSurfaceEvo->owner.pOpenDev); + + /* + * Don't sync if this surface was registered as not requiring display + * hardware access, to WAR timeouts that result from OGL unregistering + * a deferred request fifo causing a sync here that may timeout if + * GLS hasn't had the opportunity to release semaphores with pending + * flips. (Bug 2050970) + */ + if (pSurfaceEvo->requireCtxDma) { + /* + * XXX NVKMS TODO + * Make the sync more efficient: we only need to sync if the + * in-flight methods flip away from this surface. + */ + NvU32 head; + + /* + * If the core channel is no longer allocated, we don't need to + * sync. This assumes the channels are allocated/deallocated + * together. 
+ */ + if (pDevEvo->core) { + + if (pDevEvo->hal->ClearSurfaceUsage != NULL) { + pDevEvo->hal->ClearSurfaceUsage(pDevEvo, pSurfaceEvo); + } + + nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__); + + for (head = 0; head < pDevEvo->numHeads; head++) { + NvU32 layer; + + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + NVEvoChannelPtr pChannel = + pDevEvo->head[head].layer[layer]; + + nvRMSyncEvoChannel(pDevEvo, pChannel, __LINE__); + } + } + } + } + + FreeSurfaceEvoRm(pDevEvo, pSurfaceEvo); + } + + nvEvoDecrementSurfaceStructRefCnt(pSurfaceEvo); +} + +NvBool nvEvoSurfaceRefCntsTooLarge(const NVSurfaceEvoRec *pSurfaceEvo) +{ + return ((pSurfaceEvo->rmRefCnt == NV_U64_MAX) || + (pSurfaceEvo->structRefCnt == NV_U64_MAX)); +} + +static NVSurfaceEvoPtr GetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NVEvoChannelMask channelMask, + const NvBool requireCtxDma) +{ + NVSurfaceEvoPtr pSurfaceEvo = + nvEvoGetPointerFromApiHandle(pOpenDevSurfaceHandles, surfaceHandle); + + nvAssert(requireCtxDma || !channelMask); + + if (pSurfaceEvo == NULL) { + return NULL; + } + + if (pSurfaceEvo->rmRefCnt == 0) { /* orphan */ + return NULL; + } + + if (requireCtxDma && !pSurfaceEvo->requireCtxDma) { + return NULL; + } + + /* Validate that the surface can be used as a cursor image */ + if ((channelMask & + NV_EVO_CHANNEL_MASK_CURSOR_ALL) && + !pDevEvo->hal->ValidateCursorSurface(pDevEvo, pSurfaceEvo)) { + return NULL; + } + + /* + * XXX If !requireCtxDma, fetched surfaces aren't going to be accessed by + * the display hardware, so they shouldn't need to be checked by + * nvEvoGetHeadSetStoragePitchValue(). These surfaces will be used as a + * texture by the 3d engine. But previously all surfaces were checked by + * nvEvoGetHeadSetStoragePitchValue() at registration time, and we don't + * know if nvEvoGetHeadSetStoragePitchValue() was protecting us from any + * surface dimensions that could cause trouble for the 3d engine. + */ + if ((channelMask & ~NV_EVO_CHANNEL_MASK_CURSOR_ALL) || !requireCtxDma) { + NvU8 planeIndex; + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + if (nvEvoGetHeadSetStoragePitchValue( + pDevEvo, + pSurfaceEvo->layout, + pSurfaceEvo->planes[planeIndex].pitch) == 0) { + return NULL; + } + } + } + + return pSurfaceEvo; +} + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandle( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + const NvKmsSurfaceHandle surfaceHandle, + const NVEvoChannelMask channelMask) +{ + return GetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandle, + channelMask, + TRUE /* requireCtxDma */); +} + +NVSurfaceEvoPtr nvEvoGetSurfaceFromHandleNoCtxDmaOk( + const NVDevEvoRec *pDevEvo, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvKmsSurfaceHandle surfaceHandle) +{ + return GetSurfaceFromHandle(pDevEvo, + pOpenDevSurfaceHandles, + surfaceHandle, 0x0 /* channelMask */, + FALSE /* requireCtxDma */); +} + +/*! + * Create a deferred request fifo, using the specified pSurfaceEvo. + */ +NVDeferredRequestFifoRec *nvEvoRegisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVSurfaceEvoPtr pSurfaceEvo) +{ + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvU32 ret; + + if (pSurfaceEvo->planes[0].rmObjectSizeInBytes < + sizeof(struct NvKmsDeferredRequestFifo)) { + return NULL; + } + + /* + * XXX validate that the surface is in sysmem; can we query that from + * resman? 
+ */ + + pDeferredRequestFifo = nvCalloc(1, sizeof(*pDeferredRequestFifo)); + + if (pDeferredRequestFifo == NULL) { + return NULL; + } + + /* Get a CPU mapping of the surface. */ + + ret = nvRmApiMapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pSurfaceEvo->planes[0].rmHandle, + 0, + sizeof(*pDeferredRequestFifo->fifo), + (void **) &pDeferredRequestFifo->fifo, + 0); + + if (ret != NVOS_STATUS_SUCCESS) { + nvFree(pDeferredRequestFifo); + return NULL; + } + + pDeferredRequestFifo->pSurfaceEvo = pSurfaceEvo; + + nvEvoIncrementSurfaceRefCnts(pSurfaceEvo); + + return pDeferredRequestFifo; +} + +/*! + * Free the deferred request fifo. + */ +void nvEvoUnregisterDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo) +{ + nvAssert(pDeferredRequestFifo->fifo != NULL); + nvAssert(pDeferredRequestFifo->pSurfaceEvo != NULL); + + nvRmApiUnmapMemory( + nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDeferredRequestFifo->pSurfaceEvo->planes[0].rmHandle, + pDeferredRequestFifo->fifo, + 0); + + nvEvoDecrementSurfaceRefCnts(pDeferredRequestFifo->pSurfaceEvo); + + nvFree(pDeferredRequestFifo); +} diff --git a/src/nvidia-modeset/src/nvkms-utils.c b/src/nvidia-modeset/src/nvkms-utils.c new file mode 100644 index 000000000..648af0c8b --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-utils.c @@ -0,0 +1,796 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-utils.h" +#include "nvkms-types.h" +#include "nv_mode_timings_utils.h" +#include "nv_vasprintf.h" + +#include "nv_list.h" /* for nv_container_of() */ + +void nvVEvoLog(NVEvoLogType logType, NvU8 gpuLogIndex, + const char *fmt, va_list ap) +{ + char *msg, prefix[10]; + const char *gpuPrefix = ""; + int level; + + switch (logType) { + default: + case EVO_LOG_INFO: level = NVKMS_LOG_LEVEL_INFO; break; + case EVO_LOG_WARN: level = NVKMS_LOG_LEVEL_WARN; break; + case EVO_LOG_ERROR: level = NVKMS_LOG_LEVEL_ERROR; break; + } + + msg = nv_vasprintf(fmt, ap); + if (msg == NULL) { + return; + } + + if (gpuLogIndex != NV_INVALID_GPU_LOG_INDEX) { + nvkms_snprintf(prefix, sizeof(prefix), "GPU:%d: ", gpuLogIndex); + gpuPrefix = prefix; + } + + nvkms_log(level, gpuPrefix, msg); + + nvFree(msg); +} + +void nvEvoLogDev(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) 
+{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDevEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLogDisp(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDispEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLog(NVEvoLogType logType, const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, NV_INVALID_GPU_LOG_INDEX, fmt, ap); + va_end(ap); +} + +#if defined(DEBUG) + +void nvEvoLogDebug(NVEvoLogType logType, const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, NV_INVALID_GPU_LOG_INDEX, fmt, ap); + va_end(ap); +} + +void nvEvoLogDevDebug(const NVDevEvoRec *pDevEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDevEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +void nvEvoLogDispDebug(const NVDispEvoRec *pDispEvo, NVEvoLogType logType, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + nvVEvoLog(logType, pDispEvo->gpuLogIndex, fmt, ap); + va_end(ap); +} + +#endif /* DEBUG */ + + +/*! + * Initialize the given NVEvoInfoString. + * + * Point the infoString at the specified character array. + */ +void nvInitInfoString(NVEvoInfoStringPtr pInfoString, + char *s, NvU16 totalLength) +{ + nvkms_memset(pInfoString, 0, sizeof(*pInfoString)); + pInfoString->s = s; + pInfoString->totalLength = totalLength; +} + + +/*! + * Append the text, described by 'format' and 'ap', to the infoString. + */ +static void LogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, va_list ap) +{ + char *s; + size_t size = pInfoString->totalLength - pInfoString->length; + int ret; + + if (pInfoString->s == NULL) { + return; + } + if (size <= 1) { + nvAssert(!"pInfoString too small"); + return; + } + + s = pInfoString->s + pInfoString->length; + + ret = nvkms_vsnprintf(s, size, format, ap); + + if (ret > 0) { + pInfoString->length += NV_MIN((size_t)ret, size - 1); + } + + /* + * If ret is larger than size, then we may need to increase + * totalLength to support logging everything that we are trying to + * log to this buffer. + */ + nvAssert(ret <= size); + + nvAssert(pInfoString->length < pInfoString->totalLength); + pInfoString->s[pInfoString->length] = '\0'; +} + + +/*! + * Append to the infoString, without any additions. + */ +void nvEvoLogInfoStringRaw(NVEvoInfoStringPtr pInfoString, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + LogInfoString(pInfoString, format, ap); + va_end(ap); +} + + +/*! + * Append to the infoString, appending a newline. + */ +void nvEvoLogInfoString(NVEvoInfoStringPtr pInfoString, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + LogInfoString(pInfoString, format, ap); + va_end(ap); + + nvEvoLogInfoStringRaw(pInfoString, "\n"); +} + + +/*! + * The NVEvoApiHandlesRec-related functions below are used to manage + * sets of NvKms API handles. For the various NvKms objects (e.g., + * devices, disps, connectors, surfaces) clients will specify the + * object by handle, and NVKMS will look up the corresponding object. + * + * We store a pointer to the object in a dynamically allocated array, + * and use the handle to look up the pointer in the array. + * + * Note that handles are 1-based (valid handles are in the range + * [1,numPointers], and 0 is an invalid handle), while indices to the + * corresponding pointers are 0-based (valid indices are in the range + * [0,numPointers-1]). 
Subtract 1 from the handle to get the index + * for the pointer. + */ + +/*! + * Increase the size of the NVEvoApiHandles::pointers array. + * + * Reallocate the pointers array, increasing by defaultSize. + * Initialize the new region of memory. + */ +static NvBool GrowApiHandlesPointersArray(NVEvoApiHandlesPtr pEvoApiHandles) +{ + NvU32 newNumPointers = + pEvoApiHandles->numPointers + pEvoApiHandles->defaultSize; + size_t oldSize = pEvoApiHandles->numPointers * sizeof(void *); + size_t newSize = newNumPointers * sizeof(void *); + void **newPointers; + + /* Check for wrap in the newNumPointers computation. */ + if (newSize <= oldSize) { + return FALSE; + } + + newPointers = nvRealloc(pEvoApiHandles->pointers, newSize); + + if (newPointers == NULL) { + return FALSE; + } + + nvkms_memset(&newPointers[pEvoApiHandles->numPointers], 0, newSize - oldSize); + + pEvoApiHandles->pointers = newPointers; + pEvoApiHandles->numPointers = newNumPointers; + + return TRUE; +} + + +/*! + * Attempt to shrink the NVEvoApiHandles::pointers array. + * + * If high elements in the array are unused, reduce the array size in + * multiples of defaultSize. + */ +static void ShrinkApiHandlesPointersArray(NVEvoApiHandlesPtr pEvoApiHandles) +{ + NvU32 index; + NvU32 newNumPointers; + void **newPointers; + + /* If the array is already as small as it can be, we are done. */ + + if (pEvoApiHandles->numPointers == pEvoApiHandles->defaultSize) { + return; + } + + /* Find the highest non-empty element. */ + + for (index = pEvoApiHandles->numPointers - 1; index > 0; index--) { + if (pEvoApiHandles->pointers[index] != NULL) { + break; + } + } + + /* + * Compute the new array size by rounding index up to the next + * multiple of defaultSize. + */ + newNumPointers = ((index / pEvoApiHandles->defaultSize) + 1) * + pEvoApiHandles->defaultSize; + + /* If the array is already that size, we are done. */ + + if (pEvoApiHandles->numPointers == newNumPointers) { + return; + } + + newPointers = + nvRealloc(pEvoApiHandles->pointers, newNumPointers * sizeof(void *)); + + if (newPointers != NULL) { + pEvoApiHandles->pointers = newPointers; + pEvoApiHandles->numPointers = newNumPointers; + } +} + + +/*! + * Return true if 'pointer' is already present in pEvoApiHandles + */ +NvBool nvEvoApiHandlePointerIsPresent(NVEvoApiHandlesPtr pEvoApiHandles, + void *pointer) +{ + NvU32 index; + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] == pointer) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Create an NvKms API handle. + * + * Create an available handle from pEvoApiHandles, and associate + * 'pointer' with the handle. + */ +NvKmsGenericHandle +nvEvoCreateApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, void *pointer) +{ + NvU32 index; + + if (pointer == NULL) { + return 0; + } + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] == NULL) { + goto availableIndex; + } + } + + /* + * Otherwise, there are no free elements in the pointers array: + * grow the array and try again. + */ + if (!GrowApiHandlesPointersArray(pEvoApiHandles)) { + return 0; + } + + /* fall through */ + +availableIndex: + + nvAssert(index < pEvoApiHandles->numPointers); + nvAssert(pEvoApiHandles->pointers[index] == NULL); + + pEvoApiHandles->pointers[index] = pointer; + + return index + 1; +} + + +/*! + * Retrieve a pointer that maps to an NvKms API handle. + * + * Return the pointer that nvEvoCreateApiHandle() associated with 'handle'. 
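+ *
+ * A minimal usage sketch (hypothetical caller; the table must already have
+ * been set up with nvEvoInitApiHandles(), and pObject is just an example
+ * pointer):
+ *
+ *   NvKmsGenericHandle handle =
+ *       nvEvoCreateApiHandle(pEvoApiHandles, pObject);
+ *   if (handle != 0) {
+ *       nvAssert(nvEvoGetPointerFromApiHandle(pEvoApiHandles, handle) ==
+ *                pObject);
+ *       nvEvoDestroyApiHandle(pEvoApiHandles, handle);
+ *   }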
+ */ +void *nvEvoGetPointerFromApiHandle(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle handle) +{ + NvU32 index; + + if (handle == 0) { + return NULL; + } + + index = handle - 1; + + if (index >= pEvoApiHandles->numPointers) { + return NULL; + } + + return pEvoApiHandles->pointers[index]; +} + + +/*! + * Retrieve a pointer that maps to the next NvKms API handle. + * + * This is intended to be used by the + * FOR_ALL_POINTERS_IN_EVO_API_HANDLES() macro. On the first + * iteration, *pHandle == 0, and this will return the first pointer it + * finds in the pointer array. The returned *pHandle will be the + * location to begin searching on the next iteration, and so on. + * + * Once there are no more non-zero elements in the pointer array, + * return NULL. + */ +void *nvEvoGetPointerFromApiHandleNext(const NVEvoApiHandlesRec *pEvoApiHandles, + NvKmsGenericHandle *pHandle) +{ + NvU32 index = *pHandle; + + for (; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] != NULL) { + *pHandle = index + 1; + return pEvoApiHandles->pointers[index]; + } + } + + return NULL; +} + + +/*! + * Remove an NvKms API handle. + * + * Clear the 'handle' entry, and its corresponding pointer, from pEvoApiHandles. + */ +void nvEvoDestroyApiHandle(NVEvoApiHandlesPtr pEvoApiHandles, + NvKmsGenericHandle handle) +{ + NvU32 index; + + if (handle == 0) { + return; + } + + index = handle - 1; + + if (index >= pEvoApiHandles->numPointers) { + return; + } + + pEvoApiHandles->pointers[index] = NULL; + + ShrinkApiHandlesPointersArray(pEvoApiHandles); +} + + +/* Only used in nvAssert, so only build into debug builds to avoid never-used + * warnings */ +#if defined(DEBUG) +/*! + * Return the number of non-NULL pointers in the pointer array. + */ +static NvU32 +CountApiHandles(const NVEvoApiHandlesRec *pEvoApiHandles) +{ + NvU32 index, count = 0; + + for (index = 0; index < pEvoApiHandles->numPointers; index++) { + if (pEvoApiHandles->pointers[index] != NULL) { + count++; + } + } + + return count; +} +#endif /* DEBUG */ + + +/*! + * Initialize the NVEvoApiHandlesRec. + * + * This should be called before any + * nvEvo{Create,GetPointerFrom,Destroy}ApiHandle() calls on this + * pEvoApiHandles. + * + * The pointer array for the pEvoApiHandles will be managed in + * multiples of 'defaultSize'. + */ +NvBool nvEvoInitApiHandles(NVEvoApiHandlesPtr pEvoApiHandles, NvU32 defaultSize) +{ + nvkms_memset(pEvoApiHandles, 0, sizeof(*pEvoApiHandles)); + + pEvoApiHandles->defaultSize = defaultSize; + + return GrowApiHandlesPointersArray(pEvoApiHandles); +} + + +/*! + * Free the NVEvoApiHandlesPtr resources. + */ +void nvEvoDestroyApiHandles(NVEvoApiHandlesPtr pEvoApiHandles) +{ + nvAssert(CountApiHandles(pEvoApiHandles) == 0); + + nvFree(pEvoApiHandles->pointers); + + nvkms_memset(pEvoApiHandles, 0, sizeof(*pEvoApiHandles)); +} + +NvU8 nvPixelDepthToBitsPerComponent(enum nvKmsPixelDepth pixelDepth) +{ + switch (pixelDepth) { + case NVKMS_PIXEL_DEPTH_18_444: + return 6; + case NVKMS_PIXEL_DEPTH_24_444: + return 8; + case NVKMS_PIXEL_DEPTH_30_444: + return 10; + } + nvAssert(!"Unknown NVKMS_PIXEL_DEPTH"); + return 0; +} + +/* Import function required by nvBuildModeName() */ + +int nvBuildModeNameSnprintf(char *str, size_t size, const char *format, ...) 
+{ + va_list ap; + int ret; + + va_start(ap, format); + ret = nvkms_vsnprintf(str, size, format, ap); + va_end(ap); + + return ret; +} + +/* Import functions required by nv_vasprintf() */ + +void *nv_vasprintf_alloc(size_t size) +{ + return nvAlloc(size); +} + +void nv_vasprintf_free(void *ptr) +{ + nvFree(ptr); +} + +int nv_vasprintf_vsnprintf(char *str, size_t size, + const char *format, va_list ap) +{ + return nvkms_vsnprintf(str, size, format, ap); +} + +/* + * Track the size of each allocation, so that it can be passed to + * nvkms_free(). + */ +typedef struct { + size_t size; /* includes sizeof(nvkms_memory_info_t) */ + char data[] __attribute__((aligned(8))); +} nvkms_memory_info_t; + +void *nvInternalAlloc(size_t size, const NvBool zero) +{ + size_t totalSize = size + sizeof(nvkms_memory_info_t); + nvkms_memory_info_t *p; + + if (totalSize < size) { /* overflow in the above addition */ + return NULL; + } + + p = nvkms_alloc(totalSize, zero); + + if (p == NULL) { + return NULL; + } + + p->size = totalSize; + + return p->data; +} + +void *nvInternalRealloc(void *ptr, size_t size) +{ + nvkms_memory_info_t *p = NULL; + void *newptr; + + if (ptr == NULL) { + /* realloc with a ptr of NULL is equivalent to alloc. */ + return nvInternalAlloc(size, FALSE); + } + + if (size == 0) { + /* realloc with a size of 0 is equivalent to free. */ + nvInternalFree(ptr); + return NULL; + } + + p = nv_container_of(ptr, nvkms_memory_info_t, data); + + newptr = nvInternalAlloc(size, FALSE); + + if (newptr != NULL) { + size_t oldsize = p->size - sizeof(nvkms_memory_info_t); + size_t copysize = (size < oldsize) ? size : oldsize; + nvkms_memcpy(newptr, ptr, copysize); + nvInternalFree(ptr); + } + + return newptr; +} + +void nvInternalFree(void *ptr) +{ + nvkms_memory_info_t *p; + + if (ptr == NULL) { + return; + } + + p = nv_container_of(ptr, nvkms_memory_info_t, data); + + nvkms_free(p, p->size); +} + +char *nvInternalStrDup(const char *str) +{ + size_t len; + char *newstr; + + if (str == NULL) { + return NULL; + } + + len = nvkms_strlen(str) + 1; + + newstr = nvInternalAlloc(len, FALSE); + + if (newstr == NULL) { + return NULL; + } + + nvkms_memcpy(newstr, str, len); + + return newstr; +} + +/*! + * Look up the value of a key in the set of registry keys provided at device + * allocation time, copied from the client request during nvAllocDevEvo(). + * + * \param[in] pDevEvo The device with regkeys to be checked. + * + * \param[in] key The name of the key to look up. + * + * \param[out] val The value of the key, if the key was specified. + * + * \return Whether the key was specified in the registry. 
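+ *
+ * A minimal usage sketch ("ExampleRegkey" is an illustrative name only, not
+ * a key that necessarily exists):
+ *
+ *   NvU32 val = 0;
+ *   NvBool specified = nvGetRegkeyValue(pDevEvo, "ExampleRegkey", &val);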
+ */ +NvBool nvGetRegkeyValue(const NVDevEvoRec *pDevEvo, + const char *key, NvU32 *val) +{ + int i; + + for (i = 0; i < ARRAY_LEN(pDevEvo->registryKeys); i++) { + if (nvkms_strcmp(key, pDevEvo->registryKeys[i].name) == 0) { + *val = pDevEvo->registryKeys[i].value; + return TRUE; + } + } + + return FALSE; +} + +#if defined(DEBUG) + +#include "nv_memory_tracker.h" + +void *nvDebugAlloc(size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedAlloc(&nvEvoGlobal.debugMemoryAllocationList, + size, line, file); +} + +void *nvDebugCalloc(size_t nmemb, size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedCalloc(&nvEvoGlobal.debugMemoryAllocationList, + nmemb, size, line, file); +} + +void *nvDebugRealloc(void *ptr, size_t size, int line, const char *file) +{ + return nvMemoryTrackerTrackedRealloc(&nvEvoGlobal.debugMemoryAllocationList, + ptr, size, line, file); +} + +void nvDebugFree(void *ptr) +{ + nvMemoryTrackerTrackedFree(ptr); +} + +char *nvDebugStrDup(const char *str, int line, const char *file) +{ + size_t size = nvkms_strlen(str); + char *newStr = nvDebugAlloc(size + 1, line, file); + + if (newStr == NULL) { + return NULL; + } + + nvkms_memcpy(newStr, str, size); + newStr[size] = '\0'; + + return newStr; +} + +void nvReportUnfreedAllocations(void) +{ + nvMemoryTrackerPrintUnfreedAllocations( + &nvEvoGlobal.debugMemoryAllocationList); +} + +void nvMemoryTrackerPrintf(const char *format, ...) +{ + va_list ap; + va_start(ap, format); + nvVEvoLog(EVO_LOG_WARN, NV_INVALID_GPU_LOG_INDEX, format, ap); + va_end(ap); +} + +void *nvMemoryTrackerAlloc(size_t size) +{ + return nvkms_alloc(size, FALSE); +} + +void nvMemoryTrackerFree(void *ptr, size_t size) +{ + nvkms_free(ptr, size); +} + +void nvMemoryTrackerMemset(void *s, int c, size_t n) +{ + nvkms_memset(s, c, n); +} + +void nvMemoryTrackerMemcpy(void *dest, const void *src, size_t n) +{ + nvkms_memcpy(dest, src, n); +} + +#endif /* DEBUG */ + +/* + * The C++ displayPort library source code introduces a reference to + * __cxa_pure_virtual. This should never actually get called, so + * simply assert. + */ +void __cxa_pure_virtual(void); + +void __cxa_pure_virtual(void) +{ + nvAssert(!"Pure virtual function called"); +} + +/* Import functions required by unix_rm_handle */ + +#if defined(DEBUG) + +void nvUnixRmHandleDebugAssert(const char *expString, + const char *filenameString, + const char *funcString, + const unsigned lineNumber) +{ + nvDebugAssert(expString, filenameString, funcString, lineNumber); +} + +void nvUnixRmHandleLogMsg(NvU32 level, const char *fmt, ...) 
+{ + + va_list ap; + va_start(ap, fmt); + + /* skip verbose messages */ + if (level < NV_UNIX_RM_HANDLE_DEBUG_VERBOSE) { + nvVEvoLog(EVO_LOG_WARN, NV_INVALID_GPU_LOG_INDEX, fmt, ap); + } + + va_end(ap); +} + +#endif /* DEBUG */ + +void *nvUnixRmHandleReallocMem(void *oldPtr, NvLength newSize) +{ + return nvRealloc(oldPtr, newSize); +} + +void nvUnixRmHandleFreeMem(void *ptr) +{ + nvFree(ptr); +} + +/* Import functions required by nv_assert */ + +#if defined(DEBUG) + +void nvDebugAssert(const char *expString, const char *filenameString, + const char *funcString, const unsigned int lineNumber) +{ + nvEvoLog(EVO_LOG_WARN, "NVKMS Assert @%s:%d:%s(): '%s'", + filenameString, lineNumber, funcString, expString); +} + +#endif /* DEBUG */ diff --git a/src/nvidia-modeset/src/nvkms-vrr.c b/src/nvidia-modeset/src/nvkms-vrr.c new file mode 100644 index 000000000..72b12aa3d --- /dev/null +++ b/src/nvidia-modeset/src/nvkms-vrr.c @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms-dma.h" +#include "nvkms-evo.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-vrr.h" +#include "dp/nvdp-connector-event-sink.h" +#include "nvkms-hdmi.h" +#include "nvkms-dpy.h" + +#include + +/*! + * Allocate the VRR semaphore surface. + * + * Only one array of VRR semaphores is needed per "head group", which for our + * purposes means a pDevEvo. This array is allocated when the device is + * initialized and kept around for the lifetime of the pDevEvo. + */ +void nvAllocVrrEvo(NVDevEvoPtr pDevEvo) +{ + NvU32 handle; + NvU64 size = NVKMS_VRR_SEMAPHORE_SURFACE_SIZE; + + /* On GPUs that support the HEAD_SET_DISPLAY_RATE method (nvdisplay), we + * don't need a VRR semaphore surface. 
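+     *
+     * (In that case this function returns early below, vrr.semaphoreHandle
+     * stays 0, and nvExportVrrSemaphoreSurface() then reports that there is
+     * nothing to export.)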
*/ + if (pDevEvo->hal->caps.supportsDisplayRate) { + return; + } + + handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator); + + if (nvRmAllocSysmem(pDevEvo, handle, NULL, &pDevEvo->vrr.pSemaphores, + size, NVKMS_MEMORY_NISO)) { + pDevEvo->vrr.semaphoreHandle = handle; + } else { + nvEvoLogDev(pDevEvo, EVO_LOG_ERROR, + "Failed to allocate G-SYNC semaphore memory"); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle); + } +} + +void nvFreeVrrEvo(NVDevEvoPtr pDevEvo) +{ + if (pDevEvo->vrr.semaphoreHandle != 0) { + if (pDevEvo->vrr.pSemaphores != NULL) { + nvRmApiUnmapMemory(nvEvoGlobal.clientHandle, + pDevEvo->deviceHandle, + pDevEvo->vrr.semaphoreHandle, + pDevEvo->vrr.pSemaphores, + 0); + pDevEvo->vrr.pSemaphores = NULL; + } + nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle, + pDevEvo->vrr.semaphoreHandle); + nvFreeUnixRmHandle(&pDevEvo->handleAllocator, + pDevEvo->vrr.semaphoreHandle); + pDevEvo->vrr.semaphoreHandle = 0; + } +} + +NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd) +{ + // Export the memory as an FD. + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { }; + const NvU32 hMemory = pDevEvo->vrr.semaphoreHandle; + NvU32 status; + + if (hMemory == 0) { + return FALSE; + } + + exportParams.fd = fd; + exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM; + exportParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle; + exportParams.object.data.rmObject.hObject = hMemory; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle, + NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD, + &exportParams, sizeof(exportParams)); + + return status == NVOS_STATUS_SUCCESS; +} + +NvBool nvDispSupportsVrr( + const NVDispEvoRec *pDispEvo) +{ + return FALSE; +} + +void nvDisableVrr(NVDevEvoPtr pDevEvo) +{ + return; +} + +void nvGetDpyMinRefreshRateValidValues( + const NVHwModeTimingsEvo *pTimings, + const enum NvKmsDpyVRRType vrrType, + const NvU32 edidTimeoutMicroseconds, + NvU32 *minMinRefreshRate, + NvU32 *maxMinRefreshRate) +{ + return; +} + +void nvEnableVrr( + NVDevEvoPtr pDevEvo, + const struct NvKmsSetModeRequest *pRequest) +{ + return; +} + +void nvSetVrrActive( + NVDevEvoPtr pDevEvo, + NvBool active) +{ + return; +} + +void nvApplyVrrBaseFlipOverrides( + const NVDispEvoRec *pDispEvo, + NvU32 head, + const NVFlipChannelEvoHwState *pOld, + NVFlipChannelEvoHwState *pNew) +{ + return; +} + +void nvCancelVrrFrameReleaseTimers( + NVDevEvoPtr pDevEvo) +{ + return; +} + +void nvSetNextVrrFlipTypeAndIndex( + NVDevEvoPtr pDevEvo, + struct NvKmsFlipReply *reply) +{ + return; +} + +void nvTriggerVrrUnstallMoveCursor( + NVDispEvoPtr pDispEvo) +{ + return; +} + +void nvTriggerVrrUnstallSetCursorImage( + NVDispEvoPtr pDispEvo, + NvBool ctxDmaChanged) +{ + return; +} + diff --git a/src/nvidia-modeset/src/nvkms.c b/src/nvidia-modeset/src/nvkms.c new file mode 100644 index 000000000..31fadd312 --- /dev/null +++ b/src/nvidia-modeset/src/nvkms.c @@ -0,0 +1,4990 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvkms.h" +#include "nvkms-private.h" +#include "nvkms-api.h" + +#include "nvkms-types.h" +#include "nvkms-utils.h" +#include "nvkms-console-restore.h" +#include "nvkms-dpy.h" +#include "nvkms-dma.h" +#include "nvkms-evo.h" +#include "nvkms-rm.h" +#include "nvkms-rmapi.h" +#include "nvkms-modepool.h" +#include "nvkms-modeset.h" +#include "nvkms-attributes.h" +#include "nvkms-framelock.h" +#include "nvkms-surface.h" +#include "nvkms-3dvision.h" +#include "nvkms-ioctl.h" +#include "nvkms-cursor.h" /* nvSetCursorImage, nvEvoMoveCursor */ +#include "nvkms-flip.h" /* nvFlipEvo */ +#include "nvkms-vrr.h" + +#include "dp/nvdp-connector.h" + +#include "nvUnixVersion.h" /* NV_VERSION_STRING */ +#include /* NV01_NULL_OBJECT/NV01_ROOT */ + +#include "nv_list.h" + + +/*! \file + * + * This source file implements the API of NVKMS, built around open, + * close, and ioctl file operations. + * + * An NvKmsPerOpen is stored "per-open"; all API handles are specific + * to a per-open instance. The NvKmsPerOpen is allocated during each + * nvKmsOpen() call, and freed during the corresponding nvKmsClose() + * call. + * + * An NvKmsPerOpenDev stores the API handles for the device and all + * the disps and connectors on the device. It is allocated during + * nvKmsIoctl(ALLOC_DEVICE), and freed during nvKmsIoctl(FREE_DEVICE). + */ + + +/* + * When the NVKMS device file is opened, the per-open structure could + * be used for one of several actions, denoted by its "type". The + * per-open type starts as Undefined. The per-open's first use + * defines its type. Once the type transitions from Undefined to + * anything, it can never transition to any other type. + */ +enum NvKmsPerOpenType { + /* + * The per-open is used for making ioctl calls to make requests of + * NVKMS. + */ + NvKmsPerOpenTypeIoctl, + + /* + * The per-open is used for granting access to a NVKMS registered + * surface. + */ + NvKmsPerOpenTypeGrantSurface, + + /* + * The per-open is used for granting permissions. + */ + NvKmsPerOpenTypeGrantPermissions, + + /* + * The per-open is used for granting access to a swap group + */ + NvKmsPerOpenTypeGrantSwapGroup, + + /* + * The per-open is used to unicast a specific event. + */ + NvKmsPerOpenTypeUnicastEvent, + + /* + * The per-open is currently undefined (this is the initial + * state). 
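+     *
+     * AssignNvKmsPerOpenType() below enforces the one-way transition: it
+     * only succeeds while the type is still Undefined, or when the
+     * requested type matches and redundant assignment is explicitly
+     * allowed.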
+ */ + NvKmsPerOpenTypeUndefined, +}; + +struct NvKmsPerOpenConnector { + NVConnectorEvoPtr pConnectorEvo; + NvKmsConnectorHandle nvKmsApiHandle; +}; + +struct NvKmsPerOpenFrameLock { + NVFrameLockEvoPtr pFrameLockEvo; + int refCnt; + NvKmsFrameLockHandle nvKmsApiHandle; +}; + +struct NvKmsPerOpenDisp { + NVDispEvoPtr pDispEvo; + NvKmsDispHandle nvKmsApiHandle; + NvKmsFrameLockHandle frameLockHandle; + NVEvoApiHandlesRec connectorHandles; + struct NvKmsPerOpenConnector connector[NVKMS_MAX_CONNECTORS_PER_DISP]; + NVEvoApiHandlesRec vblankSyncObjectHandles[NVKMS_MAX_HEADS_PER_DISP]; +}; + +struct NvKmsPerOpenDev { + NVDevEvoPtr pDevEvo; + NvKmsDeviceHandle nvKmsApiHandle; + NVEvoApiHandlesRec dispHandles; + NVEvoApiHandlesRec surfaceHandles; + struct NvKmsFlipPermissions flipPermissions; + struct NvKmsModesetPermissions modesetPermissions; + struct NvKmsPerOpenDisp disp[NVKMS_MAX_SUBDEVICES]; + NvBool isPrivileged; + NVEvoApiHandlesRec deferredRequestFifoHandles; +}; + +struct NvKmsPerOpenEventListEntry { + NVListRec eventListEntry; + struct NvKmsEvent event; +}; + +struct NvKmsPerOpen { + nvkms_per_open_handle_t *pOpenKernel; + NvU32 pid; + enum NvKmsClientType clientType; + NVListRec perOpenListEntry; + NVListRec perOpenIoctlListEntry; + enum NvKmsPerOpenType type; + + union { + struct { + NVListRec eventList; + NvU32 eventInterestMask; + NVEvoApiHandlesRec devHandles; + NVEvoApiHandlesRec frameLockHandles; + } ioctl; + + struct { + NVSurfaceEvoPtr pSurfaceEvo; + } grantSurface; + + struct { + NVDevEvoPtr pDevEvo; + NVSwapGroupPtr pSwapGroup; + } grantSwapGroup; + + struct { + NVDevEvoPtr pDevEvo; + struct NvKmsPermissions permissions; + } grantPermissions; + + struct { + /* + * A unicast event NvKmsPerOpen is assigned to an object, so that + * that object can generate events on the unicast event. Store a + * pointer to that object, so that we can clear the pointer when the + * unicast event NvKmsPerOpen is closed. + * + * So far, deferred request fifos with swap groups are the only + * users of unicast events. When we add more users, we can add an + * enum or similar to know which object type is using this unicast + * event. + */ + NVDeferredRequestFifoPtr pDeferredRequestFifo; + } unicastEvent; + }; +}; + +static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo); +static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo); + +static NVListRec perOpenList = NV_LIST_INIT(&perOpenList); +static NVListRec perOpenIoctlList = NV_LIST_INIT(&perOpenIoctlList); + +/*! + * Check if there is an NvKmsPerOpenDev on this NvKmsPerOpen that has + * the specified deviceId. + */ +static NvBool DeviceIdAlreadyPresent(struct NvKmsPerOpen *pOpen, NvU32 deviceId) +{ + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + if (pOpenDev->pDevEvo->usesTegraDevice && + (deviceId == NVKMS_DEVICE_ID_TEGRA)) { + return TRUE; + } else if (pOpenDev->pDevEvo->deviceId == deviceId) { + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Get the NvKmsPerOpenDev described by NvKmsPerOpen + deviceHandle. + */ +static struct NvKmsPerOpenDev *GetPerOpenDev( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle) +{ + if (pOpen == NULL) { + return NULL; + } + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.devHandles, deviceHandle); +} + + +/*! 
+ * Get the NvKmsPerOpenDev and NvKmsPerOpenDisp described by + * NvKmsPerOpen + deviceHandle + dispHandle. + */ +static NvBool GetPerOpenDevAndDisp( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + struct NvKmsPerOpenDev **ppOpenDev, + struct NvKmsPerOpenDisp **ppOpenDisp) +{ + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pOpenDisp = nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, + dispHandle); + + if (pOpenDisp == NULL) { + return FALSE; + } + + *ppOpenDev = pOpenDev; + *ppOpenDisp = pOpenDisp; + + return TRUE; +} + + +/*! + * Get the NvKmsPerOpenDisp described by NvKmsPerOpen + deviceHandle + + * dispHandle. + */ +static struct NvKmsPerOpenDisp *GetPerOpenDisp( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle) +{ + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, deviceHandle); + + if (pOpenDev == NULL) { + return NULL; + } + + return nvEvoGetPointerFromApiHandle(&pOpenDev->dispHandles, dispHandle); +} + + +/*! + * Get the NvKmsPerOpenConnector described by NvKmsPerOpen + + * deviceHandle + dispHandle + connectorHandle. + */ +static struct NvKmsPerOpenConnector *GetPerOpenConnector( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NvKmsConnectorHandle connectorHandle) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return NULL; + } + + return nvEvoGetPointerFromApiHandle(&pOpenDisp->connectorHandles, + connectorHandle); +} + + +/*! + * Get the NVDpyEvoRec described by NvKmsPerOpen + deviceHandle + + * dispHandle + dpyId. + */ +static NVDpyEvoRec *GetPerOpenDpy( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NVDpyId dpyId) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return NULL; + } + + return nvGetDpyEvoFromDispEvo(pOpenDisp->pDispEvo, dpyId); +} + + +/*! + * Get the NvKmsPerOpenFrameLock described by pOpen + frameLockHandle. + */ +static struct NvKmsPerOpenFrameLock *GetPerOpenFrameLock( + const struct NvKmsPerOpen *pOpen, + NvKmsFrameLockHandle frameLockHandle) +{ + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + return nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, + frameLockHandle); +} + + +/*! + * Free the NvKmsPerOpenFrameLock associated with this NvKmsPerOpenDisp. + * + * Multiple disps can be assigned to the same framelock object, so + * NvKmsPerOpenFrameLock is reference counted: the object is freed + * once all NvKmsPerOpenDisps remove their reference to it. + * + * \param[in,out] pOpen The per-open data, to which the + * NvKmsPerOpenFrameLock is assigned. + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding + * NvKmsPerOpenFrameLock should be freed. 
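+ *
+ * For example, if two NvKmsPerOpenDisps share one framelock object, the
+ * first call here only drops refCnt to 1; the NvKmsPerOpenFrameLock itself
+ * is freed by the second call.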
+ */ +static void FreePerOpenFrameLock(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDisp *pOpenDisp) +{ + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpenFrameLock = + nvEvoGetPointerFromApiHandle(&pOpen->ioctl.frameLockHandles, + pOpenDisp->frameLockHandle); + if (pOpenFrameLock == NULL) { + return; + } + + pOpenDisp->frameLockHandle = 0; + + pOpenFrameLock->refCnt--; + + if (pOpenFrameLock->refCnt != 0) { + return; + } + + nvEvoDestroyApiHandle(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock->nvKmsApiHandle); + nvFree(pOpenFrameLock); +} + + +/*! + * Allocate and initialize an NvKmsPerOpenFrameLock. + * + * If the disp described by the specified NvKmsPerOpenDisp has a + * framelock object, allocate an NvKmsPerOpenFrameLock for it. + * + * Multiple disps can be assigned to the same framelock object, so + * NvKmsPerOpenFrameLock is reference counted: we first look to see if + * an NvKmsPerOpenFrameLock for this disp's framelock object already + * exists. If so, we increment its reference count. Otherwise, we + * allocate a new NvKmsPerOpenFrameLock. + * + * \param[in,out] pOpen The per-open data, to which the + * new NvKmsPerOpenFrameLock should be assigned. + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp whose corresponding + * NvKmsPerOpenFrameLock should be allocated. + */ +static NvBool AllocPerOpenFrameLock( + struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDisp *pOpenDisp) +{ + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo; + NVFrameLockEvoPtr pFrameLockEvo = pDispEvo->pFrameLockEvo; + NvKmsGenericHandle handle; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pFrameLockEvo == NULL) { + return TRUE; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock, handle) { + if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { + goto done; + } + } + + pOpenFrameLock = nvCalloc(1, sizeof(*pOpenFrameLock)); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pOpenFrameLock->pFrameLockEvo = pFrameLockEvo; + pOpenFrameLock->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpen->ioctl.frameLockHandles, pOpenFrameLock); + + if (pOpenFrameLock->nvKmsApiHandle == 0) { + nvFree(pOpenFrameLock); + return FALSE; + } + +done: + pOpenDisp->frameLockHandle = pOpenFrameLock->nvKmsApiHandle; + pOpenFrameLock->refCnt++; + return TRUE; +} + + +/*! + * Get the NvKmsConnectorHandle that corresponds to the given + * NVConnectorEvoRec on the NvKmsPerOpen + deviceHandle + dispHandle. + */ +static NvKmsConnectorHandle ConnectorEvoToConnectorHandle( + const struct NvKmsPerOpen *pOpen, + const NvKmsDeviceHandle deviceHandle, + const NvKmsDispHandle dispHandle, + const NVConnectorEvoRec *pConnectorEvo) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + struct NvKmsPerOpenConnector *pOpenConnector; + NvKmsGenericHandle connector; + + pOpenDisp = GetPerOpenDisp(pOpen, deviceHandle, dispHandle); + + if (pOpenDisp == NULL) { + return 0; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, + pOpenConnector, connector) { + if (pOpenConnector->pConnectorEvo == pConnectorEvo) { + return pOpenConnector->nvKmsApiHandle; + } + } + + return 0; +} + + +/*! + * Get the NvKmsDeviceHandle and NvKmsDispHandle that corresponds to + * the given NVDispEvoRec on the NvKmsPerOpen. 
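+ *
+ * This is the reverse of GetPerOpenDisp(): rather than resolving handles
+ * to pointers, it linearly searches this client's handle tables for the
+ * device and disp handles that map to the given pDispEvo.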
+ */ +static NvBool DispEvoToDevAndDispHandles( + const struct NvKmsPerOpen *pOpen, + const NVDispEvoRec *pDispEvo, + NvKmsDeviceHandle *pDeviceHandle, + NvKmsDispHandle *pDispHandle) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + + if (pOpenDev->pDevEvo != pDevEvo) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + if (pOpenDisp->pDispEvo != pDispEvo) { + continue; + } + + *pDeviceHandle = pOpenDev->nvKmsApiHandle; + *pDispHandle = pOpenDisp->nvKmsApiHandle; + + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Get the NvKmsPerOpenDev that corresponds to the given NVDevEvoRec + * on the NvKmsPerOpen. + */ +static struct NvKmsPerOpenDev *DevEvoToOpenDev( + const struct NvKmsPerOpen *pOpen, + const NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + if (pOpenDev->pDevEvo == pDevEvo) { + return pOpenDev; + } + } + + return NULL; +} + + +/*! + * Get the NvKmsFrameLockHandle that corresponds to the given + * NVFrameLockEvoRec on the NvKmsPerOpen. + */ +static NvBool FrameLockEvoToFrameLockHandle( + const struct NvKmsPerOpen *pOpen, + const NVFrameLockEvoRec *pFrameLockEvo, + NvKmsFrameLockHandle *pFrameLockHandle) +{ + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NvKmsGenericHandle handle; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.frameLockHandles, + pOpenFrameLock, handle) { + + if (pOpenFrameLock->pFrameLockEvo == pFrameLockEvo) { + *pFrameLockHandle = pOpenFrameLock->nvKmsApiHandle; + return TRUE; + } + } + + return FALSE; +} + + +/*! + * Clear the specified NvKmsPerOpenConnector. + * + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the + * NvKmsPerOpenConnector is assigned. + * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to be cleared. + */ +static void ClearPerOpenConnector( + struct NvKmsPerOpenDisp *pOpenDisp, + struct NvKmsPerOpenConnector *pOpenConnector) +{ + nvEvoDestroyApiHandle(&pOpenDisp->connectorHandles, + pOpenConnector->nvKmsApiHandle); + nvkms_memset(pOpenConnector, 0, sizeof(*pOpenConnector)); +} + + +/*! + * Initialize an NvKmsPerOpenConnector. + * + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to which the + * NvKmsPerOpenConnector is assigned. + * \param[in,out] pOpenConnector The NvKmsPerOpenConnector to initialize. + * \param[in] pConnectorEvo The connector that the NvKmsPerOpenConnector + * corresponds to. + * + * \return If the NvKmsPerOpenConnector is successfully initialized, + * return TRUE. Otherwise, return FALSE. + */ +static NvBool InitPerOpenConnector( + struct NvKmsPerOpenDisp *pOpenDisp, + struct NvKmsPerOpenConnector *pOpenConnector, + NVConnectorEvoPtr pConnectorEvo) +{ + pOpenConnector->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpenDisp->connectorHandles, pOpenConnector); + + if (pOpenConnector->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenConnector->pConnectorEvo = pConnectorEvo; + + return TRUE; + +fail: + ClearPerOpenConnector(pOpenDisp, pOpenConnector); + return FALSE; +} + +/*! + * Clear the specified NvKmsPerOpenDisp. 
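+ *
+ * This releases the disp's framelock reference, clears each per-open
+ * connector, destroys the connector and per-head vblank sync object handle
+ * tables, and removes the disp from pOpenDev's dispHandles.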
+ * + * \param[in,out] pOpenDev The NvKmsPerOpenDev to which the NvKmsPerOpenDisp + * is assigned. + * \param[in,out] pDispEvo The NvKmsPerOpenDisp to be cleared. + */ +static void ClearPerOpenDisp( + struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsPerOpenDisp *pOpenDisp) +{ + struct NvKmsPerOpenConnector *pOpenConnector; + NvKmsGenericHandle connector; + + FreePerOpenFrameLock(pOpen, pOpenDisp); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDisp->connectorHandles, + pOpenConnector, connector) { + ClearPerOpenConnector(pOpenDisp, pOpenConnector); + } + + /* Destroy the API handle structures. */ + nvEvoDestroyApiHandles(&pOpenDisp->connectorHandles); + + for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { + nvEvoDestroyApiHandles(&pOpenDisp->vblankSyncObjectHandles[i]); + } + + nvEvoDestroyApiHandle(&pOpenDev->dispHandles, pOpenDisp->nvKmsApiHandle); + + nvkms_memset(pOpenDisp, 0, sizeof(*pOpenDisp)); +} + + +/*! + * Initialize an NvKmsPerOpenDisp. + * + * \param[in,out] pOpenDev The NvKmsPerOpenDev to which the NvKmsPerOpenDisp + * is assigned. + * \param[in,out] pOpenDisp The NvKmsPerOpenDisp to initialize. + * \param[in] pDispEvo The disp that the NvKmsPerOpenDisp corresponds to. + * + * \return If the NvKmsPerOpenDisp is successfully initialized, return TRUE. + * Otherwise, return FALSE. + */ +static NvBool InitPerOpenDisp( + struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev, + struct NvKmsPerOpenDisp *pOpenDisp, + NVDispEvoPtr pDispEvo) +{ + NVConnectorEvoPtr pConnectorEvo; + NvU32 connector; + + pOpenDisp->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpenDev->dispHandles, pOpenDisp); + + if (pOpenDisp->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenDisp->pDispEvo = pDispEvo; + + if (nvListCount(&pDispEvo->connectorList) >= + ARRAY_LEN(pOpenDisp->connector)) { + nvAssert(!"More connectors on this disp than NVKMS can handle."); + goto fail; + } + + if (!nvEvoInitApiHandles(&pOpenDisp->connectorHandles, + ARRAY_LEN(pOpenDisp->connector))) { + goto fail; + } + + connector = 0; + FOR_ALL_EVO_CONNECTORS(pConnectorEvo, pDispEvo) { + if (!InitPerOpenConnector(pOpenDisp, &pOpenDisp->connector[connector], + pConnectorEvo)) { + goto fail; + } + connector++; + } + + /* Initialize the vblankSyncObjectHandles for each head. */ + for (NvU32 i = 0; i < NVKMS_MAX_HEADS_PER_DISP; i++) { + if (!nvEvoInitApiHandles(&pOpenDisp->vblankSyncObjectHandles[i], + NVKMS_MAX_VBLANK_SYNC_OBJECTS_PER_HEAD)) { + goto fail; + } + } + + if (!AllocPerOpenFrameLock(pOpen, pOpenDisp)) { + goto fail; + } + + return TRUE; + +fail: + ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); + return FALSE; +} + +/*! + * Check that the NvKmsPermissions make sense. + */ +static NvBool ValidateNvKmsPermissions( + const NVDevEvoRec *pDevEvo, + const struct NvKmsPermissions *pPermissions) +{ + if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { + + NvU8 layerMask = pPermissions->flip.disp[d].head[h].layerMask; + + if (layerMask == 0) { + continue; + } + + if (nvHasBitAboveMax(layerMask, pDevEvo->head[h].numLayers)) { + return FALSE; + } + + /* + * If the above blocks didn't 'continue', then there + * are permissions specified for this disp+head. Is + * the specified disp+head in range for the current + * configuration? 
+ */ + if (d >= pDevEvo->nDispEvo) { + return FALSE; + } + + if (h >= pDevEvo->numHeads) { + return FALSE; + } + } + } + } else if (pPermissions->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPermissions->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPermissions->flip.disp[d].head); h++) { + + NVDpyIdList dpyIdList = + pPermissions->modeset.disp[d].head[h].dpyIdList; + + if (nvDpyIdListIsEmpty(dpyIdList)) { + continue; + } + + /* + * If the above blocks didn't 'continue', then there + * are permissions specified for this disp+head. Is + * the specified disp+head in range for the current + * configuration? + */ + if (d >= pDevEvo->nDispEvo) { + return FALSE; + } + + if (h >= pDevEvo->numHeads) { + return FALSE; + } + } + } + } else { + return FALSE; + } + + return TRUE; +} + +/*! + * Assign pPermissions with the maximum permissions possible for + * the pDevEvo. + */ +static void AssignFullNvKmsFlipPermissions( + const NVDevEvoRec *pDevEvo, + struct NvKmsFlipPermissions *pPermissions) +{ + NvU32 dispIndex, head; + + nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); + + for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { + for (head = 0; head < pDevEvo->numHeads; head++) { + pPermissions->disp[dispIndex].head[head].layerMask = + NVBIT(pDevEvo->head[head].numLayers) - 1; + } + } +} + +static void AssignFullNvKmsModesetPermissions( + const NVDevEvoRec *pDevEvo, + struct NvKmsModesetPermissions *pPermissions) +{ + NvU32 dispIndex, head; + + nvkms_memset(pPermissions, 0, sizeof(*pPermissions)); + + for (dispIndex = 0; dispIndex < pDevEvo->nDispEvo; dispIndex++) { + for (head = 0; head < pDevEvo->numHeads; head++) { + pPermissions->disp[dispIndex].head[head].dpyIdList = + nvAllDpyIdList(); + } + } +} + +/*! + * Set the modeset owner to pOpenDev + * + * \param pOpenDev The per-open device structure for the new modeset owner. + * \return FALSE if there was already a modeset owner. TRUE otherwise. + */ +static NvBool GrabModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) +{ + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + if (pDevEvo->modesetOwner == pOpenDev) { + return TRUE; + } + + if (pDevEvo->modesetOwner != NULL) { + return FALSE; + } + + /* + * If claiming modeset ownership, undo any SST forcing imposed by + * console restore. + */ + if (pOpenDev != pDevEvo->pNvKmsOpenDev) { + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + } + + pDevEvo->modesetOwner = pOpenDev; + + AssignFullNvKmsFlipPermissions(pDevEvo, &pOpenDev->flipPermissions); + AssignFullNvKmsModesetPermissions(pDevEvo, &pOpenDev->modesetPermissions); + + pDevEvo->modesetOwnerChanged = TRUE; + + return TRUE; +} + + +/*! + * Clear permissions on the specified device for all NvKmsPerOpens. + * + * For NvKmsPerOpen::type==Ioctl, clear the permissions, except for the + * specified pOpenDevExclude. + * + * For NvKmsPerOpen::type==GrantPermissions, clear + * NvKmsPerOpen::grantPermissions and reset NvKmsPerOpen::type to + * Undefined. 
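+ *
+ * Callers pass a bitmask of NV_KMS_PERMISSIONS_TYPE_* values; for example,
+ * ReleaseModesetOwnership() below revokes both permission types at once:
+ *
+ *   RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) |
+ *                             NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET),
+ *                             pDevEvo, NULL);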
+ */ +static void RevokePermissionsInternal( + const NvU32 typeBitmask, + const NVDevEvoRec *pDevEvo, + const struct NvKmsPerOpenDev *pOpenDevExclude) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + if ((pOpen->type == NvKmsPerOpenTypeGrantPermissions) && + (pOpen->grantPermissions.pDevEvo == pDevEvo) && + (typeBitmask & NVBIT(pOpen->grantPermissions.permissions.type))) { + nvkms_memset(&pOpen->grantPermissions, 0, + sizeof(pOpen->grantPermissions)); + pOpen->type = NvKmsPerOpenTypeUndefined; + } + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + struct NvKmsPerOpenDev *pOpenDev = + DevEvoToOpenDev(pOpen, pDevEvo); + + if (pOpenDev == NULL) { + continue; + } + + if (pOpenDev == pOpenDevExclude || pOpenDev->isPrivileged) { + continue; + } + + if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING)) { + nvkms_memset(&pOpenDev->flipPermissions, 0, + sizeof(pOpenDev->flipPermissions)); + } + + if (typeBitmask & NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET)) { + nvkms_memset(&pOpenDev->modesetPermissions, 0, + sizeof(pOpenDev->modesetPermissions)); + } + } + } +} + +static void ReallocCoreChannel(NVDevEvoRec *pDevEvo) +{ + if (nvAllocCoreChannelEvo(pDevEvo)) { + nvDPSetAllowMultiStreaming(pDevEvo, TRUE /* allowMST */); + AllocSurfaceCtxDmasForAllOpens(pDevEvo); + } +} + +static void RestoreConsole(NVDevEvoPtr pDevEvo) +{ + pDevEvo->modesetOwnerChanged = TRUE; + + // Try to issue a modeset and flip to the framebuffer console surface. + if (!nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */)) { + // If that didn't work, free the core channel to trigger RM's console + // restore code. + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + nvFreeCoreChannelEvo(pDevEvo); + + // Reallocate the core channel right after freeing it. This makes sure + // that it's allocated and ready right away if another NVKMS client is + // started. + ReallocCoreChannel(pDevEvo); + } +} + +/*! + * Release modeset ownership previously set by GrabModesetOwnership + * + * \param pOpenDev The per-open device structure relinquishing modeset + * ownership. + * \return FALSE if pOpenDev is not the modeset owner, TRUE otherwise. + */ +static NvBool ReleaseModesetOwnership(struct NvKmsPerOpenDev *pOpenDev) +{ + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + if (pDevEvo->modesetOwner != pOpenDev) { + // Only the current owner can release ownership. + return FALSE; + } + + pDevEvo->modesetOwner = NULL; + pDevEvo->handleConsoleHotplugs = TRUE; + + RestoreConsole(pDevEvo); + RevokePermissionsInternal(NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET), + pDevEvo, NULL /* pOpenDevExclude */); + return TRUE; +} + +/*! + * Free the specified NvKmsPerOpenDev. + * + * \param[in,out] pOpen The per-open data, to which the + * NvKmsPerOpenDev is assigned. + * \param[in,out] pOpenDev The NvKmsPerOpenDev to free. 
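+ *
+ * Note that this only tears down per-open bookkeeping (surface, disp, and
+ * deferred request fifo handles); the underlying device is released
+ * separately, e.g. via nvFreeDevEvo() in FreeDeviceReference().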
+ */ +void nvFreePerOpenDev(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + return; + } + + nvEvoDestroyApiHandles(&pOpenDev->surfaceHandles); + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + ClearPerOpenDisp(pOpen, pOpenDev, pOpenDisp); + } + + nvEvoDestroyApiHandles(&pOpenDev->dispHandles); + + nvEvoDestroyApiHandle(&pOpen->ioctl.devHandles, pOpenDev->nvKmsApiHandle); + + nvEvoDestroyApiHandles(&pOpenDev->deferredRequestFifoHandles); + + nvFree(pOpenDev); +} + + +/*! + * Allocate and initialize an NvKmsPerOpenDev. + * + * \param[in,out] pOpen The per-open data, to which the + * new NvKmsPerOpenDev should be assigned. + * \param[in] pDevEvo The device to which the new NvKmsPerOpenDev + * corresponds. + * \param[in] isPrivileged The NvKmsPerOpenDev is privileged which can + * do modeset anytime. + * + * \return On success, return a pointer to the new NvKmsPerOpenDev. + * On failure, return NULL. + */ +struct NvKmsPerOpenDev *nvAllocPerOpenDev(struct NvKmsPerOpen *pOpen, + NVDevEvoPtr pDevEvo, NvBool isPrivileged) +{ + struct NvKmsPerOpenDev *pOpenDev = nvCalloc(1, sizeof(*pOpenDev)); + NVDispEvoPtr pDispEvo; + NvU32 disp; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + goto fail; + } + + pOpenDev->nvKmsApiHandle = + nvEvoCreateApiHandle(&pOpen->ioctl.devHandles, pOpenDev); + + if (pOpenDev->nvKmsApiHandle == 0) { + goto fail; + } + + pOpenDev->pDevEvo = pDevEvo; + + if (!nvEvoInitApiHandles(&pOpenDev->dispHandles, + ARRAY_LEN(pOpenDev->disp))) { + goto fail; + } + + if (pDevEvo->nDispEvo > ARRAY_LEN(pOpenDev->disp)) { + nvAssert(!"More disps on this device than NVKMS can handle."); + goto fail; + } + + FOR_ALL_EVO_DISPLAYS(pDispEvo, disp, pDevEvo) { + if (!InitPerOpenDisp(pOpen, pOpenDev, &pOpenDev->disp[disp], pDispEvo)) { + goto fail; + } + } + + if (!nvEvoInitApiHandles(&pOpenDev->surfaceHandles, 32)) { + goto fail; + } + + pOpenDev->isPrivileged = isPrivileged; + if (pOpenDev->isPrivileged) { + AssignFullNvKmsFlipPermissions(pDevEvo, + &pOpenDev->flipPermissions); + AssignFullNvKmsModesetPermissions(pOpenDev->pDevEvo, + &pOpenDev->modesetPermissions); + } + + if (!nvEvoInitApiHandles(&pOpenDev->deferredRequestFifoHandles, 4)) { + goto fail; + } + + return pOpenDev; + +fail: + nvFreePerOpenDev(pOpen, pOpenDev); + return NULL; +} + + +/*! + * Assign NvKmsPerOpen::type. + * + * This succeeds only if NvKmsPerOpen::type is Undefined, or already + * has the requested type and allowRedundantAssignment is TRUE. + */ +static NvBool AssignNvKmsPerOpenType(struct NvKmsPerOpen *pOpen, + enum NvKmsPerOpenType type, + NvBool allowRedundantAssignment) +{ + if ((pOpen->type == type) && allowRedundantAssignment) { + return TRUE; + } + + if (pOpen->type != NvKmsPerOpenTypeUndefined) { + return FALSE; + } + + switch (type) { + case NvKmsPerOpenTypeIoctl: + nvListInit(&pOpen->ioctl.eventList); + + if (!nvEvoInitApiHandles(&pOpen->ioctl.devHandles, NV_MAX_DEVICES)) { + return FALSE; + } + + if (!nvEvoInitApiHandles(&pOpen->ioctl.frameLockHandles, 4)) { + nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); + return FALSE; + } + + nvListAppend(&pOpen->perOpenIoctlListEntry, &perOpenIoctlList); + break; + + case NvKmsPerOpenTypeGrantSurface: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeGrantSwapGroup: + /* Nothing to do, here. 
*/ + break; + + case NvKmsPerOpenTypeGrantPermissions: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeUnicastEvent: + /* Nothing to do, here. */ + break; + + case NvKmsPerOpenTypeUndefined: + nvAssert(!"unexpected NvKmsPerOpenType"); + break; + } + + pOpen->type = type; + return TRUE; +} + +/*! + * Allocate the specified device. + */ +static NvBool AllocDevice(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsAllocDeviceParams *pParams = pParamsVoid; + NVDevEvoPtr pDevEvo; + struct NvKmsPerOpenDev *pOpenDev; + NvU32 disp, head; + NvU8 layer; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + if (nvkms_strcmp(pParams->request.versionString, NV_VERSION_STRING) != 0) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_VERSION_MISMATCH; + return FALSE; + } + + /* + * It is an error to call NVKMS_IOCTL_ALLOC_DEVICE multiple times + * on the same device with the same fd. + */ + if (DeviceIdAlreadyPresent(pOpen, pParams->request.deviceId)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } + + pDevEvo = nvFindDevEvoByDeviceId(pParams->request.deviceId); + + if (pDevEvo == NULL) { + pDevEvo = nvAllocDevEvo(&pParams->request, &pParams->reply.status); + if (pDevEvo == NULL) { + return FALSE; + } + } else { + if (!pParams->request.tryInferSliMosaicFromExistingDevice && + (pDevEvo->sli.mosaic != pParams->request.sliMosaic)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } + + if (pDevEvo->usesTegraDevice && + (pParams->request.deviceId != NVKMS_DEVICE_ID_TEGRA)) { + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_BAD_REQUEST; + return FALSE; + } + pDevEvo->allocRefCnt++; + } + + pOpenDev = nvAllocPerOpenDev(pOpen, pDevEvo, FALSE /* isPrivileged */); + + if (pOpenDev == NULL) { + nvFreeDevEvo(pDevEvo); + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_FATAL_ERROR; + return FALSE; + } + + /* Beyond this point, the function cannot fail. 
*/ + + if (pParams->request.enableConsoleHotplugHandling) { + pDevEvo->handleConsoleHotplugs = TRUE; + } + + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + pParams->reply.subDeviceMask = + NV_TWO_N_MINUS_ONE(pDevEvo->numSubDevices); + pParams->reply.numHeads = pDevEvo->numHeads; + pParams->reply.numDisps = pDevEvo->nDispEvo; + + ct_assert(ARRAY_LEN(pParams->reply.dispHandles) == + ARRAY_LEN(pOpenDev->disp)); + + for (disp = 0; disp < ARRAY_LEN(pParams->reply.dispHandles); disp++) { + pParams->reply.dispHandles[disp] = pOpenDev->disp[disp].nvKmsApiHandle; + } + + pParams->reply.inputLutAppliesToBase = pDevEvo->caps.inputLutAppliesToBase; + + ct_assert(ARRAY_LEN(pParams->reply.layerCaps) == + ARRAY_LEN(pDevEvo->caps.layerCaps)); + + for (head = 0; head < pDevEvo->numHeads; head++) { + pParams->reply.numLayers[head] = pDevEvo->head[head].numLayers; + } + + for (layer = 0; + layer < ARRAY_LEN(pParams->reply.layerCaps); + layer++) { + pParams->reply.layerCaps[layer] = pDevEvo->caps.layerCaps[layer]; + } + + pParams->reply.surfaceAlignment = NV_EVO_SURFACE_ALIGNMENT; + pParams->reply.requiresVrrSemaphores = !pDevEvo->hal->caps.supportsDisplayRate; + + pParams->reply.nIsoSurfacesInVidmemOnly = + !!NV5070_CTRL_SYSTEM_GET_CAP(pDevEvo->capsBits, + NV5070_CTRL_SYSTEM_CAPS_BUG_644815_DNISO_VIDMEM_ONLY); + + pParams->reply.requiresAllAllocationsInSysmem = + pDevEvo->requiresAllAllocationsInSysmem; + pParams->reply.supportsHeadSurface = pDevEvo->isHeadSurfaceSupported; + + pParams->reply.validNIsoFormatMask = pDevEvo->caps.validNIsoFormatMask; + + pParams->reply.maxWidthInBytes = pDevEvo->caps.maxWidthInBytes; + pParams->reply.maxWidthInPixels = pDevEvo->caps.maxWidthInPixels; + pParams->reply.maxHeightInPixels = pDevEvo->caps.maxHeight; + pParams->reply.cursorCompositionCaps = pDevEvo->caps.cursorCompositionCaps; + pParams->reply.genericPageKind = pDevEvo->caps.genericPageKind; + + pParams->reply.maxCursorSize = pDevEvo->cursorHal->caps.maxSize; + + pParams->reply.validLayerRRTransforms = pDevEvo->caps.validLayerRRTransforms; + + pParams->reply.isoIOCoherencyModes = pDevEvo->isoIOCoherencyModes; + pParams->reply.nisoIOCoherencyModes = pDevEvo->nisoIOCoherencyModes; + + pParams->reply.supportsSyncpts = pDevEvo->supportsSyncpts; + + pParams->reply.supportsIndependentAcqRelSemaphore = + pDevEvo->hal->caps.supportsIndependentAcqRelSemaphore; + + pParams->reply.supportsVblankSyncObjects = + pDevEvo->hal->caps.supportsVblankSyncObjects; + + pParams->reply.status = NVKMS_ALLOC_DEVICE_STATUS_SUCCESS; + + return TRUE; +} + +static void UnregisterDeferredRequestFifos(struct NvKmsPerOpenDev *pOpenDev) +{ + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle handle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, + handle) { + + nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); + + nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, + pDeferredRequestFifo); + } +} + +/* + * Forward declaration since this function is used by + * DisableRemainingVblankSyncObjects(). 
+ */ +static void DisableAndCleanVblankSyncObject(struct NvKmsPerOpenDisp *pOpenDisp, + NvU32 head, + NVVblankSyncObjectRec *pVblankSyncObject, + NVEvoUpdateState *pUpdateState, + NvKmsVblankSyncObjectHandle handle); + +static void DisableRemainingVblankSyncObjects(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + struct NvKmsPerOpenDisp *pOpenDisp; + NvKmsGenericHandle disp; + NVVblankSyncObjectRec *pVblankSyncObject; + NvKmsVblankSyncObjectHandle handle; + NvU32 head = 0; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pOpenDev == NULL) { + return; + } + + /* For each pOpenDisp: */ + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->dispHandles, + pOpenDisp, disp) { + /* + * A single update state can handle changes across multiple heads on a + * given Disp. + */ + NVEvoUpdateState updateState = { }; + + /* For each head: */ + for (head = 0; head < ARRAY_LEN(pOpenDisp->vblankSyncObjectHandles); head++) { + NVEvoApiHandlesRec *pHandles = + &pOpenDisp->vblankSyncObjectHandles[head]; + + /* For each still-active vblank sync object: */ + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(pHandles, + pVblankSyncObject, handle) { + DisableAndCleanVblankSyncObject(pOpenDisp, head, + pVblankSyncObject, + &updateState, + handle); + } + } + + if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() calls (inherent in + * DisableAndCleanVblankSyncObject()) above. This will set up + * and wait for a notification that the hardware execution + * has completed. + */ + nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, + TRUE); + } + } +} + +static void FreeDeviceReference(struct NvKmsPerOpen *pOpen, + struct NvKmsPerOpenDev *pOpenDev) +{ + /* Disable all client-owned vblank sync objects that still exist. */ + DisableRemainingVblankSyncObjects(pOpen, pOpenDev); + + UnregisterDeferredRequestFifos(pOpenDev); + + nvEvoFreeClientSurfaces(pOpenDev->pDevEvo, pOpenDev, + &pOpenDev->surfaceHandles); + + if (!nvFreeDevEvo(pOpenDev->pDevEvo)) { + // If this pOpenDev is the modeset owner, implicitly release it. Does + // nothing if this pOpenDev is not the modeset owner. + // + // If nvFreeDevEvo() freed the device, then it also implicitly released + // ownership. + ReleaseModesetOwnership(pOpenDev); + + nvAssert(pOpenDev->pDevEvo->modesetOwner != pOpenDev); + } + + nvFreePerOpenDev(pOpen, pOpenDev); +} + +/*! + * Free the specified device. + */ +static NvBool FreeDevice(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsFreeDeviceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + FreeDeviceReference(pOpen, pOpenDev); + + return TRUE; +} + + +/*! + * Get the disp data. This information should remain static for the + * lifetime of the disp. 
+ */ +static NvBool QueryDisp(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDispParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + const NVEvoSubDeviceRec *pSubDevice; + NVDispEvoPtr pDispEvo; + NvU32 connector; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pDispEvo = pOpenDisp->pDispEvo; + + pParams->reply.displayOwner = pDispEvo->displayOwner; + pParams->reply.subDeviceMask = nvDispSubDevMaskEvo(pDispEvo); + // Don't include dynamic displays in validDpys. The data returned here is + // supposed to be static for the lifetime of the pDispEvo. + pParams->reply.validDpys = + nvDpyIdListMinusDpyIdList(pDispEvo->validDisplays, + pDispEvo->dynamicDpyIds); + pParams->reply.bootDpys = pDispEvo->bootDisplays; + pParams->reply.muxDpys = pDispEvo->muxDisplays; + pParams->reply.frameLockHandle = pOpenDisp->frameLockHandle; + pParams->reply.numConnectors = nvListCount(&pDispEvo->connectorList); + + ct_assert(ARRAY_LEN(pParams->reply.connectorHandles) == + ARRAY_LEN(pOpenDisp->connector)); + + for (connector = 0; connector < ARRAY_LEN(pParams->reply.connectorHandles); + connector++) { + pParams->reply.connectorHandles[connector] = + pOpenDisp->connector[connector].nvKmsApiHandle; + } + + pSubDevice = pDispEvo->pDevEvo->pSubDevices[pDispEvo->displayOwner]; + if (pSubDevice != NULL) { + ct_assert(sizeof(pParams->reply.gpuString) >= + sizeof(pSubDevice->gpuString)); + nvkms_memcpy(pParams->reply.gpuString, pSubDevice->gpuString, + sizeof(pSubDevice->gpuString)); + } + + return TRUE; +} + + +/*! + * Get the connector static data. This information should remain static for the + * lifetime of the connector. + */ +static NvBool QueryConnectorStaticData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryConnectorStaticDataParams *pParams = pParamsVoid; + struct NvKmsPerOpenConnector *pOpenConnector; + NVConnectorEvoPtr pConnectorEvo; + + pOpenConnector = GetPerOpenConnector(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.connectorHandle); + if (pOpenConnector == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pConnectorEvo = pOpenConnector->pConnectorEvo; + + pParams->reply.dpyId = pConnectorEvo->displayId; + pParams->reply.isDP = nvConnectorUsesDPLib(pConnectorEvo) || + nvConnectorIsDPSerializer(pConnectorEvo); + pParams->reply.legacyTypeIndex = pConnectorEvo->legacyTypeIndex; + pParams->reply.type = pConnectorEvo->type; + pParams->reply.typeIndex = pConnectorEvo->typeIndex; + pParams->reply.signalFormat = pConnectorEvo->signalFormat; + pParams->reply.physicalIndex = pConnectorEvo->physicalIndex; + pParams->reply.physicalLocation = pConnectorEvo->physicalLocation; + pParams->reply.headMask = pConnectorEvo->validHeadMask; + + pParams->reply.isLvds = + (pConnectorEvo->or.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) && + (pConnectorEvo->or.protocol == + NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM); + + pParams->reply.locationOnChip = (pConnectorEvo->or.location == + NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP); + return TRUE; +} + + +/*! + * Get the connector dynamic data. This information should reflects changes to + * the connector over time (e.g. for DisplayPort MST devices). 
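+ * (detectComplete is only meaningful for DP connectors; as the code below
+ * shows, every other connector type reports TRUE unconditionally.)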
+ */ +static NvBool QueryConnectorDynamicData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryConnectorDynamicDataParams *pParams = pParamsVoid; + struct NvKmsPerOpenConnector *pOpenConnector; + NVConnectorEvoPtr pConnectorEvo; + NVDispEvoPtr pDispEvo; + NVDpyEvoPtr pDpyEvo; + + pOpenConnector = GetPerOpenConnector(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.connectorHandle); + if (pOpenConnector == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pConnectorEvo = pOpenConnector->pConnectorEvo; + pDispEvo = pConnectorEvo->pDispEvo; + + if (nvConnectorUsesDPLib(pConnectorEvo)) { + pParams->reply.detectComplete = pConnectorEvo->detectComplete; + } else { + pParams->reply.detectComplete = TRUE; + } + + // Find the dynamic dpys on this connector. + pParams->reply.dynamicDpyIdList = nvEmptyDpyIdList(); + FOR_ALL_EVO_DPYS(pDpyEvo, pDispEvo->dynamicDpyIds, pDispEvo) { + if (pDpyEvo->pConnectorEvo == pConnectorEvo) { + pParams->reply.dynamicDpyIdList = + nvAddDpyIdToDpyIdList(pDpyEvo->id, + pParams->reply.dynamicDpyIdList); + } + } + + return TRUE; +} + + +/*! + * Get the static data for the specified dpy. This information should + * remain static for the lifetime of the dpy. + */ +static NvBool QueryDpyStaticData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDpyStaticDataParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pParams->reply.connectorHandle = + ConnectorEvoToConnectorHandle(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pDpyEvo->pConnectorEvo); + /* + * All pConnectorEvos should have corresponding pOpenConnectors, + * so we should always be able to find the NvKmsConnectorHandle. + */ + nvAssert(pParams->reply.connectorHandle != 0); + + pParams->reply.type = pDpyEvo->pConnectorEvo->legacyType; + + if (pDpyEvo->dp.addressString != NULL) { + const size_t len = nvkms_strlen(pDpyEvo->dp.addressString) + 1; + nvkms_memcpy(pParams->reply.dpAddress, pDpyEvo->dp.addressString, + NV_MIN(sizeof(pParams->reply.dpAddress), len)); + pParams->reply.dpAddress[sizeof(pParams->reply.dpAddress) - 1] = '\0'; + } + + pParams->reply.mobileInternal = pDpyEvo->internal; + pParams->reply.isDpMST = nvDpyEvoIsDPMST(pDpyEvo); + + return TRUE; +} + + +/*! + * Get the dynamic data for the specified dpy. This information can + * change when a hotplug occurs. + */ +static NvBool QueryDpyDynamicData(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDpyDynamicDataParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvDpyGetDynamicData(pDpyEvo, pParams); +} + +/* Store a copy of the user's infoString pointer, so we can copy out to it when + * we're done. */ +struct InfoStringExtraUserStateCommon +{ + NvU64 userInfoString; +}; + +/* + * Allocate a kernel buffer to populate the infoString which will be copied out + * to userspace upon completion. 
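+ *
+ * The Prep/Done helpers are symmetric: Prep stashes the caller's user-space
+ * pointer in pExtra->userInfoString and substitutes a kernel allocation, so
+ * the ioctl proc can build the info string without touching user memory;
+ * Done copies the written bytes back out and frees the kernel buffer. A
+ * rough caller-side sketch (illustrative only, not literal code from this
+ * file):
+ *
+ *   InfoStringPrepUserCommon(size, &pRequest->pInfoString, &extra);
+ *   ... the ioctl proc fills the buffer and sets infoStringLenWritten ...
+ *   InfoStringDoneUserCommon(size, pRequest->pInfoString,
+ *                            &pReply->infoStringLenWritten, &extra);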
+ */ +static NvBool InfoStringPrepUserCommon( + NvU32 infoStringSize, + NvU64 *ppInfoString, + struct InfoStringExtraUserStateCommon *pExtra) +{ + char *kernelInfoString = NULL; + + if (infoStringSize == 0) { + *ppInfoString = 0; + return TRUE; + } + + if (!nvKmsNvU64AddressIsSafe(*ppInfoString)) { + return FALSE; + } + + if (infoStringSize > NVKMS_MODE_VALIDATION_MAX_INFO_STRING_LENGTH) { + return FALSE; + } + + kernelInfoString = nvCalloc(1, infoStringSize); + if (kernelInfoString == NULL) { + return FALSE; + } + + pExtra->userInfoString = *ppInfoString; + *ppInfoString = nvKmsPointerToNvU64(kernelInfoString); + + return TRUE; +} + +/* + * Copy the infoString out to userspace and free the kernel-internal buffer. + */ +static NvBool InfoStringDoneUserCommon( + NvU32 infoStringSize, + NvU64 pInfoString, + NvU32 *infoStringLenWritten, + struct InfoStringExtraUserStateCommon *pExtra) +{ + char *kernelInfoString = nvKmsNvU64ToPointer(pInfoString); + int status; + NvBool ret; + + if ((infoStringSize == 0) || (*infoStringLenWritten == 0)) { + ret = TRUE; + goto done; + } + + nvAssert(*infoStringLenWritten <= infoStringSize); + + status = nvkms_copyout(pExtra->userInfoString, + kernelInfoString, + *infoStringLenWritten); + if (status == 0) { + ret = TRUE; + } else { + ret = FALSE; + *infoStringLenWritten = 0; + } + +done: + nvFree(kernelInfoString); + + return ret; +} + +struct NvKmsValidateModeIndexExtraUserState +{ + struct InfoStringExtraUserStateCommon common; +}; + +static NvBool ValidateModeIndexPrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringPrepUserCommon( + pParams->request.infoStringSize, + &pParams->request.pInfoString, + &pExtra->common); +} + +static NvBool ValidateModeIndexDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + struct NvKmsValidateModeIndexExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringDoneUserCommon( + pParams->request.infoStringSize, + pParams->request.pInfoString, + &pParams->reply.infoStringLenWritten, + &pExtra->common); +} + +/*! + * Validate the requested mode. 
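+ *
+ * If the client supplied a non-zero infoStringSize, the pInfoString pointer
+ * has already been swapped for a kernel buffer by ValidateModeIndexPrepUser()
+ * using the Prep/Done helpers above.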
+ */ +static NvBool ValidateModeIndex(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsValidateModeIndexParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvValidateModeIndex(pDpyEvo, &pParams->request, &pParams->reply); + + return TRUE; +} + +struct NvKmsValidateModeExtraUserState +{ + struct InfoStringExtraUserStateCommon common; +}; + +static NvBool ValidateModePrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringPrepUserCommon( + pParams->request.infoStringSize, + &pParams->request.pInfoString, + &pExtra->common); +} + +static NvBool ValidateModeDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + struct NvKmsValidateModeExtraUserState *pExtra = pExtraUserStateVoid; + + return InfoStringDoneUserCommon( + pParams->request.infoStringSize, + pParams->request.pInfoString, + &pParams->reply.infoStringLenWritten, + &pExtra->common); +} + +/*! + * Validate the requested mode. + */ +static NvBool ValidateMode(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsValidateModeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + nvValidateModeEvo(pDpyEvo, &pParams->request, &pParams->reply); + + return TRUE; +} + +static NvBool +CopyInOneLut(NvU64 pRampsUser, struct NvKmsLutRamps **ppRampsKernel) +{ + struct NvKmsLutRamps *pRampsKernel = NULL; + int status; + + if (pRampsUser == 0) { + return TRUE; + } + + if (!nvKmsNvU64AddressIsSafe(pRampsUser)) { + return FALSE; + } + + pRampsKernel = nvAlloc(sizeof(*pRampsKernel)); + if (!pRampsKernel) { + return FALSE; + } + + status = nvkms_copyin((char *)pRampsKernel, pRampsUser, + sizeof(*pRampsKernel)); + if (status != 0) { + nvFree(pRampsKernel); + return FALSE; + } + + *ppRampsKernel = pRampsKernel; + + return TRUE; +} + +static NvBool +CopyInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) +{ + struct NvKmsLutRamps *pInputRamps = NULL; + struct NvKmsLutRamps *pOutputRamps = NULL; + + if (!CopyInOneLut(pCommonLutParams->input.pRamps, &pInputRamps)) { + goto fail; + } + if (!CopyInOneLut(pCommonLutParams->output.pRamps, &pOutputRamps)) { + goto fail; + } + + pCommonLutParams->input.pRamps = nvKmsPointerToNvU64(pInputRamps); + pCommonLutParams->output.pRamps = nvKmsPointerToNvU64(pOutputRamps); + + return TRUE; + +fail: + nvFree(pInputRamps); + nvFree(pOutputRamps); + return FALSE; +} + +static void +FreeCopiedInLutParams(struct NvKmsSetLutCommonParams *pCommonLutParams) +{ + struct NvKmsLutRamps *pInputRamps = + nvKmsNvU64ToPointer(pCommonLutParams->input.pRamps); + struct NvKmsLutRamps *pOutputRamps = + nvKmsNvU64ToPointer(pCommonLutParams->output.pRamps); + + nvFree(pInputRamps); + nvFree(pOutputRamps); +} + +/* No extra user state needed for SetMode; although we lose the user pointers + * for the LUT ramps after copying them in, that's okay because we don't need + * to copy them back out again. */ +struct NvKmsSetModeExtraUserState +{ +}; + +/*! + * Copy in any data referenced by pointer for the SetMode request. 
Currently + * this is only the LUT ramps. + */ +static NvBool SetModePrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsSetModeRequest *pReq = &pParams->request; + NvU32 disp, head, dispFailed, headFailed; + + /* Iterate over all of the common LUT ramp pointers embedded in the SetMode + * request, and copy in each one. */ + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (head = 0; head < ARRAY_LEN(pReq->disp[disp].head); head++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[head].lut; + + if (!CopyInLutParams(pCommonLutParams)) { + /* Remember how far we got through these loops before we + * failed, so that we can undo everything up to this point. */ + dispFailed = disp; + headFailed = head; + goto fail; + } + } + } + + return TRUE; + +fail: + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (head = 0; head < ARRAY_LEN(pReq->disp[disp].head); head++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[head].lut; + + if (disp > dispFailed || + (disp == dispFailed && head >= headFailed)) { + break; + } + + FreeCopiedInLutParams(pCommonLutParams); + } + } + + return FALSE; +} + +/*! + * Free buffers allocated in SetModePrepUser. + */ +static NvBool SetModeDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsSetModeRequest *pReq = &pParams->request; + NvU32 disp, head; + + for (disp = 0; disp < ARRAY_LEN(pReq->disp); disp++) { + for (head = 0; head < ARRAY_LEN(pReq->disp[disp].head); head++) { + struct NvKmsSetLutCommonParams *pCommonLutParams = + &pReq->disp[disp].head[head].lut; + + FreeCopiedInLutParams(pCommonLutParams); + } + } + + return TRUE; +} + +/*! + * Perform a modeset on the device. + */ +static NvBool SetMode(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetModeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + return nvSetDispModeEvo(pOpenDev->pDevEvo, pOpenDev, + &pParams->request, &pParams->reply, + FALSE /* bypassComposition */, + TRUE /* doRasterLock */); +} + +static inline NvBool nvHsIoctlSetCursorImage( + NVDispEvoPtr pDispEvo, + const struct NvKmsPerOpenDev *pOpenDevice, + const NVEvoApiHandlesRec *pOpenDevSurfaceHandles, + NvU32 head, + const struct NvKmsSetCursorImageCommonParams *pParams) +{ + return nvSetCursorImage(pDispEvo, + pOpenDevice, + pOpenDevSurfaceHandles, + head, + pParams); +} + +/*! + * Set the cursor image. + */ +static NvBool SetCursorImage(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetCursorImageParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + return nvHsIoctlSetCursorImage(pDispEvo, + pOpenDev, + &pOpenDev->surfaceHandles, + pParams->request.head, + &pParams->request.common); +} + +static inline NvBool nvHsIoctlMoveCursor( + NVDispEvoPtr pDispEvo, + NvU32 head, + const struct NvKmsMoveCursorCommonParams *pParams) +{ + nvEvoMoveCursor(pDispEvo, head, pParams); + return TRUE; +} + +/*! 
+ * Change the cursor position. + */ +static NvBool MoveCursor(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsMoveCursorParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + return nvHsIoctlMoveCursor(pDispEvo, + pParams->request.head, + &pParams->request.common); +} + +/* No extra user state needed for SetLut; although we lose the user pointers + * for the LUT ramps after copying them in, that's okay because we don't need + * to copy them back out again. */ +struct NvKmsSetLutExtraUserState +{ +}; + +/*! + * Copy in any data referenced by pointer for the SetLut request. Currently + * this is only the LUT ramps. + */ +static NvBool SetLutPrepUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; + + return CopyInLutParams(pCommonLutParams); +} + +/*! + * Free buffers allocated in SetLutPrepUser. + */ +static NvBool SetLutDoneUser( + void *pParamsVoid, + void *pExtraUserStateVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsSetLutCommonParams *pCommonLutParams = &pParams->request.common; + + FreeCopiedInLutParams(pCommonLutParams); + + return TRUE; +} + +/*! + * Set the LUT on the specified head. + */ +static NvBool SetLut(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetLutParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + if (!nvValidateSetLutCommonParams(pDispEvo->pDevEvo, + &pParams->request.common)) { + return FALSE; + } + + nvEvoSetLut(pDispEvo, + pParams->request.head, TRUE /* kickoff */, + &pParams->request.common); + + return TRUE; +} + + +/*! + * Return whether the specified head is idle. + */ +static NvBool IdleBaseChannelCheckIdleOneHead( + NVDispEvoPtr pDispEvo, + NvU32 head) +{ + NVDevEvoPtr pDevEvo = pDispEvo->pDevEvo; + + { + NVEvoChannelPtr pMainLayerChannel = + pDevEvo->head[head].layer[NVKMS_MAIN_LAYER]; + NvBool isMethodPending = FALSE; + NvBool ret; + + ret = pDevEvo->hal->IsChannelMethodPending(pDevEvo, pMainLayerChannel, + pDispEvo->displayOwner, &isMethodPending); + return !ret || !isMethodPending; + } +} + +/*! + * Return whether all heads described in pRequest are idle. + * + * Note that we loop over all requested heads, rather than return FALSE once we + * find the first non-idle head, because checking for idle has side effects: in + * headSurface, checking for idle gives the headSurface flip queue the + * opportunity to proceed another frame. 
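+ *
+ * Heads that are still busy are recorded in pReply->stopSubDevicesPerHead[]
+ * as a per-head bitmask of subdevices, so the caller knows which channels
+ * would still need to be stopped.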
+ */ +static NvBool IdleBaseChannelCheckIdle( + NVDevEvoPtr pDevEvo, + const struct NvKmsIdleBaseChannelRequest *pRequest, + struct NvKmsIdleBaseChannelReply *pReply) +{ + NvU32 head, sd; + NVDispEvoPtr pDispEvo; + NvBool allIdle = TRUE; + + FOR_ALL_EVO_DISPLAYS(pDispEvo, sd, pDevEvo) { + + for (head = 0; head < pDevEvo->numHeads; head++) { + + NvBool idle; + + if (!nvHeadIsActive(pDispEvo, head)) { + continue; + } + + if ((pRequest->subDevicesPerHead[head] & NVBIT(sd)) == 0) { + continue; + } + + idle = IdleBaseChannelCheckIdleOneHead(pDispEvo, head); + + if (!idle) { + pReply->stopSubDevicesPerHead[head] |= NVBIT(sd); + } + allIdle = allIdle && idle; + } + } + + return allIdle; +} + +/*! + * Idle all requested heads. + * + * First, wait for the heads to idle naturally. If a timeout is exceeded, then + * force the non-idle heads to idle, and record these in pReply. + */ +static NvBool IdleBaseChannelAll( + NVDevEvoPtr pDevEvo, + const struct NvKmsIdleBaseChannelRequest *pRequest, + struct NvKmsIdleBaseChannelReply *pReply) +{ + NvU64 startTime = 0; + + /* + * Each element in subDevicesPerHead[] must be large enough to hold one bit + * per subdevice. + */ + ct_assert(NVKMS_MAX_SUBDEVICES <= + (sizeof(pRequest->subDevicesPerHead[0]) * 8)); + + /* Loop until all head,sd pairs are idle, or we time out. */ + do { + const NvU32 timeout = 2000000; /* 2 seconds */ + + + /* + * Clear the pReply data, + * IdleBaseChannelCheckIdle() will fill it afresh. + */ + nvkms_memset(pReply, 0, sizeof(*pReply)); + + /* If all heads are idle, we are done. */ + if (IdleBaseChannelCheckIdle(pDevEvo, pRequest, pReply)) { + return TRUE; + } + + /* Break out of the loop if we exceed the timeout. */ + if (nvExceedsTimeoutUSec(&startTime, timeout)) { + break; + } + + /* At least one head is not idle; yield, and try again. */ + nvkms_yield(); + + } while (TRUE); + + return TRUE; +} + + +/*! + * Wait for the requested base channels to be idle, returning whether + * stopping the base channels was necessary. + */ +static NvBool IdleBaseChannel(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsIdleBaseChannelParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Only the modesetOwner can idle base. */ + + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + return FALSE; + } + + return IdleBaseChannelAll(pOpenDev->pDevEvo, + &pParams->request, &pParams->reply); +} + + +static inline NvBool nvHsIoctlFlip( + NVDevEvoPtr pDevEvo, + const struct NvKmsPerOpenDev *pOpenDev, + const struct NvKmsFlipRequest *pRequest, + struct NvKmsFlipReply *pReply) +{ + return nvFlipEvo(pOpenDev->pDevEvo, + pOpenDev, + pRequest, + pReply, + FALSE /* skipUpdate */, + TRUE /* allowFlipLock */); +} + +/*! + * Flip the specified head. + */ +static NvBool Flip(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsFlipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + return nvHsIoctlFlip(pOpenDev->pDevEvo, pOpenDev, + &pParams->request, &pParams->reply); +} + + +/*! + * Record whether this client is interested in the specified dynamic + * dpy. + */ +static NvBool DeclareDynamicDpyInterest(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + /* XXX NVKMS TODO: implement me. */ + + return TRUE; +} + + +/*! 
+ * Register a surface with the specified per-open + device. + */ +static NvBool RegisterSurface(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsRegisterSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + /* + * Only allow userspace clients to specify memory objects by FD. + * This prevents clients from specifying (hClient, hObject) tuples that + * really belong to other clients. + */ + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE && + !pParams->request.useFd) { + return FALSE; + } + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + nvEvoRegisterSurface(pOpenDev->pDevEvo, pOpenDev, pParams, + NvHsMapPermissionsReadOnly); + return TRUE; +} + + +/*! + * Unregister a surface from the specified per-open + device. + */ +static NvBool UnregisterSurface(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsUnregisterSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + nvEvoUnregisterSurface(pOpenDev->pDevEvo, pOpenDev, + pParams->request.surfaceHandle, + FALSE /* skipUpdate */); + return TRUE; +} + + +/*! + * Associate a surface with the NvKmsPerOpen specified by + * NvKmsGrantSurfaceParams::request::fd. + */ +static NvBool GrantSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrantSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSurfaceEvoPtr pSurfaceEvo; + struct NvKmsPerOpen *pOpenFd; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pSurfaceEvo = + nvEvoGetSurfaceFromHandleNoCtxDmaOk(pOpenDev->pDevEvo, + &pOpenDev->surfaceHandles, + pParams->request.surfaceHandle); + if (pSurfaceEvo == NULL) { + return FALSE; + } + + if (nvEvoSurfaceRefCntsTooLarge(pSurfaceEvo)) { + return FALSE; + } + + /* Only the owner of the surface can grant it to other clients. */ + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, + pParams->request.surfaceHandle)) { + return FALSE; + } + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (!AssignNvKmsPerOpenType( + pOpenFd, NvKmsPerOpenTypeGrantSurface, FALSE)) { + return FALSE; + } + + nvEvoIncrementSurfaceStructRefCnt(pSurfaceEvo); + pOpenFd->grantSurface.pSurfaceEvo = pSurfaceEvo; + + return TRUE; +} + + +/*! + * Retrieve the surface and device associated with + * NvKmsAcquireSurfaceParams::request::fd, and give the client an + * NvKmsSurfaceHandle to the surface. + */ +static NvBool AcquireSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsAcquireSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpen *pOpenFd; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsSurfaceHandle surfaceHandle = 0; + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeGrantSurface) { + return FALSE; + } + + nvAssert(pOpenFd->grantSurface.pSurfaceEvo != NULL); + + if (pOpenFd->grantSurface.pSurfaceEvo->rmRefCnt == 0) { /* orphan */ + return FALSE; + } + + if (nvEvoSurfaceRefCntsTooLarge(pOpenFd->grantSurface.pSurfaceEvo)) { + return FALSE; + } + + /* Since the surface isn't orphaned, it should have an owner, with a + * pOpenDev and a pDevEvo. Get the pOpenDev for the acquiring client that + * matches the owner's pDevEvo. 
*/ + nvAssert(pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo != NULL); + pOpenDev = DevEvoToOpenDev(pOpen, + pOpenFd->grantSurface.pSurfaceEvo->owner.pOpenDev->pDevEvo); + + if (pOpenDev == NULL) { + return FALSE; + } + + surfaceHandle = + nvEvoCreateApiHandle(&pOpenDev->surfaceHandles, + pOpenFd->grantSurface.pSurfaceEvo); + + if (surfaceHandle == 0) { + return FALSE; + } + + nvEvoIncrementSurfaceStructRefCnt(pOpenFd->grantSurface.pSurfaceEvo); + + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + pParams->reply.surfaceHandle = surfaceHandle; + + return TRUE; +} + +static NvBool ReleaseSurface(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsReleaseSurfaceParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + nvEvoReleaseSurface(pOpenDev->pDevEvo, pOpenDev, + pParams->request.surfaceHandle); + return TRUE; +} + + +/*! + * Change the value of the specified attribute. + */ +static NvBool SetDpyAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetDpyAttributeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvSetDpyAttributeEvo(pDpyEvo, pParams); +} + + +/*! + * Get the value of the specified attribute. + */ +static NvBool GetDpyAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDpyAttributeParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvGetDpyAttributeEvo(pDpyEvo, pParams); +} + + +/*! + * Get the valid values of the specified attribute. + */ +static NvBool GetDpyAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDpyAttributeValidValuesParams *pParams = pParamsVoid; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + pParams->request.dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + return nvGetDpyAttributeValidValuesEvo(pDpyEvo, pParams); +} + + +/*! + * Set the value of the specified attribute. + */ +static NvBool SetDispAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetDispAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvSetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get the value of the specified attribute. + */ +static NvBool GetDispAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDispAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvGetDispAttributeEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get the valid values of the specified attribute. 
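+ *
+ * Together with SetDispAttribute() and GetDispAttribute() above, this
+ * completes the set/get/valid-values triple that NVKMS exposes per
+ * attribute; dpy and framelock attributes follow the same pattern.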
+ */ +static NvBool GetDispAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetDispAttributeValidValuesParams *pParams = pParamsVoid; + + struct NvKmsPerOpenDisp *pOpenDisp; + + pOpenDisp = GetPerOpenDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + return FALSE; + } + + return nvGetDispAttributeValidValuesEvo(pOpenDisp->pDispEvo, pParams); +} + + +/*! + * Get information about the specified framelock device. + */ +static NvBool QueryFrameLock(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryFrameLockParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + NvU32 gpu; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + ct_assert(ARRAY_LEN(pFrameLockEvo->gpuIds) <= + ARRAY_LEN(pParams->reply.gpuIds)); + + for (gpu = 0; gpu < pFrameLockEvo->nGpuIds; gpu++) { + pParams->reply.gpuIds[gpu] = pFrameLockEvo->gpuIds[gpu]; + } + + return TRUE; +} + + +static NvBool SetFrameLockAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetFrameLockAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + NVFrameLockEvoRec *pFrameLockEvo; + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvSetFrameLockAttributeEvo(pFrameLockEvo, pParams); +} + + +static NvBool GetFrameLockAttribute(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetFrameLockAttributeParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvGetFrameLockAttributeEvo(pFrameLockEvo, pParams); +} + + +static NvBool GetFrameLockAttributeValidValues(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetFrameLockAttributeValidValuesParams *pParams = pParamsVoid; + struct NvKmsPerOpenFrameLock *pOpenFrameLock; + const NVFrameLockEvoRec *pFrameLockEvo; + + nvkms_memset(&pParams->reply, 0, sizeof(pParams->reply)); + + pOpenFrameLock = + GetPerOpenFrameLock(pOpen, pParams->request.frameLockHandle); + + if (pOpenFrameLock == NULL) { + return FALSE; + } + + pFrameLockEvo = pOpenFrameLock->pFrameLockEvo; + + return nvGetFrameLockAttributeValidValuesEvo(pFrameLockEvo, pParams); +} + + +/*! + * Pop the next event off of the client's event queue. 
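+ *
+ * Clients typically drain the queue after being notified, calling this
+ * repeatedly until reply.valid comes back FALSE; the empty-queue case also
+ * clears the pending-events indication via nvkms_event_queue_changed().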
+ */ +static NvBool GetNextEvent(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetNextEventParams *pParams = pParamsVoid; + struct NvKmsPerOpenEventListEntry *pEntry; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (nvListIsEmpty(&pOpen->ioctl.eventList)) { + pParams->reply.valid = FALSE; + return TRUE; + } + + pEntry = nvListFirstEntry(&pOpen->ioctl.eventList, + struct NvKmsPerOpenEventListEntry, + eventListEntry); + + pParams->reply.valid = TRUE; + pParams->reply.event = pEntry->event; + + nvListDel(&pEntry->eventListEntry); + + nvFree(pEntry); + + if (nvListIsEmpty(&pOpen->ioctl.eventList)) { + nvkms_event_queue_changed(pOpen->pOpenKernel, FALSE); + } + + return TRUE; +} + + +/*! + * Record the client's event interest for the specified device. + */ +static NvBool DeclareEventInterest(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsDeclareEventInterestParams *pParams = pParamsVoid; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpen->ioctl.eventInterestMask = pParams->request.interestMask; + + return TRUE; +} + +static NvBool ClearUnicastEvent(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsClearUnicastEventParams *pParams = pParamsVoid; + struct NvKmsPerOpen *pOpenFd = NULL; + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + pOpenFd = nvkms_get_per_open_data(pParams->request.unicastEventFd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeUnicastEvent) { + return FALSE; + } + + nvkms_event_queue_changed(pOpenFd->pOpenKernel, FALSE); + + return TRUE; +} + +static NvBool SetLayerPosition(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSetLayerPositionParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* XXX NVKMS HEADSURFACE TODO: intercept */ + + return nvLayerSetPositionEvo(pOpenDev->pDevEvo, &pParams->request); +} + +static NvBool GrabOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrabOwnershipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + // The only kind of ownership right now is modeset ownership. + return GrabModesetOwnership(pOpenDev); +} + +static NvBool ReleaseOwnership(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsReleaseOwnershipParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + // The only kind of ownership right now is modeset ownership. + return ReleaseModesetOwnership(pOpenDev); +} + +static NvBool GrantPermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsGrantPermissionsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpen *pOpenFd; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Only the modesetOwner can grant permissions. 
*/ + + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + return FALSE; + } + + if (!ValidateNvKmsPermissions(pOpenDev->pDevEvo, + &pParams->request.permissions)) { + return FALSE; + } + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (!AssignNvKmsPerOpenType( + pOpenFd, NvKmsPerOpenTypeGrantPermissions, FALSE)) { + return FALSE; + } + + pOpenFd->grantPermissions.permissions = pParams->request.permissions; + + pOpenFd->grantPermissions.pDevEvo = pOpenDev->pDevEvo; + + return TRUE; +} + +static NvBool AcquirePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsAcquirePermissionsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpen *pOpenFd; + const struct NvKmsPermissions *pPermissionsNew; + enum NvKmsPermissionsType type; + + pOpenFd = nvkms_get_per_open_data(pParams->request.fd); + + if (pOpenFd == NULL) { + return FALSE; + } + + if (pOpenFd->type != NvKmsPerOpenTypeGrantPermissions) { + return FALSE; + } + + pOpenDev = DevEvoToOpenDev(pOpen, pOpenFd->grantPermissions.pDevEvo); + + if (pOpenDev == NULL) { + return FALSE; + } + + type = pOpenFd->grantPermissions.permissions.type; + + pPermissionsNew = &pOpenFd->grantPermissions.permissions; + + if (type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pOpenDev->flipPermissions.disp); d++) { + for (h = 0; h < ARRAY_LEN(pOpenDev->flipPermissions. + disp[d].head); h++) { + pOpenDev->flipPermissions.disp[d].head[h].layerMask |= + pPermissionsNew->flip.disp[d].head[h].layerMask; + } + } + + pParams->reply.permissions.flip = pOpenDev->flipPermissions; + + } else if (type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pOpenDev->modesetPermissions.disp); d++) { + for (h = 0; h < ARRAY_LEN(pOpenDev->modesetPermissions. + disp[d].head); h++) { + pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList = + nvAddDpyIdListToDpyIdList( + pOpenDev->modesetPermissions.disp[d].head[h].dpyIdList, + pPermissionsNew->modeset.disp[d].head[h].dpyIdList); + } + } + + pParams->reply.permissions.modeset = pOpenDev->modesetPermissions; + + } else { + /* + * GrantPermissions() should ensure that + * pOpenFd->grantPermissions.permissions.type is always valid. + */ + nvAssert(!"AcquirePermissions validation failure"); + return FALSE; + } + + pParams->reply.permissions.type = type; + pParams->reply.deviceHandle = pOpenDev->nvKmsApiHandle; + + return TRUE; +} + +static NvBool RevokePermissions(struct NvKmsPerOpen *pOpen, void *pParamsVoid) +{ + struct NvKmsRevokePermissionsParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev = + GetPerOpenDev(pOpen, pParams->request.deviceHandle); + const NvU32 validBitmask = + NVBIT(NV_KMS_PERMISSIONS_TYPE_FLIPPING) | + NVBIT(NV_KMS_PERMISSIONS_TYPE_MODESET); + + if (pOpenDev == NULL) { + return FALSE; + } + + /* Only the modeset owner can revoke permissions. */ + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + return FALSE; + } + + /* Reject invalid bitmasks. */ + + if ((pParams->request.permissionsTypeBitmask & ~validBitmask) != 0) { + return FALSE; + } + + /* Revoke permissions for everyone except the caller. 
*/ + + RevokePermissionsInternal(pParams->request.permissionsTypeBitmask, + pOpenDev->pDevEvo, + pOpenDev /* pOpenDevExclude */); + return TRUE; +} + +static NvBool RegisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsRegisterDeferredRequestFifoParams *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + NVSurfaceEvoPtr pSurfaceEvo; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsDeferredRequestFifoHandle handle; + + pOpenDev = GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pSurfaceEvo = nvEvoGetSurfaceFromHandleNoCtxDmaOk( + pOpenDev->pDevEvo, + &pOpenDev->surfaceHandles, + pParams->request.surfaceHandle); + + if (pSurfaceEvo == NULL) { + return FALSE; + } + + /* + * WAR Bug 2050970: If a surface is unregistered and it wasn't registered + * with NvKmsRegisterSurfaceRequest::noDisplayHardwareAccess, then the call + * to nvRMSyncEvoChannel() in nvEvoDecrementSurfaceRefCnts() may hang + * if any flips in flight acquire on semaphore releases that haven't + * occurred yet. + * + * Since a ctxdma is not necessary for the deferred request fifo surface, + * we work around this by forcing all surfaces that will be registered as + * a deferred request fifo to be registered with + * noDisplayHardwareAccess==TRUE, then skip the idle in + * nvEvoDecrementSurfaceRefCnts() for these surfaces. + */ + if (pSurfaceEvo->requireCtxDma) { + return FALSE; + } + + pDeferredRequestFifo = + nvEvoRegisterDeferredRequestFifo(pOpenDev->pDevEvo, pSurfaceEvo); + + if (pDeferredRequestFifo == NULL) { + return FALSE; + } + + handle = nvEvoCreateApiHandle(&pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo); + + if (handle == 0) { + nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, + pDeferredRequestFifo); + return FALSE; + } + + pParams->reply.deferredRequestFifoHandle = handle; + + return TRUE; +} + +static NvBool UnregisterDeferredRequestFifo(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsUnregisterDeferredRequestFifoParams *pParams = pParamsVoid; + NvKmsDeferredRequestFifoHandle handle = + pParams->request.deferredRequestFifoHandle; + NVDeferredRequestFifoRec *pDeferredRequestFifo; + struct NvKmsPerOpenDev *pOpenDev = + GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + pDeferredRequestFifo = + nvEvoGetPointerFromApiHandle( + &pOpenDev->deferredRequestFifoHandles, handle); + + if (pDeferredRequestFifo == NULL) { + return FALSE; + } + + nvEvoDestroyApiHandle(&pOpenDev->deferredRequestFifoHandles, handle); + + nvEvoUnregisterDeferredRequestFifo(pOpenDev->pDevEvo, pDeferredRequestFifo); + + return TRUE; +} + +/*! + * Get the CRC32 data for the specified dpy. + */ +static NvBool QueryDpyCRC32(struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsQueryDpyCRC32Params *pParams = pParamsVoid; + struct NvKmsPerOpenDev *pOpenDev; + struct NvKmsPerOpenDisp *pOpenDisp; + NVDispEvoPtr pDispEvo; + CRC32NotifierCrcOut crcOut; + + if (!GetPerOpenDevAndDisp(pOpen, + pParams->request.deviceHandle, + pParams->request.dispHandle, + &pOpenDev, + &pOpenDisp)) { + return FALSE; + } + + if (pOpenDev->pDevEvo->modesetOwner != pOpenDev) { + // Only the current owner can query CRC32 values. 
+ return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + + if (!nvHeadIsActive(pDispEvo, pParams->request.head)) { + return FALSE; + } + + nvkms_memset(&(pParams->reply), 0, sizeof(pParams->reply)); + + // Since will only read 1 frame of CRCs, point to single reply struct vals + crcOut.rasterGeneratorCrc32 = &(pParams->reply.rasterGeneratorCrc32); + crcOut.compositorCrc32 = &(pParams->reply.compositorCrc32); + crcOut.outputCrc32 = &(pParams->reply.outputCrc32); + + if (!nvReadCRC32Evo(pDispEvo, pParams->request.head, &crcOut)) { + return FALSE; + } + + return TRUE; +} + +static NvBool SwitchMux( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsSwitchMuxParams *pParams = pParamsVoid; + const struct NvKmsSwitchMuxRequest *r = &pParams->request; + NVDpyEvoPtr pDpyEvo; + NVDevEvoPtr pDevEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + if (pDevEvo->modesetOwner != GetPerOpenDev(pOpen, r->deviceHandle)) { + return FALSE; + } + + switch (pParams->request.operation) { + case NVKMS_SWITCH_MUX_PRE: + return nvRmMuxPre(pDpyEvo, r->state); + case NVKMS_SWITCH_MUX: + return nvRmMuxSwitch(pDpyEvo, r->state); + case NVKMS_SWITCH_MUX_POST: + return nvRmMuxPost(pDpyEvo, r->state); + default: + return FALSE; + } +} + +static NvBool GetMuxState( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsGetMuxStateParams *pParams = pParamsVoid; + const struct NvKmsGetMuxStateRequest *r = &pParams->request; + NVDpyEvoPtr pDpyEvo; + + pDpyEvo = GetPerOpenDpy(pOpen, r->deviceHandle, r->dispHandle, r->dpyId); + if (pDpyEvo == NULL) { + return FALSE; + } + + pParams->reply.state = nvRmMuxState(pDpyEvo); + + return pParams->reply.state != MUX_STATE_GET; +} + +static NvBool ExportVrrSemaphoreSurface( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsExportVrrSemaphoreSurfaceParams *pParams = pParamsVoid; + const struct NvKmsExportVrrSemaphoreSurfaceRequest *req = &pParams->request; + const struct NvKmsPerOpenDev *pOpenDev = + GetPerOpenDev(pOpen, pParams->request.deviceHandle); + + if (pOpenDev == NULL) { + return FALSE; + } + + return nvExportVrrSemaphoreSurface(pOpenDev->pDevEvo, req->memFd); +} + +static NvBool EnableVblankSyncObject( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsEnableVblankSyncObjectParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp* pOpenDisp = NULL; + NVDispHeadStateEvoRec *pHeadState = NULL; + NVDevEvoPtr pDevEvo = NULL; + NvKmsVblankSyncObjectHandle vblankHandle = 0; + int freeVblankSyncObjectIdx = 0; + NvU32 head = pParams->request.head; + NVVblankSyncObjectRec *vblankSyncObjects = NULL; + NVDispEvoPtr pDispEvo = NULL; + NVEvoUpdateState updateState = { }; + + /* Obtain the Head State. */ + pOpenDisp = GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + if (pOpenDisp == NULL) { + nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp."); + return FALSE; + } + + pDispEvo = pOpenDisp->pDispEvo; + pDevEvo = pDispEvo->pDevEvo; + + /* Ensure Vblank Sync Object API is supported on this chip. */ + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not " + "supported on this chip."); + return FALSE; + } + + /* Validate requested head because it comes from user input. 
*/ + if (head >= ARRAY_LEN(pDispEvo->headState)) { + nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", head); + return FALSE; + } + pHeadState = &pDispEvo->headState[head]; + vblankSyncObjects = pHeadState->vblankSyncObjects; + pDevEvo = pDispEvo->pDevEvo; + + /* + * Find the available sync object. Sync Objects with handle=0 are not in + * use. + */ + for (freeVblankSyncObjectIdx = 0; + freeVblankSyncObjectIdx < pHeadState->numVblankSyncObjectsCreated; + freeVblankSyncObjectIdx++) { + if (!vblankSyncObjects[freeVblankSyncObjectIdx].inUse) { + break; + } + } + if (freeVblankSyncObjectIdx == pHeadState->numVblankSyncObjectsCreated) { + return FALSE; + } + + /* Save the created vblank handle if it is valid. */ + vblankHandle = + nvEvoCreateApiHandle(&pOpenDisp->vblankSyncObjectHandles[head], + &vblankSyncObjects[freeVblankSyncObjectIdx]); + if (vblankHandle == 0) { + nvEvoLogDebug(EVO_LOG_ERROR, "Unable to create vblank handle."); + return FALSE; + } + + if (nvHeadIsActive(pDispEvo, head)) { + /* + * Instruct the hardware to enable a semaphore corresponding to this + * syncpt. The Update State will be populated. + */ + pDevEvo->hal->ConfigureVblankSyncObject( + pDevEvo, + pHeadState->timings.rasterBlankStart.y, + head, + freeVblankSyncObjectIdx, + vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.hCtxDma, + &updateState); + + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() call above. This will set up and wait for a + * notification that the hardware execution actually completed. + */ + nvEvoUpdateAndKickOff(pDispEvo, TRUE, &updateState, TRUE); + + vblankSyncObjects[freeVblankSyncObjectIdx].enabled = TRUE; + } + + /* Populate the vblankSyncObjects array. */ + vblankSyncObjects[freeVblankSyncObjectIdx].inUse = TRUE; + + /* Populate the reply field. */ + pParams->reply.vblankHandle = vblankHandle; + /* Note: the syncpt ID is NOT the same as the vblank handle. */ + pParams->reply.syncptId = + pHeadState->vblankSyncObjects[freeVblankSyncObjectIdx].evoSyncpt.id; + + return TRUE; +} + +static void DisableAndCleanVblankSyncObject(struct NvKmsPerOpenDisp *pOpenDisp, + NvU32 head, + NVVblankSyncObjectRec *pVblankSyncObject, + NVEvoUpdateState *pUpdateState, + NvKmsVblankSyncObjectHandle handle) +{ + NVDispEvoPtr pDispEvo = pOpenDisp->pDispEvo; + + if (nvHeadIsActive(pDispEvo, head)) { + /* + * Instruct the hardware to disable the semaphore corresponding to this + * syncpt. The Update State will be populated. + * + * Note: Using dummy zero value for rasterLine because the disable + * codepath in ConfigureVblankSyncObject() does not use that argument. + */ + pDispEvo->pDevEvo->hal->ConfigureVblankSyncObject(pDispEvo->pDevEvo, + 0, /* rasterLine */ + head, + pVblankSyncObject->index, + 0, /* hCtxDma */ + pUpdateState); + /* + * Note: it is the caller's responsibility to call + * nvEvoUpdateAndKickOff(). + */ + } + + pVblankSyncObject->inUse = FALSE; + pVblankSyncObject->enabled = FALSE; + + /* Remove the handle from the map. 
*/ + nvEvoDestroyApiHandle(&pOpenDisp->vblankSyncObjectHandles[head], handle); +} + +static NvBool DisableVblankSyncObject( + struct NvKmsPerOpen *pOpen, + void *pParamsVoid) +{ + struct NvKmsDisableVblankSyncObjectParams *pParams = pParamsVoid; + struct NvKmsPerOpenDisp* pOpenDisp = + GetPerOpenDisp(pOpen, pParams->request.deviceHandle, + pParams->request.dispHandle); + NVVblankSyncObjectRec *pVblankSyncObject = NULL; + NvU32 head = pParams->request.head; + NVDevEvoPtr pDevEvo = NULL; + NVEvoUpdateState updateState = { }; + + if (pOpenDisp == NULL) { + nvEvoLogDebug(EVO_LOG_ERROR, "Unable to GetPerOpenDisp."); + return FALSE; + } + + pDevEvo = pOpenDisp->pDispEvo->pDevEvo; + + /* Ensure Vblank Sync Object API is supported on this chip. */ + if (!pDevEvo->supportsSyncpts || + !pDevEvo->hal->caps.supportsVblankSyncObjects) { + nvEvoLogDebug(EVO_LOG_ERROR, "Vblank Sync Object functionality is not " + "supported on this chip."); + return FALSE; + } + + /* Validate requested head because it comes from user input. */ + if (head >= ARRAY_LEN(pOpenDisp->pDispEvo->headState)) { + nvEvoLogDebug(EVO_LOG_ERROR, "Invalid head requested, head=%d.", head); + return FALSE; + } + + /* Mark the indicated object as free. */ + pVblankSyncObject = + nvEvoGetPointerFromApiHandle(&pOpenDisp->vblankSyncObjectHandles[head], + pParams->request.vblankHandle); + if (pVblankSyncObject == NULL) { + nvEvoLogDebug(EVO_LOG_ERROR, "unable to find object with provided " + "handle."); + return FALSE; + } + + DisableAndCleanVblankSyncObject(pOpenDisp, head, pVblankSyncObject, + &updateState, pParams->request.vblankHandle); + + if (!nvIsUpdateStateEmpty(pOpenDisp->pDispEvo->pDevEvo, &updateState)) { + /* + * Instruct hardware to execute the staged commands from the + * ConfigureVblankSyncObject() call inside of the + * DisableAndCleanVblankSyncObject() call above. This will set up and + * wait for a notification that the hardware execution has completed. + */ + nvEvoUpdateAndKickOff(pOpenDisp->pDispEvo, TRUE, &updateState, TRUE); + } + + return TRUE; +} + +/*! + * Perform the ioctl operation requested by the client. + * + * \param[in,out] pOpenVoid The per-open data, allocated by + * nvKmsOpen(). + * \param[in] cmdOpaque The NVKMS_IOCTL_ operation to perform. + * \param[in,out] paramsAddress A pointer, in the client process's + * address space, to the parameter + * structure. This is cmd-specific. + * \param[in] paramSize The client-specified size of the params. + * + * \return Return TRUE if the ioctl operation was successfully + * performed. Otherwise, return FALSE. 
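+ *
+ * For user-space clients, each command goes through the same sequence
+ * (sketched; the exact copy sizes and offsets come from the dispatch
+ * table below):
+ *
+ *   copyin(request) -> prepUser() [optional] -> proc()
+ *                   -> doneUser() [optional] -> copyout(reply)
+ *
+ * Kernel-space clients skip the copies and pass their parameter structure
+ * by pointer.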
+ */ +NvBool nvKmsIoctl( + void *pOpenVoid, + const NvU32 cmdOpaque, + const NvU64 paramsAddress, + const size_t paramSize) +{ + static const struct { + + NvBool (*proc)(struct NvKmsPerOpen *pOpen, void *pParamsVoid); + NvBool (*prepUser)(void *pParamsVoid, void *pExtraStateVoid); + NvBool (*doneUser)(void *pParamsVoid, void *pExtraStateVoid); + const size_t paramSize; + /* Size of extra state tracked for user parameters */ + const size_t extraSize; + + const size_t requestSize; + const size_t requestOffset; + + const size_t replySize; + const size_t replyOffset; + + } dispatch[] = { + +#define _ENTRY_WITH_USER(_cmd, _func, _prepUser, _doneUser, _extraSize) \ + [_cmd] = { \ + .proc = _func, \ + .prepUser = _prepUser, \ + .doneUser = _doneUser, \ + .paramSize = sizeof(struct NvKms##_func##Params), \ + .requestSize = sizeof(struct NvKms##_func##Request), \ + .requestOffset = offsetof(struct NvKms##_func##Params, request), \ + .replySize = sizeof(struct NvKms##_func##Reply), \ + .replyOffset = offsetof(struct NvKms##_func##Params, reply), \ + .extraSize = _extraSize, \ + } + +#define ENTRY(_cmd, _func) \ + _ENTRY_WITH_USER(_cmd, _func, NULL, NULL, 0) + +#define ENTRY_CUSTOM_USER(_cmd, _func) \ + _ENTRY_WITH_USER(_cmd, _func, \ + _func##PrepUser, _func##DoneUser, \ + sizeof(struct NvKms##_func##ExtraUserState)) + + ENTRY(NVKMS_IOCTL_ALLOC_DEVICE, AllocDevice), + ENTRY(NVKMS_IOCTL_FREE_DEVICE, FreeDevice), + ENTRY(NVKMS_IOCTL_QUERY_DISP, QueryDisp), + ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_STATIC_DATA, QueryConnectorStaticData), + ENTRY(NVKMS_IOCTL_QUERY_CONNECTOR_DYNAMIC_DATA, QueryConnectorDynamicData), + ENTRY(NVKMS_IOCTL_QUERY_DPY_STATIC_DATA, QueryDpyStaticData), + ENTRY(NVKMS_IOCTL_QUERY_DPY_DYNAMIC_DATA, QueryDpyDynamicData), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE_INDEX, ValidateModeIndex), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_VALIDATE_MODE, ValidateMode), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_MODE, SetMode), + ENTRY(NVKMS_IOCTL_SET_CURSOR_IMAGE, SetCursorImage), + ENTRY(NVKMS_IOCTL_MOVE_CURSOR, MoveCursor), + ENTRY_CUSTOM_USER(NVKMS_IOCTL_SET_LUT, SetLut), + ENTRY(NVKMS_IOCTL_IDLE_BASE_CHANNEL, IdleBaseChannel), + ENTRY(NVKMS_IOCTL_FLIP, Flip), + ENTRY(NVKMS_IOCTL_DECLARE_DYNAMIC_DPY_INTEREST, + DeclareDynamicDpyInterest), + ENTRY(NVKMS_IOCTL_REGISTER_SURFACE, RegisterSurface), + ENTRY(NVKMS_IOCTL_UNREGISTER_SURFACE, UnregisterSurface), + ENTRY(NVKMS_IOCTL_GRANT_SURFACE, GrantSurface), + ENTRY(NVKMS_IOCTL_ACQUIRE_SURFACE, AcquireSurface), + ENTRY(NVKMS_IOCTL_RELEASE_SURFACE, ReleaseSurface), + ENTRY(NVKMS_IOCTL_SET_DPY_ATTRIBUTE, SetDpyAttribute), + ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE, GetDpyAttribute), + ENTRY(NVKMS_IOCTL_GET_DPY_ATTRIBUTE_VALID_VALUES, + GetDpyAttributeValidValues), + ENTRY(NVKMS_IOCTL_SET_DISP_ATTRIBUTE, SetDispAttribute), + ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE, GetDispAttribute), + ENTRY(NVKMS_IOCTL_GET_DISP_ATTRIBUTE_VALID_VALUES, + GetDispAttributeValidValues), + ENTRY(NVKMS_IOCTL_QUERY_FRAMELOCK, QueryFrameLock), + ENTRY(NVKMS_IOCTL_SET_FRAMELOCK_ATTRIBUTE, SetFrameLockAttribute), + ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE, GetFrameLockAttribute), + ENTRY(NVKMS_IOCTL_GET_FRAMELOCK_ATTRIBUTE_VALID_VALUES, + GetFrameLockAttributeValidValues), + ENTRY(NVKMS_IOCTL_GET_NEXT_EVENT, GetNextEvent), + ENTRY(NVKMS_IOCTL_DECLARE_EVENT_INTEREST, DeclareEventInterest), + ENTRY(NVKMS_IOCTL_CLEAR_UNICAST_EVENT, ClearUnicastEvent), + ENTRY(NVKMS_IOCTL_SET_LAYER_POSITION, SetLayerPosition), + ENTRY(NVKMS_IOCTL_GRAB_OWNERSHIP, GrabOwnership), + ENTRY(NVKMS_IOCTL_RELEASE_OWNERSHIP, 
ReleaseOwnership), + ENTRY(NVKMS_IOCTL_GRANT_PERMISSIONS, GrantPermissions), + ENTRY(NVKMS_IOCTL_ACQUIRE_PERMISSIONS, AcquirePermissions), + ENTRY(NVKMS_IOCTL_REVOKE_PERMISSIONS, RevokePermissions), + ENTRY(NVKMS_IOCTL_QUERY_DPY_CRC32, QueryDpyCRC32), + ENTRY(NVKMS_IOCTL_REGISTER_DEFERRED_REQUEST_FIFO, + RegisterDeferredRequestFifo), + ENTRY(NVKMS_IOCTL_UNREGISTER_DEFERRED_REQUEST_FIFO, + UnregisterDeferredRequestFifo), + ENTRY(NVKMS_IOCTL_SWITCH_MUX, SwitchMux), + ENTRY(NVKMS_IOCTL_GET_MUX_STATE, GetMuxState), + ENTRY(NVKMS_IOCTL_EXPORT_VRR_SEMAPHORE_SURFACE, ExportVrrSemaphoreSurface), + ENTRY(NVKMS_IOCTL_ENABLE_VBLANK_SYNC_OBJECT, EnableVblankSyncObject), + ENTRY(NVKMS_IOCTL_DISABLE_VBLANK_SYNC_OBJECT, DisableVblankSyncObject), + }; + + struct NvKmsPerOpen *pOpen = pOpenVoid; + void *pParamsKernelPointer; + NvBool ret; + enum NvKmsIoctlCommand cmd = cmdOpaque; + void *pExtraUserState = NULL; + + if (!AssignNvKmsPerOpenType(pOpen, NvKmsPerOpenTypeIoctl, TRUE)) { + return FALSE; + } + + if (cmd >= ARRAY_LEN(dispatch)) { + return FALSE; + } + + if (dispatch[cmd].proc == NULL) { + return FALSE; + } + + if (paramSize != dispatch[cmd].paramSize) { + return FALSE; + } + + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) { + pParamsKernelPointer = nvCalloc(1, paramSize + dispatch[cmd].extraSize); + if (pParamsKernelPointer == NULL) { + return FALSE; + } + + if (dispatch[cmd].requestSize > 0) { + int status = + nvkms_copyin((char *) pParamsKernelPointer + + dispatch[cmd].requestOffset, + paramsAddress + dispatch[cmd].requestOffset, + dispatch[cmd].requestSize); + if (status != 0) { + nvFree(pParamsKernelPointer); + return FALSE; + } + } + + if (dispatch[cmd].prepUser) { + pExtraUserState = (char *)pParamsKernelPointer + paramSize; + + if (!dispatch[cmd].prepUser(pParamsKernelPointer, + pExtraUserState)) { + nvFree(pParamsKernelPointer); + return FALSE; + } + } + } else { + pParamsKernelPointer = nvKmsNvU64ToPointer(paramsAddress); + } + + ret = dispatch[cmd].proc(pOpen, pParamsKernelPointer); + + if (pOpen->clientType == NVKMS_CLIENT_USER_SPACE) { + + if (dispatch[cmd].doneUser) { + pExtraUserState = (char *)pParamsKernelPointer + paramSize; + + if (!dispatch[cmd].doneUser(pParamsKernelPointer, + pExtraUserState)) { + ret = FALSE; + } + } + + if (dispatch[cmd].replySize > 0) { + int status = + nvkms_copyout(paramsAddress + dispatch[cmd].replyOffset, + (char *) pParamsKernelPointer + + dispatch[cmd].replyOffset, + dispatch[cmd].replySize); + if (status != 0) { + ret = FALSE; + } + } + + nvFree(pParamsKernelPointer); + } + + return ret; +} + + +/*! + * Close callback. + * + * \param[in,out] pOpenVoid The per-open data, allocated by nvKmsOpen(). + */ +void nvKmsClose(void *pOpenVoid) +{ + struct NvKmsPerOpen *pOpen = pOpenVoid; + + if (pOpen == NULL) { + return; + } + + /* + * First remove the pOpen from global tracking. Otherwise, assertions can + * fail in the free paths below -- the assertions check that the object + * being freed is not tracked by any pOpen. 
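+     *
+     * After that, teardown is per pOpen->type: ioctl clients drop their
+     * device references, API handles, and any queued events; grant-surface
+     * fds drop their surface reference; unicast-event fds are unregistered.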
+ */ + nvListDel(&pOpen->perOpenListEntry); + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + struct NvKmsPerOpenEventListEntry *pEntry, *pEntryTmp; + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + FreeDeviceReference(pOpen, pOpenDev); + } + + nvEvoDestroyApiHandles(&pOpen->ioctl.frameLockHandles); + + nvEvoDestroyApiHandles(&pOpen->ioctl.devHandles); + + nvListForEachEntry_safe(pEntry, pEntryTmp, + &pOpen->ioctl.eventList, eventListEntry) { + nvListDel(&pEntry->eventListEntry); + nvFree(pEntry); + } + + nvListDel(&pOpen->perOpenIoctlListEntry); + } + + if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { + nvAssert(pOpen->grantSurface.pSurfaceEvo != NULL); + nvEvoDecrementSurfaceStructRefCnt(pOpen->grantSurface.pSurfaceEvo); + } + + if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { + nvAssert(pOpen->grantSwapGroup.pSwapGroup != NULL); + } + + if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { + nvRemoveUnicastEvent(pOpen); + } + + nvFree(pOpen); +} + + +/*! + * Open callback. + * + * Allocate, initialize, and return an opaque pointer to an NvKmsPerOpen. + * + * \return If successful, return an NvKmsPerOpen pointer. Otherwise, + * return NULL. + */ +void *nvKmsOpen( + NvU32 pid, + enum NvKmsClientType clientType, + nvkms_per_open_handle_t *pOpenKernel) +{ + struct NvKmsPerOpen *pOpen = nvCalloc(1, sizeof(*pOpen)); + + if (pOpen == NULL) { + goto fail; + } + + pOpen->pid = pid; + pOpen->clientType = clientType; + pOpen->type = NvKmsPerOpenTypeUndefined; + pOpen->pOpenKernel = pOpenKernel; + + nvListAppend(&pOpen->perOpenListEntry, &perOpenList); + + return pOpen; + +fail: + nvKmsClose(pOpen); + return NULL; +} + +extern const char *const pNV_KMS_ID; + +#if NVKMS_PROCFS_ENABLE + +static const char *ProcFsPerOpenTypeString( + enum NvKmsPerOpenType type) +{ + switch (type) { + case NvKmsPerOpenTypeIoctl: return "ioctl"; + case NvKmsPerOpenTypeGrantSurface: return "grantSurface"; + case NvKmsPerOpenTypeGrantSwapGroup: return "grantSwapGroup"; + case NvKmsPerOpenTypeGrantPermissions: return "grantPermissions"; + case NvKmsPerOpenTypeUnicastEvent: return "unicastEvent"; + case NvKmsPerOpenTypeUndefined: return "undefined"; + } + + return "unknown"; +} + +static const char *ProcFsPerOpenClientTypeString( + enum NvKmsClientType clientType) +{ + switch (clientType) { + case NVKMS_CLIENT_USER_SPACE: return "user-space"; + case NVKMS_CLIENT_KERNEL_SPACE: return "kernel-space"; + } + + return "unknown"; +} + +static const char *ProcFsPermissionsTypeString( + enum NvKmsPermissionsType permissionsType) +{ + switch (permissionsType) { + case NV_KMS_PERMISSIONS_TYPE_FLIPPING: return "flipping"; + case NV_KMS_PERMISSIONS_TYPE_MODESET: return "modeset"; + } + + return "unknown"; +} + +static void +ProcFsPrintClients( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + NVEvoInfoStringRec infoString; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + const char *extra = ""; + + nvInitInfoString(&infoString, buffer, size); + + if (pOpen == nvEvoGlobal.nvKmsPerOpen) { + extra = " (NVKMS-internal client)"; + } + + nvEvoLogInfoString(&infoString, + "Client (pOpen) : %p", pOpen); + nvEvoLogInfoString(&infoString, + " pid : %d%s", pOpen->pid, extra); + nvEvoLogInfoString(&infoString, + " clientType : %s", + ProcFsPerOpenClientTypeString(pOpen->clientType)); + nvEvoLogInfoString(&infoString, + " type : %s", + 
ProcFsPerOpenTypeString(pOpen->type)); + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + NVDevEvoPtr pDevEvo = pOpenDev->pDevEvo; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId, pDevEvo); + nvEvoLogInfoString(&infoString, + " NvKmsDeviceHandle : %d", deviceHandle); + } + + } else if (pOpen->type == NvKmsPerOpenTypeGrantSurface) { + + NVSurfaceEvoPtr pSurfaceEvo = pOpen->grantSurface.pSurfaceEvo; + + nvEvoLogInfoString(&infoString, + " pSurfaceEvo : %p", pSurfaceEvo); + + } else if (pOpen->type == NvKmsPerOpenTypeGrantPermissions) { + + NVDevEvoPtr pDevEvo = pOpen->grantPermissions.pDevEvo; + const struct NvKmsPermissions *pPerms = + &pOpen->grantPermissions.permissions; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId, pDevEvo); + + nvEvoLogInfoString(&infoString, + " PermissionsType : %s", + ProcFsPermissionsTypeString(pPerms->type)); + + if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_FLIPPING) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { + + const NvU8 layerMask = + pPerms->flip.disp[d].head[h].layerMask; + + if (layerMask == 0) { + continue; + } + + nvEvoLogInfoString(&infoString, + " disp:%02d, head:%02d : 0x%08x", d, h, + layerMask); + } + } + } else if (pPerms->type == NV_KMS_PERMISSIONS_TYPE_MODESET) { + NvU32 d, h; + + for (d = 0; d < ARRAY_LEN(pPerms->flip.disp); d++) { + for (h = 0; h < ARRAY_LEN(pPerms->flip.disp[d].head); h++) { + + NVDpyIdList dpyIdList = + pPerms->modeset.disp[d].head[h].dpyIdList; + NVDispEvoPtr pDispEvo; + char *dpys; + + if (nvDpyIdListIsEmpty(dpyIdList)) { + continue; + } + + pDispEvo = pDevEvo->pDispEvo[d]; + + dpys = nvGetDpyIdListStringEvo(pDispEvo, dpyIdList); + + if (dpys == NULL) { + continue; + } + + nvEvoLogInfoString(&infoString, + " disp:%02d, head:%02d : %s", d, h, dpys); + + nvFree(dpys); + } + } + } + } else if (pOpen->type == NvKmsPerOpenTypeGrantSwapGroup) { + + NVDevEvoPtr pDevEvo = pOpen->grantSwapGroup.pDevEvo; + + nvEvoLogInfoString(&infoString, + " pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId, pDevEvo); + nvEvoLogInfoString(&infoString, + " pSwapGroup : %p", + pOpen->grantSwapGroup.pSwapGroup); + + } else if (pOpen->type == NvKmsPerOpenTypeUnicastEvent) { + + nvEvoLogInfoString(&infoString, + " pDeferredRequestFifo : %p", + pOpen->unicastEvent.pDeferredRequestFifo); + } + + nvEvoLogInfoString(&infoString, ""); + outString(data, buffer); + } +} + +static void PrintSurfacePlanes( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo) +{ + NvU8 planeIndex; + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + nvEvoLogInfoString(pInfoString, + "plane[%u] disp ctxDma:0x%08x pitch:%u offset:%" NvU64_fmtu + " rmObjectSizeInBytes:%" NvU64_fmtu, + planeIndex, + pSurfaceEvo->planes[planeIndex].ctxDma, + pSurfaceEvo->planes[planeIndex].pitch, + pSurfaceEvo->planes[planeIndex].offset, + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes); + } +} + +static void PrintSurfaceClients( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo, + const NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + 
FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pTmpSurfaceEvo; + + if (pOpenDev->pDevEvo != pDevEvo) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pTmpSurfaceEvo, surfaceHandle) { + if (pTmpSurfaceEvo != pSurfaceEvo) { + continue; + } + + nvEvoLogInfoString(pInfoString, + " pOpen : %p", pOpen); + nvEvoLogInfoString(pInfoString, + " pOpenDev : %p", pOpenDev); + nvEvoLogInfoString(pInfoString, + " NvKmsSurfaceHandle : %d", surfaceHandle); + } + } + } +} + +static void PrintSurface( + NVEvoInfoStringRec *pInfoString, + const NVSurfaceEvoRec *pSurfaceEvo, + const NVDevEvoRec *pDevEvo) +{ + NvU32 sd; + + nvEvoLogInfoString(pInfoString, + "pSurfaceEvo : %p", pSurfaceEvo); + nvEvoLogInfoString(pInfoString, + " pDevEvo (deviceId:%02d) : %p", pDevEvo->deviceId, pDevEvo); + nvEvoLogInfoString(pInfoString, + " owner : " + "pOpenDev:%p, NvKmsSurfaceHandle:%d", + pSurfaceEvo->owner.pOpenDev, + pSurfaceEvo->owner.surfaceHandle); + nvEvoLogInfoString(pInfoString, + " {width,height}InPixels : %d x %d", + pSurfaceEvo->widthInPixels, + pSurfaceEvo->heightInPixels); + nvEvoLogInfoString(pInfoString, + " misc : " + "log2GobsPerBlockY:%d", + pSurfaceEvo->log2GobsPerBlockY); + nvEvoLogInfoString(pInfoString, + " memory : layout:%s format:%s", + NvKmsSurfaceMemoryLayoutToString(pSurfaceEvo->layout), + nvKmsSurfaceMemoryFormatToString(pSurfaceEvo->format)); + nvEvoLogInfoString(pInfoString, + " refCnts : " + "rmRefCnt:%" NvU64_fmtx" structRefCnt:%" NvU64_fmtx, + pSurfaceEvo->rmRefCnt, + pSurfaceEvo->structRefCnt); + + PrintSurfacePlanes(pInfoString, pSurfaceEvo); + + nvEvoLogInfoString(pInfoString, + " clients :"); + + PrintSurfaceClients(pInfoString, pSurfaceEvo, pDevEvo); + + for (sd = 0; sd < pDevEvo->numSubDevices; sd++) { + if (pSurfaceEvo->cpuAddress[sd] != NULL) { + nvEvoLogInfoString(pInfoString, + " cpuAddress[%02d] : %p", + sd, pSurfaceEvo->cpuAddress[sd]); + } + } + + nvEvoLogInfoString(pInfoString, ""); +} + +static void +ProcFsPrintSurfaces( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + NVEvoInfoStringRec infoString; + NvU32 i; + + for (i = 0; i < 2; i++) { + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + NvKmsGenericHandle deviceHandle; + struct NvKmsPerOpenDev *pOpenDev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, deviceHandle) { + + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvo, + surfaceHandle) { + /* + * Because clients can grant surfaces between each + * other, a pSurfaceEvo could be in multiple clients' + * lists. So, we loop over all surfaces on all clients + * twice: the first time we print unique surfaces and set + * 'procFsFlag' to recognize duplicates. The second time, + * we clear 'procFsFlag'. 
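+                         * For example, if a surface is granted from client A
+                         * to client B, the first pass prints it once (when
+                         * reached through A) and sets 'procFsFlag' so the
+                         * duplicate reference through B is skipped; the second
+                         * pass clears the flag so a later read of this procfs
+                         * file starts from a clean state.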
+ */ + if (i == 0) { + if (pSurfaceEvo->procFsFlag) { + continue; + } + + nvInitInfoString(&infoString, buffer, size); + PrintSurface(&infoString, pSurfaceEvo, + pOpenDev->pDevEvo); + outString(data, buffer); + + pSurfaceEvo->procFsFlag = TRUE; + } else { + pSurfaceEvo->procFsFlag = FALSE; + } + } + } + } + } +} + +static const char *SwapGroupPerEyeStereoString(const NvU32 request) +{ + const NvU32 value = + DRF_VAL(KMS, _DEFERRED_REQUEST, + _SWAP_GROUP_READY_PER_EYE_STEREO, request); + + switch (value) { + + case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_PAIR: + return "PerPair"; + case NVKMS_DEFERRED_REQUEST_SWAP_GROUP_READY_PER_EYE_STEREO_PER_EYE: + return "PerEye"; + } + + return "Unknown"; +} + +static void ProcFsPrintOneDeferredRequestFifo( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString, + const NVDeferredRequestFifoRec *pDeferredRequestFifo, + const struct NvKmsPerOpen *pOpen, + const struct NvKmsPerOpenDev *pOpenDev, + const NvKmsDeferredRequestFifoHandle deferredRequestFifoHandle) +{ + NVEvoInfoStringRec infoString; + + const struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo; + NvU32 i, prevI; + + nvInitInfoString(&infoString, buffer, size); + + nvEvoLogInfoString(&infoString, + "pDeferredRequestFifo : %p", pDeferredRequestFifo); + + nvEvoLogInfoString(&infoString, + " Client (pOpen) : %p", pOpen); + + nvEvoLogInfoString(&infoString, + " pOpenDev : %p", pOpenDev); + + nvEvoLogInfoString(&infoString, + " pSurfaceEvo : %p", pDeferredRequestFifo->pSurfaceEvo); + + nvEvoLogInfoString(&infoString, + " NvKms...RequestFifoHandle : %d", deferredRequestFifoHandle); + + if (pDeferredRequestFifo->swapGroup.pSwapGroup != NULL) { + + nvEvoLogInfoString(&infoString, + " swapGroup :"); + nvEvoLogInfoString(&infoString, + " pSwapGroup : %p", + pDeferredRequestFifo->swapGroup.pSwapGroup); + nvEvoLogInfoString(&infoString, + " pOpenUnicastEvent : %p", + pDeferredRequestFifo->swapGroup.pOpenUnicastEvent); + nvEvoLogInfoString(&infoString, + " ready : %d", + pDeferredRequestFifo->swapGroup.ready); + nvEvoLogInfoString(&infoString, + " semaphoreIndex : 0x%02x", + pDeferredRequestFifo->swapGroup.semaphoreIndex); + } + + nvEvoLogInfoString(&infoString, + " put : %d", fifo->put); + + nvEvoLogInfoString(&infoString, + " get : %d", fifo->get); + + outString(data, buffer); + + for (i = 0; i < ARRAY_LEN(fifo->request); i++) { + + const NvU32 request = fifo->request[i]; + const NvU32 opcode = DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request); + const NvU32 semaphoreIndex = + DRF_VAL(KMS, _DEFERRED_REQUEST, _SEMAPHORE_INDEX, request); + + switch (opcode) { + + case NVKMS_DEFERRED_REQUEST_OPCODE_NOP: + break; + + case NVKMS_DEFERRED_REQUEST_OPCODE_SWAP_GROUP_READY: + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " request[0x%02x] : " + "opcode:SWAP_GROUP_READY, semaphoreIndex:0x%02x, " + "perEyeStereo:%s", + i, semaphoreIndex, + SwapGroupPerEyeStereoString(request)); + outString(data, buffer); + break; + + default: + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " request[0x%02x] : opcode:INVALID", i); + outString(data, buffer); + break; + } + } + + /* + * Print the fifo->semaphore[] array, but collapse multiple lines with + * duplicate values. + * + * To collapse duplicates, loop over all semaphore[] elements. If the + * current element is the same as semaphore[prev], continue. If they + * differ, print the value in semaphore[prev .. i-1], and update prev. 
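+     * For example, if semaphore[0x00] through semaphore[0x02] all hold
+     * 0x00000001 and semaphore[0x03] holds 0x00000002, the first three
+     * entries are reported on a single "semaphore[0x00..0x02]" line.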
+ */ + prevI = 0; + + for (i = 1; i <= ARRAY_LEN(fifo->semaphore); i++) { + + const NvU32 prevValue = fifo->semaphore[prevI].data[0]; + + if (i != ARRAY_LEN(fifo->semaphore)) { + const NvU32 currentValue = fifo->semaphore[i].data[0]; + + /* + * If the value in this element matches the previous element, don't + * print anything, yet. + */ + if (currentValue == prevValue) { + continue; + } + } + + nvInitInfoString(&infoString, buffer, size); + + if (prevI == (i - 1)) { + nvEvoLogInfoString(&infoString, + " semaphore[0x%02x] : 0x%08x", + prevI, prevValue); + } else { + nvEvoLogInfoString(&infoString, + " semaphore[0x%02x..0x%02x] : 0x%08x", + prevI, i - 1, prevValue); + } + + outString(data, buffer); + + prevI = i; + } + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, ""); + outString(data, buffer); +} + +static void +ProcFsPrintDeferredRequestFifos( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle devHandle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpen->ioctl.devHandles, + pOpenDev, devHandle) { + + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle fifoHandle; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, fifoHandle) { + + ProcFsPrintOneDeferredRequestFifo( + data, buffer, size, outString, + pDeferredRequestFifo, + pOpen, + pOpenDev, + fifoHandle); + } + } + } +} + +static void +ProcFsPrintDpyCrcs( + void *data, + char *buffer, + size_t size, + nvkms_procfs_out_string_func_t *outString) +{ + NVDevEvoPtr pDevEvo; + NVDispEvoPtr pDispEvo; + NvU32 dispIndex, head; + NVEvoInfoStringRec infoString; + + FOR_ALL_EVO_DEVS(pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + "pDevEvo (deviceId:%02d) : %p", + pDevEvo->deviceId, pDevEvo); + outString(data, buffer); + + FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) { + + nvInitInfoString(&infoString, buffer, size); + nvEvoLogInfoString(&infoString, + " pDispEvo (dispIndex:%02d) : %p", + dispIndex, pDispEvo); + outString(data, buffer); + + for (head = 0; head < pDevEvo->numHeads; head++) { + const NVDispHeadStateEvoRec *pHeadState = + &pDispEvo->headState[head]; + struct NvKmsDpyCRC32 compCrc; + struct NvKmsDpyCRC32 rgCrc; + struct NvKmsDpyCRC32 outputCrc; + CRC32NotifierCrcOut crcOut; + crcOut.compositorCrc32 = &compCrc; + crcOut.rasterGeneratorCrc32 = &rgCrc; + crcOut.outputCrc32 = &outputCrc; + + if (pHeadState->pConnectorEvo == NULL) { + continue; + } + + nvInitInfoString(&infoString, buffer, size); + if (nvReadCRC32Evo(pDispEvo, head, &crcOut)) { + nvEvoLogInfoString(&infoString, + " head %d :", + head); + if (compCrc.supported) { + nvEvoLogInfoString(&infoString, + " compositor CRC : 0x%08x", + compCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " compositor CRC : unsupported"); + } + if (rgCrc.supported) { + nvEvoLogInfoString(&infoString, + " raster generator CRC : 0x%08x", + rgCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " raster generator CRC : unsupported"); + } + if (outputCrc.supported) { + nvEvoLogInfoString(&infoString, + " output CRC : 0x%08x", + outputCrc.value); + } else { + nvEvoLogInfoString(&infoString, + " output CRC : unsupported"); + } + } else { + nvEvoLogInfoString(&infoString, + " head %d : error", + head); + } + outString(data, buffer); 
+ } + } + } +} + +#endif /* NVKMS_PROCFS_ENABLE */ + +void nvKmsGetProcFiles(const nvkms_procfs_file_t **ppProcFiles) +{ +#if NVKMS_PROCFS_ENABLE + static const nvkms_procfs_file_t procFiles[] = { + { "clients", ProcFsPrintClients }, + { "surfaces", ProcFsPrintSurfaces }, + { "deferred-request-fifos", ProcFsPrintDeferredRequestFifos }, + { "crcs", ProcFsPrintDpyCrcs }, + { NULL, NULL }, + }; + + *ppProcFiles = procFiles; +#else + *ppProcFiles = NULL; +#endif +} + +static void FreeGlobalState(void) +{ + nvKmsClose(nvEvoGlobal.nvKmsPerOpen); + nvEvoGlobal.nvKmsPerOpen = NULL; + + if (nvEvoGlobal.clientHandle != 0) { + nvRmApiFree(nvEvoGlobal.clientHandle, nvEvoGlobal.clientHandle, + nvEvoGlobal.clientHandle); + nvEvoGlobal.clientHandle = 0; + } +} + +NvBool nvKmsModuleLoad(void) +{ + NvU32 ret = NVOS_STATUS_ERROR_GENERIC; + + nvEvoLog(EVO_LOG_INFO, "Loading %s", pNV_KMS_ID); + + ret = nvRmApiAlloc(NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &nvEvoGlobal.clientHandle); + + if (ret != NVOS_STATUS_SUCCESS) { + nvEvoLog(EVO_LOG_ERROR, "Failed to initialize client"); + goto fail; + } + + nvEvoGlobal.nvKmsPerOpen = nvKmsOpen(0, NVKMS_CLIENT_KERNEL_SPACE, NULL); + if (!nvEvoGlobal.nvKmsPerOpen) { + nvEvoLog(EVO_LOG_ERROR, "Failed to initialize internal modeset client"); + goto fail; + } + + if (!AssignNvKmsPerOpenType(nvEvoGlobal.nvKmsPerOpen, + NvKmsPerOpenTypeIoctl, FALSE)) { + goto fail; + } + + return TRUE; +fail: + FreeGlobalState(); + + return FALSE; +} + + +void nvKmsModuleUnload(void) +{ + FreeGlobalState(); + + nvAssert(nvListIsEmpty(&nvEvoGlobal.frameLockList)); + nvAssert(nvListIsEmpty(&nvEvoGlobal.devList)); +#if defined(DEBUG) + nvReportUnfreedAllocations(); +#endif + nvEvoLog(EVO_LOG_INFO, "Unloading"); +} + + +static void SendEvent(struct NvKmsPerOpen *pOpen, + const struct NvKmsEvent *pEvent) +{ + struct NvKmsPerOpenEventListEntry *pEntry = nvAlloc(sizeof(*pEntry)); + + nvAssert(pOpen->type == NvKmsPerOpenTypeIoctl); + + if (pEntry == NULL) { + return; + } + + pEntry->event = *pEvent; + nvListAppend(&pEntry->eventListEntry, &pOpen->ioctl.eventList); + + nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE); +} + +static void ConsoleRestoreTimerFired(void *dataPtr, NvU32 dataU32) +{ + NVDevEvoPtr pDevEvo = dataPtr; + + if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) { + pDevEvo->skipConsoleRestore = FALSE; + nvEvoRestoreConsole(pDevEvo, TRUE /* allowMST */); + } +} + +/*! + * Generate a dpy event. + * + * \param[in] pDpyEvo The dpy for which the event should be generated. + * \param[in] eventType The NVKMS_EVENT_TYPE_ + * \param[in] attribute The NvKmsDpyAttribute; only used for + * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED. + * \param[in] NvS64 The NvKmsDpyAttribute value; only used for + * NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED. 
+ */ +static void SendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, + const NvU32 eventType, + const enum NvKmsDpyAttribute attribute, + const NvS64 value) +{ + struct NvKmsPerOpen *pOpen; + const NVDispEvoRec *pDispEvo = pDpyEvo->pDispEvo; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsEvent event = { 0 }; + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + + if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo, + &deviceHandle, &dispHandle)) { + continue; + } + + if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) { + continue; + } + + event.eventType = eventType; + + switch (eventType) { + + case NVKMS_EVENT_TYPE_DPY_CHANGED: + event.u.dpyChanged.deviceHandle = deviceHandle; + event.u.dpyChanged.dispHandle = dispHandle; + event.u.dpyChanged.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED: + event.u.dynamicDpyConnected.deviceHandle = deviceHandle; + event.u.dynamicDpyConnected.dispHandle = dispHandle; + event.u.dynamicDpyConnected.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED: + event.u.dynamicDpyDisconnected.deviceHandle = deviceHandle; + event.u.dynamicDpyDisconnected.dispHandle = dispHandle; + event.u.dynamicDpyDisconnected.dpyId = pDpyEvo->id; + break; + + case NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED: + event.u.dpyAttributeChanged.deviceHandle = deviceHandle; + event.u.dpyAttributeChanged.dispHandle = dispHandle; + event.u.dpyAttributeChanged.dpyId = pDpyEvo->id; + event.u.dpyAttributeChanged.attribute = attribute; + event.u.dpyAttributeChanged.value = value; + break; + + default: + nvAssert(!"Bad eventType"); + return; + } + + SendEvent(pOpen, &event); + } + + if (eventType == NVKMS_EVENT_TYPE_DPY_CHANGED) { + NVDevEvoPtr pDevEvo = pDpyEvo->pDispEvo->pDevEvo; + + if (pDevEvo->modesetOwner == NULL && pDevEvo->handleConsoleHotplugs) { + nvkms_free_timer(pDevEvo->consoleRestoreTimer); + pDevEvo->consoleRestoreTimer = + nvkms_alloc_timer(ConsoleRestoreTimerFired, pDevEvo, 0, 500); + } + } +} + +void nvSendDpyEventEvo(const NVDpyEvoRec *pDpyEvo, const NvU32 eventType) +{ + nvAssert(eventType != NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED); + SendDpyEventEvo(pDpyEvo, eventType, + 0 /* attribute (unused) */, + 0 /* value (unused) */ ); +} + +void nvSendDpyAttributeChangedEventEvo(const NVDpyEvoRec *pDpyEvo, + const enum NvKmsDpyAttribute attribute, + const NvS64 value) +{ + SendDpyEventEvo(pDpyEvo, + NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED, + attribute, value); +} + +void nvSendFrameLockAttributeChangedEventEvo( + const NVFrameLockEvoRec *pFrameLockEvo, + const enum NvKmsFrameLockAttribute attribute, + const NvS64 value) +{ + struct NvKmsPerOpen *pOpen; + const NvU32 eventType = NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsEvent event = { 0 }; + NvKmsFrameLockHandle frameLockHandle; + + if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) { + continue; + } + + if (!FrameLockEvoToFrameLockHandle(pOpen, pFrameLockEvo, + &frameLockHandle)) { + continue; + } + + event.eventType = eventType; + event.u.frameLockAttributeChanged.frameLockHandle = frameLockHandle; + event.u.frameLockAttributeChanged.attribute = attribute; + event.u.frameLockAttributeChanged.value = value; + + SendEvent(pOpen, &event); + } +} + + +void nvSendFlipOccurredEventEvo( + const NVDevEvoRec *pDevEvo, + NVEvoChannelMask channelMask) +{ + struct NvKmsPerOpen *pOpen; + const NvU32 eventType = 
NVKMS_EVENT_TYPE_FLIP_OCCURRED; + const NvU32 dispIndex = 0; /* XXX NVKMS TODO: need disp-scope in event */ + const NVDispEvoRec *pDispEvo = pDevEvo->pDispEvo[dispIndex]; + NvU32 head, layer; + + nvAssert(NV_EVO_CHANNEL_MASK_POPCOUNT(channelMask) == 1); + + for (head = 0; head < pDevEvo->numHeads; head++) { + for (layer = 0; layer < pDevEvo->head[head].numLayers; layer++) { + if (pDevEvo->head[head].layer[layer]->channelMask == channelMask) { + break; + } + } + + if (layer < pDevEvo->head[head].numLayers) { + break; + } + } + + if (head >= pDevEvo->numHeads) { + nvAssert(!"Bad channelMask"); + return; + } + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsEvent event = { 0 }; + NvKmsDeviceHandle deviceHandle; + NvKmsDispHandle dispHandle; + + struct NvKmsPerOpenDev *pOpenDev; + const struct NvKmsFlipPermissions *pFlipPermissions; + + pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + + if (pOpenDev == NULL) { + continue; + } + + if ((pOpen->ioctl.eventInterestMask & NVBIT(eventType)) == 0) { + continue; + } + + pFlipPermissions = &pOpenDev->flipPermissions; + + if ((pFlipPermissions->disp[dispIndex].head[head].layerMask & + NVBIT(layer)) == 0x0) { + continue; + } + + if (!DispEvoToDevAndDispHandles(pOpen, pDispEvo, + &deviceHandle, &dispHandle)) { + continue; + } + + event.eventType = eventType; + event.u.flipOccurred.deviceHandle = deviceHandle; + event.u.flipOccurred.dispHandle = dispHandle; + event.u.flipOccurred.head = head; + event.u.flipOccurred.layer = layer; + + SendEvent(pOpen, &event); + } +} + +void nvSendUnicastEvent(struct NvKmsPerOpen *pOpen) +{ + if (pOpen == NULL) { + return; + } + + nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent); + + nvkms_event_queue_changed(pOpen->pOpenKernel, TRUE); +} + +void nvRemoveUnicastEvent(struct NvKmsPerOpen *pOpen) +{ + NVDeferredRequestFifoPtr pDeferredRequestFifo; + + if (pOpen == NULL) { + return; + } + + nvAssert(pOpen->type == NvKmsPerOpenTypeUnicastEvent); + + pDeferredRequestFifo = pOpen->unicastEvent.pDeferredRequestFifo; + + if (pDeferredRequestFifo != NULL) { + pDeferredRequestFifo->swapGroup.pOpenUnicastEvent = NULL; + pOpen->unicastEvent.pDeferredRequestFifo = NULL; + } +} + +static void AllocSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvo, surfaceHandle) { + + NvU8 planeIndex; + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + continue; + } + + if (!pSurfaceEvo->requireCtxDma) { + nvAssert(pSurfaceEvo->planes[0].ctxDma == 0); + continue; + } + + /* + * Orphan surfaces should not get this far: they should + * fail the owner check above. 
+ */ + nvAssert(pSurfaceEvo->rmRefCnt > 0); + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + + pSurfaceEvo->planes[planeIndex].ctxDma = + nvRmEvoAllocateAndBindDispContextDMA( + pDevEvo, + pSurfaceEvo->planes[planeIndex].rmHandle, + pSurfaceEvo->layout, + pSurfaceEvo->planes[planeIndex].rmObjectSizeInBytes - 1); + if (!pSurfaceEvo->planes[planeIndex].ctxDma) { + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + nvAssert(!"Failed to re-allocate surface ctx dma"); + return; + } + } + } + } +} + + +static void FreeSurfaceCtxDmasForAllOpens(NVDevEvoRec *pDevEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + + NvKmsGenericHandle surfaceHandle; + NVSurfaceEvoPtr pSurfaceEvo; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvo, surfaceHandle) { + + NvU8 planeIndex; + + if (!nvEvoIsSurfaceOwner(pSurfaceEvo, pOpenDev, surfaceHandle)) { + continue; + } + + /* + * Orphan surfaces should not get this far: they should + * fail the owner check above. + */ + nvAssert(pSurfaceEvo->rmRefCnt > 0); + + if (!pSurfaceEvo->requireCtxDma) { + nvAssert(pSurfaceEvo->planes[0].ctxDma == 0); + continue; + } + + FOR_ALL_VALID_PLANES(planeIndex, pSurfaceEvo) { + nvRmEvoFreeDispContextDMA( + pDevEvo, + &pSurfaceEvo->planes[planeIndex].ctxDma); + } + } + } +} + +#if defined(DEBUG) +NvBool nvSurfaceEvoInAnyOpens(const NVSurfaceEvoRec *pSurfaceEvo) +{ + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenList, perOpenListEntry) { + + if (pOpen->type == NvKmsPerOpenTypeIoctl) { + struct NvKmsPerOpenDev *pOpenDev; + NvKmsGenericHandle dev; + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpen->ioctl.devHandles, + pOpenDev, dev) { + + NvKmsGenericHandle surfaceHandleUnused; + NVSurfaceEvoPtr pSurfaceEvoTmp; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES(&pOpenDev->surfaceHandles, + pSurfaceEvoTmp, + surfaceHandleUnused) { + if (pSurfaceEvoTmp == pSurfaceEvo) { + return TRUE; + } + } + } + } else if ((pOpen->type == NvKmsPerOpenTypeGrantSurface) && + (pOpen->grantSurface.pSurfaceEvo == pSurfaceEvo)) { + return TRUE; + } + } + + return FALSE; +} +#endif + +NVDevEvoPtr nvGetDevEvoFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev) +{ + nvAssert(pOpenDev != NULL); + return pOpenDev->pDevEvo; +} + +const struct NvKmsFlipPermissions *nvGetFlipPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev) +{ + nvAssert(pOpenDev != NULL); + return &pOpenDev->flipPermissions; +} + +const struct NvKmsModesetPermissions *nvGetModesetPermissionsFromOpenDev( + const struct NvKmsPerOpenDev *pOpenDev) +{ + nvAssert(pOpenDev != NULL); + return &pOpenDev->modesetPermissions; +} + +NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDev( + struct NvKmsPerOpenDev *pOpenDev) +{ + if (pOpenDev == NULL) { + return NULL; + } + + return &pOpenDev->surfaceHandles; +} + +const NVEvoApiHandlesRec *nvGetSurfaceHandlesFromOpenDevConst( + const struct NvKmsPerOpenDev *pOpenDev) +{ + if (pOpenDev == NULL) { + return NULL; + } + + return &pOpenDev->surfaceHandles; +} + +static int suspendCounter = 0; + +/* + * Suspend NVKMS. + * + * This function is called by RM once per GPU, but NVKMS just counts the number + * of suspend calls so that it can deallocate the core channels on the first + * call to suspend(), and reallocate them on the last call to resume(). 
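+ * For example, on a system with two GPUs, RM calls nvKmsSuspend() twice:
+ * the core channels are torn down on the first call (counter 0 -> 1) and
+ * are reallocated only when the final matching nvKmsResume() call brings
+ * the counter back to 0.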
+ */ +void nvKmsSuspend(NvU32 gpuId) +{ + if (suspendCounter == 0) { + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Suspending"); + + /* + * Shut down all heads and skip console restore. + * + * This works around an RM bug where it fails to train DisplayPort + * links during resume if the system was suspended while heads were + * active. + * + * XXX TODO bug 1850734: In addition to fixing the above + * RM bug, NVKMS should clear pDispEvo head and connector state + * that becomes stale after suspend. Shutting the heads down here + * clears the relevant state explicitly. + */ + nvShutDownHeads(pDevEvo, + NULL /* pTestFunc, shut down all heads */); + pDevEvo->skipConsoleRestore = TRUE; + + FreeSurfaceCtxDmasForAllOpens(pDevEvo); + + nvFreeCoreChannelEvo(pDevEvo); + } + } + + suspendCounter++; +} + +void nvKmsResume(NvU32 gpuId) +{ + suspendCounter--; + + if (suspendCounter == 0) { + NVDevEvoPtr pDevEvo; + + FOR_ALL_EVO_DEVS(pDevEvo) { + nvEvoLogDevDebug(pDevEvo, EVO_LOG_INFO, "Resuming"); + + nvRestoreSORAssigmentsEvo(pDevEvo); + + ReallocCoreChannel(pDevEvo); + + if (pDevEvo->modesetOwner == NULL) { + // Hardware state was lost, so we need to force a console + // restore. + pDevEvo->skipConsoleRestore = FALSE; + RestoreConsole(pDevEvo); + } + } + } +} + +static void ServiceOneDeferredRequestFifo( + NVDevEvoPtr pDevEvo, + NVDeferredRequestFifoRec *pDeferredRequestFifo) +{ + struct NvKmsDeferredRequestFifo *fifo = pDeferredRequestFifo->fifo; + NvU32 get, put; + + nvAssert(fifo != NULL); + + get = fifo->get; + put = fifo->put; + + if (put == get) { + return; + } + + if ((get >= ARRAY_LEN(fifo->request)) || + (put >= ARRAY_LEN(fifo->request))) { + return; + } + + while (get != put) { + + const NvU32 request = fifo->request[get]; + const NvU32 opcode = + DRF_VAL(KMS, _DEFERRED_REQUEST, _OPCODE, request); + + switch (opcode) { + + case NVKMS_DEFERRED_REQUEST_OPCODE_NOP: + break; + + default: + nvAssert(!"Invalid NVKMS deferred request opcode"); + break; + } + + get = (get + 1) % ARRAY_LEN(fifo->request); + } + + fifo->get = put; +} + +/*! + * Respond to a non-stall interrupt. 
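+ *
+ * This walks every ioctl client's per-device deferred request FIFOs and
+ * services any requests pending between each FIFO's 'get' and 'put'
+ * offsets.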
+ */ +void nvKmsServiceNonStallInterrupt(void *dataPtr, NvU32 dataU32) +{ + NVDevEvoPtr pDevEvo = dataPtr; + struct NvKmsPerOpen *pOpen; + + nvListForEachEntry(pOpen, &perOpenIoctlList, perOpenIoctlListEntry) { + + struct NvKmsPerOpenDev *pOpenDev = DevEvoToOpenDev(pOpen, pDevEvo); + NVDeferredRequestFifoRec *pDeferredRequestFifo; + NvKmsGenericHandle handle; + + if (pOpenDev == NULL) { + continue; + } + + FOR_ALL_POINTERS_IN_EVO_API_HANDLES( + &pOpenDev->deferredRequestFifoHandles, + pDeferredRequestFifo, + handle) { + + ServiceOneDeferredRequestFifo(pDevEvo, pDeferredRequestFifo); + } + } + +} + +NvBool nvKmsGetBacklight(NvU32 display_id, void *drv_priv, NvU32 *brightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NV_STATUS status = NV_ERR_INVALID_STATE; + NVDispEvoRec *pDispEvo = drv_priv; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = display_id; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS, + ¶ms, sizeof(params)); + + if (status == NV_OK) { + *brightness = params.brightness; + } + + return status == NV_OK; +} + +NvBool nvKmsSetBacklight(NvU32 display_id, void *drv_priv, NvU32 brightness) +{ + NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS params = { 0 }; + NV_STATUS status = NV_ERR_INVALID_STATE; + NVDispEvoRec *pDispEvo = drv_priv; + NVDevEvoRec *pDevEvo = pDispEvo->pDevEvo; + + params.subDeviceInstance = pDispEvo->displayOwner; + params.displayId = display_id; + params.brightness = brightness; + + status = nvRmApiControl(nvEvoGlobal.clientHandle, + pDevEvo->displayCommonHandle, + NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS, + ¶ms, sizeof(params)); + + return status == NV_OK; +} diff --git a/src/nvidia-modeset/srcs.mk b/src/nvidia-modeset/srcs.mk new file mode 100644 index 000000000..8754b4455 --- /dev/null +++ b/src/nvidia-modeset/srcs.mk @@ -0,0 +1,181 @@ +SRCS ?= +SRCS_CXX ?= + +SRCS += ../common/shared/nvstatus/nvstatus.c +SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF16UI.c +SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF32UI.c +SRCS += ../common/softfloat/source/8086-SSE/s_commonNaNToF64UI.c +SRCS += ../common/softfloat/source/8086-SSE/s_f32UIToCommonNaN.c +SRCS += ../common/softfloat/source/8086-SSE/s_f64UIToCommonNaN.c +SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF32UI.c +SRCS += ../common/softfloat/source/8086-SSE/s_propagateNaNF64UI.c +SRCS += ../common/softfloat/source/8086-SSE/softfloat_raiseFlags.c +SRCS += ../common/softfloat/source/f32_add.c +SRCS += ../common/softfloat/source/f32_div.c +SRCS += ../common/softfloat/source/f32_eq.c +SRCS += ../common/softfloat/source/f32_eq_signaling.c +SRCS += ../common/softfloat/source/f32_isSignalingNaN.c +SRCS += ../common/softfloat/source/f32_le.c +SRCS += ../common/softfloat/source/f32_le_quiet.c +SRCS += ../common/softfloat/source/f32_lt.c +SRCS += ../common/softfloat/source/f32_lt_quiet.c +SRCS += ../common/softfloat/source/f32_mul.c +SRCS += ../common/softfloat/source/f32_mulAdd.c +SRCS += ../common/softfloat/source/f32_rem.c +SRCS += ../common/softfloat/source/f32_roundToInt.c +SRCS += ../common/softfloat/source/f32_sqrt.c +SRCS += ../common/softfloat/source/f32_sub.c +SRCS += ../common/softfloat/source/f32_to_f16.c +SRCS += ../common/softfloat/source/f32_to_f64.c +SRCS += ../common/softfloat/source/f32_to_i32.c +SRCS += ../common/softfloat/source/f32_to_i32_r_minMag.c +SRCS += 
../common/softfloat/source/f32_to_i64.c +SRCS += ../common/softfloat/source/f32_to_i64_r_minMag.c +SRCS += ../common/softfloat/source/f32_to_ui32.c +SRCS += ../common/softfloat/source/f32_to_ui32_r_minMag.c +SRCS += ../common/softfloat/source/f32_to_ui64.c +SRCS += ../common/softfloat/source/f32_to_ui64_r_minMag.c +SRCS += ../common/softfloat/source/f64_add.c +SRCS += ../common/softfloat/source/f64_div.c +SRCS += ../common/softfloat/source/f64_eq.c +SRCS += ../common/softfloat/source/f64_eq_signaling.c +SRCS += ../common/softfloat/source/f64_isSignalingNaN.c +SRCS += ../common/softfloat/source/f64_le.c +SRCS += ../common/softfloat/source/f64_le_quiet.c +SRCS += ../common/softfloat/source/f64_lt.c +SRCS += ../common/softfloat/source/f64_lt_quiet.c +SRCS += ../common/softfloat/source/f64_mul.c +SRCS += ../common/softfloat/source/f64_mulAdd.c +SRCS += ../common/softfloat/source/f64_rem.c +SRCS += ../common/softfloat/source/f64_roundToInt.c +SRCS += ../common/softfloat/source/f64_sqrt.c +SRCS += ../common/softfloat/source/f64_sub.c +SRCS += ../common/softfloat/source/f64_to_f32.c +SRCS += ../common/softfloat/source/f64_to_i32.c +SRCS += ../common/softfloat/source/f64_to_i32_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_i64.c +SRCS += ../common/softfloat/source/f64_to_i64_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_ui32.c +SRCS += ../common/softfloat/source/f64_to_ui32_r_minMag.c +SRCS += ../common/softfloat/source/f64_to_ui64.c +SRCS += ../common/softfloat/source/f64_to_ui64_r_minMag.c +SRCS += ../common/softfloat/source/i32_to_f32.c +SRCS += ../common/softfloat/source/i32_to_f64.c +SRCS += ../common/softfloat/source/i64_to_f32.c +SRCS += ../common/softfloat/source/i64_to_f64.c +SRCS += ../common/softfloat/source/s_addMagsF32.c +SRCS += ../common/softfloat/source/s_addMagsF64.c +SRCS += ../common/softfloat/source/s_approxRecipSqrt32_1.c +SRCS += ../common/softfloat/source/s_approxRecipSqrt_1Ks.c +SRCS += ../common/softfloat/source/s_countLeadingZeros64.c +SRCS += ../common/softfloat/source/s_countLeadingZeros8.c +SRCS += ../common/softfloat/source/s_mul64To128.c +SRCS += ../common/softfloat/source/s_mulAddF32.c +SRCS += ../common/softfloat/source/s_mulAddF64.c +SRCS += ../common/softfloat/source/s_normRoundPackToF32.c +SRCS += ../common/softfloat/source/s_normRoundPackToF64.c +SRCS += ../common/softfloat/source/s_normSubnormalF32Sig.c +SRCS += ../common/softfloat/source/s_normSubnormalF64Sig.c +SRCS += ../common/softfloat/source/s_roundPackToF16.c +SRCS += ../common/softfloat/source/s_roundPackToF32.c +SRCS += ../common/softfloat/source/s_roundPackToF64.c +SRCS += ../common/softfloat/source/s_roundToI32.c +SRCS += ../common/softfloat/source/s_roundToI64.c +SRCS += ../common/softfloat/source/s_roundToUI32.c +SRCS += ../common/softfloat/source/s_roundToUI64.c +SRCS += ../common/softfloat/source/s_shiftRightJam128.c +SRCS += ../common/softfloat/source/s_subMagsF32.c +SRCS += ../common/softfloat/source/s_subMagsF64.c +SRCS += ../common/softfloat/source/softfloat_state.c +SRCS += ../common/softfloat/source/ui32_to_f32.c +SRCS += ../common/softfloat/source/ui32_to_f64.c +SRCS += ../common/softfloat/source/ui64_to_f32.c +SRCS += ../common/softfloat/source/ui64_to_f64.c +SRCS_CXX += ../common/displayport/src/dp_auxretry.cpp +SRCS_CXX += ../common/displayport/src/dp_bitstream.cpp +SRCS_CXX += ../common/displayport/src/dp_buffer.cpp +SRCS_CXX += ../common/displayport/src/dp_configcaps.cpp +SRCS_CXX += ../common/displayport/src/dp_connectorimpl.cpp +SRCS_CXX += 
../common/displayport/src/dp_crc.cpp +SRCS_CXX += ../common/displayport/src/dp_deviceimpl.cpp +SRCS_CXX += ../common/displayport/src/dp_discovery.cpp +SRCS_CXX += ../common/displayport/src/dp_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_evoadapter.cpp +SRCS_CXX += ../common/displayport/src/dp_groupimpl.cpp +SRCS_CXX += ../common/displayport/src/dp_guid.cpp +SRCS_CXX += ../common/displayport/src/dp_list.cpp +SRCS_CXX += ../common/displayport/src/dp_merger.cpp +SRCS_CXX += ../common/displayport/src/dp_messagecodings.cpp +SRCS_CXX += ../common/displayport/src/dp_messageheader.cpp +SRCS_CXX += ../common/displayport/src/dp_messages.cpp +SRCS_CXX += ../common/displayport/src/dp_mst_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_splitter.cpp +SRCS_CXX += ../common/displayport/src/dp_sst_edid.cpp +SRCS_CXX += ../common/displayport/src/dp_timer.cpp +SRCS_CXX += ../common/displayport/src/dp_vrr.cpp +SRCS_CXX += ../common/displayport/src/dp_wardatabase.cpp +SRCS_CXX += ../common/displayport/src/dp_watermark.cpp +SRCS_CXX += ../common/displayport/src/dptestutil/dp_testmessage.cpp +SRCS += ../common/modeset/hdmipacket/nvhdmipkt.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_0073.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9171.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9271.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9471.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_9571.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C371.c +SRCS += ../common/modeset/hdmipacket/nvhdmipkt_C671.c +SRCS += ../common/modeset/timing/nvt_cvt.c +SRCS += ../common/modeset/timing/nvt_displayid20.c +SRCS += ../common/modeset/timing/nvt_dmt.c +SRCS += ../common/modeset/timing/nvt_dsc_pps.c +SRCS += ../common/modeset/timing/nvt_edid.c +SRCS += ../common/modeset/timing/nvt_edidext_861.c +SRCS += ../common/modeset/timing/nvt_edidext_displayid.c +SRCS += ../common/modeset/timing/nvt_edidext_displayid20.c +SRCS += ../common/modeset/timing/nvt_gtf.c +SRCS += ../common/modeset/timing/nvt_tv.c +SRCS += ../common/modeset/timing/nvt_util.c +SRCS += ../common/unix/common/utils/nv_memory_tracker.c +SRCS += ../common/unix/common/utils/nv_mode_timings_utils.c +SRCS += ../common/unix/common/utils/nv_vasprintf.c +SRCS += ../common/unix/common/utils/unix_rm_handle.c +SRCS += kapi/src/nvkms-kapi-channelevent.c +SRCS += kapi/src/nvkms-kapi-notifiers.c +SRCS += kapi/src/nvkms-kapi.c +SRCS += lib/nvkms-format.c +SRCS += lib/nvkms-sync.c +SRCS_CXX += src/dp/nvdp-connector-event-sink.cpp +SRCS_CXX += src/dp/nvdp-connector.cpp +SRCS_CXX += src/dp/nvdp-device.cpp +SRCS_CXX += src/dp/nvdp-evo-interface.cpp +SRCS_CXX += src/dp/nvdp-host.cpp +SRCS_CXX += src/dp/nvdp-timer.cpp +SRCS += src/g_nvkms-evo-states.c +SRCS += src/nvkms-3dvision.c +SRCS += src/nvkms-attributes.c +SRCS += src/nvkms-console-restore.c +SRCS += src/nvkms-cursor.c +SRCS += src/nvkms-cursor2.c +SRCS += src/nvkms-cursor3.c +SRCS += src/nvkms-dma.c +SRCS += src/nvkms-dpy.c +SRCS += src/nvkms-event.c +SRCS += src/nvkms-evo.c +SRCS += src/nvkms-evo1.c +SRCS += src/nvkms-evo2.c +SRCS += src/nvkms-evo3.c +SRCS += src/nvkms-flip.c +SRCS += src/nvkms-framelock.c +SRCS += src/nvkms-hal.c +SRCS += src/nvkms-hdmi.c +SRCS += src/nvkms-hw-states.c +SRCS += src/nvkms-lut.c +SRCS += src/nvkms-modepool.c +SRCS += src/nvkms-modeset.c +SRCS += src/nvkms-prealloc.c +SRCS += src/nvkms-rm.c +SRCS += src/nvkms-rmapi-dgpu.c +SRCS += src/nvkms-surface.c +SRCS += src/nvkms-utils.c +SRCS += src/nvkms-vrr.c +SRCS += src/nvkms.c diff --git a/src/nvidia/Makefile 
b/src/nvidia/Makefile new file mode 100644 index 000000000..9bdb826ad --- /dev/null +++ b/src/nvidia/Makefile @@ -0,0 +1,162 @@ +########################################################################### +# Makefile for nv-kernel.o +########################################################################### + +NV_MODULE_LOGGING_NAME ?= nvidia + +VERSION_MK_DIR = ../../ + +include ../../utils.mk + +include srcs.mk + +# The source files for nv-kernel.o are all SRCS and SRCS_CXX defined in srcs.mk, +# and the NVIDIA ID string +ALL_SRCS = $(SRCS) $(SRCS_CXX) +ALL_SRCS += $(NVIDSTRING) + +SRC_COMMON = ../common +CONDITIONAL_CFLAGS := + +CFLAGS += -include $(SRC_COMMON)/sdk/nvidia/inc/cpuopsys.h + +CFLAGS += -I kernel/inc +CFLAGS += -I interface +CFLAGS += -I $(SRC_COMMON)/sdk/nvidia/inc +CFLAGS += -I arch/nvalloc/common/inc +CFLAGS += -I arch/nvalloc/common/inc/deprecated +CFLAGS += -I arch/nvalloc/unix/include +CFLAGS += -I inc +CFLAGS += -I inc/os +CFLAGS += -I $(SRC_COMMON)/shared/inc +CFLAGS += -I $(SRC_COMMON)/shared/msgq/inc +CFLAGS += -I $(SRC_COMMON)/inc + +CFLAGS += -I $(SRC_COMMON)/uproc/os/libos-v2.0.0/include +CFLAGS += -I $(SRC_COMMON)/uproc/os/libos-v2.0.0/debug +CFLAGS += -I $(SRC_COMMON)/inc/swref +CFLAGS += -I $(SRC_COMMON)/inc/swref/published + +CFLAGS += -I generated +CFLAGS += -I $(SRC_COMMON)/nvswitch/kernel/inc +CFLAGS += -I $(SRC_COMMON)/nvswitch/interface +CFLAGS += -I $(SRC_COMMON)/nvswitch/common/inc/ +CFLAGS += -I $(SRC_COMMON)/inc/displayport +CFLAGS += -I $(SRC_COMMON)/nvlink/interface/ +CFLAGS += -I src/mm/uvm/interface +CFLAGS += -I inc/libraries +CFLAGS += -I src/libraries +CFLAGS += -I inc/kernel + +CFLAGS += -Werror-implicit-function-declaration +CFLAGS += -Wwrite-strings +CFLAGS += -fno-common +CFLAGS += -ffreestanding + +ifeq ($(TARGET_ARCH),x86_64) + CFLAGS += -msoft-float + CFLAGS += -mno-red-zone + CFLAGS += -mcmodel=kernel + CFLAGS += -mno-mmx + CFLAGS += -mno-sse + CFLAGS += -mno-sse2 + CFLAGS += -mno-3dnow +endif + +ifeq ($(TARGET_ARCH),aarch64) + CFLAGS += -mgeneral-regs-only + CFLAGS += -march=armv8-a + CFLAGS += -mstrict-align +endif + +CFLAGS += -fno-pic + +CFLAGS += -D_LANGUAGE_C +CFLAGS += -D__NO_CTYPE +CFLAGS += -DNVRM +CFLAGS += -DLOCK_VAL_ENABLED=0 +CFLAGS += -DPORT_ATOMIC_64_BIT_SUPPORTED=1 +CFLAGS += -DPORT_IS_KERNEL_BUILD=1 +CFLAGS += -DPORT_IS_CHECKED_BUILD=1 +CFLAGS += -DPORT_MODULE_atomic=1 +CFLAGS += -DPORT_MODULE_core=1 +CFLAGS += -DPORT_MODULE_cpu=1 +CFLAGS += -DPORT_MODULE_crypto=1 +CFLAGS += -DPORT_MODULE_debug=1 +CFLAGS += -DPORT_MODULE_memory=1 +CFLAGS += -DPORT_MODULE_safe=1 +CFLAGS += -DPORT_MODULE_string=1 +CFLAGS += -DPORT_MODULE_sync=1 +CFLAGS += -DPORT_MODULE_thread=1 +CFLAGS += -DPORT_MODULE_util=1 +CFLAGS += -DPORT_MODULE_example=0 +CFLAGS += -DPORT_MODULE_mmio=0 +CFLAGS += -DPORT_MODULE_time=0 +CFLAGS += -DRS_STANDALONE=0 +CFLAGS += -DRS_STANDALONE_TEST=0 +CFLAGS += -DRS_COMPATABILITY_MODE=1 +CFLAGS += -DRS_PROVIDES_API_STATE=0 +CFLAGS += -DNV_CONTAINERS_NO_TEMPLATES + +CFLAGS += -DINCLUDE_NVLINK_LIB +CFLAGS += -DINCLUDE_NVSWITCH_LIB + +CFLAGS += -DNV_PRINTF_STRINGS_ALLOWED=1 +CFLAGS += -DNV_ASSERT_FAILED_USES_STRINGS=1 +CFLAGS += -DPORT_ASSERT_FAILED_USES_STRINGS=1 + +ifeq ($(DEBUG),1) + CFLAGS += -gsplit-dwarf +endif + +# Define how to perform dead code elimination: place each symbol in its own +# section at compile time, and garbage collect unreachable sections at link +# time. 
exports_link_command.txt tells the linker which symbols need to be +# exported from nv-kernel.o so the linker can determine which symbols are +# unreachable. +CFLAGS += -ffunction-sections +CFLAGS += -fdata-sections +NV_KERNEL_O_LDFLAGS += --gc-sections +EXPORTS_LINK_COMMAND = exports_link_command.txt + +CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -fcf-protection=none) + +ifeq ($(TARGET_ARCH),x86_64) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register) + CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern) +endif + +CFLAGS += $(CONDITIONAL_CFLAGS) + +CC_ONLY_CFLAGS += --std=gnu11 +CXX_ONLY_CFLAGS += --std=gnu++11 + +OBJS = $(call BUILD_OBJECT_LIST,$(ALL_SRCS)) + +# Define how to generate the NVIDIA ID string +$(eval $(call GENERATE_NVIDSTRING, \ + NVRM_ID, \ + UNIX Open Kernel Module, $(OBJS))) + +# Define how to build each object file from the corresponding source file. +$(foreach src, $(ALL_SRCS), $(eval $(call DEFINE_OBJECT_RULE,TARGET,$(src)))) + +NV_KERNEL_O = $(OUTPUTDIR)/nv-kernel.o + +.PNONY: all clean +all: $(NV_KERNEL_O) + +LINKER_SCRIPT = nv-kernel.ld + +$(NV_KERNEL_O): $(OBJS) $(EXPORTS_LINK_COMMAND) $(LINKER_SCRIPT) + $(call quiet_cmd,LD) \ + $(NV_KERNEL_O_LDFLAGS) \ + -T $(LINKER_SCRIPT) \ + -r -o $(NV_KERNEL_O) $(OBJS) @$(EXPORTS_LINK_COMMAND) + $(call quiet_cmd,OBJCOPY) \ + --localize-symbol=memset \ + --localize-symbol=memcpy \ + $@ + +clean: + $(RM) -rf $(OUTPUTDIR) diff --git a/src/nvidia/arch/nvalloc/common/inc/dev_ctrl_defines.h b/src/nvidia/arch/nvalloc/common/inc/dev_ctrl_defines.h new file mode 100644 index 000000000..1644e3d72 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/dev_ctrl_defines.h @@ -0,0 +1,86 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @brief Defines for simplifying SW accesses to the dev_ctrl interrupt tree. + * These are generic defines ued in addition to architecure-specific + * defines in dev_vm_addendum.h + * + */ + +// +// Notes on the terms used below: +// Subtree: The HW tree is a 64-way tree that consists of 2 TOP level interrupt +// registers, 32 bits each. Each of these 64 is referred to as a subtree. +// Leaf: Each of these 64 subtrees are associated with a pair of LEAF registers +// giving us a total of 128 LEAF registers. 
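+//         (For example, subtree 33 is reported in TOP register 1, bit 1, and
+//         owns LEAF registers 66 and 67; the index macros below encode
+//         exactly this arithmetic.)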
+// GPU vector: The 128 LEAF registers give us a total of (128*32) GPU vectors +// giving us a total of 4096 GPU vectors +// + +// +// Given a subtree index, the below macros give us the index of the TOP level +// register and the bit within the TOP level register to program for that +// subtree. +// +#define NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(i) ((i) / 32) +#define NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i) ((i) % 32) + +// +// Given a subtree index, the below macros give us the two LEAF register indices +// that correspond to that subtree. +// +#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(i) ((i)*2) +#define NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(i) (((i)*2) + 1) + +// +// Given a LEAF register index, the below macros give us the range of GPU +// interrupt vectors that correspond to those leafs. +// +#define NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_START(i) ((i)*32) +#define NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_END(i) (((i)*32) + 31) + +// +// Given a GPU interrupt vector, the below macros give us the index of the +// LEAF register and the bit within the LEAF register to program for that +// GPU interrupt vector. +// +#define NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i) ((i) / 32) +#define NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(i) ((i) % 32) + +// +// Given a GPU interrupt vector, the below macro gives us the subtree in which +// it belongs. +// +#define NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(i) ((NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(i)) / 2) + +// +// The max number of leaf registers we expect +// This is enforced to be greater than or equal to +// (NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(NV_CPU_INTR_STALL_SUBTREE_LAST) + 1) +// for the largest NV_CPU_INTR_STALL_SUBTREE_LAST +// +#define NV_MAX_INTR_LEAVES 12 + +// In SW, this specifies an invalid interrupt vector +#define NV_INTR_VECTOR_INVALID (NV_U32_MAX) diff --git a/src/nvidia/arch/nvalloc/common/inc/flcnifcmn.h b/src/nvidia/arch/nvalloc/common/inc/flcnifcmn.h new file mode 100644 index 000000000..854c84ef1 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/flcnifcmn.h @@ -0,0 +1,149 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * flcn Command/Message Interfaces - Common + */ + +#ifndef FLCNIFCMN_H +#define FLCNIFCMN_H + +#include "nvtypes.h" +#include "flcnretval.h" + +#ifndef NV_SIZEOF32 +#define NV_SIZEOF32(v) (sizeof(v)) +#endif + +#ifndef NV_ARRAY_ELEMENTS +#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0]))) +#endif + +/*! + * @ref NvU64_ALIGN32 , NvU64_ALIGN32_PACK, NvU64_ALIGN32_UNPACK + * TODO: Use NV types directly + */ +typedef NvU64_ALIGN32 RM_FLCN_U64; +typedef NvU64_ALIGN32 *PRM_FLCN_U64; + +#define RM_FLCN_U64_IS_ZERO NvU64_ALIGN32_IS_ZERO +#define RM_FLCN_U64_PACK NvU64_ALIGN32_PACK +#define RM_FLCN_U64_UNPACK NvU64_ALIGN32_UNPACK + +/*! + * @brief Header preceding each CMD/MSG exchanged through falcon's queues. + * + * @note Data package sent to falcon are referred as CMDs (commands). + * Data package sent by falcon are referred as MSGs (messages). + */ +typedef struct +{ + /*! + * Unit ID identifies falcon's task/unit receiving/issuing this message. + */ + NvU8 unitId; + /*! + * Total CMD/MSG size (including header). + */ + NvU8 size; + /*! + * Flags identifying state of CMD/MSG. + */ + NvU8 ctrlFlags; + /*! + * Sequence # ID to track each request sent to falcon (where applicable). + */ + NvU8 seqNumId; +} RM_FLCN_QUEUE_HDR, +*PRM_FLCN_QUEUE_HDR; + +/*! + * Convenience macro for determining the size of the falcon's queue header: + */ +#define RM_FLCN_QUEUE_HDR_SIZE sizeof(RM_FLCN_QUEUE_HDR) + +/*! + * Generic Falcon rewind unit ID. + */ +#define RM_FLCN_UNIT_ID_REWIND (0x00U) + +/*! + * Generic CMD structure to hold the header. + */ +typedef struct +{ + RM_FLCN_QUEUE_HDR hdr; + NvU32 cmd; +} RM_FLCN_CMD_GEN; + +/*! + * Generic MSG structure to hold the header. + */ +typedef struct +{ + RM_FLCN_QUEUE_HDR hdr; + NvU32 msg; +} RM_FLCN_MSG_GEN; + +/*! + * Convenience macros for determining the size of body for a command or message: + */ +#define RM_FLCN_CMD_BODY_SIZE(u,t) sizeof(RM_FLCN_##u##_CMD_##t) +#define RM_FLCN_MSG_BODY_SIZE(u,t) sizeof(RM_FLCN_##u##_MSG_##t) + +/*! + * Convenience macros for determining the size of a command or message: + */ +#define RM_FLCN_CMD_SIZE(u,t) \ + (RM_FLCN_QUEUE_HDR_SIZE + RM_FLCN_CMD_BODY_SIZE(u,t)) + +#define RM_FLCN_MSG_SIZE(u,t) \ + (RM_FLCN_QUEUE_HDR_SIZE + RM_FLCN_MSG_BODY_SIZE(u,t)) + +/*! + * Convenience macros for determining the type of a command or message + * (intended to be used symmetrically with the CMD and MSG _SIZE macros): + */ +#define RM_FLCN_CMD_TYPE(u,t) RM_FLCN_##u##_CMD_ID_##t +#define RM_FLCN_MSG_TYPE(u,t) RM_FLCN_##u##_MSG_ID_##t + +/*! + * @brief Falcons' queue header flags (@ref RM_FLCN_QUEUE_HDR::ctrlFlags). + * + * RM_FLCN_QUEUE_FLAGS_STATUS + * Set by command's sender to request back message confirming the completion of + * command's execution. In RM->FLCN communication response is required to free + * command related data tracked within RM (***_SEQ_INFO). + * + * RM_FLCN_QUEUE_HDR_FLAGS_EVENT + * Set by the falcon to distinguish messages sent to RM from command responses. + * + * RM_FLCN_QUEUE_HDR_FLAGS_UNIT_ACK + * Used internally within the falcon to track completed commands when updating + * get (tail) command queue pointers. 
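+ *
+ * As an illustration: RM sets _FLAGS_STATUS in ctrlFlags when it queues a
+ * command for which it needs a completion message back, while a message that
+ * arrives with _FLAGS_EVENT set is an unsolicited falcon event rather than
+ * the response to any outstanding command.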
+ */ +#define RM_FLCN_QUEUE_HDR_FLAGS_STATUS NVBIT(0) +#define RM_FLCN_QUEUE_HDR_FLAGS_EVENT NVBIT(2) +#define RM_FLCN_QUEUE_HDR_FLAGS_UNIT_ACK NVBIT(5) + +#endif // FLCNIFCMN_H diff --git a/src/nvidia/arch/nvalloc/common/inc/flcnretval.h b/src/nvidia/arch/nvalloc/common/inc/flcnretval.h new file mode 100644 index 000000000..33d48ff22 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/flcnretval.h @@ -0,0 +1,279 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file flcnretval.h + * @brief Defines various status codes that are convenient to relay status + * information in functions. + */ + +#ifndef FLCNRETVAL_H +#define FLCNRETVAL_H + +#include "nvtypes.h" +/*! + * Defines a generic type that may be used to convey status information. This + * is very similar to the RM_STATUS type but smaller in width to save DMEM and + * stack space. 
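+ *
+ * Typical usage (illustrative; someFlcnOp() is a hypothetical helper):
+ *
+ *     FLCN_STATUS status = someFlcnOp();
+ *     if (status != FLCN_OK)
+ *     {
+ *         return status;
+ *     }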
+ */ +typedef NvU8 FLCN_STATUS; + +#define FLCN_OK (0x00U) +#define FLCN_ERR_BINARY_NOT_STARTED (0xFEU) +#define FLCN_ERROR (0xFFU) + +#define FLCN_ERR_NOT_SUPPORTED (0x01U) +#define FLCN_ERR_INVALID_ARGUMENT (0x02U) +#define FLCN_ERR_ILLEGAL_OPERATION (0x03U) +#define FLCN_ERR_TIMEOUT (0x04U) +#define FLCN_ERR_NO_FREE_MEM (0x05U) +#define FLCN_ERR_HDCP_INVALID_SRM (0x06U) +#define FLCN_ERR_HDCP_RECV_REVOKED (0x07U) +#define FLCN_ERR_RPC_INVALID_INPUT (0x08U) +#define FLCN_ERR_DMA_SUSPENDED (0x09U) +#define FLCN_ERR_MUTEX_ACQUIRED (0x10U) +#define FLCN_ERR_MUTEX_ID_NOT_AVAILABLE (0x11U) +#define FLCN_ERR_OBJECT_NOT_FOUND (0x12U) +#define FLCN_ERR_MSGBOX_TIMEOUT (0x13U) +#define FLCN_ERR_INVALID_INDEX (0x14U) +#define FLCN_ERR_INVALID_FUNCTION (0x15U) +#define FLCN_ERR_INSUFFICIENT_PMB_PLM_PROTECTION (0x16U) +#define FLCN_ERR_DMA_NACK (0x17U) +#define FLCN_ERR_CHIP_NOT_SUPPORTED_FOR_PR (0x18U) +#define FLCN_ERR_BAR0_PRIV_READ_ERROR (0x19U) +#define FLCN_ERR_BAR0_PRIV_WRITE_ERROR (0x1AU) +#define FLCN_ERR_HDCP22_ABORT_AUTHENTICATION (0x1BU) +#define FLCN_ERR_DPU_IS_BUSY (0x1CU) +#define FLCN_ERR_DPU_TIMEOUT_FOR_HDCP_TYPE1_LOCK_REQUEST (0x1DU) +#define FLCN_ERR_HDCP_TYPE1_LOCK_FAILED (0x1EU) +#define FLCN_ERR_HDCP_TYPE1_LOCK_UNKNOWN (0x1FU) +#define FLCN_ERR_WAIT_FOR_BAR0_IDLE_FAILED (0x20U) +#define FLCN_ERR_CSB_PRIV_READ_ERROR (0x21U) +#define FLCN_ERR_CSB_PRIV_WRITE_ERROR (0x22U) +#define FLCN_ERR_DMA_UNEXPECTED_DMAIDX (0x23U) +#define FLCN_ERR_PRIV_SEC_VIOLATION (0x24U) +#define FLCN_ERR_INVALID_VERSION (0x25U) +#define FLCN_ERR_PR_SHARED_STRUCT_INIT_FAILED (0x26U) +#define FLCN_ERR_GPU_IN_DEBUG_MODE (0x27U) +#define FLCN_ERR_HPD_UNPLUG (0x28U) +#define FLCN_ERR_HDCP22_DELAY_ABORT_AUTHENTICATION (0x29U) +#define FLCN_ERR_SECUREBUS_REGISTER_READ_ERROR (0x2AU) +#define FLCN_ERR_SECUREBUS_REGISTER_WRITE_ERROR (0x2BU) +#define FLCN_ERR_HDCP22_FLUSH_TYPE_LOCK_ACTIVE (0x2CU) +#define FLCN_ERR_HDCP22_FLUSH_TYPE_IN_PROGRESS (0x2DU) +#define FLCN_ERR_FEATURE_NOT_ENABLED (0x2EU) +#define FLCN_ERR_OUT_OF_RANGE (0x2FU) + +// I2C Errors +#define FLCN_ERR_I2C_BUSY (0x30U) +#define FLCN_ERR_I2C_NACK_ADDRESS (0x31U) +#define FLCN_ERR_I2C_NACK_BYTE (0x32U) +#define FLCN_ERR_I2C_SIZE (0x33U) +#define FLCN_ERR_I2C_BUS_INVALID (0x34U) + +#define FLCN_ERR_INVALID_STATE (0x35U) +#define FLCN_ERR_RECURSION_LIMIT_EXCEEDED (0x36U) +#define FLCN_ERR_INVALID_CAST (0x37U) + +// AUX Errors +#define FLCN_ERR_AUX_ERROR (0x3AU) +#define FLCN_ERR_AUX_SEMA_ACQUIRED (0x3BU) +#define FLCN_ERR_AUX_SEMA_INVALID_RELEASE (0x3CU) + +#define FLCN_ERR_MORE_PROCESSING_REQUIRED (0x3EU) +#define FLCN_ERR_DMA_ALIGN (0x3FU) + +// Power-Device Errors +#define FLCN_ERR_PWR_DEVICE_TAMPERED (0x40U) + +#define FLCN_ERR_ITERATION_END (0x41U) + +// Perf change sequence Errors +#define FLCN_ERR_LOCK_NOT_AVAILABLE (0x42U) + +#define FLCN_ERR_STATE_RESET_NEEDED (0x43U) +#define FLCN_ERR_DMA_GENERIC (0x44U) +#define FLCN_ERR_LS_CHK_UCODE_REVOKED (0x45U) +#define FLCN_ERR_ACC_SEQ_MISMATCH (0x46U) +#define FLCN_ERR_SSP_STACK_CHECK_FAILED (0x47U) +#define FLCN_ERR_SE_TRNG_FAILED (0x48U) +#define FLCN_ERR_PROD_MODE_NOT_YET_SUPPORTED (0x49U) + +// SHA HW errors +#define FLCN_ERR_SHA_HW_CHECK_INT_STATUS (0x4AU) +#define FLCN_ERR_SHA_HW_SOFTRESET_REQUIRED (0x4BU) +#define FLCN_ERR_SHA_HW_SOFTRESET_FAILED (0x4CU) +#define FLCN_ERR_SHA_HW_BUSY (0x4DU) + +// +// Add new generic error codes here, do not changes values of exiting error codes, +// because that will affect other binaries and their signatures +// +#define FLCN_ERR_CTXSW_ERROR (0x4EU) + +// VPR 
SEC2 task errors +#define FLCN_ERR_VPR_APP_INVALID_REQUEST_END_ADDR (0x51U) +#define FLCN_ERR_VPR_APP_INVALID_REQUEST_START_ADDR (0x52U) +#define FLCN_ERR_VPR_APP_SCRUB_VERIF_FAILED (0x53U) +#define FLCN_ERR_VPR_APP_MEMLOCK_ALREADY_SET (0x54U) +#define FLCN_ERR_VPR_APP_INVALID_INDEX (0x55U) +#define FLCN_ERR_VPR_APP_UNEXPECTED_VPR_HANDOFF_FROM_SCRUBBER (0x56U) +#define FLCN_ERR_VPR_APP_CBC_RANGE_CLASH (0x57U) +#define FLCN_ERR_VPR_APP_NOT_SUPPORTED_BY_HW (0x58U) +#define FLCN_ERR_VPR_APP_NOT_SUPPORTED_BY_SW (0x59U) +#define FLCN_ERR_VPR_APP_DISPLAY_VERSION_NOT_SUPPORTED (0x5AU) +#define FLCN_ERR_VPR_APP_VPR_WPR_WRITE_FAILED (0x5BU) +#define FLCN_ERR_VPR_APP_NOTHING_TO_DO (0x5CU) +#define FLCN_ERR_VPR_APP_DISPLAY_NOT_PRESENT (0x5DU) +#define FLCN_ERR_VPR_APP_PREVIOUS_CMD_FAILED_AS_MAX_VPR_IS_0 (0x5EU) +#define FLCN_ERR_VPR_APP_PLM_PROTECTION_NOT_RAISED (0x5FU) +#define FLCN_ERR_VPR_APP_PLM_PROTECTION_ALREADY_RAISED (0x60U) +#define FLCN_ERR_VPR_APP_DISP_FALCON_IS_NOT_IN_LS_MODE (0x61U) +#define FLCN_ERR_VPR_APP_VPR_IS_ALREADY_ENABLED (0x62U) +#define FLCN_ERR_VPR_APP_UNEXPECTEDLY_RUNNING_ON_RISCV (0x63U) + +// Clocks Errors +#define FLCN_ERR_CYCLE_DETECTED (0x70U) +#define FLCN_ERR_INVALID_PATH (0x71U) +#define FLCN_ERR_MISMATCHED_TARGET (0x72U) +#define FLCN_ERR_FREQ_NOT_SUPPORTED (0x73U) +#define FLCN_ERR_INVALID_SOURCE (0x74U) +#define FLCN_ERR_NOT_INITIALIZED (0x75U) + +// HDCP2.2 Errors +#define FLCN_ERR_HDCP22_GETDKEY_FAILED (0x80U) +#define FLCN_ERR_HDCP22_H_PRIME (0x81U) +#define FLCN_ERR_HDCP22_CERT_RX (0x82U) +#define FLCN_ERR_HDCP22_PAIRING (0x83U) +#define FLCN_ERR_HDCP22_L_PRIME (0x84U) +#define FLCN_ERR_HDCP22_V_PRIME (0x85U) +#define FLCN_ERR_HDCP22_INVALID_RXIDLIST (0x86U) +#define FLCN_ERR_HDCP22_M_PRIME (0x87U) +#define FLCN_ERR_HDCP22_SEQ_ROLLOVER (0x88U) +#define FLCN_ERR_HDCP22_RSA_HW (0x89U) +#define FLCN_ERR_HDCP22_ECF_TIMESLOT_MISMATCH (0x90U) + +// LibCCC Errors +#define FLCN_ERR_INIT_CRYPTO_DEVICE_FAILED (0x91U) +#define FLCN_ERR_NVPKA_SELECT_ENGINE_FAILED (0x92U) +#define FLCN_ERR_NVPKA_ACQUIRE_MUTEX_FAILED (0x93U) +#define FLCN_ERR_NVPKA_MODULAR_EXP_LOCK_FAILED (0x94U) +#define FLCN_ERR_NVRNG_INIT_CRYPTO_DEVICE_FAILED (0x95U) +#define FLCN_ERR_NVRNG_SELECT_ENGINE_FAILED (0x96U) +#define FLCN_ERR_NVRNG_GENERATE_FAILED (0x97U) + +// Heavy Secure Errors +#define FLCN_ERR_HS_CHK_INVALID_INPUT (0xA0U) +#define FLCN_ERR_HS_CHK_CHIP_NOT_SUPPORTED (0xA1U) +#define FLCN_ERR_HS_CHK_UCODE_REVOKED (0xA2U) +#define FLCN_ERR_HS_CHK_NOT_IN_LSMODE (0xA3U) +#define FLCN_ERR_HS_CHK_INVALID_LS_PRIV_LEVEL (0xA4U) +#define FLCN_ERR_HS_CHK_INVALID_REGIONCFG (0xA5U) +#define FLCN_ERR_HS_CHK_PRIV_SEC_DISABLED_ON_PROD (0xA6U) +#define FLCN_ERR_HS_CHK_SW_FUSING_ALLOWED_ON_PROD (0xA7U) +#define FLCN_ERR_HS_CHK_INTERNAL_SKU_ON_PROD (0xA8U) +#define FLCN_ERR_HS_CHK_DEVID_OVERRIDE_ENABLED_ON_PROD (0xA9U) +#define FLCN_ERR_HS_CHK_INCONSISTENT_PROD_MODES (0xAAU) +#define FLCN_ERR_HS_CHK_HUB_ENCRPTION_DISABLED (0xABU) +#define FLCN_ERR_HS_PR_ILLEGAL_LASSAHS_STATE_AT_HS_ENTRY (0xACU) +#define FLCN_ERR_HS_PR_ILLEGAL_LASSAHS_STATE_AT_MPK_DECRYPT (0xADU) +#define FLCN_ERR_HS_PR_ILLEGAL_LASSAHS_STATE_AT_HS_EXIT (0xAEU) +#define FLCN_ERR_HS_PROD_MODE_NOT_YET_SUPPORTED (0xAFU) +#define FLCN_ERR_HS_DEV_VERSION_ON_PROD (0xB0U) +#define FLCN_ERR_HS_PR_LASSAHS_LS_SIG_GRP_MISMATCH (0xB1U) +#define FLCN_ERR_HS_PR_LASSAHS_LS_SIG_GRP_OVERLAYS_CNT (0xB2U) +#define FLCN_ERR_HS_PR_LASSAHS_LS_SIG_GRP_INVALID_VA (0xB3U) +#define FLCN_ERR_HS_MUTEX_ACQUIRE_FAILED (0xB4U) +#define FLCN_ERR_HS_MUTEX_RELEASE_FAILED 
(0xB5U) +#define FLCN_ERR_HS_PR_MPK_DEC_NEEDS_NEWER_ACR_UDE_SCRUBBER (0xB6U) +#define FLCN_ERR_HS_CHK_ENGID_MISMATCH (0xB7U) +#define FLCN_ERR_HS_OPT_INTERNAL_SKU_CHECK_FAILED (0xB8U) +#define FLCN_ERR_HS_CHK_BOARD_MISMATCH (0xB9U) +#define FLCN_ERR_HS_CHK_DISP_ENG_DISABLED (0xBAU) +#define FLCN_ERR_HS_GEN_RANDOM (0xBBU) +#define FLCN_ERR_HS_CHK_IMPROPERLY_FUSED_BOARD (0xBCU) +#define FLCN_ERR_HS_CHK_HDCP_DISABLED (0xBDU) +#define FLCN_ERR_HS_CHK_HDCP_BLACKLISTED_SKU (0XBEU) +#define FLCN_ERR_HS_SECURE_ACTION_ARG_CHECK_FAILED (0xBFU) +#define FLCN_ERR_HS_CHK_RETURN_PC_AT_HS_ENTRY_IS_OF_HS (0xC0U) +#define FLCN_ERR_HS_CHK_HS_LIB_ENTRY_CALLED_BY_NON_HS (0xC1U) +#define FLCN_ERR_HS_DECODE_TRAP_ALREADY_IN_USE (0xC2U) +#define FLCN_ERR_HS_REGISTER_READ_WRITE_ERROR (0xC3U) +#define FLCN_ERR_HS_CHK_CPUCTL_ALIAS_FALSE (0xC4U) +#define FLCN_ERR_HS_UPDATE_RESET_PLM_ERROR (0xC5U) +#define FLCN_ERR_HS_RNG_CONFIG_ERROR (0xC6U) +#define FLCN_ERR_HS_CHK_NOT_IN_HSMODE (0xC7U) +#define FLCN_ERR_HS_CHK_GFW_CHAIN_OF_TRUST_BROKEN (0xC8U) +#define FLCN_ERR_HS_HDCP22_WRONG_SEQUENCE (0xC9U) +#define FLCN_ERR_HS_HDCP22_INTEGRITY_ERROR (0xCAU) +#define FLCN_ERR_HS_HDCP22_WRONG_TYPE (0xCBU) +#define FLCN_ERR_HS_APM_NOT_ENABLED (0xCCU) +#define FLCN_ERR_HS_APM_SMC_ENABLED (0xCDU) +#define FLCN_ERR_HS_APM_FECS_NOT_HALTED (0xCEU) +#define FLCN_ERR_HS_APM_SCRATCH_PLM_INVALID (0xCFU) +#define FLCN_ERR_HS_APM_SCRATCH_INIT_INVALID (0xD0U) + + +// +// BAR0/CSB Priv Read/Write Error Handling Defines +// These need to be defined by HW - NV Bug 200198584 +// +#define FLCN_BAR0_PRIV_PRI_ERROR_MASK 0xFFF00000U +#define FLCN_BAR0_PRIV_PRI_ERROR_CODE 0xBAD00000U +#define FLCN_BAR0_PRIV_PRI_RETURN_VAL 0x00BADBADU + +#define FLCN_CSB_PRIV_PRI_ERROR_MASK 0xFFFF0000U +#define FLCN_CSB_PRIV_PRI_ERROR_CODE 0xBADF0000U + +// +// Macro to check FALCON return status +// +#define CHECK_FLCN_STATUS(expr) do { \ + flcnStatus = (expr); \ + if (flcnStatus != FLCN_OK) \ + { \ + goto ErrorExit; \ + } \ + } while (NV_FALSE) + +// Warnings. +#define FLCN_WARN_NOTHING_TO_DO (0xD0U) +#define FLCN_WARN_NOT_QUERIED (0xD1U) + +// Queue handling Errors +#define FLCN_ERR_QUEUE_MGMT_INVALID_UNIT_ID (0xE0U) +#define FLCN_ERR_QUEUE_MGMT_HEAP_MIRROR_ERR (0xE1U) +#define FLCN_ERR_QUEUE_TASK_INVALID_EVENT_TYPE (0xE2U) +#define FLCN_ERR_QUEUE_TASK_INVALID_UNIT_ID (0xE3U) +#define FLCN_ERR_QUEUE_TASK_INVALID_CMD_TYPE (0xE4U) + +// Posted write errors +#define FLCN_ERR_POSTED_WRITE_FAILURE (0xF0U) +#define FLCN_ERR_POSTED_WRITE_INTERRUPTS_ENABLED (0xF1U) +#define FLCN_ERR_POSTED_WRITE_PRI_CLUSTER_COUNT_MISMATCH (0xF2U) +#define FLCN_ERR_POSTED_WRITE_INCORRECT_PARAMS (0xF3U) + +// Lane Margining errors +#define FLCN_ERR_LM_INVALID_RECEIVER_NUMBER (0xF5U) + +#endif // FLCNRETVAL_H diff --git a/src/nvidia/arch/nvalloc/common/inc/gsp/gsp_error.h b/src/nvidia/arch/nvalloc/common/inc/gsp/gsp_error.h new file mode 100644 index 000000000..2401c784f --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/gsp/gsp_error.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef GSP_ERROR_H +#define GSP_ERROR_H + +// Definitions for GSP-RM to report errors to CPU-RM via mailbox +#define NV_GSP_ERROR_CODE 7:0 +#define NV_GSP_ERROR_REASON 15:8 +#define NV_GSP_ERROR_TASK 23:16 +#define NV_GSP_ERROR_SKIPPED 27:24 +#define NV_GSP_ERROR_TAG 31:28 +#define NV_GSP_ERROR_TAG_VAL 0xE + +#endif // GSP_ERROR_H diff --git a/src/nvidia/arch/nvalloc/common/inc/inforom/ifrdem.h b/src/nvidia/arch/nvalloc/common/inc/inforom/ifrdem.h new file mode 100644 index 000000000..26395e8a5 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/inforom/ifrdem.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _IFR_DEM_H_ +#define _IFR_DEM_H_ + +#define INFOROM_DEM_OBJECT_V1_00_PACKED_SIZE 4144 +#define INFOROM_DEM_OBJECT_V1_00_FIFO_SIZE 4096 + +struct INFOROM_DEM_OBJECT_V1_00 +{ + INFOROM_OBJECT_HEADER_V1_00 header; + inforom_U032 seqNumber; + inforom_U016 writeOffset; + inforom_U016 readOffset; + inforom_X008 fifoBuffer[INFOROM_DEM_OBJECT_V1_00_FIFO_SIZE]; + inforom_U032 reserved[8]; +}; + +#define INFOROM_DEM_OBJECT_V1_00_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d2w4096x8d" +typedef struct INFOROM_DEM_OBJECT_V1_00 INFOROM_DEM_OBJECT_V1_00; + +#endif // _IFR_DEM_H_ diff --git a/src/nvidia/arch/nvalloc/common/inc/inforom/ifrecc.h b/src/nvidia/arch/nvalloc/common/inc/inforom/ifrecc.h new file mode 100644 index 000000000..337537d79 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/inforom/ifrecc.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _IFRECC_H_ +#define _IFRECC_H_ + +#include "nvtypes.h" +#include "inforom/types.h" + +// NVSwitch ECC v6 object definition +#define INFOROM_ECC_OBJECT_V6_S0_PACKED_SIZE 3808 + +//Used to determine if the entry is empty or not +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_VALID 0:0 +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_VALID_FALSE 0 +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_VALID_TRUE 1 +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_CORRUPT_TIMEDATA 1:1 +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_CORRUPT_TIMEDATA_FALSE 0 +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_CORRUPT_TIMEDATA_TRUE 1 +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_ADDR_VALID 2:2 +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_ADDR_VALID_FALSE 0 +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_HEADER_ADDR_VALID_TRUE 1 + +#define NV_INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_LOCATION_LINK_ID 7:0 + +typedef struct INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER +{ + inforom_U008 header; + inforom_U032 errId; + inforom_U032 lastErrorTimestamp; + inforom_U032 averageEventDelta; + inforom_U016 location; + inforom_U016 sublocation; + inforom_U032 correctedCount; + inforom_U032 uncorrectedCount; + inforom_U032 address; + +} INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER; + +#define INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT 128 + +typedef struct INFOROM_ECC_OBJECT_V6_S0 +{ + INFOROM_OBJECT_HEADER_V1_00 header; + + NvU64_ALIGN32 uncorrectedTotal; + NvU64_ALIGN32 correctedTotal; + inforom_U032 lastClearedTimestamp; + + INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER errorEntries[INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_MAX_COUNT]; + + inforom_U008 padding[68]; +} INFOROM_ECC_OBJECT_V6_S0; + +#define INFOROM_ECC_OBJECT_V6_S0_HEADER_FMT INFOROM_OBJECT_HEADER_V1_00_FMT + +#define INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_FMT "b3d2w3d" + +#define INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_ARRAY_FMT \ + INFOROM_FMT_REP128(INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_FMT) + +#define INFOROM_ECC_OBJECT_V6_S0_PADDING_FMT "68b" + +#define INFOROM_ECC_OBJECT_V6_S0_FMT INFOROM_ECC_OBJECT_V6_S0_HEADER_FMT "qqd" \ + INFOROM_ECC_OBJECT_V6_S0_ERROR_COUNTER_ARRAY_FMT \ + INFOROM_ECC_OBJECT_V6_S0_PADDING_FMT + +// Error event structure for NVSwitch ECC errors +typedef struct +{ + NvU32 sxid; + NvU32 linkId; + NvBool bAddressValid; + NvU32 address; + + // The timestamp is filled in by the inforom ECC error logging API + NvU32 timestamp; + + NvBool bUncErr; + NvU32 errorCount; +} INFOROM_NVS_ECC_ERROR_EVENT; + +typedef union +{ + INFOROM_OBJECT_HEADER_V1_00 header; + INFOROM_ECC_OBJECT_V6_S0 v6s; +} INFOROM_ECC_OBJECT; + +typedef struct +{ + const char *pFmt; + NvU8 *pPackedObject; + INFOROM_ECC_OBJECT *pEcc; + + // Signals if there are pending updates to be flushed to InfoROM + NvBool bDirty; +} INFOROM_ECC_STATE, *PINFOROM_ECC_STATE; + +#endif // _IFRECC_H_ diff --git a/src/nvidia/arch/nvalloc/common/inc/inforom/ifrstruct.h b/src/nvidia/arch/nvalloc/common/inc/inforom/ifrstruct.h new file mode 100644 index 000000000..0ea7235f9 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/inforom/ifrstruct.h @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _IFRSTRUCT_H_ +#define _IFRSTRUCT_H_ + +#include "inforom/types.h" + +#include "inforom/ifrecc.h" +#include "inforom/ifrdem.h" + +#define INFOROM_OBD_OBJECT_V1_XX_PACKED_SIZE 128 + +struct INFOROM_OBD_OBJECT_V1_XX +{ + INFOROM_OBJECT_HEADER_V1_00 header; + inforom_U032 buildDate; + inforom_U008 marketingName[24]; + inforom_U008 serialNumber[16]; + inforom_U008 memoryManufacturer; + inforom_U008 memoryPartID[20]; + inforom_U008 memoryDateCode[6]; + inforom_U008 productPartNumber[20]; + inforom_U008 boardRevision[3]; + inforom_U008 boardType; + inforom_U008 board699PartNumber[20]; + inforom_U008 reserved[5]; +}; +#define INFOROM_OBD_OBJECT_V1_XX_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d116b" +typedef struct INFOROM_OBD_OBJECT_V1_XX INFOROM_OBD_OBJECT_V1_XX; + +// +// OEM 1.0 +// + +#define INFOROM_OEM_OBJECT_V1_00_PACKED_SIZE 512 +#define INFOROM_OEM_OBJECT_HEADER_VERSION 1 + +struct INFOROM_OEM_OBJECT_V1_00 +{ + INFOROM_OBJECT_HEADER_V1_00 header; + inforom_U008 oemInfo[504]; +}; +#define INFOROM_OEM_OBJECT_V1_00_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "504b" +typedef struct INFOROM_OEM_OBJECT_V1_00 INFOROM_OEM_OBJECT_V1_00; + +#define INFOROM_IMG_OBJECT_V1_00_PACKED_SIZE 64 +#define INFOROM_IMG_OBJECT_V1_00_VERSION_LENGTH 16 + +struct INFOROM_IMG_OBJECT_V1_00 +{ + INFOROM_OBJECT_HEADER_V1_00 header; + inforom_U008 version[INFOROM_IMG_OBJECT_V1_00_VERSION_LENGTH]; + inforom_U016 pciDeviceId; + inforom_U016 pciVendorId; + inforom_U016 pciSubsystemId; + inforom_U016 pciSubsystemVendorId; + inforom_U008 reserved[32]; +}; +#define INFOROM_IMG_OBJECT_V1_00_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "16b4w32b" +typedef struct INFOROM_IMG_OBJECT_V1_00 INFOROM_IMG_OBJECT_V1_00; + +#endif // _IFRSTRUCT_H_ diff --git a/src/nvidia/arch/nvalloc/common/inc/inforom/omsdef.h b/src/nvidia/arch/nvalloc/common/inc/inforom/omsdef.h new file mode 100644 index 000000000..921482e1f --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/inforom/omsdef.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _INFOROM_OMSDEF_H_ +#define _INFOROM_OMSDEF_H_ + +#include "inforom/types.h" + +#define INFOROM_OMS_OBJECT_V1_PACKED_SIZE 112 + +#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE 0:0 +#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE_NO 0 +#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_ENTRY_AVAILABLE_YES 1 +#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE 1:1 +#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE_NO 0 +#define INFOROM_OMS_OBJECT_V1_SETTINGS_ENTRY_DATA_FORCE_DEVICE_DISABLE_YES 1 + +#define INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY_DATA_RESERVED 7:2 +#define INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY_DATA_ENTRY_CHECKSUM 15:8 + +typedef struct INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY +{ + inforom_U016 data; +} INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY; + +#define INFOROM_OMS_OBJECT_V1S_NUM_SETTINGS_ENTRIES 50 + +typedef struct INFOROM_OMS_OBJECT_V1S +{ + INFOROM_OBJECT_HEADER_V1_00 header; + inforom_U032 lifetimeRefreshCount; + INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY settings[ + INFOROM_OMS_OBJECT_V1S_NUM_SETTINGS_ENTRIES]; +} INFOROM_OMS_OBJECT_V1S; + +#define INFOROM_OMS_OBJECT_V1S_FMT INFOROM_OBJECT_HEADER_V1_00_FMT "d50w" + +typedef struct INFOROM_OMS_V1S_DATA +{ + INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pIter; + INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY prev; + INFOROM_OMS_OBJECT_V1S_SETTINGS_ENTRY *pNext; +} INFOROM_OMS_V1S_DATA; + +typedef union +{ + INFOROM_OBJECT_HEADER_V1_00 header; + INFOROM_OMS_OBJECT_V1S v1s; +} INFOROM_OMS_OBJECT; + +typedef union +{ + INFOROM_OMS_V1S_DATA v1s; +} INFOROM_OMS_DATA; + +typedef struct +{ + const char *pFmt; + NvU8 *pPackedObject; + INFOROM_OMS_OBJECT *pOms; + INFOROM_OMS_DATA omsData; +} INFOROM_OMS_STATE; + +#endif /* _INFOROM_OMSDEF_H_ */ diff --git a/src/nvidia/arch/nvalloc/common/inc/inforom/types.h b/src/nvidia/arch/nvalloc/common/inc/inforom/types.h new file mode 100644 index 000000000..c062cb0c8 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/inforom/types.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _INFOROM_TYPES_H_ +#define _INFOROM_TYPES_H_ + +/*! + * @file types.h + * @brief Common types and definitions used by InfoROM objects + */ + +typedef NvS32 inforom_S008; +typedef NvU32 inforom_U004; +typedef NvU32 inforom_U008; +typedef NvU32 inforom_U016; +typedef NvU32 inforom_U024; +typedef NvU32 inforom_U032; +typedef NvU64 inforom_U064; +typedef NvU8 inforom_X008; + +#define INFOROM_FMT_S08 's' +#define INFOROM_FMT_U04 'n' +#define INFOROM_FMT_U08 'b' +#define INFOROM_FMT_U16 'w' +#define INFOROM_FMT_U24 't' +#define INFOROM_FMT_U32 'd' +#define INFOROM_FMT_U64 'q' +#define INFOROM_FMT_BINARY 'x' + +// Helper macros for generating repeating format sequences +#define INFOROM_FMT_REP02(fmt) fmt fmt +#define INFOROM_FMT_REP04(fmt) INFOROM_FMT_REP02(fmt) INFOROM_FMT_REP02(fmt) +#define INFOROM_FMT_REP08(fmt) INFOROM_FMT_REP04(fmt) INFOROM_FMT_REP04(fmt) +#define INFOROM_FMT_REP16(fmt) INFOROM_FMT_REP08(fmt) INFOROM_FMT_REP08(fmt) +#define INFOROM_FMT_REP32(fmt) INFOROM_FMT_REP16(fmt) INFOROM_FMT_REP16(fmt) +#define INFOROM_FMT_REP64(fmt) INFOROM_FMT_REP32(fmt) INFOROM_FMT_REP32(fmt) +#define INFOROM_FMT_REP128(fmt) INFOROM_FMT_REP64(fmt) INFOROM_FMT_REP64(fmt) +#define INFOROM_FMT_REP256(fmt) INFOROM_FMT_REP128(fmt) INFOROM_FMT_REP128(fmt) +#define INFOROM_FMT_REP512(fmt) INFOROM_FMT_REP256(fmt) INFOROM_FMT_REP256(fmt) + +#define INFOROM_OBJECT_SUBVERSION_SUPPORTS_NVSWITCH(sv) \ + ((((sv) & 0xF0) == 0) || (((sv) & 0xF0) == 0x20)) + +#define INFOROM_OBJECT_HEADER_V1_00_SIZE_OFFSET 0x05 +#define INFOROM_OBJECT_HEADER_V1_00_CHECKSUM_OFFSET 0x07 +#define INFOROM_OBJECT_HEADER_V1_00_PACKED_SIZE 8 + +typedef struct INFOROM_OBJECT_HEADER_V1_00 +{ + inforom_S008 type[3]; + inforom_U008 version; + inforom_U008 subversion; + inforom_U016 size; + inforom_U008 checksum; +} INFOROM_OBJECT_HEADER_V1_00; + +#define INFOROM_OBJECT_HEADER_V1_00_FMT "3s2bwb" + +#endif // _INFOROM_TYPES_H_ diff --git a/src/nvidia/arch/nvalloc/common/inc/nvcst.h b/src/nvidia/arch/nvalloc/common/inc/nvcst.h new file mode 100644 index 000000000..60fdc498d --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/nvcst.h @@ -0,0 +1,356 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVCST_H +#define NVCST_H + +#include +#include +#include + +#define CHIPSET_SETUP_FUNC(name) static NV_STATUS name(OBJCL *pCl); + +CHIPSET_SETUP_FUNC(Intel_25XX_setupFunc) +CHIPSET_SETUP_FUNC(Intel_27XX_setupFunc) +CHIPSET_SETUP_FUNC(Intel_2A40_setupFunc) +CHIPSET_SETUP_FUNC(Intel_0040_setupFunc) +CHIPSET_SETUP_FUNC(Intel_2E00_setupFunc) +CHIPSET_SETUP_FUNC(Intel_25E0_setupFunc) +CHIPSET_SETUP_FUNC(Intel_29XX_setupFunc) +CHIPSET_SETUP_FUNC(Intel_29X0_setupFunc) +CHIPSET_SETUP_FUNC(Intel_29E0_setupFunc) +CHIPSET_SETUP_FUNC(Intel_359E_setupFunc) +CHIPSET_SETUP_FUNC(Intel_4000_setupFunc) +CHIPSET_SETUP_FUNC(Intel_4003_setupFunc) +CHIPSET_SETUP_FUNC(Intel_3400_setupFunc) +CHIPSET_SETUP_FUNC(Intel_3B42_setupFunc) +CHIPSET_SETUP_FUNC(Intel_1C46_setupFunc) +CHIPSET_SETUP_FUNC(Intel_1C10_setupFunc) +CHIPSET_SETUP_FUNC(Intel_1C4B_setupFunc) +CHIPSET_SETUP_FUNC(Intel_1C49_setupFunc) +CHIPSET_SETUP_FUNC(Intel_1D40_setupFunc) +CHIPSET_SETUP_FUNC(Intel_8D47_setupFunc) +CHIPSET_SETUP_FUNC(Intel_8D44_setupFunc) +CHIPSET_SETUP_FUNC(Intel_1E10_setupFunc) +CHIPSET_SETUP_FUNC(Intel_8C4B_setupFunc) +CHIPSET_SETUP_FUNC(Intel_8CC4_setupFunc) +CHIPSET_SETUP_FUNC(Intel_A145_setupFunc) +CHIPSET_SETUP_FUNC(Intel_A2C5_setupFunc) +CHIPSET_SETUP_FUNC(Intel_A242_setupFunc) +CHIPSET_SETUP_FUNC(Intel_A2D2_setupFunc) +CHIPSET_SETUP_FUNC(Intel_A2C9_setupFunc) +CHIPSET_SETUP_FUNC(Intel_A301_setupFunc) +CHIPSET_SETUP_FUNC(Intel_0685_setupFunc) +CHIPSET_SETUP_FUNC(Intel_4381_setupFunc) +CHIPSET_SETUP_FUNC(Intel_7A82_setupFunc) +CHIPSET_SETUP_FUNC(SiS_656_setupFunc) +CHIPSET_SETUP_FUNC(ATI_RS400_setupFunc) +CHIPSET_SETUP_FUNC(ATI_RS480_setupFunc) +CHIPSET_SETUP_FUNC(ATI_RD870_setupFunc) +CHIPSET_SETUP_FUNC(ATI_RD890_setupFunc) +CHIPSET_SETUP_FUNC(ATI_RX780_setupFunc) +CHIPSET_SETUP_FUNC(ATI_FX990_setupFunc) +CHIPSET_SETUP_FUNC(AMD_RS780_setupFunc) +CHIPSET_SETUP_FUNC(AMD_FX790_setupFunc) +CHIPSET_SETUP_FUNC(AMD_FX890_setupFunc) +CHIPSET_SETUP_FUNC(AMD_X370_setupFunc) +CHIPSET_SETUP_FUNC(VIA_VX900_setupFunc) +CHIPSET_SETUP_FUNC(APM_Storm_setupFunc) +CHIPSET_SETUP_FUNC(ARMV8_generic_setupFunc) +CHIPSET_SETUP_FUNC(Marvell_ThunderX2_setupFunc) +CHIPSET_SETUP_FUNC(QEMU_setupFunc) +CHIPSET_SETUP_FUNC(Ampere_eMag_setupFunc) +CHIPSET_SETUP_FUNC(Huawei_Kunpeng920_setupFunc) +CHIPSET_SETUP_FUNC(Mellanox_BlueField_setupFunc) +CHIPSET_SETUP_FUNC(Amazon_Gravitron2_setupFunc) +CHIPSET_SETUP_FUNC(Fujitsu_A64FX_setupFunc) 
+CHIPSET_SETUP_FUNC(Phytium_FT2000_setupFunc) +CHIPSET_SETUP_FUNC(Ampere_Altra_setupFunc) +CHIPSET_SETUP_FUNC(Arm_NeoverseN1_setupFunc) +CHIPSET_SETUP_FUNC(Nvidia_T210_setupFunc) +CHIPSET_SETUP_FUNC(Nvidia_T194_setupFunc) + + +// Keep string length <=32 (including termination) to avoid string copy overflow +CSINFO chipsetInfo[] = +{ + // PCI Express chipset + + {PCI_VENDOR_ID_INTEL, 0x2580, CS_INTEL_2580, "Grantsdale", Intel_25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2584, CS_INTEL_2584, "Alderwood", Intel_25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2588, CS_INTEL_2588, "Intel2588", Intel_25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2590, CS_INTEL_2590, "Alviso", Intel_25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x25C0, CS_INTEL_25E0, "Greencreek", Intel_25E0_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x25E0, CS_INTEL_25E0, "Greencreek", Intel_25E0_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x29B0, CS_INTEL_29X0, "IntelQ35", Intel_29X0_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x29C0, CS_INTEL_29X0, "BearlakeB", Intel_29X0_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x29D0, CS_INTEL_29X0, "IntelQ33", Intel_29X0_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x29E0, CS_INTEL_29E0, "BearlakeX", Intel_29E0_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x359E, CS_INTEL_359E, "Tumwater", Intel_359E_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x4000, CS_INTEL_4000, "Stoakley", Intel_4000_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x4003, CS_INTEL_4003, "SkullTrail", Intel_4003_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x3400, CS_INTEL_3400, "IntelX58", Intel_3400_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x3403, CS_INTEL_3400, "IntelX58", Intel_3400_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x3405, CS_INTEL_3400, "IntelX58", Intel_3400_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x3406, CS_INTEL_3400, "Tylersburg", Intel_3400_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2770, CS_INTEL_2770, "Lakeport", Intel_25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2774, CS_INTEL_2774, "Glenwood", Intel_27XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x277C, CS_INTEL_277C, "Glenwood", Intel_27XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2A40, CS_INTEL_2A40, "Montevina", Intel_2A40_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2E00, CS_INTEL_2E00, "Eaglelake", Intel_2E00_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2E10, CS_INTEL_2E00, "Eaglelake", Intel_2E00_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2E20, CS_INTEL_2E00, "Eaglelake", Intel_2E00_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2E30, CS_INTEL_2E00, "Eaglelake", Intel_2E00_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x0044, CS_INTEL_0040, "Arrandale/Auburndale", Intel_0040_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x0062, CS_INTEL_0040, "Arrandale/Auburndale", Intel_0040_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xD130, CS_INTEL_3B42, "Clarksfield", Intel_3B42_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xD132, CS_INTEL_3B42, "Clarksfield", Intel_3B42_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x3B42, CS_INTEL_3B42, "P55/PM55/H57", Intel_3B42_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1C46, CS_INTEL_1C46, "IntelP67-CougarPoint", Intel_1C46_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1C4B, CS_INTEL_1C46, "HuronRiver-HM67", Intel_1C4B_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1C4F, CS_INTEL_1C46, "HuronRiver-QM67", Intel_1C4B_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1C49, CS_INTEL_1C49, "HuronRiver-HM65", Intel_1C49_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1C44, CS_INTEL_1C46, "IntelZ68", Intel_1C46_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1C10, CS_INTEL_1C10, "IntelP67", Intel_1C10_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1D40, CS_INTEL_1D40, "IntelX79", Intel_1D40_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1D41, CS_INTEL_1D40, "IntelX79", 
Intel_1D40_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x1E10, CS_INTEL_1E10, "IntelZ75", Intel_1E10_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x0150, CS_INTEL_1E10, "IntelZ77A-GD55", Intel_1E10_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x0151, CS_INTEL_1E10, "IntelZ77A-GD55", Intel_1E10_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x0100, CS_INTEL_1E10, "IntelZ77A-GD55", Intel_1E10_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x8C4B, CS_INTEL_8C4B, "SharkBay-HM87", Intel_8C4B_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x8C44, CS_INTEL_8C4B, "SharkBay-Z87", Intel_8C4B_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x8C41, CS_INTEL_8C4B, "SharkBay-H8x/P8x", Intel_8C4B_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x8C49, CS_INTEL_8C4B, "SharkBay-HM86", Intel_8C4B_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x8C52, CS_INTEL_8C4B, "SharkBay-E3", Intel_8C4B_setupFunc}, // Does not support SLI + {PCI_VENDOR_ID_INTEL, 0x8CC4, CS_INTEL_8CC4, "IntelZ97", Intel_8CC4_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x8CC3, CS_INTEL_8CC4, "IntelHM97", Intel_8CC4_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA145, CS_INTEL_A145, "IntelZ170", Intel_A145_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA14E, CS_INTEL_A145, "IntelHM170", Intel_A145_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA150, CS_INTEL_A145, "IntelHM170", Intel_A145_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA149, CS_INTEL_A145, "SkyLake C236", Intel_A145_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA14A, CS_INTEL_A145, "SkyLake C232", Intel_A145_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA14D, CS_INTEL_A145, "SkyLake-H", Intel_A145_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA244, CS_INTEL_A145, "SkyLake C620", Intel_A145_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x8D47, CS_INTEL_8D47, "IntelX99", Intel_8D47_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x8D44, CS_INTEL_8D47, "IntelC612", Intel_8D44_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA2C5, CS_INTEL_A2C5, "IntelZ270", Intel_A2C5_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA154, CS_INTEL_A2C5, "IntelZ270", Intel_A2C5_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA152, CS_INTEL_A2C5, "IntelRX9S", Intel_A2C5_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA242, CS_INTEL_A242, "IntelC422", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA241, CS_INTEL_A242, "IntelC422", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA243, CS_INTEL_A242, "IntelC422", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA2D2, CS_INTEL_A2D2, "IntelX299", Intel_A2D2_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA2D3, CS_INTEL_A242, "IntelC422", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA1C1, CS_INTEL_A242, "IntelC621", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA1C2, CS_INTEL_A242, "IntelC622", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA1C3, CS_INTEL_A242, "IntelC624", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA1C4, CS_INTEL_A242, "IntelC625", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA1C5, CS_INTEL_A242, "IntelC626", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA1C6, CS_INTEL_A242, "IntelC627", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA1C7, CS_INTEL_A242, "IntelC628", Intel_A242_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA2C9, CS_INTEL_A2C9, "IntelZ370", Intel_A2C9_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA310, CS_INTEL_A2C9, "IntelZ370", Intel_A2C9_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA30E, CS_INTEL_A2C9, "IntelZ370", Intel_A2C9_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA305, CS_INTEL_A2C9, "IntelZ390", Intel_A2C9_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA30D, CS_INTEL_A2C9, "IntelH370", Intel_A2C9_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA301, CS_INTEL_A301, "Intel-CannonLake", Intel_A301_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x0685, CS_INTEL_0685, 
"Intel-CometLake", Intel_0685_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0xA1CB, CS_INTEL_C620, "Intel-IceLake", NULL}, + {PCI_VENDOR_ID_INTEL, 0x4381, CS_INTEL_4381, "Intel-RocketLake", Intel_4381_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x4385, CS_INTEL_4381, "Intel-RocketLake", Intel_4381_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x7A82, CS_INTEL_7A82, "Intel-AlderLake", Intel_7A82_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x7A84, CS_INTEL_7A82, "Intel-AlderLake", Intel_7A82_setupFunc}, + + {PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x0FAF, CS_NVIDIA_T210, "T210", Nvidia_T210_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x10E5, CS_NVIDIA_T186, "T186", Nvidia_T210_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x10E6, CS_NVIDIA_T186, "T186", Nvidia_T210_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x1AD0, CS_NVIDIA_T194, "T194", Nvidia_T194_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x1AD1, CS_NVIDIA_T194, "T194", Nvidia_T194_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x1AD2, CS_NVIDIA_T194, "T194", Nvidia_T194_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x229A, CS_NVIDIA_T234, "T234", Nvidia_T194_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x229C, CS_NVIDIA_T234, "T234", Nvidia_T194_setupFunc}, + {PCI_VENDOR_ID_NVIDIA, 0x229E, CS_NVIDIA_T234, "T234", Nvidia_T194_setupFunc}, + + + {PCI_VENDOR_ID_SIS, 0x0649, CS_SIS_649, "649", SiS_656_setupFunc}, + {PCI_VENDOR_ID_SIS, 0x0656, CS_SIS_656, "656", SiS_656_setupFunc}, + + {PCI_VENDOR_ID_ATI, 0x5A31, CS_ATI_RS400, "RS400", ATI_RS400_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5A33, CS_ATI_RS400, "RS400", ATI_RS400_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5950, CS_ATI_RS480, "RS480", ATI_RS480_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5951, CS_ATI_RS480, "RS480", ATI_RS480_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5956, CS_ATI_FX790, "FX790" ,AMD_FX790_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5A11, CS_ATI_FX890, "FX890" ,AMD_FX890_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5a13, CS_ATI_RD850, "RD850" ,ATI_RD870_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5a12, CS_ATI_RD870, "RD870" ,ATI_RD870_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5a10, CS_ATI_RD890, "RD890" ,ATI_RD890_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5957, CS_ATI_RX780, "RX780" ,ATI_RX780_setupFunc}, + {PCI_VENDOR_ID_ATI, 0x5A14, CS_ATI_FX990, "FX990/X990/970",ATI_FX990_setupFunc}, + + {PCI_VENDOR_ID_AMD, 0x9601, CS_AMD_GX890, "GX890" ,AMD_FX890_setupFunc}, + {PCI_VENDOR_ID_AMD, 0x9600, CS_AMD_RS780, "RS780" ,AMD_RS780_setupFunc}, + {PCI_VENDOR_ID_AMD, 0x790e, CS_AMD_X370, "X370/X399/X470/ TRX40/X570/WRX80", AMD_X370_setupFunc }, + + {PCI_VENDOR_ID_VIA, 0x0308, CS_VIA_VT8369B, "VT8369B", NULL}, + {PCI_VENDOR_ID_VIA, 0x0410, CS_VIA_VX900, "VX900", VIA_VX900_setupFunc}, + {PCI_VENDOR_ID_APM, 0xe004, CS_APM_STORM, "X-Gene Storm", APM_Storm_setupFunc}, + {PCI_VENDOR_ID_IBM, 0x03DC, CS_IBM_VENICE, "Venice", NULL}, + {PCI_VENDOR_ID_MARVELL, 0xAF00, CS_MARVELL_THUNDERX2, "Marvell ThunderX2", Marvell_ThunderX2_setupFunc}, + {PCI_VENDOR_ID_REDHAT, 0x0008, CS_REDHAT_QEMU, "QEMU Redhat", QEMU_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE005, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE006, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE007, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE008, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE009, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE00A, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE00B, 
CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE00C, CS_AMPERE_EMAG, "AMPERE eMag", Ampere_eMag_setupFunc}, + {PCI_VENDOR_ID_HUAWEI, 0xA120, CS_HUAWEI_KUNPENG920, "Huawei Kunpeng920", Huawei_Kunpeng920_setupFunc}, + {PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD, "Mellanox BlueField", Mellanox_BlueField_setupFunc}, + {PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2, "Mellanox BlueField 2", NULL}, + {PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2, "Mellanox BlueField 2 Crypto disabled", NULL}, + {PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2, "Amazon Gravitron2", Amazon_Gravitron2_setupFunc}, + {PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX, "Fujitsu A64FX", Fujitsu_A64FX_setupFunc}, + {PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc}, + {PCI_VENDOR_ID_CADENCE, 0xDC08, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc}, + {PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc}, + {PCI_VENDOR_ID_CADENCE, 0xFC01, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc}, + {PCI_VENDOR_ID_CADENCE, 0xFC08, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc}, + {PCI_VENDOR_ID_CADENCE, 0xFC16, CS_PHYTIUM_FT2000, "Phytium FT2000", Phytium_FT2000_setupFunc}, + {PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500, "Phytium S2500", NULL}, + {PCI_VENDOR_ID_CADENCE, 0xDC08, CS_PHYTIUM_S2500, "Phytium S2500", NULL}, + {PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_S2500, "Phytium S2500", NULL}, + {PCI_VENDOR_ID_CADENCE, 0xFC01, CS_PHYTIUM_S2500, "Phytium S2500", NULL}, + {PCI_VENDOR_ID_CADENCE, 0xFC08, CS_PHYTIUM_S2500, "Phytium S2500", NULL}, + {PCI_VENDOR_ID_CADENCE, 0xFC16, CS_PHYTIUM_S2500, "Phytium S2500", NULL}, + {PCI_VENDOR_ID_AMPERE, 0xE000, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE00D, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE00E, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE010, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE100, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc}, + {PCI_VENDOR_ID_AMPERE, 0xE110, CS_AMPERE_ALTRA, "Ampere Altra", Ampere_Altra_setupFunc}, + {PCI_VENDOR_ID_ARM, 0x0100, CS_ARM_NEOVERSEN1, "Arm Neoverse N1", Arm_NeoverseN1_setupFunc}, + {PCI_VENDOR_ID_HYGON, 0x790E, CS_HYGON_C86, "Hygon-C86-7151", NULL}, + {PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN96XX, "Marvell Octeon CN96xx", ARMV8_generic_setupFunc}, + {PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN98XX, "Marvell Octeon CN98xx", ARMV8_generic_setupFunc}, + +/////////////////////////////////////////////////////////////////////////////////////////////////// + + // last element must have chipset CS_UNKNOWN (zero) + {0, 0, CS_UNKNOWN, "Unknown", NULL} +}; + + +VENDORNAME vendorName[] = +{ + {PCI_VENDOR_ID_NVIDIA, "NVIDIA"}, + {PCI_VENDOR_ID_INTEL, "Intel"}, + {PCI_VENDOR_ID_VIA, "VIA"}, + {PCI_VENDOR_ID_RCC, "ServerWorks"}, + {PCI_VENDOR_ID_MICRON_1, "Micron"}, + {PCI_VENDOR_ID_MICRON_2, "Micron"}, + {PCI_VENDOR_ID_APPLE, "Apple"}, + {PCI_VENDOR_ID_SIS, "SiS"}, + {PCI_VENDOR_ID_ATI, "ATI"}, + {PCI_VENDOR_ID_TRANSMETA, "Transmeta"}, + {PCI_VENDOR_ID_HP, "HP"}, + {PCI_VENDOR_ID_AMD, "AMD"}, + {PCI_VENDOR_ID_ALI, "ALi"}, + {PCI_VENDOR_ID_APM, "AppliedMicro"}, + {PCI_VENDOR_ID_IBM, "IBM"}, + {PCI_VENDOR_ID_MARVELL, "MarvellThunderX2"}, + {PCI_VENDOR_ID_REDHAT, 
"QemuRedhat"}, + {PCI_VENDOR_ID_AMPERE, "AmpereComputing"}, + {PCI_VENDOR_ID_HUAWEI, "Huawei"}, + {PCI_VENDOR_ID_MELLANOX, "Mellanox"}, + {PCI_VENDOR_ID_AMAZON, "Amazon"}, + {PCI_VENDOR_ID_FUJITSU, "Fujitsu"}, + {PCI_VENDOR_ID_CADENCE, "Cadence"}, + {PCI_VENDOR_ID_ARM, "ARM"}, + {0, "Unknown"} // Indicates end of the table +}; + + +// +// Allowlist all chipsets with which dGPU over PCIe is supported on ARM +// (both v7 and v8) platforms +// +ARMCSALLOWLISTINFO armChipsetAllowListInfo[] = +{ + {PCI_VENDOR_ID_NVIDIA, 0x0FAE, CS_NVIDIA_T210}, // NVIDIA Tegra X1 RP0 + {PCI_VENDOR_ID_NVIDIA, 0x0FAF, CS_NVIDIA_T210}, // NVIDIA Tegra X1 RP1 + {PCI_VENDOR_ID_NVIDIA, 0x10E5, CS_NVIDIA_T186}, // NVIDIA Tegra P1 RP0 + {PCI_VENDOR_ID_NVIDIA, 0x10E6, CS_NVIDIA_T186}, // NVIDIA Tegra P1 RP1 + {PCI_VENDOR_ID_NVIDIA, 0x1AD0, CS_NVIDIA_T194}, // NVIDIA Tegra V1 RP0 + {PCI_VENDOR_ID_NVIDIA, 0x1AD1, CS_NVIDIA_T194}, // NVIDIA Tegra V1 RP1 + {PCI_VENDOR_ID_NVIDIA, 0x1AD2, CS_NVIDIA_T194}, // NVIDIA Tegra V1 RP2 + {PCI_VENDOR_ID_NVIDIA, 0x229A, CS_NVIDIA_T234}, // NVIDIA Tegra Orin RP0 + {PCI_VENDOR_ID_NVIDIA, 0x229C, CS_NVIDIA_T234}, // NVIDIA Tegra Orin RP1 + {PCI_VENDOR_ID_NVIDIA, 0x229E, CS_NVIDIA_T234}, // NVIDIA Tegra Orin RP2 + + {PCI_VENDOR_ID_APM, 0xe004, CS_APM_STORM}, // Applied Micro X-Gene "Storm" + {PCI_VENDOR_ID_MARVELL, 0xAF00, CS_MARVELL_THUNDERX2}, // Marvell ThunderX2 + {PCI_VENDOR_ID_REDHAT, 0x0008, CS_REDHAT_QEMU}, // Redhat QEMU + {PCI_VENDOR_ID_AMPERE, 0xE005, CS_AMPERE_EMAG}, // Ampere eMag + {PCI_VENDOR_ID_AMPERE, 0xE006, CS_AMPERE_EMAG}, // Ampere eMag + {PCI_VENDOR_ID_AMPERE, 0xE007, CS_AMPERE_EMAG}, // Ampere eMag + {PCI_VENDOR_ID_AMPERE, 0xE008, CS_AMPERE_EMAG}, // Ampere eMag + {PCI_VENDOR_ID_AMPERE, 0xE009, CS_AMPERE_EMAG}, // Ampere eMag + {PCI_VENDOR_ID_AMPERE, 0xE00A, CS_AMPERE_EMAG}, // Ampere eMag + {PCI_VENDOR_ID_AMPERE, 0xE00B, CS_AMPERE_EMAG}, // Ampere eMag + {PCI_VENDOR_ID_AMPERE, 0xE00C, CS_AMPERE_EMAG}, // Ampere eMag + {PCI_VENDOR_ID_HUAWEI, 0xA120, CS_HUAWEI_KUNPENG920}, // Huawei Kunpeng 920 + {PCI_VENDOR_ID_MELLANOX, 0xA2D0, CS_MELLANOX_BLUEFIELD}, // Mellanox BlueField + {PCI_VENDOR_ID_MELLANOX, 0xA2D4, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2 + {PCI_VENDOR_ID_MELLANOX, 0xA2D5, CS_MELLANOX_BLUEFIELD2},// Mellanox BlueField 2 Crypto disabled + {PCI_VENDOR_ID_AMAZON, 0x0200, CS_AMAZON_GRAVITRON2}, // Amazon Gravitron2 + {PCI_VENDOR_ID_FUJITSU, 0x1952, CS_FUJITSU_A64FX}, // Fujitsu A64FX + {PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_FT2000}, // Phytium FT2000 + {PCI_VENDOR_ID_CADENCE, 0xDC08, CS_PHYTIUM_FT2000}, // Phytium FT2000 + {PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_FT2000}, // Phytium FT2000 + {PCI_VENDOR_ID_CADENCE, 0xFC01, CS_PHYTIUM_FT2000}, // Phytium FT2000 + {PCI_VENDOR_ID_CADENCE, 0xFC08, CS_PHYTIUM_FT2000}, // Phytium FT2000 + {PCI_VENDOR_ID_CADENCE, 0xFC16, CS_PHYTIUM_FT2000}, // Phytium FT2000 + {PCI_VENDOR_ID_CADENCE, 0xDC01, CS_PHYTIUM_S2500}, // Phytium S2500 + {PCI_VENDOR_ID_CADENCE, 0xDC08, CS_PHYTIUM_S2500}, // Phytium S2500 + {PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_S2500}, // Phytium S2500 + {PCI_VENDOR_ID_CADENCE, 0xFC01, CS_PHYTIUM_S2500}, // Phytium S2500 + {PCI_VENDOR_ID_CADENCE, 0xFC08, CS_PHYTIUM_S2500}, // Phytium S2500 + {PCI_VENDOR_ID_CADENCE, 0xDC16, CS_PHYTIUM_S2500}, // Phytium S2500 + {PCI_VENDOR_ID_AMPERE, 0xE000, CS_AMPERE_ALTRA}, // Ampere Altra + {PCI_VENDOR_ID_AMPERE, 0xE00D, CS_AMPERE_ALTRA}, // Ampere Altra + {PCI_VENDOR_ID_AMPERE, 0xE00E, CS_AMPERE_ALTRA}, // Ampere Altra + {PCI_VENDOR_ID_AMPERE, 0xE010, 
CS_AMPERE_ALTRA}, // Ampere Altra + {PCI_VENDOR_ID_AMPERE, 0xE100, CS_AMPERE_ALTRA}, // Ampere Altra + {PCI_VENDOR_ID_AMPERE, 0xE110, CS_AMPERE_ALTRA}, // Ampere Altra + {PCI_VENDOR_ID_ARM, 0x0100, CS_ARM_NEOVERSEN1}, // Arm Neoverse N1 + {PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN96XX}, // Marvell OCTEON CN96xx + {PCI_VENDOR_ID_MARVELL, 0xA02D, CS_MARVELL_OCTEON_CN98XX}, // Marvell OCTEON CN98xx + + // last element must have chipset CS_UNKNOWN (zero) + {0, 0, CS_UNKNOWN} +}; + +#endif /* NVCST_H */ diff --git a/src/nvidia/arch/nvalloc/common/inc/nvdevid.h b/src/nvidia/arch/nvalloc/common/inc/nvdevid.h new file mode 100644 index 000000000..df372ac70 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/nvdevid.h @@ -0,0 +1,693 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 200-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVDEVID_H +#define NVDEVID_H + + + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Private device ids defines - only defines ! 
* +* * +\***************************************************************************/ + +/////////////////////////////////////////////////////////////////////////////////////////// +// +// VENDOR/SUBVENDOR IDS +// XXX Cleanup to do: change PCI_VENDOR_* to NV_PCI_SUBID_VENDOR_* +// +/////////////////////////////////////////////////////////////////////////////////////////// +#define NV_PCI_SUBID_VENDOR 15:0 /* RW--F */ +#define NV_PCI_SUBID_VENDOR_AMD 0x1022 +#define NV_PCI_SUBID_VENDOR_ALI 0x10B9 +#define NV_PCI_SUBID_VENDOR_NVIDIA 0x10DE +#define NV_PCI_SUBID_VENDOR_INTEL 0x8086 +#define NV_PCI_SUBID_VENDOR_VIA 0x1106 +#define NV_PCI_SUBID_VENDOR_RCC 0x1166 +#define NV_PCI_SUBID_VENDOR_MICRON_1 0x1042 +#define NV_PCI_SUBID_VENDOR_MICRON_2 0x1344 +#define NV_PCI_SUBID_VENDOR_APPLE 0x106B +#define NV_PCI_SUBID_VENDOR_SIS 0x1039 +#define NV_PCI_SUBID_VENDOR_ATI 0x1002 +#define NV_PCI_SUBID_VENDOR_TRANSMETA 0x1279 +#define NV_PCI_SUBID_VENDOR_HP 0x103C +#define NV_PCI_SUBID_VENDOR_DELL 0x1028 +#define NV_PCI_SUBID_VENDOR_FUJITSU 0x10cf +#define NV_PCI_SUBID_VENDOR_ASUS 0x1043 +#define NV_PCI_SUBID_VENDOR_MSI 0x1462 +#define NV_PCI_SUBID_VENDOR_FOXCONN 0x105B +#define NV_PCI_SUBID_VENDOR_ECS 0x1019 +#define NV_PCI_SUBID_VENDOR_DFI_1 0x106E +#define NV_PCI_SUBID_VENDOR_TOSHIBA 0x1179 +#define NV_PCI_SUBID_VENDOR_DFI_2 0x15BD +#define NV_PCI_SUBID_VENDOR_ACER 0x1025 +#define NV_PCI_SUBID_VENDOR_GIGABYTE 0x1458 +#define NV_PCI_SUBID_VENDOR_EVGA 0x3842 +#define NV_PCI_SUBID_VENDOR_BROADCOM 0x1166 +#define NV_PCI_SUBID_VENDOR_SUPERMICRO 0x15D9 +#define NV_PCI_SUBID_VENDOR_BIOSTAR 0x1565 +#define NV_PCI_SUBID_VENDOR_XFX 0x1682 +#define NV_PCI_SUBID_VENDOR_PCPARTNER 0x19DA +#define NV_PCI_SUBID_VENDOR_LENOVO 0x17AA +#define NV_PCI_SUBID_VENDOR_FSC 0x1734 +#define NV_PCI_SUBID_VENDOR_FTS 0x1734 +#define NV_PCI_SUBID_VENDOR_COLORFUL 0x7377 +#define NV_PCI_SUBID_VENDOR_ASROCK 0x1849 +#define NV_PCI_SUBID_VENDOR_SHUTTLE 0x1297 +#define NV_PCI_SUBID_VENDOR_CLEVO 0x1558 +#define NV_PCI_SUBID_VENDOR_PEGATRON 0x1B0A +#define NV_PCI_SUBID_VENDOR_JETWAY 0x16F3 +#define NV_PCI_SUBID_VENDOR_HIGHGRADE 0x1C6C +#define NV_PCI_SUBID_VENDOR_GALAXY 0x1B4C +#define NV_PCI_SUBID_VENDOR_ZOTAC 0x19DA +#define NV_PCI_SUBID_VENDOR_ARIMA 0x161F +#define NV_PCI_SUBID_VENDOR_BFG 0x19F1 +#define NV_PCI_SUBID_VENDOR_SONY 0x104D +#define NV_PCI_SUBID_VENDOR_BITLAND 0x1642 +#define NV_PCI_SUBID_VENDOR_PC_PARTNER 0x174B +#define NV_PCI_SUBID_VENDOR_NZXT 0x1D96 + +// XXX CKEANUP TO REMOVE IN FAVOR OF NV_PCI_SUBID_VENDOR_* +#define PCI_VENDOR_ID_AMD 0x1022 +#define PCI_VENDOR_ID_ALI 0x10B9 +#define PCI_VENDOR_ID_NVIDIA 0x10DE +#define PCI_VENDOR_ID_INTEL 0x8086 +#define PCI_VENDOR_ID_VIA 0x1106 +#define PCI_VENDOR_ID_RCC 0x1166 +#define PCI_VENDOR_ID_MICRON_1 0x1042 +#define PCI_VENDOR_ID_MICRON_2 0x1344 +#define PCI_VENDOR_ID_APPLE 0x106B +#define PCI_VENDOR_ID_SIS 0x1039 +#define PCI_VENDOR_ID_ATI 0x1002 +#define PCI_VENDOR_ID_TRANSMETA 0x1279 +#define PCI_VENDOR_ID_HP 0x103C +#define PCI_VENDOR_ID_DELL 0x1028 +#define PCI_VENDOR_ID_FUJITSU 0x10cf +#define PCI_VENDOR_ID_ASUS 0x1043 +#define PCI_VENDOR_ID_MSI 0x1462 +#define PCI_VENDOR_ID_FOXCONN 0x105B +#define PCI_VENDOR_ID_ECS 0x1019 +#define PCI_VENDOR_ID_DFI_1 0x106E +#define PCI_VENDOR_ID_TOSHIBA 0x1179 +#define PCI_VENDOR_ID_DFI_2 0x15BD +#define PCI_VENDOR_ID_ACER 0x1025 +#define PCI_VENDOR_ID_GIGABYTE 0x1458 +#define PCI_VENDOR_ID_EVGA 0x3842 +#define PCI_VENDOR_ID_BROADCOM 0x1166 +#define PCI_VENDOR_ID_SUPERMICRO 0x15D9 +#define PCI_VENDOR_ID_BIOSTAR 0x1565 +#define 
PCI_VENDOR_ID_XFX 0x1682 +#define PCI_VENDOR_ID_PCPARTNER 0x19DA +#define PCI_VENDOR_ID_LENOVO 0x17AA +#define PCI_VENDOR_ID_FSC 0x1734 +#define PCI_VENDOR_ID_FTS 0x1734 +#define PCI_VENDOR_ID_COLORFUL 0x7377 +#define PCI_VENDOR_ID_ASROCK 0x1849 +#define PCI_VENDOR_ID_SHUTTLE 0x1297 +#define PCI_VENDOR_ID_CLEVO 0x1558 +#define PCI_VENDOR_ID_PEGATRON 0x1B0A +#define PCI_VENDOR_ID_JETWAY 0x16F3 +#define PCI_VENDOR_ID_HIGHGRADE 0x1C6C +#define PCI_VENDOR_ID_GALAXY 0x1B4C +#define PCI_VENDOR_ID_ZOTAC 0x19DA +#define PCI_VENDOR_ID_ARIMA 0x161F +#define PCI_VENDOR_ID_PC_PARTNER 0x174B +#define PCI_VENDOR_ID_APM 0x10E8 +#define PCI_VENDOR_ID_IBM 0x1014 +#define PCI_VENDOR_ID_NZXT 0x1D96 +#define PCI_VENDOR_ID_MARVELL 0x177D +#define PCI_VENDOR_ID_REDHAT 0x1B36 +#define PCI_VENDOR_ID_AMPERE 0x1DEF +#define PCI_VENDOR_ID_HUAWEI 0x19E5 +#define PCI_VENDOR_ID_MELLANOX 0x15B3 +#define PCI_VENDOR_ID_AMAZON 0x1D0F +#define PCI_VENDOR_ID_CADENCE 0x17CD +#define PCI_VENDOR_ID_ARM 0x13B5 +#define PCI_VENDOR_ID_HYGON 0x1D94 + +#define NV_PCI_DEVID_DEVICE 31:16 /* RW--F */ +#define NV_PCI_SUBID_DEVICE 31:16 /* RW--F */ + +/////////////////////////////////////////////////////////////////////////////////////////// +// +// GPU DEVICE IDS +// +/////////////////////////////////////////////////////////////////////////////////////////// + +#define NV_PCI_DEVID_DEVICE_PG171_SKU200_PG179_SKU220 0x25B6 /* NVIDIA A16 / NVIDIA A2 */ + +/////////////////////////////////////////////////////////////////////////////////////////// +// +// SUBDEVICE IDs +// +/////////////////////////////////////////////////////////////////////////////////////////// + +// A16 +#define NV_PCI_SUBID_DEVICE_PG171_SKU200 0x14A9 + +/////////////////////////////////////////////////////////////////////////////////////////// +// +// CHIPSET IDs +// +/////////////////////////////////////////////////////////////////////////////////////////// +// Desktop flavor of X58 +#define X58_DESKTOP_DEVIDS 0x3400, 0x3405 +// Mobile version of X58 +#define X58_MOBILE_DEVID 0x3405 +#define X58_MOBILE_CLEVO_7200_SSDEVID 0x7200 + +// Sandy bridge CLEVO platform +#define SANDYBRIDGE_P180HM_SSDEVID 0x8000 +#define SandyBridge_E_X79_P270WM_SSDEVID 0x270 +#define IvyBridge_Z75_P370EM_SSDEVID 0x371 + +// Device ID's of Devices present on Patsburg's PCIE bus. 
+#define PATSBURG_PCIE_DEVICE_MIN_DEVID 0x1D10 +#define PATSBURG_PCIE_DEVICE_MAX_DEVID 0x1D1F +#define PATSBURG_PCIE_DEVICE_DEVID 0x244E + +//Tylersburg Congurations +#define TYLERSBURG_DEVID 0x3406 + +// Intel Grantsdale definitions +#define DEVICE_ID_INTEL_2580_HOST_BRIDGE 0x2580 +#define DEVICE_ID_INTEL_2581_ROOT_PORT 0x2581 + +// Intel Alderwood definitions +#define DEVICE_ID_INTEL_2584_HOST_BRIDGE 0x2584 +#define DEVICE_ID_INTEL_2585_ROOT_PORT 0x2585 + +// Intel Alviso definitions +#define DEVICE_ID_INTEL_2590_HOST_BRIDGE 0x2590 +#define DEVICE_ID_INTEL_2591_ROOT_PORT 0x2591 + +// Intel Tumwater definitions +#define DEVICE_ID_INTEL_359E_HOST_BRIDGE 0x359E +#define DEVICE_ID_INTEL_3597_ROOT_PORT 0x3597 + +// Intel Stoakley definitions +#define INTEL_4000_SUBDEVICE_ID 0x021D + +// Intel SkullTrail definitions +#define INTEL_4003_SUBDEVICE_ID 0x5358 + +// Intel Core I7 CPU +#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I7 0x2C01 + +// Intel Core I5 CPU Lynnfield +#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_L 0x2C81 +#define INTEL_LYNNFIELD_ROOTPORT_CPU1 0xD138 +#define INTEL_LYNNFIELD_ROOTPORT_CPU2 0xD13A + +// Intel Core I5 CPU Auburndale +#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_A 0x2D41 + +// Intel Core I5 CPU 650 +#define INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_6 0x2D01 + +// Intel Poulsbo definitions +#define DEVICE_ID_INTEL_8100_HOST_BRIDGE 0x8100 +#define DEVICE_ID_INTEL_8110_ROOT_PORT 0x8110 +#define DEVICE_ID_INTEL_8112_ROOT_PORT 0x8112 + +// Intel TunnelCreek definitions +#define DEVICE_ID_INTEL_8180_ROOT_PORT 0x8180 +#define DEVICE_ID_INTEL_8181_ROOT_PORT 0x8181 +#define DEVICE_ID_INTEL_8184_ROOT_PORT 0x8184 +#define DEVICE_ID_INTEL_8185_ROOT_PORT 0x8185 + +// Intel I/O Hub definitions +#define DEVICE_ID_INTEL_3408_ROOT_PORT 0x3408 +#define DEVICE_ID_INTEL_3411_ROOT_PORT 0x3411 +#define DEVICE_ID_INTEL_3420_ROOT_PORT 0x3420 +#define DEVICE_ID_INTEL_3421_ROOT_PORT 0x3421 + +// Intel SandyBridge IIO definitions +#define DEVICE_ID_INTEL_3C02_ROOT_PORT 0x3c02 +#define DEVICE_ID_INTEL_3C03_ROOT_PORT 0x3c03 +#define DEVICE_ID_INTEL_3C04_ROOT_PORT 0x3c04 +#define DEVICE_ID_INTEL_3C05_ROOT_PORT 0x3c05 +#define DEVICE_ID_INTEL_3C06_ROOT_PORT 0x3c06 +#define DEVICE_ID_INTEL_3C07_ROOT_PORT 0x3c07 +#define DEVICE_ID_INTEL_3C08_ROOT_PORT 0x3c08 +#define DEVICE_ID_INTEL_3C09_ROOT_PORT 0x3c09 +#define DEVICE_ID_INTEL_3C0A_ROOT_PORT 0x3c0a +#define DEVICE_ID_INTEL_3C0B_ROOT_PORT 0x3c0b + +// Intel Haswell-E definitions +#define DEVICE_ID_INTEL_2F00_HOST_BRIDGE 0x2f00 +#define DEVICE_ID_INTEL_2F01_ROOT_PORT 0x2f01 +#define DEVICE_ID_INTEL_2F02_ROOT_PORT 0x2f02 +#define DEVICE_ID_INTEL_2F03_ROOT_PORT 0x2f03 +#define DEVICE_ID_INTEL_2F04_ROOT_PORT 0x2f04 +#define DEVICE_ID_INTEL_2F05_ROOT_PORT 0x2f05 +#define DEVICE_ID_INTEL_2F06_ROOT_PORT 0x2f06 +#define DEVICE_ID_INTEL_2F07_ROOT_PORT 0x2f07 +#define DEVICE_ID_INTEL_2F08_ROOT_PORT 0x2f08 +#define DEVICE_ID_INTEL_2F09_ROOT_PORT 0x2f09 +#define DEVICE_ID_INTEL_2F0A_ROOT_PORT 0x2f0a +#define DEVICE_ID_INTEL_2F0B_ROOT_PORT 0x2f0b + +#define DEVICE_ID_INTEL_0C01_ROOT_PORT 0x0c01 + +// Intel IvyTown definitions + +#define DEVICE_ID_INTEL_0E02_ROOT_PORT 0x0e02 +#define DEVICE_ID_INTEL_0E03_ROOT_PORT 0x0e03 +#define DEVICE_ID_INTEL_0E04_ROOT_PORT 0x0e04 +#define DEVICE_ID_INTEL_0E05_ROOT_PORT 0x0e05 +#define DEVICE_ID_INTEL_0E06_ROOT_PORT 0x0e06 +#define DEVICE_ID_INTEL_0E07_ROOT_PORT 0x0e07 +#define DEVICE_ID_INTEL_0E08_ROOT_PORT 0x0e08 +#define DEVICE_ID_INTEL_0E09_ROOT_PORT 0x0e09 +#define DEVICE_ID_INTEL_0E0A_ROOT_PORT 0x0e0a 
+#define DEVICE_ID_INTEL_0E0B_ROOT_PORT 0x0e0b +// Intel Ivy Bridge E definitions +#define DEVICE_ID_INTEL_0E00_HOST_BRIDGE 0x0E00 + +// Intel Haswell definitions +#define DEVICE_ID_INTEL_0C00_HASWELL_HOST_BRIDGE 0x0C00 +#define DEVICE_ID_INTEL_0C04_HASWELL_HOST_BRIDGE 0x0C04 + +// Intel PCH definitions +#define DEVICE_ID_INTEL_9D10_PCH_BRIDGE 0x9d10 +#define DEVICE_ID_INTEL_9D18_PCH_BRIDGE 0x9d18 +#define DEVICE_ID_INTEL_A117_PCH_BRIDGE 0xa117 +#define DEVICE_ID_INTEL_A118_PCH_BRIDGE 0xa118 +#define DEVICE_ID_INTEL_9C98_PCH_BRIDGE 0x9c98 + +// Intel Broadwell definitions +#define DEVICE_ID_INTEL_6F00_HOST_BRIDGE 0x6f00 +#define DEVICE_ID_INTEL_6F01_ROOT_PORT 0x6f01 +#define DEVICE_ID_INTEL_6F02_ROOT_PORT 0x6f02 +#define DEVICE_ID_INTEL_6F03_ROOT_PORT 0x6f03 +#define DEVICE_ID_INTEL_6F04_ROOT_PORT 0x6f04 +#define DEVICE_ID_INTEL_6F05_ROOT_PORT 0x6f05 +#define DEVICE_ID_INTEL_6F06_ROOT_PORT 0x6f06 +#define DEVICE_ID_INTEL_6F07_ROOT_PORT 0x6f07 +#define DEVICE_ID_INTEL_6F08_ROOT_PORT 0x6f08 +#define DEVICE_ID_INTEL_6F09_ROOT_PORT 0x6f09 +#define DEVICE_ID_INTEL_6F0A_ROOT_PORT 0x6f0A +#define DEVICE_ID_INTEL_6F0B_ROOT_PORT 0x6f0B +#define DEVICE_ID_INTEL_1601_ROOT_PORT 0x1601 +#define DEVICE_ID_INTEL_1605_ROOT_PORT 0x1605 +#define DEVICE_ID_INTEL_1609_ROOT_PORT 0x1609 +#define DEVICE_ID_INTEL_BROADWELL_U_HOST_BRIDGE 0x1604 +#define DEVICE_ID_INTEL_BROADWELL_H_HOST_BRIDGE 0x1614 + +// Intel Skylake definitions +#define DEVICE_ID_INTEL_1901_ROOT_PORT 0x1901 +#define DEVICE_ID_INTEL_1905_ROOT_PORT 0x1905 +#define DEVICE_ID_INTEL_1909_ROOT_PORT 0x1909 +#define DEVICE_ID_INTEL_SKYLAKE_U_HOST_BRIDGE 0x1904 +#define DEVICE_ID_INTEL_SKYLAKE_S_HOST_BRIDGE 0x191F +#define DEVICE_ID_INTEL_SKYLAKE_H_HOST_BRIDGE 0x1910 + +// Intel Skylake-E definitions +#define DEVICE_ID_INTEL_2030_ROOT_PORT 0x2030 +#define DEVICE_ID_INTEL_2033_ROOT_PORT 0x2033 + +// Intel Kabylake definitions +#define DEVICE_ID_INTEL_KABYLAKE_U_HOST_BRIDGE 0x5904 +#define DEVICE_ID_INTEL_KABYLAKE_H_HOST_BRIDGE 0x5910 + +// AMD Matisse, Rome definitions +#define DEVICE_ID_AMD_1483_ROOT_PORT 0x1483 +// AMD Castle Peak definition +#define DEVICE_ID_AMD_1480_ROOT_PORT 0x1480 +// AMD Renoir-H definition +#define DEVICE_ID_AMD_1630_ROOT_PORT 0x1630 + +// Dell SkullTrail definitions +#define DELL_4003_SUBDEVICE_ID 0x021D + +// Dell Quicksilver MLK definitions +#define DELL_0040_SUBDEVICE_ID 0x043a + +// HP Tylersburg definitions +#define TYLERSBURG_Z800_SSDEVID 0x130B + +// HP Romley definitions +#define ROMLEY_Z820_SSDEVID 0x158B +#define ROMLEY_Z620_SSDEVID 0x158A +#define ROMLEY_Z420_SSDEVID 0x1589 + +// HP Grantley definitions +#define GRANTLEY_Z840_SSDEVID 0x2129 +#define GRANTLEY_Z640_SSDEVID 0x212A +#define GRANTLEY_Z440_SSDEVID 0x212B + +// HP PURELY definitions +#define HP_QUADRO_Z4GEN4_DEVID 0xA2D2 +#define PURLEY_Z8GEN4_SSDEVID 0x81C7 +#define PURLEY_Z6GEN4_SSDEVID 0x81C6 +#define PURLEY_Z4GEN4_SSDEVID 0x81C5 + +// Lenovo Romley definitions +#define ROMLEY_C30_SSDEVID 0x1028 +#define ROMLEY_D30_SSDEVID 0x1027 +#define ROMLEY_S30_SSDEVID 0x1026 + +// Dell Romley definitions +#define ROMLEY_T7600_SSDEVID 0x0495 +#define ROMLEY_T5600_SSDEVID 0x0496 +#define ROMLEY_T3600_SSDEVID 0x0497 + +// Dell Romley + IVB-EP CPU Refresh +#define IVYTOWN_T7610_SSDEVID 0x05D4 +#define IVYTOWN_T5610_SSDEVID 0x05D3 + +// Dell Romley (Ipanema) +#define ROMLEY_R7610_SSDEVID 0x05A1 + +// FTS Romley definitions +#define ROMLEY_R920_SSDEVID 0x11B6 + +// Lenovo Grantley (Messi, Pele, Ronaldo) +#define GRANTLEY_V40_SSDEVID 0x1031 +#define GRANTLEY_D40_SSDEVID 
0x1030 +#define GRANTLEY_S40_SSDEVID 0x102F + +// Dell Grantley (Avalon) +#define GRANTLEY_T7810_SSDEVID 0x0618 +#define GRANTLEY_T7910_SSDEVID 0x0619 + +// Lenovo Purley (Nile, Volga) +#define PURLEY_P920_SSDEVID 0x1038 +#define PURLEY_P720_SSDEVID 0x1037 +#define PURLEY_P520_SSDEVID 0x1036 + +// Lenovo P520c +#define LENOVO_P520C_SSDEVID 0x103C + +// Dell Purley(Matira) +#define PURLEY_MATIRA3X_DEVID 0xA2D2 +#define PURLEY_MATIRA3X_SSDEVID 0x08B1 +#define PURLEY_MATIRA3_SSDEVID 0x0738 +#define PURLEY_MATIRA5_SSDEVID 0x0739 +#define PURLEY_MATIRA7_SSDEVID 0x073A + +//FTS Grantley +#define GRANTLEY_R940_SSDEVID 0x1201 + +//FTS Purley +#define PURLEY_R970_SSDEVID 0x1230 +#define PURLEY_M770_SSDEVID 0x1231 + +// HP Arrandale, Clarksfield, X58 workstation definitions +#define ARRANDALE_Z200SFF_SSDEVID 0x304A +#define CLARKSFIELD_Z200_SSDEVID 0x170B +#define X58_Z400_SSDEVID 0x1309 + +// GIGABYTE Sniper 3 (Z77) +#define GIGABYTE_SNIPER_3_SSDEVID_1 0x5000 +#define GIGABYTE_SNIPER_3_SSDEVID_2 0x5001 + +// Supermicro Quadro VCA definitions +#define SUPERMICRO_QUADRO_VCA_DEVID 0x8D44 +#define SUPERMICRO_QUADRO_VCA_SSDEVID 0x7270 + +// Supermicro SYS-4027GR-TRT +#define SUPERMICRO_SYS_4027GR_TRT_DEVID 0x1D41 +#define SUPERMICRO_SYS_4027GR_TRT_SSDEVID 0x0732 + +// Supermicro SYS-4029GP-TRT2 +#define SUPERMICRO_SYS_4029GP_TRT2_DEVID 0xA1C2 +#define SUPERMICRO_SYS_4029GP_TRT2_SSDEVID 0x7270 + +// Asus Quadro BOXX definitions +#define ASUS_QUADRO_BOXX_DEVID 0x8D44 +#define ASUS_QUADRO_BOXX_SSDEVID 0x85F6 + +// APEXX8 Quadro BOXX definitions +#define APEXX8_QUADRO_BOXX_DEVID 0xA2D3 +#define APEXX8_QUADRO_BOXX_SSDEVID 0x098e + +// APEXX5 Quadro BOXX definitions +#define APEXX5_QUADRO_BOXX_DEVID 0xA2D3 +#define APEXX5_QUADRO_BOXX_SSDEVID 0x1000 + +// ASUS X99-E-10G +#define ASUS_X99_E_10G_SSDEVID 0x8600 + +// VIA definitions +#define DEVICE_ID_VIA_VT8369B_HOST_BRIDGE 0x0308 + +// Foxconn Einstein 64 [8086:a1c1][105b:7270] +#define FOXCONN_EINSTEIN_64_DEVID 0xA1C1 +#define FOXCONN_EINSTEIN_64_SSDEVID 0x7270 + +// Tyan Workstation +#define TYAN_B7100_DEVID 0xA1C1 +#define TYAN_B7100_SSDEVID 0x7270 + +// ESC 4000 Series Workstation +#define ESC_4000_G4_DEVID 0xA1C1 +#define ESC_4000_G4_SSDEVID 0x871E + + +// NVIDIA C51 +#define NVIDIA_C51_DEVICE_ID_MIN 0x2F0 +#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_0 0x2F0 +#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_1 0x2F1 +#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_2 0x2F2 +#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_3 0x2F3 +#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_0 0x2F4 +#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_1 0x2F5 +#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_2 0x2F6 +#define NVIDIA_C51_ULDT_CFG_0_DEVICE_ID_IGPU_DISABLE_3 0x2F7 +#define NVIDIA_C51_DEVICE_ID_MAX 0x2F7 + +// NVIDIA MCP55 +#define NVIDIA_MCP55_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0369 + +// NVIDIA MCP61 +#define NVIDIA_MCP61_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x03EA +#define NVIDIA_MCP61_ULDT_CFG_0_DEVICE_ID_PA 0x03E2 + +// NVIDIA C55 +#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_PRO 0x03A0 +#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_PRO 0x03A0 +#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_SLIX16 0x03A1 +#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_SLI 0x03A3 +#define NVIDIA_C55_CPU_PCI_0_DEVICE_ID_U 0x03A2 + +// NVIDIA MCP65 +#define NVIDIA_MCP65_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0444 + +// NVIDIA MCP67/MCP68 +#define NVIDIA_MCP67_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0547 + +// NVIDIA MCP73 +#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_PV 0x07C0 +#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_O 0x07C1 +#define 
NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_S 0x07C2 +#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_V 0x07C3 +#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_0 0x07C4 +#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_1 0x07C5 +#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_RSVD_2 0x07C6 +#define NVIDIA_MCP73_CPU_PCI_0_DEVICE_ID_D 0x07C7 + +// NVIDIA C73 +#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI2 0x0800 +#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI_ALL 0x0801 +#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLIX8 0x0802 +#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_U 0x0803 +#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_0 0x0804 +#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_1 0x0805 +#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_2 0x0806 +#define NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_3 0x0807 + +// NVIDIA MCP77/78 +#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_DEFAULT 0x0754 +#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_1 0x0755 +#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_2 0x0756 +#define NVIDIA_MCP77_ULDT_CFG_0_DEVICE_ID_3 0x0757 +#define NVIDIA_MCP77_MCP_SM_CFG_0_DEVICE_ID_UNIT_SM 0x0752 + +// NVIDIA MCP79/7A +#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_DEFAULT 0x0A80 +#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_SLIX16 0x0A81 +#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_SLI 0x0A82 +#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_U 0x0A83 +#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_GM 0x0A84 +#define NVIDIA_MCP79_CPU_PCI_0_DEVICE_ID_GVM 0x0A85 +#define NVIDIA_MCP79_MCP_SM_CFG_0_DEVICE_ID_UNIT_SM 0x0AA2 + +// NVIDIA MCP89/P83 +#define NVIDIA_MCP89_CPU_PCI_0_DEVICE_ID_DEFAULT 0x00000D60 + +/////////////////////////////////////////////////////////////////////////////////////////// +// +// enumeration of chipset families +// +/////////////////////////////////////////////////////////////////////////////////////////// + +// +// When adding a variable to the following enum, please +// add it also to the following chipset_names[]. 
+// +enum { + CS_UNKNOWN = 0x0000, + CS_UNKNOWN_PCIE = 0x1000 +, CS_INTEL_2580 +, CS_INTEL_2584 +, CS_INTEL_2588 +, CS_INTEL_2590 +, CS_INTEL_25E0 +, CS_INTEL_29X0 +, CS_INTEL_29E0 +, CS_INTEL_359E +, CS_INTEL_4000 +, CS_INTEL_4003 +, CS_INTEL_3400 +, CS_INTEL_3B42 +, CS_INTEL_2770 +, CS_INTEL_2774 +, CS_INTEL_277C +, CS_INTEL_2A40 +, CS_INTEL_2E00 +, CS_INTEL_0040 +, CS_INTEL_1C10 +, CS_INTEL_1C46 +, CS_INTEL_1C49 +, CS_INTEL_1D40 +, CS_INTEL_8D47 +, CS_INTEL_1E10 +, CS_INTEL_8C4B +, CS_INTEL_8CC4 +, CS_INTEL_A145 +, CS_INTEL_A2C5 +, CS_INTEL_A242 +, CS_INTEL_A2D2 +, CS_INTEL_A2C9 +, CS_INTEL_A301 +, CS_INTEL_0685 +, CS_INTEL_4381 +, CS_INTEL_7A82 +, CS_NVIDIA_CK804 +, CS_NVIDIA_C19 +, CS_NVIDIA_C51 +, CS_NVIDIA_MCP55 +, CS_NVIDIA_MCP61 +, CS_NVIDIA_C55 +, CS_NVIDIA_MCP65 +, CS_NVIDIA_MCP67 +, CS_NVIDIA_MCP73 +, CS_NVIDIA_C73 +, CS_NVIDIA_MCP77 +, CS_NVIDIA_MCP79 +, CS_NVIDIA_MCP89 +, CS_NVIDIA_TEGRA3 +, CS_SIS_649 +, CS_SIS_656 +, CS_ATI_RS400 +, CS_ATI_RS400_A21 +, CS_ATI_RS480 +, CS_ATI_RS480_A21 +, CS_AMD_RS780 +, CS_VIA_VT8369B +, CS_ATI_FX790 +, CS_ATI_RD850 +, CS_ATI_RD870 +, CS_ATI_RD890 +, CS_ATI_FX890 +, CS_ATI_RX780 +, CS_ATI_FX990 +, CS_AMD_GX890 +, CS_AMD_X370 +, CS_VIA_VX900 +, CS_APM_STORM +, CS_IBM_VENICE +, CS_NVIDIA_T124 +, CS_NVIDIA_T210 +, CS_NVIDIA_T186 +, CS_NVIDIA_T194 +, CS_NVIDIA_T234 +, CS_MARVELL_THUNDERX2 +, CS_REDHAT_QEMU +, CS_AMPERE_EMAG +, CS_HUAWEI_KUNPENG920 +, CS_MELLANOX_BLUEFIELD +, CS_AMAZON_GRAVITRON2 +, CS_FUJITSU_A64FX +, CS_PHYTIUM_FT2000 +, CS_AMPERE_ALTRA +, CS_ARM_NEOVERSEN1 +, CS_MARVELL_OCTEON_CN96XX +, CS_MARVELL_OCTEON_CN98XX +, CS_INTEL_C620 +, CS_HYGON_C86 +, CS_PHYTIUM_S2500 +, CS_MELLANOX_BLUEFIELD2 +, CS_MAX_PCIE +}; + +enum { + RP_UNKNOWN = 0 +, RP_BROADCOM_HT2100 +, RP_INTEL_2581 +, RP_INTEL_2585 +, RP_INTEL_2589 +, RP_INTEL_2591 +, RP_INTEL_3597 +, RP_INTEL_2775 +, RP_INTEL_2771 +, RP_INTEL_8110 +, RP_INTEL_8112 +, RP_INTEL_8180 +, RP_INTEL_8181 +, RP_INTEL_8184 +, RP_INTEL_8185 +, RP_INTEL_3C02 +, RP_INTEL_3C03 +, RP_INTEL_3C04 +, RP_INTEL_3C05 +, RP_INTEL_3C06 +, RP_INTEL_3C07 +, RP_INTEL_3C08 +, RP_INTEL_3C09 +, RP_INTEL_3C0A +, RP_INTEL_3C0B +, RP_INTEL_2F04 +, RP_INTEL_2F08 +, RP_INTEL_0C01 +, RP_INTEL_1601 +, RP_INTEL_1605 +, RP_INTEL_1609 +, RP_INTEL_1901 +, RP_INTEL_1905 +, RP_INTEL_1909 +, RP_INTEL_5904 +, RP_NVIDIA_CK804 +, RP_NVIDIA_C19 +, RP_NVIDIA_C51 +, RP_NVIDIA_MCP55 +, RP_NVIDIA_MCP61 +, RP_NVIDIA_C55 +, RP_NVIDIA_MCP65 +}; + +#endif //NVDEVID_H + diff --git a/src/nvidia/arch/nvalloc/common/inc/nvpcie.h b/src/nvidia/arch/nvalloc/common/inc/nvpcie.h new file mode 100644 index 000000000..56ed38fc1 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/nvpcie.h @@ -0,0 +1,292 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVPCIE_H +#define NVPCIE_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Private PCI Express related defines and structures. * +* * +\***************************************************************************/ + +#define PCI_VENDOR_ID 0x00 +#ifndef PCI_DEVICE_ID +#define PCI_DEVICE_ID 0x02 +#endif +#define PCI_BASE_ADDRESS_1 0x14 /* Aperture Base */ +#define PCI_BASE_ADDRESS_2 0x18 /* Aperture Base */ +#define PCI_CAPABILITY_LIST 0x34 +#define PCI_DEVICE_SPECIFIC 0x40 + +#define NV_PCI_ID 0x0 +#define NV_PCI_ID_VENDOR 15:0 +#define NV_PCI_ID_VENDOR_NVIDIA 0x10DE +#define NV_PCI_ID_DEVICE 31:16 + +#define PCI_MAX_SLOTS 255 +#define PCI_MAX_LANE_WIDTH 32 + +#define PCI_MAX_FUNCTION 8 +#define PCI_INVALID_VENDORID 0xFFFF +#define PCI_INVALID_DEVICEID 0xFFFF +#define PCI_INVALID_SUBVENDORID 0xFFFF +#define PCI_INVALID_SUBDEVICEID 0xFFFF + +#define PCI_CLASS_BRIDGE_DEV 0x06 +#define PCI_SUBCLASS_BR_HOST 0x00 +#define PCI_MULTIFUNCTION 0x80 + +// From PCI Local Bus Specification, Revision 3.0 + +#define CAP_ID_MASK 0xFF + +#define CAP_ID_PMI 0x01 +#define CAP_ID_AGP 0x02 +#define CAP_ID_VPD 0x03 +#define CAP_ID_SLOT_ID 0x04 +#define CAP_ID_MSI 0x05 +#define CAP_ID_HOT_SWAP 0x06 +#define CAP_ID_PCI_X 0x07 +#define CAP_ID_HYPER_TRANSPORT 0x08 +#define CAP_ID_VENDOR_SPECIFIC 0x09 +#define CAP_ID_DEBUG_PORT 0x0A +#define CAP_ID_CRC 0x0B +#define CAP_ID_HOT_PLUG 0x0C +#define CAP_ID_SUBSYSTEM_ID 0x0D +#define CAP_ID_AGP8X 0x0E +#define CAP_ID_SECURE 0x0F +#define CAP_ID_PCI_EXPRESS 0x10 +#define CAP_ID_MSI_X 0x11 + +// +// Extended config space size is 4096 bytes. 
+// +#define PCI_EXTENDED_CONFIG_SPACE_LENGTH 4096 + +// +// From PCI Local Bus Specification, Revision 3.0 +// HEADER TYPE0 Definitions - Byte offsets +// +#define PCI_HEADER_TYPE0_VENDOR_ID 0x00 +#define PCI_HEADER_TYPE0_DEVICE_ID 0x02 +#define PCI_HEADER_TYPE0_COMMAND 0x04 +#define PCI_HEADER_TYPE0_STATUS 0x06 +#define PCI_HEADER_TYPE0_REVISION_ID 0x08 +#define PCI_HEADER_TYPE0_PROGIF 0x09 +#define PCI_HEADER_TYPE0_SUBCLASS 0x0A +#define PCI_HEADER_TYPE0_BASECLASS 0x0B +#define PCI_HEADER_TYPE0_CACHE_LINE_SIZE 0x0C +#define PCI_HEADER_TYPE0_LATENCY_TIMER 0x0D +#define PCI_HEADER_TYPE0_HEADER_TYPE 0x0E +#define PCI_HEADER_TYPE0_BIST 0x0F +#define PCI_HEADER_TYPE0_BAR0 0x10 +#define PCI_HEADER_TYPE0_BAR1 0x14 +#define PCI_HEADER_TYPE0_BAR2 0x18 +#define PCI_HEADER_TYPE0_BAR3 0x1C +#define PCI_HEADER_TYPE0_BAR4 0x20 +#define PCI_HEADER_TYPE0_BAR5 0x24 +#define PCI_HEADER_TYPE0_CBCIS_PTR 0x28 +#define PCI_HEADER_TYPE0_SUBSYS_VEN_ID 0x2C +#define PCI_HEADER_TYPE0_SUBSYS_ID 0x2E +#define PCI_HEADER_TYPE0_ROMBAR 0x30 +#define PCI_HEADER_TYPE0_CAP_PTR 0x34 +#define PCI_HEADER_TYPE0_INT_LINE 0x3C +#define PCI_HEADER_TYPE0_INT_PIN 0x3D +#define PCI_HEADER_TYPE0_MIN_GNT 0x3E +#define PCI_HEADER_TYPE0_MAX_LAT 0x3F + +// +// From PCI Express Base Specification Revision 2.0 +// HEADER TYPE1 Definitions +#define PCI_HEADER_TYPE1_BRIDGE_CONTROL 0x3E +#define PCI_HEADER_TYPE1_BRIDGE_CONTROL_VGA_EN 0x08 + +#define PCIE_LINK_CAPABILITIES_2 0x000000A4 /* R--4R */ +#define PCIE_LINK_CAPABILITIES_2_RSVD 0:0 /* C--VF */ +#define PCIE_LINK_CAPABILITIES_2_RSVD_INIT 0x00000000 /* C---V */ +#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED 7:1 /* R-EVF */ +#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1_GEN2_GEN3_GEN4_GEN5 0x0000001F /* R---V */ +#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_HIDDEN 0x00000000 /* R---V */ +#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1 0x00000001 /* R---V */ +#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1_GEN2 0x00000003 /* R---V */ +#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1_GEN2_GEN3 0x00000007 /* R-E-V */ +#define PCIE_LINK_CAPABILITIES_2_SUPPORTED_LINK_SPEED_GEN1_GEN2_GEN3_GEN4 0x0000000F /* R---V */ +#define PCIE_LINK_CAPABILITIES_2_CROSS_LINK_SUPPORT 8:8 /* C--VF */ +#define PCIE_LINK_CAPABILITIES_2_CROSS_LINK_SUPPORT_DISABLED 0x00000000 /* C---V */ +#define PCIE_LINK_CAPABILITIES_2_RET_PRESENCE_DET_SUPP 23:23 /* R-EVF */ +#define PCIE_LINK_CAPABILITIES_2_RET_PRESENCE_DET_SUPP_UNSET 0x00000000 /* R-E-V */ +#define PCIE_LINK_CAPABILITIES_2_2RET_PRESENCE_DET_SUPP 24:24 /* R-EVF */ +#define PCIE_LINK_CAPABILITIES_2_2RET_PRESENCE_DET_SUPP_UNSET 0x00000000 /* R-E-V */ +#define PCIE_LINK_CAPABILITIES_2_RSVD1 31:25 /* C--VF */ +#define PCIE_LINK_CAPABILITIES_2_RSVD1_INIT 0x00000000 /* C---V */ + +// +// PCI Express Virtual Peer-to-Peer Approval Definition +// +// These offsets are unused in hardware on existing chips and are reserved on +// future chips. Software has defined a virtual PCI capability that may be +// emulated by hypervisors at these offsets, and this capability is not tied +// to any specific hardware. 
+// +// +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0 0x000000C8 +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0_ID 7:0 +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0_NEXT 15:8 +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0_LENGTH 23:16 +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0_SIG_LO 31:24 +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1 0x000000CC +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1_SIG_HI 15:0 +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1_VERSION 18:16 +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1_PEER_CLIQUE_ID 22:19 +#define NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_1_RSVD 31:23 + +#define NV_PCI_VIRTUAL_P2P_APPROVAL_SIGNATURE 0x00503250 + +// Chipset-specific definitions. +// Intel SantaRosa definitions +#define INTEL_2A00_CONFIG_SPACE_BASE 0x60 + +// Intel Montevina definitions +#define INTEL_2A40_CONFIG_SPACE_BASE 0x60 +#define INTEL_2A40_ASLM_CAPABLE_REVID 0x05 + +// Intel EagleLake definitions +#define INTEL_2E00_CONFIG_SPACE_BASE 0x60 + +// Intel Bearlake definitions +#define INTEL_29XX_CONFIG_SPACE_BASE 0x60 + +// Intel BroadWater definitions +#define INTEL_29A0_CONFIG_SPACE_BASE 0x60 + +// Intel Grantsdale definitions +#define INTEL_25XX_CONFIG_SPACE_BASE 0x48 + +// Intel Tumwater definitions +#define INTEL_359E_CONFIG_SPACE_BASE 0xCC + +// Intel Greencreek definitions +#define INTEL_25E0_CONFIG_SPACE_BASE_ADDRESS 0xE0000000 + +// Intel Stoakley definitions +#define INTEL_4000_CONFIG_SPACE_BASE_ADDRESS 0xE0000000 + +// Intel SkullTrail definitions +#define INTEL_4003_CONFIG_SPACE_BASE_ADDRESS_F 0xF0000000 +#define INTEL_4003_CONFIG_SPACE_BASE_ADDRESS_E 0xE0000000 +#define INTEL_4003_CONFIG_SPACE_BASE_ADDRESS INTEL_4003_CONFIG_SPACE_BASE_ADDRESS_F +#define INTEL_4003_CONFIG_SPACE_BASE 0x64 + +// SiS 656 +#define SIS_656_CONFIG_SPACE_BASE 0xE0 +#define SIS_656_CONFIG_SPACE_BASE_ADDRESS 3:0 // mapped to 31:28 + +// PCI/PCIE definitions +#define PCI_MAX_CAPS 20 // max caps to parse +#define PCI_MAX_DEVICES 32 // max devices on bus +#define PCI_MAX_FUNCTIONS 8 // max functions for a device +#define PCI_CAP_HEADER_ID 7:0 // PCI cap header id +#define PCI_CAP_HEADER_NEXT 15:8 // PCI cap header next +#define PCI_COMMON_CLASS_SUBCLASS 0x0a // PCI class/subclass (word) +#define PCI_COMMON_CLASS_SUBBASECLASS_HOST 0x0600 // Host bridge (connect PCI to CPU) [00] + Bridge Device [06] +#define PCI_COMMON_CLASS_SUBBASECLASS_P2P 0x0604 // PCI-to-PCI bridge (connects PCI buses) [04] + Bridge Device [06] +#define PCI_COMMON_CLASS_SUBBASECLASS_VGA 0x0300 +#define PCI_COMMON_CLASS_SUBBASECLASS_3DCTRL 0x0302 +#define PCI_COMMON_CAP_PTR 0x34 // PCI common cap ptr (byte) +#define PCI_TYPE_1_SECONDARY_BUS_NUMBER 0x19 // PCI type 1 sec bus (byte) +#define PCI_TYPE_1_SUBORDINATE_BUS_NUMBER 0x1a // PCI type 1 sub bus (byte) +#define PCIE_CAP_HEADER_ID 15:0 // PCIE cap header id +#define PCIE_CAP_HEADER_ID_INVALID 0xffff +#define PCIE_CAP_HEADER_NEXT 31:20 // PCIE cap header next +#define PCIE_BUS_SHIFT 20 // PCIE cfg space bus shift +#define PCIE_DEVICE_SHIFT 15 // PCIE cfg space dev shift +#define PCIE_FUNC_SHIFT 12 // PCIE cfg space func shift +#define PCIE_CAP_VERSION 19:16 // PCIE cap version +#define PCIE_CAP_VERSION_2P0 2 // PCIE 2.0 version +#define PCIE_LINK_CNTRL_STATUS_2_OFFSET 0x30 // PCIE Link Control/Status 2 offset +#define PCIE_LINK_STATUS_2 31:16 // PCIE Link Status 2 Register +#define PCIE_LINK_STATUS_2_DE_EMPHASIS 0:0 // PCIE De-Emphasis Level +#define PCI_COMMON_SUBSYSTEM_VENDOR_ID 0x2c // PCI subsystem Vendor Id +#define PCI_COMMON_SUBSYSTEM_ID 0x2e // PCI subsystem Id + + +// PCI Express Capability ID in the 
enhanced configuration space +#define PCIE_CAP_ID_ERROR 0x1 // PCIE Advanced Error Reporting +#define PCIE_CAP_ID_VC 0x2 // PCIE Virtual Channel (VC) +#define PCIE_CAP_ID_SERIAL 0x3 // PCIE Device Serial Number +#define PCIE_CAP_ID_POWER 0x4 // PCIE Power Budgeting +#define PCIE_CAP_ID_L1_PM_SUBSTATES 0x1E // PCIE L1 PM Substates + +// Intel CPU family. +#define INTEL_CPU_FAMILY_06 0x06 +#define INTEL_CPU_FAMILY_15 0x0f +#define INTEL_CPU_FAMILY_16 0x10 +#define INTEL_CPU_FAMILY_21 0x15 + +// Intel CPU Model. Calculated as Model += (extModel << 4). +#define INTEL_CPU_MODEL_2A 0x2a +#define INTEL_CPU_MODEL_2D 0x2d +#define INTEL_CPU_MODEL_3A 0x3a +#define INTEL_CPU_MODEL_3F 0x3f +// Symbolic defines for each possible virtual channel +enum +{ + RM_PCIE_VIRTUAL_CHANNEL_0 = 0, + RM_PCIE_VIRTUAL_CHANNEL_1, + RM_PCIE_VIRTUAL_CHANNEL_2, + RM_PCIE_VIRTUAL_CHANNEL_3, + RM_PCIE_VIRTUAL_CHANNEL_4, + RM_PCIE_VIRTUAL_CHANNEL_5, + RM_PCIE_VIRTUAL_CHANNEL_6, + RM_PCIE_VIRTUAL_CHANNEL_7, + RM_PCIE_VIRTUAL_CHANNEL_INVALID +}; + +struct OBJCL; +// root port setup functions +NV_STATUS Broadcom_HT2100_setupFunc(OBJGPU *, OBJCL*); + +NV_STATUS Intel_RP25XX_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_RP81XX_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_RP3C0X_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_RP2F0X_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_RP0C0X_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_Broadwell_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_Skylake_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_Skylake_U_Pch_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_Skylake_H_Pch_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Intel_Kabylake_Y_setupFunc(OBJGPU *, OBJCL*); + +NV_STATUS Nvidia_RPC19_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Nvidia_RPC51_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS Nvidia_RPC55_setupFunc(OBJGPU *, OBJCL*); + +NV_STATUS AMD_RP1480_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS AMD_RP1630_setupFunc(OBJGPU *, OBJCL*); +NV_STATUS AMD_RP1483_setupFunc(OBJGPU *, OBJCL*); + +// Determines if the GPU is in a multi-GPU board based on devid checks +NvBool gpuIsMultiGpuBoard(OBJGPU *, NvBool *, NvBool *); + +#endif // NVPCIE_H diff --git a/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h b/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h new file mode 100644 index 000000000..1ff7df68a --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/nvrangetypes.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file nvrangetypes.h + * @brief Range types and operator macros + * @note #include a header to define NvUxx and NvSxx before sourcing this file. + */ + +#ifndef _NVRANGETYPES_H_ +#define _NVRANGETYPES_H_ + + +// +// Define range types by convention +// +#define __NV_DEFINE_RANGE_TYPE(T) \ +typedef struct NvRange ## T \ +{ \ + Nv ## T min; \ + Nv ## T max; \ +} NvRange ## T; + + +__NV_DEFINE_RANGE_TYPE(U64) // NvRangeU64 +__NV_DEFINE_RANGE_TYPE(S64) // NvRangeS64 +__NV_DEFINE_RANGE_TYPE(U32) // NvRangeU32 +__NV_DEFINE_RANGE_TYPE(S32) // NvRangeS32 +__NV_DEFINE_RANGE_TYPE(U16) // NvRangeU16 +__NV_DEFINE_RANGE_TYPE(S16) // NvRangeS16 +__NV_DEFINE_RANGE_TYPE(U8) // NvRangeU8 +__NV_DEFINE_RANGE_TYPE(S8) // NvRangeS8 + + +// +// Operator macros +// +// Macros are named xxx_RANGE (rather than xxx_RANGEU32, etc.) since they work +// properly on ranges with any number of bits, signed or unsigned. +// + +#define NV_EQUAL_RANGE(r1, r2) ((r1).min == (r2).min && (r1).max == (r2).max) +#define NV_EMPTY_INCLUSIVE_RANGE(r) ((r).min > (r).max) +#define NV_EMPTY_EXCLUSIVE_RANGE(r) ((r).min + 1 > (r).max - 1) +#define NV_WITHIN_INCLUSIVE_RANGE(r, x) ((r).min <= (x) && (x) <= (r).max) +#define NV_WITHIN_EXCLUSIVE_RANGE(r, x) ((r).min < (x) && (x) < (r).max) +#define NV_IS_SUBSET_RANGE(r1, r2) ((r1).min >= (r2).min && (r2).max >= (r1).max) +#define NV_IS_SUPERSET_RANGE(r1, r2) ((r1).min <= (r2).min && (r2).max <= (r1).max) +#define NV_CENTER_OF_RANGE(r) ((r).min / 2 + ((r).max + 1) / 2) // Avoid overflow and rounding anomalies. +#define NV_IS_OVERLAPPING_RANGE(r1, r2) \ + (NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).min) || \ + NV_WITHIN_INCLUSIVE_RANGE((r1), (r2).max)) + +#define NV_DISTANCE_FROM_RANGE(r, x) ((x) < (r).min? (r).min - (x): ((x) > (r).max? (x) - (r).max: 0)) +#define NV_VALUE_WITHIN_INCLUSIVE_RANGE(r, x) ((x) < (r).min? (r).min : ((x) > (r).max? (r).max : (x))) +#define NV_VALUE_WITHIN_EXCLUSIVE_RANGE(r, x) ((x) <= (r).min? (r).min + 1 : ((x) >= (r).max? 
(r).max - 1 : (x))) + +#define NV_INIT_RANGE(r, x, y) \ +do \ +{ \ + (r).min = (x); \ + (r).max = (y); \ +} while(0) + +#define NV_ASSIGN_DELTA_RANGE(r, x, d) \ +do \ +{ \ + (r).min = (x) - (d); \ + (r).max = (x) + (d); \ +} while(0) + +#define NV_ASSIGN_INTERSECTION_RANGE(r1, r2) \ +do \ +{ \ + if ((r1).min < (r2).min) \ + (r1).min = (r2).min; \ + if ((r1).max > (r2).max) \ + (r1).max = (r2).max; \ +} while(0) + +#define NV_ASSIGN_UNION_RANGE(r1, r2) \ +do \ +{ \ + if ((r1).min > (r2).min) \ + (r1).min = (r2).min; \ + if ((r1).max < (r2).max) \ + (r1).max = (r2).max; \ +} while(0) + +#define NV_MULTIPLY_RANGE(r, x) \ +do \ +{ \ + (r).min *= (x); \ + (r).max *= (x); \ +} while(0) + +#define NV_DIVIDE_FLOOR_RANGE(r, x) \ +do \ +{ \ + (r).min /= (x); \ + (r).max /= (x); \ +} while(0) + +#define NV_DIVIDE_CEILING_RANGE(r, x) \ +do \ +{ \ + (r).min = ((r).min + (x) - 1) / (x); \ + (r).max = ((r).max + (x) - 1) / (x); \ +} while(0) + +#define NV_DIVIDE_ROUND_RANGE(r, x) \ +do \ +{ \ + (r).min = ((r).min + (x) / 2) / (x); \ + (r).max = ((r).max + (x) / 2) / (x); \ +} while(0) + +#define NV_DIVIDE_WIDE_RANGE(r, x) \ +do \ +{ \ + (r).min /= (x); \ + (r).max = ((r).max + (x) - 1) / (x); \ +} while(0) + +#define NV_DIVIDE_NARROW_RANGE(r, x) \ +do \ +{ \ + (r).min = ((r).min + (x) - 1) / (x); \ + (r).max /= (x); \ +} while(0) + +#define NV_VALUE_WITHIN_INCLUSIVE_RANGE(r, x) \ + ((x) < (r).min? (r).min : ((x) > (r).max? (r).max : (x))) + +#define NV_WITHIN_INCLUSIVE_RANGE(r, x) \ + ((r).min <= (x) && (x) <= (r).max) + +#define NV_DISTANCE_FROM_RANGE(r, x) \ + ((x) < (r).min? (r).min - (x): ((x) > (r).max? (x) - (r).max: 0)) + +#endif // _NVRANGETYPES_H_ + diff --git a/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi.h b/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi.h new file mode 100644 index 000000000..b8410f667 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi.h @@ -0,0 +1,2502 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SMBPBI_H +#define _SMBPBI_H + +#include "oob/smbpbi_impl.h" + +/*! + * This file contains definitions for SMBPBI - SMBUS Out-Of-Band (OOB) interface + */ +/*! + * NV_MSGBOX_CMD() macro below and its derivatives depend on DRF_DEF() and DRF_NUM() + * macros, that are defined in "nvdrf.h" header file, which is a part of + * smbus/msgbox SDK. 
That file needs to be #included before this (smbpbi.h) + * header file. + */ + +/******************************************************************************** + * * + * GPU COMMANDS * + * * + ********************************************************************************/ + +// +// Encoding: +// 30 29 23 16 15 8 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-. +// |I|C|R| STATUS | ARG2 | ARG1 | OPCODE | COMMAND +// `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-' +// 31 28 24 +// + +#define NV_MSGBOX_CMD_OPCODE 7:0 +#define NV_MSGBOX_CMD_OPCODE_NULL_CMD 0x00000000 +#define NV_MSGBOX_CMD_OPCODE_GET_CAP_DWORD 0x00000001 +#define NV_MSGBOX_CMD_OPCODE_GET_TEMP 0x00000002 +#define NV_MSGBOX_CMD_OPCODE_GET_EXT_TEMP 0x00000003 /* With 8 fractional bits! */ +#define NV_MSGBOX_CMD_OPCODE_GET_POWER 0x00000004 +#define NV_MSGBOX_CMD_OPCODE_GET_SYS_ID_DATA 0x00000005 +#define NV_MSGBOX_CMD_OPCODE_GET_ECC_V1 0x00000006 +#define NV_MSGBOX_CMD_OPCODE_GET_ECC_V2 0x00000007 +#define NV_MSGBOX_CMD_OPCODE_GPU_PCONTROL 0x00000008 +#define NV_MSGBOX_CMD_OPCODE_GPU_SYSCONTROL 0x00000009 +#define NV_MSGBOX_CMD_OPCODE_SET_MASTER_CAPS 0x0000000a +#define NV_MSGBOX_CMD_OPCODE_SET_PRIMARY_CAPS 0x0000000a // Duplicate value till bug 3190552 is addressed. +#define NV_MSGBOX_CMD_OPCODE_GPU_REQUEST_CPL 0x0000000b +#define NV_MSGBOX_CMD_OPCODE_GET_ECC_V3 0x0000000c +#define NV_MSGBOX_CMD_OPCODE_SCRATCH_READ 0x0000000d +#define NV_MSGBOX_CMD_OPCODE_SCRATCH_WRITE 0x0000000e +#define NV_MSGBOX_CMD_OPCODE_SCRATCH_COPY 0x0000000f +#define NV_MSGBOX_CMD_OPCODE_ASYNC_REQUEST 0x00000010 +#define NV_MSGBOX_CMD_OPCODE_REGISTER_ACCESS 0x00000011 +#define NV_MSGBOX_CMD_OPCODE_GET_POWER_CONNECTOR_STATE 0x00000012 +#define NV_MSGBOX_CMD_OPCODE_GET_PAGE_RETIREMENT_STATS 0x00000013 +#define NV_MSGBOX_CMD_OPCODE_GET_ECC_V4 0x00000014 +#define NV_MSGBOX_CMD_OPCODE_GET_THERM_PARAM 0x00000015 +#define NV_MSGBOX_CMD_OPCODE_GET_ECC_V5 0x00000016 +#define NV_MSGBOX_CMD_OPCODE_ACCESS_WP_MODE 0x00000017 + +// +// TODO: Get rid of MISC_GPU_FLAGS once all SMBPBI implementations have made +// the switch to MISC_DEVICE_FLAGS. 
+// +#define NV_MSGBOX_CMD_OPCODE_GET_MISC_GPU_FLAGS 0x00000018 +#define NV_MSGBOX_CMD_OPCODE_GET_MISC_DEVICE_FLAGS 0x00000018 +#define NV_MSGBOX_CMD_OPCODE_GPU_UTIL_COUNTERS 0x00000019 +#define NV_MSGBOX_CMD_OPCODE_GET_NVLINK_INFO 0x0000001a +#define NV_MSGBOX_CMD_OPCODE_GET_CLOCK_FREQ_INFO 0x0000001b +#define NV_MSGBOX_CMD_OPCODE_BUNDLE_LAUNCH 0x0000001c +#define NV_MSGBOX_CMD_OPCODE_GET_DRIVER_EVENT_MSG 0x0000001d +#define NV_MSGBOX_CMD_OPCODE_GET_ECC_V6 0x0000001e +#define NV_MSGBOX_CMD_OPCODE_REMAP_ROW_STATS 0x00000020 +#define NV_MSGBOX_CMD_OPCODE_GET_PCIE_LINK_INFO 0x00000021 +#define NV_MSGBOX_CMD_OPCODE_GET_ENERGY_COUNTER 0x00000022 +#define NV_MSGBOX_CMD_OPCODE_RESERVED_0 0x00000024 +#define NV_MSGBOX_CMD_OPCODE_RESERVED_1 0x00000025 +#define NV_MSGBOX_CMD_OPCODE_GET_POWER_HINT_INFO 0x00000026 + +#define NV_MSGBOX_CMD_ARG1 15:8 +#define NV_MSGBOX_CMD_ARG1_NULL 0x00000000 +#define NV_MSGBOX_CMD_ARG1_TEMP_GPU_0 0x00000000 +#define NV_MSGBOX_CMD_ARG1_TEMP_NVSWITCH_0 0x00000000 +#define NV_MSGBOX_CMD_ARG1_TEMP_GPU_1 0x00000001 /* For Gemini boards */ +#define NV_MSGBOX_CMD_ARG1_TEMP_BOARD 0x00000004 +#define NV_MSGBOX_CMD_ARG1_TEMP_MEMORY 0x00000005 +#define NV_MSGBOX_CMD_ARG1_TEMP_PWR_SUPPLY 0x00000006 +#define NV_MSGBOX_CMD_ARG1_TEMP_NUM_SENSORS 7 +#define NV_MSGBOX_CMD_ARG1_POWER_TOTAL 0x00000000 +#define NV_MSGBOX_CMD_ARG1_SMBPBI_POWER 0x00000001 +/* SysId info type encodings for opcode NV_MSGBOX_CMD_OPCODE_GET_SYS_ID_DATA (0x05) */ +#define NV_MSGBOX_CMD_ARG1_BOARD_PART_NUM_V1 0x00000000 +#define NV_MSGBOX_CMD_ARG1_OEM_INFO_V1 0x00000001 +#define NV_MSGBOX_CMD_ARG1_SERIAL_NUM_V1 0x00000002 +#define NV_MSGBOX_CMD_ARG1_MARKETING_NAME_V1 0x00000003 +#define NV_MSGBOX_CMD_ARG1_GPU_PART_NUM_V1 0x00000004 +#define NV_MSGBOX_CMD_ARG1_MEMORY_VENDOR_V1 0x00000005 +#define NV_MSGBOX_CMD_ARG1_MEMORY_PART_NUM_V1 0x00000006 +#define NV_MSGBOX_CMD_ARG1_BUILD_DATE_V1 0x00000007 +#define NV_MSGBOX_CMD_ARG1_FIRMWARE_VER_V1 0x00000008 +#define NV_MSGBOX_CMD_ARG1_VENDOR_ID_V1 0x00000009 +#define NV_MSGBOX_CMD_ARG1_DEV_ID_V1 0x0000000a +#define NV_MSGBOX_CMD_ARG1_SUB_VENDOR_ID_V1 0x0000000b +#define NV_MSGBOX_CMD_ARG1_SUB_ID_V1 0x0000000c +#define NV_MSGBOX_CMD_ARG1_GPU_GUID_V1 0x0000000d +#define NV_MSGBOX_CMD_ARG1_INFOROM_VER_V1 0x0000000e +#define NV_MSGBOX_CMD_ARG1_PRODUCT_LENGTH_V1 0x0000000f +#define NV_MSGBOX_CMD_ARG1_PRODUCT_WIDTH_V1 0x00000010 +#define NV_MSGBOX_CMD_ARG1_PRODUCT_HEIGHT_V1 0x00000011 +#define NV_MSGBOX_CMD_ARG1_PCIE_SPEED_V1 0x00000012 +#define NV_MSGBOX_CMD_ARG1_PCIE_WIDTH_V1 0x00000013 +#define NV_MSGBOX_CMD_ARG1_TGP_LIMIT_V1 0x00000014 +#define NV_MSGBOX_CMD_ARG1_SYS_ID_DATA_TYPE_MAX 0x00000014 /* Adjust, when adding new types */ +#define NV_MSGBOX_CMD_ARG1_REGISTER_ACCESS_WRITE 0x00000000 +#define NV_MSGBOX_CMD_ARG1_REGISTER_ACCESS_READ 0x00000001 +#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_TARGET 0x00000000 +#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_HW_SLOWDN 0x00000001 +#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_SHUTDN 0x00000002 +#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_HBM_SLOWDN 0x00000003 +#define NV_MSGBOX_CMD_ARG1_THERM_PARAM_TEMP_SW_SLOWDN 0x00000004 +#define NV_MSGBOX_CMD_ARG1_GET_MISC_ECC_ENABLED_STATE 0x00000000 +#define NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_RESET_REQUIRED 0x00000001 +#define NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_FLAGS_PAGE_0 0x00000000 +#define NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_FLAGS_PAGE_1 0x00000001 +#define NV_MSGBOX_CMD_ARG1_GET_MISC_DEVICE_FLAGS_PAGE_0 0x00000000 +#define NV_MSGBOX_CMD_ARG1_GET_MISC_DEVICE_FLAGS_PAGE_1 0x00000001 + +#define 
NV_MSGBOX_CMD_ARG1_GPU_UTIL_COUNTERS_CONTEXT_TIME 0x00000000 +#define NV_MSGBOX_CMD_ARG1_GPU_UTIL_COUNTERS_SM_TIME 0x00000001 +#define NV_MSGBOX_CMD_ARG1_GPU_UTIL_COUNTERS_RESET_COUNTERS 0x000000FF +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK_COUNT 0x00000000 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK_STATE_V1 0x00000001 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK_BANDWIDTH 0x00000002 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_ERR_COUNTER_REPLAY 0x00000003 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_ERR_COUNTER_RECOVERY 0x00000004 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_ERR_COUNTER_FLIT_CRC 0x00000005 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_ERR_COUNTER_DATA_CRC 0x00000006 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK_STATE_V2 0x00000007 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_SUBLINK_WIDTH 0x00000008 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_THROUGHPUT_DATA_TX 0x00000009 +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_THROUGHPUT_DATA_RX 0x0000000a +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_THROUGHPUT_RAW_TX 0x0000000b +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_THROUGHPUT_RAW_RX 0x0000000c +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_TRAINING_ERROR_STATE 0x0000000d +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_RUNTIME_ERROR_STATE 0x0000000e +#define NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_AVAILABILITY 0x0000000f +#define NV_MSGBOX_CMD_ARG1_GET_CLOCK_FREQ_INFO_CURRENT 0x00000000 +#define NV_MSGBOX_CMD_ARG1_GET_CLOCK_FREQ_INFO_MIN 0x00000001 +#define NV_MSGBOX_CMD_ARG1_GET_CLOCK_FREQ_INFO_MAX 0x00000002 +#define NV_MSGBOX_CMD_ARG1_GET_CLOCK_FREQ_INFO_PAGE_3 0x00000003 +#define NV_MSGBOX_CMD_ARG1_REMAP_ROWS_RAW_COUNTS 0x00000000 +#define NV_MSGBOX_CMD_ARG1_REMAP_ROWS_STATE_FLAGS 0x00000001 +#define NV_MSGBOX_CMD_ARG1_REMAP_ROWS_HISTOGRAM 0x00000002 +#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_0 0x00000000 +#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_1 0x00000001 +#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_2 0x00000002 +#define NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_3 0x00000003 + +/* Async requests */ +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_GET \ + 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_SET \ + 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_INFO_GET \ + 0x00000002 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_THERMAL_FAN_V1_COUNT_GET \ + 0x00000003 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_THERMAL_FAN_V1_INFO_GET \ + 0x00000004 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_THERMAL_FAN_V1_STATUS_GET \ + 0x00000005 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_OVERCLOCKING_LIMIT_CONTROL_GET \ + 0x00000006 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_OVERCLOCKING_LIMIT_CONTROL_SET \ + 0x00000007 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_ENERGY_COUNTER_STATUS_GET \ + 0x00000008 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_VIOLATION_COUNTERS_STATUS_GET \ + 0x00000009 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_UTILIZATION_RATE_GET \ + 0x0000000a +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_OOB_CLOCK_LIMIT_SET \ + 0x0000000b +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_DEVICE_MODE_CONTROL \ + 0x0000000c +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_TEST_MESSAGE_SEND \ + 0x0000000e +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_CLOCK_LIMIT_GET \ + 0x0000000d +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_FAN_CURVE_POINTS_GET_SET \ + 0x0000000f +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_POWER_HINT_GET \ + 0x00000010 +#define NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_POLL 0x000000ff + + +#define 
NV_MSGBOX_CMD_ARG1_ECC_V2_SEL 15:14 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_SEL_CNT 0x00000000 /* counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_SEL_FBA 0x00000001 /* FB addresses */ +/* Counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_DEV 13:12 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_DEV_FB 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_DEV_GR 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_FB_SRC 11:10 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_FB_SRC_LTC 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_FB_SRC_FB 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_GR_SRC 11:10 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_GR_SRC_L1 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_GR_SRC_RF 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_TYPE 9:8 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_TYPE_SBE 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V2_CNT_TYPE_DBE 0x00000001 +/* FB addresses */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_FBA_SC 13:13 /* scope */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_FBA_SC_IND 0x00000000 /* individual addresses */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_FBA_SC_CUMUL 0x00000001 /* cumulative counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_FBA_IND_FLD 12:12 /* field */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_FBA_IND_FLD_CTRS 0x00000000 /* counters: sbe, dbe */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_FBA_IND_FLD_ADDR 0x00000001 /* address value */ +#define NV_MSGBOX_CMD_ARG1_ECC_V2_FBA_IND_INDEX 11:8 /* address array index: 0-13 */ + +#define NV_MSGBOX_CMD_ARG1_ECC_V3_SEL 15:14 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_SEL_CNT 0x00000000 /* counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V3_SEL_FBA 0x00000001 /* FB addresses */ +/* Counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_DEV 13:12 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_DEV_FB 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_DEV_GR 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_FB_SRC 11:10 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_FB_SRC_LTC 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_FB_SRC_FB 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_GR_SRC 11:10 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_GR_SRC_L1 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_GR_SRC_RF 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_GR_SRC_TEX 0x00000002 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_TYPE 9:8 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_TYPE_SBE 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V3_CNT_TYPE_DBE 0x00000001 +/* FB addresses */ +#define NV_MSGBOX_CMD_ARG1_ECC_V3_FBA_SC 13:13 /* scope */ +#define NV_MSGBOX_CMD_ARG1_ECC_V3_FBA_SC_IND 0x00000000 /* individual addresses */ +// These are not present in V3 +//#define NV_MSGBOX_CMD_ARG1_ECC_V3_FBA_SC_CUMUL 0x00000001 /* cumulative counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V3_FBA_IND_FLD 12:12 /* field */ +#define NV_MSGBOX_CMD_ARG1_ECC_V3_FBA_IND_FLD_CTRS 0x00000000 /* counters: sbe, dbe */ +#define NV_MSGBOX_CMD_ARG1_ECC_V3_FBA_IND_FLD_ADDR 0x00000001 /* address value */ +#define NV_MSGBOX_CMD_ARG1_ECC_V3_FBA_IND_INDEX 11:8 /* address array index: 0-13 */ + +#define NV_MSGBOX_CMD_ARG1_ECC_V4_SEL 15:14 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_SEL_CNT 0x00000000 /* counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V4_SEL_FBA 0x00000001 /* FB addresses */ +/* Counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_DEV 13:12 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_DEV_FB 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_DEV_GR 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_FB_SRC 11:10 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_FB_SRC_LTC 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_FB_SRC_FB 0x00000001 +#define 
NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_GR_SRC 11:10 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_GR_SRC_SHM 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_GR_SRC_RF 0x00000001 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_GR_SRC_TEX 0x00000002 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_TYPE 9:8 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_TYPE_SBE 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ECC_V4_CNT_TYPE_DBE 0x00000001 +/* FB addresses */ +#define NV_MSGBOX_CMD_ARG1_ECC_V4_FBA_SC 13:13 /* scope */ +#define NV_MSGBOX_CMD_ARG1_ECC_V4_FBA_SC_IND 0x00000000 /* individual addresses */ +// These are not present in V4 +//#define NV_MSGBOX_CMD_ARG1_ECC_V4_FBA_SC_CUMUL 0x00000001 /* cumulative counters */ +#define NV_MSGBOX_CMD_ARG1_ECC_V4_FBA_IND_FLD 12:12 /* field */ +#define NV_MSGBOX_CMD_ARG1_ECC_V4_FBA_IND_FLD_CTRS 0x00000000 /* counters: sbe, dbe */ +#define NV_MSGBOX_CMD_ARG1_ECC_V4_FBA_IND_FLD_ADDR 0x00000001 /* address value */ +#define NV_MSGBOX_CMD_ARG1_ECC_V4_FBA_IND_INDEX 11:8 /* address array index: 0-13 */ + +#define NV_MSGBOX_CMD_ARG1_ECC_V5_SEL 15:15 +#define NV_MSGBOX_CMD_ARG1_ECC_V5_SEL_COUNTS 0x00000000 /* address/region counts */ +#define NV_MSGBOX_CMD_ARG1_ECC_V5_SEL_ERR_BUFFER 0x00000001 /* SRAM error buffer */ + +#define NV_MSGBOX_CMD_ARG1_ECC_V5_COUNTS_INDEX_LO 14:8 /* 7 least significant index bits */ + +#define NV_MSGBOX_CMD_ARG1_ECC_V5_ERR_BUF_INDEX 11:8 /* buffer entry index */ + +#define NV_MSGBOX_CMD_ARG1_ACCESS_WP_MODE_ACTION 8:8 +#define NV_MSGBOX_CMD_ARG1_ACCESS_WP_MODE_ACTION_GET 0x00000000 +#define NV_MSGBOX_CMD_ARG1_ACCESS_WP_MODE_ACTION_SET 0x00000001 + +#define NV_MSGBOX_CMD_ARG1_BUNDLE_REQUEST_COUNT 11:8 +#define NV_MSGBOX_CMD_ARG1_BUNDLE_DISP_RULE_COUNT 15:12 +#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE 15:8 +#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_CORRECTABLE_ERROR 0 +#define NV_MSGBOX_CMD_ARG1_ECC_V6_ERROR_TYPE_UNCORRECTABLE_ERROR 1 + +// Query type of _GET_POWER_HINT_INFO +#define NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_CLK 0x00000000 +#define NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_TEMP 0x00000001 +#define NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_PROFILES 0x00000002 +#define NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_NUM \ + (NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_PROFILES + 1) + + +#define NV_MSGBOX_CMD_ARG2 23:16 +#define NV_MSGBOX_CMD_ARG2_NULL 0x00000000 + +#define NV_MSGBOX_CMD_ARG2_ECC_V2_FB_PARTITION 23:20 +#define NV_MSGBOX_CMD_ARG2_ECC_V2_FB_SLICE 19:16 +#define NV_MSGBOX_CMD_ARG2_ECC_V2_GR_GPC 23:20 +#define NV_MSGBOX_CMD_ARG2_ECC_V2_GR_TPC 19:16 + +#define NV_MSGBOX_CMD_ARG2_ECC_V3_FB_PARTITION 23:20 +#define NV_MSGBOX_CMD_ARG2_ECC_V3_FB_SLICE 19:16 +#define NV_MSGBOX_CMD_ARG2_ECC_V3_FB_SUBPARTITION 19:16 +#define NV_MSGBOX_CMD_ARG2_ECC_V3_GR_TEX 23:22 +#define NV_MSGBOX_CMD_ARG2_ECC_V3_GR_GPC 21:19 +#define NV_MSGBOX_CMD_ARG2_ECC_V3_GR_TPC 18:16 + +#define NV_MSGBOX_CMD_ARG2_ECC_V4_FB_PARTITION 23:20 +#define NV_MSGBOX_CMD_ARG2_ECC_V4_FB_SLICE 19:16 +#define NV_MSGBOX_CMD_ARG2_ECC_V4_FB_SUBPARTITION 19:16 +#define NV_MSGBOX_CMD_ARG2_ECC_V4_GR_TEX 23:22 +#define NV_MSGBOX_CMD_ARG2_ECC_V4_GR_GPC 21:19 +#define NV_MSGBOX_CMD_ARG2_ECC_V4_GR_TPC 18:16 + +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_INDEX_HI 23:21 +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE 18:16 +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_HDR_META 0x0 /* header and metadata */ + +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_ADDR_ADDR 0x1 /* addr: address */ +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_ADDR_UNCORRECTED_COUNTS 0x2 /* addr: uncorrectedCounts */ +#define 
NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_ADDR_CORRECTED_TOTAL 0x3 /* addr: correctedTotal */ +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_ADDR_CORRECTED_UNIQUE 0x4 /* addr: correctedUnique */ + +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_REGN_CORRECTED_TOTAL 0x1 /* region: correctedTotal */ +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_REGN_CORRECTED_UNIQUE 0x2 /* region: correctedUnique */ +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_REGN_UNCORRECTED_TOTAL 0x3 /* region: uncorrectedTotal */ +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_REGN_UNCORRECTED_UNIQUE 0x4 /* region: uncorrectedUnique */ + +#define NV_MSGBOX_CMD_ARG2_ECC_V5_COUNTS_TYPE_MAX 0x4 + +#define NV_MSGBOX_CMD_ARG2_ECC_V5_ERR_BUF_TYPE 17:16 /* entry element type */ +#define NV_MSGBOX_CMD_ARG2_ECC_V5_ERR_BUF_TYPE_ERR_TYPE_META 0x0 /* errorType and metadata */ +#define NV_MSGBOX_CMD_ARG2_ECC_V5_ERR_BUF_TYPE_TIME_STAMP 0x1 /* time stamp */ +#define NV_MSGBOX_CMD_ARG2_ECC_V5_ERR_BUF_TYPE_ADDR 0x2 /* address */ + +#define NV_MSGBOX_CMD_ARG2_ECC_V5_ERR_BUF_TYPE_MAX 0x2 + +#define NV_MSGBOX_CMD_ARG2_REG_INDEX_SCRATCH_PAGE 0x00000000 +#define NV_MSGBOX_CMD_ARG2_REG_INDEX_EVENTS_PENDING 0x00000001 +#define NV_MSGBOX_CMD_ARG2_REG_INDEX_EVENT_MASK 0x00000002 +#define NV_MSGBOX_CMD_ARG2_REG_INDEX_MAX 0x00000002 /* increase when adding + new registers */ + +#define NV_MSGBOX_CMD_ARG2_NVLINK_INFO_AGGREGATE 0x000000FF + +#define NV_MSGBOX_CMD_ARG2_ACCESS_WP_MODE_SET_STATE 23:16 +#define NV_MSGBOX_CMD_ARG2_ACCESS_WP_MODE_SET_STATE_DISABLED 0x0000005A +#define NV_MSGBOX_CMD_ARG2_ACCESS_WP_MODE_SET_STATE_ENABLED 0x000000A5 + +#define NV_MSGBOX_CMD_ARG2_GET_CLOCK_FREQ_INFO_GPCCLK 0x00000000 +#define NV_MSGBOX_CMD_ARG2_GET_CLOCK_FREQ_INFO_MEMCLK 0x00000001 + +#define NV_MSGBOX_CMD_ARG2_ECC_V6_COUNTER_TYPE 23:16 +#define NV_MSGBOX_CMD_ARG2_ECC_V6_COUNTER_TYPE_SRAM 0 +#define NV_MSGBOX_CMD_ARG2_ECC_V6_COUNTER_TYPE_DRAM 1 + +#define NV_MSGBOX_CMD_ARG2_REMAP_ROW_RAW_CNT_COMBINED 0x00000000 +#define NV_MSGBOX_CMD_ARG2_REMAP_ROW_RAW_CNT_UNCORR 0x00000001 +#define NV_MSGBOX_CMD_ARG2_REMAP_ROW_RAW_CNT_CORR 0x00000002 + +#define NV_MSGBOX_CMD_ARG2_REMAP_ROWS_STATE_FLAGS_PAGE0 0x00000000 + +/*! + * Arg2 for _GET_POWER_HINT_INFO + * ARG1 == _GET_POWER_HINT_INFO_CLK + * Return Min/Max frequency in MHz + */ +#define NV_MSGBOX_CMD_ARG2_GET_POWER_HINT_INFO_CLK_GR 0x00000000 +#define NV_MSGBOX_CMD_ARG2_GET_POWER_HINT_INFO_CLK_MEM 0x00000001 + +/*! 
+ * Arg2 for _GET_POWER_HINT_INFO + * ARG1 == _GET_POWER_HINT_INFO_PROFILES + * Return bits represents which profiles are available + */ +#define NV_MSGBOX_CMD_ARG2_GET_POWER_HINT_INFO_PROFILES_PAGE_0 0x00000000 +#define NV_MSGBOX_CMD_ARG2_GET_POWER_HINT_INFO_PROFILES_PAGE_1 0x00000001 +#define NV_MSGBOX_CMD_ARG2_GET_POWER_HINT_INFO_PROFILES_PAGE_2 0x00000002 +#define NV_MSGBOX_CMD_ARG2_GET_POWER_HINT_INFO_PROFILES_PAGE_3 0x00000003 +#define NV_MSGBOX_CMD_ARG2_GET_POWER_HINT_INFO_PROFILES_TOTAL_PAGES \ + (NV_MSGBOX_CMD_ARG2_GET_POWER_HINT_INFO_PROFILES_PAGE_3 + 1) + + +#define NV_MSGBOX_CMD_STATUS 28:24 +#define NV_MSGBOX_CMD_STATUS_NULL 0x00000000 +#define NV_MSGBOX_CMD_STATUS_ERR_REQUEST 0x00000001 +#define NV_MSGBOX_CMD_STATUS_ERR_OPCODE 0x00000002 +#define NV_MSGBOX_CMD_STATUS_ERR_ARG1 0x00000003 +#define NV_MSGBOX_CMD_STATUS_ERR_ARG2 0x00000004 +#define NV_MSGBOX_CMD_STATUS_ERR_DATA 0x00000005 +#define NV_MSGBOX_CMD_STATUS_ERR_MISC 0x00000006 +#define NV_MSGBOX_CMD_STATUS_ERR_I2C_ACCESS 0x00000007 +#define NV_MSGBOX_CMD_STATUS_ERR_NOT_SUPPORTED 0x00000008 +#define NV_MSGBOX_CMD_STATUS_ERR_NOT_AVAILABLE 0x00000009 +#define NV_MSGBOX_CMD_STATUS_ERR_BUSY 0x0000000a +#define NV_MSGBOX_CMD_STATUS_ERR_AGAIN 0x0000000b +#define NV_MSGBOX_CMD_STATUS_ERR_SENSOR_DATA 0x0000000c +#define NV_MSGBOX_CMD_STATUS_ERR_DISPOSITION 0x0000000d +#define NV_MSGBOX_CMD_STATUS_PARTIAL_FAILURE 0x0000001b +#define NV_MSGBOX_CMD_STATUS_ACCEPTED 0x0000001c +#define NV_MSGBOX_CMD_STATUS_INACTIVE 0x0000001d +#define NV_MSGBOX_CMD_STATUS_READY 0x0000001e +#define NV_MSGBOX_CMD_STATUS_SUCCESS 0x0000001f + +#define NV_MSGBOX_CMD_RSVD 29:29 +#define NV_MSGBOX_CMD_RSVD_INIT 0x00000000 + +#define NV_MSGBOX_CMD_COPY_DATA 30:30 +#define NV_MSGBOX_CMD_COPY_DATA_OFF 0x00000000 +#define NV_MSGBOX_CMD_COPY_DATA_ON 0x00000001 + +#define NV_MSGBOX_CMD_EVENT_PENDING 30:30 +#define NV_MSGBOX_CMD_EVENT_PENDING_OFF 0x00000000 +#define NV_MSGBOX_CMD_EVENT_PENDING_ON 0x00000001 + +#define NV_MSGBOX_CMD_INTR 31:31 +#define NV_MSGBOX_CMD_INTR_NOT_PENDING 0x00000000 +#define NV_MSGBOX_CMD_INTR_PENDING 0x00000001 +#define NV_MSGBOX_CMD_INTR_CLEAR 0x00000000 + +/* For individual requests in a bundle */ + +#define NV_MSGBOX_CMD_ON_BUNDLE_FAILURE 31:31 +#define NV_MSGBOX_CMD_ON_BUNDLE_FAILURE_CONTINUE 0x00000000 +#define NV_MSGBOX_CMD_ON_BUNDLE_FAILURE_STOP 0x00000001 + +#define NV_MSGBOX_CMD_DATA_COPY 23:0 +#define NV_MSGBOX_CMD_DATA_COPY_INIT 0x00000000 +#define NV_MSGBOX_CMD_EXT_STATUS 23:0 + +// NV_MSGBOX_CMD_COPY_SIZE_ENCODING used for certain commands to embed size +// indicators in the output when copy-bit is set in request + +#define NV_MSGBOX_CMD_COPY_SIZE_ENCODING_READ_DATA_OUT 0:0 +#define NV_MSGBOX_CMD_COPY_SIZE_ENCODING_READ_DATA_OUT_SET 1 +#define NV_MSGBOX_CMD_COPY_SIZE_ENCODING_READ_DATA_OUT_NOT_SET 0 + +#define NV_MSGBOX_CMD_COPY_SIZE_ENCODING_READ_EXT_DATA_OUT 1:1 +#define NV_MSGBOX_CMD_COPY_SIZE_ENCODING_READ_EXT_DATA_OUT_SET 1 +#define NV_MSGBOX_CMD_COPY_SIZE_ENCODING_READ_EXT_DATA_OUT_NOT_SET 0 + +#define NV_MSGBOX_CMD_COPY_SIZE_ENCODING_DATA 23:2 +#define NV_MSGBOX_CMD_COPY_SIZE_ENCODING_DATA_INIT 0x00000000 + +/* Response to NV_MSGBOX_CMD_ARG1_REMAP_ROWS_HISTOGRAM */ +#define NV_MSGBOX_CMD_REMAP_ROW_HISTOGRAM_NONE_AVAILABILITY 15:0 + +/* Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_THROUGHPUT_* */ +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_GRANULARITY 1:0 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_GRANULARITY_KIB 0x00000000 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_GRANULARITY_MIB 0x00000001 +#define 
NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_GRANULARITY_GIB 0x00000002 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_GRANULARITY_TIB 0x00000003 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_NEW_SAMPLE 2:2 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_NEW_SAMPLE_FALSE 0x00000000 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_NEW_SAMPLE_TRUE 0x00000001 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_INVALID_DELTA 3:3 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_INVALID_DELTA_FALSE 0x00000000 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_INVALID_DELTA_TRUE 0x00000001 +#define NV_MSGBOX_CMD_NVLINK_INFO_THROUGHPUT_DELTA 23:4 + +/* + * Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_TRAINING_ERR_STATE + * NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_RUNTIME_ERR_STATE + */ +#define NV_MSGBOX_CMD_NVLINK_INFO_TRAINING_ERROR_COUNT 11:0 +#define NV_MSGBOX_CMD_NVLINK_INFO_RUNTIME_ERROR_COUNT 23:12 +#define NV_MSGBOX_CMD_NVLINK_INFO_ERROR_COUNT_MAX \ + DRF_MASK(NV_MSGBOX_CMD_NVLINK_INFO_TRAINING_ERROR_COUNT) + +/** + * Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_AVAILABILITY + */ +#define NV_MSGBOX_CMD_NVLINK_INFO_GET_NVLINK_INFO_AVAILABILTY_PAGE_0 0x00000000 + +/* MSGBOX data, capability dword structure */ + +#define NV_MSGBOX_DATA_REG 31:0 +#define NV_MSGBOX_DATA_CAP_COUNT 5 + +#define NV_MSGBOX_DATA_CAP_0 0 +#define NV_MSGBOX_DATA_CAP_0_TEMP_GPU_0 0:0 +#define NV_MSGBOX_DATA_CAP_0_TEMP_GPU_0_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_TEMP_GPU_0_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_TEMP_NVSWITCH_0 0:0 +#define NV_MSGBOX_DATA_CAP_0_TEMP_NVSWITCH_0_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_TEMP_NVSWITCH_0_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_TEMP_GPU_1 1:1 +#define NV_MSGBOX_DATA_CAP_0_TEMP_GPU_1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_TEMP_GPU_1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_TEMP_BOARD 4:4 +#define NV_MSGBOX_DATA_CAP_0_TEMP_BOARD_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_TEMP_BOARD_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_TEMP_MEMORY 5:5 +#define NV_MSGBOX_DATA_CAP_0_TEMP_MEMORY_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_TEMP_MEMORY_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_TEMP_PWR_SUPPLY 6:6 +#define NV_MSGBOX_DATA_CAP_0_TEMP_PWR_SUPPLY_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_TEMP_PWR_SUPPLY_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_EXT_TEMP_BITS 11:8 +#define NV_MSGBOX_DATA_CAP_0_EXT_TEMP_BITS_ZERO 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_EXT_TEMP_BITS_ADT7473 0x00000002 +#define NV_MSGBOX_DATA_CAP_0_EXT_TEMP_BITS_SFXP11_5 0x00000005 +#define NV_MSGBOX_DATA_CAP_0_EXT_TEMP_BITS_SFXP24_8 0x00000008 +#define NV_MSGBOX_DATA_CAP_0_POWER_TOTAL 16:16 +#define NV_MSGBOX_DATA_CAP_0_POWER_TOTAL_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_POWER_TOTAL_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_GPU_PCONTROL 17:17 +#define NV_MSGBOX_DATA_CAP_0_GPU_PCONTROL_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_GPU_PCONTROL_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_GPU_SYSCONTROL 18:18 +#define NV_MSGBOX_DATA_CAP_0_GPU_SYSCONTROL_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_GPU_SYSCONTROL_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_THERMP_BITS 28:24 // Adjust when adding new bits +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC 24:24 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_ACOUSTIC_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SLOWDN 
25:25 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SLOWDN_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SLOWDN_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SHUTDN 26:26 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SHUTDN_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_SHUTDN_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_MEMORY 27:27 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_MEMORY_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_MEMORY_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_GPU_SW_SLOWDOWN 28:28 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_GPU_SW_SLOWDOWN_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_THERMP_TEMP_GPU_SW_SLOWDOWN_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_0_GET_FABRIC_STATE_FLAGS 29:29 +#define NV_MSGBOX_DATA_CAP_0_GET_FABRIC_STATE_FLAGS_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_0_GET_FABRIC_STATE_FLAGS_AVAILABLE 0x00000001 + +#define NV_MSGBOX_DATA_CAP_1 1 +#define NV_MSGBOX_DATA_CAP_1_BOARD_PART_NUM_V1 0:0 +#define NV_MSGBOX_DATA_CAP_1_BOARD_PART_NUM_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_BOARD_PART_NUM_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_OEM_INFO_V1 1:1 +#define NV_MSGBOX_DATA_CAP_1_OEM_INFO_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_OEM_INFO_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_SERIAL_NUM_V1 2:2 +#define NV_MSGBOX_DATA_CAP_1_SERIAL_NUM_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_SERIAL_NUM_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_MARKETING_NAME_V1 3:3 +#define NV_MSGBOX_DATA_CAP_1_MARKETING_NAME_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_MARKETING_NAME_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_GPU_PART_NUM_V1 4:4 +#define NV_MSGBOX_DATA_CAP_1_GPU_PART_NUM_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_GPU_PART_NUM_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_MEMORY_VENDOR_V1 5:5 +#define NV_MSGBOX_DATA_CAP_1_MEMORY_VENDOR_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_MEMORY_VENDOR_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_MEMORY_PART_NUM_V1 6:6 +#define NV_MSGBOX_DATA_CAP_1_MEMORY_PART_NUM_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_MEMORY_PART_NUM_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_BUILD_DATE_V1 7:7 +#define NV_MSGBOX_DATA_CAP_1_BUILD_DATE_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_BUILD_DATE_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_FIRMWARE_VER_V1 8:8 +#define NV_MSGBOX_DATA_CAP_1_FIRMWARE_VER_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_FIRMWARE_VER_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_VENDOR_ID_V1 9:9 +#define NV_MSGBOX_DATA_CAP_1_VENDOR_ID_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_VENDOR_ID_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_DEV_ID_V1 10:10 +#define NV_MSGBOX_DATA_CAP_1_DEV_ID_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_DEV_ID_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_SUB_VENDOR_ID_V1 11:11 +#define NV_MSGBOX_DATA_CAP_1_SUB_VENDOR_ID_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_SUB_VENDOR_ID_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_SUB_ID_V1 12:12 +#define NV_MSGBOX_DATA_CAP_1_SUB_ID_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_SUB_ID_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_GPU_GUID_V1 13:13 +#define NV_MSGBOX_DATA_CAP_1_GPU_GUID_V1_NOT_AVAILABLE 0x00000000 +#define 
NV_MSGBOX_DATA_CAP_1_GPU_GUID_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_INFOROM_VER_V1 14:14 +#define NV_MSGBOX_DATA_CAP_1_INFOROM_VER_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_INFOROM_VER_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_ECC_V1 16:16 +#define NV_MSGBOX_DATA_CAP_1_ECC_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_ECC_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_ECC_V2 17:17 +#define NV_MSGBOX_DATA_CAP_1_ECC_V2_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_ECC_V2_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_ECC_V3 18:18 +#define NV_MSGBOX_DATA_CAP_1_ECC_V3_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_ECC_V3_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_RET_PG_CNT 19:19 +#define NV_MSGBOX_DATA_CAP_1_RET_PG_CNT_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_RET_PG_CNT_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_ECC_V4 20:20 +#define NV_MSGBOX_DATA_CAP_1_ECC_V4_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_ECC_V4_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_ECC_V5 21:21 +#define NV_MSGBOX_DATA_CAP_1_ECC_V5_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_ECC_V5_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_ACCESS_WP_MODE 22:22 +#define NV_MSGBOX_DATA_CAP_1_ACCESS_WP_MODE_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_ACCESS_WP_MODE_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_GET_ECC_ENABLED_STATE 23:23 +#define NV_MSGBOX_DATA_CAP_1_GET_ECC_ENABLED_STATE_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_GET_ECC_ENABLED_STATE_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_GET_GPU_RESET_REQUIRED 24:24 +#define NV_MSGBOX_DATA_CAP_1_GET_GPU_RESET_REQUIRED_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_GET_GPU_RESET_REQUIRED_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_GPU_UTIL_COUNTERS 25:25 +#define NV_MSGBOX_DATA_CAP_1_GPU_UTIL_COUNTERS_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_GPU_UTIL_COUNTERS_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_NVLINK_INFO_STATE_V1 26:26 +#define NV_MSGBOX_DATA_CAP_1_NVLINK_INFO_STATE_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_NVLINK_INFO_STATE_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_NVLINK_ERROR_COUNTERS 27:27 +#define NV_MSGBOX_DATA_CAP_1_NVLINK_ERROR_COUNTERS_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_NVLINK_ERROR_COUNTERS_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_CLOCK_FREQ_INFO 28:28 +#define NV_MSGBOX_DATA_CAP_1_CLOCK_FREQ_INFO_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_CLOCK_FREQ_INFO_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_GET_MIG_ENABLED_STATE 29:29 +#define NV_MSGBOX_DATA_CAP_1_GET_MIG_ENABLED_STATE_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_GET_MIG_ENABLED_STATE_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_1_ECC_V6 30:30 +#define NV_MSGBOX_DATA_CAP_1_ECC_V6_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_1_ECC_V6_AVAILABLE 0x00000001 + +#define NV_MSGBOX_DATA_CAP_2 2 +#define NV_MSGBOX_DATA_CAP_2_GPU_DRIVER 0:0 +#define NV_MSGBOX_DATA_CAP_2_GPU_DRIVER_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_GPU_DRIVER_NOT_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_NVSWITCH_DRIVER 0:0 +#define NV_MSGBOX_DATA_CAP_2_NVSWITCH_DRIVER_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_NVSWITCH_DRIVER_NOT_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_GPU_REQUEST 1:1 +#define NV_MSGBOX_DATA_CAP_2_GPU_REQUEST_NOT_AVAILABLE 0x00000000 +#define 
NV_MSGBOX_DATA_CAP_2_GPU_REQUEST_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_NUM_SCRATCH_BANKS 4:2 +#define NV_MSGBOX_DATA_CAP_2_NUM_SCRATCH_BANKS_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_FAN_V1_CONTROL 5:5 +#define NV_MSGBOX_DATA_CAP_2_FAN_V1_CONTROL_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_FAN_V1_CONTROL_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_LENGTH_V1 6:6 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_LENGTH_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_LENGTH_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_WIDTH_V1 7:7 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_WIDTH_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_WIDTH_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_HEIGHT_V1 8:8 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_HEIGHT_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_PRODUCT_HEIGHT_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_PCIE_SPEED_V1 9:9 +#define NV_MSGBOX_DATA_CAP_2_PCIE_SPEED_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_PCIE_SPEED_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_PCIE_WIDTH_V1 10:10 +#define NV_MSGBOX_DATA_CAP_2_PCIE_WIDTH_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_PCIE_WIDTH_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_TGP_LIMIT_V1 11:11 +#define NV_MSGBOX_DATA_CAP_2_TGP_LIMIT_V1_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_TGP_LIMIT_V1_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_SCRATCH_PAGE_SIZE 12:12 +#define NV_MSGBOX_DATA_CAP_2_SCRATCH_PAGE_SIZE_1024B 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_SCRATCH_PAGE_SIZE_256B 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_STATS 13:13 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_STATS_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_STATS_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_GET_PCIE_LINK_INFO 14:14 +#define NV_MSGBOX_DATA_CAP_2_GET_PCIE_LINK_INFO_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_GET_PCIE_LINK_INFO_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_GET_GPU_DRAIN_AND_RESET_RECOMMENDED 15:15 +#define NV_MSGBOX_DATA_CAP_2_GET_GPU_DRAIN_AND_RESET_RECOMMENDED_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_GET_GPU_DRAIN_AND_RESET_RECOMMENDED_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_THROUGHPUT 16:16 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_THROUGHPUT_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_THROUGHPUT_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_LINK_STATE_V2 17:17 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_LINK_STATE_V2_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_LINK_STATE_V2_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_SUBLINK_WIDTH 18:18 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_SUBLINK_WIDTH_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_SUBLINK_WIDTH_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_GET_ENERGY_COUNTER 19:19 +#define NV_MSGBOX_DATA_CAP_2_GET_ENERGY_COUNTER_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_GET_ENERGY_COUNTER_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_PENDING 20:20 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_PENDING_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_PENDING_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_HISTOGRAM 21:21 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_HISTOGRAM_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_REMAP_ROW_HISTOGRAM_AVAILABLE 
0x00000001 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_TRAINING_ERROR_STATE 22:22 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_TRAINING_ERROR_STATE_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_TRAINING_ERROR_STATE_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_RUNTIME_ERROR_STATE 23:23 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_RUNTIME_ERROR_STATE_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_RUNTIME_ERROR_STATE_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_GET_PCIE_LINK_TARGET_SPEED 25:25 +#define NV_MSGBOX_DATA_CAP_2_GET_PCIE_LINK_TARGET_SPEED_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_GET_PCIE_LINK_TARGET_SPEED_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_CURRENT_PSTATE 26:26 +#define NV_MSGBOX_DATA_CAP_2_CURRENT_PSTATE_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_CURRENT_PSTATE_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_AVAILABILITY 27:27 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_AVAILABILITY_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_2_NVLINK_INFO_AVAILABILITY_AVAILABLE 0x00000001 + +#define NV_MSGBOX_DATA_CAP_4 4 +#define NV_MSGBOX_DATA_CAP_4_HW_VIOLATION_TIME 0:0 +#define NV_MSGBOX_DATA_CAP_4_HW_VIOLATION_TIME_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_HW_VIOLATION_TIME_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_SW_VIOLATION_TIME 1:1 +#define NV_MSGBOX_DATA_CAP_4_SW_VIOLATION_TIME_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_SW_VIOLATION_TIME_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_SW_POWER_VIOLATION_TIME 2:2 +#define NV_MSGBOX_DATA_CAP_4_SW_POWER_VIOLATION_TIME_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_SW_POWER_VIOLATION_TIME_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_SW_THERMAL_VIOLATION_TIME 3:3 +#define NV_MSGBOX_DATA_CAP_4_SW_THERMAL_VIOLATION_TIME_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_SW_THERMAL_VIOLATION_TIME_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_UTILIZATION_RATE 4:4 +#define NV_MSGBOX_DATA_CAP_4_UTILIZATION_RATE_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_UTILIZATION_RATE_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_GET_DRIVER_EVENT_MSG 5:5 +#define NV_MSGBOX_DATA_CAP_4_GET_DRIVER_EVENT_MSG_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_GET_DRIVER_EVENT_MSG_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_REQUEST_BUNDLING 6:6 +#define NV_MSGBOX_DATA_CAP_4_REQUEST_BUNDLING_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_REQUEST_BUNDLING_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_SET_DEVICE_DISABLE 7:7 +#define NV_MSGBOX_DATA_CAP_4_SET_DEVICE_DISABLE_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_SET_DEVICE_DISABLE_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_FAN_CURVE_POINTS_GET_SET 11:11 +#define NV_MSGBOX_DATA_CAP_4_FAN_CURVE_POINTS_GET_SET_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_FAN_CURVE_POINTS_GET_SET_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_CAP_4_POWER_HINT 22:22 +#define NV_MSGBOX_DATA_CAP_4_POWER_HINT_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_CAP_4_POWER_HINT_AVAILABLE 0x00000001 + +/* ECC counters */ +#define NV_MSGBOX_DATA_ECC_CNT_16BIT_DBE 31:16 +#define NV_MSGBOX_DATA_ECC_CNT_16BIT_SBE 16:0 +#define NV_MSGBOX_DATA_ECC_CNT_8BIT_DBE 23:16 +#define NV_MSGBOX_DATA_ECC_CNT_8BIT_SBE 7:0 + +#define NV_MSGBOX_DATA_ECC_V5_COUNT_HEADER 7:0 + +#define NV_MSGBOX_DATA_ECC_V5_COUNT_HDR_ID 1:0 +#define NV_MSGBOX_DATA_ECC_V5_COUNT_HDR_ID_INVAL 0 +#define NV_MSGBOX_DATA_ECC_V5_COUNT_HDR_ID_ADDR 1 
+#define NV_MSGBOX_DATA_ECC_V5_COUNT_HDR_ID_REGN 2 + +#define NV_MSGBOX_DATA_ECC_V5_COUNT_METADATA 31:16 + +#define NV_MSGBOX_DATA_ECC_V5_ERR_BUF_ERR_TYPE 7:0 + +#define NV_MSGBOX_DATA_ECC_V5_ERR_BUF_ERR_TYPE_ID 0:0 +#define NV_MSGBOX_DATA_ECC_V5_ERR_BUF_ERR_TYPE_ID_UNCORR 0 +#define NV_MSGBOX_DATA_ECC_V5_ERR_BUF_ERR_TYPE_ID_CORR 1 + +#define NV_MSGBOX_DATA_ECC_V5_ERR_BUF_METADATA 31:16 + +#define NV_MSGBOX_DATA_ECC_V5_METADATA_STRUCT_ID 21:16 +#define NV_MSGBOX_DATA_ECC_V5_METADATA_STRUCT_ID_LRF 0 +#define NV_MSGBOX_DATA_ECC_V5_METADATA_STRUCT_ID_L1DATA 1 +#define NV_MSGBOX_DATA_ECC_V5_METADATA_STRUCT_ID_L1TAG 2 +#define NV_MSGBOX_DATA_ECC_V5_METADATA_STRUCT_ID_CBU 3 +#define NV_MSGBOX_DATA_ECC_V5_METADATA_STRUCT_ID_LTC 4 +#define NV_MSGBOX_DATA_ECC_V5_METADATA_STRUCT_ID_DRAM 5 + +#define NV_MSGBOX_DATA_ECC_V5_METADATA_LOCATION_ID 26:22 +#define NV_MSGBOX_DATA_ECC_V5_METADATA_SUBLOCATION_ID 31:27 + +/* NV_MSGBOX_CMD_OPCODE_SCRATCH_COPY src offset argument */ +#define NV_MSGBOX_DATA_COPY_SRC_OFFSET 7:0 + +/* NV_MSGBOX_CMD_ARG2_REG_INDEX_SCRATCH_PAGE register layout */ +#define NV_MSGBOX_DATA_SCRATCH_PAGE_DST 7:0 +#define NV_MSGBOX_DATA_SCRATCH_PAGE_SRC 15:8 + +/* Async request status codes returned in the data register */ +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS 7:0 + +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_SUCCESS 0x00000000 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_CARD_NOT_PRESENT 0x00000001 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_DUAL_LINK_INUSE 0x00000002 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_GENERIC 0x00000003 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_GPU_NOT_FULL_POWER 0x00000004 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_IN_USE 0x00000005 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INSUFFICIENT_RESOURCES 0x00000006 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_ACCESS_TYPE 0x00000007 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_ARGUMENT 0x00000008 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_BASE 0x00000009 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_CHANNEL 0x0000000A +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_CLASS 0x0000000B +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_CLIENT 0x0000000C +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_COMMAND 0x0000000D +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_DATA 0x0000000E +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_DEVICE 0x0000000F +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_DMA_SPECIFIER 0x00000010 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_EVENT 0x00000011 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_FLAGS 0x00000012 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_FUNCTION 0x00000013 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_HEAP 0x00000014 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_INDEX 0x00000015 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_LIMIT 0x00000016 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_METHOD 0x00000017 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OBJECT_BUFFER 0x00000018 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OBJECT_ERROR 0x00000019 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OBJECT_HANDLE 0x0000001A +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OBJECT_NEW 0x0000001B +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OBJECT_OLD 0x0000001C +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OBJECT_PARENT 0x0000001D +#define 
NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OFFSET 0x0000001E +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OWNER 0x0000001F +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_PARAM_STRUCT 0x00000020 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_PARAMETER 0x00000021 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_POINTER 0x00000022 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_REGISTRY_KEY 0x00000023 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_STATE 0x00000024 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_STRING_LENGTH 0x00000025 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_XLATE 0x00000026 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_IRQ_NOT_FIRING 0x00000027 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_MULTIPLE_MEMORY_TYPES 0x00000028 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_NOT_SUPPORTED 0x00000029 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_OPERATING_SYSTEM 0x0000002A +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_PROTECTION_FAULT 0x0000002B +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_TIMEOUT 0x0000002C +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_TOO_MANY_PRIMARIES 0x0000002D +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_IRQ_EDGE_TRIGGERED 0x0000002E +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_OPERATION 0x0000002F +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_NOT_COMPATIBLE 0x00000030 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_MORE_PROCESSING_REQUIRED 0x00000031 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INSUFFICIENT_PERMISSIONS 0x00000032 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_TIMEOUT_RETRY 0x00000033 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_NOT_READY 0x00000034 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_GPU_IS_LOST 0x00000035 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_IN_FULLCHIP_RESET 0x00000036 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_LOCK_STATE 0x00000037 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_ADDRESS 0x00000038 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INVALID_IRQ_LEVEL 0x00000039 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_MEMORY_TRAINING_FAILED 0x00000040 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_BUSY_RETRY 0x00000041 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_INSUFFICIENT_POWER 0x00000042 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_OBJECT_NOT_FOUND 0x00000043 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_BUFFER_TOO_SMALL 0x00000044 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_RESET_REQUIRED 0x00000045 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_PRIV_SEC_VIOLATION 0x00000046 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_DEFERRED 0x00000047 +#define NV_MSGBOX_DATA_ASYNC_REQ_STATUS_ERROR_FREQ_NOT_SUPPORTED 0x00000048 + +/* Response to NV_MSGBOX_CMD_OPCODE_GET_POWER_CONNECTOR_STATE */ +#define NV_MSGBOX_DATA_POWER_CONNECTED 0:0 +#define NV_MSGBOX_DATA_POWER_CONNECTED_SUFFICIENT 0x00000000 +#define NV_MSGBOX_DATA_POWER_CONNECTED_INSUFFICIENT 0x00000001 + +/* Response to NV_MSGBOX_CMD_OPCODE_GET_PAGE_RETIREMENT_STATS */ +#define NV_MSGBOX_DATA_RETIRED_PAGES_CNT_SBE 7:0 +#define NV_MSGBOX_DATA_RETIRED_PAGES_CNT_DBE 15:8 + +/* + * Response to + * NV_MSGBOX_CMD_ARG1_GET_MISC_ECC_ENABLED_STATE + * NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_FLAGS_PAGE_0 + */ +#define NV_MSGBOX_DATA_ECC_ENABLED_STATE_SUPPORTED 0:0 +#define NV_MSGBOX_DATA_ECC_ENABLED_STATE_SUPPORTED_OFF 0x00000000 +#define NV_MSGBOX_DATA_ECC_ENABLED_STATE_SUPPORTED_ON 0x00000001 +#define NV_MSGBOX_DATA_ECC_ENABLED_STATE_CURRENT 1:1 +#define 
NV_MSGBOX_DATA_ECC_ENABLED_STATE_CURRENT_OFF 0x00000000 +#define NV_MSGBOX_DATA_ECC_ENABLED_STATE_CURRENT_ON 0x00000001 +#define NV_MSGBOX_DATA_ECC_ENABLED_STATE_PENDING 2:2 +#define NV_MSGBOX_DATA_ECC_ENABLED_STATE_PENDING_OFF 0x00000000 +#define NV_MSGBOX_DATA_ECC_ENABLED_STATE_PENDING_ON 0x00000001 + +/* Response to NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_FLAGS_PAGE_0 */ +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_SUPPORTED 3:3 +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_SUPPORTED_OFF 0x00000000 +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_SUPPORTED_ON 0x00000001 +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_CURRENT 4:4 +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_CURRENT_OFF 0x00000000 +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_CURRENT_ON 0x00000001 +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_PENDING 5:5 +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_PENDING_OFF 0x00000000 +#define NV_MSGBOX_DATA_MIG_ENABLED_STATE_PENDING_ON 0x00000001 + +/* Response to NV_MSGBOX_CMD_ARG1_GET_MISC_DEVICE_FLAGS_PAGE_0 */ +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DRIVER_STATUS 6:6 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DRIVER_STATUS_NOT_RUNNING 0x00000000 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DRIVER_STATUS_RUNNING 0x00000001 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DEVICE_STATUS 7:7 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DEVICE_STATUS_ENABLED 0x00000000 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DEVICE_STATUS_DISABLED 0x00000001 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_FABRIC_MANAGER_STATUS 10:8 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_FABRIC_MANAGER_STATUS_NOT_RUNNING 0x00000000 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_FABRIC_MANAGER_STATUS_RUNNING 0x00000001 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_FABRIC_MANAGER_STATUS_TIMEOUT 0x00000002 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_FABRIC_MANAGER_STATUS_ERROR 0x00000003 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_SOURCE 13:11 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_SOURCE_NONE 0x00000000 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_SOURCE_LOCAL_DEVICE 0x00000001 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_SOURCE_PEER_DEVICE 0x00000002 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_SOURCE_IN_BAND 0x00000003 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_SOURCE_OUT_OF_BAND 0x00000004 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_REASON 16:14 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_REASON_NONE 0x00000000 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_REASON_CLIENT_OVERRIDE 0x00000001 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_REASON_TRUNK_LINK_FAILED 0x00000002 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_REASON_ACCESS_LINK_FAILED 0x00000003 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DISABLE_REASON_UNSPECIFIED_FAILURE 0x00000004 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DRIVER_RELOAD 17:17 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DRIVER_RELOAD_NOT_PENDING 0x00000000 +#define NV_MSGBOX_DATA_DEVICE_DISABLE_STATE_DRIVER_RELOAD_PENDING 0x00000001 + +/* Response to NV_MSGBOX_CMD_ARG1_ACCESS_WP_MODE_ACTION_GET */ +#define NV_MSGBOX_DATA_ACCESS_WP_MODE_GET_STATE 7:0 +#define NV_MSGBOX_DATA_ACCESS_WP_MODE_GET_STATE_DISABLED 0x0000005A +#define NV_MSGBOX_DATA_ACCESS_WP_MODE_GET_STATE_ENABLED 0x000000A5 + +/* + * Response to + * NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_RESET_REQUIRED + * NV_MSGBOX_CMD_ARG1_GET_MISC_GPU_FLAGS_PAGE_1 + */ +#define NV_MSGBOX_DATA_GPU_RESET_REQUIRED 0:0 +#define NV_MSGBOX_DATA_GPU_RESET_REQUIRED_OFF 0x00000000 +#define 
NV_MSGBOX_DATA_GPU_RESET_REQUIRED_ON 0x00000001 +#define NV_MSGBOX_DATA_GPU_DRAIN_AND_RESET_RECOMMENDED 1:1 +#define NV_MSGBOX_DATA_GPU_DRAIN_AND_RESET_RECOMMENDED_NO 0x00000000 +#define NV_MSGBOX_DATA_GPU_DRAIN_AND_RESET_RECOMMENDED_YES 0x00000001 + +/** + * Response to + * NV_MSGBOX_CMD_ARG1_GET_CLOCK_FREQ_INFO_PAGE_3 + */ +#define NV_MSGBOX_DATA_GET_CLOCK_FREQ_INFO_PAGE_3_CURRENT_PSTATE 3:0 +#define NV_MSGBOX_DATA_GET_CLOCK_FREQ_INFO_PAGE_3_CURRENT_PSTATE_INVALID 0x0000000F + +/* + * Number of Nvlink data outputs (dataOut, extData) for + * NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK queries + */ +#define NV_MSGBOX_DATA_NVLINK_INFO_DATA_WIDTH 2 + +/* + * Encoding for Invalid data + */ +#define NV_MSGBOX_DATA_NVLINK_INFO_DATA_INVALID 0xFFFFFFFF + +/* Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK_STATE_V1 */ +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V1__SIZE 32 +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V1_DOWN 0x00000000 +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V1_UP 0x00000001 +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V1_INVALID 0x000000ff + +/* Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK_STATE_V2 */ +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2(i) (3+(i)*4):(0+(i)*4) +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2__SIZE 8 +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_OFF 0x00000000 +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_SAFE 0x00000001 +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_ACTIVE 0x00000002 +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_ERROR 0x00000003 +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_STATE_V2_INVALID 0x000000ff + +/* Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_LINK_BANDWIDTH (in Mps) */ +#define NV_MSGBOX_DATA_NVLINK_INFO_LINK_BANDWIDTH__SIZE 1 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_INVALID 0xFFFFFFFF +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_00000_MBPS 0 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_16000_MBPS 16000 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_19200_MBPS 19200 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_20000_MBPS 20000 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_25000_MBPS 25000 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_25781_MBPS 25781 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_32000_MBPS 32000 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_40000_MBPS 40000 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_50000_MBPS 50000 +#define NV_MSGBOX_DATA_NVLINK_LINE_RATE_53125_MBPS 53125 + +/* Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_SUBLINK_WIDTH */ +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_WIDTH(i) (5+(i)*6):(0+(i)*6) +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_WIDTH__SIZE 5 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_WIDTH_INVALID 0x000000ff +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_TX_WIDTH 2:0 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_TX_WIDTH_0 0x00000000 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_TX_WIDTH_1 0x00000001 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_TX_WIDTH_2 0x00000002 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_TX_WIDTH_4 0x00000003 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_TX_WIDTH_8 0x00000004 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_TX_WIDTH_INVALID 0x00000007 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_RX_WIDTH 5:3 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_RX_WIDTH_0 0x00000000 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_RX_WIDTH_1 0x00000001 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_RX_WIDTH_2 0x00000002 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_RX_WIDTH_4 0x00000003 +#define NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_RX_WIDTH_8 0x00000004 +#define 
NV_MSGBOX_DATA_NVLINK_INFO_SUBLINK_RX_WIDTH_INVALID 0x00000007
+
+/*
+ * Response to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_TRAINING_ERR_STATE
+ * NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_RUNTIME_ERR_STATE
+ */
+#define NV_MSGBOX_DATA_NVLINK_INFO_ERROR_STATE__SIZE 32
+#define NV_MSGBOX_DATA_NVLINK_INFO_ERROR_STATE_INVALID 0x000000ff
+#define NV_MSGBOX_DATA_NVLINK_INFO_ERROR_STATE_NO_ERROR 0x00000000
+#define NV_MSGBOX_DATA_NVLINK_INFO_ERROR_STATE_ERROR 0x00000001
+
+/* Response to NV_MSGBOX_CMD_ARG2_REMAP_ROW_RAW_CNT_COMBINED */
+#define NV_MSGBOX_DATA_REMAP_ROW_UNCORR_CNT 10:0
+#define NV_MSGBOX_DATA_REMAP_ROW_UNCORR_CNT_EXCESS 11:11
+#define NV_MSGBOX_DATA_REMAP_ROW_UNCORR_CNT_EXCESS_FALSE 0
+#define NV_MSGBOX_DATA_REMAP_ROW_UNCORR_CNT_EXCESS_TRUE 1
+#define NV_MSGBOX_DATA_REMAP_ROW_CORR_CNT 22:12
+#define NV_MSGBOX_DATA_REMAP_ROW_CORR_CNT_EXCESS 23:23
+#define NV_MSGBOX_DATA_REMAP_ROW_CORR_CNT_EXCESS_FALSE 0
+#define NV_MSGBOX_DATA_REMAP_ROW_CORR_CNT_EXCESS_TRUE 1
+
+/* Response to NV_MSGBOX_CMD_ARG1_REMAP_ROWS_STATE_FLAGS */
+#define NV_MSGBOX_DATA_REMAP_ROW_STATE_FLAGS_PAGE0_FAILED_REMAPPING 0:0
+#define NV_MSGBOX_DATA_REMAP_ROW_STATE_FLAGS_PAGE0_FAILED_REMAPPING_FALSE 0
+#define NV_MSGBOX_DATA_REMAP_ROW_STATE_FLAGS_PAGE0_FAILED_REMAPPING_TRUE 1
+#define NV_MSGBOX_DATA_REMAP_ROW_STATE_FLAGS_PAGE0_PENDING_REMAPPING 1:1
+#define NV_MSGBOX_DATA_REMAP_ROW_STATE_FLAGS_PAGE0_PENDING_REMAPPING_FALSE 0
+#define NV_MSGBOX_DATA_REMAP_ROW_STATE_FLAGS_PAGE0_PENDING_REMAPPING_TRUE 1
+
+/* Response to NV_MSGBOX_CMD_ARG1_REMAP_ROWS_HISTOGRAM */
+#define NV_MSGBOX_DATA_REMAP_ROW_HISTOGRAM_LOW_AVAILABILITY 15:0
+#define NV_MSGBOX_DATA_REMAP_ROW_HISTOGRAM_PARTIAL_AVAILABILITY 31:16
+
+/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_0 */
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED 2:0
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED_UNKNOWN 0x00000000
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED_2500_MTPS 0x00000001
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED_5000_MTPS 0x00000002
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED_8000_MTPS 0x00000003
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED_16000_MTPS 0x00000004
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_SPEED_32000_MTPS 0x00000005
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_WIDTH 6:4
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_WIDTH_UNKNOWN 0x00000000
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_WIDTH_X1 0x00000001
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_WIDTH_X2 0x00000002
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_WIDTH_X4 0x00000003
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_WIDTH_X8 0x00000004
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_WIDTH_X16 0x00000005
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_LINK_WIDTH_X32 0x00000006
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_NONFATAL_ERROR_COUNT 15:8
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_FATAL_ERROR_COUNT 23:16
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_0_UNSUPP_REQ_COUNT 31:24
+
+/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_1 */
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_1_L0_TO_RECOVERY_COUNT 31:0
+
+/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_2 */
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_2_REPLAY_ROLLOVER_COUNT 15:0
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_2_NAKS_RCVD_COUNT 31:16
+
+/* MSGBOX Extended Data Register */
+#define NV_MSGBOX_EXT_DATA_REG 31:0
+
+/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_0 */
+#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_0_CORRECTABLE_ERROR_COUNT 15:0
+
+/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_1 */
+#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_1_REPLAY_COUNT 31:0
+
+/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_2 */
+#define NV_MSGBOX_EXT_DATA_PCIE_LINK_INFO_PAGE_2_NAKS_SENT_COUNT 15:0
+
+/* Response to NV_MSGBOX_CMD_ARG1_GET_PCIE_LINK_INFO_PAGE_3 */
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED 2:0
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_UNKNOWN 0x00000000
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_2500_MTPS 0x00000001
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_5000_MTPS 0x00000002
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_8000_MTPS 0x00000003
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_16000_MTPS 0x00000004
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_32000_MTPS 0x00000005
+#define NV_MSGBOX_DATA_PCIE_LINK_INFO_PAGE_3_TARGET_LINK_SPEED_RESERVED 2:0
+
+/* Response to NV_MSGBOX_CMD_ARG1_REMAP_ROWS_HISTOGRAM */
+#define NV_MSGBOX_EXT_DATA_REMAP_ROW_HISTOGRAM_MAX_AVAILABILITY 31:16
+#define NV_MSGBOX_EXT_DATA_REMAP_ROW_HISTOGRAM_HIGH_AVAILABILITY 15:0
+
+/* SysId info per type sizes */
+#define NV_MSGBOX_SYSID_DATA_SIZE_BOARD_PART_NUM_V1 24
+#define NV_MSGBOX_SYSID_DATA_SIZE_OEM_INFO_V1 24
+#define NV_MSGBOX_SYSID_DATA_SIZE_SERIAL_NUM_V1 16
+#define NV_MSGBOX_SYSID_DATA_SIZE_MARKETING_NAME_V1 24
+#define NV_MSGBOX_SYSID_DATA_SIZE_GPU_PART_NUM_V1 16
+#define NV_MSGBOX_SYSID_DATA_SIZE_MEMORY_VENDOR_V1 1
+#define NV_MSGBOX_SYSID_DATA_SIZE_MEMORY_PART_NUM_V1 20
+#define NV_MSGBOX_SYSID_DATA_SIZE_BUILD_DATE_V1 4
+#define NV_MSGBOX_SYSID_DATA_SIZE_FIRMWARE_VER_V1 14
+#define NV_MSGBOX_SYSID_DATA_SIZE_VENDOR_ID_V1 2
+#define NV_MSGBOX_SYSID_DATA_SIZE_DEV_ID_V1 2
+#define NV_MSGBOX_SYSID_DATA_SIZE_SUB_VENDOR_ID_V1 2
+#define NV_MSGBOX_SYSID_DATA_SIZE_SUB_ID_V1 2
+#define NV_MSGBOX_SYSID_DATA_SIZE_GPU_GUID_V1 16
+#define NV_MSGBOX_SYSID_DATA_SIZE_INFOROM_VER_V1 16
+#define NV_MSGBOX_SYSID_DATA_SIZE_PRODUCT_LENGTH_V1 4
+#define NV_MSGBOX_SYSID_DATA_SIZE_PRODUCT_WIDTH_V1 4
+#define NV_MSGBOX_SYSID_DATA_SIZE_PRODUCT_HEIGHT_V1 4
+#define NV_MSGBOX_SYSID_DATA_SIZE_PCIE_SPEED_V1 1
+#define NV_MSGBOX_SYSID_DATA_SIZE_PCIE_WIDTH_V1 1
+
+/*!
+ * Response to NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_CLK
+ * 16-bit frequency in MHz
+ */
+#define NV_MSGBOX_DATA_POWER_HINT_INFO_CLK_GR_MIN 15:0
+#define NV_MSGBOX_DATA_POWER_HINT_INFO_CLK_GR_MAX 31:16
+#define NV_MSGBOX_DATA_POWER_HINT_INFO_CLK_MEM_MIN 15:0
+#define NV_MSGBOX_DATA_POWER_HINT_INFO_CLK_MEM_MAX 31:16
+
+/*!
+ * Response to NV_MSGBOX_CMD_ARG1_GET_POWER_HINT_INFO_TEMP
+ * 16-bit temperature
+ */
+#define NV_MSGBOX_DATA_POWER_HINT_INFO_TEMP_MIN 15:0
+#define NV_MSGBOX_DATA_POWER_HINT_INFO_TEMP_MAX 31:16
+
+/*!
+ * Response to _ARG1_GET_POWER_HINT_INFO_PROFILES, 1 page consists of data and + * ext data, then we can support up to 256 profiles + * Bit index represents profile ID + */ +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_PERF 0:0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_PERF_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_PERF_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_HIGH_K 1:1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_HIGH_K_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_HIGH_K_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_LOW_K 2:2 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_LOW_K_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_DMMA_LOW_K_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_HMMA 3:3 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_HMMA_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_HMMA_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_IMMA 4:4 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_IMMA_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_IMMA_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_SGEMM 5:5 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_SGEMM_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_SGEMM_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_TRANSFORMER 6:6 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_TRANSFORMER_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_TRANSFORMER_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_0 7:7 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_0_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_0_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_1 8:8 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_1_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_1_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_2 9:9 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_2_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_2_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_3 10:10 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_3_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_3_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_4 11:11 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_4_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_4_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_5 12:12 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_5_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_5_AVAILABLE 1 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_6 13:13 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_6_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_6_AVAILABLE 1 +#define 
NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_7 14:14 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_7_NOT_AVAILABLE 0 +#define NV_MSGBOX_DATA_POWER_HINT_INFO_PROFILES_PAGE_0_CUSTOMER_CUSTOM_7_AVAILABLE 1 + +/* Event types */ +typedef enum +{ + NV_MSGBOX_EVENT_TYPE_SERVER_RESTART = 0, + NV_MSGBOX_EVENT_TYPE_GPU_RESET_REQUIRED, + NV_MSGBOX_EVENT_TYPE_DRIVER_ERROR_MESSAGE, + NV_MSGBOX_EVENT_TYPE_TGP_LIMIT_SET_SUCCESS, + NV_MSGBOX_EVENT_TYPE_CLOCK_LIMIT_SET_SUCCESS, + NV_MSGBOX_EVENT_TYPE_ECC_TOGGLE_SUCCESS, + NV_MSGBOX_EVENT_TYPE_MIG_TOGGLE_SUCCESS, + NV_MSGBOX_NUM_EVENTS, /* insert new event types before this line */ +} NvMsgboxEventType; + +/* Bit mask of all defined events */ +#define NV_MSGBOX_EVENT_TYPE__ALL (NVBIT(NV_MSGBOX_NUM_EVENTS) - 1) + +/* ----------------------- Set Master Capabilities ---------------------------- */ + +// +// SET_MASTER_CAPS +// +// Descripiton: +// Set/declare SMBPBI Master capabilities. +// +// Common Encoding: +// 7 0 7 0 +// .-+-+-+-+-+-+-+-. .-+-+-+-+-+-+-+-. +// | CLASS | ARG1 | CAP_INDEX | ARG2 +// `-+-+-+-+-+-+-+-' `-+-+-+-+-+-+-+-' +// +// The SMBPBI Master capabilities are logically grouped into classes (specified +// in arg1). Capabilities are sent 32 at a time (dword index stored in arg2) +// per-class (typically once during system-initialization). +// +// Classes: +// _GPU_REQ - Class of capability bits corresponding 1:1 with the GPU- +// Request opcodes the master supports. +// +#define NV_MSGBOX_CMD_SET_MASTER_CAPS_ARG1_CLASS 7:0 +#define NV_MSGBOX_CMD_SET_MASTER_CAPS_ARG1_CLASS_GPU_REQ 0x00000000 + +#define NV_MSGBOX_DATA_MASTER_CAPS_GPU_REQ_COUNT 1 + +#define NV_MSGBOX_DATA_MASTER_CAPS_GPU_REQ_0 31:0 +#define NV_MSGBOX_DATA_MASTER_CAPS_GPU_REQ_0_CPU_PCONTROL 0:0 +#define NV_MSGBOX_DATA_MASTER_CAPS_GPU_REQ_0_CPU_PCONTROL_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_MASTER_CAPS_GPU_REQ_0_CPU_PCONTROL_AVAILABLE 0x00000001 +#define NV_MSGBOX_DATA_MASTER_CAPS_GPU_REQ_0_SYS_PCONTROL 1:1 +#define NV_MSGBOX_DATA_MASTER_CAPS_GPU_REQ_0_SYS_PCONTROL_NOT_AVAILABLE 0x00000000 +#define NV_MSGBOX_DATA_MASTER_CAPS_GPU_REQ_0_SYS_PCONTROL_AVAILABLE 0x00000001 + +/* ------------------ GPU Performance Control (PCONTROL) ---------------------- */ + +// +// GPU_PCONTROL arg1, arg2, and data encoding (fields are relative to zero, not +// relative to their position in the COMMAND/DATA registers). +// + +// +// ARG1/ARG2 Encoding +// +// 7 3 2 0 7 0 +// .-+-+-+-+-+-+-+-. .-+-+-+-+-+-+-+-. 
+// | TARGET | ACT | ARG1 |0 0 0 0 0 0 0 0| ARG2 (reserved) +// `-+-+-+-+-+-+-+-' `-+-+-+-+-+-+-+-' +// +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_ACTION 2:0 +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_ACTION_GET_INFO 0x00000000 +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_ACTION_GET_LIMIT 0x00000001 +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_ACTION_SET_LIMIT 0x00000002 +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_ACTION_GET_STATUS 0x00000003 +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_TARGET 7:3 +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_TARGET_VPSTATE 0x00000000 +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_TARGET_POWER_SMBIDX 0x00000001 +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG2_RSVD 7:0 + +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_SET_ACTION(arg1, action) \ + (arg1) = FLD_SET_DRF(_MSGBOX_CMD, _GPU_PCONTROL_ARG1, _ACTION, \ + action, (arg1)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_GET_ACTION(arg1) \ + DRF_VAL(_MSGBOX_CMD, _GPU_PCONTROL_ARG1, _ACTION, (arg1)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_SET_TARGET(arg1, target) \ + (arg1) = FLD_SET_DRF(_MSGBOX_CMD, _GPU_PCONTROL_ARG1, _TARGET, \ + target, (arg1)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_GET_TARGET(arg1) \ + DRF_VAL(_MSGBOX_CMD, _GPU_PCONTROL_ARG1, _TARGET, (arg1)) + +// +// Getting GPU vPstate Information +// +// Inputs: +// ARG1 - {TARGET=VPSTATE, ACTION=GET_INFO} +// ARG2 - Unused/reserved (must be zero) +// +// Outputs: +// DATA - Current min/max range of externally available vpstates +// (vPmin=fastest, vPmax=slowest) +// +// Encoding: +// 31 16 15 8 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-. +// |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0| MIN | MAX | DATA +// `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-' +// +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_MAX 7:0 +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_MIN 15:8 +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_RSVD 31:16 + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_SET_MAX(data, max) \ + (data) = FLD_SET_DRF_NUM(_MSGBOX_CMD, _GPU_PCONTROL_DATA, \ + _VPSTATE_GET_INFO_MAX, (max), (data)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_GET_MAX(data) \ + DRF_VAL(_MSGBOX_CMD, _GPU_PCONTROL_DATA, _VPSTATE_GET_INFO_MAX, (data)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_SET_MIN(data, min) \ + (data) = FLD_SET_DRF_NUM(_MSGBOX_CMD, _GPU_PCONTROL_DATA, \ + _VPSTATE_GET_INFO_MIN, (min), (data)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_GET_MIN(data) \ + DRF_VAL(_MSGBOX_CMD, _GPU_PCONTROL_DATA, _VPSTATE_GET_INFO_MIN, (data)) + +// +// Getting GPU vPstate Limits +// +// Inputs: +// ARG1 - {TARGET=VPSTATE, ACTION=GET_LIMIT} +// ARG2 - Unused/reserved (must be zero) +// +// Outputs: +// DATA - Last requested vpstate limit/value, zero when no limit has been set. +// +// Encoding: +// 31 8 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-. 
+// |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0| VALUE | DATA +// `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-' +// +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_LIMIT_VALUE 7:0 +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_LIMIT_RSVD 31:8 + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_LIMIT_SET_VALUE(data, value) \ + (data) = FLD_SET_DRF_NUM(_MSGBOX_CMD, _GPU_PCONTROL_DATA, \ + _VPSTATE_GET_LIMIT_VALUE, (value), (data)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_LIMIT_GET_VALUE(data) \ + DRF_VAL(_MSGBOX_CMD, _GPU_PCONTROL_DATA, _VPSTATE_GET_LIMIT_VALUE, (data)) + +// +// Setting GPU vPstate Limits: +// +// Inputs: +// ARG1 - {TARGET=VPSTATE, ACTION=SET_LIMIT} +// ARG2 - Unused/reserved (must be zero) +// DATA - Desired vpstate limit/value between vPmin and vPmax (inclusive), +// zero to clear active limits. +// Outputs: +// none +// +// Encoding: +// 31 8 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-. +// |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0| VALUE | DATA +// `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-' +// +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_SET_LIMIT_VALUE 7:0 +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_SET_LIMIT_VALUE_CLEAR 0x00000000 +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_SET_LIMIT_VALUE_INIT 0x00000000 +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_SET_LIMIT_RSVD 31:8 + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_SET_LIMIT_SET_VALUE(data, limit) \ + (data) = FLD_SET_DRF_NUM(_MSGBOX_CMD, _GPU_PCONTROL_DATA, \ + _VPSTATE_SET_LIMIT_VALUE, (limit), (data)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_SET_LIMIT_GET_VALUE(data) \ + DRF_VAL(_MSGBOX_CMD, _GPU_PCONTROL_DATA, _VPSTATE_SET_LIMIT_VALUE, (data)) + +// +// Getting GPU vPstate Status +// +// Inputs: +// ARG1 - {TARGET=VPSTATE, ACTION=GET_STATUS} +// ARG2 - Unused/reserved (must be zero) +// +// Outputs: +// DATA - Current vPstate +// +// Encoding: +// 31 8 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-. +// |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0| CURRENT | DATA +// `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-' +// +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_STATUS_CURRENT 7:0 +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_STATUS_RSVD 31:8 + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_STATUS_SET_CURRENT(data, curr) \ + (data) = FLD_SET_DRF_NUM(_MSGBOX_CMD, _GPU_PCONTROL_DATA, \ + _VPSTATE_GET_STATUS_CURRENT, (curr), (data)) + +#define NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_STATUS_GET_CURRENT(data) \ + DRF_VAL(_MSGBOX_CMD, _GPU_PCONTROL_DATA, _VPSTATE_GET_STATUS_CURRENT, (data)) + + +/* ------------------------- GPU System Control ------------------------------- */ + +// +// GPU_SYSCONTROL +// +// Description: +// Command sent to the GPU to set/remove a system-imposed operating limit +// on a specific system-parameter. +// +// Common Encoding: +// 7 2 1 0 7 0 +// .-+-+-+-+-+-+-+-. .-+-+-+-+-+-+-+-. +// | TARGET |ACT| ARG1 |0 0 0 0 0 0 0 0| ARG2 (reserved) +// `-+-+-+-+-+-+-+-' `-+-+-+-+-+-+-+-' +// +// Notes: +// - TARGET accepts NV0000_CTRL_SYSTEM_PARAM_* definitions +// - arg1, arg2, and data encoding (fields are relative to zero, not relative +// to their position in the DATA_OUT register). 
+//
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG1_ACTION 1:0
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG1_ACTION_CLEAR_LIMIT 0x00000000
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG1_ACTION_SET_LIMIT 0x00000001
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG1_TARGET 7:2
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG2_RSVD 7:0
+
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG1_SET_ACTION(arg1, action) \
+    (arg1) = FLD_SET_DRF(_MSGBOX_CMD, _GPU_SYSCONTROL_ARG1, _ACTION, \
+        action, (arg1))
+
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG1_GET_ACTION(arg1) \
+    DRF_VAL(_MSGBOX_CMD, _GPU_SYSCONTROL_ARG1, _ACTION, (arg1))
+
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG1_SET_TARGET(arg1, target) \
+    (arg1) = FLD_SET_DRF_NUM(_MSGBOX_CMD, _GPU_SYSCONTROL_ARG1, _TARGET, \
+        target, (arg1))
+
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_ARG1_GET_TARGET(arg1) \
+    DRF_VAL(_MSGBOX_CMD, _GPU_SYSCONTROL_ARG1, _TARGET, (arg1))
+
+//
+// Setting a System-Parameter Operating-Limit:
+//
+// Inputs:
+//   ARG1 - {TARGET=NV_MSGBOX_SYS_PARAM_*, ACTION=SET_LIMIT}
+//   ARG2 - Unused/reserved (must be zero)
+//   DATA - Desired limit/value.
+//
+// Outputs:
+//   none
+//
+// Encoding:
+// 31                                                              0
+// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-.
+// |                             VALUE                             | DATA
+// `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
+//
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_DATA_SET_LIMIT_VALUE 31:0
+
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_DATA_SET_LIMIT_SET_VALUE(data, limit) \
+    (data) = FLD_SET_DRF_NUM(_MSGBOX_CMD, _GPU_SYSCONTROL_DATA, \
+        _SET_LIMIT_VALUE, (limit), (data))
+
+#define NV_MSGBOX_CMD_GPU_SYSCONTROL_DATA_PARAM_SET_LIMIT_GET_VALUE(data) \
+    DRF_VAL(_MSGBOX_CMD, _GPU_SYSCONTROL_DATA, _SET_LIMIT_VALUE, (data))
+
+//
+// Host-side scratch memory buffer
+// This memory buffer is allocated by the host CPU in its system memory
+// and is used for passing additional arguments and results that don't
+// fit in the command/status and data registers due to their size.
+// This memory can be accessed by the BMC SMBus master through
+// dedicated requests.
+//
+// The page size varies across SMBPBI servers.
+// Capability dword[2] bit 12 indicates the page size in bytes:
+//
+// _1024 --> 0xFF dwords
+// _256  --> 0x40 dwords
+//
+#define NV_MSGBOX_SCRATCH_PAGE_SIZE_1024_D 0xFF
+#define NV_MSGBOX_SCRATCH_PAGE_SIZE_256_D 0x40
+
+//
+// TODO: Once switch soc/pmu/oobtest no longer reference this, it will be
+// removed.
+//
+#define NV_MSGBOX_SCRATCH_PAGE_SIZE 1024
+#define NV_MSGBOX_SCRATCH_NUM_PAGES_P 2 // expressed as a power of 2
+                                        // must be >= 2
+
+// This cap code plugs into NV_MSGBOX_DATA_CAP_2_NUM_SCRATCH_BANKS
+#define NV_MSGBOX_SCRATCH_NUM_PAGES_CAP_CODE \
+    (NV_MSGBOX_SCRATCH_NUM_PAGES_P > 0 ?
\ + NV_MSGBOX_SCRATCH_NUM_PAGES_P - 1 : 0) + +#define NV_MSGBOX_SCRATCH_NUM_PAGES (1 << NV_MSGBOX_SCRATCH_NUM_PAGES_P) +#define NV_MSGBOX_SCRATCH_BUFFER_SIZE (NV_MSGBOX_SCRATCH_NUM_PAGES \ + * NV_MSGBOX_SCRATCH_PAGE_SIZE) + +// Auxiliary definitions related to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_STATE_SPEED +#define NV_MSGBOX_NVLINK_STATE_BITS_PER_NVLINK 4 +#define NV_MSGBOX_NVLINK_STATE_NVLINKS_PER_REGISTER (32 / NV_MSGBOX_NVLINK_STATE_BITS_PER_NVLINK) +#define NV_MSGBOX_NVLINK_STATE_NUM_NVLINKS_VOLTA 6 +#define NV_MSGBOX_NVLINK_STATE_NUM_NVLINKS_TURING 2 +#define NV_MSGBOX_NVLINK_STATE_NUM_NVLIPT_GA100 3 +#define NV_MSGBOX_NVLINK_STATE_NUM_LINKS_PER_NVLIPT_NVL30 4 +#define NV_MSGBOX_NVLINK_SUBLINK_WIDTH_MAX_NVL30 4 +#define NV_MSGBOX_NVLINK_STATE_NUM_NVLINKS_GA100 \ + (NV_MSGBOX_NVLINK_STATE_NUM_NVLIPT_GA100 * \ + NV_MSGBOX_NVLINK_STATE_NUM_LINKS_PER_NVLIPT_NVL30) +#define NV_MSGBOX_NVLINK_STATE_NUM_NVLIPT_GA10X 1 +#define NV_MSGBOX_NVLINK_STATE_NUM_NVLINKS_GA10X \ + (NV_MSGBOX_NVLINK_STATE_NUM_NVLIPT_GA10X * \ + NV_MSGBOX_NVLINK_STATE_NUM_LINKS_PER_NVLIPT_NVL30) +#define NV_MSGBOX_NVLINK_STATE_NUM_NVLIPT_LR10 9 +#define NV_MSGBOX_NVLINK_STATE_NUM_NVLINKS_LR10 \ + (NV_MSGBOX_NVLINK_STATE_NUM_NVLIPT_LR10 * \ + NV_MSGBOX_NVLINK_STATE_NUM_LINKS_PER_NVLIPT_NVL30) +#define NV_MSGBOX_NVLINK_STATE_NUM_REGISTERS(arch) \ + ((NV_MSGBOX_NVLINK_STATE_NUM_NVLINKS##arch + NV_MSGBOX_NVLINK_STATE_NVLINKS_PER_REGISTER - 1) \ + / NV_MSGBOX_NVLINK_STATE_NVLINKS_PER_REGISTER) + +// Auxiliary definitions related to NV_MSGBOX_CMD_ARG1_GET_NVLINK_INFO_THROUGHPUT_* +#define NV_MSGBOX_NVLINK_THROUGHPUT_COUNTER_INDEX_DATA_TX 0x00000000 +#define NV_MSGBOX_NVLINK_THROUGHPUT_COUNTER_INDEX_DATA_RX 0x00000001 +#define NV_MSGBOX_NVLINK_THROUGHPUT_COUNTER_INDEX_RAW_TX 0x00000002 +#define NV_MSGBOX_NVLINK_THROUGHPUT_COUNTER_INDEX_RAW_RX 0x00000003 +#define NV_MSGBOX_NVLINK_THROUGHPUT_COUNTER_INDEX_MAX 0x00000004 + +#if !defined(NV_MSGBOX_NO_PARAM_STRUCTS) +// +// Parameter structures for asynchronous requests +// passed in scratch space +// + +/*! + * This structure is used to hold parameters for + * NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_GET and + * NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_SET + */ +typedef struct +{ + NvU32 flags; + /*! + * If flags:_PERSIST is _ON, make limitCurrInput persistent, + * surviving the driver reload and the system reboot. + * + * If flags:_CLEAR is _ON, it will clear the TGP limit. The + * persistence still depends on persist flag. + */ +#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST 0:0 +#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST_OFF 0x00000000 +#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_PERSIST_ON 0x00000001 +#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR 1:1 +#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR_OFF 0x00000000 +#define NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS_FLAGS_CLEAR_ON 0x00000001 + + /*! + * Current total GPU power limit value to enforce, requested by the + * SMBPBI client, expressed in milliwatts. + * Must always be within range imposed by the current policy. + * 0xffffffff is returned, if the limit has not been set by the SMBPBI client. + */ + NvU32 limitCurrInput; + + /*! + * Currently arbitrated total GPU power limit value after taking + * into account limits, requested by all clients, expressed + * in milliwatts. + */ + NvU32 limitCurrOutput; +} NV_MSGBOX_PMGR_PWR_TGP_LIMIT_CONTROL_PARAMS; + +/*! 
+ * This structure is used to hold parameters for + * NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_INFO_GET + */ +typedef struct +{ + /*! + * Current total GPU power limit lower and upper bounds and the + * default setting, expressed in milliwatts. + * These constraints must be observed, when the limit + * is being set with + * NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_PMGR_PWR_TGP_LIMIT_CONTROL_SET. + */ + NvU32 limitMin; + NvU32 limitMax; + NvU32 limitDefault; +} NV_MSGBOX_PMGR_PWR_TGP_LIMIT_INFO_PARAMS; + +/*! + * This structure is used to hold parameters for + * NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_THERMAL_FAN_V1_COUNT_GET + */ +typedef struct +{ + NvU32 fanCount; // POLICY - add types below +#define NV_MSGBOX_VIOLATION_COUNTERS_TYPE_ALL_SUPPORTED_POLICIES 0xFFFFFFFF //-1 represents All Supported policies +#define NV_MSGBOX_VIOLATION_COUNTERS_TYPE_HW_VIOLATION 0 +#define NV_MSGBOX_VIOLATION_COUNTERS_TYPE_SW_GLOBAL_VIOLATION 1 +#define NV_MSGBOX_VIOLATION_COUNTERS_TYPE_SW_POWER_VIOLATION 2 +#define NV_MSGBOX_VIOLATION_COUNTERS_TYPE_SW_THERMAL_VIOLATION 3 + + NV_DECLARE_ALIGNED(NvU64 violationTime[32], 8); //. + */ +typedef struct +{ + NvU32 flags; + /*! + * If flags: _CLEAR is _ON, it will clear the limits. Otherwise, + * set the clock limits. + * + * If flags: _PERSIST is _ON, set/clear limits persistently, + * surviving the driver reload and the system reboot. + * + */ +#define NV_MSGBOX_OOB_CLOCK_LIMIT_CTRL_PARAMS_FLAGS_PERSIST 0:0 +#define NV_MSGBOX_OOB_CLOCK_LIMIT_CTRL_PARAMS_FLAGS_PERSIST_OFF 0x00000000 +#define NV_MSGBOX_OOB_CLOCK_LIMIT_CTRL_PARAMS_FLAGS_PERSIST_ON 0x00000001 +#define NV_MSGBOX_OOB_CLOCK_LIMIT_CTRL_PARAMS_FLAGS_CLEAR 1:1 +#define NV_MSGBOX_OOB_CLOCK_LIMIT_CTRL_PARAMS_FLAGS_CLEAR_OFF 0x00000000 +#define NV_MSGBOX_OOB_CLOCK_LIMIT_CTRL_PARAMS_FLAGS_CLEAR_ON 0x00000001 + + /*! + * Graphics clock limit in MHz + */ + NvU32 clkMinFreqMHz; + NvU32 clkMaxFreqMHz; +} NV_MSGBOX_OOB_CLOCK_LIMIT_CTRL_PARAMS; + +/* + * Parameters for NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_TEST_MESSAGE_SEND + */ +#define NV_MSGBOX_TEST_MESSAGE_SEND_STRING_SIZE_MAX 32 +/* + * If the message starts with this character, the message + * will not be sent to the kernel. + */ +#define NV_MSGBOX_TEST_MESSAGE_SEND_SILENT_CHAR '@' +typedef struct +{ + NvU8 string[NV_MSGBOX_TEST_MESSAGE_SEND_STRING_SIZE_MAX]; +} NV_MSGBOX_TEST_MESSAGE_SEND_PARAMS; + +/* + * Individual request structure definition for bundled requests + */ +typedef struct +{ + NvU32 cmdStatus; //. + */ +typedef struct { + /*! + * Graphics clock limit in MHz + * if there is no cap, return + * NV_MSGBOX_CLOCK_LIMIT_GET_PARAMS_CLOCK_LIMIT_NOT_SET + */ + NvU16 oobClientMin; + NvU16 oobClientMax; + NvU16 enforcedMin; + NvU16 enforcedMax; +#define NV_MSGBOX_CLOCK_LIMIT_GET_PARAMS_CLOCK_LIMIT_NOT_SET 0xffff +} NV_MSGBOX_CLOCK_LIMIT_GET_PARAMS; + +/*! 
+ * This structure is used to hold parameters for + * NV_MSGBOX_CMD_ARG1_ASYNC_REQUEST_DEVICE_MODE_CONTROL + */ +typedef struct +{ + NvU16 modeType; //> 7) \ + ) +#define NV_MSGBOX_ECC_V5_COUNTS_NUM_ENTRIES 600 + +#define NV_MSGBOX_CMD_ECC_V5_CNT_HDR_META(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _HDR_META) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_CNT_REGN_CORRECTED_TOTAL(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _REGN_CORRECTED_TOTAL) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_CNT_REGN_CORRECTED_UNIQUE(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _REGN_CORRECTED_UNIQUE) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_CNT_REGN_UNCORRECTED_TOTAL(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _REGN_UNCORRECTED_TOTAL) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_CNT_REGN_UNCORRECTED_UNIQUE(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _REGN_UNCORRECTED_UNIQUE) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_CNT_ADDR_ADDR(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _ADDR_ADDR) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_CNT_ADDR_UNCORRECTED_COUNTS(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _ADDR_UNCORRECTED_COUNTS) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_CNT_ADDR_CORRECTED_TOTAL(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _ADDR_CORRECTED_TOTAL) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_CNT_ADDR_CORRECTED_UNIQUE(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_CNT(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_COUNTS_TYPE, _ADDR_CORRECTED_UNIQUE) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_ERR_BUF(index) \ + ( \ + NV_MSGBOX_CMD(_GET_ECC_V5, 0, 0) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG1_ECC_V5_SEL, _ERR_BUFFER) | \ + DRF_NUM(_MSGBOX, _CMD, _ARG1_ECC_V5_ERR_BUF_INDEX, (index)) \ + ) +#define NV_MSGBOX_ECC_V5_ERR_BUF_NUM_ENTRIES 16 + +#define NV_MSGBOX_CMD_ECC_V5_ERR_BUF_ERR_TYPE_META(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_ERR_BUF(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_ERR_BUF_TYPE, _ERR_TYPE_META) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_ERR_BUF_TIME_STAMP(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_ERR_BUF(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_ERR_BUF_TYPE, _TIME_STAMP) \ + ) + +#define NV_MSGBOX_CMD_ECC_V5_ERR_BUF_ADDR(index) \ + ( \ + NV_MSGBOX_CMD_ECC_V5_ERR_BUF(index) | \ + DRF_DEF(_MSGBOX, _CMD, _ARG2_ECC_V5_ERR_BUF_TYPE, _ADDR) \ + ) + +#define NV_MSGBOX_CMD_SET_COPY_DATA(cmd) \ + ( \ + FLD_SET_DRF(_MSGBOX, _CMD, _COPY_DATA, _ON, (cmd)) \ + ) + +#define NV_MSGBOX_GET_CMD_OPCODE(cmd) DRF_VAL(_MSGBOX, _CMD, _OPCODE, (cmd)) +#define NV_MSGBOX_GET_CMD_ARG1(cmd) DRF_VAL(_MSGBOX, _CMD, _ARG1 , (cmd)) +#define NV_MSGBOX_GET_CMD_ARG2(cmd) DRF_VAL(_MSGBOX, _CMD, _ARG2 , (cmd)) +#define NV_MSGBOX_GET_CMD_STATUS(cmd) DRF_VAL(_MSGBOX, _CMD, _STATUS, (cmd)) +#define NV_MSGBOX_CMD_IS_COPY_DATA_SET(cmd) \ + FLD_TEST_DRF(_MSGBOX, _CMD, _COPY_DATA, _ON, (cmd)) + +#define NV_MSGBOX_CAP_IS_AVAILABLE(caps, idx, name) \ + !(FLD_TEST_DRF(_MSGBOX, _DATA_CAP_##idx, name, _NOT_AVAILABLE, (caps)[idx])) + +#define NV_MSGBOX_CAP_SET_AVAILABLE(caps, idx, name) \ + (caps)[idx] = FLD_SET_DRF(_MSGBOX, _DATA_CAP_##idx, name, _AVAILABLE, \ + (caps)[idx]) + 
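All of the field macros in this header use the DRF "high:low" bit-range convention, so a client packs the ARG1/ARG2 bytes and unpacks the DATA register with the DRF/FLD helper macros the header already relies on (DRF_VAL, DRF_DEF, FLD_SET_DRF, and friends) rather than hand-written shifts and masks. The short sketch below is illustrative only and is not part of smbpbi.h or the driver: it shows one plausible client-side flow for the GPU_PCONTROL {TARGET=VPSTATE, ACTION=GET_INFO} query described above, gated on the capability dwords. smbpbiSendGpuPcontrol() is a hypothetical transport routine (write the command register, poll for completion, read back status and data), and the caps[] array is assumed to have been filled by an earlier capability query.

    /* Hypothetical transport helper: issues a GPU_PCONTROL request with the
     * given ARG1 byte, returns the final command/status register contents and
     * fills *pData with the data register. Not provided by this header. */
    extern NvU32 smbpbiSendGpuPcontrol(NvU8 arg1, NvU32 *pData);

    static void queryVpstateRange(const NvU32 caps[NV_MSGBOX_DATA_CAP_COUNT])
    {
        NvU8  arg1 = 0;
        NvU32 data = 0;

        /* Pack ARG1: TARGET (bits 7:3) = VPSTATE, ACTION (bits 2:0) = GET_INFO */
        NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_SET_TARGET(arg1, _VPSTATE);
        NV_MSGBOX_CMD_GPU_PCONTROL_ARG1_SET_ACTION(arg1, _GET_INFO);

        /* Issue the request only if capability dword 0 advertises GPU_PCONTROL */
        if (NV_MSGBOX_CAP_IS_AVAILABLE(caps, 0, _GPU_PCONTROL))
        {
            NvU32 cmdStatus = smbpbiSendGpuPcontrol(arg1, &data);

            if (NV_MSGBOX_GET_CMD_STATUS(cmdStatus) == NV_MSGBOX_CMD_STATUS_SUCCESS)
            {
                /* DATA packs vPmax in bits 7:0 and vPmin in bits 15:8;
                 * vPmin is the fastest and vPmax the slowest externally
                 * available vpstate. */
                NvU8 vPmax = NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_GET_MAX(data);
                NvU8 vPmin = NV_MSGBOX_CMD_GPU_PCONTROL_DATA_VPSTATE_GET_INFO_GET_MIN(data);

                (void)vPmax;
                (void)vPmin;   /* a real client would report or act on these */
            }
        }
    }

The same pattern applies to the other query families defined here: test the relevant NV_MSGBOX_DATA_CAP_* bit first, build ARG1/ARG2 with the field accessors, then decode DATA/EXT_DATA with the matching DRF field definitions.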
+/******************************************************************************** + * * + * GPU REQUESTS * + * * + ********************************************************************************/ + +// +// Encoding: +// 31 24 23 16 15 8 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-. +// | REQUEST ID | ARG2 | ARG1 | OPCODE | DATA_OUT +// `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-' +// +#define NV_MSGBOX_REQ_OPCODE 7:0 +#define NV_MSGBOX_REQ_OPCODE_CPU_PCONTROL 0x00000000 +#define NV_MSGBOX_REQ_OPCODE_SYS_PCONTROL 0x00000001 +#define NV_MSGBOX_REQ_ARG1 15:8 +#define NV_MSGBOX_REQ_ARG2 23:16 +#define NV_MSGBOX_REQ_REQUEST_ID 31:24 +#define NV_MSGBOX_REQ(opcode, arg1, arg2, requestId) \ + ( \ + DRF_NUM(_MSGBOX, _REQ, _OPCODE , (opcode)) | \ + DRF_NUM(_MSGBOX, _REQ, _ARG1 , (arg1)) | \ + DRF_NUM(_MSGBOX, _REQ, _ARG2 , (arg2)) | \ + DRF_NUM(_MSGBOX, _REQ, _REQUEST_ID, (requestId)) \ + ) + +/* ---------------- CPU Performance Control (PCONTROL) ------------------------ */ + +// +// CPU_PCONTROL +// +// Description: +// GPU-Request used to retrieve information and control various aspects of +// CPU performance. +// +// Common Encoding: +// 7 3 2 0 +// .-+-+-+-+-+-+-+-. +// | TARGET | ACT | ARG1 +// `-+-+-+-+-+-+-+-' +// +// Notes: +// arg1, arg2, and data encoding (fields are relative to zero, not relative +// to their position in the DATA_OUT register). +// +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_ACTION 2:0 +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_ACTION_RSVD1 0x00000000 +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_ACTION_RSVD2 0x00000001 +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_ACTION_SET_LIMIT 0x00000002 +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_TARGET 7:3 +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_TARGET_PSTATE 0x00000000 + +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_SET_ACTION(arg1, action) \ + (arg1) = FLD_SET_DRF(_MSGBOX_REQ, _CPU_PCONTROL_ARG1, _ACTION, \ + action, (arg1)) + +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_GET_ACTION(arg1) \ + DRF_VAL(_MSGBOX_REQ, _CPU_PCONTROL_ARG1, _ACTION, (arg1)) + +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_SET_TARGET(arg1, target) \ + (arg1) = FLD_SET_DRF(_MSGBOX_REQ, _CPU_PCONTROL_ARG1, _TARGET, \ + target, (arg1)) + +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG1_GET_TARGET(arg1) \ + DRF_VAL(_MSGBOX_REQ, _CPU_PCONTROL_ARG1, _TARGET, (arg1)) + +// +// Setting CPU Pstate Limits: +// +// Inputs: +// ARG1 - {TARGET=PSTATE, ACTION=SET_LIMIT} +// ARG2 - Stores desired CPU Pstate limit +// +// Outputs: +// none +// +// Encoding: +// 7 3 2 0 7 0 +// .-+-+-+-+-+-+-+-. .-+-+-+-+-+-+-+-. +// | TARGET | ACT | ARG1 | VALUE | ARG2 +// `-+-+-+-+-+-+-+-' `-+-+-+-+-+-+-+-' +// +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG2_PSTATE_SET_LIMIT_VALUE 7:0 + +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG2_PSTATE_SET_LIMIT_SET_VALUE(arg2, limit) \ + (arg2) = FLD_SET_DRF_NUM(_MSGBOX_REQ, _CPU_PCONTROL_ARG2, \ + _PSTATE_SET_LIMIT_VALUE, (limit), (arg2)) + +#define NV_MSGBOX_REQ_CPU_PCONTROL_ARG2_PSTATE_SET_LIMIT_GET_VALUE(arg2) \ + DRF_VAL(_MSGBOX_REQ, _CPU_PCONTROL_ARG2, _PSTATE_SET_LIMIT_VALUE, (arg2)) + +/* ----------------------- System Performance Control ------------------------- */ + +// +// SYS_PCONTROL +// +// Description: +// GPU-Request used to retrieve information and control various aspects of +// system-performance. +// +// Common Encoding: +// 7 3 2 0 7 0 +// .-+-+-+-+-+-+-+-. .-+-+-+-+-+-+-+-. 
+// | TARGET | ACT | ARG1 |0 0 0 0 0 0 0 0| ARG2 (reserved) +// `-+-+-+-+-+-+-+-' `-+-+-+-+-+-+-+-' +// +// Notes: +// - TARGET accepts NV0000_CTRL_SYSTEM_PARAM_* definitions +// - arg1, arg2, and data encoding (fields are relative to zero, not relative +// to their position in the DATA_OUT register). +// +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG1_ACTION 2:0 +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG1_ACTION_GET_STATUS 0x00000000 +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG1_ACTION_GET_LIMIT 0x00000001 +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG1_TARGET 7:3 +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG2_RSVD 7:0 + +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG1_SET_ACTION(arg1, action) \ + (arg1) = FLD_SET_DRF(_MSGBOX_REQ, _SYS_PCONTROL_ARG1, _ACTION, \ + action, (arg1)) + +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG1_GET_ACTION(arg1) \ + DRF_VAL(_MSGBOX_REQ, _SYS_PCONTROL_ARG1, _ACTION, (arg1)) + +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG1_SET_TARGET(arg1, target) \ + (arg1) = FLD_SET_DRF_NUM(_MSGBOX_REQ, _SYS_PCONTROL_ARG1, _TARGET, \ + (target), (arg1)) + +#define NV_MSGBOX_REQ_SYS_PCONTROL_ARG1_GET_TARGET(arg1) \ + DRF_VAL(_MSGBOX_REQ, _SYS_PCONTROL_ARG1, _TARGET, (arg1)) + +#endif /* _SMBPBI_H */ diff --git a/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi_impl.h b/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi_impl.h new file mode 100644 index 000000000..e7052c88a --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi_impl.h @@ -0,0 +1,45 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _SMBPBI_IMPL_H +#define _SMBPBI_IMPL_H +/*! + * This header file stores implementation dependent parameters of the SMBPBI server. + */ + +/*! + * Maximum number of individual requests in a bundle + */ +#define NV_MSGBOX_PARAM_MAX_BUNDLE_SIZE 4 + +/*! + * Maximum number of Result Disposition Rules + */ +#define NV_MSGBOX_PARAM_MAX_DISP_RULES 10 + +/*! + * Maximum length of the Driver Event Message text string is 80, including + * the terminating NUL character. 
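+ * (That is, at most 79 characters of message text plus the NUL terminator.)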
+ */ +#define NV_MSGBOX_MAX_DRIVER_EVENT_MSG_TXT_SIZE 80 + +#endif // _SMBPBI_IMPL_H diff --git a/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi_priv.h b/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi_priv.h new file mode 100644 index 000000000..2055aebad --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/oob/smbpbi_priv.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _SMBPBI_PRIV_H_ +#define _SMBPBI_PRIV_H_ + +/*! + * This file contains NVIDIA private defines for the SMBPBI + * interface. + */ + +#include "oob/smbpbi.h" + +// MSGBOX command word structure + +// +// Reserving opcodes above 0xC0 for internal/private functionationality. +// +// These opcodes should not be included in any documentation we release outside +// of NVIDIA! +// + +// +// Only for internal use (should not be written to the command register). Used +// for internal tracking when commands are redirected to the RM from the PMU. +// +#define NV_MSGBOX_CMD_ERR_MORE_PROCESSING_REQUIRED 0x000000F0 + +// +// Alternative encodings of the command word +// These are distinguished by a non-zero value in the 29:29 bit, +// previously known as _RSVD. +// +#define NV_MSGBOX_CMD_ENCODING 29:29 +#define NV_MSGBOX_CMD_ENCODING_STANDARD 0x00000000 +#define NV_MSGBOX_CMD_ENCODING_DEBUG 0x00000001 + +// Debug command structure +#define NV_MSGBOX_DEBUG_CMD_OPCODE 1:0 +#define NV_MSGBOX_DEBUG_CMD_OPCODE_READ_PRIV 0x00000000 + +#define NV_MSGBOX_DEBUG_CMD_ARG 23:2 + +/* Utility command constructor macros */ + +#define NV_MSGBOX_DEBUG_CMD(opcode, arg) \ + ( \ + DRF_DEF(_MSGBOX, _DEBUG_CMD, _OPCODE, opcode) | \ + DRF_NUM(_MSGBOX, _DEBUG_CMD, _ARG, (arg)) | \ + DRF_DEF(_MSGBOX, _CMD, _STATUS, _NULL) | \ + DRF_DEF(_MSGBOX, _CMD, _ENCODING, _DEBUG) | \ + DRF_DEF(_MSGBOX, _CMD, _INTR, _PENDING) \ + ) + +#define NV_MSGBOX_DEBUG_CMD_READ_PRIV(offset) \ + NV_MSGBOX_DEBUG_CMD(_READ_PRIV, (offset) >> 2) + +#endif // _SMBPBI_PRIV_H_ + diff --git a/src/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/src/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h new file mode 100644 index 000000000..155371ed1 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * << DO NOT EDIT >> + * + * This file describes the format of generated ucode binary. Please do not change the + * content unless the same change is applied to the target ucode builds. + */ + +#ifndef RM_RISCV_UCODE_H +#define RM_RISCV_UCODE_H + +#include "nvtypes.h" + +typedef struct { + // + // Version 1 + // Version 2 + // Vesrion 3 = for Partition boot + // Vesrion 4 = for eb riscv boot + // + NvU32 version; // structure version + NvU32 bootloaderOffset; + NvU32 bootloaderSize; + NvU32 bootloaderParamOffset; + NvU32 bootloaderParamSize; + NvU32 riscvElfOffset; + NvU32 riscvElfSize; + NvU32 appVersion; // Changelist number associated with the image + // + // Manifest contains information about Monitor and it is + // input to BR + // + NvU32 manifestOffset; + NvU32 manifestSize; + // + // Monitor Data offset within RISCV image and size + // + NvU32 monitorDataOffset; + NvU32 monitorDataSize; + // + // Monitor Code offset withtin RISCV image and size + // + NvU32 monitorCodeOffset; + NvU32 monitorCodeSize; + NvU32 bIsMonitorEnabled; + // + // Swbrom Code offset within RISCV image and size + // + NvU32 swbromCodeOffset; + NvU32 swbromCodeSize; + // + // Swbrom Data offset within RISCV image and size + // + NvU32 swbromDataOffset; + NvU32 swbromDataSize; +} RM_RISCV_UCODE_DESC; + +#endif // RM_RISCV_UCODE_H diff --git a/src/nvidia/arch/nvalloc/common/inc/rmflcnbl.h b/src/nvidia/arch/nvalloc/common/inc/rmflcnbl.h new file mode 100644 index 000000000..7a493064b --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/rmflcnbl.h @@ -0,0 +1,147 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Data structures and interfaces used for generic falcon boot-loader. + * + * This generic boot-loader is designed to load both non-secure and + * secure code taking care of signature as well. This bootloader + * should be loaded at the end of the IMEM so that it doesnt overwrite + * itself when it tries to load the code into IMEM starting at blk 0. + * The data will be loaded into DMEM offset 0. + */ + +#ifndef RMFLCNBL_H +#define RMFLCNBL_H + +#include "flcnifcmn.h" + +/*! + * @brief Structure used by the boot-loader to load the rest of the code. + * + * This has to be filled by the GPU driver and copied into DMEM at offset + * provided in the @ref _def_rm_flcn_bl_desc.blDmemDescLoadOff. + */ +typedef struct _def_rm_flcn_bl_dmem_desc +{ + /*! + * reserved should be always first element + */ + NvU32 reserved[4]; + /*! + * signature should follow reserved 16B signature for secure code. + * 0s if no secure code + */ + NvU32 signature[4]; + /*! + * ctxDma is used by the bootloader while loading code/data. + */ + NvU32 ctxDma; + /*! + * 256B aligned physical FB address where code is located. + */ + RM_FLCN_U64 codeDmaBase; + /*! + * Offset from codeDmaBase where the nonSecure code is located. + * The offset must be multiple of 256 to help performance. + */ + NvU32 nonSecureCodeOff; + /*! + * The size of the nonSecure code part. + */ + NvU32 nonSecureCodeSize; + /*! + * Offset from codeDmaBase where the secure code is located. + * The offset must be multiple of 256 to help performance. + */ + NvU32 secureCodeOff; + /*! + * The size of the ecure code part. + */ + NvU32 secureCodeSize; + /*! + * Code entry point which will be invoked by BL after code is loaded. + */ + NvU32 codeEntryPoint; + /*! + * 256B aligned Physical FB Address where data is located. + */ + RM_FLCN_U64 dataDmaBase; + /*! + * Size of data block. Should be multiple of 256B. + */ + NvU32 dataSize; + /*! + * Arguments to be passed to the target firmware being loaded. + */ + NvU32 argc; + /*! + * Number of arguments to be passed to the target firmware being loaded. + */ + NvU32 argv; +} RM_FLCN_BL_DMEM_DESC, *PRM_FLCN_BL_DMEM_DESC; + +/*! + * @brief The header used by the GPU driver to figure out code and data + * sections of bootloader. + */ +typedef struct _def_rm_flcn_bl_img_header +{ + /*! + * Offset of code section in the image. + */ + NvU32 blCodeOffset; + /*! + * Size of code section in the image. + */ + NvU32 blCodeSize; + /*! + * Offset of data section in the image. + */ + NvU32 blDataOffset; + /*! + * Size of data section in the image. + */ + NvU32 blDataSize; +} RM_FLCN_BL_IMG_HEADER, *PRM_FLCN_BL_IMG_HEADER; + +/*! + * @brief The descriptor used by RM to figure out the requirements of boot loader. + */ +typedef struct _def_rm_flcn_bl_desc +{ + /*! + * Starting tag of bootloader + */ + NvU32 blStartTag; + /*! + * Dmem offset where _def_rm_flcn_bl_dmem_desc to be loaded + */ + NvU32 blDmemDescLoadOff; + /*! 
+ * Description of the image + */ + RM_FLCN_BL_IMG_HEADER blImgHeader; +} RM_FLCN_BL_DESC, *PRM_FLCN_BL_DESC; + +#endif // RMFLCNBL_H diff --git a/src/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/src/nvidia/arch/nvalloc/common/inc/rmgspseq.h new file mode 100644 index 000000000..cee7e658b --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/rmgspseq.h @@ -0,0 +1,202 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Top-level header-file that defines the GSP sequencer structures + */ + +#ifndef _RMGSPSEQ_H_ +#define _RMGSPSEQ_H_ + +/*! + * GSP sequencer structures and defines + * + * Here are the supported opcodes. + * the sequencer buffer is a stream of commands composed of + * an op-code directly followed by the exact number of arguments it needs. + * + * The supported op-codes are: + * GSP_SEQUENCER_BUFFER_OPCODE_REG_WRITE: + * arg[0]: the register offset + * arg[1]: the register value + * + * GSP_SEQUENCER_BUFFER_OPCODE_REG_MODIFY: + * arg[0]: the register offset + * arg[1]: the mask where to apply the modification + * arg[2]: the value to apply. The value needs to be shifted to fit inside the mask, + * + * GSP_SEQUENCER_BUFFER_OPCODE_REG_POLL: + * arg[0]: the register offset + * arg[1]: the mask where to apply the modification + * arg[2]: the value to apply. The value needs to be shifted to fit inside the mask. + * arg[3]: the timeout in MS + * arg[4]: an unique error code from GSP_SEQUENCER_BUFFER_ERR. Helps map to the failing GSP code. + * + * GSP_SEQUENCER_BUFFER_OPCODE_DELAY_US + * arg[0]: the delay in micoseconds. + * + * GSP_SEQUENCER_REG_STORE + * This operation stores the specified register at the specified index in + * the sequencer buffer register storage area. + * arg[0]: the reg offset to store in the save area + * arg[1]: index in save area to store value of reg offset + * + * GSP_SEQUENCER_CORE_RESET + * This operation resets the core. This operation takes no arguments. + * + * GSP_SEQUENCER_CORE_START + * This operation starts the core. This operation takes no arguments. + * + * GSP_SEQUENCER_CORE_WAIT_FOR_HALT + * This operation waits for the core to halt after completing execution. + * This operation takes no arguments. + * + * GSP_SEQUENCER_CORE_RESUME + * This operation resumes the core in preparation for switching back to + * the GSP-RM. 
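+ *
+ * As an illustrative sketch of the stream layout (the register offset and
+ * values are hypothetical, not taken from any real sequence), a REG_WRITE
+ * followed by a DELAY_US occupies consecutive 32-bit words in the buffer:
+ *
+ *   GSP_SEQ_BUF_OPCODE_REG_WRITE, <reg offset>, <value>,
+ *   GSP_SEQ_BUF_OPCODE_DELAY_US, <delay in microseconds>
+ *
+ * The GSP_SEQ_FLD_WR_DRF_DEF() macro defined further below builds such a
+ * REG_MODIFY command and hands it to gspAppendToSequencerBuffer().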
+ */ +typedef enum GSP_SEQ_BUF_OPCODE +{ + GSP_SEQ_BUF_OPCODE_REG_WRITE = 0, + GSP_SEQ_BUF_OPCODE_REG_MODIFY, + GSP_SEQ_BUF_OPCODE_REG_POLL, + GSP_SEQ_BUF_OPCODE_DELAY_US, + GSP_SEQ_BUF_OPCODE_REG_STORE, + GSP_SEQ_BUF_OPCODE_CORE_RESET, + GSP_SEQ_BUF_OPCODE_CORE_START, + GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT, + GSP_SEQ_BUF_OPCODE_CORE_RESUME, +} GSP_SEQ_BUF_OPCODE; + +#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \ + ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \ + (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \ + /* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_START */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \ + /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \ + 0) + +// The size of the structure must be DWord aligned! +typedef struct +{ + NvU32 addr; + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_REG_WRITE; + +// The size of the structure must be DWord aligned! +typedef struct +{ + NvU32 addr; + NvU32 mask; + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY; + +// The size of the structure must be DWord aligned! +typedef struct +{ + NvU32 addr; + NvU32 mask; + NvU32 val; + NvU32 timeout; + NvU32 error; +} GSP_SEQ_BUF_PAYLOAD_REG_POLL; + +// The size of the structure must be DWord aligned! +typedef struct +{ + NvU32 val; +} GSP_SEQ_BUF_PAYLOAD_DELAY_US; + +// The size of the structure must be DWord aligned! +typedef struct +{ + NvU32 addr; + NvU32 index; +} GSP_SEQ_BUF_PAYLOAD_REG_STORE; + +typedef struct GSP_SEQUENCER_BUFFER_CMD +{ + GSP_SEQ_BUF_OPCODE opCode; + union + { + GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite; + GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify; + GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll; + GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs; + GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore; + } payload; +} GSP_SEQUENCER_BUFFER_CMD; + +// +// These error codes printed by the client help us +// see to which GSP uCode line it corresponds. +// +typedef enum GSP_SEQUENCER_BUFFER_ERR +{ + GSP_SEQUENCER_BUFFER_ERR_OK = 0, + GSP_SEQUENCER_BUFFER_ERR_TIMEOUT1, + GSP_SEQUENCER_BUFFER_ERR_TIMEOUT2, + GSP_SEQUENCER_BUFFER_ERR_TIMEOUT3 +} GSP_SEQUENCER_BUFFER_ERR; + + +// Sequencer implementation of FLD_WR_DRF_DEF() +#define GSP_SEQ_FLD_WR_DRF_DEF(gpu, gsp, d, r, f, c) \ +{ \ + GSP_SEQUENCER_BUFFER_CMD cmd; \ + cmd.opCode = GSP_SEQ_BUF_OPCODE_REG_MODIFY; \ + cmd.payload.regModify.addr = NV##d##r; \ + cmd.payload.regModify.mask = DRF_MASK(NV##d##r##f) << DRF_SHIFT(NV##d##r##f); \ + cmd.payload.regModify.val = DRF_DEF(d, r, f, c); \ + (void)gspAppendToSequencerBuffer(gpu, gsp, &cmd); \ +} + +/*! + * Forward references + */ +struct rpc_run_cpu_sequencer_v17_00; + +/*! + * Structure tracking all information relevant to GSP sequencer bufferfor GSP-RM + */ +typedef struct +{ + /*! Pointer to RM-GSP CPU sequencer parameter block */ + struct rpc_run_cpu_sequencer_v17_00 *pRunCpuSeqParam; +} GSP_SEQUENCER_BUFFER; + +/*! + * RM-GSP sequencer buffer register macros. 
+ * GSP_SEQ_BUF_REG_SAVE_SIZE : specifies size of save area in reg values + * GSP_SEQ_BUF_REG_SAVE_MAILBOX0 : index for saving of mailbox0 reg + * GSP_SEQ_BUF_REG_SAVE_MAILBOX1 : index for saving of mailbox1 reg + */ +#define GSP_SEQ_BUF_REG_SAVE_SIZE (8) +#define GSP_SEQ_BUF_REG_SAVE_MAILBOX0 (0) +#define GSP_SEQ_BUF_REG_SAVE_MAILBOX1 (1) + +#endif // _RMGSPSEQ_H_ diff --git a/src/nvidia/arch/nvalloc/common/inc/rmifrif.h b/src/nvidia/arch/nvalloc/common/inc/rmifrif.h new file mode 100644 index 000000000..048fcf8bd --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/rmifrif.h @@ -0,0 +1,124 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2012 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMIFRIF_H_ +#define _RMIFRIF_H_ + +/*! + * @file rmifrif.h + * @brief Defines structures and interfaces common between RM and + * Init-From-Rom (IFR). + * + * For systems supporting GC6 that have on-board VBIOS ROMs, IFR is used + * to expedite several parts of GC6 exit in parallel with PEX init. + * + * After running devinit using a PMU ucode image loaded from the ROM itself, + * parts of RM stateLoad can be done using RM's ucode image. This is + * achieved by loading RM PMU ucode directly from FB. The primary difficulties + * are how to find RM's PMU ucode and how to bootstrap it. + * + * We use the simple approach of allocating a fixed buffer near the + * top of FB that contains the information required to bootstrap RM's PMU + * image. This buffer is called the RM_IFR_GC6_CTX. + * + * The buffer is allocated within RM's reserved memory space, directly before + * the VBIOS workspace (if any is present). Since the VBIOS workspace is + * always a multiple of 64K, RM enforces that the offset between top of memory + * and the end of the buffer is 64K. This way the IFR code can start + * from the top of memory and search downwards in 64K decrements. + * + * A small header is placed at the end of the buffer which contains a + * string signature identifying the buffer and other data needed to find the + * remaining context data. 
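+ *
+ * A minimal sketch of that search, assuming a hypothetical FB read helper
+ * (the real IFR implementation is not reproduced here; the resulting layout
+ * is shown in the diagram below):
+ *
+ *   for (off  = RM_IFR_GC6_CTX_END_OFFSET_ALIGNMENT;
+ *        off <= RM_IFR_GC6_CTX_END_OFFSET_MAX;
+ *        off += RM_IFR_GC6_CTX_END_OFFSET_ALIGNMENT)
+ *   {
+ *       // The candidate header ends "off" bytes below the top of FB.
+ *       readFb(fbTop - off - sizeof(RM_IFR_GC6_CTX_HDR), &hdr, sizeof(hdr));
+ *       if (memcmp(hdr.signature, RM_IFR_GC6_CTX_SIGNATURE, 12) == 0)
+ *           break;   // found the GC6 context buffer
+ *   }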
+ * + * Top_Of-FB /---------------------\ <- + * | | \ + * | (VBIOS_Workspace) | | END_OFFSET + * | | / + * |---------------------| <- + | | \ + * | GSP FW (if present) | | pFbHalPvtInfo->gspFwSizeBytes + * | | / + * |---------------------| <- + * | RM_IFR_GC6_CTX_HDR | \ + * |---------------------| | + * | (Padding) | | RM_IFR_GC6_CTX_HDR.bufferSize + * |---------------------| | + * | Sequence Data | / + * |---------------------| <- + * | | + * | | + * | | + * | | + * 0x00000000 \---------------------/ + * + * To simplify the RM PMU bootstrap process and decrease IFR maintainence + * cost, the bootstrap process is encoded as a sequence script, leveraging + * a small subset of RM's PMU_SEQ_INST interface (see pmuseqinst.h). + * Register writes are captured during the initial (CPU-driven) RM PMU bootstrap + * and saved into a sequence for replay during GC6 exit. + * + * Only the following opcodes are supported currently: + * NV_PMU_SEQ_WRITE_REG_OPC - (multi-)register write + * NV_PMU_SEQ_EXIT_OPC - sequence done + * + */ + +/*! + * Header structure which identifies the GC6 context buffer. + */ +typedef struct +{ + NvU8 signature[12]; // RM_IFR_GC6_CTX_SIGNATURE + NvU32 bufferSize; // Size of the entire context buffer in bytes + NvU32 seqSizeWords; // Number of 32-bit words of sequence data + NvU32 checksum; // 32-bit chunk checksum of the sequence data +} RM_IFR_GC6_CTX_HDR, *PRM_IFR_GC6_CTX_HDR; + +/*! + * String signature that IFR searches for to find the GC6 context buffer. + */ +#define RM_IFR_GC6_CTX_SIGNATURE "GC6_CTX_HDR" // 12 bytes + +/*! + * Alignment of the offset between top of memory and the end of the + * GC6 context buffer (which is also the end of the header). + */ +#define RM_IFR_GC6_CTX_END_OFFSET_ALIGNMENT 0x10000 // 64KB + +/*! + * Maximum offset between top of memory and the end of the + * GC6 context buffer. This is meant to be a loose upper bound preventing + * scanning of the whole of memory (e.g. when something goes wrong). + */ +#define RM_IFR_GC6_CTX_END_OFFSET_MAX 0x1000000 // 16MB +#define RM_IFR_GC6_CTX_END_OFFSET_MAX_WITH_GSP 0x10000000 // 256MB + +/*! + * Maximum size of the context data in bytes. + * This is limited by FECS falcon DMEM size (4K on Kepler). + * The buffer must fit within DMEM together with stack and other global data. + */ +#define RM_IFR_GC6_CTX_DATA_MAX_SIZE 2048 // 2KB + +#endif // _RMIFRIF_H_ diff --git a/src/nvidia/arch/nvalloc/common/inc/rmlsfm.h b/src/nvidia/arch/nvalloc/common/inc/rmlsfm.h new file mode 100644 index 000000000..d7c17f85e --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/rmlsfm.h @@ -0,0 +1,515 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef RMLSFM_H +#define RMLSFM_H + +/*****************************************************************************/ +/* This file is shared between ACR, SEC2 Binaries */ +/* Do not update this file without updating ACR/SEC2 */ +/*****************************************************************************/ + +/*! + * @file rmlsfm.h + * @brief Top-level header-file that defines Light Secure Falcon Managment + SW shared interfaces. + */ + +/*! + * READ/WRITE masks for WPR region + */ +#define LSF_WPR_REGION_RMASK (0xCU) // Readable only from level 2 and 3 client +#define LSF_WPR_REGION_WMASK (0xCU) // Writable only from level 2 and 3 client +#define LSF_WPR_REGION_RMASK_SUB_WPR_ENABLED (0x8) // Readable only from level 3 client +#define LSF_WPR_REGION_WMASK_SUB_WPR_ENABLED (0x8) // Writable only from level 3 client +#define LSF_WPR_REGION_ALLOW_READ_MISMATCH_NO (0x0) // Disallow read mis-match for all clients +#define LSF_WPR_REGION_ALLOW_WRITE_MISMATCH_NO (0x0) // Disallow write mis-match for all clients + +/*! + * READ mask for WPR region on Tegra + * This is required until we update tegra binaries, Bug 200281517 + * TODO: dgoyal - Remove this once tegra binaries are updated + */ +#define LSF_WPR_REGION_RMASK_FOR_TEGRA (0xFU) + +/*! + * Expected REGION ID to be used for the unprotected FB region (region that + * does not have read or write protections) + */ +#define LSF_UNPROTECTED_REGION_ID (0x0U) + +/*! + * Expected REGION ID to be used for the WPR region for the falcon microcode (includes data). + * ACR allots client requests to each region based on read/write masks and it is supposed + * to provide first priority to requests from LSFM. Providing first priority will naturally assign + * region ID 1 to LSFM and this define will provide a way for different parties to sanity check + * this fact. Also there are other falcons (FECS/video falcons) which depends on this define, so please + * be aware while modifying this. + */ +#define LSF_WPR_EXPECTED_REGION_ID (0x1U) + +/*! + * Expected REGION ID to be used for the unused WPR region. + */ +#define LSF_WPR_UNUSED_REGION_ID (0x2U) + +/*! + * Invalid LS falcon subWpr ID + */ +#define LSF_SUB_WPR_ID_INVALID (0xFFFFFFFFU) + +/*! + * Expected REGION ID to be used for the VPR region. + */ +#define LSF_VPR_REGION_ID (0x3U) + +/*! + * Size of the separate bootloader data that could be present in WPR region. + */ +#define LSF_LS_BLDATA_EXPECTED_SIZE (0x100U) + +/*! + * since we dont check signatures in GC6 exit, we need to hardcode the WPR offset + */ +#define LSF_WPR_EXPECTED_OFFSET (0x0U) + +/*! + * CTXDMA to be used while loading code/data in target falcons + */ +#define LSF_BOOTSTRAP_CTX_DMA_FECS (0x0) + +/*! + * Context DMA ID 6 is reserved for Video UCODE + */ +#define LSF_BOOTSTRAP_CTX_DMA_VIDEO (0x6) +#define LSF_BOOTSTRAP_CTX_DMA_BOOTSTRAP_OWNER (0x0) +#define LSF_BOOTSTRAP_CTX_DMA_FBFLCN (0x0) + +/*! + * Falcon Id Defines + * Defines a common Light Secure Falcon identifier. + * Codesigning infra. 
assumes LSF_FALCON_ID_ prefix for units, + * Changes to the define needs to be reflected in path [1] + * For new Falcon Id adding, we need to append to the end; + * don't insert the new falcon Id in the middle. + */ +#define LSF_FALCON_ID_PMU (0U) +#define LSF_FALCON_ID_DPU (1U) +#define LSF_FALCON_ID_GSPLITE LSF_FALCON_ID_DPU +#define LSF_FALCON_ID_FECS (2U) +#define LSF_FALCON_ID_GPCCS (3U) +#define LSF_FALCON_ID_NVDEC (4U) +#define LSF_FALCON_ID_NVENC (5U) +#define LSF_FALCON_ID_NVENC0 (5U) +#define LSF_FALCON_ID_NVENC1 (6U) +#define LSF_FALCON_ID_SEC2 (7U) +#define LSF_FALCON_ID_NVENC2 (8U) +#define LSF_FALCON_ID_MINION (9U) +#define LSF_FALCON_ID_FBFALCON (10U) +#define LSF_FALCON_ID_XUSB (11U) +#define LSF_FALCON_ID_GSP_RISCV (12U) +#define LSF_FALCON_ID_PMU_RISCV (13U) +#define LSF_FALCON_ID_SOE (14U) +#define LSF_FALCON_ID_NVDEC1 (15U) +#define LSF_FALCON_ID_OFA (16U) +#define LSF_FALCON_ID_SEC2_RISCV (17U) +#define LSF_FALCON_ID_NVDEC_RISCV (18U) +#define LSF_FALCON_ID_NVDEC_RISCV_EB (19U) +#define LSF_FALCON_ID_NVJPG (20U) +#define LSF_FALCON_ID_END (21U) + +#define LSF_FALCON_ID_INVALID (0xFFFFFFFFU) + +// +// ************************ NOTIFICATION ********************************* +// In case anyone needs to add new LSF falconId, please must calculate +// WPR header size per LSF_FALCON_ID_END. RM needs to call lsfmGetWprHeaderSizeMax_HAL +// to align with acrReadSubWprHeader_HAL in ACR. Otherwise, ACR can't get correct +// address to read sub wpr headers. +// We observer in case LSF_FALCON_ID_END > 32 will cause SEC2 IMEM tag missing error; +// but don't get the root cause yet. +// +#define LSF_FALCON_ID_END_15 (15U) +#define LSF_FALCON_ID_END_17 (17U) +#define LSF_FALCON_ID_END_18 (18U) +#define LSF_FALCON_ID_END_21 (21U) + +#define LSF_FALCON_INSTANCE_DEFAULT_0 (0x0) +#define LSF_FALCON_INSTANCE_COUNT_DEFAULT_1 (0x1) + +// Currently max supported instance is 8 for FECS/GPCCS SMC +#define LSF_FALCON_INSTANCE_FECS_GPCCS_MAX (0x8) +#define LSF_FALCON_INSTANCE_INVALID (0xFFFFFFFFU) +#define LSF_FALCON_INDEX_MASK_DEFAULT_0 (0x0) + + + +/*! + * Size in entries of the ucode descriptor's dependency map. + * This used to be LSF_FALCON_ID_END, but since that had to grow and we did not want to break any + * existing binaries, they had to be split. + * + * Increasing this number should be done with care. + */ +#define LSF_FALCON_DEPMAP_SIZE (11) + +/*! + * Falcon Binaries version defines + */ +#define LSF_FALCON_BIN_VERSION_INVALID (0xFFFFFFFFU) + + +/*! + * Light Secure Falcon Ucode Description Defines + * This stucture is prelim and may change as the ucode signing flow evolves. + */ +typedef struct +{ + NvU8 prdKeys[2][16]; + NvU8 dbgKeys[2][16]; + NvU32 bPrdPresent; + NvU32 bDbgPresent; + NvU32 falconId; + NvU32 bSupportsVersioning; + NvU32 version; + NvU32 depMapCount; + NvU8 depMap[LSF_FALCON_DEPMAP_SIZE * 2 * 4]; + NvU8 kdf[16]; +} LSF_UCODE_DESC, *PLSF_UCODE_DESC; + +/*! + * Light Secure WPR Header + * Defines state allowing Light Secure Falcon bootstrapping. + * + * falconId - LS falcon ID + * lsbOffset - Offset into WPR region holding LSB header + * bootstrapOwner - Bootstrap OWNER (either PMU or SEC2) + * bLazyBootstrap - Skip bootstrapping by ACR + * status - Bootstrapping status + */ +typedef struct +{ + NvU32 falconId; + NvU32 lsbOffset; + NvU32 bootstrapOwner; + NvU32 bLazyBootstrap; + NvU32 binVersion; + NvU32 status; +} LSF_WPR_HEADER, *PLSF_WPR_HEADER; + +/*! 
+ * LSF shared SubWpr Header + * + * useCaseId - Shared SubWpr se case ID (updated by RM) + * startAddr - start address of subWpr (updated by RM) + * size4K - size of subWpr in 4K (updated by RM) + */ +typedef struct +{ + NvU32 useCaseId; + NvU32 startAddr; + NvU32 size4K; +} LSF_SHARED_SUB_WPR_HEADER, *PLSF_SHARED_SUB_WPR_HEADER; + +// Shared SubWpr use case IDs +typedef enum +{ + LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_FRTS_VBIOS_TABLES = 1, + LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_PLAYREADY_SHARED_DATA = 2, + LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_APM_RTS = 3 +} LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_ENUM; + +#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_APM_RTS +#define LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_INVALID (0xFFFFFFFFU) + +#define MAX_SUPPORTED_SHARED_SUB_WPR_USE_CASES LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX + +// +// Static sizes of shared subWPRs +// Minimum granularity supported is 4K +// +#define LSF_SHARED_DATA_SUB_WPR_FRTS_VBIOS_TABLES_SIZE_IN_4K (0x100) // 1MB in 4K +#define LSF_SHARED_DATA_SUB_WPR_PLAYREADY_SHARED_DATA_SIZE_IN_4K (0x1) // 4K +#define LSF_SHARED_DATA_SUB_WPR_APM_RTS_SIZE_IN_4K (0x1) // 4K + +/*! + * Bootstrap Owner Defines + */ +#define LSF_BOOTSTRAP_OWNER_PMU (LSF_FALCON_ID_PMU) +#define LSF_BOOTSTRAP_OWNER_SEC2 (LSF_FALCON_ID_SEC2) +#define LSF_BOOTSTRAP_OWNER_GSPLITE (LSF_FALCON_ID_GSPLITE) +#define LSF_BOOTSTRAP_OWNER_DEFAULT LSF_BOOTSTRAP_OWNER_PMU + +/*! + * Image Status Defines + */ +#define LSF_IMAGE_STATUS_NONE (0U) +#define LSF_IMAGE_STATUS_COPY (1U) +#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED (2U) +#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED (3U) +#define LSF_IMAGE_STATUS_VALIDATION_DONE (4U) +#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED (5U) +#define LSF_IMAGE_STATUS_BOOTSTRAP_READY (6U) +#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED (7U) + +/*! + * Light Secure Bootstrap Header + * Defines state allowing Light Secure Falcon bootstrapping. + * + * signature - Code/data signature details for this LS falcon + * ucodeOffset - Offset into WPR region where UCODE is located + * ucodeSize - Size of ucode + * dataSize - Size of ucode data + * blCodeSize - Size of bootloader that needs to be loaded by bootstrap owner + * blImemOffset - BL starting virtual address. Need for tagging. 
+ * blDataOffset - Offset into WPR region holding the BL data + * blDataSize - Size of BL data + * appCodeOffset - Offset into WPR region where Application UCODE is located + * appCodeSize - Size of Application UCODE + * appDataOffset - Offset into WPR region where Application DATA is located + * appDataSize - Size of Application DATA + * blLoadCodeAt0 - Load BL at 0th IMEM offset + * bSetVACtx - Make sure to set the code/data loading CTX DMA to be virtual before exiting + * bDmaReqCtx - This falcon requires a ctx before issuing DMAs + * bForcePrivLoad- Use priv loading method instead of bootloader/DMAs + */ + +#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0 0:0 +#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_FALSE 0 +#define NV_FLCN_ACR_LSF_FLAG_LOAD_CODE_AT_0_TRUE 1 +#define NV_FLCN_ACR_LSF_FLAG_SET_VA_CTX 1:1 +#define NV_FLCN_ACR_LSF_FLAG_SET_VA_CTX_FALSE 0 +#define NV_FLCN_ACR_LSF_FLAG_SET_VA_CTX_TRUE 1 +#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX 2:2 +#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_FALSE 0 +#define NV_FLCN_ACR_LSF_FLAG_DMACTL_REQ_CTX_TRUE 1 +#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD 3:3 +#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_FALSE 0 +#define NV_FLCN_ACR_LSF_FLAG_FORCE_PRIV_LOAD_TRUE 1 +typedef struct +{ + LSF_UCODE_DESC signature; + NvU32 ucodeOffset; + NvU32 ucodeSize; + NvU32 dataSize; + NvU32 blCodeSize; + NvU32 blImemOffset; + NvU32 blDataOffset; + NvU32 blDataSize; + NvU32 appCodeOffset; + NvU32 appCodeSize; + NvU32 appDataOffset; + NvU32 appDataSize; + NvU32 flags; + /* + * TODO: Uncomment this once Sanket's changes + * of HALifying is done + NvU32 monitorCodeOffset; + NvU32 monitorDataOffset; + NvU32 manifestOffset; + */ +} LSF_LSB_HEADER, *PLSF_LSB_HEADER; + + +/*! + * Light Secure WPR Content Alignments + */ +#define LSF_WPR_HEADER_ALIGNMENT (256U) +#define LSF_SUB_WPR_HEADER_ALIGNMENT (256U) +#define LSF_LSB_HEADER_ALIGNMENT (256U) +#define LSF_BL_DATA_ALIGNMENT (256U) +#define LSF_BL_DATA_SIZE_ALIGNMENT (256U) +#define LSF_BL_CODE_SIZE_ALIGNMENT (256U) +#define LSF_DATA_SIZE_ALIGNMENT (256U) +#define LSF_CODE_SIZE_ALIGNMENT (256U) + +// MMU excepts subWpr sizes in units of 4K +#define SUB_WPR_SIZE_ALIGNMENT (4096U) + +/*! + * Maximum WPR Header size + */ +#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX (NV_ALIGN_UP((sizeof(LSF_WPR_HEADER) * LSF_FALCON_ID_END), LSF_WPR_HEADER_ALIGNMENT)) +#define LSF_LSB_HEADER_TOTAL_SIZE_MAX (NV_ALIGN_UP(sizeof(LSF_LSB_HEADER), LSF_LSB_HEADER_ALIGNMENT)) + +// +// PMU OBJACR_ALIGNED_256 size will vary with LSF_FALCON_ID_END. +// PMU could run out of DMEM in case we increase LSF_FALCON_ID_END more and more. +// The PMU supports the ACR task on GM20X_thru_VOLTA profiles only. +// In order to prevent LSF_FALCON_ID_END changes to affect older / shipped PMU ucodes (increase of DMEM footprint) +// adding PMU specific ***_END define capturing value covering all PMU profiles that this with the ACR task. +// +#define LSF_FALCON_ID_END_PMU (LSF_FALCON_ID_FBFALCON + 1) +#define LSF_WPR_HEADERS_TOTAL_SIZE_MAX_PMU (NV_ALIGN_UP((sizeof(LSF_WPR_HEADER) * LSF_FALCON_ID_END_PMU), LSF_WPR_HEADER_ALIGNMENT)) + +// Maximum SUB WPR header size +#define LSF_SUB_WPR_HEADERS_TOTAL_SIZE_MAX (NV_ALIGN_UP((sizeof(LSF_SHARED_SUB_WPR_HEADER) * LSF_SHARED_DATA_SUB_WPR_USE_CASE_ID_MAX), LSF_SUB_WPR_HEADER_ALIGNMENT)) + +/*! + * For the ucode surface alignment, We align to RM_PAGE_SIZE because of + * busMapRmAperture issues, not because of Falcon ucode alignment requirements + * which currently are that it be at least 256. 
+ */ +#define LSF_UCODE_DATA_ALIGNMENT RM_PAGE_SIZE + +/*! + * ACR Descriptors used by ACR UC + */ + +/*! + * Supporting maximum of 2 regions. + * This is needed to pre-allocate space in DMEM + */ +#define RM_FLCN_ACR_MAX_REGIONS (2) +#define LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE (0x200) + +/*! + * startAddress - Starting address of region + * endAddress - Ending address of region + * regionID - Region ID + * readMask - Read Mask + * writeMask - WriteMask + * clientMask - Bit map of all clients currently using this region + * shadowMemStartAddress- FB location from where contents need to be copied to startAddress + */ +typedef struct _def_acr_dmem_region_prop +{ + NvU32 startAddress; + NvU32 endAddress; + NvU32 regionID; + NvU32 readMask; + NvU32 writeMask; + NvU32 clientMask; + NvU32 shadowMemStartAddress; +} RM_FLCN_ACR_REGION_PROP, *PRM_FLCN_ACR_REGION_PROP; + + +/*! + * noOfRegions - Number of regions used by RM. + * regionProps - Region properties + */ +typedef struct _def_acr_regions +{ + NvU32 noOfRegions; + RM_FLCN_ACR_REGION_PROP regionProps[RM_FLCN_ACR_MAX_REGIONS]; +} RM_FLCN_ACR_REGIONS, *PRM_FLCN_ACR_REGIONS; + +/*! + * bVprEnabled : When set, ACR_LOCKDOWN phase programs VPR range. Needs to be + : NvU32 because of alignment + * vprStartAddress : Start address of VPR region. SEC2 binary updates this value + * vprEndAddress : End address of VPR region. SEC2 binary updates this value + * hdcpPolicies : VPR display policies. SEC2 binary updates this value + */ +typedef struct _def_acr_vpr_dmem_desc +{ + NvU32 bVprEnabled; + NvU32 vprStartAddress; + NvU32 vprEndAddress; + NvU32 hdcpPolicies; +} ACR_BSI_VPR_DESC, *PACR_BSI_VPR_DESC; + +/*! + * reservedDmem - When the bootstrap owner has done bootstrapping other falcons, + * and need to switch into LS mode, it needs to have its own actual + * DMEM image copied into DMEM as part of LS setup. If ACR desc is at + * location 0, it will definitely get overwritten causing data corruption. + * Hence we are reserving 0x200 bytes to give room for any loading data. + * NOTE: This has to be the first member always + * signature - Signature of ACR ucode. + * wprRegionID - Region ID holding the WPR header and its details + * wprOffset - Offset from the WPR region holding the wpr header + * regions - Region descriptors + * ucodeBlobBase- Used for Tegra, stores non-WPR start address where kernel stores ucode blob + * ucodeBlobSize- Used for Tegra, stores the size of the ucode blob + */ +typedef struct _def_acr_dmem_desc +{ + NvU32 signatures[4]; + NvU32 wprRegionID; + NvU32 wprOffset; + NvU32 mmuMemoryRange; + RM_FLCN_ACR_REGIONS regions; + NvU32 ucodeBlobSize; + // uCodeBlobBase is moved after ucodeBlobSize to inherently align to qword (8 bytes) + NvU64 NV_DECLARE_ALIGNED(ucodeBlobBase, 8); + + /*! + * Do not change the offset of this descriptor as it shared between + * ACR_REGION_LOCKDOWN HS binary and SEC2. Any change in this structure + * need recompilation of SEC2 and ACR_LOCKDOWN HS binary + */ + ACR_BSI_VPR_DESC vprDesc; +} RM_FLCN_ACR_DESC, *PRM_FLCN_ACR_DESC; + +/*! 
+* Hub keys/nonce Structure in BSI +*/ +#define MAX_SFBHUB_ENCRYPTION_REGION_KEY_SIZE 4 + +typedef struct _def_acr_hub_scratch_data +{ + NvU32 key[MAX_SFBHUB_ENCRYPTION_REGION_KEY_SIZE]; + NvU32 nonce[MAX_SFBHUB_ENCRYPTION_REGION_KEY_SIZE]; +} ACR_BSI_HUB_DESC, *PACR_BSI_HUB_DESC; + +#define MAX_HUB_ENCRYPTION_REGION_COUNT 3 +typedef struct _def_acr_hub_scratch_array +{ + ACR_BSI_HUB_DESC entries[MAX_HUB_ENCRYPTION_REGION_COUNT]; +} ACR_BSI_HUB_DESC_ARRAY, *PACR_BSI_HUB_DESC_ARRAY; + +typedef struct _def_acr_reserved_dmem +{ + NvU32 reservedDmem[(LSF_BOOTSTRAP_OWNER_RESERVED_DMEM_SIZE/4)]; // Always first.. +} ACR_RESERVED_DMEM, *PACR_RESERVED_DMEM; + +#define NV_FLCN_ACR_DESC_FLAGS_SIG_VERIF 0:0 +#define NV_FLCN_ACR_DESC_FLAGS_SIG_VERIF_DISABLE 0 +#define NV_FLCN_ACR_DESC_FLAGS_SIG_VERIF_ENABLE 1 + +/*! + * Size of ACR phase in dword + */ +#define ACR_PHASE_SIZE_DWORD sizeof(RM_FLCN_ACR_DESC)/sizeof(NvU32) + +/*! + * Falcon Mode Tokens + * This is the value logged to a mailbox register to indicate that the + * falcon isn't booted in secure mode. + */ +#define LSF_FALCON_MODE_TOKEN_FLCN_INSECURE (0xDEADDEADU) + + +// LS encryption context, to store data to decrypt LS images. +#define LS_ENCRYPTION_AES128_CBC_IV_SIZE_BYTE (16) + +typedef struct +{ + NvU8 bLsEncrypted; + NvU8 rsvd[3]; + NvU8 lsEncIV[LS_ENCRYPTION_AES128_CBC_IV_SIZE_BYTE]; +} LSF_ENCRYPTION_CONTEXT, *PLSF_ENCRYPTION_CONTEXT; + +#endif // RMLSFM_H + diff --git a/src/nvidia/arch/nvalloc/common/inc/rmpbicmdif.h b/src/nvidia/arch/nvalloc/common/inc/rmpbicmdif.h new file mode 100644 index 000000000..0efac2fb2 --- /dev/null +++ b/src/nvidia/arch/nvalloc/common/inc/rmpbicmdif.h @@ -0,0 +1,140 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Shared postbox interface defines for RM and PMU. + */ + +#ifndef RMPBICMDIF_H +#define RMPBICMDIF_H + +/* + * Define the status of postbox interface at different instances. + * + * The values should be in accordance to the spec and must not be changed. + * A new PBI command must be submitted with the status NV_PBI_COMMAND_STATUS_UNDEFINED. 
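+ *
+ * As an illustrative sketch (the particular field values chosen here are
+ * hypothetical), a client could pack a GET_CAPABILITIES command word with
+ * the PBI_SET_COMMAND_PARAMS macro defined further below:
+ *
+ *   NvU32 cmd;
+ *   PBI_SET_COMMAND_PARAMS(NV_PBI_COMMAND_STATUS_UNDEFINED,
+ *                          NV_PBI_COMMAND_RSVD_0_VAL,
+ *                          0,                                  // buffer index
+ *                          0,                                  // buffer size
+ *                          NV_PBI_COMMAND_FUNC_ID_GET_CAPABILITIES,
+ *                          NV_PBI_COMMAND_RSVD_1_VAL,
+ *                          NV_PBI_COMMAND_SYS_NOTIFY_FALSE,
+ *                          NV_PBI_COMMAND_DRV_NOTIFY_TRUE,
+ *                          NV_PBI_COMMAND_INTERRUPT_TRUE,
+ *                          cmd);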
+ */ + +#define NV_PBI_COMMAND_STATUS 7:0 +#define NV_PBI_COMMAND_STATUS_UNDEFINED 0x00 // command submitted to PMU +#define NV_PBI_COMMAND_STATUS_SUCCESS 0x01 // command successfully completed by PMU +#define NV_PBI_COMMAND_STATUS_PENDING 0x02 // command accepted by PMU +#define NV_PBI_COMMAND_STATUS_BUSY 0x03 // command processing in PMU +#define NV_PBI_COMMAND_STATUS_UNSPECIFIED_FAILURE 0x04 // unknown failure or hang +#define NV_PBI_COMMAND_STATUS_INVALID_ADDRESS 0x05 // invalid address submitted to PMU +#define NV_PBI_COMMAND_STATUS_MORE_DATA 0x06 // user needs to send more data to PMU +#define NV_PBI_COMMAND_STATUS_INVALID_COMMAND 0x07 // invalid command submitted + +/* + * This corresponds to reserved bits of command register + */ +#define NV_PBI_COMMAND_RSVD_0 15:8 +#define NV_PBI_COMMAND_RSVD_0_VAL 0x00 + +/* + * This specifies the dword index if client is sending multiple dwords + * for single Dword this should be zero + */ +#define NV_PBI_COMMAND_BUFFER_INDEX 19:16 + +/* + * This specifies the total no. of dwords passed by client + */ +#define NV_PBI_COMMAND_BUFFER_SIZE 23:20 + +/* + * These corresponds the different function ID's supported out of PBI + */ +#define NV_PBI_COMMAND_FUNC_ID 27:24 +#define NV_PBI_COMMAND_FUNC_ID_GET_CAPABILITIES (0) +#define NV_PBI_COMMAND_FUNC_ID_EXECUTE_ROUTINE (11) + +// +// Definitions for common 'Execute Routine' calls that are +// shared across all GPUs. +// +// Common routine IDs must be in the range 0x80 to 0xff. +// +// Get GID: +#define NV_PBI_EXECUTE_ROUTINE_GET_GID 0x80 +// Get Feature: +#define NV_PBI_EXECUTE_ROUTINE_GET_FEATURE 0x81 +#define NV_PBI_EXECUTE_ROUTINE_GET_FEATURE_EXCLUSION 5:0 +#define NV_PBI_EXECUTE_ROUTINE_GET_FEATURE_EXCLUSION_ALLOWED 0x2 + +/* + * This corresponds to reserved field of command register + */ +#define NV_PBI_COMMAND_RSVD_1 28:28 +#define NV_PBI_COMMAND_RSVD_1_VAL 0x00 + +/* + * If this bit is set system will be notified on command completion + */ +#define NV_PBI_COMMAND_SYS_NOTIFY 29:29 +#define NV_PBI_COMMAND_SYS_NOTIFY_TRUE 0x01 +#define NV_PBI_COMMAND_SYS_NOTIFY_FALSE 0x00 + +/* + * If this bit is set driver will be notified of command completion status + */ +#define NV_PBI_COMMAND_DRV_NOTIFY 30:30 +#define NV_PBI_COMMAND_DRV_NOTIFY_TRUE 0x01 +#define NV_PBI_COMMAND_DRV_NOTIFY_FALSE 0x00 + +/* + * Defines the interrupt state of the PBI command + */ +#define NV_PBI_COMMAND_INTERRUPT 31:31 +#define NV_PBI_COMMAND_INTERRUPT_TRUE 0x01 +#define NV_PBI_COMMAND_INTERRUPT_FALSE 0x00 + +/* + * This sets the different fields of command register + */ +#define PBI_SET_COMMAND_PARAMS(status, r0, index, sz, cmd, r1, sys, \ + drv, intr, val) \ +{ \ + val = DRF_NUM(_PBI, _COMMAND, _STATUS, status) | \ + DRF_NUM(_PBI, _COMMAND, _RSVD_0, r0) | \ + DRF_NUM(_PBI, _COMMAND, _BUFFER_INDEX, index) | \ + DRF_NUM(_PBI, _COMMAND, _BUFFER_SIZE, sz) | \ + DRF_NUM(_PBI, _COMMAND, _FUNC_ID, cmd) | \ + DRF_NUM(_PBI, _COMMAND, _RSVD_1, r1) | \ + DRF_NUM(_PBI, _COMMAND, _SYS_NOTIFY, sys) | \ + DRF_NUM(_PBI, _COMMAND, _DRV_NOTIFY, drv) | \ + DRF_NUM(_PBI, _COMMAND, _INTERRUPT, intr); \ +} + +/* + * postbox_clients_mutex_id + * + * Define the Mutex ID for different PBI clients + */ +enum postbox_clients_mutex_id +{ + PBI_CLIENT_NONE = 0x00, + PBI_CLIENT_DRIVER_PCIPBI_SHIM = 0x15 +}; + +#endif /* RMPBICMDIF_H */ diff --git a/src/nvidia/arch/nvalloc/unix/include/efi-console.h b/src/nvidia/arch/nvalloc/unix/include/efi-console.h new file mode 100644 index 000000000..7971565d8 --- /dev/null +++ 
b/src/nvidia/arch/nvalloc/unix/include/efi-console.h @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef EFI_CONSOLE_H +#define EFI_CONSOLE_H + +#include "gpu/disp/kern_disp_max.h" + +struct OBJGPU; + +typedef struct +{ + NvBool isDispStateSave; + + NvU32 activeDisplayId[OBJ_MAX_HEADS]; + + struct + { + NvU32 displayId; + + struct { + NvU32 index; + NvU32 subLinkMask; + } sorXBar; + + struct { + NvU32 linkBw; + NvU32 laneCount; + NvU32 linkCtl; + } displayPort; + } activeDfpState[OBJ_MAX_DFPS]; + NvU32 numDfps; + + struct + { + NvU32 coreChannelClass; + NvU32 cacheSize; + NvU32 *pCache; + } display; +} nv_efi_t; + +void RmSaveEFIDisplayState (OBJGPU *pGpu); +void RmRestoreEFIDisplayState (OBJGPU *pGpu); + +#endif // EFI_CONSOLE_H diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-caps.h b/src/nvidia/arch/nvalloc/unix/include/nv-caps.h new file mode 100644 index 000000000..35bbf7c0c --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-caps.h @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_CAPS_H_ +#define _NV_CAPS_H_ + +#include + +/* + * Opaque OS-specific struct; on Linux, this has member + * 'struct proc_dir_entry'. + */ +typedef struct nv_cap nv_cap_t; + +/* + * Creates directory named "capabilities" under the provided path. + * + * @param[in] path Absolute path + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_init(const char *path); + +/* + * Creates capability directory entry + * + * @param[in] parent_cap Parent capability directory + * @param[in] name Capability directory's name + * @param[in] mode Capability directory's access mode + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode); + +/* + * Creates capability file entry + * + * @param[in] parent_cap Parent capability directory + * @param[in] name Capability file's name + * @param[in] mode Capability file's access mode + * + * Returns a valid nv_cap_t upon success. Otherwise, returns NULL. + */ +nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode); + +/* + * Destroys capability entry + * + * @param[in] cap Capability entry + */ +void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap); + +/* + * Validates and duplicates the provided file descriptor + * + * @param[in] cap Capability entry + * @param[in] fd File descriptor to be validated + * + * Returns duplicate fd upon success. Otherwise, returns -1. + */ +int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd); + +/* + * Closes file descriptor + * + * This function should be used to close duplicate file descriptors + * returned by nv_cap_validate_and_dup_fd. + * + * @param[in] fd File descriptor to be validated + * + */ +void NV_API_CALL nv_cap_close_fd(int fd); + +#endif /* _NV_CAPS_H_ */ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h b/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h new file mode 100644 index 000000000..a8c0c0a1f --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-gpu-info.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_GPU_INFO_H_ +#define _NV_GPU_INFO_H_ + +typedef struct { + NvU32 gpu_id; + + struct { + NvU32 domain; + NvU8 bus, slot, function; + } pci_info; + + /* + * opaque OS-specific pointer; on Linux, this is a pointer to the + * 'struct device' for the GPU. + */ + void *os_device_ptr; +} nv_gpu_info_t; + +#define NV_MAX_GPUS 32 + +#endif /* _NV_GPU_INFO_H_ */ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h new file mode 100644 index 000000000..cb0b6a246 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl-numbers.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_NUMBERS_H +#define NV_IOCTL_NUMBERS_H + +/* NOTE: using an ioctl() number > 55 will overflow! */ +#define NV_IOCTL_MAGIC 'F' +#define NV_IOCTL_BASE 200 +#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0) +#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1) +#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6) +#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7) +#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9) +#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10) +#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11) +#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12) +#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13) +#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14) +#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17) + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h new file mode 100644 index 000000000..ffd1dee87 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-ioctl.h @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef NV_IOCTL_H +#define NV_IOCTL_H + +#include +#include + +typedef struct { + NvU32 domain; /* PCI domain number */ + NvU8 bus; /* PCI bus number */ + NvU8 slot; /* PCI slot number */ + NvU8 function; /* PCI function number */ + NvU16 vendor_id; /* PCI vendor ID */ + NvU16 device_id; /* PCI device ID */ +} nv_pci_info_t; + +/* + * ioctl()'s with parameter structures too large for the + * _IOC cmd layout use the nv_ioctl_xfer_t structure + * and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual + * size and user argument pointer into the RM, which + * will then copy it to/from kernel space in separate steps. + */ +typedef struct nv_ioctl_xfer +{ + NvU32 cmd; + NvU32 size; + NvP64 ptr NV_ALIGN_BYTES(8); +} nv_ioctl_xfer_t; + +typedef struct nv_ioctl_card_info +{ + NvBool valid; + nv_pci_info_t pci_info; /* PCI config information */ + NvU32 gpu_id; + NvU16 interrupt_line; + NvU64 reg_address NV_ALIGN_BYTES(8); + NvU64 reg_size NV_ALIGN_BYTES(8); + NvU64 fb_address NV_ALIGN_BYTES(8); + NvU64 fb_size NV_ALIGN_BYTES(8); + NvU32 minor_number; + NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */ +} nv_ioctl_card_info_t; + +/* alloc event */ +typedef struct nv_ioctl_alloc_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_alloc_os_event_t; + +/* free event */ +typedef struct nv_ioctl_free_os_event +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 fd; + NvU32 Status; +} nv_ioctl_free_os_event_t; + +/* status code */ +typedef struct nv_ioctl_status_code +{ + NvU32 domain; + NvU8 bus; + NvU8 slot; + NvU32 status; +} nv_ioctl_status_code_t; + +/* check version string */ +#define NV_RM_API_VERSION_STRING_LENGTH 64 + +typedef struct nv_ioctl_rm_api_version +{ + NvU32 cmd; + NvU32 reply; + char versionString[NV_RM_API_VERSION_STRING_LENGTH]; +} nv_ioctl_rm_api_version_t; + +#define NV_RM_API_VERSION_CMD_STRICT 0 +#define NV_RM_API_VERSION_CMD_RELAXED '1' +#define NV_RM_API_VERSION_CMD_OVERRIDE '2' + +#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0 +#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1 + +typedef struct nv_ioctl_query_device_intr +{ + NvU32 intrStatus NV_ALIGN_BYTES(4); + NvU32 status; +} nv_ioctl_query_device_intr; + +/* system parameters that the kernel driver may use for configuration */ +typedef struct nv_ioctl_sys_params +{ + NvU64 memblock_size NV_ALIGN_BYTES(8); +} nv_ioctl_sys_params_t; + +typedef struct 
nv_ioctl_register_fd +{ + int ctl_fd; +} nv_ioctl_register_fd_t; + +#define NV_DMABUF_EXPORT_MAX_HANDLES 128 + +typedef struct nv_ioctl_export_to_dma_buf_fd +{ + int fd; + NvHandle hClient; + NvU32 totalObjects; + NvU32 numObjects; + NvU32 index; + NvU64 totalSize NV_ALIGN_BYTES(8); + NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES]; + NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8); + NvU32 status; +} nv_ioctl_export_to_dma_buf_fd_t; + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h b/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h new file mode 100644 index 000000000..f44799b3b --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-kernel-rmapi-ops.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_KERNEL_RMAPI_OPS_H_ +#define _NV_KERNEL_RMAPI_OPS_H_ + +/* + * Define the RMAPI provided to kernel-level RM clients. + * + * Kernel-level RM clients should populate nvidia_kernel_rmapi_ops_t + * by assigning nvidia_kernel_rmapi_ops_t::op and the corresponding + * parameter structure in nvidia_kernel_rmapi_ops_t's params union. + * Then, pass a pointer to the nvidia_kernel_rmapi_ops_t to + * rm_kernel_rmapi_op(). + */ + +#include "nvtypes.h" +#include "nvos.h" + +typedef struct { + NvU32 op; /* One of the NV0[14]_XXXX operations listed below. 
*/ + + union { + NVOS00_PARAMETERS free; /* NV01_FREE */ + NVOS02_PARAMETERS allocMemory64; /* NV01_ALLOC_MEMORY */ + NVOS21_PARAMETERS alloc; /* NV04_ALLOC */ + NVOS32_PARAMETERS *pVidHeapControl; /* NV04_VID_HEAP_CONTROL */ + NVOS33_PARAMETERS mapMemory; /* NV04_MAP_MEMORY */ + NVOS34_PARAMETERS unmapMemory; /* NV04_UNMAP_MEMORY */ + NVOS39_PARAMETERS allocContextDma2; /* NV04_ALLOC_CONTEXT_DMA */ + NVOS46_PARAMETERS mapMemoryDma; /* NV04_MAP_MEMORY_DMA */ + NVOS47_PARAMETERS unmapMemoryDma; /* NV04_UNMAP_MEMORY_DMA */ + NVOS49_PARAMETERS bindContextDma; /* NV04_BIND_CONTEXT_DMA */ + NVOS54_PARAMETERS control; /* NV04_CONTROL*/ + NVOS55_PARAMETERS dupObject; /* NV04_DUP_OBJECT */ + NVOS57_PARAMETERS share; /* NV04_SHARE */ + NVOS61_PARAMETERS addVblankCallback; /* NV04_ADD_VBLANK_CALLBACK */ + } params; +} nvidia_kernel_rmapi_ops_t; + +#endif /* _NV_KERNEL_RMAPI_OPS_H_ */ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h b/src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h new file mode 100644 index 000000000..45f7d25be --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-nb-regs.h @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2007-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_NB_REGS_H_ +#define _NV_NB_REGS_H_ + +typedef struct +{ + NvU32 subsystem_vendor_id; + NvU32 subsystem_device_id; + NvU16 gpu_device_id; +} nv_nb_id_t; + +typedef struct +{ + NvU32 vendor_id; + const char *name; + NvU32 data; +} nv_nb_reg_t; + +/* + * nb_id_table contains the OEM vendor ID, the subsystem ID and the + * GPU device ID of the notebooks for which we need to enable + * vendor specific registry keys. nb_reg_table contains the vendor + * specific registry key values. The initVendorSpecificRegistry() + * function compares the present notebooks OEM subsystem ID and the + * GPU device ID with the values present in id_tables. If a match + * is found, initVendorSpecificRegistry() extracts the vendor + * ID and sets any associated registry key listed in nb_reg_table. 
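+ * + * For example, given the tables below, a notebook whose GPU reports + * subsystem vendor ID PCI_VENDOR_ID_PC_PARTNER, subsystem device ID 0x0620 + * and GPU device ID 0x1284 matches the first nb_id_table entry, so + * initVendorSpecificRegistry() extracts PCI_VENDOR_ID_PC_PARTNER and sets the + * associated nb_reg_table key, RmOverrideSupportChipsetAspm, to 2.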
+ */ + +static nv_nb_id_t nb_id_table[] = { + { PCI_VENDOR_ID_PC_PARTNER, 0x0620, 0x1284 }, // Acer GT 630 + { PCI_VENDOR_ID_PC_PARTNER, 0x0620, 0x124b }, // Acer GT 640 + { 0, 0, 0 } +}; + +static nv_nb_reg_t nb_reg_table[] = { + { PCI_VENDOR_ID_PC_PARTNER, "RmOverrideSupportChipsetAspm", 2 }, + { 0, NULL, 0 } +}; + +#endif //_NV_NB_REGS_H_ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-priv.h b/src/nvidia/arch/nvalloc/unix/include/nv-priv.h new file mode 100644 index 000000000..b091586c1 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-priv.h @@ -0,0 +1,373 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NV_PRIV_H_ +#define _NV_PRIV_H_ + +#include +#include +#include +#include + +#include + +#define NV_PRIV_REG_WR08(b,o,d) (*((volatile NvV8*)&(b)->Reg008[(o)/1])=(NvV8)(d)) +#define NV_PRIV_REG_WR16(b,o,d) (*((volatile NvV16*)&(b)->Reg016[(o)/2])=(NvV16)(d)) +#define NV_PRIV_REG_WR32(b,o,d) (*((volatile NvV32*)&(b)->Reg032[(o)/4])=(NvV32)(d)) + +#define NV_PRIV_REG_RD08(b,o) ((b)->Reg008[(o)/1]) +#define NV_PRIV_REG_RD16(b,o) ((b)->Reg016[(o)/2]) +#define NV_PRIV_REG_RD32(b,o) ((b)->Reg032[(o)/4]) + +#define NV_NUM_CR_REGS 0x99 + +struct OBJGPU; + + +#define NV_BIT_PLANE_SIZE 64 * 1024 +#define NV_NUM_VGA_BIT_PLANES 4 + +/* +* device state during Power Management +*/ +typedef struct nv_pm_state_s +{ + NvU32 IntrEn; + NvBool InHibernate; +} nv_pm_state_t; + +/* +* data structure for the UNIX workqueues +*/ +typedef struct nv_work_item_s +{ + NvU32 flags; + NvU32 gpuInstance; + union + { + OSWorkItemFunction *pGpuFunction; + OSSystemWorkItemFunction *pSystemFunction; + } func; + void *pData; +} nv_work_item_t; + +#define NV_WORK_ITEM_FLAGS_NONE 0x0 +#define NV_WORK_ITEM_FLAGS_REQUIRES_GPU 0x1 +#define NV_WORK_ITEM_FLAGS_DONT_FREE_DATA 0x2 + +/* + * pseudo-registry data structure + */ + +typedef enum +{ + NV_REGISTRY_ENTRY_TYPE_UNKNOWN = 0, + NV_REGISTRY_ENTRY_TYPE_DWORD, + NV_REGISTRY_ENTRY_TYPE_BINARY, + NV_REGISTRY_ENTRY_TYPE_STRING +} nv_reg_type_t; + +typedef struct nv_reg_entry_s +{ + char *regParmStr; + NvU32 type; + NvU32 data; // used when type == NV_REGISTRY_ENTRY_TYPE_DWORD + NvU8 *pdata; // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING} + NvU32 len; // used when type == NV_REGISTRY_ENTRY_TYPE_{BINARY,STRING} + struct nv_reg_entry_s *next; +} nv_reg_entry_t; + +#define INVALID_DISP_ID 0xFFFFFFFF +#define MAX_DISP_ID_PER_ADAPTER 0x2 + +typedef struct nv_i2c_adapter_entry_s +{ + void *pOsAdapter; + NvU32 port; + NvU32 displayId[MAX_DISP_ID_PER_ADAPTER]; +} nv_i2c_adapter_entry_t; + +#define NV_INIT_FLAG_HAL 0x0001 +#define NV_INIT_FLAG_HAL_COMPONENTS 0x0002 +#define NV_INIT_FLAG_GPU_STATE 0x0004 +#define NV_INIT_FLAG_GPU_STATE_LOAD 0x0008 +#define NV_INIT_FLAG_FIFO_WATCHDOG 0x0010 +#define NV_INIT_FLAG_CORE_LOGIC 0x0020 +#define NV_INIT_FLAG_HIRES 0x0040 +#define NV_INIT_FLAG_DISP_STATE_SAVED 0x0080 +#define NV_INIT_FLAG_GPUMGR_ATTACH 0x0100 +#define NV_INIT_FLAG_PUBLIC_I2C 0x0400 +#define NV_INIT_FLAG_SCALABILITY 0x0800 +#define NV_INIT_FLAG_DMA 0x1000 + +#define MAX_I2C_ADAPTERS NV402C_CTRL_NUM_I2C_PORTS + +/* + * GPU dynamic power state machine. + * + * The GPU is in exactly one of these states at a time. Only certain state + * transitions are valid, as documented by the DAGs below. + * + * When in "instant idle" or COARSE mode: + * + * +----------------------+ + * v | + * +---------+ +----------------+ +--------+ + * | UNKNOWN | --> | IDLE_INDICATED | --> | IN_USE | + * +---------+ +----------------+ +--------+ + * + * The transition from UNKNOWN to IDLE_INDICATED happens in + * rm_init_dynamic_power_management(). + * + * Thereafter, transitions from IDLE_INDICATED to IN_USE happen when + * os_ref_dynamic_power() is called and the refcount transitions from 0 to 1; + * transitions from IN_USE to IDLE_INDICATED happen when + * os_unref_dynamic_power() is called and the refcount transitions from 1 to 0. + * Note that only calls to os_(un)ref_dynamic_power() with the mode == COARSE + * are considered in this mode; calls with mode == FINE are ignored.
Since + * COARSE calls are placed only in rm_init_adapter/rm_shutdown_adapter, the GPU + * effectively stays in the IN_USE state any time any client has initialized + * it. + * + * + * When in "deferred idle" or FINE mode: + * + * +----------------------------------------------------------------+ + * | | + * | | + * | +-------------------------------------------+----------------------+ + * | | | v + * | +---------+ +----------------+ +--------------+ +----------------+ +--------+ + * | | UNKNOWN | --> | IDLE_INDICATED | --> | | --> | IDLE_SUSTAINED | --> | IN_USE | -+ + * | +---------+ +----------------+ | | +----------------+ +--------+ | + * | ^ | | | ^ | + * +--------------------+ | IDLE_INSTANT | ------+----------------------+ | + * | | | | + * | | | | + * | | <-----+ | + * +--------------+ | + * ^ | + * +-----------------------------------------------------+ + * + * As before, the transition from UNKNOWN to IDLE_INDICATED happens in + * rm_init_dynamic_power_management(). This is not ideal: it means the GPU may + * be powered down immediately upon loading the RM module, even if + * rm_init_adapter() is going to be called soon thereafter. However, we can't + * rely on deferred idle callbacks yet, since those currently rely on core RM + * being initialized. + * + * At the beginning of rm_init_adapter(), the GPU transitions to the IN_USE + * state; during the rm_init_adapter() sequence, + * RmInitDeferredDynamicPowerManagement() will be called which will schedule + * timer callbacks and set the "deferred_idle_enabled" boolean. + * + * While in "deferred idle" mode, one of the callbacks + * timerCallbackForIdlePreConditions(), timerCallbackToIndicateIdle(), or + * RmIndicateIdle() should be scheduled when in the states: + * - IN_USE + * - IDLE_INSTANT + * - IDLE_SUSTAINED + * Note that since we may transition from IN_USE to IDLE_INSTANT rapidly (e.g., + * for a series of RM calls), we don't attempt to schedule the callbacks and + * cancel them on each of these transitions. The + * timerCallbackForIdlePreConditions() callback will simply exit early if in + * the IN_USE state. + * + * As before, the GPU will remain in the IN_USE state until + * os_unref_dynamic_power() is called and the count transitions from 1 to 0 + * (calls with mode == FINE are honored, in this mode, and these transitions + * can happen frequently). When the refcount reaches 0, rather than going + * directly to the IDLE_INDICATED state, it transitions to the IDLE_INSTANT + * state. + * + * Then, when the next timerCallbackForIdlePreConditions() callback executes, + * if all preconditions are met, the state will transition to IDLE_SUSTAINED. + * + * If, when in the IDLE_SUSTAINED state, os_ref_dynamic_power() is called, the + * GPU will transition back to the IN_USE state and return to the IDLE_INSTANT + * state. This ensures that there is a suitable delay between any activity + * that requires bumping the refcount and indicating idleness. + * + * If the timerCallbackForIdlePreConditions() callback executes again and the + * GPU is still in the IDLE_SUSTAINED state, userspace mappings will be revoked + * and the timerCallbackToIndicateIdle() callback will be scheduled. + * + * If, before the timerCallbackToIndicateIdle() callback executes, either + * os_ref_dynamic_power() is called or a mapping which has been revoked is + * accessed (which triggers the RmForceGpuNotIdle() callback), the GPU will + * transition back to the IN_USE or IDLE_INSTANT state, respectively. 
+ * + * Then, when the timerCallbackToIndicateIdle() callback executes, if all + * mappings are still revoked, and the GPU is still in the IDLE_SUSTAINED + * state, and all GPU idleness preconditions remain satisfied, the + * RmIndicateIdle() work item will be enqueued. (Else, the GPU will transition + * back to the IDLE_INSTANT state and the callback for preconditions is + * scheduled again.) + * + * Finally, once the RmIndicateIdle() work item is called, if all of the same + * conditions still hold, the state will transition to IDLE_INDICATED. No + * callbacks will be scheduled from here; the callbacks for preconditions + * should be re-scheduled when transitioning out of the IDLE_INDICATED state. + * + * Once in the IDLE_INDICATED state, the kernel is free to call the RM to + * perform the GC6 entry sequence then turn off power to the GPU (although it + * may not, if the audio function is being used for example). + * + * There are two paths to exit the IDLE_INDICATED state: + * (a) If os_ref_dynamic_power() is called, in which case it transitions + * directly to the IN_USE state; + * (b) If RmForceGpuNotIdle() is called, in which case it transitions back to + * the IDLE_INSTANT state. + */ +typedef enum +{ + NV_DYNAMIC_POWER_STATE_UNKNOWN = 0, + + NV_DYNAMIC_POWER_STATE_IN_USE, + + NV_DYNAMIC_POWER_STATE_IDLE_INSTANT, + NV_DYNAMIC_POWER_STATE_IDLE_SUSTAINED, + NV_DYNAMIC_POWER_STATE_IDLE_INDICATED, +} nv_dynamic_power_state_t; + +typedef struct nv_dynamic_power_s +{ + /* + * mode is read without the mutex -- should be read-only outside of + * rm_init_dynamic_power_management, called during probe only. + */ + nv_dynamic_power_mode_t mode; + /* + * Whether to indicate idle immediately when the refcount reaches 0, or + * only go to the IDLE_INSTANT state, and expect timer callbacks to + * transition through IDLE_SUSTAINED -> IDLE_INDICATED. + */ + NvBool deferred_idle_enabled; + + nv_dynamic_power_state_t state; + NvS32 refcount; + + /* + * A word on lock ordering. These locks must be taken in the order: + * + * RM API lock > this dynamic_power mutex > RM GPUs lock + * + * Skipping any of those locks is fine (if they aren't required to protect + * whatever state is being accessed or modified), so long as the order is + * not violated. + */ + PORT_MUTEX *mutex; + + /* + * callback handles for deferred dynamic power management. + */ + NvP64 idle_precondition_check_event; + NvP64 indicate_idle_event; + NvBool idle_precondition_check_callback_scheduled; + + /* + * callback handle for kernel initiated gc6 entry/exit. + * these will be protected by the gpu lock. + */ + NvP64 remove_idle_holdoff; + NvBool b_idle_holdoff; + + /* + * flag set if the platform does not support fine grain dynamic power + * management. + */ + NvBool b_fine_not_supported; + + /* + * Counter to track clients disallowing GCOFF. + */ + NvU32 clients_gcoff_disallow_refcount; + + /* + * Maximum FB allocation size which can be saved in system memory + * while doing GCOFF based dynamic PM. 
+ */ + NvU64 gcoff_max_fb_size; + + /* + * NVreg_DynamicPowerManagement regkey value set by the user + */ + NvU32 dynamic_power_regkey; +} nv_dynamic_power_t; + +typedef struct +{ + OBJGPU *pGpu; + + NvU32 pmc_boot_0; + + nv_efi_t efi; + + NvU8 scr_vga_active[OBJ_MAX_HEADS]; + NvU8 scr_dcb_index_lo[OBJ_MAX_HEADS]; + NvU8 scr_dcb_index_hi[OBJ_MAX_HEADS]; + + NvU8 font_bitplanes[NV_NUM_VGA_BIT_PLANES][NV_BIT_PLANE_SIZE]; + + NvU32 flags; + NvU32 status; + + nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS]; + + void *pVbiosCopy; + NvU32 vbiosSize; + + nv_pm_state_t pm_state; + + nv_reg_entry_t *pRegistry; + + nv_dynamic_power_t dynamic_power; + + /* Flag to check if the GPU needs 4K page isolation. */ + NvBool b_4k_page_isolation_required; + + /* Flag to check if GPU mobile config is enabled */ + NvBool b_mobile_config_enabled; + + /* Flag to check if S0ix-based power management is enabled. */ + NvBool s0ix_pm_enabled; + + /* + * Maximum FB allocation size which can be saved in system memory + * during system suspend with S0ix-based power management. + */ + NvU64 s0ix_gcoff_max_fb_size; + + NvU32 pmc_boot_42; +} nv_priv_t; + +#define NV_SET_NV_PRIV(nv,p) ((nv)->priv = (p)) +#define NV_GET_NV_PRIV(nv) ((nv) ? (nv)->priv : NULL) + +/* + * Make sure that your stack has taken API Lock before using this macro. + */ +#define NV_GET_NV_PRIV_PGPU(nv) \ + (NV_GET_NV_PRIV(nv) ? ((nv_priv_t *)NV_GET_NV_PRIV(nv))->pGpu : NULL) + +#endif // _NV_PRIV_H_ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-reg.h b/src/nvidia/arch/nvalloc/unix/include/nv-reg.h new file mode 100644 index 000000000..569a57495 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-reg.h @@ -0,0 +1,920 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RM_REG_H_ +#define _RM_REG_H_ + +#include "nvtypes.h" + +/* + * use NV_REG_STRING to stringify a registry key when using that registry key + */ + +#define __NV_REG_STRING(regkey) #regkey +#define NV_REG_STRING(regkey) __NV_REG_STRING(regkey) + +/* + * use NV_DEFINE_REG_ENTRY and NV_DEFINE_PARAMS_TABLE_ENTRY to simplify definition + * of registry keys in the kernel module source code.
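+ * + * For example, the EnableMSI key used later in this file expands roughly as + * follows (illustrative; see the macro definitions below): + * + * NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1) + * becomes: static NvU32 NVreg_EnableMSI = (1); + * (plus a module parameter declaration when NV_MODULE_PARAMETER is defined) + * + * NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI) + * becomes: { "EnableMSI", &NVreg_EnableMSI } in the nv_parms[] table.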
+ */ + +#define __NV_REG_VAR(regkey) NVreg_##regkey + +#if defined(NV_MODULE_PARAMETER) +#define NV_DEFINE_REG_ENTRY(regkey, default_value) \ + static NvU32 __NV_REG_VAR(regkey) = (default_value); \ + NV_MODULE_PARAMETER(__NV_REG_VAR(regkey)) +#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \ + NvU32 __NV_REG_VAR(regkey) = (default_value); \ + NV_MODULE_PARAMETER(__NV_REG_VAR(regkey)) +#else +#define NV_DEFINE_REG_ENTRY(regkey, default_value) \ + static NvU32 __NV_REG_VAR(regkey) = (default_value) +#define NV_DEFINE_REG_ENTRY_GLOBAL(regkey, default_value) \ + NvU32 __NV_REG_VAR(regkey) = (default_value) +#endif + +#if defined(NV_MODULE_STRING_PARAMETER) +#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \ + char *__NV_REG_VAR(regkey) = (default_value); \ + NV_MODULE_STRING_PARAMETER(__NV_REG_VAR(regkey)) +#else +#define NV_DEFINE_REG_STRING_ENTRY(regkey, default_value) \ + char *__NV_REG_VAR(regkey) = (default_value) +#endif + +#define NV_DEFINE_PARAMS_TABLE_ENTRY(regkey) \ + { NV_REG_STRING(regkey), &__NV_REG_VAR(regkey) } + +/* + * Like NV_DEFINE_PARAMS_TABLE_ENTRY, but allows a mismatch between the name of + * the regkey and the name of the module parameter. When using this macro, the + * name of the parameter is passed to the extra "parameter" argument, and it is + * this name that must be used in the NV_DEFINE_REG_ENTRY() macro. + */ + +#define NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(regkey, parameter) \ + { NV_REG_STRING(regkey), &__NV_REG_VAR(parameter)} + +/* + *----------------- registry key definitions-------------------------- + */ + +/* + * Option: ModifyDeviceFiles + * + * Description: + * + * When this option is enabled, the NVIDIA driver will verify the validity + * of the NVIDIA device files in /dev and attempt to dynamically modify + * and/or (re-)create them, if necessary. If you don't wish for the NVIDIA + * driver to touch the device files, you can use this registry key. + * + * This module parameter is only honored by the NVIDIA GPU driver and NVIDIA + * capability driver. Furthermore, the NVIDIA capability driver provides a + * modifiable /proc file entry (DeviceFileModify=0/1) to alter the behavior of + * this module parameter per device file. + * + * Possible Values: + * 0 = disable dynamic device file management + * 1 = enable dynamic device file management (default) + */ + +#define __NV_MODIFY_DEVICE_FILES ModifyDeviceFiles +#define NV_REG_MODIFY_DEVICE_FILES NV_REG_STRING(__NV_MODIFY_DEVICE_FILES) + +/* + * Option: DeviceFileUID + * + * Description: + * + * This registry key specifies the UID assigned to the NVIDIA device files + * created and/or modified by the NVIDIA driver when dynamic device file + * management is enabled. + * + * This module parameter is only honored by the NVIDIA GPU driver. + * + * The default UID is 0 ('root'). + */ + +#define __NV_DEVICE_FILE_UID DeviceFileUID +#define NV_REG_DEVICE_FILE_UID NV_REG_STRING(__NV_DEVICE_FILE_UID) + +/* + * Option: DeviceFileGID + * + * Description: + * + * This registry key specifies the GID assigned to the NVIDIA device files + * created and/or modified by the NVIDIA driver when dynamic device file + * management is enabled. + * + * This module parameter is only honored by the NVIDIA GPU driver. + * + * The default GID is 0 ('root').
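+ * + * For example (illustrative only; the group ID below is hypothetical), + * passing NVreg_DeviceFileGID=27 as a module parameter at load time would + * make the device files owned by GID 27 when dynamic device file management + * is enabled.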
+ */ + +#define __NV_DEVICE_FILE_GID DeviceFileGID +#define NV_REG_DEVICE_FILE_GID NV_REG_STRING(__NV_DEVICE_FILE_GID) + +/* + * Option: DeviceFileMode + * + * Description: + * + * This registry key specifies the device file mode assigned to the NVIDIA + * device files created and/or modified by the NVIDIA driver when dynamic + * device file management is enabled. + * + * This module parameter is only honored by the NVIDIA GPU driver. + * + * The default mode is 0666 (octal, rw-rw-rw-). + */ + +#define __NV_DEVICE_FILE_MODE DeviceFileMode +#define NV_REG_DEVICE_FILE_MODE NV_REG_STRING(__NV_DEVICE_FILE_MODE) + +/* + * Option: ResmanDebugLevel + * + * Default value: ~0 + */ + +#define __NV_RESMAN_DEBUG_LEVEL ResmanDebugLevel +#define NV_REG_RESMAN_DEBUG_LEVEL NV_REG_STRING(__NV_RESMAN_DEBUG_LEVEL) + +/* + * Option: RmLogonRC + * + * Default value: 1 + */ + +#define __NV_RM_LOGON_RC RmLogonRC +#define NV_REG_RM_LOGON_RC NV_REG_STRING(__NV_RM_LOGON_RC) + +/* + * Option: InitializeSystemMemoryAllocations + * + * Description: + * + * The NVIDIA Linux driver normally clears system memory it allocates + * for use with GPUs or within the driver stack. This is to ensure + * that potentially sensitive data is not rendered accessible by + * arbitrary user applications. + * + * Owners of single-user systems or similar trusted configurations may + * choose to disable the aforementioned clears using this option and + * potentially improve performance. + * + * Possible values: + * + * 1 = zero out system memory allocations (default) + * 0 = do not perform memory clears + */ + +#define __NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \ + InitializeSystemMemoryAllocations +#define NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS \ + NV_REG_STRING(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS) + +/* + * Option: RegistryDwords + * + * Description: + * + * This option accepts a semicolon-separated list of key=value pairs. Each + * key name is checked against the table of static options; if a match is + * found, the static option value is overridden, but invalid options remain + * invalid. Pairs that do not match an entry in the static option table + * are passed on to the RM directly. + * + * Format: + * + * NVreg_RegistryDwords="<key=value>;<key=value>;..." + */ + +#define __NV_REGISTRY_DWORDS RegistryDwords +#define NV_REG_REGISTRY_DWORDS NV_REG_STRING(__NV_REGISTRY_DWORDS) + +/* + * Option: RegistryDwordsPerDevice + * + * Description: + * + * This option allows registry keys to be specified per GPU device, giving + * registry control at per-GPU granularity. It accepts a semicolon + * separated list of key=value pairs. The first key value pair MUST be + * "pci=DDDD:BB:DD.F;" where DDDD is Domain, BB is Bus Id, DD is device slot + * number and F is the Function. This PCI BDF is used to identify the GPU to + * which the registry keys that follow are assigned. + * If a GPU corresponding to the value specified in "pci=DDDD:BB:DD.F;" is NOT + * found, then all the registry keys that follow are skipped, until the next + * valid pci identifier "pci=DDDD:BB:DD.F;" is found. Following are the valid + * formats for the value of the "pci" string: + * 1) bus:slot : Domain and function defaults to 0. + * 2) domain:bus:slot : Function defaults to 0. + * 3) domain:bus:slot.func : Complete PCI dev id string. + * + * For each of the registry keys that follow, the key name is checked against + * the table of static options; if a match is found, the static option value is + * overridden, but invalid options remain invalid.
Pairs that do not match an + * entry in the static option table are passed on to the RM directly. + * + * Format: + * + * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F;<key=value>;<key=value>;..; \ + * pci=DDDD:BB:DD.F;<key=value>;..;" + */ + +#define __NV_REGISTRY_DWORDS_PER_DEVICE RegistryDwordsPerDevice +#define NV_REG_REGISTRY_DWORDS_PER_DEVICE NV_REG_STRING(__NV_REGISTRY_DWORDS_PER_DEVICE) + +#define __NV_RM_MSG RmMsg +#define NV_RM_MSG NV_REG_STRING(__NV_RM_MSG) + +/* + * Option: UsePageAttributeTable + * + * Description: + * + * Enable/disable use of the page attribute table (PAT) available in + * modern x86/x86-64 processors to set the effective memory type of memory + * mappings to write-combining (WC). + * + * If this option is enabled, an x86 processor with PAT support is present, + * and the host system's Linux kernel did not configure one of the PAT entries + * to indicate the WC memory type, the driver will change the second entry in + * the PAT from its default (write-through (WT)) to WC at module load + * time. If the kernel did update one of the PAT entries, the driver will + * not modify the PAT. + * + * In both cases, the driver will honor attempts to map memory with the WC + * memory type by selecting the appropriate PAT entry using the correct + * set of PTE flags. + * + * Possible values: + * + * ~0 = use the NVIDIA driver's default logic (default) + * 1 = enable use of the PAT for WC mappings. + * 0 = disable use of the PAT for WC mappings. + */ + +#define __NV_USE_PAGE_ATTRIBUTE_TABLE UsePageAttributeTable +#define NV_USE_PAGE_ATTRIBUTE_TABLE NV_REG_STRING(__NV_USE_PAGE_ATTRIBUTE_TABLE) + +/* + * Option: EnableMSI + * + * Description: + * + * When this option is enabled and the host kernel supports the MSI feature, + * the NVIDIA driver will enable the PCI-E MSI capability of GPUs that + * support this feature instead of using a PCI-E wired interrupt. + * + * Possible Values: + * + * 0 = disable MSI interrupt + * 1 = enable MSI interrupt (default) + * + */ + +#define __NV_ENABLE_MSI EnableMSI +#define NV_REG_ENABLE_MSI NV_REG_STRING(__NV_ENABLE_MSI) + +/* + * Option: RegisterForACPIEvents + * + * Description: + * + * When this option is enabled, the NVIDIA driver will register with the + * ACPI subsystem to receive notification of ACPI events. + * + * Possible values: + * + * 1 - register for ACPI events (default) + * 0 - do not register for ACPI events + */ + +#define __NV_REGISTER_FOR_ACPI_EVENTS RegisterForACPIEvents +#define NV_REG_REGISTER_FOR_ACPI_EVENTS NV_REG_STRING(__NV_REGISTER_FOR_ACPI_EVENTS) + +/* + * Option: EnablePCIeGen3 + * + * Description: + * + * Due to interoperability problems seen with Kepler PCIe Gen3 capable GPUs + * when configured on SandyBridge E desktop platforms, NVIDIA feels that + * delivering a reliable, high-quality experience is not currently possible in + * PCIe Gen3 mode on all PCIe Gen3 platforms. Therefore, Quadro, Tesla and + * NVS Kepler products operate in PCIe Gen2 mode by default. You may use this + * option to enable PCIe Gen3 support. + * + * This is completely unsupported!
+ * + * Possible Values: + * + * 0: disable PCIe Gen3 support (default) + * 1: enable PCIe Gen3 support + */ + +#define __NV_ENABLE_PCIE_GEN3 EnablePCIeGen3 +#define NV_REG_ENABLE_PCIE_GEN3 NV_REG_STRING(__NV_ENABLE_PCIE_GEN3) + +/* + * Option: MemoryPoolSize + * + * Description: + * + * When set to a non-zero value, this option specifies the size of the + * memory pool, given as a multiple of 1 GB, created on VMware ESXi to + * satisfy any system memory allocations requested by the NVIDIA kernel + * module. + */ + +#define __NV_MEMORY_POOL_SIZE MemoryPoolSize +#define NV_REG_MEMORY_POOL_SIZE NV_REG_STRING(__NV_MEMORY_POOL_SIZE) + +/* + * Option: KMallocHeapMaxSize + * + * Description: + * + * When set to a non-zero value, this option specifies the maximum size of the + * heap memory space reserved for kmalloc operations. Given as a + * multiple of 1 MB created on VMware ESXi to satisfy any system memory + * allocations requested by the NVIDIA kernel module. + */ + +#define __NV_KMALLOC_HEAP_MAX_SIZE KMallocHeapMaxSize +#define NV_KMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_KMALLOC_HEAP_MAX_SIZE) + +/* + * Option: VMallocHeapMaxSize + * + * Description: + * + * When set to a non-zero value, this option specifies the maximum size of the + * heap memory space reserved for vmalloc operations. Given as a + * multiple of 1 MB created on VMware ESXi to satisfy any system memory + * allocations requested by the NVIDIA kernel module. + */ + +#define __NV_VMALLOC_HEAP_MAX_SIZE VMallocHeapMaxSize +#define NV_VMALLOC_HEAP_MAX_SIZE NV_REG_STRING(__NV_VMALLOC_HEAP_MAX_SIZE) + +/* + * Option: IgnoreMMIOCheck + * + * Description: + * + * When this option is enabled, the NVIDIA kernel module will ignore + * MMIO limit check during device probe on VMWare ESXi kernel. This is + * typically necessary when VMware ESXi MMIO limit differs between any + * base version and its updates. Customer using updates can set regkey + * to avoid probe failure. + */ + +#define __NV_IGNORE_MMIO_CHECK IgnoreMMIOCheck +#define NV_REG_IGNORE_MMIO_CHECK NV_REG_STRING(__NV_IGNORE_MMIO_CHECK) + +/* + * Option: TCEBypassMode + * + * Description: + * + * When this option is enabled, the NVIDIA kernel module will attempt to setup + * all GPUs in "TCE bypass mode", in which DMA mappings of system memory bypass + * the IOMMU/TCE remapping hardware on IBM POWER systems. This is typically + * necessary for CUDA applications in which large system memory mappings may + * exceed the default TCE remapping capacity when operated in non-bypass mode. + * + * This option has no effect on non-POWER platforms. + * + * Possible Values: + * + * 0: system default TCE mode on all GPUs + * 1: enable TCE bypass mode on all GPUs + * 2: disable TCE bypass mode on all GPUs + */ +#define __NV_TCE_BYPASS_MODE TCEBypassMode +#define NV_REG_TCE_BYPASS_MODE NV_REG_STRING(__NV_TCE_BYPASS_MODE) + +#define NV_TCE_BYPASS_MODE_DEFAULT 0 +#define NV_TCE_BYPASS_MODE_ENABLE 1 +#define NV_TCE_BYPASS_MODE_DISABLE 2 + +/* + * Option: pci + * + * Description: + * + * On Unix platforms, per GPU based registry key can be specified as: + * NVreg_RegistryDwordsPerDevice="pci=DDDD:BB:DD.F,". + * where DDDD:BB:DD.F refers to Domain:Bus:Device.Function. + * We need this key "pci" to identify what follows next is a PCI BDF identifier, + * for which the registry keys are to be applied. + * + * This define is not used on non-UNIX platforms. + * + * Possible Formats for value: + * + * 1) bus:slot : Domain and function defaults to 0. + * 2) domain:bus:slot : Function defaults to 0. 
+ * 3) domain:bus:slot.func : Complete PCI BDF identifier string. + */ +#define __NV_PCI_DEVICE_BDF pci +#define NV_REG_PCI_DEVICE_BDF NV_REG_STRING(__NV_PCI_DEVICE_BDF) + +/* + * Option: EnableStreamMemOPs + * + * Description: + * + * When this option is enabled, the CUDA driver will enable support for + * CUDA Stream Memory Operations in user-mode applications, which are so + * far required to be disabled by default due to limited support in + * devtools. + * + * Note: this is treated as a hint. MemOPs may still be left disabled by the + * CUDA driver for other reasons. + * + * Possible Values: + * + * 0 = disable feature (default) + * 1 = enable feature + */ +#define __NV_ENABLE_STREAM_MEMOPS EnableStreamMemOPs +#define NV_REG_ENABLE_STREAM_MEMOPS NV_REG_STRING(__NV_ENABLE_STREAM_MEMOPS) + +/* + * Option: EnableUserNUMAManagement + * + * Description: + * + * When this option is enabled, the NVIDIA kernel module will require the + * user-mode NVIDIA Persistence daemon to manage the onlining and offlining + * of its NUMA device memory. + * + * This option has no effect on platforms that do not support onlining + * device memory to a NUMA node (this feature is only supported on certain + * POWER9 systems). + * + * Possible Values: + * + * 0: disable user-mode NUMA management + * 1: enable user-mode NUMA management (default) + */ +#define __NV_ENABLE_USER_NUMA_MANAGEMENT EnableUserNUMAManagement +#define NV_REG_ENABLE_USER_NUMA_MANAGEMENT NV_REG_STRING(__NV_ENABLE_USER_NUMA_MANAGEMENT) + +/* + * Option: GpuBlacklist + * + * Description: + * + * This option accepts a list of blacklisted GPUs, separated by commas, that + * cannot be attached or used. Each blacklisted GPU is identified by a UUID in + * the ASCII format with leading "GPU-". An exact match is required; no partial + * UUIDs. This regkey is deprecated and will be removed in the future. Use + * NV_REG_EXCLUDED_GPUS instead. + */ +#define __NV_GPU_BLACKLIST GpuBlacklist +#define NV_REG_GPU_BLACKLIST NV_REG_STRING(__NV_GPU_BLACKLIST) + +/* + * Option: ExcludedGpus + * + * Description: + * + * This option accepts a list of excluded GPUs, separated by commas, that + * cannot be attached or used. Each excluded GPU is identified by a UUID in + * the ASCII format with leading "GPU-". An exact match is required; no partial + * UUIDs. + */ +#define __NV_EXCLUDED_GPUS ExcludedGpus +#define NV_REG_EXCLUDED_GPUS NV_REG_STRING(__NV_EXCLUDED_GPUS) + +/* + * Option: NvLinkDisable + * + * Description: + * + * When this option is enabled, the NVIDIA kernel module will not attempt to + * initialize or train NVLink connections for any GPUs. System reboot is required + * for changes to take effect. + * + * This option has no effect if no GPUs support NVLink. + * + * Possible Values: + * + * 0: Do not disable NVLink (default) + * 1: Disable NVLink + */ +#define __NV_NVLINK_DISABLE NvLinkDisable +#define NV_REG_NVLINK_DISABLE NV_REG_STRING(__NV_NVLINK_DISABLE) + +/* + * Option: RestrictProfilingToAdminUsers + * + * Description: + * + * When this option is enabled, the NVIDIA kernel module will prevent users + * without administrative access (i.e., the CAP_SYS_ADMIN capability) from + * using GPU performance counters.
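+ * + * For example (illustrative), this can be set either through the module + * parameter generated for this key (NVreg_RestrictProfilingToAdminUsers=1) + * or via the RegistryDwords mechanism described above + * (NVreg_RegistryDwords="RmProfilingAdminOnly=1").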
+ * + * Possible Values: + * + * 0: Do not restrict GPU counters + * 1: Restrict GPU counters to system administrators only (default) + */ + +#define __NV_RM_PROFILING_ADMIN_ONLY RmProfilingAdminOnly +#define __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER RestrictProfilingToAdminUsers +#define NV_REG_RM_PROFILING_ADMIN_ONLY NV_REG_STRING(__NV_RM_PROFILING_ADMIN_ONLY) + +/* + * Option: TemporaryFilePath + * + * Description: + * + * When specified, this option changes the location in which the + * NVIDIA kernel module will create unnamed temporary files (e.g. to + * save the contents of video memory in). The indicated path must + * be a directory. By default, temporary files are created in /tmp. + */ +#define __NV_TEMPORARY_FILE_PATH TemporaryFilePath +#define NV_REG_TEMPORARY_FILE_PATH NV_REG_STRING(__NV_TEMPORARY_FILE_PATH) + +/* + * Option: PreserveVideoMemoryAllocations + * + * If enabled, this option prompts the NVIDIA kernel module to save and + * restore all video memory allocations across system power management + * cycles, i.e. suspend/resume and hibernate/restore. Otherwise, + * only select allocations are preserved. + * + * Possible Values: + * + * 0: Preserve only select video memory allocations (default) + * 1: Preserve all video memory allocations + */ +#define __NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS PreserveVideoMemoryAllocations +#define NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS \ + NV_REG_STRING(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS) + +/* + * Option: EnableS0ixPowerManagement + * + * When this option is enabled, the NVIDIA driver will use S0ix-based + * power management for system suspend/resume, if both the platform and + * the GPU support S0ix. + * + * During system suspend, if S0ix is enabled and + * video memory usage is above the threshold configured by + * 'S0ixPowerManagementVideoMemoryThreshold', video memory will be kept + * in self-refresh mode while the rest of the GPU is powered down. + * + * Otherwise, the driver will copy video memory contents to system memory + * and power off the video memory along with the GPU. + * + * Possible Values: + * + * 0: Disable S0ix based power management (default) + * 1: Enable S0ix based power management + */ + +#define __NV_ENABLE_S0IX_POWER_MANAGEMENT EnableS0ixPowerManagement +#define NV_REG_ENABLE_S0IX_POWER_MANAGEMENT \ + NV_REG_STRING(__NV_ENABLE_S0IX_POWER_MANAGEMENT) + +/* + * Option: S0ixPowerManagementVideoMemoryThreshold + * + * This option controls the threshold that the NVIDIA driver will use during + * S0ix-based system power management. + * + * When S0ix is enabled and the system is suspended, the driver will + * compare the amount of video memory in use with this threshold, + * to decide whether to keep video memory in self-refresh or copy video + * memory content to system memory. + * + * See the 'EnableS0ixPowerManagement' option. + * + * Values are expressed in Megabytes (1048576 bytes). + * + * Default value for this option is 256MB. + * + */ +#define __NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + S0ixPowerManagementVideoMemoryThreshold +#define NV_REG_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + NV_REG_STRING(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD) + +/* + * Option: DynamicPowerManagement + * + * This option controls how aggressively the NVIDIA kernel module will manage + * GPU power through kernel interfaces. + * + * Possible Values: + * + * 0: Never allow the GPU to be powered down. + * 1: Power down the GPU when it is not initialized.
+ * 2: Power down the GPU after it has been inactive for some time. + * 3: (Default) Power down the GPU after a period of inactivity (i.e., + * mode 2) on Ampere or later notebooks. Otherwise, do not power down + * the GPU. + */ +#define __NV_DYNAMIC_POWER_MANAGEMENT DynamicPowerManagement +#define NV_REG_DYNAMIC_POWER_MANAGEMENT \ + NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT) + +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_NEVER 0 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_COARSE 1 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_FINE 2 +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_DEFAULT 3 + +/* + * Option: DynamicPowerManagementVideoMemoryThreshold + * + * This option controls the threshold that the NVIDIA driver will use + * when selecting the dynamic power management scheme. + * + * When the driver detects that the GPU is idle, it will compare the amount + * of video memory in use with this threshold. + * + * If the current video memory usage is less than the threshold, the + * driver may preserve video memory contents in system memory and power off + * the video memory along with the GPU itself, if supported. Otherwise, + * the video memory will be kept in self-refresh mode while powering down + * the rest of the GPU, if supported. + * + * Values are expressed in Megabytes (1048576 bytes). + * + * If the requested value is greater than 200MB (the default), then it + * will be capped to 200MB. + */ +#define __NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + DynamicPowerManagementVideoMemoryThreshold +#define NV_REG_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD \ + NV_REG_STRING(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD) + +/* + * Option: RegisterPCIDriver + * + * Description: + * + * When this option is enabled, the NVIDIA driver will register with + * PCI subsystem. + * + * Possible values: + * + * 1 - register as PCI driver (default) + * 0 - do not register as PCI driver + */ + +#define __NV_REGISTER_PCI_DRIVER RegisterPCIDriver +#define NV_REG_REGISTER_PCI_DRIVER NV_REG_STRING(__NV_REGISTER_PCI_DRIVER) + +/* + * Option: EnablePCIERelaxedOrderingMode + * + * Description: + * + * When this option is enabled, the registry key RmSetPCIERelaxedOrdering will + * be set to NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE, causing + * every device to set the relaxed ordering bit to 1 in all outbound MWr + * transaction-layer packets. This is equivalent to setting the regkey to + * FORCE_ENABLE as a non-per-device registry key. + * + * Possible values: + * 0 - Do not enable PCIe TLP relaxed ordering bit-setting (default) + * 1 - Enable PCIe TLP relaxed ordering bit-setting + */ +#define __NV_ENABLE_PCIE_RELAXED_ORDERING_MODE EnablePCIERelaxedOrderingMode +#define NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE \ + NV_REG_STRING(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE) + +/* + * Option: EnableGpuFirmware + * + * Description: + * + * When this option is enabled, the NVIDIA driver will enable use of GPU + * firmware. + * + * Possible mode values: + * 0 - Do not enable GPU firmware + * 1 - Enable GPU firmware + * 2 - (Default) Use the default enablement policy for GPU firmware + * + * Setting this to anything other than 2 will alter driver firmware- + * enablement policies, possibly disabling GPU firmware where it would + * have otherwise been enabled by default. + * + * If this key is set globally to the system, the driver may still attempt + * to apply some policies to maintain uniform firmware modes across all + * GPUS. 
This may result in the driver failing initialization on some GPUs + * to maintain such a policy. + * + * If this key is set using NVreg_RegistryDwordsPerDevice, then the driver + * will attempt to honor whatever configuration is specified without applying + * additional policies. This may also result in failed GPU initializations if + * the configuration is not possible (for example if the firmware is missing + * from the filesystem, or the GPU is not capable). + * + * Policy bits: + * + * POLICY_ALLOW_FALLBACK: + * As the normal behavior is to fail GPU initialization if this registry + * entry is set in such a way that results in an invalid configuration, if + * instead the user would like the driver to automatically try to fall back + * to initializing the failing GPU with firmware disabled, then this bit can + * be set (ex: 0x11 means try to enable GPU firmware but fall back if needed). + * Note that this can result in a mixed mode configuration (ex: GPU0 has + * firmware enabled, but GPU1 does not). + * + */ + +#define __NV_ENABLE_GPU_FIRMWARE EnableGpuFirmware +#define NV_REG_ENABLE_GPU_FIRMWARE NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE) + +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000 +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001 +#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002 + +#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0 +#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010 + +#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012 +#define NV_REG_ENABLE_GPU_FIRMWARE_INVALID_VALUE 0xFFFFFFFF + +/* + * Option: EnableGpuFirmwareLogs + * + * When this option is enabled, the NVIDIA driver will send GPU firmware logs + * to the system log, when possible. + * + * Possible values: + * 0 - Do not send GPU firmware logs to the system log + * 1 - Enable sending of GPU firmware logs to the system log + * 2 - (Default) Enable sending of GPU firmware logs to the system log for + * the debug kernel driver build only + */ +#define __NV_ENABLE_GPU_FIRMWARE_LOGS EnableGpuFirmwareLogs +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS NV_REG_STRING(__NV_ENABLE_GPU_FIRMWARE_LOGS) + +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000 +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001 +#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002 + +/* + * Option: EnableDbgBreakpoint + * + * When this option is set to a non-zero value, and the kernel is configured + * appropriately, assertions within resman will trigger a CPU breakpoint (e.g., + * INT3 on x86_64), assumed to be caught by an attached debugger. + * + * When this option is set to the value zero (the default), assertions within + * resman will print to the system log, but no CPU breakpoint will be triggered. + */ +#define __NV_ENABLE_DBG_BREAKPOINT EnableDbgBreakpoint + + +/* + * Option: OpenRmEnableUnsupportedGpus + * + * Open nvidia.ko support for features beyond what is used on Data Center GPUs + * is still fairly immature, so for now users are required to opt into use of + * open nvidia.ko with a special registry key, if not on a Data Center GPU.
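+ * + * For example (illustrative), a user who wants to run the open kernel module + * on a non-Data-Center GPU could set the corresponding module parameter via a + * modprobe configuration line such as: + * + * options nvidia NVreg_OpenRmEnableUnsupportedGpus=1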
+ */ + +#define __NV_OPENRM_ENABLE_UNSUPPORTED_GPUS OpenRmEnableUnsupportedGpus +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS NV_REG_STRING(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS) +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE 0x00000000 +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_ENABLE 0x00000001 +#define NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE + + +#if defined(NV_DEFINE_REGISTRY_KEY_TABLE) + +/* + *---------registry key parameter declarations-------------- + */ + +NV_DEFINE_REG_ENTRY(__NV_RESMAN_DEBUG_LEVEL, ~0); +NV_DEFINE_REG_ENTRY(__NV_RM_LOGON_RC, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MODIFY_DEVICE_FILES, 1); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_UID, 0); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_GID, 0); +NV_DEFINE_REG_ENTRY(__NV_DEVICE_FILE_MODE, 0666); +NV_DEFINE_REG_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, 1); +NV_DEFINE_REG_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE, ~0); +NV_DEFINE_REG_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS, 1); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_PCIE_GEN3, 0); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_MSI, 1); +NV_DEFINE_REG_ENTRY(__NV_TCE_BYPASS_MODE, NV_TCE_BYPASS_MODE_DEFAULT); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_STREAM_MEMOPS, 0); +NV_DEFINE_REG_ENTRY(__NV_RM_PROFILING_ADMIN_ONLY_PARAMETER, 1); +NV_DEFINE_REG_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, 0); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT, 0); +NV_DEFINE_REG_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 256); +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT, 3); +NV_DEFINE_REG_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD, 200); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE, NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE); +NV_DEFINE_REG_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS, NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG); +NV_DEFINE_REG_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT); + +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_USER_NUMA_MANAGEMENT, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_MEMORY_POOL_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_KMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_VMALLOC_HEAP_MAX_SIZE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_IGNORE_MMIO_CHECK, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_NVLINK_DISABLE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE, 0); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_REGISTER_PCI_DRIVER, 1); +NV_DEFINE_REG_ENTRY_GLOBAL(__NV_ENABLE_DBG_BREAKPOINT, 0); + +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_REGISTRY_DWORDS_PER_DEVICE, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_RM_MSG, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_GPU_BLACKLIST, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_TEMPORARY_FILE_PATH, NULL); +NV_DEFINE_REG_STRING_ENTRY(__NV_EXCLUDED_GPUS, NULL); + +/* + *----------------registry database definition---------------------- + */ + +/* + * You can enable any of the registry options disabled by default by + * editing their respective entries in the table below. The last field + * determines if the option is considered valid - in order for the + * changes to take effect, you need to recompile and reload the NVIDIA + * kernel module. 
+ */ +nv_parm_t nv_parms[] = { + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RESMAN_DEBUG_LEVEL), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_RM_LOGON_RC), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MODIFY_DEVICE_FILES), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_UID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_GID), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DEVICE_FILE_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_USE_PAGE_ATTRIBUTE_TABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_MSI), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_FOR_ACPI_EVENTS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_GEN3), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_MEMORY_POOL_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_KMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_VMALLOC_HEAP_MAX_SIZE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_IGNORE_MMIO_CHECK), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_TCE_BYPASS_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_STREAM_MEMOPS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_USER_NUMA_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_NVLINK_DISABLE), + NV_DEFINE_PARAMS_TABLE_ENTRY_CUSTOM_NAME(__NV_RM_PROFILING_ADMIN_ONLY, + __NV_RM_PROFILING_ADMIN_ONLY_PARAMETER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_PRESERVE_VIDEO_MEMORY_ALLOCATIONS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_S0IX_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_S0IX_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_DYNAMIC_POWER_MANAGEMENT_VIDEO_MEMORY_THRESHOLD), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_REGISTER_PCI_DRIVER), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_PCIE_RELAXED_ORDERING_MODE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_GPU_FIRMWARE_LOGS), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_ENABLE_DBG_BREAKPOINT), + NV_DEFINE_PARAMS_TABLE_ENTRY(__NV_OPENRM_ENABLE_UNSUPPORTED_GPUS), + {NULL, NULL} +}; + +#elif defined(NVRM) + +extern nv_parm_t nv_parms[]; + +#endif /* NV_DEFINE_REGISTRY_KEY_TABLE */ + +#endif /* _RM_REG_H_ */ diff --git a/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h b/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h new file mode 100644 index 000000000..02b0156d5 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv-unix-nvos-params-wrappers.h @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_ +#define _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_ + +#include + +/* + * This is a wrapper for NVOS02_PARAMETERS with file descriptor + */ + +typedef struct +{ + NVOS02_PARAMETERS params; + int fd; +} nv_ioctl_nvos02_parameters_with_fd; + +/* + * This is a wrapper for NVOS33_PARAMETERS with file descriptor + */ +typedef struct +{ + NVOS33_PARAMETERS params; + int fd; +} nv_ioctl_nvos33_parameters_with_fd; + +#endif // _NV_UNIX_NVOS_PARAMS_WRAPPERS_H_ + diff --git a/src/nvidia/arch/nvalloc/unix/include/nv.h b/src/nvidia/arch/nvalloc/unix/include/nv.h new file mode 100644 index 000000000..07dc0f83e --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv.h @@ -0,0 +1,1023 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _NV_H_ +#define _NV_H_ + + + +#include + +#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(__FreeBSD__) + #include // NULL +#elif defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX) + #include // NULL +#else + #include // NULL +#endif + +#include +#include "nv_stdarg.h" +#include +#include +#include + +extern nv_cap_t *nvidia_caps_root; + +extern const NvBool nv_is_rm_firmware_supported_os; + +#include + +/* NVIDIA's reserved major character device number (Linux). */ +#define NV_MAJOR_DEVICE_NUMBER 195 + +#define GPU_UUID_LEN (16) + +/* + * Buffer size for an ASCII UUID: We need 2 digits per byte, plus space + * for "GPU", 5 dashes, and '\0' termination: + */ +#define GPU_UUID_ASCII_LEN (GPU_UUID_LEN * 2 + 9) + +/* + * #define an absolute maximum used as a sanity check for the + * NV_ESC_IOCTL_XFER_CMD ioctl() size argument. + */ +#define NV_ABSOLUTE_MAX_IOCTL_SIZE 16384 + +/* + * Solaris provides no more than 8 bits for the argument size in + * the ioctl() command encoding; make sure we don't exceed this + * limit. + */ +#define __NV_IOWR_ASSERT(type) ((sizeof(type) <= NV_PLATFORM_MAX_IOCTL_SIZE) ? 
1 : -1) +#define __NV_IOWR(nr, type) ({ \ + typedef char __NV_IOWR_TYPE_SIZE_ASSERT[__NV_IOWR_ASSERT(type)]; \ + _IOWR(NV_IOCTL_MAGIC, (nr), type); \ +}) + +#define NV_PCI_DEV_FMT "%04x:%02x:%02x.%x" +#define NV_PCI_DEV_FMT_ARGS(nv) (nv)->pci_info.domain, (nv)->pci_info.bus, \ + (nv)->pci_info.slot, (nv)->pci_info.function + +#define NV_RM_DEVICE_INTR_ADDRESS 0x100 + +/*! + * @brief The order of the display clocks in the below defined enum + * should be synced with below mapping array and macro. + * All four should be updated simultaneously in case + * of removal or addition of clocks in below order. + * Also, TEGRASOC_WHICH_CLK_MAX is used in various places + * in below mentioned files. + * arch/nvalloc/unix/Linux/nv-linux.h + * + * arch/nvalloc/unix/src/os.c + * dispClkMapRmToOsArr[] = {...}; + * + * arch/nvalloc/unix/Linux/nv-clk.c + * osMapClk[] = {...}; + * + */ +typedef enum _TEGRASOC_WHICH_CLK +{ + TEGRASOC_WHICH_CLK_NVDISPLAYHUB, + TEGRASOC_WHICH_CLK_NVDISPLAY_DISP, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0, + TEGRASOC_WHICH_CLK_NVDISPLAY_P1, + TEGRASOC_WHICH_CLK_DPAUX0, + TEGRASOC_WHICH_CLK_FUSE, + TEGRASOC_WHICH_CLK_DSIPLL_VCO, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTPN, + TEGRASOC_WHICH_CLK_DSIPLL_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_VCO, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTA, + TEGRASOC_WHICH_CLK_SPPLL0_CLKOUTB, + TEGRASOC_WHICH_CLK_SPPLL0_DIV10, + TEGRASOC_WHICH_CLK_SPPLL0_DIV25, + TEGRASOC_WHICH_CLK_SPPLL0_DIV27, + TEGRASOC_WHICH_CLK_SPPLL1_VCO, + TEGRASOC_WHICH_CLK_SPPLL1_CLKOUTPN, + TEGRASOC_WHICH_CLK_SPPLL1_DIV27, + TEGRASOC_WHICH_CLK_VPLL0_REF, + TEGRASOC_WHICH_CLK_VPLL0, + TEGRASOC_WHICH_CLK_VPLL1, + TEGRASOC_WHICH_CLK_NVDISPLAY_P0_REF, + TEGRASOC_WHICH_CLK_RG0, + TEGRASOC_WHICH_CLK_RG1, + TEGRASOC_WHICH_CLK_DISPPLL, + TEGRASOC_WHICH_CLK_DISPHUBPLL, + TEGRASOC_WHICH_CLK_DSI_LP, + TEGRASOC_WHICH_CLK_DSI_CORE, + TEGRASOC_WHICH_CLK_DSI_PIXEL, + TEGRASOC_WHICH_CLK_PRE_SOR0, + TEGRASOC_WHICH_CLK_PRE_SOR1, + TEGRASOC_WHICH_CLK_DP_LINK_REF, + TEGRASOC_WHICH_CLK_SOR_LINKA_INPUT, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO, + TEGRASOC_WHICH_CLK_SOR_LINKA_AFIFO_M, + TEGRASOC_WHICH_CLK_RG0_M, + TEGRASOC_WHICH_CLK_RG1_M, + TEGRASOC_WHICH_CLK_SOR0_M, + TEGRASOC_WHICH_CLK_SOR1_M, + TEGRASOC_WHICH_CLK_PLLHUB, + TEGRASOC_WHICH_CLK_SOR0, + TEGRASOC_WHICH_CLK_SOR1, + TEGRASOC_WHICH_CLK_SOR_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SF0, + TEGRASOC_WHICH_CLK_SF0, + TEGRASOC_WHICH_CLK_SF1, + TEGRASOC_WHICH_CLK_DSI_PAD_INPUT, + TEGRASOC_WHICH_CLK_PRE_SOR0_REF, + TEGRASOC_WHICH_CLK_PRE_SOR1_REF, + TEGRASOC_WHICH_CLK_SOR0_PLL_REF, + TEGRASOC_WHICH_CLK_SOR1_PLL_REF, + TEGRASOC_WHICH_CLK_SOR0_REF, + TEGRASOC_WHICH_CLK_SOR1_REF, + TEGRASOC_WHICH_CLK_OSC, + TEGRASOC_WHICH_CLK_DSC, + TEGRASOC_WHICH_CLK_MAUD, + TEGRASOC_WHICH_CLK_AZA_2XBIT, + TEGRASOC_WHICH_CLK_AZA_BIT, + TEGRA234_CLK_MIPI_CAL, + TEGRA234_CLK_UART_FST_MIPI_CAL, + TEGRASOC_WHICH_CLK_MAX, // TEGRASOC_WHICH_CLK_MAX is defined for boundary checks only. 
+} TEGRASOC_WHICH_CLK; + +#ifdef NVRM + +extern const char *pNVRM_ID; + +/* + * ptr arithmetic convenience + */ + +typedef union +{ + volatile NvV8 Reg008[1]; + volatile NvV16 Reg016[1]; + volatile NvV32 Reg032[1]; +} nv_hwreg_t, * nv_phwreg_t; + + +#define NVRM_PCICFG_NUM_BARS 6 +#define NVRM_PCICFG_BAR_OFFSET(i) (0x10 + (i) * 4) +#define NVRM_PCICFG_BAR_REQTYPE_MASK 0x00000001 +#define NVRM_PCICFG_BAR_REQTYPE_MEMORY 0x00000000 +#define NVRM_PCICFG_BAR_MEMTYPE_MASK 0x00000006 +#define NVRM_PCICFG_BAR_MEMTYPE_64BIT 0x00000004 +#define NVRM_PCICFG_BAR_ADDR_MASK 0xfffffff0 + +#define NVRM_PCICFG_NUM_DWORDS 16 + +#define NV_GPU_NUM_BARS 3 +#define NV_GPU_BAR_INDEX_REGS 0 +#define NV_GPU_BAR_INDEX_FB 1 +#define NV_GPU_BAR_INDEX_IMEM 2 + +typedef struct +{ + NvU64 cpu_address; + NvU64 size; + NvU32 offset; + NvU32 *map; + nv_phwreg_t map_u; +} nv_aperture_t; + +typedef struct +{ + char *name; + NvU32 *data; +} nv_parm_t; + +#define NV_RM_PAGE_SHIFT 12 +#define NV_RM_PAGE_SIZE (1 << NV_RM_PAGE_SHIFT) +#define NV_RM_PAGE_MASK (NV_RM_PAGE_SIZE - 1) + +#define NV_RM_TO_OS_PAGE_SHIFT (os_page_shift - NV_RM_PAGE_SHIFT) +#define NV_RM_PAGES_PER_OS_PAGE (1U << NV_RM_TO_OS_PAGE_SHIFT) +#define NV_RM_PAGES_TO_OS_PAGES(count) \ + ((((NvUPtr)(count)) >> NV_RM_TO_OS_PAGE_SHIFT) + \ + ((((count) & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) != 0) ? 1 : 0)) + +#if defined(NVCPU_X86_64) +#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 3) +#else +#define NV_STACK_SIZE (NV_RM_PAGE_SIZE * 2) +#endif + +typedef struct nvidia_stack_s +{ + NvU32 size; + void *top; + NvU8 stack[NV_STACK_SIZE-16] __attribute__ ((aligned(16))); +} nvidia_stack_t; + +/* + * TODO: Remove once all UNIX layers have been converted to use nvidia_stack_t + */ +typedef nvidia_stack_t nv_stack_t; + +typedef struct nv_file_private_t nv_file_private_t; + +/* + * this is a wrapper for unix events + * unlike the events that will be returned to clients, this includes + * kernel-specific data, such as file pointer, etc.. 
+ */ +typedef struct nv_event_s +{ + NvHandle hParent; + NvHandle hObject; + NvU32 index; + NvU32 info32; + NvU16 info16; + nv_file_private_t *nvfp; /* per file-descriptor data pointer */ + NvU32 fd; + NvBool active; /* whether the event should be signaled */ + NvU32 refcount; /* count of associated RM events */ + struct nv_event_s *next; +} nv_event_t; + +typedef struct nv_kern_mapping_s +{ + void *addr; + NvU64 size; + NvU32 modeFlag; + struct nv_kern_mapping_s *next; +} nv_kern_mapping_t; + +typedef struct nv_usermap_access_params_s +{ + NvU64 addr; + NvU64 size; + NvU64 offset; + NvU64 *page_array; + NvU64 num_pages; + NvU64 mmap_start; + NvU64 mmap_size; + NvU64 access_start; + NvU64 access_size; + NvU64 remap_prot_extra; + NvBool contig; +} nv_usermap_access_params_t; + +/* + * It stores mapping context per mapping + */ +typedef struct nv_alloc_mapping_context_s { + void *alloc; + NvU64 page_index; + NvU64 *page_array; + NvU64 num_pages; + NvU64 mmap_start; + NvU64 mmap_size; + NvU64 access_start; + NvU64 access_size; + NvU64 remap_prot_extra; + NvU32 prot; + NvBool valid; +} nv_alloc_mapping_context_t; + +typedef enum +{ + NV_SOC_IRQ_DISPLAY_TYPE, + NV_SOC_IRQ_DPAUX_TYPE, + NV_SOC_IRQ_GPIO_TYPE, + NV_SOC_IRQ_HDACODEC_TYPE, + NV_SOC_IRQ_INVALID_TYPE +} nv_soc_irq_type_t; + +/* + * It stores interrupt numbers and interrupt type and private data + */ +typedef struct nv_soc_irq_info_s { + NvU32 irq_num; + nv_soc_irq_type_t irq_type; + NvBool bh_pending; + union { + NvU32 gpio_num; + NvU32 dpaux_instance; + } irq_data; +} nv_soc_irq_info_t; + +#define NV_MAX_SOC_IRQS 6 +#define NV_MAX_DPAUX_NUM_DEVICES 4 +#define NV_MAX_SOC_DPAUX_NUM_DEVICES 2 // From SOC_DEV_MAPPING + +/* + * per device state + */ + +/* DMA-capable device data, defined by kernel interface layer */ +typedef struct nv_dma_device nv_dma_device_t; + +typedef struct nv_state_t +{ + void *priv; /* private data */ + void *os_state; /* os-specific device state */ + + int flags; + + /* PCI config info */ + nv_pci_info_t pci_info; + NvU16 subsystem_id; + NvU16 subsystem_vendor; + NvU32 gpu_id; + NvU32 iovaspace_id; + struct + { + NvBool valid; + NvU8 uuid[GPU_UUID_LEN]; + } nv_uuid_cache; + void *handle; + + NvU32 pci_cfg_space[NVRM_PCICFG_NUM_DWORDS]; + + /* physical characteristics */ + nv_aperture_t bars[NV_GPU_NUM_BARS]; + nv_aperture_t *regs; + nv_aperture_t *dpaux[NV_MAX_DPAUX_NUM_DEVICES]; + nv_aperture_t *hdacodec_regs; + nv_aperture_t *mipical_regs; + nv_aperture_t *fb, ud; + + NvU32 num_dpaux_instance; + NvU32 interrupt_line; + NvU32 dpaux_irqs[NV_MAX_DPAUX_NUM_DEVICES]; + nv_soc_irq_info_t soc_irq_info[NV_MAX_SOC_IRQS]; + NvS32 current_soc_irq; + NvU32 num_soc_irqs; + NvU32 hdacodec_irq; + NvU8 *soc_dcb_blob; + NvU32 soc_dcb_size; + NvU32 disp_sw_soc_chip_id; + + NvBool primary_vga; + + NvU32 sim_env; + + NvU32 rc_timer_enabled; + + /* list of events allocated for this device */ + nv_event_t *event_list; + + /* lock to protect event_list */ + void *event_spinlock; + + nv_kern_mapping_t *kern_mappings; + + /* Kernel interface DMA device data */ + nv_dma_device_t *dma_dev; + nv_dma_device_t *niso_dma_dev; + + /* + * Per-GPU queue. The actual queue object is usually allocated in the + * arch-specific parent structure (e.g. nv_linux_state_t), and this + * pointer just points to it. 
+ */ + struct os_work_queue *queue; + + /* For loading RM as a firmware (DCE or GSP) client */ + NvBool request_firmware; /* request firmware from the OS */ + NvBool request_fw_client_rm; /* attempt to init RM as FW a client */ + NvBool allow_fallback_to_monolithic_rm; /* allow fallback to monolithic RM if FW client RM doesn't work out */ + NvBool enable_firmware_logs; /* attempt to enable firmware log decoding/printing */ + + /* Variable to track, if nvidia_remove is called */ + NvBool removed; + + NvBool console_device; + + /* Variable to track, if GPU is external GPU */ + NvBool is_external_gpu; + + /* Variable to track, if regkey PreserveVideoMemoryAllocations is set */ + NvBool preserve_vidmem_allocations; + + /* Variable to force allocation of 32-bit addressable memory */ + NvBool force_dma32_alloc; + + /* Variable to track if device has entered dynamic power state */ + NvBool dynamic_power_entered; + + /* PCI power state should be D0 during system suspend */ + NvBool d0_state_in_suspend; + + /* Current cyclestats client and context */ + NvU32 profiler_owner; + void *profiler_context; + + /* + * RMAPI objects to use in the OS layer to talk to core RM. + * + * Note that we only need to store one subdevice handle: in SLI, we will + * have a separate nv_state_t per physical GPU. + */ + struct { + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubDevice; + NvHandle hI2C; + NvHandle hDisp; + } rmapi; + + /* Bool to check if ISO iommu enabled */ + NvBool iso_iommu_present; + + /* Bool to check if dma-buf is supported */ + NvBool dma_buf_supported; + + NvBool printed_openrm_enable_unsupported_gpus_error; + +} nv_state_t; + +// These define need to be in sync with defines in system.h +#define OS_TYPE_LINUX 0x1 +#define OS_TYPE_FREEBSD 0x2 +#define OS_TYPE_SUNOS 0x3 +#define OS_TYPE_VMWARE 0x4 + +struct nv_file_private_t +{ + NvHandle *handles; + NvU16 maxHandles; + NvU32 deviceInstance; + NvU8 metadata[64]; + + nv_file_private_t *ctl_nvfp; + void *ctl_nvfp_priv; +}; + +// Forward define the gpu ops structures +typedef struct gpuSession *nvgpuSessionHandle_t; +typedef struct gpuDevice *nvgpuDeviceHandle_t; +typedef struct gpuAddressSpace *nvgpuAddressSpaceHandle_t; +typedef struct gpuChannel *nvgpuChannelHandle_t; +typedef struct UvmGpuChannelInfo_tag *nvgpuChannelInfo_t; +typedef struct UvmGpuChannelAllocParams_tag nvgpuChannelAllocParams_t; +typedef struct UvmGpuCaps_tag *nvgpuCaps_t; +typedef struct UvmGpuCopyEnginesCaps_tag *nvgpuCesCaps_t; +typedef struct UvmGpuAddressSpaceInfo_tag *nvgpuAddressSpaceInfo_t; +typedef struct UvmGpuAllocInfo_tag *nvgpuAllocInfo_t; +typedef struct UvmGpuP2PCapsParams_tag *nvgpuP2PCapsParams_t; +typedef struct UvmGpuFbInfo_tag *nvgpuFbInfo_t; +typedef struct UvmGpuEccInfo_tag *nvgpuEccInfo_t; +typedef struct UvmGpuFaultInfo_tag *nvgpuFaultInfo_t; +typedef struct UvmGpuAccessCntrInfo_tag *nvgpuAccessCntrInfo_t; +typedef struct UvmGpuAccessCntrConfig_tag *nvgpuAccessCntrConfig_t; +typedef struct UvmGpuInfo_tag nvgpuInfo_t; +typedef struct UvmGpuClientInfo_tag nvgpuClientInfo_t; +typedef struct UvmPmaAllocationOptions_tag *nvgpuPmaAllocationOptions_t; +typedef struct UvmPmaStatistics_tag *nvgpuPmaStatistics_t; +typedef struct UvmGpuMemoryInfo_tag *nvgpuMemoryInfo_t; +typedef struct UvmGpuExternalMappingInfo_tag *nvgpuExternalMappingInfo_t; +typedef struct UvmGpuChannelResourceInfo_tag *nvgpuChannelResourceInfo_t; +typedef struct UvmGpuChannelInstanceInfo_tag *nvgpuChannelInstanceInfo_t; +typedef struct UvmGpuChannelResourceBindParams_tag 
*nvgpuChannelResourceBindParams_t; +typedef struct UvmGpuPagingChannelAllocParams_tag nvgpuPagingChannelAllocParams_t; +typedef struct UvmGpuPagingChannel_tag *nvgpuPagingChannelHandle_t; +typedef struct UvmGpuPagingChannelInfo_tag *nvgpuPagingChannelInfo_t; +typedef NV_STATUS (*nvPmaEvictPagesCallback)(void *, NvU32, NvU64 *, NvU32, NvU64, NvU64); +typedef NV_STATUS (*nvPmaEvictRangeCallback)(void *, NvU64, NvU64); + +/* + * flags + */ + +#define NV_FLAG_OPEN 0x0001 +#define NV_FLAG_EXCLUDE 0x0002 +#define NV_FLAG_CONTROL 0x0004 +// Unused 0x0008 +#define NV_FLAG_SOC_DISPLAY 0x0010 +#define NV_FLAG_USES_MSI 0x0020 +#define NV_FLAG_USES_MSIX 0x0040 +#define NV_FLAG_PASSTHRU 0x0080 +#define NV_FLAG_SUSPENDED 0x0100 +// Unused 0x0200 +// Unused 0x0400 +#define NV_FLAG_PERSISTENT_SW_STATE 0x0800 +#define NV_FLAG_IN_RECOVERY 0x1000 +// Unused 0x2000 +#define NV_FLAG_UNBIND_LOCK 0x4000 +/* To be set when GPU is not present on the bus, to help device teardown */ +#define NV_FLAG_IN_SURPRISE_REMOVAL 0x8000 + +typedef enum +{ + NV_PM_ACTION_HIBERNATE, + NV_PM_ACTION_STANDBY, + NV_PM_ACTION_RESUME +} nv_pm_action_t; + +typedef enum +{ + NV_PM_ACTION_DEPTH_DEFAULT, + NV_PM_ACTION_DEPTH_MODESET, + NV_PM_ACTION_DEPTH_UVM +} nv_pm_action_depth_t; + +typedef enum +{ + NV_DYNAMIC_PM_NEVER, + NV_DYNAMIC_PM_COARSE, + NV_DYNAMIC_PM_FINE +} nv_dynamic_power_mode_t; + +typedef enum +{ + NV_POWER_STATE_IN_HIBERNATE, + NV_POWER_STATE_IN_STANDBY, + NV_POWER_STATE_RUNNING +} nv_power_state_t; + +typedef enum +{ + NV_FIRMWARE_GSP, + NV_FIRMWARE_GSP_LOG +} nv_firmware_t; + +#define NV_PRIMARY_VGA(nv) ((nv)->primary_vga) + +#define NV_IS_CTL_DEVICE(nv) ((nv)->flags & NV_FLAG_CONTROL) +#define NV_IS_SOC_DISPLAY_DEVICE(nv) \ + ((nv)->flags & NV_FLAG_SOC_DISPLAY) + +#define NV_IS_DEVICE_IN_SURPRISE_REMOVAL(nv) \ + (((nv)->flags & NV_FLAG_IN_SURPRISE_REMOVAL) != 0) + +#define NV_SOC_IS_ISO_IOMMU_PRESENT(nv) \ + ((nv)->iso_iommu_present) + +/* + * NVIDIA ACPI event IDs to be passed into the core NVIDIA + * driver for various events like display switch events, + * AC/battery events, etc.. + */ +#define NV_SYSTEM_ACPI_DISPLAY_SWITCH_EVENT 0x8001 +#define NV_SYSTEM_ACPI_BATTERY_POWER_EVENT 0x8002 + +/* + * GPU add/remove events + */ +#define NV_SYSTEM_GPU_ADD_EVENT 0x9001 +#define NV_SYSTEM_GPU_REMOVE_EVENT 0x9002 + +/* + * Status bit definitions for display switch hotkey events. + */ +#define NV_HOTKEY_STATUS_DISPLAY_ENABLE_LCD 0x01 +#define NV_HOTKEY_STATUS_DISPLAY_ENABLE_CRT 0x02 +#define NV_HOTKEY_STATUS_DISPLAY_ENABLE_TV 0x04 +#define NV_HOTKEY_STATUS_DISPLAY_ENABLE_DFP 0x08 + +/* + * NVIDIA ACPI sub-event IDs (event types) to be passed into + * to core NVIDIA driver for ACPI events. 
+ */ +#define NV_SYSTEM_ACPI_EVENT_VALUE_DISPLAY_SWITCH_DEFAULT 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_AC 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY 1 +#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_UNDOCKED 0 +#define NV_SYSTEM_ACPI_EVENT_VALUE_DOCK_EVENT_DOCKED 1 + +#define NV_ACPI_NVIF_HANDLE_PRESENT 0x01 +#define NV_ACPI_DSM_HANDLE_PRESENT 0x02 +#define NV_ACPI_WMMX_HANDLE_PRESENT 0x04 + +#define NV_EVAL_ACPI_METHOD_NVIF 0x01 +#define NV_EVAL_ACPI_METHOD_WMMX 0x02 + +#define NV_I2C_CMD_READ 1 +#define NV_I2C_CMD_WRITE 2 +#define NV_I2C_CMD_SMBUS_READ 3 +#define NV_I2C_CMD_SMBUS_WRITE 4 +#define NV_I2C_CMD_SMBUS_QUICK_WRITE 5 +#define NV_I2C_CMD_SMBUS_QUICK_READ 6 +#define NV_I2C_CMD_SMBUS_BLOCK_READ 7 +#define NV_I2C_CMD_SMBUS_BLOCK_WRITE 8 + +// Flags needed by OSAllocPagesNode +#define NV_ALLOC_PAGES_NODE_NONE 0x0 +#define NV_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1 + +/* +** where we hide our nv_state_t * ... +*/ +#define NV_SET_NV_STATE(pgpu,p) ((pgpu)->pOsGpuInfo = (p)) +#define NV_GET_NV_STATE(pGpu) \ + (nv_state_t *)((pGpu) ? (pGpu)->pOsGpuInfo : NULL) + +#define IS_REG_OFFSET(nv, offset, length) \ + (((offset) >= (nv)->regs->cpu_address) && \ + (((offset) + ((length)-1)) <= \ + (nv)->regs->cpu_address + ((nv)->regs->size-1))) + +#define IS_FB_OFFSET(nv, offset, length) \ + (((nv)->fb) && ((offset) >= (nv)->fb->cpu_address) && \ + (((offset) + ((length)-1)) <= (nv)->fb->cpu_address + ((nv)->fb->size-1))) + +#define IS_UD_OFFSET(nv, offset, length) \ + (((nv)->ud.cpu_address != 0) && ((nv)->ud.size != 0) && \ + ((offset) >= (nv)->ud.cpu_address) && \ + (((offset) + ((length)-1)) <= (nv)->ud.cpu_address + ((nv)->ud.size-1))) + +#define IS_IMEM_OFFSET(nv, offset, length) \ + (((nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address != 0) && \ + ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size != 0) && \ + ((offset) >= (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address) && \ + (((offset) + ((length) - 1)) <= \ + (nv)->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + \ + ((nv)->bars[NV_GPU_BAR_INDEX_IMEM].size - 1))) + +#define NV_RM_MAX_MSIX_LINES 8 + +#define NV_MAX_ISR_DELAY_US 20000 +#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000) + +#define NV_TIMERCMP(a, b, CMP) \ + (((a)->tv_sec == (b)->tv_sec) ? \ + ((a)->tv_usec CMP (b)->tv_usec) : ((a)->tv_sec CMP (b)->tv_sec)) + +#define NV_TIMERADD(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ + if ((result)->tv_usec >= 1000000) \ + { \ + ++(result)->tv_sec; \ + (result)->tv_usec -= 1000000; \ + } \ + } + +#define NV_TIMERSUB(a, b, result) \ + { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ + (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ + if ((result)->tv_usec < 0) \ + { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ + } + +#define NV_TIMEVAL_TO_US(tv) ((NvU64)(tv).tv_sec * 1000000 + (tv).tv_usec) + +#ifndef NV_ALIGN_UP +#define NV_ALIGN_UP(v,g) (((v) + ((g) - 1)) & ~((g) - 1)) +#endif +#ifndef NV_ALIGN_DOWN +#define NV_ALIGN_DOWN(v,g) ((v) & ~((g) - 1)) +#endif + +/* + * driver internal interfaces + */ + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for UNIX specific OS interface. 
+ * + * --------------------------------------------------------------------------- + */ + +NvU32 NV_API_CALL nv_get_dev_minor (nv_state_t *); +void* NV_API_CALL nv_alloc_kernel_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, void **); +NV_STATUS NV_API_CALL nv_free_kernel_mapping (nv_state_t *, void *, void *, void *); +NV_STATUS NV_API_CALL nv_alloc_user_mapping (nv_state_t *, void *, NvU64, NvU32, NvU64, NvU32, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_free_user_mapping (nv_state_t *, void *, NvU64, void *); +NV_STATUS NV_API_CALL nv_add_mapping_context_to_file (nv_state_t *, nv_usermap_access_params_t*, NvU32, void *, NvU64, NvU32); + +NvU64 NV_API_CALL nv_get_kern_phys_address (NvU64); +NvU64 NV_API_CALL nv_get_user_phys_address (NvU64); +nv_state_t* NV_API_CALL nv_get_adapter_state (NvU32, NvU8, NvU8); +nv_state_t* NV_API_CALL nv_get_ctl_state (void); + +void NV_API_CALL nv_set_dma_address_size (nv_state_t *, NvU32 ); + +NV_STATUS NV_API_CALL nv_alias_pages (nv_state_t *, NvU32, NvU32, NvU32, NvU64, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_alloc_pages (nv_state_t *, NvU32, NvBool, NvU32, NvBool, NvBool, NvU64 *, void **); +NV_STATUS NV_API_CALL nv_free_pages (nv_state_t *, NvU32, NvBool, NvU32, void *); + +NV_STATUS NV_API_CALL nv_register_user_pages (nv_state_t *, NvU64, NvU64 *, void *, void **); +void NV_API_CALL nv_unregister_user_pages (nv_state_t *, NvU64, void **, void **); + +NV_STATUS NV_API_CALL nv_register_peer_io_mem (nv_state_t *, NvU64 *, NvU64, void **); +void NV_API_CALL nv_unregister_peer_io_mem(nv_state_t *, void *); + +struct sg_table; + +NV_STATUS NV_API_CALL nv_register_sgt (nv_state_t *, NvU64 *, NvU64, NvU32, void **, struct sg_table *, void *); +void NV_API_CALL nv_unregister_sgt (nv_state_t *, struct sg_table **, void **, void *); +NV_STATUS NV_API_CALL nv_register_phys_pages (nv_state_t *, NvU64 *, NvU64, NvU32, void **); +void NV_API_CALL nv_unregister_phys_pages (nv_state_t *, void *); + +NV_STATUS NV_API_CALL nv_dma_map_sgt (nv_dma_device_t *, NvU64, NvU64 *, NvU32, void **); +NV_STATUS NV_API_CALL nv_dma_map_pages (nv_dma_device_t *, NvU64, NvU64 *, NvBool, NvU32, void **); +NV_STATUS NV_API_CALL nv_dma_unmap_pages (nv_dma_device_t *, NvU64, NvU64 *, void **); + +NV_STATUS NV_API_CALL nv_dma_map_alloc (nv_dma_device_t *, NvU64, NvU64 *, NvBool, void **); +NV_STATUS NV_API_CALL nv_dma_unmap_alloc (nv_dma_device_t *, NvU64, NvU64 *, void **); + +NV_STATUS NV_API_CALL nv_dma_map_peer (nv_dma_device_t *, nv_dma_device_t *, NvU8, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_peer (nv_dma_device_t *, NvU64, NvU64); + +NV_STATUS NV_API_CALL nv_dma_map_mmio (nv_dma_device_t *, NvU64, NvU64 *); +void NV_API_CALL nv_dma_unmap_mmio (nv_dma_device_t *, NvU64, NvU64); + +void NV_API_CALL nv_dma_cache_invalidate (nv_dma_device_t *, void *); +void NV_API_CALL nv_dma_enable_nvlink (nv_dma_device_t *); + +NvS32 NV_API_CALL nv_start_rc_timer (nv_state_t *); +NvS32 NV_API_CALL nv_stop_rc_timer (nv_state_t *); + +void NV_API_CALL nv_post_event (nv_event_t *, NvHandle, NvU32, NvU32, NvU16, NvBool); +NvS32 NV_API_CALL nv_get_event (nv_file_private_t *, nv_event_t *, NvU32 *); + +void* NV_API_CALL nv_i2c_add_adapter (nv_state_t *, NvU32); +void NV_API_CALL nv_i2c_del_adapter (nv_state_t *, void *); + +void NV_API_CALL nv_acpi_methods_init (NvU32 *); +void NV_API_CALL nv_acpi_methods_uninit (void); + +NV_STATUS NV_API_CALL nv_acpi_method (NvU32, NvU32, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_dsm_method (nv_state_t *, NvU8 *, 
NvU32, NvBool, NvU32, void *, NvU16, NvU32 *, void *, NvU16 *); +NV_STATUS NV_API_CALL nv_acpi_ddc_method (nv_state_t *, void *, NvU32 *, NvBool); +NV_STATUS NV_API_CALL nv_acpi_dod_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_rom_method (nv_state_t *, NvU32 *, NvU32 *); +NV_STATUS NV_API_CALL nv_acpi_get_powersource (NvU32 *); +NvBool NV_API_CALL nv_acpi_is_battery_present(void); + +NV_STATUS NV_API_CALL nv_acpi_mux_method (nv_state_t *, NvU32 *, NvU32, const char *); + +NV_STATUS NV_API_CALL nv_log_error (nv_state_t *, NvU32, const char *, va_list); + +NvU64 NV_API_CALL nv_get_dma_start_address (nv_state_t *); +NV_STATUS NV_API_CALL nv_set_primary_vga_status(nv_state_t *); +NV_STATUS NV_API_CALL nv_pci_trigger_recovery (nv_state_t *); +NvBool NV_API_CALL nv_requires_dma_remap (nv_state_t *); + +NvBool NV_API_CALL nv_is_rm_firmware_active(nv_state_t *); +const void*NV_API_CALL nv_get_firmware(nv_state_t *, nv_firmware_t, const void **, NvU32 *); +void NV_API_CALL nv_put_firmware(const void *); + +nv_file_private_t* NV_API_CALL nv_get_file_private(NvS32, NvBool, void **); +void NV_API_CALL nv_put_file_private(void *); + +NV_STATUS NV_API_CALL nv_get_device_memory_config(nv_state_t *, NvU64 *, NvU64 *, NvU32 *, NvU32 *, NvS32 *); + +NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *, NvU64 *, NvU64 *, void**); +NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv, NvBool *mode); + +void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv); + +void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64, NvU64); + +void NV_API_CALL nv_p2p_free_platform_data(void *data); + +#if defined(NVCPU_PPC64LE) +NV_STATUS NV_API_CALL nv_get_nvlink_line_rate (nv_state_t *, NvU32 *); +#endif + +NV_STATUS NV_API_CALL nv_revoke_gpu_mappings (nv_state_t *); +void NV_API_CALL nv_acquire_mmap_lock (nv_state_t *); +void NV_API_CALL nv_release_mmap_lock (nv_state_t *); +NvBool NV_API_CALL nv_get_all_mappings_revoked_locked (nv_state_t *); +void NV_API_CALL nv_set_safe_to_mmap_locked (nv_state_t *, NvBool); + +NV_STATUS NV_API_CALL nv_indicate_idle (nv_state_t *); +NV_STATUS NV_API_CALL nv_indicate_not_idle (nv_state_t *); +void NV_API_CALL nv_idle_holdoff (nv_state_t *); + +NvBool NV_API_CALL nv_dynamic_power_available (nv_state_t *); +void NV_API_CALL nv_audio_dynamic_power (nv_state_t *); + +void NV_API_CALL nv_control_soc_irqs (nv_state_t *, NvBool bEnable); +NV_STATUS NV_API_CALL nv_get_current_irq_priv_data(nv_state_t *, NvU32 *); + +NV_STATUS NV_API_CALL nv_acquire_fabric_mgmt_cap (int, int*); +int NV_API_CALL nv_cap_drv_init(void); +void NV_API_CALL nv_cap_drv_exit(void); +NvBool NV_API_CALL nv_is_gpu_accessible(nv_state_t *); + +NvU32 NV_API_CALL nv_get_os_type(void); + +void NV_API_CALL nv_get_updated_emu_seg(NvU32 *start, NvU32 *end); +struct dma_buf; +typedef struct nv_dma_buf nv_dma_buf_t; +struct drm_gem_object; + +NV_STATUS NV_API_CALL nv_dma_import_sgt (nv_dma_device_t *, struct sg_table *, struct drm_gem_object *); +void NV_API_CALL nv_dma_release_sgt(struct sg_table *, struct drm_gem_object *); +NV_STATUS NV_API_CALL nv_dma_import_dma_buf (nv_dma_device_t *, struct dma_buf *, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **); +NV_STATUS NV_API_CALL nv_dma_import_from_fd (nv_dma_device_t *, NvS32, NvU32 *, void **, struct sg_table **, nv_dma_buf_t **); +void NV_API_CALL nv_dma_release_dma_buf (void *, nv_dma_buf_t *); + +void NV_API_CALL nv_schedule_uvm_isr (nv_state_t *); + +NvBool NV_API_CALL nv_platform_supports_s0ix (void); 
+NvBool NV_API_CALL nv_s2idle_pm_configured (void); + +NvBool NV_API_CALL nv_is_chassis_notebook (void); +void NV_API_CALL nv_allow_runtime_suspend (nv_state_t *nv); +void NV_API_CALL nv_disallow_runtime_suspend (nv_state_t *nv); + +typedef void (*nvTegraDceClientIpcCallback)(NvU32, NvU32, NvU32, void *, void *); + +NV_STATUS NV_API_CALL nv_get_num_phys_pages (void *, NvU32 *); +NV_STATUS NV_API_CALL nv_get_phys_pages (void *, void *, NvU32 *); + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for Resource Manager interface. + * + * --------------------------------------------------------------------------- + */ + +NvBool NV_API_CALL rm_init_rm (nvidia_stack_t *); +void NV_API_CALL rm_shutdown_rm (nvidia_stack_t *); +NvBool NV_API_CALL rm_init_private_state (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_free_private_state (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_init_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_disable_adapter (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_shutdown_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_exclude_adapter (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_api_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_acquire_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_release_gpu_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_release_all_gpus_lock (nvidia_stack_t *); +NV_STATUS NV_API_CALL rm_ioctl (nvidia_stack_t *, nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32); +NvBool NV_API_CALL rm_isr (nvidia_stack_t *, nv_state_t *, NvU32 *); +void NV_API_CALL rm_isr_bh (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_isr_bh_unlocked (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_power_management (nvidia_stack_t *, nv_state_t *, nv_pm_action_t); +NV_STATUS NV_API_CALL rm_stop_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_restart_user_channels (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_save_low_res_mode (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_vbios_version (nvidia_stack_t *, nv_state_t *, char *); +char* NV_API_CALL rm_get_gpu_uuid (nvidia_stack_t *, nv_state_t *); +const NvU8* NV_API_CALL rm_get_gpu_uuid_raw (nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_set_rm_firmware_requested(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_get_firmware_version (nvidia_stack_t *, nv_state_t *, char *, NvLength); +void NV_API_CALL rm_cleanup_file_private (nvidia_stack_t *, nv_state_t *, nv_file_private_t *); +void NV_API_CALL rm_unbind_lock (nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_read_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32 *); +NV_STATUS NV_API_CALL rm_write_registry_dword (nvidia_stack_t *, nv_state_t *, const char *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_binary (nvidia_stack_t *, nv_state_t *, const char *, NvU8 *, NvU32); +NV_STATUS NV_API_CALL rm_write_registry_string (nvidia_stack_t *, nv_state_t *, const char *, const char *, NvU32); +void NV_API_CALL rm_parse_option_string (nvidia_stack_t *, const char *); +char* NV_API_CALL rm_remove_spaces (const char *); +char* NV_API_CALL rm_string_token (char **, const char); + +NV_STATUS NV_API_CALL rm_run_rc_callback (nvidia_stack_t *, nv_state_t *); 
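As a minimal sketch (illustrative only, not part of this patch) of how the registry accessors declared above are typically paired, a hypothetical OS-layer helper could wrap rm_read_registry_dword() and fall back to a caller-supplied default when the key cannot be read. The helper name and its key argument are made up, and treating NV_OK as the success status is an assumption based on the usual NV_STATUS convention.

/* Hypothetical helper, not part of the original source. */
static NvU32 nv_read_reg_dword_or_default(nvidia_stack_t *sp, nv_state_t *nv,
                                          const char *key, NvU32 default_value)
{
    NvU32 value;

    if (rm_read_registry_dword(sp, nv, key, &value) != NV_OK)
    {
        value = default_value;  /* key absent or unreadable: keep the default */
    }

    return value;
}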
+void NV_API_CALL rm_execute_work_item (nvidia_stack_t *, void *); +const char* NV_API_CALL rm_get_device_name (NvU16, NvU16, NvU16); + +NV_STATUS NV_API_CALL rm_is_supported_device (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_supported_pci_device(NvU8 pci_class, + NvU8 pci_subclass, + NvU16 vendor, + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_legacy_warning); + +void NV_API_CALL rm_i2c_remove_adapters (nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_i2c_is_smbus_capable (nvidia_stack_t *, nv_state_t *, void *); +NV_STATUS NV_API_CALL rm_i2c_transfer (nvidia_stack_t *, nv_state_t *, void *, NvU8, NvU8, NvU8, NvU32, NvU8 *); + +NV_STATUS NV_API_CALL rm_perform_version_check (nvidia_stack_t *, void *, NvU32); + +NV_STATUS NV_API_CALL rm_system_event (nvidia_stack_t *, NvU32, NvU32); + +void NV_API_CALL rm_disable_gpu_state_persistence (nvidia_stack_t *sp, nv_state_t *); +NV_STATUS NV_API_CALL rm_p2p_init_mapping (nvidia_stack_t *, NvU64, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU64, NvU64, NvU64, NvU64, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_destroy_mapping (nvidia_stack_t *, NvU64); +NV_STATUS NV_API_CALL rm_p2p_get_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, NvU8 **, void *); +NV_STATUS NV_API_CALL rm_p2p_get_gpu_info (nvidia_stack_t *, NvU64, NvU64, NvU8 **, void **); +NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent (nvidia_stack_t *, NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *); +NV_STATUS NV_API_CALL rm_p2p_register_callback (nvidia_stack_t *, NvU64, NvU64, NvU64, void *, void (*)(void *), void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages (nvidia_stack_t *, NvU64, NvU32, NvU64, void *); +NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent(nvidia_stack_t *, void *, void *); +NV_STATUS NV_API_CALL rm_p2p_dma_map_pages (nvidia_stack_t *, nv_dma_device_t *, NvU8 *, NvU32, NvU32, NvU64 *, void **); +NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, NvHandle, void *, NvHandle, NvU64, NvU64, NvHandle *); +void NV_API_CALL rm_dma_buf_undup_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle); +NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle (nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64, NvU64 *); +NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvU64, NvU64); +NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle *, NvHandle *, NvHandle *, void **); +void NV_API_CALL rm_dma_buf_put_client_and_device(nvidia_stack_t *, nv_state_t *, NvHandle, NvHandle, NvHandle, void *); +NV_STATUS NV_API_CALL rm_log_gpu_crash (nv_stack_t *, nv_state_t *); + +void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd); +NvBool NV_API_CALL rm_get_device_remove_flag(nvidia_stack_t *sp, NvU32 gpu_id); +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults(nvidia_stack_t *, nv_state_t *, NvU32 *); +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked(nvidia_stack_t *, nv_state_t *, NvU32 *); +NvBool NV_API_CALL rm_gpu_need_4k_page_isolation(nv_state_t *); +NvBool NV_API_CALL rm_is_chipset_io_coherent(nv_stack_t *); +NvBool NV_API_CALL rm_init_event_locks(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_destroy_event_locks(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_get_gpu_numa_info(nvidia_stack_t *, nv_state_t *, NvS32 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *); +NV_STATUS 
NV_API_CALL rm_gpu_numa_online(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_gpu_numa_offline(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_device_sequestered(nvidia_stack_t *, nv_state_t *); +void NV_API_CALL rm_check_for_gpu_surprise_removal(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_set_external_kernel_client_count(nvidia_stack_t *, nv_state_t *, NvBool); +NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_is_iommu_needed_for_sriov(nvidia_stack_t *, nv_state_t *); +NvBool NV_API_CALL rm_disable_iomap_wc(void); + +void NV_API_CALL rm_init_dynamic_power_management(nvidia_stack_t *, nv_state_t *, NvBool); +void NV_API_CALL rm_cleanup_dynamic_power_management(nvidia_stack_t *, nv_state_t *); +NV_STATUS NV_API_CALL rm_ref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +void NV_API_CALL rm_unref_dynamic_power(nvidia_stack_t *, nv_state_t *, nv_dynamic_power_mode_t); +NV_STATUS NV_API_CALL rm_transition_dynamic_power(nvidia_stack_t *, nv_state_t *, NvBool); +const char* NV_API_CALL rm_get_vidmem_power_status(nvidia_stack_t *, nv_state_t *); +const char* NV_API_CALL rm_get_dynamic_power_management_status(nvidia_stack_t *, nv_state_t *); +const char* NV_API_CALL rm_get_gpu_gcx_support(nvidia_stack_t *, nv_state_t *, NvBool); + +void NV_API_CALL rm_acpi_notify(nvidia_stack_t *, nv_state_t *, NvU32); +NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 *); + +/* vGPU VFIO specific functions */ +NV_STATUS NV_API_CALL nv_vgpu_create_request(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU32, NvU16 *, NvU32); +NV_STATUS NV_API_CALL nv_vgpu_delete(nvidia_stack_t *, const NvU8 *, NvU16); +NV_STATUS NV_API_CALL nv_vgpu_get_type_ids(nvidia_stack_t *, nv_state_t *, NvU32 *, NvU32 **, NvBool); +NV_STATUS NV_API_CALL nv_vgpu_get_type_info(nvidia_stack_t *, nv_state_t *, NvU32, char *, int, NvU8); +NV_STATUS NV_API_CALL nv_vgpu_get_bar_info(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 *, NvU32, void *); +NV_STATUS NV_API_CALL nv_vgpu_start(nvidia_stack_t *, const NvU8 *, void *, NvS32 *, NvU8 *, NvU32); +NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap(nvidia_stack_t *, nv_state_t *, const NvU8 *, NvU64 **, NvU64 **, NvU32 *); +NV_STATUS NV_API_CALL nv_vgpu_process_vf_info(nvidia_stack_t *, nv_state_t *, NvU8, NvU32, NvU8, NvU8, NvU8, NvBool, void *); +NV_STATUS NV_API_CALL nv_vgpu_update_request(nvidia_stack_t *, const NvU8 *, NvU32, NvU64 *, NvU64 *, const char *); +NV_STATUS NV_API_CALL nv_gpu_bind_event(nvidia_stack_t *); + +NV_STATUS NV_API_CALL nv_get_usermap_access_params(nv_state_t*, nv_usermap_access_params_t*); +nv_soc_irq_type_t NV_API_CALL nv_get_current_irq_type(nv_state_t*); +void NV_API_CALL nv_flush_coherent_cpu_cache_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size); + +/* Callbacks should occur roughly every 10ms. */ +#define NV_SNAPSHOT_TIMER_HZ 100 +void NV_API_CALL nv_start_snapshot_timer(void (*snapshot_callback)(void *context)); +void NV_API_CALL nv_flush_snapshot_timer(void); +void NV_API_CALL nv_stop_snapshot_timer(void); + +static inline const NvU8 *nv_get_cached_uuid(nv_state_t *nv) +{ + return nv->nv_uuid_cache.valid ? 
nv->nv_uuid_cache.uuid : NULL; +} + +#if defined(NVCPU_X86_64) + +static inline NvU64 nv_rdtsc(void) +{ + NvU64 val; + __asm__ __volatile__ ("rdtsc \t\n" + "shlq $0x20,%%rdx \t\n" + "orq %%rdx,%%rax \t\n" + : "=A" (val)); + return val; +} + +#endif + +#endif /* NVRM */ + +static inline int nv_count_bits(NvU64 word) +{ + NvU64 bits; + + bits = (word & 0x5555555555555555ULL) + ((word >> 1) & 0x5555555555555555ULL); + bits = (bits & 0x3333333333333333ULL) + ((bits >> 2) & 0x3333333333333333ULL); + bits = (bits & 0x0f0f0f0f0f0f0f0fULL) + ((bits >> 4) & 0x0f0f0f0f0f0f0f0fULL); + bits = (bits & 0x00ff00ff00ff00ffULL) + ((bits >> 8) & 0x00ff00ff00ff00ffULL); + bits = (bits & 0x0000ffff0000ffffULL) + ((bits >> 16) & 0x0000ffff0000ffffULL); + bits = (bits & 0x00000000ffffffffULL) + ((bits >> 32) & 0x00000000ffffffffULL); + + return (int)(bits); +} + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/include/nv_escape.h b/src/nvidia/arch/nvalloc/unix/include/nv_escape.h new file mode 100644 index 000000000..629af5a46 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/nv_escape.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NV_ESCAPE_H_INCLUDED +#define NV_ESCAPE_H_INCLUDED + +#define NV_ESC_RM_ALLOC_MEMORY 0x27 +#define NV_ESC_RM_ALLOC_OBJECT 0x28 +#define NV_ESC_RM_FREE 0x29 +#define NV_ESC_RM_CONTROL 0x2A +#define NV_ESC_RM_ALLOC 0x2B +#define NV_ESC_RM_CONFIG_GET 0x32 +#define NV_ESC_RM_CONFIG_SET 0x33 +#define NV_ESC_RM_DUP_OBJECT 0x34 +#define NV_ESC_RM_SHARE 0x35 +#define NV_ESC_RM_CONFIG_GET_EX 0x37 +#define NV_ESC_RM_CONFIG_SET_EX 0x38 +#define NV_ESC_RM_I2C_ACCESS 0x39 +#define NV_ESC_RM_IDLE_CHANNELS 0x41 +#define NV_ESC_RM_VID_HEAP_CONTROL 0x4A +#define NV_ESC_RM_ACCESS_REGISTRY 0x4D +#define NV_ESC_RM_MAP_MEMORY 0x4E +#define NV_ESC_RM_UNMAP_MEMORY 0x4F +#define NV_ESC_RM_GET_EVENT_DATA 0x52 +#define NV_ESC_RM_ALLOC_CONTEXT_DMA2 0x54 +#define NV_ESC_RM_ADD_VBLANK_CALLBACK 0x56 +#define NV_ESC_RM_MAP_MEMORY_DMA 0x57 +#define NV_ESC_RM_UNMAP_MEMORY_DMA 0x58 +#define NV_ESC_RM_BIND_CONTEXT_DMA 0x59 +#define NV_ESC_RM_EXPORT_OBJECT_TO_FD 0x5C +#define NV_ESC_RM_IMPORT_OBJECT_FROM_FD 0x5D +#define NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 0x5E + +#endif // NV_ESCAPE_H_INCLUDED diff --git a/src/nvidia/arch/nvalloc/unix/include/os-interface.h b/src/nvidia/arch/nvalloc/unix/include/os-interface.h new file mode 100644 index 000000000..96b1a1d16 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/os-interface.h @@ -0,0 +1,234 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* + * Os interface definitions needed by os-interface.c + */ + +#ifndef OS_INTERFACE_H +#define OS_INTERFACE_H + +/******************* Operating System Interface Routines *******************\ +* * +* Operating system wrapper functions used to abstract the OS. * +* * +\***************************************************************************/ + +#include +#include +#include "nv_stdarg.h" +#include +#include +#include + + + +typedef struct +{ + NvU32 os_major_version; + NvU32 os_minor_version; + NvU32 os_build_number; + const char * os_build_version_str; + const char * os_build_date_plus_str; +}os_version_info; + +/* Each OS defines its own version of this opaque type */ +struct os_work_queue; + +/* Each OS defines its own version of this opaque type */ +typedef struct os_wait_queue os_wait_queue; + +/* + * --------------------------------------------------------------------------- + * + * Function prototypes for OS interface. 
+ * + * --------------------------------------------------------------------------- + */ + +NvU64 NV_API_CALL os_get_num_phys_pages (void); +NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64); +void NV_API_CALL os_free_mem (void *); +NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *); +NvU64 NV_API_CALL os_get_current_tick (void); +NvU64 NV_API_CALL os_get_current_tick_hr (void); +NvU64 NV_API_CALL os_get_tick_resolution (void); +NV_STATUS NV_API_CALL os_delay (NvU32); +NV_STATUS NV_API_CALL os_delay_us (NvU32); +NvU64 NV_API_CALL os_get_cpu_frequency (void); +NvU32 NV_API_CALL os_get_current_process (void); +void NV_API_CALL os_get_current_process_name (char *, NvU32); +NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *); +char* NV_API_CALL os_string_copy (char *, const char *); +NvU32 NV_API_CALL os_string_length (const char *); +NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32); +NvS32 NV_API_CALL os_string_compare (const char *, const char *); +NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...); +NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list); +void NV_API_CALL os_log_error (const char *, va_list); +void* NV_API_CALL os_mem_copy (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32); +NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32); +void* NV_API_CALL os_mem_set (void *, NvU8, NvU32); +NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32); +void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *); +NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *); +NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *); +NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8); +NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16); +NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32); +NvBool NV_API_CALL os_pci_remove_supported (void); +void NV_API_CALL os_pci_remove (void *); +void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32); +void NV_API_CALL os_unmap_kernel_space (void *, NvU64); +void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **); +void NV_API_CALL os_unmap_user_space (void *, NvU64, void *); +NV_STATUS NV_API_CALL os_flush_cpu_cache (void); +NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void); +NV_STATUS NV_API_CALL os_flush_user_cache (void); +void NV_API_CALL os_flush_cpu_write_combine_buffer(void); +NvU8 NV_API_CALL os_io_read_byte (NvU32); +NvU16 NV_API_CALL os_io_read_word (NvU32); +NvU32 NV_API_CALL os_io_read_dword (NvU32); +void NV_API_CALL os_io_write_byte (NvU32, NvU8); +void NV_API_CALL os_io_write_word (NvU32, NvU16); +void NV_API_CALL os_io_write_dword (NvU32, NvU32); +NvBool NV_API_CALL os_is_administrator (void); +NvBool NV_API_CALL os_allow_priority_override (void); +void NV_API_CALL os_dbg_init (void); +void NV_API_CALL os_dbg_breakpoint (void); +void NV_API_CALL os_dbg_set_level (NvU32); +NvU32 NV_API_CALL os_get_cpu_count (void); +NvU32 NV_API_CALL os_get_cpu_number (void); +void NV_API_CALL os_disable_console_access (void); +void NV_API_CALL os_enable_console_access (void); +NV_STATUS NV_API_CALL os_registry_init (void); +NV_STATUS NV_API_CALL os_schedule (void); +NV_STATUS NV_API_CALL os_alloc_spinlock (void **); +void NV_API_CALL os_free_spinlock (void *); +NvU64 NV_API_CALL os_acquire_spinlock (void *); +void NV_API_CALL os_release_spinlock (void *, NvU64); 
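As a minimal sketch (illustrative only, not part of this patch) of how the spinlock wrappers declared above pair up, the following hypothetical helpers guard a dummy counter. The assumption that the NvU64 returned by os_acquire_spinlock() is an opaque cookie (for example, saved interrupt state) that must be handed back to os_release_spinlock() is inferred from the signatures alone.

/* Hypothetical usage of the os_*_spinlock wrappers declared above. */
static void *g_example_lock;
static NvU32 g_example_count;

static NV_STATUS example_lock_init(void)
{
    return os_alloc_spinlock(&g_example_lock);
}

static void example_count_increment(void)
{
    NvU64 flags = os_acquire_spinlock(g_example_lock);  /* opaque cookie */
    g_example_count++;
    os_release_spinlock(g_example_lock, flags);
}

static void example_lock_destroy(void)
{
    os_free_spinlock(g_example_lock);
}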
+NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *); +NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *); +NV_STATUS NV_API_CALL os_alloc_mutex (void **); +void NV_API_CALL os_free_mutex (void *); +NV_STATUS NV_API_CALL os_acquire_mutex (void *); +NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *); +void NV_API_CALL os_release_mutex (void *); +void* NV_API_CALL os_alloc_semaphore (NvU32); +void NV_API_CALL os_free_semaphore (void *); +NV_STATUS NV_API_CALL os_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *); +NV_STATUS NV_API_CALL os_release_semaphore (void *); +NvBool NV_API_CALL os_semaphore_may_sleep (void); +NV_STATUS NV_API_CALL os_get_version_info (os_version_info*); +NvBool NV_API_CALL os_is_isr (void); +NvBool NV_API_CALL os_pat_supported (void); +void NV_API_CALL os_dump_stack (void); +NvBool NV_API_CALL os_is_efi_enabled (void); +NvBool NV_API_CALL os_is_xen_dom0 (void); +NvBool NV_API_CALL os_is_vgx_hyper (void); +NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32); +NvBool NV_API_CALL os_is_grid_supported (void); +NvU32 NV_API_CALL os_get_grid_csp_support (void); +void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64); +void NV_API_CALL os_bug_check (NvU32, const char *); +NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32); +NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**); +NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *); +NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *); +NV_STATUS NV_API_CALL os_get_euid (NvU32 *); +NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr); +NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *); +void NV_API_CALL os_add_record_for_crashLog (void *, NvU32); +void NV_API_CALL os_delete_record_for_crashLog (void *); +NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32); +NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *); +NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *); +NV_STATUS NV_API_CALL os_get_page (NvU64 address); +NV_STATUS NV_API_CALL os_put_page (NvU64 address); +NvU32 NV_API_CALL os_get_page_refcount (NvU64 address); +NvU32 NV_API_CALL os_count_tail_pages (NvU64 address); +void NV_API_CALL os_free_pages_phys (NvU64, NvU32); +NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *); +NV_STATUS NV_API_CALL os_open_temporary_file (void **); +void NV_API_CALL os_close_file (void *); +NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64); +NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **); +NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64); +NvBool NV_API_CALL os_is_nvswitch_present (void); +void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16); +NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **); +void NV_API_CALL os_free_wait_queue (os_wait_queue *); +void NV_API_CALL os_wait_uninterruptible (os_wait_queue *); +void NV_API_CALL os_wait_interruptible (os_wait_queue *); +void NV_API_CALL os_wake_up (os_wait_queue *); +nv_cap_t* NV_API_CALL os_nv_cap_init (const char *); +nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int); +nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int); +void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *); +int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int); 
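Similarly, a hypothetical producer/consumer pairing (illustrative only, not part of this patch) built on the wait-queue wrappers declared above might look like the sketch below. The completion flag, the helper names, and the re-check-after-wakeup caveat are assumptions rather than behavior documented in the original source.

/* Hypothetical usage of the os_*_wait_queue wrappers declared above. */
static os_wait_queue *g_example_wq;
static volatile NvBool g_example_done;

static NV_STATUS example_wait_init(void)
{
    return os_alloc_wait_queue(&g_example_wq);
}

static void example_wait_for_completion(void)
{
    /* Re-check the condition after each wakeup in case of spurious returns. */
    while (!g_example_done)
    {
        os_wait_uninterruptible(g_example_wq);
    }
}

static void example_signal_completion(void)
{
    g_example_done = NV_TRUE;
    os_wake_up(g_example_wq);  /* unblock any sleeping waiter */
}

static void example_wait_teardown(void)
{
    os_free_wait_queue(g_example_wq);
}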
+void NV_API_CALL os_nv_cap_close_fd (int); + +extern NvU32 os_page_size; +extern NvU64 os_page_mask; +extern NvU8 os_page_shift; +extern NvU32 os_sev_status; +extern NvBool os_sev_enabled; +extern NvBool os_dma_buf_enabled; + +/* + * --------------------------------------------------------------------------- + * + * Debug macros. + * + * --------------------------------------------------------------------------- + */ + +#define NV_DBG_INFO 0x0 +#define NV_DBG_SETUP 0x1 +#define NV_DBG_USERERRORS 0x2 +#define NV_DBG_WARNINGS 0x3 +#define NV_DBG_ERRORS 0x4 + + +void NV_API_CALL out_string(const char *str); +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...); + +#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__) + +#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \ + nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status) + +/* + * Fields for os_lock_user_pages flags parameter + */ +#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000 +#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001 + +#endif /* OS_INTERFACE_H */ diff --git a/src/nvidia/arch/nvalloc/unix/include/os_custom.h b/src/nvidia/arch/nvalloc/unix/include/os_custom.h new file mode 100644 index 000000000..37df06fd0 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/os_custom.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _OS_CUSTOM_H_ +#define _OS_CUSTOM_H_ + +/*! + * @file os_custom.h + * @brief OS module specific definitions for this OS + */ + +#include +#include + +// File modes, added for NVIDIA capabilities. 
+#define OS_RUSR 00400 // read permission, owner +#define OS_WUSR 00200 // write permission, owner +#define OS_XUSR 00100 // execute/search permission, owner +#define OS_RWXU (OS_RUSR | OS_WUSR | OS_XUSR) // read, write, execute/search, owner +#define OS_RGRP 00040 // read permission, group +#define OS_WGRP 00020 // write permission, group +#define OS_XGRP 00010 // execute/search permission, group +#define OS_RWXG (OS_RGRP | OS_WGRP | OS_XGRP) // read, write, execute/search, group +#define OS_ROTH 00004 // read permission, other +#define OS_WOTH 00002 // write permission, other +#define OS_XOTH 00001 // execute/search permission, other +#define OS_RWXO (OS_ROTH | OS_WOTH | OS_XOTH) // read, write, execute/search, other +#define OS_RUGO (OS_RUSR | OS_RGRP | OS_ROTH) +#define OS_WUGO (OS_WUSR | OS_WGRP | OS_WOTH) +#define OS_XUGO (OS_XUSR | OS_XGRP | OS_XOTH) + +// Trigger for collecting GPU state for later extraction. +NV_STATUS RmLogGpuCrash(OBJGPU *); + +// This is callback function in the miniport. +// The argument is a device extension, and must be cast as such to be useful. +typedef void (*MINIPORT_CALLBACK)(void*); + +NV_STATUS osPackageRegistry(OBJGPU *pGpu, PACKED_REGISTRY_TABLE *, NvU32 *); + +#endif // _OS_CUSTOM_H_ diff --git a/src/nvidia/arch/nvalloc/unix/include/osapi.h b/src/nvidia/arch/nvalloc/unix/include/osapi.h new file mode 100644 index 000000000..2b15d1efe --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/osapi.h @@ -0,0 +1,192 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _OSAPI_H_ +#define _OSAPI_H_ + +#include "core/system.h" +#include "gpu/gpu.h" + +#include // NV_DBG_ERRORS +#include +#include + +#if defined(__use_altstack__) +#if defined(QA_BUILD) +//--------------------------------------------------------------------------- +// +// 32 bit debug marker values. +// +//--------------------------------------------------------------------------- + +#define NV_MARKER1 (NvU32)(('M' << 24) | ('R' << 16) | ('V' << 8) | 'N') +#define NV_MARKER2 (NvU32)(('N' << 24) | ('V' << 16) | ('R' << 8) | 'M') + +// +// The two macros below implement a simple alternate stack usage sanity +// check for QA_BUILD RM builds. 
NV_ALTSTACK_WRITE_MARKERS() fills +// altstacks with NV_MARKER1, which enables NV_ALTSTACK_CHECK_MARKERS() +// to determine the stack usage fairly reliably by looking for the +// first clobbered marker. If more than 7/8 of the alternate stack were +// used, NV_ALTSTACK_CHECK_MARKERS() prints an error and asserts. +// +#define NV_ALTSTACK_WRITE_MARKERS(sp) \ +{ \ + NvU32 i, *stack = (void *)(sp)->stack; \ + for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \ + stack[i] = NV_MARKER1; \ +} + +#define NV_ALTSTACK_CHECK_MARKERS(sp) \ +{ \ + NvU32 i, *stack = (void *)(sp)->stack; \ + for (i = 0; i < ((sp)->size / sizeof(NvU32)); i++) \ + { \ + if (stack[i] != NV_MARKER1) \ + break; \ + } \ + if ((i * sizeof(NvU32)) < ((sp)->size / 8)) \ + { \ + nv_printf(NV_DBG_ERRORS, "NVRM: altstack: used %d of %d bytes!\n", \ + ((sp)->size - (i * sizeof(NvU32))), (sp)->size); \ + NV_ASSERT_PRECOMP((i * sizeof(NvU32)) >= ((sp)->size / 8)); \ + } \ +} +#else +#define NV_ALTSTACK_WRITE_MARKERS(sp) +#define NV_ALTSTACK_CHECK_MARKERS(sp) +#endif +#if defined(NVCPU_X86_64) +#define NV_ENTER_RM_RUNTIME(sp,fp) \ +{ \ + NV_ALTSTACK_WRITE_MARKERS(sp); \ + __asm__ __volatile__ ("movq %%rbp,%0" : "=r" (fp)); /* save %rbp */ \ + __asm__ __volatile__ ("movq %0,%%rbp" :: "r" ((sp)->top)); \ +} + +#define NV_EXIT_RM_RUNTIME(sp,fp) \ +{ \ + register void *__rbp __asm__ ("rbp"); \ + if (__rbp != (sp)->top) \ + { \ + nv_printf(NV_DBG_ERRORS, "NVRM: detected corrupted runtime stack!\n"); \ + NV_ASSERT_PRECOMP(__rbp == (sp)->top); \ + } \ + NV_ALTSTACK_CHECK_MARKERS(sp); \ + __asm__ __volatile__ ("movq %0,%%rbp" :: "r" (fp)); /* restore %rbp */ \ +} +#else +#error "gcc \"altstacks\" support is not implemented on this platform!" +#endif +#else +#define NV_ENTER_RM_RUNTIME(sp,fp) { (void)sp; (void)fp; } +#define NV_EXIT_RM_RUNTIME(sp,fp) +#endif + +void RmShutdownRm (void); + +NvBool RmInitPrivateState (nv_state_t *); +void RmFreePrivateState (nv_state_t *); + +NvBool RmInitAdapter (nv_state_t *); +NvBool RmPartiallyInitAdapter (nv_state_t *); +void RmShutdownAdapter (nv_state_t *); +void RmDisableAdapter (nv_state_t *); +void RmPartiallyDisableAdapter(nv_state_t *); +NV_STATUS RmGetAdapterStatus (nv_state_t *, NvU32 *); +NV_STATUS RmExcludeAdapter (nv_state_t *); + +NvBool RmGpuHasIOSpaceEnabled (nv_state_t *); + +void RmFreeUnusedClients (nv_state_t *, nv_file_private_t *); +NV_STATUS RmIoctl (nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32); + +NV_STATUS RmAllocOsEvent (NvHandle, nv_file_private_t *, NvU32); +NV_STATUS RmFreeOsEvent (NvHandle, NvU32); + +void RmI2cAddGpuPorts(nv_state_t *); + +NV_STATUS RmInitX86EmuState(OBJGPU *); +void RmFreeX86EmuState(OBJGPU *); +NV_STATUS RmSystemEvent(nv_state_t *, NvU32, NvU32); + +const NvU8 *RmGetGpuUuidRaw(nv_state_t *); + +NV_STATUS nv_vbios_call(OBJGPU *, NvU32 *, NvU32 *); + +int amd_adv_spec_cache_feature(OBJOS *); +int amd_msr_c0011022_incompatible(OBJOS *); + +NV_STATUS rm_get_adapter_status (nv_state_t *, NvU32 *); + +NV_STATUS rm_alloc_os_event (NvHandle, nv_file_private_t *, NvU32); +NV_STATUS rm_free_os_event (NvHandle, NvU32); +NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *); +void rm_client_free_os_events (NvHandle); + +NV_STATUS rm_create_mmap_context (nv_state_t *, NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32); +NV_STATUS rm_update_device_mapping_info (NvHandle, NvHandle, NvHandle, void *, void *); + +NV_STATUS rm_access_registry (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvP64, NvU32, NvP64, NvU32 *, NvU32 *, NvU32 *); + +// registry 
management +NV_STATUS RmInitRegistry (void); +NV_STATUS RmDestroyRegistry (nv_state_t *); + +NV_STATUS RmWriteRegistryDword (nv_state_t *, const char *, NvU32 ); +NV_STATUS RmReadRegistryDword (nv_state_t *, const char *, NvU32 *); +NV_STATUS RmWriteRegistryString (nv_state_t *, const char *, const char *, NvU32); +NV_STATUS RmReadRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32 *); +NV_STATUS RmWriteRegistryBinary (nv_state_t *, const char *, NvU8 *, NvU32); +NV_STATUS RmReadRegistryString (nv_state_t *, const char *, NvU8 *, NvU32 *); + +NV_STATUS RmPackageRegistry (nv_state_t *, PACKED_REGISTRY_TABLE *, NvU32 *); + +NvBool RmIsNvifFunctionSupported(NvU32, NvU32); +void RmInitAcpiMethods (OBJOS *, OBJSYS *, OBJGPU *); +void RmUnInitAcpiMethods (OBJSYS *); + +void RmInflateOsToRmPageArray (RmPhysAddr *, NvU64); +void RmDeflateRmToOsPageArray (RmPhysAddr *, NvU64); + +void RmInitS0ixPowerManagement (nv_state_t *); +void RmInitDeferredDynamicPowerManagement (nv_state_t *); +void RmDestroyDeferredDynamicPowerManagement(nv_state_t *); + +NV_STATUS os_ref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t); +void os_unref_dynamic_power (nv_state_t *, nv_dynamic_power_mode_t); +void RmHandleDisplayChange (nvidia_stack_t *, nv_state_t *); +void RmUpdateGc6ConsoleRefCount (nv_state_t *, NvBool); + +NvBool rm_get_uefi_console_status (nv_state_t *); +NvU64 rm_get_uefi_console_size (nv_state_t *, NvU64 *); + +RM_API *RmUnixRmApiPrologue (nv_state_t *, THREAD_STATE_NODE *, NvU32 module); +void RmUnixRmApiEpilogue (nv_state_t *, THREAD_STATE_NODE *); + +static inline NvBool rm_is_system_notebook(void) +{ + return (nv_is_chassis_notebook() || nv_acpi_is_battery_present()); +} + +#endif // _OSAPI_H_ diff --git a/src/nvidia/arch/nvalloc/unix/include/osfuncs.h b/src/nvidia/arch/nvalloc/unix/include/osfuncs.h new file mode 100644 index 000000000..7afbf6373 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/osfuncs.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef OSFUNCS_H +#define OSFUNCS_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Declarations for the Operating System Specific Functions. 
* +* * +\***************************************************************************/ + +#include + +OSQueueWorkItem osQueueWorkItem; +OSQueueWorkItemWithFlags osQueueWorkItemWithFlags; +OSQueueSystemWorkItem osQueueSystemWorkItem; +OSDbgBreakpointEnabled osDbgBreakpointEnabled; + +void* osGetStereoDongleInterface(void); + +OSCallACPI_DSM osCallACPI_DSM; +OSCallACPI_DDC osCallACPI_DDC; +OSCallACPI_NVHG_ROM osCallACPI_NVHG_ROM; +OSCallACPI_DOD osCallACPI_DOD; +OSCallACPI_MXDS osCallACPI_MXDS; +OSCallACPI_MXDM osCallACPI_MXDM; + +#if defined(NVCPU_X86_64) +OSnv_rdcr4 nv_rdcr4; +NvU64 nv_rdcr3(OBJOS *); +OSnv_cpuid nv_cpuid; +#endif + +#endif // OSFUNCS_H diff --git a/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h b/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h new file mode 100644 index 000000000..1330ad8de --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/include/rmobjexportimport.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMOBJEXPORTIMPORT_H_ +#define _RMOBJEXPORTIMPORT_H_ + +#include "nvstatus.h" + +typedef NvHandle RmObjExportHandle; + +NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject, + RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance); + +void RmFreeObjExportHandle(RmObjExportHandle hObject); + +NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent, + NvHandle *phDstObject, RmObjExportHandle hSrcObject, + NvU8 *pObjectType); + +NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance); +#endif // _RMOBJEXPORTIMPORT_H_ + diff --git a/src/nvidia/arch/nvalloc/unix/src/asm/x86/nv_cpuid.c b/src/nvidia/arch/nvalloc/unix/src/asm/x86/nv_cpuid.c new file mode 100644 index 000000000..0ebb1c833 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/asm/x86/nv_cpuid.c @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +/* inline assembler routines for UNIX platforms */ + +#if defined(NVCPU_X86_64) + +NvS32 nv_cpuid( + OBJOS *pOS, + NvS32 op, + NvS32 subop, + NvU32 *eax, + NvU32 *ebx, + NvU32 *ecx, + NvU32 *edx +) +{ + asm volatile (" cpuid \n" + : "=a" (*eax), + "=b" (*ebx), + "=c" (*ecx), + "=d" (*edx) + : "a" (op), + "c" (subop) + : "cc"); + + return 1; +} + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/src/asm/x86/nv_rdcr.c b/src/nvidia/arch/nvalloc/unix/src/asm/x86/nv_rdcr.c new file mode 100644 index 000000000..56e6bfd77 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/asm/x86/nv_rdcr.c @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include + +/* inline assembler routines for UNIX platforms */ + +#if defined(NVCPU_X86_64) + +NvU32 nv_rdcr4(OBJOS *pOS) +{ + NvU64 val; + asm volatile ("movq %%cr4,%0" : "=r" (val)); + return (NvU32)val; +} + +NvU64 nv_rdcr3(OBJOS *pOS) +{ + NvU64 val; + asm volatile ("movq %%cr3,%0" : "=r" (val)); + return val; +} + +#endif diff --git a/src/nvidia/arch/nvalloc/unix/src/escape.c b/src/nvidia/arch/nvalloc/unix/src/escape.c new file mode 100644 index 000000000..256eb1546 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/escape.c @@ -0,0 +1,820 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + + +//***************************** Module Header ********************************** +// +// This code is linked into the resource manager proper. It receives the +// ioctl from the resource manager's customer, unbundles the args and +// calls the correct resman routines. 
+// +//****************************************************************************** + +#include +#include +#include +#include +#include +#include +#include + +#include +#include // NV01_ROOT +#include // NV01_ROOT_NON_PRIV +#include // NV01_EVENT +#include // NV01_MEMORY_SYSTEM +#include // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR + +#define NV_CTL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) == 0) \ + { \ + rmStatus = NV_ERR_INVALID_ARGUMENT; \ + goto done; \ + } \ +} + +#define NV_ACTUAL_DEVICE_ONLY(nv) \ +{ \ + if (((nv)->flags & NV_FLAG_CONTROL) != 0) \ + { \ + rmStatus = NV_ERR_INVALID_ARGUMENT; \ + goto done; \ + } \ +} + +// only return errors through pApi->status +static void RmCreateOsDescriptor(NVOS32_PARAMETERS *pApi, API_SECURITY_INFO secInfo) +{ + NV_STATUS rmStatus; + NvBool writable; + NvU32 flags = 0; + NvU64 allocSize, pageCount, *pPteArray = NULL; + void *pDescriptor, *pPageArray = NULL; + + pDescriptor = NvP64_VALUE(pApi->data.AllocOsDesc.descriptor); + if (((NvUPtr)pDescriptor & ~os_page_mask) != 0) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + goto done; + } + + // Check to prevent an NvU64 overflow + if ((pApi->data.AllocOsDesc.limit + 1) == 0) + { + rmStatus = NV_ERR_INVALID_LIMIT; + goto done; + } + + allocSize = (pApi->data.AllocOsDesc.limit + 1); + pageCount = (1 + ((allocSize - 1) / os_page_size)); + + writable = FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_WRITE, pApi->data.AllocOsDesc.attr2); + flags = FLD_SET_DRF_NUM(_LOCK_USER_PAGES, _FLAGS, _WRITE, writable, flags); + rmStatus = os_lock_user_pages(pDescriptor, pageCount, &pPageArray, flags); + if (rmStatus == NV_OK) + { + pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray; + pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY; + } + else if (rmStatus == NV_ERR_INVALID_ADDRESS) + { + rmStatus = os_lookup_user_io_memory(pDescriptor, pageCount, + &pPteArray, &pPageArray); + if (rmStatus == NV_OK) + { + if (pPageArray != NULL) + { + pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPageArray; + pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY; + } + else if (pPteArray != NULL) + { + pApi->data.AllocOsDesc.descriptor = (NvP64)(NvUPtr)pPteArray; + pApi->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY; + } + else + { + NV_ASSERT_FAILED("unknown memory import type"); + rmStatus = NV_ERR_NOT_SUPPORTED; + } + } + } + if (rmStatus != NV_OK) + goto done; + + Nv04VidHeapControlWithSecInfo(pApi, secInfo); + + if (pApi->status != NV_OK) + { + switch (pApi->data.AllocOsDesc.descriptorType) + { + default: + break; + case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY: + os_unlock_user_pages(pageCount, pPageArray); + break; + } + } + +done: + if (rmStatus != NV_OK) + pApi->status = rmStatus; +} + +// only return errors through pApi->status +static void RmAllocOsDescriptor(NVOS02_PARAMETERS *pApi, API_SECURITY_INFO secInfo) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 flags, attr, attr2; + NVOS32_PARAMETERS *pVidHeapParams; + + if (!FLD_TEST_DRF(OS02, _FLAGS, _LOCATION, _PCI, pApi->flags) || + !FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, pApi->flags)) + { + rmStatus = NV_ERR_INVALID_FLAGS; + goto done; + } + + attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + + if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, pApi->flags) || + FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, pApi->flags)) + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr); + } + else if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, pApi->flags)) + 
attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr); + else { + rmStatus = NV_ERR_INVALID_FLAGS; + goto done; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, pApi->flags)) + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr); + else + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr); + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, pApi->flags)) + attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES); + else + attr2 = DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + pVidHeapParams = portMemAllocNonPaged(sizeof(NVOS32_PARAMETERS)); + if (pVidHeapParams == NULL) + { + rmStatus = NV_ERR_NO_MEMORY; + goto done; + } + portMemSet(pVidHeapParams, 0, sizeof(NVOS32_PARAMETERS)); + + pVidHeapParams->hRoot = pApi->hRoot; + pVidHeapParams->hObjectParent = pApi->hObjectParent; + pVidHeapParams->function = NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR; + + flags = (NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED | + NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED); + + if (DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags)) + attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2); + + // currently CPU-RO memory implies GPU-RO as well + if (DRF_VAL(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, pApi->flags) || + DRF_VAL(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, pApi->flags)) + attr2 = FLD_SET_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2); + + pVidHeapParams->data.AllocOsDesc.hMemory = pApi->hObjectNew; + pVidHeapParams->data.AllocOsDesc.flags = flags; + pVidHeapParams->data.AllocOsDesc.attr = attr; + pVidHeapParams->data.AllocOsDesc.attr2 = attr2; + pVidHeapParams->data.AllocOsDesc.descriptor = pApi->pMemory; + pVidHeapParams->data.AllocOsDesc.limit = pApi->limit; + pVidHeapParams->data.AllocOsDesc.descriptorType = NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS; + + RmCreateOsDescriptor(pVidHeapParams, secInfo); + + pApi->status = pVidHeapParams->status; + + portMemFree(pVidHeapParams); + +done: + if (rmStatus != NV_OK) + pApi->status = rmStatus; +} + +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hRoot) == NV_OFFSETOF(NVOS64_PARAMETERS, hRoot)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectParent) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectParent)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hObjectNew) == NV_OFFSETOF(NVOS64_PARAMETERS, hObjectNew)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, hClass) == NV_OFFSETOF(NVOS64_PARAMETERS, hClass)); +ct_assert(NV_OFFSETOF(NVOS21_PARAMETERS, pAllocParms) == NV_OFFSETOF(NVOS64_PARAMETERS, pAllocParms)); + +NV_STATUS RmIoctl( + nv_state_t *nv, + nv_file_private_t *nvfp, + NvU32 cmd, + void *data, + NvU32 dataSize +) +{ + NV_STATUS rmStatus = NV_ERR_GENERIC; + API_SECURITY_INFO secInfo = { }; + + secInfo.privLevel = osIsAdministrator() ? 
RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + secInfo.paramLocation = PARAM_LOCATION_USER; + secInfo.pProcessToken = NULL; + secInfo.clientOSInfo = nvfp->ctl_nvfp; + if (secInfo.clientOSInfo == NULL) + secInfo.clientOSInfo = nvfp; + + switch (cmd) + { + case NV_ESC_RM_ALLOC_MEMORY: + { + nv_ioctl_nvos02_parameters_with_fd *pApi; + NVOS02_PARAMETERS *pParms; + + pApi = data; + pParms = &pApi->params; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(nv_ioctl_nvos02_parameters_with_fd)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (pParms->hClass == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) + RmAllocOsDescriptor(pParms, secInfo); + else + { + NvU32 flags = pParms->flags; + + Nv01AllocMemoryWithSecInfo(pParms, secInfo); + + // + // If the system memory is going to be mapped immediately, + // create the mmap context for it now. + // + if ((pParms->hClass == NV01_MEMORY_SYSTEM) && + (!FLD_TEST_DRF(OS02, _FLAGS, _ALLOC, _NONE, flags)) && + (!FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, flags)) && + (pParms->status == NV_OK)) + { + if (rm_create_mmap_context(nv, pParms->hRoot, + pParms->hObjectParent, pParms->hObjectNew, + pParms->pMemory, pParms->limit + 1, 0, + pApi->fd) != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "could not create mmap context for %p\n", + NvP64_VALUE(pParms->pMemory)); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + } + + break; + } + + case NV_ESC_RM_ALLOC_OBJECT: + { + NVOS05_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS05_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv01AllocObjectWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_ALLOC: + { + NVOS21_PARAMETERS *pApi = data; + NVOS64_PARAMETERS *pApiAccess = data; + NvBool bAccessApi = (dataSize == sizeof(NVOS64_PARAMETERS)); + + if ((dataSize != sizeof(NVOS21_PARAMETERS)) && + (dataSize != sizeof(NVOS64_PARAMETERS))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + switch (pApi->hClass) + { + case NV01_ROOT: + case NV01_ROOT_CLIENT: + case NV01_ROOT_NON_PRIV: + { + NV_CTL_DEVICE_ONLY(nv); + + // Force userspace client allocations to be the _CLIENT class. 
+ pApi->hClass = NV01_ROOT_CLIENT; + break; + } + case NV01_EVENT: + case NV01_EVENT_OS_EVENT: + case NV01_EVENT_KERNEL_CALLBACK: + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + break; + } + default: + { + NV_CTL_DEVICE_ONLY(nv); + break; + } + } + + if (!bAccessApi) + { + Nv04AllocWithSecInfo(pApi, secInfo); + } + else + { + Nv04AllocWithAccessSecInfo(pApiAccess, secInfo); + } + + break; + } + + case NV_ESC_RM_FREE: + { + NVOS00_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS00_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv01FreeWithSecInfo(pApi, secInfo); + + if (pApi->status == NV_OK && + pApi->hObjectOld == pApi->hRoot) + { + rm_client_free_os_events(pApi->hRoot); + } + + break; + } + + case NV_ESC_RM_VID_HEAP_CONTROL: + { + NVOS32_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS32_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (pApi->function == NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR) + RmCreateOsDescriptor(pApi, secInfo); + else + Nv04VidHeapControlWithSecInfo(pApi, secInfo); + + break; + } + + case NV_ESC_RM_I2C_ACCESS: + { + NVOS_I2C_ACCESS_PARAMS *pApi = data; + + NV_ACTUAL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS_I2C_ACCESS_PARAMS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04I2CAccessWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_IDLE_CHANNELS: + { + NVOS30_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS30_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04IdleChannelsWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_MAP_MEMORY: + { + nv_ioctl_nvos33_parameters_with_fd *pApi; + NVOS33_PARAMETERS *pParms; + + pApi = data; + pParms = &pApi->params; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(nv_ioctl_nvos33_parameters_with_fd)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04MapMemoryWithSecInfo(pParms, secInfo); + + if (pParms->status == NV_OK) + { + pParms->status = rm_create_mmap_context(nv, pParms->hClient, + pParms->hDevice, pParms->hMemory, + pParms->pLinearAddress, pParms->length, + pParms->offset, pApi->fd); + if (pParms->status != NV_OK) + { + NVOS34_PARAMETERS params; + portMemSet(¶ms, 0, sizeof(NVOS34_PARAMETERS)); + params.hClient = pParms->hClient; + params.hDevice = pParms->hDevice; + params.hMemory = pParms->hMemory; + params.pLinearAddress = pParms->pLinearAddress; + params.flags = pParms->flags; + Nv04UnmapMemoryWithSecInfo(¶ms, secInfo); + } + } + break; + } + + case NV_ESC_RM_UNMAP_MEMORY: + { + NVOS34_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS34_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04UnmapMemoryWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_ACCESS_REGISTRY: + { + NVOS38_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS38_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi->status = rm_access_registry(pApi->hClient, + pApi->hObject, + pApi->AccessType, + pApi->pDevNode, + pApi->DevNodeLength, + pApi->pParmStr, + pApi->ParmStrLength, + pApi->pBinaryData, + &pApi->BinaryDataLength, + &pApi->Data, + &pApi->Entry); + break; + } + + case NV_ESC_RM_ALLOC_CONTEXT_DMA2: + { + NVOS39_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS39_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } 
+ + Nv04AllocContextDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_BIND_CONTEXT_DMA: + { + NVOS49_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS49_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04BindContextDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_MAP_MEMORY_DMA: + { + NVOS46_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS46_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04MapMemoryDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_UNMAP_MEMORY_DMA: + { + NVOS47_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS47_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04UnmapMemoryDmaWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_DUP_OBJECT: + { + NVOS55_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS55_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04DupObjectWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_SHARE: + { + NVOS57_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS57_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04ShareWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_ALLOC_OS_EVENT: + { + nv_ioctl_alloc_os_event_t *pApi = data; + + if (dataSize != sizeof(nv_ioctl_alloc_os_event_t)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi->Status = rm_alloc_os_event(pApi->hClient, + nvfp, + pApi->fd); + break; + } + + case NV_ESC_FREE_OS_EVENT: + { + nv_ioctl_free_os_event_t *pApi = data; + + if (dataSize != sizeof(nv_ioctl_free_os_event_t)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi->Status = rm_free_os_event(pApi->hClient, pApi->fd); + break; + } + + case NV_ESC_RM_GET_EVENT_DATA: + { + NVOS41_PARAMETERS *pApi = data; + + if (dataSize != sizeof(NVOS41_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pApi->status = rm_get_event_data(nvfp, + pApi->pEvent, + &pApi->MoreEvents); + break; + } + + case NV_ESC_STATUS_CODE: + { + nv_state_t *pNv; + nv_ioctl_status_code_t *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(nv_ioctl_status_code_t)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pNv = nv_get_adapter_state(pApi->domain, pApi->bus, pApi->slot); + if (pNv == NULL) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + rmStatus = rm_get_adapter_status(pNv, &pApi->status); + + if (rmStatus != NV_OK) + goto done; + + break; + } + + case NV_ESC_RM_CONTROL: + { + NVOS54_PARAMETERS *pApi = data; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS54_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + Nv04ControlWithSecInfo(pApi, secInfo); + break; + } + + case NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO: + { + NVOS56_PARAMETERS *pApi = data; + void *pOldCpuAddress; + void *pNewCpuAddress; + + NV_CTL_DEVICE_ONLY(nv); + + if (dataSize != sizeof(NVOS56_PARAMETERS)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + pOldCpuAddress = NvP64_VALUE(pApi->pOldCpuAddress); + pNewCpuAddress = NvP64_VALUE(pApi->pNewCpuAddress); + + pApi->status = rm_update_device_mapping_info(pApi->hClient, + pApi->hDevice, + pApi->hMemory, + pOldCpuAddress, + pNewCpuAddress); + break; + } + + case NV_ESC_REGISTER_FD: + { + nv_ioctl_register_fd_t *params = data; + 
void *priv = NULL; + nv_file_private_t *ctl_nvfp; + + if (dataSize != sizeof(nv_ioctl_register_fd_t)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus != NV_OK) + goto done; + + // If there is already a ctl fd registered on this nvfp, fail. + if (nvfp->ctl_nvfp != NULL) + { + // UNLOCK: release API lock + rmApiLockRelease(); + rmStatus = NV_ERR_INVALID_STATE; + goto done; + } + + // + // Note that this call is valid for both "actual" devices and ctrl + // devices. In particular, NV_ESC_ALLOC_OS_EVENT can be used with + // both types of devices. + // But, the ctl_fd passed in should always correspond to a control FD. + // + ctl_nvfp = nv_get_file_private(params->ctl_fd, + NV_TRUE, /* require ctl fd */ + &priv); + if (ctl_nvfp == NULL) + { + // UNLOCK: release API lock + rmApiLockRelease(); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // Disallow self-referential links, and disallow links to FDs that + // themselves have a link. + if ((ctl_nvfp == nvfp) || (ctl_nvfp->ctl_nvfp != NULL)) + { + nv_put_file_private(priv); + // UNLOCK: release API lock + rmApiLockRelease(); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // + // nvfp->ctl_nvfp is read outside the lock, so set it atomically. + // Note that once set, this can never be removed until the fd + // associated with nvfp is closed. We hold on to 'priv' until the + // fd is closed, too, to ensure that the fd associated with + // ctl_nvfp remains valid. + // + portAtomicSetSize(&nvfp->ctl_nvfp, ctl_nvfp); + nvfp->ctl_nvfp_priv = priv; + + // UNLOCK: release API lock + rmApiLockRelease(); + + // NOTE: nv_put_file_private(priv) is not called here. It MUST be + // called during cleanup of this nvfp. + rmStatus = NV_OK; + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "unknown NVRM ioctl command: 0x%x\n", cmd); + goto done; + } + } + + rmStatus = NV_OK; +done: + + return rmStatus; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c b/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c new file mode 100644 index 000000000..ca3874f02 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/exports-stubs.c @@ -0,0 +1,256 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +NV_STATUS NV_API_CALL rm_schedule_gpu_wakeup( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + return NV_OK; +} + +void NV_API_CALL rm_init_dynamic_power_management( + nvidia_stack_t *sp, + nv_state_t *nv, + NvBool bPr3AcpiMethodPresent +) +{ +} + +void NV_API_CALL rm_cleanup_dynamic_power_management( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ +} + +NV_STATUS NV_API_CALL rm_ref_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ + return NV_OK; +} + +void NV_API_CALL rm_unref_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ +} + +NV_STATUS NV_API_CALL rm_transition_dynamic_power( + nvidia_stack_t *sp, + nv_state_t *nv, + NvBool bEnter +) +{ + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_power_management( + nvidia_stack_t *sp, + nv_state_t *pNv, + nv_pm_action_t pmAction +) +{ + return NV_OK; +} + +const char* NV_API_CALL rm_get_vidmem_power_status( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return "?"; +} + +const char* NV_API_CALL rm_get_dynamic_power_management_status( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return "?"; +} + +const char* NV_API_CALL rm_get_gpu_gcx_support( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvBool bGcxTypeGC6 +) +{ + return "?"; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams +) +{ + return NV_OK; +} + +void +RmUpdateGc6ConsoleRefCount +( + nv_state_t *nv, + NvBool bIncrease +) +{ +} + +void +RmInitS0ixPowerManagement +( + nv_state_t *nv +) +{ +} + +void +RmInitDeferredDynamicPowerManagement +( + nv_state_t *nv +) +{ +} + +void +RmDestroyDeferredDynamicPowerManagement +( + nv_state_t *nv +) +{ +} + +void RmHandleDisplayChange +( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ +} + +NV_STATUS +os_ref_dynamic_power +( + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ + return NV_OK; +} + +void +os_unref_dynamic_power +( + nv_state_t *nv, + nv_dynamic_power_mode_t mode +) +{ +} + +NV_STATUS NV_API_CALL rm_get_clientnvpcf_power_limits( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *limitRated, + NvU32 *limitCurr +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +deviceCtrlCmdOsUnixVTSwitch_IMPL +( + Device *pDevice, + NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_save_low_res_mode( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *faultsCopied +) +{ + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_copy_mmu_faults_unlocked( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *faultsCopied +) +{ + return NV_OK; +} + +NV_STATUS RmInitX86EmuState(OBJGPU *pGpu) +{ + return NV_OK; +} + +void RmFreeX86EmuState(OBJGPU *pGpu) +{ +} diff --git a/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c b/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c new file mode 100644 index 000000000..3c1037c97 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/gcc_helper.c @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright 
(c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include + +void* memset(void* s, int c, NvUPtr n) +{ + return os_mem_set(s, (NvU8)c, (NvU32)n); +} + +void* memcpy(void* dest, const void* src, NvUPtr n) +{ + return os_mem_copy(dest, src, (NvU32)n); +} diff --git a/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c b/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c new file mode 100644 index 000000000..25398d464 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/os-hypervisor-stubs.c @@ -0,0 +1,150 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvstatus.h" +#include "os/os.h" +#include "nv.h" +#include "nv-hypervisor.h" + +HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void) +{ + return OS_HYPERVISOR_UNKNOWN; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_type_ids( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvU32 *numVgpuTypes, + NvU32 **vgpuTypeIds, + NvBool isVirtfn +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_process_vf_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvU8 cmd, + NvU32 domain, + NvU8 bus, + NvU8 slot, + NvU8 function, + NvBool isMdevAttached, + void *vf_pci_info +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_type_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvU32 vgpuTypeId, + char *buffer, + int type_info, + NvU8 devfn +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_create_request( + nvidia_stack_t *sp, + nv_state_t *pNv, + const NvU8 *pMdevUuid, + NvU32 vgpuTypeId, + NvU16 *vgpuId, + NvU32 gpuPciBdf +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_update_request( + nvidia_stack_t *sp , + const NvU8 *pMdevUuid, + VGPU_DEVICE_STATE deviceState, + NvU64 *offsets, + NvU64 *sizes, + const char *configParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_sparse_mmap( + nvidia_stack_t *sp , + nv_state_t *pNv, + const NvU8 *pMdevUuid, + NvU64 **offsets, + NvU64 **sizes, + NvU32 *numAreas +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_gpu_bind_event( + nvidia_stack_t *sp +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_start( + nvidia_stack_t *sp, + const NvU8 *pMdevUuid, + void *waitQueue, + NvS32 *returnStatus, + NvU8 *vmName, + NvU32 qemuPid +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_delete( + nvidia_stack_t *sp, + const NvU8 *pMdevUuid, + NvU16 vgpuId +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS NV_API_CALL nv_vgpu_get_bar_info( + nvidia_stack_t *sp, + nv_state_t *pNv, + const NvU8 *pMdevUuid, + NvU64 *size, + NvU32 regionIndex, + void *pVgpuVfioRef +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void initVGXSpecificRegistry(OBJGPU *pGpu) +{} + diff --git a/src/nvidia/arch/nvalloc/unix/src/os.c b/src/nvidia/arch/nvalloc/unix/src/os.c new file mode 100644 index 000000000..04a4fb439 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/os.c @@ -0,0 +1,4983 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include // NV device driver interface +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "gpu/gpu.h" +#include +#include + +#include "nverror.h" +#include "kernel/gpu/bif/kernel_bif.h" + +#include "gpu/mem_sys/kern_mem_sys.h" + +#include "mem_mgr/io_vaspace.h" +#include +#include "gpu/mem_mgr/mem_desc.h" +#include "core/thread_state.h" +#include +#include +#include +#include +#include "virtualization/hypervisor/hypervisor.h" +#include "rmobjexportimport.h" +#include +#include "rmapi/rs_utils.h" +#include "rmapi/client_resource.h" +#include "os/dce_rm_client_ipc.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + + + +extern const char *ppOsBugCheckBugcodeStr[]; + + +ct_assert(NV_RM_PAGE_SIZE == RM_PAGE_SIZE); +ct_assert(NV_RM_PAGE_MASK == RM_PAGE_MASK); +ct_assert(NV_RM_PAGE_SHIFT == RM_PAGE_SHIFT); + +typedef struct +{ + NvU32 euid; + NvU32 pid; +} TOKEN_USER, *PTOKEN_USER; + +struct OS_RM_CAPS +{ + NvU32 count; + + // This should be the last element + nv_cap_t **caps; +}; + +NvBool osIsRaisedIRQL() +{ + return (!os_semaphore_may_sleep()); +} + +NvBool osIsISR() +{ + return os_is_isr(); +} + +NV_STATUS osGetDriverBlock +( + OS_GPU_INFO *pOsGpuInfo, + OS_DRIVER_BLOCK *pBlock +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osGetCurrentTick(NvU64 *pTimeInNs) +{ + *pTimeInNs = os_get_current_tick(); + return NV_OK; +} + +NvU64 osGetTickResolution(void) +{ + return os_get_tick_resolution(); +} + +NV_STATUS osGetPerformanceCounter(NvU64 *pTimeInNs) +{ + *pTimeInNs = os_get_current_tick_hr(); + return NV_OK; +} + +NV_STATUS osGetCurrentTime( + NvU32 *pSeconds, + NvU32 *pMicroSeconds +) +{ + return os_get_current_time(pSeconds, pMicroSeconds); +} + +/*! + * @brief Get timestamp for logging. + * + * Everything that logs a time stamp should use this routine for consistency. + * + * The returned value is OS dependent. We want the time stamp to use + * KeQueryPerformanceCounter on Windows so it matches the DirectX timestamps. + * Linux uses microseconds since 1970 (osGetCurrentTime), since matching DirectX + * is not a priority. + * + * osGetTimestampFreq returns the frequency required to decode the time stamps. + * + * @returns system dependent timestamp. + */ +NvU64 osGetTimestamp(void) +{ + NvU32 sec = 0; + NvU32 usec = 0; + osGetCurrentTime(&sec, &usec); + return (NvU64)sec * 1000000 + usec; +} + +/*! + * @brief Get timestamp frequency. + * + * Timestamps are OS dependent. This call returns the frequency + * required to decode them. + * + * @returns Timestamp frequency. For example, 1000000 for MHz. 
+ */ +NvU64 osGetTimestampFreq(void) +{ + return 1000000; +} + +NV_STATUS osDelay(NvU32 milliseconds) +{ + return os_delay(milliseconds); +} + +NV_STATUS osDelayUs(NvU32 microseconds) +{ + return os_delay_us(microseconds); +} + +NV_STATUS osDelayNs(NvU32 nanoseconds) +{ + NvU32 microseconds = NV_MAX(1, (nanoseconds / 1000)); + return os_delay_us(microseconds); +} + +NvU32 osGetCpuFrequency(void) +{ + /* convert os_get_cpu_frequency()'s return value from Hz to MHz */ + return ((NvU32)(os_get_cpu_frequency() / 1000000ULL)); +} + +void* osPciInitHandle( + NvU32 Domain, + NvU8 Bus, + NvU8 Slot, + NvU8 Function, + NvU16 *pVendor, + NvU16 *pDevice +) +{ + // + // Check if the BDF is for a GPU that's already been attached, for which + // we should already have a handle cached. This won't catch devices that + // have been probed but not yet attached, but that shouldn't be a common + // occurrence. + // + // More importantly, having this check here means we don't need to check + // a global list of devices in the kernel interface layer, which could + // have the implication of taking another lock, causing hairy lock + // ordering issues. + // + if (Function == 0) + { + OBJGPU *pGpu = gpumgrGetGpuFromBusInfo(Domain, Bus, Slot); + if (pGpu != NULL) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + if (pVendor) *pVendor = nv->pci_info.vendor_id; + if (pDevice) *pDevice = nv->pci_info.device_id; + return nv->handle; + } + } + + return os_pci_init_handle(Domain, Bus, Slot, Function, pVendor, pDevice); +} + +NvU8 osPciReadByte( + void *pHandle, + NvU32 Offset +) +{ + NvU8 val; + os_pci_read_byte(pHandle, Offset, &val); + return val; +} + +NvU16 osPciReadWord( + void *pHandle, + NvU32 Offset +) +{ + NvU16 val; + os_pci_read_word(pHandle, Offset, &val); + return val; +} + +NvU32 osPciReadDword( + void *pHandle, + NvU32 Offset +) +{ + NvU32 val; + os_pci_read_dword(pHandle, Offset, &val); + return val; +} + +void osPciWriteByte( + void *pHandle, + NvU32 Offset, + NvU8 Value +) +{ + os_pci_write_byte(pHandle, Offset, Value); +} + +void osPciWriteWord( + void *pHandle, + NvU32 Offset, + NvU16 Value +) +{ + os_pci_write_word(pHandle, Offset, Value); +} + +void osPciWriteDword( + void *pHandle, + NvU32 Offset, + NvU32 Value +) +{ + os_pci_write_dword(pHandle, Offset, Value); +} + +void* osMapKernelSpace( + RmPhysAddr Start, + NvU64 Size, + NvU32 Mode, + NvU32 Protect +) +{ + NvU64 offset; + NvU8 *ptr; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return NULL; + } + + offset = (Start & ~os_page_mask); + Start &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + + ptr = os_map_kernel_space(Start, Size, Mode); + if (ptr != NULL) + return (ptr + offset); + + return NULL; +} + +void osUnmapKernelSpace( + void *pAddress, + NvU64 Size +) +{ + NvU64 offset; + NvUPtr ptr = (NvUPtr)pAddress; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return; + } + + offset = (ptr & ~os_page_mask); + ptr &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + os_unmap_kernel_space((void *)ptr, Size); +} + +void* osMapIOSpace( + RmPhysAddr Start, + NvU64 Size, + void ** pData, + NvU32 User, + NvU32 Mode, + NvU32 Protect +) +{ + + NvU64 offset; + NvU8 *addr; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return NULL; + } + + offset = (Start & ~os_page_mask); + Start &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + + if (User) + addr = os_map_user_space(Start, Size, Mode, Protect, pData); + else + addr = os_map_kernel_space(Start, Size, Mode); + if (addr != 
NULL) + return (addr + offset); + + return addr; +} + +void osUnmapIOSpace( + void *pAddress, + NvU64 Size, + void *pData, + NvU32 User +) +{ + NvU64 offset; + NvUPtr addr = (NvUPtr)pAddress; + + if (0 == Size) + { + NV_ASSERT(Size != 0); + return; + } + + offset = (addr & ~os_page_mask); + addr &= os_page_mask; + Size = ((Size + offset + ~os_page_mask) & os_page_mask); + + if (User) + os_unmap_user_space((void *)addr, Size, pData); + else + os_unmap_kernel_space((void *)addr, Size); +} + +static NV_STATUS setNumaPrivData +( + KernelMemorySystem *pKernelMemorySystem, + nv_state_t *nv, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_STATUS rmStatus = NV_OK; + void *pAllocPrivate = NULL; + NvU64 *addrArray = NULL; + NvU64 numOsPages = pMemDesc->PageCount; + + addrArray = pMemDesc->_pteArray; + + if (NV_RM_PAGE_SIZE < os_page_size) + { + NvU64 numPages; + NvU64 i; + + numPages = pMemDesc->PageCount; + addrArray = portMemAllocNonPaged(numPages * sizeof(NvU64)); + if (addrArray == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemCopy((void*)addrArray, + (numPages * sizeof(NvU64)), (void*)pMemDesc->_pteArray, + (numPages * sizeof(NvU64))); + RmDeflateRmToOsPageArray(addrArray, numPages); + numOsPages = NV_RM_PAGES_TO_OS_PAGES(numPages); + + for (i = 0; i < numOsPages; i++) + { + // Update GPA to system physical address + addrArray[i] += pKernelMemorySystem->coherentCpuFbBase; + } + } + + rmStatus = nv_register_phys_pages(nv, addrArray, numOsPages, NV_MEMORY_CACHED, &pAllocPrivate); + if (rmStatus != NV_OK) + { + goto errors; + } + + memdescSetMemData(pMemDesc, pAllocPrivate, NULL); + +errors: + if (NV_RM_PAGE_SIZE < os_page_size) + { + portMemFree(addrArray); + } + + return rmStatus; +} + +NV_STATUS osGetNumMemoryPages +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 *pNumPages +) +{ + void *pAllocPrivate = NULL; + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (pAllocPrivate == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + return nv_get_num_phys_pages(pAllocPrivate, pNumPages); +} + +NV_STATUS osGetMemoryPages +( + MEMORY_DESCRIPTOR *pMemDesc, + void *pPages, + NvU32 *pNumPages +) +{ + void *pAllocPrivate = NULL; + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (pAllocPrivate == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + return nv_get_phys_pages(pAllocPrivate, pPages, pNumPages); +} + +NV_STATUS osMapSystemMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Length, + NvBool Kernel, + NvU32 Protect, + NvP64 *ppAddress, + NvP64 *ppPrivate +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + + RmPhysAddr userAddress; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NV_STATUS rmStatus = NV_OK; + void *pAllocPrivate = NULL; + void *pAddress; + void *pPrivate = NULL; + NvU64 pageIndex; + NvU32 pageOffset; + + if (pGpu != NULL && + pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) && + memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + rmStatus = setNumaPrivData(pKernelMemorySystem, nv, pMemDesc); + if (rmStatus != NV_OK) + return rmStatus; + } + + *ppAddress = NvP64_NULL; + *ppPrivate = NvP64_NULL; + + if ((Offset + Length) < Length) + return NV_ERR_INVALID_ARGUMENT; + if ((Offset + Length) > pMemDesc->Size) + return NV_ERR_INVALID_ARGUMENT; + + pageIndex = (Offset >> os_page_shift); + pageOffset = (Offset & ~os_page_mask); + + pAllocPrivate = memdescGetMemData(pMemDesc); + if (!pAllocPrivate) + { 
+ NV_PRINTF(LEVEL_ERROR, "pAllocPrivate is NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + if (Kernel) + { + pAddress = nv_alloc_kernel_mapping(nv, pAllocPrivate, + pageIndex, pageOffset, Length, &pPrivate); + if (pAddress == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to create system memory kernel mapping!\n"); + rmStatus = NV_ERR_GENERIC; + } + else + { + *ppAddress = NV_PTR_TO_NvP64(pAddress); + *ppPrivate = NV_PTR_TO_NvP64(pPrivate); + } + } + else + { + rmStatus = nv_alloc_user_mapping(nv, pAllocPrivate, + pageIndex, pageOffset, Length, Protect, &userAddress, + &pPrivate); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to create system memory user mapping!\n"); + } + else + { + *ppAddress = (NvP64)(userAddress); + *ppPrivate = NV_PTR_TO_NvP64(pPrivate); + } + } + + return rmStatus; +} + +void osUnmapSystemMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + NvU32 ProcessId, + NvP64 pAddress, + NvP64 pPrivate +) +{ + NV_STATUS status; + void *pAllocPrivate = memdescGetMemData(pMemDesc); + OBJGPU *pGpu = pMemDesc->pGpu; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (Kernel) + { + status = nv_free_kernel_mapping(nv, pAllocPrivate, NvP64_VALUE(pAddress), + NvP64_VALUE(pPrivate)); + } + else + { + status = nv_free_user_mapping(nv, pAllocPrivate, (NvU64)pAddress, + NvP64_VALUE(pPrivate)); + } + + if (pGpu != NULL && + pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) && + memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM && + pAllocPrivate != NULL) + { + nv_unregister_phys_pages(nv, pAllocPrivate); + memdescSetMemData(pMemDesc, NULL, NULL); + } + + NV_ASSERT(status == NV_OK); +} + +void osIoWriteByte( + NvU32 Address, + NvU8 Value +) +{ + os_io_write_byte(Address, Value); +} + +NvU16 osIoReadWord( + NvU32 Address +) +{ + return os_io_read_word(Address); +} + +void osIoWriteWord( + NvU32 Address, + NvU16 Value +) +{ + os_io_write_word(Address, Value); +} + +NvU8 osIoReadByte( + NvU32 Address +) +{ + return os_io_read_byte(Address); +} + +NvBool osIsAdministrator(void) +{ + return os_is_administrator(); +} + +NvBool osAllowPriorityOverride(void) +{ + return os_allow_priority_override(); +} + +NvU32 osGetCurrentProcess(void) +{ + return os_get_current_process(); +} + +void osGetCurrentProcessName(char *ProcName, NvU32 Length) +{ + return os_get_current_process_name(ProcName, Length); +} + +NV_STATUS osGetCurrentThread(OS_THREAD_HANDLE *pThreadId) +{ + NV_STATUS rmStatus; + NvU64 threadId = 0; + + if (pThreadId == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = os_get_current_thread(&threadId); + if (rmStatus == NV_OK) + { + *pThreadId = threadId; + } + else + { + *pThreadId = 0; + } + + return rmStatus; +} + +NV_STATUS osAttachToProcess(void** ppProcessInfo, NvU32 ProcessId) +{ + // + // This function is used by RmUnmapMemory() to attach to the + // process for which a given device memory mapping was + // created, in order to be able to unmap it. On Linux/UNIX + // platforms, we can't "attach" to a random process, but + // since we don't create/destroy user mappings in the RM, we + // don't need to, either. + // + // Report success to the caller to keep RmUnmapMemory() from + // failing, and memory from being leaked as a result. 
+ // + *ppProcessInfo = NULL; + return NV_OK; +} + +void osDetachFromProcess(void* pProcessInfo) +{ + // stub + return; +} + +NvBool osDbgBreakpointEnabled(void) +{ + return NV_TRUE; +} + +NV_STATUS osAcquireRmSema(void *pSema) +{ + return NV_OK; +} + +NV_STATUS osCondAcquireRmSema(void *pSema) +{ + return NV_OK; +} + +NvU32 osReleaseRmSema(void *pSema, OBJGPU *pDpcGpu) +{ + return NV_SEMA_RELEASE_SUCCEED; +} + +void osSpinLoop(void) +{ +} + +NV_STATUS osSchedule(void) +{ + return os_schedule(); +} + +NV_STATUS osQueueWorkItemWithFlags( + OBJGPU *pGpu, + OSWorkItemFunction pFunction, + void *pParams, + NvU32 flags +) +{ + nv_work_item_t *pWi; + nv_state_t *nv; + NV_STATUS status; + + pWi = portMemAllocNonPaged(sizeof(nv_work_item_t)); + + if (NULL == pWi) + { + return NV_ERR_NO_MEMORY; + } + + pWi->flags = NV_WORK_ITEM_FLAGS_REQUIRES_GPU; + if (flags & OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS) + pWi->flags |= NV_WORK_ITEM_FLAGS_DONT_FREE_DATA; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW; + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY) + pWi->flags |= OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY; + + pWi->gpuInstance = gpuGetInstance(pGpu); + pWi->func.pGpuFunction = pFunction; + pWi->pData = pParams; + nv = NV_GET_NV_STATE(pGpu); + + status = os_queue_work_item(nv ? nv->queue : NULL, pWi); + + if (NV_OK != status) + { + portMemFree((void *)pWi); + } + + return status; +} + +NV_STATUS osQueueWorkItem( + OBJGPU *pGpu, + OSWorkItemFunction pFunction, + void *pParams +) +{ + return osQueueWorkItemWithFlags(pGpu, pFunction, pParams, OS_QUEUE_WORKITEM_FLAGS_NONE); +} + +NV_STATUS osQueueSystemWorkItem( + OSSystemWorkItemFunction pFunction, + void *pParams +) +{ + nv_work_item_t *pWi; + NV_STATUS status; + + pWi = portMemAllocNonPaged(sizeof(nv_work_item_t)); + + if (NULL == pWi) + { + return NV_ERR_NO_MEMORY; + } + + pWi->flags = NV_WORK_ITEM_FLAGS_NONE; + pWi->func.pSystemFunction = pFunction; + pWi->pData = pParams; + + status = os_queue_work_item(NULL, pWi); + + if (NV_OK != status) + { + portMemFree((void *)pWi); + } + + return status; +} + +void osQueueMMUFaultHandler(OBJGPU *pGpu) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + nv_schedule_uvm_isr(nv); +} + +static inline nv_dma_device_t* osGetDmaDeviceForMemDesc( + OS_GPU_INFO *pOsGpuInfo, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return (pOsGpuInfo->niso_dma_dev != NULL) && + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO) ? 
+ pOsGpuInfo->niso_dma_dev : pOsGpuInfo->dma_dev; +} + +NV_STATUS osDmaMapPages( + OS_GPU_INFO *pOsGpuInfo, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return nv_dma_map_pages( + osGetDmaDeviceForMemDesc(pOsGpuInfo, pMemDesc), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetPteArray(pMemDesc, AT_CPU), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + NULL); +} + +NV_STATUS osDmaUnmapPages( + OS_GPU_INFO *pOsGpuInfo, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return nv_dma_unmap_pages( + pOsGpuInfo->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetPteArray(pMemDesc, AT_CPU), + NULL); +} + +// +// Set the DMA address size for the given GPU +// +// This is a global device setting and care would need to be taken if it was to +// be modified outside of GPU initialization. At least on Linux other drivers, +// like UVM, might be requesting its own DMA mappings for the same GPU after +// the GPU has been initialized. +// +void osDmaSetAddressSize( + OS_GPU_INFO *pOsGpuInfo, + NvU32 bits +) +{ + nv_set_dma_address_size(pOsGpuInfo, bits); +} + +NV_STATUS osAllocPagesInternal( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpu = pMemDesc->pGpu; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + void *pMemData; + NV_STATUS status; + + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + + NV_ASSERT_OR_RETURN(pMemDesc->PageCount > 0, NV_ERR_INVALID_ARGUMENT); + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + status = nv_alias_pages( + NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + memdescGetGuestId(pMemDesc), + memdescGetPteArray(pMemDesc, AT_CPU), + &pMemData); + } + else + { + NvBool unencrypted = 0; + + if (nv && (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE))) + nv->force_dma32_alloc = NV_TRUE; + + status = nv_alloc_pages( + NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + pSys->getProperty(pSys, + PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS), + unencrypted, + memdescGetPteArray(pMemDesc, AT_CPU), + &pMemData); + + if (nv && nv->force_dma32_alloc) + nv->force_dma32_alloc = NV_FALSE; + } + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. 
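+    // RmInflateOsToRmPageArray() expands each OS-page entry back into
+    // per-RM-page entries within the same array.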
+ // + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + memdescSetMemData(pMemDesc, pMemData, NULL); + + return status; +} + +void osFreePagesInternal( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS rmStatus; + + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + rmStatus = nv_free_pages(NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetContiguity(pMemDesc, AT_CPU), + memdescGetCpuCacheAttrib(pMemDesc), + memdescGetMemData(pMemDesc)); + NV_ASSERT(rmStatus == NV_OK); +} + +NV_STATUS osLockMem( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Not supported on this OS. + DBG_BREAKPOINT(); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osUnlockMem( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Not supported on this OS. + DBG_BREAKPOINT(); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osMapPciMemoryUser( + OS_GPU_INFO *pOsGpuInfo, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + NvP64 *pVirtualAddress, + NvP64 *pPriv, + NvU32 modeFlag +) +{ + void *addr; + void *priv = NULL; + + addr = osMapIOSpace(busAddress, length, &priv, NV_TRUE, modeFlag, Protect); + + *pPriv = NV_PTR_TO_NvP64(priv); + *pVirtualAddress = NV_PTR_TO_NvP64(addr); + + return (addr != NULL) ? NV_OK : NV_ERR_GENERIC; +} + +void osUnmapPciMemoryUser( + OS_GPU_INFO *pOsGpuInfo, + NvP64 virtualAddress, + NvU64 length, + NvP64 pPriv +) +{ + void *addr, *priv; + + addr = NvP64_VALUE(virtualAddress); + priv = NvP64_VALUE(pPriv); + + osUnmapIOSpace(addr, length, priv, NV_TRUE); +} + +NV_STATUS osMapPciMemoryKernelOld +( + OBJGPU *pGpu, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + void **pVirtualAddress, + NvU32 modeFlag +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_kern_mapping_t *mapping; + + if (pVirtualAddress == NULL) + return NV_ERR_GENERIC; + + *pVirtualAddress = os_map_kernel_space(busAddress, length, modeFlag); + if (*pVirtualAddress == NULL) + return NV_ERR_GENERIC; + + mapping = portMemAllocNonPaged(sizeof(nv_kern_mapping_t)); + if (NULL == mapping) + { + os_unmap_kernel_space(*pVirtualAddress, length); + *pVirtualAddress = 0; + return NV_ERR_GENERIC; + } + + mapping->addr = *pVirtualAddress; + mapping->size = length; + mapping->modeFlag = modeFlag; + + mapping->next = nv->kern_mappings; + nv->kern_mappings = mapping; + + return NV_OK; +} + +NV_STATUS osMapPciMemoryKernel64 +( + OBJGPU *pGpu, + RmPhysAddr busAddress, + NvU64 length, + NvU32 Protect, + NvP64 *pVirtualAddress, + NvU32 modeFlag +) +{ + void *tmppVirtualAddress = NvP64_VALUE(pVirtualAddress); + NV_STATUS rc; + + rc = osMapPciMemoryKernelOld(pGpu, + busAddress, + length, + Protect, + &tmppVirtualAddress, + modeFlag); + + *pVirtualAddress = NV_PTR_TO_NvP64(tmppVirtualAddress); + + return rc; +} + +void osUnmapPciMemoryKernelOld +( + OBJGPU *pGpu, + void* virtualAddress +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_kern_mapping_t *mapping, *tmp; + + // this can happen, for example, during a call to RmShutdownAdapter() + // from a failed RmInitAdapter() + if (virtualAddress == NULL) + { + return; + } + + tmp = mapping = nv->kern_mappings; + while (mapping) + { + if (mapping->addr == virtualAddress) + { + if (mapping == nv->kern_mappings) + { + nv->kern_mappings = mapping->next; + } + else + { + tmp->next = 
mapping->next; + } + + os_unmap_kernel_space(mapping->addr, mapping->size); + + portMemFree(mapping); + return; + } + tmp = mapping; + mapping = mapping->next; + } + + DBG_BREAKPOINT(); +} + +void osUnmapPciMemoryKernel64 +( + OBJGPU *pGpu, + NvP64 virtualAddress +) +{ + osUnmapPciMemoryKernelOld(pGpu, NvP64_VALUE(virtualAddress)); +} + +NV_STATUS osMapGPU( + OBJGPU *pGpu, + RS_PRIV_LEVEL privLevel, + NvU64 offset, + NvU64 length, + NvU32 Protect, + NvP64 *pAddress, + NvP64 *pPriv +) +{ + NV_STATUS rmStatus = NV_OK; + + if (privLevel >= RS_PRIV_LEVEL_KERNEL) + { + if (!portSafeAddU64((NvUPtr)pGpu->deviceMappings[0].gpuNvAddr, offset, (NvU64*)pAddress)) + { + rmStatus = NV_ERR_INVALID_LIMIT; + } + } + else + { + RmPhysAddr busAddress; + if (!portSafeAddU64(pGpu->busInfo.gpuPhysAddr, offset, &busAddress)) + { + rmStatus = NV_ERR_INVALID_LIMIT; + } + else + { + rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo, + busAddress, + length, + Protect, + pAddress, + pPriv, + NV_FALSE); + } + } + + return rmStatus; +} + +void osUnmapGPU( + OS_GPU_INFO *pOsGpuInfo, + RS_PRIV_LEVEL privLevel, + NvP64 address, + NvU64 length, + NvP64 priv +) +{ + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + osUnmapPciMemoryUser(pOsGpuInfo, address, length, priv); + } +} + +NV_STATUS osDeviceClassToDeviceName( + NvU32 deviceInstance, + NvU8 *szName +) +{ + return NV_ERR_GENERIC; +} + +static void postEvent( + nv_event_t *event, + NvU32 hEvent, + NvU32 notifyIndex, + NvU32 info32, + NvU16 info16, + NvBool dataValid +) +{ + nv_state_t *nv = nv_get_ctl_state(); + portSyncSpinlockAcquire(nv->event_spinlock); + if (event->active) + nv_post_event(event, hEvent, notifyIndex, + info32, info16, dataValid); + portSyncSpinlockRelease(nv->event_spinlock); +} + +NvU32 osSetEvent +( + OBJGPU *pGpu, + NvP64 eventID +) +{ + nv_event_t *event = NvP64_VALUE(eventID); + postEvent(event, 0, 0, 0, 0, NV_FALSE); + return 1; +} + +NV_STATUS osNotifyEvent( + OBJGPU *pGpu, + PEVENTNOTIFICATION NotifyEvent, + NvU32 Method, + NvU32 Data, + NV_STATUS Status +) +{ + NV_STATUS rmStatus = NV_OK; + + // notify the event + switch (NotifyEvent->NotifyType) + { + case NV01_EVENT_OS_EVENT: + { + nv_event_t *event = NvP64_VALUE(NotifyEvent->Data); + postEvent(event, + NotifyEvent->hEvent, + NotifyEvent->NotifyIndex, + 0, 0, + NotifyEvent->bEventDataRequired); + break; + } + + // NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. please use NV01_EVENT_KERNEL_CALLBACK_EX. + case NV01_EVENT_KERNEL_CALLBACK: + { + MINIPORT_CALLBACK callBackToMiniport = + (MINIPORT_CALLBACK)NvP64_VALUE(NotifyEvent->Data); + + // perform a direct callback to the miniport + if (callBackToMiniport) + callBackToMiniport(NV_GET_NV_STATE(pGpu)); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(NotifyEvent->Data); + + // passes two arguments (arg, params) to the kernel callback instead of one (arg). 
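+            // No event data is available on this path, so the params argument
+            // is NULL; osEventNotificationWithInfo() below passes pEventData.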
+ if (kc && kc->func) + { + kc->func(kc->arg, NULL, NotifyEvent->hEvent, Data, Status); + } + break; + } + + + default: + { + rmStatus = NV_ERR_GENERIC; + break; + } + } + + return rmStatus; + +} // end of osNotifyEvent() + +// Allow CPL Events to be callback or events +NV_STATUS osEventNotification +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + void * pEventData, + NvU32 eventDataSize +) +{ + return osEventNotificationWithInfo(pGpu, pNotifyEvent, notifyIndex, 0, 0, + pEventData, eventDataSize); +} + +NV_STATUS osEventNotificationWithInfo +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + NvU32 info32, + NvU16 info16, + void * pEventData, + NvU32 eventDataSize +) +{ + NV_STATUS rmStatus = NV_OK; + + // walk this object's event list and find any matches for this specific notify + for (; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next) + { + // notifyIndex must match if request isn't for all + if ((notifyIndex != OS_EVENT_NOTIFICATION_INDEX_ALL) && + (pNotifyEvent->NotifyIndex != notifyIndex)) + { + continue; + } + + switch (pNotifyEvent->NotifyType) + { + case NV_EVENT_BUFFER_BIND: + case NV01_EVENT_WIN32_EVENT: + { + nv_event_t *event = NvP64_VALUE(pNotifyEvent->Data); + postEvent(event, + pNotifyEvent->hEvent, + pNotifyEvent->NotifyIndex, + info32, info16, + pNotifyEvent->bEventDataRequired); + break; + } + + // NOTE: NV01_EVENT_KERNEL_CALLBACK is deprecated. please use NV01_EVENT_KERNEL_CALLBACK_EX. + case NV01_EVENT_KERNEL_CALLBACK: + { + MINIPORT_CALLBACK callBackToMiniport = + (MINIPORT_CALLBACK)NvP64_VALUE(pNotifyEvent->Data); + + // perform a direct callback to the miniport + if (callBackToMiniport) + callBackToMiniport(NV_GET_NV_STATE(pGpu)); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(pNotifyEvent->Data); + + if (kc && kc->func) + { + kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK); + } + break; + } + + default: + break; + } + } + + return rmStatus; +} + +// Allow CPL Events to be callback or events +NV_STATUS osObjectEventNotification +( + NvHandle hClient, + NvHandle hObject, + NvU32 hClass, + PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, + void *pEventData, + NvU32 eventDataSize +) +{ + NV_STATUS rmStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "%s()\n", __FUNCTION__); + // walk this object's event list and find any matches for this specific notify + for (; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next) + { + // notifyIndex must match if request isn't for all + if ((notifyIndex != OS_EVENT_NOTIFICATION_INDEX_ALL) && + (pNotifyEvent->NotifyIndex != notifyIndex)) + { + continue; + } + + switch (pNotifyEvent->NotifyType) + { + case NV01_EVENT_OS_EVENT: + { + nv_event_t *event = NvP64_VALUE(pNotifyEvent->Data); + postEvent(event, + pNotifyEvent->hEvent, + pNotifyEvent->NotifyIndex, + 0, 0, + pNotifyEvent->bEventDataRequired); + break; + } + + case NV01_EVENT_KERNEL_CALLBACK_EX: + { + NVOS10_EVENT_KERNEL_CALLBACK_EX *kc = (NVOS10_EVENT_KERNEL_CALLBACK_EX *)NvP64_VALUE(pNotifyEvent->Data); + + if (kc && kc->func) + { + kc->func(kc->arg, pEventData, pNotifyEvent->hEvent, 0, NV_OK); + } + break; + } + + default: + break; + } + } + + return rmStatus; +} + +NV_STATUS osReferenceObjectCount(void *pEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event = pEvent; + + portSyncSpinlockAcquire(nv->event_spinlock); + ++event->refcount; + portSyncSpinlockRelease(nv->event_spinlock); + return NV_OK; +} + 
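+//
+// The event reference count and its 'active' flag together control the
+// lifetime of an nv_event_t: osDereferenceObjectCount() frees the event only
+// once the refcount reaches zero and the client has already freed the OS
+// event (active == NV_FALSE).
+//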
+NV_STATUS osDereferenceObjectCount(void *pOSEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event = pOSEvent; + + portSyncSpinlockAcquire(nv->event_spinlock); + NV_ASSERT(event->refcount > 0); + --event->refcount; + // If event->refcount == 0 but event->active is true, the client + // has not yet freed the OS event. free_os_event will free its + // memory when they do, or else when the client itself is freed. + if (event->refcount == 0 && !event->active) + portMemFree(event); + portSyncSpinlockRelease(nv->event_spinlock); + + return NV_OK; +} + +NV_STATUS osUserHandleToKernelPtr(NvHandle hClient, NvP64 hEvent, NvP64 *pEvent) +{ + nv_state_t *nv = nv_get_ctl_state(); + NvU32 fd = (NvU64)hEvent; + NV_STATUS result; + + portSyncSpinlockAcquire(nv->event_spinlock); + nv_event_t *e = nv->event_list; + while (e != NULL) + { + if (e->fd == fd && e->hParent == hClient) + break; + e = e->next; + } + + if (e != NULL) + { + ++e->refcount; + *pEvent = NV_PTR_TO_NvP64(e); + result = NV_OK; + } + else + result = NV_ERR_OBJECT_NOT_FOUND; + portSyncSpinlockRelease(nv->event_spinlock); + + return result; +} + +NV_STATUS osFlushCpuCache(void) +{ + return os_flush_cpu_cache_all(); +} + +void osFlushCpuWriteCombineBuffer(void) +{ + os_flush_cpu_write_combine_buffer(); +} + + +// +// Evict GPU memory range from the CPU caches. +// +// On some platforms (e.g. P9+V100), the CPU can coherently cache GPU memory +// and RM takes advantage of that. Most everything is handled transparently, +// but there are two exceptions that require explicitly flushing any CPU cache +// lines of GPU memory. These are: +// +// 1) Flushing memory backing ACR regions before they get locked. +// +// Otherwise the cache could get flushed while the regions are locked causing a +// region violation physical fault. See more details in +// acrFlushRegionsFromGpuCoherentCpuCache_IMPL(). +// +// 2) Flushing all of FB before GPU reset (NVLink going down) +// +// Leaving cache entries on the CPU causes fatal errors when the CPU tries +// flushing them later while the link is down. See more details in +// nvlinkStatePostUnload_IMPL(). +// +void osFlushGpuCoherentCpuCacheRange +( + OS_GPU_INFO *pOsGpuInfo, + NvU64 cpuVirtual, + NvU64 size +) +{ + nv_flush_coherent_cpu_cache_range(pOsGpuInfo, cpuVirtual, size); +} + +void osErrorLogV(OBJGPU *pGpu, NvU32 num, const char * pFormat, va_list arglist) +{ + NV_STATUS rmStatus; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pFormat == NULL) || (*pFormat == '\0')) + { + return; + } + + rmStatus = nv_log_error(nv, num, pFormat, arglist); + NV_ASSERT(rmStatus == NV_OK); +} + +void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...) 
+{ + va_list arglist; + va_start(arglist, pFormat); + osErrorLogV(pGpu, num, pFormat, arglist); + va_end(arglist); +} + +NvU32 +osPollHotkeyState +( + OBJGPU *pGpu +) +{ + return 0; +} + +void osDevWriteReg008( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV8 thisValue +) +{ + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR08(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +void osDevWriteReg016( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV16 thisValue +) +{ + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR16(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +void osDevWriteReg032( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV32 thisValue +) +{ + NvBool vgpuHandled = NV_FALSE; + + if (vgpuHandled) + { + return; + } + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + return; + } + + NV_PRIV_REG_WR32(pMapping->gpuNvAddr, thisAddress, thisValue); +} + +NvU8 osDevReadReg008( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU8 retval = 0; + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD08(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NvU16 osDevReadReg016( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU16 retval = 0; + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD16(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NvU32 osDevReadReg032( + OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress +) +{ + NvU32 retval = 0; + NvBool vgpuHandled = NV_FALSE; + + if (vgpuHandled) + { + return retval; + } + + if (thisAddress >= pMapping->gpuNvLength) + { + NV_ASSERT(thisAddress < pMapping->gpuNvLength); + } + else + retval = NV_PRIV_REG_RD32(pMapping->gpuNvAddr, thisAddress); + + return retval; +} + +NV_STATUS osReadRegistryDwordBase( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 *Data +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmReadRegistryDword(nv, regParmStr, Data); +} + +NV_STATUS osWriteRegistryDword( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 Data +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmWriteRegistryDword(nv, regParmStr, Data); +} + +NV_STATUS osReadRegistryBinary( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 *cbLen +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmReadRegistryBinary(nv, regParmStr, Data, cbLen); +} + +NV_STATUS osWriteRegistryBinary( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmWriteRegistryBinary(nv, regParmStr, Data, cbLen); +} + +NV_STATUS osWriteRegistryVolatile( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryVolatile +( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryVolatileSize +( + OBJGPU *pGpu, + const char *regParmStr, + NvU32 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReadRegistryStringBase( + OBJGPU *pGpu, + const char *regParmStr, + NvU8 *buffer, + NvU32 *pBufferLength +) +{ + 
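+    // String reads are forwarded to the UNIX registry layer, like the DWORD
+    // and binary accessors above.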
nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmReadRegistryString(nv, regParmStr, buffer, pBufferLength); +} + +NV_STATUS osPackageRegistry( + OBJGPU *pGpu, + PACKED_REGISTRY_TABLE *pRegTable, + NvU32 *pSize +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + return RmPackageRegistry(nv, pRegTable, pSize); +} + +NvU32 osGetCpuCount() +{ + return os_get_cpu_count(); // Total number of logical CPUs. +} + +NvU32 osGetCurrentProcessorNumber(void) +{ + return os_get_cpu_number(); +} + +void osGetTimeoutParams(OBJGPU *pGpu, NvU32 *pTimeoutUs, NvU32 *pScale, NvU32 *pFlags) +{ + NvU32 gpuMode = gpuGetMode(pGpu); + + NV_ASSERT((NV_GPU_MODE_GRAPHICS_MODE == gpuMode) || + (NV_GPU_MODE_COMPUTE_MODE == gpuMode)); + + { + switch (gpuMode) + { + default: + case NV_GPU_MODE_GRAPHICS_MODE: + *pTimeoutUs = 4 * 1000000; + break; + + case NV_GPU_MODE_COMPUTE_MODE: + *pTimeoutUs = 30 * 1000000; + break; + } + } + + *pFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + + *pScale = 1; + if (IS_EMULATION(pGpu) || IS_SIMULATION(pGpu)) + { + *pScale = 60; // 1s -> 1m + } + + return; +} + +void osFlushLog() +{ + // Not implemented +} + +NvU32 osGetSimulationMode(void) +{ + return NV_SIM_MODE_HARDWARE; +} + +NV_STATUS +cliresCtrlCmdOsUnixFlushUserCache_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams +) +{ + Memory *pMemory; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 start, end; + NvBool bInvalidateOnly; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(RES_GET_CLIENT(pRmCliRes), + pAddressSpaceParams->hObject, + &pMemory)); + + pMemDesc = pMemory->pMemDesc; + + if (memdescGetAddressSpace(pMemDesc) != ADDR_SYSMEM) + { + NV_PRINTF(LEVEL_ERROR, "%s: wrong address space %d\n", + __FUNCTION__, memdescGetAddressSpace(pMemDesc)); + return NV_ERR_INVALID_COMMAND; + } + + if (memdescGetCpuCacheAttrib(pMemDesc) != NV_MEMORY_CACHED) + { + NV_PRINTF(LEVEL_ERROR, "%s: wrong caching type %d\n", + __FUNCTION__, memdescGetCpuCacheAttrib(pMemDesc)); + return NV_ERR_INVALID_COMMAND; + } + + start = pAddressSpaceParams->offset; + end = start + pAddressSpaceParams->length; + + switch(pAddressSpaceParams->cacheOps) + { + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH_INVALIDATE: + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_FLUSH: + bInvalidateOnly = NV_FALSE; + break; + + case NV0000_CTRL_OS_UNIX_FLAGS_USER_CACHE_INVALIDATE: + bInvalidateOnly = NV_TRUE; + break; + + default: + NV_PRINTF(LEVEL_ERROR, "%s: cacheOps not specified\n", __FUNCTION__); + return NV_ERR_INVALID_COMMAND; + } + + if ((end - start) > pMemory->Length) + { + NV_PRINTF(LEVEL_ERROR, + "%s: end address 0x%llx exceeded buffer length: 0x%llx\n", + __FUNCTION__, end, pMemory->Length); + return NV_ERR_INVALID_LIMIT; + } + + if (bInvalidateOnly) + { + // + // XXX: this seems fishy - I'm not sure if invalidating by the kernel + // VA only as nv_dma_cache_invalidate() does here is sufficient for + // this control call. + // pAddressSpaceParams->internalOnly is expected to be the RM client + // VA for this control call; if we wanted to invalidate the user VA we + // could do so using that. + // + // For I/O coherent platforms this won't actually do anything. + // On non-I/O-coherent platforms, there's no need to do a second + // invalidation after the full flush. 
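+        // The invalidate-only path below is limited to memory mapped through
+        // the nv-dma library, since nv_dma_cache_invalidate() needs the IOVA
+        // mapping's OS data.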
+ // + nv_state_t *nv = NV_GET_NV_STATE(pMemDesc->pGpu); + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + PIOVAMAPPING pIovaMapping = memdescGetIommuMap(pMemDesc, nv->iovaspace_id); + // + // This should only be called for devices that map memory descriptors + // through the nv-dma library, where the memory descriptor data + // contains all the kernel-specific context we need for the + // invalidation. + // + // (These checks match those in osIovaUnmap() leading up to + // nv_dma_unmap_alloc()). + // + if (pIovaMapping == NULL || + pIovaMapping->pOsData == NULL || + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED) || + memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + nv_dma_cache_invalidate(nv->dma_dev, pIovaMapping->pOsData); + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + } + else + { + return os_flush_user_cache(); + } + + return NV_OK; +} + +static NV_STATUS +_initializeExportObjectFd +( + nv_file_private_t *nvfp, + NvHandle hClient, + NvHandle hDevice, + NvU16 maxObjects, + NvU8 *metadata +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + Device *pDevice; + + if (nvfp->handles != NULL) + { + return NV_ERR_STATE_IN_USE; + } + + status = serverutilGetResourceRef(hClient, hDevice, &pResourceRef); + if (status != NV_OK) + { + return status; + } + + pDevice = dynamicCast(pResourceRef->pResource, Device); + if (pDevice == NULL) + { + return NV_ERR_INVALID_PARAMETER; + } + + NV_ASSERT_OK_OR_RETURN(os_alloc_mem((void **)&nvfp->handles, + sizeof(nvfp->handles[0]) * maxObjects)); + + os_mem_set(nvfp->handles, 0, + sizeof(nvfp->handles[0]) * maxObjects); + + nvfp->maxHandles = maxObjects; + nvfp->deviceInstance = pDevice->deviceInst; + + if (metadata != NULL) + { + os_mem_copy(nvfp->metadata, metadata, sizeof(nvfp->metadata)); + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdOsUnixExportObjectToFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmObjExportHandle hExportHandle = 0; + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + /* + * This flag is intended to be implemented entirely in the rmapi library in + * userspace, we should never encounter it here. + */ + if (FLD_TEST_DRF(0000_CTRL, _OS_UNIX_EXPORT_OBJECT_TO_FD_FLAGS, + _EMPTY_FD, _TRUE, pParams->flags)) + { + return NV_ERR_INVALID_PARAMETER; + } + + if (pParams->object.type != NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM || + pParams->fd == -1) + { + return NV_ERR_INVALID_PARAMETER; + } + + status = RmExportObject(hClient, + pParams->object.data.rmObject.hObject, + &hExportHandle, NULL); + if (status != NV_OK) + { + goto done; + } + NV_ASSERT(hExportHandle != 0); + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, + pParams->object.data.rmObject.hDevice, + 1, NULL); + if (status != NV_OK) + { + goto done; + } + + nvfp->handles[0] = hExportHandle; + +done: + + if (status != NV_OK && hExportHandle != 0) + { + RmFreeObjExportHandle(hExportHandle); + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +// This control call has been deprecated. It will be deleted soon. 
+NV_STATUS +cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams +) +{ + NV_STATUS status; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + + ct_assert(sizeof(nvfp->metadata) == sizeof(pParams->metadata)); + + if (pParams->maxObjects == 0) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, pParams->hDevice, + pParams->maxObjects, pParams->metadata); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixExportObjectsToFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmObjExportHandle *pExportHandle; + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + NvU32 i; + NvU32 deviceInstance; + NvU32 result; + NvHandle *exportHandles = NULL; + NvBool bFdSetup = NV_FALSE; + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + ct_assert(sizeof(nvfp->metadata) == sizeof(pParams->metadata)); + + /* Setup export FD if not done */ + if (nvfp->handles == NULL) + { + if (pParams->maxObjects == 0) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = _initializeExportObjectFd(nvfp, hClient, pParams->hDevice, + pParams->maxObjects, + pParams->metadata); + if (status != NV_OK) + { + goto done; + } + + bFdSetup = NV_TRUE; + } + + if ((nvfp->handles == NULL) || + (pParams->numObjects > + NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_MAX_OBJECTS)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (!portSafeAddU32(pParams->numObjects, pParams->index, &result) || + (result > nvfp->maxHandles)) + { + status = NV_ERR_OUT_OF_RANGE; + goto done; + } + + status = os_alloc_mem((void **)&exportHandles, + sizeof(exportHandles[0]) * + pParams->numObjects); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; i < pParams->numObjects; i++) + { + exportHandles[i] = 0; + + if (pParams->objects[i] == 0) + { + continue; + } + + status = RmExportObject(hClient, + pParams->objects[i], + &exportHandles[i], + &deviceInstance); + if (status != NV_OK) + { + goto done; + } + + NV_ASSERT(exportHandles[i] != 0); + + if (deviceInstance != nvfp->deviceInstance) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + } + + for (i = 0; i < pParams->numObjects; i++) + { + pExportHandle = &nvfp->handles[i + pParams->index]; + + // If the handle already exists in this position, free it + if (*pExportHandle != 0) + { + RmFreeObjExportHandle(*pExportHandle); + *pExportHandle = 0; + } + + *pExportHandle = exportHandles[i]; + } + +done: + + if ((status != NV_OK) && (exportHandles != NULL)) + { + for (i = 0; i < pParams->numObjects; i++) + { + if (exportHandles[i] != 0) + { + RmFreeObjExportHandle(exportHandles[i]); + } + } + } + + if (exportHandles != NULL) + { + os_free_mem(exportHandles); + } + + if ((status != NV_OK) && bFdSetup) + { + os_free_mem(nvfp->handles); + nvfp->handles = NULL; + nvfp->maxHandles = 0; + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS 
+cliresCtrlCmdOsUnixImportObjectFromFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + if (pParams->object.type != NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM || + pParams->fd == -1) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if ((nvfp->handles == NULL) || (nvfp->handles[0] == 0) || + (nvfp->maxHandles > 1)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + status = RmImportObject(hClient, + pParams->object.data.rmObject.hParent, + &pParams->object.data.rmObject.hObject, + nvfp->handles[0], NULL); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + NvU32 i = 0; + RmObjExportHandle hImportHandle; + NvU32 result; + RM_API *pRmApi; + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if ((nvfp->handles == NULL) || + (pParams->numObjects > + NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_TO_FD_MAX_OBJECTS)) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (!portSafeAddU32(pParams->numObjects, pParams->index, &result) || + (result > nvfp->maxHandles)) + { + status = NV_ERR_OUT_OF_RANGE; + goto done; + } + + for (i = 0; i < pParams->numObjects; i++) + { + hImportHandle = nvfp->handles[i + pParams->index]; + + /* Nothing to import, just continue */ + if (hImportHandle == 0) + { + pParams->objectTypes[i] = \ + NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_NONE; + continue; + } + + status = RmImportObject(hClient, + pParams->hParent, + &pParams->objects[i], + hImportHandle, + &pParams->objectTypes[i]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s: Unable to import handle (%x, %x, %x)\n", + __FUNCTION__, pParams->hParent, pParams->objects[i], hImportHandle); + goto done; + } + } + +done: + + if (status != NV_OK) + { + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + while (i > 0) + { + i--; + + if (pParams->objects[i] != 0) + { + pRmApi->Free(pRmApi, hClient, pParams->objects[i]); + } + } + } + + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return status; +} + +NV_STATUS +cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams +) +{ + nv_file_private_t *nvfp = NULL; + void *priv = NULL; + NV_STATUS status = NV_OK; + + if (pParams->fd < 0) + { + return NV_ERR_INVALID_PARAMETER; + } + + nvfp = nv_get_file_private(pParams->fd, NV_TRUE, &priv); + if (nvfp == NULL) + { + NV_ASSERT(priv == NULL); + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + if (nvfp->handles == NULL) + { + status = NV_ERR_INVALID_PARAMETER; + goto done; + } + + pParams->maxObjects = nvfp->maxHandles; + pParams->deviceInstance = nvfp->deviceInstance; + os_mem_copy(pParams->metadata, nvfp->metadata, sizeof(nvfp->metadata)); + +done: + if (priv != NULL) + { + nv_put_file_private(priv); + } + + return 
status; +} + +/*! + * osAcpiDsm + * + * @brief Handles os specific _DSM method function calls. + * + * Input parameters: + * @param[in] pGpu : OBJGPU pointer + * @param[in] acpiDsmFunction : ACPI DSM function + * @param[in] acpiDsmSubFunction : ACPI DSM subfunction + * @param[in/out] pInOut : in/out buffer, caller should make sure the buffer is large enough. + * @param[in] pSize : when input, size of data that the caller wants to read, in bytes. + * when output, size of valid data in pInOuta in bytes. + */ +NV_STATUS osCallACPI_DSM +( + OBJGPU *pGpu, + ACPI_DSM_FUNCTION acpiDsmFunction, + NvU32 acpiDsmSubFunction, + NvU32 *pInOut, + NvU16 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCallACPI_DOD +( + OBJGPU *pGpu, + NvU32 *pOut, + NvU32 *pSize +) +{ + NV_STATUS rmStatus; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pOut == NULL) || (pSize == NULL)) + { + return NV_ERR_INVALID_POINTER; + } + + rmStatus = nv_acpi_dod_method(nv, pOut, pSize); + + return rmStatus; +} + +// +// osAcpiDdc +// +// Handles os specific _DDC method function calls. _DDC is to get EDID from SBIOS. +// +NV_STATUS osCallACPI_DDC +( + OBJGPU *pGpu, + NvU32 ulAcpiId, + NvU8 *pOutData, + NvU32 *pOutSize, + NvBool bReadMultiBlock +) +{ + NV_STATUS rmStatus; + + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pOutData == NULL) || (pOutSize == NULL)) + { + return NV_ERR_INVALID_POINTER; + } + + portMemSet(pOutData, 0, *pOutSize); + + rmStatus = nv_acpi_ddc_method(nv, pOutData, pOutSize, bReadMultiBlock); + + return rmStatus; +} + +// osCallACPI_NVHG_ROM +// Making ACPI Call into SBIOS with ROM method to get display device's ROM data. +// +NV_STATUS osCallACPI_NVHG_ROM +( + OBJGPU *pGpu, + NvU32 *pInData, + NvU32 *pOutData +) +{ + NV_STATUS rmStatus; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if ((pOutData == NULL) || (pInData == NULL)) + { + return NV_ERR_INVALID_POINTER; + } + + if (pInData[1] > ROM_METHOD_MAX_RETURN_BUFFER_SIZE) + { + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = nv_acpi_rom_method(nv, pInData, pOutData); + + return rmStatus; +} + +void osInitSystemStaticConfig(SYS_STATIC_CONFIG *pConfig) +{ + pConfig->bIsNotebook = rm_is_system_notebook(); + pConfig->osType = nv_get_os_type(); + pConfig->osSevStatus = os_sev_status; + pConfig->bOsSevEnabled = os_sev_enabled; +} + +NvU32 osApiLockAcquireConfigureFlags(NvU32 flags) +{ + return flags; +} + +NV_STATUS osGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 dpcGpuLocksRelease) +{ + return NV_SEMA_RELEASE_FAILED; +} + +void osSyncWithRmDestroy() +{ +} + +void osSyncWithGpuDestroy(NvBool bEntry) +{ +} + +void osModifyGpuSwStatePersistence +( + OS_GPU_INFO *pOsGpuInfo, + NvBool bEnable +) +{ + if (bEnable) + { + pOsGpuInfo->flags |= NV_FLAG_PERSISTENT_SW_STATE; + } + else + { + pOsGpuInfo->flags &= ~NV_FLAG_PERSISTENT_SW_STATE; + } +} + +NV_STATUS +osSystemGetBatteryDrain(NvS32 *pChargeRate) +{ + NV_PRINTF(LEVEL_WARNING, "%s: Platform not supported!\n", __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osPexRecoveryCallback +( + OS_GPU_INFO *pOsGpuInfo, + OS_PEX_RECOVERY_STATUS Status +) +{ + NV_ASSERT_FAILED("Not supported"); + return NV_ERR_NOT_SUPPORTED; +} + +// +//osCallACPI_MXDS +// +//Handles OS specific MXDS function call. 
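+//The request is forwarded to the ACPI "MXDS" method for the display device
+//identified by acpiId.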
+// +NV_STATUS osCallACPI_MXDS +( + OBJGPU *pGpu, + NvU32 acpiId, + NvU32 *pInOut +) +{ + NV_STATUS rmStatus; + + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (pInOut == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDS"); + + return rmStatus; +} + +// +//osCallACPI_MXDM +// +//Handles OS specific MXDM function call. +// +NV_STATUS osCallACPI_MXDM +( + OBJGPU *pGpu, + NvU32 acpiId, + NvU32 *pInOut +) +{ + NV_STATUS rmStatus; + + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (pInOut == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + rmStatus = nv_acpi_mux_method(nv, pInOut, acpiId, "MXDM"); + + return rmStatus; +} + +#include "lib/protobuf/prb.h" +#include "lib/protobuf/prb_util.h" +#include "g_nvdebug_pb.h" + +NV_STATUS osGetVersionDump(void * pVoid) +{ + PRB_ENCODER * pPrbEnc = (PRB_ENCODER *)pVoid; + NV_STATUS rmStatus; + os_version_info * pOsVersionInfo = NULL; + const char NV_UNKNOWN_BUILD_VERSION[] = "Unknown build version"; + const char NV_UNKNOWN_BUILD_DATE[] = "Unknown build date"; + + NV_ASSERT_OK_OR_RETURN(os_alloc_mem((void**)&pOsVersionInfo, + sizeof(os_version_info))); + portMemSet(pOsVersionInfo, 0, sizeof(os_version_info)); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_OSINFO_FAMILY, + NVDEBUG_OS_UNIX); + + rmStatus = os_get_version_info(pOsVersionInfo); + if (rmStatus != NV_OK) + { + goto cleanup; + } + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_OSINFO_OSMAJORVERSION, + pOsVersionInfo->os_major_version); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_OSINFO_OSMINORVERSION, + pOsVersionInfo->os_minor_version); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_OSINFO_OSBLDNUM, + pOsVersionInfo->os_build_number); + + if (NULL == pOsVersionInfo->os_build_version_str) + { + pOsVersionInfo->os_build_version_str = NV_UNKNOWN_BUILD_VERSION; + } + + prbEncAddString(pPrbEnc, + NVDEBUG_SYSTEMINFO_OSINFO_BUILDVERSION, + pOsVersionInfo->os_build_version_str); + + if (NULL == pOsVersionInfo->os_build_date_plus_str) + { + pOsVersionInfo->os_build_date_plus_str = NV_UNKNOWN_BUILD_DATE; + } + + prbEncAddString(pPrbEnc, + NVDEBUG_SYSTEMINFO_OSINFO_BUILDDATEPLUS, + pOsVersionInfo->os_build_date_plus_str); + +cleanup: + os_free_mem(pOsVersionInfo); + return rmStatus; +} + +NV_STATUS osGetVersion(NvU32 *majorVer, NvU32 *minorVer, NvU32 *buildNum, NvU16 *unusedPatchVersion, NvU16 *unusedProductType) +{ + os_version_info osVersionInfo; + NV_STATUS rmStatus; + + portMemSet(&osVersionInfo, 0, sizeof(osVersionInfo)); + + rmStatus = os_get_version_info(&osVersionInfo); + if (rmStatus == NV_OK) + { + if (majorVer) + *majorVer = osVersionInfo.os_major_version; + if (minorVer) + *minorVer = osVersionInfo.os_minor_version; + if (buildNum) + *buildNum = osVersionInfo.os_build_number; + } + + return rmStatus; +} + +NV_STATUS +osGetSystemCpuLogicalCoreCounts +( + NvU32 *pCpuCoreCount +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetSystemCpuC0AndAPerfCounters +( + NvU32 coreIndex, + POS_CPU_CORE_PERF_COUNTERS pCpuPerfData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void +osEnableCpuPerformanceCounters +( + OBJOS *pOS +) +{ + NV_ASSERT_FAILED("Not supported"); + return; +} + +NV_STATUS +osCpuDpcObjInit +( + void **ppCpuDpcObj, + OBJGPU *pGpu, + NvU32 coreCount +) +{ + NV_ASSERT_FAILED("Not supported"); + return NV_ERR_NOT_SUPPORTED; +} + +void +osCpuDpcObjQueue +( + void **ppCpuDpcObj, + NvU32 coreCount, + POS_CPU_CORE_PERF_COUNTERS pCpuPerfData +) +{ + NV_ASSERT_FAILED("Not supported"); +} + +void +osCpuDpcObjFree +( + void 
**ppCpuDpcObj +) +{ + NV_ASSERT_FAILED("Not supported"); +} + +NV_STATUS +osGetCarveoutInfo +( + NvU64 *pAddr, + NvU64 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetVPRInfo +( + NvU64 *pAddr, + NvU64 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osAllocInVPR +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGetGenCarveout +( + NvU64 *pAddr, + NvU64 *pSize, + NvU32 id, + NvU64 align +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osI2CClosePorts +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 numPorts +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osI2CTransfer +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + nv_i2c_msg_t *nv_i2c_msgs, + NvU32 count +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraI2CGetBusState +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 port, + NvS32 *scl, + NvS32 *sda +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osReadI2CBufferDirect +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + void *pOutputBuffer, + NvU32 OutputSize, + void *pInputBuffer, + NvU32 InputSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osWriteI2CBufferDirect +( + OBJGPU *pGpu, + NvU32 Port, + NvU8 Address, + void *pOutputBuffer0, + NvU32 OutputSize0, + void *pOutputBuffer1, + NvU32 OutputSize1 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osGC6PowerControl +( + OBJGPU *pGpu, + NvU32 cmd, + NvU32 *pOut +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool osTestPcieExtendedConfigAccess(void *handle, NvU32 offset) +{ + OBJGPU *pGpu; + KernelBif *pKernelBif; + NvU32 nvXveId = 0; + NvU32 nvXveVccapHdr = 0; + NvU32 pciStart = 0; + NvU32 pcieStart = 0; + + static NvBool retryAllowed = NV_TRUE; + static NvBool configAccess = NV_FALSE; + + // + // Return early for offset within PCI space + // and does not requires extended config space access + // + if (offset < 0x100) + { + return NV_TRUE; + } + + if (!retryAllowed) + { + return configAccess; + } + + pGpu = gpumgrGetSomeGpu(); + if (pGpu == NULL) + { + return configAccess; + } + + retryAllowed = NV_FALSE; + + pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + if (pKernelBif == NULL || kbifGetBusIntfType_HAL(pKernelBif) != + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) + { + return configAccess; + } + + // Now verify PCI and PCIe config start registers. + kbifGetPcieConfigAccessTestRegisters_HAL(pGpu, pKernelBif, &pciStart, &pcieStart); + os_pci_read_dword(handle, pciStart, &nvXveId); + os_pci_read_dword(handle, pcieStart, &nvXveVccapHdr); + + if (NV_OK == kbifVerifyPcieConfigAccessTestRegisters_HAL(pGpu, + pKernelBif, + nvXveId, + nvXveVccapHdr)) + { + configAccess = NV_TRUE; + } + + return configAccess; +} + +/*! + * @brief Map memory into an IOVA space according to the given mapping info. 
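+ *
+ * Guest-allocated memory and peer I/O memory are special-cased: the former
+ * needs no remapping, while the latter is mapped as MMIO.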
+ * + * @param[in] pIovaMapping IOVA mapping info + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +osIovaMap +( + PIOVAMAPPING pIovaMapping +) +{ + OBJGPU *pGpu; + nv_state_t *nv, *peer; + NV_STATUS status; + RmPhysAddr base; + NvBool bIsBar0; + PMEMORY_DESCRIPTOR pRootMemDesc; + NvBool bIsFbOffset = NV_FALSE; + NvBool bIsIndirectPeerMapping = NV_FALSE; + NvBool bIsContig; + NV_ADDRESS_SPACE addressSpace; + NvU32 osPageCount; + + if (pIovaMapping == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpu = gpumgrGetGpuFromId(pIovaMapping->iovaspaceId); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pRootMemDesc = memdescGetRootMemDesc(pIovaMapping->pPhysMemDesc, NULL); + addressSpace = memdescGetAddressSpace(pIovaMapping->pPhysMemDesc); + if (gpumgrCheckIndirectPeer(pGpu, pRootMemDesc->pGpu) && + (addressSpace == ADDR_FBMEM)) + { + bIsIndirectPeerMapping = NV_TRUE; + } + + if ((addressSpace != ADDR_SYSMEM) && !bIsIndirectPeerMapping) + { + NV_PRINTF(LEVEL_INFO, + "%s passed memory descriptor in an unsupported address space (%s)\n", + __FUNCTION__, + memdescGetApertureString(memdescGetAddressSpace(pIovaMapping->pPhysMemDesc))); + return NV_ERR_NOT_SUPPORTED; + } + + // + // For guest-allocated memory, we don't actually want to do any remapping, + // since the physical address is already the DMA address to be used by the + // GPU. + // + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + return NV_OK; + } + + nv = NV_GET_NV_STATE(pGpu); + + // + // Intercept peer IO type memory. These are contiguous allocations, so no + // need to adjust pages. + // + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + NV_ASSERT(memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU)); + + status = nv_dma_map_mmio(nv->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount), + &pIovaMapping->iovaArray[0]); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: failed to map peer IO mem (status = 0x%x)\n", + __FUNCTION__, status); + } + + return status; + } + + // + // We need to check against the "root" GPU, e.g., the GPU that owns this + // allocation. If we're trying to map one of its BARs for a peer, we need + // to handle it differently because it wouldn't have gone through our system + // memory page allocation paths, obviously, and wouldn't have alloc private + // data associated with it. 
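+    // BAR and FB apertures of another GPU are mapped with nv_dma_map_peer()
+    // further below rather than nv_dma_map_alloc().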
+ // + peer = NV_GET_NV_STATE(pRootMemDesc->pGpu); + bIsContig = memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU); + if (NV_RM_PAGE_SIZE < os_page_size && !bIsContig) + { + RmDeflateRmToOsPageArray(&pIovaMapping->iovaArray[0], + pIovaMapping->pPhysMemDesc->PageCount); + } + + base = memdescGetPhysAddr(pIovaMapping->pPhysMemDesc, AT_CPU, 0); + bIsBar0 = IS_REG_OFFSET(peer, base, pIovaMapping->pPhysMemDesc->Size); + + bIsFbOffset = IS_FB_OFFSET(peer, base, pIovaMapping->pPhysMemDesc->Size); + + // + // For indirect peers bIsFbOffset should be NV_TRUE + // TODO:IS_FB_OFFSET macro is currently broken for P9 systems + // Bug 2010857 tracks fixing this + // +#if defined(NVCPU_PPC64LE) + KernelMemorySystem *pRootKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pRootMemDesc->pGpu); + if (bIsIndirectPeerMapping) + { + NvU64 atsBase = base + pRootKernelMemorySystem->coherentCpuFbBase; + if ((atsBase >= pRootKernelMemorySystem->coherentCpuFbBase) && + (atsBase + pIovaMapping->pPhysMemDesc->Size <= + pRootKernelMemorySystem->coherentCpuFbEnd)) + { + bIsFbOffset = NV_TRUE; + } + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + } +#endif + + void *pPriv = memdescGetMemData(pIovaMapping->pPhysMemDesc); + osPageCount = NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount); + + if (!bIsBar0 && !bIsFbOffset) + { + if (pPriv == NULL) + { + return NV_ERR_INVALID_STATE; + } + } + else if(bIsIndirectPeerMapping) + { + NV_ASSERT(!bIsBar0 && bIsFbOffset); + // + // TODO: Align onlined GPU memory allocation paths with system memory allocation + // That way pMemDesc->pMemData is setup correctly when we try to create mapping + // to onlined memory of indirect peer. After that we can also get rid of some + // extra code in nv_dma_map_alloc. See bug 190324 for details + // + + status = memdescGetNvLinkGpa(pRootMemDesc->pGpu, (bIsContig ? 1 : osPageCount), + &pIovaMapping->iovaArray[0]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s Failed to get SPA\n", __FUNCTION__); + return status; + } + } + + if (!bIsBar0 && (!bIsFbOffset || bIsIndirectPeerMapping)) + { + status = nv_dma_map_alloc( + osGetDmaDeviceForMemDesc(nv, pIovaMapping->pPhysMemDesc), + osPageCount, + &pIovaMapping->iovaArray[0], + bIsContig, &pPriv); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: failed to map allocation (status = 0x%x)\n", + __FUNCTION__, status); + return status; + } + + pIovaMapping->pOsData = pPriv; + } + else if (peer != nv) + { + status = nv_dma_map_peer(nv->dma_dev, peer->dma_dev, bIsBar0 ? 0 : 1, + osPageCount, &pIovaMapping->iovaArray[0]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "%s: failed to map peer (base = 0x%llx, status = 0x%x)\n", + __FUNCTION__, base, status); + return status; + } + + // + // pOsData must be NULL to distinguish a peer DMA mapping from a + // system memory mapping in osIovaUnmap(), so make sure to set it + // accordingly here. + // + pIovaMapping->pOsData = NULL; + } + else + { + NV_PRINTF(LEVEL_INFO, "cannot map a GPU's BAR to itself\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. + // + if (NV_RM_PAGE_SIZE < os_page_size && !bIsContig) + { + RmInflateOsToRmPageArray(&pIovaMapping->iovaArray[0], + pIovaMapping->pPhysMemDesc->PageCount); + } + + return NV_OK; +} + +/*! + * @brief Unmap memory from an IOVA space according to the given mapping info. + * + * This mapping info must have been previously mapped by osIovaMap(). 
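+ *
+ * As in osIovaMap(), guest-allocated mappings are skipped and peer MMIO
+ * mappings are torn down with nv_dma_unmap_mmio().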
+ * + * @param[in] pIovaMapping IOVA mapping info + * + */ +void +osIovaUnmap +( + PIOVAMAPPING pIovaMapping +) +{ + OBJGPU *pGpu; + nv_state_t *nv; + void *pPriv; + NV_STATUS status; + + if (pIovaMapping == NULL) + { + return; + } + + pGpu = gpumgrGetGpuFromId(pIovaMapping->iovaspaceId); + if (pGpu == NULL) + { + return; + } + + // + // For guest-allocated memory, we never actually remapped the memory, so we + // shouldn't try to unmap it here. + // + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + return; + } + + nv = NV_GET_NV_STATE(pGpu); + + if (memdescGetFlag(pIovaMapping->pPhysMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) + { + nv_dma_unmap_mmio(nv->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount), + pIovaMapping->iovaArray[0]); + + return; + } + + // + // TODO: Formalize the interface with the OS layers so we can use a common + // definition of OS_IOVA_MAPPING_DATA. + // + pPriv = (void *)pIovaMapping->pOsData; + + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(&pIovaMapping->iovaArray[0], + pIovaMapping->pPhysMemDesc->PageCount); + } + + if (pPriv != NULL) + { + status = nv_dma_unmap_alloc(nv->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount), + &pIovaMapping->iovaArray[0], &pPriv); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: failed to unmap allocation (status = 0x%x)\n", + __FUNCTION__, status); + } + } + else + { + nv_dma_unmap_peer(nv->dma_dev, + NV_RM_PAGES_TO_OS_PAGES(pIovaMapping->pPhysMemDesc->PageCount), + pIovaMapping->iovaArray[0]); + } + + // + // If the OS layer doesn't think in RM page size, we need to fluff out the + // PTE array into RM pages. + // + if (NV_RM_PAGE_SIZE < os_page_size && + !memdescGetContiguity(pIovaMapping->pPhysMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(&pIovaMapping->iovaArray[0], + pIovaMapping->pPhysMemDesc->PageCount); + } + + pIovaMapping->pOsData = NULL; +} + +/*! + * @brief Set the GPU Rail Voltage in Tegra SoC. Currently not supported + * + * @param[in] pGpu GPU object pointer + * @param[in] reqVoltageuV Rail Voltage requested in uV + * @param[out] pSetVoltageuV Rail Voltage set in uV + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +osSetGpuRailVoltage +( + OBJGPU *pGpu, + NvU32 reqVoltageuV, + NvU32 *pSetVoltageuV +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Get the GPU Rail Voltage in Tegra SoC. Currently not supported + * + * @param[in] pGpu GPU object pointer + * @param[out] voltageuV Rail Voltage in uV + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +osGetGpuRailVoltage +( + OBJGPU *pGpu, + NvU32 *pVoltageuV +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Bring down system in a controlled manner on known error conditions. + * + * @bugCode[in] Error code / reason. + */ +void osBugCheck(NvU32 bugCode) +{ + if (bugCode > OS_BUG_CHECK_BUGCODE_LAST) + { + bugCode = OS_BUG_CHECK_BUGCODE_UNKNOWN; + } + + os_bug_check(bugCode, ppOsBugCheckBugcodeStr[bugCode]); +} + +/*! + * @brief Perform an action at assertion failure. + */ +void osAssertFailed(void) +{ + os_dump_stack(); +} + +/*! + * @brief Get the GPU Chip Info - Speedo and IDDQ values + * + * + * @param[in] pGpu GPU object pointer + * @param[out] pGpuSpeedoHv Pointer to GPU Speedo value at high voltage corner. + * @param[out] pGpuSpeedoLv Pointer to GPU Speedo value at low voltage corner. 
+ * @param[out] pGpuIddq Pointer to GPU Iddq Value + * @param[out] pChipSkuId SKU ID for the chip + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +osGetChipInfo +( + OBJGPU *pGpu, + NvU32 *pGpuSpeedoHv, + NvU32 *pGpuSpeedoLv, + NvU32 *pGpuIddq, + NvU32 *pChipSkuId +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/* + * @brief Get the GPU Rail Voltage Info (i.e. Min, Max and StepSize) in Tegra SoC. + * + * @param[in] pGpu GPU object pointer + * @param[out] pMinVoltageuV Minimum Voltage supported on the Rail in Micro Volts + * @param[out] pMaxVoltageuV Maximum Voltage supported on the Rail in Micro Volts + * @param[out] pStepVoltageuV Voltage Step-size supported on the Rail in Micro Volts + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +osGetGpuRailVoltageInfo +( + OBJGPU *pGpu, + NvU32 *pMinVoltageuV, + NvU32 *pMaxVoltageuV, + NvU32 *pStepVoltageuV +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Get the current opaque security token. + * + * For Linux the security token is the effective UID of a process and process ID + * + * Note: This function allocates memory for the token. The onus is on the calling + * function to free the memory associated with the token once its done with it. + * + * @return pointer to the security token. + */ +PSECURITY_TOKEN +osGetSecurityToken() +{ + NV_STATUS rmStatus; + TOKEN_USER *pTokenUser; + + pTokenUser = portMemAllocNonPaged(sizeof(TOKEN_USER)); + if (pTokenUser == NULL) + { + return NULL; + } + rmStatus = os_get_euid(&pTokenUser->euid); + if (rmStatus != NV_OK) + { + portMemFree(pTokenUser); + return NULL; + } + + pTokenUser->pid = os_get_current_process(); + + return (PSECURITY_TOKEN)pTokenUser; +} + +PUID_TOKEN +osGetCurrentUidToken(void) +{ + NV_STATUS rmStatus; + NvU32 *pUidToken; + + pUidToken = portMemAllocNonPaged(sizeof(NvU32)); + if (pUidToken == NULL) + { + return NULL; + } + + rmStatus = os_get_euid(pUidToken); + if (rmStatus != NV_OK) + { + portMemFree(pUidToken); + return NULL; + } + + return (PUID_TOKEN)pUidToken; +} + +/*! + * @brief Interface function to validate the token for the current client + * + * This function takes two tokens as parameters, validates them and checks + * if either the PID or EUID from client database matches the current PID or EUID. + * + * @param[in] pClientSecurityToken security token cached in the client db + * @param[in] pCurrentSecurityToken security token of the current client + * @return NV_OK if the validation is successful + * NV_ERR_INVALID_CLIENT if the tokens do not match + * NV_ERR_INVALID_POINTER if the tokens are invalid + */ +NV_STATUS +osValidateClientTokens +( + PSECURITY_TOKEN pClientSecurityToken, + PSECURITY_TOKEN pCurrentSecurityToken +) +{ + PTOKEN_USER pClientTokenUser = (PTOKEN_USER)pClientSecurityToken; + PTOKEN_USER pCurrentTokenUser = (PTOKEN_USER)pCurrentSecurityToken; + + NV_ASSERT_OR_RETURN((pClientTokenUser != NULL), NV_ERR_INVALID_POINTER); + NV_ASSERT_OR_RETURN((pCurrentTokenUser != NULL), NV_ERR_INVALID_POINTER); + + if ((pClientTokenUser->euid != pCurrentTokenUser->euid) && + (pClientTokenUser->pid != pCurrentTokenUser->pid)) + { + NV_PRINTF(LEVEL_INFO, + "NVRM: %s: Current security token doesn't match the one in the client database. " + "Current EUID: %d, PID: %d; Client DB EUID: %d, PID: %d\n", + __FUNCTION__, pCurrentTokenUser->euid, pCurrentTokenUser->pid, + pClientTokenUser->euid, pClientTokenUser->pid); + return NV_ERR_INVALID_CLIENT; + } + + return NV_OK; +} + +/*! 
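+ * Editor's note (illustrative sketch, not part of the original change;
+ * pCachedUidToken is a hypothetical variable): a caller typically pairs this
+ * with osGetCurrentUidToken() above and frees the returned token itself, e.g.
+ *
+ *     PUID_TOKEN pCurrent = osGetCurrentUidToken();
+ *     if (pCurrent != NULL)
+ *     {
+ *         NvBool bMatch = osUidTokensEqual(pCachedUidToken, pCurrent);
+ *         portMemFree(pCurrent);
+ *         // ...
+ *     }
+ *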
+ * @brief Interface function to compare the tokens for two client + * + * This function takes two tokens as parameters, validates them and checks + * if the EUIDs of each token match. + * + * @param[in] pToken1 Token to compare + * @param[in] pToken2 Token to compare + * @return NV_TRUE if the tokens match + * NV_FALSE if the tokens do not match + */ +NvBool +osUidTokensEqual +( + PUID_TOKEN pUidToken1, + PUID_TOKEN pUidToken2 +) +{ + NvU32 * pTokenUser1 = (NvU32*)pUidToken1; + NvU32 * pTokenUser2 = (NvU32*)pUidToken2; + + NV_ASSERT_OR_RETURN((pTokenUser1 != NULL), NV_FALSE); + NV_ASSERT_OR_RETURN((pTokenUser2 != NULL), NV_FALSE); + + if (*pTokenUser1 != *pTokenUser2) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +NvBool +osRemoveGpuSupported +( + void +) +{ + return os_pci_remove_supported(); +} + +/* + * @brief Get the address ranges assigned to local or peer GPUs on a system that + * supports hardware address translation services (ATS) over NVLink/C2C. + * + * @note + * - All address values are in the System Physical Address (SPA) space + * - Targets can either be "Local" (bIsPeer=False) or for a specified "Peer" + * (bIsPeer=True, peerIndex=#) GPU + * - Granularity of the target address space is returned as a bit shift value + * (e.g. granularity=37 implies a granularity of 128GiB) + * - Target address and mask values have a specified bit width, and represent + * the higher order bits above the target address granularity + * + * @param[in] pGpu GPU object pointer + * @param[out] pAddrSysPhys Pointer to hold SPA aligned at 128GB boundary + * @param[out] pAddrWidth Address range width value pointer + * @param[out] pMask Mask value pointer + * @param[out] pMaskWidth Mask width value pointer + * @param[out] pGranularity Granularity value pointer + * @param[in] bIsPeer NV_TRUE if this is a peer, local GPU otherwise + * @param[in] peerIndex Peer index + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED + * + * A return value of NV_ERR_NOT_SUPPORTED for the local GPU would + * indicate that the system does not support ATS over NVLink/C2C + */ +NV_STATUS +osGetAtsTargetAddressRange +( + OBJGPU *pGpu, + NvU32 *pAddrSysPhys, + NvU32 *pAddrWidth, + NvU32 *pMask, + NvU32 *pMaskWidth, + NvU32 *pGranularity, + NvBool bIsPeer, + NvU32 peerIndex +) +{ +#if RMCFG_MODULE_KERNEL_BIF && RMCFG_MODULE_KERNEL_NVLINK && (defined(NVCPU_PPC64LE) || defined(NVCPU_AARCH64)) + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + nv_state_t *nv; + const int addrMaskWidth = 0x10; + + if (!pKernelNvlink && !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_C2C_LINK_UP)) + return NV_ERR_INVALID_ARGUMENT; + + nv = NV_GET_NV_STATE(pGpu); + + // + // TODO : Bug 1848958 restricts peer device tree parsing. Return early if + // peer values are requested. This should be fixed by passing correct pGpu + // pointer of the peer GPU retrieved using peerIds. 
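+    // Editor's note (illustrative, not part of the original change): for the
+    // local-GPU path below, only the bits above the reported granularity are
+    // returned. For example, with a granularity of 37 bits (128 GiB), an SPA
+    // of 0x40_0000_0000 (256 GiB) is reported as
+    //     *pAddrSysPhys = 0x40_0000_0000 >> 37 = 2
+    // matching the shift applied to addrSysPhys further down.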
+ // + if (bIsPeer) + { + const int addrWidth = 0x10; + const NvU32 guestAddrGranularity = 37; + + *pAddrSysPhys = 0; + *pAddrWidth = addrWidth; + *pMask = 0; + *pMaskWidth = addrMaskWidth; + *pGranularity = guestAddrGranularity; + return NV_OK; + } + else + { + NvU64 addrSysPhys; + + NV_STATUS status = nv_get_device_memory_config(nv, &addrSysPhys, NULL, + pAddrWidth, pGranularity, NULL); + if (status == NV_OK) + { + *pMask = NVBIT(*pAddrWidth) - 1U; + *pMaskWidth = addrMaskWidth; + + *pAddrSysPhys = addrSysPhys >> *pGranularity; + } + return status; + } + + return NV_OK; +#endif + return NV_ERR_NOT_SUPPORTED; +} + +/* + * @brief Get the physical address in CPU address map and NUMA node id + * of the GPU memory. + * + * @note + * - The physical address is System Physical Address (SPA) in baremetal/host + * and Intermediate Physical Address(IPA) or Guest Physical Address(GPA) + * inside a VM. + * + * @param[in] pGpu GPU object pointer + * @param[out] pAddrPhys Pointer to hold the physical address of FB in + * CPU address map + * @param[out] pNodeId NUMA nodeID of respective GPU memory + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED + * + */ +NV_STATUS +osGetFbNumaInfo +( + OBJGPU *pGpu, + NvU64 *pAddrPhys, + NvS32 *pNodeId +) +{ +#if RMCFG_MODULE_KERNEL_BIF && RMCFG_MODULE_KERNEL_NVLINK && (defined(NVCPU_PPC64LE) || defined(NVCPU_AARCH64)) + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + nv_state_t *nv; + + *pNodeId = NV0000_CTRL_NO_NUMA_NODE; + + if (!pKernelNvlink && !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_C2C_LINK_UP)) + return NV_ERR_INVALID_ARGUMENT; + + nv = NV_GET_NV_STATE(pGpu); + + NV_STATUS status = nv_get_device_memory_config(nv, NULL, pAddrPhys, NULL, NULL, pNodeId); + + return status; +#endif + return NV_ERR_NOT_SUPPORTED; +} + + +/* + * @brief Verif only function to get the chiplib overrides for link connection + * state for all C2C links. 
+ * + * If chiplib overrides exist, each link can either be enabled (1) or disabled (0) + * + * @param[in] pGpu GPU object pointer + * @param[in] maxLinks Size of pLinkConnection array + * @param[out] pLinkConnection array of pLinkConnection values to be populated by MODS + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED (no overrides available) + */ +NV_STATUS +osGetForcedC2CConnection +( + OBJGPU *pGpu, + NvU32 maxLinks, + NvU32 *pLinkConnection +) +{ + int i, ret; + NV_STATUS status; + char path[64]; + OBJOS *pOS; + OBJSYS *pSys; + + NV_ASSERT_OR_RETURN((pLinkConnection != NULL), NV_ERR_INVALID_POINTER); + NV_ASSERT_OR_RETURN((maxLinks > 0), NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN((pGpu != NULL), NV_ERR_INVALID_ARGUMENT); + + pSys = SYS_GET_INSTANCE(); + pOS = SYS_GET_OS(pSys); + if (pOS == NULL || pOS->osSimEscapeRead == NULL) + { + NV_PRINTF(LEVEL_ERROR, "%s: escape reads not supported on platform\n", + __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + for (i = 0; i < maxLinks; i++) + { + ret = os_snprintf(path, sizeof(path), "CPU_MODEL|CM_ATS_ADDRESS|C2C%u", i); + NV_ASSERT((ret > 0) && (ret < (sizeof(path) - 1))); + + status = pOS->osSimEscapeRead(pGpu, path, 0, 4, &pLinkConnection[i]); + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "%s: %s=0x%X\n", __FUNCTION__, + path, pLinkConnection[i]); + } + else + { + NV_PRINTF(LEVEL_INFO, "%s: osSimEscapeRead for '%s' failed (%u)\n", + __FUNCTION__, path, status); + return NV_ERR_NOT_SUPPORTED; + } + } + return NV_OK; +} + +static NV_STATUS +osGetSmbiosTableInfo +( + const NvU8 *pMappedAddr, + NvU64 *pBaseAddr, + NvU64 *pLength, + NvU64 *pNumSubTypes, + NvU32 *pVersion +) +{ + *pBaseAddr = 0; + *pLength = 0; + *pNumSubTypes = 0; + *pVersion = 0; + + if (portMemCmp(pMappedAddr, "_SM3_", 5) == 0) + { + *pVersion = (pMappedAddr[7] << 8) | pMappedAddr[8]; + portMemCopy(pLength, 4, pMappedAddr + 12, 4); + portMemCopy(pBaseAddr, 8, pMappedAddr + 16, 8); + + *pNumSubTypes = *pLength / 4; + + return NV_OK; + } + + if (portMemCmp(pMappedAddr, "_SM_", 4) == 0) + { + *pVersion = (pMappedAddr[6] << 8) | pMappedAddr[7]; + + pMappedAddr += 16; + + if (portMemCmp(pMappedAddr, "_DMI_", 5) == 0) + { + portMemCopy(pLength, 2, pMappedAddr + 6, 2); + portMemCopy(pBaseAddr, 4, pMappedAddr + 8, 4); + portMemCopy(pNumSubTypes, 2, pMappedAddr + 12, 2); + + if (!*pVersion) + *pVersion = (pMappedAddr[14] & 0xF0) << 4 | + (pMappedAddr[14] & 0x0F); + + return NV_OK; + } + } + + return NV_ERR_INVALID_ADDRESS; +} + + +/* + * @brief Function to export SMBIOS table. Also, maps table in kernel-space. + * + * @param[out] ppBaseVAddr Base virtual address of SMBIOS table. + * @param[out] pLength Size of SMBIOS table. + * @param[out] pNumSubTypes Count of structures (types) embedded in + * the SMBIOS table. 
+ * @param[out] pVersion SMBIOS version + * + * @return NV_OK, NV_ERR_INSUFFICIENT_RESOURCES or NV_ERR_INVALID_ADDRESS + * or errors from OS layer + */ +NV_STATUS +osGetSmbiosTable +( + void **ppBaseVAddr, + NvU64 *pLength, + NvU64 *pNumSubTypes, + NvU32 *pVersion +) +{ + NV_STATUS status = NV_OK; + NvU64 physSmbiosAddr = ~0ull; + void *pMappedAddr = NULL; + NvU64 basePAddr = 0; + + if (!NVCPU_IS_X86_64) + { + return NV_ERR_NOT_SUPPORTED; + } + + status = os_get_smbios_header(&physSmbiosAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "%s: Failed query SMBIOS table with error: %x \n", + __FUNCTION__, status); + return status; + } + + NV_ASSERT(physSmbiosAddr != ~0ull); + + pMappedAddr = osMapKernelSpace(physSmbiosAddr, + os_page_size, + NV_MEMORY_CACHED, + NV_PROTECT_READ_WRITE); + if (!pMappedAddr) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + status = osGetSmbiosTableInfo(pMappedAddr, + &basePAddr, + pLength, + pNumSubTypes, + pVersion); + + osUnmapKernelSpace(pMappedAddr, os_page_size); + + if (status != NV_OK) + { + return status; + } + + *ppBaseVAddr = osMapKernelSpace(basePAddr, + *pLength, + NV_MEMORY_CACHED, + NV_PROTECT_READ_WRITE); + if (!*ppBaseVAddr) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +/* + * @brief Function to free SMBIOS table mappings + * + * @param[in] pBaseVAddr Base virtual address of SMBIOS table. + * @param[in] length Size of SMBIOS table. + * + */ +void +osPutSmbiosTable +( + void *pBaseVAddr, + NvU64 length +) +{ + osUnmapKernelSpace(pBaseVAddr, length); +} + +NV_STATUS +osGetAcpiRsdpFromUefi +( + NvU32 *pRsdpAddr +) +{ + return os_get_acpi_rsdp_from_uefi(pRsdpAddr); +} + +/* + * @brief Returns NV_TRUE if NvSwitch device is present in the system. + */ +NvBool +osIsNvswitchPresent +( + void +) +{ + return os_is_nvswitch_present(); +} + +/* + * @brief Function to add crashlog buffer entry. + * + * @param[in] pBuffer virt_addr of nvlog buffer + * @param[in] length size of nvlog buffer + */ +void +osAddRecordForCrashLog +( + void *pBuffer, + NvU32 length +) +{ + os_add_record_for_crashLog(pBuffer, length); +} + +/* + * @brief Function to delete crashlog buffer entry. + * + * @param[in] pBuffer virt_addr of nvlog buffer + */ +void +osDeleteRecordForCrashLog +( + void *pBuffer +) +{ + os_delete_record_for_crashLog(pBuffer); +} + +/* + * @brief Queries the sysfs interface to get memblock size + * @param[out] memblock_size Pointer to the memblock_size + */ +NV_STATUS +osNumaMemblockSize +( + NvU64 *memblock_size +) +{ + return os_numa_memblock_size(memblock_size); +} + +NvBool +osNumaOnliningEnabled +( + OS_GPU_INFO *pOsGpuInfo +) +{ + NvS32 numaNodeId = NV0000_CTRL_NO_NUMA_NODE; + + // + // Note that this numaNodeId value fetched from Linux layer might not be + // accurate since it is possible to overwrite it with regkey on some configs + // + if (nv_get_device_memory_config(pOsGpuInfo, NULL, NULL, NULL, NULL, + &numaNodeId) != NV_OK) + { + return NV_FALSE; + } + + return (numaNodeId != NV0000_CTRL_NO_NUMA_NODE); +} + +/* + * @brief Function to call NUMA allocation entry. 
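+ * Editor's note (illustrative sketch, not part of the original change; nid,
+ * size and addr are placeholders): the OS_ALLOC_PAGES_NODE_* flags are
+ * translated to NV_ALLOC_PAGES_NODE_* before reaching os_alloc_pages_node(),
+ * and the page helpers below can be used to hold a reference, e.g.
+ *
+ *     NvU64 addr;
+ *     if (osAllocPagesNode(nid, size, OS_ALLOC_PAGES_NODE_SKIP_RECLAIM,
+ *                          &addr) == NV_OK)
+ *     {
+ *         osAllocAcquirePage(addr);
+ *         // ...
+ *         osAllocReleasePage(addr);
+ *     }
+ *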
+ * + * @param[in] nid NUMA node id + * @param[in] size Allocation size + * @param[in] flag Allocation flags + * @param[out] pAddress Ptr to the allocated physical address + */ +NV_STATUS +osAllocPagesNode +( + NvS32 nid, + NvLength size, + NvU32 flag, + NvU64 *pAddress +) +{ + NV_STATUS status = NV_OK; + NvU32 localFlag = NV_ALLOC_PAGES_NODE_NONE; + + if (pAddress == NULL || nid < 0 || size > NV_U32_MAX) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Translate the flags + if (flag & OS_ALLOC_PAGES_NODE_SKIP_RECLAIM) + { + localFlag |= NV_ALLOC_PAGES_NODE_SKIP_RECLAIM; + } + + status = os_alloc_pages_node(nid, (NvU32)size, localFlag, pAddress); + return status; +} + +NV_STATUS +osAllocAcquirePage +( + NvU64 pAddress +) +{ + os_get_page(pAddress); + return NV_OK; +} + +NV_STATUS +osAllocReleasePage +( + NvU64 pAddress +) +{ + os_put_page(pAddress); + return NV_OK; +} + +/* + * @brief Function to return refcount on a page + * @param[in] address The physical address of the page + */ +NvU32 +osGetPageRefcount +( + NvU64 pAddress +) +{ + return os_get_page_refcount(pAddress); +} + +/* + * @brief Function to return the number of tail pages if the address is + * referring to a compound page; For non-compound pages, 1 is returned. + * @param[in] address The physical address of the page + */ +NvU32 +osCountTailPages +( + NvU64 pAddress +) +{ + return os_count_tail_pages(pAddress); +} + +/* + * @brief Upon success, gets NPU register address range. + * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[out] pBase base (physical) of NPU register address range + * @param[out] pSize size of NPU register address range + */ +NV_STATUS +osGetIbmnpuGenregInfo +( + OS_GPU_INFO *pOsGpuInfo, + NvU64 *pBase, + NvU64 *pSize +) +{ + return nv_get_ibmnpu_genreg_info(pOsGpuInfo, pBase, pSize, NULL); +} + +/* + * @brief Upon success, gets NPU's relaxed ordering mode. + * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[out] pMode relaxed ordering mode + */ +NV_STATUS +osGetIbmnpuRelaxedOrderingMode +( + OS_GPU_INFO *pOsGpuInfo, + NvBool *pMode +) +{ + return nv_get_ibmnpu_relaxed_ordering_mode(pOsGpuInfo, pMode); +} + +/* + * @brief Waits for NVLink HW flush on an NPU associated with a GPU. 
+ * + * @param[in] pOsGpuInfo OS specific GPU information pointer + */ +void +osWaitForIbmnpuRsync +( + OS_GPU_INFO *pOsGpuInfo +) +{ + nv_wait_for_ibmnpu_rsync(pOsGpuInfo); +} + +NvU32 +osGetPageSize() +{ + return os_page_size; +} + + + +/* + * @brief Opens a new temporary file for reading and writing + * + * @param[in] ppFile void double pointer + * + * @returns NV_STATUS, NV_OK if success, + NV_ERR_GENERIC, if error + NV_ERR_NOT_SUPPORTED, for unsupported platforms + */ +NV_STATUS +osOpenTemporaryFile +( + void **ppFile +) +{ + return os_open_temporary_file(ppFile); +} + +/* + * @brief Closes the specified temporary file + * + * @param[in] pFile Pointer to file + * + * @returns void + */ +void +osCloseFile +( + void *pFile +) +{ + os_close_file(pFile); +} + +/* + * @brief Writes the buffer to the specified file at the given offset + * + * @param[in] pFile Pointer to file (void) + * @param[in] pBuffer Pointer to buffer from which to copy + * @param[in] size Size of the copy + * @parma[in] offset offset in the file + * + * @returns NV_STATUS, NV_OK if success, + NV_ERR_GENERIC, if error + NV_ERR_NOT_SUPPORTED, for unsupported platforms + */ +NV_STATUS +osWriteToFile +( + void *pFile, + NvU8 *pBuffer, + NvU64 size, + NvU64 offset +) +{ + return os_write_file(pFile, pBuffer, size, offset); +} + +/* + * @brief Reads from the specified file at the given offset + * + * @param[in] pFile Pointer to file (void *) + * @param[in] pBuffer Pointer to buffer to which the data is copied + * @param[in] size Size of the copy + * @parma[in] offset offset in the file + * + * @returns NV_STATUS, NV_OK if success, + NV_ERR_GENERIC, if error + NV_ERR_NOT_SUPPORTED, for unsupported platforms + */ +NV_STATUS +osReadFromFile +( + void *pFile, + NvU8 *pBuffer, + NvU64 size, + NvU64 offset +) +{ + return os_read_file(pFile, pBuffer, size, offset); +} + +/* + * @brief Unregisters caps from the capability framework. + * The function assumes that the caps are allocated and stored in the + * hierarchical order. If they aren't, OS (Linux kernel) would warn and + * leak the caps. + * + * @param[in] pOsRmCaps caps of interest + */ +void +osRmCapUnregister +( + OS_RM_CAPS **ppOsRmCaps +) +{ + OS_RM_CAPS *pOsRmCaps = *ppOsRmCaps; + NvS32 i; + + if (pOsRmCaps == NULL) + { + return; + } + + for (i = pOsRmCaps->count - 1; i >= 0; i--) + { + if (pOsRmCaps->caps[i] != NULL) + { + os_nv_cap_destroy_entry(pOsRmCaps->caps[i]); + } + } + + os_free_mem(pOsRmCaps->caps); + os_free_mem(pOsRmCaps); + + *ppOsRmCaps = NULL; +} + +static NV_STATUS +_allocOsRmCaps +( + OS_RM_CAPS **ppOsRmCaps, + NvU32 count +) +{ + NV_STATUS status; + OS_RM_CAPS *pOsRmCaps; + + *ppOsRmCaps = NULL; + + status = os_alloc_mem((void**)&pOsRmCaps, sizeof(OS_RM_CAPS)); + if (status != NV_OK) + return status; + + pOsRmCaps->count = count; + + status = os_alloc_mem((void**)&pOsRmCaps->caps, sizeof(pOsRmCaps->caps[0]) * count); + if (status != NV_OK) + { + os_free_mem(pOsRmCaps); + return status; + } + + os_mem_set(pOsRmCaps->caps, 0, sizeof(pOsRmCaps->caps[0]) * count); + + *ppOsRmCaps = pOsRmCaps; + return NV_OK; +} + +#define OS_RM_CAP_GPU_DIR 0 +#define OS_RM_CAP_GPU_MIG_DIR 1 +#define OS_RM_CAP_GPU_COUNT 2 + +/* + * @brief Registers OBJGPU with the capability framework. 
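+ * Editor's note (illustrative, not part of the original change): on success
+ * this creates a "gpu<minor>" directory with a "mig" subdirectory under
+ * nvidia_caps_root (and is a no-op returning NV_OK when nvidia_caps_root is
+ * NULL). osRmCapUnregister() above destroys the entries in reverse order of
+ * creation, so a hypothetical caller only needs
+ *
+ *     OS_RM_CAPS *pCaps = NULL;
+ *     if (osRmCapRegisterGpu(pOsGpuInfo, &pCaps) == NV_OK)
+ *     {
+ *         // ...
+ *         osRmCapUnregister(&pCaps);
+ *     }
+ *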
+ * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[out] ppOsRmCaps GPU OS specific capabilities pointer + */ +NV_STATUS +osRmCapRegisterGpu +( + OS_GPU_INFO *pOsGpuInfo, + OS_RM_CAPS **ppOsRmCaps +) +{ + NvU32 minor = nv_get_dev_minor(pOsGpuInfo); + char name[16]; + NV_STATUS status; + OS_RM_CAPS *pOsRmCaps; + nv_cap_t *parent; + nv_cap_t *cap; + + // Return success on the unsupported platforms. + if (nvidia_caps_root == NULL) + { + return NV_OK; + } + + if (*ppOsRmCaps != NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_GPU_COUNT); + if (status != NV_OK) + return status; + + *ppOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "gpu%u", minor); + name[sizeof(name) - 1] = '\0'; + parent = nvidia_caps_root; + + cap = os_nv_cap_create_dir_entry(parent, name, (OS_RUGO | OS_XUGO)); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup gpu%u directory\n", minor); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_GPU_DIR] = cap; + parent = cap; + + // TODO: Bug 2679591: Add MIG directory only if SMC is enabled. + // For now, always add "mig" directory. + cap = os_nv_cap_create_dir_entry(parent, "mig", (OS_RUGO | OS_XUGO)); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup mig directory\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_GPU_MIG_DIR] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppOsRmCaps); + + return status; +} + +#define OS_RM_CAP_SMC_PART_DIR 0 +#define OS_RM_CAP_SMC_PART_ACCESS_FILE 1 +#define OS_RM_CAP_SMC_PART_COUNT 2 + +/* + * @brief Registers SMC partition (a.k.a. GPU instance) with the capability + * framework + * + * @param[in] pGpuOsRmCaps GPU OS specific capabilities pointer + * @param[out] ppPartitionOsRmCaps OS specific capabilities pointer for SMC partition + * @param[in] swizzId SMC partition swizz ID + */ +NV_STATUS +osRmCapRegisterSmcPartition +( + OS_RM_CAPS *pGpuOsRmCaps, + OS_RM_CAPS **ppPartitionOsRmCaps, + NvU32 swizzId +) +{ + char name[16]; + NV_STATUS status; + nv_cap_t *parent; + nv_cap_t *cap; + OS_RM_CAPS *pOsRmCaps; + + // Return success as there is nothing to do. 
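+    // Editor's note (illustrative, not part of the original change): together
+    // with osRmCapRegisterGpu() above, a successful registration results in a
+    // capability hierarchy of roughly
+    //     <nvidia_caps_root>/gpu<minor>/mig/gi<swizzId>/access
+    // where the "gi<swizzId>" directory and "access" file are created below.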
+ if (pGpuOsRmCaps == NULL) + { + return NV_OK; + } + + if (*ppPartitionOsRmCaps != NULL || swizzId >= NV_U32_MAX) + { + return NV_ERR_INVALID_ARGUMENT; + } + + parent = pGpuOsRmCaps->caps[OS_RM_CAP_GPU_MIG_DIR]; + if (parent == NULL) + { + return NV_ERR_INVALID_STATE; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SMC_PART_COUNT); + if (status != NV_OK) + return status; + + *ppPartitionOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "gi%u", swizzId); + name[sizeof(name) - 1] = '\0'; + + cap = os_nv_cap_create_dir_entry(parent, name, OS_RUGO | OS_XUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup gi%u directory\n", + swizzId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_PART_DIR] = cap; + parent = cap; + + cap = os_nv_cap_create_file_entry(parent, "access", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup access file for ID:%u\n", + swizzId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_PART_ACCESS_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppPartitionOsRmCaps); + + return status; +} + +#define OS_RM_CAP_SMC_EXEC_PART_DIR 0 +#define OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE 1 +#define OS_RM_CAP_SMC_EXEC_PART_COUNT 2 + +/* + * @brief Registers SMC execution partition (a.k.a. compute instance) with the + * capability framework + * + * @param[in] pPartitionOsRmCaps OS specific capabilities pointer for SMC partition + * @param[out] ppExecPartitionOsRmCaps OS specific capabilities pointer for SMC execution partition + * @param[in] execPartitionId SMC execution partition ID + */ +NV_STATUS +osRmCapRegisterSmcExecutionPartition +( + OS_RM_CAPS *pPartitionOsRmCaps, + OS_RM_CAPS **ppExecPartitionOsRmCaps, + NvU32 execPartitionId +) +{ + char name[16]; + NV_STATUS status; + nv_cap_t *parent; + nv_cap_t *cap; + OS_RM_CAPS *pOsRmCaps; + + // Return success as there is nothing to do. 
+ if (pPartitionOsRmCaps == NULL) + { + return NV_OK; + } + + if ((*ppExecPartitionOsRmCaps != NULL) || (execPartitionId >= NV_U32_MAX)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + parent = pPartitionOsRmCaps->caps[OS_RM_CAP_SMC_PART_DIR]; + if (parent == NULL) + { + return NV_ERR_INVALID_STATE; + } + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SMC_EXEC_PART_COUNT); + if (status != NV_OK) + { + return status; + } + + *ppExecPartitionOsRmCaps = pOsRmCaps; + + os_snprintf(name, sizeof(name), "ci%u", execPartitionId); + name[sizeof(name) - 1] = '\0'; + + cap = os_nv_cap_create_dir_entry(parent, name, OS_RUGO | OS_XUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup ci%u directory\n", + execPartitionId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_EXEC_PART_DIR] = cap; + parent = cap; + + cap = os_nv_cap_create_file_entry(parent, "access", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup access file for ID:%u\n", + execPartitionId); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + + pOsRmCaps->caps[OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppExecPartitionOsRmCaps); + + return status; +} + +/* + * @brief Release the acquired capability + * + * @param[in] dupedCapDescriptor descriptor to be released + */ +void +osRmCapRelease +( + NvU64 dupedCapDescriptor +) +{ + if (dupedCapDescriptor == NV_U64_MAX) + { + return; + } + + os_nv_cap_close_fd((int)dupedCapDescriptor); +} + +#define OS_RM_CAP_SYS_MIG_DIR 0 +#define OS_RM_CAP_SYS_SMC_CONFIG_FILE 1 +#define OS_RM_CAP_SYS_SMC_MONITOR_FILE 2 +#define OS_RM_CAP_SYS_COUNT 3 + +NV_STATUS +osRmCapRegisterSys +( + OS_RM_CAPS **ppOsRmCaps +) +{ + nv_cap_t **ppCaps; + nv_cap_t *parent; + nv_cap_t *cap; + NV_STATUS status; + OS_RM_CAPS *pOsRmCaps; + + if (nvidia_caps_root == NULL) + return NV_ERR_NOT_SUPPORTED; + + status = _allocOsRmCaps(&pOsRmCaps, OS_RM_CAP_SYS_COUNT); + if (status != NV_OK) + return status; + + *ppOsRmCaps = pOsRmCaps; + + ppCaps = pOsRmCaps->caps; + + parent = os_nv_cap_create_dir_entry(nvidia_caps_root, "mig", OS_RUGO | OS_XUGO); + if (parent == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig directory\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_MIG_DIR] = parent; + + cap = os_nv_cap_create_file_entry(parent, "config", OS_RUSR); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig config file\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_SMC_CONFIG_FILE] = cap; + + cap = os_nv_cap_create_file_entry(parent, "monitor", OS_RUGO); + if (cap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create mig monitor file\n"); + status = NV_ERR_OPERATING_SYSTEM; + goto failed; + } + ppCaps[OS_RM_CAP_SYS_SMC_MONITOR_FILE] = cap; + + return NV_OK; + +failed: + osRmCapUnregister(ppOsRmCaps); + return status; +} + +/* + * @brief Acquire the requested capability + * + * @param[in] pOsRmCaps opaque pointer to the caps. + * @param[in] rmCap the capability to be acquired. + * @param[in] capDescriptor descriptor to be used for validation + * @param[out] dupedCapDescriptor returns duplicated descriptor if validation + * is successful + * + * Note: On Linux, duplicating fd is helpful to let administrators know about + * the capability users. See https://linux.die.net/man/8/lsof usage. 
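+ *
+ * Editor's note (illustrative sketch, not part of the original change;
+ * capDescriptor stands for a client-supplied fd): a hypothetical caller
+ * initializes the output descriptor, acquires the capability, and releases
+ * the duplicated descriptor when done, e.g.
+ *
+ *     NvU64 dupedFd;
+ *     osRmCapInitDescriptor(&dupedFd);
+ *     if (osRmCapAcquire(pOsRmCaps, NV_RM_CAP_SMC_PARTITION_ACCESS,
+ *                        capDescriptor, &dupedFd) == NV_OK)
+ *     {
+ *         // ...
+ *         osRmCapRelease(dupedFd);
+ *     }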
+ */ +NV_STATUS +osRmCapAcquire +( + OS_RM_CAPS *pOsRmCaps, + NvU32 rmCap, + NvU64 capDescriptor, + NvU64 *dupedCapDescriptor +) +{ + nv_cap_t *cap; + int fd = (int)capDescriptor; + int duped_fd; + NvU32 index; + NV_STATUS status; + + *dupedCapDescriptor = NV_U64_MAX; + + switch (rmCap) + { + case NV_RM_CAP_SMC_PARTITION_ACCESS: + { + index = OS_RM_CAP_SMC_PART_ACCESS_FILE; + break; + } + case NV_RM_CAP_EXT_FABRIC_MGMT: + { + status = nv_acquire_fabric_mgmt_cap(fd, &duped_fd); + if (status != NV_OK) + { + return status; + } + + goto done; + } + case NV_RM_CAP_SMC_EXEC_PARTITION_ACCESS: + { + index = OS_RM_CAP_SMC_EXEC_PART_ACCESS_FILE; + break; + } + case NV_RM_CAP_SYS_SMC_CONFIG: + { + index = OS_RM_CAP_SYS_SMC_CONFIG_FILE; + break; + } + case NV_RM_CAP_SYS_SMC_MONITOR: + { + index = OS_RM_CAP_SYS_SMC_MONITOR_FILE; + break; + } + default: + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + if (pOsRmCaps == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (index >= pOsRmCaps->count) + { + return NV_ERR_INVALID_ARGUMENT; + } + + cap = pOsRmCaps->caps[index]; + + duped_fd = os_nv_cap_validate_and_dup_fd(cap, fd); + if (duped_fd < 0) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + +done: + *dupedCapDescriptor = duped_fd; + + return NV_OK; +} + +/* + * @brief Initializes capability descriptor + * + * @param[out] pCapDescriptor descriptor to be used + * + */ +void +osRmCapInitDescriptor +( + NvU64 *pCapDescriptor +) +{ + *pCapDescriptor = NV_U64_MAX; +} + +/* + * @brief Generates random bytes which can be used as a universally unique + * identifier. + * + * @param[out] pBytes Array of random bytes + * @param[in] numBytes Size of the array + */ +NV_STATUS +osGetRandomBytes +( + NvU8 *pBytes, + NvU16 numBytes +) +{ + os_get_random_bytes(pBytes, numBytes); + + return NV_OK; +} + +/* + * @brief Allocate wait queue + * + * @param[out] ppWq Wait queue + */ +NV_STATUS +osAllocWaitQueue +( + OS_WAIT_QUEUE **ppWq +) +{ + return os_alloc_wait_queue(ppWq); +} + +/* + * @brief Free wait queue + * + * @param[in] pWq Wait queue + */ +void +osFreeWaitQueue +( + OS_WAIT_QUEUE *pWq +) +{ + os_free_wait_queue(pWq); +} + +/* + * @brief Put thread to uninterruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWaitUninterruptible +( + OS_WAIT_QUEUE *pWq +) +{ + os_wait_uninterruptible(pWq); +} + +/* + * @brief Put thread to interruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWaitInterruptible +( + OS_WAIT_QUEUE *pWq +) +{ + os_wait_interruptible(pWq); +} + +/* + * @brief Wake up thread from uninterruptible sleep + * + * @param[in] pWq Wait queue + */ +void +osWakeUp +( + OS_WAIT_QUEUE *pWq +) +{ + os_wake_up(pWq); +} + +NV_STATUS +osReadPFPciConfigInVF +( + NvU32 addr, + NvU32 *data +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Returns IMP-relevant data collected from other modules + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[out] pTegraImpImportData Structure to receive the data + * + * @returns NV_OK if successful, + * NV_ERR_BUFFER_TOO_SMALL if the array in TEGRA_IMP_IMPORT_DATA is + * too small, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * other errors as may be returned by subfunctions. + */ +NV_STATUS +osTegraSocGetImpImportData +( + TEGRA_IMP_IMPORT_DATA *pTegraImpImportData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! 
+ * @brief Allocates a specified amount of ISO memory bandwidth for display + * + * floorBandwidthKBPS is the minimum required (i.e., floor) dramclk frequency + * multiplied by the width of the pipe over which the display data will travel. + * (It is understood that the bandwidth calculated by multiplying the clock + * frequency by the pipe width will not be realistically achievable, due to + * overhead in the memory subsystem. The infrastructure will not actually use + * the bandwidth value, except to reverse the calculation to get the required + * dramclk frequency.) + * + * This function is basically a wrapper to call the unix/linux layer. + * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[in] averageBandwidthKBPS Amount of ISO memory bandwidth requested + * @param[in] floorBandwidhtKBPS Min required dramclk freq * pipe width + * + * @returns NV_OK if successful, + * NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is too + * high, and bandwidth cannot be allocated, + * NV_ERR_NOT_SUPPORTED if the functionality is not available, or + * NV_ERR_GENERIC if some other kind of error occurred. + */ +NV_STATUS +osTegraAllocateDisplayBandwidth +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 averageBandwidthKBPS, + NvU32 floorBandwidthKBPS +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Creates or sets up platform specific nano second resolution timer + * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[in] pTmrEvent Pointer to timer event information + * @param[in/out] pTimer pointer to hold high resolution timer object + */ +NV_STATUS +osCreateNanoTimer +( + OS_GPU_INFO *pOsGpuInfo, + void *pTmrEvent, + void **pTimer +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Starts platform specific nano second resolution timer + * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[in] pTimer pointer to high resolution timer object + * @param[in] timens time in nano seconds + */ +NV_STATUS +osStartNanoTimer +( + OS_GPU_INFO *pOsGpuInfo, + void *pTimer, + NvU64 timeNs +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Cancels platform specific nano second resolution timer + * + * @param[in] pOsGpuInfo OS specific GPU information pointer + * @param[in] pTimer pointer to timer object + */ +NV_STATUS +osCancelNanoTimer +( + OS_GPU_INFO *pOsGpuInfo, + void *pTimer +) +{ + return NV_ERR_NOT_SUPPORTED; + +} + +/*! + * @brief Destroys & cancels platform specific nano second resolution timer + * + * + * @param[in] pGpu Device of interest + * @param[in] pTimer pointer to timer object + */ +NV_STATUS +osDestroyNanoTimer +( + OS_GPU_INFO *pOsGpuInfo, + void *pTimer +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Get number of dpAux instances. + * It is wrapper function to call unix/linux layer. + * + * @param[in] pGpu GPU object pointer + * @param[out] pNumIntances Number of dpAux instances. + * + * @returns NV_STATUS, NV_OK if success, + * NV_ERR_GENERIC, if error + * NV_ERR_NOT_SUPPORTED, for unsupported platforms + */ + +NV_STATUS +osGetTegraNumDpAuxInstances +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 *pNumIntances +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/* + * @brief Return the priv Data of current IRQ. + * It is wrapper function to call unix/linux layer. 
+ * + * @param[in] pGpu Device of interest + * @param[out] pPrivData privData of current IRQ + * + * @returns NV_STATUS, NV_OK if success, + * NV_ERR_GENERIC, if error + * NV_ERR_NOT_SUPPORTED, for unsupported platforms + */ +NV_STATUS +osGetCurrentIrqPrivData +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 *pPrivData +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Get the brightness level + * It is wrapper function to call unix/linux layer. + * + * @param[in] pGpu GPU object pointer + * @param[out] brightness Pointer to brightness level + * + * @returns NV_STATUS, NV_OK if success, + * NV_ERR_GENERIC, if error + * NV_ERR_NOT_SUPPORTED, for unsupported platforms + */ +NV_STATUS +osGetTegraBrightnessLevel +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 *brightness +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Set the brightness level + * It is wrapper function to call unix/linux layer. + * + * @param[in] pGpu GPU object pointer + * @param[out] brightness brightness level + * + * @returns NV_STATUS, NV_OK if success, + * NV_ERR_GENERIC, if error + * NV_ERR_NOT_SUPPORTED, for unsupported platforms + */ +NV_STATUS +osSetTegraBrightnessLevel +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 brightness +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/* @brief Gets syncpoint aperture information + * + * @param[in] OS_GPU_INFO OS specific GPU information pointer + * @param[in] syncpointId + * @param[out] *physAddr + * @param[out] *limit + * @param[out] *offset + */ +NV_STATUS +osGetSyncpointAperture +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 syncpointId, + NvU64 *physAddr, + NvU64 *limit, + NvU32 *offset +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Check GPU is accessible or not + * + * @param[in] pGpu GPU object pointer + * + * @returns NVBool, Returns TRUE if the GPU is accessible, + * FALSE, if error + */ +NvBool +osIsGpuAccessible +( + OBJGPU *pGpu +) +{ + return nv_is_gpu_accessible(NV_GET_NV_STATE(pGpu)); +} + +NvBool +osDmabufIsSupported(void) +{ + return os_dma_buf_enabled; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/osapi.c b/src/nvidia/arch/nvalloc/unix/src/osapi.c new file mode 100644 index 000000000..c24402196 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/osapi.c @@ -0,0 +1,5172 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + + +#include +#include +#include +#include +#include +#include +#include // Declares RmInitRm(). +#include "gpu/gpu.h" +#include +#include + +#include +#include +#include +#include "kernel/gpu/mem_mgr/mem_mgr.h" + +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "rmapi/exports.h" +#include "rmapi/rmapi_utils.h" +#include "rmapi/rs_utils.h" +#include "rmapi/resource_fwd_decls.h" +#include +#include +#include "nv-reg.h" +#include "core/hal_mgr.h" +#include "gpu/device/device.h" + +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu/gpu_uuid.h" + +#include "platform/chipset/pci_pbi.h" + +#include "ctrl/ctrl0000/ctrl0000system.h" +#include "ctrl/ctrl0073/ctrl0073dp.h" +#include "ctrl/ctrl0073/ctrl0073system.h" +#include "ctrl/ctrl0073/ctrl0073specific.h" +#include "ctrl/ctrl2080/ctrl2080bios.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrl402c.h" + +#include "g_nv_name_released.h" // released chip entries from nvChipAutoFlags.h + +#include + +#include "gpu/bus/kern_bus.h" + +// +// Helper function which can be called before doing any RM control +// This function: +// +// a. Performs threadStateInit(). +// b. Acquires API lock. +// c. Checks if RMAPI client handle is valid (i.e. RM is initialized) and +// returns early if RMAPI client handle is invalid. +// d. Increments the dynamic power refcount. If GPU is in RTD3 suspended +// state, then it will wake-up the GPU. +// e. Returns the RMAPI interface handle. +// +// This function should be called only when caller doesn't have acquired API +// lock. Caller needs to use RmUnixRmApiEpilogue() after RM control, if +// RmUnixRmApiPrologue() is successful. +// +RM_API *RmUnixRmApiPrologue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode, NvU32 module) +{ + threadStateInit(pThreadNode, THREAD_STATE_FLAGS_NONE); + + if ((rmApiLockAcquire(API_LOCK_FLAGS_NONE, module)) == NV_OK) + { + if ((pNv->rmapi.hClient != 0) && + (os_ref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE) == NV_OK)) + { + return rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + rmApiLockRelease(); + } + + threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE); + + return NULL; +} + +// +// Helper function which can be called after doing RM control, if +// caller has used RmUnixRmApiPrologue() helper function. This function: +// +// a. Decrements the dynamic power refcount. +// b. Release API lock. +// c. Performs threadStateFree(). 
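+//
+// Editor's note (illustrative sketch, not part of the original change): the
+// intended pairing of the two helpers is roughly
+//
+//     THREAD_STATE_NODE threadState;
+//     RM_API *pRmApi = RmUnixRmApiPrologue(pNv, &threadState,
+//                                          RM_LOCK_MODULES_GPU);
+//     if (pRmApi != NULL)
+//     {
+//         // ... issue RM controls through pRmApi ...
+//         RmUnixRmApiEpilogue(pNv, &threadState);
+//     }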
+// +void RmUnixRmApiEpilogue(nv_state_t *pNv, THREAD_STATE_NODE *pThreadNode) +{ + os_unref_dynamic_power(pNv, NV_DYNAMIC_PM_FINE); + rmApiLockRelease(); + threadStateFree(pThreadNode, THREAD_STATE_FLAGS_NONE); +} + +NvBool RmGpuHasIOSpaceEnabled(nv_state_t * nv) +{ + NvU16 val; + NvBool has_io; + os_pci_read_word(nv->handle, NV_CONFIG_PCI_NV_1, &val); + has_io = FLD_TEST_DRF(_CONFIG, _PCI_NV_1, _IO_SPACE, _ENABLED, val); + return has_io; +} + +// This is a stub function for unix +void osHandleDeferredRecovery( + OBJGPU *pGpu +) +{ + +} + +// This is a stub function for unix +NvBool osIsSwPreInitOnly +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_FALSE; +} + +const NvU8 * RmGetGpuUuidRaw( + nv_state_t *pNv +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + NvU32 gidFlags; + NvBool isApiLockTaken = NV_FALSE; + + if (pNv->nv_uuid_cache.valid) + goto done; + + // + // PBI is not present in simulation and the loop inside + // pciPbiReadUuid takes up considerable amount of time in + // simulation environment during RM load. + // + if (pGpu && IS_SIMULATION(pGpu)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + } + else + { + rmStatus = pciPbiReadUuid(pNv->handle, pNv->nv_uuid_cache.uuid); + } + + if (rmStatus == NV_OK) + { + rmStatus = gpumgrSetUuid(pNv->gpu_id, pNv->nv_uuid_cache.uuid); + if (rmStatus != NV_OK) + { + return NULL; + } + + pNv->nv_uuid_cache.valid = NV_TRUE; + goto done; + } + else if (rmStatus == NV_ERR_NOT_SUPPORTED) + { + nv_printf(NV_DBG_INFO, + "NVRM: PBI is not supported for GPU " NV_PCI_DEV_FMT "\n", + NV_PCI_DEV_FMT_ARGS(pNv)); + } + + gidFlags = DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1) + | DRF_DEF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_FORMAT,_BINARY); + + if (!rmApiLockIsOwner()) + { + rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU); + if (rmStatus != NV_OK) + { + return NULL; + } + + isApiLockTaken = NV_TRUE; + } + + if (pGpu == NULL) + { + if (isApiLockTaken == NV_TRUE) + { + rmApiLockRelease(); + } + + return NULL; + } + + rmStatus = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags); + if (isApiLockTaken == NV_TRUE) + { + rmApiLockRelease(); + } + + if (rmStatus != NV_OK) + return NULL; + + if (!pGpu->gpuUuid.isInitialized) + return NULL; + + // copy the uuid from the OBJGPU uuid cache + os_mem_copy(pNv->nv_uuid_cache.uuid, pGpu->gpuUuid.uuid, GPU_UUID_LEN); + pNv->nv_uuid_cache.valid = NV_TRUE; + +done: + return pNv->nv_uuid_cache.uuid; +} + +static NV_STATUS RmGpuUuidRawToString( + const NvU8 *pGidData, + char *pBuf, + NvU32 bufLen +) +{ + NvU8 *pGidString; + NvU32 GidStrlen; + NV_STATUS rmStatus; + NvU32 gidFlags; + + gidFlags = DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1); + + rmStatus = transformGidToUserFriendlyString(pGidData, RM_SHA1_GID_SIZE, + &pGidString, &GidStrlen, + gidFlags); + if (rmStatus != NV_OK) + return rmStatus; + + if (bufLen >= GidStrlen) + portMemCopy(pBuf, bufLen, pGidString, GidStrlen); + else + rmStatus = NV_ERR_BUFFER_TOO_SMALL; + + portMemFree((void *)pGidString); + + return rmStatus; +} + +// This function should be called with the API and GPU locks already acquired. +NV_STATUS +RmLogGpuCrash(OBJGPU *pGpu) +{ + NV_STATUS status = NV_OK; + NvBool bGpuIsLost, bGpuIsConnected; + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Re-evaluate whether or not the GPU is accessible. This could be called + // from a recovery context where the OS has re-enabled MMIO for the device. 
+ // This happens during EEH processing on IBM Power + Linux, and marking + // the device as connected again will allow rcdbAddRmGpuDump() to collect + // more GPU state. + // + bGpuIsLost = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST); + bGpuIsConnected = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED); + if (!bGpuIsConnected || bGpuIsLost) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 pmcBoot0 = NV_PRIV_REG_RD32(nv->regs->map_u, NV_PMC_BOOT_0); + if (pmcBoot0 == nvp->pmc_boot_0) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, NV_FALSE); + } + } + + // + // Log the engine data to the Journal object, to be pulled out later. This + // will return NV_WARN_MORE_PROCESSING_REQUIRED if the dump needed to be + // deferred to a passive IRQL. We still log the crash dump as being created + // in that case since it (should) be created shortly thereafter, and + // there's currently not a good way to print the below notification + // publicly from the core RM when the DPC completes. + // + status = rcdbAddRmGpuDump(pGpu); + if (status != NV_OK && status != NV_WARN_MORE_PROCESSING_REQUIRED) + { + NV_PRINTF(LEVEL_ERROR, + "%s: failed to save GPU crash data\n", __FUNCTION__); + } + else + { + status = NV_OK; + nv_printf(NV_DBG_ERRORS, + "NVRM: A GPU crash dump has been created. If possible, please run\n" + "NVRM: nvidia-bug-report.sh as root to collect this data before\n" + "NVRM: the NVIDIA kernel module is unloaded.\n"); + } + + // Restore the disconnected properties, if they were reset + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, bGpuIsConnected); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, bGpuIsLost); + + // Restore persistence mode to the way it was prior to the crash + osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, + pGpu->getProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE)); + + return status; +} + +static void free_os_event_under_lock(nv_event_t *event) +{ + event->active = NV_FALSE; + + // If refcount > 0, event will be freed by osDereferenceObjectCount + // when the last associated RM event is freed. + if (event->refcount == 0) + { + portMemFree(event); + } +} + +static void free_os_events( + nv_file_private_t *nvfp, + NvHandle client +) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t **pprev; + + portSyncSpinlockAcquire(nv->event_spinlock); + + pprev = &nv->event_list; + while (*pprev != NULL) + { + nv_event_t *cur = *pprev; + // + // XXX We must be called from either rm_client_free_os_events() or + // RmFreeUnusedClients() for this to work. + // + if ((cur->hParent == client) || (cur->nvfp == nvfp)) + { + *pprev = cur->next; + free_os_event_under_lock(cur); + } + else + { + pprev = &cur->next; + } + } + + portSyncSpinlockRelease(nv->event_spinlock); +} + +void rm_client_free_os_events( + NvHandle client +) +{ + free_os_events(NULL, client); +} + +void RmFreeUnusedClients( + nv_state_t *nv, + nv_file_private_t *nvfp +) +{ + NvU32 *pClientList; + NvU32 numClients, i; + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // + // The 'nvfp' pointer uniquely identifies an open instance in kernel space + // and the kernel interface layer guarantees that we are not called before + // the associated nvfp descriptor is closed. We can thus safely free + // abandoned clients with matching 'nvfp' pointers. 
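+    // Editor's note (descriptive, not part of the original change): the
+    // filter in free_os_events() frees any event whose hParent or nvfp
+    // matches its arguments; rm_client_free_os_events() passes (NULL, client)
+    // while this function passes (nvfp, 0) below.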
+ // + status = rmapiGetClientHandlesFromOSInfo(nvfp, &pClientList, &numClients); + if (status != NV_OK) + { + numClients = 0; + } + + for (i = 0; i < numClients; ++i) + { + NV_PRINTF(LEVEL_INFO, "freeing abandoned client 0x%x\n", + pClientList[i]); + + } + + if (numClients != 0) + { + pRmApi->FreeClientList(pRmApi, pClientList, numClients); + + portMemFree(pClientList); + } + + // Clean up any remaining events using this nvfp. + free_os_events(nvfp, 0); +} + +static void RmUnbindLock( + nv_state_t *nv +) +{ + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if ((pGpu == NULL) || (gpuGetUserClientCount(pGpu) == 0)) + { + nv->flags |= NV_FLAG_UNBIND_LOCK; + } +} + +static NV_STATUS allocate_os_event( + NvHandle hParent, + nv_file_private_t *nvfp, + NvU32 fd +) +{ + nv_state_t *nv = nv_get_ctl_state(); + NvU32 status = NV_OK; + nv_event_t *event; + + nv_event_t *new_event = portMemAllocNonPaged(sizeof(nv_event_t)); + if (new_event == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portSyncSpinlockAcquire(nv->event_spinlock); + for (event = nv->event_list; event; event = event->next) + { + // Only one event may be associated with a given fd. + if (event->hParent == hParent && event->fd == fd) + { + status = NV_ERR_INVALID_ARGUMENT; + portSyncSpinlockRelease(nv->event_spinlock); + goto done; + } + } + + new_event->next = nv->event_list; + nv->event_list = new_event; + portSyncSpinlockRelease(nv->event_spinlock); + +done: + if (status == NV_OK) + { + new_event->hParent = hParent; + new_event->nvfp = nvfp; + new_event->fd = fd; + new_event->active = NV_TRUE; + new_event->refcount = 0; + + NV_PRINTF(LEVEL_INFO, "allocated OS event:\n"); + NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd); + } + else + { + portMemFree(new_event); + } + + return status; +} + +NV_STATUS RmAllocOsEvent( + NvHandle hParent, + nv_file_private_t *nvfp, + NvU32 fd +) +{ + if (NV_OK != allocate_os_event(hParent, nvfp, fd)) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate OS event\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + return NV_OK; +} + +static NV_STATUS free_os_event( + NvHandle hParent, + NvU32 fd +) +{ + nv_state_t *nv = nv_get_ctl_state(); + nv_event_t *event, *tmp; + NV_STATUS result; + + portSyncSpinlockAcquire(nv->event_spinlock); + tmp = event = nv->event_list; + while (event) + { + if ((event->fd == fd) && (event->hParent == hParent)) + { + if (event == nv->event_list) + nv->event_list = event->next; + else + tmp->next = event->next; + break; + } + tmp = event; + event = event->next; + } + + if (event != NULL) + { + free_os_event_under_lock(event); + result = NV_OK; + } + else + result = NV_ERR_INVALID_EVENT; + portSyncSpinlockRelease(nv->event_spinlock); + + if (result == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "freed OS event:\n"); + NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to find OS event:\n"); + NV_PRINTF(LEVEL_ERROR, " hParent: 0x%x\n", hParent); + NV_PRINTF(LEVEL_ERROR, " fd: %d\n", fd); + } + + return result; +} + +NV_STATUS RmFreeOsEvent( + NvHandle hParent, + NvU32 fd +) +{ + if (NV_OK != free_os_event(hParent, fd)) + { + return NV_ERR_INVALID_EVENT; + } + return NV_OK; +} + +static void RmExecuteWorkItem( + void *pWorkItem +) +{ + nv_work_item_t *pWi = (nv_work_item_t *)pWorkItem; + NvU32 gpuMask; + NvU32 releaseLocks = 0; + + if (!(pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU) && + ((pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) || + 
(pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) || + (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) || + (pWi->flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY))) + { + // Requesting one of the GPU locks without providing a GPU instance + NV_ASSERT(0); + goto done; + } + + // Get locks requested by workitem + if (NV_OK != workItemLocksAcquire(pWi->gpuInstance, pWi->flags, + &releaseLocks, &gpuMask)) + { + goto done; + } + + // Some work items may not require a valid GPU instance + if (pWi->flags & NV_WORK_ITEM_FLAGS_REQUIRES_GPU) + { + pWi->func.pGpuFunction(pWi->gpuInstance, pWi->pData); + } + else + { + pWi->func.pSystemFunction(pWi->pData); + } + + // Release any locks taken + workItemLocksRelease(releaseLocks, gpuMask); + +done: + if ((pWi->pData != NULL) && + !(pWi->flags & NV_WORK_ITEM_FLAGS_DONT_FREE_DATA)) + { + portMemFree(pWi->pData); + } + + portMemFree((void *)pWi); +} + +static NV_STATUS RmGetEventData( + nv_file_private_t *nvfp, + NvP64 pEvent, + NvU32 *MoreEvents, + NvBool bUserModeArgs +) +{ + NV_STATUS RmStatus; + NvUnixEvent *pKernelEvent = NULL; + nv_event_t nv_event; + RMAPI_PARAM_COPY paramCopy; + + RmStatus = nv_get_event(nvfp, &nv_event, MoreEvents); + if (RmStatus != NV_OK) + return NV_ERR_OPERATING_SYSTEM; + + // setup for access to client's parameters + RMAPI_PARAM_COPY_INIT(paramCopy, pKernelEvent, pEvent, 1, sizeof(NvUnixEvent)); + RmStatus = rmapiParamsAcquire(¶mCopy, bUserModeArgs); + if (RmStatus != NV_OK) + return NV_ERR_OPERATING_SYSTEM; + + pKernelEvent->hObject = nv_event.hObject; + pKernelEvent->NotifyIndex = nv_event.index; + pKernelEvent->info32 = nv_event.info32; + pKernelEvent->info16 = nv_event.info16; + + // release client buffer access, with copyout as needed + if (rmapiParamsRelease(¶mCopy) != NV_OK) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static NV_STATUS RmAccessRegistry( + NvHandle hClient, + NvHandle hObject, + NvU32 AccessType, + NvP64 clientDevNodeAddress, + NvU32 DevNodeLength, + NvP64 clientParmStrAddress, + NvU32 ParmStrLength, + NvP64 clientBinaryDataAddress, + NvU32 *pBinaryDataLength, + NvU32 *Data, + NvU32 *Entry +) +{ + NvU32 gpuMask = 0, gpuInstance = 0; + OBJGPU *pGpu; + NvBool isDevice = NV_FALSE; + NV_STATUS RmStatus = NV_ERR_OPERATING_SYSTEM; + RsClient *pClient; + Device *pDevice; + Subdevice *pSubdevice; + + RMAPI_PARAM_COPY devNodeParamCopy; + NvU8 *tmpDevNode = NULL; + NvU32 copyOutDevNodeLength = 0; + + RMAPI_PARAM_COPY parmStrParamCopy; + char *tmpParmStr = NULL; + NvU32 copyOutParmStrLength = 0; + + RMAPI_PARAM_COPY binaryDataParamCopy; + NvU8 *tmpBinaryData = NULL; + NvU32 BinaryDataLength = 0; + NvU32 copyOutBinaryDataLength = 0; + + if (NV_OK != serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, &pClient)) + return NV_ERR_INVALID_CLIENT; + + if (hClient == hObject) + { + pGpu = NULL; + } + else + { + RmStatus = deviceGetByHandle(pClient, hObject, &pDevice); + if (RmStatus != NV_OK) + { + RmStatus = subdeviceGetByHandle(pClient, hObject, &pSubdevice); + if (RmStatus != NV_OK) + goto done; + + RmStatus = rmGpuGroupLockAcquire(pSubdevice->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (RmStatus != NV_OK) + return RmStatus; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + pGpu = GPU_RES_GET_GPU(pSubdevice); + } + else + { + RmStatus = rmGpuGroupLockAcquire(pDevice->deviceInst, + GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (RmStatus != NV_OK) + return RmStatus; + + 
GPU_RES_SET_THREAD_BC_STATE(pDevice); + pGpu = GPU_RES_GET_GPU(pDevice); + isDevice = NV_TRUE; + } + } + + if (pBinaryDataLength) + { + BinaryDataLength = *pBinaryDataLength; + } + + // a passed-in devNode + if (DevNodeLength) + { + // the passed-in DevNodeLength does not account for '\0' + DevNodeLength++; + + if (DevNodeLength > NVOS38_MAX_REGISTRY_STRING_LENGTH) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + + // get access to client's DevNode + RMAPI_PARAM_COPY_INIT(devNodeParamCopy, tmpDevNode, clientDevNodeAddress, DevNodeLength, 1); + devNodeParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + RmStatus = rmapiParamsAcquire(&devNodeParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + } + + // a passed-in parmStr + if (ParmStrLength) + { + // the passed-in ParmStrLength does not account for '\0' + ParmStrLength++; + + if (ParmStrLength > NVOS38_MAX_REGISTRY_STRING_LENGTH) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + + // get access to client's parmStr + RMAPI_PARAM_COPY_INIT(parmStrParamCopy, tmpParmStr, clientParmStrAddress, ParmStrLength, 1); + parmStrParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + RmStatus = rmapiParamsAcquire(&parmStrParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + } + + if ((AccessType == NVOS38_ACCESS_TYPE_READ_BINARY) || + (AccessType == NVOS38_ACCESS_TYPE_WRITE_BINARY)) + { + if ((BinaryDataLength > NVOS38_MAX_REGISTRY_BINARY_LENGTH) || + (BinaryDataLength == 0)) + { + RmStatus = NV_ERR_INVALID_STRING_LENGTH; + goto done; + } + + // get access to client's binaryData + RMAPI_PARAM_COPY_INIT(binaryDataParamCopy, tmpBinaryData, clientBinaryDataAddress, BinaryDataLength, 1); + if (AccessType == NVOS38_ACCESS_TYPE_READ_BINARY) + binaryDataParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + RmStatus = rmapiParamsAcquire(&binaryDataParamCopy, NV_TRUE); + if (RmStatus != NV_OK) + { + RmStatus = NV_ERR_OPERATING_SYSTEM; + goto done; + } + } + + switch (AccessType) + { + case NVOS38_ACCESS_TYPE_READ_DWORD: + RmStatus = osReadRegistryDword(pGpu, + tmpParmStr, Data); + break; + + case NVOS38_ACCESS_TYPE_WRITE_DWORD: + if (isDevice && osIsAdministrator()) + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + RmStatus = osWriteRegistryDword(pGpu, + tmpParmStr, *Data); + + if (RmStatus != NV_OK) + goto done; + } + break; + } + + RmStatus = osWriteRegistryDword(pGpu, + tmpParmStr, *Data); + break; + + case NVOS38_ACCESS_TYPE_READ_BINARY: + RmStatus = osReadRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, &BinaryDataLength); + + if (RmStatus != NV_OK) + { + goto done; + } + + if (BinaryDataLength) + copyOutBinaryDataLength = BinaryDataLength; + + break; + + case NVOS38_ACCESS_TYPE_WRITE_BINARY: + if (isDevice && osIsAdministrator()) + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + RmStatus = osWriteRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, + BinaryDataLength); + + if (RmStatus != NV_OK) + goto done; + } + break; + } + + RmStatus = osWriteRegistryBinary(pGpu, + tmpParmStr, tmpBinaryData, + BinaryDataLength); + break; + + default: + RmStatus = NV_ERR_INVALID_ACCESS_TYPE; + } + + done: + if (gpuMask != 0) + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + + if (tmpDevNode != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutDevNodeLength == 0)) + devNodeParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + 
devNodeParamCopy.paramsSize = copyOutDevNodeLength; + if (NV_OK != rmapiParamsRelease(&devNodeParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + } + if (tmpParmStr != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutParmStrLength == 0)) + parmStrParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + parmStrParamCopy.paramsSize = copyOutParmStrLength; + if (NV_OK != rmapiParamsRelease(&parmStrParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + } + if (tmpBinaryData != NULL) + { + // skip copyout on error + if ((RmStatus != NV_OK) || (copyOutBinaryDataLength == 0)) + binaryDataParamCopy.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + binaryDataParamCopy.paramsSize = copyOutBinaryDataLength; + if (NV_OK != rmapiParamsRelease(&binaryDataParamCopy)) + if (RmStatus == NV_OK) + RmStatus = NV_ERR_OPERATING_SYSTEM; + *pBinaryDataLength = copyOutBinaryDataLength; + } + + serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, pClient); + return RmStatus; +} + +static NV_STATUS RmUpdateDeviceMappingInfo( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMappable, + void *pOldCpuAddress, + void *pNewCpuAddress +) +{ + NV_STATUS status; + RsClient *pClient; + RsResourceRef *pMappableRef; + RsCpuMapping *pCpuMapping; + Device *pDevice; + Subdevice *pSubdevice; + NvU32 gpuMask = 0; + + status = serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + if (status != NV_OK) + { + status = subdeviceGetByHandle(pClient, hDevice, &pSubdevice); + if (status != NV_OK) + goto done; + + status = rmGpuGroupLockAcquire(pSubdevice->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (status != NV_OK) + goto done; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + else + { + status = rmGpuGroupLockAcquire(pDevice->deviceInst, + GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, + &gpuMask); + if (status != NV_OK) + goto done; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + } + + status = clientGetResourceRef(pClient, hMappable, &pMappableRef); + if (status != NV_OK) + goto done; + + if ((objDynamicCastById(pMappableRef->pResource, classId(Memory)) == NULL) && + (objDynamicCastById(pMappableRef->pResource, classId(KernelChannel)) == NULL)) + { + status = NV_ERR_INVALID_OBJECT_HANDLE; + goto done; + } + + status = refFindCpuMappingWithFilter(pMappableRef, + NV_PTR_TO_NvP64(pOldCpuAddress), + serverutilMappingFilterCurrentUserProc, + &pCpuMapping); + if (status != NV_OK) + goto done; + + pCpuMapping->pLinearAddress = NV_PTR_TO_NvP64(pNewCpuAddress); + +done: + + if (gpuMask != 0) + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + + serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, pClient); + return status; +} + +static NV_STATUS RmPerformVersionCheck( + void *pData, + NvU32 dataSize +) +{ + nv_ioctl_rm_api_version_t *pParams; + char clientCh, rmCh; + const char *rmStr = NV_VERSION_STRING; + NvBool relaxed = NV_FALSE; + NvU32 i; + + if (dataSize != sizeof(nv_ioctl_rm_api_version_t)) + return NV_ERR_INVALID_ARGUMENT; + + pParams = pData; + + // + // write the reply value, so that the client knows we recognized + // the request + // + pParams->reply = NV_RM_API_VERSION_REPLY_RECOGNIZED; + + // + // the client requested to override the version check; just return + // success. 
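    //
    // The comparison loop below walks the two version strings character by
    // character: by default an exact match is required, while the RELAXED
    // command accepts them as soon as they agree up to and including the
    // first '.'.  The same policy as a standalone sketch (hypothetical
    // helper):
    //
    //     /* return 1 if the strings match under the given policy, else 0 */
    //     static int versionsMatch(const char *client, const char *rm, int relaxed)
    //     {
    //         for (; *client == *rm; client++, rm++)
    //         {
    //             if (*client == '\0')
    //                 return 1;               /* identical strings            */
    //             if (relaxed && (*client == '.'))
    //                 return 1;               /* same major branch is enough  */
    //         }
    //         return 0;
    //     }
    //
    //     /* e.g. versionsMatch("515.43.04", "515.99.99", 1) == 1,
    //      *      versionsMatch("515.43.04", "515.99.99", 0) == 0  */
    //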
+ // + if (pParams->cmd == NV_RM_API_VERSION_CMD_OVERRIDE) + { + return NV_OK; + } + + // + // the client requested relaxed version checking; we will only + // compare the strings until the first decimal point. + // + if (pParams->cmd == NV_RM_API_VERSION_CMD_RELAXED) + { + relaxed = NV_TRUE; + } + + // + // rmStr (i.e., NV_VERSION_STRING) must be null-terminated and fit within + // NV_RM_API_VERSION_STRING_LENGTH, so that: + // + // (1) If the versions don't match, we can return rmStr in + // pParams->versionString. + // (2) The below loop is guaranteed to not overrun rmStr. + // + if ((os_string_length(rmStr) + 1) > NV_RM_API_VERSION_STRING_LENGTH) + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + for (i = 0; i < NV_RM_API_VERSION_STRING_LENGTH; i++) + { + clientCh = pParams->versionString[i]; + rmCh = rmStr[i]; + + // + // fail if the current character is not the same + // + if (clientCh != rmCh) + { + break; + } + + // + // if relaxed matching was requested, succeed when we find the + // first decimal point + // + if ((relaxed) && (clientCh == '.')) + { + return NV_OK; + } + + // + // we found the end of the strings: succeed + // + if (clientCh == '\0') + { + return NV_OK; + } + } + + // + // the version strings did not match: print an error message and + // copy the RM's version string into pParams->versionString, so + // that the client can report the mismatch; explicitly NULL + // terminate the client's string, since we cannot trust it + // + pParams->versionString[NV_RM_API_VERSION_STRING_LENGTH - 1] = '\0'; + + nv_printf(NV_DBG_ERRORS, + "NVRM: API mismatch: the client has the version %s, but\n" + "NVRM: this kernel module has the version %s. Please\n" + "NVRM: make sure that this kernel module and all NVIDIA driver\n" + "NVRM: components have the same version.\n", + pParams->versionString, NV_VERSION_STRING); + + os_string_copy(pParams->versionString, rmStr); + + return NV_ERR_GENERIC; +} + +NV_STATUS RmSystemEvent( + nv_state_t *pNv, + NvU32 event_type, + NvU32 event_val +) +{ + NV_STATUS rmStatus = NV_OK; + NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS params; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + switch (event_type) + { + case NV_SYSTEM_ACPI_DISPLAY_SWITCH_EVENT: + // Legacy kepler case, do nothing. 
+ break; + + case NV_SYSTEM_ACPI_BATTERY_POWER_EVENT: + { + Nv2080PowerEventNotification powerParams; + portMemSet(&powerParams, 0, sizeof(powerParams)); + powerParams.bSwitchToAC = NV_TRUE; + powerParams.bGPUCapabilityChanged = NV_FALSE; + powerParams.displayMaskAffected = 0; + + params.eventType = NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE; + if (event_val == NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_BATTERY) + { + params.eventData = NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_BATTERY; + powerParams.bSwitchToAC = NV_FALSE; + } + else if (event_val == NV_SYSTEM_ACPI_EVENT_VALUE_POWER_EVENT_AC) + { + params.eventData = NV0000_CTRL_SYSTEM_EVENT_DATA_POWER_AC; + powerParams.bSwitchToAC = NV_TRUE; + } + else + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + if (rmStatus == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + rmStatus = pRmApi->Control(pRmApi, + pNv->rmapi.hClient, + pNv->rmapi.hClient, + NV0000_CTRL_CMD_SYSTEM_NOTIFY_EVENT, + (void *)¶ms, + sizeof(NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS)); + + // + // TODO: bug 2812848 Investigate if we can use system event + // or if we can broadcast NV2080_NOTIFIERS_POWER_EVENT for all GPUs + // + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POWER_EVENT, + &powerParams, sizeof(powerParams), 0, 0); + } + break; + } + default: + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + + return rmStatus; +} + +/*! + * @brief Deal with D-notifier events to apply a performance + * level based on the requested auxiliary power-state. + * Read confluence page "D-Notifiers on Linux" for more details. + * + * @param[in] pGpu OBJGPU pointer. + * @param[in] event_type NvU32 Event type. + */ +static void RmHandleDNotifierEvent( + nv_state_t *pNv, + NvU32 event_type +) +{ +} + +static NV_STATUS +RmDmabufVerifyMemHandle( + OBJGPU *pGpu, + NvHandle hSrcClient, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + void *pGpuInstanceInfo +) +{ + NV_STATUS status; + RsClient *pClient = NULL; + RsResourceRef *pSrcMemoryRef = NULL; + Memory *pSrcMemory = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hSrcClient, &pClient)); + + status = clientGetResourceRef(pClient, hMemory, &pSrcMemoryRef); + if (status != NV_OK) + { + return status; + } + + pSrcMemory = dynamicCast(pSrcMemoryRef->pResource, Memory); + if (pSrcMemory == NULL) + { + return NV_ERR_INVALID_OBJECT; + } + + pMemDesc = pSrcMemory->pMemDesc; + + if (pGpuInstanceInfo != NULL) + { + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance; + pKernelMIGGpuInstance = (KERNEL_MIG_GPU_INSTANCE *) pGpuInstanceInfo; + + if ((pKernelMIGGpuInstance->pMemoryPartitionHeap != pSrcMemory->pHeap)) + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + } + + // Check if hMemory belongs to the same pGpu + if ((pMemDesc->pGpu != pGpu) && + (pSrcMemory->pGpu != pGpu)) + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + // Offset and size must be aligned to OS page-size + if (!NV_IS_ALIGNED64(offset, os_page_size) || + !NV_IS_ALIGNED64(size, os_page_size)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Only supported for vidmem handles + if (memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if ((size == 0) || + (size > memdescGetSize(pMemDesc)) || + (offset > (memdescGetSize(pMemDesc) - size))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +static NV_STATUS +RmDmabufGetClientAndDevice( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubdevice, + void **ppGpuInstanceInfo +) +{ + MemoryManager 
*pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (IS_MIG_ENABLED(pGpu)) + { + NV_STATUS status; + MIG_INSTANCE_REF ref; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + status = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, &ref); + if (status != NV_OK) + { + return status; + } + + status = kmigmgrIncRefCount(ref.pKernelMIGGpuInstance->pShare); + if (status != NV_OK) + { + return status; + } + + *phClient = ref.pKernelMIGGpuInstance->instanceHandles.hClient; + *phDevice = ref.pKernelMIGGpuInstance->instanceHandles.hDevice; + *phSubdevice = ref.pKernelMIGGpuInstance->instanceHandles.hSubdevice; + *ppGpuInstanceInfo = (void *) ref.pKernelMIGGpuInstance; + + return NV_OK; + } + + *phClient = pMemoryManager->hClient; + *phDevice = pMemoryManager->hDevice; + *phSubdevice = pMemoryManager->hSubdevice; + *ppGpuInstanceInfo = NULL; + + return NV_OK; +} + +static void +RmDmabufPutClientAndDevice( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo +) +{ + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance; + + if (pGpuInstanceInfo == NULL) + { + return; + } + + pKernelMIGGpuInstance = (KERNEL_MIG_GPU_INSTANCE *) pGpuInstanceInfo; + + NV_ASSERT_OK(kmigmgrDecRefCount(pKernelMIGGpuInstance->pShare)); +} + +/* + * --------------------------------------------------------------------------- + * + * The routines below are part of the interface between the kernel interface + * layer and the kernel-agnostic portions of the resource manager. + * + * --------------------------------------------------------------------------- + */ + +NvBool NV_API_CALL rm_init_private_state( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NvBool retval; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + retval = RmInitPrivateState(pNv); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_free_private_state( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + RmFreePrivateState(pNv); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NvBool NV_API_CALL rm_init_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + NvBool retval = NV_FALSE; + void *fp; + NvBool bEnabled; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_DEVICE_INIT); + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT) == NV_OK) + { + if (!((gpumgrQueryGpuDrainState(pNv->gpu_id, &bEnabled, NULL) == NV_OK) + && bEnabled)) + { + if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + retval = RmPartiallyInitAdapter(pNv); + } + else + { + retval = RmInitAdapter(pNv); + } + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_disable_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_ASSERT_OK(os_flush_work_queue(pNv->queue)); + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + if (pNv->flags & NV_FLAG_PERSISTENT_SW_STATE) + { + RmPartiallyDisableAdapter(pNv); + } + else + { + RmDisableAdapter(pNv); + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_ASSERT_OK(os_flush_work_queue(pNv->queue)); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); 
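    //
    // rm_init_adapter() and rm_disable_adapter(), like most of the entry
    // points below, follow a strictly nested prologue/epilogue sequence:
    // enter the RM runtime (alternate stack), initialize thread state, take
    // the API lock, do the work, then unwind in exactly the reverse order.
    // The shape of that sequence, distilled into a skeleton (hypothetical
    // function name; error handling elided):
    //
    //     static void rmEntryPointSkeleton(nvidia_stack_t *sp)
    //     {
    //         THREAD_STATE_NODE threadState;
    //         void *fp;
    //
    //         NV_ENTER_RM_RUNTIME(sp,fp);                                /* 1  */
    //         threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);    /* 2  */
    //
    //         if (rmApiLockAcquire(API_LOCK_FLAGS_NONE,
    //                              RM_LOCK_MODULES_OSAPI) == NV_OK)      /* 3  */
    //         {
    //             /* ... body ... */
    //             rmApiLockRelease();                                    /* 3' */
    //         }
    //
    //         threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);    /* 2' */
    //         NV_EXIT_RM_RUNTIME(sp,fp);                                 /* 1' */
    //     }
    //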
+ NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_shutdown_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + RmShutdownAdapter(pNv); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_exclude_adapter( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + rmStatus = RmExcludeAdapter(pNv); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_acquire_api_lock( + nvidia_stack_t *sp +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_release_api_lock( + nvidia_stack_t *sp +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // UNLOCK: release API lock + rmApiLockRelease(); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_acquire_gpu_lock( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire GPU lock + rmStatus = rmDeviceGpuLocksAcquire(NV_GET_NV_PRIV_PGPU(nv), + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_OSAPI); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_release_gpu_lock( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // UNLOCK: release GPU lock + rmDeviceGpuLocksRelease(NV_GET_NV_PRIV_PGPU(nv), GPUS_LOCK_FLAGS_NONE, NULL); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_acquire_all_gpus_lock( + nvidia_stack_t *sp +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire all GPUs lock + rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_release_all_gpus_lock( + nvidia_stack_t *sp +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // UNLOCK: release all GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + 
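    //
    // The wrappers above expose three lock granularities to the kernel
    // interface layer: the RM API lock, a single device's GPU lock, and the
    // all-GPUs lock.  Where both the API lock and GPU locks are needed (see
    // rm_create_mmap_context() further down), the API lock is taken first and
    // released last, as in this sketch (hypothetical function name):
    //
    //     static NV_STATUS lockedOperationSkeleton(void)
    //     {
    //         NV_STATUS status;
    //
    //         status = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
    //         if (status != NV_OK)
    //             return status;
    //
    //         status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI);
    //         if (status == NV_OK)
    //         {
    //             /* ... work that touches GPU state ... */
    //             rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
    //         }
    //
    //         rmApiLockRelease();
    //         return status;
    //     }
    //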
NV_EXIT_RM_RUNTIME(sp,fp); + + return NV_OK; +} + +/*! + * @brief Handle ACPI_NOTIFY_GPS_STATUS_CHANGE event. + * + * This function is called for GPS when SBIOS trigger + * gps STATUS_CHANGE event, which calls rm control call + * NV0000_CTRL_CMD_SYSTEM_GPS_CONTROL to init the GPS + * data from SBIOS. + */ +static void RmHandleGPSStatusChange +( + nv_state_t *pNv +) +{ +} + +/*! + * @brief Function to handle device specific ACPI events. + * + * @param[in] sp nvidia_stack_t pointer. + * @param[in] nv nv_state_t pointer. + * @param[in] event_type NvU32 Event type. + */ +void NV_API_CALL rm_acpi_notify( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 event_type +) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + + switch (event_type) + { + case ACPI_VIDEO_NOTIFY_PROBE: + { + THREAD_STATE_NODE threadState; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + RmHandleDisplayChange(sp, nv); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + break; + } + + case ACPI_NOTIFY_GPS_STATUS_CHANGE: + RmHandleGPSStatusChange(nv); + break; + + case ACPI_NOTIFY_POWER_LEVEL_D1: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D2: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D3: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D4: /* fallthrough */ + case ACPI_NOTIFY_POWER_LEVEL_D5: + RmHandleDNotifierEvent(nv, event_type); + break; + + default: + NV_PRINTF(LEVEL_INFO, "No support for 0x%x event\n", event_type); + NV_ASSERT(0); + break; + } + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +static void nv_align_mmap_offset_length( + nv_usermap_access_params_t *nvuap) +{ + NvU64 page_size = os_page_size; + NvU64 end = nvuap->size + (nvuap->addr & (page_size - 1)); + + nvuap->mmap_start = NV_ALIGN_DOWN(nvuap->addr, page_size); + nvuap->mmap_size = NV_ALIGN_UP(end, page_size); + nvuap->offset = NV_ALIGN_DOWN(nvuap->offset, page_size); +} + +static inline NV_STATUS RmGetArrayMinMax( + NvU64 *array, + NvU64 count, + NvU64 *min, + NvU64 *max +) +{ + NvU64 i; + + if (array == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *min = array[0]; + *max = array[0]; + + if (count == 1) + return NV_OK; + + for (i = 1; i < count; i++) + { + if (array[i] > *max) + *max = array[i]; + + if (array[i] < *min) + *min = array[i]; + } + + return NV_OK; +} + +static NV_STATUS RmSetUserMapAccessRange( + nv_usermap_access_params_t *nvuap +) +{ + NV_STATUS status = NV_OK; + + if (nvuap->contig) + { + nvuap->access_start = nvuap->mmap_start; + nvuap->access_size = nvuap->mmap_size; + } + else + { + NvU64 highest_address_mapped; + NvU64 lowest_address_mapped; + + status = RmGetArrayMinMax(nvuap->page_array, nvuap->num_pages, + &lowest_address_mapped, + &highest_address_mapped); + if (status != NV_OK) + { + return status; + } + + nvuap->access_start = lowest_address_mapped; + nvuap->access_size = (highest_address_mapped + os_page_size) - lowest_address_mapped; + } + + return status; +} + +static NV_STATUS RmGetAllocPrivate(NvU32, NvU32, NvU64, NvU64, NvU32 *, void **, + NvU64 *); +static NV_STATUS RmValidateMmapRequest(nv_state_t *, NvU64, NvU64, NvU32 *); + +static NV_STATUS RmGetMmapPteArray( + KernelMemorySystem *pKernelMemorySystem, + NvHandle hClient, + NvHandle hMemory, + nv_usermap_access_params_t *nvuap +) +{ + NV_STATUS status = NV_OK; + NvU64 pages = 0; + Memory *pMemory = NULL; + PMEMORY_DESCRIPTOR pMemDesc = NULL; + RsResourceRef *pResourceRef; + NvU64 i; + NvU64 *pteArray; + NvU64 index; + + if (nvuap == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // If we're mapping a memory handle, we can get the data 
from the + // descriptor (required if the allocation being mapped is discontiguous). + // + if (serverutilGetResourceRefWithType(hClient, hMemory, classId(Memory), + &pResourceRef) == NV_OK) + { + pMemory = dynamicCast(pResourceRef->pResource, Memory); + pMemDesc = pMemory->pMemDesc; + nvuap->contig = memdescGetContiguity(pMemDesc, AT_CPU); + } + + // + // In the discontiguous case, the page array needs to be large enough to hold + // the 4K-page-sized addresses that we will then deflate to OS page addresses. + // For the contiguous case, we can create the OS-page-sized addresses + // directly in the array. + // + if (nvuap->contig) + { + pages = nvuap->mmap_size / os_page_size; + } + else + { + pages = nvuap->mmap_size / NV_RM_PAGE_SIZE; + } + + NV_ASSERT_OR_RETURN(pages != 0, NV_ERR_INVALID_ARGUMENT); + + status = os_alloc_mem((void **)&nvuap->page_array, (pages * sizeof(NvU64))); + if (status != NV_OK) + { + return status; + } + + if (!nvuap->contig) + { + pteArray = memdescGetPteArray(pMemDesc, AT_CPU); + index = nvuap->offset / NV_RM_PAGE_SIZE; + + // + // We're guaranteed to have a MEMORY_DESCRIPTOR in the discontiguous + // case. Copy over the addresses now. + // + portMemCopy((void *)nvuap->page_array, + pages * sizeof(NvU64), (void *)&pteArray[index], + pages * sizeof(NvU64)); + + if (NV_RM_PAGE_SIZE < os_page_size) + { + RmDeflateRmToOsPageArray(nvuap->page_array, pages); + pages = NV_RM_PAGES_TO_OS_PAGES(pages); + } + + // + // Convert the GPU physical addresses to system physical addresses, + // if applicable. + // + for (i = 0; i < pages; i++) + { + nvuap->page_array[i] += pKernelMemorySystem->coherentCpuFbBase; + } + } + else + { + // Offset is accounted in mmap_start. + for (nvuap->page_array[0] = nvuap->mmap_start, i = 1; + i < pages; i++) + { + nvuap->page_array[i] = nvuap->page_array[i-1] + os_page_size; + } + } + + nvuap->num_pages = pages; + + return status; +} + +/* Must be called with the API lock and the GPU locks */ +static NV_STATUS RmCreateMmapContextLocked( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 address, + NvU64 size, + NvU64 offset, + NvU32 fd +) +{ + NV_STATUS status = NV_OK; + void *pAllocPriv = NULL; + OBJGPU *pGpu = NULL; + KernelMemorySystem *pKernelMemorySystem = NULL; + NvBool bCoherentAtsCpuOffset = NV_FALSE; + nv_state_t *pNv = NULL; + NvU64 addr = (NvU64)address; + NvU32 prot = 0; + NvU64 pageIndex = 0; + nv_usermap_access_params_t *nvuap = NULL; + NvBool bClientMap = (hClient == hDevice); + + if (!bClientMap) + { + if (CliSetGpuContext(hClient, hDevice, &pGpu, NULL) != NV_OK) + { + NvU32 tmp; + if (CliSetSubDeviceContext(hClient, hDevice, &tmp, &pGpu) != NV_OK) + { + // + // If this mapping isn't for a GPU then we don't need to + // create a context for it. + // + return status; + } + } + } + + status = os_alloc_mem((void**)&nvuap, sizeof(nv_usermap_access_params_t)); + if (status != NV_OK) + { + return status; + } + + portMemSet(nvuap, 0, sizeof(nv_usermap_access_params_t)); + nvuap->addr = addr; + nvuap->size = size; + nvuap->offset = offset; + + // + // Assume the allocation is contiguous until RmGetMmapPteArray + // determines otherwise. 
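    //
    // nv_align_mmap_offset_length(), defined above and called just below,
    // widens the requested (addr, size) window to whole OS pages: the start
    // (and the offset) are rounded down to a page boundary and the length is
    // rounded up so the original byte range stays covered.  The same
    // arithmetic as a standalone sketch (hypothetical helper; the page size
    // is assumed to be a power of two):
    //
    //     static void alignWindow(unsigned long long addr,
    //                             unsigned long long size,
    //                             unsigned long long pageSize,
    //                             unsigned long long *pStart,
    //                             unsigned long long *pLen)
    //     {
    //         unsigned long long end = size + (addr & (pageSize - 1));
    //
    //         *pStart = addr & ~(pageSize - 1);                  /* NV_ALIGN_DOWN */
    //         *pLen   = (end + pageSize - 1) & ~(pageSize - 1);  /* NV_ALIGN_UP   */
    //     }
    //
    //     /* e.g. addr=0x1234, size=0x10, pageSize=0x1000
    //      *      -> *pStart=0x1000, *pLen=0x1000 (still covers 0x1234..0x1243) */
    //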
+ // + nvuap->contig = NV_TRUE; + nv_align_mmap_offset_length(nvuap); + + if (pGpu != NULL) + { + pNv = NV_GET_NV_STATE(pGpu); + pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + bCoherentAtsCpuOffset = IS_COHERENT_CPU_ATS_OFFSET(pKernelMemorySystem, addr, size); + } + + // + // If no device is given, or the address isn't in the given device's BARs, + // validate this as a system memory mapping and associate it with the + // control device. + // + if ((pNv == NULL) || + (!IS_REG_OFFSET(pNv, addr, size) && + !IS_FB_OFFSET(pNv, addr, size) && + !bCoherentAtsCpuOffset && + !IS_IMEM_OFFSET(pNv, addr, size))) + { + pNv = nv_get_ctl_state(); + + // + // Validate the mapping request by looking up the underlying sysmem + // allocation. + // + status = RmGetAllocPrivate(hClient, hMemory, addr, size, &prot, &pAllocPriv, + &pageIndex); + + if (status != NV_OK) + { + goto done; + } + } + else + { + // + // Validate the mapping request for ATS and get pteArray + // + if (bCoherentAtsCpuOffset) + { + status = RmGetMmapPteArray(pKernelMemorySystem, hClient, hMemory, nvuap); + if (status != NV_OK) + { + goto done; + } + } + + if (RmSetUserMapAccessRange(nvuap) != NV_OK) + { + goto done; + } + + status = nv_get_usermap_access_params(pNv, nvuap); + if (status != NV_OK) + { + goto done; + } + + // Validate the mapping request for BAR's. + status = RmValidateMmapRequest(pNv, nvuap->access_start, + nvuap->access_size, &prot); + if (status != NV_OK) + { + goto done; + } + } + + status = nv_add_mapping_context_to_file(pNv, nvuap, prot, pAllocPriv, + pageIndex, fd); + +done: + os_free_mem(nvuap); + return status; +} + +// TODO: Bug 1802250: [uvm8] Use an alt stack in all functions in unix/src/osapi.c +NV_STATUS rm_create_mmap_context( + nv_state_t *pNv, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 address, + NvU64 size, + NvU64 offset, + NvU32 fd +) +{ + NV_STATUS rmStatus = NV_OK; + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + RmClient *pClient; + + if (NV_OK != serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClient)) + return NV_ERR_INVALID_CLIENT; + + if (pClient->ProcID != osGetCurrentProcess()) + { + rmStatus = NV_ERR_INVALID_CLIENT; + } + // LOCK: acquire GPUs lock + else if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + rmStatus = RmCreateMmapContextLocked(hClient, hDevice, hMemory, + address, size, offset, fd); + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + serverutilReleaseClient(LOCK_ACCESS_READ, pClient); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return rmStatus; +} + +static NV_STATUS RmGetAllocPrivate( + NvU32 hClient, + NvU32 hMemory, + NvU64 offset, + NvU64 length, + NvU32 *pProtection, + void **ppPrivate, + NvU64 *pPageIndex +) +{ + RmClient *pClient; + NV_STATUS rmStatus; + PMEMORY_DESCRIPTOR pMemDesc; + NvU32 pageOffset; + NvU64 pageCount; + RsResourceRef *pResourceRef; + RmResource *pRmResource; + void *pMemData; + NvBool bPeerIoMem; + NvBool bReadOnlyMem; + *pProtection = NV_PROTECT_READ_WRITE; + *ppPrivate = NULL; + + pageOffset = (offset & ~os_page_mask); + offset &= os_page_mask; + + NV_ASSERT_OR_RETURN(rmApiLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + if (NV_OK != serverutilAcquireClient(hClient, LOCK_ACCESS_READ, &pClient)) + return NV_ERR_INVALID_CLIENT; + + rmStatus = clientGetResourceRef(staticCast(pClient, RsClient), hMemory, &pResourceRef); + if (rmStatus != 
NV_OK) + goto done; + + pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + if (!pRmResource) + { + rmStatus = NV_ERR_INVALID_OBJECT; + goto done; + } + + rmStatus = rmresGetMemoryMappingDescriptor(pRmResource, &pMemDesc); + if (rmStatus != NV_OK) + goto done; + + bReadOnlyMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY); + bPeerIoMem = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM); + + if (!(pMemDesc->Allocated || bPeerIoMem)) + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_SYSMEM: + break; + default: + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + pMemData = memdescGetMemData(pMemDesc); + if (pMemData == NULL) + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + rmStatus = os_match_mmap_offset(pMemData, offset, pPageIndex); + if (rmStatus != NV_OK) + goto done; + + pageCount = ((pageOffset + length) / os_page_size); + pageCount += (*pPageIndex + (((pageOffset + length) % os_page_size) ? 1 : 0)); + + if (pageCount > NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (bReadOnlyMem) + *pProtection = NV_PROTECT_READABLE; + + *ppPrivate = pMemData; + +done: + serverutilReleaseClient(LOCK_ACCESS_READ, pClient); + + return rmStatus; +} + +static NV_STATUS RmValidateMmapRequest( + nv_state_t *pNv, + NvU64 offset, + NvU64 length, + NvU32 *pProtection +) +{ + NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS params = { 0 }; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS status; + + if (osIsAdministrator()) + { + *pProtection = NV_PROTECT_READ_WRITE; + return NV_OK; + } + + params.addressStart = offset; + params.addressLength = length; + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, + pNv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST, + ¶ms, sizeof(params)); + + if (status == NV_OK) + { + *pProtection = params.protection; + } + + return status; +} + +NV_STATUS rm_get_adapter_status( + nv_state_t *pNv, + NvU32 *pStatus +) +{ + NV_STATUS rmStatus = NV_ERR_OPERATING_SYSTEM; + + // LOCK: acquire API lock + if (rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + rmStatus = RmGetAdapterStatus(pNv, pStatus); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return rmStatus; +} + +NvBool NV_API_CALL rm_init_rm( + nvidia_stack_t *sp +) +{ + NvBool retval; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + retval = RmInitRm(); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_shutdown_rm( + nvidia_stack_t *sp +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + RmShutdownRm(); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NvBool NV_API_CALL rm_init_event_locks( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + NvBool ret; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pNv->event_spinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + ret = (pNv->event_spinlock != NULL); + + NV_EXIT_RM_RUNTIME(sp,fp); + return ret; +} + +void NV_API_CALL rm_destroy_event_locks( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (pNv && pNv->event_spinlock) + portSyncSpinlockDestroy(pNv->event_spinlock); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_get_vbios_version( + nvidia_stack_t *sp, + nv_state_t *pNv, + char *vbiosString +) +{ + void *fp; + NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS *params; + RM_API *pRmApi; + THREAD_STATE_NODE threadState; + const size_t 
vbiosStringLen = 15; // "xx.xx.xx.xx.xx" + + os_snprintf(vbiosString, vbiosStringLen, "??.??.??.??.??"); + + NV_ENTER_RM_RUNTIME(sp,fp); + + params = portMemAllocNonPaged(sizeof(*params)); + if (params == NULL) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return; + } + + portMemSet(params, 0, sizeof(*params)); + + params->biosInfoList[0].index = NV2080_CTRL_BIOS_INFO_INDEX_REVISION; + params->biosInfoList[1].index = NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION; + params->biosInfoListSize = 2; + + pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_VBIOS); + if (pRmApi != NULL) + { + NV_STATUS rmStatus; + + rmStatus = pRmApi->Control(pRmApi, + pNv->rmapi.hClient, + pNv->rmapi.hSubDevice, + NV2080_CTRL_CMD_BIOS_GET_INFO_V2, + params, + sizeof(*params)); + + if (rmStatus == NV_OK) + { + const NvU32 biosRevision = params->biosInfoList[0].data; + const NvU32 biosOEMRevision = params->biosInfoList[1].data; + + os_snprintf(vbiosString, vbiosStringLen, + "%02x.%02x.%02x.%02x.%02x", + (biosRevision & 0xff000000) >> 24, + (biosRevision & 0x00ff0000) >> 16, + (biosRevision & 0x0000ff00) >> 8, + (biosRevision & 0x000000ff) >> 0, + biosOEMRevision); + } + + RmUnixRmApiEpilogue(pNv, &threadState); + } + + portMemFree(params); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_stop_user_channels( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS params = { 0 }; + RM_API *pRmApi; + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus = NV_ERR_INVALID_STATE; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_FIFO); + if (pRmApi != NULL) + { + params.bDisable = NV_TRUE; + rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient, + pNv->rmapi.hSubDevice, + NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS, + ¶ms, sizeof(params)); + + RmUnixRmApiEpilogue(pNv, &threadState); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_restart_user_channels( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS params = { 0 }; + RM_API *pRmApi; + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus = NV_ERR_INVALID_STATE; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pRmApi = RmUnixRmApiPrologue(pNv, &threadState, RM_LOCK_MODULES_FIFO); + if (pRmApi != NULL) + { + params.bDisable = NV_FALSE; + rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hSubDevice, + NV2080_CTRL_CMD_FIFO_DISABLE_USERMODE_CHANNELS, + ¶ms, sizeof(params)); + + RmUnixRmApiEpilogue(pNv, &threadState); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +// +// Use this call to check if the chipset is io coherent +// +NvBool NV_API_CALL rm_is_chipset_io_coherent( + nvidia_stack_t *sp) +{ + void *fp; + OBJSYS *pSys; + OBJCL *pCl; + NvBool bIoCoherent = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pSys = SYS_GET_INSTANCE(); + pCl = SYS_GET_CL(pSys); + if (pCl == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "%s: no CL object found, setting io coherent by default\n", + __FUNCTION__); + goto done; + } + + bIoCoherent = pCl->getProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT); + +done: + NV_EXIT_RM_RUNTIME(sp,fp); + + return bIoCoherent; +} + +NV_STATUS NV_API_CALL rm_ioctl( + nvidia_stack_t *sp, + nv_state_t *pNv, + nv_file_private_t *nvfp, + NvU32 Command, + void *pData, + NvU32 dataSize +) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + 
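    //
    // rm_get_vbios_version() above builds the familiar "xx.xx.xx.xx.xx" string
    // by splitting the 32-bit BIOS revision into its four bytes and appending
    // the OEM revision byte.  The same formatting as a standalone sketch
    // (hypothetical helper; snprintf() stands in for os_snprintf()):
    //
    //     #include <stdio.h>
    //
    //     static void formatVbiosVersion(char *buf, size_t len,
    //                                    unsigned int rev, unsigned int oemRev)
    //     {
    //         snprintf(buf, len, "%02x.%02x.%02x.%02x.%02x",
    //                  (rev >> 24) & 0xff,
    //                  (rev >> 16) & 0xff,
    //                  (rev >>  8) & 0xff,
    //                  (rev >>  0) & 0xff,
    //                  oemRev & 0xff);
    //     }
    //
    //     /* e.g. rev=0x90041800, oemRev=0x1c gives "90.04.18.00.1c";
    //      *      a 15-byte buffer holds the 14 characters plus '\0'. */
    //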
+ rmStatus = RmIoctl(pNv, nvfp, Command, pData, dataSize); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +void NV_API_CALL rm_cleanup_file_private( + nvidia_stack_t *sp, + nv_state_t *pNv, + nv_file_private_t *nvfp +) +{ + THREAD_STATE_NODE threadState; + void *fp; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + RM_API_CONTEXT rmApiContext = {0}; + NvU32 i; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + threadStateSetTimeoutOverride(&threadState, 10 * 1000); + + if (rmapiPrologue(pRmApi, &rmApiContext) != NV_OK) + return; + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + // Unref any object which was exported on this file. + if (nvfp->handles != NULL) + { + for (i = 0; i < nvfp->maxHandles; i++) + { + if (nvfp->handles[i] == 0) + { + continue; + } + + RmFreeObjExportHandle(nvfp->handles[i]); + nvfp->handles[i] = 0; + } + + os_free_mem(nvfp->handles); + nvfp->handles = NULL; + nvfp->maxHandles = 0; + } + + // Free any RM clients associated with this file. + RmFreeUnusedClients(pNv, nvfp); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + rmapiEpilogue(pRmApi, &rmApiContext); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + if (nvfp->ctl_nvfp != NULL) + { + nv_put_file_private(nvfp->ctl_nvfp_priv); + nvfp->ctl_nvfp = NULL; + nvfp->ctl_nvfp_priv = NULL; + } + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_unbind_lock( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI) == NV_OK) + { + RmUnbindLock(pNv); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS rm_alloc_os_event( + NvHandle hClient, + nv_file_private_t *nvfp, + NvU32 fd +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK) + { + RmStatus = RmAllocOsEvent(hClient, nvfp, fd); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS rm_free_os_event( + NvHandle hClient, + NvU32 fd +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK) + { + RmStatus = RmFreeOsEvent(hClient, fd); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS rm_get_event_data( + nv_file_private_t *nvfp, + NvP64 pEvent, + NvU32 *MoreEvents +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK) + { + RmStatus = RmGetEventData(nvfp, pEvent, MoreEvents, NV_TRUE); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_read_registry_dword( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + NvU32 *Data +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS RmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + // + // We can be called from different contexts: + // + // 1) early initialization without device state. 
+ // 2) from outside the RM API (without the lock held) + // + // In context 1)the API lock is not needed and + // in context 2), it needs to be acquired. + // + if (nv != NULL) + { + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return RmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + // Skipping the NULL check as osReadRegistryDword takes care of it. + RmStatus = osReadRegistryDword(pGpu, regParmStr, Data); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_dword( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + NvU32 Data +) +{ + NV_STATUS RmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return RmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + RmStatus = RmWriteRegistryDword(nv, regParmStr, Data); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_binary( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + NV_STATUS RmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return RmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + RmStatus = RmWriteRegistryBinary(nv, regParmStr, Data, cbLen); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return RmStatus; +} + +NV_STATUS NV_API_CALL rm_write_registry_string( + nvidia_stack_t *sp, + nv_state_t *nv, + const char *regParmStr, + const char *string, + NvU32 stringLength +) +{ + NV_STATUS rmStatus; + void *fp; + NvBool isApiLockTaken = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + + if (nv != NULL) + { + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI)) != NV_OK) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; + } + + isApiLockTaken = NV_TRUE; + } + + rmStatus = RmWriteRegistryString(nv, regParmStr, string, (stringLength + 1)); + + if (isApiLockTaken == NV_TRUE) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +static NvBool NV_API_CALL rm_is_space(const char ch) +{ + // + // return true if it is a: + // ' ' : (space - decimal 32.) 
+ // '\t' : (TAB - decimal 9) + // 'LF' : (Line feed, new line - decimal 10) + // 'VT' : (Vertical TAB - decimal 11) + // 'FF' : (Form feed, new page - decimal 12) + // '\r' : (carriage return - decimal 13) + // + return ((ch == ' ') || ((ch >= '\t') && (ch <= '\r'))); +} + +char* NV_API_CALL rm_remove_spaces(const char *in) +{ + unsigned int len = os_string_length(in) + 1; + const char *in_ptr; + char *out, *out_ptr; + + if (os_alloc_mem((void **)&out, len) != NV_OK) + return NULL; + + in_ptr = in; + out_ptr = out; + + while (*in_ptr != '\0') + { + if (!rm_is_space(*in_ptr)) + *out_ptr++ = *in_ptr; + in_ptr++; + } + *out_ptr = '\0'; + + return out; +} + +char* NV_API_CALL rm_string_token(char **strp, const char delim) +{ + char *s, *token; + + if ((strp == NULL) || (*strp == NULL)) + return NULL; + + s = token = *strp; + *strp = NULL; + + for (; *s != '\0'; s++) { + if (*s == delim) { + *s = '\0'; + *strp = ++s; + break; + } + } + + return token; +} + +// Parse string passed in NVRM as module parameter. +void NV_API_CALL rm_parse_option_string(nvidia_stack_t *sp, const char *nvRegistryDwords) +{ + unsigned int i; + nv_parm_t *entry; + char *option_string = NULL; + char *ptr, *token; + char *name, *value; + NvU32 data; + + if (nvRegistryDwords != NULL) + { + if ((option_string = rm_remove_spaces(nvRegistryDwords)) == NULL) + { + return; + } + + ptr = option_string; + + while ((token = rm_string_token(&ptr, ';')) != NULL) + { + if (!(name = rm_string_token(&token, '=')) || !os_string_length(name)) + { + continue; + } + + if (!(value = rm_string_token(&token, '=')) || !os_string_length(value)) + { + continue; + } + + if (rm_string_token(&token, '=') != NULL) + { + continue; + } + + data = os_strtoul(value, NULL, 0); + + for (i = 0; (entry = &nv_parms[i])->name != NULL; i++) + { + if (os_string_compare(entry->name, name) == 0) + break; + } + + if (!entry->name) + rm_write_registry_dword(sp, NULL, name, data); + else + *entry->data = data; + } + + // Free the memory allocated by rm_remove_spaces() + os_free_mem(option_string); + } +} + +NV_STATUS NV_API_CALL rm_run_rc_callback( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + OBJGPU *pGpu; + void *fp; + + /* make sure our timer's not still running when it shouldn't be */ + if (nv == NULL) + return NV_ERR_GENERIC; + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + if (pGpu == NULL) + return NV_ERR_GENERIC; + + if (nv->rc_timer_enabled == 0) + return NV_ERR_GENERIC; + + if (!FULL_GPU_SANITY_CHECK(pGpu)) + { + return NV_ERR_GENERIC; + } + + NV_ENTER_RM_RUNTIME(sp,fp); + + osRun1HzCallbacksNow(pGpu); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return NV_OK; +} + +static NV_STATUS RmRunNanoTimerCallback( + OBJGPU *pGpu, + void *pTmrEvent +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + POBJTMR pTmr = GPU_GET_TIMER(pGpu); + THREAD_STATE_NODE threadState; + NV_STATUS status = NV_OK; + + // LOCK: try to acquire GPUs lock + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR)) != NV_OK) + { + return status; + } + + if ((status = osCondAcquireRmSema(pSys->pSema)) != NV_OK) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + return status; + } + + threadStateInitISRAndDeferredIntHandler(&threadState, pGpu, + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER); + + // Call timer event service + status = tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, (PTMR_EVENT)pTmrEvent); + + // Out of conflicting thread + threadStateFreeISRAndDeferredIntHandler(&threadState, + pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER); + + 
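    //
    // rm_parse_option_string() above consumes the NVreg module option string
    // by first stripping whitespace and then splitting it twice with
    // rm_string_token(): on ';' into "name=value" pairs, then on '=' into the
    // name and the value (values are parsed with os_strtoul()).  A compact
    // sketch of that two-level split, using a hypothetical tokenizer with the
    // same semantics as rm_string_token():
    //
    //     static char *splitOnce(char **pStr, char delim)
    //     {
    //         char *s, *token;
    //
    //         if ((pStr == NULL) || (*pStr == NULL))
    //             return NULL;
    //
    //         s = token = *pStr;
    //         *pStr = NULL;
    //         for (; *s != '\0'; s++)
    //         {
    //             if (*s == delim)
    //             {
    //                 *s = '\0';
    //                 *pStr = s + 1;
    //                 break;
    //             }
    //         }
    //         return token;
    //     }
    //
    //     /* usage, e.g. for "KeyA=1;KeyB=0x10" (hypothetical keys):
    //      *     char *ptr = optionString, *pair, *name, *value;
    //      *     while ((pair = splitOnce(&ptr, ';')) != NULL)
    //      *     {
    //      *         name  = splitOnce(&pair, '=');
    //      *         value = splitOnce(&pair, '=');
    //      *         if (name && value && *name && *value)
    //      *             ... apply name := strtoul(value, NULL, 0) ...
    //      *     }
    //      */
    //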
osReleaseRmSema(pSys->pSema, NULL); + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu); + + return status; +} + +NV_STATUS NV_API_CALL rm_run_nano_timer_callback +( + nvidia_stack_t *sp, + nv_state_t *nv, + void *pTmrEvent +) +{ + NV_STATUS status; + OBJGPU *pGpu = NULL; + void *fp; + + if (nv == NULL) + return NV_ERR_GENERIC; + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + if (pGpu == NULL) + return NV_ERR_GENERIC; + + if (!FULL_GPU_SANITY_CHECK(pGpu)) + { + return NV_ERR_GENERIC; + } + + NV_ENTER_RM_RUNTIME(sp,fp); + + status = RmRunNanoTimerCallback(pGpu, pTmrEvent); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return status; +} + +void NV_API_CALL rm_execute_work_item( + nvidia_stack_t *sp, + void *pNvWorkItem +) +{ + void *fp; + THREAD_STATE_NODE threadState; + + NV_ENTER_RM_RUNTIME(sp, fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + RmExecuteWorkItem(pNvWorkItem); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp, fp); +} + +const char* NV_API_CALL rm_get_device_name( + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device +) +{ + unsigned int i; + const char *tmpName = NULL; + + for (i = 0; i < NV_ARRAY_ELEMENTS(sChipsReleased); i++) + { + // if the device ID doesn't match, go to the next entry + if (device != sChipsReleased[i].devID) + { + continue; + } + + // if the entry has 0 for the subsystem IDs, then the device + // ID match is sufficient, but continue scanning through + // sChipsReleased[] in case there is a subsystem ID match later + // in the table + if (sChipsReleased[i].subSystemVendorID == 0 && + sChipsReleased[i].subSystemID == 0) + { + tmpName = sChipsReleased[i].name; + continue; + } + + if (subsystem_vendor == sChipsReleased[i].subSystemVendorID && + subsystem_device == sChipsReleased[i].subSystemID) + { + tmpName = sChipsReleased[i].name; + break; + } + } + + return (tmpName != NULL) ? tmpName : "Unknown"; +} + +NV_STATUS rm_access_registry( + NvHandle hClient, + NvHandle hObject, + NvU32 AccessType, + NvP64 clientDevNodeAddress, + NvU32 DevNodeLength, + NvP64 clientParmStrAddress, + NvU32 ParmStrLength, + NvP64 clientBinaryDataAddress, + NvU32 *pBinaryDataLength, + NvU32 *Data, + NvU32 *Entry +) +{ + NV_STATUS RmStatus; + NvBool bReadOnly = (AccessType == NVOS38_ACCESS_TYPE_READ_DWORD) || + (AccessType == NVOS38_ACCESS_TYPE_READ_BINARY); + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(bReadOnly ? 
RMAPI_LOCK_FLAGS_READ : RMAPI_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_OSAPI)) == NV_OK) + { + RmStatus = RmAccessRegistry(hClient, + hObject, + AccessType, + clientDevNodeAddress, + DevNodeLength, + clientParmStrAddress, + ParmStrLength, + clientBinaryDataAddress, + pBinaryDataLength, + Data, + Entry); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +NV_STATUS rm_update_device_mapping_info( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pOldCpuAddress, + void *pNewCpuAddress +) +{ + NV_STATUS RmStatus; + + // LOCK: acquire API lock + if ((RmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK) + { + RmStatus = RmUpdateDeviceMappingInfo(hClient, + hDevice, + hMemory, + pOldCpuAddress, + pNewCpuAddress); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + return RmStatus; +} + +static NvBool NV_API_CALL rm_is_legacy_device( + NvU16 device_id, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_warning +) +{ + return NV_FALSE; +} + +static NvBool NV_API_CALL rm_is_legacy_arch( + NvU32 pmc_boot_0, + NvU32 pmc_boot_42 +) +{ + NvBool legacy = NV_FALSE; + + return legacy; +} + +NV_STATUS NV_API_CALL rm_is_supported_device( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + OBJSYS *pSys; + POBJHALMGR pHalMgr; + GPUHWREG *reg_mapping; + NvU32 myHalPublicID; + void *fp; + NvU32 pmc_boot_0; + NvU32 pmc_boot_42; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pSys = SYS_GET_INSTANCE(); + pHalMgr = SYS_GET_HALMGR(pSys); + + reg_mapping = osMapKernelSpace(pNv->regs->cpu_address, + os_page_size, + NV_MEMORY_UNCACHED, + NV_PROTECT_READABLE); + + if (reg_mapping == NULL) + { + nv_printf(NV_DBG_ERRORS, "NVRM: failed to map registers!\n"); + rmStatus = NV_ERR_OPERATING_SYSTEM; + goto threadfree; + } + pmc_boot_0 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_0); + pmc_boot_42 = NV_PRIV_REG_RD32(reg_mapping, NV_PMC_BOOT_42); + + osUnmapKernelSpace(reg_mapping, os_page_size); + + if ((pmc_boot_0 == 0xFFFFFFFF) && (pmc_boot_42 == 0xFFFFFFFF)) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x\n" + "NVRM: (PCI ID: %04x:%04x) installed in this system has\n" + "NVRM: fallen off the bus and is not responding to commands.\n", + pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot, + pNv->pci_info.function, pNv->pci_info.vendor_id, + pNv->pci_info.device_id); + rmStatus = NV_ERR_GPU_IS_LOST; + goto threadfree; + } + + /* + * For legacy architectures, rm_is_legacy_arch() prints "legacy" message. + * We do not want to print "unsupported" message for legacy architectures + * to avoid confusion. Also, the probe should not continue for legacy + * architectures. Hence, we set rmStatus to NV_ERR_NOT_SUPPORTED and + * goto threadfree. 
+ */ + if (rm_is_legacy_arch(pmc_boot_0, pmc_boot_42)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + goto threadfree; + } + + rmStatus = halmgrGetHalForGpu(pHalMgr, pmc_boot_0, pmc_boot_42, &myHalPublicID); + + if (rmStatus != NV_OK) + goto print_unsupported; + + goto threadfree; + +print_unsupported: + nv_printf(NV_DBG_ERRORS, + "NVRM: The NVIDIA GPU %04x:%02x:%02x.%x (PCI ID: %04x:%04x)\n" + "NVRM: installed in this system is not supported by the\n" + "NVRM: NVIDIA %s driver release.\n" + "NVRM: Please see 'Appendix A - Supported NVIDIA GPU Products'\n" + "NVRM: in this release's README, available on the operating system\n" + "NVRM: specific graphics driver download page at www.nvidia.com.\n", + pNv->pci_info.domain, pNv->pci_info.bus, pNv->pci_info.slot, + pNv->pci_info.function, pNv->pci_info.vendor_id, + pNv->pci_info.device_id, NV_VERSION_STRING); + +threadfree: + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NvBool NV_API_CALL rm_is_supported_pci_device( + NvU8 pci_class, + NvU8 pci_subclass, + NvU16 vendor, + NvU16 device, + NvU16 subsystem_vendor, + NvU16 subsystem_device, + NvBool print_legacy_warning +) +{ + const NvU16 nv_pci_vendor_id = 0x10DE; + const NvU16 nv_pci_id_riva_tnt = 0x0020; + const NvU8 nv_pci_class_display = 0x03; + const NvU8 nv_pci_subclass_display_vga = 0x00; + const NvU8 nv_pci_subclass_display_3d = 0x02; + + if (pci_class != nv_pci_class_display) + { + return NV_FALSE; + } + + if ((pci_subclass != nv_pci_subclass_display_vga) && + (pci_subclass != nv_pci_subclass_display_3d)) + { + return NV_FALSE; + } + + if (vendor != nv_pci_vendor_id) + { + return NV_FALSE; + } + + if (device < nv_pci_id_riva_tnt) + { + return NV_FALSE; + } + + if (rm_is_legacy_device( + device, + subsystem_vendor, + subsystem_device, + print_legacy_warning)) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +/* + * Performs the I2C transfers which are related with DP AUX channel + */ +static NV_STATUS RmDpAuxI2CTransfer +( + nv_state_t *pNv, + NvU32 displayId, + NvU8 addr, + NvU32 len, + NvU8 *pData, + NvBool bWrite +) +{ + NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS *pParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS status; + + if (len > NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE) + { + NV_PRINTF(LEVEL_ERROR, + "%s: requested I2C transfer length %u is greater than maximum supported length %u\n", + __FUNCTION__, len, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE); + return NV_ERR_NOT_SUPPORTED; + } + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pParams, 0, sizeof(*pParams)); + + pParams->subDeviceInstance = 0; + pParams->displayId = displayId; + pParams->addr = addr; + pParams->size = len; + pParams->bWrite = bWrite; + + if (bWrite) + { + portMemCopy(pParams->data, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_MAX_DATA_SIZE, + pData, len); + } + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp, + NV0073_CTRL_CMD_DP_AUXCH_I2C_TRANSFER_CTRL, + pParams, sizeof(*pParams)); + + if ((status == NV_OK) && !bWrite) + { + portMemCopy(pData, len, pParams->data, pParams->size); + } + + portMemFree(pParams); + + return status; +} + +/* + * Performs the I2C transfers which are not related with DP AUX channel + */ +static NV_STATUS RmNonDPAuxI2CTransfer +( + nv_state_t *pNv, + NvU8 portId, + NvU8 type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +) +{ + NV402C_CTRL_I2C_TRANSACTION_PARAMS *params; 
+ RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS rmStatus = NV_OK; + + params = portMemAllocNonPaged(sizeof(*params)); + if (params == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(params, 0, sizeof(*params)); + + params->portId = portId; + // precondition our address (our stack requires this) + params->deviceAddress = addr << 1; + + switch (type) + { + case NV_I2C_CMD_WRITE: + params->transData.i2cBlockData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW; + params->transData.i2cBlockData.messageLength = len; + params->transData.i2cBlockData.pMessage = pData; + break; + + case NV_I2C_CMD_SMBUS_WRITE: + params->transData.smbusByteData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_SMBUS_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW; + params->transData.smbusByteData.message = pData[0]; + params->transData.smbusByteData.registerAddress = command; + break; + + case NV_I2C_CMD_SMBUS_BLOCK_WRITE: + params->transData.smbusBlockData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_SMBUS_BLOCK_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW; + params->transData.smbusBlockData.registerAddress = command; + params->transData.smbusBlockData.messageLength = len; + params->transData.smbusBlockData.pMessage = pData; + break; + + case NV_I2C_CMD_SMBUS_QUICK_WRITE: + params->transData.smbusQuickData.bWrite = NV_TRUE; + /* fall through*/ + + case NV_I2C_CMD_SMBUS_QUICK_READ: + params->transType = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW; + break; + + default: + portMemFree(params); + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = pRmApi->Control(pRmApi, pNv->rmapi.hClient, + pNv->rmapi.hI2C, + NV402C_CTRL_CMD_I2C_TRANSACTION, + params, sizeof(*params)); + + // + // For NV_I2C_CMD_SMBUS_READ, copy the read data to original + // data buffer. 
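    //
    // The switch above maps each NV_I2C_CMD_* request onto an RM transaction
    // type, with every WRITE case falling through to its READ counterpart
    // after setting bWrite.  Note also that the 7-bit I2C device address is
    // shifted left by one before the control call; per the "precondition our
    // address" comment above, the RM stack appears to expect the 8-bit wire
    // form in which bit 0 carries the R/W flag.  A tiny sketch of that
    // address encoding (hypothetical helper):
    //
    //     /* 7-bit address 0x50 -> wire byte 0xA0 (write) or 0xA1 (read) */
    //     static unsigned char i2cWireAddress(unsigned char addr7, int bRead)
    //     {
    //         return (unsigned char)((addr7 << 1) | (bRead ? 1u : 0u));
    //     }
    //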
+ // + if (rmStatus == NV_OK && type == NV_I2C_CMD_SMBUS_READ) + { + pData[0] = params->transData.smbusByteData.message; + } + + portMemFree(params); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_i2c_transfer( + nvidia_stack_t *sp, + nv_state_t *pNv, + void *pI2cAdapter, + NvU8 type, + NvU8 addr, + NvU8 command, + NvU32 len, + NvU8 *pData +) +{ + THREAD_STATE_NODE threadState; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = NULL; + NvBool unlockApi = NV_FALSE; + NvBool unlockGpu = NV_FALSE; + NvU32 x; + void *fp; + NvU32 numDispId = 0; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C) + { + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK) + goto finish; + + unlockApi = NV_TRUE; + + // LOCK: acquire GPUs lock + if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK) + goto finish; + + unlockGpu = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + if (!pGpu) + { + rmStatus = NV_ERR_GENERIC; + goto finish; + } + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + if (pNvp->i2c_adapters[x].pOsAdapter == pI2cAdapter) + { + break; + } + } + + if (x == MAX_I2C_ADAPTERS) + { + rmStatus = NV_ERR_GENERIC; + goto finish; + } + + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + NvU32 displayId = pNvp->i2c_adapters[x].displayId[numDispId]; + + if (displayId == INVALID_DISP_ID) + { + continue; + } + + // Handle i2c-over-DpAux adapters separately from regular i2c adapters + if (displayId == 0) + { + rmStatus = RmNonDPAuxI2CTransfer(pNv, pNvp->i2c_adapters[x].port, + type, addr, command, len, pData); + } + else + { + if ((type != NV_I2C_CMD_READ) && (type != NV_I2C_CMD_WRITE)) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + goto semafinish; + } + + rmStatus = RmDpAuxI2CTransfer(pNv, displayId, addr, len, pData, + type == NV_I2C_CMD_WRITE); + } +semafinish: + if (rmStatus == NV_OK) + { + break; + } + } + +finish: + if (unlockGpu) + { + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + if (unlockApi) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +static void rm_i2c_add_adapter( + nv_state_t *pNv, + NvU32 port, + NvU32 displayId +) +{ + NvU32 y, free; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NvU32 numDispId = 0; + + for (y = 0, free = MAX_I2C_ADAPTERS; y < MAX_I2C_ADAPTERS; y++) + { + if (pNvp->i2c_adapters[y].pOsAdapter == NULL) + { + // Only find the first free entry, and ignore the rest + if (free == MAX_I2C_ADAPTERS) + { + free = y; + } + } + else if (pNvp->i2c_adapters[y].port == port) + { + break; + } + } + + if (y < MAX_I2C_ADAPTERS) + { + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + if (pNvp->i2c_adapters[y].displayId[numDispId] == INVALID_DISP_ID) + { + pNvp->i2c_adapters[y].displayId[numDispId] = displayId; + break; + } + else + { + NV_PRINTF(LEVEL_INFO, + "%s: adapter already exists (port=0x%x, displayId=0x%x)\n", + __FUNCTION__, port, + pNvp->i2c_adapters[y].displayId[numDispId]); + } + } + + if (numDispId == MAX_DISP_ID_PER_ADAPTER) + { + NV_PRINTF(LEVEL_ERROR, + "%s: no more free display Id entries in adapter\n", + __FUNCTION__); + } + + return; + } + + if (free == MAX_I2C_ADAPTERS) + { + NV_PRINTF(LEVEL_ERROR, "%s: no more free adapter entries exist\n", + 
__FUNCTION__); + return; + } + + pNvp->i2c_adapters[free].pOsAdapter = nv_i2c_add_adapter(pNv, port); + pNvp->i2c_adapters[free].port = port; + // When port is added, numDispId will be 0. + pNvp->i2c_adapters[free].displayId[numDispId] = displayId; +} + +void RmI2cAddGpuPorts(nv_state_t * pNv) +{ + NvU32 x = 0; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + NvU32 displayMask; + NV_STATUS status; + NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS systemGetSupportedParams = { 0 }; + + // Make displayId as Invalid. + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + NvU32 numDispId; + + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + pNvp->i2c_adapters[x].displayId[numDispId] = INVALID_DISP_ID; + } + } + + // First, set up the regular i2c adapters - one per i2c port + if (pNv->rmapi.hI2C != 0) + { + NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS i2cPortInfoParams = { 0 }; + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hI2C, + NV402C_CTRL_CMD_I2C_GET_PORT_INFO, + &i2cPortInfoParams, sizeof(i2cPortInfoParams)); + + if (status == NV_OK) + { + for (x = 0; x < NV_ARRAY_ELEMENTS(i2cPortInfoParams.info); x++) + { + // + // Check if this port is implemented and RM I2C framework has + // validated this port. Only limited amount of ports can + // be added to the OS framework. + // + if (FLD_TEST_DRF(402C_CTRL, _I2C_GET_PORT_INFO, _IMPLEMENTED, + _YES, i2cPortInfoParams.info[x]) && + FLD_TEST_DRF(402C_CTRL, _I2C_GET_PORT_INFO, _VALID, + _YES, i2cPortInfoParams.info[x])) + { + rm_i2c_add_adapter(pNv, x, 0); + } + } + } + } + + // + // Now set up the i2c-over-DpAux adapters - one per DP OD + // + // 1. Perform NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS RM control which + // will return the mask for all the display ID's. + // 2. Loop for all the display ID's and do + // NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO RM control call. For each + // output resource, check for the following requirements: + // a. It must be DisplayPort. + // b. It must be internal to the GPU (ie, not on the board) + // c. It must be directly connected to the physical connector (ie, no DP + // 1.2 multistream ODs). + // 3. Perform NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID RM control for + // getting the I2C port data. + // + // With these restrictions, we should only end up with at most one OD + // per DP connector. 
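+    // (Requirement (c) is enforced by rejecting dynamic display IDs below,
+    //  i.e. orInfoParams.bIsDispDynamic must be NV_FALSE.)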
+ // + + if (pNv->rmapi.hDisp == 0) + { + return; + } + + systemGetSupportedParams.subDeviceInstance = 0; + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp, + NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, + &systemGetSupportedParams, sizeof(systemGetSupportedParams)); + + if (status != NV_OK) + { + return; + } + + for (displayMask = systemGetSupportedParams.displayMask; + displayMask != 0; + displayMask &= ~LOWESTBIT(displayMask)) + { + NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS orInfoParams = { 0 }; + NvU32 displayId = LOWESTBIT(displayMask); + + orInfoParams.subDeviceInstance = 0; + orInfoParams.displayId = displayId; + + status = pRmApi->Control(pRmApi, pNv->rmapi.hClient, pNv->rmapi.hDisp, + NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, + &orInfoParams, sizeof(orInfoParams)); + + if ((status == NV_OK) && + (orInfoParams.type == NV0073_CTRL_SPECIFIC_OR_TYPE_SOR) && + ((orInfoParams.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A) || + (orInfoParams.protocol == NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B)) && + (orInfoParams.location == NV0073_CTRL_SPECIFIC_OR_LOCATION_CHIP) && + (!orInfoParams.bIsDispDynamic)) + { + NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS i2cPortIdParams = { 0 }; + + i2cPortIdParams.subDeviceInstance = 0; + i2cPortIdParams.displayId = displayId; + + status = pRmApi->Control(pRmApi, + pNv->rmapi.hClient, + pNv->rmapi.hDisp, + NV0073_CTRL_CMD_SPECIFIC_GET_I2C_PORTID, + &i2cPortIdParams, + sizeof(i2cPortIdParams)); + + if ((status == NV_OK) && + (i2cPortIdParams.ddcPortId != NV0073_CTRL_SPECIFIC_I2C_PORT_NONE)) + { + rm_i2c_add_adapter(pNv, i2cPortIdParams.ddcPortId - 1, displayId); + } + } + } +} + +void NV_API_CALL rm_i2c_remove_adapters( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + // + // Cycle through all adapter entries, and first remove the adapter + // from the list from the kernel, then remove the i2c adapter + // list once that is completed. This should only be used from exit + // module time. Otherwise it could fail to remove some of the + // kernel adapters and subsequent transfer requests would result + // in crashes. 
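+    // Each removed entry is also fully reset below (pOsAdapter, port and all
+    // displayId slots) so stale state cannot match later adapter lookups.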
+ // + NvU32 x = 0; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NvU32 numDispId; + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + if (pNvp->i2c_adapters[x].pOsAdapter != NULL) + { + nv_i2c_del_adapter(pNv, pNvp->i2c_adapters[x].pOsAdapter); + + pNvp->i2c_adapters[x].pOsAdapter = NULL; + pNvp->i2c_adapters[x].port = 0; + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + pNvp->i2c_adapters[x].displayId[numDispId] = INVALID_DISP_ID; + } + } + } +} + +NvBool NV_API_CALL rm_i2c_is_smbus_capable( + nvidia_stack_t *sp, + nv_state_t *pNv, + void *pI2cAdapter +) +{ + THREAD_STATE_NODE threadState; + nv_priv_t *pNvp = NV_GET_NV_PRIV(pNv); + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = NULL; + NvBool unlock = NV_FALSE; + NvU32 x; + NvBool ret = NV_FALSE; + void *fp; + NvU32 numDispId = 0; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (pNvp->flags & NV_INIT_FLAG_PUBLIC_I2C) + { + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_I2C)) != NV_OK) + goto semafinish; + + unlock = NV_TRUE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + if (!pGpu) + { + goto semafinish; + } + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + if (pNvp->i2c_adapters[x].pOsAdapter == pI2cAdapter) + { + break; + } + } + + if (x == MAX_I2C_ADAPTERS) + { + goto semafinish; + } + + // we do not support smbus functions on i2c-over-DPAUX + for (numDispId = 0; numDispId < MAX_DISP_ID_PER_ADAPTER; numDispId++) + { + if (pNvp->i2c_adapters[x].displayId[numDispId] == 0x0) + { + ret = NV_TRUE; + } + } + +semafinish: + if (unlock) + { + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return ret; +} + +NV_STATUS NV_API_CALL rm_perform_version_check( + nvidia_stack_t *sp, + void *pData, + NvU32 dataSize +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + rmStatus = RmPerformVersionCheck(pData, dataSize); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_system_event( + nvidia_stack_t *sp, + NvU32 event_type, + NvU32 event_val +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + nv_state_t *nv; + OBJGPU *pGpu = gpumgrGetGpu(0);// Grab the first GPU + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == NV_OK) + { + if (pGpu != NULL) + { + nv = NV_GET_NV_STATE(pGpu); + if ((rmStatus = os_ref_dynamic_power(nv, NV_DYNAMIC_PM_FINE)) == + NV_OK) + { + // LOCK: acquire GPU lock + if ((rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_EVENT)) == + NV_OK) + { + rmStatus = RmSystemEvent(nv, event_type, event_val); + + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + os_unref_dynamic_power(nv, NV_DYNAMIC_PM_FINE); + } + // UNLOCK: release API lock + rmApiLockRelease(); + } + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_p2p_dma_map_pages( + nvidia_stack_t *sp, + nv_dma_device_t *peer, + NvU8 *pGpuUuid, + NvU32 pageSize, + NvU32 pageCount, + NvU64 *pDmaAddresses, + void **ppPriv +) +{ + THREAD_STATE_NODE threadState; + 
NV_STATUS rmStatus; + void *fp; + + if (ppPriv == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *ppPriv = NULL; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK) + { + OBJGPU *pGpu = gpumgrGetGpuFromUuid(pGpuUuid, + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY)); + if (pGpu == NULL) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + else + { + NvU32 i; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING)) + { + NV_ASSERT(pageSize == os_page_size); + + rmStatus = nv_dma_map_alloc(peer, pageCount, pDmaAddresses, + NV_FALSE, ppPriv); + } + else + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + for (i = 0; i < pageCount; i++) + { + // Peer mappings through this API are always via BAR1 + rmStatus = nv_dma_map_peer(peer, nv->dma_dev, 0x1, + pageSize / os_page_size, + &pDmaAddresses[i]); + if ((rmStatus != NV_OK) && (i > 0)) + { + NvU32 j; + for (j = i - 1; j < pageCount; j--) + { + nv_dma_unmap_peer(peer, pageSize / os_page_size, + pDmaAddresses[j]); + } + } + } + } + } + + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_p2p_get_gpu_info( + nvidia_stack_t *sp, + NvU64 gpuVirtualAddress, + NvU64 length, + NvU8 **ppGpuUuid, + void **ppGpuInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P); + if (rmStatus == NV_OK) + { + OBJGPU *pGpu; + rmStatus = RmP2PGetGpuByAddress(gpuVirtualAddress, + length, + &pGpu); + if (rmStatus == NV_OK) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + const NvU8 *pGid; + + pGid = RmGetGpuUuidRaw(nv); + if (pGid == NULL) + { + rmStatus = NV_ERR_GPU_UUID_NOT_FOUND; + } + else + { + rmStatus = os_alloc_mem((void **)ppGpuUuid, GPU_UUID_LEN); + if (rmStatus == NV_OK) + { + os_mem_copy(*ppGpuUuid, pGid, GPU_UUID_LEN); + } + } + + *ppGpuInfo = (void *) pGpu; + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_p2p_get_pages_persistent( + nvidia_stack_t *sp, + NvU64 gpuVirtualAddress, + NvU64 length, + void **p2pObject, + NvU64 *pPhysicalAddresses, + NvU32 *pEntries, + void *pPlatformData, + void *pGpuInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK) + { + rmStatus = RmP2PGetPagesPersistent(gpuVirtualAddress, + length, + p2pObject, + pPhysicalAddresses, + pEntries, + pPlatformData, + pGpuInfo); + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_p2p_get_pages( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 gpuVirtualAddress, + NvU64 length, + NvU64 *pPhysicalAddresses, + NvU32 *pWreqMbH, + NvU32 *pRreqMbH, + NvU32 *pEntries, + NvU8 **ppGpuUuid, + void *pPlatformData +) +{ + THREAD_STATE_NODE 
threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK) + { + OBJGPU *pGpu; + rmStatus = RmP2PGetPagesWithoutCallbackRegistration(p2pToken, + vaSpaceToken, + gpuVirtualAddress, + length, + pPhysicalAddresses, + pWreqMbH, + pRreqMbH, + pEntries, + &pGpu, + pPlatformData); + if (rmStatus == NV_OK) + { + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + const NvU8 *pGid; + + pGid = RmGetGpuUuidRaw(nv); + if (pGid == NULL) + { + NV_ASSERT_OK(RmP2PPutPages(p2pToken, vaSpaceToken, + gpuVirtualAddress, + pPlatformData)); + rmStatus = NV_ERR_GENERIC; + } + else + { + rmStatus = os_alloc_mem((void **)ppGpuUuid, GPU_UUID_LEN); + if (rmStatus == NV_OK) + os_mem_copy(*ppGpuUuid, pGid, GPU_UUID_LEN); + } + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_p2p_register_callback( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU64 gpuVirtualAddress, + NvU64 length, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK) + { + rmStatus = RmP2PRegisterCallback(p2pToken, gpuVirtualAddress, length, + pPlatformData, pFreeCallback, pData); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_p2p_put_pages_persistent( + nvidia_stack_t *sp, + void *p2pObject, + void *pKey +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK) + { + rmStatus = RmP2PPutPagesPersistent(p2pObject, pKey); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_p2p_put_pages( + nvidia_stack_t *sp, + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 gpuVirtualAddress, + void *pKey +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + if ((rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_P2P)) == NV_OK) + { + rmStatus = RmP2PPutPages(p2pToken, + vaSpaceToken, + gpuVirtualAddress, + pKey); + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +char* NV_API_CALL rm_get_gpu_uuid( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + NV_STATUS rmStatus; + const NvU8 *pGid; + OBJGPU *pGpu; + char *pGidString; + + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // Allocate space for the ASCII string + rmStatus = os_alloc_mem((void **)&pGidString, 
GPU_UUID_ASCII_LEN); + if (rmStatus != NV_OK) + { + pGidString = NULL; + goto done; + } + + // Get the raw UUID; note the pGid is cached, so we do not need to free it + pGid = RmGetGpuUuidRaw(nv); + + if (pGid != NULL) + { + // Convert the raw UUID to ASCII + rmStatus = RmGpuUuidRawToString(pGid, pGidString, GPU_UUID_ASCII_LEN); + if (rmStatus != NV_OK) + { + os_free_mem(pGidString); + pGidString = NULL; + } + } + else + { + const char *pTmpString; + + // No raw GID, but we still return a string + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if (rmStatus == NV_ERR_NOT_SUPPORTED && pGpu != NULL && + pGpu->getProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED)) + pTmpString = "N/A"; + else + pTmpString = "GPU-???????\?-???\?-???\?-???\?-????????????"; + + portStringCopy(pGidString, GPU_UUID_ASCII_LEN, pTmpString, + portStringLength(pTmpString) + 1); + } + +done: + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return pGidString; +} + +// +// This function will return the UUID in the binary format +// +const NvU8 * NV_API_CALL rm_get_gpu_uuid_raw( + nvidia_stack_t *sp, + nv_state_t *nv) +{ + THREAD_STATE_NODE threadState; + void *fp; + const NvU8 *pGid; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGid = RmGetGpuUuidRaw(nv); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return pGid; +} + +static void rm_set_firmware_logs( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + NV_STATUS status; + NvU32 data; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + status = RmReadRegistryDword(nv, NV_REG_ENABLE_GPU_FIRMWARE_LOGS, &data); + if (status == NV_OK) + { + if ((data == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE) +#if defined(DEBUG) || defined(DEVELOP) + || (data == NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG) +#endif + ) + { + nv->enable_firmware_logs = NV_TRUE; + } + } + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_set_rm_firmware_requested( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + nv->request_firmware = NV_TRUE; + nv->allow_fallback_to_monolithic_rm = NV_FALSE; + + // Check if we want firmware logs + if (nv->request_firmware) + rm_set_firmware_logs(sp, nv); +} + +// +// This function will be called by nv_procfs_read_gpu_info(). +// nv_procfs_read_gpu_info() will not print the 'GPU Firmware:' field at +// all if the 'version' string is empty. +// +// If GSP is enabled (firmware was requested), this function needs to return +// the firmware version or "NA" in case of any errors. +// +// If GSP is not enabled (firmware was not requested), this function needs to +// return the empty string, regardless of error cases. 
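+// On success, the firmware version string reported by
+// NV2080_CTRL_CMD_GSP_GET_FEATURES is copied into 'version'.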
+// +void NV_API_CALL rm_get_firmware_version( + nvidia_stack_t *sp, + nv_state_t *nv, + char *version, + NvLength version_length +) +{ + NV2080_CTRL_GSP_GET_FEATURES_PARAMS params = { 0 }; + RM_API *pRmApi; + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus = NV_OK; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_GPU); + if (pRmApi != NULL) + { + rmStatus = pRmApi->Control(pRmApi, + nv->rmapi.hClient, + nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GSP_GET_FEATURES, + ¶ms, + sizeof(params)); + + RmUnixRmApiEpilogue(nv, &threadState); + } + else + { + rmStatus = NV_ERR_INVALID_STATE; + } + + if (rmStatus != NV_OK) + { + if (RMCFG_FEATURE_GSP_CLIENT_RM && nv->request_firmware) + { + const char *pTmpString = "N/A"; + portStringCopy(version, version_length, pTmpString, portStringLength(pTmpString) + 1); + } + NV_PRINTF(LEVEL_INFO, + "%s: Failed to query gpu build versions, status=0x%x\n", + __FUNCTION__, + rmStatus); + goto finish; + } + portMemCopy(version, version_length, params.firmwareVersion, sizeof(params.firmwareVersion)); + +finish: + NV_EXIT_RM_RUNTIME(sp,fp); +} + +// +// disable GPU SW state persistence +// + +void NV_API_CALL rm_disable_gpu_state_persistence(nvidia_stack_t *sp, nv_state_t *nv) +{ + THREAD_STATE_NODE threadState; + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, NV_FALSE); + osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, NV_FALSE); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_log_gpu_crash( + nv_stack_t *sp, + nv_state_t *nv +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if ((status = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG)) == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if ((pGpu != NULL) && + ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG)) == NV_OK)) + { + status = RmLogGpuCrash(pGpu); + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return status; +} + +void NV_API_CALL rm_kernel_rmapi_op(nvidia_stack_t *sp, void *ops_cmd) +{ + nvidia_kernel_rmapi_ops_t *ops = ops_cmd; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + switch (ops->op) + { + case NV01_FREE: + Nv01FreeKernel(&ops->params.free); + break; + + case NV01_ALLOC_MEMORY: + Nv01AllocMemoryKernel(&ops->params.allocMemory64); + break; + + case NV04_ALLOC: + Nv04AllocKernel(&ops->params.alloc); + break; + + case NV04_VID_HEAP_CONTROL: + Nv04VidHeapControlKernel(ops->params.pVidHeapControl); + break; + + case NV04_MAP_MEMORY: + Nv04MapMemoryKernel(&ops->params.mapMemory); + break; + + case NV04_UNMAP_MEMORY: + Nv04UnmapMemoryKernel(&ops->params.unmapMemory); + break; + + case NV04_ALLOC_CONTEXT_DMA: + Nv04AllocContextDmaKernel(&ops->params.allocContextDma2); + break; + + case NV04_MAP_MEMORY_DMA: + Nv04MapMemoryDmaKernel(&ops->params.mapMemoryDma); + break; + + case NV04_UNMAP_MEMORY_DMA: + Nv04UnmapMemoryDmaKernel(&ops->params.unmapMemoryDma); + break; + + case NV04_BIND_CONTEXT_DMA: + Nv04BindContextDmaKernel(&ops->params.bindContextDma); + break; + + case NV04_CONTROL: + Nv04ControlKernel(&ops->params.control); + break; + + 
case NV04_DUP_OBJECT: + Nv04DupObjectKernel(&ops->params.dupObject); + break; + + case NV04_SHARE: + Nv04ShareKernel(&ops->params.share); + break; + + case NV04_ADD_VBLANK_CALLBACK: + Nv04AddVblankCallbackKernel(&ops->params.addVblankCallback); + break; + } + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +// +// ACPI method (NVIF/_DSM/WMMX/MXM*/etc.) initialization +// +void RmInitAcpiMethods(OBJOS *pOS, OBJSYS *pSys, OBJGPU *pGpu) +{ + NvU32 handlesPresent; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_NVIF_INIT_DONE)) + return; + + nv_acpi_methods_init(&handlesPresent); + +} + +// +// ACPI method (NVIF/_DSM/WMMX/MXM*/etc.) teardown +// +void RmUnInitAcpiMethods(OBJSYS *pSys) +{ + pSys->setProperty(pSys, PDB_PROP_SYS_NVIF_INIT_DONE, NV_FALSE); + + nv_acpi_methods_uninit(); +} + +// +// Converts an array of OS page address to an array of RM page addresses.This +// assumes that: +// (1) The pteArray is at least pageCount entries large, +// (2) The pageCount is given in RM pages, and +// (3) The OS page entries start at index 0. +// +void RmInflateOsToRmPageArray(RmPhysAddr *pteArray, NvU64 pageCount) +{ + NvUPtr osPageIdx, osPageOffset; + NvU64 i; + + // + // We can do the translation in place by moving backwards, since there + // will always be more RM pages than OS pages + // + for (i = pageCount - 1; i != NV_U64_MAX; i--) + { + osPageIdx = i >> NV_RM_TO_OS_PAGE_SHIFT; + osPageOffset = (i & ((1 << NV_RM_TO_OS_PAGE_SHIFT) - 1)) * + NV_RM_PAGE_SIZE; + pteArray[i] = pteArray[osPageIdx] + osPageOffset; + } +} + +void RmDeflateRmToOsPageArray(RmPhysAddr *pteArray, NvU64 pageCount) +{ + NvU64 i; + + for (i = 0; i < NV_RM_PAGES_TO_OS_PAGES(pageCount); i++) + { + pteArray[i] = pteArray[(i << NV_RM_TO_OS_PAGE_SHIFT)]; + } + + // Zero out the rest of the addresses, which are now invalid + portMemSet(pteArray + i, 0, sizeof(*pteArray) * (pageCount - i)); +} + +NvBool NV_API_CALL +rm_get_device_remove_flag +( + nvidia_stack_t * sp, + NvU32 gpu_id +) +{ + THREAD_STATE_NODE threadState; + void *fp; + NvBool bRemove; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (gpumgrQueryGpuDrainState(gpu_id, NULL, &bRemove) != NV_OK) + { + bRemove = NV_FALSE; + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + return bRemove; +} + +NvBool NV_API_CALL +rm_gpu_need_4k_page_isolation +( + nv_state_t *nv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + return nvp->b_4k_page_isolation_required; +} + +NV_STATUS NV_API_CALL rm_get_gpu_numa_info( + nvidia_stack_t *sp, + nv_state_t *nv, + NvS32 *pNid, + NvU64 *pNumaMemAddr, + NvU64 *pNumaMemSize, + NvU64 *pOfflineAddresses, + NvU32 *pOfflineAddressesCount +) +{ + NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS *pParams; + RM_API *pRmApi; + THREAD_STATE_NODE threadState; + void *fp; + NV_STATUS status = NV_OK; + + if ((pNid == NULL) || (pNumaMemAddr == NULL) || (pNumaMemAddr == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if ((pOfflineAddressesCount != NULL) && + ((pOfflineAddresses == NULL) || + (*pOfflineAddressesCount > NV_ARRAY_ELEMENTS(pParams->numaOfflineAddresses)))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ENTER_RM_RUNTIME(sp,fp); + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + NV_EXIT_RM_RUNTIME(sp,fp); + return NV_ERR_NO_MEMORY; + } + + portMemSet(pParams, 0, sizeof(*pParams)); + + if (pOfflineAddressesCount != NULL) + { + pParams->numaOfflineAddressesCount = *pOfflineAddressesCount; + } + + pRmApi = RmUnixRmApiPrologue(nv, &threadState, 
RM_LOCK_MODULES_MEM); + if (pRmApi == NULL) + { + status = NV_ERR_INVALID_STATE; + goto finish; + } + + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_FB_GET_NUMA_INFO, + pParams, sizeof(*pParams)); + + RmUnixRmApiEpilogue(nv, &threadState); + + if (status == NV_OK) + { + NvU32 i; + + *pNid = pParams->numaNodeId; + *pNumaMemAddr = pParams->numaMemAddr; + *pNumaMemSize = pParams->numaMemSize; + *pOfflineAddressesCount = pParams->numaOfflineAddressesCount; + + for (i = 0; i < pParams->numaOfflineAddressesCount; i++) + { + pOfflineAddresses[i] = pParams->numaOfflineAddresses[i]; + } + } + +finish: + portMemFree(pParams); + + NV_EXIT_RM_RUNTIME(sp,fp); + + return status; +} + +NV_STATUS NV_API_CALL rm_gpu_numa_online( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS params = { 0 }; + RM_API *pRmApi; + THREAD_STATE_NODE threadState; + void *fp; + NV_STATUS status = NV_OK; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_MEM); + if (pRmApi == NULL) + { + status = NV_ERR_INVALID_STATE; + goto finish; + } + + params.bOnline = NV_TRUE; + + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS, + ¶ms, sizeof(params)); + + RmUnixRmApiEpilogue(nv, &threadState); + +finish: + NV_EXIT_RM_RUNTIME(sp,fp); + + return status; +} + + +NV_STATUS NV_API_CALL rm_gpu_numa_offline( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS params = { 0 }; + RM_API *pRmApi; + THREAD_STATE_NODE threadState; + void *fp; + NV_STATUS status = NV_OK; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pRmApi = RmUnixRmApiPrologue(nv, &threadState, RM_LOCK_MODULES_MEM); + if (pRmApi == NULL) + { + status = NV_ERR_INVALID_STATE; + goto finish; + } + + params.bOnline = NV_FALSE; + + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, + nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_FB_UPDATE_NUMA_STATUS, + ¶ms, sizeof(params)); + + RmUnixRmApiEpilogue(nv, &threadState); + +finish: + NV_EXIT_RM_RUNTIME(sp,fp); + + return status; +} + +// +// A device is considered "sequestered" if it has drain state enabled for it. +// The kernel interface layer can use this to check the drain state of a device +// in paths outside of initialization, e.g., when clients attempt to reference +// count the device. +// +NvBool NV_API_CALL rm_is_device_sequestered( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + NvBool bDrain = NV_FALSE; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU) == NV_OK) + { + // + // If gpumgrQueryGpuDrainState succeeds, bDrain will be set as needed. + // If gpumgrQueryGpuDrainState fails, bDrain will stay false; we assume + // that if core RM can't tell us the drain state, it must not be + // attached and the "sequestered" question is not relevant. + // + (void) gpumgrQueryGpuDrainState(pNv->gpu_id, &bDrain, NULL); + + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + return bDrain; +} + +void NV_API_CALL rm_check_for_gpu_surprise_removal( + nvidia_stack_t *sp, + nv_state_t *nv +) +{ + THREAD_STATE_NODE threadState; + void *fp; + NV_STATUS rmStatus; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock. 
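+    // The API lock is taken in read mode; the per-device GPU lock acquired
+    // below serializes the actual surprise removal handling.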
+ if ((rmStatus = rmApiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU)) == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + if ((rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GPU)) == NV_OK) + { + osHandleGpuLost(pGpu); + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release api lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_set_external_kernel_client_count( + nvidia_stack_t *sp, + nv_state_t *pNv, + NvBool bIncr +) +{ + THREAD_STATE_NODE threadState; + void *fp; + OBJGPU *pGpu; + NV_STATUS rmStatus = NV_OK; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(pNv); + + if (pGpu != NULL) + { + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU); + if (rmStatus == NV_OK) + { + rmStatus = gpuSetExternalKernelClientCount(pGpu, bIncr); + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NvBool rm_get_uefi_console_status( + nv_state_t *nv +) +{ + NvU16 fbWidth, fbHeight, fbDepth, fbPitch; + NvU64 fbSize; + NvU64 fbBaseAddress = 0; + NvBool bConsoleDevice = NV_FALSE; + + // os_get_screen_info() will return dimensions and an address for + // any fbdev driver (e.g., efifb, vesafb, etc). To find if this is a + // UEFI console check the fbBaseAddress: if it was set up by the EFI GOP + // driver, it will point into BAR1 (FB); if it was set up by the VBIOS, + // it will point to BAR2 + 16MB. + os_get_screen_info(&fbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch, + nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address, + nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000); + + fbSize = fbHeight * fbPitch; + + bConsoleDevice = (fbSize != 0); + + return bConsoleDevice; +} + +NvU64 rm_get_uefi_console_size( + nv_state_t *nv, + NvU64 *pFbBaseAddress +) +{ + NvU16 fbWidth, fbHeight, fbDepth, fbPitch; + NvU64 fbSize; + + fbSize = fbWidth = fbHeight = fbDepth = fbPitch = 0; + + // os_get_screen_info() will return dimensions and an address for + // any fbdev driver (e.g., efifb, vesafb, etc). To find if this is a + // UEFI console check the fbBaseAddress: if it was set up by the EFI GOP + // driver, it will point into BAR1 (FB); if it was set up by the VBIOS, + // it will point to BAR2 + 16MB. + os_get_screen_info(pFbBaseAddress, &fbWidth, &fbHeight, &fbDepth, &fbPitch, + nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address, + nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000); + + fbSize = fbHeight * fbPitch; + + return fbSize; +} + +/* + * IOMMU needs to be present on the server to support SR-IOV vGPU, unless + * we have SR-IOV enabled for remote GPU. 
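+ * The remote GPU case is detected via the NV_REG_STR_RM_REMOTE_GPU registry
+ * key below.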
+ */ + +NvBool NV_API_CALL rm_is_iommu_needed_for_sriov( + nvidia_stack_t *sp, + nv_state_t * nv +) +{ + OBJGPU *pGpu; + NvU32 data; + NvBool ret = NV_TRUE; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_REMOTE_GPU, &data) == NV_OK) + { + if (data == NV_REG_STR_RM_REMOTE_GPU_ENABLE) + ret = NV_FALSE; + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return ret; +} + +NvBool NV_API_CALL rm_disable_iomap_wc(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + return pSys->pCl->getProperty(pSys, PDB_PROP_CL_DISABLE_IOMAP_WC) == NV_TRUE; +} + +// +// Verifies the handle, offset and size and dups hMemory. +// Must be called with API lock and GPU lock held. +// +NV_STATUS NV_API_CALL rm_dma_buf_dup_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hSrcClient, + NvHandle hDstClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + NvHandle *phMemoryDuped +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + OBJGPU *pGpu; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + NV_ASSERT(rmApiLockIsOwner()); + + NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + rmStatus = RmDmabufVerifyMemHandle(pGpu, hSrcClient, hMemory, + offset, size, pGpuInstanceInfo); + if (rmStatus == NV_OK) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemoryDuped = 0; + + rmStatus = pRmApi->DupObject(pRmApi, + hDstClient, + hDevice, + &hMemoryDuped, + hSrcClient, + hMemory, + 0); + if (rmStatus == NV_OK) + { + *phMemoryDuped = hMemoryDuped; + } + else if (rmStatus == NV_ERR_INVALID_OBJECT_PARENT) + { + hMemoryDuped = 0; + + // If duping under Device fails, try duping under Subdevice + rmStatus = pRmApi->DupObject(pRmApi, + hDstClient, + hSubdevice, + &hMemoryDuped, + hSrcClient, + hMemory, + 0); + if (rmStatus == NV_OK) + { + *phMemoryDuped = hMemoryDuped; + } + } + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +// +// Frees dup'd hMemory. +// Must be called with API lock and GPU lock held. +// +void NV_API_CALL rm_dma_buf_undup_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory +) +{ + THREAD_STATE_NODE threadState; + RM_API *pRmApi; + OBJGPU *pGpu; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + + NV_ASSERT(rmApiLockIsOwner()); + + NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + pRmApi->Free(pRmApi, hClient, hMemory); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +// +// Maps a handle to BAR1. +// Must be called with API lock and GPU lock held. 
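+// On success, *pBar1Va receives the BAR1 mapping for the requested range; it
+// is released later with rm_dma_buf_unmap_mem_handle().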
+// +NV_STATUS NV_API_CALL rm_dma_buf_map_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + NvU64 *pBar1Va +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + OBJGPU *pGpu; + KernelBus *pKernelBus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NV_ASSERT(rmApiLockIsOwner()); + + NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + rmStatus = kbusMapFbApertureByHandle(pGpu, pKernelBus, hClient, + hMemory, offset, size, pBar1Va); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +// +// Unmaps a handle from BAR1. +// Must be called with API lock and GPU lock held. +// +NV_STATUS NV_API_CALL rm_dma_buf_unmap_mem_handle( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hMemory, + NvU64 size, + NvU64 bar1Va +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + OBJGPU *pGpu; + KernelBus *pKernelBus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NV_ASSERT(rmApiLockIsOwner()); + + NV_ASSERT(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + rmStatus = kbusUnmapFbApertureByHandle(pGpu, pKernelBus, hClient, + hMemory, bar1Va); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_dma_buf_get_client_and_device( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubdevice, + void **ppGpuInstanceInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + rmStatus = RmDmabufGetClientAndDevice(pGpu, hClient, phClient, phDevice, + phSubdevice, ppGpuInstanceInfo); + + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + + return rmStatus; +} + +void NV_API_CALL rm_dma_buf_put_client_and_device( + nvidia_stack_t *sp, + nv_state_t *nv, + NvHandle hClient, + NvHandle hDevice, + NvHandle hSubdevice, + void *pGpuInstanceInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // LOCK: acquire API lock + rmStatus = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_OSAPI); + if (rmStatus == NV_OK) + { + RmDmabufPutClientAndDevice(pGpu, hClient, hDevice, hSubdevice, + pGpuInstanceInfo); + + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + // UNLOCK: release API lock + rmApiLockRelease(); + } + NV_ASSERT_OK(rmStatus); + + threadStateFree(&threadState, 
THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} diff --git a/src/nvidia/arch/nvalloc/unix/src/osinit.c b/src/nvidia/arch/nvalloc/unix/src/osinit.c new file mode 100644 index 000000000..49ede1008 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/osinit.c @@ -0,0 +1,2263 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/************************************************************************************************************** +* +* Description: +* UNIX-general, device-independent initialization code for +* the resource manager. +* +* +**************************************************************************************************************/ + +#include +#include // NV device driver interface +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "gpu/gpu.h" +#include +#include "gpu/bus/kern_bus.h" +#include "nverror.h" +#include +#include +#include +#include +#include "kernel/gpu/intr/intr.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "gpu_mgr/gpu_db.h" +#include +#include +#include +#include + +#include +// RMCONFIG: need definition of REGISTER_ALL_HALS() +#include "g_hal_register.h" + +typedef enum +{ + RM_INIT_OK, + + /* general os errors */ + RM_INIT_REG_SETUP_FAILED = 0x10, + RM_INIT_SYS_ENVIRONMENT_FAILED, + + /* gpu errors */ + RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED = 0x20, + RM_INIT_GPU_GPUMGR_CREATE_DEV_FAILED, + RM_INIT_GPU_GPUMGR_ATTACH_GPU_FAILED, + RM_INIT_GPU_PRE_INIT_FAILED, + RM_INIT_GPU_STATE_INIT_FAILED, + RM_INIT_GPU_LOAD_FAILED, + RM_INIT_GPU_UNIVERSAL_VALIDATION_FAILED, + RM_INIT_GPU_DMA_CONFIGURATION_FAILED, + + /* vbios errors */ + RM_INIT_VBIOS_FAILED = 0x30, + RM_INIT_VBIOS_POST_FAILED, + RM_INIT_VBIOS_X86EMU_FAILED, + + /* scalability errors */ + RM_INIT_SCALABILITY_FAILED = 0x40, + + /* general core rm errors */ + RM_INIT_WATCHDOG_FAILED, + RM_FIFO_GET_UD_BAR1_MAP_INFO_FAILED, + RM_GPUDB_REGISTER_FAILED, + + RM_INIT_ALLOC_RMAPI_FAILED, + RM_INIT_GPUINFO_WITH_RMAPI_FAILED, + + /* rm firmware errors */ + RM_INIT_FIRMWARE_POLICY_FAILED = 0x60, + RM_INIT_FIRMWARE_FETCH_FAILED, + RM_INIT_FIRMWARE_VALIDATION_FAILED, + RM_INIT_FIRMWARE_INIT_FAILED, + + RM_INIT_MAX_FAILURES +} rm_init_status; + 
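+// The enum value ranges above identify the failing phase: 0x10 general OS
+// errors, 0x20 GPU errors, 0x30 VBIOS, 0x40 scalability and core RM,
+// 0x60 RM firmware.
+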
+typedef rm_init_status RM_INIT_STATUS; + +typedef struct { + RM_INIT_STATUS initStatus; + NV_STATUS rmStatus; + NvU32 line; +} UNIX_STATUS; + +#define INIT_UNIX_STATUS { RM_INIT_OK, NV_OK, 0 } +#define RM_INIT_SUCCESS(init) ((init) == RM_INIT_OK) + +#define RM_SET_ERROR(status, err) { (status).initStatus = (err); \ + (status).line = __LINE__; } + + +// +// GPU architectures support DMA addressing up to a certain address width, +// above which all other bits in any given DMA address must not vary +// (e.g., all 0). This value is the minimum of the DMA addressing +// capabilities, in number of physical address bits, for all supported +// GPU architectures. +// +#define NV_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH 36 + +// +// All GPU architectures with GSP support at least 47 physical address bits. +// +#define NV_GSP_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH 47 + +static inline NvU64 nv_encode_pci_info(nv_pci_info_t *pci_info) +{ + return gpuEncodeDomainBusDevice(pci_info->domain, pci_info->bus, pci_info->slot); +} + +static inline NvU32 nv_generate_id_from_pci_info(nv_pci_info_t *pci_info) +{ + return gpuGenerate32BitId(pci_info->domain, pci_info->bus, pci_info->slot); +} + +static inline void nv_os_map_kernel_space(nv_state_t *nv, nv_aperture_t *aperture) +{ + NV_ASSERT(aperture->map == NULL); + + // let's start off assuming a standard device and map the registers + // normally. It is unfortunate to hard-code the register size here, but we don't + // want to fail trying to map all of a multi-devices' register space + aperture->map = osMapKernelSpace(aperture->cpu_address, + aperture->size, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + aperture->map_u = (nv_phwreg_t)aperture->map; +} + +// local prototypes +static NV_STATUS initCoreLogic(OBJGPU *); +static void initVendorSpecificRegistry(OBJGPU *, NvU16); +static NV_STATUS teardownCoreLogic(OBJOS *, OBJGPU *); +static void initUnixSpecificRegistry(OBJGPU *); + +NvBool osRmInitRm(OBJOS *pOS) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU64 system_memory_size = (NvU64)-1; + + NV_PRINTF(LEVEL_INFO, "init rm\n"); + + if (os_is_efi_enabled()) + { + pSys->setProperty(pSys, PDB_PROP_SYS_IS_UEFI, NV_TRUE); + } + + // have to init this before the debug subsystem, which will + // try to check the value of ResmanDebugLevel + RmInitRegistry(); + + NvU32 data = 0; + if ((osReadRegistryDword(NULL, + NV_REG_ENABLE_PCIE_GEN3, &data) == NV_OK) && + (data != 0)) + { + data = DRF_DEF(_REG_STR, _RM_PCIE, _LINK_SPEED_ALLOW_GEN3, _ENABLE); + + osWriteRegistryDword(NULL, + NV_REG_STR_RM_PCIE_LINK_SPEED, data); + } + + if (!(osReadRegistryDword(NULL, + NV_REG_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, + &data) == NV_OK) || data) + { + pSys->setProperty(pSys, + PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS, + NV_TRUE); + } + + if ((osReadRegistryDword(NULL, + NV_REG_ENABLE_STREAM_MEMOPS, + &data) == NV_OK) && (data != 0)) + { + data = DRF_DEF(_REG_STR, _RM_STREAM_MEMOPS, _ENABLE, _YES); + osWriteRegistryDword(NULL, NV_REG_STR_RM_STREAM_MEMOPS, data); + } + + if ((osReadRegistryDword(NULL, + NV_REG_STR_RM_FORCE_BAR_PATH, + &data) == NV_OK) && (data == 1)) + { + // + // Exposing the GPU memory as NUMA node memory requires coherent CPU + // mappings over NVLink. If those mappings are disabled, also disable + // NUMA-onlining of the device memory. 
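+        // (Forcing the BAR path bypasses those coherent mappings, so
+        //  NV_REG_ENABLE_USER_NUMA_MANAGEMENT is cleared here.)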
+ // + osWriteRegistryDword(NULL, + NV_REG_ENABLE_USER_NUMA_MANAGEMENT, 0); + } + + if ((osReadRegistryDword(NULL, + NV_REG_NVLINK_DISABLE, &data) == NV_OK) && + (data == 1)) + { + osWriteRegistryDword(NULL, NV_REG_STR_RM_NVLINK_CONTROL, 0x1); + } + + if ((osReadRegistryDword(NULL, + NV_REG_ENABLE_PCIE_RELAXED_ORDERING_MODE, &data) == NV_OK) && + (data == 1)) + { + osWriteRegistryDword(NULL, + NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING, + NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_ENABLE); + } + + // init the debug subsystem if necessary + os_dbg_init(); + nvDbgInitRmMsg(NULL); + + // Force nvlog reinit since module params are now available + NVLOG_UPDATE(); + + // Register all supported hals + if (REGISTER_ALL_HALS() != NV_OK) + { + RmDestroyRegistry(NULL); + return NV_FALSE; + } + + system_memory_size = NV_RM_PAGES_PER_OS_PAGE * os_get_num_phys_pages(); + + // if known, relay the number of system memory pages (in terms of RM page + // size) to the RM; this is needed for e.g. TurboCache parts. + if (system_memory_size != (NvU64)-1) + pOS->SystemMemorySize = system_memory_size; + + // Setup any ThreadState defaults + threadStateInitSetupFlags(THREAD_STATE_SETUP_FLAGS_ENABLED | + THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED | + THREAD_STATE_SETUP_FLAGS_SLI_LOGIC_ENABLED | + THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED); + + return NV_TRUE; +} + +void RmShutdownRm(void) +{ + NV_PRINTF(LEVEL_INFO, "shutdown rm\n"); + + RmDestroyRegistry(NULL); + + // Free objects created with RmInitRm, including the system object + RmDestroyRm(); +} + +// +// osAttachGpu +// +// This routine is used as a callback by the gpumgrAttachGpu +// interface to allow os-dependent code to set up any state +// before engine construction begins. +// +NV_STATUS osAttachGpu( + OBJGPU *pGpu, + void *pOsGpuInfo +) +{ + nv_state_t *nv = (nv_state_t *)pOsGpuInfo; + nv_priv_t *nvp; + + nvp = NV_GET_NV_PRIV(nv); + + nvp->pGpu = pGpu; + + NV_SET_NV_STATE(pGpu, (void *)nv); + + initUnixSpecificRegistry(pGpu); + + // Assign default values to Registry keys for VGX + if (os_is_vgx_hyper()) + { + initVGXSpecificRegistry(pGpu); + } + + return NV_OK; +} + +NV_STATUS osDpcAttachGpu( + OBJGPU *pGpu, + void *pOsGpuInfo +) +{ + return NV_OK; // Nothing to do for unix +} + +void osDpcDetachGpu( + OBJGPU *pGpu +) +{ + return; // Nothing to do for unix +} + +NV_STATUS +osHandleGpuLost +( + OBJGPU *pGpu +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 pmc_boot_0; + + // Determine if we've already run the handler + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED)) + { + return NV_OK; + } + + pmc_boot_0 = NV_PRIV_REG_RD32(nv->regs->map_u, NV_PMC_BOOT_0); + if (pmc_boot_0 != nvp->pmc_boot_0) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *pBoardInfoParams; + NV_STATUS status; + + // + // This doesn't support PEX Reset and Recovery yet. + // This will help to prevent accessing registers of a GPU + // which has fallen off the bus. 
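+        // A mismatch between the cached pmc_boot_0 value and the fresh read
+        // above is what indicates the GPU has dropped off the bus.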
+ // + nvErrorLog_va((void *)pGpu, ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS, + "GPU has fallen off the bus."); + + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "GPU has fallen off the bus.\n"); + + pBoardInfoParams = portMemAllocNonPaged(sizeof(*pBoardInfoParams)); + if (pBoardInfoParams != NULL) + { + portMemSet(pBoardInfoParams, 0, sizeof(*pBoardInfoParams)); + + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, + nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO, + pBoardInfoParams, + sizeof(*pBoardInfoParams)); + if (status == NV_OK) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "GPU serial number is %s.\n", + pBoardInfoParams->serialNumber); + } + + portMemFree(pBoardInfoParams); + } + + gpuSetDisconnectedProperties(pGpu); + + // Trigger the OS's PCI recovery mechanism + if (nv_pci_trigger_recovery(nv) != NV_OK) + { + // + // Initiate a crash dump immediately, since the OS doesn't appear + // to have a mechanism wired up for attempted recovery. + // + (void) RmLogGpuCrash(pGpu); + } + else + { + // + // Make the SW state stick around until the recovery can start, but + // don't change the PDB property: this is only used to report to + // clients whether or not persistence mode is enabled, and we'll + // need it after the recovery callbacks to restore the correct + // persistence mode for the GPU. + // + osModifyGpuSwStatePersistence(pGpu->pOsGpuInfo, NV_TRUE); + } + + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + // Set SURPRISE_REMOVAL flag for eGPU to help in device removal. + if ((pCl != NULL) && + pCl->getProperty(pCl, PDB_PROP_CL_IS_EXTERNAL_GPU)) + { + nv->flags |= NV_FLAG_IN_SURPRISE_REMOVAL; + } + DBG_BREAKPOINT(); + } + + return NV_OK; +} + +/* + * Initialize the required GPU information by doing RMAPI control calls + * and store the same in the UNIX specific data structures. 
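+ * Currently this caches the 4K page isolation requirement, the mobile config
+ * state and the dma-buf capability queried via NV2080_CTRL_CMD_GPU_GET_INFO_V2.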
+ */ +static NV_STATUS +RmInitGpuInfoWithRmApi +( + OBJGPU *pGpu +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams = { 0 }; + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status != NV_OK) + { + return status; + } + + pGpuInfoParams = portMemAllocNonPaged(sizeof(*pGpuInfoParams)); + if (pGpuInfoParams == NULL) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + return NV_ERR_NO_MEMORY; + } + + + portMemSet(pGpuInfoParams, 0, sizeof(*pGpuInfoParams)); + + pGpuInfoParams->gpuInfoListSize = 3; + pGpuInfoParams->gpuInfoList[0].index = NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED; + pGpuInfoParams->gpuInfoList[1].index = NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED; + pGpuInfoParams->gpuInfoList[2].index = NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY; + + status = pRmApi->Control(pRmApi, nv->rmapi.hClient, + nv->rmapi.hSubDevice, + NV2080_CTRL_CMD_GPU_GET_INFO_V2, + pGpuInfoParams, sizeof(*pGpuInfoParams)); + + if (status == NV_OK) + { + nvp->b_4k_page_isolation_required = + (pGpuInfoParams->gpuInfoList[0].data == + NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_YES); + nvp->b_mobile_config_enabled = + (pGpuInfoParams->gpuInfoList[1].data == + NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_YES); + nv->dma_buf_supported = + (pGpuInfoParams->gpuInfoList[2].data == + NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_YES); + } + + portMemFree(pGpuInfoParams); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + return status; +} + +static void RmSetSocDispDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvAddr = (GPUHWREG*) nv->regs->map; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvPAddr = nv->regs->cpu_address; + gpuAttachArg->socDeviceArgs.deviceMapping[SOC_DEV_MAPPING_DISP].gpuNvLength = (NvU32) nv->regs->size; +} + +static void RmSetSocDpauxDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ +} + +static void RmSetSocHdacodecDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ +} + +static void RmSetSocMipiCalDeviceMappings( + GPUATTACHARG *gpuAttachArg, + nv_state_t *nv +) +{ +} + +static void +osInitNvMapping( + nv_state_t *nv, + NvU32 *pDeviceReference, + UNIX_STATUS *status +) +{ + OBJGPU *pGpu; + OBJSYS *pSys = SYS_GET_INSTANCE(); + GPUATTACHARG *gpuAttachArg; + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvU32 deviceInstance; + NvU32 data = 0; + + NV_PRINTF(LEVEL_INFO, "osInitNvMapping:\n"); + + // allocate the next available gpu device number + status->rmStatus = gpumgrAllocGpuInstance(pDeviceReference); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot get valid gpu instance\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + return; + } + + // RM_BASIC_LOCK_MODEL: allocate GPU lock + status->rmStatus = rmGpuLockAlloc(*pDeviceReference); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** cannot allocate GPU lock\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + + // attach default single-entry broadcast device for this gpu + status->rmStatus = 
gpumgrCreateDevice(&deviceInstance, NVBIT(*pDeviceReference), NULL); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot attach bc gpu\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_CREATE_DEV_FAILED); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + + // init attach state + gpuAttachArg = portMemAllocNonPaged(sizeof(GPUATTACHARG)); + if (gpuAttachArg == NULL) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot allocate gpuAttachArg\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ALLOC_GPU_FAILED); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + + portMemSet(gpuAttachArg, 0, sizeof(GPUATTACHARG)); + + if (NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + gpuAttachArg->socDeviceArgs.specified = NV_TRUE; + + RmSetSocDispDeviceMappings(gpuAttachArg, nv); + + RmSetSocDpauxDeviceMappings(gpuAttachArg, nv); + + RmSetSocHdacodecDeviceMappings(gpuAttachArg, nv); + + RmSetSocMipiCalDeviceMappings(gpuAttachArg, nv); + + gpuAttachArg->socDeviceArgs.socChipId0 = nv->disp_sw_soc_chip_id; + + gpuAttachArg->socDeviceArgs.iovaspaceId = nv->iovaspace_id; + } + else + { + gpuAttachArg->fbPhysAddr = nv->fb->cpu_address; + gpuAttachArg->fbBaseAddr = (GPUHWREG*) 0; // not mapped + gpuAttachArg->devPhysAddr = nv->regs->cpu_address; + gpuAttachArg->regBaseAddr = (GPUHWREG*) nv->regs->map; + gpuAttachArg->intLine = 0; // don't know yet + gpuAttachArg->instPhysAddr = nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address; + gpuAttachArg->instBaseAddr = (GPUHWREG*) 0; // not mapped + + gpuAttachArg->regLength = nv->regs->size; + gpuAttachArg->fbLength = nv->fb->size; + gpuAttachArg->instLength = nv->bars[NV_GPU_BAR_INDEX_IMEM].size; + + gpuAttachArg->iovaspaceId = nv->iovaspace_id; + } + + // + // we need this to check if we are running on virtual GPU + // in gpuBindHal function later. 
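+    // (nvDomainBusDeviceFunc is the PCI domain/bus/device of this GPU encoded
+    //  by nv_encode_pci_info().)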
+ // + gpuAttachArg->nvDomainBusDeviceFunc = nv_encode_pci_info(&nv->pci_info); + + gpuAttachArg->bRequestFwClientRm = nv->request_fw_client_rm; + + gpuAttachArg->pOsAttachArg = (void *)nv; + + // use gpu manager to attach gpu + status->rmStatus = gpumgrAttachGpu(*pDeviceReference, gpuAttachArg); + portMemFree(gpuAttachArg); + if (status->rmStatus != NV_OK) + { + gpumgrDestroyDevice(deviceInstance); + RM_SET_ERROR(*status, RM_INIT_GPU_GPUMGR_ATTACH_GPU_FAILED); + NV_PRINTF(LEVEL_ERROR, "*** Cannot attach gpu\n"); + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(*pDeviceReference); + return; + } + nvp->flags |= NV_INIT_FLAG_GPUMGR_ATTACH; + + pGpu = gpumgrGetGpu(*pDeviceReference); + + sysInitRegistryOverrides(pSys); + + sysApplyLockingPolicy(pSys); + + pGpu->busInfo.IntLine = nv->interrupt_line; + pGpu->dmaStartAddress = (RmPhysAddr)nv_get_dma_start_address(nv); + if (nv->fb != NULL) + { + pGpu->registerAccess.gpuFbAddr = (GPUHWREG*) nv->fb->map; + pGpu->busInfo.gpuPhysFbAddr = nv->fb->cpu_address; + } + + // set default parent gpu + gpumgrSetParentGPU(pGpu, pGpu); + + NV_PRINTF(LEVEL_INFO, "device instance : %d\n", *pDeviceReference); + NV_PRINTF(LEVEL_INFO, "NV regs using linear address : 0x%p\n", + pGpu->deviceMappings[SOC_DEV_MAPPING_DISP].gpuNvAddr); + NV_PRINTF(LEVEL_INFO, + "NV fb using linear address : 0x%p\n", pGpu->registerAccess.gpuFbAddr); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_FALSE); + + if (osReadRegistryDword(pGpu, + NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR, &data) == NV_OK) + { + if (data == NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS, NV_TRUE); + } + } + + if (!os_is_vgx_hyper()) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT, NV_TRUE); + } + + if ((osReadRegistryDword(NULL, + NV_REG_PRESERVE_VIDEO_MEMORY_ALLOCATIONS, + &data) == NV_OK) && data) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + memmgrSetPmaForcePersistence(pMemoryManager, NV_TRUE); + pKernelMemorySystem->bPreserveComptagBackingStoreOnSuspend = NV_TRUE; + + nv->preserve_vidmem_allocations = NV_TRUE; + } +} + +void osInitScalabilityOptions +( + OBJGPU *pGpu, + void *pDeviceExtension +) +{ +} + + +static NV_STATUS +osInitScalability( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + osInitScalabilityOptions(pGpu, NULL); + + // We need PCI-E chipset information very early. 
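+    // clInitPcie() gathers that chipset information when core logic (pCl) is
+    // available.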
+ if (pCl != NULL) + { + return clInitPcie(pGpu, pCl); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } +} + +static NV_STATUS +osTeardownScalability( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + return clTeardownPcie(pGpu, pCl); +} + +static inline void +RmSetDeviceDmaAddressSize( + nv_state_t *nv, + NvU8 numDmaAddressBits +) +{ + nv_set_dma_address_size(nv, numDmaAddressBits); +} + +static void +populateDeviceAttributes( + OBJGPU *pGpu, + nv_state_t *nv +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + if ((pCl != NULL) && pCl->getProperty(pCl, PDB_PROP_CL_IS_EXTERNAL_GPU)) + { + nv->is_external_gpu = NV_TRUE; + } +} + +static void +RmSetConsolePreservationParams(OBJGPU *pGpu) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NvU64 fbBaseAddress = 0; + NvU64 fbConsoleSize = 0; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + // + // PDB_PROP_GPU_PRIMARY_DEVICE should be NV_FALSE for vGPU configuration so + // return early + // + if (os_is_vgx_hyper() || IS_VIRTUAL(pGpu)) + return; + + // + // Check the OS layer for any video memory used by a console + // driver that should be reserved. + // + // If the console driver is using some amount of memory, its + // mapping is presumably either pointing at the start of BAR1 + // (this is the mapping the EFI GOP driver provides for + // efifb), or the upper 16MB of BAR2 (this is the mapping the + // VBIOS provides for, e.g., vesafb). + // + // In the BAR2 case, there is nothing more to do: the upper + // 16MB of BAR2 will remain in physical mode + // (see the documentation for NV_PBUS_BAR2_BLOCK in dev_bus.ref), + // and the console driver can continue to access it. + // + // In the BAR1 case, RM will put all of BAR1 into virtual + // mode, so we need to create a BAR1 mapping for the console + // driver to use. + // + // If the console driver is not using _either_ of those + // mappings, then the console driver will not be able to + // access the memory we reserve. This happens on some + // UEFI systems with multiple GPUs, because the firmware + // initializes the GOP driver on more than one GPU. In + // that case, just skip reserving anything for GPUs where + // neither BAR1 nor BAR2 match the console's base + // address. 
+ // + fbConsoleSize = rm_get_uefi_console_size(nv, &fbBaseAddress); + + if (fbConsoleSize == 0) + { + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_UEFI)) + { + fbConsoleSize = 0x40000; + } + else + { + NV_PRINTF(LEVEL_INFO, "No Frame Buffer Present\n"); + } + } + + if ((fbConsoleSize > 0) && (fbBaseAddress != 0)) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + pKernelBus->bPreserveBar1ConsoleEnabled = + (fbBaseAddress == nv->fb->cpu_address); + } + + pMemoryManager->Ram.ReservedConsoleDispMemSize = NV_ALIGN_UP(fbConsoleSize, 0x10000); +} + +static NV_STATUS +RmInitDeviceDma( + nv_state_t *nv +) +{ + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + POBJVMM pVmm = SYS_GET_VMM(pSys); + POBJVASPACE pIOVAS; + NV_STATUS status = vmmCreateVaspace(pVmm, IO_VASPACE_A, + nv->iovaspace_id, 0, 0ULL, ~0ULL, + 0ULL, 0ULL, + NULL, VASPACE_FLAGS_ENABLE_VMM, + &pIOVAS); + if (status != NV_OK) + { + return status; + } + } + + return NV_OK; +} + +static void +RmTeardownDeviceDma( + nv_state_t *nv +) +{ + if (nv->iovaspace_id != NV_IOVA_DOMAIN_NONE) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + POBJVMM pVmm = SYS_GET_VMM(pSys); + POBJVASPACE pIOVAS; + + if (NV_OK == vmmGetVaspaceFromId(pVmm, nv->iovaspace_id, IO_VASPACE_A, &pIOVAS)) + { + vmmDestroyVaspace(pVmm, pIOVAS); + } + } +} + +static void +RmInitNvDevice( + NvU32 deviceReference, + UNIX_STATUS *status +) +{ + // set the device context + OBJGPU *pGpu = gpumgrGetGpu(deviceReference); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + NV_PRINTF(LEVEL_INFO, "RmInitNvDevice:\n"); + + NV_PRINTF(LEVEL_INFO, + "device instance : 0x%08x\n", deviceReference); + + // initialize all engines -- calls back osInitMapping() + status->rmStatus = gpumgrStatePreInitGpu(pGpu); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot pre-initialize the device\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_PRE_INIT_FAILED); + return; + } + + RmSetDeviceDmaAddressSize(nv, gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM)); + + os_disable_console_access(); + + status->rmStatus = gpumgrStateInitGpu(pGpu); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot initialize the device\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_STATE_INIT_FAILED); + os_enable_console_access(); + return; + } + nvp->flags |= NV_INIT_FLAG_GPU_STATE; + + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + // + // Make sure bifIsMSIEnabled() gets a chance to look up the OS PCI + // handle for this GPU. This must happen after GPU state + // initialization. 
+ // + if (pKernelBif != NULL) + { + kbifCheckAndRearmMSI(pGpu, pKernelBif); + } + + // Set RM's interrupt enable state to zero here so that interrupts + // won't be enabled during loading + Intr *pIntr = GPU_GET_INTR(pGpu); + if (pIntr != NULL) + { + intrSetIntrEn(pIntr, INTERRUPT_TYPE_DISABLED); + } + + status->rmStatus = gpumgrStateLoadGpu(pGpu, GPU_STATE_DEFAULT); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "*** Cannot load state into the device\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_LOAD_FAILED); + os_enable_console_access(); + return; + } + nvp->flags |= NV_INIT_FLAG_GPU_STATE_LOAD; + + os_enable_console_access(); + + status->rmStatus = gpuPerformUniversalValidation_HAL(pGpu); + if (status->rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Failed universal validation\n"); + RM_SET_ERROR(*status, RM_INIT_GPU_UNIVERSAL_VALIDATION_FAILED); + return; + } + + return; +} + +NV_STATUS osInitMapping( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + NV_PRINTF(LEVEL_INFO, "osInitMapping:\n"); + + // Some PCI BIOSs leave the ROM mapped. This causes problems if it overlays system RAM. Just disable it. + GPU_BUS_CFG_WR32(pGpu, NV_CONFIG_PCI_NV_12, 0); + + // make sure our PCI latency timer is sufficient (max it out) + GPU_BUS_CFG_WR32(pGpu, NV_CONFIG_PCI_NV_3, + DRF_DEF(_CONFIG, _PCI_NV_3, _LATENCY_TIMER, _248_CLOCKS)); + + if (pCl != NULL) + { + initCoreLogic(pGpu); + } + nvp->flags |= NV_INIT_FLAG_CORE_LOGIC; + + return NV_OK; + +} // end of osInitMapping() + +static NV_STATUS +initCoreLogic( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + +#if defined(NVCPU_X86_64) + OBJOS *pOS = SYS_GET_OS(pSys); + if (!os_pat_supported()) + pOS->setProperty(pOS, PDB_PROP_OS_PAT_UNSUPPORTED, NV_TRUE); +#endif + + return clInit(pGpu, pCl); +} + +static NV_STATUS +teardownCoreLogic( + OBJOS *pOS, + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + return clTeardown(pGpu, pCl); +} + +static void RmTeardownDpauxRegisters( + nv_state_t *nv +) +{ +} + +static void RmTeardownHdacodecRegisters( + nv_state_t *nv +) +{ +} + +static void RmTeardownMipiCalRegisters( + nv_state_t *nv +) +{ +} + +static NV_STATUS +RmTeardownRegisters( + nv_state_t *nv +) +{ + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "Tearing down registers\n"); + + if (nv->regs && nv->regs->map) + { + osUnmapKernelSpace(nv->regs->map, nv->regs->size); + nv->regs->map = 0; + nv->regs->map_u = NULL; + } + + RmTeardownDpauxRegisters(nv); + + RmTeardownHdacodecRegisters(nv); + + RmTeardownMipiCalRegisters(nv); + + return NV_OK; +} + +static NV_STATUS +RmSetupDpauxRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + + return NV_OK; +} + +static NV_STATUS +RmSetupHdacodecRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + + return NV_OK; +} + +static NV_STATUS +RmSetupMipiCalRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + + return NV_OK; +} + +static void +RmSetupRegisters( + nv_state_t *nv, + UNIX_STATUS *status +) +{ + NV_STATUS ret; + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmSetupRegisters for 0x%x:0x%x\n", + nv->pci_info.vendor_id, nv->pci_info.device_id); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "pci config info:\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " registers look like: " NvP64_fmt " " NvP64_fmt, + nv->regs->cpu_address, nv->regs->size); + + if (nv->fb != NULL) + { + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " fb looks 
like: " NvP64_fmt " " NvP64_fmt, + nv->fb->cpu_address, nv->fb->size); + } + + nv_os_map_kernel_space(nv, nv->regs); + if (nv->regs->map == NULL) + { + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "Failed to map regs registers!!\n"); + RM_SET_ERROR(*status, RM_INIT_REG_SETUP_FAILED); + status->rmStatus = NV_ERR_OPERATING_SYSTEM; + return; + } + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "Successfully mapped framebuffer and registers\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "final mappings:\n"); + NV_DEV_PRINTF(NV_DBG_SETUP, nv, " regs: " NvP64_fmt " " NvP64_fmt " 0x%p\n", + nv->regs->cpu_address, nv->regs->size, nv->regs->map); + + ret = RmSetupDpauxRegisters(nv, status); + if (ret != NV_OK) + goto err_unmap_disp_regs; + + ret = RmSetupHdacodecRegisters(nv, status); + if (ret != NV_OK) + { + RmTeardownDpauxRegisters(nv); + goto err_unmap_disp_regs; + } + + ret = RmSetupMipiCalRegisters(nv, status); + if (ret != NV_OK) + { + RmTeardownHdacodecRegisters(nv); + RmTeardownDpauxRegisters(nv); + goto err_unmap_disp_regs; + } + + return; + +err_unmap_disp_regs: + if (nv->regs && nv->regs->map) + { + osUnmapKernelSpace(nv->regs->map, nv->regs->size); + nv->regs->map = 0; + } + + return; +} + +NvBool RmInitPrivateState( + nv_state_t *pNv +) +{ + nv_priv_t *nvp; + NvU32 gpuId; + NvU32 pmc_boot_0 = 0; + NvU32 pmc_boot_42 = 0; + + NV_SET_NV_PRIV(pNv, NULL); + + if (!NV_IS_SOC_DISPLAY_DEVICE(pNv)) + { + pNv->regs->map_u = os_map_kernel_space(pNv->regs->cpu_address, + os_page_size, + NV_MEMORY_UNCACHED); + if (pNv->regs->map_u == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to map GPU registers (DISABLE_INTERRUPTS).\n"); + return NV_FALSE; + } + + pmc_boot_0 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_0); + pmc_boot_42 = NV_PRIV_REG_RD32(pNv->regs->map_u, NV_PMC_BOOT_42); + + os_unmap_kernel_space(pNv->regs->map_u, os_page_size); + pNv->regs->map_u = NULL; + } + + if (os_alloc_mem((void **)&nvp, sizeof(*nvp)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate private device state.\n"); + return NV_FALSE; + } + + gpuId = nv_generate_id_from_pci_info(&pNv->pci_info); + + if (gpumgrRegisterGpuId(gpuId, nv_encode_pci_info(&pNv->pci_info)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to register GPU with GPU manager.\n"); + os_free_mem(nvp); + return NV_FALSE; + } + + pNv->gpu_id = gpuId; + + pNv->iovaspace_id = nv_requires_dma_remap(pNv) ? gpuId : + NV_IOVA_DOMAIN_NONE; + + // + // Set up a reasonable default DMA address size, based on the minimum + // possible on currently supported GPUs. + // + RmSetDeviceDmaAddressSize(pNv, NV_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH); + + os_mem_set(nvp, 0, sizeof(*nvp)); + nvp->status = NV_ERR_INVALID_STATE; + nvp->pmc_boot_0 = pmc_boot_0; + nvp->pmc_boot_42 = pmc_boot_42; + NV_SET_NV_PRIV(pNv, nvp); + + return NV_TRUE; +} + +void RmClearPrivateState( + nv_state_t *pNv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(pNv); + NvU32 status; + void *pVbiosCopy = NULL; + void *pRegistryCopy = NULL; + NvU32 vbiosSize; + NvU32 *pEfiDisplayCache; + NvU32 efiDisplayCacheSize; + nv_i2c_adapter_entry_t i2c_adapters[MAX_I2C_ADAPTERS]; + nv_dynamic_power_t dynamicPowerCopy; + NvU32 x = 0; + NvU32 pmc_boot_0, pmc_boot_42; + + // + // Do not clear private state after GPU resets, it is used while + // recovering the GPU. Only clear the pGpu pointer, which is + // restored during next initialization cycle. 
+ // + if (pNv->flags & NV_FLAG_IN_RECOVERY) + { + nvp->pGpu = NULL; + } + + status = nvp->status; + pVbiosCopy = nvp->pVbiosCopy; + vbiosSize = nvp->vbiosSize; + pRegistryCopy = nvp->pRegistry; + pEfiDisplayCache = nvp->efi.display.pCache; + efiDisplayCacheSize = nvp->efi.display.cacheSize; + dynamicPowerCopy = nvp->dynamic_power; + pmc_boot_0 = nvp->pmc_boot_0; + pmc_boot_42 = nvp->pmc_boot_42; + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + i2c_adapters[x] = nvp->i2c_adapters[x]; + } + + portMemSet(nvp, 0, sizeof(nv_priv_t)); + + nvp->status = status; + nvp->pVbiosCopy = pVbiosCopy; + nvp->vbiosSize = vbiosSize; + nvp->pRegistry = pRegistryCopy; + nvp->efi.display.pCache = pEfiDisplayCache; + nvp->efi.display.cacheSize = efiDisplayCacheSize; + nvp->dynamic_power = dynamicPowerCopy; + nvp->pmc_boot_0 = pmc_boot_0; + nvp->pmc_boot_42 = pmc_boot_42; + + for (x = 0; x < MAX_I2C_ADAPTERS; x++) + { + nvp->i2c_adapters[x] = i2c_adapters[x]; + } + + nvp->flags |= NV_INIT_FLAG_PUBLIC_I2C; +} + +void RmFreePrivateState( + nv_state_t *pNv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(pNv); + + gpumgrUnregisterGpuId(pNv->gpu_id); + + RmDestroyRegistry(pNv); + + if (nvp != NULL) + { + portMemFree(nvp->pVbiosCopy); + portMemFree(nvp->efi.display.pCache); + os_free_mem(nvp); + } + + NV_SET_NV_PRIV(pNv, NULL); +} + +NvBool RmPartiallyInitAdapter( + nv_state_t *nv +) +{ + NV_PRINTF(LEVEL_INFO, "%s: %04x:%02x:%02x.0\n", __FUNCTION__, + nv->pci_info.domain, nv->pci_info.bus, nv->pci_info.slot); + + nv_start_rc_timer(nv); + + return NV_TRUE; +} + +static NV_STATUS +RmInitX86Emu( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + PORT_UNREFERENCED_VARIABLE(nv); + +#if NVCPU_IS_X86_64 + status = RmInitX86EmuState(pGpu); +#else + // We don't expect a "primary VGA" adapter on non-amd64 platforms + NV_ASSERT(!NV_PRIMARY_VGA(nv)); +#endif + + return status; +} + +static NV_STATUS RmRegisterGpudb( + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus; + const NvU8 *pGid; + nv_state_t *pNv = NV_GET_NV_STATE(pGpu); + + pGid = RmGetGpuUuidRaw(pNv); + if (pGid == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get UUID\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + rmStatus = gpudbRegisterGpu(pGid, &pGpu->gpuClData.upstreamPort.addr, + pGpu->busInfo.nvDomainBusDeviceFunc); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to register GPU with GPU data base\n"); + } + + return rmStatus; +} + +static void RmUnixFreeRmApi( + nv_state_t *nv +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if (nv->rmapi.hClient != 0) + { + pRmApi->Free(pRmApi, nv->rmapi.hClient, nv->rmapi.hClient); + } + + portMemSet(&nv->rmapi, 0, sizeof(nv->rmapi)); +} + +static NvBool RmUnixAllocRmApi( + nv_state_t *nv, + NvU32 deviceId +) +{ + NV0080_ALLOC_PARAMETERS deviceParams = { 0 }; + NV2080_ALLOC_PARAMETERS subDeviceParams = { 0 }; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + portMemSet(&nv->rmapi, 0, sizeof(nv->rmapi)); + + if (pRmApi->AllocWithHandle( + pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &nv->rmapi.hClient) != NV_OK) + { + goto fail; + } + + // + // Any call to rmapiDelPendingDevices() will internally delete the UNIX OS + // layer RMAPI handles. Set this flag to preserve these handles. These + // handles will be freed explicitly by RmUnixFreeRmApi(). 
+    //
+    if (!rmclientSetClientFlagsByHandle(nv->rmapi.hClient,
+                                        RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT))
+    {
+        goto fail;
+    }
+
+    deviceParams.deviceId = deviceId;
+
+    if (pRmApi->Alloc(
+            pRmApi,
+            nv->rmapi.hClient,
+            nv->rmapi.hClient,
+            &nv->rmapi.hDevice,
+            NV01_DEVICE_0,
+            &deviceParams) != NV_OK)
+    {
+        goto fail;
+    }
+
+    subDeviceParams.subDeviceId = 0;
+
+    if (pRmApi->Alloc(
+            pRmApi,
+            nv->rmapi.hClient,
+            nv->rmapi.hDevice,
+            &nv->rmapi.hSubDevice,
+            NV20_SUBDEVICE_0,
+            &subDeviceParams) != NV_OK)
+    {
+        goto fail;
+    }
+
+    //
+    // The NV40_I2C allocation is expected to fail if it is disabled
+    // with RM config.
+    //
+    if (pRmApi->Alloc(
+            pRmApi,
+            nv->rmapi.hClient,
+            nv->rmapi.hSubDevice,
+            &nv->rmapi.hI2C,
+            NV40_I2C,
+            NULL) != NV_OK)
+    {
+        nv->rmapi.hI2C = 0;
+    }
+
+    //
+    // The NV04_DISPLAY_COMMON allocation is expected to fail for a displayless
+    // system. The nv->rmapi.hDisp value needs to be checked before doing
+    // display related control calls.
+    //
+    if (pRmApi->Alloc(
+            pRmApi,
+            nv->rmapi.hClient,
+            nv->rmapi.hDevice,
+            &nv->rmapi.hDisp,
+            NV04_DISPLAY_COMMON,
+            NULL) != NV_OK)
+    {
+        nv->rmapi.hDisp = 0;
+    }
+
+    return NV_TRUE;
+
+fail:
+    RmUnixFreeRmApi(nv);
+    return NV_FALSE;
+}
+
+static NvBool verifyGspFirmware(
+    const void *pGspFwBuf,
+    NvU32 gspFwBufSize
+)
+{
+    //
+    // This array will be populated with a sha256 hash of the GSP-RM firmware
+    // binary in a post-compile step. We really want this array to be 'const',
+    // but adding that qualifier here makes the compiler perform undesirable
+    // optimization assuming the array is always going to be zero. The
+    // .gspfwhash-rodata section is marked readonly when it is populated with
+    // the real hash in lieu of 'const'.
+    //
+    static NvU8 __attribute__((section(".gspfwhash-rodata")))
+        expectedFwHash[NV_SHA256_DIGEST_SIZE] = {};
+    NvU32 i;
+    NvBool bHashCheck = NV_FALSE;
+
+    //
+    // To allow for simple incremental build workflow, we will only
+    // perform the firmware hash check if the expected hash has been
+    // embedded into the kernel binary.
+ // + for (i = 0; i < NV_SHA256_DIGEST_SIZE; i++) + { + if (expectedFwHash[i] != 0) + { + bHashCheck = NV_TRUE; + break; + } + } + + if (bHashCheck) + { + NvU8 gspFwHash[NV_SHA256_DIGEST_SIZE]; + + nv_sha256(pGspFwBuf, gspFwBufSize, gspFwHash); + + #define NvU64_BIG_ENDIAN(buf) \ + ((NvU64)(buf)[0] << 56) | ((NvU64)(buf)[1] << 48) | \ + ((NvU64)(buf)[2] << 40) | ((NvU64)(buf)[3] << 32) | \ + ((NvU64)(buf)[4] << 24) | ((NvU64)(buf)[5] << 16) | \ + ((NvU64)(buf)[6] << 8) | ((NvU64)(buf)[7] << 0) + + if (portMemCmp(expectedFwHash, gspFwHash, NV_SHA256_DIGEST_SIZE) != 0) + { + NV_PRINTF(LEVEL_ERROR, "GSP firmware validation failed: hash mismatch\n"); + NV_PRINTF(LEVEL_ERROR, "Expected GSP firmware hash: %016llx%016llx%016llx%016llx\n", + NvU64_BIG_ENDIAN(&expectedFwHash[0]), NvU64_BIG_ENDIAN(&expectedFwHash[8]), + NvU64_BIG_ENDIAN(&expectedFwHash[16]), NvU64_BIG_ENDIAN(&expectedFwHash[24])); + NV_PRINTF(LEVEL_ERROR, "Got GSP firmware hash: %016llx%016llx%016llx%016llx\n", + NvU64_BIG_ENDIAN(&gspFwHash[0]), NvU64_BIG_ENDIAN(&gspFwHash[8]), + NvU64_BIG_ENDIAN(&gspFwHash[16]), NvU64_BIG_ENDIAN(&gspFwHash[24])); + NV_PRINTF(LEVEL_ERROR, "The GSP firmware version must exactly match the RM (nv-kernel.o) build.\n"); + NV_PRINTF(LEVEL_ERROR, "Most likely cause of this error is an out of band update to one of the components\n"); + + return NV_FALSE; + } + else + { + NV_PRINTF(LEVEL_NOTICE, "GSP firmware hash: %016llx%016llx%016llx%016llx\n", + NvU64_BIG_ENDIAN(&gspFwHash[0]), NvU64_BIG_ENDIAN(&gspFwHash[8]), + NvU64_BIG_ENDIAN(&gspFwHash[16]), NvU64_BIG_ENDIAN(&gspFwHash[24])); + } + } + else + { + NV_PRINTF(LEVEL_NOTICE, "GSP firmware hash not found.\n"); + } + return NV_TRUE; +} + +NvBool RmInitAdapter( + nv_state_t *nv +) +{ + NvU32 devicereference = 0; + UNIX_STATUS status = INIT_UNIX_STATUS; + nv_priv_t *nvp; + NvBool retVal = NV_FALSE; + OBJSYS *pSys; + OBJGPU *pGpu = NULL; + OBJOS *pOS; + KernelDisplay *pKernelDisplay; + const void *gspFwHandle = NULL; + const void *gspFwLogHandle = NULL; + + GSP_FIRMWARE gspFw = {0}; + PORT_UNREFERENCED_VARIABLE(gspFw); + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmInitAdapter\n"); + + nv->flags &= ~NV_FLAG_PASSTHRU; + + RmSetupRegisters(nv, &status); + if (! RM_INIT_SUCCESS(status.initStatus) ) + goto failed; + + nvp = NV_GET_NV_PRIV(nv); + nvp->status = NV_ERR_OPERATING_SYSTEM; + + status.rmStatus = RmInitDeviceDma(nv); + if (status.rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot configure the device for DMA\n"); + RM_SET_ERROR(status, RM_INIT_GPU_DMA_CONFIGURATION_FAILED); + goto shutdown; + } + + nvp->flags |= NV_INIT_FLAG_DMA; + + pSys = SYS_GET_INSTANCE(); + + // + // WAR: If the below UEFI property is set, display RM will attempt to read + // the state cache during RM init in order to retrieve a snapshot of the + // display state that the UEFI driver has already programmed. On Orin + // (T234D), the UEFI boot flow is being enabled on Linux, but our UEFI + // driver doesn't have any display support right now. As such, our UEFI + // driver won't allocate any of the display channels, which means that RM + // will attempt to read the state cache for uninitialized channels. WAR this + // issue by un-setting the below UEFI property for now. + // + // JIRA task TDS-5094 tracks adding display support to the UEFI driver. + // + if (NV_IS_SOC_DISPLAY_DEVICE(nv)) { + pSys->setProperty(pSys, PDB_PROP_SYS_IS_UEFI, NV_FALSE); + } + + // + // Get firmware from the OS, if requested, and decide if RM will run as a + // firmware client. 
+ // + if (nv->request_firmware) + { + RmSetDeviceDmaAddressSize(nv, NV_GSP_GPU_MIN_SUPPORTED_DMA_ADDR_WIDTH); + + gspFwHandle = nv_get_firmware(nv, NV_FIRMWARE_GSP, + &gspFw.pBuf, + &gspFw.size); + if (gspFwHandle == NULL && + !nv->allow_fallback_to_monolithic_rm) + { + RM_SET_ERROR(status, RM_INIT_FIRMWARE_FETCH_FAILED); + goto shutdown; + } + else if (gspFwHandle != NULL) + { + if (!verifyGspFirmware(gspFw.pBuf, gspFw.size)) + { + RM_SET_ERROR(status, RM_INIT_FIRMWARE_VALIDATION_FAILED); + goto shutdown; + } + +#if LIBOS_LOG_DECODE_ENABLE + if (nv->enable_firmware_logs) + { + gspFwLogHandle = nv_get_firmware(nv, NV_FIRMWARE_GSP_LOG, + &gspFw.pLogElf, + &gspFw.logElfSize); + if (gspFwLogHandle == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to load gsp_log.bin, no GSP-RM logs will be printed (non-fatal)\n"); + } + } +#endif + + nv->request_fw_client_rm = NV_TRUE; + } + else + { + nv->request_fw_client_rm = NV_FALSE; + } + } + + // initialize the RM device register mapping + osInitNvMapping(nv, &devicereference, &status); + if (! RM_INIT_SUCCESS(status.initStatus) ) + { + switch (status.rmStatus) + { + case NV_ERR_NOT_SUPPORTED: + nvp->status = NV_ERR_NOT_SUPPORTED; + break; + } + NV_PRINTF(LEVEL_ERROR, + "osInitNvMapping failed, bailing out of RmInitAdapter\n"); + goto shutdown; + } + + // + // now we can have a pdev for the first time... + // + pGpu = gpumgrGetGpu(devicereference); + + pOS = SYS_GET_OS(pSys); + + status.rmStatus = osInitScalability(pGpu); + if (status.rmStatus == NV_OK) + { + nvp->flags |= NV_INIT_FLAG_SCALABILITY; + } + else if (status.rmStatus != NV_ERR_NOT_SUPPORTED) + { + RM_SET_ERROR(status, RM_INIT_SCALABILITY_FAILED); + goto shutdown; + } + + RmSetConsolePreservationParams(pGpu); + + // + // If GSP fw RM support is enabled then start the GSP microcode + // (including the task running the full instance of the RM) and + // exchange the necessary initial RPC messages before continuing + // with GPU initialization here. + // + if (IS_GSP_CLIENT(pGpu)) + { + status.rmStatus = kgspInitRm(pGpu, GPU_GET_KERNEL_GSP(pGpu), &gspFw); + if (status.rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot initialize GSP firmware RM\n"); + RM_SET_ERROR(status, RM_INIT_FIRMWARE_INIT_FAILED); + goto shutdown; + } + } + else if (nv->request_fw_client_rm) + { + // We were expecting to enable GSP-RM but something went wrong. + if (!nv->allow_fallback_to_monolithic_rm) + { + RM_SET_ERROR(status, RM_INIT_FIRMWARE_POLICY_FAILED); + goto shutdown; + } + else + { + NV_PRINTF(LEVEL_NOTICE, "Falling back to monolithic RM\n"); + } + } + + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + if (pKernelDisplay != NULL) + { + kdispSetWarPurgeSatellitesOnCoreFree(pKernelDisplay, NV_TRUE); + } + + if (IS_PASSTHRU(pGpu)) + nv->flags |= NV_FLAG_PASSTHRU; + + populateDeviceAttributes(pGpu, nv); + + RmInitAcpiMethods(pOS, pSys, pGpu); + + status.rmStatus = RmInitX86Emu(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_VBIOS_X86EMU_FAILED); + NV_PRINTF(LEVEL_ERROR, + "RmInitX86Emu failed, bailing out of RmInitAdapter\n"); + goto shutdown; + } + initVendorSpecificRegistry(pGpu, nv->pci_info.device_id); + + // finally, initialize the device + RmInitNvDevice(devicereference, &status); + if (! 
RM_INIT_SUCCESS(status.initStatus) ) + { + NV_PRINTF(LEVEL_ERROR, + "RmInitNvDevice failed, bailing out of RmInitAdapter\n"); + switch (status.rmStatus) + { + case NV_ERR_INSUFFICIENT_POWER: + nvp->status = NV_ERR_INSUFFICIENT_POWER; + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, + "GPU does not have the necessary power cables connected.\n"); + break; + } + goto shutdown; + } + + // LOCK: acquire GPUs lock + status.rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + if (status.rmStatus != NV_OK) + { + goto shutdown; + } + + status.rmStatus = osVerifySystemEnvironment(pGpu); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_SYS_ENVIRONMENT_FAILED); + switch (status.rmStatus) + { + case NV_ERR_IRQ_NOT_FIRING: + nvp->status = NV_ERR_IRQ_NOT_FIRING; + break; + } + NV_PRINTF(LEVEL_ERROR, "RmVerifySystemEnvironment failed, bailing!\n"); + goto shutdown; + } + + Intr *pIntr = GPU_GET_INTR(pGpu); + if (pIntr != NULL) + { + intrSetIntrEn(pIntr, INTERRUPT_TYPE_HARDWARE); + } + + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + // initialize the watchdog (disabled by default) + status.rmStatus = pKernelRc != NULL ? krcWatchdogInit_HAL(pGpu, pKernelRc) : + NV_ERR_NOT_SUPPORTED; + if (status.rmStatus == NV_OK) + { + krcWatchdogDisable(pKernelRc); + nvp->flags |= NV_INIT_FLAG_FIFO_WATCHDOG; + } + else if (status.rmStatus == NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_INFO, + "krcWatchdogInit returned _NOT_SUPPORTED. For Kepler GPUs in PGOB mode, this is normal\n"); + } + else + { + RM_SET_ERROR(status, RM_INIT_WATCHDOG_FAILED); + NV_PRINTF(LEVEL_ERROR, + "krcWatchdogInit failed, bailing out of RmInitAdapter\n"); + goto shutdown; + } + nv_start_rc_timer(nv); + + nvp->status = NV_OK; + + if (!RmUnixAllocRmApi(nv, devicereference)) { + RM_SET_ERROR(status, RM_INIT_ALLOC_RMAPI_FAILED); + status.rmStatus = NV_ERR_GENERIC; + goto shutdown; + } + + status.rmStatus = RmInitGpuInfoWithRmApi(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_INIT_GPUINFO_WITH_RMAPI_FAILED); + goto shutdown; + } + + // i2c only on master device?? + RmI2cAddGpuPorts(nv); + nvp->flags |= NV_INIT_FLAG_PUBLIC_I2C; + + // This fifo hal call will fail on pre-fermi gpus. 
In that case, userd + // info will remain 0 and skipped by fb mmap code + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU64 udaddr = 0; + NvU32 udsize = 0; + if (pKernelFifo != NULL) + { + status.rmStatus = kfifoGetUserdBar1MapInfo_HAL(pGpu, pKernelFifo, &udaddr, &udsize); + if (status.rmStatus == NV_OK) + { + nv->ud.cpu_address = (nv->fb->cpu_address + udaddr); + nv->ud.size = udsize; + } + else if (status.rmStatus != NV_ERR_NOT_SUPPORTED) + { + RM_SET_ERROR(status, RM_FIFO_GET_UD_BAR1_MAP_INFO_FAILED); + NV_PRINTF(LEVEL_ERROR, + "kfifoGetUserdBar1MapInfo failed, bailing out of RmInitAdapter\n"); + goto shutdown; + } + } + + nv->flags &= ~NV_FLAG_IN_RECOVERY; + + pOS->setProperty(pOS, PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED, NV_TRUE); + + RmInitS0ixPowerManagement(nv); + RmInitDeferredDynamicPowerManagement(nv); + + if (!NV_IS_SOC_DISPLAY_DEVICE(nv)) + { + status.rmStatus = RmRegisterGpudb(pGpu); + if (status.rmStatus != NV_OK) + { + RM_SET_ERROR(status, RM_GPUDB_REGISTER_FAILED); + goto shutdown; + } + } + + if (nvp->b_mobile_config_enabled) + { + NvU32 ac_plugged = 0; + if (nv_acpi_get_powersource(&ac_plugged) == NV_OK) + { + // LOCK: acquire GPU lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_NONE) == NV_OK) + { + // + // As we have already acquired the API Lock here, we are + // calling RmSystemEvent directly instead of rm_system_event. + // + RmSystemEvent(nv, NV_SYSTEM_ACPI_BATTERY_POWER_EVENT, !ac_plugged); + + // UNLOCK: release GPU lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + } + } + + { + // OpenRM support for features beyond what is used on Data Center GPUs + // is still fairly immature, so for now require users to opt into use of + // OpenRM with a special registry key, if not on a Data Center GPU. + const GspStaticConfigInfo *pSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + + if (pSCI->computeBranding != COMPUTE_BRANDING_TYPE_TESLA) + { + NvU32 data = NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DEFAULT; + RmReadRegistryDword(nv, NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS, &data); + + if (data == NV_REG_OPENRM_ENABLE_UNSUPPORTED_GPUS_DISABLE) + { + if (!nv->printed_openrm_enable_unsupported_gpus_error) + { + nv_printf(NV_DBG_ERRORS, + "NVRM: Open nvidia.ko is only ready for use on Data Center GPUs.\n"); + nv_printf(NV_DBG_ERRORS, + "NVRM: To force use of Open nvidia.ko on other GPUs, see the\n"); + nv_printf(NV_DBG_ERRORS, + "NVRM: 'OpenRmEnableUnsupportedGpus' kernel module parameter described\n"); + nv_printf(NV_DBG_ERRORS, + "NVRM: in the README.\n"); + nv->printed_openrm_enable_unsupported_gpus_error = NV_TRUE; + } + RM_SET_ERROR(status, RM_INIT_FIRMWARE_INIT_FAILED); + goto shutdown; + } + } + } + + NV_DEV_PRINTF(NV_DBG_SETUP, nv, "RmInitAdapter succeeded!\n"); + + retVal = NV_TRUE; + goto done; + + shutdown: + nv->flags &= ~NV_FLAG_IN_RECOVERY; + + // call ShutdownAdapter to undo anything we've done above + RmShutdownAdapter(nv); + + failed: + NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "RmInitAdapter failed! 
(0x%x:0x%x:%d)\n", + status.initStatus, status.rmStatus, status.line); + +done: + nv_put_firmware(gspFwHandle); + nv_put_firmware(gspFwLogHandle); + + return retVal; +} + +void RmShutdownAdapter( + nv_state_t *nv +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + NV_STATUS rmStatus; + + if ((pGpu != NULL) && (nvp->flags & NV_INIT_FLAG_GPUMGR_ATTACH)) + { + NvU32 gpuInstance = gpuGetInstance(pGpu); + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + OBJSYS *pSys = SYS_GET_INSTANCE(); + + RmUnixFreeRmApi(nv); + + nv->ud.cpu_address = 0; + nv->ud.size = 0; + + // LOCK: acquire GPUs lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + RmDestroyDeferredDynamicPowerManagement(nv); + + gpuFreeEventHandle(pGpu); + + OBJCL *pCl = SYS_GET_CL(pSys); + OBJOS *pOS = GPU_GET_OS(pGpu); + if (pCl != NULL) + { + if (nvp->flags & NV_INIT_FLAG_CORE_LOGIC) + { + teardownCoreLogic(pOS, pGpu); + } + else if (nvp->flags & NV_INIT_FLAG_SCALABILITY) + { + osTeardownScalability(pGpu); + } + } + + rmapiSetDelPendingClientResourcesFromGpuMask(NVBIT(gpuInstance)); + rmapiDelPendingDevices(NVBIT(gpuInstance)); + + os_disable_console_access(); + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) + { + rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + NV_ASSERT(rmStatus == NV_OK); + } + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE) + { + rmStatus = gpuStateDestroy(pGpu); + NV_ASSERT(rmStatus == NV_OK); + } + + os_enable_console_access(); + + //if (nvp->flags & NV_INIT_FLAG_HAL) + // destroyHal(pDev); + +#if NVCPU_IS_X86_64 + RmFreeX86EmuState(pGpu); +#endif + + gpumgrDetachGpu(gpuInstance); + gpumgrDestroyDevice(deviceInstance); + + if (nvp->flags & NV_INIT_FLAG_DMA) + { + RmTeardownDeviceDma(nv); + } + + RmClearPrivateState(nv); + + RmUnInitAcpiMethods(pSys); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + // RM_BASIC_LOCK_MODEL: free GPU lock + rmGpuLockFree(deviceInstance); + } + } + else + { + RmClearPrivateState(nv); + } + + RmTeardownRegisters(nv); +} + +void RmPartiallyDisableAdapter( + nv_state_t *nv +) +{ + NV_PRINTF(LEVEL_INFO, "%s: RM is in SW Persistence mode\n", __FUNCTION__); + + nv_stop_rc_timer(nv); +} + +void RmDisableAdapter( + nv_state_t *nv +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(nv); + NvU32 gpuMask; + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + // + // Normally, we re-enable interrupts when we release the lock + // after RmDisableAdapter(), before disabling the bottom-half ISR. + // If an interrupt came in in that time window, but the bottom half + // wasn't scheduled to run in time, the semaphore acquired by the + // top-half would never be released afterwards (since the bottom-half + // would never run). This call to intrSetIntrEn() ensures that this won't + // happen, by making sure interrupts remain disabled on the GPU in question after + // RmDisableAdapter(). + // + Intr *pIntr = GPU_GET_INTR(pGpu); + if (pIntr != NULL) + { + intrSetIntrEn(pIntr, INTERRUPT_TYPE_DISABLED); + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY, NV_TRUE); + nv->flags |= NV_FLAG_IN_RECOVERY; + } + + // + // Free the client allocated resources. + // + // This needs to happen prior to tearing down SLI state when SLI is enabled. + // + // Note this doesn't free RM internal resource allocations. 
Those are + // freed during (gpumgrUpdateSLIConfig->...->)gpuStateUnload. + // + // We need to free resources for all GPUs linked in a group as + // gpumgrUpdateSLIConfig will teardown GPU state for the entire set. + // + gpuMask = gpumgrGetGpuMask(pGpu); + + rmapiSetDelPendingClientResourcesFromGpuMask(gpuMask); + rmapiDelPendingDevices(gpuMask); + + // LOCK: acquire GPUs lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DESTROY) == NV_OK) + { + nv_stop_rc_timer(nv); + + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + OBJOS *pOS = SYS_GET_OS(pSys); + if (pCl != NULL) + { + teardownCoreLogic(pOS, pGpu); + nvp->flags &= ~NV_INIT_FLAG_CORE_LOGIC; + } + + if (nvp->flags & NV_INIT_FLAG_FIFO_WATCHDOG) + { + krcWatchdogShutdown(pGpu, GPU_GET_KERNEL_RC(pGpu)); + nvp->flags &= ~NV_INIT_FLAG_FIFO_WATCHDOG; + } + + os_disable_console_access(); + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) + { + rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + NV_ASSERT(rmStatus == NV_OK); + nvp->flags &= ~NV_INIT_FLAG_GPU_STATE_LOAD; + } + + os_enable_console_access(); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } +} + +NV_STATUS RmGetAdapterStatus( + nv_state_t *pNv, + NvU32 *pStatus +) +{ + // + // This status is determined in RmInitAdapter(); the glue layer + // requests it when the adapter failed to initialize to learn + // more about the error condition. This is currently limited to + // osVerifySystemEnvironment() failures. + // + nv_priv_t *nvp; + + nvp = NV_GET_NV_PRIV(pNv); + if (nvp == NULL) + { + return NV_ERR_INVALID_STATE; + } + + *pStatus = nvp->status; + return NV_OK; +} + +static void initVendorSpecificRegistry( + OBJGPU *pGpu, + NvU16 device_id +) +{ + NV_STATUS rmStatus; + NvU32 i; + NvU32 subsystem_id; + NvU32 subsystem_vendor_id; + NvU32 subsystem_device_id; + NvU32 vendor_id = 0; + + if (!pGpu) + return; + + rmStatus = GPU_BUS_CFG_RD32(pGpu, + NV_CONFIG_PCI_NV_11, &subsystem_id); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: Cannot read NV_CONFIG_PCI_NV_11\n", __FUNCTION__); + return; + } + + subsystem_vendor_id = (subsystem_id & 0xffff); + subsystem_device_id = (subsystem_id >> 16); + + for (i = 0; (nb_id_table[i].subsystem_vendor_id) != 0; i++) + { + if ((nb_id_table[i].subsystem_vendor_id == subsystem_vendor_id) && + (nb_id_table[i].subsystem_device_id == subsystem_device_id) && + (nb_id_table[i].gpu_device_id == device_id)) + { + vendor_id = subsystem_vendor_id; + break; + } + } + + if (vendor_id != 0) + { + for (i = 0; nb_reg_table[i].vendor_id != 0; i++) + { + if (nb_reg_table[i].vendor_id == vendor_id) + { + osWriteRegistryDword(pGpu, nb_reg_table[i].name, + nb_reg_table[i].data); + } + } + } +} + +static void initUnixSpecificRegistry( + OBJGPU *pGpu +) +{ + // By default, enable GPU reset on Unix + osWriteRegistryDword(pGpu, "RMSecBusResetEnable", 1); + osWriteRegistryDword(pGpu, "RMForcePcieConfigSave", 1); + +} + +void +osRemoveGpu( + NvU32 domain, + NvU8 bus, + NvU8 device +) +{ + void *handle; + + handle = os_pci_init_handle(domain, bus, device, 0, NULL, NULL); + if (handle != NULL) + { + os_pci_remove(handle); + } +} + +/* + * Check to see if this board supports GPU exclusion. 
+ */ +static NvBool RmIsExcludingAllowed( + nv_state_t *pNv +) +{ + NvU32 feature = 0; + NvU32 brand; + + // DGX-2/HGX-2 systems pre-date the PBI call + if (pNv->pci_info.device_id == 0x1db8) + return NV_TRUE; + + if (pciPbiGetFeature(pNv->handle, &feature) != NV_OK) + return NV_FALSE; + + brand = DRF_VAL(_PBI, _EXECUTE_ROUTINE, _GET_FEATURE_EXCLUSION, feature); + + return (brand == NV_PBI_EXECUTE_ROUTINE_GET_FEATURE_EXCLUSION_ALLOWED); +} + +NV_STATUS RmExcludeAdapter( + nv_state_t *nv +) +{ + NV_STATUS rmStatus; + + if (!RmIsExcludingAllowed(nv)) + return NV_ERR_NOT_SUPPORTED; + + rmStatus = gpumgrExcludeGpuId(nv->gpu_id); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to exclude GPU: 0x%x\n", rmStatus); + return rmStatus; + } + + /* + * GPU exclusion only happens during initialization, and therefore there + * is no need to stop activity on the GPU + */ + + return rmStatus; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c b/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c new file mode 100644 index 000000000..8afc791ca --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/osmemdesc.c @@ -0,0 +1,1015 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************* OS Memory Descriptor APIS *****************************\ +* * +* This contains routines to create and destroy OS memory descriptor * +* * +****************************************************************************/ + +#include // NV device driver interface +#include +#include +#include +#include +#include +#include + +#include "gpu/bif/kernel_bif.h" + +static NV_STATUS osCreateOsDescriptorFromPageArray(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void **); +static void osDestroyOsDescriptorPageArray(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromIoMemory(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromIoMemory(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromPhysAddr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromPhysAddr(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCreateOsDescriptorFromFileHandle(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static NV_STATUS osCreateOsDescriptorFromDmaBufPtr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromDmaBuf(PMEMORY_DESCRIPTOR); +static NV_STATUS osCreateOsDescriptorFromSgtPtr(OBJGPU *, NvP64, NvHandle, NvU32, NvU64 *, MEMORY_DESCRIPTOR **, void**); +static void osDestroyOsDescriptorFromSgt(PMEMORY_DESCRIPTOR); + +static NV_STATUS osCheckGpuBarsOverlapAddrRange(NvRangeU64 addrRange); + +NV_STATUS +osCreateMemFromOsDescriptor +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + NvU32 descriptorType, + RS_PRIV_LEVEL privilegeLevel +) +{ + RmClient* pClient; + NV_STATUS rmStatus; + void *pPrivate; + + if ((pDescriptor == NvP64_NULL) || + (*pLimit == 0) || + (serverutilGetClientUnderLock(hClient, &pClient) != NV_OK)) + { + return NV_ERR_INVALID_PARAM_STRUCT; + } + + // + // For the sake of simplicity, unmatched RM and OS page + // sizes are not currently supported in this path, except for + // PPC64LE and aarch64. + // + // Also, the nvmap handle is sent which can be any random number so + // the virtual address alignment sanity check can't be done here. + // + if (!NVCPU_IS_PPC64LE && + !NVCPU_IS_AARCH64 && + (NV_RM_PAGE_SIZE != os_page_size)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // The two checks below use cached privilege because they + // concern the privilege level of the client, and not the + // privilege level of the calling context which may be + // overridden to KERNEL at some internal callsites. + // + + // + // The RM cannot obtain a table of physical addresses + // for a kernel virtual address range on all of + // the supported UNIX platforms. Since this path is + // not really compelling for kernel allocations on any + // of those platforms, it is not supported. + // For UVM, they could have pre-allocated sysmem to register + // with RM so we put in an exception for that case. 
+ // + if ((rmclientGetCachedPrivilege(pClient) >= RS_PRIV_LEVEL_KERNEL) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR) && + (descriptorType != NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR and + // NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR can only be utilized by kernel space + // rm-clients. + // + if ((rmclientGetCachedPrivilege(pClient) < RS_PRIV_LEVEL_KERNEL) && + ((descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR) || + (descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR))) + { + return NV_ERR_NOT_SUPPORTED; + } + + switch (descriptorType) + { + case NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS: + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + case NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR: + if (privilegeLevel < RS_PRIV_LEVEL_KERNEL) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + } + rmStatus = osCreateOsDescriptorFromPhysAddr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY: + rmStatus = osCreateOsDescriptorFromIoMemory(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_PAGE_ARRAY: + rmStatus = osCreateOsDescriptorFromPageArray(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_FILE_HANDLE: + rmStatus = osCreateOsDescriptorFromFileHandle(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_DMA_BUF_PTR: + rmStatus = osCreateOsDescriptorFromDmaBufPtr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + case NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR: + rmStatus = osCreateOsDescriptorFromSgtPtr(pGpu, pDescriptor, + hClient, flags, pLimit, ppMemDesc, &pPrivate); + break; + default: + rmStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + + return rmStatus; +} + +static NV_STATUS +osCreateMemdescFromPages +( + OBJGPU *pGpu, + NvU64 size, + NvU32 flags, + NvU32 cacheType, + MEMORY_DESCRIPTOR **ppMemDesc, + void *pImportPriv, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 memdescFlags = MEMDESC_FLAGS_NONE; + NvU32 gpuCachedFlags; + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + rmStatus = memdescCreate(ppMemDesc, pGpu, size, 0, + NV_MEMORY_NONCONTIGUOUS, ADDR_SYSMEM, + cacheType, memdescFlags); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags)) + gpuCachedFlags = NV_MEMORY_CACHED; + else + gpuCachedFlags = NV_MEMORY_UNCACHED; + + pMemDesc = *ppMemDesc; + rmStatus = nv_register_user_pages(NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetPteArray(pMemDesc, AT_CPU), pImportPriv, + ppPrivate); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + + memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags); + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. 
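+    // For example, assuming 4 KiB RM pages and 64 KiB OS pages (as on some
+    // PPC64LE and aarch64 configurations), each OS page address expands into
+    // os_page_size / NV_RM_PAGE_SIZE = 16 RM-sized entries, roughly:
+    //
+    //     rmPte[i * 16 + j] = osPte[i] + (j * NV_RM_PAGE_SIZE),  0 <= j < 16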
+ // + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback needs to unpin the pages, but that will already be done + // as part of failure handling further up the stack if memdescMapIommu() + // fails. So we only set up the priv-data cleanup callback once we're sure + // this call will succeed. + // + memdescSetMemData(pMemDesc, *ppPrivate, NULL); + + rmStatus = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + if (rmStatus != NV_OK) + { + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_user_pages(NV_GET_NV_STATE(pGpu), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + NULL /* import_priv */, ppPrivate); + memdescDestroy(pMemDesc); + return rmStatus; + } + + return NV_OK; +} + +static NV_STATUS +osCreateOsDescriptorFromPageArray +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + + *ppPrivate = NvP64_VALUE(pDescriptor); + + // + // Since the only type of memory permitted in this path + // is anonymous user memory, certain restrictions + // apply for the allocation flags: + // + // 1) anonymous memory is write-back cacheable, hence + // the _COHERENCY flag must match. + // + // 2) the RM has no control over the location of the + // associated pages in memory and thus cannot + // honor requests for contiguous memory. + // + // These restrictions are enforced here to avoid subtle + // bugs later on. + // + if ((!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, flags) && + !FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, flags)) || + FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + rmStatus = osCreateMemdescFromPages(pGpu, (*pLimit + 1), flags, + NV_MEMORY_CACHED, ppMemDesc, + NULL /* pImportPriv */, ppPrivate); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + // All is well - wire up the cleanup callback now + memdescSetMemData(*ppMemDesc, memdescGetMemData(*ppMemDesc), + osDestroyOsDescriptorPageArray); + + return NV_OK; +} + +/*! + * @brief Checks if the given address range overlaps with the BARs for any of + * the GPUs. 
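+ *
+ * (Two inclusive ranges [a, b] and [c, d] overlap exactly when a <= d and
+ * c <= b; here the caller's range is tested against the framebuffer,
+ * register, and instance-memory apertures of each attached GPU.)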
+ */ +static NV_STATUS +osCheckGpuBarsOverlapAddrRange +( + NvRangeU64 addrRange +) +{ + NvRangeU64 gpuPhysAddrRange; + NvRangeU64 gpuPhysFbAddrRange; + NvRangeU64 gpuPhysInstAddrRange; + NvU32 gpuInstance; + OBJGPU *pGpu; + NvU32 gpuMask; + NV_STATUS rmStatus; + + rmStatus = gpumgrGetGpuAttachInfo(NULL, &gpuMask); + NV_ASSERT_OR_RETURN(rmStatus == NV_OK, rmStatus); + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + NV_INIT_RANGE(gpuPhysFbAddrRange, gpumgrGetGpuPhysFbAddr(pGpu), + gpumgrGetGpuPhysFbAddr(pGpu) + pGpu->fbLength -1); + + NV_INIT_RANGE(gpuPhysAddrRange, pGpu->busInfo.gpuPhysAddr, + pGpu->busInfo.gpuPhysAddr + pGpu->deviceMappings[0].gpuNvLength -1); + + NV_INIT_RANGE(gpuPhysInstAddrRange, pGpu->busInfo.gpuPhysInstAddr, + pGpu->busInfo.gpuPhysInstAddr + pGpu->instLength -1); + + if (NV_IS_OVERLAPPING_RANGE(gpuPhysFbAddrRange, addrRange) || + NV_IS_OVERLAPPING_RANGE(gpuPhysAddrRange, addrRange) || + NV_IS_OVERLAPPING_RANGE(gpuPhysInstAddrRange, addrRange)) + { + return NV_ERR_INVALID_ADDRESS; + } + } + + return NV_OK; +} + +static NV_STATUS +osCreateOsDescriptorFromIoMemory +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + NvU32 gpuCachedFlags; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 *pPteArray; + NvRangeU64 physAddrRange; + NvU64 *base = 0; + NvBool bAllowMmap; + + // + // Unlike the page array path, this one deals exclusively + // with I/O memory, which is expected to be contiguous + // physically, and which may only be accessed with uncached + // transactions. + // + if (!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, flags) || + !FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + // + // _PEER_MAP_OVERRIDE flag is controlled by the RM and not the client. + // + // RM will set the _PEER_MAP_OVERRIDE_REQUIRED flag itself for IO memory + // memory imported with RmVidHeapControl. + // + if (FLD_TEST_DRF(OS02, _FLAGS, _PEER_MAP_OVERRIDE, _REQUIRED, flags)) + { + // + // Don't allow MMIO mappings for unprivileged users + // This is a temporary WAR for bug 1630288 "[PeerSync] threat related + // to GPU" + // + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + if (!pKernelBif->peerMappingOverride && !osIsAdministrator()) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): permission denied, allowPeermapping=%d\n", + __FUNCTION__, pKernelBif->peerMappingOverride); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + bAllowMmap = !FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _NEVER_MAP, flags); + + base = (void *)(NvUPtr)pDescriptor; + + // + // There is an architectural deadlock scenario involved when full-duplex P2P + // enabled over BAR1. See #3 in the description of bug 1571948 which explains + // the classic deadlock. So, make sure to error out usermode's memory + // registration if a memory range falls within any of the available GPU's + // BAR window. 
+ // + physAddrRange.min = *base; + physAddrRange.max = *base + *pLimit; + + rmStatus = osCheckGpuBarsOverlapAddrRange(physAddrRange); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): phys range 0x%016llx-0x%016llx overlaps with GPU BARs", + __FUNCTION__, physAddrRange.min, physAddrRange.max); + return rmStatus; + } + + rmStatus = memdescCreate(ppMemDesc, pGpu, (*pLimit + 1), 0, + NV_MEMORY_CONTIGUOUS, ADDR_SYSMEM, + NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): error %d while attempting to create the MMIO mapping\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + pMemDesc = *ppMemDesc; + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags)) + gpuCachedFlags = NV_MEMORY_CACHED; + else + gpuCachedFlags = NV_MEMORY_UNCACHED; + + memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags); + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM, NV_TRUE); + + pPteArray = memdescGetPteArray(pMemDesc, AT_CPU); + pPteArray[0] = *base; + + *ppPrivate = NULL; + + if (bAllowMmap) + { + rmStatus = nv_register_peer_io_mem(NV_GET_NV_STATE(pGpu), pPteArray, + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + ppPrivate); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + } + + memdescSetMemData(pMemDesc, *ppPrivate, NULL); + + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback needs to unpin the pages, but that will already be done + // as part of failure handling further up the stack if memdescMapIommu() + // fails. So we only set up the priv-data cleanup callback once we're sure + // this call will succeed. + // + rmStatus = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + if (rmStatus != NV_OK) + { + if (*ppPrivate != NULL) + { + nv_unregister_peer_io_mem(NV_GET_NV_STATE(pGpu), *ppPrivate); + } + memdescDestroy(pMemDesc); + return rmStatus; + } + + // All is well - wire up the cleanup callback now + memdescSetMemData(pMemDesc, memdescGetMemData(pMemDesc), + osDestroyOsDescriptorFromIoMemory); + + return NV_OK; +} + +static NV_STATUS +osCreateOsDescriptorFromPhysAddr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 *pPteArray; + NvU64 base = 0; + NvU32 cache_type = NV_MEMORY_CACHED; + NvU64 memdescFlags = MEMDESC_FLAGS_NONE; + + // Currently only work with contiguous sysmem allocations + if (!FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_TYPE_SYNCPOINT, _APERTURE, flags)) + { + // Syncpoint memory is uncached, DMA mapping needs to skip CPU sync. 
+ cache_type = NV_MEMORY_UNCACHED; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + base = (NvU64)pDescriptor; + rmStatus = memdescCreate(ppMemDesc, pGpu, (*pLimit + 1), 0, + NV_MEMORY_CONTIGUOUS, ADDR_SYSMEM, + cache_type, memdescFlags); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): error %d while creating memdesc for kernel memory\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + pMemDesc = *ppMemDesc; + + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + pPteArray = memdescGetPteArray(pMemDesc, AT_CPU); + pPteArray[0] = base; + + *ppPrivate = NULL; + rmStatus = nv_register_phys_pages(NV_GET_NV_STATE(pGpu), pPteArray, + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetCpuCacheAttrib(pMemDesc), + ppPrivate); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + + memdescSetMemData(pMemDesc, *ppPrivate, + osDestroyOsDescriptorFromPhysAddr); + + return NV_OK; +} + +static NV_STATUS +_createMemdescFromDmaBufSgtHelper +( + OBJGPU *pGpu, + NvU32 flags, + void *pImportPriv, + struct sg_table *pImportSgt, + NvU32 size, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate, + MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 cacheType = NV_MEMORY_UNCACHED; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 memdescFlags = MEMDESC_FLAGS_NONE; + NvU32 gpuCachedFlags; + + NV_ASSERT((pMemDataReleaseCallback == osDestroyOsDescriptorFromDmaBuf) || + (pMemDataReleaseCallback == osDestroyOsDescriptorFromSgt)); + + if (FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_COMBINE, flags)) + { + cacheType = NV_MEMORY_WRITECOMBINED; + } + else if (!FLD_TEST_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, flags)) + { + cacheType = NV_MEMORY_CACHED; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, flags)) + { + memdescFlags |= MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO; + } + + rmStatus = memdescCreate(ppMemDesc, pGpu, size, 0, + NV_MEMORY_NONCONTIGUOUS, ADDR_SYSMEM, + cacheType, memdescFlags); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, flags)) + { + gpuCachedFlags = NV_MEMORY_CACHED; + } + else + { + gpuCachedFlags = NV_MEMORY_UNCACHED; + } + + pMemDesc = *ppMemDesc; + + memdescSetGpuCacheAttrib(pMemDesc, gpuCachedFlags); + memdescSetAddress(pMemDesc, NvP64_NULL); + memdescSetMemData(pMemDesc, NULL, NULL); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM, NV_TRUE); + + *ppPrivate = NULL; + rmStatus = nv_register_sgt(NV_GET_NV_STATE(pGpu), + memdescGetPteArray(pMemDesc, AT_CPU), + NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount), + memdescGetCpuCacheAttrib(pMemDesc), + ppPrivate, + pImportSgt, + pImportPriv); + if (rmStatus != NV_OK) + { + memdescDestroy(pMemDesc); + return rmStatus; + } + + // + // If the OS layer doesn't think in RM page size, we need to inflate the + // PTE array into RM pages. 
+ // + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmInflateOsToRmPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + memdescSetMemData(*ppMemDesc, *ppPrivate, NULL); + + // + // memdescMapIommu() requires the OS-private data to be set on the memory + // descriptor, but we don't want to wire up the teardown callback just yet: + // that callback does teardown that will already be done as part of failure + // handling further up the stack if memdescMapIommu() fails. So we only + // setup the priv-data cleanup callback once we're sure this call will + // succeed. + // + rmStatus = memdescMapIommu(*ppMemDesc, pGpu->busInfo.iovaspaceId); + if (rmStatus != NV_OK) + { + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + if (*ppPrivate != NULL) + { + nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt, + (void **) &pImportPriv, *ppPrivate); + } + memdescDestroy(pMemDesc); + return rmStatus; + } + + // All is well - wire up the cleanup callback now + memdescSetMemData(*ppMemDesc, *ppPrivate, pMemDataReleaseCallback); + + return rmStatus; +} + +static NV_STATUS +_createMemdescFromDmaBuf +( + OBJGPU *pGpu, + NvU32 flags, + nv_dma_buf_t *pImportPriv, + void *pUserPages, + struct sg_table *pImportSgt, + NvU32 size, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = + _createMemdescFromDmaBufSgtHelper(pGpu, flags, pImportPriv, pImportSgt, + size, ppMemDesc, ppPrivate, + osDestroyOsDescriptorFromDmaBuf); + + NV_ASSERT(pUserPages == NULL); + + if (rmStatus != NV_OK) + { + nv_dma_release_dma_buf(NULL, pImportPriv); + } + + return rmStatus; +} + +static NV_STATUS +_createMemdescFromSgt +( + OBJGPU *pGpu, + NvU32 flags, + struct drm_gem_object *pImportPrivGem, + struct sg_table *pImportSgt, + NvU32 size, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = + _createMemdescFromDmaBufSgtHelper(pGpu, flags, pImportPrivGem, + pImportSgt, size, ppMemDesc, + ppPrivate, + osDestroyOsDescriptorFromSgt); + if (rmStatus != NV_OK) + { + nv_dma_release_sgt(pImportSgt, pImportPrivGem); + } + + return rmStatus; +} + +static NV_STATUS +osCreateOsDescriptorFromFileHandle +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NvU32 size = 0; + void *pUserPages = NULL; + nv_dma_buf_t *pImportPriv = NULL; + struct sg_table *pImportSgt = NULL; + NvS32 fd; + + fd = (NvS32)((NvU64)pDescriptor); + if ((NvU64)fd != (NvU64)pDescriptor) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): fd must fit within a signed 32-bit integer!\n", + __FUNCTION__); + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = nv_dma_import_from_fd(nv->dma_dev, fd, &size, + &pUserPages, &pImportSgt, &pImportPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error (%d) while trying to import fd!\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + return _createMemdescFromDmaBuf(pGpu, flags, pImportPriv, + pUserPages, pImportSgt, + size, ppMemDesc, ppPrivate); +} + +static NV_STATUS +osCreateOsDescriptorFromSgtPtr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = 
NV_GET_NV_STATE(pGpu); + NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS *params = + (NVOS32_DESCRIPTOR_TYPE_OS_SGT_PTR_PARAMETERS*)((NvUPtr) pDescriptor); + + struct sg_table *sgt = params->sgt; + struct drm_gem_object *gem = params->gem; + + rmStatus = nv_dma_import_sgt(nv->dma_dev, sgt, gem); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error (%d) while trying to import sgt!\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + return _createMemdescFromSgt(pGpu, flags, gem, sgt, + (*pLimit + 1), ppMemDesc, ppPrivate); +} + +static NV_STATUS +osCreateOsDescriptorFromDmaBufPtr +( + OBJGPU *pGpu, + NvP64 pDescriptor, + NvHandle hClient, + NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + void **ppPrivate +) +{ + NV_STATUS rmStatus = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NvU32 size = 0; + void *pUserPages = NULL; + nv_dma_buf_t *pImportPriv = NULL; + struct sg_table *pImportSgt = NULL; + void *dmaBuf = (void*)((NvUPtr)pDescriptor); + + rmStatus = nv_dma_import_dma_buf(nv->dma_dev, dmaBuf, &size, + &pUserPages, &pImportSgt, &pImportPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s(): Error (%d) while trying to import dma_buf!\n", + __FUNCTION__, rmStatus); + return rmStatus; + } + + return _createMemdescFromDmaBuf(pGpu, flags, pImportPriv, + pUserPages, pImportSgt, + size, ppMemDesc, ppPrivate); +} + +static void +osDestroyOsDescriptorFromPhysAddr +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate; + + pPrivate = memdescGetMemData(pMemDesc); + NV_ASSERT(pPrivate != NULL); + + nv_unregister_phys_pages(NV_GET_NV_STATE(pGpu), pPrivate); +} + +static void +osDestroyOsDescriptorFromIoMemory +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate = memdescGetMemData(pMemDesc); + + if (pPrivate == NULL) + { + return; + } + + nv_unregister_peer_io_mem(NV_GET_NV_STATE(pGpu), pPrivate); +} + +static void +osDestroyOsDescriptorPageArray +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NvU64 osPageCount = NV_RM_PAGES_TO_OS_PAGES(pMemDesc->PageCount); + NV_STATUS status; + void *pPrivate; + + pPrivate = memdescGetMemData(pMemDesc); + + NV_ASSERT(pPrivate != NULL); + + // + // TODO: Bug 1811006: Notably skip any IOMMU mapping management as the + // pMemDesc->pGpu might have been torn down already and the pGpu passed in + // doesn't necessarily have IOMMU mappings. For now just allow memdescDestroy() + // to clean up whatever is there (this may not work correctly either if any + // of the IOMMU mappings have outlasted their VASPACEs). This should + // be cleaned up once the fix for bug 1811006 is known. + // + + if ((NV_RM_PAGE_SIZE < os_page_size) && + !memdescGetContiguity(pMemDesc, AT_CPU)) + { + RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU), + pMemDesc->PageCount); + } + + nv_unregister_user_pages(NV_GET_NV_STATE(pGpu), osPageCount, + NULL /* import_priv */, &pPrivate); + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_FOREIGN_PAGE) == NV_FALSE) + { + status = os_unlock_user_pages(osPageCount, pPrivate); + NV_ASSERT(status == NV_OK); + } + else + { + os_free_mem(pPrivate); + } +} + +static void +osDestroyOsDescriptorFromDmaBuf +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + void *pPrivate = memdescGetMemData(pMemDesc); + + struct sg_table *pImportSgt; + void *pImportPriv; + + /* + * Unmap IOMMU now or we will get a kernel crash when it is unmapped after + * pImportSgt is freed. 
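+     * (The IOMMU mappings created by memdescMapIommu() reference the pages
+     * registered from pImportSgt, so they must be torn down while the SGT is
+     * still registered.)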
+ */
+    memdescUnmapIommu(pMemDesc, pGpu->busInfo.iovaspaceId);
+
+    if ((NV_RM_PAGE_SIZE < os_page_size) &&
+        !memdescGetContiguity(pMemDesc, AT_CPU))
+    {
+        RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU),
+                                 pMemDesc->PageCount);
+    }
+
+    nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt,
+                      &pImportPriv, pPrivate);
+
+    /*
+     * pImportSgt doesn't need to be passed to nv_dma_release_dma_buf() because
+     * the DMA-BUF associated with pImportPriv already has a reference to the
+     * SGT.
+     */
+
+    nv_dma_release_dma_buf(NULL, pImportPriv);
+}
+
+static void
+osDestroyOsDescriptorFromSgt
+(
+    PMEMORY_DESCRIPTOR pMemDesc
+)
+{
+    OBJGPU *pGpu = pMemDesc->pGpu;
+    void *pPrivate = memdescGetMemData(pMemDesc);
+
+    struct sg_table *pImportSgt;
+    struct drm_gem_object *pImportPrivGem;
+
+    NV_ASSERT(pPrivate != NULL);
+
+    /*
+     * Unmap IOMMU now or we will get a kernel crash when it is unmapped after
+     * pImportSgt is freed.
+     */
+    memdescUnmapIommu(pMemDesc, pGpu->busInfo.iovaspaceId);
+
+    if ((NV_RM_PAGE_SIZE < os_page_size) &&
+        !memdescGetContiguity(pMemDesc, AT_CPU))
+    {
+        RmDeflateRmToOsPageArray(memdescGetPteArray(pMemDesc, AT_CPU),
+                                 pMemDesc->PageCount);
+    }
+
+    nv_unregister_sgt(NV_GET_NV_STATE(pGpu), &pImportSgt,
+                      (void **) &pImportPrivGem, pPrivate);
+
+    nv_dma_release_sgt(pImportSgt, pImportPrivGem);
+}
diff --git a/src/nvidia/arch/nvalloc/unix/src/osnvlink.c b/src/nvidia/arch/nvalloc/unix/src/osnvlink.c
new file mode 100644
index 000000000..1f41a7637
--- /dev/null
+++ b/src/nvidia/arch/nvalloc/unix/src/osnvlink.c
@@ -0,0 +1,676 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include 
+#include "kernel/gpu/nvlink/kernel_nvlink.h"
+#include "osapi.h"
+
+#if defined(INCLUDE_NVLINK_LIB)
+#include "nvlink.h"
+//
+// The functions in this file are a workaround for a significant design flaw
+// where RM callbacks for the nvlink library are built with the altstack
+// enabled, but the nvlink library is not built with altstack support. Whenever
+// the library calls a callback, the stack switching needs to be accounted for
+// or else we will observe corruption of data structures in the nvlink library
+// as data is pushed onto what the callback thinks is the stack. See bug
+// 1710300.
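+//
+// The workaround: each wrapper below allocates (or reuses) a per-link
+// nvidia_stack_t, enters the RM runtime on it via NV_ENTER_RM_RUNTIME, calls
+// the corresponding knvlinkCore*Callback, and then exits the RM runtime, so
+// the stack switch is accounted for on every entry from the nvlink library.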
+// +// This bug has also exposed other problems, such as the complete lack of +// locking awareness by these callbacks (e.g., assumption that the RMAPI and +// GPU locks are always held on entry, which is not a legitimate assumption). +// For now, we ignore that just to unblock testing. +// +extern NvlStatus knvlinkCoreAddLinkCallback(struct nvlink_link *); +extern NvlStatus knvlinkCoreRemoveLinkCallback(struct nvlink_link *); +extern NvlStatus knvlinkCoreLockLinkCallback(struct nvlink_link *); +extern void knvlinkCoreUnlockLinkCallback(struct nvlink_link *); +extern NvlStatus knvlinkCoreQueueLinkChangeCallback(struct nvlink_link_change *); +extern NvlStatus knvlinkCoreSetDlLinkModeCallback(struct nvlink_link *, NvU64, NvU32); +extern NvlStatus knvlinkCoreGetDlLinkModeCallback(struct nvlink_link *, NvU64 *); +extern NvlStatus knvlinkCoreSetTlLinkModeCallback(struct nvlink_link *, NvU64, NvU32); +extern NvlStatus knvlinkCoreGetTlLinkModeCallback(struct nvlink_link *, NvU64 *); +extern NvlStatus knvlinkCoreGetTxSublinkModeCallback(struct nvlink_link *, NvU64 *, NvU32 *); +extern NvlStatus knvlinkCoreSetTxSublinkModeCallback(struct nvlink_link *, NvU64, NvU32); +extern NvlStatus knvlinkCoreGetRxSublinkModeCallback(struct nvlink_link *, NvU64 *, NvU32 *); +extern NvlStatus knvlinkCoreSetRxSublinkModeCallback(struct nvlink_link *, NvU64, NvU32); +extern NvlStatus knvlinkCoreReadDiscoveryTokenCallback(struct nvlink_link *, NvU64 *); +extern NvlStatus knvlinkCoreWriteDiscoveryTokenCallback(struct nvlink_link *, NvU64); +extern void knvlinkCoreTrainingCompleteCallback(struct nvlink_link *); +extern void knvlinkCoreGetUphyLoadCallback(struct nvlink_link *, NvBool*); + +/*! + * @brief Helper to allocate an alternate stack from within core RM. + * + * This needs to be an NV_API_CALL (built to use the original stack instead + * of the altstack) since it is called before we switch to using the altstack. + */ +static NV_STATUS NV_API_CALL osNvlinkAllocAltStack(nvidia_stack_t **pSp) +{ + NV_STATUS status = NV_OK; + nvidia_stack_t *sp = NULL; +#if defined(NVCPU_X86_64) && defined(__use_altstack__) + status = os_alloc_mem((void **)&sp, sizeof(nvidia_stack_t)); + if (status == NV_OK) + { + sp->size = sizeof(sp->stack); + sp->top = sp->stack + sp->size; + } +#endif + *pSp = sp; + return status; +} + +/*! + * @brief Helper to free an alternate stack from within core RM. + * + * This needs to be an NV_API_CALL (built to use the original stack instead + * of the altstack) since it is called after we've switched back to using the + * original stack. 
+ */ +static void NV_API_CALL osNvlinkFreeAltStack(nvidia_stack_t *sp) +{ +#if defined(NVCPU_X86_64) && defined(__use_altstack__) + os_free_mem(sp); +#endif +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_add_link +( + struct nvlink_link *link +) +{ + void *fp; + NvlStatus status; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp; + + if (NV_OK != osNvlinkAllocAltStack(&sp)) + { + return NVL_ERR_GENERIC; + } + + NV_ENTER_RM_RUNTIME(sp, fp); + + status = knvlinkCoreAddLinkCallback(link); + + NV_EXIT_RM_RUNTIME(sp, fp); + + if (status == NVL_SUCCESS) + { + pLink->pOsInfo = sp; + } + else + { + osNvlinkFreeAltStack(sp); + } + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_remove_link +( + struct nvlink_link *link +) +{ + void *fp; + NvlStatus status; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + pLink->pOsInfo = NULL; + + NV_ENTER_RM_RUNTIME(sp, fp); + + status = knvlinkCoreRemoveLinkCallback(link); + + NV_EXIT_RM_RUNTIME(sp, fp); + + osNvlinkFreeAltStack(sp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_lock_link +( + struct nvlink_link *link +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreLockLinkCallback(link); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static void NV_API_CALL rm_nvlink_ops_unlock_link +( + struct nvlink_link *link +) +{ + void *fp; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + knvlinkCoreUnlockLinkCallback(link); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_queue_link_change +( + struct nvlink_link_change *link_change +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link_change->master->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreQueueLinkChangeCallback(link_change); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_set_dl_link_mode +( + struct nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreSetDlLinkModeCallback(link, mode, flags); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_get_dl_link_mode +( + struct nvlink_link *link, + NvU64 *mode +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + 
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreGetDlLinkModeCallback(link, mode); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_set_tl_link_mode +( + struct nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreSetTlLinkModeCallback(link, mode, flags); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_get_tl_link_mode +( + struct nvlink_link *link, + NvU64 *mode +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreGetTlLinkModeCallback(link, mode); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_tx_mode +( + struct nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreSetTxSublinkModeCallback(link, mode, flags); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_tx_mode +( + struct nvlink_link *link, + NvU64 *mode, + NvU32 *subMode +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreGetTxSublinkModeCallback(link, mode, subMode); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_mode +( + struct nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreSetRxSublinkModeCallback(link, mode, flags); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_mode +( + struct nvlink_link *link, + NvU64 *mode, + NvU32 *subMode +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreGetRxSublinkModeCallback(link, mode, subMode); + 
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_set_link_rx_detect +( + struct nvlink_link *link, + NvU32 flags +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreSetRxSublinkDetectCallback(link, flags); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_get_link_rx_detect +( + struct nvlink_link *link +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreGetRxSublinkDetectCallback(link); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static void NV_API_CALL rm_nvlink_get_uphy_load +( + struct nvlink_link *link, + NvBool *bUnlocked +) +{ + void *fp; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + knvlinkCoreGetUphyLoadCallback(link, bUnlocked); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_read_link_discovery_token +( + struct nvlink_link *link, + NvU64 *token +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreReadDiscoveryTokenCallback(link, token); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static NvlStatus NV_API_CALL rm_nvlink_ops_write_link_discovery_token +( + struct nvlink_link *link, + NvU64 token +) +{ + void *fp; + NvlStatus status; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = knvlinkCoreWriteDiscoveryTokenCallback(link, token); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); + + return status; +} + +static void NV_API_CALL rm_nvlink_ops_training_complete +( + struct nvlink_link *link +) +{ + void *fp; + THREAD_STATE_NODE threadState = {0}; + KNVLINK_RM_LINK *pLink = link->link_info; + nvidia_stack_t *sp = (nvidia_stack_t *)pLink->pOsInfo; + + NV_ENTER_RM_RUNTIME(sp, fp); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + knvlinkCoreTrainingCompleteCallback(link); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + NV_EXIT_RM_RUNTIME(sp, fp); +} + +#endif /* defined(INCLUDE_NVLINK_LIB) */ + +const struct nvlink_link_handlers* osGetNvlinkLinkCallbacks(void) +{ +#if defined(INCLUDE_NVLINK_LIB) + static const struct nvlink_link_handlers rm_nvlink_link_ops = + { + .add = rm_nvlink_ops_add_link, 
+ .remove = rm_nvlink_ops_remove_link, + .lock = rm_nvlink_ops_lock_link, + .unlock = rm_nvlink_ops_unlock_link, + .queue_link_change = rm_nvlink_ops_queue_link_change, + .set_dl_link_mode = rm_nvlink_ops_set_dl_link_mode, + .get_dl_link_mode = rm_nvlink_ops_get_dl_link_mode, + .set_tl_link_mode = rm_nvlink_ops_set_tl_link_mode, + .get_tl_link_mode = rm_nvlink_ops_get_tl_link_mode, + .set_tx_mode = rm_nvlink_ops_set_link_tx_mode, + .get_tx_mode = rm_nvlink_ops_get_link_tx_mode, + .set_rx_mode = rm_nvlink_ops_set_link_rx_mode, + .get_rx_mode = rm_nvlink_ops_get_link_rx_mode, + .set_rx_detect = rm_nvlink_ops_set_link_rx_detect, + .get_rx_detect = rm_nvlink_ops_get_link_rx_detect, + .write_discovery_token = rm_nvlink_ops_write_link_discovery_token, + .read_discovery_token = rm_nvlink_ops_read_link_discovery_token, + .training_complete = rm_nvlink_ops_training_complete, + .get_uphy_load = rm_nvlink_get_uphy_load, + }; + + return &rm_nvlink_link_ops; +#else + return NULL; +#endif +} + +/* + * @brief Verif only function to get the chiplib overrides for link connection + * state for all NVLINKs. + * + * If chiplib overrides exist, each link can either be enabled (1) or disabled (0) + * + * @param[in] pGpu GPU object pointer + * @param[in] maxLinks Size of pLinkConnection array + * @param[out] pLinkConnection array of pLinkConnection values to be populated by MODS + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED (no overrides available) + */ +NV_STATUS +osGetForcedNVLinkConnection +( + OBJGPU *pGpu, + NvU32 maxLinks, + NvU32 *pLinkConnection +) +{ + int i, ret; + NV_STATUS status; + char path[64]; + OBJSYS *pSys; + OBJOS *pOS; + + NV_ASSERT_OR_RETURN((pLinkConnection != NULL), NV_ERR_INVALID_POINTER); + NV_ASSERT_OR_RETURN((maxLinks > 0), NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN((pGpu != NULL), NV_ERR_INVALID_ARGUMENT); + + pSys = SYS_GET_INSTANCE(); + pOS = SYS_GET_OS(pSys); + if (pOS == NULL || pOS->osSimEscapeRead == NULL) + { + NV_PRINTF(LEVEL_ERROR, "%s: escape reads not supported on platform\n", + __FUNCTION__); + return NV_ERR_NOT_SUPPORTED; + } + + for (i = 0; i < maxLinks; i++) + { + ret = os_snprintf(path, sizeof(path), "CPU_MODEL|CM_ATS_ADDRESS|NVLink%u", i); + NV_ASSERT((ret > 0) && (ret < (sizeof(path) - 1))); + + status = pOS->osSimEscapeRead(pGpu, path, 0, 4, &pLinkConnection[i]); + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "%s: %s=0x%X\n", __FUNCTION__, + path, pLinkConnection[i]); + } + else + { + NV_PRINTF(LEVEL_INFO, "%s: osSimEscapeRead for '%s' failed (%u)\n", + __FUNCTION__, path, status); + return NV_ERR_NOT_SUPPORTED; + } + } + return NV_OK; +} + +/* + * @brief Get Platform suggested NVLink linerate + * + * NVLink will use this function to get the platform suggested linerate + * if available in FRU or device tree. 
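+ *
+ * Currently only the PPC64LE path is implemented (via the device tree, see
+ * nv_get_nvlink_line_rate()); the FRU-based lookup remains a TODO tracked by
+ * bug 200285656, so other platforms return NV_ERR_NOT_SUPPORTED.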
+ * + * @param[in] pGpu GPU object pointer + * @param[out] NvU32 * Suggested datarate + * + * @return NV_OK or NV_ERR_NOT_SUPPORTED (platform linerate data not available) + */ +NV_STATUS +osGetPlatformNvlinkLinerate +( + OBJGPU *pGpu, + NvU32 *lineRate +) +{ +#if defined(NVCPU_PPC64LE) + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + if (!pKernelNvlink) + return NV_ERR_INVALID_ARGUMENT; + + return nv_get_nvlink_line_rate(nv, lineRate); +#else + //TODO : FRU based method to be filled out by Bug 200285656 + //*lineRate = 0; + //return NV_OK; + return NV_ERR_NOT_SUPPORTED; +#endif +} + +void +osSetNVLinkSysmemLinkState +( + OBJGPU *pGpu, + NvBool enabled +) +{ + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + NV_ASSERT(enabled); + if (enabled) + nv_dma_enable_nvlink(nv->dma_dev); +} diff --git a/src/nvidia/arch/nvalloc/unix/src/osunix.c b/src/nvidia/arch/nvalloc/unix/src/osunix.c new file mode 100644 index 000000000..7d70602de --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/osunix.c @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Routines ***************************\ +* * +* Fills in os specific function pointers for the Unix OS object. 
* +* * +\***************************************************************************/ + +#include +#include + +static void initOSSpecificFunctionPointers(OBJOS *); +static void initMiscOSFunctionPointers(OBJOS *); +static void initUnixOSFunctionPointers(OBJOS *); +static void initOSSpecificProperties(OBJOS *); + +void +osInitObjOS(OBJOS *pOS) +{ + initOSSpecificFunctionPointers(pOS); + initOSSpecificProperties(pOS); +} + +static void +initOSSpecificFunctionPointers(OBJOS *pOS) +{ + initMiscOSFunctionPointers(pOS); + initUnixOSFunctionPointers(pOS); +} + +static void +initMiscOSFunctionPointers(OBJOS *pOS) +{ + pOS->osQueueWorkItem = osQueueWorkItem; + pOS->osQueueWorkItemWithFlags = osQueueWorkItemWithFlags; + pOS->osQueueSystemWorkItem = osQueueSystemWorkItem; +} + +static void +initUnixOSFunctionPointers(OBJOS *pOS) +{ +#if defined(NVCPU_X86_64) + pOS->osNv_rdcr4 = nv_rdcr4; + pOS->osNv_cpuid = nv_cpuid; +#endif + + pOS->osCallACPI_DSM = osCallACPI_DSM; + pOS->osCallACPI_DDC = osCallACPI_DDC; + pOS->osCallACPI_NVHG_ROM = osCallACPI_NVHG_ROM; + pOS->osCallACPI_DOD = osCallACPI_DOD; + pOS->osCallACPI_MXDM = osCallACPI_MXDM; + pOS->osCallACPI_MXDS = osCallACPI_MXDS; + + pOS->osDbgBreakpointEnabled = osDbgBreakpointEnabled; +} + +static void +initOSSpecificProperties +( + OBJOS *pOS +) +{ + pOS->setProperty(pOS, PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT, NV_TRUE); + pOS->setProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE, NV_TRUE); + pOS->setProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET, NV_TRUE); +} diff --git a/src/nvidia/arch/nvalloc/unix/src/registry.c b/src/nvidia/arch/nvalloc/unix/src/registry.c new file mode 100644 index 000000000..522b937c1 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/registry.c @@ -0,0 +1,705 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +#if defined(DEBUG_REGISTRY) +#define DBG_REG_PRINTF(a, ...) \ + NV_PRINTF(LEVEL_INFO, a, ##__VA_ARGS__) +#else +#define DBG_REG_PRINTF(a, ...) 
+#endif + +static NvS32 stringCaseCompare( + const char *string1, + const char *string2 +) +{ + NvU8 c1, c2; + + do + { + c1 = *string1, c2 = *string2; + if (c1 >= 'A' && c1 <= 'Z') + c1 += ('a' - 'A'); + if (c2 >= 'A' && c2 <= 'Z') + c2 += ('a' - 'A'); + string1++, string2++; + } + while ((c1 == c2) && (c1 != '\0')); + + return (c1 - c2); +} + +static nv_reg_entry_t *the_registry = NULL; + +static nv_reg_entry_t* regCreateNewRegistryKey( + nv_state_t *nv, + const char *regParmStr +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *new_reg = NULL; + char *new_ParmStr = NULL; + NvU32 parm_size; + + if (regParmStr == NULL) + { + DBG_BREAKPOINT(); + return NULL; + } + + new_reg = portMemAllocNonPaged(sizeof(nv_reg_entry_t)); + if (NULL == new_reg) + { + NV_PRINTF(LEVEL_ERROR, "failed to grow registry\n"); + return NULL; + } + + portMemSet(new_reg, 0, sizeof(nv_reg_entry_t)); + + if (regParmStr != NULL) + { + parm_size = (portStringLength(regParmStr) + 1); + new_ParmStr = portMemAllocNonPaged(parm_size); + if (NULL == new_ParmStr) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate registry param string\n"); + portMemFree(new_reg); + return NULL; + } + + NV_ASSERT(parm_size <= NVOS38_MAX_REGISTRY_STRING_LENGTH); + + if (portMemCopy(new_ParmStr, parm_size, regParmStr, parm_size) == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to copy registry param string\n"); + portMemFree(new_ParmStr); + portMemFree(new_reg); + return NULL; + } + } + + new_reg->regParmStr = new_ParmStr; + new_reg->type = NV_REGISTRY_ENTRY_TYPE_UNKNOWN; + + if (nvp != NULL) + { + new_reg->next = nvp->pRegistry; + nvp->pRegistry = new_reg; + DBG_REG_PRINTF("local registry now at 0x%p\n", nvp->pRegistry); + } + else + { + new_reg->next = the_registry; + the_registry = new_reg; + DBG_REG_PRINTF("global registry now at 0x%p\n", the_registry); + } + + return new_reg; +} + +static NV_STATUS regFreeEntry(nv_reg_entry_t *tmp) +{ + portMemFree(tmp->regParmStr); + tmp->regParmStr = NULL; + { + portMemFree(tmp->pdata); + tmp->pdata = NULL; + tmp->len = 0; + } + portMemFree(tmp); + + return NV_OK; +} + +static nv_reg_entry_t* regFindRegistryEntry( + nv_state_t *nv, + const char *regParmStr, + NvU32 type, + NvBool *bGlobalEntry +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *tmp; + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + if (nvp != NULL) + { + tmp = nvp->pRegistry; + DBG_REG_PRINTF(" local registry at 0x%p\n", tmp); + + while ((tmp != NULL) && (tmp->regParmStr != NULL)) + { + DBG_REG_PRINTF(" Testing against %s\n", + tmp->regParmStr); + if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) && + (type == tmp->type)) + { + DBG_REG_PRINTF(" found a match!\n"); + if (bGlobalEntry) + *bGlobalEntry = NV_FALSE; + return tmp; + } + tmp = tmp->next; + } + } + + tmp = the_registry; + DBG_REG_PRINTF(" global registry at 0x%p\n", tmp); + + while ((tmp != NULL) && (tmp->regParmStr != NULL)) + { + DBG_REG_PRINTF(" Testing against %s\n", + tmp->regParmStr); + if ((stringCaseCompare(tmp->regParmStr, regParmStr) == 0) && + (type == tmp->type)) + { + DBG_REG_PRINTF(" found a match!\n"); + if (bGlobalEntry) + *bGlobalEntry = NV_TRUE; + return tmp; + } + tmp = tmp->next; + } + + DBG_REG_PRINTF(" no match\n"); + return NULL; +} + +NV_STATUS RmWriteRegistryDword( + nv_state_t *nv, + const char *regParmStr, + NvU32 Data +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if (regParmStr == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s -> 0x%x\n", __FUNCTION__, regParmStr, Data); + + tmp 
= regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_DWORD, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + tmp->data = Data; + + if (stringCaseCompare(regParmStr, "ResmanDebugLevel") == 0) + { + os_dbg_set_level(Data); + } + + return NV_OK; + } + + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + return NV_ERR_GENERIC; + + tmp->type = NV_REGISTRY_ENTRY_TYPE_DWORD; + tmp->data = Data; + + return NV_OK; +} + +NV_STATUS RmReadRegistryDword( + nv_state_t *nv, + const char *regParmStr, + NvU32 *Data +) +{ + nv_reg_entry_t *tmp; + + if ((regParmStr == NULL) || (Data == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_DWORD, NULL); + if (tmp == NULL) + { + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, NULL); + if ((tmp != NULL) && (tmp->len >= sizeof(NvU32))) + { + *Data = *(NvU32 *)tmp->pdata; + } + else + { + DBG_REG_PRINTF(" not found\n"); + return NV_ERR_GENERIC; + } + } + else + { + *Data = tmp->data; + } + + DBG_REG_PRINTF(" found in the_registry: 0x%x\n", *Data); + + return NV_OK; +} + +NV_STATUS RmReadRegistryBinary( + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 *cbLen +) +{ + nv_reg_entry_t *tmp; + NV_STATUS status; + + if ((regParmStr == NULL) || (Data == NULL) || (cbLen == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, NULL); + if (tmp == NULL) + { + DBG_REG_PRINTF(" not found\n"); + return NV_ERR_GENERIC; + } + + DBG_REG_PRINTF(" found\n"); + + if (*cbLen >= tmp->len) + { + portMemCopy((NvU8 *)Data, *cbLen, (NvU8 *)tmp->pdata, tmp->len); + *cbLen = tmp->len; + status = NV_OK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "buffer (length: %u) is too small (data length: %u)\n", + *cbLen, tmp->len); + status = NV_ERR_GENERIC; + } + + return status; +} + +NV_STATUS RmWriteRegistryBinary( + nv_state_t *nv, + const char *regParmStr, + NvU8 *Data, + NvU32 cbLen +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if ((regParmStr == NULL) || (Data == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_BINARY, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + if (tmp->pdata != NULL) + { + portMemFree(tmp->pdata); + tmp->pdata = NULL; + tmp->len = 0; + } + } + else + { + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to create binary registry entry\n"); + return NV_ERR_GENERIC; + } + } + + tmp->pdata = portMemAllocNonPaged(cbLen); + if (NULL == tmp->pdata) + { + NV_PRINTF(LEVEL_ERROR, "failed to write binary registry entry\n"); + return NV_ERR_GENERIC; + } + + tmp->type = NV_REGISTRY_ENTRY_TYPE_BINARY; + tmp->len = cbLen; + portMemCopy((NvU8 *)tmp->pdata, tmp->len, (NvU8 *)Data, cbLen); + + return NV_OK; +} + +NV_STATUS 
RmWriteRegistryString( + nv_state_t *nv, + const char *regParmStr, + const char *buffer, + NvU32 bufferLength +) +{ + nv_reg_entry_t *tmp; + NvBool bGlobalEntry; + + if ((regParmStr == NULL) || (buffer == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_STRING, &bGlobalEntry); + + // If we found an entry and we were looking for a global entry and + // found a global, or we were looking for a per-GPU entry and found a + // per-GPU entry + if (tmp != NULL && + ((nv == NULL && bGlobalEntry) || + (nv != NULL && !bGlobalEntry))) + { + if (tmp->pdata != NULL) + { + portMemFree(tmp->pdata); + tmp->len = 0; + tmp->pdata = NULL; + } + } + else + { + tmp = regCreateNewRegistryKey(nv, regParmStr); + if (tmp == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate a string registry entry!\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + + tmp->pdata = portMemAllocNonPaged(bufferLength); + if (tmp->pdata == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to write a string registry entry!\n"); + return NV_ERR_NO_MEMORY; + } + + tmp->type = NV_REGISTRY_ENTRY_TYPE_STRING; + tmp->len = bufferLength; + portMemCopy((void *)tmp->pdata, tmp->len, buffer, (bufferLength - 1)); + tmp->pdata[bufferLength-1] = '\0'; + + return NV_OK; +} + +NV_STATUS RmReadRegistryString( + nv_state_t *nv, + const char *regParmStr, + NvU8 *buffer, + NvU32 *pBufferLength +) +{ + NvU32 bufferLength; + nv_reg_entry_t *tmp; + + if ((regParmStr == NULL) || (buffer == NULL) || (pBufferLength == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + DBG_REG_PRINTF("%s: %s\n", __FUNCTION__, regParmStr); + + bufferLength = *pBufferLength; + *pBufferLength = 0; + *buffer = '\0'; + + tmp = regFindRegistryEntry(nv, regParmStr, + NV_REGISTRY_ENTRY_TYPE_STRING, NULL); + if (tmp == NULL) + { + return NV_ERR_GENERIC; + } + + if (bufferLength >= tmp->len) + { + portMemCopy((void *)buffer, bufferLength, (void *)tmp->pdata, tmp->len); + *pBufferLength = tmp->len; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "buffer (length: %u) is too small (data length: %u)\n", + bufferLength, tmp->len); + return NV_ERR_BUFFER_TOO_SMALL; + } + + return NV_OK; +} + +NV_STATUS RmInitRegistry(void) +{ + NV_STATUS rmStatus; + + rmStatus = os_registry_init(); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to initialize the OS registry!\n"); + } + + return rmStatus; +} + +NV_STATUS RmDestroyRegistry(nv_state_t *nv) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *tmp; + + if (nvp != NULL) + { + tmp = nvp->pRegistry; + nvp->pRegistry = NULL; + } + else + { + tmp = the_registry; + the_registry = NULL; + } + + while (tmp != NULL) + { + nv_reg_entry_t *entry = tmp; + tmp = tmp->next; + regFreeEntry(entry); + } + + return NV_OK; +} + +static void regCountEntriesAndSize( + NvU32 *pNumEntries, // Pointer to number of entries + NvU32 *pSize, // Pointer to total size + nv_reg_entry_t *pRegEntry // Pointer local or global registry +) +{ + // + // Note that *pNumEntries and *pSize are not initialized here. This is so + // we can accumulate totals of both global and local registries. 
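+    // Each entry contributes its name string (plus the terminating NUL) and
+    // its data payload to the running size, matching the packing done later;
+    // for example, a hypothetical 5-character name with a 16-byte binary value
+    // adds 6 + 16 = 22 bytes.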
+ // + NvU32 numEntries = *pNumEntries; + NvU32 size = *pSize; + + while ((pRegEntry != NULL) && (pRegEntry->regParmStr != NULL)) + { + size += portStringLength(pRegEntry->regParmStr) + 1 + pRegEntry->len; + numEntries++; + pRegEntry = pRegEntry->next; + } + + *pNumEntries = numEntries; + *pSize = size; +} + +static NV_STATUS regCopyEntriesToPackedBuffer( + PACKED_REGISTRY_TABLE *pRegTable, // Pointer to packed record + nv_reg_entry_t *pRegEntry, // Pointer local or global registry + NvU32 *pEntryIndex, // Pointer to next index + NvU32 *pDataOffset // Pointer to offset of next data byte. +) +{ + NvU8 *pByte = (NvU8 *)pRegTable; // Byte version of record pointer. + NV_STATUS nvStatus = NV_OK; + NvU32 entryIndex = *pEntryIndex; + NvU32 dataOffset = *pDataOffset; + + // Walk the records and copy the data. + while ((pRegEntry != NULL) && (pRegEntry->regParmStr != NULL)) + { + PACKED_REGISTRY_ENTRY *pEntry = &pRegTable->entries[entryIndex]; + NvU32 slen = portStringLength(pRegEntry->regParmStr) + 1; + + // Sanity check the data offset and index against counted totals. + if ((dataOffset + slen + pRegEntry->len > pRegTable->size) || + (entryIndex >= pRegTable->numEntries)) + { + // Something has changed since we counted them? + NV_PRINTF(LEVEL_ERROR, "Registry entry record is full\n"); + nvStatus = NV_ERR_INVALID_STATE; + break; + } + + // Copy registry entry name to data blob. + pEntry->nameOffset = dataOffset; + portMemCopy(&pByte[dataOffset], slen, pRegEntry->regParmStr, slen); + dataOffset += slen; + + switch (pRegEntry->type) + { + case NV_REGISTRY_ENTRY_TYPE_DWORD: + pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_DWORD; + pEntry->length = sizeof(NvU32); + pEntry->data = pRegEntry->data; + break; + + case NV_REGISTRY_ENTRY_TYPE_BINARY: + case NV_REGISTRY_ENTRY_TYPE_STRING: + pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_STRING; + if (pRegEntry->type == NV_REGISTRY_ENTRY_TYPE_BINARY) + pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_BINARY; + + pEntry->length = pRegEntry->len; + pEntry->data = dataOffset; + portMemCopy(&pByte[dataOffset], pEntry->length, + pRegEntry->pdata, pRegEntry->len); + dataOffset += pRegEntry->len; + break; + + default: + // We should never get here. + pEntry->type = REGISTRY_TABLE_ENTRY_TYPE_UNKNOWN; + pEntry->length = 0; + pEntry->data = 0; + DBG_BREAKPOINT(); + break; + } + + pRegEntry = pRegEntry->next; + entryIndex++; + } + + *pEntryIndex = entryIndex; + *pDataOffset = dataOffset; + return nvStatus; +} + +// Package registry entries +NV_STATUS RmPackageRegistry( + nv_state_t *nv, + PACKED_REGISTRY_TABLE *pRegTable, + NvU32 *pSize +) +{ + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + nv_reg_entry_t *pLocalRegistry = NULL; + NV_STATUS nvStatus = NV_OK; + NvU32 totalSize; + NvU32 numEntries; + + if (pSize == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // Use the local (per-device) registry if we have one. + if (nvp != NULL) + pLocalRegistry = nvp->pRegistry; + + numEntries = 0; + totalSize = NV_OFFSETOF(PACKED_REGISTRY_TABLE, entries); + + // Count the number of global entries and total size. + regCountEntriesAndSize(&numEntries, &totalSize, the_registry); + + // Count the number of local entries and total size. + regCountEntriesAndSize(&numEntries, &totalSize, pLocalRegistry); + + // Add table record size into total size. + totalSize += sizeof(PACKED_REGISTRY_ENTRY) * numEntries; + + // + // If this function is called to only compute total size of registry table, + // then we are done here. 
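+    // Callers are expected to make two passes: first with pRegTable == NULL to
+    // learn the required size, then again with a buffer of at least that size
+    // so the entries can be packed into it.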
+ // + if (pRegTable == NULL) + { + *pSize = totalSize; + return NV_OK; + } + + // Return warning if there are no registry entries. + if (numEntries == 0) + return NV_WARN_NOTHING_TO_DO; + + if (totalSize > *pSize) + { + NV_PRINTF(LEVEL_ERROR, "Registry entries overflow RPC record\n"); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Fill in our new structure with the first pass (counting) values. + pRegTable->size = totalSize; + *pSize = totalSize; + pRegTable->numEntries = numEntries; + + // Offset of first byte after the registry entry table. + totalSize = NV_OFFSETOF(PACKED_REGISTRY_TABLE, entries) + + (sizeof(PACKED_REGISTRY_ENTRY) * numEntries); + + // Starting index in the registry entry table. + numEntries = 0; + + // Walk the global registry and copy the data. + nvStatus = regCopyEntriesToPackedBuffer(pRegTable, + the_registry, &numEntries, &totalSize); + + // Walk the local registry and copy the data. + if (nvStatus == NV_OK) + { + nvStatus = regCopyEntriesToPackedBuffer(pRegTable, + pLocalRegistry, &numEntries, &totalSize); + } + + // Sanity check second pass against first pass. + if ((numEntries != pRegTable->numEntries) || (totalSize != pRegTable->size)) + { + NV_PRINTF(LEVEL_ERROR, "First/second pass mismatch\n"); + nvStatus = NV_ERR_INVALID_STATE; + } + + return nvStatus; +} + diff --git a/src/nvidia/arch/nvalloc/unix/src/rm-gpu-ops.c b/src/nvidia/arch/nvalloc/unix/src/rm-gpu-ops.c new file mode 100644 index 000000000..f350bd1eb --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/rm-gpu-ops.c @@ -0,0 +1,812 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include "rmapi/nv_gpu_ops.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h" + +NV_STATUS NV_API_CALL rm_gpu_ops_create_session( + nvidia_stack_t *sp, + struct gpuSession **session) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsCreateSession(session); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session ( + nvidia_stack_t *sp, gpuSessionHandle session) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDestroySession(session); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_device_create ( + nvidia_stack_t *sp, + nvgpuSessionHandle_t session, + const gpuInfo *pGpuInfo, + const NvProcessorUuid *gpuUuid, + nvgpuDeviceHandle_t *device, + NvBool bCreateSmcPartition) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDeviceCreate(session, pGpuInfo, gpuUuid, device, bCreateSmcPartition); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy ( + nvidia_stack_t *sp, + gpuDeviceHandle device) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDeviceDestroy(device); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create ( + nvidia_stack_t *sp, + gpuDeviceHandle device, + NvU64 vaBase, + NvU64 vaSize, + gpuAddressSpaceHandle *vaSpace, + gpuAddressSpaceInfo *vaSpaceInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsAddressSpaceCreate(device, vaBase, vaSize, vaSpace, + vaSpaceInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space( + nvidia_stack_t *sp, + gpuDeviceHandle device, + NvHandle hUserClient, + NvHandle hUserVASpace, + gpuAddressSpaceHandle *dupedVaspace, + gpuAddressSpaceInfo *vaSpaceInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDupAddressSpace(device, hUserClient, hUserVASpace, + dupedVaspace, vaSpaceInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *sp, + gpuAddressSpaceHandle vaspace) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + nvGpuOpsAddressSpaceDestroy(vaspace); + NV_EXIT_RM_RUNTIME(sp,fp); + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb( + nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, + NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsMemoryAllocFb(vaspace, size, gpuOffset, allocInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_get_p2p_caps(nvidia_stack_t *sp, + gpuDeviceHandle device1, + gpuDeviceHandle device2, + getP2PCapsParams *pP2pCapsParams) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsGetP2PCaps(device1, device2, pP2pCapsParams); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_sys( + nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, + NvLength size, NvU64 *gpuOffset, gpuAllocInfo *allocInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsMemoryAllocSys(vaspace, size, gpuOffset, allocInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + 
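+//
+// Note: unlike the simple nvGpuOps* wrappers above, the PMA eviction-callback
+// registration wrappers below also bracket the call with threadStateInit()/
+// threadStateFree(), in addition to entering the RM runtime on the altstack.
+//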
+NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks( + nvidia_stack_t *sp, + void *pPma, + pmaEvictPagesCb_t evictPages, + pmaEvictRangeCb_t evictRange, + void *callbackData) +{ + THREAD_STATE_NODE threadState; + NV_STATUS rmStatus; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + rmStatus = pmaRegisterEvictionCb(pPma, evictPages, evictRange, callbackData); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks( + nvidia_stack_t *sp, + void *pPma) +{ + THREAD_STATE_NODE threadState; + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + pmaUnregisterEvictionCb(pPma); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object( + nvidia_stack_t *sp, + gpuDeviceHandle device, + void **pPma, + const nvgpuPmaStatistics_t *pPmaPubStats) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsGetPmaObject(device, pPma, + (const UvmPmaStatistics **)pPmaPubStats); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages( + nvidia_stack_t *sp, void *pPma, + NvLength pageCount, NvU32 pageSize, + nvgpuPmaAllocationOptions_t pPmaAllocOptions, + NvU64 *pPages) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsPmaAllocPages(pPma, pageCount, pageSize, + pPmaAllocOptions, pPages); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages( + nvidia_stack_t *sp, void *pPma, + NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsPmaPinPages(pPma, pPages, pageCount, pageSize, flags); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages( + nvidia_stack_t *sp, void *pPma, + NvU64 *pPages, NvLength pageCount, NvU32 pageSize) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsPmaUnpinPages(pPma, pPages, pageCount, pageSize); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map( + nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, + NvU64 gpuOffset, NvLength length, void **cpuPtr, NvU32 pageSize) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsMemoryCpuMap(vaspace, gpuOffset, length, cpuPtr, + pageSize); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap( + nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, void* cpuPtr) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + nvGpuOpsMemoryCpuUnMap(vaspace, cpuPtr); + NV_EXIT_RM_RUNTIME(sp,fp); + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *sp, + gpuAddressSpaceHandle vaspace, + const gpuChannelAllocParams *allocParams, + gpuChannelHandle *channel, + gpuChannelInfo *channelInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsChannelAllocate(vaspace, allocParams, channel, + channelInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t * sp, + nvgpuChannelHandle_t channel) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + 
nvGpuOpsChannelDestroy(channel); + NV_EXIT_RM_RUNTIME(sp,fp); + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *sp, + void *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize, NvU32 flags) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + nvGpuOpsPmaFreePages(pPma, pPages, pageCount, pageSize, flags); + NV_EXIT_RM_RUNTIME(sp,fp); + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_memory_free( + nvidia_stack_t *sp, gpuAddressSpaceHandle vaspace, NvU64 gpuOffset) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + nvGpuOpsMemoryFree(vaspace, gpuOffset); + NV_EXIT_RM_RUNTIME(sp,fp); + return NV_OK; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuCaps * caps) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsQueryCaps(device, caps); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuCesCaps *caps) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsQueryCesCaps(device, caps); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *sp, + const NvProcessorUuid *pUuid, + const gpuClientInfo *pGpuClientInfo, + gpuInfo *pGpuInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsGetGpuInfo(pUuid, pGpuClientInfo, pGpuInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *sp, + gpuDeviceHandle device) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsServiceDeviceInterruptsRM(device); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *sp, + gpuAddressSpaceHandle vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsSetPageDirectory(vaSpace, physAddress, numEntries, + bVidMemAperture, pasid); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *sp, + gpuAddressSpaceHandle vaSpace) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsUnsetPageDirectory(vaSpace); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *sp, + gpuAddressSpaceHandle srcVaSpace, + NvU64 srcAddress, + gpuAddressSpaceHandle dstVaSpace, + NvU64 *dstAddress) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDupAllocation(srcVaSpace, srcAddress, dstVaSpace, dstAddress); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_dup_memory (nvidia_stack_t *sp, + gpuDeviceHandle device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + nvgpuMemoryInfo_t gpuMemoryInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDupMemory(device, hClient, hPhysMemory, hDupMemory, gpuMemoryInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_free_duped_handle (nvidia_stack_t *sp, + gpuDeviceHandle device, + NvHandle hPhysHandle) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsFreeDupedHandle(device, 
hPhysHandle); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_get_fb_info (nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuFbInfo * fbInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsGetFbInfo(device, fbInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_get_ecc_info (nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuEccInfo * eccInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsGetEccInfo(device, eccInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +// +// Please see the comments for nvUvmInterfaceOwnPageFaultIntr(), in +// nv_uvm_interface.h, for the recommended way to use this routine. +// +// How it works: +// +// The rmGpuLocksAcquire call generally saves the current GPU interrupt +// state, then disables interrupt generation for one (or all) GPUs. +// Likewise, the rmGpuLocksRelease call restores (re-enables) those +// interrupts to their previous state. However, the rmGpuLocksRelease +// call does NOT restore interrupts that RM does not own. +// +// This is rather hard to find in the code, so: very approximately, the +// following sequence happens: rmGpuLocksRelease, osEnableInterrupts, +// intrRestoreNonStall_HAL, intrEncodeIntrEn_HAL, and that last one skips +// over any interrupts that RM does not own. +// +// This means that things are a bit asymmetric, because this routine +// actually changes that ownership in between the rmGpuLocksAcquire and +// rmGpuLocksRelease calls. So: +// +// -- If you call this routine with bOwnInterrupts == NV_TRUE (UVM is +// taking ownership from the RM), then rmGpuLocksAcquire disables all +// GPU interrupts. Then the ownership is taken away from RM, so the +// rmGpuLocksRelease call leaves the replayable page fault interrupts +// disabled. It is then up to UVM (the caller) to enable replayable +// page fault interrupts when it is ready. +// +// -- If you call this routine with bOwnInterrupts == NV_FALSE (UVM is +// returning ownership to the RM), then rmGpuLocksAcquire disables +// all GPU interrupts that RM owns. Then the ownership is returned to +// RM, so the rmGpuLocksRelease call re-enables replayable page fault +// interrupts. So, that implies that you need to disable replayable page +// fault interrupts before calling this routine, in order to hand +// over a GPU to RM that is not generating interrupts, until RM is +// ready to handle the interrupts. 
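+//
+// In short: after an NV_TRUE call, replayable page fault interrupts remain
+// disabled until UVM re-enables them; before an NV_FALSE call, the caller
+// should already have disabled them so that RM re-enables them on lock
+// release.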
+// +NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *sp, + struct gpuDevice *device, + NvBool bOwnInterrupts) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsOwnPageFaultIntr(device, bOwnInterrupts); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info (nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuFaultInfo *pFaultInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsInitFaultInfo(device, pFaultInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info (nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuFaultInfo *pFaultInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDestroyFaultInfo(device, pFaultInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +// Functions +// +// - rm_gpu_ops_has_pending_non_replayable_faults +// - rm_gpu_ops_get_non_replayable_faults +// +// Cannot take the GPU/RM lock because it is called during fault servicing. +// This could produce deadlocks if the UVM bottom half gets stuck behind a +// stalling interrupt that cannot be serviced if UVM is holding the lock. +// +// However, these functions can be safely called with no locks because it is +// just accessing the given client shadow fault buffer, which is implemented +// using a lock-free queue. There is a different client shadow fault buffer +// per GPU: RM top-half producer, UVM top/bottom-half consumer. + +NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *sp, + gpuFaultInfo *pFaultInfo, + NvBool *hasPendingFaults) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsHasPendingNonReplayableFaults(pFaultInfo, hasPendingFaults); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *sp, + gpuFaultInfo *pFaultInfo, + void *faultBuffer, + NvU32 *numFaults) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsGetNonReplayableFaults(pFaultInfo, faultBuffer, numFaults); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuAccessCntrInfo *accessCntrInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsInitAccessCntrInfo(device, accessCntrInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuAccessCntrInfo *accessCntrInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDestroyAccessCntrInfo(device, accessCntrInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuAccessCntrInfo *accessCntrInfo, + gpuAccessCntrConfig *accessCntrConfig) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsEnableAccessCntr(device, accessCntrInfo, accessCntrConfig); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *sp, + gpuDeviceHandle device, + gpuAccessCntrInfo *accessCntrInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsDisableAccessCntr(device, 
accessCntrInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_p2p_object_create(nvidia_stack_t *sp, + gpuDeviceHandle device1, + gpuDeviceHandle device2, + NvHandle *hP2pObject) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp, fp); + rmStatus = nvGpuOpsP2pObjectCreate(device1, device2, hP2pObject); + NV_EXIT_RM_RUNTIME(sp, fp); + return rmStatus; +} + +void NV_API_CALL +rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *sp, + nvgpuSessionHandle_t session, + NvHandle hP2pObject) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp, fp); + nvGpuOpsP2pObjectDestroy(session, hP2pObject); + NV_EXIT_RM_RUNTIME(sp, fp); +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t* sp, + nvgpuAddressSpaceHandle_t vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + nvgpuExternalMappingInfo_t gpuExternalMappingInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp, fp); + rmStatus = nvGpuOpsGetExternalAllocPtes(vaSpace, hDupedMemory, offset, size, + gpuExternalMappingInfo); + NV_EXIT_RM_RUNTIME(sp, fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_retain_channel(nvidia_stack_t* sp, + nvgpuAddressSpaceHandle_t vaSpace, + NvHandle hClient, + NvHandle hChannel, + void **retainedChannel, + nvgpuChannelInstanceInfo_t channelInstanceInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp, fp); + rmStatus = nvGpuOpsRetainChannel(vaSpace, hClient, hChannel, + (gpuRetainedChannel **)retainedChannel, + channelInstanceInfo); + NV_EXIT_RM_RUNTIME(sp, fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_bind_channel_resources(nvidia_stack_t* sp, + void *retainedChannel, + nvgpuChannelResourceBindParams_t channelResourceBindParams) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp, fp); + rmStatus = nvGpuOpsBindChannelResources(retainedChannel, + channelResourceBindParams); + NV_EXIT_RM_RUNTIME(sp, fp); + return rmStatus; +} + +void NV_API_CALL +rm_gpu_ops_release_channel(nvidia_stack_t *sp, void *retainedChannel) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp, fp); + nvGpuOpsReleaseChannel(retainedChannel); + NV_EXIT_RM_RUNTIME(sp, fp); +} + +void NV_API_CALL +rm_gpu_ops_stop_channel(nvidia_stack_t * sp, + void *retainedChannel, + NvBool bImmediate) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + nvGpuOpsStopChannel(retainedChannel, bImmediate); + NV_EXIT_RM_RUNTIME(sp, fp); +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t* sp, + nvgpuAddressSpaceHandle_t vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + nvgpuExternalMappingInfo_t gpuExternalMappingInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp, fp); + rmStatus = nvGpuOpsGetChannelResourcePtes(vaSpace, resourceDescriptor, + offset, size, + gpuExternalMappingInfo); + NV_EXIT_RM_RUNTIME(sp, fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *sp, + nvgpuDeviceHandle_t device, + const void *pFaultPacket) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsReportNonReplayableFault(device, pFaultPacket); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *sp, + gpuDeviceHandle device, + const gpuPagingChannelAllocParams *allocParams, + gpuPagingChannelHandle *channel, + gpuPagingChannelInfo *channelInfo) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = 
nvGpuOpsPagingChannelAllocate(device, allocParams, channel, + channelInfo); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +void NV_API_CALL +rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *sp, + gpuPagingChannelHandle channel) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + nvGpuOpsPagingChannelDestroy(channel); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_paging_channels_map(nvidia_stack_t *sp, + gpuAddressSpaceHandle srcVaSpace, + NvU64 srcAddress, + gpuDeviceHandle device, + NvU64 *dstAddress) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsPagingChannelsMap(srcVaSpace, srcAddress, device, dstAddress); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + +void NV_API_CALL +rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *sp, + gpuAddressSpaceHandle srcVaSpace, + NvU64 srcAddress, + gpuDeviceHandle device) +{ + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + nvGpuOpsPagingChannelsUnmap(srcVaSpace, srcAddress, device); + NV_EXIT_RM_RUNTIME(sp,fp); +} + +NV_STATUS NV_API_CALL +rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *sp, + gpuPagingChannelHandle channel, + char *methodStream, + NvU32 methodStreamSize) +{ + NV_STATUS rmStatus; + void *fp; + NV_ENTER_RM_RUNTIME(sp,fp); + rmStatus = nvGpuOpsPagingChannelPushStream(channel, methodStream, methodStreamSize); + NV_EXIT_RM_RUNTIME(sp,fp); + return rmStatus; +} + diff --git a/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c b/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c new file mode 100644 index 000000000..570354661 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/rmobjexportimport.c @@ -0,0 +1,624 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * + * @brief Provides RmExportObject, RmImportObject, RmFreeObjExportHandle and + * RmGetExportObjectInfo interfaces : + * + * These interfaces allow rm clients to export their objects into + * a unique RmObjExportHandle which another rm client could + * import, even if the source rm client gets destroyed. + * + * RM's device instance may get destroyed asynchronously, in which + * case exported objects residing on that device instance also get + * destroyed. 
This means it is not possible to import it back, but the + * RmObjExportHandle into which the object had been exported still + * remains valid but no other object could get it. + * + * There are not init/fini routines, it is the responsibility of the + * rest of RM's eco-system to make sure that all RmObjExportHandles get + * freed during driver unload. + * + * The api lock is expected to be held before calling into + * rmobjexportimport.c; do not hold gpu or any other lock. + */ + +#include "rmobjexportimport.h" +#include "nvlimits.h" +#include "gpu/device/device.h" + +#include "containers/map.h" +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" + +#include "class/cl0080.h" +#include "class/cl2080.h" +#include +#include + +// +// A reference to an RmObjExportHandle +// generated by function RmGenerateObjExportHandle(). +// +typedef struct +{ + NvU32 deviceInstance; +} RmObjExportHandleRef; +MAKE_MAP(RmObjExportHandleMap, RmObjExportHandleRef); + +// +// Memory allocator +// +PORT_MEM_ALLOCATOR *pMemAllocator; + +// +// Map RmObjExportHandle -> RmObjExportHandleRef +// +RmObjExportHandleMap objExportHandleMap; + +// +// Rm client to use to dup an object exported to RmObjExportHandle. The minimal +// requirement for duping is to have a device object allocated. This rm client +// is simply like any other external rm client and has no any special handling. +// +// We keep this rm client just like any other external rm client: if +// gpu(s)/device gets powered-down/uninitialized, rm objects allocated by +// external rm clients and located on that gpu(s)/device gets freed (the +// os-layer does that). In that way, code in this file doesn't need to worry +// about freeing exported objects located on that gpu(s)/device. +// +NvHandle hObjExportRmClient; + +// +// Tracker for device and subdevice handles. For now only one subdevice +// (instance 0) is supported per device. +// +typedef struct +{ + NvHandle hRmDevice; + NvHandle hRmSubDevice; +} RmObjExportDevice; + +RmObjExportDevice objExportDevice[NV_MAX_DEVICES]; + +// +// Usage reference counter for static object in this file like rm client used to +// dup an exported object, memory allocator, map etc. +// +NvU64 objExportImportRefCount; + +// +// Static functions for internal use to code in this file. +// +static NV_STATUS RmRefObjExportImport (void); +static void RmUnrefObjExportImport (void); + +static RmObjExportHandle RmGenerateObjExportHandle (NvU32 deviceInstance); +static NV_STATUS RmUnrefObjExportHandle (RmObjExportHandle hObject); + +// +// Free the RmObjExportHandle. +// +static NV_STATUS RmUnrefObjExportHandle(RmObjExportHandle hObject) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + RmObjExportHandleRef *pHandleRef = + mapFind(&objExportHandleMap, hObject); + + if (pHandleRef == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (pRmApi->Free(pRmApi, + hObjExportRmClient, + (NvHandle)mapKey(&objExportHandleMap, pHandleRef)) != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Exported object trying to free was zombie in %s\n", + __FUNCTION__); + } + + mapRemove(&objExportHandleMap, pHandleRef); + + return NV_OK; +} + +// +// Generate unique RmObjExportHandle. +// +static RmObjExportHandle RmGenerateObjExportHandle(NvU32 deviceInstance) +{ + // + // The object export handle belongs to range of 0 to + // (MAX_OBJ_EXPORT_HANDLES - 1). + // + // Handle 0 is considered as invalid object handle, this function generates + // handle from range of 1 to (MAX_OBJ_EXPORT_HANDLES - 1). 
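+    //
+    // For example: with MAX_OBJ_EXPORT_HANDLES == 0x80000, the loop below
+    // hands out 1, 2, ..., 0x7ffff and then wraps back to 1, skipping any
+    // value that is still present in objExportHandleMap or that collides
+    // with hObjExportRmClient; if it walks the full range without finding a
+    // free value, it returns 0, the invalid handle.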
+ // + #define MAX_OBJ_EXPORT_HANDLES 0x80000 + + static NvHandle hObjExportHandleNext = 1; + + RmObjExportHandle hStartHandle = hObjExportHandleNext; + RmObjExportHandle hObject = 0; + + do + { + RmObjExportHandleRef *pHandleRef; + + hObject = hObjExportHandleNext++; + /* Reset hObjExportHandleNext to next valid handle */ + if (hObjExportHandleNext == MAX_OBJ_EXPORT_HANDLES) { + hObjExportHandleNext = 1; + } + + pHandleRef = mapFind(&objExportHandleMap, hObject); + + if (hObject != hObjExportRmClient && pHandleRef == NULL) + { + break; + } + else + { + hObject = 0; + } + + } while(hObjExportHandleNext != hStartHandle); + + if (hObject != 0) + { + RmObjExportHandleRef *pHandleRef = + mapInsertNew(&objExportHandleMap, hObject); + + if (pHandleRef != NULL) + { + pHandleRef->deviceInstance = deviceInstance; + } + else + { + hObject = 0; + } + } + + return hObject; +} + +// +// Validate that the given hObject is not one of our internally used handles. +// +// Note that mapFind(&objExportHandleMap, hObject) could still fail; that is the +// caller's responsibility. +// +static NvBool RmValidateHandleAgainstInternalHandles(RmObjExportHandle hObject) +{ + NvU32 i; + + // + // No external RmObjExportHandle could be valid if hObjExportRmClient has + // not been allocated yet, or if it is equal to any of the handles used + // internally by code in this file. + // + if (objExportImportRefCount == 0 || hObjExportRmClient == 0 || + hObject == hObjExportRmClient) + { + return NV_FALSE; + } + + for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++) + { + if (objExportDevice[i].hRmDevice != 0 && + (hObject == objExportDevice[i].hRmDevice || + hObject == objExportDevice[i].hRmSubDevice)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +// +// Increment reference count of static objects internally +// used by code in this file. +// +static NV_STATUS RmRefObjExportImport(void) +{ + NV_STATUS rmStatus = NV_OK; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if ((objExportImportRefCount++) != 0) + { + NV_ASSERT(hObjExportRmClient != 0); + NV_ASSERT(pMemAllocator != NULL); + return NV_OK; + } + + rmStatus = pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &hObjExportRmClient); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc root in %s\n", __FUNCTION__); + goto failed; + } + + pMemAllocator = portMemAllocatorCreateNonPaged(); + + if (pMemAllocator == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to alloc memory allocator in %s\n", + __FUNCTION__); + goto failed; + } + + mapInit(&objExportHandleMap, pMemAllocator); + + return NV_OK; + +failed: + + RmUnrefObjExportImport(); + + return rmStatus; +} + +// +// Decrement reference count of static objects internally used by code in this +// file, and free them if reference count reaches to zero. 
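+//
+// RmRefObjExportImport() and RmUnrefObjExportImport() are paired: every
+// successful RmExportObject() takes one reference, which is dropped either in
+// the failure path of RmExportObject() itself or later by
+// RmFreeObjExportHandle(). The internal RM client, memory allocator and
+// handle map therefore exist only while at least one exported handle is
+// outstanding.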
+// +static void RmUnrefObjExportImport(void) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if ((--objExportImportRefCount) != 0) + { + return; + } + + if (pMemAllocator != NULL) + { + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(objExportDevice); i++) + { + if (objExportDevice[i].hRmDevice != 0) + { + RmUnrefObjExportHandle(objExportDevice[i].hRmSubDevice); + objExportDevice[i].hRmSubDevice = 0; + RmUnrefObjExportHandle(objExportDevice[i].hRmDevice); + objExportDevice[i].hRmDevice = 0; + } + } + + mapDestroy(&objExportHandleMap); + + portMemAllocatorRelease(pMemAllocator); + pMemAllocator = NULL; + } + + if (hObjExportRmClient != 0) + { + NV_STATUS rmStatus = pRmApi->Free(pRmApi, + hObjExportRmClient, + hObjExportRmClient); + + NV_ASSERT(rmStatus == NV_OK); + hObjExportRmClient = 0; + } +} + +NV_STATUS RmExportObject(NvHandle hSrcClient, NvHandle hSrcObject, + RmObjExportHandle *pDstObject, NvU32 *pDeviceInstance) +{ + RmObjExportHandle hDstObject; + NvU32 deviceInstance = NV_MAX_DEVICES; + NvHandle hTmpObject; + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if (pDstObject == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Find the device instance on which the rm object exists. + // + hTmpObject = hSrcObject; + do + { + RsResourceRef *pResourceRef; + + status = serverutilGetResourceRef(hSrcClient, hTmpObject, &pResourceRef); + if (status != NV_OK) + return status; + + Device *pDevice = dynamicCast(pResourceRef->pResource, Device); + if (pDevice != NULL) + { + deviceInstance = pDevice->deviceInst; + break; + } + + hTmpObject = pResourceRef->pParentRef ? pResourceRef->pParentRef->hResource : 0; + } while (hTmpObject != 0); + + if ((hTmpObject == 0) || (deviceInstance >= NV_MAX_DEVICES)) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + status = RmRefObjExportImport(); + + if (status != NV_OK) + { + return status; + } + + if (objExportDevice[deviceInstance].hRmDevice == 0 || + serverutilValidateNewResourceHandle(hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice)) + { + // + // Device object has not been created or it got destroyed in the + // teardown path of device instance destruction; allocate a fresh device + // object. 
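+        //
+        // Note that the device and subdevice *handles* are generated only
+        // once per device instance and are reused; only the underlying
+        // NV01_DEVICE_0 and NV20_SUBDEVICE_0 objects are (re)allocated here
+        // under the internal export client.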
+ // + NV0080_ALLOC_PARAMETERS params; + NV2080_ALLOC_PARAMETERS subdevParams; + + if (objExportDevice[deviceInstance].hRmDevice == 0) + { + NV_ASSERT(objExportDevice[deviceInstance].hRmSubDevice == 0); + + objExportDevice[deviceInstance].hRmDevice = + RmGenerateObjExportHandle(deviceInstance); + objExportDevice[deviceInstance].hRmSubDevice = + RmGenerateObjExportHandle(deviceInstance); + + if (objExportDevice[deviceInstance].hRmDevice == 0 || + objExportDevice[deviceInstance].hRmSubDevice == 0) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handles in %s\n", + __FUNCTION__); + + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + portMemSet(¶ms, 0, sizeof(NV0080_ALLOC_PARAMETERS)); + + params.deviceId = deviceInstance; + + status = pRmApi->AllocWithHandle(pRmApi, + hObjExportRmClient, + hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice, + NV01_DEVICE_0, + ¶ms); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc device in %s\n", + __FUNCTION__); + goto done; + } + + portMemSet(&subdevParams, 0, sizeof(NV2080_ALLOC_PARAMETERS)); + + subdevParams.subDeviceId = 0; + + status = pRmApi->AllocWithHandle(pRmApi, + hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice, + objExportDevice[deviceInstance].hRmSubDevice, + NV20_SUBDEVICE_0, + &subdevParams); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc subdevice in %s\n", + __FUNCTION__); + + (void) pRmApi->Free(pRmApi, hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice); + goto done; + } + } + + hDstObject = RmGenerateObjExportHandle(deviceInstance); + + if (hDstObject == 0) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate object handle in %s\n", + __FUNCTION__); + status = NV_ERR_NO_MEMORY; + goto done; + } + + // If duping under device handle fails, try subdevice handle. + status = pRmApi->DupObject(pRmApi, + hObjExportRmClient, + objExportDevice[deviceInstance].hRmDevice, + &hDstObject, + hSrcClient, + hSrcObject, + 0 /* flags */); + if (status != NV_OK) + { + if (status == NV_ERR_INVALID_OBJECT_PARENT) + { + NV_PRINTF(LEVEL_INFO, + "pRmApi->DupObject(Dev, failed due to invalid parent in %s." 
+ " Now attempting DupObject with Subdev handle.\n", + __FUNCTION__); + + status = pRmApi->DupObject(pRmApi, + hObjExportRmClient, + objExportDevice[deviceInstance].hRmSubDevice, + &hDstObject, + hSrcClient, + hSrcObject, + 0 /* flags */); + if (status != NV_OK) + { + RmUnrefObjExportHandle(hDstObject); + + NV_PRINTF(LEVEL_ERROR, + "pRmApi->DupObject(Subdev, failed with error code 0x%x in %s\n", + status, __FUNCTION__); + goto done; + } + } + else + { + RmUnrefObjExportHandle(hDstObject); + + NV_PRINTF(LEVEL_ERROR, + "pRmApi->DupObject(Dev, failed with error code 0x%x in %s\n", + status, __FUNCTION__); + goto done; + } + } + + if (pDeviceInstance != NULL) + { + *pDeviceInstance = deviceInstance; + } + + *pDstObject = hDstObject; + +done: + if (status != NV_OK) + { + RmUnrefObjExportImport(); + } + + return status; +} + +void RmFreeObjExportHandle(RmObjExportHandle hObject) +{ + if (!RmValidateHandleAgainstInternalHandles(hObject)) + { + NV_PRINTF(LEVEL_ERROR, "Invalid handle to exported object in %s\n", + __FUNCTION__); + return; + } + + RmUnrefObjExportHandle(hObject); + + RmUnrefObjExportImport(); +} + +NV_STATUS RmImportObject(NvHandle hDstClient, NvHandle hDstParent, + NvHandle *phDstObject, RmObjExportHandle hSrcObject, + NvU8 *pObjectType) +{ + NV_STATUS status; + NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS params; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + if (!RmValidateHandleAgainstInternalHandles(hSrcObject)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (mapFind(&objExportHandleMap, hSrcObject) == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pObjectType != NULL) + { + params.hObject = hSrcObject; + params.mapFlags = 0; + params.addrSpaceType = \ + NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + + status = pRmApi->Control(pRmApi, hObjExportRmClient, hObjExportRmClient, + NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE, + ¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GET_ADDR_SPACE_TYPE failed with error code 0x%x in %s\n", + status, __FUNCTION__); + return status; + } + + switch (params.addrSpaceType) + { + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_SYSMEM; + break; + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_VIDMEM; + break; + case NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC: + *pObjectType = NV0000_CTRL_CMD_OS_UNIX_IMPORT_OBJECT_TYPE_FABRIC; + break; + default: + NV_ASSERT_OK_OR_RETURN(NV_ERR_INVALID_ARGUMENT); + } + } + + status = pRmApi->DupObject(pRmApi, hDstClient, hDstParent, phDstObject, + hObjExportRmClient, hSrcObject, + 0 /* flags */); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "pRmApi->DupObject(pRmApi, failed with error code 0x%x in %s\n", + status, __FUNCTION__); + return status; + } + + return NV_OK; +} + +NV_STATUS RmGetExportObjectInfo(RmObjExportHandle hSrcObject, NvU32 *deviceInstance) +{ + RmObjExportHandleRef *pHandleRef = NULL; + + if (!RmValidateHandleAgainstInternalHandles(hSrcObject)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pHandleRef = mapFind(&objExportHandleMap, hSrcObject); + if (pHandleRef == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + *deviceInstance = pHandleRef->deviceInstance; + return NV_OK; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/subdevice_ctrl_os_unix.c b/src/nvidia/arch/nvalloc/unix/src/subdevice_ctrl_os_unix.c new file mode 100644 index 000000000..6989c7c7f --- /dev/null +++ 
b/src/nvidia/arch/nvalloc/unix/src/subdevice_ctrl_os_unix.c @@ -0,0 +1,100 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include // NV device driver interface +#include +#include +#include +#include +#include "gpu/gpu.h" +#include "gpu/gpu_resource.h" +#include "gpu/subdevice/subdevice.h" +#include +#include +#include "gpu/mem_mgr/mem_desc.h" +#include "mem_mgr/mem.h" +#include +#include +#include "rmapi/rs_utils.h" +#include "rmapi/client_resource.h" +#include +#include +#include // NV01_EVENT +#include // NV01_MEMORY_SYSTEM +#include // G84_PERFBUFFER +#include +#include +#include +#include + +/*! + * @brief Implements the NV2080_CTRL_CMD_OS_UNIX_VIDMEM_PERSISTENCE_STATUS + * RmControl request. It will check if the GPU video memory will be + * persistent during system suspend/resume cycle. + * + * @param[in] pSubdevice + * @param[in,out] pParams + * + * @return + * NV_OK Success + */ +NV_STATUS +subdeviceCtrlCmdOsUnixVidmemPersistenceStatus_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + + pParams->bVidmemPersistent = !gpuIsVidmemPreservationBrokenBug3172217(pGpu) && + (nv->preserve_vidmem_allocations || + nvp->s0ix_pm_enabled); + + return NV_OK; +} + +/*! + * @brief Implements the NV2080_CTRL_CMD_OS_UNIX_UPDATE_TGP_STATUS + * RmControl request. It sets restore TGP flag which is used + * to restore TGP limits when client is killed. + * + * @param[in] pSubdevice + * @param[in] pParams + * + * @return + * NV_OK Success + */ +NV_STATUS +subdeviceCtrlCmdOsUnixUpdateTgpStatus_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS *pParams +) +{ + pSubdevice->bUpdateTGP = pParams->bUpdateTGP; + + return NV_OK; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/unix_console.c b/src/nvidia/arch/nvalloc/unix/src/unix_console.c new file mode 100644 index 000000000..ad78a4c38 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/unix_console.c @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(Device *pDevice, + NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FB) == NV_OK) + { + // See if the console is on one of the subdevices of this device. + portMemSet(pParams, 0, sizeof(*pParams)); + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE) + + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager) != NULL) + { + NvU64 baseAddr; + + // There should only be one. + NV_ASSERT(pParams->width == 0); + + pParams->subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + // Console is either mapped to BAR1 or BAR2 + 16 MB + os_get_screen_info(&baseAddr, &pParams->width, + &pParams->height, &pParams->depth, + &pParams->pitch, + nv->bars[NV_GPU_BAR_INDEX_FB].cpu_address, + nv->bars[NV_GPU_BAR_INDEX_IMEM].cpu_address + 0x1000000); + } + + SLI_LOOP_END + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + else + { + NV_PRINTF(LEVEL_INFO,"%s: Failed to acquire GPU lock", __FUNCTION__); + } + + return NV_OK; +} diff --git a/src/nvidia/arch/nvalloc/unix/src/unix_intr.c b/src/nvidia/arch/nvalloc/unix/src/unix_intr.c new file mode 100644 index 000000000..69da2d394 --- /dev/null +++ b/src/nvidia/arch/nvalloc/unix/src/unix_intr.c @@ -0,0 +1,570 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "kernel/gpu/intr/intr.h" +#include +#include "gpu/disp/kern_disp.h" +#include "objtmr.h" + +static NvBool osInterruptPending( + OBJGPU *pGpu, + NvBool *serviced, + THREAD_STATE_NODE *pThreadState +) +{ + POBJDISP pDisp; + KernelDisplay *pKernelDisplay; + NvBool pending, sema_release; + THREAD_STATE_NODE threadState; + NvU32 gpuMask, gpuInstance; + Intr *pIntr = NULL; + MC_ENGINE_BITVECTOR intr0Pending; + MC_ENGINE_BITVECTOR intr1Pending; + + *serviced = NV_FALSE; + pending = NV_FALSE; + sema_release = NV_TRUE; + OBJGPU *pDeviceLockGpu = pGpu; + NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer + PORT_MEM_ALLOCATOR *pIsrAllocator; + + // + // GPU interrupt servicing ("top half") + // + // Top-level processing of GPU interrupts is performed using the + // steps below; although the code is straight forward, there + // are a few points to be aware of: + // + // 1) The GPUs lock is acquired for two reasons: to allow + // looping over GPUs atomically in SLI and to sanity + // check the PCI configuration space of any initialized + // GPUs. If the acquisition fails, the early return + // is acceptable since GPU interrupts are disabled while + // the lock is held; note that returning success + // in this case could interfere with the processing + // of third-party device interrupts if the IRQ is shared. + // Due to the above, some interrupts may be reported as + // unhandled if invocations of the ISR registered with + // the kernel are not serialized. This is bad, but + // ignored by currently supported kernels, provided most + // interrupts are handled. + // + // 2) Since acquisition of the lock disables interrupts + // on all initialized GPUs, NV_PMC_INTR_EN_0 can not be + // relied up on to determine whether interrupts are + // expected from a given GPU. The code below is therefore + // forced to rely on software state. NV_PMC_INTR_EN_0 + // is read only as a sanity check to guard against + // invalid GPU state (lack of PCI memory access, etc.). + // + // 3) High priority interrupts (VBLANK, etc.), are serviced in + // this function, service of all other interrupts is + // deferred until a bottom half. If a bottom half needs + // to be scheduled, release of the GPUs lock is + // likewise deferred until completion of the bottom half. + // + // 4) To reduce the risk of starvation, an effort is made to + // consolidate processing of interrupts pending on + // all GPUs sharing a given IRQ. + // + // 5) Care is taken to ensure that the consolidated interrupt + // processing is performed in the context of a GPU + // that has interrupts pending. Else if additional ISR + // processing via a bottom-half is required, this + // bottom-half ISR might race against the GPU's shut-down + // path. + // + + pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator)); + tlsIsrInit(pIsrAllocator); + + // For SWRL granular locking process the countdown timer interrupt. 
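+    // (With software runlist granular locking, the countdown timer interrupt
+    // is serviced locklessly here, before the GPUs lock is acquired below.)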
+ if (pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_SWRL_GRANULAR_LOCKING)) + { + threadStateInitISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS); + + gpuMask = gpumgrGetGpuMask(pDeviceLockGpu); + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + pIntr = GPU_GET_INTR(pGpu); + + if (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr)) + { + // If interrupt enable is garbage the GPU is probably in a bad state + if (intrGetIntrEnFromHw_HAL(pGpu, pIntr, &threadState) > INTERRUPT_TYPE_MAX) + { + continue; + } + + intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState); + POBJTMR pTmr = GPU_GET_TIMER(pGpu); + *serviced = tmrServiceSwrlWrapper(pGpu, pTmr, &intr0Pending, &threadState); + } + } + + threadStateFreeISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS); + } + + // LOCK: try to acquire GPUs lock + if (rmDeviceGpuLocksAcquire(pDeviceLockGpu, GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_ISR) == NV_OK) + { + threadStateInitISRAndDeferredIntHandler(&threadState, + pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR); + + gpuMask = gpumgrGetGpuMask(pDeviceLockGpu); + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + pIntr = GPU_GET_INTR(pGpu); + pDisp = GPU_GET_DISP(pGpu); + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + if ((pDisp != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + } + else if ((pIntr != NULL) && INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr)) + { + // If interrupt enable is garbage the GPU is probably in a bad state + if (intrGetIntrEnFromHw_HAL(pGpu, pIntr, &threadState) > INTERRUPT_TYPE_MAX) + continue; + + intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState); + if (bitVectorTest(&intr0Pending, MC_ENGINE_IDX_DISP)) + { + if (pKernelDisplay != NULL) + { + kdispServiceVblank_HAL(pGpu, pKernelDisplay, 0, + (VBLANK_STATE_PROCESS_LOW_LATENCY | + VBLANK_STATE_PROCESS_CALLED_FROM_ISR), + &threadState); + *serviced = NV_TRUE; + intrGetPendingStall_HAL(pGpu, pIntr, &intr0Pending, &threadState); + } + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS)) + { + pIntr = GPU_GET_INTR(pGpu); + if (pIntr != NULL) + { + NvBool bCtxswLog = NV_FALSE; + intrGetPendingNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState); + intrCheckFecsEventbufferPending(pGpu, pIntr, &intr1Pending, &bCtxswLog); + } + } + + if (!bitVectorTestAllCleared(&intr0Pending) || + !bitVectorTestAllCleared(&intr1Pending)) + { + pending = NV_TRUE; + sema_release = NV_FALSE; + } + } + } + threadStateFreeISRAndDeferredIntHandler(&threadState, + pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR); + + if (sema_release) + { + NV_ASSERT(!pending); + + // UNLOCK: release GPUs lock + rmDeviceGpuLocksRelease(pDeviceLockGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + else + { + rmDeviceGpuLockSetOwner(pDeviceLockGpu, GPUS_LOCK_OWNER_PENDING_DPC_REFRESH); + } + } + + if (pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) && + pDeviceLockGpu->getProperty(pDeviceLockGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS)) + { + threadStateInitISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS); + + gpuMask = gpumgrGetGpuMask(pDeviceLockGpu); + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + pIntr = GPU_GET_INTR(pGpu); + if ((pIntr != NULL) && (INTERRUPT_TYPE_HARDWARE == 
intrGetIntrEn(pIntr))) + { + NvBool bCtxswLog = NV_FALSE; + intrGetPendingNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState); + intrCheckFecsEventbufferPending(pGpu, pIntr, &intr1Pending, &bCtxswLog); + if (!bitVectorTestAllCleared(&intr1Pending)) + { + intrServiceNonStall_HAL(pGpu, pIntr, &intr1Pending, &threadState); + *serviced = NV_TRUE; + } + } + } + threadStateFreeISRLockless(&threadState, pDeviceLockGpu, THREAD_STATE_FLAGS_IS_ISR_LOCKLESS); + } + + tlsIsrDestroy(pIsrAllocator); + portMemAllocatorRelease(pIsrAllocator); + + return pending; +} + +NV_STATUS osIsr( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + nv_state_t *nv = NV_GET_NV_STATE(pGpu); + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + NvBool pending = NV_FALSE; + NvBool serviced = NV_FALSE; + Intr *pIntr; + + if (nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + pending = osInterruptPending(pGpu, &serviced, NULL /* threadstate */); + } + else + { + pIntr = GPU_GET_INTR(pGpu); + if (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr)) + { + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + pending = osInterruptPending(pGpu, &serviced, NULL /* threadstate */); + kbifCheckAndRearmMSI(pGpu, pKernelBif); + } + } + } + + if (!pending && (IS_VIRTUAL(pGpu) || !serviced)) + status = NV_ERR_NO_INTR_PENDING; + else if (pending) + status = NV_WARN_MORE_PROCESSING_REQUIRED; + + return status; +} + +/* + * Helper function to determine when the RM SEMA/GPUS LOCK should toggle + * interrupts. Based on the state of the GPU - we must add cases here as we + * discover them. + * + * Noteworthy special cases: + * + * - Suspend/resume: the GPU could still be suspended and not accessible + * on the bus, while passive-level threads need to grab the GPUs + * lock, or other GPUs are being resumed and triggering interrupts. + * + * - SLI state transitions: interrupts are disabled manually prior to + * removing GPUs from the lock mask leading up to SLI link/unlink + * operations on UNIX, but since the GPUs lock is not held by design in + * these paths, it needs to be ensured that GPUs lock acquisitions + * occurring aynchronously do not re-enable interrupts on any of the + * GPUs undergoing the SLI state transition. + * + * @param[in] pGpu OBJGPU pointer + * + * @return NV_TRUE if the RM SEMA/GPUS LOCK should toggle interrupts, NV_FALSE + * otherwise. 
+ */ +NvBool osLockShouldToggleInterrupts(OBJGPU *pGpu) +{ + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + return NV_TRUE; + + return (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH) && + gpuIsStateLoaded(pGpu) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SLI_LINK_CODEPATH)); +} + +void osEnableInterrupts(OBJGPU *pGpu) +{ + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // enable irq through os call + nv_control_soc_irqs(NV_GET_NV_STATE(pGpu), NV_TRUE); + return; + } + else + { + Intr *pIntr = GPU_GET_INTR(pGpu); + NvU32 intrEn; + + if (!pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + NV_ASSERT(intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL) == INTERRUPT_TYPE_DISABLED); + + intrEn = intrGetIntrEn(pIntr); + intrSetIntrEnInHw_HAL(pGpu, pIntr, intrEn, NULL); + + if (pIntr != NULL) + { + intrSetStall_HAL(pGpu, pIntr, intrEn, NULL); + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED)) + { + if (pIntr != NULL) + { + intrRestoreNonStall_HAL(pGpu, pIntr, intrGetIntrEn(pIntr), NULL); + } + } + } +} + +void osDisableInterrupts( + OBJGPU *pGpu, + NvBool bIsr +) +{ + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // disable irq through os call + nv_control_soc_irqs(NV_GET_NV_STATE(pGpu), NV_FALSE); + return; + } + else + { + Intr *pIntr = GPU_GET_INTR(pGpu); + NvU32 new_intr_en_0 = INTERRUPT_TYPE_DISABLED; + + intrSetIntrEnInHw_HAL(pGpu, pIntr, new_intr_en_0, NULL); + + if (pIntr != NULL) + { + intrSetStall_HAL(pGpu, pIntr, new_intr_en_0, NULL); + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED)) + { + if (pIntr != NULL) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS)) + { + intrRestoreNonStall_HAL(pGpu, pIntr, intrGetIntrEn(pIntr), NULL); + } + else + { + intrRestoreNonStall_HAL(pGpu, pIntr, new_intr_en_0, NULL); + } + } + } + } +} + +static void RmIsrBottomHalf( + nv_state_t *pNv +) +{ + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + THREAD_STATE_NODE threadState; + OS_THREAD_HANDLE threadId; + NvU32 gpuMask, gpuInstance; + OBJGPU *pDeviceLockGpu = pGpu; + Intr *pIntr = NULL; + POBJDISP pDisp = NULL; + NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer + PORT_MEM_ALLOCATOR *pIsrAllocator; + + pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator)); + tlsIsrInit(pIsrAllocator); + + // + // The owning thread changes as the ISR acquires the GPUs lock, + // but the bottom half releases it. Refresh the ThreadId owner to be + // correct here for the bottom half context. + // + osGetCurrentThread(&threadId); + rmDeviceGpuLockSetOwner(pDeviceLockGpu, threadId); + + gpuMask = gpumgrGetGpuMask(pGpu); + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + + threadStateInitISRAndDeferredIntHandler(&threadState, + pGpu, THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER); + + pIntr = GPU_GET_INTR(pGpu); + pDisp = GPU_GET_DISP(pGpu); + + // + // Call disp service incase of SOC Display, + // TODO : with multi interrupt handling based on irq aux interrupts are serviced by dpAuxService + // See JIRA task TDS-4253. 
+ // + if ((pDisp != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + } + else if ((pIntr != NULL) && (INTERRUPT_TYPE_HARDWARE == intrGetIntrEn(pIntr))) + { + intrServiceStall_HAL(pGpu, pIntr); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS)) + { + MC_ENGINE_BITVECTOR intrPending; + intrServiceNonStall_HAL(pGpu, pIntr, &intrPending, &threadState); + } + } + + threadStateFreeISRAndDeferredIntHandler(&threadState, + pGpu, THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER); + } + + // UNLOCK: release GPUs lock + rmDeviceGpuLocksRelease(pDeviceLockGpu, GPUS_LOCK_FLAGS_NONE, NULL); + + tlsIsrDestroy(pIsrAllocator); + portMemAllocatorRelease(pIsrAllocator); +} + +static void RmIsrBottomHalfUnlocked( + nv_state_t *pNv +) +{ + OBJGPU *pGpu = NV_GET_NV_PRIV_PGPU(pNv); + Intr *pIntr; + THREAD_STATE_NODE threadState; + + // In the GSP client scenario, the fatal fault interrupt is not shared + // by UVM and CPU-RM. Instead, it is handled entirely by GSP-RM. We + // therefore do not expect this function to be called. But if it is, bail + // without attempting to service interrupts. + if (IS_GSP_CLIENT(pGpu)) + { + return; + } + + // Grab GPU lock here as this kthread-item was enqueued without grabbing GPU lock + if (rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DPC) == NV_OK) + { + if (FULL_GPU_SANITY_CHECK(pGpu)) + { + pIntr = GPU_GET_INTR(pGpu); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (intrGetIntrEn(pIntr) != INTERRUPT_TYPE_DISABLED) + { + MC_ENGINE_BITVECTOR intrPending; + intrGetPendingStall_HAL(pGpu, pIntr, &intrPending, &threadState); + intrServiceNonStallBottomHalf(pGpu, pIntr, &intrPending, &threadState); + } + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + } + + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } +} + +NvBool NV_API_CALL rm_isr( + nvidia_stack_t *sp, + nv_state_t *nv, + NvU32 *NeedBottomHalf +) +{ + NV_STATUS status; + nv_priv_t *nvp = NV_GET_NV_PRIV(nv); + OBJGPU *pGpu; + NvBool retval; + void *fp; + + if ((nvp->flags & NV_INIT_FLAG_GPU_STATE_LOAD) == 0) + { + return NV_FALSE; + } + + pGpu = NV_GET_NV_PRIV_PGPU(nv); + if (pGpu == NULL) + { + return NV_FALSE; + } + + NV_ENTER_RM_RUNTIME(sp,fp); + + // call actual isr function here + status = isrWrapper(pGpu->testIntr, pGpu); + + switch (status) + { + case NV_OK: + *NeedBottomHalf = NV_FALSE; + retval = NV_TRUE; + break; + case NV_WARN_MORE_PROCESSING_REQUIRED: + *NeedBottomHalf = NV_TRUE; + retval = NV_TRUE; + break; + case NV_ERR_NO_INTR_PENDING: + default: + *NeedBottomHalf = NV_FALSE; + retval = NV_FALSE; + break; + } + + NV_EXIT_RM_RUNTIME(sp,fp); + + return retval; +} + +void NV_API_CALL rm_isr_bh( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + RmIsrBottomHalf(pNv); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + +void NV_API_CALL rm_isr_bh_unlocked( + nvidia_stack_t *sp, + nv_state_t *pNv +) +{ + void *fp; + + NV_ENTER_RM_RUNTIME(sp,fp); + + RmIsrBottomHalfUnlocked(pNv); + + NV_EXIT_RM_RUNTIME(sp,fp); +} + diff --git a/src/nvidia/exports_link_command.txt b/src/nvidia/exports_link_command.txt new file mode 100644 index 000000000..ac7087523 --- /dev/null +++ b/src/nvidia/exports_link_command.txt @@ -0,0 +1,190 @@ +--undefined=rm_disable_adapter +--undefined=rm_execute_work_item +--undefined=rm_free_os_event +--undefined=rm_free_private_state +--undefined=rm_cleanup_file_private 
+--undefined=rm_unbind_lock +--undefined=rm_get_device_name +--undefined=rm_get_vbios_version +--undefined=rm_get_gpu_uuid +--undefined=rm_get_gpu_uuid_raw +--undefined=rm_set_rm_firmware_requested +--undefined=rm_get_firmware_version +--undefined=rm_i2c_remove_adapters +--undefined=rm_i2c_is_smbus_capable +--undefined=rm_i2c_transfer +--undefined=rm_init_adapter +--undefined=rm_init_private_state +--undefined=rm_init_rm +--undefined=rm_ioctl +--undefined=rm_is_supported_device +--undefined=rm_is_supported_pci_device +--undefined=rm_isr +--undefined=rm_isr_bh +--undefined=rm_isr_bh_unlocked +--undefined=rm_perform_version_check +--undefined=rm_power_management +--undefined=rm_stop_user_channels +--undefined=rm_restart_user_channels +--undefined=rm_read_registry_dword +--undefined=rm_run_rc_callback +--undefined=rm_run_nano_timer_callback +--undefined=rm_save_low_res_mode +--undefined=rm_shutdown_adapter +--undefined=rm_exclude_adapter +--undefined=rm_acquire_api_lock +--undefined=rm_release_api_lock +--undefined=rm_acquire_gpu_lock +--undefined=rm_release_gpu_lock +--undefined=rm_acquire_all_gpus_lock +--undefined=rm_release_all_gpus_lock +--undefined=rm_shutdown_rm +--undefined=rm_system_event +--undefined=rm_write_registry_binary +--undefined=rm_write_registry_dword +--undefined=rm_write_registry_string +--undefined=rm_parse_option_string +--undefined=rm_remove_spaces +--undefined=rm_string_token +--undefined=rm_disable_gpu_state_persistence +--undefined=pNVRM_ID +--undefined=rm_p2p_get_pages +--undefined=rm_p2p_get_pages_persistent +--undefined=rm_p2p_get_gpu_info +--undefined=rm_p2p_register_callback +--undefined=rm_p2p_put_pages +--undefined=rm_p2p_put_pages_persistent +--undefined=rm_p2p_dma_map_pages +--undefined=rm_dma_buf_dup_mem_handle +--undefined=rm_dma_buf_undup_mem_handle +--undefined=rm_dma_buf_map_mem_handle +--undefined=rm_dma_buf_unmap_mem_handle +--undefined=rm_dma_buf_get_client_and_device +--undefined=rm_dma_buf_put_client_and_device +--undefined=rm_gpu_ops_address_space_destroy +--undefined=rm_gpu_ops_memory_cpu_ummap +--undefined=rm_gpu_ops_channel_destroy +--undefined=rm_gpu_ops_memory_free +--undefined=rm_gpu_ops_create_session +--undefined=rm_gpu_ops_memory_alloc_sys +--undefined=rm_gpu_ops_address_space_create +--undefined=rm_gpu_ops_channel_allocate +--undefined=rm_gpu_ops_memory_alloc_fb +--undefined=rm_gpu_ops_memory_cpu_map +--undefined=rm_gpu_ops_destroy_session +--undefined=rm_gpu_ops_query_caps +--undefined=rm_gpu_ops_query_ces_caps +--undefined=rm_gpu_ops_get_gpu_info +--undefined=rm_gpu_ops_service_device_interrupts_rm +--undefined=rm_gpu_ops_dup_allocation +--undefined=rm_gpu_ops_free_duped_handle +--undefined=rm_gpu_ops_get_fb_info +--undefined=rm_gpu_ops_get_ecc_info +--undefined=rm_gpu_ops_init_fault_info +--undefined=rm_gpu_ops_destroy_fault_info +--undefined=rm_gpu_ops_has_pending_non_replayable_faults +--undefined=rm_gpu_ops_get_non_replayable_faults +--undefined=rm_gpu_ops_init_access_cntr_info +--undefined=rm_gpu_ops_destroy_access_cntr_info +--undefined=rm_gpu_ops_enable_access_cntr +--undefined=rm_gpu_ops_disable_access_cntr +--undefined=rm_gpu_ops_get_pma_object +--undefined=rm_gpu_ops_pma_alloc_pages +--undefined=rm_gpu_ops_pma_free_pages +--undefined=rm_gpu_ops_pma_pin_pages +--undefined=rm_gpu_ops_pma_unpin_pages +--undefined=rm_gpu_ops_pma_register_callbacks +--undefined=rm_gpu_ops_pma_unregister_callbacks +--undefined=rm_gpu_ops_dup_address_space +--undefined=rm_gpu_ops_own_page_fault_intr +--undefined=rm_gpu_ops_dup_memory 
+--undefined=rm_gpu_ops_get_p2p_caps +--undefined=rm_gpu_ops_set_page_directory +--undefined=rm_gpu_ops_unset_page_directory +--undefined=rm_gpu_ops_stop_channel +--undefined=rm_gpu_ops_p2p_object_create +--undefined=rm_gpu_ops_p2p_object_destroy +--undefined=rm_gpu_ops_get_external_alloc_ptes +--undefined=rm_gpu_ops_retain_channel +--undefined=rm_gpu_ops_bind_channel_resources +--undefined=rm_gpu_ops_release_channel +--undefined=rm_gpu_ops_get_channel_resource_ptes +--undefined=rm_gpu_ops_report_non_replayable_fault +--undefined=rm_gpu_ops_paging_channel_allocate +--undefined=rm_gpu_ops_paging_channel_destroy +--undefined=rm_gpu_ops_paging_channels_map +--undefined=rm_gpu_ops_paging_channels_unmap +--undefined=rm_gpu_ops_paging_channel_push_stream +--undefined=rm_gpu_ops_device_create +--undefined=rm_gpu_ops_device_destroy +--undefined=rm_log_gpu_crash +--undefined=rm_kernel_rmapi_op +--undefined=nv_get_hypervisor_type +--undefined=nvlink_lib_initialize +--undefined=nvlink_lib_unload +--undefined=nvlink_lib_ioctl_ctrl +--undefined=nvswitch_lib_register_device +--undefined=nvswitch_lib_unregister_device +--undefined=nvswitch_lib_initialize_device +--undefined=nvswitch_lib_post_init_device +--undefined=nvswitch_lib_post_init_blacklist_device +--undefined=nvswitch_lib_shutdown_device +--undefined=nvswitch_lib_enable_interrupts +--undefined=nvswitch_lib_disable_interrupts +--undefined=nvswitch_lib_check_interrupts +--undefined=nvswitch_lib_service_interrupts +--undefined=nvswitch_lib_ctrl +--undefined=nvswitch_lib_get_log_count +--undefined=nvswitch_lib_get_device_info +--undefined=nvswitch_lib_deferred_task_dispatcher +--undefined=nvswitch_lib_check_api_version +--undefined=nvswitch_lib_load_platform_info +--undefined=nvswitch_lib_validate_device_id +--undefined=nvswitch_lib_get_uuid +--undefined=nvswitch_lib_get_physid +--undefined=nvswitch_lib_read_fabric_state +--undefined=nvswitch_lib_get_client_event +--undefined=nvswitch_lib_add_client_event +--undefined=nvswitch_lib_remove_client_events +--undefined=nvswitch_lib_notify_client_events +--undefined=nvswitch_lib_get_bios_version +--undefined=nvswitch_lib_use_pin_irq +--undefined=nvswitch_lib_get_valid_ports_mask +--undefined=nvswitch_lib_is_i2c_supported +--undefined=nvswitch_lib_i2c_transfer +--undefined=rm_gpu_copy_mmu_faults +--undefined=rm_gpu_copy_mmu_faults_unlocked +--undefined=rm_gpu_need_4k_page_isolation +--undefined=rm_is_chipset_io_coherent +--undefined=rm_get_device_remove_flag +--undefined=rm_init_event_locks +--undefined=rm_destroy_event_locks +--undefined=rm_get_gpu_numa_info +--undefined=rm_gpu_numa_online +--undefined=rm_gpu_numa_offline +--undefined=rm_is_device_sequestered +--undefined=nv_vgpu_create_request +--undefined=nv_vgpu_delete +--undefined=nv_vgpu_get_bar_info +--undefined=nv_vgpu_start +--undefined=nv_vgpu_get_type_ids +--undefined=nv_vgpu_get_type_info +--undefined=nv_vgpu_get_sparse_mmap +--undefined=nv_vgpu_update_request +--undefined=nv_vgpu_process_vf_info +--undefined=nv_gpu_bind_event +--undefined=rm_check_for_gpu_surprise_removal +--undefined=rm_set_external_kernel_client_count +--undefined=rm_schedule_gpu_wakeup +--undefined=rm_init_dynamic_power_management +--undefined=rm_cleanup_dynamic_power_management +--undefined=rm_ref_dynamic_power +--undefined=rm_unref_dynamic_power +--undefined=rm_transition_dynamic_power +--undefined=rm_get_vidmem_power_status +--undefined=rm_acpi_notify +--undefined=rm_get_dynamic_power_management_status +--undefined=rm_get_gpu_gcx_support 
+--undefined=rm_is_iommu_needed_for_sriov +--undefined=rm_disable_iomap_wc +--undefined=rm_get_clientnvpcf_power_limits diff --git a/src/nvidia/generated/g_access_cntr_buffer_nvoc.c b/src/nvidia/generated/g_access_cntr_buffer_nvoc.c new file mode 100644 index 000000000..1adec79d5 --- /dev/null +++ b/src/nvidia/generated/g_access_cntr_buffer_nvoc.c @@ -0,0 +1,592 @@ +#define NVOC_ACCESS_CNTR_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_access_cntr_buffer_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x1f0074 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_AccessCounterBuffer; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_AccessCounterBuffer(AccessCounterBuffer*); +void __nvoc_init_funcTable_AccessCounterBuffer(AccessCounterBuffer*); +NV_STATUS __nvoc_ctor_AccessCounterBuffer(AccessCounterBuffer*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_AccessCounterBuffer(AccessCounterBuffer*); +void __nvoc_dtor_AccessCounterBuffer(AccessCounterBuffer*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_AccessCounterBuffer; + +static const struct NVOC_RTTI __nvoc_rtti_AccessCounterBuffer_AccessCounterBuffer = { + /*pClassDef=*/ &__nvoc_class_def_AccessCounterBuffer, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_AccessCounterBuffer, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_AccessCounterBuffer_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(AccessCounterBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_AccessCounterBuffer_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(AccessCounterBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_AccessCounterBuffer_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(AccessCounterBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_AccessCounterBuffer_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(AccessCounterBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_AccessCounterBuffer_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(AccessCounterBuffer, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_AccessCounterBuffer_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(AccessCounterBuffer, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_AccessCounterBuffer_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(AccessCounterBuffer, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_AccessCounterBuffer = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_AccessCounterBuffer_AccessCounterBuffer, + &__nvoc_rtti_AccessCounterBuffer_Notifier, + &__nvoc_rtti_AccessCounterBuffer_INotifier, + &__nvoc_rtti_AccessCounterBuffer_GpuResource, + &__nvoc_rtti_AccessCounterBuffer_RmResource, + &__nvoc_rtti_AccessCounterBuffer_RmResourceCommon, + &__nvoc_rtti_AccessCounterBuffer_RsResource, + &__nvoc_rtti_AccessCounterBuffer_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_AccessCounterBuffer = +{ + /*classInfo=*/ { + /*size=*/ sizeof(AccessCounterBuffer), + /*classId=*/ classId(AccessCounterBuffer), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "AccessCounterBuffer", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_AccessCounterBuffer, + /*pCastInfo=*/ &__nvoc_castinfo_AccessCounterBuffer, + /*pExportInfo=*/ &__nvoc_export_info_AccessCounterBuffer +}; + +static NV_STATUS __nvoc_thunk_AccessCounterBuffer_gpuresMap(struct GpuResource *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return accesscntrMap((struct AccessCounterBuffer *)(((unsigned char *)pAccessCounterBuffer) - __nvoc_rtti_AccessCounterBuffer_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_AccessCounterBuffer_gpuresUnmap(struct GpuResource *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return accesscntrUnmap((struct AccessCounterBuffer *)(((unsigned char *)pAccessCounterBuffer) - __nvoc_rtti_AccessCounterBuffer_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_AccessCounterBuffer_gpuresGetMapAddrSpace(struct GpuResource *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return accesscntrGetMapAddrSpace((struct AccessCounterBuffer *)(((unsigned char *)pAccessCounterBuffer) - __nvoc_rtti_AccessCounterBuffer_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_GpuResource_accesscntrShareCallback(struct AccessCounterBuffer *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_AccessCounterBuffer_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_accesscntrMapTo(struct AccessCounterBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_accesscntrGetOrAllocNotifShare(struct AccessCounterBuffer *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_AccessCounterBuffer_Notifier.offset), 
hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_accesscntrCheckMemInterUnmap(struct AccessCounterBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_AccessCounterBuffer_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_accesscntrSetNotificationShare(struct AccessCounterBuffer *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_AccessCounterBuffer_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_accesscntrGetRefCount(struct AccessCounterBuffer *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_accesscntrAddAdditionalDependants(struct RsClient *pClient, struct AccessCounterBuffer *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_accesscntrControl_Prologue(struct AccessCounterBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_accesscntrGetRegBaseOffsetAndSize(struct AccessCounterBuffer *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_AccessCounterBuffer_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_accesscntrInternalControlForward(struct AccessCounterBuffer *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_AccessCounterBuffer_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_accesscntrUnmapFrom(struct AccessCounterBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_accesscntrControl_Epilogue(struct AccessCounterBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_accesscntrControlLookup(struct AccessCounterBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_accesscntrGetInternalObjectHandle(struct AccessCounterBuffer *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_AccessCounterBuffer_GpuResource.offset)); +} + +static NV_STATUS 
__nvoc_thunk_GpuResource_accesscntrControl(struct AccessCounterBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_AccessCounterBuffer_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_accesscntrGetMemInterMapParams(struct AccessCounterBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_AccessCounterBuffer_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_accesscntrGetMemoryMappingDescriptor(struct AccessCounterBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_AccessCounterBuffer_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_accesscntrControlFilter(struct AccessCounterBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_accesscntrUnregisterEvent(struct AccessCounterBuffer *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_AccessCounterBuffer_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_accesscntrCanCopy(struct AccessCounterBuffer *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_accesscntrPreDestruct(struct AccessCounterBuffer *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_accesscntrGetNotificationListPtr(struct AccessCounterBuffer *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_AccessCounterBuffer_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_accesscntrGetNotificationShare(struct AccessCounterBuffer *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_AccessCounterBuffer_Notifier.offset)); +} + +static NvBool __nvoc_thunk_RmResource_accesscntrAccessCallback(struct AccessCounterBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_AccessCounterBuffer_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_AccessCounterBuffer[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferReadGet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0xc3650101u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferReadGet" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferWriteGet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3650102u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferWriteGet" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferReadPut_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3650103u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferReadPut" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3650104u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferEnable" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferGetSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3650105u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferGetSize" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3650106u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferGetFullInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3650107u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferGetFullInfo" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferResetCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3650108u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferResetCounters" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrSetConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3650109u, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrSetConfig" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) accesscntrCtrlCmdAccessCntrBufferEnableIntr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc365010bu, + /*paramSize=*/ sizeof(NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_AccessCounterBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "accesscntrCtrlCmdAccessCntrBufferEnableIntr" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_AccessCounterBuffer = +{ + /*numEntries=*/ 10, + /*pExportEntries=*/ __nvoc_exported_method_def_AccessCounterBuffer +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_AccessCounterBuffer(AccessCounterBuffer *pThis) { + __nvoc_accesscntrDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_AccessCounterBuffer(AccessCounterBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_AccessCounterBuffer(AccessCounterBuffer *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_AccessCounterBuffer_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_AccessCounterBuffer_fail_Notifier; + __nvoc_init_dataField_AccessCounterBuffer(pThis); + + status = __nvoc_accesscntrConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_AccessCounterBuffer_fail__init; + goto __nvoc_ctor_AccessCounterBuffer_exit; // Success + +__nvoc_ctor_AccessCounterBuffer_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_AccessCounterBuffer_fail_Notifier: + 
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_AccessCounterBuffer_fail_GpuResource: +__nvoc_ctor_AccessCounterBuffer_exit: + + return status; +} + +static void __nvoc_init_funcTable_AccessCounterBuffer_1(AccessCounterBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__accesscntrMap__ = &accesscntrMap_IMPL; + + pThis->__accesscntrUnmap__ = &accesscntrUnmap_IMPL; + + pThis->__accesscntrGetMapAddrSpace__ = &accesscntrGetMapAddrSpace_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferReadGet__ = &accesscntrCtrlCmdAccessCntrBufferReadGet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferReadPut__ = &accesscntrCtrlCmdAccessCntrBufferReadPut_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferWriteGet__ = &accesscntrCtrlCmdAccessCntrBufferWriteGet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferEnable__ = &accesscntrCtrlCmdAccessCntrBufferEnable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferGetSize__ = &accesscntrCtrlCmdAccessCntrBufferGetSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings__ = &accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferGetFullInfo__ = &accesscntrCtrlCmdAccessCntrBufferGetFullInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferResetCounters__ = &accesscntrCtrlCmdAccessCntrBufferResetCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrSetConfig__ = &accesscntrCtrlCmdAccessCntrSetConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__accesscntrCtrlCmdAccessCntrBufferEnableIntr__ = &accesscntrCtrlCmdAccessCntrBufferEnableIntr_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_AccessCounterBuffer_gpuresMap; + + pThis->__nvoc_base_GpuResource.__gpuresUnmap__ = &__nvoc_thunk_AccessCounterBuffer_gpuresUnmap; + + pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_AccessCounterBuffer_gpuresGetMapAddrSpace; + + pThis->__accesscntrShareCallback__ = &__nvoc_thunk_GpuResource_accesscntrShareCallback; + + pThis->__accesscntrMapTo__ = &__nvoc_thunk_RsResource_accesscntrMapTo; + + pThis->__accesscntrGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_accesscntrGetOrAllocNotifShare; + + pThis->__accesscntrCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_accesscntrCheckMemInterUnmap; + + pThis->__accesscntrSetNotificationShare__ = &__nvoc_thunk_Notifier_accesscntrSetNotificationShare; + + pThis->__accesscntrGetRefCount__ = &__nvoc_thunk_RsResource_accesscntrGetRefCount; + + pThis->__accesscntrAddAdditionalDependants__ = &__nvoc_thunk_RsResource_accesscntrAddAdditionalDependants; + + pThis->__accesscntrControl_Prologue__ = &__nvoc_thunk_RmResource_accesscntrControl_Prologue; + + pThis->__accesscntrGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_accesscntrGetRegBaseOffsetAndSize; + + pThis->__accesscntrInternalControlForward__ = &__nvoc_thunk_GpuResource_accesscntrInternalControlForward; + + pThis->__accesscntrUnmapFrom__ = &__nvoc_thunk_RsResource_accesscntrUnmapFrom; + 
+ pThis->__accesscntrControl_Epilogue__ = &__nvoc_thunk_RmResource_accesscntrControl_Epilogue; + + pThis->__accesscntrControlLookup__ = &__nvoc_thunk_RsResource_accesscntrControlLookup; + + pThis->__accesscntrGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_accesscntrGetInternalObjectHandle; + + pThis->__accesscntrControl__ = &__nvoc_thunk_GpuResource_accesscntrControl; + + pThis->__accesscntrGetMemInterMapParams__ = &__nvoc_thunk_RmResource_accesscntrGetMemInterMapParams; + + pThis->__accesscntrGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_accesscntrGetMemoryMappingDescriptor; + + pThis->__accesscntrControlFilter__ = &__nvoc_thunk_RsResource_accesscntrControlFilter; + + pThis->__accesscntrUnregisterEvent__ = &__nvoc_thunk_Notifier_accesscntrUnregisterEvent; + + pThis->__accesscntrCanCopy__ = &__nvoc_thunk_RsResource_accesscntrCanCopy; + + pThis->__accesscntrPreDestruct__ = &__nvoc_thunk_RsResource_accesscntrPreDestruct; + + pThis->__accesscntrGetNotificationListPtr__ = &__nvoc_thunk_Notifier_accesscntrGetNotificationListPtr; + + pThis->__accesscntrGetNotificationShare__ = &__nvoc_thunk_Notifier_accesscntrGetNotificationShare; + + pThis->__accesscntrAccessCallback__ = &__nvoc_thunk_RmResource_accesscntrAccessCallback; +} + +void __nvoc_init_funcTable_AccessCounterBuffer(AccessCounterBuffer *pThis) { + __nvoc_init_funcTable_AccessCounterBuffer_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_AccessCounterBuffer(AccessCounterBuffer *pThis) { + pThis->__nvoc_pbase_AccessCounterBuffer = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_AccessCounterBuffer(pThis); +} + +NV_STATUS __nvoc_objCreate_AccessCounterBuffer(AccessCounterBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + AccessCounterBuffer *pThis; + + pThis = portMemAllocNonPaged(sizeof(AccessCounterBuffer)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(AccessCounterBuffer)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_AccessCounterBuffer); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_AccessCounterBuffer(pThis); + status = __nvoc_ctor_AccessCounterBuffer(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto 
__nvoc_objCreate_AccessCounterBuffer_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_AccessCounterBuffer_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_AccessCounterBuffer(AccessCounterBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_AccessCounterBuffer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_access_cntr_buffer_nvoc.h b/src/nvidia/generated/g_access_cntr_buffer_nvoc.h new file mode 100644 index 000000000..f2f3eed6f --- /dev/null +++ b/src/nvidia/generated/g_access_cntr_buffer_nvoc.h @@ -0,0 +1,352 @@ +#ifndef _G_ACCESS_CNTR_BUFFER_NVOC_H_ +#define _G_ACCESS_CNTR_BUFFER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_access_cntr_buffer_nvoc.h" + +#ifndef ACCESS_CNTR_BUFFER_H +#define ACCESS_CNTR_BUFFER_H + +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" + +#include "ctrl/ctrlc365.h" + +/*! 
+ * RM internal class representing ACCESS_COUNTER_NOTIFY_BUFFER + */ +#ifdef NVOC_ACCESS_CNTR_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct AccessCounterBuffer { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct AccessCounterBuffer *__nvoc_pbase_AccessCounterBuffer; + NV_STATUS (*__accesscntrMap__)(struct AccessCounterBuffer *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__accesscntrUnmap__)(struct AccessCounterBuffer *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__accesscntrGetMapAddrSpace__)(struct AccessCounterBuffer *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferReadGet__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferReadPut__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferWriteGet__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferEnable__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferGetSize__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferGetFullInfo__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferResetCounters__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrSetConfig__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS *); + NV_STATUS (*__accesscntrCtrlCmdAccessCntrBufferEnableIntr__)(struct AccessCounterBuffer *, NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS *); + NvBool (*__accesscntrShareCallback__)(struct AccessCounterBuffer *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__accesscntrMapTo__)(struct AccessCounterBuffer *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__accesscntrGetOrAllocNotifShare__)(struct AccessCounterBuffer *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__accesscntrCheckMemInterUnmap__)(struct AccessCounterBuffer *, NvBool); + void (*__accesscntrSetNotificationShare__)(struct AccessCounterBuffer *, struct NotifShare *); + NvU32 (*__accesscntrGetRefCount__)(struct AccessCounterBuffer *); + void (*__accesscntrAddAdditionalDependants__)(struct RsClient *, struct AccessCounterBuffer *, RsResourceRef *); + NV_STATUS (*__accesscntrControl_Prologue__)(struct AccessCounterBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__accesscntrGetRegBaseOffsetAndSize__)(struct AccessCounterBuffer *, struct OBJGPU *, NvU32 *, NvU32 
*); + NV_STATUS (*__accesscntrInternalControlForward__)(struct AccessCounterBuffer *, NvU32, void *, NvU32); + NV_STATUS (*__accesscntrUnmapFrom__)(struct AccessCounterBuffer *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__accesscntrControl_Epilogue__)(struct AccessCounterBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__accesscntrControlLookup__)(struct AccessCounterBuffer *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__accesscntrGetInternalObjectHandle__)(struct AccessCounterBuffer *); + NV_STATUS (*__accesscntrControl__)(struct AccessCounterBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__accesscntrGetMemInterMapParams__)(struct AccessCounterBuffer *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__accesscntrGetMemoryMappingDescriptor__)(struct AccessCounterBuffer *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__accesscntrControlFilter__)(struct AccessCounterBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__accesscntrUnregisterEvent__)(struct AccessCounterBuffer *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__accesscntrCanCopy__)(struct AccessCounterBuffer *); + void (*__accesscntrPreDestruct__)(struct AccessCounterBuffer *); + PEVENTNOTIFICATION *(*__accesscntrGetNotificationListPtr__)(struct AccessCounterBuffer *); + struct NotifShare *(*__accesscntrGetNotificationShare__)(struct AccessCounterBuffer *); + NvBool (*__accesscntrAccessCallback__)(struct AccessCounterBuffer *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ +#define __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ +typedef struct AccessCounterBuffer AccessCounterBuffer; +#endif /* __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_AccessCounterBuffer +#define __nvoc_class_id_AccessCounterBuffer 0x1f0074 +#endif /* __nvoc_class_id_AccessCounterBuffer */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_AccessCounterBuffer; + +#define __staticCast_AccessCounterBuffer(pThis) \ + ((pThis)->__nvoc_pbase_AccessCounterBuffer) + +#ifdef __nvoc_access_cntr_buffer_h_disabled +#define __dynamicCast_AccessCounterBuffer(pThis) ((AccessCounterBuffer*)NULL) +#else //__nvoc_access_cntr_buffer_h_disabled +#define __dynamicCast_AccessCounterBuffer(pThis) \ + ((AccessCounterBuffer*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(AccessCounterBuffer))) +#endif //__nvoc_access_cntr_buffer_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_AccessCounterBuffer(AccessCounterBuffer**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_AccessCounterBuffer(AccessCounterBuffer**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_AccessCounterBuffer(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_AccessCounterBuffer((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define accesscntrMap(pAccessCounterBuffer, pCallContext, pParams, pCpuMapping) accesscntrMap_DISPATCH(pAccessCounterBuffer, pCallContext, pParams, pCpuMapping) +#define accesscntrUnmap(pAccessCounterBuffer, pCallContext, pCpuMapping) accesscntrUnmap_DISPATCH(pAccessCounterBuffer, pCallContext, pCpuMapping) +#define accesscntrGetMapAddrSpace(pAccessCounterBuffer, pCallContext, mapFlags, pAddrSpace) accesscntrGetMapAddrSpace_DISPATCH(pAccessCounterBuffer, pCallContext, 
mapFlags, pAddrSpace) +#define accesscntrCtrlCmdAccessCntrBufferReadGet(pAccessCounterBuffer, pGetParams) accesscntrCtrlCmdAccessCntrBufferReadGet_DISPATCH(pAccessCounterBuffer, pGetParams) +#define accesscntrCtrlCmdAccessCntrBufferReadPut(pAccessCounterBuffer, pParams) accesscntrCtrlCmdAccessCntrBufferReadPut_DISPATCH(pAccessCounterBuffer, pParams) +#define accesscntrCtrlCmdAccessCntrBufferWriteGet(pAccessCounterBuffer, pGetParams) accesscntrCtrlCmdAccessCntrBufferWriteGet_DISPATCH(pAccessCounterBuffer, pGetParams) +#define accesscntrCtrlCmdAccessCntrBufferEnable(pAccessCounterBuffer, pGetParams) accesscntrCtrlCmdAccessCntrBufferEnable_DISPATCH(pAccessCounterBuffer, pGetParams) +#define accesscntrCtrlCmdAccessCntrBufferGetSize(pAccessCounterBuffer, pGetParams) accesscntrCtrlCmdAccessCntrBufferGetSize_DISPATCH(pAccessCounterBuffer, pGetParams) +#define accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings(pAccessCounterBuffer, pParams) accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings_DISPATCH(pAccessCounterBuffer, pParams) +#define accesscntrCtrlCmdAccessCntrBufferGetFullInfo(pAccessCounterBuffer, pParams) accesscntrCtrlCmdAccessCntrBufferGetFullInfo_DISPATCH(pAccessCounterBuffer, pParams) +#define accesscntrCtrlCmdAccessCntrBufferResetCounters(pAccessCounterBuffer, pParams) accesscntrCtrlCmdAccessCntrBufferResetCounters_DISPATCH(pAccessCounterBuffer, pParams) +#define accesscntrCtrlCmdAccessCntrSetConfig(pAccessCounterBuffer, pParams) accesscntrCtrlCmdAccessCntrSetConfig_DISPATCH(pAccessCounterBuffer, pParams) +#define accesscntrCtrlCmdAccessCntrBufferEnableIntr(pAccessCounterBuffer, pGetParams) accesscntrCtrlCmdAccessCntrBufferEnableIntr_DISPATCH(pAccessCounterBuffer, pGetParams) +#define accesscntrShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) accesscntrShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define accesscntrMapTo(pResource, pParams) accesscntrMapTo_DISPATCH(pResource, pParams) +#define accesscntrGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) accesscntrGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define accesscntrCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) accesscntrCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define accesscntrSetNotificationShare(pNotifier, pNotifShare) accesscntrSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define accesscntrGetRefCount(pResource) accesscntrGetRefCount_DISPATCH(pResource) +#define accesscntrAddAdditionalDependants(pClient, pResource, pReference) accesscntrAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define accesscntrControl_Prologue(pResource, pCallContext, pParams) accesscntrControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define accesscntrGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) accesscntrGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define accesscntrInternalControlForward(pGpuResource, command, pParams, size) accesscntrInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define accesscntrUnmapFrom(pResource, pParams) accesscntrUnmapFrom_DISPATCH(pResource, pParams) +#define accesscntrControl_Epilogue(pResource, pCallContext, pParams) accesscntrControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define accesscntrControlLookup(pResource, pParams, ppEntry) accesscntrControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define 
accesscntrGetInternalObjectHandle(pGpuResource) accesscntrGetInternalObjectHandle_DISPATCH(pGpuResource) +#define accesscntrControl(pGpuResource, pCallContext, pParams) accesscntrControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define accesscntrGetMemInterMapParams(pRmResource, pParams) accesscntrGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define accesscntrGetMemoryMappingDescriptor(pRmResource, ppMemDesc) accesscntrGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define accesscntrControlFilter(pResource, pCallContext, pParams) accesscntrControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define accesscntrUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) accesscntrUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define accesscntrCanCopy(pResource) accesscntrCanCopy_DISPATCH(pResource) +#define accesscntrPreDestruct(pResource) accesscntrPreDestruct_DISPATCH(pResource) +#define accesscntrGetNotificationListPtr(pNotifier) accesscntrGetNotificationListPtr_DISPATCH(pNotifier) +#define accesscntrGetNotificationShare(pNotifier) accesscntrGetNotificationShare_DISPATCH(pNotifier) +#define accesscntrAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) accesscntrAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS accesscntrMap_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS accesscntrMap_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pAccessCounterBuffer->__accesscntrMap__(pAccessCounterBuffer, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS accesscntrUnmap_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS accesscntrUnmap_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pAccessCounterBuffer->__accesscntrUnmap__(pAccessCounterBuffer, pCallContext, pCpuMapping); +} + +NV_STATUS accesscntrGetMapAddrSpace_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS accesscntrGetMapAddrSpace_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pAccessCounterBuffer->__accesscntrGetMapAddrSpace__(pAccessCounterBuffer, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferReadGet_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS *pGetParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferReadGet_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS *pGetParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferReadGet__(pAccessCounterBuffer, pGetParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferReadPut_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS *pParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferReadPut_DISPATCH(struct 
AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS *pParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferReadPut__(pAccessCounterBuffer, pParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferWriteGet_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS *pGetParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferWriteGet_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS *pGetParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferWriteGet__(pAccessCounterBuffer, pGetParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferEnable_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS *pGetParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferEnable_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS *pGetParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferEnable__(pAccessCounterBuffer, pGetParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferGetSize_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS *pGetParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferGetSize_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS *pGetParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferGetSize__(pAccessCounterBuffer, pGetParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS *pParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS *pParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings__(pAccessCounterBuffer, pParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferGetFullInfo_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS *pParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferGetFullInfo_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS *pParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferGetFullInfo__(pAccessCounterBuffer, pParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferResetCounters_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferResetCounters_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS *pParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferResetCounters__(pAccessCounterBuffer, pParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrSetConfig_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS *pParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrSetConfig_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS *pParams) { + return 
pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrSetConfig__(pAccessCounterBuffer, pParams); +} + +NV_STATUS accesscntrCtrlCmdAccessCntrBufferEnableIntr_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS *pGetParams); + +static inline NV_STATUS accesscntrCtrlCmdAccessCntrBufferEnableIntr_DISPATCH(struct AccessCounterBuffer *pAccessCounterBuffer, NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS *pGetParams) { + return pAccessCounterBuffer->__accesscntrCtrlCmdAccessCntrBufferEnableIntr__(pAccessCounterBuffer, pGetParams); +} + +static inline NvBool accesscntrShareCallback_DISPATCH(struct AccessCounterBuffer *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__accesscntrShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS accesscntrMapTo_DISPATCH(struct AccessCounterBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__accesscntrMapTo__(pResource, pParams); +} + +static inline NV_STATUS accesscntrGetOrAllocNotifShare_DISPATCH(struct AccessCounterBuffer *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__accesscntrGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS accesscntrCheckMemInterUnmap_DISPATCH(struct AccessCounterBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__accesscntrCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void accesscntrSetNotificationShare_DISPATCH(struct AccessCounterBuffer *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__accesscntrSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 accesscntrGetRefCount_DISPATCH(struct AccessCounterBuffer *pResource) { + return pResource->__accesscntrGetRefCount__(pResource); +} + +static inline void accesscntrAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct AccessCounterBuffer *pResource, RsResourceRef *pReference) { + pResource->__accesscntrAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS accesscntrControl_Prologue_DISPATCH(struct AccessCounterBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__accesscntrControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS accesscntrGetRegBaseOffsetAndSize_DISPATCH(struct AccessCounterBuffer *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__accesscntrGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS accesscntrInternalControlForward_DISPATCH(struct AccessCounterBuffer *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__accesscntrInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS accesscntrUnmapFrom_DISPATCH(struct AccessCounterBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__accesscntrUnmapFrom__(pResource, pParams); +} + +static inline void accesscntrControl_Epilogue_DISPATCH(struct AccessCounterBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__accesscntrControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS 
accesscntrControlLookup_DISPATCH(struct AccessCounterBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__accesscntrControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle accesscntrGetInternalObjectHandle_DISPATCH(struct AccessCounterBuffer *pGpuResource) { + return pGpuResource->__accesscntrGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS accesscntrControl_DISPATCH(struct AccessCounterBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__accesscntrControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS accesscntrGetMemInterMapParams_DISPATCH(struct AccessCounterBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__accesscntrGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS accesscntrGetMemoryMappingDescriptor_DISPATCH(struct AccessCounterBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__accesscntrGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS accesscntrControlFilter_DISPATCH(struct AccessCounterBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__accesscntrControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS accesscntrUnregisterEvent_DISPATCH(struct AccessCounterBuffer *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__accesscntrUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool accesscntrCanCopy_DISPATCH(struct AccessCounterBuffer *pResource) { + return pResource->__accesscntrCanCopy__(pResource); +} + +static inline void accesscntrPreDestruct_DISPATCH(struct AccessCounterBuffer *pResource) { + pResource->__accesscntrPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *accesscntrGetNotificationListPtr_DISPATCH(struct AccessCounterBuffer *pNotifier) { + return pNotifier->__accesscntrGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *accesscntrGetNotificationShare_DISPATCH(struct AccessCounterBuffer *pNotifier) { + return pNotifier->__accesscntrGetNotificationShare__(pNotifier); +} + +static inline NvBool accesscntrAccessCallback_DISPATCH(struct AccessCounterBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__accesscntrAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS accesscntrConstruct_IMPL(struct AccessCounterBuffer *arg_pAccessCounterBuffer, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_accesscntrConstruct(arg_pAccessCounterBuffer, arg_pCallContext, arg_pParams) accesscntrConstruct_IMPL(arg_pAccessCounterBuffer, arg_pCallContext, arg_pParams) +void accesscntrDestruct_IMPL(struct AccessCounterBuffer *pAccessCounterBuffer); +#define __nvoc_accesscntrDestruct(pAccessCounterBuffer) accesscntrDestruct_IMPL(pAccessCounterBuffer) +#undef PRIVATE_FIELD + + +#endif // ACCESS_CNTR_BUFFER_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_ACCESS_CNTR_BUFFER_NVOC_H_ diff --git a/src/nvidia/generated/g_all_dcl_pb.c b/src/nvidia/generated/g_all_dcl_pb.c new file mode 100644 index 
000000000..5726572df --- /dev/null +++ b/src/nvidia/generated/g_all_dcl_pb.c @@ -0,0 +1,168 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#include "nvtypes.h" +#include "prbrt.h" +#include "g_all_dcl_pb.h" + +// 'Engines' field defaults + +// 'Engines' field descriptors +const PRB_FIELD_DESC prb_fields_dcl_engines[] = { + { + 1, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_ENG_GPU, + 0, + PRB_MAYBE_FIELD_NAME("eng_gpu") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_ENG_MC, + 0, + PRB_MAYBE_FIELD_NAME("eng_mc") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'DclMsg' field defaults + +// 'DclMsg' field descriptors +const PRB_FIELD_DESC prb_fields_dcl_dclmsg[] = { + { + 330, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + JOURNAL_COMMON, + 0, + PRB_MAYBE_FIELD_NAME("common") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 315, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + JOURNAL_ASSERT, + 0, + PRB_MAYBE_FIELD_NAME("journal_assert") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 320, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + JOURNAL_RVAHEADER, + 0, + PRB_MAYBE_FIELD_NAME("journal_rvaheader") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 321, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + JOURNAL_BADREAD, + 0, + PRB_MAYBE_FIELD_NAME("journal_badread") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 327, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + JOURNAL_BUGCHECK, + 0, + PRB_MAYBE_FIELD_NAME("journal_bugcheck") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 329, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + RC_RCCOUNTER, + 0, + PRB_MAYBE_FIELD_NAME("rcCounter") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + DCL_ENGINES, + 0, + PRB_MAYBE_FIELD_NAME("engine") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'ErrorBlock' field defaults + +// 'ErrorBlock' field descriptors +const PRB_FIELD_DESC prb_fields_dcl_errorblock[] = { + { + 1, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + DCL_DCLMSG, + 0, + PRB_MAYBE_FIELD_NAME("data") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// Message descriptors +const PRB_MSG_DESC prb_messages_dcl[] = { + { + 2, + prb_fields_dcl_engines, + PRB_MAYBE_MESSAGE_NAME("Dcl.Engines") + }, + { + 7, + prb_fields_dcl_dclmsg, + PRB_MAYBE_MESSAGE_NAME("Dcl.DclMsg") + }, + { + 1, + prb_fields_dcl_errorblock, + PRB_MAYBE_MESSAGE_NAME("Dcl.ErrorBlock") + }, +}; + +// Service descriptors +const PRB_SERVICE_DESC prb_services_dcl[] = { + { 0 } +}; + diff --git a/src/nvidia/generated/g_all_dcl_pb.h b/src/nvidia/generated/g_all_dcl_pb.h new file mode 100644 index 000000000..8fd5b053a --- /dev/null +++ b/src/nvidia/generated/g_all_dcl_pb.h @@ -0,0 +1,66 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#ifndef G_ALL_DCL_PB_H__ +#define G_ALL_DCL_PB_H__ + +#include "g_engines_pb.h" +#include "g_journal_pb.h" +#include "g_rc_pb.h" +#include "g_regs_pb.h" + +extern const PRB_MSG_DESC prb_messages_dcl[]; + +// Message descriptor pointers +#define DCL_ENGINES (&prb_messages_dcl[0]) +#define DCL_DCLMSG (&prb_messages_dcl[1]) +#define DCL_ERRORBLOCK (&prb_messages_dcl[2]) + +// Message maximum lengths +// Does not include repeated fields, strings and byte arrays. 
+#define DCL_ENGINES_LEN 130 +#define DCL_DCLMSG_LEN 567 +#define DCL_ERRORBLOCK_LEN 571 + +extern const PRB_FIELD_DESC prb_fields_dcl_engines[]; + +// 'Engines' field descriptor pointers +#define DCL_ENGINES_ENG_GPU (&prb_fields_dcl_engines[0]) +#define DCL_ENGINES_ENG_MC (&prb_fields_dcl_engines[1]) + +// 'Engines' field lengths +#define DCL_ENGINES_ENG_GPU_LEN 59 +#define DCL_ENGINES_ENG_MC_LEN 69 + +extern const PRB_FIELD_DESC prb_fields_dcl_dclmsg[]; + +// 'DclMsg' field descriptor pointers +#define DCL_DCLMSG_COMMON (&prb_fields_dcl_dclmsg[0]) +#define DCL_DCLMSG_JOURNAL_ASSERT (&prb_fields_dcl_dclmsg[1]) +#define DCL_DCLMSG_JOURNAL_RVAHEADER (&prb_fields_dcl_dclmsg[2]) +#define DCL_DCLMSG_JOURNAL_BADREAD (&prb_fields_dcl_dclmsg[3]) +#define DCL_DCLMSG_JOURNAL_BUGCHECK (&prb_fields_dcl_dclmsg[4]) +#define DCL_DCLMSG_RCCOUNTER (&prb_fields_dcl_dclmsg[5]) +#define DCL_DCLMSG_ENGINE (&prb_fields_dcl_dclmsg[6]) + +// 'DclMsg' field lengths +#define DCL_DCLMSG_COMMON_LEN 42 +#define DCL_DCLMSG_JOURNAL_ASSERT_LEN 128 +#define DCL_DCLMSG_JOURNAL_RVAHEADER_LEN 54 +#define DCL_DCLMSG_JOURNAL_BADREAD_LEN 70 +#define DCL_DCLMSG_JOURNAL_BUGCHECK_LEN 69 +#define DCL_DCLMSG_RCCOUNTER_LEN 64 +#define DCL_DCLMSG_ENGINE_LEN 133 + +extern const PRB_FIELD_DESC prb_fields_dcl_errorblock[]; + +// 'ErrorBlock' field descriptor pointers +#define DCL_ERRORBLOCK_DATA (&prb_fields_dcl_errorblock[0]) + +// 'ErrorBlock' field lengths +#define DCL_ERRORBLOCK_DATA_LEN 570 + +extern const PRB_SERVICE_DESC prb_services_dcl[]; + +// Service descriptor pointers + +#endif // G_ALL_DCL_PB_H__ diff --git a/src/nvidia/generated/g_allclasses.h b/src/nvidia/generated/g_allclasses.h new file mode 100644 index 000000000..608e490ec --- /dev/null +++ b/src/nvidia/generated/g_allclasses.h @@ -0,0 +1,676 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * g_allclasses.h + * + * Pull in all class headers or class number declarations. 
+ * The class list is generated by chip-config from Classes.pm + * + * NOTE: this file may be included multiple times + * + */ + + +#if defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER) + +#include // NV01_ROOT +#include // NV01_ROOT_NON_PRIV +#include // NV01_ROOT_CLIENT +#include // FABRIC_MANAGER_SESSION +#include // NV0020_GPU_MANAGEMENT +#include // NV01_DEVICE_0 +#include // NV20_SUBDEVICE_0 +#include // NV2081_BINAPI +#include // NV2082_BINAPI_PRIVILEGED +#include // NV20_SUBDEVICE_DIAG +#include // NV01_CONTEXT_DMA +#include // NV01_MEMORY_SYSTEM +#include // NV01_MEMORY_LOCAL_PRIVILEGED +#include // NV01_MEMORY_LOCAL_USER +#include // NV01_MEMORY_VIRTUAL +#include // NV01_MEMORY_LOCAL_PHYSICAL +#include // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR +#include // NV01_MEMORY_DEVICELESS +#include // NV01_MEMORY_FRAMEBUFFER_CONSOLE +#include // NV01_MEMORY_HW_RESOURCES +#include // NV01_MEMORY_FLA +#include // NV_MEMORY_FABRIC +#include // FABRIC_VASPACE_A +#include // IO_VASPACE_A +#include // NV01_NULL +#include // NV01_EVENT +#include // NV01_EVENT_KERNEL_CALLBACK +#include // NV01_EVENT_OS_EVENT +#include // NV01_EVENT_KERNEL_CALLBACK_EX +#include // NV01_TIMER +#include // KERNEL_GRAPHICS_CONTEXT +#include // NV50_CHANNEL_GPFIFO +#include // GF100_CHANNEL_GPFIFO +#include // KEPLER_CHANNEL_GPFIFO_A +#include // UVM_CHANNEL_RETAINER +#include // KEPLER_CHANNEL_GPFIFO_B +#include // MAXWELL_CHANNEL_GPFIFO_A +#include // PASCAL_CHANNEL_GPFIFO_A +#include // VOLTA_CHANNEL_GPFIFO_A +#include // TURING_CHANNEL_GPFIFO_A +#include // AMPERE_CHANNEL_GPFIFO_A +#include // NV04_SOFTWARE_TEST +#include // VOLTA_USERMODE_A +#include // TURING_USERMODE_A +#include // AMPERE_USERMODE_A +#include // NVC371_DISP_SF_USER +#include // NVC372_DISPLAY_SW +#include // NVC573_DISP_CAPABILITIES +#include // NVC673_DISP_CAPABILITIES +#include // NV04_DISPLAY_COMMON +#include // NV50_DEFERRED_API_CLASS +#include // MPS_COMPUTE +#include // NVC570_DISPLAY +#include // NVC57A_CURSOR_IMM_CHANNEL_PIO +#include // NVC57B_WINDOW_IMM_CHANNEL_DMA +#include // NVC57D_CORE_CHANNEL_DMA +#include // NVC57E_WINDOW_CHANNEL_DMA +#include // NVC670_DISPLAY +#include // NVC671_DISP_SF_USER +#include // NVC67A_CURSOR_IMM_CHANNEL_PIO +#include // NVC67B_WINDOW_IMM_CHANNEL_DMA +#include // NVC67D_CORE_CHANNEL_DMA +#include // NVC67E_WINDOW_CHANNEL_DMA +#include // NV9010_VBLANK_CALLBACK +#include // GF100_PROFILER +#include // MAXWELL_PROFILER +#include // MAXWELL_PROFILER_DEVICE +#include // GF100_SUBDEVICE_MASTER +#include // GF100_ZBC_CLEAR +#include // GF100_DISP_SW +#include // GF100_TIMED_SEMAPHORE_SW +#include // G84_PERFBUFFER +#include // NV50_MEMORY_VIRTUAL +#include // NV50_P2P +#include // NV50_THIRD_PARTY_P2P +#include // FERMI_TWOD_A +#include // FERMI_VASPACE_A +#include // GF100_HDACODEC +#include // NVC4B0_VIDEO_DECODER +#include // NVC6B0_VIDEO_DECODER +#include // NVC7B0_VIDEO_DECODER +#include // NVC4B7_VIDEO_ENCODER +#include // NVB4B7_VIDEO_ENCODER +#include // NVC7B7_VIDEO_ENCODER +#include // NVC4D1_VIDEO_NVJPG +#include // NVC6FA_VIDEO_OFA +#include // NVC7FA_VIDEO_OFA +#include // KEPLER_INLINE_TO_MEMORY_B +#include // FERMI_CONTEXT_SHARE_A +#include // KEPLER_CHANNEL_GROUP_A +#include // PASCAL_DMA_COPY_A +#include // TURING_DMA_COPY_A +#include // AMPERE_DMA_COPY_A +#include // AMPERE_DMA_COPY_B +#include // MAXWELL_DMA_COPY_A +#include // ACCESS_COUNTER_NOTIFY_BUFFER +#include // MMU_FAULT_BUFFER +#include // TURING_A +#include // TURING_COMPUTE_A +#include // AMPERE_A +#include // AMPERE_COMPUTE_A +#include // 
AMPERE_B +#include // AMPERE_COMPUTE_B +#include // AMPERE_SMC_PARTITION_REF +#include // AMPERE_SMC_EXEC_PARTITION_REF +#include // AMPERE_SMC_CONFIG_SESSION +#include // NV0092_RG_LINE_CALLBACK +#include // AMPERE_SMC_MONITOR_SESSION +#include // NV40_DEBUG_BUFFER +#include // GT200_DEBUGGER +#include // NV40_I2C +#include // NV0060_SYNC_GPU_BOOST +#include // GP100_UVM_SW +#include // NV_EVENT_BUFFER + +#else // defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER) + +#ifndef NV01_ROOT +#define NV01_ROOT (0x00000000) +#endif +#ifndef NV1_ROOT +#define NV1_ROOT (0x00000000) // alias +#endif +#ifndef NV01_NULL_OBJECT +#define NV01_NULL_OBJECT (0x00000000) // alias +#endif +#ifndef NV1_NULL_OBJECT +#define NV1_NULL_OBJECT (0x00000000) // alias +#endif + +#ifndef NV01_ROOT_NON_PRIV +#define NV01_ROOT_NON_PRIV (0x00000001) +#endif +#ifndef NV1_ROOT_NON_PRIV +#define NV1_ROOT_NON_PRIV (0x00000001) // alias +#endif + +#ifndef NV01_ROOT_CLIENT +#define NV01_ROOT_CLIENT (0x00000041) +#endif + +#ifndef FABRIC_MANAGER_SESSION +#define FABRIC_MANAGER_SESSION (0x0000000f) +#endif + +#ifndef NV0020_GPU_MANAGEMENT +#define NV0020_GPU_MANAGEMENT (0x00000020) +#endif + +#ifndef NV01_DEVICE_0 +#define NV01_DEVICE_0 (0x00000080) +#endif + +#ifndef NV20_SUBDEVICE_0 +#define NV20_SUBDEVICE_0 (0x00002080) +#endif + +#ifndef NV2081_BINAPI +#define NV2081_BINAPI (0x00002081) +#endif + +#ifndef NV2082_BINAPI_PRIVILEGED +#define NV2082_BINAPI_PRIVILEGED (0x00002082) +#endif + +#ifndef NV20_SUBDEVICE_DIAG +#define NV20_SUBDEVICE_DIAG (0x0000208f) +#endif + +#ifndef NV01_CONTEXT_DMA +#define NV01_CONTEXT_DMA (0x00000002) +#endif + +#ifndef NV01_MEMORY_SYSTEM +#define NV01_MEMORY_SYSTEM (0x0000003e) +#endif +#ifndef NV1_MEMORY_SYSTEM +#define NV1_MEMORY_SYSTEM (0x0000003e) // alias +#endif + +#ifndef NV01_MEMORY_LOCAL_PRIVILEGED +#define NV01_MEMORY_LOCAL_PRIVILEGED (0x0000003f) +#endif +#ifndef NV1_MEMORY_LOCAL_PRIVILEGED +#define NV1_MEMORY_LOCAL_PRIVILEGED (0x0000003f) // alias +#endif +#ifndef NV01_MEMORY_PRIVILEGED +#define NV01_MEMORY_PRIVILEGED (0x0000003f) // alias +#endif +#ifndef NV1_MEMORY_PRIVILEGED +#define NV1_MEMORY_PRIVILEGED (0x0000003f) // alias +#endif + +#ifndef NV01_MEMORY_LOCAL_USER +#define NV01_MEMORY_LOCAL_USER (0x00000040) +#endif +#ifndef NV1_MEMORY_LOCAL_USER +#define NV1_MEMORY_LOCAL_USER (0x00000040) // alias +#endif +#ifndef NV01_MEMORY_USER +#define NV01_MEMORY_USER (0x00000040) // alias +#endif +#ifndef NV1_MEMORY_USER +#define NV1_MEMORY_USER (0x00000040) // alias +#endif + +#ifndef NV01_MEMORY_VIRTUAL +#define NV01_MEMORY_VIRTUAL (0x00000070) +#endif +#ifndef NV01_MEMORY_SYSTEM_DYNAMIC +#define NV01_MEMORY_SYSTEM_DYNAMIC (0x00000070) // alias +#endif +#ifndef NV1_MEMORY_SYSTEM_DYNAMIC +#define NV1_MEMORY_SYSTEM_DYNAMIC (0x00000070) // alias +#endif + +#ifndef NV01_MEMORY_LOCAL_PHYSICAL +#define NV01_MEMORY_LOCAL_PHYSICAL (0x000000c2) +#endif + +#ifndef NV01_MEMORY_SYSTEM_OS_DESCRIPTOR +#define NV01_MEMORY_SYSTEM_OS_DESCRIPTOR (0x00000071) +#endif + +#ifndef NV01_MEMORY_DEVICELESS +#define NV01_MEMORY_DEVICELESS (0x000090ce) +#endif + +#ifndef NV01_MEMORY_FRAMEBUFFER_CONSOLE +#define NV01_MEMORY_FRAMEBUFFER_CONSOLE (0x00000076) +#endif + +#ifndef NV01_MEMORY_HW_RESOURCES +#define NV01_MEMORY_HW_RESOURCES (0x000000b1) +#endif + +#ifndef NV01_MEMORY_FLA +#define NV01_MEMORY_FLA (0x000000f3) +#endif + +#ifndef NV_MEMORY_FABRIC +#define NV_MEMORY_FABRIC (0x000000f8) +#endif + +#ifndef FABRIC_VASPACE_A +#define FABRIC_VASPACE_A (0x000000fc) +#endif + +#ifndef IO_VASPACE_A +#define IO_VASPACE_A 
(0x000000f2) +#endif + +#ifndef NV01_NULL +#define NV01_NULL (0x00000030) +#endif +#ifndef NV1_NULL +#define NV1_NULL (0x00000030) // alias +#endif + +#ifndef NV01_EVENT +#define NV01_EVENT (0x00000005) +#endif +#ifndef NV1_EVENT +#define NV1_EVENT (0x00000005) // alias +#endif + +#ifndef NV01_EVENT_KERNEL_CALLBACK +#define NV01_EVENT_KERNEL_CALLBACK (0x00000078) +#endif +#ifndef NV1_EVENT_KERNEL_CALLBACK +#define NV1_EVENT_KERNEL_CALLBACK (0x00000078) // alias +#endif + +#ifndef NV01_EVENT_OS_EVENT +#define NV01_EVENT_OS_EVENT (0x00000079) +#endif +#ifndef NV1_EVENT_OS_EVENT +#define NV1_EVENT_OS_EVENT (0x00000079) // alias +#endif +#ifndef NV01_EVENT_WIN32_EVENT +#define NV01_EVENT_WIN32_EVENT (0x00000079) // alias +#endif +#ifndef NV1_EVENT_WIN32_EVENT +#define NV1_EVENT_WIN32_EVENT (0x00000079) // alias +#endif + +#ifndef NV01_EVENT_KERNEL_CALLBACK_EX +#define NV01_EVENT_KERNEL_CALLBACK_EX (0x0000007e) +#endif +#ifndef NV1_EVENT_KERNEL_CALLBACK_EX +#define NV1_EVENT_KERNEL_CALLBACK_EX (0x0000007e) // alias +#endif + +#ifndef NV01_TIMER +#define NV01_TIMER (0x00000004) +#endif +#ifndef NV1_TIMER +#define NV1_TIMER (0x00000004) // alias +#endif + +#ifndef KERNEL_GRAPHICS_CONTEXT +#define KERNEL_GRAPHICS_CONTEXT (0x00000090) +#endif + +#ifndef NV50_CHANNEL_GPFIFO +#define NV50_CHANNEL_GPFIFO (0x0000506f) +#endif + +#ifndef GF100_CHANNEL_GPFIFO +#define GF100_CHANNEL_GPFIFO (0x0000906f) +#endif + +#ifndef KEPLER_CHANNEL_GPFIFO_A +#define KEPLER_CHANNEL_GPFIFO_A (0x0000a06f) +#endif + +#ifndef UVM_CHANNEL_RETAINER +#define UVM_CHANNEL_RETAINER (0x0000c574) +#endif + +#ifndef KEPLER_CHANNEL_GPFIFO_B +#define KEPLER_CHANNEL_GPFIFO_B (0x0000a16f) +#endif + +#ifndef MAXWELL_CHANNEL_GPFIFO_A +#define MAXWELL_CHANNEL_GPFIFO_A (0x0000b06f) +#endif + +#ifndef PASCAL_CHANNEL_GPFIFO_A +#define PASCAL_CHANNEL_GPFIFO_A (0x0000c06f) +#endif + +#ifndef VOLTA_CHANNEL_GPFIFO_A +#define VOLTA_CHANNEL_GPFIFO_A (0x0000c36f) +#endif + +#ifndef TURING_CHANNEL_GPFIFO_A +#define TURING_CHANNEL_GPFIFO_A (0x0000c46f) +#endif + +#ifndef AMPERE_CHANNEL_GPFIFO_A +#define AMPERE_CHANNEL_GPFIFO_A (0x0000c56f) +#endif + +#ifndef NV04_SOFTWARE_TEST +#define NV04_SOFTWARE_TEST (0x0000007d) +#endif +#ifndef NV4_SOFTWARE_TEST +#define NV4_SOFTWARE_TEST (0x0000007d) // alias +#endif + +#ifndef VOLTA_USERMODE_A +#define VOLTA_USERMODE_A (0x0000c361) +#endif + +#ifndef TURING_USERMODE_A +#define TURING_USERMODE_A (0x0000c461) +#endif + +#ifndef AMPERE_USERMODE_A +#define AMPERE_USERMODE_A (0x0000c561) +#endif + +#ifndef NVC371_DISP_SF_USER +#define NVC371_DISP_SF_USER (0x0000c371) +#endif + +#ifndef NVC372_DISPLAY_SW +#define NVC372_DISPLAY_SW (0x0000c372) +#endif + +#ifndef NVC573_DISP_CAPABILITIES +#define NVC573_DISP_CAPABILITIES (0x0000c573) +#endif + +#ifndef NVC673_DISP_CAPABILITIES +#define NVC673_DISP_CAPABILITIES (0x0000c673) +#endif + +#ifndef NV04_DISPLAY_COMMON +#define NV04_DISPLAY_COMMON (0x00000073) +#endif + +#ifndef NV50_DEFERRED_API_CLASS +#define NV50_DEFERRED_API_CLASS (0x00005080) +#endif + +#ifndef MPS_COMPUTE +#define MPS_COMPUTE (0x0000900e) +#endif + +#ifndef NVC570_DISPLAY +#define NVC570_DISPLAY (0x0000c570) +#endif + +#ifndef NVC57A_CURSOR_IMM_CHANNEL_PIO +#define NVC57A_CURSOR_IMM_CHANNEL_PIO (0x0000c57a) +#endif + +#ifndef NVC57B_WINDOW_IMM_CHANNEL_DMA +#define NVC57B_WINDOW_IMM_CHANNEL_DMA (0x0000c57b) +#endif + +#ifndef NVC57D_CORE_CHANNEL_DMA +#define NVC57D_CORE_CHANNEL_DMA (0x0000c57d) +#endif + +#ifndef NVC57E_WINDOW_CHANNEL_DMA +#define NVC57E_WINDOW_CHANNEL_DMA (0x0000c57e) +#endif + 
+#ifndef NVC670_DISPLAY +#define NVC670_DISPLAY (0x0000c670) +#endif + +#ifndef NVC671_DISP_SF_USER +#define NVC671_DISP_SF_USER (0x0000c671) +#endif + +#ifndef NVC67A_CURSOR_IMM_CHANNEL_PIO +#define NVC67A_CURSOR_IMM_CHANNEL_PIO (0x0000c67a) +#endif + +#ifndef NVC67B_WINDOW_IMM_CHANNEL_DMA +#define NVC67B_WINDOW_IMM_CHANNEL_DMA (0x0000c67b) +#endif + +#ifndef NVC67D_CORE_CHANNEL_DMA +#define NVC67D_CORE_CHANNEL_DMA (0x0000c67d) +#endif + +#ifndef NVC67E_WINDOW_CHANNEL_DMA +#define NVC67E_WINDOW_CHANNEL_DMA (0x0000c67e) +#endif + +#ifndef NV9010_VBLANK_CALLBACK +#define NV9010_VBLANK_CALLBACK (0x00009010) +#endif + +#ifndef GF100_PROFILER +#define GF100_PROFILER (0x000090cc) +#endif + +#ifndef MAXWELL_PROFILER +#define MAXWELL_PROFILER (0x0000b0cc) +#endif + +#ifndef MAXWELL_PROFILER_DEVICE +#define MAXWELL_PROFILER_DEVICE (0x0000b2cc) +#endif + +#ifndef GF100_SUBDEVICE_MASTER +#define GF100_SUBDEVICE_MASTER (0x000090e6) +#endif + +#ifndef GF100_ZBC_CLEAR +#define GF100_ZBC_CLEAR (0x00009096) +#endif + +#ifndef GF100_DISP_SW +#define GF100_DISP_SW (0x00009072) +#endif + +#ifndef GF100_TIMED_SEMAPHORE_SW +#define GF100_TIMED_SEMAPHORE_SW (0x00009074) +#endif + +#ifndef G84_PERFBUFFER +#define G84_PERFBUFFER (0x0000844c) +#endif + +#ifndef NV50_MEMORY_VIRTUAL +#define NV50_MEMORY_VIRTUAL (0x000050a0) +#endif + +#ifndef NV50_P2P +#define NV50_P2P (0x0000503b) +#endif + +#ifndef NV50_THIRD_PARTY_P2P +#define NV50_THIRD_PARTY_P2P (0x0000503c) +#endif + +#ifndef FERMI_TWOD_A +#define FERMI_TWOD_A (0x0000902d) +#endif + +#ifndef FERMI_VASPACE_A +#define FERMI_VASPACE_A (0x000090f1) +#endif + +#ifndef GF100_HDACODEC +#define GF100_HDACODEC (0x000090ec) +#endif + +#ifndef NVC4B0_VIDEO_DECODER +#define NVC4B0_VIDEO_DECODER (0x0000c4b0) +#endif + +#ifndef NVC6B0_VIDEO_DECODER +#define NVC6B0_VIDEO_DECODER (0x0000c6b0) +#endif + +#ifndef NVC7B0_VIDEO_DECODER +#define NVC7B0_VIDEO_DECODER (0x0000c7b0) +#endif + +#ifndef NVC4B7_VIDEO_ENCODER +#define NVC4B7_VIDEO_ENCODER (0x0000c4b7) +#endif + +#ifndef NVB4B7_VIDEO_ENCODER +#define NVB4B7_VIDEO_ENCODER (0x0000b4b7) +#endif + +#ifndef NVC7B7_VIDEO_ENCODER +#define NVC7B7_VIDEO_ENCODER (0x0000c7b7) +#endif + +#ifndef NVC4D1_VIDEO_NVJPG +#define NVC4D1_VIDEO_NVJPG (0x0000c4d1) +#endif + +#ifndef NVC6FA_VIDEO_OFA +#define NVC6FA_VIDEO_OFA (0x0000c6fa) +#endif + +#ifndef NVC7FA_VIDEO_OFA +#define NVC7FA_VIDEO_OFA (0x0000c7fa) +#endif + +#ifndef KEPLER_INLINE_TO_MEMORY_B +#define KEPLER_INLINE_TO_MEMORY_B (0x0000a140) +#endif + +#ifndef FERMI_CONTEXT_SHARE_A +#define FERMI_CONTEXT_SHARE_A (0x00009067) +#endif + +#ifndef KEPLER_CHANNEL_GROUP_A +#define KEPLER_CHANNEL_GROUP_A (0x0000a06c) +#endif + +#ifndef PASCAL_DMA_COPY_A +#define PASCAL_DMA_COPY_A (0x0000c0b5) +#endif + +#ifndef TURING_DMA_COPY_A +#define TURING_DMA_COPY_A (0x0000c5b5) +#endif + +#ifndef AMPERE_DMA_COPY_A +#define AMPERE_DMA_COPY_A (0x0000c6b5) +#endif + +#ifndef AMPERE_DMA_COPY_B +#define AMPERE_DMA_COPY_B (0x0000c7b5) +#endif + +#ifndef MAXWELL_DMA_COPY_A +#define MAXWELL_DMA_COPY_A (0x0000b0b5) +#endif + +#ifndef ACCESS_COUNTER_NOTIFY_BUFFER +#define ACCESS_COUNTER_NOTIFY_BUFFER (0x0000c365) +#endif + +#ifndef MMU_FAULT_BUFFER +#define MMU_FAULT_BUFFER (0x0000c369) +#endif + +#ifndef TURING_A +#define TURING_A (0x0000c597) +#endif + +#ifndef TURING_COMPUTE_A +#define TURING_COMPUTE_A (0x0000c5c0) +#endif + +#ifndef AMPERE_A +#define AMPERE_A (0x0000c697) +#endif + +#ifndef AMPERE_COMPUTE_A +#define AMPERE_COMPUTE_A (0x0000c6c0) +#endif + +#ifndef AMPERE_B +#define AMPERE_B (0x0000c797) 
+#endif + +#ifndef AMPERE_COMPUTE_B +#define AMPERE_COMPUTE_B (0x0000c7c0) +#endif + +#ifndef AMPERE_SMC_PARTITION_REF +#define AMPERE_SMC_PARTITION_REF (0x0000c637) +#endif + +#ifndef AMPERE_SMC_EXEC_PARTITION_REF +#define AMPERE_SMC_EXEC_PARTITION_REF (0x0000c638) +#endif + +#ifndef AMPERE_SMC_CONFIG_SESSION +#define AMPERE_SMC_CONFIG_SESSION (0x0000c639) +#endif + +#ifndef NV0092_RG_LINE_CALLBACK +#define NV0092_RG_LINE_CALLBACK (0x00000092) +#endif + +#ifndef AMPERE_SMC_MONITOR_SESSION +#define AMPERE_SMC_MONITOR_SESSION (0x0000c640) +#endif + +#ifndef NV40_DEBUG_BUFFER +#define NV40_DEBUG_BUFFER (0x000000db) +#endif + +#ifndef GT200_DEBUGGER +#define GT200_DEBUGGER (0x000083de) +#endif + +#ifndef NV40_I2C +#define NV40_I2C (0x0000402c) +#endif + +#ifndef NV0060_SYNC_GPU_BOOST +#define NV0060_SYNC_GPU_BOOST (0x00000060) +#endif + +#ifndef GP100_UVM_SW +#define GP100_UVM_SW (0x0000c076) +#endif + +#ifndef NV_EVENT_BUFFER +#define NV_EVENT_BUFFER (0x000090cd) +#endif + +#endif // defined(SDK_ALL_CLASSES_INCLUDE_FULL_HEADER) diff --git a/src/nvidia/generated/g_binary_api_nvoc.c b/src/nvidia/generated/g_binary_api_nvoc.c new file mode 100644 index 000000000..d641c7e08 --- /dev/null +++ b/src/nvidia/generated/g_binary_api_nvoc.c @@ -0,0 +1,659 @@ +#define NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_binary_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb7a47c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_BinaryApi(BinaryApi*); +void __nvoc_init_funcTable_BinaryApi(BinaryApi*); +NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_BinaryApi(BinaryApi*); +void __nvoc_dtor_BinaryApi(BinaryApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApi; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_BinaryApi = { + /*pClassDef=*/ &__nvoc_class_def_BinaryApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_BinaryApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_BinaryApi_BinaryApi, + &__nvoc_rtti_BinaryApi_GpuResource, + &__nvoc_rtti_BinaryApi_RmResource, + &__nvoc_rtti_BinaryApi_RmResourceCommon, + &__nvoc_rtti_BinaryApi_RsResource, + &__nvoc_rtti_BinaryApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(BinaryApi), + /*classId=*/ classId(BinaryApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "BinaryApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApi, + /*pCastInfo=*/ &__nvoc_castinfo_BinaryApi, + /*pExportInfo=*/ &__nvoc_export_info_BinaryApi +}; + +static NV_STATUS __nvoc_thunk_BinaryApi_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return binapiControl((struct BinaryApi *)(((unsigned char *)pResource) - __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_binapiShareCallback(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiUnmap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiGetMemInterMapParams(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiGetMemoryMappingDescriptor(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiGetMapAddrSpace(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_binapiGetInternalObjectHandle(struct BinaryApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiControlFilter(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char 
*)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_binapiAddAdditionalDependants(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_binapiGetRefCount(struct BinaryApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiCheckMemInterUnmap(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiMapTo(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiControl_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiGetRegBaseOffsetAndSize(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_binapiCanCopy(struct BinaryApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiInternalControlForward(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_binapiPreDestruct(struct BinaryApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiUnmapFrom(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_binapiControl_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiControlLookup(struct BinaryApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiMap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT 
*pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_binapiAccessCallback(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_BinaryApi(BinaryApi *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_BinaryApi(BinaryApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail_GpuResource; + __nvoc_init_dataField_BinaryApi(pThis); + + status = __nvoc_binapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail__init; + goto __nvoc_ctor_BinaryApi_exit; // Success + +__nvoc_ctor_BinaryApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_BinaryApi_fail_GpuResource: +__nvoc_ctor_BinaryApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_BinaryApi_1(BinaryApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__binapiControl__ = &binapiControl_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_BinaryApi_gpuresControl; + + pThis->__binapiShareCallback__ = &__nvoc_thunk_GpuResource_binapiShareCallback; + + pThis->__binapiUnmap__ = &__nvoc_thunk_GpuResource_binapiUnmap; + + pThis->__binapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_binapiGetMemInterMapParams; + + pThis->__binapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_binapiGetMemoryMappingDescriptor; + + pThis->__binapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_binapiGetMapAddrSpace; + + pThis->__binapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_binapiGetInternalObjectHandle; + + pThis->__binapiControlFilter__ = &__nvoc_thunk_RsResource_binapiControlFilter; + + pThis->__binapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_binapiAddAdditionalDependants; + + pThis->__binapiGetRefCount__ = &__nvoc_thunk_RsResource_binapiGetRefCount; + + pThis->__binapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_binapiCheckMemInterUnmap; + + pThis->__binapiMapTo__ = &__nvoc_thunk_RsResource_binapiMapTo; + + pThis->__binapiControl_Prologue__ = &__nvoc_thunk_RmResource_binapiControl_Prologue; + + pThis->__binapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_binapiGetRegBaseOffsetAndSize; + + pThis->__binapiCanCopy__ = &__nvoc_thunk_RsResource_binapiCanCopy; + + pThis->__binapiInternalControlForward__ = &__nvoc_thunk_GpuResource_binapiInternalControlForward; + + pThis->__binapiPreDestruct__ = &__nvoc_thunk_RsResource_binapiPreDestruct; + + 
pThis->__binapiUnmapFrom__ = &__nvoc_thunk_RsResource_binapiUnmapFrom; + + pThis->__binapiControl_Epilogue__ = &__nvoc_thunk_RmResource_binapiControl_Epilogue; + + pThis->__binapiControlLookup__ = &__nvoc_thunk_RsResource_binapiControlLookup; + + pThis->__binapiMap__ = &__nvoc_thunk_GpuResource_binapiMap; + + pThis->__binapiAccessCallback__ = &__nvoc_thunk_RmResource_binapiAccessCallback; +} + +void __nvoc_init_funcTable_BinaryApi(BinaryApi *pThis) { + __nvoc_init_funcTable_BinaryApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_BinaryApi(BinaryApi *pThis) { + pThis->__nvoc_pbase_BinaryApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_BinaryApi(pThis); +} + +NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + BinaryApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(BinaryApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(BinaryApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_BinaryApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_BinaryApi(pThis); + status = __nvoc_ctor_BinaryApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_BinaryApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_BinaryApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_BinaryApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x1c0579 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi; + +void 
__nvoc_init_BinaryApiPrivileged(BinaryApiPrivileged*); +void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged*); +NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged*); +void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApiPrivileged; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_BinaryApiPrivileged = { + /*pClassDef=*/ &__nvoc_class_def_BinaryApiPrivileged, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApiPrivileged, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_BinaryApi = { + /*pClassDef=*/ &__nvoc_class_def_BinaryApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_BinaryApiPrivileged = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_BinaryApiPrivileged_BinaryApiPrivileged, + &__nvoc_rtti_BinaryApiPrivileged_BinaryApi, + &__nvoc_rtti_BinaryApiPrivileged_GpuResource, + &__nvoc_rtti_BinaryApiPrivileged_RmResource, + &__nvoc_rtti_BinaryApiPrivileged_RmResourceCommon, + &__nvoc_rtti_BinaryApiPrivileged_RsResource, + &__nvoc_rtti_BinaryApiPrivileged_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged = +{ + /*classInfo=*/ { + /*size=*/ sizeof(BinaryApiPrivileged), + /*classId=*/ classId(BinaryApiPrivileged), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "BinaryApiPrivileged", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApiPrivileged, + /*pCastInfo=*/ &__nvoc_castinfo_BinaryApiPrivileged, + /*pExportInfo=*/ &__nvoc_export_info_BinaryApiPrivileged +}; + +static NV_STATUS __nvoc_thunk_BinaryApiPrivileged_binapiControl(struct BinaryApi *pResource, struct 
CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return binapiprivControl((struct BinaryApiPrivileged *)(((unsigned char *)pResource) - __nvoc_rtti_BinaryApiPrivileged_BinaryApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_binapiprivShareCallback(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivUnmap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiprivGetMemInterMapParams(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiprivGetMemoryMappingDescriptor(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivGetMapAddrSpace(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_binapiprivGetInternalObjectHandle(struct BinaryApiPrivileged *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiprivControlFilter(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_binapiprivAddAdditionalDependants(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_binapiprivGetRefCount(struct BinaryApiPrivileged *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiprivCheckMemInterUnmap(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS 
__nvoc_thunk_RsResource_binapiprivMapTo(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_binapiprivControl_Prologue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_binapiprivCanCopy(struct BinaryApiPrivileged *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivInternalControlForward(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_binapiprivPreDestruct(struct BinaryApiPrivileged *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiprivUnmapFrom(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_binapiprivControl_Epilogue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_binapiprivControlLookup(struct BinaryApiPrivileged *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_binapiprivMap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_binapiprivAccessCallback(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApiPrivileged 
= +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_BinaryApi(BinaryApi*); +void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged *pThis) { + __nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_BinaryApi(&pThis->__nvoc_base_BinaryApi, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi; + __nvoc_init_dataField_BinaryApiPrivileged(pThis); + + status = __nvoc_binapiprivConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail__init; + goto __nvoc_ctor_BinaryApiPrivileged_exit; // Success + +__nvoc_ctor_BinaryApiPrivileged_fail__init: + __nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi); +__nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi: +__nvoc_ctor_BinaryApiPrivileged_exit: + + return status; +} + +static void __nvoc_init_funcTable_BinaryApiPrivileged_1(BinaryApiPrivileged *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__binapiprivControl__ = &binapiprivControl_IMPL; + + pThis->__nvoc_base_BinaryApi.__binapiControl__ = &__nvoc_thunk_BinaryApiPrivileged_binapiControl; + + pThis->__binapiprivShareCallback__ = &__nvoc_thunk_GpuResource_binapiprivShareCallback; + + pThis->__binapiprivUnmap__ = &__nvoc_thunk_GpuResource_binapiprivUnmap; + + pThis->__binapiprivGetMemInterMapParams__ = &__nvoc_thunk_RmResource_binapiprivGetMemInterMapParams; + + pThis->__binapiprivGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_binapiprivGetMemoryMappingDescriptor; + + pThis->__binapiprivGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_binapiprivGetMapAddrSpace; + + pThis->__binapiprivGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_binapiprivGetInternalObjectHandle; + + pThis->__binapiprivControlFilter__ = &__nvoc_thunk_RsResource_binapiprivControlFilter; + + pThis->__binapiprivAddAdditionalDependants__ = &__nvoc_thunk_RsResource_binapiprivAddAdditionalDependants; + + pThis->__binapiprivGetRefCount__ = &__nvoc_thunk_RsResource_binapiprivGetRefCount; + + pThis->__binapiprivCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_binapiprivCheckMemInterUnmap; + + pThis->__binapiprivMapTo__ = &__nvoc_thunk_RsResource_binapiprivMapTo; + + pThis->__binapiprivControl_Prologue__ = &__nvoc_thunk_RmResource_binapiprivControl_Prologue; + + pThis->__binapiprivGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize; + + pThis->__binapiprivCanCopy__ = &__nvoc_thunk_RsResource_binapiprivCanCopy; + + pThis->__binapiprivInternalControlForward__ = &__nvoc_thunk_GpuResource_binapiprivInternalControlForward; + + pThis->__binapiprivPreDestruct__ = &__nvoc_thunk_RsResource_binapiprivPreDestruct; + + pThis->__binapiprivUnmapFrom__ = &__nvoc_thunk_RsResource_binapiprivUnmapFrom; + + pThis->__binapiprivControl_Epilogue__ = &__nvoc_thunk_RmResource_binapiprivControl_Epilogue; + + pThis->__binapiprivControlLookup__ = &__nvoc_thunk_RsResource_binapiprivControlLookup; + + pThis->__binapiprivMap__ = &__nvoc_thunk_GpuResource_binapiprivMap; + + pThis->__binapiprivAccessCallback__ = 
&__nvoc_thunk_RmResource_binapiprivAccessCallback; +} + +void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged *pThis) { + __nvoc_init_funcTable_BinaryApiPrivileged_1(pThis); +} + +void __nvoc_init_BinaryApi(BinaryApi*); +void __nvoc_init_BinaryApiPrivileged(BinaryApiPrivileged *pThis) { + pThis->__nvoc_pbase_BinaryApiPrivileged = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_BinaryApi = &pThis->__nvoc_base_BinaryApi; + __nvoc_init_BinaryApi(&pThis->__nvoc_base_BinaryApi); + __nvoc_init_funcTable_BinaryApiPrivileged(pThis); +} + +NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + BinaryApiPrivileged *pThis; + + pThis = portMemAllocNonPaged(sizeof(BinaryApiPrivileged)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(BinaryApiPrivileged)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_BinaryApiPrivileged); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_BinaryApiPrivileged(pThis); + status = __nvoc_ctor_BinaryApiPrivileged(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_BinaryApiPrivileged_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_BinaryApiPrivileged_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_BinaryApiPrivileged(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_binary_api_nvoc.h b/src/nvidia/generated/g_binary_api_nvoc.h new file mode 100644 index 000000000..c4758ca46 --- /dev/null +++ b/src/nvidia/generated/g_binary_api_nvoc.h @@ -0,0 +1,416 @@ +#ifndef _G_BINARY_API_NVOC_H_ +#define _G_BINARY_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_binary_api_nvoc.h" + +#ifndef BINARY_API_H +#define BINARY_API_H + +#include "core/core.h" +#include "rmapi/resource.h" +#include "gpu/gpu_resource.h" +#include "resserv/rs_resource.h" +#include "rmapi/control.h" + +#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct BinaryApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct BinaryApi *__nvoc_pbase_BinaryApi; + NV_STATUS (*__binapiControl__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__binapiShareCallback__)(struct BinaryApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__binapiUnmap__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__binapiGetMemInterMapParams__)(struct BinaryApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__binapiGetMemoryMappingDescriptor__)(struct BinaryApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__binapiGetMapAddrSpace__)(struct BinaryApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__binapiGetInternalObjectHandle__)(struct BinaryApi *); + NV_STATUS (*__binapiControlFilter__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__binapiAddAdditionalDependants__)(struct RsClient *, struct BinaryApi *, RsResourceRef *); + NvU32 (*__binapiGetRefCount__)(struct BinaryApi *); + NV_STATUS (*__binapiCheckMemInterUnmap__)(struct BinaryApi *, NvBool); + NV_STATUS (*__binapiMapTo__)(struct BinaryApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__binapiControl_Prologue__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__binapiGetRegBaseOffsetAndSize__)(struct BinaryApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__binapiCanCopy__)(struct BinaryApi *); + NV_STATUS (*__binapiInternalControlForward__)(struct BinaryApi *, NvU32, void *, NvU32); + void (*__binapiPreDestruct__)(struct BinaryApi *); + NV_STATUS (*__binapiUnmapFrom__)(struct BinaryApi *, RS_RES_UNMAP_FROM_PARAMS *); + void 
(*__binapiControl_Epilogue__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__binapiControlLookup__)(struct BinaryApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__binapiMap__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__binapiAccessCallback__)(struct BinaryApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_BinaryApi_TYPEDEF__ +#define __NVOC_CLASS_BinaryApi_TYPEDEF__ +typedef struct BinaryApi BinaryApi; +#endif /* __NVOC_CLASS_BinaryApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_BinaryApi +#define __nvoc_class_id_BinaryApi 0xb7a47c +#endif /* __nvoc_class_id_BinaryApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi; + +#define __staticCast_BinaryApi(pThis) \ + ((pThis)->__nvoc_pbase_BinaryApi) + +#ifdef __nvoc_binary_api_h_disabled +#define __dynamicCast_BinaryApi(pThis) ((BinaryApi*)NULL) +#else //__nvoc_binary_api_h_disabled +#define __dynamicCast_BinaryApi(pThis) \ + ((BinaryApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApi))) +#endif //__nvoc_binary_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_BinaryApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_BinaryApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define binapiControl(pResource, pCallContext, pParams) binapiControl_DISPATCH(pResource, pCallContext, pParams) +#define binapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define binapiUnmap(pGpuResource, pCallContext, pCpuMapping) binapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define binapiGetMemInterMapParams(pRmResource, pParams) binapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define binapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define binapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define binapiGetInternalObjectHandle(pGpuResource) binapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define binapiControlFilter(pResource, pCallContext, pParams) binapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define binapiAddAdditionalDependants(pClient, pResource, pReference) binapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define binapiGetRefCount(pResource) binapiGetRefCount_DISPATCH(pResource) +#define binapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define binapiMapTo(pResource, pParams) binapiMapTo_DISPATCH(pResource, pParams) +#define binapiControl_Prologue(pResource, pCallContext, pParams) binapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define binapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define binapiCanCopy(pResource) binapiCanCopy_DISPATCH(pResource) +#define 
binapiInternalControlForward(pGpuResource, command, pParams, size) binapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define binapiPreDestruct(pResource) binapiPreDestruct_DISPATCH(pResource) +#define binapiUnmapFrom(pResource, pParams) binapiUnmapFrom_DISPATCH(pResource, pParams) +#define binapiControl_Epilogue(pResource, pCallContext, pParams) binapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define binapiControlLookup(pResource, pParams, ppEntry) binapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define binapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define binapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS binapiControl_IMPL(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS binapiControl_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiControl__(pResource, pCallContext, pParams); +} + +static inline NvBool binapiShareCallback_DISPATCH(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__binapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS binapiUnmap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__binapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS binapiGetMemInterMapParams_DISPATCH(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__binapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS binapiGetMemoryMappingDescriptor_DISPATCH(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__binapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS binapiGetMapAddrSpace_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__binapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle binapiGetInternalObjectHandle_DISPATCH(struct BinaryApi *pGpuResource) { + return pGpuResource->__binapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS binapiControlFilter_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void binapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) { + pResource->__binapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 binapiGetRefCount_DISPATCH(struct BinaryApi *pResource) { + return pResource->__binapiGetRefCount__(pResource); +} + +static inline NV_STATUS binapiCheckMemInterUnmap_DISPATCH(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__binapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS 
binapiMapTo_DISPATCH(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__binapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS binapiControl_Prologue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__binapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool binapiCanCopy_DISPATCH(struct BinaryApi *pResource) { + return pResource->__binapiCanCopy__(pResource); +} + +static inline NV_STATUS binapiInternalControlForward_DISPATCH(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__binapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void binapiPreDestruct_DISPATCH(struct BinaryApi *pResource) { + pResource->__binapiPreDestruct__(pResource); +} + +static inline NV_STATUS binapiUnmapFrom_DISPATCH(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__binapiUnmapFrom__(pResource, pParams); +} + +static inline void binapiControl_Epilogue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__binapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiControlLookup_DISPATCH(struct BinaryApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__binapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS binapiMap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__binapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool binapiAccessCallback_DISPATCH(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__binapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS binapiConstruct_IMPL(struct BinaryApi *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_binapiConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct BinaryApiPrivileged { + const struct NVOC_RTTI *__nvoc_rtti; + struct BinaryApi __nvoc_base_BinaryApi; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct BinaryApi *__nvoc_pbase_BinaryApi; + struct BinaryApiPrivileged *__nvoc_pbase_BinaryApiPrivileged; + NV_STATUS (*__binapiprivControl__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__binapiprivShareCallback__)(struct BinaryApiPrivileged *, struct RsClient *, struct RsResourceRef *, 
RS_SHARE_POLICY *); + NV_STATUS (*__binapiprivUnmap__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__binapiprivGetMemInterMapParams__)(struct BinaryApiPrivileged *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__binapiprivGetMemoryMappingDescriptor__)(struct BinaryApiPrivileged *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__binapiprivGetMapAddrSpace__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__binapiprivGetInternalObjectHandle__)(struct BinaryApiPrivileged *); + NV_STATUS (*__binapiprivControlFilter__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__binapiprivAddAdditionalDependants__)(struct RsClient *, struct BinaryApiPrivileged *, RsResourceRef *); + NvU32 (*__binapiprivGetRefCount__)(struct BinaryApiPrivileged *); + NV_STATUS (*__binapiprivCheckMemInterUnmap__)(struct BinaryApiPrivileged *, NvBool); + NV_STATUS (*__binapiprivMapTo__)(struct BinaryApiPrivileged *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__binapiprivControl_Prologue__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__binapiprivGetRegBaseOffsetAndSize__)(struct BinaryApiPrivileged *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__binapiprivCanCopy__)(struct BinaryApiPrivileged *); + NV_STATUS (*__binapiprivInternalControlForward__)(struct BinaryApiPrivileged *, NvU32, void *, NvU32); + void (*__binapiprivPreDestruct__)(struct BinaryApiPrivileged *); + NV_STATUS (*__binapiprivUnmapFrom__)(struct BinaryApiPrivileged *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__binapiprivControl_Epilogue__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__binapiprivControlLookup__)(struct BinaryApiPrivileged *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__binapiprivMap__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__binapiprivAccessCallback__)(struct BinaryApiPrivileged *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ +#define __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ +typedef struct BinaryApiPrivileged BinaryApiPrivileged; +#endif /* __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ */ + +#ifndef __nvoc_class_id_BinaryApiPrivileged +#define __nvoc_class_id_BinaryApiPrivileged 0x1c0579 +#endif /* __nvoc_class_id_BinaryApiPrivileged */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged; + +#define __staticCast_BinaryApiPrivileged(pThis) \ + ((pThis)->__nvoc_pbase_BinaryApiPrivileged) + +#ifdef __nvoc_binary_api_h_disabled +#define __dynamicCast_BinaryApiPrivileged(pThis) ((BinaryApiPrivileged*)NULL) +#else //__nvoc_binary_api_h_disabled +#define __dynamicCast_BinaryApiPrivileged(pThis) \ + ((BinaryApiPrivileged*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApiPrivileged))) +#endif //__nvoc_binary_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_BinaryApiPrivileged(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + 
__nvoc_objCreate_BinaryApiPrivileged((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define binapiprivControl(pResource, pCallContext, pParams) binapiprivControl_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiprivShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define binapiprivUnmap(pGpuResource, pCallContext, pCpuMapping) binapiprivUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define binapiprivGetMemInterMapParams(pRmResource, pParams) binapiprivGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define binapiprivGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiprivGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define binapiprivGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiprivGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define binapiprivGetInternalObjectHandle(pGpuResource) binapiprivGetInternalObjectHandle_DISPATCH(pGpuResource) +#define binapiprivControlFilter(pResource, pCallContext, pParams) binapiprivControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivAddAdditionalDependants(pClient, pResource, pReference) binapiprivAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define binapiprivGetRefCount(pResource) binapiprivGetRefCount_DISPATCH(pResource) +#define binapiprivCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiprivCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define binapiprivMapTo(pResource, pParams) binapiprivMapTo_DISPATCH(pResource, pParams) +#define binapiprivControl_Prologue(pResource, pCallContext, pParams) binapiprivControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiprivGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define binapiprivCanCopy(pResource) binapiprivCanCopy_DISPATCH(pResource) +#define binapiprivInternalControlForward(pGpuResource, command, pParams, size) binapiprivInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define binapiprivPreDestruct(pResource) binapiprivPreDestruct_DISPATCH(pResource) +#define binapiprivUnmapFrom(pResource, pParams) binapiprivUnmapFrom_DISPATCH(pResource, pParams) +#define binapiprivControl_Epilogue(pResource, pCallContext, pParams) binapiprivControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define binapiprivControlLookup(pResource, pParams, ppEntry) binapiprivControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define binapiprivMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiprivMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define binapiprivAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiprivAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS binapiprivControl_IMPL(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS binapiprivControl_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiprivControl__(pResource, pCallContext, pParams); +} + +static inline NvBool binapiprivShareCallback_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct 
RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__binapiprivShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS binapiprivUnmap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__binapiprivUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS binapiprivGetMemInterMapParams_DISPATCH(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__binapiprivGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS binapiprivGetMemoryMappingDescriptor_DISPATCH(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__binapiprivGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS binapiprivGetMapAddrSpace_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__binapiprivGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle binapiprivGetInternalObjectHandle_DISPATCH(struct BinaryApiPrivileged *pGpuResource) { + return pGpuResource->__binapiprivGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS binapiprivControlFilter_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiprivControlFilter__(pResource, pCallContext, pParams); +} + +static inline void binapiprivAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) { + pResource->__binapiprivAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 binapiprivGetRefCount_DISPATCH(struct BinaryApiPrivileged *pResource) { + return pResource->__binapiprivGetRefCount__(pResource); +} + +static inline NV_STATUS binapiprivCheckMemInterUnmap_DISPATCH(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__binapiprivCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS binapiprivMapTo_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__binapiprivMapTo__(pResource, pParams); +} + +static inline NV_STATUS binapiprivControl_Prologue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__binapiprivControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiprivGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__binapiprivGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool binapiprivCanCopy_DISPATCH(struct BinaryApiPrivileged *pResource) { + return pResource->__binapiprivCanCopy__(pResource); +} + +static inline NV_STATUS binapiprivInternalControlForward_DISPATCH(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__binapiprivInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void binapiprivPreDestruct_DISPATCH(struct BinaryApiPrivileged 
*pResource) { + pResource->__binapiprivPreDestruct__(pResource); +} + +static inline NV_STATUS binapiprivUnmapFrom_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__binapiprivUnmapFrom__(pResource, pParams); +} + +static inline void binapiprivControl_Epilogue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__binapiprivControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS binapiprivControlLookup_DISPATCH(struct BinaryApiPrivileged *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__binapiprivControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS binapiprivMap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__binapiprivMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool binapiprivAccessCallback_DISPATCH(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__binapiprivAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS binapiprivConstruct_IMPL(struct BinaryApiPrivileged *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_binapiprivConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiprivConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_BINARY_API_NVOC_H_ diff --git a/src/nvidia/generated/g_bindata.c b/src/nvidia/generated/g_bindata.c new file mode 100644 index 000000000..1e4c2499e --- /dev/null +++ b/src/nvidia/generated/g_bindata.c @@ -0,0 +1,151 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! */ + +#include "core/bin_data.h" +#include "rmflcnbl.h" +#include "nvBinSegment.h" +#include "../src/kernel/core/bin_data_pvt.h" + +// +// Forward declarations. 
+// These are needed for generating the bindata access functions, which use +// "struct X" as a parameter. This is easier than #include-ing all the necessary +// headers. TODO: consider sharing a list of forward struct declarations with +// e.g. eng_desc.h? +// +struct KernelGsp; +struct KernelSec2; + +// +// Default segment for bindata to put the data arrays to paged memory. +// This segment is never locked. +// +CONS_SEGMENT("PAGErGEN") + +#if !defined(NVLOG_PARSING) + +struct BINDATA_STORAGE_PVT_ALL +{ +#define BINDATA_NO_SEGMENTS +#define BINDATA_INCLUDE_STORAGE_PVT_DECL + +#include "g_bindata_kgspGetBinArchiveGspRmBoot_TU102.c" +#include "g_bindata_kgspGetBinArchiveGspRmBoot_GA100.c" +#include "g_bindata_kgspGetBinArchiveGspRmBoot_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c" +#include "g_bindata_ksec2GetBinArchiveBlUcode_TU102.c" + +#undef BINDATA_INCLUDE_STORAGE_PVT_DECL +#undef BINDATA_NO_SEGMENTS +}; + +// First include the data as it's referenced by storage_pvt +// Also useful to keep the NvU8 data packed together so we can merge multiple +// buffers into a single chunk for allocations. 
+#define BINDATA_INCLUDE_DATA + +#include "g_bindata_kgspGetBinArchiveGspRmBoot_TU102.c" +#include "g_bindata_kgspGetBinArchiveGspRmBoot_GA100.c" +#include "g_bindata_kgspGetBinArchiveGspRmBoot_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c" +#include "g_bindata_ksec2GetBinArchiveBlUcode_TU102.c" + +#undef BINDATA_INCLUDE_DATA + +BINDATA_CONST struct BINDATA_STORAGE_PVT_ALL g_bindata_pvt = +{ +#define BINDATA_NO_SEGMENTS +#define BINDATA_INCLUDE_STORAGE_PVT_DEFN + +#include "g_bindata_kgspGetBinArchiveGspRmBoot_TU102.c" +#include "g_bindata_kgspGetBinArchiveGspRmBoot_GA100.c" +#include "g_bindata_kgspGetBinArchiveGspRmBoot_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c" +#include "g_bindata_ksec2GetBinArchiveBlUcode_TU102.c" + +#undef BINDATA_INCLUDE_STORAGE_PVT_DEFN +#undef BINDATA_NO_SEGMENTS +}; + +const NvU32 g_bindata_pvt_count = sizeof(g_bindata_pvt) / sizeof(BINDATA_STORAGE_PVT); + +// Lastly, include everything else.. 
+#define BINDATA_INCLUDE_ARCHIVE +#define BINDATA_INCLUDE_FUNCTION + +#include "g_bindata_kgspGetBinArchiveGspRmBoot_TU102.c" +#include "g_bindata_kgspGetBinArchiveGspRmBoot_GA100.c" +#include "g_bindata_kgspGetBinArchiveGspRmBoot_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterLoadUcode_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterReloadUcode_GA102.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU102.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU116.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA100.c" +#include "g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c" +#include "g_bindata_ksec2GetBinArchiveBlUcode_TU102.c" + +#undef BINDATA_INCLUDE_FUNCTION +#undef BINDATA_INCLUDE_ARCHIVE + +#endif // !defined(NVLOG_PARSING) diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_GA100.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_GA100.c new file mode 100644 index 000000000..af40f7cb0 --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_GA100.c @@ -0,0 +1,3455 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36352 +// COMPRESSED SIZE (bytes): 24092 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_image_dbg_data[] = +{ + 0xed, 0xfc, 0x43, 0x90, 0x68, 0x3d, 0xdb, 0x00, 0xec, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xdd, + 0xbb, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xb6, 0xb1, 0xdb, 0xb6, 0x6d, 0xdb, 0xf6, 0x79, 0xbe, 0x53, + 0x75, 0x26, 0xef, 0xe4, 0x4c, 0xff, 0xbf, 0x6a, 0x5f, 0x93, 0x3b, 0x49, 0xd5, 0xca, 0xca, 0x20, + 0x6b, 0x55, 0xa5, 0xee, 0x24, 0x92, 0x00, 0x08, 0x33, 0x19, 0x00, 0x71, 0x00, 0xe0, 0x00, 0xdd, + 0x47, 0xf9, 0xef, 0x2d, 0x3f, 0x80, 0x92, 0x00, 0x88, 0xff, 0xd7, 0x00, 0x01, 0x0e, 0xf0, 0xff, + 0xad, 0x46, 0x00, 0x00, 0x00, 0xfe, 0x24, 0x03, 0xf8, 0xd2, 0x03, 0x00, 0xbc, 0x01, 0xbd, 0x01, + 0xbc, 0x03, 0xc5, 0x00, 0x00, 0x01, 0xf4, 0xee, 0xec, 0xfe, 0xfe, 0xfe, 0xc2, 0x47, 0x00, 0x00, + 0x02, 0xfc, 0xe6, 0x81, 0x34, 0x1d, 0x01, 0x30, 0xa7, 0xaf, 0x03, 0x36, 0x25, 0x03, 0xd0, 0xa4, + 0xaf, 0x03, 0xfd, 0x17, 0x48, 0xd3, 0xd7, 0x81, 0xff, 0x0b, 0x78, 0xff, 0x3d, 0x0b, 0x90, 0x9e, + 0x07, 0xdc, 0xb2, 0x0b, 0xdc, 0x9b, 0x97, 0xbf, 0x9b, 0x9e, 0x07, 0x98, 0x9e, 0x01, 0xd4, 0xb2, + 0x0b, 0xd8, 0xb2, 0x0e, 0xe4, 0xbb, 0x0b, 0x00, 0x00, 0xcf, 0x28, 0x09, 0x80, 0xf0, 0x9a, 0x07, + 0xd0, 0x9d, 0x1c, 0xf7, 0x5f, 0x97, 0x2d, 0xef, 0x01, 0x2d, 0xef, 0xfe, 0x11, 0x00, 0xc1, 0x20, + 0xff, 0x35, 0xc1, 0x03, 0xb6, 0x67, 0x00, 0xe0, 0x02, 0xfd, 0x5f, 0x09, 0xf8, 0xbf, 0x12, 0x20, + 0xd0, 0xff, 0xaf, 0x0d, 0xec, 0xff, 0x4a, 0xff, 0x75, 0x0b, 0x94, 0x02, 0x0f, 0xb1, 0xf3, 0x7f, + 0x43, 0xf8, 0x7e, 0x07, 0x8d, 0x00, 0xd0, 0x07, 0xf8, 0xce, 0x05, 0xe9, 0x8a, 0x07, 0x6a, 0x4d, + 0x46, 0xf8, 0xaf, 0xfe, 0xf3, 0x01, 0xe0, 0xfb, 0xdf, 0xc0, 0xde, 0x80, 0xba, 0x8b, 0x3f, 0x01, + 0xe3, 0xfa, 0xc6, 0xa1, 0x23, 0x08, 0x68, 0x01, 0xf2, 0xf3, 0xde, 0x00, 0xe2, 0xfe, 0x8b, 0xbd, + 0xbf, 0x4f, 0x0c, 0x5f, 0x3f, 0x92, 0x80, 0xf9, 0x79, 0x7c, 0xef, 0x00, 0xff, 0x07, 0x1d, 0xe4, + 0xf2, 0x16, 0xfb, 0x0b, 0x3a, 0xfe, 0x67, 0x52, 0xe2, 0x10, 0x2d, 0x97, 0x59, 0xc9, 0xbf, 0xe8, + 0xc9, 0x1e, 0xaf, 0xcb, 0x97, 0xfa, 0xca, 0x70, 0x40, 0xfc, 0x2d, 0xeb, 0xbd, 0xeb, 0xa4, 0x2b, + 0x65, 0x34, 0x86, 0x3a, 0xa4, 0x03, 0xb2, 0xda, 0x3b, 0xd5, 0xfa, 0x9a, 0x62, 0xa3, 0x6d, 0x43, + 0xd8, 0x02, 0x3b, 0x89, 0x25, 0x00, 0xb2, 0x3e, 0x94, 0xaf, 0xa1, 0x06, 0xdb, 0x73, 0xbd, 0xac, + 0x50, 0xf1, 0x62, 0xa8, 0xfa, 0xe4, 0xf0, 0x4a, 0x5b, 0x98, 0x3b, 0x11, 0x5d, 0xcf, 0x6a, 0xb8, + 0xeb, 0x04, 0x52, 0xe3, 0x4e, 0x10, 0x8c, 0x53, 0x66, 0x20, 0x72, 0x76, 0xd1, 0x57, 0xee, 0x80, + 0x04, 0x5e, 0xd4, 0x15, 0x8a, 0x02, 0xc6, 0x60, 0x86, 0x43, 0x03, 0xa3, 0xde, 0xaf, 0x49, 0x7b, + 0x5d, 0xcd, 0x91, 0xd0, 0x19, 0x0b, 0x48, 0x11, 0x22, 0x05, 0xb3, 0x13, 0x86, 0xef, 0xa0, 0xa5, + 0xf4, 0xe1, 0x09, 0xa9, 0xd8, 0x7c, 0x5c, 0x7e, 0xee, 0x01, 0x4c, 0xaf, 0xb8, 0xeb, 0x32, 0x20, + 0xf5, 0xd7, 0x18, 0x12, 0x36, 0xcb, 0xa9, 0xfc, 0xf0, 0xc9, 0xbb, 0xc4, 0xef, 0x3e, 0xa1, 0xbf, + 0x86, 0xc1, 0x7f, 0x7e, 0x64, 0x0d, 0x58, 0xdb, 0xba, 0xe0, 0xcc, 0x15, 0x23, 0xa2, 0xc5, 0x38, + 0x3b, 0xf0, 0xbd, 0x7a, 0xb4, 0x24, 0x9a, 0xc6, 0x0d, 0xdd, 0x9f, 0x86, 0xb4, 0x44, 0xa6, 0x5a, + 0x49, 0x32, 0x0e, 0x7c, 0x52, 0x35, 0x45, 0xa3, 0x14, 0x18, 0x0c, 0xdd, 0x88, 0xb2, 0x65, 0x55, + 0x50, 0x65, 0x34, 0xba, 0x61, 0xd1, 0x73, 0xe5, 0xdd, 0x3a, 
0xf9, 0x26, 0x84, 0xd9, 0x21, 0x3e, + 0x60, 0x6d, 0xb4, 0x80, 0xab, 0x36, 0x32, 0x3b, 0x8f, 0x92, 0xb6, 0x4a, 0xe1, 0xe3, 0x9d, 0x1a, + 0xdb, 0x24, 0xff, 0xea, 0xfa, 0x39, 0x6d, 0xca, 0xac, 0xdd, 0x4b, 0xa5, 0xed, 0x55, 0xa7, 0x9b, + 0x54, 0xf0, 0x94, 0x85, 0x89, 0x03, 0xac, 0xd4, 0x35, 0x2a, 0xf3, 0x8e, 0x45, 0x4b, 0x27, 0xf3, + 0x68, 0x4a, 0x99, 0x75, 0x3c, 0xc0, 0x15, 0x68, 0x75, 0x05, 0xf9, 0xd8, 0x53, 0x8c, 0xd9, 0x21, + 0xa9, 0x69, 0x9e, 0x8d, 0xcc, 0x9a, 0x9e, 0x14, 0x6f, 0xad, 0x46, 0xd2, 0xd7, 0x77, 0x37, 0x64, + 0x64, 0x7e, 0xeb, 0x7d, 0x11, 0xaf, 0x77, 0x6f, 0x84, 0x32, 0xf0, 0x5b, 0xa8, 0x94, 0xb8, 0x17, + 0x5a, 0x9f, 0xd5, 0x3f, 0xe9, 0xaf, 0xf0, 0xab, 0x6a, 0x6b, 0x51, 0xab, 0xd4, 0xaa, 0x38, 0xae, + 0x3e, 0xff, 0xe0, 0xf5, 0x3e, 0x84, 0x06, 0xc1, 0xcc, 0x2b, 0x3f, 0x03, 0xa4, 0xbe, 0xe6, 0x78, + 0xa5, 0xd6, 0x89, 0x24, 0xc7, 0x9f, 0xd0, 0x05, 0xa0, 0x1a, 0x79, 0xe9, 0xd7, 0xb4, 0xd2, 0x02, + 0x75, 0x68, 0x45, 0x8a, 0x3a, 0x70, 0x7f, 0xf7, 0x9c, 0x36, 0xbd, 0x43, 0xa9, 0xaa, 0xba, 0xcd, + 0xc5, 0x4d, 0x77, 0xd7, 0xd0, 0x36, 0xde, 0x36, 0x6a, 0x84, 0xf3, 0x4a, 0x5c, 0x43, 0x3a, 0x14, + 0x50, 0x71, 0x5c, 0x26, 0x6e, 0xd1, 0xe4, 0x42, 0xad, 0xde, 0x27, 0xd9, 0x8a, 0xa8, 0x96, 0x7a, + 0x06, 0x36, 0xa4, 0x7c, 0xf2, 0x9c, 0x49, 0xdc, 0x18, 0x4c, 0xba, 0x8b, 0x15, 0xd0, 0xde, 0xfe, + 0xe7, 0xc0, 0x41, 0x9d, 0x60, 0x28, 0x1e, 0x93, 0x83, 0x64, 0xd6, 0xbf, 0xbf, 0xe4, 0xd5, 0xfb, + 0x20, 0x34, 0xb2, 0x24, 0x73, 0xea, 0x31, 0x20, 0x34, 0x53, 0x4d, 0xfe, 0xda, 0x4a, 0x71, 0xfb, + 0x4b, 0x7c, 0x5e, 0x72, 0x8e, 0x4e, 0xe7, 0x01, 0x1f, 0x94, 0x93, 0x4b, 0xdd, 0xe1, 0xda, 0x4b, + 0x65, 0x46, 0x8c, 0xeb, 0x34, 0x6e, 0x32, 0x53, 0xb4, 0xe9, 0x67, 0xbc, 0x3b, 0x01, 0xb2, 0x69, + 0x07, 0x46, 0xee, 0x19, 0xef, 0x85, 0xc3, 0xde, 0x44, 0x9c, 0x61, 0x4b, 0x18, 0xdd, 0x84, 0x52, + 0xfb, 0xba, 0x12, 0x69, 0x53, 0x7f, 0x7e, 0x33, 0x70, 0x31, 0x56, 0x45, 0x01, 0x2b, 0xb1, 0xea, + 0xb2, 0x2b, 0xb2, 0xae, 0xf0, 0xcf, 0x77, 0x1f, 0xb8, 0x92, 0x7f, 0x7e, 0x1a, 0x7f, 0x7e, 0x3b, + 0x3e, 0x5e, 0xe4, 0x9a, 0x98, 0x16, 0x69, 0x4a, 0xc3, 0x34, 0x68, 0xa9, 0xc8, 0x57, 0x39, 0xe1, + 0x4f, 0xa8, 0xf2, 0x31, 0xb7, 0x48, 0xb1, 0x7d, 0x3a, 0x02, 0x0d, 0x30, 0x02, 0x63, 0x71, 0xd3, + 0x97, 0x7b, 0x6b, 0x0e, 0x37, 0x32, 0xba, 0xcd, 0x83, 0x2c, 0x0d, 0xa3, 0x05, 0x2b, 0xec, 0x5a, + 0x77, 0xe7, 0xaa, 0x46, 0xf6, 0x5d, 0x65, 0xb7, 0xf3, 0x27, 0x9a, 0xf1, 0x17, 0xe6, 0x44, 0x3f, + 0xbe, 0x12, 0xfd, 0x15, 0xbc, 0x03, 0xc7, 0x47, 0x24, 0x87, 0xdd, 0x4d, 0x63, 0x00, 0x7f, 0xa7, + 0x10, 0x22, 0x9c, 0x9e, 0x4c, 0xa7, 0xb0, 0x1f, 0xe1, 0x78, 0xab, 0x0b, 0x39, 0xa2, 0x6f, 0x64, + 0x96, 0xee, 0xe0, 0xcb, 0x96, 0x3c, 0x1d, 0x73, 0xa2, 0xeb, 0xcb, 0xee, 0xf5, 0x9c, 0x9f, 0x2a, + 0xa0, 0x0a, 0x8f, 0x5c, 0x3a, 0x47, 0x97, 0x0d, 0xbf, 0x26, 0x6a, 0xcf, 0x9b, 0x2a, 0xd7, 0xe3, + 0x07, 0xa2, 0x47, 0x4b, 0xa1, 0x91, 0xbb, 0x57, 0xff, 0x26, 0x69, 0x8f, 0x2c, 0xeb, 0x27, 0xd0, + 0x95, 0xb5, 0xee, 0xd0, 0xca, 0xec, 0xd3, 0xda, 0xb0, 0xd9, 0xca, 0x53, 0x61, 0x20, 0xab, 0x9b, + 0x58, 0x9c, 0x3e, 0xf7, 0x27, 0x70, 0x0a, 0x95, 0x10, 0x6f, 0xb9, 0x31, 0x1b, 0x12, 0xbf, 0xe5, + 0x05, 0x18, 0xad, 0xeb, 0x13, 0xcb, 0xcf, 0x15, 0x12, 0x2e, 0xed, 0xf7, 0x68, 0xdf, 0xa3, 0x8a, + 0x68, 0x1c, 0xeb, 0x6a, 0xdf, 0x1f, 0xac, 0x4b, 0xba, 0xd9, 0x9d, 0x7e, 0x59, 0xa3, 0x1a, 0xec, + 0x13, 0xed, 0x7d, 0x73, 0xf4, 0x9b, 0xb6, 0x5a, 0x70, 0x6e, 0x89, 0x99, 0xcf, 0x88, 0x0d, 0x3c, + 0xa3, 0x79, 0x84, 0xa2, 0x8c, 0x14, 0xc1, 0x5e, 0x04, 0xfb, 0x82, 0xf0, 0x5a, 0x4b, 0x9c, 0xb5, + 0xa1, 0xc6, 0x7e, 0x42, 0xe7, 0xd9, 0xa9, 0x90, 0x43, 0x97, 0x44, 0x8c, 0x29, 0xda, 
0x1b, 0x1e, + 0x1c, 0x7d, 0xf8, 0x83, 0xf7, 0xe8, 0xb3, 0x1f, 0xa5, 0xc8, 0x1d, 0xaf, 0x59, 0x99, 0xcb, 0xee, + 0x01, 0xed, 0xe1, 0x20, 0x1e, 0xb4, 0xa4, 0xf8, 0xed, 0x9c, 0x9b, 0x49, 0x8f, 0xd4, 0x10, 0x0c, + 0xba, 0xd4, 0x40, 0x60, 0x09, 0xb8, 0x6b, 0x5c, 0xf1, 0x46, 0x29, 0xc2, 0xb0, 0x1a, 0x02, 0x37, + 0xf8, 0x37, 0x90, 0x0f, 0x67, 0x8f, 0xdf, 0xe7, 0x48, 0xba, 0x44, 0x48, 0x69, 0x18, 0xa7, 0xf7, + 0x54, 0xe9, 0x86, 0x2a, 0xb7, 0xef, 0x86, 0xce, 0x31, 0xe2, 0x87, 0x39, 0x7a, 0x68, 0x93, 0x23, + 0x15, 0x0d, 0x5a, 0xbe, 0x46, 0xc5, 0x61, 0x24, 0x02, 0xec, 0xeb, 0x5e, 0xe0, 0x1c, 0x16, 0x90, + 0x05, 0xfd, 0xf6, 0x9d, 0xdc, 0xad, 0x0a, 0xbe, 0x27, 0x87, 0xd5, 0x81, 0x36, 0x1a, 0x25, 0x89, + 0x31, 0x92, 0xbb, 0x27, 0x0c, 0x20, 0x6f, 0xa7, 0xa7, 0x05, 0x36, 0x33, 0xaa, 0x5b, 0x5f, 0x44, + 0xb4, 0xc2, 0xf5, 0xc1, 0x47, 0xc1, 0xe3, 0xd3, 0x16, 0x37, 0x8c, 0x51, 0x83, 0x8d, 0x3a, 0x97, + 0x11, 0x63, 0x2d, 0xde, 0x6d, 0xd6, 0x3a, 0x75, 0xdc, 0xcf, 0xba, 0x4b, 0x48, 0x2d, 0xc5, 0x82, + 0xbb, 0x34, 0xd2, 0xb2, 0x63, 0x0e, 0x54, 0xd5, 0x98, 0x4c, 0x7d, 0x03, 0x56, 0x3b, 0xaa, 0xfb, + 0x9b, 0x52, 0x21, 0xaf, 0xa4, 0xdc, 0x64, 0xf0, 0xb5, 0x6f, 0xba, 0x0e, 0xa9, 0xcc, 0x2e, 0x27, + 0x96, 0x23, 0x2e, 0x1a, 0x3a, 0x57, 0xef, 0x27, 0x60, 0x1a, 0x7d, 0x0c, 0x66, 0x51, 0x03, 0xd8, + 0x02, 0x24, 0x02, 0xfa, 0x79, 0x0f, 0x48, 0xd3, 0x33, 0x50, 0xb0, 0xb8, 0x63, 0xdb, 0x5f, 0x7f, + 0x14, 0xba, 0xd3, 0x6e, 0x7f, 0x79, 0xca, 0x92, 0x25, 0x77, 0x36, 0xcc, 0x70, 0x93, 0x73, 0xb2, + 0xe2, 0xdb, 0xee, 0xbf, 0x1a, 0xba, 0xc1, 0xff, 0xb8, 0x67, 0x39, 0x3b, 0x79, 0xc4, 0xeb, 0xe9, + 0xa3, 0xca, 0x7d, 0x38, 0x86, 0xbc, 0x8b, 0xe1, 0x8e, 0xd1, 0xc7, 0xe3, 0xc9, 0x91, 0x98, 0x45, + 0xfb, 0x1b, 0xfb, 0x8b, 0x21, 0xfd, 0x4a, 0x0c, 0x9d, 0x98, 0x80, 0x2d, 0x5c, 0x57, 0xd2, 0xb4, + 0xa9, 0x02, 0x7c, 0x59, 0xed, 0x1c, 0x67, 0x79, 0x9c, 0x73, 0xdb, 0x67, 0x8a, 0x00, 0xae, 0x1a, + 0x89, 0xa5, 0x45, 0x09, 0x14, 0xf7, 0xcd, 0x41, 0x9b, 0x0d, 0x30, 0xdd, 0x3c, 0xc5, 0x6d, 0xa2, + 0x9f, 0x36, 0x29, 0x8d, 0x0d, 0x97, 0x11, 0x01, 0x8e, 0x1d, 0xb9, 0x70, 0xe8, 0x19, 0xcc, 0x32, + 0x33, 0x29, 0xaa, 0x63, 0x72, 0xee, 0x0a, 0x8d, 0xb0, 0x2f, 0xf8, 0x97, 0x14, 0x34, 0xe9, 0x95, + 0xb2, 0x96, 0xf1, 0x26, 0xa8, 0xf1, 0xf6, 0xc6, 0x58, 0x10, 0x85, 0x97, 0x99, 0x3a, 0xe9, 0x8d, + 0x40, 0x6b, 0x14, 0x7e, 0xdd, 0xd8, 0x45, 0x89, 0x71, 0xb5, 0x7f, 0x07, 0xa5, 0xc7, 0x3a, 0xf0, + 0xe0, 0x91, 0x37, 0xac, 0x7c, 0x20, 0x70, 0xa8, 0xb1, 0x24, 0xd8, 0x58, 0x03, 0xc2, 0x01, 0xfd, + 0xa5, 0xf3, 0x4f, 0x58, 0x5c, 0x8a, 0x9f, 0xde, 0xe9, 0x87, 0x96, 0x77, 0xa5, 0xcc, 0x2a, 0x35, + 0xbc, 0xd2, 0xe0, 0xb2, 0x9a, 0xc0, 0xea, 0x3f, 0x15, 0xd8, 0xc6, 0x9a, 0xd9, 0x8c, 0xbc, 0x2b, + 0xf0, 0x5c, 0xd7, 0x5f, 0x1a, 0xc4, 0xb4, 0x79, 0xd8, 0x73, 0xe5, 0x8e, 0x6e, 0x5c, 0x3b, 0xb8, + 0x79, 0xc9, 0xb5, 0x1d, 0x99, 0xbd, 0xa4, 0x16, 0x98, 0xb8, 0x39, 0xc2, 0xe4, 0x5a, 0x5e, 0x7e, + 0x54, 0x71, 0xa4, 0x07, 0x80, 0x2f, 0x58, 0x08, 0x51, 0x37, 0x33, 0xb4, 0x1f, 0x1c, 0x9a, 0xdd, + 0x16, 0xdd, 0x24, 0x28, 0xdf, 0xff, 0x60, 0x3b, 0xca, 0x3e, 0xae, 0x50, 0x6c, 0x86, 0x46, 0xe2, + 0xf2, 0xb4, 0x04, 0x6c, 0xe2, 0x59, 0xbc, 0xf2, 0xe0, 0x16, 0x23, 0x10, 0xc6, 0x24, 0xc0, 0x2c, + 0xeb, 0x59, 0x00, 0x0a, 0x55, 0x80, 0x27, 0xab, 0x73, 0x64, 0x8d, 0x6b, 0x9b, 0xf9, 0xf9, 0x7a, + 0xc0, 0xb4, 0x17, 0x9f, 0xb5, 0x9d, 0xed, 0x57, 0x28, 0x5c, 0x3d, 0x32, 0x2b, 0xf3, 0xc1, 0x03, + 0xd8, 0xc1, 0xbb, 0xb7, 0x42, 0x3c, 0x41, 0xe9, 0x29, 0x0d, 0xcb, 0x5c, 0x9c, 0xcb, 0xce, 0xf4, + 0x1f, 0xe3, 0x17, 0xd9, 0xc2, 0x2c, 0xd4, 0x05, 0x41, 0xc8, 0xf9, 0x16, 0x75, 0x67, 0x19, 0x64, + 0x98, 0x59, 
0x00, 0x3a, 0x19, 0x1b, 0x37, 0xd4, 0xba, 0x1b, 0x40, 0x4e, 0x3f, 0x68, 0x93, 0xe8, + 0x66, 0x69, 0x06, 0x53, 0x82, 0x12, 0xb7, 0x85, 0x97, 0xf8, 0xfe, 0x95, 0x69, 0x0c, 0x69, 0x0c, + 0x2e, 0xd6, 0x53, 0xa7, 0x6e, 0x38, 0x8c, 0x24, 0xd2, 0x8d, 0xf8, 0xc8, 0xc1, 0xd2, 0x46, 0x7f, + 0xf2, 0xed, 0x96, 0x9e, 0x46, 0x02, 0x61, 0x3e, 0x88, 0x07, 0xd0, 0x5d, 0x7d, 0x5a, 0x09, 0xd6, + 0xaf, 0xab, 0xdf, 0x5d, 0x97, 0x69, 0x49, 0x68, 0xb4, 0x2f, 0xc1, 0x82, 0x7a, 0x69, 0xcb, 0x36, + 0x1b, 0x9a, 0xeb, 0x64, 0xad, 0x1f, 0x42, 0xaf, 0x7d, 0xb6, 0x0f, 0x96, 0x11, 0xd7, 0xd1, 0xe1, + 0x9c, 0x49, 0xaa, 0x58, 0x34, 0xda, 0xb0, 0x83, 0xb0, 0x0a, 0xa3, 0xb2, 0xdf, 0x4c, 0x5b, 0x1c, + 0xd4, 0x7e, 0x69, 0xf4, 0xe3, 0xcb, 0xa2, 0xb7, 0xf1, 0x45, 0x55, 0x6e, 0x5b, 0xa7, 0x2c, 0x66, + 0x4b, 0x24, 0x41, 0x2d, 0xb7, 0xe3, 0x67, 0xb6, 0xed, 0x45, 0xeb, 0x04, 0xc1, 0xe2, 0x9a, 0x49, + 0x84, 0x7b, 0x3b, 0xbf, 0x68, 0x7d, 0x11, 0xf1, 0xf2, 0x67, 0x5d, 0x43, 0x4a, 0x0d, 0xb6, 0x0e, + 0x76, 0x4f, 0xe7, 0x47, 0x3a, 0x88, 0x24, 0x3b, 0x83, 0xb3, 0x9e, 0xef, 0x58, 0xe5, 0x1d, 0xf1, + 0xdc, 0x5b, 0xbf, 0x05, 0x4e, 0x4e, 0x90, 0x07, 0xd3, 0x20, 0x47, 0x57, 0xec, 0x0c, 0x95, 0x09, + 0x39, 0x7a, 0xf6, 0x90, 0xd6, 0xff, 0x55, 0x1f, 0xfa, 0xdd, 0xc9, 0xc9, 0x50, 0xa9, 0xb1, 0x9f, + 0xf4, 0xb0, 0xe2, 0x3b, 0x21, 0x88, 0xbc, 0x41, 0x61, 0xd9, 0xfa, 0xf1, 0xe7, 0xd4, 0x3e, 0x99, + 0x28, 0x14, 0xb8, 0x2b, 0xc6, 0xb0, 0x86, 0x0f, 0xaa, 0x27, 0x10, 0x81, 0x62, 0x3a, 0x66, 0x94, + 0xe9, 0xc3, 0xae, 0x76, 0x87, 0x47, 0xe3, 0xc6, 0xf7, 0x16, 0xf3, 0xf5, 0x13, 0xfc, 0x3c, 0xef, + 0x02, 0x33, 0x3f, 0x57, 0xe2, 0x0a, 0x9e, 0xdc, 0x39, 0x70, 0x11, 0x42, 0xfc, 0xf5, 0xad, 0x92, + 0xc5, 0x2d, 0x7a, 0xb1, 0x72, 0x85, 0x84, 0x39, 0x10, 0xf6, 0x4d, 0xaf, 0x74, 0x61, 0x4f, 0x63, + 0x30, 0x2a, 0xf0, 0xca, 0x57, 0x1a, 0xf7, 0x2b, 0x24, 0x25, 0x3e, 0x80, 0xec, 0x17, 0xa0, 0xa9, + 0x16, 0x4b, 0x80, 0x7a, 0x1a, 0xa1, 0x8e, 0xc2, 0x77, 0xac, 0x5f, 0x27, 0x38, 0xbc, 0x30, 0xde, + 0xd3, 0x5d, 0x49, 0xbe, 0x39, 0x88, 0x95, 0xb6, 0x54, 0xd8, 0x8b, 0x74, 0x9f, 0x7d, 0x70, 0x33, + 0x07, 0x7b, 0x22, 0xa1, 0xec, 0xba, 0xd9, 0xb1, 0x5d, 0xac, 0x67, 0xb6, 0xf1, 0x41, 0x05, 0x1e, + 0xbb, 0xab, 0xf0, 0xd4, 0x39, 0x2e, 0x33, 0x6e, 0x9d, 0xdc, 0x8e, 0xba, 0xb1, 0x35, 0xa0, 0xb1, + 0xc0, 0x6b, 0xdd, 0xd1, 0x89, 0x17, 0x18, 0x7f, 0xce, 0xc5, 0x20, 0xe9, 0xbc, 0x94, 0x55, 0xc8, + 0x1b, 0x8c, 0x76, 0x09, 0x25, 0x43, 0x8d, 0x7d, 0x2b, 0x25, 0x7d, 0x62, 0xb0, 0xbd, 0x69, 0x63, + 0x40, 0x4a, 0x5e, 0x3f, 0x67, 0xd6, 0x11, 0x86, 0x36, 0x0d, 0x30, 0x91, 0xa2, 0xd3, 0x96, 0xb4, + 0xe4, 0xd3, 0x86, 0x4b, 0xe7, 0xfb, 0x33, 0x10, 0x86, 0xa0, 0xe2, 0xd2, 0x95, 0xce, 0x60, 0x61, + 0x3e, 0x6f, 0xe4, 0x77, 0xac, 0x34, 0x88, 0x41, 0xa7, 0xea, 0x97, 0x0c, 0x92, 0x18, 0x17, 0x31, + 0x9f, 0x69, 0x42, 0x78, 0xf0, 0x45, 0x03, 0xa9, 0xda, 0x40, 0x44, 0x85, 0xb6, 0x12, 0xaf, 0x27, + 0x0d, 0x01, 0x14, 0xca, 0x34, 0x22, 0xd4, 0xb7, 0x3d, 0x84, 0x03, 0xdd, 0xa9, 0x74, 0x9e, 0xd5, + 0x0f, 0x48, 0xd1, 0xc0, 0xae, 0x15, 0x46, 0x21, 0xae, 0xbb, 0x92, 0x45, 0xea, 0xeb, 0xa4, 0xf6, + 0xeb, 0x7b, 0x7f, 0x5c, 0xed, 0x8c, 0x40, 0x85, 0xcd, 0xa1, 0xc9, 0xfa, 0x89, 0x6f, 0x97, 0xf6, + 0xd9, 0x99, 0x30, 0xa2, 0x31, 0x2f, 0xe8, 0xa9, 0x9a, 0x70, 0x1c, 0x8d, 0x99, 0x54, 0x2e, 0xdd, + 0xd0, 0xb2, 0xe0, 0xe4, 0x08, 0xa9, 0xd3, 0xaa, 0xee, 0x25, 0x18, 0x75, 0x71, 0xdd, 0xc5, 0xa9, + 0x64, 0xac, 0x94, 0x6b, 0xa1, 0x15, 0x40, 0xfe, 0x56, 0x9f, 0xc8, 0x49, 0x4b, 0x52, 0x33, 0xbf, + 0x1b, 0x27, 0xf2, 0x6c, 0x2b, 0xba, 0x2e, 0x0d, 0xc8, 0xbd, 0x48, 0x6f, 0x85, 0x6b, 0xc3, 0xe5, + 0xdd, 0xc8, 0x46, 0xfc, 0xd2, 0xc7, 
0x8e, 0x3c, 0x6d, 0x85, 0xa1, 0x09, 0x45, 0x30, 0xc9, 0xce, + 0x49, 0xae, 0xc0, 0x85, 0xc1, 0x17, 0x49, 0xcb, 0x91, 0x80, 0x93, 0xc1, 0x6e, 0x99, 0x44, 0xc9, + 0x63, 0x0b, 0xb8, 0x7f, 0x2c, 0x70, 0x93, 0x29, 0x9b, 0xe4, 0x5d, 0xe4, 0x3f, 0x67, 0x23, 0x94, + 0x50, 0xfe, 0x41, 0xd0, 0xd1, 0x8b, 0xb7, 0x56, 0x46, 0x7f, 0x1b, 0x1a, 0xdf, 0x54, 0xeb, 0x01, + 0xa8, 0x71, 0x2f, 0x69, 0xa5, 0x39, 0x47, 0x69, 0x26, 0x70, 0x32, 0x89, 0x5e, 0xdc, 0x08, 0x33, + 0xdb, 0xe3, 0xca, 0xff, 0x6c, 0x7a, 0x61, 0xd1, 0x2e, 0x9f, 0x9d, 0x00, 0xb1, 0xf9, 0xad, 0xc3, + 0x62, 0xc2, 0xc7, 0x1d, 0xf6, 0x55, 0x5d, 0xc0, 0x16, 0xde, 0x80, 0xb8, 0xb8, 0xd2, 0x06, 0xcc, + 0x18, 0xc5, 0xcf, 0x99, 0x98, 0x74, 0x83, 0x80, 0x47, 0xe8, 0x55, 0x36, 0x4d, 0x36, 0x78, 0xad, + 0x79, 0x43, 0xb8, 0xa3, 0x65, 0x09, 0x7a, 0x37, 0xb4, 0x21, 0x36, 0xed, 0xc7, 0x18, 0x5b, 0x26, + 0x1d, 0xaf, 0x54, 0xcf, 0xff, 0xea, 0x59, 0x03, 0x6b, 0x88, 0x7f, 0x50, 0xbb, 0xeb, 0xc6, 0x81, + 0x53, 0x56, 0x80, 0xe8, 0xd0, 0xd3, 0x2d, 0x78, 0xcc, 0x10, 0x2f, 0x4a, 0x06, 0x74, 0xe6, 0xe3, + 0xff, 0x8b, 0x71, 0x7d, 0x92, 0xa5, 0x3b, 0xbe, 0x72, 0x5f, 0x8e, 0x65, 0x0c, 0xa4, 0xdb, 0xc6, + 0xf3, 0x08, 0x82, 0x9b, 0xff, 0xda, 0xe4, 0x71, 0xe1, 0x54, 0x49, 0x15, 0x9d, 0xe6, 0xa0, 0x19, + 0x11, 0x39, 0x7d, 0x9f, 0x66, 0x8a, 0xa6, 0x22, 0xaa, 0x5c, 0x5a, 0x26, 0x09, 0xcb, 0xe0, 0x2a, + 0x52, 0x82, 0x9a, 0x02, 0x02, 0xfa, 0x30, 0x54, 0x7e, 0xc9, 0x4b, 0x96, 0x2b, 0x14, 0x74, 0x5d, + 0x38, 0x02, 0xdb, 0x8a, 0x84, 0x8d, 0xdc, 0x10, 0x80, 0x52, 0xdc, 0xab, 0x43, 0x82, 0x99, 0x45, + 0x88, 0xcd, 0x6d, 0x98, 0x2b, 0x33, 0x79, 0xf7, 0x0e, 0x64, 0xfa, 0xf8, 0xba, 0x82, 0xfd, 0x38, + 0x93, 0x6c, 0x1f, 0x70, 0xeb, 0xb6, 0x26, 0xb3, 0x62, 0x7b, 0xe2, 0x60, 0xab, 0xa8, 0x50, 0x07, + 0x39, 0xab, 0x50, 0x9f, 0x65, 0x88, 0xde, 0x7a, 0xe4, 0x10, 0xf4, 0x99, 0xcd, 0xc4, 0xda, 0x7f, + 0x1d, 0x17, 0x3d, 0x94, 0xd4, 0xac, 0x84, 0xde, 0xb4, 0xb1, 0xab, 0xfc, 0xb5, 0x74, 0xf1, 0x36, + 0xbc, 0x21, 0x33, 0x63, 0xd5, 0x23, 0x92, 0x61, 0x2c, 0x35, 0xee, 0xa1, 0x4f, 0x80, 0x4b, 0x15, + 0x26, 0xc4, 0xda, 0x54, 0xdd, 0xc7, 0x46, 0xb3, 0x14, 0xfb, 0xc1, 0x69, 0x94, 0x0c, 0xb9, 0xe0, + 0xdb, 0x76, 0x5d, 0xb6, 0x13, 0x3f, 0x7a, 0xce, 0xa0, 0x28, 0xa3, 0x9b, 0xf7, 0x58, 0x99, 0xc6, + 0x39, 0x5d, 0xc5, 0x7b, 0x02, 0x4b, 0xeb, 0x3e, 0x18, 0x15, 0xac, 0xb0, 0x9b, 0x3d, 0x82, 0x87, + 0xcf, 0xc9, 0x6f, 0xab, 0x61, 0xa1, 0x65, 0x05, 0xb4, 0xa7, 0xd8, 0xc1, 0x22, 0x0f, 0xa0, 0xde, + 0xd0, 0xc2, 0x93, 0x1c, 0x32, 0x22, 0x24, 0xcd, 0x4d, 0xe4, 0x19, 0x44, 0x5e, 0x97, 0x0b, 0x43, + 0xf1, 0x52, 0x29, 0xa2, 0x5c, 0x58, 0x8e, 0x9d, 0xf7, 0x85, 0x63, 0x86, 0x4c, 0xb7, 0x55, 0xed, + 0x89, 0x75, 0x9c, 0xcb, 0xc0, 0x48, 0x69, 0x88, 0x54, 0x02, 0x0f, 0x2e, 0x54, 0x78, 0xa4, 0x7a, + 0xab, 0x73, 0xf4, 0x58, 0xf0, 0xae, 0xfc, 0x64, 0x19, 0xbf, 0xf3, 0xf8, 0x39, 0x6a, 0x76, 0x96, + 0x84, 0x00, 0xb8, 0x58, 0xf8, 0x22, 0x6b, 0xdb, 0xc0, 0x24, 0x74, 0x77, 0x92, 0x2f, 0x74, 0x71, + 0xce, 0x0b, 0xa9, 0x6d, 0xa6, 0x3b, 0x52, 0x20, 0x72, 0xa4, 0xcf, 0xd1, 0x3f, 0x10, 0xc4, 0x3f, + 0x93, 0x50, 0x43, 0x0e, 0x06, 0x99, 0xad, 0x0f, 0x33, 0x4a, 0x5c, 0x90, 0x4c, 0xaa, 0x93, 0xba, + 0x4e, 0x9f, 0x72, 0x78, 0x6f, 0x4f, 0x9b, 0x08, 0x8d, 0x9c, 0xbe, 0x2c, 0x83, 0x47, 0x49, 0xaa, + 0xc3, 0xd1, 0xe1, 0x30, 0xa2, 0xfe, 0xd5, 0x92, 0x49, 0x41, 0xfc, 0x23, 0xb1, 0x54, 0x84, 0x7b, + 0x10, 0x62, 0x90, 0xb2, 0xa2, 0x88, 0x8f, 0x1b, 0x69, 0xbc, 0xb9, 0xdf, 0xdd, 0xee, 0x05, 0x4e, + 0xb7, 0xeb, 0x68, 0x2c, 0x86, 0x7e, 0x91, 0x35, 0xe8, 0xfe, 0xd2, 0x53, 0x4a, 0xa8, 0xf1, 0x97, + 0x7b, 0x0b, 0x90, 0x7c, 0xc1, 0x70, 0xde, 0x4e, 0x42, 0x78, 
0x46, 0x41, 0xe0, 0xb0, 0xa6, 0xe3, + 0x86, 0x76, 0x56, 0x87, 0x49, 0xdb, 0xf3, 0x0b, 0x6a, 0x2f, 0xc7, 0xe0, 0x67, 0xb2, 0x68, 0xcd, + 0x88, 0x3a, 0x4a, 0xdd, 0x0f, 0xe4, 0x48, 0x0c, 0x93, 0x12, 0x66, 0x8f, 0x46, 0xac, 0x02, 0xd0, + 0x2b, 0x99, 0x73, 0x22, 0xbe, 0xae, 0xbe, 0xef, 0xf9, 0xf5, 0x1b, 0x5d, 0x34, 0x10, 0x49, 0x45, + 0x2f, 0x9b, 0xb7, 0x63, 0x1f, 0x63, 0xa1, 0xe0, 0x3a, 0x2a, 0xc7, 0xe5, 0x69, 0xec, 0x48, 0xfa, + 0xa7, 0xd7, 0xb6, 0x09, 0xc0, 0xa9, 0x0f, 0x8c, 0x6e, 0x15, 0xe9, 0x09, 0x6e, 0x4c, 0x15, 0x97, + 0x02, 0xe0, 0xf7, 0xc7, 0xcd, 0x83, 0xd7, 0x7a, 0xb5, 0x43, 0x81, 0xf1, 0x8f, 0xfe, 0x9b, 0x90, + 0x9a, 0xa0, 0x70, 0x78, 0x9b, 0xf8, 0xb6, 0x79, 0xca, 0xce, 0x84, 0x67, 0xc8, 0xe6, 0x3a, 0x74, + 0xb6, 0xd5, 0x1c, 0x90, 0xfb, 0xb3, 0x49, 0xd3, 0xc4, 0x5a, 0xd0, 0x87, 0x57, 0xf7, 0x78, 0x67, + 0x6c, 0xa5, 0xf7, 0xdc, 0x60, 0x51, 0xd7, 0x16, 0xb6, 0xce, 0xc5, 0xbe, 0x03, 0x20, 0xe8, 0x38, + 0x28, 0x4b, 0x1b, 0x00, 0x8b, 0xf5, 0x79, 0x76, 0x9c, 0x50, 0x6e, 0x1a, 0x14, 0x9d, 0x07, 0x1e, + 0xc7, 0xa1, 0xfc, 0xd7, 0x97, 0x7b, 0x9a, 0x51, 0x5e, 0x89, 0x8d, 0xda, 0xf5, 0x85, 0x2d, 0xb3, + 0x5a, 0x15, 0x0d, 0x23, 0xc5, 0xdb, 0x35, 0x33, 0x9a, 0xe2, 0x71, 0x7f, 0xe1, 0x77, 0x8b, 0xb6, + 0x70, 0x7b, 0x3b, 0x4a, 0x76, 0x4a, 0xe7, 0x97, 0xda, 0x22, 0x5e, 0xb7, 0x57, 0xff, 0x55, 0x2a, + 0x5d, 0x5d, 0x12, 0xd7, 0x75, 0x88, 0x2a, 0x42, 0xfa, 0xdb, 0x5b, 0x01, 0x13, 0x71, 0x7a, 0xac, + 0xf0, 0xf7, 0xa5, 0xa1, 0x63, 0xf8, 0x5e, 0xea, 0x53, 0x70, 0xc2, 0x9f, 0x22, 0x58, 0x38, 0xc2, + 0xb3, 0xea, 0x72, 0x16, 0x65, 0x62, 0x39, 0xf1, 0xa3, 0x36, 0xa2, 0x63, 0xa4, 0x58, 0xdb, 0xdd, + 0x92, 0x87, 0x65, 0x59, 0x78, 0xd5, 0x56, 0x8e, 0x6d, 0x40, 0x19, 0x1c, 0x71, 0x2a, 0xc3, 0x5f, + 0xcd, 0x88, 0x4b, 0x73, 0x48, 0xe0, 0xa2, 0x5e, 0x8d, 0x6c, 0x1c, 0xe2, 0xa8, 0xaa, 0x54, 0x49, + 0x30, 0x1f, 0x00, 0x2b, 0xca, 0x2b, 0x5c, 0x26, 0x70, 0x49, 0x1d, 0x63, 0x58, 0x3d, 0x4c, 0xe3, + 0xe5, 0x07, 0xdb, 0xa5, 0x8b, 0xac, 0x1f, 0x2e, 0x74, 0xf1, 0x3c, 0x58, 0x73, 0x77, 0x32, 0x8d, + 0x3a, 0x26, 0xf2, 0x70, 0x4e, 0xc1, 0xe6, 0xbb, 0xeb, 0x20, 0xa5, 0xd8, 0x12, 0x9f, 0xb7, 0x95, + 0x72, 0x32, 0x9e, 0xf7, 0xae, 0xa8, 0x03, 0x83, 0xaa, 0xa7, 0xcf, 0x89, 0x80, 0x62, 0x94, 0x1e, + 0x87, 0x5c, 0xd4, 0x94, 0x8b, 0x2c, 0x8d, 0xa1, 0xeb, 0x00, 0xed, 0x07, 0x8d, 0xdd, 0x91, 0x55, + 0x86, 0xb6, 0xe2, 0x53, 0x84, 0x6a, 0xf2, 0x47, 0xa2, 0xc4, 0x62, 0xea, 0xb8, 0x33, 0xe1, 0x8f, + 0x84, 0x57, 0xe7, 0xde, 0x1a, 0xe7, 0xd3, 0xd6, 0x7d, 0x5a, 0xc2, 0xa7, 0x88, 0x44, 0x86, 0xef, + 0xa9, 0x96, 0xa7, 0x5e, 0x1b, 0x33, 0xec, 0x02, 0x14, 0xd7, 0x7d, 0x31, 0x5f, 0x3d, 0x1f, 0xd6, + 0x4c, 0x2c, 0xa8, 0xd3, 0xf7, 0x0b, 0x79, 0x20, 0xc4, 0xaf, 0x22, 0x0c, 0x93, 0x4a, 0xaa, 0x80, + 0x61, 0x1e, 0xfe, 0x1b, 0x60, 0xf4, 0xa0, 0xd4, 0x1d, 0xd8, 0x03, 0x72, 0x80, 0x61, 0x5b, 0xd9, + 0xc7, 0xa2, 0x8b, 0xc5, 0xec, 0x15, 0x52, 0xca, 0x18, 0x3c, 0x03, 0x20, 0x1c, 0x06, 0xed, 0x3d, + 0xca, 0xe4, 0x72, 0x2f, 0x61, 0x28, 0x3b, 0x44, 0x50, 0x20, 0x38, 0x9a, 0xca, 0x20, 0xdd, 0xc4, + 0x30, 0xc5, 0x31, 0xe2, 0xb5, 0xf0, 0x36, 0xa6, 0x2c, 0x31, 0x5d, 0x5b, 0xc2, 0xc6, 0xf0, 0xcc, + 0x79, 0x9c, 0xac, 0x57, 0x7c, 0xa3, 0xca, 0x26, 0x6e, 0x24, 0x76, 0x79, 0xd7, 0xe7, 0x83, 0x99, + 0x1d, 0xd5, 0xd6, 0x8c, 0xd7, 0x66, 0x52, 0x91, 0xa1, 0x53, 0x5e, 0xc8, 0xaa, 0x40, 0xe6, 0xc4, + 0x97, 0x31, 0x2c, 0x5f, 0x00, 0xcb, 0xfe, 0x3f, 0xee, 0xe6, 0x40, 0x3a, 0x14, 0x58, 0xa8, 0x6c, + 0x4d, 0xb7, 0x3f, 0x8a, 0xc2, 0x6d, 0xb0, 0x39, 0x0c, 0x06, 0x3b, 0x48, 0x9a, 0xcb, 0x33, 0xf5, + 0x85, 0xfe, 0xb4, 0x10, 0x53, 0x56, 0x52, 0x3e, 0x71, 0x71, 0xfb, 0x59, 0x8a, 0x4b, 
0x16, 0xf9, + 0x16, 0xce, 0x82, 0x59, 0x68, 0x9f, 0x68, 0x99, 0x4d, 0x09, 0xc3, 0xb8, 0x04, 0x5f, 0x95, 0xf5, + 0x40, 0x2f, 0x2d, 0x8e, 0x73, 0xaa, 0x6b, 0x19, 0xbd, 0x98, 0x69, 0x57, 0x28, 0xee, 0xcd, 0x27, + 0x6c, 0x90, 0xc8, 0x52, 0x83, 0x51, 0x3c, 0x78, 0x0b, 0x4f, 0x2e, 0x5c, 0x7e, 0x68, 0x79, 0x96, + 0x16, 0xef, 0x83, 0xb1, 0xda, 0x6a, 0xd0, 0x61, 0x7f, 0x2b, 0x7e, 0x14, 0x1d, 0x81, 0xd0, 0xe7, + 0xc7, 0x97, 0x38, 0x45, 0x63, 0x33, 0x61, 0x60, 0x4b, 0x53, 0x3e, 0x84, 0xd8, 0xe7, 0x52, 0xd0, + 0x69, 0x3e, 0xd5, 0xce, 0x1d, 0x03, 0xe8, 0x0f, 0xfc, 0xf9, 0x94, 0x37, 0x78, 0x5e, 0x50, 0x8c, + 0x44, 0x75, 0x54, 0x8b, 0xe9, 0xc7, 0xeb, 0x5e, 0x0d, 0x46, 0xf6, 0xf8, 0x4f, 0x36, 0x0f, 0xa1, + 0x90, 0xf6, 0x96, 0xe6, 0x66, 0x42, 0x3f, 0xcd, 0x1f, 0x3f, 0x74, 0xfb, 0x3b, 0x07, 0x50, 0x9e, + 0x15, 0x6c, 0xd4, 0xa7, 0x6f, 0x0d, 0x26, 0xec, 0xfd, 0x1f, 0x77, 0x7a, 0x38, 0x1f, 0xd2, 0x44, + 0x88, 0x3c, 0x59, 0x4a, 0xd7, 0x93, 0xb4, 0x16, 0xc5, 0x44, 0x70, 0xeb, 0x02, 0xd7, 0x63, 0x43, + 0x33, 0xc2, 0xe5, 0x46, 0x94, 0x2e, 0x50, 0x5f, 0x72, 0x8c, 0x2e, 0x44, 0x9a, 0xe6, 0x99, 0x68, + 0x56, 0x9b, 0xac, 0xac, 0xb9, 0xd5, 0xef, 0x37, 0x77, 0xd7, 0x4d, 0x8c, 0x36, 0xc4, 0x04, 0x1a, + 0x24, 0x5a, 0x1e, 0xc3, 0x4e, 0x70, 0x5d, 0xeb, 0x7e, 0x93, 0x31, 0x3b, 0xd8, 0x27, 0x0b, 0xa2, + 0x5d, 0x41, 0x29, 0xcb, 0x33, 0x0e, 0x34, 0xcf, 0x57, 0x35, 0x27, 0xff, 0x10, 0xb9, 0x3d, 0x4c, + 0xef, 0x5b, 0x90, 0xdb, 0xc6, 0x12, 0x98, 0x09, 0x96, 0x7a, 0x96, 0xdb, 0x17, 0x26, 0xcf, 0x7b, + 0xd3, 0x80, 0x8e, 0x0c, 0x3f, 0xa2, 0xae, 0x18, 0xa1, 0xe8, 0x8b, 0x11, 0xf0, 0x3b, 0x95, 0xb7, + 0x5d, 0xc7, 0xc2, 0x7b, 0xb0, 0xac, 0x7e, 0xda, 0x88, 0x9c, 0x3f, 0xf9, 0x28, 0xd7, 0xf3, 0xd0, + 0x48, 0xd0, 0x95, 0x4c, 0x90, 0x89, 0x52, 0x3a, 0x08, 0xe2, 0xdf, 0xe8, 0x5b, 0xf1, 0x0a, 0xe1, + 0x8d, 0xbd, 0x06, 0x98, 0x28, 0xbb, 0xf5, 0x46, 0x59, 0x26, 0x51, 0xaf, 0xea, 0xe1, 0x8d, 0x44, + 0xc2, 0xd7, 0xb7, 0x16, 0xde, 0x80, 0xa2, 0x0a, 0x5c, 0xd5, 0xed, 0xaf, 0xbb, 0xd5, 0x04, 0x42, + 0x13, 0x9e, 0x8f, 0x99, 0xd6, 0xf3, 0x1a, 0x27, 0xd1, 0x51, 0xf4, 0x8f, 0xc3, 0xb8, 0xe1, 0x85, + 0xa3, 0x0b, 0x9e, 0x82, 0x6f, 0x49, 0xd2, 0x8b, 0x58, 0xcf, 0x08, 0xaa, 0x56, 0xd4, 0x6f, 0xe0, + 0x52, 0x6e, 0x89, 0xa6, 0xfb, 0x37, 0x32, 0x12, 0xd6, 0x9d, 0xdb, 0x9d, 0x27, 0xce, 0x23, 0x0b, + 0xdd, 0xb1, 0xae, 0xb3, 0xab, 0x40, 0x86, 0x6a, 0x29, 0xbd, 0x62, 0x6f, 0x71, 0xf8, 0xa0, 0x55, + 0x3d, 0x85, 0x44, 0x79, 0xae, 0xbf, 0xf8, 0x8b, 0x71, 0x3a, 0x75, 0x1e, 0xa8, 0x70, 0x3d, 0xa0, + 0x9f, 0x8f, 0xd2, 0x88, 0x13, 0x29, 0xfb, 0x5a, 0x6e, 0x24, 0xb7, 0xde, 0x35, 0x7b, 0x47, 0x57, + 0xdd, 0x32, 0x08, 0xeb, 0xd6, 0xbc, 0x39, 0xd1, 0x50, 0xaf, 0xf3, 0xcd, 0x5c, 0x70, 0x77, 0x8c, + 0x3b, 0x75, 0x49, 0xcf, 0x63, 0xe6, 0xf2, 0xe7, 0x7e, 0x81, 0xed, 0x6d, 0x4f, 0x0c, 0xd5, 0x21, + 0xe3, 0x16, 0x54, 0x4f, 0x49, 0xce, 0xa4, 0x46, 0xc9, 0x46, 0xa6, 0xca, 0x86, 0x01, 0x32, 0x19, + 0xd1, 0x33, 0x77, 0xa9, 0x40, 0x9b, 0xfd, 0x89, 0x8f, 0x6b, 0x6d, 0x98, 0xfc, 0x79, 0xaa, 0xfe, + 0xdf, 0xff, 0x7b, 0x5e, 0xe5, 0x38, 0xe2, 0x06, 0x00, 0x42, 0xb7, 0x33, 0x53, 0xdc, 0x3a, 0x84, + 0x47, 0x38, 0xbd, 0x2f, 0xd3, 0x53, 0x61, 0xac, 0x5c, 0x76, 0x1a, 0x73, 0x80, 0xcc, 0x43, 0xed, + 0xfa, 0x7c, 0x8f, 0x4d, 0x86, 0xd2, 0xb9, 0xc7, 0x43, 0xe5, 0xbb, 0x58, 0x43, 0xed, 0xc4, 0xf5, + 0xb9, 0x57, 0xbf, 0xfa, 0x5f, 0x31, 0x8d, 0x06, 0x50, 0x33, 0x6a, 0x95, 0x5b, 0xcd, 0x23, 0x0a, + 0xc8, 0x1e, 0xc1, 0x66, 0x54, 0x62, 0xab, 0x50, 0xfc, 0xd8, 0xb4, 0xf2, 0xd1, 0x4d, 0xfa, 0x6b, + 0xdf, 0x42, 0x87, 0x66, 0x79, 0x5f, 0xcf, 0x39, 0x2e, 0x8b, 0x72, 0x2c, 0xb7, 0xba, 0x0d, 0xfc, + 0x2c, 0x32, 
0x85, 0x56, 0x03, 0xf7, 0xd6, 0x69, 0x16, 0xf0, 0xbe, 0xeb, 0xfd, 0x98, 0x38, 0x84, + 0x35, 0xc5, 0xcd, 0x04, 0x42, 0xde, 0x82, 0x1d, 0xdc, 0x67, 0x7a, 0x66, 0xed, 0xd8, 0xaf, 0x0e, + 0x64, 0x7c, 0xc2, 0x04, 0xff, 0xf1, 0x75, 0x75, 0x05, 0xf8, 0xe6, 0x8f, 0xde, 0x67, 0x64, 0xe4, + 0xa3, 0x4d, 0x92, 0x16, 0x14, 0x70, 0xdb, 0x38, 0x20, 0x0f, 0x63, 0x5c, 0xb6, 0xd9, 0x3f, 0x42, + 0xd7, 0x65, 0xd3, 0x6b, 0x1b, 0x0a, 0x63, 0xd7, 0xdc, 0x58, 0x31, 0xc2, 0xc7, 0xbf, 0x11, 0xe9, + 0x8d, 0x07, 0x47, 0x77, 0x40, 0xae, 0x5a, 0x30, 0x62, 0x9b, 0xec, 0xa9, 0xe9, 0xc9, 0x14, 0x35, + 0x93, 0x65, 0xbc, 0x19, 0x17, 0x29, 0x7d, 0x27, 0x10, 0x32, 0x99, 0xa7, 0x53, 0x3f, 0x6b, 0x66, + 0x7d, 0x50, 0x45, 0x5d, 0x0f, 0xda, 0x44, 0xa3, 0x34, 0xf5, 0x49, 0x36, 0x6e, 0x9a, 0x64, 0x9a, + 0x0d, 0xa3, 0x95, 0x41, 0x9e, 0x39, 0x07, 0xd6, 0xa7, 0xbe, 0xcf, 0x98, 0xa6, 0xc7, 0xe7, 0x35, + 0x18, 0x83, 0x98, 0xca, 0xdd, 0x61, 0xb3, 0x44, 0x3e, 0x51, 0x01, 0x9a, 0xa7, 0xdf, 0xd6, 0xd9, + 0x67, 0x77, 0x92, 0x26, 0x8f, 0xaa, 0x19, 0xda, 0x62, 0xbd, 0x6f, 0xc6, 0x47, 0xfc, 0xe3, 0xb1, + 0x43, 0xe3, 0x40, 0xed, 0xcb, 0x57, 0xed, 0x01, 0x4b, 0xf0, 0xc0, 0xd5, 0xf3, 0x65, 0x36, 0x06, + 0x9f, 0x03, 0xae, 0x0f, 0xfa, 0x07, 0x5f, 0x63, 0x2b, 0x45, 0x34, 0x8a, 0xc5, 0x3d, 0x92, 0x6b, + 0x9f, 0x17, 0x32, 0xb4, 0x9c, 0xf4, 0x4e, 0x58, 0x7c, 0x42, 0x5e, 0xb3, 0x99, 0xac, 0x38, 0xe9, + 0x39, 0xeb, 0x3b, 0xd0, 0x4e, 0x0b, 0x42, 0xe7, 0x05, 0x4d, 0x5f, 0x57, 0x9d, 0x45, 0xe5, 0xc2, + 0xf7, 0x10, 0xed, 0x94, 0x7f, 0xe2, 0x23, 0x4b, 0xe9, 0xdf, 0x75, 0x4f, 0xf5, 0xb8, 0x73, 0x6e, + 0x58, 0x69, 0x5f, 0xc2, 0xc5, 0xc4, 0x16, 0xea, 0x4e, 0x12, 0x84, 0x7a, 0x11, 0x45, 0x29, 0x70, + 0x01, 0x1f, 0x41, 0x07, 0xfd, 0xee, 0x84, 0x76, 0xbd, 0x33, 0x5c, 0x6d, 0x95, 0x01, 0xb9, 0x4f, + 0x52, 0x58, 0x07, 0x67, 0xcb, 0xc3, 0xca, 0xbf, 0x7e, 0xa2, 0x8e, 0x50, 0xfe, 0xcc, 0x70, 0xeb, + 0x8c, 0x1b, 0x3b, 0xbf, 0xdd, 0xae, 0x1b, 0x5a, 0xd3, 0x75, 0x54, 0xd8, 0xa8, 0x48, 0xf8, 0x12, + 0x57, 0x68, 0x4a, 0xde, 0xa8, 0x97, 0x88, 0x91, 0x6e, 0x95, 0xf2, 0x0a, 0x3d, 0x3b, 0xee, 0x18, + 0xd8, 0x54, 0x04, 0xe8, 0x70, 0xe8, 0xa7, 0xbc, 0x91, 0x1c, 0x57, 0x8a, 0x30, 0x9d, 0x53, 0x07, + 0xf3, 0x4c, 0x29, 0xdc, 0x61, 0xc8, 0x8a, 0x5b, 0x08, 0x06, 0x8b, 0xd5, 0xdf, 0xc6, 0x17, 0x18, + 0x89, 0x3d, 0xe1, 0xa0, 0x95, 0x9f, 0xb5, 0x9a, 0xfb, 0xe5, 0x6f, 0x69, 0xde, 0x92, 0x3c, 0x42, + 0x6c, 0x56, 0x63, 0x71, 0xcf, 0x78, 0xeb, 0xd0, 0xfa, 0x0b, 0xd5, 0x3a, 0xb9, 0x20, 0xb7, 0xbf, + 0xbe, 0x67, 0x1a, 0xf4, 0x4b, 0xa5, 0xc5, 0x3d, 0x51, 0xcc, 0x5c, 0x00, 0xd6, 0x20, 0x4c, 0xe5, + 0x6f, 0xc6, 0x7a, 0x93, 0x7c, 0x8f, 0x12, 0x21, 0xba, 0x6d, 0x7a, 0x62, 0x58, 0xae, 0x97, 0x75, + 0xd1, 0x6a, 0xc0, 0x75, 0x44, 0xa4, 0xb6, 0xca, 0xbb, 0xff, 0xb9, 0x0e, 0xdf, 0x5d, 0xda, 0xc5, + 0x14, 0x03, 0x66, 0xc0, 0x2b, 0x0c, 0xa2, 0x5a, 0x87, 0x40, 0xc1, 0xc2, 0x2b, 0x3d, 0x9d, 0x7b, + 0xc4, 0x5e, 0x4b, 0x01, 0x63, 0xeb, 0xe7, 0xdf, 0xb3, 0x84, 0x0c, 0x27, 0x1a, 0x94, 0x64, 0xc5, + 0xd1, 0x0d, 0xcc, 0xf9, 0xdf, 0xef, 0x31, 0x5d, 0xd4, 0x67, 0x2a, 0xef, 0x48, 0xc4, 0xef, 0xec, + 0x26, 0xce, 0x5a, 0xeb, 0xbe, 0x19, 0x0c, 0xb3, 0x0b, 0x36, 0xf0, 0xd7, 0x31, 0x1f, 0x45, 0x8a, + 0xd6, 0x10, 0xdd, 0xa8, 0xf9, 0xa0, 0xcb, 0xaf, 0xb0, 0x79, 0x15, 0xbf, 0xf7, 0xf0, 0xf4, 0x33, + 0x4e, 0x05, 0xa5, 0x95, 0x2f, 0x92, 0xde, 0x9f, 0xbb, 0x0c, 0xce, 0xbd, 0x7a, 0x69, 0x5f, 0x95, + 0xdc, 0xc1, 0xde, 0x09, 0x27, 0x62, 0xea, 0xf7, 0x87, 0x25, 0x88, 0x40, 0xa8, 0x3e, 0x34, 0x29, + 0xd8, 0x1f, 0x3e, 0x35, 0x11, 0x2a, 0x84, 0xc0, 0x0a, 0x3f, 0x4a, 0x50, 0xb7, 0x22, 0x02, 0xa2, + 0xe0, 0xee, 0xa0, 0x39, 0x6d, 0x34, 
0xb8, 0xb9, 0x97, 0xb6, 0x8e, 0x14, 0x31, 0x15, 0x34, 0x38, + 0x1b, 0xe3, 0xe7, 0xd4, 0x29, 0x30, 0xac, 0x04, 0x81, 0x03, 0xec, 0xb2, 0x91, 0x5d, 0xc0, 0xe7, + 0xd4, 0x1f, 0xea, 0x48, 0x5c, 0xed, 0x54, 0xf8, 0xfc, 0x45, 0x0b, 0x49, 0x21, 0x3f, 0xb6, 0x38, + 0x29, 0x8c, 0x25, 0x69, 0x39, 0x37, 0x2a, 0xbe, 0x27, 0xec, 0x9b, 0x75, 0xc2, 0x9c, 0x6c, 0x4c, + 0xf6, 0xe6, 0x4c, 0x9f, 0xbd, 0xad, 0x5a, 0xc7, 0x01, 0x3b, 0x31, 0x7e, 0x49, 0x7f, 0xd6, 0x68, + 0x46, 0x4a, 0x97, 0x57, 0x74, 0xe2, 0x8f, 0x2f, 0xc3, 0xe5, 0x04, 0xbf, 0x66, 0xea, 0x33, 0xd0, + 0xa7, 0xb1, 0x6d, 0x95, 0xc8, 0x49, 0xa0, 0x5a, 0x91, 0xe6, 0x43, 0xca, 0x27, 0x34, 0x89, 0xf9, + 0x49, 0x8b, 0x64, 0xcb, 0x22, 0x50, 0xf6, 0x08, 0xc5, 0x74, 0xf4, 0x63, 0xf2, 0xaf, 0xd5, 0x7d, + 0xdd, 0x23, 0x2b, 0xe4, 0x5c, 0xb5, 0x99, 0xa2, 0xec, 0x07, 0xc2, 0xe6, 0xa9, 0xb1, 0x0a, 0xcd, + 0x9c, 0xee, 0x9b, 0x18, 0x21, 0x24, 0x89, 0x0e, 0xfa, 0x97, 0xa0, 0x4f, 0x42, 0xa0, 0xf9, 0x47, + 0xc0, 0xdd, 0x48, 0x7e, 0x29, 0x09, 0x26, 0xc3, 0x67, 0x38, 0x7f, 0x3c, 0x24, 0x2e, 0x57, 0x62, + 0xf7, 0xf3, 0x99, 0xeb, 0x8f, 0x33, 0xee, 0x0f, 0x2a, 0x27, 0x2a, 0x78, 0x1e, 0xd6, 0xa0, 0xf1, + 0x4c, 0x0b, 0x0b, 0xc8, 0xb7, 0x66, 0xc9, 0xd7, 0xf1, 0x17, 0x0c, 0x92, 0x31, 0x76, 0x89, 0x95, + 0xe3, 0xfc, 0x2e, 0x89, 0x35, 0xcc, 0xfd, 0x24, 0xae, 0x1b, 0x50, 0x50, 0x33, 0x90, 0xf0, 0x04, + 0xc8, 0xb0, 0x4b, 0xe7, 0xf3, 0x3a, 0x12, 0xaa, 0x98, 0x54, 0x7f, 0x27, 0x60, 0x4c, 0x68, 0x81, + 0x00, 0x69, 0x9b, 0x83, 0x26, 0xcb, 0x49, 0x61, 0xdb, 0xa7, 0xae, 0x88, 0xc5, 0x68, 0x1e, 0xa0, + 0x17, 0xf1, 0x31, 0x8b, 0xd2, 0x57, 0x80, 0x2b, 0x49, 0xa1, 0x7a, 0x2c, 0x91, 0x39, 0x31, 0xad, + 0x74, 0xc5, 0x9c, 0x07, 0x8e, 0x6e, 0x9d, 0xbb, 0x72, 0x92, 0x2a, 0x96, 0xa2, 0xa1, 0xda, 0x75, + 0x73, 0xe0, 0x64, 0xcb, 0x16, 0x21, 0x58, 0x08, 0x82, 0x37, 0x22, 0x6d, 0x27, 0x66, 0xbb, 0x5d, + 0x2f, 0x7b, 0x78, 0x3e, 0x18, 0x79, 0x0c, 0x8c, 0xd2, 0xc3, 0x3c, 0xfa, 0xa0, 0x27, 0x5c, 0x66, + 0x4c, 0x88, 0xf0, 0x4d, 0xdc, 0x8c, 0x35, 0x11, 0xfa, 0xc6, 0xd0, 0xc3, 0x0b, 0x19, 0x64, 0xd0, + 0x88, 0xb2, 0xca, 0xb0, 0xe0, 0x61, 0xe5, 0xd8, 0xd0, 0xa4, 0x9b, 0x19, 0x0f, 0xcf, 0x4c, 0x2d, + 0x36, 0xe7, 0xae, 0x44, 0xad, 0xdf, 0x42, 0x72, 0xa1, 0x3a, 0x1e, 0x15, 0x83, 0xbd, 0x61, 0x25, + 0xb1, 0x58, 0x84, 0x5a, 0x81, 0xb8, 0x3e, 0xd9, 0xfc, 0x3a, 0x0b, 0x24, 0x6f, 0x16, 0x99, 0x1d, + 0x75, 0x82, 0x81, 0x1b, 0x83, 0xe8, 0x01, 0x72, 0xdc, 0xe7, 0x05, 0xdf, 0xf8, 0xdc, 0x7b, 0x21, + 0xfc, 0x90, 0xa1, 0xdf, 0x5f, 0xdf, 0xed, 0x6f, 0xb9, 0xdc, 0x57, 0x50, 0xe0, 0x4a, 0x74, 0xd2, + 0xc9, 0xa7, 0xdd, 0x74, 0xde, 0xb7, 0xbc, 0xe4, 0xbc, 0xe9, 0xb2, 0x6c, 0x93, 0x94, 0xd2, 0xf5, + 0x9f, 0xef, 0x4a, 0xda, 0xac, 0x32, 0x1d, 0xe0, 0x4d, 0x86, 0xd2, 0x40, 0x5f, 0xc6, 0xdb, 0x14, + 0x6f, 0xd0, 0x0d, 0x2f, 0x69, 0x3e, 0x00, 0x71, 0x38, 0x3f, 0x71, 0x73, 0xc4, 0xb8, 0x8a, 0xf9, + 0xb1, 0xb4, 0xe0, 0x87, 0x8a, 0x10, 0xa8, 0x8a, 0xc6, 0x98, 0x19, 0xb5, 0xfc, 0x40, 0x3b, 0x01, + 0xd6, 0x7e, 0xb0, 0x36, 0x08, 0x15, 0xed, 0x9d, 0xb4, 0x75, 0x27, 0x90, 0x01, 0x41, 0xb0, 0x76, + 0x70, 0xbd, 0x51, 0xaf, 0xf2, 0x40, 0xe3, 0x21, 0x3f, 0xbc, 0x3c, 0xa2, 0x29, 0x6e, 0xfb, 0x9c, + 0xe9, 0x2c, 0x77, 0xec, 0xb0, 0x41, 0x92, 0x91, 0x58, 0x5a, 0x13, 0x7d, 0x1c, 0x96, 0xf4, 0x06, + 0xa8, 0xa3, 0x47, 0xba, 0xed, 0xf8, 0x4d, 0xee, 0xf9, 0x7e, 0xc0, 0x14, 0x65, 0x03, 0x6a, 0xcd, + 0x1c, 0x61, 0xfc, 0x4e, 0xf5, 0xf3, 0x6e, 0x47, 0xbf, 0xdb, 0x39, 0xcb, 0x01, 0xad, 0x99, 0xdd, + 0xdd, 0xfc, 0x0e, 0xdb, 0xcc, 0x8a, 0x3c, 0x39, 0xd8, 0xc7, 0x56, 0xaf, 0x34, 0xc5, 0xe2, 0xb1, + 0x79, 0x0e, 0xda, 0x54, 0x8b, 0xdd, 0xfa, 0xc1, 0x1d, 0x0a, 
0x3a, 0x26, 0x83, 0xcb, 0xfd, 0xbe, + 0xc6, 0xc9, 0x85, 0x15, 0x82, 0x10, 0x5a, 0x21, 0x11, 0x28, 0xef, 0x30, 0x8d, 0xd5, 0x56, 0x0f, + 0xb3, 0xd7, 0x3f, 0xc1, 0xb7, 0xc8, 0x6a, 0xf2, 0x5a, 0x9c, 0xca, 0xf5, 0xdf, 0xda, 0x2d, 0x43, + 0x5b, 0x11, 0xfd, 0x9c, 0x33, 0x5e, 0x68, 0xb4, 0x25, 0x78, 0xcc, 0x7a, 0x81, 0x8e, 0x8c, 0xce, + 0x22, 0x2b, 0x98, 0xc6, 0x4d, 0x65, 0xe2, 0x20, 0x52, 0x72, 0xfe, 0xcd, 0x44, 0x2a, 0x0f, 0x73, + 0xa2, 0x0f, 0x1a, 0x0a, 0x8e, 0xfb, 0xa0, 0x67, 0x64, 0x1b, 0xd2, 0x26, 0x8c, 0xa1, 0x0b, 0xb4, + 0x2e, 0xb8, 0x6b, 0xdd, 0x7d, 0x96, 0x97, 0xc7, 0x0f, 0xb8, 0x99, 0x4d, 0xd8, 0x01, 0x9f, 0x7f, + 0xe9, 0x98, 0x75, 0xe1, 0xf4, 0x0d, 0x07, 0x9c, 0x2b, 0x3e, 0xb7, 0x0f, 0x12, 0xa6, 0x67, 0x71, + 0x1b, 0x72, 0x6d, 0x39, 0x13, 0x76, 0x8b, 0x92, 0xb0, 0x3c, 0xfc, 0x42, 0xdb, 0xb4, 0x9b, 0xa4, + 0x71, 0xce, 0x22, 0xb9, 0x15, 0x68, 0xa6, 0x1c, 0xf6, 0x68, 0x90, 0x41, 0xe3, 0x06, 0x4c, 0x8e, + 0x25, 0x9c, 0x84, 0x3c, 0xb5, 0x46, 0x83, 0x90, 0x74, 0x70, 0x0a, 0x5b, 0xe8, 0x2f, 0x4e, 0x53, + 0xbe, 0x5f, 0xbe, 0xee, 0x67, 0xd8, 0x22, 0x21, 0x11, 0xc2, 0xe0, 0x76, 0xbb, 0x59, 0x92, 0xd6, + 0x52, 0xce, 0x72, 0x52, 0x95, 0x87, 0xf3, 0x4e, 0x1c, 0xb3, 0x21, 0x68, 0x59, 0x71, 0xd0, 0x24, + 0x9f, 0x1b, 0x13, 0x44, 0x57, 0x3e, 0x31, 0x5f, 0x9b, 0x67, 0xf4, 0x76, 0xb6, 0xba, 0x6f, 0xdf, + 0x74, 0x3a, 0x18, 0x02, 0x5a, 0xdb, 0x3e, 0xcb, 0xc0, 0x93, 0x2a, 0x9d, 0xe8, 0x7a, 0x73, 0x03, + 0xde, 0xf2, 0xcb, 0x91, 0xb2, 0x1e, 0x5d, 0x9e, 0x27, 0x01, 0xbd, 0x1f, 0x70, 0x4a, 0xae, 0x1b, + 0x27, 0xb6, 0x7e, 0x78, 0x14, 0x53, 0x21, 0x65, 0xa9, 0x00, 0x01, 0x22, 0x01, 0x27, 0xc7, 0xd4, + 0x48, 0x64, 0xad, 0x46, 0x7f, 0xaa, 0xc9, 0xd6, 0xd4, 0x3d, 0x46, 0xd3, 0xf9, 0xc2, 0xf0, 0xf1, + 0x47, 0x38, 0xe8, 0xbd, 0x6d, 0x67, 0xc8, 0xe3, 0x70, 0xbb, 0x84, 0x28, 0x85, 0x97, 0xc0, 0xd5, + 0xef, 0x60, 0x04, 0x39, 0x4a, 0x34, 0xa7, 0x73, 0xca, 0x1c, 0x46, 0x3e, 0x6b, 0x13, 0x90, 0xe7, + 0x99, 0x16, 0x91, 0x7c, 0xa9, 0x2c, 0xa9, 0xb5, 0xaf, 0x57, 0xaf, 0x71, 0xad, 0x1a, 0xf1, 0xff, + 0x4a, 0xf4, 0x6e, 0x64, 0xe1, 0x9f, 0x8c, 0x05, 0xd2, 0x08, 0x16, 0x12, 0x0b, 0xef, 0x7f, 0xad, + 0x53, 0x91, 0xdb, 0x22, 0x67, 0x0c, 0x73, 0x0b, 0x4a, 0x49, 0x64, 0x83, 0xdd, 0x28, 0x4d, 0x13, + 0x82, 0x79, 0xfb, 0x5a, 0x76, 0x2e, 0xf9, 0x9c, 0xed, 0xbe, 0x7a, 0x35, 0xcb, 0xb7, 0xfd, 0x54, + 0x32, 0xce, 0x8a, 0x08, 0xfb, 0x89, 0x19, 0x11, 0x04, 0x82, 0xf4, 0x00, 0x7a, 0xd7, 0x4c, 0xce, + 0x99, 0xa0, 0x79, 0x9d, 0xeb, 0xb7, 0x7e, 0xb4, 0xa6, 0xf9, 0xbe, 0x3b, 0x1b, 0xf5, 0x09, 0x0b, + 0xb4, 0xb9, 0x12, 0x75, 0x61, 0x03, 0xf0, 0x53, 0x85, 0x81, 0x7f, 0x54, 0xbe, 0x58, 0xfa, 0xe4, + 0xf2, 0xf7, 0xf1, 0x35, 0x14, 0x1c, 0xc8, 0x05, 0xd7, 0x70, 0x96, 0x66, 0x77, 0x3b, 0x5d, 0x63, + 0x69, 0x40, 0x1f, 0xb2, 0x5c, 0x7a, 0xfb, 0x03, 0x3c, 0x1b, 0xf0, 0xeb, 0xa5, 0x1e, 0xd0, 0xe0, + 0x25, 0x96, 0x92, 0x81, 0x08, 0xbd, 0x0a, 0x34, 0x7f, 0x77, 0xd9, 0x1d, 0xde, 0x21, 0x49, 0xa2, + 0xc2, 0x42, 0x4f, 0x9e, 0xc7, 0x8a, 0x9c, 0xce, 0xec, 0xcb, 0x23, 0x25, 0xdd, 0xf7, 0x0d, 0xa1, + 0xa1, 0x03, 0x86, 0xc6, 0xbf, 0x16, 0xd3, 0xec, 0x62, 0xa7, 0x4a, 0xb8, 0x14, 0x3e, 0xb5, 0x5b, + 0xa1, 0x65, 0x7b, 0x3b, 0xf8, 0x8d, 0xae, 0xae, 0xd4, 0x4c, 0x2c, 0x57, 0xc0, 0xac, 0xab, 0x07, + 0x5d, 0x5a, 0xed, 0x95, 0xcd, 0xe3, 0xca, 0x43, 0xc8, 0xfc, 0xd8, 0x2f, 0xe8, 0x4a, 0xec, 0x54, + 0xa5, 0xf9, 0x31, 0x19, 0x42, 0xbe, 0xc6, 0xe7, 0xc6, 0x93, 0x7e, 0xc6, 0xe5, 0x5b, 0x94, 0x1f, + 0x4d, 0x8c, 0xae, 0xdd, 0xf3, 0xaf, 0x71, 0xd7, 0xf3, 0x1d, 0xa0, 0xc7, 0xf4, 0x98, 0xb5, 0x5f, + 0x78, 0x3f, 0xeb, 0x1d, 0xb7, 0x9d, 0xac, 0xde, 0x21, 0x26, 0x6d, 0xeb, 0x6f, 0x2d, 
0x4c, 0xeb, + 0x66, 0x25, 0x95, 0x3d, 0x85, 0x32, 0xd3, 0xc0, 0x35, 0x60, 0x8a, 0xa9, 0x5c, 0xa6, 0xeb, 0x48, + 0x91, 0xf4, 0x24, 0x4e, 0x4b, 0xa1, 0xf4, 0x74, 0xda, 0x1f, 0x76, 0x91, 0xc1, 0x16, 0x9c, 0xe6, + 0xd2, 0x16, 0xe5, 0x59, 0x56, 0xd1, 0x65, 0x86, 0x4e, 0x03, 0xab, 0x3b, 0xb7, 0x7c, 0x65, 0x6a, + 0x05, 0x3e, 0x94, 0x34, 0xd2, 0xcf, 0xc1, 0x4a, 0xd7, 0x1b, 0xf7, 0xb3, 0xcf, 0x88, 0x37, 0x32, + 0xdc, 0x8f, 0x00, 0x7e, 0xb5, 0xc6, 0x4e, 0x1f, 0xfa, 0xc3, 0xf1, 0xd9, 0x2c, 0x69, 0x46, 0x0a, + 0xba, 0x88, 0x63, 0xfd, 0x4d, 0x99, 0x2e, 0x55, 0xb7, 0xb5, 0xfa, 0x51, 0x41, 0x70, 0x4c, 0xa8, + 0xae, 0xca, 0x64, 0x43, 0xc1, 0x13, 0x63, 0x61, 0x28, 0x98, 0x3d, 0xe8, 0x94, 0xc9, 0x94, 0x38, + 0xa0, 0xbc, 0x89, 0x51, 0x28, 0xc9, 0x72, 0xc6, 0xd7, 0x21, 0x84, 0x87, 0xfb, 0xdd, 0xa3, 0xca, + 0xcb, 0xb9, 0x46, 0x1c, 0x51, 0x7b, 0x9a, 0x3d, 0xaa, 0x49, 0x58, 0xae, 0xe4, 0xf8, 0x4b, 0x84, + 0x36, 0xd5, 0xdf, 0x4e, 0x32, 0xa9, 0xdc, 0x6a, 0x79, 0x30, 0x99, 0xab, 0x5e, 0x4e, 0xf4, 0x30, + 0x2b, 0xeb, 0x43, 0x48, 0xfd, 0xcd, 0x3b, 0x87, 0xaf, 0xf4, 0x58, 0xba, 0x3c, 0x30, 0x94, 0xb0, + 0xda, 0x2f, 0xf7, 0xb6, 0x72, 0xff, 0x81, 0x08, 0x04, 0x7d, 0x90, 0x2a, 0xfc, 0xbe, 0x47, 0x80, + 0xdd, 0xfc, 0x75, 0xad, 0xa4, 0xe9, 0x15, 0xdf, 0x34, 0x67, 0xe5, 0x1b, 0xb8, 0x86, 0xd8, 0x3c, + 0x9b, 0xd4, 0x1c, 0xde, 0xfc, 0x02, 0xb1, 0x9b, 0x95, 0x0e, 0xbc, 0xe7, 0x4d, 0x66, 0x9a, 0x9e, + 0x40, 0x37, 0x08, 0x25, 0x73, 0x76, 0x76, 0x48, 0xea, 0xf9, 0x8d, 0xeb, 0x51, 0xb6, 0xca, 0x47, + 0xdd, 0x54, 0x19, 0xde, 0x00, 0x36, 0x1b, 0xdd, 0x98, 0x4f, 0x60, 0x9f, 0x90, 0x67, 0x1f, 0xa3, + 0x6d, 0xa1, 0xa3, 0xff, 0x86, 0x68, 0x10, 0x27, 0xbf, 0x40, 0x7b, 0x3d, 0xe4, 0x5f, 0x14, 0xc0, + 0xdb, 0x1a, 0xd1, 0x7d, 0xd3, 0xad, 0x03, 0xa6, 0x1c, 0xbb, 0x33, 0x44, 0x69, 0x42, 0xc2, 0xe6, + 0x22, 0xda, 0xe4, 0x67, 0xef, 0xe2, 0xbe, 0x7d, 0xc7, 0x70, 0xa8, 0x2c, 0x04, 0x54, 0xac, 0xea, + 0x7d, 0x19, 0xe0, 0xde, 0xde, 0x8b, 0x5a, 0x4f, 0x80, 0xa2, 0x45, 0x4c, 0xb7, 0x77, 0x73, 0x14, + 0x9f, 0xb5, 0xd6, 0x56, 0x4f, 0xc3, 0x2a, 0x5e, 0x09, 0xb9, 0xfd, 0xc6, 0x25, 0x88, 0x94, 0x0b, + 0x3b, 0x87, 0x51, 0x69, 0x80, 0x37, 0x4c, 0x4f, 0xf8, 0x5d, 0xe0, 0x21, 0xee, 0xe5, 0xb9, 0x88, + 0x63, 0x67, 0xfb, 0xa6, 0xd3, 0x67, 0x35, 0x04, 0xf8, 0x68, 0xc9, 0xa9, 0xe8, 0xa2, 0x50, 0xe4, + 0xab, 0xd4, 0xe5, 0xe1, 0x1a, 0x51, 0x64, 0x06, 0xcf, 0x9a, 0xd7, 0xd5, 0x4c, 0x65, 0xea, 0x7e, + 0x2f, 0x7f, 0xfb, 0xa6, 0x24, 0xda, 0xf7, 0x75, 0xe2, 0x64, 0xe8, 0xe2, 0x65, 0x1a, 0x4f, 0xf1, + 0x7d, 0x57, 0x8d, 0x57, 0x3b, 0x3e, 0xf7, 0xfa, 0x6b, 0xfc, 0xf8, 0x34, 0x07, 0x6c, 0xef, 0x83, + 0xa7, 0xf8, 0x98, 0xa4, 0x38, 0xd6, 0x41, 0xda, 0xd5, 0x31, 0x36, 0xa2, 0x09, 0x58, 0x35, 0xe5, + 0x9d, 0x45, 0x2e, 0x84, 0x5a, 0xae, 0x60, 0x63, 0xaf, 0x69, 0x87, 0xd9, 0x92, 0x66, 0x42, 0x6f, + 0x63, 0xff, 0xb1, 0xcf, 0x04, 0x79, 0xfe, 0x3a, 0x38, 0x77, 0x53, 0xbd, 0xb6, 0x68, 0xae, 0x1e, + 0x95, 0xf4, 0x10, 0x61, 0xd9, 0x4b, 0x3d, 0xf6, 0x7d, 0xe2, 0x8d, 0x0b, 0x19, 0x73, 0xd8, 0x1a, + 0x80, 0x22, 0x28, 0xff, 0x3e, 0x95, 0x15, 0xf2, 0x0e, 0x34, 0x6d, 0x4a, 0x82, 0xf2, 0x39, 0x25, + 0xf8, 0x5b, 0xf3, 0xe1, 0xe0, 0x48, 0x53, 0xf2, 0x68, 0xd7, 0x10, 0x23, 0x41, 0xfb, 0x39, 0xdf, + 0xb3, 0x67, 0x1a, 0xbe, 0x17, 0xbd, 0x5f, 0x52, 0xcc, 0x52, 0x1d, 0xf2, 0x35, 0x25, 0x5c, 0x6e, + 0x6d, 0xc6, 0xe7, 0xb6, 0xf9, 0x84, 0xb7, 0x64, 0x81, 0x2f, 0xe2, 0x20, 0xe9, 0x0a, 0x8d, 0xb1, + 0x13, 0x83, 0x1b, 0x64, 0x7c, 0x04, 0x8c, 0xc6, 0x94, 0xb9, 0xaa, 0xa3, 0xe9, 0x0c, 0x1c, 0x4b, + 0xee, 0x34, 0x4a, 0xdf, 0xe4, 0x80, 0x32, 0x63, 0xe7, 0x16, 0x52, 0x69, 0xe8, 0x46, 0x1b, 0x28, + 0x2f, 0x76, 
0x25, 0x48, 0xe0, 0xc8, 0x9b, 0xe5, 0x0d, 0xc2, 0xcb, 0xbb, 0x04, 0xfe, 0x1d, 0x2a, + 0x68, 0x22, 0x44, 0xda, 0x7e, 0xbe, 0xd6, 0x85, 0xb9, 0xb1, 0x6e, 0x37, 0x8f, 0x88, 0x32, 0xc4, + 0x67, 0x49, 0xf9, 0xd9, 0x43, 0x9c, 0x02, 0x22, 0xe8, 0x23, 0xaa, 0x65, 0xf9, 0xe4, 0x86, 0x18, + 0xe9, 0x56, 0x22, 0x83, 0x02, 0x39, 0x8a, 0x57, 0x35, 0x11, 0x33, 0x11, 0x39, 0x31, 0x98, 0x65, + 0x99, 0x5f, 0x65, 0x4c, 0xa0, 0x38, 0x3b, 0x95, 0x30, 0xdd, 0x9c, 0x5e, 0x8a, 0x5f, 0x2e, 0x56, + 0x86, 0xdc, 0x2a, 0xf7, 0xe7, 0xe3, 0xa1, 0x29, 0xb1, 0x84, 0xba, 0xac, 0x79, 0x20, 0xe0, 0x97, + 0x64, 0x76, 0x9d, 0xc5, 0xf5, 0x23, 0x6c, 0x34, 0x65, 0x80, 0x4b, 0xed, 0x70, 0x1b, 0xbc, 0x41, + 0x19, 0x40, 0xab, 0x0a, 0xc2, 0x7e, 0x13, 0x31, 0xd4, 0x63, 0xda, 0x47, 0x56, 0x24, 0xe7, 0x0d, + 0x56, 0xba, 0xc8, 0xad, 0xa7, 0x98, 0x37, 0x80, 0x3b, 0xc7, 0xc3, 0x05, 0x8d, 0x95, 0x10, 0xeb, + 0x7b, 0xe2, 0xbd, 0x57, 0x95, 0xf8, 0xee, 0x0f, 0x03, 0xde, 0xc0, 0xc4, 0x4e, 0xef, 0xf9, 0xb6, + 0xa0, 0x27, 0x9d, 0x85, 0xb4, 0x53, 0xcc, 0x9a, 0x41, 0x07, 0x24, 0xfb, 0xec, 0x40, 0x67, 0x78, + 0x90, 0x21, 0x52, 0x3f, 0x47, 0xbd, 0x81, 0x73, 0xb5, 0x51, 0x0d, 0xfe, 0x9e, 0x71, 0xd2, 0xdb, + 0x32, 0x54, 0xe2, 0x9a, 0x2c, 0xe4, 0x5f, 0x70, 0x19, 0xb5, 0x1f, 0x47, 0xe1, 0x88, 0x8a, 0x4d, + 0x41, 0x2e, 0xf1, 0xa0, 0x4a, 0x52, 0x7e, 0xf2, 0x8f, 0x6c, 0x8f, 0xed, 0xee, 0x68, 0x34, 0xbf, + 0x0f, 0x4b, 0x0d, 0xdf, 0x4e, 0xe4, 0x02, 0x3c, 0xa6, 0xc2, 0x05, 0xe3, 0x10, 0x89, 0xb0, 0x0d, + 0xc2, 0xdc, 0x5f, 0x62, 0x65, 0xd2, 0xcb, 0x60, 0x6d, 0x04, 0x7a, 0xbf, 0xd0, 0x33, 0x88, 0xe5, + 0x21, 0x43, 0x57, 0x07, 0x7d, 0xf9, 0x34, 0x2f, 0x04, 0x6a, 0x26, 0x2c, 0x9a, 0xee, 0xbe, 0x87, + 0xce, 0xe2, 0x81, 0x9e, 0x0b, 0x9e, 0xa8, 0x0c, 0xee, 0xa4, 0x03, 0x3b, 0xf7, 0x52, 0xea, 0x71, + 0x61, 0x07, 0x83, 0x8e, 0x8a, 0x7e, 0x1d, 0xea, 0xe6, 0x92, 0x10, 0xc8, 0x43, 0x08, 0xd1, 0x74, + 0x07, 0x2d, 0x6c, 0x2d, 0xac, 0xef, 0x0c, 0xdf, 0xc3, 0x22, 0x24, 0xf6, 0xb1, 0x8b, 0x29, 0x85, + 0x8f, 0x90, 0xd4, 0x20, 0x3d, 0xee, 0xd2, 0xcf, 0x65, 0x13, 0x54, 0x80, 0x9a, 0x91, 0xbc, 0x19, + 0xb6, 0x34, 0xc5, 0x52, 0x93, 0x74, 0x4d, 0xc3, 0x8b, 0x9b, 0x6d, 0xf4, 0x96, 0xfc, 0xc2, 0xd5, + 0x9d, 0x6d, 0x1a, 0x6e, 0x3c, 0x07, 0x49, 0xa5, 0x10, 0x05, 0x63, 0x61, 0x49, 0x86, 0xdc, 0xf0, + 0x5e, 0xc0, 0x78, 0x3b, 0x09, 0x63, 0x55, 0xa2, 0x37, 0xc7, 0x86, 0xc3, 0x17, 0x68, 0x99, 0x8c, + 0xf5, 0x4d, 0x8c, 0xeb, 0x0e, 0x72, 0x62, 0x5f, 0x6d, 0xa2, 0x13, 0x7c, 0xa0, 0x0a, 0x61, 0x5d, + 0xbb, 0x9a, 0x86, 0x49, 0xc3, 0x84, 0x6f, 0xc6, 0xdd, 0x8d, 0x31, 0x10, 0x17, 0x05, 0x8b, 0xe2, + 0x84, 0x72, 0xe3, 0x3e, 0x85, 0x49, 0x1a, 0x30, 0xb5, 0xc3, 0x17, 0x03, 0x36, 0xd9, 0x6d, 0x17, + 0xcc, 0x37, 0xdc, 0xbf, 0x8e, 0x34, 0xb0, 0x5e, 0x48, 0xfe, 0xdb, 0xf3, 0xda, 0xe2, 0xcd, 0xb8, + 0xc6, 0x8e, 0x0e, 0xf6, 0x17, 0xae, 0xfe, 0xb8, 0x2e, 0x22, 0x6a, 0x8b, 0xe1, 0x50, 0xee, 0x11, + 0x1a, 0x7f, 0x0e, 0x96, 0x47, 0xb3, 0xad, 0xd5, 0xd6, 0x06, 0x7f, 0xbb, 0x6d, 0x10, 0x9d, 0x64, + 0xca, 0xb4, 0x9a, 0x65, 0xc5, 0xad, 0xd4, 0x3b, 0x21, 0x8c, 0x70, 0x00, 0x62, 0xba, 0x83, 0x70, + 0xf4, 0x88, 0xdb, 0xe4, 0x0b, 0xaf, 0x19, 0xbf, 0xd2, 0xeb, 0x06, 0x5d, 0x87, 0xc9, 0xf5, 0x0c, + 0x06, 0x6a, 0x1c, 0x97, 0x95, 0xbf, 0xbb, 0xd1, 0x08, 0x4f, 0x58, 0x4f, 0x63, 0x58, 0x1d, 0x57, + 0x6e, 0x8a, 0x9e, 0xea, 0x5c, 0x40, 0xaa, 0x66, 0x64, 0xb0, 0x7b, 0x2d, 0x05, 0x1a, 0xbe, 0xc0, + 0x74, 0x51, 0x83, 0x2e, 0xf0, 0x25, 0x10, 0x69, 0x61, 0x3a, 0x26, 0xcf, 0x7e, 0x3d, 0xb6, 0x31, + 0xcb, 0xc8, 0x49, 0xa7, 0x79, 0x25, 0x62, 0x05, 0x0e, 0x39, 0xe1, 0xcf, 0x3d, 0x06, 0x62, 0xe7, + 0x04, 0xa2, 0x77, 0xd6, 0x56, 0xf6, 
0xe8, 0xae, 0xc2, 0xc9, 0x97, 0x32, 0x9f, 0x65, 0x6b, 0xd7, + 0xfd, 0x60, 0x5d, 0x44, 0xb8, 0x22, 0x10, 0xd2, 0xb1, 0x73, 0x26, 0x65, 0x89, 0x45, 0x2d, 0xf6, + 0x72, 0x31, 0xf8, 0x77, 0x93, 0x64, 0x64, 0x9b, 0xef, 0x45, 0x60, 0x92, 0x27, 0xec, 0xb0, 0x8c, + 0xb5, 0xbe, 0xdc, 0x46, 0xf6, 0x57, 0x0e, 0x1b, 0xd0, 0x08, 0x71, 0x28, 0x5e, 0x11, 0x64, 0x00, + 0x8e, 0xec, 0xa7, 0x85, 0x72, 0xf9, 0x7d, 0xce, 0xaf, 0x9e, 0x66, 0x68, 0xe5, 0x1a, 0x0f, 0x1e, + 0x07, 0x6c, 0x89, 0xa6, 0x27, 0x62, 0x9a, 0xe1, 0xf9, 0xd4, 0x5c, 0x90, 0x92, 0xb8, 0x8a, 0xaa, + 0x3b, 0xaf, 0x15, 0x1e, 0x42, 0x29, 0x61, 0x6c, 0xdc, 0xb5, 0x24, 0xda, 0xdd, 0x47, 0x4e, 0xb5, + 0xff, 0xba, 0xc2, 0xf6, 0x2f, 0x9c, 0x74, 0x02, 0xf7, 0xc4, 0x28, 0x7e, 0xfa, 0x57, 0x13, 0x52, + 0x95, 0x73, 0x25, 0x7a, 0xb3, 0x32, 0x92, 0x82, 0x4a, 0xd2, 0x6d, 0x11, 0x1a, 0x17, 0xad, 0x4c, + 0xaa, 0xee, 0x4b, 0xf9, 0x6b, 0x35, 0x69, 0xc7, 0x82, 0x79, 0x77, 0xab, 0xc4, 0xdd, 0x1d, 0x11, + 0xff, 0xac, 0xa7, 0x02, 0xc3, 0x15, 0xad, 0x63, 0x74, 0xc2, 0x8f, 0xd8, 0xf0, 0x56, 0x8a, 0x3e, + 0x3d, 0x47, 0x93, 0x4a, 0x9b, 0xe0, 0x24, 0x94, 0x61, 0xe1, 0x23, 0xc5, 0x99, 0xe6, 0x12, 0x66, + 0xc4, 0x3e, 0xff, 0xb7, 0xe8, 0x4b, 0xfb, 0xc0, 0x06, 0x95, 0x6a, 0x83, 0x09, 0x74, 0x6c, 0xf9, + 0xdc, 0x77, 0xd2, 0xe9, 0x08, 0x3b, 0xd4, 0x90, 0xc8, 0xeb, 0x51, 0xe8, 0xf2, 0x33, 0x04, 0xa9, + 0x05, 0x50, 0x3b, 0xf2, 0x69, 0x5f, 0x19, 0xfe, 0xe2, 0xd7, 0xa0, 0x37, 0xb6, 0x8f, 0x66, 0x3f, + 0xa7, 0xe7, 0x0b, 0x46, 0xc8, 0x03, 0x39, 0x63, 0xc1, 0x6e, 0x02, 0x6c, 0xb9, 0xef, 0xeb, 0x8b, + 0x5c, 0x18, 0x10, 0x4c, 0x82, 0x05, 0x5b, 0xf4, 0x07, 0xa6, 0x4a, 0xa9, 0x81, 0x24, 0x05, 0xc6, + 0x72, 0x3d, 0x11, 0x1b, 0x6a, 0xe6, 0x3d, 0xec, 0xbf, 0x10, 0xce, 0x7c, 0x27, 0x68, 0x27, 0x1f, + 0x3a, 0xc7, 0xdc, 0x45, 0x5d, 0xfa, 0x89, 0x9e, 0x1b, 0x50, 0x35, 0x6a, 0xd2, 0x23, 0x87, 0xb3, + 0x56, 0x9d, 0x0d, 0x8f, 0x1a, 0x6e, 0x01, 0x21, 0x3f, 0x5c, 0xc5, 0xf3, 0x01, 0x94, 0x04, 0x98, + 0xa9, 0x21, 0xcf, 0xfa, 0x92, 0xc8, 0x90, 0x05, 0x9d, 0xb9, 0x40, 0x16, 0x3f, 0xf8, 0x62, 0x06, + 0x75, 0x20, 0x6c, 0x96, 0x79, 0xdb, 0x90, 0x5f, 0x56, 0x02, 0xd6, 0xab, 0x81, 0x81, 0x38, 0xa7, + 0xd5, 0x73, 0x8d, 0x29, 0x71, 0xfd, 0xad, 0x43, 0x6d, 0x50, 0xeb, 0x68, 0x61, 0x32, 0x41, 0x0c, + 0xa7, 0x25, 0xd1, 0x30, 0x8e, 0xbf, 0x28, 0xe0, 0x19, 0xbd, 0x7d, 0xc3, 0xa7, 0xdc, 0x81, 0x25, + 0x50, 0xb3, 0xc7, 0x71, 0x78, 0x58, 0xe8, 0xc5, 0x1c, 0xc0, 0x56, 0xc2, 0x07, 0x6c, 0xac, 0x7f, + 0xa4, 0x69, 0xc2, 0x6b, 0xef, 0xa2, 0x15, 0xdb, 0x6b, 0x60, 0x72, 0x54, 0xd2, 0xde, 0x30, 0xd5, + 0xc4, 0x32, 0xa8, 0x67, 0x6b, 0x50, 0x62, 0xac, 0x93, 0x58, 0xd2, 0xe8, 0x5a, 0x8c, 0x64, 0x07, + 0x1d, 0x30, 0x15, 0xce, 0xfb, 0xed, 0x43, 0xeb, 0xf3, 0x4c, 0xb1, 0xac, 0xf3, 0x54, 0x15, 0xce, + 0x19, 0x9c, 0x60, 0x03, 0xb2, 0xbb, 0x0f, 0x36, 0x80, 0x8b, 0xe0, 0x24, 0xd1, 0xa1, 0x16, 0xb4, + 0x76, 0x67, 0xeb, 0xa4, 0xb1, 0x13, 0xc3, 0x9a, 0x86, 0x17, 0xbd, 0x64, 0xb0, 0x48, 0xaf, 0xf8, + 0xf0, 0xef, 0x71, 0x19, 0x31, 0xd3, 0xf0, 0x49, 0xdd, 0x99, 0x8a, 0x56, 0x82, 0xd9, 0xab, 0x60, + 0xb0, 0xba, 0x62, 0x60, 0xbb, 0x06, 0x5f, 0x3a, 0x89, 0x14, 0xa0, 0xf2, 0xf3, 0x95, 0xf0, 0xb9, + 0xba, 0x4f, 0x28, 0x17, 0x18, 0x5b, 0x42, 0xb1, 0x2a, 0x0a, 0xc6, 0x9c, 0xe6, 0x47, 0xca, 0x36, + 0x46, 0xc1, 0x1c, 0x84, 0xc1, 0xca, 0xc6, 0xde, 0xab, 0x57, 0x1f, 0xa5, 0x53, 0x23, 0x56, 0x71, + 0xbe, 0x16, 0x38, 0x41, 0x41, 0x21, 0xff, 0xee, 0x72, 0x93, 0x44, 0x6a, 0x6a, 0xe5, 0x96, 0xf2, + 0x70, 0x56, 0x38, 0x44, 0x37, 0x17, 0x35, 0x23, 0x0b, 0x11, 0x6c, 0xe3, 0x43, 0xd8, 0xda, 0x0f, + 0xfd, 0xc4, 0x1e, 0xc5, 0x60, 0xf5, 0xf6, 0x66, 0xcb, 0x5d, 
0xff, 0xe0, 0x63, 0xa9, 0xb3, 0x3d, + 0x08, 0xad, 0x7f, 0xdb, 0x7e, 0x9a, 0x2a, 0x23, 0xc0, 0xf9, 0xe9, 0xf6, 0x3c, 0x2f, 0x04, 0x3e, + 0xb7, 0xd9, 0x07, 0x2e, 0x47, 0xbe, 0x99, 0xf4, 0xd4, 0x89, 0x59, 0x4e, 0xa4, 0x1d, 0x6a, 0x88, + 0x49, 0x56, 0xca, 0x1b, 0x65, 0x75, 0x74, 0x59, 0xf9, 0xf1, 0xbe, 0x65, 0x29, 0xdd, 0xae, 0x3c, + 0x64, 0x28, 0x14, 0x5e, 0xa4, 0x5b, 0x68, 0x54, 0x12, 0x4c, 0xcb, 0xa8, 0x27, 0x4d, 0x0e, 0x50, + 0x38, 0x0f, 0x48, 0x32, 0x94, 0x12, 0x7f, 0x51, 0x33, 0x87, 0xca, 0xdd, 0x34, 0x33, 0x45, 0x83, + 0x29, 0x3b, 0x53, 0xfa, 0x0c, 0x32, 0x31, 0x1b, 0x66, 0x88, 0x98, 0xd2, 0x3c, 0x30, 0xbd, 0x79, + 0x43, 0xb8, 0xc3, 0x98, 0x34, 0x2b, 0x3f, 0x01, 0x59, 0x97, 0x67, 0x62, 0xa6, 0x64, 0xb8, 0xdf, + 0x22, 0x3f, 0xda, 0xc3, 0xd8, 0xaf, 0x45, 0xc6, 0x4f, 0x71, 0x46, 0x99, 0x71, 0xe0, 0x9c, 0x29, + 0x88, 0xae, 0xc8, 0x96, 0x2d, 0xde, 0x9d, 0x12, 0x1a, 0xd6, 0x3d, 0xfa, 0x79, 0x27, 0x75, 0x26, + 0x7f, 0x4f, 0x1c, 0xe0, 0xca, 0x11, 0x75, 0x2d, 0x0a, 0xe4, 0x15, 0xa8, 0x3d, 0x42, 0xb4, 0x16, + 0xf8, 0xd3, 0xc9, 0xec, 0x24, 0x63, 0x18, 0x0c, 0x4f, 0xaf, 0xaa, 0x9f, 0x2f, 0x11, 0x8d, 0xed, + 0x4c, 0x14, 0x5e, 0x88, 0xda, 0x20, 0xcc, 0x31, 0xc5, 0x67, 0xfb, 0x52, 0xf6, 0x13, 0xa7, 0x4d, + 0x84, 0x86, 0xfc, 0xfb, 0xae, 0x07, 0xcc, 0x4d, 0x2a, 0xb2, 0xec, 0x3f, 0xd6, 0x57, 0xf0, 0xca, + 0xa8, 0xc8, 0xdc, 0x51, 0x9a, 0x52, 0xd8, 0x3a, 0xf8, 0x49, 0xc8, 0x4f, 0x01, 0x7d, 0x39, 0x77, + 0xa6, 0xdf, 0xda, 0xfa, 0x58, 0x98, 0x6d, 0x21, 0x71, 0x63, 0x79, 0x30, 0x3e, 0x65, 0x80, 0x02, + 0xbc, 0x60, 0x91, 0x57, 0xe6, 0x0d, 0x26, 0xa2, 0x30, 0x34, 0x6a, 0x72, 0x50, 0x70, 0x8b, 0x1b, + 0x78, 0xf1, 0x05, 0xc4, 0xf0, 0x7a, 0xa8, 0xa7, 0x9d, 0x31, 0x10, 0xdd, 0xb8, 0xe8, 0x25, 0x83, + 0xfe, 0x00, 0x46, 0xdd, 0xdc, 0x9d, 0xc6, 0xbb, 0xdb, 0xfc, 0x20, 0x08, 0x0f, 0x20, 0x88, 0x96, + 0x9a, 0xa0, 0x97, 0x79, 0x5b, 0x29, 0xbe, 0xda, 0xf0, 0xa8, 0x3d, 0xd1, 0xf0, 0x88, 0xa7, 0x64, + 0x8b, 0x08, 0x63, 0x81, 0xcb, 0xdd, 0x13, 0x37, 0x89, 0x37, 0xa1, 0xc9, 0xfb, 0xbd, 0x2e, 0x3e, + 0xec, 0x9a, 0x0d, 0xc7, 0x99, 0x40, 0x1f, 0xbf, 0x0a, 0xcf, 0xb2, 0x30, 0x6a, 0x96, 0xb6, 0xcb, + 0x8d, 0x51, 0x51, 0xfb, 0xc4, 0xaa, 0xfd, 0xb2, 0x17, 0xef, 0xab, 0x86, 0x24, 0xe9, 0x57, 0x09, + 0xf7, 0x2e, 0xcd, 0xac, 0xf7, 0xc4, 0x74, 0x16, 0xb5, 0x6b, 0xd6, 0xb0, 0xac, 0x88, 0x86, 0x37, + 0x5a, 0x83, 0x14, 0x71, 0x7f, 0x01, 0x7d, 0x07, 0x7e, 0x75, 0xf6, 0xa6, 0x67, 0x58, 0xd6, 0x2c, + 0x41, 0xec, 0x08, 0x49, 0x7b, 0x63, 0xff, 0xf3, 0x11, 0x1c, 0x93, 0x97, 0xb0, 0x3e, 0x4b, 0x76, + 0x4a, 0xb9, 0xf2, 0xcb, 0x2c, 0x87, 0xe3, 0xdd, 0x45, 0xcb, 0xbf, 0x52, 0xcd, 0x12, 0xff, 0xea, + 0xf8, 0xc7, 0xa1, 0xa0, 0x11, 0x03, 0xc1, 0xa2, 0x83, 0x58, 0x93, 0xd5, 0x60, 0xf0, 0x8c, 0xa1, + 0xd6, 0x84, 0x04, 0x8e, 0x84, 0x30, 0xe1, 0x08, 0xdf, 0xdb, 0x15, 0x31, 0x4b, 0x3a, 0x30, 0x91, + 0x24, 0x67, 0xce, 0x31, 0x06, 0x35, 0x49, 0x28, 0xc3, 0x08, 0x0c, 0xb4, 0x62, 0xb5, 0x63, 0x70, + 0xb7, 0x46, 0xea, 0xa4, 0xa9, 0xfb, 0xe5, 0x47, 0x02, 0xad, 0x11, 0x1d, 0x1e, 0xd0, 0xe7, 0xef, + 0x48, 0x23, 0x70, 0xd8, 0x4b, 0xba, 0x6b, 0xde, 0x87, 0x6a, 0xdb, 0x2c, 0x3e, 0x5a, 0x7a, 0x34, + 0x7e, 0xfc, 0xa3, 0x7c, 0xa4, 0x4f, 0xc8, 0x25, 0xa9, 0xab, 0xfc, 0x59, 0x69, 0x58, 0x14, 0x78, + 0xe7, 0xe2, 0x15, 0x4b, 0x11, 0xf5, 0x59, 0x6e, 0x4e, 0xb0, 0x1c, 0xcd, 0xe0, 0xde, 0xd4, 0x95, + 0xd2, 0x68, 0x74, 0xcb, 0x4f, 0xd2, 0x77, 0xa2, 0xe5, 0xf4, 0x67, 0xc4, 0x39, 0xa6, 0xf2, 0xa7, + 0x9d, 0xc4, 0x7b, 0x93, 0x9a, 0x0b, 0x49, 0x10, 0x75, 0xad, 0x6c, 0xef, 0xc6, 0x84, 0xea, 0x67, + 0xb9, 0x7b, 0x47, 0xff, 0xa4, 0x09, 0xe7, 0xa0, 0xc0, 0xf6, 0x0c, 0x66, 0xcf, 0x2d, 
0x1b, 0x83, + 0xfd, 0x98, 0x69, 0x8b, 0x4a, 0x20, 0x6f, 0x75, 0x1c, 0x07, 0x0e, 0xd5, 0x31, 0x58, 0xf5, 0x8c, + 0x16, 0x94, 0x6c, 0xb8, 0x62, 0x68, 0xfe, 0x70, 0x50, 0x43, 0xcc, 0x24, 0x8b, 0x1c, 0x5b, 0x79, + 0xb0, 0x07, 0xc8, 0x98, 0x2a, 0x27, 0xdb, 0xc6, 0xd8, 0xa2, 0xca, 0x57, 0x8a, 0xcd, 0xa2, 0x2e, + 0xc1, 0xa1, 0x8c, 0xcc, 0x05, 0x34, 0x68, 0x37, 0x27, 0x66, 0xeb, 0x3c, 0x58, 0x1d, 0x78, 0xc3, + 0x26, 0x54, 0x83, 0x16, 0x31, 0x6c, 0x90, 0x1f, 0x59, 0x07, 0xb5, 0x0c, 0x0c, 0x68, 0x29, 0xa9, + 0x0f, 0x98, 0x8b, 0x04, 0xe7, 0x01, 0x4b, 0xf6, 0xd4, 0x6d, 0xad, 0xab, 0x5e, 0x57, 0x34, 0xef, + 0x7b, 0xef, 0x0f, 0x04, 0x61, 0x1d, 0xca, 0x52, 0x30, 0x05, 0x1c, 0x8b, 0x89, 0xd7, 0x1c, 0x5a, + 0xf3, 0xf4, 0xd8, 0x4f, 0x4a, 0xce, 0x09, 0x0f, 0xe2, 0xf8, 0x46, 0x7d, 0x26, 0x29, 0x63, 0x7b, + 0xdd, 0x17, 0xd8, 0xc2, 0xd7, 0x5e, 0x72, 0xfe, 0x98, 0x6f, 0x4f, 0x2a, 0x72, 0xa1, 0x57, 0xc9, + 0xc3, 0x03, 0x07, 0xbe, 0x1f, 0x25, 0x42, 0x51, 0x9e, 0x18, 0x13, 0x28, 0xd8, 0xa3, 0x6e, 0x4b, + 0x5e, 0x86, 0x8a, 0x73, 0x52, 0x6f, 0x51, 0x8b, 0xdd, 0x6f, 0x61, 0xc7, 0x34, 0xc3, 0x8c, 0xb8, + 0x27, 0x20, 0xae, 0x23, 0x13, 0xdf, 0x64, 0x05, 0xb9, 0xea, 0xdf, 0x83, 0xe4, 0x66, 0x01, 0x86, + 0x61, 0xd7, 0x1d, 0x4b, 0x38, 0xb3, 0xcf, 0xc9, 0x4f, 0x76, 0x5c, 0x64, 0x4a, 0x8c, 0x00, 0x08, + 0xb0, 0x25, 0x77, 0xb0, 0x42, 0x9b, 0xe6, 0xc5, 0x73, 0xcb, 0xb4, 0xd9, 0x07, 0x08, 0x6d, 0xcc, + 0x04, 0xe8, 0x99, 0xfe, 0x85, 0x94, 0x1a, 0x02, 0x84, 0xa4, 0x8b, 0x34, 0x8e, 0xcd, 0x2a, 0x3d, + 0xbd, 0xf1, 0xa4, 0x08, 0x17, 0xf4, 0x2e, 0xc6, 0x5a, 0x99, 0x45, 0xfc, 0x52, 0x04, 0x69, 0xa1, + 0x47, 0x84, 0xba, 0xbd, 0x36, 0xed, 0xc7, 0x0f, 0xf1, 0xf4, 0x72, 0x58, 0xe8, 0x22, 0x52, 0x3f, + 0x3f, 0xb0, 0xc3, 0x2d, 0xde, 0xe9, 0x6b, 0xe8, 0x97, 0x91, 0xf7, 0x28, 0x35, 0xb2, 0x10, 0x61, + 0xa0, 0x9c, 0x6e, 0xa5, 0x9a, 0x0c, 0x4e, 0x07, 0xcf, 0xa0, 0x33, 0xa3, 0x6b, 0x81, 0x10, 0x6d, + 0x9b, 0xb3, 0xe1, 0x90, 0xc5, 0x2e, 0x2b, 0xa0, 0xfb, 0x4e, 0x6e, 0x1a, 0x68, 0xb1, 0xcb, 0x0a, + 0xda, 0xe2, 0x16, 0x28, 0x77, 0x50, 0x62, 0xe8, 0x08, 0xce, 0x80, 0x1e, 0x84, 0xc7, 0xb4, 0x64, + 0x8a, 0x6d, 0x6c, 0x5b, 0x19, 0xae, 0x2b, 0x66, 0x7a, 0x1a, 0xf3, 0xae, 0xdf, 0x40, 0xcd, 0xd3, + 0x48, 0x11, 0xb6, 0x03, 0x08, 0x97, 0xeb, 0x67, 0x1b, 0x10, 0xcf, 0x60, 0x1f, 0xe8, 0x3d, 0xcf, + 0xd8, 0xaf, 0x56, 0x63, 0x4d, 0x25, 0x8a, 0xc9, 0xb2, 0x82, 0x3c, 0x14, 0xda, 0x18, 0x95, 0xa5, + 0x37, 0x5c, 0x09, 0x6e, 0xfd, 0x89, 0x57, 0xaa, 0x44, 0xca, 0x8b, 0xcb, 0x1b, 0x04, 0x08, 0x0f, + 0xd4, 0xb1, 0xdc, 0x61, 0xd6, 0xe6, 0xe8, 0x50, 0xd4, 0x07, 0x06, 0x79, 0xd3, 0x67, 0xe1, 0x12, + 0x3b, 0x7e, 0x4d, 0x80, 0x1d, 0x2a, 0x3d, 0xab, 0x50, 0x2e, 0x7a, 0x00, 0x9c, 0x75, 0xe2, 0x01, + 0xff, 0xc5, 0x97, 0x83, 0xe3, 0x01, 0x3e, 0x20, 0xb0, 0x2d, 0x12, 0x95, 0x9e, 0x30, 0x7f, 0x74, + 0x4d, 0x56, 0xa5, 0x6f, 0x84, 0x8b, 0x59, 0x5a, 0x70, 0xd9, 0x32, 0xdc, 0xfa, 0xa3, 0xb3, 0x2b, + 0xea, 0xe2, 0x69, 0xea, 0xbe, 0x33, 0xd2, 0x51, 0x67, 0xa1, 0xb1, 0xdc, 0xf5, 0x45, 0x78, 0xa1, + 0x7b, 0xbc, 0xb2, 0xb1, 0xdb, 0xa2, 0xb9, 0x92, 0x37, 0x45, 0x81, 0x8b, 0xcf, 0x48, 0x3a, 0xf1, + 0x65, 0x34, 0xaf, 0xf1, 0x49, 0x6c, 0x23, 0x07, 0x1b, 0x93, 0x24, 0x8e, 0x1a, 0xb1, 0x51, 0xf7, + 0xbf, 0x70, 0x79, 0x33, 0xa9, 0x3b, 0x5c, 0x04, 0xf3, 0x2d, 0x0f, 0xf0, 0x83, 0xa7, 0x8e, 0xd6, + 0x10, 0x7b, 0xef, 0x68, 0x46, 0x93, 0x6e, 0x17, 0x12, 0xbc, 0xd7, 0x3f, 0x4d, 0xf2, 0x47, 0x95, + 0x18, 0x2d, 0x20, 0x13, 0x8d, 0xc0, 0x51, 0xad, 0x32, 0x2d, 0x43, 0xf0, 0xdb, 0xa8, 0x65, 0x9b, + 0xdb, 0xe1, 0xa5, 0x07, 0x6c, 0x73, 0xed, 0xc3, 0x3f, 0x38, 0x09, 0xac, 0xd1, 0x18, 0xad, 0x99, + 0x89, 0xb6, 
0x8e, 0x15, 0x31, 0x4f, 0x4b, 0xc4, 0xc5, 0xb8, 0x53, 0xa0, 0x00, 0x28, 0x41, 0x20, + 0x4c, 0x63, 0xa4, 0x82, 0x90, 0x48, 0x41, 0x90, 0x4c, 0x4e, 0x98, 0xa2, 0xe7, 0x2d, 0x68, 0xeb, + 0x5f, 0x7d, 0x44, 0xbe, 0x7f, 0xa5, 0x41, 0xaf, 0x5f, 0x38, 0x2c, 0x28, 0x8b, 0x21, 0x18, 0x31, + 0xa1, 0x77, 0xe9, 0xf0, 0xc0, 0x1d, 0x7d, 0x20, 0xba, 0xe3, 0x2f, 0xb3, 0x82, 0xb1, 0x7b, 0xa5, + 0x38, 0x11, 0xf6, 0x85, 0x9d, 0x15, 0x65, 0x0b, 0x66, 0x99, 0x6b, 0x97, 0x24, 0x12, 0xda, 0x3b, + 0x82, 0x2b, 0xe7, 0xcf, 0x5b, 0x28, 0x19, 0xc1, 0xcf, 0x1d, 0x7b, 0x50, 0x0d, 0x0f, 0x16, 0x8a, + 0x87, 0x80, 0x91, 0xd9, 0x11, 0x82, 0xef, 0x5e, 0xbd, 0x2d, 0x28, 0xbe, 0x7f, 0xe7, 0x03, 0x5f, + 0xf5, 0x59, 0xb7, 0xca, 0x54, 0x03, 0xb7, 0x72, 0x54, 0x78, 0xe9, 0xf6, 0x31, 0x9e, 0xbb, 0xcd, + 0x27, 0x2e, 0xfe, 0x3c, 0x65, 0x29, 0xce, 0x34, 0x0b, 0x3b, 0x77, 0x96, 0xc7, 0x0b, 0x23, 0xc1, + 0x0e, 0xce, 0x0b, 0x40, 0x91, 0x76, 0x73, 0xa9, 0x3a, 0x16, 0x56, 0xe3, 0xef, 0x20, 0xaf, 0xba, + 0x79, 0xd7, 0xac, 0xea, 0xf2, 0xed, 0x52, 0xfb, 0xcf, 0x8c, 0x91, 0x0b, 0xf3, 0x86, 0x42, 0x72, + 0x57, 0xbf, 0xf5, 0xe0, 0x25, 0x20, 0x11, 0xc9, 0x43, 0xad, 0x7a, 0x30, 0x46, 0xf6, 0x08, 0x82, + 0xad, 0x9d, 0x1c, 0xca, 0x90, 0xeb, 0xab, 0x8f, 0xd4, 0x2f, 0xbd, 0x98, 0xba, 0x05, 0xe3, 0x6c, + 0x29, 0xd7, 0x3f, 0xfc, 0x32, 0xc4, 0x4d, 0xe7, 0xf0, 0xa6, 0x2d, 0xa0, 0xf8, 0xe1, 0xb6, 0xec, + 0x49, 0x04, 0x59, 0x6b, 0xa3, 0xed, 0x38, 0xdb, 0xad, 0xac, 0x6e, 0xef, 0x2e, 0xa0, 0x5c, 0x19, + 0xaf, 0x64, 0x39, 0xbb, 0x2f, 0xfa, 0xbb, 0x70, 0x82, 0xb5, 0x24, 0xbd, 0x28, 0xa5, 0xc6, 0x90, + 0xfc, 0x0d, 0xdd, 0x98, 0x97, 0xf8, 0xa8, 0xeb, 0xf1, 0x07, 0x53, 0x4e, 0xb8, 0xb6, 0x42, 0x69, + 0xd9, 0x7d, 0x12, 0x10, 0x4b, 0x7e, 0x93, 0xd3, 0x99, 0xe1, 0xbc, 0x08, 0xa1, 0xc2, 0x11, 0x43, + 0x9b, 0x0c, 0x78, 0x33, 0x74, 0x18, 0x59, 0x38, 0x74, 0x16, 0x81, 0x6c, 0xd1, 0xaa, 0x27, 0xa5, + 0xa7, 0x9a, 0xe3, 0x21, 0x2d, 0xe8, 0x00, 0xa5, 0x9d, 0x0f, 0xbc, 0x98, 0xa3, 0x1c, 0x15, 0xdc, + 0x68, 0x45, 0xe1, 0xab, 0xe0, 0x25, 0x33, 0x30, 0x59, 0x98, 0x3a, 0x1f, 0x5a, 0xcf, 0x44, 0xf1, + 0xb7, 0xde, 0x80, 0xb4, 0x3a, 0xf5, 0xa0, 0x74, 0xeb, 0x8a, 0x86, 0x4c, 0x3b, 0xdb, 0x03, 0x5f, + 0x4d, 0x81, 0xfe, 0x14, 0x3c, 0xa2, 0xdc, 0xe1, 0x65, 0xc8, 0xc1, 0x34, 0xac, 0x90, 0x8c, 0x82, + 0x9b, 0xe8, 0x0f, 0x50, 0x3c, 0x8c, 0xd5, 0x89, 0xd2, 0x98, 0x54, 0x3e, 0x1d, 0xee, 0xeb, 0xa8, + 0x5f, 0xa6, 0x03, 0xbb, 0xbf, 0x15, 0x2c, 0xf9, 0x10, 0x44, 0xe2, 0x0d, 0xfb, 0xd1, 0xef, 0xde, + 0xb8, 0x0c, 0x1f, 0x0b, 0x5b, 0x84, 0xb6, 0xdc, 0xf7, 0x7c, 0x65, 0xa9, 0x25, 0x8b, 0x87, 0x7f, + 0x53, 0x3d, 0x07, 0x29, 0xa9, 0xe9, 0x11, 0x80, 0x50, 0x8b, 0xce, 0xfe, 0x68, 0x7a, 0x63, 0xb7, + 0x8a, 0xa6, 0x4c, 0xf5, 0x1c, 0xe7, 0xfc, 0x2a, 0x4c, 0xe5, 0x20, 0x1b, 0xab, 0xb7, 0xec, 0x20, + 0xcb, 0xbd, 0x56, 0x92, 0x01, 0x35, 0xd0, 0x48, 0x5d, 0xbd, 0x84, 0x11, 0xa2, 0xfc, 0x4c, 0xea, + 0x3d, 0xf0, 0x23, 0xc0, 0xd8, 0xa4, 0x30, 0x82, 0x65, 0xac, 0x16, 0x88, 0x9d, 0x88, 0x3c, 0xcd, + 0x71, 0xab, 0x43, 0xcc, 0xdf, 0xc7, 0xbe, 0x7f, 0x24, 0x50, 0xc9, 0xdc, 0x6a, 0xfd, 0xa9, 0x53, + 0x86, 0x4a, 0x42, 0xb8, 0x2f, 0x36, 0x85, 0x3b, 0xee, 0xd7, 0x17, 0x05, 0x2d, 0x3d, 0x9b, 0x52, + 0xcf, 0xa6, 0x4b, 0x65, 0x94, 0x56, 0x02, 0xaa, 0x21, 0x1b, 0x9c, 0xad, 0x25, 0xa2, 0xe2, 0x61, + 0x58, 0x74, 0x3c, 0x0b, 0x39, 0xe9, 0xb9, 0x58, 0x8d, 0x31, 0x57, 0x92, 0xcb, 0xd9, 0x02, 0x3a, + 0xb8, 0x2a, 0x9a, 0xab, 0x62, 0x66, 0x99, 0xc1, 0xdd, 0x87, 0xef, 0x08, 0x25, 0x94, 0xda, 0xfa, + 0x96, 0x57, 0x46, 0xb5, 0x15, 0x49, 0x01, 0x17, 0x26, 0xa8, 0x15, 0xce, 0x39, 0x46, 0xfa, 0x8d, + 0x4d, 0xc8, 0x5b, 0xf1, 0xa6, 0xdb, 
0xde, 0xe0, 0x64, 0x65, 0xce, 0x2e, 0x8f, 0x4b, 0xa8, 0x66, + 0x17, 0x09, 0x02, 0xb1, 0x1d, 0xc5, 0xc4, 0xd8, 0xf3, 0x5b, 0x8d, 0xf8, 0xa4, 0x4a, 0x4a, 0xcb, + 0xfa, 0x24, 0xca, 0x33, 0x7c, 0x04, 0x87, 0x7f, 0xe7, 0xed, 0x09, 0x54, 0x28, 0xbe, 0x2a, 0x22, + 0xd6, 0xfe, 0xce, 0x19, 0xd1, 0x3e, 0x49, 0x92, 0x08, 0x1c, 0x09, 0xb8, 0x57, 0x1f, 0x4b, 0x81, + 0x71, 0xa9, 0x74, 0x51, 0x9d, 0x70, 0x9b, 0xbb, 0x2f, 0xa1, 0xfe, 0x4a, 0x2a, 0xb5, 0xbb, 0x5f, + 0x21, 0x24, 0xda, 0xb9, 0x9c, 0x8b, 0x60, 0xde, 0x96, 0xfb, 0xc3, 0xbf, 0x19, 0x5a, 0xfb, 0x08, + 0x23, 0x40, 0x59, 0x4f, 0x80, 0xe7, 0xcf, 0x83, 0xf8, 0x4e, 0x82, 0xd2, 0x18, 0xb3, 0xfb, 0x28, + 0x92, 0x64, 0xde, 0xac, 0x8f, 0x75, 0x51, 0xc6, 0xf2, 0x32, 0xc2, 0x6e, 0x32, 0x36, 0xa7, 0x06, + 0x0b, 0x44, 0x50, 0xe4, 0xc4, 0xe2, 0xd1, 0x1b, 0x88, 0x95, 0x03, 0x52, 0xa1, 0xf8, 0xe2, 0x18, + 0x21, 0x9f, 0x52, 0xc0, 0x42, 0xd6, 0x1c, 0x52, 0xd3, 0xf5, 0x70, 0x32, 0x66, 0x64, 0xf3, 0x8c, + 0xca, 0x31, 0xcd, 0xe0, 0x34, 0x8b, 0x6d, 0x07, 0xc4, 0x0e, 0x05, 0x51, 0x7c, 0xd2, 0x92, 0x4d, + 0x65, 0xa6, 0xc7, 0x09, 0x94, 0x53, 0xb0, 0xee, 0x4f, 0xb9, 0xab, 0x9a, 0x99, 0x03, 0xa7, 0xce, + 0x4e, 0xdb, 0x86, 0x8e, 0x43, 0x09, 0x7a, 0x70, 0x66, 0x52, 0x93, 0xa4, 0xdf, 0x2a, 0x74, 0xa3, + 0x40, 0x18, 0xcb, 0x6d, 0xe0, 0x34, 0xff, 0x85, 0xac, 0x18, 0x42, 0x33, 0x48, 0xa6, 0xbe, 0x9a, + 0xbe, 0x52, 0x65, 0x59, 0x66, 0x40, 0xe3, 0xc1, 0x39, 0x48, 0xc0, 0x2e, 0xd4, 0x4f, 0xa2, 0x56, + 0xb2, 0xd8, 0x6c, 0xc1, 0x5b, 0x59, 0x16, 0xa6, 0x96, 0x1a, 0x3e, 0x85, 0xc2, 0x7b, 0x2b, 0xce, + 0xe1, 0xfc, 0x03, 0x4c, 0x02, 0xea, 0xfd, 0x6a, 0x93, 0xf0, 0xc9, 0x21, 0x5d, 0xe7, 0x0c, 0xf0, + 0x22, 0x87, 0xf1, 0xda, 0xea, 0x46, 0x64, 0xb5, 0x9f, 0x48, 0xd8, 0x18, 0x8e, 0x2a, 0xd5, 0x58, + 0x53, 0x1c, 0xdb, 0xec, 0x2f, 0xc4, 0xf5, 0x21, 0x61, 0x36, 0xc0, 0xd7, 0x7a, 0x7e, 0x67, 0x75, + 0x44, 0xcd, 0xf3, 0x77, 0x6f, 0x49, 0xec, 0x05, 0x06, 0xeb, 0x89, 0x8a, 0xdc, 0x3e, 0xc9, 0x5b, + 0xdc, 0x12, 0x81, 0xc2, 0x0d, 0xc0, 0xb8, 0x9a, 0x83, 0x87, 0x71, 0x65, 0x80, 0x20, 0x1e, 0x80, + 0x27, 0x9c, 0x52, 0x8c, 0x29, 0x56, 0x28, 0xeb, 0xf4, 0xd4, 0x18, 0xf0, 0xa8, 0xe3, 0xbb, 0x40, + 0x3b, 0xb3, 0xf3, 0x0c, 0xef, 0x8d, 0xa2, 0xa7, 0xa0, 0x9e, 0x9a, 0xa2, 0xab, 0x47, 0x2c, 0x22, + 0x0f, 0xbb, 0x10, 0xa0, 0xc5, 0x2d, 0xd1, 0x42, 0xaa, 0x57, 0x00, 0x53, 0x22, 0x5b, 0xd4, 0x35, + 0x05, 0xe9, 0x1b, 0x68, 0xde, 0x92, 0x99, 0x58, 0xeb, 0x04, 0x6f, 0x2f, 0xed, 0xc2, 0x2a, 0x33, + 0x53, 0x82, 0xbb, 0x5e, 0xa2, 0x68, 0x05, 0x89, 0xc4, 0xf7, 0x87, 0x8a, 0xd2, 0x8d, 0x5f, 0x90, + 0xf3, 0x04, 0xdf, 0x87, 0xcf, 0xbb, 0x1a, 0x66, 0x39, 0xe1, 0x2e, 0xac, 0x47, 0xd7, 0xfa, 0x12, + 0xa3, 0x60, 0xa1, 0x8d, 0x2d, 0x57, 0x43, 0x27, 0xbf, 0x03, 0x9e, 0x51, 0xa6, 0xfa, 0x71, 0x1d, + 0xaf, 0x91, 0x0b, 0x55, 0x95, 0xef, 0xf8, 0xe5, 0x3a, 0xa6, 0x58, 0xef, 0xb2, 0xc7, 0xdd, 0x96, + 0x40, 0xb1, 0x00, 0x09, 0xca, 0x73, 0x21, 0xcb, 0x27, 0x44, 0x1f, 0x10, 0xca, 0xc5, 0x76, 0x35, + 0xae, 0x36, 0xac, 0x99, 0x6e, 0x32, 0x95, 0x93, 0x83, 0x8b, 0x00, 0x0e, 0xe2, 0x23, 0x90, 0xd4, + 0x93, 0xde, 0x87, 0x63, 0xcc, 0xa2, 0xdc, 0x49, 0xd3, 0x04, 0x70, 0x6b, 0x2c, 0x07, 0x34, 0x79, + 0x09, 0x38, 0x55, 0xc2, 0xfa, 0xe3, 0xd2, 0x03, 0x5d, 0x86, 0xa4, 0xff, 0x25, 0x8d, 0xfc, 0x5c, + 0x79, 0x1a, 0xd2, 0x44, 0xa3, 0xf8, 0x39, 0x56, 0x75, 0x0f, 0x1c, 0x0d, 0xc1, 0x10, 0xe8, 0x98, + 0x52, 0xca, 0x5b, 0x9e, 0x3c, 0x38, 0x14, 0x87, 0x34, 0x70, 0xeb, 0x3a, 0x50, 0xc2, 0x7b, 0x27, + 0x06, 0x42, 0xe3, 0xbe, 0x03, 0x90, 0xd7, 0x2e, 0x27, 0x62, 0xed, 0x86, 0x4f, 0xb1, 0x08, 0xa0, + 0xff, 0x40, 0x1e, 0x2e, 0xd4, 0x75, 0x02, 0xcc, 0x1f, 0x7d, 
0x00, 0x73, 0x74, 0x61, 0x81, 0x10, + 0x9e, 0xf4, 0x93, 0xc3, 0x3b, 0x6c, 0xce, 0xca, 0x10, 0xc4, 0x61, 0xe2, 0x45, 0x48, 0x49, 0x21, + 0x5b, 0x2f, 0xa5, 0x99, 0x10, 0xc5, 0x42, 0x17, 0xe1, 0x15, 0xc5, 0x04, 0xcd, 0x1e, 0x57, 0x03, + 0x20, 0xe4, 0x26, 0x14, 0x0b, 0xc9, 0xbc, 0xe7, 0xa4, 0x4c, 0x3b, 0xe9, 0x15, 0xe8, 0xed, 0x19, + 0xc4, 0x7a, 0xe4, 0x95, 0xdd, 0xee, 0xbc, 0x38, 0x38, 0x4b, 0x4c, 0xec, 0xc6, 0x13, 0xa5, 0x6e, + 0x41, 0x88, 0x40, 0x0b, 0xc4, 0xa2, 0xbf, 0xac, 0xfb, 0x2d, 0x9a, 0xc6, 0xf6, 0xc9, 0xbb, 0x2d, + 0xf3, 0x5a, 0x6f, 0x38, 0x56, 0xd8, 0xf7, 0x6a, 0x08, 0xe2, 0x3e, 0xea, 0x25, 0x2e, 0x43, 0x0b, + 0xb9, 0x62, 0xde, 0x16, 0xa5, 0x2f, 0xc5, 0x31, 0x0d, 0x4b, 0x55, 0x7f, 0x2a, 0x0c, 0x61, 0x0e, + 0xe4, 0x89, 0xcf, 0x10, 0xd9, 0x9e, 0xb2, 0xac, 0x09, 0xae, 0x8b, 0x7a, 0x64, 0xdc, 0x64, 0xcd, + 0x85, 0x32, 0x45, 0x56, 0x24, 0xd1, 0x1c, 0x14, 0x31, 0xdb, 0xa8, 0xd3, 0xa7, 0xa0, 0x38, 0xa8, + 0x87, 0x3f, 0x03, 0xce, 0x03, 0x7d, 0xc1, 0xc1, 0x72, 0x64, 0x76, 0x96, 0xbd, 0x0a, 0x3e, 0x4b, + 0x1c, 0xc4, 0x56, 0x7f, 0x87, 0xad, 0x54, 0x12, 0xec, 0xf0, 0x58, 0x10, 0x40, 0x99, 0xb5, 0x68, + 0xbd, 0x6a, 0x6e, 0xb7, 0xd8, 0x81, 0x63, 0xd2, 0x07, 0xac, 0x71, 0x85, 0x6d, 0x98, 0xb9, 0xd8, + 0x06, 0xee, 0x1d, 0xed, 0x5b, 0x46, 0xfd, 0xdd, 0x15, 0x4a, 0x14, 0x1b, 0xe5, 0x87, 0x7c, 0x44, + 0xaa, 0x2b, 0xed, 0x23, 0xc4, 0x9b, 0x02, 0x8c, 0x88, 0x60, 0x4e, 0x11, 0xe4, 0xe4, 0x74, 0xed, + 0xfd, 0x65, 0x6f, 0xa4, 0x37, 0x89, 0x65, 0xdd, 0xec, 0xea, 0x50, 0x60, 0xa9, 0x03, 0x8d, 0x5d, + 0x70, 0x8d, 0x8a, 0x1f, 0xe8, 0xac, 0x25, 0xda, 0x7c, 0x81, 0x65, 0x34, 0xd4, 0x50, 0xff, 0x80, + 0x7e, 0x14, 0x11, 0x1b, 0x7b, 0x8c, 0xde, 0x44, 0x82, 0x00, 0x3e, 0xb7, 0xaa, 0x7d, 0xd4, 0x23, + 0xdc, 0x9a, 0x5e, 0xa0, 0xb9, 0x8f, 0xc2, 0x06, 0x28, 0xad, 0xc7, 0x28, 0x50, 0xcc, 0xd3, 0x14, + 0x64, 0x53, 0xdb, 0x6c, 0x7b, 0x23, 0x34, 0xbc, 0xf2, 0x72, 0xc5, 0x28, 0x4e, 0x28, 0xf4, 0xee, + 0xad, 0x81, 0xc6, 0x2f, 0x14, 0xe9, 0xb2, 0x66, 0xe1, 0x4d, 0x20, 0xa9, 0x74, 0xed, 0xde, 0x90, + 0x85, 0x8c, 0x56, 0x87, 0x56, 0x91, 0xed, 0xb7, 0x3e, 0xa7, 0x67, 0xef, 0x0e, 0xe3, 0xf6, 0xb7, + 0xec, 0x75, 0x04, 0x96, 0xb7, 0x9a, 0xad, 0x79, 0x88, 0x0d, 0xee, 0x10, 0x31, 0x6e, 0x53, 0x85, + 0x4d, 0x4f, 0x56, 0xd2, 0x0b, 0xf7, 0x2f, 0x8d, 0x70, 0x25, 0x5c, 0x49, 0xb6, 0xa1, 0xf4, 0x45, + 0x2a, 0x4c, 0x35, 0x3f, 0xdc, 0x1f, 0xbe, 0x44, 0x80, 0x63, 0x29, 0xc1, 0x33, 0xcf, 0xc3, 0xa5, + 0x25, 0x58, 0x3a, 0xe1, 0x21, 0x1b, 0x03, 0x4f, 0xde, 0xc6, 0xab, 0xc0, 0xa0, 0x5a, 0x8f, 0xae, + 0xe5, 0x14, 0xbd, 0xb0, 0x16, 0x28, 0x11, 0x1c, 0x3a, 0x50, 0x3a, 0xe3, 0xcd, 0x8b, 0x0e, 0xd2, + 0x3f, 0x63, 0x59, 0x11, 0x9e, 0x04, 0x81, 0x20, 0x10, 0xab, 0x8e, 0xcc, 0x74, 0x74, 0xae, 0x9a, + 0x70, 0x00, 0x2a, 0x9d, 0xfe, 0xbd, 0xac, 0x6e, 0x9d, 0x9c, 0x6c, 0x6f, 0xd9, 0xdd, 0x96, 0x88, + 0xd5, 0x3e, 0x68, 0x0f, 0x03, 0x12, 0x6a, 0x52, 0x80, 0x58, 0xfc, 0x7f, 0x8a, 0x21, 0x76, 0x5e, + 0xb1, 0x3c, 0x80, 0x54, 0x38, 0xc1, 0xb0, 0xcd, 0x2b, 0xe6, 0x04, 0x15, 0x40, 0x1d, 0x8c, 0x40, + 0x6c, 0x86, 0x3f, 0xfd, 0x79, 0xbc, 0x6d, 0xe8, 0x5b, 0x57, 0xb6, 0x85, 0x9b, 0x71, 0x09, 0x2a, + 0xce, 0xf5, 0x63, 0x86, 0xe6, 0xee, 0x64, 0x84, 0x03, 0x90, 0x93, 0x44, 0xe3, 0x5f, 0x3a, 0x53, + 0x35, 0xa1, 0x88, 0x83, 0x9e, 0x9b, 0xf3, 0x80, 0x6a, 0x2e, 0x51, 0xd5, 0xb0, 0xf0, 0x2c, 0xa7, + 0xd3, 0xff, 0x60, 0xa3, 0x33, 0x08, 0x47, 0x20, 0xc2, 0xe1, 0x8c, 0x1f, 0x35, 0x1e, 0x53, 0x0f, + 0x4f, 0xe1, 0x27, 0x8f, 0xd1, 0xbd, 0x8e, 0x05, 0x41, 0x6c, 0x82, 0x50, 0x95, 0x71, 0x9a, 0x3b, + 0x7a, 0x7e, 0x79, 0x2f, 0xd4, 0xa2, 0x4b, 0xd4, 0xb8, 0x4a, 0x80, 0x3f, 0x95, 0x09, 
0x4a, 0xe2, + 0xc0, 0x79, 0x19, 0x08, 0x71, 0x29, 0xfd, 0xe1, 0x5f, 0x55, 0x43, 0x6a, 0xd1, 0xb5, 0xe9, 0x71, + 0x13, 0x64, 0xb7, 0x72, 0x96, 0xc2, 0x7e, 0x03, 0x7b, 0x06, 0x45, 0xba, 0x8c, 0xc7, 0xd8, 0x2e, + 0xb1, 0xc3, 0xe1, 0x08, 0xc5, 0xd3, 0xca, 0x47, 0x0b, 0x19, 0x71, 0xc9, 0xb6, 0x9f, 0xb5, 0xec, + 0x60, 0x70, 0xbc, 0xdf, 0x66, 0x18, 0x14, 0xaa, 0x62, 0x7c, 0x7b, 0x4c, 0xc7, 0x2b, 0x31, 0x4e, + 0x14, 0xf3, 0xf5, 0xb6, 0x4f, 0x80, 0x1c, 0xa4, 0x66, 0xa9, 0x6f, 0x3d, 0xf6, 0x98, 0xfa, 0x0e, + 0x95, 0x27, 0x43, 0x32, 0xb8, 0xc5, 0x07, 0x44, 0xb9, 0x1a, 0x5e, 0x0a, 0x1e, 0x7e, 0x1c, 0xaf, + 0x28, 0x16, 0x80, 0xba, 0x67, 0x59, 0x44, 0xf1, 0xae, 0x87, 0xbd, 0x8e, 0x11, 0x22, 0xdf, 0xb8, + 0x31, 0xc9, 0x52, 0xc1, 0x80, 0xb3, 0xa6, 0xee, 0xaa, 0x8b, 0x35, 0xc1, 0x29, 0xe9, 0xcb, 0xf5, + 0xb1, 0x48, 0x95, 0xf6, 0xcd, 0x4e, 0xd9, 0xf6, 0xea, 0xad, 0xca, 0xe5, 0x50, 0x4c, 0x2c, 0xbf, + 0x13, 0x92, 0xe7, 0xfe, 0xc4, 0x72, 0xef, 0x24, 0x9d, 0x00, 0xb0, 0x20, 0x81, 0x7d, 0x81, 0x59, + 0x9a, 0xa8, 0x9d, 0xf0, 0x34, 0xe8, 0x70, 0xa4, 0x09, 0x67, 0x48, 0xf3, 0x26, 0x25, 0xb2, 0x34, + 0x7c, 0xf8, 0x96, 0x7e, 0xad, 0xdc, 0x51, 0x41, 0x9a, 0x29, 0x5d, 0x90, 0x9b, 0x8d, 0x6f, 0x38, + 0x6f, 0x6e, 0x7c, 0x4a, 0xed, 0x03, 0xcb, 0xbf, 0x40, 0x7c, 0x9a, 0xbc, 0x22, 0x32, 0xbb, 0x39, + 0x91, 0xb4, 0x29, 0x57, 0xbe, 0x27, 0x39, 0xbc, 0x29, 0x01, 0x96, 0x26, 0x44, 0x66, 0x2b, 0x6e, + 0xb2, 0xda, 0xd9, 0x72, 0x92, 0xa0, 0xaa, 0x68, 0x9a, 0xd3, 0xa1, 0x23, 0x46, 0x8c, 0xda, 0x1a, + 0x50, 0x0e, 0x9a, 0x7f, 0xf2, 0xe4, 0xa0, 0xc8, 0x59, 0x5b, 0xb3, 0xa3, 0xe3, 0xbc, 0xcb, 0x03, + 0x81, 0xf4, 0x26, 0x13, 0xc0, 0xd0, 0xd7, 0xe6, 0x08, 0xce, 0x94, 0xb7, 0x1f, 0x68, 0x17, 0xf1, + 0x82, 0xe1, 0xec, 0x1d, 0x33, 0x9c, 0x16, 0x5d, 0xa8, 0xfb, 0xc9, 0x74, 0x3e, 0xde, 0x0d, 0x39, + 0x92, 0xf8, 0xa4, 0x06, 0x13, 0x6d, 0x16, 0xe0, 0xf7, 0xb7, 0x86, 0xc7, 0x5f, 0x4d, 0x49, 0x59, + 0x43, 0x1a, 0x5d, 0xcf, 0x46, 0xfe, 0x41, 0x81, 0xd7, 0xe5, 0x8c, 0xa4, 0x80, 0xfc, 0xbc, 0x55, + 0x41, 0xb9, 0xe4, 0x17, 0xf3, 0x82, 0xf4, 0xf8, 0xfd, 0xbe, 0x82, 0x8e, 0x30, 0xed, 0x98, 0xf2, + 0xd0, 0xe3, 0x63, 0xf2, 0xa5, 0xe8, 0x05, 0x9c, 0xa5, 0x2c, 0x5b, 0x71, 0xbb, 0xb7, 0xdf, 0xd8, + 0x1e, 0x2a, 0xa7, 0xe7, 0x6b, 0xf0, 0x46, 0x4a, 0xe0, 0x99, 0xee, 0x03, 0xe1, 0x07, 0xef, 0x9b, + 0x19, 0x5d, 0xd3, 0x5e, 0xb5, 0xd4, 0x65, 0xe1, 0x0c, 0xa8, 0x45, 0x25, 0xe7, 0x58, 0xe5, 0x6e, + 0x4b, 0xd2, 0x2d, 0x5d, 0xd6, 0xf6, 0x68, 0xf1, 0x98, 0x1a, 0x6a, 0x15, 0x57, 0xdd, 0xe0, 0xa1, + 0x22, 0xf0, 0x42, 0xe6, 0x5b, 0xeb, 0x05, 0x31, 0xcb, 0x80, 0x6e, 0x29, 0xf9, 0x6b, 0x5f, 0x9d, + 0x3c, 0xa1, 0xf7, 0xbd, 0x7f, 0xb4, 0x6a, 0x9c, 0xa4, 0xf0, 0x41, 0xab, 0xf6, 0xa7, 0xa8, 0x8b, + 0x8f, 0x5f, 0xc2, 0x8a, 0xa2, 0x26, 0x52, 0x16, 0x30, 0xc0, 0x4f, 0x57, 0xa6, 0xde, 0x2e, 0xd6, + 0xee, 0xe5, 0x39, 0x9d, 0xd8, 0x0e, 0x77, 0x83, 0x04, 0x2d, 0xee, 0xca, 0x46, 0x1c, 0xce, 0xd3, + 0x32, 0x8b, 0xf5, 0xbb, 0xab, 0x6c, 0x07, 0x9b, 0x18, 0x21, 0xc9, 0xe6, 0xd3, 0x97, 0xbc, 0xd5, + 0x3a, 0xe5, 0xea, 0x8a, 0x9d, 0x50, 0x51, 0x58, 0x1a, 0xc1, 0x0d, 0xa7, 0xe1, 0xba, 0x98, 0x98, + 0xa4, 0x03, 0xc3, 0x7e, 0x8f, 0xd3, 0x07, 0x70, 0x47, 0x05, 0x50, 0xfe, 0x0f, 0x9d, 0xf1, 0xba, + 0x28, 0xe2, 0x81, 0x02, 0xfb, 0xd5, 0x28, 0xc5, 0xaa, 0x3b, 0xe3, 0xb4, 0xda, 0xfc, 0xdb, 0x09, + 0x09, 0x35, 0xc8, 0x43, 0xd0, 0x8f, 0x66, 0x94, 0x51, 0xca, 0xdd, 0xed, 0x1f, 0xf7, 0x6c, 0x99, + 0x16, 0x6f, 0xce, 0x64, 0x5a, 0xbf, 0x29, 0x34, 0xcb, 0xdc, 0xd2, 0x36, 0x0d, 0x09, 0xc4, 0x94, + 0xc4, 0x33, 0xa8, 0x99, 0xd6, 0x07, 0x1a, 0x1a, 0xa0, 0xa4, 0x1c, 0x15, 0x89, 0xc4, 0x1e, 0xdb, + 0x97, 0xab, 
0x87, 0xbf, 0x80, 0x11, 0x5b, 0xfe, 0x2d, 0x93, 0xb1, 0x40, 0xe7, 0xdd, 0x5a, 0xa1, + 0xa4, 0x6e, 0x95, 0x4f, 0xab, 0x27, 0x70, 0x4a, 0x11, 0x4a, 0xcf, 0x5e, 0xf2, 0x08, 0xa3, 0xc4, + 0x5c, 0x4d, 0x97, 0x16, 0xe9, 0x09, 0x0b, 0x06, 0x59, 0x07, 0xf8, 0x01, 0xb0, 0xef, 0x82, 0x94, + 0xf2, 0x1d, 0x22, 0x02, 0xda, 0xc4, 0xd3, 0x51, 0x6d, 0xd2, 0xb9, 0x2a, 0xe2, 0x83, 0x43, 0x39, + 0xbd, 0x20, 0xb7, 0x98, 0x34, 0x93, 0x87, 0x81, 0x19, 0x4d, 0x34, 0x89, 0x63, 0x8c, 0xe4, 0x27, + 0x6b, 0x56, 0x33, 0x97, 0xa2, 0xa1, 0x0b, 0x9b, 0x44, 0x4b, 0x1a, 0x5b, 0x4e, 0x8f, 0x82, 0x2f, + 0xa2, 0x56, 0x5e, 0x0f, 0xcc, 0xf5, 0x6e, 0x90, 0x2e, 0x0b, 0x4d, 0x70, 0x8d, 0xb7, 0x76, 0xcd, + 0x79, 0xe9, 0x71, 0x7d, 0x43, 0x7f, 0x29, 0x59, 0x92, 0x46, 0x45, 0x1e, 0x0b, 0x46, 0x4c, 0xca, + 0xbc, 0x49, 0xe8, 0x31, 0xe9, 0x98, 0xfb, 0x65, 0xb3, 0x29, 0x05, 0x22, 0x1f, 0x9e, 0x4c, 0x88, + 0x64, 0xaf, 0x50, 0x3a, 0x82, 0x1b, 0xc0, 0x56, 0xa7, 0x69, 0xc9, 0xf8, 0xbe, 0x5a, 0xec, 0x33, + 0x25, 0x3d, 0x42, 0xd2, 0x2a, 0xa3, 0xdc, 0x69, 0xbf, 0xbe, 0x3b, 0xfb, 0x0a, 0xb9, 0xac, 0xda, + 0x11, 0xd1, 0xb9, 0x10, 0x43, 0xb6, 0x6b, 0xa7, 0x02, 0xe7, 0x88, 0x61, 0x45, 0x11, 0x8b, 0x68, + 0x49, 0x44, 0xbb, 0x83, 0xa1, 0x6a, 0x63, 0x34, 0x7c, 0x65, 0xcd, 0x16, 0x14, 0x20, 0xce, 0xcf, + 0xe3, 0x98, 0xe7, 0xbe, 0xd4, 0xd8, 0x91, 0x38, 0xa0, 0x3d, 0x86, 0xe0, 0x24, 0x20, 0xef, 0xee, + 0x84, 0xff, 0xf0, 0x2f, 0x3e, 0x0d, 0xa2, 0xae, 0x40, 0xc6, 0x9d, 0x72, 0x3b, 0xa0, 0x1c, 0xfc, + 0x0c, 0x43, 0x3b, 0x75, 0x69, 0x5a, 0xb9, 0xe6, 0x8f, 0x35, 0x46, 0xe7, 0xd8, 0xc1, 0x90, 0x56, + 0x9f, 0x77, 0xf8, 0xf6, 0x9b, 0x2e, 0x90, 0x9a, 0x94, 0x79, 0x96, 0x7c, 0xb5, 0x59, 0x8b, 0xdf, + 0xf6, 0xe9, 0x9f, 0x3d, 0x51, 0x1d, 0x29, 0xf2, 0x80, 0x2c, 0xe2, 0x50, 0x6f, 0x4b, 0x6b, 0x1a, + 0x39, 0xc2, 0x0b, 0xfd, 0x8e, 0x5a, 0x5e, 0x49, 0xff, 0x8d, 0xc8, 0x12, 0xad, 0xc6, 0x3b, 0x8b, + 0x1e, 0x9b, 0x65, 0x71, 0x14, 0xfb, 0x86, 0x30, 0x21, 0xf5, 0x56, 0x0e, 0x47, 0xeb, 0x69, 0x29, + 0xb3, 0x93, 0x0e, 0xf6, 0x72, 0x98, 0x3c, 0xcd, 0x3b, 0xa6, 0xc3, 0x95, 0xa3, 0x32, 0xd1, 0x8e, + 0xc2, 0x55, 0xb7, 0xbe, 0x29, 0x7e, 0xc6, 0x2c, 0xe9, 0x45, 0xd5, 0xc2, 0x91, 0xac, 0xa5, 0x1c, + 0x92, 0x77, 0xcc, 0x2b, 0x24, 0xa2, 0xb9, 0xe8, 0xa3, 0x77, 0xbe, 0xad, 0x30, 0x70, 0x19, 0x48, + 0xc2, 0x7a, 0xaf, 0x4f, 0xeb, 0xf3, 0xbf, 0x4d, 0x38, 0x4f, 0x11, 0x88, 0xfe, 0xdd, 0x06, 0xea, + 0x67, 0xb2, 0xb5, 0x5f, 0x0e, 0x39, 0xc0, 0xe0, 0xc2, 0x00, 0x91, 0x39, 0x20, 0xa3, 0xcc, 0x2a, + 0xb9, 0x0d, 0x5d, 0xce, 0xbc, 0x84, 0xec, 0x65, 0x0e, 0xbb, 0xca, 0x03, 0x2a, 0xe0, 0xe4, 0x0f, + 0xe1, 0x9d, 0x1a, 0x06, 0x6f, 0x70, 0xf3, 0x16, 0x10, 0xec, 0xce, 0x49, 0x5b, 0x8b, 0x1d, 0x24, + 0xdd, 0x33, 0x37, 0x4e, 0x60, 0xbd, 0xd7, 0x63, 0x81, 0xe5, 0x62, 0x61, 0x77, 0x9b, 0x75, 0xa1, + 0x9b, 0x8d, 0x5a, 0xe2, 0x69, 0x5f, 0xdf, 0x4b, 0x81, 0x1b, 0x20, 0x63, 0xe6, 0xaf, 0x6c, 0x02, + 0x34, 0xd7, 0xe6, 0xf0, 0xb3, 0x55, 0x59, 0x2f, 0xf3, 0x7b, 0x60, 0x2b, 0xa7, 0x35, 0xd4, 0xae, + 0x14, 0x5f, 0xe4, 0x69, 0x7c, 0xe9, 0x29, 0x8a, 0xcb, 0x82, 0x9e, 0x1f, 0x6f, 0xea, 0xb1, 0xb5, + 0x09, 0x19, 0xfe, 0x94, 0x32, 0x1b, 0xfd, 0x6c, 0x71, 0x31, 0x75, 0xf5, 0x22, 0xb5, 0xc3, 0xb4, + 0xd6, 0x3c, 0xdf, 0xbd, 0x91, 0x0c, 0x69, 0xb0, 0xcb, 0xcd, 0x46, 0xb7, 0x77, 0x58, 0x93, 0x74, + 0x1f, 0x7d, 0x52, 0x2c, 0xc6, 0x73, 0x44, 0x61, 0x0a, 0xbe, 0x42, 0x70, 0x61, 0xb8, 0xcf, 0x13, + 0xbb, 0xc1, 0xb6, 0xee, 0xa3, 0xb0, 0x8a, 0x53, 0xef, 0x59, 0x6d, 0xfa, 0x24, 0xe1, 0xdc, 0x6c, + 0xc4, 0xb2, 0xec, 0x1f, 0x1e, 0xf4, 0xa2, 0xd3, 0xba, 0xda, 0x0d, 0x3d, 0xc2, 0x5c, 0x72, 0x52, + 0x11, 0xbe, 0x8c, 0x44, 0x83, 0xe0, 
0x77, 0xa8, 0xc5, 0x30, 0x28, 0x90, 0xec, 0x4e, 0xd6, 0x13, + 0x38, 0x9b, 0x33, 0x8f, 0x0a, 0x5a, 0x4f, 0x5b, 0x12, 0xd9, 0x68, 0x94, 0x0f, 0x49, 0xb9, 0x84, + 0x15, 0x9d, 0xef, 0xe8, 0x0a, 0xf9, 0x5e, 0x44, 0xb8, 0x95, 0x2b, 0x95, 0xf6, 0x95, 0xb2, 0xfa, + 0xc5, 0xc8, 0x53, 0xd2, 0xe3, 0xa7, 0x55, 0x37, 0xea, 0x8e, 0xd6, 0x0f, 0x08, 0x59, 0x7b, 0xc3, + 0xb6, 0xe1, 0x72, 0x8e, 0x5e, 0x56, 0xd8, 0x00, 0x64, 0xea, 0x44, 0xd8, 0x5d, 0x85, 0x7f, 0x9e, + 0x33, 0x68, 0xe2, 0x4e, 0x41, 0x9f, 0x23, 0x7e, 0x07, 0xdb, 0x02, 0x02, 0x53, 0xc8, 0x88, 0x1c, + 0x1f, 0xd5, 0xad, 0x3a, 0x90, 0xd5, 0xf6, 0x91, 0xdd, 0xd8, 0xf9, 0x21, 0xdc, 0x43, 0x7a, 0x44, + 0x38, 0xe0, 0x83, 0x40, 0xc0, 0xcc, 0x83, 0x54, 0xe4, 0xdc, 0xca, 0x19, 0xa1, 0x42, 0x7e, 0x7b, + 0xf1, 0x7f, 0x77, 0x8e, 0x6e, 0x67, 0x90, 0x78, 0xea, 0xbb, 0x04, 0x5d, 0x29, 0x40, 0x8c, 0x96, + 0x8b, 0xf2, 0x78, 0xd8, 0x73, 0x63, 0x76, 0x2e, 0x56, 0x82, 0xc8, 0xb9, 0x55, 0x2f, 0x1d, 0x4c, + 0xf1, 0xf1, 0x20, 0xb4, 0x78, 0x85, 0x45, 0x04, 0x2f, 0xf6, 0x76, 0xe6, 0x8d, 0x95, 0xf6, 0x26, + 0x7c, 0x76, 0xd1, 0x7e, 0xbe, 0x41, 0xe5, 0x99, 0xa0, 0xa2, 0xc1, 0x11, 0x0a, 0xa1, 0x25, 0x1d, + 0x94, 0x51, 0xd1, 0x6c, 0x13, 0x89, 0xbb, 0x92, 0x1a, 0x89, 0x02, 0x2b, 0x74, 0xa6, 0x80, 0xc1, + 0xa4, 0xed, 0x7f, 0xc6, 0x10, 0xa8, 0x78, 0xea, 0xf5, 0x9f, 0x0a, 0x48, 0x39, 0x21, 0xdf, 0xee, + 0xab, 0xf0, 0x50, 0x3a, 0x19, 0x0a, 0x66, 0x79, 0x41, 0x18, 0xaa, 0x33, 0xc1, 0x22, 0x33, 0x83, + 0xd9, 0x71, 0x0b, 0xcb, 0x99, 0xb1, 0x79, 0x4a, 0xa1, 0xed, 0xe8, 0x88, 0xcb, 0x74, 0x8e, 0x91, + 0x0a, 0x13, 0xe9, 0xc3, 0x8d, 0xdd, 0x0d, 0x54, 0x61, 0xda, 0x66, 0x10, 0x20, 0x39, 0xf1, 0x59, + 0x1f, 0xf9, 0x38, 0x95, 0x5d, 0xa0, 0xc7, 0x49, 0xed, 0x7c, 0x8c, 0xb8, 0x29, 0x39, 0xed, 0xa8, + 0xaa, 0x2c, 0x26, 0x3f, 0x67, 0x6f, 0xf9, 0xb2, 0xa4, 0x39, 0x72, 0xa4, 0xb7, 0x05, 0x2d, 0x6f, + 0xb0, 0x37, 0x7d, 0x15, 0x14, 0xf3, 0x9e, 0xc2, 0x88, 0x49, 0x22, 0x04, 0xdb, 0x80, 0x90, 0x2a, + 0x96, 0x93, 0x1d, 0xe8, 0xb8, 0x51, 0xaa, 0xe1, 0x24, 0xd0, 0x51, 0x65, 0x07, 0x3c, 0x76, 0x6f, + 0x3e, 0x94, 0x19, 0x2e, 0xa0, 0x6c, 0x87, 0xa4, 0x48, 0xc0, 0xb9, 0xd8, 0x52, 0x16, 0x43, 0xa9, + 0xc6, 0x29, 0x7a, 0x6d, 0x20, 0x80, 0xf9, 0x8e, 0x7c, 0xf2, 0x7f, 0x60, 0x8d, 0xac, 0x6a, 0xcf, + 0x35, 0x2a, 0x66, 0xf8, 0x11, 0x7c, 0x77, 0x9f, 0x68, 0xcb, 0xb8, 0x0a, 0x70, 0x33, 0x40, 0x37, + 0x88, 0xf4, 0x33, 0x1f, 0xef, 0x5e, 0xee, 0xf6, 0xb3, 0x66, 0x77, 0xbb, 0x02, 0x2c, 0xca, 0x5d, + 0x41, 0xd8, 0x4a, 0xf9, 0x59, 0x7e, 0xb7, 0x3d, 0xa4, 0x0f, 0x18, 0x24, 0x78, 0xdf, 0x36, 0x2b, + 0x0a, 0x02, 0x50, 0xd5, 0x8d, 0x7e, 0xc8, 0xae, 0x4d, 0x55, 0x9e, 0x68, 0xc5, 0xf4, 0x7f, 0x56, + 0x68, 0xad, 0xf5, 0x1e, 0x3e, 0x31, 0x37, 0x48, 0x88, 0x9c, 0xef, 0x3b, 0x3c, 0x3c, 0x06, 0x65, + 0xc1, 0x7f, 0x41, 0xa1, 0x82, 0xf6, 0x0f, 0x71, 0xe3, 0xb3, 0xd6, 0x8d, 0xfc, 0x63, 0x7d, 0xdf, + 0x44, 0xfa, 0x8b, 0x18, 0xa6, 0x35, 0x52, 0x7b, 0x0d, 0x87, 0xc7, 0x69, 0x87, 0x78, 0x5c, 0x16, + 0xe1, 0xda, 0x63, 0x07, 0xd8, 0x8a, 0xea, 0x9e, 0x11, 0x98, 0x5d, 0x22, 0x27, 0x53, 0x90, 0xb5, + 0xeb, 0xd7, 0x4e, 0xb7, 0xa1, 0x89, 0xc4, 0x70, 0xfa, 0x58, 0xf9, 0x82, 0x30, 0x45, 0x28, 0x4a, + 0x68, 0x4e, 0x7b, 0x88, 0xe3, 0xd8, 0x78, 0x83, 0xa4, 0xa3, 0x11, 0xab, 0x16, 0x46, 0x83, 0xe2, + 0xde, 0xb4, 0xd7, 0x4c, 0xb2, 0x6c, 0x7c, 0x54, 0xf6, 0x37, 0x36, 0x8e, 0x2d, 0xb1, 0x10, 0xac, + 0xe9, 0xcf, 0x4d, 0xcd, 0xfa, 0x9b, 0x02, 0xa0, 0x91, 0xf4, 0xd7, 0xa3, 0xea, 0x02, 0xd8, 0x13, + 0x13, 0xdf, 0x1a, 0x87, 0xa6, 0x7f, 0x1a, 0x46, 0x30, 0xd1, 0x70, 0xb5, 0x48, 0xbc, 0x35, 0x0f, + 0x7f, 0xbb, 0x75, 0xf7, 0x05, 0x09, 0x5c, 0x54, 0xa0, 0x47, 
0xe8, 0xf7, 0x71, 0xa4, 0xfc, 0x6e, + 0x2f, 0x5d, 0x87, 0x5c, 0xb0, 0x50, 0x30, 0x89, 0x3b, 0x7d, 0xb5, 0x4f, 0x54, 0xd7, 0x19, 0x50, + 0x71, 0xbe, 0x17, 0x9a, 0x18, 0x50, 0x58, 0x2f, 0x85, 0x50, 0x10, 0xca, 0xe3, 0xb5, 0xae, 0xc5, + 0xe7, 0xef, 0x77, 0x6c, 0x9d, 0x63, 0x83, 0x1f, 0xb5, 0xc6, 0xf6, 0xd5, 0xda, 0x90, 0xe5, 0xcf, + 0xed, 0x8d, 0x18, 0xe0, 0x01, 0x46, 0xe8, 0xb1, 0x4b, 0xe7, 0x8f, 0xd4, 0x78, 0x00, 0x11, 0x39, + 0x39, 0x97, 0x33, 0xdc, 0xcc, 0xba, 0x03, 0x1b, 0x31, 0x56, 0x29, 0x7f, 0xa2, 0xa5, 0x7f, 0x1c, + 0x03, 0x9e, 0x10, 0x9b, 0xbe, 0x4a, 0x94, 0x42, 0x91, 0x7c, 0xce, 0xcb, 0x52, 0x7d, 0x73, 0x0e, + 0x8b, 0xec, 0x68, 0xa2, 0x1a, 0xa6, 0xa1, 0x6a, 0xd0, 0x50, 0xbe, 0x10, 0x09, 0x0c, 0x6d, 0xd6, + 0x5f, 0xff, 0xc0, 0x4d, 0x17, 0x09, 0xcc, 0x46, 0x7f, 0x3c, 0x27, 0xf3, 0xdc, 0x0d, 0xfc, 0xee, + 0x93, 0x27, 0xc7, 0x0d, 0x73, 0x91, 0x2c, 0xec, 0x57, 0x10, 0xe6, 0xeb, 0x05, 0xb9, 0x7b, 0x6e, + 0xd2, 0x22, 0xbd, 0xe5, 0xa6, 0x33, 0xbe, 0x07, 0x4d, 0xaa, 0x02, 0xba, 0xb9, 0xa3, 0x8d, 0x3c, + 0x6c, 0x10, 0xfe, 0x80, 0x6e, 0x9f, 0x31, 0x7a, 0xfc, 0x19, 0x10, 0xe4, 0x3e, 0xeb, 0x31, 0xf4, + 0xb6, 0xc4, 0x1d, 0x69, 0x61, 0x24, 0x31, 0xae, 0xb8, 0x12, 0x9c, 0x83, 0x6d, 0x33, 0x9c, 0x4e, + 0xd3, 0x23, 0x42, 0x04, 0x49, 0x13, 0xa5, 0x0a, 0x7a, 0xe7, 0x77, 0xce, 0x42, 0x7c, 0x76, 0x44, + 0x59, 0x51, 0xde, 0x75, 0x1d, 0xfc, 0x50, 0xfb, 0xc8, 0x68, 0x34, 0xac, 0x2d, 0xd4, 0xeb, 0x3a, + 0xc1, 0x8a, 0xa3, 0x4e, 0x7d, 0xc0, 0xe5, 0x51, 0xfd, 0xc4, 0x9e, 0xac, 0x2b, 0x51, 0x98, 0x14, + 0x8e, 0xfa, 0x23, 0x28, 0x0f, 0xa6, 0x8a, 0xb9, 0x82, 0xb9, 0x1f, 0xa4, 0x6c, 0xc1, 0xf7, 0x45, + 0xcc, 0x26, 0xdf, 0xe1, 0xe9, 0xb2, 0x32, 0x7e, 0xa2, 0x62, 0xb1, 0x21, 0xd4, 0xa0, 0xc7, 0x5f, + 0x11, 0x89, 0x8d, 0x5c, 0x04, 0x46, 0x71, 0x3b, 0x00, 0x2e, 0xbb, 0x27, 0x3d, 0x85, 0xb8, 0x3d, + 0xc4, 0x48, 0x7e, 0x2d, 0xa7, 0x11, 0xc1, 0x40, 0xdb, 0x1c, 0x0b, 0x36, 0xd9, 0x43, 0xff, 0xee, + 0x9c, 0xf3, 0x34, 0x20, 0x32, 0x77, 0xcd, 0xdc, 0x7d, 0x8c, 0x98, 0x33, 0x13, 0xb1, 0x33, 0x25, + 0x93, 0x37, 0x0c, 0x7c, 0x1d, 0x74, 0xc8, 0xa1, 0x8b, 0x96, 0xda, 0x79, 0x17, 0xf7, 0xd4, 0xc1, + 0xfb, 0xe7, 0x90, 0x16, 0x29, 0x33, 0xe2, 0x3f, 0x05, 0x90, 0x65, 0x6b, 0xe2, 0x75, 0x43, 0x16, + 0x23, 0xad, 0xaa, 0xd2, 0xa7, 0x47, 0x11, 0xd2, 0xfd, 0x9b, 0xdd, 0xaa, 0x54, 0xf3, 0x79, 0x24, + 0x6b, 0xd3, 0xc5, 0xa5, 0x79, 0x2a, 0x61, 0xd8, 0x1c, 0x5c, 0x0b, 0xc2, 0xab, 0x17, 0x3c, 0xd5, + 0x2d, 0x08, 0x3f, 0x89, 0x4d, 0x84, 0xf8, 0xf0, 0x5a, 0x41, 0x50, 0x3e, 0x53, 0xfb, 0x1c, 0x47, + 0x15, 0xa2, 0x37, 0xb9, 0x0e, 0x13, 0x01, 0x49, 0xc4, 0xe0, 0xd8, 0x64, 0xf4, 0x79, 0x7e, 0x1d, + 0xfa, 0xed, 0xb0, 0x68, 0xd7, 0x94, 0xb4, 0x8e, 0xb4, 0x59, 0x50, 0x76, 0x82, 0xae, 0x18, 0x7e, + 0xe4, 0xd9, 0x48, 0x14, 0x39, 0x37, 0x79, 0x30, 0x96, 0x10, 0xa9, 0xa3, 0x24, 0xf6, 0xe9, 0x6e, + 0xc0, 0x6f, 0xea, 0x88, 0xf7, 0x0a, 0x21, 0xfe, 0x74, 0x9e, 0xf4, 0xcd, 0xa2, 0xb1, 0xa6, 0x48, + 0xcf, 0x7b, 0x71, 0x56, 0x7f, 0xe3, 0x24, 0xdb, 0x97, 0xa4, 0xcc, 0xea, 0xef, 0x6f, 0xc5, 0xa7, + 0xe4, 0x68, 0xc4, 0xcf, 0x41, 0x0f, 0x4c, 0x88, 0x0e, 0xe1, 0xda, 0x9a, 0x17, 0x23, 0x90, 0xe7, + 0x7c, 0xba, 0x84, 0xa9, 0xe4, 0x8e, 0x13, 0x2b, 0x98, 0x26, 0xa4, 0xf7, 0x86, 0x50, 0x79, 0x4b, + 0x95, 0x94, 0x45, 0xba, 0xac, 0x8d, 0x91, 0x11, 0x84, 0x66, 0x94, 0x9c, 0xe4, 0x33, 0xe6, 0xcc, + 0x64, 0x54, 0xb2, 0x72, 0x93, 0x79, 0xb4, 0xc9, 0xbb, 0x03, 0x0b, 0x5c, 0x9f, 0xb7, 0xbf, 0x18, + 0x55, 0x63, 0xea, 0x22, 0xa6, 0x11, 0x7b, 0x5e, 0xa0, 0x2d, 0x91, 0xf1, 0x59, 0x65, 0x54, 0x1a, + 0x8a, 0x26, 0xab, 0x39, 0x8d, 0x88, 0x58, 0xb6, 0x78, 0x51, 0x75, 0x33, 0x4e, 0x65, 
0xe9, 0xcc, + 0xdc, 0x10, 0x79, 0x01, 0x79, 0x92, 0x5e, 0xf9, 0xa3, 0x11, 0x7f, 0xa8, 0x44, 0xad, 0x0d, 0xde, + 0xf0, 0xc8, 0x18, 0x14, 0x6b, 0xa8, 0xcf, 0xa9, 0x9c, 0xb0, 0x9e, 0x61, 0x45, 0xb4, 0x48, 0xa9, + 0x02, 0x51, 0xa9, 0x38, 0x56, 0xd2, 0x37, 0x56, 0x37, 0x57, 0x30, 0xa2, 0x57, 0xec, 0x55, 0x59, + 0x11, 0xf0, 0xe9, 0xc3, 0x17, 0x67, 0x25, 0x8e, 0x59, 0x27, 0x30, 0x46, 0xcc, 0xef, 0x87, 0xd1, + 0xce, 0x65, 0xec, 0x4b, 0x5f, 0xb3, 0x59, 0x47, 0xf6, 0xd5, 0xed, 0x88, 0x5a, 0x5b, 0x6b, 0x51, + 0xfb, 0xcf, 0x30, 0xb9, 0x83, 0xd4, 0xb1, 0x76, 0xad, 0x2b, 0xe0, 0x37, 0x79, 0x28, 0x78, 0x26, + 0x5d, 0x80, 0xae, 0x54, 0xba, 0x1b, 0x73, 0x26, 0x71, 0xc6, 0x15, 0xad, 0xfe, 0x88, 0xd7, 0xc8, + 0x55, 0x50, 0xdb, 0x0a, 0x2c, 0x39, 0x20, 0x1f, 0x4c, 0x3b, 0xd4, 0xd7, 0xc8, 0x27, 0xa7, 0xf9, + 0x55, 0x8a, 0xa1, 0xff, 0x74, 0xbe, 0xc7, 0x03, 0x12, 0x57, 0x19, 0x4c, 0xa1, 0x43, 0xcf, 0xad, + 0xc7, 0x3c, 0xb6, 0xb9, 0x13, 0x78, 0x4d, 0x08, 0xfa, 0x57, 0xc5, 0x53, 0x3c, 0xfa, 0xd6, 0x1b, + 0x1f, 0x44, 0x71, 0xec, 0x45, 0x0e, 0xd8, 0xdf, 0x3c, 0x8a, 0x58, 0xc9, 0x45, 0x38, 0x6e, 0x7f, + 0xcb, 0x9d, 0x2c, 0x58, 0x2e, 0x2a, 0xcf, 0x23, 0x1c, 0xdc, 0x62, 0x60, 0x08, 0x52, 0xf5, 0x01, + 0x97, 0xe7, 0xd7, 0x1d, 0xd2, 0x3a, 0x07, 0x5a, 0x18, 0x84, 0x81, 0x4a, 0xb2, 0xaa, 0x19, 0xee, + 0x40, 0xd0, 0x79, 0xbb, 0x92, 0x4f, 0x69, 0x23, 0x0a, 0x98, 0xd0, 0x28, 0x09, 0xfc, 0xfa, 0x0a, + 0x44, 0xc8, 0x9d, 0x54, 0x68, 0x60, 0x27, 0x0e, 0x12, 0x22, 0x81, 0xc7, 0x41, 0x70, 0x05, 0xe5, + 0x9f, 0x18, 0x7f, 0xd5, 0xd0, 0xbf, 0xdf, 0x60, 0x25, 0x08, 0x01, 0xd8, 0x3d, 0x86, 0x0a, 0xfd, + 0x14, 0x7f, 0x16, 0xb9, 0xb7, 0xec, 0xf9, 0x65, 0x8a, 0xcf, 0x66, 0xd3, 0x31, 0xc5, 0xd1, 0x5a, + 0x62, 0x4f, 0x5f, 0x0b, 0x15, 0x8c, 0x57, 0x35, 0x9b, 0x89, 0x4b, 0xb3, 0xc0, 0x21, 0xd6, 0x06, + 0x03, 0xa9, 0x56, 0x84, 0x9f, 0xd6, 0xfb, 0xc6, 0xe6, 0xea, 0xb5, 0x45, 0xb8, 0x7e, 0xbb, 0x3e, + 0x0b, 0x7c, 0xfb, 0xbd, 0xaa, 0x8c, 0x48, 0x0e, 0x7c, 0x34, 0x96, 0x84, 0xc2, 0x5b, 0x2a, 0xa6, + 0x17, 0x96, 0xd9, 0xcd, 0xe7, 0xcf, 0x99, 0x2d, 0xcf, 0x7e, 0x71, 0xfb, 0x69, 0x9e, 0x95, 0xc4, + 0x74, 0x6f, 0xa2, 0xb3, 0xfb, 0x8f, 0x32, 0x69, 0x38, 0xd7, 0x19, 0x6f, 0x0c, 0xe2, 0x08, 0x94, + 0xaa, 0x7d, 0x17, 0x00, 0x8a, 0x7e, 0x4a, 0x6d, 0x4f, 0xd9, 0xdc, 0xa4, 0x7c, 0x4d, 0x9e, 0x24, + 0xc6, 0xc2, 0xf5, 0xc0, 0xab, 0x11, 0x28, 0x39, 0x3f, 0xb9, 0xe2, 0x42, 0xc2, 0x4d, 0x10, 0x5d, + 0x57, 0xaa, 0xdd, 0x77, 0x66, 0x8e, 0x8c, 0xfc, 0xbc, 0xb4, 0x21, 0xe7, 0x1b, 0x31, 0x6b, 0x9f, + 0xb0, 0x80, 0xe5, 0xa1, 0x1e, 0x84, 0xdd, 0x36, 0x9e, 0x43, 0xdc, 0xf2, 0x10, 0xd6, 0x52, 0xab, + 0x09, 0x19, 0xeb, 0xf7, 0xe0, 0x99, 0x53, 0xc6, 0xac, 0x6d, 0xcd, 0xe3, 0xd3, 0x38, 0xfd, 0x60, + 0x91, 0xbc, 0xc1, 0xf2, 0x18, 0xfc, 0x1c, 0x76, 0xfa, 0x8a, 0xe3, 0x37, 0x37, 0x30, 0xdb, 0x83, + 0x5e, 0x7b, 0xda, 0x0c, 0xbf, 0x63, 0x7a, 0x0c, 0x31, 0x76, 0xf6, 0x74, 0x0a, 0xa1, 0xc8, 0xea, + 0x60, 0x7c, 0xd5, 0xee, 0xb4, 0xa1, 0x29, 0x97, 0x89, 0x16, 0xb0, 0xed, 0x01, 0x3a, 0x67, 0x28, + 0xc9, 0xc3, 0x61, 0x4a, 0x80, 0x92, 0xb0, 0xa7, 0x5b, 0xe7, 0xfb, 0x3e, 0x19, 0xc9, 0x9c, 0xa2, + 0x06, 0x6c, 0x74, 0x4e, 0xa4, 0xf3, 0xec, 0xff, 0xa9, 0xa9, 0x5e, 0xba, 0x35, 0xda, 0xb0, 0xc9, + 0x17, 0x6c, 0x84, 0x55, 0x7f, 0xde, 0x5c, 0xe9, 0x0d, 0x01, 0xb6, 0x97, 0xba, 0xdf, 0x4c, 0xd5, + 0xd5, 0x04, 0x87, 0xe1, 0x50, 0x7a, 0x69, 0x9f, 0x4d, 0x27, 0x27, 0x0f, 0x56, 0xff, 0xbc, 0x70, + 0x55, 0x2f, 0x90, 0x40, 0x54, 0xae, 0xc1, 0x23, 0x65, 0x9b, 0x27, 0x7c, 0xcc, 0x36, 0x68, 0x86, + 0x02, 0x05, 0x10, 0x7f, 0x6c, 0x67, 0xe3, 0xa7, 0x14, 0x46, 0xe5, 0x87, 0xc8, 0x8b, 0x61, 0xca, + 0xc2, 0xf1, 
0xd7, 0xf9, 0xb4, 0x74, 0x39, 0xfd, 0xac, 0x17, 0x05, 0xbc, 0x5e, 0x73, 0xa1, 0x5e, + 0x59, 0x9f, 0x5b, 0x25, 0xb8, 0xe0, 0x74, 0x25, 0xf9, 0x93, 0xc1, 0x84, 0x23, 0x09, 0x7c, 0x33, + 0x38, 0x70, 0x91, 0x3f, 0xd0, 0x75, 0x7c, 0x6b, 0x44, 0x9e, 0xc9, 0x23, 0x33, 0xdd, 0xd5, 0x74, + 0x23, 0x0e, 0x63, 0x8c, 0xb5, 0xaa, 0x09, 0x5b, 0xf7, 0xa9, 0x8d, 0x8a, 0xfa, 0x34, 0xf4, 0x92, + 0x2f, 0xbb, 0x7e, 0xb5, 0x66, 0xe3, 0x0c, 0x47, 0x99, 0x67, 0x7b, 0x9a, 0x03, 0xe9, 0xd5, 0xca, + 0xf5, 0x8d, 0x8b, 0x82, 0x48, 0x62, 0x0b, 0xf9, 0x64, 0x0f, 0xc0, 0x82, 0x5f, 0x87, 0x58, 0x45, + 0x84, 0x1d, 0x31, 0x20, 0xf8, 0xda, 0x59, 0xce, 0x41, 0x63, 0x11, 0x55, 0xbb, 0xad, 0xe0, 0x9b, + 0xd7, 0x61, 0xf7, 0xb0, 0x78, 0xcd, 0xfc, 0x6c, 0x29, 0x94, 0x2f, 0x5b, 0x3e, 0x7a, 0xa7, 0x1a, + 0xad, 0x35, 0x31, 0xd6, 0xfe, 0xe8, 0x8f, 0xe0, 0x29, 0x3f, 0x32, 0xaf, 0xda, 0x49, 0x14, 0x78, + 0xec, 0x49, 0xb3, 0xb9, 0x98, 0x30, 0xd6, 0x42, 0xab, 0x33, 0x74, 0xed, 0xb5, 0x4c, 0x05, 0x40, + 0x47, 0x60, 0x9c, 0x15, 0xe9, 0x1f, 0x0b, 0x32, 0x34, 0x14, 0xd8, 0xb3, 0x07, 0xf5, 0xdf, 0xda, + 0xf6, 0x63, 0x59, 0xf6, 0x5d, 0xba, 0xe7, 0x71, 0x35, 0xf9, 0x4e, 0xb0, 0xa3, 0x74, 0x85, 0xec, + 0x37, 0x93, 0x6b, 0xca, 0xd5, 0x78, 0x5d, 0xa7, 0x19, 0xa2, 0xdc, 0x77, 0x9b, 0xe0, 0xa0, 0xc4, + 0x9e, 0xae, 0x87, 0x29, 0x56, 0x96, 0x3a, 0x7a, 0x96, 0x92, 0x07, 0x5e, 0xee, 0xfe, 0x7c, 0x39, + 0xd3, 0x04, 0x32, 0x49, 0x8b, 0xe3, 0xdb, 0xa9, 0xab, 0xbb, 0xd9, 0x45, 0x3e, 0xa3, 0x2a, 0xbe, + 0x84, 0x1e, 0x17, 0x9a, 0x88, 0x4d, 0xf5, 0x36, 0xaf, 0xd5, 0xdb, 0x19, 0x1f, 0x07, 0x3a, 0xc7, + 0x59, 0xcd, 0x83, 0xc2, 0x8f, 0xb7, 0x1c, 0xb6, 0xc4, 0xd5, 0x68, 0x34, 0x37, 0x2b, 0x8e, 0x92, + 0x7c, 0xb2, 0x72, 0x9a, 0xb0, 0x41, 0xc4, 0x98, 0xbd, 0x9d, 0xb9, 0x66, 0xc4, 0x8e, 0x5d, 0x15, + 0xd0, 0x67, 0x90, 0x10, 0xb6, 0x45, 0x35, 0x18, 0xc0, 0xb7, 0x1c, 0xf9, 0xd7, 0xe8, 0x2a, 0xe5, + 0x0c, 0x3d, 0x2b, 0x52, 0xc2, 0xb6, 0x39, 0xef, 0xdd, 0x8e, 0x80, 0xe3, 0x0f, 0x84, 0x8a, 0xf1, + 0x5b, 0x27, 0x73, 0x18, 0x87, 0x29, 0x9c, 0x54, 0x16, 0x91, 0xe4, 0x8a, 0x8a, 0x70, 0xf4, 0x8a, + 0x03, 0x42, 0x19, 0x08, 0x9a, 0xa9, 0xf3, 0x70, 0x88, 0x15, 0xb7, 0x03, 0x60, 0xba, 0xd2, 0x8c, + 0xe8, 0x8b, 0xb2, 0xdd, 0x1a, 0x43, 0xe5, 0x7a, 0x8d, 0x9a, 0xa5, 0xc7, 0xd0, 0xf5, 0x5c, 0x32, + 0xf6, 0x02, 0x7e, 0xd3, 0x7a, 0x82, 0x23, 0x27, 0xb2, 0x4f, 0xde, 0xfb, 0x84, 0x3f, 0x06, 0x8b, + 0x02, 0x5e, 0x9a, 0x26, 0xa3, 0x7f, 0xd5, 0xbc, 0x2c, 0x88, 0x96, 0x02, 0xa8, 0xf7, 0xf5, 0x01, + 0x90, 0xe1, 0x30, 0xc2, 0x42, 0xba, 0xdb, 0xf2, 0xc5, 0x3a, 0x6f, 0xd5, 0x81, 0x03, 0xae, 0xdf, + 0xdd, 0x60, 0x42, 0x33, 0x20, 0x91, 0x8f, 0xaf, 0xca, 0x26, 0x5a, 0xbc, 0x4c, 0x89, 0x38, 0x27, + 0x76, 0x97, 0x2b, 0x0c, 0x79, 0x86, 0x7a, 0x58, 0x4d, 0x2e, 0x0c, 0x53, 0xd5, 0x67, 0xba, 0x8d, + 0x1c, 0x26, 0x32, 0xe4, 0x05, 0xa4, 0x9b, 0x03, 0xf3, 0x63, 0x57, 0x6f, 0x0b, 0x22, 0x48, 0xfe, + 0x6d, 0xd9, 0x6e, 0x57, 0xc9, 0xa8, 0x76, 0x5f, 0x3e, 0xe9, 0x41, 0xf8, 0x5a, 0xbc, 0x24, 0xed, + 0x86, 0x19, 0xaf, 0xbb, 0xc9, 0x86, 0xae, 0xdf, 0x2d, 0xb7, 0x8a, 0xa8, 0xcd, 0x0b, 0x0b, 0x4a, + 0xd3, 0x9b, 0xe0, 0x92, 0xf9, 0x02, 0x67, 0x9b, 0x3b, 0x56, 0x43, 0x1d, 0x47, 0x82, 0xb8, 0xab, + 0xaf, 0x6f, 0x6d, 0x86, 0xf9, 0x4b, 0xad, 0xbf, 0x86, 0x6e, 0x23, 0x88, 0x58, 0x17, 0x76, 0x2c, + 0x5c, 0x70, 0xdc, 0xe1, 0x8e, 0xba, 0x15, 0xcb, 0xa9, 0xa4, 0x99, 0x97, 0x7a, 0x7c, 0x5c, 0x43, + 0x3b, 0xa9, 0xf2, 0xfc, 0xa4, 0xb6, 0x39, 0xd5, 0xdb, 0x1e, 0xa4, 0x17, 0xc8, 0x30, 0x68, 0x14, + 0x2d, 0x4c, 0x88, 0x75, 0x92, 0x54, 0x29, 0xa6, 0x2a, 0x4b, 0x50, 0x38, 0xd1, 0x0f, 0x10, 0x84, + 0xde, 0x67, 0x42, 0x95, 0x97, 0x12, 
0xe4, 0x92, 0xb5, 0x6a, 0x07, 0xa0, 0xa8, 0x51, 0x63, 0x33, + 0xe6, 0x4c, 0x78, 0xd2, 0x31, 0x8e, 0xbe, 0x33, 0x07, 0x7b, 0xdc, 0xcb, 0x43, 0xe9, 0xe3, 0x9a, + 0x5c, 0xd2, 0x4d, 0xec, 0x11, 0x26, 0xd9, 0x50, 0x1a, 0x2d, 0x96, 0xbf, 0x83, 0xf9, 0x73, 0xa4, + 0xd5, 0x53, 0xa9, 0xdb, 0x8d, 0x43, 0xf1, 0x33, 0x32, 0x45, 0xfd, 0x2a, 0x95, 0x9f, 0xc2, 0x2a, + 0x5a, 0x9f, 0x4d, 0x3f, 0x2e, 0xb7, 0xe1, 0xd8, 0xf7, 0x80, 0xea, 0x88, 0xfa, 0x5f, 0x67, 0x4b, + 0x2d, 0x14, 0xf8, 0x08, 0x16, 0x4b, 0xeb, 0x76, 0x98, 0xd1, 0xe8, 0x35, 0x1a, 0xd1, 0x83, 0x80, + 0xeb, 0xc0, 0x89, 0xd6, 0x7b, 0x7e, 0x07, 0x24, 0xb8, 0xc2, 0x90, 0xb5, 0x1a, 0x99, 0x63, 0xa0, + 0x4b, 0xe0, 0xa0, 0x09, 0xae, 0x4a, 0xb7, 0x88, 0x32, 0xb0, 0xf3, 0x2b, 0x65, 0x3a, 0x62, 0x69, + 0xe1, 0xf9, 0x11, 0x0b, 0xea, 0xf8, 0x48, 0x99, 0xb6, 0x5e, 0x21, 0x33, 0xef, 0x5b, 0xc0, 0x4c, + 0xe1, 0xf0, 0xe9, 0xfb, 0x11, 0x5e, 0xa3, 0x87, 0xd5, 0xd3, 0xe1, 0x75, 0x67, 0x3c, 0x6d, 0xde, + 0xbf, 0xde, 0xac, 0xb2, 0x62, 0x52, 0xb0, 0xe8, 0x7b, 0x80, 0x87, 0x8d, 0x56, 0x29, 0x8a, 0x64, + 0x3f, 0xe3, 0x87, 0x69, 0x74, 0xf0, 0xf8, 0x67, 0xdb, 0xf0, 0x7f, 0x83, 0xb5, 0x82, 0x3e, 0x9a, + 0x1b, 0x52, 0x2f, 0xa9, 0x63, 0xdb, 0xd4, 0x15, 0x06, 0x96, 0xec, 0xcd, 0x0f, 0x31, 0x3d, 0x18, + 0x98, 0x8c, 0x70, 0xc0, 0x79, 0x89, 0x76, 0x9c, 0xa5, 0xad, 0x98, 0x6f, 0xf5, 0xab, 0x36, 0xab, + 0xe6, 0xbd, 0xce, 0x1c, 0xcc, 0x04, 0x50, 0xeb, 0x2b, 0x9b, 0xf3, 0x55, 0x5b, 0x4c, 0xb2, 0xb4, + 0xa5, 0x51, 0x80, 0x88, 0x0b, 0x41, 0x4b, 0x5e, 0xac, 0x2a, 0xa6, 0x38, 0x12, 0xfa, 0x8b, 0x8b, + 0x91, 0x7d, 0x18, 0xf9, 0x17, 0x3a, 0xea, 0x86, 0xd4, 0x74, 0x13, 0x55, 0x00, 0x77, 0x00, 0x93, + 0x4f, 0x06, 0x4f, 0x75, 0xfb, 0xa7, 0x2e, 0x00, 0xd5, 0xb9, 0xf8, 0x7d, 0x92, 0x95, 0x79, 0x68, + 0xd5, 0xba, 0x2a, 0xcd, 0x24, 0xda, 0xb6, 0x59, 0xf0, 0xed, 0x7e, 0xc4, 0x68, 0x6c, 0x77, 0xa2, + 0xe6, 0xf7, 0x4e, 0x89, 0x7b, 0xbc, 0xaf, 0x95, 0x26, 0x71, 0xaa, 0x18, 0xe8, 0xe4, 0x97, 0x73, + 0xf1, 0x5d, 0xe8, 0xcc, 0x23, 0xdf, 0xf7, 0x5a, 0xc5, 0x0b, 0x64, 0xe3, 0x2d, 0x63, 0xa6, 0xdc, + 0x29, 0x3d, 0x53, 0xd9, 0x6e, 0x6a, 0xc6, 0x1b, 0x4e, 0x1a, 0xfd, 0xd8, 0xc8, 0xc5, 0xa0, 0xa3, + 0xef, 0x9d, 0x83, 0xa5, 0x36, 0x70, 0x82, 0x33, 0xf1, 0x44, 0x8d, 0x61, 0xe8, 0x80, 0x41, 0x00, + 0x3b, 0x9d, 0x3d, 0xab, 0x26, 0x2c, 0x9a, 0x74, 0x88, 0xb0, 0x5f, 0x72, 0x1c, 0xb1, 0x2b, 0x5b, + 0xd5, 0x4c, 0xea, 0x2f, 0xbe, 0x0f, 0xc5, 0x4b, 0x25, 0xbd, 0x17, 0x0d, 0x6e, 0x55, 0xc8, 0x37, + 0xdf, 0xee, 0xff, 0xd8, 0x22, 0xab, 0x4c, 0x02, 0xca, 0xd2, 0x69, 0xaf, 0x41, 0xed, 0xc9, 0x6f, + 0x18, 0x3d, 0x32, 0xce, 0x2f, 0xc3, 0x64, 0x8b, 0x69, 0x09, 0xbb, 0xa6, 0x1a, 0x67, 0x15, 0x87, + 0x0f, 0xc2, 0xa5, 0x45, 0xbe, 0x4a, 0x7b, 0x4e, 0x0c, 0x4b, 0x99, 0xa1, 0x4b, 0xce, 0x0f, 0x71, + 0x75, 0xcb, 0x6d, 0xac, 0x92, 0xec, 0xfd, 0xd1, 0x3c, 0xcc, 0x1e, 0x2c, 0x84, 0x1e, 0x79, 0x98, + 0x7d, 0x6f, 0x50, 0x07, 0x2a, 0x30, 0x70, 0xe1, 0x32, 0x86, 0x62, 0x78, 0xa1, 0x62, 0x88, 0x81, + 0x15, 0x24, 0x5a, 0xf4, 0x83, 0x3f, 0x84, 0xf0, 0x24, 0x49, 0xb6, 0xee, 0x2d, 0x9a, 0x66, 0xbd, + 0xf2, 0x0d, 0xc5, 0xb6, 0xa0, 0x06, 0x2e, 0x0e, 0x54, 0x06, 0xcf, 0xc0, 0x59, 0x20, 0x60, 0x30, + 0x6c, 0x9e, 0x03, 0x12, 0x81, 0xd4, 0x7f, 0x8d, 0x51, 0x07, 0x12, 0x8d, 0xdc, 0xf2, 0x48, 0x5f, + 0xd6, 0x5e, 0x43, 0x8e, 0xcd, 0xd3, 0xf0, 0x5d, 0x59, 0x18, 0xe9, 0x66, 0x8a, 0xdf, 0xa4, 0x63, + 0x54, 0x41, 0x38, 0x2d, 0xfb, 0xab, 0x46, 0x2a, 0x7d, 0x77, 0x0a, 0x2f, 0xb3, 0x94, 0x49, 0x23, + 0x0d, 0xa2, 0x29, 0x68, 0xa6, 0x2b, 0x44, 0x7f, 0x6f, 0x69, 0x1a, 0x24, 0xbe, 0x9f, 0x35, 0x90, + 0x48, 0x5d, 0x23, 0xad, 0x42, 0xe3, 0xce, 0xfd, 0xf4, 0x96, 
0x53, 0x61, 0xb3, 0x10, 0xf1, 0xab, + 0xd9, 0x71, 0x6c, 0x31, 0xbf, 0x80, 0x28, 0x5d, 0x0c, 0x00, 0x28, 0x0e, 0x2f, 0xe6, 0x82, 0x2e, + 0x31, 0x05, 0x77, 0x4c, 0x65, 0xf2, 0x38, 0x45, 0x65, 0x9e, 0xd4, 0xd7, 0x3e, 0x85, 0x9e, 0x0c, + 0x9e, 0xa2, 0x5a, 0xe3, 0xea, 0xf4, 0xdd, 0x4e, 0x2d, 0xc8, 0x26, 0x64, 0x75, 0x86, 0xe1, 0xea, + 0x3b, 0x30, 0x72, 0xf6, 0x8e, 0x59, 0xdd, 0x93, 0xc4, 0x16, 0x51, 0x6f, 0x1a, 0xdf, 0xce, 0xeb, + 0x38, 0xb6, 0xbe, 0x8b, 0x04, 0xb7, 0xef, 0xc3, 0x03, 0x80, 0xd7, 0xc7, 0x45, 0x6c, 0x14, 0x2d, + 0x76, 0xb3, 0xe6, 0xf8, 0x97, 0x92, 0xde, 0xb5, 0xbf, 0xc3, 0xd9, 0xac, 0x80, 0x91, 0xa5, 0x82, + 0x49, 0x7f, 0x96, 0x0c, 0xf6, 0x8f, 0xb3, 0xe5, 0x1e, 0x26, 0xc2, 0xa6, 0xfa, 0x6b, 0x73, 0x9c, + 0x88, 0xcc, 0xc5, 0xf8, 0x3e, 0x1c, 0x9b, 0x5c, 0xe0, 0x56, 0xba, 0x10, 0xb6, 0x07, 0x3b, 0x27, + 0x9a, 0xc0, 0x43, 0x8e, 0xf3, 0xa8, 0x64, 0xab, 0x11, 0x53, 0xeb, 0x16, 0x5e, 0xdc, 0xdf, 0xdb, + 0x0f, 0xc4, 0xa5, 0x75, 0xeb, 0x2e, 0x24, 0x71, 0x0e, 0xfa, 0xbf, 0x63, 0xb1, 0xbe, 0xe1, 0x24, + 0xf0, 0xfe, 0x14, 0x0d, 0xba, 0x01, 0x39, 0xaa, 0x94, 0xef, 0x8a, 0x93, 0x64, 0xe0, 0x87, 0x36, + 0xdd, 0x5d, 0x03, 0xcb, 0x2b, 0x61, 0x86, 0xde, 0xad, 0xfe, 0xf3, 0xe6, 0xf3, 0x4d, 0xfd, 0xd2, + 0x4d, 0xf5, 0xab, 0x4d, 0x44, 0x28, 0xa1, 0x99, 0x35, 0xd9, 0xcc, 0xcd, 0xdd, 0x9b, 0x34, 0x6a, + 0x42, 0xf3, 0xe6, 0xa5, 0x5f, 0x8b, 0x19, 0xc2, 0x09, 0xc4, 0xde, 0xf0, 0x2c, 0x5f, 0x65, 0x20, + 0x12, 0x1c, 0x75, 0x9a, 0x40, 0x09, 0x10, 0x3c, 0x71, 0xc3, 0xca, 0x07, 0x9d, 0x7f, 0x65, 0xad, + 0x82, 0xb7, 0x66, 0xb4, 0x6e, 0xc9, 0xce, 0x33, 0x19, 0xb4, 0x26, 0x60, 0x55, 0x3a, 0xdf, 0x39, + 0xc8, 0x7b, 0x10, 0xb4, 0x5a, 0xf4, 0xfe, 0x04, 0xb4, 0x96, 0x96, 0x17, 0x3d, 0xd9, 0x9c, 0x73, + 0xc3, 0xee, 0xc4, 0xba, 0x6e, 0x77, 0x88, 0x0d, 0x4f, 0x81, 0x0c, 0xb8, 0xe0, 0x04, 0xb6, 0xc7, + 0xe0, 0x20, 0xd4, 0x02, 0x55, 0x9c, 0x8a, 0xcb, 0x23, 0x73, 0x5d, 0x67, 0xf4, 0x47, 0x31, 0xd1, + 0x4a, 0x39, 0xf4, 0x08, 0x90, 0xa5, 0x6f, 0x2e, 0xee, 0x8a, 0x3b, 0x99, 0x58, 0xc9, 0xdd, 0x7b, + 0x19, 0x7d, 0x1b, 0x1a, 0xca, 0x35, 0x48, 0xbe, 0x81, 0x41, 0x87, 0x45, 0x5b, 0xe1, 0x51, 0xdf, + 0xd1, 0xed, 0x33, 0xa7, 0x8c, 0x0c, 0x74, 0x41, 0x23, 0xe1, 0x4f, 0x3f, 0x3a, 0xf8, 0xd0, 0xb4, + 0x84, 0x0d, 0xbc, 0x90, 0xf1, 0x57, 0x2a, 0x49, 0x44, 0xee, 0x7d, 0xac, 0x9c, 0x2a, 0x09, 0x99, + 0x5b, 0xa9, 0x8c, 0x4b, 0x51, 0x4e, 0x86, 0x6b, 0xda, 0xea, 0xf4, 0x2d, 0x59, 0x25, 0x3b, 0x0f, + 0x71, 0x02, 0x69, 0xb5, 0x50, 0xe1, 0x21, 0x76, 0xd1, 0x40, 0x32, 0x20, 0xfa, 0xe4, 0x47, 0x33, + 0x71, 0x86, 0x91, 0x36, 0xab, 0x9e, 0xf5, 0x7e, 0x6b, 0x36, 0x32, 0x06, 0x94, 0x5e, 0x62, 0xe7, + 0x00, 0x52, 0x09, 0x1b, 0xe7, 0x8e, 0xca, 0x1d, 0x4c, 0xcc, 0x6f, 0xea, 0xdc, 0x1c, 0xad, 0x82, + 0x63, 0x16, 0x38, 0x97, 0x43, 0x67, 0x2c, 0x2b, 0xd4, 0x61, 0x08, 0x8e, 0x5c, 0x4e, 0x30, 0xf9, + 0x68, 0x07, 0x01, 0xd5, 0xb8, 0x4c, 0x98, 0x54, 0x54, 0x35, 0x76, 0x87, 0xba, 0x51, 0x13, 0x67, + 0x89, 0xfa, 0x05, 0x42, 0x78, 0xe9, 0xf9, 0x67, 0x05, 0xb0, 0x21, 0x60, 0xa5, 0xe8, 0x42, 0x0b, + 0xa4, 0x6a, 0xd9, 0x47, 0x8a, 0x3d, 0x4e, 0xff, 0x0d, 0x17, 0x69, 0x6a, 0x91, 0xec, 0xf4, 0x6e, + 0x7f, 0x15, 0x30, 0xe4, 0xd8, 0x38, 0x33, 0x9d, 0x0e, 0x08, 0x67, 0xe8, 0xfb, 0x3c, 0xe5, 0x94, + 0x15, 0x21, 0xdf, 0x36, 0x09, 0xb8, 0x66, 0xdd, 0x4b, 0x85, 0xc2, 0x20, 0x1d, 0xfa, 0x83, 0xca, + 0xc1, 0xa5, 0xe8, 0x63, 0xbb, 0x48, 0x6f, 0x7f, 0x04, 0xb9, 0x76, 0x26, 0xdd, 0x4e, 0x35, 0xbc, + 0x35, 0x49, 0x25, 0x05, 0xca, 0xfa, 0xf2, 0x0c, 0xf3, 0xea, 0x7e, 0x46, 0x7c, 0x48, 0x35, 0x06, + 0x7d, 0x4f, 0xee, 0xc9, 0x44, 0x90, 0xc1, 0x87, 0x0c, 0x6c, 0x05, 0x82, 0x21, 0x2c, 
0x9f, 0xe2, + 0x96, 0x6e, 0xee, 0xa4, 0xa1, 0x94, 0x91, 0x38, 0x3a, 0x68, 0x5d, 0xe1, 0x14, 0xf8, 0x55, 0x5d, + 0x76, 0x19, 0x11, 0xd6, 0x2c, 0x64, 0xce, 0x47, 0xd7, 0xeb, 0x84, 0xd9, 0x67, 0x4b, 0xb0, 0xda, + 0xa9, 0xea, 0x83, 0x34, 0xd8, 0xab, 0xd2, 0x59, 0xb2, 0xef, 0x81, 0xfe, 0x27, 0xc2, 0xc2, 0x68, + 0xab, 0xde, 0x64, 0x76, 0xa1, 0x57, 0x14, 0x89, 0x55, 0x4a, 0xca, 0x32, 0x20, 0x21, 0xe2, 0x0b, + 0x5f, 0x2d, 0x3d, 0xc9, 0x85, 0xcb, 0xca, 0x91, 0xf8, 0xe5, 0x9d, 0x92, 0x21, 0x51, 0x0d, 0xf1, + 0xb2, 0xdd, 0x2f, 0x34, 0x11, 0x5c, 0x15, 0xea, 0x7d, 0x7d, 0x3a, 0x26, 0xe2, 0x44, 0x03, 0xeb, + 0x7e, 0xc8, 0xdf, 0xe9, 0x15, 0xdd, 0x8f, 0xd9, 0x9d, 0xa3, 0x2a, 0xa8, 0x7c, 0x4a, 0x63, 0xb5, + 0x36, 0xd1, 0xc9, 0x78, 0x82, 0xd8, 0x89, 0x7d, 0xb8, 0x78, 0x3e, 0x97, 0xdb, 0x1d, 0x22, 0xa8, + 0x7a, 0xaf, 0x24, 0xe3, 0x94, 0x7f, 0xc1, 0x00, 0x3e, 0xa6, 0x04, 0x00, 0xee, 0x4e, 0x61, 0xbb, + 0x34, 0x27, 0x67, 0x9e, 0xd1, 0x9f, 0xbc, 0xa6, 0x2d, 0xbc, 0xcc, 0xd1, 0x7d, 0xee, 0xee, 0x97, + 0x93, 0x3a, 0xef, 0xbe, 0x95, 0x02, 0x70, 0x50, 0x9b, 0x27, 0x40, 0xe9, 0xc9, 0x2f, 0x64, 0xe0, + 0xd6, 0xb8, 0x12, 0xab, 0xa3, 0xb8, 0xe2, 0x75, 0xfc, 0xec, 0x2e, 0xaa, 0x4e, 0x1a, 0xd5, 0xb7, + 0xa5, 0x65, 0xbe, 0xce, 0x85, 0xb1, 0x19, 0xa9, 0x7c, 0x86, 0x84, 0x5f, 0xe2, 0x82, 0x55, 0xb5, + 0x4b, 0x53, 0x52, 0x1c, 0x62, 0x42, 0x4d, 0xd1, 0x6d, 0x36, 0x99, 0xed, 0x8c, 0xb8, 0xa7, 0x11, + 0x5f, 0x2d, 0x4a, 0x70, 0x81, 0x59, 0x5d, 0x32, 0x25, 0x1e, 0x5b, 0x97, 0xed, 0xb1, 0x5d, 0xd9, + 0xe3, 0x9c, 0x49, 0x94, 0xae, 0xee, 0xef, 0xca, 0x3a, 0x9f, 0xa8, 0x1f, 0x30, 0xdb, 0x17, 0xd9, + 0x99, 0xbb, 0xfc, 0x33, 0x44, 0x01, 0xad, 0x35, 0x1d, 0x26, 0xc5, 0xb8, 0x37, 0x42, 0x21, 0xab, + 0x76, 0x31, 0x6b, 0xbb, 0xb2, 0x86, 0x12, 0x7c, 0xbd, 0xd4, 0x12, 0xa9, 0xef, 0xed, 0x19, 0x46, + 0xc3, 0x2d, 0xe8, 0x36, 0x06, 0x72, 0xc2, 0xc0, 0x84, 0xb8, 0x70, 0x27, 0xb7, 0xf9, 0xdd, 0x92, + 0x7d, 0x40, 0xc0, 0xda, 0xc7, 0x11, 0x2a, 0x4b, 0xc6, 0xa1, 0xf7, 0x12, 0x74, 0xb6, 0x51, 0x6a, + 0x27, 0x8f, 0x3c, 0xb8, 0x98, 0x61, 0xd8, 0x47, 0xdb, 0x0d, 0x48, 0xae, 0x89, 0x11, 0x88, 0x1e, + 0x4b, 0x01, 0x1b, 0xf4, 0xf0, 0xd3, 0xcf, 0x03, 0x4d, 0x33, 0x7c, 0x64, 0xc9, 0xfd, 0x6d, 0xb3, + 0x36, 0xaa, 0xb0, 0xf2, 0xcd, 0x5b, 0xec, 0x3d, 0x63, 0xa0, 0xd8, 0x22, 0xa0, 0x22, 0xd3, 0xb5, + 0x8c, 0x88, 0x02, 0xd2, 0x50, 0xc7, 0x8f, 0xd6, 0x00, 0x8a, 0x0f, 0xc8, 0xf4, 0xad, 0xcc, 0xc1, + 0x84, 0x41, 0xbc, 0xcd, 0x77, 0xfb, 0xd8, 0x8f, 0xa6, 0xef, 0xd4, 0xb4, 0xdd, 0xcb, 0x0f, 0x55, + 0x11, 0x5c, 0xbe, 0x61, 0xf1, 0x7b, 0x5e, 0xd0, 0x72, 0x65, 0x30, 0xf8, 0xb3, 0x20, 0x42, 0x20, + 0x09, 0x90, 0x39, 0x81, 0xde, 0x6f, 0x44, 0x96, 0xd9, 0x24, 0xfb, 0x84, 0x9b, 0x80, 0x67, 0xf3, + 0x68, 0x6a, 0x15, 0x20, 0xc1, 0x01, 0xd8, 0x6e, 0x7d, 0x7f, 0x99, 0xac, 0x48, 0xf4, 0xee, 0x91, + 0xb8, 0x77, 0xe4, 0xc5, 0x67, 0x61, 0x55, 0xee, 0xd3, 0x79, 0x93, 0xfc, 0x00, 0x28, 0x41, 0xc4, + 0x61, 0x9a, 0xe4, 0xcd, 0x71, 0xc7, 0xd3, 0x20, 0x0f, 0xd1, 0x18, 0x4a, 0xd5, 0xd1, 0xc2, 0x86, + 0x3e, 0x63, 0x45, 0x3c, 0xad, 0x15, 0x7e, 0xb0, 0x29, 0x08, 0x34, 0xf1, 0xb8, 0xda, 0x88, 0x10, + 0x76, 0x8e, 0x69, 0x21, 0xb5, 0x32, 0x59, 0x13, 0x26, 0x02, 0x75, 0x5e, 0x71, 0xa0, 0xb9, 0xd5, + 0x9d, 0x12, 0x0a, 0x12, 0xde, 0x2b, 0x12, 0xb2, 0x77, 0xe4, 0x8f, 0xfc, 0xb6, 0xd2, 0xfa, 0xd0, + 0x90, 0xf6, 0xe1, 0xf9, 0x3b, 0x66, 0x80, 0x54, 0xdf, 0x8a, 0xed, 0x7a, 0xb8, 0x5b, 0x42, 0x25, + 0x17, 0x70, 0x27, 0xbd, 0x14, 0x4d, 0x48, 0xc1, 0xb4, 0xe8, 0x3d, 0x28, 0x28, 0xbf, 0x49, 0x10, + 0x18, 0x2f, 0x8e, 0xf4, 0xfd, 0x3a, 0x33, 0xda, 0x15, 0x75, 0xa4, 0x7a, 0x64, 0x4d, 0x52, 0x90, + 0x06, 0xcf, 
0x19, 0x73, 0xc9, 0xe5, 0x65, 0x11, 0x55, 0x7f, 0x3b, 0x6b, 0x86, 0x9c, 0x2f, 0x7b, + 0x47, 0x63, 0xdc, 0x62, 0x3f, 0x5b, 0x2b, 0xb8, 0xae, 0x26, 0x8f, 0x65, 0xf4, 0xc6, 0x5f, 0xc1, + 0xc9, 0xd3, 0xa4, 0xe6, 0xdb, 0x52, 0xdb, 0x19, 0xf7, 0xe7, 0x97, 0x5b, 0xef, 0x16, 0x2c, 0x9e, + 0x51, 0xf1, 0xd3, 0x23, 0x00, 0x88, 0x53, 0xbe, 0xe5, 0x17, 0x8a, 0x36, 0x96, 0x42, 0x3b, 0x63, + 0xba, 0xf0, 0x2f, 0xa0, 0x55, 0xff, 0x42, 0x9e, 0x95, 0x89, 0x96, 0x3f, 0x60, 0x6b, 0xec, 0x9c, + 0x34, 0x59, 0x38, 0x3f, 0x6b, 0x08, 0xa0, 0xa0, 0xda, 0xbd, 0x0d, 0xf7, 0x6e, 0xd6, 0x1b, 0xfd, + 0x0b, 0xf9, 0x3f, 0x0e, 0x6b, 0xb3, 0x2b, 0x0f, 0x44, 0x83, 0xea, 0xc4, 0x49, 0x02, 0x2d, 0xff, + 0xbb, 0x5f, 0xfc, 0x59, 0x55, 0x03, 0x10, 0x57, 0x4e, 0x1d, 0xbf, 0xe3, 0xf0, 0x91, 0xa6, 0xaf, + 0x22, 0x5e, 0x75, 0x02, 0x54, 0xe5, 0x68, 0x5a, 0xb7, 0x69, 0x44, 0x5f, 0xe7, 0x1b, 0x80, 0x4b, + 0x70, 0xfe, 0x70, 0x0f, 0x2a, 0xbf, 0x0f, 0x6f, 0x1e, 0x6f, 0x4c, 0x9c, 0x03, 0x98, 0xe4, 0x0d, + 0x86, 0xbb, 0x2b, 0xc2, 0xf7, 0x35, 0x93, 0xcc, 0x5b, 0x49, 0xfe, 0xd1, 0x4e, 0x2e, 0x1a, 0xdc, + 0x89, 0x6e, 0xc5, 0xa0, 0x34, 0x98, 0xb9, 0x26, 0x8f, 0xdc, 0x61, 0x30, 0xbf, 0x06, 0x32, 0x8e, + 0xec, 0x75, 0x35, 0xe4, 0x59, 0x12, 0x2c, 0x03, 0x55, 0x5d, 0x0c, 0x62, 0x37, 0x4d, 0xa0, 0xbc, + 0x4a, 0x59, 0x0d, 0xa7, 0x69, 0x91, 0xb3, 0x63, 0x4c, 0x71, 0x4e, 0x6f, 0x71, 0xd0, 0x35, 0xda, + 0x1d, 0x87, 0x09, 0xcd, 0x0d, 0xe5, 0x58, 0x65, 0x2f, 0xf8, 0xdb, 0x08, 0xbb, 0xda, 0x3d, 0x9c, + 0x6e, 0x05, 0x6c, 0xe3, 0x05, 0x4f, 0xa3, 0xba, 0x7b, 0xe9, 0x6f, 0xbc, 0x0a, 0x69, 0xe6, 0x5c, + 0x23, 0x50, 0x1a, 0xd6, 0x78, 0xae, 0xe8, 0x26, 0xb3, 0x34, 0x5f, 0x86, 0x8d, 0x21, 0xe1, 0x0f, + 0x1c, 0xe2, 0xad, 0x4a, 0x5b, 0xcb, 0xc7, 0xc9, 0xe5, 0x39, 0xba, 0xac, 0x55, 0xe9, 0x7c, 0xfd, + 0xab, 0xc5, 0x89, 0x65, 0xaa, 0x13, 0x19, 0x33, 0x57, 0xc6, 0x35, 0x6b, 0x67, 0x2f, 0xed, 0x9d, + 0x69, 0x47, 0x14, 0xaa, 0xba, 0x36, 0xf7, 0x4b, 0x77, 0x23, 0x01, 0x8b, 0xda, 0x63, 0x16, 0x00, + 0x1a, 0x59, 0xb7, 0x89, 0x33, 0x56, 0x96, 0x9b, 0x53, 0x31, 0xaa, 0x09, 0xc1, 0x19, 0xa7, 0x4a, + 0x22, 0x1e, 0x4f, 0xaa, 0xfd, 0xc4, 0x55, 0x8c, 0x53, 0x53, 0x6a, 0x2f, 0x3c, 0x68, 0x46, 0xe7, + 0x1b, 0xea, 0xee, 0xcc, 0x91, 0x17, 0x46, 0x6a, 0xac, 0x5b, 0x9e, 0x36, 0x8e, 0x5f, 0x1c, 0x64, + 0x98, 0x4d, 0x3d, 0x47, 0xdb, 0x17, 0x04, 0x3b, 0x56, 0x53, 0x5b, 0xc4, 0x05, 0x81, 0x2c, 0x51, + 0x7f, 0x9b, 0xc7, 0xff, 0xee, 0x7f, 0x06, 0x76, 0xc0, 0xe9, 0xc6, 0x44, 0x84, 0x0d, 0x72, 0xd7, + 0x1d, 0xf0, 0x6f, 0x37, 0x55, 0x4f, 0x3c, 0x12, 0x98, 0x1f, 0xdb, 0x1e, 0x62, 0x9d, 0xcd, 0xd8, + 0x02, 0xf3, 0x27, 0x3a, 0x47, 0x8a, 0x9b, 0x03, 0x2f, 0x00, 0x18, 0xb2, 0x1b, 0x23, 0x4a, 0x4e, + 0x2b, 0x9a, 0x21, 0x1e, 0x62, 0xd0, 0xd4, 0x42, 0x7f, 0x6e, 0x5d, 0x06, 0x07, 0x30, 0xf1, 0xbf, + 0xb8, 0x50, 0xa4, 0x9f, 0x0c, 0x17, 0x51, 0x9c, 0x0a, 0x85, 0xb6, 0xc2, 0x58, 0xb8, 0x60, 0xd9, + 0x7f, 0x98, 0xd5, 0xfe, 0xc9, 0xf5, 0x13, 0x81, 0xbe, 0xad, 0x3b, 0xa5, 0x4d, 0x4e, 0x21, 0x7f, + 0xd8, 0x03, 0x45, 0xeb, 0x18, 0xdb, 0x27, 0xb4, 0x11, 0xe9, 0x25, 0x90, 0x3a, 0xc5, 0x04, 0x4b, + 0x35, 0xd0, 0x1f, 0x07, 0xbf, 0x2e, 0xdc, 0xd1, 0x87, 0x9a, 0xc5, 0x35, 0x1d, 0xdc, 0xcc, 0xcd, + 0xb3, 0x86, 0x98, 0xdc, 0xf3, 0xbf, 0x1f, 0x3e, 0xd9, 0x8b, 0xbf, 0x25, 0xed, 0x14, 0x05, 0x82, + 0xf5, 0xb8, 0xf8, 0xe6, 0xc1, 0xeb, 0x1a, 0x59, 0xc0, 0x54, 0x5f, 0x92, 0x8f, 0x6b, 0xb1, 0x8c, + 0x39, 0x9b, 0x81, 0x89, 0x4b, 0xe0, 0xcd, 0xcf, 0x1e, 0x06, 0x76, 0xda, 0xcf, 0x48, 0xb0, 0x8e, + 0x67, 0x2f, 0xdf, 0xd0, 0x1a, 0x39, 0xd8, 0x26, 0x40, 0xdf, 0xfc, 0xc0, 0xd0, 0x79, 0x7a, 0x34, + 0x7f, 0xec, 0xcb, 0x90, 0x8a, 0x4c, 
0xa3, 0xcb, 0x69, 0x6e, 0xad, 0x1a, 0xaa, 0x4e, 0x65, 0x89, + 0x9a, 0x55, 0xf3, 0x18, 0xc2, 0x21, 0xad, 0xdc, 0x1f, 0xc8, 0xae, 0x4c, 0x21, 0xd6, 0x1f, 0x0e, + 0x88, 0xb9, 0x84, 0x83, 0x84, 0x71, 0x6e, 0x65, 0xf7, 0x44, 0xd9, 0xf0, 0x95, 0x90, 0x36, 0x37, + 0xb5, 0x12, 0x0f, 0xc1, 0xe6, 0xa8, 0xbc, 0xb0, 0xf2, 0x3d, 0x3b, 0x9d, 0x9d, 0xc8, 0x81, 0x80, + 0x6f, 0xdc, 0xaf, 0x75, 0x95, 0x27, 0x57, 0xe8, 0xe8, 0x13, 0x33, 0x15, 0x1a, 0x3c, 0x4f, 0x5e, + 0x7e, 0xd3, 0xe7, 0x81, 0x0e, 0x9b, 0x2b, 0x4b, 0x0e, 0x85, 0xfd, 0x0d, 0x56, 0x44, 0x78, 0x09, + 0xc9, 0xa6, 0xf5, 0xf7, 0x6c, 0xba, 0xf4, 0xea, 0x5f, 0x90, 0xcf, 0x41, 0x23, 0xaa, 0x47, 0x74, + 0xaf, 0xc3, 0x0d, 0x75, 0x96, 0xa0, 0x29, 0x7c, 0xdf, 0x01, 0x92, 0xb4, 0xde, 0xd9, 0x9a, 0x37, + 0xcd, 0x11, 0x12, 0x0c, 0x67, 0x77, 0xf6, 0x9e, 0x57, 0x53, 0x44, 0x64, 0x79, 0xe8, 0x15, 0x75, + 0x48, 0x80, 0xdb, 0x12, 0x0e, 0x8e, 0x60, 0x4d, 0x70, 0x2b, 0x30, 0x5f, 0x03, 0x70, 0xa1, 0x24, + 0x05, 0x01, 0xb9, 0xc6, 0xc1, 0xad, 0x04, 0x07, 0x51, 0xbd, 0x97, 0xa6, 0x89, 0xd3, 0xd0, 0xb4, + 0x4a, 0x19, 0x2d, 0x91, 0x3a, 0x92, 0x4b, 0xdc, 0xe5, 0x1f, 0x7f, 0xd9, 0x97, 0xa7, 0xd2, 0x5c, + 0x11, 0x9d, 0x9d, 0xa1, 0x44, 0x61, 0xde, 0x5c, 0xf3, 0xeb, 0x9c, 0xb1, 0xb4, 0xbd, 0x71, 0x3d, + 0x4b, 0x8a, 0x7e, 0x90, 0xf5, 0x15, 0x2b, 0xa3, 0x94, 0x77, 0x35, 0x4d, 0x1f, 0x0e, 0x93, 0xe2, + 0xe7, 0xcb, 0x2a, 0x9c, 0xbe, 0x57, 0xd4, 0x4a, 0x7c, 0x2a, 0xb8, 0xc6, 0x0c, 0x4c, 0x6e, 0x3c, + 0x3b, 0x8a, 0x9f, 0x27, 0xf1, 0x29, 0x61, 0x01, 0x52, 0x48, 0x5e, 0x0b, 0x62, 0xa3, 0x1a, 0xb6, + 0x40, 0xe6, 0xbe, 0x59, 0xf1, 0xa8, 0xa0, 0x02, 0x0d, 0x0f, 0xae, 0x2f, 0xda, 0xa2, 0x6a, 0x4f, + 0xf5, 0xcb, 0x63, 0x24, 0x88, 0xff, 0xb1, 0x02, 0x61, 0xc4, 0x2b, 0xde, 0x03, 0x0c, 0xdf, 0xe7, + 0x2c, 0xe8, 0x0b, 0xfd, 0x7a, 0x73, 0x4d, 0x85, 0xc4, 0xe1, 0x97, 0x23, 0x20, 0x1f, 0xd6, 0xc1, + 0xb0, 0x1c, 0xaa, 0xda, 0xc5, 0x15, 0x48, 0xb2, 0xd2, 0xf0, 0x9b, 0xee, 0x44, 0x7a, 0xe2, 0x32, + 0xe6, 0x51, 0x9a, 0x18, 0x9b, 0xd5, 0x8e, 0xdb, 0xa4, 0xe9, 0xfd, 0xb3, 0x03, 0xf3, 0x03, 0x90, + 0x42, 0x3c, 0x0f, 0xde, 0x77, 0x5f, 0xaa, 0xa0, 0xac, 0x4f, 0xc8, 0xdd, 0x51, 0xe8, 0xbe, 0xb1, + 0x5a, 0x99, 0x2f, 0x42, 0x2f, 0xa1, 0x4c, 0xcb, 0x64, 0xa8, 0xeb, 0x3a, 0x1b, 0xc1, 0x5e, 0x40, + 0xde, 0x47, 0x19, 0x7a, 0x59, 0x5f, 0xf3, 0xcb, 0x33, 0x3b, 0xbf, 0xd4, 0x0c, 0x86, 0x35, 0x7e, + 0x77, 0x45, 0x95, 0xab, 0x3f, 0x7d, 0xa5, 0x50, 0xc6, 0x20, 0x85, 0x6a, 0x41, 0x63, 0xa1, 0xbf, + 0x07, 0x32, 0x3d, 0xf2, 0x19, 0x07, 0x57, 0x19, 0x1d, 0xff, 0xe7, 0xf4, 0xce, 0x0a, 0x8d, 0xa9, + 0xae, 0x0a, 0xdc, 0xde, 0x99, 0xec, 0x8d, 0x00, 0x61, 0xda, 0xb3, 0x73, 0x77, 0xd8, 0xb9, 0x25, + 0x0f, 0x09, 0x19, 0xbc, 0x51, 0xf0, 0xdd, 0x80, 0xb1, 0xb5, 0x2e, 0x1e, 0x61, 0x15, 0xff, 0x3e, + 0x40, 0x52, 0x03, 0x4f, 0x58, 0x7a, 0x8f, 0x5a, 0x75, 0xc4, 0x2c, 0x02, 0x1f, 0x45, 0x5e, 0xd9, + 0xdd, 0xb9, 0xaf, 0x6a, 0x55, 0x7d, 0xf3, 0x00, 0x1a, 0x06, 0x5e, 0xb1, 0x82, 0xba, 0x2d, 0xcb, + 0x0b, 0xa8, 0xda, 0x4a, 0x99, 0x65, 0xdc, 0x56, 0xa0, 0x78, 0xdf, 0xe0, 0x7d, 0x54, 0x4e, 0xdd, + 0xc3, 0xe2, 0xe6, 0x3a, 0x04, 0x48, 0x1c, 0xe8, 0x7a, 0xc9, 0xd3, 0x95, 0x78, 0xb9, 0xec, 0x29, + 0xc6, 0x0d, 0xc3, 0xee, 0xf2, 0x0d, 0x7b, 0x3d, 0x52, 0xac, 0x20, 0x48, 0xb3, 0x6d, 0x58, 0xd2, + 0x1d, 0x36, 0xa2, 0x2f, 0x57, 0xa8, 0xfd, 0xc1, 0x65, 0xe4, 0x3b, 0xbd, 0xf9, 0x4f, 0xb0, 0xe3, + 0xf6, 0xca, 0x1c, 0x74, 0xad, 0x55, 0xd3, 0x22, 0x57, 0x39, 0x0f, 0x1f, 0xc5, 0x63, 0x6e, 0xdf, + 0x95, 0x12, 0x63, 0x97, 0x10, 0xb8, 0x6b, 0x48, 0xdb, 0x48, 0x3e, 0x57, 0xa1, 0x01, 0xdf, 0x2b, + 0x8f, 0xcd, 0x71, 0x60, 0x4f, 0xca, 0x02, 0xdc, 0xcb, 0xfa, 
0xeb, 0xa7, 0x67, 0x75, 0x63, 0xc0, + 0xb2, 0x64, 0xcf, 0xb2, 0xeb, 0x02, 0x43, 0xf5, 0xef, 0x5b, 0x20, 0xc4, 0x34, 0xc7, 0x39, 0x38, + 0x9e, 0xe7, 0xf9, 0x56, 0x4e, 0x33, 0xb5, 0x05, 0x5d, 0x40, 0x8c, 0xa5, 0xa5, 0x16, 0x6d, 0x42, + 0x60, 0x46, 0xb6, 0xb4, 0xcf, 0xfa, 0x14, 0xfb, 0x3c, 0x1c, 0x01, 0x4a, 0x2d, 0x69, 0x58, 0xd2, + 0xa6, 0x07, 0xbc, 0x53, 0x31, 0xa9, 0x0e, 0x67, 0x8a, 0x33, 0x19, 0x5e, 0x7f, 0x93, 0xb6, 0xbe, + 0x62, 0xcc, 0x03, 0x7c, 0xc4, 0x36, 0xff, 0x7d, 0xfd, 0x8b, 0x33, 0x21, 0x5a, 0xb5, 0xbc, 0x28, + 0x8c, 0x88, 0xe8, 0xe0, 0xa6, 0x2a, 0x5d, 0x4c, 0xc5, 0x3d, 0xeb, 0xd5, 0x71, 0x73, 0x4d, 0x71, + 0x1f, 0x03, 0x23, 0x48, 0x88, 0x53, 0x4b, 0xf6, 0xce, 0x44, 0xf6, 0xe3, 0x3c, 0x6f, 0x61, 0x06, + 0x43, 0x75, 0x21, 0xcb, 0xae, 0x76, 0x69, 0xcc, 0x88, 0xd7, 0xbb, 0xe5, 0x92, 0x81, 0xce, 0x10, + 0x92, 0x96, 0x08, 0x00, 0x6e, 0xa6, 0x69, 0x9a, 0x95, 0xc7, 0xd6, 0xd7, 0x23, 0x9c, 0x30, 0xa6, + 0xdc, 0x6d, 0x3a, 0x5d, 0x37, 0x67, 0x66, 0xd9, 0xb1, 0xc1, 0x26, 0xf8, 0xdc, 0x12, 0x66, 0x94, + 0x32, 0xff, 0x3b, 0x61, 0xf1, 0xdf, 0xd7, 0xb9, 0x75, 0xf0, 0x09, 0x21, 0x18, 0x6c, 0xaf, 0xb2, + 0xd0, 0x31, 0x8b, 0xf2, 0x6b, 0x1d, 0xfb, 0x42, 0x78, 0xf9, 0xf0, 0xa8, 0xa1, 0x79, 0xed, 0x01, + 0x4f, 0x17, 0xec, 0x9b, 0x16, 0x5b, 0x0e, 0xbe, 0xdd, 0x7b, 0x09, 0x05, 0x3b, 0x89, 0x4f, 0xdf, + 0xc1, 0x8a, 0x00, 0x98, 0x5b, 0x86, 0xd6, 0x9e, 0xf2, 0xc5, 0x3f, 0x62, 0x83, 0x35, 0x45, 0xa3, + 0xab, 0x7a, 0xce, 0x54, 0x9b, 0x39, 0x06, 0xa5, 0xbf, 0x89, 0x41, 0x3f, 0x8d, 0xd7, 0x02, 0x7c, + 0x8b, 0x65, 0xd5, 0x2d, 0x52, 0x55, 0xa5, 0xb1, 0x45, 0x56, 0xe9, 0x6d, 0x2f, 0x65, 0x9f, 0x6d, + 0xb4, 0xba, 0xc0, 0xf9, 0x2f, 0xf1, 0xa4, 0xba, 0x7c, 0x02, 0x7f, 0x69, 0xd7, 0x25, 0x67, 0x75, + 0x67, 0x71, 0x5b, 0x64, 0xee, 0x69, 0x4c, 0x63, 0xbf, 0x81, 0x0b, 0xc3, 0x3f, 0xbf, 0xbb, 0x7f, + 0x19, 0x85, 0x17, 0x0e, 0x9c, 0x05, 0xd5, 0x2e, 0x3c, 0x4e, 0x27, 0x2e, 0x71, 0x2f, 0xce, 0x00, + 0x1a, 0x7f, 0x0e, 0x4c, 0xff, 0x88, 0x30, 0x20, 0x31, 0xbe, 0x2f, 0xfb, 0xac, 0xf4, 0x0c, 0x6d, + 0x22, 0x9a, 0x35, 0x1b, 0xf3, 0xe8, 0xa1, 0xe0, 0xcf, 0x02, 0xf9, 0xa3, 0xdf, 0x3a, 0x46, 0xf5, + 0x28, 0x07, 0xff, 0x7d, 0xba, 0x85, 0xa4, 0x3b, 0x63, 0x1c, 0xa1, 0x55, 0x52, 0x07, 0x05, 0x80, + 0x62, 0xab, 0x09, 0xd7, 0x8a, 0xbf, 0xd8, 0xf4, 0x29, 0x4b, 0x3b, 0x40, 0xdc, 0xd7, 0x60, 0xb4, + 0x30, 0xe7, 0xce, 0xd8, 0x24, 0xe6, 0x2f, 0x4f, 0x06, 0xdf, 0xb7, 0xa4, 0xd4, 0xcb, 0x51, 0xf1, + 0x4f, 0xcb, 0xb9, 0x68, 0xb9, 0xbc, 0x28, 0x83, 0xb1, 0x1c, 0x2d, 0x50, 0xc8, 0x24, 0x3c, 0x63, + 0x13, 0xc6, 0x74, 0xda, 0x5a, 0x75, 0xea, 0x6b, 0xcb, 0xf5, 0x9c, 0x56, 0x4d, 0x95, 0x21, 0x37, + 0x35, 0x83, 0x33, 0x2f, 0x5d, 0xd5, 0xb9, 0x7e, 0xe5, 0x0e, 0x71, 0xa3, 0x0a, 0x58, 0x73, 0x32, + 0x9c, 0x44, 0xe4, 0x74, 0x26, 0xb3, 0x37, 0x30, 0x87, 0x28, 0x98, 0x0c, 0xf8, 0x70, 0x68, 0x7a, + 0x7b, 0xe3, 0x75, 0x53, 0x7d, 0x6a, 0x4f, 0x55, 0x3d, 0xc2, 0xf3, 0xd7, 0x51, 0xb5, 0xd3, 0x5a, + 0x75, 0xd3, 0x75, 0xd7, 0x33, 0x3a, 0xcb, 0x0e, 0x8f, 0x97, 0xee, 0x56, 0xb7, 0x5d, 0x70, 0xc9, + 0xd8, 0xdb, 0x75, 0xf1, 0x66, 0xc2, 0x93, 0x87, 0x34, 0x3a, 0xc6, 0x37, 0x29, 0x35, 0x0c, 0x27, + 0x8d, 0x07, 0xb8, 0x61, 0xfb, 0x45, 0xd9, 0xb6, 0xc5, 0xa9, 0xaf, 0xc9, 0x60, 0x5b, 0x87, 0xef, + 0x5e, 0x1e, 0x78, 0xc8, 0x49, 0xce, 0x4b, 0xe2, 0xec, 0x1d, 0xb2, 0x5c, 0x88, 0x32, 0x73, 0x6b, + 0x4d, 0x14, 0x1d, 0xbe, 0xbf, 0xe2, 0x31, 0x86, 0xd5, 0xd7, 0x36, 0x19, 0xde, 0x5a, 0xa2, 0x56, + 0xdf, 0x37, 0x1c, 0xf8, 0xc6, 0x2c, 0xbd, 0xcc, 0x72, 0x01, 0x41, 0x94, 0x4c, 0xde, 0x95, 0xe5, + 0xc5, 0x82, 0xba, 0xe8, 0xa7, 0xc9, 0xa7, 0xc4, 0x2f, 0x9d, 0x78, 0xf4, 0x8a, 0x2b, 
0x36, 0x9a, + 0xca, 0x4b, 0x51, 0xd3, 0xe7, 0x8b, 0x34, 0xb3, 0xfe, 0xe1, 0xbc, 0x30, 0x05, 0xab, 0xa1, 0xd5, + 0x16, 0x91, 0x86, 0x0b, 0xf3, 0x8f, 0x16, 0x40, 0xda, 0x10, 0x1d, 0x21, 0xc6, 0x2f, 0xb2, 0xc1, + 0xd0, 0xef, 0x9b, 0xc3, 0xfb, 0x66, 0xf5, 0xb7, 0x81, 0x6c, 0xc1, 0x20, 0x81, 0xa0, 0xde, 0xeb, + 0xd5, 0xc0, 0xaf, 0x25, 0x4b, 0xdc, 0xa7, 0xf9, 0x6f, 0x35, 0x58, 0xf9, 0x22, 0x3f, 0x9a, 0xf4, + 0xc5, 0xb4, 0xad, 0x9a, 0xcc, 0xe6, 0xe5, 0xad, 0x67, 0x13, 0x65, 0x86, 0xcd, 0xde, 0x8d, 0xe7, + 0x68, 0x6e, 0x23, 0xba, 0x2d, 0x71, 0xf2, 0x17, 0xc5, 0x4a, 0x0e, 0xf8, 0xd2, 0x76, 0x8c, 0x38, + 0x1b, 0x65, 0x5a, 0xf7, 0x76, 0xe9, 0xa1, 0x84, 0xea, 0x06, 0x6e, 0x8c, 0xa5, 0x79, 0x2e, 0x17, + 0x19, 0x67, 0xcb, 0x81, 0xec, 0x4d, 0xdd, 0xef, 0xfb, 0x2f, 0xdb, 0xda, 0xd3, 0x9f, 0xf4, 0xbf, + 0x1e, 0x55, 0xe6, 0x7d, 0x88, 0x4a, 0x20, 0xfc, 0xcd, 0x8e, 0xd3, 0x15, 0xa2, 0x0b, 0xe7, 0x10, + 0xba, 0x16, 0x29, 0x35, 0x85, 0x72, 0x5d, 0x9f, 0xd6, 0x18, 0xc3, 0xc1, 0x70, 0x95, 0x4e, 0x8e, + 0xc3, 0x1d, 0xef, 0x74, 0xa5, 0x52, 0x00, 0xe5, 0x00, 0xb5, 0xb1, 0x46, 0xc5, 0x69, 0x13, 0x33, + 0x74, 0xc9, 0xe6, 0x01, 0xb6, 0xf5, 0xb4, 0xc7, 0x89, 0x9b, 0xc8, 0x2e, 0xe7, 0xb4, 0xb7, 0x19, + 0xef, 0x50, 0xbf, 0xd3, 0xe8, 0x51, 0x0b, 0x0d, 0xf9, 0xf1, 0xa0, 0xad, 0xc4, 0x32, 0x6e, 0x22, + 0x38, 0x2d, 0x83, 0xdc, 0xc0, 0x24, 0xaf, 0xca, 0xef, 0xc7, 0x53, 0xc8, 0xab, 0xa2, 0x4b, 0x5e, + 0x8e, 0x96, 0x37, 0x95, 0x5f, 0x59, 0xdd, 0x9d, 0x91, 0x9d, 0xaf, 0xb0, 0x4f, 0x44, 0xf2, 0x98, + 0x04, 0x0f, 0xa3, 0x64, 0x34, 0xa3, 0x1f, 0x49, 0xc4, 0x3d, 0x2c, 0x22, 0xef, 0x2e, 0x5b, 0x1e, + 0xd4, 0x94, 0xe9, 0x33, 0x4f, 0x39, 0x22, 0x08, 0x39, 0xb2, 0xb1, 0x41, 0xdb, 0xef, 0x7c, 0x16, + 0x61, 0x13, 0x5c, 0x60, 0x8e, 0x67, 0xef, 0xf7, 0x17, 0x7f, 0x70, 0xbc, 0xaf, 0x6e, 0x3b, 0x73, + 0xd4, 0x3f, 0x0c, 0x2a, 0x29, 0x4c, 0x4c, 0xa8, 0x90, 0x02, 0x9d, 0xca, 0xfe, 0x7e, 0x89, 0x9a, + 0x1b, 0xb2, 0xf7, 0x6b, 0x4a, 0xf9, 0x3d, 0x16, 0x19, 0x96, 0xbc, 0xb0, 0x02, 0x4c, 0x1e, 0xeb, + 0x41, 0xc8, 0x9d, 0x75, 0xf2, 0x63, 0xac, 0x5a, 0xfb, 0x3f, 0x6c, 0xd5, 0x1e, 0xcd, 0x8f, 0x05, + 0x96, 0x0a, 0x0c, 0x99, 0xc9, 0xcc, 0x93, 0x72, 0x48, 0x67, 0xa9, 0xe0, 0x0b, 0x7c, 0x23, 0x43, + 0x51, 0x00, 0x4f, 0x86, 0xaa, 0xd5, 0xbe, 0x50, 0x8c, 0x65, 0x51, 0xf2, 0xd2, 0xbb, 0x6a, 0x59, + 0x9d, 0x9e, 0xa3, 0xa1, 0xd0, 0xae, 0x97, 0xe4, 0xbc, 0x2e, 0x47, 0x2e, 0x87, 0xe7, 0x38, 0x10, + 0x3c, 0x07, 0x7b, 0x9c, 0x8b, 0xe7, 0x8c, 0xf6, 0x1a, 0x88, 0xd2, 0x5d, 0xa5, 0xd9, 0xa8, 0x7b, + 0x80, 0xf0, 0x3c, 0xda, 0x39, 0x35, 0x93, 0x9d, 0x77, 0x07, 0xe3, 0xae, 0x88, 0x5c, 0xff, 0xd8, + 0xc3, 0x95, 0xf6, 0x00, 0xc6, 0x1b, 0x4d, 0xc3, 0xfa, 0xa3, 0xa5, 0x0d, 0x95, 0x3b, 0x2b, 0x3c, + 0x0b, 0x13, 0x65, 0x10, 0x44, 0x36, 0x41, 0x19, 0x0f, 0x49, 0xa4, 0xd2, 0xcd, 0x44, 0xb3, 0xd1, + 0x2e, 0x76, 0x1f, 0x3e, 0x58, 0x38, 0x03, 0xc6, 0x42, 0x0a, 0x4d, 0xc7, 0x23, 0xdf, 0xd1, 0xb0, + 0xc5, 0x3d, 0xdf, 0x61, 0x94, 0xbe, 0x97, 0x47, 0x18, 0x7f, 0xb9, 0x56, 0x2c, 0x83, 0x36, 0x03, + 0x0a, 0x76, 0x8f, 0x41, 0xd2, 0x68, 0xcc, 0xa2, 0xf7, 0x97, 0xf4, 0xa2, 0x34, 0x38, 0xcf, 0x01, + 0xee, 0x3e, 0x63, 0x12, 0x47, 0x83, 0x93, 0x77, 0xdf, 0xb8, 0x05, 0xa1, 0x84, 0xc1, 0x64, 0x77, + 0x53, 0xe3, 0x0f, 0x9c, 0xe0, 0xb2, 0x46, 0x27, 0xda, 0x9e, 0x5f, 0x96, 0x44, 0x9a, 0xb6, 0xe4, + 0x1b, 0xd0, 0xe6, 0x79, 0x49, 0xcc, 0x27, 0xb5, 0x5e, 0xf0, 0x27, 0xa5, 0x7f, 0x2d, 0xe0, 0xf7, + 0x39, 0x76, 0xfd, 0xf4, 0x09, 0x72, 0x53, 0xbe, 0xab, 0xbd, 0x6b, 0x71, 0x2b, 0x4c, 0x9f, 0x2c, + 0xb5, 0xd8, 0x47, 0x56, 0x62, 0xe3, 0x1c, 0x42, 0xe6, 0x17, 0xdb, 0x55, 0xab, 0x4b, 0x05, 0xa3, + 0x10, 0x41, 
0xd8, 0x46, 0x62, 0x9f, 0x8a, 0x04, 0x66, 0x91, 0xa0, 0x1f, 0x0b, 0xf6, 0x2b, 0x6b, + 0xfb, 0xa9, 0xaf, 0xaa, 0xc9, 0x05, 0xb5, 0xf1, 0xfc, 0xef, 0xf9, 0x99, 0x33, 0x08, 0x81, 0x07, + 0x3a, 0x18, 0x72, 0xdd, 0xcb, 0xae, 0x66, 0xa6, 0xd8, 0xf3, 0x17, 0x88, 0x77, 0xbc, 0xe5, 0x20, + 0xfe, 0x81, 0xa5, 0xd0, 0x01, 0xcb, 0x74, 0x98, 0xcb, 0xe6, 0xa4, 0x4e, 0xab, 0x5f, 0xd9, 0x68, + 0x0c, 0xf7, 0xd6, 0x22, 0x79, 0xc0, 0x6c, 0x55, 0xd2, 0x53, 0x69, 0x57, 0xca, 0xd6, 0x10, 0xca, + 0x56, 0xe4, 0x28, 0xc1, 0x66, 0x04, 0x26, 0x4a, 0x0f, 0x64, 0x04, 0x2e, 0x90, 0xb5, 0x44, 0x7a, + 0xd5, 0x18, 0x84, 0x45, 0x2c, 0x93, 0xaf, 0x1b, 0x6b, 0x3e, 0x86, 0x71, 0xbb, 0xdd, 0x7c, 0x9d, + 0xd3, 0x40, 0x51, 0x99, 0x97, 0xa9, 0x9a, 0x67, 0xf5, 0x5d, 0x1b, 0x17, 0x2c, 0xa1, 0xc3, 0x38, + 0xde, 0x31, 0xc6, 0x78, 0x3e, 0x3a, 0xc4, 0x86, 0xe7, 0xd2, 0x24, 0xa7, 0x1e, 0x9d, 0x31, 0x93, + 0x5d, 0x7f, 0x23, 0x17, 0x8e, 0x68, 0xdb, 0xac, 0xb3, 0xa1, 0xda, 0x6b, 0x57, 0x03, 0x09, 0x9d, + 0x49, 0xc8, 0x49, 0x41, 0xf0, 0x99, 0xe5, 0x07, 0x6f, 0x0e, 0xb8, 0x3d, 0x9a, 0xcc, 0x6c, 0xa8, + 0x51, 0x6c, 0xfa, 0xaf, 0xd6, 0x4d, 0x9f, 0xbf, 0x29, 0x2b, 0x89, 0xf7, 0x4f, 0xd8, 0xf1, 0xbd, + 0xe0, 0x2a, 0x61, 0x2b, 0x04, 0xc4, 0x3c, 0x86, 0x74, 0xd6, 0x9c, 0x1c, 0x96, 0xe8, 0x81, 0x18, + 0x27, 0x3e, 0x1a, 0xc7, 0x04, 0x00, 0xa1, 0x3c, 0x1b, 0x56, 0x4b, 0x7e, 0x0d, 0x97, 0x6e, 0xc0, + 0xd9, 0x1e, 0xa4, 0x57, 0x98, 0x4a, 0xd2, 0xb4, 0x07, 0x46, 0x5f, 0x6f, 0x93, 0x1f, 0xa7, 0x37, + 0xc0, 0xa3, 0xe6, 0x98, 0x6c, 0x91, 0x71, 0x33, 0x2a, 0x32, 0x8e, 0xf9, 0x67, 0xd7, 0xa9, 0x9e, + 0x30, 0x95, 0xcc, 0xae, 0x33, 0x6d, 0x94, 0x82, 0x45, 0x78, 0x8d, 0xa3, 0x7b, 0x8e, 0x4e, 0x35, + 0x92, 0xe0, 0x1f, 0x9f, 0xd2, 0x78, 0x90, 0xac, 0xd6, 0xcf, 0x32, 0xe3, 0x54, 0xe2, 0xae, 0x44, + 0xce, 0x07, 0x36, 0x73, 0x1d, 0x8a, 0x54, 0xd1, 0x6e, 0xe1, 0x90, 0x8f, 0x6e, 0xfb, 0x60, 0xfa, + 0x76, 0x60, 0x48, 0xd3, 0xdb, 0x22, 0x24, 0x52, 0x9f, 0x35, 0x79, 0xe7, 0x07, 0xa2, 0x44, 0x39, + 0x6b, 0x13, 0x77, 0xd8, 0x52, 0xa9, 0x84, 0x39, 0x30, 0x93, 0xcf, 0x5f, 0x10, 0x8a, 0x15, 0x68, + 0x9c, 0xd8, 0x2a, 0xef, 0x57, 0x32, 0x23, 0xd8, 0xea, 0x1a, 0x68, 0x35, 0x21, 0x9c, 0x18, 0x11, + 0x7b, 0xcf, 0xd5, 0x21, 0x63, 0xb8, 0xb3, 0xe9, 0x0d, 0xb2, 0x1b, 0xf9, 0xf8, 0xf2, 0xd6, 0x29, + 0xa9, 0x5f, 0x72, 0x33, 0xa3, 0x50, 0xf8, 0xd7, 0xc4, 0x41, 0xe1, 0x12, 0x5d, 0xc6, 0x92, 0x5b, + 0x63, 0xb0, 0xb6, 0xb4, 0xf7, 0x06, 0x16, 0x06, 0x7d, 0x5e, 0x98, 0x6c, 0x34, 0x56, 0x6f, 0xee, + 0xd5, 0x20, 0x68, 0xb9, 0x64, 0xf8, 0x00, 0xbf, 0x3e, 0x49, 0x8f, 0xb5, 0x04, 0x40, 0x01, 0x5e, + 0xaa, 0x7c, 0xe0, 0xc1, 0x78, 0x32, 0x8c, 0x20, 0x05, 0x24, 0xc5, 0x03, 0xf5, 0x7e, 0x63, 0xc2, + 0x1d, 0x83, 0x91, 0xc1, 0x1b, 0xc7, 0x72, 0x6b, 0x1e, 0x56, 0x0d, 0x1a, 0x95, 0x7e, 0x7b, 0xb3, + 0x52, 0x4a, 0x66, 0xd2, 0x32, 0x17, 0x06, 0xa3, 0xf6, 0xc7, 0xb1, 0xc3, 0x63, 0xb4, 0xc1, 0xd7, + 0xea, 0x0f, 0xb4, 0xaa, 0x38, 0x82, 0x1c, 0xb1, 0xc5, 0xc2, 0xa1, 0x85, 0x95, 0x15, 0x21, 0x3b, + 0x05, 0xbc, 0x88, 0x62, 0x09, 0x88, 0xec, 0x1f, 0x15, 0xe3, 0xe6, 0xba, 0x4a, 0xc5, 0x5e, 0x44, + 0x77, 0xb9, 0xfd, 0xb6, 0x01, 0xf1, 0x73, 0x70, 0x1f, 0x42, 0x4a, 0x7f, 0x1d, 0xa6, 0xa8, 0x95, + 0x99, 0xa6, 0xaa, 0x95, 0x07, 0x2e, 0x57, 0x04, 0x2f, 0xf4, 0x4e, 0xf4, 0x5e, 0x6b, 0x5f, 0x25, + 0x9f, 0xff, 0x54, 0x60, 0xa0, 0xfc, 0xc4, 0x5c, 0x56, 0x7e, 0x67, 0xe2, 0xfe, 0xef, 0x79, 0xdb, + 0xd1, 0x8c, 0x0a, 0x4b, 0x44, 0xdb, 0x57, 0x37, 0x4a, 0x17, 0x32, 0x23, 0xa6, 0x9c, 0xf3, 0x22, + 0x9f, 0xbe, 0xec, 0x6a, 0xc5, 0x3b, 0x18, 0xe0, 0x0e, 0xad, 0x82, 0xcc, 0x29, 0xdf, 0x70, 0x61, + 0x6c, 0x64, 0xd5, 0x84, 0x3d, 0x4d, 
0xa7, 0x03, 0xc8, 0xac, 0x9d, 0xbf, 0x8a, 0x30, 0x71, 0x5e, + 0xc5, 0x86, 0xe2, 0xd4, 0xaf, 0x06, 0x9c, 0x80, 0x04, 0x64, 0xa4, 0x21, 0x51, 0xac, 0x5d, 0x0a, + 0x88, 0x19, 0x0f, 0x41, 0x69, 0x3d, 0xcd, 0xbd, 0x40, 0x8d, 0x4a, 0xdd, 0xa8, 0xd8, 0xfe, 0xfa, + 0xf4, 0xc1, 0xb6, 0xb6, 0x8b, 0x1e, 0x3c, 0xca, 0x6d, 0xf1, 0x06, 0xc7, 0xaa, 0xb3, 0xeb, 0x69, + 0x1f, 0x01, 0xd3, 0x5b, 0x03, 0xe8, 0x97, 0xba, 0xf3, 0x97, 0x3e, 0x37, 0x17, 0xca, 0xc7, 0x75, + 0x24, 0xce, 0x87, 0x55, 0x6b, 0x25, 0x82, 0x0c, 0xf3, 0xe5, 0x6c, 0xc4, 0xf2, 0xbd, 0x83, 0xba, + 0x99, 0x2b, 0xd6, 0x8c, 0x42, 0xbf, 0xcf, 0xa1, 0x4e, 0x73, 0x9b, 0x8f, 0xd6, 0x88, 0xc5, 0xf0, + 0xef, 0xbd, 0xc9, 0x00, 0xa1, 0xae, 0x3a, 0x3b, 0xe6, 0x72, 0x49, 0xa1, 0xc8, 0x69, 0x88, 0xcf, + 0xd5, 0x31, 0x77, 0x30, 0xc5, 0x36, 0xba, 0x55, 0xbd, 0xe0, 0x5d, 0x93, 0x83, 0xfc, 0xed, 0xcd, + 0x7b, 0xdd, 0xe2, 0x71, 0xeb, 0xfd, 0x19, 0xbe, 0x61, 0x74, 0xb8, 0xe6, 0x49, 0x06, 0x51, 0x80, + 0x1a, 0x78, 0x03, 0xa9, 0x89, 0x5e, 0x21, 0xe3, 0xb3, 0xab, 0x89, 0xa5, 0xe0, 0x41, 0x46, 0xee, + 0x30, 0x8a, 0x11, 0xec, 0x4a, 0x3c, 0x3f, 0xde, 0xa0, 0x8f, 0x98, 0xfe, 0x9c, 0x0a, 0xcf, 0x34, + 0x51, 0xd2, 0x0b, 0x90, 0x13, 0x6f, 0xcb, 0x0f, 0xb5, 0xe5, 0x51, 0x63, 0x78, 0x0c, 0x9a, 0x0a, + 0x5f, 0x4b, 0xdf, 0x77, 0xb0, 0xf6, 0x47, 0x8d, 0xba, 0xe5, 0x54, 0x52, 0x70, 0x9b, 0xc0, 0x8d, + 0xee, 0xe9, 0xb5, 0x91, 0xc0, 0x18, 0x9f, 0x1f, 0x11, 0xc1, 0x91, 0x0b, 0x44, 0x9f, 0xfd, 0xe6, + 0x99, 0x34, 0x41, 0x61, 0x47, 0xde, 0x09, 0x32, 0x8d, 0x32, 0x05, 0xfb, 0xf0, 0x6c, 0xdd, 0x0b, + 0x91, 0x33, 0x1d, 0x87, 0xe9, 0xe4, 0xb5, 0x8e, 0x44, 0x04, 0xc8, 0xd7, 0x80, 0x24, 0x67, 0x5a, + 0xca, 0xe1, 0xd6, 0xab, 0x39, 0x4a, 0x3c, 0xbc, 0x3e, 0x84, 0x11, 0xe4, 0xf2, 0x29, 0x1c, 0x9c, + 0xb2, 0xcb, 0x4c, 0x85, 0x3c, 0xa7, 0x09, 0xb8, 0x35, 0xd6, 0x6e, 0x79, 0x25, 0x2e, 0x4d, 0x7a, + 0xc9, 0x16, 0x34, 0xee, 0xec, 0x04, 0xae, 0x37, 0x09, 0xb9, 0x8a, 0x5f, 0xd0, 0x75, 0x1d, 0xad, + 0xd2, 0x49, 0xdd, 0x60, 0x64, 0x28, 0xf0, 0xe4, 0xcc, 0x15, 0x41, 0xcf, 0x32, 0x1b, 0xa6, 0xed, + 0x00, 0x1a, 0x1c, 0xb3, 0xce, 0xb5, 0x1d, 0xd1, 0xf3, 0x6f, 0xd1, 0x1e, 0x67, 0x46, 0xfc, 0x6b, + 0x3f, 0x27, 0xa6, 0xd8, 0x50, 0x7b, 0x78, 0x2a, 0x2f, 0xb7, 0x56, 0xb0, 0x16, 0x9a, 0x13, 0x03, + 0xa8, 0x01, 0xdd, 0xba, 0xe5, 0x5e, 0x0e, 0x99, 0x4c, 0x95, 0x98, 0xb5, 0x79, 0xc5, 0xdb, 0xcb, + 0x92, 0x41, 0x75, 0x6f, 0x4f, 0x11, 0x1f, 0x2d, 0x02, 0xd9, 0xe7, 0xe4, 0x84, 0x80, 0xc7, 0x66, + 0xe9, 0x91, 0xea, 0x25, 0xac, 0xec, 0xe7, 0x76, 0x43, 0x34, 0x13, 0xd7, 0x04, 0xee, 0xb1, 0xcd, + 0x75, 0xdf, 0x40, 0x2f, 0xb8, 0xeb, 0x6f, 0xba, 0xee, 0xd1, 0x25, 0x9c, 0xa3, 0xe6, 0x9b, 0x76, + 0x41, 0x6e, 0x64, 0x89, 0x6f, 0x43, 0xc6, 0x42, 0x19, 0x5d, 0x72, 0x7a, 0x5b, 0xbe, 0x98, 0x62, + 0xb6, 0x0c, 0xd5, 0xb2, 0x72, 0xea, 0x7a, 0x70, 0x14, 0x48, 0x31, 0xd2, 0xda, 0x23, 0x05, 0x3a, + 0x0f, 0x49, 0x1c, 0xf8, 0xd5, 0x2f, 0x8a, 0xa3, 0x90, 0x7b, 0xf2, 0xa7, 0x08, 0x0e, 0x29, 0xd2, + 0xab, 0x5d, 0x50, 0x5e, 0x7e, 0xfb, 0x55, 0x48, 0xce, 0x83, 0x5e, 0xc5, 0x62, 0xd8, 0x18, 0xf0, + 0x58, 0xf1, 0x0f, 0x95, 0xb3, 0xc6, 0x7b, 0xc6, 0x47, 0x41, 0x0c, 0xb5, 0x9e, 0xb2, 0x6e, 0x40, + 0xec, 0xb3, 0x79, 0xe3, 0xbf, 0x1f, 0x92, 0x33, 0x6e, 0xf9, 0x29, 0xd0, 0x35, 0xb6, 0x7e, 0x2a, + 0x1a, 0xea, 0xb9, 0x37, 0xae, 0x11, 0xa6, 0x1b, 0x6c, 0xe7, 0xa3, 0x76, 0xb2, 0x54, 0xce, 0x3c, + 0xaf, 0xb5, 0xfd, 0xc1, 0x0a, 0xf6, 0xba, 0x8b, 0xad, 0xd7, 0x0f, 0x3a, 0xa6, 0x24, 0x54, 0x63, + 0x9a, 0x71, 0x41, 0x2d, 0x6a, 0x57, 0xa2, 0xd6, 0x08, 0xf0, 0x68, 0xb7, 0xa0, 0xe1, 0xb4, 0x4c, + 0x48, 0x4b, 0x91, 0xc7, 0x9c, 0x7e, 0xf6, 0xb7, 0xa6, 0xfb, 
0xb9, 0x63, 0x9d, 0xd6, 0xfa, 0x85, + 0x28, 0xc2, 0xb4, 0xf6, 0xf8, 0xad, 0x32, 0x24, 0x7f, 0xb2, 0xed, 0x42, 0x70, 0x16, 0x50, 0xd7, + 0x79, 0x1b, 0x5f, 0x2c, 0x56, 0x4e, 0x33, 0xf2, 0x2b, 0x33, 0x5b, 0xef, 0x18, 0xff, 0xb2, 0x9c, + 0x64, 0x5e, 0x21, 0x0b, 0x4d, 0xb8, 0xac, 0xf1, 0x9d, 0x2d, 0xda, 0x03, 0xae, 0xec, 0xfc, 0x99, + 0xb1, 0x7e, 0x48, 0x9c, 0xb3, 0xd4, 0xe1, 0x8d, 0x18, 0x4f, 0xe1, 0x1c, 0xe5, 0x50, 0x2c, 0x41, + 0x4e, 0x51, 0x6d, 0x51, 0x79, 0x53, 0xd4, 0xf1, 0xfb, 0x8c, 0x5e, 0x22, 0xe1, 0xcb, 0x64, 0x0a, + 0x4f, 0x13, 0xd7, 0xb1, 0xb6, 0xaf, 0x28, 0x7e, 0x1b, 0xc0, 0xf6, 0xa3, 0xce, 0xcb, 0x56, 0x6d, + 0xa4, 0xe3, 0x69, 0x3a, 0xbe, 0x1e, 0x17, 0xbf, 0x26, 0x6f, 0xe7, 0x16, 0xdd, 0xe4, 0x9d, 0x5d, + 0x3f, 0x7c, 0x1b, 0x16, 0x65, 0x19, 0xd9, 0xc4, 0xd7, 0x40, 0xd4, 0xec, 0x71, 0xe3, 0x29, 0x05, + 0xec, 0x81, 0x66, 0x80, 0xb4, 0xe1, 0x96, 0x84, 0x62, 0xdd, 0x9b, 0x3a, 0xde, 0x37, 0x55, 0xa2, + 0x03, 0x1c, 0x11, 0x07, 0x2f, 0x71, 0x86, 0x3e, 0x63, 0x07, 0xc1, 0xdb, 0xe2, 0x59, 0x6a, 0x11, + 0x00, 0x75, 0xa8, 0x66, 0x52, 0x74, 0x93, 0xe4, 0xd3, 0x13, 0x84, 0xdc, 0x1d, 0x2d, 0x79, 0xd1, + 0x7d, 0xb1, 0xef, 0xf5, 0x32, 0x00, 0x6c, 0x0b, 0xf4, 0x38, 0x42, 0xaf, 0x90, 0x02, 0xab, 0xf5, + 0x19, 0xb2, 0x13, 0x4e, 0xd2, 0x6a, 0x7f, 0xfd, 0x91, 0xf5, 0x9c, 0x22, 0xdf, 0x4a, 0x78, 0xe8, + 0x4a, 0xd1, 0x64, 0x2d, 0x7b, 0xa5, 0x70, 0x55, 0xca, 0xc0, 0xdd, 0x7e, 0xf0, 0x5e, 0x87, 0xf6, + 0xb1, 0xad, 0x68, 0xdc, 0x05, 0x82, 0x3b, 0x49, 0xd9, 0x7a, 0x7b, 0x0e, 0xdb, 0x3d, 0xb9, 0x35, + 0xce, 0x47, 0x46, 0xe0, 0x41, 0xf6, 0x73, 0xc6, 0x6a, 0x4a, 0xbf, 0xe3, 0xe3, 0x67, 0xb7, 0x4e, + 0x15, 0xf0, 0xa2, 0xf0, 0x0f, 0xeb, 0x2a, 0xed, 0x28, 0xe4, 0xcb, 0xb6, 0xf1, 0x88, 0x3d, 0xe9, + 0xba, 0x70, 0x33, 0x49, 0xeb, 0x21, 0xa2, 0x98, 0x17, 0xf5, 0x4a, 0x1b, 0xfd, 0xc8, 0x2e, 0x61, + 0xeb, 0xcc, 0xf0, 0xee, 0xf8, 0xf8, 0x6b, 0xe5, 0xff, 0xb3, 0xd2, 0x24, 0x43, 0x36, 0xb6, 0xdd, + 0x4b, 0x4d, 0x0c, 0x59, 0x11, 0x3e, 0x1a, 0x86, 0x62, 0xaf, 0x07, 0x33, 0x18, 0x41, 0x69, 0x11, + 0x57, 0x23, 0x1e, 0x85, 0xe6, 0x73, 0x2e, 0xe7, 0xd8, 0x61, 0x93, 0xf6, 0xd5, 0x0c, 0x97, 0x06, + 0x3c, 0x73, 0xfd, 0x80, 0xc3, 0xa8, 0xed, 0x7e, 0xe5, 0x8e, 0xa1, 0x0e, 0x71, 0xf9, 0x87, 0xb3, + 0xe2, 0xa1, 0x46, 0x55, 0xa7, 0x0a, 0xfe, 0xb5, 0xcb, 0x6e, 0xb1, 0x7a, 0xa5, 0x89, 0x57, 0x63, + 0xc2, 0x9e, 0x5d, 0xf9, 0x93, 0xd2, 0x80, 0xb8, 0x92, 0xdf, 0xe8, 0x4b, 0x5e, 0x0e, 0x2c, 0xff, + 0x0c, 0x46, 0x62, 0xd4, 0x7a, 0xd8, 0x25, 0xde, 0xb2, 0x37, 0x60, 0x96, 0x5c, 0x8c, 0xe5, 0x0a, + 0xc8, 0x29, 0x7f, 0x3c, 0xa0, 0x19, 0xfe, 0xf7, 0x11, 0xce, 0x45, 0xbb, 0xff, 0xf6, 0xfc, 0x5e, + 0x7b, 0x23, 0x7c, 0x96, 0x73, 0xce, 0x34, 0x9a, 0x24, 0x42, 0x16, 0xdc, 0x14, 0xd8, 0x63, 0xd9, + 0x83, 0x23, 0xd1, 0x4b, 0xf1, 0x28, 0xac, 0xcb, 0xfc, 0x62, 0x79, 0xdc, 0x46, 0x10, 0xec, 0x30, + 0x29, 0x08, 0xec, 0x40, 0x8d, 0x34, 0x4a, 0x5b, 0x78, 0x15, 0xa3, 0x35, 0x16, 0x5b, 0x14, 0x92, + 0x7d, 0x7d, 0xfe, 0xb0, 0xa9, 0x7b, 0x19, 0xa6, 0xec, 0xb6, 0x57, 0x66, 0xa2, 0x9a, 0xb8, 0xa1, + 0x38, 0x05, 0x85, 0x15, 0xa7, 0xc2, 0x9d, 0x68, 0x43, 0xde, 0xbc, 0x84, 0x2e, 0x39, 0x00, 0xb7, + 0xa9, 0xa6, 0x9d, 0x98, 0x2b, 0x20, 0x84, 0x7a, 0x2c, 0xf8, 0xe4, 0x5e, 0xbb, 0x29, 0x34, 0x72, + 0xf6, 0xd8, 0x2b, 0x56, 0x9d, 0x1d, 0xc7, 0x55, 0x0b, 0x6b, 0x92, 0x2b, 0x10, 0x32, 0xfe, 0x3d, + 0x50, 0x57, 0x2a, 0xa3, 0xdf, 0x50, 0x0e, 0x12, 0xbf, 0xfd, 0x4f, 0x2d, 0xac, 0xa2, 0x7f, 0xa5, + 0x5e, 0x08, 0x3a, 0xbf, 0x72, 0xec, 0x98, 0xeb, 0x8b, 0x0a, 0xbb, 0xd0, 0x80, 0xd5, 0x96, 0x03, + 0xcd, 0xd1, 0xc3, 0x63, 0x2d, 0xe4, 0x80, 0xe6, 0x29, 0x2d, 0x3b, 0xb7, 0x1f, 0xd7, 
0x7d, 0xe4, + 0xa6, 0xb7, 0x04, 0x4a, 0xed, 0x4b, 0x0f, 0x90, 0xeb, 0x7c, 0x84, 0x42, 0x6c, 0x59, 0xe0, 0xcd, + 0x93, 0x07, 0x24, 0xfc, 0x4a, 0xfa, 0x1c, 0x30, 0x44, 0x2c, 0x58, 0xc1, 0x8d, 0x29, 0xaf, 0xdf, + 0x94, 0x06, 0xd4, 0xba, 0x87, 0x74, 0xf7, 0x5d, 0xe5, 0x2f, 0x4c, 0x8f, 0x5d, 0xbe, 0xc2, 0x27, + 0x52, 0xcb, 0xaf, 0x3d, 0xbe, 0xff, 0xd0, 0x41, 0xf2, 0x4d, 0xf8, 0x01, 0x46, 0x92, 0x2c, 0xd7, + 0x9d, 0x33, 0x0f, 0x28, 0x8e, 0x5d, 0xef, 0xe3, 0xbc, 0xf3, 0x46, 0x1c, 0x00, 0x67, 0xfb, 0x77, + 0xf7, 0x28, 0xf2, 0x0b, 0x2a, 0x47, 0x60, 0x59, 0xa2, 0x91, 0x83, 0x86, 0xcf, 0x46, 0x6c, 0xab, + 0xe4, 0x36, 0x03, 0x54, 0x9d, 0xf0, 0x80, 0x6e, 0x78, 0xe2, 0x20, 0xda, 0x3e, 0xee, 0x23, 0xcc, + 0x8c, 0xa9, 0x7b, 0x41, 0x1f, 0x59, 0x85, 0x87, 0x9c, 0x8e, 0x29, 0xb2, 0x58, 0x10, 0xbb, 0x78, + 0x05, 0x3a, 0xb5, 0x46, 0x21, 0xa0, 0x97, 0x96, 0xed, 0x0f, 0x37, 0x7b, 0x11, 0xb7, 0x92, 0x0c, + 0xd6, 0x56, 0x76, 0x36, 0xe6, 0x0f, 0xf6, 0xb3, 0x56, 0x2c, 0x05, 0x74, 0x6f, 0x8f, 0xe1, 0x0e, + 0x1a, 0x2f, 0xdf, 0x60, 0xba, 0x69, 0x3f, 0x82, 0xb3, 0x98, 0xf8, 0x32, 0x6c, 0x02, 0x5e, 0xbc, + 0x1e, 0x53, 0x57, 0xf2, 0x49, 0xd7, 0xab, 0x12, 0x1c, 0x98, 0x50, 0xb6, 0x59, 0x83, 0x9c, 0xa5, + 0x86, 0x34, 0xae, 0x98, 0xbb, 0xef, 0x9c, 0x9f, 0x69, 0x8d, 0xc3, 0x3e, 0x73, 0x67, 0xd4, 0xa0, + 0x2b, 0xd8, 0xa3, 0xd0, 0xc7, 0x8d, 0xdd, 0x62, 0xc7, 0xb3, 0x86, 0x6c, 0xe9, 0x37, 0xb8, 0xac, + 0xd7, 0xf4, 0xcf, 0xa1, 0x15, 0x52, 0x60, 0x59, 0x03, 0x31, 0xf3, 0x5f, 0x3d, 0x80, 0x46, 0x99, + 0x37, 0x5b, 0x6d, 0x73, 0x4d, 0xfd, 0xc7, 0x33, 0xd5, 0xe5, 0x03, 0x76, 0xd4, 0xb3, 0x70, 0xb0, + 0xd8, 0x55, 0xc1, 0x78, 0xcf, 0xbe, 0xcc, 0x30, 0x8f, 0x60, 0xd9, 0x9f, 0x67, 0x56, 0x45, 0x0e, + 0xae, 0x7f, 0xf9, 0xce, 0xff, 0x67, 0xe7, 0x3b, 0x79, 0x35, 0xd7, 0x05, 0x3d, 0x31, 0x68, 0x9b, + 0x14, 0x3c, 0x4f, 0x24, 0xee, 0x41, 0x38, 0xb0, 0xfe, 0xff, 0xe5, 0x3b, 0x3b, 0xf0, 0x4a, 0x8c, + 0x2e, 0x34, 0x67, 0x90, 0x5e, 0x20, 0x2c, 0x1c, 0x95, 0x5e, 0xe2, 0x0a, 0x06, 0x2d, 0x31, 0x0b, + 0x92, 0xfd, 0x50, 0x2e, 0xe6, 0x3a, 0xe8, 0x49, 0xe3, 0xc6, 0x00, 0x83, 0xf8, 0x45, 0x48, 0xd0, + 0x5c, 0x2e, 0x3c, 0x87, 0xaf, 0xb1, 0xb8, 0x73, 0xf7, 0xb2, 0x03, 0x60, 0x00, 0x1f, 0xcf, 0x33, + 0x39, 0xb4, 0x8d, 0x50, 0x1f, 0x45, 0x4a, 0x7f, 0x8c, 0xea, 0x69, 0x75, 0x95, 0x7b, 0x4f, 0x4a, + 0xbe, 0x64, 0x72, 0xaa, 0x88, 0xd4, 0x63, 0xec, 0x00, 0x05, 0xc2, 0x9f, 0xe9, 0x69, 0xce, 0x64, + 0x22, 0x1c, 0x24, 0x5d, 0x52, 0x7f, 0xd4, 0x4f, 0x2a, 0x7d, 0x6d, 0xf5, 0x53, 0xea, 0xc1, 0x4d, + 0x6a, 0x0e, 0xde, 0x14, 0x7a, 0xad, 0x28, 0xcd, 0x0c, 0x0a, 0xc1, 0xe4, 0x05, 0x9e, 0x0f, 0x76, + 0x23, 0xb2, 0xd6, 0x3c, 0xad, 0x7d, 0xad, 0x9e, 0x9a, 0x7d, 0xb1, 0xea, 0x2d, 0x4b, 0x2d, 0x2f, + 0x68, 0x42, 0x81, 0x3d, 0x16, 0x02, 0xac, 0xd2, 0x5d, 0x09, 0xba, 0x03, 0xef, 0x20, 0x59, 0x18, + 0xe0, 0x71, 0xab, 0x6a, 0x8c, 0x05, 0xcc, 0x1d, 0x72, 0x11, 0x4b, 0xd5, 0xba, 0xde, 0xe0, 0xcb, + 0x09, 0x2f, 0x06, 0xc2, 0x24, 0xe6, 0x4c, 0x80, 0xe2, 0x03, 0xf9, 0x40, 0xbd, 0x42, 0xb5, 0x74, + 0x91, 0x3c, 0xaf, 0x3d, 0x9d, 0x5a, 0xb5, 0x5c, 0x1e, 0x78, 0x9c, 0x37, 0x12, 0x86, 0x1e, 0x18, + 0xe8, 0x24, 0xdb, 0xdb, 0xb1, 0x20, 0xb2, 0x14, 0x74, 0x17, 0xa8, 0x08, 0xd9, 0x56, 0x6b, 0x44, + 0x6f, 0xaa, 0x4a, 0x2d, 0xf0, 0x33, 0xf0, 0x0d, 0xae, 0x54, 0x58, 0xb8, 0x14, 0xa1, 0x24, 0x4a, + 0xa1, 0x8d, 0x03, 0xef, 0xb1, 0x32, 0x9f, 0xf9, 0x8f, 0xf1, 0x1e, 0x61, 0x0b, 0x50, 0x19, 0x05, + 0x21, 0x85, 0xcd, 0x47, 0x38, 0x5d, 0xe5, 0x03, 0x92, 0xf5, 0xe8, 0x3d, 0x5f, 0x46, 0x3a, 0xb4, + 0xf6, 0x61, 0xdf, 0xb5, 0x9d, 0xa2, 0xc2, 0x6b, 0x7e, 0xec, 0x49, 0x23, 0x28, 0x68, 0x1e, 0xac, + 0x32, 0x15, 
0x18, 0xc1, 0x0c, 0x8c, 0xed, 0x90, 0x4c, 0xa6, 0xaf, 0x3a, 0x94, 0x29, 0x14, 0x3b, + 0x0e, 0xb9, 0x4e, 0xd0, 0xd0, 0xa2, 0xc6, 0x5b, 0x8e, 0x86, 0x99, 0xa1, 0xeb, 0xe0, 0x39, 0x21, + 0xd4, 0xd6, 0x9d, 0xb3, 0x29, 0x1e, 0x36, 0x6e, 0x2c, 0x4c, 0x93, 0x51, 0xae, 0x33, 0xa9, 0x35, + 0xf8, 0xe4, 0x2d, 0xae, 0x5f, 0x38, 0xb7, 0x1c, 0x18, 0xa6, 0x6b, 0x8a, 0x86, 0xd5, 0x08, 0x74, + 0x2e, 0xbd, 0x70, 0xb2, 0xf9, 0x5c, 0x5a, 0xba, 0x1b, 0x1b, 0xfb, 0xdc, 0x5e, 0x0c, 0x68, 0xbd, + 0x40, 0xf0, 0xd6, 0x7c, 0xf3, 0x8c, 0x27, 0x42, 0xce, 0x46, 0x27, 0x20, 0x99, 0xd1, 0x84, 0xd2, + 0xc5, 0xcf, 0xce, 0x33, 0x14, 0xc2, 0xb3, 0x1a, 0x72, 0x8c, 0x6c, 0x64, 0xa2, 0x15, 0xa4, 0x22, + 0x5f, 0xf9, 0xf7, 0x4e, 0x9c, 0xbd, 0x37, 0x26, 0x13, 0x42, 0xe6, 0x43, 0x93, 0x1d, 0x9d, 0xa2, + 0x85, 0x62, 0xa3, 0x4e, 0x08, 0x57, 0x18, 0xf1, 0xa8, 0x7c, 0x9e, 0xf2, 0x34, 0xfb, 0x7a, 0x8a, + 0xfc, 0x96, 0x05, 0x04, 0xa4, 0xeb, 0x75, 0x10, 0x2c, 0x32, 0xfa, 0xd2, 0xec, 0x70, 0x48, 0xac, + 0x01, 0xc5, 0x2b, 0x95, 0xf1, 0x53, 0xee, 0x9f, 0x9d, 0xe9, 0x19, 0xe7, 0x0c, 0x19, 0x51, 0x84, + 0xf2, 0x60, 0x23, 0x66, 0xf5, 0x4e, 0xa7, 0x6b, 0x1d, 0xab, 0x02, 0xd5, 0x57, 0x9e, 0x54, 0x9a, + 0x5f, 0x9f, 0x46, 0x58, 0x16, 0x1e, 0xe3, 0xb4, 0xdb, 0xc0, 0x9a, 0xca, 0x00, 0x5a, 0xa9, 0x7d, + 0x87, 0x74, 0xf0, 0xac, 0x45, 0x3a, 0x35, 0x36, 0xfc, 0x10, 0xdd, 0x0f, 0x2c, 0xf9, 0xb7, 0x05, + 0x20, 0xeb, 0x7f, 0xdb, 0xeb, 0xad, 0x50, 0xc9, 0x47, 0x5c, 0xfb, 0x6b, 0x0e, 0x14, 0x67, 0x23, + 0xe4, 0x3a, 0xbf, 0x2a, 0x57, 0xf9, 0x62, 0xb5, 0x90, 0xc6, 0xca, 0x0b, 0xe1, 0xbb, 0xa5, 0xa3, + 0x02, 0xaa, 0x79, 0xd5, 0x38, 0xf5, 0x8c, 0xe5, 0x54, 0x9a, 0x8f, 0xf0, 0x1d, 0x67, 0x1b, 0xd7, + 0xb3, 0xe8, 0x07, 0x55, 0x70, 0xb0, 0x4a, 0x68, 0xa1, 0xa4, 0x74, 0x8a, 0xfe, 0x52, 0x84, 0x2f, + 0x07, 0x38, 0x8c, 0x67, 0xba, 0x73, 0x57, 0x39, 0x70, 0xad, 0x2b, 0x51, 0xa7, 0xbe, 0x7c, 0xd0, + 0x9d, 0x5b, 0x6d, 0x28, 0xdd, 0xdb, 0xd9, 0x2d, 0xf4, 0xbb, 0x6a, 0xe1, 0xac, 0x70, 0x84, 0xa0, + 0xdd, 0xdf, 0xc1, 0xae, 0xfe, 0x7a, 0x4f, 0x2a, 0x9c, 0xc0, 0xf3, 0x7e, 0x15, 0x04, 0x2e, 0xba, + 0xd8, 0x1c, 0x51, 0x77, 0xc6, 0xe2, 0xa8, 0x4e, 0x57, 0xdf, 0x18, 0xbd, 0x99, 0x11, 0x0c, 0xeb, + 0xa4, 0x33, 0xa9, 0xb1, 0xb0, 0xf1, 0xbb, 0x31, 0xa9, 0x67, 0x3b, 0x37, 0x04, 0xfa, 0xbf, 0xf5, + 0x7d, 0x1c, 0x63, 0x1a, 0x7d, 0x5f, 0x26, 0x6a, 0xb6, 0x22, 0x38, 0x66, 0x5d, 0x9a, 0x3f, 0x62, + 0x49, 0x04, 0x50, 0x3a, 0x43, 0x4c, 0x15, 0x02, 0xbd, 0xc3, 0x8d, 0x7e, 0xe3, 0x4e, 0xcb, 0xc5, + 0x66, 0xa4, 0x79, 0x2f, 0x9c, 0x22, 0x87, 0x96, 0x40, 0x00, 0xbe, 0xa8, 0xc1, 0x24, 0x0f, 0xf9, + 0xdd, 0x8a, 0xd9, 0x79, 0xec, 0x0d, 0x2f, 0xd1, 0x57, 0x2a, 0x5f, 0xcd, 0x6d, 0x10, 0xc5, 0x12, + 0x80, 0xc2, 0x5d, 0xe1, 0xb8, 0x9f, 0xf1, 0xa0, 0xfd, 0x71, 0x52, 0x64, 0x5f, 0x2a, 0x95, 0xd6, + 0x24, 0x9f, 0x94, 0x87, 0x54, 0xef, 0xa6, 0xd8, 0x1b, 0x9a, 0xa8, 0x81, 0x28, 0x2e, 0xef, 0x79, + 0x86, 0x43, 0x6a, 0xea, 0xe7, 0xb6, 0xbd, 0xd6, 0x76, 0x69, 0x40, 0x14, 0x28, 0xdb, 0xba, 0xbd, + 0x48, 0x06, 0xb6, 0xbd, 0xa2, 0x7f, 0xf8, 0xbb, 0xa4, 0xea, 0x90, 0x90, 0xaa, 0x18, 0xcc, 0x44, + 0xda, 0x80, 0x6d, 0xa1, 0xd0, 0xa7, 0xcd, 0x1a, 0x37, 0x49, 0xde, 0x29, 0x6f, 0x55, 0x8b, 0x85, + 0x21, 0xdf, 0x78, 0x3c, 0x89, 0xfc, 0xea, 0xd5, 0xdd, 0x2d, 0x96, 0x7e, 0xf7, 0x0a, 0x46, 0x82, + 0x36, 0xf9, 0x50, 0x2a, 0x08, 0xea, 0xf7, 0x27, 0xa0, 0xa4, 0xb6, 0x68, 0xa8, 0xbd, 0x24, 0x11, + 0xfb, 0xed, 0x0c, 0x79, 0xd9, 0xa5, 0x82, 0xc0, 0xc1, 0xa8, 0xff, 0x4b, 0x2e, 0xd5, 0xd4, 0xcd, + 0x38, 0x7e, 0xba, 0x5f, 0x05, 0x55, 0x2a, 0x12, 0x94, 0x9d, 0x24, 0xfe, 0x21, 0xa5, 0xfe, 0xfa, + 0x46, 0xb6, 0x0c, 0xe6, 0x30, 0xd0, 
0x94, 0xb9, 0x10, 0xae, 0x54, 0xa5, 0x71, 0x38, 0x84, 0x40, + 0xcd, 0xfb, 0x29, 0x9f, 0x05, 0x80, 0xc9, 0x25, 0x4c, 0xfa, 0xd4, 0x5a, 0x5e, 0xe8, 0xae, 0x95, + 0x20, 0xa8, 0x5a, 0x59, 0xe9, 0x03, 0x32, 0xd9, 0x21, 0xe4, 0xc0, 0x6d, 0x2c, 0x37, 0x5c, 0x10, + 0x5b, 0x7a, 0x27, 0x0b, 0x13, 0x91, 0x0b, 0xab, 0xb6, 0x91, 0x9f, 0xb9, 0x6b, 0x5a, 0x1a, 0x0c, + 0x54, 0x2b, 0xab, 0xd1, 0xbc, 0x39, 0x5d, 0x81, 0x1e, 0xfd, 0xfc, 0x77, 0x88, 0x9d, 0xec, 0xac, + 0x2e, 0x50, 0x43, 0xe9, 0x01, 0x1a, 0x75, 0x70, 0x9a, 0xc4, 0xf3, 0xde, 0xe5, 0x0b, 0x1c, 0x41, + 0xe7, 0xbe, 0x51, 0xcb, 0x74, 0x09, 0x52, 0x24, 0xe6, 0xd4, 0x2f, 0x6e, 0xb6, 0x4c, 0x3f, 0xa4, + 0x9f, 0xf7, 0xda, 0x2b, 0x6d, 0x85, 0x79, 0x84, 0x78, 0xb7, 0xe8, 0x1a, 0x54, 0x49, 0x3e, 0xdb, + 0x72, 0x34, 0xb9, 0xf6, 0x76, 0xd0, 0x7c, 0xeb, 0xab, 0x09, 0x86, 0x88, 0xd3, 0x2e, 0xaf, 0x39, + 0x05, 0x9a, 0x13, 0x2e, 0xe1, 0x37, 0xcb, 0x2d, 0x8e, 0xb6, 0xa6, 0xfb, 0x76, 0x3d, 0x92, 0x56, + 0x00, 0xc1, 0xdb, 0x51, 0x9c, 0x21, 0x14, 0x0b, 0xd6, 0xa8, 0x27, 0xcb, 0x33, 0x24, 0xc8, 0x5c, + 0x86, 0x42, 0x75, 0x76, 0x22, 0xc9, 0x49, 0x07, 0x3d, 0x8a, 0x30, 0x3d, 0x57, 0x48, 0x97, 0x0c, + 0x3c, 0x2d, 0xd7, 0x34, 0x95, 0x57, 0x46, 0x75, 0x63, 0x1c, 0x58, 0x05, 0xad, 0xfe, 0x33, 0xa7, + 0xfd, 0x51, 0xec, 0xf6, 0xa8, 0xea, 0xf2, 0x36, 0xda, 0xc0, 0x9f, 0xf6, 0x5d, 0x91, 0x5f, 0x56, + 0xa0, 0x7b, 0x48, 0x45, 0xf8, 0x17, 0xf8, 0xea, 0xe2, 0x0a, 0xe6, 0xec, 0xe3, 0x99, 0x1f, 0xd2, + 0x20, 0x03, 0xbd, 0x15, 0x0b, 0x73, 0x31, 0xe1, 0xaf, 0x9c, 0xfc, 0xc8, 0x01, 0xba, 0x60, 0xee, + 0x24, 0x14, 0x34, 0x01, 0xdf, 0x64, 0x20, 0x13, 0xb0, 0xcb, 0x42, 0x3e, 0x60, 0x59, 0x0e, 0xe4, + 0x04, 0x19, 0x74, 0x9d, 0x48, 0x8a, 0x62, 0x19, 0xe1, 0x4d, 0xd9, 0xce, 0xd2, 0xa1, 0xa4, 0xad, + 0xce, 0x05, 0x6b, 0x83, 0xd3, 0xdf, 0x0d, 0x36, 0x3e, 0x7b, 0x92, 0xcc, 0xa3, 0xf3, 0x65, 0x76, + 0xe3, 0xc0, 0x5e, 0x6f, 0x20, 0xa4, 0xe7, 0x6f, 0x55, 0x51, 0xef, 0x0d, 0x73, 0x32, 0x73, 0x4f, + 0x18, 0xf9, 0x98, 0xb8, 0x18, 0x96, 0x93, 0x0a, 0x68, 0xd2, 0x5f, 0x91, 0x2f, 0x62, 0x08, 0xcf, + 0x35, 0x55, 0x6d, 0x56, 0x1b, 0xc5, 0x52, 0xad, 0x37, 0x26, 0x4c, 0xa7, 0xf7, 0x6c, 0x52, 0xd6, + 0x1b, 0xc0, 0x7d, 0xb7, 0x05, 0x3f, 0x01, 0x77, 0x11, 0xd7, 0x16, 0xe4, 0x33, 0x47, 0x02, 0xa5, + 0x65, 0xdd, 0xdb, 0x41, 0xbd, 0x94, 0xdd, 0x6f, 0x37, 0x21, 0xc8, 0x0f, 0xfb, 0x29, 0xde, 0xfe, + 0x75, 0x8e, 0x39, 0x32, 0xfc, 0x8e, 0x63, 0xbe, 0xcb, 0x13, 0xb8, 0xe4, 0x82, 0x7e, 0x3b, 0xa6, + 0x9c, 0xc6, 0x94, 0x4f, 0xe5, 0x3f, 0x1d, 0xe7, 0x21, 0xfb, 0x42, 0xfa, 0x75, 0x65, 0x7a, 0x82, + 0x38, 0x7a, 0x4f, 0xe7, 0xef, 0x56, 0xfd, 0xa7, 0xa1, 0x1d, 0xd3, 0x29, 0x56, 0xe5, 0x9a, 0x96, + 0x2b, 0x6c, 0x80, 0xd8, 0xc7, 0xd1, 0xe4, 0x3a, 0xcc, 0x18, 0xcf, 0x9f, 0x2a, 0x13, 0xf4, 0x92, + 0x2f, 0x91, 0xa6, 0xad, 0x56, 0x91, 0x01, 0x0e, 0xf4, 0x75, 0x8d, 0xce, 0xbe, 0xc2, 0x26, 0x2e, + 0xbf, 0xb1, 0xfa, 0x41, 0x4b, 0x2c, 0x8a, 0xd7, 0x12, 0xcf, 0xed, 0x96, 0x6a, 0x7c, 0x8b, 0x48, + 0x10, 0x13, 0x16, 0x2c, 0xaa, 0xe1, 0xd2, 0x79, 0x60, 0xdf, 0x7b, 0x85, 0xba, 0xb7, 0xa6, 0x8e, + 0xcf, 0x43, 0x08, 0x97, 0x17, 0xee, 0xa6, 0xea, 0xb8, 0x4e, 0xa7, 0xd9, 0xd2, 0xd9, 0xc5, 0x07, + 0x60, 0x33, 0x58, 0x56, 0x49, 0x7e, 0xed, 0x3c, 0x1a, 0xa5, 0x21, 0xd4, 0x02, 0x7f, 0xcf, 0x28, + 0x79, 0x1b, 0x74, 0x3a, 0x75, 0x17, 0x53, 0x70, 0x67, 0xd1, 0x15, 0x0e, 0xe6, 0xc0, 0xa2, 0x28, + 0xfd, 0x21, 0xb8, 0x3b, 0x46, 0xab, 0xc4, 0x9e, 0xd3, 0x2a, 0xec, 0xf4, 0x11, 0x27, 0x09, 0x59, + 0x43, 0x71, 0xdf, 0x1c, 0x3e, 0x4e, 0x90, 0x0b, 0x8a, 0x62, 0x86, 0x0c, 0xc4, 0x5e, 0x8f, 0x08, + 0x08, 0x81, 0x95, 0x4f, 0xa4, 0xad, 0x40, 0xf6, 0xbb, 0x35, 
0x10, 0x55, 0xad, 0x39, 0x3d, 0xd7, + 0x33, 0xdb, 0x7a, 0x52, 0x4b, 0x7f, 0x2a, 0x8c, 0x81, 0x31, 0x14, 0x1b, 0xfb, 0x0b, 0x6c, 0x48, + 0x30, 0xb9, 0x60, 0x19, 0xb6, 0xa5, 0xc6, 0xb1, 0x66, 0x97, 0x80, 0x4d, 0xe8, 0xc9, 0xd1, 0xa9, + 0x71, 0xc4, 0xf3, 0x1c, 0x48, 0xeb, 0x17, 0xc1, 0x7f, 0x4f, 0x92, 0x41, 0xd0, 0x49, 0x7b, 0x61, + 0x1e, 0x4f, 0x77, 0x36, 0xff, 0x88, 0x60, 0xdb, 0xf7, 0x49, 0x2e, 0x6c, 0x5b, 0x11, 0x4d, 0x9c, + 0xd7, 0x65, 0x6e, 0x66, 0xe5, 0xfc, 0xe5, 0xfc, 0x24, 0xc6, 0x3f, 0x97, 0xc5, 0x0d, 0x5f, 0xb5, + 0x7b, 0x3e, 0xc1, 0xf5, 0x45, 0xeb, 0x69, 0x91, 0xa6, 0x67, 0x34, 0x56, 0x34, 0x50, 0x9e, 0x90, + 0x60, 0xa7, 0x4b, 0x02, 0xef, 0x3e, 0x8d, 0xe1, 0x26, 0x78, 0xa9, 0x07, 0x15, 0x29, 0xd5, 0xa1, + 0xf7, 0x6c, 0x35, 0x66, 0xaa, 0xf0, 0x5b, 0xfa, 0xa2, 0xb2, 0x83, 0x77, 0x79, 0xc5, 0xa6, 0xb5, + 0x1b, 0x82, 0x30, 0xae, 0x18, 0x91, 0xbc, 0x45, 0x3c, 0x4c, 0xc8, 0xdd, 0x65, 0x82, 0x86, 0x51, + 0x58, 0x74, 0x15, 0xb7, 0x8e, 0x7b, 0x76, 0xec, 0x08, 0xf7, 0x05, 0x36, 0xb1, 0xdb, 0x69, 0x4b, + 0x0a, 0x3f, 0xea, 0x97, 0x53, 0x13, 0x44, 0x86, 0xcf, 0xa1, 0x86, 0x18, 0x64, 0x9f, 0x6c, 0xc9, + 0x83, 0x04, 0x00, 0x9f, 0x37, 0x9d, 0xfb, 0x83, 0xd1, 0x3c, 0xda, 0xe8, 0x5e, 0x29, 0xe5, 0xb3, + 0x96, 0xf8, 0xf1, 0x9b, 0x6a, 0x17, 0x88, 0x07, 0xd0, 0xef, 0x5e, 0xe0, 0xd3, 0x9f, 0xf1, 0x0e, + 0xc7, 0xb5, 0xe0, 0xe2, 0xb0, 0x42, 0xfb, 0x91, 0x1d, 0x26, 0xc6, 0x54, 0x0b, 0xc9, 0x4f, 0xb0, + 0x97, 0xce, 0xa0, 0x52, 0x99, 0x57, 0xc3, 0x88, 0x2a, 0x08, 0x1a, 0x83, 0xf3, 0x76, 0x2e, 0xa2, + 0x0e, 0x1f, 0x9e, 0x2f, 0xe6, 0xaa, 0xc0, 0x44, 0x60, 0x23, 0x01, 0x92, 0x22, 0xbe, 0xe0, 0xd7, + 0x49, 0x12, 0xfc, 0x46, 0x64, 0x47, 0xa8, 0x2a, 0xcc, 0xaa, 0xa2, 0x54, 0x5d, 0x59, 0xc1, 0xe6, + 0xf5, 0x56, 0x3b, 0x03, 0xdf, 0xf0, 0xd7, 0x61, 0x28, 0x8c, 0x27, 0x6e, 0x56, 0xfa, 0xcf, 0x5c, + 0xab, 0x9f, 0x4b, 0xa4, 0xa7, 0xd8, 0x07, 0x62, 0xc2, 0xef, 0x6e, 0x24, 0xe6, 0xd7, 0xff, 0xde, + 0xc7, 0x78, 0xf0, 0xc8, 0xd1, 0xe6, 0x09, 0x2f, 0x29, 0x6a, 0x5e, 0xaf, 0xb0, 0x3f, 0x03, 0x47, + 0x8d, 0x7f, 0x8a, 0x3c, 0x6c, 0xd1, 0x13, 0x6e, 0x8b, 0xf1, 0x92, 0x25, 0x3b, 0x20, 0x1c, 0xc5, + 0xc9, 0xa7, 0x16, 0xc9, 0x84, 0x60, 0xef, 0xea, 0x91, 0x93, 0x6b, 0xf3, 0xfc, 0x09, 0x87, 0xc0, + 0x11, 0x53, 0x74, 0x6e, 0x10, 0xc6, 0x50, 0x2f, 0x1d, 0x67, 0x76, 0xb6, 0xb1, 0xb0, 0x27, 0xa0, + 0xd3, 0x9b, 0xa5, 0xfc, 0x2a, 0x28, 0x30, 0x7e, 0xed, 0x00, 0x60, 0x76, 0xce, 0x8a, 0x22, 0x97, + 0xe2, 0xe3, 0x4c, 0xca, 0x09, 0xba, 0xc0, 0xe8, 0xe8, 0x89, 0x8a, 0x0e, 0x54, 0x49, 0xb0, 0x45, + 0x09, 0xa1, 0xad, 0x4c, 0xab, 0x83, 0xe0, 0x57, 0x25, 0x6e, 0x18, 0x9b, 0x6b, 0x9e, 0x9a, 0xec, + 0x33, 0x2b, 0x3b, 0x6b, 0x34, 0xcd, 0xa6, 0x3a, 0x87, 0xf1, 0xd4, 0x0b, 0x4a, 0x41, 0xbb, 0xe5, + 0x18, 0xa8, 0x69, 0x16, 0xd4, 0x5f, 0x15, 0xd3, 0xdf, 0x27, 0x6c, 0x56, 0x16, 0x02, 0x46, 0xaa, + 0xed, 0xca, 0x84, 0x41, 0x8e, 0x24, 0x58, 0x08, 0x39, 0xb9, 0x94, 0x5d, 0xcd, 0x27, 0x97, 0x54, + 0xf3, 0x3d, 0x19, 0xdc, 0x19, 0x77, 0x3e, 0x12, 0x85, 0x89, 0x85, 0x0f, 0x7c, 0xe0, 0x45, 0x40, + 0x3f, 0xfd, 0x36, 0xe3, 0x2b, 0xad, 0x1a, 0x86, 0xbc, 0x70, 0x6c, 0xe9, 0xef, 0xdc, 0xb1, 0x76, + 0x73, 0xe7, 0x2f, 0x8b, 0xe5, 0xdf, 0x2f, 0x3d, 0x67, 0x19, 0x49, 0x49, 0xd3, 0xf1, 0x6d, 0xb3, + 0xc8, 0xbb, 0xbe, 0x3c, 0x50, 0xce, 0xb0, 0x67, 0x78, 0xea, 0xca, 0xf2, 0xd3, 0x78, 0xfe, 0xcd, + 0x9a, 0xa4, 0x9e, 0x21, 0xca, 0x09, 0x29, 0x93, 0x89, 0xdd, 0xae, 0x5a, 0x00, 0x39, 0x2e, 0x34, + 0x6e, 0x63, 0xa8, 0x78, 0x32, 0x5c, 0x00, 0x34, 0x79, 0xb7, 0xd4, 0x39, 0x6d, 0xee, 0xc4, 0x1f, + 0xa1, 0x48, 0xe6, 0xc0, 0x52, 0xa1, 0x24, 0x4e, 0x80, 0xfe, 0x98, 0x46, 0x52, 0x0d, 
0x1e, 0x35, + 0x8e, 0x9b, 0xd9, 0x7f, 0x43, 0x4f, 0xfa, 0xc3, 0x7f, 0xa2, 0xe2, 0x75, 0x5f, 0x07, 0x27, 0x84, + 0x6e, 0xe7, 0x4c, 0x70, 0x1e, 0x11, 0x3d, 0x25, 0xd0, 0xba, 0x20, 0x9d, 0x60, 0x0f, 0x5e, 0xfa, + 0x5e, 0x7d, 0xef, 0x6d, 0x19, 0xcb, 0x60, 0xe2, 0x15, 0xb5, 0x7c, 0x3e, 0xf4, 0xc4, 0x16, 0x55, + 0x40, 0x66, 0x74, 0x77, 0xe2, 0x1c, 0x91, 0xa3, 0x66, 0x2b, 0x6d, 0x31, 0x36, 0x1c, 0xd7, 0xb3, + 0x0a, 0x3a, 0x8a, 0xef, 0xd6, 0xd8, 0x52, 0xbd, 0x03, 0x58, 0x08, 0x8b, 0x42, 0xfb, 0x54, 0xc4, + 0x76, 0x02, 0x24, 0xc6, 0xfe, 0x88, 0x10, 0x7b, 0x0b, 0x12, 0xc8, 0xf5, 0xf9, 0x37, 0x3b, 0x9f, + 0xfa, 0x80, 0xad, 0x34, 0x8c, 0xfc, 0x3a, 0xbc, 0x99, 0x63, 0xa1, 0x8c, 0x52, 0xd8, 0xf6, 0xe8, + 0x7a, 0x7e, 0xd1, 0xf1, 0x5d, 0x55, 0x46, 0x00, 0xc8, 0xf6, 0xd7, 0xeb, 0x48, 0xdf, 0xf3, 0x18, + 0x2e, 0xe0, 0x5c, 0x7c, 0x4c, 0x88, 0xd2, 0x93, 0x97, 0xac, 0x3a, 0xc7, 0x6a, 0x5e, 0x23, 0x89, + 0x29, 0x68, 0x69, 0xe0, 0x7b, 0x99, 0x9f, 0xe3, 0x9c, 0x31, 0xee, 0x72, 0x57, 0x41, 0x1d, 0x54, + 0x6b, 0x7e, 0xc9, 0x9b, 0x62, 0x41, 0xc2, 0x0f, 0x32, 0x9c, 0xdf, 0x90, 0x6c, 0x19, 0xa8, 0x22, + 0x4c, 0x86, 0x3e, 0x51, 0xad, 0x0a, 0x9c, 0x68, 0xc9, 0x8a, 0x13, 0x97, 0xce, 0xdb, 0xa9, 0x2d, + 0xf4, 0xef, 0xff, 0xe6, 0x83, 0xae, 0x68, 0x20, 0x9c, 0x2b, 0x45, 0x12, 0xd2, 0x8d, 0x1a, 0x27, + 0xb4, 0x76, 0x6c, 0x95, 0x4a, 0x35, 0xbe, 0x04, 0xab, 0x4a, 0xc7, 0xfe, 0x20, 0x13, 0x32, 0x4d, + 0x80, 0x58, 0x28, 0xa1, 0x63, 0xfc, 0x5e, 0x1b, 0xc7, 0x59, 0x41, 0xac, 0xc2, 0x4d, 0x2f, 0xc5, + 0xdc, 0xd5, 0x15, 0x37, 0x00, 0xbe, 0xed, 0x09, 0x4a, 0x43, 0xe2, 0x64, 0x9f, 0xe8, 0x4a, 0x65, + 0x61, 0x38, 0xa0, 0x80, 0x7e, 0x2d, 0x1b, 0x5a, 0x7b, 0x73, 0x8b, 0xd7, 0x57, 0xbc, 0x1b, 0x01, + 0xb1, 0x88, 0x67, 0xd2, 0xbc, 0x74, 0xb7, 0x57, 0x76, 0x5f, 0x7b, 0xb2, 0xe8, 0x91, 0x04, 0xc3, + 0xac, 0x53, 0x84, 0x09, 0xaa, 0x2a, 0xed, 0x0c, 0x9a, 0x13, 0x5b, 0x86, 0x98, 0x3c, 0x4a, 0x42, + 0x1c, 0xbd, 0x60, 0x2d, 0x61, 0x0e, 0x4c, 0x70, 0x0f, 0xbb, 0x0f, 0x9a, 0x8d, 0xe8, 0x75, 0x51, + 0x00, 0x6d, 0x05, 0xfe, 0x9c, 0xf3, 0x8c, 0xfb, 0x08, 0xfb, 0xd6, 0x0c, 0x60, 0xff, 0xed, 0xae, + 0x7a, 0x49, 0xbe, 0x40, 0x63, 0x13, 0x81, 0x91, 0xfd, 0x3c, 0x62, 0x3e, 0xb7, 0x0c, 0x61, 0xba, + 0x73, 0x60, 0x93, 0xfc, 0x9e, 0x28, 0x5f, 0xf4, 0xe0, 0xbe, 0xb0, 0x92, 0xc7, 0xc8, 0xfe, 0xa3, + 0xe7, 0xd8, 0x3c, 0x73, 0x8e, 0x65, 0x9d, 0x86, 0x50, 0x6e, 0xd2, 0x13, 0xb2, 0x4c, 0x12, 0x7a, + 0x14, 0xfe, 0xe9, 0x88, 0x7b, 0xf6, 0x74, 0x2d, 0x97, 0x93, 0x13, 0xce, 0x37, 0x65, 0x98, 0xa1, + 0x8c, 0x23, 0xd7, 0x11, 0x49, 0x61, 0x8d, 0xf1, 0x45, 0xce, 0x78, 0x9a, 0x37, 0xe3, 0xe3, 0x25, + 0x59, 0xcd, 0x62, 0x27, 0x41, 0xec, 0xe1, 0xbe, 0x4a, 0x37, 0x6c, 0x47, 0x1b, 0x53, 0xb8, 0x86, + 0x30, 0xb8, 0x22, 0xdb, 0x47, 0x7f, 0x36, 0x3b, 0xa8, 0x85, 0xf7, 0xcd, 0x3b, 0x49, 0x74, 0x25, + 0xe9, 0x0b, 0xfc, 0x46, 0xc9, 0x0a, 0xfe, 0xe4, 0x10, 0x68, 0x7c, 0x11, 0x16, 0xf3, 0xdc, 0x25, + 0x08, 0x00, 0x51, 0x65, 0xd5, 0x57, 0x74, 0xb7, 0xb3, 0xf7, 0x33, 0x8d, 0x2c, 0xff, 0xf3, 0x92, + 0x10, 0x1a, 0xb9, 0x46, 0x4e, 0xe8, 0x3c, 0x2e, 0xf9, 0x65, 0x1f, 0xdf, 0x3c, 0x34, 0x2a, 0x67, + 0x43, 0x31, 0x10, 0xab, 0x2e, 0x6e, 0x16, 0x88, 0x0c, 0x64, 0x74, 0x9a, 0x6a, 0x18, 0x01, 0x22, + 0x7a, 0xd9, 0xca, 0x23, 0x5c, 0x8d, 0x53, 0x7a, 0xae, 0x3e, 0xb4, 0x6a, 0x7e, 0x56, 0x1c, 0xa7, + 0x0e, 0x94, 0xfd, 0x47, 0xf6, 0xca, 0xe9, 0x6c, 0xa2, 0xfa, 0xcf, 0x22, 0x1b, 0x34, 0xef, 0x23, + 0x7a, 0x11, 0x90, 0xd2, 0xb8, 0xf4, 0xb5, 0xd0, 0xbc, 0xdc, 0x06, 0x06, 0x51, 0xdc, 0xcc, 0x8e, + 0x5d, 0x00, 0xd1, 0x83, 0x7f, 0xe5, 0x23, 0xf3, 0xf8, 0x42, 0x19, 0xb4, 0x29, 0xb2, 0x82, 0x24, + 0x59, 0x60, 
0xc5, 0x6a, 0xd2, 0x80, 0xdb, 0xc6, 0x7c, 0xda, 0x73, 0xa3, 0x32, 0x30, 0x04, 0x70, + 0x80, 0x7f, 0x3a, 0x86, 0xcc, 0xe2, 0x9b, 0x84, 0x3c, 0xfb, 0x6d, 0xca, 0x72, 0x5a, 0x50, 0xf3, + 0x48, 0x4a, 0x0c, 0xbe, 0xa2, 0xe1, 0x6c, 0xfb, 0x8f, 0xbd, 0x27, 0xe5, 0x83, 0xe1, 0x2b, 0x23, + 0x03, 0x27, 0x34, 0x22, 0xaa, 0x90, 0xdf, 0x0b, 0xca, 0xf8, 0xb9, 0x82, 0xaa, 0xb2, 0xab, 0x17, + 0x6f, 0x04, 0x34, 0xa0, 0xdc, 0xb7, 0xed, 0xf3, 0x42, 0xb0, 0xf3, 0xab, 0x48, 0x5f, 0xa5, 0xfe, + 0xe6, 0xb4, 0xf2, 0xf8, 0x3e, 0xdd, 0x9b, 0x12, 0xca, 0xd7, 0x37, 0x2c, 0x3e, 0xfd, 0x13, 0x41, + 0x72, 0xc8, 0xf0, 0x49, 0x77, 0xf7, 0x2b, 0x76, 0x5f, 0x13, 0x99, 0x24, 0x0d, 0x7c, 0x7c, 0xc9, + 0x4a, 0xf9, 0x5f, 0xc8, 0xb0, 0x17, 0xc8, 0x76, 0xfd, 0xf9, 0xeb, 0x69, 0x4d, 0x26, 0x82, 0x2c, + 0x78, 0x25, 0x82, 0x56, 0x05, 0x18, 0xed, 0x7c, 0x3f, 0xbe, 0xeb, 0x60, 0xca, 0x8d, 0x5e, 0x4b, + 0xe3, 0x2b, 0xb7, 0x24, 0x32, 0xa8, 0x53, 0x85, 0x3f, 0x02, 0x5c, 0x92, 0x36, 0xd6, 0xaf, 0x8c, + 0x5a, 0x0c, 0x17, 0x8c, 0x16, 0xf3, 0x25, 0xaf, 0x80, 0x3b, 0x11, 0x15, 0xa2, 0x27, 0x63, 0xa9, + 0xbb, 0xd4, 0xb6, 0x5d, 0xc8, 0x4d, 0x0d, 0x8a, 0x61, 0x67, 0xfb, 0x11, 0x04, 0x6e, 0xf6, 0x32, + 0x04, 0xa8, 0xd8, 0x74, 0x84, 0xa6, 0xa2, 0x9e, 0x8f, 0x59, 0x81, 0x6f, 0x94, 0xc9, 0xa3, 0x29, + 0x01, 0x6f, 0x15, 0xa8, 0xcb, 0x73, 0x08, 0x27, 0x2f, 0xc4, 0x0e, 0xea, 0xfe, 0xf4, 0x2c, 0x35, + 0xdb, 0x9c, 0xad, 0xd0, 0x0f, 0x81, 0x7f, 0xe3, 0x31, 0x55, 0xc8, 0x61, 0xeb, 0xec, 0x1b, 0x17, + 0x5e, 0x64, 0x28, 0x4f, 0x1b, 0xbd, 0xff, 0xf7, 0x7e, 0xe6, 0x0f, 0x97, 0x4b, 0xba, 0x04, 0x70, + 0xfa, 0x3a, 0x85, 0xad, 0x62, 0x50, 0xea, 0x4a, 0xa8, 0x8a, 0xff, 0x9d, 0x9f, 0x4d, 0x0b, 0x74, + 0x38, 0xf8, 0xb0, 0xf1, 0xe1, 0xae, 0x20, 0x63, 0x12, 0xd0, 0x13, 0xa1, 0x1f, 0x96, 0x07, 0xd8, + 0x9b, 0x0a, 0x55, 0xa6, 0x93, 0x8d, 0x4b, 0xb8, 0xe7, 0x81, 0xa6, 0x18, 0xaf, 0x3c, 0x45, 0xb0, + 0x1c, 0x21, 0x63, 0x67, 0xbd, 0xe6, 0xa8, 0x53, 0x7f, 0x89, 0xaa, 0xbe, 0xb7, 0xaa, 0xf8, 0x34, + 0x65, 0x8d, 0x59, 0x9b, 0xa7, 0x83, 0xf4, 0x43, 0x3a, 0x90, 0x2e, 0xbf, 0xb0, 0x3e, 0x94, 0xdb, + 0x7c, 0x38, 0x8f, 0x73, 0xc7, 0xa8, 0x98, 0xe9, 0x7e, 0x49, 0x90, 0xe0, 0x95, 0x66, 0xac, 0xea, + 0x66, 0xf0, 0x63, 0x87, 0xb4, 0xd2, 0x0b, 0x23, 0xfe, 0x6a, 0x53, 0xa0, 0xbf, 0x0d, 0x31, 0x6e, + 0x69, 0x62, 0x29, 0xa5, 0x83, 0xa8, 0xff, 0x51, 0xe6, 0x4a, 0x5a, 0x38, 0xd8, 0x7f, 0x43, 0x85, + 0x27, 0x79, 0x3c, 0xd3, 0xb6, 0x02, 0x0e, 0x2f, 0xd7, 0x2a, 0x9c, 0xba, 0xff, 0xfc, 0x7b, 0xed, + 0x33, 0xe6, 0xe8, 0xd7, 0x18, 0xde, 0x8b, 0x36, 0x5c, 0x24, 0x4c, 0x88, 0x65, 0x6f, 0xed, 0xfc, + 0x0d, 0x28, 0x38, 0x80, 0xc0, 0x58, 0xe7, 0x5f, 0xa4, 0x63, 0x8f, 0x9c, 0x11, 0xfd, 0xf0, 0xb5, + 0x85, 0x91, 0xb5, 0x0a, 0x93, 0x44, 0xe4, 0xdf, 0xd4, 0xe4, 0xd4, 0x10, 0xc5, 0x59, 0xd7, 0xa9, + 0x51, 0x30, 0x66, 0xe6, 0x18, 0x76, 0x92, 0x75, 0xdc, 0x13, 0x49, 0x34, 0xee, 0xc4, 0xf1, 0xf9, + 0x5e, 0x12, 0x1c, 0x89, 0xa9, 0x84, 0x9f, 0x9a, 0x6c, 0xf0, 0x78, 0x69, 0x23, 0x27, 0x35, 0x45, + 0xe0, 0x28, 0x13, 0x2b, 0x06, 0x27, 0xdb, 0x83, 0xb0, 0x88, 0xca, 0x62, 0x4d, 0x35, 0xda, 0xbb, + 0xee, 0xfc, 0x63, 0x6d, 0x91, 0x39, 0x2c, 0xfe, 0xb6, 0x7c, 0x81, 0x4e, 0x61, 0xcc, 0xa9, 0x3a, + 0xf4, 0x46, 0x06, 0xde, 0x33, 0x26, 0x7c, 0x51, 0x95, 0xe1, 0xb4, 0xb2, 0x6e, 0x7c, 0x2c, 0xcf, + 0xeb, 0xf7, 0x9e, 0xb8, 0xbb, 0x1d, 0x6b, 0xb8, 0x92, 0x42, 0x3a, 0x25, 0x17, 0x5d, 0x3f, 0x8f, + 0x93, 0xbb, 0x93, 0x70, 0x9c, 0x03, 0x35, 0xed, 0x05, 0x1b, 0x59, 0x5c, 0x7a, 0x7e, 0xdb, 0x66, + 0x6f, 0x21, 0xa6, 0x76, 0x46, 0x25, 0xdd, 0xa0, 0x36, 0x08, 0x30, 0xe9, 0xe1, 0x08, 0x00, 0x6e, + 0xf3, 0x48, 0x3f, 0x42, 0x52, 0x98, 
0x28, 0x51, 0x02, 0xed, 0x10, 0x26, 0xbd, 0x74, 0x31, 0xed, + 0x02, 0xdb, 0xde, 0xd1, 0x73, 0xae, 0x05, 0x4d, 0x73, 0xca, 0x62, 0x10, 0xb7, 0x0b, 0xaa, 0x47, + 0xec, 0xb0, 0x77, 0x87, 0x53, 0x07, 0xda, 0x06, 0x19, 0x38, 0x41, 0x7c, 0x2f, 0x41, 0x07, 0xc5, + 0x07, 0x8a, 0x74, 0x2a, 0xc6, 0x64, 0x85, 0x50, 0x1c, 0xf7, 0x46, 0xb0, 0x4d, 0xf9, 0xcd, 0x63, + 0xde, 0xa9, 0x88, 0x5e, 0xac, 0xc4, 0x2e, 0xbd, 0x5f, 0x68, 0xcc, 0xf0, 0xda, 0x62, 0xb9, 0xdf, + 0xd2, 0x39, 0x90, 0x61, 0x26, 0x58, 0xc6, 0x69, 0x11, 0xae, 0x23, 0x3b, 0xd9, 0xd8, 0xee, 0x5d, + 0xfb, 0xb1, 0x9e, 0xfa, 0xc4, 0x4c, 0xf8, 0xf2, 0xd1, 0x19, 0xe8, 0x11, 0x91, 0xf3, 0x6d, 0x8e, + 0x2c, 0x0b, 0x16, 0xe7, 0xd4, 0x50, 0x1c, 0x19, 0x2a, 0x28, 0xcf, 0x41, 0x59, 0x23, 0x78, 0x61, + 0xd5, 0x55, 0x0e, 0x79, 0x44, 0x95, 0x73, 0x30, 0xcd, 0xa5, 0x0f, 0x40, 0x27, 0xf2, 0xe5, 0xad, + 0x19, 0x76, 0x18, 0xeb, 0x90, 0xe7, 0x4a, 0x03, 0xec, 0x13, 0x0f, 0x6e, 0x0e, 0x41, 0xa3, 0x76, + 0x1e, 0x3c, 0x86, 0xbd, 0x8d, 0x61, 0xad, 0xab, 0xca, 0x17, 0xb3, 0xea, 0x59, 0xbd, 0x96, 0x97, + 0x92, 0x8c, 0x7d, 0x46, 0xea, 0x11, 0xb0, 0xd5, 0xb2, 0x5c, 0x2d, 0x3f, 0x11, 0x48, 0x66, 0x92, + 0xac, 0x71, 0x4a, 0x46, 0x9b, 0xd0, 0x52, 0x8c, 0x37, 0x43, 0x77, 0x4b, 0x8a, 0x12, 0x4e, 0xe5, + 0x73, 0xfc, 0xa5, 0x0c, 0x3e, 0x36, 0xa0, 0xe8, 0xd7, 0x84, 0xb7, 0x07, 0x6d, 0xb1, 0x5c, 0xa8, + 0x48, 0xe6, 0x9c, 0x66, 0xcd, 0x1a, 0x58, 0xe2, 0x07, 0xad, 0x75, 0x21, 0x3e, 0x64, 0x73, 0xd3, + 0x78, 0x26, 0x61, 0xd9, 0xa7, 0xc2, 0x0a, 0xbf, 0x9a, 0xa2, 0x25, 0xe2, 0x80, 0xb9, 0xe3, 0xa1, + 0x81, 0x49, 0x27, 0xea, 0x66, 0xa0, 0x81, 0x0f, 0x55, 0x84, 0xe1, 0xbf, 0x9c, 0x11, 0x57, 0x94, + 0x2d, 0x71, 0xc3, 0xdf, 0x17, 0xe2, 0x53, 0x9b, 0xf1, 0xa0, 0xfa, 0x09, 0x21, 0xb4, 0x79, 0x9c, + 0xfd, 0x85, 0xee, 0xd8, 0x92, 0xc5, 0x92, 0x9c, 0x86, 0x8c, 0xce, 0x89, 0x04, 0xa9, 0xf1, 0x91, + 0x22, 0xfb, 0xcb, 0xcc, 0x47, 0xbf, 0xcb, 0x2c, 0xd1, 0xb1, 0x7b, 0x47, 0xe7, 0xd6, 0xa5, 0x3e, + 0x99, 0x49, 0x6b, 0x6c, 0x56, 0x82, 0xa1, 0xca, 0x41, 0xf9, 0x37, 0xc9, 0xde, 0x28, 0xa8, 0x18, + 0x0a, 0x4b, 0xc5, 0xeb, 0xcc, 0x08, 0xa1, 0xa5, 0xc5, 0xc8, 0x65, 0xf7, 0x60, 0x31, 0x9c, 0xc6, + 0xa3, 0x96, 0x11, 0x08, 0x27, 0x7c, 0xad, 0xf8, 0x0f, 0x5c, 0x16, 0x3e, 0x78, 0x42, 0xea, 0x05, + 0x66, 0x88, 0x70, 0x27, 0x6e, 0x93, 0x33, 0xd2, 0xf0, 0xc9, 0x85, 0xd1, 0x9f, 0x03, 0x5a, 0x04, + 0x91, 0x22, 0x88, 0x32, 0x56, 0x67, 0xeb, 0x6b, 0x33, 0xbf, 0x0e, 0x53, 0xac, 0x01, 0x06, 0xb9, + 0xe0, 0x44, 0x32, 0x9a, 0x06, 0x18, 0xef, 0x1a, 0xed, 0x7d, 0x7a, 0xfb, 0x25, 0x00, 0x06, 0x48, + 0xdd, 0x03, 0xb0, 0x7a, 0xde, 0xb4, 0xd8, 0xae, 0x5e, 0x67, 0xb3, 0x33, 0xdd, 0xd9, 0xd2, 0x18, + 0x32, 0x0c, 0xd1, 0xea, 0xc2, 0xc7, 0xf4, 0x15, 0x71, 0x10, 0x14, 0xc8, 0xf8, 0xab, 0xff, 0x9d, + 0xff, 0x85, 0x48, 0xc0, 0xa7, 0xcf, 0x53, 0x26, 0x4a, 0xc4, 0x11, 0x84, 0x9d, 0xd7, 0xa9, 0xdc, + 0x9b, 0xf2, 0x0b, 0x23, 0x94, 0xe7, 0xd9, 0x7a, 0xba, 0xa8, 0x78, 0xbf, 0x5c, 0x2d, 0xd0, 0x1b, + 0x3f, 0x06, 0xbd, 0xe7, 0xcc, 0x42, 0xcd, 0xd5, 0xc7, 0xaa, 0xad, 0x90, 0xc9, 0x4c, 0xe3, 0x12, + 0x08, 0xd9, 0x8c, 0x0f, 0x51, 0x3c, 0x6a, 0xc4, 0xd3, 0x18, 0xfa, 0xf6, 0x45, 0x6c, 0x91, 0xc1, + 0xfc, 0xcf, 0x75, 0xb4, 0x00, 0x4d, 0x32, 0x35, 0x4c, 0xae, 0x30, 0x4d, 0x59, 0x7d, 0x3a, 0x10, + 0x86, 0xe6, 0x08, 0xd5, 0x5b, 0xda, 0x8d, 0x81, 0x17, 0xd6, 0x7a, 0xbd, 0x7b, 0x3a, 0x3a, 0x58, + 0xe2, 0xe1, 0xa1, 0x05, 0x25, 0x05, 0x94, 0xa1, 0x98, 0xb1, 0xe1, 0xe2, 0x74, 0x7d, 0xfb, 0x12, + 0x1d, 0x89, 0x08, 0xe9, 0xb9, 0x76, 0xde, 0x3e, 0xd3, 0xa8, 0x34, 0xbe, 0xb7, 0x0e, 0xf2, 0x5e, + 0xbc, 0x4d, 0x1c, 0x23, 0x31, 0x8e, 0x56, 0x30, 0xc8, 0xc5, 
0x76, 0x74, 0x94, 0x88, 0x76, 0xe5, + 0x2f, 0x40, 0x3c, 0x92, 0xc7, 0x8e, 0xd4, 0x1c, 0x4b, 0xab, 0xe1, 0x90, 0x15, 0xfb, 0x3a, 0xd3, + 0x82, 0xdb, 0x88, 0xb3, 0xca, 0x8d, 0x3d, 0x99, 0x9a, 0x32, 0xa6, 0xd7, 0xcf, 0x7e, 0x03, 0x1c, + 0xc2, 0x10, 0xa5, 0x81, 0x42, 0xac, 0x5c, 0xa4, 0xaf, 0x7e, 0x8a, 0xd8, 0xd5, 0x78, 0xea, 0x4b, + 0x39, 0x94, 0x81, 0xee, 0xac, 0x96, 0x12, 0xcf, 0x87, 0x3c, 0x0d, 0x3b, 0xd6, 0xd1, 0xda, 0x1e, + 0x37, 0x99, 0xdf, 0x12, 0xcf, 0x01, 0xc1, 0x1b, 0xc9, 0x06, 0x97, 0x40, 0x34, 0xe3, 0xcd, 0xa8, + 0x37, 0xd4, 0x30, 0x6c, 0xad, 0x12, 0xc3, 0xdb, 0xe4, 0x9b, 0x2a, 0xe6, 0x8f, 0x53, 0xcc, 0xaf, + 0x02, 0xe8, 0x42, 0xb9, 0xa1, 0x77, 0xdc, 0x3c, 0xed, 0xaa, 0xbc, 0x4b, 0xe4, 0x7d, 0x49, 0xa0, + 0x5e, 0xba, 0xa2, 0x15, 0x50, 0x54, 0xf6, 0xb3, 0x15, 0x67, 0x2d, 0xe0, 0xc7, 0x19, 0x9d, 0x74, + 0xa6, 0x94, 0xfc, 0xc2, 0x04, 0x4c, 0x83, 0x53, 0x3e, 0x61, 0x31, 0x01, 0xe8, 0x7b, 0x5e, 0x8c, + 0xfc, 0x4e, 0x4c, 0x14, 0x09, 0x94, 0xab, 0xd0, 0x3e, 0x69, 0xf3, 0xc5, 0x3d, 0x63, 0x22, 0x0d, + 0xab, 0xc6, 0x2a, 0x0b, 0x45, 0x92, 0xf0, 0x65, 0x66, 0x8e, 0x47, 0x15, 0xb4, 0x4b, 0xd4, 0xc3, + 0x57, 0xff, 0xfb, 0x7e, 0x08, 0xb8, 0xf9, 0x8f, 0x4d, 0xed, 0x55, 0x26, 0xd5, 0xfe, 0x9d, 0x7a, + 0xa8, 0x45, 0x53, 0x96, 0x90, 0x04, 0x44, 0x03, 0x15, 0x4c, 0x8a, 0x3c, 0xd7, 0xae, 0xf2, 0x16, + 0x9f, 0xe6, 0xef, 0x46, 0xf2, 0xf3, 0x8a, 0x98, 0x42, 0xcb, 0xe5, 0xc8, 0x54, 0xf2, 0x87, 0x3b, + 0xbd, 0x13, 0xac, 0x79, 0xbf, 0xfd, 0xed, 0x85, 0x5e, 0xec, 0x1d, 0xbc, 0x6e, 0xec, 0xd2, 0x89, + 0x65, 0x4e, 0xc6, 0xc2, 0x21, 0x1e, 0xb0, 0x9c, 0x55, 0xa6, 0xb2, 0xd2, 0x25, 0x47, 0x4a, 0x96, + 0x6e, 0x27, 0x5d, 0xb5, 0xe7, 0x62, 0xc0, 0x04, 0x69, 0x09, 0xd8, 0x31, 0xfe, 0xa6, 0x07, 0x28, + 0xcc, 0x7b, 0x41, 0x6d, 0xa2, 0x37, 0x88, 0xa0, 0xac, 0x85, 0x9d, 0xa2, 0xb5, 0x5b, 0xa6, 0xe3, + 0x75, 0xb1, 0xdb, 0x64, 0x07, 0x1f, 0x94, 0xe4, 0x8b, 0x09, 0x46, 0xbf, 0x7b, 0x30, 0x4c, 0x25, + 0xf5, 0xa6, 0x20, 0xb7, 0xff, 0xad, 0x03, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, + 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, + 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, + 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, + 0xff, 0x0a, 0xff, 0x1f, 0x1e, 0x09, 0x99, 0x26, 0x00, 0x8e, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36352, // uncompressed data size (bytes) + 24092, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x4e, 0x00, 0x62, 0x3d, 0x08, 0x13, 0x4c, 0xc4, 0x43, 0x69, + 0x20, 0x00, 0x00, 0x87, 0x58, 0x20, 0x04, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36352 +// COMPRESSED SIZE (bytes): 24090 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_image_prod_data[] = +{ + 0xed, 0xfc, 0x43, 0x90, 0x68, 0x3d, 0xdb, 0x00, 0xec, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xd6, + 0x6e, 0xdb, 0xb6, 0x6d, 0xdb, 0xdd, 0xbb, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xe6, 0x79, 0xbe, 0x53, + 0x75, 0x26, 0xef, 0xe4, 0x4c, 0xff, 0xbf, 0x6a, 0x5f, 0x93, 0xa0, 0x6a, 0x65, 0xa5, 0x6a, 0x25, + 0x2b, 0x83, 0x3b, 0x89, 0x24, 0x00, 0xc2, 0x4c, 0x06, 0x40, 0x1c, 0x00, 0x38, 0x40, 0xf7, 0x51, + 0xfe, 0x7b, 0xf3, 0x0f, 0xa0, 0x24, 0x00, 0xe2, 0xff, 0x55, 0x40, 0x80, 0x03, 0xfc, 0x7f, 0x8b, + 0x11, 0x00, 0x00, 0x80, 0x3f, 0xc9, 0x00, 0xbe, 0xf4, 0x00, 0x00, 0x6f, 0x40, 0x6f, 0x00, 0xef, + 0x40, 0x31, 0x00, 0x40, 0x00, 0xbd, 0x3b, 0xbb, 0xbf, 0xbf, 0xbf, 0xf0, 0x11, 0x00, 0x80, 0x00, + 0xbf, 0x79, 0x20, 0x8d, 0x47, 0x00, 0xcc, 0xe9, 0xeb, 0x80, 0x8d, 0xc9, 0x00, 0x34, 0xe9, 0xeb, + 0x40, 0xff, 0x25, 0xa4, 0xe9, 0xeb, 0xc0, 0xff, 0x25, 0x78, 0xff, 0x3d, 0x0b, 0x90, 0x9e, 0x07, + 0xdc, 0xbc, 0x0b, 0xdc, 0x9b, 0x97, 0xbf, 0x9b, 0x9e, 0x07, 0x98, 0x9e, 0x01, 0xd4, 0xbc, 0x0b, + 0xd8, 0xbc, 0x0e, 0xe4, 0xbb, 0x0b, 0x00, 0x00, 0xcf, 0x28, 0x09, 0x80, 0xf0, 0x9a, 0x07, 0xd0, + 0x9d, 0x1c, 0xf7, 0x5f, 0x93, 0xcd, 0xef, 0x01, 0xcd, 0xef, 0xfe, 0x11, 0x00, 0xc1, 0x20, 0xff, + 0x55, 0xc1, 0x03, 0xb6, 0x65, 0x00, 0xe0, 0x02, 0xfd, 0x5f, 0x0e, 0xf8, 0xbf, 0x1c, 0x20, 0xd0, + 0xff, 0xaf, 0x0e, 0xec, 0xff, 0x72, 0xff, 0x35, 0x0b, 0x94, 0x02, 0x0f, 0xb1, 0xf3, 0x7f, 0x5d, + 0xf8, 0x7e, 0x07, 0x8d, 0x00, 0xd0, 0x07, 0xf8, 0xce, 0x05, 0xe9, 0x8c, 0x07, 0x6a, 0x49, 0x46, + 0xf8, 0xaf, 0xfc, 0xf3, 0x01, 0xe0, 0xfb, 0x5f, 0xc7, 0xde, 0x80, 0xba, 0x8b, 0x3f, 0x01, 0xe3, + 0xfa, 0xc6, 0xa1, 0x23, 0x08, 0x68, 0x01, 0xf2, 0xf3, 0xde, 0x00, 0xe2, 0xfe, 0x4b, 0x7b, 0x7f, + 0x9f, 0x18, 0xbe, 0x7e, 0x24, 0x01, 0xf3, 
0xf3, 0xf8, 0xde, 0x01, 0xfe, 0x4f, 0xf7, 0x83, 0x29, + 0x79, 0x3e, 0xa8, 0x47, 0x67, 0x41, 0xb7, 0x4a, 0xca, 0x26, 0x7c, 0x82, 0xa1, 0x40, 0x73, 0xcb, + 0x32, 0x4e, 0x28, 0x53, 0xe1, 0x37, 0xcc, 0x4f, 0x52, 0x49, 0xfd, 0x90, 0x68, 0xbc, 0xad, 0xb6, + 0xbd, 0x97, 0xe0, 0x70, 0x34, 0xd5, 0xa2, 0x19, 0xbc, 0xc3, 0x45, 0xa1, 0xb6, 0xbc, 0x42, 0x4a, + 0xda, 0x2e, 0x6a, 0x1f, 0x84, 0x43, 0x60, 0xfa, 0xcb, 0xc5, 0x47, 0x0a, 0x39, 0xe9, 0x23, 0xb9, + 0x95, 0xf3, 0xee, 0x64, 0x1d, 0xab, 0x5d, 0xfe, 0xf0, 0x79, 0x71, 0x01, 0x3d, 0x60, 0x39, 0xc8, + 0x15, 0x51, 0x21, 0xaf, 0x97, 0xf5, 0x8c, 0x33, 0x06, 0x24, 0xf1, 0x06, 0x64, 0x4c, 0x5d, 0x8b, + 0xf9, 0x41, 0x2f, 0x27, 0xb3, 0xe7, 0x5d, 0x47, 0x63, 0x30, 0x64, 0x1d, 0x65, 0xdd, 0x85, 0x64, + 0x6e, 0x07, 0x87, 0xff, 0x36, 0xe5, 0x80, 0xd1, 0x4f, 0x67, 0x30, 0x4f, 0xb2, 0xa9, 0x44, 0x02, + 0x57, 0xe6, 0xee, 0x02, 0xc0, 0x99, 0x1e, 0x44, 0x48, 0x55, 0xb6, 0x55, 0x34, 0xfb, 0xfb, 0x72, + 0xab, 0xc6, 0xeb, 0x24, 0x88, 0xa8, 0xc4, 0xd7, 0x92, 0xf7, 0x7c, 0xc9, 0x2e, 0x5b, 0xab, 0x2a, + 0x25, 0x28, 0x25, 0x95, 0x31, 0x46, 0xea, 0x39, 0xcc, 0x0b, 0xe2, 0x29, 0x94, 0x70, 0xca, 0x8c, + 0xb6, 0xaa, 0xe1, 0xe3, 0x87, 0x57, 0x9d, 0xa2, 0x7e, 0x26, 0x87, 0x5d, 0xe5, 0x22, 0x3e, 0xd0, + 0x84, 0x53, 0xfc, 0xf2, 0xc2, 0x18, 0x43, 0x41, 0x6f, 0x94, 0x1b, 0x1f, 0xc3, 0x98, 0x8c, 0xdc, + 0xd8, 0xb3, 0x23, 0xf2, 0x82, 0x73, 0x3f, 0xfc, 0xd0, 0x93, 0x02, 0x10, 0xb9, 0xde, 0x25, 0xde, + 0xe9, 0x8a, 0x3b, 0x13, 0x73, 0x9a, 0xe5, 0xda, 0xde, 0x6f, 0xce, 0x57, 0xbf, 0x77, 0xb5, 0xc4, + 0x13, 0xa2, 0x98, 0xac, 0x3d, 0xce, 0xc2, 0xa1, 0x1b, 0x85, 0x7b, 0xea, 0x4a, 0xf8, 0x8c, 0xd4, + 0x1e, 0xc7, 0x5d, 0x2f, 0x78, 0x01, 0x79, 0x07, 0xe8, 0x9e, 0x4f, 0x21, 0x10, 0x13, 0xcf, 0xe7, + 0xe6, 0x54, 0xf0, 0x64, 0x55, 0x5f, 0xe5, 0x97, 0x68, 0xdf, 0xd6, 0x66, 0xb0, 0x46, 0x5e, 0x9f, + 0xdb, 0x6b, 0x78, 0x9b, 0x97, 0x6f, 0x93, 0xea, 0xb7, 0xbe, 0x5f, 0xf9, 0x99, 0x57, 0x4f, 0xb9, + 0x6d, 0xd5, 0x6c, 0xfc, 0x55, 0x05, 0x32, 0x51, 0x46, 0x43, 0x6c, 0xeb, 0xcc, 0x85, 0x25, 0xfe, + 0x8e, 0x51, 0x63, 0x86, 0xf3, 0xfd, 0x19, 0x29, 0x85, 0xa6, 0x44, 0x1d, 0x5d, 0x62, 0x35, 0x09, + 0xed, 0x0b, 0xa2, 0x23, 0x9c, 0xe2, 0x47, 0xd2, 0x65, 0x09, 0x47, 0x4d, 0xeb, 0xa2, 0xb5, 0x09, + 0x18, 0xb3, 0xa7, 0x1c, 0x4b, 0xd5, 0x8e, 0x32, 0xf9, 0xee, 0xd7, 0x55, 0xbd, 0x57, 0xc1, 0x77, + 0x95, 0xbd, 0xdc, 0xe1, 0x78, 0x72, 0x57, 0xaf, 0x32, 0x83, 0x02, 0x20, 0xed, 0x42, 0x8b, 0x79, + 0x99, 0xb6, 0x32, 0x22, 0x12, 0xd1, 0x24, 0xfc, 0x1e, 0xc6, 0xfb, 0x27, 0xf0, 0xf0, 0xf9, 0xa0, + 0x45, 0xd1, 0x2c, 0x81, 0x4e, 0x17, 0xe4, 0x87, 0x64, 0x34, 0x20, 0x66, 0xa4, 0x5d, 0x99, 0xd2, + 0x53, 0x94, 0x3c, 0x8d, 0x49, 0x21, 0xe1, 0xfe, 0xb0, 0x00, 0x62, 0xde, 0x37, 0x2a, 0x1f, 0x8e, + 0xf6, 0x06, 0xba, 0xf8, 0x0a, 0x0e, 0xeb, 0x16, 0x16, 0xf3, 0x88, 0xf8, 0x99, 0xff, 0xa9, 0x36, + 0x1c, 0xb6, 0x49, 0xd6, 0x8d, 0x10, 0x7d, 0xb9, 0xee, 0x3b, 0xa6, 0xdc, 0x75, 0x9f, 0x2a, 0x79, + 0xfb, 0xc5, 0xfc, 0xa2, 0x71, 0xf1, 0xc0, 0x0c, 0x2c, 0x4a, 0xa4, 0xdc, 0xac, 0x75, 0xd9, 0x79, + 0x0e, 0xe6, 0xf5, 0x6f, 0x6e, 0x34, 0x61, 0xea, 0x2e, 0x55, 0x2b, 0x3f, 0x3f, 0x3f, 0xc9, 0x94, + 0x1f, 0x94, 0xde, 0x7b, 0x14, 0xd8, 0x92, 0x79, 0x08, 0xf4, 0x42, 0xee, 0xd5, 0x8a, 0x7e, 0x84, + 0x2f, 0xa5, 0x27, 0xd3, 0x83, 0x13, 0x29, 0xa7, 0x48, 0x46, 0x85, 0xdb, 0xe3, 0xdf, 0x21, 0x52, + 0xd1, 0x3a, 0xf2, 0x14, 0x77, 0x3a, 0x24, 0x29, 0xa1, 0x5e, 0xe8, 0xe1, 0xa5, 0x6d, 0xdc, 0x59, + 0x7d, 0xc6, 0x90, 0x65, 0xf3, 0xe2, 0xca, 0xbc, 0x69, 0xd7, 0x09, 0x73, 0x86, 0x19, 0xe2, 0xbd, + 0x41, 0xa3, 0xc6, 0xa9, 0x2f, 0xb0, 0x1b, 0xc8, 0x61, 0xae, 0x89, 
0x7d, 0x9e, 0xb5, 0x39, 0x30, + 0x69, 0x2f, 0x21, 0x29, 0xea, 0xb2, 0x62, 0xa1, 0x3e, 0x55, 0x7e, 0xb9, 0x5b, 0x98, 0x21, 0x0b, + 0x78, 0x37, 0x1e, 0x1f, 0x17, 0x2d, 0x88, 0x46, 0xb9, 0x97, 0x12, 0xdb, 0xb7, 0x02, 0x6d, 0xa2, + 0x6a, 0xb6, 0xab, 0x55, 0x3e, 0x31, 0x47, 0x30, 0x35, 0x8e, 0x31, 0x48, 0x07, 0x5a, 0x24, 0xa6, + 0x01, 0xd3, 0x5d, 0xae, 0x29, 0xa6, 0x82, 0xf3, 0xc5, 0x37, 0x9d, 0x39, 0xe0, 0x32, 0x4d, 0x23, + 0x47, 0xa8, 0x4a, 0x23, 0x0b, 0xc7, 0x42, 0x64, 0xfc, 0x82, 0x76, 0xea, 0x8d, 0x1f, 0xa6, 0xd8, + 0xfa, 0x1e, 0x89, 0xd7, 0x88, 0x4d, 0xe6, 0xc6, 0x7a, 0x48, 0x4c, 0x19, 0x47, 0x98, 0x35, 0x82, + 0x2d, 0xfc, 0x90, 0x7d, 0x9e, 0xb2, 0x04, 0xf5, 0x3f, 0x6b, 0xe3, 0xdc, 0x2f, 0xe2, 0x36, 0x80, + 0x84, 0xbd, 0x16, 0x26, 0xc7, 0x5a, 0x2f, 0xd7, 0x80, 0x33, 0xd9, 0x7a, 0xf9, 0xd2, 0x4a, 0x4e, + 0x62, 0xd3, 0x6e, 0x9a, 0x2b, 0x3c, 0x93, 0x3a, 0x42, 0x62, 0xd3, 0x11, 0x33, 0xb2, 0x26, 0xaa, + 0xd9, 0x0a, 0x55, 0x3e, 0xb2, 0xe7, 0x62, 0xca, 0x65, 0x47, 0xa3, 0xfa, 0x8e, 0xc0, 0x44, 0x90, + 0x5f, 0x93, 0xf0, 0x78, 0xf1, 0x2e, 0x37, 0x8e, 0x97, 0xb4, 0x98, 0x96, 0xb9, 0xf9, 0x7d, 0xcc, + 0x35, 0x93, 0x37, 0x40, 0x62, 0x99, 0xe2, 0x3b, 0x9a, 0x33, 0x34, 0xaa, 0xd5, 0x1c, 0x7a, 0x61, + 0x69, 0x80, 0x29, 0xee, 0x0e, 0x25, 0x30, 0x71, 0xf4, 0x36, 0x6c, 0xc3, 0xe5, 0xd2, 0xbc, 0xe4, + 0x2f, 0xa4, 0xc4, 0x7e, 0x2d, 0x6a, 0xf2, 0x1d, 0x36, 0x61, 0xad, 0xe1, 0xe1, 0x00, 0x00, 0x30, + 0x3e, 0xdf, 0x90, 0xbd, 0xa8, 0xf1, 0x0b, 0x33, 0xd8, 0xb2, 0x58, 0xb7, 0x72, 0x0a, 0x1f, 0x39, + 0xc0, 0x00, 0x23, 0x77, 0x7a, 0x31, 0x3f, 0x71, 0xc0, 0x42, 0xc2, 0x5c, 0x64, 0x62, 0x2d, 0xd6, + 0xec, 0x1f, 0xda, 0xb8, 0x9f, 0x19, 0x4d, 0xd3, 0x4d, 0xd5, 0xb7, 0x39, 0x69, 0x6d, 0x29, 0xdc, + 0x76, 0x52, 0x3f, 0x4b, 0x24, 0x70, 0x09, 0x35, 0x8c, 0x63, 0xdc, 0xf1, 0x8c, 0xb4, 0x23, 0x43, + 0xc9, 0xc2, 0xc1, 0x91, 0x4c, 0xf7, 0x1f, 0xcb, 0x10, 0x0c, 0xa4, 0x53, 0x55, 0x77, 0xdf, 0x07, + 0x31, 0x13, 0x44, 0x76, 0xab, 0x66, 0x08, 0xc8, 0xa4, 0x82, 0xd6, 0xbf, 0x52, 0x6e, 0x7f, 0x52, + 0x38, 0xfb, 0xa2, 0x2f, 0x01, 0x4e, 0x37, 0x6b, 0x78, 0x5a, 0xee, 0x11, 0x8c, 0x14, 0x7d, 0xa1, + 0xd7, 0xd6, 0xe2, 0x0a, 0x7b, 0x46, 0x7c, 0xca, 0x29, 0x19, 0x3b, 0xcc, 0x05, 0x51, 0x10, 0xa6, + 0x04, 0x61, 0x2f, 0x1b, 0x03, 0x97, 0x08, 0xf7, 0x88, 0x21, 0xd0, 0xf3, 0x01, 0x45, 0x39, 0x36, + 0x28, 0xe7, 0x93, 0x17, 0x2f, 0x09, 0xbf, 0xde, 0xf8, 0xad, 0x99, 0xbf, 0xbb, 0x29, 0x32, 0xaa, + 0x6a, 0x9c, 0x8e, 0x84, 0xc2, 0x40, 0xf8, 0x5b, 0x5a, 0x26, 0x48, 0x7d, 0x0e, 0x0d, 0x77, 0xd1, + 0xf4, 0x83, 0x76, 0x2f, 0x8d, 0x77, 0x5a, 0xb7, 0x7f, 0x0d, 0xef, 0x64, 0x97, 0x8e, 0xcc, 0x0c, + 0xb3, 0xa1, 0xa3, 0x5a, 0xee, 0x5b, 0x08, 0x1b, 0x23, 0x23, 0x2c, 0x1a, 0x54, 0x59, 0xda, 0x70, + 0x18, 0xe7, 0x86, 0xe3, 0x38, 0xbb, 0xe6, 0x83, 0x36, 0xb6, 0xbd, 0x2d, 0x05, 0x48, 0x1a, 0x8e, + 0x0e, 0x61, 0x61, 0xbb, 0x3d, 0x45, 0xc9, 0x31, 0xcd, 0x41, 0x84, 0xd9, 0x9b, 0x20, 0xf4, 0x75, + 0xbe, 0xc4, 0x09, 0x8c, 0x0a, 0x8a, 0xf4, 0x0c, 0x1c, 0xdc, 0x15, 0x97, 0x19, 0xa4, 0x35, 0x72, + 0x76, 0x90, 0x23, 0x5f, 0x40, 0x43, 0xe4, 0xd9, 0x2d, 0xb7, 0x98, 0x5e, 0xdb, 0xd4, 0xf4, 0x88, + 0x31, 0x46, 0x14, 0x06, 0x36, 0x99, 0xea, 0xbb, 0x2b, 0x5a, 0xd0, 0xee, 0x93, 0x2f, 0x12, 0xe8, + 0xf9, 0x1e, 0x7f, 0x01, 0x6f, 0x21, 0x93, 0xd2, 0xfb, 0x4e, 0x65, 0xa1, 0x6c, 0x10, 0x31, 0xe4, + 0xc8, 0xea, 0x76, 0x2e, 0x0a, 0xb7, 0x1c, 0x24, 0x89, 0x03, 0xe3, 0xc6, 0x4b, 0xac, 0xe6, 0xaa, + 0x7a, 0x8d, 0x02, 0xf3, 0x33, 0x03, 0x3e, 0x5f, 0x09, 0x32, 0xc2, 0xe4, 0x27, 0x3f, 0x62, 0x1d, + 0xcf, 0xce, 0xd6, 0xa1, 0x95, 0xcb, 0xf0, 0xa0, 0x54, 0xd1, 0x4c, 0x4b, 0x02, 0x9c, 0xe8, 
0x71, + 0xfc, 0x0a, 0x19, 0xa0, 0xa2, 0x60, 0x61, 0x72, 0xa0, 0x77, 0x2e, 0x91, 0xfc, 0xf6, 0x00, 0x5f, + 0x90, 0xa4, 0x3a, 0x7b, 0x15, 0xd4, 0x89, 0x81, 0x17, 0xe9, 0x21, 0x8d, 0x20, 0xbf, 0xe8, 0xd9, + 0xeb, 0x5d, 0x64, 0x61, 0x96, 0x0c, 0x8e, 0x1c, 0x05, 0x0b, 0x14, 0xea, 0x36, 0xde, 0x0e, 0x17, + 0x1a, 0xa6, 0xc8, 0x9d, 0xa6, 0x95, 0x5d, 0xc9, 0xcf, 0xaf, 0xb8, 0x2b, 0xdd, 0x14, 0xdd, 0x07, + 0x5c, 0xab, 0x28, 0xf5, 0xfb, 0xe2, 0x08, 0xa6, 0x36, 0x71, 0xea, 0x14, 0x88, 0xc5, 0x9e, 0x47, + 0x12, 0x23, 0x54, 0x44, 0x77, 0x94, 0x5f, 0xdd, 0xbf, 0xf9, 0xb4, 0x68, 0x5d, 0x45, 0x07, 0xc0, + 0xb3, 0xfe, 0xe6, 0x03, 0xbf, 0x85, 0xf0, 0x98, 0x69, 0x1f, 0xa2, 0x0b, 0x39, 0xeb, 0x32, 0xb2, + 0xa9, 0xb4, 0x94, 0x0f, 0x68, 0x99, 0xa4, 0x8c, 0xd2, 0x3c, 0xa4, 0x35, 0xe5, 0xf9, 0x34, 0xa5, + 0x11, 0xcb, 0xc1, 0x43, 0x54, 0x28, 0xf6, 0x56, 0xb1, 0x18, 0x3f, 0xa6, 0xf0, 0xf5, 0x13, 0x0f, + 0xd9, 0x3a, 0x0f, 0xad, 0xda, 0x43, 0x71, 0x14, 0x16, 0x43, 0xd7, 0xb4, 0xb7, 0xe5, 0xb1, 0x0e, + 0x93, 0xf8, 0x9a, 0x02, 0x1a, 0xcd, 0x1b, 0x05, 0x49, 0xe6, 0x77, 0x6d, 0xd7, 0x74, 0xbe, 0xac, + 0x9d, 0x28, 0xca, 0x89, 0x92, 0x02, 0xc8, 0x66, 0xe1, 0x71, 0x44, 0xac, 0x56, 0x5a, 0x54, 0x49, + 0xe3, 0x62, 0xf0, 0x26, 0x02, 0x02, 0x09, 0x86, 0xd3, 0x64, 0x5d, 0x9f, 0x37, 0x84, 0x7e, 0xd9, + 0x4b, 0x9a, 0x56, 0x35, 0x25, 0x17, 0x1c, 0xca, 0x05, 0xa0, 0xdf, 0x10, 0xb6, 0x17, 0x38, 0x2f, + 0x2a, 0xbe, 0x0b, 0x32, 0x0a, 0xeb, 0x42, 0x2c, 0x4f, 0x51, 0x8b, 0x62, 0x6a, 0xc0, 0x47, 0x7f, + 0xd9, 0x90, 0xf6, 0x34, 0x80, 0x73, 0x81, 0x10, 0x8c, 0x06, 0xb7, 0xe0, 0x70, 0x46, 0x45, 0xe6, + 0x69, 0x04, 0x9b, 0xa9, 0x64, 0xd7, 0x42, 0x21, 0xb8, 0x70, 0x9f, 0x1a, 0x09, 0xad, 0x66, 0x01, + 0xf7, 0x16, 0xb4, 0xc9, 0xd2, 0xd9, 0x8a, 0x88, 0x50, 0x06, 0x67, 0x7c, 0xb5, 0x54, 0x70, 0x10, + 0xb9, 0xd0, 0xc8, 0x88, 0x5d, 0x81, 0x5c, 0xfc, 0x61, 0xe7, 0x0f, 0xb1, 0x0e, 0x29, 0x0b, 0x50, + 0xf7, 0x19, 0xf9, 0xbc, 0x79, 0x6b, 0x2b, 0x87, 0x98, 0x58, 0x7a, 0xa3, 0xdf, 0x6a, 0xfe, 0xac, + 0xf6, 0xc6, 0xd5, 0xfa, 0xeb, 0xa7, 0x94, 0x15, 0x4e, 0xc7, 0x22, 0x1e, 0xf0, 0x83, 0xe6, 0x1d, + 0x46, 0xeb, 0x06, 0x22, 0x58, 0x89, 0x46, 0x8c, 0x8b, 0xa4, 0xbb, 0xf3, 0x41, 0x23, 0x6e, 0x7b, + 0x37, 0xe6, 0xb3, 0xc2, 0x35, 0xc5, 0x7c, 0xad, 0x01, 0x28, 0xd6, 0xc7, 0x5f, 0x08, 0xfc, 0xa0, + 0xc6, 0x70, 0x45, 0xcc, 0xaa, 0x56, 0xf0, 0x06, 0x48, 0x6e, 0x34, 0x05, 0x32, 0xa5, 0x63, 0x2f, + 0x3f, 0x30, 0x5f, 0x18, 0x11, 0xcd, 0xb9, 0xad, 0x32, 0x0d, 0xd7, 0xe7, 0x9b, 0x87, 0x24, 0xdb, + 0x51, 0x77, 0x0b, 0xbb, 0x18, 0x10, 0xfa, 0xec, 0x5d, 0xda, 0x44, 0x8b, 0xd8, 0xaf, 0xdb, 0x03, + 0xa0, 0x21, 0xe4, 0x16, 0xaf, 0xef, 0x76, 0x81, 0xcd, 0xf0, 0x52, 0x63, 0x81, 0xcf, 0x46, 0x4f, + 0xd9, 0x9a, 0x2e, 0x21, 0x3a, 0x73, 0x0f, 0x2a, 0xad, 0xcc, 0x71, 0xfc, 0x2e, 0x91, 0x69, 0x77, + 0xb3, 0xcc, 0x31, 0x6c, 0x7c, 0xf1, 0x25, 0xa4, 0x56, 0x4b, 0xd3, 0xad, 0xbe, 0x60, 0x9f, 0xad, + 0x12, 0xc9, 0x73, 0x04, 0x3d, 0x98, 0x26, 0xc7, 0xe3, 0x21, 0xc7, 0xcd, 0xb6, 0x02, 0xa1, 0xc8, + 0x4e, 0x90, 0x55, 0xc5, 0x64, 0xb9, 0x75, 0x18, 0x02, 0x71, 0x1a, 0xde, 0x95, 0x5a, 0x0c, 0x4c, + 0x96, 0xa7, 0x62, 0x34, 0xab, 0xf3, 0x36, 0x01, 0x3a, 0x8c, 0x57, 0x51, 0xf4, 0x30, 0x1a, 0xd1, + 0x66, 0xd6, 0xfb, 0x6c, 0xeb, 0xae, 0xff, 0x62, 0x72, 0x34, 0xa6, 0x46, 0xb4, 0x03, 0xb5, 0x83, + 0xac, 0x0c, 0x29, 0xef, 0xe3, 0x9e, 0x50, 0x76, 0x82, 0x61, 0xb6, 0x40, 0x0f, 0x54, 0xd7, 0x18, + 0x5b, 0x3c, 0x1e, 0xa3, 0x68, 0x42, 0x76, 0x34, 0xfd, 0x03, 0x41, 0x29, 0x85, 0x63, 0xb9, 0x9e, + 0x20, 0x9e, 0xfb, 0x90, 0x10, 0x70, 0x42, 0x7b, 0x10, 0x91, 0x7d, 0x87, 0x25, 0x06, 0xa0, 0xc0, + 0x4d, 0x94, 0x2b, 
0x3e, 0xb9, 0x97, 0x83, 0x92, 0x2a, 0x85, 0x75, 0x91, 0xe2, 0x3c, 0x74, 0x52, + 0x7c, 0x94, 0xbf, 0x9c, 0x84, 0xc7, 0x68, 0x9c, 0xbb, 0x65, 0x60, 0x90, 0x9f, 0xc7, 0x5a, 0xa6, + 0x5f, 0x6e, 0xf2, 0x0a, 0x98, 0x58, 0x12, 0x0c, 0xc5, 0xe0, 0x49, 0x6d, 0x1e, 0x24, 0xd4, 0xab, + 0xd9, 0x52, 0x46, 0xb9, 0x60, 0xd7, 0x6e, 0x12, 0x31, 0x0b, 0x79, 0x00, 0x43, 0x54, 0x3d, 0x90, + 0x06, 0x8f, 0x88, 0xc7, 0xde, 0xa9, 0x7a, 0xd0, 0x1c, 0xd8, 0xb3, 0x94, 0x03, 0x3a, 0x2d, 0x4b, + 0xb3, 0x2c, 0xa7, 0xdf, 0xcb, 0xa3, 0x40, 0xf7, 0x08, 0xc5, 0xef, 0x80, 0x56, 0x16, 0xc2, 0x4f, + 0x4e, 0x79, 0x94, 0x2e, 0xed, 0xc4, 0x37, 0xb8, 0xd4, 0xaf, 0xb8, 0x90, 0x1f, 0x74, 0xd8, 0x94, + 0x40, 0xca, 0xe1, 0x68, 0x96, 0xd0, 0xf1, 0x7e, 0xe6, 0x32, 0x10, 0xc0, 0x77, 0x4e, 0x73, 0xca, + 0x06, 0xd6, 0x97, 0x32, 0xaf, 0x69, 0x50, 0xfb, 0xea, 0xbf, 0xaa, 0x46, 0xa9, 0x76, 0xfc, 0x78, + 0xe9, 0x34, 0xf7, 0x89, 0x12, 0xeb, 0xb1, 0x3e, 0xdc, 0xe2, 0x3a, 0x4f, 0x13, 0x7b, 0x87, 0xe6, + 0x1d, 0x1b, 0xf6, 0xae, 0x19, 0x52, 0xbb, 0x32, 0xc1, 0x50, 0x33, 0xde, 0x97, 0x4e, 0x18, 0x76, + 0x34, 0xae, 0x4e, 0x26, 0x56, 0x61, 0x4c, 0x39, 0x94, 0xbb, 0x91, 0x80, 0xdd, 0xcc, 0x4a, 0x22, + 0xa5, 0x5b, 0xb6, 0x88, 0xa7, 0xbd, 0x95, 0x1c, 0xe2, 0x78, 0x94, 0xd6, 0xaa, 0x3e, 0x0d, 0xdc, + 0x30, 0x93, 0x75, 0x4f, 0xb7, 0xb0, 0x87, 0xac, 0x7d, 0x86, 0x0a, 0xb8, 0x6a, 0x79, 0x62, 0x6c, + 0x09, 0x0c, 0x0b, 0x74, 0x9d, 0x08, 0x57, 0xb0, 0xff, 0xc2, 0xa0, 0x5e, 0xb2, 0x74, 0x51, 0x08, + 0xb9, 0xf2, 0x42, 0x29, 0x4f, 0x25, 0x1f, 0x86, 0x93, 0x2b, 0x92, 0x32, 0x40, 0xf2, 0x53, 0xa0, + 0x77, 0x2a, 0xad, 0x50, 0x77, 0xa8, 0x82, 0xe6, 0x2e, 0x04, 0x85, 0x4b, 0xa0, 0x18, 0xce, 0xf1, + 0xdc, 0x42, 0x2f, 0x87, 0x46, 0x3b, 0x73, 0xaa, 0x5e, 0xb5, 0xb6, 0xd9, 0x0c, 0x00, 0x78, 0x13, + 0xda, 0x9d, 0x14, 0xb4, 0x00, 0x20, 0x7c, 0xec, 0x5b, 0x16, 0x96, 0x44, 0xca, 0x29, 0x10, 0x78, + 0x87, 0x9f, 0xb5, 0x3c, 0x05, 0x85, 0x36, 0xf1, 0x48, 0x9c, 0x27, 0x25, 0xdc, 0x99, 0x4b, 0x20, + 0x59, 0x88, 0xf8, 0x84, 0xb3, 0xc1, 0x6b, 0x7c, 0x03, 0x1f, 0x14, 0x29, 0xc4, 0xf1, 0xa2, 0x86, + 0xf6, 0xba, 0x43, 0x99, 0xee, 0xcb, 0x4b, 0x87, 0xcc, 0xd6, 0x18, 0xb2, 0xb8, 0x4a, 0x67, 0x81, + 0x6b, 0x59, 0x30, 0x82, 0x51, 0xfc, 0xb3, 0x50, 0x42, 0xc9, 0x86, 0xd5, 0x13, 0x08, 0xa6, 0xfd, + 0x76, 0x7d, 0x7f, 0xec, 0xe7, 0x5c, 0xca, 0x72, 0x0f, 0xa9, 0x92, 0x7d, 0xda, 0xb4, 0x0b, 0x57, + 0x6e, 0x67, 0x6a, 0xa5, 0xa3, 0x9a, 0x45, 0x39, 0x3d, 0xae, 0x05, 0x3e, 0x30, 0xfc, 0x54, 0x86, + 0x26, 0x2a, 0xb0, 0xcc, 0xf2, 0xc8, 0xc9, 0xa4, 0x04, 0xce, 0x03, 0x41, 0x0f, 0x98, 0x88, 0x0e, + 0x9c, 0x06, 0x4b, 0x57, 0xc1, 0x19, 0x6c, 0xbc, 0x27, 0x01, 0xea, 0x06, 0x01, 0x77, 0x50, 0xe1, + 0x01, 0x05, 0xf8, 0x38, 0xdc, 0x06, 0xa9, 0x18, 0x2c, 0x7e, 0x27, 0x21, 0xeb, 0x64, 0x3c, 0xb7, + 0x2f, 0x80, 0xa9, 0xf4, 0xab, 0x0d, 0xef, 0xfc, 0x47, 0x90, 0xd6, 0x85, 0xc4, 0x3d, 0xc5, 0xfd, + 0xaa, 0xfa, 0x06, 0x26, 0x76, 0x72, 0x11, 0x48, 0xec, 0xea, 0xcb, 0xf5, 0xec, 0x69, 0xd9, 0x56, + 0xe9, 0xec, 0xbd, 0xcf, 0xe5, 0xb4, 0xb4, 0xd1, 0x40, 0xc2, 0x38, 0xc5, 0x33, 0x94, 0x46, 0x1d, + 0xe8, 0x97, 0x73, 0xa0, 0x5c, 0xb2, 0x58, 0x40, 0xe6, 0x31, 0x2e, 0x74, 0x70, 0x3b, 0x97, 0x62, + 0xb8, 0x3a, 0x78, 0x32, 0x19, 0x80, 0x22, 0x40, 0x70, 0x10, 0x2e, 0xb9, 0x01, 0x27, 0x78, 0xff, + 0x0b, 0xad, 0xa6, 0xfc, 0xf0, 0x28, 0x94, 0xcc, 0x4f, 0x3b, 0x38, 0x80, 0xf5, 0x54, 0x13, 0xf4, + 0xfc, 0x81, 0x72, 0xc1, 0x6b, 0x13, 0x87, 0x98, 0x9d, 0x61, 0xe8, 0xe7, 0xba, 0x4e, 0xac, 0xdc, + 0x06, 0x3c, 0x4c, 0x87, 0xfa, 0xd5, 0xd8, 0x2f, 0xee, 0x87, 0x58, 0x03, 0x5d, 0x68, 0xb0, 0xd3, + 0x3a, 0xbd, 0x82, 0x7c, 0x71, 0x8a, 0x9d, 
0x3c, 0x74, 0x78, 0x9f, 0xed, 0x4f, 0xee, 0xe5, 0x1e, + 0x02, 0xb2, 0x50, 0xbd, 0x30, 0x89, 0x57, 0x21, 0x7e, 0xe5, 0x6f, 0x9e, 0xc1, 0x2e, 0x5c, 0x85, + 0x71, 0x9c, 0x68, 0xe5, 0x03, 0xd0, 0x42, 0x95, 0x14, 0xd6, 0x2f, 0x2e, 0x3a, 0x28, 0x40, 0xa0, + 0x97, 0xcd, 0xdc, 0xd2, 0xba, 0xb8, 0x99, 0xb4, 0xf5, 0x3a, 0xc8, 0x37, 0x83, 0xd2, 0xc8, 0x5e, + 0xa4, 0xb0, 0x50, 0x79, 0xfb, 0x74, 0xde, 0x9a, 0xb2, 0xf6, 0xa1, 0x77, 0x2d, 0xc1, 0x59, 0xba, + 0x99, 0x79, 0xe6, 0x87, 0x90, 0x97, 0x52, 0x30, 0x9b, 0x61, 0xf1, 0xb3, 0x96, 0x7c, 0x5c, 0x4a, + 0xcd, 0x9f, 0xd3, 0xf5, 0x25, 0x41, 0xe9, 0x61, 0x72, 0xc7, 0x03, 0x26, 0x4b, 0x14, 0x59, 0xc2, + 0x1b, 0xd4, 0x3f, 0x11, 0x4e, 0x86, 0x87, 0xd9, 0xe1, 0x46, 0xa9, 0xf4, 0x0b, 0x90, 0x57, 0xef, + 0x22, 0x63, 0x24, 0x71, 0xf3, 0x5a, 0x21, 0xec, 0x8b, 0xa4, 0x5d, 0x51, 0x61, 0xbf, 0x72, 0x30, + 0xfa, 0x7d, 0x0c, 0xb4, 0x5d, 0x76, 0x9e, 0xf4, 0xdb, 0xab, 0xa3, 0x55, 0xce, 0xe8, 0xbd, 0xd5, + 0x94, 0xf0, 0x20, 0xf6, 0xc0, 0x22, 0x42, 0xd0, 0x99, 0x56, 0xba, 0x1d, 0xd0, 0xec, 0x3e, 0x80, + 0x6d, 0x16, 0x14, 0x3c, 0x9b, 0xc9, 0x5b, 0xa9, 0x20, 0x42, 0x3d, 0xec, 0x67, 0xc8, 0x7b, 0x35, + 0xd6, 0xa1, 0xa2, 0x2e, 0xf6, 0x42, 0x3b, 0x88, 0xe1, 0xcc, 0xb7, 0xd2, 0xaf, 0x59, 0xa5, 0xe4, + 0xa0, 0x0c, 0xce, 0x9e, 0x7e, 0xa4, 0x8b, 0xb0, 0xca, 0xb7, 0x30, 0x0c, 0x4a, 0x28, 0x95, 0x72, + 0x3a, 0x7e, 0x54, 0x76, 0x7b, 0x1e, 0x2b, 0xe6, 0x81, 0xcb, 0x62, 0x9e, 0x2b, 0xc9, 0xd9, 0xb9, + 0x8f, 0xde, 0x6e, 0x85, 0x91, 0x9c, 0xa0, 0xf5, 0x7c, 0x4f, 0xd2, 0x9e, 0x4b, 0x3c, 0x5d, 0x63, + 0xe4, 0xd9, 0x27, 0xa3, 0x72, 0xa9, 0x82, 0x42, 0x74, 0xeb, 0xc2, 0x7b, 0x56, 0x28, 0x2b, 0x1f, + 0x82, 0xa1, 0x35, 0x2c, 0x8d, 0x44, 0x1c, 0x71, 0xed, 0x54, 0xc6, 0xc2, 0xec, 0x61, 0x7f, 0x6c, + 0xd8, 0x89, 0x33, 0xbc, 0x66, 0x96, 0x68, 0x44, 0x9f, 0x5e, 0xe2, 0x96, 0x48, 0x7d, 0x58, 0x9a, + 0x30, 0x66, 0x3f, 0x08, 0xa1, 0x3c, 0x01, 0xf3, 0x10, 0x70, 0x05, 0xf2, 0x9f, 0x29, 0xa8, 0x84, + 0x33, 0x88, 0xf7, 0xb4, 0x21, 0x82, 0x75, 0x47, 0xfb, 0x1b, 0xcb, 0x8a, 0xf2, 0xde, 0xb3, 0xc1, + 0xca, 0xc9, 0xcc, 0xe3, 0x0a, 0x50, 0x66, 0x0a, 0x15, 0xe7, 0x90, 0x37, 0xf3, 0x35, 0x4e, 0xe0, + 0xbd, 0x8e, 0xe8, 0x69, 0x39, 0x91, 0x17, 0x0a, 0x15, 0x99, 0x2e, 0xd0, 0x37, 0xa9, 0x05, 0x0a, + 0x68, 0x6e, 0xb3, 0xea, 0xe9, 0x08, 0x76, 0x06, 0xc9, 0x22, 0x58, 0x06, 0x40, 0x30, 0x58, 0xc7, + 0x09, 0x50, 0xd9, 0x37, 0x44, 0x83, 0x24, 0x7e, 0x25, 0x58, 0x0d, 0xf7, 0xf3, 0xe5, 0x75, 0xb3, + 0xb8, 0x46, 0x39, 0xef, 0x6d, 0xc4, 0x10, 0x7a, 0x26, 0x28, 0xfd, 0xd1, 0x06, 0xe9, 0x64, 0x0e, + 0x04, 0x76, 0x86, 0x6f, 0x93, 0x40, 0xd9, 0xf5, 0x6c, 0xd3, 0xa6, 0x1f, 0x54, 0x90, 0xf7, 0xd1, + 0x2d, 0x8c, 0x18, 0x42, 0x2a, 0x4f, 0x5b, 0xd2, 0xc4, 0xbd, 0x36, 0xec, 0x85, 0xad, 0x33, 0x77, + 0xad, 0x91, 0xfc, 0xec, 0x7e, 0xcf, 0x0d, 0xcf, 0x8e, 0x12, 0x55, 0x40, 0x38, 0xfa, 0xe6, 0x33, + 0xfa, 0xd6, 0xda, 0xb5, 0xb2, 0x55, 0x0b, 0x7b, 0xc4, 0xbe, 0x1a, 0x28, 0xef, 0x1a, 0xf7, 0x9b, + 0xe8, 0x82, 0x2f, 0x3a, 0xdb, 0xb7, 0xaf, 0x09, 0x8c, 0x2b, 0x48, 0x3e, 0x28, 0x46, 0x30, 0x38, + 0x22, 0x7e, 0x14, 0x56, 0x10, 0xd3, 0x88, 0x30, 0x80, 0xd1, 0x66, 0x31, 0x5f, 0x3d, 0xeb, 0x6d, + 0xb6, 0x3c, 0xe9, 0xf8, 0xd1, 0xd0, 0x36, 0xc4, 0x7b, 0x11, 0xcd, 0x32, 0x4b, 0x1a, 0x7e, 0xfe, + 0x0a, 0x37, 0x8a, 0xd6, 0x2f, 0x6e, 0xf8, 0x12, 0x31, 0x05, 0x35, 0x9c, 0x7b, 0x04, 0x1b, 0xbb, + 0x13, 0x8f, 0xa1, 0x1e, 0xc2, 0x10, 0xad, 0x7f, 0x93, 0xef, 0x31, 0x0e, 0xcd, 0x6c, 0xe7, 0x5c, + 0xba, 0x11, 0x37, 0x69, 0x26, 0xbb, 0x74, 0x90, 0x43, 0x65, 0xc2, 0xc7, 0x60, 0x67, 0xd5, 0xbf, + 0x70, 0xc8, 0x1f, 0x3e, 0xd7, 0x2a, 0x25, 0x18, 0x0a, 0x23, 0xfb, 
0x6f, 0xed, 0xa9, 0x06, 0x4d, + 0x86, 0xd1, 0x6f, 0x44, 0x08, 0xed, 0xb7, 0x73, 0x69, 0xcd, 0x77, 0xdb, 0x07, 0x18, 0xce, 0x79, + 0xbe, 0x65, 0x61, 0xca, 0x76, 0xc4, 0x10, 0x70, 0x49, 0x59, 0x7d, 0xec, 0x9c, 0x6e, 0xb1, 0x56, + 0x91, 0xa8, 0x4d, 0x1b, 0x4b, 0x67, 0x67, 0xf4, 0x2b, 0x19, 0x14, 0xb8, 0xa4, 0x70, 0x28, 0x31, + 0xd9, 0xd3, 0xaa, 0x00, 0xd5, 0x85, 0x7b, 0x94, 0xd2, 0xfb, 0x23, 0xa1, 0x37, 0xb7, 0xfe, 0x48, + 0x6d, 0xd8, 0xa0, 0x3e, 0xf6, 0xc5, 0x8b, 0x01, 0x63, 0xab, 0xb2, 0x52, 0xb4, 0xbe, 0x16, 0x11, + 0x7e, 0x52, 0x6e, 0x59, 0xd4, 0xd9, 0xe8, 0x19, 0xca, 0x8b, 0xbc, 0xfa, 0x69, 0x62, 0x0d, 0x18, + 0x7c, 0x39, 0xeb, 0x96, 0x31, 0xc2, 0xb5, 0xc1, 0x7d, 0x0a, 0x38, 0xd7, 0x76, 0x40, 0xcb, 0x1e, + 0x42, 0xd7, 0xc1, 0xd3, 0x1f, 0x89, 0x27, 0xd2, 0x60, 0xb6, 0x21, 0xa3, 0x15, 0x36, 0xd5, 0xf1, + 0x1e, 0xaa, 0xb9, 0x7c, 0x4f, 0x33, 0x83, 0xb1, 0x2f, 0x43, 0x17, 0xf1, 0x01, 0x47, 0xdb, 0x08, + 0xa1, 0xc6, 0xc2, 0x61, 0x77, 0x37, 0x20, 0x4e, 0xee, 0xc0, 0xa6, 0x5e, 0x05, 0xc4, 0x15, 0xbb, + 0xcf, 0x7b, 0x9e, 0xd0, 0x15, 0xb8, 0x35, 0xc2, 0x1b, 0xe3, 0x23, 0xaf, 0xc4, 0x77, 0xb5, 0x8f, + 0xa1, 0x7e, 0x79, 0xd0, 0xce, 0x0a, 0xc1, 0x5f, 0xeb, 0xee, 0x32, 0x14, 0x3b, 0x0e, 0x2e, 0x43, + 0xeb, 0xbd, 0x90, 0x2c, 0x09, 0xc2, 0x00, 0x4e, 0x55, 0x57, 0x64, 0xf7, 0x63, 0x4f, 0x37, 0xbd, + 0xc1, 0x78, 0x28, 0x93, 0x4e, 0x62, 0xd9, 0x4b, 0xa5, 0xbc, 0xd5, 0xb6, 0x7c, 0xef, 0x5e, 0xb3, + 0x39, 0x5c, 0xf6, 0x9d, 0xf8, 0x6b, 0xa2, 0xe6, 0x8d, 0x7e, 0xbe, 0x09, 0xde, 0x33, 0x79, 0xa4, + 0xb1, 0x28, 0x0e, 0x86, 0x9b, 0x9d, 0x24, 0x50, 0xf2, 0x0a, 0x09, 0xf8, 0xfb, 0x68, 0x1f, 0x3e, + 0x39, 0x15, 0xbf, 0xec, 0xfa, 0xf8, 0x27, 0x1a, 0xbf, 0x56, 0xe2, 0x0a, 0x22, 0xd1, 0x30, 0x5a, + 0xcb, 0x84, 0x5e, 0x54, 0xf7, 0xe6, 0x21, 0xe1, 0x91, 0xa6, 0xba, 0x69, 0x61, 0xac, 0x87, 0x0a, + 0x24, 0xbc, 0xfa, 0xcb, 0xa3, 0xaa, 0xc7, 0xd5, 0x4b, 0xd2, 0x8f, 0xd1, 0xbd, 0xce, 0xaf, 0xd2, + 0x78, 0x1c, 0xff, 0xac, 0x2b, 0x9e, 0xb9, 0x05, 0xf2, 0x8e, 0x18, 0x30, 0xda, 0xb6, 0xcb, 0x8d, + 0x0f, 0x43, 0xdf, 0x76, 0xf0, 0x2c, 0xcc, 0x6a, 0xae, 0x4a, 0x31, 0x24, 0x4a, 0x19, 0xb6, 0x8e, + 0xb1, 0xeb, 0xbb, 0x21, 0xb4, 0x11, 0x92, 0x21, 0x61, 0xa6, 0x0f, 0x20, 0x77, 0x23, 0x6b, 0xbc, + 0x67, 0x9d, 0x18, 0x79, 0xd1, 0xf9, 0x30, 0xe5, 0x90, 0xb4, 0xf7, 0x53, 0xd0, 0x84, 0x1e, 0x9f, + 0xa4, 0xad, 0xe5, 0x67, 0x4b, 0x25, 0x0c, 0xfe, 0xad, 0x27, 0x53, 0xe2, 0x27, 0x94, 0x90, 0x15, + 0x08, 0xb5, 0xa4, 0x54, 0x85, 0x83, 0x68, 0x66, 0xe7, 0x48, 0x36, 0xec, 0xeb, 0x15, 0xb8, 0x7b, + 0xa9, 0x73, 0x07, 0xc2, 0x7a, 0x20, 0x27, 0x60, 0x4f, 0xcf, 0x57, 0xc1, 0xef, 0x73, 0x69, 0xe6, + 0x90, 0x3f, 0xf7, 0xa8, 0x98, 0x20, 0x60, 0x23, 0x98, 0xbc, 0xed, 0xe1, 0x04, 0x52, 0x1c, 0x03, + 0xde, 0x4c, 0x6c, 0x93, 0xff, 0x62, 0x1e, 0x28, 0xd5, 0x24, 0x15, 0x8b, 0x07, 0xad, 0x2c, 0x06, + 0x2f, 0xe5, 0x15, 0xa7, 0x82, 0xfd, 0x10, 0xfb, 0xab, 0x49, 0xcb, 0x53, 0x7d, 0xee, 0x32, 0x22, + 0x47, 0x57, 0xa1, 0xbf, 0xf1, 0x1c, 0x65, 0xf8, 0x9d, 0x16, 0xcf, 0x89, 0x3a, 0x0f, 0x71, 0xa1, + 0xc4, 0x94, 0x9c, 0xbb, 0x6d, 0xb9, 0x37, 0x09, 0xff, 0xf2, 0x4c, 0xb9, 0xdd, 0xa6, 0x83, 0xb3, + 0xb3, 0x82, 0x51, 0xc9, 0xbb, 0x1a, 0x52, 0x47, 0x24, 0x9d, 0xe1, 0x19, 0xf0, 0x7d, 0xbe, 0xdd, + 0x8f, 0x9a, 0x31, 0x60, 0x3e, 0x1f, 0x31, 0x67, 0x8f, 0xc2, 0x7e, 0xd3, 0x0b, 0x64, 0x02, 0xb8, + 0x42, 0xb5, 0x7e, 0x04, 0x26, 0x1e, 0x3e, 0x9c, 0x2a, 0x17, 0xca, 0x3a, 0x94, 0x0f, 0xb6, 0xce, + 0xae, 0x4b, 0xb9, 0x0f, 0x4b, 0x48, 0x57, 0x8a, 0x59, 0x78, 0x16, 0xeb, 0xed, 0x11, 0x34, 0x88, + 0x4b, 0x73, 0xa1, 0x4d, 0x7f, 0x48, 0x51, 0x2f, 0xea, 0xab, 0xc6, 0x57, 0x70, 0xb9, 0xd5, 
0x6e, + 0x6f, 0xa9, 0xef, 0x7b, 0x9f, 0xa7, 0xb9, 0x59, 0xca, 0xe3, 0x01, 0xa2, 0x7b, 0x29, 0xf7, 0x69, + 0xb8, 0x8a, 0x8a, 0x19, 0xda, 0x8c, 0x3a, 0xfb, 0x91, 0x9a, 0x58, 0xe5, 0x4a, 0x24, 0x29, 0xea, + 0xe7, 0xa0, 0x97, 0x2a, 0xa6, 0x3b, 0x83, 0x5d, 0x82, 0xa1, 0x97, 0x5c, 0x47, 0xc9, 0xc4, 0xc7, + 0x12, 0xa6, 0xb0, 0x67, 0x3e, 0x34, 0xfd, 0x1c, 0x3c, 0xc6, 0x88, 0x89, 0x00, 0xf2, 0x24, 0x0b, + 0x04, 0x9d, 0x99, 0xa9, 0xfe, 0x87, 0x82, 0x29, 0x2b, 0x23, 0x52, 0x16, 0xad, 0x01, 0x6a, 0x73, + 0xca, 0x75, 0xb5, 0x86, 0xdc, 0xb6, 0xf1, 0xd3, 0x2a, 0x4b, 0x89, 0x14, 0x61, 0xb2, 0xb0, 0xfb, + 0xaa, 0x4e, 0xe9, 0xe8, 0xaf, 0x6c, 0xe3, 0xe6, 0x24, 0xd4, 0x62, 0xc2, 0x38, 0xa1, 0x23, 0x0d, + 0x38, 0x8c, 0x5e, 0x59, 0xa6, 0x22, 0x43, 0x02, 0xcd, 0x73, 0x62, 0x2b, 0x38, 0x40, 0xcc, 0xff, + 0xfe, 0xdf, 0xc5, 0xdf, 0x5c, 0x7e, 0x54, 0x07, 0xd4, 0x07, 0x15, 0x8c, 0x36, 0xd1, 0x2d, 0x02, + 0x1c, 0xd9, 0x8c, 0xeb, 0x74, 0x0b, 0xef, 0x11, 0x44, 0xb5, 0xd3, 0x34, 0xb2, 0x4b, 0xfa, 0x71, + 0x94, 0x93, 0x9c, 0xa3, 0xa9, 0x3c, 0x77, 0x66, 0x41, 0xa4, 0x65, 0xe6, 0xe1, 0x5f, 0xde, 0x87, + 0xf0, 0xe1, 0xb9, 0x83, 0xaa, 0xc3, 0x19, 0xab, 0xea, 0xd1, 0xe7, 0x53, 0x8f, 0x0f, 0x41, 0x90, + 0x34, 0xf7, 0x3a, 0x60, 0x81, 0x16, 0xca, 0x79, 0x84, 0x1f, 0x4c, 0xab, 0xc9, 0x9a, 0x7e, 0xd5, + 0x00, 0x0e, 0xe5, 0x5d, 0x16, 0xaf, 0xe3, 0x07, 0x91, 0x45, 0xe2, 0x79, 0xc2, 0x78, 0x3a, 0xef, + 0x50, 0x36, 0x92, 0x52, 0xc5, 0x4d, 0x1f, 0x0d, 0xfc, 0xda, 0xcd, 0x0c, 0x99, 0x9d, 0x50, 0xb4, + 0x97, 0xe2, 0x72, 0x5e, 0xae, 0xba, 0xb5, 0xd2, 0x5c, 0xf3, 0x93, 0xf9, 0xe7, 0x3d, 0x0d, 0xa1, + 0x9c, 0xf4, 0xa4, 0x19, 0x68, 0x95, 0x4e, 0xab, 0x6b, 0x58, 0x29, 0x17, 0xa7, 0xb4, 0x70, 0xd6, + 0x90, 0x0d, 0x77, 0xca, 0xb8, 0x4e, 0x00, 0xda, 0x70, 0x63, 0x15, 0x52, 0xdd, 0x27, 0x97, 0x52, + 0x2d, 0x8b, 0xf2, 0x84, 0xd6, 0xb3, 0x6a, 0xf7, 0x07, 0x80, 0x81, 0xfc, 0x4d, 0x84, 0xb3, 0x9a, + 0x27, 0x8b, 0xe7, 0xff, 0x62, 0x50, 0x8d, 0x42, 0x23, 0x40, 0xee, 0x5a, 0xc4, 0x9c, 0x58, 0xe1, + 0x01, 0x0d, 0xa3, 0x90, 0xed, 0x65, 0xfa, 0xf2, 0x65, 0x9c, 0x0d, 0x7e, 0xac, 0x4c, 0x8c, 0xf2, + 0x49, 0x74, 0xc3, 0x49, 0x92, 0xfc, 0x29, 0x85, 0xca, 0xe6, 0x1f, 0xe7, 0x06, 0xbb, 0x3c, 0xea, + 0xe1, 0x29, 0x19, 0x10, 0xbf, 0x59, 0xf4, 0x6f, 0xf5, 0x39, 0x55, 0xd7, 0xcd, 0x7e, 0x6a, 0x50, + 0x80, 0x8f, 0x1f, 0xd9, 0x39, 0xe6, 0xfd, 0x1a, 0x57, 0xed, 0x09, 0x77, 0xbb, 0x6c, 0x5a, 0xad, + 0x95, 0xe3, 0xcc, 0xd3, 0x5b, 0x32, 0x41, 0xd2, 0x58, 0x76, 0x28, 0x2d, 0xbc, 0xe9, 0x50, 0x19, + 0xdc, 0x94, 0xb4, 0x99, 0x51, 0xae, 0xfe, 0x3c, 0x92, 0x29, 0x5a, 0x25, 0x72, 0xe8, 0x89, 0x36, + 0xd2, 0xae, 0x83, 0x96, 0x05, 0xce, 0x63, 0x80, 0xa6, 0x14, 0x51, 0xb8, 0x11, 0xd9, 0x1c, 0x10, + 0x70, 0x0d, 0xf5, 0x44, 0x8b, 0xca, 0xbe, 0xa4, 0x55, 0xbe, 0x2d, 0x4f, 0x5d, 0x9a, 0xb4, 0xa0, + 0x7e, 0x41, 0xb4, 0xbc, 0x31, 0x17, 0x61, 0x2f, 0xf2, 0x08, 0xf4, 0xfb, 0xbe, 0x7e, 0xc2, 0x3c, + 0x8b, 0x4d, 0x62, 0x97, 0x02, 0x80, 0x86, 0x0e, 0x1b, 0x56, 0xcb, 0x0f, 0x5c, 0x5b, 0x83, 0xba, + 0x57, 0xe0, 0xca, 0x91, 0x6c, 0x6e, 0x69, 0xf8, 0x0e, 0x7a, 0xb9, 0x10, 0x83, 0x8c, 0xec, 0x9e, + 0x94, 0x4f, 0x94, 0x5b, 0x93, 0xc7, 0x00, 0x29, 0xee, 0x26, 0xfa, 0x5b, 0xe5, 0xd0, 0xcc, 0xa2, + 0x97, 0xf4, 0x6e, 0x89, 0x99, 0x78, 0x74, 0x49, 0x5e, 0x12, 0x21, 0xfa, 0xc9, 0xfa, 0xc1, 0x22, + 0x75, 0xa5, 0x0e, 0xa8, 0xb4, 0x31, 0x5b, 0x2d, 0x43, 0xe9, 0x24, 0xe9, 0xff, 0x97, 0x09, 0x7e, + 0x0c, 0x8f, 0x62, 0x51, 0x0f, 0x21, 0x70, 0x53, 0x4a, 0x0f, 0x00, 0x1a, 0x4c, 0xc1, 0x70, 0xa2, + 0xee, 0xa1, 0xe3, 0x33, 0x9d, 0xfc, 0x10, 0x88, 0x8a, 0x9a, 0xb2, 0x6f, 0x24, 0xb5, 0x4d, 0x1d, + 0xc0, 0x74, 0x7b, 
0x78, 0xe9, 0x46, 0xef, 0x7e, 0x7f, 0x84, 0xe9, 0xab, 0x5b, 0x59, 0x24, 0x4a, + 0xa8, 0xd6, 0xab, 0x20, 0x3a, 0x79, 0xcd, 0x57, 0x8a, 0xc6, 0xf9, 0x50, 0x0c, 0x28, 0x88, 0x65, + 0x81, 0xa0, 0xfa, 0x7c, 0xad, 0x56, 0x27, 0xf9, 0xfd, 0xcf, 0x09, 0x18, 0xac, 0x08, 0xf5, 0x98, + 0x14, 0x3f, 0x38, 0x0c, 0x7a, 0xf5, 0xfd, 0x4a, 0xa3, 0x72, 0x18, 0x6d, 0x95, 0xdd, 0x4c, 0x81, + 0xa5, 0x0d, 0xf8, 0xc5, 0x2d, 0xad, 0x42, 0x8e, 0xf2, 0x8c, 0xb8, 0x81, 0x24, 0x4f, 0xbb, 0x86, + 0xae, 0x94, 0xa8, 0x7f, 0xfc, 0x79, 0x7e, 0x4d, 0xa0, 0xca, 0x8b, 0x21, 0x39, 0x29, 0xfb, 0x74, + 0xda, 0x3c, 0x58, 0x48, 0x05, 0xd1, 0x73, 0x1f, 0xca, 0xdc, 0xd8, 0x74, 0x42, 0x22, 0x11, 0x9a, + 0x7c, 0x46, 0xb1, 0x3c, 0xb0, 0xcf, 0xab, 0x6c, 0x8f, 0x70, 0x87, 0x33, 0xe1, 0xea, 0x99, 0xc6, + 0x1c, 0x32, 0xe1, 0xff, 0xce, 0xc7, 0x1d, 0xfa, 0xd2, 0xf2, 0x28, 0x4c, 0x98, 0x55, 0xe2, 0x73, + 0xae, 0x5a, 0x31, 0xee, 0x4e, 0x06, 0xd3, 0x87, 0xeb, 0xd0, 0x09, 0xe9, 0x9f, 0x24, 0x4b, 0x72, + 0x38, 0x86, 0xb3, 0x0b, 0x9c, 0xc4, 0xe3, 0xe0, 0x80, 0xce, 0xac, 0xbf, 0x99, 0x5b, 0x98, 0x4b, + 0x91, 0x87, 0x14, 0x6a, 0x8b, 0x96, 0xd3, 0xba, 0x8e, 0xe0, 0x42, 0xa1, 0xd0, 0xd2, 0xc6, 0xed, + 0xa8, 0x72, 0x54, 0x35, 0xad, 0xaf, 0x37, 0x6a, 0xc7, 0x8e, 0xed, 0x3c, 0x4d, 0xee, 0x0d, 0x8d, + 0x54, 0x76, 0x26, 0xb8, 0xf9, 0xcf, 0x81, 0x65, 0x9d, 0xc4, 0x75, 0x96, 0x36, 0x7f, 0x78, 0x64, + 0x44, 0xd4, 0x03, 0x48, 0xfe, 0xf4, 0xbe, 0x30, 0x54, 0x99, 0x30, 0xf5, 0xc0, 0x5d, 0x9c, 0x5b, + 0x1a, 0x4b, 0x1d, 0xd5, 0xa1, 0x32, 0xc5, 0xb4, 0x64, 0xd3, 0x64, 0xed, 0x7a, 0x78, 0x22, 0x59, + 0x72, 0x08, 0xdd, 0xf1, 0x62, 0x2c, 0x26, 0x2e, 0x04, 0x03, 0xae, 0x29, 0x2d, 0xf5, 0x38, 0x1a, + 0x73, 0x9d, 0x51, 0x95, 0xce, 0x25, 0x45, 0xc2, 0x7a, 0x9c, 0x40, 0x98, 0xd6, 0xae, 0x2e, 0x4a, + 0xd4, 0xfb, 0xa9, 0xcf, 0x9d, 0x9a, 0x2e, 0xf1, 0x5b, 0x9d, 0xdf, 0x0a, 0x3e, 0x09, 0x06, 0x09, + 0xe6, 0x30, 0x34, 0xf1, 0x10, 0x8f, 0x13, 0x18, 0x18, 0x6d, 0x52, 0xa5, 0x34, 0x69, 0x63, 0x20, + 0x25, 0x17, 0xb3, 0x55, 0x17, 0x06, 0xee, 0x26, 0x2e, 0x77, 0x3b, 0x3e, 0xd3, 0xcf, 0xb3, 0x13, + 0xe5, 0x03, 0xf9, 0xe6, 0xed, 0xac, 0x28, 0xb1, 0x0f, 0x3a, 0x1d, 0xe6, 0x81, 0xc6, 0xfb, 0x27, + 0x65, 0x91, 0x9b, 0x1f, 0xaa, 0x5c, 0x5d, 0x0e, 0xe0, 0x74, 0x46, 0x4e, 0xab, 0xad, 0x97, 0x7d, + 0x5d, 0xa5, 0x5b, 0x48, 0x0f, 0x85, 0x9e, 0x0f, 0xaa, 0x20, 0x7a, 0x7a, 0x70, 0x97, 0x92, 0xc6, + 0xfa, 0x21, 0xcb, 0xaa, 0x44, 0x8b, 0x13, 0x28, 0x5f, 0x24, 0xa8, 0x10, 0x84, 0xb3, 0x5f, 0xe8, + 0xf1, 0x4f, 0x75, 0x3a, 0x62, 0x94, 0xe7, 0x8b, 0xe2, 0xeb, 0x2c, 0xb8, 0xfb, 0xc9, 0x12, 0xc6, + 0x3a, 0x7b, 0x27, 0xa8, 0xc9, 0x94, 0xfb, 0xda, 0xf6, 0x93, 0x0f, 0x68, 0x4d, 0x21, 0x5c, 0x97, + 0x86, 0x9f, 0x59, 0xcf, 0xd1, 0xea, 0xd0, 0x7b, 0xa5, 0x48, 0x91, 0xfd, 0xf5, 0x04, 0xf2, 0x90, + 0x86, 0xdb, 0xbf, 0xa0, 0x44, 0x26, 0xf7, 0xce, 0xa7, 0x25, 0xf1, 0x7d, 0x60, 0x41, 0x7f, 0xab, + 0x02, 0xa9, 0xc3, 0x1d, 0xcf, 0x90, 0xe3, 0x12, 0x2c, 0x1e, 0x92, 0x57, 0xbb, 0xf5, 0x7c, 0x59, + 0x45, 0x7a, 0xbc, 0x63, 0xc4, 0xde, 0xf6, 0xd0, 0xb1, 0x41, 0x96, 0x4f, 0x72, 0x8e, 0xd2, 0xe4, + 0x78, 0xb6, 0x97, 0x88, 0x6e, 0x3b, 0xf4, 0xd2, 0x72, 0x51, 0x1b, 0x4e, 0x1b, 0xd3, 0x8c, 0x5e, + 0x88, 0x70, 0x33, 0xfd, 0x8e, 0x36, 0xc0, 0xe6, 0xa9, 0xb6, 0xc5, 0x42, 0xd3, 0xeb, 0x00, 0x6d, + 0x85, 0x0b, 0xef, 0x4c, 0x11, 0x1a, 0xb2, 0xcf, 0x3a, 0x27, 0xfe, 0x25, 0x06, 0x4b, 0x2e, 0x8b, + 0x7b, 0xca, 0xe2, 0x2a, 0x46, 0x37, 0x43, 0xa2, 0x13, 0xfd, 0xce, 0xed, 0xee, 0x85, 0x58, 0x91, + 0x2f, 0xdd, 0x6f, 0xab, 0x81, 0x73, 0x89, 0x3c, 0xf8, 0x7b, 0x62, 0x49, 0x1b, 0xcf, 0xde, 0x5d, + 0xcf, 0xc3, 0x6f, 0x2b, 0x97, 0x08, 0x3e, 
0x0c, 0x43, 0x03, 0xfa, 0x46, 0xef, 0xb3, 0xb1, 0xbc, + 0xd1, 0x97, 0x8b, 0x0a, 0x3c, 0xfd, 0x97, 0xa7, 0xd9, 0x03, 0xba, 0xcf, 0x6b, 0x2c, 0x1c, 0xd5, + 0xfb, 0x6e, 0xc1, 0x29, 0x1a, 0x10, 0x33, 0x91, 0x4b, 0x44, 0x43, 0xd8, 0xed, 0x1a, 0x2e, 0x0b, + 0x2b, 0x15, 0xe2, 0xb7, 0x3f, 0x9f, 0x0c, 0xd5, 0x0b, 0xdf, 0xa0, 0x8f, 0x41, 0x33, 0x89, 0xa0, + 0xc3, 0x4b, 0xdb, 0xe5, 0x39, 0xdc, 0x71, 0x67, 0x62, 0x3f, 0xfa, 0xbd, 0x52, 0xc5, 0x91, 0xe5, + 0x5c, 0xdb, 0x74, 0x87, 0xed, 0x33, 0x58, 0xb4, 0x11, 0xa7, 0x0d, 0xc6, 0x55, 0xf0, 0x18, 0x89, + 0xb6, 0xf2, 0x60, 0x84, 0x85, 0x38, 0x7f, 0x8a, 0x9a, 0xb1, 0xe4, 0xe4, 0xc0, 0x17, 0x3d, 0x30, + 0x62, 0x9d, 0x4e, 0x68, 0x8b, 0xfc, 0x11, 0x5c, 0xd3, 0xcd, 0x24, 0x31, 0x00, 0x3a, 0x54, 0x21, + 0x3d, 0xc5, 0x1b, 0x96, 0x11, 0x1c, 0x87, 0x42, 0xf5, 0xec, 0xf2, 0x63, 0xca, 0xe1, 0x4f, 0xe0, + 0xbc, 0x07, 0xfc, 0xde, 0x5f, 0x43, 0xfa, 0x8e, 0x69, 0x45, 0x78, 0x33, 0xc0, 0x33, 0xe7, 0x86, + 0x12, 0x8d, 0x5a, 0x91, 0x6c, 0x03, 0x33, 0x96, 0x72, 0x89, 0x89, 0xf2, 0x79, 0xa9, 0x0a, 0x25, + 0x0c, 0x69, 0x6e, 0x7d, 0x28, 0x3e, 0x17, 0xd6, 0x21, 0x01, 0x24, 0xaa, 0x60, 0x20, 0x94, 0xe4, + 0x08, 0xf8, 0x1d, 0x76, 0x18, 0xfd, 0xa9, 0xd5, 0x1f, 0x95, 0x58, 0xf2, 0x3c, 0xa3, 0x45, 0x13, + 0x87, 0xf9, 0x4f, 0xd7, 0xcf, 0x0d, 0xab, 0x59, 0x86, 0xae, 0xf1, 0x4a, 0x9a, 0xbd, 0x5a, 0x3d, + 0xbb, 0x6c, 0x8c, 0x31, 0x0d, 0x78, 0x49, 0xd2, 0x30, 0x51, 0x0c, 0x8e, 0xc8, 0xad, 0xb7, 0x95, + 0x6a, 0x05, 0x19, 0xf1, 0xa4, 0x40, 0x7c, 0xf2, 0xd1, 0x16, 0x28, 0x08, 0x47, 0x60, 0xb3, 0x83, + 0x42, 0x9b, 0x82, 0xef, 0xc2, 0x11, 0xbb, 0x77, 0x36, 0x56, 0xde, 0x2d, 0x97, 0x44, 0x08, 0xa8, + 0xfc, 0x0b, 0x93, 0x28, 0x1e, 0x3b, 0x82, 0x11, 0xc8, 0x5f, 0x32, 0x8e, 0xf6, 0xce, 0xe4, 0x5a, + 0x27, 0x4d, 0x1f, 0x83, 0xa7, 0x2e, 0xdb, 0x4a, 0xf4, 0xbc, 0xfd, 0xe3, 0x2a, 0xe5, 0x87, 0x2a, + 0x51, 0x40, 0x40, 0xf0, 0xda, 0x4b, 0x52, 0xbb, 0x60, 0x12, 0xa0, 0x96, 0xc5, 0x37, 0x46, 0xc8, + 0x5a, 0x13, 0x11, 0x89, 0xe3, 0x33, 0x1f, 0x72, 0xed, 0x20, 0x2a, 0x8c, 0x56, 0xaa, 0xaa, 0xc0, + 0x6d, 0x2d, 0x1c, 0xba, 0x50, 0x16, 0x43, 0x8c, 0xc4, 0x00, 0xcc, 0x22, 0x3e, 0xa8, 0xc4, 0xde, + 0xe5, 0xf7, 0xe6, 0xc6, 0xe7, 0x3f, 0x89, 0xb4, 0xfd, 0x89, 0xf3, 0x66, 0x8b, 0x69, 0x37, 0x62, + 0x8f, 0x06, 0x5f, 0xc6, 0xc6, 0xe6, 0xc3, 0x83, 0xd4, 0x87, 0x0c, 0x65, 0x4f, 0xf0, 0x59, 0x29, + 0x51, 0x52, 0x0d, 0x93, 0xa3, 0x79, 0xeb, 0xc7, 0x39, 0xaa, 0xc5, 0x39, 0x82, 0xd9, 0xbc, 0xbe, + 0x31, 0x22, 0xbb, 0xdd, 0x2c, 0x23, 0x74, 0xdc, 0xb9, 0xf3, 0x00, 0x5d, 0xa8, 0x61, 0xab, 0x38, + 0x5e, 0xe0, 0xb5, 0xe3, 0xf1, 0xfb, 0x9b, 0x23, 0x22, 0x65, 0xbf, 0xf9, 0x3d, 0x93, 0x7f, 0xc7, + 0x58, 0xed, 0xc4, 0x38, 0x53, 0xbb, 0x80, 0xb1, 0x39, 0x1f, 0x9a, 0x6e, 0xd2, 0x17, 0x14, 0xb1, + 0x64, 0x6c, 0xa2, 0xe6, 0xf8, 0x3c, 0xd1, 0xac, 0x50, 0xbe, 0xcd, 0xa8, 0xcf, 0xeb, 0xc5, 0x70, + 0xbb, 0x9a, 0xa9, 0x1e, 0x66, 0xed, 0x09, 0xb3, 0x54, 0x75, 0xda, 0x65, 0xac, 0x0e, 0x45, 0xf0, + 0x7f, 0xd0, 0x11, 0x15, 0x08, 0x96, 0x7c, 0x2c, 0x5a, 0xe5, 0x50, 0xe7, 0x57, 0xe9, 0x35, 0x0e, + 0x96, 0x52, 0x9e, 0xc7, 0xd4, 0xf9, 0xa4, 0x09, 0x45, 0x71, 0x73, 0x5a, 0x03, 0x98, 0x78, 0x49, + 0x14, 0xb3, 0x55, 0xb1, 0x72, 0xb1, 0xfb, 0xc8, 0x8c, 0xa7, 0xea, 0xdd, 0xfb, 0x1e, 0xae, 0xd2, + 0x87, 0xc8, 0x13, 0xed, 0x2d, 0xb1, 0xf1, 0x8a, 0xc1, 0x80, 0x56, 0xe8, 0x52, 0x87, 0xd8, 0x77, + 0xa9, 0x4c, 0xb6, 0xca, 0x8d, 0x10, 0x38, 0x7b, 0xfd, 0x68, 0x5d, 0xaf, 0xa2, 0xc9, 0xd8, 0x40, + 0xce, 0x12, 0xd5, 0xd6, 0x08, 0x25, 0x52, 0x06, 0x2b, 0x6d, 0x2b, 0x2f, 0xbf, 0x7c, 0xfa, 0x49, + 0x40, 0x01, 0x50, 0x05, 0x6d, 0x4a, 0xed, 0x24, 0x2a, 0xa6, 0x6d, 
0x03, 0xbc, 0xe2, 0x06, 0x1a, + 0xe2, 0x65, 0xeb, 0xf5, 0xf9, 0xc2, 0x47, 0x07, 0x9a, 0x3a, 0xec, 0x99, 0xa3, 0x0e, 0xd5, 0x43, + 0x16, 0x7d, 0x90, 0x42, 0x59, 0xdc, 0x43, 0x9f, 0x93, 0x08, 0xdb, 0x65, 0xb2, 0xc1, 0x05, 0x38, + 0xe6, 0x5c, 0x29, 0xa7, 0x54, 0x45, 0xde, 0x6d, 0xaa, 0x5b, 0xac, 0x0f, 0xed, 0xeb, 0xb6, 0xc1, + 0x34, 0xee, 0xa4, 0xe9, 0x69, 0x15, 0x65, 0xb1, 0x8c, 0xfd, 0xf9, 0x9b, 0xae, 0x1e, 0xdb, 0xb0, + 0xe9, 0x16, 0xbe, 0x8b, 0x95, 0x8e, 0xf7, 0x58, 0xa0, 0x10, 0xd3, 0x73, 0x09, 0xc0, 0xa8, 0x9c, + 0xd1, 0x84, 0x34, 0xa9, 0xab, 0xdb, 0xae, 0x97, 0x11, 0xd5, 0x4c, 0x61, 0x49, 0xe4, 0x1e, 0xc0, + 0x0d, 0x9c, 0x40, 0x0a, 0x29, 0x62, 0xfd, 0x88, 0x76, 0x1e, 0x3e, 0xa1, 0x94, 0x74, 0x39, 0x86, + 0x97, 0xd0, 0x1f, 0xb3, 0xd1, 0x03, 0xf2, 0x22, 0xc7, 0x42, 0x26, 0x51, 0x8a, 0x7d, 0x79, 0x33, + 0x55, 0x5d, 0x5b, 0xcc, 0x6d, 0xb6, 0x85, 0x95, 0x9f, 0x2c, 0xee, 0x6c, 0xe4, 0x05, 0x7c, 0xfb, + 0x75, 0x14, 0xd5, 0xb6, 0xaf, 0x64, 0x38, 0x91, 0x0a, 0xe7, 0xf7, 0x15, 0xb1, 0xcf, 0xd9, 0x36, + 0x20, 0x9b, 0xe0, 0x28, 0xda, 0xd4, 0x17, 0xbb, 0xfa, 0x08, 0xbd, 0x7e, 0xa1, 0x6e, 0xe8, 0x5d, + 0xf5, 0x39, 0x81, 0x4c, 0xab, 0xb8, 0xf7, 0xd5, 0x0e, 0x22, 0x37, 0xda, 0x93, 0xbc, 0x1b, 0x8d, + 0x81, 0x73, 0x27, 0x38, 0x33, 0x4d, 0xc0, 0x7f, 0xb9, 0xb5, 0xa7, 0xe9, 0xcc, 0x27, 0x5d, 0xbf, + 0xbd, 0xa7, 0x14, 0xe6, 0x11, 0xcf, 0x21, 0x2a, 0x11, 0xb3, 0x73, 0x6a, 0xe7, 0x53, 0x0a, 0x0c, + 0x05, 0xb0, 0x95, 0x52, 0xf0, 0x8d, 0xa3, 0xf2, 0x63, 0xe5, 0x60, 0xa5, 0x1b, 0x95, 0x93, 0xfa, + 0xc2, 0x56, 0xaa, 0x02, 0xf3, 0x24, 0x8e, 0x40, 0xd5, 0x7b, 0xb0, 0xed, 0xf2, 0x4d, 0x1e, 0xf1, + 0x0b, 0xfd, 0xaa, 0x83, 0x3e, 0x8f, 0x11, 0xea, 0x1a, 0xcc, 0xd2, 0x57, 0x03, 0xd2, 0x93, 0xd7, + 0x87, 0x46, 0xc9, 0x68, 0xc9, 0x36, 0x6d, 0x14, 0x87, 0x07, 0x0b, 0xce, 0x25, 0xb8, 0xd4, 0x3b, + 0xb6, 0x63, 0x3c, 0x7d, 0xc8, 0x9e, 0x62, 0x3c, 0x1d, 0x5d, 0xbb, 0x2c, 0xc5, 0xb0, 0x63, 0x67, + 0xc8, 0x4f, 0x11, 0xf7, 0x03, 0x69, 0x07, 0x2e, 0x0b, 0x58, 0xdd, 0x92, 0xda, 0x35, 0xe2, 0xcf, + 0x75, 0x34, 0xdb, 0x82, 0xcf, 0x61, 0x46, 0xc8, 0x3b, 0x8a, 0x01, 0xac, 0x9b, 0xbf, 0x57, 0x75, + 0xf2, 0xbd, 0xc3, 0x93, 0x0d, 0x29, 0x96, 0x77, 0x8c, 0x73, 0xf3, 0x34, 0xca, 0x38, 0x21, 0x59, + 0x3f, 0x4e, 0xbd, 0xb5, 0xe9, 0x56, 0x5a, 0x39, 0xa7, 0x0c, 0xb6, 0xb4, 0x21, 0xc5, 0x62, 0xd4, + 0xb7, 0x92, 0x6a, 0x0d, 0xe4, 0x58, 0x7e, 0xd9, 0x5b, 0x77, 0x8c, 0x25, 0x73, 0x35, 0xb8, 0x37, + 0x0f, 0x6b, 0x0f, 0x17, 0xb4, 0x67, 0xad, 0x2e, 0xb3, 0xef, 0xae, 0xdc, 0xdf, 0x0e, 0x98, 0xcf, + 0x02, 0x01, 0x94, 0xb2, 0x95, 0xc1, 0x9e, 0xa6, 0x7a, 0x0b, 0x78, 0x09, 0xcb, 0x85, 0x89, 0x1a, + 0x4e, 0x46, 0x0c, 0x54, 0x8d, 0x96, 0x84, 0x41, 0x05, 0xb6, 0x05, 0xf5, 0x81, 0x0e, 0x22, 0x90, + 0x25, 0xc6, 0x54, 0x72, 0x24, 0x9b, 0x0a, 0xf5, 0xbc, 0xb7, 0xca, 0xff, 0x62, 0x51, 0x61, 0x45, + 0x3f, 0x46, 0x0a, 0x41, 0xd8, 0x04, 0x19, 0x29, 0x0a, 0x7f, 0x7f, 0x23, 0x30, 0xb9, 0xfc, 0xc6, + 0xcd, 0xab, 0x0e, 0x88, 0xf7, 0x9c, 0x53, 0x85, 0x50, 0xa4, 0x5f, 0x1c, 0x1f, 0x4d, 0x32, 0x81, + 0xcd, 0x18, 0x71, 0x0e, 0x15, 0xc9, 0xc7, 0xe7, 0x1f, 0x97, 0x2e, 0x4e, 0x28, 0xd5, 0xb1, 0xa7, + 0x21, 0x23, 0xa2, 0x90, 0x0d, 0x61, 0x2a, 0x15, 0x3e, 0xac, 0x8d, 0x19, 0x58, 0x49, 0xb0, 0x1e, + 0xce, 0x97, 0x95, 0x53, 0xf2, 0x38, 0xb0, 0xc0, 0x0e, 0xf5, 0xc8, 0xed, 0x40, 0xb7, 0x82, 0x5f, + 0x3e, 0xc6, 0x7f, 0x66, 0x18, 0x7e, 0x9a, 0xf0, 0xbe, 0x22, 0x1a, 0x79, 0xe0, 0xee, 0xa4, 0xb3, + 0x3e, 0xef, 0x98, 0x53, 0x0d, 0x85, 0x05, 0xfc, 0x22, 0x54, 0xe1, 0x5e, 0xa0, 0xe9, 0x28, 0x33, + 0x98, 0x34, 0x7a, 0x53, 0xff, 0x1e, 0xc9, 0x86, 0x83, 0x59, 0x9b, 0x7b, 0x38, 0x2a, 0xa0, 
0xf9, + 0x3f, 0x19, 0xf4, 0xb1, 0x07, 0xe3, 0xe1, 0xc9, 0x7a, 0x6b, 0x53, 0x4c, 0x89, 0x5a, 0x10, 0x57, + 0x29, 0xa8, 0x30, 0xda, 0xf2, 0x22, 0xd5, 0xec, 0x39, 0x34, 0x66, 0x9f, 0x48, 0x47, 0x66, 0xde, + 0xc3, 0x4c, 0xf5, 0xdb, 0xb2, 0xbd, 0xaf, 0xd8, 0x56, 0xb4, 0xdb, 0xe7, 0xca, 0x68, 0x02, 0xb2, + 0x8f, 0x80, 0x8d, 0x53, 0xb1, 0x38, 0x74, 0xfa, 0x60, 0xd6, 0xb2, 0x0f, 0xb0, 0xac, 0x4c, 0x87, + 0x0d, 0xa3, 0xfc, 0x10, 0x4f, 0x19, 0x77, 0x91, 0xe0, 0x64, 0xbd, 0x5c, 0x24, 0x93, 0xc1, 0xfa, + 0xbe, 0x0b, 0x66, 0x83, 0x2b, 0x36, 0x2c, 0xb7, 0x28, 0x66, 0xbc, 0x24, 0xd1, 0x5c, 0xdd, 0xc6, + 0xba, 0x58, 0xd1, 0xe8, 0xcf, 0xa0, 0xf2, 0xae, 0xce, 0x62, 0xe5, 0xf6, 0xd4, 0xa3, 0x55, 0x0c, + 0x91, 0xa7, 0x84, 0x74, 0x3e, 0x5f, 0x89, 0x18, 0xab, 0xf7, 0x66, 0xcc, 0x74, 0xc6, 0x88, 0xe0, + 0xdb, 0xb9, 0x6d, 0x2d, 0x83, 0x19, 0xef, 0x20, 0x2a, 0xd5, 0x5e, 0xd0, 0x5e, 0xb3, 0xc1, 0x1e, + 0x4c, 0xc5, 0x18, 0x7e, 0xe8, 0xbd, 0x89, 0xd3, 0xe6, 0xcd, 0x8e, 0x4c, 0x0d, 0x0b, 0xbc, 0xba, + 0x09, 0x38, 0x7b, 0xfb, 0xa6, 0x0b, 0xc2, 0x57, 0x84, 0xd3, 0xfe, 0xa4, 0xb2, 0xf1, 0xbd, 0xa2, + 0x8e, 0xd7, 0x3a, 0x39, 0x60, 0x5e, 0xb4, 0x1a, 0x4e, 0xd0, 0xf4, 0xed, 0x81, 0x29, 0x99, 0xe0, + 0x37, 0x6b, 0x7a, 0x87, 0xc3, 0x16, 0xd5, 0x6c, 0x99, 0xdc, 0xf8, 0x49, 0x7b, 0x65, 0xe5, 0xec, + 0x38, 0x77, 0xfc, 0xac, 0x87, 0x16, 0xeb, 0x3b, 0xb8, 0x28, 0x71, 0x4b, 0xaa, 0x9b, 0x25, 0x11, + 0x81, 0x83, 0x85, 0xd7, 0x6a, 0x69, 0x09, 0x56, 0x26, 0x2c, 0x48, 0xce, 0x6e, 0x72, 0xac, 0x85, + 0x35, 0xaf, 0x8e, 0xd2, 0x05, 0xb6, 0x35, 0xf5, 0xf1, 0x5d, 0x8f, 0xc3, 0x20, 0xe4, 0x01, 0x0c, + 0x1a, 0xe9, 0x86, 0xcf, 0x35, 0xbc, 0x6f, 0x8a, 0x7b, 0x7e, 0xa3, 0x5b, 0x48, 0x5d, 0x88, 0x77, + 0x79, 0x98, 0xf5, 0x82, 0x36, 0x1b, 0xb4, 0x1f, 0xd8, 0x00, 0x6d, 0x9a, 0x4d, 0xde, 0xf1, 0x9e, + 0x9c, 0xba, 0x48, 0x87, 0xda, 0x53, 0xbf, 0x5d, 0xdd, 0xdf, 0xfe, 0x78, 0x9b, 0xec, 0x07, 0x7f, + 0x6e, 0x58, 0xb0, 0x3e, 0x0e, 0xe3, 0xbb, 0xbb, 0xbd, 0x1f, 0x42, 0x57, 0x87, 0x98, 0xe1, 0x9c, + 0x11, 0xd3, 0x42, 0xc2, 0x98, 0x62, 0xc2, 0x5d, 0xb9, 0x2c, 0x1a, 0x0b, 0xa5, 0x73, 0xe7, 0x55, + 0x37, 0xd6, 0x4c, 0x57, 0xf8, 0x96, 0x95, 0x30, 0x01, 0xf3, 0x6a, 0x96, 0x0c, 0x34, 0x09, 0x1e, + 0x97, 0xdc, 0x36, 0xd1, 0x94, 0x54, 0x60, 0xa6, 0x27, 0x6e, 0xdb, 0x0c, 0xbd, 0x48, 0xe7, 0x4e, + 0x9a, 0x9d, 0xea, 0x5d, 0x92, 0x66, 0x47, 0x6f, 0x36, 0x97, 0xba, 0x72, 0x18, 0x18, 0xb6, 0xf9, + 0xea, 0x08, 0x7a, 0xbb, 0x40, 0x4b, 0x4b, 0x31, 0x0c, 0xf9, 0x4b, 0x73, 0x4d, 0xee, 0x3d, 0xe9, + 0x03, 0x60, 0x4b, 0x85, 0x00, 0x2e, 0xa6, 0x2f, 0x03, 0x80, 0x4b, 0x34, 0x4c, 0x3f, 0x77, 0xb6, + 0xd8, 0x8a, 0x92, 0x6b, 0x09, 0x8a, 0x2c, 0xea, 0x77, 0x05, 0x1b, 0x1c, 0xc5, 0xb0, 0x54, 0x12, + 0x2f, 0x58, 0x9a, 0x84, 0x72, 0xf8, 0xfc, 0x84, 0x7c, 0xa7, 0x07, 0x34, 0x1a, 0x30, 0x0a, 0x5d, + 0xf8, 0xa1, 0x7f, 0x4c, 0x7e, 0xcb, 0xac, 0xf0, 0xab, 0x2b, 0xa1, 0x62, 0xdf, 0xbb, 0x86, 0x48, + 0x71, 0xd9, 0x5a, 0x2d, 0xce, 0xbe, 0x7e, 0x37, 0x2e, 0xc9, 0xfc, 0x4e, 0xed, 0x13, 0xdd, 0xf0, + 0xcb, 0x30, 0x3c, 0xd1, 0x8c, 0xa2, 0x63, 0x1e, 0xb2, 0x1c, 0x9e, 0x19, 0xf3, 0x66, 0xb4, 0x34, + 0xce, 0xcb, 0xed, 0x34, 0xad, 0x54, 0xb9, 0x19, 0x19, 0x9a, 0xa2, 0x9a, 0x52, 0xd1, 0x53, 0x09, + 0xb6, 0x00, 0x40, 0xe9, 0x72, 0x2a, 0x01, 0x7f, 0x9c, 0xd4, 0x09, 0x3d, 0xd1, 0x8c, 0x35, 0x83, + 0xad, 0x46, 0x2b, 0xfe, 0x91, 0xac, 0x5d, 0xc9, 0x5d, 0xf1, 0x04, 0x79, 0xf5, 0x7b, 0x7c, 0xab, + 0xcc, 0x91, 0xf8, 0xc9, 0xeb, 0xa1, 0xe1, 0x02, 0x26, 0xb2, 0x51, 0x90, 0xfe, 0xc2, 0x2d, 0xdf, + 0x70, 0x02, 0xc9, 0x98, 0x4a, 0xc9, 0xbb, 0xdc, 0x58, 0x19, 0x77, 0xd7, 0x71, 0x4d, 0xa2, 0xf7, + 0x4e, 0x4a, 0x75, 
0x3d, 0x59, 0x31, 0x98, 0x20, 0x87, 0xf5, 0x5c, 0x1b, 0x69, 0xe1, 0xe7, 0xf6, + 0xbb, 0xdb, 0x27, 0x02, 0xfb, 0xce, 0xca, 0xbb, 0x44, 0xc7, 0xd4, 0x4b, 0xae, 0x45, 0x18, 0x50, + 0x56, 0xff, 0x20, 0x1e, 0x03, 0x36, 0xdf, 0x05, 0xeb, 0xc6, 0x10, 0x42, 0xf3, 0x09, 0xd9, 0x7c, + 0xfe, 0xb4, 0x03, 0x98, 0x96, 0x7a, 0x44, 0x52, 0x4f, 0x39, 0x8e, 0x86, 0xef, 0x06, 0x8a, 0x58, + 0x3f, 0xe5, 0xfd, 0xec, 0x73, 0xae, 0xb9, 0xfd, 0x52, 0xc5, 0xf7, 0xd3, 0x60, 0xbe, 0x9e, 0x1e, + 0x99, 0xf3, 0x23, 0x29, 0x77, 0x6c, 0x21, 0x4e, 0xe4, 0xfb, 0x71, 0x3b, 0x21, 0xed, 0x95, 0x3c, + 0x36, 0xbb, 0xaf, 0x60, 0xfc, 0x09, 0x64, 0x79, 0xba, 0xf9, 0x78, 0x84, 0xa5, 0x4b, 0x76, 0xc1, + 0x21, 0x06, 0xce, 0x27, 0x47, 0xa8, 0xc8, 0xe6, 0x75, 0x1d, 0x9f, 0xe6, 0x86, 0xd7, 0x6c, 0x1f, + 0x64, 0x06, 0x2d, 0xe4, 0xb0, 0xc6, 0x00, 0x85, 0x46, 0x86, 0xa8, 0xc5, 0x36, 0x1f, 0xdd, 0x60, + 0xd7, 0x5e, 0xbd, 0x95, 0x92, 0x4e, 0x90, 0x5b, 0x7d, 0xb0, 0xf4, 0x6b, 0x7f, 0xc8, 0x4c, 0x7f, + 0x11, 0xd5, 0x79, 0x49, 0xc3, 0xa8, 0x2a, 0x38, 0xff, 0x51, 0x40, 0x21, 0x46, 0x79, 0x51, 0x54, + 0xc3, 0xb7, 0xcf, 0x2a, 0xe3, 0x81, 0xe4, 0xc8, 0x90, 0xae, 0xf5, 0x00, 0xab, 0x57, 0xb4, 0x9f, + 0x18, 0xb7, 0xed, 0x23, 0x42, 0x7b, 0x7c, 0x5b, 0x8f, 0x17, 0x23, 0xa8, 0x68, 0xb6, 0xb6, 0x61, + 0xe9, 0xd8, 0x48, 0x61, 0x8d, 0x88, 0x24, 0xd9, 0xba, 0x0b, 0x2b, 0xcd, 0x2b, 0xd1, 0xbb, 0x72, + 0xad, 0xe8, 0xe8, 0x11, 0xba, 0x82, 0xd4, 0xe7, 0x21, 0x38, 0xf9, 0xd6, 0x5c, 0x2e, 0xd2, 0xf9, + 0x40, 0xf4, 0xa6, 0x6c, 0x00, 0x31, 0xfd, 0xc1, 0x73, 0x8a, 0x4c, 0xea, 0x39, 0x97, 0xd4, 0xe0, + 0xae, 0x50, 0x10, 0x19, 0x9c, 0xd6, 0xe0, 0x73, 0x52, 0xf6, 0x4a, 0x26, 0x0f, 0x0e, 0x92, 0xc4, + 0xf1, 0xe4, 0x06, 0x59, 0xa3, 0x3f, 0x4d, 0xc1, 0x13, 0x7f, 0xc0, 0xd2, 0x8d, 0x15, 0x02, 0x8f, + 0xb7, 0xf0, 0x8c, 0x34, 0x9e, 0xfc, 0xec, 0x60, 0xc0, 0xff, 0x45, 0x33, 0xda, 0x81, 0x5f, 0x9f, + 0x38, 0x27, 0x8d, 0x52, 0x29, 0xb8, 0xc7, 0x4d, 0xbf, 0xb0, 0xe1, 0xd9, 0x55, 0x0d, 0x32, 0xa2, + 0x20, 0x8e, 0x45, 0x1e, 0xe9, 0x3d, 0x5f, 0xc4, 0x46, 0xe9, 0x0d, 0x16, 0x7a, 0xac, 0x05, 0x0a, + 0xc8, 0xf3, 0x34, 0x4e, 0xa5, 0x6b, 0x88, 0xa1, 0xda, 0xc9, 0xeb, 0x56, 0xe6, 0xb4, 0xf9, 0x49, + 0x34, 0x01, 0x55, 0xf6, 0xb4, 0x5d, 0xe2, 0x95, 0x75, 0x6a, 0xd9, 0x64, 0x40, 0x6c, 0xda, 0xdb, + 0x02, 0xd4, 0x96, 0x55, 0x38, 0xa5, 0x21, 0x9b, 0xe3, 0xe5, 0x4c, 0xb1, 0x90, 0xaf, 0x61, 0xda, + 0x4e, 0x1f, 0x40, 0xb3, 0x2e, 0x23, 0xd3, 0x5c, 0xeb, 0xf9, 0x39, 0x0a, 0xf6, 0x7d, 0x7d, 0xbf, + 0xe0, 0x06, 0x8b, 0x96, 0xbd, 0x22, 0x9a, 0x29, 0xf6, 0x82, 0xd5, 0x1c, 0x05, 0xeb, 0x61, 0x8f, + 0xac, 0x49, 0x49, 0x09, 0xfa, 0xf6, 0x72, 0x45, 0xe8, 0x33, 0xcf, 0xd2, 0xdb, 0x01, 0x8a, 0x4f, + 0x77, 0xb3, 0x6c, 0x01, 0xdd, 0xe9, 0x0e, 0x47, 0x23, 0xff, 0x6f, 0x64, 0xe2, 0x14, 0x47, 0x49, + 0x86, 0xc9, 0xdc, 0x8b, 0x13, 0xd8, 0x66, 0x1e, 0x17, 0x34, 0xdd, 0xf3, 0x96, 0x79, 0x6f, 0xc0, + 0xc1, 0xc0, 0x89, 0x48, 0xd9, 0x04, 0x56, 0x0c, 0x7b, 0xd7, 0xf8, 0x39, 0x8c, 0x36, 0x31, 0xe2, + 0x4e, 0x54, 0x2f, 0x44, 0xf6, 0x28, 0x6f, 0xba, 0x78, 0xc3, 0x3b, 0x51, 0x2a, 0xdd, 0x91, 0xe9, + 0x09, 0x46, 0xa2, 0xcf, 0xfa, 0xa4, 0x9b, 0x4c, 0x9e, 0xc5, 0xec, 0xf9, 0xaa, 0xf1, 0xac, 0xe4, + 0x86, 0xe3, 0xe5, 0x68, 0x06, 0x6e, 0x78, 0xf4, 0x9b, 0xaa, 0xb9, 0x0b, 0x88, 0x9c, 0x0d, 0xdb, + 0xba, 0xe6, 0xa9, 0xd6, 0x9d, 0xdb, 0xb2, 0xb1, 0xf0, 0x0d, 0xa9, 0xdb, 0x9c, 0xe5, 0x86, 0x1e, + 0xf4, 0x1b, 0xe4, 0x43, 0x26, 0x9f, 0x65, 0x8a, 0x8d, 0x07, 0x51, 0x64, 0x87, 0x4e, 0xc0, 0x15, + 0x69, 0x54, 0xf6, 0xe4, 0x67, 0x1f, 0xa5, 0xa5, 0x9a, 0x6a, 0x25, 0x3f, 0x99, 0x16, 0xeb, 0x7a, + 0x36, 0x1c, 0xb6, 0xf1, 0xef, 0xd7, 0x13, 
0xf9, 0x6a, 0x2c, 0xa2, 0x2a, 0x91, 0x6c, 0xe6, 0xae, + 0xfe, 0x15, 0x6e, 0x8c, 0x89, 0x5a, 0x5e, 0x2e, 0x31, 0xa7, 0xe4, 0x7b, 0x70, 0x70, 0xd6, 0x40, + 0x17, 0x7c, 0xd2, 0x64, 0x96, 0xa8, 0x92, 0x92, 0x66, 0xe5, 0x1c, 0xb4, 0xa2, 0x98, 0xf7, 0xaf, + 0x20, 0x70, 0xb9, 0x30, 0x2f, 0x6a, 0x77, 0x7a, 0x7c, 0x8a, 0xbc, 0xd7, 0xa1, 0xba, 0xd7, 0x42, + 0xb9, 0x1d, 0x2d, 0x0c, 0xd5, 0x70, 0xc3, 0x1c, 0x14, 0x6a, 0xf2, 0x68, 0x09, 0xf6, 0x41, 0x31, + 0x5e, 0x1a, 0xa2, 0xa2, 0x62, 0x99, 0x63, 0x83, 0x48, 0xbd, 0x29, 0x63, 0xb5, 0x75, 0xf0, 0xc6, + 0xff, 0xe2, 0x11, 0xa2, 0x26, 0x78, 0x2a, 0x46, 0x95, 0xef, 0x57, 0xc1, 0x50, 0xb3, 0x88, 0x0d, + 0x1c, 0x96, 0x3b, 0xa7, 0x7e, 0xad, 0xb2, 0xda, 0x2a, 0x56, 0x3b, 0xc8, 0xdd, 0xa7, 0x28, 0xc7, + 0x75, 0x51, 0xd2, 0x3f, 0x83, 0x25, 0x2b, 0xb9, 0x72, 0x42, 0x61, 0xeb, 0xe0, 0x82, 0xaa, 0x85, + 0x15, 0xee, 0x2e, 0x8d, 0x51, 0xc2, 0xf4, 0x08, 0x32, 0x9d, 0x24, 0xd5, 0xcd, 0x8d, 0xd3, 0x07, + 0xe2, 0xa6, 0x0a, 0x92, 0xbc, 0xf0, 0x6c, 0x18, 0x94, 0x92, 0x55, 0x56, 0xf4, 0x62, 0xcc, 0x8a, + 0xb6, 0xfe, 0x7e, 0x59, 0xcb, 0xcb, 0x80, 0xf8, 0x35, 0x1b, 0xb0, 0x67, 0x6b, 0xc3, 0x3b, 0x0d, + 0xd9, 0x5e, 0xef, 0x0a, 0x11, 0x5f, 0x40, 0xa1, 0x3d, 0xd3, 0xcd, 0x7d, 0x86, 0xd2, 0xb9, 0xf1, + 0xf7, 0x13, 0x2a, 0x61, 0x0a, 0x2e, 0x9a, 0x44, 0x6d, 0x84, 0x97, 0xc4, 0x63, 0x4e, 0x50, 0x33, + 0x3d, 0x92, 0x28, 0xaa, 0x1a, 0x6b, 0x59, 0x81, 0xef, 0x8c, 0x8d, 0x7b, 0xda, 0x68, 0x42, 0x8e, + 0x61, 0x58, 0xf4, 0x1f, 0x19, 0x34, 0x8c, 0x43, 0x9b, 0x95, 0xf9, 0xe8, 0x5a, 0xac, 0x28, 0x86, + 0x17, 0xe9, 0x60, 0x0d, 0x38, 0x9d, 0xac, 0x98, 0x45, 0xb9, 0x6d, 0x10, 0x92, 0x63, 0x5a, 0x80, + 0xbe, 0x99, 0x54, 0x91, 0xdc, 0x4c, 0x01, 0xfb, 0x90, 0x5d, 0x00, 0x90, 0x88, 0x06, 0x97, 0x2f, + 0xe0, 0x43, 0xda, 0x3e, 0x3d, 0x16, 0x03, 0x5e, 0x2b, 0x61, 0xaa, 0xaa, 0xe8, 0x8c, 0x55, 0x97, + 0xfd, 0x58, 0x37, 0x0c, 0x11, 0x34, 0x9f, 0xc9, 0xe0, 0x1f, 0x62, 0xab, 0xee, 0x04, 0xe5, 0xf4, + 0x0e, 0xf1, 0xdb, 0x21, 0x03, 0xd0, 0x0f, 0xa9, 0xbe, 0x96, 0x1a, 0xc1, 0x34, 0x99, 0x6e, 0xd1, + 0xb6, 0xa2, 0x5a, 0xfe, 0x13, 0x84, 0x13, 0x18, 0xf5, 0xfa, 0x38, 0x02, 0x41, 0x67, 0x1d, 0x22, + 0xda, 0xe8, 0x9d, 0xc3, 0x62, 0xb6, 0x8a, 0xa5, 0x8e, 0x0c, 0xa2, 0xb2, 0x3c, 0x93, 0x9b, 0x1c, + 0x42, 0x56, 0xe6, 0xf5, 0xb3, 0x43, 0x7c, 0x42, 0x78, 0x09, 0x51, 0xb5, 0x04, 0xd2, 0x36, 0xa8, + 0x4a, 0xbc, 0x3a, 0x7d, 0x6f, 0x95, 0xa6, 0x3a, 0x10, 0xe5, 0xa4, 0x55, 0x4a, 0xe6, 0x54, 0xae, + 0x9b, 0xcc, 0xc7, 0x03, 0xca, 0x8b, 0x8f, 0xbf, 0xe8, 0x71, 0x0f, 0xe0, 0xde, 0xa1, 0x5d, 0x03, + 0x3a, 0x0e, 0xb4, 0x76, 0x75, 0xa6, 0x84, 0x47, 0x48, 0x71, 0x94, 0x78, 0xbe, 0x01, 0xbd, 0x7d, + 0x63, 0x09, 0x41, 0x38, 0x93, 0xef, 0x93, 0x72, 0x03, 0x50, 0x88, 0xee, 0x5b, 0xb3, 0x49, 0x9a, + 0x21, 0x73, 0x83, 0xa1, 0x17, 0x4f, 0x62, 0x04, 0xde, 0xa9, 0x32, 0xc4, 0x19, 0x36, 0x77, 0x09, + 0xad, 0xdc, 0x0a, 0xb9, 0xbd, 0xec, 0xca, 0x8e, 0xfe, 0xab, 0x03, 0x3a, 0x98, 0xc2, 0x75, 0x37, + 0x8b, 0x84, 0x75, 0x7c, 0x5d, 0x63, 0x69, 0x1f, 0x47, 0x95, 0x16, 0x60, 0xd3, 0x38, 0xb9, 0xe7, + 0xe5, 0xd4, 0x6c, 0x5a, 0x65, 0x28, 0x1f, 0xc0, 0x1d, 0xbc, 0xb8, 0x82, 0x16, 0x07, 0x5e, 0x3c, + 0xb1, 0xbc, 0xcd, 0xa1, 0x57, 0xc9, 0x1b, 0x7a, 0x64, 0x1c, 0xb4, 0x71, 0xd7, 0x33, 0x22, 0x3c, + 0x05, 0x35, 0x0b, 0x06, 0x7d, 0x40, 0x34, 0x97, 0x8b, 0x63, 0x38, 0x71, 0xd3, 0x5e, 0xce, 0xc8, + 0x10, 0xfc, 0xe6, 0x07, 0xec, 0xa3, 0x17, 0xb1, 0xd1, 0x59, 0x81, 0x22, 0xf9, 0x12, 0xf3, 0x60, + 0x2d, 0xfb, 0x63, 0x05, 0xa4, 0x65, 0x64, 0x14, 0x0b, 0xe2, 0x95, 0xfc, 0x4d, 0x6b, 0x29, 0x3e, + 0xca, 0x45, 0x9b, 0x42, 0x27, 0x0b, 0x6a, 0x34, 0x63, 0x7b, 0x3e, 
0xa7, 0xa8, 0x1e, 0x85, 0x6e, + 0xba, 0x3b, 0xba, 0xa8, 0x64, 0x0c, 0x00, 0x22, 0xe8, 0xd5, 0xa5, 0xbb, 0xda, 0xcd, 0x0f, 0x94, + 0xc9, 0xc8, 0x36, 0xf6, 0x13, 0xc6, 0xae, 0x27, 0x65, 0x5e, 0xa7, 0xc4, 0x83, 0x1f, 0x6d, 0xc5, + 0x49, 0x7b, 0x9e, 0xa7, 0xf2, 0x30, 0x7e, 0xdb, 0x39, 0x2e, 0x52, 0x8f, 0x02, 0xb6, 0xc7, 0x7c, + 0xd5, 0x5b, 0xad, 0x16, 0xb2, 0xec, 0xb6, 0x18, 0xff, 0xa7, 0xeb, 0x34, 0xc6, 0xb9, 0xed, 0x74, + 0x7e, 0xdb, 0x9f, 0xbb, 0x5c, 0x50, 0xe2, 0xb1, 0x1a, 0x73, 0xff, 0x5c, 0x57, 0x9e, 0xa1, 0x74, + 0xc1, 0xa5, 0x7a, 0xf3, 0x7d, 0xd0, 0x46, 0x43, 0x32, 0xab, 0xc6, 0xba, 0x88, 0xe9, 0xd7, 0xa2, + 0xc3, 0xd5, 0x45, 0xb8, 0x1e, 0xf9, 0xde, 0xaf, 0x22, 0x98, 0x2b, 0x6b, 0x9a, 0x14, 0x61, 0xc5, + 0x25, 0x80, 0xbe, 0x0d, 0xac, 0x14, 0x01, 0x37, 0x23, 0x12, 0x95, 0x24, 0xa3, 0xb0, 0x78, 0xaa, + 0x29, 0x20, 0x0c, 0x9e, 0x4d, 0xbd, 0x37, 0x34, 0x54, 0xb2, 0x54, 0xa6, 0xe3, 0x0d, 0x6c, 0x61, + 0x28, 0x6f, 0x7a, 0xa9, 0xa3, 0x5f, 0x33, 0x89, 0x7e, 0xc8, 0x74, 0xbf, 0x92, 0xa7, 0x4e, 0x5c, + 0xc3, 0x3a, 0x8f, 0xf8, 0x22, 0x39, 0x34, 0xd1, 0x7d, 0x7f, 0x4a, 0x55, 0x17, 0xb6, 0x9d, 0x9a, + 0x8d, 0x4c, 0x45, 0x5d, 0x9c, 0xb3, 0x5b, 0x4f, 0x9e, 0xcf, 0xb4, 0x23, 0xf7, 0x27, 0x1c, 0x4d, + 0xc0, 0x0a, 0x54, 0x39, 0x6c, 0x6a, 0x1b, 0x74, 0x22, 0xb9, 0x54, 0x70, 0xce, 0xb2, 0x38, 0x70, + 0x48, 0x1a, 0xfa, 0x71, 0x42, 0x4a, 0x24, 0x03, 0xba, 0x83, 0x0e, 0x87, 0x5a, 0x7c, 0x6f, 0x65, + 0x79, 0x4a, 0x13, 0xec, 0xe2, 0xb7, 0x17, 0x14, 0x17, 0xb9, 0x55, 0xe1, 0xe6, 0x0d, 0x28, 0xee, + 0x6d, 0x1a, 0xba, 0x8b, 0xb2, 0x7a, 0x00, 0x54, 0x6f, 0x80, 0x34, 0xb7, 0xd7, 0x06, 0x10, 0x65, + 0x92, 0xa1, 0xb6, 0x32, 0xce, 0xb8, 0xb3, 0x29, 0xe0, 0xe2, 0xd3, 0xc9, 0x51, 0xcd, 0xb4, 0x37, + 0xb9, 0xc7, 0x25, 0x71, 0xc4, 0x5e, 0x4e, 0xce, 0xc7, 0x3f, 0xd1, 0x43, 0xee, 0x76, 0xae, 0x49, + 0xda, 0x8b, 0x6d, 0x95, 0x4d, 0x2f, 0xcf, 0x14, 0xf2, 0xc0, 0x90, 0xd5, 0x28, 0x74, 0x10, 0xbf, + 0xe1, 0x0b, 0x72, 0x74, 0x7e, 0x29, 0x7b, 0x0c, 0x34, 0xe0, 0xb6, 0xb3, 0x38, 0xb7, 0xfb, 0x46, + 0x91, 0xda, 0x59, 0x40, 0xc5, 0x0b, 0xfd, 0xea, 0x63, 0x12, 0xaf, 0xdb, 0xb7, 0xa3, 0xc6, 0x3f, + 0xa3, 0xe8, 0xec, 0xcf, 0xfa, 0xe9, 0x5f, 0xc5, 0x7c, 0x6c, 0x93, 0x23, 0x08, 0x53, 0xa3, 0x99, + 0x39, 0x6a, 0x93, 0xeb, 0x06, 0xed, 0x57, 0xac, 0x96, 0x9f, 0x92, 0x71, 0x29, 0x2b, 0x74, 0x6c, + 0xd3, 0x03, 0xcb, 0xa7, 0x1c, 0xfe, 0x9b, 0xdc, 0xc2, 0x6a, 0x2b, 0xe0, 0x48, 0x2a, 0xd1, 0xba, + 0x51, 0x44, 0x03, 0x66, 0xdc, 0x05, 0x49, 0xde, 0x86, 0x35, 0xb5, 0x26, 0x94, 0xde, 0x1a, 0xa6, + 0xda, 0x04, 0xc5, 0x0c, 0x47, 0xf8, 0xc0, 0x63, 0x1d, 0x2e, 0x10, 0x63, 0x59, 0x63, 0xa5, 0xfd, + 0xd0, 0xbf, 0x36, 0xde, 0x34, 0x2a, 0x8a, 0x54, 0x8f, 0xaf, 0x0c, 0xa1, 0x44, 0xaa, 0x19, 0x48, + 0xe2, 0x75, 0xf2, 0xcb, 0x5c, 0xfe, 0x5b, 0xd4, 0xab, 0x34, 0x4e, 0x8f, 0xea, 0x71, 0x79, 0x51, + 0xc4, 0x63, 0x5a, 0xee, 0xc3, 0xf3, 0x0d, 0x73, 0x47, 0x9b, 0x2d, 0x06, 0x0e, 0x03, 0x48, 0xfa, + 0x3a, 0xce, 0xd2, 0x9a, 0x5f, 0xb2, 0xe6, 0xbe, 0x06, 0x9f, 0xda, 0xbe, 0xca, 0x1e, 0xcc, 0xb8, + 0x4a, 0x4d, 0x58, 0x83, 0x64, 0xbf, 0x85, 0x88, 0xa9, 0x40, 0xbe, 0x9a, 0x5c, 0x68, 0xc3, 0x27, + 0x32, 0x77, 0x6a, 0x55, 0xa4, 0x2c, 0x4a, 0x99, 0x44, 0x75, 0x6e, 0x7b, 0x74, 0x5b, 0xb2, 0x76, + 0xed, 0x4a, 0x68, 0x5a, 0x57, 0xf0, 0xea, 0xa3, 0x54, 0x30, 0x85, 0x17, 0x84, 0xba, 0x7a, 0xb6, + 0x6a, 0x82, 0xd3, 0xd6, 0xad, 0xdf, 0xd7, 0xfd, 0x50, 0xe9, 0xf2, 0xa5, 0xe1, 0x7e, 0x5c, 0x25, + 0xe9, 0x32, 0x67, 0x61, 0xdd, 0xd0, 0x57, 0xc1, 0xc1, 0x8c, 0x18, 0xd6, 0xa3, 0x40, 0x8b, 0x8f, + 0x92, 0x42, 0x57, 0xc5, 0xaa, 0x51, 0x9d, 0x80, 0x4c, 0x84, 0x25, 0xc7, 0xf3, 0x02, 0x79, 
0x44, + 0x69, 0x7d, 0x74, 0xd3, 0x50, 0xba, 0xe9, 0x1c, 0xcd, 0xa1, 0x56, 0x3f, 0xe8, 0x12, 0x3a, 0x4f, + 0x8d, 0x79, 0x98, 0x31, 0x53, 0x79, 0x25, 0x8f, 0xe0, 0xc0, 0x4f, 0x3f, 0xe8, 0x7b, 0xff, 0xad, + 0xc8, 0x73, 0xb1, 0xe9, 0x6d, 0x78, 0xed, 0x5e, 0x6b, 0x54, 0x4f, 0xdf, 0xad, 0x1c, 0xb9, 0xb4, + 0x98, 0x36, 0xf8, 0xc5, 0xe0, 0x50, 0xec, 0xb8, 0x11, 0xd9, 0x0c, 0x49, 0x10, 0x7b, 0x6a, 0x5b, + 0x01, 0xb4, 0xfb, 0xbd, 0xe6, 0x18, 0x12, 0xa3, 0xd0, 0x61, 0x3b, 0x8f, 0x61, 0x53, 0x6c, 0xb4, + 0x51, 0x98, 0x68, 0x36, 0x06, 0x92, 0x48, 0x5f, 0xc4, 0x2b, 0x48, 0xc8, 0x74, 0x16, 0x3e, 0x01, + 0x2e, 0x14, 0xd3, 0x98, 0xbf, 0xec, 0x48, 0x97, 0x4e, 0xde, 0xd3, 0x78, 0x69, 0xa5, 0xc2, 0x32, + 0x51, 0x9b, 0xa1, 0xc4, 0xdd, 0x06, 0xf3, 0xa5, 0xb7, 0x45, 0x4a, 0xe4, 0x9e, 0x4e, 0x73, 0x3a, + 0xec, 0xdf, 0x38, 0x88, 0xc6, 0x8a, 0xd3, 0x51, 0xd3, 0xf5, 0x4d, 0xa8, 0xec, 0x13, 0x73, 0xec, + 0x26, 0xc4, 0x03, 0xc6, 0x65, 0x34, 0x4a, 0x80, 0x58, 0x84, 0xb9, 0x86, 0xd4, 0x92, 0xd6, 0x8a, + 0xfe, 0x83, 0xbc, 0x46, 0x24, 0x27, 0xfe, 0xcf, 0x93, 0xa2, 0x29, 0x95, 0xb8, 0x96, 0x93, 0x99, + 0x2e, 0x44, 0xa4, 0xcb, 0x2f, 0x7c, 0x6b, 0x29, 0x5b, 0xeb, 0xed, 0x26, 0x48, 0xf7, 0x92, 0x9a, + 0xe2, 0x0e, 0x40, 0xd8, 0xf5, 0xb9, 0xcd, 0xaa, 0x26, 0xb9, 0x71, 0xab, 0x37, 0x64, 0x39, 0xb5, + 0x4e, 0xec, 0x1d, 0x4e, 0x87, 0x34, 0x38, 0xcc, 0x12, 0x68, 0xc9, 0x0b, 0x2d, 0x40, 0xe7, 0xa8, + 0xa0, 0x97, 0x10, 0x6b, 0x0f, 0x13, 0xc2, 0x6e, 0xb4, 0x3e, 0x39, 0x04, 0x25, 0xa4, 0xdc, 0x3d, + 0x9b, 0x25, 0x02, 0x34, 0x3f, 0x31, 0x5e, 0x9c, 0x3a, 0x8d, 0x28, 0x83, 0x9b, 0x66, 0xfa, 0xed, + 0x12, 0x2a, 0xe8, 0xdb, 0xd2, 0xa2, 0x0e, 0x3c, 0x62, 0x3a, 0xc8, 0x7f, 0x73, 0x3f, 0xb4, 0xbb, + 0xd5, 0xcb, 0x80, 0x53, 0x41, 0x36, 0x8f, 0x45, 0xbe, 0x19, 0x43, 0x96, 0x1c, 0x26, 0x2b, 0xae, + 0x46, 0x82, 0x6c, 0x1a, 0xca, 0x8a, 0x34, 0x5a, 0xad, 0xef, 0x91, 0x13, 0xcf, 0x87, 0x43, 0xd4, + 0x62, 0xe8, 0x36, 0xb9, 0xd5, 0x9f, 0x54, 0xaa, 0xf8, 0x02, 0x86, 0x8a, 0x6b, 0x80, 0xfc, 0x9f, + 0x90, 0xe9, 0xd3, 0xc6, 0xb1, 0x36, 0xbc, 0x6c, 0x19, 0x32, 0x85, 0xe8, 0x9b, 0x37, 0x97, 0x6e, + 0xa0, 0x55, 0x16, 0xf0, 0x57, 0x1b, 0xc2, 0xf6, 0x75, 0xd5, 0xe0, 0x30, 0x97, 0x46, 0xe5, 0xf6, + 0x64, 0xd9, 0xd5, 0x3d, 0x3f, 0x41, 0xb0, 0xb8, 0x40, 0x71, 0x4b, 0x80, 0x3d, 0x1d, 0xf3, 0xde, + 0x77, 0xe4, 0xf4, 0xfb, 0x5a, 0x10, 0x02, 0x40, 0xc6, 0xf1, 0xdc, 0xd1, 0xe7, 0x34, 0x4f, 0xf7, + 0xe4, 0x48, 0x12, 0x85, 0x69, 0x97, 0xd0, 0x88, 0xf0, 0x79, 0x77, 0xfb, 0x98, 0x79, 0xd4, 0x7c, + 0xd3, 0x31, 0xa4, 0x72, 0xa7, 0x1c, 0x1d, 0xb0, 0x6e, 0x25, 0x9b, 0x4b, 0x52, 0x4b, 0x58, 0x02, + 0x06, 0xff, 0x48, 0x9f, 0xc4, 0x33, 0x83, 0x8e, 0x36, 0x46, 0x96, 0xdc, 0xb0, 0x3f, 0x64, 0x0a, + 0x17, 0xa3, 0xd9, 0xba, 0xb3, 0x34, 0x8c, 0x9c, 0x52, 0x75, 0x23, 0x10, 0x52, 0x08, 0xf4, 0x3c, + 0x2d, 0x85, 0x96, 0x8c, 0x79, 0xda, 0xed, 0xe8, 0x65, 0xca, 0xa7, 0x8f, 0x25, 0x6f, 0xbb, 0xbb, + 0xb9, 0x82, 0x9d, 0xf6, 0xda, 0x27, 0x63, 0x89, 0x02, 0xd8, 0x14, 0x34, 0x64, 0xf4, 0x50, 0x19, + 0xed, 0x39, 0x8a, 0x00, 0x5c, 0xb3, 0x9f, 0x0f, 0xc1, 0x8a, 0x81, 0x44, 0xbd, 0xb7, 0xb1, 0x4f, + 0x22, 0xb4, 0xbe, 0x2b, 0x6d, 0x60, 0x58, 0x0d, 0xcb, 0x43, 0xdf, 0x55, 0xcb, 0x9c, 0xc4, 0x66, + 0x4e, 0x91, 0xf3, 0x80, 0x7c, 0x0c, 0xa8, 0x20, 0xbb, 0xc7, 0x2b, 0x3c, 0x9c, 0xa7, 0x0e, 0x21, + 0x1e, 0x52, 0x2f, 0xb3, 0x1c, 0x30, 0x77, 0x18, 0x07, 0xc6, 0xef, 0xf3, 0xe7, 0x26, 0x4a, 0xd1, + 0x15, 0x00, 0x87, 0xe6, 0xb8, 0xcc, 0xbb, 0x1e, 0x64, 0x81, 0x98, 0x1b, 0xd5, 0xb4, 0x42, 0x74, + 0xe9, 0x74, 0xaf, 0x57, 0xdb, 0x24, 0xdb, 0xca, 0x66, 0xf5, 0xfe, 0xee, 0x90, 0xa0, 0x58, 0x62, + 0xf7, 0xdb, 0x0c, 
0xc5, 0xdd, 0x78, 0x5a, 0xc2, 0x73, 0x60, 0x06, 0xc9, 0xf7, 0xfc, 0x98, 0x0a, + 0x84, 0x9c, 0x4b, 0x60, 0x05, 0xb0, 0x9e, 0x11, 0xc1, 0xed, 0xaa, 0xae, 0xe3, 0x48, 0xcf, 0x34, + 0x17, 0x4d, 0x3c, 0x02, 0xdf, 0x31, 0xa0, 0xc3, 0xf4, 0xf6, 0x08, 0xba, 0x4e, 0xee, 0x90, 0x04, + 0xa5, 0x61, 0xee, 0xf2, 0x2b, 0x33, 0x46, 0x6e, 0x47, 0x37, 0x48, 0xdb, 0xdb, 0x52, 0x23, 0x27, + 0x87, 0x48, 0x05, 0x1c, 0x02, 0xe8, 0x43, 0x1f, 0x48, 0x6d, 0xa1, 0x46, 0xa2, 0xbf, 0xb1, 0x11, + 0x18, 0x35, 0xa4, 0x7a, 0x3d, 0x3c, 0x68, 0xe2, 0x04, 0x92, 0x2c, 0x3d, 0xa2, 0x5a, 0x37, 0x2f, + 0xe6, 0x75, 0x26, 0xaf, 0x9f, 0x16, 0xb7, 0x4d, 0x63, 0x88, 0x5a, 0xf4, 0xa1, 0x03, 0x4d, 0x22, + 0x6a, 0x46, 0x34, 0x7f, 0x92, 0x7e, 0x24, 0x72, 0xeb, 0x2f, 0xf0, 0x01, 0xf9, 0x30, 0x01, 0xaf, + 0xac, 0x74, 0x28, 0x6d, 0x90, 0x2e, 0x40, 0x05, 0x5e, 0x92, 0x34, 0x07, 0x79, 0x48, 0x64, 0x83, + 0x38, 0x86, 0x03, 0xe8, 0xe5, 0x2e, 0x4f, 0x69, 0x2f, 0x13, 0x9b, 0x43, 0x36, 0xa5, 0x89, 0x92, + 0x6e, 0x23, 0xd7, 0xee, 0x7f, 0x01, 0xf2, 0xab, 0xbd, 0x73, 0x2e, 0xe6, 0x02, 0x51, 0x97, 0x40, + 0x78, 0xce, 0xb1, 0xfe, 0x84, 0x99, 0xe2, 0x2b, 0x06, 0x05, 0x40, 0x02, 0x11, 0xde, 0x31, 0x94, + 0x54, 0x0f, 0xb4, 0xf4, 0xfa, 0xd1, 0x41, 0x6a, 0x03, 0x63, 0xcb, 0x9c, 0x48, 0x1a, 0xde, 0x2e, + 0xa6, 0xf3, 0x7b, 0x1c, 0xa3, 0xde, 0x06, 0xf2, 0x02, 0xc8, 0x37, 0x17, 0x9b, 0x2b, 0x84, 0x63, + 0x89, 0xd6, 0x15, 0xbf, 0xbe, 0xd7, 0x29, 0xad, 0x66, 0xaa, 0x54, 0x38, 0x0e, 0xc2, 0x08, 0xa4, + 0x77, 0xb7, 0xfa, 0x5f, 0xe4, 0x26, 0x76, 0x00, 0x8e, 0xe1, 0xbf, 0xf7, 0xe1, 0x32, 0xb3, 0x03, + 0x66, 0xb0, 0xf1, 0xe5, 0x6c, 0x11, 0x45, 0x17, 0x6b, 0x90, 0x6d, 0xef, 0x5b, 0xea, 0x03, 0xf6, + 0x6e, 0x65, 0xf9, 0x8c, 0x77, 0xdf, 0x24, 0x21, 0x0c, 0x17, 0x92, 0x21, 0x0f, 0x8c, 0xdd, 0xee, + 0x06, 0xab, 0x28, 0x3d, 0x89, 0xb8, 0x46, 0xb7, 0x04, 0x2e, 0x74, 0x28, 0x56, 0x81, 0xef, 0x98, + 0xa7, 0x4e, 0xb8, 0x96, 0x76, 0xaf, 0x81, 0xd7, 0xd7, 0x5d, 0x2d, 0x1d, 0x7d, 0x3a, 0xec, 0xe2, + 0x60, 0xe7, 0xcb, 0x06, 0x63, 0x47, 0x84, 0x30, 0xc9, 0xd2, 0xde, 0x21, 0xb0, 0xe3, 0xd9, 0xd1, + 0xec, 0x43, 0xe1, 0x54, 0xd5, 0xdd, 0x0e, 0x93, 0x2f, 0x9a, 0xfc, 0xad, 0x4e, 0xe1, 0xde, 0xe1, + 0xc9, 0x0e, 0x92, 0xe6, 0x89, 0x43, 0xc1, 0x5b, 0x02, 0xd1, 0xe7, 0x51, 0xd6, 0x16, 0xd4, 0xc9, + 0x66, 0x58, 0xe3, 0xe9, 0x73, 0x0d, 0x51, 0x93, 0x84, 0xb3, 0xec, 0x4a, 0xbd, 0x55, 0xe4, 0x59, + 0x76, 0x9a, 0x78, 0xa7, 0xa7, 0xa0, 0xde, 0xe6, 0x75, 0xce, 0x07, 0x5c, 0x38, 0xfb, 0x78, 0x0f, + 0x85, 0x79, 0xef, 0x57, 0x69, 0x56, 0x5b, 0x84, 0xf4, 0xc0, 0xf5, 0x7c, 0xaf, 0xf1, 0x23, 0x84, + 0x39, 0xb8, 0x74, 0x5b, 0x2d, 0xc9, 0x56, 0xd8, 0x9d, 0x9b, 0x4d, 0x59, 0x85, 0xdd, 0x94, 0xd2, + 0xc0, 0x9a, 0x8c, 0x54, 0xbe, 0xd3, 0xff, 0xa2, 0x8c, 0xd3, 0xa3, 0x49, 0xc6, 0x6c, 0xb1, 0x5a, + 0x3e, 0xc1, 0x13, 0x34, 0x17, 0x36, 0x16, 0x95, 0x33, 0x87, 0xa1, 0x82, 0x19, 0x8d, 0x25, 0x8c, + 0x26, 0x53, 0xe4, 0x34, 0x38, 0x65, 0x6e, 0x09, 0xc8, 0x46, 0xc4, 0xec, 0x59, 0x41, 0x30, 0x56, + 0xbf, 0x73, 0x56, 0x14, 0xf7, 0x44, 0x52, 0x0c, 0x05, 0xcf, 0x9e, 0x1e, 0x9c, 0xf6, 0xa5, 0x69, + 0xa7, 0x46, 0x11, 0x73, 0x65, 0x25, 0x1b, 0x73, 0xb4, 0x55, 0xe9, 0x90, 0xfb, 0x4b, 0xba, 0x43, + 0x5e, 0xf1, 0x77, 0x40, 0xb3, 0x19, 0x0a, 0x2c, 0xa7, 0x08, 0xb3, 0x28, 0x9b, 0xd9, 0x25, 0x0a, + 0x50, 0x85, 0xca, 0xf0, 0xb2, 0x60, 0xcf, 0x88, 0x24, 0x5a, 0xbd, 0x2f, 0x60, 0x63, 0x59, 0xd9, + 0xeb, 0x22, 0xab, 0x4d, 0x17, 0x42, 0x3d, 0x96, 0xc4, 0xb5, 0xcb, 0x1d, 0x72, 0x33, 0x81, 0x4b, + 0xed, 0x93, 0xf0, 0x97, 0x24, 0x6e, 0x9f, 0xb6, 0x84, 0x18, 0x61, 0x76, 0x3c, 0x4b, 0xb1, 0x99, + 0xe4, 0xce, 0x08, 0xe1, 0x97, 0xfb, 0x8c, 
0xe1, 0x4f, 0x08, 0x13, 0xfa, 0x91, 0xfb, 0x45, 0xc9, + 0xb1, 0x87, 0x86, 0x48, 0x75, 0x6b, 0x45, 0xea, 0x5b, 0x15, 0xd9, 0x9a, 0x64, 0xce, 0x1b, 0x35, + 0xaa, 0x73, 0xad, 0x5c, 0x3a, 0x32, 0x73, 0x53, 0x4e, 0x75, 0x40, 0x19, 0x10, 0x1e, 0x36, 0xd5, + 0xe9, 0xa6, 0x1e, 0xc4, 0x12, 0xd6, 0xe6, 0xa6, 0x6c, 0x47, 0x52, 0x6d, 0x51, 0x76, 0xbe, 0x9a, + 0x79, 0x6c, 0x44, 0x98, 0x3c, 0x1a, 0x21, 0x4c, 0xff, 0xdc, 0xe2, 0x9c, 0xbb, 0xc8, 0x5e, 0x69, + 0xe2, 0xb7, 0xec, 0xe2, 0x85, 0x03, 0x1d, 0x94, 0x23, 0x78, 0x9b, 0xa9, 0xc8, 0xec, 0xea, 0xaf, + 0x7d, 0x5b, 0x2f, 0xa9, 0x53, 0x44, 0xb3, 0xa2, 0x00, 0x2d, 0x02, 0xdc, 0x36, 0x9f, 0x6b, 0x7e, + 0xe9, 0xdb, 0xa4, 0x89, 0x5f, 0x84, 0x7d, 0x69, 0x1f, 0x77, 0x6e, 0x94, 0xcd, 0x1a, 0x8d, 0x86, + 0x76, 0xe7, 0x0d, 0xe7, 0x69, 0xbc, 0x2d, 0x7d, 0x61, 0x83, 0xc6, 0x75, 0x82, 0x84, 0x39, 0xca, + 0xca, 0x5f, 0x65, 0xb4, 0xa6, 0xe7, 0x9e, 0xa0, 0xb2, 0x1c, 0x0e, 0x99, 0x29, 0x00, 0x72, 0xcc, + 0xaa, 0xbc, 0xa7, 0x45, 0x73, 0xa1, 0xec, 0xf3, 0xf7, 0xde, 0x7a, 0x50, 0xea, 0x05, 0x99, 0x28, + 0x48, 0x31, 0xd3, 0x43, 0x0a, 0x94, 0x9b, 0x46, 0x98, 0x5a, 0x26, 0xec, 0xfe, 0x78, 0xcf, 0xd2, + 0xf6, 0x5d, 0xf5, 0x56, 0x8f, 0x4f, 0xc5, 0xf4, 0x9c, 0x66, 0x03, 0xf2, 0xf7, 0x8c, 0x03, 0xe3, + 0x6f, 0x34, 0x32, 0x08, 0x7c, 0xb3, 0x6b, 0x99, 0xd9, 0xd9, 0x46, 0x59, 0x30, 0x2d, 0x37, 0xa0, + 0x9b, 0xbf, 0x39, 0x59, 0x0f, 0xc1, 0x23, 0x7a, 0x94, 0x3e, 0xdc, 0x65, 0x63, 0x36, 0x9c, 0xce, + 0xdf, 0xb1, 0x22, 0xae, 0x4a, 0x42, 0x97, 0x8d, 0x1c, 0xbe, 0xcb, 0x4c, 0xaa, 0x35, 0xd5, 0xb7, + 0x60, 0x68, 0x6a, 0x59, 0xda, 0xd0, 0x8d, 0x6b, 0xd8, 0x5f, 0x24, 0x28, 0x15, 0x7c, 0x02, 0xf6, + 0x32, 0xa2, 0x53, 0x52, 0xc8, 0x7b, 0xdb, 0x7a, 0x32, 0x8a, 0x10, 0x9d, 0xcc, 0x29, 0xa6, 0xf3, + 0x3e, 0xd9, 0x9d, 0x13, 0xae, 0x2e, 0x8e, 0x3a, 0x4a, 0x83, 0xc4, 0x54, 0x85, 0x26, 0x2a, 0x72, + 0xe6, 0x1d, 0xc9, 0x93, 0xfc, 0x6c, 0xf6, 0x32, 0xc3, 0x98, 0x72, 0x02, 0x29, 0xe1, 0xec, 0xe6, + 0xc1, 0x6d, 0x98, 0x37, 0x49, 0x6a, 0xb8, 0xe0, 0xf7, 0x9a, 0x32, 0xfd, 0xaf, 0xd5, 0x8d, 0xf6, + 0x34, 0xfd, 0xfc, 0x77, 0x2a, 0x89, 0x65, 0xaf, 0xc4, 0xf7, 0x94, 0xed, 0xf9, 0x42, 0xa8, 0xa9, + 0x40, 0x23, 0x2d, 0x43, 0x30, 0xcd, 0x9d, 0xc8, 0xd9, 0x13, 0x25, 0x30, 0x23, 0x49, 0x1c, 0x48, + 0x51, 0xcc, 0x68, 0x68, 0xb5, 0x8a, 0x4f, 0x49, 0xb3, 0x8c, 0xdb, 0xa5, 0x88, 0xcc, 0x7a, 0xbc, + 0x38, 0x33, 0xca, 0x30, 0x88, 0xd7, 0x5c, 0xd6, 0x08, 0xd2, 0x97, 0xe5, 0xfe, 0xbd, 0xd0, 0x4e, + 0x21, 0xdb, 0x43, 0x16, 0x1e, 0x85, 0xf4, 0x7a, 0x46, 0x1f, 0xfe, 0x5c, 0x6d, 0xce, 0xd6, 0x7e, + 0x0d, 0xb3, 0x4f, 0x7c, 0x3c, 0x35, 0x7c, 0xb2, 0xdf, 0x10, 0x93, 0x1f, 0x6e, 0xa1, 0x57, 0xf7, + 0xd4, 0x40, 0xf7, 0x20, 0x24, 0x99, 0x45, 0x75, 0xdb, 0x3f, 0x10, 0xd1, 0x1a, 0x69, 0x1f, 0x57, + 0xa8, 0x38, 0x02, 0x1d, 0x42, 0x6a, 0xc0, 0xf5, 0xe9, 0x17, 0x6d, 0x1f, 0xb2, 0x8e, 0xb4, 0x44, + 0x4a, 0x17, 0x98, 0x8d, 0xfe, 0xd2, 0x86, 0xd9, 0x61, 0x9d, 0xe4, 0x24, 0x09, 0xe4, 0xba, 0xec, + 0x8b, 0x64, 0xbe, 0xc3, 0xdb, 0x14, 0x74, 0x5d, 0xc4, 0xeb, 0xdd, 0x1b, 0xdd, 0x07, 0x56, 0x4a, + 0xcf, 0xf0, 0xfa, 0x21, 0x22, 0xb9, 0x32, 0x3d, 0x9c, 0x43, 0xe8, 0xf7, 0x7d, 0xb3, 0xfc, 0x68, + 0xe2, 0x60, 0x47, 0x0d, 0x6b, 0xa8, 0x4f, 0x13, 0xad, 0x46, 0x83, 0xe4, 0x84, 0x1c, 0x54, 0x16, + 0x3b, 0x96, 0x3d, 0x60, 0xce, 0xfd, 0x66, 0xd3, 0x02, 0xb3, 0x1b, 0x69, 0x92, 0x0a, 0xa7, 0xcc, + 0x56, 0x53, 0x27, 0xe8, 0xc8, 0x57, 0x40, 0x5f, 0xe5, 0xa8, 0x3f, 0x48, 0x34, 0x11, 0xd0, 0x9f, + 0xb7, 0x6a, 0x43, 0x12, 0x1c, 0x57, 0xe9, 0xed, 0xdd, 0x77, 0xa9, 0x8c, 0x19, 0xea, 0x50, 0x11, + 0xae, 0x3d, 0x58, 0xc1, 0x38, 0xa7, 0xe6, 0x0b, 0xa4, 0x22, 0x2d, 
0x8a, 0x5d, 0xfc, 0x70, 0xe4, + 0x66, 0x26, 0x53, 0xca, 0x81, 0x57, 0x0a, 0xf6, 0x24, 0xc3, 0x3e, 0x5f, 0x60, 0x34, 0x92, 0xed, + 0xb9, 0xd8, 0x67, 0xdb, 0xb9, 0x2a, 0x9e, 0x44, 0xd5, 0x00, 0xb6, 0x62, 0x01, 0x90, 0xde, 0x0e, + 0x21, 0xdc, 0x64, 0xf7, 0xb2, 0x7c, 0x13, 0xf2, 0x70, 0x27, 0x04, 0xde, 0x37, 0x50, 0x38, 0x04, + 0xa9, 0xc5, 0x3c, 0xdf, 0xae, 0xce, 0xb8, 0x92, 0x90, 0x34, 0xfe, 0x0f, 0x84, 0x11, 0x4e, 0xd0, + 0x37, 0xfe, 0xae, 0x03, 0x9b, 0x5e, 0xfb, 0x3f, 0xca, 0xae, 0x2a, 0xc2, 0x12, 0x92, 0xb3, 0xd2, + 0xf4, 0xa9, 0xbf, 0x3c, 0x03, 0x25, 0xf0, 0x56, 0xb1, 0xb4, 0x1a, 0xe4, 0xd7, 0xa7, 0x4e, 0xeb, + 0xba, 0xd3, 0xa6, 0xb3, 0xf1, 0xca, 0x5b, 0x4f, 0x44, 0x10, 0x2a, 0x03, 0x6c, 0xd9, 0xe6, 0x33, + 0x53, 0xbb, 0x5e, 0x76, 0xc1, 0x14, 0xd9, 0x15, 0x2e, 0x0e, 0x66, 0xf6, 0x56, 0x02, 0x1e, 0xbf, + 0x6b, 0x9c, 0x74, 0x35, 0x3a, 0x03, 0xad, 0x64, 0xcb, 0x99, 0xec, 0xfb, 0xfa, 0x3e, 0x0a, 0x88, + 0x02, 0x70, 0x05, 0x79, 0xe9, 0x26, 0xf8, 0x56, 0x44, 0x46, 0x42, 0xe7, 0x70, 0xb6, 0x45, 0x86, + 0x12, 0xf6, 0xa1, 0x7e, 0x1d, 0x02, 0x7f, 0x9a, 0x55, 0x2f, 0x35, 0x6d, 0xaf, 0x26, 0x86, 0xba, + 0x70, 0x2b, 0x40, 0xce, 0xa8, 0xcd, 0xd6, 0x05, 0x15, 0x91, 0x2e, 0x33, 0xb9, 0x91, 0x97, 0xe2, + 0xc0, 0x85, 0x0e, 0xf9, 0x6f, 0xed, 0xbf, 0xd1, 0x40, 0xd9, 0x40, 0x48, 0x70, 0xac, 0xa9, 0x95, + 0x38, 0x6d, 0x7b, 0x2d, 0x87, 0xbd, 0x26, 0xd0, 0xcf, 0xd9, 0xfa, 0xb7, 0x3f, 0x66, 0xba, 0x89, + 0x99, 0x5f, 0x70, 0x58, 0x76, 0x16, 0x41, 0x8b, 0x96, 0x6c, 0xcf, 0x54, 0xad, 0x07, 0xee, 0xce, + 0x1e, 0x65, 0xce, 0xf4, 0x5f, 0x36, 0x04, 0xcd, 0x3c, 0x02, 0xea, 0x76, 0x5d, 0x51, 0x6c, 0x23, + 0xee, 0x88, 0xc6, 0xf4, 0x2b, 0x3c, 0x72, 0x29, 0xfa, 0x96, 0xfd, 0x25, 0xef, 0xa6, 0xdd, 0x54, + 0x96, 0x59, 0x79, 0x92, 0x35, 0x6e, 0xf7, 0x7b, 0x85, 0x4d, 0x52, 0xd9, 0xd8, 0x08, 0xd9, 0xd6, + 0xef, 0xe5, 0x09, 0xb7, 0x8c, 0x67, 0x24, 0xf0, 0x70, 0x46, 0x34, 0x5c, 0x3f, 0x4d, 0x7f, 0xb0, + 0x73, 0xea, 0x87, 0xc8, 0x53, 0xba, 0x83, 0xce, 0xa8, 0x9c, 0x25, 0xbd, 0x9b, 0x50, 0x00, 0x03, + 0x2b, 0xc5, 0x2f, 0xe8, 0xad, 0x45, 0x6f, 0xf4, 0x22, 0x8c, 0xd9, 0xc0, 0x0c, 0x43, 0xe9, 0x41, + 0xd8, 0x1a, 0x0d, 0x76, 0x7c, 0xc8, 0xfc, 0xf6, 0x0f, 0x02, 0xaa, 0x59, 0x5a, 0xc3, 0x22, 0xfa, + 0xc2, 0x09, 0x07, 0x93, 0x23, 0x2f, 0x3b, 0x07, 0x2a, 0x63, 0xb9, 0xd2, 0x77, 0xfa, 0x5e, 0x38, + 0xaf, 0xc4, 0x11, 0xeb, 0x97, 0x14, 0xc1, 0x87, 0xb6, 0xe8, 0x2d, 0x46, 0xce, 0xca, 0x13, 0x17, + 0x61, 0x2a, 0x18, 0x14, 0x7e, 0x66, 0x2d, 0x5b, 0x4c, 0xf5, 0x28, 0x16, 0x3b, 0xe5, 0xb1, 0x18, + 0xbc, 0xb5, 0xe8, 0x1b, 0x70, 0x6b, 0x32, 0x07, 0xfa, 0xc3, 0x5d, 0xc2, 0x41, 0x7b, 0xaa, 0xcf, + 0x96, 0xf1, 0xae, 0x75, 0x15, 0xc7, 0x17, 0xf1, 0x67, 0x4f, 0x01, 0x53, 0x6b, 0x81, 0x72, 0xdc, + 0xb5, 0x9d, 0xd3, 0x9c, 0x5c, 0x20, 0x3d, 0xdd, 0x44, 0x56, 0xea, 0x92, 0xb1, 0x5a, 0xac, 0x91, + 0xc0, 0xd9, 0x6a, 0xb2, 0x5e, 0x33, 0x16, 0xf9, 0xa9, 0xec, 0x79, 0x63, 0xb7, 0xe5, 0x20, 0xa9, + 0x3a, 0x32, 0xbd, 0xc3, 0x53, 0x8b, 0x98, 0xc1, 0x5e, 0x04, 0x73, 0x06, 0x9d, 0x34, 0xc4, 0x2c, + 0x83, 0xea, 0x62, 0x25, 0x4f, 0xd3, 0x02, 0x49, 0x50, 0xcd, 0x32, 0xfd, 0xef, 0x67, 0xd8, 0x8d, + 0xf3, 0xfc, 0x1c, 0xa7, 0x02, 0x51, 0xdf, 0xa7, 0xc1, 0x92, 0x1a, 0x74, 0xda, 0x3a, 0x75, 0xad, + 0xd7, 0x14, 0xb3, 0xf9, 0x47, 0x87, 0xfc, 0x26, 0xd9, 0x00, 0x6f, 0x61, 0xff, 0xc2, 0x8b, 0x06, + 0x06, 0x4d, 0xc5, 0xf0, 0x9b, 0xc4, 0x11, 0x7a, 0x64, 0x4b, 0x62, 0x1a, 0xe4, 0xbf, 0xdd, 0x2d, + 0x7d, 0xb1, 0x3b, 0x54, 0x06, 0xf8, 0xb6, 0x75, 0x55, 0xa4, 0x8c, 0xad, 0x0b, 0x11, 0xef, 0x00, + 0xbf, 0x3b, 0x7c, 0xb9, 0xd2, 0xc9, 0xd4, 0x1e, 0xc8, 0xcb, 0x8e, 0xdd, 0x49, 0x53, 0x1c, 
0xa4, + 0xda, 0x3a, 0x90, 0x45, 0x03, 0x54, 0x10, 0x0c, 0x11, 0x98, 0x54, 0x21, 0x2c, 0x34, 0x47, 0x51, + 0x92, 0xc2, 0x61, 0xe3, 0x1c, 0x51, 0x2a, 0xa0, 0xc9, 0xd3, 0xa5, 0x56, 0x50, 0xb9, 0x5f, 0x25, + 0x87, 0xdd, 0xd0, 0xf0, 0x2d, 0xd3, 0x9f, 0x55, 0x87, 0xee, 0xf2, 0xdf, 0xae, 0x60, 0x4c, 0x18, + 0xe5, 0x77, 0x7b, 0xdc, 0x51, 0x82, 0x99, 0x87, 0x26, 0x55, 0xf7, 0x87, 0xdc, 0x68, 0x35, 0x78, + 0xac, 0x9d, 0x41, 0x39, 0x7a, 0x40, 0x0b, 0xaf, 0xc6, 0x33, 0x5a, 0x54, 0x19, 0x67, 0x96, 0xe8, + 0x67, 0x5d, 0xc9, 0xdf, 0x47, 0xfa, 0x1d, 0x4c, 0x5f, 0x5c, 0x78, 0x59, 0xc0, 0xe9, 0xd0, 0x1d, + 0xb2, 0x00, 0xf7, 0x18, 0x9e, 0x0f, 0x82, 0x1e, 0xe3, 0xf4, 0x17, 0xdf, 0x95, 0xa5, 0x8c, 0xe6, + 0xe6, 0xe8, 0x11, 0x96, 0xae, 0xf6, 0x4f, 0x42, 0x34, 0xf5, 0xa0, 0x63, 0x62, 0xcc, 0x63, 0xab, + 0x8d, 0x08, 0x6b, 0xf2, 0x8f, 0xaa, 0xe2, 0x27, 0xbf, 0x33, 0x9b, 0xbf, 0xaf, 0xd0, 0x1e, 0x2a, + 0xb3, 0x5b, 0xab, 0x96, 0x5a, 0x9c, 0x14, 0x15, 0xb4, 0x5b, 0x5a, 0x72, 0x9d, 0x40, 0x46, 0x25, + 0x44, 0xdc, 0xbe, 0x60, 0x96, 0xee, 0x4d, 0x67, 0xeb, 0x74, 0x4a, 0x9d, 0x6e, 0x62, 0x5a, 0x6b, + 0x11, 0x36, 0x9b, 0x96, 0x84, 0xb8, 0x15, 0x4d, 0x32, 0xa0, 0xf9, 0x2c, 0x2a, 0x0b, 0x58, 0xec, + 0x73, 0x8a, 0x12, 0x35, 0x4c, 0x6b, 0x3c, 0x16, 0x90, 0x1e, 0x9e, 0xad, 0x07, 0xa8, 0xbd, 0x2f, + 0x7a, 0x79, 0x6e, 0xb7, 0x3c, 0x72, 0x05, 0xee, 0xf2, 0xa3, 0x16, 0xce, 0x27, 0xb7, 0x02, 0xd5, + 0xea, 0xcd, 0x50, 0x6b, 0xe5, 0x14, 0x16, 0x99, 0xd3, 0xfe, 0xa1, 0xaf, 0xbf, 0x6d, 0x00, 0x0f, + 0x27, 0x68, 0xac, 0x1b, 0x13, 0x07, 0x1f, 0x88, 0x55, 0x7a, 0x18, 0x69, 0x21, 0x2f, 0x3d, 0x97, + 0x04, 0xdd, 0x6b, 0x40, 0x6b, 0x79, 0xaa, 0x7d, 0xf4, 0x92, 0x17, 0x70, 0x50, 0x1b, 0x4b, 0x10, + 0x10, 0xf6, 0xf3, 0xde, 0x20, 0x7c, 0xc6, 0xcf, 0x2c, 0x59, 0x3f, 0xcc, 0x62, 0x21, 0x4f, 0x9f, + 0x79, 0x97, 0x24, 0xe5, 0x56, 0x86, 0x8b, 0xc6, 0x80, 0x52, 0xb2, 0x56, 0xd8, 0x71, 0x9f, 0xb2, + 0x3e, 0x83, 0xff, 0xba, 0x8f, 0xb4, 0x1f, 0x9c, 0x1d, 0x6d, 0x4b, 0xb3, 0xf2, 0x07, 0xd2, 0xdc, + 0x83, 0x6a, 0x88, 0x9c, 0x2c, 0xc5, 0x69, 0xdc, 0x8e, 0x02, 0x91, 0x7b, 0xe5, 0xcd, 0xa7, 0x5e, + 0x62, 0xad, 0x78, 0x7b, 0xb9, 0xca, 0x3f, 0x79, 0xea, 0x58, 0xb9, 0xe3, 0xba, 0xbe, 0xaf, 0x14, + 0xf9, 0xd2, 0x6e, 0xe8, 0xa6, 0x82, 0x2b, 0xc7, 0x93, 0x2f, 0x1e, 0xab, 0xb9, 0x0a, 0xa0, 0x90, + 0x21, 0xe0, 0x36, 0xca, 0xfd, 0x71, 0x6c, 0xa3, 0x97, 0x57, 0x14, 0x8a, 0x39, 0x25, 0x2a, 0xec, + 0x09, 0x51, 0x13, 0xba, 0x50, 0x2e, 0xbb, 0x84, 0x36, 0xfd, 0x75, 0x25, 0x24, 0x2f, 0x35, 0x40, + 0xa8, 0x09, 0x4d, 0x98, 0xea, 0xd8, 0x35, 0x55, 0xdc, 0x13, 0xaf, 0x1d, 0x3c, 0x5a, 0x16, 0xf9, + 0x51, 0x6d, 0x2b, 0x84, 0xfa, 0x2d, 0xa8, 0x49, 0x7a, 0xbb, 0x8a, 0x0c, 0xeb, 0xd5, 0x83, 0xd9, + 0x2e, 0x43, 0x3b, 0xdd, 0xe9, 0xb4, 0x3c, 0xd0, 0x18, 0xa4, 0xd3, 0x71, 0xf9, 0xbb, 0xbf, 0x0b, + 0xa7, 0xc4, 0xce, 0xda, 0x56, 0x47, 0xe8, 0x1a, 0x72, 0x37, 0x71, 0x43, 0x2b, 0x9e, 0x3d, 0x6a, + 0x60, 0xb6, 0xb6, 0xef, 0x98, 0x72, 0x5c, 0x24, 0xd1, 0xef, 0x2d, 0x46, 0x66, 0xc3, 0xa9, 0x02, + 0x52, 0xe3, 0x96, 0x84, 0xbe, 0xa8, 0xd0, 0xad, 0x07, 0x19, 0xfc, 0x5b, 0x8b, 0x93, 0x5a, 0xac, + 0xd6, 0x98, 0x4a, 0xc1, 0x74, 0xe2, 0x6d, 0x72, 0x69, 0xb4, 0x03, 0xc0, 0xc6, 0x9a, 0x0b, 0x07, + 0x78, 0x09, 0x7c, 0xbc, 0xa4, 0xd5, 0x87, 0xae, 0x9b, 0x5f, 0x2a, 0x9c, 0xea, 0xfc, 0x30, 0x41, + 0x5f, 0x51, 0x70, 0x8e, 0x1e, 0x31, 0x91, 0x3b, 0x26, 0x48, 0xd1, 0x5a, 0x9f, 0xe9, 0x91, 0xed, + 0x8d, 0xd3, 0xac, 0xa1, 0xb2, 0x62, 0xf1, 0xc9, 0x43, 0xc6, 0x30, 0xca, 0x99, 0x69, 0x4c, 0xde, + 0xb0, 0xc6, 0xad, 0xd8, 0x56, 0x7f, 0x77, 0x71, 0x8b, 0xb5, 0x43, 0x2e, 0x24, 0x08, 0x97, 0xe5, + 0xca, 0x97, 0x86, 
0x93, 0x0a, 0x9c, 0x24, 0x97, 0x82, 0x55, 0x25, 0x90, 0x74, 0x4e, 0x2e, 0x9d, + 0x90, 0xa9, 0xe6, 0x93, 0x50, 0x6c, 0x88, 0x4b, 0x94, 0x75, 0xad, 0x66, 0x60, 0x27, 0xe2, 0x17, + 0x86, 0x37, 0x2a, 0xf8, 0xf5, 0x2a, 0x9b, 0x48, 0x55, 0xa0, 0x51, 0xf8, 0x8d, 0x92, 0x38, 0xa1, + 0x8d, 0x14, 0x54, 0x4d, 0xb3, 0x0d, 0xe9, 0x5b, 0x21, 0x42, 0xa0, 0x4e, 0x2e, 0x1e, 0xec, 0x0d, + 0x1b, 0x40, 0x6c, 0xe6, 0x34, 0xc7, 0x69, 0xc9, 0xd6, 0xa3, 0xa5, 0x99, 0x3f, 0x83, 0x31, 0x6a, + 0x8e, 0xf9, 0x6c, 0x93, 0xd8, 0xce, 0x14, 0x53, 0xff, 0x41, 0x56, 0x96, 0x59, 0xa1, 0xb2, 0x9b, + 0xf1, 0x44, 0x0e, 0xd3, 0x40, 0xad, 0x0c, 0xc3, 0x9b, 0x8e, 0xee, 0x1c, 0xc0, 0x6f, 0x87, 0x92, + 0xfb, 0x76, 0x52, 0x10, 0x07, 0xa9, 0xc7, 0x67, 0x9e, 0xc1, 0xa1, 0xb6, 0x4b, 0x4c, 0x09, 0x33, + 0xd2, 0x3a, 0x16, 0x4c, 0x42, 0x4e, 0x28, 0x91, 0x49, 0xad, 0xf1, 0xa6, 0x2a, 0x72, 0xeb, 0x6c, + 0x62, 0x82, 0xb6, 0x87, 0xca, 0x13, 0xd4, 0xef, 0xc6, 0x37, 0xf6, 0xeb, 0x31, 0x0e, 0x60, 0x62, + 0x69, 0xf7, 0x87, 0x67, 0xc3, 0x94, 0x27, 0x5b, 0x64, 0x2d, 0x3a, 0x14, 0x5f, 0xf7, 0xb1, 0xa5, + 0x5b, 0x39, 0x68, 0x3e, 0x0d, 0x64, 0x1e, 0x51, 0x3a, 0xad, 0x72, 0x0c, 0x57, 0x58, 0x61, 0xfe, + 0x4c, 0xf2, 0xb3, 0x7d, 0x7e, 0x0d, 0xe0, 0x80, 0x35, 0xd0, 0x19, 0xf1, 0xa2, 0x3d, 0xd3, 0x0d, + 0x02, 0x2f, 0xc9, 0x6d, 0x69, 0x83, 0xf4, 0xf4, 0xd3, 0x1c, 0xa9, 0x36, 0x24, 0x05, 0x30, 0xba, + 0xa1, 0x84, 0x4f, 0x1d, 0x0a, 0x36, 0x49, 0xe3, 0x8f, 0x51, 0xbc, 0x8f, 0x91, 0x90, 0xee, 0x08, + 0x54, 0x0c, 0x42, 0xc0, 0x7c, 0x02, 0xf9, 0x77, 0x13, 0xfb, 0x53, 0x72, 0x5d, 0x73, 0x75, 0x76, + 0xc8, 0x2d, 0xfe, 0xad, 0x15, 0xfe, 0xde, 0xc5, 0xa1, 0xe3, 0x32, 0x3c, 0x6a, 0xf2, 0xb5, 0x89, + 0xa9, 0xe1, 0x94, 0x74, 0x86, 0x02, 0x5a, 0xe7, 0xb0, 0xd6, 0xf8, 0x3e, 0x0b, 0xa9, 0xa5, 0x41, + 0x08, 0xb5, 0x8f, 0x34, 0xa6, 0x2d, 0x02, 0xec, 0x71, 0x94, 0x6e, 0xe3, 0x5b, 0x9b, 0x93, 0x76, + 0x55, 0x98, 0x33, 0xde, 0x5c, 0x51, 0x1f, 0xaa, 0xef, 0x14, 0xc0, 0x5f, 0xd0, 0x25, 0xbe, 0xcd, + 0xb0, 0xab, 0x54, 0x62, 0xe6, 0x43, 0x6e, 0xa7, 0x34, 0x2e, 0xf9, 0x70, 0x28, 0x30, 0x3a, 0x3b, + 0x32, 0x7d, 0xad, 0x07, 0x32, 0x06, 0x4b, 0xee, 0xc0, 0xd6, 0x36, 0xd3, 0xa7, 0xdc, 0x64, 0x1e, + 0x72, 0xba, 0x02, 0x04, 0xdb, 0x05, 0x67, 0x40, 0xf5, 0xb4, 0xaa, 0xe4, 0x94, 0xf8, 0xe6, 0x67, + 0xc3, 0x84, 0x18, 0xbd, 0xe2, 0x6f, 0x80, 0xda, 0x67, 0x4b, 0xe1, 0xa0, 0xbf, 0x6d, 0x14, 0xe6, + 0x56, 0x92, 0x91, 0x41, 0x6c, 0xa0, 0x1d, 0x49, 0x70, 0xc0, 0x3e, 0x41, 0x00, 0x96, 0xb8, 0xba, + 0x82, 0xf9, 0x4f, 0x42, 0xc0, 0x65, 0x4e, 0x56, 0xd9, 0x47, 0x1c, 0x53, 0xb2, 0xb9, 0x8c, 0x7f, + 0xee, 0x06, 0x39, 0xbf, 0x0c, 0xf3, 0xa3, 0xc8, 0x43, 0x7f, 0x45, 0xf8, 0xee, 0x49, 0xa1, 0xc2, + 0xd6, 0xf6, 0x55, 0x67, 0xfa, 0x6d, 0x8a, 0x22, 0x83, 0xfd, 0xc2, 0x91, 0x23, 0x23, 0x8b, 0x38, + 0x09, 0x95, 0x03, 0x84, 0x6e, 0x75, 0x4f, 0x74, 0x23, 0x5f, 0x8e, 0xe3, 0xab, 0xca, 0x16, 0x3b, + 0x4f, 0x78, 0x4a, 0xaa, 0x2e, 0x34, 0xf1, 0xff, 0xd8, 0x0d, 0xab, 0xae, 0x87, 0x1a, 0xbe, 0x27, + 0x37, 0xed, 0x29, 0xad, 0xb9, 0xc0, 0xbc, 0x1d, 0xcb, 0xe4, 0x2f, 0x8b, 0x65, 0xf9, 0x55, 0x73, + 0xa1, 0xf0, 0xa0, 0xd0, 0x85, 0x00, 0x7d, 0x0b, 0x69, 0x2b, 0x0e, 0x77, 0xe0, 0xd5, 0x71, 0xda, + 0x53, 0x26, 0xc3, 0x69, 0x5c, 0xc6, 0xe6, 0xcc, 0xac, 0x45, 0x2f, 0x2d, 0x96, 0x94, 0x4c, 0x75, + 0x75, 0x12, 0x4f, 0xb6, 0xbd, 0x38, 0x3f, 0xbc, 0x5b, 0x86, 0xd6, 0x17, 0x68, 0x16, 0x38, 0xec, + 0xf3, 0xd6, 0x82, 0x3e, 0x23, 0xc0, 0x6b, 0xba, 0x5d, 0x05, 0xc6, 0x90, 0x89, 0x1b, 0x6c, 0x4f, + 0xe3, 0x69, 0x13, 0x04, 0x16, 0xf3, 0x57, 0xd0, 0x31, 0x1c, 0x33, 0x35, 0x69, 0xf6, 0x4f, 0x5b, + 0x29, 0xed, 0xf4, 0x7c, 0xa0, 0x4b, 0x75, 
0x2d, 0xfa, 0x49, 0xb5, 0x2e, 0xcd, 0xaa, 0xb2, 0xb9, + 0x14, 0x91, 0x28, 0x42, 0x18, 0x18, 0x49, 0x91, 0x18, 0x6e, 0x25, 0xa5, 0xdb, 0x2e, 0xa3, 0xd6, + 0x80, 0x83, 0xce, 0xe0, 0x5b, 0x97, 0x78, 0x57, 0x72, 0x5e, 0x16, 0xa5, 0x48, 0x02, 0x84, 0x72, + 0xed, 0x8b, 0xda, 0x92, 0xcd, 0x69, 0xc8, 0xa8, 0x35, 0x80, 0x2b, 0xa3, 0x8b, 0x73, 0x3d, 0xfb, + 0x51, 0xb5, 0x3f, 0x26, 0x62, 0xd5, 0xfa, 0xde, 0x4c, 0x2b, 0x45, 0xe1, 0xce, 0x99, 0xd1, 0x79, + 0x73, 0x04, 0xb6, 0x6a, 0x0b, 0x86, 0x7e, 0x69, 0xd8, 0xa9, 0x9e, 0xc4, 0x3f, 0x8e, 0x7a, 0x49, + 0x6c, 0x51, 0x72, 0x2a, 0xe3, 0x41, 0x72, 0x18, 0xb1, 0xa0, 0x17, 0x75, 0xb5, 0xad, 0xad, 0x02, + 0x75, 0xff, 0x78, 0xa2, 0x53, 0x1d, 0x93, 0x59, 0x11, 0x24, 0x45, 0x7e, 0x69, 0x2b, 0x4f, 0x58, + 0x3b, 0xee, 0xfc, 0x91, 0x87, 0xa4, 0x6d, 0x2e, 0xf8, 0xec, 0x91, 0x38, 0xd9, 0xbe, 0xe5, 0x13, + 0x2a, 0x89, 0xee, 0xfa, 0xeb, 0x18, 0xa2, 0x43, 0xbf, 0xe1, 0x4a, 0xd1, 0xa2, 0x34, 0x2e, 0x50, + 0xac, 0xb6, 0x9f, 0x3c, 0x77, 0x82, 0x88, 0x75, 0x20, 0x13, 0xa3, 0x00, 0x1f, 0x3a, 0xe6, 0x90, + 0x45, 0xf0, 0xc2, 0x1a, 0x07, 0x6b, 0xa6, 0x71, 0x39, 0x02, 0x19, 0xd5, 0x96, 0x96, 0xd7, 0x5a, + 0x0b, 0x23, 0xc7, 0x26, 0x21, 0x12, 0x9d, 0xd4, 0x71, 0xa7, 0xe9, 0xad, 0xaf, 0xdc, 0x57, 0xcc, + 0xda, 0xb2, 0x07, 0xf6, 0x44, 0x0d, 0x94, 0x3c, 0xb2, 0xcf, 0xad, 0x83, 0x57, 0x0e, 0xab, 0x9a, + 0xf0, 0x33, 0x7d, 0xc5, 0x32, 0xa3, 0x3f, 0xcf, 0x70, 0xf5, 0x11, 0x44, 0x64, 0x27, 0x4a, 0x51, + 0x76, 0xc3, 0x12, 0x68, 0xc7, 0x43, 0x8c, 0x80, 0x2d, 0xef, 0x0c, 0x2d, 0xc1, 0x50, 0xb8, 0x22, + 0x61, 0x32, 0x5a, 0x36, 0x25, 0x6b, 0x5f, 0x53, 0x46, 0x44, 0x19, 0xd1, 0x9e, 0x74, 0x8e, 0x00, + 0x1a, 0x99, 0xc2, 0x2d, 0x09, 0xd1, 0x57, 0x89, 0x74, 0x03, 0x43, 0x6c, 0x4c, 0xe4, 0xee, 0x8e, + 0xa6, 0x7f, 0x53, 0x2b, 0xf1, 0x02, 0x6d, 0x75, 0x04, 0x8e, 0xb3, 0x48, 0x1c, 0x84, 0x81, 0xe6, + 0x79, 0x85, 0x40, 0x6e, 0xd0, 0x76, 0x1d, 0xd8, 0x8a, 0xd0, 0x05, 0x6a, 0xc6, 0xeb, 0x77, 0xd8, + 0x95, 0xdd, 0x25, 0x89, 0x8a, 0x5e, 0xdc, 0x2b, 0xdb, 0x96, 0x75, 0x15, 0x0d, 0xbd, 0xd2, 0xef, + 0xbb, 0x78, 0x7d, 0x6c, 0x21, 0xe8, 0xdc, 0x94, 0xcc, 0xcc, 0x88, 0x06, 0xd2, 0x33, 0x9c, 0xf8, + 0x10, 0x2c, 0xcb, 0xed, 0x0a, 0xb6, 0x1e, 0x24, 0x63, 0xa6, 0x3d, 0x78, 0x53, 0xa3, 0xa8, 0x8c, + 0x50, 0xf0, 0x13, 0x46, 0xde, 0xa6, 0x81, 0xb4, 0x57, 0x04, 0xc9, 0xa0, 0xec, 0xf8, 0x1b, 0x26, + 0x36, 0x83, 0xd7, 0x52, 0xaf, 0xc2, 0xbb, 0x34, 0xa5, 0x33, 0x64, 0xa1, 0x04, 0x8f, 0x31, 0x52, + 0x86, 0xa2, 0x5a, 0x1a, 0xaa, 0xcb, 0x13, 0x1b, 0xae, 0x7f, 0xf6, 0x02, 0xe7, 0x33, 0x39, 0x15, + 0xbe, 0xb8, 0x1a, 0xac, 0x75, 0x2c, 0xae, 0xa1, 0xc6, 0xaa, 0x46, 0x89, 0xc2, 0xe6, 0x52, 0x0c, + 0x5b, 0x18, 0xc4, 0x36, 0x6a, 0xeb, 0x74, 0x1a, 0x70, 0x48, 0x8f, 0xea, 0xad, 0x75, 0x21, 0xd6, + 0xf2, 0x37, 0x80, 0x50, 0x7f, 0xfa, 0x4b, 0x42, 0x56, 0x69, 0xb9, 0x2b, 0x40, 0x7a, 0x08, 0xa2, + 0xe2, 0x95, 0xb3, 0xea, 0x96, 0x63, 0xa5, 0xc7, 0xae, 0x1d, 0x1b, 0xbe, 0x78, 0xa9, 0xa0, 0x73, + 0x98, 0x24, 0x52, 0xe3, 0xf1, 0xb0, 0x14, 0x34, 0x19, 0x71, 0x5d, 0x7b, 0x4c, 0xe4, 0x65, 0x06, + 0xad, 0xbc, 0xbc, 0xd0, 0xe0, 0xd2, 0xf4, 0x5d, 0xd3, 0xe9, 0x32, 0x82, 0x6d, 0xc3, 0x1f, 0x5f, + 0xa9, 0xa6, 0xcc, 0x72, 0x2d, 0x82, 0x7b, 0x86, 0x42, 0x54, 0xf3, 0x95, 0x80, 0x69, 0x2b, 0x04, + 0x8e, 0x7e, 0x54, 0xbc, 0x20, 0x07, 0x57, 0x51, 0xb1, 0x2c, 0xab, 0x26, 0x08, 0x8a, 0x92, 0xd6, + 0x74, 0x48, 0xe4, 0xeb, 0xd4, 0xe3, 0xc8, 0x8b, 0xf4, 0xa3, 0x3e, 0x2f, 0x51, 0x5f, 0x91, 0x9c, + 0x36, 0x20, 0x0b, 0x6d, 0x00, 0x8b, 0xa4, 0x4c, 0x02, 0xdc, 0x87, 0x26, 0x46, 0x3c, 0xb0, 0x4c, + 0xda, 0x0e, 0x2f, 0x6b, 0xdb, 0x75, 0xf9, 0x16, 0x69, 0x1c, 0x8c, 
0x07, 0xd0, 0xb2, 0x8d, 0x11, + 0x20, 0xca, 0x70, 0xd5, 0xe1, 0x5d, 0xa4, 0x49, 0x88, 0xf9, 0x8f, 0x87, 0x94, 0xc1, 0xa5, 0x91, + 0xcc, 0x64, 0xa6, 0xd0, 0x16, 0xd4, 0x94, 0xa9, 0x3d, 0x1e, 0x03, 0xd0, 0x00, 0x65, 0x11, 0x23, + 0x40, 0xeb, 0xb8, 0x4c, 0x6b, 0x73, 0x71, 0x70, 0x33, 0x5b, 0x62, 0x50, 0xce, 0x47, 0x54, 0x41, + 0xd3, 0x39, 0x40, 0x78, 0x58, 0xdd, 0x47, 0x26, 0xf7, 0xd2, 0xfe, 0xa9, 0x33, 0x92, 0xff, 0xf5, + 0x85, 0xfa, 0x2d, 0x9c, 0xd9, 0x79, 0xf5, 0xd0, 0x6c, 0x00, 0xbb, 0x26, 0x2d, 0x36, 0x6b, 0x80, + 0x5f, 0xf1, 0xdc, 0x53, 0xe3, 0xc3, 0xa7, 0x1f, 0x53, 0xa0, 0xf2, 0x7b, 0xd0, 0xca, 0x75, 0xc3, + 0xfc, 0xd5, 0xee, 0x17, 0xe1, 0x93, 0x73, 0x1b, 0x26, 0x8e, 0xa1, 0x80, 0x3e, 0x68, 0x3e, 0xbe, + 0x40, 0x9b, 0x4d, 0x89, 0x04, 0x80, 0xe7, 0x60, 0x50, 0x9a, 0x01, 0x60, 0xde, 0xfc, 0x35, 0xba, + 0x0c, 0xd5, 0x24, 0x0c, 0x63, 0x8d, 0x69, 0x90, 0x6a, 0xf4, 0xd0, 0xb3, 0x26, 0xfe, 0x86, 0x96, + 0x16, 0xb7, 0xf4, 0x28, 0xa7, 0xda, 0xf9, 0x5e, 0xc2, 0xcf, 0xf1, 0x02, 0x47, 0x1c, 0x4b, 0x8f, + 0x05, 0xd1, 0x7b, 0xbc, 0x29, 0x10, 0xdb, 0x78, 0xec, 0xd8, 0x1c, 0x97, 0x8c, 0xf9, 0xaf, 0xc3, + 0x20, 0x05, 0xab, 0x73, 0xba, 0x1b, 0x00, 0x7e, 0xbc, 0xcb, 0x50, 0x7a, 0x18, 0xf4, 0x4e, 0x43, + 0x42, 0x36, 0x5b, 0x8d, 0x8b, 0xd6, 0x8e, 0x03, 0xcc, 0x1f, 0xa4, 0x54, 0x09, 0x25, 0xe0, 0xaa, + 0x01, 0x0e, 0x04, 0x31, 0xba, 0x29, 0x27, 0x0a, 0xd6, 0x3e, 0x2c, 0x1f, 0xea, 0x1a, 0x07, 0x28, + 0xba, 0x09, 0x09, 0x24, 0x07, 0xef, 0x48, 0x16, 0x03, 0x38, 0x09, 0x05, 0xdb, 0xce, 0xcc, 0x6b, + 0xc4, 0xc9, 0xa7, 0xed, 0x74, 0xd9, 0x6e, 0x67, 0xad, 0xff, 0x66, 0xf9, 0xb0, 0xdf, 0x5c, 0x0b, + 0xc4, 0x40, 0xd9, 0x1c, 0xf2, 0x25, 0xc0, 0xb9, 0xa2, 0x80, 0x5b, 0xc4, 0xed, 0x58, 0x1c, 0x82, + 0xf1, 0x60, 0x91, 0x93, 0x13, 0x87, 0xfc, 0x82, 0xc1, 0x9a, 0x7d, 0x3e, 0x39, 0xe7, 0x16, 0x19, + 0x2d, 0xf1, 0x2d, 0x46, 0x15, 0xc4, 0x1e, 0x95, 0xb5, 0x69, 0x97, 0x28, 0xe5, 0x1b, 0xd9, 0xd4, + 0xd9, 0xbc, 0x05, 0x03, 0x94, 0x52, 0xf7, 0xd0, 0xba, 0x53, 0xdd, 0xb5, 0x50, 0x4a, 0x46, 0xdb, + 0x50, 0xcd, 0x86, 0x35, 0xcc, 0xb6, 0xf3, 0x3c, 0x5f, 0xde, 0xa0, 0x85, 0x85, 0x4f, 0x43, 0xca, + 0x00, 0x6b, 0xe7, 0xa5, 0xbb, 0xa6, 0x10, 0x35, 0x1a, 0x7e, 0x83, 0x0a, 0xe3, 0x50, 0x29, 0x68, + 0x8c, 0x6d, 0x5e, 0xb7, 0x08, 0x7b, 0x28, 0xaf, 0x7f, 0x8b, 0x37, 0xc8, 0xce, 0xa4, 0x7d, 0x1f, + 0x91, 0x14, 0x0a, 0x3f, 0xa1, 0x0e, 0x88, 0x34, 0x99, 0x82, 0x10, 0x9a, 0xc7, 0x1e, 0xa4, 0x61, + 0x2a, 0x24, 0x51, 0x50, 0xbf, 0x67, 0x47, 0xa1, 0xad, 0x2f, 0xa5, 0x25, 0x76, 0x22, 0xda, 0x8d, + 0x38, 0x2c, 0x8a, 0x5c, 0xfa, 0xc5, 0x92, 0x10, 0x8f, 0x3a, 0x7f, 0x28, 0x0b, 0x3d, 0xe8, 0x5b, + 0x5a, 0x03, 0x3f, 0x25, 0x74, 0x4c, 0x65, 0x97, 0x30, 0xb5, 0xce, 0x39, 0xc3, 0x2a, 0x09, 0x3e, + 0xf1, 0x30, 0xd3, 0x73, 0x3c, 0x3e, 0xb8, 0xd9, 0xd8, 0xb3, 0xea, 0x9b, 0x30, 0x03, 0xaf, 0x84, + 0xff, 0x1b, 0xe8, 0x33, 0xba, 0x86, 0xf7, 0xc4, 0xdf, 0xe7, 0x35, 0xee, 0x37, 0x42, 0x74, 0x86, + 0xe9, 0xc6, 0xe8, 0x9d, 0xea, 0x88, 0x22, 0xdf, 0x50, 0xe6, 0x33, 0xd7, 0xd9, 0x20, 0xde, 0x47, + 0xd9, 0x58, 0x23, 0x5d, 0xca, 0x2d, 0xd7, 0xd8, 0x78, 0x62, 0xea, 0xcb, 0x78, 0xd9, 0x30, 0xe9, + 0x9e, 0xad, 0x2d, 0x6b, 0xe5, 0x23, 0x08, 0x58, 0xad, 0x5f, 0x2f, 0xb8, 0xb2, 0xa3, 0x49, 0x48, + 0xf9, 0x78, 0x3b, 0xc7, 0xc7, 0x73, 0x6d, 0xb8, 0x4b, 0x12, 0xbf, 0xd3, 0xe2, 0xf5, 0x5a, 0x1c, + 0x70, 0xc0, 0xb4, 0xa8, 0xdf, 0xac, 0x11, 0xe8, 0x8b, 0x62, 0xf0, 0x04, 0x9c, 0xf8, 0xe0, 0x8b, + 0xb9, 0x59, 0x3e, 0xde, 0x17, 0xb5, 0x63, 0x91, 0x21, 0x63, 0xde, 0x9c, 0x71, 0x15, 0x70, 0x9d, + 0xcf, 0xbf, 0x2b, 0x72, 0x23, 0x80, 0x3f, 0x4f, 0x60, 0xe4, 0x28, 0x5a, 0x57, 0xa8, 0x62, 
0xb0, + 0xca, 0xe8, 0x44, 0xd3, 0xb9, 0x87, 0x14, 0x8e, 0xd7, 0xd2, 0x47, 0xbf, 0x9d, 0xb5, 0x41, 0xf8, + 0xe6, 0x22, 0x5f, 0x12, 0x75, 0x09, 0xd6, 0x70, 0x39, 0x32, 0xc3, 0x6e, 0xc3, 0x6a, 0xc6, 0x0b, + 0x23, 0x94, 0x99, 0x4e, 0x26, 0x82, 0xc5, 0x76, 0xfb, 0x10, 0xfd, 0xce, 0x61, 0x1a, 0xf3, 0x3e, + 0xb5, 0xb0, 0x55, 0xf8, 0xd7, 0xb6, 0x3c, 0x9a, 0xc6, 0xe0, 0xec, 0x53, 0x72, 0x28, 0x41, 0xba, + 0x83, 0xd2, 0xe3, 0xf0, 0x31, 0x8a, 0x24, 0x70, 0x55, 0x1b, 0xb6, 0xc1, 0x7c, 0x82, 0x6f, 0x95, + 0x40, 0x59, 0x4e, 0x44, 0xef, 0x97, 0x12, 0x69, 0xe6, 0xc0, 0xa0, 0x58, 0x71, 0xeb, 0x7c, 0xb2, + 0x83, 0xc3, 0xc9, 0x6a, 0x53, 0x83, 0x38, 0x2c, 0x4a, 0x77, 0xed, 0x62, 0x51, 0x8f, 0x77, 0x10, + 0xe2, 0x99, 0x68, 0xcb, 0xf5, 0x50, 0xca, 0xaf, 0x1c, 0x08, 0xf6, 0x5c, 0xb6, 0xa6, 0x5f, 0x2d, + 0x56, 0x9e, 0x5d, 0x56, 0x61, 0x63, 0x6e, 0xb1, 0x13, 0x82, 0xfd, 0xcf, 0x53, 0xf9, 0x1e, 0x08, + 0xe2, 0x34, 0xb4, 0xb8, 0x83, 0x7e, 0x01, 0xde, 0xf6, 0x63, 0xa1, 0x3b, 0xab, 0xd7, 0xf9, 0xf3, + 0xb2, 0x4a, 0xb9, 0xa0, 0x57, 0x4e, 0x8e, 0x4f, 0x2b, 0x3c, 0x31, 0x02, 0x18, 0x1b, 0x9b, 0xf6, + 0x40, 0xb7, 0xf8, 0x08, 0xf6, 0x39, 0x95, 0xc5, 0xec, 0x3a, 0xe8, 0x2c, 0x30, 0x85, 0x92, 0x60, + 0x3e, 0x6f, 0xbc, 0xae, 0x34, 0xdc, 0xbe, 0x08, 0x03, 0x58, 0x80, 0x34, 0x7c, 0x4c, 0xb0, 0x74, + 0x0b, 0xd3, 0x2d, 0x48, 0x42, 0xd4, 0x6c, 0x5b, 0x7d, 0x23, 0x17, 0xfc, 0x89, 0xc2, 0x0d, 0x1a, + 0x9c, 0x26, 0xb4, 0x6f, 0x0d, 0x5a, 0x3e, 0xe7, 0xc1, 0xbb, 0x65, 0xb2, 0xad, 0x1f, 0x61, 0x91, + 0x7c, 0xa6, 0xb4, 0xe9, 0xe2, 0xce, 0xad, 0xca, 0x66, 0xd5, 0xb1, 0x69, 0x9c, 0xe1, 0x03, 0x33, + 0xf3, 0x20, 0xbc, 0xd7, 0x2e, 0x9b, 0x30, 0xae, 0x67, 0x4f, 0x7e, 0xd3, 0x63, 0x7b, 0xeb, 0x2c, + 0xb2, 0x07, 0x68, 0xf3, 0x5a, 0xb2, 0xae, 0x00, 0x97, 0xc2, 0xc0, 0xb9, 0xa0, 0xc7, 0xe9, 0xc3, + 0xba, 0x1e, 0xc8, 0xee, 0xbc, 0x51, 0x74, 0x1f, 0xed, 0xad, 0x91, 0xbb, 0xad, 0x8e, 0x81, 0xc0, + 0xa2, 0x96, 0x9d, 0xeb, 0x92, 0x51, 0xa2, 0x02, 0x78, 0x8b, 0x93, 0x83, 0x00, 0x2c, 0x5b, 0x27, + 0xc6, 0x48, 0x87, 0xd8, 0x79, 0x03, 0xb3, 0x34, 0x20, 0xd2, 0x4b, 0xd7, 0xe9, 0xc5, 0x11, 0x6a, + 0xd0, 0xc4, 0x1e, 0xcf, 0xa7, 0x79, 0x9f, 0xe5, 0xba, 0x02, 0x92, 0x6d, 0x64, 0x01, 0x8f, 0xe3, + 0x17, 0xaa, 0xe6, 0x3f, 0x91, 0x9b, 0x8c, 0xa9, 0x12, 0xfd, 0x0d, 0x06, 0x7e, 0xe4, 0xbe, 0xdd, + 0xbf, 0x75, 0x5c, 0xbb, 0xb2, 0xa7, 0x5b, 0xc4, 0x95, 0xf9, 0x71, 0x76, 0xe5, 0x0e, 0x78, 0xcf, + 0x4b, 0x65, 0xfe, 0x9e, 0xfe, 0x50, 0x47, 0xbd, 0x87, 0xf9, 0x18, 0x20, 0x81, 0xfd, 0x0e, 0x32, + 0x54, 0xd4, 0x5b, 0x15, 0xe5, 0x8e, 0x02, 0x8b, 0xc9, 0x9b, 0x75, 0x27, 0x1b, 0x82, 0x75, 0xf0, + 0xa4, 0xe9, 0xed, 0x24, 0x53, 0xa7, 0xbf, 0x3d, 0x2c, 0xf1, 0xd8, 0xde, 0xec, 0xfb, 0x84, 0xcd, + 0xc4, 0xc3, 0xa9, 0x88, 0x45, 0xab, 0x3d, 0xad, 0x97, 0x80, 0x9b, 0x48, 0x10, 0xbe, 0x7c, 0x60, + 0xce, 0xaa, 0x72, 0xf3, 0x8d, 0xa2, 0x86, 0x3b, 0x0f, 0x96, 0x47, 0x0a, 0xa0, 0x59, 0x4e, 0x4d, + 0x04, 0xd9, 0x61, 0x3a, 0x32, 0x4e, 0xdd, 0x1f, 0x1b, 0xc5, 0x8c, 0x9e, 0x14, 0xd2, 0xcb, 0x09, + 0xe4, 0x74, 0xb5, 0xca, 0x06, 0xe8, 0x4a, 0x03, 0x2d, 0x64, 0x83, 0x65, 0x30, 0x2c, 0x40, 0x72, + 0x97, 0x4c, 0xa0, 0x7d, 0x93, 0x49, 0x3d, 0xf9, 0xba, 0x31, 0x55, 0x2f, 0xb5, 0x7f, 0x70, 0x69, + 0x52, 0x76, 0xb0, 0x1d, 0xa8, 0xaa, 0x60, 0xb6, 0x1d, 0x09, 0x71, 0x7b, 0xfe, 0x5c, 0xbe, 0x38, + 0x9e, 0x9f, 0xeb, 0xf3, 0x77, 0xe7, 0xba, 0xd2, 0xdd, 0x97, 0x67, 0xd6, 0x39, 0x32, 0xf7, 0xdd, + 0x13, 0xdf, 0xba, 0xf6, 0x68, 0x83, 0xb1, 0x8d, 0x8c, 0x64, 0x70, 0x88, 0xe0, 0x01, 0x74, 0xf4, + 0x63, 0x3b, 0x3f, 0x57, 0x6f, 0x21, 0xd7, 0xc5, 0x1e, 0xf6, 0x13, 0xeb, 0xab, 0x1e, 0xb1, 0x61, + 0x4d, 0xd5, 0x45, 
0x96, 0x9c, 0xe5, 0xc6, 0x09, 0xc3, 0xd6, 0xb8, 0xc5, 0x4e, 0xdb, 0xc2, 0x44, + 0xbf, 0x33, 0x78, 0xda, 0x86, 0xd9, 0x14, 0xa9, 0x5e, 0x00, 0x7b, 0xc5, 0x9d, 0xa1, 0x61, 0x42, + 0x67, 0x42, 0x3d, 0xad, 0x56, 0xae, 0x8d, 0x2d, 0x24, 0x34, 0xa0, 0x7f, 0xd2, 0xc3, 0xf4, 0x89, + 0xf6, 0xa5, 0xaa, 0xb8, 0x49, 0x62, 0x9d, 0xb6, 0x9b, 0x00, 0x9c, 0x65, 0x79, 0x3a, 0x06, 0x5f, + 0x9a, 0x68, 0x81, 0xc2, 0x98, 0xd9, 0xb6, 0x5b, 0x48, 0x5c, 0x55, 0x8f, 0xc8, 0x43, 0x7b, 0xce, + 0xc6, 0x02, 0x9e, 0x5b, 0xd2, 0x11, 0x32, 0xa1, 0xdb, 0x44, 0xbf, 0xf2, 0xd4, 0x22, 0x54, 0xae, + 0x6c, 0x33, 0xcf, 0x20, 0xa6, 0xf4, 0x26, 0x9c, 0xaa, 0x49, 0xbe, 0xe8, 0x51, 0x02, 0x31, 0x71, + 0x60, 0xca, 0x3a, 0x94, 0xa5, 0x0c, 0x37, 0x7d, 0xde, 0x82, 0x25, 0x40, 0xd9, 0x4b, 0xda, 0x45, + 0x2f, 0x30, 0x97, 0x3b, 0xd8, 0xc9, 0x69, 0xd3, 0xf0, 0x0d, 0x17, 0x73, 0xfd, 0x8e, 0xb1, 0xad, + 0xf9, 0x6e, 0x0f, 0x4f, 0x05, 0xf0, 0xfe, 0x7d, 0xad, 0xcb, 0xb1, 0x6b, 0x29, 0x1b, 0x73, 0x09, + 0xa7, 0xaa, 0x92, 0x24, 0x51, 0x81, 0x9b, 0x49, 0x96, 0x54, 0xb9, 0x4c, 0x31, 0x8a, 0xd1, 0x08, + 0x26, 0x88, 0x29, 0xd2, 0x22, 0xa4, 0x61, 0x7d, 0xb0, 0x4a, 0x55, 0xe2, 0x78, 0x0d, 0xdc, 0x9b, + 0x16, 0x6c, 0x70, 0x83, 0x73, 0x34, 0xc3, 0xe9, 0xf8, 0xc9, 0x72, 0x20, 0xad, 0x48, 0x73, 0x1b, + 0x00, 0xe4, 0x03, 0x44, 0xb2, 0x3c, 0x42, 0x77, 0xea, 0x4e, 0x19, 0x01, 0x14, 0x76, 0xc6, 0x84, + 0x2f, 0x89, 0x21, 0xa7, 0x7a, 0x82, 0x37, 0x38, 0x11, 0x9c, 0x1b, 0x18, 0xad, 0x4c, 0x0f, 0x3d, + 0xab, 0xb1, 0x3a, 0x0b, 0xb1, 0x95, 0x8d, 0xbd, 0x62, 0xa0, 0x5d, 0xff, 0x3d, 0x54, 0x76, 0x8b, + 0x3e, 0xb5, 0x82, 0xde, 0x64, 0x56, 0xe4, 0x90, 0x3a, 0x77, 0x80, 0xdd, 0xab, 0x02, 0x98, 0x27, + 0x0c, 0x1c, 0xab, 0xf2, 0xb4, 0x66, 0x3e, 0x2b, 0x6f, 0x59, 0x09, 0x2d, 0x41, 0x7d, 0x34, 0xec, + 0xe7, 0x40, 0xb5, 0x61, 0x07, 0x1e, 0xf7, 0xcd, 0xdd, 0x46, 0x40, 0x03, 0xb0, 0xc8, 0x70, 0x63, + 0x9e, 0xf6, 0xcd, 0x5b, 0xbe, 0x84, 0xd5, 0xaf, 0xc8, 0xb2, 0xf1, 0x16, 0xb0, 0xa2, 0x95, 0x33, + 0x0c, 0x05, 0xe3, 0x23, 0x6d, 0x89, 0x38, 0x83, 0x1e, 0x35, 0xa8, 0xcc, 0x4c, 0x68, 0xa2, 0x3e, + 0x78, 0x15, 0x3f, 0x6e, 0x78, 0x05, 0x14, 0x3f, 0xa2, 0x14, 0x3b, 0xc5, 0xe2, 0x47, 0x57, 0x11, + 0xbe, 0x95, 0x9a, 0x4d, 0x70, 0x22, 0x3e, 0xd8, 0x8a, 0x40, 0x7c, 0xa9, 0x94, 0x83, 0x2e, 0xbf, + 0x94, 0x98, 0xa9, 0x8f, 0x6e, 0x10, 0x80, 0x4d, 0x26, 0x55, 0xf8, 0x43, 0xe0, 0x63, 0x3a, 0x62, + 0x8f, 0x75, 0x05, 0xc3, 0xba, 0xe8, 0x57, 0x36, 0x37, 0x37, 0x7e, 0x95, 0xb3, 0x03, 0xed, 0x1f, + 0xf6, 0xd7, 0xaa, 0x6b, 0x7e, 0x0f, 0x2e, 0x77, 0x9f, 0x9f, 0xd5, 0xa0, 0xef, 0xd7, 0xb5, 0x2e, + 0xcb, 0x8d, 0x50, 0x8e, 0xdf, 0xf7, 0xc3, 0x6e, 0x50, 0xd2, 0xe1, 0xcf, 0x70, 0xc3, 0x6b, 0x2b, + 0x0f, 0xee, 0x47, 0x16, 0x67, 0x31, 0xbf, 0x13, 0x94, 0x6c, 0x0f, 0xcb, 0x7d, 0xb4, 0x0f, 0xd5, + 0x21, 0xd7, 0xab, 0x80, 0xdb, 0x5f, 0x1c, 0x8a, 0xa0, 0x65, 0x8b, 0x24, 0x0a, 0xca, 0x96, 0x9f, + 0x9e, 0x01, 0x84, 0xd0, 0xf5, 0xeb, 0x64, 0x32, 0xb4, 0xe0, 0xfc, 0xab, 0x9f, 0xcf, 0x29, 0x8d, + 0x8a, 0x07, 0xf8, 0x8d, 0x55, 0x1a, 0xf0, 0x85, 0x6d, 0xb4, 0x26, 0x95, 0x3c, 0x87, 0xaa, 0xe7, + 0x2f, 0x86, 0x61, 0xf6, 0x52, 0x17, 0xa9, 0x0c, 0x24, 0xe4, 0x8f, 0xcb, 0x01, 0x2c, 0x8c, 0x18, + 0xc5, 0x57, 0xac, 0x11, 0x4e, 0x6d, 0xaf, 0xf2, 0x3c, 0xb2, 0xf8, 0x87, 0x80, 0x18, 0xa3, 0xf3, + 0x50, 0xca, 0x4a, 0x4f, 0xd3, 0x71, 0xa6, 0xc3, 0x81, 0xd1, 0x9c, 0x52, 0x05, 0x6d, 0x21, 0xbf, + 0xd9, 0x60, 0x87, 0x1c, 0x26, 0x6a, 0x4c, 0xbd, 0xd6, 0xf7, 0x57, 0xd7, 0xa9, 0x57, 0xb6, 0x46, + 0x51, 0xa7, 0x31, 0x2b, 0xdf, 0x22, 0xa6, 0x08, 0x56, 0x61, 0x50, 0xc2, 0x5b, 0xdd, 0xd8, 0x41, + 0xae, 0xe1, 0xea, 0x9f, 0xfd, 0xac, 0xc2, 
0xca, 0x62, 0x06, 0x75, 0xa1, 0x1c, 0x76, 0xec, 0x25, + 0xce, 0x9a, 0xf1, 0x8d, 0x3b, 0xe8, 0x1e, 0x34, 0x44, 0xe2, 0xae, 0xcf, 0x37, 0x90, 0x8d, 0x7d, + 0x69, 0x82, 0x68, 0xf0, 0xe1, 0xb6, 0x22, 0x6f, 0xe3, 0xd6, 0x09, 0x2c, 0x46, 0xc7, 0xac, 0x30, + 0xd0, 0x15, 0xc7, 0xcb, 0xb6, 0x94, 0xce, 0xd8, 0xd4, 0xca, 0xfa, 0x51, 0x14, 0x83, 0x4c, 0x92, + 0xa0, 0xfb, 0x92, 0x16, 0x17, 0xde, 0x6e, 0x2f, 0xcb, 0x25, 0x88, 0xca, 0xdf, 0x3e, 0x54, 0xd3, + 0x3a, 0xbf, 0x5c, 0x5f, 0x6c, 0x84, 0x2a, 0xe4, 0x96, 0xab, 0x4e, 0x3f, 0xb8, 0x5e, 0x60, 0x01, + 0xa1, 0x62, 0x25, 0x98, 0xd7, 0x60, 0x15, 0x33, 0x19, 0x09, 0xe4, 0xfa, 0x73, 0xed, 0xb9, 0x72, + 0x11, 0xfb, 0x4e, 0x4d, 0xa3, 0x07, 0x0b, 0xe1, 0xbd, 0x37, 0x75, 0xa5, 0xa1, 0xfb, 0xc7, 0x1f, + 0xf0, 0xe9, 0x92, 0x0e, 0x3d, 0xbc, 0x30, 0x13, 0xa9, 0x2a, 0xc4, 0x34, 0x9e, 0x3d, 0x99, 0x45, + 0x42, 0x91, 0x41, 0x84, 0xf9, 0xc9, 0xc9, 0xc4, 0xb9, 0x2b, 0x02, 0xad, 0x36, 0x64, 0xde, 0xba, + 0x55, 0x0e, 0x58, 0xbe, 0x70, 0x5d, 0x4c, 0x0b, 0x35, 0xc4, 0x9e, 0xaf, 0x96, 0xf7, 0xb3, 0x2f, + 0x04, 0x47, 0xde, 0xa8, 0x85, 0xb4, 0x97, 0x9c, 0x37, 0x32, 0x0d, 0x23, 0x65, 0x9c, 0xfc, 0x76, + 0xa6, 0x54, 0xfc, 0xf0, 0x43, 0x95, 0x1d, 0xfd, 0xa3, 0x22, 0xe3, 0xca, 0x44, 0x22, 0x8b, 0x21, + 0x11, 0xe8, 0x88, 0x7e, 0xfb, 0x42, 0xbb, 0x1c, 0x7e, 0x2d, 0x38, 0xcf, 0x9a, 0x34, 0x2a, 0x02, + 0x03, 0xe1, 0x17, 0x32, 0x3a, 0x68, 0x3d, 0x55, 0x9f, 0x91, 0x3f, 0x3c, 0x68, 0xa8, 0xc6, 0x82, + 0xa4, 0x35, 0x47, 0x64, 0x29, 0x46, 0x60, 0xf6, 0x60, 0xc1, 0x4f, 0xd1, 0x21, 0xd6, 0xe2, 0x19, + 0xc7, 0x3e, 0x9f, 0x31, 0x71, 0x10, 0x1e, 0xad, 0x35, 0xb2, 0x1d, 0x52, 0xef, 0x42, 0xf6, 0xc5, + 0x52, 0x17, 0xe3, 0xa0, 0x34, 0xe2, 0x61, 0x27, 0x4e, 0x17, 0x90, 0x91, 0x2e, 0xe2, 0x88, 0xc8, + 0x87, 0x27, 0x47, 0x89, 0x5f, 0xed, 0xd4, 0x0b, 0x48, 0x0d, 0x0e, 0xe1, 0xfa, 0xe2, 0x9d, 0x3c, + 0x03, 0xf9, 0x7c, 0xb5, 0xef, 0x1a, 0x07, 0x09, 0x3c, 0xe4, 0xe7, 0x37, 0xd8, 0x18, 0x82, 0x80, + 0xeb, 0xcd, 0xb1, 0x2f, 0x5b, 0xd4, 0x8a, 0xcd, 0xc0, 0x7b, 0x2b, 0x4a, 0xa5, 0x66, 0x5d, 0x1d, + 0x60, 0xc9, 0x86, 0x19, 0x5f, 0xb1, 0xc4, 0x68, 0x79, 0x88, 0x71, 0x33, 0x34, 0xd9, 0xc8, 0x8e, + 0x37, 0xa0, 0x57, 0xa2, 0xc1, 0x31, 0x7e, 0x7b, 0x34, 0x2d, 0x8a, 0x14, 0x06, 0x3e, 0xdb, 0xae, + 0x0c, 0xc9, 0x5f, 0x1a, 0x94, 0x7e, 0xa7, 0x5b, 0x3d, 0xc0, 0xa1, 0x08, 0x4a, 0x62, 0x99, 0xea, + 0xd0, 0x45, 0xc0, 0x59, 0x4f, 0x98, 0x34, 0xed, 0x2c, 0xd3, 0xfd, 0x2d, 0x94, 0xf8, 0xbf, 0xed, + 0x96, 0xec, 0x47, 0x73, 0x70, 0x3d, 0xb4, 0x1f, 0xa1, 0x09, 0x17, 0x0e, 0x18, 0x0b, 0xff, 0x7d, + 0x0a, 0x09, 0xd5, 0x63, 0x5c, 0xca, 0x64, 0x5e, 0x3e, 0x4b, 0xe9, 0x59, 0x25, 0xc7, 0xd3, 0x60, + 0xef, 0xc6, 0xec, 0x30, 0x3d, 0xbf, 0xab, 0xbd, 0xa0, 0xb3, 0x84, 0xeb, 0x99, 0x48, 0xbd, 0x99, + 0x11, 0x41, 0xb8, 0x54, 0xaf, 0x09, 0xc0, 0x49, 0x5f, 0x86, 0x40, 0xe2, 0x3c, 0x19, 0x54, 0xb2, + 0x98, 0x59, 0xd0, 0xb4, 0xf8, 0x37, 0xf5, 0x74, 0xa6, 0xb9, 0x83, 0x1e, 0xae, 0xa4, 0xd7, 0x7d, + 0x0a, 0x0f, 0x03, 0xa4, 0x6e, 0xa0, 0x50, 0x51, 0x9a, 0x7d, 0xb6, 0x14, 0x9f, 0x91, 0xce, 0x16, + 0x3f, 0x79, 0x99, 0xc4, 0xca, 0x5d, 0xfb, 0x26, 0x7b, 0x2d, 0x32, 0x47, 0x51, 0x9f, 0xac, 0x19, + 0x48, 0xfc, 0x24, 0x05, 0x04, 0x6b, 0x49, 0x17, 0x73, 0xed, 0x88, 0x40, 0x77, 0xf0, 0x74, 0xfc, + 0x19, 0xe9, 0xdd, 0x68, 0x77, 0x5f, 0xb8, 0x0c, 0xa1, 0x6e, 0xc0, 0x56, 0x68, 0x9f, 0xea, 0xe0, + 0x20, 0xcd, 0xcb, 0xc5, 0x26, 0x7b, 0xbd, 0x7d, 0xb0, 0x30, 0xcf, 0x15, 0xb3, 0xb4, 0x22, 0x3f, + 0x2c, 0xb4, 0x78, 0xef, 0x6d, 0x17, 0xa1, 0x74, 0xda, 0x3f, 0x44, 0xad, 0x73, 0xe2, 0xa4, 0xd3, + 0x76, 0x1e, 0x89, 0x0e, 0xb1, 0x6d, 0xee, 0x55, 0x39, 0x68, 0xc1, 
0x49, 0x3d, 0xb9, 0x66, 0xac, + 0x0b, 0xb7, 0x63, 0x75, 0x90, 0x12, 0x51, 0x2a, 0x92, 0x0b, 0xb5, 0x25, 0x06, 0x39, 0x6b, 0xaa, + 0x88, 0x1a, 0x3e, 0x40, 0x06, 0x3a, 0x43, 0xf9, 0xe4, 0x3c, 0xad, 0x83, 0xa5, 0x82, 0xa8, 0x04, + 0x8a, 0xaf, 0x8f, 0xd8, 0x61, 0x95, 0x1b, 0xd2, 0x4a, 0xc5, 0x24, 0xdf, 0xf7, 0x72, 0x6d, 0xc9, + 0x8c, 0x79, 0x85, 0xa9, 0x59, 0xf9, 0x13, 0x04, 0xf9, 0x5e, 0xea, 0x0a, 0xed, 0xc4, 0xe2, 0x04, + 0x10, 0xfa, 0xf1, 0x24, 0x1e, 0x71, 0x5b, 0x3b, 0x85, 0x81, 0x92, 0xdf, 0x86, 0xc1, 0x34, 0x8f, + 0xa2, 0x85, 0xce, 0xa0, 0x08, 0x7c, 0x2c, 0xa6, 0x0d, 0xd5, 0x76, 0xf7, 0x36, 0x20, 0xea, 0x33, + 0x0d, 0xc0, 0xa6, 0x09, 0x7a, 0x9d, 0x2f, 0x04, 0xd1, 0xa7, 0xd2, 0x4e, 0xdc, 0xba, 0x1f, 0x72, + 0xf0, 0xe4, 0x9b, 0x24, 0x8c, 0x11, 0xdf, 0x4d, 0x75, 0x81, 0x6b, 0x55, 0xf8, 0xc3, 0x96, 0x1f, + 0xc1, 0xd3, 0x59, 0x8c, 0xdb, 0xf5, 0xb9, 0x75, 0x9d, 0xaf, 0x08, 0x9f, 0x47, 0x86, 0xb0, 0x85, + 0xf2, 0xaf, 0x46, 0xea, 0xb4, 0xe3, 0x46, 0x92, 0x6e, 0xbe, 0x71, 0xe3, 0x38, 0x9d, 0x51, 0xf5, + 0xe6, 0x70, 0x20, 0xc0, 0x4c, 0x29, 0x46, 0x22, 0x44, 0xe4, 0xf4, 0x22, 0x67, 0x20, 0x9b, 0xac, + 0xa9, 0x6b, 0x6d, 0x0f, 0x2a, 0x5e, 0xdd, 0xa4, 0xfe, 0x7c, 0xba, 0xc1, 0xfd, 0xd5, 0x2e, 0x9c, + 0xbe, 0xc8, 0x5a, 0x73, 0xba, 0xaf, 0xde, 0x75, 0xbd, 0x16, 0x33, 0xc2, 0x26, 0xb5, 0x77, 0x98, + 0x2e, 0x6a, 0x35, 0x4b, 0xc3, 0xc1, 0x7f, 0xe3, 0xbd, 0x88, 0x84, 0x53, 0x1c, 0x98, 0xf4, 0xc9, + 0x22, 0x00, 0xd4, 0xc8, 0x08, 0x73, 0x81, 0x80, 0xe7, 0xef, 0xf7, 0x5f, 0x25, 0xd4, 0xed, 0xa8, + 0x4f, 0x6f, 0x08, 0x3b, 0x71, 0x43, 0x6b, 0x64, 0x2e, 0xc6, 0x82, 0x11, 0x8e, 0xfb, 0x0d, 0x44, + 0x99, 0x87, 0xf9, 0xf1, 0x32, 0x39, 0x31, 0xb1, 0x21, 0x4f, 0xcb, 0xc6, 0x0c, 0x83, 0x6a, 0x3f, + 0xb4, 0x2f, 0xc6, 0xaa, 0xaf, 0x7c, 0x39, 0xd0, 0xfd, 0xc9, 0x13, 0xac, 0x45, 0x2d, 0x4c, 0x4a, + 0x21, 0xf5, 0x57, 0xc5, 0x7d, 0x7b, 0x9d, 0x05, 0xd0, 0x7d, 0x9f, 0x35, 0x85, 0xb3, 0x6a, 0xc1, + 0xe2, 0x6b, 0x4d, 0x17, 0xb8, 0x41, 0x29, 0x36, 0x4b, 0xd8, 0x10, 0x8b, 0x1b, 0x25, 0xfb, 0xe1, + 0xc7, 0x97, 0x90, 0x15, 0x85, 0x77, 0x7d, 0x01, 0x83, 0xb6, 0xf8, 0xb5, 0xc1, 0x1c, 0x4e, 0x2d, + 0x6b, 0xc2, 0x19, 0xce, 0x84, 0x60, 0x1c, 0x97, 0x76, 0x7c, 0x9c, 0x6f, 0xce, 0x8b, 0x30, 0xc5, + 0x29, 0xfa, 0x66, 0x60, 0x1a, 0x87, 0xee, 0xe4, 0x33, 0xda, 0xa9, 0x37, 0x9f, 0xe1, 0x14, 0x20, + 0x26, 0xf1, 0x2d, 0x72, 0x55, 0xee, 0xe2, 0x7a, 0x56, 0x12, 0x5e, 0x9a, 0x00, 0xd2, 0x4d, 0xdb, + 0x7f, 0x95, 0x0e, 0x68, 0x46, 0xd2, 0xd1, 0xc8, 0x4c, 0xab, 0x6e, 0x69, 0xd8, 0x04, 0xd7, 0x4f, + 0xf6, 0x14, 0xa2, 0x7f, 0xa1, 0x35, 0x0f, 0xe7, 0x30, 0x9e, 0x05, 0x50, 0xc8, 0x61, 0xa3, 0xa1, + 0x07, 0xa4, 0x26, 0x1c, 0x17, 0xa2, 0xb1, 0xe0, 0x62, 0xe5, 0x41, 0x07, 0x14, 0xec, 0x73, 0xd0, + 0xbc, 0x4b, 0x7a, 0x10, 0x86, 0x1b, 0x05, 0x5f, 0xe3, 0xdd, 0x34, 0x60, 0xdb, 0xb0, 0x04, 0x8c, + 0x2a, 0x08, 0x24, 0x59, 0xec, 0x9d, 0x7b, 0x37, 0x13, 0x38, 0xd8, 0x01, 0xef, 0x38, 0x32, 0xa9, + 0xfb, 0x6d, 0x99, 0x55, 0x20, 0x41, 0x32, 0xe0, 0x63, 0xc5, 0xcc, 0x25, 0x86, 0x82, 0xf0, 0x14, + 0xe7, 0x29, 0x7f, 0x43, 0x30, 0xbc, 0xe3, 0xf9, 0x81, 0xe7, 0x96, 0xb6, 0xfd, 0x5d, 0x44, 0xb7, + 0xbc, 0x9d, 0x68, 0x04, 0x76, 0x98, 0x8a, 0x9e, 0x2f, 0x35, 0xf2, 0xe1, 0xb1, 0x82, 0x74, 0xf8, + 0x5c, 0x13, 0xa3, 0xec, 0xc4, 0x99, 0xac, 0x98, 0xec, 0x9f, 0x44, 0x68, 0x22, 0x91, 0x94, 0x82, + 0x26, 0x7d, 0x4c, 0x73, 0x23, 0xce, 0xa4, 0x1a, 0xf4, 0x27, 0xc0, 0x4a, 0x41, 0xd1, 0x9e, 0x16, + 0xee, 0xb1, 0x69, 0xf2, 0x19, 0x5d, 0xbf, 0x04, 0x2f, 0x3c, 0xdb, 0xb1, 0xd1, 0xea, 0xfb, 0x1e, + 0x67, 0x02, 0x54, 0x82, 0x42, 0x40, 0x9d, 0xe0, 0x39, 0xb6, 0xac, 0xa7, 0x72, 0x89, 0x28, 
0xf0, + 0x7d, 0x66, 0x7b, 0xd9, 0xf8, 0x36, 0x9c, 0x18, 0xda, 0x61, 0xb9, 0x8f, 0xe2, 0xfb, 0xdf, 0x37, + 0x44, 0x86, 0x64, 0xd8, 0xf6, 0x7f, 0xa1, 0x2a, 0x22, 0xdd, 0x67, 0x05, 0x75, 0x17, 0x40, 0xf3, + 0xea, 0x07, 0x1f, 0x66, 0xc8, 0x3c, 0x18, 0x5b, 0x97, 0x01, 0xf7, 0x1f, 0x77, 0x47, 0xf3, 0xee, + 0xed, 0xcf, 0xa8, 0x44, 0xd5, 0x89, 0xdb, 0xea, 0x17, 0x78, 0x12, 0xfb, 0x29, 0x31, 0xc3, 0xe8, + 0x4d, 0x41, 0x0e, 0x9a, 0x21, 0xb7, 0x91, 0x7b, 0x88, 0x8b, 0xc7, 0x67, 0xaf, 0x27, 0xbd, 0x38, + 0x46, 0x56, 0x6a, 0x98, 0x07, 0x8c, 0x3f, 0xf8, 0x9e, 0xa5, 0xc1, 0xe5, 0xdd, 0x4c, 0x11, 0x57, + 0xb3, 0xdf, 0x20, 0x94, 0x0c, 0xbc, 0x5c, 0xa7, 0x96, 0x99, 0x5e, 0x8f, 0x97, 0x1d, 0x06, 0x37, + 0x9b, 0x7e, 0x3c, 0xb7, 0x7e, 0xe6, 0xbe, 0x16, 0xaa, 0x43, 0xa4, 0xe4, 0x2f, 0xda, 0xe4, 0x45, + 0xfa, 0x20, 0x2c, 0xeb, 0x78, 0x46, 0xfc, 0x7f, 0xb0, 0x6d, 0xa8, 0xb9, 0x68, 0x30, 0x21, 0xf6, + 0xa8, 0x1f, 0x10, 0x2a, 0x9b, 0x8b, 0x72, 0x9f, 0xef, 0xe1, 0xe4, 0xb6, 0x03, 0x9e, 0x1f, 0x85, + 0x83, 0x77, 0xc0, 0x4b, 0x26, 0x98, 0x23, 0xb9, 0xf2, 0x2c, 0x20, 0xbf, 0x96, 0x2b, 0x50, 0x74, + 0xaa, 0x0f, 0x28, 0x55, 0x69, 0x35, 0x86, 0x86, 0xbf, 0x1a, 0xdb, 0x42, 0x3a, 0xfe, 0x2e, 0x36, + 0x6c, 0xae, 0x8b, 0x7d, 0x64, 0xc1, 0xe0, 0x1b, 0x13, 0xef, 0xac, 0xae, 0x65, 0x82, 0xc0, 0xe8, + 0xef, 0x08, 0x29, 0x96, 0xbe, 0xaf, 0xb5, 0x44, 0x0a, 0x9a, 0x20, 0x93, 0xaa, 0x03, 0xfc, 0xca, + 0x0a, 0x2d, 0x35, 0x19, 0xfa, 0xd8, 0xd5, 0x14, 0x22, 0x1b, 0x4f, 0x78, 0xf5, 0x74, 0xb6, 0x7d, + 0xd8, 0xbf, 0xfe, 0x02, 0xd2, 0x9d, 0x71, 0x47, 0xec, 0xf0, 0x45, 0x0e, 0xa3, 0xb5, 0xa4, 0x9f, + 0x7a, 0xf2, 0x03, 0x2a, 0x8c, 0x77, 0x47, 0x83, 0x4c, 0xcb, 0x7a, 0xb0, 0x4b, 0x24, 0x20, 0x0c, + 0x9c, 0x0e, 0x4f, 0xb8, 0x59, 0x26, 0xa8, 0x5a, 0xb0, 0x98, 0x99, 0x4e, 0x88, 0x5d, 0xb5, 0x2e, + 0xde, 0xf4, 0xf8, 0x56, 0x72, 0x17, 0x7f, 0x66, 0x45, 0xb0, 0x8e, 0x76, 0xa8, 0xc1, 0xce, 0x08, + 0x9c, 0x15, 0x82, 0xa6, 0xc2, 0x6e, 0x48, 0x65, 0x20, 0x2b, 0x59, 0x49, 0x55, 0x7a, 0x2b, 0x39, + 0x7e, 0x2d, 0xff, 0xed, 0xdd, 0x1c, 0xbd, 0xcf, 0x5c, 0xc9, 0x18, 0xd5, 0xf8, 0xdf, 0xfd, 0xe2, + 0x91, 0x4c, 0x68, 0x2a, 0x51, 0x65, 0x9b, 0xac, 0x74, 0x1c, 0x08, 0x5c, 0x2f, 0x72, 0x2b, 0x45, + 0x7e, 0xdf, 0x28, 0x09, 0x81, 0xd1, 0x46, 0xc0, 0x80, 0x48, 0x06, 0x88, 0xad, 0xa3, 0xee, 0xc5, + 0xc6, 0xc4, 0x47, 0x8b, 0x51, 0x4f, 0x57, 0x0a, 0x23, 0xf1, 0x20, 0x51, 0xd6, 0xcd, 0x87, 0x81, + 0xed, 0xe2, 0x38, 0xfd, 0xa8, 0x89, 0x4d, 0x94, 0x34, 0xe6, 0x19, 0xd9, 0x40, 0x79, 0x5d, 0x1b, + 0xf3, 0x0c, 0xda, 0x99, 0xb4, 0x2e, 0x02, 0xa5, 0xcc, 0x42, 0xf9, 0x03, 0x17, 0x6d, 0x27, 0xcb, + 0x6f, 0x35, 0xc5, 0x97, 0xa2, 0xcc, 0x53, 0x05, 0xdc, 0xfe, 0x79, 0x48, 0x5b, 0x1a, 0x70, 0x66, + 0x5c, 0x07, 0xfd, 0xc1, 0xa6, 0x8c, 0x05, 0x82, 0x12, 0xed, 0xd3, 0xb9, 0xcc, 0x97, 0xb5, 0xfc, + 0x8b, 0x95, 0x19, 0x08, 0xd6, 0x38, 0x1a, 0x7a, 0xf2, 0xd1, 0x0b, 0xe2, 0x15, 0x54, 0x97, 0xdb, + 0x51, 0x3f, 0x1e, 0x9e, 0x58, 0x77, 0xcf, 0xb1, 0x6c, 0xdb, 0xf1, 0xf6, 0x3c, 0xf9, 0x04, 0xb7, + 0x97, 0x0c, 0x7e, 0xe6, 0x9a, 0xca, 0x76, 0xc1, 0x98, 0x8e, 0x0e, 0x54, 0x85, 0x17, 0xb0, 0x0d, + 0xee, 0x15, 0xf9, 0x57, 0xec, 0x56, 0x0c, 0x8a, 0x82, 0xfd, 0x6f, 0x61, 0x00, 0x5c, 0x66, 0x99, + 0x73, 0x16, 0xee, 0x9e, 0xa4, 0xc1, 0x28, 0x83, 0xe9, 0x6c, 0x8d, 0x82, 0x47, 0xd6, 0xab, 0xce, + 0x9b, 0x1c, 0xb7, 0x84, 0xaf, 0x40, 0x51, 0x45, 0x59, 0xa4, 0xae, 0xc9, 0xe4, 0x24, 0x4b, 0x41, + 0x2e, 0xfc, 0xe3, 0xbd, 0xb8, 0x5b, 0xf3, 0x66, 0xf4, 0x5c, 0x94, 0x75, 0x0a, 0x40, 0x29, 0xc4, + 0x02, 0x32, 0xfc, 0xe8, 0x00, 0x69, 0x2c, 0x23, 0x42, 0xc3, 0xad, 0xfb, 0x6b, 0x27, 0x90, 0x79, + 0x44, 0x2c, 0x20, 
0x63, 0x3c, 0x51, 0x66, 0x62, 0x12, 0x77, 0x9a, 0x4c, 0x02, 0x70, 0x45, 0x4b, + 0x1a, 0xae, 0xee, 0xe1, 0x44, 0x91, 0xf5, 0x7e, 0x33, 0xfd, 0x1e, 0xcd, 0xed, 0xda, 0x9c, 0xaf, + 0xff, 0xbb, 0xff, 0x39, 0x86, 0xa9, 0xc2, 0x81, 0x1e, 0x9d, 0xd6, 0xe1, 0x8a, 0xe7, 0xb5, 0xbe, + 0x87, 0x71, 0x46, 0x8c, 0x4a, 0x4d, 0xa8, 0xe8, 0x37, 0x31, 0x4b, 0xdd, 0xcc, 0x81, 0xd2, 0x79, + 0xf6, 0x12, 0x63, 0x08, 0x02, 0xfa, 0x7e, 0x95, 0x0b, 0xff, 0x5b, 0x47, 0x22, 0x96, 0x21, 0xf2, + 0x14, 0x4e, 0xbd, 0x49, 0x26, 0xe1, 0xc3, 0xf8, 0x64, 0x2a, 0x54, 0xad, 0x75, 0x79, 0x45, 0x5c, + 0xf8, 0xac, 0x04, 0x6f, 0x74, 0x6d, 0x0d, 0xb5, 0x59, 0xf9, 0x42, 0x4e, 0xe2, 0x7b, 0x28, 0x6c, + 0xf7, 0xe3, 0xa4, 0xe6, 0xa3, 0x8e, 0xe9, 0x42, 0xb6, 0xed, 0x59, 0xad, 0xb8, 0x4d, 0x5f, 0xe4, + 0x59, 0xf4, 0xb4, 0x6e, 0xe0, 0x10, 0xf2, 0x35, 0xea, 0x02, 0x74, 0x5f, 0xf1, 0x4e, 0xf7, 0x07, + 0x8a, 0xfe, 0xd6, 0xb6, 0x4a, 0xd3, 0x5b, 0xc9, 0xbd, 0xad, 0x96, 0xee, 0x7a, 0xac, 0x9a, 0x48, + 0x2e, 0xc2, 0x2c, 0x83, 0xaf, 0x5b, 0xb2, 0xb9, 0x68, 0x0a, 0xd7, 0xb2, 0x1c, 0x47, 0x10, 0x67, + 0x9c, 0x67, 0xd6, 0x8b, 0xa8, 0xbd, 0xc2, 0x0a, 0xc2, 0xf2, 0x82, 0x9d, 0x0f, 0xb0, 0x3c, 0x5b, + 0xf5, 0xf5, 0x0c, 0xb8, 0xa2, 0x30, 0x0b, 0x37, 0x43, 0xcd, 0x21, 0xa6, 0x86, 0x8b, 0xd4, 0xca, + 0xb7, 0x2a, 0x71, 0x35, 0xa2, 0x4a, 0x18, 0xe3, 0xa1, 0x32, 0xe6, 0x31, 0x3c, 0xd0, 0x59, 0x35, + 0x8f, 0x2c, 0x82, 0x03, 0xbd, 0xea, 0x5b, 0x97, 0x4c, 0x3f, 0xbe, 0x6b, 0xe5, 0xe2, 0x8f, 0xbf, + 0xe9, 0x65, 0x29, 0x26, 0x2d, 0xe8, 0x43, 0xcb, 0x1e, 0x7c, 0x87, 0x15, 0x4d, 0x1d, 0x65, 0xc4, + 0x4a, 0xc5, 0x70, 0xe5, 0x4a, 0x70, 0x69, 0x69, 0x56, 0x22, 0xae, 0xd6, 0xc6, 0x18, 0x9e, 0xd1, + 0xa9, 0xa3, 0x3a, 0x81, 0xf9, 0xe5, 0x03, 0xe5, 0xf8, 0x03, 0x3b, 0x38, 0x7a, 0x2e, 0x3d, 0x3d, + 0x52, 0x36, 0x8a, 0xd3, 0x0f, 0x40, 0xeb, 0x72, 0xe2, 0x3c, 0x14, 0xe9, 0xfe, 0x5f, 0x99, 0xd4, + 0x19, 0x61, 0xcc, 0xc8, 0xe3, 0x53, 0xb5, 0x8c, 0x7a, 0xed, 0xf9, 0x73, 0xcb, 0xfb, 0x4b, 0xbe, + 0x10, 0xc9, 0xae, 0x20, 0xf7, 0x3c, 0xda, 0x4d, 0xd4, 0xa8, 0x07, 0xb4, 0xe1, 0xe5, 0x8c, 0xb6, + 0x12, 0xdf, 0xa6, 0xd8, 0xde, 0x2d, 0xa2, 0xad, 0x3d, 0x64, 0x77, 0x66, 0x40, 0xc4, 0x0c, 0x60, + 0x30, 0xfd, 0x0f, 0x17, 0xae, 0x9d, 0x11, 0x0c, 0xb3, 0x43, 0x63, 0x5a, 0xeb, 0xa3, 0x9c, 0xf1, + 0x5a, 0x39, 0x1d, 0x28, 0xe1, 0xbf, 0x7b, 0x39, 0x50, 0x3b, 0x43, 0xd4, 0xbd, 0x7c, 0xa9, 0x15, + 0x24, 0x4e, 0x16, 0x9d, 0x06, 0x50, 0xf8, 0xf3, 0xec, 0x45, 0xc8, 0x09, 0x89, 0xaf, 0xcc, 0x1d, + 0xfa, 0xae, 0xf2, 0xc3, 0x14, 0x8a, 0x64, 0xb9, 0xf2, 0xf4, 0x31, 0xcb, 0xb0, 0x71, 0x25, 0x94, + 0xd5, 0xb8, 0x7a, 0x60, 0xcf, 0xd8, 0x00, 0xb6, 0xeb, 0x84, 0x99, 0x9a, 0x18, 0xea, 0x43, 0x63, + 0x0d, 0xe3, 0x3b, 0xbc, 0x79, 0x81, 0xe2, 0xce, 0x50, 0x95, 0xdb, 0x02, 0x60, 0x09, 0xc6, 0x60, + 0x3b, 0x44, 0x4e, 0xe5, 0x15, 0xe6, 0x1a, 0xbe, 0x94, 0xc5, 0x42, 0x4d, 0x84, 0xd7, 0x99, 0x34, + 0x69, 0x64, 0x7a, 0xb8, 0x83, 0x0a, 0x3e, 0xd5, 0x74, 0x31, 0xd7, 0x7c, 0xc5, 0xac, 0x9e, 0x30, + 0x37, 0xd3, 0x82, 0x2b, 0xc6, 0xaa, 0x7c, 0xeb, 0x76, 0x1f, 0x24, 0xec, 0x59, 0x42, 0x22, 0x45, + 0xac, 0x6a, 0xea, 0x4f, 0xbc, 0xce, 0xf0, 0x44, 0x8f, 0xfc, 0xc4, 0xa2, 0x6b, 0xad, 0x0b, 0x29, + 0x91, 0x01, 0x77, 0xcf, 0x73, 0x78, 0x20, 0x85, 0x3f, 0xab, 0x25, 0xe5, 0xb3, 0x7f, 0xb6, 0xf7, + 0xe3, 0x1f, 0xcf, 0x1c, 0x20, 0xa0, 0x93, 0xe1, 0x66, 0x05, 0x04, 0xf3, 0x2d, 0xc3, 0x3b, 0x7a, + 0xd6, 0xdb, 0x31, 0x08, 0x31, 0x06, 0x00, 0x5a, 0x7e, 0xc3, 0x55, 0x84, 0xec, 0x28, 0x4d, 0x03, + 0xf8, 0xae, 0x5b, 0x6a, 0x9e, 0xd2, 0xa7, 0x8c, 0x07, 0xb7, 0xb4, 0x95, 0x23, 0xa9, 0xab, 0x6c, + 0x43, 0x17, 0xe0, 0x36, 0x94, 0x66, 0xb8, 
0xf6, 0x89, 0xe4, 0x6f, 0x9e, 0x71, 0x62, 0x5e, 0x35, + 0x25, 0x8d, 0x11, 0x95, 0x68, 0x08, 0x84, 0xf4, 0x60, 0xfc, 0x9c, 0x36, 0xab, 0xd1, 0x47, 0xc6, + 0xa1, 0x15, 0xde, 0x44, 0x5f, 0x9b, 0xfc, 0xca, 0x87, 0x72, 0xed, 0xd9, 0xa5, 0x47, 0x2c, 0x7e, + 0x30, 0x35, 0xbb, 0x83, 0xa2, 0x05, 0xa4, 0xbb, 0x55, 0xb9, 0x63, 0x77, 0xdc, 0x22, 0x7f, 0xca, + 0x6d, 0xc9, 0x00, 0xf8, 0x30, 0xbc, 0xd9, 0x68, 0x21, 0x7f, 0x16, 0xf1, 0x4a, 0xcd, 0x94, 0xdf, + 0x11, 0x2b, 0xa3, 0x85, 0x38, 0x6f, 0xc1, 0xd7, 0x62, 0x9d, 0x6c, 0xab, 0xa9, 0x7d, 0x94, 0x2d, + 0x84, 0xce, 0x2c, 0xe1, 0x92, 0x0d, 0x89, 0x8f, 0xf5, 0x62, 0x06, 0xbc, 0x85, 0xa6, 0x02, 0x1e, + 0x05, 0xe3, 0x2b, 0xb1, 0x90, 0xe6, 0xeb, 0x72, 0xdb, 0xe8, 0xe0, 0x94, 0x6f, 0xfe, 0x6a, 0x32, + 0x79, 0xe3, 0xa1, 0x7a, 0xef, 0xc3, 0xb9, 0x17, 0xc6, 0x79, 0x44, 0xd8, 0x81, 0x50, 0xf4, 0x5c, + 0x2f, 0x4d, 0xb7, 0x47, 0x55, 0x11, 0xf0, 0x56, 0x7e, 0x62, 0xe4, 0x54, 0x64, 0xe1, 0x61, 0x03, + 0x21, 0x71, 0xc8, 0x6a, 0x3e, 0xb9, 0x5b, 0x8a, 0x3b, 0xcb, 0x99, 0xc2, 0xf7, 0x5a, 0xd7, 0x0d, + 0x39, 0x7c, 0x4c, 0xce, 0x4d, 0xed, 0xa1, 0x73, 0x44, 0x73, 0xba, 0x94, 0x89, 0xa0, 0x4c, 0x38, + 0xf1, 0xa8, 0xd6, 0x5d, 0x61, 0xab, 0x2f, 0x2a, 0xbe, 0xf2, 0xad, 0xee, 0x50, 0x78, 0x68, 0x53, + 0xc8, 0x34, 0xa3, 0xf0, 0x2f, 0x16, 0xe6, 0xef, 0x98, 0x35, 0xde, 0x8d, 0x01, 0x8b, 0x32, 0xd6, + 0x1a, 0x0f, 0x41, 0x9a, 0xff, 0xc5, 0x04, 0x11, 0xb3, 0x86, 0xf4, 0xd2, 0xb3, 0x73, 0x3f, 0x24, + 0xec, 0x78, 0xaa, 0x74, 0x52, 0x5f, 0x5c, 0x89, 0x02, 0x78, 0x3b, 0xab, 0x5e, 0x59, 0xa4, 0x3f, + 0xe4, 0x3d, 0x26, 0x5f, 0x5e, 0x6c, 0x92, 0x30, 0xe3, 0x04, 0xb8, 0xd8, 0x68, 0x3c, 0x8e, 0x76, + 0x6c, 0x94, 0x56, 0xd2, 0xe0, 0x9a, 0x83, 0x1a, 0x7f, 0xf8, 0x54, 0xea, 0xef, 0x54, 0x27, 0x04, + 0xe8, 0xbf, 0x32, 0x94, 0x16, 0x46, 0x34, 0xbb, 0x66, 0x64, 0x82, 0x3c, 0x5f, 0x05, 0xa4, 0x5a, + 0xc6, 0x75, 0xaf, 0x46, 0x32, 0xde, 0xef, 0x9c, 0xdc, 0x40, 0x95, 0x9f, 0xc6, 0x68, 0x61, 0x4a, + 0xb0, 0x2e, 0xac, 0x47, 0x44, 0x70, 0xe6, 0x72, 0xc9, 0x57, 0xea, 0x6f, 0x35, 0x89, 0x4f, 0x87, + 0x90, 0xed, 0x35, 0x08, 0x16, 0x46, 0x5c, 0x78, 0x44, 0xc7, 0x5b, 0xaf, 0xef, 0xac, 0xa9, 0x92, + 0x2f, 0x08, 0x26, 0x34, 0xda, 0xb7, 0xcc, 0xe4, 0x2b, 0x0c, 0x41, 0x32, 0xbc, 0x07, 0xe4, 0xaa, + 0x71, 0x8a, 0xbc, 0xaf, 0x82, 0xd2, 0x60, 0x2b, 0x15, 0x19, 0x49, 0x89, 0x52, 0xd9, 0x38, 0xed, + 0x35, 0x67, 0x1e, 0xe5, 0xcd, 0x0c, 0xa8, 0x2c, 0x72, 0x00, 0xd5, 0x1f, 0x14, 0x22, 0x1d, 0xb2, + 0xf2, 0x17, 0x13, 0x92, 0xc1, 0x14, 0x4b, 0xb5, 0x5f, 0x16, 0x6a, 0x93, 0x28, 0xef, 0x69, 0x9e, + 0x41, 0x4d, 0x9a, 0xe6, 0x93, 0x73, 0xca, 0x48, 0x03, 0x8c, 0x49, 0xa1, 0x38, 0xa2, 0x26, 0xa2, + 0x7a, 0xd3, 0x1f, 0x49, 0xa2, 0xc5, 0xe2, 0xc2, 0x6b, 0x8b, 0xc1, 0xf6, 0x4f, 0x03, 0x82, 0x5c, + 0x2e, 0xc5, 0xf2, 0x69, 0x9a, 0x37, 0xd8, 0x5c, 0x8e, 0x81, 0xdd, 0x77, 0x2b, 0x69, 0xb2, 0x8a, + 0xff, 0x7c, 0xab, 0x42, 0x30, 0x81, 0xa4, 0x12, 0xb5, 0x52, 0x13, 0xf0, 0x67, 0xe1, 0x92, 0xc7, + 0x4b, 0x3c, 0x6e, 0x9a, 0x07, 0x44, 0xe8, 0x25, 0x28, 0x82, 0x53, 0x90, 0x44, 0xe3, 0x7c, 0x01, + 0xa5, 0xe7, 0x78, 0xa6, 0x4b, 0x93, 0xf0, 0xc7, 0xd4, 0x8a, 0x05, 0x4f, 0xba, 0x9d, 0xaa, 0xed, + 0xd5, 0x5f, 0x33, 0x82, 0x21, 0x57, 0x9f, 0xd7, 0xb0, 0xf2, 0xa1, 0x5f, 0x10, 0xc3, 0xce, 0x5b, + 0xa9, 0x59, 0xde, 0x88, 0x97, 0xd3, 0x09, 0x53, 0x13, 0x8e, 0x55, 0x74, 0x5d, 0x49, 0xdc, 0xa6, + 0x52, 0x6c, 0x85, 0xdc, 0x4a, 0xc6, 0xbf, 0xd1, 0xea, 0x96, 0x26, 0x1e, 0x81, 0xa9, 0xd1, 0xb4, + 0x09, 0x2a, 0xee, 0xc8, 0xdf, 0x79, 0x06, 0x7f, 0x25, 0x1f, 0xf3, 0x78, 0xbc, 0xf3, 0xaa, 0xbc, + 0x55, 0xe1, 0xed, 0x69, 0x9b, 0xf9, 0x00, 0x42, 0x40, 0x95, 0x05, 
0xe9, 0x47, 0x3b, 0x3e, 0x1b, + 0xf9, 0xa3, 0xe3, 0x7d, 0x70, 0x3f, 0x6f, 0xed, 0x0e, 0xd9, 0xe8, 0x70, 0x25, 0x85, 0x17, 0xf8, + 0xf6, 0xc0, 0x00, 0x92, 0x83, 0xd4, 0xd7, 0xee, 0x00, 0xf4, 0xc8, 0x09, 0xe0, 0x3c, 0x43, 0xff, + 0xa1, 0x1d, 0x75, 0xb4, 0x20, 0x48, 0x98, 0x78, 0x12, 0x41, 0x20, 0x90, 0xc0, 0xdb, 0x51, 0xf3, + 0x95, 0x75, 0x1f, 0x8f, 0x33, 0x82, 0x16, 0x5e, 0x39, 0x01, 0x99, 0x00, 0xfa, 0xb3, 0x30, 0x4b, + 0x6c, 0xc3, 0x4a, 0xab, 0xcb, 0xab, 0x25, 0xcd, 0x4a, 0x58, 0xf3, 0x2a, 0x14, 0xb3, 0x56, 0x45, + 0xbe, 0xf5, 0xd7, 0xe6, 0xf3, 0xd0, 0xe6, 0x97, 0x9a, 0xf7, 0xab, 0xce, 0x11, 0x26, 0xc3, 0x8b, + 0x01, 0xa3, 0xfd, 0x0a, 0x0b, 0xd1, 0x9c, 0x4b, 0x1c, 0xea, 0x3e, 0x55, 0xc7, 0xda, 0xd3, 0x22, + 0x10, 0x23, 0x59, 0x2a, 0x5d, 0x4a, 0xfb, 0xd4, 0x47, 0x3a, 0xe2, 0x66, 0xb5, 0x18, 0xc0, 0xf0, + 0x1a, 0xa7, 0xed, 0x42, 0xaa, 0x0d, 0x55, 0x45, 0x55, 0x68, 0xfb, 0x4a, 0x18, 0x2b, 0x7f, 0x92, + 0xd1, 0x96, 0xba, 0x79, 0x08, 0xdc, 0xe4, 0xd3, 0x89, 0xcd, 0x55, 0x21, 0xb4, 0x84, 0x94, 0x83, + 0x20, 0xa8, 0x48, 0x5e, 0xc8, 0xa9, 0xc4, 0x51, 0x89, 0xc5, 0x77, 0x06, 0x97, 0x7e, 0x12, 0x8a, + 0xd2, 0xde, 0x4d, 0xa1, 0xd9, 0x91, 0x6a, 0x77, 0xd4, 0xc2, 0xd5, 0x04, 0xaa, 0x7e, 0xee, 0xd6, + 0x5a, 0x56, 0x22, 0x6a, 0x87, 0xef, 0x23, 0x9d, 0x15, 0x23, 0x03, 0xd5, 0x9f, 0xe6, 0x48, 0x3f, + 0x3f, 0xd5, 0x2f, 0x7c, 0xf4, 0x6e, 0xa6, 0xc4, 0xf5, 0xd6, 0x71, 0x8d, 0xe5, 0xc3, 0x3b, 0x84, + 0xb0, 0x38, 0x45, 0x11, 0xb9, 0xa0, 0x07, 0x71, 0x62, 0x07, 0x82, 0xf3, 0xee, 0xd6, 0x8b, 0x3f, + 0x52, 0x0b, 0xd7, 0x1a, 0xa4, 0x47, 0x24, 0xa8, 0x27, 0x4a, 0x62, 0x16, 0xc1, 0xf9, 0x13, 0xe1, + 0xf3, 0x25, 0x8a, 0x67, 0xae, 0x65, 0x51, 0xcd, 0xd4, 0x2e, 0x4b, 0xaa, 0x11, 0x16, 0x63, 0x0c, + 0x05, 0x39, 0xef, 0xd8, 0xea, 0x56, 0xe2, 0x96, 0xd2, 0x32, 0xb6, 0x59, 0x83, 0x6e, 0xf0, 0x33, + 0xc3, 0xe3, 0x15, 0xf1, 0xb9, 0x43, 0x68, 0x52, 0x40, 0x07, 0xe0, 0x63, 0x53, 0xd8, 0xd7, 0xc3, + 0x55, 0xe3, 0xa7, 0x54, 0x15, 0x2c, 0x4d, 0xb7, 0x5e, 0xf8, 0x7a, 0x4f, 0x00, 0xfe, 0xec, 0xa9, + 0x13, 0xe4, 0x9e, 0x22, 0xe9, 0x94, 0xf4, 0xe4, 0x15, 0xec, 0x51, 0x94, 0xc0, 0x22, 0x90, 0x04, + 0x46, 0xc6, 0xd1, 0x4a, 0xb9, 0x33, 0x37, 0xb4, 0xf0, 0x74, 0xaa, 0x8a, 0x9d, 0xa7, 0x9c, 0x4f, + 0xa8, 0x1f, 0x93, 0xea, 0xc3, 0x0f, 0xf5, 0x38, 0x97, 0x33, 0x22, 0xcf, 0xda, 0x51, 0xa4, 0x35, + 0x7b, 0x10, 0xdc, 0x83, 0x43, 0xfb, 0x8a, 0xf7, 0xce, 0xf2, 0x72, 0x9e, 0xae, 0x92, 0x7c, 0x7b, + 0x88, 0x93, 0x54, 0x23, 0xb1, 0x16, 0xa5, 0x72, 0x46, 0x9a, 0x26, 0xc4, 0xe1, 0xd0, 0xeb, 0x8a, + 0xe6, 0xcb, 0x73, 0x85, 0x07, 0x32, 0x52, 0xe9, 0xdd, 0x6d, 0xb0, 0x0d, 0x12, 0x08, 0x82, 0xc0, + 0x80, 0x86, 0xed, 0xc1, 0xce, 0x88, 0x13, 0xd3, 0xea, 0x6d, 0x76, 0xf4, 0x1c, 0x44, 0x8a, 0x56, + 0x04, 0x9a, 0x5c, 0x5e, 0x7c, 0x73, 0x90, 0x14, 0xf8, 0x90, 0x83, 0xbc, 0x2e, 0x88, 0xfc, 0xd6, + 0xd0, 0x73, 0x33, 0x13, 0xc9, 0xd6, 0xea, 0x23, 0xc3, 0xcf, 0xfc, 0x42, 0x16, 0x5c, 0x58, 0xb8, + 0xa1, 0xf9, 0x4b, 0x23, 0x64, 0x75, 0x1d, 0x97, 0x9a, 0xa9, 0x7a, 0xbc, 0xe6, 0x6d, 0xf1, 0xea, + 0x50, 0x78, 0x54, 0x5d, 0xa7, 0x82, 0x20, 0xaa, 0x5a, 0x15, 0xe4, 0x88, 0xbc, 0xf3, 0xe7, 0x88, + 0x61, 0xcb, 0xcc, 0xc1, 0x46, 0x98, 0x76, 0x81, 0xea, 0x6f, 0x88, 0x37, 0xd4, 0xe5, 0x19, 0xb3, + 0xf3, 0x0b, 0x78, 0xe3, 0x4f, 0x27, 0x0b, 0xf9, 0x2f, 0x37, 0x21, 0x6b, 0x6d, 0x61, 0xc1, 0xb4, + 0x9d, 0x7b, 0x2c, 0x95, 0x9a, 0x0a, 0xf9, 0x96, 0xaf, 0x19, 0xe8, 0x9d, 0xd0, 0xf9, 0x69, 0x9c, + 0x3b, 0x4c, 0x44, 0x5e, 0x9b, 0xc0, 0x99, 0x10, 0x65, 0x37, 0x6c, 0x3f, 0x3a, 0x05, 0x2c, 0x59, + 0x02, 0x8d, 0x57, 0xec, 0x25, 0x06, 0x94, 0x11, 0x96, 0xab, 0x5a, 0xa5, 0x43, 0x43, 0x21, 
0x07, + 0x3a, 0x2b, 0xd9, 0xfa, 0xf4, 0x68, 0xc8, 0xc3, 0x5f, 0x02, 0xde, 0x5a, 0x9c, 0x1b, 0xc9, 0x4a, + 0xb9, 0x97, 0xb9, 0xd5, 0x51, 0xf4, 0x8f, 0xde, 0x14, 0xfb, 0x90, 0x0b, 0xb2, 0x49, 0x1b, 0x9d, + 0xeb, 0x7d, 0x1a, 0x1d, 0x2c, 0xdf, 0xa5, 0x30, 0x5c, 0x4d, 0x0a, 0x1f, 0x00, 0x72, 0x77, 0x9f, + 0xce, 0xae, 0x03, 0x73, 0xec, 0xb9, 0xa6, 0xa0, 0x2c, 0xd7, 0xa6, 0xd6, 0x08, 0xfd, 0x52, 0x42, + 0xbe, 0xde, 0x73, 0x95, 0x55, 0x6c, 0xd6, 0xeb, 0xcb, 0x14, 0xc8, 0xc9, 0xce, 0xbe, 0xd4, 0x97, + 0x42, 0x91, 0x7b, 0x7b, 0xff, 0x4a, 0xf9, 0x31, 0xe1, 0xab, 0x3d, 0xaa, 0x82, 0x36, 0x97, 0x8b, + 0x9d, 0xc9, 0x8d, 0x0c, 0xed, 0x95, 0x7b, 0x2b, 0x1b, 0xcb, 0xec, 0x22, 0xdf, 0x63, 0xec, 0x1f, + 0xb8, 0x09, 0x0a, 0x0c, 0x97, 0xd6, 0x5b, 0x53, 0x22, 0x4a, 0x1d, 0xc7, 0x7a, 0xa5, 0xe3, 0x08, + 0x86, 0xc5, 0x01, 0x6b, 0x25, 0x38, 0xd9, 0xc2, 0xea, 0xc3, 0x8b, 0x45, 0x25, 0x26, 0x95, 0xeb, + 0x81, 0xf9, 0x1f, 0xe4, 0x04, 0x5b, 0x0e, 0x46, 0xa5, 0xc3, 0x7b, 0xe3, 0x81, 0xc6, 0xdb, 0xeb, + 0xbd, 0xbf, 0xb7, 0x50, 0x6a, 0x3b, 0xc2, 0x49, 0x05, 0x04, 0x45, 0xfe, 0x3e, 0xd8, 0xa9, 0xbe, + 0x35, 0xc9, 0x10, 0x73, 0xc1, 0xbf, 0x1c, 0x7f, 0x57, 0x28, 0xbd, 0xa7, 0x48, 0x1c, 0xe9, 0xec, + 0xc5, 0xdf, 0x36, 0xa1, 0xf0, 0xb7, 0x0e, 0xc0, 0xa9, 0x26, 0xbc, 0x8a, 0x19, 0x0c, 0x61, 0xa4, + 0x9f, 0x30, 0x64, 0xcb, 0xee, 0x43, 0x9f, 0xe8, 0x00, 0x43, 0x6c, 0x7c, 0x4c, 0x55, 0xbd, 0x86, + 0x2c, 0xb7, 0xbe, 0xce, 0x18, 0x5f, 0x60, 0x26, 0x4a, 0xcb, 0xe7, 0x15, 0x0c, 0xa7, 0x25, 0x85, + 0x72, 0x95, 0x72, 0x90, 0x04, 0xff, 0xf7, 0xfc, 0x8c, 0x94, 0x06, 0xae, 0x87, 0x9a, 0x49, 0xc4, + 0x32, 0x6a, 0xf2, 0xfa, 0x40, 0x44, 0x8f, 0xf4, 0xc2, 0xc5, 0xd1, 0x7b, 0x45, 0xf3, 0x5b, 0x5a, + 0xb2, 0x41, 0xc8, 0xb2, 0xbe, 0x57, 0xeb, 0x27, 0x13, 0x18, 0x1a, 0x97, 0xbb, 0x56, 0x90, 0x45, + 0x25, 0xad, 0x87, 0x91, 0xee, 0xb6, 0x17, 0xbe, 0xe2, 0xd3, 0x4f, 0x87, 0xec, 0x43, 0x93, 0xe5, + 0xa1, 0x44, 0x33, 0x26, 0x63, 0x04, 0x24, 0xbe, 0x07, 0x8b, 0x89, 0x16, 0x63, 0x30, 0xf8, 0x63, + 0xb2, 0xb0, 0x97, 0x2f, 0xdd, 0xa9, 0xc7, 0xcc, 0xdc, 0x10, 0xbb, 0x25, 0xda, 0x8c, 0xdc, 0x84, + 0x93, 0x45, 0x7f, 0x79, 0xec, 0x34, 0x97, 0x58, 0xc7, 0xbc, 0xaf, 0xb7, 0xdb, 0xfb, 0x16, 0xd1, + 0x62, 0x5a, 0x46, 0x13, 0xd5, 0x40, 0x72, 0x0a, 0xc6, 0x1e, 0x4e, 0xdf, 0x69, 0x72, 0x7b, 0x75, + 0xe8, 0x9b, 0xb4, 0x0e, 0x8b, 0x24, 0x73, 0xe9, 0x8a, 0xa6, 0xbe, 0x18, 0x1b, 0x0a, 0x4c, 0x3d, + 0x58, 0x18, 0x01, 0x49, 0xdd, 0xaa, 0x40, 0x97, 0x69, 0xa5, 0x23, 0x32, 0x0d, 0x6c, 0xe8, 0x68, + 0x07, 0x01, 0xaf, 0x68, 0xad, 0x2b, 0xeb, 0x59, 0xa1, 0x06, 0xf9, 0xc3, 0xbc, 0x93, 0x28, 0x34, + 0x41, 0x86, 0x7e, 0x6d, 0xfa, 0x39, 0xce, 0x0c, 0x0a, 0xae, 0xef, 0xa2, 0x98, 0xba, 0x52, 0xfe, + 0x72, 0x3a, 0xfc, 0xdc, 0xf6, 0x95, 0x72, 0x01, 0x02, 0x74, 0x29, 0x47, 0x64, 0xe2, 0xdd, 0x0a, + 0x25, 0x99, 0xa0, 0x32, 0xf9, 0x8e, 0xd5, 0x57, 0x4f, 0x75, 0xdd, 0xfd, 0xf5, 0x58, 0xbd, 0x18, + 0x31, 0xc1, 0xa5, 0xd5, 0x18, 0xea, 0x3c, 0xda, 0x77, 0x02, 0xab, 0x57, 0x7b, 0x46, 0x01, 0xf1, + 0xde, 0xf2, 0xa3, 0x79, 0xe7, 0x82, 0xcb, 0x16, 0x2d, 0xad, 0x07, 0x80, 0x9d, 0x3d, 0x02, 0x2f, + 0x7c, 0xee, 0x94, 0x31, 0x5a, 0x12, 0x9a, 0x78, 0xe6, 0x9d, 0xf3, 0x93, 0x22, 0x6c, 0x95, 0xaa, + 0xe0, 0x65, 0x3f, 0x0c, 0xa3, 0xbf, 0x4c, 0xcb, 0x2d, 0x38, 0x89, 0x01, 0x41, 0xc2, 0xb6, 0x86, + 0xee, 0xf5, 0x18, 0x36, 0x1a, 0x2e, 0xa6, 0x60, 0x5a, 0xcf, 0xb4, 0x99, 0x3e, 0xce, 0x40, 0xb4, + 0x64, 0x31, 0x0c, 0xc5, 0x8d, 0x60, 0x25, 0xbe, 0x12, 0xd7, 0x7c, 0x35, 0x7e, 0x90, 0xda, 0xc0, + 0xf0, 0x91, 0xd1, 0x73, 0xe8, 0xe6, 0x91, 0xfd, 0xd7, 0x2c, 0x8d, 0x49, 0xfe, 0x64, 0x87, 0xa1, + 0x3e, 0x7b, 0x6e, 
0x2c, 0x68, 0x69, 0xe9, 0x0e, 0x0b, 0xd4, 0xb0, 0x38, 0x92, 0xbd, 0x7c, 0x06, + 0x65, 0x9c, 0xd4, 0x55, 0x91, 0x78, 0xca, 0xd0, 0xeb, 0xcb, 0x58, 0x82, 0x93, 0xf7, 0x9d, 0x92, + 0x36, 0xf6, 0xd4, 0x62, 0x3c, 0x67, 0xaa, 0xc1, 0x98, 0xd7, 0xf1, 0xc3, 0xdd, 0xc6, 0xcb, 0xfa, + 0x5c, 0x7b, 0x77, 0x9f, 0x91, 0xcd, 0x4e, 0x3f, 0x84, 0x45, 0x71, 0xb0, 0xff, 0xba, 0x69, 0x1f, + 0xb9, 0x69, 0xa0, 0xc5, 0xaa, 0x9a, 0x48, 0xa9, 0xa7, 0xac, 0xd4, 0xc7, 0x7c, 0x85, 0x5c, 0x26, + 0xc6, 0xde, 0x4f, 0xe4, 0x67, 0xc3, 0xe1, 0x34, 0x59, 0xaa, 0xe1, 0x9f, 0x25, 0xb3, 0xf9, 0x2d, + 0xab, 0x3f, 0x70, 0x99, 0x10, 0x35, 0x9e, 0xf8, 0xed, 0xdc, 0x5e, 0xa5, 0x03, 0x99, 0x7a, 0xd9, + 0xc0, 0xbd, 0xc0, 0x02, 0xcf, 0xf6, 0x9c, 0xf9, 0x73, 0xc1, 0x0e, 0x63, 0x4e, 0xac, 0x01, 0x5e, + 0x23, 0x8c, 0xce, 0xc2, 0x32, 0xb4, 0xfc, 0xd1, 0x02, 0xee, 0x61, 0xb8, 0xfb, 0x5c, 0xe4, 0x24, + 0xbe, 0x67, 0xd4, 0x90, 0x49, 0xa2, 0x69, 0x5d, 0xc0, 0x3c, 0xcc, 0xfc, 0x57, 0x0a, 0x76, 0xf3, + 0xce, 0xea, 0x74, 0x6c, 0x01, 0xaa, 0x3d, 0x1e, 0xf4, 0x8a, 0x92, 0xb9, 0xf4, 0x0e, 0xf6, 0x23, + 0x9a, 0x96, 0xa8, 0x78, 0xb5, 0x98, 0x9e, 0x31, 0x6a, 0xff, 0x7b, 0xde, 0xf6, 0x89, 0x5e, 0x27, + 0x53, 0x46, 0x8f, 0x4f, 0xf4, 0xb6, 0xb3, 0x4f, 0x9a, 0xc3, 0x85, 0xc5, 0x74, 0x6e, 0xbb, 0x61, + 0xc9, 0x0c, 0x28, 0x85, 0x5f, 0x21, 0x7f, 0x40, 0xc1, 0x60, 0x32, 0x0e, 0xff, 0x9b, 0xf1, 0x24, + 0xdb, 0xfb, 0xb2, 0xf4, 0xf6, 0xef, 0x6c, 0xee, 0x8d, 0x9f, 0x47, 0x2c, 0x12, 0x0d, 0xe6, 0x0e, + 0x35, 0xa8, 0x20, 0x69, 0x61, 0xbf, 0x96, 0xf7, 0xa9, 0xe5, 0x49, 0xc1, 0xe3, 0x42, 0x97, 0xa3, + 0x8e, 0xc3, 0x9c, 0xc9, 0x11, 0x4c, 0xb9, 0x93, 0x5d, 0xbc, 0x33, 0x1f, 0xbd, 0x76, 0xc6, 0x5f, + 0xe4, 0xb5, 0xae, 0x8c, 0xe5, 0x60, 0xa6, 0xab, 0x81, 0x86, 0xe5, 0xa4, 0x3a, 0xc1, 0xc7, 0xa1, + 0x1a, 0x22, 0x74, 0x60, 0x43, 0xe1, 0x0f, 0x56, 0x2e, 0x75, 0xec, 0x4e, 0xd0, 0x26, 0xd9, 0xf7, + 0x41, 0x1e, 0x72, 0xfd, 0xae, 0xae, 0x9b, 0x73, 0x7d, 0x20, 0xce, 0x5a, 0xb9, 0x37, 0x36, 0xf9, + 0x34, 0x79, 0xbd, 0xec, 0x28, 0x9a, 0x74, 0xa3, 0x85, 0x35, 0x2f, 0x24, 0xa6, 0xec, 0x5e, 0xd8, + 0x19, 0xa1, 0x68, 0x0f, 0x14, 0x56, 0xca, 0xb6, 0x13, 0x77, 0xb2, 0xf5, 0x70, 0x1c, 0x1d, 0x61, + 0x16, 0xef, 0x6b, 0x4a, 0x4b, 0x38, 0x51, 0x3c, 0xa4, 0x8d, 0x86, 0x21, 0x78, 0xa4, 0xb4, 0x11, + 0xf8, 0xe0, 0xec, 0xde, 0xb2, 0x35, 0xd3, 0x9b, 0xc1, 0xe3, 0xcc, 0x64, 0x2e, 0xc5, 0x19, 0x13, + 0x38, 0x0c, 0xde, 0x8e, 0x5a, 0x11, 0x3a, 0xae, 0x3f, 0x61, 0x73, 0x8c, 0x1f, 0xbe, 0xce, 0xe3, + 0xd5, 0xfb, 0x60, 0x87, 0x1c, 0x5a, 0x51, 0xaa, 0xba, 0x2f, 0x95, 0xa8, 0xe5, 0x9b, 0x46, 0xe9, + 0xbb, 0xd5, 0x4b, 0x81, 0xf4, 0x34, 0x4f, 0xc5, 0x8a, 0x55, 0x9e, 0xad, 0xb8, 0xbe, 0xde, 0xaa, + 0x6b, 0x6e, 0x62, 0xd2, 0x5a, 0x71, 0xc6, 0x7b, 0xc6, 0xbc, 0x81, 0x3c, 0xf7, 0xe0, 0xc2, 0xda, + 0xee, 0xdb, 0x89, 0xab, 0xe3, 0x2d, 0x38, 0xe9, 0xa0, 0x25, 0xda, 0xc9, 0x12, 0x9d, 0x75, 0x4a, + 0x12, 0x73, 0x76, 0xfa, 0xa7, 0x07, 0xa6, 0x21, 0xdc, 0x8b, 0xf1, 0x09, 0xe8, 0x69, 0x5e, 0x9f, + 0xd2, 0xca, 0x50, 0x05, 0x25, 0x99, 0xb9, 0x3b, 0x89, 0xdd, 0x9a, 0xec, 0x31, 0x7b, 0x2a, 0xef, + 0x76, 0xbd, 0x11, 0x63, 0xdc, 0xd7, 0x87, 0xe4, 0x0b, 0x7f, 0x9d, 0x14, 0x45, 0x65, 0x99, 0x49, + 0xd6, 0x87, 0xf0, 0xfe, 0xde, 0xff, 0x21, 0xfa, 0xc1, 0xf7, 0xdb, 0xeb, 0xf4, 0x79, 0x83, 0x9c, + 0x76, 0xec, 0xb2, 0x05, 0x47, 0xb0, 0x19, 0xb1, 0x43, 0x1e, 0xf3, 0x67, 0xc3, 0x2b, 0xd0, 0x4a, + 0xda, 0x6e, 0xdf, 0x45, 0xb2, 0x38, 0xe0, 0x38, 0xcf, 0x33, 0xe7, 0x0d, 0xc1, 0xbd, 0xcd, 0x5e, + 0x0f, 0x36, 0xe1, 0x5c, 0x6e, 0x49, 0x5a, 0x11, 0x4a, 0xe2, 0x04, 0x04, 0x2a, 0x54, 0xed, 0x0e, + 0x44, 0x4a, 0xe9, 0x32, 0xc7, 0xde, 0xea, 
0x8c, 0x4b, 0x07, 0x3f, 0x08, 0x5b, 0xa9, 0x08, 0x80, + 0x99, 0x03, 0x3a, 0x93, 0x4d, 0xf0, 0xc5, 0xa0, 0xea, 0x68, 0x55, 0x47, 0xe8, 0x8e, 0x0b, 0x3a, + 0xbc, 0x73, 0x28, 0xa2, 0x4b, 0xe9, 0xa4, 0x78, 0xe3, 0x98, 0x4a, 0x33, 0x64, 0x7e, 0x1a, 0x08, + 0xdd, 0x4c, 0x6c, 0xe8, 0x64, 0xbf, 0x14, 0xd1, 0x75, 0x6b, 0x37, 0xc2, 0x35, 0x1b, 0x5e, 0x5a, + 0x77, 0xa8, 0x19, 0x30, 0x51, 0x2e, 0x75, 0x9a, 0x5c, 0x19, 0xef, 0xb3, 0x9d, 0xf0, 0xe7, 0x81, + 0x9e, 0x42, 0xc2, 0xf5, 0xbd, 0x3b, 0xc4, 0x3f, 0xad, 0xc5, 0x46, 0x2e, 0x93, 0xf0, 0xcf, 0xc7, + 0x7f, 0x4e, 0xc3, 0xf8, 0xb8, 0xe0, 0x52, 0x59, 0x04, 0xe6, 0x21, 0x37, 0xd4, 0x3d, 0x8d, 0xc7, + 0x1d, 0x56, 0x9a, 0x1f, 0x47, 0x99, 0x88, 0xd5, 0xa9, 0x21, 0x67, 0xd4, 0x58, 0xe7, 0x14, 0x39, + 0xf6, 0xf4, 0xf5, 0xad, 0xdc, 0x6f, 0xb5, 0x4c, 0x1a, 0x6d, 0xb9, 0x29, 0x13, 0x9d, 0x6a, 0x53, + 0x74, 0x09, 0x14, 0x89, 0xdb, 0x65, 0x19, 0x0d, 0x47, 0x9a, 0x74, 0x7f, 0x6b, 0x6f, 0xbf, 0x6d, + 0xcd, 0x3e, 0x56, 0x39, 0x99, 0x01, 0x7e, 0x70, 0xae, 0x49, 0x1d, 0xe9, 0x7c, 0x01, 0x3b, 0xa3, + 0x91, 0x5e, 0xc4, 0xfd, 0x6a, 0x98, 0x8d, 0xe5, 0xdb, 0x41, 0xea, 0x74, 0xd5, 0x0a, 0x3b, 0x26, + 0xb5, 0xec, 0xc3, 0x9c, 0x18, 0xd3, 0x35, 0x97, 0xa1, 0xf7, 0x14, 0xe2, 0x21, 0xfa, 0x8e, 0x15, + 0x12, 0x33, 0x0a, 0x0d, 0x17, 0x37, 0x73, 0x18, 0xb5, 0xe6, 0x64, 0x99, 0xd9, 0x14, 0x75, 0x2f, + 0x48, 0x64, 0x28, 0x9e, 0x04, 0x36, 0x70, 0xa2, 0xde, 0x36, 0xa5, 0xe3, 0x52, 0x0b, 0xe8, 0x21, + 0x19, 0xcd, 0xe3, 0xee, 0xb8, 0xb7, 0x23, 0xe3, 0xcc, 0x79, 0x09, 0x40, 0x4c, 0x92, 0x0e, 0xef, + 0x45, 0x86, 0xe2, 0xcb, 0xf3, 0x22, 0x57, 0x0b, 0x97, 0x4c, 0x22, 0xe0, 0x5b, 0x87, 0xc0, 0x9c, + 0xf6, 0x72, 0xb7, 0xdf, 0x98, 0x59, 0xf0, 0x4f, 0x20, 0xb2, 0x34, 0xad, 0x45, 0x02, 0xa0, 0xac, + 0xea, 0x48, 0x0c, 0x40, 0xbd, 0x4e, 0x12, 0x0d, 0x1e, 0x0b, 0x6d, 0x6c, 0x46, 0xd3, 0x07, 0x39, + 0x8f, 0x18, 0x8a, 0xa0, 0x66, 0x16, 0x3e, 0x0a, 0x7a, 0xd9, 0x52, 0xf0, 0x90, 0xaf, 0x10, 0x2a, + 0xaa, 0xd5, 0x53, 0xfc, 0x03, 0xd9, 0xb1, 0x69, 0x14, 0x76, 0x4b, 0x49, 0x87, 0x27, 0xd4, 0xa9, + 0xa5, 0xc1, 0x9f, 0xad, 0x50, 0x7f, 0x94, 0xd8, 0xd4, 0x9f, 0x20, 0xc5, 0x89, 0xc6, 0x70, 0xa0, + 0x0f, 0x32, 0x46, 0xa1, 0x4d, 0x8e, 0xcf, 0x6b, 0x6d, 0xf9, 0x76, 0x95, 0xe1, 0x1a, 0x52, 0xb4, + 0x40, 0x97, 0x94, 0xcb, 0xc8, 0x65, 0xb1, 0x19, 0x1f, 0x0d, 0x99, 0xca, 0x61, 0x51, 0x4c, 0xd1, + 0xa0, 0xc1, 0x42, 0x99, 0xa1, 0x4d, 0x4f, 0xc1, 0xd7, 0x66, 0xcf, 0x36, 0xaf, 0x59, 0xea, 0x70, + 0x73, 0x58, 0x6c, 0xcb, 0xf7, 0x60, 0xe3, 0xc1, 0x56, 0x0b, 0xdd, 0x7e, 0x7b, 0x45, 0x03, 0x96, + 0x1c, 0x13, 0xa8, 0x62, 0xf1, 0x31, 0xed, 0xd8, 0xbc, 0x70, 0xff, 0x07, 0xfa, 0xfb, 0xf9, 0xbb, + 0x65, 0x13, 0x84, 0x93, 0x4a, 0x0c, 0xf4, 0x98, 0x80, 0xcd, 0xd1, 0x3b, 0x48, 0x9a, 0x42, 0x80, + 0x4a, 0xad, 0x04, 0x94, 0xcc, 0x3b, 0x79, 0x7c, 0xc5, 0x17, 0x95, 0x05, 0x7f, 0x8b, 0x8b, 0x70, + 0xa0, 0xa1, 0x38, 0x83, 0x1f, 0x81, 0xd9, 0x66, 0x1d, 0x71, 0x4a, 0xa5, 0xd4, 0x42, 0xb1, 0x11, + 0x44, 0xe1, 0x82, 0xad, 0xe4, 0x92, 0x80, 0xde, 0x13, 0x81, 0x24, 0x23, 0xd8, 0x58, 0xac, 0x48, + 0xe7, 0x56, 0xfd, 0x47, 0xa7, 0xb8, 0x50, 0x59, 0x0c, 0x26, 0x66, 0xf5, 0x65, 0x70, 0x91, 0x26, + 0xfb, 0x01, 0x07, 0x21, 0x4f, 0x8f, 0xad, 0xf3, 0xe0, 0x32, 0xeb, 0xab, 0xdb, 0x58, 0xe5, 0x86, + 0xf4, 0x5b, 0x23, 0x7b, 0xdf, 0x00, 0xf7, 0xb2, 0xc7, 0x71, 0x0c, 0xc9, 0x8e, 0x7a, 0x3a, 0x9a, + 0xff, 0xa5, 0xef, 0x93, 0x0e, 0xca, 0xe9, 0xe9, 0x22, 0xdd, 0xe5, 0x05, 0x37, 0x02, 0xaf, 0x8a, + 0xc8, 0x2d, 0xbd, 0x1c, 0xd1, 0x68, 0x9b, 0x04, 0x16, 0x31, 0x5a, 0x38, 0xbf, 0xeb, 0x2f, 0x4d, + 0x9f, 0xdb, 0xae, 0xcc, 0x0e, 0x9a, 0xe2, 0x24, 0xc2, 0x52, 0x45, 
0xc9, 0x1d, 0xa3, 0x65, 0x3a, + 0xad, 0xf5, 0x52, 0x68, 0x4a, 0xdb, 0xc0, 0x9f, 0x7d, 0x27, 0x40, 0xb7, 0xf5, 0x11, 0xad, 0xb2, + 0x1f, 0x87, 0xd6, 0x39, 0x3b, 0x96, 0x19, 0xd2, 0x69, 0xd4, 0xab, 0x15, 0x5f, 0x5d, 0x20, 0x73, + 0x9b, 0x11, 0x82, 0xe6, 0xf6, 0xb8, 0x30, 0xce, 0x2e, 0x4a, 0x63, 0xcc, 0x4c, 0x43, 0xfa, 0x23, + 0x22, 0x66, 0x99, 0x4e, 0xe5, 0x70, 0x82, 0x2e, 0xd8, 0x4f, 0x11, 0xca, 0x09, 0xb8, 0x79, 0x00, + 0xc2, 0xa2, 0x83, 0x3f, 0x80, 0x06, 0x35, 0x36, 0x16, 0xf2, 0xc2, 0xef, 0x0e, 0x1d, 0x83, 0x55, + 0x48, 0xdf, 0x9b, 0x75, 0xc2, 0xe2, 0x62, 0xef, 0x86, 0xcf, 0x7f, 0x76, 0x34, 0x28, 0x76, 0xdd, + 0xb2, 0xed, 0xb7, 0xaa, 0x3b, 0x21, 0xab, 0xc9, 0xbd, 0x68, 0x8b, 0x9c, 0xda, 0x3c, 0xf4, 0xbb, + 0x80, 0x36, 0x5b, 0x5f, 0x3c, 0xd6, 0xb8, 0xcf, 0x29, 0x15, 0xe4, 0x4b, 0x0d, 0xbb, 0x12, 0xe3, + 0x34, 0xb5, 0x24, 0x25, 0xd3, 0xd9, 0x3a, 0x8a, 0x39, 0x08, 0x3b, 0xaa, 0xfb, 0x08, 0xe7, 0x01, + 0x49, 0x51, 0x47, 0x22, 0x49, 0xac, 0x93, 0xf5, 0xa5, 0xf7, 0x62, 0x15, 0x86, 0x50, 0x82, 0x5f, + 0xe8, 0xe7, 0xc5, 0xf3, 0x30, 0x77, 0x86, 0x58, 0xe1, 0xca, 0x89, 0x68, 0x8d, 0xc3, 0x5e, 0xfe, + 0x58, 0x8c, 0x8a, 0xd6, 0x76, 0xc5, 0x3d, 0x8a, 0x0d, 0x3e, 0x0c, 0x8e, 0xcf, 0x90, 0xb9, 0x62, + 0x4d, 0x60, 0xea, 0xef, 0x9c, 0xa7, 0x24, 0x40, 0xf9, 0x9d, 0xa0, 0x19, 0xb6, 0x4d, 0x53, 0x6b, + 0x79, 0x0e, 0x81, 0x76, 0xbe, 0x4d, 0xac, 0xcd, 0x8e, 0x67, 0x06, 0xba, 0x1b, 0x42, 0x80, 0xe5, + 0xaf, 0x20, 0xae, 0xb8, 0x68, 0x64, 0xf5, 0x84, 0x48, 0x67, 0xe8, 0x5c, 0x3b, 0xce, 0x84, 0xb5, + 0x76, 0x11, 0xa2, 0x3a, 0xdf, 0xc5, 0x44, 0x85, 0x55, 0xa1, 0xa1, 0x97, 0x71, 0xaf, 0x9e, 0x83, + 0x15, 0x14, 0xb6, 0x01, 0xc3, 0x8e, 0x49, 0x11, 0x83, 0xe2, 0xac, 0x79, 0xdd, 0xa7, 0x52, 0x04, + 0xfe, 0x02, 0x59, 0xfa, 0xcf, 0xeb, 0xea, 0x54, 0xbd, 0x3a, 0x33, 0x10, 0xfc, 0x84, 0xfb, 0xe6, + 0xe6, 0x56, 0x1a, 0xc4, 0xf7, 0xac, 0xc6, 0x03, 0x46, 0x2b, 0xae, 0x3e, 0x7f, 0xbf, 0x6b, 0x13, + 0x50, 0x7a, 0x67, 0x2f, 0x20, 0xac, 0xdf, 0xa0, 0x81, 0x7c, 0xef, 0x17, 0x7e, 0x05, 0x41, 0xa0, + 0x46, 0x75, 0x65, 0x62, 0xef, 0xcc, 0x21, 0xbd, 0xb8, 0xc9, 0xee, 0x1a, 0xe1, 0x52, 0xe5, 0xdf, + 0x2e, 0x0e, 0x65, 0x94, 0xf1, 0x6b, 0x87, 0x75, 0x9d, 0x2c, 0xa0, 0xc0, 0xe9, 0x87, 0x90, 0x9c, + 0x8e, 0x9c, 0x63, 0xc8, 0x21, 0xc5, 0x1f, 0xf7, 0x60, 0xfa, 0x91, 0xe7, 0xee, 0xd3, 0xc7, 0x37, + 0x67, 0x5b, 0x58, 0xbc, 0xea, 0x77, 0x0a, 0xae, 0xcb, 0x65, 0x3b, 0xf5, 0x30, 0xe4, 0x4a, 0x3e, + 0x22, 0x30, 0xf0, 0x70, 0x98, 0x5b, 0x5b, 0xac, 0xa1, 0x7b, 0xed, 0x28, 0xa9, 0x79, 0xdb, 0x4a, + 0x83, 0x1f, 0x5f, 0x0a, 0x25, 0x79, 0x5d, 0x3b, 0x75, 0x79, 0x9d, 0xbf, 0x57, 0x0b, 0xf0, 0x26, + 0x9a, 0x10, 0xd4, 0xaf, 0xd8, 0x13, 0x66, 0x26, 0x23, 0x17, 0x01, 0x85, 0x19, 0x7d, 0xe4, 0xac, + 0x88, 0x44, 0x5a, 0x56, 0xab, 0x6d, 0x78, 0xeb, 0x99, 0x1f, 0x3e, 0x91, 0xe1, 0x54, 0xa6, 0x2f, + 0x6c, 0x2f, 0xb7, 0x76, 0xdc, 0xf6, 0x70, 0xf5, 0xa7, 0x6d, 0x9d, 0x82, 0x71, 0x48, 0x65, 0x3a, + 0x19, 0xd8, 0x29, 0xb8, 0xc7, 0x22, 0xcc, 0xbb, 0xbc, 0x1a, 0x0d, 0x6a, 0x3f, 0x1a, 0x48, 0x6c, + 0x27, 0x9f, 0x58, 0xf1, 0xbc, 0xa7, 0x4a, 0xc7, 0x72, 0xb0, 0xdd, 0x09, 0x01, 0x82, 0xfe, 0x8b, + 0x77, 0xfe, 0x3f, 0x3b, 0xde, 0x99, 0xbc, 0x0e, 0xb8, 0xbc, 0x23, 0x2d, 0xc2, 0xfc, 0x3b, 0x90, + 0x7d, 0x50, 0xb7, 0x22, 0xdb, 0xf4, 0xff, 0x2f, 0xde, 0x19, 0x23, 0x84, 0x77, 0xf5, 0x36, 0x95, + 0x4f, 0x12, 0x6a, 0x5d, 0x41, 0x4b, 0x00, 0x04, 0x10, 0x1e, 0x94, 0xc5, 0xff, 0x3e, 0xc2, 0x4c, + 0x74, 0x80, 0xc1, 0xef, 0x0b, 0x25, 0x49, 0xc5, 0xe3, 0x42, 0x20, 0x5e, 0x28, 0x2d, 0x9c, 0xb5, + 0x67, 0x76, 0x5e, 0x7e, 0xef, 0x73, 0x66, 0x43, 0x86, 0x91, 0x91, 0x5d, 0xd1, 0xfb, 0x7a, 
0x01, + 0x4b, 0x76, 0xd4, 0x78, 0x77, 0x05, 0x60, 0xb7, 0x7d, 0x0f, 0xcd, 0x5a, 0xdc, 0xb9, 0xae, 0x33, + 0x04, 0xfa, 0xdf, 0xf2, 0xee, 0x73, 0x37, 0xaf, 0xec, 0x36, 0xe7, 0x65, 0x01, 0x02, 0xe3, 0xd7, + 0x03, 0x18, 0x35, 0x21, 0xfb, 0x25, 0x63, 0x45, 0x2b, 0x6a, 0x6f, 0x72, 0x55, 0x62, 0x4b, 0x7e, + 0x10, 0xc6, 0xa5, 0xbb, 0x89, 0xd3, 0xb7, 0xda, 0x5e, 0x59, 0x16, 0x41, 0x13, 0x6d, 0x50, 0x39, + 0xc1, 0x9b, 0xf0, 0x38, 0x3f, 0xc1, 0x44, 0xfa, 0x98, 0x32, 0xee, 0xc5, 0x45, 0x06, 0x9b, 0x7e, + 0x78, 0xa3, 0x6a, 0x47, 0xb7, 0xc8, 0xfa, 0xac, 0xf0, 0xf1, 0xa7, 0xd8, 0x17, 0x83, 0xa7, 0x34, + 0x00, 0x3d, 0x7f, 0xc2, 0x83, 0xf9, 0x45, 0xbe, 0xc5, 0xe7, 0x20, 0x14, 0xe8, 0xc3, 0x23, 0xfd, + 0x88, 0xc3, 0x2c, 0xc4, 0xaa, 0x0e, 0xbb, 0x61, 0x8c, 0x75, 0x23, 0xb7, 0x52, 0x67, 0xe5, 0x67, + 0xe2, 0x69, 0x25, 0x2c, 0x19, 0xa0, 0xfe, 0xec, 0x1c, 0x97, 0x42, 0x01, 0xf7, 0xd9, 0x02, 0x38, + 0xfa, 0x60, 0x0f, 0xe5, 0x46, 0x81, 0x9e, 0x8f, 0x63, 0x57, 0x8c, 0xaf, 0x8d, 0x4f, 0x0c, 0xc6, + 0xd9, 0xb7, 0xf4, 0x67, 0x60, 0x24, 0xbf, 0x01, 0x36, 0x74, 0x63, 0xc4, 0xba, 0xef, 0x7a, 0xde, + 0x2f, 0x32, 0x49, 0x88, 0x54, 0xa2, 0xdd, 0xd5, 0xae, 0xf1, 0xca, 0x71, 0x2e, 0x32, 0xe0, 0x94, + 0x22, 0x0a, 0xd1, 0xea, 0xad, 0x73, 0x82, 0xc2, 0xfa, 0x33, 0xd8, 0x23, 0xfb, 0x88, 0x8c, 0x25, + 0xe6, 0x30, 0x66, 0x1b, 0x27, 0xc1, 0xc7, 0xa9, 0x0c, 0x72, 0x9c, 0xad, 0xe3, 0xf9, 0x4c, 0x4f, + 0xb2, 0xaf, 0x42, 0x36, 0xb4, 0xf7, 0xc3, 0xc5, 0x41, 0x45, 0xba, 0x67, 0xaf, 0x58, 0x64, 0x4f, + 0x39, 0x81, 0x40, 0x4b, 0xaf, 0x9b, 0xc6, 0xe3, 0xc0, 0xfb, 0x6b, 0x42, 0x89, 0x87, 0x95, 0x23, + 0x38, 0x79, 0x59, 0x6d, 0xfc, 0x50, 0xa3, 0xd8, 0x17, 0xae, 0x40, 0x74, 0x8b, 0xf1, 0xcd, 0x76, + 0x82, 0x1a, 0xb5, 0x2c, 0x0a, 0x62, 0x35, 0xa3, 0x77, 0x2c, 0xab, 0xe7, 0x4f, 0x4a, 0xae, 0x22, + 0xbb, 0xfb, 0x9b, 0xcf, 0x2d, 0xc8, 0x03, 0xf6, 0x54, 0xb8, 0xc8, 0x13, 0x8f, 0xf0, 0x48, 0x91, + 0xe2, 0xb9, 0xc6, 0x8d, 0x1c, 0x5d, 0xb2, 0x4b, 0xb9, 0x29, 0x35, 0xd7, 0xf6, 0xb1, 0x39, 0x7b, + 0xa1, 0x49, 0x87, 0x63, 0xa7, 0xe2, 0x26, 0xf7, 0xe1, 0xb0, 0xb3, 0xec, 0x8f, 0x34, 0x1a, 0x4b, + 0x0b, 0x4d, 0x2c, 0x6b, 0x8d, 0x50, 0x86, 0x2a, 0xe7, 0x21, 0x7e, 0x64, 0xae, 0x3e, 0xae, 0xa9, + 0x89, 0xed, 0x59, 0x0b, 0x6e, 0x90, 0x77, 0x9b, 0x88, 0xbb, 0xbb, 0x5a, 0x66, 0xf9, 0x58, 0xbf, + 0x44, 0xff, 0x64, 0x7a, 0xb2, 0xe0, 0xc0, 0x9f, 0x4b, 0xae, 0x3a, 0x85, 0xce, 0x17, 0x18, 0xd1, + 0x7b, 0x81, 0x4e, 0xb9, 0xd1, 0x1c, 0x79, 0x57, 0x38, 0x6f, 0x12, 0x0a, 0xde, 0xe2, 0x22, 0x70, + 0x6c, 0xfc, 0x2a, 0xe9, 0x3a, 0x58, 0x52, 0xd0, 0x0b, 0xbe, 0x39, 0x59, 0xf2, 0x93, 0x40, 0x9a, + 0xf7, 0xf2, 0x8a, 0x2b, 0x04, 0x21, 0x32, 0x5e, 0x51, 0x3c, 0x6a, 0xbe, 0x0b, 0xaf, 0x33, 0x6a, + 0x85, 0x0b, 0xa3, 0xc7, 0xb1, 0x76, 0x91, 0xca, 0xf8, 0xdf, 0x54, 0xb4, 0xf3, 0xbd, 0x9e, 0x24, + 0x62, 0x31, 0x03, 0x33, 0x07, 0x1a, 0x27, 0x76, 0xb2, 0x8f, 0x78, 0xb6, 0x7e, 0x11, 0x5e, 0x51, + 0xa5, 0x6b, 0xb8, 0xba, 0x0c, 0xd0, 0xa8, 0x78, 0xc8, 0x7e, 0x21, 0xc6, 0x9d, 0xcb, 0x86, 0x77, + 0xb5, 0x8b, 0xab, 0x68, 0xc6, 0x98, 0x20, 0x31, 0x01, 0x46, 0x4d, 0xaf, 0x36, 0x99, 0x57, 0x4b, + 0x0f, 0xfd, 0x84, 0x29, 0x55, 0xbb, 0x06, 0xe8, 0x08, 0x78, 0xfc, 0xc7, 0xb6, 0x84, 0x99, 0x50, + 0xb6, 0xde, 0xe7, 0x6a, 0x2e, 0xdd, 0x12, 0xd4, 0xec, 0xef, 0x4e, 0x6d, 0x46, 0xfb, 0x2b, 0xc2, + 0x0b, 0x91, 0x95, 0xc5, 0x3f, 0xe2, 0x1c, 0xa7, 0x3c, 0xe2, 0x9a, 0x05, 0x6c, 0x64, 0x06, 0x52, + 0xb8, 0xcc, 0x7c, 0xf9, 0x4f, 0xa4, 0xdc, 0x13, 0x7f, 0x8a, 0x7e, 0xf4, 0xf2, 0x05, 0xc3, 0x6c, + 0xd4, 0xd8, 0x26, 0xfb, 0xb6, 0x18, 0x92, 0xa4, 0xc5, 0x3b, 0xef, 0xb3, 0xab, 0xa8, 0x8f, 0xdd, + 0x62, 0xb6, 0x41, 
0x34, 0x97, 0x34, 0xc1, 0xc8, 0x6b, 0x1b, 0x17, 0x6a, 0xdf, 0xb4, 0x86, 0xc7, + 0x80, 0x4a, 0x24, 0xec, 0x65, 0x2e, 0x61, 0xa8, 0x15, 0xef, 0x5c, 0x41, 0x58, 0x83, 0xa4, 0x23, + 0x9d, 0xcd, 0x86, 0xef, 0x48, 0x1b, 0x5c, 0x12, 0xe1, 0x4d, 0x17, 0x67, 0xcb, 0xf6, 0xf9, 0xcb, + 0xb3, 0x9a, 0x2a, 0x37, 0x11, 0xbd, 0x53, 0xd2, 0x0b, 0xd5, 0x1e, 0x3b, 0x41, 0x6d, 0x73, 0x9c, + 0x1b, 0xf7, 0xd0, 0xff, 0xc8, 0x2a, 0x25, 0xac, 0x42, 0x35, 0xb0, 0x1a, 0x35, 0x74, 0x76, 0xc1, + 0xb5, 0x30, 0x52, 0xa4, 0x66, 0x97, 0xe2, 0x3b, 0xd3, 0xeb, 0xf8, 0xe9, 0xec, 0x1d, 0xaa, 0x5d, + 0xb8, 0xac, 0x6b, 0x47, 0xa3, 0x0b, 0xe1, 0x2a, 0x08, 0x2b, 0x62, 0x22, 0xd3, 0x39, 0x63, 0x89, + 0x89, 0xe1, 0xa8, 0xba, 0x08, 0x31, 0xf1, 0xff, 0x71, 0xdc, 0x2b, 0x82, 0xda, 0xe6, 0x21, 0x81, + 0x8a, 0xc6, 0x9b, 0xf0, 0x0d, 0x7d, 0x82, 0xec, 0x40, 0x32, 0x24, 0xd9, 0xe2, 0xfc, 0xe6, 0x61, + 0x64, 0xfe, 0xf6, 0xf9, 0x9c, 0x1c, 0xdb, 0xe9, 0x67, 0x8e, 0x8d, 0x3e, 0xe8, 0xeb, 0xae, 0x64, + 0xa8, 0xd9, 0x0a, 0x94, 0x6b, 0x16, 0xb0, 0x83, 0xa2, 0xe1, 0x25, 0x23, 0xd7, 0x90, 0x8f, 0x65, + 0xfa, 0x3c, 0xc7, 0x73, 0xd4, 0xf0, 0xf3, 0xb8, 0xe6, 0xfa, 0xea, 0xba, 0x98, 0x6f, 0x44, 0x2a, + 0xd0, 0xf3, 0x7d, 0xc5, 0xa2, 0xe3, 0xdb, 0x88, 0xe7, 0x86, 0x28, 0xea, 0x65, 0xde, 0x18, 0x59, + 0x7f, 0xf9, 0xbe, 0x30, 0x26, 0xa3, 0x79, 0x75, 0x07, 0xc8, 0x63, 0x03, 0xbf, 0x24, 0xf6, 0x11, + 0x1a, 0x62, 0x33, 0xd5, 0x97, 0xa1, 0x33, 0xf8, 0x69, 0x66, 0xb6, 0x4f, 0x20, 0xb3, 0xc2, 0xae, + 0xb6, 0xc7, 0x12, 0x10, 0x70, 0xf2, 0x32, 0x51, 0xa8, 0x55, 0xc5, 0x00, 0x13, 0x7e, 0xbe, 0xfb, + 0x86, 0xf5, 0x1a, 0x14, 0x8e, 0x7f, 0xb7, 0x69, 0x6d, 0xf4, 0x75, 0xe0, 0x39, 0x95, 0x24, 0xb3, + 0xd8, 0xad, 0xcb, 0xbe, 0xbc, 0xb8, 0xa2, 0x2a, 0xd7, 0x71, 0x8f, 0x69, 0x62, 0x49, 0x93, 0x38, + 0x03, 0x0d, 0x94, 0x7d, 0x97, 0x97, 0x4a, 0x1a, 0xa9, 0x4c, 0x00, 0xb3, 0x19, 0xeb, 0x79, 0x4b, + 0x8f, 0x3d, 0x5a, 0x15, 0x34, 0x68, 0xa0, 0xb2, 0x07, 0xf5, 0xdb, 0x9c, 0xde, 0x47, 0x76, 0xf8, + 0xa3, 0xe2, 0x52, 0x2d, 0x4b, 0xad, 0x54, 0xea, 0xd2, 0x35, 0x03, 0xc7, 0x21, 0x41, 0x2b, 0xaa, + 0xbe, 0xb2, 0x36, 0xfb, 0x86, 0x0c, 0x10, 0xdc, 0x0b, 0xc0, 0x80, 0xe9, 0xf5, 0x30, 0x25, 0x8e, + 0xd3, 0xfa, 0x80, 0x8c, 0x77, 0x1e, 0x6c, 0xa5, 0xcd, 0x72, 0x87, 0x2c, 0x40, 0x54, 0xe4, 0x26, + 0x66, 0x9e, 0x8c, 0x5b, 0x45, 0x68, 0x91, 0x3e, 0x72, 0xe3, 0x23, 0x1b, 0x11, 0xcd, 0xab, 0xaf, + 0xb9, 0x50, 0x73, 0x98, 0x55, 0x40, 0x2d, 0xd5, 0xe1, 0xaa, 0x31, 0x83, 0x51, 0x71, 0x16, 0x0b, + 0x68, 0x45, 0x39, 0xfb, 0x3a, 0xc9, 0x65, 0xbd, 0x34, 0x26, 0x62, 0xbd, 0x32, 0x9e, 0x8d, 0xa7, + 0xb0, 0xed, 0x94, 0x0b, 0x85, 0x9d, 0xb5, 0xac, 0xfb, 0x27, 0x89, 0x37, 0xa9, 0x81, 0xb2, 0x3b, + 0x87, 0x41, 0x0d, 0x1d, 0x4e, 0xf2, 0x92, 0x3d, 0x25, 0x46, 0x93, 0xb0, 0x3a, 0xe2, 0x17, 0x4f, + 0x1d, 0xed, 0x08, 0xd5, 0x49, 0x7f, 0x68, 0xe7, 0x64, 0x5f, 0x06, 0x0b, 0x6c, 0x63, 0x53, 0x3d, + 0x33, 0x4e, 0x9c, 0x9a, 0xd4, 0x51, 0x53, 0x11, 0x38, 0x39, 0xe9, 0x48, 0x6a, 0xc5, 0x3b, 0x5a, + 0xee, 0xf6, 0xd8, 0x0e, 0x06, 0x55, 0xd8, 0xaa, 0xf5, 0xae, 0x8e, 0x27, 0xe7, 0xe9, 0xf2, 0x73, + 0xae, 0x2a, 0x63, 0x17, 0x87, 0x3b, 0xe7, 0x4d, 0x2f, 0x8f, 0x13, 0xe0, 0x91, 0x08, 0x1b, 0x55, + 0x09, 0x2d, 0xc7, 0xa1, 0xc3, 0x79, 0x4a, 0xfc, 0x3d, 0xd8, 0xc1, 0x7d, 0x48, 0x4b, 0x63, 0xf1, + 0xfe, 0x91, 0x54, 0xf1, 0x36, 0xbd, 0xba, 0xbf, 0xbe, 0xce, 0xbc, 0x70, 0x0a, 0x23, 0x67, 0x41, + 0x5e, 0xf2, 0x72, 0x47, 0x89, 0xee, 0x2f, 0x36, 0xa2, 0x6d, 0x8d, 0xf8, 0x8a, 0x4f, 0x5f, 0x43, + 0x3b, 0x64, 0xbe, 0x99, 0x5b, 0x45, 0x67, 0x47, 0x82, 0x9d, 0xff, 0x49, 0xe8, 0xca, 0x60, 0xbf, + 0xca, 0xd7, 0x02, 0xfb, 0xbe, 0x9b, 0x50, 
0x8e, 0xfa, 0x86, 0xff, 0x7b, 0x24, 0x90, 0x6e, 0x49, + 0x8a, 0x07, 0x85, 0x89, 0xa9, 0x55, 0x6a, 0xf8, 0x8b, 0x6d, 0x8a, 0x03, 0xab, 0x81, 0x87, 0x1f, + 0xc3, 0x12, 0x35, 0x69, 0xa7, 0x43, 0xff, 0x0b, 0xde, 0x8c, 0x51, 0x95, 0xf2, 0x6a, 0xc4, 0x67, + 0x2f, 0x99, 0x07, 0x2e, 0x9d, 0x10, 0x35, 0xc8, 0xb0, 0x59, 0x4c, 0x7d, 0x2b, 0x61, 0x47, 0xa4, + 0x7f, 0x51, 0x3d, 0xe5, 0x14, 0x61, 0xb2, 0x6a, 0xf3, 0x96, 0x2f, 0xd6, 0x6c, 0xb7, 0xb2, 0xcc, + 0x9a, 0xf4, 0xc2, 0xa1, 0x20, 0x52, 0xa9, 0xf6, 0xaf, 0xaa, 0x08, 0xe0, 0x1c, 0xce, 0xb5, 0x1f, + 0x9d, 0x98, 0x5a, 0x0d, 0xe9, 0x38, 0x05, 0x86, 0x57, 0x92, 0x78, 0xfa, 0xfe, 0xaf, 0x8f, 0x77, + 0x97, 0x49, 0xca, 0x53, 0xe0, 0x81, 0xe9, 0x44, 0x23, 0xd3, 0xfb, 0xcc, 0x71, 0xf2, 0x0c, 0x2c, + 0xc4, 0x8d, 0x8e, 0x66, 0xa3, 0x84, 0x92, 0x2f, 0x26, 0xf8, 0x18, 0xd2, 0x98, 0x84, 0xb1, 0xff, + 0x07, 0xf4, 0x3d, 0xe5, 0x0a, 0xe3, 0xd4, 0x14, 0x27, 0x15, 0x9f, 0x81, 0x54, 0x99, 0xf2, 0x26, + 0xf2, 0x59, 0xe3, 0xce, 0x39, 0xee, 0xb2, 0xc8, 0x70, 0xd4, 0xab, 0xc8, 0xd8, 0x67, 0x46, 0xdb, + 0x93, 0x71, 0x01, 0xe2, 0xef, 0x68, 0xeb, 0xec, 0x21, 0x24, 0x3d, 0xce, 0x38, 0xdf, 0xaa, 0xf8, + 0x0c, 0xd3, 0x5a, 0x08, 0x58, 0x60, 0xb1, 0xed, 0x26, 0xb5, 0x19, 0x8f, 0x26, 0x0c, 0x6d, 0x1e, + 0x9f, 0x91, 0xfd, 0xa4, 0xa8, 0xec, 0xa4, 0x44, 0x53, 0x1e, 0xf8, 0x63, 0xcc, 0x80, 0xa3, 0x99, + 0x54, 0xe5, 0xe8, 0xb3, 0x55, 0xf6, 0x02, 0xa0, 0x72, 0xc7, 0xfc, 0xe3, 0x3d, 0x1d, 0x70, 0x9a, + 0x50, 0x0c, 0x28, 0x6f, 0x20, 0xe0, 0x4e, 0x5e, 0xcc, 0xdb, 0x73, 0x29, 0x10, 0xeb, 0x80, 0x82, + 0x49, 0xa5, 0xbb, 0x5d, 0x49, 0xeb, 0x1f, 0xee, 0xa3, 0xc8, 0x08, 0xc1, 0x6a, 0x9d, 0xd1, 0x13, + 0xc9, 0xd3, 0x76, 0x24, 0x6d, 0x34, 0x43, 0x43, 0x16, 0x56, 0x4e, 0x61, 0x67, 0xe8, 0x07, 0xb1, + 0x15, 0x10, 0x9e, 0x52, 0x21, 0xa9, 0xdc, 0x84, 0xbf, 0xb9, 0x13, 0xca, 0xfd, 0x61, 0x41, 0x1d, + 0x31, 0xe0, 0x1d, 0x3e, 0x31, 0x4c, 0xf8, 0x2d, 0x77, 0x63, 0x44, 0x7a, 0x3a, 0xd5, 0x38, 0xab, + 0x9b, 0x54, 0x0e, 0x2b, 0xbf, 0x4d, 0x9e, 0x88, 0x99, 0xb2, 0x59, 0xa3, 0xdf, 0xab, 0x10, 0x5d, + 0x89, 0x21, 0xb4, 0xe6, 0x22, 0x3e, 0x99, 0xf5, 0x10, 0x96, 0xaf, 0x10, 0x37, 0x1f, 0x3b, 0xa6, + 0x9e, 0x61, 0xe9, 0x2b, 0x3a, 0xaa, 0x78, 0x37, 0xb1, 0x1d, 0xa7, 0x25, 0x64, 0xa1, 0xf6, 0x69, + 0x12, 0xd9, 0xb5, 0x2d, 0x97, 0x9f, 0x34, 0xa9, 0xcd, 0x4c, 0x64, 0x63, 0xc2, 0x21, 0x96, 0x98, + 0xf9, 0x82, 0xb7, 0x97, 0x08, 0xcb, 0xca, 0x93, 0x3c, 0x15, 0x53, 0x05, 0x6f, 0x80, 0xc2, 0x1f, + 0x7f, 0x04, 0x86, 0x7f, 0xd6, 0xe8, 0x35, 0xfc, 0x0b, 0xdd, 0x44, 0x76, 0x34, 0xa4, 0x6c, 0xc2, + 0x3a, 0xc6, 0x13, 0x36, 0x24, 0xbd, 0x77, 0xfd, 0x12, 0x28, 0x5a, 0xdc, 0x6c, 0x55, 0x0b, 0x69, + 0xfc, 0xa4, 0x44, 0x51, 0x98, 0x81, 0x3b, 0x80, 0x75, 0xe9, 0xae, 0xdc, 0xe5, 0x82, 0xef, 0x25, + 0x5c, 0x23, 0x93, 0x4d, 0xd1, 0x6c, 0xbb, 0x7b, 0x0c, 0x07, 0x2a, 0xc4, 0x0b, 0xfd, 0xe0, 0x4f, + 0x1b, 0x33, 0x3f, 0x28, 0xb5, 0x09, 0x53, 0x2e, 0x16, 0xb0, 0x4c, 0xfa, 0xb3, 0x2f, 0x8f, 0x47, + 0xf4, 0xd0, 0x0f, 0xc1, 0x91, 0x7f, 0x69, 0xed, 0xa9, 0xf2, 0x24, 0x18, 0x24, 0xbb, 0x46, 0x52, + 0x21, 0x1e, 0x79, 0x1e, 0x75, 0x09, 0xc9, 0x2d, 0x15, 0xab, 0xf3, 0x15, 0x94, 0xc2, 0x2d, 0xcb, + 0xa2, 0x1c, 0xcf, 0xbd, 0x2b, 0x3a, 0x9f, 0x95, 0xf5, 0x50, 0xda, 0xd6, 0x55, 0x2f, 0x22, 0xc2, + 0xda, 0x90, 0x73, 0x63, 0xd6, 0xbc, 0x99, 0xc1, 0x74, 0x20, 0xa0, 0xfe, 0xf7, 0x3e, 0x46, 0xd5, + 0xf3, 0xee, 0x8a, 0x58, 0x12, 0xef, 0x58, 0x30, 0xa2, 0xe8, 0x72, 0xea, 0x23, 0xeb, 0xa5, 0x59, + 0x7e, 0xe3, 0xf5, 0xe0, 0x22, 0x97, 0x71, 0xfd, 0x58, 0x0e, 0xbb, 0x77, 0x1a, 0x27, 0x6b, 0xc9, + 0xa7, 0xa3, 0x95, 0x73, 0x43, 0x6f, 0xe5, 0x9c, 0x09, 0x9f, 0x17, 
0x46, 0xcf, 0xd7, 0xc1, 0xb2, + 0x91, 0x65, 0x39, 0xe6, 0xbb, 0x39, 0xf4, 0xce, 0x35, 0x13, 0x56, 0x8a, 0x65, 0xbc, 0x76, 0x28, + 0x84, 0x59, 0x94, 0x37, 0xbf, 0x35, 0x8d, 0x21, 0xf1, 0x31, 0xfe, 0xbc, 0xa9, 0x95, 0xae, 0xde, + 0xe2, 0xa0, 0x26, 0xbc, 0x7e, 0xcb, 0xd9, 0xee, 0x3b, 0xb5, 0xca, 0xad, 0x06, 0xbb, 0x5e, 0xc2, + 0xd9, 0x1b, 0x24, 0x53, 0x58, 0x7c, 0x59, 0xf4, 0x30, 0x14, 0x70, 0xd2, 0x58, 0x9a, 0x15, 0xa0, + 0xeb, 0x83, 0x6b, 0x36, 0x6f, 0x5f, 0x63, 0xec, 0xf5, 0xed, 0x49, 0x91, 0x22, 0xcf, 0xc9, 0x83, + 0x4a, 0x0c, 0xb0, 0xdc, 0x30, 0x98, 0x82, 0x4b, 0x2c, 0x1e, 0xa2, 0xb8, 0x19, 0xb1, 0xf6, 0x39, + 0x51, 0xde, 0x88, 0xf4, 0x22, 0xea, 0x87, 0x74, 0x24, 0xe0, 0x58, 0x1a, 0x15, 0xe8, 0xae, 0xc8, + 0x1d, 0x52, 0x73, 0xdf, 0xfc, 0x57, 0xd6, 0xb0, 0x45, 0x93, 0x21, 0x5b, 0x55, 0x7e, 0x1c, 0xdc, + 0x53, 0x22, 0x74, 0xbe, 0xf2, 0xc8, 0xaf, 0x5c, 0xd2, 0x23, 0x37, 0xc3, 0x45, 0x85, 0x04, 0xfe, + 0x7a, 0x24, 0x2c, 0x07, 0x8b, 0x1b, 0xd3, 0x52, 0x9c, 0xf6, 0x82, 0x2b, 0xa7, 0x9c, 0x59, 0x29, + 0x36, 0x51, 0x83, 0x00, 0x10, 0x3c, 0x88, 0x47, 0xb6, 0x2d, 0x4d, 0x56, 0x37, 0x7a, 0x3d, 0x16, + 0x8b, 0x0c, 0x2d, 0xe9, 0xe2, 0x2d, 0x87, 0x7e, 0xb0, 0x34, 0x07, 0x8d, 0xea, 0x48, 0x87, 0x27, + 0xea, 0x2f, 0x1f, 0x3c, 0xb7, 0x79, 0xdc, 0x53, 0xd6, 0xd9, 0x79, 0xa8, 0x80, 0x83, 0xef, 0x44, + 0xc5, 0x5d, 0xa7, 0x9b, 0x43, 0xfc, 0x0b, 0x32, 0xc6, 0xc2, 0x21, 0x88, 0x63, 0x20, 0x09, 0x32, + 0x60, 0xc1, 0xb7, 0xcb, 0x76, 0x8b, 0xe5, 0x50, 0xb7, 0xe5, 0x30, 0xea, 0x6f, 0x51, 0x09, 0xa8, + 0x56, 0x64, 0xbc, 0xe8, 0xf1, 0x5c, 0xaf, 0xb4, 0x17, 0x62, 0xcd, 0x8b, 0xbb, 0xb6, 0xb6, 0x6f, + 0x57, 0x12, 0x33, 0xde, 0xf7, 0xd7, 0x8e, 0x73, 0x7d, 0xb0, 0x48, 0xf1, 0xad, 0xc3, 0xf3, 0xc2, + 0xc3, 0x99, 0x3d, 0xa3, 0xe2, 0x08, 0x1a, 0x05, 0xda, 0x51, 0x05, 0xbf, 0x24, 0x03, 0xce, 0x82, + 0xbf, 0x98, 0xa7, 0xc7, 0x16, 0x58, 0x1c, 0xcf, 0xfa, 0xb5, 0x7d, 0x2a, 0x2b, 0x54, 0xea, 0x85, + 0x3d, 0x9b, 0x14, 0x41, 0x52, 0xec, 0x15, 0xf0, 0x93, 0x45, 0x7a, 0xd5, 0x91, 0x3a, 0x74, 0xed, + 0xa3, 0x15, 0xfd, 0x5f, 0x88, 0xcc, 0x10, 0x4d, 0x12, 0x85, 0x57, 0xb1, 0x36, 0x67, 0xa3, 0xdc, + 0xaf, 0x28, 0xea, 0x17, 0x24, 0xe4, 0xf2, 0x94, 0x12, 0xfc, 0x1c, 0x4e, 0x07, 0x3f, 0x0c, 0xd1, + 0x3f, 0x69, 0x8d, 0x63, 0x00, 0x5e, 0xf8, 0xf3, 0xf3, 0xfd, 0x26, 0x7a, 0x0c, 0x2f, 0x8c, 0x7f, + 0x20, 0x49, 0xaf, 0x4b, 0x1b, 0xbe, 0xe7, 0xfa, 0xed, 0x01, 0xe5, 0x08, 0xc6, 0x8a, 0x71, 0xd0, + 0xd0, 0x8b, 0xf7, 0x33, 0xf2, 0x75, 0x3c, 0x12, 0xa7, 0xd4, 0x6d, 0x68, 0x5e, 0x50, 0x94, 0x6e, + 0x3c, 0x8b, 0x92, 0x0d, 0xf2, 0xb3, 0x33, 0x1a, 0x9a, 0x29, 0x66, 0xa6, 0x60, 0x31, 0x49, 0xfe, + 0x37, 0x1e, 0x14, 0x30, 0xaf, 0x57, 0x27, 0x56, 0xf6, 0x60, 0x30, 0x24, 0x7e, 0x7d, 0x4a, 0x4a, + 0x04, 0x72, 0xdd, 0xc0, 0xb4, 0x01, 0xdb, 0xf6, 0xd9, 0x41, 0x29, 0x92, 0x6f, 0xe5, 0x60, 0x36, + 0x53, 0x7c, 0x2e, 0x1f, 0xe0, 0x67, 0x08, 0xe8, 0x85, 0x2d, 0x14, 0x1a, 0xb8, 0xb9, 0xea, 0x3f, + 0x61, 0x5e, 0xdf, 0x7a, 0x7d, 0x80, 0xfe, 0x4e, 0xae, 0xf1, 0x6c, 0x1e, 0xef, 0x40, 0x5c, 0x8c, + 0x94, 0x33, 0xf0, 0x93, 0x2b, 0x30, 0x9f, 0xe6, 0x44, 0x81, 0xd3, 0x14, 0xa2, 0x37, 0x48, 0x4e, + 0x95, 0xd0, 0x9b, 0x54, 0x58, 0x6f, 0x93, 0x4e, 0x28, 0x43, 0x43, 0x99, 0x8e, 0x1c, 0x1d, 0x05, + 0xf2, 0xf2, 0x67, 0xed, 0x03, 0x4a, 0x57, 0xb6, 0xc4, 0x71, 0x5c, 0xff, 0x14, 0xcb, 0xc5, 0x5a, + 0x62, 0x38, 0xc0, 0xbe, 0x09, 0x4c, 0x71, 0x05, 0xad, 0x58, 0x4d, 0x16, 0x73, 0x4c, 0x78, 0x40, + 0x2c, 0xf0, 0x6f, 0x58, 0x19, 0x7f, 0x43, 0xd0, 0xcf, 0xf1, 0xa5, 0x24, 0x2e, 0xcf, 0xe6, 0x39, + 0xff, 0x8f, 0xcf, 0x13, 0xc0, 0x4e, 0x84, 0x0a, 0x52, 0x73, 0x09, 0x94, 0x97, 0xf6, 0xbc, 
0xc3, + 0x0e, 0x13, 0x52, 0x6c, 0x0a, 0xa8, 0x9c, 0x89, 0x96, 0x6d, 0x56, 0x24, 0x87, 0x4d, 0x3c, 0x57, + 0x7b, 0x81, 0x4d, 0xfe, 0x81, 0x27, 0xdd, 0xc9, 0xde, 0xe4, 0x62, 0x98, 0x06, 0x54, 0x7f, 0x7d, + 0x8b, 0x6f, 0xcc, 0x83, 0x22, 0x84, 0x99, 0x98, 0xfc, 0x06, 0x9a, 0x07, 0x04, 0xe0, 0xa2, 0xfd, + 0xeb, 0x7b, 0x10, 0x14, 0x5d, 0x16, 0x5d, 0x6f, 0x5c, 0x5f, 0xdb, 0xa6, 0x0d, 0x29, 0x56, 0x0f, + 0x37, 0xea, 0xc7, 0xb8, 0x5e, 0x6c, 0x7a, 0x23, 0xf7, 0x5d, 0x38, 0xda, 0x27, 0x80, 0x0e, 0x9e, + 0xff, 0xe7, 0x4d, 0x05, 0x69, 0x14, 0x65, 0x34, 0x82, 0xc0, 0xfa, 0x6d, 0x9b, 0x19, 0x2a, 0x84, + 0xbd, 0xdc, 0xed, 0x57, 0x1b, 0x75, 0x71, 0x5e, 0xc8, 0xbe, 0x39, 0x33, 0xbf, 0x3e, 0x4f, 0x21, + 0x47, 0x6f, 0x7f, 0xae, 0x35, 0xaf, 0x68, 0xa6, 0xb7, 0x61, 0x93, 0x67, 0x3d, 0x79, 0x38, 0x68, + 0xc1, 0xb0, 0x3b, 0xb4, 0xa4, 0x0f, 0xed, 0xaa, 0xfa, 0xcc, 0x1d, 0xc7, 0x03, 0x84, 0x7b, 0x7d, + 0x6c, 0x69, 0x4a, 0xcf, 0xef, 0x79, 0x22, 0x6d, 0x3b, 0xe9, 0xe1, 0xb4, 0xce, 0xa1, 0xe3, 0x09, + 0x73, 0x38, 0x54, 0x42, 0x90, 0x95, 0xbc, 0x56, 0x61, 0x5c, 0x91, 0xb8, 0x60, 0xee, 0x0c, 0x4d, + 0x02, 0x97, 0x68, 0x60, 0x8e, 0x4a, 0xa3, 0xbc, 0xfa, 0x26, 0x75, 0xd3, 0x9e, 0x4a, 0x2d, 0x3b, + 0x93, 0xb1, 0x9e, 0xee, 0x4b, 0xdc, 0x25, 0x4b, 0x3f, 0xb2, 0xd0, 0xbe, 0x70, 0xfe, 0xda, 0x1e, + 0x15, 0x5f, 0x0a, 0x6d, 0x3c, 0xef, 0x88, 0x24, 0xfe, 0xd4, 0xb8, 0xce, 0x7c, 0x0a, 0x53, 0x9a, + 0x47, 0x16, 0x14, 0x48, 0x5a, 0xcd, 0x3b, 0xaa, 0x35, 0x61, 0x05, 0x83, 0x80, 0xe7, 0x43, 0x76, + 0xc6, 0xf5, 0xc0, 0xb2, 0x56, 0xba, 0x24, 0xa9, 0x9c, 0x10, 0xf4, 0x0b, 0x9a, 0x2e, 0xde, 0x77, + 0xd9, 0x80, 0x8b, 0x14, 0x3c, 0x77, 0xd6, 0x41, 0x60, 0x72, 0x12, 0xe6, 0x99, 0x5c, 0x39, 0xb6, + 0x7c, 0x80, 0x96, 0x2e, 0xa6, 0x76, 0xa4, 0x55, 0x25, 0xb3, 0x63, 0xf5, 0x1e, 0xe8, 0xc9, 0xeb, + 0xaf, 0xfa, 0x59, 0xbb, 0xdb, 0x39, 0x5c, 0x97, 0x1d, 0x05, 0x17, 0x7e, 0xcf, 0xdd, 0xd4, 0x74, + 0xb6, 0xfa, 0x3b, 0x6f, 0x92, 0x2b, 0x66, 0x43, 0xe7, 0x0a, 0x97, 0x75, 0x8d, 0x70, 0x5b, 0x7d, + 0xee, 0x9d, 0x1b, 0x99, 0x53, 0x39, 0x97, 0xdd, 0x05, 0x02, 0x21, 0x74, 0x22, 0x3e, 0xd4, 0xe5, + 0x2a, 0x89, 0x30, 0x5d, 0xc9, 0x2e, 0x5c, 0x4f, 0x04, 0x65, 0x5a, 0x14, 0x63, 0x94, 0xc5, 0x10, + 0x3c, 0xaf, 0xac, 0x63, 0x90, 0x5b, 0x53, 0x18, 0xde, 0x4b, 0x97, 0x11, 0x43, 0x88, 0xa8, 0x73, + 0xae, 0x65, 0xee, 0x1c, 0x02, 0x01, 0x24, 0x46, 0x7b, 0xba, 0x3d, 0x52, 0xc5, 0xe8, 0x22, 0x2b, + 0xa8, 0xfd, 0xb9, 0xb8, 0x4d, 0x62, 0xa4, 0xc9, 0xea, 0xa7, 0xec, 0x5e, 0xac, 0xd6, 0xb3, 0x3e, + 0x5c, 0xc7, 0x6e, 0x4e, 0x87, 0xb9, 0xa8, 0x1c, 0x26, 0x8a, 0x92, 0x28, 0x77, 0x7c, 0xfd, 0x3e, + 0xb2, 0x0b, 0xff, 0x0f, 0x69, 0x10, 0xcc, 0x1f, 0x6a, 0x7d, 0x26, 0xee, 0x19, 0x29, 0x3d, 0xac, + 0xcc, 0xab, 0x3f, 0x73, 0x9a, 0x3c, 0x8c, 0x49, 0xce, 0xe8, 0xb5, 0x25, 0x8c, 0xf9, 0xcd, 0x2c, + 0xf0, 0x91, 0x96, 0xbe, 0x3c, 0x17, 0x6e, 0xb3, 0x2a, 0xa7, 0x19, 0xe8, 0x1d, 0xe1, 0x06, 0xff, + 0x2d, 0x6c, 0x52, 0xff, 0x7b, 0x3f, 0xb3, 0xb6, 0x2c, 0x02, 0xf4, 0x0a, 0xa7, 0x31, 0xc0, 0xdf, + 0x7c, 0x79, 0x5a, 0x8a, 0x98, 0xc6, 0x8e, 0xff, 0x1d, 0x9f, 0x52, 0x11, 0x90, 0x67, 0x75, 0xf5, + 0xf7, 0x41, 0x08, 0x3f, 0xd7, 0x23, 0x35, 0x57, 0xb3, 0x21, 0x4d, 0x93, 0x9e, 0x6b, 0x97, 0x07, + 0xd4, 0xa8, 0x9e, 0x25, 0xfe, 0x41, 0x37, 0xcf, 0x85, 0x5e, 0x7b, 0x83, 0x0e, 0xbd, 0x69, 0x67, + 0x73, 0x9d, 0xd2, 0xf0, 0x05, 0x7a, 0xc8, 0x04, 0xf4, 0x35, 0xb5, 0x32, 0x0e, 0xba, 0x0a, 0x83, + 0xf7, 0x29, 0x75, 0x9b, 0x77, 0x30, 0x0e, 0x5f, 0x2f, 0xdc, 0xcc, 0xc0, 0xc8, 0x4a, 0xd2, 0xf3, + 0x68, 0x23, 0xdd, 0x88, 0x1a, 0x2e, 0x9c, 0x65, 0x80, 0x3f, 0x01, 0x51, 0x3a, 0x98, 0xd1, 0xa2, + 0x38, 0xc7, 0x8c, 
0x50, 0x19, 0xf7, 0xb6, 0x43, 0x37, 0xa7, 0xc5, 0x6a, 0x19, 0x69, 0xad, 0x0b, + 0x10, 0xbc, 0xf5, 0x5e, 0xef, 0x8e, 0xaa, 0x46, 0x27, 0xf9, 0x78, 0x89, 0x67, 0x70, 0xcf, 0x21, + 0xfd, 0x49, 0xa2, 0x4a, 0xda, 0xcd, 0x6d, 0xc8, 0xd4, 0xc3, 0x6e, 0xd6, 0x93, 0x41, 0x71, 0xc0, + 0xbd, 0x0b, 0x99, 0x0c, 0xa3, 0xd9, 0x13, 0x66, 0xea, 0x8c, 0x05, 0x6a, 0x13, 0x38, 0x8d, 0xc6, + 0xdb, 0x60, 0xd7, 0x97, 0x0b, 0x40, 0xd1, 0x07, 0xbd, 0x75, 0xdf, 0x8a, 0x18, 0xfa, 0x68, 0x91, + 0x67, 0x08, 0xaa, 0xd0, 0x63, 0x85, 0xe0, 0xa3, 0x3d, 0x92, 0x30, 0x1a, 0xac, 0x33, 0x9f, 0x1a, + 0xf8, 0xad, 0x53, 0xa5, 0xb7, 0x48, 0xeb, 0x35, 0x94, 0x0c, 0x34, 0x10, 0x9d, 0x21, 0xe3, 0x2d, + 0xbc, 0xe2, 0xa8, 0xaf, 0x7b, 0xb6, 0xb4, 0xbd, 0xc6, 0xeb, 0x3f, 0xc0, 0x46, 0x95, 0x32, 0x9c, + 0x1a, 0x84, 0x15, 0x8d, 0x5a, 0xbb, 0x49, 0x3f, 0x14, 0x3b, 0x5c, 0x60, 0xde, 0x26, 0x4e, 0xc3, + 0xfe, 0x7b, 0xdb, 0x4c, 0xfd, 0xb6, 0x76, 0x0d, 0x02, 0x4a, 0x29, 0x03, 0x84, 0xb3, 0x02, 0x46, + 0x82, 0x8b, 0xdf, 0xc1, 0xd9, 0x1d, 0x65, 0xa5, 0xda, 0xb8, 0xfa, 0xa4, 0xf7, 0x9e, 0x6b, 0x5a, + 0x86, 0x75, 0xbf, 0x7a, 0xaa, 0xa5, 0xe1, 0x2d, 0xf5, 0x63, 0xc8, 0x7e, 0x01, 0xdb, 0x3a, 0x10, + 0xab, 0x7b, 0x86, 0xee, 0x2a, 0xb2, 0xe1, 0xc2, 0x5b, 0x1b, 0x38, 0xd0, 0x8d, 0x7f, 0xd2, 0xbf, + 0x5f, 0x90, 0xa2, 0x7a, 0x59, 0x61, 0xcc, 0xb6, 0xc0, 0xf3, 0xb1, 0xed, 0xaa, 0x61, 0x6f, 0x96, + 0x1d, 0x14, 0x09, 0xac, 0x38, 0xae, 0xb1, 0x44, 0xd0, 0x0b, 0x36, 0xf1, 0x50, 0xfd, 0xb9, 0xe7, + 0xd0, 0xf2, 0x4c, 0x1f, 0xb6, 0xc2, 0x27, 0x24, 0x10, 0x5e, 0x90, 0x5d, 0xa2, 0x4f, 0x5d, 0xbc, + 0x98, 0x6f, 0xd2, 0x2e, 0xa9, 0xbf, 0xec, 0x19, 0xfc, 0x06, 0xc7, 0x15, 0x34, 0x25, 0xb5, 0x9a, + 0xe8, 0xdf, 0x6c, 0x62, 0xb2, 0x89, 0x5c, 0xe3, 0x20, 0xc5, 0x9e, 0x5e, 0xd9, 0x97, 0x71, 0xb0, + 0x98, 0x75, 0x6e, 0x6f, 0x00, 0xfc, 0xb8, 0x08, 0x81, 0x30, 0x37, 0x2f, 0xef, 0xb0, 0x82, 0x0a, + 0xb9, 0x53, 0x72, 0x13, 0xd1, 0xf0, 0xe7, 0xa6, 0x36, 0x5c, 0x29, 0x30, 0xf0, 0xc7, 0x7c, 0x2d, + 0x68, 0x0a, 0xc5, 0xff, 0xf0, 0x5f, 0x0b, 0x1b, 0x6c, 0x60, 0x3a, 0x16, 0x47, 0xf2, 0x9e, 0x68, + 0xdd, 0x2f, 0x31, 0xce, 0xf5, 0xb7, 0x23, 0xc4, 0x1a, 0x88, 0x6e, 0xab, 0xc9, 0x61, 0xab, 0x8b, + 0xc8, 0x5f, 0x00, 0x52, 0x78, 0xd0, 0x9c, 0xf7, 0xab, 0xe3, 0x9a, 0x20, 0xff, 0x16, 0x7c, 0x54, + 0x11, 0xfd, 0x3d, 0xdf, 0xff, 0x51, 0x06, 0xaf, 0x67, 0xcb, 0xb7, 0xad, 0x3f, 0xf4, 0xcb, 0xa5, + 0xf2, 0xb6, 0x94, 0x64, 0xb6, 0xac, 0xfd, 0x66, 0x90, 0xf9, 0xcd, 0x01, 0x17, 0xfa, 0xa2, 0x9a, + 0xdf, 0xba, 0xd6, 0x90, 0x1b, 0xb3, 0x76, 0xd3, 0xdc, 0x1c, 0x7f, 0x46, 0x13, 0x03, 0x97, 0x4d, + 0xc2, 0x40, 0xdf, 0x55, 0x54, 0xdc, 0x14, 0xe8, 0x44, 0x8f, 0x9e, 0xe0, 0x72, 0xa9, 0x34, 0x24, + 0x1e, 0x48, 0xee, 0xb5, 0xb1, 0x9b, 0xbb, 0x1a, 0x24, 0xc7, 0x3f, 0xf2, 0xb7, 0x03, 0x95, 0x9c, + 0x32, 0x87, 0x84, 0xe9, 0x26, 0xb8, 0xe3, 0xa0, 0x7e, 0xdb, 0xfe, 0xdb, 0x21, 0x50, 0x91, 0x7b, + 0xf5, 0x2b, 0x3d, 0x3b, 0xb1, 0x79, 0x0f, 0xca, 0xd8, 0xd3, 0x7e, 0xb5, 0x33, 0x63, 0xe7, 0x36, + 0x5e, 0x13, 0x26, 0x27, 0x1a, 0xb8, 0x44, 0x50, 0x68, 0x15, 0x22, 0x59, 0x8b, 0xd3, 0x67, 0x03, + 0xc1, 0xb2, 0x3f, 0x8c, 0xbc, 0x17, 0x78, 0x92, 0x58, 0x26, 0xc1, 0xe5, 0xfc, 0x66, 0x3d, 0xe0, + 0x36, 0xad, 0x8d, 0xd2, 0x5c, 0xbc, 0x90, 0x0a, 0xd7, 0xbd, 0xe1, 0x62, 0x10, 0x3f, 0x3c, 0x1b, + 0x0f, 0x89, 0x0d, 0xe1, 0xff, 0x89, 0xfe, 0x80, 0x72, 0x1a, 0xa6, 0xe1, 0x5a, 0x92, 0x49, 0x92, + 0xe5, 0x54, 0xf4, 0x1b, 0x1a, 0xe8, 0xec, 0xb1, 0x3a, 0x61, 0x9b, 0x99, 0x46, 0xea, 0x0c, 0x21, + 0x0f, 0x93, 0xa8, 0x8a, 0x2e, 0xd7, 0x0b, 0xbe, 0x18, 0x52, 0x37, 0xba, 0x88, 0x7e, 0x94, 0x8e, + 0x12, 0xc3, 0x3f, 0xb8, 0x6e, 0x23, 0xce, 
0x71, 0xf0, 0x37, 0x3e, 0x88, 0x0e, 0xd7, 0x13, 0x84, + 0x8e, 0x50, 0x06, 0xb8, 0x44, 0x78, 0x7a, 0x9b, 0xf4, 0xe6, 0x89, 0x7a, 0x82, 0xaa, 0xae, 0xf2, + 0xf2, 0x14, 0xbf, 0xee, 0x18, 0x9b, 0x0d, 0x21, 0x32, 0x28, 0xe3, 0x33, 0xce, 0xaf, 0x26, 0xdd, + 0xb0, 0x13, 0x2d, 0x80, 0x3f, 0xba, 0xeb, 0x19, 0x35, 0x62, 0x91, 0x14, 0x31, 0xa0, 0xaf, 0xe9, + 0x01, 0xff, 0xd7, 0x00, 0x39, 0x92, 0x14, 0xff, 0x95, 0x6c, 0xff, 0x99, 0xc3, 0x10, 0x4f, 0x19, + 0xe1, 0x07, 0xdd, 0xbc, 0x90, 0x2d, 0x52, 0x06, 0xf2, 0xfa, 0xf8, 0x7f, 0xc7, 0x7f, 0x31, 0x4d, + 0x47, 0xc1, 0xf3, 0x5b, 0xee, 0xab, 0x72, 0xd7, 0x24, 0x9e, 0x44, 0x67, 0xb5, 0x82, 0x11, 0xe6, + 0x70, 0x30, 0x58, 0xa3, 0xd7, 0xd1, 0x5e, 0x67, 0x7f, 0xdb, 0xd9, 0x5e, 0xa5, 0x73, 0xfc, 0x5c, + 0x6e, 0xe1, 0xf2, 0x11, 0x3f, 0xcb, 0x27, 0x63, 0x5b, 0x6c, 0xa8, 0xc7, 0x75, 0x0e, 0xe5, 0x93, + 0x8d, 0xde, 0x78, 0x6b, 0xd2, 0x39, 0xb1, 0x66, 0xc2, 0xd3, 0x44, 0xbd, 0xd3, 0xef, 0x8a, 0x9b, + 0xe7, 0xf7, 0x6e, 0xaf, 0x06, 0x5d, 0x87, 0x88, 0xf9, 0x75, 0xb1, 0xb1, 0x06, 0xc9, 0x85, 0xc8, + 0x54, 0x13, 0xa3, 0xf4, 0x57, 0xe5, 0xdf, 0x0c, 0x1b, 0x1b, 0xca, 0xc9, 0x39, 0x0e, 0xab, 0x29, + 0xe2, 0x30, 0x49, 0x8e, 0xb6, 0x5b, 0x63, 0x7a, 0x88, 0xf0, 0x41, 0xbf, 0xe1, 0xbf, 0x0f, 0xdc, + 0x74, 0x33, 0x99, 0x71, 0xeb, 0x3a, 0xe5, 0xa3, 0xed, 0x7e, 0x9a, 0x9e, 0xf3, 0xb3, 0xb2, 0x79, + 0x91, 0xb7, 0xed, 0x5c, 0x5e, 0xc1, 0x69, 0x78, 0x73, 0xf7, 0x5b, 0x1b, 0x70, 0x76, 0xea, 0x8f, + 0x66, 0x56, 0xdb, 0x75, 0x09, 0xca, 0x41, 0x73, 0xdd, 0x08, 0x4a, 0x24, 0x10, 0xb9, 0xb4, 0x59, + 0xfe, 0x59, 0x23, 0x85, 0x32, 0x34, 0x8b, 0xf6, 0x7d, 0x95, 0x31, 0xbc, 0xd3, 0xc1, 0x78, 0xee, + 0x3f, 0x27, 0xfe, 0xa6, 0x0e, 0xd8, 0x41, 0x2e, 0xa0, 0x09, 0x2f, 0x50, 0x39, 0xcd, 0x73, 0x2b, + 0xbc, 0x3e, 0x4c, 0x79, 0x98, 0xf6, 0xda, 0x0e, 0xb5, 0x4b, 0x6c, 0x69, 0x64, 0x09, 0x94, 0xbd, + 0x4d, 0xc9, 0xcb, 0x5d, 0x82, 0xfa, 0x9e, 0xcd, 0x3b, 0x96, 0x63, 0x1e, 0xcb, 0xdf, 0xdc, 0x83, + 0xc6, 0xe5, 0xc5, 0x36, 0x6c, 0x3f, 0xc4, 0x02, 0xd3, 0xc7, 0x0c, 0xfe, 0x1b, 0xed, 0x30, 0x85, + 0x6a, 0xf7, 0xe3, 0x10, 0xa8, 0x51, 0x1f, 0x8a, 0x4b, 0x6d, 0xd7, 0x2b, 0x60, 0xcd, 0x10, 0xe9, + 0x84, 0xce, 0x46, 0xc7, 0xd0, 0x82, 0x16, 0xd4, 0xc1, 0x5a, 0x7e, 0x2c, 0x82, 0x86, 0x87, 0xfa, + 0x7e, 0xc9, 0x18, 0x50, 0xd7, 0xa1, 0xc5, 0x41, 0x2e, 0x7a, 0xe1, 0xac, 0x25, 0x67, 0xc9, 0x7a, + 0x59, 0x07, 0x83, 0xef, 0xd5, 0x63, 0xa0, 0x79, 0x1d, 0xa6, 0x32, 0x15, 0x0b, 0x71, 0x83, 0x50, + 0xe7, 0x3e, 0x0b, 0x27, 0x4a, 0xa9, 0x22, 0x21, 0x6d, 0x1f, 0x0f, 0xf1, 0xd1, 0x5d, 0xff, 0xfb, + 0x7e, 0x7f, 0x14, 0x4d, 0x88, 0x07, 0x27, 0xd4, 0x1a, 0xa5, 0x13, 0x5a, 0x53, 0x3f, 0x2d, 0xd8, + 0x5d, 0xa0, 0x56, 0x18, 0xbe, 0x08, 0xe7, 0x5e, 0x14, 0x96, 0xb2, 0x7c, 0xc0, 0xa6, 0x1d, 0xf7, + 0xc4, 0xbf, 0x09, 0x1b, 0x2b, 0x4e, 0xa3, 0x80, 0xee, 0xe6, 0x5d, 0x25, 0x1b, 0xdc, 0xb2, 0xc8, + 0x62, 0xe3, 0xc4, 0xf1, 0x9d, 0x46, 0x24, 0x0d, 0xd9, 0xfb, 0xf3, 0x01, 0xbd, 0xcb, 0x8d, 0x37, + 0x45, 0x6d, 0x35, 0xfa, 0x81, 0x03, 0xcd, 0x95, 0x39, 0xbe, 0x07, 0xe8, 0x42, 0x96, 0xbc, 0xd6, + 0x80, 0x96, 0xe3, 0x0c, 0x6f, 0xb2, 0x65, 0x68, 0xd3, 0xc0, 0x04, 0x2b, 0x96, 0x18, 0xe7, 0x2b, + 0x90, 0x14, 0xc1, 0x08, 0xe6, 0x94, 0x10, 0x7f, 0x69, 0x6a, 0xb9, 0xef, 0x6b, 0x9e, 0x92, 0xac, + 0x24, 0x52, 0x8a, 0x98, 0x2f, 0x9c, 0xda, 0x9b, 0xa9, 0xee, 0x2d, 0xc1, 0xb4, 0x2f, 0x7a, 0xa3, + 0x0b, 0xfe, 0xb7, 0x0c, 0xf0, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, + 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, + 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 
0xfc, 0xf3, 0xcf, 0x3f, 0xff, + 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0xff, 0x2b, 0xfc, + 0x7f, 0x00, 0xc9, 0xf1, 0xf2, 0x75, 0x00, 0x8e, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36352, // uncompressed data size (bytes) + 24090, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x4e, 0x00, 0x62, 0x3d, 0x08, 0x13, 0x4c, 0xc4, 0x43, 0x69, + 0x20, 0x00, 0x00, 0x87, 0x58, 0x20, 0x04, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_dbg_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 384 +// COMPRESSED SIZE (bytes): 397 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_sig_dbg_data[] = +{ + 0x01, 0x80, 0x01, 0x7f, 0xfe, 0x18, 0x97, 0x69, 0x1c, 0xf9, 0x7c, 0xb1, 0xa1, 0xac, 0xbe, 0x29, + 0x3b, 0x10, 0x8e, 0x68, 0x8d, 0xe5, 0xf1, 0x82, 0xbe, 0x17, 0xe7, 0xbb, 0xf4, 0x35, 0x95, 0x09, + 0x89, 0xff, 0xc9, 0x08, 0x76, 0x52, 0x16, 0xd4, 0xa9, 0xb4, 0x79, 0xa2, 0x8e, 0x17, 0xd0, 0x9e, + 0x9f, 0x36, 0x39, 0x9c, 0xd4, 0x30, 0x29, 0x8e, 0x11, 0x49, 0xda, 0x59, 0x93, 0xce, 0xa7, 0x5d, + 0x04, 0x33, 0xc4, 0xf6, 0x94, 0x3b, 0xd7, 0x73, 0xee, 0xde, 0xc6, 0x8d, 0x09, 0x30, 0xd8, 0xaf, + 0x7a, 0x49, 0x8b, 0x50, 0x7d, 0x1a, 0x8d, 0x36, 0xa2, 0xdb, 0xf1, 0xbc, 0xcd, 0xba, 0x5b, 0x57, + 0x61, 0x25, 0x86, 0x30, 0xc6, 0x94, 0xdc, 0xce, 0xdc, 0xac, 0x9d, 0x03, 0x2b, 0x97, 0xcd, 0xff, + 0xc6, 0x95, 0x65, 0x3a, 0xb5, 0x14, 0x9a, 0x74, 0xdd, 0xb4, 0x4d, 0xd6, 0xc1, 0xd5, 0x3e, 0xcd, + 0x86, 0x0c, 0xff, 0x5a, 0x59, 0xe9, 0x2e, 0x85, 0x4a, 0x54, 0x0e, 0x4e, 0x6d, 0xa1, 0x09, 0xd5, + 0x1e, 0x49, 0xb6, 0xef, 0x9b, 0x5e, 0xa2, 0x7d, 0x29, 0xd3, 0xb2, 0xef, 0xc3, 0x90, 0xdf, 0xef, + 0xb5, 0x6b, 0xf5, 0x47, 0x77, 0x0d, 0x62, 0x37, 0x26, 0x60, 0xf0, 0x8c, 0x2c, 0xeb, 0x25, 0x75, + 0x73, 0x01, 0x84, 0xdd, 0x02, 0xe7, 0x2f, 0x1c, 0xd6, 0x79, 0xf2, 0xdd, 0x60, 0x6f, 0x5d, 0xc9, + 0xa6, 0x0b, 0xde, 0x66, 0x93, 0x4f, 0x25, 0x49, 0x65, 0x4a, 0x00, 0x8f, 0xbf, 0xd1, 0xae, 0x5d, + 0xee, 0xef, 0xf4, 0xd6, 0xe8, 0x12, 0xde, 0x21, 0x22, 0xea, 0x74, 0x93, 0x95, 0xb5, 0xd5, 0xb0, + 0x21, 0xb8, 0x8c, 0xef, 0xca, 0x65, 0x37, 0x3e, 0x37, 0x78, 0x28, 0xa0, 0x0d, 0xde, 0xd5, 0x0d, + 0x1a, 0x09, 0xfa, 0x17, 0x15, 0x04, 0xf1, 0x46, 0x19, 0x31, 0x0f, 0x6a, 0xaa, 0x6c, 0x4d, 0xf2, + 0x45, 0x49, 0x79, 0xfb, 0xe0, 0x01, 0xb8, 0x42, 0x01, 0xee, 0x8b, 0xca, 0x7a, 0x27, 0x34, 0xb9, + 0xdd, 0xe0, 0xdf, 0xe1, 0xba, 0x9e, 0x84, 0xfc, 0xdc, 0x75, 0x35, 0xbb, 0xf3, 0xaa, 0x4c, 0xe3, + 0x47, 0x60, 0xb5, 0xd6, 0x23, 0x00, 0x52, 0xa6, 0xec, 0xcf, 0xb0, 0xb9, 0x13, 0x3e, 0x43, 0x05, + 0x94, 0x7f, 0x20, 0xa9, 0x56, 0xfb, 0xd9, 0x47, 0xc0, 0x03, 0x75, 0xe9, 0x10, 0x33, 0x9c, 0x3e, + 0xca, 0x31, 0x0e, 0xd9, 0x4a, 0x1d, 0x80, 0xa9, 0xd6, 0xd9, 0x9f, 0x13, 0x81, 0xf5, 0xf2, 0x4a, + 0x3d, 0x2a, 0xfe, 0x21, 0x49, 0xe2, 0x1e, 0x8b, 0x36, 0x75, 0x0d, 0xe0, 0x5b, 0x2e, 0x56, 0x97, + 0x9a, 0xed, 0x09, 0xb6, 0xee, 0xda, 0x76, 0xb9, 0xc0, 0x65, 0xe1, 0xf8, 0x67, 0xa5, 0x1e, 0x13, + 0xb2, 0xec, 0x2e, 0x10, 0xf8, 0x11, 0x0c, 0x44, 0x1f, 0x5b, 0x23, 0xc3, 0x73, 0x4d, 0x7a, 0x56, + 0x0a, 0xa7, 0x92, 0x19, 0x85, 0x42, 0x91, 0x41, 0x4f, 0x80, 0x01, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 384, // uncompressed data size (bytes) + 397, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_sig_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? 
+ NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_prod_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 384 +// COMPRESSED SIZE (bytes): 397 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_sig_prod_data[] = +{ + 0x01, 0x80, 0x01, 0x7f, 0xfe, 0xa8, 0x3f, 0x96, 0x00, 0x76, 0x81, 0x01, 0x91, 0x3e, 0x7a, 0x81, + 0x8b, 0xdc, 0xa8, 0x69, 0x88, 0xbb, 0x18, 0xdc, 0x91, 0x9b, 0x11, 0x2d, 0x97, 0xc9, 0x35, 0x63, + 0x10, 0x46, 0x17, 0xf0, 0xb3, 0x95, 0xa4, 0x98, 0x8b, 0x0c, 0x14, 0x98, 0x83, 0xdf, 0x9e, 0x69, + 0x98, 0xcd, 0x6c, 0xe2, 0x97, 0x27, 0x94, 0x45, 0x93, 0x04, 0xdc, 0x17, 0x5b, 0x17, 0x6c, 0xa2, + 0x73, 0xd4, 0xf3, 0xa1, 0x70, 0xd7, 0x43, 0x64, 0xbb, 0x83, 0x1b, 0x81, 0xa3, 0xb5, 0x94, 0x29, + 0xfb, 0x81, 0x17, 0x41, 0x66, 0x2e, 0x5a, 0xd3, 0x93, 0x13, 0x78, 0x3c, 0x19, 0xb2, 0xce, 0x0a, + 0xd4, 0xea, 0x56, 0x62, 0xaa, 0xb6, 0x2c, 0x6d, 0x56, 0x7c, 0x5c, 0x96, 0xc9, 0x09, 0xe3, 0x6c, + 0xe6, 0x19, 0x3a, 0x1a, 0xe2, 0xba, 0xdb, 0x9c, 0x9c, 0x62, 0x55, 0x22, 0x26, 0xb9, 0xb3, 0xd8, + 0x26, 0x67, 0xb3, 0x92, 0x72, 0x4b, 0xb2, 0x7a, 0x26, 0x13, 0x91, 0x3d, 0xad, 0xd0, 0xf1, 0x1d, + 0x74, 0x68, 0xc9, 0x6e, 0x14, 0xa5, 0xf4, 0x46, 0x6d, 0x26, 0xaf, 0xc2, 0xe2, 0x48, 0xf0, 0x72, + 0x48, 0xa2, 0xab, 0xcf, 0x79, 0x07, 0xd9, 0x2c, 0x1e, 0xb0, 0xb7, 0x8c, 0xaa, 0x05, 0xd3, 0x91, + 0x9c, 0x2c, 0xe7, 0xf7, 0xd3, 0x11, 0xed, 0x39, 0x5e, 0xa7, 0x33, 0x3d, 0x1d, 0xa8, 0x3d, 0x9f, + 0xd7, 0xfd, 0x5b, 0x9e, 0x50, 0xc2, 0x4b, 0x18, 0x5f, 0x6c, 0xd4, 0x78, 0xac, 0xa1, 0x36, 0x51, + 0x25, 0xfe, 0xb1, 0xf7, 0xb5, 0x76, 0xe6, 0xeb, 0x1c, 0x71, 0x79, 0xd0, 0x4d, 0x0e, 0x55, 0x9c, + 0x73, 0xd4, 0x35, 0x14, 0x89, 0x87, 0xb4, 0xb9, 0x43, 0x64, 0xc5, 0x20, 0xa5, 0xee, 0x6e, 0x83, + 0x63, 0x83, 0x04, 0xa9, 0xb8, 0xcf, 0x2c, 0x9d, 0x70, 0xe3, 0x67, 0x35, 0x24, 0xb5, 0xd3, 0xa0, + 0x18, 0x21, 0xfb, 0xd1, 0x45, 0x5e, 0xd6, 0xd0, 0x02, 0x76, 0xa2, 0x1c, 0xaa, 0x35, 0x12, 0x9b, + 0x04, 0x8a, 0x00, 0xe3, 0xc3, 0xda, 0x84, 0x9e, 0xf7, 0xf5, 0xce, 0x78, 0xb3, 0x9f, 0x4c, 0x3b, + 0x0c, 0xc7, 0xa6, 0x89, 0x7c, 0x39, 0x17, 0x68, 0x9c, 0xda, 0xa6, 0x76, 0x38, 0x39, 0x8b, 0x61, + 0x87, 0x78, 0x64, 0x5c, 0x76, 0x77, 0x76, 0xe9, 0xa4, 0x67, 0x31, 0x43, 0x2f, 0xa8, 0xce, 0xe6, + 0x68, 0x8d, 0xdb, 0x9a, 0x36, 0x81, 0xaf, 0x76, 0xf4, 0x0e, 0x6f, 0x9a, 0xaf, 0xc1, 0x31, 0x71, + 0x57, 0xda, 0xd1, 0x01, 0x3d, 0xb3, 0x10, 0xd5, 0x8f, 0xf0, 0x41, 0x47, 0x60, 0x64, 0x90, 0x40, + 0x81, 0x29, 0xc6, 0xe3, 0x27, 0xa2, 0x8c, 0x72, 0xa4, 0x50, 0xd8, 0x3e, 0x89, 0xf4, 0x81, 0x4d, + 0xfc, 0x33, 0x26, 0x98, 0x25, 0x27, 0xd0, 0xfd, 0x2b, 0x60, 0x69, 0x6a, 0x6b, 0xff, 0x7f, 0x13, + 0x45, 0x88, 0x71, 0x39, 0x6d, 0xf9, 0xc0, 0x35, 0x96, 0x80, 0x01, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 384, // uncompressed data size (bytes) + 397, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_sig_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_ga100_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_patch_loc_data[] = +{ + 0x00, 0x62, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_ga100_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_ga100_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA100("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/load/g_booteruc_load_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA100_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA100_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA100_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterLoadUcode_GA100 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA100_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_GA100(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterLoadUcode_GA100; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_GA102.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_GA102.c new file mode 100644 index 000000000..a25f0ca2a --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_GA102.c @@ -0,0 +1,3640 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 37120 +// COMPRESSED SIZE (bytes): 25363 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_image_dbg_data[] = +{ + 0xed, 0xfd, 0x43, 0xb4, 0xf0, 0x4c, 0xd7, 0x00, 0x6c, 0x1e, 0xdb, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, + 0xdb, 0xb6, 0x6d, 0xdf, 0xc7, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xe7, 0xff, 0xc6, 0xef, 0xe4, + 0xef, 0x49, 0xaf, 0x1e, 0xf4, 0x73, 0xcd, 0x32, 0xca, 0x4a, 0xb2, 0x6a, 0x27, 0x7b, 0x57, 0xed, + 0x14, 0x00, 0xc0, 0xff, 0x7f, 0xb3, 0xc1, 0x60, 0x1f, 0x6d, 0xfa, 0x16, 0x6c, 0x59, 0x03, 0xe1, + 0xdf, 0x42, 0xee, 0x0c, 0x3d, 0xc8, 0xc5, 0x63, 0x15, 0x3e, 0x7d, 0xdc, 0x16, 0xc7, 0x2a, 0x76, + 0x66, 0x22, 0xdc, 0x92, 0xb4, 0xe0, 0x26, 0xcc, 0x99, 0xa8, 0xf5, 0x75, 0x8e, 0x09, 0xb5, 0x8b, + 0x50, 0x89, 0x93, 0xba, 0xef, 0x08, 0x28, 0x7a, 0x76, 0xc0, 0xeb, 0xf2, 0xa3, 0xbe, 0x36, 0x1a, + 0x14, 0x7f, 0xcf, 0xfa, 0xe8, 0x3a, 0xed, 0x4a, 0x1d, 0x8b, 0xa5, 0x0e, 0xed, 0x80, 0xac, 0xf6, + 0x49, 0xb3, 0xb9, 0xa1, 0xd8, 0x6c, 0xdb, 0x14, 0xb6, 0xc4, 0x4e, 0x66, 0x09, 0x84, 0xac, 0x0f, + 0xe3, 0x6b, 0xa8, 0xc1, 0xf6, 0xda, 0x28, 0x2b, 0x54, 0xbc, 0x1c, 0xae, 0x3e, 0x3d, 0xba, 0xd6, + 0x11, 0xe6, 0x4e, 0x42, 0xd7, 0xb7, 0x1e, 0xe9, 0x3a, 0x85, 0xd4, 0xbc, 0x17, 0x04, 0xe3, 0x94, + 0x19, 0x8c, 0x9a, 0x5b, 0xf2, 0x93, 0x3b, 0x24, 0x81, 0x17, 0x75, 0x83, 0xa2, 0x80, 0x31, 0x9c, + 0xe5, 0xd0, 0xc4, 0xa8, 0xf7, 0x5f, 0xe2, 0x58, 0x19, 0x0d, 0x0b, 0x26, 0x62, 0xb7, 0xbe, 0x5c, + 0x47, 0xd4, 0x6c, 0x9e, 0x90, 0x38, 0x6c, 0x29, 0x7d, 0x7c, 0x46, 0x2a, 0xb6, 0x98, 0x90, 0x9f, + 0x7f, 0x04, 0xd3, 0x2f, 0xee, 0x42, 0xae, 0x4e, 0x7c, 0x9d, 0xed, 0xbd, 0x4d, 0xe3, 0x07, 0xfb, + 0x92, 0x20, 0xac, 0xa4, 0xa1, 0x4c, 0x1c, 0xa8, 0x61, 0x08, 0x58, 0x18, 0x5d, 0x07, 0xd6, 0xb1, + 0x29, 0x38, 0x77, 0xc3, 0x88, 0x04, 0xce, 0xa6, 0xb3, 0x18, 0xa3, 0xb5, 0x3b, 0xe6, 0x08, 0xb9, + 0xb0, 0x44, 0xed, 0xc4, 0x4a, 0x5f, 0x59, 0xd7, 0x83, 0xdb, 0x40, 0x55, 0x23, 0x82, 0x24, 0x70, + 0xca, 0xf2, 0xcb, 0x00, 0xc4, 0x6e, 0x59, 0x13, 0x54, 0x19, 0x8b, 0x69, 0x58, 0xf2, 0x5a, 0xfd, + 0xb0, 0x49, 0xb9, 0x0d, 0x65, 0x86, 0x80, 0xa1, 0x1e, 0x5f, 0x1d, 0x75, 0x3d, 0x3d, 0x69, 0x0e, + 0xe0, 0x74, 0xae, 0xf5, 0xa5, 0x8a, 0xfc, 0x2d, 0x33, 0x1d, 0x87, 0xb0, 0xcc, 0x4a, 0x46, 0x07, + 0xa7, 0xb2, 0x23, 0xa8, 0x17, 0xd4, 0xed, 0x21, 0x15, 0x3c, 0x63, 0x61, 0xe2, 0x00, 0x2b, 0x75, + 0x8b, 0xce, 0xbc, 0x67, 0xd1, 0x96, 0x2c, 0xa1, 0xd8, 0x36, 0xab, 0x26, 0x18, 0x86, 0x7c, 0x21, + 0x7e, 0xfe, 0xfa, 0x46, 0xb0, 0x55, 0xcd, 0xfd, 0xf6, 0xf0, 0x29, 0xe7, 0x58, 0xdf, 0x2c, 0x27, + 0x36, 0xb5, 0xc6, 0x47, 0x09, 0xd8, 0x0b, 0x1d, 0x5d, 0xd8, 0xfe, 0x58, 0xc2, 0xeb, 0xdb, 0x1f, + 0xa5, 0x0c, 0xfa, 0x11, 0x2a, 0xdd, 0xff, 0x98, 0xb7, 0x09, 0xc5, 0xa5, 0xd8, 0x76, 0x24, 0x78, + 0x7b, 0x94, 0x2e, 0xbe, 0x07, 0x08, 0xc5, 0x22, 0x4e, 0xd7, 0xd4, 0x38, 0xfb, 0xd6, 0xe2, 0xdd, + 0xa3, 0x7b, 0x0d, 0xcf, 0x35, 0xd7, 0x9a, 0xa8, 0xd4, 0x3e, 0x95, 0xe4, 0x50, 0x0f, 0x5b, 0x04, + 0xaa, 0x91, 0x97, 0x7e, 0xfb, 0xc7, 0x52, 0xcf, 0xdb, 0x0a, 0xba, 0x55, 0x6f, 0x3a, 0xa2, 0x78, + 0xfa, 0x79, 0x6f, 0x26, 0x9c, 0x56, 0xb7, 0xb5, 0xb4, 0xe5, 0xe1, 0x16, 0xd6, 0xc6, 0xdb, 0x46, + 0x8d, 0x70, 0x51, 0x89, 0x6b, 0xd4, 0xb2, 0x8b, 0xc4, 0x88, 0x1f, 0xf9, 0x6b, 0xcb, 0x75, 0x5e, + 0x5b, 0x53, 0xae, 0xc0, 0x17, 0xdd, 0x52, 0xcf, 0xc0, 0x86, 
0x94, 0x4f, 0x9e, 0x33, 0x85, 0x1b, + 0x8b, 0x49, 0x77, 0xb9, 0x0a, 0x2a, 0x59, 0x11, 0x1d, 0x24, 0xeb, 0x18, 0x18, 0x5d, 0xd2, 0xf7, + 0x1e, 0xb1, 0xb5, 0xaf, 0xe4, 0x2f, 0x9d, 0x9e, 0x7d, 0x53, 0x28, 0xd4, 0xb3, 0xe7, 0xd2, 0x39, + 0xf2, 0xea, 0xa9, 0x3d, 0xd4, 0x1f, 0x7f, 0xb0, 0xcc, 0xe7, 0x2d, 0xe7, 0xe4, 0x7c, 0x11, 0xf8, + 0x49, 0x39, 0xb5, 0xdc, 0x13, 0x01, 0xb8, 0x2c, 0x4d, 0x59, 0x0c, 0x31, 0xe4, 0xee, 0xb2, 0x06, + 0x61, 0xdb, 0x8d, 0x32, 0xcf, 0x2b, 0xfb, 0xef, 0xd0, 0xd8, 0x23, 0xe3, 0xa3, 0x70, 0xc4, 0x87, + 0x88, 0x33, 0x7c, 0x19, 0xa3, 0x87, 0x50, 0xea, 0x40, 0x4f, 0xe2, 0xdf, 0xb4, 0xfa, 0x5f, 0x06, + 0x2e, 0xc6, 0x9a, 0x28, 0x60, 0xa5, 0x5c, 0xaf, 0x83, 0x66, 0x38, 0x41, 0x40, 0x9f, 0x06, 0xf3, + 0xcb, 0x97, 0x5a, 0x6c, 0x3e, 0x74, 0x9d, 0x24, 0x53, 0x6f, 0xbe, 0x18, 0x3e, 0x56, 0x87, 0x80, + 0x21, 0x09, 0x35, 0x3a, 0x9b, 0xe0, 0x66, 0x15, 0xc3, 0x5b, 0x01, 0x65, 0x9e, 0xd6, 0x17, 0xa7, + 0x59, 0xb8, 0x6f, 0x85, 0x4d, 0x2b, 0x60, 0x1f, 0x67, 0xee, 0xd2, 0xf4, 0x11, 0x5c, 0x87, 0x13, + 0x9e, 0xb4, 0x16, 0xc2, 0xa5, 0xa3, 0x7d, 0xeb, 0xde, 0x7c, 0xd5, 0xe8, 0x81, 0x9b, 0xec, 0x4e, + 0xfe, 0x64, 0x33, 0xfe, 0xe2, 0x3c, 0xc1, 0xc8, 0x5f, 0x2a, 0x46, 0x77, 0xce, 0xad, 0xdb, 0x27, + 0x45, 0x55, 0xd7, 0x64, 0x21, 0xeb, 0xdf, 0x34, 0x42, 0xa4, 0xf3, 0xb3, 0xd9, 0x34, 0xf6, 0x13, + 0x1c, 0x6f, 0x75, 0x21, 0x47, 0xcc, 0x0e, 0x59, 0x34, 0x30, 0xf8, 0x46, 0xe7, 0xf0, 0x32, 0x7c, + 0x92, 0xfc, 0xb9, 0x58, 0xfc, 0x87, 0xd8, 0xfc, 0xea, 0x2a, 0x97, 0xc8, 0x0a, 0xdb, 0xfd, 0x46, + 0x35, 0x45, 0x8d, 0x7f, 0x18, 0xc1, 0x46, 0xc2, 0x60, 0xcc, 0x58, 0x29, 0x34, 0x72, 0xcf, 0x5a, + 0x77, 0xb2, 0xce, 0xe8, 0x8a, 0xc1, 0xa0, 0x1f, 0x56, 0x86, 0xff, 0xaa, 0x09, 0x00, 0x7a, 0x04, + 0x35, 0x4d, 0x10, 0xcf, 0xd9, 0x99, 0x86, 0xa9, 0xe5, 0xd9, 0xcb, 0x40, 0x22, 0xa7, 0x50, 0x09, + 0xf1, 0xb6, 0x3b, 0xb3, 0x11, 0xb1, 0x87, 0x0e, 0x78, 0x96, 0xc4, 0x29, 0x25, 0xe8, 0x3a, 0x09, + 0x7c, 0x5e, 0x85, 0xbd, 0x17, 0xb6, 0x35, 0x83, 0x15, 0x62, 0x0c, 0xbb, 0x7b, 0xbb, 0xca, 0xe9, + 0x2f, 0x93, 0xef, 0x92, 0x2b, 0x85, 0xe1, 0x01, 0xd1, 0xfe, 0x0f, 0xc7, 0x80, 0x59, 0xab, 0x25, + 0xe7, 0xb6, 0x98, 0xc5, 0xac, 0x18, 0x76, 0x3c, 0xf5, 0x83, 0xee, 0xbc, 0x15, 0x9c, 0x4d, 0x76, + 0x12, 0x34, 0x34, 0x06, 0xe3, 0x89, 0xb9, 0xe4, 0x06, 0xbd, 0x80, 0x8c, 0x7a, 0x41, 0xe5, 0x1c, + 0xc3, 0x83, 0xab, 0x23, 0xe9, 0xa7, 0xd1, 0xe1, 0xf1, 0x67, 0x00, 0x78, 0xaf, 0x01, 0xfb, 0x71, + 0xaa, 0xdc, 0xc9, 0xba, 0xb5, 0x05, 0x38, 0x78, 0x9c, 0x05, 0xc4, 0x9e, 0x28, 0x54, 0x52, 0x06, + 0x13, 0xb2, 0x50, 0xd7, 0x5d, 0x26, 0xc1, 0x90, 0x6b, 0x0d, 0x04, 0x96, 0x80, 0x87, 0xe6, 0x35, + 0x6f, 0xb4, 0x22, 0x0c, 0xab, 0x11, 0x70, 0x43, 0x40, 0x03, 0xf9, 0x48, 0xf6, 0xc4, 0x43, 0x8e, + 0xa4, 0x6b, 0xa4, 0x94, 0xa6, 0x49, 0x9a, 0xfa, 0x47, 0xc0, 0xf8, 0xd9, 0x51, 0xd3, 0x19, 0xe1, + 0x76, 0xc5, 0x6e, 0x0b, 0x74, 0xed, 0x71, 0xf1, 0x99, 0x52, 0xf8, 0xd6, 0x12, 0xd8, 0xc0, 0x65, + 0x12, 0xfa, 0x3a, 0x1a, 0xa9, 0x47, 0x0a, 0x02, 0xf9, 0x6f, 0xbf, 0xf0, 0xfd, 0xb1, 0x0c, 0x36, + 0xe5, 0x8f, 0x8c, 0x59, 0xa0, 0x45, 0xb2, 0x18, 0x23, 0xb9, 0x47, 0xe2, 0x20, 0xf2, 0x4e, 0xfa, + 0xbf, 0xa0, 0x66, 0x46, 0x0d, 0x1b, 0xa2, 0x76, 0xac, 0x5e, 0xf2, 0xb3, 0xa0, 0x56, 0x6f, 0x57, + 0xec, 0xe8, 0x8d, 0xa1, 0xd3, 0x1f, 0x11, 0x35, 0x7f, 0xb5, 0x0f, 0x05, 0xe0, 0x36, 0x03, 0x62, + 0x5e, 0xce, 0x28, 0x14, 0x98, 0x34, 0x65, 0x1c, 0x21, 0x61, 0xe2, 0x72, 0xf0, 0x34, 0x96, 0xb1, + 0x8c, 0xa4, 0xb1, 0x92, 0x4a, 0xbc, 0x19, 0x21, 0xe5, 0x77, 0x35, 0xfb, 0x5e, 0xe3, 0xbc, 0xa2, + 0x5b, 0x66, 0xce, 0x49, 0x23, 0xe2, 0xf9, 0x31, 0x83, 0x6f, 0xce, 0x3e, 0x4b, 0x4a, 
0x3e, 0xca, + 0xb8, 0x44, 0x68, 0xd6, 0x55, 0xe4, 0xa1, 0x06, 0x69, 0xda, 0x78, 0x13, 0xa0, 0x0f, 0x0b, 0x4f, + 0xcd, 0x87, 0x8b, 0x72, 0x60, 0xa6, 0x60, 0x97, 0x82, 0x31, 0x93, 0x81, 0xb1, 0x27, 0x52, 0xb6, + 0x2c, 0x9f, 0x63, 0xcc, 0xf3, 0x2a, 0xee, 0x7a, 0x4f, 0xed, 0x6a, 0x66, 0xef, 0xd7, 0xd7, 0x42, + 0xa1, 0x2f, 0xf1, 0x65, 0x03, 0x6c, 0x25, 0xb3, 0x47, 0x3a, 0xc2, 0x7f, 0x81, 0x5f, 0x1a, 0xa6, + 0x49, 0x7f, 0x64, 0xcf, 0x49, 0x37, 0x22, 0x4a, 0x86, 0xfb, 0xca, 0x36, 0x84, 0xb0, 0x86, 0x4d, + 0xd7, 0xec, 0xe4, 0x42, 0xc7, 0x12, 0x4e, 0xa1, 0x3d, 0x00, 0x03, 0x74, 0xb5, 0xa8, 0x65, 0x41, + 0x32, 0x5d, 0xd6, 0xc6, 0x15, 0x6e, 0xb4, 0x47, 0xdc, 0xcc, 0xfd, 0x05, 0xa6, 0x01, 0x88, 0x8a, + 0xb3, 0x0a, 0xbc, 0x52, 0xe4, 0xfd, 0xac, 0x07, 0x2b, 0x03, 0x1e, 0x3e, 0xfd, 0xc5, 0xd4, 0x63, + 0x28, 0xfc, 0x1d, 0x1b, 0x09, 0xe0, 0x60, 0x6f, 0x92, 0x28, 0x3b, 0x47, 0xa8, 0xad, 0x37, 0x0d, + 0x20, 0x92, 0x3c, 0x3e, 0x1f, 0xae, 0xa0, 0x00, 0xab, 0x69, 0x73, 0x19, 0xee, 0xda, 0x15, 0xcc, + 0xb0, 0xf5, 0x4a, 0xc2, 0x60, 0x27, 0x62, 0x19, 0x5a, 0xb5, 0xa5, 0xf7, 0x36, 0x3c, 0x79, 0xd1, + 0xb7, 0x17, 0x38, 0x07, 0x7c, 0x95, 0xf6, 0x96, 0xb8, 0x6e, 0x0d, 0xec, 0x81, 0xa2, 0x32, 0x19, + 0xf1, 0x5c, 0x40, 0x0c, 0xa1, 0xac, 0x16, 0x5c, 0x99, 0x6b, 0xee, 0x77, 0xc7, 0x67, 0xda, 0x71, + 0x09, 0x80, 0x90, 0x96, 0xd0, 0x94, 0x23, 0x26, 0xda, 0x67, 0x27, 0xd3, 0x6a, 0xfa, 0x06, 0x43, + 0xac, 0x66, 0x5a, 0x63, 0xce, 0x94, 0x93, 0x78, 0x5a, 0x75, 0xa3, 0x39, 0x5f, 0xe1, 0x2c, 0x8f, + 0x03, 0xdf, 0xdd, 0x90, 0xd1, 0xc9, 0xe1, 0x02, 0xe7, 0x02, 0x15, 0xbd, 0x13, 0x3f, 0xeb, 0xc0, + 0x4c, 0x9e, 0xe0, 0x09, 0x9d, 0x39, 0xfc, 0x56, 0xb5, 0x06, 0x48, 0x2a, 0x06, 0x26, 0x80, 0x09, + 0xa7, 0xbc, 0x3d, 0x61, 0x6e, 0xe1, 0xad, 0xf8, 0x17, 0x35, 0x44, 0xf2, 0x17, 0xd4, 0xff, 0x0a, + 0x78, 0x15, 0x3e, 0xc4, 0x86, 0x9d, 0x50, 0xce, 0x04, 0x22, 0xb3, 0x6d, 0x93, 0x07, 0xe5, 0xc6, + 0x53, 0x12, 0xf9, 0xf9, 0x7d, 0xd1, 0xc7, 0xd2, 0x53, 0xf6, 0xd7, 0x29, 0x45, 0x0e, 0x49, 0x8a, + 0x9c, 0x03, 0x0b, 0xb5, 0xe8, 0xb8, 0x87, 0xbd, 0x2e, 0xa1, 0xde, 0x2e, 0xdf, 0xc2, 0xa7, 0x1b, + 0x1c, 0x64, 0x0e, 0x65, 0xb0, 0xe1, 0x6b, 0xc9, 0x45, 0x79, 0x7c, 0xc3, 0x91, 0xe8, 0x28, 0x0b, + 0x0d, 0xfc, 0xd3, 0x53, 0x66, 0xf8, 0xf5, 0x01, 0x8d, 0x98, 0xc3, 0xd5, 0x5c, 0x74, 0xd2, 0xd1, + 0xf5, 0x88, 0x0e, 0xfe, 0xd4, 0x3a, 0x9c, 0x9a, 0xbe, 0x7b, 0x62, 0x51, 0x4d, 0x91, 0xbd, 0xd5, + 0x2f, 0x3b, 0xe6, 0xc4, 0x79, 0xd2, 0xe2, 0xd8, 0x1b, 0x4b, 0xe6, 0x2d, 0x81, 0x57, 0x60, 0xc1, + 0x16, 0xfd, 0x23, 0x7c, 0x37, 0x42, 0x14, 0x8b, 0xde, 0x26, 0x48, 0x5c, 0x6b, 0x9a, 0x61, 0xc5, + 0x9b, 0xca, 0x30, 0x72, 0xf5, 0xbc, 0x74, 0x0a, 0xd8, 0xc5, 0x75, 0x9e, 0x07, 0xaf, 0x5b, 0x8c, + 0xd0, 0x1b, 0xd6, 0x32, 0x7b, 0x4a, 0x48, 0xe9, 0xe1, 0x11, 0x21, 0x13, 0xa6, 0x5c, 0x94, 0x52, + 0x6f, 0xd1, 0x5f, 0xc2, 0x04, 0xbf, 0x7a, 0xc3, 0xd9, 0x67, 0x9d, 0x6d, 0x71, 0x0c, 0xf9, 0x48, + 0x0d, 0x7f, 0x4e, 0x30, 0x3e, 0x47, 0x07, 0xff, 0x6a, 0x99, 0x28, 0x9b, 0x8a, 0x53, 0x7e, 0x9f, + 0x1d, 0xa4, 0x6d, 0xa7, 0xcb, 0x20, 0x8d, 0x3c, 0xa8, 0x22, 0x09, 0x91, 0x6f, 0xed, 0x3b, 0x20, + 0xc4, 0xbe, 0x88, 0xec, 0xc8, 0x1b, 0x72, 0x8b, 0x7c, 0x08, 0x46, 0x3f, 0x17, 0x96, 0x0a, 0x7a, + 0xfc, 0x30, 0x26, 0xa7, 0xba, 0xab, 0xdd, 0x4c, 0xf4, 0x7e, 0x79, 0x09, 0xeb, 0x66, 0x88, 0x24, + 0xfe, 0x41, 0x53, 0x06, 0x4c, 0xcd, 0x79, 0xa9, 0x1c, 0x96, 0x4e, 0x3f, 0xf3, 0xd4, 0x38, 0x11, + 0x47, 0xf0, 0xa9, 0x70, 0x18, 0x2e, 0x7d, 0x33, 0xfe, 0xd0, 0xaa, 0x2c, 0x93, 0xb2, 0x15, 0x23, + 0x62, 0xb4, 0xf7, 0xb6, 0x2a, 0xa4, 0xb8, 0x14, 0x36, 0x49, 0xba, 0x3a, 0x24, 0x41, 0xc0, 0x1f, + 0x50, 0x68, 
0x22, 0xd2, 0x9a, 0x55, 0xd2, 0xce, 0xf5, 0x38, 0x85, 0x2e, 0xef, 0x6d, 0xf8, 0x49, + 0x68, 0xc4, 0xa9, 0xd7, 0xaf, 0xc3, 0x6e, 0xa0, 0x0a, 0x7b, 0x26, 0x3f, 0xcc, 0x56, 0x4f, 0xdc, + 0x63, 0xd1, 0x0e, 0x6a, 0x78, 0x16, 0x2b, 0xda, 0xc3, 0x55, 0x22, 0x80, 0xcf, 0x97, 0xd3, 0x15, + 0xdc, 0xd3, 0x30, 0x50, 0x3b, 0x3b, 0x48, 0x96, 0x72, 0x23, 0x89, 0xcf, 0x6b, 0x96, 0xf8, 0xa7, + 0xe2, 0xab, 0x28, 0x24, 0x94, 0x8c, 0x86, 0xc9, 0xf5, 0xd8, 0x2f, 0x2d, 0x19, 0xd0, 0xad, 0xed, + 0x04, 0xbd, 0xc8, 0x1a, 0xff, 0xd1, 0xc2, 0x7a, 0x8a, 0xf5, 0x92, 0x7d, 0x6a, 0xe7, 0x82, 0xfb, + 0x22, 0xf5, 0x35, 0x8d, 0x1c, 0xed, 0xcd, 0xbe, 0x1c, 0x7f, 0xb7, 0x31, 0xe1, 0x1d, 0x1a, 0xfd, + 0xdb, 0xd3, 0xd8, 0x81, 0x48, 0x85, 0x6b, 0xca, 0x4c, 0xb9, 0xfb, 0xc5, 0x65, 0x3b, 0xea, 0x34, + 0x2d, 0xdf, 0xae, 0xcd, 0x28, 0x63, 0x78, 0xff, 0x85, 0x04, 0x24, 0xb9, 0xd1, 0x3e, 0xc6, 0x03, + 0x70, 0xdc, 0xf1, 0x7b, 0xcb, 0x3a, 0xc2, 0xcb, 0xc5, 0xb4, 0x1a, 0x24, 0xe2, 0x07, 0x08, 0x32, + 0x4d, 0xcb, 0xba, 0xfa, 0x46, 0x80, 0x17, 0xaf, 0x58, 0x54, 0x6c, 0xec, 0x35, 0x36, 0x98, 0x3d, + 0xa3, 0x73, 0xf8, 0x40, 0x34, 0x20, 0x29, 0x9e, 0x03, 0x51, 0x98, 0x9f, 0x7b, 0xd8, 0x48, 0xc6, + 0x62, 0x3b, 0x97, 0x74, 0x07, 0x87, 0x2d, 0xda, 0xe1, 0x2a, 0x0b, 0xb0, 0x08, 0x5e, 0x51, 0xed, + 0xb6, 0x68, 0xac, 0x50, 0xae, 0xa8, 0xdf, 0x28, 0x7e, 0x20, 0x44, 0xf2, 0x3b, 0xae, 0xaa, 0xaf, + 0xd7, 0x42, 0x70, 0x69, 0x75, 0x0a, 0x72, 0x1f, 0x61, 0x77, 0x13, 0x2f, 0x17, 0x42, 0x13, 0x49, + 0xa1, 0x24, 0x28, 0xdc, 0x0f, 0x32, 0x2b, 0x42, 0x48, 0xb3, 0x3f, 0xe1, 0xa2, 0x70, 0x2f, 0x4e, + 0xd6, 0x42, 0xe2, 0x4f, 0x12, 0xe2, 0x07, 0x28, 0xee, 0x64, 0x42, 0x0a, 0x81, 0x01, 0x17, 0x64, + 0x08, 0x9e, 0x26, 0x01, 0xb2, 0x07, 0x6b, 0x43, 0xfa, 0xea, 0xb0, 0xc4, 0xd0, 0x6a, 0x52, 0xaa, + 0x2d, 0x63, 0x6f, 0xaf, 0xf0, 0xfd, 0xec, 0x4b, 0x7e, 0x71, 0x06, 0x38, 0xdf, 0x18, 0x37, 0xe5, + 0x23, 0x61, 0xed, 0xbe, 0xf7, 0x8e, 0x71, 0x54, 0xfd, 0x6d, 0xaf, 0x95, 0x68, 0xe2, 0x56, 0x82, + 0x80, 0x78, 0x83, 0xc8, 0xa8, 0xc2, 0xf6, 0x6f, 0x83, 0x6f, 0x15, 0xf0, 0x66, 0x81, 0xaa, 0x29, + 0x26, 0x46, 0x41, 0x58, 0x51, 0xa6, 0xf6, 0x15, 0x29, 0xe8, 0x31, 0x75, 0x94, 0xbe, 0x52, 0x52, + 0x7b, 0xe1, 0xea, 0x2a, 0x8d, 0xa7, 0x39, 0x6f, 0x20, 0x3d, 0xcc, 0x7d, 0x4e, 0xbe, 0xec, 0x27, + 0x50, 0x02, 0xac, 0x76, 0x62, 0x26, 0x42, 0x55, 0x3e, 0xa4, 0x23, 0xc4, 0xff, 0x97, 0x5a, 0x73, + 0x24, 0xd4, 0x47, 0x28, 0xda, 0x4f, 0x21, 0x66, 0xae, 0x4e, 0x2e, 0x6a, 0x4a, 0x26, 0xcf, 0x24, + 0x37, 0x81, 0x53, 0x27, 0x02, 0xef, 0xbe, 0xb3, 0x6f, 0xa6, 0xe3, 0x6b, 0xce, 0xce, 0x7f, 0x8d, + 0xfe, 0xe9, 0x2d, 0xd9, 0x61, 0xfa, 0x0b, 0x50, 0xf4, 0x92, 0xf6, 0x58, 0x88, 0xba, 0xc1, 0xb3, + 0x66, 0xfc, 0x4c, 0x83, 0x8e, 0x77, 0x79, 0x9a, 0xc9, 0x66, 0xb4, 0x2e, 0xee, 0xde, 0x09, 0xa7, + 0xc8, 0xbc, 0xd9, 0x5d, 0xb9, 0xee, 0x26, 0xc0, 0xfe, 0xb9, 0x96, 0xc3, 0xa0, 0x55, 0x5f, 0x92, + 0x5d, 0x35, 0x27, 0xed, 0x21, 0x0e, 0xad, 0x1a, 0xdd, 0xfe, 0x48, 0x4b, 0xe4, 0xa4, 0x82, 0xc2, + 0x1d, 0x2b, 0x1d, 0xa0, 0x99, 0x4d, 0x29, 0x18, 0x62, 0x2c, 0x47, 0x72, 0x82, 0xec, 0x2a, 0x3c, + 0xd0, 0x46, 0x61, 0x26, 0x21, 0x67, 0x22, 0x16, 0x79, 0x10, 0x91, 0x6f, 0x4d, 0x63, 0x06, 0x7a, + 0x63, 0xe5, 0x86, 0x39, 0x1c, 0x5c, 0xb4, 0xbe, 0xb7, 0x1b, 0xf5, 0xf7, 0x82, 0xc4, 0x28, 0xe5, + 0x09, 0xa8, 0x80, 0x86, 0xc1, 0x91, 0x5f, 0xcc, 0x77, 0xac, 0xba, 0x7b, 0x43, 0x7e, 0x98, 0x90, + 0xe3, 0x68, 0xc2, 0x82, 0x0c, 0x50, 0xce, 0x9b, 0x75, 0xa5, 0xc5, 0x14, 0xa1, 0xe6, 0x4a, 0x3c, + 0xda, 0x76, 0xd6, 0x9a, 0x90, 0x58, 0x77, 0x45, 0x6b, 0x3b, 0xdf, 0x21, 0x20, 0x4f, 0x8f, 0x01, + 0x43, 0x6e, 0xab, 0x8d, 0x23, 0xb6, 
0x82, 0xbd, 0xb7, 0xd5, 0xb7, 0x3a, 0x78, 0x24, 0x74, 0x30, + 0xa3, 0x04, 0xaa, 0x98, 0x2f, 0xf3, 0x7a, 0xe2, 0xed, 0x79, 0x6e, 0x11, 0x97, 0x1d, 0xac, 0x83, + 0xc9, 0x07, 0x4a, 0x30, 0x9c, 0x7b, 0xb8, 0xd1, 0x12, 0x90, 0x4c, 0xba, 0xf1, 0x24, 0xaf, 0x85, + 0xb2, 0x84, 0x8c, 0x61, 0xb1, 0xc6, 0xda, 0xd3, 0xcd, 0x95, 0x42, 0x77, 0x99, 0x81, 0x2d, 0xe9, + 0x8e, 0xbe, 0xbf, 0xe9, 0x88, 0xdc, 0x94, 0x45, 0xd3, 0x4a, 0x5c, 0x6b, 0xdf, 0xe3, 0x37, 0x79, + 0x0f, 0x8e, 0xf2, 0x91, 0x06, 0xba, 0x72, 0xe8, 0x0f, 0xc9, 0x39, 0x12, 0xec, 0x79, 0x65, 0x3f, + 0x1e, 0xdf, 0x4f, 0xcf, 0x3b, 0x1e, 0xbe, 0x7f, 0xcf, 0x07, 0xd4, 0x4d, 0x29, 0x9b, 0x7e, 0xbd, + 0x24, 0x14, 0x34, 0x79, 0x3f, 0x2e, 0xd6, 0x2c, 0x5d, 0xb3, 0xab, 0x1b, 0x11, 0x32, 0x59, 0xd5, + 0x30, 0x90, 0x4f, 0x79, 0xde, 0x02, 0x18, 0xb0, 0x56, 0xf6, 0x92, 0x31, 0x18, 0x67, 0x8c, 0x2c, + 0x6e, 0xf1, 0x83, 0xa0, 0xdb, 0x51, 0xbf, 0x89, 0x50, 0x79, 0x4a, 0x9d, 0xc9, 0x44, 0xef, 0xa3, + 0x21, 0x3e, 0xce, 0xd9, 0xc0, 0x4f, 0x4e, 0x7f, 0x5d, 0x12, 0x05, 0xfb, 0xc4, 0x93, 0xaa, 0x46, + 0xff, 0xf7, 0x10, 0x15, 0xd7, 0x7c, 0x8a, 0xaa, 0x4d, 0x41, 0x14, 0xcf, 0x33, 0x89, 0x5d, 0x33, + 0x28, 0xa9, 0x95, 0x78, 0xe9, 0x5f, 0x01, 0x6c, 0xeb, 0x98, 0x73, 0x68, 0x1e, 0xdb, 0xec, 0x7a, + 0xbf, 0x4f, 0x1a, 0x4f, 0xe3, 0x44, 0x8d, 0xba, 0x25, 0xac, 0x85, 0x5a, 0xd3, 0x5d, 0xf0, 0xae, + 0x4f, 0xa8, 0x9a, 0x90, 0xca, 0xa8, 0xe0, 0xb0, 0x3d, 0x9c, 0xa8, 0x80, 0xf0, 0x27, 0x0e, 0x7a, + 0xb2, 0x00, 0x30, 0x13, 0xad, 0xa1, 0xfe, 0xbf, 0xcf, 0x07, 0x31, 0x39, 0xbb, 0xa7, 0x6e, 0xed, + 0x33, 0xea, 0xa9, 0xeb, 0xae, 0x95, 0xc1, 0x74, 0xf0, 0x42, 0x32, 0xd5, 0x01, 0xa7, 0x80, 0x67, + 0xe5, 0x4b, 0x1a, 0x05, 0xf7, 0x5b, 0x59, 0x43, 0x01, 0x57, 0x98, 0xbb, 0xc8, 0xcc, 0x26, 0x6e, + 0xa9, 0x3f, 0xc1, 0x09, 0xb3, 0xcf, 0x88, 0x6d, 0x0d, 0x3e, 0x53, 0xdf, 0x10, 0xb8, 0x3d, 0x0c, + 0xca, 0xa6, 0x98, 0x61, 0xfe, 0x6e, 0xee, 0xdc, 0x11, 0x05, 0x42, 0xb0, 0xb3, 0x3c, 0x37, 0x0a, + 0x16, 0xa5, 0x3d, 0x80, 0xcc, 0x77, 0x19, 0x44, 0x09, 0x49, 0xf7, 0x91, 0x51, 0x3d, 0xb0, 0x04, + 0x58, 0xff, 0xaf, 0x46, 0x3f, 0xf4, 0xc4, 0xd7, 0x65, 0x59, 0xb8, 0x20, 0x0f, 0x36, 0xb5, 0x72, + 0x31, 0x7c, 0x66, 0x06, 0xa6, 0xef, 0x67, 0xd6, 0x66, 0xd4, 0xc7, 0xe1, 0xf2, 0xcc, 0x08, 0x83, + 0x81, 0x4b, 0x8d, 0x3d, 0x2a, 0xc7, 0x05, 0x75, 0x89, 0x35, 0x46, 0x38, 0x22, 0x9e, 0xe7, 0x3b, + 0xf3, 0x19, 0xc9, 0x63, 0x0c, 0x06, 0x1d, 0x5f, 0xf7, 0x96, 0xb1, 0xad, 0xf5, 0x49, 0x07, 0xb9, + 0x14, 0x3e, 0xee, 0x60, 0xeb, 0xdb, 0xd0, 0xf1, 0x56, 0x86, 0x1a, 0x53, 0x7e, 0x9a, 0xdf, 0x77, + 0xed, 0x61, 0x1a, 0x09, 0xcd, 0xe6, 0xd3, 0x93, 0x66, 0xdc, 0x0a, 0xfa, 0x72, 0x88, 0xf2, 0x9b, + 0x7b, 0x8e, 0x8b, 0xe1, 0xf2, 0xbc, 0x2d, 0x31, 0x6f, 0x4c, 0x5c, 0xe6, 0x89, 0x95, 0xc9, 0x17, + 0xad, 0x6b, 0x60, 0xd2, 0x56, 0x63, 0xa3, 0x7b, 0x17, 0x80, 0x9d, 0x74, 0x03, 0x07, 0x50, 0xe5, + 0xf4, 0x31, 0x5f, 0x09, 0x7a, 0x46, 0x21, 0xf9, 0x94, 0xb3, 0x2a, 0xa4, 0x8d, 0x8d, 0x9b, 0xa6, + 0x80, 0x9e, 0x38, 0x35, 0xef, 0x6a, 0xe7, 0x97, 0x38, 0x2d, 0x5f, 0x53, 0xbb, 0xfc, 0x09, 0x86, + 0xf6, 0xb7, 0xa5, 0x10, 0x21, 0x73, 0x85, 0xb4, 0xc6, 0x2f, 0x08, 0x29, 0xc5, 0x66, 0x2b, 0xec, + 0xc9, 0xee, 0x9d, 0x9f, 0xcc, 0x7c, 0xc8, 0xc6, 0xc1, 0x0d, 0x4b, 0xbe, 0x51, 0x4e, 0x97, 0x9e, + 0x13, 0x03, 0x73, 0xd6, 0x65, 0xd4, 0xe8, 0xeb, 0x4f, 0x11, 0xfb, 0xeb, 0x6f, 0xe4, 0x60, 0xd4, + 0x85, 0xfe, 0x41, 0x29, 0x61, 0xd5, 0xe3, 0x6b, 0x9a, 0xf5, 0x97, 0xd7, 0x9c, 0x5f, 0xe2, 0x59, + 0x0c, 0x53, 0x39, 0xf2, 0xc4, 0x9b, 0x67, 0xac, 0xc0, 0x0b, 0x4a, 0xef, 0xea, 0x0b, 0xc1, 0xef, + 0x73, 0x10, 0x42, 0x62, 0xba, 0xed, 0x08, 0x30, 0x9c, 0x4b, 
0x1a, 0xbc, 0x4b, 0x9f, 0x2e, 0x2f, + 0x58, 0x25, 0x53, 0xaf, 0x79, 0x11, 0x7b, 0x59, 0x76, 0x16, 0xac, 0x87, 0x66, 0x1d, 0x94, 0x4c, + 0x6c, 0x1f, 0xd0, 0xc6, 0x82, 0x91, 0x18, 0x7c, 0x8c, 0xc1, 0x67, 0x96, 0xbe, 0x71, 0x0d, 0x64, + 0x6e, 0x92, 0xd5, 0xdd, 0xa2, 0xea, 0x0f, 0x61, 0x71, 0x70, 0x77, 0xe6, 0xbe, 0x20, 0xf6, 0xf3, + 0x8f, 0xde, 0x3d, 0x57, 0xff, 0x03, 0xd2, 0x13, 0x22, 0xfd, 0xfa, 0x35, 0x77, 0x1b, 0x09, 0x0b, + 0x2a, 0xb8, 0xf4, 0x8d, 0x3c, 0x14, 0x4d, 0xaf, 0x8d, 0x97, 0x25, 0xb7, 0x06, 0x2c, 0xb4, 0x27, + 0x78, 0xbb, 0xbf, 0x90, 0x44, 0xbe, 0xcc, 0x58, 0xc9, 0x5c, 0x63, 0x2a, 0x15, 0x22, 0x97, 0x89, + 0x86, 0x71, 0x05, 0xe8, 0xa3, 0xe7, 0x54, 0x46, 0xfd, 0xe2, 0xa9, 0x44, 0x86, 0xf7, 0x59, 0x1a, + 0x9c, 0x80, 0x46, 0x6b, 0x42, 0x56, 0x7d, 0x5c, 0xbd, 0xdb, 0x0a, 0xa6, 0x52, 0x61, 0x17, 0xd6, + 0x8b, 0x81, 0x96, 0x9d, 0xf0, 0x39, 0xe3, 0x6c, 0x3e, 0xf6, 0x68, 0x0b, 0xb6, 0x17, 0x50, 0x5b, + 0xaa, 0xf2, 0xd4, 0xec, 0xf0, 0x53, 0x94, 0xae, 0x96, 0xb2, 0x41, 0x81, 0xfb, 0x9e, 0xa9, 0xf8, + 0x18, 0xc7, 0x82, 0x52, 0xf4, 0x58, 0x23, 0x09, 0xc4, 0x15, 0x6c, 0xcc, 0x1f, 0xad, 0xb5, 0x94, + 0xa5, 0x0a, 0xce, 0xc8, 0x39, 0x43, 0x4f, 0xa3, 0xe2, 0x2c, 0x82, 0x5b, 0xe2, 0x53, 0xd9, 0x7a, + 0x63, 0x31, 0x67, 0x85, 0x0b, 0xe2, 0x05, 0x35, 0x91, 0xc9, 0x71, 0xe8, 0xf2, 0xf2, 0x5a, 0xc2, + 0xea, 0x0e, 0x10, 0x82, 0xde, 0xc8, 0x0a, 0xd9, 0x32, 0x58, 0xd2, 0xd9, 0x94, 0x0f, 0x4b, 0x6d, + 0xb3, 0xc7, 0xd6, 0x96, 0x2d, 0x46, 0x87, 0xe8, 0xbc, 0x04, 0xf0, 0xdc, 0xd2, 0x7b, 0xaa, 0x87, + 0xf7, 0x80, 0xfb, 0x91, 0xc5, 0x8b, 0x79, 0xc4, 0x23, 0x56, 0xe0, 0x45, 0x42, 0xe4, 0x82, 0x77, + 0xd0, 0x47, 0x00, 0xff, 0xb0, 0x57, 0xde, 0xb2, 0x45, 0x40, 0x91, 0xf2, 0x5b, 0x6d, 0x2d, 0xf9, + 0x54, 0x8e, 0xe0, 0xaa, 0xbf, 0x4f, 0x7f, 0x36, 0x55, 0x67, 0x9d, 0xa3, 0xb7, 0x43, 0x16, 0x8c, + 0xe3, 0xa0, 0xc7, 0xef, 0x97, 0x76, 0x11, 0x2f, 0xeb, 0x69, 0x90, 0xb8, 0x92, 0xe0, 0xba, 0xca, + 0xc6, 0xb9, 0x12, 0x0f, 0xae, 0x64, 0xbc, 0xe4, 0xf8, 0x91, 0x25, 0x27, 0x6c, 0xe2, 0x0e, 0xd8, + 0x95, 0xad, 0x0c, 0xaf, 0xd0, 0xb8, 0x4e, 0xba, 0x31, 0xb5, 0x25, 0x65, 0x68, 0x91, 0xfc, 0x39, + 0x7f, 0xc6, 0x4b, 0x6b, 0x9f, 0x20, 0xc9, 0x5f, 0x4d, 0x63, 0xfe, 0xdf, 0x72, 0xe6, 0x53, 0x6d, + 0xcd, 0xf1, 0xd4, 0x6c, 0x51, 0xdf, 0x93, 0x78, 0x8b, 0x17, 0x0a, 0xe8, 0x06, 0xf3, 0x32, 0x89, + 0x16, 0x22, 0x4b, 0x43, 0xe2, 0xb8, 0x11, 0x85, 0x61, 0x5d, 0x22, 0xda, 0x3f, 0xda, 0x7c, 0xdf, + 0x5d, 0x15, 0x64, 0x36, 0x47, 0xe6, 0xfa, 0x6a, 0x4d, 0xf2, 0x33, 0x34, 0x64, 0xff, 0x7f, 0xa8, + 0x49, 0x31, 0x99, 0xbe, 0x7b, 0x94, 0x98, 0x50, 0x0b, 0x8c, 0xc2, 0x17, 0xf5, 0x88, 0x10, 0x84, + 0xbb, 0xd5, 0x29, 0x4f, 0xcc, 0x2d, 0x59, 0x58, 0x3c, 0x84, 0xf2, 0x2c, 0xe5, 0xc8, 0x20, 0x79, + 0x28, 0x2f, 0x18, 0x3d, 0x60, 0x23, 0x7c, 0xaf, 0x3f, 0x68, 0xfe, 0x33, 0xd3, 0xa7, 0xec, 0x1b, + 0x00, 0x17, 0xf4, 0x0e, 0x82, 0x38, 0xac, 0xe9, 0x61, 0x45, 0x39, 0x37, 0x9f, 0x6d, 0x79, 0x6a, + 0xda, 0xb1, 0x3e, 0x18, 0x57, 0x77, 0x5f, 0x52, 0x65, 0x0c, 0x7c, 0x81, 0x7c, 0x9e, 0xf8, 0xb2, + 0x27, 0x5e, 0x60, 0x15, 0xa0, 0x8d, 0xa7, 0x52, 0xe4, 0xec, 0xc9, 0xea, 0x4a, 0xc5, 0x76, 0x5f, + 0x8b, 0xee, 0x7b, 0xdc, 0xa7, 0xd1, 0x56, 0x18, 0xff, 0xcc, 0x83, 0xa6, 0xfb, 0x66, 0x47, 0x05, + 0xfa, 0x89, 0xac, 0x98, 0xe7, 0x1e, 0x2b, 0xcf, 0x36, 0x7d, 0x20, 0x48, 0x26, 0x24, 0xe6, 0x1b, + 0xf9, 0xf7, 0x48, 0x34, 0x8f, 0xf4, 0xb8, 0xfb, 0xca, 0x83, 0x91, 0xb5, 0x76, 0x66, 0xaa, 0x0d, + 0x41, 0x65, 0xde, 0x9c, 0x4e, 0xda, 0x17, 0xea, 0xe3, 0x49, 0x34, 0x26, 0x71, 0x3e, 0xd9, 0xaa, + 0x72, 0xa3, 0xbc, 0x17, 0xae, 0x6e, 0xec, 0xf6, 0xe0, 0xd9, 0x2e, 0xd8, 0x64, 0x38, 
0x7b, 0x88, + 0x89, 0xfe, 0x89, 0x18, 0xe6, 0x2a, 0x47, 0xb7, 0xc8, 0x0e, 0x97, 0x71, 0x93, 0xe7, 0x21, 0x1f, + 0x71, 0xd1, 0xa6, 0x1b, 0x60, 0xf4, 0x0e, 0xfc, 0xc4, 0xe2, 0xdd, 0xb2, 0xbb, 0x42, 0x51, 0x6c, + 0xe7, 0xaa, 0x65, 0xd3, 0x30, 0x3c, 0xd6, 0xce, 0xae, 0xdd, 0x6a, 0xf8, 0xe5, 0x76, 0x98, 0x3f, + 0x2a, 0x4d, 0xca, 0x45, 0x82, 0x59, 0x61, 0x06, 0x74, 0xb9, 0x66, 0x5d, 0x72, 0xd9, 0xf3, 0xf7, + 0x61, 0xfd, 0x88, 0x4f, 0x43, 0x4c, 0x93, 0x07, 0x4d, 0x52, 0x6b, 0x1c, 0x7c, 0xbd, 0xa3, 0x4a, + 0x05, 0xfe, 0x46, 0xf1, 0x24, 0x11, 0x1c, 0x5c, 0x75, 0x94, 0x24, 0x13, 0x4b, 0xb8, 0x09, 0x39, + 0x28, 0x52, 0xa1, 0x52, 0xdc, 0x1b, 0xf9, 0x8c, 0x60, 0x4d, 0xc3, 0xd4, 0xf1, 0xe1, 0x08, 0xd1, + 0xd3, 0x64, 0x06, 0x18, 0xeb, 0xb4, 0xfb, 0xea, 0xb2, 0x2d, 0x9d, 0xfc, 0xf4, 0xc3, 0x1f, 0x94, + 0xc9, 0x0a, 0x01, 0x62, 0xe8, 0x76, 0xa8, 0x80, 0x7c, 0x62, 0xce, 0x74, 0x6c, 0xe4, 0x90, 0x6d, + 0xa0, 0x86, 0x84, 0x44, 0xb7, 0x1f, 0x7a, 0x92, 0x2c, 0x54, 0x13, 0x74, 0x37, 0xfa, 0x98, 0xfe, + 0x85, 0x9e, 0x44, 0x96, 0x89, 0xac, 0xa1, 0x08, 0xe4, 0x95, 0x11, 0xe7, 0x3d, 0xc1, 0x4c, 0x26, + 0x26, 0xea, 0x84, 0x6b, 0x0b, 0xf0, 0x21, 0x39, 0xf3, 0xe2, 0xc3, 0xb0, 0x76, 0x73, 0x17, 0x6f, + 0x36, 0x9b, 0x40, 0x37, 0x7b, 0xc9, 0x01, 0xf5, 0xdc, 0x65, 0x45, 0xd9, 0x3b, 0x35, 0x37, 0xcb, + 0x35, 0xc4, 0x69, 0xaa, 0x50, 0xe5, 0x45, 0x65, 0x03, 0x9e, 0xd2, 0x40, 0xa1, 0x4c, 0x87, 0x6c, + 0x35, 0x21, 0x5c, 0x9f, 0x3c, 0x53, 0x3e, 0x6c, 0x12, 0xc1, 0x5d, 0x65, 0xbf, 0xcd, 0xdb, 0x96, + 0xf3, 0x2f, 0x85, 0xe1, 0x7f, 0x4c, 0x01, 0x9d, 0xe4, 0x71, 0x3a, 0x4d, 0xe3, 0x44, 0xf6, 0x4c, + 0xfe, 0x4b, 0xc6, 0x39, 0x78, 0x31, 0x44, 0xa7, 0x1f, 0x8d, 0x9c, 0x11, 0xfa, 0x47, 0x56, 0xae, + 0x19, 0xfe, 0x6c, 0x0b, 0xcc, 0x72, 0x5f, 0x9c, 0x13, 0xc0, 0x48, 0x9b, 0x6b, 0x97, 0xcf, 0xf8, + 0xb4, 0xf9, 0xd9, 0xfd, 0xee, 0xd1, 0x98, 0x71, 0x26, 0x45, 0x44, 0x85, 0x01, 0xe9, 0x49, 0x9c, + 0xa0, 0x3d, 0x94, 0x3b, 0xae, 0xcd, 0xdb, 0x3a, 0x5f, 0x4f, 0xa7, 0xdd, 0xa1, 0x17, 0x51, 0x75, + 0xd3, 0xe6, 0xee, 0x09, 0x05, 0x1c, 0x89, 0x53, 0x1d, 0xd2, 0xf1, 0xbf, 0xdf, 0x6b, 0xd2, 0x8a, + 0x7c, 0x6e, 0xf6, 0x94, 0xce, 0xfb, 0xf6, 0x5c, 0xe5, 0x25, 0x9d, 0xd3, 0x35, 0xee, 0xf2, 0x9f, + 0x61, 0xf0, 0xe9, 0xf3, 0x9a, 0x46, 0xb8, 0x3a, 0x94, 0x40, 0xb9, 0x8f, 0x95, 0x8b, 0x23, 0xcc, + 0x3b, 0x5e, 0x3b, 0x90, 0xcd, 0x4a, 0xa6, 0x03, 0x20, 0x10, 0x96, 0x93, 0xd1, 0xb0, 0x86, 0x58, + 0x92, 0xb1, 0x26, 0xdc, 0x6c, 0x0a, 0x4f, 0x35, 0x4b, 0x07, 0x91, 0xee, 0xc8, 0x80, 0x3c, 0xf9, + 0xc5, 0xf4, 0x86, 0xef, 0x0a, 0x5b, 0xe9, 0x3b, 0x7b, 0xd8, 0xbf, 0x4b, 0x7e, 0x11, 0x47, 0x84, + 0x84, 0x4d, 0x75, 0xff, 0x64, 0xe5, 0xcc, 0x61, 0x73, 0x9d, 0xc3, 0x3c, 0x9c, 0xd4, 0x98, 0xe4, + 0x7a, 0xd0, 0xc1, 0x6c, 0x30, 0x56, 0x1c, 0xa4, 0xa0, 0x6c, 0xab, 0x18, 0x8e, 0x5e, 0xde, 0x3e, + 0xb1, 0x44, 0xbc, 0xbb, 0xc7, 0x33, 0xfd, 0x99, 0x4c, 0xac, 0x7d, 0xcd, 0x9b, 0x90, 0x6e, 0x54, + 0x3a, 0x61, 0xa3, 0x2d, 0xb6, 0xc8, 0x2b, 0x2e, 0x8f, 0x34, 0xab, 0xa9, 0x8b, 0xf8, 0xf3, 0x1d, + 0xda, 0x2f, 0x85, 0x57, 0x50, 0x77, 0x3a, 0x07, 0xec, 0x74, 0x2a, 0xbe, 0xdb, 0x68, 0x98, 0x46, + 0x08, 0xc4, 0x38, 0x83, 0x64, 0x35, 0x9e, 0xea, 0x55, 0xc6, 0x20, 0x56, 0x89, 0xe6, 0xbe, 0x8d, + 0x0d, 0xe3, 0xd4, 0xbd, 0x5b, 0x04, 0x96, 0x5d, 0x53, 0xa6, 0x62, 0x6b, 0x7b, 0x7e, 0x60, 0x60, + 0x8a, 0xe2, 0x09, 0x58, 0x54, 0x26, 0x27, 0x2d, 0x0e, 0x14, 0x6d, 0x90, 0x60, 0xa5, 0x53, 0xf1, + 0x56, 0x78, 0x60, 0x0d, 0xd4, 0x0f, 0x27, 0x20, 0x6f, 0x77, 0xea, 0x15, 0xee, 0xde, 0x76, 0xea, + 0x62, 0x9e, 0x45, 0xa6, 0x99, 0xaa, 0x76, 0x39, 0x50, 0x79, 0xb9, 0x34, 0x71, 0x64, 0x05, 0xe8, + 0xdb, 0x42, 
0x90, 0x54, 0xa2, 0x4c, 0x0f, 0x09, 0xcd, 0xed, 0x4b, 0xdb, 0xea, 0x7f, 0xac, 0x7a, + 0x93, 0x29, 0x48, 0x77, 0xca, 0xe5, 0x58, 0x05, 0x66, 0x24, 0x06, 0x8b, 0x69, 0x98, 0x1f, 0x3a, + 0xe3, 0xe8, 0x26, 0xe4, 0x70, 0xb0, 0x0d, 0x67, 0x99, 0x0a, 0xf7, 0xaf, 0x81, 0x0c, 0xf7, 0x1f, + 0xc9, 0xce, 0x3a, 0xa5, 0x91, 0x7b, 0x44, 0xc2, 0x7e, 0xdd, 0xf6, 0xff, 0xfd, 0xcc, 0x6f, 0xe2, + 0x6f, 0x9e, 0xcd, 0xfe, 0x83, 0xf3, 0x4e, 0x1e, 0x0d, 0xf6, 0xc9, 0x6a, 0x63, 0x72, 0x7a, 0x11, + 0x94, 0x0b, 0x9a, 0x52, 0x93, 0x50, 0xa7, 0xcd, 0xa6, 0x3f, 0x2f, 0x88, 0x5e, 0xdb, 0xc7, 0xe6, + 0x8d, 0x30, 0x18, 0x2c, 0x52, 0xce, 0x64, 0x39, 0xe5, 0xa9, 0x4a, 0x2f, 0x54, 0xc3, 0x05, 0x99, + 0x2f, 0xb4, 0x0f, 0x6c, 0x32, 0x8d, 0xd1, 0x70, 0x9f, 0x83, 0xb8, 0xba, 0x32, 0x24, 0xcc, 0x3e, + 0x60, 0xf6, 0xac, 0xec, 0xe6, 0x22, 0x20, 0xcd, 0x65, 0x36, 0x50, 0x54, 0x39, 0x25, 0x6c, 0x3c, + 0x19, 0xf0, 0x42, 0x98, 0xff, 0x0b, 0x34, 0x4d, 0x11, 0x39, 0x8f, 0xeb, 0x8c, 0xc3, 0x97, 0xba, + 0x68, 0xd4, 0x33, 0xa9, 0xfe, 0x63, 0x77, 0x03, 0xe1, 0x29, 0x25, 0x3f, 0x95, 0xe4, 0x9a, 0x87, + 0x98, 0xae, 0xae, 0xc6, 0xf9, 0x06, 0x9b, 0x84, 0x9c, 0x65, 0x8d, 0x98, 0x04, 0xc5, 0xe2, 0x53, + 0x03, 0xc7, 0x8e, 0x56, 0x7d, 0xbb, 0xbe, 0xbc, 0xd4, 0x50, 0x29, 0xb6, 0x69, 0xd8, 0x6e, 0x0f, + 0x8a, 0xe8, 0xb9, 0x6a, 0xdc, 0x24, 0x79, 0x50, 0xc0, 0xeb, 0x77, 0x71, 0x85, 0x20, 0x95, 0x23, + 0x2e, 0xc1, 0xb5, 0x71, 0x63, 0xd3, 0xfa, 0x8a, 0x60, 0x07, 0xd2, 0x20, 0x02, 0xd6, 0x2a, 0x2f, + 0xa5, 0x64, 0xf7, 0x6f, 0x1c, 0x07, 0xd3, 0xea, 0xf6, 0x17, 0xae, 0xfc, 0x78, 0x87, 0xef, 0xaf, + 0xd3, 0x71, 0xcb, 0x10, 0x13, 0x6f, 0xad, 0xbf, 0xe8, 0x02, 0x5c, 0x82, 0x19, 0xbf, 0x56, 0xe3, + 0xf2, 0x96, 0x1f, 0x35, 0x66, 0xee, 0x36, 0x9d, 0x07, 0xd3, 0x5d, 0x81, 0xc6, 0xc4, 0x63, 0x92, + 0x7a, 0x0d, 0xbc, 0xe6, 0x42, 0x37, 0x8f, 0x06, 0x87, 0xc4, 0xb1, 0x7f, 0x3e, 0x40, 0xc7, 0x5c, + 0x04, 0x90, 0x1d, 0x43, 0xcd, 0x5b, 0xf0, 0xa0, 0x14, 0x12, 0x74, 0x2d, 0xee, 0xdd, 0xf7, 0xd4, + 0xbb, 0xd3, 0x90, 0x52, 0x9b, 0x05, 0x83, 0x11, 0x8a, 0xb8, 0x57, 0x0e, 0xcc, 0xa4, 0x4d, 0x0a, + 0xc5, 0xb8, 0x36, 0xef, 0x81, 0x00, 0xe5, 0xbc, 0x82, 0x61, 0x39, 0xd8, 0x35, 0x7a, 0x9d, 0x3c, + 0x9f, 0xeb, 0xc5, 0x18, 0xb6, 0x9f, 0xb3, 0x7c, 0x46, 0x8b, 0x58, 0x7a, 0xc4, 0x80, 0x4f, 0xf0, + 0x03, 0xe9, 0x73, 0x25, 0xf4, 0x08, 0x42, 0xc5, 0x91, 0xa8, 0x78, 0x57, 0x80, 0x2c, 0x1c, 0x9f, + 0x11, 0xf7, 0xda, 0x5e, 0x77, 0x84, 0xbe, 0x8c, 0x95, 0x2e, 0x47, 0x79, 0xe3, 0x2f, 0x6c, 0x8a, + 0x7d, 0x59, 0x0b, 0xf9, 0xa3, 0x38, 0x1b, 0xfa, 0x42, 0xec, 0xf2, 0x55, 0xd3, 0xd4, 0x41, 0xb0, + 0x43, 0x79, 0x7b, 0x02, 0x53, 0xab, 0xcb, 0x18, 0x18, 0x2d, 0x29, 0x92, 0xec, 0xb8, 0x82, 0x63, + 0x04, 0xc1, 0x64, 0xd8, 0xf6, 0x75, 0xf0, 0x3c, 0x41, 0xc4, 0x57, 0x47, 0x56, 0xbe, 0xdf, 0x5a, + 0x6b, 0x02, 0x5b, 0xc0, 0x1b, 0x70, 0xe1, 0xc6, 0xe8, 0x24, 0xaf, 0xf1, 0x41, 0x60, 0x1d, 0x64, + 0xde, 0xf9, 0x1c, 0x75, 0x18, 0x01, 0x03, 0x8f, 0x44, 0xfa, 0xc6, 0xc4, 0x20, 0x8a, 0xae, 0x71, + 0xe7, 0xca, 0xea, 0xe8, 0x15, 0xf3, 0x14, 0x81, 0x6f, 0xe6, 0x7e, 0x31, 0x5b, 0xad, 0x98, 0x65, + 0x2c, 0xa9, 0x99, 0x42, 0x08, 0x99, 0xca, 0x40, 0xe2, 0x68, 0xcc, 0x0e, 0x37, 0x34, 0x58, 0xc7, + 0xa3, 0x2c, 0x44, 0x30, 0x03, 0xe2, 0x18, 0xf7, 0x7b, 0x17, 0x4c, 0xdd, 0x63, 0xcf, 0x30, 0xf8, + 0x7c, 0x5a, 0xdc, 0xd4, 0x02, 0x62, 0xe4, 0xb1, 0xa3, 0xda, 0x8e, 0x08, 0xad, 0xa9, 0xe2, 0x51, + 0xd1, 0xbb, 0xdf, 0xc8, 0x11, 0x69, 0xea, 0xce, 0xf1, 0xea, 0x3d, 0xe5, 0x78, 0x77, 0x56, 0x7c, + 0x9e, 0x3b, 0xf3, 0x20, 0x23, 0x1f, 0x3e, 0xe6, 0x28, 0x11, 0x3b, 0x23, 0x49, 0xb4, 0x6c, 0xb0, + 0x2f, 0x5f, 0x41, 0x9f, 0xc6, 0xe4, 
0x28, 0x02, 0x1f, 0xe9, 0x5a, 0x7e, 0x1f, 0x4d, 0x61, 0xb2, + 0x3f, 0xb3, 0x65, 0x6a, 0x1d, 0x75, 0x32, 0x79, 0x46, 0x55, 0xd6, 0xc2, 0xd5, 0x94, 0x65, 0xa8, + 0x2d, 0xa5, 0x90, 0xd1, 0x50, 0x41, 0x6d, 0x56, 0xb4, 0x76, 0xa1, 0x87, 0x86, 0x4c, 0x4d, 0x5f, + 0xea, 0xdd, 0xe9, 0x8b, 0xa4, 0x93, 0xad, 0x51, 0x3d, 0xf6, 0xea, 0xe4, 0x0c, 0x2b, 0x49, 0xd4, + 0x9e, 0xae, 0x46, 0x3c, 0xe5, 0xcc, 0x77, 0x30, 0x96, 0xd4, 0x6d, 0x0f, 0xe5, 0x56, 0xf8, 0xf4, + 0x43, 0x6e, 0x45, 0xb7, 0xd6, 0x82, 0x0a, 0x23, 0x1a, 0xba, 0x7c, 0x79, 0xeb, 0x9a, 0xa1, 0x00, + 0xfe, 0x11, 0x6e, 0x4f, 0x7f, 0xc3, 0x81, 0x99, 0x3f, 0xfc, 0x69, 0x3b, 0x66, 0xbc, 0x0c, 0xab, + 0xb9, 0xa7, 0x2c, 0x37, 0xdb, 0xd9, 0x02, 0x3e, 0x47, 0x1d, 0x1c, 0xc7, 0x3b, 0xbd, 0xb5, 0x1e, + 0xa5, 0xc6, 0xe8, 0x4e, 0xee, 0x38, 0xae, 0x6d, 0x40, 0x87, 0x8c, 0x6b, 0x1e, 0xe7, 0x9b, 0xb7, + 0x76, 0xb4, 0x73, 0xb7, 0x4d, 0xae, 0x3f, 0x20, 0xa2, 0x06, 0xd1, 0x7e, 0xe6, 0x0d, 0x8d, 0x83, + 0x43, 0xa1, 0xd7, 0x97, 0xd7, 0xfb, 0xec, 0x57, 0x7e, 0x0f, 0x17, 0x3a, 0x4c, 0x60, 0xe6, 0xea, + 0xc6, 0xfa, 0x75, 0xfd, 0x56, 0xbe, 0x3b, 0xb1, 0x1b, 0x77, 0xe2, 0xf3, 0x7d, 0x46, 0x23, 0x5c, + 0x8c, 0xe3, 0xbc, 0x98, 0x21, 0x21, 0x2a, 0xf6, 0x0e, 0x57, 0xde, 0x80, 0xa0, 0xe8, 0xeb, 0xc5, + 0x5a, 0xe1, 0xb4, 0x4c, 0x83, 0x5e, 0xd5, 0xea, 0x8e, 0x7b, 0xfc, 0x3d, 0x59, 0xf5, 0x46, 0x2a, + 0x4d, 0xdb, 0x64, 0x0a, 0xf0, 0xf2, 0xd7, 0x90, 0xb6, 0xe6, 0x28, 0x29, 0x6d, 0xeb, 0x90, 0xa6, + 0x06, 0xe1, 0xa8, 0x3a, 0x30, 0xc3, 0x2b, 0x1f, 0x51, 0x79, 0xe2, 0x76, 0x67, 0xaa, 0xd5, 0x7c, + 0x20, 0x33, 0x3c, 0x0c, 0x80, 0x2d, 0xb3, 0x83, 0x19, 0x96, 0x95, 0x9d, 0xa4, 0x98, 0xf9, 0xdf, + 0x78, 0x24, 0x95, 0xef, 0x98, 0x74, 0xbc, 0x96, 0xe5, 0x40, 0xa0, 0x7f, 0x19, 0xc8, 0x0c, 0x3a, + 0x3b, 0xd7, 0x71, 0xb1, 0x60, 0x40, 0xee, 0x58, 0xba, 0x9e, 0xad, 0x80, 0x8f, 0x5f, 0x3b, 0x46, + 0x7a, 0x9e, 0x3e, 0x95, 0xb5, 0xc8, 0x9a, 0x38, 0x1d, 0x79, 0xc0, 0x98, 0x13, 0x90, 0x5e, 0x0a, + 0xd6, 0xca, 0xed, 0xec, 0x88, 0x70, 0x43, 0x8d, 0x11, 0xbd, 0x7a, 0x65, 0x6d, 0xf3, 0xfe, 0x0a, + 0xa9, 0x2d, 0x2d, 0x0d, 0x2d, 0x67, 0xd8, 0x79, 0xf2, 0x87, 0x24, 0x5c, 0x4c, 0x06, 0x3e, 0x2b, + 0x2b, 0x0e, 0x50, 0x61, 0x97, 0x35, 0x5c, 0x39, 0x02, 0x15, 0x7a, 0x9c, 0xc2, 0x31, 0x74, 0x05, + 0xbf, 0x65, 0xe1, 0x5b, 0x8f, 0xe3, 0x2c, 0xea, 0xb0, 0xfb, 0x4d, 0xa7, 0xe8, 0x4b, 0xb0, 0x7b, + 0xd6, 0xf6, 0x1d, 0x54, 0x41, 0x8a, 0xe1, 0x45, 0xbb, 0xf3, 0xe0, 0x5a, 0x5e, 0xbd, 0xb8, 0x0b, + 0x48, 0x7f, 0x50, 0xaf, 0x53, 0x68, 0x0d, 0x77, 0x06, 0x16, 0x4d, 0x7d, 0xaa, 0xc9, 0xf4, 0x34, + 0x39, 0xaa, 0x02, 0x75, 0x51, 0x8a, 0x00, 0x58, 0x8b, 0x9e, 0x7f, 0xac, 0x20, 0xf2, 0x08, 0xc3, + 0x33, 0x2a, 0x5e, 0xe8, 0xd3, 0xfc, 0x24, 0xe7, 0x3b, 0x95, 0x60, 0x4e, 0x5b, 0xa0, 0x40, 0xf2, + 0xe6, 0x51, 0xf5, 0xf1, 0x3b, 0xe1, 0x86, 0x2b, 0x3a, 0x73, 0x34, 0xc5, 0xe2, 0xd7, 0x5c, 0xb9, + 0xbb, 0x0d, 0xef, 0xc0, 0xfd, 0xc7, 0x8a, 0x0e, 0x05, 0xf1, 0x6b, 0xec, 0x3d, 0x72, 0xab, 0x8f, + 0x11, 0x9a, 0xd4, 0x8f, 0x20, 0x32, 0x46, 0x54, 0xa4, 0x4c, 0xeb, 0xe6, 0x77, 0x6c, 0xec, 0xd0, + 0x61, 0x5a, 0xb7, 0x81, 0xd6, 0xa1, 0x0d, 0x05, 0x32, 0xec, 0x10, 0x05, 0x19, 0xcd, 0x89, 0xf1, + 0x85, 0xf1, 0x03, 0x6e, 0x25, 0xa0, 0x07, 0x0c, 0x9b, 0x05, 0x4c, 0x30, 0xfe, 0xce, 0x54, 0x36, + 0x63, 0x2d, 0xb3, 0x64, 0xeb, 0x79, 0x9e, 0xe7, 0x14, 0x31, 0x8d, 0x93, 0xa4, 0xcc, 0x23, 0x4e, + 0x8f, 0xd2, 0x06, 0xbd, 0xa9, 0x10, 0xee, 0xe3, 0x5b, 0xf4, 0x08, 0x72, 0x8c, 0xc1, 0xb7, 0xe4, + 0x1f, 0x44, 0xf4, 0xa0, 0xf5, 0x41, 0x5d, 0x7d, 0xa1, 0x60, 0xd1, 0x8e, 0x69, 0xde, 0x7c, 0xe8, + 0xdb, 0x74, 0x0b, 0xf8, 0x54, 0xa8, 0xa8, 0x35, 0x3a, 0x9c, 
0x6e, 0x63, 0x76, 0xc5, 0x6d, 0xd7, + 0x6d, 0x2d, 0x6c, 0xa3, 0x9b, 0x68, 0x2d, 0x22, 0xba, 0x66, 0xa9, 0x9b, 0xfd, 0xd6, 0xd3, 0xec, + 0x3a, 0x3c, 0x48, 0x78, 0x33, 0x6e, 0xd0, 0xcd, 0x13, 0x1d, 0x49, 0x02, 0x5d, 0x27, 0xb0, 0x41, + 0xcf, 0x6f, 0xe0, 0x27, 0x16, 0x1e, 0x97, 0xcd, 0xde, 0xf6, 0x9d, 0x86, 0x8f, 0x6c, 0x6b, 0xe0, + 0x9a, 0xde, 0xae, 0x02, 0x0d, 0xa0, 0x34, 0xe0, 0x44, 0x6e, 0x84, 0xb4, 0x6c, 0x3b, 0x2a, 0x88, + 0xed, 0xe7, 0x8a, 0x0c, 0x46, 0x91, 0xc4, 0xda, 0x09, 0xc1, 0xbe, 0x67, 0xc3, 0xef, 0x33, 0x77, + 0xb4, 0xa8, 0x92, 0x2f, 0xb9, 0xf0, 0x55, 0xa8, 0x8e, 0x90, 0x25, 0xdc, 0xc4, 0x72, 0x86, 0x1c, + 0xb1, 0xee, 0x96, 0x12, 0x23, 0x19, 0xeb, 0xa9, 0x62, 0x1a, 0xfb, 0xe6, 0x65, 0x46, 0xec, 0xb2, + 0x19, 0x3e, 0x20, 0x8d, 0x36, 0x75, 0x91, 0xce, 0x8c, 0xe3, 0x7d, 0x30, 0xe1, 0x45, 0x60, 0x86, + 0xab, 0xdb, 0x6e, 0x9f, 0x50, 0x36, 0x9e, 0xc9, 0x34, 0x05, 0xd0, 0xa0, 0x3e, 0xe6, 0xf9, 0xb8, + 0x31, 0x57, 0x20, 0x53, 0xe0, 0xcc, 0x3a, 0x38, 0xa8, 0xd2, 0x5a, 0x5b, 0xb0, 0xdb, 0xb8, 0x48, + 0x8d, 0x47, 0xf1, 0xdd, 0x1f, 0x64, 0x08, 0xe8, 0x69, 0xe0, 0x20, 0x74, 0xff, 0x0f, 0x4f, 0x51, + 0xca, 0x7d, 0x70, 0xca, 0x09, 0x3e, 0xc2, 0xe3, 0x35, 0x47, 0xb9, 0x88, 0x9b, 0x16, 0xdd, 0xdc, + 0x1d, 0x21, 0xfc, 0x59, 0x99, 0xa7, 0x26, 0x29, 0xc4, 0xc9, 0x01, 0xc8, 0xbb, 0x14, 0xd4, 0xf9, + 0xa1, 0x27, 0xb5, 0x5d, 0xed, 0x0a, 0x7f, 0x02, 0x6d, 0x19, 0x0c, 0x9d, 0xa1, 0x95, 0x00, 0x3b, + 0x29, 0xb0, 0x9b, 0xc2, 0x87, 0xe4, 0x10, 0xf2, 0xfc, 0x10, 0x30, 0xda, 0x09, 0x6e, 0xcf, 0x0b, + 0x49, 0x03, 0x2f, 0x82, 0x82, 0xd4, 0x09, 0xef, 0x39, 0x63, 0x12, 0x0f, 0x6d, 0xac, 0x9b, 0x23, + 0x04, 0xeb, 0xd1, 0xe7, 0x19, 0x9a, 0xf6, 0x26, 0x68, 0xdf, 0xce, 0xcf, 0x78, 0xce, 0x66, 0xea, + 0xcb, 0x24, 0x21, 0x7e, 0xab, 0xa6, 0xd2, 0xba, 0xb7, 0x44, 0x74, 0xd3, 0xb7, 0x0e, 0x53, 0x62, + 0x45, 0x69, 0x58, 0xd2, 0x0a, 0xd4, 0xed, 0x0a, 0x80, 0xec, 0x57, 0x18, 0xd4, 0xf3, 0xb7, 0x93, + 0xd5, 0x3f, 0x57, 0xf5, 0x39, 0x18, 0x3e, 0xb4, 0xe1, 0x3d, 0x5c, 0xbd, 0x19, 0x93, 0x3e, 0xeb, + 0x10, 0xad, 0x23, 0x44, 0xd3, 0xd6, 0xae, 0xcb, 0x29, 0x2a, 0x87, 0xe6, 0xa7, 0xf7, 0x30, 0xb3, + 0xff, 0x29, 0x1c, 0x7a, 0x82, 0x70, 0x26, 0xa2, 0xa4, 0xa3, 0x72, 0x86, 0x12, 0xa0, 0x53, 0x3a, + 0x99, 0x9e, 0x7d, 0x60, 0xd7, 0xdb, 0x5d, 0xec, 0x97, 0x70, 0xae, 0x1a, 0x44, 0x18, 0x4e, 0x6f, + 0xdc, 0x45, 0xdc, 0x5d, 0x0d, 0x34, 0xd3, 0x65, 0x92, 0xbd, 0x4f, 0xfd, 0x13, 0x2e, 0x0b, 0xe2, + 0x5f, 0xc6, 0xc5, 0x8f, 0x13, 0x39, 0x41, 0x40, 0x9e, 0xb4, 0xc3, 0x95, 0xd5, 0x27, 0x33, 0xde, + 0x97, 0x39, 0x82, 0x80, 0x2b, 0xf1, 0x99, 0x65, 0xf2, 0x71, 0x27, 0xea, 0x6d, 0xc7, 0x0a, 0x7b, + 0x4f, 0x07, 0x7e, 0xbb, 0x6c, 0xe7, 0x27, 0x4a, 0xe4, 0x2d, 0xe7, 0x62, 0xa1, 0xf7, 0x90, 0x87, + 0xb1, 0x82, 0x87, 0x89, 0x49, 0x35, 0x2f, 0xe7, 0x6f, 0xfd, 0x46, 0x20, 0x4a, 0x15, 0xe3, 0xec, + 0xe2, 0x36, 0x83, 0x0f, 0xe5, 0x20, 0xa4, 0x7a, 0x2e, 0xc7, 0x3e, 0x75, 0x86, 0x80, 0x74, 0x5d, + 0x83, 0x04, 0x3a, 0xa6, 0xae, 0xcc, 0x68, 0xb9, 0xb0, 0x18, 0xf6, 0x44, 0xee, 0x22, 0x49, 0x7a, + 0xe8, 0xc9, 0x06, 0x52, 0x87, 0xf1, 0x82, 0xa3, 0x2c, 0x6d, 0xab, 0x91, 0x5a, 0xe8, 0x9e, 0x86, + 0x80, 0xb1, 0x46, 0xe2, 0x56, 0x8c, 0x80, 0x4e, 0xea, 0xb9, 0x5d, 0xee, 0x1c, 0xca, 0xe4, 0x4a, + 0xd5, 0x76, 0x3f, 0x0e, 0x93, 0x63, 0x13, 0x6b, 0x5b, 0xbf, 0x2d, 0xb8, 0x03, 0x3a, 0x64, 0x6e, + 0xb0, 0x43, 0x8b, 0x2c, 0xdd, 0xef, 0x9d, 0x49, 0x83, 0xe7, 0xdf, 0xa1, 0xc7, 0x97, 0x41, 0x00, + 0x4a, 0x04, 0x78, 0x28, 0x58, 0x69, 0xbc, 0x6e, 0xa4, 0x92, 0xe6, 0x6e, 0x06, 0x37, 0x1e, 0xd4, + 0xaf, 0x2a, 0x92, 0x02, 0xa4, 0x59, 0x6b, 0x56, 0x3f, 0x7d, 0x21, 0xc3, 0xed, 0x66, 
0x1a, 0x87, + 0x49, 0xeb, 0x44, 0xad, 0x5a, 0xba, 0x0e, 0xeb, 0x0b, 0x7f, 0x38, 0x9f, 0x7c, 0xca, 0x7c, 0x4e, + 0x12, 0x03, 0x14, 0xe9, 0x45, 0x55, 0xc8, 0xb0, 0xfc, 0xaf, 0x57, 0x08, 0x0d, 0x01, 0x35, 0x43, + 0x0f, 0x1e, 0xf1, 0xb9, 0x42, 0x98, 0x70, 0x30, 0xfa, 0x09, 0x32, 0x9b, 0xcb, 0xb4, 0x87, 0x23, + 0xfe, 0x23, 0x7e, 0xe1, 0x5a, 0x88, 0x9e, 0x6d, 0x60, 0x49, 0x7f, 0xdc, 0x81, 0xb9, 0x98, 0x5d, + 0x3a, 0x62, 0x45, 0x14, 0x0d, 0xe7, 0xa5, 0xbc, 0xdf, 0xa1, 0x14, 0xe5, 0x6b, 0x0b, 0xee, 0xa6, + 0xf7, 0x2e, 0x44, 0x11, 0xe2, 0x52, 0xf8, 0x7e, 0x35, 0x8a, 0xa0, 0x3b, 0x66, 0x33, 0x56, 0x31, + 0x3b, 0xf9, 0x4c, 0x43, 0xf3, 0x35, 0x66, 0xe4, 0x06, 0x29, 0x37, 0xf2, 0xf7, 0x4b, 0x4a, 0xac, + 0x67, 0xcd, 0x9d, 0xad, 0x7d, 0xf1, 0x91, 0x1c, 0x9e, 0x20, 0xc5, 0x5c, 0x03, 0x4b, 0xc1, 0x24, + 0xd3, 0x62, 0x1c, 0x0c, 0xf7, 0x50, 0xc8, 0x10, 0x1c, 0x32, 0x83, 0xff, 0x4f, 0xd5, 0x16, 0x7b, + 0xef, 0x61, 0x05, 0xb0, 0x2e, 0x68, 0xf7, 0x67, 0x22, 0x19, 0x6b, 0xe5, 0x62, 0xcd, 0x34, 0x5a, + 0xff, 0xdf, 0x3a, 0xa7, 0x2e, 0x0e, 0xff, 0x09, 0x03, 0x79, 0x89, 0xda, 0xec, 0xcc, 0x16, 0x3c, + 0xf4, 0x07, 0x71, 0x21, 0xb2, 0x07, 0x64, 0xd8, 0xb9, 0x50, 0x02, 0xe3, 0xf7, 0xe5, 0x76, 0x20, + 0xf6, 0x97, 0x27, 0x0a, 0xcc, 0x15, 0xa8, 0x8c, 0xf8, 0x5a, 0x5c, 0x09, 0xee, 0x01, 0xa6, 0xa9, + 0xca, 0x15, 0x20, 0x76, 0x4e, 0x4e, 0xc9, 0xb3, 0xfc, 0xdc, 0x64, 0xdb, 0x1c, 0xc4, 0xd0, 0xf6, + 0x5d, 0x9c, 0xd2, 0x31, 0x73, 0xe7, 0xf4, 0xe6, 0x88, 0xc1, 0x1a, 0xb9, 0xf7, 0xe5, 0x4a, 0xc9, + 0x19, 0x5f, 0x9f, 0x20, 0xcc, 0xe5, 0x0f, 0x60, 0xf5, 0xd8, 0x6a, 0xd8, 0xdb, 0x6e, 0x3b, 0xff, + 0xeb, 0xf8, 0x5e, 0x6c, 0x12, 0x99, 0x08, 0xaf, 0x3e, 0xb9, 0x08, 0x01, 0xf8, 0xb4, 0x7e, 0x32, + 0xc3, 0x52, 0x37, 0xf8, 0x4a, 0xc2, 0xfd, 0x7b, 0x0e, 0x9c, 0x29, 0xdb, 0x96, 0xe1, 0xf3, 0xec, + 0xce, 0x12, 0xfb, 0xaa, 0x21, 0x01, 0xe3, 0x9f, 0xd1, 0x68, 0x9f, 0x7d, 0x70, 0xca, 0x60, 0xee, + 0x33, 0x6f, 0x95, 0x99, 0x99, 0x66, 0xd0, 0x53, 0x57, 0x9d, 0x0b, 0xf6, 0x9f, 0xee, 0xd8, 0x0d, + 0xa0, 0x44, 0xa1, 0x5b, 0x75, 0x0c, 0xa2, 0x27, 0x26, 0x1f, 0x32, 0x99, 0x2c, 0x59, 0x83, 0x10, + 0xb9, 0x21, 0x1e, 0x2d, 0x53, 0x36, 0x99, 0xe2, 0xbf, 0x3f, 0xe2, 0xf6, 0x6e, 0x89, 0x5a, 0x9d, + 0xe7, 0xc7, 0x53, 0x2e, 0x2b, 0xcb, 0x08, 0xab, 0x3b, 0x79, 0xd7, 0x49, 0xc4, 0x97, 0xe1, 0xc7, + 0x2c, 0x49, 0xfb, 0x78, 0x5f, 0x9c, 0x30, 0x4c, 0x19, 0xfe, 0x03, 0xca, 0xd2, 0x6f, 0xbc, 0xb9, + 0x71, 0xff, 0x7a, 0xe2, 0x44, 0x87, 0x98, 0x1b, 0x64, 0x4a, 0x8e, 0x72, 0xfd, 0xfc, 0x3f, 0x7d, + 0x1c, 0xd0, 0xc4, 0x62, 0xcd, 0xbe, 0x46, 0x00, 0x5b, 0x86, 0x0c, 0x0e, 0x76, 0x2d, 0x51, 0x6c, + 0x6a, 0x3e, 0x95, 0x4f, 0x0c, 0x03, 0xca, 0x61, 0xba, 0x4c, 0xac, 0x00, 0x48, 0x7c, 0x62, 0x2b, + 0x32, 0xd4, 0x8a, 0x8e, 0x5c, 0x67, 0x0e, 0x5e, 0xfb, 0xc5, 0xc0, 0x57, 0xb6, 0xc4, 0xd9, 0xaa, + 0x40, 0x02, 0x34, 0x85, 0xd7, 0xf8, 0xdf, 0xf6, 0x18, 0x55, 0x5c, 0x70, 0xa7, 0xfa, 0x9e, 0x99, + 0x26, 0x82, 0xec, 0x73, 0x67, 0xd2, 0xba, 0x73, 0x85, 0x66, 0x67, 0x8f, 0xe8, 0x24, 0xc0, 0x2c, + 0x9c, 0x38, 0x4d, 0xb0, 0xc9, 0x1f, 0x03, 0x17, 0x84, 0x79, 0xcf, 0x86, 0x7d, 0x83, 0x44, 0x1a, + 0x3e, 0x6b, 0xdb, 0xeb, 0xcb, 0x4f, 0xd3, 0xee, 0xf8, 0x5b, 0xdd, 0x6a, 0x28, 0x01, 0x60, 0x2e, + 0x93, 0x5e, 0xfe, 0xe8, 0x7a, 0xc4, 0xb9, 0xa1, 0x5f, 0x90, 0xf2, 0x8f, 0x2e, 0xbe, 0xed, 0xdd, + 0xfb, 0xa4, 0x83, 0xb9, 0xc9, 0x5f, 0x45, 0xd0, 0x73, 0x7b, 0x7f, 0x87, 0xcb, 0x05, 0xe6, 0xa6, + 0x74, 0xfb, 0x52, 0xa3, 0x4e, 0x57, 0xf5, 0x23, 0x31, 0xdc, 0x21, 0xcf, 0x1c, 0x12, 0xab, 0xdb, + 0x24, 0x72, 0x71, 0x7c, 0xa0, 0x1c, 0x63, 0xa4, 0xd9, 0xc8, 0x25, 0xab, 0x91, 0x97, 0x7a, 0x77, + 0x75, 0xa2, 
0x44, 0xf0, 0x87, 0x76, 0x19, 0x47, 0x45, 0x13, 0x74, 0x1d, 0x30, 0x9b, 0x73, 0x4e, + 0x4a, 0x08, 0x3a, 0x16, 0x65, 0x04, 0x24, 0xd1, 0xfc, 0x3a, 0x80, 0x11, 0x03, 0x85, 0x7e, 0xc2, + 0xd9, 0x0a, 0xbf, 0xa8, 0xd5, 0x21, 0x07, 0xe5, 0x4d, 0x4d, 0xca, 0xed, 0x5b, 0x86, 0x84, 0xc4, + 0x4e, 0xff, 0xa6, 0x70, 0x63, 0x5f, 0x6b, 0x0e, 0x4a, 0xb2, 0x52, 0xde, 0x57, 0x76, 0xa0, 0x38, + 0xd7, 0x38, 0x50, 0xc9, 0x6b, 0x86, 0x62, 0x41, 0x9e, 0xc3, 0x08, 0x4e, 0x93, 0xd3, 0x3f, 0x0a, + 0xf2, 0x08, 0x25, 0xbc, 0x06, 0xa8, 0xe2, 0x5d, 0x52, 0xfe, 0xe8, 0x9b, 0x31, 0x0b, 0xa9, 0x21, + 0xd1, 0x31, 0x9b, 0x5a, 0x67, 0xe9, 0x5f, 0xa2, 0x84, 0x01, 0x93, 0xae, 0x39, 0x7b, 0xb7, 0xa7, + 0x55, 0xeb, 0xa8, 0x6b, 0x6f, 0xb1, 0xa5, 0xa1, 0xf7, 0x80, 0xbb, 0x85, 0x50, 0xc9, 0xa1, 0xc1, + 0xac, 0x9a, 0x36, 0xdf, 0x2b, 0x27, 0xe7, 0xf4, 0xc1, 0x8c, 0xef, 0x51, 0xe3, 0x3b, 0x90, 0x4a, + 0x2d, 0x6f, 0x24, 0x3b, 0x31, 0x81, 0xe2, 0x78, 0x9c, 0x62, 0x9d, 0xfb, 0xab, 0x83, 0x58, 0xc2, + 0xdb, 0x01, 0x86, 0xab, 0x74, 0x86, 0x52, 0x20, 0x90, 0x15, 0x26, 0x4b, 0xcd, 0x36, 0x02, 0x4f, + 0x69, 0x48, 0x5f, 0x65, 0x9b, 0x50, 0xa6, 0xec, 0x42, 0x72, 0x04, 0xd5, 0xe9, 0x5c, 0x53, 0x47, + 0xe1, 0x19, 0xff, 0x85, 0x97, 0xe1, 0x2b, 0xf1, 0x25, 0x2b, 0xa1, 0x21, 0xe4, 0x93, 0xe9, 0xd3, + 0x85, 0xdf, 0xcf, 0xb7, 0xfe, 0x7e, 0x9c, 0x0b, 0x29, 0xc2, 0x9f, 0x2f, 0x56, 0x8b, 0x0b, 0xbf, + 0x0d, 0x27, 0xe7, 0x68, 0x7c, 0x6c, 0x1c, 0xd2, 0x00, 0x77, 0x1b, 0x4d, 0xff, 0xa6, 0x14, 0xfd, + 0x90, 0x62, 0x81, 0x24, 0xc1, 0x0c, 0x5e, 0x62, 0xbc, 0x05, 0x0e, 0x72, 0xfd, 0x84, 0xfa, 0xb3, + 0xc2, 0x84, 0xbb, 0x40, 0xab, 0xb7, 0x8a, 0x02, 0xde, 0xaf, 0xc8, 0xf7, 0x7b, 0x28, 0xdd, 0xf9, + 0xc6, 0xba, 0xab, 0x32, 0xda, 0x49, 0xc3, 0x89, 0x6f, 0x44, 0x0a, 0x9b, 0x98, 0x6b, 0xea, 0x21, + 0x69, 0xe7, 0x44, 0xaa, 0xd1, 0xb4, 0x6b, 0x07, 0x4a, 0xfc, 0x2b, 0x9a, 0x0c, 0xab, 0x90, 0x8d, + 0xc3, 0xac, 0xae, 0x9e, 0xb6, 0x4c, 0x11, 0x05, 0x44, 0x4f, 0x1a, 0x41, 0x25, 0xdc, 0x2f, 0x22, + 0x00, 0x32, 0x70, 0xdd, 0x61, 0x81, 0x7d, 0x67, 0xf5, 0x98, 0x5f, 0x47, 0x14, 0x24, 0x89, 0x4a, + 0xeb, 0x8c, 0x09, 0xaf, 0x5a, 0xf7, 0xb1, 0x45, 0x1f, 0xdb, 0xd7, 0x37, 0xb1, 0x9c, 0xb8, 0x5e, + 0xc1, 0x80, 0xa0, 0x36, 0x99, 0x65, 0x90, 0x33, 0xd6, 0xfe, 0x74, 0x87, 0xc7, 0xd8, 0x4d, 0x97, + 0x10, 0xda, 0x61, 0xac, 0x4e, 0x69, 0x2f, 0x68, 0x07, 0xc8, 0x7b, 0xb7, 0x32, 0xca, 0x98, 0x4c, + 0xb4, 0xf6, 0x9e, 0xe2, 0x70, 0xae, 0x41, 0x4f, 0x15, 0x90, 0xf1, 0x6c, 0x9e, 0xfb, 0x9e, 0x6c, + 0x21, 0xc3, 0x52, 0xd6, 0x46, 0x7d, 0x63, 0x32, 0x23, 0x78, 0xad, 0x8c, 0x3d, 0x52, 0x62, 0x0f, + 0xc2, 0xe7, 0xc2, 0x74, 0x33, 0xaa, 0x05, 0x62, 0x1a, 0x79, 0xe2, 0x8a, 0x3c, 0x32, 0xb2, 0x9c, + 0xb3, 0x85, 0xb8, 0xbc, 0x6d, 0x62, 0xd1, 0x05, 0x18, 0x12, 0xae, 0x2f, 0xf7, 0x8d, 0xf8, 0xd2, + 0x82, 0x9e, 0x05, 0x86, 0x33, 0xc9, 0x2c, 0x35, 0xd2, 0xdf, 0xd5, 0xc8, 0x8a, 0x3d, 0xa1, 0x48, + 0xdf, 0x52, 0x31, 0x86, 0x01, 0x8e, 0x5d, 0xb1, 0x15, 0x4c, 0xda, 0xa2, 0x3b, 0x85, 0x9e, 0x81, + 0x24, 0x78, 0xef, 0x1a, 0x8d, 0xfe, 0xb0, 0xdf, 0x73, 0xc0, 0xf4, 0xf0, 0xd0, 0xb0, 0xa4, 0xa5, + 0x0a, 0x36, 0x9c, 0x56, 0x24, 0x56, 0xca, 0xd2, 0xc8, 0xf9, 0xf9, 0x0c, 0xd4, 0xca, 0x64, 0x8a, + 0x0b, 0x1f, 0x2b, 0x67, 0x43, 0x46, 0x2a, 0x3e, 0xe2, 0x14, 0x64, 0xda, 0xad, 0x96, 0xbc, 0x21, + 0x4b, 0x98, 0xff, 0x4b, 0x16, 0xc3, 0x05, 0x05, 0x20, 0x7e, 0x37, 0xf4, 0x2e, 0x5c, 0x97, 0xa5, + 0x07, 0x15, 0xd9, 0xbf, 0x49, 0x1f, 0xbc, 0x55, 0x00, 0xba, 0x47, 0x2e, 0x18, 0x1c, 0xd7, 0x6a, + 0xb0, 0x34, 0xcd, 0x9f, 0x1b, 0xbc, 0xb1, 0xd0, 0x75, 0x11, 0x82, 0xdd, 0x22, 0xb3, 0x03, 0x3b, + 0x09, 0x95, 0xbb, 0xbb, 0x15, 0x2a, 
0xcb, 0x03, 0x78, 0x3d, 0x8c, 0x49, 0x91, 0x0e, 0xe7, 0x8c, + 0xc3, 0xb3, 0xee, 0x95, 0x75, 0xd4, 0x19, 0x21, 0xd0, 0x7e, 0xd6, 0xa9, 0x20, 0x2d, 0x1a, 0x48, + 0xf6, 0xe7, 0x77, 0x11, 0xe8, 0x0b, 0x60, 0xb4, 0x01, 0x13, 0x54, 0x68, 0x8c, 0x6c, 0xbe, 0xbe, + 0x60, 0xbf, 0x7f, 0x75, 0x4a, 0x54, 0x84, 0x0a, 0xfb, 0xfe, 0x7d, 0x07, 0xc8, 0x4a, 0x45, 0xaf, + 0x6b, 0xcf, 0xab, 0x73, 0xc6, 0xf0, 0x9a, 0x47, 0xbc, 0xce, 0x1b, 0x20, 0xfa, 0xc6, 0xa8, 0x5b, + 0x5e, 0x09, 0xed, 0x6d, 0x79, 0x3b, 0x4e, 0xbe, 0x40, 0xc7, 0x55, 0x72, 0x1c, 0x65, 0xe6, 0x9e, + 0x73, 0x9f, 0xb8, 0xb1, 0x79, 0x12, 0xd4, 0xf7, 0x2c, 0x51, 0x07, 0xc3, 0xc2, 0x3f, 0xc3, 0xf4, + 0xd7, 0x6f, 0xa6, 0x03, 0x19, 0x76, 0x51, 0xca, 0xae, 0x91, 0x63, 0x01, 0x60, 0xd4, 0xf0, 0xf5, + 0xcf, 0x11, 0x10, 0xfa, 0xb2, 0x37, 0x59, 0x86, 0x0b, 0x8d, 0xff, 0x83, 0xf4, 0xe0, 0x45, 0x6d, + 0x80, 0x4c, 0xd1, 0x25, 0xa6, 0xbc, 0x01, 0x04, 0x42, 0x74, 0x06, 0x93, 0x2f, 0xc6, 0xe4, 0x5c, + 0xb6, 0x9b, 0x1d, 0xff, 0x86, 0x87, 0x83, 0xf3, 0x9f, 0x2f, 0x72, 0x8f, 0x5a, 0x20, 0xc5, 0xa7, + 0xd3, 0x50, 0x1f, 0x02, 0x1c, 0xae, 0x85, 0x52, 0x6d, 0x06, 0x99, 0xbb, 0x37, 0xf7, 0xe4, 0x01, + 0x6d, 0xc0, 0x8c, 0x71, 0x10, 0xc2, 0x79, 0xf6, 0x3f, 0xdf, 0xd6, 0x3f, 0xd4, 0x2e, 0x76, 0x5a, + 0x6c, 0x5f, 0x50, 0x60, 0xce, 0x7d, 0xeb, 0xac, 0xce, 0xab, 0xb3, 0x0a, 0x5f, 0xad, 0x49, 0x8c, + 0xc6, 0x93, 0x18, 0x08, 0x3f, 0x76, 0xa0, 0x16, 0xee, 0xa5, 0xce, 0x9d, 0x53, 0x53, 0xea, 0xf9, + 0x86, 0xc0, 0x80, 0x3b, 0x31, 0x59, 0x6d, 0x7f, 0x87, 0x66, 0x9f, 0xd7, 0xc5, 0x55, 0x75, 0xf7, + 0xef, 0xe1, 0xd8, 0x13, 0x5f, 0x0c, 0x08, 0x8b, 0xd3, 0x8e, 0x63, 0x90, 0x45, 0x24, 0x68, 0xf6, + 0x89, 0x3b, 0x15, 0x0e, 0x1a, 0x93, 0x4f, 0x98, 0xc2, 0x73, 0xf3, 0x98, 0x0d, 0x75, 0xce, 0x38, + 0x02, 0xa4, 0xa1, 0xbd, 0x4f, 0x5d, 0xe6, 0xff, 0x7c, 0xfb, 0x24, 0xb0, 0xcc, 0xe2, 0xb8, 0x9c, + 0xe0, 0xd7, 0xb4, 0xec, 0x7d, 0x22, 0x2c, 0x61, 0x89, 0xc8, 0x55, 0xf3, 0x3b, 0xc5, 0xa7, 0x33, + 0x28, 0xc4, 0x13, 0x9c, 0x5d, 0xec, 0x81, 0x32, 0x26, 0xe4, 0x0e, 0x0e, 0x68, 0x08, 0x48, 0xb9, + 0x7b, 0xd0, 0x02, 0x69, 0x4d, 0x0d, 0x3c, 0xc5, 0x71, 0x51, 0x85, 0x2b, 0xa4, 0x3b, 0x28, 0x20, + 0xc6, 0x25, 0x91, 0xcf, 0x4a, 0x06, 0xc9, 0x9b, 0xdb, 0x1d, 0x44, 0x4e, 0x77, 0xb9, 0x94, 0x76, + 0xd8, 0xf0, 0x2a, 0xea, 0x7c, 0x3e, 0x19, 0x16, 0xff, 0x8c, 0x41, 0x1d, 0x82, 0xb0, 0x18, 0xf0, + 0x3e, 0xd4, 0x85, 0xf8, 0x02, 0x6d, 0x98, 0x0f, 0x0c, 0x39, 0x09, 0xc5, 0x7c, 0x2d, 0xf8, 0x0a, + 0xe7, 0xb8, 0xfa, 0x91, 0xbe, 0x9e, 0x2d, 0x51, 0x99, 0xbe, 0x20, 0xce, 0x3a, 0x41, 0xdd, 0xba, + 0x93, 0xe1, 0xa9, 0xe4, 0x67, 0x72, 0x5b, 0xd5, 0xa8, 0x57, 0x87, 0x20, 0xb8, 0xd2, 0x8c, 0xe1, + 0xca, 0x17, 0x35, 0xd5, 0xc9, 0xa9, 0x6d, 0x50, 0x03, 0x13, 0xde, 0xc0, 0x99, 0x16, 0x45, 0x73, + 0x2a, 0xa2, 0x22, 0x36, 0x91, 0x09, 0xa8, 0xda, 0x20, 0xeb, 0x61, 0x7f, 0x67, 0x14, 0x1f, 0x11, + 0x8d, 0xc4, 0x42, 0x55, 0xfc, 0xcc, 0x07, 0x53, 0xde, 0x46, 0x16, 0xfc, 0x67, 0x87, 0x82, 0x02, + 0xdd, 0xeb, 0x39, 0x39, 0x7a, 0xfc, 0x8c, 0xf6, 0xed, 0x80, 0x90, 0x99, 0x3a, 0xd5, 0xfd, 0xe4, + 0x27, 0x84, 0xfb, 0xe2, 0xa3, 0x7a, 0xf6, 0x78, 0x11, 0x9b, 0xff, 0xe2, 0x5d, 0x6e, 0x74, 0xf1, + 0x18, 0xd2, 0x1a, 0x87, 0xeb, 0x5c, 0x79, 0x23, 0xe4, 0x19, 0x36, 0x78, 0x5c, 0x7d, 0xc3, 0x95, + 0xa0, 0xe6, 0x7d, 0xfd, 0x30, 0x76, 0xee, 0x99, 0xe3, 0xc9, 0x9a, 0x3f, 0x6b, 0xcc, 0x9e, 0x42, + 0x87, 0x8c, 0x69, 0xf8, 0x1c, 0x47, 0x17, 0x94, 0x19, 0x33, 0x02, 0xb0, 0xf8, 0x05, 0x71, 0x64, + 0x1b, 0x0b, 0xa3, 0xf7, 0xe3, 0x17, 0xe3, 0x30, 0x48, 0x5e, 0x93, 0xd4, 0x7e, 0x3b, 0xde, 0x7b, + 0x9e, 0xdc, 0xb3, 0x9f, 0x6d, 0x80, 0xa8, 0x76, 0x01, 0x8b, 
0xd9, 0xb1, 0xd3, 0x9d, 0x49, 0xee, + 0xae, 0x11, 0x97, 0x38, 0xe5, 0xa7, 0xb3, 0xbe, 0x57, 0xab, 0x79, 0x19, 0x9c, 0x2c, 0xf2, 0xa5, + 0xb3, 0xf3, 0xe7, 0xb9, 0xd3, 0x51, 0x12, 0xf4, 0x5e, 0xcf, 0xe2, 0xb4, 0x52, 0x61, 0x37, 0xa3, + 0x46, 0xaa, 0xce, 0x95, 0xf4, 0x26, 0x65, 0x77, 0xe0, 0xba, 0xab, 0x24, 0x53, 0x09, 0x8f, 0xd2, + 0x3e, 0x2e, 0x9d, 0xda, 0x09, 0x09, 0x1a, 0xee, 0x5d, 0x1c, 0xd4, 0xe4, 0x7f, 0x0f, 0x32, 0xaa, + 0x47, 0x9f, 0x33, 0x0f, 0xcb, 0x7f, 0xc8, 0xc0, 0x8f, 0x16, 0x52, 0xd8, 0x0e, 0x97, 0xce, 0xf7, + 0x70, 0x8f, 0x34, 0x6c, 0xbe, 0xa0, 0xb4, 0xda, 0x7f, 0x6d, 0xbb, 0x2f, 0xd7, 0x1a, 0x12, 0x81, + 0xa6, 0x99, 0x89, 0x42, 0x3f, 0x4e, 0x99, 0xae, 0x27, 0xa0, 0x59, 0x47, 0xd6, 0xe2, 0xcf, 0x4b, + 0xeb, 0x32, 0x26, 0x00, 0x42, 0xb7, 0x1e, 0x47, 0xd4, 0x7e, 0x53, 0x36, 0x23, 0x31, 0x46, 0x71, + 0x10, 0x2b, 0x37, 0xa4, 0xba, 0x22, 0xbc, 0x99, 0x79, 0xb0, 0xeb, 0x5e, 0x19, 0xe5, 0x95, 0x61, + 0xda, 0x2f, 0x66, 0xe0, 0xb6, 0x5c, 0x1e, 0xe9, 0xbc, 0xda, 0x7e, 0xd0, 0x07, 0x5a, 0xf4, 0x65, + 0xfd, 0x9c, 0x95, 0xab, 0x67, 0x68, 0x9d, 0x7e, 0x50, 0x8c, 0xef, 0xd6, 0x07, 0x32, 0x3f, 0xc3, + 0xcf, 0xf2, 0xc2, 0xdb, 0x40, 0x1e, 0x5e, 0x99, 0xbd, 0xbf, 0x57, 0x37, 0x8c, 0x65, 0xc3, 0x49, + 0x49, 0xa3, 0xb3, 0x25, 0x0a, 0x7d, 0x7c, 0x50, 0xf6, 0x86, 0x35, 0xd3, 0xd9, 0x58, 0x39, 0xa9, + 0x9e, 0x84, 0x0b, 0x6f, 0x76, 0x2b, 0xef, 0xdd, 0x69, 0x18, 0x8e, 0x54, 0x2a, 0x5b, 0xf1, 0xd0, + 0x0f, 0xbd, 0xe9, 0x51, 0x41, 0x65, 0xb5, 0x73, 0xa1, 0x6d, 0xa3, 0x10, 0x18, 0xf6, 0x3c, 0xe9, + 0x1d, 0xfd, 0x68, 0xa0, 0x9c, 0xc8, 0x61, 0x43, 0xc3, 0x7a, 0x53, 0x56, 0xfd, 0x46, 0x26, 0x5c, + 0x6a, 0x83, 0x6a, 0x1c, 0xfa, 0x60, 0x8a, 0xca, 0x67, 0xa0, 0xbc, 0xf9, 0x0f, 0x49, 0x46, 0x5e, + 0xe1, 0x4b, 0x9f, 0x40, 0xc7, 0xad, 0x24, 0x38, 0x6d, 0x3e, 0xde, 0x7a, 0xce, 0x21, 0x70, 0xf3, + 0xfc, 0x08, 0xe5, 0x0d, 0xc7, 0x9e, 0xf1, 0xb7, 0x88, 0x9f, 0xd8, 0x95, 0x58, 0x70, 0x30, 0xfd, + 0xd5, 0xf0, 0xcb, 0x08, 0x77, 0xe1, 0x81, 0x54, 0x87, 0x80, 0xb8, 0x4c, 0xfb, 0x18, 0x2e, 0xca, + 0x08, 0x38, 0x18, 0x48, 0x55, 0x9b, 0xa5, 0xf0, 0x56, 0x4a, 0x05, 0x7e, 0xc6, 0x36, 0xac, 0x4d, + 0x1b, 0x04, 0x6e, 0x71, 0xb5, 0xac, 0x12, 0x83, 0xb9, 0xd6, 0x28, 0x53, 0x89, 0xc3, 0xd6, 0x2d, + 0x0b, 0x37, 0x6d, 0x7c, 0x5e, 0x20, 0x90, 0x35, 0x82, 0xc2, 0x85, 0x9e, 0x13, 0xd8, 0x01, 0x18, + 0x92, 0xae, 0x89, 0x4d, 0xc7, 0xb9, 0x93, 0x27, 0x54, 0x03, 0xe5, 0xa5, 0x4d, 0xe6, 0x24, 0x8b, + 0x85, 0xf6, 0x5a, 0xb1, 0x17, 0x07, 0xa4, 0xc5, 0xaa, 0xff, 0x74, 0xbc, 0x80, 0x48, 0xa5, 0x40, + 0x2c, 0x42, 0xe8, 0x95, 0x3c, 0xb1, 0xa3, 0x42, 0x9f, 0x82, 0x37, 0x88, 0x61, 0xce, 0x8e, 0x80, + 0xa0, 0xa6, 0x07, 0xe3, 0xfb, 0x50, 0x91, 0x9a, 0x3d, 0x48, 0xd5, 0x4a, 0xac, 0xcd, 0xcc, 0xe8, + 0x59, 0xaa, 0xeb, 0x2b, 0x41, 0xb9, 0xa0, 0xb1, 0xd0, 0x8b, 0x51, 0x91, 0x52, 0xf6, 0x56, 0xf9, + 0xb2, 0x18, 0xa7, 0x3d, 0x1b, 0x15, 0xb9, 0x61, 0x41, 0xc4, 0xc1, 0x4d, 0x59, 0xc9, 0x03, 0x76, + 0xc8, 0x58, 0x0c, 0x51, 0x96, 0x3c, 0x45, 0x22, 0xa7, 0x20, 0x96, 0x3d, 0x63, 0xe9, 0xb6, 0x5b, + 0x8a, 0x22, 0xc2, 0xa4, 0x8b, 0xe8, 0x6a, 0x9d, 0x85, 0xe5, 0xe6, 0x14, 0x3f, 0xff, 0xb8, 0x91, + 0x04, 0xe2, 0xb0, 0x84, 0xc4, 0xe8, 0x52, 0xa5, 0x1f, 0xa1, 0x84, 0x0b, 0xa2, 0x11, 0x61, 0xf4, + 0x38, 0x1b, 0x11, 0x6a, 0x6b, 0xca, 0x71, 0x70, 0x3d, 0x56, 0xa8, 0xe9, 0x23, 0xbc, 0xb6, 0x02, + 0xa4, 0x44, 0x83, 0x9a, 0xd8, 0xe5, 0x04, 0xf9, 0x64, 0xa3, 0x6b, 0xab, 0xfb, 0x71, 0x8b, 0xa1, + 0xb2, 0xfc, 0xbe, 0x11, 0xa5, 0x6b, 0x27, 0xbb, 0xff, 0x18, 0x83, 0x4c, 0xc6, 0xc8, 0x26, 0x95, + 0x78, 0xa1, 0x51, 0x56, 0x22, 0xd9, 0xa4, 0xed, 0x0c, 0x18, 0x85, 0x9d, 0x2f, 0x91, 
0xf4, 0x58, + 0xe0, 0x4c, 0x15, 0x4b, 0x6e, 0x7c, 0x70, 0x8f, 0xa6, 0x64, 0x3f, 0x07, 0x1e, 0xa6, 0xee, 0x0a, + 0x23, 0x3b, 0xc0, 0xd1, 0x23, 0x70, 0x45, 0x24, 0x20, 0x2a, 0x28, 0x26, 0x7c, 0xe8, 0x06, 0x70, + 0xe1, 0x53, 0xbd, 0x85, 0xe2, 0x22, 0x7f, 0xc1, 0xf1, 0xe5, 0xea, 0x6f, 0xa7, 0xb5, 0xfd, 0xc3, + 0x6f, 0x87, 0xbe, 0x3d, 0x18, 0xf1, 0x09, 0x31, 0x5e, 0x7a, 0xcf, 0xb4, 0xcb, 0x55, 0x7d, 0xe9, + 0x00, 0xa6, 0xa8, 0x28, 0x04, 0x73, 0x86, 0x1f, 0x29, 0xdb, 0x04, 0x05, 0x73, 0x08, 0x06, 0x2b, + 0x1b, 0x7b, 0xbf, 0x5e, 0x63, 0x8c, 0x4e, 0x8d, 0x58, 0xc5, 0xe5, 0x46, 0xe0, 0x14, 0x05, 0x85, + 0xfc, 0xa7, 0x8b, 0x01, 0x60, 0x7c, 0x5d, 0x46, 0xb6, 0x3b, 0xe9, 0x09, 0x95, 0x5c, 0x97, 0x08, + 0x49, 0xaa, 0x30, 0x54, 0xb0, 0x8d, 0x0f, 0x61, 0xfb, 0x20, 0xec, 0x0b, 0x7b, 0x0c, 0x83, 0xd5, + 0xc7, 0x87, 0x2d, 0x70, 0xe9, 0xdf, 0x92, 0x93, 0xec, 0xe2, 0x23, 0xaf, 0x17, 0xcc, 0x3f, 0xa9, + 0xb7, 0x76, 0x0f, 0x95, 0xf5, 0x43, 0x01, 0x39, 0x4f, 0x67, 0xa5, 0x65, 0x4f, 0x6c, 0x51, 0x5d, + 0xdb, 0x30, 0xa0, 0x0a, 0x90, 0x8c, 0x75, 0x7a, 0xd9, 0xcd, 0x57, 0x80, 0x41, 0xd9, 0x39, 0xec, + 0x84, 0x71, 0x53, 0x4a, 0x10, 0xed, 0x33, 0x8c, 0x87, 0x30, 0xf6, 0x46, 0x34, 0xae, 0xb8, 0x36, + 0x14, 0x9a, 0xa7, 0x44, 0xb5, 0x88, 0xa1, 0x66, 0x2e, 0xe5, 0x84, 0xce, 0xad, 0x4b, 0x90, 0x4e, + 0xad, 0xdc, 0xd7, 0xb7, 0xe6, 0x20, 0xc3, 0x09, 0x78, 0x7a, 0x43, 0xd1, 0xb1, 0x05, 0xba, 0x5c, + 0xa8, 0x15, 0x97, 0xc8, 0x7c, 0xcb, 0xba, 0x77, 0x5c, 0x6c, 0x7c, 0x82, 0xe0, 0x5d, 0xb9, 0xd4, + 0x34, 0xfc, 0x9d, 0xe4, 0xa8, 0x13, 0xc4, 0xbb, 0x54, 0x28, 0x7a, 0x40, 0xd4, 0x58, 0x4b, 0x37, + 0x81, 0xb8, 0x3d, 0x5e, 0xa0, 0x5c, 0x21, 0xec, 0x1b, 0xf5, 0x32, 0xc5, 0x3d, 0x6a, 0xf6, 0x6e, + 0xb5, 0x39, 0xee, 0xae, 0xea, 0x4c, 0x9d, 0xca, 0x42, 0xce, 0x6b, 0x76, 0x72, 0x47, 0x7a, 0x11, + 0x8c, 0x63, 0xfb, 0x3f, 0x55, 0xb4, 0x5f, 0x66, 0x5c, 0x3a, 0xb2, 0x89, 0xc0, 0xea, 0xcc, 0xb4, + 0x24, 0x30, 0xec, 0xd4, 0x62, 0xa9, 0xed, 0x3e, 0x48, 0x11, 0x66, 0x90, 0x5a, 0x84, 0x31, 0x64, + 0x92, 0x57, 0x1a, 0x06, 0x53, 0xb9, 0x68, 0x66, 0xe9, 0x9d, 0x92, 0x8d, 0x95, 0xb8, 0x82, 0xe5, + 0xd4, 0x2a, 0x37, 0x70, 0x76, 0x1d, 0x3b, 0x51, 0x80, 0x67, 0x3d, 0x31, 0x6e, 0x1e, 0x6c, 0x13, + 0xa7, 0x96, 0xb6, 0x1e, 0x84, 0x69, 0x3b, 0x42, 0xc8, 0xc8, 0x2d, 0x6f, 0x79, 0x93, 0xe5, 0x9c, + 0x42, 0xfb, 0x7c, 0xf2, 0xe5, 0xa2, 0xf7, 0x89, 0xe7, 0xbd, 0xf3, 0x00, 0x64, 0x85, 0x7d, 0xe2, + 0xbe, 0x9e, 0x17, 0xbd, 0x59, 0xc2, 0x35, 0x67, 0x9c, 0x2e, 0xf6, 0xd1, 0x93, 0x4a, 0x6f, 0xd2, + 0xef, 0x65, 0x54, 0x8c, 0xea, 0x5c, 0x68, 0x86, 0xce, 0xba, 0x0f, 0xab, 0x0f, 0x71, 0x06, 0x62, + 0x80, 0x8f, 0xe9, 0xc6, 0xa0, 0x77, 0xce, 0x2d, 0x42, 0xe6, 0x6c, 0x22, 0xbe, 0xa6, 0x49, 0x6a, + 0x65, 0x15, 0x09, 0x9f, 0x23, 0xe9, 0x8a, 0xf4, 0x48, 0x96, 0x92, 0xed, 0x28, 0x6f, 0xbf, 0x76, + 0xea, 0x00, 0x6c, 0xf0, 0xfe, 0x6d, 0x92, 0x75, 0x76, 0x65, 0xf2, 0xd3, 0x1b, 0x02, 0xf7, 0x6e, + 0xab, 0xb1, 0x04, 0x11, 0x16, 0x5c, 0x00, 0xd2, 0x20, 0x86, 0xea, 0x1b, 0xf4, 0x6a, 0xdd, 0x4e, + 0x62, 0x85, 0xa8, 0xd0, 0xf2, 0x94, 0xe1, 0x14, 0xfc, 0x0c, 0x2b, 0x7e, 0x8f, 0x53, 0xda, 0xd8, + 0x5f, 0x96, 0x43, 0x16, 0xf4, 0xb0, 0xfe, 0x6d, 0x44, 0x74, 0xec, 0xd0, 0x35, 0xfa, 0x45, 0xdd, + 0xac, 0xc0, 0xe2, 0x25, 0x2f, 0x3f, 0x71, 0xba, 0xb6, 0x28, 0x90, 0x28, 0x5d, 0x30, 0xf6, 0x22, + 0x56, 0xc3, 0xe3, 0xf3, 0xc9, 0x85, 0xe5, 0x7c, 0xc6, 0xb0, 0xfa, 0xd6, 0xee, 0xcf, 0xef, 0x72, + 0xb0, 0xcd, 0x82, 0x47, 0x1d, 0x5e, 0x66, 0xa4, 0xb0, 0x60, 0x32, 0xd9, 0x82, 0x12, 0x6e, 0x6e, + 0xe1, 0x5d, 0xcc, 0x86, 0x34, 0xfc, 0xf6, 0xfb, 0x0c, 0x81, 0x57, 0x19, 0xe8, 0x4d, 0x7c, 0xbf, + 0xa4, 0x00, 
0xcc, 0x4c, 0x15, 0x57, 0xff, 0x8e, 0x51, 0xa8, 0x4a, 0xf0, 0x9a, 0xb5, 0xce, 0x04, + 0xc7, 0x7c, 0xb1, 0xb0, 0xc3, 0x5d, 0x38, 0xb9, 0xd7, 0x77, 0xe7, 0x74, 0x65, 0x40, 0xad, 0x4d, + 0x08, 0x99, 0xc0, 0xf5, 0x53, 0xfd, 0x06, 0x59, 0xfe, 0x4b, 0xc0, 0xa2, 0xbf, 0x94, 0xbe, 0xfd, + 0xa1, 0xa7, 0xdb, 0x95, 0x5b, 0xa1, 0xc2, 0xb0, 0xb6, 0xe3, 0xda, 0xfd, 0x73, 0xee, 0x30, 0x04, + 0xed, 0x9f, 0xd8, 0x26, 0x5a, 0xf5, 0x9c, 0x13, 0x58, 0x44, 0x3f, 0x15, 0x91, 0xeb, 0x3a, 0xce, + 0xdd, 0xb7, 0xee, 0x97, 0xda, 0xcb, 0xd6, 0x38, 0xfc, 0xac, 0xca, 0x1d, 0xe3, 0x59, 0xd7, 0x7e, + 0x9b, 0x22, 0x59, 0x33, 0xaf, 0xef, 0xc4, 0x4b, 0xfc, 0x8d, 0x05, 0x24, 0xad, 0x7f, 0x70, 0x8b, + 0xe7, 0x8f, 0xd9, 0xc7, 0x1f, 0xe0, 0x51, 0x9a, 0x8d, 0x36, 0xa8, 0xdb, 0x7a, 0xd8, 0xb2, 0x93, + 0xdd, 0xa6, 0x87, 0x87, 0x5a, 0xaf, 0x95, 0x63, 0x2d, 0x27, 0x71, 0x2b, 0x75, 0xdc, 0x28, 0x15, + 0x8a, 0x86, 0x81, 0xa1, 0x52, 0x7b, 0xc5, 0xb9, 0x11, 0x83, 0x94, 0x41, 0x57, 0x30, 0x6a, 0x2a, + 0xcc, 0x70, 0x4b, 0xa4, 0x5b, 0xb2, 0x20, 0xf9, 0xf6, 0xc3, 0x0d, 0x3a, 0x12, 0x3e, 0x18, 0xb6, + 0x5f, 0x6f, 0x8d, 0x90, 0x6d, 0xc2, 0x63, 0x8e, 0x57, 0x0f, 0x3d, 0x58, 0xd8, 0x1a, 0xce, 0x98, + 0xbd, 0x49, 0x4c, 0x9f, 0x70, 0xfa, 0x2b, 0x1c, 0x07, 0x5f, 0xe1, 0x27, 0x81, 0x6d, 0xf4, 0xf4, + 0x0f, 0xbb, 0x72, 0xa4, 0x94, 0x11, 0xd5, 0xc0, 0xe2, 0xb7, 0x9c, 0x81, 0xf7, 0xd9, 0x9b, 0xd8, + 0x01, 0x6d, 0x94, 0xf9, 0x28, 0xf6, 0xbe, 0x6f, 0x9e, 0xdf, 0x24, 0x05, 0x1c, 0x38, 0x75, 0x5f, + 0x41, 0x49, 0xfc, 0x27, 0xf6, 0xfa, 0xc0, 0x8e, 0x4e, 0xd5, 0x94, 0x60, 0xf2, 0x4a, 0xf2, 0xb9, + 0x3f, 0x8c, 0xb2, 0xbd, 0x89, 0x6f, 0x84, 0x83, 0x25, 0x6e, 0xa9, 0xc3, 0x1c, 0xec, 0x57, 0xfb, + 0xd0, 0x8d, 0x55, 0xa8, 0x94, 0x6b, 0x87, 0x4d, 0xd2, 0x7b, 0x83, 0x0b, 0x51, 0x11, 0x42, 0x8a, + 0x98, 0x8f, 0x36, 0xe3, 0xe9, 0x99, 0x81, 0x02, 0x7a, 0xe1, 0xd9, 0x6c, 0x53, 0xd2, 0x22, 0xf1, + 0x3a, 0xd9, 0xc8, 0xc4, 0x94, 0xb7, 0xc2, 0x08, 0x9e, 0x1e, 0x99, 0xce, 0x22, 0x8d, 0xf0, 0x45, + 0xb9, 0x51, 0x7c, 0x72, 0x77, 0xe1, 0x8d, 0x15, 0x9b, 0x54, 0x0a, 0x67, 0xfe, 0x07, 0x1e, 0x54, + 0x4f, 0xd3, 0x3f, 0x50, 0xb2, 0x06, 0xf1, 0xf1, 0xb2, 0xae, 0x5f, 0x8f, 0x30, 0xd3, 0xfc, 0x15, + 0xd7, 0x79, 0x8e, 0xe2, 0x6d, 0xb8, 0xd4, 0x70, 0x4c, 0xb6, 0x7c, 0x80, 0x25, 0x49, 0x73, 0xf9, + 0x7e, 0xa0, 0xff, 0x54, 0x91, 0x06, 0x48, 0xe0, 0x7f, 0xb4, 0x15, 0x6c, 0x53, 0xa6, 0x07, 0xcc, + 0x08, 0x94, 0xd0, 0xcc, 0x7e, 0xa3, 0xd5, 0xa4, 0x74, 0x94, 0x50, 0xf1, 0xb1, 0x77, 0x0b, 0x18, + 0x07, 0xf6, 0xee, 0x97, 0xfd, 0xa7, 0xd7, 0xdc, 0x43, 0x1e, 0x74, 0x69, 0xf3, 0xcb, 0xf5, 0xd7, + 0x04, 0xb2, 0x66, 0xa6, 0x37, 0xeb, 0xfa, 0xf3, 0x33, 0x05, 0xba, 0x9e, 0x51, 0x63, 0xee, 0x25, + 0x1c, 0xac, 0x52, 0x9f, 0x0c, 0x1b, 0x78, 0x67, 0xb8, 0x65, 0x44, 0x6d, 0x11, 0x71, 0x02, 0xf5, + 0x5e, 0xaa, 0x7a, 0x46, 0x79, 0xfb, 0xf8, 0x61, 0x98, 0x68, 0x74, 0x76, 0x6b, 0xeb, 0x79, 0x6c, + 0x74, 0x07, 0x84, 0xcf, 0x55, 0xdd, 0x84, 0x49, 0x97, 0xfc, 0xc0, 0x4a, 0x19, 0x02, 0x1c, 0x1c, + 0xd7, 0x9c, 0x19, 0xcd, 0xa3, 0x8b, 0x77, 0xe3, 0xcf, 0x3a, 0x85, 0xc7, 0xcb, 0xe6, 0xf5, 0x4e, + 0xc6, 0xa6, 0x60, 0xad, 0xea, 0x3c, 0x7e, 0x9f, 0x6b, 0xe1, 0x34, 0x2e, 0x82, 0xd3, 0xe5, 0x71, + 0xac, 0x05, 0xfd, 0xf9, 0x0f, 0x2a, 0x3d, 0xd8, 0xa9, 0xe2, 0x09, 0x7f, 0x9a, 0x50, 0x55, 0x57, + 0x0a, 0x0f, 0x2b, 0x5d, 0xde, 0x5d, 0x96, 0xfb, 0x5e, 0x46, 0x76, 0xb1, 0x46, 0xaa, 0x3c, 0x73, + 0x4a, 0x1c, 0x62, 0x1b, 0xce, 0xf1, 0x38, 0xdb, 0x80, 0x86, 0xd1, 0xea, 0x0b, 0x6e, 0xf1, 0x5d, + 0xf3, 0x4a, 0xb7, 0x21, 0x84, 0xcc, 0x39, 0xeb, 0x41, 0x4d, 0x71, 0xa0, 0x25, 0xf8, 0x69, 0xcc, + 0x54, 0x98, 0x0d, 0x5c, 0xf8, 0x9a, 
0xf5, 0xd0, 0x6d, 0x6f, 0x2b, 0xf5, 0x87, 0x8e, 0x27, 0xfd, + 0x0a, 0x91, 0x3d, 0xdd, 0x9e, 0x43, 0x9f, 0x14, 0x03, 0x77, 0xfd, 0x50, 0xcd, 0xd3, 0xa6, 0x06, + 0x1f, 0x20, 0x4e, 0xf0, 0xaf, 0xf8, 0x5c, 0x64, 0x02, 0x39, 0xca, 0xd2, 0x20, 0xee, 0xc5, 0xb7, + 0x8b, 0xa5, 0xbe, 0xb8, 0x9d, 0x4e, 0x2b, 0x47, 0x0c, 0x63, 0x36, 0xbc, 0xf1, 0x3c, 0xc2, 0x63, + 0x1f, 0xa0, 0xa3, 0xbd, 0xa6, 0x3e, 0x7b, 0x8c, 0x89, 0x34, 0xe7, 0xb6, 0x9f, 0x49, 0x65, 0xfb, + 0x0c, 0x04, 0xc3, 0x2f, 0x1b, 0x22, 0x7a, 0x5c, 0x3d, 0x32, 0x58, 0x8a, 0xbd, 0x7c, 0x6e, 0x8c, + 0x67, 0xf1, 0x6d, 0x2a, 0xa1, 0x95, 0xa9, 0x3e, 0xa1, 0x39, 0xbb, 0xcc, 0x25, 0xf5, 0xd0, 0x19, + 0xce, 0x6b, 0x72, 0xd4, 0x79, 0x1c, 0x55, 0x39, 0xfc, 0x73, 0x00, 0xc8, 0xbc, 0x3f, 0xe9, 0x26, + 0x50, 0x1a, 0x92, 0xe8, 0x28, 0xb1, 0x0b, 0xda, 0xf9, 0xcc, 0x2f, 0x25, 0x29, 0xa7, 0x87, 0xbf, + 0xef, 0xbd, 0x8d, 0x09, 0xfd, 0xf2, 0x7d, 0xbb, 0xd7, 0x49, 0x9f, 0x26, 0x6e, 0xa3, 0xe7, 0xf7, + 0xa2, 0xa6, 0xb0, 0xa8, 0x33, 0x7b, 0x4b, 0x78, 0x6f, 0x46, 0xe8, 0x30, 0x4c, 0x4b, 0xab, 0x0e, + 0xe5, 0xdf, 0x3c, 0xe5, 0x64, 0xc0, 0x15, 0x39, 0x64, 0xbd, 0xb3, 0xfe, 0x76, 0x08, 0x1b, 0xda, + 0x1a, 0x0f, 0x6b, 0xb2, 0x16, 0x13, 0xf2, 0xac, 0xd0, 0xc8, 0xf6, 0xe6, 0xa2, 0x74, 0xb4, 0x5d, + 0x6d, 0xe4, 0x91, 0x91, 0xb1, 0x89, 0xab, 0x3c, 0x6c, 0xaf, 0x8f, 0x14, 0x64, 0xde, 0xfc, 0xa8, + 0x03, 0x73, 0x91, 0x6f, 0x5b, 0xbb, 0xe4, 0x8b, 0x3e, 0x72, 0xaa, 0x17, 0x50, 0x4e, 0xa6, 0xf2, + 0x8b, 0x2c, 0x26, 0x3f, 0xa0, 0x0a, 0x3c, 0xf7, 0x68, 0x9f, 0xf8, 0xad, 0x5e, 0x19, 0x06, 0x46, + 0xe3, 0x41, 0xee, 0x77, 0x11, 0x44, 0x5b, 0x6c, 0xe1, 0x58, 0x35, 0x07, 0xc3, 0x38, 0x9c, 0x07, + 0x9f, 0x13, 0x1a, 0x80, 0xbf, 0xc7, 0x07, 0xd9, 0xc9, 0x44, 0x5b, 0xc4, 0x14, 0xb5, 0x1e, 0x90, + 0x15, 0xe1, 0xd6, 0x5b, 0x18, 0xd0, 0xe7, 0xa3, 0x0d, 0x33, 0x64, 0x9e, 0x32, 0xa3, 0xf9, 0x56, + 0x61, 0x6a, 0xf0, 0xfc, 0xae, 0x61, 0xe1, 0xd4, 0x8f, 0x3e, 0xd9, 0xfd, 0x98, 0x9e, 0x54, 0xcc, + 0x3d, 0x02, 0xd8, 0x1b, 0xc1, 0x47, 0x95, 0x02, 0xc4, 0xae, 0xd1, 0xe3, 0xc6, 0x62, 0xe2, 0x56, + 0xbc, 0x76, 0xf0, 0x81, 0xbe, 0xfe, 0x05, 0x38, 0xb5, 0x30, 0x1f, 0x65, 0x2f, 0x81, 0x1d, 0x89, + 0x9f, 0x54, 0x8f, 0x76, 0x2e, 0x3a, 0x62, 0x2a, 0x9c, 0x69, 0xb6, 0x81, 0x64, 0x25, 0xa5, 0x78, + 0xd8, 0x1a, 0xbf, 0x6e, 0xb2, 0xe1, 0xf3, 0x97, 0x10, 0x90, 0xfd, 0xd0, 0x75, 0xbd, 0x99, 0x77, + 0x2b, 0x79, 0x43, 0x77, 0xdb, 0xc2, 0xc5, 0x92, 0xef, 0xf1, 0x3c, 0x4d, 0x32, 0x93, 0x39, 0xd3, + 0x5e, 0x25, 0x53, 0x9f, 0x59, 0x81, 0x27, 0xc5, 0x00, 0xa4, 0x8a, 0x55, 0x55, 0xb0, 0xe7, 0xda, + 0x81, 0xaf, 0x64, 0x0b, 0x10, 0x12, 0x81, 0xb4, 0xc6, 0x08, 0xa3, 0xa6, 0x97, 0xa7, 0x59, 0x45, + 0x74, 0xb3, 0x38, 0xef, 0x03, 0xc2, 0x65, 0xa8, 0xcb, 0x92, 0x89, 0x41, 0xbb, 0xfc, 0xe2, 0x95, + 0x50, 0x49, 0xb7, 0xf7, 0x62, 0x1e, 0x99, 0xa9, 0xc6, 0xda, 0x46, 0x4a, 0xff, 0xd4, 0x7f, 0x62, + 0x70, 0x7c, 0xd4, 0xfa, 0xa8, 0x30, 0x81, 0xf8, 0xc6, 0x0b, 0x41, 0xd8, 0xfc, 0x9d, 0x1f, 0x4d, + 0x57, 0xd6, 0xf0, 0xad, 0x90, 0x4b, 0x15, 0x84, 0x2d, 0x12, 0x41, 0x2a, 0x23, 0x71, 0xad, 0x4a, + 0x52, 0x13, 0x42, 0x7d, 0xbb, 0x9d, 0x85, 0x78, 0xa7, 0xbd, 0x1a, 0x1c, 0xf4, 0xdb, 0xb9, 0x3b, + 0x45, 0x41, 0x54, 0x41, 0x28, 0xea, 0x5c, 0x82, 0x70, 0x3d, 0x95, 0xc1, 0x10, 0x02, 0x74, 0xa0, + 0xb6, 0xc9, 0xcf, 0xe4, 0x03, 0x34, 0x05, 0x95, 0x51, 0xa1, 0xf0, 0xec, 0xce, 0xee, 0x55, 0xc9, + 0xe2, 0xb0, 0xf9, 0x11, 0xca, 0xd5, 0xae, 0x26, 0xe5, 0xc8, 0x2a, 0x6d, 0xb1, 0xf2, 0x77, 0xfb, + 0x76, 0x84, 0x0d, 0x98, 0xcc, 0x6d, 0x89, 0x5b, 0x51, 0xf8, 0x34, 0x03, 0x0b, 0x7f, 0xfd, 0xda, + 0x26, 0x09, 0x63, 0x0a, 0x40, 0xa2, 0x98, 0x71, 0x4b, 0x27, 
0x5e, 0xb4, 0x41, 0x96, 0xdd, 0x8b, + 0x1c, 0xb4, 0xda, 0x32, 0x0c, 0x70, 0x0e, 0xa2, 0xa9, 0x0e, 0xa2, 0x7b, 0x8e, 0xc0, 0xc6, 0x48, + 0xa8, 0xc3, 0x85, 0x90, 0x60, 0x97, 0x71, 0x9b, 0xed, 0x6d, 0xf8, 0xfd, 0x29, 0xcb, 0xa0, 0xae, + 0x5e, 0x7c, 0x16, 0xd0, 0x75, 0xb0, 0xe4, 0xc4, 0x61, 0x28, 0x2d, 0x21, 0x59, 0x2f, 0x75, 0xce, + 0x3b, 0x19, 0x92, 0x79, 0xe4, 0xbb, 0x72, 0xec, 0xd5, 0x93, 0x16, 0x1d, 0xd1, 0xad, 0xb6, 0xd6, + 0x27, 0x68, 0xd2, 0x48, 0x64, 0xcf, 0x81, 0xc8, 0x04, 0x2d, 0xd0, 0x29, 0x93, 0x99, 0x35, 0xed, + 0x02, 0x1d, 0xe7, 0xd5, 0xba, 0xee, 0x30, 0x0b, 0x25, 0x69, 0xaf, 0x42, 0xdb, 0x3d, 0xbd, 0x0b, + 0x09, 0x4e, 0x29, 0x13, 0x70, 0xd0, 0x89, 0xb1, 0x63, 0xa7, 0x91, 0x74, 0x6f, 0x89, 0xec, 0x3a, + 0x3a, 0x71, 0x9e, 0x97, 0x10, 0x6b, 0xd6, 0x5f, 0x37, 0xa5, 0x97, 0xe8, 0xf6, 0x70, 0xc8, 0x0c, + 0xf2, 0xb4, 0xfa, 0x01, 0x86, 0x76, 0x2b, 0x83, 0xfb, 0x87, 0x15, 0x3a, 0xc1, 0xac, 0xfc, 0xc9, + 0xb1, 0x4a, 0xdf, 0x01, 0x73, 0xae, 0x29, 0xd5, 0x0a, 0xbf, 0x8d, 0x5b, 0x62, 0x4a, 0xa0, 0xd9, + 0xa0, 0xd2, 0x59, 0xc7, 0x6a, 0xc0, 0x31, 0xba, 0xe7, 0x01, 0x7f, 0x82, 0xb6, 0x8e, 0x5e, 0xaf, + 0x68, 0x15, 0x51, 0xd2, 0xe3, 0xd3, 0x16, 0xd2, 0x8c, 0x39, 0x08, 0x10, 0x48, 0xfc, 0xeb, 0x83, + 0x86, 0xc4, 0x25, 0x96, 0x5b, 0x5c, 0xee, 0x77, 0x77, 0xc2, 0x7f, 0xa6, 0x6c, 0x24, 0xe4, 0x03, + 0xa9, 0x69, 0x71, 0xeb, 0x56, 0x4f, 0xb4, 0xb7, 0x7c, 0x0b, 0xd0, 0xda, 0xeb, 0x80, 0xe3, 0x16, + 0xab, 0x16, 0xca, 0xe4, 0xfa, 0x36, 0x3b, 0xca, 0x6d, 0xb7, 0xd8, 0xdc, 0x81, 0xec, 0xe4, 0xf3, + 0x12, 0x65, 0x05, 0xde, 0xe7, 0x41, 0x02, 0x66, 0x4d, 0x8d, 0xcb, 0x4f, 0x4c, 0x91, 0x45, 0x86, + 0x23, 0x8c, 0xea, 0xfe, 0x8d, 0xc9, 0x71, 0xb1, 0xa2, 0xef, 0x0c, 0xab, 0x25, 0xc6, 0x02, 0xb7, + 0xaf, 0xaa, 0x18, 0xe2, 0xde, 0x18, 0x34, 0x4d, 0x84, 0xb0, 0xb2, 0x2f, 0x2d, 0xc5, 0xb5, 0xd0, + 0xca, 0x7d, 0x34, 0xfc, 0x4e, 0x37, 0x04, 0xa4, 0x20, 0xb7, 0x63, 0x11, 0x2c, 0xb8, 0xf7, 0x2f, + 0x76, 0x8e, 0x9d, 0x16, 0x2c, 0xd5, 0xb0, 0xfd, 0x35, 0xec, 0x29, 0x3e, 0x0a, 0x41, 0xba, 0x85, + 0x19, 0x6b, 0x29, 0xc0, 0x6a, 0x84, 0xcd, 0x33, 0x94, 0xe9, 0x98, 0xf2, 0x4e, 0xe6, 0xe4, 0xce, + 0xfe, 0x6e, 0x2c, 0xb8, 0x49, 0xc3, 0xe1, 0x31, 0x9f, 0xb9, 0x1d, 0x11, 0x84, 0xa9, 0x5a, 0xf3, + 0xc5, 0x13, 0x83, 0xd1, 0x8e, 0x6f, 0xb1, 0xe0, 0xc7, 0x99, 0x78, 0x6a, 0x1c, 0x45, 0x1f, 0xff, + 0x84, 0xcb, 0xa4, 0x9f, 0xdc, 0x8f, 0xf4, 0xea, 0xdd, 0x0b, 0xe6, 0x5d, 0x69, 0x6e, 0x8f, 0x4a, + 0x2a, 0x84, 0xa0, 0x7d, 0xcd, 0xa8, 0x1a, 0xa6, 0xf6, 0xae, 0xdf, 0x5a, 0x3c, 0x43, 0x01, 0x2c, + 0x86, 0x87, 0xac, 0x41, 0xa7, 0x5e, 0x95, 0x9f, 0x8d, 0x83, 0x66, 0x90, 0x26, 0x0e, 0x0f, 0x27, + 0xe6, 0x9d, 0xe7, 0x59, 0xe8, 0xb3, 0x63, 0xe3, 0x74, 0xd6, 0x29, 0xf3, 0xe9, 0xbb, 0xcb, 0xa2, + 0x23, 0xd4, 0xc6, 0x45, 0x03, 0xaf, 0xe9, 0x34, 0x70, 0x8c, 0x02, 0xf0, 0x28, 0x60, 0xa9, 0x2a, + 0xd1, 0x33, 0x7b, 0x7d, 0x05, 0x0c, 0x1c, 0xf4, 0x92, 0x27, 0x3e, 0xf7, 0x2a, 0x29, 0x18, 0x2f, + 0xa6, 0xb6, 0x85, 0x9b, 0x29, 0xf5, 0xc5, 0xd9, 0x83, 0x19, 0xc2, 0xa3, 0xfe, 0x11, 0xbf, 0x03, + 0xea, 0x6f, 0xae, 0xa9, 0x99, 0x33, 0x7f, 0x7f, 0xe1, 0xe1, 0x82, 0xb9, 0x4a, 0xbd, 0xfe, 0x77, + 0x45, 0x51, 0x38, 0x50, 0xba, 0x79, 0x59, 0x95, 0xc2, 0x4d, 0xc4, 0xc4, 0x77, 0xc3, 0xe9, 0xd6, + 0x0a, 0xd1, 0xf1, 0x58, 0xce, 0xcd, 0xab, 0xc7, 0xa0, 0x39, 0x2f, 0x0e, 0xad, 0xdf, 0xa6, 0x76, + 0x15, 0xd8, 0x6b, 0xc6, 0x33, 0xc2, 0x0e, 0xe7, 0x19, 0x97, 0x5a, 0x7c, 0x76, 0x0a, 0x81, 0x28, + 0x47, 0xa9, 0x96, 0xc0, 0x47, 0x9b, 0xad, 0x97, 0x9c, 0x7d, 0xe4, 0xb5, 0xe8, 0xdd, 0x77, 0x94, + 0x08, 0xd1, 0x52, 0x67, 0x31, 0xe6, 0xf5, 0xfe, 0x39, 0xf7, 0xdb, 0x15, 0xda, 0xd0, 
0x9e, 0x47, + 0x0c, 0x7e, 0x52, 0x78, 0x5c, 0xdb, 0xfc, 0x92, 0x00, 0x39, 0xb6, 0x8d, 0x9a, 0x23, 0xda, 0xd7, + 0xdf, 0xe6, 0xea, 0xeb, 0x59, 0x28, 0x3a, 0x74, 0x13, 0x20, 0xbd, 0x1d, 0xd8, 0xb7, 0xdb, 0x1b, + 0x83, 0x4e, 0x08, 0xc1, 0x7e, 0xba, 0x89, 0x22, 0x9d, 0x39, 0xb7, 0x29, 0xd9, 0x21, 0x9c, 0x97, + 0x63, 0x20, 0x5e, 0xa2, 0x4c, 0x3d, 0x9c, 0x7c, 0x3c, 0x58, 0xa8, 0xcc, 0x95, 0xc9, 0x16, 0xa5, + 0xff, 0xda, 0xe6, 0x70, 0x0c, 0x86, 0xcf, 0xa6, 0xc5, 0x59, 0x7e, 0xd7, 0x43, 0x95, 0x79, 0x3c, + 0xe1, 0xb6, 0xe8, 0xa5, 0xc7, 0x03, 0xca, 0x3c, 0xaa, 0x27, 0x49, 0x96, 0xf9, 0xc1, 0x0f, 0xd4, + 0xbe, 0x47, 0x10, 0x35, 0xae, 0xf2, 0x28, 0xb0, 0x92, 0xae, 0xac, 0xcf, 0xb0, 0xe0, 0x07, 0x61, + 0x15, 0xb4, 0xf9, 0xaf, 0x9f, 0xf3, 0xd2, 0x67, 0x2d, 0x59, 0x14, 0xf9, 0x42, 0x58, 0xe8, 0xf5, + 0xd2, 0xed, 0x99, 0xab, 0x55, 0x6e, 0x59, 0x52, 0x67, 0xc0, 0x65, 0xc8, 0xed, 0x1c, 0xa1, 0xcd, + 0x62, 0x00, 0xa8, 0xdc, 0x98, 0x08, 0xa2, 0xba, 0xb2, 0x1f, 0xf5, 0xf4, 0xe3, 0xa3, 0x57, 0x0d, + 0x48, 0x97, 0xd0, 0x79, 0x0f, 0x47, 0xaa, 0x35, 0xf7, 0x6d, 0x3b, 0x6a, 0xb2, 0x17, 0xb0, 0x95, + 0xc2, 0x08, 0x13, 0xd6, 0xaf, 0xb3, 0x66, 0xf3, 0xe3, 0x17, 0x7a, 0xcb, 0x1c, 0xb8, 0x95, 0x14, + 0x31, 0x90, 0xe6, 0x9b, 0xd3, 0xf0, 0x12, 0x2b, 0x08, 0xc0, 0xc8, 0xe7, 0xc5, 0xe5, 0xcf, 0xf7, + 0x5d, 0xe5, 0x53, 0xc1, 0x15, 0x46, 0xb8, 0xff, 0xdf, 0x12, 0x05, 0xc0, 0x64, 0xf5, 0xed, 0x88, + 0x48, 0x40, 0x84, 0xef, 0x48, 0x92, 0x46, 0xee, 0xd5, 0x42, 0xbb, 0x1a, 0xa5, 0x93, 0x3d, 0x52, + 0x62, 0x9b, 0x3f, 0x0a, 0xa2, 0xff, 0x6f, 0xb9, 0xfc, 0x9f, 0xe7, 0x88, 0x35, 0x00, 0x0b, 0x9c, + 0x06, 0x9e, 0x9c, 0x9c, 0x5a, 0x4e, 0x4f, 0x44, 0x83, 0x5d, 0xc2, 0x60, 0xad, 0x2d, 0x34, 0x60, + 0x9f, 0xaf, 0x86, 0x11, 0xef, 0x3e, 0x11, 0x4e, 0x8e, 0xca, 0xba, 0x4d, 0x3b, 0xd4, 0x2d, 0xc0, + 0xf0, 0xe5, 0xa9, 0xb3, 0xe2, 0xdc, 0xcf, 0xfb, 0x68, 0x70, 0x4b, 0xdb, 0x05, 0x44, 0xcd, 0x39, + 0x38, 0x76, 0x54, 0xd2, 0x63, 0x22, 0x2c, 0x26, 0xd3, 0xfa, 0x44, 0x7c, 0x17, 0xb5, 0x59, 0xec, + 0x54, 0xa1, 0xda, 0x34, 0x33, 0x37, 0x92, 0x09, 0x85, 0x5d, 0xe2, 0xa3, 0x59, 0x96, 0xdf, 0x69, + 0x28, 0x78, 0xc1, 0x7c, 0x98, 0x64, 0x9c, 0x0d, 0x8f, 0x2b, 0xe9, 0x95, 0x81, 0x4b, 0x6c, 0xc2, + 0xc3, 0xcf, 0xe0, 0xeb, 0x89, 0x0c, 0x5e, 0x48, 0xb5, 0x39, 0xbb, 0xc8, 0xe9, 0xdf, 0xcb, 0x17, + 0x45, 0xc6, 0x84, 0x54, 0x30, 0x6c, 0x24, 0x01, 0x34, 0xc2, 0xf8, 0x9b, 0xaa, 0x85, 0xaf, 0xbe, + 0x52, 0xa5, 0x4d, 0x55, 0x44, 0xbe, 0xff, 0x38, 0xf7, 0x7b, 0x9a, 0xdd, 0x0d, 0x32, 0x12, 0xcb, + 0x84, 0xff, 0xc9, 0x8a, 0xfe, 0x62, 0xb2, 0xf5, 0x11, 0x5d, 0xea, 0xcb, 0xf9, 0x5a, 0x5b, 0xd0, + 0xb5, 0x0c, 0x9b, 0x2f, 0x2c, 0xe2, 0x51, 0x59, 0x63, 0x74, 0x47, 0x73, 0xd0, 0x9f, 0x85, 0xac, + 0xb4, 0x05, 0x63, 0x24, 0x4f, 0xaa, 0xc5, 0x9f, 0xa9, 0x49, 0xae, 0x57, 0xa8, 0x7e, 0xc6, 0x31, + 0x5c, 0x6a, 0xb0, 0x39, 0xb8, 0xea, 0xa7, 0xc0, 0xb7, 0x09, 0xfc, 0x66, 0xdf, 0x3a, 0x5e, 0x36, + 0xb3, 0x9d, 0xa2, 0xac, 0x47, 0xa7, 0x03, 0x1c, 0x81, 0x31, 0x38, 0xa1, 0x38, 0xf0, 0xe2, 0xfe, + 0xfb, 0x84, 0x23, 0xdc, 0xa8, 0x4e, 0x2f, 0x40, 0x2f, 0x6a, 0xc6, 0x64, 0x08, 0x71, 0x00, 0x2e, + 0x09, 0x5a, 0x49, 0xaf, 0x49, 0xbb, 0xe4, 0x5f, 0xfd, 0x20, 0xdf, 0x84, 0xd9, 0x22, 0x7c, 0x2c, + 0xe2, 0x6e, 0x3a, 0x8d, 0xc3, 0xf8, 0xe1, 0x31, 0x0c, 0x78, 0xb1, 0x14, 0x65, 0xb5, 0x86, 0xc8, + 0xd8, 0xe2, 0xb4, 0xba, 0x5c, 0xfa, 0x09, 0x31, 0x77, 0xe4, 0x05, 0x36, 0xa1, 0x46, 0xf1, 0x58, + 0x5b, 0xaa, 0x08, 0x45, 0x8a, 0x99, 0x32, 0xfe, 0x0e, 0xfb, 0xf6, 0xdd, 0x8f, 0xc6, 0x61, 0xc8, + 0x52, 0xd2, 0x2a, 0x12, 0x68, 0x5d, 0x7c, 0x21, 0xed, 0x10, 0x56, 0x06, 0x24, 0x4e, 0xd4, 0x4a, + 0x6a, 0xe5, 
0x3d, 0xdc, 0x70, 0x56, 0x47, 0x12, 0x3c, 0x26, 0xd4, 0x02, 0xb1, 0x6e, 0x17, 0xcb, + 0x82, 0xa4, 0x76, 0xc6, 0xa7, 0x39, 0xcf, 0x17, 0xb4, 0xad, 0xb5, 0x09, 0xfa, 0x35, 0x1c, 0x46, + 0x20, 0x37, 0x5c, 0x31, 0x93, 0x48, 0xf9, 0x47, 0xb0, 0xe1, 0xce, 0x11, 0xa5, 0xfb, 0x61, 0x82, + 0xad, 0x31, 0xa6, 0x99, 0x2f, 0x50, 0x4f, 0x21, 0x82, 0x27, 0x21, 0x51, 0xa0, 0x1a, 0x31, 0x4e, + 0x4d, 0x13, 0x20, 0x57, 0x69, 0x49, 0x76, 0x8e, 0xd5, 0xdb, 0x6a, 0xe5, 0xf8, 0x93, 0x40, 0x2e, + 0x59, 0xa1, 0x43, 0x6d, 0x17, 0x23, 0xb7, 0x25, 0xc4, 0x14, 0x9e, 0x11, 0x66, 0x05, 0x72, 0x66, + 0x4f, 0x5c, 0x54, 0x8c, 0x45, 0x81, 0xae, 0x30, 0x30, 0x77, 0x75, 0x60, 0xe8, 0x02, 0x94, 0x56, + 0xad, 0x06, 0x62, 0x2d, 0x53, 0xb1, 0x51, 0x80, 0x23, 0xff, 0xcd, 0x75, 0xf2, 0x3d, 0xf0, 0x4e, + 0x21, 0xc0, 0x68, 0x92, 0x89, 0x66, 0x60, 0x91, 0xf6, 0x7e, 0x8a, 0xb1, 0x19, 0xf3, 0x4c, 0x6f, + 0x47, 0xd4, 0xd0, 0x8a, 0x5c, 0xb2, 0xc3, 0x41, 0x55, 0x87, 0xa3, 0x6e, 0x4c, 0x61, 0xdd, 0x56, + 0xaa, 0x5b, 0x6f, 0xd0, 0xf4, 0x9d, 0xf5, 0x31, 0xb6, 0xd4, 0xd3, 0x91, 0xb5, 0x27, 0xf7, 0xb6, + 0xde, 0xf7, 0x07, 0x65, 0x63, 0x62, 0x9e, 0x8a, 0xaa, 0xbf, 0x54, 0xa9, 0x4f, 0x2d, 0x01, 0xa2, + 0xb9, 0xc9, 0xd6, 0x4d, 0x73, 0x09, 0xa1, 0x3e, 0xb0, 0xfd, 0xb7, 0xb8, 0x79, 0x4b, 0x12, 0x73, + 0x6c, 0xb6, 0x40, 0x9c, 0x0f, 0x08, 0xe2, 0xb8, 0x26, 0xdb, 0xbb, 0x0e, 0xa8, 0xe1, 0x54, 0xe1, + 0x10, 0xab, 0xac, 0xaa, 0xb0, 0xe9, 0x53, 0x35, 0x26, 0xc0, 0x60, 0xed, 0x12, 0x4d, 0x7e, 0xea, + 0x76, 0x2f, 0xa1, 0x7d, 0x47, 0x05, 0x84, 0x18, 0x61, 0x9b, 0x84, 0x18, 0xc1, 0x74, 0xf1, 0xb8, + 0x3d, 0x7b, 0xd4, 0xa7, 0x8a, 0x55, 0xa9, 0xf1, 0x1e, 0x90, 0xac, 0xd1, 0x9a, 0xe6, 0xaf, 0xf2, + 0xad, 0xb2, 0x6b, 0x38, 0x81, 0x6f, 0xe7, 0xac, 0xb3, 0xbd, 0x89, 0x8d, 0xc4, 0xa7, 0x90, 0x1c, + 0x7a, 0x2c, 0x1b, 0x6f, 0xcb, 0xb4, 0xa3, 0x24, 0x33, 0xda, 0x71, 0xc2, 0x42, 0xfb, 0x41, 0xb8, + 0xd8, 0xad, 0xf4, 0x85, 0x91, 0xc6, 0xb1, 0x58, 0xe1, 0x0e, 0x42, 0x48, 0x1b, 0xb3, 0x96, 0x4e, + 0xbb, 0x89, 0x3e, 0xef, 0xc5, 0x16, 0x23, 0x3c, 0x64, 0xad, 0x2d, 0xf9, 0x19, 0x1f, 0x5a, 0xa6, + 0xea, 0xdc, 0xc6, 0x4e, 0x32, 0xce, 0xdf, 0xde, 0x6c, 0x15, 0x00, 0xc5, 0xa1, 0x60, 0x4b, 0x4a, + 0x5a, 0x94, 0xdd, 0x84, 0x39, 0xd2, 0xeb, 0x46, 0x46, 0x0b, 0xe5, 0x22, 0x68, 0x28, 0xd2, 0x73, + 0x5c, 0x7d, 0x76, 0xb7, 0x94, 0x9f, 0xdf, 0x11, 0x43, 0x6e, 0x21, 0x5f, 0xcc, 0xf8, 0xa7, 0x9a, + 0xab, 0x05, 0xe6, 0x66, 0xa4, 0x45, 0xbc, 0x7f, 0x7c, 0x68, 0x28, 0xaa, 0x5a, 0xfc, 0xb6, 0xd8, + 0xbf, 0x26, 0x6a, 0x69, 0x91, 0x3e, 0xd2, 0x5e, 0xb2, 0xb6, 0x99, 0xdd, 0xa9, 0x0b, 0x5d, 0x48, + 0xcd, 0x41, 0x04, 0x0c, 0x13, 0x07, 0xa5, 0xa1, 0xad, 0xd3, 0x80, 0x55, 0x8c, 0x19, 0x8d, 0x99, + 0xaf, 0x30, 0xc1, 0x45, 0x21, 0x26, 0xe4, 0xab, 0xbe, 0xf0, 0x44, 0x73, 0x8d, 0x59, 0x2b, 0x3b, + 0xa7, 0x5c, 0xb8, 0x7a, 0x9c, 0x58, 0x3e, 0x2b, 0xb4, 0x5f, 0x3e, 0xcb, 0xbf, 0x65, 0xfe, 0x49, + 0xfd, 0x96, 0xf5, 0x90, 0x5f, 0x1f, 0x81, 0xf9, 0x1f, 0xa8, 0x04, 0xd4, 0xd5, 0x35, 0x65, 0x7c, + 0x93, 0xfc, 0x88, 0xf1, 0x6f, 0xc1, 0x90, 0xc6, 0xf0, 0xe9, 0x9d, 0x40, 0xa3, 0x31, 0xe1, 0x0e, + 0x6d, 0xe3, 0x26, 0xec, 0xb3, 0x80, 0x5c, 0x8d, 0xe1, 0x8b, 0x44, 0xb7, 0xe6, 0x88, 0x3d, 0x96, + 0x45, 0x46, 0xeb, 0x04, 0x47, 0x4b, 0x58, 0x38, 0x36, 0xa3, 0x29, 0x9b, 0xd3, 0xa7, 0x0d, 0x3d, + 0x77, 0xfe, 0x2b, 0xb7, 0x66, 0xc4, 0xe5, 0x6c, 0x2b, 0xa7, 0xab, 0x19, 0x34, 0xc6, 0x66, 0x7c, + 0xae, 0x52, 0xab, 0x86, 0x06, 0x94, 0x2e, 0x7c, 0x79, 0xc3, 0xda, 0x83, 0x87, 0x92, 0xb1, 0xf4, + 0x26, 0x0d, 0x76, 0xa7, 0xe1, 0x25, 0xbd, 0x0d, 0x21, 0x60, 0x2d, 0xe2, 0x07, 0x5d, 0xa7, 0xc1, + 0x72, 0x89, 0xb0, 0xb3, 0x6e, 0x0d, 
0x8d, 0xb5, 0xc4, 0x90, 0x4c, 0x87, 0x6b, 0xad, 0x13, 0x27, + 0x75, 0xf7, 0x48, 0x04, 0x40, 0xe5, 0x42, 0x23, 0x2f, 0x6d, 0x17, 0x8a, 0x3c, 0x61, 0xe0, 0xfd, + 0x13, 0x89, 0x1c, 0xba, 0x99, 0xcc, 0xc0, 0x2b, 0xce, 0x44, 0xda, 0x4b, 0x4b, 0x65, 0x41, 0x60, + 0xa8, 0xcd, 0x94, 0x89, 0xff, 0x5d, 0xaf, 0x02, 0x09, 0x08, 0x0f, 0x0d, 0x60, 0x7a, 0xe5, 0x43, + 0x82, 0xf9, 0x71, 0xbf, 0x5a, 0x47, 0x7c, 0x2f, 0xc5, 0x1d, 0x28, 0x3b, 0xdd, 0x7e, 0xdc, 0x6a, + 0x21, 0xc2, 0x54, 0x9b, 0xb9, 0xbb, 0x1e, 0x82, 0xcb, 0x70, 0x93, 0xc3, 0xb4, 0x1d, 0x15, 0x20, + 0xcb, 0x5b, 0x22, 0x37, 0xdc, 0x71, 0xea, 0x91, 0xd2, 0xdb, 0xa5, 0x2b, 0x4a, 0x41, 0xf8, 0x5e, + 0x6f, 0x81, 0x51, 0x46, 0xe6, 0x54, 0xe0, 0x62, 0x73, 0x9a, 0x4b, 0x3a, 0x6c, 0xf5, 0xda, 0xe4, + 0x1f, 0x58, 0xd7, 0xef, 0xc5, 0xf9, 0xc8, 0x42, 0x7b, 0xbc, 0x4d, 0xff, 0x95, 0x9b, 0xc1, 0x45, + 0xae, 0x00, 0x88, 0x55, 0x29, 0xb8, 0xef, 0xf1, 0xbf, 0xf3, 0xa3, 0xf5, 0x10, 0x64, 0x8d, 0x3e, + 0xa7, 0x4a, 0x04, 0x29, 0x40, 0x6f, 0xc8, 0xd2, 0x80, 0x57, 0xf4, 0x13, 0x47, 0x71, 0x59, 0x90, + 0x4f, 0xbb, 0xc3, 0xe8, 0xcc, 0xab, 0x85, 0xce, 0x5f, 0x62, 0x5d, 0xd1, 0x66, 0xcf, 0xf8, 0x30, + 0xb7, 0x50, 0x0c, 0x1a, 0x54, 0x26, 0xb1, 0x37, 0xe9, 0x6f, 0x74, 0x19, 0x69, 0x3d, 0x45, 0x79, + 0x32, 0x5e, 0xf5, 0xfd, 0x01, 0xa8, 0x14, 0x2c, 0xdb, 0x52, 0x56, 0x14, 0x22, 0xf6, 0x26, 0x74, + 0x6b, 0xdc, 0x23, 0x9e, 0x10, 0xb9, 0x65, 0xdb, 0x6f, 0x15, 0xcd, 0x9e, 0x5d, 0x05, 0x34, 0xe3, + 0xbd, 0x06, 0x7e, 0xf1, 0x4c, 0xcf, 0x7f, 0xf4, 0x75, 0xf5, 0xa5, 0x4b, 0x47, 0x58, 0x8b, 0xed, + 0x2e, 0x5b, 0x0e, 0x1b, 0x9e, 0x92, 0x33, 0xb2, 0x0c, 0xab, 0x76, 0xf3, 0xa1, 0x71, 0xd9, 0x97, + 0xd0, 0x9b, 0x0f, 0x3a, 0xea, 0x61, 0x8c, 0x46, 0x71, 0xc4, 0x08, 0xb5, 0x7d, 0x0c, 0xcb, 0x02, + 0xe7, 0x7a, 0xbb, 0xcf, 0xc6, 0xb3, 0x29, 0x7c, 0x39, 0x88, 0x86, 0xaf, 0x4a, 0x2d, 0x24, 0x83, + 0x7c, 0xf1, 0x41, 0x11, 0xe0, 0x27, 0x47, 0xc8, 0xd5, 0x8e, 0x6a, 0x36, 0x8f, 0x49, 0xf3, 0x56, + 0xba, 0xbc, 0x91, 0x13, 0xa9, 0x9c, 0x63, 0x1c, 0x8a, 0x25, 0xc8, 0x44, 0xee, 0x5c, 0x71, 0x11, + 0x51, 0xa9, 0xf3, 0xc5, 0xd0, 0x44, 0x7d, 0x66, 0x25, 0x18, 0xa9, 0x5f, 0xc3, 0x9c, 0x33, 0x2d, + 0x91, 0x6f, 0x81, 0xef, 0xd5, 0x55, 0x74, 0xce, 0xb2, 0xe3, 0x4b, 0xd4, 0x9a, 0x20, 0x2e, 0x52, + 0x13, 0xf4, 0x49, 0x47, 0x46, 0x52, 0x0f, 0xc9, 0xec, 0xc8, 0x51, 0x0c, 0xcd, 0x24, 0xa6, 0x46, + 0xd7, 0x45, 0x6f, 0x12, 0x5a, 0xea, 0x00, 0xae, 0x06, 0xc9, 0x1e, 0x38, 0xbc, 0x7e, 0x01, 0x90, + 0xa8, 0x69, 0x19, 0x5a, 0x9b, 0xdf, 0xa0, 0x8c, 0xbf, 0x18, 0xd5, 0xef, 0x24, 0xcc, 0x45, 0xbc, + 0x2c, 0xc3, 0x19, 0x59, 0xec, 0x1f, 0x52, 0x95, 0x27, 0x9a, 0xcc, 0x7a, 0x48, 0x34, 0xca, 0x05, + 0x59, 0x45, 0xe1, 0x4c, 0x5b, 0xc0, 0x24, 0x5e, 0xee, 0xff, 0x50, 0xf2, 0xcd, 0x76, 0xf8, 0x97, + 0x41, 0x7d, 0x78, 0x84, 0xe1, 0x7d, 0x41, 0x48, 0x8d, 0x27, 0xa8, 0x70, 0xcf, 0x48, 0xd2, 0xb9, + 0xb7, 0xd9, 0x20, 0x4d, 0x12, 0xec, 0xc0, 0x04, 0x7f, 0x6b, 0x3b, 0x4e, 0xd6, 0xe5, 0x46, 0xc4, + 0x33, 0x50, 0x23, 0x9b, 0xe2, 0x54, 0x69, 0x3c, 0x35, 0x95, 0xd5, 0x22, 0x8c, 0x57, 0x7c, 0x88, + 0xa5, 0xaa, 0x10, 0x11, 0x3d, 0x24, 0xf8, 0x2f, 0xc9, 0xee, 0x5d, 0x60, 0x68, 0xe3, 0x2b, 0xc3, + 0xb7, 0x71, 0x82, 0xc7, 0x5e, 0xe9, 0x2f, 0x9c, 0xad, 0xb7, 0xc0, 0x0e, 0x3f, 0xaa, 0x71, 0xd2, + 0x1c, 0x68, 0x7d, 0x90, 0x7a, 0x54, 0x8a, 0x63, 0x36, 0xa7, 0xd5, 0xc0, 0xea, 0x0e, 0xf3, 0x78, + 0xcc, 0x64, 0x08, 0xa1, 0x2e, 0x74, 0x45, 0xf2, 0xb2, 0x9d, 0xba, 0x43, 0x6b, 0xb1, 0x18, 0xa4, + 0xe6, 0x55, 0x7c, 0xb2, 0x40, 0x44, 0x39, 0x71, 0x79, 0x2f, 0xa3, 0xd6, 0x42, 0xd7, 0x58, 0xd1, + 0xf5, 0xd6, 0xc4, 0x97, 0x03, 0x31, 0x2f, 0x4c, 0xde, 0x47, 
0x65, 0xdb, 0x3b, 0x41, 0xdb, 0xc2, + 0x5c, 0xa2, 0x7d, 0xbc, 0xa4, 0x84, 0x0f, 0x55, 0x87, 0x62, 0xb0, 0xd9, 0xb8, 0x0e, 0x6e, 0x8e, + 0xe1, 0xd8, 0x8f, 0x00, 0x0c, 0x2d, 0x42, 0xc0, 0xc7, 0x3d, 0xba, 0x99, 0x3d, 0x69, 0x94, 0xe9, + 0x1f, 0xb0, 0x88, 0x43, 0x4c, 0x50, 0xae, 0x47, 0x0c, 0x6c, 0xfb, 0x7f, 0xbd, 0xfc, 0x7a, 0x71, + 0x83, 0xd0, 0x63, 0x6d, 0xb4, 0x70, 0x89, 0xde, 0xf7, 0x94, 0x01, 0x5f, 0x9c, 0xde, 0x44, 0x48, + 0x76, 0xd8, 0x76, 0x9e, 0xa6, 0x38, 0x8e, 0x2e, 0x50, 0xad, 0x4e, 0x9b, 0xb1, 0x54, 0x60, 0x6f, + 0xb7, 0xcb, 0xc8, 0x10, 0xf2, 0xbd, 0xa1, 0x5c, 0xe3, 0x47, 0x82, 0xb0, 0xe3, 0x34, 0x2c, 0x36, + 0xc6, 0xe2, 0x6b, 0x39, 0xc0, 0x89, 0xe2, 0xaa, 0x66, 0x94, 0x13, 0xd9, 0x47, 0xf2, 0x98, 0x8d, + 0x4b, 0xd4, 0xa0, 0x7e, 0x73, 0x0a, 0xcc, 0x3d, 0x5b, 0x49, 0x5d, 0xc8, 0xb8, 0x40, 0x78, 0xb3, + 0xad, 0x7f, 0xf2, 0x68, 0x82, 0x56, 0xe5, 0x8d, 0xf3, 0xa5, 0x95, 0xde, 0x56, 0x19, 0x97, 0xcd, + 0xfb, 0x81, 0x47, 0x1b, 0x24, 0x24, 0x30, 0xb2, 0xd7, 0x97, 0x44, 0x23, 0x49, 0xed, 0xa7, 0x5a, + 0xc0, 0x3e, 0x56, 0x49, 0x47, 0xe7, 0x67, 0x69, 0x14, 0x65, 0xda, 0x65, 0x81, 0xa7, 0x71, 0x4d, + 0xed, 0xfd, 0x87, 0xd2, 0xcc, 0xbc, 0x15, 0xff, 0x2f, 0x36, 0x07, 0x8a, 0x0c, 0x6a, 0xd5, 0x51, + 0x8d, 0xbb, 0x72, 0x0d, 0x72, 0x0e, 0xcb, 0x44, 0x14, 0x70, 0x7a, 0xd5, 0xec, 0xfb, 0x2b, 0xe8, + 0xfd, 0xa9, 0x8b, 0xc4, 0x62, 0x5f, 0x59, 0x34, 0xa1, 0x9e, 0xb8, 0x88, 0xa9, 0x9d, 0x7d, 0x10, + 0x81, 0x18, 0x47, 0xe2, 0xe3, 0xe9, 0x83, 0x96, 0xa1, 0x77, 0x88, 0x34, 0x71, 0xdd, 0xbe, 0xe9, + 0xc3, 0x7c, 0x8d, 0x10, 0xb1, 0xf7, 0x37, 0x9a, 0xe1, 0x59, 0x42, 0x41, 0xd6, 0x76, 0x5c, 0x64, + 0x50, 0xf1, 0x51, 0xd4, 0x73, 0xc3, 0x87, 0x8f, 0xab, 0x9a, 0xad, 0x5f, 0x08, 0xb1, 0x59, 0xf4, + 0x67, 0xc6, 0xf8, 0x22, 0xa8, 0x1e, 0x7f, 0xeb, 0xc5, 0xc0, 0x0c, 0xfa, 0x66, 0x40, 0xc3, 0xd8, + 0x4d, 0xf9, 0xda, 0x51, 0xc9, 0xc7, 0x1f, 0xa5, 0x2a, 0x79, 0x1c, 0x95, 0x19, 0x33, 0x97, 0x70, + 0xc4, 0x8d, 0x1e, 0xfa, 0xe6, 0x2f, 0x67, 0x3e, 0x03, 0x3f, 0x18, 0xd8, 0x03, 0x69, 0xdb, 0xd7, + 0x0a, 0x12, 0xb6, 0x31, 0x1f, 0x1e, 0x4e, 0x87, 0xe3, 0xf6, 0x87, 0x56, 0x79, 0x7e, 0x3d, 0x79, + 0x8a, 0x66, 0x51, 0xf8, 0x42, 0x90, 0xe7, 0xe7, 0xf6, 0x02, 0xd9, 0xef, 0x18, 0x96, 0xd1, 0x3c, + 0x1f, 0xe6, 0x6a, 0x73, 0x23, 0x1b, 0xae, 0xdb, 0x03, 0x43, 0x9c, 0x89, 0xec, 0x37, 0x0d, 0x54, + 0x85, 0xae, 0x08, 0xb9, 0x3c, 0x76, 0xa3, 0x13, 0x16, 0xa3, 0xb3, 0x65, 0x76, 0x5e, 0x5c, 0x16, + 0xe8, 0x1d, 0x1e, 0x4e, 0x10, 0x68, 0x6c, 0x28, 0xa1, 0x6f, 0xb3, 0x7d, 0xa8, 0x2d, 0xe1, 0x5b, + 0x06, 0xc8, 0x94, 0x5e, 0xae, 0x2a, 0x5c, 0x87, 0x62, 0xe6, 0x42, 0x19, 0x19, 0xbf, 0xad, 0xf8, + 0x9a, 0x12, 0xcf, 0x7a, 0x79, 0x3c, 0xc1, 0xb7, 0x9b, 0x3e, 0x61, 0x51, 0x02, 0xe8, 0x36, 0x12, + 0x30, 0xed, 0x96, 0xec, 0x18, 0x56, 0xb3, 0xcd, 0xa0, 0xe2, 0xa3, 0x48, 0x89, 0xdd, 0xae, 0x91, + 0xce, 0xa7, 0x9d, 0x9c, 0xcd, 0xee, 0x5a, 0x04, 0xf9, 0x71, 0x32, 0x83, 0x8a, 0x58, 0x9e, 0x6c, + 0xdf, 0x3d, 0xb1, 0x9e, 0x91, 0xd2, 0x16, 0xfe, 0x36, 0xbe, 0xbd, 0x97, 0xa3, 0xf6, 0x6a, 0x48, + 0x5b, 0x1c, 0x60, 0x01, 0x77, 0x6b, 0xbc, 0x17, 0xae, 0xe5, 0xde, 0xb2, 0xa4, 0xd0, 0x4b, 0x08, + 0xa9, 0x4c, 0xfc, 0xae, 0xaa, 0xa8, 0xe1, 0xc8, 0x28, 0x38, 0x94, 0x2a, 0xfa, 0x62, 0x79, 0xc7, + 0x9f, 0xc5, 0x6e, 0x0d, 0x36, 0x48, 0xb9, 0xad, 0x91, 0x88, 0x28, 0x7d, 0x3e, 0xce, 0x76, 0x58, + 0x47, 0xdd, 0x6b, 0x2c, 0x33, 0x00, 0xf1, 0xb1, 0x15, 0x42, 0x76, 0x2f, 0xbf, 0x55, 0x25, 0x28, + 0xbe, 0xf1, 0x67, 0x8a, 0x50, 0xb3, 0x53, 0x63, 0x03, 0x66, 0x10, 0xce, 0xea, 0xd1, 0xd4, 0x1f, + 0x12, 0x62, 0x7f, 0xa4, 0xfa, 0xb0, 0xdd, 0x61, 0xed, 0xde, 0x7c, 0x99, 0x14, 0xf5, 
0x31, 0xbe, + 0x86, 0x10, 0xbb, 0xb5, 0xcd, 0x46, 0x9e, 0xe0, 0xbf, 0x9e, 0xa3, 0x01, 0x2f, 0xa6, 0x9a, 0xd8, + 0x79, 0xcf, 0xea, 0x72, 0xe6, 0x28, 0x9c, 0x79, 0x0b, 0x9d, 0xe7, 0xc0, 0xdb, 0xd3, 0xe6, 0x81, + 0x89, 0x3a, 0xa3, 0xa2, 0xb8, 0x76, 0x19, 0x7f, 0x26, 0x1f, 0xe6, 0xc9, 0xd6, 0x3f, 0xf6, 0x51, + 0xc3, 0xe9, 0x4a, 0x07, 0x06, 0x25, 0xa0, 0x46, 0xbd, 0x87, 0x09, 0x01, 0x4f, 0x85, 0x00, 0x41, + 0xe2, 0x8a, 0x69, 0x0b, 0x05, 0xd5, 0x32, 0x7e, 0xca, 0x5e, 0xa1, 0xc2, 0x4d, 0x78, 0x5e, 0x78, + 0xde, 0xbd, 0x7a, 0x09, 0xb9, 0xd1, 0x87, 0x72, 0x17, 0xf3, 0x40, 0xe5, 0x50, 0xff, 0x8c, 0x02, + 0x74, 0x56, 0xa2, 0xbe, 0x95, 0xe9, 0x56, 0xb8, 0xdb, 0xd7, 0x17, 0xdb, 0x3f, 0x82, 0xa8, 0x4b, + 0x41, 0xb6, 0xbc, 0x1b, 0x16, 0x07, 0xa0, 0xc0, 0x46, 0xcf, 0xea, 0xf3, 0x0f, 0x47, 0x6c, 0xed, + 0x32, 0xe1, 0x9f, 0x71, 0x75, 0xdb, 0xdd, 0x6a, 0x81, 0x32, 0x6f, 0x60, 0xa3, 0xdb, 0x28, 0x5d, + 0x69, 0x67, 0x62, 0x83, 0x3c, 0x9d, 0xff, 0xe5, 0x16, 0x71, 0xde, 0x10, 0x6d, 0xd4, 0x54, 0x4a, + 0x7b, 0xb1, 0x9c, 0xd5, 0x8a, 0x7b, 0x3d, 0xa4, 0xeb, 0x2b, 0x64, 0x13, 0x4f, 0x18, 0x90, 0x1e, + 0x71, 0xb0, 0xd4, 0xfa, 0x7e, 0x74, 0x53, 0xb9, 0x69, 0xdc, 0x50, 0x17, 0x35, 0x11, 0xa3, 0x45, + 0x87, 0xc8, 0x4b, 0xa8, 0x56, 0x34, 0x93, 0x5b, 0x66, 0x36, 0x3b, 0x31, 0x3b, 0x7a, 0xd4, 0x2f, + 0xb2, 0x11, 0x83, 0xbd, 0xad, 0x77, 0x52, 0x14, 0x87, 0x09, 0x1b, 0xab, 0x14, 0x0c, 0x21, 0xa9, + 0x99, 0x75, 0xa0, 0x7d, 0x00, 0x4c, 0xea, 0x9a, 0xe7, 0x50, 0x29, 0xb1, 0x1d, 0x51, 0x1f, 0xcd, + 0x01, 0xb2, 0x14, 0x85, 0x97, 0x86, 0xfd, 0xbc, 0x02, 0xb8, 0x2a, 0xb2, 0x6d, 0x38, 0x31, 0xb6, + 0xd7, 0x48, 0x39, 0x7c, 0xb6, 0xfe, 0xe2, 0x42, 0xe0, 0x2f, 0xbe, 0xc1, 0x3a, 0x73, 0x4f, 0x13, + 0x1c, 0xe9, 0x7a, 0xa9, 0xee, 0xbe, 0x8d, 0xe7, 0x63, 0x51, 0x42, 0xfa, 0x34, 0x9b, 0x56, 0x42, + 0xf7, 0x5e, 0xd7, 0x4e, 0xaf, 0xea, 0x21, 0x2f, 0x66, 0x1a, 0xbf, 0xa0, 0x5d, 0x43, 0x6c, 0xf8, + 0x0a, 0xbb, 0x50, 0xd1, 0xef, 0x09, 0xc5, 0x80, 0x1f, 0xbc, 0x58, 0xc3, 0xca, 0x49, 0xa6, 0x44, + 0x91, 0xbc, 0xc1, 0x16, 0x19, 0x68, 0x82, 0xd4, 0x70, 0x8d, 0xf5, 0x13, 0x20, 0xcf, 0x3b, 0x19, + 0x8c, 0x18, 0x2e, 0x4b, 0xa4, 0x31, 0x09, 0x9f, 0xdb, 0x6e, 0x9c, 0x4a, 0x06, 0x1c, 0x99, 0x10, + 0xf7, 0x5e, 0xb6, 0x97, 0x7e, 0xc3, 0x4b, 0xf6, 0x8b, 0x8c, 0xb5, 0x30, 0xdf, 0x4a, 0xfc, 0xe3, + 0x34, 0x70, 0x2a, 0x22, 0x84, 0x04, 0x42, 0x9f, 0xf6, 0xe7, 0x3f, 0xf1, 0x72, 0x99, 0xa7, 0xff, + 0xb6, 0xc3, 0x89, 0x46, 0x20, 0x7f, 0x51, 0x31, 0xc5, 0x01, 0x18, 0xb3, 0x5e, 0xfb, 0x73, 0xad, + 0x1d, 0x1b, 0x8e, 0x0d, 0x9c, 0xe9, 0xa1, 0x55, 0xf3, 0x0a, 0x58, 0x9f, 0xf4, 0xc2, 0x8e, 0x9c, + 0x5b, 0x95, 0x87, 0xf8, 0xf7, 0x6a, 0x3a, 0xc3, 0x9d, 0x44, 0xeb, 0xfb, 0xcb, 0xd6, 0x74, 0xfc, + 0x51, 0xc2, 0x35, 0xad, 0xca, 0xab, 0xcf, 0xe0, 0xc1, 0x12, 0xde, 0x1d, 0xfd, 0xe4, 0xfe, 0xad, + 0xfb, 0x93, 0xe0, 0xfe, 0x43, 0x11, 0x75, 0x6d, 0x1e, 0x4f, 0x3d, 0x6d, 0xb3, 0x5c, 0x2a, 0x8e, + 0x18, 0x3b, 0x75, 0x8b, 0xe0, 0x22, 0xc7, 0xd4, 0xd8, 0xb8, 0x59, 0xbe, 0xdd, 0xbd, 0xd6, 0x2d, + 0x1e, 0x3e, 0xe6, 0x9d, 0x24, 0x3a, 0x36, 0xf3, 0xb2, 0x87, 0x39, 0xef, 0xe7, 0x52, 0x8f, 0x6c, + 0x27, 0xa2, 0xe3, 0x37, 0x1c, 0xb5, 0xab, 0xc1, 0x19, 0x2a, 0x04, 0x9a, 0x74, 0xc5, 0xea, 0xdc, + 0x76, 0xb0, 0xd3, 0xae, 0xfc, 0x43, 0x95, 0x74, 0x45, 0x6a, 0xc1, 0x14, 0xc4, 0x42, 0xee, 0xf3, + 0x8f, 0xc8, 0x07, 0xb5, 0xc5, 0x49, 0x26, 0x68, 0xa0, 0x02, 0x24, 0x25, 0x75, 0x99, 0xcf, 0x3b, + 0xc6, 0x1c, 0x1c, 0x79, 0x33, 0x50, 0x38, 0x0d, 0x1a, 0x17, 0x47, 0x8f, 0xc1, 0xf3, 0xf2, 0x3a, + 0x51, 0x09, 0x3b, 0xac, 0x0e, 0x5a, 0x2c, 0x5d, 0x8f, 0xfb, 0xa9, 0x21, 0xb5, 0xd4, 0x7d, 0xf0, + 0x3d, 0xa9, 
0x48, 0xa4, 0x55, 0x29, 0xc5, 0xd9, 0x86, 0xae, 0x42, 0x35, 0xec, 0x06, 0x7c, 0xe5, + 0xe9, 0x92, 0x17, 0xf1, 0xdd, 0x76, 0xdf, 0xf0, 0xb7, 0x2d, 0xd3, 0xbc, 0xa6, 0xf9, 0xfd, 0x0b, + 0x70, 0xbf, 0xe9, 0xe0, 0x89, 0x06, 0xe8, 0x89, 0xe4, 0xcb, 0x9b, 0x91, 0x9b, 0x31, 0x8e, 0xe4, + 0x31, 0x6f, 0x31, 0x69, 0x57, 0xb2, 0xab, 0x43, 0xfc, 0xd6, 0x37, 0x74, 0x41, 0x46, 0xc7, 0x0b, + 0x87, 0x7d, 0x6e, 0x40, 0x4a, 0x30, 0xb0, 0x8d, 0xf8, 0x66, 0x16, 0xce, 0xae, 0xab, 0x25, 0x0f, + 0x71, 0x25, 0xad, 0x23, 0xd7, 0x6a, 0xdb, 0xa7, 0x15, 0x30, 0xe2, 0x2c, 0x27, 0x14, 0x5f, 0x98, + 0xd7, 0x37, 0xf0, 0xdc, 0xe8, 0xeb, 0x9e, 0x59, 0xe2, 0xf6, 0xe1, 0x37, 0xd2, 0xb2, 0x2a, 0x07, + 0xed, 0xe0, 0x8a, 0x61, 0x4b, 0x4e, 0x60, 0xe3, 0xf8, 0xaf, 0xe9, 0x44, 0x27, 0xa8, 0x80, 0xc7, + 0x37, 0x71, 0x85, 0x74, 0x14, 0x57, 0xbf, 0xfd, 0x71, 0xd7, 0x6d, 0x46, 0x7f, 0xfb, 0x51, 0x78, + 0x8a, 0xc0, 0x6b, 0x93, 0x38, 0x3b, 0xa2, 0x7b, 0xaf, 0xe9, 0xdc, 0x24, 0x6c, 0x35, 0x76, 0x8a, + 0xb9, 0x6d, 0x87, 0xc6, 0x61, 0x44, 0x79, 0xcf, 0x48, 0x0f, 0x73, 0xe6, 0x8e, 0x7e, 0xa2, 0x66, + 0x67, 0x00, 0xcc, 0xac, 0xbc, 0xc0, 0xc1, 0x1b, 0x09, 0xdc, 0x61, 0x19, 0xf0, 0x99, 0x78, 0x22, + 0x8d, 0x45, 0x33, 0x7c, 0x52, 0xad, 0x08, 0x5d, 0x9c, 0xe1, 0x99, 0xd1, 0xa3, 0x13, 0x26, 0x26, + 0x90, 0xc3, 0x6e, 0x70, 0x10, 0x3e, 0x3d, 0x42, 0xa5, 0x95, 0x26, 0xba, 0xe1, 0x49, 0xc4, 0x56, + 0xe3, 0xab, 0xd6, 0x54, 0x9f, 0xc8, 0xfe, 0xa3, 0x56, 0x0e, 0xe1, 0xa5, 0x15, 0x87, 0x67, 0xbb, + 0x96, 0xb9, 0x98, 0xb7, 0x55, 0x52, 0x62, 0x29, 0xa4, 0xec, 0x49, 0x47, 0x6f, 0x33, 0x6b, 0xf5, + 0xca, 0xb3, 0xd5, 0x64, 0x20, 0x81, 0x57, 0xea, 0xef, 0xad, 0x37, 0xc5, 0x90, 0xd7, 0xb7, 0x3f, + 0xeb, 0x9e, 0xfb, 0x50, 0x12, 0xfe, 0x99, 0x2b, 0x7b, 0x02, 0x46, 0x20, 0x1d, 0x88, 0x2d, 0xbf, + 0xeb, 0x87, 0x23, 0xe6, 0x3c, 0xee, 0x60, 0x6e, 0xb9, 0x6b, 0x4b, 0x4a, 0x00, 0xc7, 0xb4, 0x7a, + 0xf2, 0x5b, 0xa6, 0xe3, 0xeb, 0x42, 0x3f, 0x46, 0xaf, 0x1f, 0x15, 0xce, 0x83, 0x30, 0x12, 0x43, + 0xd1, 0x77, 0x40, 0xb7, 0xcc, 0x6d, 0x2a, 0x28, 0xa5, 0x03, 0x0a, 0xc1, 0x90, 0x96, 0x21, 0x56, + 0x31, 0x0b, 0x44, 0x22, 0x69, 0x73, 0x60, 0xa6, 0x36, 0x8d, 0xad, 0xaa, 0x31, 0x29, 0x9c, 0xef, + 0x02, 0x59, 0xc7, 0xad, 0x86, 0x11, 0xb7, 0x6d, 0x2d, 0xb1, 0x34, 0x6f, 0x74, 0x9f, 0x53, 0x84, + 0xd3, 0xd9, 0x4d, 0x91, 0x8b, 0xfc, 0x5f, 0x18, 0x0a, 0xb1, 0xdf, 0xc3, 0x7e, 0x71, 0xc1, 0xa9, + 0x1b, 0x8d, 0x5d, 0x3f, 0xe7, 0x02, 0xe5, 0xea, 0x51, 0x97, 0x6a, 0x61, 0x64, 0xa9, 0x0e, 0xf2, + 0x00, 0xe1, 0x41, 0xb7, 0x9d, 0xa6, 0xbe, 0x0a, 0x1b, 0x81, 0x30, 0x83, 0x06, 0xbd, 0x88, 0xe1, + 0x9f, 0xbe, 0x4e, 0xec, 0x18, 0x4a, 0x57, 0x17, 0xe4, 0xd8, 0x84, 0x25, 0x25, 0x7b, 0x3d, 0x94, + 0x16, 0xc0, 0x42, 0x8d, 0x79, 0x45, 0x4f, 0x6d, 0x96, 0x95, 0x35, 0x03, 0xbf, 0xe4, 0x0e, 0x53, + 0x2b, 0x7d, 0x03, 0x9b, 0x1f, 0xca, 0xf1, 0xfa, 0xa4, 0xc8, 0xfa, 0x29, 0x68, 0x2e, 0x56, 0x3d, + 0x8e, 0xe0, 0xe6, 0x64, 0x13, 0x33, 0xb6, 0x6b, 0x28, 0xd7, 0x1a, 0x70, 0xac, 0xe2, 0x98, 0x44, + 0x27, 0xfc, 0xd6, 0x12, 0x04, 0x1c, 0x46, 0x62, 0x46, 0x5d, 0x64, 0x4d, 0xbb, 0x17, 0x4d, 0x01, + 0xe5, 0x40, 0x84, 0x94, 0x57, 0xff, 0x78, 0xec, 0x3d, 0x17, 0xcc, 0x48, 0xf1, 0x7a, 0xdd, 0x8a, + 0x09, 0x18, 0xce, 0xd2, 0x78, 0x9f, 0xfd, 0x12, 0x2a, 0xcf, 0x68, 0x45, 0x88, 0x2c, 0x18, 0xaf, + 0x99, 0x6f, 0x8a, 0xcb, 0x5e, 0x0a, 0xf0, 0xd4, 0xa4, 0xd4, 0xea, 0x10, 0xf4, 0x35, 0x2c, 0x76, + 0x79, 0xab, 0x0e, 0x05, 0xf8, 0x6f, 0xf2, 0x21, 0xac, 0x29, 0xc9, 0x67, 0x28, 0x88, 0xeb, 0xbb, + 0x52, 0x80, 0x59, 0xfc, 0x6c, 0xf4, 0x88, 0x82, 0x60, 0xdb, 0xf2, 0xba, 0x51, 0x7b, 0x00, 0x63, + 0x02, 0x3a, 0x11, 0xbc, 0xa5, 0xdc, 
0xb6, 0x1b, 0xa2, 0x34, 0xb8, 0x2a, 0x58, 0x18, 0x3b, 0x19, + 0xb7, 0xd2, 0x44, 0xe8, 0xca, 0xe0, 0x88, 0xdb, 0xca, 0xe9, 0xa1, 0x79, 0x1d, 0x56, 0x3e, 0x86, + 0xce, 0xf3, 0x88, 0x9b, 0x80, 0x9f, 0x2d, 0x0e, 0xaf, 0x5b, 0x52, 0xb2, 0x75, 0x2d, 0x16, 0x86, + 0x7c, 0xe3, 0xc9, 0x14, 0xf2, 0x9b, 0x77, 0x4f, 0x8f, 0x58, 0xfa, 0xfd, 0x1b, 0x18, 0x09, 0xda, + 0xd4, 0x63, 0xa9, 0x20, 0xa8, 0xbf, 0x7a, 0x60, 0x49, 0x6d, 0xd1, 0x70, 0x7b, 0x49, 0x12, 0xf6, + 0xfb, 0x39, 0xf2, 0x8a, 0x6b, 0x05, 0x81, 0xa3, 0xf1, 0xc0, 0xb7, 0x1c, 0x68, 0x4d, 0xe4, 0x84, + 0x9e, 0x79, 0xd9, 0x4e, 0x56, 0xf2, 0xc3, 0x64, 0x4e, 0xf7, 0x68, 0x7a, 0x6a, 0xfd, 0xcd, 0xad, + 0x6c, 0x19, 0xcc, 0x51, 0x90, 0x19, 0x73, 0x21, 0x5c, 0xa9, 0x4a, 0x23, 0x30, 0x78, 0xc0, 0x0f, + 0x3e, 0x0f, 0xc7, 0xcf, 0x84, 0x43, 0x7d, 0xf0, 0xc5, 0x48, 0x9a, 0x27, 0xa3, 0x47, 0xa0, 0xd8, + 0x54, 0x3d, 0x02, 0x16, 0xae, 0xf6, 0x22, 0x5e, 0x3e, 0xd1, 0xf2, 0x62, 0x04, 0x9e, 0x7c, 0x41, + 0xfe, 0x3f, 0x83, 0x71, 0x5b, 0xc4, 0x57, 0xda, 0xcc, 0x6f, 0xdb, 0x92, 0x80, 0xef, 0x8a, 0x8b, + 0x8d, 0x36, 0x87, 0xa2, 0x64, 0xf4, 0xbf, 0xe4, 0x80, 0xe9, 0xc2, 0x65, 0xe9, 0x88, 0x17, 0xc5, + 0x32, 0x16, 0x7d, 0xbb, 0x7d, 0x05, 0x54, 0x00, 0x04, 0x32, 0x07, 0x98, 0x92, 0xdf, 0xa7, 0xed, + 0x8d, 0xbc, 0x26, 0xb7, 0x9d, 0x37, 0x92, 0x1b, 0xa2, 0xc2, 0x97, 0x1b, 0x90, 0xec, 0xc1, 0xd2, + 0xa6, 0x75, 0x5c, 0x67, 0x4f, 0x0e, 0x03, 0x13, 0x4b, 0xfd, 0xd8, 0x68, 0xb9, 0xc0, 0x30, 0xd9, + 0xa1, 0xd4, 0xba, 0x49, 0xc8, 0xdb, 0x38, 0xd0, 0x23, 0xc1, 0x86, 0xa4, 0x28, 0x20, 0xc9, 0x11, + 0xf3, 0x18, 0xee, 0x23, 0xc9, 0xdf, 0xa3, 0x25, 0x35, 0x69, 0xdd, 0xf4, 0xc8, 0x9d, 0xbe, 0x41, + 0x62, 0xff, 0x8e, 0xf6, 0x99, 0x24, 0x0e, 0xd7, 0x30, 0x09, 0x4d, 0x0f, 0x5c, 0x1e, 0x96, 0xa5, + 0xfc, 0xbb, 0x04, 0x32, 0x48, 0xcc, 0x64, 0x02, 0x7a, 0x57, 0xd5, 0xe7, 0x14, 0x05, 0xd4, 0xae, + 0x59, 0xfe, 0x87, 0x8c, 0x7a, 0x6e, 0x9e, 0xef, 0x22, 0xc1, 0x97, 0x7a, 0xb0, 0x5b, 0x47, 0x07, + 0xdf, 0x7b, 0xb2, 0xda, 0x7c, 0xeb, 0xb5, 0x0c, 0x67, 0x38, 0x53, 0xd6, 0x82, 0x45, 0x1e, 0xa0, + 0xb0, 0x71, 0xed, 0x83, 0x9e, 0x58, 0x95, 0x1e, 0xf5, 0x67, 0x6f, 0x5d, 0x6e, 0xb1, 0x12, 0x76, + 0x6e, 0x4a, 0x73, 0x5e, 0x57, 0xf9, 0xbe, 0xf8, 0x77, 0xfc, 0xdc, 0x37, 0xf3, 0xbf, 0xbe, 0x24, + 0x92, 0xf8, 0xba, 0xda, 0xbc, 0x77, 0x94, 0x67, 0x0e, 0x88, 0x21, 0x5f, 0x6e, 0xb1, 0x2f, 0x91, + 0xf5, 0xfe, 0x74, 0x2d, 0xb3, 0x2d, 0x0d, 0xe7, 0x0b, 0x01, 0x5c, 0xc0, 0x74, 0x99, 0x66, 0x67, + 0x40, 0x55, 0x6c, 0x50, 0x76, 0x78, 0xc2, 0x90, 0xf2, 0xd3, 0x91, 0xb1, 0xce, 0x51, 0xe0, 0xaf, + 0x0c, 0x4f, 0x0a, 0xc9, 0x55, 0x8d, 0xf9, 0x6c, 0x76, 0x0b, 0x8e, 0x99, 0x81, 0x88, 0x75, 0xd5, + 0xe3, 0x8c, 0xa9, 0xa6, 0xa5, 0x3f, 0xef, 0x40, 0xa0, 0x5d, 0x8f, 0x7d, 0xc2, 0x9c, 0xa0, 0x40, + 0xa7, 0x6c, 0x6c, 0xcf, 0x5a, 0xe1, 0xb1, 0xa6, 0xdc, 0x54, 0x17, 0xbc, 0x46, 0x4b, 0x42, 0xe3, + 0x3c, 0x97, 0xde, 0x77, 0x0b, 0x89, 0x47, 0x92, 0x5d, 0xc4, 0x01, 0x9f, 0x50, 0x5d, 0x4f, 0xb5, + 0x6e, 0xcd, 0x6c, 0x48, 0x7b, 0xdc, 0xa9, 0xd1, 0x73, 0xab, 0xe6, 0xef, 0x35, 0x62, 0x41, 0x71, + 0xcf, 0xea, 0x57, 0x7b, 0xff, 0x4e, 0xf0, 0xb7, 0xea, 0x62, 0x07, 0xcf, 0x4f, 0x09, 0x7c, 0xa1, + 0xde, 0xfc, 0x1e, 0xa2, 0xcb, 0x74, 0xec, 0x07, 0x7f, 0x64, 0x19, 0x6d, 0x39, 0x9d, 0xd4, 0x1a, + 0xdc, 0x23, 0xe9, 0x00, 0x22, 0xcd, 0x33, 0xc9, 0x0c, 0xe4, 0xf5, 0x44, 0xfc, 0x52, 0xf2, 0x89, + 0x60, 0x05, 0x41, 0x32, 0x31, 0x4e, 0xef, 0x66, 0xa9, 0x25, 0x39, 0x97, 0x57, 0x80, 0xe9, 0x80, + 0x48, 0xfd, 0x0f, 0x9c, 0x36, 0xc9, 0x09, 0x55, 0xdf, 0xfc, 0x84, 0x8b, 0x92, 0x6f, 0xe7, 0xab, + 0xad, 0xf5, 0xe4, 0xcf, 0x42, 0x9b, 0x7b, 0xc7, 0x99, 0xaa, 
0xa7, 0x13, 0xf4, 0xa5, 0x02, 0x62, + 0x90, 0x0f, 0x21, 0x8f, 0x87, 0x3f, 0xf9, 0x63, 0x22, 0xe2, 0x47, 0xb6, 0xe8, 0x0f, 0xd6, 0x6d, + 0x10, 0xcb, 0x95, 0x62, 0xc8, 0x5c, 0xd3, 0x89, 0x14, 0x30, 0x3a, 0x72, 0x18, 0x59, 0x0e, 0xc9, + 0xf0, 0x6c, 0xb3, 0xfd, 0x63, 0xd5, 0x80, 0xa1, 0xa3, 0xd9, 0x6e, 0xf2, 0x3d, 0xfc, 0xf1, 0x47, + 0xbc, 0x4e, 0x36, 0xd9, 0xa1, 0x24, 0x87, 0xea, 0xf2, 0x28, 0x7a, 0xdb, 0xd1, 0xb1, 0x9b, 0xfd, + 0x64, 0x81, 0x3c, 0x33, 0x18, 0x56, 0x5e, 0x97, 0x05, 0xed, 0x84, 0x7e, 0x5c, 0x83, 0x60, 0x86, + 0x5e, 0x9b, 0xae, 0xc3, 0xa8, 0xeb, 0xa5, 0xf8, 0x95, 0x9b, 0x17, 0x5b, 0x45, 0xb9, 0x00, 0x9f, + 0xf8, 0xc3, 0xe8, 0x74, 0xa1, 0xd6, 0xc6, 0xf0, 0x4b, 0xb0, 0xfe, 0x16, 0xee, 0x01, 0xb3, 0xe7, + 0xb5, 0xde, 0x03, 0x15, 0xcc, 0x2e, 0xd5, 0xfe, 0x6c, 0x76, 0x2f, 0xec, 0x6d, 0x59, 0xe6, 0x11, + 0xbc, 0xcf, 0xe6, 0xd3, 0x65, 0x11, 0x2d, 0x6c, 0x29, 0xac, 0x63, 0xfa, 0x0b, 0xcc, 0xa6, 0x8c, + 0x37, 0x6a, 0xe8, 0xc0, 0xf4, 0x82, 0x9a, 0x83, 0x07, 0x87, 0xee, 0x28, 0x85, 0xe9, 0x9b, 0x37, + 0xea, 0x63, 0xeb, 0x6c, 0x24, 0xac, 0xdc, 0xf8, 0xd6, 0x11, 0x8e, 0xa1, 0xbf, 0xfa, 0xbf, 0x5e, + 0x4a, 0x33, 0x15, 0x7a, 0x76, 0x5f, 0x78, 0x16, 0x87, 0x85, 0x57, 0xb7, 0xee, 0x61, 0x7f, 0x74, + 0xe8, 0x38, 0xa5, 0xfc, 0xd7, 0xba, 0xfd, 0x55, 0x76, 0x6c, 0xca, 0xfb, 0x01, 0x02, 0xa5, 0x47, + 0xc1, 0x5f, 0xe3, 0x17, 0x83, 0x4d, 0x1f, 0xa2, 0xe4, 0x27, 0xaa, 0x75, 0xa6, 0xc7, 0xd1, 0x8f, + 0xf0, 0xd5, 0x0c, 0xfe, 0x56, 0xda, 0xd7, 0xb6, 0x74, 0x7e, 0x73, 0x1b, 0xca, 0x8a, 0xc1, 0x7a, + 0x69, 0x3a, 0xc5, 0x4e, 0x0f, 0x4b, 0xd0, 0xb5, 0x16, 0x1d, 0x96, 0xaa, 0xad, 0x9e, 0x28, 0xb2, + 0x39, 0x54, 0x48, 0xb8, 0xcc, 0xa0, 0x76, 0x16, 0x06, 0xe0, 0x88, 0x45, 0xe5, 0xfe, 0x67, 0x62, + 0x51, 0x67, 0xba, 0x73, 0x94, 0x32, 0x77, 0x99, 0xa7, 0x74, 0x70, 0x2a, 0xf3, 0xec, 0x10, 0x34, + 0x80, 0x29, 0x66, 0x22, 0x8f, 0x82, 0x50, 0x78, 0xcd, 0x7f, 0xee, 0xae, 0x52, 0xbe, 0xeb, 0x49, + 0x9f, 0x7f, 0x69, 0xf6, 0x41, 0x78, 0x00, 0x03, 0x1e, 0x05, 0xbe, 0x03, 0x19, 0x1f, 0x70, 0x5c, + 0x8b, 0xae, 0x8e, 0xab, 0xb4, 0x9f, 0xd9, 0xe1, 0x62, 0x4c, 0xb5, 0x90, 0xfc, 0x04, 0xfb, 0xe9, + 0x0c, 0x2a, 0x95, 0x79, 0x35, 0x8c, 0xa8, 0x82, 0xa0, 0xb1, 0x38, 0xef, 0x17, 0x22, 0x1a, 0xf0, + 0x11, 0xf9, 0x62, 0x6e, 0x0a, 0x4c, 0x04, 0xb6, 0x12, 0x20, 0xa9, 0xe2, 0x8b, 0xfe, 0x9d, 0x24, + 0x21, 0xef, 0x44, 0xf6, 0x84, 0xaa, 0xc2, 0xac, 0x2a, 0x4a, 0xd5, 0x95, 0x9b, 0x46, 0x10, 0x1f, + 0xbe, 0xd5, 0x84, 0x67, 0x96, 0xbb, 0xbb, 0x2b, 0xf1, 0x69, 0x9a, 0xfb, 0x19, 0x4e, 0x5d, 0x02, + 0x90, 0x5a, 0x40, 0xe8, 0x19, 0x23, 0xea, 0x83, 0x4d, 0xec, 0x9d, 0x00, 0x25, 0xe3, 0x98, 0x9a, + 0xf1, 0xbd, 0x6f, 0xa4, 0xfe, 0xb1, 0x2c, 0x27, 0x4c, 0x6c, 0x80, 0x45, 0x48, 0x37, 0x5a, 0x3f, + 0x75, 0x7a, 0x35, 0x45, 0x81, 0xae, 0x49, 0xd3, 0x9b, 0x56, 0xe3, 0x04, 0xee, 0xfd, 0x98, 0x76, + 0x66, 0x26, 0x73, 0xac, 0x58, 0xdf, 0xf0, 0xc4, 0x0b, 0x32, 0x5c, 0xae, 0x38, 0xc5, 0x16, 0x68, + 0xe9, 0x0d, 0x2c, 0x62, 0x08, 0x20, 0x3e, 0x44, 0xcb, 0xbb, 0x91, 0x35, 0x41, 0x2a, 0x68, 0xbc, + 0xbc, 0x2d, 0xa4, 0xa8, 0x09, 0x21, 0xf4, 0x8f, 0x04, 0x36, 0xd8, 0x9f, 0x2a, 0x30, 0x28, 0xdc, + 0xbd, 0x3e, 0xc1, 0x95, 0x75, 0xc3, 0x18, 0x80, 0xb7, 0x3d, 0xf6, 0xf6, 0xbd, 0xb8, 0x01, 0xdd, + 0xe1, 0x70, 0x99, 0x86, 0x58, 0xab, 0x00, 0x82, 0xec, 0x95, 0xa4, 0x50, 0xae, 0x99, 0x1e, 0x80, + 0xb0, 0x78, 0xc5, 0xa6, 0x22, 0x89, 0x81, 0xfa, 0xe5, 0x40, 0xd4, 0xa5, 0x9c, 0xcb, 0xd7, 0xfa, + 0x8b, 0x5f, 0x1c, 0x7a, 0xe3, 0xb0, 0xea, 0xf1, 0xa1, 0x33, 0x23, 0x74, 0xa4, 0x02, 0x59, 0xf7, + 0xa2, 0xd2, 0xc7, 0x9c, 0xca, 0xb5, 0x0e, 0x10, 0xa9, 0x65, 0xa8, 0x45, 0xa0, 0x61, 
0x21, 0x83, + 0x15, 0xb3, 0xd4, 0xc0, 0x89, 0x54, 0xa8, 0xfe, 0x8b, 0xa4, 0x9a, 0x22, 0x9c, 0xe3, 0xcc, 0x9f, + 0x55, 0xb8, 0x9e, 0x0a, 0xac, 0xe4, 0x3b, 0xd5, 0xe3, 0x58, 0x17, 0x93, 0xd1, 0x1e, 0x28, 0x45, + 0x2d, 0x64, 0xe1, 0xb2, 0x13, 0x65, 0xdc, 0x6e, 0x40, 0x3d, 0x8d, 0xe6, 0xf0, 0x85, 0x08, 0xce, + 0xb0, 0x6d, 0xa7, 0x2f, 0xac, 0xbf, 0xb7, 0x1c, 0x8c, 0x63, 0x96, 0x1a, 0xce, 0x56, 0x33, 0x9e, + 0xe5, 0x8b, 0x2c, 0x12, 0xa9, 0x3b, 0x50, 0xf3, 0x4a, 0xef, 0x17, 0xe3, 0xa6, 0x9a, 0xad, 0xc7, + 0x2d, 0xe3, 0x51, 0x0a, 0x29, 0x55, 0x1f, 0xd2, 0x68, 0x93, 0xe7, 0x5d, 0xbd, 0x8a, 0xdc, 0xa2, + 0xcd, 0x8a, 0x5a, 0xf0, 0xee, 0x1d, 0xac, 0xf8, 0xed, 0xd3, 0x31, 0x33, 0xed, 0x25, 0x27, 0x6a, + 0x53, 0x8d, 0x61, 0x48, 0xf6, 0x24, 0x2f, 0x50, 0x96, 0x0e, 0x44, 0x2d, 0x37, 0x9c, 0xe7, 0x69, + 0x73, 0xf7, 0x22, 0x2b, 0xaa, 0xd9, 0x20, 0x1b, 0xba, 0x38, 0xbb, 0x80, 0x54, 0x05, 0x22, 0x8e, + 0x60, 0xb3, 0x6c, 0xd0, 0x6c, 0x6e, 0x37, 0xad, 0xf1, 0x4a, 0x06, 0x32, 0xdc, 0x9a, 0x7a, 0x36, + 0xf2, 0xa9, 0x6d, 0x0b, 0x1f, 0x18, 0x9d, 0xcc, 0xb6, 0xb4, 0xbe, 0x12, 0x92, 0x8e, 0xd4, 0xb9, + 0x48, 0xe4, 0xe5, 0x60, 0xde, 0xba, 0x4f, 0x13, 0x96, 0x90, 0x37, 0xa5, 0x71, 0xb5, 0x2a, 0xe7, + 0x2a, 0xa8, 0x06, 0xf4, 0x3b, 0xed, 0x87, 0x57, 0x82, 0x1b, 0xd5, 0x80, 0x56, 0x30, 0xfb, 0x3d, + 0x8a, 0xac, 0x0c, 0x92, 0xc3, 0x16, 0x20, 0xf3, 0x89, 0xef, 0x4e, 0xef, 0xc2, 0x5e, 0x2d, 0x84, + 0x72, 0xd7, 0xf1, 0xb5, 0x8f, 0x20, 0x72, 0xb8, 0xa3, 0xf2, 0x6c, 0xc4, 0x9f, 0x13, 0x2d, 0xf9, + 0x05, 0x12, 0xf1, 0x87, 0xf7, 0xd3, 0x28, 0x95, 0x07, 0x50, 0x4e, 0x83, 0x7f, 0x05, 0x9b, 0xdc, + 0x02, 0x27, 0x36, 0x16, 0x75, 0x86, 0x4e, 0x50, 0xc7, 0x5a, 0xd1, 0xc5, 0x64, 0x06, 0x4d, 0xeb, + 0x9b, 0x5e, 0x3e, 0xf7, 0xa2, 0x2b, 0xcb, 0x9b, 0x85, 0x99, 0x68, 0x0d, 0x2f, 0x68, 0x15, 0x1b, + 0x0c, 0xac, 0xbb, 0x60, 0x21, 0xa8, 0x24, 0x93, 0x30, 0xa8, 0x48, 0xa0, 0x24, 0x0a, 0x29, 0x01, + 0xc8, 0xf0, 0xf4, 0xaa, 0x5c, 0x44, 0xd6, 0xf1, 0x4b, 0x85, 0x61, 0xb1, 0xdb, 0x4a, 0x32, 0xe6, + 0xf5, 0xd5, 0x86, 0xc4, 0x48, 0x34, 0x64, 0x04, 0x9e, 0x1b, 0xa3, 0x72, 0x2a, 0x78, 0x50, 0x9e, + 0x5a, 0xcf, 0xe9, 0xd2, 0x41, 0xfb, 0xc2, 0x00, 0x38, 0x8b, 0xcb, 0x93, 0x82, 0x54, 0x3e, 0xbc, + 0x9e, 0x18, 0xd8, 0x01, 0x99, 0x23, 0x46, 0xe7, 0x74, 0x98, 0x57, 0xfb, 0x32, 0x78, 0x06, 0xe6, + 0xa2, 0x39, 0xc7, 0xd2, 0x17, 0xbc, 0x53, 0x0a, 0xe2, 0x7a, 0xde, 0xd3, 0xb2, 0xe0, 0xbf, 0x3a, + 0xf5, 0xf0, 0x3d, 0xd9, 0xb7, 0x89, 0xc2, 0xf8, 0x5d, 0xc5, 0x38, 0xf3, 0x50, 0x08, 0x01, 0x82, + 0x63, 0x3f, 0x67, 0x2c, 0xb8, 0xb8, 0x88, 0x4a, 0xbc, 0xbc, 0x87, 0xf3, 0xbf, 0x71, 0x5b, 0xf6, + 0x04, 0x49, 0x5c, 0xfa, 0xe7, 0xcb, 0x97, 0xe5, 0xb8, 0xde, 0x87, 0x7a, 0xb7, 0x3b, 0x1e, 0xb9, + 0x4b, 0x77, 0x2b, 0x3d, 0xd1, 0x91, 0xbe, 0x4f, 0xa8, 0x9f, 0x43, 0x4d, 0x4c, 0x47, 0x8a, 0xe4, + 0x8b, 0x5b, 0x81, 0xe4, 0x84, 0x92, 0x60, 0xfd, 0xa8, 0x2b, 0x7f, 0xcf, 0x1f, 0xe1, 0x8d, 0x63, + 0xcf, 0xbf, 0x01, 0xd8, 0xf9, 0xdf, 0x27, 0xfb, 0xc5, 0x9f, 0x75, 0x12, 0xb9, 0xb9, 0x1d, 0x61, + 0x8f, 0x96, 0x33, 0x5d, 0x24, 0xc1, 0xed, 0x54, 0x97, 0x8e, 0x7d, 0x5d, 0x1e, 0x35, 0x23, 0x64, + 0xd2, 0x76, 0x8b, 0xa9, 0xa3, 0xaf, 0x22, 0xf0, 0xd2, 0x7a, 0x72, 0x3c, 0x4f, 0x51, 0x92, 0xd2, + 0xd1, 0xce, 0xbe, 0x18, 0x06, 0x8c, 0x37, 0x40, 0x23, 0x1c, 0x24, 0x2a, 0x69, 0x86, 0x74, 0x76, + 0x29, 0x05, 0x22, 0x43, 0x64, 0x49, 0xa2, 0xd6, 0x6f, 0x95, 0xe6, 0x3b, 0xbd, 0x52, 0x09, 0x1b, + 0x79, 0xd5, 0xdc, 0xe8, 0x44, 0xf1, 0xb3, 0x1f, 0xda, 0xe9, 0xb9, 0xa0, 0xc8, 0x5d, 0x1a, 0x19, + 0x6c, 0xe3, 0xc0, 0xef, 0xc8, 0x70, 0xb4, 0x48, 0x17, 0x41, 0x26, 0x35, 0x9d, 0x21, 0x16, 0x28, + 0xc0, 0x33, 
0x32, 0xe2, 0x89, 0x24, 0xdb, 0x99, 0x2f, 0x4e, 0xb2, 0x18, 0x4e, 0x36, 0xd5, 0x8d, + 0x83, 0x05, 0xe7, 0x92, 0x0a, 0xaa, 0xdd, 0xf8, 0x5a, 0xec, 0x15, 0xf7, 0x6d, 0xbc, 0x71, 0x49, + 0x11, 0x8a, 0xcf, 0x2a, 0x28, 0x04, 0x37, 0xa5, 0x16, 0x7b, 0xd2, 0x87, 0x32, 0xfe, 0x64, 0xc4, + 0x42, 0x09, 0xbd, 0x36, 0x3a, 0x74, 0xbf, 0xfa, 0x8b, 0xb1, 0xde, 0x9c, 0xc5, 0x5e, 0x3c, 0x3c, + 0xfa, 0xc5, 0x6e, 0x3b, 0x86, 0x1c, 0x63, 0x89, 0xea, 0x40, 0x43, 0x00, 0x03, 0x16, 0xc0, 0x22, + 0xcc, 0x0a, 0x22, 0x02, 0x7b, 0x4a, 0x1d, 0xf3, 0xa2, 0x0e, 0xba, 0x13, 0xed, 0x29, 0x9a, 0x34, + 0x7c, 0x83, 0x85, 0xc7, 0xb4, 0x69, 0x3a, 0x02, 0x0f, 0xa0, 0x4a, 0x48, 0x13, 0x73, 0xf3, 0x96, + 0x90, 0xa9, 0xd0, 0xa6, 0x1d, 0x8d, 0xd5, 0x5d, 0xfe, 0x66, 0x28, 0xda, 0x15, 0xd6, 0x1b, 0x40, + 0x76, 0xb4, 0xfe, 0x63, 0x6a, 0x2e, 0x3f, 0xf4, 0x2d, 0xfd, 0x25, 0xb5, 0xd6, 0xf9, 0xa9, 0x40, + 0x34, 0xd8, 0x1d, 0x62, 0xb5, 0x3c, 0xfc, 0x39, 0x94, 0x23, 0xf0, 0x09, 0x0e, 0xbb, 0xa1, 0x53, + 0xa5, 0x71, 0x49, 0x59, 0x47, 0xce, 0x6d, 0xfa, 0xcc, 0x48, 0x5a, 0xcd, 0x2d, 0xa5, 0xb5, 0x08, + 0x8d, 0x03, 0x9c, 0xfd, 0xc6, 0xcd, 0x4a, 0xb1, 0x38, 0x14, 0x2d, 0x8e, 0xc8, 0xa5, 0x30, 0xb9, + 0x05, 0x46, 0x66, 0x31, 0x64, 0x8a, 0x8b, 0x63, 0xa6, 0xa2, 0x94, 0xd5, 0x78, 0x0b, 0xd1, 0xfa, + 0x64, 0x5b, 0x72, 0x87, 0x5b, 0x03, 0x90, 0xa4, 0x9f, 0xc5, 0xeb, 0xc3, 0x2f, 0xb7, 0xf6, 0x70, + 0x9d, 0x7e, 0x44, 0xdd, 0xd7, 0x7f, 0x69, 0xfa, 0x17, 0x61, 0xa5, 0xad, 0x38, 0xe6, 0x55, 0xad, + 0xf1, 0x3c, 0x6f, 0xc2, 0x21, 0x25, 0x29, 0x04, 0x70, 0x62, 0xc6, 0x70, 0x6c, 0xfb, 0x1e, 0x5c, + 0x8f, 0xf5, 0xd4, 0x65, 0xf3, 0xfb, 0xad, 0xc3, 0xb4, 0xbb, 0xe7, 0x47, 0xfa, 0xd6, 0xd4, 0x8d, + 0xa0, 0x23, 0xae, 0xc7, 0x33, 0x9b, 0xe0, 0x8d, 0x74, 0xa3, 0x02, 0x95, 0x39, 0xac, 0x0b, 0x30, + 0xaf, 0x35, 0x73, 0xb0, 0x5f, 0x1d, 0xf6, 0x77, 0x3f, 0xfc, 0x81, 0xcb, 0xce, 0x7c, 0x77, 0x84, + 0xeb, 0xab, 0xa7, 0x8f, 0x39, 0x2c, 0xbb, 0x36, 0xdb, 0x21, 0xf8, 0x33, 0x00, 0x08, 0x49, 0x79, + 0xf3, 0xa6, 0x6b, 0x20, 0xdc, 0x73, 0xb1, 0x15, 0xb1, 0x7b, 0x46, 0x08, 0xaa, 0x2d, 0xaf, 0xee, + 0xf1, 0xd5, 0xfe, 0xdc, 0xdb, 0xbf, 0xad, 0x41, 0xde, 0x2c, 0xd8, 0x42, 0xf9, 0x20, 0x09, 0xba, + 0x71, 0x5b, 0x4e, 0x5b, 0xd7, 0xe2, 0x9b, 0x63, 0xb6, 0xf5, 0x57, 0xe1, 0xe4, 0x67, 0xbc, 0xba, + 0x6b, 0x85, 0xa4, 0xe4, 0x8c, 0x6e, 0x96, 0xe4, 0xb6, 0x9a, 0xab, 0xc7, 0xeb, 0x1c, 0x7a, 0x35, + 0x3b, 0x29, 0xfb, 0xb0, 0x16, 0xa9, 0x46, 0x84, 0x85, 0x6a, 0xa5, 0x09, 0x55, 0x16, 0xcf, 0x90, + 0x85, 0x28, 0xa5, 0x20, 0x3c, 0xed, 0xf9, 0xb9, 0xbb, 0x6a, 0x03, 0xe8, 0x56, 0xdd, 0x26, 0x70, + 0x13, 0x61, 0x1a, 0xd4, 0x4a, 0xbb, 0x6a, 0x4e, 0x28, 0xca, 0xb1, 0x50, 0xe4, 0x0a, 0x48, 0xab, + 0x5a, 0xbe, 0xd5, 0x5a, 0xe6, 0xf1, 0x58, 0x7b, 0x3b, 0xab, 0x00, 0x41, 0xdd, 0xe9, 0x7f, 0x64, + 0x69, 0x20, 0xb8, 0xaa, 0xa9, 0xfa, 0x33, 0xb8, 0x1a, 0x53, 0xc6, 0x85, 0xc7, 0xff, 0x50, 0xbc, + 0x58, 0xb6, 0x83, 0xf6, 0x73, 0xd9, 0x38, 0x1c, 0x19, 0x74, 0x65, 0xec, 0xf4, 0xeb, 0x4b, 0x53, + 0x95, 0x4a, 0x95, 0x29, 0x8e, 0xd2, 0xcd, 0x17, 0x72, 0x9e, 0x0a, 0xc8, 0xd9, 0xe8, 0xf2, 0xfd, + 0x36, 0x11, 0x56, 0x47, 0x9b, 0xba, 0xa1, 0xcc, 0x87, 0xf2, 0x18, 0x19, 0x09, 0x61, 0xfd, 0xf1, + 0xd8, 0x39, 0xf1, 0x0c, 0x3f, 0x76, 0x99, 0x0f, 0xd3, 0x3f, 0x91, 0xf1, 0x7f, 0x2b, 0x30, 0x1a, + 0x5a, 0xc1, 0xb7, 0xe0, 0x63, 0xc2, 0xdd, 0xaa, 0x36, 0x6d, 0x3d, 0x0f, 0xec, 0xe1, 0x6f, 0x97, + 0x34, 0xce, 0x3f, 0xab, 0x2b, 0x72, 0xd9, 0x23, 0x7b, 0xd7, 0x97, 0xdc, 0x07, 0x99, 0x9f, 0x02, + 0xdf, 0x21, 0xf7, 0x13, 0xc9, 0x67, 0xf5, 0x65, 0xae, 0xcc, 0xbf, 0xf3, 0xc2, 0xc3, 0x30, 0x4f, + 0x13, 0x53, 0xe8, 0x38, 0x60, 0x87, 
0xc3, 0xe4, 0xf6, 0xa9, 0x69, 0x38, 0x5c, 0x9a, 0xaf, 0xc4, + 0x81, 0x9d, 0xe6, 0xff, 0x39, 0xbf, 0x9e, 0x82, 0xb8, 0xd9, 0x01, 0xe8, 0xa8, 0x6f, 0xd7, 0xc9, + 0xba, 0x6f, 0x27, 0xe5, 0x71, 0x5f, 0xb5, 0x9a, 0x0b, 0xa0, 0x75, 0xb4, 0x0a, 0xc6, 0x7b, 0xa8, + 0xd4, 0x03, 0x0a, 0xa0, 0x2e, 0xdf, 0xe6, 0x71, 0x6e, 0x89, 0xb6, 0xbf, 0x25, 0x07, 0x01, 0x3b, + 0x1d, 0x4a, 0xbd, 0x1d, 0x35, 0x08, 0x00, 0x30, 0x2d, 0x5e, 0x4a, 0x25, 0xfe, 0x6f, 0x7c, 0xad, + 0x46, 0x52, 0x9e, 0xd2, 0xb0, 0x71, 0xd1, 0x21, 0x57, 0x9c, 0x91, 0x84, 0xc6, 0x4c, 0x1d, 0x62, + 0x08, 0xa5, 0x62, 0x21, 0xbd, 0x2c, 0xad, 0xec, 0xa5, 0x2d, 0x73, 0x6b, 0x95, 0xb3, 0x6e, 0xa4, + 0xf9, 0xde, 0xe6, 0x04, 0x64, 0xac, 0x72, 0x67, 0xc6, 0x25, 0x87, 0x1c, 0xcb, 0x42, 0x2b, 0x93, + 0x4d, 0x18, 0x4a, 0x31, 0xc5, 0x3d, 0x09, 0x2b, 0x55, 0x50, 0x0d, 0x80, 0x64, 0x89, 0x3f, 0xfc, + 0xdf, 0xfe, 0x92, 0x96, 0x56, 0x81, 0xbe, 0x53, 0x8d, 0x3f, 0x19, 0x76, 0x50, 0x14, 0xc6, 0x75, + 0xaa, 0x41, 0xdb, 0xae, 0xb4, 0x01, 0x40, 0x2e, 0x00, 0x24, 0xe1, 0xe7, 0xab, 0xdf, 0x7c, 0x16, + 0x8c, 0xb2, 0x9b, 0xc7, 0x3c, 0xa8, 0xc0, 0x59, 0x5d, 0x08, 0x7c, 0xfc, 0x27, 0x89, 0x84, 0x66, + 0x11, 0xca, 0xb8, 0x8c, 0x4e, 0xaf, 0xa9, 0xb3, 0xe2, 0x7b, 0xd5, 0x7e, 0x37, 0xa9, 0x66, 0xe0, + 0xbb, 0x7e, 0xb8, 0xb9, 0x12, 0xcc, 0xba, 0x00, 0x5e, 0xc8, 0x5a, 0x96, 0x99, 0x7a, 0xad, 0x63, + 0x14, 0xa0, 0x20, 0x95, 0x36, 0x2f, 0x0e, 0x08, 0xae, 0xd4, 0xa7, 0x9c, 0x4d, 0x63, 0x04, 0xba, + 0x5b, 0x96, 0x58, 0x5b, 0x24, 0xa9, 0x79, 0x12, 0x26, 0x61, 0x85, 0x0c, 0x17, 0xd0, 0xe0, 0xcc, + 0xab, 0x6c, 0x9b, 0xbd, 0xc3, 0x30, 0xc7, 0x41, 0x0f, 0x40, 0x78, 0xa2, 0x3f, 0x8f, 0x94, 0x96, + 0xd7, 0x56, 0xf3, 0x96, 0x84, 0x9a, 0x0d, 0xfa, 0x64, 0x81, 0x91, 0x8f, 0x89, 0x12, 0x91, 0x99, + 0x0b, 0x4d, 0xa4, 0xc4, 0x02, 0x69, 0x1d, 0x85, 0x1f, 0xcb, 0x8b, 0x43, 0xdf, 0xf4, 0x51, 0xce, + 0x1a, 0x00, 0x0f, 0x2e, 0xf3, 0xab, 0xae, 0xf6, 0x24, 0x8f, 0x7f, 0x66, 0xeb, 0x1f, 0x82, 0x09, + 0xf9, 0x92, 0x91, 0x08, 0xb3, 0x16, 0x33, 0xfc, 0x7d, 0xf3, 0x9a, 0x26, 0x7d, 0x69, 0x6e, 0x9c, + 0x08, 0xdb, 0x72, 0x36, 0x36, 0x8a, 0xa7, 0x03, 0x6f, 0xc2, 0xcb, 0x24, 0x6e, 0x15, 0x8f, 0x41, + 0xbb, 0xb9, 0x0f, 0x1b, 0x33, 0xfd, 0x5d, 0xcf, 0x74, 0x78, 0x91, 0x36, 0x3b, 0x65, 0x27, 0x4e, + 0x74, 0x04, 0x84, 0xd1, 0x42, 0x39, 0x85, 0x8e, 0x05, 0x5a, 0x5b, 0xc1, 0xa9, 0x99, 0x32, 0x49, + 0x79, 0xb1, 0x28, 0x99, 0x79, 0x25, 0xec, 0x5a, 0x60, 0xec, 0x43, 0x2d, 0xbd, 0x8e, 0xd6, 0x20, + 0x5b, 0xd8, 0xd4, 0x6f, 0x8b, 0xc1, 0xc1, 0x57, 0x54, 0xa5, 0xdc, 0x05, 0x3b, 0x47, 0xdb, 0xee, + 0xe4, 0xa4, 0xc1, 0xc7, 0x3d, 0xfb, 0x1c, 0x9f, 0x55, 0x11, 0xc5, 0xd3, 0x8a, 0xcb, 0xff, 0x1e, + 0x49, 0xe7, 0x5c, 0xb1, 0x0e, 0x53, 0x8b, 0x78, 0x3a, 0xd9, 0xb5, 0xe4, 0x40, 0x08, 0xbc, 0xa4, + 0x8a, 0x6a, 0xc0, 0x7b, 0xa1, 0x56, 0x9c, 0x4d, 0x74, 0xee, 0x68, 0x0b, 0x59, 0x51, 0xc6, 0x35, + 0x32, 0xca, 0x81, 0xbf, 0x7b, 0xeb, 0x62, 0x2c, 0x74, 0x7c, 0x94, 0xb8, 0x7a, 0x58, 0x65, 0x9f, + 0x85, 0x21, 0x4f, 0x8c, 0xad, 0x51, 0xd0, 0x27, 0xc6, 0x86, 0xa7, 0x91, 0xe6, 0x59, 0x46, 0xfa, + 0x2c, 0x17, 0xe8, 0xbf, 0x5c, 0x29, 0xd1, 0x9f, 0x90, 0xd8, 0xa2, 0xb7, 0xa1, 0xdb, 0x96, 0x14, + 0xc9, 0x09, 0x0b, 0x18, 0x73, 0x49, 0x48, 0x38, 0x59, 0x6d, 0xd3, 0x1c, 0x7a, 0xef, 0xd5, 0x9b, + 0xed, 0x46, 0xce, 0xb7, 0xa8, 0xdd, 0xa4, 0xc5, 0x20, 0x3d, 0x00, 0xab, 0x0a, 0x7a, 0x03, 0x95, + 0x97, 0x49, 0xa2, 0x8e, 0xb5, 0xc1, 0xce, 0x64, 0x34, 0x31, 0x4d, 0xa6, 0x73, 0x61, 0xc7, 0x67, + 0x09, 0x97, 0xad, 0x56, 0xcb, 0xa2, 0x72, 0x5a, 0xca, 0xe7, 0x93, 0xa9, 0x5e, 0x19, 0xc8, 0x0e, + 0x5b, 0x2b, 0x36, 0xef, 0xf0, 0x1e, 0x14, 0xd0, 0xc3, 0xaa, 
0xff, 0x85, 0x31, 0x99, 0x34, 0x81, + 0xf4, 0x02, 0x82, 0x28, 0xe7, 0x12, 0x0a, 0x81, 0xdd, 0x04, 0x25, 0x2c, 0xc0, 0xd3, 0x8e, 0xa1, + 0x33, 0xd7, 0x00, 0x05, 0xb6, 0x2c, 0x90, 0xeb, 0x9a, 0xa5, 0xe1, 0x3f, 0x12, 0xb8, 0xce, 0xe4, + 0xff, 0xf3, 0x78, 0x49, 0xf4, 0xca, 0xc8, 0x28, 0xb5, 0x15, 0x14, 0x39, 0x6b, 0xda, 0x41, 0x9d, + 0xbb, 0x72, 0x06, 0x99, 0x3a, 0x20, 0xd4, 0xa8, 0xab, 0x94, 0x40, 0xb1, 0xf6, 0x56, 0xbe, 0xde, + 0xf8, 0x66, 0xf8, 0xdc, 0xc7, 0xca, 0xcc, 0x00, 0x33, 0x4b, 0x8f, 0x36, 0x53, 0x23, 0xeb, 0x61, + 0xd2, 0xa6, 0xef, 0xde, 0x7f, 0x36, 0x8c, 0xcd, 0x30, 0x10, 0x51, 0x4f, 0x65, 0x02, 0x6c, 0x10, + 0x3f, 0x6a, 0x90, 0x5d, 0x71, 0x78, 0xa7, 0xea, 0x14, 0x88, 0xf6, 0x4d, 0x93, 0x16, 0xa0, 0x23, + 0x42, 0xbf, 0x61, 0x1b, 0xb8, 0xb3, 0xa3, 0x98, 0xb2, 0x31, 0xf7, 0x63, 0x34, 0xe6, 0xaf, 0xd8, + 0xc2, 0x59, 0xcc, 0x57, 0x54, 0x83, 0x8f, 0x6b, 0x72, 0xc7, 0xbb, 0x62, 0x8d, 0xac, 0x9c, 0xaa, + 0x8a, 0xa2, 0x63, 0x2f, 0x96, 0xea, 0xde, 0x45, 0xa9, 0x3b, 0x0a, 0xf2, 0x21, 0x7e, 0x33, 0xf5, + 0x5f, 0x89, 0xb7, 0xc2, 0x1f, 0x95, 0x79, 0x7a, 0xa0, 0x13, 0x2e, 0x04, 0x4f, 0x4c, 0x06, 0xdf, + 0x68, 0x18, 0xd6, 0xd5, 0xc1, 0x4a, 0xfe, 0x33, 0xcc, 0xbc, 0x34, 0x5f, 0x40, 0xcd, 0xb0, 0xbc, + 0xd9, 0x4a, 0x07, 0xf1, 0x5a, 0x2f, 0xdc, 0x97, 0x16, 0xa9, 0x5e, 0x61, 0xb6, 0xf6, 0x95, 0xf3, + 0x08, 0x57, 0x92, 0xbf, 0xc5, 0x2a, 0x71, 0x51, 0x68, 0x52, 0xdd, 0x5b, 0x5c, 0x19, 0x3c, 0x35, + 0x66, 0xe2, 0xfd, 0x17, 0x93, 0x29, 0x39, 0x32, 0x6f, 0x0d, 0x5b, 0xa9, 0x46, 0xe6, 0xe5, 0x7e, + 0xeb, 0xa0, 0xd7, 0x61, 0x79, 0x34, 0x10, 0x63, 0x87, 0xde, 0xfc, 0xea, 0x17, 0xbd, 0x8b, 0x91, + 0xd1, 0xad, 0xd8, 0x67, 0xdf, 0xfb, 0xb8, 0xe4, 0x2c, 0x8e, 0x3c, 0x5d, 0x35, 0x12, 0xd3, 0x68, + 0xe0, 0xeb, 0xf1, 0x2c, 0x2c, 0x45, 0x82, 0x8e, 0x3a, 0x82, 0x58, 0xd1, 0x7a, 0xd0, 0x9e, 0xd1, + 0xa0, 0x06, 0x24, 0xbd, 0x5c, 0x19, 0x08, 0x46, 0x93, 0x40, 0x71, 0x47, 0x5d, 0xf4, 0xaf, 0xb3, + 0x1c, 0x0a, 0xce, 0xb6, 0x06, 0xed, 0x6b, 0x0a, 0xe0, 0xbc, 0x12, 0x79, 0xcf, 0xe0, 0x53, 0x62, + 0xa5, 0x3a, 0xd3, 0xbf, 0xc0, 0x7c, 0x78, 0x88, 0xc9, 0x66, 0xf6, 0x46, 0x0d, 0x51, 0x19, 0x1e, + 0x89, 0xb0, 0xca, 0x5a, 0xfa, 0xc0, 0xc9, 0x49, 0x08, 0x7e, 0x7d, 0xb2, 0xf1, 0xc2, 0x29, 0x19, + 0x0a, 0x8d, 0x41, 0xba, 0x5f, 0xca, 0xb6, 0xc4, 0x45, 0x7e, 0xda, 0x17, 0xbf, 0x4b, 0x85, 0x6e, + 0x81, 0x5d, 0x28, 0x3e, 0xf1, 0x4f, 0xad, 0x1a, 0xbf, 0x25, 0x8c, 0x9a, 0xa3, 0x39, 0x30, 0xce, + 0xd9, 0x02, 0x83, 0x0b, 0x82, 0xb5, 0x08, 0x95, 0xc5, 0x85, 0xe5, 0x12, 0x84, 0xf7, 0xb8, 0xfd, + 0x09, 0x1f, 0x07, 0xa8, 0x1d, 0x88, 0xc2, 0xdf, 0x93, 0x23, 0xee, 0x76, 0x75, 0x92, 0x16, 0x7c, + 0x64, 0xcb, 0x29, 0x53, 0x27, 0x3f, 0xb0, 0x8f, 0xc2, 0xc5, 0x4e, 0xfb, 0xd6, 0xf2, 0x8b, 0xc4, + 0xbd, 0xaf, 0xe8, 0x0f, 0x89, 0xf1, 0x63, 0xc5, 0x77, 0xb5, 0x77, 0x78, 0x0b, 0xd1, 0xbc, 0xd9, + 0x84, 0x47, 0x1f, 0x65, 0x19, 0xaa, 0x81, 0xa9, 0xae, 0x5e, 0xd3, 0x6b, 0xa4, 0x55, 0x5e, 0x8a, + 0x0c, 0x54, 0x2a, 0x7e, 0x1f, 0x7e, 0x85, 0x87, 0xfd, 0x13, 0xd1, 0xff, 0xa1, 0x89, 0x21, 0xb5, + 0x91, 0x7b, 0x5d, 0x50, 0x07, 0x31, 0x9c, 0x2c, 0x97, 0x5b, 0x88, 0xd8, 0xed, 0xc8, 0x41, 0xf4, + 0xcf, 0xe6, 0x2d, 0x89, 0xbc, 0x53, 0x5d, 0xd1, 0xd7, 0xaa, 0x4a, 0x10, 0x23, 0xa3, 0x26, 0x12, + 0x2e, 0x71, 0xea, 0xba, 0x54, 0x68, 0xba, 0x74, 0xf1, 0x58, 0x80, 0x9c, 0xef, 0xc1, 0x9b, 0x54, + 0xf6, 0xe2, 0xf8, 0xf3, 0xcd, 0x74, 0xda, 0x57, 0xeb, 0x8b, 0xc2, 0x15, 0x45, 0x74, 0xdb, 0x0e, + 0x58, 0xe0, 0xc4, 0x34, 0x98, 0xc0, 0x63, 0x34, 0x63, 0xaa, 0xc0, 0x93, 0x6a, 0x48, 0xd1, 0x92, + 0xe3, 0x8e, 0xcb, 0x72, 0x88, 0xcb, 0xf2, 0xad, 0x27, 0xac, 0x19, 0x73, 0xef, 0xc4, 
0x1d, 0xf8, + 0xb8, 0x9f, 0x0d, 0xa6, 0x2c, 0xba, 0xd5, 0x83, 0x80, 0x0c, 0xd6, 0xcc, 0x44, 0xdf, 0x79, 0x9e, + 0xa1, 0x3c, 0xf8, 0x0c, 0x6c, 0xbc, 0x0b, 0x28, 0xb1, 0xe8, 0xf4, 0xfa, 0xed, 0xfe, 0xc6, 0x2e, + 0x2f, 0xfe, 0xa2, 0x00, 0x27, 0x97, 0x75, 0x56, 0x0e, 0x37, 0x48, 0xdb, 0x43, 0xc9, 0xc0, 0x11, + 0x68, 0xb2, 0xfb, 0xe4, 0x99, 0xe8, 0x07, 0x33, 0x51, 0x3a, 0x1f, 0x04, 0xfd, 0x17, 0x52, 0x37, + 0x8f, 0x86, 0x6a, 0x7b, 0x5d, 0x2b, 0xd3, 0xb4, 0x35, 0x27, 0xbe, 0x27, 0x1b, 0xc9, 0x28, 0x2b, + 0x20, 0x56, 0x78, 0x0f, 0x81, 0x9a, 0x05, 0x98, 0x32, 0xd2, 0x04, 0x5c, 0x2a, 0x4a, 0x3e, 0x23, + 0x5c, 0xd9, 0xbd, 0x53, 0x6f, 0x5f, 0x8d, 0x7c, 0xc1, 0x76, 0x1d, 0x4d, 0x0e, 0xf3, 0x82, 0x45, + 0xf4, 0x7b, 0xe2, 0x19, 0x15, 0xbc, 0xea, 0xa9, 0xc6, 0x70, 0xf4, 0x09, 0x13, 0x04, 0xe7, 0xb0, + 0x0e, 0x48, 0xa3, 0x6d, 0xda, 0x84, 0x4d, 0x48, 0x15, 0xc4, 0x54, 0x57, 0xe1, 0x8f, 0x0b, 0xa9, + 0x2c, 0x11, 0x96, 0x1d, 0x8c, 0xa6, 0x96, 0xee, 0xe4, 0x1f, 0xa0, 0xc0, 0x20, 0x04, 0x6c, 0xf0, + 0xb8, 0x55, 0xde, 0xc5, 0xb6, 0x4d, 0x8a, 0x81, 0xb3, 0x7c, 0x31, 0xe6, 0x10, 0x3c, 0x68, 0x39, + 0xca, 0x6d, 0x5b, 0x46, 0xb2, 0xb6, 0x89, 0x59, 0xd8, 0x8a, 0x06, 0xa0, 0xb5, 0x84, 0x97, 0xcd, + 0x94, 0x08, 0x2b, 0x44, 0x9b, 0xd4, 0xd0, 0xd7, 0xb2, 0xbf, 0x3e, 0xf6, 0x15, 0x1d, 0x99, 0x98, + 0xbb, 0xe1, 0x74, 0x47, 0x0a, 0x48, 0x5b, 0xef, 0x08, 0x69, 0x8b, 0xeb, 0xe0, 0x4c, 0xdd, 0x72, + 0xd5, 0x32, 0xa1, 0x0b, 0xe1, 0xa6, 0xd8, 0x1d, 0xb3, 0xf2, 0x83, 0x9e, 0x26, 0x2a, 0xe1, 0xb7, + 0x30, 0x9f, 0xa4, 0x31, 0x32, 0x2f, 0x13, 0x3c, 0xf4, 0xe6, 0xd1, 0xd0, 0xef, 0x62, 0x95, 0x60, + 0x08, 0xc8, 0x1f, 0xdd, 0xdb, 0xf1, 0x6e, 0xd3, 0x27, 0xc8, 0x5b, 0x7d, 0x35, 0xa4, 0xe0, 0xa7, + 0xf6, 0x1c, 0x09, 0xca, 0x47, 0xba, 0x2a, 0x79, 0xfa, 0x76, 0x47, 0x9f, 0x8d, 0xb5, 0xb5, 0x69, + 0x54, 0x19, 0x91, 0xde, 0xfb, 0xdd, 0x16, 0x6e, 0x5b, 0x61, 0x35, 0x4b, 0xb1, 0xb2, 0xae, 0x23, + 0x95, 0x50, 0xc7, 0x01, 0x78, 0x1d, 0x00, 0x8b, 0x01, 0x27, 0x54, 0x11, 0x4a, 0x00, 0xad, 0x5c, + 0xb7, 0xd4, 0xd0, 0x2a, 0x7f, 0xed, 0xe6, 0xd8, 0x05, 0x43, 0x3d, 0xeb, 0x09, 0xb4, 0x6b, 0x03, + 0x04, 0x6f, 0xf1, 0xf4, 0xfb, 0xe2, 0xf8, 0x53, 0x75, 0x79, 0xe6, 0x15, 0x77, 0xb2, 0x54, 0x9d, + 0x53, 0x14, 0x0f, 0x6b, 0xe2, 0xf2, 0xf3, 0x57, 0x0b, 0x55, 0x69, 0xc3, 0xe2, 0xb3, 0x97, 0x4a, + 0x48, 0x84, 0x66, 0xcf, 0xa6, 0x4c, 0x5a, 0x86, 0x1a, 0x77, 0x56, 0x9c, 0xc1, 0xed, 0xac, 0x6d, + 0x3e, 0xf4, 0x88, 0xd7, 0xde, 0xd8, 0x72, 0x09, 0xcf, 0x15, 0x1e, 0xe1, 0xf3, 0x79, 0x67, 0x46, + 0x8b, 0x62, 0xd4, 0xd8, 0x9a, 0x8b, 0x59, 0xf6, 0x7d, 0x24, 0x83, 0xd9, 0xc1, 0xe1, 0xcb, 0xdc, + 0x94, 0xf4, 0x3f, 0xc5, 0x13, 0xbf, 0xde, 0xb8, 0x2b, 0x05, 0x6a, 0xbb, 0x48, 0xaa, 0xe4, 0xe4, + 0xa7, 0x8e, 0xdc, 0xd2, 0x84, 0x0f, 0x79, 0x42, 0xde, 0x93, 0xbc, 0x59, 0x36, 0x89, 0x85, 0x50, + 0x39, 0x84, 0x0e, 0x62, 0x33, 0xa2, 0xdd, 0x5e, 0x87, 0x85, 0xd9, 0x9e, 0x7e, 0xd3, 0x44, 0xdd, + 0x8b, 0xbc, 0x50, 0x89, 0x3e, 0x01, 0x00, 0xba, 0xa3, 0x77, 0xb2, 0x8a, 0x03, 0xd9, 0xc2, 0xdc, + 0x7f, 0x81, 0xa4, 0x05, 0x57, 0xb8, 0xb3, 0xfc, 0xdf, 0x75, 0x60, 0xde, 0xf2, 0x65, 0x51, 0x1d, + 0xf8, 0x9b, 0x5f, 0xbc, 0x4d, 0x98, 0xef, 0x53, 0x53, 0xfa, 0x76, 0xdc, 0x2c, 0x23, 0x12, 0x15, + 0x26, 0xc6, 0x71, 0x94, 0x38, 0x28, 0x47, 0x83, 0xf8, 0xd7, 0x1a, 0x8a, 0xbc, 0xb2, 0x54, 0x31, + 0x1b, 0xcb, 0xd9, 0xf2, 0xd9, 0x3c, 0xb5, 0x1b, 0x53, 0x6a, 0xa7, 0x23, 0x08, 0x74, 0x7f, 0x46, + 0xc7, 0xb3, 0xec, 0x66, 0xe5, 0x5c, 0xcb, 0xe2, 0xdd, 0xe0, 0x66, 0x0f, 0x3b, 0x1f, 0x2a, 0x26, + 0x8f, 0x1e, 0xee, 0x36, 0xd9, 0x3b, 0x4c, 0x0a, 0x54, 0x53, 0x66, 0xf2, 0x50, 0x79, 0x7a, 0x1c, + 0xf5, 0x25, 
0x63, 0xb8, 0x9f, 0x40, 0xb2, 0x12, 0x24, 0xcb, 0x42, 0x7b, 0xef, 0x80, 0xe0, 0xa8, + 0xee, 0x30, 0xa0, 0x3b, 0x62, 0xe0, 0x16, 0xd9, 0x88, 0x34, 0xfa, 0x2d, 0x26, 0xd2, 0x0b, 0x21, + 0x81, 0xa1, 0x31, 0xaa, 0x3d, 0x11, 0x27, 0xfc, 0x3f, 0x99, 0xe0, 0x21, 0xfb, 0x23, 0xbc, 0x99, + 0xa4, 0xa2, 0x9a, 0x11, 0x6b, 0x10, 0x3a, 0xa1, 0xb5, 0x7b, 0xf7, 0xf9, 0xf7, 0xcb, 0xe0, 0x57, + 0xf0, 0x42, 0x3f, 0x7c, 0xa0, 0xbc, 0x3d, 0x08, 0x84, 0x6e, 0x92, 0xa4, 0x26, 0x2b, 0x02, 0x07, + 0xbd, 0x56, 0x07, 0x9a, 0x59, 0x8e, 0x0c, 0xe5, 0xa2, 0x66, 0x00, 0x7f, 0x06, 0xd6, 0x48, 0xdd, + 0xdb, 0x6d, 0xfc, 0x07, 0x5f, 0x00, 0xc7, 0x41, 0x75, 0xca, 0xcc, 0x5b, 0x76, 0x76, 0xfb, 0xe2, + 0x37, 0xc9, 0x28, 0x64, 0x24, 0x65, 0x34, 0x23, 0x3f, 0xcb, 0x51, 0xcc, 0x84, 0x81, 0xef, 0x14, + 0x7e, 0x74, 0xd9, 0x7b, 0x5d, 0x0e, 0xff, 0xe8, 0xbc, 0x49, 0x4a, 0x70, 0x12, 0xba, 0xe5, 0x1c, + 0x08, 0xa7, 0xd4, 0x17, 0x7a, 0x34, 0xd9, 0xc4, 0x3f, 0x8a, 0x26, 0xc9, 0x4c, 0xce, 0xed, 0x58, + 0xe8, 0xb1, 0x1e, 0x52, 0x7b, 0xcb, 0x9e, 0xc4, 0x76, 0x92, 0xad, 0x09, 0xbf, 0x5a, 0xa0, 0x02, + 0x83, 0xbf, 0xde, 0x16, 0x40, 0xf8, 0x24, 0x2a, 0xf1, 0xf3, 0xc2, 0x22, 0xa5, 0xed, 0x7e, 0xb6, + 0x5c, 0x5a, 0x22, 0x6f, 0xc8, 0xa0, 0xbe, 0x67, 0xbd, 0x60, 0xf6, 0xa7, 0xe3, 0x69, 0x1c, 0x15, + 0x62, 0xff, 0x51, 0x90, 0xf5, 0x1f, 0xd1, 0x2a, 0x92, 0x22, 0x6a, 0xda, 0x45, 0x74, 0x18, 0x6d, + 0x35, 0x84, 0x72, 0xda, 0x36, 0x36, 0x91, 0x7a, 0x80, 0xdb, 0xd9, 0x22, 0xfb, 0x1d, 0xe9, 0xfe, + 0x73, 0xde, 0xfd, 0xbb, 0x3d, 0x2c, 0xc2, 0x95, 0x1a, 0xa0, 0x1a, 0x95, 0xb8, 0x02, 0x9c, 0x3c, + 0x13, 0x42, 0xd1, 0x0b, 0xe7, 0x9f, 0x28, 0x6e, 0x01, 0x92, 0x92, 0xfc, 0xa7, 0x08, 0x6d, 0xc6, + 0x66, 0x94, 0x6b, 0xd8, 0xe8, 0xd6, 0xbf, 0x46, 0x39, 0xd7, 0xe5, 0x2d, 0x6f, 0x32, 0xdd, 0xd2, + 0x11, 0x6b, 0xc8, 0x4d, 0xd4, 0x32, 0xe9, 0xee, 0xc3, 0xa5, 0x30, 0x9d, 0x8e, 0x09, 0x97, 0x25, + 0x87, 0xd7, 0x2b, 0xa0, 0x7d, 0xef, 0x24, 0xe6, 0x14, 0x5f, 0xfe, 0x27, 0x06, 0x7c, 0x93, 0x0f, + 0x47, 0xac, 0xf6, 0x58, 0xcf, 0x47, 0x52, 0xc5, 0xa8, 0xbb, 0x54, 0x1a, 0xb6, 0x9c, 0x7b, 0x4b, + 0x3e, 0xd6, 0x60, 0x01, 0x75, 0x87, 0x1c, 0xfe, 0xa6, 0x4e, 0x57, 0x7e, 0xa2, 0x6e, 0xba, 0x13, + 0xc5, 0xce, 0x54, 0xa3, 0xaa, 0x21, 0x1b, 0xd4, 0xe8, 0xf2, 0xd3, 0x1a, 0xb5, 0x54, 0xf7, 0x69, + 0x4c, 0xe6, 0xde, 0x70, 0x29, 0xae, 0x60, 0x32, 0xf0, 0x24, 0xc9, 0xba, 0x15, 0x68, 0xb7, 0x3d, + 0x5b, 0x87, 0x0f, 0x5e, 0x02, 0xce, 0xfc, 0x72, 0x66, 0x97, 0x36, 0x5e, 0x05, 0x70, 0xdd, 0x41, + 0x0f, 0x7f, 0x62, 0xf7, 0x5b, 0xd3, 0xbb, 0xcc, 0x91, 0x1e, 0x7c, 0xc9, 0xee, 0x9b, 0xdd, 0xe3, + 0xf1, 0x28, 0xd3, 0x31, 0x69, 0xd7, 0xa7, 0x94, 0x29, 0xc3, 0xce, 0xb3, 0x4e, 0x25, 0x87, 0x18, + 0x3c, 0x35, 0xc9, 0x99, 0xb9, 0x4a, 0x15, 0x5d, 0x8a, 0xbd, 0x13, 0xf1, 0x87, 0x52, 0x8e, 0x6a, + 0x08, 0x44, 0x08, 0x36, 0x1d, 0x57, 0x55, 0x54, 0xca, 0x2b, 0x13, 0x35, 0x3e, 0xac, 0xf7, 0xcc, + 0x5f, 0x49, 0xbf, 0x5a, 0x41, 0xaa, 0x5b, 0x97, 0x76, 0x31, 0x34, 0xa4, 0xcd, 0x3b, 0x04, 0xb7, + 0xeb, 0x37, 0xaa, 0x2b, 0x58, 0x19, 0x40, 0x30, 0xa7, 0x8a, 0xbe, 0xe3, 0x97, 0xc4, 0x04, 0xb0, + 0xdc, 0xf4, 0xcd, 0xda, 0x35, 0xfe, 0x34, 0xca, 0xef, 0x8d, 0x3b, 0xef, 0x19, 0xd2, 0xae, 0x56, + 0xc0, 0x98, 0xe9, 0xd4, 0x46, 0x58, 0xf5, 0xe8, 0x6b, 0x0c, 0x83, 0x9f, 0x24, 0x8a, 0x3a, 0x54, + 0x21, 0xf5, 0xa2, 0x7b, 0xe7, 0x41, 0x01, 0xff, 0x4c, 0x11, 0x80, 0xd0, 0xa5, 0x64, 0x77, 0x74, + 0x29, 0x6d, 0xa2, 0xfd, 0x6f, 0xff, 0x3c, 0xb1, 0x7d, 0x76, 0x51, 0x2b, 0xa0, 0x5c, 0x09, 0x40, + 0x41, 0x63, 0x25, 0x1b, 0x15, 0xf6, 0x3e, 0xd5, 0x74, 0x4e, 0xd0, 0xdb, 0xca, 0x46, 0xae, 0x36, + 0xca, 0x5a, 0x3e, 0x5a, 0xfc, 0x61, 
0x70, 0x6b, 0x79, 0x52, 0x1e, 0x7c, 0xda, 0xa9, 0x93, 0x29, + 0x13, 0xa7, 0x6d, 0x2f, 0x75, 0x58, 0x88, 0x94, 0xf3, 0x47, 0x1b, 0x8d, 0x51, 0x10, 0x61, 0xd3, + 0xcb, 0x68, 0x27, 0x89, 0x4e, 0x96, 0x42, 0xc8, 0x26, 0xe2, 0xc9, 0x2a, 0xff, 0x69, 0x86, 0x24, + 0x14, 0x9d, 0x2e, 0x8a, 0xb8, 0x9e, 0x96, 0x0d, 0x7b, 0x3c, 0xde, 0x7b, 0x08, 0x71, 0x00, 0xbf, + 0x4e, 0xd4, 0xf2, 0xb2, 0xd2, 0xd8, 0x6d, 0x2a, 0x12, 0x67, 0x64, 0xa1, 0xb7, 0xb4, 0xe9, 0xc5, + 0x9b, 0x59, 0xd1, 0xaa, 0x7d, 0x3a, 0x9c, 0x8c, 0x17, 0xd4, 0x39, 0x03, 0xd6, 0xdd, 0xa8, 0xeb, + 0xb2, 0x3f, 0x86, 0x7f, 0x1b, 0xe2, 0x33, 0x2a, 0xc4, 0x53, 0xca, 0xc2, 0xd7, 0xa5, 0x56, 0x26, + 0xfe, 0xc7, 0x84, 0x40, 0x08, 0xeb, 0x8b, 0xa7, 0x6e, 0x44, 0xe3, 0xdd, 0x59, 0x8b, 0xa3, 0x0e, + 0x55, 0xc5, 0x35, 0xa3, 0xb9, 0x94, 0xa2, 0x4b, 0xc3, 0x58, 0xa9, 0x72, 0x9f, 0xa7, 0x6c, 0x44, + 0x83, 0xa4, 0x84, 0x8d, 0xbc, 0x81, 0x3f, 0xf3, 0x52, 0xf8, 0x06, 0x6b, 0x5a, 0xa6, 0x46, 0x1b, + 0x15, 0x2d, 0x77, 0x80, 0x59, 0x52, 0x59, 0x74, 0x67, 0x8b, 0x98, 0x87, 0x43, 0x57, 0x5f, 0x24, + 0xa7, 0xdd, 0x99, 0x79, 0xf0, 0xf4, 0x79, 0xe8, 0x91, 0xd2, 0xb6, 0x6b, 0xc2, 0x0e, 0xf6, 0x2b, + 0x66, 0x7e, 0xe7, 0x04, 0x67, 0x1f, 0xc8, 0x69, 0xfd, 0xbe, 0xf3, 0x4d, 0xc0, 0x2e, 0x58, 0x9a, + 0x75, 0xb5, 0x65, 0xa3, 0xb9, 0x96, 0xdc, 0x22, 0xf2, 0x89, 0x93, 0xb4, 0x34, 0x04, 0xa0, 0xc8, + 0xea, 0xde, 0xcb, 0xf3, 0xec, 0xc2, 0x94, 0x32, 0xee, 0x00, 0xef, 0xcf, 0x31, 0x8b, 0x64, 0x74, + 0xb4, 0xf9, 0x94, 0x06, 0x58, 0x5a, 0x70, 0x59, 0xf7, 0xa3, 0xb2, 0xc4, 0xd1, 0x6a, 0x0f, 0x01, + 0x3d, 0xcd, 0xb8, 0x89, 0xd2, 0x6d, 0x15, 0x46, 0x3d, 0x88, 0xbb, 0xf8, 0x75, 0x88, 0x37, 0x19, + 0x2c, 0xb0, 0x98, 0x94, 0x3d, 0x1b, 0x27, 0xd9, 0xe6, 0x9b, 0xf9, 0x1d, 0x3b, 0xb5, 0x3d, 0x42, + 0x86, 0x1d, 0x9f, 0x79, 0xdf, 0x78, 0x21, 0x22, 0x8d, 0xd1, 0x42, 0xb6, 0xe1, 0xd6, 0x81, 0x6d, + 0x39, 0xe5, 0x95, 0xf2, 0xcf, 0xab, 0x32, 0x76, 0x1b, 0x29, 0xf0, 0x3b, 0xf4, 0xc6, 0x76, 0x09, + 0x73, 0xcf, 0xa0, 0x6b, 0xca, 0x84, 0xc8, 0x9c, 0x34, 0x89, 0xac, 0x8f, 0x6a, 0xcf, 0xc5, 0xc2, + 0xb5, 0x5c, 0x6f, 0x52, 0xac, 0x85, 0xd1, 0xe2, 0xcb, 0xdc, 0x9e, 0x40, 0x9a, 0xb9, 0x0b, 0xb0, + 0x0e, 0x8a, 0xe3, 0xe7, 0x44, 0x43, 0x37, 0xd2, 0xef, 0x3e, 0x18, 0x5a, 0xdd, 0xf5, 0x9b, 0xfc, + 0x0e, 0x9e, 0xfa, 0x8d, 0x29, 0x2c, 0x0a, 0xf0, 0x61, 0xcd, 0x2a, 0x70, 0xfb, 0x8e, 0xac, 0x19, + 0xfd, 0x09, 0xf1, 0x50, 0x5a, 0xfe, 0x5f, 0x22, 0x1f, 0x81, 0x21, 0xe9, 0x2a, 0x80, 0xdf, 0xa1, + 0xc3, 0xbc, 0x82, 0xf0, 0xfc, 0xff, 0xae, 0x5f, 0xb6, 0xf1, 0x9c, 0x3d, 0x86, 0xf8, 0x07, 0x00, + 0x26, 0x46, 0x95, 0x8b, 0x62, 0xae, 0x4f, 0x2b, 0xf2, 0x9c, 0xb4, 0x96, 0x2c, 0x1e, 0x50, 0x42, + 0xad, 0xf5, 0x95, 0xc9, 0x24, 0xd5, 0x2f, 0xa3, 0x6c, 0xed, 0x02, 0xeb, 0xb7, 0x8a, 0xbb, 0x0b, + 0xbf, 0x3c, 0xd7, 0x92, 0x45, 0xd5, 0xd7, 0xe8, 0xb4, 0x21, 0x04, 0x0d, 0x79, 0x5a, 0x38, 0xa9, + 0x29, 0xc6, 0x0d, 0x21, 0x03, 0x14, 0x76, 0xd0, 0x27, 0x79, 0xd8, 0x7b, 0x85, 0xc5, 0xb9, 0xdd, + 0x1b, 0x49, 0x1b, 0x2f, 0x50, 0x6c, 0x26, 0xd1, 0x49, 0x08, 0xe4, 0x2a, 0x7c, 0x0f, 0xee, 0x6e, + 0xeb, 0x9b, 0x7c, 0x7d, 0x76, 0xef, 0x65, 0x6e, 0x77, 0x4f, 0x3b, 0x4d, 0x0f, 0x35, 0x95, 0x2e, + 0x7d, 0xa1, 0xec, 0xaf, 0x6f, 0x4a, 0xd7, 0xad, 0xf5, 0xce, 0x35, 0x5a, 0x19, 0x6d, 0x24, 0x90, + 0x63, 0x83, 0x5f, 0x51, 0x96, 0x41, 0xa0, 0x19, 0x82, 0x97, 0x3a, 0x31, 0x94, 0x38, 0x73, 0xad, + 0xe8, 0x04, 0x12, 0xae, 0x89, 0xd7, 0x1d, 0x97, 0x97, 0xc6, 0x65, 0xa2, 0x4c, 0x7d, 0x64, 0xdc, + 0x08, 0xfa, 0xfd, 0x09, 0x36, 0x56, 0x30, 0xc1, 0xac, 0xaf, 0x43, 0xad, 0x51, 0x44, 0x29, 0xf2, + 0xfc, 0x82, 0xfd, 0xde, 0x82, 0x51, 0x73, 0x81, 0x40, 0x92, 
0x11, 0x40, 0x02, 0x29, 0x0f, 0xc8, + 0xbd, 0xd7, 0xb8, 0xb3, 0x52, 0xb0, 0xc1, 0x96, 0x1f, 0x90, 0xf4, 0x82, 0x9c, 0xcd, 0x4c, 0x35, + 0xe2, 0x7d, 0x37, 0x43, 0x57, 0x55, 0x78, 0xa1, 0xbb, 0x41, 0x83, 0xa1, 0x35, 0x7a, 0xbb, 0x81, + 0x7f, 0xa1, 0xfa, 0xb2, 0xf8, 0xc8, 0x99, 0x4d, 0x40, 0x64, 0x2d, 0x85, 0x3b, 0xbc, 0x94, 0x89, + 0xc2, 0xac, 0xe7, 0x8c, 0x06, 0x02, 0x7e, 0xde, 0x20, 0x9b, 0x4e, 0x74, 0xe3, 0x05, 0xe1, 0x16, + 0xa3, 0x2e, 0x77, 0xff, 0xf7, 0x63, 0x37, 0x47, 0xc4, 0x0c, 0x1b, 0xec, 0x6e, 0x96, 0xa4, 0xd0, + 0x27, 0x8c, 0x63, 0xb1, 0xb3, 0xd8, 0x23, 0x8d, 0x5f, 0x3b, 0x6d, 0x4b, 0x76, 0x23, 0x1d, 0xc1, + 0x97, 0xf3, 0x02, 0xc9, 0x5f, 0xad, 0x48, 0x0e, 0x24, 0x3c, 0xf6, 0x02, 0x4a, 0xcc, 0x79, 0x5b, + 0x28, 0x9c, 0x37, 0x8d, 0x05, 0x75, 0x68, 0xd8, 0xeb, 0x0a, 0x48, 0x84, 0xac, 0x09, 0x65, 0x5e, + 0xaf, 0x05, 0xcc, 0xad, 0x49, 0x11, 0x4e, 0x67, 0x80, 0xa8, 0x0b, 0x64, 0x43, 0x1c, 0x25, 0xc8, + 0xe5, 0x35, 0xc3, 0xf5, 0x55, 0x69, 0x6e, 0x23, 0x2a, 0x53, 0x1b, 0xbf, 0x9d, 0x5b, 0x9a, 0xd9, + 0xd5, 0x43, 0x11, 0x9b, 0x5e, 0x7e, 0x65, 0x3b, 0x8c, 0xa2, 0xde, 0x82, 0x8c, 0xe1, 0xc1, 0x54, + 0x70, 0x47, 0x81, 0x78, 0x6f, 0xd4, 0x47, 0xce, 0x5c, 0xc0, 0xa6, 0x94, 0x9f, 0x85, 0xc5, 0xf8, + 0xab, 0xd9, 0x7b, 0x99, 0x8b, 0x64, 0xba, 0x3d, 0x3a, 0x28, 0xae, 0xed, 0xb8, 0x1e, 0x0e, 0x0b, + 0xeb, 0xf7, 0x74, 0x9d, 0x19, 0x56, 0xf6, 0x68, 0x2f, 0xc7, 0x85, 0xa0, 0x39, 0x7f, 0x13, 0x0f, + 0x3f, 0x88, 0xb4, 0x7e, 0xd7, 0xb0, 0x3c, 0x34, 0xc0, 0xe4, 0xec, 0x53, 0xb6, 0xe9, 0x65, 0x4c, + 0x2e, 0xab, 0x6f, 0x59, 0x7a, 0x82, 0x6d, 0x4c, 0xf4, 0xbf, 0xfd, 0x8d, 0xf3, 0xbc, 0x83, 0x44, + 0x1b, 0x9e, 0x13, 0x2b, 0xe3, 0xe2, 0x6d, 0x59, 0x17, 0xea, 0x59, 0x81, 0xd4, 0xa9, 0x90, 0x1c, + 0x72, 0x92, 0xc2, 0x36, 0x64, 0x9f, 0x3e, 0x72, 0x3e, 0x62, 0x0e, 0xf7, 0x8b, 0x75, 0xa7, 0xa0, + 0x06, 0x38, 0xe8, 0x20, 0x55, 0xdd, 0xec, 0x0d, 0xb6, 0xed, 0xc3, 0xc7, 0xb4, 0x7b, 0x3f, 0x32, + 0x40, 0xd7, 0x3f, 0x8d, 0x97, 0x64, 0x2e, 0x0e, 0xb7, 0x5a, 0xd3, 0x2a, 0x14, 0x1c, 0x15, 0xbd, + 0x71, 0x2d, 0x81, 0xe3, 0xb4, 0x47, 0x56, 0xf8, 0x43, 0xcb, 0xaa, 0x06, 0x6e, 0x55, 0x16, 0x7d, + 0xce, 0x9f, 0x7d, 0xfc, 0xa1, 0xc3, 0x9b, 0x5f, 0x20, 0x3b, 0x87, 0x2f, 0xf5, 0x43, 0x7c, 0xf6, + 0xfb, 0xfc, 0xd0, 0x1e, 0xef, 0xf8, 0x50, 0x4e, 0x1c, 0x42, 0x2b, 0x82, 0xe5, 0xf4, 0xdf, 0x47, + 0x43, 0x6b, 0x5f, 0x2c, 0xc2, 0x2c, 0xfa, 0xbb, 0xa0, 0xea, 0xf6, 0x3b, 0x62, 0x74, 0x6e, 0x88, + 0x33, 0x19, 0x45, 0x63, 0xbc, 0x53, 0xe8, 0x90, 0xcf, 0xff, 0x3a, 0xad, 0x7e, 0x09, 0xc8, 0x68, + 0xe4, 0xa4, 0x0b, 0x20, 0x2e, 0x46, 0xfd, 0xe6, 0xdf, 0xd4, 0xc0, 0x61, 0x5a, 0x1e, 0x1e, 0x23, + 0xd1, 0x92, 0xff, 0xa9, 0xa0, 0x77, 0xc6, 0x54, 0x7e, 0x78, 0x58, 0x46, 0xa4, 0xa6, 0xab, 0x0f, + 0x80, 0x31, 0x29, 0x81, 0x5d, 0x9c, 0xc2, 0x84, 0x5f, 0xa0, 0x66, 0x8d, 0xc7, 0xe3, 0x1f, 0x70, + 0x42, 0xd3, 0xfa, 0xd8, 0x4f, 0xb1, 0xd2, 0x11, 0xcd, 0xbf, 0xfc, 0x62, 0x08, 0x97, 0xf7, 0x98, + 0xf7, 0xd0, 0x12, 0x3f, 0xc0, 0x88, 0xca, 0x34, 0xd7, 0x0a, 0xdb, 0x3d, 0x2a, 0x23, 0x21, 0xed, + 0x0b, 0x17, 0xc1, 0x87, 0x96, 0x86, 0x9a, 0xfd, 0x90, 0xc2, 0x78, 0x33, 0xec, 0x8b, 0x10, 0x7d, + 0xa9, 0xa3, 0x62, 0xfc, 0x02, 0xe5, 0xcc, 0xe4, 0x27, 0xf3, 0x26, 0x47, 0xc7, 0x86, 0x9a, 0xe4, + 0x53, 0xe2, 0xf5, 0xfc, 0x1e, 0x1e, 0x5d, 0x17, 0xb5, 0xf6, 0xa8, 0x84, 0xa8, 0xae, 0x07, 0x83, + 0x3a, 0xba, 0xf9, 0x5a, 0xdf, 0x67, 0x40, 0x0e, 0x00, 0xb7, 0x7f, 0xc9, 0xbe, 0xd9, 0xe0, 0x88, + 0xab, 0x7a, 0x1a, 0x56, 0xb0, 0x75, 0x49, 0xa9, 0x74, 0x64, 0xdf, 0xd2, 0xed, 0x4c, 0x3e, 0x96, + 0x16, 0x7e, 0xe1, 0xe2, 0x70, 0x23, 0x2e, 0x95, 0xe1, 0x47, 0x79, 0x0e, 0xe6, 0x99, 
0x1c, 0x11, + 0x57, 0x56, 0xcc, 0xed, 0xd4, 0x08, 0x6d, 0x2d, 0x3a, 0x93, 0x4f, 0xec, 0x0f, 0x03, 0xb2, 0x61, + 0x0a, 0xd9, 0x29, 0xf3, 0x11, 0xa1, 0x8e, 0xa9, 0x46, 0x0c, 0x27, 0x10, 0xba, 0xec, 0x58, 0xff, + 0x8b, 0x1e, 0x91, 0xc6, 0xeb, 0x63, 0x74, 0x0a, 0x1e, 0x5e, 0x5e, 0x6a, 0xf8, 0xf5, 0x32, 0xec, + 0x3c, 0x94, 0x62, 0xe8, 0x5c, 0xd2, 0x4a, 0x1c, 0x87, 0x9c, 0xc8, 0x65, 0x15, 0xc0, 0x84, 0x95, + 0xe7, 0xd4, 0x56, 0xde, 0x35, 0x03, 0xce, 0x5d, 0x10, 0x0c, 0x9e, 0x54, 0x17, 0xbc, 0xb2, 0x84, + 0x71, 0x36, 0xe6, 0xc8, 0xa3, 0x26, 0xbe, 0x55, 0x3e, 0x86, 0x48, 0x9b, 0xa5, 0xc8, 0x7d, 0x98, + 0x5a, 0x0f, 0x36, 0xd8, 0xf9, 0xd8, 0x41, 0xb8, 0xd9, 0xee, 0x36, 0x55, 0x1a, 0x9b, 0x35, 0xf7, + 0xd2, 0x00, 0x14, 0x5c, 0xd2, 0x3b, 0x94, 0x37, 0x67, 0x4e, 0x80, 0xe1, 0x60, 0x8e, 0x68, 0xeb, + 0x46, 0x2a, 0x41, 0x6a, 0x7f, 0x18, 0x9c, 0xc5, 0x31, 0xd5, 0x2c, 0xd3, 0xdb, 0xc5, 0x24, 0x28, + 0x03, 0xb5, 0x2a, 0x4c, 0xa9, 0x0c, 0xd2, 0x4e, 0x14, 0x0d, 0x4a, 0xc1, 0xe4, 0x70, 0x74, 0x68, + 0x8c, 0xa5, 0x17, 0x80, 0x14, 0x38, 0x1e, 0x37, 0x07, 0x7e, 0x83, 0x96, 0x78, 0xbc, 0xa0, 0xc4, + 0xe3, 0xb7, 0xaf, 0x12, 0x5c, 0x13, 0xed, 0x11, 0x8e, 0xd3, 0x84, 0xcc, 0xfa, 0x7f, 0xeb, 0xa7, + 0xb6, 0x58, 0x5f, 0xfe, 0xd6, 0x6c, 0xdd, 0xfc, 0xdc, 0xba, 0xa2, 0x6a, 0x98, 0x2f, 0x10, 0xdc, + 0x1e, 0xa7, 0x3d, 0x0f, 0xed, 0x78, 0x1f, 0xb0, 0x58, 0xb0, 0x0d, 0x5e, 0x2c, 0xef, 0xc9, 0x26, + 0xcb, 0xee, 0x38, 0xbf, 0x21, 0xa2, 0xeb, 0xa6, 0x33, 0xf7, 0x1f, 0x67, 0x6f, 0x80, 0xaf, 0x57, + 0x42, 0x12, 0x83, 0x44, 0x1e, 0x33, 0x5e, 0x3e, 0x43, 0x38, 0xf4, 0x4e, 0x7e, 0xf3, 0x5c, 0x77, + 0xca, 0x98, 0xb9, 0xf0, 0x37, 0x41, 0x97, 0x14, 0x70, 0x21, 0x44, 0x50, 0x72, 0x96, 0xab, 0xa8, + 0x9d, 0x48, 0x24, 0xe3, 0xec, 0x9f, 0x0f, 0x69, 0xe7, 0xda, 0x80, 0x2c, 0xd5, 0xfe, 0x70, 0x61, + 0x91, 0xb7, 0x19, 0xa0, 0x74, 0xc2, 0xf6, 0xe0, 0xcb, 0x1f, 0x33, 0xa7, 0x12, 0x86, 0x52, 0x88, + 0x89, 0xd7, 0x0c, 0x3f, 0xc8, 0x14, 0xd4, 0xc8, 0x05, 0x79, 0xf2, 0x23, 0x22, 0xe2, 0xcc, 0xf1, + 0x45, 0x94, 0x0d, 0x98, 0x28, 0x12, 0xf2, 0x09, 0xd8, 0x53, 0x98, 0xee, 0x9a, 0x6e, 0xce, 0xaa, + 0x2a, 0xc2, 0xe6, 0x5d, 0x29, 0x46, 0xde, 0xea, 0x2a, 0x7a, 0x3f, 0xbe, 0x40, 0x3e, 0x4c, 0x83, + 0x77, 0xb5, 0xd0, 0x27, 0x2e, 0xcf, 0x10, 0x40, 0x92, 0xcc, 0x01, 0x8a, 0x75, 0xea, 0xc3, 0x03, + 0xe9, 0x83, 0x45, 0xa2, 0xa4, 0x18, 0xe4, 0x16, 0x61, 0xf3, 0xb6, 0xd3, 0xfe, 0x27, 0x8c, 0xbf, + 0xeb, 0x3c, 0x6a, 0x60, 0xf7, 0x70, 0x69, 0x6a, 0xeb, 0x21, 0xb3, 0x18, 0x9b, 0xd5, 0x13, 0x6a, + 0xa2, 0x60, 0x6d, 0xf5, 0xe1, 0x12, 0x53, 0xfb, 0x2b, 0x1d, 0x7d, 0xb8, 0x3d, 0x38, 0xe5, 0x2d, + 0xf8, 0xe8, 0x09, 0x77, 0x83, 0x72, 0x09, 0xdb, 0xe3, 0xd9, 0x2c, 0x0e, 0xc6, 0x31, 0xbf, 0x2c, + 0xba, 0x81, 0x6d, 0x92, 0x0b, 0x69, 0xc1, 0x48, 0x10, 0x5c, 0xb5, 0x85, 0xba, 0x0c, 0xb8, 0x0d, + 0x95, 0xe9, 0xb0, 0x26, 0x9a, 0x4a, 0x91, 0x48, 0xcb, 0xf5, 0x71, 0x4b, 0x7d, 0xd4, 0xcb, 0x5a, + 0x03, 0xe5, 0x88, 0x95, 0x53, 0x91, 0xe6, 0x33, 0x0d, 0xd2, 0xaf, 0x51, 0xea, 0x99, 0xfa, 0xf8, + 0x05, 0x6b, 0x0d, 0x9d, 0x33, 0xee, 0x2c, 0xcf, 0x81, 0x7c, 0x23, 0x6b, 0xb1, 0xf3, 0xc7, 0x61, + 0x5e, 0x55, 0x71, 0x18, 0xdd, 0xb8, 0xea, 0xf7, 0x79, 0x50, 0x56, 0x08, 0x80, 0x95, 0x81, 0xcb, + 0xfd, 0xbf, 0xe3, 0x15, 0x1d, 0xcf, 0x18, 0xc6, 0x07, 0x3e, 0x80, 0x72, 0x1c, 0x59, 0x3c, 0x67, + 0x0b, 0xb6, 0xc6, 0xf8, 0x4c, 0x2d, 0x5e, 0x71, 0x90, 0xad, 0x56, 0x3c, 0x86, 0xb3, 0xa9, 0xe8, + 0xc5, 0x65, 0x4c, 0xc1, 0x54, 0x56, 0xd7, 0x6e, 0x73, 0x6e, 0xe5, 0xfb, 0x63, 0x31, 0x84, 0xa6, + 0xc4, 0x37, 0xcd, 0x6a, 0x0a, 0xe6, 0x1f, 0x55, 0xb5, 0x31, 0x15, 0xaa, 0xa2, 0x8e, 0xc7, 0x59, + 0x2f, 0xcb, 
0x2d, 0x97, 0xd4, 0x4e, 0x09, 0x0b, 0x2e, 0xbb, 0x08, 0x4c, 0x93, 0x5a, 0xf2, 0x09, + 0xbf, 0xa3, 0x84, 0xe6, 0xd7, 0x92, 0xc3, 0x33, 0x26, 0xc6, 0x90, 0x53, 0x7b, 0x81, 0xaf, 0x52, + 0xa0, 0x4c, 0xd8, 0x4d, 0xbe, 0xbd, 0xcc, 0x91, 0xeb, 0x90, 0x37, 0x52, 0x47, 0xdc, 0xb0, 0x24, + 0x82, 0xa9, 0x52, 0x77, 0x45, 0xfe, 0x80, 0x1e, 0xea, 0xb1, 0x0e, 0x9f, 0x27, 0x2f, 0x0c, 0x10, + 0x5c, 0xbe, 0x68, 0xe3, 0xff, 0xd6, 0x23, 0xa4, 0x0f, 0xa6, 0x85, 0xc2, 0x8f, 0xaf, 0x8e, 0x56, + 0xaa, 0x9a, 0x63, 0x88, 0xa0, 0xa6, 0x74, 0xe1, 0x2a, 0xc7, 0xd3, 0x83, 0x4d, 0x27, 0x77, 0x8c, + 0x9b, 0x81, 0x56, 0x92, 0xd7, 0xb7, 0x95, 0xbb, 0x79, 0x77, 0xea, 0x38, 0x06, 0x2a, 0x85, 0x72, + 0x5b, 0x77, 0x8d, 0x7d, 0x8e, 0x83, 0x4b, 0xff, 0xb7, 0x3e, 0x32, 0x39, 0x99, 0xe5, 0xf7, 0x6a, + 0xb1, 0x82, 0xa9, 0x19, 0x34, 0xfb, 0x46, 0x1f, 0xae, 0xe7, 0xc3, 0xaa, 0xa6, 0xed, 0x55, 0x8f, + 0xba, 0xfc, 0xd8, 0xec, 0xf3, 0x8a, 0x74, 0xba, 0xbf, 0xbf, 0xc4, 0xcc, 0x64, 0xd8, 0xcf, 0x2a, + 0x91, 0xfb, 0xce, 0x1c, 0x25, 0x2a, 0x4b, 0x73, 0x96, 0xac, 0xdf, 0x31, 0x0f, 0x63, 0x04, 0xff, + 0x5b, 0x8e, 0x30, 0x52, 0xf0, 0xb9, 0x15, 0x2a, 0x8a, 0x1b, 0xf4, 0xbf, 0xd7, 0x83, 0xfb, 0xae, + 0x61, 0xfa, 0x4d, 0xef, 0x57, 0x64, 0x82, 0xe3, 0xb1, 0x1a, 0xd6, 0xd1, 0xcf, 0xc2, 0x4f, 0x68, + 0xcf, 0xb5, 0xa5, 0x36, 0x96, 0x00, 0x2f, 0x9f, 0xcd, 0x02, 0xd4, 0x34, 0x14, 0x39, 0x36, 0x89, + 0x0c, 0x57, 0x47, 0x01, 0xc0, 0x21, 0x49, 0xc0, 0x46, 0x95, 0x02, 0xe0, 0xf2, 0x4e, 0xd7, 0x6c, + 0xa5, 0x0d, 0xb8, 0xa0, 0x56, 0x01, 0x09, 0x8f, 0xeb, 0xbd, 0xdd, 0x00, 0xb2, 0x01, 0x69, 0x7d, + 0x34, 0x57, 0x66, 0x80, 0x76, 0x15, 0x93, 0xe9, 0x03, 0xa2, 0x34, 0xc7, 0x2b, 0x3a, 0xd2, 0x44, + 0xf6, 0xb0, 0xea, 0x55, 0x36, 0x23, 0xa2, 0x9e, 0x46, 0x8b, 0x91, 0xb3, 0xba, 0x9e, 0xd0, 0x7c, + 0x7f, 0x1f, 0x20, 0xae, 0x83, 0x61, 0x06, 0x1d, 0x52, 0xa9, 0x85, 0xe0, 0x14, 0xfd, 0xb1, 0xe0, + 0xb1, 0x11, 0x7b, 0x8b, 0x6c, 0xdd, 0x19, 0xef, 0x16, 0xbe, 0xb3, 0x4e, 0x34, 0xd0, 0x81, 0xc0, + 0xdc, 0x19, 0x0f, 0x76, 0x95, 0xe1, 0x7e, 0x15, 0x67, 0x00, 0xb2, 0xa8, 0x1c, 0x23, 0x11, 0x69, + 0x25, 0xc4, 0xb3, 0xbc, 0x09, 0x08, 0xf5, 0x85, 0x90, 0xc7, 0xa9, 0x57, 0x87, 0xe9, 0xda, 0x35, + 0x4b, 0x04, 0x06, 0x7b, 0x3b, 0xd5, 0x41, 0xa4, 0x05, 0xc2, 0x9f, 0xc3, 0xfa, 0x16, 0x6e, 0xbc, + 0x4b, 0x31, 0xc8, 0x0f, 0x58, 0x6a, 0x40, 0x27, 0xad, 0xad, 0xd8, 0x1d, 0xef, 0xf2, 0xff, 0x56, + 0xff, 0xce, 0xcf, 0xbe, 0x87, 0xf5, 0x30, 0xf4, 0xaa, 0xef, 0x9d, 0x1b, 0x3f, 0x63, 0x4d, 0x3e, + 0xe9, 0xd3, 0x0c, 0x1d, 0x6f, 0xb3, 0x1d, 0xca, 0x0e, 0xcb, 0xac, 0x39, 0xd7, 0x27, 0x3e, 0x2b, + 0xe1, 0x31, 0x8d, 0x8c, 0x93, 0x02, 0xea, 0x6d, 0xe3, 0x54, 0x47, 0x6c, 0xcf, 0xcb, 0x81, 0xca, + 0x35, 0x56, 0xf5, 0x93, 0xee, 0x49, 0x5b, 0xd8, 0x19, 0xd0, 0xda, 0x6d, 0xb9, 0xab, 0x04, 0x2a, + 0x45, 0xb3, 0x4e, 0xf5, 0xbc, 0x90, 0xf7, 0xff, 0x42, 0xdf, 0xf7, 0x41, 0x71, 0x3f, 0x58, 0x77, + 0xd8, 0xac, 0x2c, 0x4a, 0xa3, 0x84, 0xc8, 0xc8, 0x6c, 0xa6, 0xde, 0xe9, 0xb7, 0x5a, 0x14, 0x01, + 0x5b, 0xe5, 0xd2, 0x46, 0xc8, 0xa3, 0x12, 0xd4, 0x9d, 0xb5, 0xf7, 0x57, 0x0d, 0xdf, 0x22, 0x39, + 0xcc, 0xc1, 0x3b, 0xc5, 0x0e, 0x44, 0x27, 0x67, 0x57, 0xa7, 0x04, 0x54, 0x6a, 0x9b, 0xf0, 0x81, + 0x2e, 0xb0, 0xa6, 0x9a, 0xed, 0xf2, 0x48, 0x30, 0xe0, 0xe8, 0x70, 0x24, 0x3a, 0x1e, 0x65, 0x71, + 0xe7, 0x2d, 0x31, 0xb1, 0x0c, 0x05, 0xe4, 0x7c, 0x2f, 0x5e, 0x87, 0x9b, 0x86, 0x67, 0x2c, 0x67, + 0xc6, 0xa7, 0xd5, 0x96, 0x41, 0x2c, 0x1e, 0xa0, 0x30, 0x86, 0xba, 0xce, 0x35, 0x0c, 0xe6, 0xcb, + 0xa1, 0xb9, 0xaa, 0xd9, 0x2c, 0x4a, 0xe4, 0x8f, 0x5d, 0xc8, 0x19, 0x41, 0x51, 0x78, 0xdb, 0x66, + 0xa1, 0xa5, 0x64, 0x4e, 0x55, 0xc0, 
0xfc, 0xfe, 0xa0, 0x42, 0xa6, 0x16, 0x20, 0x06, 0xab, 0x2c, + 0x5d, 0x0b, 0xa2, 0x79, 0x5c, 0xc9, 0x04, 0xd0, 0xd2, 0x9b, 0xda, 0x90, 0x21, 0x85, 0x5c, 0x73, + 0x8f, 0x7b, 0x23, 0xda, 0x9c, 0x74, 0x75, 0xfa, 0x22, 0xe9, 0xc8, 0xcf, 0xc9, 0x84, 0xd4, 0x78, + 0x35, 0x7c, 0xc8, 0x46, 0x73, 0xd1, 0x60, 0xf1, 0x3e, 0x7c, 0x4f, 0x0e, 0xb8, 0x8b, 0xe3, 0xfa, + 0x3e, 0x1b, 0xf5, 0xe4, 0x07, 0x6e, 0x9b, 0x3e, 0x30, 0x50, 0x19, 0x5a, 0x09, 0x90, 0x22, 0xe0, + 0x83, 0xd9, 0xb3, 0xae, 0x2a, 0x12, 0x8e, 0xf5, 0x90, 0x47, 0x3c, 0xb9, 0xe6, 0xd4, 0xa2, 0x00, + 0x12, 0x7b, 0xb8, 0x3f, 0x14, 0x60, 0xd1, 0xc8, 0x69, 0x0d, 0xff, 0x43, 0x23, 0xc4, 0xdd, 0xcc, + 0xd4, 0x21, 0x85, 0xc9, 0x40, 0xd0, 0x76, 0x31, 0xf2, 0x4c, 0x4f, 0x39, 0xd9, 0x39, 0x73, 0x83, + 0xe0, 0x07, 0xa6, 0x1c, 0x24, 0xd8, 0xb2, 0x1c, 0x5c, 0x3f, 0xb9, 0x92, 0xe3, 0x7b, 0xbd, 0xea, + 0x83, 0xfd, 0xd0, 0x9c, 0x91, 0x4a, 0x80, 0x5f, 0x2b, 0xea, 0x53, 0x3a, 0x8a, 0x38, 0x3b, 0x2d, + 0xea, 0x8c, 0x16, 0x13, 0xe0, 0xef, 0x58, 0xce, 0xe0, 0x4d, 0xb3, 0x6b, 0x4b, 0xbd, 0x1a, 0x9d, + 0xb0, 0x9f, 0x55, 0x56, 0x2c, 0x57, 0xce, 0x70, 0x07, 0xb2, 0xbe, 0x4d, 0xb2, 0xe8, 0x77, 0x41, + 0x04, 0x45, 0xd8, 0xcc, 0x30, 0x71, 0x9a, 0xdc, 0x84, 0xfc, 0x51, 0x6f, 0x28, 0x28, 0x9d, 0xe8, + 0x50, 0x48, 0x2f, 0x84, 0x56, 0x34, 0xb4, 0x15, 0x1d, 0x2f, 0x90, 0x21, 0xfa, 0x2d, 0xf2, 0x38, + 0x33, 0xdc, 0x1b, 0xd5, 0xfc, 0x00, 0x26, 0x38, 0x29, 0x5e, 0x95, 0xf6, 0x86, 0xb9, 0xfb, 0x92, + 0xa8, 0xaa, 0x32, 0x98, 0x6d, 0x26, 0xe1, 0xca, 0xa0, 0xc3, 0x6a, 0xd7, 0x3a, 0x90, 0xaa, 0xcc, + 0x79, 0x4c, 0x18, 0xb1, 0x32, 0x31, 0x9a, 0x32, 0xa6, 0xab, 0x0c, 0x5b, 0x9e, 0xb0, 0xa6, 0x7d, + 0x75, 0x1a, 0xa1, 0x41, 0x04, 0x2a, 0xce, 0x85, 0x1f, 0xf7, 0x1e, 0xef, 0x9b, 0x2e, 0x5b, 0x68, + 0x63, 0x3a, 0x10, 0x09, 0xbd, 0xcc, 0x9a, 0x32, 0x8b, 0xb7, 0x6b, 0x52, 0xd5, 0x9a, 0xcb, 0xee, + 0x96, 0x7d, 0xc6, 0xb5, 0xb3, 0x3d, 0xd4, 0xa4, 0x4c, 0x4e, 0x65, 0x78, 0xcb, 0xc6, 0x32, 0xbf, + 0x85, 0x67, 0x43, 0x88, 0x59, 0xa9, 0x9c, 0xd7, 0x6e, 0x85, 0x28, 0xe2, 0x28, 0xbe, 0x84, 0xf5, + 0x1f, 0x89, 0x73, 0x21, 0x9e, 0xa4, 0x1a, 0x87, 0x05, 0xd0, 0xaa, 0x22, 0xc7, 0x1e, 0x73, 0x24, + 0xf2, 0x0f, 0xb2, 0x55, 0x3c, 0x12, 0x60, 0x1a, 0x0e, 0x46, 0x36, 0x87, 0x08, 0x1e, 0x45, 0xf8, + 0x36, 0x51, 0xe0, 0xb2, 0x9f, 0x7b, 0x50, 0x32, 0x0f, 0xda, 0x61, 0x03, 0xb5, 0x0c, 0x3f, 0xb2, + 0x9c, 0x08, 0x44, 0x5b, 0x3a, 0xac, 0xa5, 0x84, 0x8c, 0x0d, 0x3f, 0xf6, 0x53, 0xd4, 0xd5, 0xd7, + 0xd6, 0x0d, 0xd7, 0xe6, 0xad, 0x8d, 0x6f, 0x5a, 0xc1, 0x6f, 0x45, 0x67, 0x70, 0x3c, 0x55, 0xa0, + 0x6a, 0x55, 0x75, 0x26, 0x5b, 0xb5, 0x15, 0x60, 0xf2, 0x34, 0x12, 0x6e, 0x5b, 0xe3, 0xe8, 0x96, + 0x41, 0xe9, 0xf7, 0x3e, 0xfe, 0xb0, 0x82, 0x53, 0x21, 0x78, 0x6b, 0x2c, 0xbb, 0x3a, 0x65, 0x6e, + 0x55, 0x7e, 0x1f, 0x24, 0xec, 0xb2, 0xbe, 0x33, 0x06, 0xaa, 0xc3, 0x27, 0x8c, 0x5f, 0x02, 0x8e, + 0xee, 0xf3, 0x79, 0xa5, 0x4a, 0x0d, 0x00, 0xd8, 0xdf, 0xb7, 0xa9, 0xe6, 0xe5, 0x35, 0xac, 0x97, + 0xb1, 0x93, 0x1d, 0x92, 0xf2, 0x5f, 0x93, 0xa8, 0x12, 0xfa, 0x21, 0xf7, 0x8d, 0x80, 0x82, 0xc8, + 0x20, 0xd1, 0x0b, 0x70, 0x66, 0xee, 0x19, 0xda, 0x60, 0xe1, 0x0b, 0x9d, 0xb8, 0xd0, 0xec, 0x95, + 0x32, 0xd4, 0xff, 0x2b, 0x07, 0x27, 0x61, 0xc9, 0x00, 0x4c, 0x78, 0x0c, 0x40, 0x1d, 0xcc, 0x75, + 0x3d, 0x0d, 0xd8, 0xe4, 0xba, 0xa3, 0x72, 0xff, 0xd7, 0xd0, 0x0a, 0x76, 0x35, 0x24, 0x4d, 0xfa, + 0xde, 0xf3, 0xd1, 0xae, 0xc6, 0xda, 0xbe, 0x95, 0x5b, 0xd5, 0xbe, 0xdc, 0xb0, 0xb8, 0x21, 0x12, + 0xed, 0x6b, 0x48, 0x0e, 0x92, 0xa3, 0xf9, 0x29, 0xf3, 0x46, 0x2b, 0x7b, 0x64, 0xf8, 0xf8, 0x05, + 0xc0, 0x7f, 0xf0, 0x1e, 0xc2, 0xbe, 0xa2, 0x16, 0x2a, 0xf8, 
0x8c, 0x6d, 0x3a, 0x48, 0x67, 0xba, + 0x48, 0xe7, 0x1a, 0x64, 0xd6, 0x74, 0xa2, 0x4e, 0x53, 0x57, 0x7a, 0x83, 0xbf, 0xfc, 0xb1, 0x23, + 0x04, 0xb1, 0x48, 0x9a, 0x9f, 0x10, 0x44, 0xce, 0xda, 0x59, 0x1c, 0xe1, 0x9f, 0x48, 0x31, 0x98, + 0x59, 0x95, 0x9e, 0xd0, 0xda, 0x92, 0xc9, 0xd2, 0x0d, 0xeb, 0x8c, 0xa8, 0xf9, 0xf1, 0x4d, 0x1f, + 0x3b, 0xe4, 0x8e, 0xf6, 0xa8, 0xad, 0x54, 0xc1, 0xb2, 0xed, 0x3f, 0x77, 0x16, 0x69, 0xfe, 0x06, + 0xc1, 0x7c, 0xbb, 0x63, 0xb9, 0xb8, 0xf0, 0x65, 0xf9, 0xef, 0xc6, 0xde, 0x05, 0x0c, 0x2c, 0x4b, + 0x11, 0x69, 0xdd, 0xcb, 0x7a, 0x09, 0x1f, 0x47, 0x48, 0x0d, 0x88, 0xa9, 0xeb, 0xf3, 0x30, 0xc8, + 0x4c, 0x9d, 0xfe, 0xeb, 0xeb, 0xd0, 0x52, 0x0b, 0x0c, 0x73, 0x11, 0x22, 0x84, 0xc4, 0x33, 0xf7, + 0x4e, 0xa3, 0x0e, 0x58, 0x23, 0x85, 0xb7, 0x5c, 0x34, 0x92, 0x6d, 0xe7, 0xe6, 0x72, 0x00, 0x60, + 0xd3, 0x93, 0xa9, 0x4c, 0x27, 0x7a, 0x90, 0x2d, 0xaf, 0x71, 0xe5, 0x07, 0x8b, 0x1c, 0xfd, 0x28, + 0x12, 0xef, 0x01, 0x8c, 0x93, 0xa7, 0x6f, 0x67, 0x63, 0x1a, 0xb7, 0x4d, 0x60, 0x21, 0x75, 0xdd, + 0x37, 0xff, 0x2a, 0x7d, 0x1d, 0x8f, 0x1f, 0x93, 0x6f, 0xba, 0xb2, 0x41, 0xa7, 0x6e, 0xc5, 0xe0, + 0xa9, 0x7b, 0xf3, 0x8c, 0x9f, 0xf1, 0x2d, 0xd9, 0xbb, 0xa7, 0x89, 0x92, 0x4a, 0xa2, 0x53, 0x07, + 0x60, 0xd1, 0xe9, 0x79, 0x1c, 0xb3, 0x4b, 0xfe, 0x87, 0x2f, 0x73, 0x3b, 0xfa, 0xf7, 0xe8, 0x1c, + 0x43, 0x72, 0x6b, 0xe0, 0x51, 0x68, 0x28, 0x14, 0xa8, 0xb7, 0x6f, 0x72, 0x2a, 0x30, 0xb4, 0xda, + 0xe0, 0x49, 0x32, 0x0d, 0xed, 0x5c, 0x0f, 0xa7, 0xc7, 0x7b, 0x1e, 0xe3, 0xda, 0xeb, 0x0f, 0x6d, + 0x01, 0x74, 0x8d, 0xe4, 0xaa, 0x39, 0x54, 0xea, 0xcb, 0xd7, 0xe6, 0x96, 0x23, 0xce, 0xa1, 0x7d, + 0x93, 0xce, 0x05, 0xe0, 0xc2, 0x4a, 0xde, 0x22, 0xaf, 0x18, 0x7c, 0xfb, 0x6b, 0x49, 0x93, 0x52, + 0xa6, 0xb5, 0xc2, 0xf8, 0x40, 0xa4, 0xc9, 0x91, 0x21, 0x28, 0xbb, 0xaf, 0x17, 0xb8, 0x03, 0x8f, + 0x37, 0x08, 0xcc, 0x62, 0xb3, 0xa6, 0xcb, 0xd6, 0x95, 0xc2, 0x87, 0xf6, 0x94, 0x2c, 0x84, 0xec, + 0x0f, 0xb4, 0xf7, 0xf8, 0xbc, 0x80, 0x3f, 0x22, 0xd1, 0xd9, 0x73, 0xfd, 0xac, 0x94, 0x5a, 0x1b, + 0xb5, 0x4c, 0x41, 0xa5, 0x3e, 0x35, 0x71, 0x56, 0x88, 0x9d, 0x6c, 0x87, 0xfb, 0x0a, 0x33, 0x13, + 0xd9, 0xea, 0x4e, 0x7a, 0x05, 0x60, 0x6c, 0xe6, 0xaf, 0xb5, 0x77, 0x5f, 0x33, 0xa5, 0xe7, 0x13, + 0x58, 0x1c, 0x7f, 0xc9, 0x19, 0x9b, 0xf5, 0xd5, 0x97, 0x70, 0x2f, 0x14, 0x20, 0xad, 0x63, 0xed, + 0x3c, 0x07, 0x4c, 0xbd, 0x43, 0xf9, 0x1a, 0xa3, 0x6a, 0x10, 0xc0, 0x55, 0xbd, 0x30, 0xaf, 0xde, + 0xd0, 0x4f, 0xae, 0xbd, 0xb0, 0x79, 0x78, 0x3a, 0xd0, 0x4a, 0x63, 0xcc, 0xe3, 0x62, 0xc7, 0x8d, + 0xfd, 0x6e, 0xca, 0xc1, 0x0e, 0xaa, 0x93, 0x72, 0x28, 0xf0, 0x5f, 0xfa, 0x51, 0xf9, 0xb8, 0x7a, + 0x90, 0x84, 0xf2, 0xce, 0x65, 0x82, 0xe6, 0x6a, 0x48, 0x47, 0x9f, 0x0e, 0xfe, 0x16, 0xb9, 0xf8, + 0x73, 0xcf, 0x8d, 0xd5, 0x9c, 0x94, 0x4c, 0xea, 0xd3, 0x41, 0xf2, 0xd8, 0x21, 0x08, 0xa6, 0xe2, + 0x30, 0x65, 0x6f, 0x74, 0xb2, 0x77, 0xcb, 0x51, 0x5c, 0xef, 0x4f, 0x3f, 0x3b, 0x9e, 0xe8, 0x68, + 0xe6, 0x81, 0x84, 0xb1, 0x16, 0x23, 0x20, 0x01, 0xee, 0xef, 0xc6, 0x45, 0x5d, 0x84, 0xb5, 0xec, + 0x4e, 0x0b, 0x9f, 0x00, 0xf4, 0x1d, 0x9b, 0xf2, 0x4a, 0xce, 0x45, 0xa4, 0x10, 0xb5, 0xb2, 0x14, + 0x86, 0x66, 0x4c, 0xb4, 0xe0, 0x2d, 0xc0, 0x0d, 0x11, 0x25, 0x64, 0x14, 0x9a, 0xe5, 0xce, 0x51, + 0xe7, 0xe9, 0x7b, 0xb8, 0x34, 0x8a, 0x6c, 0xea, 0x3e, 0x10, 0x2a, 0xae, 0x69, 0x7d, 0xf0, 0xe8, + 0x3e, 0x05, 0x9b, 0xbe, 0xdb, 0xaf, 0x38, 0xf6, 0xc5, 0xf7, 0xd9, 0x78, 0x16, 0x0a, 0xa8, 0x28, + 0xf0, 0x97, 0xb2, 0x81, 0xb7, 0x88, 0xf4, 0x15, 0xee, 0x82, 0x53, 0xb7, 0xac, 0x4f, 0xa7, 0x68, + 0xcf, 0xc5, 0x7c, 0x56, 0xfc, 0x0c, 0x33, 0x66, 0xfa, 0xbc, 0x80, 0x02, 0xf4, 0xef, 
0xd0, 0xba, + 0xec, 0x69, 0x2c, 0x1f, 0xe6, 0x41, 0xe4, 0x1c, 0x3d, 0xd4, 0x0c, 0xbd, 0xf4, 0xb9, 0x52, 0x94, + 0x78, 0xee, 0xd0, 0x7a, 0x47, 0xc3, 0xb1, 0x41, 0xb9, 0x47, 0xc4, 0xeb, 0xad, 0x76, 0x07, 0x5e, + 0x02, 0x89, 0xec, 0xef, 0x4b, 0x4f, 0x1a, 0xaa, 0x22, 0xfe, 0x87, 0x67, 0x0d, 0xc7, 0x6d, 0xd8, + 0xf6, 0xb6, 0x87, 0x0c, 0x94, 0x75, 0x20, 0x4e, 0x3e, 0xf6, 0xe2, 0xb4, 0x3f, 0x16, 0xde, 0x65, + 0xbe, 0xb8, 0x6b, 0x09, 0xd8, 0xc8, 0xcc, 0x52, 0x8b, 0x31, 0xc3, 0xf7, 0x19, 0xc1, 0x4b, 0x3a, + 0x2c, 0x64, 0x79, 0xdd, 0x52, 0x70, 0xe2, 0x9f, 0x93, 0x78, 0x27, 0x7a, 0xb0, 0xa4, 0xe7, 0xcf, + 0xf1, 0xa3, 0xf9, 0x23, 0x0f, 0xa0, 0xe6, 0xee, 0xb0, 0x5f, 0x35, 0x4e, 0x05, 0xdc, 0x11, 0xe1, + 0x11, 0x70, 0x52, 0xf5, 0x51, 0x28, 0x0c, 0x83, 0x5e, 0x17, 0xf5, 0xe7, 0xa5, 0x5e, 0xb1, 0xe7, + 0xa9, 0x8d, 0x80, 0xeb, 0x4e, 0x34, 0xd5, 0xaa, 0x93, 0x88, 0xf0, 0xa6, 0xce, 0x70, 0xe2, 0x38, + 0x52, 0xdb, 0xee, 0x86, 0x54, 0xe2, 0x3d, 0x46, 0x87, 0x88, 0x46, 0xa2, 0xbe, 0xb2, 0xdf, 0xb0, + 0x7b, 0x6d, 0x40, 0x03, 0xbf, 0x1c, 0x85, 0x50, 0x9c, 0xde, 0xe9, 0xec, 0x9b, 0x82, 0x6d, 0x94, + 0x9c, 0xec, 0x7e, 0x6f, 0x37, 0x18, 0x50, 0x70, 0x54, 0x2c, 0xe7, 0xb9, 0x50, 0xf8, 0x1f, 0x2c, + 0x0a, 0xd2, 0x24, 0x0e, 0x87, 0x70, 0xbe, 0x6f, 0x0d, 0xb3, 0x3e, 0xe5, 0x4d, 0x9c, 0x7d, 0xcb, + 0x8d, 0x59, 0x7c, 0xae, 0xaf, 0xa4, 0xd4, 0xf5, 0xf2, 0x4d, 0x9a, 0xfa, 0xb3, 0x8b, 0xc5, 0x3c, + 0x2f, 0xc9, 0x12, 0x7d, 0xba, 0x21, 0x0f, 0x37, 0x85, 0xee, 0xe5, 0x23, 0x47, 0x57, 0x58, 0x6c, + 0x38, 0x6a, 0xf4, 0x7d, 0x76, 0xe3, 0x55, 0x94, 0xe1, 0x43, 0x2b, 0x47, 0xa8, 0x3a, 0x97, 0xaf, + 0x14, 0xd6, 0xf2, 0xc3, 0x61, 0x2e, 0xe5, 0xda, 0x51, 0x42, 0x6d, 0x05, 0x3e, 0x40, 0x1f, 0x05, + 0xe3, 0x9c, 0xd8, 0x3f, 0x0a, 0xb6, 0x12, 0x89, 0xe4, 0x52, 0x7b, 0xe2, 0xb7, 0x1e, 0x3d, 0x7e, + 0x13, 0x21, 0xca, 0xaa, 0xc8, 0x7d, 0x0e, 0x94, 0xa4, 0xab, 0x2b, 0x39, 0x66, 0xef, 0x9f, 0x9d, + 0x58, 0x5c, 0xf1, 0x1d, 0x77, 0x5b, 0x08, 0x2f, 0xc8, 0x30, 0x51, 0x9b, 0xe8, 0x45, 0xbd, 0x58, + 0xdf, 0x58, 0xf1, 0x8d, 0xb2, 0xca, 0xa1, 0x05, 0xf8, 0x01, 0x7f, 0xb0, 0xf4, 0x83, 0x98, 0x18, + 0x17, 0x70, 0x84, 0x55, 0xed, 0xdd, 0xe2, 0xca, 0x0b, 0x96, 0x2c, 0xe9, 0x0d, 0x8b, 0xe4, 0x53, + 0x1a, 0x5f, 0x7c, 0x61, 0x5d, 0x2a, 0x63, 0x1e, 0xdc, 0xf7, 0x30, 0x27, 0xc2, 0xc9, 0x88, 0x64, + 0xf5, 0x29, 0x9f, 0x53, 0x34, 0x38, 0xfc, 0xd3, 0xa3, 0x20, 0x54, 0x84, 0x3a, 0x66, 0xc2, 0x28, + 0x92, 0xcb, 0xba, 0x61, 0x6d, 0x12, 0xcf, 0xb6, 0xd8, 0xc1, 0xf5, 0x1b, 0x12, 0x2c, 0x26, 0xe6, + 0x4d, 0x55, 0x53, 0x72, 0xeb, 0xd9, 0x82, 0x7c, 0xd7, 0x79, 0x97, 0xaa, 0x35, 0x2b, 0xe6, 0x39, + 0x8d, 0x84, 0xd1, 0x8e, 0x79, 0x4f, 0x9d, 0x44, 0x2f, 0xd8, 0x6d, 0x7b, 0xa6, 0x5f, 0xd7, 0x2e, + 0x37, 0x8f, 0x71, 0x06, 0xbb, 0x5d, 0x79, 0xae, 0x46, 0xca, 0xcd, 0x7b, 0xba, 0x49, 0x37, 0x8c, + 0xf8, 0x7d, 0x61, 0x5b, 0x5b, 0x2c, 0xb3, 0x75, 0x64, 0xa1, 0xa8, 0xf2, 0xf5, 0x35, 0xbc, 0xf8, + 0x7e, 0xca, 0x0e, 0x6b, 0xf5, 0xff, 0x7b, 0x8f, 0x37, 0xf4, 0x60, 0x2e, 0x9a, 0x56, 0xb8, 0xc3, + 0x17, 0xd2, 0x89, 0x59, 0x66, 0x47, 0x95, 0xee, 0xd9, 0xe8, 0x7e, 0xa4, 0x82, 0x39, 0x73, 0xe1, + 0xce, 0x66, 0x43, 0xb8, 0x5e, 0x64, 0x6e, 0x28, 0x82, 0x7b, 0x39, 0x50, 0x4c, 0x13, 0xd9, 0xce, + 0xbf, 0xa7, 0xf2, 0x5f, 0x1b, 0x4b, 0x23, 0x82, 0xd1, 0x37, 0x84, 0xaa, 0xae, 0x41, 0x53, 0xec, + 0x3e, 0x0c, 0xc9, 0x16, 0x1c, 0xbc, 0x89, 0x39, 0x51, 0x19, 0x27, 0x74, 0xf6, 0x27, 0x6d, 0x0c, + 0x60, 0x4d, 0x50, 0xc5, 0x02, 0xc6, 0xd4, 0xf8, 0xf9, 0xcb, 0x01, 0xf1, 0x2d, 0xc1, 0xcd, 0x04, + 0xcb, 0xdf, 0xaf, 0x0e, 0xf4, 0x4c, 0x47, 0xfc, 0x2e, 0x85, 0x83, 0x14, 0xb7, 0x9e, 0xc9, 0xd1, + 0x0b, 0x12, 
0x4d, 0x59, 0x78, 0x66, 0xa7, 0xbe, 0xb5, 0x34, 0xa0, 0xce, 0x3d, 0xe3, 0xa1, 0x50, + 0x89, 0xb5, 0x59, 0x97, 0x24, 0xa9, 0x96, 0xc6, 0x51, 0x54, 0xca, 0x0c, 0x82, 0xf9, 0x28, 0xa6, + 0xfd, 0x71, 0x43, 0x2c, 0x7c, 0x39, 0xa3, 0x06, 0xf6, 0xa8, 0x4a, 0x0d, 0x3e, 0x48, 0x94, 0xa9, + 0xbd, 0x39, 0x53, 0xf9, 0x20, 0x2a, 0x90, 0x9e, 0x97, 0xf0, 0xe6, 0x8a, 0x21, 0x7a, 0x33, 0x19, + 0xd5, 0x00, 0x66, 0xba, 0x02, 0x1e, 0x73, 0x19, 0xd1, 0xaa, 0x60, 0xe3, 0x4e, 0x5e, 0x5d, 0xcc, + 0x3e, 0xc1, 0xc8, 0xa6, 0x9d, 0xe3, 0x91, 0x07, 0x70, 0xa2, 0x1d, 0xe5, 0xe0, 0xd8, 0x71, 0xcb, + 0xfa, 0xa5, 0x90, 0x14, 0x8a, 0x7e, 0xc0, 0xc1, 0x91, 0xb6, 0x37, 0x6f, 0xfe, 0xd9, 0x1b, 0xe8, + 0xe6, 0x0d, 0x55, 0x07, 0x82, 0x71, 0x1c, 0x27, 0xf8, 0x27, 0x80, 0xe3, 0x8a, 0x90, 0xa4, 0x0b, + 0x19, 0x8d, 0x27, 0x1f, 0x54, 0x09, 0x7f, 0x80, 0x67, 0x52, 0x95, 0xb2, 0x14, 0xb8, 0x18, 0x17, + 0x8b, 0xf5, 0x5c, 0x22, 0x8e, 0xe4, 0x41, 0xd8, 0xbb, 0x2c, 0xcc, 0xaf, 0x9f, 0xe7, 0xf3, 0xbf, + 0xa3, 0x14, 0x9a, 0x2f, 0x51, 0xa5, 0x70, 0x38, 0x08, 0x83, 0x75, 0x8b, 0x7d, 0x43, 0x4b, 0x9d, + 0x93, 0x60, 0x45, 0x4e, 0x23, 0x6b, 0xfd, 0x65, 0xaf, 0x47, 0xf7, 0x1e, 0x5c, 0xe6, 0x1b, 0x76, + 0x76, 0xcf, 0x5b, 0x91, 0x70, 0x96, 0x51, 0x16, 0x6f, 0xf7, 0x79, 0xbc, 0x70, 0x72, 0xfb, 0x25, + 0xc6, 0xb9, 0xe3, 0x7e, 0x4e, 0x90, 0xac, 0xca, 0x28, 0xd0, 0x54, 0xe3, 0xeb, 0x00, 0x7d, 0x9a, + 0x91, 0x9b, 0x2e, 0x9f, 0x06, 0x37, 0x2a, 0x66, 0x24, 0x3a, 0x30, 0xc4, 0x2a, 0x09, 0x64, 0x04, + 0x00, 0xa8, 0x4a, 0xc8, 0xf5, 0x3f, 0x0e, 0x17, 0xb6, 0x9f, 0xc3, 0x8e, 0xbd, 0x5e, 0x1e, 0x71, + 0x35, 0x9b, 0x97, 0xda, 0xae, 0x12, 0xa7, 0x55, 0x9e, 0x9b, 0x62, 0x78, 0x0b, 0x49, 0x54, 0xc1, + 0x10, 0xab, 0x1d, 0x5d, 0x75, 0x5d, 0x42, 0x17, 0x38, 0x56, 0x5c, 0xde, 0xeb, 0xf5, 0x36, 0x6f, + 0xa1, 0x89, 0x18, 0x63, 0xaa, 0x14, 0x9a, 0xb5, 0xc4, 0x8d, 0x05, 0x0d, 0x1a, 0x55, 0xd3, 0xd9, + 0xde, 0x7a, 0xac, 0x95, 0x1b, 0x74, 0xd4, 0x67, 0x52, 0x09, 0xa3, 0x84, 0x81, 0x73, 0xe9, 0xf4, + 0x79, 0x9b, 0x8b, 0x6f, 0x7d, 0x88, 0xad, 0x38, 0x5f, 0x30, 0x7c, 0xb0, 0x75, 0xbb, 0xdd, 0x25, + 0xa9, 0x7c, 0x7b, 0x41, 0xd8, 0x04, 0xdf, 0xea, 0xb6, 0x5a, 0xd7, 0x3a, 0x72, 0x3e, 0x92, 0xc3, + 0x3d, 0x0f, 0x67, 0x1a, 0x1a, 0x58, 0x65, 0xb1, 0x5d, 0xf1, 0xb3, 0x5a, 0x1e, 0x82, 0x03, 0x82, + 0x22, 0x75, 0x78, 0xdc, 0x60, 0x76, 0x03, 0xcc, 0xbe, 0x9c, 0xd5, 0xc9, 0x8b, 0xe1, 0xbb, 0xf6, + 0x4f, 0xcd, 0x8a, 0xcd, 0x17, 0x83, 0x5d, 0x3a, 0x71, 0xcc, 0xab, 0x82, 0x7a, 0xc9, 0x82, 0xa8, + 0x05, 0x1c, 0xea, 0xb9, 0x39, 0x66, 0x2d, 0x84, 0x02, 0x7d, 0xb1, 0x81, 0xc6, 0xcc, 0x72, 0x84, + 0xe3, 0x4a, 0x46, 0x2f, 0x59, 0xbf, 0x85, 0x72, 0xb5, 0x42, 0x2e, 0x39, 0x80, 0x69, 0x0a, 0xa4, + 0xc7, 0x5d, 0xbe, 0x52, 0xb3, 0x64, 0xfc, 0xdf, 0x9d, 0x54, 0xa0, 0xe2, 0xdd, 0xca, 0xf2, 0x36, + 0x52, 0xce, 0x37, 0xa8, 0xa2, 0xf0, 0x0c, 0x85, 0xb7, 0xb1, 0x62, 0xe9, 0x8d, 0xcd, 0x55, 0xed, + 0xdd, 0xac, 0x9f, 0x4e, 0x22, 0xf4, 0x86, 0x7c, 0x42, 0xce, 0x4b, 0xc8, 0xc2, 0x95, 0xa8, 0xa4, + 0x4f, 0x9c, 0xa8, 0x40, 0xb9, 0x45, 0x09, 0x56, 0x05, 0x54, 0xb0, 0x84, 0xa1, 0xbd, 0xe7, 0x6d, + 0x22, 0x98, 0x88, 0xeb, 0x71, 0xde, 0x7e, 0xc0, 0xfe, 0x45, 0x23, 0x69, 0x5c, 0x94, 0x7f, 0x73, + 0x45, 0x3d, 0x06, 0xf1, 0xb8, 0xd2, 0x71, 0x71, 0x7a, 0x0c, 0x69, 0xc9, 0xf2, 0x88, 0xd3, 0x77, + 0x42, 0xbe, 0x5a, 0xa6, 0x94, 0x16, 0x11, 0x92, 0x4a, 0x01, 0x8e, 0x00, 0x7a, 0x2e, 0x9f, 0xd7, + 0x9a, 0xd1, 0x36, 0x54, 0xf6, 0x25, 0x90, 0xdb, 0x3b, 0xfc, 0x3c, 0x35, 0x65, 0xa5, 0xe3, 0xa0, + 0xe8, 0x1b, 0xc3, 0xe3, 0x40, 0x8f, 0x8f, 0x04, 0xa0, 0x50, 0xbc, 0x02, 0xd3, 0xfc, 0x0b, 0xd7, + 0x87, 0x6c, 0x44, 0xfd, 0x5e, 0x6a, 
0x33, 0xd7, 0xcd, 0xfd, 0xd2, 0xd2, 0x89, 0x43, 0xdf, 0x5a, + 0x6f, 0xd3, 0x7e, 0x68, 0x3a, 0xb5, 0xe6, 0x3c, 0xdb, 0x59, 0xd8, 0x32, 0xbf, 0xde, 0xe5, 0x3a, + 0xdd, 0x58, 0xbb, 0xe1, 0x29, 0x60, 0xae, 0x50, 0x90, 0x57, 0xdb, 0xe8, 0x1c, 0x48, 0xa2, 0xb2, + 0xf0, 0xba, 0x6e, 0xb2, 0x8d, 0xbe, 0xc2, 0x9a, 0x37, 0xc6, 0xfa, 0x89, 0x46, 0x82, 0xbb, 0xa7, + 0x50, 0xe8, 0x5d, 0x26, 0xc4, 0x81, 0x65, 0x0a, 0xf4, 0x22, 0xae, 0x90, 0x54, 0x4f, 0xc3, 0x09, + 0xf2, 0x8d, 0x58, 0x05, 0x73, 0xb7, 0xac, 0x80, 0xf3, 0x3b, 0xa1, 0x6b, 0x11, 0xed, 0xd7, 0x4f, + 0x47, 0xca, 0xb1, 0xf5, 0x19, 0x03, 0xc4, 0x55, 0xee, 0xc2, 0x89, 0x77, 0xe3, 0x34, 0x2f, 0xfb, + 0x86, 0xad, 0xd0, 0x82, 0x8c, 0x1c, 0x56, 0x76, 0xad, 0x93, 0xfc, 0x01, 0x99, 0x34, 0xb3, 0x34, + 0x5e, 0x4f, 0xae, 0x5d, 0xf4, 0x2a, 0x92, 0x90, 0x05, 0xe9, 0x6e, 0x73, 0x5d, 0x3d, 0x9c, 0x24, + 0xd0, 0x0f, 0x85, 0x57, 0x15, 0x97, 0xc8, 0x16, 0xb1, 0x48, 0xce, 0x89, 0x9a, 0x34, 0x98, 0x83, + 0x50, 0x59, 0x45, 0xa5, 0xd3, 0xba, 0x53, 0xd7, 0x2d, 0x6f, 0x44, 0x21, 0x74, 0x3b, 0x93, 0x1d, + 0x0a, 0xe9, 0x8f, 0x66, 0x4c, 0xee, 0xed, 0xdf, 0x35, 0x9d, 0x3f, 0xb2, 0x94, 0x81, 0xc2, 0xac, + 0xfb, 0x1d, 0x32, 0xf2, 0x90, 0x71, 0x44, 0xaa, 0xa9, 0x70, 0x3f, 0xb4, 0xd6, 0x3e, 0x1c, 0x89, + 0x20, 0xbb, 0x18, 0xc4, 0xa9, 0xbf, 0x99, 0x8d, 0x41, 0x8a, 0x21, 0xe6, 0xa2, 0x59, 0x26, 0x9f, + 0xd9, 0xe8, 0x4c, 0x18, 0x3f, 0xdd, 0xa1, 0xc9, 0x7a, 0xa5, 0x3e, 0xa1, 0xca, 0x95, 0x3d, 0x33, + 0xfd, 0xda, 0x9a, 0xd6, 0x2f, 0xeb, 0xf1, 0x2b, 0xc8, 0x44, 0x07, 0xcb, 0x2f, 0xa1, 0x0b, 0x1c, + 0xc4, 0xb3, 0x4a, 0x39, 0x0c, 0xd1, 0x7a, 0x07, 0x9e, 0x2a, 0x37, 0x1b, 0xc1, 0xfa, 0x94, 0x08, + 0x20, 0x65, 0x3d, 0x6a, 0x12, 0xa8, 0x61, 0x57, 0x0b, 0x74, 0xc9, 0xdf, 0x8c, 0x25, 0xfc, 0x07, + 0xa3, 0x56, 0xd9, 0x8a, 0xb6, 0x04, 0x2b, 0x3f, 0xd7, 0x70, 0xaf, 0xd5, 0x37, 0xfe, 0x62, 0x6e, + 0x1a, 0x91, 0x7e, 0xd1, 0x7a, 0xfe, 0xf0, 0x94, 0x3e, 0x78, 0x40, 0x03, 0xb4, 0x43, 0x69, 0x63, + 0xed, 0xbe, 0xc6, 0x17, 0x68, 0x16, 0x49, 0xc7, 0xe6, 0x63, 0x35, 0x73, 0xea, 0x3a, 0x17, 0xda, + 0xb5, 0xe6, 0xdf, 0x1d, 0xe3, 0xc2, 0xe9, 0x62, 0x29, 0xbf, 0x2e, 0x29, 0x2a, 0xbc, 0x91, 0xd4, + 0x50, 0x1e, 0x63, 0x0b, 0x62, 0x39, 0x3b, 0x84, 0xab, 0xa7, 0xd6, 0x62, 0x91, 0x64, 0xa8, 0x0d, + 0x2b, 0xab, 0xa8, 0xb7, 0xcf, 0x1d, 0xd4, 0xee, 0xf6, 0xb3, 0xcf, 0x3f, 0xd6, 0x77, 0x86, 0xa5, + 0x65, 0x0c, 0xb4, 0xd3, 0x6e, 0xf3, 0x61, 0x27, 0xbb, 0x94, 0x91, 0x4a, 0x01, 0x28, 0x5b, 0x98, + 0x90, 0x64, 0xdf, 0x9d, 0x48, 0x80, 0x1f, 0xc7, 0x36, 0x01, 0x4e, 0x7c, 0x9d, 0x18, 0x3c, 0x60, + 0x2c, 0xb8, 0xbf, 0xc2, 0xf5, 0xca, 0x1e, 0x83, 0xdd, 0xec, 0x5e, 0x7f, 0x9b, 0x0d, 0x58, 0xef, + 0x44, 0xfd, 0x4e, 0xbc, 0x3a, 0xab, 0x49, 0x8b, 0xf1, 0x31, 0x76, 0xc8, 0x4c, 0x72, 0xf0, 0xda, + 0xc9, 0x3c, 0x2f, 0x23, 0xb3, 0x83, 0xa5, 0x01, 0xb2, 0x03, 0xf0, 0x31, 0xb7, 0xc6, 0xff, 0xe4, + 0x08, 0x56, 0x1e, 0xc1, 0xb6, 0x7b, 0x0b, 0x19, 0x31, 0x01, 0xba, 0x56, 0x21, 0x72, 0xe9, 0x1d, + 0x16, 0xf4, 0x2a, 0x0f, 0xa5, 0x30, 0xeb, 0x84, 0xb2, 0x9e, 0x85, 0x0d, 0xe3, 0x8e, 0x1e, 0x51, + 0xf2, 0xcc, 0xff, 0xbf, 0xf3, 0xe7, 0x22, 0x8a, 0x39, 0x92, 0xc5, 0x29, 0x82, 0x37, 0x59, 0xec, + 0xb3, 0x38, 0x18, 0x89, 0x0d, 0xd2, 0x9e, 0x8e, 0x2f, 0x9d, 0x30, 0xa6, 0xde, 0xbf, 0xc2, 0xa6, + 0x1e, 0x50, 0x9f, 0x19, 0xe6, 0x9e, 0x83, 0x2a, 0x4a, 0x66, 0xc7, 0x3e, 0xf6, 0xca, 0x11, 0x98, + 0x7c, 0x9b, 0x55, 0xba, 0x9f, 0x3b, 0x39, 0x59, 0x72, 0x9c, 0x23, 0xbd, 0xe3, 0xb5, 0xb6, 0x11, + 0x76, 0x7c, 0x4a, 0x99, 0xdf, 0xb6, 0x49, 0x8b, 0x4b, 0x55, 0x24, 0xd9, 0x35, 0x88, 0xcc, 0xbb, + 0xe1, 0x73, 0xb5, 0xbf, 0xf7, 0xe1, 0xca, 0xed, 0xa0, 0x1c, 
0x29, 0x18, 0x90, 0xda, 0x55, 0xd2, + 0xcd, 0x87, 0x77, 0x60, 0x2d, 0xdd, 0x84, 0x30, 0x37, 0x1d, 0xb8, 0x35, 0x6b, 0xb8, 0x24, 0xf5, + 0xc0, 0x35, 0x4c, 0xd5, 0x46, 0x78, 0x22, 0x85, 0x66, 0x7a, 0x60, 0x60, 0xfa, 0x0f, 0xf9, 0xc8, + 0x37, 0xe3, 0x1f, 0xbb, 0xc3, 0x88, 0xa3, 0x24, 0x15, 0x32, 0x9a, 0xc2, 0xfd, 0x94, 0x0f, 0xac, + 0xd8, 0x66, 0x98, 0xc8, 0x32, 0xbb, 0xca, 0x65, 0x37, 0x33, 0xcc, 0xa3, 0x10, 0xa9, 0xf8, 0x48, + 0xb2, 0x7d, 0x76, 0x8a, 0x49, 0x40, 0xd7, 0xf9, 0xe9, 0x4a, 0xf8, 0x46, 0x51, 0x3d, 0x22, 0x0e, + 0x8b, 0x16, 0x19, 0x3f, 0xea, 0xde, 0x3b, 0x46, 0x80, 0x5c, 0xc2, 0xd9, 0x74, 0xa5, 0xa7, 0x3e, + 0xb8, 0x95, 0x3f, 0x54, 0x66, 0xe0, 0x47, 0xc6, 0x29, 0x8c, 0x90, 0x8d, 0xfd, 0xe2, 0xcb, 0xf2, + 0xd7, 0x7d, 0x31, 0xc7, 0x2e, 0x99, 0xa5, 0x87, 0x27, 0x8d, 0x4e, 0x73, 0x26, 0x28, 0xe9, 0xb2, + 0x84, 0x54, 0x19, 0x44, 0x6a, 0xc9, 0xf1, 0x14, 0xa8, 0x60, 0xbe, 0x3d, 0xc7, 0xbe, 0x40, 0xe4, + 0x75, 0xdc, 0x9a, 0xad, 0xc2, 0xc8, 0x8e, 0x63, 0x49, 0x9a, 0x10, 0x4c, 0x4d, 0x94, 0x44, 0x97, + 0x8f, 0x27, 0x12, 0xeb, 0x32, 0x24, 0xa9, 0xb2, 0x26, 0x6d, 0x39, 0xab, 0x36, 0x53, 0xcd, 0xce, + 0xcc, 0xdb, 0x0b, 0x47, 0x3d, 0xdb, 0x24, 0x1f, 0xb5, 0x5c, 0x7d, 0x27, 0xb2, 0x57, 0xba, 0x92, + 0x86, 0x0a, 0x13, 0x8a, 0xea, 0xda, 0x22, 0x07, 0x10, 0xf6, 0x8c, 0xfa, 0x2d, 0x6c, 0x61, 0x6a, + 0x86, 0x60, 0x02, 0x95, 0xe7, 0x7c, 0xe3, 0xba, 0xcc, 0x52, 0x36, 0x05, 0x4c, 0x6a, 0xee, 0xef, + 0x09, 0x27, 0x9b, 0x34, 0xc7, 0x0c, 0xff, 0x9b, 0x4f, 0x12, 0x81, 0xf6, 0x92, 0x4d, 0x59, 0x25, + 0xba, 0x63, 0x68, 0xef, 0x23, 0xbe, 0x2b, 0x89, 0x30, 0xba, 0x4c, 0x48, 0x7e, 0x3b, 0x24, 0x34, + 0x0f, 0x8f, 0xc9, 0xd9, 0x52, 0x0c, 0xc6, 0x69, 0x88, 0x9b, 0x07, 0x21, 0x03, 0x19, 0x9f, 0xa5, + 0x19, 0x45, 0x82, 0x88, 0x5e, 0xb5, 0xf2, 0x08, 0x57, 0xe3, 0x60, 0xea, 0x98, 0xd3, 0xd1, 0x95, + 0x88, 0x15, 0xdb, 0xe4, 0x0a, 0x4b, 0xe7, 0x3e, 0x61, 0x33, 0xb3, 0xc5, 0x3e, 0xdb, 0x99, 0xe0, + 0x76, 0xe8, 0x53, 0xb7, 0xde, 0xee, 0x3c, 0x0b, 0x3f, 0x1c, 0x52, 0x4b, 0xa4, 0xa8, 0x14, 0xb8, + 0x11, 0x7d, 0x58, 0x0d, 0x5b, 0xe1, 0x3a, 0xf8, 0xb6, 0xfe, 0x86, 0x10, 0x3a, 0x7f, 0xf4, 0xb4, + 0x2f, 0xd3, 0xc6, 0x82, 0xd9, 0xe8, 0xa6, 0xe1, 0x64, 0xd1, 0xaf, 0xe4, 0x49, 0xa3, 0xab, 0x6b, + 0xa6, 0xa6, 0x17, 0x21, 0x82, 0xeb, 0xdc, 0x08, 0x54, 0x75, 0x4b, 0x72, 0x8d, 0x4d, 0x00, 0x4f, + 0x35, 0x3f, 0xab, 0x9b, 0x03, 0x80, 0x18, 0xd0, 0x21, 0x11, 0x01, 0xe2, 0x79, 0xbd, 0x5b, 0x14, + 0xac, 0x84, 0x3f, 0x5e, 0xc2, 0x93, 0x07, 0x4d, 0x97, 0x36, 0xf3, 0x8e, 0x11, 0x7c, 0x48, 0x44, + 0x90, 0xfd, 0xa4, 0x90, 0x24, 0xb4, 0xbc, 0xcb, 0x67, 0xba, 0xd4, 0x94, 0xb7, 0x3a, 0x21, 0x2f, + 0xc2, 0xe4, 0xb6, 0xe5, 0x21, 0x90, 0xed, 0xcf, 0xeb, 0x66, 0x4d, 0x17, 0xeb, 0xe5, 0xcb, 0x8b, + 0xdd, 0x2d, 0xc3, 0x97, 0xb6, 0x6b, 0x41, 0xd2, 0x93, 0x27, 0x40, 0x89, 0x49, 0x61, 0x4b, 0xbd, + 0x80, 0xed, 0xdd, 0xcd, 0x71, 0x30, 0x22, 0x99, 0x6b, 0x28, 0xaf, 0x51, 0xd9, 0xb5, 0x80, 0x42, + 0xf5, 0x86, 0xb2, 0xef, 0xc7, 0x6a, 0x87, 0x7a, 0x82, 0xbb, 0x21, 0xb5, 0xa1, 0x38, 0x9e, 0x9c, + 0x31, 0xb2, 0xd4, 0xd1, 0xaa, 0x7f, 0xb5, 0x06, 0x9b, 0xee, 0x6e, 0x8f, 0x78, 0x5c, 0xda, 0x74, + 0xd3, 0xbe, 0x02, 0xe7, 0x72, 0xc0, 0x96, 0x63, 0xde, 0x21, 0x05, 0x87, 0xe4, 0x18, 0x8d, 0x54, + 0x66, 0x31, 0x55, 0x31, 0xaa, 0xf3, 0x4f, 0xab, 0x3d, 0x50, 0x89, 0x31, 0x5e, 0xb5, 0xf4, 0xd4, + 0x20, 0x32, 0xc6, 0x4a, 0xf3, 0x53, 0xc8, 0x35, 0xe4, 0x7c, 0xd4, 0xdc, 0xad, 0x81, 0xed, 0xfe, + 0x7c, 0x9b, 0x6d, 0x8d, 0xae, 0x1e, 0xca, 0x8f, 0xc6, 0x64, 0x5c, 0x08, 0xfe, 0xe9, 0xf7, 0x2f, + 0xb1, 0x63, 0x18, 0xa9, 0xdf, 0xe5, 0x1a, 0x45, 0x40, 0x75, 0x36, 0xb7, 0x5a, 0x9a, 
0x7c, 0xba, + 0x2d, 0xaf, 0x44, 0x8c, 0xa0, 0x1a, 0x92, 0x32, 0x81, 0x5e, 0xb3, 0xb1, 0x8f, 0x3a, 0x19, 0x9c, + 0xb8, 0x5b, 0xd0, 0xb3, 0xec, 0xcd, 0xe7, 0x58, 0xb9, 0xbd, 0xcd, 0x8a, 0x4d, 0xdd, 0xdc, 0x9d, + 0x13, 0x87, 0xbf, 0x9d, 0x3d, 0x06, 0x5c, 0x81, 0x55, 0xbb, 0x08, 0xb4, 0x8e, 0x99, 0xdf, 0x5b, + 0x4f, 0xcb, 0x1a, 0xf4, 0x6a, 0x01, 0x0b, 0xff, 0x01, 0x58, 0x19, 0x2b, 0xbf, 0x51, 0xba, 0x0a, + 0xe8, 0x0c, 0x9a, 0xfd, 0x45, 0x63, 0xfc, 0xed, 0xf2, 0x43, 0x36, 0x4d, 0xda, 0xf7, 0x71, 0xa8, + 0x16, 0x8c, 0x34, 0xdf, 0x06, 0x24, 0xc9, 0x06, 0xfb, 0xff, 0xef, 0x78, 0xd8, 0x89, 0xd5, 0x92, + 0xd9, 0x95, 0x1d, 0xd8, 0xfe, 0x17, 0x92, 0xa8, 0x74, 0xea, 0x90, 0xb0, 0xf4, 0xa1, 0xa7, 0x59, + 0xd1, 0xfb, 0x43, 0x2e, 0x17, 0xb7, 0xc5, 0x7e, 0x39, 0xa1, 0x79, 0xef, 0x68, 0xfa, 0xea, 0x7b, + 0x74, 0x2c, 0x47, 0xe8, 0x4a, 0xce, 0xe3, 0x81, 0xc7, 0xc1, 0xe5, 0xbc, 0x85, 0xea, 0x23, 0x81, + 0xf1, 0x7f, 0x19, 0xb5, 0x87, 0x3e, 0x38, 0x7c, 0xee, 0x56, 0x40, 0x82, 0xd9, 0xda, 0x2b, 0x71, + 0xb3, 0xe5, 0x6a, 0x36, 0xd2, 0x34, 0xd0, 0xef, 0x68, 0x1f, 0xde, 0x79, 0xb4, 0x83, 0x41, 0x54, + 0xbf, 0xab, 0x59, 0x3f, 0xad, 0xfc, 0xc8, 0x99, 0x3f, 0x64, 0xa4, 0x9e, 0x8e, 0xf1, 0xb6, 0x7a, + 0xa2, 0x55, 0x8b, 0x49, 0x0b, 0x92, 0x1d, 0xe2, 0x75, 0x55, 0xbf, 0xc0, 0x5e, 0x88, 0xd7, 0xd2, + 0xd5, 0x0b, 0x83, 0x6b, 0xb0, 0xee, 0x0b, 0x9c, 0x2b, 0x55, 0x27, 0x11, 0x51, 0xd5, 0xb9, 0x9b, + 0x11, 0xed, 0xf4, 0x74, 0x91, 0x3b, 0x2c, 0xcc, 0x5e, 0x30, 0x2f, 0x38, 0xf2, 0x8b, 0x4c, 0x0b, + 0x6c, 0x15, 0x44, 0xc9, 0x74, 0xc7, 0x8d, 0xce, 0xea, 0x8d, 0x45, 0x59, 0x80, 0x3f, 0x26, 0x1c, + 0x41, 0xa1, 0x13, 0xc7, 0xd3, 0x3a, 0x60, 0x93, 0xc4, 0x44, 0xd5, 0x57, 0xb5, 0xc4, 0xb2, 0xae, + 0x0c, 0xd9, 0x43, 0xe8, 0xde, 0x91, 0x30, 0x85, 0xe9, 0x8a, 0x9e, 0x4c, 0x57, 0x23, 0xda, 0xe3, + 0x91, 0x93, 0x22, 0xcb, 0xc3, 0x51, 0xe1, 0xb2, 0x3b, 0x46, 0x6f, 0x2c, 0xd1, 0xd9, 0x51, 0x41, + 0x9b, 0x29, 0xc6, 0xc8, 0x3b, 0x16, 0x54, 0x14, 0x29, 0x97, 0x90, 0x12, 0x5a, 0x1b, 0xfd, 0xc9, + 0xd8, 0xd5, 0x00, 0xd8, 0x85, 0x6d, 0xff, 0xeb, 0xfb, 0xad, 0x4c, 0xb5, 0xe5, 0xf8, 0xa4, 0x72, + 0x06, 0x0e, 0xd5, 0x89, 0x2d, 0xcf, 0xd4, 0x2e, 0x35, 0xb2, 0x2a, 0x53, 0xe9, 0x50, 0x30, 0x0e, + 0xf6, 0x84, 0x35, 0xe5, 0xa9, 0x9b, 0x35, 0xed, 0x5b, 0x0b, 0x02, 0xb5, 0x3c, 0x4a, 0xec, 0xdf, + 0x62, 0xd9, 0x09, 0xb2, 0xa2, 0xb8, 0xc8, 0x83, 0x08, 0xc5, 0xb2, 0x53, 0x8d, 0xb7, 0x53, 0x89, + 0x99, 0x3f, 0x96, 0x2f, 0x62, 0x20, 0x0f, 0x46, 0x0c, 0x7b, 0x99, 0x33, 0x61, 0x79, 0x72, 0x68, + 0x04, 0xc9, 0x15, 0x92, 0xd0, 0x23, 0xcc, 0x99, 0x88, 0xd1, 0x5d, 0x9a, 0xb3, 0xbc, 0x78, 0x8d, + 0x40, 0x89, 0x65, 0x53, 0xc8, 0xe1, 0x91, 0x37, 0x8d, 0xcb, 0x05, 0x9f, 0xda, 0xd1, 0x23, 0x71, + 0xcf, 0xc5, 0x71, 0x24, 0xd6, 0xf8, 0x98, 0x0a, 0xb9, 0x5d, 0xc9, 0x86, 0xce, 0xa9, 0x31, 0x55, + 0xef, 0x5d, 0x60, 0xed, 0xf7, 0xe3, 0x37, 0x7d, 0xb6, 0xf3, 0x37, 0xe0, 0xd1, 0x1c, 0xb8, 0x1b, + 0xf2, 0xf1, 0x4c, 0x59, 0x68, 0xf4, 0x7c, 0xb1, 0x98, 0x5c, 0x58, 0x14, 0x68, 0xc6, 0x23, 0xc2, + 0x39, 0xf9, 0x3c, 0xd2, 0xf8, 0x0c, 0x9f, 0xe5, 0xd9, 0xf5, 0x23, 0x5b, 0xfd, 0x62, 0x0c, 0x99, + 0xf4, 0xf1, 0xaa, 0x65, 0xf5, 0xa6, 0x0a, 0x4d, 0xdd, 0x08, 0x49, 0x7f, 0x56, 0xdb, 0xbd, 0x8f, + 0xe8, 0x8f, 0xef, 0x77, 0x27, 0x34, 0x48, 0x4c, 0x69, 0x73, 0xab, 0xc6, 0xed, 0xf9, 0xb6, 0x24, + 0xa1, 0xbe, 0x1c, 0xbb, 0xd6, 0x71, 0xca, 0x81, 0x26, 0xab, 0x22, 0x55, 0xce, 0xda, 0x77, 0x9a, + 0xfc, 0xed, 0xb3, 0x76, 0xe5, 0x96, 0x35, 0x45, 0x77, 0x50, 0xc6, 0xe7, 0xa1, 0x52, 0x96, 0xa3, + 0x10, 0x46, 0x81, 0x3b, 0x51, 0xdb, 0xd2, 0x28, 0xeb, 0x4d, 0xac, 0x75, 0x3b, 0x10, 0xc6, 0x35, + 0x0a, 0x20, 
0xc6, 0x55, 0x43, 0xd7, 0xcb, 0x38, 0xd3, 0xc0, 0x3a, 0x15, 0x5c, 0xc1, 0x92, 0x25, + 0x61, 0xa0, 0xea, 0x13, 0xef, 0xb0, 0xc1, 0x78, 0xdb, 0xe1, 0x9e, 0xfb, 0x0d, 0x5c, 0xb9, 0xa9, + 0xa5, 0xec, 0xde, 0x7d, 0x24, 0x20, 0x8d, 0x42, 0xf3, 0x5d, 0xe5, 0x28, 0xc2, 0x4a, 0x01, 0x71, + 0x33, 0x3f, 0x1f, 0x6d, 0x83, 0x7b, 0x96, 0x2c, 0xd7, 0x74, 0x0b, 0xcd, 0x77, 0x2f, 0x68, 0xca, + 0x1a, 0xc5, 0x62, 0x21, 0x39, 0x60, 0xcf, 0x34, 0x4e, 0xab, 0x23, 0xf5, 0x4f, 0xc5, 0x1b, 0x96, + 0x40, 0x8b, 0x50, 0x1d, 0x41, 0xac, 0x64, 0x42, 0x8e, 0x93, 0x81, 0xf0, 0x6d, 0xb3, 0xa3, 0x54, + 0x50, 0x1e, 0x46, 0x0e, 0x33, 0x62, 0xc0, 0x49, 0x15, 0xbc, 0xc5, 0xc6, 0xa0, 0x49, 0xd4, 0x37, + 0xf6, 0x2a, 0xa1, 0x56, 0xbc, 0x8f, 0x60, 0xdc, 0xc3, 0x56, 0x9c, 0x43, 0x08, 0x5a, 0x36, 0xea, + 0x04, 0x12, 0x81, 0x34, 0xe6, 0x5a, 0xa3, 0xdb, 0x5a, 0xa5, 0x6a, 0xa6, 0xc4, 0xff, 0xfe, 0x3f, + 0x72, 0x93, 0x9d, 0x84, 0x14, 0x5a, 0x74, 0x5a, 0xb4, 0x9f, 0x42, 0x1a, 0x48, 0xc7, 0x2b, 0x67, + 0x39, 0x78, 0x85, 0x19, 0x20, 0x61, 0xc5, 0x6a, 0x92, 0xe2, 0xfd, 0xec, 0x90, 0x38, 0x35, 0xdb, + 0x07, 0x3c, 0xc3, 0x9d, 0x17, 0x80, 0x46, 0x83, 0x0d, 0x2e, 0x97, 0x91, 0x74, 0x04, 0xc5, 0x97, + 0x34, 0x30, 0x6e, 0x1a, 0xba, 0xd8, 0x85, 0xfd, 0xc9, 0xfd, 0x91, 0x85, 0xe5, 0xe3, 0xe3, 0xd0, + 0x14, 0x02, 0xd8, 0x56, 0x4b, 0x29, 0x39, 0xd1, 0x3f, 0x6c, 0x98, 0x9e, 0x48, 0x76, 0x63, 0x35, + 0x53, 0xde, 0x80, 0xcf, 0xbc, 0x20, 0xa1, 0xda, 0x3b, 0x73, 0xef, 0x96, 0xf4, 0xf9, 0x7b, 0x40, + 0x92, 0xe7, 0x51, 0xa3, 0x90, 0x1a, 0x7a, 0x40, 0x8e, 0xc7, 0xc2, 0xd6, 0xe6, 0xf7, 0xad, 0xcb, + 0x09, 0xbc, 0x72, 0xab, 0xc8, 0x20, 0xdc, 0xe5, 0x87, 0xca, 0x3b, 0x9f, 0x42, 0xc4, 0x00, 0x52, + 0xde, 0x9d, 0xbb, 0xf0, 0xb5, 0x83, 0x80, 0x6c, 0xe0, 0x9d, 0x92, 0x80, 0x0b, 0x57, 0xed, 0x04, + 0xd6, 0x2e, 0x89, 0xcb, 0xf1, 0xa3, 0xd2, 0x68, 0x38, 0x58, 0x22, 0xe6, 0x98, 0x78, 0x64, 0x83, + 0xf8, 0x7f, 0xcf, 0x9f, 0xdb, 0x7f, 0x5e, 0xcc, 0xc0, 0xe4, 0xdb, 0x85, 0x8b, 0xe7, 0xb6, 0x87, + 0xea, 0x18, 0x20, 0x59, 0x88, 0x65, 0xa1, 0x63, 0x16, 0x82, 0x5c, 0x72, 0xa7, 0x45, 0xf9, 0xc0, + 0xf7, 0x76, 0x9b, 0x63, 0xac, 0xa5, 0x21, 0x2a, 0x3e, 0x2f, 0xee, 0x45, 0x4a, 0x3e, 0x28, 0xff, + 0xce, 0xd3, 0x7b, 0xce, 0x3e, 0x13, 0x9d, 0x1b, 0x32, 0x2e, 0x01, 0xff, 0x57, 0xbf, 0x43, 0xb6, + 0xe3, 0x5c, 0x16, 0xca, 0xac, 0xe6, 0xc0, 0xd7, 0x77, 0x80, 0x38, 0x99, 0x2b, 0x9f, 0x4f, 0x4b, + 0x28, 0x39, 0x44, 0x89, 0x89, 0x1f, 0xe5, 0x31, 0xc9, 0x4e, 0x8e, 0x49, 0x8e, 0xd3, 0xb2, 0xf6, + 0x78, 0xc3, 0x56, 0xe7, 0xe8, 0x8b, 0x92, 0x72, 0x39, 0xc9, 0xe8, 0xff, 0x00, 0x86, 0xa9, 0xa4, + 0xd1, 0x14, 0xec, 0xfe, 0xff, 0xed, 0xe3, 0xff, 0x5f, 0xef, 0x3f, 0xf2, 0x9f, 0xff, 0xfc, 0xe7, + 0x3f, 0xff, 0xf9, 0xcf, 0x7f, 0xfe, 0xf3, 0x9f, 0xff, 0xfc, 0xe7, 0x3f, 0xff, 0xf9, 0xcf, 0x7f, + 0xfe, 0xf3, 0x9f, 0xff, 0xfc, 0xe7, 0x3f, 0xff, 0xf9, 0xcf, 0x7f, 0xfe, 0xf3, 0x9f, 0xff, 0xfc, + 0xe7, 0x3f, 0xff, 0xf9, 0xcf, 0x7f, 0xfe, 0xf3, 0x9f, 0xff, 0xfc, 0xe7, 0x3f, 0xff, 0xf9, 0xcf, + 0x7f, 0xfe, 0xf3, 0x9f, 0xff, 0xfc, 0xe7, 0xff, 0x73, 0xff, 0x0f, 0xaf, 0xfc, 0x7a, 0xa1, 0x00, + 0x91, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 37120, // uncompressed data size (bytes) + 25363, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x4e, 0x05, 0x62, 0x6d, 0x08, 0x13, 0x4c, 0xa4, 0x40, 0x69, + 0x20, 0x00, 0x00, 0x9c, 0xaf, 0x89, 0x9b, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 37120 +// COMPRESSED SIZE (bytes): 25360 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_image_prod_data[] = +{ + 0xed, 0xdd, 0x43, 0xb0, 0x68, 0x3b, 0xd4, 0x20, 0xe0, 0x63, 0xdb, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, + 0xdb, 0xb6, 0x6d, 0xdb, 0xb6, 0x8d, 0x7b, 0x74, 0x8f, 0x6d, 0xdb, 0xb6, 0xfa, 0x1f, 0xbf, 0x49, + 0xf7, 0xa4, 0xab, 0x07, 0x7d, 0xbf, 0x59, 0x46, 0xbb, 0x52, 0xa9, 0xa4, 0x56, 0x56, 0xb2, 0xb2, + 0x01, 0x00, 0xfe, 0xff, 0xa6, 0xd3, 0x7d, 0xd9, 0x2d, 0xf6, 0xf4, 0xa9, 0x17, 0x0f, 0x6f, 0x3d, + 0xdd, 0xe5, 0x7f, 0x50, 0x1e, 0x0b, 0xb3, 0xd0, 0xa1, 0x04, 0x60, 0x8f, 0x04, 0xd9, 0xf7, 0x76, + 0x66, 0xaf, 0x3e, 0x48, 0xdd, 0xfd, 0x68, 0x50, 0x9d, 0x42, 0x78, 0xc4, 0x86, 0x3c, 0x6f, 0x68, + 0xfb, 0xa3, 0x45, 0x38, 0xb2, 0xb7, 0x40, 0x0f, 0x19, 0x0c, 0x34, 0xec, 0xac, 0xa3, 0xb4, 0xb8, + 0xd8, 0xc8, 0xde, 0x97, 0xb7, 0x7f, 0x50, 0x1a, 0x4b, 0x7c, 0xf9, 0xc1, 0x9c, 0x53, 0xbc, 0x44, + 0x70, 0x56, 0xdc, 0xf2, 0xd4, 0x7e, 0x2c, 0xe1, 0x80, 0x7c, 0x10, 0x40, 0x58, 0x01, 0x83, 0x9d, + 0xdb, 0xbf, 0xb3, 0x18, 0x90, 0x2f, 0x40, 0xd7, 0x56, 0x41, 0x93, 0xfc, 0x9a, 0x60, 0x3d, 0x87, + 0xfc, 0x2b, 0xde, 0xef, 0x85, 0x02, 0x41, 0xa6, 0xf6, 0x9c, 0x24, 0x9d, 0xb8, 0xd6, 0x85, 0x73, + 0xaf, 0xd2, 0x15, 0x71, 0xe1, 0x4e, 0xda, 0xb8, 0xc3, 0xed, 0xe6, 0x49, 0x32, 0x45, 0x68, 0x6f, + 0xf4, 0x59, 0x23, 0x90, 0x2c, 0x1f, 0xd9, 0xda, 0x18, 0x27, 0x97, 0x7a, 0xe3, 0xe1, 0x05, 0x95, + 0x0f, 0x20, 0x4e, 0xad, 0xa0, 0x8d, 0x0c, 0x49, 0xbd, 0x32, 0x0f, 0x8a, 0x1f, 0x97, 0xa1, 0x5d, + 0xf8, 0x85, 0x22, 0x91, 0x0f, 0x36, 0x50, 0x92, 0x97, 0x3d, 0xb8, 0xe5, 0xad, 0xd3, 0x07, 0x51, + 0x12, 0x88, 0x58, 0xd6, 0xde, 0x9d, 0xc8, 0xce, 0x27, 0xbf, 0x23, 0xed, 0xc8, 0xaf, 0x56, 0x79, + 0x34, 0x57, 0x3d, 0x18, 0x1d, 
0x25, 0x95, 0x80, 0xaa, 0x73, 0x8a, 0xe1, 0x9f, 0xf7, 0xac, 0x8d, + 0xa4, 0x7d, 0x6e, 0x20, 0x55, 0x06, 0x91, 0xdf, 0x08, 0xa4, 0x20, 0xb1, 0xf9, 0x6b, 0x93, 0x50, + 0x50, 0x67, 0x5a, 0xbc, 0xa0, 0x30, 0x3c, 0x23, 0x5f, 0x7d, 0xdd, 0x52, 0x20, 0xd6, 0x8f, 0x62, + 0xe4, 0x29, 0x2e, 0x9d, 0x3c, 0xc7, 0xcc, 0x83, 0x98, 0x8e, 0xd0, 0xad, 0xf9, 0x26, 0x7b, 0x2d, + 0xa2, 0xf3, 0x69, 0x93, 0xbd, 0xa9, 0x77, 0x26, 0x59, 0xb7, 0xb2, 0x40, 0xa6, 0x59, 0x8e, 0x3e, + 0x1b, 0x14, 0x2b, 0x6e, 0xb5, 0x5a, 0xfc, 0x16, 0xc6, 0x02, 0xe5, 0xda, 0x51, 0x24, 0xb2, 0x41, + 0x26, 0x81, 0x64, 0x7e, 0x40, 0x65, 0xe8, 0x9d, 0xfb, 0xfa, 0x6a, 0xcf, 0xd3, 0x8b, 0x93, 0xcd, + 0x3d, 0xfa, 0xd6, 0xa9, 0xd6, 0xfd, 0xc9, 0xf3, 0x70, 0x03, 0x17, 0x0a, 0x85, 0x3d, 0x29, 0xef, + 0x75, 0x7f, 0x99, 0xbc, 0xcc, 0x4f, 0xfa, 0xc4, 0xca, 0x66, 0xc5, 0xe3, 0xe2, 0xa1, 0xb8, 0x30, + 0xbd, 0x13, 0xe1, 0x3e, 0xa1, 0x21, 0x83, 0xa5, 0x1c, 0x49, 0x03, 0xbc, 0x91, 0x19, 0xb3, 0x4a, + 0xb1, 0x8c, 0xe7, 0xa3, 0x89, 0xd6, 0x73, 0x6c, 0xfc, 0xe0, 0xa6, 0x6c, 0x27, 0x65, 0x47, 0x0e, + 0x35, 0x7c, 0x20, 0x0a, 0xf1, 0x02, 0xdd, 0x68, 0x9d, 0x4b, 0xf3, 0x7c, 0xcd, 0xc5, 0x52, 0x20, + 0x7b, 0x61, 0xd1, 0xde, 0x08, 0xf9, 0x9a, 0x02, 0x14, 0xbe, 0xea, 0xfd, 0x3e, 0x17, 0x07, 0xd6, + 0x68, 0x9c, 0x32, 0x9a, 0x19, 0x38, 0xa6, 0x59, 0x6f, 0xd8, 0xc4, 0x79, 0x55, 0x50, 0x55, 0xbd, + 0xf6, 0x47, 0x57, 0xee, 0x83, 0x12, 0xbb, 0xf7, 0xca, 0x01, 0x02, 0x31, 0x76, 0xb3, 0xc3, 0x2e, + 0x18, 0xe2, 0x50, 0xaa, 0xae, 0x4a, 0x5e, 0x1d, 0x1d, 0xfd, 0x58, 0x4b, 0x45, 0x95, 0xbc, 0xcd, + 0x18, 0xf5, 0x83, 0xec, 0x8d, 0x1a, 0x64, 0xb3, 0x25, 0xbf, 0xd5, 0x03, 0xfd, 0xdb, 0x36, 0x95, + 0x07, 0x81, 0x70, 0x3a, 0x63, 0x9b, 0x8d, 0x88, 0x8f, 0x02, 0xb0, 0x0b, 0x34, 0x69, 0x28, 0x46, + 0x53, 0x36, 0xad, 0xf6, 0x7b, 0x87, 0x43, 0xc6, 0xbc, 0xf9, 0x5d, 0x99, 0x69, 0x50, 0x85, 0x37, + 0xf3, 0x56, 0x9d, 0x42, 0xf0, 0xa5, 0x03, 0xd8, 0x36, 0x6c, 0xee, 0x56, 0xcd, 0x6c, 0x40, 0x75, + 0x62, 0xb7, 0x5a, 0xe2, 0x4b, 0x1c, 0xfd, 0xa9, 0x38, 0x35, 0xf5, 0xf0, 0x8f, 0x54, 0x8a, 0xff, + 0x4b, 0x5d, 0xc7, 0x63, 0x5d, 0x79, 0x6d, 0x24, 0xae, 0x71, 0x1b, 0xb3, 0xda, 0x79, 0x81, 0x4b, + 0xb5, 0xb7, 0xa1, 0xa3, 0x8a, 0x68, 0x11, 0x36, 0x89, 0x50, 0x17, 0x1d, 0xe8, 0xa5, 0x20, 0x0d, + 0xfb, 0xda, 0x51, 0xc7, 0x06, 0x6b, 0x67, 0xcb, 0xbb, 0x85, 0x96, 0x60, 0x20, 0x28, 0xc4, 0x16, + 0x5e, 0x6f, 0x8b, 0xc2, 0x6d, 0xb5, 0xa5, 0x93, 0x40, 0x41, 0xa0, 0xb8, 0x66, 0x07, 0x0d, 0x8d, + 0x65, 0x28, 0x0e, 0x61, 0x19, 0xcc, 0x1e, 0x6d, 0xd2, 0xdc, 0x9b, 0x31, 0x80, 0x3b, 0xf8, 0x31, + 0x67, 0x27, 0x76, 0x99, 0x71, 0x51, 0x45, 0xd4, 0xb9, 0xf2, 0x92, 0xf4, 0x6a, 0xe8, 0x53, 0x4a, + 0xd2, 0x14, 0x19, 0xa9, 0x01, 0xa9, 0x89, 0x2f, 0xf6, 0x19, 0x82, 0x70, 0xaa, 0x92, 0x69, 0x7b, + 0xf1, 0xa7, 0x3e, 0xab, 0x94, 0x92, 0x60, 0xe4, 0x8f, 0x65, 0xfe, 0x1e, 0x0e, 0xe1, 0x62, 0x70, + 0x24, 0x22, 0x97, 0xfe, 0x43, 0x16, 0x85, 0xd6, 0x13, 0x0b, 0xb9, 0x8c, 0xe8, 0x0d, 0x17, 0xcc, + 0x19, 0xe2, 0xf3, 0xe6, 0xe6, 0xe6, 0x5f, 0x2d, 0x1d, 0xc1, 0x2f, 0x9b, 0x39, 0x6b, 0x48, 0x84, + 0xf2, 0xb9, 0x22, 0x22, 0x66, 0x5b, 0x89, 0xd8, 0x2c, 0x8b, 0x3e, 0xff, 0x60, 0xa1, 0xb0, 0xf0, + 0xb9, 0x86, 0x42, 0x89, 0x18, 0x6b, 0x1d, 0xba, 0xec, 0xc3, 0x37, 0x5b, 0x2b, 0xce, 0x98, 0x74, + 0x7a, 0xdd, 0x4d, 0xe0, 0xee, 0x19, 0xbe, 0x5e, 0xc2, 0x5b, 0x3e, 0x6b, 0xc4, 0x2c, 0x02, 0xf6, + 0x5b, 0x5e, 0x72, 0xf3, 0xa2, 0xba, 0x80, 0x2f, 0xd1, 0x01, 0x8e, 0x45, 0x56, 0x8a, 0x9a, 0x22, + 0x21, 0x97, 0x27, 0xcc, 0x06, 0xe4, 0x07, 0x09, 0x4c, 0xd1, 0xa3, 0xfd, 0x76, 0xc7, 0xfd, 0x1d, + 0x26, 0xb5, 0x90, 0xb1, 0xef, 0x9b, 0x8d, 0x13, 0x0f, 
0x3c, 0x73, 0x8c, 0x6d, 0x29, 0x6e, 0xaa, + 0x62, 0x5a, 0x4e, 0x2b, 0xba, 0xf3, 0x1e, 0xf2, 0x90, 0x71, 0x51, 0x4a, 0xb0, 0xc7, 0x36, 0x65, + 0x91, 0xc7, 0x6d, 0xca, 0xa4, 0xf9, 0x28, 0x63, 0xcb, 0x98, 0x3d, 0x63, 0x9d, 0xbb, 0xc9, 0x45, + 0x16, 0xcd, 0x06, 0x51, 0x7f, 0x91, 0x08, 0x0c, 0xcd, 0x40, 0x6d, 0xf2, 0xfa, 0x2d, 0xea, 0x5b, + 0x01, 0xa5, 0x58, 0x03, 0x9f, 0x3c, 0x2f, 0x6c, 0xe0, 0xb9, 0x64, 0x48, 0x48, 0x00, 0x11, 0xad, + 0xdc, 0xd6, 0xbe, 0xae, 0xcf, 0x7b, 0xdc, 0x6d, 0x99, 0x22, 0xe4, 0xdf, 0x8e, 0xde, 0xc9, 0xb4, + 0xe2, 0x46, 0xff, 0x4c, 0xc9, 0xde, 0x13, 0xe7, 0x6d, 0xa3, 0xbb, 0x04, 0x47, 0xa2, 0x06, 0xa8, + 0xc5, 0xc1, 0xc5, 0x97, 0xdc, 0x1a, 0xfb, 0x19, 0x8a, 0x6c, 0xd1, 0x86, 0x53, 0x73, 0x9f, 0xee, + 0xed, 0xfe, 0xf2, 0x2f, 0xb5, 0x76, 0xed, 0x3e, 0x72, 0xba, 0x7e, 0x59, 0x2a, 0x72, 0xd6, 0xac, + 0x43, 0xe8, 0x4a, 0x65, 0x84, 0x8e, 0x14, 0xde, 0xd9, 0xdd, 0x9c, 0x96, 0x8e, 0x70, 0x50, 0xe1, + 0x65, 0x62, 0x26, 0x94, 0x12, 0x0d, 0x17, 0x1a, 0xeb, 0x8b, 0x51, 0xc0, 0xee, 0xfe, 0x7a, 0x43, + 0x78, 0x9d, 0x5a, 0xd6, 0x6e, 0x3a, 0xaa, 0xe5, 0x81, 0x8d, 0xf0, 0x6b, 0x37, 0xab, 0xb1, 0x33, + 0x32, 0x06, 0x36, 0x61, 0xdc, 0xb2, 0x30, 0x9b, 0x1c, 0xa3, 0x4c, 0x0d, 0x34, 0x9a, 0x2f, 0x32, + 0xe3, 0xf7, 0x8f, 0x70, 0x92, 0x29, 0x79, 0x12, 0x84, 0x29, 0x86, 0x5f, 0x53, 0xb1, 0xe5, 0x4c, + 0x19, 0x57, 0x0f, 0x81, 0xc7, 0x4b, 0xab, 0x21, 0x42, 0xae, 0x10, 0x1a, 0xcb, 0x16, 0x5a, 0x09, + 0xb3, 0xb5, 0x48, 0x1d, 0xae, 0xcd, 0x55, 0xc9, 0x89, 0x19, 0x77, 0x2e, 0x84, 0xa8, 0x7a, 0xcd, + 0xdf, 0x63, 0x09, 0xe7, 0xf9, 0xcf, 0x8f, 0x15, 0x94, 0xc9, 0xb4, 0x00, 0x14, 0xc4, 0xb3, 0xe9, + 0x9b, 0xa2, 0xea, 0x88, 0x03, 0x11, 0xdf, 0xbb, 0xeb, 0x29, 0xc0, 0x46, 0xbb, 0x81, 0xf8, 0x5e, + 0x67, 0xdc, 0x8a, 0xdd, 0xe6, 0xcf, 0x31, 0x5e, 0xbe, 0xbe, 0xe1, 0x57, 0xce, 0xdb, 0x69, 0x9e, + 0x0d, 0x8b, 0xc7, 0x64, 0xd7, 0x32, 0x0a, 0xea, 0x9a, 0x52, 0xb6, 0x79, 0x75, 0x6c, 0x2a, 0xc3, + 0x77, 0x0e, 0xef, 0xad, 0x3d, 0x11, 0x09, 0x42, 0xee, 0xcc, 0xab, 0x90, 0xeb, 0x16, 0xbe, 0x2f, + 0xca, 0x2b, 0x49, 0x82, 0x44, 0x27, 0x4a, 0x19, 0xe8, 0xb9, 0xd7, 0xfd, 0xb0, 0x38, 0xd3, 0xc6, + 0x94, 0x39, 0xb7, 0x0f, 0x4a, 0xcb, 0xd8, 0x51, 0x01, 0xf0, 0x04, 0x7f, 0xf4, 0x1f, 0x23, 0x12, + 0x91, 0x92, 0xa1, 0xed, 0x90, 0xa3, 0x3d, 0x52, 0x69, 0x08, 0xab, 0x3f, 0x0f, 0x24, 0x49, 0x18, + 0x97, 0x53, 0x91, 0xfb, 0xaa, 0x06, 0x20, 0xb4, 0x44, 0x91, 0x87, 0xe3, 0xc5, 0x1d, 0xc5, 0xbc, + 0xa2, 0x7b, 0x61, 0x12, 0xd4, 0x2f, 0x67, 0x4c, 0x5e, 0x17, 0xf1, 0x70, 0x09, 0xc0, 0x91, 0xd6, + 0x3c, 0x1a, 0x84, 0xa0, 0x81, 0x50, 0x01, 0x2f, 0x63, 0x8a, 0xcb, 0x27, 0xda, 0x67, 0x96, 0x6e, + 0x6c, 0x04, 0x6c, 0x16, 0xf1, 0x19, 0x1c, 0x97, 0x0f, 0xee, 0xd4, 0x34, 0xe3, 0x76, 0xd4, 0x29, + 0x00, 0xb4, 0xb7, 0x60, 0x07, 0x72, 0x8c, 0xb6, 0x42, 0xac, 0xf1, 0x6c, 0xaa, 0xc5, 0x4e, 0x6f, + 0x83, 0xef, 0xc2, 0x3c, 0xc4, 0x13, 0xe9, 0xfe, 0x7d, 0x1a, 0x1f, 0xb1, 0x07, 0x2b, 0xbe, 0x2a, + 0x6b, 0x51, 0x29, 0x6b, 0xb0, 0x44, 0xa3, 0x7f, 0x02, 0xc1, 0x2f, 0xf4, 0xee, 0x6e, 0xf6, 0x0c, + 0x91, 0xbe, 0xba, 0xd2, 0xce, 0x88, 0x33, 0xe7, 0xf0, 0xb9, 0x80, 0x63, 0xb7, 0x6e, 0xa6, 0xb4, + 0xa3, 0x4a, 0x5c, 0x72, 0xdd, 0x56, 0xdc, 0xa2, 0xaf, 0x0f, 0xaa, 0x84, 0x6b, 0x9f, 0xd9, 0x5b, + 0x33, 0x8e, 0x9f, 0x2c, 0x51, 0x2f, 0xe8, 0x96, 0x57, 0x02, 0x3e, 0xe0, 0x77, 0xd7, 0x10, 0xd9, + 0x33, 0xf8, 0x58, 0xe0, 0xec, 0x75, 0xbd, 0xfb, 0x2f, 0xe2, 0x9b, 0x39, 0x48, 0xea, 0x14, 0xdc, + 0xdc, 0xd3, 0x15, 0x5c, 0x09, 0x85, 0xa7, 0x5d, 0x26, 0x6c, 0x89, 0xea, 0x59, 0xbc, 0x4f, 0x6a, + 0x5b, 0x2d, 0xde, 0x1d, 0x67, 0x83, 0x5f, 0x6b, 0xc9, 0xe8, 0x0d, 0x02, 0xf9, 
0x71, 0x1b, 0x1b, + 0xe3, 0x07, 0xe6, 0xd3, 0xee, 0x41, 0xb2, 0x78, 0xba, 0xbd, 0x74, 0xd1, 0x36, 0x15, 0xac, 0x62, + 0xd6, 0xab, 0x74, 0xa6, 0xb4, 0x8a, 0x90, 0xd3, 0x1a, 0x66, 0x50, 0x3d, 0x0a, 0xe9, 0x07, 0xc7, + 0x29, 0xab, 0xaf, 0x4d, 0xda, 0x9a, 0x1e, 0xbf, 0x50, 0x5e, 0x8a, 0x8c, 0x1a, 0x90, 0xb9, 0x13, + 0x5d, 0x08, 0xc6, 0x48, 0x0f, 0xa0, 0xea, 0xf5, 0x11, 0xe7, 0xf9, 0x11, 0xa8, 0x6a, 0x24, 0xfa, + 0xcd, 0xf3, 0x6b, 0xf3, 0x71, 0xcb, 0x6b, 0x2a, 0x62, 0x53, 0x5d, 0x3a, 0xf2, 0xef, 0x10, 0x09, + 0xfc, 0xa5, 0x5c, 0xbd, 0xca, 0x3d, 0x4b, 0x78, 0xfb, 0x7c, 0xd7, 0xa6, 0x10, 0x9d, 0x79, 0x07, + 0x99, 0x7d, 0xc9, 0xad, 0x69, 0xb4, 0x13, 0xee, 0x07, 0x5d, 0xad, 0x52, 0x17, 0x4d, 0x06, 0xfe, + 0xba, 0x6f, 0x7b, 0xd7, 0x68, 0x6f, 0x10, 0x14, 0xc8, 0x00, 0x03, 0x53, 0x64, 0xd6, 0x6b, 0xa8, + 0x4c, 0xf0, 0x0a, 0xfe, 0x0c, 0x24, 0xcf, 0x74, 0x94, 0xa7, 0xf1, 0x0f, 0x3e, 0x30, 0x41, 0x23, + 0xb3, 0x29, 0x47, 0x02, 0x11, 0xf5, 0x4a, 0x25, 0x58, 0x29, 0xdd, 0x13, 0x93, 0x5d, 0x44, 0x3c, + 0x0a, 0x3a, 0xf2, 0x5d, 0x96, 0x13, 0x23, 0x49, 0x5c, 0x97, 0x57, 0x57, 0xaf, 0x05, 0x71, 0x8c, + 0x04, 0x65, 0x68, 0x21, 0x19, 0x8f, 0xb4, 0xf1, 0x7c, 0x61, 0xf3, 0xa5, 0x74, 0xd4, 0x39, 0x7e, + 0xbf, 0xe5, 0x1b, 0xac, 0x84, 0xc5, 0xcc, 0xdc, 0xc1, 0xa3, 0xaf, 0xc9, 0x9a, 0xf6, 0x4f, 0x9e, + 0xab, 0x3b, 0xd2, 0x1a, 0x0c, 0xa1, 0x3d, 0x86, 0x3b, 0xb9, 0x1c, 0x4e, 0xb7, 0x04, 0x95, 0xfa, + 0x52, 0x78, 0xfe, 0x0e, 0x26, 0x9b, 0xd9, 0x3c, 0x48, 0xb2, 0xd7, 0x99, 0x55, 0x92, 0xa7, 0xf2, + 0x8f, 0xc7, 0x63, 0xfc, 0x38, 0xe2, 0xf3, 0x4a, 0x89, 0xd1, 0xb9, 0x90, 0xca, 0xe7, 0xbc, 0xac, + 0xc3, 0xf6, 0xca, 0x79, 0xd8, 0x37, 0x01, 0x89, 0x6d, 0x4b, 0x37, 0xa7, 0xad, 0x2d, 0x04, 0xe1, + 0x8b, 0x0a, 0x3c, 0x58, 0x31, 0x8b, 0x8c, 0xcb, 0x1a, 0x39, 0x1f, 0x59, 0x7c, 0x21, 0x52, 0x50, + 0x1e, 0x36, 0x9a, 0xd9, 0xa5, 0x44, 0xc4, 0xa5, 0x31, 0x9c, 0x30, 0xda, 0x48, 0xfd, 0xf4, 0x96, + 0xa3, 0x9e, 0xfb, 0xf3, 0x31, 0xcc, 0x73, 0xd7, 0xfd, 0x96, 0x5d, 0x6b, 0x72, 0x13, 0x84, 0x15, + 0x4b, 0x43, 0xe3, 0xa7, 0xef, 0xeb, 0x13, 0xad, 0x32, 0xc7, 0x88, 0x1e, 0x77, 0x76, 0xd6, 0xde, + 0x74, 0x4b, 0x14, 0x70, 0x08, 0x62, 0xe6, 0xee, 0xf1, 0xac, 0xdc, 0x3c, 0x7e, 0x2e, 0x59, 0x79, + 0xa4, 0x8b, 0x3e, 0xb1, 0x6c, 0x3e, 0x9c, 0x09, 0xa1, 0xb7, 0x6b, 0x8a, 0x7a, 0x53, 0x3b, 0x7b, + 0x81, 0xd0, 0x18, 0x90, 0xfe, 0x49, 0xda, 0xdf, 0x67, 0xc0, 0x6c, 0xfb, 0x93, 0x47, 0x28, 0xe4, + 0x33, 0x0c, 0x54, 0x3c, 0xba, 0x2d, 0xf7, 0xc5, 0x82, 0x53, 0x9b, 0x42, 0x76, 0xc5, 0xb6, 0x10, + 0x5e, 0xc5, 0x01, 0x90, 0x61, 0x5b, 0x61, 0xdb, 0x2d, 0xed, 0x59, 0xb2, 0x76, 0xe2, 0xac, 0x20, + 0x30, 0x04, 0xb4, 0x7c, 0xc5, 0xb0, 0xde, 0x9e, 0x8e, 0x00, 0x00, 0xea, 0xd5, 0x75, 0x87, 0xb1, + 0x4b, 0xe9, 0xdc, 0x8c, 0xa1, 0x42, 0xeb, 0x97, 0x87, 0xb8, 0x1e, 0x0b, 0x67, 0x54, 0x3c, 0xcf, + 0xc4, 0xa6, 0x6c, 0x21, 0x4a, 0x73, 0xfd, 0x2b, 0xbd, 0x78, 0x48, 0xeb, 0x95, 0x33, 0xe2, 0x7f, + 0x99, 0x88, 0xe4, 0x29, 0xf8, 0x43, 0x6e, 0xeb, 0xfb, 0x61, 0xc7, 0xdd, 0x23, 0x4c, 0xa0, 0xc1, + 0x01, 0xf6, 0xb7, 0x4a, 0x80, 0x13, 0x53, 0x2e, 0xfd, 0xa8, 0xd7, 0x25, 0x57, 0xcc, 0xe2, 0x55, + 0x5e, 0xf4, 0x61, 0x7b, 0xb7, 0x71, 0x4e, 0xab, 0xbb, 0x30, 0x95, 0x8c, 0x1a, 0xee, 0xf8, 0xb8, + 0x47, 0x9c, 0xd0, 0xdf, 0x06, 0x9d, 0x18, 0xd0, 0x59, 0x37, 0x65, 0x0c, 0x51, 0xcf, 0xd1, 0xed, + 0x97, 0xa3, 0x1e, 0xb9, 0x10, 0x8d, 0x18, 0x8b, 0x88, 0x46, 0xa1, 0x4e, 0x07, 0xd4, 0x96, 0xa1, + 0xa4, 0x91, 0x69, 0x87, 0xd3, 0xae, 0xbf, 0xf2, 0x40, 0xc7, 0xfc, 0xb0, 0xe3, 0xf0, 0x0c, 0x7e, + 0xdb, 0x88, 0x24, 0x66, 0x7a, 0x9a, 0x74, 0x4f, 0xd6, 0xcf, 0xcc, 0xde, 0x99, 0xe2, 0x2a, 0xb8, + 0x5c, 
0x3d, 0x66, 0x4d, 0x68, 0x5a, 0x66, 0x26, 0x98, 0xb9, 0x33, 0xde, 0x36, 0xeb, 0xfe, 0xee, + 0xf9, 0xc4, 0x97, 0x80, 0x8f, 0x8d, 0x5f, 0x3f, 0x24, 0xd4, 0x96, 0xe7, 0x79, 0xd9, 0x8a, 0x81, + 0x29, 0x29, 0x90, 0x54, 0xba, 0x93, 0x5f, 0x48, 0x6e, 0x4c, 0x56, 0x96, 0x5b, 0x1d, 0x25, 0xd1, + 0x04, 0x42, 0x12, 0x19, 0x43, 0xd1, 0xf6, 0x19, 0xcc, 0xfc, 0x0f, 0x54, 0x5a, 0xbb, 0x38, 0x7c, + 0x61, 0x31, 0x62, 0x3f, 0x20, 0x12, 0x33, 0xb9, 0xe2, 0xdc, 0x4e, 0x21, 0xee, 0x1f, 0xdc, 0x67, + 0xb5, 0x2c, 0x7b, 0x12, 0x67, 0xa8, 0x9b, 0xb2, 0x1f, 0x0f, 0x14, 0x7a, 0xab, 0x41, 0x66, 0x98, + 0x90, 0xec, 0x17, 0xea, 0xe7, 0xa3, 0x3d, 0x18, 0x37, 0xfd, 0x96, 0xc5, 0xb4, 0x1b, 0xa6, 0x03, + 0x07, 0xc4, 0x62, 0xb0, 0xb7, 0x25, 0x46, 0x31, 0xf0, 0xf8, 0xbb, 0xf8, 0x0b, 0x5d, 0x7e, 0xb0, + 0x65, 0x80, 0x7e, 0x57, 0xff, 0x12, 0xf3, 0xf3, 0x4a, 0x2f, 0xb0, 0xc2, 0x89, 0xb6, 0xca, 0xe6, + 0x2b, 0xc6, 0xe9, 0xb1, 0x1e, 0xcc, 0xf5, 0x18, 0x99, 0x73, 0x11, 0x59, 0x95, 0xc2, 0x22, 0xb6, + 0x2d, 0x01, 0x4d, 0x87, 0x8a, 0x3c, 0x4f, 0x7c, 0x77, 0x16, 0xd2, 0xae, 0x23, 0xc1, 0xfb, 0x0e, + 0xd9, 0xc7, 0x50, 0x3b, 0x31, 0xcd, 0x19, 0xd8, 0x55, 0x6c, 0xf8, 0xc8, 0x7d, 0x59, 0x76, 0x2f, + 0x9f, 0x1b, 0x7f, 0x25, 0x20, 0xef, 0x77, 0xcc, 0xf8, 0x9f, 0x52, 0xb7, 0x80, 0xb2, 0xfa, 0x47, + 0x9f, 0x86, 0x8c, 0x90, 0xf2, 0xcb, 0xfa, 0x31, 0xa0, 0xba, 0xcd, 0x40, 0xc4, 0xf1, 0xe4, 0xee, + 0x78, 0x6c, 0x04, 0xc0, 0x3a, 0x2c, 0x4e, 0xc9, 0x72, 0x45, 0x06, 0xdb, 0x69, 0x80, 0x34, 0xac, + 0xb5, 0x67, 0xb5, 0x0b, 0xb2, 0x15, 0x31, 0x2c, 0x5c, 0x06, 0x55, 0x3c, 0xcb, 0x67, 0x2a, 0x1c, + 0x73, 0x30, 0xbe, 0x5b, 0x22, 0x68, 0xc0, 0xac, 0x4f, 0x7b, 0x1a, 0x04, 0xe6, 0x68, 0x0f, 0xbc, + 0x2a, 0x10, 0xa0, 0x8f, 0x9d, 0xdf, 0x5b, 0x81, 0x84, 0xab, 0x12, 0x8a, 0x78, 0xa7, 0x53, 0x6c, + 0x3e, 0x85, 0xed, 0x91, 0x08, 0xbf, 0xb7, 0xd7, 0xf7, 0xbb, 0x15, 0xb8, 0x0f, 0x48, 0x4b, 0xe4, + 0x80, 0x5d, 0x99, 0xa2, 0xac, 0xec, 0x24, 0x86, 0xc2, 0x99, 0x7b, 0x2b, 0xc9, 0x79, 0xb4, 0xbb, + 0x24, 0xbe, 0x54, 0x23, 0x63, 0xcc, 0xa6, 0x51, 0x37, 0x70, 0x19, 0x4d, 0x9b, 0x8d, 0x03, 0xf8, + 0x6b, 0xf8, 0x78, 0x02, 0xdd, 0x2a, 0xbc, 0x4c, 0x2b, 0x05, 0x86, 0x89, 0x64, 0x47, 0x94, 0x90, + 0x74, 0x90, 0xc5, 0x6e, 0xcc, 0x39, 0x20, 0x7f, 0x91, 0x80, 0x8d, 0x0d, 0x35, 0x97, 0x03, 0x59, + 0xc1, 0x9a, 0x05, 0xe0, 0x37, 0x11, 0x4c, 0x54, 0x41, 0x39, 0x03, 0x75, 0xba, 0x06, 0x3e, 0x3d, + 0x67, 0x1e, 0x70, 0x9c, 0x63, 0x21, 0x55, 0x44, 0x9f, 0x70, 0xb7, 0x4f, 0x58, 0x4a, 0x14, 0xa9, + 0xc6, 0xf6, 0xeb, 0xde, 0xd9, 0x5b, 0x5e, 0xa4, 0x72, 0xce, 0x5e, 0xfc, 0x4d, 0x61, 0xe9, 0x46, + 0xbd, 0x22, 0x0c, 0xcf, 0xc6, 0x3b, 0x2b, 0x18, 0x9c, 0x28, 0xa4, 0x3e, 0xab, 0x2c, 0xff, 0x2f, + 0xc1, 0xef, 0x55, 0x3a, 0x82, 0x08, 0x0c, 0x97, 0xd9, 0x2a, 0x52, 0xa7, 0x99, 0xaa, 0x00, 0xe1, + 0xc1, 0xaa, 0x53, 0x32, 0xb5, 0xe9, 0xf8, 0x22, 0xa7, 0x15, 0x14, 0xcc, 0xf7, 0xc3, 0x02, 0xa6, + 0xe1, 0x76, 0x54, 0x50, 0xbf, 0xe7, 0xae, 0xfd, 0xc9, 0x3b, 0xcb, 0x5e, 0x40, 0xea, 0x54, 0x54, + 0x94, 0x40, 0x68, 0xe1, 0xc4, 0x9d, 0xff, 0x8e, 0xcf, 0x21, 0xae, 0xaf, 0xa8, 0xc0, 0x4d, 0x1d, + 0x3b, 0xf3, 0xd4, 0x24, 0x28, 0xb8, 0xd8, 0x98, 0x75, 0x7a, 0x26, 0x92, 0xd0, 0x76, 0x3a, 0x02, + 0x20, 0xba, 0x42, 0x86, 0x86, 0xc3, 0x7b, 0xd4, 0xe6, 0x66, 0x2e, 0x64, 0xe4, 0x33, 0xbb, 0xb8, + 0x81, 0x71, 0xeb, 0xa4, 0xbf, 0x04, 0x4e, 0x96, 0x2c, 0x95, 0x05, 0x8c, 0x61, 0xb2, 0xe0, 0x21, + 0x46, 0x64, 0x2b, 0xc7, 0x9d, 0xca, 0x33, 0x1e, 0xad, 0x1f, 0xdc, 0x44, 0x73, 0x9c, 0x96, 0x0c, + 0x27, 0xbc, 0x92, 0x49, 0x0e, 0x26, 0x08, 0xdc, 0xe9, 0xf2, 0xab, 0x46, 0x75, 0x6d, 0x9b, 0xa3, + 0x00, 0x26, 0x2d, 0x27, 0x69, 
0x07, 0xce, 0x52, 0x54, 0xf6, 0xf1, 0x8c, 0xff, 0x97, 0xa3, 0x5e, + 0x23, 0x40, 0x00, 0x78, 0x2c, 0x4f, 0xc0, 0x04, 0x02, 0xf9, 0xec, 0x43, 0xc9, 0x1f, 0xf0, 0xa8, + 0xb7, 0xfc, 0x3f, 0x9e, 0x4c, 0x08, 0x49, 0xb6, 0x98, 0xa6, 0x59, 0x6d, 0xa3, 0x77, 0xef, 0x62, + 0xf3, 0x9c, 0xa4, 0x63, 0x06, 0x28, 0xdb, 0xac, 0xf5, 0x77, 0x64, 0x6e, 0x2c, 0x26, 0x11, 0x81, + 0x3a, 0xeb, 0x2d, 0x98, 0xd5, 0x46, 0xad, 0x1f, 0x1a, 0xe0, 0xdf, 0x3a, 0x4a, 0x44, 0x76, 0x48, + 0xda, 0x41, 0xf9, 0xd3, 0xef, 0x7f, 0xa5, 0xfa, 0xaf, 0x39, 0xba, 0x08, 0x9c, 0x87, 0xdd, 0xcf, + 0x14, 0xc4, 0x17, 0x2b, 0xe7, 0x4e, 0xa1, 0x7e, 0x15, 0x91, 0xb6, 0x1e, 0x2f, 0xb9, 0x98, 0x93, + 0x92, 0x6a, 0xb3, 0x81, 0x5a, 0x9d, 0x7a, 0x59, 0x32, 0x26, 0xe0, 0xd8, 0x7c, 0x13, 0x71, 0x77, + 0xaa, 0x32, 0x0d, 0x64, 0xa1, 0x88, 0xd0, 0x4e, 0x35, 0xf2, 0xa0, 0x44, 0xec, 0x0a, 0x7c, 0xb2, + 0xf1, 0xb6, 0x86, 0x20, 0x3a, 0xd9, 0xc0, 0xc6, 0x20, 0x2e, 0x54, 0x61, 0x38, 0xa7, 0x7c, 0xb5, + 0xb5, 0x34, 0xa8, 0xf0, 0x9c, 0xb9, 0x47, 0x3b, 0x69, 0xb9, 0x38, 0x05, 0xc0, 0x16, 0x05, 0x83, + 0xc6, 0x9f, 0xe4, 0xb7, 0x05, 0x5a, 0x85, 0x3b, 0xad, 0xeb, 0x66, 0x16, 0x73, 0x03, 0xb7, 0xb6, + 0xc1, 0x07, 0x29, 0xdb, 0x33, 0x79, 0xe6, 0xbd, 0x13, 0xb1, 0x04, 0xad, 0x90, 0xbd, 0x6c, 0x3a, + 0xb8, 0x6d, 0xbe, 0xc5, 0xda, 0xfc, 0x33, 0xa7, 0x56, 0x99, 0x11, 0x39, 0xd3, 0x2c, 0x03, 0xbc, + 0x0b, 0x12, 0x80, 0x7d, 0x62, 0x09, 0x9c, 0x6f, 0xe8, 0x96, 0xe3, 0xc8, 0x6e, 0x2f, 0x1d, 0x9e, + 0xdb, 0x58, 0x9e, 0x17, 0x50, 0xee, 0x7f, 0xee, 0xd7, 0xd1, 0x44, 0xbb, 0xa1, 0x25, 0x98, 0xb2, + 0x74, 0xe6, 0xc0, 0xa0, 0x80, 0xd4, 0x6c, 0x86, 0xa5, 0xb6, 0x90, 0x80, 0x15, 0xd3, 0x88, 0x7d, + 0x16, 0x47, 0x9f, 0xf3, 0x4e, 0xf7, 0x3d, 0x2b, 0x17, 0x57, 0x35, 0xc2, 0xc5, 0x7c, 0xf1, 0x9a, + 0xb0, 0xf2, 0xb1, 0x91, 0x96, 0xd7, 0xf3, 0x23, 0x9b, 0x41, 0x81, 0x22, 0x85, 0xa1, 0xf0, 0x48, + 0x8c, 0x0e, 0x08, 0x19, 0xcb, 0x7f, 0xcf, 0xb3, 0x6e, 0x40, 0x9a, 0xdf, 0x97, 0xc6, 0x1e, 0xad, + 0xd7, 0x88, 0x63, 0xcc, 0xa6, 0x22, 0x2d, 0xb1, 0x97, 0xbe, 0x55, 0x2e, 0xc3, 0xa3, 0x5d, 0x52, + 0xe7, 0x83, 0x45, 0xb2, 0x5d, 0x81, 0xb3, 0x51, 0x69, 0xa0, 0xea, 0x18, 0x15, 0xe2, 0x7e, 0xcb, + 0xa7, 0xb7, 0xed, 0xb4, 0x99, 0xcf, 0xf9, 0x80, 0xaf, 0x93, 0x08, 0x83, 0x88, 0x46, 0x77, 0x89, + 0x9a, 0x71, 0x8f, 0xd1, 0x00, 0x4b, 0x74, 0xce, 0x14, 0x67, 0xeb, 0x60, 0xf0, 0x82, 0x0b, 0x60, + 0x48, 0x2a, 0x45, 0xb1, 0xa6, 0xf2, 0x36, 0x91, 0x42, 0x67, 0x05, 0x99, 0x27, 0xac, 0xfe, 0xe6, + 0xbf, 0xaf, 0x6e, 0x83, 0xa7, 0xa1, 0xec, 0x28, 0x97, 0x9e, 0x31, 0x8b, 0x83, 0x59, 0xaf, 0x4c, + 0x35, 0x96, 0xc0, 0x99, 0x59, 0x1c, 0x1d, 0x5a, 0xbc, 0xa6, 0x48, 0x28, 0x88, 0xc3, 0x16, 0xb3, + 0x8e, 0xd3, 0x71, 0xdc, 0x9a, 0x4a, 0x6a, 0x36, 0xca, 0x3e, 0x02, 0xe8, 0x78, 0x57, 0x38, 0x48, + 0xca, 0x1f, 0x48, 0x5b, 0x31, 0x76, 0x22, 0x17, 0xf4, 0x8f, 0xd0, 0x22, 0x4e, 0xfe, 0x09, 0x35, + 0x2d, 0x2b, 0x53, 0xc6, 0xf9, 0xc2, 0x85, 0xc1, 0x6f, 0x11, 0x1a, 0xc6, 0xb5, 0xf5, 0x07, 0x30, + 0xae, 0xc7, 0x0a, 0x1a, 0xf6, 0xdd, 0x0a, 0x86, 0x9a, 0xe5, 0x73, 0x6f, 0x3f, 0xe7, 0xe9, 0x96, + 0x02, 0xb1, 0xf6, 0x18, 0x89, 0xc2, 0x69, 0x94, 0x88, 0xae, 0x47, 0xfc, 0xc8, 0xde, 0x81, 0x35, + 0x68, 0xc0, 0x7b, 0x08, 0x7a, 0x9f, 0x3e, 0xa0, 0xc4, 0x06, 0x9d, 0x34, 0xd7, 0x76, 0x0d, 0x6c, + 0x89, 0xdd, 0x3d, 0xfd, 0xc3, 0xbd, 0xe5, 0xd6, 0xf5, 0x10, 0x40, 0x21, 0xff, 0xbd, 0xeb, 0xe7, + 0x8c, 0x3a, 0xca, 0x85, 0x40, 0xf6, 0x3d, 0x02, 0x16, 0x83, 0xca, 0x64, 0x0e, 0x8a, 0x9e, 0x05, + 0x45, 0xf3, 0x1a, 0x0f, 0xa1, 0xe9, 0xf8, 0xc0, 0x76, 0x19, 0xff, 0x16, 0x8a, 0xd2, 0x59, 0x4b, + 0xfc, 0x9b, 0x72, 0x63, 0xfc, 0xd4, 0x7f, 0xb5, 0xf4, 
0xab, 0x71, 0x25, 0xf8, 0x16, 0x05, 0xb0, + 0x50, 0x1f, 0x5d, 0x17, 0x87, 0xf1, 0x20, 0xa2, 0x03, 0xa6, 0x41, 0x95, 0xf3, 0x15, 0x89, 0x94, + 0xef, 0x15, 0x24, 0xa5, 0xc5, 0x54, 0x42, 0xbe, 0x21, 0x6b, 0x38, 0x72, 0x88, 0x43, 0xb3, 0xad, + 0xe4, 0xee, 0x4e, 0x0a, 0x47, 0x65, 0xd4, 0x85, 0xdd, 0x6a, 0x66, 0x33, 0x1f, 0x98, 0x4e, 0xb0, + 0x63, 0x27, 0x01, 0xde, 0xd6, 0x1c, 0xa1, 0x89, 0x11, 0x2a, 0xf7, 0x56, 0x6b, 0xfa, 0xb1, 0x68, + 0x76, 0x89, 0x76, 0xf2, 0x95, 0xa2, 0x52, 0x89, 0x15, 0x8b, 0x38, 0x91, 0xfe, 0x17, 0x27, 0x83, + 0x6b, 0xc0, 0x2f, 0xbb, 0x4b, 0x68, 0x0f, 0xa6, 0x82, 0x03, 0x55, 0x71, 0xe1, 0x99, 0x68, 0x51, + 0x0c, 0x7b, 0x59, 0xc3, 0x8a, 0x55, 0x68, 0x27, 0x8a, 0x19, 0x1e, 0xf3, 0x63, 0xde, 0xb9, 0xbf, + 0xcd, 0xc5, 0x0f, 0xd6, 0x63, 0x73, 0x48, 0x52, 0xa5, 0xa2, 0x30, 0x0f, 0x94, 0xeb, 0xbe, 0xa6, + 0x64, 0x75, 0xce, 0xe7, 0x32, 0x24, 0xd3, 0x20, 0x09, 0x31, 0xb7, 0xa3, 0x21, 0x9d, 0x0c, 0xbc, + 0x2c, 0x0b, 0x35, 0xa6, 0x5f, 0x3e, 0x53, 0x46, 0xd4, 0x4f, 0x7e, 0x19, 0x24, 0x10, 0x8b, 0x6c, + 0x78, 0x90, 0x65, 0x2a, 0x05, 0x4d, 0xd6, 0x95, 0x89, 0x4d, 0x02, 0x77, 0x49, 0xdc, 0x75, 0xb8, + 0x1e, 0xd8, 0xd5, 0x47, 0xe3, 0x45, 0x31, 0x6b, 0xac, 0xfb, 0x0f, 0xf5, 0xa8, 0x59, 0x1d, 0x72, + 0xe7, 0x0b, 0x46, 0xc2, 0x13, 0x1b, 0xf2, 0x89, 0x30, 0xde, 0x09, 0x5c, 0x07, 0xc7, 0x78, 0x4a, + 0xae, 0x77, 0x23, 0x1c, 0x71, 0x4b, 0x90, 0x89, 0x20, 0x52, 0x07, 0x6f, 0xa2, 0x99, 0x9e, 0x69, + 0x65, 0x7d, 0x60, 0x86, 0x0c, 0x7f, 0xab, 0xc6, 0x16, 0xbb, 0xf4, 0x4a, 0x9f, 0x49, 0xed, 0x36, + 0x36, 0x15, 0xa4, 0x09, 0x29, 0xce, 0xa6, 0xc3, 0x73, 0x53, 0xa7, 0x41, 0x34, 0x5f, 0xa7, 0xde, + 0x58, 0x65, 0xae, 0x3c, 0xcc, 0x9c, 0x9f, 0xa8, 0x59, 0x6a, 0xc0, 0xd9, 0xfa, 0x62, 0x7d, 0x9c, + 0x78, 0xae, 0xc8, 0xa5, 0xd6, 0x97, 0x9d, 0x5d, 0x14, 0xc8, 0x42, 0x1d, 0xd5, 0x1e, 0xbf, 0x0a, + 0x0d, 0xb0, 0xd4, 0xcf, 0x80, 0x3c, 0xd8, 0x81, 0x99, 0x6c, 0x03, 0xd1, 0x98, 0xc3, 0x93, 0x64, + 0x2d, 0x12, 0x99, 0x8d, 0x98, 0x79, 0xcf, 0xdf, 0x6c, 0xb0, 0x2f, 0x1e, 0xff, 0x11, 0x4f, 0x3c, + 0xd5, 0x10, 0x74, 0x0b, 0x27, 0x0e, 0x97, 0x70, 0x12, 0x8f, 0xfd, 0x52, 0x6a, 0x83, 0x6a, 0x62, + 0xb3, 0x83, 0xe1, 0x3b, 0xc7, 0x66, 0xa0, 0x2a, 0x82, 0x20, 0x6f, 0x37, 0x2b, 0xbb, 0x5b, 0x61, + 0xe6, 0xc5, 0x4e, 0x7d, 0x4d, 0xa2, 0xea, 0x9e, 0x61, 0x83, 0x96, 0x87, 0xdb, 0xbc, 0x6d, 0x31, + 0x70, 0x27, 0x41, 0x57, 0xc9, 0xfa, 0x59, 0x05, 0xe5, 0x63, 0xe4, 0x4c, 0x1b, 0x9a, 0x3f, 0x19, + 0x0f, 0x1b, 0x18, 0xae, 0xb1, 0x22, 0xd8, 0x48, 0xea, 0xb3, 0x35, 0x2a, 0x30, 0x2b, 0xf8, 0x48, + 0x47, 0x73, 0xf4, 0xe1, 0x51, 0xf9, 0x3e, 0x34, 0x86, 0xdb, 0x5e, 0x28, 0x92, 0x12, 0xe7, 0x6a, + 0x3b, 0xe6, 0x8f, 0x0b, 0x0e, 0x6e, 0x82, 0xc7, 0x8b, 0x1a, 0xe0, 0xbe, 0xb4, 0xfc, 0x3f, 0xa2, + 0x07, 0x8e, 0x56, 0x04, 0x0e, 0xc3, 0xe3, 0x21, 0x70, 0x53, 0x54, 0x61, 0xf7, 0xb7, 0xeb, 0xe5, + 0x8d, 0x22, 0x0a, 0xb0, 0xbc, 0x28, 0x23, 0xc5, 0xd9, 0xd5, 0x40, 0xb5, 0x49, 0xd8, 0xb7, 0x29, + 0x36, 0x9a, 0x70, 0xaf, 0xa7, 0x9b, 0xdb, 0x27, 0x97, 0x82, 0x95, 0x6f, 0xa3, 0x25, 0xbe, 0xbd, + 0xe9, 0x51, 0x6c, 0xaf, 0xbf, 0x1c, 0x56, 0x89, 0xce, 0xbe, 0x88, 0x0b, 0xe7, 0x53, 0x05, 0x13, + 0x4c, 0xe3, 0x09, 0xe3, 0x8f, 0xed, 0x00, 0xe8, 0x4b, 0x4e, 0xae, 0xe0, 0xe0, 0x4b, 0x31, 0x94, + 0x5b, 0x19, 0x48, 0x72, 0x52, 0x9d, 0xd7, 0x24, 0x3b, 0x67, 0xfe, 0x43, 0x38, 0xc3, 0xd6, 0x68, + 0x87, 0x94, 0x0d, 0x1f, 0xdd, 0xe2, 0x85, 0xfd, 0xf0, 0xc6, 0x78, 0x5c, 0x63, 0xb1, 0x13, 0x1c, + 0x5f, 0x45, 0x0f, 0x0a, 0xed, 0x69, 0x49, 0x28, 0xe1, 0x9e, 0x7c, 0x58, 0x6d, 0x81, 0x61, 0x2d, + 0x6a, 0xbb, 0x4e, 0xdc, 0x37, 0xb8, 0x25, 0xd2, 0xc7, 0xa4, 0xff, 0x8d, 0xd7, 
0x28, 0x39, 0xbb, + 0xcc, 0x5d, 0x63, 0x12, 0x80, 0xf0, 0x81, 0x89, 0xb3, 0x8e, 0xac, 0x71, 0x55, 0xfa, 0x3c, 0xe0, + 0x85, 0xb9, 0xf8, 0x2e, 0x96, 0xc0, 0x95, 0x70, 0xb8, 0x0d, 0xaa, 0x81, 0xfd, 0xe5, 0x28, 0x55, + 0xa5, 0x4e, 0x6f, 0x5d, 0xff, 0x42, 0x69, 0x54, 0x32, 0x57, 0x41, 0xd2, 0x13, 0x0a, 0x68, 0x4a, + 0x4b, 0xea, 0xcf, 0x11, 0xa9, 0xa6, 0xc1, 0x94, 0xf1, 0x66, 0x27, 0xa0, 0x21, 0x58, 0xf1, 0x57, + 0xf9, 0xc9, 0x12, 0x3d, 0x3e, 0x81, 0x79, 0x2c, 0xe4, 0xe6, 0x7f, 0xc2, 0x30, 0x45, 0x42, 0xfe, + 0xbd, 0xd7, 0xc2, 0x40, 0xb4, 0xe2, 0x18, 0x41, 0x17, 0x7d, 0x93, 0x25, 0x91, 0xaf, 0xdb, 0xf0, + 0xbf, 0xc6, 0x23, 0xf5, 0xdf, 0x6b, 0xc9, 0x16, 0x2d, 0x8c, 0x64, 0xf7, 0x20, 0x88, 0xe2, 0xdf, + 0xf2, 0xfd, 0xab, 0x92, 0x06, 0x09, 0xa9, 0xf1, 0x30, 0x96, 0x15, 0xca, 0x1f, 0x17, 0xba, 0x0a, + 0x45, 0x86, 0xe1, 0xcd, 0x8b, 0x3e, 0x04, 0x85, 0x3c, 0x9e, 0x36, 0x23, 0x08, 0x6e, 0x06, 0x7e, + 0xbb, 0x66, 0x46, 0xeb, 0xbe, 0x80, 0xd8, 0xbe, 0xe0, 0x6f, 0xfb, 0x67, 0x87, 0x34, 0xb2, 0x00, + 0xab, 0x0a, 0x3d, 0x54, 0xfc, 0x73, 0xe9, 0x60, 0x4a, 0xd7, 0x22, 0xd2, 0x1c, 0x08, 0x1d, 0xd3, + 0x1d, 0x9b, 0xff, 0x85, 0x11, 0x22, 0xc2, 0xe2, 0x18, 0xdb, 0x3b, 0x9d, 0x0b, 0x06, 0x6b, 0x50, + 0xd9, 0xdb, 0x31, 0x43, 0x9f, 0xd2, 0x76, 0x4d, 0x4f, 0x45, 0x83, 0xe4, 0x95, 0xd9, 0x29, 0x9c, + 0x17, 0xf5, 0xeb, 0xdc, 0xf2, 0x80, 0x92, 0x0f, 0xd2, 0x44, 0xa3, 0x08, 0x00, 0xbc, 0xff, 0x44, + 0x5f, 0xc9, 0x0f, 0x2a, 0xec, 0x6f, 0x88, 0xb5, 0x5f, 0x65, 0xa7, 0xfb, 0x0e, 0x09, 0x44, 0x9d, + 0x6c, 0xb0, 0x8b, 0x3e, 0x8a, 0x36, 0x77, 0xdc, 0xd9, 0x6b, 0x62, 0xe0, 0x7c, 0x00, 0x5c, 0x6c, + 0x27, 0xde, 0x1c, 0x30, 0x3c, 0x95, 0xfa, 0x75, 0x52, 0x79, 0xca, 0xfd, 0x73, 0xcf, 0xea, 0x5d, + 0x6a, 0xe4, 0x79, 0x7c, 0xbd, 0x22, 0x63, 0xcc, 0x03, 0xca, 0xe1, 0x39, 0xbb, 0x61, 0x7e, 0x01, + 0x02, 0xfd, 0xb7, 0x79, 0x5a, 0x63, 0x05, 0x7e, 0xd4, 0x76, 0xb0, 0x4f, 0xbc, 0x76, 0xfe, 0xf7, + 0x71, 0x9f, 0x0f, 0x7d, 0x43, 0xd7, 0xb3, 0x54, 0xaf, 0xce, 0xac, 0xd6, 0xa6, 0xbc, 0xab, 0x38, + 0xd9, 0xf5, 0x27, 0x86, 0x53, 0x07, 0x33, 0x40, 0xd0, 0x5e, 0xf7, 0xd8, 0x23, 0x2e, 0x91, 0x0a, + 0x8f, 0x5f, 0x50, 0xb7, 0xc8, 0xc0, 0x1c, 0x91, 0x3b, 0xf8, 0x63, 0xa4, 0x0a, 0x73, 0xba, 0xf8, + 0x3b, 0xe2, 0xe1, 0xa6, 0x2e, 0x97, 0xa4, 0x7d, 0x39, 0x8a, 0x8c, 0x98, 0xbc, 0x86, 0x69, 0xc6, + 0x4f, 0x84, 0x65, 0x6b, 0x82, 0x04, 0x02, 0xb6, 0x09, 0x31, 0x49, 0x72, 0x24, 0x15, 0x2d, 0x50, + 0x13, 0x81, 0xc1, 0x11, 0xf3, 0x8b, 0xed, 0xf9, 0xaf, 0x22, 0x83, 0x0a, 0x0c, 0x63, 0x1a, 0x8c, + 0xd8, 0xdb, 0x1b, 0xc7, 0x91, 0x9d, 0xa2, 0xc2, 0x34, 0xaf, 0x80, 0x55, 0x73, 0xcf, 0xba, 0x70, + 0x3a, 0xc5, 0x01, 0xe2, 0x62, 0xbf, 0x81, 0xf2, 0x8f, 0x38, 0xc4, 0xbd, 0x52, 0xbb, 0x81, 0xae, + 0x47, 0x6c, 0xcb, 0x11, 0x4d, 0xbe, 0xde, 0x52, 0x12, 0x9b, 0xeb, 0x8d, 0xad, 0x11, 0x5b, 0x08, + 0x66, 0x93, 0x32, 0x63, 0xf5, 0x36, 0x04, 0xc3, 0x54, 0x8d, 0xaa, 0xd4, 0x9e, 0x0e, 0x62, 0x60, + 0xdd, 0x96, 0x90, 0xbe, 0xfb, 0xa4, 0xe1, 0x7b, 0xec, 0x52, 0x81, 0x35, 0xfa, 0x88, 0x75, 0x7e, + 0xa4, 0xef, 0xde, 0x6c, 0xcf, 0x0d, 0x1f, 0x71, 0xed, 0xa0, 0xd5, 0x5b, 0xd7, 0x74, 0x1d, 0x80, + 0x1d, 0x5f, 0x5c, 0xcb, 0xd3, 0xd5, 0xd9, 0x9d, 0xd2, 0x9a, 0x3c, 0x07, 0xec, 0x4f, 0xd2, 0x99, + 0xbb, 0x81, 0x01, 0xae, 0xb9, 0x7f, 0x67, 0x20, 0x4f, 0xe6, 0xfe, 0x33, 0xb8, 0xfb, 0x1b, 0x4f, + 0x9b, 0x56, 0x4f, 0x30, 0x00, 0x85, 0xb5, 0xae, 0x86, 0x8a, 0x3c, 0x73, 0x87, 0x2f, 0x8a, 0x59, + 0x26, 0xfc, 0x49, 0xdf, 0x11, 0x7c, 0x0a, 0x89, 0x7e, 0xdd, 0xa0, 0x8a, 0xe4, 0xe3, 0x46, 0xea, + 0x7c, 0x78, 0x1d, 0x96, 0x43, 0x5a, 0xd0, 0xe1, 0xf5, 0x18, 0x40, 0x53, 0x2f, 0xcd, 0x07, 0xd9, + 0x41, 
0xea, 0x5b, 0x62, 0x4f, 0x28, 0x84, 0x9f, 0x30, 0x65, 0xa9, 0x00, 0x41, 0x87, 0x1d, 0x11, + 0x4e, 0x02, 0x8e, 0x75, 0xa6, 0x3d, 0x8f, 0x3a, 0x60, 0x8f, 0xb8, 0x92, 0xa5, 0x47, 0xd1, 0x27, + 0xb3, 0x7d, 0xd7, 0x84, 0x44, 0x22, 0x6c, 0x23, 0x3b, 0x42, 0x53, 0x6f, 0x2d, 0xc1, 0x99, 0x79, + 0x82, 0xaf, 0xae, 0xce, 0x89, 0xe9, 0xd8, 0x5e, 0xad, 0xa7, 0x96, 0xac, 0x88, 0xec, 0x53, 0x1e, + 0x62, 0xb7, 0x0f, 0xf3, 0xfe, 0xaa, 0xfc, 0x21, 0x09, 0x5d, 0xe5, 0xd8, 0x19, 0xff, 0xba, 0x1c, + 0x55, 0xf3, 0x0e, 0xb5, 0xca, 0xcc, 0xbc, 0x7b, 0x27, 0x6e, 0x9e, 0x0a, 0x07, 0x34, 0xaa, 0x1a, + 0x41, 0x46, 0xc5, 0x9c, 0xfc, 0x34, 0x2a, 0xb3, 0x4f, 0x7d, 0x6f, 0xba, 0x7d, 0x62, 0x49, 0x34, + 0xaa, 0xdc, 0x45, 0x09, 0x58, 0x5d, 0x6c, 0xaf, 0x60, 0xf4, 0x10, 0x60, 0xc8, 0xb6, 0x20, 0x56, + 0x60, 0xed, 0xc5, 0x4d, 0x6f, 0xe5, 0x2b, 0x44, 0x63, 0xdf, 0xd3, 0x59, 0x74, 0xbf, 0x7e, 0xf5, + 0x80, 0x5a, 0xad, 0x58, 0x68, 0x1e, 0x25, 0x8b, 0xc8, 0x31, 0xec, 0x27, 0xd0, 0x51, 0xe8, 0x9e, + 0x25, 0xa6, 0xfb, 0xbe, 0x6a, 0x20, 0x9c, 0x9f, 0x6c, 0xe3, 0xac, 0xce, 0x58, 0x50, 0x5f, 0x92, + 0x0f, 0x80, 0x47, 0x86, 0x97, 0xf8, 0xc9, 0xea, 0x72, 0xc7, 0xeb, 0x23, 0x5a, 0x57, 0x15, 0x47, + 0x07, 0x46, 0xd0, 0x26, 0xcf, 0xfa, 0x52, 0xec, 0xaa, 0x11, 0x0f, 0xc4, 0xe3, 0xba, 0x74, 0xf5, + 0x1f, 0x06, 0x39, 0xda, 0x11, 0x71, 0xd9, 0x0e, 0x0f, 0xab, 0x0a, 0x8a, 0x45, 0xd3, 0xe0, 0x8f, + 0x71, 0x4e, 0x26, 0x8b, 0x0e, 0xfb, 0x80, 0x2b, 0x0f, 0x31, 0x4b, 0xc4, 0x26, 0x09, 0x1d, 0xaa, + 0xaa, 0x3e, 0xec, 0xd8, 0xe9, 0x98, 0xe1, 0xf3, 0x30, 0xbe, 0xa6, 0x07, 0xea, 0x8f, 0xc2, 0xb5, + 0x4f, 0xef, 0xa0, 0xa0, 0x2a, 0xc0, 0x41, 0xf8, 0x92, 0x4d, 0x3b, 0x59, 0x00, 0xc4, 0x35, 0xa2, + 0x38, 0x83, 0x02, 0xba, 0x03, 0xee, 0xb6, 0x90, 0x09, 0x7b, 0x22, 0x5d, 0x61, 0x75, 0x09, 0x23, + 0x9f, 0xde, 0x2f, 0x40, 0xfb, 0xed, 0xf7, 0x09, 0x0d, 0xcb, 0xea, 0xf3, 0xe6, 0x79, 0x00, 0x13, + 0x4a, 0x5d, 0x16, 0x57, 0xb4, 0xf0, 0x3f, 0x2e, 0x83, 0x49, 0xe9, 0xb1, 0x8a, 0x0c, 0x17, 0xb3, + 0x48, 0xf3, 0x60, 0x7e, 0x30, 0x4c, 0xd3, 0xdb, 0xe6, 0x19, 0x27, 0x91, 0x93, 0x9a, 0xbd, 0x57, + 0xd0, 0x6b, 0x99, 0x27, 0xd9, 0x2d, 0x09, 0x42, 0x86, 0x9a, 0x7d, 0xa2, 0x8d, 0x66, 0xd2, 0x54, + 0x9d, 0xa7, 0xe9, 0x0d, 0x6b, 0x31, 0xf0, 0xb3, 0xac, 0xca, 0x72, 0xd8, 0x4e, 0xc2, 0x72, 0x9e, + 0x4b, 0xcb, 0x0c, 0xa3, 0xcf, 0x5b, 0xd9, 0x97, 0x1e, 0x87, 0x18, 0x76, 0x1f, 0x38, 0xb0, 0xd0, + 0x30, 0x91, 0x23, 0x17, 0x30, 0x67, 0x5c, 0xc9, 0xcd, 0xa8, 0x55, 0xce, 0xf9, 0x47, 0x63, 0x63, + 0x37, 0x2f, 0x9f, 0x6a, 0x77, 0x95, 0x79, 0xfa, 0xcd, 0xb2, 0xb5, 0x7f, 0x42, 0x4a, 0x31, 0x98, + 0x91, 0xb4, 0xb0, 0x79, 0x4d, 0x0c, 0x97, 0x5b, 0xf0, 0x9b, 0x6b, 0x58, 0x13, 0xaf, 0x63, 0xe1, + 0x8f, 0x4f, 0xda, 0x3c, 0x27, 0xc9, 0x87, 0x8a, 0xe8, 0x50, 0x4e, 0x44, 0x52, 0xec, 0x30, 0xf8, + 0xee, 0x39, 0xdf, 0x52, 0x5f, 0xca, 0x9b, 0x49, 0x4d, 0xe1, 0x90, 0x73, 0x1e, 0x28, 0x83, 0xd0, + 0x62, 0xce, 0x79, 0x69, 0xb5, 0xae, 0x7b, 0x78, 0x3c, 0xea, 0x5c, 0xba, 0x36, 0x41, 0xbe, 0x10, + 0x80, 0x86, 0x58, 0xe4, 0x40, 0x7e, 0x3c, 0x77, 0x10, 0x8b, 0x45, 0xfd, 0xc5, 0xb2, 0x3b, 0x56, + 0x5b, 0x95, 0x91, 0xa1, 0xa1, 0xb8, 0xb9, 0xfa, 0xf1, 0xa9, 0x77, 0x37, 0x0e, 0xa1, 0xfe, 0xbb, + 0x1e, 0x15, 0x22, 0x97, 0x81, 0xae, 0x17, 0x47, 0x42, 0xda, 0x63, 0xfb, 0x6c, 0xfb, 0x35, 0x9c, + 0x24, 0xae, 0x5c, 0x75, 0x73, 0x7e, 0x2f, 0x88, 0x23, 0x0b, 0xde, 0x4b, 0xef, 0xcd, 0x22, 0xaa, + 0x9d, 0x75, 0x19, 0x32, 0x9c, 0xb9, 0x65, 0xb1, 0x06, 0xb0, 0xa6, 0x02, 0x04, 0x2b, 0x20, 0xe5, + 0x60, 0x6d, 0xd1, 0x8f, 0x27, 0x04, 0xb0, 0xa1, 0xf9, 0x5a, 0x17, 0x84, 0xf1, 0x82, 0xa7, 0xc8, + 0x50, 0x1f, 0x8c, 0x03, 0x5a, 
0xd3, 0x09, 0xc2, 0x05, 0x1c, 0x73, 0xa2, 0x11, 0xe3, 0x65, 0x0c, + 0x57, 0xab, 0x6f, 0x85, 0xec, 0xfe, 0xec, 0x51, 0xbc, 0xc3, 0xe9, 0x33, 0xda, 0x6b, 0x6f, 0x82, + 0xae, 0xbc, 0x46, 0xc3, 0x2b, 0x4f, 0x44, 0xbe, 0x16, 0xfa, 0x62, 0x49, 0xeb, 0x3c, 0x06, 0xde, + 0xd5, 0x9f, 0x8d, 0xd1, 0x92, 0x14, 0xc1, 0xb0, 0x47, 0x05, 0xf1, 0x69, 0xbe, 0x80, 0x32, 0x52, + 0x90, 0x92, 0xed, 0x7c, 0x76, 0x47, 0x2f, 0x9a, 0x3e, 0xc1, 0x48, 0x87, 0xee, 0xe3, 0x53, 0x19, + 0xc0, 0x92, 0x40, 0x4d, 0x08, 0x66, 0xed, 0x6a, 0x1e, 0x9a, 0xe2, 0xf5, 0x4e, 0x2c, 0x81, 0x72, + 0x2f, 0xce, 0x62, 0x06, 0x7b, 0xa7, 0x2b, 0x64, 0x23, 0xdc, 0x1a, 0xb9, 0x68, 0x6a, 0xdc, 0x97, + 0x79, 0x13, 0x0d, 0x56, 0x40, 0xe1, 0xa0, 0xb9, 0x1f, 0x17, 0x89, 0xda, 0x69, 0x5a, 0xc9, 0x3f, + 0x6f, 0x97, 0x46, 0x44, 0x7b, 0xad, 0x71, 0x90, 0x6f, 0xd2, 0x4c, 0x1e, 0x68, 0x9c, 0x32, 0xed, + 0x59, 0xef, 0xbf, 0xb8, 0x37, 0xe3, 0x63, 0xad, 0xd8, 0xc8, 0x8a, 0x10, 0x32, 0x50, 0xe7, 0xee, + 0x5c, 0x89, 0xad, 0xe7, 0xa0, 0x16, 0x03, 0xb2, 0x52, 0x11, 0x28, 0xd2, 0xf7, 0xce, 0xaf, 0xac, + 0x50, 0xee, 0xf4, 0x8b, 0x0c, 0xf0, 0x6a, 0xd8, 0xd5, 0x90, 0x43, 0x9c, 0x7f, 0xaf, 0xd1, 0xcb, + 0xb2, 0xb8, 0x71, 0x15, 0xf0, 0x28, 0x36, 0xe5, 0xb6, 0xa1, 0xb5, 0x8e, 0xff, 0xed, 0xc5, 0x43, + 0xc2, 0x65, 0xd4, 0x29, 0x6a, 0xf1, 0x15, 0x86, 0x17, 0x15, 0x89, 0x52, 0x84, 0x6e, 0x52, 0x2c, + 0x4c, 0x38, 0x45, 0xbd, 0xaa, 0x5b, 0x71, 0xfc, 0x7d, 0x51, 0x25, 0xd0, 0xc1, 0x6c, 0x87, 0x5f, + 0x7b, 0xb5, 0x1a, 0x54, 0x8e, 0xcc, 0xac, 0x10, 0xef, 0xa7, 0xe7, 0x75, 0x8b, 0xaa, 0x33, 0x4f, + 0x63, 0x82, 0xd8, 0x05, 0x6b, 0x73, 0x46, 0xcc, 0x8c, 0xaf, 0x31, 0x51, 0x40, 0x99, 0x21, 0x6f, + 0x0c, 0x8c, 0xfd, 0x12, 0x68, 0xb9, 0x90, 0x1c, 0x83, 0xa7, 0x74, 0xbf, 0x41, 0x0e, 0x8e, 0x8c, + 0xfe, 0x72, 0x3a, 0x38, 0x3b, 0xa0, 0x3e, 0x50, 0xfe, 0x86, 0x4d, 0xd3, 0x7e, 0x66, 0x91, 0x52, + 0xa6, 0x08, 0xc6, 0x97, 0xa7, 0x10, 0x3f, 0x01, 0x71, 0xb2, 0x9a, 0x86, 0x3c, 0x2d, 0x05, 0xae, + 0x5f, 0x63, 0x4f, 0x08, 0x4f, 0xc0, 0x60, 0xd8, 0xc1, 0x27, 0xc8, 0xf3, 0x76, 0xdf, 0xa7, 0x95, + 0x57, 0xf4, 0x02, 0xaa, 0xfd, 0xdd, 0x2c, 0x64, 0x82, 0x6b, 0xba, 0xc7, 0x71, 0x2f, 0x91, 0xb7, + 0xbc, 0x02, 0xe7, 0xe3, 0x01, 0x02, 0x02, 0xb2, 0x8b, 0xa3, 0x04, 0x84, 0xfd, 0x22, 0x14, 0x04, + 0x71, 0x3e, 0xe8, 0x94, 0xa0, 0x72, 0xbb, 0x12, 0xe0, 0x1b, 0xca, 0xd4, 0xf2, 0x9a, 0x6d, 0xaa, + 0xcb, 0xae, 0x50, 0x8a, 0x42, 0xda, 0xe4, 0xed, 0x73, 0x5c, 0xdd, 0xea, 0x2d, 0x89, 0x84, 0xda, + 0x95, 0x88, 0xc3, 0xd3, 0xb3, 0xea, 0x70, 0x9f, 0x0b, 0xbc, 0x51, 0xe7, 0x6d, 0x16, 0x71, 0x10, + 0x7f, 0x52, 0x11, 0x29, 0x7c, 0xfd, 0xfc, 0xd9, 0x3c, 0xd6, 0x7c, 0x20, 0x45, 0x17, 0xf3, 0x57, + 0x78, 0x97, 0x7e, 0x74, 0x27, 0xd7, 0x8d, 0xa2, 0x8e, 0x35, 0xfd, 0x5e, 0xd9, 0x1c, 0xed, 0xb7, + 0x8f, 0xf0, 0xe9, 0xb6, 0xc5, 0x9c, 0xa5, 0x86, 0x1c, 0x24, 0xde, 0xcd, 0xd1, 0x4a, 0xd7, 0x14, + 0xc4, 0xa1, 0x21, 0x90, 0x0b, 0xf7, 0x32, 0x12, 0x48, 0x2c, 0xb6, 0xc2, 0x31, 0x6d, 0xf1, 0xcb, + 0x8d, 0xb2, 0x73, 0x1f, 0xac, 0x00, 0x0b, 0x1d, 0xab, 0x65, 0x7d, 0xb9, 0x0e, 0xa1, 0x6f, 0xd4, + 0x4d, 0x7a, 0xf9, 0xeb, 0x86, 0xf8, 0x5c, 0xfe, 0xa8, 0xb3, 0x8b, 0xb3, 0xe7, 0x74, 0x04, 0x4c, + 0x31, 0x8a, 0x48, 0x99, 0x6f, 0x3b, 0xeb, 0x34, 0x10, 0xa1, 0xe8, 0x60, 0xa6, 0x92, 0x98, 0xf4, + 0xe4, 0xb2, 0x23, 0xad, 0x2b, 0x87, 0xe2, 0xbc, 0x1a, 0x55, 0xd6, 0xc5, 0x88, 0xb3, 0xe3, 0x50, + 0xfa, 0xbb, 0x0e, 0xb6, 0xe9, 0x82, 0x22, 0x5c, 0x00, 0xbc, 0xdb, 0x22, 0x44, 0xb7, 0xc7, 0xd7, + 0x36, 0x62, 0x96, 0xe8, 0x2c, 0xbc, 0xc1, 0x10, 0xf9, 0x0a, 0xd9, 0x5b, 0x49, 0xf2, 0x4a, 0x29, + 0x19, 0x8e, 0xed, 0x24, 0x83, 0xe6, 0xc9, 0xc6, 0x93, 
0xde, 0x4b, 0x75, 0xe8, 0x83, 0x93, 0xc8, + 0x0b, 0xb9, 0xa6, 0x99, 0xb3, 0xeb, 0xa8, 0x12, 0x55, 0x6a, 0xbd, 0xba, 0x10, 0x1a, 0xf3, 0xe5, + 0x26, 0xed, 0xb5, 0x1f, 0x19, 0x37, 0xc8, 0x92, 0xc1, 0x8c, 0x6e, 0xd5, 0x2c, 0xb1, 0x8b, 0x18, + 0x7c, 0x48, 0x04, 0x41, 0x02, 0xac, 0x9c, 0xc1, 0x9b, 0xe4, 0x47, 0x10, 0x79, 0x26, 0x39, 0x10, + 0x46, 0x0a, 0x9d, 0x99, 0x90, 0x9a, 0x57, 0x74, 0xf9, 0x9e, 0xb8, 0xfc, 0xa6, 0x89, 0xfe, 0x95, + 0xcf, 0xf8, 0x63, 0xbe, 0xc3, 0x58, 0xe6, 0x91, 0x84, 0x94, 0x86, 0x15, 0xfc, 0x85, 0x6b, 0x29, + 0xd1, 0xf1, 0x2c, 0xa0, 0x6f, 0x43, 0x7a, 0xb1, 0x5d, 0xfb, 0x01, 0xc0, 0xcb, 0x22, 0xa5, 0xe9, + 0xca, 0xc9, 0x04, 0xee, 0xaf, 0xa4, 0xcc, 0x57, 0xa6, 0xd9, 0x8b, 0x73, 0x67, 0xd9, 0xf6, 0x08, + 0x3e, 0x6e, 0x80, 0xb8, 0xba, 0x4d, 0x5c, 0xb1, 0xc6, 0x0a, 0x81, 0x73, 0x79, 0xa6, 0x48, 0xd3, + 0x7b, 0x10, 0xea, 0x2d, 0x6e, 0xf5, 0x70, 0xa3, 0x78, 0x9a, 0x68, 0xb0, 0x4d, 0xe4, 0xd1, 0x8d, + 0x31, 0x1f, 0xc5, 0xf8, 0xfa, 0x8b, 0x16, 0x10, 0xef, 0x89, 0xa7, 0x06, 0xe7, 0x71, 0xa9, 0x4d, + 0x78, 0xa2, 0x48, 0x60, 0xbc, 0x9a, 0x69, 0x0b, 0xe8, 0xf1, 0x17, 0x53, 0x13, 0x01, 0x5f, 0xc6, + 0xb1, 0x3f, 0x21, 0xcc, 0x87, 0x27, 0x83, 0x76, 0xcc, 0xcf, 0x1e, 0x31, 0x73, 0xcd, 0x8a, 0xec, + 0xa0, 0xf3, 0x9b, 0x4e, 0x69, 0xae, 0xd5, 0x3b, 0xe1, 0xa9, 0x37, 0xd9, 0x70, 0x8f, 0xb1, 0x12, + 0x3f, 0xae, 0x5c, 0x3e, 0x8d, 0xd8, 0x4a, 0x96, 0xfb, 0xc0, 0xb5, 0x59, 0x8e, 0x21, 0x50, 0x00, + 0xb1, 0xf3, 0x26, 0xe5, 0xb9, 0xbc, 0x78, 0x58, 0xea, 0x5a, 0xd5, 0x05, 0x91, 0xbb, 0xbf, 0xb5, + 0xa9, 0x2e, 0x85, 0x02, 0x7d, 0x9c, 0xa8, 0xc1, 0xfe, 0x0a, 0xb1, 0x2a, 0xa2, 0x80, 0x48, 0x39, + 0x4e, 0x1b, 0xc0, 0x38, 0xbf, 0x21, 0x17, 0xa6, 0x0f, 0x66, 0xc1, 0xea, 0xe5, 0x02, 0xb8, 0x8d, + 0x7f, 0xab, 0x54, 0x40, 0x9c, 0x51, 0x25, 0x8c, 0x90, 0xbb, 0xbc, 0x31, 0xcf, 0xae, 0x3c, 0xd4, + 0x9f, 0xa2, 0xd0, 0x3b, 0x93, 0xcb, 0x0d, 0x3b, 0xcc, 0xf1, 0x8a, 0xfe, 0x06, 0x44, 0x54, 0xa0, + 0xb5, 0xdd, 0xcf, 0xa7, 0x51, 0xca, 0x86, 0x0c, 0xe0, 0x21, 0x3a, 0x84, 0xf5, 0xaf, 0xa6, 0x93, + 0x95, 0xcb, 0xd1, 0x90, 0x04, 0x01, 0x42, 0x62, 0xa6, 0xd7, 0xd4, 0x78, 0x78, 0x8d, 0x81, 0x17, + 0x72, 0x53, 0x92, 0x70, 0xd2, 0xb7, 0x90, 0xde, 0x53, 0x86, 0xde, 0x4e, 0xa9, 0x7d, 0x06, 0x9e, + 0x9c, 0x85, 0x4b, 0x95, 0x11, 0x18, 0x92, 0x99, 0xca, 0xc3, 0x41, 0x97, 0x9b, 0x3c, 0x40, 0x68, + 0xf2, 0x0c, 0xe9, 0x16, 0x36, 0x7b, 0x25, 0xfb, 0xa8, 0x67, 0xeb, 0x71, 0x43, 0x25, 0x52, 0x51, + 0x43, 0xc7, 0x1b, 0xd0, 0xbc, 0x20, 0x0d, 0xd7, 0x93, 0x0f, 0xea, 0xcf, 0xdd, 0xd8, 0xe8, 0xd2, + 0xb5, 0x96, 0x27, 0x3d, 0x97, 0x83, 0xec, 0xb9, 0x0f, 0x74, 0xa9, 0x29, 0x88, 0xa3, 0x97, 0x89, + 0x39, 0xe4, 0x16, 0x74, 0x9e, 0xa0, 0x8a, 0x65, 0x21, 0x4c, 0x96, 0x87, 0x19, 0xc9, 0xfd, 0x9a, + 0x19, 0x05, 0x02, 0x05, 0xbd, 0x0f, 0xf2, 0x83, 0xa9, 0x0a, 0x60, 0x9c, 0xd4, 0xed, 0x97, 0xb8, + 0xf3, 0x3e, 0x9b, 0x81, 0xfd, 0x64, 0xcb, 0x27, 0x7c, 0xb0, 0x44, 0xa6, 0xdc, 0x14, 0x78, 0x82, + 0xcc, 0xf9, 0xfe, 0x38, 0xd7, 0x78, 0x92, 0x6f, 0x24, 0xc5, 0x61, 0x67, 0x1a, 0x24, 0x73, 0x8f, + 0x60, 0x70, 0x43, 0x72, 0x45, 0xf1, 0x02, 0x74, 0xd5, 0x78, 0x13, 0xeb, 0x30, 0x54, 0xaa, 0x1c, + 0xa1, 0x2b, 0x00, 0xbf, 0xb2, 0x16, 0x9d, 0x3f, 0x15, 0x40, 0x2d, 0xb9, 0xcd, 0xfc, 0xc1, 0x7d, + 0xab, 0x0e, 0x19, 0xa1, 0x8b, 0x18, 0xd8, 0xf7, 0xd9, 0x7c, 0xb5, 0xb7, 0x59, 0x03, 0xee, 0x75, + 0xfe, 0x2d, 0xba, 0xe5, 0xee, 0x7b, 0x7d, 0x15, 0xb6, 0xbe, 0x75, 0x36, 0x04, 0x67, 0xe4, 0x3a, + 0xe1, 0x99, 0xdc, 0x11, 0x4a, 0xf6, 0x2b, 0x41, 0xbb, 0x1c, 0xe3, 0xca, 0xf4, 0x34, 0x5f, 0xc7, + 0xad, 0xf1, 0x36, 0x2a, 0x1a, 0x27, 0x52, 0x40, 0x21, 0x5d, 0xc8, 0xf6, 0xb4, 
0x33, 0xfe, 0x98, + 0x4d, 0xb1, 0x19, 0x6d, 0xf5, 0xd3, 0x85, 0xfb, 0x8b, 0x59, 0xd9, 0x28, 0x0d, 0x8b, 0x09, 0x20, + 0xbe, 0x66, 0xae, 0x16, 0x1d, 0x04, 0x23, 0xfb, 0x69, 0x0d, 0x8e, 0x6f, 0x7e, 0x4b, 0x39, 0x08, + 0xae, 0x89, 0x1b, 0x71, 0x3f, 0x95, 0x15, 0x8c, 0x4a, 0x49, 0xdf, 0x8e, 0x25, 0x1e, 0xfa, 0x5f, + 0xf5, 0x4a, 0xaa, 0xbb, 0x7b, 0x2a, 0xd4, 0x1a, 0x3a, 0xfd, 0x58, 0xd8, 0xe9, 0x28, 0x6d, 0x09, + 0x5a, 0x8d, 0xf2, 0xb5, 0xe0, 0x47, 0xf7, 0x33, 0x8c, 0xd8, 0x8e, 0x2f, 0x1d, 0xb0, 0x2b, 0x75, + 0x01, 0x9f, 0xa9, 0xd1, 0x78, 0xa1, 0xf3, 0x58, 0xd5, 0x87, 0xe4, 0x87, 0x9e, 0xd0, 0xd0, 0xd2, + 0x7b, 0x3b, 0x64, 0xad, 0x5f, 0x70, 0xa1, 0x4b, 0x3f, 0xc9, 0x3a, 0x90, 0x3e, 0x2b, 0xab, 0x98, + 0xea, 0xbf, 0x94, 0x31, 0x76, 0x9c, 0xb0, 0xc8, 0xd2, 0xdc, 0x33, 0xed, 0x4e, 0x59, 0x1f, 0x18, + 0x2e, 0x06, 0x15, 0x49, 0x0c, 0xb0, 0xbc, 0xb3, 0x70, 0x74, 0xa0, 0x66, 0x35, 0x19, 0x83, 0x2e, + 0xae, 0xf5, 0xa8, 0x49, 0xc4, 0x89, 0xa4, 0x91, 0x19, 0xc7, 0x7b, 0x14, 0xd5, 0x27, 0x1b, 0x34, + 0xbb, 0xc0, 0x70, 0x19, 0x8f, 0xf8, 0xa8, 0x70, 0xb4, 0x4f, 0xc5, 0x67, 0x02, 0xb7, 0x5e, 0xdb, + 0x1b, 0x24, 0x5c, 0x31, 0x28, 0x2e, 0xf4, 0x1f, 0x5e, 0xf5, 0xfd, 0x5a, 0xf4, 0xa7, 0xfb, 0x23, + 0x88, 0x26, 0x65, 0x5c, 0x60, 0x4c, 0x2a, 0x54, 0x30, 0xe0, 0xb6, 0x4a, 0x59, 0xc8, 0xf1, 0x83, + 0xb5, 0x36, 0x14, 0x4a, 0x50, 0x6f, 0x6a, 0xbb, 0x2b, 0x40, 0xe2, 0xf1, 0xad, 0x23, 0xb8, 0x30, + 0x8f, 0xa7, 0xaf, 0xbd, 0xd1, 0x10, 0xf1, 0xf4, 0x13, 0xb1, 0x2c, 0x80, 0x41, 0x11, 0x7b, 0x7f, + 0xf9, 0x3d, 0xdd, 0xde, 0x1f, 0xaa, 0xf4, 0xf0, 0xdb, 0x99, 0x13, 0xbf, 0x8a, 0x39, 0xa3, 0x1e, + 0xd7, 0x7a, 0x3b, 0xc0, 0x78, 0xda, 0x1d, 0x0d, 0xac, 0xc6, 0x46, 0x35, 0xe5, 0x38, 0x92, 0x22, + 0xa2, 0x53, 0xaf, 0x49, 0xd1, 0x63, 0x4d, 0x47, 0x01, 0x1a, 0x80, 0xb4, 0x86, 0x97, 0xb8, 0xa7, + 0x46, 0xf7, 0xb5, 0x57, 0x68, 0x61, 0x1d, 0xe5, 0x87, 0xa2, 0x81, 0xf6, 0xd3, 0xaa, 0xae, 0xdf, + 0x72, 0x1f, 0x83, 0x66, 0x98, 0x07, 0xa4, 0xa5, 0xf8, 0x8d, 0x22, 0x12, 0x38, 0x74, 0x3d, 0xbf, + 0x80, 0x7a, 0x70, 0xa7, 0xa6, 0xfa, 0xa3, 0xeb, 0x98, 0x4d, 0x8c, 0xc1, 0xf0, 0xf5, 0x47, 0x54, + 0x0c, 0x0d, 0x3a, 0xd0, 0x61, 0xea, 0x07, 0x7b, 0x2b, 0xb6, 0x4e, 0x7d, 0xfc, 0xef, 0x9f, 0x22, + 0xf8, 0x69, 0x86, 0x35, 0x1c, 0xf8, 0x2b, 0x05, 0xbb, 0x04, 0xcf, 0x97, 0xc0, 0xc9, 0x17, 0xe0, + 0x2d, 0x15, 0x3a, 0xe1, 0xab, 0x0b, 0x1a, 0x3d, 0xcf, 0xf7, 0xa8, 0x14, 0x8b, 0xe5, 0x82, 0x4e, + 0xc2, 0x6e, 0xbd, 0xbe, 0x70, 0xd7, 0xe6, 0x28, 0x42, 0x6f, 0x6e, 0x85, 0xfc, 0xbc, 0x1e, 0xf5, + 0x06, 0xff, 0x50, 0x95, 0x66, 0x27, 0xd9, 0x2f, 0x6a, 0xf4, 0xb4, 0x1b, 0xb2, 0xa1, 0xa0, 0xca, + 0x71, 0x1a, 0x4e, 0x18, 0x90, 0xe7, 0xd4, 0x00, 0xa9, 0x52, 0x17, 0xff, 0x81, 0x0d, 0x0c, 0x50, + 0x06, 0x33, 0x73, 0xb5, 0x80, 0x84, 0x89, 0x56, 0x44, 0x5d, 0x06, 0x31, 0x9e, 0xc1, 0x23, 0xbb, + 0x29, 0x16, 0xb5, 0xb1, 0x2e, 0xbf, 0xe8, 0xd9, 0x96, 0xfd, 0x59, 0x4c, 0x4d, 0xa2, 0x29, 0x1c, + 0xe3, 0xa1, 0x2c, 0x83, 0xce, 0x14, 0x7c, 0x25, 0xcc, 0x57, 0x57, 0xaf, 0xd8, 0xaf, 0x5c, 0x18, + 0x13, 0x73, 0x86, 0xe2, 0x44, 0x18, 0x88, 0x1a, 0xae, 0x56, 0xbc, 0xb2, 0x3d, 0x1b, 0xed, 0x72, + 0x4e, 0x78, 0x5a, 0x34, 0x80, 0xae, 0x67, 0xa3, 0x4c, 0xbe, 0x70, 0x41, 0xa0, 0xe7, 0x72, 0x8c, + 0xb6, 0xde, 0xd9, 0xdf, 0x35, 0x2a, 0x6d, 0x4b, 0x04, 0xa8, 0xaf, 0xde, 0xcf, 0xcf, 0x6f, 0x48, + 0x51, 0xf1, 0x4a, 0x99, 0xc8, 0x20, 0xed, 0xf5, 0x60, 0x79, 0xf7, 0x22, 0xc1, 0x24, 0x8b, 0x4a, + 0x55, 0x87, 0x40, 0xee, 0xa3, 0x01, 0x22, 0xe8, 0xc3, 0xa6, 0x75, 0xf4, 0x76, 0x8a, 0x0f, 0xa4, + 0x94, 0x39, 0x57, 0x80, 0xb0, 0xd7, 0x8d, 0x74, 0x49, 0xa5, 0x02, 0xa5, 0x38, 0xfe, 0x70, 0xfc, + 0xd2, 
0xaa, 0xf0, 0xa6, 0x87, 0xfc, 0x21, 0x00, 0x41, 0x9d, 0x69, 0xd6, 0x12, 0x34, 0xa1, 0xfa, + 0x56, 0x60, 0x34, 0xa7, 0x61, 0x05, 0x1a, 0xb2, 0xaf, 0x6a, 0xf8, 0x19, 0xda, 0x85, 0x5f, 0xfe, + 0xde, 0xc4, 0x73, 0xbc, 0x20, 0xad, 0x25, 0xb4, 0x99, 0x12, 0xd5, 0x0b, 0xa9, 0x19, 0x5b, 0x91, + 0x0d, 0xdc, 0x1a, 0x78, 0x21, 0x20, 0x58, 0xdd, 0xbc, 0xca, 0x4a, 0x2a, 0xec, 0xe4, 0x71, 0x9c, + 0xdc, 0xb2, 0xd8, 0x4d, 0x21, 0x44, 0x35, 0x5a, 0x8c, 0xbf, 0x50, 0x83, 0x3c, 0x22, 0x9b, 0xa6, + 0x2d, 0x92, 0xca, 0xe5, 0x51, 0x7a, 0x38, 0xdb, 0x9d, 0xe5, 0x28, 0x83, 0xe8, 0x49, 0x8c, 0x32, + 0xaa, 0xaa, 0x67, 0x4a, 0x33, 0xe3, 0x47, 0x7f, 0xd3, 0xdc, 0x75, 0xb0, 0x19, 0x11, 0xe9, 0xe3, + 0x30, 0xc5, 0x97, 0xaf, 0x88, 0x4a, 0xe0, 0x91, 0x3e, 0x3a, 0xe5, 0x69, 0x3d, 0xe9, 0xab, 0x91, + 0xfd, 0x63, 0xd8, 0xdf, 0x41, 0xc6, 0xaf, 0x1a, 0x26, 0xa6, 0x65, 0x04, 0x30, 0x9d, 0xbe, 0x3c, + 0xea, 0x32, 0xa6, 0x1d, 0x36, 0xb1, 0xa6, 0xaa, 0x29, 0xdd, 0x01, 0xa3, 0x61, 0x60, 0xf0, 0x5d, + 0x09, 0x3a, 0x8d, 0xe1, 0xc3, 0x76, 0xce, 0xa5, 0x6d, 0x87, 0x8a, 0x05, 0x6e, 0xf6, 0x8e, 0x19, + 0x9a, 0xe6, 0x1e, 0x29, 0x22, 0x3e, 0xe4, 0xdc, 0x9b, 0x30, 0x8f, 0xf8, 0x28, 0x7f, 0x94, 0xa7, + 0x7a, 0x0b, 0x8b, 0xdb, 0xc4, 0x68, 0x8f, 0xb7, 0xa0, 0x32, 0xe5, 0xfd, 0x0b, 0x0b, 0x11, 0x36, + 0xfc, 0xc0, 0x4f, 0x85, 0x22, 0xcb, 0xfd, 0x48, 0xf0, 0xe1, 0xd8, 0x38, 0x7a, 0x5a, 0xac, 0x2f, + 0x10, 0x03, 0xc1, 0x22, 0x05, 0xa4, 0xb5, 0x06, 0x26, 0xe2, 0x0b, 0xcf, 0xf4, 0xa9, 0x50, 0x9a, + 0x6e, 0xf0, 0x79, 0xce, 0x9d, 0x2d, 0xc0, 0xcc, 0x05, 0xd0, 0xfa, 0xa0, 0xc1, 0x24, 0x1c, 0x12, + 0x10, 0xc8, 0xa9, 0x72, 0x7c, 0x07, 0x2d, 0x38, 0x5d, 0xdb, 0x5c, 0xce, 0xa6, 0xb9, 0x03, 0x96, + 0x6f, 0x38, 0x34, 0xd8, 0x1c, 0x34, 0x09, 0x9a, 0xcc, 0xa9, 0x1d, 0x3c, 0x08, 0x95, 0xaf, 0x16, + 0x25, 0x17, 0x17, 0x87, 0xa9, 0xd0, 0x00, 0xc4, 0x36, 0x64, 0x10, 0x39, 0x14, 0x0d, 0xd9, 0xe4, + 0x9b, 0xad, 0x9f, 0xef, 0x0c, 0xb2, 0x5a, 0x1c, 0x38, 0x73, 0xce, 0x6c, 0xfd, 0x42, 0x92, 0x66, + 0xbf, 0x57, 0x0a, 0x4f, 0x18, 0x39, 0xd9, 0x8e, 0x0d, 0x37, 0x1e, 0xc6, 0x74, 0xd7, 0xc1, 0xbf, + 0x15, 0x59, 0x59, 0xe5, 0x29, 0x49, 0x7e, 0x90, 0xd4, 0x8e, 0x56, 0xa1, 0xfd, 0xeb, 0x0c, 0x8e, + 0x91, 0xcb, 0xde, 0xd8, 0xdd, 0xd7, 0x3f, 0xbd, 0xc8, 0xea, 0x38, 0xc5, 0x6e, 0xd9, 0x5f, 0x2e, + 0x92, 0xa2, 0x51, 0x2a, 0x35, 0x05, 0x14, 0x84, 0xd1, 0x2f, 0x52, 0xbb, 0x4a, 0x0b, 0x13, 0x0d, + 0x2a, 0x4a, 0x65, 0x24, 0x18, 0x98, 0xe0, 0xbc, 0x09, 0x82, 0x37, 0xd8, 0x73, 0xc8, 0x24, 0x7f, + 0x57, 0x06, 0x74, 0x75, 0xb9, 0x3b, 0x52, 0x6f, 0x36, 0x3b, 0x10, 0x73, 0x1b, 0x62, 0xd4, 0x36, + 0x5c, 0xeb, 0x98, 0xe8, 0x64, 0xd6, 0xe2, 0x60, 0x5d, 0xf2, 0x23, 0x0e, 0xc8, 0xff, 0x35, 0x7a, + 0xba, 0xbb, 0x29, 0x79, 0x0f, 0x3a, 0xdf, 0xcd, 0x1a, 0xb0, 0xcd, 0xf5, 0xc3, 0xbf, 0x90, 0x9a, + 0x66, 0xec, 0xa6, 0x5f, 0x11, 0x4d, 0x07, 0xf6, 0x78, 0xa0, 0xf6, 0x79, 0x83, 0x0f, 0x2b, 0xde, + 0xdb, 0xc1, 0xd2, 0xf8, 0x62, 0x72, 0xe2, 0x91, 0x7c, 0xd1, 0x59, 0x47, 0xa5, 0x64, 0xe0, 0xbd, + 0xc4, 0x57, 0xe0, 0x64, 0xcf, 0x4b, 0xdc, 0x0d, 0xe2, 0x48, 0x7f, 0x7f, 0xb1, 0x5a, 0xc8, 0x99, + 0xad, 0xb1, 0x41, 0xbf, 0x2a, 0x1a, 0x01, 0x37, 0x64, 0x14, 0x1c, 0x45, 0xd9, 0xb6, 0x25, 0xf1, + 0x53, 0x34, 0x81, 0x54, 0xc7, 0x05, 0x2a, 0xf9, 0xfc, 0x01, 0x60, 0x65, 0x8a, 0x85, 0x3e, 0x02, + 0x3c, 0x05, 0xeb, 0x7b, 0x9f, 0xde, 0x8f, 0xba, 0xed, 0xe4, 0xb2, 0xac, 0x6a, 0xce, 0xa4, 0xa6, + 0xde, 0x6d, 0x9d, 0xd9, 0x7b, 0x2c, 0x1e, 0x08, 0xf6, 0x43, 0xdb, 0x67, 0xfd, 0x6a, 0xbe, 0xb2, + 0x6f, 0x04, 0x85, 0xcb, 0xee, 0xc5, 0x9c, 0x9a, 0x88, 0x57, 0xfb, 0x5d, 0xe0, 0x22, 0x31, 0xba, + 0x5f, 0x79, 0xb3, 0x51, 0x9e, 
0xed, 0xb9, 0x38, 0x99, 0x07, 0x97, 0x0e, 0x1e, 0xf8, 0xc6, 0x27, + 0x1d, 0x97, 0x2b, 0x61, 0x20, 0x80, 0x44, 0xea, 0x62, 0x0c, 0xcc, 0xa7, 0xb7, 0x44, 0x9d, 0xd2, + 0x12, 0x37, 0x93, 0x49, 0x0d, 0x03, 0x93, 0x52, 0xd0, 0xff, 0x58, 0xf1, 0xf0, 0x3f, 0x61, 0x9e, + 0x15, 0x44, 0xf1, 0xc7, 0x08, 0x61, 0x30, 0xea, 0xb4, 0x90, 0xbe, 0xe0, 0x86, 0xe2, 0xb7, 0x99, + 0x17, 0x02, 0xd5, 0x6a, 0xd6, 0x12, 0xf7, 0x55, 0xa5, 0xd8, 0x49, 0xa1, 0xd3, 0xf2, 0xf9, 0x8b, + 0x3e, 0xa3, 0x1d, 0x09, 0x91, 0xf2, 0x6e, 0xa0, 0xf0, 0xf1, 0xc2, 0xbd, 0x9e, 0x93, 0x74, 0x40, + 0xcd, 0x52, 0x6d, 0xc3, 0xd6, 0xf3, 0x36, 0xe2, 0x42, 0x0e, 0x6f, 0x92, 0x04, 0x96, 0x89, 0x16, + 0x6a, 0x35, 0x63, 0x0e, 0xf4, 0x50, 0xa2, 0xcf, 0xa3, 0x2c, 0x99, 0xee, 0x58, 0xd1, 0x2c, 0x11, + 0x69, 0xeb, 0xc9, 0x10, 0xe0, 0x9a, 0xa6, 0xd0, 0xa2, 0x94, 0x27, 0xba, 0x61, 0x6d, 0x51, 0xd2, + 0x79, 0xd3, 0xfc, 0x45, 0x09, 0xa2, 0x22, 0xc7, 0xce, 0x34, 0x1d, 0xb0, 0xe7, 0x69, 0x5c, 0x05, + 0xb8, 0x40, 0x83, 0x63, 0x90, 0x24, 0x85, 0x7f, 0xf6, 0x57, 0x3d, 0x7a, 0x4a, 0x13, 0xb5, 0x5b, + 0x3d, 0xba, 0xd1, 0xde, 0x79, 0xe5, 0xbb, 0xb8, 0x18, 0xa6, 0x7e, 0x39, 0x6d, 0xf3, 0x22, 0x46, + 0x8d, 0xa3, 0xae, 0xda, 0x5d, 0x29, 0x86, 0x33, 0x64, 0x8d, 0xe8, 0x16, 0xa3, 0xf9, 0x1b, 0xb4, + 0x91, 0x29, 0xdc, 0xce, 0x09, 0x8b, 0x2c, 0xec, 0x6e, 0x7e, 0x20, 0xba, 0xf7, 0xbd, 0x23, 0x92, + 0x97, 0x62, 0xdf, 0xb6, 0x3c, 0xea, 0xdf, 0xd4, 0x9b, 0xb8, 0xf9, 0x74, 0x34, 0x5b, 0x05, 0x0d, + 0x8a, 0x70, 0x70, 0x96, 0xe3, 0xf7, 0x2c, 0xb4, 0xe3, 0x13, 0x74, 0xee, 0x63, 0xe3, 0xaf, 0x66, + 0x30, 0xe2, 0x48, 0xd5, 0x19, 0xb9, 0x15, 0x71, 0x3e, 0x2a, 0x21, 0x11, 0x4f, 0x1d, 0xda, 0x72, + 0x5d, 0x0a, 0xdb, 0x82, 0x4f, 0x68, 0x5b, 0xd7, 0x07, 0xc5, 0x6e, 0x32, 0x61, 0x4c, 0xdd, 0x80, + 0xda, 0x6c, 0xd7, 0xfd, 0x31, 0x22, 0x6a, 0xb5, 0x88, 0x77, 0x60, 0x8b, 0x8a, 0x3d, 0x83, 0xe1, + 0x68, 0x37, 0xa7, 0x3b, 0x75, 0x91, 0x28, 0x0a, 0x2b, 0xb2, 0xcf, 0x4e, 0x5b, 0x86, 0x98, 0x42, + 0x7c, 0x03, 0x76, 0xe8, 0x0c, 0xf8, 0x93, 0xe9, 0x09, 0xe1, 0xd6, 0x85, 0xcc, 0x33, 0x31, 0x6e, + 0x23, 0xba, 0xfd, 0xf6, 0x24, 0xe9, 0x9a, 0x9d, 0xd0, 0xf8, 0x67, 0x3c, 0xd1, 0xf6, 0x7a, 0xd7, + 0xe8, 0x01, 0x5b, 0xc7, 0x17, 0x27, 0x07, 0x2e, 0x54, 0x66, 0x0a, 0xe4, 0xbc, 0xb7, 0xc1, 0x3d, + 0xb9, 0xc7, 0xa7, 0xc9, 0xb8, 0xde, 0x55, 0x59, 0xcc, 0x79, 0xaa, 0x48, 0x37, 0x68, 0x63, 0xdc, + 0x2f, 0x7b, 0xc8, 0x3d, 0xb4, 0xf4, 0x5b, 0x56, 0xd5, 0x8a, 0x5d, 0x10, 0xa2, 0xad, 0xc7, 0x6c, + 0x8d, 0x1e, 0x80, 0xc9, 0xba, 0xaf, 0xe3, 0xee, 0xd5, 0x68, 0xa5, 0xc2, 0xcd, 0xe2, 0xbf, 0x94, + 0x72, 0x5a, 0x13, 0xd9, 0x7e, 0x0f, 0x23, 0x33, 0xb5, 0xed, 0x8c, 0x7e, 0xa5, 0x6d, 0x16, 0x8c, + 0xa5, 0x1a, 0x78, 0x9a, 0x8e, 0x1e, 0x05, 0x2d, 0x40, 0x01, 0x81, 0x58, 0xc6, 0xf1, 0xcc, 0x60, + 0x15, 0x96, 0xd9, 0x53, 0x4a, 0x22, 0x69, 0xd6, 0xb0, 0x59, 0xaf, 0xe5, 0x83, 0x91, 0x7f, 0xa5, + 0x8c, 0xd7, 0x44, 0x27, 0xc1, 0x50, 0x12, 0x14, 0x34, 0x17, 0xff, 0x7a, 0x3e, 0x39, 0x53, 0x80, + 0xb9, 0xb3, 0xc1, 0xaf, 0x24, 0x53, 0x40, 0x33, 0xa5, 0x55, 0x24, 0x25, 0x39, 0x7f, 0x9a, 0xc4, + 0xc7, 0xa1, 0x67, 0x15, 0x9d, 0x37, 0x97, 0x40, 0x2c, 0xbb, 0x3b, 0x1f, 0x85, 0xaa, 0x86, 0x1e, + 0x3b, 0x48, 0xbb, 0xb2, 0xd4, 0x50, 0xf5, 0x07, 0x0a, 0x28, 0x85, 0xc5, 0xc6, 0xcf, 0xb3, 0x6b, + 0x4f, 0x3b, 0x64, 0x69, 0xe3, 0x21, 0xcc, 0xbd, 0x5a, 0x9f, 0x07, 0x19, 0x05, 0x68, 0x73, 0x27, + 0xa5, 0x64, 0x5f, 0x49, 0x41, 0x59, 0x22, 0xfe, 0xca, 0xe9, 0xa9, 0xb4, 0x33, 0xcd, 0x74, 0x27, + 0x73, 0x8a, 0x90, 0x8d, 0xc7, 0xce, 0xbf, 0x50, 0xd0, 0x0d, 0x2a, 0xa0, 0x9c, 0x66, 0xdd, 0x3c, + 0xa0, 0xaa, 0xb4, 0xf3, 0xa7, 0xb8, 0xa3, 0xf8, 0xc4, 
0x04, 0x95, 0x62, 0xc6, 0xf6, 0x4c, 0x51, + 0xe9, 0xc8, 0x1c, 0xb2, 0x5b, 0x7a, 0x96, 0x84, 0x91, 0x4c, 0xaa, 0xb1, 0x6f, 0x7e, 0x62, 0xfe, + 0x52, 0x54, 0xe1, 0xec, 0x44, 0x95, 0x5f, 0xaa, 0x38, 0x15, 0xe1, 0x06, 0xb4, 0x75, 0x18, 0x99, + 0x6a, 0x6c, 0x49, 0x67, 0x5e, 0xdb, 0xd4, 0x82, 0x37, 0x51, 0x27, 0xa3, 0x1c, 0xba, 0x62, 0x01, + 0x84, 0x14, 0xf7, 0x3a, 0xdd, 0xf6, 0x49, 0xe2, 0xc4, 0x20, 0x4a, 0xf0, 0x12, 0x89, 0x69, 0xe8, + 0x6e, 0x17, 0x8e, 0xf2, 0xa2, 0x9c, 0xcb, 0xc2, 0xa8, 0x7e, 0x9e, 0xf6, 0x9a, 0x0e, 0x74, 0xa0, + 0xd1, 0x2f, 0x9f, 0x99, 0x81, 0xd4, 0xc2, 0x5c, 0x02, 0xc1, 0x62, 0xaa, 0x5a, 0x68, 0x67, 0x32, + 0x1d, 0xdd, 0x80, 0xcc, 0x32, 0x61, 0x1d, 0x6c, 0x3c, 0x73, 0x52, 0xd7, 0x35, 0x27, 0xf8, 0x48, + 0x63, 0x76, 0xf9, 0xf9, 0x38, 0x52, 0xc2, 0x57, 0xec, 0xa0, 0x6e, 0x2a, 0x4b, 0x19, 0xa2, 0x61, + 0xfe, 0x78, 0x2c, 0xfb, 0xc2, 0x51, 0xb9, 0xbf, 0x92, 0xf0, 0xd9, 0xd1, 0x5d, 0x41, 0x3a, 0x9b, + 0x60, 0x1b, 0xc0, 0xad, 0x6e, 0x39, 0xc6, 0xd9, 0x89, 0xc8, 0x75, 0x5e, 0x67, 0xf5, 0xc4, 0xce, + 0x16, 0x2f, 0xc8, 0xed, 0x6d, 0xd8, 0x8a, 0x02, 0x89, 0xef, 0x47, 0x47, 0xc2, 0xa3, 0x71, 0x45, + 0x0e, 0xaa, 0x88, 0x45, 0xe4, 0x77, 0xff, 0x25, 0x79, 0x00, 0x70, 0x11, 0x90, 0x63, 0xa0, 0xf9, + 0xf9, 0x24, 0x53, 0xc7, 0x41, 0x1c, 0x3a, 0x1a, 0x0b, 0x84, 0x44, 0x85, 0xfe, 0xe4, 0x27, 0xf2, + 0x5c, 0x64, 0xcb, 0xa6, 0x39, 0xd1, 0x94, 0x38, 0x0b, 0x6d, 0xe8, 0x17, 0x3f, 0x30, 0xde, 0xe6, + 0x15, 0x1c, 0x3d, 0xd7, 0x37, 0xdd, 0xf0, 0x9f, 0x61, 0x20, 0xcd, 0x80, 0xcb, 0x4a, 0x5f, 0x9f, + 0x1b, 0x91, 0x88, 0x0b, 0x70, 0xa5, 0x43, 0xaa, 0x74, 0xff, 0xce, 0x17, 0xe5, 0xd1, 0x4b, 0xc3, + 0xb6, 0x6c, 0xe4, 0x77, 0xd2, 0xb9, 0x67, 0x3e, 0xf9, 0xa1, 0x27, 0x28, 0x0e, 0x84, 0x99, 0x9c, + 0xae, 0xa6, 0x81, 0x18, 0x73, 0xd5, 0xdf, 0x28, 0xff, 0xcc, 0x1c, 0xc3, 0xfc, 0xdd, 0x5a, 0xe8, + 0x3d, 0x26, 0xc8, 0x94, 0xe2, 0xcb, 0x40, 0x59, 0xac, 0x2e, 0xb5, 0xa4, 0x1e, 0x5a, 0xfa, 0x48, + 0x48, 0xa8, 0xbe, 0x2c, 0x0b, 0x65, 0x00, 0x0b, 0xac, 0xdd, 0xcf, 0x2b, 0x39, 0x02, 0xd1, 0xdf, + 0xe4, 0x68, 0x57, 0x00, 0xc9, 0x30, 0x74, 0x2f, 0x0f, 0x1e, 0xe5, 0x65, 0xad, 0x94, 0x54, 0x04, + 0x50, 0x68, 0x1b, 0xc6, 0xca, 0x63, 0x72, 0xab, 0x14, 0x8b, 0xd8, 0x7e, 0x7b, 0x68, 0xc5, 0xaf, + 0x37, 0x96, 0xbb, 0x2b, 0x6f, 0xd4, 0x5f, 0x89, 0xa1, 0x92, 0xee, 0xaa, 0x98, 0x07, 0x40, 0x1e, + 0x72, 0xad, 0x76, 0x16, 0xd4, 0xc6, 0x83, 0x10, 0xd8, 0x9d, 0xea, 0x6c, 0x78, 0x53, 0xbb, 0xde, + 0xd8, 0x2a, 0x80, 0xd8, 0xc1, 0x08, 0x0f, 0x46, 0x33, 0x6d, 0x5e, 0x63, 0xcc, 0x46, 0x54, 0xd2, + 0x39, 0x35, 0x29, 0x81, 0x80, 0x93, 0x29, 0x89, 0xb5, 0x4d, 0xdf, 0xeb, 0x78, 0x37, 0xd0, 0xe5, + 0x1d, 0x0c, 0x24, 0x3d, 0xbf, 0x3a, 0x2d, 0xb4, 0x90, 0x42, 0xdb, 0xb3, 0xfc, 0x5a, 0xb0, 0x85, + 0x40, 0x08, 0x20, 0x05, 0x0e, 0xef, 0xc3, 0xa8, 0xe9, 0x29, 0x83, 0x84, 0x3d, 0x76, 0xda, 0xeb, + 0x0e, 0x67, 0x70, 0x45, 0xb9, 0xfb, 0x56, 0x95, 0x36, 0xe7, 0x7a, 0xf0, 0xe9, 0x7a, 0xa4, 0xee, + 0x22, 0x18, 0x28, 0x2a, 0xfb, 0x19, 0x9a, 0x33, 0x9d, 0x09, 0xed, 0x15, 0x33, 0xe4, 0xb7, 0x4e, + 0x68, 0xbc, 0x82, 0x4e, 0x7e, 0x21, 0x28, 0xac, 0xb7, 0xa8, 0x25, 0xab, 0xc9, 0xfa, 0x5b, 0xb6, + 0x39, 0x3d, 0xef, 0x95, 0x8d, 0x13, 0x51, 0x98, 0xd7, 0xae, 0x04, 0xad, 0x79, 0x50, 0x57, 0x3f, + 0x34, 0x89, 0x4b, 0x79, 0x65, 0x45, 0xce, 0x00, 0x8b, 0x7d, 0x42, 0x7f, 0xc2, 0x6c, 0xbe, 0x7d, + 0xe9, 0x55, 0x72, 0x51, 0xb3, 0xab, 0xc9, 0x23, 0x2b, 0xc4, 0x87, 0x02, 0xbc, 0xc9, 0xc7, 0xcb, + 0x7b, 0xeb, 0xb4, 0x2b, 0xf5, 0x32, 0x63, 0xbf, 0x0c, 0xa0, 0xfb, 0x80, 0x3c, 0x1c, 0xa7, 0xe7, + 0x77, 0xbc, 0x10, 0xd9, 0x02, 0x89, 0x0c, 0xbd, 0x5d, 0xa6, 0x2b, 0xba, 0x06, 
0xab, 0xb0, 0xe8, + 0x08, 0x59, 0x7a, 0x7e, 0x38, 0x11, 0x56, 0x5c, 0x7e, 0x70, 0xad, 0x9d, 0x4b, 0xe4, 0x61, 0x24, + 0xf2, 0x05, 0x3d, 0xc7, 0xd5, 0xc3, 0xc0, 0x3c, 0x6b, 0x19, 0x0e, 0x88, 0x29, 0x3b, 0xdc, 0x0e, + 0xf6, 0x4a, 0x55, 0xfa, 0x23, 0x36, 0xa4, 0xb2, 0x35, 0xbc, 0xef, 0xe4, 0x2a, 0x7e, 0xcf, 0xd3, + 0x5e, 0x3c, 0xa1, 0x1c, 0x1c, 0x18, 0x38, 0xd6, 0x34, 0x9e, 0x99, 0x60, 0xfd, 0xec, 0x29, 0x36, + 0xc1, 0x99, 0x83, 0x0a, 0x6d, 0x53, 0x9a, 0xf8, 0x71, 0x30, 0x02, 0x69, 0x5a, 0x62, 0xa1, 0x46, + 0xd8, 0xa4, 0x5c, 0x9d, 0x27, 0xf0, 0x19, 0x42, 0xec, 0x2f, 0x3f, 0x32, 0xee, 0xad, 0x6f, 0xc6, + 0x66, 0x77, 0x81, 0x46, 0x88, 0x32, 0xa8, 0x70, 0x7a, 0x61, 0x55, 0xce, 0xc9, 0x3d, 0xf8, 0x45, + 0xcc, 0xdc, 0xe6, 0x15, 0xf8, 0x88, 0x3d, 0x85, 0x6d, 0x52, 0x56, 0xc8, 0x97, 0x89, 0xd1, 0xaf, + 0x78, 0x40, 0xd2, 0x9d, 0x58, 0x88, 0x38, 0xc8, 0x92, 0xb0, 0xbb, 0xe8, 0x78, 0x57, 0x68, 0xe9, + 0x14, 0x0a, 0x76, 0x15, 0xbb, 0x89, 0x99, 0x5e, 0xdf, 0x41, 0x38, 0x37, 0xcf, 0x16, 0x98, 0x2e, + 0x75, 0x59, 0xa9, 0x93, 0x5d, 0x28, 0xc7, 0xb3, 0x7b, 0x5f, 0x51, 0x5f, 0x74, 0x13, 0x3b, 0xfd, + 0x97, 0xce, 0xec, 0x79, 0xb6, 0xf0, 0xd9, 0x37, 0x25, 0xea, 0x00, 0x3f, 0x52, 0xc7, 0x69, 0x59, + 0x7f, 0x6f, 0x14, 0x5d, 0xb5, 0xe3, 0xb2, 0x58, 0xe0, 0x52, 0x00, 0xb8, 0x26, 0xf3, 0xaa, 0xbe, + 0x1c, 0x08, 0x09, 0x82, 0xd2, 0x48, 0x7c, 0x84, 0xf4, 0x01, 0xa0, 0x9f, 0xe6, 0xaa, 0xd6, 0xee, + 0xfe, 0xcd, 0x58, 0x7c, 0xed, 0xfb, 0xcc, 0xdf, 0xe8, 0x30, 0x03, 0x19, 0x6a, 0xa1, 0x75, 0x6c, + 0x22, 0xfe, 0xa8, 0x8c, 0x5b, 0x5c, 0x42, 0x19, 0x38, 0x64, 0xf9, 0x1d, 0x8a, 0x1b, 0x2e, 0xa4, + 0x81, 0xd1, 0x07, 0x19, 0xe8, 0x25, 0xf6, 0x7a, 0x7c, 0x9d, 0xc9, 0xe2, 0xd7, 0x1d, 0xcd, 0xb6, + 0xa4, 0x07, 0x53, 0x90, 0xcb, 0x59, 0x51, 0x4b, 0x58, 0x15, 0xcd, 0xfe, 0x68, 0xb8, 0x4e, 0x12, + 0x23, 0x61, 0xf6, 0x8a, 0xb3, 0x5f, 0xc3, 0x66, 0x70, 0x03, 0xd6, 0xe0, 0x27, 0x47, 0xb0, 0xd0, + 0xe9, 0x55, 0xbf, 0x87, 0x36, 0xb9, 0x39, 0x24, 0xb3, 0xe5, 0x6d, 0xc6, 0x72, 0x1b, 0x5d, 0x5d, + 0x17, 0xee, 0x7b, 0x0c, 0x62, 0xde, 0xca, 0x7d, 0xe4, 0xcd, 0xd5, 0xab, 0xf8, 0x27, 0x7b, 0x32, + 0x7f, 0x5e, 0xd9, 0x2c, 0x06, 0x5c, 0xcc, 0xb4, 0xbe, 0xd3, 0x29, 0xee, 0x2f, 0x00, 0xa9, 0x22, + 0x75, 0xd3, 0x9b, 0x1f, 0x80, 0x8d, 0x04, 0xa4, 0xeb, 0xeb, 0x09, 0x7e, 0x65, 0x4e, 0x12, 0x22, + 0x82, 0xba, 0x2d, 0x95, 0x76, 0xd1, 0xc3, 0x25, 0x0c, 0xa4, 0x58, 0x23, 0xb7, 0xe9, 0x8b, 0x92, + 0x8f, 0xcf, 0x0a, 0xb0, 0xe0, 0xe7, 0x77, 0xbf, 0x19, 0x84, 0x57, 0xc2, 0xbc, 0x3c, 0xb1, 0xa1, + 0xf8, 0x97, 0x47, 0xaa, 0x09, 0x08, 0x61, 0x0a, 0xaa, 0x55, 0x93, 0x98, 0xfa, 0x3d, 0x48, 0x38, + 0xfb, 0xfc, 0x30, 0xdf, 0x94, 0x08, 0x29, 0xa3, 0x09, 0x62, 0xf6, 0x8e, 0x3e, 0xa9, 0x83, 0xff, + 0x38, 0x2d, 0x5d, 0x45, 0xfe, 0xa3, 0xce, 0xdb, 0x9b, 0x0a, 0x5d, 0xb3, 0x8c, 0x42, 0x9f, 0x96, + 0x50, 0xef, 0xfb, 0x9c, 0xf6, 0x84, 0xcd, 0x8f, 0xbc, 0x19, 0x87, 0x6c, 0xc1, 0x95, 0xf7, 0xa6, + 0x61, 0x8b, 0xa2, 0x01, 0xcc, 0x9a, 0x16, 0x90, 0x60, 0xdb, 0xdb, 0xa9, 0x73, 0xdc, 0x71, 0xa9, + 0xf9, 0x94, 0xd0, 0x2e, 0x59, 0x4d, 0xe5, 0xeb, 0x9f, 0xdc, 0x1b, 0x97, 0xc3, 0xf9, 0xd2, 0xb3, + 0x96, 0x0c, 0x7f, 0xf9, 0xca, 0x63, 0xc7, 0x19, 0x36, 0x4a, 0x01, 0xcb, 0x7c, 0x6f, 0x1f, 0xfb, + 0x1a, 0xeb, 0xfa, 0xbc, 0xfd, 0xc4, 0x2e, 0x1f, 0xd0, 0xd2, 0x39, 0x32, 0xa6, 0x94, 0x9a, 0xdf, + 0x12, 0x98, 0x8e, 0x03, 0x83, 0xdf, 0x36, 0xaf, 0xa0, 0x11, 0x92, 0xf0, 0xcb, 0x4d, 0xb6, 0xcd, + 0x93, 0x0f, 0xab, 0xda, 0xce, 0xc4, 0x06, 0x6e, 0xf1, 0xb3, 0x70, 0x26, 0xf5, 0xfe, 0xe5, 0xc2, + 0x25, 0xa5, 0x42, 0xee, 0xbc, 0xeb, 0x69, 0xc0, 0xe3, 0xa6, 0x9e, 0x7b, 0xfe, 0x5d, 0x6c, 0x30, + 0x3c, 
0x18, 0xec, 0x25, 0xae, 0x14, 0x89, 0xf2, 0x8b, 0x1b, 0xae, 0x7b, 0x2f, 0x9c, 0x8e, 0x34, + 0x54, 0xe5, 0x6a, 0x2c, 0x8c, 0xa5, 0x68, 0xa1, 0x1d, 0x96, 0x56, 0xaa, 0x90, 0x9f, 0x81, 0x73, + 0xc0, 0x9c, 0x3c, 0x6c, 0x78, 0x3e, 0x99, 0xbf, 0x48, 0xea, 0xb4, 0xcd, 0x13, 0x83, 0x81, 0x43, + 0xc8, 0xc5, 0x49, 0x20, 0x4c, 0x06, 0x3f, 0xcb, 0x20, 0x1f, 0xd0, 0x60, 0x59, 0x23, 0x13, 0xf5, + 0x9d, 0xbe, 0x22, 0xbf, 0xce, 0xcd, 0xe2, 0x91, 0x85, 0xa0, 0x26, 0xdc, 0x53, 0x43, 0xf1, 0x41, + 0x57, 0x98, 0x23, 0x96, 0x28, 0x57, 0x99, 0x34, 0xa6, 0xba, 0x3b, 0x3a, 0xb2, 0x74, 0x8b, 0x27, + 0xd0, 0xf0, 0xe0, 0x40, 0x70, 0xd8, 0x5f, 0x27, 0x9d, 0xe3, 0xfe, 0x06, 0x55, 0x52, 0x92, 0x71, + 0x4f, 0xac, 0x57, 0x04, 0x1f, 0x35, 0x63, 0xdc, 0x10, 0x21, 0x72, 0xe3, 0x7f, 0x7e, 0xd1, 0xb0, + 0x84, 0x80, 0x6a, 0xf7, 0x26, 0x21, 0xc8, 0xc1, 0xad, 0xb1, 0x02, 0x6f, 0xfa, 0xc5, 0x86, 0x30, + 0x6c, 0x2d, 0x45, 0x16, 0xcf, 0xcd, 0x9d, 0x10, 0xf2, 0xfe, 0xf0, 0xd3, 0x65, 0xed, 0x64, 0x45, + 0xb3, 0xf3, 0x3c, 0x84, 0x6e, 0x83, 0xc5, 0xd8, 0xde, 0x1c, 0x21, 0xee, 0xd7, 0x10, 0x34, 0x63, + 0x58, 0x84, 0xfb, 0x78, 0x97, 0x48, 0xd3, 0x68, 0xd6, 0x52, 0x01, 0xdf, 0xa2, 0x34, 0x04, 0x7f, + 0x8d, 0x6e, 0x47, 0x18, 0xe2, 0xe9, 0x1c, 0x69, 0x33, 0xcb, 0xbe, 0xb3, 0x68, 0x52, 0xa8, 0x41, + 0x60, 0x3a, 0x89, 0x20, 0x9b, 0xa4, 0x3a, 0x4b, 0x73, 0x4c, 0xd4, 0x8e, 0x7f, 0x14, 0x14, 0xf0, + 0x5d, 0xcd, 0x8a, 0xdc, 0x44, 0xa2, 0x65, 0xa4, 0x8a, 0x05, 0xf4, 0xf4, 0xdf, 0x31, 0xcf, 0xf0, + 0x99, 0x5d, 0x3c, 0x05, 0x5c, 0x12, 0x92, 0x2b, 0x0b, 0x92, 0xd3, 0x36, 0xb9, 0x63, 0xbf, 0xa2, + 0x0e, 0xf6, 0x5f, 0x9e, 0x33, 0x39, 0x5e, 0xf3, 0x3c, 0xca, 0x9c, 0x41, 0x0d, 0x88, 0xc6, 0x80, + 0x32, 0x57, 0x18, 0x84, 0x30, 0x71, 0x98, 0xff, 0x82, 0xf2, 0x19, 0xa9, 0x53, 0x20, 0xee, 0x9a, + 0xd4, 0xcd, 0xef, 0xd1, 0x44, 0xe1, 0x63, 0x79, 0xfb, 0x35, 0xf7, 0xbc, 0x98, 0x1f, 0xa5, 0xef, + 0x22, 0xc9, 0x26, 0xac, 0x21, 0x0c, 0x08, 0x1f, 0x6c, 0x1f, 0xb4, 0xcc, 0xb7, 0x3c, 0x64, 0x9d, + 0xd2, 0x26, 0x1b, 0x36, 0x5e, 0x33, 0xbd, 0x3a, 0x53, 0x29, 0x88, 0xa9, 0xe0, 0xf6, 0x27, 0x02, + 0xa2, 0x16, 0x5e, 0xad, 0x73, 0xb3, 0x6d, 0x67, 0x16, 0xed, 0xc0, 0x17, 0x20, 0x21, 0x77, 0x21, + 0x02, 0x3a, 0xc4, 0xa5, 0x63, 0x31, 0x23, 0x62, 0x17, 0xb4, 0x4d, 0x73, 0xf6, 0x7a, 0xee, 0xeb, + 0xc0, 0x51, 0xdd, 0x40, 0xbd, 0x1c, 0x7e, 0x49, 0xaa, 0x0f, 0xf8, 0xde, 0x02, 0x8d, 0xf3, 0x53, + 0x49, 0x31, 0x57, 0xed, 0xc0, 0x13, 0x4d, 0x89, 0x4d, 0x70, 0xad, 0xfa, 0x6d, 0x94, 0xa4, 0x22, + 0xe1, 0x11, 0xea, 0x27, 0x06, 0x2f, 0x80, 0xcf, 0xa4, 0x55, 0xe4, 0x45, 0x4a, 0xbf, 0xfd, 0x51, + 0xb2, 0xa5, 0x20, 0xc7, 0x4d, 0x5d, 0xef, 0xb4, 0xb5, 0x64, 0x90, 0xd0, 0xf1, 0xc6, 0x4e, 0x1d, + 0xc9, 0x54, 0x58, 0x97, 0xe4, 0x92, 0x81, 0xbd, 0xc7, 0xef, 0x45, 0x07, 0x86, 0x14, 0x90, 0x77, + 0x90, 0xb9, 0x68, 0x99, 0xa7, 0x17, 0x18, 0xf0, 0xfd, 0x6c, 0x5d, 0xa1, 0x93, 0x7d, 0x0a, 0xa8, + 0x6c, 0x6c, 0x18, 0x1d, 0xe9, 0xb6, 0x18, 0x39, 0x7f, 0x75, 0x71, 0x5b, 0x4d, 0x48, 0x4b, 0x1c, + 0x27, 0x27, 0xd0, 0x01, 0x26, 0x44, 0xef, 0x45, 0x10, 0x62, 0xf2, 0x63, 0xb7, 0xaf, 0x82, 0x51, + 0xbe, 0x6f, 0x54, 0x04, 0x69, 0x5e, 0x48, 0xef, 0xa7, 0x99, 0xd5, 0x9f, 0xb8, 0x7b, 0x70, 0x83, + 0xe0, 0x37, 0x86, 0x19, 0xd6, 0x0f, 0xb4, 0xe6, 0xaf, 0x5e, 0xd2, 0x02, 0x90, 0xb1, 0x3b, 0x68, + 0x71, 0xe0, 0xed, 0xc3, 0x58, 0x9a, 0xd3, 0xca, 0x9a, 0x0e, 0xe4, 0x5b, 0x09, 0xa3, 0x33, 0x3c, + 0x63, 0x4d, 0x19, 0x97, 0xba, 0xb9, 0x9f, 0x94, 0x78, 0x18, 0xa5, 0x07, 0x2d, 0x50, 0x35, 0xa5, + 0xf5, 0x56, 0x6c, 0xe9, 0x28, 0x57, 0x72, 0x83, 0x51, 0xf2, 0x6d, 0x4b, 0x2d, 0x0d, 0x06, 0x53, + 0xe4, 0x39, 0x5a, 0x9a, 0x95, 
0x04, 0x8d, 0x08, 0x97, 0x68, 0x3e, 0x2c, 0x2f, 0x7f, 0x8d, 0x31, + 0x11, 0x74, 0xd1, 0x05, 0xb8, 0x2b, 0x4c, 0x0e, 0x04, 0x90, 0xe7, 0xf0, 0x3e, 0xa2, 0x23, 0x4a, + 0x7c, 0xb4, 0xa9, 0x64, 0x0f, 0x9c, 0xdf, 0x28, 0x1d, 0x0a, 0xb0, 0x8b, 0x12, 0x88, 0x5b, 0xa5, + 0x33, 0x30, 0x4c, 0xe7, 0x5c, 0x90, 0x8e, 0x8a, 0x83, 0x57, 0xb4, 0x5c, 0xc5, 0x74, 0xf5, 0x04, + 0xc3, 0xf9, 0x2a, 0xd7, 0x7d, 0x3b, 0x7e, 0xb1, 0xb4, 0xba, 0x35, 0x07, 0x94, 0x34, 0x91, 0x35, + 0x5b, 0xca, 0x4d, 0x2f, 0xe2, 0xe1, 0x96, 0x3d, 0x4c, 0xe5, 0x2a, 0x89, 0x27, 0xcd, 0xb3, 0x9b, + 0xc5, 0xf8, 0x4c, 0x01, 0x0d, 0x8c, 0x0c, 0xe6, 0x30, 0x1b, 0x8b, 0xa3, 0x7d, 0xd1, 0x49, 0x82, + 0x05, 0xe8, 0x47, 0x1f, 0xd2, 0xd1, 0x47, 0x1f, 0xa9, 0x1b, 0xdc, 0x65, 0x18, 0xfe, 0x0e, 0x48, + 0x26, 0x03, 0xae, 0x15, 0x79, 0xdd, 0xd1, 0xa6, 0x39, 0xb1, 0x8d, 0xd7, 0x8c, 0x06, 0xb2, 0x28, + 0x01, 0xf4, 0xb9, 0x19, 0xa8, 0x94, 0x3c, 0xa0, 0xa1, 0x08, 0xa1, 0x98, 0x31, 0x5c, 0xec, 0x2a, + 0x67, 0x1f, 0xd1, 0xef, 0x26, 0x37, 0x60, 0xd4, 0x3e, 0xea, 0xa0, 0x4d, 0xd6, 0xce, 0xfe, 0xe3, + 0x2c, 0xbd, 0xa5, 0x92, 0x04, 0x47, 0xbf, 0x9c, 0xb8, 0xb2, 0x57, 0x4a, 0x98, 0xda, 0x84, 0xc3, + 0x5f, 0x4c, 0x7d, 0xe4, 0xe9, 0x29, 0x83, 0xfd, 0x64, 0x34, 0x05, 0xa9, 0xfc, 0xe6, 0xe7, 0xb1, + 0x3e, 0x0d, 0xc4, 0xbf, 0xf2, 0xb2, 0x17, 0x00, 0xab, 0x1a, 0x2a, 0x8c, 0xbe, 0xf0, 0x44, 0x3e, + 0x46, 0xb1, 0x2a, 0x94, 0x1b, 0x05, 0x9d, 0x30, 0x95, 0x6a, 0x5e, 0x24, 0xa9, 0xf3, 0xd4, 0x7c, + 0x79, 0x5d, 0xf3, 0x6b, 0xdb, 0x09, 0x13, 0xa1, 0x55, 0x07, 0x7a, 0xef, 0x9e, 0x46, 0xb0, 0x91, + 0x0c, 0xcc, 0x07, 0x89, 0xa8, 0xc1, 0x63, 0x9b, 0x37, 0x17, 0x75, 0x9a, 0x42, 0x6a, 0xe4, 0x12, + 0x03, 0x96, 0x36, 0x77, 0x40, 0xf8, 0x48, 0xdc, 0x4d, 0x4d, 0x7c, 0x2b, 0x49, 0xa6, 0xa5, 0xee, + 0xef, 0x7f, 0x81, 0x92, 0x10, 0xae, 0x03, 0xb6, 0xfe, 0x81, 0x2c, 0x17, 0x23, 0x22, 0x3e, 0x07, + 0x81, 0x2b, 0xb0, 0x2d, 0x7c, 0xea, 0xbf, 0x02, 0x65, 0x92, 0x04, 0x00, 0x96, 0xb8, 0xa4, 0x0d, + 0x53, 0x55, 0x3d, 0x62, 0x1e, 0xb7, 0x0f, 0xa9, 0x7f, 0x33, 0xce, 0xd8, 0x65, 0x75, 0x0d, 0x9f, + 0x2e, 0xd6, 0x8b, 0x69, 0x52, 0xfc, 0x54, 0xc8, 0x4f, 0x10, 0x9c, 0xdb, 0xe4, 0x60, 0xd6, 0x34, + 0x64, 0x59, 0xbb, 0x35, 0xe2, 0xf9, 0xed, 0x5a, 0xe9, 0xce, 0xde, 0x92, 0x5a, 0x68, 0x96, 0x8a, + 0x89, 0x8b, 0xaf, 0xed, 0x86, 0xbf, 0x58, 0x95, 0x1f, 0x01, 0x21, 0xec, 0xdf, 0xba, 0x93, 0x20, + 0x70, 0x10, 0xd1, 0x2e, 0x6b, 0x0e, 0x2d, 0xdf, 0xc7, 0xb0, 0x2c, 0x23, 0xe0, 0x84, 0xb2, 0xbd, + 0x20, 0x8c, 0x2f, 0xcd, 0xbd, 0x49, 0x0c, 0x34, 0x7c, 0xe6, 0x54, 0x51, 0x9e, 0x00, 0xa6, 0xc6, + 0x45, 0x9c, 0x37, 0xd3, 0x01, 0x76, 0x40, 0x4a, 0x5a, 0x5c, 0xc1, 0x21, 0xbc, 0x5b, 0x51, 0x3b, + 0xec, 0x29, 0xb5, 0xae, 0xb5, 0x8d, 0xe9, 0xde, 0x2e, 0x08, 0x5d, 0xc0, 0xa6, 0xad, 0x65, 0xd1, + 0xd5, 0xfa, 0x90, 0x08, 0x04, 0xc3, 0x05, 0x52, 0x93, 0xd5, 0xbe, 0xbd, 0xd5, 0x96, 0x51, 0x85, + 0x67, 0x06, 0x1a, 0xa4, 0x50, 0xf7, 0x92, 0x90, 0xb9, 0xf7, 0x07, 0xae, 0xd0, 0x5d, 0xc5, 0xf1, + 0x79, 0x95, 0x47, 0x06, 0xdb, 0x8e, 0xfe, 0x26, 0xfe, 0xa7, 0x32, 0x34, 0x6e, 0xea, 0x72, 0x63, + 0x3c, 0x3a, 0x2f, 0x98, 0x1e, 0xbe, 0xbf, 0x49, 0x5a, 0xa7, 0x90, 0xc1, 0xe8, 0x78, 0x2b, 0x4c, + 0xea, 0x02, 0x0b, 0xf8, 0x2b, 0x2c, 0x8b, 0xab, 0xcf, 0xdc, 0x5b, 0x58, 0x23, 0xba, 0x3c, 0x68, + 0xdf, 0x5b, 0x33, 0xaf, 0xe1, 0xb5, 0xc2, 0x62, 0x62, 0x77, 0xa8, 0xf3, 0xa4, 0x6b, 0xbe, 0x2a, + 0x8f, 0x29, 0x77, 0xa4, 0x6c, 0x20, 0x5b, 0xb9, 0x65, 0x2d, 0x2a, 0xf3, 0x71, 0xd8, 0x8a, 0xd1, + 0xc4, 0x2b, 0xa7, 0x68, 0x8f, 0x19, 0x0b, 0x76, 0x21, 0x49, 0xdb, 0x17, 0x25, 0xd1, 0xc6, 0x51, + 0x4c, 0x8d, 0xc1, 0x60, 0x6d, 0x7e, 0xce, 0x38, 0x3b, 
0x24, 0x65, 0xe2, 0x93, 0xf4, 0x7f, 0x36, + 0x5a, 0x48, 0x3c, 0x99, 0x96, 0xa8, 0x21, 0xb2, 0x0a, 0xf1, 0xdf, 0x3a, 0x5e, 0x26, 0x0f, 0xde, + 0x47, 0xb7, 0x61, 0xf6, 0x7d, 0xe2, 0x6a, 0x35, 0x27, 0x18, 0x05, 0x6e, 0xe9, 0x2b, 0x5c, 0x61, + 0x1b, 0x3e, 0xe2, 0x27, 0x44, 0xf9, 0xf7, 0x21, 0x51, 0x61, 0xf1, 0x48, 0x3a, 0x08, 0x6c, 0xa9, + 0xe8, 0xea, 0x55, 0xf5, 0xac, 0xb1, 0xb6, 0x3f, 0xf7, 0xf6, 0xef, 0xb8, 0x70, 0x3a, 0xf3, 0x15, + 0x24, 0xe3, 0x88, 0x02, 0x65, 0x9f, 0xf0, 0xd3, 0x90, 0xa8, 0x86, 0x78, 0x4b, 0xbc, 0x89, 0xab, + 0xdd, 0x95, 0xe1, 0x53, 0x43, 0x90, 0x39, 0xcc, 0x60, 0xb5, 0xe3, 0x17, 0xf1, 0x9b, 0x21, 0x2b, + 0xb3, 0xc1, 0x56, 0x27, 0x1d, 0xc7, 0x45, 0x34, 0x13, 0x7b, 0xe9, 0x7d, 0x2e, 0xb0, 0x19, 0xf9, + 0xe2, 0xdb, 0x6d, 0x7c, 0x86, 0xf5, 0xab, 0xe6, 0x78, 0xaf, 0xda, 0x17, 0x93, 0x31, 0xc6, 0x64, + 0x95, 0xe1, 0x57, 0x8b, 0x8e, 0x6f, 0x75, 0xad, 0xa0, 0x51, 0x87, 0xe1, 0x32, 0xce, 0xb0, 0xc8, + 0x49, 0xa1, 0x37, 0x4e, 0x46, 0x8b, 0xa7, 0x58, 0xc8, 0x10, 0x16, 0x1a, 0x4b, 0xdd, 0x25, 0x1a, + 0x1b, 0x2f, 0x94, 0x31, 0x35, 0x30, 0x02, 0x91, 0xa2, 0xc8, 0x1d, 0xb3, 0x1b, 0x0d, 0xd2, 0xf2, + 0x99, 0x03, 0x43, 0x49, 0xa5, 0xf8, 0x47, 0xf3, 0x1d, 0xa9, 0x9f, 0xba, 0x70, 0xec, 0xe1, 0xee, + 0x34, 0x9a, 0x33, 0xc8, 0xcc, 0x28, 0xeb, 0xe4, 0x12, 0x8a, 0x62, 0xd0, 0xf2, 0xd2, 0xa1, 0xec, + 0x5e, 0x58, 0x27, 0x17, 0x66, 0xec, 0xad, 0xf0, 0x73, 0x10, 0x53, 0xef, 0x00, 0xe3, 0xbd, 0x3e, + 0xeb, 0x66, 0xaa, 0xd4, 0x29, 0xf4, 0xec, 0x37, 0x1c, 0xad, 0x7a, 0x9c, 0xa5, 0x5e, 0x4b, 0x68, + 0xe6, 0xc1, 0x4e, 0xf4, 0xcd, 0x65, 0xe1, 0x92, 0x7f, 0x97, 0xad, 0x84, 0x09, 0xc8, 0x83, 0x59, + 0x2c, 0x01, 0x64, 0xf8, 0xb2, 0x7c, 0x0b, 0x12, 0x9a, 0x19, 0xca, 0xdb, 0x0d, 0x31, 0x30, 0x4d, + 0x52, 0x17, 0x9e, 0xea, 0x6e, 0x58, 0xb4, 0xe2, 0xa2, 0x00, 0xdb, 0x68, 0x50, 0x90, 0xd8, 0x10, + 0xac, 0xbe, 0x33, 0x83, 0x80, 0x95, 0x51, 0x5a, 0x33, 0xbb, 0x20, 0x6a, 0xde, 0xaf, 0x86, 0x3b, + 0x11, 0x16, 0xb4, 0x07, 0xcc, 0x7f, 0x69, 0xb7, 0xa8, 0xbc, 0x66, 0x03, 0x8e, 0x89, 0x99, 0x8f, + 0x86, 0x14, 0xa6, 0x07, 0xc9, 0xed, 0x48, 0x55, 0xb0, 0xa7, 0x5e, 0x83, 0x82, 0x28, 0xcc, 0xd4, + 0x18, 0xae, 0x67, 0xd3, 0x6c, 0xad, 0xd0, 0x0e, 0x1f, 0x35, 0x83, 0xb9, 0xd9, 0xa0, 0xba, 0xf4, + 0x97, 0x29, 0xda, 0xe7, 0x83, 0xf2, 0xad, 0xe0, 0xcf, 0x77, 0x62, 0x85, 0x93, 0x71, 0x4b, 0xf3, + 0xa7, 0x42, 0x8b, 0x9b, 0x54, 0x33, 0x3e, 0xd0, 0x3f, 0x58, 0x67, 0xa4, 0x30, 0x41, 0x38, 0x54, + 0xde, 0xd6, 0x46, 0x98, 0xaf, 0x17, 0x86, 0x6e, 0x92, 0xc2, 0x49, 0x26, 0x25, 0xee, 0xa2, 0x92, + 0xa9, 0xa7, 0xc6, 0x8d, 0x86, 0x58, 0x59, 0x81, 0x65, 0x3f, 0x2d, 0xae, 0x12, 0x34, 0xf6, 0x02, + 0xe1, 0x73, 0x0c, 0xbd, 0xf3, 0x89, 0x2a, 0xd8, 0xc3, 0x1b, 0x13, 0xb0, 0x3b, 0xb9, 0x85, 0x61, + 0x9c, 0x96, 0xdc, 0x16, 0x19, 0x47, 0x4c, 0x3c, 0xf1, 0xb1, 0xcc, 0x10, 0x7f, 0xce, 0x2f, 0x5d, + 0xc0, 0xb1, 0xf8, 0x95, 0x49, 0xde, 0xd3, 0x15, 0x55, 0x46, 0x01, 0x5f, 0xd5, 0x1b, 0xef, 0x18, + 0xce, 0x93, 0xd2, 0x2e, 0xe9, 0xdf, 0xdd, 0xa1, 0x3d, 0x3e, 0x86, 0x0f, 0x06, 0x59, 0x3d, 0x8b, + 0xd2, 0x07, 0x88, 0x75, 0x16, 0x22, 0xf9, 0xf6, 0xab, 0x5e, 0xb1, 0xc2, 0x8a, 0x45, 0xa2, 0x9c, + 0xdc, 0x0a, 0x6a, 0x29, 0xa3, 0x52, 0xe3, 0xb8, 0x99, 0xe2, 0xc8, 0x59, 0x99, 0x52, 0x1d, 0xde, + 0x5b, 0x69, 0x37, 0x90, 0xd1, 0x0a, 0x4b, 0x2f, 0xc5, 0xd5, 0x6d, 0x97, 0xea, 0xe5, 0x63, 0x4e, + 0xc2, 0x71, 0x63, 0x25, 0xdd, 0x39, 0x6e, 0x39, 0x96, 0x3d, 0xe5, 0xc5, 0xcf, 0xc3, 0xb5, 0x6a, + 0xbe, 0xde, 0x7c, 0xa6, 0xe6, 0x73, 0x19, 0xf0, 0x69, 0xa3, 0xdc, 0x05, 0x59, 0x94, 0x25, 0x9b, + 0x3b, 0x10, 0x77, 0x05, 0xc3, 0xe6, 0x50, 0xb8, 0x0b, 0x1a, 0x65, 0x7b, 0x53, 
0xab, 0x92, 0x9e, + 0xeb, 0x73, 0xa3, 0x19, 0xa5, 0x07, 0x6d, 0x8f, 0x8b, 0x0e, 0x5a, 0x3a, 0xf3, 0xc5, 0xf1, 0xc8, + 0xac, 0xb1, 0x5b, 0x46, 0x45, 0x69, 0x1f, 0x63, 0x5a, 0xd9, 0xd8, 0x44, 0x7c, 0x02, 0x54, 0x39, + 0x2f, 0xf6, 0xc7, 0x97, 0x6d, 0x36, 0x66, 0xac, 0xaa, 0xbc, 0xb9, 0x48, 0xe0, 0xde, 0x1f, 0x4e, + 0x04, 0x10, 0x36, 0x97, 0xfd, 0x29, 0x56, 0x1c, 0x9e, 0x44, 0x13, 0x15, 0x30, 0x4c, 0x33, 0x81, + 0x7d, 0x09, 0x22, 0x82, 0xa3, 0x3a, 0x86, 0xa3, 0x09, 0xab, 0xbe, 0x72, 0x5f, 0xd8, 0x73, 0xfb, + 0x91, 0xf7, 0x2f, 0x75, 0x35, 0x17, 0xac, 0x41, 0xef, 0x7b, 0x2d, 0xfc, 0x75, 0x7c, 0xde, 0x17, + 0x2b, 0xdd, 0xe5, 0xc6, 0x51, 0x2a, 0x3b, 0x94, 0x76, 0x7d, 0xd1, 0x1b, 0xb2, 0x07, 0xd9, 0x61, + 0x75, 0x5d, 0x92, 0x0e, 0x33, 0x40, 0xac, 0x5b, 0x3f, 0xf9, 0xd5, 0x70, 0x7c, 0x83, 0xf1, 0x56, + 0x5b, 0xb1, 0xe7, 0x49, 0xaa, 0x4e, 0x76, 0x03, 0x6e, 0xc7, 0x9b, 0x85, 0xa0, 0xd3, 0xfb, 0x5a, + 0x5c, 0xa6, 0xdc, 0xd3, 0x99, 0xcf, 0x75, 0xc8, 0xa4, 0x8c, 0xfe, 0xc0, 0x4f, 0x91, 0xa1, 0x42, + 0x3b, 0xbe, 0x5f, 0x98, 0xca, 0x1a, 0x9f, 0xbc, 0x8b, 0xba, 0x14, 0xdc, 0xee, 0x57, 0x47, 0x2d, + 0x0d, 0x17, 0x60, 0x2a, 0x3b, 0x30, 0xfb, 0x90, 0x70, 0xc1, 0x06, 0x6f, 0x05, 0xea, 0x10, 0xc2, + 0x71, 0x3c, 0x1b, 0xc7, 0x54, 0x6a, 0xc0, 0xb1, 0xc1, 0x11, 0x2c, 0x1a, 0x4d, 0xcb, 0x15, 0x8e, + 0x8f, 0x1c, 0x0f, 0x3b, 0xec, 0x43, 0x0c, 0x81, 0x03, 0xf7, 0x6e, 0x23, 0x90, 0x1f, 0x79, 0x1f, + 0x77, 0xe3, 0x3e, 0x45, 0xac, 0xdd, 0x01, 0x2d, 0xc5, 0x13, 0x24, 0xd5, 0x94, 0x2f, 0x5c, 0x84, + 0x6b, 0xce, 0xea, 0x47, 0xcb, 0x2e, 0x2b, 0xac, 0xe8, 0xf6, 0xee, 0x28, 0xe1, 0xb1, 0x45, 0x94, + 0x95, 0xd6, 0xcf, 0x53, 0x2b, 0x83, 0x7f, 0xdb, 0x91, 0x67, 0xa8, 0xa7, 0xfe, 0x19, 0x29, 0xed, + 0x32, 0x2d, 0x6e, 0xb1, 0x6b, 0x7b, 0xb7, 0x0c, 0x68, 0xb4, 0x6e, 0x7a, 0x06, 0x4a, 0x48, 0x88, + 0xcf, 0x57, 0xcb, 0x62, 0x63, 0xa6, 0x22, 0xab, 0xb0, 0xa9, 0xfd, 0xe5, 0x19, 0xaa, 0x94, 0xde, + 0xfa, 0x28, 0x23, 0xe8, 0xe6, 0x6a, 0xc5, 0x0e, 0xf6, 0xe1, 0x03, 0xb6, 0xe8, 0xa0, 0xa9, 0x11, + 0x15, 0xbb, 0xca, 0x8c, 0xe9, 0x4e, 0xb0, 0x27, 0x20, 0xd5, 0x41, 0x7e, 0x5a, 0x73, 0x2b, 0xe4, + 0xec, 0xc7, 0x19, 0xe8, 0xb0, 0xf1, 0x95, 0x29, 0xd0, 0x83, 0x88, 0xe7, 0xd7, 0xd5, 0x65, 0x2e, + 0x22, 0xaa, 0x20, 0xd2, 0x11, 0xdd, 0x29, 0xd7, 0x51, 0xa7, 0xd4, 0xb1, 0x1f, 0x38, 0xc4, 0x87, + 0xd5, 0x41, 0xfa, 0x26, 0x5b, 0xbc, 0x2c, 0x33, 0x94, 0x7e, 0x76, 0xc2, 0xb1, 0xb5, 0x36, 0xd0, + 0x7e, 0xf5, 0x28, 0xee, 0xd4, 0x82, 0x2c, 0xf1, 0xef, 0x61, 0x2d, 0x9f, 0xa4, 0x29, 0x67, 0xee, + 0x6f, 0x59, 0xeb, 0xfb, 0x1e, 0xb9, 0xea, 0xd8, 0x45, 0x05, 0x35, 0x12, 0x32, 0x17, 0xd2, 0xfb, + 0x2c, 0x7a, 0x16, 0x51, 0xe7, 0x8b, 0xe0, 0xc3, 0x85, 0x3d, 0xf2, 0xac, 0x0b, 0x15, 0x3d, 0xb5, + 0x22, 0xf6, 0x86, 0x8a, 0x86, 0x6a, 0x87, 0xb3, 0x82, 0x96, 0x99, 0xa6, 0x9f, 0x2e, 0xf2, 0x8e, + 0xd2, 0xdc, 0x34, 0xf0, 0x8e, 0xa5, 0x4a, 0xf3, 0x24, 0xb3, 0x34, 0x02, 0x5a, 0xa8, 0xfb, 0x7c, + 0xc0, 0xbe, 0x7c, 0xb1, 0x4a, 0x82, 0x6d, 0x55, 0x85, 0x79, 0xce, 0x6f, 0x0c, 0x42, 0x3e, 0x32, + 0xb6, 0x5c, 0x14, 0xb8, 0xc5, 0xdc, 0xc9, 0x75, 0x9c, 0xda, 0x19, 0x0e, 0xed, 0x0c, 0xdc, 0xed, + 0xe6, 0x69, 0x96, 0xc7, 0xa9, 0xc6, 0x5e, 0x78, 0x60, 0x51, 0x12, 0x15, 0xe1, 0xaa, 0x34, 0x89, + 0x70, 0xb9, 0x7c, 0x4e, 0x6a, 0x5f, 0x26, 0x19, 0xe2, 0x4f, 0x5e, 0xcd, 0x4b, 0xc1, 0x97, 0xf1, + 0xfb, 0x4b, 0xc0, 0xe8, 0x66, 0xb0, 0x33, 0x95, 0x26, 0x77, 0x37, 0x1f, 0xf8, 0x32, 0x0c, 0x32, + 0xfc, 0x57, 0xe9, 0x9d, 0x06, 0x15, 0x32, 0x67, 0x95, 0xa9, 0x70, 0x2c, 0x5c, 0x97, 0xff, 0xdb, + 0x67, 0x73, 0x23, 0xdc, 0x90, 0x6f, 0x34, 0x05, 0xb7, 0x6c, 0x5a, 0xdf, 0xe1, 0xb3, 0x17, 0xc7, + 0xce, 
0x27, 0xfd, 0x35, 0xd9, 0xbf, 0xbb, 0x72, 0x43, 0xf3, 0x4d, 0x98, 0x57, 0xc6, 0xf8, 0xc7, + 0x48, 0xaf, 0x48, 0x2b, 0x75, 0x13, 0xc7, 0x17, 0xaa, 0x32, 0xee, 0x2e, 0xd4, 0x0f, 0x44, 0x50, + 0x5a, 0xa8, 0x22, 0x28, 0x46, 0x16, 0xf4, 0xc0, 0x68, 0x59, 0x74, 0x70, 0x81, 0x27, 0x60, 0x61, + 0x29, 0x6e, 0x17, 0x52, 0xfb, 0x72, 0x84, 0x3b, 0x2f, 0x20, 0x32, 0x26, 0x9e, 0x3c, 0x38, 0x9b, + 0x37, 0x82, 0xff, 0xb7, 0x43, 0x73, 0xc0, 0x4f, 0x52, 0x87, 0x5a, 0x4a, 0x71, 0x25, 0xb8, 0x81, + 0xf8, 0x97, 0xd5, 0x42, 0xa2, 0x3f, 0x64, 0xb4, 0x2c, 0x8b, 0x8f, 0xf5, 0x76, 0x25, 0x2f, 0xcc, + 0x78, 0x39, 0x9a, 0x1e, 0x34, 0xf3, 0xcd, 0xb0, 0x8d, 0xc3, 0x4e, 0x41, 0x0a, 0xd7, 0x01, 0x94, + 0x31, 0x73, 0xa2, 0x1a, 0xd9, 0xe3, 0xcd, 0x01, 0x45, 0x5a, 0x26, 0xb6, 0x91, 0x1f, 0xf7, 0x06, + 0x7c, 0xf5, 0x80, 0xf5, 0x8f, 0xb4, 0xd7, 0x89, 0x1a, 0xc1, 0x8e, 0x6f, 0x37, 0x2e, 0x9d, 0x3c, + 0x5b, 0x1a, 0x62, 0x25, 0x67, 0xa7, 0xea, 0x46, 0x7e, 0x4b, 0xb2, 0x11, 0x55, 0xcf, 0x42, 0xba, + 0xb0, 0xb4, 0xd3, 0x0e, 0xf3, 0xfa, 0xf0, 0xf0, 0x10, 0x48, 0xe9, 0xb9, 0x6c, 0x9d, 0x91, 0xd8, + 0x4c, 0xde, 0xf2, 0x05, 0x24, 0xee, 0xb0, 0x32, 0x01, 0x87, 0xfe, 0x0d, 0xf0, 0xc2, 0x73, 0x84, + 0x89, 0x47, 0x24, 0xae, 0xbe, 0x10, 0xb4, 0xed, 0xd7, 0x16, 0x40, 0x93, 0x9d, 0xd9, 0x9e, 0xe8, + 0x20, 0xfe, 0x35, 0x8b, 0xff, 0xbe, 0xff, 0x83, 0xc4, 0x2b, 0x8f, 0x4f, 0x22, 0xa4, 0xa1, 0x84, + 0xad, 0xe4, 0x36, 0xe1, 0xd4, 0x2d, 0xc8, 0xbd, 0x54, 0xb0, 0xa6, 0x44, 0xa4, 0x87, 0xff, 0xbc, + 0x36, 0xb9, 0xd6, 0x08, 0x17, 0xeb, 0x3d, 0xec, 0xf2, 0x52, 0x02, 0x40, 0xfa, 0x20, 0x0a, 0x75, + 0x74, 0xee, 0x86, 0x02, 0x1a, 0xd5, 0x82, 0x0b, 0x40, 0xd1, 0x24, 0x3b, 0x8a, 0x66, 0x5f, 0x94, + 0x57, 0x58, 0xd7, 0xac, 0x57, 0xf8, 0x87, 0x32, 0x4f, 0xcc, 0xd6, 0xa1, 0xb2, 0xf2, 0xae, 0xab, + 0x8e, 0xa7, 0x20, 0x5f, 0x6d, 0xd9, 0x7f, 0xef, 0xab, 0x10, 0xb8, 0x2c, 0xc8, 0xee, 0xa1, 0xf5, + 0x09, 0x95, 0x63, 0xdd, 0x2b, 0x6f, 0x25, 0xed, 0xb2, 0xd2, 0x2a, 0x28, 0x76, 0xa5, 0xd0, 0x7c, + 0x26, 0xba, 0xba, 0x87, 0xc4, 0x30, 0x27, 0xe0, 0x80, 0x92, 0x0b, 0xda, 0x87, 0xf8, 0x0d, 0xf4, + 0x6b, 0x28, 0x6d, 0xa5, 0xfd, 0xb1, 0x26, 0x26, 0x94, 0x20, 0xc3, 0x6c, 0x94, 0xd6, 0x24, 0xb1, + 0x89, 0x8a, 0xd5, 0xe4, 0xcb, 0x68, 0x25, 0xa0, 0x68, 0x95, 0x7d, 0x22, 0x18, 0x71, 0xaa, 0x52, + 0xed, 0x5b, 0x05, 0x8b, 0xb3, 0x2a, 0x81, 0x61, 0xf7, 0x5f, 0x10, 0x4c, 0xbb, 0x79, 0x8a, 0x5c, + 0xf2, 0x06, 0xe2, 0xf8, 0xec, 0xb3, 0xc0, 0xec, 0xec, 0xfc, 0xef, 0xf9, 0x68, 0xe1, 0x90, 0xa4, + 0xab, 0x53, 0xb2, 0x61, 0xfc, 0xc9, 0xb6, 0x3a, 0x32, 0x65, 0x92, 0xe6, 0x36, 0xc0, 0x24, 0xd0, + 0xa0, 0xe6, 0x92, 0x43, 0x70, 0x94, 0xf3, 0x8c, 0x2f, 0x76, 0x97, 0x18, 0xb7, 0x20, 0xc7, 0x72, + 0xcc, 0x23, 0x8d, 0xbc, 0x87, 0x9e, 0x46, 0x1b, 0x8c, 0xbb, 0x53, 0xdc, 0xa8, 0x19, 0x52, 0x88, + 0x17, 0xfa, 0x7d, 0x95, 0xda, 0xc2, 0x2a, 0x0d, 0x3d, 0x60, 0xa3, 0xc4, 0x6f, 0x99, 0x46, 0x56, + 0x86, 0x95, 0xd0, 0x68, 0x79, 0x71, 0x3b, 0x94, 0xd9, 0x4e, 0xc8, 0xc5, 0xc3, 0x08, 0x34, 0xa4, + 0x98, 0x7f, 0xdf, 0x74, 0xe0, 0xec, 0x5d, 0xa7, 0x40, 0x43, 0xc8, 0x12, 0x82, 0x1e, 0xaa, 0x9a, + 0xce, 0x76, 0xff, 0x7d, 0xc6, 0x85, 0x73, 0xae, 0x97, 0x49, 0x29, 0xf0, 0xcc, 0xad, 0x85, 0x77, + 0x48, 0x11, 0x96, 0xf9, 0xea, 0x98, 0x56, 0x4e, 0x51, 0xd0, 0xd3, 0x37, 0x30, 0x0e, 0x24, 0x37, + 0x5a, 0xae, 0x7d, 0x06, 0x19, 0x4f, 0xf5, 0xf4, 0x78, 0xbf, 0x8e, 0x01, 0xf0, 0x60, 0xa4, 0x6e, + 0xc0, 0x73, 0x25, 0xd6, 0x62, 0xea, 0xce, 0x45, 0xb9, 0xb2, 0xac, 0xaa, 0x89, 0xcf, 0xcd, 0xb0, + 0xb7, 0xb2, 0xd6, 0xb5, 0x2e, 0x7b, 0x34, 0x4c, 0x11, 0xfa, 0x34, 0xbf, 0xb5, 0x57, 0x6a, 0x2b, + 0x13, 0x6f, 0x1e, 0xba, 0xf9, 
0x6b, 0xd5, 0x8d, 0x76, 0x0c, 0x72, 0x77, 0x7d, 0xcb, 0x52, 0xe1, + 0x2f, 0x85, 0x43, 0xf8, 0x68, 0x41, 0xc6, 0x08, 0x02, 0xf8, 0xb2, 0xd1, 0x3d, 0x4f, 0x4a, 0xa4, + 0x47, 0x39, 0x4c, 0xfe, 0xd9, 0x96, 0x31, 0xa4, 0x82, 0xd5, 0x79, 0xe2, 0x8a, 0xbd, 0x2a, 0x3f, + 0x68, 0x6d, 0x53, 0xde, 0x39, 0xf0, 0x08, 0x01, 0x7f, 0x1b, 0x7d, 0xd9, 0x3c, 0x55, 0xeb, 0x26, + 0xb1, 0x95, 0x43, 0xde, 0x0f, 0xc5, 0x67, 0x85, 0xd8, 0xfa, 0x9c, 0xda, 0xdb, 0x27, 0x04, 0x86, + 0xd7, 0x6c, 0x4e, 0x59, 0xf1, 0xe9, 0x18, 0x2b, 0x3d, 0xb6, 0x13, 0xf9, 0xb9, 0x0e, 0x73, 0x83, + 0x33, 0x56, 0xff, 0xe4, 0xd3, 0xe4, 0x66, 0x46, 0x78, 0xd2, 0xe4, 0x37, 0x21, 0x59, 0x13, 0xfa, + 0x8f, 0x7e, 0x74, 0x2c, 0xaa, 0xc4, 0xaf, 0x80, 0xae, 0x3b, 0x15, 0x5d, 0x47, 0x3a, 0xa6, 0x17, + 0x38, 0x8a, 0x63, 0x6c, 0x43, 0x5b, 0x4f, 0x36, 0x03, 0xa6, 0x72, 0xdb, 0x47, 0x98, 0x20, 0xd5, + 0xbc, 0x8b, 0xde, 0x30, 0x5c, 0x43, 0x0d, 0xf4, 0x3a, 0xda, 0x0e, 0xfa, 0xb1, 0x39, 0x16, 0xf8, + 0x33, 0x3e, 0x5b, 0x7f, 0x1f, 0x17, 0x16, 0x15, 0xd9, 0x93, 0x5e, 0x1a, 0x6d, 0x02, 0x5b, 0x72, + 0x40, 0xbf, 0x10, 0xce, 0x14, 0xa7, 0x3b, 0x3f, 0x1d, 0x35, 0xaa, 0xc6, 0x1c, 0xdb, 0x96, 0x9b, + 0x52, 0x93, 0xcf, 0x05, 0x27, 0xda, 0x76, 0x4d, 0xfe, 0x9f, 0x7c, 0x9b, 0x26, 0x35, 0x96, 0xc3, + 0x16, 0x27, 0x07, 0x8f, 0x56, 0x20, 0x3f, 0xbb, 0x2c, 0xce, 0xa8, 0x19, 0xb3, 0x61, 0x35, 0xac, + 0x3f, 0xbb, 0x9b, 0x45, 0x29, 0xed, 0xe0, 0x32, 0x33, 0x25, 0xae, 0x1a, 0xe2, 0x64, 0x18, 0xb5, + 0x95, 0x95, 0x53, 0xd8, 0x68, 0x3f, 0xb5, 0x7c, 0xaa, 0xe3, 0x28, 0xc2, 0xc5, 0xc1, 0xdc, 0x6b, + 0x2f, 0xbf, 0x3c, 0x83, 0x5e, 0xa5, 0xf9, 0x39, 0xef, 0x56, 0x37, 0xab, 0x96, 0x59, 0x91, 0xfc, + 0x91, 0xe6, 0x73, 0xe6, 0xd0, 0x9a, 0x7b, 0x48, 0x2b, 0xe1, 0x88, 0x00, 0xf7, 0x3a, 0x18, 0x9c, + 0xe2, 0x03, 0x33, 0x70, 0xd5, 0x52, 0x1d, 0xe5, 0x20, 0x2a, 0xa9, 0x33, 0x44, 0x35, 0xee, 0xc8, + 0x74, 0x0a, 0xd3, 0x07, 0x4c, 0x89, 0xc5, 0x76, 0xbf, 0xd2, 0xb3, 0x56, 0x8a, 0x6e, 0x25, 0xd8, + 0xc9, 0xaf, 0xc7, 0x42, 0x34, 0xb2, 0xc1, 0xe7, 0xab, 0xaa, 0xa2, 0x43, 0xc8, 0x04, 0xbb, 0xe7, + 0x32, 0x1a, 0x6f, 0x37, 0x59, 0xc5, 0x00, 0x4a, 0x24, 0x35, 0x81, 0x28, 0xb0, 0x37, 0xea, 0xfe, + 0x9f, 0xe0, 0xda, 0xda, 0xcb, 0xa8, 0x17, 0xc5, 0x2a, 0x43, 0x45, 0x93, 0xa5, 0xfa, 0xd3, 0x7d, + 0x7e, 0x22, 0x1d, 0x83, 0x15, 0x7f, 0x65, 0x50, 0xf8, 0xb1, 0xf5, 0x49, 0x7c, 0x83, 0x37, 0xe6, + 0xc1, 0xd7, 0x20, 0x7e, 0x30, 0xaf, 0xe5, 0x2b, 0xb0, 0xdd, 0x87, 0xa1, 0x3f, 0x23, 0x18, 0x87, + 0x5f, 0x78, 0xab, 0xb1, 0x3b, 0xb4, 0xbc, 0xa6, 0x99, 0x0e, 0x95, 0xc0, 0x01, 0x9c, 0x90, 0xb6, + 0xf2, 0xd8, 0xc5, 0x3f, 0xb9, 0xf6, 0x39, 0xe0, 0x85, 0xc9, 0x39, 0xda, 0x10, 0xd4, 0x67, 0x29, + 0x60, 0x5c, 0xa6, 0x3e, 0x10, 0xb3, 0xc2, 0xf4, 0x47, 0x91, 0x2f, 0x8f, 0x0f, 0x63, 0x2f, 0xec, + 0xef, 0x9a, 0x0a, 0x99, 0xa6, 0x53, 0x89, 0xa6, 0xa5, 0x16, 0x90, 0x9f, 0xb7, 0xc7, 0x52, 0xce, + 0x37, 0x8e, 0x04, 0x26, 0x0f, 0x5e, 0x8b, 0xfc, 0xe0, 0x7d, 0x1e, 0x52, 0xe4, 0xf4, 0x1e, 0xd5, + 0xad, 0xf1, 0xe4, 0x96, 0xa7, 0x04, 0xe9, 0x21, 0x65, 0xcc, 0x47, 0x9e, 0x5e, 0x5a, 0xce, 0xd4, + 0xb5, 0xdf, 0x3d, 0xd4, 0x1a, 0xbe, 0x2c, 0xb5, 0x47, 0xd0, 0x06, 0xf4, 0xd0, 0xb3, 0xe4, 0xbc, + 0x8a, 0xc2, 0x32, 0x03, 0x23, 0x4e, 0xa7, 0x73, 0x25, 0x27, 0xa3, 0x5d, 0x07, 0x3a, 0x61, 0xa3, + 0xc8, 0x02, 0x77, 0xa2, 0x62, 0x1d, 0x9d, 0xb2, 0x10, 0x38, 0x2c, 0xba, 0x75, 0x08, 0x9d, 0xc8, + 0xb8, 0xd3, 0xa5, 0x8b, 0x30, 0xb8, 0xb9, 0xa8, 0x30, 0x0b, 0xed, 0xda, 0x7f, 0x46, 0x41, 0x8e, + 0x39, 0xd2, 0x3d, 0x13, 0x83, 0x4f, 0xe0, 0x31, 0xa6, 0x47, 0x15, 0x14, 0xba, 0x56, 0x70, 0xca, + 0x57, 0x3c, 0xf1, 0xc5, 0x35, 0xe7, 0x3d, 0x3d, 0xcd, 
0x09, 0xc0, 0x94, 0x43, 0x1c, 0xc9, 0xa0, + 0x92, 0xf1, 0x5e, 0x6b, 0x07, 0x40, 0x27, 0x61, 0x76, 0xd1, 0x17, 0x78, 0x1c, 0x4e, 0x7f, 0xad, + 0x9e, 0x8a, 0xb2, 0x3b, 0x52, 0x44, 0xb2, 0x67, 0x44, 0xc7, 0x50, 0x26, 0x5d, 0xa1, 0x9d, 0xfe, + 0x2e, 0x10, 0x90, 0x11, 0x31, 0x08, 0xaf, 0x0b, 0xcc, 0xc7, 0x18, 0xfc, 0xd5, 0xc6, 0xf1, 0x47, + 0x07, 0x86, 0x7e, 0xed, 0xb1, 0x29, 0x44, 0x5b, 0x26, 0xe3, 0x7c, 0xcc, 0x40, 0xe7, 0x0c, 0x77, + 0xef, 0x4e, 0x1b, 0x64, 0x47, 0xb6, 0x58, 0x30, 0xb7, 0x93, 0xd9, 0x37, 0x71, 0x63, 0xd0, 0xc5, + 0xe2, 0x50, 0x34, 0x46, 0x45, 0x3b, 0x68, 0x31, 0xfb, 0xb1, 0xcc, 0xf7, 0xb7, 0x80, 0xbc, 0x9c, + 0x08, 0x78, 0xb0, 0xed, 0x2d, 0x91, 0x0a, 0xf7, 0x3e, 0x27, 0x04, 0x83, 0xfd, 0xb1, 0xec, 0xf3, + 0x3d, 0x99, 0x64, 0x84, 0x42, 0xe2, 0xa1, 0x46, 0x37, 0x92, 0x57, 0xd3, 0x2c, 0x8c, 0xb5, 0x27, + 0x01, 0xf2, 0xe9, 0x25, 0x76, 0xb5, 0x04, 0xa3, 0x40, 0x59, 0x1e, 0xee, 0x9b, 0x8d, 0xab, 0x31, + 0x4f, 0x20, 0xbe, 0x9f, 0x2e, 0xae, 0xda, 0xef, 0xfc, 0xeb, 0xe0, 0x9f, 0x11, 0x81, 0xf7, 0x13, + 0xa0, 0x31, 0xf7, 0xe5, 0x53, 0x6e, 0x88, 0x26, 0xa6, 0x34, 0xca, 0x9e, 0xeb, 0x69, 0x6d, 0x7a, + 0x17, 0xe8, 0x67, 0xb9, 0x2d, 0x09, 0x62, 0xbf, 0x3a, 0x00, 0x2f, 0x04, 0x20, 0xab, 0x06, 0xaf, + 0x6c, 0x92, 0xd3, 0xac, 0x05, 0x9b, 0xfa, 0xc3, 0x4a, 0xf2, 0xd0, 0xb2, 0x60, 0xf3, 0xc0, 0x88, + 0x6d, 0x0a, 0x3e, 0x4a, 0x59, 0xf8, 0x35, 0x64, 0x48, 0x85, 0x69, 0x0e, 0x47, 0xec, 0x8b, 0x66, + 0x7c, 0x03, 0x10, 0xc8, 0xf0, 0x29, 0x9e, 0xfd, 0xf9, 0x2a, 0x87, 0x2e, 0xa8, 0x97, 0x69, 0x6e, + 0x15, 0x69, 0x94, 0xcc, 0x1c, 0x81, 0x98, 0x60, 0x54, 0xde, 0x94, 0x20, 0xa2, 0xcf, 0xb4, 0xf2, + 0x25, 0x1e, 0x34, 0xc1, 0xa4, 0x4f, 0x0a, 0x59, 0x7f, 0x44, 0xd5, 0xc0, 0xb4, 0xd3, 0xa3, 0xfe, + 0xd6, 0xae, 0xe9, 0xef, 0x78, 0x93, 0x24, 0x1e, 0x7c, 0x09, 0xa8, 0x12, 0xd0, 0xca, 0x82, 0x9c, + 0x14, 0x8e, 0xd3, 0xc9, 0x62, 0xc8, 0xb7, 0x0c, 0x6e, 0x14, 0x6f, 0xfd, 0x26, 0x36, 0xbc, 0x9a, + 0xb1, 0x72, 0x2e, 0x22, 0x05, 0xed, 0x9b, 0x12, 0x20, 0xfb, 0x11, 0xa2, 0x19, 0xc1, 0x2d, 0xe1, + 0xe3, 0x5c, 0xba, 0x57, 0x62, 0xf5, 0x27, 0x59, 0x9c, 0xea, 0x18, 0xeb, 0xd3, 0x03, 0x68, 0x8e, + 0xad, 0xbb, 0x74, 0xe2, 0xfe, 0x49, 0x1f, 0xf0, 0xb1, 0xa0, 0xf4, 0x84, 0x89, 0x84, 0x2f, 0xd1, + 0x7e, 0x2e, 0xc2, 0x5f, 0x04, 0x38, 0xff, 0x90, 0x7b, 0x1e, 0x7c, 0x1f, 0x12, 0xe3, 0x96, 0x5f, + 0xf4, 0xc8, 0x58, 0x96, 0x4c, 0x3a, 0x32, 0xb6, 0xb5, 0xb7, 0x0d, 0x98, 0x2d, 0x07, 0x0f, 0x75, + 0xc1, 0x30, 0x01, 0x07, 0x13, 0x65, 0xb3, 0xbd, 0xd3, 0xb1, 0xff, 0x07, 0x80, 0x65, 0x42, 0xcd, + 0x42, 0x32, 0xb2, 0x31, 0x5b, 0x56, 0x8f, 0xfa, 0x7c, 0xb5, 0xd1, 0x8e, 0x7b, 0xa9, 0x48, 0x3c, + 0xce, 0xcf, 0x6d, 0xb9, 0xd0, 0x18, 0x6f, 0x09, 0xd3, 0xd9, 0x51, 0x30, 0x73, 0x89, 0xae, 0x5f, + 0x77, 0x3c, 0x7b, 0x4d, 0xb6, 0x8b, 0xb2, 0xf8, 0x26, 0xe5, 0x7c, 0x6f, 0x92, 0xe1, 0x70, 0x11, + 0x27, 0xaf, 0x4c, 0xc7, 0x92, 0x4e, 0x3d, 0x15, 0x40, 0xa6, 0xa9, 0x39, 0x4c, 0x3b, 0x7c, 0x1a, + 0xc5, 0xb0, 0x97, 0x79, 0xff, 0xdd, 0x82, 0xcb, 0x00, 0xe6, 0xcd, 0xdd, 0x1d, 0x15, 0xf9, 0x54, + 0x3c, 0x53, 0x57, 0x4d, 0x0b, 0x32, 0x9d, 0x0e, 0x90, 0xfa, 0x7b, 0x92, 0x10, 0x82, 0xac, 0x85, + 0x6b, 0x99, 0x6d, 0x26, 0x75, 0xd8, 0xca, 0xc2, 0x24, 0xd8, 0xdf, 0x07, 0x0a, 0xce, 0x52, 0xd7, + 0xcf, 0xd0, 0xfb, 0x60, 0xd1, 0xba, 0x6e, 0x3d, 0x06, 0x79, 0xc8, 0x88, 0xc8, 0xcb, 0xdb, 0x3d, + 0x25, 0xb0, 0xbc, 0xd1, 0xa5, 0xea, 0x2b, 0xbb, 0x1d, 0xd9, 0x7e, 0x4c, 0x7a, 0xd8, 0xa4, 0x59, + 0x8e, 0x15, 0x09, 0x12, 0x2a, 0x1c, 0x0f, 0x83, 0x9e, 0xcb, 0xb4, 0x02, 0x29, 0xd2, 0x70, 0x34, + 0x33, 0x39, 0x3c, 0xb5, 0xbc, 0x58, 0x36, 0xb7, 0x79, 0x23, 0x7b, 0x3e, 0xe1, 
0x83, 0xe0, 0x22, + 0x88, 0xdc, 0x09, 0x69, 0xb3, 0x5a, 0x8e, 0xdc, 0x25, 0xff, 0x17, 0x34, 0x85, 0x33, 0x10, 0xe2, + 0xbd, 0x89, 0x32, 0x11, 0xf5, 0xfb, 0x20, 0xfa, 0xc4, 0x78, 0x53, 0x46, 0xd9, 0x44, 0x1f, 0x5c, + 0x5f, 0xd8, 0xc1, 0xc9, 0x74, 0xce, 0x02, 0xa0, 0xea, 0x92, 0xe8, 0x75, 0x8c, 0xcf, 0x27, 0x4d, + 0x26, 0xe5, 0x6c, 0x13, 0x9c, 0x63, 0x8b, 0xa6, 0xd6, 0xf9, 0xc8, 0x1e, 0xb3, 0xe0, 0x40, 0xc8, + 0x8d, 0x35, 0xd0, 0x69, 0xaa, 0x37, 0x59, 0x06, 0xde, 0x5e, 0x8b, 0x1b, 0xf8, 0x36, 0xd1, 0x0c, + 0xc8, 0x2a, 0x93, 0xfb, 0xa3, 0xc7, 0xd4, 0xe3, 0xa6, 0x86, 0x4b, 0xbe, 0x67, 0x50, 0x61, 0x14, + 0xec, 0x5d, 0x5e, 0x40, 0xdd, 0x76, 0x83, 0xad, 0x5c, 0xc1, 0xca, 0x4f, 0x0f, 0x02, 0xc7, 0xa8, + 0x78, 0x3d, 0x46, 0x22, 0x2a, 0x27, 0x74, 0xff, 0x96, 0x57, 0x6f, 0x77, 0x8a, 0xea, 0xcb, 0x71, + 0x2f, 0x87, 0x00, 0x16, 0x34, 0x8c, 0xbb, 0xca, 0xbf, 0x7c, 0x00, 0x36, 0xdf, 0xf3, 0x7e, 0x51, + 0xf2, 0x87, 0x18, 0x4c, 0xb4, 0x33, 0xb0, 0x23, 0x18, 0x23, 0xbb, 0xdb, 0xfd, 0x07, 0x42, 0xc9, + 0x48, 0xad, 0xb0, 0x75, 0x08, 0xc6, 0xe1, 0x4f, 0x8c, 0x18, 0x54, 0x22, 0x92, 0xca, 0x2a, 0xfb, + 0x0b, 0xaf, 0x64, 0xeb, 0x17, 0xb0, 0xd0, 0x32, 0x07, 0xdf, 0x7b, 0xe8, 0x83, 0xbe, 0x0f, 0xac, + 0x32, 0xa0, 0xaf, 0xf8, 0x5f, 0x10, 0x5b, 0x6e, 0x6a, 0xda, 0x58, 0xd7, 0xf1, 0x2e, 0xdc, 0x31, + 0x92, 0xc5, 0xab, 0x38, 0x1b, 0x92, 0x77, 0x19, 0x31, 0xf8, 0x07, 0xf6, 0x58, 0x24, 0x65, 0x38, + 0x4d, 0x76, 0xc3, 0xa5, 0x49, 0x2d, 0x29, 0xaf, 0x34, 0x79, 0xde, 0x56, 0x7d, 0xf5, 0x59, 0xb7, + 0xe4, 0xa3, 0x33, 0xbc, 0xee, 0xfd, 0xcd, 0x53, 0x7e, 0x56, 0x50, 0xa6, 0x0e, 0xf3, 0xda, 0x63, + 0xdb, 0xe5, 0xb7, 0xaa, 0x21, 0x8f, 0x86, 0xde, 0x7d, 0x01, 0x6c, 0x53, 0x01, 0xf4, 0xfa, 0xfa, + 0xf8, 0xe2, 0x54, 0xd5, 0x22, 0x7b, 0x16, 0xc5, 0xa3, 0x7e, 0xe0, 0xa8, 0x7e, 0x63, 0xe3, 0x9f, + 0x48, 0x69, 0x7f, 0xcc, 0xce, 0x6e, 0x24, 0x04, 0xa3, 0xc7, 0xd1, 0xad, 0x25, 0x71, 0x3b, 0xcb, + 0x79, 0x69, 0x17, 0xd9, 0x31, 0xfe, 0x00, 0x74, 0x66, 0x03, 0x02, 0xd7, 0xbc, 0xb1, 0x78, 0xcd, + 0x32, 0x90, 0xe5, 0x79, 0xb2, 0xe8, 0xed, 0x83, 0x3b, 0x40, 0x8a, 0x70, 0x74, 0x19, 0x94, 0x01, + 0xe6, 0xb1, 0xb8, 0x39, 0x9d, 0xa7, 0xad, 0x17, 0xae, 0xf0, 0xd0, 0x53, 0x02, 0x1e, 0x9a, 0xa2, + 0x3f, 0xbc, 0xef, 0xe0, 0x2e, 0x9f, 0x83, 0x3b, 0x20, 0xde, 0xcd, 0x94, 0xf6, 0xed, 0x48, 0x6f, + 0x1f, 0x27, 0x35, 0x6c, 0x88, 0xc8, 0x5d, 0x7a, 0x28, 0xfc, 0x9d, 0xb5, 0xa2, 0x8e, 0xc0, 0x9e, + 0x16, 0x58, 0xa9, 0xcc, 0x5a, 0x43, 0xc7, 0x84, 0x32, 0x85, 0x1c, 0x29, 0x85, 0x72, 0x96, 0xf3, + 0x93, 0xb4, 0x1c, 0xf6, 0x08, 0x63, 0x8f, 0xb7, 0xa4, 0xa6, 0x9d, 0x8a, 0x91, 0x17, 0xd9, 0x6c, + 0x1b, 0x45, 0xed, 0x03, 0x48, 0xdb, 0x92, 0x1e, 0x11, 0xc8, 0x9a, 0x07, 0x23, 0x4c, 0x5f, 0xc8, + 0xab, 0x45, 0x05, 0x9e, 0xba, 0x9b, 0x2e, 0xaf, 0x46, 0xce, 0x57, 0xe7, 0x5a, 0x73, 0xeb, 0x0a, + 0xbb, 0xf5, 0xc8, 0xdb, 0x7f, 0x16, 0xc9, 0x3f, 0xc8, 0x6b, 0x2d, 0x4d, 0xe8, 0x04, 0x79, 0xd3, + 0x53, 0x38, 0x3f, 0x64, 0xf9, 0xf1, 0x2d, 0x9e, 0x71, 0xce, 0x66, 0xa2, 0x64, 0xbd, 0xb1, 0x5c, + 0xaf, 0x18, 0xd9, 0x81, 0x0d, 0x63, 0xf1, 0x7b, 0x75, 0xbd, 0x26, 0x9c, 0x03, 0xa6, 0xee, 0x2c, + 0x7b, 0x86, 0xe1, 0x03, 0x56, 0x88, 0x2b, 0x53, 0x35, 0xe7, 0xac, 0x70, 0xd9, 0x52, 0xfe, 0x76, + 0x7e, 0x9f, 0xf2, 0xec, 0x83, 0xb8, 0xe9, 0x04, 0xd9, 0x33, 0xfe, 0x0a, 0x7f, 0x85, 0x59, 0x78, + 0xa4, 0x6f, 0x91, 0xd7, 0x37, 0x16, 0xdf, 0xd5, 0xd1, 0xa2, 0x47, 0xeb, 0x77, 0xe5, 0x6a, 0xa3, + 0xe5, 0x61, 0x61, 0x88, 0x5f, 0xeb, 0x1f, 0x1f, 0x7c, 0x95, 0x08, 0x40, 0xa6, 0x2e, 0xd5, 0xfa, + 0x38, 0x6c, 0x94, 0xe1, 0x82, 0x54, 0x03, 0xee, 0x1a, 0x03, 0xe3, 0xb3, 0x7e, 0x23, 0x40, 0x63, + 0x6f, 
0x2d, 0xb8, 0x46, 0x2b, 0xd4, 0x6f, 0xbc, 0x84, 0x6c, 0xc0, 0x28, 0x80, 0x85, 0x81, 0x7b, + 0x86, 0x81, 0xef, 0x99, 0xf8, 0xad, 0x4b, 0x57, 0xb9, 0x24, 0xe7, 0x8d, 0xd0, 0x7d, 0x91, 0x13, + 0x18, 0xdd, 0xdd, 0x6a, 0xbf, 0x6c, 0x59, 0xb6, 0xfb, 0x9a, 0xa8, 0x21, 0x0c, 0x39, 0x5c, 0x58, + 0x14, 0xa1, 0x35, 0xa2, 0x63, 0xc7, 0xac, 0xfa, 0x46, 0xca, 0x26, 0x99, 0x0c, 0x82, 0xd7, 0xe9, + 0x65, 0xb9, 0xdd, 0x2a, 0xe9, 0x1f, 0xa1, 0xc5, 0xc2, 0xf6, 0x08, 0x6a, 0xaf, 0x9f, 0xe7, 0x40, + 0x27, 0x47, 0x7c, 0x28, 0xb3, 0x59, 0xb4, 0x77, 0x51, 0x80, 0xb4, 0x91, 0x08, 0x57, 0x58, 0xd3, + 0xb9, 0xa1, 0x16, 0x4a, 0x32, 0xc4, 0x3a, 0x07, 0x38, 0x77, 0x34, 0x6c, 0xaa, 0xb7, 0xad, 0x8a, + 0xe1, 0x04, 0x91, 0xaf, 0x4d, 0xf6, 0x6a, 0xf5, 0x2d, 0x1d, 0xca, 0xdb, 0x29, 0xc8, 0xd7, 0x12, + 0x27, 0x17, 0xd0, 0x45, 0x59, 0x90, 0xd7, 0xcf, 0x10, 0x7d, 0xaf, 0xa1, 0x9a, 0x1c, 0x14, 0x89, + 0x93, 0x96, 0x7b, 0xe9, 0x8f, 0x9a, 0xfb, 0x02, 0xd4, 0x7f, 0x05, 0x61, 0xd0, 0x50, 0x01, 0xc0, + 0x39, 0x15, 0x59, 0xde, 0xfc, 0x1a, 0xb5, 0x71, 0x13, 0x1c, 0x3c, 0xb3, 0x35, 0x65, 0x84, 0xb3, + 0xe3, 0xc7, 0xb0, 0x2b, 0x99, 0x1e, 0x90, 0xab, 0x95, 0x73, 0xa1, 0xa4, 0x82, 0xb8, 0xa4, 0x33, + 0xfb, 0x7e, 0x47, 0xab, 0x72, 0x3e, 0x5b, 0xe5, 0x63, 0x10, 0xd2, 0x18, 0x87, 0x29, 0x1c, 0xc2, + 0x7f, 0x66, 0x06, 0x84, 0x74, 0xfa, 0xaa, 0xd0, 0x5d, 0x9d, 0xd9, 0xd2, 0x48, 0xf1, 0x10, 0x6a, + 0x92, 0x6a, 0xed, 0xa1, 0x0b, 0x3d, 0xf0, 0xe2, 0xc4, 0x08, 0xfd, 0x92, 0x66, 0xd3, 0x73, 0x71, + 0x52, 0xf4, 0x24, 0x9f, 0xaa, 0xaf, 0x20, 0xaf, 0x15, 0x14, 0xd4, 0xde, 0xd6, 0xe5, 0xa1, 0xf9, + 0x49, 0x16, 0x97, 0x7d, 0xfe, 0x21, 0x6f, 0x31, 0xaa, 0xea, 0x02, 0xb6, 0xd7, 0xc9, 0xb2, 0x51, + 0x7e, 0xf0, 0x2f, 0x37, 0xf6, 0xdf, 0x91, 0xff, 0x94, 0x33, 0x8e, 0x21, 0xc9, 0xa5, 0x43, 0x27, + 0x0a, 0x95, 0x38, 0x1f, 0x6a, 0xfa, 0xa4, 0x11, 0x9f, 0x89, 0x5e, 0xd3, 0xe5, 0x19, 0x92, 0xb4, + 0xf7, 0xbb, 0x53, 0x26, 0xc7, 0x10, 0x44, 0x29, 0x0e, 0xf4, 0x9a, 0xa1, 0x7d, 0x62, 0x92, 0xfa, + 0xcc, 0x4a, 0xf0, 0xa6, 0x34, 0x14, 0x5a, 0xd3, 0xda, 0x0f, 0xef, 0x7e, 0xb3, 0x94, 0x6f, 0x0f, + 0x34, 0x7e, 0x3e, 0x8e, 0x84, 0x7f, 0xc1, 0x87, 0x11, 0x46, 0xbd, 0x14, 0x83, 0x8c, 0x3f, 0x03, + 0x08, 0x78, 0x23, 0xa1, 0x75, 0xd3, 0x25, 0xaf, 0x33, 0x9a, 0xa7, 0xb3, 0x7c, 0x3c, 0xf8, 0x8e, + 0xc0, 0x19, 0x3b, 0xaa, 0xf2, 0xa3, 0xc7, 0x68, 0xe1, 0xdf, 0x15, 0x7b, 0x44, 0x92, 0x07, 0x53, + 0x5c, 0x8a, 0xdb, 0x04, 0x86, 0xcd, 0x33, 0xea, 0xeb, 0xea, 0xcc, 0x45, 0x02, 0x16, 0xe7, 0x51, + 0x88, 0x1a, 0xd9, 0x73, 0x43, 0xb8, 0x09, 0x22, 0xbf, 0xcd, 0x7a, 0xf0, 0x46, 0xdc, 0x2a, 0xe5, + 0x45, 0x6c, 0x31, 0xa7, 0xde, 0x41, 0x24, 0xdf, 0x52, 0x03, 0xf1, 0xe2, 0x7f, 0x73, 0x47, 0xd1, + 0x2c, 0x76, 0x0b, 0x7a, 0x0e, 0xd7, 0x86, 0xd5, 0x23, 0x3b, 0x83, 0x6d, 0x2a, 0x4b, 0x4b, 0x3f, + 0x61, 0x76, 0x99, 0x3a, 0xc9, 0x06, 0xfd, 0x7c, 0x31, 0xbb, 0x97, 0xed, 0x3f, 0xa7, 0xf1, 0x2e, + 0x2c, 0xc6, 0xce, 0xb9, 0x6f, 0x30, 0x43, 0x79, 0x36, 0x8b, 0x91, 0x0b, 0x49, 0x95, 0x3a, 0x16, + 0x86, 0x1a, 0xf7, 0x3f, 0xdb, 0x40, 0x6f, 0xe5, 0xf2, 0x78, 0x46, 0x52, 0x7a, 0x70, 0x6a, 0x9c, + 0x1c, 0x83, 0x74, 0xeb, 0xb6, 0x73, 0x89, 0x17, 0x09, 0xad, 0xc0, 0x9c, 0xac, 0xfd, 0x6c, 0x2e, + 0x6d, 0xca, 0x75, 0xcc, 0x02, 0xda, 0x10, 0x34, 0x59, 0xf5, 0x10, 0x64, 0x9c, 0x97, 0xf7, 0x9e, + 0x22, 0xa2, 0xa4, 0x0d, 0xed, 0x3c, 0xfd, 0x55, 0xc1, 0xf2, 0xf0, 0xc2, 0x71, 0x23, 0x81, 0xd8, + 0x11, 0xe4, 0x61, 0xbb, 0xe1, 0x9a, 0xfc, 0x8b, 0x9f, 0x22, 0x6f, 0x85, 0x8a, 0xe6, 0x00, 0xc1, + 0x2a, 0xa7, 0x54, 0xc0, 0x5f, 0x9e, 0xbe, 0x1c, 0xb6, 0x75, 0x9f, 0x3f, 0x0b, 0xdc, 0xd7, 0x4f, + 0xd7, 0x2d, 0xf0, 0xfb, 0x4d, 
0xaa, 0xda, 0x14, 0x66, 0x68, 0xcc, 0xc8, 0x85, 0x8d, 0xe8, 0x8b, + 0x3a, 0x23, 0x54, 0x38, 0xf9, 0x5b, 0x25, 0x8c, 0xd0, 0x20, 0xd5, 0x28, 0x0b, 0x5b, 0x78, 0xa9, + 0xd3, 0xa2, 0xa9, 0x87, 0x5f, 0x1b, 0x1f, 0xc6, 0xe5, 0x1f, 0xf8, 0xd7, 0xd3, 0xa5, 0xe9, 0x18, + 0xc4, 0xf0, 0xf2, 0x06, 0x07, 0xb2, 0xe5, 0xf9, 0xe5, 0xbd, 0x43, 0x03, 0x09, 0x22, 0x1a, 0xb8, + 0xc1, 0x7f, 0x76, 0x8b, 0x97, 0x67, 0xfd, 0xe6, 0x28, 0xca, 0xa8, 0x92, 0x48, 0x76, 0xf8, 0x21, + 0x6f, 0x25, 0x1a, 0x14, 0x35, 0x8b, 0xc2, 0x1d, 0xed, 0x89, 0xca, 0x0d, 0xd8, 0xa3, 0x17, 0xbf, + 0x0e, 0x95, 0xce, 0x64, 0x3a, 0xb6, 0x93, 0xa9, 0x20, 0xae, 0xbd, 0xfa, 0xab, 0x51, 0x84, 0x49, + 0x47, 0x95, 0x50, 0x33, 0x9d, 0xa3, 0xa9, 0xc2, 0x89, 0xf9, 0xee, 0x00, 0x06, 0xa2, 0x27, 0x24, + 0x26, 0x05, 0xdf, 0x8f, 0x65, 0x43, 0xce, 0x02, 0x5a, 0xaf, 0x04, 0xa1, 0x3d, 0xbe, 0xe9, 0x99, + 0x5c, 0xa3, 0x88, 0x7a, 0x51, 0xdc, 0x98, 0x37, 0x6e, 0xc5, 0xb3, 0x18, 0x9e, 0x94, 0x87, 0x28, + 0x88, 0x60, 0x14, 0x87, 0xd5, 0xae, 0x11, 0x86, 0x0e, 0xcb, 0xf1, 0x8f, 0x3c, 0xa5, 0xee, 0x64, + 0x0a, 0xc8, 0xc5, 0x1f, 0x71, 0x2b, 0x89, 0x7b, 0x3e, 0xfa, 0x03, 0xf0, 0xb1, 0x5f, 0x44, 0x91, + 0xb0, 0x39, 0xce, 0x83, 0xc2, 0xce, 0x8f, 0x58, 0x16, 0xaf, 0x0a, 0x6e, 0x64, 0x4f, 0x56, 0xb1, + 0xdf, 0x3e, 0xc0, 0x93, 0xde, 0x55, 0x5f, 0xcb, 0xbc, 0x09, 0x2b, 0x30, 0x36, 0xa5, 0x0c, 0xed, + 0xa3, 0x64, 0xb6, 0x06, 0x98, 0xbb, 0x7e, 0x94, 0xa0, 0x99, 0x82, 0xf5, 0x5c, 0x5a, 0x0f, 0x71, + 0x49, 0xf0, 0x61, 0x58, 0x6c, 0xa7, 0x39, 0xff, 0xe3, 0xc5, 0x49, 0x56, 0x37, 0x46, 0x77, 0x57, + 0x85, 0xaf, 0x88, 0xab, 0x64, 0xf4, 0xc5, 0x16, 0xf2, 0x6f, 0x78, 0x37, 0xef, 0x36, 0x6e, 0xfd, + 0xe6, 0x8a, 0xab, 0xeb, 0x05, 0x6f, 0xae, 0xf1, 0x68, 0x33, 0xcf, 0xb2, 0xf9, 0xd4, 0x77, 0x57, + 0xbd, 0x0d, 0x43, 0x6b, 0x0a, 0xdc, 0xff, 0x89, 0x84, 0x70, 0x9a, 0x99, 0x70, 0xb5, 0x40, 0x7b, + 0x11, 0x82, 0x31, 0x1c, 0x06, 0x39, 0x70, 0x1a, 0xac, 0xe7, 0x24, 0x89, 0xea, 0x89, 0x56, 0x92, + 0x5f, 0x00, 0x8f, 0x1b, 0x14, 0xcc, 0xc0, 0x76, 0x7c, 0xbf, 0x9f, 0x64, 0x97, 0x88, 0x12, 0xca, + 0x00, 0x6f, 0x3b, 0x88, 0x0e, 0x8b, 0x45, 0x6c, 0xaf, 0x3f, 0x4c, 0xa0, 0x09, 0xb8, 0x2e, 0x51, + 0x5b, 0xa1, 0x46, 0xb4, 0x97, 0xb3, 0xf2, 0x10, 0x4e, 0x24, 0x84, 0x86, 0xba, 0xd0, 0x9f, 0x63, + 0x4f, 0x8b, 0x29, 0x90, 0xc2, 0x73, 0xfa, 0x6d, 0x58, 0xf5, 0x76, 0x04, 0x10, 0xf2, 0xa0, 0x15, + 0x02, 0xb4, 0x1e, 0x78, 0x43, 0x76, 0xb5, 0xb2, 0x27, 0x53, 0x88, 0xb3, 0x71, 0x52, 0xfb, 0x70, + 0xf0, 0x9f, 0xe6, 0x76, 0x5e, 0x15, 0xc6, 0x7b, 0x56, 0xd2, 0xfd, 0xba, 0xc4, 0x54, 0x44, 0x48, + 0xb4, 0x9a, 0x2c, 0x90, 0xc3, 0x82, 0x40, 0x85, 0x18, 0xad, 0xcb, 0x5f, 0x14, 0xa9, 0xda, 0x5b, + 0xfa, 0xc0, 0x72, 0x99, 0x89, 0x5b, 0x68, 0xd6, 0xc2, 0xbf, 0x62, 0x45, 0x8a, 0xbe, 0xfa, 0x35, + 0xc0, 0x67, 0xd1, 0x99, 0x28, 0xf0, 0x86, 0xbb, 0x3b, 0x56, 0xb7, 0xab, 0x02, 0x75, 0x89, 0x4a, + 0x2c, 0x2e, 0xae, 0x1e, 0x99, 0xa6, 0xbc, 0x8f, 0xfa, 0x89, 0x07, 0x93, 0xb1, 0x78, 0x9c, 0x79, + 0x0c, 0xf0, 0x3b, 0xc7, 0x22, 0xfd, 0x3c, 0x48, 0x3d, 0xa8, 0x3b, 0xd7, 0x05, 0xc0, 0x02, 0xd8, + 0x8a, 0x5c, 0xc2, 0x6c, 0xed, 0xb0, 0xf1, 0x8e, 0xb4, 0x43, 0xef, 0xdc, 0x7f, 0x9c, 0xe4, 0xa6, + 0x10, 0x86, 0x7e, 0x6e, 0x07, 0x74, 0x86, 0x21, 0xc9, 0xf8, 0x80, 0xea, 0x30, 0xb9, 0x4d, 0xeb, + 0xc8, 0x82, 0x20, 0xc2, 0x6f, 0xab, 0x20, 0xd3, 0xb0, 0xfc, 0x44, 0xe5, 0xe4, 0x4b, 0x3a, 0x97, + 0x43, 0x15, 0x10, 0xcb, 0x7e, 0x07, 0xdb, 0x85, 0xbc, 0x08, 0x97, 0x99, 0xcd, 0xe3, 0x86, 0x71, + 0xce, 0x17, 0x22, 0x68, 0xdb, 0x4c, 0xf8, 0xca, 0x92, 0x7d, 0x38, 0xbc, 0x74, 0x99, 0x31, 0x21, + 0x73, 0xa9, 0x33, 0x30, 0x76, 0x1c, 0xf8, 0x02, 0xaa, 
0xfa, 0x25, 0xda, 0x34, 0x9a, 0xa7, 0xba, + 0x6b, 0x78, 0xc5, 0xb5, 0x5f, 0xdc, 0x4b, 0x4b, 0xb1, 0xd9, 0x74, 0x59, 0x94, 0xfb, 0x99, 0xfe, + 0x60, 0x19, 0x00, 0x5c, 0xd9, 0x0a, 0xbd, 0x21, 0xdf, 0x52, 0x8f, 0x96, 0x05, 0xa6, 0x33, 0x99, + 0x6c, 0x1e, 0xeb, 0xe4, 0x05, 0x45, 0xed, 0x6d, 0x52, 0xfa, 0xcd, 0x3a, 0x62, 0xf3, 0x6e, 0x42, + 0x2c, 0x67, 0xcb, 0x63, 0xad, 0x73, 0xe3, 0xd6, 0x23, 0xb0, 0xdd, 0x67, 0x2d, 0x0a, 0x54, 0x45, + 0x4e, 0x02, 0x97, 0xf7, 0x01, 0xe6, 0x7d, 0x49, 0x92, 0xea, 0x2f, 0x2d, 0x9c, 0x41, 0x03, 0xff, + 0xb9, 0x94, 0xfd, 0x09, 0x68, 0x99, 0xaa, 0x49, 0xb1, 0x36, 0xdf, 0x64, 0x6f, 0x8f, 0x5c, 0xf9, + 0x01, 0x66, 0xea, 0x93, 0xb9, 0x54, 0x28, 0x93, 0xce, 0x0a, 0x01, 0xdf, 0xb6, 0x46, 0x4d, 0x78, + 0x0c, 0x1f, 0x46, 0xe4, 0x63, 0x57, 0x9d, 0x7d, 0x28, 0x34, 0x38, 0x0f, 0x0f, 0x9d, 0x07, 0x2c, + 0x16, 0x63, 0x0b, 0xa1, 0x96, 0xea, 0xae, 0xe6, 0x8e, 0xe4, 0x82, 0xb1, 0x63, 0x64, 0x1b, 0xdb, + 0x73, 0x33, 0xba, 0x1d, 0xe0, 0x58, 0x41, 0xe3, 0x3c, 0x36, 0xdc, 0xdf, 0xe6, 0xa8, 0xc8, 0x59, + 0xe9, 0xc8, 0x81, 0x0a, 0xa5, 0x34, 0xa7, 0x95, 0xd8, 0xff, 0x81, 0x24, 0xec, 0x52, 0xb3, 0xe9, + 0x8f, 0xa0, 0xd2, 0x7b, 0x72, 0xc9, 0xe3, 0x1f, 0xbb, 0x72, 0xf0, 0x41, 0x7a, 0x8c, 0xb6, 0x46, + 0x77, 0xa1, 0xd7, 0x9b, 0x23, 0x02, 0x81, 0x04, 0xe9, 0x86, 0x6f, 0xa5, 0xb3, 0x0c, 0x25, 0x33, + 0x57, 0x21, 0x5a, 0x1d, 0xfc, 0x0a, 0x35, 0xcc, 0x28, 0x40, 0xd1, 0xc7, 0xba, 0x91, 0xf3, 0x7e, + 0x64, 0xb6, 0xc4, 0xc6, 0xe4, 0x3f, 0x64, 0x99, 0x9c, 0xd0, 0xcd, 0x35, 0xf9, 0xb9, 0xdd, 0xaf, + 0x8e, 0xbe, 0x91, 0x89, 0xa1, 0xde, 0xca, 0x4d, 0x57, 0xb8, 0x78, 0x44, 0x02, 0xf1, 0x01, 0x55, + 0xff, 0x0c, 0xbe, 0xfa, 0xe2, 0xa7, 0x90, 0x9f, 0x29, 0xeb, 0xa7, 0xd2, 0x43, 0x60, 0x58, 0x47, + 0x92, 0xb8, 0x52, 0xe5, 0xc8, 0x4c, 0x3d, 0xf7, 0x9f, 0x9c, 0x18, 0x0b, 0xc8, 0xb6, 0x82, 0xcd, + 0x7c, 0x00, 0xf3, 0xce, 0x9e, 0x7c, 0x7f, 0x21, 0xdf, 0x16, 0x30, 0xe5, 0x68, 0x38, 0x84, 0xad, + 0x2b, 0x84, 0x1d, 0xd9, 0xab, 0x97, 0xe5, 0xa1, 0xac, 0xc1, 0xde, 0x66, 0x91, 0x3c, 0x9e, 0x12, + 0x29, 0xce, 0xb2, 0xa7, 0xdf, 0x20, 0xc0, 0xb1, 0x8f, 0xf1, 0xbe, 0xa6, 0x8b, 0xcf, 0x69, 0xff, + 0xa9, 0x50, 0x3a, 0xbe, 0xf3, 0xf7, 0x8d, 0x28, 0x35, 0x66, 0x91, 0x97, 0xc7, 0xec, 0x9c, 0x4b, + 0x35, 0xb4, 0xe6, 0x2e, 0xf6, 0xd0, 0xd7, 0xcf, 0xda, 0x36, 0xbf, 0xa3, 0x18, 0x51, 0x17, 0xa4, + 0x78, 0x7c, 0x5b, 0xa4, 0xdb, 0x27, 0x6e, 0x72, 0x25, 0x74, 0x10, 0xc7, 0xf2, 0xa8, 0x74, 0x60, + 0x09, 0x1c, 0x18, 0x98, 0x35, 0xe5, 0x58, 0x88, 0xf7, 0xe1, 0x03, 0xca, 0x0f, 0xb6, 0x85, 0xe6, + 0x42, 0xdc, 0x22, 0x9c, 0xdc, 0x2e, 0x47, 0x54, 0x2d, 0x4a, 0xe0, 0xb3, 0x6e, 0x40, 0x86, 0xa6, + 0x2b, 0x0a, 0xf9, 0x38, 0x23, 0x0c, 0x63, 0xfc, 0x6b, 0x01, 0x4d, 0x53, 0x1c, 0xbe, 0x81, 0x83, + 0xc9, 0x9b, 0x61, 0x57, 0xfd, 0x27, 0xd7, 0x02, 0x54, 0xfa, 0x11, 0x89, 0xbe, 0xad, 0xed, 0xef, + 0xed, 0x54, 0x34, 0x96, 0x65, 0xf4, 0x2c, 0x16, 0x3c, 0xa8, 0x5a, 0x21, 0x31, 0x95, 0x31, 0x46, + 0xe5, 0x62, 0x52, 0xe0, 0x09, 0x72, 0x6c, 0xf2, 0x27, 0xd1, 0xe1, 0xee, 0xdf, 0x70, 0x8c, 0x66, + 0x7e, 0xbb, 0x96, 0x36, 0x4b, 0xb0, 0x62, 0x32, 0x73, 0x96, 0x00, 0xf4, 0x45, 0xc2, 0x7c, 0x25, + 0xc9, 0x1f, 0x1f, 0xd5, 0x37, 0x44, 0x1b, 0x77, 0x75, 0x31, 0xeb, 0xf6, 0x3c, 0xa0, 0x03, 0xff, + 0xf1, 0xc8, 0xe3, 0xc0, 0x83, 0x8f, 0xfc, 0xc8, 0x9b, 0xc6, 0xb2, 0x24, 0xc1, 0x34, 0x50, 0xa3, + 0x8e, 0xa3, 0xae, 0x70, 0x33, 0x21, 0x15, 0xa9, 0x04, 0x54, 0xc8, 0xa3, 0xe8, 0x3b, 0x6d, 0x67, + 0x7f, 0x3a, 0xa9, 0x92, 0x1e, 0x3a, 0x83, 0x76, 0x10, 0x70, 0xd2, 0x66, 0x78, 0x3c, 0x52, 0x10, + 0x88, 0x87, 0xaf, 0xb8, 0x25, 0xa5, 0xf3, 0x45, 0xa4, 0x78, 0x7e, 0x21, 0xef, 
0x5d, 0x23, 0x44, + 0x8b, 0x52, 0xee, 0xe8, 0x62, 0xa1, 0x5d, 0x8b, 0xda, 0xfe, 0x82, 0xa0, 0xdf, 0xbe, 0x02, 0xbc, + 0xa1, 0xf5, 0xe0, 0x07, 0xb2, 0xf6, 0xef, 0x51, 0xdd, 0x18, 0xac, 0x09, 0x46, 0x4c, 0x16, 0x9b, + 0xd7, 0x29, 0xd5, 0x5c, 0x78, 0xd1, 0x21, 0xea, 0x3f, 0xa5, 0x9f, 0x9f, 0xb6, 0x64, 0x9c, 0xf2, + 0x69, 0x29, 0x09, 0xc2, 0xcb, 0x4a, 0xdf, 0x7f, 0x7a, 0x85, 0x03, 0x26, 0xac, 0xe4, 0x12, 0xf7, + 0xe7, 0x89, 0x50, 0x5b, 0x44, 0x92, 0xee, 0x83, 0x9a, 0xbb, 0x73, 0x58, 0xf2, 0x9b, 0xc6, 0x06, + 0xef, 0x59, 0xee, 0x45, 0x3f, 0xee, 0xa6, 0x55, 0x4c, 0xe5, 0x65, 0x78, 0x8f, 0xf3, 0xfd, 0x71, + 0x12, 0x07, 0xa7, 0x98, 0x25, 0xf1, 0x11, 0xdf, 0x68, 0x8c, 0x3e, 0x16, 0xf3, 0xe8, 0xf6, 0x99, + 0x9d, 0xe4, 0x43, 0x75, 0x1b, 0xa5, 0xb7, 0x47, 0x7e, 0xc1, 0x3f, 0xea, 0x8d, 0xf3, 0xc7, 0xf5, + 0x08, 0x45, 0x02, 0xf4, 0x4d, 0x7d, 0x28, 0x0e, 0x22, 0xc3, 0xf6, 0x03, 0x11, 0x4c, 0x5d, 0xb9, + 0x6d, 0x06, 0x88, 0x3a, 0x87, 0x59, 0xfb, 0x4e, 0x12, 0x09, 0x91, 0xdb, 0x61, 0x51, 0x8a, 0x7c, + 0xad, 0xec, 0x76, 0xb8, 0xe8, 0xd2, 0x4b, 0x1b, 0x92, 0xff, 0xfc, 0x5a, 0xb5, 0xba, 0x18, 0xad, + 0x87, 0x5c, 0x86, 0xc3, 0x0d, 0xb1, 0x33, 0x9b, 0x75, 0x18, 0x44, 0xe7, 0x08, 0xa1, 0x87, 0xf8, + 0xf6, 0x58, 0xc9, 0x2a, 0xf8, 0xb1, 0xdd, 0x7f, 0xd2, 0x86, 0xc0, 0xf7, 0x4b, 0xbb, 0x40, 0x51, + 0x16, 0xb4, 0x64, 0x3c, 0x63, 0xc4, 0x99, 0xc7, 0x58, 0x25, 0x42, 0x62, 0xf5, 0x84, 0x87, 0x3c, + 0xd8, 0x87, 0xe5, 0x12, 0x7e, 0x6a, 0x79, 0x7a, 0xa6, 0xb7, 0x45, 0x70, 0xe2, 0x2c, 0x62, 0xfc, + 0xbc, 0x6a, 0x36, 0x24, 0x1d, 0xf8, 0x51, 0x78, 0x3a, 0x35, 0x43, 0x70, 0xc6, 0x1c, 0xee, 0x83, + 0xd8, 0x21, 0x7e, 0xe8, 0xe1, 0xa3, 0x44, 0xd4, 0x5a, 0x84, 0xf4, 0x82, 0x08, 0xde, 0x95, 0xd3, + 0xd3, 0x0c, 0xc3, 0xd5, 0x79, 0xe1, 0x8e, 0x8a, 0xe1, 0x48, 0x24, 0xb4, 0x73, 0xd7, 0x7a, 0xf4, + 0x2a, 0xda, 0xf0, 0x66, 0xa3, 0x61, 0x9f, 0x50, 0x1c, 0x17, 0x89, 0x29, 0x2a, 0x63, 0x86, 0x8c, + 0x68, 0x1b, 0x93, 0x74, 0x5a, 0x09, 0x25, 0xa1, 0x93, 0x09, 0xe4, 0x31, 0xbe, 0x7d, 0x5e, 0xbb, + 0xc8, 0x89, 0x3d, 0x44, 0x91, 0x9c, 0xf0, 0x76, 0x58, 0x26, 0xa4, 0xc5, 0x1b, 0x8e, 0x45, 0xf9, + 0x7a, 0x6c, 0x75, 0x55, 0xe1, 0x26, 0x7c, 0x91, 0x84, 0xa3, 0xd3, 0xd9, 0x62, 0x48, 0x06, 0xc0, + 0x87, 0xad, 0xdf, 0x35, 0x44, 0x9d, 0x45, 0x0e, 0x25, 0xd5, 0x4d, 0x14, 0x98, 0xc7, 0x59, 0xf7, + 0xb4, 0xe5, 0x22, 0x5c, 0x73, 0x97, 0x52, 0x95, 0x23, 0x9a, 0xc0, 0x6e, 0xeb, 0xf6, 0x6a, 0xc2, + 0xf1, 0x95, 0x53, 0xfa, 0x3f, 0x9f, 0x42, 0xc3, 0x79, 0x42, 0x25, 0x15, 0xed, 0x02, 0xc4, 0x48, + 0x99, 0xd2, 0x42, 0xbf, 0x66, 0xcf, 0x23, 0xe8, 0x56, 0xfe, 0x16, 0x56, 0x0e, 0x84, 0x30, 0x26, + 0x2a, 0x4e, 0xb5, 0xb0, 0x96, 0x1a, 0xa9, 0xce, 0x4c, 0xad, 0x1b, 0x29, 0xa8, 0xe8, 0xcb, 0x44, + 0x37, 0x70, 0xa1, 0x8b, 0x9a, 0xcf, 0x3a, 0x96, 0x99, 0xf1, 0xc6, 0xbf, 0x24, 0x8c, 0xc0, 0xee, + 0x94, 0x2f, 0x03, 0x8e, 0x89, 0xaa, 0x0a, 0xed, 0x0e, 0x41, 0x89, 0x95, 0x74, 0xf1, 0x66, 0x8e, + 0xc4, 0x2c, 0x0e, 0xfa, 0x51, 0x25, 0x88, 0xb2, 0x34, 0xea, 0x69, 0x16, 0x59, 0x81, 0xa7, 0x9d, + 0xc1, 0x0c, 0x00, 0xe2, 0x21, 0xb9, 0x25, 0x9b, 0x69, 0x8d, 0x3e, 0x66, 0xa8, 0x39, 0x37, 0x5d, + 0xeb, 0x62, 0xb6, 0x38, 0xb6, 0x05, 0x3d, 0x80, 0x48, 0x24, 0x01, 0xde, 0x56, 0x60, 0x8d, 0x50, + 0xb1, 0x73, 0xb2, 0x74, 0x94, 0x45, 0xfe, 0xf8, 0x67, 0x1f, 0x16, 0x3c, 0xf4, 0xe7, 0x1a, 0xd5, + 0x82, 0xcc, 0x3b, 0x97, 0x25, 0x33, 0xe2, 0x83, 0xc7, 0xd8, 0x82, 0x27, 0xf6, 0x53, 0x4c, 0x79, + 0x25, 0x16, 0x52, 0xdc, 0xf3, 0x20, 0x0b, 0x08, 0x86, 0x4f, 0x6a, 0xe6, 0xcd, 0xe6, 0x12, 0x14, + 0x82, 0xfa, 0x58, 0x6d, 0x09, 0x9f, 0xc1, 0x9e, 0x70, 0xa5, 0x8c, 0x30, 0x27, 0xea, 0x66, 0xa9, + 0x6e, 
0x4a, 0x88, 0x75, 0x92, 0x01, 0x5e, 0x9f, 0xf6, 0x09, 0x3b, 0xf7, 0x6e, 0x5f, 0xd7, 0x69, + 0x07, 0x48, 0xf2, 0x96, 0x97, 0x8c, 0x6f, 0x04, 0xea, 0x93, 0xba, 0xdf, 0x2e, 0x4d, 0xf0, 0x88, + 0x71, 0xad, 0x73, 0xce, 0xcc, 0x0c, 0xa3, 0xda, 0x15, 0x12, 0x3a, 0x25, 0x92, 0xab, 0xf2, 0x80, + 0x7a, 0x52, 0x93, 0x00, 0xb1, 0x08, 0xde, 0x06, 0x55, 0xd2, 0x14, 0xfc, 0xca, 0x81, 0x77, 0x45, + 0xe2, 0x63, 0x16, 0x07, 0x9c, 0xb6, 0xcb, 0x15, 0xe7, 0x0f, 0x44, 0xaf, 0x5d, 0x57, 0x1c, 0xfd, + 0x40, 0x2d, 0x1f, 0xb3, 0x6f, 0x5e, 0x11, 0xc7, 0xea, 0x0f, 0x28, 0x27, 0xf0, 0xf5, 0x47, 0x9a, + 0xa3, 0x3c, 0xc3, 0xdd, 0xd4, 0xdc, 0xe0, 0xd6, 0xfa, 0x90, 0x1f, 0xd7, 0xd6, 0x6f, 0xa1, 0x50, + 0x80, 0xd9, 0x53, 0x89, 0x40, 0xe8, 0xe0, 0xd6, 0x11, 0x6b, 0x0a, 0x17, 0x0d, 0xce, 0x99, 0x27, + 0x68, 0x14, 0xdb, 0x0a, 0x25, 0x6f, 0xa3, 0x1c, 0x93, 0xf5, 0x4a, 0x80, 0x3b, 0x1a, 0xea, 0xcc, + 0xe1, 0xd6, 0xb7, 0x27, 0xa3, 0x02, 0xc6, 0x09, 0x05, 0xcc, 0x39, 0xfd, 0x4d, 0xfb, 0x99, 0x6a, + 0x0f, 0x83, 0x6b, 0x16, 0x74, 0xf6, 0xe5, 0x3a, 0x3a, 0xcf, 0x55, 0x7a, 0x43, 0x72, 0x02, 0x94, + 0xff, 0x45, 0x81, 0x2b, 0x26, 0xa6, 0x4e, 0x2d, 0xc6, 0x07, 0x85, 0x12, 0x2d, 0x5e, 0x58, 0xf2, + 0x11, 0x76, 0x89, 0xf9, 0xa9, 0x4a, 0x31, 0x35, 0x0b, 0xd1, 0x96, 0x55, 0x12, 0x08, 0x75, 0xab, + 0x41, 0x44, 0x2e, 0xdd, 0x68, 0xd8, 0x26, 0xc2, 0xd8, 0xbc, 0x99, 0x37, 0xed, 0x26, 0x07, 0xb4, + 0xec, 0x37, 0x74, 0xfe, 0x64, 0xd5, 0x95, 0xe3, 0xd8, 0x7c, 0xa2, 0x3b, 0x62, 0xfe, 0xe8, 0xd0, + 0x5c, 0x21, 0x36, 0x9c, 0xb4, 0xd9, 0x2d, 0xf3, 0x1d, 0xc6, 0xb1, 0xd7, 0x9a, 0x19, 0x12, 0x68, + 0x07, 0x9c, 0xc8, 0xff, 0x4e, 0xfa, 0x9e, 0x13, 0xd3, 0xbf, 0x61, 0xce, 0x4f, 0x2e, 0x6a, 0x06, + 0x9e, 0x0d, 0xc4, 0xd3, 0xfd, 0xed, 0x09, 0xfd, 0x45, 0x82, 0xf5, 0x40, 0x1f, 0x3e, 0x8e, 0xa0, + 0xda, 0xff, 0x8c, 0x68, 0x7f, 0x48, 0x99, 0x27, 0x15, 0x19, 0xec, 0x0c, 0x65, 0x0c, 0x79, 0xca, + 0x59, 0x44, 0x07, 0x5d, 0xdb, 0x00, 0x51, 0x59, 0x90, 0x49, 0xd0, 0x9f, 0xca, 0x81, 0xc1, 0x22, + 0xff, 0xef, 0xfa, 0x7a, 0x4c, 0xbc, 0x32, 0xbc, 0x88, 0x78, 0x3a, 0x76, 0x23, 0xff, 0x74, 0xdb, + 0x7c, 0xc4, 0x6a, 0xe9, 0x26, 0x0a, 0x1f, 0xe3, 0x53, 0x40, 0x06, 0xa3, 0xf1, 0xf1, 0x02, 0x25, + 0x37, 0xad, 0xb9, 0x10, 0x99, 0x5e, 0xd6, 0x40, 0x96, 0xa3, 0x92, 0x4b, 0xcf, 0x6a, 0x87, 0xaf, + 0x55, 0x5b, 0xf3, 0xf2, 0xc5, 0x1e, 0xdb, 0xb2, 0x8e, 0xb6, 0xf5, 0x5b, 0xe0, 0xd5, 0xd3, 0x35, + 0x13, 0x2f, 0xb1, 0xf4, 0xdf, 0xfa, 0x92, 0x58, 0xa9, 0x61, 0x2a, 0x50, 0xeb, 0xee, 0x88, 0x04, + 0x55, 0x70, 0xae, 0x3b, 0xcd, 0x82, 0xf7, 0x40, 0x3c, 0x45, 0xd2, 0x14, 0x1b, 0x18, 0xc9, 0xc2, + 0x61, 0x65, 0x5b, 0xd7, 0x0a, 0x06, 0x16, 0xba, 0xd6, 0xb2, 0x67, 0xf2, 0x7e, 0x63, 0xda, 0xb4, + 0x33, 0x6e, 0xf1, 0xfd, 0xcd, 0x3d, 0x82, 0x98, 0xc3, 0xb1, 0x84, 0x00, 0xf7, 0xdd, 0x6b, 0x89, + 0x52, 0xab, 0x43, 0xe7, 0xd7, 0xf6, 0xb6, 0x69, 0x38, 0xc1, 0xdc, 0x59, 0xd3, 0xce, 0xe9, 0x75, + 0x40, 0xee, 0x60, 0x16, 0x2b, 0x75, 0x8b, 0x32, 0xdf, 0x4f, 0x27, 0x00, 0x03, 0x64, 0x17, 0x89, + 0x3b, 0x7b, 0x35, 0xbb, 0x84, 0x0c, 0x96, 0x0e, 0x8f, 0x2d, 0xd9, 0xd5, 0x2f, 0x9a, 0xa4, 0x69, + 0x91, 0xcc, 0xd2, 0x04, 0xfa, 0xb9, 0x75, 0x9f, 0x8f, 0xb6, 0x64, 0x6c, 0xdf, 0x00, 0xbd, 0x04, + 0x36, 0x38, 0xc0, 0x4c, 0xc2, 0x2f, 0x15, 0x83, 0x17, 0x2e, 0x3a, 0x9f, 0x3a, 0x86, 0xe5, 0x2b, + 0xd3, 0x9f, 0x88, 0xd6, 0x31, 0x4a, 0x3b, 0xf1, 0xb7, 0x6d, 0xfb, 0x9d, 0xec, 0xd1, 0x8f, 0x44, + 0x0b, 0x2d, 0xc1, 0xa2, 0x92, 0x1c, 0x54, 0x11, 0x3b, 0xee, 0x10, 0x72, 0xde, 0x78, 0x07, 0x47, + 0x78, 0xb6, 0x39, 0x0e, 0x5b, 0x98, 0x4a, 0xa4, 0x51, 0x41, 0x1b, 0x0c, 0x59, 0xc2, 0xc1, 0x31, + 0x60, 0x3b, 0xb8, 0xdc, 0xd1, 
0x99, 0x9c, 0xd5, 0xb1, 0xaa, 0xcf, 0xea, 0x7d, 0xf4, 0x70, 0x3e, + 0x86, 0x3e, 0x17, 0x90, 0xd5, 0x26, 0x86, 0x87, 0x70, 0xe2, 0xb8, 0x17, 0x01, 0x65, 0xab, 0x82, + 0xa7, 0xa0, 0x83, 0x9f, 0xea, 0x84, 0x33, 0xd5, 0x34, 0x29, 0x32, 0x8e, 0xf8, 0x0e, 0x2d, 0x56, + 0x63, 0xd2, 0x0f, 0x84, 0x05, 0xcc, 0xbb, 0x92, 0x00, 0xcc, 0x22, 0x9c, 0x2c, 0x59, 0xfa, 0xde, + 0x08, 0x0a, 0xd8, 0x9a, 0x62, 0x6b, 0x8e, 0x99, 0x8e, 0x77, 0xce, 0x1d, 0xba, 0x39, 0x30, 0x1a, + 0xa7, 0x62, 0x5b, 0xb9, 0x1b, 0x00, 0xe1, 0x08, 0xf7, 0x08, 0x17, 0xff, 0xc8, 0xab, 0xbb, 0xd2, + 0x08, 0xa4, 0x86, 0x24, 0xaa, 0x98, 0x91, 0xee, 0x1d, 0x8b, 0x0c, 0xf8, 0x15, 0x53, 0x7b, 0xcd, + 0x21, 0xdf, 0x1c, 0x5f, 0xc4, 0xcb, 0x82, 0x6a, 0xd5, 0x5c, 0xb7, 0x58, 0x2d, 0x97, 0x1a, 0xba, + 0x69, 0x9e, 0xef, 0xca, 0xf3, 0x6e, 0x02, 0xb5, 0x24, 0x55, 0x8f, 0x29, 0xb5, 0x27, 0xa0, 0x55, + 0xb9, 0x8d, 0xa5, 0x10, 0x2f, 0x60, 0xf9, 0x5d, 0x2a, 0x87, 0x67, 0xb1, 0xf4, 0x34, 0xda, 0x31, + 0xfd, 0x33, 0x6c, 0xcd, 0x39, 0xb4, 0xcd, 0xc7, 0x0d, 0x0b, 0x46, 0x6a, 0x20, 0x1e, 0x98, 0x9b, + 0x16, 0xd4, 0xb4, 0x20, 0xdf, 0x59, 0xcc, 0xd6, 0x0d, 0x72, 0x95, 0x6e, 0x97, 0xe2, 0x68, 0xe6, + 0x0e, 0x82, 0x76, 0xe9, 0xde, 0xea, 0x3c, 0x34, 0x55, 0x38, 0x37, 0x2d, 0x24, 0x6e, 0xa6, 0x84, + 0x9c, 0xa0, 0x10, 0x99, 0xc6, 0x89, 0xeb, 0xb7, 0x75, 0x57, 0x0f, 0xb2, 0xc5, 0x52, 0x2f, 0x10, + 0x0d, 0xb5, 0xad, 0xee, 0xb8, 0x94, 0x8c, 0xc7, 0x0e, 0x7a, 0x27, 0xcd, 0x13, 0xbe, 0x0f, 0x02, + 0x42, 0x8f, 0x5f, 0xc8, 0xb6, 0x66, 0xe1, 0xdc, 0x43, 0x38, 0xf3, 0xc1, 0x05, 0x50, 0x71, 0xe1, + 0xe5, 0xb3, 0x49, 0xef, 0x28, 0x86, 0x27, 0x96, 0x30, 0xd1, 0xe9, 0x65, 0x60, 0xfb, 0x26, 0xab, + 0xa9, 0x7b, 0x1c, 0x02, 0x4d, 0x23, 0x15, 0xb4, 0xd1, 0x33, 0x8c, 0x5a, 0x5a, 0x55, 0x5f, 0x42, + 0x08, 0x64, 0x60, 0x9f, 0xca, 0x19, 0xb7, 0xa3, 0x85, 0x01, 0x41, 0xfa, 0x51, 0x56, 0x2f, 0x48, + 0x7b, 0x97, 0x47, 0x00, 0xe3, 0x6e, 0x6c, 0xe6, 0x88, 0x3e, 0xaf, 0xf2, 0x2d, 0xee, 0xa8, 0x21, + 0x16, 0x35, 0x01, 0xe4, 0xfd, 0xc2, 0xda, 0xf2, 0x12, 0xaf, 0x42, 0x0d, 0xd6, 0xe3, 0xc1, 0x96, + 0x6b, 0x98, 0x0a, 0xff, 0xf5, 0xa0, 0x3d, 0x10, 0x13, 0xdf, 0x95, 0xb9, 0xb0, 0xda, 0x7a, 0xc5, + 0x6e, 0xd3, 0xec, 0xec, 0x34, 0x09, 0x3d, 0x5f, 0x29, 0x72, 0xc9, 0x46, 0xf7, 0x4c, 0xf9, 0x47, + 0xd8, 0xe5, 0xc4, 0x5d, 0x26, 0x44, 0x35, 0xdd, 0x37, 0xc6, 0xe4, 0xb3, 0x0f, 0xb3, 0x1e, 0x2e, + 0xc9, 0x07, 0x43, 0x5f, 0xcf, 0x90, 0x0e, 0xf1, 0x7a, 0x01, 0x66, 0x0a, 0x48, 0x6a, 0xdb, 0x9b, + 0x32, 0x50, 0x5c, 0x78, 0xa3, 0x5a, 0x0d, 0xbc, 0x6b, 0xcf, 0x95, 0x16, 0xff, 0xc3, 0xd1, 0x55, + 0xfd, 0x26, 0x38, 0xce, 0xa5, 0xe9, 0xbd, 0xd6, 0x8c, 0xba, 0x64, 0xef, 0x4e, 0xd5, 0x9e, 0x75, + 0x59, 0xed, 0x07, 0xf7, 0x8f, 0x6e, 0x3f, 0x1d, 0x94, 0x41, 0xb4, 0xd4, 0xa0, 0x03, 0x5e, 0xbd, + 0x3f, 0xcb, 0xd8, 0x9a, 0xb9, 0x10, 0xd5, 0x48, 0x93, 0xa9, 0x73, 0x22, 0xe1, 0x9d, 0x0e, 0x7d, + 0x58, 0x3c, 0xf7, 0x52, 0x71, 0x87, 0x52, 0x88, 0x5b, 0xf1, 0x47, 0x64, 0x99, 0x7f, 0x67, 0x98, + 0x5d, 0x28, 0x20, 0x1f, 0xf2, 0xab, 0x3d, 0x1f, 0xd7, 0x7e, 0x41, 0x8b, 0x78, 0x86, 0xd3, 0xe7, + 0x12, 0xd8, 0x07, 0x3a, 0x3f, 0xb2, 0xc4, 0xac, 0x31, 0xbc, 0x86, 0x1e, 0xbb, 0xde, 0x36, 0x3a, + 0xcf, 0x4a, 0xf2, 0xb6, 0x93, 0x0e, 0x3a, 0x86, 0x0c, 0x19, 0xf6, 0xba, 0x1b, 0xa5, 0x97, 0xbd, + 0x3d, 0xfd, 0x1a, 0x7e, 0x96, 0xa1, 0xa3, 0xe1, 0x88, 0xaa, 0xc8, 0x93, 0x98, 0x80, 0x12, 0x23, + 0x8c, 0xdf, 0xac, 0x69, 0x88, 0xbe, 0xc6, 0x29, 0xdf, 0x8b, 0x0c, 0x54, 0x16, 0x86, 0x05, 0x6b, + 0x7c, 0xf8, 0x43, 0x84, 0x28, 0xa7, 0x1d, 0x5e, 0x37, 0x9d, 0xd2, 0xf5, 0x1c, 0x53, 0x3f, 0x26, + 0xbf, 0x16, 0x64, 0x9a, 0xb3, 0x92, 0x4e, 0xba, 0x6b, 
0x1d, 0x67, 0xb4, 0x81, 0x5f, 0x51, 0xb1, + 0xdf, 0x66, 0x47, 0xe7, 0xa4, 0xf4, 0xb6, 0x00, 0xa1, 0x6f, 0x65, 0x1c, 0x2c, 0x7a, 0x9d, 0x4d, + 0xa9, 0x63, 0x01, 0xba, 0x0e, 0xfc, 0xf0, 0x87, 0xff, 0x50, 0xe1, 0x87, 0xd0, 0x29, 0x9e, 0xf3, + 0xe7, 0x42, 0x71, 0xd7, 0x23, 0x2f, 0x58, 0x4b, 0x09, 0xc7, 0x51, 0x97, 0xd1, 0x7f, 0x8a, 0x54, + 0x7d, 0xbe, 0xf9, 0x05, 0x6b, 0x60, 0x48, 0x5d, 0xcd, 0xd1, 0xaa, 0x6d, 0xe5, 0x06, 0x98, 0x4e, + 0x4b, 0x51, 0x16, 0x2a, 0x1f, 0x65, 0xb4, 0x0b, 0x06, 0x21, 0xfd, 0xa1, 0x02, 0xc6, 0x57, 0x49, + 0x38, 0x05, 0xd9, 0xe6, 0x6d, 0x78, 0xd7, 0x1c, 0xd6, 0x2b, 0x90, 0x9a, 0xdd, 0x14, 0x73, 0x37, + 0xce, 0xd2, 0x4d, 0xe6, 0x2d, 0x0c, 0xca, 0x35, 0x6d, 0xe3, 0xd8, 0xbf, 0x8b, 0x1b, 0xb5, 0x7f, + 0xc8, 0xfa, 0x62, 0x9d, 0xf1, 0x86, 0x82, 0xbc, 0xea, 0xa5, 0x68, 0x61, 0x61, 0x5e, 0xfd, 0x32, + 0xb6, 0x97, 0x5b, 0x1a, 0xed, 0x0b, 0x5a, 0x74, 0xb4, 0xa3, 0xa0, 0x49, 0x36, 0x95, 0x11, 0x7b, + 0x08, 0xee, 0x2b, 0xcc, 0xe7, 0xb3, 0x07, 0xc6, 0x11, 0x95, 0x80, 0xb8, 0xfc, 0xad, 0xc4, 0x5b, + 0xf0, 0x0c, 0xbf, 0x1e, 0x5a, 0x03, 0xf4, 0x71, 0x53, 0x66, 0x18, 0xf6, 0x83, 0xe3, 0x7a, 0xf6, + 0xb0, 0x54, 0x40, 0xde, 0xbc, 0x2d, 0x99, 0xa2, 0xe9, 0x48, 0x3b, 0x27, 0x8a, 0xeb, 0x53, 0x17, + 0x63, 0xee, 0xa9, 0xd5, 0xc2, 0x78, 0xf6, 0x98, 0x39, 0x09, 0xd8, 0xae, 0x49, 0x1d, 0xfd, 0x45, + 0x1c, 0x83, 0xea, 0xb9, 0x05, 0x48, 0xf4, 0x84, 0xe7, 0x4f, 0x07, 0x9f, 0x5b, 0x59, 0x23, 0xad, + 0x74, 0x60, 0x85, 0x12, 0xd3, 0xe7, 0xab, 0xb6, 0x23, 0x11, 0x48, 0x67, 0x29, 0x8d, 0x31, 0x35, + 0x7a, 0x26, 0x68, 0x29, 0x0a, 0x1a, 0xc0, 0x12, 0x62, 0x52, 0x91, 0x7a, 0x15, 0xef, 0xe8, 0xc5, + 0x7b, 0x44, 0x25, 0xb1, 0x84, 0x81, 0x21, 0xd0, 0x2f, 0xc0, 0x87, 0x07, 0x42, 0x8e, 0xc7, 0x02, + 0x9b, 0xc5, 0xf6, 0x7b, 0x46, 0xc8, 0xdb, 0x3f, 0x67, 0xc7, 0xdc, 0x05, 0xbe, 0x83, 0x93, 0x78, + 0xf7, 0xb4, 0xbd, 0xfa, 0x80, 0xff, 0xc3, 0x8a, 0x1e, 0xa7, 0x0e, 0x54, 0x80, 0xbf, 0x08, 0x9e, + 0xe4, 0xaa, 0x2b, 0x67, 0xb8, 0xda, 0x89, 0x56, 0xfc, 0x2e, 0xbb, 0xc4, 0x55, 0x5d, 0xf9, 0xfd, + 0xf7, 0xd3, 0xbd, 0x42, 0xc4, 0x2d, 0x9d, 0xb8, 0x2f, 0xe0, 0x85, 0xde, 0xe1, 0x96, 0xe6, 0xdc, + 0x0e, 0xb1, 0x11, 0xc8, 0xf1, 0x0d, 0xc6, 0xb6, 0x8b, 0x5c, 0xd2, 0x21, 0x3e, 0x80, 0xe2, 0x98, + 0x98, 0x7b, 0x67, 0x04, 0xd0, 0xfb, 0x38, 0xe9, 0x9a, 0x77, 0x3f, 0x83, 0x29, 0x62, 0x5a, 0x1d, + 0xb1, 0xc6, 0xba, 0xb6, 0x2c, 0x61, 0x37, 0x53, 0xed, 0x4d, 0x93, 0x0b, 0xb5, 0x93, 0xef, 0xa1, + 0xc0, 0xdc, 0x53, 0xac, 0x64, 0x0f, 0xba, 0x14, 0xb4, 0xf9, 0xe4, 0xc7, 0x7c, 0x03, 0x98, 0x61, + 0x80, 0x4f, 0x15, 0xd6, 0xde, 0x29, 0x94, 0x89, 0x52, 0xb0, 0x00, 0x8a, 0xf9, 0x09, 0x8b, 0x04, + 0x61, 0x11, 0xad, 0x8d, 0x5c, 0x52, 0xec, 0xe1, 0x78, 0xc0, 0x2c, 0x2e, 0xee, 0xe5, 0x73, 0xf2, + 0xf0, 0x04, 0x9a, 0x68, 0x79, 0x18, 0x1a, 0xe3, 0x8f, 0xb9, 0x62, 0xf5, 0xe6, 0xf9, 0x1c, 0xe3, + 0xb2, 0x36, 0xdd, 0x03, 0x1c, 0x6e, 0xda, 0x24, 0xea, 0xab, 0x71, 0x8a, 0x5f, 0xf4, 0xd4, 0x12, + 0xf0, 0xaf, 0xbb, 0x62, 0x39, 0xf8, 0x59, 0xec, 0xfd, 0xc4, 0x52, 0x79, 0x68, 0x72, 0xec, 0x34, + 0x3e, 0x12, 0xfe, 0xde, 0xaf, 0x51, 0xcb, 0xe8, 0x06, 0x7b, 0x6c, 0x27, 0x7d, 0xf7, 0x89, 0x8b, + 0x98, 0xdb, 0x55, 0xc2, 0xeb, 0x55, 0xdd, 0xb8, 0x62, 0x98, 0xb2, 0xbb, 0x6f, 0x4c, 0x22, 0x80, + 0x8a, 0xbf, 0x88, 0xf5, 0xd0, 0xac, 0x35, 0xd4, 0xda, 0xad, 0x58, 0xf5, 0x5f, 0x2f, 0x21, 0xa5, + 0xea, 0xdd, 0xb8, 0x1c, 0x5f, 0xc0, 0x1d, 0x65, 0x48, 0x0c, 0x5e, 0xa8, 0x20, 0x3f, 0x8a, 0xf6, + 0xcc, 0xea, 0x46, 0x9f, 0x86, 0x38, 0xab, 0x9a, 0x7e, 0x82, 0xc1, 0x0f, 0x96, 0xb2, 0x5a, 0x06, + 0x3d, 0xe3, 0xe2, 0x32, 0x0f, 0x41, 0x1b, 0xac, 0xf2, 0x29, 0x26, 0xc0, 0x82, 
0x82, 0x71, 0x92, + 0xa7, 0xbf, 0xf3, 0x1f, 0xa8, 0xc9, 0x62, 0x78, 0x0e, 0xf1, 0xa1, 0x55, 0x03, 0x18, 0x02, 0xed, + 0xc2, 0xfc, 0x8c, 0x5d, 0x09, 0xaa, 0xee, 0x80, 0x9b, 0x3d, 0x85, 0x87, 0xcc, 0x6a, 0xda, 0xcd, + 0x50, 0x3e, 0x2b, 0x24, 0x65, 0xb8, 0x01, 0x7d, 0x15, 0x1b, 0x2f, 0x5d, 0xa3, 0x26, 0xb7, 0xf0, + 0x0c, 0x40, 0x6a, 0x04, 0x0c, 0xf5, 0x60, 0xbf, 0x18, 0x1d, 0x1e, 0x0c, 0x7a, 0x6c, 0x06, 0xae, + 0x41, 0x40, 0x02, 0x05, 0xfc, 0x54, 0x0f, 0xe9, 0xbc, 0x37, 0x76, 0xa7, 0x93, 0x24, 0xa8, 0xd5, + 0x24, 0x81, 0x40, 0xf5, 0x4a, 0x00, 0xca, 0x9b, 0xc4, 0x5f, 0x3b, 0x3b, 0x24, 0xa5, 0x7f, 0xd0, + 0x5d, 0xd7, 0x35, 0x12, 0xc2, 0x6b, 0x53, 0x39, 0x28, 0x34, 0xc6, 0xd9, 0xcb, 0xe2, 0xc2, 0xbb, + 0x49, 0xd3, 0x57, 0x2f, 0xfc, 0x32, 0xfc, 0x50, 0xae, 0x10, 0x2d, 0x27, 0xfb, 0x26, 0x8c, 0xdf, + 0x11, 0xf4, 0xaa, 0x42, 0x0f, 0xe1, 0x16, 0xcb, 0xc7, 0x46, 0x89, 0xdf, 0xc8, 0x76, 0xcc, 0x41, + 0x02, 0x58, 0xb0, 0x94, 0xae, 0xb2, 0x5f, 0xf9, 0x0b, 0xb0, 0xf2, 0xa4, 0xbd, 0x6b, 0xe7, 0x8e, + 0x70, 0x58, 0xa0, 0x18, 0x5b, 0xd9, 0x84, 0x08, 0xe4, 0xe2, 0x00, 0x76, 0x46, 0x1c, 0x2d, 0x45, + 0x7f, 0x2b, 0x05, 0x96, 0x41, 0x29, 0xe6, 0x47, 0x05, 0x41, 0xa9, 0x4a, 0x33, 0x6a, 0x7e, 0x64, + 0x32, 0xc2, 0x25, 0xbe, 0xa8, 0x6d, 0x7e, 0xaa, 0x0e, 0x58, 0x37, 0x20, 0x4f, 0xaf, 0x04, 0x92, + 0xd3, 0x92, 0x80, 0x1c, 0xdc, 0xc9, 0x8e, 0x99, 0x09, 0x02, 0xb6, 0x03, 0x78, 0x4a, 0x1d, 0x4c, + 0x27, 0xd3, 0x54, 0x80, 0x6f, 0x19, 0xb6, 0x39, 0xa8, 0x59, 0xfa, 0xc9, 0x6b, 0x48, 0xfa, 0x9a, + 0xef, 0x2b, 0x8a, 0x0c, 0xc9, 0x2e, 0x02, 0x6a, 0x94, 0x74, 0xbe, 0xde, 0x63, 0x44, 0xef, 0x04, + 0xb6, 0x21, 0x8f, 0x4b, 0x09, 0x7e, 0xfb, 0xd0, 0x46, 0xdb, 0x3a, 0xd3, 0x7e, 0xc3, 0x99, 0x9b, + 0xfd, 0x7f, 0x38, 0xb0, 0x41, 0xfa, 0xcb, 0xf9, 0xbf, 0xeb, 0x93, 0xe6, 0xb7, 0x02, 0x81, 0xe1, + 0xa9, 0xdd, 0x3d, 0xfd, 0xc6, 0xd4, 0x8b, 0x0b, 0xc5, 0x7c, 0x4f, 0x05, 0xf2, 0x1d, 0xfa, 0xd8, + 0xaf, 0xe2, 0x4d, 0xee, 0x9c, 0x18, 0xce, 0xd1, 0xd7, 0xc9, 0x84, 0xca, 0xf3, 0x5b, 0x4b, 0x24, + 0x06, 0x57, 0x77, 0xaf, 0x10, 0xa0, 0x21, 0xce, 0xb1, 0x0f, 0x0d, 0xca, 0xd8, 0xc7, 0x57, 0x25, + 0xb1, 0x4a, 0x84, 0x27, 0x78, 0x66, 0xad, 0x96, 0x16, 0x75, 0x58, 0xb3, 0x16, 0xbb, 0xf7, 0x48, + 0xfd, 0x7e, 0x43, 0x1b, 0x71, 0xad, 0xc8, 0xe0, 0x9e, 0x99, 0x8f, 0x73, 0x12, 0x7b, 0x17, 0xd1, + 0x0d, 0x21, 0x09, 0x20, 0x5f, 0xb5, 0x02, 0x99, 0x0a, 0xba, 0xbf, 0x4a, 0x75, 0x6f, 0x71, 0x2f, + 0xe5, 0xd5, 0x09, 0x3d, 0xf6, 0x57, 0x60, 0x5f, 0xce, 0x3b, 0x77, 0x3f, 0xee, 0xf7, 0x0d, 0x05, + 0xae, 0xae, 0xd9, 0x5b, 0x76, 0x29, 0xcf, 0x67, 0x59, 0x4b, 0xa1, 0x1f, 0x01, 0xd5, 0x8a, 0x62, + 0x62, 0x8c, 0x15, 0x7b, 0x03, 0x9e, 0x4e, 0xac, 0x29, 0xaa, 0x6e, 0xcd, 0xb0, 0x8d, 0x8c, 0x9f, + 0x5a, 0x05, 0x76, 0xab, 0x71, 0x05, 0x60, 0xf9, 0xdb, 0x9c, 0x5f, 0x50, 0x3b, 0xed, 0x9f, 0xd1, + 0xac, 0x88, 0x1b, 0xd7, 0xad, 0xa4, 0x8a, 0x65, 0x64, 0x50, 0x01, 0xe7, 0x41, 0xb8, 0x10, 0x3a, + 0xb1, 0xa6, 0x25, 0xbb, 0x7e, 0x0b, 0x13, 0xc5, 0x92, 0xe4, 0x81, 0xd2, 0xf9, 0x6b, 0xdc, 0xfd, + 0xfb, 0x5f, 0x3b, 0x99, 0xe3, 0x0b, 0xdf, 0x9b, 0xb0, 0x70, 0x36, 0xd3, 0x9b, 0x23, 0x8a, 0x75, + 0x6e, 0x1a, 0xad, 0xdd, 0x83, 0x5d, 0x2d, 0x74, 0x9f, 0x5e, 0xfd, 0x7a, 0x19, 0xae, 0xac, 0x28, + 0x54, 0x4b, 0xe7, 0xa0, 0x49, 0xc0, 0x4a, 0x16, 0x54, 0x6e, 0x9f, 0xbb, 0xf7, 0x2f, 0x2e, 0x39, + 0xd4, 0xaf, 0x2b, 0x58, 0x2f, 0x60, 0x2e, 0x92, 0xc3, 0x36, 0x38, 0xb7, 0x0e, 0xfa, 0xb3, 0xe8, + 0xbf, 0xe1, 0x2b, 0xa7, 0xe8, 0x8a, 0x0d, 0xca, 0x40, 0xd3, 0x81, 0x18, 0x12, 0xd3, 0x0e, 0x8d, + 0xdb, 0x5b, 0x86, 0xb3, 0xd7, 0xa6, 0x00, 0x97, 0x2e, 0x7b, 0xd4, 0xbe, 0xe9, 0x9c, 0xa7, 0x4b, + 0x7a, 
0x60, 0x6e, 0x87, 0xa9, 0x41, 0xe6, 0xde, 0x8f, 0x8a, 0x6a, 0xb2, 0xab, 0x3c, 0xf8, 0x52, + 0xf3, 0x1e, 0xf5, 0xc5, 0x47, 0x25, 0x48, 0xb0, 0xf2, 0x45, 0x42, 0x19, 0x52, 0x1f, 0x82, 0xed, + 0x97, 0xe8, 0xb3, 0x7d, 0x23, 0xee, 0x43, 0xfa, 0x2e, 0xd8, 0x64, 0x06, 0xa6, 0xde, 0x5b, 0x27, + 0xc2, 0x2a, 0x83, 0x6c, 0xa9, 0x37, 0x51, 0x97, 0x6b, 0xae, 0x94, 0x4e, 0xfb, 0xf3, 0x1f, 0x84, + 0x45, 0x14, 0x1a, 0xc5, 0x61, 0xaa, 0xca, 0xd7, 0x28, 0x69, 0x75, 0xd8, 0x07, 0xf3, 0xce, 0xa8, + 0x08, 0x87, 0xf0, 0xa5, 0x22, 0x97, 0x35, 0xdc, 0xd4, 0x76, 0xd7, 0xfb, 0xa3, 0xd0, 0x27, 0x7c, + 0x75, 0x2e, 0xbc, 0xa9, 0xa7, 0xb2, 0x66, 0x63, 0x58, 0x6c, 0x94, 0x5a, 0xb8, 0xb0, 0x1e, 0x0d, + 0xae, 0x4b, 0xf2, 0x65, 0xc0, 0x12, 0xdb, 0x2c, 0x6e, 0x2a, 0xcf, 0xf7, 0xa3, 0xd9, 0xd4, 0xa7, + 0x0b, 0x06, 0xf9, 0xfd, 0x98, 0xff, 0x14, 0xb7, 0x06, 0xe4, 0xa0, 0x20, 0x6d, 0x69, 0x38, 0x20, + 0x9b, 0x46, 0x64, 0x15, 0xa3, 0x96, 0x8a, 0xd3, 0xd5, 0x24, 0xa6, 0x54, 0xf8, 0x3c, 0x08, 0x98, + 0x3f, 0xd2, 0x7c, 0xc6, 0x78, 0x54, 0x6f, 0x3f, 0x26, 0xac, 0xa6, 0x73, 0x1c, 0xce, 0xcb, 0x24, + 0xa1, 0x24, 0x8b, 0xe5, 0xf2, 0xf7, 0x34, 0x39, 0xf9, 0xfa, 0xe6, 0xc5, 0xf5, 0x90, 0xed, 0x15, + 0x40, 0xa5, 0xd8, 0x67, 0xb2, 0xaa, 0xae, 0x19, 0xe7, 0xf1, 0xbd, 0xc2, 0x9a, 0xc4, 0xbf, 0x93, + 0x78, 0x61, 0x0f, 0xb9, 0xfe, 0xa9, 0x6e, 0xe0, 0xde, 0x94, 0x93, 0xa5, 0xa8, 0xb6, 0x05, 0x34, + 0x6d, 0xca, 0x39, 0x63, 0xd8, 0xc2, 0x2d, 0x6d, 0xa9, 0x09, 0xe3, 0x8d, 0x51, 0x28, 0x42, 0x10, + 0x62, 0xf3, 0x97, 0x07, 0xd3, 0xaf, 0x9c, 0x77, 0x70, 0xe1, 0xb9, 0x6d, 0xc7, 0x47, 0x49, 0x4f, + 0x9c, 0xe8, 0x97, 0x0e, 0xd1, 0x5b, 0x4c, 0x36, 0xd8, 0x20, 0x7b, 0x1b, 0xa1, 0xbf, 0x21, 0x89, + 0xd6, 0xbd, 0x36, 0x8b, 0xd1, 0xfe, 0x00, 0x38, 0xb3, 0x9f, 0xe3, 0xc3, 0x11, 0xa9, 0x1d, 0x2f, + 0x37, 0x39, 0xf1, 0x80, 0xcf, 0xcd, 0x94, 0x88, 0xcd, 0x9c, 0x16, 0x95, 0xc1, 0xea, 0xe4, 0x48, + 0x09, 0x40, 0x42, 0x52, 0x2a, 0x38, 0x5f, 0xf6, 0x98, 0x25, 0xe8, 0x18, 0xe5, 0x69, 0xab, 0x3a, + 0x05, 0xda, 0xff, 0x9a, 0x38, 0xca, 0x00, 0xd8, 0x78, 0x8c, 0xec, 0xba, 0x3a, 0x01, 0x1f, 0x7b, + 0x15, 0x1a, 0xcf, 0x00, 0x70, 0x89, 0xa1, 0x6f, 0x79, 0x8d, 0xa0, 0x44, 0x92, 0xd6, 0xe0, 0x4d, + 0x06, 0x6e, 0x62, 0xa0, 0x58, 0x7c, 0x21, 0x3b, 0x6d, 0x62, 0x3a, 0xe0, 0x5e, 0x41, 0x79, 0xbd, + 0xc3, 0xe2, 0xee, 0xcb, 0xe5, 0x25, 0xd0, 0x47, 0xba, 0x8c, 0x04, 0x94, 0xe2, 0x91, 0xb4, 0x3c, + 0xc8, 0x08, 0xa9, 0xce, 0x27, 0x3d, 0xe6, 0xc0, 0x6c, 0xdf, 0x4c, 0xa1, 0x39, 0x2d, 0xb2, 0x8c, + 0x43, 0x1b, 0x63, 0x58, 0xda, 0xcf, 0x13, 0xdb, 0x0e, 0x66, 0x63, 0x7f, 0x7b, 0xa5, 0x42, 0x96, + 0xd4, 0xc0, 0x5a, 0xca, 0x90, 0xa4, 0x95, 0x04, 0xf2, 0x9d, 0xcf, 0x2e, 0x5c, 0x5a, 0x58, 0x69, + 0xa2, 0xbd, 0x6f, 0xaa, 0xc9, 0xa2, 0x18, 0xc3, 0x7d, 0xe5, 0xad, 0x52, 0x9b, 0xbe, 0x43, 0x09, + 0xa2, 0xf8, 0xd9, 0x5b, 0x5b, 0x91, 0x53, 0xb0, 0x05, 0x6d, 0x3a, 0x7b, 0x09, 0x3c, 0xea, 0x17, + 0x95, 0xe4, 0x1f, 0xe6, 0x3f, 0xa1, 0x2d, 0xa2, 0x4a, 0x69, 0x6e, 0x9e, 0xfa, 0xca, 0x6f, 0xfa, + 0x63, 0xf2, 0x91, 0x54, 0x6e, 0x82, 0x1b, 0x32, 0x5e, 0xb6, 0x6b, 0xff, 0xd6, 0x43, 0x7f, 0x03, + 0x93, 0x77, 0xf5, 0xd5, 0xdf, 0x7e, 0xc2, 0x3d, 0x29, 0x85, 0x83, 0x39, 0xf0, 0xf2, 0xbc, 0x1c, + 0x96, 0x20, 0x1b, 0x14, 0x19, 0x32, 0x99, 0x5e, 0x1b, 0x5d, 0x9f, 0x58, 0x27, 0xb6, 0x29, 0xd7, + 0x8e, 0x8d, 0x90, 0xc7, 0xdc, 0x83, 0xd0, 0x12, 0xee, 0x81, 0x23, 0xe3, 0xe8, 0xa2, 0xae, 0x43, + 0x65, 0x00, 0x19, 0x60, 0x37, 0x63, 0x03, 0x3c, 0xe7, 0x5d, 0x1a, 0x97, 0x84, 0xdc, 0xf0, 0xd7, + 0xf8, 0x92, 0xa1, 0x3b, 0x7e, 0x6f, 0x91, 0xb5, 0xcd, 0x86, 0xfd, 0x54, 0x75, 0x1a, 0x88, 0x16, + 0xf9, 0xcb, 0x5c, 0xe6, 0x1a, 
0xaa, 0x42, 0x01, 0xb3, 0xe8, 0xda, 0x79, 0xa4, 0x76, 0x08, 0x7e, + 0x6d, 0x25, 0xa9, 0x4d, 0x33, 0xec, 0x05, 0xe5, 0x95, 0x47, 0x5d, 0xd0, 0xa2, 0xfb, 0x09, 0x0f, + 0x08, 0x0d, 0xe2, 0xc7, 0xc7, 0xb7, 0x35, 0x6d, 0x23, 0x56, 0x8b, 0xdb, 0x44, 0x3c, 0x7f, 0x81, + 0x09, 0x9e, 0xf8, 0x7b, 0x9b, 0xfe, 0x13, 0xe2, 0x78, 0x42, 0xae, 0x2d, 0xab, 0x17, 0x9a, 0x55, + 0x51, 0xca, 0x61, 0xcb, 0x9c, 0x0e, 0xc0, 0x58, 0xf6, 0xbf, 0xf7, 0x97, 0xcf, 0xaa, 0xb9, 0xdb, + 0x2e, 0x4a, 0xf6, 0xbb, 0x23, 0x25, 0xdd, 0x72, 0xaf, 0xdc, 0x29, 0x7e, 0x01, 0x6c, 0x3b, 0x5d, + 0x03, 0x68, 0x7f, 0x57, 0xa8, 0xa8, 0x2e, 0x7e, 0xb4, 0x02, 0xcc, 0xee, 0x51, 0xfa, 0x63, 0x3d, + 0xac, 0x6e, 0xee, 0xbb, 0x8f, 0x63, 0x98, 0x64, 0x13, 0x64, 0xbf, 0x8f, 0xc3, 0xb1, 0xff, 0x0c, + 0xe1, 0x18, 0x0c, 0xc7, 0xb9, 0xf1, 0x43, 0x6b, 0x26, 0x70, 0xb2, 0xff, 0x52, 0x85, 0x60, 0x1f, + 0xe4, 0x13, 0xbf, 0xcf, 0xc5, 0x76, 0x60, 0x34, 0x62, 0xbe, 0xfe, 0xed, 0xba, 0xb3, 0xa8, 0xbd, + 0x6b, 0xa2, 0x63, 0x1e, 0xce, 0xa7, 0xc6, 0xd9, 0x98, 0x1e, 0x75, 0x0e, 0x48, 0x1e, 0x9e, 0x0f, + 0xc3, 0x45, 0xca, 0x7c, 0x49, 0x03, 0x42, 0x65, 0x1c, 0x47, 0xcc, 0xa3, 0x73, 0xac, 0xe8, 0x18, + 0x3e, 0x3b, 0xe7, 0x3d, 0x98, 0x32, 0x50, 0xfe, 0xd6, 0x98, 0x6f, 0x01, 0x21, 0xb4, 0x3c, 0xe5, + 0xeb, 0x4c, 0xb3, 0xef, 0xa4, 0xcd, 0xfb, 0x28, 0x67, 0x6a, 0x57, 0xf2, 0x14, 0xee, 0x75, 0xf0, + 0x8e, 0xde, 0x2c, 0x28, 0x10, 0x94, 0xca, 0x94, 0x7c, 0x5b, 0x41, 0x72, 0x41, 0x49, 0xb5, 0xaa, + 0x33, 0x5e, 0x3b, 0xa3, 0xee, 0x87, 0x3e, 0x85, 0x89, 0x57, 0xd5, 0x29, 0x07, 0x82, 0xb4, 0xb4, + 0x31, 0x82, 0x47, 0xad, 0x43, 0x88, 0xc6, 0x0b, 0x42, 0x36, 0x4e, 0x5f, 0x94, 0x9a, 0x77, 0x0a, + 0x64, 0x5a, 0x1a, 0x8f, 0x81, 0x9a, 0x8e, 0x9e, 0x9f, 0xbd, 0xb8, 0xdd, 0x9b, 0x4e, 0xfd, 0xeb, + 0x2a, 0xe0, 0x8d, 0xc9, 0x22, 0x05, 0xc6, 0xf5, 0x17, 0x17, 0x2c, 0x94, 0x76, 0x17, 0xc4, 0xd0, + 0x16, 0xaf, 0x1c, 0xa3, 0xfa, 0x16, 0x1c, 0xa3, 0x08, 0xb9, 0x99, 0x60, 0x66, 0xed, 0x05, 0x4c, + 0x12, 0x9f, 0x7c, 0xf8, 0xcc, 0x87, 0xda, 0xea, 0x81, 0xb1, 0xaf, 0x7f, 0xba, 0xef, 0xf8, 0xc1, + 0x20, 0x06, 0x79, 0xb7, 0xec, 0xf5, 0x95, 0xd2, 0x80, 0x4e, 0xd1, 0x82, 0x07, 0x95, 0x0a, 0xb6, + 0x90, 0x03, 0x2d, 0x84, 0x25, 0xb6, 0x38, 0x6e, 0x42, 0x4d, 0xc6, 0xdf, 0x5e, 0x8b, 0x38, 0x7e, + 0x74, 0x94, 0x75, 0x53, 0x1b, 0x18, 0x59, 0xba, 0x79, 0x82, 0x4d, 0xfb, 0x0b, 0x0a, 0xee, 0x7b, + 0xb0, 0x94, 0x95, 0x32, 0x0b, 0x32, 0xec, 0x97, 0x65, 0x5f, 0x38, 0x9e, 0x7c, 0xd9, 0x6e, 0x89, + 0x1f, 0xc9, 0x6f, 0xd7, 0xf5, 0x79, 0x16, 0xb5, 0xc0, 0x05, 0x9c, 0x32, 0x6b, 0xac, 0xec, 0xad, + 0x15, 0x2f, 0x41, 0x16, 0xef, 0xde, 0x3e, 0x46, 0x55, 0xa4, 0x42, 0x78, 0x2d, 0x54, 0x73, 0x21, + 0x77, 0x77, 0x48, 0xd8, 0x46, 0xa0, 0x1e, 0x5a, 0xcc, 0x58, 0x6d, 0x85, 0x2c, 0x02, 0xd5, 0x70, + 0xb3, 0xaf, 0x57, 0x37, 0x95, 0xf2, 0x46, 0x45, 0xbb, 0x60, 0x14, 0xa8, 0xcf, 0x97, 0xb7, 0x73, + 0x6e, 0xfc, 0xfe, 0x1e, 0x59, 0xd8, 0x28, 0x77, 0x90, 0x15, 0xad, 0xe9, 0x8d, 0x99, 0x60, 0x9a, + 0xcf, 0x69, 0x7f, 0x2b, 0x76, 0x4b, 0x79, 0x73, 0xb0, 0x49, 0xd1, 0xd2, 0x6c, 0x12, 0x1a, 0x89, + 0xc6, 0x2a, 0x91, 0xb7, 0xef, 0x21, 0x55, 0xf5, 0x81, 0xce, 0xd9, 0x9f, 0xea, 0xbf, 0xf5, 0x8d, + 0xc7, 0x8f, 0x72, 0x7a, 0xdd, 0x7d, 0x33, 0x22, 0x38, 0x6a, 0x23, 0x12, 0x14, 0x09, 0x18, 0x11, + 0x0b, 0x9b, 0xe8, 0xce, 0x2c, 0xc5, 0x00, 0x2a, 0x0a, 0xab, 0xc1, 0xca, 0x83, 0x80, 0xfa, 0x98, + 0x6f, 0xba, 0xfd, 0xad, 0x34, 0x28, 0x7a, 0xcd, 0xf1, 0x98, 0xce, 0xcf, 0x64, 0x59, 0x50, 0x8c, + 0xb4, 0x27, 0x33, 0x2a, 0x2e, 0xaa, 0x43, 0x87, 0x3f, 0x9e, 0x6b, 0x93, 0x2e, 0xc6, 0xa4, 0x8f, + 0x5b, 0x10, 0xa1, 0xb5, 0x00, 0x10, 0x25, 0x18, 0x6c, 
0xfd, 0x2c, 0x65, 0x0b, 0xcc, 0xcb, 0x12, + 0x28, 0xf1, 0x0b, 0x97, 0x2b, 0x47, 0x69, 0xb8, 0xc9, 0xad, 0xa6, 0x5f, 0xc5, 0x42, 0x5f, 0x0f, + 0x4d, 0xf4, 0x6b, 0x9e, 0xab, 0xe0, 0x73, 0x26, 0x47, 0x98, 0x81, 0xd1, 0xa8, 0x8d, 0xc8, 0xdf, + 0xe7, 0xeb, 0x5c, 0x5e, 0x7d, 0xa0, 0x5d, 0xbd, 0xf6, 0xfc, 0xd6, 0xca, 0x15, 0xbf, 0x2c, 0xd9, + 0x59, 0xa9, 0xa7, 0xde, 0xa1, 0x64, 0xc9, 0x2c, 0x1e, 0x67, 0x42, 0xdf, 0x8c, 0x02, 0xaf, 0xec, + 0xc2, 0xef, 0x51, 0x59, 0xf0, 0xd5, 0x92, 0x27, 0x1d, 0xd5, 0x02, 0x45, 0x2f, 0x99, 0xb7, 0x8f, + 0xd2, 0x7e, 0xba, 0x15, 0xce, 0xb0, 0xd4, 0x5c, 0xdb, 0x67, 0xe7, 0xbd, 0x02, 0xb5, 0xb9, 0x1f, + 0x51, 0x90, 0x77, 0xaf, 0xd9, 0xdf, 0xca, 0x58, 0x0d, 0x40, 0x9a, 0x0e, 0x30, 0x9b, 0xc9, 0x41, + 0xfa, 0x1a, 0xb2, 0x42, 0x00, 0xb7, 0x9b, 0xbc, 0xa1, 0x65, 0x7a, 0xa0, 0x34, 0x7c, 0xb3, 0xd7, + 0x7d, 0x4f, 0xa5, 0x4c, 0xb1, 0x2b, 0x0e, 0x4b, 0x3b, 0x53, 0x72, 0x09, 0x40, 0xc0, 0x20, 0x73, + 0xf6, 0x74, 0x41, 0x67, 0x3d, 0x10, 0x21, 0x53, 0x38, 0xcc, 0x49, 0x99, 0x7e, 0x48, 0x48, 0x4f, + 0xcb, 0xe3, 0x4b, 0x77, 0xca, 0xdb, 0x5c, 0xb1, 0x90, 0xde, 0x84, 0x3a, 0xc4, 0x48, 0xf3, 0xa4, + 0x46, 0xdc, 0x8e, 0x7f, 0x76, 0x63, 0x7d, 0x7c, 0x60, 0x03, 0xf6, 0xa5, 0x8f, 0x54, 0x4e, 0xa3, + 0x5a, 0x3b, 0x3f, 0x65, 0xf2, 0xc7, 0x40, 0x17, 0x08, 0xf1, 0x72, 0x46, 0xa5, 0xc6, 0xd0, 0x6f, + 0x94, 0xd1, 0x11, 0xb2, 0xc6, 0x4f, 0x77, 0xbd, 0x95, 0x24, 0x17, 0x52, 0x55, 0x1c, 0xf7, 0x5b, + 0xe6, 0x63, 0x45, 0x38, 0x04, 0x67, 0x9d, 0x38, 0x75, 0xfb, 0xeb, 0x19, 0xd3, 0xf0, 0xac, 0x0a, + 0xd9, 0xe5, 0xbb, 0x31, 0x7e, 0x58, 0x11, 0xb8, 0x7d, 0xe1, 0x0d, 0x76, 0xdc, 0xd0, 0x14, 0x70, + 0xcf, 0x71, 0x32, 0x4e, 0x75, 0xa7, 0xd1, 0x32, 0x9c, 0x0e, 0xe0, 0x61, 0x81, 0x3a, 0xee, 0xe1, + 0x6e, 0xbd, 0xc7, 0xcc, 0xd7, 0x48, 0x31, 0x05, 0x94, 0xa1, 0xd3, 0xd3, 0xae, 0x18, 0x91, 0x1b, + 0xf5, 0x0e, 0xc1, 0x9f, 0x08, 0xe5, 0xfb, 0xd5, 0xc7, 0xbe, 0xff, 0xc6, 0x68, 0xe1, 0xd7, 0x0f, + 0x81, 0x82, 0x24, 0xcd, 0x83, 0x0d, 0xb8, 0xb0, 0xa8, 0x17, 0x61, 0x5d, 0x5c, 0xf6, 0x84, 0xae, + 0x47, 0x3e, 0xa8, 0x8b, 0xd6, 0xbf, 0xe9, 0xfd, 0x20, 0xaa, 0xc8, 0x92, 0x4d, 0x56, 0xcd, 0xb2, + 0x7d, 0x67, 0xd2, 0x03, 0x63, 0xa1, 0x84, 0x8f, 0x77, 0x74, 0x7a, 0xca, 0x1e, 0xe2, 0x6e, 0xf5, + 0x7a, 0xda, 0xf2, 0x8d, 0xd1, 0xb6, 0x59, 0xa2, 0x0f, 0x0b, 0xab, 0xc4, 0xce, 0xb9, 0x70, 0x78, + 0x90, 0xda, 0x15, 0x31, 0x49, 0xa3, 0x1d, 0x97, 0x84, 0x2d, 0xa6, 0x33, 0xaa, 0xf9, 0xc1, 0x80, + 0xf5, 0x04, 0x9f, 0x2d, 0x9e, 0xee, 0x62, 0x25, 0x20, 0xb2, 0xe3, 0xf1, 0xcf, 0x79, 0x6c, 0xfc, + 0x31, 0x9e, 0xa5, 0x36, 0xa7, 0x83, 0x58, 0x4b, 0x3e, 0x77, 0x10, 0x16, 0x2e, 0x14, 0x6a, 0xc8, + 0xec, 0xc6, 0x8a, 0x52, 0x2d, 0xa8, 0xa5, 0x7e, 0x63, 0x66, 0x88, 0x26, 0xc3, 0xa5, 0xb7, 0x3d, + 0xfd, 0xdf, 0xd5, 0x53, 0x37, 0x5f, 0x34, 0x75, 0x6f, 0xa5, 0x34, 0x30, 0x17, 0xae, 0x8b, 0xa5, + 0x4b, 0xcd, 0xf2, 0xdc, 0x7f, 0x0a, 0x7a, 0x85, 0xdc, 0xf3, 0xb0, 0xc4, 0x34, 0x89, 0x9e, 0xfb, + 0xe1, 0xfe, 0x64, 0xa1, 0xab, 0x61, 0x34, 0xb9, 0x2b, 0x9d, 0x0c, 0xa6, 0xd9, 0xa9, 0xd0, 0x3f, + 0x91, 0x83, 0xc6, 0xd5, 0x06, 0x65, 0xb9, 0xdc, 0xff, 0xed, 0x28, 0xa6, 0x33, 0x69, 0x53, 0xa3, + 0x69, 0xb9, 0xce, 0x7a, 0x30, 0x8c, 0x3f, 0xac, 0x7a, 0x0f, 0x2f, 0x49, 0x4b, 0x17, 0xa4, 0x41, + 0x78, 0x75, 0x63, 0xd0, 0x44, 0x25, 0x6d, 0x88, 0x2e, 0xd8, 0x71, 0x8a, 0x9f, 0x75, 0x4d, 0x06, + 0x70, 0x3f, 0x22, 0x47, 0xd9, 0x70, 0xed, 0xb1, 0xab, 0x2d, 0xaa, 0x05, 0x7b, 0xa8, 0x03, 0xff, + 0x44, 0xaa, 0x2b, 0x8b, 0xb4, 0x2a, 0xbb, 0xd8, 0x37, 0x52, 0x16, 0x99, 0xe9, 0x15, 0x5c, 0x9d, + 0x56, 0xaf, 0x08, 0x89, 0x03, 0x7e, 0x67, 0x67, 0x10, 0x51, 0xda, 0x32, 0xbc, 
0x5d, 0x5d, 0xb0, + 0x60, 0x66, 0x67, 0x50, 0x2e, 0x09, 0xc2, 0xca, 0x0f, 0x68, 0x31, 0xe6, 0x84, 0x33, 0xd8, 0xc0, + 0x13, 0x95, 0x89, 0x88, 0xe2, 0x59, 0x17, 0xaa, 0x66, 0x65, 0x45, 0xc2, 0xb5, 0xfc, 0xd3, 0xd4, + 0x5e, 0xa7, 0x15, 0xdf, 0x8c, 0xec, 0x04, 0x1d, 0x9f, 0xb4, 0xad, 0x7d, 0x84, 0x66, 0x18, 0xca, + 0x71, 0xb4, 0x6d, 0x2c, 0x0f, 0xcd, 0x0b, 0x75, 0x57, 0x09, 0x38, 0x2d, 0x0a, 0xfc, 0xb6, 0xb8, + 0x81, 0x4c, 0x22, 0xd2, 0xa5, 0x47, 0x49, 0xec, 0xdf, 0xf2, 0x19, 0x37, 0x54, 0xca, 0x88, 0x9c, + 0xd6, 0x89, 0xe3, 0x8a, 0xc2, 0x50, 0x9e, 0xfa, 0xa3, 0x38, 0x6e, 0x5c, 0xfc, 0x3a, 0x41, 0xbb, + 0x1c, 0x27, 0xe2, 0x38, 0x16, 0xa2, 0xb4, 0x92, 0x8e, 0x1b, 0x39, 0x50, 0x03, 0x57, 0x6c, 0x9e, + 0x6b, 0x53, 0xfd, 0x8c, 0x29, 0x7a, 0x8f, 0x94, 0x62, 0xd7, 0x00, 0x7f, 0x4f, 0x2b, 0xd2, 0x9d, + 0x9e, 0xf3, 0xba, 0x41, 0x85, 0xc0, 0x22, 0xc2, 0x72, 0xed, 0xd5, 0x2a, 0x13, 0x59, 0x1c, 0x41, + 0x1a, 0xa6, 0x6d, 0x04, 0xe2, 0x14, 0x2a, 0xa4, 0x54, 0x53, 0x7b, 0xf2, 0x22, 0x1e, 0x3e, 0x01, + 0x9e, 0x3a, 0x5a, 0x20, 0x84, 0x0f, 0xc6, 0x25, 0xbe, 0x4c, 0x00, 0xcf, 0x79, 0xb5, 0xdc, 0xa7, + 0xc1, 0xfb, 0x64, 0xf6, 0xf2, 0xdf, 0xf9, 0xaa, 0x5a, 0x87, 0x27, 0xcd, 0xfd, 0x13, 0x6f, 0xb8, + 0x0d, 0x99, 0x07, 0xbe, 0x8c, 0x31, 0xda, 0x6a, 0x94, 0x4d, 0x0d, 0xd3, 0x82, 0x43, 0xda, 0x96, + 0xed, 0x8b, 0x28, 0x5c, 0x2f, 0xd9, 0x3c, 0x9c, 0xe0, 0xeb, 0x6d, 0x21, 0x62, 0x85, 0x2f, 0x8c, + 0xe3, 0x77, 0xe5, 0xa5, 0x32, 0x28, 0x3f, 0xb8, 0x02, 0x2b, 0x10, 0xe9, 0xe9, 0x25, 0xc1, 0x58, + 0x78, 0x76, 0x3d, 0x25, 0x25, 0x5e, 0xd2, 0xae, 0xa5, 0x24, 0x88, 0x5e, 0xd5, 0x1d, 0xc0, 0x96, + 0xd0, 0xb7, 0x3b, 0x8e, 0xdc, 0xb6, 0xc6, 0x5d, 0x27, 0x94, 0x5b, 0xe4, 0x64, 0xb1, 0xfe, 0xed, + 0xe4, 0xbb, 0x4c, 0xa0, 0x3b, 0xea, 0xaf, 0x84, 0x01, 0x65, 0x59, 0x0f, 0x3d, 0x36, 0xa1, 0x94, + 0x48, 0x7a, 0xd7, 0x89, 0x15, 0xf8, 0x8b, 0x02, 0x44, 0x96, 0x77, 0x97, 0x43, 0x5d, 0x3e, 0x47, + 0xfc, 0xd4, 0xc4, 0x14, 0x22, 0xaa, 0xa3, 0xf1, 0xdf, 0x7c, 0x84, 0x00, 0x79, 0x9c, 0xc4, 0x72, + 0x62, 0x6e, 0x14, 0xf7, 0x8a, 0x86, 0xc7, 0x4f, 0x46, 0x8b, 0x8c, 0xa6, 0x7d, 0x25, 0xd5, 0x74, + 0x37, 0xf3, 0x15, 0xc4, 0xd0, 0xeb, 0xda, 0x88, 0x67, 0xde, 0xbd, 0x7d, 0xa2, 0x8c, 0x1c, 0xa8, + 0x0f, 0x0b, 0x2c, 0xfc, 0x53, 0xfd, 0xeb, 0x50, 0xc7, 0x72, 0xeb, 0x7f, 0xf3, 0x23, 0x1d, 0x67, + 0x93, 0x72, 0xe3, 0xf0, 0x87, 0x14, 0xba, 0xd6, 0xb6, 0x68, 0x28, 0xa3, 0x87, 0xd4, 0xd9, 0x79, + 0x58, 0xe7, 0x7b, 0xbd, 0x2c, 0x78, 0x90, 0x63, 0x5e, 0x0d, 0x52, 0x8f, 0x4d, 0x06, 0x3e, 0xc6, + 0xb9, 0x42, 0x41, 0x99, 0x9a, 0xaf, 0x7b, 0x21, 0x88, 0xbd, 0x4f, 0x1a, 0x1d, 0xcb, 0x6c, 0x18, + 0xa8, 0xf4, 0x40, 0xeb, 0x3a, 0x15, 0xd2, 0xeb, 0xfb, 0xc7, 0xd3, 0xac, 0x3d, 0xb9, 0xff, 0xed, + 0x4f, 0xab, 0x39, 0x23, 0xe4, 0xab, 0x62, 0xb8, 0xcc, 0xac, 0x5e, 0x46, 0x00, 0xb8, 0x42, 0xdf, + 0x2b, 0x6c, 0xa0, 0x59, 0x7f, 0x08, 0x1c, 0x6d, 0x91, 0x76, 0xd8, 0xaa, 0x30, 0x8e, 0x72, 0xfd, + 0x42, 0xd6, 0xb4, 0x37, 0x8f, 0x48, 0x89, 0xa2, 0x66, 0xdb, 0x3e, 0x5e, 0x7b, 0xe8, 0x66, 0x67, + 0xc6, 0xe5, 0x09, 0x4f, 0xff, 0x36, 0x28, 0x5b, 0x67, 0xbe, 0xab, 0xec, 0xa7, 0x0c, 0x23, 0x8c, + 0xba, 0x26, 0x81, 0x68, 0x86, 0x6a, 0x3e, 0xf5, 0xad, 0x8e, 0x70, 0x54, 0x3f, 0x5a, 0xe2, 0x90, + 0x1a, 0x5c, 0x9d, 0x56, 0x19, 0xe5, 0xb5, 0xa6, 0x8b, 0xcf, 0x7a, 0x8e, 0x72, 0x47, 0x7d, 0xe6, + 0x7b, 0x90, 0xda, 0x75, 0xaa, 0xe9, 0x85, 0x1a, 0x90, 0x72, 0x71, 0x5c, 0x0a, 0x8d, 0x92, 0x50, + 0x56, 0xb2, 0xe3, 0xa8, 0x3b, 0xfb, 0x91, 0x18, 0x41, 0xdb, 0xa6, 0xd1, 0x33, 0xe2, 0x4e, 0x8f, + 0xa2, 0xc9, 0xd7, 0xfa, 0x6b, 0xaa, 0x56, 0xa0, 0x41, 0xcb, 0xc9, 0xeb, 0xb7, 0xcd, 0x03, 0x4c, + 0xc6, 
0x61, 0x16, 0xc0, 0x14, 0xb4, 0xe0, 0x48, 0x4a, 0x72, 0x57, 0x19, 0x46, 0xab, 0x5b, 0xfe, + 0xa8, 0x58, 0xc8, 0xf2, 0xac, 0x33, 0x7d, 0xa8, 0x89, 0x20, 0xb3, 0x10, 0xc5, 0x20, 0x82, 0xe0, + 0xe8, 0x23, 0xc9, 0xcf, 0xe2, 0xa6, 0x29, 0x33, 0x08, 0x9c, 0x67, 0x3e, 0xb8, 0x1d, 0xb8, 0x4e, + 0xdc, 0xff, 0x2e, 0xff, 0x1d, 0xe6, 0x28, 0x1e, 0x26, 0xd5, 0x45, 0xc5, 0x05, 0x42, 0xcc, 0x3c, + 0xfd, 0x53, 0x04, 0xf2, 0xf7, 0xb9, 0x72, 0x3b, 0x4f, 0x24, 0xfd, 0xb6, 0x97, 0x3d, 0x4f, 0xfb, + 0x7e, 0x06, 0x4a, 0xc2, 0xbf, 0x2e, 0x68, 0x5e, 0x73, 0x2f, 0x2a, 0x7d, 0xe3, 0x6a, 0x3f, 0xbe, + 0xb5, 0x5e, 0xd0, 0x3a, 0x29, 0xec, 0xd1, 0x95, 0x72, 0xc6, 0xa4, 0x65, 0xf3, 0x41, 0x3b, 0x01, + 0xd6, 0x37, 0xaa, 0x27, 0x9d, 0xe7, 0x3b, 0x26, 0x65, 0xc9, 0x06, 0xea, 0xcb, 0x53, 0xe3, 0xc0, + 0x28, 0x45, 0x54, 0x69, 0x7f, 0x6a, 0x61, 0xc4, 0x7a, 0xa1, 0xb4, 0x4c, 0xfa, 0xc5, 0xad, 0x60, + 0x28, 0x96, 0x53, 0x3e, 0x2b, 0xd4, 0x56, 0x51, 0x3b, 0x5d, 0x83, 0xe9, 0xea, 0xf4, 0xcb, 0x0f, + 0x36, 0x21, 0x76, 0xfe, 0x14, 0x39, 0x7d, 0xb5, 0x5a, 0x45, 0xfb, 0x6f, 0xa0, 0x18, 0x60, 0xb6, + 0xa9, 0x64, 0xb1, 0x6a, 0x83, 0x4a, 0xa7, 0xa1, 0x55, 0x94, 0xc4, 0xc1, 0x5e, 0xfd, 0x72, 0xfd, + 0xbb, 0xcb, 0x2a, 0x78, 0xde, 0xfc, 0x2d, 0x92, 0x82, 0xa2, 0x38, 0xbe, 0xdf, 0x33, 0x52, 0x93, + 0x1b, 0x27, 0x77, 0x3e, 0xcd, 0xe6, 0x72, 0x62, 0xc4, 0x71, 0x6f, 0xdb, 0xe5, 0xf8, 0xb8, 0xab, + 0x98, 0xc9, 0x02, 0xb1, 0x1f, 0x86, 0x30, 0x35, 0xd2, 0xee, 0x8e, 0x4a, 0x8a, 0x07, 0x88, 0xdf, + 0x66, 0xde, 0xd0, 0xd3, 0xd6, 0xd6, 0x55, 0xf7, 0xe5, 0xb6, 0x8b, 0xa6, 0x0a, 0x0e, 0x1a, 0xa6, + 0x81, 0x62, 0x3d, 0xe0, 0x71, 0xeb, 0x24, 0x4b, 0xff, 0x9f, 0xc4, 0x20, 0x5a, 0xdd, 0x18, 0xdd, + 0x3b, 0x2f, 0xc5, 0x84, 0xa1, 0x8f, 0xbf, 0x85, 0x56, 0xb3, 0x41, 0x97, 0x4a, 0xc9, 0x36, 0xb4, + 0x80, 0x77, 0x49, 0x6e, 0xd0, 0x5b, 0xb2, 0x8d, 0xb0, 0x6c, 0xb2, 0xb4, 0xf6, 0x59, 0x2f, 0xd7, + 0xbc, 0xbf, 0xf7, 0x88, 0x4b, 0x24, 0x13, 0x3a, 0x8d, 0xfb, 0x31, 0x60, 0x20, 0x88, 0xb7, 0x48, + 0xe0, 0x64, 0xe1, 0xa8, 0xfa, 0xb0, 0xb3, 0x1a, 0xd0, 0xaa, 0xe7, 0xd4, 0xf6, 0xab, 0xa0, 0x2f, + 0xde, 0xd8, 0x7a, 0xe9, 0xd2, 0xd9, 0x27, 0xdd, 0xf7, 0x13, 0xa4, 0x27, 0x27, 0x9c, 0x45, 0x12, + 0x6b, 0x4a, 0x2b, 0x6b, 0x0f, 0x23, 0xd1, 0x68, 0xf2, 0x20, 0xbf, 0x86, 0x2a, 0xcc, 0xf8, 0x72, + 0x5e, 0xfa, 0x30, 0x85, 0xb7, 0x56, 0x5b, 0x1b, 0x66, 0x32, 0x17, 0x1c, 0xeb, 0x43, 0x47, 0xca, + 0x74, 0x93, 0x8e, 0x19, 0xc8, 0xba, 0x63, 0xd5, 0x73, 0x69, 0x1f, 0x75, 0x84, 0xf1, 0x68, 0x6d, + 0x06, 0x11, 0xf7, 0xf7, 0x21, 0x3f, 0x21, 0x88, 0x03, 0x50, 0xd8, 0x98, 0x34, 0x25, 0x73, 0xbb, + 0x9c, 0x3d, 0xc9, 0xdc, 0x4c, 0x3c, 0xe3, 0x9f, 0x3f, 0xd7, 0xdf, 0x50, 0x16, 0xe9, 0xda, 0x0f, + 0x1c, 0x47, 0x59, 0x15, 0xa4, 0x6d, 0x00, 0x9e, 0x08, 0xa4, 0xb1, 0x40, 0xcc, 0xb9, 0xa1, 0x6c, + 0x68, 0x1a, 0x9a, 0x03, 0xa7, 0x75, 0x34, 0x9a, 0x8b, 0xca, 0x73, 0x39, 0xbb, 0xf7, 0xfe, 0x97, + 0x27, 0x58, 0x6b, 0x09, 0xd8, 0xc3, 0xfd, 0x5b, 0x2a, 0xaa, 0x5d, 0x97, 0xb5, 0x80, 0x23, 0xa4, + 0xa7, 0x99, 0xb4, 0x2a, 0x6e, 0x99, 0xb9, 0x0c, 0x57, 0x79, 0xa0, 0xd0, 0xfe, 0x98, 0x3a, 0x6e, + 0x3a, 0xd1, 0x5b, 0xba, 0x3f, 0xad, 0xd3, 0x6b, 0xad, 0x99, 0x6b, 0x49, 0x1b, 0x08, 0x8a, 0x09, + 0xc5, 0x42, 0x8e, 0xda, 0x39, 0xfb, 0x6d, 0xb0, 0xbf, 0xbe, 0xa1, 0xd5, 0xd8, 0x00, 0x1c, 0xaf, + 0x67, 0xe1, 0x0b, 0x45, 0x90, 0xb2, 0x18, 0xb6, 0x46, 0x4c, 0x56, 0x40, 0x69, 0x69, 0x00, 0x36, + 0x7b, 0x62, 0x7e, 0x93, 0xdd, 0xdd, 0xc8, 0x22, 0x65, 0x4d, 0x81, 0x77, 0x57, 0x42, 0xac, 0x45, + 0xd9, 0x15, 0xf2, 0x38, 0x47, 0xde, 0xfb, 0x3c, 0x95, 0xe8, 0xc6, 0x64, 0x59, 0x28, 0x59, 0x8a, + 0xa9, 0xa2, 0xc0, 0xa7, 0xeb, 
0x11, 0xa2, 0x69, 0xec, 0xf6, 0xe0, 0x0c, 0xe5, 0x8e, 0x81, 0x0b, + 0x52, 0xbc, 0x5e, 0x15, 0x81, 0x76, 0x62, 0xad, 0x21, 0x2a, 0x8e, 0x38, 0x9a, 0x27, 0xfc, 0x57, + 0x11, 0xe8, 0x49, 0x65, 0x0d, 0x58, 0xcc, 0x37, 0x16, 0xea, 0x6d, 0xcb, 0x20, 0x85, 0x3e, 0x38, + 0x73, 0xc9, 0x0b, 0x55, 0x00, 0x69, 0x2d, 0xa4, 0xd2, 0xf4, 0x85, 0x6c, 0x00, 0xef, 0x07, 0xa6, + 0x2a, 0xb7, 0x54, 0x67, 0xfd, 0x71, 0xc4, 0x9a, 0xdc, 0x2e, 0x20, 0xcf, 0xb9, 0x5b, 0x34, 0x29, + 0xb0, 0x2a, 0x66, 0xe3, 0xa1, 0x85, 0x26, 0x1c, 0x59, 0xed, 0x7b, 0xa3, 0x50, 0x3d, 0x9d, 0x37, + 0xaf, 0x93, 0xf3, 0xb0, 0xec, 0x6f, 0x4d, 0x4f, 0x2b, 0x7d, 0xe7, 0xd2, 0x06, 0x22, 0x82, 0x9b, + 0x15, 0xec, 0x89, 0x85, 0x0b, 0x08, 0x4b, 0x76, 0xf8, 0xc8, 0x55, 0x0b, 0x6f, 0x6f, 0x27, 0x22, + 0x56, 0x64, 0xc6, 0xed, 0x15, 0x3a, 0xef, 0x1c, 0x53, 0x66, 0x9d, 0x0a, 0xb7, 0x52, 0xe4, 0x54, + 0xa4, 0xe1, 0xe2, 0xf9, 0x29, 0xec, 0x78, 0x8a, 0x63, 0x9e, 0xcd, 0xbd, 0x5e, 0x16, 0x41, 0xef, + 0x15, 0x34, 0x6c, 0x1e, 0xf1, 0x16, 0x5f, 0x7f, 0x32, 0x67, 0x60, 0xab, 0xc5, 0x4b, 0xa1, 0x9f, + 0xac, 0x71, 0x68, 0x97, 0x90, 0x86, 0x3a, 0x74, 0xd7, 0x34, 0x8f, 0x75, 0x8a, 0x13, 0xdf, 0x93, + 0x45, 0x69, 0xba, 0xf0, 0x77, 0x71, 0xbd, 0x88, 0x34, 0x44, 0xb0, 0xca, 0xbb, 0x43, 0x72, 0x41, + 0x05, 0x88, 0xa6, 0x31, 0xfb, 0x98, 0x2c, 0x08, 0xdf, 0x41, 0xb4, 0x5b, 0x04, 0xc2, 0xf1, 0x95, + 0x76, 0x88, 0x37, 0x57, 0x4f, 0x5a, 0x7d, 0x64, 0xa6, 0x02, 0xf0, 0xe9, 0x9b, 0x89, 0x78, 0x5c, + 0xed, 0xa1, 0xf3, 0x25, 0x32, 0x18, 0x64, 0xef, 0x91, 0x38, 0x63, 0x85, 0x64, 0x2b, 0x79, 0x57, + 0x5a, 0x0a, 0x1c, 0x14, 0x15, 0xe8, 0x22, 0x62, 0x92, 0x20, 0x07, 0x1a, 0x80, 0x33, 0xac, 0xd0, + 0x63, 0xaf, 0x28, 0x5f, 0x33, 0x11, 0x13, 0xf5, 0x52, 0xa9, 0xf3, 0x7c, 0x6b, 0xb2, 0x1b, 0x1b, + 0x4c, 0x91, 0x93, 0xab, 0x76, 0xe3, 0xe1, 0xc7, 0x4e, 0x10, 0x68, 0xa5, 0xc5, 0x59, 0x15, 0x36, + 0x37, 0x19, 0xf0, 0xea, 0x75, 0xa2, 0x1c, 0x08, 0x25, 0xc5, 0xdb, 0x93, 0xd8, 0x3e, 0xca, 0xca, + 0xe2, 0x07, 0xc4, 0x61, 0x91, 0x91, 0xc5, 0xeb, 0x05, 0xc7, 0xb7, 0xe2, 0x8c, 0xb1, 0xbe, 0x6c, + 0xa0, 0x48, 0x5a, 0x53, 0xaa, 0x65, 0x71, 0x3e, 0xc9, 0xcb, 0x65, 0xcf, 0x2e, 0x11, 0x76, 0xa2, + 0x46, 0x0c, 0xcb, 0x73, 0xd5, 0x4b, 0xe6, 0xa7, 0xf2, 0xb3, 0x41, 0x5a, 0x80, 0xeb, 0x98, 0xcc, + 0xe9, 0x32, 0xee, 0x7d, 0x11, 0xa8, 0x53, 0xe0, 0xdf, 0x54, 0xb5, 0xd5, 0x4f, 0xfb, 0xe8, 0x27, + 0x49, 0xb2, 0xba, 0xed, 0x9d, 0xa5, 0xec, 0x11, 0x6f, 0x63, 0xf9, 0x5e, 0xc2, 0x4f, 0xab, 0x7e, + 0x1b, 0x81, 0x12, 0xee, 0xd5, 0xf2, 0xc6, 0x55, 0xd9, 0x5b, 0xe8, 0x08, 0xfb, 0xa8, 0xdd, 0x68, + 0x7f, 0xd6, 0x40, 0x10, 0x8c, 0x34, 0xf2, 0x70, 0x5f, 0x5d, 0xd9, 0x57, 0xc4, 0x45, 0x39, 0xac, + 0x6f, 0xae, 0xc9, 0x51, 0x33, 0x62, 0xaa, 0x91, 0xfc, 0x5e, 0x33, 0x83, 0x79, 0x2c, 0x49, 0x38, + 0xe8, 0x55, 0x83, 0xcc, 0x55, 0x31, 0xb1, 0x03, 0xba, 0x53, 0xd1, 0x19, 0xb8, 0xad, 0xc6, 0xf3, + 0x71, 0xd6, 0x96, 0xd0, 0x74, 0xb2, 0x8e, 0xfb, 0xd1, 0x43, 0x83, 0xc9, 0xe9, 0xf5, 0xea, 0x58, + 0xb3, 0x6b, 0x8f, 0x9e, 0xee, 0xe2, 0x98, 0x25, 0xba, 0xf7, 0x7b, 0x0b, 0xa0, 0x53, 0x4b, 0x62, + 0x57, 0xc4, 0xec, 0x29, 0x11, 0x6a, 0x72, 0x18, 0x6e, 0x03, 0x61, 0xab, 0x41, 0x00, 0x3a, 0x25, + 0xcc, 0x53, 0x5a, 0xf0, 0x44, 0x6e, 0x12, 0x85, 0xb1, 0x1b, 0x10, 0x7c, 0x22, 0x7f, 0xe2, 0x70, + 0xbf, 0x5d, 0xca, 0x2b, 0x08, 0xbd, 0xf9, 0xba, 0x9c, 0x4e, 0xd3, 0x2a, 0x2e, 0xb6, 0x1d, 0x9a, + 0x75, 0x29, 0x33, 0xbc, 0x6c, 0x81, 0x1d, 0xb4, 0x08, 0xbe, 0xf0, 0x62, 0x01, 0x9b, 0xd5, 0x2c, + 0x9b, 0x23, 0x04, 0x94, 0x4f, 0x67, 0xd4, 0x1e, 0x4c, 0x3c, 0x8b, 0x47, 0x7b, 0x8a, 0x45, 0xc3, + 0x77, 0xf8, 0x86, 0xab, 0x7b, 0xdb, 0x07, 0x44, 0x21, 
0x47, 0x1c, 0x1e, 0xf0, 0x6a, 0xab, 0x04, + 0x64, 0xa7, 0x20, 0xba, 0x5b, 0xfd, 0xb6, 0x69, 0xa7, 0xbc, 0x37, 0x12, 0xfd, 0x25, 0x28, 0x12, + 0xa5, 0x1d, 0x36, 0x08, 0x9a, 0x49, 0x94, 0x48, 0x84, 0x3f, 0x0f, 0xf2, 0x31, 0xb5, 0xc4, 0x88, + 0x90, 0x81, 0xd5, 0x81, 0x7e, 0xa0, 0xb2, 0xa0, 0xba, 0xc9, 0x3d, 0x7f, 0xce, 0xf4, 0x9c, 0x2f, + 0x1b, 0x86, 0xd9, 0xd2, 0xcf, 0xf7, 0xa0, 0x71, 0xbd, 0x2f, 0x3f, 0xda, 0x31, 0xab, 0xad, 0x15, + 0xbd, 0x0e, 0x5d, 0x9c, 0x21, 0xa2, 0x97, 0x93, 0xea, 0xf9, 0xc0, 0xf6, 0xce, 0xe2, 0x66, 0xe9, + 0x51, 0xba, 0x8d, 0xcb, 0x04, 0x3a, 0xfa, 0x49, 0x4e, 0xb0, 0xa8, 0xe5, 0x63, 0x68, 0x62, 0xb3, + 0x4d, 0x1e, 0xea, 0x89, 0x60, 0x5f, 0x10, 0xef, 0x99, 0xa5, 0x32, 0xa4, 0x84, 0x02, 0x01, 0xaf, + 0x90, 0x29, 0xa8, 0x96, 0x5c, 0xf3, 0xb5, 0x6d, 0xa0, 0x93, 0xc7, 0x12, 0x19, 0x27, 0xe8, 0x3e, + 0xe5, 0x28, 0x45, 0x41, 0x72, 0xed, 0x8d, 0x6e, 0x9e, 0xf0, 0xdb, 0xc0, 0xcf, 0xe9, 0x58, 0x1a, + 0x9d, 0xb0, 0x30, 0x4c, 0xcf, 0xde, 0xd2, 0xd8, 0xea, 0x05, 0x74, 0xd9, 0xb2, 0x52, 0xd1, 0x99, + 0x7f, 0x10, 0xd8, 0x31, 0x7b, 0xdb, 0x55, 0x1a, 0x4d, 0x06, 0x2f, 0x33, 0x00, 0xb6, 0x38, 0x23, + 0xa9, 0x9b, 0x82, 0x70, 0xe5, 0x68, 0x2c, 0x16, 0x2d, 0x58, 0x0d, 0x22, 0x43, 0x53, 0xa5, 0x5e, + 0x05, 0xa7, 0x2c, 0x10, 0x83, 0x5a, 0xf0, 0x38, 0xc1, 0x93, 0xd6, 0x99, 0x3a, 0x86, 0x86, 0xe3, + 0x1b, 0xbc, 0xa7, 0xaa, 0x17, 0xfd, 0x45, 0x37, 0x27, 0x1f, 0xe2, 0x01, 0x94, 0x45, 0x46, 0x2c, + 0x57, 0xc3, 0x77, 0xf7, 0x63, 0x10, 0x3b, 0x9d, 0x9b, 0x84, 0x39, 0x53, 0x72, 0x21, 0xda, 0x79, + 0xc1, 0x22, 0xd5, 0x82, 0xf6, 0x58, 0xea, 0xc3, 0x24, 0xf2, 0x9b, 0xe2, 0xf2, 0xb7, 0x53, 0x36, + 0x6b, 0xa4, 0xe0, 0xeb, 0x2b, 0xa8, 0x92, 0xca, 0xe6, 0x54, 0x6e, 0x47, 0x3c, 0x53, 0x93, 0x46, + 0xbb, 0xd2, 0xa7, 0xde, 0x46, 0x68, 0x98, 0x65, 0x6f, 0x6c, 0x99, 0x36, 0x05, 0x51, 0x48, 0x8d, + 0x61, 0x52, 0x1d, 0xed, 0x35, 0xc4, 0xe7, 0xa0, 0xb0, 0x40, 0xe6, 0x43, 0xad, 0x1a, 0xd2, 0xb5, + 0x53, 0x10, 0x7a, 0x1d, 0x77, 0x7d, 0x8f, 0x1c, 0xca, 0xdf, 0xc9, 0xb6, 0xad, 0x13, 0x7e, 0x65, + 0xbd, 0x75, 0xcc, 0x32, 0x2e, 0x96, 0x3f, 0x77, 0xd6, 0x85, 0x8d, 0x4b, 0xe2, 0xe1, 0x71, 0x74, + 0xee, 0xb3, 0xcb, 0x50, 0x96, 0x98, 0x8a, 0xbe, 0xb8, 0xc0, 0xbb, 0x34, 0x2b, 0x77, 0x74, 0x50, + 0xa4, 0x32, 0x49, 0xf6, 0xa5, 0xd3, 0x26, 0x24, 0x11, 0x36, 0x57, 0x11, 0x25, 0x7a, 0x9c, 0x03, + 0x7e, 0x6a, 0x5d, 0x5a, 0x24, 0xa3, 0xb7, 0x5a, 0x03, 0x76, 0x91, 0x4e, 0x51, 0xaf, 0x2a, 0x2e, + 0xfd, 0x12, 0x37, 0x7c, 0xaa, 0xd5, 0xd6, 0xd1, 0xde, 0xd8, 0x05, 0x36, 0x18, 0x11, 0xb3, 0x78, + 0x38, 0x64, 0xec, 0x19, 0x99, 0x80, 0x45, 0x22, 0xf2, 0x1d, 0xb7, 0x99, 0x9c, 0x6d, 0xd5, 0x92, + 0x54, 0xc9, 0xce, 0x3b, 0x74, 0x94, 0xcb, 0x54, 0x7c, 0x54, 0x7f, 0x3c, 0xdd, 0x4a, 0x68, 0xc2, + 0xea, 0xcb, 0x66, 0x76, 0x30, 0x9d, 0x35, 0x84, 0xed, 0x2e, 0xa3, 0x63, 0x47, 0x0d, 0xd8, 0x61, + 0x2c, 0xfa, 0xf7, 0x38, 0x0a, 0x03, 0x66, 0x61, 0xe7, 0x04, 0x1d, 0x7a, 0xbf, 0xe8, 0xfe, 0x66, + 0x76, 0xef, 0x4f, 0x61, 0x4c, 0x31, 0x6b, 0x7c, 0xc9, 0xf3, 0xe8, 0xf8, 0xe1, 0x4c, 0x47, 0x39, + 0x1d, 0xe5, 0x0e, 0x5c, 0x68, 0x79, 0x0e, 0x5d, 0xf3, 0xef, 0xe3, 0x32, 0x0a, 0x6c, 0xe8, 0x09, + 0x21, 0xeb, 0x59, 0xd9, 0xce, 0x6d, 0xa3, 0x6d, 0xb8, 0x2b, 0x5e, 0x45, 0x3f, 0xe7, 0xb4, 0xb8, + 0x93, 0x5b, 0x8c, 0xdd, 0xf6, 0xa8, 0x9b, 0xfb, 0xd5, 0xb6, 0xf7, 0x19, 0xc8, 0xff, 0xd4, 0x58, + 0x2c, 0xbe, 0xcb, 0x91, 0x07, 0xd6, 0x95, 0xa0, 0xaf, 0x6d, 0x27, 0xa1, 0xa8, 0x64, 0x72, 0xc5, + 0xa4, 0x62, 0xaa, 0xc8, 0x66, 0x17, 0x62, 0xd9, 0x4e, 0xcf, 0xf2, 0x01, 0x22, 0x98, 0xc9, 0xd1, + 0xd9, 0x08, 0xe4, 0xfc, 0x2a, 0x6d, 0xec, 0x9c, 0x0b, 0x7d, 0x85, 0x3e, 0x7f, 
0x68, 0x00, 0x9b, + 0x44, 0x6b, 0x87, 0x98, 0x43, 0x2c, 0xd7, 0x6e, 0xf7, 0x40, 0x48, 0x38, 0x22, 0xd4, 0x49, 0x57, + 0x84, 0x13, 0xd4, 0xe4, 0x53, 0x34, 0x90, 0xbf, 0xaa, 0x83, 0x70, 0xf7, 0x7a, 0xbd, 0x28, 0xcb, + 0xcc, 0x78, 0x6a, 0xac, 0x8e, 0x99, 0xd0, 0x6f, 0x08, 0x5a, 0x06, 0x99, 0xc8, 0xf9, 0x29, 0xac, + 0x0f, 0x54, 0x65, 0x75, 0x05, 0xa7, 0x1c, 0x37, 0xc1, 0xd4, 0x04, 0x0c, 0xb2, 0x8f, 0x72, 0x7a, + 0xe4, 0xd0, 0xe3, 0x88, 0x49, 0xf7, 0x5e, 0xf4, 0x8f, 0x26, 0x77, 0x08, 0x40, 0x75, 0x7e, 0xf4, + 0x57, 0x64, 0xea, 0xa7, 0xce, 0x14, 0x38, 0x0a, 0xaf, 0xcf, 0x11, 0x86, 0xea, 0x88, 0x35, 0xd2, + 0x84, 0x1d, 0x28, 0xfa, 0x00, 0xbe, 0x46, 0x0e, 0x48, 0x93, 0x56, 0x1c, 0xf7, 0x91, 0x83, 0xdf, + 0xae, 0x0b, 0xe4, 0xc4, 0x1b, 0xa2, 0xac, 0x21, 0x3e, 0x9e, 0xc0, 0xaf, 0x4a, 0xdd, 0x07, 0x19, + 0x2b, 0x59, 0x7f, 0xc7, 0x85, 0x33, 0x98, 0xb3, 0xfd, 0x16, 0xcc, 0x76, 0x2a, 0x4f, 0x28, 0x81, + 0x5d, 0xfe, 0x5d, 0xea, 0x17, 0xd5, 0xad, 0xf5, 0x87, 0xf7, 0x90, 0xff, 0xe0, 0x41, 0xe1, 0x7c, + 0x70, 0xdf, 0xa0, 0x6a, 0x1e, 0x88, 0x7d, 0xe2, 0x41, 0x53, 0x73, 0x85, 0x45, 0x37, 0x79, 0x82, + 0x2e, 0x07, 0x6d, 0x39, 0xb3, 0xfe, 0x06, 0x66, 0x99, 0xbd, 0x81, 0x64, 0xae, 0xfa, 0x70, 0x16, + 0xf9, 0xa4, 0x4a, 0x45, 0xb0, 0x96, 0xc9, 0xfb, 0x45, 0x43, 0x74, 0x3b, 0xe3, 0x8c, 0x92, 0x42, + 0xe2, 0x6a, 0xdc, 0x71, 0xf3, 0x69, 0xab, 0x0a, 0x6b, 0x5d, 0x87, 0x0f, 0x07, 0x3b, 0xa1, 0x83, + 0x63, 0xde, 0xc9, 0x80, 0x67, 0xc8, 0x35, 0x5a, 0x06, 0x4d, 0x09, 0x23, 0xfc, 0x8d, 0x46, 0x94, + 0xb3, 0xc8, 0x49, 0x22, 0xf6, 0x35, 0x60, 0x22, 0xde, 0xc7, 0x26, 0x32, 0x9f, 0xbf, 0x0d, 0xbc, + 0x31, 0xd4, 0x2a, 0xb7, 0x6f, 0x49, 0x3d, 0x95, 0x96, 0x1d, 0x37, 0x83, 0xec, 0xb2, 0x0e, 0x21, + 0xa4, 0x4d, 0x22, 0x69, 0x6d, 0xe6, 0xaa, 0x7e, 0xf4, 0x4c, 0xb0, 0x19, 0x5a, 0x38, 0x6e, 0x4f, + 0x20, 0x75, 0xfd, 0x63, 0xfa, 0x97, 0x30, 0x8e, 0x52, 0xe7, 0x3b, 0xb4, 0xe8, 0x49, 0x95, 0x05, + 0x5d, 0xe4, 0x57, 0x63, 0xbc, 0x34, 0x11, 0x34, 0x4a, 0xf1, 0x80, 0x5c, 0xd1, 0x07, 0x03, 0x1e, + 0x93, 0x52, 0xd3, 0xd6, 0x0f, 0x25, 0xff, 0xcc, 0x04, 0x0f, 0x9f, 0xb9, 0x07, 0x60, 0xf7, 0x75, + 0x77, 0x26, 0xdf, 0xb6, 0xc5, 0x7a, 0xf3, 0xca, 0xbe, 0x28, 0xc1, 0x65, 0x0e, 0x5f, 0x6b, 0x82, + 0x06, 0x47, 0xc7, 0xda, 0x8c, 0x41, 0x86, 0xdb, 0x64, 0x79, 0x06, 0x02, 0x29, 0xf7, 0xe6, 0x9a, + 0x82, 0x4f, 0xae, 0x60, 0x8b, 0x48, 0x02, 0x3b, 0x8f, 0x6c, 0x85, 0x31, 0x1d, 0x14, 0x17, 0x76, + 0xf4, 0x95, 0x62, 0xbc, 0x2e, 0xe4, 0x3a, 0x5c, 0x8e, 0x03, 0x0c, 0xbd, 0x70, 0x72, 0x2f, 0xd5, + 0xf4, 0x81, 0x8f, 0x93, 0xaf, 0xcf, 0xdd, 0xee, 0x58, 0xd0, 0x7a, 0xbf, 0xd6, 0x29, 0x60, 0x54, + 0xf2, 0x62, 0x76, 0x10, 0xbb, 0x89, 0x42, 0x8e, 0x4f, 0x49, 0x5c, 0x3a, 0xcc, 0x3b, 0x1c, 0x83, + 0xd7, 0xe6, 0x56, 0x9e, 0x33, 0xd9, 0xe9, 0xf3, 0xaf, 0x35, 0x34, 0xf5, 0x56, 0xfd, 0x83, 0x1e, + 0x83, 0x8a, 0x48, 0x94, 0xf2, 0x47, 0x9f, 0x0e, 0xbb, 0x53, 0xc3, 0x55, 0x33, 0x3f, 0x1d, 0xe9, + 0xdd, 0xab, 0x78, 0xb5, 0x17, 0x0b, 0xa6, 0x97, 0x64, 0xaf, 0x22, 0x20, 0x49, 0x32, 0xe0, 0xc3, + 0x4f, 0x3d, 0xba, 0x83, 0x61, 0x2f, 0x1b, 0x60, 0xa5, 0xce, 0x50, 0x0d, 0x90, 0x8f, 0xfc, 0xf4, + 0xe9, 0xf2, 0x40, 0x89, 0x14, 0x0c, 0x9f, 0xde, 0x9c, 0xe2, 0xcc, 0x8c, 0xff, 0x68, 0xc7, 0x95, + 0xe1, 0x43, 0x18, 0x32, 0xbf, 0x81, 0x7b, 0xa4, 0x87, 0x6d, 0xc5, 0x26, 0xa8, 0x6d, 0xe1, 0x4d, + 0x04, 0x02, 0xe2, 0x89, 0x06, 0xef, 0x8c, 0x2e, 0xe8, 0x28, 0x65, 0xdf, 0x6f, 0xe9, 0x05, 0x92, + 0xbf, 0xce, 0x0a, 0xc5, 0xc9, 0x49, 0x84, 0xeb, 0xc8, 0x68, 0x5e, 0xad, 0xc9, 0xd2, 0xdc, 0x9c, + 0x1d, 0xf5, 0x51, 0xfd, 0x15, 0x8a, 0x71, 0xac, 0xd3, 0x63, 0xab, 0xc2, 0x5d, 0x63, 0x42, 0xa7, + 0x96, 
0x4a, 0xee, 0xb6, 0xa2, 0xbf, 0x14, 0xb4, 0x2f, 0x07, 0x24, 0xaf, 0x16, 0x26, 0x65, 0x68, + 0xfc, 0xc9, 0x8f, 0x19, 0x64, 0x87, 0x76, 0x68, 0x3c, 0x22, 0xb8, 0x15, 0x2a, 0xeb, 0xd9, 0xf4, + 0x9b, 0x69, 0xea, 0x03, 0xe5, 0x1a, 0x37, 0x3e, 0x76, 0x53, 0x75, 0x4b, 0xe3, 0xa7, 0x76, 0xb5, + 0x3b, 0xeb, 0x37, 0xe1, 0x6a, 0x4f, 0x86, 0xce, 0x52, 0xfe, 0x13, 0x11, 0xb7, 0x18, 0x05, 0xee, + 0x8c, 0x61, 0xca, 0x2b, 0xf0, 0x28, 0xc2, 0xfd, 0x5c, 0x3e, 0xdc, 0x3a, 0x8a, 0xa2, 0xaf, 0xe6, + 0x78, 0xb9, 0x2b, 0x0a, 0x7b, 0x77, 0xb0, 0x94, 0xb4, 0x4c, 0xae, 0x77, 0xde, 0x46, 0x47, 0xc6, + 0xd1, 0x2a, 0xfb, 0x66, 0x9c, 0x29, 0x14, 0x7b, 0x6d, 0xb2, 0x47, 0x31, 0xec, 0xcd, 0x70, 0x64, + 0x88, 0x1f, 0x08, 0x2e, 0x39, 0x73, 0x74, 0xf8, 0x39, 0x92, 0x24, 0x4e, 0xc9, 0x9b, 0x07, 0x44, + 0xc8, 0xc5, 0x5d, 0x77, 0x11, 0x6a, 0xd3, 0x53, 0x8b, 0xd0, 0xa3, 0x08, 0xc5, 0x9f, 0xf5, 0x4b, + 0xe8, 0x29, 0x52, 0xdc, 0xbe, 0xd5, 0xe5, 0xb2, 0x43, 0x4d, 0xa9, 0x47, 0xd7, 0x4b, 0x1f, 0x8e, + 0xde, 0xcb, 0xfe, 0x2e, 0x47, 0x80, 0xa6, 0xdc, 0x3e, 0x72, 0xdd, 0x12, 0x5b, 0x1e, 0xfe, 0x7e, + 0xbc, 0x54, 0xd3, 0xf7, 0x2a, 0xc4, 0xb0, 0x03, 0x92, 0xd3, 0x2c, 0x21, 0xfb, 0x11, 0x17, 0x2c, + 0xf5, 0x2a, 0x4a, 0x3d, 0x49, 0xf7, 0x7c, 0xac, 0x55, 0x7f, 0x0f, 0xc3, 0x53, 0xd7, 0xdf, 0x5e, + 0x97, 0x16, 0xb5, 0x18, 0xbb, 0x71, 0x1c, 0x7c, 0xb7, 0x30, 0x20, 0x78, 0x25, 0x8a, 0xab, 0x8f, + 0xd1, 0xdc, 0x82, 0x71, 0xda, 0x78, 0xa7, 0xfb, 0x76, 0x39, 0x06, 0x8a, 0x5e, 0xc8, 0x23, 0x59, + 0xe1, 0x0d, 0x5a, 0xe3, 0xef, 0xc2, 0xeb, 0x5d, 0x42, 0x41, 0xd1, 0x09, 0xe4, 0x73, 0x94, 0xd1, + 0x77, 0xeb, 0xd2, 0x37, 0x52, 0xea, 0x6a, 0xdb, 0xa8, 0xf1, 0x5f, 0x03, 0xd7, 0x4d, 0xa3, 0x29, + 0x6d, 0x2a, 0x39, 0xc7, 0x4f, 0x13, 0xee, 0x71, 0xd5, 0x30, 0xa0, 0x56, 0x64, 0xf0, 0xcb, 0x76, + 0x53, 0x77, 0x0a, 0xea, 0x62, 0xc1, 0x33, 0x1a, 0x01, 0x92, 0xb7, 0x3f, 0x37, 0x30, 0xdb, 0x21, + 0x2e, 0xb7, 0x15, 0x30, 0x0a, 0x69, 0x34, 0x67, 0x33, 0xcc, 0xdf, 0xae, 0x26, 0x9f, 0x33, 0x3f, + 0xa1, 0x6d, 0x7f, 0xbd, 0xda, 0xea, 0x30, 0x72, 0x37, 0xde, 0x69, 0x6e, 0x34, 0x3c, 0x3f, 0x5a, + 0x4d, 0xa2, 0x91, 0xa9, 0xae, 0xfd, 0xd3, 0x85, 0x10, 0x76, 0x1c, 0x76, 0x64, 0x88, 0xe1, 0x80, + 0x80, 0x2b, 0xb0, 0xbe, 0x1c, 0x09, 0x78, 0x41, 0x2c, 0x75, 0x79, 0xa2, 0xb4, 0xfd, 0x2e, 0x2d, + 0xa8, 0x36, 0x02, 0x3e, 0x34, 0xbe, 0x85, 0x5b, 0x99, 0x6e, 0xf0, 0xde, 0xa2, 0x3e, 0x46, 0x62, + 0x6f, 0xec, 0x27, 0x85, 0x47, 0x9c, 0xa0, 0xf8, 0xee, 0x06, 0xf4, 0xbd, 0x34, 0xe9, 0x31, 0x57, + 0x09, 0xe9, 0x25, 0xf9, 0xa5, 0x8f, 0xed, 0xd1, 0x1f, 0xb5, 0x22, 0x8a, 0xf3, 0x4a, 0x5d, 0xfd, + 0x84, 0x01, 0x8c, 0xa3, 0x7c, 0x0f, 0xb7, 0x25, 0x75, 0x3d, 0xc7, 0x34, 0xd3, 0xf9, 0xa3, 0xe9, + 0x17, 0x25, 0x68, 0x9b, 0xb1, 0x8d, 0x39, 0xcf, 0x92, 0x95, 0x36, 0xe8, 0x9c, 0x61, 0xbb, 0xaa, + 0x87, 0x91, 0x76, 0x1d, 0x7c, 0x59, 0xb1, 0x41, 0xe8, 0x6a, 0xbb, 0x7b, 0xa9, 0xf2, 0x03, 0x9f, + 0x75, 0x57, 0xaf, 0x24, 0xc9, 0xea, 0x42, 0xac, 0x4d, 0x71, 0x62, 0x99, 0xaa, 0x31, 0x31, 0x7b, + 0x19, 0x8c, 0x32, 0x6b, 0x31, 0xb3, 0xa8, 0x21, 0xab, 0x80, 0xf9, 0xc8, 0xf6, 0xf5, 0xce, 0xbf, + 0xa2, 0x2a, 0x9b, 0x3f, 0xf6, 0xda, 0xbd, 0x0f, 0xe1, 0xaf, 0x66, 0x5a, 0xfb, 0xa1, 0x24, 0x31, + 0xd1, 0xab, 0x85, 0x78, 0xd0, 0x8a, 0x8f, 0x61, 0x15, 0xea, 0x4d, 0xd9, 0xd0, 0xe5, 0x53, 0x38, + 0xfc, 0x9f, 0xda, 0x31, 0xf4, 0x2b, 0x67, 0x17, 0x28, 0x5e, 0x9c, 0x63, 0x89, 0x8d, 0xeb, 0x51, + 0x22, 0xe7, 0x95, 0x35, 0xa5, 0xe6, 0x8c, 0xe7, 0xf5, 0xb3, 0x3c, 0xf5, 0x23, 0xa1, 0xb2, 0xac, + 0x04, 0x6f, 0x5c, 0xee, 0x67, 0xe6, 0x5a, 0xf3, 0x4a, 0xaf, 0xe3, 0x69, 0xfb, 0x42, 0x57, 0x6e, + 0x10, 0x55, 0x0f, 0xf3, 0x47, 
0x68, 0xa1, 0x02, 0xad, 0xd5, 0x40, 0xd6, 0x1c, 0xeb, 0x4c, 0xc3, + 0x14, 0x1d, 0xcd, 0x0d, 0xf4, 0xf2, 0x46, 0x68, 0xd7, 0x69, 0xc2, 0xd7, 0x65, 0x26, 0xe0, 0x56, + 0xe3, 0xf2, 0x5a, 0x22, 0x9e, 0xfb, 0xe3, 0x41, 0x05, 0xf9, 0xc5, 0x5d, 0x4c, 0xf4, 0x5e, 0x4b, + 0x1d, 0x2a, 0x95, 0x2e, 0x62, 0xed, 0x81, 0xd0, 0xbe, 0x6f, 0xc5, 0xb1, 0xe4, 0xbd, 0x68, 0x1c, + 0xae, 0x00, 0xa5, 0x97, 0xcd, 0x2f, 0x87, 0xae, 0xf6, 0x6c, 0x49, 0xc1, 0x2e, 0xeb, 0xd0, 0x2b, + 0xd0, 0x53, 0x8c, 0x14, 0x06, 0x12, 0x03, 0xc8, 0xd7, 0x1d, 0x4d, 0x86, 0xa2, 0xf8, 0x4c, 0xbe, + 0xc3, 0xcf, 0x8d, 0x82, 0xfb, 0xbb, 0xe2, 0x42, 0xdf, 0xe8, 0xcb, 0x63, 0x56, 0x3c, 0x80, 0xa0, + 0xbc, 0x49, 0x22, 0x3c, 0xc8, 0x86, 0xd0, 0x23, 0x12, 0xee, 0x7a, 0x33, 0x8e, 0xaa, 0x2b, 0x39, + 0x00, 0xf5, 0x15, 0x4d, 0x8c, 0xc2, 0x6e, 0x14, 0x53, 0x14, 0xe4, 0xb4, 0xe7, 0x25, 0x13, 0x79, + 0x9c, 0xb8, 0xea, 0x26, 0xc4, 0x8a, 0x66, 0x48, 0x8e, 0xd7, 0xfc, 0x83, 0x7d, 0x08, 0x70, 0x56, + 0x9f, 0xc3, 0x21, 0x73, 0x56, 0x20, 0xfe, 0x7b, 0x7e, 0xde, 0x59, 0x60, 0xa9, 0xa9, 0x14, 0x64, + 0x51, 0x68, 0xfe, 0x47, 0xff, 0xf0, 0x44, 0xb2, 0xc2, 0x65, 0x0e, 0xa6, 0x38, 0x60, 0xa7, 0x4c, + 0x8d, 0x90, 0xe5, 0x71, 0x9f, 0x3e, 0xe4, 0xbe, 0x74, 0x71, 0xa9, 0x4a, 0x91, 0xa4, 0xde, 0x4a, + 0x96, 0xa4, 0x23, 0xea, 0x12, 0x40, 0xbd, 0xc2, 0xfe, 0xbe, 0x6d, 0x12, 0x20, 0x45, 0x80, 0x85, + 0xd0, 0x54, 0x35, 0x2b, 0x59, 0x60, 0x4b, 0x11, 0xdd, 0x09, 0xd3, 0x3a, 0xc8, 0x29, 0xd6, 0x0f, + 0xed, 0x5b, 0x8d, 0x55, 0x9f, 0x4b, 0x64, 0x8e, 0xaa, 0xb9, 0x06, 0x00, 0xb6, 0xc3, 0xd4, 0x5a, + 0xe9, 0xb5, 0x62, 0x2b, 0xa8, 0xc8, 0x6c, 0x6d, 0xb9, 0x26, 0xb0, 0x47, 0x65, 0xad, 0x19, 0x67, + 0x0f, 0x85, 0xef, 0x37, 0xc5, 0x49, 0xd5, 0xed, 0x8f, 0x89, 0x52, 0x92, 0x29, 0x81, 0x0b, 0xf0, + 0x30, 0xe6, 0x84, 0x1b, 0x0b, 0xde, 0x6c, 0x40, 0x0a, 0xfa, 0xea, 0xc8, 0x11, 0xd3, 0x77, 0xbb, + 0xe6, 0x91, 0xad, 0x6b, 0x69, 0xd0, 0x24, 0x60, 0xfe, 0x8d, 0xd5, 0x29, 0xfc, 0x61, 0xc6, 0x82, + 0x0a, 0x35, 0x3c, 0x20, 0xe1, 0xa8, 0xf8, 0xa3, 0xe2, 0x15, 0xe5, 0x77, 0xc1, 0xf5, 0x01, 0x6f, + 0x8e, 0x16, 0x84, 0xfa, 0xdd, 0xac, 0xfe, 0xcf, 0xa7, 0x60, 0x50, 0xd8, 0xe1, 0x57, 0xc5, 0xab, + 0x58, 0x28, 0xb6, 0x7d, 0x69, 0xb9, 0x44, 0x3e, 0x80, 0xba, 0xca, 0xd8, 0x61, 0x4f, 0x38, 0x38, + 0x33, 0xca, 0x25, 0xf8, 0x3d, 0xd8, 0xcf, 0x64, 0x8f, 0x0d, 0xd7, 0x06, 0xdf, 0x64, 0x27, 0x67, + 0x74, 0xf2, 0x92, 0x13, 0xbf, 0x06, 0xd6, 0x92, 0x11, 0xf0, 0x65, 0x6c, 0xb8, 0xcf, 0xb9, 0xa3, + 0xaf, 0x6f, 0xe1, 0x4b, 0x82, 0xcc, 0x45, 0x87, 0x71, 0xc7, 0xe3, 0xf0, 0xa5, 0x2a, 0x20, 0x2b, + 0x9d, 0x0f, 0x3f, 0xcb, 0x39, 0x54, 0x86, 0x89, 0xe6, 0xf5, 0x78, 0x22, 0x38, 0x5a, 0x9f, 0xb6, + 0x0b, 0x20, 0xdc, 0x93, 0x81, 0x18, 0xd3, 0xd7, 0x49, 0xa5, 0xa5, 0x14, 0x01, 0x0d, 0x75, 0xe1, + 0x9a, 0xd5, 0x1b, 0xd3, 0x32, 0x7b, 0xb9, 0xa8, 0x6f, 0xad, 0xb0, 0xe6, 0xe9, 0x2e, 0x22, 0x2f, + 0x0c, 0x29, 0xfa, 0x8c, 0x7c, 0x91, 0x1c, 0xea, 0xe3, 0x0e, 0xce, 0x5e, 0xaa, 0xba, 0xd5, 0x37, + 0xd0, 0x3e, 0x47, 0x61, 0x27, 0xb0, 0x8d, 0x40, 0x7f, 0xfe, 0x7f, 0xf7, 0x93, 0x00, 0x14, 0x6c, + 0x79, 0xe0, 0xa1, 0x3b, 0xad, 0x13, 0xb0, 0xc6, 0x43, 0x2c, 0xef, 0x15, 0xb6, 0x85, 0x1b, 0xba, + 0x53, 0x75, 0x5e, 0xdb, 0xdd, 0xd7, 0x01, 0x2a, 0x2b, 0xed, 0x34, 0xf0, 0x43, 0x96, 0x71, 0x93, + 0xa3, 0x25, 0x33, 0xc2, 0xfb, 0xc6, 0x9b, 0x39, 0x64, 0x41, 0x42, 0x63, 0x9b, 0x98, 0x1b, 0x85, + 0x11, 0xcc, 0xc2, 0xa6, 0x36, 0x55, 0x25, 0xc5, 0x76, 0x1a, 0x5f, 0xec, 0xed, 0xda, 0x43, 0x86, + 0xa6, 0xee, 0x9e, 0x32, 0xdf, 0x64, 0x49, 0x85, 0xd1, 0x39, 0x8a, 0x3c, 0x7d, 0x1f, 0xc3, 0x47, + 0xc4, 0x8b, 0x57, 0x34, 0xa3, 0x09, 0x93, 0x2d, 0xb2, 
0xb4, 0x9b, 0x07, 0x3f, 0xfb, 0x19, 0x85, + 0x1f, 0xdf, 0xa5, 0x50, 0xb1, 0x0a, 0xdd, 0x5c, 0xcf, 0xc2, 0xa7, 0x82, 0x1a, 0xca, 0x81, 0xc1, + 0x52, 0x75, 0x56, 0x13, 0x87, 0x1c, 0xe6, 0xc0, 0xd9, 0xb3, 0x8d, 0x94, 0x7f, 0x73, 0x55, 0xb3, + 0xbe, 0x9f, 0x61, 0x21, 0x05, 0x87, 0xdc, 0x77, 0xea, 0x21, 0x7b, 0x4f, 0x45, 0x38, 0x1d, 0xe8, + 0x05, 0xcf, 0x3a, 0xb8, 0x01, 0xf7, 0xb6, 0x47, 0xda, 0x43, 0x9f, 0xac, 0x1a, 0x5e, 0xd5, 0xc4, + 0x80, 0x5d, 0xeb, 0x2b, 0xf0, 0x9b, 0xe4, 0x34, 0xe4, 0x63, 0x5e, 0xca, 0x4d, 0x81, 0x73, 0x74, + 0xcb, 0x61, 0x3c, 0xe3, 0x2e, 0xcb, 0x2a, 0x65, 0x56, 0xc9, 0x3b, 0x89, 0x57, 0x7f, 0x7a, 0xa5, + 0x52, 0x10, 0xd9, 0x58, 0x60, 0x8a, 0xa6, 0x4d, 0x92, 0xcb, 0x0d, 0x28, 0xae, 0x88, 0xda, 0x9e, + 0xf4, 0x16, 0x3c, 0xe2, 0x78, 0xe6, 0x9d, 0xbb, 0x62, 0x08, 0xed, 0xa7, 0x1f, 0x85, 0xba, 0x8d, + 0xd1, 0x23, 0xd7, 0x72, 0x9b, 0xdc, 0x17, 0x4c, 0x21, 0xaa, 0x58, 0x16, 0x07, 0x7e, 0xb6, 0xd9, + 0xda, 0xa9, 0xd6, 0x88, 0xc3, 0xbb, 0xe1, 0x5d, 0xdf, 0xf3, 0xdf, 0x75, 0xb0, 0x51, 0xc6, 0xe1, + 0x91, 0xb3, 0x3d, 0xd2, 0xac, 0x98, 0x05, 0xfc, 0x5c, 0xd1, 0x51, 0x9b, 0x9d, 0xe1, 0xd0, 0xa7, + 0xc4, 0x29, 0xe7, 0xe6, 0x79, 0xe2, 0x6d, 0xcf, 0xe7, 0xae, 0x5f, 0x87, 0x5b, 0x90, 0xe5, 0xa8, + 0x5c, 0x85, 0xd9, 0x90, 0x35, 0x79, 0xf7, 0xe6, 0xc8, 0x77, 0x0b, 0x21, 0x9a, 0xd8, 0x9f, 0x4b, + 0x9e, 0xbd, 0x2d, 0x3c, 0xaf, 0x4a, 0x19, 0x23, 0x55, 0x95, 0x60, 0x31, 0x0a, 0x41, 0x60, 0x1e, + 0x44, 0x69, 0x8c, 0x70, 0x3d, 0xaa, 0x79, 0x8c, 0x2b, 0x67, 0xea, 0xb3, 0x76, 0x24, 0x03, 0x5c, + 0xc7, 0xae, 0xa6, 0x8e, 0x80, 0x83, 0xf9, 0xf8, 0xef, 0xab, 0xc7, 0xb5, 0x52, 0xc6, 0x90, 0x34, + 0x0e, 0x56, 0xdb, 0x0c, 0xc4, 0x69, 0x34, 0xcb, 0xfe, 0xba, 0x47, 0xee, 0xa8, 0x7e, 0xae, 0xee, + 0x3c, 0xcf, 0xc9, 0xcb, 0xc4, 0xa6, 0x18, 0x8e, 0x3c, 0x95, 0x79, 0x00, 0x6f, 0x87, 0x90, 0x30, + 0xc1, 0xe1, 0x37, 0x35, 0x24, 0x9a, 0x21, 0x38, 0x1e, 0x57, 0xf3, 0x71, 0xd0, 0xcf, 0x47, 0xb0, + 0xcc, 0xe1, 0x16, 0xfe, 0xf2, 0x4a, 0x7c, 0x98, 0x6b, 0x6b, 0xcc, 0x1b, 0xad, 0x81, 0xde, 0xe3, + 0x99, 0xb5, 0xc3, 0x15, 0x1c, 0xeb, 0xf1, 0x65, 0x4c, 0x8c, 0x82, 0xfb, 0x45, 0xfe, 0x77, 0x3e, + 0xec, 0xd2, 0x9d, 0x02, 0xd1, 0x75, 0xc5, 0xea, 0x0b, 0xfd, 0x41, 0x7b, 0x13, 0x4f, 0x9a, 0x68, + 0x91, 0xd4, 0xd8, 0x22, 0xd8, 0x00, 0x11, 0x0f, 0x9d, 0x69, 0xce, 0x76, 0xa6, 0xa9, 0x7c, 0xba, + 0xf9, 0xa8, 0xdb, 0x9e, 0x4f, 0x25, 0x53, 0xb1, 0xc1, 0xe0, 0xdc, 0x90, 0x44, 0x75, 0xb6, 0x60, + 0x32, 0x11, 0x97, 0x16, 0x6a, 0xd7, 0x5e, 0xce, 0xe9, 0xb4, 0x65, 0x96, 0xef, 0xda, 0xf7, 0x22, + 0x17, 0x24, 0x39, 0x7c, 0x50, 0xd1, 0x3e, 0xfd, 0x95, 0xaa, 0x63, 0xa7, 0x7d, 0xa8, 0x80, 0x3d, + 0xfd, 0xb8, 0xde, 0xfa, 0x14, 0x6f, 0xff, 0x22, 0x33, 0xde, 0xec, 0x0a, 0x3e, 0x6e, 0x93, 0x99, + 0x09, 0x63, 0x49, 0x5a, 0x3e, 0x3c, 0x06, 0xd0, 0xca, 0x87, 0xf0, 0x71, 0xda, 0x70, 0xa0, 0xfe, + 0x10, 0x3b, 0x90, 0x1f, 0x38, 0x9d, 0xb5, 0xd7, 0x92, 0x83, 0x4a, 0x00, 0xbf, 0x4d, 0x50, 0xf8, + 0x79, 0x9c, 0xdd, 0x45, 0x66, 0x1a, 0xeb, 0x9b, 0x5d, 0x7f, 0x1f, 0x71, 0xde, 0xf6, 0x21, 0xe0, + 0xf2, 0xbe, 0xb3, 0xcc, 0xc4, 0xa6, 0xad, 0x6a, 0x6f, 0x8d, 0x9e, 0x76, 0xb8, 0xfd, 0x28, 0xc0, + 0x93, 0x52, 0x80, 0x0f, 0xf0, 0x8d, 0xe6, 0x65, 0x77, 0xb8, 0xd4, 0xd2, 0x84, 0x98, 0x83, 0x10, + 0x3a, 0x23, 0xce, 0xa0, 0xb8, 0x2a, 0x1d, 0xfc, 0x1e, 0x8f, 0x47, 0x14, 0xcf, 0x56, 0xa3, 0x88, + 0x68, 0x01, 0x50, 0x1d, 0x4f, 0xe0, 0x01, 0xf1, 0xdd, 0xd8, 0x73, 0x18, 0x7b, 0x92, 0x35, 0x22, + 0xb8, 0x58, 0x08, 0x95, 0x16, 0xcf, 0x75, 0x98, 0x97, 0xe4, 0xdb, 0xf5, 0xe9, 0x1a, 0xc2, 0xb4, + 0x0e, 0xdc, 0x2e, 0x5a, 0x3b, 0x99, 0xb0, 0x77, 0xa0, 0xab, 0xdd, 0x76, 0xcf, 
0xd3, 0xfc, 0x43, + 0x38, 0x67, 0xea, 0x00, 0x04, 0xde, 0x9f, 0x78, 0x20, 0x14, 0xbd, 0xc2, 0x03, 0xe3, 0x30, 0xbd, + 0x4f, 0x4c, 0x5c, 0x6d, 0x90, 0xf2, 0x3e, 0x37, 0x11, 0xac, 0xef, 0x22, 0x51, 0xaa, 0x25, 0xe9, + 0x1c, 0x21, 0x17, 0x53, 0x16, 0x68, 0x09, 0x64, 0x7e, 0x86, 0x95, 0xec, 0x9c, 0xb5, 0x15, 0x47, + 0x70, 0x3b, 0x32, 0xfe, 0x2c, 0x12, 0xf7, 0x9c, 0xe0, 0x30, 0x0d, 0x2e, 0x93, 0x76, 0xd7, 0x43, + 0x23, 0x60, 0xbb, 0xbe, 0x0c, 0xc1, 0x39, 0x3a, 0x9e, 0x5b, 0x00, 0xa2, 0x54, 0xf5, 0x16, 0x6a, + 0xf9, 0x45, 0xd7, 0x2f, 0xcf, 0x78, 0xe9, 0x9a, 0x7b, 0xdd, 0xb4, 0x77, 0x2a, 0xa7, 0xf1, 0xe6, + 0xe8, 0xc5, 0x43, 0x04, 0xa1, 0xf9, 0x74, 0x9b, 0x10, 0x0a, 0x86, 0xe6, 0xa6, 0xb8, 0xc7, 0x20, + 0x75, 0x23, 0x30, 0x94, 0x5e, 0x3d, 0x29, 0xae, 0xa0, 0x83, 0xe7, 0x0f, 0xcc, 0xa3, 0x92, 0x32, + 0xd7, 0xcd, 0xc5, 0x89, 0xf2, 0x0d, 0x5b, 0x7e, 0xb3, 0x8d, 0xbc, 0x62, 0xec, 0x32, 0xa2, 0x1e, + 0x1f, 0xe7, 0xa5, 0xaa, 0xf1, 0x56, 0xc0, 0xc0, 0xe1, 0x45, 0x9b, 0x98, 0xf8, 0x26, 0xc8, 0x33, + 0x88, 0x01, 0x9a, 0x45, 0x25, 0x10, 0x7a, 0xcb, 0x4d, 0xa5, 0x7a, 0xc5, 0x2b, 0x5c, 0x73, 0xf5, + 0x3c, 0xcf, 0x3c, 0x78, 0x89, 0x3d, 0x99, 0xbc, 0xf9, 0xb6, 0xaa, 0xfe, 0x3c, 0x56, 0x0b, 0x24, + 0x52, 0x3f, 0xf2, 0x7e, 0xee, 0xf3, 0xcc, 0xad, 0x0e, 0xc0, 0x5c, 0x3d, 0x17, 0xad, 0xea, 0x44, + 0x63, 0xb4, 0xe2, 0x84, 0x99, 0x43, 0x56, 0x5d, 0x92, 0x23, 0xd1, 0x1d, 0xa9, 0xdb, 0x9d, 0x93, + 0x8e, 0x90, 0xd5, 0x84, 0x71, 0x1f, 0x74, 0x6b, 0xe9, 0x00, 0xba, 0x01, 0x44, 0x39, 0xb2, 0x07, + 0xad, 0xa4, 0xab, 0x9e, 0xd7, 0x6a, 0xb6, 0x85, 0x38, 0x30, 0x03, 0x14, 0x00, 0xc6, 0x41, 0x37, + 0x21, 0x06, 0x50, 0x0c, 0xeb, 0xcc, 0x52, 0x39, 0xcf, 0xce, 0xf0, 0x36, 0x4c, 0x6d, 0x29, 0xb4, + 0x43, 0xed, 0x5a, 0x87, 0x08, 0xb9, 0x3c, 0xe8, 0x1f, 0xd6, 0xae, 0xb8, 0x4f, 0x45, 0xfb, 0x99, + 0x55, 0x6a, 0x69, 0xac, 0xd3, 0xc4, 0x2f, 0x20, 0x69, 0x4a, 0xec, 0xd6, 0xe2, 0x1e, 0xba, 0x37, + 0x00, 0x72, 0xd5, 0xe2, 0x0d, 0x54, 0x94, 0x95, 0x60, 0x05, 0x23, 0xcd, 0x0e, 0x9c, 0x97, 0x02, + 0x12, 0xa4, 0x2a, 0xe0, 0xd5, 0x9e, 0x32, 0xf6, 0x4c, 0x6b, 0x85, 0x9a, 0xff, 0x2d, 0x7b, 0x4d, + 0x9f, 0x61, 0xfe, 0x9c, 0x17, 0x14, 0xb2, 0x48, 0x13, 0xa0, 0x21, 0x33, 0xde, 0x95, 0xe3, 0xe2, + 0xc4, 0xc4, 0x10, 0x0f, 0x86, 0xa0, 0x1c, 0x8f, 0xbe, 0x1a, 0x0e, 0x61, 0xd7, 0x4f, 0x93, 0x74, + 0x30, 0x72, 0x09, 0x8d, 0xc9, 0xba, 0xb5, 0x14, 0xea, 0xea, 0xb7, 0xef, 0x90, 0xc8, 0x85, 0x9e, + 0xf5, 0xdf, 0xf7, 0x23, 0xb7, 0xd9, 0xf9, 0xd0, 0x98, 0xf9, 0x74, 0x94, 0xf9, 0x41, 0xb9, 0x68, + 0xf1, 0x98, 0x33, 0xdc, 0xd5, 0x6e, 0x82, 0xa0, 0x50, 0x20, 0x27, 0xcf, 0xf9, 0xc6, 0xf3, 0x34, + 0xf6, 0x81, 0xd5, 0xc6, 0x34, 0x65, 0x2c, 0x6c, 0xcd, 0x11, 0xee, 0x60, 0xe0, 0x37, 0x06, 0x75, + 0x6e, 0xa4, 0x0d, 0xc5, 0x7d, 0xf4, 0xaa, 0xa8, 0x55, 0x9d, 0xe5, 0xe2, 0xa0, 0x98, 0x0f, 0x73, + 0x26, 0x04, 0x7c, 0x46, 0xcb, 0xae, 0x97, 0xe3, 0x7f, 0x11, 0x54, 0xbc, 0x1e, 0x67, 0x64, 0x79, + 0x9c, 0x5d, 0xbb, 0x77, 0xe2, 0x20, 0xf1, 0xc2, 0x6b, 0x33, 0xb0, 0xff, 0xe4, 0xa1, 0x11, 0x6c, + 0x41, 0x70, 0xe9, 0x2d, 0x58, 0x8f, 0x37, 0x1a, 0xc1, 0x49, 0xc5, 0x73, 0xd7, 0x66, 0xd0, 0x8b, + 0x6a, 0x06, 0x66, 0x8c, 0xfd, 0x10, 0x83, 0xff, 0xb8, 0xb6, 0xd6, 0x85, 0x87, 0xf6, 0x2d, 0xe7, + 0x84, 0xb2, 0x14, 0x4b, 0xf8, 0x9f, 0xf2, 0x98, 0x6c, 0x86, 0x5d, 0xa5, 0xad, 0x13, 0xba, 0xf1, + 0x4a, 0x48, 0x76, 0xc1, 0x9c, 0xeb, 0xaa, 0x90, 0x96, 0x5e, 0x00, 0x6f, 0x21, 0xee, 0xd7, 0x46, + 0x11, 0x6a, 0x9e, 0x5c, 0xfc, 0xf7, 0xfb, 0x4f, 0x69, 0x9a, 0x06, 0x53, 0x22, 0xc3, 0x8b, 0x10, + 0x06, 0x37, 0x89, 0xa0, 0x5e, 0x28, 0xdf, 0xa6, 0x0f, 0xe4, 0xea, 0xfe, 0x67, 0x2e, 0x6e, 0x31, + 0xb8, 
0x20, 0xcf, 0xc9, 0x2b, 0xc2, 0xda, 0x00, 0xc1, 0x78, 0x89, 0x96, 0xa8, 0x90, 0x48, 0x56, + 0xd5, 0x56, 0x7b, 0xea, 0x9d, 0xc7, 0x97, 0x59, 0x38, 0x8b, 0xc6, 0x76, 0x94, 0x02, 0x48, 0x2f, + 0x60, 0xb4, 0xdb, 0x0d, 0x99, 0xd0, 0x39, 0x19, 0x85, 0xf1, 0xb6, 0xaf, 0x55, 0x3b, 0x4b, 0x0d, + 0xbf, 0xd8, 0x50, 0x10, 0x4e, 0x55, 0x16, 0x12, 0xbc, 0xe0, 0xe6, 0xe1, 0x09, 0xf2, 0x1e, 0x25, + 0x78, 0xce, 0x2a, 0xb9, 0x4d, 0xbe, 0x44, 0xf5, 0x42, 0x60, 0x97, 0x80, 0x9d, 0x19, 0x36, 0x6a, + 0x32, 0x53, 0xa1, 0xb8, 0x14, 0x7d, 0xea, 0xff, 0xb7, 0xdb, 0xff, 0xaf, 0xff, 0x3f, 0xf2, 0xcf, + 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, + 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, + 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, + 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0x9f, 0xfb, 0x5f, 0xff, 0x18, 0xe6, 0xcf, 0x00, 0x91, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 37120, // uncompressed data size (bytes) + 25360, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x4e, 0x05, 0x62, 0x6d, 0x08, 0x13, 0x4c, 0xa4, 0x40, 0x69, + 0x20, 0x00, 0x00, 0x9c, 0xaf, 0x89, 0x9b, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_dbg_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 768 +// COMPRESSED SIZE (bytes): 781 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_sig_dbg_data[] = +{ + 0x01, 0x00, 0x03, 0xff, 0xfc, 0xdb, 0xe8, 0xf7, 0xf0, 0x27, 0xbd, 0x22, 0x57, 0xa9, 0xdf, 0x76, + 0x6d, 0x56, 0x21, 0xbd, 0x9b, 0x4d, 0xf0, 0x28, 0x31, 0x0c, 0x22, 0xd9, 0x16, 0x2b, 0xc7, 0x51, + 0xff, 0x15, 0xaa, 0x6d, 0x09, 0x78, 0xed, 0x83, 0xd9, 0x59, 0xad, 0xc8, 0x78, 0x86, 0x87, 0xf4, + 0x73, 0x8f, 0xf7, 0x13, 0x95, 0x0b, 0x23, 0x5c, 0x11, 0xef, 0xef, 0xb1, 0x52, 0xaf, 0x30, 0x66, + 0x9e, 0x36, 0x31, 0x31, 0x5e, 0x5a, 0x32, 0x04, 0xa9, 0x2d, 0x1e, 0x68, 0x46, 0x68, 0x45, 0x4a, + 0x49, 0xbb, 0x06, 0x1b, 0xe2, 0x6d, 0x39, 0x53, 0x63, 0x07, 0x23, 0xd0, 0x50, 0x8f, 0xd4, 0x91, + 0x4e, 0x1d, 0x23, 0x6b, 0x26, 0xec, 0xf3, 0x68, 0x13, 0x34, 0xf7, 0xc4, 0x23, 0x87, 0x6c, 0xda, + 0xb7, 0xd0, 0x07, 0xbe, 0xf5, 0xa6, 0xe8, 0xf8, 0x96, 0x11, 0x5f, 0x10, 0x18, 0x70, 0x75, 0x5e, + 0x07, 0x62, 0x79, 0x98, 0x7f, 0x89, 0xc3, 0xe5, 0x9b, 0xa0, 0xaa, 0x7f, 0x28, 0xef, 0xe0, 0x62, + 0x4e, 0x31, 0x57, 0x75, 0xb6, 0x46, 0x6e, 0xc2, 0xc9, 0x8f, 0x62, 0xbe, 0x5c, 0xf7, 0x28, 0x72, + 0x90, 0x4e, 0x65, 0x5d, 0xaa, 0x3f, 0xe3, 0x4b, 0x01, 0x17, 0x4c, 0x37, 0xa9, 0xda, 0xc4, 0x17, + 0x84, 0x17, 0xae, 0x2f, 0xde, 0xca, 0x36, 0x12, 0xe1, 0x6c, 0xff, 0xe6, 0xc1, 0x9f, 0x74, 0x8a, + 0x12, 0xff, 0x37, 0x57, 0x4f, 0x2c, 0xb6, 0x2a, 0x07, 0xf5, 0x76, 0x4e, 0x48, 0x06, 0x62, 0x35, + 0xca, 0xd1, 0x3f, 0x65, 0xef, 0x9f, 0x3c, 0xa6, 0x9b, 0xa5, 0x5f, 0xfe, 0xd3, 0xdd, 0x6f, 0x36, + 0x84, 0x6a, 0x6b, 0x0e, 0x64, 0xe6, 0x6b, 0xb0, 0x6d, 0x28, 0x48, 0xcc, 0x61, 0x7a, 0x71, 0x00, + 0xd8, 0xb4, 0x12, 0x96, 0xc1, 0xa4, 0xed, 0x5a, 0x79, 0x6b, 0xda, 0x5f, 0x1a, 0xe9, 0xb2, 0x80, + 0x3a, 0x4c, 0x43, 0x74, 0x15, 0x99, 0x42, 0x37, 0xd5, 0x1b, 0x62, 0x02, 0xe7, 0xaf, 0x88, 0x6d, + 0xde, 0xa3, 0xc9, 0x8b, 0x20, 0x35, 0x98, 0xca, 0x0a, 0xfe, 0x6d, 0x58, 0x12, 0xfc, 0x6f, 0xab, + 0xa2, 0x96, 0xc4, 0xb7, 0xd3, 0x76, 0x34, 0x8c, 0x79, 0xbd, 0xe2, 0x9e, 0x64, 0x45, 0x48, 0xd5, + 0x3d, 0x88, 0xca, 0xb3, 0xee, 0xd1, 0x3d, 0x2d, 0xda, 0x1a, 0x64, 0xb0, 0xab, 0x70, 0xd9, 0xc1, + 0xd4, 0xcf, 0x74, 0x29, 0xb7, 0x9b, 0x76, 0x3f, 0xb6, 0x72, 0xd8, 0xbf, 0x03, 0x4c, 0x9d, 0x7a, + 0xc6, 0xfe, 0x8c, 0xf5, 0x3a, 0x4a, 0xf8, 0x1a, 0x02, 0x1b, 0xb3, 0xe0, 0x1d, 0x46, 0x88, 0x9a, + 0x31, 0x0c, 0x9e, 0xea, 0x75, 0x0a, 0x8d, 0x22, 0xe2, 0xfb, 0xc4, 0x26, 0x0c, 0x77, 0xf6, 0xeb, + 0x40, 0xa8, 0x1d, 0x27, 0xb9, 0x84, 0xb3, 0x73, 0xab, 0x8d, 0x93, 0x6d, 0x2d, 0xa5, 0xcc, 0x8f, + 0xff, 0x99, 0xf5, 0x45, 0xad, 0x22, 0x60, 0x20, 0x11, 0xb6, 0x00, 0x07, 0xd4, 0xde, 0x36, 0xea, + 0xe3, 0x1b, 0x4d, 0xec, 0x12, 0x5d, 0xe0, 0x5e, 0x7e, 0xfb, 0xfa, 0x5d, 0xb1, 0x56, 0x83, 0xa7, + 0xd0, 0xc5, 0x55, 0xab, 0xc5, 0xb9, 0x07, 0x85, 0xd3, 0xe7, 0x7b, 0x06, 0xd4, 0x95, 0x73, 0xa5, + 0x3d, 0x3d, 0x26, 0x88, 0x19, 0xbb, 0x04, 0x11, 0xcb, 0x9e, 0xa3, 0x7c, 0x78, 0x4f, 0x18, 0x41, + 0xf9, 0x01, 0x99, 0x37, 0x3f, 0xc0, 0xff, 0x61, 0xb5, 0xf5, 0xbf, 0x27, 0x21, 0xbc, 0x0f, 0x80, + 0x87, 0x05, 0x20, 0x22, 0x31, 0x87, 0xe8, 0x50, 0x28, 0xa4, 0xf4, 0xad, 0x38, 0x14, 0x42, 0x7d, + 0xf5, 0x89, 0x5d, 0x12, 0x31, 0xe5, 0x52, 0xab, 0x65, 
0x0b, 0x8b, 0x80, 0xe2, 0xc7, 0x51, 0xf3, + 0x60, 0x66, 0xa6, 0xf4, 0x1d, 0xad, 0x1b, 0x43, 0x23, 0xe1, 0x95, 0x8d, 0x64, 0x1e, 0x7f, 0x59, + 0x85, 0xfa, 0xeb, 0xcf, 0x28, 0xd3, 0xff, 0xf0, 0xe1, 0xed, 0xf3, 0x6c, 0x12, 0xf8, 0xa1, 0xa2, + 0x7f, 0xcc, 0xd9, 0x09, 0x00, 0x9f, 0x04, 0xe5, 0x87, 0x9f, 0xdc, 0x93, 0xd4, 0xf0, 0x4c, 0x8d, + 0xf0, 0xa1, 0xe1, 0x53, 0xfd, 0x50, 0x7a, 0xc1, 0xfa, 0x0c, 0xef, 0x28, 0xab, 0x57, 0xe6, 0x1c, + 0x0a, 0x6c, 0xca, 0x51, 0x32, 0x91, 0x33, 0x3e, 0x42, 0x85, 0x6a, 0x7b, 0xd3, 0x6d, 0x0c, 0x02, + 0x53, 0x49, 0x5f, 0xe4, 0x8f, 0x79, 0x24, 0x3f, 0xae, 0x35, 0x6f, 0x5c, 0x44, 0x1d, 0x04, 0xc7, + 0x3d, 0x08, 0xf9, 0x93, 0x03, 0xc9, 0xe4, 0x0f, 0x86, 0x19, 0xfc, 0x0d, 0x68, 0x46, 0x65, 0x0a, + 0x2e, 0x2d, 0xdf, 0xd0, 0x71, 0x4b, 0xaa, 0x36, 0x96, 0xef, 0xe6, 0xea, 0x26, 0x8f, 0x2a, 0x02, + 0xf6, 0xec, 0xcb, 0x63, 0x0f, 0xce, 0x3b, 0x25, 0xdf, 0x56, 0x1c, 0x9b, 0xdd, 0x5a, 0x8e, 0x4c, + 0x9d, 0x51, 0x42, 0xe5, 0x43, 0x91, 0x2b, 0x41, 0x62, 0x6f, 0xea, 0x64, 0x09, 0xfa, 0xe9, 0x8b, + 0x6b, 0x15, 0x09, 0x07, 0xc4, 0x58, 0x7c, 0x80, 0x21, 0x06, 0xb0, 0xa5, 0xa3, 0x37, 0x4d, 0x04, + 0x23, 0x43, 0x59, 0xfa, 0x95, 0x9d, 0x3f, 0x2b, 0x1b, 0x2f, 0x10, 0x91, 0x24, 0x28, 0xae, 0x2b, + 0xff, 0xab, 0x3b, 0xfa, 0x7e, 0x44, 0x2d, 0x22, 0x3c, 0x50, 0x0f, 0xac, 0x0a, 0x8a, 0xbd, 0xfc, + 0x20, 0xf5, 0x99, 0xfa, 0xce, 0x27, 0x93, 0x89, 0x45, 0xe4, 0x0e, 0xdd, 0x86, 0x07, 0xd3, 0x9a, + 0x3b, 0x4c, 0xce, 0x50, 0x92, 0x46, 0xee, 0x4c, 0x19, 0x05, 0xd0, 0x6a, 0x5f, 0x22, 0x5f, 0x04, + 0x1a, 0x3c, 0xa5, 0xe1, 0x67, 0x73, 0xaa, 0x5b, 0x78, 0xaf, 0x03, 0x41, 0xc5, 0xca, 0x7c, 0x97, + 0x4d, 0xf6, 0xa3, 0x59, 0x7b, 0xde, 0xbe, 0xfd, 0xcf, 0xda, 0x27, 0x6c, 0xd2, 0x78, 0x7c, 0xad, + 0xfd, 0x03, 0xc7, 0x02, 0x27, 0xf1, 0xfe, 0xf3, 0xc3, 0x00, 0x03, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 768, // uncompressed data size (bytes) + 781, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_sig_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_prod_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 768 +// COMPRESSED SIZE (bytes): 421 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_sig_prod_data[] = +{ + 0xb3, 0x62, 0x8c, 0xfb, 0xe2, 0xad, 0x58, 0x17, 0x2e, 0x21, 0x25, 0xf3, 0x92, 0x75, 0x91, 0x75, + 0xe0, 0x23, 0x81, 0xfb, 0xf3, 0x9c, 0xf7, 0x29, 0x48, 0xcc, 0x12, 0x7f, 0xdc, 0xff, 0x24, 0x7a, + 0xa1, 0xe1, 0xb5, 0x27, 0x6f, 0x17, 0x7b, 0xe4, 0x1c, 0x33, 0xf6, 0x3d, 0x95, 0x7d, 0x6c, 0xd6, + 0x8f, 0x85, 0x13, 0xb6, 0xfe, 0xd7, 0x0b, 0xdc, 0xf7, 0xe0, 0x2e, 0xdb, 0xc7, 0x9e, 0xb6, 0x92, + 0xce, 0xcd, 0x41, 0x73, 0x76, 0xef, 0xf9, 0xf8, 0x33, 0x78, 0x55, 0xf8, 0x4c, 0x9b, 0xa8, 0x72, + 0x91, 0xe3, 0x57, 0x67, 0xe5, 0xec, 0x7b, 0xbd, 0xed, 0x7f, 0xe5, 0x3a, 0xa3, 0x5c, 0xce, 0xe9, + 0x87, 0x19, 0xec, 0x73, 0x4f, 0xeb, 0xbf, 0xb8, 0x78, 0x9f, 0xa9, 0xf2, 0xfa, 0xc1, 0x33, 0xbb, + 0x04, 0x53, 0x8f, 0x89, 0xf8, 0x87, 0x66, 0x30, 0x2c, 0x63, 0xfb, 0x34, 0x71, 0x73, 0xab, 0xd2, + 0xfe, 0x94, 0xed, 0xb6, 0x13, 0xa6, 0x99, 0xfc, 0x51, 0x30, 0x0a, 0x15, 0xe9, 0x3c, 0x63, 0x30, + 0x75, 0xab, 0xd5, 0xde, 0x98, 0x9d, 0xd7, 0xb3, 0x4f, 0x48, 0x2c, 0x3f, 0x54, 0x7a, 0xf3, 0xca, + 0xe3, 0x1d, 0xbd, 0x53, 0x0f, 0x72, 0x7f, 0xf1, 0x9a, 0xba, 0x65, 0x0f, 0x5b, 0xdc, 0xd4, 0xbc, + 0xbd, 0xcb, 0x12, 0x6f, 0x0b, 0xf1, 0xfa, 0x3b, 0x2b, 0xd9, 0x2d, 0xaa, 0xf2, 0x5f, 0x7a, 0xfb, + 0x82, 0xdb, 0x7d, 0xc1, 0xce, 0x3e, 0xb5, 0xbb, 0xef, 0xac, 0x19, 0xf7, 0xef, 0xb5, 0x98, 0x74, + 0xed, 0x5a, 0xe9, 0x42, 0xe3, 0xe5, 0x7e, 0x0b, 0x27, 0x6c, 0xef, 0xd8, 0xd6, 0xcc, 0xbd, 0xf6, + 0xea, 0xf2, 0x8b, 0xd9, 0x6d, 0x86, 0xf1, 0xca, 0xd1, 0xed, 0x9b, 0x5b, 0xad, 0xae, 0xc8, 0x8b, + 0x5d, 0xba, 0x2b, 0xfe, 0xd8, 0xbd, 0x5e, 0x21, 0x9f, 0x7d, 0xe6, 0xa7, 0x54, 0xb6, 0xa6, 0xea, + 0x39, 0xc2, 0x31, 0x1d, 0xb5, 0x9b, 0x25, 0xd8, 0xd5, 0xd6, 0x6b, 0xb4, 0xcc, 0xbd, 0xb9, 0xbc, + 0x3c, 0x4a, 0xfc, 0xc2, 0xe6, 0xc9, 0x77, 0xd3, 0x6d, 0x53, 0x64, 0x44, 0x5e, 0x6d, 0xcd, 0x93, + 0xdd, 0x78, 0xe5, 0xbb, 0x8d, 0x73, 0xd7, 0xb4, 0x79, 0xfa, 0xa5, 0x82, 0x2a, 0x1c, 0x8f, 0x9c, + 0x78, 0xf2, 0x97, 0xcc, 0xeb, 0xdb, 0x21, 0x99, 0x3e, 0x5f, 0xe2, 0xfe, 0xf2, 0x9c, 0xae, 0x37, + 0xe2, 0xad, 0x0d, 0x0c, 0xa7, 0x63, 0x77, 0x65, 0xff, 0x66, 0xd3, 0x0a, 0xaa, 0x92, 0x65, 0x4e, + 0xad, 0x9a, 0xb3, 0xef, 0x89, 0x93, 0x75, 0xa9, 0xe2, 0xea, 0x05, 0x8a, 0xbd, 0xd3, 0x9f, 0x3a, + 0x09, 0xfe, 0x9c, 0x12, 0xf8, 0xd6, 0xd0, 0xe3, 0x35, 0x53, 0x26, 0xb7, 0x40, 0xff, 0xe6, 0xa5, + 0x1e, 0x87, 0xa7, 0xd9, 0x2e, 0xee, 0x2d, 0xcc, 0x35, 0x3f, 0x78, 0x69, 0x55, 0xce, 0xaa, 0xa3, + 0x27, 0xd8, 0xd6, 0x66, 0xe6, 0xac, 0xc9, 0xe3, 0xdc, 0xf3, 0xfb, 0xdd, 0xd2, 0xdf, 0x8f, 0xec, + 0x16, 0xdc, 0x3a, 0x2e, 0xb4, 0x31, 0x8a, 0x61, 0x14, 0x0c, 0x28, 0x00, 0x00, 0x27, 0x4c, 0x11, + 0xa6, 0x00, 0x03, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 768, // uncompressed data size (bytes) + 421, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_sig_prod_data, // 
compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_ga10x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_patch_loc_data[] = +{ + 0x10, 0x65, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_ga10x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_ga10x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_patch_meta_data[] = +{ + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_GA102("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/load/g_booteruc_load_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_GA102_num_sigs_data[] = +{ + 0x02, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_GA102_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_GA102_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterLoadUcode_GA102 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_GA102_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_GA102(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterLoadUcode_GA102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_TU102.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_TU102.c new file mode 100644 index 000000000..107fb2b23 --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_TU102.c @@ -0,0 +1,3455 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 35840 +// COMPRESSED SIZE (bytes): 24474 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_image_dbg_data[] = +{ + 0xed, 0xdc, 0x43, 0x90, 0x28, 0x6d, 0x13, 0x28, 0xe8, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0x7d, + 0xda, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0x36, 0x4e, 0xdb, 0xb6, 0xed, 0xbe, 0xff, 0x9d, 0xdd, + 0x7c, 0xbb, 0x59, 0x4e, 0xc4, 0x79, 0x36, 0xf9, 0x66, 0x45, 0x54, 0x65, 0x2d, 0x0a, 0x91, 0x15, + 0x51, 0x29, 0x09, 0x80, 0x30, 0x9b, 0x01, 0x10, 0x07, 0x00, 0x0a, 0xd0, 0x73, 0x94, 0xff, 0xde, + 0xf2, 0x03, 0x28, 0x09, 0x80, 0xf8, 0x7f, 0x37, 0x40, 0x80, 0x02, 0xfc, 0x3f, 0x69, 0x04, 0x00, + 0x00, 0xe0, 0x4f, 0x32, 0x80, 0x2f, 0x3d, 0x00, 0xc0, 0x1b, 0xd0, 0x1b, 0xc0, 0x3b, 0x50, 0x0c, + 0x00, 0x10, 0x40, 0xdf, 0xce, 0xee, 0xef, 0xef, 0x2f, 0x7c, 0x04, 0x00, 0x20, 0xc0, 0x6f, 0x1e, + 0x48, 0xd3, 0x11, 0x00, 0x73, 0xfa, 0x3a, 0x60, 0x53, 0x32, 0x00, 0x4d, 0xfa, 0x3a, 0xd0, 0xff, + 0x02, 0x69, 0xfa, 0x3a, 0xf0, 0xff, 0x02, 0xde, 0xff, 0xf6, 0x05, 0x48, 0xcf, 0x03, 0x6e, 0xd9, + 0x05, 0xee, 0xcb, 0xcb, 0xdf, 0x4d, 0xcf, 0x03, 0x4c, 0xcf, 0x00, 0x6a, 0xd9, 0x05, 0x6c, 0x59, + 0x07, 0xf2, 0x5d, 0x01, 0x00, 0x80, 0x67, 0x94, 0x04, 0x40, 0x78, 0xcd, 0x03, 0x80, 0xff, 0x5f, + 0x0d, 0x33, 0x90, 0xff, 0xbb, 0x80, 0x6d, 0xcf, 0x00, 0x00, 0x04, 0xfa, 0xdf, 0xea, 0x89, 0x07, + 0x28, 0x0e, 0x00, 0x00, 0x2c, 0xe2, 0x7f, 0xa5, 0xbe, 0xf3, 0x40, 0x7b, 0x9e, 0x3e, 0xde, 0xc1, + 0xde, 0x80, 0xff, 0x77, 0x34, 0xa0, 0x14, 0x78, 0x88, 0x9d, 0xff, 0x5b, 0xf9, 0xfb, 0x1d, 0x34, + 0x02, 0x40, 0x1f, 0xe0, 0x3b, 0x17, 0xa4, 0x2b, 0x1e, 0xa8, 0x35, 0x19, 0xe1, 0x7f, 0xf9, 0xcf, + 0x07, 0x80, 0xef, 0xff, 0xce, 0xe7, 0x0d, 0xa8, 0xa7, 0xf8, 0x13, 0x30, 0x0e, 0x49, 0x13, 0x36, + 0x82, 0x80, 0x1a, 0x20, 0x3f, 0xef, 0x0d, 0x20, 0xee, 0x7f, 0xb1, 0xef, 0xf7, 0x89, 0xe1, 0xeb, + 0x47, 0x12, 0x30, 0x3f, 0x8f, 0xef, 0x0e, 0xe0, 0xff, 0x05, 0x72, 0x4d, 0x87, 0xec, 0xef, 0x58, + 0x49, 0x79, 0xe2, 0xba, 0x2a, 0xfb, 0x35, 0x99, 0x28, 0x6e, 0xae, 0x80, 0x76, 0xab, 0xae, 0xa4, + 0x7e, 0x99, 0x6d, 0xd4, 0x09, 0xf6, 0x21, 0xa1, 0x69, 0xc1, 0x8f, 0x8b, 0xc6, 0xbc, 0x97, 0xb5, + 0x8e, 0xfd, 0xe0, 0xda, 0xd6, 0x26, 0x2a, 0x25, 0x2d, 0xa1, 0xe6, 0x46, 0x1e, 0x42, 0x48, 0x2f, + 0x26, 0x1d, 0x69, 0xd9, 0xb5, 0x93, 0x7a, 0x14, 0xfa, 0xa1, 0xd8, 0x2c, 0x20, 0x22, 0x87, 0x2e, + 0xd1, 0xac, 0x3b, 0x59, 0xfd, 0xdd, 0x48, 0x49, 0x01, 0x7d, 0x4e, 0x60, 0x2a, 0xd5, 0xb7, 0x61, + 0x3f, 0x4f, 0xec, 0x52, 0x1d, 0x4e, 0xbf, 0xd1, 0x1b, 0x9e, 0x7f, 0x00, 0xb9, 0x2d, 0xf6, 0x62, + 0x41, 0x7e, 0x0f, 0x8f, 0xf7, 0x5e, 0xe4, 0x06, 0xc9, 0x91, 0x47, 0x39, 0xa4, 0x8c, 0x65, 0x71, + 0xd8, 0xd4, 0xf5, 0xd1, 0x20, 0x23, 0xc7, 0x94, 0x29, 0xfb, 0xc9, 0x16, 0x44, 0x7c, 0x7e, 0x44, + 0x1d, 0xe5, 0xa0, 0x08, 0x50, 0xe4, 0xc7, 0xe8, 0xf5, 0x5e, 0x77, 0x4d, 0x8a, 0xdd, 0x13, 0xa4, + 0xe1, 0xe8, 0x40, 0xa6, 0x5a, 0x8a, 0x4f, 0xff, 0xe9, 0x84, 0xdc, 0xe6, 0x9e, 0xa1, 0xcc, 0xb6, + 0xd8, 0x38, 0x46, 0xfb, 0x97, 0xf4, 0xda, 0x02, 0x84, 0xc2, 0xbb, 0xf7, 0x4a, 0x4f, 0x45, 0xcb, + 0x38, 0x69, 0x80, 0x81, 0x15, 0x6f, 0x49, 0x20, 0x0e, 0xf6, 0x0a, 0x0d, 0x79, 0x61, 0x39, 0x6d, + 0xe7, 0x4b, 0x9a, 0x8d, 0xe4, 0xa1, 0x61, 0xcc, 0x92, 0xaa, 0x0f, 0xa3, 0x4d, 0x7d, 0xba, 0xa8, + 0xba, 0xfc, 0xbc, 0xb2, 0x57, 0xc9, 0xf6, 0x54, 0x17, 0x4f, 
0xfa, 0xd8, 0xe2, 0x70, 0x7c, 0x1b, + 0xfe, 0x63, 0x86, 0x94, 0x91, 0xed, 0x0d, 0x93, 0x01, 0x57, 0x8d, 0x6f, 0x9f, 0x2b, 0x73, 0x61, + 0x2b, 0x17, 0x1a, 0x35, 0x25, 0xc0, 0xc9, 0xe2, 0xfd, 0x47, 0x4c, 0xae, 0x3d, 0x75, 0xa3, 0x55, + 0xb1, 0xc5, 0x93, 0x4f, 0x54, 0xa2, 0x03, 0x81, 0xff, 0xed, 0xfe, 0x60, 0x35, 0xa3, 0xfc, 0xec, + 0x16, 0x93, 0x52, 0xc7, 0x4e, 0xd9, 0x57, 0x79, 0x8e, 0x26, 0xf3, 0x54, 0x73, 0xb2, 0x0d, 0x1c, + 0x67, 0x7a, 0xfe, 0xb8, 0x13, 0x95, 0xdd, 0x02, 0x3a, 0x9f, 0x04, 0x0f, 0x9b, 0x92, 0x71, 0x7e, + 0x30, 0x55, 0xa3, 0xd9, 0xbe, 0xc0, 0x28, 0x7e, 0x5c, 0x53, 0xd3, 0xdc, 0xda, 0x56, 0x34, 0x0a, + 0xe5, 0x96, 0x96, 0xda, 0xbc, 0x2b, 0x1b, 0x36, 0x88, 0xbf, 0xe1, 0x97, 0xff, 0xc7, 0xf8, 0x5a, + 0x9a, 0x03, 0x29, 0x30, 0x19, 0x36, 0x82, 0x40, 0x45, 0xf0, 0x7c, 0xf8, 0x5b, 0xf2, 0xd2, 0x4e, + 0xce, 0xb1, 0x2b, 0x52, 0x15, 0x1e, 0x33, 0x9f, 0x6f, 0xbc, 0x37, 0x47, 0xfe, 0xa8, 0xb0, 0xbc, + 0xca, 0x1c, 0xf7, 0xb6, 0x78, 0x5c, 0xef, 0x97, 0xb8, 0x24, 0x4c, 0xd3, 0x3c, 0xe4, 0xc8, 0xac, + 0xb2, 0x27, 0xdf, 0xde, 0x6a, 0xb0, 0x16, 0xb6, 0xcb, 0xe4, 0x20, 0x75, 0x2c, 0xe1, 0x83, 0x51, + 0xdd, 0x11, 0x00, 0xa0, 0xd9, 0x54, 0xf0, 0xc9, 0x7b, 0x34, 0xcb, 0x36, 0x6e, 0xbb, 0xbc, 0x2e, + 0x69, 0x07, 0x61, 0xfe, 0xd1, 0xe4, 0x32, 0x58, 0xfb, 0x87, 0x5f, 0x67, 0xa2, 0x5f, 0x8d, 0xdf, + 0xde, 0xdb, 0xa0, 0x57, 0x37, 0x5d, 0xc9, 0x45, 0xc5, 0x3d, 0x2b, 0xe4, 0x12, 0xa2, 0xeb, 0x90, + 0xee, 0xd4, 0x0d, 0x47, 0xe5, 0x70, 0x89, 0x6d, 0xc4, 0x82, 0xad, 0x37, 0x27, 0x36, 0x3f, 0x8d, + 0x6c, 0x9f, 0x7a, 0x8c, 0xc7, 0x75, 0x2a, 0xf6, 0x39, 0xa7, 0x40, 0xa3, 0x6b, 0x24, 0xcf, 0xcf, + 0xfd, 0x1e, 0x75, 0x49, 0xed, 0x9d, 0x0a, 0x9b, 0x4a, 0x93, 0xe5, 0xa0, 0xd8, 0x7a, 0x14, 0x28, + 0xa4, 0x1f, 0x5d, 0x26, 0x31, 0xa5, 0x9e, 0x7a, 0x98, 0x58, 0x6e, 0x9f, 0x85, 0x5d, 0x1d, 0x81, + 0x9d, 0xbe, 0xfd, 0x7b, 0x0e, 0xb7, 0x4a, 0x10, 0x6c, 0xde, 0x4f, 0x24, 0x21, 0xcb, 0x17, 0xfa, + 0xac, 0x85, 0x9e, 0x71, 0xc0, 0x32, 0x86, 0xf8, 0x57, 0x8d, 0x2a, 0xb0, 0xab, 0x49, 0x13, 0xe2, + 0x50, 0x32, 0xd6, 0xf2, 0x5b, 0x4f, 0x9d, 0xf9, 0x99, 0x81, 0xd2, 0x4c, 0x15, 0x59, 0x91, 0x64, + 0x30, 0xfc, 0x6c, 0x48, 0xea, 0x9c, 0x36, 0x95, 0x58, 0x0f, 0x9e, 0x5f, 0xb5, 0x62, 0x3b, 0x9b, + 0x46, 0x98, 0x56, 0xed, 0x90, 0xdb, 0x89, 0x06, 0x79, 0x6c, 0x12, 0xe6, 0xba, 0xf4, 0xb4, 0x00, + 0x43, 0x9d, 0x3c, 0x09, 0xff, 0x5c, 0x10, 0xa2, 0x70, 0x47, 0xfe, 0x37, 0xd2, 0xc3, 0x5f, 0x4b, + 0x29, 0xff, 0xee, 0x69, 0x47, 0x47, 0x50, 0x36, 0x5e, 0x22, 0xee, 0xe7, 0x27, 0xc9, 0x2a, 0xcf, + 0xa2, 0x38, 0xd8, 0x55, 0xbb, 0xd1, 0xc8, 0xc6, 0x43, 0xc6, 0x2f, 0x18, 0x56, 0xa1, 0x81, 0x47, + 0x32, 0x62, 0x63, 0x95, 0xe3, 0x51, 0xbb, 0xfb, 0x36, 0xd4, 0x46, 0xbd, 0xeb, 0x9d, 0x38, 0xf1, + 0x82, 0x48, 0xa3, 0xb2, 0x10, 0x76, 0x97, 0xf0, 0x4a, 0x30, 0x27, 0x22, 0xf5, 0x50, 0xaf, 0x28, + 0x13, 0x2c, 0x8e, 0xd4, 0x4b, 0x98, 0xc2, 0x15, 0x10, 0x72, 0x51, 0x9b, 0x8a, 0xfe, 0x4f, 0x3d, + 0xbd, 0x5e, 0x6c, 0x4d, 0x32, 0xaa, 0xc8, 0x2a, 0x7e, 0x42, 0xd4, 0xdc, 0xfa, 0x4e, 0x8c, 0xd6, + 0x63, 0x3e, 0x2e, 0x0c, 0x7f, 0x54, 0x05, 0x64, 0x10, 0xf4, 0x98, 0xb7, 0x13, 0x84, 0x5c, 0x04, + 0x86, 0xf6, 0x18, 0x6c, 0xf8, 0x10, 0xdc, 0x0f, 0x5a, 0x07, 0x6f, 0xd1, 0x1c, 0x6d, 0xd1, 0xdd, + 0x53, 0x7e, 0x39, 0xa2, 0x98, 0x0e, 0xba, 0x60, 0x2a, 0x03, 0x9d, 0x1a, 0xa1, 0x3c, 0x6c, 0x5b, + 0x27, 0xf2, 0xc2, 0x15, 0xb9, 0x5e, 0x1c, 0x06, 0x23, 0x4c, 0xdc, 0xcb, 0x97, 0x51, 0xa3, 0x8a, + 0xd2, 0xe3, 0x45, 0x92, 0x4e, 0x0a, 0xb0, 0x3d, 0x94, 0x10, 0x78, 0xa4, 0xd1, 0x36, 0x0e, 0xb3, + 0x85, 0xa2, 0xdb, 0x7a, 0xa0, 0xd3, 0xa8, 0xba, 0xb0, 0x9e, 0xfc, 0x48, 0x0b, 0x64, 
0xa1, 0xe2, + 0x59, 0x19, 0xe8, 0xd8, 0x9c, 0x96, 0x2c, 0x3d, 0x12, 0x7b, 0x1e, 0xb6, 0xec, 0xbb, 0xcb, 0x20, + 0xed, 0x1b, 0x66, 0x0f, 0x38, 0x59, 0xd6, 0x7d, 0x98, 0xdf, 0xf3, 0x4e, 0xae, 0x65, 0xf0, 0xe0, + 0x0e, 0x48, 0x84, 0x7e, 0x79, 0xc2, 0x7a, 0x21, 0xf6, 0xaa, 0x81, 0xca, 0xd9, 0x1b, 0x29, 0x8f, + 0x84, 0x98, 0xf7, 0xce, 0x54, 0x91, 0xac, 0xcb, 0x24, 0xd0, 0x55, 0x56, 0x76, 0xec, 0x60, 0x5c, + 0xa9, 0x10, 0xb2, 0x90, 0x79, 0xdc, 0xe6, 0xca, 0x38, 0x11, 0x2f, 0xbd, 0x69, 0x2c, 0xb2, 0xf5, + 0x33, 0x6d, 0x2b, 0x0d, 0x35, 0x62, 0xd7, 0x2d, 0x33, 0xcd, 0x8d, 0x8c, 0xaa, 0x27, 0x01, 0x43, + 0x18, 0x3b, 0x51, 0xf9, 0x4c, 0xd2, 0x86, 0xeb, 0xec, 0x19, 0x3f, 0x24, 0x2c, 0xa8, 0x01, 0x63, + 0x05, 0xe0, 0xdb, 0xad, 0xa4, 0xe9, 0x0e, 0xcf, 0x41, 0xaa, 0x3f, 0x5c, 0x00, 0x75, 0x0c, 0xb7, + 0x61, 0x52, 0xe1, 0xe3, 0xfe, 0x91, 0x1f, 0x63, 0x0f, 0xf3, 0xc3, 0x90, 0xe5, 0x57, 0xb3, 0x0f, + 0xbc, 0x67, 0x38, 0xfa, 0x8a, 0x07, 0x5f, 0xf9, 0x5e, 0x25, 0xad, 0xcb, 0xc0, 0x6d, 0x1e, 0x52, + 0x5d, 0x7f, 0xa7, 0x46, 0x85, 0x07, 0xd6, 0xbc, 0x1f, 0xcd, 0xa9, 0x32, 0xac, 0xcb, 0xb9, 0x00, + 0x81, 0xcc, 0x18, 0xf3, 0xbf, 0x99, 0xd0, 0xe1, 0x8b, 0xee, 0xd7, 0xc1, 0x44, 0x29, 0x47, 0x99, + 0xd4, 0xef, 0xb7, 0x66, 0xab, 0x30, 0x20, 0x98, 0x9d, 0x1d, 0x57, 0xac, 0x4b, 0x76, 0x85, 0x65, + 0x30, 0xd3, 0x05, 0x58, 0x57, 0x2c, 0xbf, 0x18, 0x28, 0xd2, 0xda, 0xaf, 0xab, 0x48, 0x1a, 0x1a, + 0x7d, 0xcc, 0xf9, 0x3a, 0xce, 0xe7, 0x9d, 0xa3, 0xd5, 0x79, 0xe1, 0x24, 0x5a, 0x2b, 0xf6, 0x8e, + 0x5c, 0xeb, 0x3d, 0xd9, 0x6c, 0x4e, 0xba, 0x14, 0xf7, 0x6a, 0x31, 0x74, 0xa4, 0x2e, 0x80, 0x0b, + 0x76, 0x11, 0xd7, 0xd7, 0xaa, 0xf7, 0x8a, 0xba, 0x88, 0xae, 0x11, 0x9c, 0x72, 0x6d, 0x4c, 0x34, + 0x55, 0x15, 0xba, 0xcb, 0x98, 0xaf, 0x3f, 0xbf, 0xb0, 0xc1, 0xa6, 0x07, 0xab, 0x8c, 0x72, 0xbc, + 0x84, 0xd0, 0x89, 0xeb, 0x29, 0x19, 0xdf, 0xc8, 0xea, 0xe4, 0xb0, 0xa6, 0x23, 0x86, 0x11, 0x4e, + 0x9d, 0xf6, 0x04, 0x8e, 0xf9, 0x24, 0x99, 0xef, 0x01, 0x98, 0x27, 0x19, 0x9d, 0x5c, 0x55, 0xae, + 0xb4, 0xba, 0xa3, 0xda, 0xaa, 0x81, 0xd7, 0x44, 0x46, 0x8c, 0x18, 0x38, 0x02, 0xa5, 0x22, 0xe5, + 0xd9, 0x0a, 0xd9, 0x64, 0x74, 0x7e, 0xb8, 0x23, 0x6a, 0x67, 0x1f, 0x3f, 0xf5, 0x7a, 0xfe, 0xcc, + 0x22, 0x00, 0x87, 0xa7, 0x24, 0x04, 0x2f, 0x10, 0x62, 0x72, 0x1f, 0x14, 0xae, 0x11, 0xf2, 0x93, + 0xe4, 0xd6, 0x4b, 0x13, 0xa5, 0x3f, 0x74, 0x0e, 0x97, 0x0b, 0xf3, 0xe6, 0x6a, 0x41, 0xf0, 0x65, + 0xc6, 0x56, 0xa4, 0x0b, 0xf5, 0xb5, 0x32, 0xd2, 0x4d, 0xaf, 0x19, 0x45, 0x6c, 0x09, 0x02, 0x5c, + 0x9f, 0xfd, 0x78, 0xdd, 0xfe, 0xda, 0x1d, 0x34, 0xc9, 0xb7, 0x9d, 0x0a, 0x75, 0xe6, 0x99, 0x5b, + 0xc3, 0x77, 0x92, 0xab, 0x50, 0x29, 0x37, 0x7f, 0x21, 0xfd, 0xed, 0x5a, 0xbc, 0x0b, 0x4a, 0x52, + 0xaa, 0xa4, 0x43, 0x68, 0x2a, 0x89, 0x67, 0xc9, 0x0c, 0x32, 0xa5, 0x33, 0x4d, 0x1b, 0xc2, 0x97, + 0x10, 0xc0, 0x34, 0x59, 0x3b, 0x8b, 0x3b, 0x37, 0x2a, 0x9d, 0x60, 0x22, 0x77, 0x94, 0x3e, 0x3e, + 0xe8, 0x8e, 0xac, 0x6f, 0x3a, 0x5c, 0xd2, 0x88, 0x05, 0x34, 0xf4, 0x9c, 0x68, 0x4e, 0x28, 0x63, + 0xd4, 0xc4, 0xcb, 0x99, 0xd3, 0x18, 0x25, 0x2a, 0xe7, 0x00, 0x95, 0x8f, 0xb3, 0x6b, 0xca, 0xdd, + 0xb1, 0xf2, 0xe5, 0x3a, 0xd7, 0x62, 0xea, 0x36, 0x3f, 0x26, 0x44, 0x81, 0xbc, 0x25, 0x33, 0x8b, + 0x71, 0x0e, 0xc4, 0xe1, 0xcb, 0xeb, 0x9b, 0xc8, 0x95, 0x7f, 0x72, 0x4c, 0x80, 0xa6, 0x1d, 0x68, + 0x48, 0xdf, 0x83, 0x1f, 0x79, 0xc3, 0x4a, 0x94, 0x02, 0x85, 0x01, 0xaf, 0x25, 0xc8, 0x07, 0xcc, + 0xda, 0xb9, 0xf7, 0xd8, 0xac, 0xfa, 0x93, 0x00, 0xe8, 0x6c, 0xf3, 0xf0, 0x6d, 0xab, 0x57, 0x5b, + 0x5d, 0x0f, 0x1c, 0x5b, 0x85, 0x8d, 0x91, 0x1a, 0xce, 0x84, 0xe8, 0xbf, 0xa4, 0x28, 0x40, 0x6a, + 0x9b, 0xf2, 
0x43, 0x4b, 0x06, 0x9b, 0x12, 0x98, 0x2d, 0xf0, 0x43, 0x1e, 0x53, 0x62, 0x05, 0xc1, + 0x9d, 0xf5, 0x10, 0xfa, 0x5b, 0xbb, 0x62, 0x22, 0xb2, 0xe4, 0x31, 0x79, 0x04, 0xfc, 0xdc, 0x15, + 0x2e, 0x96, 0xb5, 0x7c, 0x4e, 0x57, 0x29, 0x11, 0x60, 0xe7, 0xad, 0x8f, 0x0c, 0x3f, 0x4f, 0x42, + 0x2a, 0xe6, 0xfe, 0x63, 0x82, 0xb7, 0x6b, 0xfe, 0xc6, 0xea, 0x2f, 0x5a, 0x67, 0xb1, 0xa8, 0x02, + 0xad, 0x5b, 0x0b, 0xc0, 0x51, 0x4b, 0x34, 0x7d, 0x73, 0xf7, 0x4a, 0x79, 0x2a, 0xa8, 0xf7, 0x0a, + 0x30, 0x08, 0xe7, 0x95, 0x05, 0x03, 0xd5, 0x6a, 0xe4, 0x87, 0xbb, 0xb3, 0xdc, 0x87, 0x59, 0xc4, + 0xc8, 0xb2, 0x3f, 0xc5, 0xeb, 0x6a, 0x54, 0x8f, 0x33, 0x75, 0x9b, 0xd6, 0x4c, 0xf6, 0x4c, 0x0c, + 0xc5, 0xc7, 0x2b, 0x18, 0x29, 0xf4, 0x9a, 0x6c, 0xc3, 0xf6, 0x07, 0x1f, 0x89, 0x8a, 0x3e, 0x50, + 0xed, 0xda, 0x39, 0x33, 0x87, 0x8b, 0x99, 0xd0, 0xf4, 0x5c, 0x3a, 0xe4, 0xc6, 0xd0, 0xb2, 0x04, + 0x5f, 0x09, 0xb8, 0x71, 0x92, 0x58, 0x56, 0x70, 0xe1, 0x05, 0x16, 0x19, 0x39, 0x0a, 0x46, 0x6c, + 0xe7, 0x04, 0x9f, 0xfb, 0x6e, 0xf6, 0x9c, 0xc6, 0x6b, 0x90, 0x8e, 0x81, 0x8b, 0xf7, 0xd4, 0x18, + 0xf4, 0x87, 0x1f, 0x44, 0xce, 0x49, 0x48, 0xeb, 0x0c, 0xfe, 0x68, 0xcb, 0xab, 0x52, 0x4d, 0xf3, + 0xed, 0xe4, 0xab, 0x16, 0xbe, 0x15, 0x8a, 0xc3, 0x33, 0x8b, 0xfe, 0x58, 0xaf, 0x1b, 0x87, 0xcc, + 0x0d, 0x94, 0x17, 0x2c, 0x70, 0xe3, 0x43, 0x6f, 0x09, 0x7e, 0x67, 0x44, 0x97, 0x6c, 0x9a, 0x09, + 0x05, 0x1c, 0xa6, 0x50, 0xd8, 0xa3, 0x38, 0x04, 0x3d, 0xbe, 0x3e, 0xa2, 0x2c, 0x6d, 0xc7, 0x32, + 0x40, 0x0d, 0x56, 0xa6, 0x38, 0xea, 0x95, 0x38, 0x5f, 0x61, 0x0b, 0x51, 0x42, 0xc0, 0xc6, 0xdf, + 0xb8, 0xe9, 0x33, 0xef, 0xbd, 0x08, 0xc6, 0xd2, 0x4c, 0x27, 0x21, 0x1b, 0x51, 0xed, 0x90, 0xab, + 0x3d, 0xcc, 0x1c, 0x8b, 0x09, 0xf1, 0xa0, 0xed, 0x49, 0xa4, 0xec, 0xab, 0xe7, 0x99, 0x59, 0x51, + 0x17, 0xce, 0xe2, 0xba, 0xd8, 0x22, 0x56, 0xdc, 0x49, 0xa2, 0xb2, 0x6b, 0x06, 0xf7, 0xb0, 0x33, + 0xfd, 0x2e, 0x72, 0xff, 0x57, 0xd8, 0xa6, 0xe8, 0x22, 0xc0, 0x66, 0xae, 0x6d, 0xcf, 0x33, 0x70, + 0x6f, 0x5b, 0x76, 0xc9, 0x57, 0xa7, 0x31, 0xd5, 0xec, 0xed, 0xb7, 0xe6, 0x7e, 0x7d, 0xd6, 0x49, + 0x55, 0x65, 0x7e, 0xa9, 0xfd, 0x89, 0xa6, 0x54, 0xa3, 0x38, 0xb2, 0xb5, 0x3e, 0x3c, 0xbb, 0xf2, + 0x88, 0xb3, 0xa6, 0x99, 0x48, 0x85, 0x27, 0x80, 0xeb, 0x5e, 0xca, 0x14, 0xdf, 0xba, 0x67, 0xff, + 0xf7, 0x3b, 0x4b, 0x51, 0x6b, 0x44, 0xa6, 0x5f, 0x32, 0x70, 0x9f, 0x2c, 0x87, 0xcb, 0x91, 0x1a, + 0x6b, 0x15, 0xcd, 0x27, 0x5d, 0xef, 0xb9, 0x62, 0x32, 0x31, 0x76, 0x99, 0xaa, 0x8a, 0xdb, 0xcf, + 0xea, 0x91, 0x0e, 0x0f, 0x03, 0x92, 0xa1, 0x5e, 0x1f, 0x39, 0x79, 0xb4, 0xad, 0x6f, 0xd3, 0x2f, + 0x9a, 0xe9, 0x25, 0x16, 0x1c, 0x79, 0xae, 0x40, 0xa6, 0x33, 0x1e, 0xf2, 0xad, 0x1e, 0xd0, 0x06, + 0x09, 0xb1, 0xc0, 0x90, 0x24, 0x27, 0x06, 0xbc, 0xef, 0x55, 0x8f, 0xcc, 0xaa, 0x52, 0xb8, 0xe1, + 0xd7, 0x8b, 0x8f, 0x46, 0xbb, 0xda, 0xea, 0x5c, 0x92, 0x78, 0xd4, 0xe1, 0x1e, 0x92, 0x64, 0xc7, + 0xd7, 0x71, 0x11, 0x93, 0x6b, 0x55, 0xe2, 0x2c, 0x9a, 0xca, 0xf5, 0xae, 0x65, 0x71, 0xdc, 0xf8, + 0x4c, 0x10, 0x8f, 0x68, 0x21, 0x7f, 0x2b, 0x7f, 0x64, 0x52, 0x15, 0x96, 0xd9, 0xa2, 0x7a, 0x76, + 0xe4, 0x0f, 0xc6, 0x33, 0x66, 0xc5, 0xc6, 0xaa, 0x97, 0xf6, 0xd4, 0x70, 0x70, 0xf5, 0x8f, 0x01, + 0xaa, 0xf9, 0x9f, 0xf8, 0x15, 0x3d, 0x66, 0xcf, 0x55, 0x13, 0xa6, 0x11, 0x2c, 0xc8, 0x94, 0x67, + 0x3c, 0x6a, 0x32, 0xef, 0xc0, 0x17, 0x26, 0xe0, 0xb8, 0x5f, 0xcf, 0x72, 0x42, 0xa9, 0x93, 0x39, + 0xb3, 0xc4, 0x7d, 0xca, 0x33, 0x4c, 0x79, 0x0b, 0xb9, 0xfc, 0x8a, 0x25, 0xb6, 0xd2, 0x4a, 0x27, + 0x50, 0xaa, 0x6b, 0x68, 0x98, 0xfc, 0x81, 0xf8, 0x48, 0x41, 0x51, 0x0f, 0x8c, 0xd7, 0x40, 0x2b, + 0xad, 0x6c, 0x6e, 0xd9, 0xc8, 0xa2, 
0xbc, 0x52, 0xe4, 0xf9, 0xb8, 0x3d, 0x0e, 0xab, 0x1e, 0x72, + 0xf5, 0x1b, 0x2c, 0x30, 0x82, 0x28, 0x55, 0xfe, 0xd7, 0x77, 0x73, 0x15, 0x95, 0x90, 0xa6, 0xd8, + 0xe9, 0x69, 0xa1, 0x12, 0x82, 0xc1, 0x79, 0x9d, 0x03, 0xf2, 0x57, 0x39, 0x6a, 0x56, 0x66, 0x04, + 0x7d, 0x1f, 0xd2, 0xf4, 0xab, 0x06, 0xa9, 0xdc, 0x8b, 0x91, 0xc7, 0x61, 0xca, 0x8f, 0x38, 0x6a, + 0x60, 0xbe, 0x10, 0x1a, 0x85, 0xfb, 0xd1, 0x0b, 0x87, 0x46, 0x78, 0xb2, 0x8f, 0xd6, 0x1c, 0xda, + 0xe5, 0xef, 0x95, 0x1b, 0x9c, 0xe7, 0xb8, 0xc6, 0x3e, 0x96, 0x9d, 0x60, 0x8f, 0x1e, 0x70, 0xa4, + 0xb7, 0xe7, 0x46, 0xa4, 0x99, 0xb8, 0xcc, 0xae, 0xf8, 0x8a, 0xd3, 0xb9, 0x5b, 0x58, 0x18, 0x9a, + 0xfd, 0x63, 0x90, 0xf9, 0x83, 0xfd, 0xeb, 0xf9, 0xc3, 0x35, 0x97, 0xb6, 0x5f, 0x65, 0x72, 0xa0, + 0xad, 0x2c, 0x6c, 0xdd, 0x16, 0x06, 0xd3, 0x74, 0x99, 0x10, 0xb2, 0x5f, 0x19, 0x72, 0xd3, 0x5f, + 0x0e, 0x8b, 0x2c, 0x10, 0xe0, 0x91, 0xe0, 0x59, 0x9e, 0x3f, 0x54, 0x69, 0xbd, 0xce, 0x2c, 0x0a, + 0x21, 0xea, 0xf8, 0xe8, 0xcf, 0x40, 0x23, 0x30, 0x41, 0xb1, 0x23, 0x2c, 0x2d, 0x70, 0xf5, 0xb8, + 0x7e, 0x74, 0xb1, 0x26, 0xc3, 0x9f, 0x4d, 0x92, 0x58, 0x61, 0xc8, 0xbd, 0xd9, 0xe6, 0x81, 0xf4, + 0x0a, 0xe3, 0x96, 0xe4, 0x1f, 0x4d, 0xf8, 0x91, 0x89, 0xcc, 0xdb, 0x13, 0x0b, 0x37, 0x3f, 0x47, + 0x6c, 0x8f, 0x90, 0x26, 0x3b, 0xa4, 0x29, 0xc9, 0x00, 0x04, 0xfa, 0x03, 0x97, 0x86, 0xa7, 0xf8, + 0x7a, 0x5a, 0xf4, 0xad, 0x8b, 0xec, 0x4d, 0x6f, 0xbb, 0xa8, 0xa7, 0xfe, 0xa4, 0xe7, 0x99, 0x18, + 0x4d, 0xba, 0x1f, 0x4f, 0xfe, 0x84, 0xb2, 0xbd, 0x1e, 0xcd, 0x72, 0x77, 0x83, 0xd1, 0x7c, 0xdd, + 0xf5, 0x79, 0x4c, 0x2d, 0x5a, 0xd0, 0xd5, 0xd1, 0xd1, 0x8d, 0x1e, 0xcc, 0x62, 0x30, 0x89, 0x42, + 0x51, 0x1f, 0x4f, 0x7a, 0x33, 0x6a, 0xc1, 0x4c, 0x64, 0x13, 0x93, 0x75, 0xb8, 0xa3, 0x3f, 0xbc, + 0xfc, 0x75, 0x40, 0x65, 0xcb, 0xf5, 0x1f, 0x3e, 0x10, 0xa5, 0x2c, 0x58, 0x74, 0x71, 0x2e, 0x6a, + 0x0a, 0xcf, 0xd7, 0xe3, 0xde, 0x78, 0x2a, 0xe9, 0x92, 0xfb, 0xcb, 0x15, 0xec, 0x56, 0xa8, 0x59, + 0xf8, 0xd1, 0x73, 0x27, 0xfd, 0xbe, 0x8a, 0xf5, 0x5e, 0x84, 0x90, 0x39, 0x7a, 0xcc, 0x1e, 0x0f, + 0xe4, 0xe3, 0x9c, 0xf0, 0x46, 0xfa, 0xc6, 0xed, 0xb8, 0x94, 0x10, 0x63, 0x97, 0x69, 0xbd, 0x15, + 0x22, 0x34, 0x88, 0x94, 0xe2, 0xfe, 0x21, 0xf4, 0xe4, 0x57, 0x94, 0x63, 0xb2, 0xf5, 0x62, 0xd3, + 0x88, 0x2a, 0x9d, 0x3f, 0x11, 0xbf, 0x21, 0x85, 0xfa, 0x0b, 0xfd, 0xe9, 0xde, 0x4b, 0xb5, 0x7f, + 0x8a, 0x49, 0xdf, 0xe7, 0xa6, 0x26, 0xfc, 0x5e, 0x98, 0x73, 0xfb, 0x07, 0xd1, 0xe4, 0x56, 0x97, + 0x3d, 0x0c, 0x1f, 0x44, 0xd2, 0x14, 0xc7, 0x95, 0x02, 0x87, 0xd1, 0x67, 0xc8, 0x9e, 0x77, 0xd8, + 0x90, 0x38, 0x07, 0x17, 0xa0, 0xd5, 0xad, 0x0f, 0xe5, 0x02, 0x10, 0x8e, 0xc8, 0x75, 0xb1, 0x28, + 0x4b, 0xc4, 0xe2, 0xd6, 0x0d, 0xde, 0x1d, 0xa6, 0x44, 0x41, 0xa2, 0xb6, 0xc6, 0x7d, 0x10, 0x86, + 0x81, 0x84, 0x40, 0x84, 0x79, 0x18, 0x0d, 0x97, 0xa9, 0xf9, 0xf0, 0xfe, 0xe6, 0x85, 0x0c, 0x26, + 0xd6, 0xd8, 0xe4, 0xac, 0xb6, 0xdd, 0x7f, 0x1d, 0xed, 0x4c, 0xe8, 0xc3, 0x7f, 0xe2, 0x77, 0x1f, + 0x37, 0x8a, 0x76, 0x31, 0x29, 0x14, 0x15, 0xde, 0xee, 0x4c, 0xc5, 0xb1, 0xbc, 0x9a, 0x22, 0x57, + 0xb5, 0x9c, 0xd9, 0x1a, 0xdd, 0xc3, 0x2d, 0x6f, 0x78, 0x14, 0x1f, 0x4e, 0x46, 0x5d, 0x45, 0xdb, + 0xa4, 0x0c, 0xdc, 0x5d, 0x60, 0xc5, 0x87, 0x5d, 0x5d, 0xaa, 0xdc, 0x5e, 0x97, 0x4a, 0x25, 0x6c, + 0xbe, 0xe9, 0x4b, 0x47, 0xe8, 0xaa, 0x46, 0xdc, 0x92, 0x04, 0xbc, 0xec, 0x2b, 0x09, 0x11, 0x3d, + 0x9e, 0x68, 0x70, 0x47, 0xd0, 0xa2, 0xf4, 0x0d, 0x4c, 0x53, 0x91, 0x8a, 0x11, 0xa9, 0xd8, 0xec, + 0xd1, 0xff, 0x08, 0x15, 0xe7, 0x56, 0x80, 0xd7, 0xd6, 0x83, 0x34, 0xaf, 0x1f, 0x07, 0x27, 0xb6, + 0xe5, 0x43, 0x33, 0x32, 0xb3, 0xfa, 0x79, 0xe9, 0x5b, 0x04, 
0x83, 0x10, 0x00, 0xab, 0x06, 0xee, + 0x1a, 0xd0, 0xdf, 0xde, 0x6f, 0xd0, 0xa9, 0xa8, 0x8e, 0x45, 0x44, 0x85, 0x60, 0x16, 0xce, 0xa2, + 0xe5, 0xcd, 0x44, 0x19, 0xa4, 0x10, 0x86, 0x06, 0x78, 0x9c, 0x41, 0x22, 0x7a, 0xf4, 0xb2, 0x36, + 0x9e, 0x31, 0x20, 0x4c, 0xd4, 0x22, 0x25, 0xcc, 0x2e, 0xbf, 0xc6, 0x80, 0x75, 0xb8, 0x55, 0x0e, + 0xac, 0x23, 0x2d, 0x75, 0x3d, 0x06, 0x64, 0xd2, 0xd6, 0xdc, 0xb0, 0xed, 0xd2, 0x18, 0xf7, 0xcb, + 0x79, 0x79, 0x29, 0x36, 0x0b, 0x6c, 0x2b, 0xea, 0x80, 0x52, 0x45, 0x60, 0x7c, 0x6d, 0x0f, 0xfb, + 0x20, 0x0b, 0x55, 0x31, 0x25, 0x56, 0x74, 0xd8, 0x5e, 0x28, 0x11, 0x4a, 0x91, 0xc3, 0x84, 0x4c, + 0x4b, 0xf8, 0x52, 0xb7, 0x4c, 0x79, 0x1f, 0x54, 0x76, 0x02, 0x60, 0x44, 0x92, 0xf3, 0x46, 0x01, + 0xb1, 0x52, 0x4e, 0x66, 0xab, 0xc8, 0xa0, 0x46, 0x5e, 0x11, 0x6e, 0xdc, 0x7e, 0x3d, 0x2f, 0x0f, + 0x7e, 0xa1, 0x2a, 0x35, 0x51, 0x18, 0x00, 0xd1, 0xd5, 0x97, 0xe2, 0x72, 0x9d, 0xb8, 0xb1, 0xb2, + 0x8d, 0x2e, 0xc6, 0xe0, 0x30, 0xb4, 0x8c, 0x22, 0x53, 0x3a, 0x36, 0x1e, 0xc2, 0x55, 0x8d, 0xba, + 0xd1, 0xbc, 0xec, 0x45, 0x3a, 0x9e, 0x22, 0x43, 0x81, 0x48, 0xa7, 0x57, 0xa1, 0xfc, 0x04, 0x6c, + 0x93, 0x29, 0xf5, 0x32, 0xab, 0xe7, 0x92, 0x6d, 0x41, 0xaa, 0x73, 0xfb, 0x1f, 0x0e, 0xc9, 0x72, + 0x2c, 0x89, 0x25, 0x17, 0x60, 0x2d, 0xd7, 0xcf, 0xb4, 0x89, 0x7e, 0xe2, 0x75, 0x53, 0xf7, 0x53, + 0x05, 0x10, 0x1f, 0x67, 0x00, 0x2b, 0xff, 0x50, 0xf4, 0x00, 0x97, 0xfd, 0x75, 0x29, 0x4c, 0xfc, + 0x54, 0x0f, 0xd8, 0xa9, 0x64, 0xb3, 0x22, 0x1c, 0x83, 0x03, 0x62, 0x22, 0xa0, 0x6d, 0x97, 0xd6, + 0xd3, 0x64, 0x1d, 0x2f, 0x0e, 0xf8, 0xa3, 0x51, 0x50, 0x31, 0xff, 0xcd, 0xbd, 0x77, 0xae, 0xfb, + 0x25, 0x76, 0x97, 0x29, 0xdd, 0x89, 0x7d, 0xdc, 0x4d, 0x51, 0x12, 0x97, 0x9e, 0x5d, 0xa0, 0x83, + 0x71, 0x8a, 0x32, 0x28, 0x39, 0xfe, 0x31, 0xff, 0xbc, 0x2c, 0x53, 0x57, 0x92, 0x7c, 0xc3, 0xb9, + 0xe8, 0xd7, 0x18, 0x30, 0x60, 0x5d, 0x55, 0x05, 0xae, 0xd1, 0x3f, 0x64, 0xaa, 0x80, 0x21, 0x55, + 0x44, 0xb1, 0xc0, 0xd9, 0x01, 0xe5, 0x72, 0x7b, 0xed, 0xbe, 0x33, 0x35, 0xd1, 0xd5, 0x2c, 0x17, + 0x08, 0xcc, 0xa2, 0x4a, 0x0a, 0xc9, 0x20, 0x0e, 0xaf, 0x5d, 0x22, 0x2a, 0x0a, 0x13, 0x05, 0xbc, + 0x08, 0x43, 0xc0, 0xe7, 0x3e, 0xfb, 0xc8, 0x0c, 0x53, 0x00, 0x4f, 0x3d, 0x27, 0xa9, 0x13, 0x86, + 0xbf, 0xee, 0x5f, 0x2c, 0xc3, 0xac, 0xab, 0xcc, 0x6a, 0xa6, 0xb3, 0x01, 0x99, 0x27, 0xf6, 0x7e, + 0x6d, 0xe1, 0x0d, 0x2f, 0xf2, 0xcb, 0x60, 0x17, 0x7e, 0x69, 0x1a, 0x13, 0x49, 0x88, 0xa9, 0x10, + 0xf0, 0x80, 0x83, 0x9f, 0x00, 0x07, 0xa3, 0xe0, 0xbf, 0x55, 0x55, 0x5b, 0xbf, 0xac, 0x48, 0xc5, + 0x4b, 0x3d, 0x36, 0x11, 0x96, 0x7c, 0x3b, 0xd1, 0xd1, 0x89, 0x20, 0x64, 0x61, 0xa7, 0x53, 0x62, + 0xac, 0x8a, 0x17, 0x15, 0x50, 0x0d, 0x19, 0x54, 0x35, 0xdf, 0x3b, 0x94, 0x34, 0x01, 0xb1, 0x8c, + 0xcf, 0xa7, 0x0c, 0xad, 0xd4, 0x2f, 0xbd, 0xf1, 0xe8, 0x06, 0x24, 0xa2, 0x94, 0x82, 0xa4, 0xc0, + 0xe8, 0x9b, 0x00, 0x19, 0xb7, 0x86, 0x9b, 0x17, 0x81, 0x57, 0x6a, 0x54, 0x23, 0xd9, 0xb4, 0x8d, + 0x99, 0xa2, 0xc8, 0x18, 0xbe, 0xac, 0x1b, 0x5f, 0xbe, 0xc1, 0x6f, 0xd9, 0x87, 0x39, 0xff, 0x34, + 0xc5, 0xf9, 0xab, 0xcb, 0x95, 0x3d, 0xdd, 0x45, 0x05, 0xd6, 0x29, 0xdd, 0x95, 0x32, 0xe6, 0xa9, + 0xe0, 0x4a, 0x79, 0x95, 0x66, 0x8f, 0x38, 0xcf, 0xa2, 0x7c, 0x10, 0x5f, 0x7f, 0x9d, 0xc1, 0x16, + 0x6c, 0x6c, 0x78, 0x71, 0x80, 0x92, 0xd5, 0x6a, 0x40, 0x14, 0x60, 0x53, 0x9d, 0x5d, 0x2c, 0xf0, + 0x02, 0x42, 0x46, 0x73, 0x2a, 0xc7, 0x0a, 0x73, 0x29, 0xaa, 0x55, 0x9f, 0x75, 0x76, 0x31, 0x01, + 0xca, 0x66, 0xdf, 0x8f, 0x95, 0xe4, 0x1b, 0x3e, 0x92, 0x84, 0x91, 0x35, 0xeb, 0xe0, 0x58, 0xec, + 0x19, 0x28, 0xd0, 0x63, 0xdc, 0x12, 0xd0, 0x49, 0x23, 0x8c, 0x3e, 0x23, 0x0e, 0x82, 
0x28, 0x9d, + 0x44, 0x35, 0x9d, 0x10, 0xb0, 0xbb, 0xc6, 0x20, 0x45, 0x5f, 0x8f, 0x52, 0x84, 0x35, 0xa9, 0x06, + 0x84, 0x38, 0x05, 0x6f, 0x01, 0xb1, 0x05, 0x0e, 0x04, 0x0f, 0xfe, 0xa9, 0x32, 0xf5, 0x20, 0xf9, + 0x32, 0xa5, 0x6f, 0xa6, 0x91, 0xdf, 0x0c, 0x78, 0x40, 0x40, 0x02, 0x2c, 0xb4, 0x77, 0x67, 0x75, + 0x91, 0x39, 0x5c, 0x4b, 0xb5, 0xf6, 0x41, 0x16, 0x88, 0xb2, 0x27, 0x1c, 0x4e, 0xd9, 0x8d, 0x70, + 0xb1, 0x03, 0x8a, 0x41, 0xb0, 0xf2, 0x98, 0x64, 0xd3, 0x40, 0x25, 0x0b, 0xba, 0xcf, 0x04, 0x73, + 0x84, 0x87, 0x1c, 0x6b, 0xf8, 0x5b, 0xdf, 0x1d, 0x93, 0xc5, 0xac, 0xbc, 0xf7, 0x41, 0xbd, 0x10, + 0xec, 0x7a, 0x2f, 0x63, 0x3e, 0xf2, 0x7a, 0x92, 0x77, 0xc8, 0x77, 0x82, 0x1e, 0x6b, 0xeb, 0xe2, + 0x6f, 0x3e, 0xc2, 0x0d, 0x85, 0x20, 0x65, 0x3a, 0x3a, 0xc6, 0x47, 0x52, 0xb6, 0x68, 0x66, 0xa2, + 0x08, 0x62, 0x71, 0xc4, 0xc3, 0xa6, 0xa5, 0x7d, 0x87, 0xee, 0xaa, 0x4e, 0xf4, 0x79, 0x92, 0x4f, + 0x92, 0xae, 0x55, 0x93, 0x90, 0xc3, 0x58, 0x04, 0xa4, 0x75, 0x61, 0x76, 0xa8, 0xd9, 0x9f, 0x28, + 0x67, 0xbe, 0xc7, 0xdd, 0xe9, 0xd2, 0x86, 0x29, 0x8b, 0xed, 0xf7, 0xae, 0x89, 0xcf, 0x3b, 0x50, + 0x65, 0x10, 0x9b, 0x04, 0xe9, 0xb5, 0x2a, 0xa1, 0x87, 0x54, 0x34, 0xbd, 0xc1, 0xf1, 0x92, 0xe3, + 0x98, 0x7e, 0xee, 0xaa, 0x8b, 0xe5, 0xf9, 0x52, 0x1e, 0x7b, 0x8b, 0xcc, 0x19, 0xab, 0x9c, 0xdb, + 0xbf, 0xa6, 0x7f, 0x27, 0x4a, 0x34, 0xe7, 0xf7, 0xc7, 0x53, 0x2d, 0xde, 0xd6, 0x18, 0x6f, 0xe9, + 0x0a, 0xf7, 0xae, 0xec, 0x2b, 0x73, 0x95, 0x73, 0xe8, 0x5e, 0xc2, 0x1d, 0xbc, 0xd4, 0x6c, 0xc9, + 0xf4, 0xdc, 0x9a, 0xcc, 0x23, 0xbb, 0x15, 0x3b, 0x97, 0xaa, 0x51, 0xb5, 0x9a, 0x11, 0x83, 0x40, + 0x8b, 0x2e, 0x1e, 0x2c, 0x35, 0xa3, 0x2e, 0x50, 0xfb, 0x70, 0x05, 0x00, 0xee, 0x7c, 0xac, 0x6b, + 0xda, 0xb7, 0xe3, 0x6b, 0x4a, 0xbe, 0x7c, 0x37, 0x6d, 0x86, 0xdb, 0x7e, 0xd9, 0x9b, 0x46, 0x64, + 0x09, 0x9e, 0xdf, 0x03, 0x85, 0x9e, 0xf9, 0xb6, 0x23, 0xd5, 0x0b, 0xeb, 0x08, 0x63, 0x5a, 0x3b, + 0x9f, 0x86, 0xc2, 0x1e, 0xe7, 0x3d, 0x2f, 0x43, 0xd6, 0xa3, 0xdb, 0x7d, 0xb7, 0xa9, 0xf5, 0x65, + 0x24, 0x97, 0xc0, 0xe5, 0x69, 0x59, 0xc4, 0x16, 0xa9, 0x16, 0xec, 0xe5, 0xca, 0xfa, 0xb5, 0x14, + 0x14, 0x32, 0xb5, 0xd6, 0xa9, 0x07, 0xc5, 0x61, 0x62, 0xad, 0xb9, 0xdf, 0xc5, 0x41, 0xfd, 0xfb, + 0x07, 0x09, 0x5a, 0x14, 0xf2, 0x4a, 0x6c, 0xe7, 0x67, 0x25, 0xa0, 0xad, 0xef, 0xdb, 0x2f, 0x5a, + 0x74, 0xf1, 0x52, 0x3b, 0xa0, 0x4e, 0xf6, 0x4d, 0xb7, 0x73, 0x7a, 0x95, 0xe9, 0xa5, 0x86, 0x89, + 0x2c, 0xea, 0x6e, 0xb0, 0x90, 0x85, 0x81, 0xab, 0xad, 0xa9, 0xa7, 0x36, 0x2c, 0x7c, 0x23, 0xb3, + 0xcb, 0x5d, 0x85, 0x90, 0x71, 0x41, 0xcd, 0x08, 0x77, 0xce, 0x20, 0xf1, 0xfe, 0xd2, 0xe4, 0x78, + 0x49, 0x39, 0x3c, 0x37, 0xf6, 0x2e, 0x46, 0x1a, 0xcf, 0x7e, 0xc8, 0x70, 0x23, 0x5d, 0xa0, 0x62, + 0xf7, 0x90, 0x98, 0xaa, 0xa2, 0x75, 0x77, 0x89, 0xb3, 0x2d, 0xd2, 0xa0, 0xa1, 0xc8, 0xe7, 0x59, + 0xd6, 0x8e, 0xb4, 0xe5, 0x48, 0xf0, 0xd5, 0x26, 0xe7, 0xea, 0x43, 0x05, 0x05, 0xf4, 0xd0, 0xdc, + 0x65, 0x39, 0xdb, 0x79, 0x53, 0x41, 0xf1, 0x67, 0xad, 0xf4, 0xc7, 0xf6, 0xbf, 0xcf, 0x77, 0x8e, + 0x55, 0x31, 0x73, 0x09, 0x8f, 0xee, 0x71, 0x53, 0xf9, 0x1a, 0xc1, 0xbf, 0x37, 0xd5, 0xcd, 0x3b, + 0x5e, 0x11, 0x67, 0x60, 0x02, 0x11, 0x76, 0x63, 0x39, 0x7e, 0xb1, 0x5d, 0x3d, 0x79, 0x7a, 0xfa, + 0x7b, 0x8a, 0x3b, 0xe2, 0xb8, 0xd5, 0xcc, 0xf9, 0x8f, 0xcd, 0x76, 0xfa, 0xee, 0xb8, 0xc2, 0xd4, + 0x2e, 0x80, 0x0e, 0x65, 0x68, 0x3d, 0x2b, 0x91, 0xc1, 0xe7, 0xa8, 0xc0, 0x86, 0x31, 0x2c, 0x75, + 0xc7, 0x98, 0xe2, 0x1c, 0xea, 0xf0, 0x7a, 0x3d, 0x48, 0xdc, 0xe4, 0x35, 0x7d, 0x89, 0xc0, 0x9a, + 0x8a, 0x37, 0x32, 0x27, 0xd7, 0xed, 0xd9, 0xbf, 0x46, 0x5b, 0xa2, 0x8d, 0xf7, 0xcc, 0x78, 0xcd, + 0x58, 0xf9, 
0x82, 0x1c, 0xa1, 0xe4, 0x77, 0x63, 0xb7, 0x1e, 0xf9, 0xca, 0x5b, 0xf5, 0x03, 0x0b, + 0x53, 0xca, 0x11, 0x74, 0x89, 0x3e, 0xb3, 0x50, 0xd4, 0xc8, 0xe4, 0xe4, 0xd5, 0xfa, 0x75, 0x79, + 0xe0, 0x21, 0xa7, 0xc4, 0x9c, 0x7e, 0x6b, 0x74, 0x3f, 0xe1, 0x66, 0x34, 0x82, 0x4a, 0xe6, 0x0f, + 0x3e, 0x17, 0xe8, 0x23, 0x9c, 0xa5, 0x89, 0x3d, 0xef, 0x34, 0xee, 0x84, 0x3b, 0xa1, 0xc5, 0x91, + 0x5a, 0x35, 0x58, 0x66, 0xb3, 0x3a, 0x6d, 0x58, 0x8f, 0x04, 0xe7, 0x17, 0xc4, 0x89, 0xac, 0x1a, + 0xee, 0x8c, 0x33, 0xae, 0xd8, 0x27, 0xfd, 0x92, 0xaf, 0x17, 0x15, 0xb0, 0xf0, 0xb6, 0x48, 0x1c, + 0x97, 0xf2, 0x1c, 0x12, 0xc2, 0xf2, 0xa7, 0x75, 0x3d, 0xc9, 0xd5, 0x70, 0x3b, 0x35, 0x5c, 0xdd, + 0xb6, 0x3d, 0x4c, 0x06, 0xbc, 0x6c, 0x76, 0x79, 0x77, 0xdc, 0xbb, 0x65, 0xfd, 0x35, 0x9a, 0xe1, + 0x96, 0xef, 0x7f, 0xa1, 0xe9, 0xba, 0xcd, 0x0f, 0xea, 0xf0, 0x4e, 0xef, 0xf4, 0x10, 0x2e, 0xe0, + 0x44, 0x62, 0x1c, 0x6f, 0xe9, 0x94, 0xb7, 0xb1, 0x48, 0xf1, 0x17, 0x48, 0x93, 0x51, 0xa6, 0xa7, + 0x8c, 0x60, 0xff, 0x4c, 0xb9, 0x96, 0x6a, 0xa8, 0xe7, 0x3f, 0x07, 0x28, 0x5b, 0xb3, 0x36, 0x88, + 0x02, 0x2d, 0x2b, 0x96, 0xc4, 0xef, 0x41, 0x83, 0x82, 0x00, 0x99, 0xb3, 0x32, 0x6e, 0x6e, 0x38, + 0xff, 0x26, 0xf4, 0xcb, 0x1c, 0x3e, 0xa5, 0xf8, 0xc5, 0x1c, 0x67, 0x2d, 0x27, 0xb5, 0xd1, 0x7c, + 0xf8, 0xe4, 0xbb, 0x59, 0xda, 0x1a, 0x1b, 0x1f, 0x2a, 0xd3, 0xda, 0x7f, 0x4e, 0xcb, 0xc5, 0xd4, + 0x6b, 0x68, 0xc5, 0xca, 0xc1, 0x49, 0xde, 0x34, 0x3d, 0x6b, 0x29, 0xcf, 0xae, 0xd4, 0x55, 0x65, + 0x52, 0x01, 0x69, 0xb7, 0x98, 0xfe, 0x3e, 0x57, 0x3d, 0x45, 0xaa, 0x72, 0x8f, 0x58, 0x0c, 0x07, + 0x87, 0x96, 0x95, 0x7b, 0x32, 0xa0, 0x36, 0x5c, 0x40, 0x92, 0x9e, 0xcf, 0x37, 0x13, 0xac, 0x13, + 0x42, 0xbc, 0xb1, 0xde, 0x97, 0x55, 0x6e, 0xd2, 0x0b, 0xd8, 0x3a, 0xab, 0x19, 0x89, 0xf9, 0xe5, + 0x75, 0x30, 0x60, 0xec, 0x6d, 0xb1, 0x7e, 0xea, 0x5a, 0x9f, 0x0b, 0x88, 0xd3, 0xe2, 0xd4, 0x3a, + 0x17, 0xf1, 0x5e, 0x34, 0x15, 0xca, 0x1a, 0xbd, 0x9b, 0x05, 0x9c, 0xba, 0x0f, 0x98, 0xdd, 0xb4, + 0x0c, 0x05, 0xfa, 0x18, 0xd7, 0xa8, 0xe2, 0xaf, 0x2c, 0xb6, 0xbe, 0x90, 0x8f, 0x67, 0xc0, 0xa0, + 0x2e, 0x3c, 0x14, 0xd2, 0xb7, 0x20, 0xa9, 0x63, 0x53, 0x39, 0xda, 0x27, 0xf1, 0xb0, 0x0f, 0xf9, + 0x24, 0x39, 0x73, 0xc1, 0xb8, 0xfc, 0x6e, 0x6e, 0xd5, 0xe2, 0xf7, 0xe7, 0x3f, 0x14, 0x36, 0x88, + 0xda, 0xaf, 0xe3, 0x43, 0x5b, 0x6e, 0xfd, 0xe1, 0x00, 0x4b, 0x80, 0xb2, 0x62, 0xc8, 0x20, 0xe6, + 0x5e, 0xf5, 0x68, 0xa1, 0xe8, 0x51, 0xca, 0xf9, 0x43, 0x02, 0xbd, 0x8b, 0x04, 0xf4, 0x75, 0xe9, + 0xda, 0xf9, 0xf7, 0x7e, 0x11, 0xc6, 0x6c, 0x51, 0xb1, 0x68, 0xc5, 0x67, 0xc7, 0xbe, 0x6f, 0x31, + 0xee, 0x4b, 0x10, 0x6b, 0x31, 0x8b, 0x28, 0x41, 0x2d, 0xb2, 0x4d, 0x07, 0x8a, 0x02, 0x8c, 0xae, + 0x3c, 0x90, 0xc3, 0xfd, 0xcd, 0x6e, 0xba, 0xaf, 0x72, 0x55, 0x55, 0xf2, 0xc0, 0xb3, 0x98, 0xd4, + 0x52, 0xea, 0x91, 0x6e, 0x5c, 0x8a, 0x71, 0x65, 0x07, 0x72, 0x52, 0x67, 0xbf, 0x72, 0xe6, 0xa3, + 0x21, 0x81, 0x9d, 0xac, 0x20, 0xb4, 0xc8, 0x8e, 0x7b, 0x86, 0xbb, 0x95, 0x3a, 0x41, 0x4c, 0xe5, + 0xbf, 0xf7, 0xe3, 0x92, 0x63, 0x00, 0xa6, 0xc6, 0x90, 0x3e, 0x8b, 0x9f, 0x34, 0x30, 0x53, 0xe8, + 0xf8, 0xea, 0x79, 0x21, 0x3d, 0xd5, 0x04, 0xfd, 0x77, 0x93, 0x18, 0x52, 0xfd, 0x5f, 0x5e, 0xa6, + 0x00, 0x6e, 0xd6, 0xd9, 0x68, 0x66, 0x16, 0x3f, 0x04, 0x43, 0x97, 0xd9, 0xcf, 0xa7, 0x20, 0x89, + 0xc7, 0x7d, 0xf6, 0xb6, 0xb4, 0xda, 0xf8, 0x0e, 0x9c, 0x7a, 0x9a, 0xcd, 0xfa, 0xf8, 0xab, 0x99, + 0xc9, 0x54, 0x14, 0x23, 0x96, 0x8e, 0xa5, 0x98, 0xaf, 0xfa, 0x83, 0x07, 0x6b, 0x19, 0x45, 0x3a, + 0x48, 0xd7, 0x1a, 0x81, 0x69, 0x1f, 0x52, 0x2a, 0xef, 0xb1, 0x5c, 0x23, 0x27, 0x13, 0xf3, 0x25, + 0xb3, 0x01, 0xf0, 0xb4, 0x46, 0x21, 
0x41, 0x88, 0x37, 0x4a, 0xb7, 0x5a, 0x82, 0xd4, 0x0b, 0x14, + 0x8b, 0x9b, 0x6b, 0x08, 0xa6, 0x36, 0x5d, 0xbd, 0x0f, 0xbc, 0x4e, 0xd9, 0x1f, 0x7f, 0x2c, 0xec, + 0x4b, 0x0c, 0xbb, 0x45, 0x48, 0xbd, 0xa9, 0xb2, 0xdf, 0x35, 0x43, 0xb5, 0x90, 0xb8, 0x78, 0xd5, + 0x99, 0x6c, 0x0c, 0x97, 0xfb, 0x44, 0x92, 0x9c, 0x46, 0x58, 0x81, 0xcc, 0xef, 0x6f, 0xdd, 0x5d, + 0xc0, 0x7b, 0xe1, 0x8c, 0xa9, 0x71, 0x54, 0x13, 0x6a, 0x6d, 0x0e, 0xcc, 0x33, 0x47, 0x36, 0x46, + 0xb0, 0x49, 0xf8, 0xeb, 0x34, 0x4d, 0x95, 0x42, 0x32, 0xc9, 0x28, 0xaf, 0x55, 0x3b, 0xa9, 0x43, + 0xd1, 0xfb, 0x2f, 0x1c, 0xe0, 0xbb, 0x42, 0x6f, 0xad, 0xf7, 0x84, 0x9a, 0xcc, 0x4a, 0x1e, 0xc6, + 0x3f, 0x90, 0x3b, 0xbd, 0xc0, 0xb3, 0x07, 0xe3, 0xb6, 0x95, 0x81, 0xf2, 0xc1, 0x2c, 0x17, 0xaf, + 0x16, 0x8e, 0xdd, 0x2c, 0x4f, 0xc7, 0xa4, 0x53, 0x27, 0xc9, 0xb4, 0x14, 0x39, 0x92, 0xa5, 0xcc, + 0xf3, 0x04, 0xc8, 0xfe, 0xae, 0xeb, 0xbf, 0x2f, 0x8d, 0xfb, 0x5c, 0x17, 0x0f, 0x10, 0xb5, 0x43, + 0xfe, 0x63, 0x64, 0xa1, 0x55, 0x91, 0xda, 0xfc, 0x9c, 0x54, 0x66, 0x25, 0x38, 0x92, 0x43, 0x22, + 0x24, 0xd6, 0xa1, 0x59, 0xcb, 0xd0, 0xed, 0x72, 0x87, 0xdd, 0x9c, 0xf2, 0x55, 0x63, 0x69, 0xa1, + 0xba, 0xe3, 0xcb, 0x19, 0x15, 0xc3, 0xbd, 0xa4, 0x91, 0x00, 0x4d, 0xa4, 0x36, 0x94, 0x87, 0x9c, + 0x97, 0x97, 0x9b, 0x79, 0xa0, 0x7a, 0x06, 0x92, 0xd2, 0x74, 0xae, 0x09, 0x40, 0xfa, 0x3b, 0xbc, + 0x90, 0x20, 0x5c, 0x72, 0x5f, 0x27, 0xc9, 0xa0, 0x49, 0x2c, 0xd7, 0xbd, 0x1c, 0xf3, 0x34, 0xf6, + 0x53, 0x1c, 0xbe, 0x78, 0x09, 0x42, 0x4e, 0xc7, 0x69, 0x26, 0x67, 0xa6, 0xa3, 0x24, 0x3a, 0xd3, + 0x45, 0x90, 0x66, 0xfb, 0xd8, 0x7b, 0xbb, 0xea, 0xa5, 0x1e, 0x87, 0x77, 0x0e, 0xb0, 0x5e, 0x75, + 0xdd, 0x80, 0xc6, 0x6e, 0xfd, 0x2c, 0x9c, 0x48, 0xc3, 0x31, 0xfc, 0x1b, 0x39, 0x96, 0xc0, 0x87, + 0xc3, 0xe1, 0x8d, 0xd2, 0x3b, 0x6c, 0xfe, 0xf4, 0x18, 0xcd, 0x32, 0x7f, 0xbd, 0x1b, 0x09, 0x47, + 0x4f, 0x7b, 0xe6, 0x49, 0xea, 0x5c, 0x8b, 0x41, 0x61, 0xc9, 0xc8, 0x1d, 0x56, 0xa4, 0x57, 0x88, + 0xe5, 0xe5, 0xa8, 0xef, 0x0a, 0xfa, 0xbe, 0x06, 0x8b, 0xba, 0x7c, 0x2e, 0x44, 0xcb, 0x23, 0x69, + 0x08, 0xc9, 0x3f, 0x89, 0x90, 0xbb, 0x06, 0x4b, 0xcc, 0x7c, 0xc1, 0xc4, 0x25, 0x83, 0x4f, 0x2f, + 0x3f, 0xd4, 0x55, 0xe8, 0x91, 0xc8, 0xc6, 0x3c, 0x7a, 0x6d, 0xd3, 0xad, 0x42, 0x36, 0x33, 0x89, + 0xfa, 0x60, 0x52, 0x02, 0xca, 0x59, 0xb6, 0x73, 0x7a, 0xa9, 0x44, 0x20, 0x3b, 0x14, 0x57, 0xad, + 0x24, 0x05, 0x91, 0x9b, 0x72, 0xfa, 0x75, 0x80, 0x3c, 0x04, 0x36, 0xd9, 0x61, 0xb3, 0x3c, 0x67, + 0x17, 0x00, 0xab, 0x11, 0x25, 0xef, 0x8f, 0x68, 0x2a, 0xb9, 0x68, 0xd4, 0x36, 0x13, 0x88, 0x4e, + 0xe2, 0xaa, 0x06, 0x70, 0xce, 0x99, 0x28, 0x8f, 0x0c, 0x74, 0xcd, 0xcb, 0x6c, 0x6d, 0xf0, 0x9c, + 0xbd, 0xa0, 0xd1, 0xc4, 0x60, 0xf9, 0x6d, 0xc0, 0x97, 0xb6, 0x96, 0x2a, 0xdc, 0x09, 0x5f, 0x48, + 0xb0, 0x08, 0xd0, 0x18, 0x6f, 0x0e, 0xd9, 0xf6, 0x5d, 0x39, 0x6f, 0x89, 0x2b, 0xe5, 0x96, 0x42, + 0xe2, 0xa6, 0x7b, 0x55, 0xdd, 0x88, 0x1f, 0x99, 0x6e, 0x9b, 0x17, 0x91, 0x42, 0x6b, 0x7a, 0x14, + 0xd2, 0x14, 0xed, 0xfb, 0x52, 0xcf, 0x3f, 0x22, 0xc5, 0x12, 0xb1, 0x58, 0x54, 0x8b, 0xcb, 0xf2, + 0x9b, 0x5c, 0xd8, 0x0d, 0xaf, 0x72, 0xc7, 0x61, 0xbe, 0x11, 0xc0, 0xde, 0x39, 0x3b, 0xe4, 0x19, + 0x14, 0xc3, 0xe5, 0xaa, 0x11, 0xe7, 0x2d, 0xe7, 0x23, 0x08, 0xd1, 0x18, 0x4d, 0x05, 0xb1, 0x24, + 0x94, 0x4c, 0x32, 0xe9, 0xd3, 0x49, 0xff, 0xd0, 0xe4, 0xcb, 0x1d, 0x16, 0x20, 0xa4, 0x58, 0x64, + 0xa1, 0xa6, 0xe1, 0x6f, 0x14, 0x27, 0xce, 0x48, 0x33, 0x88, 0x82, 0x2a, 0x4e, 0xfa, 0xa3, 0xac, + 0xb0, 0x20, 0xd4, 0x22, 0xf6, 0x71, 0x38, 0x66, 0xb0, 0x81, 0x15, 0x54, 0x74, 0x35, 0x00, 0xcc, + 0x0a, 0x81, 0xc1, 0xdb, 0x86, 0xc4, 0x27, 0x6b, 0x71, 0x0b, 
0xa3, 0xda, 0xd5, 0xfb, 0x79, 0x16, + 0xa6, 0x2b, 0x4b, 0xa5, 0x47, 0xf4, 0xc5, 0xb0, 0xfe, 0x52, 0x39, 0xb1, 0xe3, 0x6d, 0x39, 0xad, + 0xdb, 0xa5, 0xd6, 0x09, 0x55, 0x22, 0xf6, 0x5f, 0xc3, 0xdc, 0x92, 0x07, 0xcb, 0x0c, 0x4c, 0x14, + 0x2c, 0x7f, 0x0b, 0x9a, 0xf9, 0x74, 0x86, 0xb5, 0x68, 0x37, 0xc3, 0xe0, 0xbb, 0xdf, 0x3f, 0x51, + 0xe5, 0x04, 0x1c, 0x2b, 0xd6, 0xcb, 0xd7, 0xc3, 0x24, 0x52, 0x3c, 0x55, 0x32, 0xe6, 0x12, 0x47, + 0xe9, 0xb1, 0xf6, 0x64, 0x10, 0x4c, 0x6b, 0x86, 0xeb, 0xdd, 0x34, 0xc5, 0x73, 0x40, 0xdb, 0x80, + 0xd7, 0x9f, 0x1f, 0xc6, 0x86, 0xcb, 0x18, 0x3b, 0x97, 0xb7, 0xd2, 0xd6, 0x89, 0x18, 0x94, 0x32, + 0x93, 0xe2, 0x02, 0x17, 0xfc, 0xf1, 0x7f, 0xa8, 0xc7, 0x02, 0xd0, 0x86, 0x3f, 0x4b, 0x42, 0x8c, + 0xcc, 0x45, 0x53, 0xe7, 0x2d, 0x95, 0x3c, 0x9a, 0x58, 0x81, 0x97, 0x01, 0x6f, 0x15, 0xe9, 0x50, + 0xb3, 0x35, 0x7d, 0xc0, 0x3e, 0xc2, 0xe2, 0xda, 0xe2, 0x8e, 0xcd, 0xb5, 0x7e, 0x6e, 0xde, 0x3b, + 0x98, 0x22, 0x80, 0xfb, 0xab, 0xf1, 0xb8, 0x18, 0x60, 0x1e, 0x0c, 0x6d, 0xc3, 0xdc, 0x41, 0x04, + 0xc7, 0xd9, 0x06, 0x53, 0x9d, 0xf0, 0xa3, 0x14, 0x36, 0x2e, 0xfa, 0x31, 0xcf, 0x2a, 0x70, 0x5f, + 0x44, 0x67, 0xb5, 0x4d, 0x38, 0xff, 0x7a, 0x0d, 0xa5, 0x5e, 0xb2, 0xc7, 0x8c, 0x79, 0xde, 0x91, + 0x48, 0x7e, 0xa1, 0x19, 0x9b, 0xdc, 0x68, 0x3a, 0x57, 0xeb, 0xba, 0xfa, 0x65, 0x3b, 0x20, 0xec, + 0x2c, 0x44, 0xc5, 0x1a, 0x89, 0xc9, 0xac, 0x96, 0xe9, 0x4d, 0x38, 0x86, 0x31, 0x89, 0x7d, 0x6a, + 0x33, 0x6d, 0x8e, 0xd4, 0x34, 0x21, 0x6b, 0xdb, 0x0e, 0x5f, 0xb2, 0xf2, 0x38, 0xe1, 0xd3, 0xad, + 0xaf, 0x6d, 0x7b, 0x92, 0x04, 0xf5, 0x08, 0x5c, 0x54, 0x64, 0x8d, 0x79, 0x9e, 0x6d, 0xeb, 0x7b, + 0xdc, 0xf8, 0xd1, 0x7c, 0xe0, 0x89, 0x1d, 0x88, 0xcc, 0xbf, 0xbc, 0x53, 0x88, 0x05, 0xeb, 0x67, + 0x9d, 0xff, 0xdb, 0x69, 0x95, 0xd9, 0x4c, 0xd9, 0x6a, 0x4f, 0x06, 0x83, 0xa2, 0xfd, 0x13, 0x0b, + 0xd5, 0x8e, 0xff, 0xf2, 0x24, 0xdf, 0xd4, 0xd4, 0x29, 0x00, 0x81, 0x3b, 0xd2, 0x6b, 0xe1, 0xae, + 0xe0, 0x7f, 0x58, 0x3b, 0x92, 0x88, 0x19, 0xba, 0x67, 0xa8, 0xaa, 0x65, 0x19, 0xd2, 0x2e, 0x49, + 0xc9, 0x2e, 0x52, 0xc1, 0x77, 0xe2, 0x2f, 0xb4, 0x5c, 0x56, 0xd8, 0x6b, 0x9a, 0x8c, 0xbc, 0x66, + 0x1f, 0x45, 0x2b, 0x72, 0xcc, 0xad, 0xcb, 0x81, 0xe6, 0x08, 0x5f, 0x4e, 0xef, 0xe9, 0x4e, 0x0b, + 0xe2, 0x19, 0x70, 0x6f, 0x03, 0x7c, 0x95, 0x60, 0xdf, 0x05, 0xae, 0xf3, 0xe7, 0x14, 0xf7, 0x38, + 0xd5, 0x5b, 0x56, 0x97, 0xcd, 0x8d, 0x8f, 0x6a, 0x2e, 0x3d, 0x2b, 0x99, 0x54, 0x0a, 0xb9, 0x1e, + 0x82, 0xa4, 0x09, 0x83, 0x10, 0x31, 0x14, 0x63, 0xd3, 0xbb, 0x45, 0xb0, 0xff, 0x98, 0x49, 0xbb, + 0x0d, 0x96, 0xf6, 0x89, 0x96, 0xaa, 0xe4, 0x23, 0x26, 0xb9, 0x51, 0x21, 0x33, 0xe8, 0x30, 0x62, + 0x4a, 0xf9, 0x1c, 0x6e, 0x19, 0x77, 0xbf, 0x19, 0xe8, 0x47, 0xda, 0x25, 0xd0, 0x6c, 0x8f, 0x9c, + 0x72, 0x13, 0x2e, 0xfe, 0xdc, 0xe4, 0x7b, 0x37, 0x29, 0x2e, 0x73, 0xa5, 0x8b, 0xd4, 0xe0, 0x1d, + 0x91, 0x9c, 0xea, 0xcc, 0x8e, 0xbf, 0xae, 0x27, 0x0d, 0x8a, 0x50, 0x36, 0x21, 0x05, 0x7d, 0xb6, + 0x06, 0x0f, 0xc4, 0x9b, 0x78, 0xe5, 0xb1, 0x94, 0x68, 0xa4, 0xe7, 0xdd, 0xb4, 0x37, 0x8b, 0x18, + 0x8f, 0x9d, 0xf4, 0xf6, 0x54, 0x5f, 0x1c, 0x3d, 0x98, 0xa3, 0x0d, 0xb0, 0xc8, 0x33, 0x00, 0xc0, + 0x40, 0x9c, 0x95, 0x21, 0x44, 0xfc, 0xb6, 0xbe, 0x64, 0x8b, 0xdd, 0xc5, 0x40, 0xb4, 0xff, 0x94, + 0x4e, 0xf6, 0x26, 0x66, 0xe8, 0xf7, 0x0c, 0xf3, 0x79, 0x22, 0x9a, 0x5c, 0x11, 0x7f, 0xa2, 0xb6, + 0x86, 0xff, 0x60, 0xbe, 0x9e, 0x53, 0x27, 0x4c, 0xa5, 0x73, 0x4f, 0x58, 0x33, 0xac, 0xb4, 0x96, + 0xa8, 0x3f, 0x20, 0xbf, 0xa2, 0x7e, 0x5b, 0xf1, 0xcc, 0x59, 0xec, 0x00, 0x70, 0xa5, 0x60, 0xcc, + 0x83, 0x84, 0xff, 0xa2, 0x1b, 0x49, 0x91, 0x38, 0x2f, 0xf2, 0x09, 0xaf, 0xc8, 0x8b, 
0x0b, 0x5f, + 0xbd, 0x9a, 0xc4, 0xd8, 0x7f, 0xa3, 0xad, 0xec, 0x6c, 0x3b, 0x7d, 0x8e, 0xe8, 0x33, 0x86, 0xea, + 0x05, 0x5f, 0x32, 0x91, 0xde, 0xc3, 0xf5, 0x69, 0x50, 0x73, 0x8b, 0x4f, 0xc8, 0x17, 0x66, 0xc3, + 0xae, 0x7b, 0xd8, 0xec, 0x8f, 0xbe, 0x8a, 0x6a, 0xe3, 0x54, 0x88, 0x87, 0xf6, 0x95, 0x99, 0x07, + 0x59, 0x41, 0x1a, 0x5d, 0x44, 0x00, 0x77, 0x9b, 0x26, 0xc9, 0x3c, 0xba, 0x94, 0xe6, 0xc3, 0x6f, + 0xec, 0xb9, 0x4c, 0x75, 0x96, 0x72, 0xe8, 0x9e, 0x7b, 0x21, 0xea, 0x58, 0xfa, 0x42, 0x91, 0x5c, + 0x11, 0x29, 0x4b, 0xa1, 0x2b, 0xeb, 0x29, 0xb4, 0x69, 0xae, 0x2c, 0x5d, 0x6d, 0xe5, 0xda, 0x27, + 0xe3, 0xc8, 0x34, 0xc6, 0xc5, 0x99, 0x0a, 0xab, 0x59, 0x17, 0x7e, 0x8f, 0xfe, 0xaf, 0x36, 0x58, + 0xd2, 0x11, 0xcd, 0xe5, 0x29, 0x8a, 0x47, 0x1f, 0x35, 0xe0, 0x76, 0x98, 0x74, 0x5d, 0xa1, 0x1c, + 0xd5, 0x7b, 0x40, 0x37, 0x50, 0x87, 0xd3, 0xc8, 0xfb, 0x22, 0xe9, 0x6c, 0xf0, 0x84, 0x09, 0x3e, + 0x2d, 0x94, 0x3a, 0x94, 0x28, 0x3c, 0xe7, 0xaf, 0x10, 0x61, 0x1f, 0xfe, 0xa7, 0xd5, 0x63, 0x6e, + 0x37, 0x8c, 0xf6, 0x1f, 0x51, 0x9f, 0x6a, 0xfc, 0xfc, 0xec, 0x9d, 0x6f, 0x0b, 0x0f, 0x54, 0x63, + 0x19, 0x43, 0xe4, 0x13, 0x92, 0x14, 0xfc, 0xd4, 0x89, 0xbd, 0xd9, 0xe2, 0xfa, 0x07, 0x10, 0x19, + 0x95, 0x39, 0x7a, 0x16, 0xe4, 0x44, 0xcc, 0x32, 0xdd, 0xf6, 0x23, 0xa6, 0xa7, 0x41, 0x88, 0x50, + 0x6d, 0x5b, 0x35, 0x43, 0x4e, 0xba, 0x25, 0x17, 0xb5, 0x05, 0x7f, 0x49, 0x96, 0xaa, 0xbf, 0x9c, + 0x7f, 0xd6, 0x20, 0xd0, 0x53, 0x16, 0x25, 0xf5, 0xf0, 0xd5, 0xcf, 0x11, 0x2a, 0xf6, 0x4f, 0x9a, + 0x83, 0x23, 0x6b, 0x93, 0x6f, 0x17, 0x3f, 0x2f, 0x11, 0x12, 0x6f, 0x71, 0xc9, 0x05, 0x07, 0xc5, + 0xb4, 0x57, 0x83, 0x04, 0x73, 0x5d, 0xd3, 0x65, 0x22, 0x2a, 0xf6, 0xe4, 0xd2, 0x79, 0x31, 0x63, + 0xe1, 0x60, 0x62, 0xee, 0xc0, 0xb2, 0x2c, 0xe9, 0x12, 0x9f, 0xad, 0x89, 0x76, 0xbc, 0xc2, 0x4e, + 0x20, 0xd1, 0x07, 0x0d, 0x3d, 0x74, 0x6d, 0x4a, 0x5f, 0x45, 0xd8, 0x12, 0x23, 0x3e, 0xa3, 0xc5, + 0xa7, 0x8f, 0x8e, 0x50, 0x18, 0xaf, 0x43, 0x00, 0x9d, 0xc6, 0xf5, 0x17, 0x77, 0x57, 0xfb, 0x7c, + 0xf3, 0xed, 0x06, 0xeb, 0x12, 0x2f, 0xfd, 0x4d, 0x57, 0xda, 0xac, 0x1e, 0x4a, 0x7f, 0x88, 0xe1, + 0x77, 0xbf, 0x51, 0xc8, 0x47, 0xf3, 0x61, 0x94, 0x4b, 0xde, 0x75, 0x98, 0x9d, 0x9e, 0x44, 0xe7, + 0x04, 0xd4, 0x33, 0xc3, 0x8a, 0x9a, 0x39, 0x7e, 0xb0, 0xf5, 0x58, 0xe3, 0xa2, 0x6d, 0x24, 0x63, + 0x1f, 0x7c, 0xb2, 0x2f, 0xce, 0x9f, 0xed, 0xe6, 0xac, 0x09, 0xb0, 0xf9, 0x81, 0xee, 0x49, 0xe0, + 0xbb, 0xa5, 0x6e, 0x62, 0xe4, 0x5c, 0x8e, 0x16, 0xf4, 0x51, 0x4e, 0xbf, 0x5e, 0x5c, 0x00, 0x40, + 0xb4, 0x96, 0x46, 0x8d, 0x9f, 0x86, 0x26, 0x65, 0x02, 0x56, 0x9a, 0x24, 0x08, 0x36, 0x19, 0x9c, + 0x0e, 0x36, 0x59, 0xd0, 0x70, 0x05, 0x7d, 0x96, 0xc3, 0xa5, 0xe2, 0x8c, 0x25, 0x7d, 0xc6, 0xcf, + 0xf8, 0x91, 0xcf, 0x7e, 0x53, 0x05, 0x83, 0x75, 0xcb, 0x4c, 0xbd, 0xa7, 0x7a, 0xc6, 0xe3, 0x0d, + 0x6a, 0x43, 0x4c, 0xa0, 0x7d, 0xce, 0x77, 0x62, 0xb4, 0x24, 0xc2, 0x11, 0x6b, 0x2c, 0x4f, 0x06, + 0xcf, 0xc3, 0x68, 0xf0, 0x5d, 0xf7, 0x1c, 0x95, 0x4d, 0x2b, 0x9f, 0xab, 0x52, 0x8d, 0x9b, 0xca, + 0x8c, 0x08, 0x60, 0x6a, 0xe0, 0x94, 0x0f, 0xa2, 0x05, 0x26, 0x41, 0xf3, 0xb3, 0x9e, 0x7e, 0x73, + 0xb1, 0xb2, 0x21, 0x0d, 0x9b, 0xbc, 0x13, 0x10, 0x93, 0x37, 0x53, 0xea, 0x28, 0x20, 0x40, 0xec, + 0x8c, 0x6d, 0x56, 0x3e, 0xea, 0xf0, 0xd3, 0xd1, 0x0f, 0xe7, 0x25, 0x58, 0x2d, 0x21, 0xb2, 0xf1, + 0xfc, 0x47, 0x69, 0xc7, 0x9c, 0xb6, 0x6a, 0xe9, 0xd6, 0xf3, 0xc5, 0x17, 0xc0, 0x66, 0x83, 0x9e, + 0x84, 0x4c, 0x3e, 0xa2, 0x83, 0xf8, 0x00, 0x0f, 0xe2, 0x89, 0x34, 0x13, 0xa3, 0x65, 0x29, 0x5e, + 0x6c, 0xf9, 0x6c, 0xe0, 0x1b, 0xc8, 0x69, 0x44, 0x3e, 0xa7, 0x0c, 0x4b, 0x25, 0xff, 0x30, 0xc6, + 0xb8, 0xa1, 
0x0f, 0x00, 0xfb, 0xe6, 0x98, 0xed, 0x67, 0xdf, 0x4e, 0x55, 0x52, 0xc5, 0xa9, 0x0e, + 0xdc, 0x49, 0xd6, 0x1c, 0xd4, 0x8a, 0xd8, 0xe0, 0x45, 0x1a, 0xc5, 0x2f, 0xdc, 0x06, 0x77, 0x23, + 0xc1, 0xba, 0x0d, 0x05, 0x47, 0x17, 0x14, 0x83, 0xaf, 0x15, 0x49, 0x66, 0xbf, 0x18, 0x0a, 0x8c, + 0xed, 0x79, 0x70, 0x89, 0x03, 0x5b, 0x1f, 0xda, 0x29, 0xb2, 0xbb, 0x38, 0x88, 0x5e, 0x63, 0x77, + 0xc5, 0xcb, 0xe8, 0x7a, 0xbf, 0xb2, 0x4a, 0xdd, 0xcb, 0xe0, 0x11, 0x54, 0x4e, 0x0d, 0xa9, 0x5b, + 0xf6, 0xc4, 0x28, 0x63, 0xb9, 0xbd, 0x83, 0x48, 0xce, 0xdd, 0x03, 0x90, 0x55, 0xbf, 0x51, 0x3e, + 0x3a, 0x14, 0x7d, 0xcb, 0x21, 0x76, 0x84, 0x30, 0x14, 0x61, 0xe1, 0x1c, 0xcb, 0x65, 0x7f, 0xd1, + 0x69, 0xaf, 0x76, 0xab, 0x1d, 0xbc, 0x2b, 0x46, 0xca, 0xd1, 0x33, 0x9b, 0x4b, 0xe0, 0xfe, 0x4a, + 0x7d, 0x0c, 0x82, 0xef, 0x37, 0xf3, 0x73, 0x91, 0xdc, 0xd5, 0xa2, 0x9e, 0x24, 0x20, 0xa4, 0xf4, + 0xef, 0x2c, 0x58, 0xd6, 0x83, 0x55, 0x2b, 0x43, 0x5e, 0x28, 0x18, 0xce, 0xfc, 0xb0, 0xc4, 0x12, + 0x69, 0x24, 0xb8, 0x07, 0x94, 0x39, 0xc5, 0x58, 0xcc, 0x68, 0xb9, 0xb3, 0x87, 0x89, 0x78, 0x46, + 0x42, 0xe9, 0x83, 0xb0, 0xcb, 0x62, 0xe2, 0x85, 0xe5, 0x70, 0x2d, 0xf5, 0x2e, 0x6a, 0x65, 0xe0, + 0x8f, 0x02, 0xb4, 0x57, 0x57, 0xdd, 0x6e, 0x2c, 0x66, 0xaf, 0xc7, 0x3d, 0x6d, 0xf5, 0xba, 0xec, + 0xfa, 0x51, 0x78, 0xe8, 0x85, 0x34, 0x49, 0xcd, 0xbf, 0x17, 0xc0, 0x93, 0x30, 0x25, 0x2a, 0x85, + 0xb7, 0x5e, 0x10, 0xdf, 0xfe, 0xb6, 0x5e, 0x77, 0x42, 0x91, 0x21, 0x18, 0x94, 0xe6, 0xff, 0x26, + 0x22, 0x6f, 0x78, 0xd1, 0xef, 0x3b, 0xa5, 0x5c, 0x2f, 0xa2, 0x1d, 0x34, 0x80, 0x93, 0xfe, 0x52, + 0xc2, 0xbf, 0xd9, 0xa4, 0xb8, 0xd6, 0xee, 0x52, 0xbe, 0xbb, 0x62, 0xf6, 0xd3, 0x69, 0x9c, 0x48, + 0xdf, 0xae, 0xe2, 0xc4, 0xf9, 0x40, 0xa3, 0x8b, 0x96, 0x03, 0xae, 0x02, 0x6b, 0x67, 0x4f, 0x5a, + 0x8f, 0xf3, 0xe0, 0x1b, 0x6b, 0x53, 0x9a, 0x98, 0xbe, 0x62, 0x3e, 0xac, 0x4d, 0x64, 0x0e, 0x08, + 0x4b, 0x3e, 0xf6, 0x48, 0x48, 0x32, 0x43, 0x6e, 0x1b, 0xcd, 0x45, 0xd9, 0xf8, 0xef, 0xa3, 0x0a, + 0x8c, 0x6d, 0x18, 0x5a, 0x08, 0x3b, 0x55, 0x3c, 0x23, 0xef, 0x0d, 0x5d, 0x12, 0x32, 0xfe, 0x49, + 0xf0, 0x2c, 0xbc, 0xdd, 0x72, 0xd3, 0x13, 0xb6, 0x4c, 0xf6, 0xb1, 0x7c, 0x19, 0x59, 0x12, 0x13, + 0xab, 0xaa, 0x8c, 0x4c, 0xb6, 0x74, 0xc0, 0x77, 0xac, 0x12, 0xdb, 0xbc, 0x66, 0xae, 0xf6, 0x2b, + 0xae, 0x04, 0xbe, 0xeb, 0x65, 0x29, 0xcf, 0x0e, 0x47, 0x2a, 0xbf, 0x5f, 0x18, 0xb5, 0x65, 0x48, + 0xfe, 0x4d, 0x01, 0x1c, 0xab, 0xf4, 0xda, 0x45, 0xf2, 0x58, 0x68, 0xcd, 0xca, 0x0c, 0x1c, 0x38, + 0x39, 0x1f, 0x9a, 0xcc, 0xec, 0x44, 0x22, 0x8e, 0xd1, 0xc5, 0x83, 0xae, 0xef, 0xd3, 0xd0, 0x23, + 0xc3, 0x33, 0x56, 0xb7, 0x36, 0xcb, 0x2f, 0x6c, 0xea, 0x31, 0xd7, 0xda, 0x23, 0x73, 0xba, 0x22, + 0x8f, 0x67, 0x06, 0x4c, 0x79, 0x10, 0x0a, 0x03, 0x4c, 0x0f, 0x3a, 0xe4, 0x13, 0xc7, 0xea, 0x3d, + 0x92, 0x3e, 0xc8, 0xd6, 0xb2, 0x16, 0x30, 0x7e, 0x48, 0x87, 0x6a, 0xe8, 0xdd, 0x23, 0xef, 0xb8, + 0xae, 0x4c, 0x76, 0xf2, 0x90, 0x3b, 0xfc, 0x64, 0x8d, 0xc2, 0xc1, 0x6c, 0xe6, 0xa0, 0x71, 0xe7, + 0x4f, 0xf4, 0xb7, 0x31, 0xc0, 0x25, 0x60, 0x16, 0xef, 0xfb, 0x8c, 0x35, 0xaf, 0x75, 0x3f, 0x90, + 0x6b, 0x76, 0x03, 0xf0, 0xf3, 0x76, 0x4b, 0xc4, 0xa3, 0x2f, 0xef, 0x8d, 0xe0, 0x32, 0x63, 0xcb, + 0xba, 0x38, 0x12, 0x87, 0x24, 0xea, 0xa1, 0x87, 0xdb, 0x7a, 0x9e, 0x17, 0x41, 0xc6, 0x3d, 0xb0, + 0xb7, 0xbf, 0x5b, 0x34, 0xbb, 0xf5, 0xfb, 0x5e, 0x93, 0x8e, 0xd6, 0xb9, 0x77, 0x5c, 0x93, 0x6a, + 0x44, 0x1f, 0xaf, 0x05, 0x70, 0xe1, 0x3c, 0x31, 0xae, 0xe7, 0xc3, 0x63, 0x18, 0xe9, 0x12, 0xed, + 0x93, 0x18, 0x10, 0xa1, 0xb1, 0xda, 0x53, 0x35, 0x7c, 0x90, 0x94, 0x59, 0xdd, 0xae, 0x87, 0x91, + 0x8a, 0x20, 0xc2, 0x95, 0xe3, 0x5a, 
0x4d, 0x82, 0x99, 0x7a, 0x1f, 0x12, 0x5a, 0x77, 0x69, 0xcd, + 0xc0, 0x41, 0xe5, 0x43, 0x2b, 0x93, 0x58, 0xb4, 0x5e, 0x06, 0xa0, 0xe6, 0x6b, 0x0a, 0x42, 0x3a, + 0xe4, 0x12, 0xc2, 0x31, 0xb6, 0x35, 0x57, 0xe1, 0xfa, 0xea, 0x51, 0x92, 0x5b, 0x0b, 0x00, 0x7d, + 0x4a, 0x37, 0xf5, 0x82, 0x06, 0xdb, 0xf3, 0x7f, 0x9c, 0xbd, 0x08, 0x9a, 0x78, 0x40, 0xe4, 0x38, + 0x79, 0xbf, 0xd1, 0xa6, 0x7a, 0x2f, 0x98, 0x5b, 0x4b, 0xe8, 0xe5, 0x0f, 0xb6, 0x54, 0x9f, 0xcf, + 0x30, 0x90, 0x63, 0xd3, 0xd1, 0x61, 0x82, 0x69, 0xd3, 0x60, 0xeb, 0xf2, 0xbb, 0x0e, 0x89, 0xfe, + 0x01, 0xe2, 0x3b, 0x7b, 0x01, 0xbe, 0xc5, 0x84, 0x62, 0xbe, 0xda, 0x78, 0x54, 0x0e, 0x89, 0x66, + 0x63, 0x32, 0x43, 0x50, 0xf0, 0xae, 0x2e, 0x00, 0x1f, 0x85, 0x5a, 0xb0, 0x27, 0x24, 0x27, 0x5d, + 0x99, 0x1b, 0x57, 0x30, 0xdc, 0x30, 0xd8, 0x06, 0x6d, 0xeb, 0xdc, 0xd0, 0x06, 0x5a, 0xb9, 0xfd, + 0x13, 0x91, 0x24, 0xb9, 0xf7, 0xc3, 0x70, 0xad, 0x01, 0x43, 0x2f, 0x0d, 0xb2, 0x23, 0x30, 0xe1, + 0xe8, 0xe5, 0x3d, 0x71, 0x9f, 0xb9, 0x35, 0x9f, 0xca, 0xab, 0x95, 0x85, 0xe0, 0x20, 0xe4, 0x9d, + 0xbb, 0xd2, 0x32, 0xc0, 0xaf, 0x0d, 0x14, 0x8d, 0x65, 0x10, 0xe4, 0x0f, 0x2c, 0x17, 0x1b, 0x36, + 0xb6, 0xb2, 0xc2, 0xfc, 0xa5, 0xd4, 0x50, 0x02, 0x22, 0x4f, 0x08, 0xdb, 0x68, 0x05, 0x5d, 0x57, + 0xf9, 0x89, 0x85, 0x65, 0x75, 0x7b, 0x8b, 0xdc, 0xd9, 0xb3, 0x26, 0x02, 0x93, 0x01, 0x29, 0xf3, + 0x97, 0x7c, 0x22, 0x13, 0x05, 0x83, 0x37, 0x01, 0x05, 0xf8, 0x5e, 0x10, 0xf8, 0xcb, 0xdb, 0xcf, + 0x95, 0xa1, 0xd6, 0xb8, 0x13, 0x42, 0x5a, 0x58, 0x78, 0x91, 0xad, 0x3f, 0x57, 0xea, 0x29, 0xb7, + 0x58, 0x19, 0x3f, 0xbc, 0xe4, 0xf4, 0x41, 0x15, 0x50, 0x52, 0x9d, 0xf1, 0xf3, 0xad, 0x08, 0xf7, + 0x13, 0xa8, 0x8c, 0x5d, 0x7e, 0xb2, 0x71, 0xcc, 0x2a, 0xff, 0x7b, 0x45, 0x84, 0x2d, 0xc5, 0x76, + 0xff, 0x4b, 0x17, 0x59, 0x44, 0x7c, 0x85, 0x4a, 0x9a, 0x82, 0x73, 0xac, 0x57, 0xec, 0x8e, 0xf0, + 0xf6, 0xaa, 0x8e, 0x77, 0x75, 0x76, 0xf1, 0x9c, 0x25, 0x35, 0xac, 0xff, 0x43, 0x67, 0xe4, 0x7a, + 0xbc, 0x0b, 0x49, 0xf0, 0x3a, 0x5c, 0x21, 0x81, 0x28, 0x06, 0xeb, 0x92, 0x75, 0xa0, 0xf6, 0xb2, + 0xa8, 0xbc, 0x21, 0xa6, 0x94, 0x6a, 0x56, 0x05, 0xa2, 0x92, 0x33, 0x79, 0xfe, 0x29, 0x65, 0x98, + 0x77, 0xbd, 0x0d, 0xbb, 0xd3, 0xb7, 0x69, 0xa5, 0xe5, 0x69, 0xfb, 0x49, 0xdd, 0xaf, 0x04, 0x44, + 0xe1, 0x09, 0xb1, 0x3f, 0x22, 0x94, 0x4e, 0x4d, 0x0f, 0xcf, 0xec, 0x4f, 0xa8, 0xd4, 0xf6, 0x43, + 0xa9, 0x7b, 0x33, 0xf3, 0x51, 0x99, 0x6f, 0x51, 0x57, 0x93, 0x37, 0x92, 0x5a, 0x33, 0x61, 0x8b, + 0x56, 0xd5, 0xd6, 0xd0, 0x06, 0xb4, 0x7a, 0xc9, 0xa2, 0x67, 0x4e, 0xd9, 0x09, 0xb0, 0x9f, 0xa9, + 0x43, 0x93, 0x78, 0xf0, 0x80, 0x82, 0x07, 0x32, 0x1f, 0xc4, 0x4d, 0x89, 0x5b, 0xbf, 0x52, 0xe0, + 0xc9, 0xf8, 0x86, 0x3c, 0x90, 0xe5, 0x22, 0x2e, 0x86, 0x5e, 0x02, 0xfa, 0x30, 0xad, 0x14, 0xd3, + 0x92, 0xde, 0x99, 0x63, 0xba, 0xfb, 0x8e, 0x9f, 0x4e, 0xc5, 0x6f, 0x0c, 0xf5, 0x70, 0xeb, 0x04, + 0x6c, 0xc2, 0x86, 0xcf, 0x78, 0xc3, 0x77, 0xfd, 0x5a, 0x2b, 0x42, 0xbd, 0xe3, 0x64, 0x74, 0xcc, + 0xcc, 0xc0, 0x13, 0xdc, 0x39, 0xf8, 0x04, 0x3c, 0x4d, 0xe8, 0x78, 0x08, 0x57, 0xdd, 0xa6, 0xd0, + 0xd3, 0xf8, 0x47, 0x29, 0x96, 0xef, 0xe6, 0xdd, 0x9a, 0xc0, 0xd6, 0xaf, 0x1b, 0xd1, 0xf5, 0x58, + 0x60, 0x5b, 0xdd, 0xcf, 0x9f, 0x62, 0x41, 0x35, 0x8d, 0xd7, 0x6a, 0x26, 0xb0, 0x6e, 0x41, 0x80, + 0xd3, 0x9e, 0xf8, 0x42, 0x1e, 0x62, 0xe4, 0xa8, 0x85, 0x17, 0x22, 0x18, 0xe8, 0xd0, 0xa5, 0x71, + 0x07, 0x1c, 0x16, 0x59, 0xb6, 0x43, 0x2e, 0x38, 0x59, 0x99, 0x1c, 0xc5, 0x4f, 0xdb, 0x1c, 0x17, + 0x52, 0x89, 0x3d, 0x67, 0x18, 0x90, 0xd2, 0x4c, 0x07, 0xac, 0xb6, 0x3f, 0x12, 0x3a, 0x7b, 0x1c, + 0x25, 0xc4, 0x2e, 0x5d, 0x77, 0x26, 0x4f, 0xdb, 0x50, 0x5f, 
0xea, 0x77, 0xdf, 0xd2, 0xb1, 0x6d, + 0x9f, 0xca, 0x89, 0x39, 0x88, 0x23, 0x4a, 0x7e, 0xa2, 0xd6, 0xbf, 0xe4, 0xed, 0xfa, 0x71, 0x34, + 0x1c, 0xaa, 0x26, 0x3d, 0xb6, 0x55, 0x5b, 0x6f, 0x8f, 0x7a, 0xe2, 0x14, 0xd0, 0xf9, 0x95, 0x1e, + 0x74, 0x26, 0x32, 0x40, 0xd2, 0x32, 0x13, 0x38, 0x19, 0xa9, 0x0b, 0xd6, 0x09, 0x47, 0x52, 0x41, + 0x46, 0x5d, 0x86, 0x67, 0x08, 0x3d, 0x93, 0x23, 0xf3, 0x04, 0x94, 0xa0, 0x6e, 0xc2, 0x85, 0xa5, + 0x73, 0x24, 0xcc, 0x75, 0x42, 0xd5, 0x3c, 0xe4, 0xc5, 0xd5, 0xd1, 0xb6, 0xbf, 0x45, 0x57, 0x11, + 0x31, 0x25, 0xd3, 0x20, 0x34, 0x9c, 0x9c, 0x55, 0x83, 0x67, 0xdd, 0x75, 0x42, 0x6c, 0x39, 0x87, + 0x85, 0xb9, 0x9b, 0xbd, 0x75, 0xff, 0xed, 0x3f, 0x7f, 0x3a, 0x04, 0x72, 0x6d, 0xa7, 0xac, 0xc9, + 0xde, 0x41, 0x91, 0x53, 0x1a, 0x32, 0xc2, 0x08, 0x54, 0x35, 0x15, 0xbf, 0xeb, 0x4a, 0x43, 0xde, + 0x3e, 0x0f, 0x19, 0x20, 0x37, 0xa2, 0x2f, 0xaf, 0xf6, 0x35, 0xcb, 0x7b, 0x63, 0xb1, 0xb7, 0x87, + 0x4c, 0xf6, 0x3e, 0xcd, 0x30, 0xbb, 0xd1, 0x6c, 0x15, 0xd9, 0x23, 0x79, 0x2f, 0x9d, 0xaf, 0x5f, + 0x45, 0xfc, 0xa4, 0xcf, 0x4b, 0x2a, 0xa6, 0x1d, 0xe8, 0x87, 0xe2, 0x1a, 0x10, 0x88, 0xbf, 0x0d, + 0x9b, 0x11, 0x35, 0x3b, 0x44, 0x45, 0x6c, 0x09, 0xac, 0x98, 0x1d, 0x52, 0x73, 0x54, 0x6c, 0x24, + 0x91, 0x16, 0x0e, 0x71, 0x8e, 0xdb, 0x04, 0x84, 0x59, 0xe5, 0x04, 0xa2, 0x14, 0x9e, 0x45, 0xcb, + 0x05, 0x19, 0x98, 0x81, 0x77, 0xf2, 0xf2, 0xbb, 0x6c, 0x5b, 0x6e, 0x0c, 0x4c, 0xb4, 0x50, 0x7f, + 0x69, 0x5b, 0xea, 0x90, 0x63, 0xbc, 0x1d, 0x4f, 0x04, 0x3e, 0xb5, 0x63, 0x08, 0xe5, 0x8e, 0xa9, + 0x7d, 0xa2, 0x8b, 0x3f, 0xa8, 0x0a, 0x70, 0xbb, 0xf3, 0x91, 0x0b, 0x25, 0x6e, 0xb8, 0xd9, 0xbc, + 0x8e, 0x46, 0x2b, 0x51, 0x22, 0xec, 0xe5, 0xdf, 0x59, 0xbb, 0x1e, 0x66, 0xfd, 0xbd, 0x67, 0x9d, + 0x96, 0x3d, 0xbf, 0xdc, 0x99, 0x1c, 0x4a, 0x70, 0x1e, 0xe1, 0xb5, 0xec, 0x19, 0xf7, 0x4c, 0x94, + 0xb5, 0x8f, 0xa0, 0xe3, 0xbe, 0x6b, 0x06, 0xdc, 0x3b, 0xdf, 0xd7, 0x53, 0x8d, 0x93, 0x0a, 0xa8, + 0xf1, 0x69, 0x2b, 0x13, 0x68, 0x7a, 0x03, 0xbc, 0x0f, 0x54, 0x08, 0x4e, 0x3b, 0xa2, 0x85, 0xb3, + 0x31, 0xaa, 0x06, 0x16, 0x03, 0x6d, 0x57, 0xab, 0x8f, 0x1b, 0x22, 0xd7, 0x33, 0xd6, 0x24, 0xba, + 0x70, 0xa7, 0x95, 0x87, 0x76, 0x47, 0xa0, 0xb1, 0xcb, 0x46, 0xa7, 0x0a, 0xcc, 0x9b, 0xc3, 0xf7, + 0x22, 0x3b, 0x59, 0x9f, 0x6b, 0xcc, 0x84, 0xea, 0xd4, 0x16, 0xe7, 0xf6, 0x9d, 0x30, 0x7f, 0x4d, + 0xdb, 0xf5, 0x15, 0xa0, 0xd0, 0x9e, 0xc7, 0x0b, 0x4f, 0x87, 0x99, 0x4d, 0x96, 0xd3, 0xb7, 0x13, + 0x82, 0xa0, 0x28, 0xe8, 0x2f, 0x0d, 0xda, 0xa9, 0x72, 0xba, 0x8e, 0x35, 0x0d, 0xf0, 0xee, 0xc0, + 0xbe, 0x58, 0xe0, 0x74, 0x3c, 0x2b, 0x13, 0x65, 0x0e, 0xc4, 0x1b, 0xfa, 0x01, 0x67, 0x4b, 0x80, + 0x29, 0x0e, 0xd5, 0x6a, 0x00, 0x01, 0x41, 0x4e, 0xac, 0x07, 0x70, 0x8e, 0x9c, 0x09, 0x1d, 0x37, + 0x1e, 0x9e, 0xb8, 0xea, 0x84, 0xa4, 0xdc, 0x37, 0x6b, 0xe9, 0x5d, 0x20, 0x5e, 0x0b, 0x56, 0x78, + 0x69, 0x11, 0x1f, 0x99, 0x94, 0xaa, 0x44, 0x67, 0x68, 0x08, 0x69, 0x56, 0xdb, 0xe5, 0x0d, 0xfd, + 0xf3, 0x02, 0x2d, 0xe2, 0x71, 0x42, 0x16, 0x02, 0x86, 0x2d, 0x78, 0x5b, 0x1d, 0x30, 0xfb, 0x41, + 0xca, 0xac, 0x56, 0x92, 0xb4, 0x62, 0x01, 0xf2, 0xde, 0xa2, 0xea, 0xe2, 0x25, 0x6a, 0x73, 0xbc, + 0xda, 0xd7, 0x38, 0xc7, 0x83, 0x53, 0x30, 0x72, 0x52, 0x16, 0x84, 0xea, 0xe8, 0x53, 0x22, 0x6b, + 0x21, 0x57, 0x0f, 0xa6, 0x45, 0x5d, 0xf0, 0xe8, 0x63, 0xe2, 0x2b, 0x54, 0x7a, 0xb3, 0xa8, 0x4b, + 0x23, 0xc8, 0x37, 0x1c, 0x80, 0xe7, 0xdd, 0x8c, 0xcf, 0x5b, 0xe3, 0x2c, 0xac, 0x51, 0x35, 0x07, + 0xc2, 0x15, 0xa8, 0x32, 0x16, 0xb6, 0xe0, 0xd2, 0x50, 0x9f, 0x5f, 0x32, 0x28, 0x6a, 0x9e, 0x41, + 0x2f, 0x46, 0xf0, 0xe9, 0x49, 0x38, 0x32, 0xdc, 0x8c, 0x67, 0x7d, 0x1d, 0xc2, 0x0a, 
0x5a, 0xa4, + 0x06, 0x83, 0x2b, 0x69, 0xe4, 0x8a, 0xae, 0x69, 0xb7, 0x3a, 0x62, 0x47, 0x09, 0xce, 0xaf, 0x1d, + 0xfe, 0x9c, 0x67, 0x9e, 0xae, 0xf2, 0x75, 0x64, 0x1d, 0x19, 0x10, 0xc5, 0x0e, 0x26, 0x04, 0x76, + 0xef, 0xb4, 0x5c, 0x56, 0x01, 0x78, 0x47, 0xac, 0xff, 0xf4, 0xeb, 0x95, 0x5e, 0x27, 0xe4, 0x55, + 0x8d, 0xb2, 0xfb, 0x56, 0x1e, 0x14, 0x8f, 0xaa, 0x78, 0xbe, 0x5c, 0x98, 0x5b, 0x7c, 0x9f, 0xcf, + 0x70, 0x3b, 0xee, 0x43, 0x28, 0x68, 0xdd, 0x59, 0xd2, 0x7c, 0xf0, 0x8c, 0xfd, 0xda, 0xbe, 0x59, + 0xd1, 0xca, 0x57, 0x2a, 0xf8, 0x48, 0x02, 0xac, 0x60, 0xbe, 0xdc, 0xaa, 0xed, 0x18, 0x0a, 0x2e, + 0x4a, 0x52, 0xa3, 0x86, 0x72, 0x15, 0xdd, 0xea, 0x8c, 0xc5, 0xc7, 0x21, 0xe3, 0x1e, 0x26, 0x03, + 0xd7, 0x5d, 0x42, 0x9e, 0x69, 0x43, 0xf3, 0x30, 0xda, 0xc1, 0xbc, 0xfb, 0x04, 0x70, 0xc1, 0x4c, + 0xf3, 0x77, 0x99, 0x25, 0x0a, 0xe1, 0x02, 0xd2, 0x13, 0xa0, 0x4a, 0x6e, 0xa9, 0x51, 0xd7, 0x9d, + 0x17, 0xb7, 0xd6, 0xb9, 0xda, 0x88, 0xaa, 0x96, 0x12, 0xe6, 0x34, 0xde, 0xc8, 0x9c, 0xc0, 0x77, + 0xbf, 0xb1, 0x6a, 0x48, 0x9f, 0xef, 0x4f, 0xf2, 0xbb, 0xa7, 0x83, 0x73, 0xb4, 0x98, 0x98, 0x1e, + 0xc0, 0x66, 0x78, 0x36, 0x8e, 0x3e, 0x0e, 0x28, 0xaa, 0x50, 0x05, 0xbb, 0xa4, 0x7e, 0x95, 0x34, + 0x8c, 0x64, 0x29, 0x51, 0x3c, 0x1c, 0x65, 0x97, 0xaa, 0x04, 0x97, 0x71, 0x74, 0xe8, 0xc1, 0x90, + 0xe3, 0x75, 0x0a, 0xc4, 0xaa, 0x0b, 0xa1, 0xf3, 0x0b, 0x39, 0xf2, 0xa7, 0x9a, 0xf1, 0x1a, 0x5e, + 0x96, 0x50, 0x7b, 0x96, 0xb7, 0x49, 0xa8, 0x4a, 0x4c, 0x58, 0xe2, 0x13, 0x18, 0xba, 0xa3, 0xc3, + 0x71, 0x81, 0x9d, 0x38, 0x96, 0x2e, 0xde, 0x95, 0x72, 0x5e, 0xdd, 0x72, 0x1f, 0x4a, 0xf1, 0x2a, + 0x43, 0xe7, 0x92, 0xdf, 0x73, 0x24, 0x05, 0x45, 0xad, 0x40, 0x88, 0x77, 0xe5, 0x6a, 0x96, 0x48, + 0x74, 0x95, 0x03, 0x01, 0xa7, 0x8e, 0x49, 0x75, 0xc2, 0x07, 0xcb, 0xea, 0xac, 0x4c, 0xe6, 0x95, + 0x8f, 0xd2, 0xe5, 0xfb, 0xf4, 0xa9, 0x7a, 0x33, 0x91, 0x63, 0x15, 0x39, 0x38, 0x56, 0x79, 0x9c, + 0x27, 0xcd, 0x12, 0xda, 0xaa, 0xb8, 0x08, 0x7f, 0x1c, 0x19, 0xd7, 0x46, 0xd5, 0xe9, 0x01, 0xbf, + 0x4b, 0x8c, 0xac, 0xdf, 0x6f, 0xf9, 0x13, 0xd0, 0xdd, 0x9e, 0x90, 0xa5, 0x97, 0x16, 0xfc, 0x65, + 0x49, 0xc9, 0x1f, 0x03, 0x3a, 0x34, 0x7d, 0x90, 0xa5, 0x3c, 0x33, 0xe6, 0x58, 0x55, 0xad, 0x92, + 0x68, 0x0a, 0x9c, 0x0c, 0xdf, 0x49, 0xa0, 0x3a, 0x8e, 0x88, 0x88, 0x9f, 0x5c, 0x08, 0xdc, 0xa8, + 0x75, 0x03, 0xf4, 0xfe, 0x12, 0x62, 0x56, 0x3e, 0xab, 0x19, 0x8c, 0x22, 0xde, 0xea, 0x90, 0xe9, + 0x33, 0xeb, 0x40, 0x05, 0x65, 0x8c, 0xbb, 0x60, 0x6f, 0x66, 0xc7, 0xa4, 0x98, 0x31, 0x46, 0x0a, + 0xea, 0xda, 0x58, 0xdb, 0x16, 0x09, 0x7a, 0x78, 0x2e, 0x04, 0xf8, 0xa3, 0x24, 0xbb, 0x93, 0x0c, + 0x97, 0x48, 0xc4, 0xc9, 0xeb, 0x3c, 0xea, 0xee, 0x49, 0x49, 0x26, 0x44, 0x78, 0xc1, 0xaf, 0x51, + 0xfd, 0xe2, 0x33, 0xa7, 0x75, 0xb3, 0x24, 0x29, 0x79, 0x84, 0xe5, 0x23, 0x71, 0xe8, 0x9e, 0xf8, + 0xfb, 0xad, 0x8b, 0xd0, 0x57, 0x32, 0xab, 0xef, 0x9b, 0x52, 0xb2, 0x4b, 0x61, 0xc1, 0xba, 0x45, + 0x32, 0xd5, 0x38, 0xa8, 0xee, 0x33, 0xd9, 0xda, 0x65, 0xe4, 0x94, 0x67, 0x9e, 0xf7, 0x66, 0xbe, + 0x18, 0x02, 0x66, 0xb2, 0x63, 0x02, 0xcc, 0x60, 0x39, 0x07, 0x86, 0xc1, 0x2c, 0x3b, 0x93, 0x5a, + 0xd5, 0xc5, 0xe7, 0x77, 0x6a, 0x7f, 0x70, 0xf4, 0xcd, 0x9c, 0x0e, 0xbb, 0x8e, 0xfe, 0xa8, 0x03, + 0x62, 0xc7, 0x71, 0xa9, 0x57, 0x18, 0xcf, 0xd7, 0x06, 0x7d, 0x70, 0xcb, 0xd1, 0xc0, 0x20, 0xa9, + 0x6e, 0x9e, 0xeb, 0xa5, 0xaf, 0x27, 0xbb, 0xc6, 0x20, 0x80, 0xe3, 0xa4, 0x48, 0x3e, 0x1c, 0x21, + 0x0f, 0x5d, 0xa5, 0xba, 0xd3, 0x17, 0x67, 0x80, 0xb3, 0x72, 0x68, 0x1c, 0xf0, 0x5b, 0xf1, 0xf7, + 0x1b, 0x57, 0x85, 0x11, 0xa7, 0x2d, 0x86, 0x16, 0xc1, 0x1c, 0xc0, 0xfe, 0x70, 0xaa, 0x42, 0xeb, + 0x1b, 0x7b, 
0x94, 0x0a, 0x8b, 0x58, 0x13, 0x9e, 0x9c, 0xd0, 0x85, 0xb7, 0x65, 0x6f, 0x9d, 0xdd, + 0x2e, 0x96, 0x45, 0xcf, 0xd4, 0xe5, 0xd2, 0x9b, 0x2e, 0x51, 0x64, 0xb8, 0x6e, 0xe5, 0x8c, 0xae, + 0xbd, 0x2b, 0xb5, 0x6a, 0x26, 0x1d, 0x2d, 0xc4, 0xdc, 0x85, 0xcc, 0x05, 0x1d, 0xe0, 0x0f, 0x80, + 0x58, 0x6f, 0x03, 0x61, 0x1f, 0xa4, 0x17, 0x86, 0x17, 0xa2, 0xc7, 0xb4, 0xe7, 0xac, 0xef, 0x57, + 0x55, 0xc8, 0x04, 0x78, 0x19, 0x8e, 0xa1, 0x6c, 0x8e, 0x48, 0x90, 0x55, 0x68, 0x20, 0xba, 0x18, + 0xdd, 0xa6, 0xf1, 0x84, 0x04, 0xc0, 0x2a, 0x57, 0x25, 0x15, 0x64, 0x5c, 0x69, 0x9a, 0xd2, 0xb3, + 0x4f, 0x09, 0x1e, 0x04, 0xc1, 0x0b, 0x50, 0x0d, 0xef, 0x5f, 0x6b, 0x1e, 0x19, 0x40, 0x51, 0x2a, + 0x7f, 0xc3, 0xd2, 0x86, 0x89, 0x3b, 0xa6, 0xe7, 0x15, 0x87, 0x32, 0xec, 0xca, 0x83, 0x64, 0x76, + 0x30, 0x43, 0x0e, 0x0b, 0x52, 0x93, 0xbf, 0xf9, 0xa5, 0xc5, 0x88, 0x92, 0xf2, 0xcb, 0xa5, 0x6a, + 0x4c, 0xe4, 0xb4, 0x70, 0x1a, 0x10, 0x38, 0xd1, 0x6c, 0x52, 0x95, 0xbf, 0x0a, 0x9c, 0x23, 0xef, + 0x2d, 0x39, 0xc8, 0xd9, 0x89, 0x05, 0xb9, 0xea, 0xaa, 0x22, 0xf4, 0xba, 0x93, 0xbc, 0x7a, 0xf7, + 0x77, 0xab, 0x50, 0x8e, 0xaf, 0xf1, 0x7f, 0xe3, 0xac, 0x71, 0xb0, 0x24, 0x90, 0xba, 0xbc, 0xc0, + 0x46, 0xd1, 0x5f, 0x00, 0xa1, 0x40, 0x85, 0x6f, 0xe0, 0x7f, 0xdd, 0x00, 0xab, 0xe4, 0xad, 0xb5, + 0xf2, 0xb5, 0x01, 0x50, 0x9a, 0xae, 0x5f, 0x51, 0xb9, 0xe4, 0x19, 0xca, 0xab, 0x3d, 0xec, 0xf4, + 0x44, 0x94, 0x7f, 0x3d, 0xa4, 0x1d, 0x31, 0xb4, 0xea, 0xb6, 0x53, 0x61, 0x32, 0x11, 0x25, 0x11, + 0x11, 0x28, 0x67, 0x1c, 0xc9, 0x08, 0x24, 0x65, 0x7b, 0x54, 0xe8, 0x2c, 0x67, 0x16, 0x14, 0x0a, + 0x33, 0x7f, 0x09, 0x18, 0x2b, 0x3a, 0x16, 0x9b, 0x1c, 0xd6, 0x69, 0xcc, 0x3e, 0xf8, 0x10, 0x7c, + 0xc0, 0x4f, 0xbf, 0x3e, 0xac, 0xa8, 0x9b, 0x6c, 0xf7, 0x21, 0x9f, 0x15, 0xa0, 0x51, 0x7b, 0xad, + 0xcb, 0x5d, 0xf5, 0x8d, 0xb5, 0xaa, 0xee, 0x94, 0x6a, 0x37, 0xfa, 0xbf, 0xc0, 0x7d, 0xa0, 0x8d, + 0xf2, 0x80, 0x3e, 0xbf, 0xfd, 0x2e, 0xb4, 0x6d, 0x40, 0xde, 0x4e, 0x16, 0xd2, 0xfc, 0x49, 0x11, + 0xbb, 0x70, 0xeb, 0x96, 0x6b, 0x2f, 0x88, 0xde, 0x8b, 0xbd, 0x12, 0xa5, 0xd3, 0xe8, 0xe2, 0xd9, + 0x7d, 0xef, 0xcd, 0x36, 0xac, 0xeb, 0x44, 0x01, 0xe9, 0xe0, 0xbf, 0x13, 0x78, 0xb2, 0x02, 0x31, + 0x5e, 0x6f, 0xb5, 0x59, 0x9b, 0xfa, 0xe2, 0x32, 0x8b, 0xcb, 0x81, 0x27, 0xb6, 0x5c, 0xce, 0x1e, + 0x10, 0xb5, 0x2f, 0x25, 0x9d, 0xe8, 0x0f, 0x3b, 0x81, 0x2a, 0x05, 0x05, 0xc7, 0x8b, 0xc8, 0x20, + 0xdc, 0xf5, 0xfc, 0xef, 0xc7, 0x13, 0xf2, 0x3b, 0x60, 0x82, 0xd1, 0xbc, 0x0d, 0xef, 0x11, 0x50, + 0xd5, 0x26, 0xf9, 0x62, 0x67, 0xcb, 0x78, 0x62, 0xbd, 0x6b, 0x8e, 0x22, 0x48, 0x7b, 0xdc, 0x0e, + 0x12, 0x08, 0xed, 0xe2, 0x84, 0xe9, 0x21, 0xaa, 0x40, 0xb9, 0xd0, 0x2d, 0x28, 0x72, 0x58, 0x9b, + 0xa6, 0xd3, 0x7a, 0x3d, 0x23, 0x8b, 0x88, 0x33, 0x6d, 0x33, 0xdc, 0x50, 0x1a, 0xdb, 0xa5, 0x66, + 0xe8, 0x09, 0xdf, 0xd8, 0x09, 0xf3, 0xe0, 0x10, 0x9c, 0xf8, 0x64, 0xf2, 0x0f, 0x1a, 0x0a, 0xa4, + 0xb5, 0x0d, 0x40, 0x87, 0x57, 0x76, 0xa5, 0x41, 0x5c, 0xca, 0x9b, 0x10, 0x73, 0xa1, 0x20, 0xe3, + 0x47, 0x9c, 0x9e, 0x78, 0x43, 0xba, 0x09, 0x98, 0x2e, 0x44, 0xef, 0xab, 0x72, 0x0d, 0x0f, 0x72, + 0xdf, 0x3d, 0x50, 0x31, 0x94, 0x74, 0x7d, 0x05, 0x00, 0x53, 0xcd, 0x95, 0xdb, 0x49, 0x4a, 0xe8, + 0xa9, 0x7c, 0x2d, 0x58, 0x9c, 0x1e, 0x0a, 0x3c, 0x38, 0xef, 0x39, 0x37, 0x44, 0x9b, 0x85, 0xa2, + 0xba, 0xfe, 0x84, 0x0d, 0xaf, 0x6a, 0x44, 0x59, 0xcf, 0xb5, 0xa2, 0xcd, 0x2a, 0x96, 0x39, 0x78, + 0x2b, 0xf4, 0xd9, 0x6e, 0x9f, 0x15, 0xb7, 0x6c, 0xc4, 0x5d, 0xec, 0xd5, 0xd5, 0x72, 0xd7, 0xa9, + 0x1a, 0x20, 0x5f, 0xca, 0xa0, 0x82, 0xc7, 0xe9, 0x97, 0xa5, 0x63, 0xf6, 0xd9, 0x08, 0x77, 0x74, + 0x9c, 0xfb, 0x63, 0x43, 0xe7, 0x4e, 
0x5a, 0x3d, 0x5c, 0x8d, 0x88, 0x4d, 0xeb, 0xbd, 0x38, 0x50, + 0x5b, 0x0a, 0xeb, 0xc7, 0x72, 0x7d, 0x1e, 0xd1, 0x68, 0xe7, 0x2b, 0xbe, 0xe4, 0x69, 0x8e, 0xfa, + 0xa3, 0x1f, 0x3f, 0xc3, 0x24, 0xf5, 0x1a, 0x21, 0xa4, 0xa0, 0xd9, 0xad, 0x55, 0x7d, 0x64, 0xc9, + 0x8b, 0x48, 0xa6, 0xdc, 0x71, 0x1a, 0xd0, 0x08, 0x43, 0x1b, 0x14, 0x8c, 0xb7, 0xc3, 0x54, 0x09, + 0x0b, 0x8c, 0x2e, 0x6b, 0x0e, 0xe6, 0x99, 0x5c, 0xe3, 0x91, 0x25, 0xc9, 0x60, 0xa7, 0xd2, 0x39, + 0x9d, 0x85, 0x45, 0x8d, 0xfe, 0x11, 0x27, 0x19, 0x2d, 0xe4, 0x2e, 0xe5, 0x41, 0x6a, 0x87, 0x05, + 0xb3, 0xdb, 0x9f, 0xb5, 0x7e, 0x06, 0xf4, 0x4f, 0xcc, 0x2a, 0x1d, 0x93, 0xcc, 0xe4, 0xd9, 0xa2, + 0x10, 0xeb, 0x50, 0x4a, 0x18, 0xed, 0x9b, 0xfc, 0x38, 0x70, 0xa2, 0xd9, 0x46, 0xf8, 0x0d, 0x7c, + 0x79, 0x4d, 0x58, 0xcb, 0x95, 0x3c, 0xde, 0x74, 0x56, 0xc4, 0x56, 0x8f, 0xdc, 0xf7, 0x08, 0xa6, + 0x4d, 0xa0, 0xb9, 0x56, 0x7f, 0x30, 0xce, 0x6a, 0x0d, 0x31, 0xca, 0xf0, 0x4b, 0x27, 0x0a, 0x73, + 0x83, 0xa0, 0x12, 0x32, 0x38, 0xaa, 0x39, 0xb9, 0x86, 0x75, 0x13, 0xe8, 0x73, 0x1b, 0x23, 0x8b, + 0x5f, 0x5e, 0xca, 0xeb, 0x07, 0x27, 0x40, 0x12, 0xe6, 0xd7, 0xfd, 0x9b, 0x61, 0x4f, 0xed, 0x6c, + 0xfd, 0x6b, 0x55, 0x3c, 0x89, 0x8d, 0x47, 0x05, 0x73, 0x4c, 0xca, 0x40, 0x9a, 0x5e, 0xb7, 0xc4, + 0x3b, 0x67, 0x97, 0xe0, 0x21, 0x03, 0xd0, 0x83, 0x15, 0x0c, 0xb9, 0x79, 0xf4, 0xde, 0xfe, 0xb7, + 0xc2, 0xaa, 0xeb, 0xe4, 0x6c, 0x52, 0x3d, 0xe4, 0xe2, 0xd4, 0x59, 0x49, 0xe3, 0xe6, 0x6b, 0xff, + 0xe6, 0x82, 0x1d, 0x18, 0x9b, 0x67, 0x7c, 0x40, 0xbd, 0xf8, 0x12, 0x55, 0xc0, 0xe8, 0x48, 0xb9, + 0x22, 0xf3, 0xcc, 0xc8, 0x52, 0x18, 0x27, 0x72, 0x0b, 0x75, 0xed, 0x1b, 0xc4, 0x8f, 0xd9, 0x4a, + 0x86, 0xba, 0x36, 0x4d, 0x58, 0xee, 0xad, 0x9c, 0x7d, 0xb0, 0xda, 0xc7, 0xc6, 0xb7, 0xce, 0x13, + 0x04, 0x30, 0x5c, 0xb2, 0xf8, 0x91, 0x21, 0xdc, 0x9c, 0xde, 0x3b, 0xd2, 0x7f, 0xa0, 0x49, 0x79, + 0x29, 0x9f, 0xdd, 0x5e, 0x54, 0xbc, 0x68, 0xcf, 0x2c, 0xf0, 0x72, 0xd7, 0xce, 0x7e, 0x6b, 0x53, + 0x4b, 0x21, 0x74, 0x35, 0x48, 0x41, 0xdc, 0xf8, 0x36, 0x04, 0xfc, 0xef, 0x9c, 0x8e, 0x5e, 0x4a, + 0xe8, 0xee, 0xd9, 0x0c, 0x00, 0x87, 0x6f, 0xf5, 0xdf, 0x12, 0x85, 0x6d, 0x21, 0xec, 0x5d, 0x18, + 0xb6, 0xd0, 0xf6, 0x0b, 0x2d, 0x65, 0xeb, 0x03, 0x2d, 0x32, 0xdf, 0xc9, 0xb6, 0xbf, 0x17, 0xc6, + 0xb3, 0x5c, 0x2f, 0x3c, 0x3e, 0x7f, 0x19, 0x20, 0xe9, 0xeb, 0x6b, 0x47, 0x52, 0x39, 0xde, 0x52, + 0x0d, 0xbf, 0x3b, 0xd1, 0x1c, 0xde, 0x8d, 0xb7, 0xd9, 0xdf, 0x96, 0x5f, 0xac, 0x78, 0xb1, 0x3e, + 0x36, 0x9e, 0xf0, 0xfd, 0x6b, 0x7c, 0x1a, 0xe0, 0x7b, 0x74, 0x1f, 0xbb, 0x66, 0x30, 0xf5, 0xf1, + 0x6b, 0xfc, 0x8e, 0x7a, 0x6b, 0xa8, 0x23, 0xf6, 0x56, 0xe6, 0xc7, 0xa2, 0x87, 0xf6, 0xeb, 0x02, + 0xd5, 0xab, 0x87, 0x86, 0xa0, 0x1e, 0x89, 0x65, 0x5b, 0x3e, 0xc1, 0xae, 0x37, 0x85, 0x12, 0xc1, + 0xeb, 0xfb, 0xa4, 0x6a, 0xe3, 0x79, 0x37, 0x71, 0x02, 0x42, 0x31, 0xa2, 0xa6, 0x48, 0xfa, 0x64, + 0xb1, 0x53, 0x7b, 0xad, 0xd3, 0x5f, 0x0c, 0xb7, 0x5f, 0x9b, 0x3f, 0x22, 0x11, 0x46, 0x85, 0x1a, + 0x9b, 0x6a, 0x46, 0x67, 0xb9, 0x4b, 0xa7, 0x8d, 0x64, 0xf8, 0xfa, 0xad, 0xfa, 0xbd, 0xaf, 0x23, + 0x98, 0xbe, 0x1b, 0x15, 0x33, 0x8a, 0xbd, 0xff, 0x0a, 0xaa, 0xc6, 0xb5, 0xa1, 0x17, 0xcc, 0x95, + 0xce, 0xf5, 0x48, 0x84, 0x55, 0x1c, 0x78, 0xfa, 0x5a, 0x1f, 0x20, 0x34, 0x57, 0x6b, 0x56, 0x84, + 0x42, 0x46, 0xcb, 0x30, 0x2e, 0x50, 0x2a, 0x09, 0xb0, 0x9a, 0xda, 0xa5, 0xe6, 0x1f, 0x08, 0xf4, + 0xea, 0xf9, 0x1a, 0x42, 0xaf, 0x4f, 0xa2, 0x65, 0x9e, 0xf8, 0x24, 0x11, 0x9f, 0x24, 0x56, 0x96, + 0x2d, 0x9d, 0x3a, 0xd0, 0x50, 0x38, 0xf1, 0x5f, 0xac, 0x2f, 0xf2, 0x62, 0x5f, 0x11, 0x21, 0x9a, + 0xe5, 0x94, 0x26, 0x9e, 0xd3, 0xe4, 0x26, 0x83, 0x85, 0xf1, 
0x17, 0x6d, 0x68, 0xbc, 0x28, 0xdd, + 0xcd, 0xbe, 0xe6, 0xe4, 0xaa, 0xeb, 0x32, 0xf6, 0x06, 0x0b, 0x06, 0xb2, 0x43, 0xda, 0x29, 0x1a, + 0x92, 0xaa, 0x98, 0x58, 0x32, 0x92, 0x5b, 0xd2, 0xdf, 0x22, 0xa1, 0xb1, 0xde, 0xf4, 0x86, 0x67, + 0xf4, 0xbb, 0x83, 0x32, 0x5e, 0xb5, 0x15, 0xf8, 0xe1, 0x9e, 0x7c, 0x83, 0xfe, 0x10, 0x9b, 0x4d, + 0xea, 0xad, 0x64, 0xe3, 0xd4, 0xc2, 0xce, 0x26, 0x12, 0xfa, 0x19, 0x31, 0x7d, 0xde, 0x90, 0x67, + 0xf0, 0x1a, 0x81, 0x21, 0xc7, 0x06, 0x14, 0x1b, 0xd1, 0x9c, 0x62, 0xef, 0x04, 0xd4, 0xc2, 0x6e, + 0x3f, 0x5b, 0x3f, 0xb0, 0x2e, 0xba, 0xba, 0xa9, 0x8c, 0xb4, 0x47, 0x54, 0xa2, 0x57, 0xe9, 0x68, + 0xd5, 0xc7, 0x28, 0x4a, 0x55, 0x60, 0x87, 0xee, 0x1a, 0xd1, 0xbe, 0x2e, 0xf6, 0x4d, 0xe1, 0x85, + 0x27, 0x7c, 0xfa, 0xf8, 0x3a, 0x4f, 0xd1, 0xc6, 0x9b, 0x61, 0x39, 0xb3, 0x11, 0x44, 0x5d, 0x93, + 0xec, 0x2e, 0x11, 0x36, 0x8e, 0x5e, 0xc7, 0xbc, 0x99, 0x81, 0x07, 0x88, 0x51, 0x45, 0x86, 0x07, + 0x61, 0x4f, 0x23, 0x99, 0xad, 0x0c, 0x9a, 0xf0, 0xed, 0xbf, 0x0d, 0x12, 0x7e, 0xd6, 0x1b, 0xe0, + 0x7c, 0xe7, 0x4a, 0x41, 0x6c, 0xff, 0xf1, 0xf5, 0xe5, 0x7a, 0x5f, 0xe2, 0xc6, 0xda, 0x7f, 0xf8, + 0x5d, 0x6b, 0x33, 0x1e, 0x64, 0x3b, 0xe2, 0xc9, 0x21, 0xe4, 0xc4, 0xd6, 0x01, 0xbd, 0x55, 0x5d, + 0x65, 0xb7, 0x8b, 0x27, 0xbd, 0xfd, 0x96, 0xc9, 0x9f, 0x8e, 0xf2, 0x4c, 0x7c, 0x80, 0x20, 0xae, + 0x68, 0x52, 0xc4, 0x0f, 0xd4, 0x1c, 0x05, 0x4b, 0xfb, 0x63, 0x6d, 0x44, 0x88, 0x73, 0xf7, 0xc1, + 0x19, 0xc0, 0x4f, 0xb4, 0xf8, 0xbd, 0xbc, 0x75, 0xaf, 0x8d, 0xaa, 0x36, 0x8d, 0x80, 0x81, 0x76, + 0x3b, 0x79, 0x2b, 0xf3, 0x78, 0x20, 0xaf, 0x27, 0x7e, 0xca, 0x9b, 0x03, 0xec, 0x0f, 0x6e, 0x26, + 0xa7, 0x97, 0xd0, 0x4d, 0x71, 0xa8, 0xad, 0x31, 0xc9, 0x76, 0x83, 0x5a, 0xb5, 0x79, 0xd1, 0x51, + 0x24, 0x40, 0xf0, 0x3e, 0x44, 0x21, 0xf0, 0x9e, 0x0a, 0x15, 0x41, 0xdd, 0xe1, 0x04, 0x59, 0x20, + 0xc6, 0x44, 0x2b, 0xb0, 0xa9, 0xd1, 0xe0, 0x5a, 0x39, 0xfd, 0xe9, 0xca, 0x45, 0xb3, 0xb8, 0x28, + 0x81, 0xae, 0x8a, 0x6e, 0x17, 0x5c, 0x1d, 0xc0, 0x1d, 0xa0, 0x50, 0x2b, 0x44, 0x4c, 0xc8, 0xf2, + 0x3e, 0x8e, 0x92, 0x5b, 0x5f, 0xf7, 0xc2, 0xd5, 0xdd, 0x81, 0x1f, 0xb6, 0x8f, 0xc1, 0xc1, 0x6e, + 0x18, 0xcb, 0x9c, 0xb9, 0x77, 0xe2, 0x49, 0x35, 0x61, 0x7f, 0xde, 0x1d, 0x69, 0x16, 0xf0, 0xa5, + 0x47, 0x03, 0x4e, 0x7a, 0x86, 0x39, 0x78, 0x97, 0x2a, 0xa8, 0x95, 0x93, 0x83, 0xdd, 0x0a, 0x81, + 0xbf, 0x36, 0xdf, 0xc1, 0x48, 0xb2, 0x9a, 0xdd, 0xfb, 0x3d, 0xf1, 0x15, 0x63, 0x74, 0x97, 0x74, + 0x58, 0xdb, 0x5c, 0x74, 0x0d, 0xd3, 0xc6, 0x99, 0x10, 0x83, 0x2a, 0x2a, 0xeb, 0x47, 0xbf, 0x61, + 0x39, 0x2b, 0x29, 0xe0, 0x9c, 0xed, 0x7e, 0xa1, 0x90, 0x56, 0x38, 0xd7, 0x37, 0x8f, 0xdc, 0xd7, + 0x0b, 0x1e, 0xa3, 0xd3, 0x24, 0xc9, 0xf9, 0x12, 0xfd, 0x31, 0x71, 0xc8, 0x01, 0xf8, 0x97, 0xe5, + 0x6c, 0xb1, 0xe7, 0x84, 0x0a, 0xde, 0x1a, 0xee, 0x39, 0x34, 0x18, 0xdb, 0x62, 0xf9, 0xc0, 0xbe, + 0x51, 0xef, 0x2d, 0x7e, 0xd3, 0x3a, 0xa3, 0xc6, 0x2c, 0x98, 0xb9, 0x11, 0xaa, 0xef, 0x44, 0x20, + 0xc2, 0xb6, 0x95, 0x35, 0x4e, 0xd0, 0x9e, 0x63, 0xa9, 0x83, 0x6b, 0xf0, 0x1b, 0xee, 0xf5, 0x4b, + 0x4c, 0x7d, 0xc2, 0xbf, 0xa9, 0x40, 0x1f, 0x6d, 0x91, 0xb4, 0x57, 0x13, 0x91, 0x23, 0xfd, 0x7d, + 0xea, 0x36, 0x82, 0xbf, 0xac, 0x0e, 0xed, 0xea, 0x5d, 0x39, 0x68, 0x30, 0x37, 0x2c, 0x9a, 0x3b, + 0x41, 0x93, 0x04, 0xb1, 0x39, 0x12, 0xf0, 0xb9, 0x29, 0x7a, 0x1c, 0xec, 0xc3, 0x41, 0x69, 0x4e, + 0xae, 0xc3, 0xec, 0x3b, 0xda, 0x7a, 0x8b, 0x0c, 0xc3, 0x79, 0x3b, 0x66, 0xf2, 0x62, 0x90, 0x4b, + 0x6f, 0x58, 0xef, 0x49, 0x9f, 0x0c, 0x1f, 0xfd, 0x47, 0x3f, 0x52, 0x79, 0x74, 0x95, 0xa8, 0x57, + 0x18, 0x53, 0x8a, 0xb2, 0x1b, 0xce, 0x46, 0x3b, 0xee, 0x72, 0x17, 0x27, 0x2f, 0x48, 
0xb4, 0x5e, + 0x0b, 0x97, 0x81, 0x0f, 0xb6, 0xad, 0x7a, 0xe8, 0x31, 0x0c, 0x72, 0xc7, 0x2d, 0x69, 0xab, 0x6f, + 0x57, 0x7f, 0xd6, 0xe3, 0xc9, 0x78, 0x82, 0xff, 0xa7, 0x46, 0x6e, 0x67, 0x30, 0x3f, 0xa8, 0x37, + 0xcd, 0xd1, 0x30, 0x8b, 0x8b, 0xc4, 0x3d, 0xce, 0x43, 0x91, 0xd7, 0x52, 0xb6, 0x02, 0xf7, 0x18, + 0x18, 0xa5, 0xad, 0xcc, 0x50, 0x5d, 0x2f, 0xb6, 0x7e, 0xde, 0x29, 0x9e, 0x0c, 0x09, 0xf2, 0x3e, + 0x9f, 0x92, 0x00, 0x75, 0x36, 0x24, 0xc0, 0x12, 0x6a, 0xe2, 0x99, 0x2b, 0x62, 0x94, 0x42, 0x18, + 0x78, 0x9d, 0x69, 0xd8, 0x21, 0x4a, 0xfe, 0x38, 0x08, 0x77, 0xf6, 0x95, 0xfe, 0x6d, 0x02, 0x92, + 0x12, 0x8e, 0x11, 0x10, 0xc3, 0xc4, 0x77, 0x86, 0x97, 0xd0, 0xce, 0xb9, 0xf4, 0xde, 0x8f, 0xd0, + 0xcd, 0x51, 0xac, 0xed, 0xde, 0x69, 0x06, 0xe6, 0x96, 0x9a, 0x40, 0x67, 0x2e, 0xd4, 0x6f, 0x85, + 0x4c, 0x82, 0x40, 0x5b, 0x71, 0x5f, 0x39, 0x29, 0xfd, 0x38, 0x9b, 0x6e, 0xa5, 0xaf, 0x54, 0xec, + 0x0c, 0xde, 0x51, 0x57, 0x13, 0xc1, 0x7b, 0x90, 0x58, 0x68, 0xd8, 0x02, 0x9d, 0x2d, 0xb4, 0xcf, + 0x53, 0x37, 0x32, 0xd1, 0x10, 0x1d, 0xc1, 0x0e, 0xd9, 0x31, 0x35, 0xec, 0x89, 0x4c, 0x2c, 0x58, + 0x72, 0x45, 0x50, 0xae, 0xc8, 0xfd, 0x5c, 0xf8, 0xfc, 0x3d, 0xce, 0x74, 0x08, 0x72, 0xa6, 0x20, + 0x50, 0x1f, 0x1c, 0x3f, 0x88, 0x72, 0x49, 0x01, 0x66, 0x55, 0x9b, 0x9f, 0xab, 0x6c, 0x3b, 0xbe, + 0x8a, 0xbb, 0x10, 0x1f, 0x70, 0x15, 0xc3, 0xa8, 0x17, 0x1a, 0x63, 0xee, 0x40, 0xe9, 0x69, 0x12, + 0xea, 0x4f, 0x1b, 0x6a, 0x28, 0x9a, 0xbd, 0x0a, 0xff, 0x61, 0x02, 0xdc, 0x8f, 0x1c, 0xbd, 0x9d, + 0xd0, 0x89, 0xa0, 0xc7, 0xee, 0xa4, 0xdd, 0xa1, 0x20, 0x34, 0xa3, 0x1e, 0x51, 0x08, 0x29, 0x8c, + 0xc1, 0xb4, 0x51, 0xdd, 0x4b, 0xd6, 0xdb, 0xa5, 0x75, 0xd0, 0x89, 0x90, 0xc7, 0x9e, 0xed, 0x30, + 0x2e, 0x1d, 0x85, 0x7b, 0xd0, 0x2e, 0x02, 0xad, 0xa5, 0x7b, 0x9a, 0x95, 0xb9, 0x27, 0xe3, 0x81, + 0xd9, 0x44, 0x7b, 0x30, 0xa5, 0xe8, 0x11, 0xa0, 0xbc, 0xbd, 0xc1, 0x07, 0x3a, 0xf1, 0xe8, 0x7a, + 0xb1, 0x4a, 0x97, 0x76, 0x20, 0x1d, 0xea, 0xea, 0x2a, 0x1a, 0x5d, 0x86, 0x96, 0x4a, 0xa9, 0x30, + 0x01, 0x71, 0x62, 0x91, 0xa4, 0x57, 0x76, 0x58, 0xd7, 0x2f, 0xe4, 0xf8, 0xe1, 0x06, 0x8d, 0xec, + 0x6a, 0xd5, 0xae, 0xcd, 0x31, 0xee, 0x35, 0xf6, 0xa2, 0x0d, 0x37, 0xc6, 0x56, 0xe7, 0x0f, 0xf9, + 0x0f, 0x8a, 0xcf, 0xe6, 0x3a, 0x72, 0x0f, 0x5b, 0x61, 0x1d, 0x82, 0x3b, 0x26, 0x96, 0x98, 0xec, + 0xa4, 0x35, 0x41, 0x56, 0x58, 0x60, 0x21, 0x47, 0xda, 0x3d, 0x98, 0x9e, 0x98, 0x1d, 0x6d, 0x76, + 0x80, 0x1d, 0x73, 0x5d, 0x4a, 0x23, 0x08, 0xfd, 0xb2, 0x79, 0x46, 0x85, 0xdc, 0xfe, 0xca, 0xc8, + 0xbb, 0xe5, 0xe2, 0x53, 0x4d, 0x7f, 0xbd, 0xce, 0x67, 0x9d, 0xa5, 0x4e, 0x8f, 0x33, 0x40, 0xeb, + 0xc8, 0x2c, 0x9f, 0x9b, 0x55, 0xf5, 0x04, 0x1d, 0xa1, 0xc5, 0xfb, 0x14, 0x2f, 0x41, 0x06, 0x35, + 0x06, 0x69, 0xeb, 0x0d, 0xe6, 0xf3, 0x54, 0x62, 0xc0, 0x53, 0x2c, 0x66, 0x70, 0x9a, 0x77, 0x56, + 0xf3, 0x80, 0x9c, 0xa7, 0xa2, 0xdb, 0xc6, 0x41, 0x6d, 0x30, 0xae, 0xc1, 0x38, 0x08, 0xec, 0x33, + 0x77, 0x9e, 0x7d, 0xa9, 0x95, 0xdf, 0x47, 0x41, 0x97, 0xb2, 0x31, 0x53, 0x5e, 0x41, 0x1e, 0xc2, + 0xeb, 0x88, 0x05, 0x14, 0xf6, 0x2d, 0xa0, 0x9c, 0x57, 0x09, 0x9f, 0x0e, 0x84, 0x89, 0x5e, 0x07, + 0xa7, 0x6a, 0x53, 0x3a, 0xf5, 0x86, 0x96, 0x0d, 0x4b, 0xa4, 0xae, 0x76, 0x7e, 0x02, 0xe8, 0x0b, + 0x70, 0x09, 0xa9, 0x67, 0xbb, 0xf1, 0x41, 0x1c, 0x13, 0x12, 0xcc, 0x96, 0xb2, 0xa5, 0x68, 0x7e, + 0x5a, 0xe8, 0x82, 0xed, 0x8c, 0xe3, 0x68, 0xab, 0xe3, 0x44, 0x1a, 0xc1, 0xf8, 0xb7, 0xe6, 0xa9, + 0xee, 0xaf, 0xc9, 0xd3, 0xc9, 0x36, 0x12, 0x60, 0xc6, 0x8f, 0xcb, 0xe9, 0xb4, 0xa5, 0x11, 0xed, + 0xda, 0xe4, 0x56, 0x44, 0x33, 0xc1, 0x95, 0x5b, 0x57, 0xc2, 0xd0, 0x1e, 0xa3, 0x4c, 0x9b, 0xa4, + 0x48, 0xe8, 
0x9a, 0xab, 0x3e, 0xb1, 0xae, 0x64, 0xcf, 0x04, 0x5a, 0x56, 0x44, 0xb0, 0x10, 0x83, + 0x06, 0xe7, 0xce, 0x22, 0x69, 0xd5, 0x95, 0x09, 0xbf, 0xdf, 0xd7, 0x9b, 0x13, 0x77, 0x3d, 0x50, + 0xc9, 0xd9, 0x6c, 0x50, 0x46, 0x71, 0x32, 0x0f, 0x96, 0x81, 0xbf, 0x84, 0xb0, 0xa1, 0x5c, 0xb7, + 0x54, 0xc7, 0xe5, 0xd9, 0xc6, 0x4f, 0xa1, 0xe1, 0x75, 0xbd, 0xf2, 0xdc, 0x96, 0xc7, 0x79, 0x6a, + 0xd3, 0x89, 0x02, 0x35, 0x94, 0xb6, 0x8f, 0x59, 0x37, 0x83, 0x52, 0xe5, 0x56, 0x2b, 0xa5, 0x74, + 0x89, 0xd7, 0x6c, 0x94, 0xfb, 0xec, 0x96, 0xdb, 0xfe, 0x27, 0xd9, 0x73, 0x5d, 0xa2, 0x95, 0x51, + 0x6d, 0x7d, 0x9e, 0xf9, 0xe3, 0xd3, 0xb9, 0xc1, 0xf5, 0x26, 0x42, 0x88, 0x5a, 0xf5, 0x71, 0x17, + 0x96, 0x93, 0x37, 0x3a, 0x21, 0x15, 0x54, 0x65, 0x86, 0x8f, 0x50, 0x16, 0xdb, 0x05, 0xb0, 0x0d, + 0xa1, 0xfb, 0x7c, 0x70, 0x35, 0xf0, 0x04, 0x8d, 0x8a, 0x1e, 0x46, 0x87, 0xf0, 0x84, 0x10, 0x05, + 0xeb, 0x08, 0x1e, 0x78, 0x04, 0x54, 0x45, 0xc0, 0x26, 0x40, 0x65, 0x00, 0xa3, 0xff, 0x59, 0x7b, + 0xa9, 0xe5, 0x84, 0x6d, 0x29, 0xe0, 0xa3, 0x05, 0xb8, 0x51, 0x6f, 0x54, 0x85, 0x1a, 0xea, 0x69, + 0xb3, 0x78, 0xcc, 0xfd, 0x77, 0xe3, 0xfc, 0xdd, 0x51, 0x4e, 0x9e, 0x92, 0xcd, 0x79, 0x6c, 0x06, + 0x55, 0xe9, 0xf2, 0xc0, 0x04, 0xfa, 0x4c, 0xfa, 0xe7, 0xce, 0xd7, 0x96, 0x5e, 0x42, 0xbc, 0x2b, + 0xa1, 0x76, 0x62, 0xda, 0xb6, 0x2c, 0x6c, 0xae, 0xa4, 0x86, 0x8c, 0x2c, 0x40, 0xa7, 0xbf, 0x3c, + 0x99, 0x2e, 0xfb, 0xfe, 0x60, 0x00, 0x41, 0x06, 0x6d, 0xc1, 0x3e, 0x79, 0x23, 0xbf, 0x9b, 0xd0, + 0x82, 0x44, 0x43, 0x67, 0x7e, 0x05, 0xde, 0x8e, 0x95, 0x01, 0x6a, 0xbd, 0xf5, 0x92, 0xcd, 0x0a, + 0x89, 0x81, 0x58, 0x5e, 0x87, 0xeb, 0x32, 0x4f, 0xde, 0x0c, 0xa3, 0x6b, 0x6f, 0x24, 0x0a, 0x36, + 0x5b, 0x4b, 0xf6, 0xea, 0xac, 0x7b, 0x1a, 0x40, 0x66, 0xf6, 0x5c, 0x6b, 0x85, 0x60, 0xaf, 0x3d, + 0x43, 0xce, 0xbc, 0x7c, 0x4e, 0x73, 0x16, 0xd5, 0xac, 0xfb, 0x91, 0xb5, 0xd8, 0xf6, 0x19, 0xf9, + 0x9b, 0x01, 0x5a, 0x12, 0xdf, 0xb9, 0x2f, 0xe1, 0xda, 0xd8, 0x0e, 0xc5, 0xa5, 0xcb, 0xc5, 0x37, + 0xa6, 0x63, 0x48, 0xe8, 0x25, 0x57, 0xfc, 0xf1, 0x21, 0x70, 0x83, 0x13, 0xde, 0xef, 0xa0, 0xb9, + 0xdd, 0xfc, 0xba, 0x43, 0xb7, 0x7c, 0xaa, 0x6f, 0x85, 0x89, 0xcc, 0x30, 0x1c, 0x3b, 0x7c, 0x62, + 0xd4, 0x8e, 0x08, 0x53, 0x06, 0x58, 0x69, 0xb5, 0xb1, 0x6f, 0x42, 0xc0, 0xfc, 0xf4, 0xab, 0x42, + 0xa2, 0xa7, 0x5a, 0xb8, 0xa3, 0x37, 0x93, 0xa0, 0xba, 0x4c, 0x4d, 0x34, 0x82, 0x1e, 0x6d, 0x7b, + 0x62, 0x30, 0xd1, 0xce, 0xf4, 0x0f, 0xda, 0x57, 0x5b, 0xc0, 0x83, 0x89, 0xd3, 0x13, 0x9f, 0x44, + 0x3a, 0xe5, 0x96, 0x8b, 0x94, 0xd2, 0xb9, 0xb4, 0x4c, 0x47, 0xcf, 0x6a, 0xb0, 0xff, 0xce, 0x0e, + 0xd7, 0x07, 0x78, 0xd6, 0x66, 0x6f, 0x48, 0x15, 0x4e, 0x47, 0xe8, 0xb1, 0x79, 0x8c, 0x0a, 0x03, + 0xa8, 0xe3, 0x0a, 0x81, 0x11, 0x36, 0xe0, 0x76, 0x1d, 0x05, 0xfc, 0x6c, 0xa6, 0x21, 0x7a, 0xb4, + 0x03, 0xd5, 0xee, 0x6a, 0x68, 0x6b, 0x79, 0x1a, 0xde, 0xdc, 0xdf, 0x68, 0x31, 0x56, 0xff, 0x95, + 0xf2, 0xe5, 0xb7, 0xed, 0x79, 0xde, 0x74, 0x2a, 0xf9, 0x68, 0xf9, 0x26, 0x3f, 0xb1, 0x13, 0xa6, + 0xba, 0xe5, 0x4c, 0x01, 0xe9, 0x6c, 0xc3, 0x60, 0xc2, 0xdd, 0x20, 0x5d, 0xd1, 0x54, 0x0b, 0xb6, + 0x0c, 0x86, 0xcf, 0xf2, 0x2f, 0xa8, 0xec, 0x63, 0x67, 0xf9, 0xf0, 0x67, 0x55, 0xef, 0xa5, 0xba, + 0x6e, 0xe4, 0x1b, 0xb1, 0x30, 0xf1, 0xd7, 0x05, 0xd0, 0xe8, 0xf3, 0xa6, 0xd9, 0x31, 0xf8, 0x69, + 0xd9, 0xf0, 0xa0, 0x08, 0xb6, 0x61, 0x77, 0x03, 0x4d, 0x67, 0xd7, 0x93, 0xa1, 0x89, 0xb6, 0xb2, + 0x4e, 0xb5, 0x6e, 0x41, 0xb7, 0x6c, 0xd3, 0x46, 0x81, 0x70, 0xb5, 0xc9, 0x3c, 0x68, 0x17, 0x32, + 0xe2, 0xba, 0xc1, 0x28, 0x03, 0xc1, 0x72, 0x5c, 0x42, 0xca, 0xf7, 0x16, 0x62, 0xfb, 0x57, 0xbb, + 0x33, 0x29, 0x21, 0xe9, 0x34, 0x70, 
0x60, 0x4b, 0xea, 0x82, 0x68, 0x52, 0x5f, 0xfe, 0xb3, 0xe6, + 0x05, 0x36, 0xdc, 0x62, 0x20, 0xbf, 0x65, 0xf3, 0xdb, 0x1e, 0xe4, 0x47, 0xac, 0xe2, 0x51, 0xc0, + 0xac, 0xed, 0xc6, 0x73, 0xcb, 0x1d, 0x62, 0x72, 0x9c, 0x15, 0x24, 0xbe, 0x6b, 0xbf, 0x58, 0xc6, + 0x79, 0xc3, 0x3f, 0x2c, 0xeb, 0x86, 0x9f, 0xca, 0x60, 0x6c, 0x93, 0x45, 0x7b, 0xb4, 0xbf, 0xd9, + 0x28, 0xfe, 0xf6, 0x05, 0x9c, 0x99, 0x9a, 0x34, 0x59, 0xd7, 0x15, 0x04, 0x4c, 0x0c, 0x17, 0x68, + 0x5c, 0x86, 0xdd, 0xb1, 0x67, 0xe4, 0x46, 0xe5, 0x47, 0x4e, 0x97, 0x2b, 0x2b, 0x3c, 0x2a, 0x5c, + 0x8e, 0x2d, 0x68, 0x57, 0xcb, 0xa4, 0x62, 0xea, 0x29, 0xfd, 0xf2, 0x08, 0x9c, 0x86, 0xf2, 0x6c, + 0x13, 0x2f, 0xcf, 0x6d, 0xa4, 0xae, 0x6e, 0x2e, 0xc9, 0x21, 0x4a, 0xac, 0xaf, 0x28, 0xa6, 0xd5, + 0xd6, 0xc9, 0x67, 0x22, 0x0f, 0x44, 0xd2, 0x57, 0x6d, 0xc8, 0x9a, 0xcf, 0x6d, 0x80, 0xdb, 0x7b, + 0xb2, 0xdc, 0x7e, 0xb0, 0x68, 0x5a, 0x01, 0xaf, 0xb3, 0x8d, 0xbc, 0x30, 0xeb, 0xfe, 0x7a, 0xd4, + 0xdc, 0xbc, 0x55, 0x5d, 0xdc, 0x92, 0x23, 0x85, 0x1e, 0xfd, 0xb0, 0x01, 0x2d, 0x26, 0x6a, 0xf1, + 0x40, 0x77, 0xcb, 0x9f, 0x0d, 0xef, 0x2d, 0x08, 0x24, 0x93, 0x8f, 0x9c, 0xab, 0x0d, 0xf0, 0xc3, + 0x5b, 0x22, 0x90, 0x51, 0x4f, 0x4f, 0x7c, 0x6c, 0xad, 0xe7, 0x81, 0x8d, 0xb7, 0x03, 0xd1, 0xcb, + 0xd1, 0x36, 0x83, 0xd2, 0xec, 0x41, 0x67, 0x2b, 0x4b, 0xf7, 0xd4, 0x22, 0x68, 0x6a, 0x90, 0x38, + 0x82, 0x01, 0x37, 0x04, 0xf0, 0xf4, 0xfa, 0xdc, 0x24, 0x67, 0xc3, 0xa4, 0x34, 0xdb, 0xb9, 0xf9, + 0x9d, 0x0d, 0xee, 0xc3, 0xb7, 0xc7, 0x07, 0x18, 0xc9, 0x17, 0xa7, 0xbb, 0x99, 0x88, 0xb4, 0xeb, + 0x87, 0x75, 0xb7, 0xab, 0xc8, 0x4a, 0xf8, 0x92, 0x5b, 0xd4, 0x57, 0xdc, 0xa3, 0x15, 0xf5, 0x4a, + 0x1c, 0x72, 0x5d, 0x58, 0xf9, 0xcc, 0xde, 0xca, 0xca, 0x3a, 0x7f, 0x85, 0x3d, 0x70, 0x8b, 0xce, + 0xe1, 0xc2, 0xaa, 0xd8, 0x2e, 0x5c, 0x5a, 0x8e, 0x2b, 0x2f, 0xaa, 0x77, 0x24, 0xcd, 0x01, 0x15, + 0x88, 0x59, 0x07, 0x4d, 0xa1, 0xc3, 0x72, 0x00, 0x0d, 0x24, 0x42, 0xca, 0xb5, 0x5d, 0xb2, 0x54, + 0x2f, 0xec, 0xaa, 0xd6, 0x64, 0xce, 0x36, 0x2b, 0x29, 0xf7, 0xe6, 0xb9, 0x6e, 0x12, 0xb1, 0x73, + 0x57, 0x6c, 0xfe, 0xba, 0xfb, 0x38, 0x64, 0x10, 0x18, 0x64, 0x43, 0x91, 0x28, 0xca, 0xd5, 0xee, + 0xac, 0x8a, 0x7b, 0x1f, 0x69, 0x0a, 0x6b, 0x79, 0xd3, 0x1b, 0xe9, 0xd8, 0xf5, 0x08, 0x57, 0xd7, + 0xd2, 0xc7, 0xba, 0x15, 0x7a, 0x94, 0xda, 0x4d, 0x8c, 0x61, 0xbe, 0xcb, 0xf2, 0x83, 0xb9, 0xbd, + 0x00, 0xe1, 0xbc, 0xc8, 0xf4, 0xd6, 0x73, 0xb5, 0xfb, 0x16, 0xa5, 0x1e, 0x91, 0x60, 0x29, 0x08, + 0x08, 0xdb, 0xbf, 0x66, 0xa8, 0x14, 0xcf, 0x1d, 0x21, 0xf0, 0xa5, 0xc4, 0x53, 0x20, 0x5e, 0xec, + 0x1f, 0x2e, 0x6d, 0xe3, 0x15, 0x26, 0x18, 0x39, 0x31, 0x89, 0xea, 0x24, 0x26, 0x86, 0x71, 0x8d, + 0xcb, 0x1d, 0x12, 0xfa, 0xcb, 0xe3, 0xc4, 0xe8, 0x5f, 0xa8, 0x85, 0x13, 0x32, 0xa2, 0x30, 0xee, + 0x17, 0x42, 0x44, 0x50, 0xa2, 0x73, 0x5a, 0xa6, 0xa5, 0x18, 0xef, 0xab, 0x4d, 0x8b, 0xf7, 0x23, + 0xdf, 0xd6, 0x9b, 0x39, 0x17, 0x5a, 0x77, 0xbb, 0x53, 0x25, 0xe3, 0xf9, 0xb3, 0x84, 0x80, 0xa4, + 0x5c, 0xe5, 0xb6, 0x72, 0x33, 0x37, 0xc3, 0x42, 0x7f, 0x44, 0x38, 0x02, 0x82, 0xe5, 0xab, 0xdc, + 0x06, 0xa3, 0x28, 0xd1, 0x69, 0x71, 0x76, 0x0e, 0x6e, 0xc8, 0x6c, 0x22, 0x1d, 0x64, 0x15, 0xf6, + 0xb7, 0xc9, 0x36, 0xdc, 0x0d, 0x47, 0x80, 0xc1, 0x7a, 0x63, 0x0c, 0xff, 0x6e, 0xed, 0xfa, 0xe6, + 0xc6, 0xd2, 0x96, 0x64, 0xb8, 0xa8, 0xf2, 0x32, 0x95, 0x74, 0x9b, 0x39, 0x30, 0xe3, 0x8e, 0xbb, + 0x6d, 0xac, 0x66, 0x54, 0x8c, 0x63, 0x62, 0xa3, 0xa1, 0x03, 0x50, 0x52, 0xef, 0xa0, 0x2f, 0xea, + 0x7e, 0xd1, 0x1f, 0x71, 0xb9, 0x44, 0x18, 0x83, 0xcd, 0xea, 0xcf, 0xef, 0x88, 0x6f, 0x17, 0xef, + 0x94, 0x90, 0x7d, 0x59, 0x01, 0xfb, 0xe2, 0xaa, 0xff, 0x98, 
0x63, 0x4c, 0xfa, 0xbe, 0x97, 0x65, + 0xf3, 0x7b, 0x40, 0xe2, 0xad, 0xe4, 0x86, 0x15, 0x6d, 0x30, 0xe5, 0xe1, 0x46, 0xc3, 0xa9, 0x8e, + 0x81, 0x3b, 0x69, 0x78, 0x40, 0xc0, 0x07, 0x54, 0x56, 0x6c, 0x36, 0xc2, 0xdf, 0xc8, 0x49, 0x54, + 0x8a, 0x63, 0x1d, 0x03, 0x79, 0x46, 0x41, 0xa3, 0xbf, 0x98, 0x4a, 0x9a, 0x6f, 0xe9, 0xf4, 0xc0, + 0xa5, 0x0b, 0xc4, 0x88, 0x31, 0x06, 0x8e, 0xc3, 0xea, 0x0d, 0x97, 0xfc, 0xf7, 0xfb, 0xcf, 0xd6, + 0xc1, 0x04, 0x23, 0x4c, 0xbd, 0x15, 0xe8, 0x2e, 0x43, 0x04, 0x35, 0xf4, 0xb3, 0xd6, 0xe9, 0xd3, + 0x91, 0x56, 0x7f, 0x7a, 0x4b, 0xea, 0xe0, 0x2a, 0x38, 0xcc, 0x2d, 0x11, 0x85, 0xc4, 0xe5, 0x5d, + 0x39, 0x8c, 0xe7, 0x21, 0x26, 0x70, 0x79, 0x2a, 0x45, 0x88, 0xd7, 0x87, 0xd1, 0xaa, 0xb0, 0xc6, + 0x84, 0x86, 0x2c, 0x55, 0xbc, 0x02, 0x32, 0x9a, 0x84, 0x14, 0xa4, 0xac, 0x38, 0x9d, 0x98, 0xf3, + 0xdd, 0x2c, 0x37, 0xdf, 0xa1, 0xdb, 0x4f, 0xa4, 0x93, 0x0a, 0x2b, 0x60, 0xdd, 0x10, 0x3a, 0x8d, + 0xaf, 0x8c, 0x2b, 0x9f, 0x20, 0xe5, 0xc4, 0x23, 0xc4, 0xce, 0x50, 0xde, 0xe0, 0x28, 0x96, 0x49, + 0x45, 0xbe, 0xc6, 0x43, 0xdb, 0x13, 0xb1, 0x21, 0x92, 0x8a, 0x60, 0xda, 0x98, 0xb7, 0x75, 0xea, + 0xcb, 0xaa, 0x64, 0x41, 0xac, 0x54, 0x44, 0xd9, 0x19, 0xae, 0xc1, 0x74, 0x68, 0xd8, 0x3e, 0xa5, + 0x01, 0x34, 0x84, 0x40, 0x5f, 0x53, 0x73, 0xe9, 0x77, 0x96, 0xe8, 0x8f, 0x36, 0x09, 0x75, 0xc3, + 0x2c, 0xdf, 0x64, 0x54, 0xe7, 0xb0, 0xb9, 0xe9, 0x79, 0x12, 0x8d, 0x06, 0x3d, 0x13, 0xc3, 0x25, + 0x22, 0xc0, 0x6a, 0x71, 0x75, 0x46, 0xac, 0xdb, 0x6f, 0x7a, 0xcf, 0xf0, 0x9b, 0x75, 0x4c, 0x3b, + 0xec, 0x95, 0x0d, 0x78, 0x43, 0xa3, 0xba, 0x59, 0xd0, 0x1b, 0x2a, 0x03, 0x47, 0xcf, 0xf2, 0xae, + 0xf6, 0x80, 0xa3, 0x8e, 0x83, 0xbe, 0xff, 0x2a, 0x36, 0x72, 0xef, 0xe8, 0x13, 0x24, 0xe5, 0x3d, + 0x8b, 0xe6, 0x84, 0x89, 0x58, 0xa5, 0x66, 0x6d, 0x0a, 0xff, 0xa8, 0xd5, 0xcc, 0x1f, 0xb1, 0xb1, + 0x04, 0x44, 0x95, 0x22, 0x82, 0xec, 0xa9, 0x16, 0x2b, 0xe7, 0x55, 0x9b, 0x7b, 0xb0, 0x79, 0x1c, + 0x9d, 0x1c, 0xb0, 0x66, 0x38, 0x55, 0x97, 0x21, 0x92, 0x5d, 0x27, 0xd2, 0xbf, 0x68, 0x9e, 0x9f, + 0x00, 0x70, 0x6b, 0x2c, 0xf4, 0xa7, 0xfa, 0x68, 0x31, 0x14, 0x08, 0xaa, 0xeb, 0xec, 0xbc, 0x9e, + 0xa4, 0xfe, 0x14, 0xb3, 0x12, 0x58, 0xec, 0xcb, 0xa4, 0x80, 0x36, 0x26, 0xac, 0xf9, 0xc2, 0xd3, + 0x7c, 0x47, 0x27, 0x3b, 0x67, 0x67, 0x0d, 0xeb, 0x14, 0xfa, 0xc0, 0xde, 0x22, 0xbc, 0x51, 0xfd, + 0x0e, 0xcd, 0xdf, 0x5d, 0x98, 0x05, 0x19, 0x72, 0xc3, 0x1f, 0xc4, 0x50, 0x63, 0x03, 0xd4, 0xd3, + 0xd6, 0x77, 0xbf, 0xab, 0xa5, 0x9d, 0xb8, 0x2e, 0xdf, 0x9c, 0x76, 0x75, 0xe0, 0xd5, 0xc8, 0x4b, + 0x4b, 0x3e, 0x31, 0xc3, 0xd0, 0xc3, 0xf8, 0x1c, 0x3e, 0x40, 0xd8, 0x18, 0xd0, 0x92, 0x55, 0x42, + 0xf4, 0x04, 0x58, 0x06, 0x46, 0xfe, 0xc7, 0x3f, 0xdb, 0xe5, 0x2d, 0x48, 0x84, 0x45, 0xe3, 0x91, + 0x29, 0x82, 0x30, 0xdf, 0x5c, 0x07, 0x46, 0xd8, 0x87, 0x5d, 0xa1, 0x36, 0xab, 0x82, 0x18, 0xb6, + 0xb5, 0xd2, 0xd8, 0x8c, 0x8f, 0xd9, 0xe3, 0xaf, 0xc8, 0xf3, 0x61, 0xa5, 0x40, 0x4b, 0x86, 0x1b, + 0xed, 0xc4, 0x39, 0x90, 0x37, 0x41, 0xb4, 0x5c, 0x4a, 0xed, 0x92, 0x65, 0xc9, 0xe9, 0x09, 0x07, + 0xb2, 0xc1, 0x9d, 0x84, 0xc1, 0x0f, 0x93, 0x30, 0x77, 0x50, 0xf9, 0x04, 0xad, 0x27, 0xb3, 0x55, + 0xc8, 0xe3, 0x12, 0x82, 0x97, 0x2a, 0x65, 0xde, 0xe2, 0xd4, 0x68, 0xfa, 0x93, 0x33, 0x3c, 0xf2, + 0xf0, 0xe8, 0x1c, 0x55, 0xcd, 0xdd, 0x14, 0x5c, 0x3b, 0xb9, 0x39, 0x8e, 0x60, 0x31, 0xec, 0x22, + 0x94, 0xa2, 0xae, 0xcc, 0x65, 0x29, 0x85, 0xca, 0xcd, 0x22, 0x2c, 0xd0, 0xf0, 0x34, 0x88, 0x2b, + 0x29, 0xb2, 0x70, 0x22, 0x18, 0xe9, 0x24, 0x15, 0x2f, 0x61, 0x5f, 0xfc, 0xc4, 0xc4, 0x46, 0xb7, + 0x4d, 0x5a, 0x63, 0x40, 0x77, 0x50, 0x55, 0xcc, 0x61, 0xcc, 0xa3, 0xe3, 0x07, 0xd9, 
0xb9, 0x89, + 0xfe, 0x52, 0xde, 0xe7, 0xbf, 0x10, 0xa8, 0x8f, 0xbd, 0xb1, 0x61, 0x36, 0xd6, 0x70, 0x18, 0x3a, + 0xe7, 0xfd, 0x21, 0xd9, 0x85, 0x8c, 0x64, 0xd1, 0x64, 0x13, 0x6c, 0x4f, 0x63, 0x5f, 0x41, 0xf2, + 0xcb, 0x75, 0x51, 0xad, 0x66, 0x88, 0x0e, 0xeb, 0xb9, 0xa3, 0x7c, 0x65, 0xa0, 0xf2, 0x9c, 0x4e, + 0xbc, 0xcf, 0x4b, 0x87, 0xc0, 0x91, 0xe4, 0xbb, 0x19, 0xed, 0xe9, 0xc2, 0x43, 0x9f, 0x47, 0x8e, + 0xfa, 0xfc, 0x07, 0xf4, 0x1a, 0xa4, 0x00, 0xaa, 0xf8, 0x43, 0x31, 0xa2, 0x43, 0x33, 0xb7, 0x0c, + 0xe6, 0x5c, 0x6b, 0xa7, 0x1b, 0xb6, 0xfc, 0x2c, 0x4b, 0x3d, 0x24, 0x2c, 0x73, 0xec, 0x08, 0x62, + 0x04, 0xec, 0xb0, 0x49, 0x5d, 0xed, 0xe2, 0xad, 0x2f, 0x23, 0xf6, 0xe6, 0x23, 0xc1, 0x00, 0x01, + 0xb8, 0x24, 0xce, 0xb3, 0xe4, 0x63, 0x0c, 0x0c, 0xbc, 0x48, 0x86, 0x1a, 0xb1, 0xba, 0x73, 0x09, + 0x3f, 0x81, 0x09, 0xaa, 0x54, 0xe6, 0x9d, 0x3d, 0x4a, 0x95, 0xc6, 0x1a, 0x6f, 0x8f, 0x9b, 0xf0, + 0x4d, 0x5f, 0x2b, 0xce, 0x8c, 0xbc, 0x13, 0x47, 0x67, 0xfe, 0x92, 0xb4, 0xe5, 0x2a, 0xd6, 0x4e, + 0x5f, 0xb2, 0xe5, 0x76, 0xbd, 0x4d, 0x6a, 0x99, 0x04, 0x10, 0x5c, 0x2f, 0xd0, 0x46, 0x58, 0x36, + 0x80, 0x15, 0xe4, 0x93, 0x2e, 0xf2, 0xb1, 0x74, 0xc6, 0x4e, 0xa8, 0x1e, 0xd6, 0x8c, 0x02, 0x05, + 0x61, 0x11, 0x98, 0x75, 0x9a, 0x52, 0x91, 0xc8, 0x76, 0x80, 0xd2, 0x27, 0x0c, 0xda, 0x23, 0xc1, + 0xfd, 0x3c, 0x0f, 0x66, 0x7e, 0x9c, 0xd8, 0x32, 0xc4, 0x97, 0x57, 0x59, 0x99, 0x1a, 0x38, 0xe8, + 0x99, 0xb6, 0xbf, 0xad, 0xd8, 0xc6, 0xee, 0xdf, 0x06, 0x4b, 0xb5, 0xb5, 0xfb, 0x5c, 0x09, 0x91, + 0x2f, 0xc7, 0x1f, 0xae, 0x0b, 0xdb, 0x47, 0x19, 0x6e, 0x04, 0x75, 0x4f, 0xe5, 0x42, 0xdf, 0xaf, + 0xf8, 0x95, 0x0a, 0x38, 0xdc, 0xec, 0x79, 0x56, 0x76, 0x29, 0xef, 0x50, 0x60, 0x8e, 0xb2, 0x9e, + 0xdc, 0x26, 0x55, 0x9b, 0x0b, 0x6e, 0xb1, 0x9f, 0xb5, 0xa0, 0x51, 0xe1, 0xa7, 0x86, 0x98, 0xe5, + 0xca, 0x71, 0x7b, 0xec, 0x7f, 0xca, 0xc5, 0x6a, 0xc6, 0xcf, 0xa8, 0x12, 0xad, 0x79, 0x7f, 0x53, + 0xbc, 0x07, 0x5d, 0x69, 0xc5, 0x66, 0xb5, 0x15, 0xaf, 0x9d, 0x90, 0xa4, 0xdc, 0xfe, 0x58, 0x44, + 0x33, 0x34, 0x4b, 0x2c, 0x8e, 0x12, 0xa9, 0x85, 0xf3, 0x6d, 0x77, 0x04, 0x0e, 0x0d, 0x65, 0xb1, + 0xf8, 0x4d, 0xce, 0xcb, 0xde, 0x51, 0x40, 0x29, 0x69, 0x21, 0x6c, 0x79, 0x9e, 0xa9, 0x98, 0x3a, + 0xea, 0x21, 0x47, 0xed, 0x64, 0x96, 0xf2, 0x5b, 0x98, 0x75, 0x21, 0xf4, 0x47, 0x8e, 0x0b, 0x43, + 0x43, 0x66, 0xf9, 0x4c, 0x28, 0x19, 0x71, 0xce, 0x98, 0xf7, 0x91, 0xb6, 0x51, 0x04, 0x97, 0x4b, + 0xb3, 0x7f, 0x7f, 0xd3, 0x12, 0xa8, 0xe3, 0xa4, 0x57, 0xbb, 0x1c, 0xbc, 0x60, 0x3d, 0x7f, 0xa6, + 0xd0, 0x17, 0xb9, 0x7b, 0x94, 0x66, 0x17, 0xdc, 0x9c, 0x45, 0xf5, 0x45, 0xc1, 0xcb, 0xf1, 0xbf, + 0x3d, 0x50, 0xd1, 0x2d, 0x7e, 0x3e, 0x74, 0x43, 0x72, 0xc8, 0xb7, 0x84, 0x03, 0xc7, 0x28, 0x23, + 0xc4, 0x7d, 0xa1, 0xc6, 0x7e, 0x0a, 0x38, 0x66, 0xb9, 0x60, 0x69, 0x6a, 0xee, 0x1b, 0x83, 0x5d, + 0xa6, 0x40, 0x68, 0x71, 0x27, 0x3d, 0x98, 0x39, 0xdf, 0x5f, 0xd5, 0xad, 0x55, 0x48, 0x4e, 0x0c, + 0x26, 0xb5, 0x71, 0xe9, 0x94, 0xc9, 0xef, 0x57, 0x27, 0xae, 0x77, 0xa8, 0x2e, 0xf0, 0x79, 0x91, + 0xbf, 0xc1, 0x55, 0x04, 0xee, 0xa1, 0xa7, 0x68, 0x9d, 0x65, 0xaf, 0xf8, 0x1b, 0x89, 0xb8, 0xf9, + 0x61, 0x0f, 0x5e, 0x36, 0x29, 0x4a, 0x63, 0x41, 0xd2, 0x10, 0x69, 0x22, 0x45, 0x4b, 0x4c, 0x05, + 0xbe, 0x51, 0x1c, 0x5b, 0x7e, 0xff, 0xe2, 0xbd, 0xd7, 0x67, 0x1e, 0xb2, 0x3f, 0x5f, 0x23, 0x10, + 0x25, 0x86, 0xb4, 0x04, 0x81, 0x15, 0x45, 0x81, 0x8d, 0xe7, 0x75, 0xc6, 0x29, 0x13, 0x9f, 0xbc, + 0x03, 0x58, 0x43, 0xcf, 0x2a, 0xa1, 0xdd, 0xc2, 0xb5, 0x0f, 0xb2, 0xf6, 0xed, 0x59, 0x57, 0xe4, + 0xa3, 0xaf, 0x6b, 0xb5, 0xf0, 0x7f, 0x8b, 0xf3, 0x6a, 0x81, 0x69, 0xde, 0x8d, 0xaf, 0xdf, 0x49, + 0xf3, 0x91, 
0x9c, 0x57, 0xc2, 0x3f, 0xf6, 0xea, 0x39, 0x11, 0xba, 0x15, 0x72, 0x10, 0xd4, 0x57, + 0x8e, 0xc4, 0x4e, 0xba, 0x15, 0x33, 0xbc, 0xf9, 0xff, 0x7a, 0x9e, 0x84, 0x3e, 0xba, 0x16, 0xcc, + 0x85, 0x23, 0x63, 0x55, 0xf3, 0xd7, 0x45, 0xaa, 0xcc, 0x7c, 0x1a, 0xd9, 0xb7, 0xe7, 0x05, 0xf1, + 0x7f, 0xe3, 0xb3, 0xa9, 0x48, 0xbb, 0x1b, 0x1f, 0x65, 0x39, 0x88, 0xc2, 0x08, 0xdc, 0xbf, 0xa7, + 0x2f, 0xdd, 0xb6, 0x5a, 0x97, 0xff, 0x65, 0x63, 0x29, 0xe3, 0xdb, 0xb3, 0xdb, 0x7e, 0x8a, 0xc1, + 0x8e, 0x9a, 0xb3, 0x7f, 0xec, 0x27, 0xf4, 0x91, 0x1c, 0xb6, 0x94, 0xf1, 0x37, 0x15, 0x9b, 0xa4, + 0xdd, 0x0b, 0xe1, 0x06, 0x9c, 0xc2, 0xbc, 0x24, 0xe0, 0x21, 0xb1, 0x3f, 0x10, 0xc3, 0xe5, 0xc2, + 0x59, 0xe0, 0xfb, 0xee, 0xee, 0xdc, 0x97, 0x75, 0xff, 0xe8, 0xcd, 0xa7, 0x5c, 0x83, 0xc4, 0xe8, + 0x86, 0x47, 0x08, 0xec, 0x08, 0x25, 0x72, 0x83, 0x7e, 0x57, 0x6d, 0x62, 0x53, 0x32, 0x59, 0x7a, + 0x39, 0xda, 0xd6, 0x31, 0xb2, 0xc2, 0xf6, 0x51, 0xa1, 0x12, 0xbe, 0x84, 0x34, 0x09, 0x67, 0x09, + 0x65, 0x03, 0x05, 0x76, 0x2c, 0xa4, 0x57, 0x3c, 0xb6, 0x91, 0x5b, 0xe2, 0x30, 0x19, 0x2d, 0xcf, + 0x00, 0x35, 0x0a, 0x17, 0xce, 0x48, 0x75, 0x66, 0x7f, 0xb9, 0x56, 0xb3, 0x4a, 0xc3, 0x5c, 0xf4, + 0x06, 0xfd, 0xd3, 0xb6, 0x60, 0xe9, 0x35, 0xec, 0x70, 0x97, 0x8c, 0xb5, 0x63, 0xda, 0x07, 0x6d, + 0xa8, 0xf7, 0x4b, 0x2a, 0x1a, 0xdb, 0x5f, 0x36, 0x4e, 0x36, 0x49, 0x09, 0xd7, 0xb2, 0x20, 0x89, + 0xd5, 0x42, 0xb0, 0x9a, 0x17, 0x45, 0xe8, 0x6f, 0x9a, 0xe8, 0x3a, 0xbc, 0xcb, 0x12, 0xf4, 0x5c, + 0xfa, 0xc1, 0x28, 0xe7, 0xce, 0x0f, 0x82, 0xef, 0x33, 0x8e, 0x5e, 0x37, 0xd0, 0xe3, 0x3f, 0x72, + 0xe6, 0xae, 0xad, 0xd9, 0x1b, 0x56, 0x5f, 0x69, 0xd4, 0xb8, 0x13, 0xe5, 0xcb, 0x98, 0xec, 0xfa, + 0x49, 0x38, 0xc3, 0x5a, 0xd1, 0xef, 0x13, 0x9d, 0x03, 0xda, 0x90, 0xf1, 0x8c, 0x2e, 0x0c, 0x5a, + 0x05, 0xa0, 0xcf, 0xab, 0x64, 0xe1, 0xaa, 0x73, 0x1b, 0x57, 0x1a, 0x27, 0x6b, 0xc2, 0xc6, 0xbf, + 0xa8, 0x25, 0x01, 0xaf, 0x59, 0x2d, 0x5d, 0xe2, 0xdb, 0xc3, 0xee, 0xf6, 0x8e, 0xd9, 0xf1, 0xfb, + 0xf9, 0x8e, 0x26, 0x8d, 0x41, 0x05, 0x5b, 0xd0, 0xb8, 0x2a, 0x27, 0x21, 0x5c, 0x65, 0xc6, 0x44, + 0x75, 0xd1, 0x81, 0x47, 0xba, 0x6e, 0x63, 0xfc, 0x45, 0x60, 0x33, 0x11, 0x4f, 0x65, 0xbe, 0xc9, + 0x86, 0x74, 0x78, 0xd1, 0xb3, 0xf9, 0x57, 0xb5, 0xdc, 0x2c, 0x5a, 0x67, 0x5f, 0x91, 0x7d, 0x60, + 0x12, 0x1e, 0x41, 0x22, 0x84, 0x2e, 0x78, 0xe9, 0xba, 0xba, 0xd8, 0x73, 0x3a, 0xd0, 0x60, 0x70, + 0xc1, 0x3d, 0x1a, 0x2d, 0x04, 0x4f, 0xff, 0x6b, 0x57, 0x89, 0x3b, 0xcc, 0x55, 0x1a, 0xd8, 0xd9, + 0x2b, 0x47, 0x51, 0xdb, 0xe4, 0x16, 0xd8, 0x10, 0xce, 0xee, 0x90, 0x55, 0x8e, 0xe8, 0xf1, 0xeb, + 0x56, 0x79, 0xe1, 0xe9, 0xc4, 0xea, 0x12, 0xcd, 0xf7, 0x65, 0xa9, 0x2d, 0x00, 0x40, 0x0e, 0x2e, + 0xd2, 0x1c, 0xa2, 0xc0, 0x4e, 0xe3, 0x66, 0x16, 0x1a, 0x49, 0x62, 0x55, 0xa4, 0x5b, 0x24, 0x5d, + 0x8c, 0x75, 0xba, 0x65, 0xf2, 0x2a, 0x85, 0x3c, 0xe2, 0x26, 0xfb, 0x6f, 0x3f, 0x07, 0xbf, 0xf9, + 0x91, 0x3c, 0x0d, 0x28, 0xd3, 0x23, 0x1d, 0x9d, 0x30, 0x9e, 0xe6, 0x76, 0xec, 0x63, 0x21, 0x20, + 0x80, 0x9b, 0xa2, 0x69, 0x48, 0xf3, 0xa9, 0xc1, 0x89, 0x06, 0x9a, 0x65, 0xf6, 0x62, 0x26, 0x24, + 0xbb, 0x10, 0x6b, 0x76, 0xce, 0x79, 0x23, 0xfc, 0x79, 0x34, 0x18, 0xa4, 0x08, 0xf0, 0x94, 0x2d, + 0x00, 0x85, 0x4f, 0x82, 0xb8, 0x3e, 0x69, 0x49, 0xbf, 0x62, 0x22, 0xfd, 0x67, 0xc4, 0xb8, 0x46, + 0xa5, 0xd3, 0xa6, 0xc8, 0x70, 0x39, 0xca, 0x3c, 0xe7, 0xee, 0x02, 0x24, 0x64, 0x80, 0xe0, 0xb6, + 0xda, 0xa7, 0x28, 0xcb, 0x53, 0x96, 0x21, 0x00, 0x30, 0x8a, 0xea, 0x2c, 0xc1, 0xeb, 0x12, 0x28, + 0xb5, 0x72, 0x98, 0x3a, 0x3b, 0xdd, 0x3c, 0x53, 0xc2, 0xb4, 0x67, 0xf9, 0x59, 0x94, 0x7c, 0xc5, + 0x54, 0x3d, 0x9f, 0xb0, 0x6c, 0x54, 
0x89, 0xfe, 0xc4, 0x05, 0x14, 0x6a, 0x7f, 0x7d, 0x2c, 0x20, + 0x90, 0xdd, 0x17, 0x19, 0xb6, 0xc1, 0x2f, 0x12, 0x22, 0x59, 0x34, 0xb4, 0x7a, 0x75, 0xc3, 0x3b, + 0x70, 0xc7, 0xdd, 0xf2, 0x59, 0xca, 0x64, 0xb3, 0xcb, 0xd1, 0x43, 0xb1, 0x3a, 0x67, 0x0d, 0xb9, + 0x7b, 0x66, 0x33, 0xd7, 0x8a, 0xd9, 0x0b, 0x4e, 0x49, 0x6b, 0x40, 0x15, 0x03, 0x4b, 0x7d, 0x95, + 0xf1, 0xfa, 0x47, 0x5f, 0x2e, 0x8c, 0xea, 0x5e, 0xe9, 0x23, 0x71, 0x1a, 0xf0, 0x99, 0x82, 0x71, + 0x98, 0x2f, 0x10, 0xe4, 0x44, 0x6c, 0xa3, 0x15, 0x80, 0x53, 0x34, 0x77, 0x49, 0x24, 0x30, 0xc2, + 0x9a, 0x8b, 0x6b, 0x5f, 0x35, 0x38, 0xc9, 0x28, 0xfc, 0x37, 0x15, 0x91, 0x04, 0xec, 0xa2, 0x6a, + 0x80, 0x9d, 0x34, 0x75, 0x7f, 0xc6, 0x99, 0xe7, 0x9f, 0x3c, 0x06, 0xd0, 0x57, 0x81, 0x6e, 0xf8, + 0x02, 0x3a, 0xea, 0xf7, 0x50, 0x67, 0xec, 0x14, 0x98, 0xdf, 0xc3, 0x4f, 0x0e, 0xd7, 0x5e, 0x25, + 0x89, 0xa9, 0xa9, 0x67, 0x92, 0x63, 0x2c, 0x5c, 0x9f, 0x5a, 0xf5, 0xf6, 0x49, 0xe2, 0x6b, 0x3e, + 0xa5, 0xf9, 0x48, 0x4c, 0xb5, 0x54, 0x49, 0x69, 0x61, 0x11, 0xa2, 0x56, 0x9d, 0x6e, 0x06, 0xd9, + 0xd3, 0x69, 0x78, 0x98, 0x1a, 0x08, 0xd1, 0xd0, 0xe1, 0x74, 0xb2, 0xe4, 0xcd, 0x1a, 0xc5, 0xe1, + 0x01, 0x79, 0x62, 0x26, 0xb3, 0x1d, 0x56, 0xac, 0x33, 0xed, 0x56, 0xf7, 0xd1, 0xa6, 0x54, 0xc6, + 0x92, 0x31, 0x60, 0x85, 0x45, 0xea, 0x12, 0xd5, 0x01, 0x48, 0x49, 0x5a, 0x30, 0x6f, 0x89, 0x5a, + 0xd3, 0x04, 0xaf, 0xe7, 0x7d, 0x6d, 0x50, 0x92, 0x94, 0x30, 0x54, 0x5b, 0x6e, 0x9d, 0xc3, 0x96, + 0x55, 0xd2, 0xa4, 0x92, 0xe6, 0xb3, 0x6c, 0x1b, 0x48, 0x88, 0x98, 0x99, 0x52, 0x31, 0xa9, 0x20, + 0xb5, 0x94, 0xae, 0x6e, 0x83, 0x6b, 0xc6, 0x06, 0x47, 0x5b, 0x22, 0xc2, 0x25, 0xde, 0x65, 0xa8, + 0x05, 0x71, 0x12, 0x4a, 0x98, 0x07, 0x2d, 0x12, 0xe5, 0xbb, 0xda, 0x4c, 0x0d, 0xc2, 0xcb, 0x6b, + 0x10, 0xd1, 0x85, 0xb6, 0xb0, 0xac, 0x70, 0x82, 0x2e, 0xc7, 0x93, 0xa2, 0x86, 0x38, 0x19, 0xff, + 0x77, 0x04, 0x85, 0xed, 0x7c, 0x2b, 0xd9, 0x88, 0x6c, 0x37, 0x89, 0x45, 0x79, 0x34, 0x65, 0x7d, + 0xe9, 0x59, 0xcf, 0xba, 0xf1, 0x3d, 0x66, 0xa9, 0xea, 0x57, 0x0c, 0x7f, 0xc4, 0xe1, 0xb4, 0x4e, + 0xac, 0xbc, 0x4d, 0xc9, 0x31, 0x80, 0x34, 0x07, 0xc7, 0x8c, 0x3f, 0x3c, 0xe0, 0x54, 0x0c, 0x24, + 0xac, 0x34, 0xad, 0xd4, 0x65, 0x93, 0xa0, 0xf0, 0x34, 0x40, 0x2d, 0x1f, 0x23, 0x74, 0xfc, 0x7c, + 0x6e, 0x8c, 0xdf, 0xa7, 0x84, 0xda, 0x7b, 0xa6, 0x36, 0x79, 0xce, 0xc7, 0xe0, 0xc6, 0xbe, 0xf8, + 0x16, 0x4b, 0x9b, 0x3a, 0xae, 0xab, 0x3a, 0x8f, 0xaf, 0xa3, 0x47, 0x1a, 0x37, 0x84, 0x02, 0x22, + 0x6b, 0xce, 0x8f, 0x7d, 0x83, 0x68, 0xe6, 0x6c, 0xcd, 0x11, 0x19, 0x05, 0x49, 0x2d, 0x47, 0xe5, + 0xd4, 0xbf, 0xa0, 0xd7, 0xf7, 0x40, 0xe7, 0x18, 0xb7, 0xb1, 0x2c, 0x58, 0xf8, 0x5b, 0x72, 0xeb, + 0x44, 0x3c, 0xa9, 0xcd, 0x4c, 0x31, 0x82, 0xec, 0x74, 0xbf, 0xc2, 0x76, 0xbc, 0x79, 0x31, 0xf0, + 0xa0, 0x35, 0x18, 0x2b, 0x3d, 0x91, 0x81, 0xcd, 0x88, 0x35, 0xbb, 0xbd, 0xe8, 0x34, 0xca, 0xf2, + 0x1f, 0xfd, 0xbb, 0x2d, 0xd4, 0x94, 0xb0, 0xc1, 0x2c, 0xe4, 0xdd, 0x0e, 0xbf, 0x0f, 0xfe, 0x67, + 0x21, 0x00, 0x98, 0x67, 0xbe, 0x35, 0xf0, 0xf2, 0xd1, 0x4a, 0x39, 0x99, 0x51, 0x50, 0x2f, 0xce, + 0xab, 0xea, 0xf4, 0x96, 0x26, 0x49, 0x61, 0x35, 0x5e, 0xd4, 0xbf, 0x27, 0x4c, 0x76, 0x15, 0x0a, + 0x80, 0xf3, 0x8b, 0xa8, 0x3e, 0xd3, 0x59, 0xcc, 0x3d, 0x7b, 0xd2, 0x47, 0x7c, 0x0c, 0x57, 0x89, + 0x31, 0x10, 0x57, 0x3a, 0x45, 0xb3, 0xb1, 0x67, 0xa0, 0x1c, 0x83, 0x45, 0x49, 0x05, 0x30, 0x8d, + 0xe2, 0x24, 0xfd, 0xa6, 0x74, 0x2a, 0x37, 0x3b, 0xfd, 0x08, 0xba, 0x37, 0x4d, 0xd7, 0x4b, 0x74, + 0x56, 0xa8, 0xb9, 0x0c, 0xa5, 0x90, 0xfa, 0x0d, 0x67, 0x93, 0x56, 0xcf, 0x79, 0x2d, 0x6a, 0x78, + 0xb0, 0x9d, 0x09, 0x6a, 0x12, 0x83, 0xb7, 0xcf, 0xb0, 0x53, 
0xe2, 0x0c, 0xec, 0x3c, 0x02, 0x1b, + 0xd3, 0x95, 0xf0, 0xe0, 0x96, 0x6c, 0x74, 0x7b, 0x36, 0x1d, 0x90, 0xcc, 0x5b, 0x5c, 0xf7, 0x98, + 0xa4, 0x30, 0x6c, 0x30, 0xcd, 0xaf, 0x7f, 0xc3, 0x9d, 0x5b, 0x5a, 0x2e, 0x1e, 0x05, 0x91, 0x5b, + 0x5d, 0x2b, 0x6d, 0x3d, 0xb9, 0x65, 0xd9, 0x32, 0x9f, 0xea, 0x72, 0x30, 0x66, 0xcf, 0x2d, 0x3e, + 0xbc, 0x60, 0x61, 0x19, 0x38, 0xcb, 0x26, 0x26, 0x33, 0x1a, 0xa5, 0x96, 0xca, 0xa9, 0x84, 0x66, + 0xde, 0x45, 0x7b, 0xcf, 0xeb, 0x3a, 0x63, 0x98, 0xc0, 0xf7, 0x9b, 0x10, 0x28, 0x84, 0x90, 0x9d, + 0x13, 0xe3, 0xc5, 0xf9, 0x28, 0x8f, 0x4b, 0xfd, 0xc7, 0x9d, 0x9e, 0x3c, 0xd0, 0x48, 0x9c, 0x24, + 0x12, 0x4e, 0x2a, 0x54, 0x7d, 0xa2, 0x5a, 0x1f, 0xb0, 0xc6, 0xd0, 0xd0, 0xe7, 0xe1, 0x24, 0x49, + 0xa6, 0xb3, 0x09, 0x95, 0xe5, 0xdc, 0x9b, 0xa0, 0xe0, 0x66, 0x26, 0x8f, 0xb7, 0x0d, 0x93, 0x98, + 0xd3, 0xa9, 0x15, 0x07, 0x55, 0x85, 0xc2, 0x77, 0x60, 0x50, 0xd5, 0xe0, 0xb3, 0x2d, 0x80, 0xd3, + 0x98, 0xe1, 0x81, 0x62, 0xe1, 0x89, 0x28, 0x58, 0x92, 0x84, 0xa5, 0x5c, 0xdd, 0xb0, 0xf4, 0xf1, + 0x24, 0x35, 0xc8, 0xf2, 0x28, 0xfd, 0x95, 0x8d, 0x1f, 0x0d, 0x00, 0x33, 0x05, 0x37, 0xb2, 0x9c, + 0xdf, 0xe6, 0xe7, 0xa2, 0xea, 0x89, 0x9c, 0x39, 0x61, 0x68, 0x53, 0xbb, 0x99, 0x69, 0x4b, 0x82, + 0x27, 0xef, 0x05, 0x4e, 0xac, 0x9d, 0x86, 0x77, 0x53, 0x96, 0x36, 0x11, 0x22, 0xef, 0x92, 0x23, + 0x51, 0x50, 0x51, 0x6d, 0x1d, 0x8b, 0x22, 0x9e, 0x78, 0xbb, 0x07, 0x71, 0x96, 0xb9, 0x79, 0x8b, + 0x84, 0x0b, 0xba, 0xd1, 0x2f, 0x8f, 0x4c, 0x37, 0x6e, 0x72, 0xe2, 0x27, 0x74, 0x47, 0xfb, 0x18, + 0xee, 0x5e, 0x86, 0xdb, 0xa2, 0x95, 0x0c, 0xfa, 0x10, 0x70, 0xee, 0x21, 0xfb, 0xd8, 0xb8, 0xa6, + 0x41, 0x4f, 0x2b, 0xb5, 0xbc, 0x41, 0x97, 0x39, 0x77, 0x2d, 0xc8, 0x21, 0x29, 0x78, 0x7e, 0xe8, + 0x1a, 0x41, 0x36, 0xc0, 0xef, 0xee, 0x90, 0x1d, 0x9c, 0xb7, 0xd2, 0x2a, 0x62, 0x49, 0x5f, 0x98, + 0x4c, 0x2e, 0x78, 0xe1, 0x6a, 0x78, 0x6b, 0x47, 0x24, 0x1e, 0x1d, 0x00, 0x4b, 0x26, 0xbd, 0x35, + 0xfa, 0xc6, 0xe9, 0x9a, 0x0d, 0xdf, 0x6e, 0xa8, 0x71, 0x15, 0xf6, 0xc4, 0xb6, 0xae, 0x31, 0x24, + 0x3b, 0x5b, 0x85, 0x42, 0x5b, 0x51, 0x05, 0x73, 0x7c, 0x8d, 0xff, 0x12, 0xe9, 0x61, 0xd7, 0xf7, + 0xd6, 0xc1, 0xd5, 0xa2, 0x4b, 0xd6, 0x90, 0x93, 0x54, 0x85, 0x37, 0x56, 0x96, 0x16, 0x07, 0x2f, + 0x12, 0x03, 0x0f, 0xf6, 0x45, 0xfd, 0x81, 0x6e, 0x4d, 0xd7, 0x67, 0xb0, 0x18, 0x0d, 0xc9, 0x71, + 0xc8, 0x9e, 0x2e, 0x42, 0xdd, 0x70, 0x96, 0x5a, 0x0d, 0x12, 0x0a, 0xc0, 0x3a, 0xc5, 0x93, 0x48, + 0xe9, 0xf4, 0xca, 0xee, 0xe0, 0x17, 0xeb, 0x30, 0x16, 0x43, 0x6e, 0x89, 0x23, 0xfd, 0xeb, 0xdb, + 0x8f, 0x81, 0x5e, 0x11, 0xf5, 0xa2, 0x35, 0x03, 0xc9, 0x43, 0xe9, 0xb8, 0x3d, 0xa5, 0x82, 0xa2, + 0x3a, 0x09, 0x2d, 0xd1, 0x83, 0xd6, 0xcd, 0xbc, 0xa9, 0x9c, 0x51, 0x11, 0xe4, 0xa6, 0xc1, 0xf8, + 0x36, 0x12, 0x94, 0x9b, 0x44, 0x68, 0xbb, 0xf6, 0x5d, 0xbe, 0x7c, 0x27, 0xdc, 0xc0, 0x22, 0x6e, + 0x88, 0xe1, 0x65, 0x26, 0xab, 0x57, 0xa6, 0xcb, 0x27, 0x89, 0x0f, 0x5c, 0x4d, 0x1e, 0xd4, 0x42, + 0xf4, 0x2e, 0x1b, 0x75, 0x95, 0xa8, 0x9e, 0x62, 0xdf, 0x17, 0xfc, 0x1c, 0xee, 0x0a, 0xf0, 0x8b, + 0x23, 0x17, 0xf5, 0xe3, 0xb8, 0x00, 0xcf, 0x84, 0xbd, 0xbb, 0x7d, 0x42, 0x71, 0x31, 0xeb, 0xbd, + 0x00, 0xb5, 0x95, 0x68, 0xc8, 0xb4, 0xbe, 0x15, 0x12, 0x8c, 0xe1, 0xce, 0x43, 0x36, 0xda, 0xd3, + 0xa8, 0xa4, 0xa1, 0xa4, 0x33, 0xb1, 0xb9, 0x9f, 0x2f, 0xb6, 0x0f, 0xe4, 0x3b, 0xb1, 0x72, 0xcd, + 0xf5, 0x60, 0x3c, 0x57, 0xbc, 0xbe, 0x38, 0x25, 0x09, 0x03, 0x33, 0xff, 0xf3, 0x65, 0x57, 0x64, + 0xb8, 0x26, 0xa2, 0xbd, 0xd0, 0x7e, 0xcb, 0xfb, 0xc9, 0xe9, 0xc2, 0xf4, 0x72, 0xae, 0xbb, 0xc1, + 0x86, 0x77, 0x04, 0xe0, 0x94, 0xab, 0xf1, 0x10, 0xe1, 0xdf, 0x8c, 0xf3, 0xc6, 0x73, 
0xcd, 0xfa, + 0x34, 0x6a, 0xf5, 0xc2, 0x63, 0x06, 0xd5, 0x09, 0x84, 0xe5, 0x22, 0xc9, 0x45, 0xdb, 0x80, 0xc8, + 0x80, 0xef, 0xc0, 0x5e, 0xed, 0x03, 0xea, 0x56, 0x0a, 0x6b, 0xca, 0x10, 0x74, 0x6e, 0xc2, 0x97, + 0x9d, 0x3b, 0x8d, 0x8e, 0x58, 0xee, 0xcb, 0xe2, 0x14, 0x68, 0x49, 0x16, 0x90, 0x34, 0xff, 0xdc, + 0x72, 0x7b, 0x06, 0x54, 0x7f, 0x90, 0x92, 0x7c, 0x8b, 0x39, 0x80, 0x03, 0x9e, 0x37, 0xaf, 0xaa, + 0x16, 0x48, 0xd4, 0x55, 0x10, 0xa2, 0xfe, 0xe0, 0x42, 0x2f, 0x7b, 0xae, 0xbb, 0xbf, 0xec, 0x58, + 0xcf, 0xcc, 0x6f, 0xd3, 0x3b, 0xdd, 0x39, 0x2c, 0x70, 0x39, 0x8a, 0x11, 0xfb, 0x20, 0x5d, 0x3e, + 0x1c, 0x99, 0xfd, 0x5b, 0x02, 0xc4, 0x6b, 0xf7, 0x4c, 0xb5, 0x5b, 0xd0, 0xc3, 0x58, 0xe0, 0x53, + 0xe0, 0x9a, 0x7b, 0x3b, 0xea, 0x3f, 0x05, 0x7e, 0xa7, 0x90, 0x72, 0x33, 0x32, 0x60, 0x6b, 0x7b, + 0x7a, 0x77, 0xf7, 0x6b, 0x6f, 0x7d, 0x35, 0x45, 0x76, 0x55, 0xef, 0xdc, 0x65, 0x55, 0xa2, 0x58, + 0x7b, 0x7f, 0x3e, 0x71, 0x01, 0x3d, 0x28, 0x16, 0xea, 0x39, 0x2d, 0x11, 0x26, 0x05, 0x40, 0x28, + 0xb4, 0x01, 0x2b, 0x57, 0x66, 0x57, 0x5a, 0xe8, 0xfb, 0x3b, 0xae, 0x9d, 0xe6, 0x16, 0x97, 0x05, + 0x6b, 0xb2, 0x74, 0x98, 0x2e, 0x54, 0x33, 0xfe, 0xc9, 0xc6, 0xea, 0x73, 0x9f, 0x3d, 0xa8, 0x1b, + 0x43, 0xf2, 0x8b, 0xea, 0x6a, 0x1a, 0xc3, 0x40, 0xef, 0x69, 0xa0, 0x1d, 0xd1, 0xec, 0xf4, 0x92, + 0xe9, 0xfb, 0x52, 0x62, 0x80, 0x7c, 0xcd, 0x17, 0x08, 0x3d, 0x29, 0xfd, 0x67, 0x3a, 0xb9, 0x84, + 0xc0, 0x84, 0x30, 0xc2, 0x42, 0x07, 0x2b, 0xc8, 0x01, 0x44, 0x48, 0x49, 0x53, 0x51, 0x14, 0xea, + 0x38, 0x3c, 0x9a, 0x42, 0x5d, 0x8a, 0x27, 0x0d, 0x3b, 0x90, 0x08, 0xa2, 0x9a, 0x5e, 0xa9, 0xcb, + 0x56, 0xf9, 0x3c, 0x94, 0x22, 0x67, 0xd6, 0xc6, 0xc6, 0xd5, 0x7f, 0x99, 0xb6, 0x73, 0x03, 0x8a, + 0x41, 0x54, 0x0e, 0x35, 0xc7, 0x2f, 0x84, 0x79, 0xb7, 0xa5, 0x65, 0x3e, 0x31, 0xbe, 0xed, 0x26, + 0x43, 0x1c, 0x2e, 0x8a, 0x72, 0x80, 0x66, 0xa6, 0x40, 0x71, 0x44, 0xdc, 0xf7, 0xe7, 0x14, 0xc7, + 0x56, 0xe0, 0xf1, 0x6f, 0xb5, 0xa5, 0xd4, 0xce, 0x90, 0xaa, 0xd1, 0xa4, 0x20, 0xa6, 0x00, 0x54, + 0x03, 0x73, 0x72, 0xb3, 0x1f, 0x86, 0x5b, 0x57, 0x19, 0x51, 0x5e, 0x4a, 0xea, 0x79, 0x18, 0xcc, + 0x91, 0x95, 0x5b, 0xbe, 0x3b, 0xcd, 0x32, 0x7c, 0x6e, 0x10, 0x22, 0x4d, 0xd7, 0x1a, 0x06, 0xd8, + 0xcf, 0x83, 0x76, 0xcd, 0x1f, 0x23, 0x02, 0xe0, 0xe2, 0x4d, 0x92, 0xdc, 0x4e, 0x25, 0x48, 0xf3, + 0xe8, 0x0c, 0x46, 0x07, 0x9e, 0xf8, 0x28, 0x6d, 0x50, 0xa8, 0xdd, 0x2a, 0xc7, 0x33, 0x55, 0xd7, + 0x50, 0x1d, 0xcd, 0x9e, 0x17, 0xb0, 0x98, 0xdf, 0xa1, 0x1d, 0xc1, 0x71, 0xb0, 0x03, 0x56, 0xba, + 0x33, 0xb8, 0x8f, 0x81, 0x4c, 0x14, 0x25, 0x36, 0xf3, 0xbb, 0x6e, 0x8e, 0xc5, 0x2c, 0xbc, 0x28, + 0xbf, 0xb8, 0xc1, 0x47, 0x56, 0x1e, 0xf5, 0xdf, 0xc7, 0x17, 0x7f, 0xd8, 0xa9, 0xd8, 0x9c, 0xe8, + 0xae, 0x43, 0x1b, 0xef, 0xd5, 0xb0, 0x96, 0x63, 0xee, 0x28, 0x90, 0xf5, 0x40, 0xa1, 0xda, 0x32, + 0xd0, 0xa6, 0x9b, 0xb2, 0xd8, 0x0a, 0x46, 0x9e, 0xed, 0xdd, 0xd1, 0x64, 0x50, 0x46, 0x4d, 0x49, + 0x87, 0xb7, 0xa1, 0x1a, 0x9b, 0x29, 0xc7, 0x1e, 0xf9, 0xda, 0x39, 0x2e, 0xb4, 0x5a, 0x5c, 0x91, + 0x85, 0x76, 0x39, 0xa1, 0x55, 0xc1, 0xfc, 0xd7, 0xda, 0x79, 0x05, 0x63, 0x4e, 0xe5, 0x15, 0x6b, + 0x9a, 0x16, 0x39, 0x91, 0x37, 0xec, 0x55, 0x8a, 0x4e, 0xe7, 0x75, 0xa0, 0x89, 0x6b, 0x92, 0x62, + 0x4e, 0x0d, 0x94, 0x2c, 0xf7, 0xd4, 0x15, 0x84, 0x7d, 0xfa, 0xe9, 0x96, 0xb4, 0xb2, 0x9d, 0xcc, + 0x71, 0x77, 0x6e, 0xc1, 0x6c, 0xb0, 0x6e, 0xd4, 0xb1, 0xc1, 0x4e, 0x7c, 0xc5, 0x6e, 0xce, 0x0f, + 0xbb, 0xdd, 0x09, 0xa0, 0x03, 0x8e, 0xce, 0x7b, 0x2c, 0x58, 0x3c, 0x19, 0x1d, 0x12, 0xd1, 0x1b, + 0xa2, 0x8f, 0x14, 0x69, 0xae, 0xca, 0x61, 0x5f, 0x94, 0xf0, 0x03, 0x8c, 0xcd, 0x3c, 0xaa, 0xc5, + 0x97, 0xf4, 
0xde, 0xde, 0x25, 0xe4, 0x83, 0xac, 0x63, 0x77, 0x19, 0x65, 0x33, 0x37, 0x96, 0x46, + 0xe4, 0x60, 0xc1, 0x64, 0x09, 0xe2, 0x90, 0x36, 0x52, 0x8f, 0xbb, 0x40, 0xa7, 0xdf, 0x52, 0x3d, + 0xd8, 0xe9, 0x78, 0xea, 0xfb, 0x92, 0xbe, 0x3b, 0xe3, 0x13, 0xfd, 0x0d, 0x11, 0x2f, 0x1d, 0xe4, + 0x66, 0x3c, 0xf7, 0x7d, 0xb3, 0x44, 0x81, 0x5f, 0xd0, 0xfd, 0x17, 0x2a, 0xe7, 0xcf, 0x3f, 0xcc, + 0xfe, 0x84, 0xe1, 0x51, 0xa5, 0x68, 0x82, 0xa0, 0x74, 0x06, 0x17, 0xb3, 0x76, 0x46, 0xb5, 0x55, + 0xeb, 0xb7, 0x86, 0x08, 0x2d, 0x25, 0x8e, 0x3c, 0x37, 0xde, 0xd2, 0xdd, 0x2d, 0x82, 0x52, 0x56, + 0x01, 0x9b, 0x6c, 0x8a, 0x57, 0xc0, 0x40, 0x06, 0x98, 0xbc, 0xae, 0xd4, 0xc4, 0x9a, 0xeb, 0xb4, + 0xa6, 0xd2, 0xe6, 0xe9, 0xc7, 0x59, 0x14, 0x38, 0xd2, 0x05, 0xae, 0xc9, 0x2a, 0xc7, 0x42, 0x4b, + 0x8d, 0x74, 0xad, 0x6e, 0x35, 0xbf, 0xc7, 0x6a, 0x68, 0x23, 0xf8, 0x45, 0x3d, 0xa9, 0xa0, 0x6c, + 0xa7, 0x42, 0xc2, 0xe0, 0x5f, 0x09, 0xae, 0x9c, 0xdf, 0xbd, 0xb4, 0xbd, 0x95, 0x6f, 0x08, 0x69, + 0x65, 0xb8, 0xc5, 0x85, 0xf7, 0x70, 0x12, 0x3a, 0x54, 0x7c, 0xa2, 0x2c, 0xee, 0x83, 0xa0, 0xba, + 0xe6, 0xd0, 0xec, 0x55, 0xf2, 0x25, 0xcf, 0x20, 0x4d, 0xe7, 0xcc, 0x55, 0xeb, 0xa8, 0x65, 0xcf, + 0xf9, 0x6c, 0xc6, 0xbf, 0x0a, 0x7d, 0x70, 0x75, 0xc0, 0xe4, 0xce, 0xf9, 0x03, 0x9f, 0xf9, 0xcf, + 0x51, 0xa8, 0xf3, 0x5a, 0x2f, 0x54, 0x95, 0x50, 0x83, 0xb1, 0xa4, 0xea, 0xf3, 0x20, 0x7c, 0xc5, + 0x23, 0x88, 0x4b, 0x57, 0xa3, 0x3d, 0x65, 0x7a, 0x5a, 0xe8, 0x5c, 0x8e, 0xf0, 0xb9, 0x2b, 0xc6, + 0x9a, 0x67, 0xb3, 0xf4, 0x82, 0xd4, 0x3c, 0x1a, 0x35, 0xbc, 0x62, 0xb9, 0x88, 0x2e, 0xf8, 0x65, + 0xa9, 0x93, 0x90, 0x95, 0xbd, 0x91, 0x17, 0x17, 0xf0, 0xa2, 0xda, 0x8f, 0xb1, 0xde, 0x32, 0x5f, + 0x72, 0x50, 0xc9, 0x2f, 0x90, 0xb2, 0x49, 0xf9, 0xcf, 0x15, 0x86, 0x22, 0x2e, 0x45, 0x35, 0x25, + 0x00, 0xf0, 0x71, 0x49, 0x67, 0x31, 0x48, 0x2d, 0x99, 0x1f, 0x41, 0xd5, 0x1a, 0x7c, 0x48, 0x71, + 0x29, 0xbf, 0x56, 0xc8, 0x14, 0x40, 0x5b, 0xf9, 0x4d, 0x13, 0x68, 0x3f, 0x6a, 0x6a, 0xb1, 0x98, + 0x62, 0xd3, 0x6d, 0x76, 0x3f, 0x93, 0x10, 0x54, 0xd0, 0x16, 0x0a, 0x29, 0xc7, 0xb8, 0x7d, 0x6e, + 0x52, 0x1e, 0xda, 0xc8, 0xbc, 0x83, 0xc9, 0x78, 0x49, 0x20, 0x9f, 0xe0, 0xf3, 0xaf, 0x2a, 0x3e, + 0xad, 0xbf, 0xd2, 0xd6, 0xf0, 0x6d, 0xd6, 0xdb, 0x37, 0xa1, 0x3a, 0x36, 0xc3, 0x7a, 0xf3, 0x89, + 0xac, 0x30, 0x59, 0xcc, 0x5c, 0xc3, 0x0b, 0x5c, 0x5f, 0x74, 0xcc, 0x1b, 0xdc, 0xd1, 0x61, 0x7c, + 0x2a, 0xf7, 0xc1, 0x90, 0x29, 0x1f, 0x01, 0xb6, 0x43, 0x56, 0xc0, 0xc3, 0x94, 0x65, 0x39, 0x37, + 0x00, 0x3b, 0x63, 0xb3, 0xc3, 0x8a, 0xaa, 0xa9, 0x82, 0x93, 0x3b, 0x04, 0xe1, 0x78, 0x12, 0x10, + 0x5a, 0x22, 0x4e, 0x66, 0xc9, 0x69, 0x44, 0xfd, 0xb9, 0x75, 0xd7, 0xbc, 0x4d, 0x94, 0x18, 0xfc, + 0x78, 0x50, 0xaa, 0xcf, 0xd1, 0x42, 0x47, 0x93, 0x97, 0x38, 0x78, 0xed, 0x8c, 0xe1, 0xe3, 0x65, + 0x2c, 0xfe, 0xf6, 0x82, 0xa1, 0xf8, 0x51, 0xcb, 0x87, 0x4a, 0x06, 0xe2, 0xae, 0x68, 0x4b, 0xcc, + 0x9c, 0xf5, 0x6a, 0xf1, 0x2b, 0x89, 0x3d, 0xdb, 0x03, 0x10, 0x27, 0x30, 0xd0, 0xca, 0xe7, 0x47, + 0xba, 0x98, 0xc0, 0x19, 0xbb, 0x3f, 0x78, 0x7e, 0x3e, 0xd0, 0x49, 0x0b, 0xbf, 0xf3, 0x1e, 0x86, + 0xc2, 0xd9, 0xe1, 0x4e, 0x22, 0x62, 0x17, 0xf2, 0xce, 0x3b, 0x6e, 0x28, 0x9b, 0xe4, 0x0e, 0x6f, + 0xa9, 0xae, 0x4c, 0xe4, 0xbf, 0x44, 0x50, 0x8d, 0xc2, 0x0b, 0x08, 0xf1, 0x7c, 0xef, 0xd0, 0xd6, + 0xb0, 0x10, 0xc2, 0xf0, 0xe7, 0x76, 0x57, 0x28, 0x8b, 0x65, 0x33, 0x32, 0x4d, 0x25, 0xa8, 0x28, + 0x62, 0x39, 0xb9, 0xd3, 0x50, 0xe7, 0x4a, 0xa1, 0x1f, 0x55, 0x64, 0x62, 0x61, 0x25, 0x31, 0xd6, + 0xc3, 0x91, 0xc1, 0x24, 0x79, 0xea, 0x73, 0x83, 0x26, 0x1d, 0x2e, 0x93, 0x42, 0xc8, 0x92, 0x87, + 0xa8, 0x09, 0x38, 0xf6, 0xa9, 0xf7, 
0xb5, 0xc7, 0x48, 0xb1, 0x5e, 0x45, 0x01, 0xbf, 0x16, 0x28, + 0x6f, 0xc1, 0xdc, 0xe5, 0x1b, 0xd1, 0x60, 0xcd, 0x16, 0x0e, 0xb3, 0xaf, 0x43, 0x32, 0x39, 0x27, + 0x82, 0x60, 0xdf, 0xb5, 0x48, 0xfa, 0x6d, 0x7c, 0xd2, 0x53, 0x8d, 0x63, 0x4b, 0x05, 0x2b, 0x07, + 0x1e, 0x68, 0xc9, 0x3a, 0xed, 0x5e, 0x1c, 0x71, 0x46, 0x2d, 0x53, 0x3f, 0x7b, 0x3d, 0x52, 0xe1, + 0xd5, 0x17, 0xc1, 0xa3, 0x83, 0x36, 0x79, 0x85, 0x65, 0x2b, 0x42, 0xb2, 0xd6, 0xe2, 0x75, 0x71, + 0x88, 0x78, 0x5c, 0xd2, 0x81, 0xd9, 0x9f, 0xc1, 0xcd, 0x60, 0x26, 0x2c, 0xfd, 0x0a, 0x98, 0xa6, + 0xc0, 0x25, 0x36, 0x5c, 0x71, 0xe5, 0x2f, 0x88, 0x79, 0x33, 0xcb, 0x9f, 0x68, 0x19, 0x6e, 0x27, + 0x89, 0xfc, 0x57, 0xcd, 0xab, 0x37, 0x25, 0xb0, 0xcf, 0x35, 0xcd, 0x77, 0x65, 0xc8, 0x59, 0x0b, + 0x27, 0xbf, 0x94, 0xda, 0xad, 0xf7, 0x86, 0x22, 0x5e, 0x50, 0x09, 0x9e, 0xeb, 0x98, 0xdd, 0x5d, + 0xbd, 0x65, 0xbf, 0x00, 0x0b, 0xeb, 0x72, 0xcd, 0x83, 0xcb, 0xc9, 0x89, 0xaf, 0x0b, 0x8e, 0x83, + 0x91, 0xc9, 0xe4, 0x58, 0x1b, 0x71, 0x50, 0xb6, 0xab, 0xe2, 0x49, 0x6b, 0x7d, 0x75, 0xa1, 0x40, + 0x5f, 0x91, 0x7c, 0x00, 0x77, 0x72, 0x0d, 0xf6, 0x82, 0xbe, 0x2b, 0x8c, 0xb2, 0x53, 0x39, 0x90, + 0x6f, 0x18, 0x90, 0xf6, 0xc0, 0x25, 0x9a, 0x06, 0x44, 0x5d, 0x09, 0x13, 0x35, 0x51, 0x83, 0x53, + 0x53, 0xf3, 0x78, 0x10, 0xd7, 0x4a, 0x26, 0x12, 0xcf, 0xdc, 0x55, 0xdf, 0xef, 0xbf, 0x7f, 0x7e, + 0x8d, 0x2b, 0x91, 0x75, 0x2e, 0xad, 0x85, 0xf1, 0x83, 0xeb, 0x09, 0x77, 0x27, 0xdd, 0x4b, 0x83, + 0x31, 0x2a, 0xd6, 0x38, 0x47, 0x44, 0x24, 0x3f, 0xca, 0x5e, 0xc6, 0x9f, 0x23, 0xeb, 0xea, 0xc6, + 0x4e, 0x15, 0xe7, 0x7a, 0xae, 0xc2, 0x9c, 0x2a, 0x2f, 0x3e, 0x54, 0x73, 0x27, 0xa7, 0xf4, 0x92, + 0x2d, 0x4d, 0x20, 0x96, 0xba, 0x8d, 0x61, 0xa9, 0x2c, 0x9d, 0x67, 0x2b, 0xb4, 0x75, 0xd5, 0x01, + 0x8b, 0xd4, 0x79, 0x24, 0xa3, 0x38, 0xf4, 0xea, 0x5a, 0xf2, 0xb1, 0x4c, 0x25, 0xee, 0x0b, 0x61, + 0x90, 0xb8, 0x5d, 0x0c, 0xaf, 0x7e, 0xcb, 0x3f, 0x94, 0x18, 0x01, 0x46, 0x8e, 0x53, 0xe0, 0xd8, + 0x32, 0xcd, 0xaa, 0x9e, 0xc4, 0x98, 0xec, 0x39, 0xe7, 0xe0, 0x13, 0xe2, 0xbc, 0x67, 0xdb, 0xba, + 0x2e, 0xdc, 0x17, 0x1b, 0x6e, 0x31, 0x6b, 0x9e, 0x95, 0xfd, 0x09, 0x72, 0xc4, 0xea, 0x94, 0x1a, + 0x12, 0x18, 0x8e, 0x2e, 0xfc, 0x1f, 0xf0, 0xd1, 0x53, 0x69, 0x6b, 0x0a, 0x04, 0xb9, 0x64, 0x83, + 0xbb, 0x8b, 0x3e, 0xac, 0x7f, 0x07, 0xe7, 0x12, 0x38, 0x38, 0xd0, 0x45, 0x9b, 0xac, 0x05, 0x1b, + 0x2c, 0xee, 0x77, 0xac, 0xa5, 0xdf, 0xfa, 0x1c, 0x77, 0x6e, 0x47, 0x58, 0x38, 0x7a, 0x6c, 0x23, + 0xea, 0x71, 0x0d, 0xd5, 0x5c, 0x4a, 0xf9, 0x03, 0xca, 0xf9, 0xba, 0x40, 0x0b, 0x15, 0x63, 0x84, + 0x83, 0x1c, 0x35, 0x44, 0xb9, 0x31, 0x11, 0x70, 0xf3, 0x8c, 0xcf, 0xfa, 0x75, 0xab, 0x94, 0xe4, + 0xd2, 0xce, 0xa8, 0x4c, 0x82, 0x66, 0xf3, 0xa9, 0x7c, 0xf0, 0x40, 0x9d, 0xa5, 0x58, 0x78, 0xa7, + 0x87, 0xfc, 0xef, 0xa4, 0xdf, 0x02, 0x75, 0xcb, 0x59, 0x3c, 0x9e, 0x34, 0x6c, 0xa6, 0x67, 0x86, + 0xaa, 0x50, 0xc0, 0xce, 0x46, 0x2a, 0xa5, 0x71, 0xcd, 0x37, 0x53, 0x51, 0xcb, 0x72, 0x31, 0x04, + 0x98, 0xef, 0xd0, 0x88, 0x18, 0x23, 0x4e, 0x59, 0x3f, 0xe7, 0x93, 0x16, 0xdc, 0x50, 0x78, 0xaf, + 0xe3, 0x07, 0xdc, 0x51, 0xb0, 0x0d, 0xd6, 0x96, 0x8c, 0x53, 0x2f, 0x54, 0x30, 0x9c, 0xd3, 0x57, + 0x10, 0xee, 0xd3, 0xb9, 0x5d, 0x40, 0x3d, 0xb1, 0x08, 0x32, 0x5d, 0x8c, 0xdc, 0xcb, 0x11, 0x54, + 0x2e, 0x44, 0x2b, 0x46, 0xd2, 0x51, 0xfe, 0xc8, 0xbb, 0x5b, 0x07, 0x08, 0xba, 0x76, 0x2d, 0xd9, + 0x5d, 0x57, 0xda, 0xb5, 0x52, 0xe4, 0x63, 0x2d, 0xef, 0x20, 0x85, 0xa2, 0xe4, 0xc8, 0x1c, 0x12, + 0x0d, 0xa0, 0x51, 0xf9, 0xb9, 0x5a, 0x9c, 0x2f, 0x65, 0x66, 0x68, 0x98, 0xe3, 0xa2, 0xf2, 0x92, + 0xe0, 0xd7, 0x9d, 0x3e, 0x1c, 0x1c, 0xaa, 0xd5, 0x31, 0xd1, 
0x80, 0xc7, 0xa3, 0x0e, 0x68, 0xa1, + 0xc4, 0xc6, 0x07, 0xb9, 0x0e, 0x13, 0xd2, 0x8a, 0xbc, 0x87, 0x7d, 0xbf, 0x13, 0xfc, 0x5d, 0x6b, + 0xe8, 0x55, 0x67, 0xa6, 0x7c, 0x8a, 0xc4, 0x6f, 0x70, 0x39, 0xd3, 0xce, 0xbc, 0xf5, 0xe8, 0xbf, + 0xf1, 0xca, 0x61, 0xd4, 0xfc, 0x48, 0x82, 0x75, 0x37, 0x1b, 0x16, 0x8e, 0x4a, 0xd5, 0x95, 0x54, + 0x42, 0x20, 0x03, 0x63, 0x6b, 0x21, 0x6f, 0xfa, 0xad, 0x1d, 0xa5, 0xb0, 0x98, 0x94, 0x2c, 0x94, + 0x80, 0xa2, 0xd8, 0x88, 0xf9, 0xf0, 0xca, 0x94, 0x65, 0xd0, 0xc8, 0xed, 0x99, 0x15, 0x0d, 0x96, + 0x68, 0x50, 0x12, 0xce, 0xa7, 0x93, 0xc3, 0x6c, 0xbd, 0xc4, 0x0a, 0x40, 0xfb, 0x93, 0x65, 0x26, + 0xac, 0xa5, 0xc3, 0xaf, 0xe9, 0x66, 0x76, 0x74, 0xde, 0xfa, 0x12, 0x1f, 0x76, 0x2c, 0x85, 0x9c, + 0x37, 0x2c, 0x0e, 0xaa, 0x98, 0xe5, 0xb3, 0xe4, 0x08, 0xdc, 0x98, 0xe4, 0x9a, 0x12, 0x55, 0xf7, + 0xd9, 0x23, 0xd9, 0xeb, 0xd4, 0x35, 0xe9, 0x9a, 0x49, 0x56, 0x8f, 0x09, 0x92, 0x92, 0x2f, 0x67, + 0x0a, 0x54, 0x71, 0xee, 0xc2, 0x91, 0x4a, 0xcd, 0xf4, 0x9d, 0x99, 0xd8, 0xbd, 0x67, 0xc9, 0x2e, + 0xf5, 0x8a, 0x27, 0x04, 0x5f, 0x51, 0x0d, 0xf2, 0x2b, 0xee, 0xe0, 0x26, 0x00, 0x63, 0xf8, 0x13, + 0x64, 0xd3, 0xbd, 0x0a, 0xe9, 0x56, 0x61, 0x17, 0x2c, 0xe0, 0x07, 0xa6, 0x95, 0x81, 0x28, 0x5d, + 0x5f, 0xf1, 0x3b, 0x18, 0xd3, 0x6d, 0x2f, 0xc5, 0xa0, 0x3f, 0x45, 0xb9, 0x09, 0xd0, 0x15, 0x0d, + 0xb7, 0x8c, 0xfd, 0x37, 0xa1, 0x0c, 0xb1, 0xdc, 0x9b, 0xc0, 0x25, 0x3c, 0x44, 0x05, 0xd5, 0x4c, + 0x2c, 0x1b, 0x67, 0x87, 0x8f, 0x6a, 0x59, 0x42, 0x7d, 0x36, 0xd0, 0xaa, 0xf4, 0xb8, 0xff, 0xb8, + 0xb9, 0xf6, 0xa3, 0xc4, 0xaf, 0x95, 0x4c, 0x30, 0x66, 0xf3, 0xd7, 0xb3, 0x92, 0x0f, 0x3f, 0xa0, + 0xa4, 0x3f, 0xfc, 0x22, 0x15, 0xd7, 0x39, 0x15, 0xdd, 0xac, 0xc8, 0x46, 0x71, 0x1a, 0xa2, 0xbd, + 0xea, 0x9d, 0x85, 0xa6, 0xb2, 0xbb, 0x51, 0x8b, 0xeb, 0x8d, 0x80, 0x78, 0xd0, 0x93, 0x7e, 0x1a, + 0x0f, 0x54, 0x40, 0xc1, 0xad, 0x37, 0xe4, 0x6a, 0x78, 0xcb, 0x53, 0xcd, 0x2e, 0x77, 0xb1, 0x53, + 0x9c, 0xc4, 0x86, 0xc8, 0x5d, 0x3c, 0x84, 0x99, 0xbe, 0x58, 0x9c, 0xcd, 0x5e, 0x42, 0x77, 0x55, + 0x9a, 0xc2, 0xc9, 0x60, 0xa9, 0x66, 0xba, 0x8c, 0x57, 0xb5, 0xca, 0x4c, 0xba, 0x8e, 0xdd, 0x4d, + 0x1d, 0x7a, 0x11, 0x5b, 0x5b, 0x5c, 0xd4, 0xe4, 0x4b, 0xc0, 0x14, 0x8f, 0xfc, 0xcb, 0x5f, 0x15, + 0x9e, 0x9b, 0x15, 0x1d, 0xf6, 0xa3, 0x52, 0x2f, 0x07, 0xc6, 0x2e, 0x1a, 0xfa, 0xe9, 0x61, 0xb8, + 0x45, 0x87, 0x1c, 0x17, 0xb4, 0x28, 0x98, 0xcd, 0x00, 0xe5, 0x50, 0x81, 0x02, 0xdc, 0x31, 0x0e, + 0x33, 0xae, 0xad, 0x16, 0x73, 0x28, 0xa6, 0x1e, 0x9d, 0xf0, 0x4f, 0x9a, 0x1d, 0x30, 0x8b, 0xb4, + 0xd4, 0x7c, 0x94, 0x4a, 0x1d, 0xf4, 0xe0, 0xd5, 0x48, 0x96, 0x46, 0xb9, 0x1f, 0x46, 0x0b, 0x6d, + 0x04, 0xc4, 0xf2, 0xc7, 0x07, 0x6d, 0x0c, 0x8a, 0x91, 0x20, 0x95, 0x7c, 0x15, 0xad, 0xd4, 0xdc, + 0x36, 0xbf, 0x1f, 0xa9, 0xd3, 0xaf, 0xdb, 0x0d, 0xca, 0x2b, 0xb3, 0x22, 0xed, 0x35, 0x4c, 0x00, + 0x9b, 0x91, 0xd9, 0x5c, 0xa0, 0xe5, 0x1b, 0x46, 0x35, 0x5c, 0x81, 0x28, 0x1d, 0x9a, 0x6d, 0xa8, + 0x1d, 0x90, 0x6e, 0xf3, 0x12, 0xce, 0x55, 0x78, 0x01, 0x8f, 0x82, 0x65, 0xb6, 0x3d, 0x32, 0x47, + 0x34, 0x7a, 0x77, 0xdf, 0xbb, 0xea, 0x20, 0xd2, 0xc1, 0x83, 0xa4, 0x9c, 0xbd, 0x04, 0x42, 0x88, + 0xb6, 0x47, 0x97, 0x44, 0xf2, 0xdf, 0xd8, 0xb6, 0xbc, 0x53, 0x01, 0x06, 0xa7, 0xfd, 0x32, 0x0e, + 0x74, 0x4a, 0x7a, 0xf7, 0x86, 0x0a, 0x08, 0x06, 0x14, 0x7b, 0xb6, 0x8d, 0x42, 0x34, 0xaf, 0x11, + 0xe3, 0x36, 0x9d, 0x25, 0x9b, 0x0c, 0x03, 0x70, 0x83, 0x82, 0x7c, 0xeb, 0xef, 0x0a, 0x7e, 0x70, + 0x00, 0xcc, 0x78, 0x82, 0x66, 0x84, 0x95, 0x46, 0x75, 0x8f, 0x44, 0x22, 0x17, 0x0a, 0xbf, 0xde, + 0x35, 0xae, 0x34, 0x42, 0x35, 0x41, 0x6d, 0x49, 0xca, 0x00, 0x62, 0x03, 0xdb, 0x0d, 
0x13, 0x53, + 0xb2, 0x82, 0x40, 0x43, 0x42, 0xb1, 0xe3, 0xe5, 0x03, 0x1c, 0x22, 0xef, 0x73, 0xca, 0xb0, 0x62, + 0xa4, 0x5a, 0x1c, 0x8e, 0x6f, 0xc7, 0xca, 0x92, 0xfb, 0x39, 0x1e, 0x74, 0xd9, 0xee, 0xb6, 0x25, + 0x99, 0x76, 0x83, 0x69, 0x01, 0xee, 0x81, 0xbc, 0x18, 0x8f, 0x9f, 0xf5, 0x9c, 0xd6, 0x3c, 0x6e, + 0x81, 0x9b, 0x6a, 0x16, 0x34, 0x85, 0xe0, 0xa6, 0x01, 0x67, 0xed, 0x82, 0x08, 0x35, 0x6d, 0x75, + 0x9d, 0x27, 0x7d, 0xf5, 0xe2, 0x04, 0x95, 0x99, 0xdd, 0x35, 0x7b, 0xe4, 0x38, 0x1b, 0xf1, 0xd1, + 0x29, 0x36, 0x2f, 0x16, 0x20, 0x24, 0x43, 0xf4, 0xfe, 0x61, 0x7d, 0x45, 0x4f, 0xce, 0x4f, 0x3a, + 0x38, 0x53, 0x7f, 0x72, 0xf1, 0x4d, 0xfd, 0xdf, 0x64, 0x48, 0xad, 0xc1, 0x29, 0x70, 0x4e, 0x63, + 0x7e, 0xbc, 0xda, 0xc2, 0x3e, 0x72, 0x7c, 0x17, 0x0c, 0xc7, 0x85, 0xec, 0xc0, 0x2a, 0x6d, 0x88, + 0xda, 0xb4, 0x02, 0xf4, 0x7d, 0x9d, 0x09, 0xb7, 0x6a, 0x7a, 0x3b, 0xb2, 0x8b, 0x14, 0x5c, 0xfd, + 0x5c, 0x3f, 0xef, 0xad, 0x44, 0xdf, 0xd4, 0x5e, 0x5d, 0x6e, 0xf0, 0x7e, 0x27, 0x23, 0xcd, 0x07, + 0xb1, 0x2b, 0x52, 0xa5, 0xb5, 0xfc, 0x88, 0x37, 0xae, 0x18, 0xc1, 0x5c, 0x98, 0xf3, 0x08, 0xc5, + 0x7d, 0x3c, 0xc4, 0x0d, 0xb2, 0xfd, 0x19, 0x4d, 0x95, 0x35, 0x9d, 0x5f, 0xf9, 0x2e, 0x46, 0x4f, + 0x3a, 0xa1, 0x4a, 0xa7, 0xf2, 0x39, 0xa6, 0x86, 0x58, 0xd0, 0x2f, 0xb5, 0x79, 0xa2, 0x3d, 0x19, + 0x12, 0x12, 0xbc, 0x0e, 0x7d, 0x1b, 0x28, 0x5c, 0xc5, 0x69, 0xa2, 0x2d, 0x0d, 0x59, 0x38, 0xd3, + 0x1a, 0x95, 0x7c, 0x02, 0x88, 0xd4, 0x65, 0xbd, 0x24, 0x27, 0xe9, 0xc1, 0x06, 0x62, 0xa7, 0x76, + 0x67, 0x5e, 0xd7, 0xa9, 0x4f, 0xb4, 0xea, 0x31, 0xf6, 0x6c, 0xab, 0x1c, 0x79, 0xdc, 0xec, 0x4c, + 0xfd, 0x48, 0x00, 0xaf, 0x05, 0xcb, 0xc2, 0xa6, 0x5a, 0x87, 0x67, 0x45, 0xb9, 0xe8, 0xe5, 0x27, + 0x06, 0x72, 0x52, 0xd0, 0xc8, 0xf5, 0xae, 0x09, 0xe1, 0xc0, 0xcf, 0x85, 0xa8, 0x50, 0xda, 0x88, + 0xc4, 0xc3, 0xc9, 0xa5, 0xc1, 0xda, 0x08, 0xa3, 0x52, 0xbb, 0xbf, 0x42, 0xb9, 0x09, 0x95, 0xfe, + 0xb9, 0x56, 0xbd, 0x54, 0x31, 0x1f, 0x5c, 0x4e, 0x32, 0xea, 0x13, 0xde, 0x15, 0x93, 0x45, 0x10, + 0x3e, 0x1b, 0x97, 0x10, 0xa0, 0x8e, 0x17, 0x3b, 0x22, 0xfe, 0xb0, 0x36, 0x86, 0x38, 0x64, 0x63, + 0x0a, 0x2b, 0x78, 0x95, 0xba, 0xf8, 0x7f, 0x37, 0xe1, 0xcc, 0xf2, 0x33, 0xfe, 0xbc, 0x34, 0x28, + 0xaa, 0x34, 0x43, 0x55, 0xb7, 0x95, 0x3e, 0xe9, 0x2e, 0xf6, 0x08, 0xec, 0xbd, 0x38, 0x6b, 0x1a, + 0x61, 0x3c, 0x69, 0x27, 0x62, 0xae, 0xa7, 0x04, 0x1e, 0x0d, 0xf2, 0xe7, 0x0d, 0xb0, 0xb7, 0x9c, + 0x9e, 0xbf, 0x0d, 0xd6, 0x66, 0xca, 0xb0, 0xd6, 0xf3, 0xee, 0x39, 0x1c, 0xb7, 0xc1, 0x1c, 0x68, + 0x4e, 0xb9, 0xc7, 0x4a, 0xbf, 0xa0, 0x85, 0xb3, 0x2a, 0x87, 0xe5, 0x3f, 0xb9, 0x26, 0x05, 0x45, + 0x82, 0xce, 0x6c, 0xb5, 0xc8, 0x39, 0xea, 0x62, 0xc5, 0x0b, 0x7b, 0x0f, 0x40, 0xbb, 0xd4, 0x69, + 0x87, 0xf9, 0x63, 0x2f, 0xab, 0xf7, 0xd2, 0x79, 0xcb, 0x25, 0x34, 0x42, 0xdb, 0x57, 0x03, 0xfb, + 0x0f, 0xf7, 0x90, 0xb4, 0xc4, 0xfb, 0x20, 0xc8, 0xa8, 0xa8, 0xcc, 0x81, 0x00, 0xb9, 0xbb, 0x2d, + 0xdd, 0xb4, 0xe7, 0x12, 0xee, 0x71, 0xe4, 0xd7, 0x93, 0x2d, 0x73, 0xfe, 0x95, 0x05, 0x3e, 0x28, + 0x70, 0x15, 0x0d, 0x54, 0x8d, 0x5b, 0x0c, 0xef, 0x21, 0x68, 0xf2, 0x70, 0xb5, 0x64, 0x08, 0x13, + 0xb9, 0x32, 0x73, 0x5a, 0x41, 0xbd, 0xa0, 0x1b, 0x44, 0x38, 0xf7, 0xf6, 0x13, 0xd9, 0x1a, 0x91, + 0x91, 0xc3, 0xd4, 0x67, 0xe5, 0x88, 0xa2, 0xdb, 0x97, 0x84, 0x40, 0xd8, 0x5d, 0x2a, 0xe9, 0x91, + 0x15, 0x82, 0xdf, 0x5d, 0x6d, 0x37, 0x40, 0x8e, 0x26, 0x4b, 0x33, 0x72, 0x30, 0x6c, 0x61, 0x8c, + 0xf2, 0x26, 0x30, 0x92, 0x91, 0x7e, 0x6e, 0x6a, 0xa8, 0x03, 0x99, 0x6a, 0xf7, 0x46, 0xdc, 0x78, + 0xe0, 0x9e, 0xbc, 0x93, 0x4d, 0xaf, 0xd0, 0xfc, 0x5e, 0x54, 0xad, 0xe6, 0x74, 0x09, 0x64, 0x3d, + 0x4f, 0x09, 
0xba, 0x6c, 0xbd, 0x68, 0xe2, 0xea, 0xcd, 0x63, 0xdd, 0xb7, 0xdf, 0x2d, 0xcd, 0x0d, + 0x39, 0x49, 0x3c, 0x9a, 0xaa, 0x5f, 0x6d, 0x2b, 0xe3, 0x7c, 0xdf, 0x26, 0x55, 0x1e, 0xe4, 0x55, + 0xb9, 0xc5, 0x7f, 0xf1, 0x07, 0x7e, 0x42, 0x22, 0x73, 0xc2, 0xaa, 0xbf, 0xa4, 0xbc, 0xab, 0x7d, + 0x96, 0x93, 0xd6, 0x79, 0x61, 0x40, 0x0c, 0xda, 0x78, 0x17, 0xf9, 0xd2, 0x66, 0x24, 0xd8, 0x1e, + 0xe3, 0x40, 0xf4, 0xd9, 0xa4, 0x4a, 0x18, 0x30, 0xa9, 0x2c, 0xec, 0xfb, 0xd9, 0xa2, 0x64, 0xa3, + 0x1f, 0x64, 0x55, 0x45, 0xc2, 0x20, 0x0a, 0x21, 0xbf, 0x6d, 0x56, 0xbe, 0x85, 0x0e, 0xa2, 0x5d, + 0x42, 0x68, 0x7a, 0xdd, 0xa3, 0x31, 0xb8, 0x58, 0x35, 0xf4, 0xef, 0xb9, 0xe2, 0x86, 0x68, 0x1e, + 0x8c, 0x1e, 0x29, 0x7b, 0xd9, 0xb1, 0x16, 0x0f, 0x13, 0xdd, 0x9e, 0xa8, 0xed, 0x59, 0xea, 0xf3, + 0x25, 0xd7, 0xaa, 0x08, 0xef, 0x2d, 0x6e, 0x71, 0xe9, 0x48, 0xae, 0x65, 0x14, 0x6f, 0x5d, 0x87, + 0xd4, 0xdf, 0xfd, 0x49, 0x62, 0x00, 0xcf, 0x18, 0x2f, 0x9b, 0xab, 0xa8, 0x6e, 0xc4, 0x97, 0x06, + 0x07, 0xe7, 0xe1, 0x22, 0xc2, 0x3e, 0xac, 0x65, 0x1e, 0x3c, 0x3e, 0xd7, 0xef, 0x64, 0x97, 0x63, + 0xc0, 0x61, 0xff, 0x72, 0x78, 0x41, 0xfc, 0xdd, 0x14, 0x30, 0x44, 0x1f, 0x85, 0xd2, 0x27, 0xa4, + 0xee, 0x12, 0x8b, 0xce, 0x88, 0xd6, 0xc0, 0x92, 0xb9, 0xba, 0xc3, 0x81, 0x83, 0xe2, 0xa6, 0x8e, + 0xeb, 0x3f, 0x8e, 0x05, 0x0f, 0xa3, 0x17, 0x01, 0x3f, 0xf4, 0xee, 0x82, 0x84, 0x58, 0xbf, 0xc0, + 0xaa, 0x95, 0x70, 0x63, 0x8e, 0x67, 0x40, 0x24, 0xae, 0x1d, 0x82, 0xc7, 0x6d, 0x9a, 0xd1, 0x68, + 0x45, 0x9e, 0x23, 0x28, 0x1e, 0x72, 0xb3, 0x71, 0x4b, 0xc5, 0x19, 0x01, 0x7d, 0xc5, 0x65, 0xeb, + 0xc2, 0x0c, 0x4a, 0xb6, 0x90, 0x89, 0x87, 0x16, 0x77, 0xb4, 0x3f, 0x83, 0x63, 0xc1, 0x65, 0x33, + 0x16, 0xd9, 0x40, 0x9e, 0xac, 0x88, 0x51, 0xa0, 0x4e, 0x22, 0x9e, 0x48, 0x2c, 0xe6, 0xe5, 0x5b, + 0xb5, 0xcf, 0x23, 0xd1, 0x2f, 0xbd, 0x73, 0xc6, 0x31, 0x6c, 0x5b, 0x56, 0x94, 0xd8, 0xde, 0x5a, + 0x87, 0x13, 0x84, 0xde, 0x2a, 0x5e, 0xa1, 0xbd, 0xf2, 0x83, 0x20, 0x72, 0xb7, 0x78, 0x06, 0x5f, + 0xcc, 0xa2, 0xdd, 0x33, 0x07, 0x19, 0x1f, 0x2c, 0xd1, 0xa2, 0x2f, 0xd8, 0xd5, 0xa0, 0x0b, 0xf3, + 0xe3, 0x2b, 0x11, 0x4e, 0xae, 0x12, 0x7e, 0xbe, 0xd4, 0x0e, 0xfd, 0xd2, 0x31, 0x83, 0x54, 0x45, + 0xdd, 0xbb, 0x02, 0xa4, 0x20, 0x40, 0x6e, 0x30, 0xaf, 0xfd, 0x44, 0xf7, 0x82, 0xe2, 0x03, 0xab, + 0x0c, 0xdc, 0xff, 0x34, 0xb6, 0xb1, 0xb1, 0x2b, 0x7b, 0x59, 0x30, 0x35, 0x2f, 0xdd, 0xbf, 0x3a, + 0xfb, 0xcb, 0x14, 0x71, 0x7f, 0x86, 0x99, 0xc2, 0x92, 0xe5, 0xe4, 0xf4, 0xe5, 0x87, 0x5a, 0x0c, + 0xdf, 0x3b, 0xc8, 0x77, 0xd6, 0x23, 0x91, 0x0c, 0xa3, 0x0f, 0xf9, 0x29, 0x64, 0x1a, 0x83, 0x36, + 0xce, 0xf9, 0x99, 0x40, 0xab, 0xdc, 0xd9, 0x6b, 0xdc, 0x08, 0x39, 0x1d, 0x7b, 0xe0, 0x53, 0x9e, + 0x49, 0xb0, 0x80, 0x8a, 0xdd, 0xd0, 0x3e, 0x0d, 0xa9, 0x2c, 0xf0, 0x7e, 0xeb, 0x9b, 0x1e, 0xe5, + 0x0c, 0xc1, 0xe2, 0x90, 0xff, 0x65, 0x44, 0x11, 0xf3, 0x8a, 0xb6, 0xa7, 0x14, 0xab, 0x06, 0xfe, + 0x03, 0x83, 0xeb, 0x79, 0xd6, 0xd8, 0x35, 0x5f, 0x66, 0xfa, 0xe3, 0x87, 0x0d, 0xf0, 0x24, 0x85, + 0xd9, 0xc0, 0xc9, 0x61, 0xb0, 0xa1, 0x8a, 0x38, 0xf5, 0x62, 0x20, 0x31, 0xb7, 0xb8, 0x9b, 0xfc, + 0xf1, 0xa5, 0xca, 0xee, 0xac, 0x07, 0x18, 0xc0, 0x39, 0x7f, 0x38, 0x78, 0xc6, 0x0d, 0x3c, 0xcc, + 0xf5, 0x86, 0xe0, 0x2d, 0x0f, 0x04, 0x80, 0xaa, 0x25, 0xc6, 0x4d, 0x3b, 0x39, 0x3a, 0x8e, 0xbc, + 0xca, 0x20, 0xaf, 0xe5, 0xd5, 0x30, 0x0b, 0x94, 0xe4, 0x18, 0x55, 0x53, 0x2f, 0x5d, 0xaa, 0xe2, + 0x36, 0x3b, 0x9f, 0x09, 0x06, 0x3a, 0xe7, 0x5a, 0x61, 0x42, 0xef, 0xde, 0x18, 0x42, 0x31, 0x0f, + 0xc3, 0x3a, 0xd3, 0x9e, 0x9b, 0x34, 0x0c, 0xd1, 0xd8, 0x13, 0xd1, 0xb7, 0xc1, 0xaf, 0x49, 0x9b, + 0x38, 0xc3, 0x8f, 0x0c, 0x0a, 0xf3, 
0xaa, 0x52, 0xda, 0x99, 0x77, 0xbc, 0x1f, 0xb6, 0xca, 0x93, + 0x8a, 0x76, 0xdd, 0x2a, 0xbb, 0xfe, 0x9c, 0x77, 0xe5, 0x2c, 0xb6, 0x91, 0x96, 0x7c, 0x2f, 0x9b, + 0xba, 0x04, 0xa9, 0x0f, 0xee, 0x47, 0x55, 0x93, 0xaa, 0x80, 0x9f, 0x6e, 0x5b, 0x14, 0x53, 0xbb, + 0x03, 0xaa, 0x99, 0x00, 0xfc, 0xf0, 0xba, 0xfb, 0x6c, 0x07, 0xe8, 0xb7, 0x45, 0x5a, 0xcf, 0x41, + 0xa4, 0x1e, 0x24, 0x89, 0x61, 0x86, 0xe8, 0xc6, 0x9c, 0x10, 0xf5, 0x8b, 0x95, 0xff, 0xda, 0x5a, + 0xfb, 0x78, 0x5d, 0x67, 0xd0, 0xda, 0x5a, 0x5d, 0xe3, 0x76, 0xb4, 0xed, 0x94, 0x4b, 0x7f, 0x11, + 0x60, 0xf0, 0x11, 0xd8, 0xa0, 0x7e, 0x6a, 0xea, 0x11, 0x6c, 0x5b, 0x89, 0xea, 0xa7, 0x9c, 0xd5, + 0xc4, 0xd6, 0xc7, 0xf7, 0xd1, 0xaa, 0x77, 0xf5, 0x15, 0x24, 0x6c, 0xea, 0x2b, 0xe8, 0x0d, 0x85, + 0xef, 0x69, 0x4b, 0x16, 0xfc, 0x85, 0x04, 0x03, 0x4e, 0xcc, 0x90, 0xcb, 0x9e, 0xbe, 0x41, 0x33, + 0xbd, 0x6e, 0x8a, 0x4b, 0x7a, 0xe1, 0x46, 0xd6, 0x1b, 0x96, 0xc3, 0xf4, 0x25, 0xde, 0x34, 0xa2, + 0xf4, 0xb0, 0xab, 0xe2, 0x4b, 0xfd, 0x59, 0x0b, 0x8c, 0xe3, 0x32, 0xb6, 0xa9, 0x4b, 0xa4, 0x4a, + 0x9e, 0xb2, 0x74, 0x79, 0x55, 0x93, 0x89, 0x7f, 0x55, 0x2b, 0x9a, 0xd0, 0xa4, 0xb3, 0x2e, 0x4c, + 0x94, 0x52, 0xa2, 0x03, 0x5a, 0x2f, 0x86, 0x0d, 0xd8, 0xce, 0xd0, 0xe4, 0xd8, 0xce, 0x4b, 0x8e, + 0x5c, 0xcc, 0x5c, 0x2c, 0xd2, 0x6e, 0xa6, 0xa9, 0x04, 0x15, 0xf6, 0xa7, 0x99, 0x8f, 0x9f, 0x9c, + 0x10, 0x85, 0x3c, 0xf5, 0x01, 0x36, 0xa9, 0x24, 0x61, 0xcc, 0x95, 0x15, 0xc7, 0xaf, 0x60, 0x24, + 0xb3, 0x5b, 0xdb, 0xc8, 0x64, 0x42, 0xe0, 0xc5, 0xb2, 0xaf, 0xf5, 0xed, 0xce, 0x86, 0x21, 0x1e, + 0xa6, 0x6e, 0x1c, 0x7b, 0xaf, 0xd0, 0x2f, 0x1c, 0xda, 0x8b, 0x90, 0x37, 0x05, 0x69, 0x20, 0xd6, + 0x35, 0x17, 0x85, 0xc6, 0x59, 0xbe, 0x10, 0xfd, 0x50, 0x8e, 0x68, 0x8f, 0x2a, 0x02, 0x8a, 0xe7, + 0xb9, 0x7a, 0x19, 0x98, 0xd1, 0xcd, 0xd9, 0x08, 0xbb, 0x5e, 0xd3, 0x65, 0x3e, 0xdc, 0x81, 0x1a, + 0x16, 0xe3, 0xd7, 0x9e, 0xb4, 0x0a, 0xae, 0xc0, 0x60, 0x18, 0xfc, 0x6c, 0xb1, 0x75, 0x45, 0xda, + 0x54, 0x2a, 0x23, 0x12, 0x70, 0x19, 0x11, 0xb9, 0x15, 0x86, 0x7b, 0x17, 0x4b, 0x79, 0xe1, 0xc5, + 0xce, 0x2c, 0xcb, 0x9c, 0xfd, 0xd2, 0xa8, 0x4d, 0x9b, 0x58, 0x5d, 0xe7, 0x7d, 0x98, 0x1b, 0x0a, + 0x9e, 0xba, 0xbd, 0x47, 0x21, 0xaf, 0x73, 0xe2, 0x9a, 0x8b, 0x2c, 0x0b, 0x0d, 0xbc, 0x3a, 0xb7, + 0x26, 0x2e, 0xb9, 0x18, 0xd4, 0x46, 0xe9, 0x83, 0x3f, 0xa3, 0xbc, 0x9e, 0x8f, 0x78, 0xdf, 0xa1, + 0xdd, 0x0f, 0x3f, 0xbb, 0xa1, 0xa0, 0x1b, 0x53, 0x40, 0x6e, 0x9e, 0x72, 0x18, 0xad, 0x55, 0x20, + 0x59, 0xb1, 0x7a, 0xea, 0x19, 0x5d, 0xb1, 0xfc, 0x82, 0x64, 0x2d, 0xb0, 0x88, 0x97, 0xec, 0x71, + 0x9d, 0x14, 0x90, 0xd8, 0x60, 0x56, 0xda, 0xa2, 0xb0, 0x2d, 0xe2, 0xcf, 0x87, 0x41, 0x75, 0x6c, + 0x8c, 0xdc, 0x2b, 0xd2, 0x69, 0xcb, 0x76, 0x4f, 0xb6, 0xfd, 0x91, 0x26, 0x43, 0x8e, 0x1d, 0x8e, + 0x80, 0xfe, 0x28, 0x11, 0x20, 0xa3, 0x33, 0xa5, 0xfb, 0xc5, 0x19, 0xcd, 0xb5, 0x4d, 0x6f, 0xc6, + 0x77, 0x67, 0xaa, 0x4e, 0xec, 0xed, 0xfc, 0x3e, 0xbf, 0x46, 0x54, 0x86, 0x5e, 0x2a, 0x6a, 0xc4, + 0x23, 0x1d, 0x35, 0xda, 0x09, 0x08, 0x4c, 0xb8, 0xee, 0x61, 0x60, 0x3e, 0x8a, 0x80, 0x36, 0x2d, + 0x39, 0x0a, 0x14, 0xb7, 0x37, 0x35, 0xe2, 0xe3, 0x9c, 0xc1, 0x9d, 0xe2, 0x62, 0x59, 0xfd, 0x31, + 0x9a, 0xa8, 0x43, 0x33, 0x96, 0x8b, 0x1f, 0x3d, 0x0c, 0xed, 0x6a, 0x66, 0x30, 0x10, 0xa8, 0x5b, + 0x85, 0xaa, 0xc6, 0xd4, 0x4d, 0xd6, 0x4a, 0xc8, 0x05, 0xb2, 0x57, 0x4d, 0xec, 0x40, 0xe8, 0xbb, + 0x8b, 0x66, 0xb4, 0xe8, 0xcb, 0x7d, 0x7f, 0xc9, 0x7b, 0xf4, 0xef, 0xcb, 0x64, 0x4e, 0x45, 0xcf, + 0xbb, 0x15, 0xb9, 0x96, 0xfc, 0x2d, 0x27, 0xcb, 0xe3, 0x1e, 0xd7, 0xe6, 0x96, 0x35, 0x39, 0xa0, + 0xd2, 0x36, 0xf6, 0x3b, 0xdb, 0x0a, 0x32, 0x8e, 0x05, 0x8c, 
0xe4, 0xff, 0xda, 0x1c, 0x6a, 0xe5, + 0xe0, 0xa5, 0x1c, 0x8d, 0x77, 0xde, 0x42, 0xbf, 0xc7, 0x67, 0x31, 0xe5, 0x28, 0x5b, 0x9c, 0xe6, + 0x5c, 0xee, 0x9e, 0xfd, 0xa0, 0x95, 0x40, 0xc4, 0xf7, 0x44, 0xac, 0x59, 0x09, 0x42, 0x4a, 0xc1, + 0xe8, 0x92, 0xb0, 0xa0, 0x5e, 0x9d, 0xd2, 0x30, 0xbf, 0x13, 0x60, 0x79, 0x75, 0x6c, 0x7d, 0x43, + 0x71, 0xd7, 0xf1, 0xf0, 0xaa, 0xb8, 0xdc, 0xe4, 0x18, 0xba, 0x3d, 0x0c, 0xa4, 0x4f, 0x49, 0xde, + 0x6f, 0xa5, 0x6c, 0x88, 0x8f, 0xc9, 0xaf, 0xe0, 0xdd, 0x06, 0xeb, 0x46, 0x44, 0xb2, 0x91, 0x53, + 0xa8, 0x53, 0x23, 0xbc, 0xb0, 0x4e, 0xff, 0xe0, 0x1a, 0x28, 0xf7, 0x2a, 0x29, 0xa4, 0x21, 0xab, + 0x15, 0x64, 0x24, 0x28, 0xf8, 0xee, 0xe5, 0x56, 0x2a, 0x08, 0x13, 0xa9, 0x11, 0x73, 0xad, 0x4e, + 0xba, 0x61, 0x88, 0x0b, 0x32, 0x3a, 0x5c, 0xb3, 0x2b, 0x0c, 0x8f, 0x31, 0x4b, 0x72, 0x9d, 0x3c, + 0x8c, 0xdf, 0x6c, 0x9e, 0xf0, 0x46, 0x8d, 0x8c, 0x5a, 0x50, 0x4a, 0xb5, 0x08, 0x40, 0xb1, 0x72, + 0x96, 0x3a, 0x39, 0x20, 0x6b, 0x72, 0x3e, 0x28, 0x54, 0x92, 0xc3, 0x7b, 0x09, 0x4d, 0x98, 0xd6, + 0x58, 0x64, 0x6e, 0x1d, 0xe2, 0x61, 0xe0, 0xaa, 0x64, 0xcd, 0xff, 0xbd, 0x4f, 0x1a, 0x93, 0xd0, + 0x3c, 0x94, 0xb9, 0x13, 0xb8, 0xcc, 0xf8, 0x48, 0x33, 0x32, 0x4c, 0x48, 0xb5, 0xce, 0x19, 0x77, + 0x01, 0xf2, 0x9a, 0xc3, 0xba, 0xa0, 0x50, 0x9b, 0x4e, 0xdc, 0x82, 0x15, 0x4c, 0x14, 0x05, 0x73, + 0x31, 0x1f, 0x73, 0x5e, 0xe1, 0x77, 0x67, 0x76, 0xc2, 0x1e, 0xbe, 0x84, 0xf3, 0x5e, 0x28, 0x52, + 0xd1, 0x1d, 0xeb, 0x44, 0x03, 0xb3, 0xaf, 0x65, 0xd9, 0x9b, 0x9f, 0x9f, 0xa2, 0xd9, 0xd2, 0x5d, + 0x57, 0x6c, 0x96, 0x2c, 0x4d, 0xd7, 0x9c, 0x18, 0x1a, 0xc2, 0x47, 0x3e, 0x04, 0x90, 0xc7, 0xb2, + 0xf8, 0x08, 0x47, 0xdd, 0x4f, 0xb9, 0xd1, 0x9d, 0x8a, 0xd9, 0x0c, 0xb9, 0x80, 0x6d, 0x70, 0x99, + 0xbf, 0x4d, 0x69, 0x15, 0x3e, 0x5f, 0xd7, 0x6d, 0x32, 0x81, 0x18, 0xe4, 0x16, 0x3e, 0xd4, 0xa2, + 0xed, 0x95, 0xae, 0xd5, 0x54, 0x59, 0x37, 0x9f, 0x31, 0x05, 0xe5, 0xab, 0xc4, 0xae, 0x26, 0x04, + 0x40, 0x4c, 0x44, 0x69, 0xf4, 0xb9, 0x70, 0x19, 0xfc, 0x6a, 0xb1, 0xce, 0x86, 0xcc, 0x62, 0xcd, + 0xe3, 0x8a, 0x58, 0x47, 0x6f, 0xc1, 0xb3, 0xd3, 0xcf, 0xbe, 0xec, 0xc0, 0xf7, 0x9b, 0xfd, 0xf3, + 0x01, 0xf6, 0x90, 0x7c, 0x81, 0x70, 0x11, 0xa0, 0x76, 0x13, 0x36, 0xef, 0xe0, 0x32, 0x80, 0x91, + 0x2c, 0x6a, 0x72, 0x9b, 0x2e, 0xfc, 0xe2, 0x23, 0x11, 0x2a, 0xd5, 0xc0, 0x1e, 0x2e, 0x4a, 0x74, + 0xc6, 0xd2, 0x11, 0x2b, 0x69, 0xf2, 0x9c, 0xcd, 0xbc, 0xce, 0x72, 0xb5, 0xbb, 0xf6, 0x0b, 0x84, + 0xbf, 0x40, 0x8d, 0x14, 0xe2, 0xf0, 0xde, 0x6d, 0xb3, 0xc1, 0x5b, 0x2a, 0x74, 0xe6, 0x63, 0x60, + 0x92, 0xcf, 0xec, 0x32, 0xb2, 0x1c, 0xcd, 0x5d, 0x74, 0xa3, 0xfa, 0x2b, 0x2d, 0xf1, 0x3c, 0x07, + 0xba, 0x78, 0xd7, 0xff, 0xfb, 0x3f, 0x2b, 0x75, 0x7d, 0xae, 0x2d, 0x2f, 0x76, 0x7b, 0xd4, 0xbb, + 0x91, 0xff, 0x71, 0xe9, 0xc5, 0x8f, 0x20, 0xa2, 0x97, 0xf4, 0x3e, 0xac, 0xfa, 0xa0, 0xfb, 0x51, + 0xa9, 0x70, 0x87, 0x80, 0x3a, 0xea, 0xad, 0x05, 0xea, 0x5c, 0xd5, 0xc9, 0x50, 0x78, 0x62, 0x8b, + 0xde, 0x1d, 0xf1, 0x47, 0x86, 0x91, 0x66, 0xda, 0xe1, 0x1f, 0x10, 0x92, 0x38, 0x60, 0xaa, 0xa8, + 0x69, 0x31, 0xe9, 0xcc, 0x89, 0xf7, 0x57, 0x03, 0x37, 0xed, 0xcf, 0x1c, 0xc6, 0x73, 0x14, 0xe2, + 0x53, 0x87, 0x3f, 0xda, 0x9c, 0x73, 0x03, 0x56, 0x24, 0xa4, 0xab, 0x81, 0xd8, 0xe0, 0x84, 0x2a, + 0x0d, 0xb3, 0x7d, 0xbf, 0x22, 0xec, 0x77, 0x62, 0x65, 0xc9, 0xc7, 0x70, 0x6a, 0x13, 0xbb, 0x29, + 0xfa, 0x7c, 0x43, 0xb8, 0xeb, 0x95, 0x0e, 0x7b, 0x3c, 0x25, 0xe7, 0x8d, 0x3a, 0x68, 0xe5, 0xef, + 0xc3, 0x3d, 0x46, 0x26, 0x1c, 0x56, 0xfa, 0xca, 0xfd, 0x54, 0xd6, 0x3f, 0x22, 0xe3, 0x40, 0x23, + 0x8c, 0x9c, 0xee, 0xab, 0x42, 0x68, 0x1a, 0xc2, 0x38, 0xdb, 0x64, 0xac, 0x5a, 0x76, 
0x0b, 0x9d, + 0x30, 0xe9, 0x30, 0x32, 0x0f, 0x5f, 0x9e, 0xd8, 0x9b, 0xb8, 0xcb, 0xf8, 0xab, 0x8c, 0x10, 0x8c, + 0x13, 0x43, 0xc0, 0x76, 0x0c, 0x39, 0xb6, 0x0e, 0xb2, 0x6b, 0x6d, 0x59, 0xfa, 0xd7, 0xb8, 0xec, + 0x98, 0xb6, 0x3f, 0x79, 0x4f, 0x6b, 0x13, 0x8b, 0x8c, 0xf0, 0xbd, 0xb7, 0xf0, 0x0f, 0xc4, 0xa8, + 0x6d, 0x8e, 0x19, 0x59, 0xa6, 0x75, 0x6d, 0xb2, 0xce, 0x01, 0xb4, 0x09, 0xbd, 0x98, 0x5a, 0x8e, + 0xa3, 0x3f, 0x66, 0x90, 0x71, 0xed, 0xb5, 0x23, 0xab, 0x57, 0xbf, 0xa7, 0x8e, 0xee, 0x03, 0x22, + 0x94, 0x66, 0x5c, 0xc4, 0x21, 0x7a, 0x39, 0x41, 0x36, 0x39, 0x08, 0x44, 0x9f, 0xc8, 0x1a, 0xb2, + 0x68, 0x9f, 0x44, 0x64, 0xef, 0x19, 0x87, 0x81, 0x85, 0xeb, 0x1a, 0xa1, 0xfe, 0x2c, 0x1f, 0x79, + 0xcd, 0x8a, 0xe1, 0x57, 0x9b, 0x88, 0x3b, 0x19, 0xc4, 0x60, 0xe2, 0x07, 0x73, 0x45, 0xc0, 0x15, + 0xda, 0xa5, 0xc4, 0xb9, 0x29, 0xfe, 0x27, 0xac, 0x27, 0x8e, 0x8c, 0x99, 0x13, 0x2e, 0x1e, 0xc7, + 0xa9, 0x92, 0x0b, 0x32, 0xb0, 0xc2, 0xb7, 0x55, 0xbf, 0x2d, 0xec, 0xa6, 0x8d, 0xbe, 0x57, 0x71, + 0x7f, 0x1e, 0x26, 0x9a, 0x3d, 0x53, 0xeb, 0xa1, 0x5b, 0x2c, 0x42, 0xa7, 0xaa, 0xb5, 0x6f, 0xe5, + 0xc7, 0xdc, 0x30, 0xf1, 0x9f, 0xda, 0x1b, 0x83, 0xe2, 0x18, 0x62, 0xc0, 0xb8, 0x54, 0x4b, 0xb3, + 0x43, 0x48, 0x88, 0x25, 0x0b, 0x8b, 0xf1, 0xb5, 0x28, 0xa9, 0x06, 0xc2, 0x69, 0x3a, 0xfa, 0xe5, + 0xf7, 0xaf, 0xf5, 0xf9, 0xc8, 0xaa, 0x5d, 0xfe, 0x3b, 0xaf, 0x48, 0x82, 0x91, 0x6c, 0x76, 0x65, + 0x06, 0x9c, 0x63, 0xfe, 0x39, 0x39, 0x7a, 0xbf, 0x3d, 0xe5, 0x0a, 0xaf, 0x9e, 0x62, 0xf5, 0x23, + 0x1e, 0x07, 0xf1, 0x83, 0x62, 0xca, 0x65, 0x8f, 0xee, 0x18, 0x93, 0x78, 0x90, 0xb6, 0x13, 0x6d, + 0x00, 0x64, 0xcf, 0x52, 0x1a, 0xa1, 0xb1, 0x63, 0x9c, 0x67, 0xf9, 0x7e, 0xb6, 0x0f, 0x67, 0x3d, + 0xb5, 0xfb, 0xe2, 0xbe, 0x49, 0xdb, 0x5e, 0xd3, 0xfc, 0xb4, 0x53, 0xb3, 0x7f, 0x06, 0xfd, 0xd2, + 0xff, 0x40, 0xf5, 0x99, 0x3c, 0xe2, 0xb3, 0xc5, 0xfe, 0x43, 0x76, 0x64, 0x9a, 0x98, 0x80, 0x20, + 0x89, 0x1e, 0xdb, 0x44, 0xe8, 0x72, 0x3f, 0x55, 0x82, 0xe1, 0x6f, 0x3a, 0x4b, 0x24, 0xff, 0xaa, + 0x45, 0xc8, 0x27, 0x2f, 0xcd, 0x38, 0x76, 0xc4, 0xb3, 0x61, 0xcb, 0xb6, 0x14, 0x4a, 0x2c, 0xa6, + 0x84, 0xe1, 0xb0, 0x7b, 0x7d, 0xb6, 0x89, 0x99, 0x44, 0x33, 0x57, 0xcc, 0x69, 0x75, 0x0f, 0x19, + 0x1d, 0xd0, 0x34, 0x4f, 0xe8, 0xc9, 0x24, 0xd3, 0x74, 0xd7, 0x1a, 0x70, 0x6a, 0x3d, 0x77, 0x67, + 0x39, 0xbf, 0x0a, 0x84, 0xf0, 0x85, 0x7f, 0x1f, 0x21, 0xc0, 0xf4, 0x7f, 0xaf, 0x17, 0x42, 0x5a, + 0x8c, 0xe3, 0x21, 0x99, 0x4d, 0xb1, 0x01, 0x3c, 0x48, 0xaf, 0x7e, 0x3d, 0xd4, 0x49, 0x08, 0xb9, + 0xed, 0xa2, 0x1c, 0x1a, 0x10, 0xfd, 0x6b, 0x29, 0x33, 0xa1, 0x4f, 0x07, 0x8c, 0x43, 0x06, 0x8e, + 0xde, 0x58, 0xc2, 0xe0, 0xf6, 0x36, 0xfb, 0x31, 0x06, 0x5e, 0xb5, 0x98, 0xb6, 0x73, 0x15, 0x8e, + 0xb3, 0x64, 0x44, 0xa3, 0xdb, 0x2d, 0x6b, 0xc3, 0x94, 0x5c, 0x1c, 0xd9, 0x0b, 0x6b, 0x53, 0x5d, + 0x75, 0xc4, 0x16, 0xb4, 0xb5, 0x8f, 0x40, 0xd5, 0xe7, 0x05, 0xd7, 0x81, 0x50, 0x10, 0xdd, 0x87, + 0x1d, 0xf7, 0x21, 0x07, 0x4e, 0xe1, 0xaa, 0x03, 0x35, 0x76, 0x6c, 0xe2, 0x56, 0x1f, 0xe0, 0xcd, + 0xc1, 0x1f, 0x6b, 0x32, 0x1a, 0x59, 0x58, 0x68, 0x1a, 0x07, 0x4a, 0x42, 0x38, 0xbe, 0xc5, 0x1d, + 0x36, 0xc1, 0xc0, 0xd4, 0xf8, 0x63, 0x2d, 0xbd, 0xcc, 0xab, 0xec, 0xd4, 0x23, 0xaa, 0x48, 0x57, + 0x38, 0x68, 0x04, 0x09, 0xf6, 0x5f, 0xc3, 0xde, 0x12, 0xa1, 0x7a, 0xd5, 0x86, 0xf9, 0x90, 0xbd, + 0xca, 0x51, 0xd7, 0x23, 0x9d, 0x3a, 0xa3, 0x25, 0x5a, 0x6f, 0xdc, 0xc7, 0x67, 0x06, 0xab, 0x9a, + 0x48, 0x9a, 0x05, 0xb9, 0x3d, 0x73, 0xc9, 0xac, 0x12, 0x18, 0x2f, 0x35, 0x72, 0x34, 0x17, 0xcd, + 0x80, 0xd6, 0xd0, 0xd1, 0xaf, 0x25, 0x36, 0xcc, 0x01, 0x57, 0x30, 0x9b, 0x33, 0xcc, 0x61, 0xb3, + 0x72, 0xdf, 
0x26, 0x6f, 0x88, 0x0c, 0x75, 0x0d, 0xfa, 0x49, 0x87, 0xa5, 0x62, 0x24, 0x15, 0x42, + 0xa0, 0xab, 0x79, 0x3c, 0x87, 0x78, 0x3a, 0xdb, 0x5d, 0x4d, 0xfb, 0x37, 0x92, 0xf2, 0x04, 0x32, + 0xba, 0xdb, 0x2e, 0x72, 0x92, 0xde, 0x8f, 0x91, 0x4f, 0x75, 0x89, 0x4f, 0x94, 0xb1, 0x42, 0xee, + 0x4e, 0x2a, 0x84, 0xf9, 0x53, 0x07, 0xff, 0x7b, 0x4b, 0xa5, 0x98, 0xac, 0x79, 0x12, 0x30, 0xde, + 0x1f, 0x21, 0x85, 0xf3, 0x7c, 0xef, 0x59, 0x41, 0x10, 0x90, 0xed, 0x4c, 0x78, 0x22, 0x19, 0x28, + 0x45, 0x7e, 0x5a, 0xbd, 0x3e, 0xd1, 0xed, 0x8b, 0x66, 0xc4, 0xb2, 0x15, 0x5d, 0xba, 0xdd, 0x0c, + 0x89, 0xd8, 0x99, 0xd7, 0xfa, 0x10, 0xd2, 0x19, 0xe7, 0x8a, 0x86, 0x42, 0x99, 0x1f, 0x26, 0xd1, + 0xbc, 0xaa, 0x52, 0xf8, 0x11, 0x14, 0xe3, 0xec, 0x1b, 0xec, 0x3b, 0xb3, 0xc0, 0x26, 0x30, 0x9a, + 0xf4, 0x43, 0xe5, 0x60, 0x3d, 0xba, 0x00, 0xbf, 0x77, 0x39, 0x2b, 0x20, 0xf7, 0xbe, 0xe6, 0xdf, + 0xcd, 0x10, 0xe8, 0x2e, 0xd3, 0x84, 0x63, 0x18, 0xa4, 0x9f, 0xfc, 0x01, 0xcd, 0x5d, 0x82, 0x1e, + 0xf8, 0x78, 0xdc, 0xde, 0x34, 0x98, 0xee, 0xd8, 0xef, 0x90, 0x44, 0x89, 0x92, 0xa6, 0xc3, 0x24, + 0x4a, 0xc3, 0x48, 0xdd, 0x88, 0x07, 0x12, 0x7a, 0x6b, 0x2e, 0x17, 0x81, 0x82, 0xbf, 0xf9, 0x16, + 0x0e, 0xfd, 0x2c, 0xe6, 0xbd, 0xc1, 0xf6, 0x74, 0x9c, 0xd6, 0x93, 0xad, 0xd5, 0xaa, 0x4b, 0x4e, + 0xbb, 0x14, 0xc1, 0x2d, 0xc7, 0x13, 0x1f, 0xcc, 0x24, 0xdd, 0x8b, 0x23, 0xeb, 0x28, 0x8d, 0x5d, + 0x2c, 0x17, 0xc3, 0x75, 0x26, 0x73, 0xcc, 0xb0, 0x72, 0x18, 0x92, 0x11, 0x6e, 0xa2, 0x41, 0xc3, + 0x9d, 0xae, 0x00, 0x94, 0xe4, 0xe7, 0x84, 0x64, 0x89, 0x39, 0x1a, 0xd9, 0x9d, 0xe6, 0xd5, 0xaf, + 0x7f, 0xdd, 0xee, 0x0e, 0xed, 0x5c, 0x08, 0x7c, 0x2f, 0xb0, 0x62, 0x13, 0xe3, 0x06, 0xd8, 0x1c, + 0x15, 0xe5, 0xf9, 0xc6, 0xa9, 0x48, 0x0e, 0x44, 0x07, 0x24, 0xca, 0xcf, 0x93, 0x6b, 0xdf, 0x97, + 0xd7, 0x94, 0xa4, 0xa6, 0x2e, 0xd9, 0x79, 0x71, 0xd9, 0xff, 0x2c, 0xbb, 0xeb, 0xa3, 0xa2, 0x2f, + 0x3e, 0xa7, 0xbc, 0x42, 0xa5, 0x76, 0x9f, 0x98, 0x60, 0x9b, 0x1a, 0xfa, 0x40, 0x22, 0x95, 0x24, + 0x0a, 0xe6, 0x99, 0xea, 0xf8, 0x31, 0xd4, 0x75, 0x8f, 0xe2, 0xb9, 0x96, 0x57, 0x4d, 0x5e, 0x2e, + 0x09, 0xe8, 0xd2, 0xcb, 0x39, 0xe9, 0x18, 0xf7, 0x17, 0x88, 0x71, 0x2d, 0x0f, 0x39, 0x4a, 0x61, + 0xf0, 0xec, 0xf2, 0xfe, 0xb5, 0x4f, 0xbc, 0x19, 0x4b, 0x8d, 0x70, 0xde, 0x70, 0x1e, 0xe7, 0x9f, + 0xfe, 0x44, 0xa9, 0x22, 0x21, 0x6c, 0x6f, 0xd7, 0xce, 0xc8, 0xa4, 0x98, 0x2b, 0xe7, 0x54, 0xd3, + 0xbe, 0xb8, 0xd0, 0x42, 0x2b, 0x7f, 0x35, 0x42, 0xa3, 0x86, 0xee, 0x5d, 0x9b, 0xe7, 0x8a, 0x53, + 0x6b, 0xdf, 0xde, 0xf7, 0x8b, 0xb0, 0xcd, 0x13, 0x5a, 0x66, 0x08, 0x40, 0x44, 0xc1, 0x8f, 0x41, + 0x7c, 0xa5, 0xda, 0x93, 0x24, 0xe5, 0x78, 0x76, 0x18, 0x1c, 0x13, 0x31, 0x0d, 0x57, 0x39, 0x32, + 0x6a, 0x91, 0x5e, 0x5e, 0xee, 0x10, 0x1e, 0x75, 0xe7, 0x38, 0x15, 0x64, 0x70, 0xab, 0x95, 0x2d, + 0x83, 0xe1, 0x94, 0xca, 0x8f, 0x5e, 0xfc, 0xdb, 0xdf, 0x92, 0x92, 0xdd, 0xac, 0x28, 0x5c, 0x04, + 0x6e, 0xe9, 0x0d, 0x8e, 0x13, 0x56, 0xcc, 0xea, 0x1f, 0x61, 0x8b, 0xa2, 0xa6, 0x02, 0xf3, 0x36, + 0x5d, 0x79, 0x97, 0xd1, 0x3f, 0x59, 0x4d, 0x38, 0xaa, 0x27, 0x2a, 0xc1, 0x27, 0x07, 0x99, 0x5f, + 0x0b, 0xdb, 0x2f, 0xa5, 0xe1, 0x61, 0xe3, 0x0c, 0xa8, 0xcd, 0x06, 0x83, 0x0b, 0x03, 0xb2, 0xa2, + 0xc8, 0x07, 0xef, 0x8e, 0x3e, 0xbb, 0x21, 0x30, 0xd0, 0x66, 0x67, 0x56, 0x8c, 0x0a, 0x77, 0xc8, + 0xce, 0x22, 0x40, 0xc4, 0xaf, 0xe0, 0x27, 0x8a, 0x7d, 0xe8, 0x2e, 0x8e, 0x73, 0x87, 0x28, 0xfb, + 0xdc, 0xa9, 0x33, 0x63, 0x5a, 0xa1, 0x2b, 0x43, 0x78, 0x96, 0xa8, 0x48, 0xcb, 0x23, 0x08, 0x58, + 0x47, 0x31, 0xd9, 0xa1, 0xe9, 0xdf, 0x46, 0xb4, 0x35, 0xfa, 0x62, 0xa3, 0xd8, 0x3c, 0xab, 0xa6, + 0x07, 0xec, 0x60, 0xa5, 0x7b, 0xd3, 
0xae, 0x0a, 0x70, 0x77, 0xac, 0x3c, 0xc4, 0x29, 0xed, 0x4f, + 0xb9, 0x3e, 0x89, 0x10, 0xa4, 0x2c, 0x3e, 0x1b, 0x78, 0x49, 0x04, 0x38, 0x15, 0xa2, 0xd7, 0x0e, + 0xff, 0x2e, 0x27, 0xbc, 0x47, 0xf9, 0x0d, 0xd8, 0xf8, 0xf9, 0xfb, 0x35, 0x3e, 0x59, 0x7d, 0x3c, + 0x7d, 0x8f, 0xb1, 0x33, 0x7d, 0xd3, 0xed, 0xd7, 0xd5, 0x9b, 0xc5, 0xfc, 0xba, 0xae, 0xe2, 0xfe, + 0x08, 0xee, 0x31, 0xc8, 0x23, 0xae, 0x69, 0xf1, 0x20, 0xaa, 0x95, 0xaa, 0x8e, 0x90, 0xfb, 0xd2, + 0x93, 0x38, 0xfa, 0x16, 0x81, 0xcf, 0xa7, 0x17, 0x1b, 0xa1, 0x48, 0x6b, 0x14, 0x33, 0x05, 0x06, + 0xa9, 0xc1, 0xdd, 0xcb, 0x35, 0x6b, 0xb2, 0x44, 0xc3, 0x43, 0x94, 0xf5, 0xe8, 0x64, 0x9a, 0xf5, + 0x64, 0x06, 0xec, 0xab, 0xbe, 0x68, 0x63, 0xde, 0x53, 0xd0, 0x8d, 0x0a, 0x2b, 0x46, 0x19, 0x79, + 0x72, 0xcb, 0xbd, 0xf7, 0x86, 0x54, 0xef, 0xdf, 0xe4, 0x28, 0x4a, 0x06, 0x13, 0x1c, 0x2f, 0x86, + 0xd9, 0x0a, 0x75, 0xc5, 0x07, 0x0f, 0xf3, 0x15, 0x71, 0x18, 0x27, 0x54, 0xc2, 0x3e, 0x8e, 0xef, + 0x49, 0x4b, 0x35, 0x47, 0x0a, 0xca, 0x6d, 0x46, 0x98, 0x25, 0x64, 0x38, 0x26, 0xf7, 0x64, 0x3e, + 0xa6, 0x38, 0x69, 0x5a, 0x99, 0xac, 0xdd, 0x9f, 0x4c, 0x15, 0xad, 0x91, 0x14, 0xfd, 0x31, 0xff, + 0x98, 0xbd, 0x23, 0x94, 0x13, 0x45, 0xdd, 0xd8, 0x66, 0x8b, 0x75, 0x63, 0x8b, 0x14, 0x52, 0xa7, + 0xdc, 0x02, 0x32, 0x5c, 0x07, 0x69, 0xc9, 0xb5, 0xef, 0x7a, 0x14, 0x01, 0xbb, 0x70, 0xd7, 0x99, + 0x0e, 0x34, 0x30, 0x5c, 0x7e, 0x50, 0x73, 0x20, 0xde, 0x4e, 0x19, 0x75, 0x57, 0xa0, 0x38, 0xdd, + 0x5d, 0x57, 0x4c, 0x2d, 0xb1, 0x06, 0x00, 0xba, 0x45, 0xb4, 0xbd, 0x49, 0x40, 0x18, 0x4e, 0xe4, + 0xb1, 0x62, 0x66, 0x20, 0x78, 0xae, 0xad, 0x56, 0xce, 0xd2, 0x6d, 0x13, 0x3b, 0x7f, 0x95, 0x33, + 0xb0, 0xb1, 0x53, 0x74, 0x67, 0x19, 0xe4, 0x4f, 0xac, 0x95, 0xde, 0x83, 0x1f, 0xcd, 0x9f, 0x0e, + 0xb8, 0x05, 0x6e, 0xbe, 0x33, 0xef, 0x37, 0x68, 0xee, 0x28, 0xd0, 0xaa, 0xfb, 0x2c, 0x7d, 0xb6, + 0xd1, 0xa3, 0x24, 0x67, 0xbb, 0x75, 0x10, 0x61, 0xa7, 0x53, 0x32, 0xc3, 0x76, 0xca, 0xb2, 0xcd, + 0x76, 0x41, 0x31, 0x67, 0x56, 0xfa, 0xaa, 0x98, 0x60, 0xec, 0xd3, 0x06, 0xae, 0x57, 0xff, 0x52, + 0xac, 0x43, 0x9e, 0xac, 0xca, 0x2c, 0x0b, 0x28, 0x7b, 0x71, 0x72, 0x1f, 0x31, 0xc1, 0x7a, 0x94, + 0xd6, 0x27, 0xe8, 0x66, 0x22, 0xbf, 0xcf, 0xb4, 0x30, 0x41, 0x72, 0x3c, 0xc0, 0x27, 0x56, 0xfe, + 0x3e, 0x20, 0xc8, 0xde, 0x94, 0x55, 0x51, 0xfa, 0x58, 0x82, 0x49, 0x24, 0xb7, 0xa2, 0x08, 0xce, + 0x78, 0xe2, 0x67, 0xff, 0x5c, 0xf1, 0xba, 0x07, 0x4c, 0x4b, 0x64, 0x07, 0xfe, 0x89, 0x4f, 0xd7, + 0x2b, 0x60, 0x2e, 0x6c, 0xbd, 0x70, 0x19, 0xd1, 0x12, 0x76, 0x28, 0x0d, 0x70, 0xf4, 0xc3, 0x00, + 0x99, 0x11, 0xf2, 0x98, 0x30, 0x36, 0x19, 0x5d, 0x2b, 0xcd, 0x4f, 0x92, 0xbe, 0x7b, 0x05, 0x5a, + 0x8d, 0x59, 0x62, 0xb3, 0x80, 0x48, 0x1c, 0xd1, 0x3f, 0x10, 0xba, 0x39, 0xd1, 0xfe, 0x1a, 0xaf, + 0x95, 0x5f, 0x2a, 0x29, 0x2e, 0x96, 0x2e, 0x2f, 0xf3, 0xfd, 0x44, 0xe0, 0x9f, 0xfe, 0xf3, 0xed, + 0x66, 0xdf, 0xeb, 0x9c, 0xb1, 0x12, 0x4a, 0xef, 0x89, 0xdb, 0x29, 0xb5, 0x25, 0xf6, 0x65, 0x7d, + 0x5d, 0x43, 0xf5, 0x29, 0x2a, 0x7e, 0x51, 0xfe, 0x87, 0x1d, 0xb5, 0xdf, 0x7a, 0x23, 0xe0, 0x2f, + 0x6b, 0xbf, 0xbb, 0x85, 0xe5, 0x59, 0x6c, 0x26, 0x77, 0x45, 0xd7, 0xf3, 0x07, 0xf6, 0x9f, 0xd9, + 0xde, 0xee, 0x73, 0x25, 0xd8, 0xdf, 0x9b, 0xb6, 0x37, 0x4c, 0xe1, 0xd6, 0x40, 0xba, 0x8c, 0x3c, + 0x13, 0xdf, 0x8f, 0x1f, 0x8f, 0xf4, 0xa5, 0xc6, 0x51, 0x85, 0x28, 0x50, 0x19, 0x19, 0xcb, 0x7d, + 0xbb, 0xa5, 0x9a, 0x18, 0x45, 0x44, 0x0d, 0xb4, 0xd0, 0xd2, 0x13, 0x9d, 0x53, 0xe7, 0x75, 0x90, + 0x71, 0xaf, 0x5a, 0xb4, 0x3f, 0x8d, 0x45, 0xf1, 0xba, 0x1b, 0x3a, 0xe9, 0x5f, 0x1a, 0xfa, 0x8b, + 0xc3, 0x22, 0x64, 0x37, 0x29, 0xe0, 0xba, 0x77, 0x09, 0x3e, 
0x34, 0x1e, 0x55, 0x65, 0xe8, 0x03, + 0xe0, 0x9c, 0x28, 0xf7, 0xc4, 0x2c, 0xd8, 0x29, 0x33, 0xb8, 0x65, 0x46, 0xf2, 0x60, 0xc0, 0x2e, + 0x8e, 0x2f, 0x50, 0x86, 0xa3, 0xf0, 0x5e, 0x6e, 0x72, 0x63, 0x4e, 0x5b, 0xab, 0x57, 0xd5, 0x81, + 0x69, 0x59, 0x95, 0x86, 0x9f, 0x62, 0xd3, 0xb9, 0xa5, 0x54, 0x06, 0x73, 0x50, 0x91, 0x9c, 0x01, + 0x19, 0xa9, 0xca, 0x7b, 0x6f, 0x66, 0xe1, 0xfc, 0x97, 0x84, 0x58, 0x15, 0x45, 0x69, 0x7b, 0xd2, + 0xd1, 0xf8, 0xd1, 0x7d, 0xbe, 0x13, 0x43, 0xd6, 0x4e, 0x62, 0x48, 0x66, 0x7f, 0x3a, 0x89, 0x45, + 0xdb, 0x76, 0xdc, 0x3a, 0xc5, 0x01, 0x1c, 0xe7, 0x76, 0x37, 0xee, 0x79, 0x8a, 0x00, 0xc8, 0x15, + 0xc0, 0x1f, 0x95, 0xcf, 0x22, 0x5c, 0x0f, 0xe7, 0x75, 0x6b, 0xa9, 0x20, 0x5a, 0xdc, 0x97, 0xe6, + 0x19, 0x9d, 0xae, 0x0f, 0x80, 0x6f, 0x87, 0x53, 0x88, 0xe1, 0xe9, 0xcc, 0x51, 0xf9, 0x05, 0x29, + 0xde, 0x9f, 0x8a, 0x3e, 0x64, 0x65, 0x80, 0x69, 0x08, 0x58, 0xfc, 0x85, 0x12, 0x8a, 0x9e, 0xad, + 0xb1, 0xe5, 0x9d, 0xee, 0x00, 0x37, 0x95, 0x34, 0x1e, 0x6a, 0xc0, 0xc0, 0x86, 0xee, 0x67, 0x30, + 0x38, 0x3a, 0x67, 0x61, 0x6e, 0x01, 0xd7, 0xd5, 0xea, 0x9a, 0xda, 0x97, 0x41, 0xef, 0x10, 0x4b, + 0xe5, 0x12, 0xe4, 0xf4, 0x53, 0x95, 0x97, 0x64, 0x23, 0xea, 0x61, 0x63, 0xd1, 0xcb, 0x59, 0x9f, + 0x96, 0x0c, 0x69, 0x40, 0xa1, 0x48, 0x7d, 0x75, 0x10, 0x4c, 0xff, 0x5a, 0xd1, 0xf2, 0xf9, 0x55, + 0xf4, 0x33, 0x17, 0xbc, 0x2a, 0x57, 0x88, 0x71, 0x5c, 0x94, 0xaf, 0x4f, 0x84, 0xdd, 0x2d, 0xda, + 0x3c, 0x79, 0xe8, 0x31, 0xf3, 0xb7, 0xda, 0x85, 0xa4, 0x9c, 0xd2, 0x36, 0xaf, 0x01, 0x17, 0x07, + 0x8a, 0x6c, 0x7f, 0x13, 0xe0, 0x4d, 0xd9, 0x4b, 0x7e, 0xa3, 0x59, 0x8e, 0xa8, 0x91, 0x89, 0x40, + 0x6c, 0xe1, 0x90, 0x45, 0xff, 0x8c, 0x5e, 0x49, 0x7a, 0x80, 0xfb, 0x19, 0x24, 0x27, 0xb3, 0x9f, + 0x05, 0x07, 0x46, 0x07, 0x6c, 0x90, 0x6a, 0xc9, 0xdc, 0x0b, 0xd7, 0x77, 0x57, 0x1a, 0xc0, 0xfa, + 0x67, 0xea, 0xaa, 0x45, 0xeb, 0xd3, 0x9c, 0x67, 0xd5, 0xa7, 0x82, 0x56, 0xa4, 0x44, 0x71, 0xfe, + 0x77, 0xfe, 0x53, 0xca, 0x5f, 0xa8, 0x01, 0xc4, 0xb1, 0x15, 0x92, 0xe2, 0x43, 0x9b, 0x94, 0x27, + 0x5e, 0xc7, 0x36, 0xe0, 0x4a, 0xcf, 0xfd, 0xc6, 0x99, 0x43, 0x9d, 0xf3, 0x23, 0xb9, 0x0d, 0xb8, + 0xb2, 0x75, 0xe2, 0x45, 0xe8, 0xf7, 0xae, 0x7d, 0xc2, 0x11, 0x1e, 0x2f, 0x69, 0x14, 0x8c, 0xf0, + 0xc6, 0xad, 0x6a, 0xda, 0x63, 0x79, 0x04, 0x5f, 0x52, 0xc1, 0x0e, 0x15, 0xe8, 0x77, 0x8d, 0x7a, + 0x1d, 0x71, 0xf3, 0x02, 0x8c, 0x0a, 0x63, 0xb4, 0xfe, 0x29, 0xed, 0x5f, 0xf9, 0x82, 0xac, 0xc9, + 0xf6, 0xd7, 0x1d, 0x8b, 0x30, 0x6f, 0x59, 0x70, 0xb3, 0xea, 0x11, 0xb6, 0xf6, 0x46, 0x5a, 0xc7, + 0x0b, 0x5b, 0xf1, 0xb1, 0x44, 0x20, 0xcc, 0x94, 0x6c, 0x30, 0x3c, 0x1c, 0x8f, 0xb2, 0x90, 0x4e, + 0xfd, 0xbc, 0x2e, 0x78, 0xc0, 0xc5, 0x17, 0xf2, 0xc5, 0x40, 0x38, 0xd3, 0xc5, 0x1c, 0x49, 0x9b, + 0xee, 0xea, 0x66, 0xf2, 0x3b, 0x23, 0xff, 0x45, 0xab, 0x36, 0x37, 0x83, 0xc5, 0x9f, 0x66, 0x9e, + 0xe5, 0x69, 0xeb, 0x45, 0xf8, 0xac, 0xa5, 0x4d, 0x64, 0xd9, 0xde, 0x5d, 0x2a, 0xbb, 0xd7, 0x91, + 0x36, 0x31, 0x47, 0x31, 0x61, 0xad, 0x0c, 0x4d, 0xcd, 0x08, 0x31, 0xc3, 0x3b, 0x73, 0x79, 0x82, + 0x5c, 0x62, 0xe8, 0x25, 0x68, 0x49, 0x04, 0x89, 0x1b, 0x13, 0xb3, 0x75, 0x17, 0xda, 0x15, 0x8d, + 0xba, 0xb2, 0xf6, 0x6d, 0xbe, 0xd4, 0xa4, 0x81, 0x5e, 0x54, 0x2b, 0x0b, 0x9b, 0x9e, 0x38, 0x93, + 0x5c, 0xb2, 0xf6, 0x2c, 0x05, 0x77, 0x6d, 0x79, 0xf5, 0x52, 0xb0, 0xde, 0x30, 0x2f, 0xdb, 0x86, + 0x37, 0x73, 0x0e, 0xcb, 0xd9, 0xbb, 0xc9, 0x1a, 0x67, 0x91, 0x40, 0x36, 0x6d, 0xe6, 0x9a, 0x38, + 0xa3, 0x06, 0x3a, 0x78, 0xb9, 0xab, 0x86, 0xae, 0x6b, 0x37, 0xb5, 0x2b, 0xde, 0x19, 0xe0, 0x36, + 0x1e, 0x51, 0xb7, 0x94, 0xae, 0x5f, 0x94, 0x75, 0xd2, 0x2d, 0x5a, 0xf9, 0xe4, 0x77, 
0x81, 0x76, + 0x8c, 0x60, 0x73, 0x26, 0xf7, 0x40, 0x6c, 0xaa, 0xca, 0x85, 0x22, 0x43, 0x76, 0x3d, 0x4e, 0x9c, + 0x9f, 0xcb, 0x6d, 0x6f, 0x36, 0x8c, 0xc6, 0x47, 0x93, 0x8e, 0xa9, 0xf0, 0xbc, 0x6e, 0x25, 0x0d, + 0x6c, 0xea, 0x56, 0xb2, 0xa7, 0xdf, 0xbb, 0x60, 0x6e, 0x62, 0xce, 0xf1, 0x49, 0x1a, 0xa5, 0x3f, + 0x65, 0x41, 0xe1, 0x1e, 0x8d, 0x84, 0x95, 0xed, 0x8c, 0xb9, 0x87, 0x11, 0x1f, 0x9f, 0x5d, 0xa8, + 0xe5, 0x5b, 0x7b, 0x3e, 0xea, 0x87, 0x41, 0x9a, 0xaf, 0xfc, 0x5f, 0x20, 0xd4, 0xa1, 0x1f, 0xfb, + 0xe9, 0xb7, 0x25, 0xa5, 0xa2, 0xe6, 0x0b, 0x8d, 0xa6, 0xa0, 0xa4, 0x13, 0xa4, 0x2c, 0x28, 0x43, + 0x8b, 0xbc, 0xe4, 0x05, 0x85, 0x19, 0x3b, 0xee, 0x98, 0x5f, 0x2e, 0x25, 0xd6, 0xde, 0xd4, 0x18, + 0x19, 0xdb, 0xfe, 0x3f, 0x9c, 0x81, 0x34, 0x6e, 0xb3, 0xf6, 0x1b, 0x85, 0x09, 0xd9, 0xc5, 0x7a, + 0xa7, 0x59, 0xba, 0xec, 0xb7, 0x39, 0xf6, 0x55, 0x95, 0x52, 0x34, 0x6f, 0xaf, 0x5f, 0xda, 0x0f, + 0x07, 0xda, 0x50, 0x8c, 0x46, 0x46, 0x6d, 0x17, 0x9b, 0xb3, 0x6f, 0x7a, 0x4d, 0x09, 0xce, 0x89, + 0x4d, 0x6b, 0xaf, 0x99, 0x36, 0x22, 0xfb, 0xe9, 0x85, 0xcc, 0x13, 0x02, 0xf2, 0x46, 0x1a, 0x35, + 0x57, 0xa1, 0xed, 0x1d, 0x68, 0x77, 0x29, 0x62, 0x04, 0xd0, 0xd0, 0xcb, 0xcb, 0x9e, 0x8e, 0xe4, + 0xcd, 0x7d, 0xe9, 0x6a, 0xbc, 0x72, 0xdc, 0xc2, 0x25, 0xce, 0x19, 0x31, 0xf0, 0xdf, 0x39, 0xfc, + 0xcb, 0x5d, 0xdd, 0x4c, 0x39, 0x7f, 0x2f, 0xce, 0x48, 0x72, 0x4c, 0xb0, 0xe8, 0xf5, 0x11, 0xa0, + 0x43, 0xfa, 0xf0, 0x64, 0x24, 0xe7, 0xef, 0x05, 0x6d, 0x7b, 0xc4, 0x25, 0xe8, 0x0a, 0xa0, 0xe7, + 0x49, 0x93, 0x93, 0x2e, 0xca, 0x3d, 0x0f, 0xf6, 0x7a, 0x37, 0x87, 0xdf, 0xa1, 0x98, 0x54, 0x1b, + 0xdb, 0x45, 0xec, 0x4c, 0x02, 0x32, 0xdb, 0x4d, 0x66, 0x44, 0x25, 0x27, 0x4e, 0x52, 0x88, 0x0a, + 0x49, 0x6a, 0x24, 0xa6, 0x9b, 0xc4, 0xa0, 0xbc, 0x5e, 0x8b, 0xfe, 0x7c, 0xa0, 0xe1, 0x75, 0x1f, + 0x42, 0x50, 0x15, 0x2f, 0xa6, 0xc8, 0xc8, 0x94, 0xa3, 0xf4, 0x3c, 0xdd, 0x63, 0x16, 0x02, 0xe3, + 0x30, 0x57, 0xd0, 0xc3, 0x3e, 0x6f, 0xb2, 0x1c, 0x3e, 0x01, 0x41, 0x5b, 0x1e, 0x5c, 0x6b, 0x1d, + 0xad, 0xaa, 0x2a, 0xfa, 0x89, 0x81, 0x21, 0x05, 0x80, 0x93, 0x1d, 0x3a, 0xda, 0x01, 0x6d, 0x2d, + 0x1c, 0xb0, 0x96, 0xc7, 0x35, 0xe2, 0x89, 0x4e, 0xd6, 0x17, 0x9f, 0xe1, 0x21, 0xf2, 0x9d, 0xe5, + 0x0b, 0x18, 0x8a, 0xa8, 0xc4, 0xb9, 0x89, 0x8f, 0x60, 0xc3, 0xe9, 0xb8, 0x32, 0x40, 0x92, 0x39, + 0x14, 0x5d, 0x32, 0x5d, 0xe7, 0xad, 0x28, 0xa3, 0xd4, 0x45, 0x81, 0xe4, 0x2c, 0x8b, 0x1c, 0x61, + 0xa3, 0x50, 0xc2, 0xde, 0x46, 0xc7, 0x0f, 0x79, 0x8f, 0x4f, 0x1e, 0xa5, 0xea, 0xd3, 0x27, 0xab, + 0xc7, 0x16, 0x16, 0xcc, 0x47, 0xc9, 0x83, 0xef, 0x30, 0xf5, 0xf6, 0x88, 0x26, 0x2c, 0x32, 0x43, + 0x6b, 0x55, 0xd7, 0x14, 0x5a, 0xe2, 0x9d, 0x16, 0x80, 0x0b, 0x55, 0xba, 0x77, 0xc7, 0x46, 0x3f, + 0xf2, 0x4c, 0x04, 0x87, 0xf3, 0x18, 0xe4, 0xdb, 0xb0, 0x0c, 0x01, 0x24, 0x88, 0xeb, 0x10, 0x53, + 0x9e, 0xf8, 0xa1, 0x1c, 0xbb, 0xec, 0x24, 0xa3, 0x25, 0x26, 0x37, 0x36, 0xb8, 0xb4, 0x25, 0x03, + 0x76, 0xe6, 0x41, 0x2e, 0x9e, 0x64, 0xf0, 0x23, 0xac, 0xce, 0xe6, 0xd2, 0xb1, 0x44, 0x74, 0xc8, + 0xa8, 0x57, 0x87, 0x50, 0xc9, 0xe3, 0x62, 0xd3, 0x54, 0x9f, 0x83, 0xd5, 0xbc, 0xca, 0x92, 0x01, + 0xd3, 0xbb, 0xd6, 0xc5, 0xc6, 0xfe, 0x35, 0xb9, 0x1f, 0xfd, 0xe8, 0x5b, 0x7a, 0x75, 0x43, 0x98, + 0x6a, 0x60, 0x61, 0xc6, 0xb0, 0x44, 0x5b, 0xe2, 0xf0, 0xe1, 0x9f, 0x44, 0xb7, 0x6d, 0xfd, 0x70, + 0x4f, 0x8c, 0xf6, 0x90, 0x88, 0xfe, 0x99, 0xf5, 0x8d, 0xb9, 0xc0, 0x5b, 0x60, 0x84, 0x10, 0xfa, + 0xfb, 0x67, 0xaf, 0x0b, 0xf6, 0x16, 0x2e, 0x7b, 0xc0, 0x24, 0xa5, 0xdf, 0xa0, 0xac, 0x80, 0x7a, + 0xf9, 0x74, 0x38, 0x64, 0x58, 0x60, 0xe2, 0x60, 0x3e, 0x93, 0xb6, 0x22, 0xbf, 0xdc, 0x16, 0x4b, + 0xcf, 0x80, 
0x73, 0xca, 0x1d, 0x0a, 0xbb, 0x1f, 0xdd, 0x6d, 0xd0, 0x4c, 0xe4, 0x8c, 0x1f, 0x6d, + 0x6d, 0xa4, 0xd9, 0xe9, 0xda, 0x63, 0xcf, 0x98, 0x9c, 0x7e, 0x41, 0xa8, 0x3a, 0x2d, 0xa0, 0x21, + 0x28, 0x71, 0x11, 0xb2, 0x66, 0x3e, 0x03, 0xc7, 0xed, 0x36, 0x30, 0x6e, 0x02, 0xd9, 0x5e, 0x8d, + 0xd9, 0x09, 0x2c, 0x6c, 0x4b, 0x59, 0x81, 0x6a, 0xb3, 0x95, 0x73, 0x2e, 0x32, 0xd3, 0x65, 0xeb, + 0x0e, 0xce, 0x1f, 0x13, 0x98, 0x70, 0x4d, 0x63, 0xa3, 0x1f, 0x64, 0x73, 0x6f, 0x4b, 0x61, 0xd8, + 0x38, 0x92, 0x0f, 0xfa, 0xce, 0x83, 0xca, 0x5f, 0x87, 0x1e, 0x4f, 0x5f, 0xad, 0x50, 0x93, 0xb0, + 0x86, 0x4c, 0xe9, 0x42, 0x87, 0x4a, 0xef, 0xca, 0xca, 0x38, 0xae, 0xe9, 0xc6, 0xa7, 0x23, 0xea, + 0x6b, 0xc8, 0x58, 0xea, 0xae, 0xea, 0x21, 0x7f, 0xad, 0x3d, 0xee, 0xb1, 0x7d, 0x0b, 0x1e, 0xda, + 0x2a, 0x94, 0x28, 0x9f, 0x9a, 0x4b, 0xd7, 0xeb, 0xc7, 0xeb, 0x00, 0x92, 0x2a, 0xa4, 0x82, 0x91, + 0x80, 0xec, 0x60, 0x8c, 0xf4, 0x63, 0xfa, 0x97, 0x9d, 0x0e, 0x90, 0x05, 0xb1, 0xbb, 0x9f, 0x62, + 0x28, 0x7c, 0x20, 0xa7, 0xc9, 0xb1, 0x72, 0xfa, 0x28, 0xde, 0x71, 0x1d, 0x02, 0x6b, 0xff, 0xe2, + 0xec, 0x18, 0xe6, 0xf8, 0xdb, 0x9b, 0x2c, 0x98, 0xf5, 0x1d, 0x7a, 0x27, 0xa6, 0x19, 0x98, 0xc9, + 0x29, 0xbb, 0x05, 0xf1, 0x15, 0x79, 0x1b, 0xad, 0xc6, 0xc7, 0xdd, 0x8b, 0xda, 0xdc, 0x80, 0x31, + 0xe3, 0x6d, 0xca, 0x35, 0x02, 0x7b, 0x44, 0x01, 0x3f, 0xde, 0x92, 0xb0, 0xae, 0x8b, 0xa0, 0xe3, + 0x4e, 0xfb, 0x78, 0x1b, 0xc4, 0x5c, 0xd0, 0xba, 0x91, 0xb0, 0x34, 0x1a, 0x3c, 0x9a, 0x0f, 0xd0, + 0xa8, 0x10, 0x90, 0xe2, 0xf1, 0x4b, 0x4e, 0xd6, 0xb7, 0x4d, 0xeb, 0x12, 0x22, 0x58, 0xf5, 0x57, + 0x9c, 0x1d, 0xc4, 0xe6, 0xbf, 0xf3, 0x79, 0x86, 0x2f, 0xef, 0xc4, 0xe9, 0xc2, 0x32, 0x79, 0x97, + 0xa1, 0x0c, 0x3c, 0x70, 0x89, 0x72, 0x22, 0x0e, 0xf8, 0x74, 0x63, 0xa9, 0x4f, 0x88, 0x0c, 0x8a, + 0xfa, 0xe0, 0x48, 0xc1, 0xb6, 0x60, 0x99, 0xd5, 0xe9, 0xa9, 0x5d, 0xfa, 0xfa, 0x29, 0xa7, 0xb6, + 0xb7, 0x05, 0x0b, 0xb7, 0x2b, 0xf6, 0x68, 0x2a, 0x2a, 0x16, 0xcc, 0x10, 0xb0, 0x02, 0x5f, 0x76, + 0x89, 0x3e, 0x91, 0x68, 0xd6, 0xc4, 0x01, 0x54, 0xb7, 0xf1, 0x5b, 0xc1, 0xbd, 0x23, 0x14, 0xe1, + 0x69, 0xaf, 0x18, 0x01, 0x43, 0xec, 0x53, 0xfe, 0xdb, 0xff, 0x58, 0x10, 0x54, 0xed, 0xc0, 0x1b, + 0xc8, 0x1c, 0xf5, 0x1d, 0x3f, 0x74, 0xb3, 0xe8, 0xcb, 0x5d, 0xb7, 0x55, 0xf1, 0x6a, 0xed, 0x83, + 0xa5, 0x2c, 0x51, 0x68, 0xed, 0x2f, 0xb0, 0x7c, 0x8d, 0xde, 0x9e, 0x7b, 0xf7, 0xa9, 0x38, 0x6c, + 0x58, 0x14, 0xa7, 0xc2, 0x97, 0x2f, 0xd0, 0x3c, 0xc6, 0x9c, 0xb8, 0xe4, 0x05, 0x14, 0xcc, 0x01, + 0xad, 0xec, 0x42, 0x5a, 0xf4, 0x8c, 0x40, 0xf9, 0xd2, 0x05, 0x62, 0x17, 0xa4, 0x22, 0xca, 0x41, + 0x64, 0x70, 0x70, 0x12, 0x7d, 0xb9, 0xee, 0xff, 0x92, 0xc4, 0x71, 0xd5, 0x04, 0x53, 0x23, 0x43, + 0xf4, 0x2a, 0x92, 0x4a, 0xbe, 0x76, 0x9a, 0xff, 0x46, 0x5d, 0x73, 0x87, 0x33, 0xbb, 0x48, 0xb2, + 0xec, 0xee, 0x42, 0x30, 0x5e, 0x55, 0x1e, 0xd2, 0xcd, 0xf7, 0x75, 0xce, 0xe3, 0x35, 0xd6, 0x3b, + 0x91, 0x8a, 0xc8, 0x36, 0xa9, 0xd1, 0x82, 0x78, 0x74, 0x41, 0x80, 0x34, 0xd2, 0x17, 0xa0, 0x72, + 0x7c, 0x06, 0x68, 0x24, 0xf2, 0x38, 0xaf, 0xcc, 0xb8, 0xcc, 0x34, 0x89, 0xa0, 0x19, 0x24, 0x0c, + 0x40, 0x15, 0x12, 0xc5, 0xbd, 0x16, 0x08, 0x2c, 0x46, 0x8b, 0xd4, 0xdd, 0x10, 0xd1, 0x68, 0xba, + 0xbc, 0x34, 0x12, 0x65, 0x6c, 0x8a, 0xdd, 0x98, 0x7d, 0x73, 0x0d, 0x89, 0xe5, 0xba, 0x91, 0x21, + 0x77, 0x34, 0xf0, 0xd3, 0xaf, 0x4e, 0x45, 0xcc, 0xf7, 0x6c, 0x87, 0x01, 0x90, 0xfd, 0xfd, 0x71, + 0xb9, 0xad, 0x8e, 0x63, 0xaf, 0x8e, 0x23, 0xb4, 0xda, 0x9c, 0x9a, 0x23, 0x02, 0xa9, 0xb8, 0x90, + 0x5e, 0x65, 0xa6, 0x9e, 0x81, 0x65, 0xaf, 0x5f, 0x7b, 0xa4, 0x70, 0xa7, 0x0b, 0x49, 0xab, 0x34, + 0x11, 0x5b, 0xac, 0xcd, 0x66, 0xef, 
0x3c, 0x16, 0x00, 0xf0, 0x10, 0x73, 0xd9, 0x91, 0x58, 0x53, + 0xc7, 0xcc, 0xd5, 0x1b, 0xd7, 0x0a, 0x8f, 0x69, 0xb1, 0xf3, 0x3a, 0x1b, 0x82, 0x1a, 0xd9, 0x96, + 0x29, 0x41, 0xc9, 0xd4, 0x45, 0x78, 0x46, 0x6d, 0xe1, 0x19, 0x90, 0xcd, 0x3a, 0x24, 0x59, 0x5e, + 0xe8, 0xb6, 0x75, 0x7e, 0x44, 0xba, 0xe2, 0x5f, 0xcb, 0x42, 0xb7, 0x3c, 0x1e, 0x9a, 0x6f, 0xbf, + 0x66, 0xe2, 0xb1, 0xb2, 0xc0, 0xbb, 0x40, 0xf6, 0xb0, 0xff, 0xdc, 0x7b, 0x82, 0x39, 0x09, 0x8c, + 0x40, 0x19, 0x64, 0x92, 0x75, 0xcd, 0xdc, 0xd0, 0xd3, 0xfe, 0x4a, 0xb0, 0x11, 0x34, 0xa4, 0x47, + 0x8d, 0xec, 0x55, 0x95, 0x38, 0x96, 0xb0, 0x41, 0xcf, 0x0a, 0xfd, 0xe9, 0xf1, 0x66, 0xaa, 0x73, + 0xd9, 0x94, 0x78, 0x5c, 0xa1, 0xf6, 0x6d, 0xb4, 0x25, 0x1b, 0xd3, 0xb6, 0xb9, 0x33, 0x2d, 0x76, + 0x9d, 0x6b, 0x23, 0xce, 0xfd, 0xe8, 0xef, 0xf5, 0x47, 0x63, 0x0a, 0x54, 0x2f, 0x45, 0x33, 0x0f, + 0x5b, 0x9f, 0xda, 0x18, 0x11, 0x91, 0xdf, 0x24, 0x61, 0x3c, 0xe7, 0x86, 0xda, 0x64, 0x15, 0xbe, + 0x06, 0xa2, 0xa7, 0xec, 0x89, 0x4a, 0xfa, 0x5a, 0x42, 0xd3, 0x36, 0x60, 0x1f, 0x56, 0x75, 0x45, + 0xda, 0xa9, 0xb1, 0xe3, 0x81, 0x75, 0x52, 0xaf, 0xf0, 0xc8, 0x55, 0xe5, 0x55, 0x5a, 0xd1, 0xb2, + 0xfa, 0x24, 0xfd, 0xcb, 0x30, 0xef, 0xcb, 0x02, 0x94, 0xe9, 0x05, 0x5e, 0x4f, 0xf4, 0x0b, 0x98, + 0x69, 0x86, 0xc3, 0x17, 0x5a, 0xfb, 0x80, 0xe7, 0xa6, 0xca, 0x19, 0xea, 0xf2, 0xdc, 0xf5, 0x07, + 0x38, 0x56, 0x82, 0x8a, 0x0d, 0xae, 0x39, 0x8d, 0xa1, 0x3b, 0xe7, 0xde, 0x0d, 0x39, 0x3c, 0x90, + 0x42, 0xce, 0x91, 0x66, 0x30, 0xea, 0x75, 0x43, 0x42, 0x3d, 0xf4, 0xb3, 0xb8, 0x98, 0x4c, 0x63, + 0xa7, 0xcd, 0x26, 0x82, 0x2e, 0xb9, 0x09, 0x4c, 0x67, 0x84, 0x60, 0xcb, 0x4b, 0xf6, 0x69, 0x57, + 0x07, 0x32, 0x9b, 0xf5, 0x17, 0x03, 0x26, 0xa6, 0x94, 0x99, 0x3c, 0x2e, 0x0e, 0xe0, 0xd4, 0xd5, + 0x06, 0x05, 0x8f, 0xcb, 0x05, 0x0c, 0x97, 0x84, 0xdd, 0x12, 0xeb, 0x70, 0xe1, 0xfe, 0x92, 0x0d, + 0x81, 0x07, 0x31, 0x3d, 0x3f, 0x58, 0x69, 0xef, 0xf2, 0x37, 0xc8, 0xa0, 0x3a, 0xa7, 0xdc, 0xa0, + 0x03, 0xc7, 0xb0, 0x3f, 0x45, 0x72, 0x28, 0xca, 0x60, 0x85, 0x7e, 0x1b, 0xa9, 0xea, 0x2c, 0x69, + 0x98, 0xcb, 0x4a, 0x8d, 0x98, 0x60, 0x42, 0xfa, 0xb9, 0x21, 0xf5, 0x72, 0x52, 0xe1, 0xb0, 0x8b, + 0x84, 0x43, 0x0c, 0x82, 0xb0, 0x1a, 0x16, 0x17, 0x1a, 0x31, 0xfa, 0x97, 0x07, 0x70, 0xb6, 0x91, + 0x7f, 0xa4, 0x20, 0xcc, 0xe5, 0x71, 0x8b, 0x3e, 0xbf, 0x21, 0xef, 0x37, 0x5b, 0xd2, 0x26, 0x5d, + 0xa0, 0xa7, 0x86, 0x6b, 0x42, 0x34, 0x01, 0xb8, 0x4a, 0x50, 0x77, 0x35, 0xd3, 0x2b, 0xd4, 0x41, + 0x00, 0x03, 0x42, 0x71, 0x0e, 0x00, 0xd6, 0x5f, 0x3b, 0x1d, 0x9c, 0xb0, 0xba, 0x95, 0x28, 0x34, + 0xd7, 0x61, 0x69, 0xf0, 0x5d, 0xf8, 0xf2, 0xc1, 0x9c, 0x9c, 0x32, 0xb5, 0xb6, 0x70, 0x64, 0xcc, + 0x7d, 0x3e, 0xe9, 0x86, 0x89, 0x32, 0xc7, 0x80, 0x96, 0x81, 0xf5, 0x6d, 0xb2, 0x0f, 0xa8, 0xe7, + 0xa2, 0x0b, 0xb7, 0x61, 0x5c, 0x52, 0x29, 0x79, 0xc8, 0x00, 0x83, 0x8a, 0xfa, 0xaa, 0x70, 0x11, + 0xb4, 0xdc, 0xde, 0x73, 0x1b, 0x86, 0xeb, 0xce, 0x11, 0xe5, 0xe3, 0x72, 0xde, 0x61, 0x83, 0xfb, + 0x73, 0x86, 0xda, 0xbc, 0xb7, 0xb0, 0xa9, 0xea, 0x6e, 0x04, 0x85, 0x42, 0x7a, 0xa0, 0x86, 0xb8, + 0xf6, 0xc7, 0xcf, 0x28, 0x26, 0xa0, 0x12, 0xb7, 0x9b, 0x30, 0xe1, 0xbf, 0xf3, 0x75, 0xbb, 0x3c, + 0x75, 0xf7, 0xf1, 0x16, 0x30, 0x21, 0x68, 0x7b, 0xe7, 0x33, 0xa7, 0x0d, 0xef, 0x53, 0x47, 0xf1, + 0xf7, 0x2d, 0x2b, 0x82, 0x65, 0x7c, 0xaf, 0x26, 0xc2, 0x66, 0xca, 0xeb, 0x77, 0x2f, 0xf2, 0x9f, + 0x71, 0x7c, 0x59, 0xf9, 0x23, 0xa9, 0xc8, 0xb2, 0x32, 0x17, 0xc9, 0x20, 0x18, 0x95, 0x8a, 0x66, + 0xd7, 0x0a, 0xdf, 0x23, 0x0e, 0x5e, 0x87, 0xcd, 0x9e, 0x0e, 0x04, 0x16, 0xc9, 0xe0, 0x3d, 0x7e, + 0x70, 0x77, 0x99, 0x9c, 0xba, 0xf0, 0x71, 0x3d, 0xad, 0x52, 
0x74, 0xaa, 0xca, 0xd8, 0xe5, 0x03, + 0xe6, 0x85, 0xc4, 0x9f, 0xa8, 0x85, 0xae, 0xc0, 0xa8, 0x28, 0x4f, 0xda, 0x8b, 0xd3, 0x44, 0x8e, + 0xa5, 0xf0, 0x66, 0xbc, 0xa9, 0xaa, 0xc6, 0xdf, 0x21, 0xa1, 0x42, 0x3f, 0x5d, 0xe4, 0x77, 0xc9, + 0x63, 0xb7, 0x21, 0xef, 0x56, 0x34, 0x9e, 0x5a, 0x9d, 0x74, 0xfb, 0x17, 0x33, 0x2a, 0xff, 0x92, + 0xfc, 0x41, 0x33, 0x1c, 0x3c, 0x86, 0x21, 0x6e, 0xff, 0x68, 0x02, 0xfb, 0x8d, 0x1c, 0x3b, 0x80, + 0xbb, 0xa5, 0x4a, 0x89, 0x40, 0x3a, 0x00, 0xfc, 0x76, 0x3c, 0xa6, 0x29, 0x58, 0xdd, 0x7c, 0x03, + 0xa8, 0x9e, 0x15, 0xac, 0x35, 0x70, 0x13, 0x84, 0x06, 0xd3, 0xf9, 0xc0, 0xe7, 0x6a, 0x9e, 0x5b, + 0x2d, 0x16, 0x77, 0x5e, 0x1b, 0x66, 0xb9, 0x06, 0xcf, 0xde, 0x7a, 0x2c, 0x05, 0x2c, 0x3f, 0xf0, + 0x46, 0x89, 0x60, 0xd3, 0x20, 0x83, 0x44, 0x67, 0xbf, 0x36, 0xd6, 0xf5, 0x5b, 0x86, 0xf1, 0xad, + 0x56, 0x28, 0xd5, 0xd4, 0x61, 0x4a, 0xd0, 0xf1, 0xa6, 0xe0, 0xb6, 0x9f, 0xa3, 0x88, 0x6d, 0x01, + 0xa1, 0x2e, 0xe5, 0x81, 0x8d, 0x31, 0xcc, 0x04, 0x0a, 0x1a, 0xe8, 0x37, 0x17, 0xf6, 0x90, 0x95, + 0x5d, 0x4e, 0xce, 0xe0, 0x01, 0xee, 0x6e, 0xa5, 0x13, 0xf5, 0x4a, 0xae, 0x51, 0xb4, 0x7e, 0x51, + 0x33, 0x12, 0x70, 0x74, 0x71, 0x69, 0xf6, 0xc3, 0x13, 0x49, 0x14, 0x48, 0x4f, 0x79, 0x0e, 0x94, + 0xb5, 0x7b, 0xc8, 0x41, 0xa0, 0x97, 0x56, 0x3c, 0xcc, 0x9b, 0x75, 0xc8, 0x1e, 0x0e, 0x33, 0x40, + 0x9f, 0x5a, 0x68, 0xf9, 0xb3, 0x31, 0x89, 0xe3, 0x53, 0x7c, 0x55, 0x53, 0xb9, 0xa4, 0xe8, 0x0b, + 0x3e, 0xd1, 0xa1, 0x94, 0x4e, 0x6d, 0xd1, 0x5c, 0x97, 0x82, 0x96, 0x44, 0xd9, 0xbc, 0x65, 0x91, + 0x64, 0x3c, 0xe3, 0x70, 0x9c, 0xda, 0x94, 0x11, 0x2f, 0x71, 0x71, 0x07, 0xd6, 0x4e, 0x6f, 0x8f, + 0x07, 0x7b, 0x3a, 0x99, 0xb8, 0xcd, 0x8c, 0xae, 0xa7, 0x23, 0xaf, 0x7d, 0x4f, 0x0e, 0x63, 0x1c, + 0x77, 0x3f, 0xb4, 0xbe, 0xec, 0x14, 0xd9, 0xf5, 0x99, 0x46, 0xdd, 0x7b, 0x2a, 0x10, 0xc0, 0x45, + 0x8d, 0x4a, 0x4e, 0x8d, 0x2d, 0x90, 0xf6, 0xca, 0x21, 0xae, 0x20, 0x22, 0x99, 0x48, 0xc7, 0xf6, + 0x4b, 0xa6, 0x37, 0xb9, 0x46, 0xf1, 0xc5, 0x2f, 0x29, 0x23, 0x7c, 0x42, 0x62, 0xd0, 0x10, 0x92, + 0xf9, 0x63, 0x3c, 0x7c, 0x20, 0xf6, 0x98, 0xa1, 0xb0, 0x2f, 0xb7, 0xa8, 0x51, 0x93, 0xd3, 0x3f, + 0xa5, 0x3c, 0xd0, 0xfb, 0x7c, 0x05, 0xe5, 0xe8, 0x8c, 0xfc, 0x27, 0x33, 0x8f, 0x7f, 0xca, 0x49, + 0x6b, 0xa0, 0x33, 0x51, 0xcd, 0x57, 0x4d, 0x13, 0x28, 0xff, 0xb3, 0x66, 0x77, 0x0f, 0x07, 0xd8, + 0x07, 0x47, 0xb3, 0x7c, 0x8d, 0x04, 0xc3, 0x14, 0x57, 0x14, 0x05, 0x66, 0x03, 0x31, 0x18, 0xf1, + 0xd3, 0x62, 0x48, 0x7e, 0x72, 0x76, 0xe3, 0x74, 0x43, 0xfa, 0xb7, 0x0e, 0x79, 0x34, 0xdd, 0x72, + 0xe5, 0x9e, 0x78, 0xed, 0x02, 0x9d, 0x3e, 0x79, 0x47, 0x03, 0x78, 0xc2, 0x49, 0xa7, 0x99, 0x4f, + 0x60, 0x85, 0x91, 0xd7, 0xf2, 0xdc, 0x25, 0x6f, 0xf5, 0xb5, 0x3a, 0x5c, 0x5d, 0x7c, 0xf5, 0xc1, + 0x73, 0xca, 0x23, 0xd2, 0x4b, 0xe8, 0xd0, 0x3e, 0xc5, 0x01, 0xa7, 0x42, 0x7e, 0x18, 0x43, 0x8d, + 0x48, 0xee, 0xe9, 0xda, 0xc2, 0x83, 0x7d, 0xd4, 0x70, 0x41, 0x73, 0x03, 0x97, 0x92, 0x80, 0xf6, + 0x70, 0x14, 0x76, 0x6a, 0xf9, 0x5b, 0x76, 0x96, 0x23, 0xc2, 0xd7, 0x81, 0xe2, 0x01, 0x2c, 0xcd, + 0x56, 0x52, 0x8a, 0x96, 0x0f, 0xdc, 0xfe, 0x6f, 0xa9, 0xa5, 0xec, 0xdc, 0x79, 0x09, 0x44, 0x21, + 0x63, 0x5f, 0x23, 0x01, 0x97, 0xe3, 0xc3, 0x00, 0x9c, 0xb6, 0xde, 0x43, 0x75, 0x7b, 0x33, 0x9d, + 0x53, 0x11, 0x40, 0xd6, 0x1d, 0x59, 0xfd, 0xe3, 0x18, 0xa6, 0x72, 0x6e, 0x23, 0xba, 0x68, 0x69, + 0x02, 0x5e, 0xda, 0x29, 0x83, 0x8a, 0x67, 0x2c, 0xb2, 0xb0, 0x2d, 0x76, 0x60, 0xd7, 0x33, 0x43, + 0x55, 0x2e, 0x25, 0x65, 0xcb, 0xed, 0xe1, 0x9c, 0xad, 0x20, 0xff, 0x7e, 0x9b, 0xb3, 0x94, 0xef, + 0x8a, 0xbb, 0x67, 0x1d, 0xb2, 0x5f, 0xbb, 0xfa, 0x23, 0x2d, 0x90, 0x1f, 0xff, 0xed, 
0xcf, 0x7f, + 0xeb, 0x3b, 0xae, 0x2f, 0xe4, 0xe0, 0xe0, 0x93, 0x1b, 0xf0, 0x91, 0x01, 0x2a, 0x9c, 0xc3, 0x1e, + 0xd6, 0xff, 0xb7, 0x3e, 0xa0, 0x14, 0xd8, 0xf5, 0x36, 0x78, 0xdc, 0x0f, 0xc8, 0x8b, 0xd5, 0xf1, + 0x63, 0x04, 0xfa, 0xdc, 0x2b, 0x15, 0xf5, 0xef, 0xb3, 0x6d, 0x43, 0xf5, 0x89, 0x1b, 0xc2, 0x26, + 0xde, 0x38, 0x5a, 0x22, 0x1a, 0xed, 0x4a, 0xb1, 0x6b, 0x7e, 0x8e, 0x40, 0x56, 0xec, 0xbb, 0xa9, + 0x1b, 0x17, 0x96, 0x97, 0x02, 0xaf, 0x5e, 0x68, 0xaf, 0xca, 0x88, 0x09, 0x92, 0x2e, 0xd5, 0x1d, + 0x1f, 0x1e, 0x47, 0x44, 0x45, 0x80, 0x50, 0x6c, 0x52, 0x6f, 0xb0, 0xf0, 0x36, 0xb5, 0x59, 0x79, + 0x2a, 0xd7, 0x4c, 0x00, 0xc9, 0x59, 0x12, 0xf2, 0x56, 0x0f, 0x05, 0x54, 0x19, 0x16, 0x38, 0xf6, + 0x75, 0x9c, 0xf2, 0xc0, 0xff, 0xd7, 0x1c, 0xe0, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, + 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, + 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, + 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xff, 0x8d, + 0xff, 0x03, 0x19, 0x52, 0x34, 0xad, 0x00, 0x8c, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 35840, // uncompressed data size (bytes) + 24474, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x4e, 0x00, 0x62, 0x1d, 0x08, 0x13, 0x4c, 0xc4, 0x43, 0x69, + 0x20, 0x00, 0x00, 0x6e, 0x8b, 0xb6, 0xe9, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 35840 +// COMPRESSED SIZE (bytes): 24478 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_image_prod_data[] = +{ + 0xed, 0xfc, 0x43, 0x90, 0x28, 0x4d, 0xd7, 0x00, 0xea, 0xb6, 0x6d, 0x5b, 0xbb, 0x6d, 0x9b, 0xbb, + 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0x76, 0xf7, 0x79, 0xff, 0x3b, + 0xbb, 0xdf, 0xec, 0x0c, 0x4f, 0xc4, 0x7e, 0x26, 0x99, 0x59, 0x11, 0x55, 0xab, 0x06, 0x95, 0x19, + 0xb5, 0xaa, 0x22, 0x97, 0x38, 0x00, 0xc2, 0x4c, 0x3a, 0x40, 0x2c, 0x00, 0x28, 0x40, 0xf7, 0x51, + 0xde, 0x7b, 0xf3, 0x0f, 0xa0, 0x38, 0x00, 0xe2, 0xff, 0x1d, 0x80, 0x00, 0x05, 0xf8, 0xff, 0x0d, + 0xc3, 0x01, 0x00, 0x00, 0x7f, 0x92, 0x00, 0x7c, 0xe8, 0x00, 0x00, 0xde, 0x80, 0xde, 0x00, 0xde, + 0x81, 0xa2, 0x01, 0x80, 0x00, 0x7a, 0x77, 0x76, 0x7f, 0x7f, 0x7f, 0xe1, 0xc3, 0x01, 0x00, 0x01, + 0x7e, 0x73, 0x41, 0x1a, 0x8f, 0x00, 0x98, 0xd2, 0xd6, 0x01, 0x1b, 0x93, 0x00, 0xa8, 0xd3, 0xd6, + 0x81, 0xfe, 0x6b, 0x48, 0xd2, 0xd6, 0x81, 0xff, 0x6b, 0xf0, 0xfe, 0x3b, 0x17, 0x20, 0x2d, 0x17, + 0xb8, 0x79, 0x17, 0xb8, 0x37, 0x37, 0x6f, 0x37, 0x2d, 0x17, 0x30, 0x2d, 0x1d, 0xa8, 0x79, 0x17, + 0xb0, 0x79, 0x1d, 0xc8, 0x67, 0x05, 0x00, 0x00, 0x9e, 0x41, 0x1c, 0x00, 0xe1, 0x35, 0x17, 0x00, + 0xfe, 0xbf, 0x18, 0xa6, 0x20, 0xff, 0xd7, 0x81, 0x6d, 0x4b, 0x07, 0x00, 0x04, 0xfa, 0xaf, 0xf7, + 0xc4, 0x0d, 0x14, 0x0b, 0x00, 0x00, 0x16, 0xfe, 0x5f, 0xa8, 0xef, 0x5c, 0xd0, 0xee, 0xa7, 0x8f, + 0x77, 0xb0, 0x37, 0xe0, 0xff, 0xae, 0x06, 0x94, 0x0c, 0x0f, 0xb1, 0xf3, 0x7f, 0x91, 0xbf, 0xdf, + 0x41, 0xc3, 0x01, 0xf4, 0x00, 0xbe, 0x73, 0x40, 0x3a, 0xe3, 0x80, 0x5a, 0x92, 0x10, 0xfe, 0x1b, + 0xff, 0x7c, 0x00, 0xf8, 0xfc, 0x77, 0x3f, 0x6f, 0x40, 0xdd, 0x45, 0x9f, 0x80, 0xb1, 0x48, 0xea, + 0xb0, 0xe1, 0x04, 0x54, 0x00, 0x79, 0xb9, 0x6f, 0x00, 0xb1, 0xff, 0xb5, 0xbd, 0xbf, 0x4f, 0xf4, + 0x5f, 0x3f, 0xe2, 0x80, 0x79, 0xb9, 0xbc, 0x77, 0x00, 0xff, 0x7f, 0x46, 0x02, 0x59, 0xd7, 0x12, + 0xd3, 0xbc, 0x4d, 0x59, 0x51, 0xd6, 0x51, 0xa3, 0x19, 0xd6, 0x68, 0x52, 0xdf, 0x19, 0x1b, 0xf9, + 0xef, 0x05, 0x37, 0xae, 0xc5, 0x06, 0x87, 0x1c, 0xfc, 0x8a, 0xbd, 0x51, 0xa8, 0x68, 0x7a, 0x08, + 0x79, 0x5c, 0xed, 0x8a, 0xa0, 0x63, 0xe2, 0xd5, 0x26, 0xcb, 0x8b, 0x4e, 0x35, 0x3c, 0x3a, 0xee, + 0x87, 0x83, 0xad, 0x35, 0xca, 0x78, 0x56, 0x80, 0xad, 0x55, 0xa5, 0x4f, 0xd0, 0x54, 0x69, 0xc3, + 0x08, 0x29, 0x87, 0x5e, 0x64, 0x3f, 0x2a, 0x64, 0xe9, 0x9e, 0x7d, 0x0c, 0xc9, 0x35, 0x30, 0x0b, + 0xf0, 0x48, 0x2f, 0x00, 0x42, 0x96, 0x8e, 0x5f, 0xb2, 0xcf, 0xbd, 0x2b, 0x7f, 0xc4, 0xc6, 0x91, + 0xf1, 0x18, 0xc8, 0x29, 0x55, 0xd4, 0x19, 0xac, 0x59, 0x8e, 0xc1, 0x7b, 0x93, 0xb9, 0x99, 0x91, + 0x77, 0x76, 0x3b, 0xaf, 0xee, 0x30, 0x99, 0xc4, 0x45, 0xb6, 0x03, 0x02, 0x06, 0x72, 0x10, 0x82, + 0xc5, 0x80, 0x3e, 0x08, 0x28, 0x00, 0x2f, 0x20, 0x33, 0x64, 0xc3, 0xe3, 0xfe, 0x76, 0xa4, 0x90, + 0xf2, 0x73, 0x04, 0xea, 0xea, 0x64, 0x05, 0x19, 0xa1, 0xd4, 0x5a, 0xbb, 0x34, 0x2a, 0x62, 0xd8, + 0x93, 0x20, 0x9a, 0xf5, 0x10, 0xde, 0xb5, 0xc2, 0xbe, 0x57, 0x70, 0x04, 0xdc, 0xd3, 0x17, 0xb3, + 0x19, 0xdd, 0xf8, 0xbd, 0xb1, 0x0e, 0x48, 0x2a, 0x50, 0x06, 0xad, 0xed, 0x7c, 0x55, 0x4a, 0x42, + 0x37, 0x78, 0x93, 0x56, 0xf1, 0xed, 0xaf, 0xe0, 0x1b, 0x0e, 
0x59, 0xf7, 0x6b, 0x4d, 0xb0, 0x48, + 0x15, 0x65, 0xfd, 0x7a, 0xe0, 0x11, 0x1b, 0x18, 0xbf, 0x98, 0x3c, 0xd6, 0x3d, 0x9c, 0x63, 0x00, + 0xf3, 0xbd, 0x9a, 0x26, 0xdd, 0x7e, 0x0b, 0x02, 0x27, 0x03, 0x6e, 0x0d, 0x41, 0xce, 0xa4, 0x50, + 0x7c, 0x07, 0xfa, 0xe2, 0x17, 0x0d, 0x77, 0xf7, 0xdf, 0xf2, 0xce, 0x34, 0x86, 0x16, 0xd6, 0x54, + 0xcd, 0xf4, 0xd4, 0x77, 0xeb, 0x7e, 0x74, 0x4e, 0x10, 0x7f, 0x00, 0x4a, 0xfd, 0x0c, 0xc2, 0xbd, + 0x96, 0x42, 0x15, 0x36, 0xa1, 0xa9, 0x86, 0x3a, 0x6d, 0x79, 0x57, 0x00, 0xac, 0x8f, 0x91, 0x92, + 0xe9, 0x23, 0xad, 0x9e, 0x24, 0xc8, 0x79, 0xe3, 0x62, 0xfa, 0x98, 0x1c, 0xb9, 0x3a, 0xba, 0x22, + 0xdc, 0x1c, 0xb7, 0x3a, 0xa9, 0x86, 0x64, 0x90, 0x92, 0x30, 0xf7, 0xac, 0x91, 0x98, 0xb4, 0xbc, + 0xa7, 0x26, 0xc0, 0x45, 0xc9, 0xd7, 0xab, 0x31, 0x1e, 0x7d, 0x89, 0xcb, 0xa1, 0x8b, 0x61, 0x24, + 0x9c, 0xb3, 0x8d, 0x0e, 0xae, 0x9a, 0x0f, 0xa9, 0x6b, 0x76, 0x69, 0x87, 0x4b, 0x5b, 0xb4, 0x76, + 0x53, 0xb1, 0xfb, 0x7a, 0xed, 0xde, 0x1b, 0xa1, 0xe8, 0xf8, 0xbb, 0xd8, 0x00, 0xf2, 0x27, 0x85, + 0x3b, 0x7a, 0x62, 0xee, 0x50, 0x26, 0xc2, 0xcc, 0x6f, 0x80, 0x66, 0xc7, 0x57, 0x1f, 0x23, 0x27, + 0xb6, 0x0d, 0x3c, 0x74, 0x98, 0xca, 0x70, 0x7d, 0x95, 0x62, 0xba, 0xcd, 0xb7, 0x77, 0x7c, 0x26, + 0x74, 0x0e, 0x9c, 0x35, 0x26, 0x01, 0xad, 0x6a, 0xdf, 0x27, 0x9f, 0xf1, 0x5d, 0x81, 0xe5, 0x3f, + 0xde, 0x2e, 0x44, 0x38, 0x8f, 0x6e, 0x49, 0x27, 0x42, 0x04, 0xcf, 0xf9, 0xbd, 0x30, 0xa9, 0x82, + 0x6f, 0x29, 0x9a, 0x4d, 0x64, 0x35, 0x58, 0x52, 0xf8, 0xa0, 0xa5, 0x18, 0x08, 0x2c, 0xe0, 0x6b, + 0xa3, 0x57, 0x3e, 0x59, 0xac, 0xdf, 0x74, 0xd0, 0x4f, 0x66, 0x23, 0x98, 0xf9, 0x63, 0x8f, 0x2e, + 0xb8, 0xbb, 0x91, 0x2e, 0x72, 0x4c, 0x2c, 0xb6, 0x24, 0x42, 0x12, 0x7e, 0x3b, 0x2c, 0x09, 0x01, + 0x4a, 0x64, 0x7d, 0xf6, 0xba, 0x80, 0x13, 0x54, 0xea, 0xe7, 0x26, 0x2e, 0xe1, 0x63, 0x3e, 0xe5, + 0x6c, 0x14, 0x3d, 0x2f, 0xe7, 0xbd, 0xaf, 0xe5, 0xf7, 0x64, 0x0a, 0xba, 0xff, 0x35, 0xb3, 0x04, + 0x56, 0x4f, 0x80, 0x49, 0x1e, 0x98, 0x88, 0x24, 0x26, 0x9d, 0xce, 0x5e, 0xc0, 0xc5, 0xb9, 0xcf, + 0x46, 0xcb, 0xa9, 0x62, 0x55, 0x54, 0x46, 0xcd, 0x82, 0xe4, 0xc5, 0xe1, 0xaa, 0x49, 0x7b, 0x78, + 0x95, 0xac, 0x1b, 0x12, 0xf5, 0xfa, 0xc3, 0x74, 0xa1, 0x9e, 0x3c, 0x5f, 0x70, 0x1c, 0x26, 0xd8, + 0xcf, 0xb8, 0xb1, 0x96, 0x1d, 0x49, 0x36, 0xd8, 0x6a, 0xda, 0x11, 0xc3, 0x65, 0xcb, 0x45, 0x09, + 0x41, 0xaf, 0x89, 0x37, 0x83, 0x91, 0xa0, 0x62, 0xfe, 0x55, 0x37, 0xfd, 0x0e, 0x11, 0x47, 0x13, + 0x5c, 0x11, 0x3c, 0x39, 0x31, 0x8b, 0xe5, 0x77, 0xdb, 0x85, 0x9b, 0x46, 0xbb, 0xf1, 0x42, 0x6e, + 0x69, 0x85, 0x28, 0x29, 0x9c, 0x92, 0xcc, 0x38, 0xd5, 0x0d, 0x28, 0x84, 0xd7, 0xb8, 0xe6, 0x9c, + 0xd4, 0x95, 0xf4, 0xf2, 0xd8, 0x5d, 0x8d, 0x9f, 0x56, 0x2c, 0xa2, 0xe9, 0xf9, 0xfd, 0x67, 0x2c, + 0x65, 0x88, 0x33, 0x14, 0x06, 0x2e, 0x94, 0x8d, 0x2a, 0x44, 0x3d, 0xc4, 0x5f, 0xd5, 0x9a, 0x2d, + 0x03, 0xd9, 0xba, 0xaf, 0x9b, 0x9b, 0x7b, 0x1f, 0xd6, 0xc1, 0x15, 0x02, 0x43, 0x32, 0xce, 0x9a, + 0x53, 0x38, 0xb4, 0x79, 0x60, 0x3b, 0x0f, 0x1e, 0x3c, 0x97, 0xf1, 0x6f, 0x2d, 0xea, 0x52, 0xb6, + 0xdf, 0xec, 0xfc, 0xdf, 0x0a, 0x6d, 0x32, 0x45, 0x73, 0x00, 0x91, 0x88, 0xc5, 0x9f, 0x24, 0xe3, + 0xd6, 0x72, 0xfd, 0x47, 0x2b, 0x11, 0xed, 0x2b, 0xa8, 0xd7, 0xcb, 0xe3, 0x68, 0xbe, 0x35, 0x4d, + 0xf4, 0xb6, 0xf2, 0x9d, 0xee, 0x8e, 0xd7, 0x9f, 0x9f, 0xc2, 0x96, 0x0c, 0x38, 0x7a, 0x45, 0xd0, + 0xd7, 0xb9, 0x14, 0x85, 0xca, 0xdb, 0x17, 0x65, 0x64, 0xc5, 0x9a, 0x30, 0xd9, 0x4f, 0x78, 0x92, + 0x46, 0xfc, 0xb8, 0xe3, 0xea, 0xd5, 0x0c, 0xc3, 0xe3, 0xd4, 0xdc, 0xec, 0xac, 0x56, 0x20, 0xe8, + 0x74, 0xec, 0x41, 0xda, 0x41, 0x65, 0x50, 0xde, 0x1b, 0x65, 0x4f, 0xb8, 0xee, 0x4c, 
0xde, 0xe4, + 0xe1, 0xd5, 0xa1, 0x3b, 0xa0, 0xf9, 0x2d, 0xf5, 0x9d, 0xf3, 0x35, 0x89, 0xf7, 0x94, 0xeb, 0x40, + 0xf3, 0xc3, 0x44, 0xb4, 0x91, 0x10, 0xae, 0xd2, 0xe6, 0x98, 0x7d, 0x8b, 0x82, 0xf8, 0x0b, 0xdb, + 0x96, 0x69, 0xeb, 0x3d, 0x56, 0xad, 0x45, 0xc7, 0x8d, 0x12, 0x9a, 0xf0, 0x7b, 0x79, 0x06, 0xc7, + 0xae, 0x12, 0xf5, 0x95, 0x06, 0x53, 0xd7, 0x9a, 0xef, 0xd6, 0xa9, 0xeb, 0xac, 0xa7, 0x2a, 0x01, + 0x89, 0x36, 0xb9, 0x3c, 0x5d, 0xf7, 0x44, 0x1b, 0x2e, 0xaf, 0x91, 0xc7, 0x4d, 0x73, 0x9c, 0x98, + 0x86, 0x9e, 0x27, 0xd7, 0x2e, 0x0c, 0x07, 0xc8, 0x76, 0x95, 0x5d, 0xab, 0x94, 0x03, 0x41, 0x0d, + 0x3b, 0x0b, 0x0a, 0x92, 0x9c, 0x15, 0xc7, 0x74, 0x1a, 0xe3, 0xd1, 0xd0, 0x42, 0xf4, 0x5c, 0x89, + 0x1d, 0xf9, 0xcb, 0xb1, 0xa8, 0x18, 0xc0, 0xdd, 0x9b, 0x85, 0x29, 0xdd, 0xb4, 0xbb, 0xa8, 0x39, + 0x43, 0x58, 0x0f, 0x25, 0x87, 0x58, 0xd0, 0x8b, 0x7c, 0x1c, 0x30, 0x26, 0x95, 0x17, 0x56, 0xb6, + 0x9d, 0xa4, 0xdc, 0x95, 0xfa, 0x38, 0x0e, 0x0c, 0xb2, 0xf0, 0x57, 0xdd, 0xd6, 0x0e, 0x9b, 0x61, + 0x51, 0x1d, 0xc6, 0x8f, 0x88, 0x86, 0xed, 0x25, 0x24, 0xaf, 0x74, 0x61, 0xe0, 0x61, 0x8d, 0x61, + 0xa8, 0x07, 0xe0, 0xac, 0x3a, 0xe8, 0x53, 0x3e, 0x4b, 0x78, 0x56, 0xdc, 0x69, 0xb1, 0x48, 0xa4, + 0xc8, 0x1a, 0xbe, 0xbc, 0x69, 0xd3, 0xa3, 0xc2, 0x06, 0x69, 0x70, 0x89, 0x54, 0x32, 0xa5, 0x5c, + 0x75, 0x94, 0x0d, 0xfb, 0x0d, 0x93, 0x6a, 0xf7, 0x3a, 0x40, 0x59, 0xe2, 0x33, 0x79, 0xe0, 0xb4, + 0x29, 0x2e, 0x94, 0xdc, 0xa5, 0x4c, 0x3f, 0x52, 0x7c, 0xb3, 0x76, 0x75, 0x17, 0x07, 0xcc, 0x3c, + 0xa4, 0xdb, 0x72, 0x9f, 0xf3, 0xd3, 0x2a, 0x5d, 0x8a, 0xa4, 0x15, 0x83, 0x10, 0x6a, 0x65, 0x77, + 0x8d, 0x77, 0x78, 0xaf, 0x8d, 0x72, 0x18, 0x93, 0x85, 0x28, 0x17, 0x33, 0x80, 0x3e, 0x81, 0x2a, + 0x3e, 0x81, 0x42, 0x88, 0x8b, 0x68, 0x18, 0x02, 0x4e, 0xf5, 0x41, 0x7e, 0x8a, 0x77, 0xd1, 0xf1, + 0x1b, 0x5f, 0xb6, 0xdc, 0x86, 0xac, 0xe6, 0x4a, 0x1e, 0xbb, 0x0c, 0xe5, 0xee, 0x9c, 0xf1, 0x83, + 0xb8, 0xfb, 0xd5, 0x90, 0xf1, 0xd6, 0x12, 0x64, 0xeb, 0x4f, 0x9b, 0xac, 0x25, 0x0f, 0x13, 0xe4, + 0x04, 0xc8, 0x67, 0xee, 0x0c, 0x53, 0xdc, 0x85, 0x85, 0x38, 0x4f, 0xc4, 0x9b, 0x63, 0x90, 0x0c, + 0x28, 0x24, 0xf0, 0xde, 0xf8, 0x99, 0xb9, 0x09, 0x78, 0x7f, 0x42, 0x3e, 0xf4, 0x10, 0xa6, 0x7b, + 0x07, 0xc1, 0x6b, 0xc3, 0x20, 0x89, 0x3f, 0x98, 0x59, 0xb3, 0x77, 0xb7, 0x74, 0xa4, 0x29, 0x3c, + 0xa4, 0x81, 0x4b, 0x9b, 0x62, 0x28, 0x30, 0xd0, 0x3a, 0xf6, 0xa5, 0x1c, 0x3d, 0xac, 0x54, 0xe1, + 0x6f, 0x16, 0xaf, 0xfd, 0xf3, 0x6b, 0xbb, 0x27, 0xc7, 0xec, 0xb2, 0x17, 0x2a, 0x42, 0x34, 0xb9, + 0xa2, 0x13, 0x79, 0x47, 0xbb, 0x05, 0x92, 0xd8, 0x92, 0xf7, 0x27, 0x95, 0x2a, 0xac, 0x7c, 0x48, + 0x45, 0x1d, 0x13, 0x81, 0x01, 0x95, 0x9f, 0x66, 0x94, 0x7d, 0xbe, 0x38, 0x70, 0x6d, 0xcf, 0x95, + 0x9a, 0x3b, 0x51, 0x74, 0x2f, 0x5a, 0xc3, 0xbc, 0x05, 0x30, 0x66, 0x0d, 0xa6, 0x72, 0x51, 0xb5, + 0xbc, 0x6d, 0x88, 0x26, 0x40, 0x2d, 0x1e, 0x0e, 0xdf, 0x83, 0x24, 0xb6, 0x7d, 0x86, 0xa2, 0xac, + 0x1d, 0xcb, 0xaa, 0x81, 0x61, 0x51, 0xd2, 0xac, 0xc1, 0x27, 0x86, 0xbe, 0xd3, 0x7f, 0xce, 0x95, + 0xd0, 0x88, 0x28, 0xd2, 0x9d, 0xa2, 0x7b, 0x95, 0x6f, 0xd9, 0xf6, 0x13, 0xfc, 0xd1, 0x0d, 0x6d, + 0xe4, 0x54, 0xfc, 0xdd, 0x8e, 0xc5, 0x18, 0x30, 0x25, 0x4f, 0x82, 0x72, 0x8f, 0xb3, 0x06, 0x0f, + 0xe8, 0x7b, 0xc9, 0x42, 0x5a, 0x77, 0x11, 0xdc, 0xe6, 0x6d, 0xd6, 0xb3, 0x74, 0x7f, 0x54, 0x2d, + 0x54, 0xf7, 0x98, 0xca, 0x05, 0x7a, 0x79, 0xa6, 0x70, 0x04, 0xb0, 0x24, 0x7d, 0x8f, 0xdb, 0xec, + 0x12, 0x7d, 0xb9, 0x21, 0xeb, 0x77, 0x9c, 0x84, 0xc2, 0x8a, 0x94, 0x65, 0x93, 0x7c, 0x38, 0x61, + 0x50, 0x37, 0x69, 0x00, 0x9b, 0xf2, 0x55, 0xe0, 0xae, 0x6e, 0xa8, 0x1a, 0xa0, 0x03, 0x15, 0x8f, + 0x90, 0x0c, 
0xed, 0x81, 0xbb, 0x49, 0x9d, 0x1b, 0x47, 0xb0, 0xef, 0x13, 0xcc, 0xc4, 0xc2, 0x89, + 0x51, 0x6d, 0xbf, 0xc8, 0x31, 0xe4, 0xb5, 0x1b, 0x30, 0x08, 0xee, 0x86, 0xd4, 0x12, 0x47, 0x23, + 0x9d, 0xa5, 0xb9, 0x1a, 0x4c, 0xe7, 0xb0, 0xd7, 0x74, 0xea, 0xd8, 0x44, 0x23, 0x1c, 0xc5, 0x66, + 0xe2, 0x93, 0x29, 0xd7, 0x01, 0x40, 0x03, 0xe2, 0xc6, 0x46, 0xa1, 0xf8, 0xb4, 0xf9, 0xd2, 0x63, + 0xb2, 0x54, 0x4a, 0x63, 0x34, 0x27, 0xe4, 0x72, 0xe9, 0x2f, 0x7a, 0x9e, 0xc0, 0x66, 0x92, 0x78, + 0x52, 0xd7, 0x39, 0xac, 0x02, 0x98, 0x17, 0x15, 0x01, 0xa3, 0xbf, 0x71, 0x07, 0xa8, 0x14, 0x74, + 0x73, 0xb8, 0x6a, 0x1e, 0x52, 0x45, 0x9c, 0x1e, 0x99, 0x83, 0x2b, 0x0c, 0x36, 0xa1, 0x89, 0x4d, + 0x74, 0xe6, 0xe7, 0xb4, 0xeb, 0xab, 0xae, 0xc4, 0x01, 0xd3, 0xdf, 0x58, 0x0f, 0x3a, 0xc9, 0x6a, + 0x2c, 0x9b, 0x11, 0xca, 0xc3, 0x65, 0x0b, 0xb6, 0x6d, 0x84, 0x49, 0xc1, 0x43, 0x56, 0xf1, 0x3e, + 0xa4, 0x83, 0x1d, 0xca, 0x6a, 0x44, 0x69, 0xbc, 0xbc, 0x96, 0x9d, 0x30, 0x8f, 0x74, 0x57, 0x50, + 0xd4, 0x26, 0x7d, 0x30, 0x87, 0xf4, 0x0b, 0x29, 0xea, 0x9a, 0x93, 0x35, 0xb5, 0xe2, 0xaf, 0xaf, + 0xeb, 0x7e, 0x7e, 0x52, 0x22, 0x19, 0xe8, 0xbe, 0x5d, 0xbd, 0x5a, 0x1c, 0xba, 0x46, 0xeb, 0xa2, + 0xbc, 0xf3, 0xe3, 0x17, 0x0f, 0xab, 0x1c, 0xba, 0x99, 0x03, 0xcb, 0x0a, 0x70, 0xe2, 0x6b, 0xd1, + 0x8f, 0x01, 0xdb, 0x02, 0x8b, 0xe8, 0x37, 0xb2, 0x67, 0xc7, 0xc5, 0x6a, 0x30, 0xb9, 0xe6, 0x31, + 0x4b, 0x3b, 0x6a, 0x7c, 0x64, 0xc9, 0xdf, 0x08, 0x8d, 0xa4, 0xb8, 0xe1, 0x4e, 0x24, 0xdf, 0x66, + 0x24, 0x51, 0x9d, 0x85, 0x4d, 0x0f, 0xf6, 0xed, 0xe1, 0xb4, 0xb8, 0x4a, 0xe0, 0x8d, 0x81, 0xe9, + 0x8d, 0x5a, 0x85, 0xf0, 0x37, 0xca, 0x18, 0xaa, 0x71, 0xc4, 0x22, 0x5c, 0x5f, 0xc9, 0x76, 0xa1, + 0xe1, 0x86, 0xe7, 0xbf, 0x9f, 0x71, 0xe1, 0x22, 0x33, 0xbc, 0x77, 0xbf, 0x5b, 0x5a, 0x74, 0x2b, + 0x6b, 0x3b, 0x0d, 0x5e, 0x38, 0x40, 0xce, 0xfd, 0xa5, 0x6d, 0x2a, 0x4f, 0x68, 0x41, 0x03, 0x85, + 0x20, 0x23, 0x81, 0xfc, 0x6a, 0x28, 0xb7, 0x39, 0x56, 0x12, 0x8a, 0xe6, 0x25, 0x76, 0xd9, 0x17, + 0x81, 0x11, 0x0c, 0x1d, 0x28, 0x43, 0xa2, 0x27, 0xda, 0x57, 0x5c, 0x19, 0xea, 0xc3, 0x12, 0x2a, + 0xdd, 0x81, 0x04, 0x4e, 0x51, 0xdb, 0x52, 0x26, 0x74, 0xe3, 0x49, 0xfc, 0xca, 0x1e, 0xbd, 0x0f, + 0x1b, 0xa3, 0x11, 0x94, 0x1e, 0xc2, 0x02, 0xed, 0xef, 0x79, 0xa9, 0x46, 0x55, 0x40, 0x02, 0xb2, + 0x7a, 0x0e, 0xb5, 0xfa, 0x5e, 0xcf, 0x69, 0x3d, 0x46, 0x4a, 0x95, 0xdb, 0x1b, 0x6b, 0x4b, 0xf6, + 0x01, 0xc6, 0xc6, 0x8d, 0x0b, 0x0e, 0x05, 0x72, 0x0f, 0x98, 0xc7, 0x74, 0x9e, 0x7d, 0x15, 0x10, + 0x6a, 0xf8, 0x7e, 0x62, 0xd4, 0x57, 0x84, 0xb8, 0xb7, 0x9a, 0xc4, 0x91, 0xdf, 0x23, 0xb6, 0x44, + 0xcb, 0xf4, 0xef, 0x93, 0xa1, 0xb4, 0x04, 0xbc, 0x2e, 0x7d, 0xf0, 0x72, 0x85, 0xc0, 0xfb, 0x3e, + 0xbe, 0xcc, 0xc4, 0x81, 0xff, 0x82, 0x4c, 0xdb, 0x2f, 0x8e, 0x81, 0x04, 0xbd, 0x57, 0x76, 0x04, + 0xf0, 0xe3, 0x1c, 0x16, 0x41, 0x3f, 0x57, 0xc9, 0x01, 0x4a, 0xcd, 0xc0, 0x65, 0xaa, 0xc1, 0x29, + 0xe2, 0x61, 0x05, 0x12, 0x36, 0x1d, 0x00, 0x01, 0x2e, 0xca, 0x2b, 0x89, 0x6c, 0x31, 0x21, 0x9d, + 0x6e, 0x55, 0xe5, 0x89, 0xb5, 0x3f, 0x37, 0x84, 0x1b, 0xf1, 0xce, 0x95, 0xa3, 0x65, 0xa8, 0x80, + 0xc7, 0x90, 0x82, 0xa9, 0x4a, 0xeb, 0x87, 0x6e, 0xc4, 0xef, 0xa7, 0x46, 0x90, 0xf4, 0x8b, 0xbe, + 0xd4, 0x25, 0x70, 0xab, 0x6d, 0x0e, 0xc8, 0x15, 0xd6, 0xa7, 0x63, 0xb2, 0xbc, 0x82, 0xaa, 0x31, + 0xa9, 0x2c, 0xc8, 0x37, 0x01, 0x5c, 0x41, 0x62, 0x37, 0xf9, 0x33, 0xc7, 0x3e, 0x7b, 0x67, 0xde, + 0x3a, 0xd0, 0x95, 0xd7, 0x67, 0x8d, 0xe1, 0xda, 0x98, 0x12, 0xde, 0x91, 0xcd, 0x9c, 0x39, 0xf2, + 0x92, 0x52, 0xa5, 0xe3, 0x21, 0xae, 0xf7, 0x73, 0x0e, 0xa3, 0x8b, 0x2d, 0x38, 0x67, 0x18, 0x56, + 0xb1, 0x0b, 0x6f, 0x6b, 0x36, 0x91, 
0x02, 0x24, 0x36, 0xde, 0x5c, 0x95, 0xd1, 0xe9, 0xff, 0x40, + 0xb6, 0xc1, 0xcb, 0x1a, 0x99, 0xd8, 0x9f, 0x21, 0xf0, 0xa2, 0x7e, 0xd6, 0xe9, 0x3d, 0x9a, 0xef, + 0xe0, 0x67, 0xae, 0x91, 0x16, 0x27, 0xf1, 0x86, 0x8f, 0xaf, 0x23, 0xbb, 0x9d, 0x93, 0xa0, 0xa6, + 0xc2, 0x7b, 0xf3, 0x26, 0x95, 0xbc, 0x97, 0x9a, 0xae, 0xf7, 0xd3, 0x9b, 0x99, 0x04, 0xfc, 0x0d, + 0x9c, 0x5e, 0x59, 0xe3, 0x0e, 0x2e, 0x8c, 0x86, 0x2f, 0xdc, 0x79, 0x85, 0x3b, 0x1b, 0xb0, 0x77, + 0x25, 0xb4, 0x84, 0xfa, 0xeb, 0x3b, 0x95, 0x31, 0xf6, 0x67, 0x53, 0x3b, 0x94, 0x0b, 0x2e, 0xeb, + 0xca, 0x9d, 0x26, 0x1e, 0x4c, 0x5d, 0x39, 0x00, 0xf1, 0xd6, 0xa8, 0x28, 0xf5, 0xcb, 0xa3, 0xeb, + 0x38, 0x25, 0x2a, 0x1f, 0xd5, 0xa3, 0xe1, 0xbb, 0xc7, 0x04, 0xd8, 0xd6, 0x3e, 0xcc, 0x78, 0x88, + 0x40, 0xd6, 0x03, 0x06, 0x30, 0xd8, 0x50, 0x92, 0x92, 0x58, 0x30, 0x40, 0x56, 0x38, 0x85, 0x84, + 0x8e, 0xcd, 0xf4, 0xe4, 0xeb, 0x2e, 0x94, 0xa7, 0x4d, 0x23, 0xd6, 0xaa, 0xa0, 0xb1, 0xff, 0x1d, + 0x87, 0x57, 0x86, 0x50, 0x74, 0xa0, 0xd2, 0xe3, 0xee, 0xef, 0x80, 0x1c, 0x66, 0xd9, 0xea, 0xa5, + 0x99, 0x80, 0x40, 0xc8, 0x0c, 0x35, 0x58, 0x64, 0x2e, 0x81, 0xc3, 0xb7, 0x94, 0xe7, 0xb1, 0xf4, + 0x58, 0xd4, 0x24, 0xc1, 0x62, 0xb9, 0x45, 0xf8, 0xcf, 0xa0, 0xdb, 0xf2, 0xc5, 0xa6, 0x33, 0x7c, + 0x40, 0x3d, 0xf7, 0xc2, 0xd3, 0x5b, 0x16, 0x23, 0xf6, 0x2d, 0xf4, 0x00, 0x7b, 0xdd, 0xd9, 0xee, + 0x84, 0x4d, 0xa1, 0x1c, 0xee, 0x98, 0xbd, 0xb6, 0x55, 0xf7, 0xf8, 0x26, 0xb7, 0xf5, 0xed, 0x81, + 0xb4, 0xd4, 0xcc, 0x35, 0xa7, 0xba, 0xdf, 0x73, 0x32, 0x44, 0xc3, 0xe1, 0x4e, 0xf8, 0x0f, 0x1f, + 0x97, 0xbb, 0xbb, 0xba, 0x41, 0x4a, 0xa8, 0x6d, 0x17, 0xac, 0xaf, 0xc5, 0xcb, 0x38, 0x87, 0x3a, + 0x7d, 0xee, 0x5f, 0xe1, 0x59, 0xeb, 0xc7, 0x8e, 0x27, 0x4c, 0x9b, 0x8d, 0x2f, 0xb4, 0x28, 0x85, + 0xfc, 0x22, 0x76, 0x5b, 0xe3, 0xc6, 0x96, 0xd0, 0x96, 0x39, 0x72, 0x39, 0x51, 0x0d, 0x50, 0xbb, + 0xd2, 0xba, 0x55, 0x9f, 0x1a, 0xf5, 0xbe, 0xe9, 0x6a, 0x15, 0xe6, 0x46, 0xc6, 0xe4, 0x10, 0xe7, + 0xa5, 0x96, 0xf9, 0xf6, 0xe3, 0xd6, 0x17, 0x4c, 0x28, 0xee, 0xb3, 0x40, 0xce, 0x6f, 0xae, 0x15, + 0xa3, 0x1f, 0xf2, 0x85, 0xd8, 0x97, 0xcc, 0xc0, 0xd8, 0xae, 0x91, 0x8a, 0xda, 0xe0, 0x9e, 0x76, + 0x9e, 0x1f, 0xc9, 0x69, 0x2d, 0x8a, 0x53, 0x45, 0x73, 0x3c, 0xab, 0x0e, 0xd4, 0xf2, 0x75, 0x9d, + 0x06, 0x99, 0xf5, 0x89, 0x9e, 0x58, 0xba, 0x76, 0xff, 0xe8, 0x8f, 0xad, 0x9b, 0x15, 0x03, 0x46, + 0x83, 0x97, 0x43, 0x6b, 0xd4, 0x6b, 0xff, 0x6a, 0x71, 0xa5, 0xd9, 0x12, 0xcc, 0x38, 0x3b, 0x4c, + 0xc4, 0xd1, 0xd1, 0x61, 0xcd, 0x20, 0xe1, 0xe3, 0x29, 0x52, 0x05, 0x08, 0x5c, 0xfd, 0x45, 0x14, + 0x0f, 0xa6, 0x55, 0x2c, 0x7e, 0xb0, 0xb7, 0xf0, 0xa0, 0x73, 0xd9, 0x80, 0x45, 0x21, 0xe5, 0x77, + 0x39, 0xa7, 0x07, 0x48, 0xae, 0x5c, 0xcb, 0x93, 0x24, 0xf2, 0x02, 0x00, 0x1a, 0x9c, 0xca, 0x72, + 0xd0, 0x5b, 0xc1, 0xe8, 0xec, 0x2c, 0x1f, 0xdb, 0x28, 0xb0, 0x5b, 0x19, 0xf2, 0x34, 0x8f, 0xae, + 0x92, 0xb3, 0xbb, 0x4b, 0x4b, 0xe6, 0x0b, 0x67, 0x81, 0x5c, 0x2c, 0xb4, 0x50, 0xd2, 0x2c, 0x74, + 0x68, 0xc2, 0xd6, 0xd7, 0xd8, 0x33, 0xd0, 0x10, 0x93, 0xa4, 0x23, 0x9a, 0x70, 0x04, 0xe6, 0xe1, + 0x68, 0xd9, 0x29, 0x25, 0x8b, 0x5c, 0xb9, 0x44, 0xa9, 0xd4, 0x88, 0xf9, 0xb9, 0x42, 0xe9, 0xcd, + 0x1e, 0xc1, 0xa5, 0x2b, 0x13, 0x47, 0xb5, 0xc2, 0x81, 0x15, 0x41, 0x10, 0x97, 0x5e, 0x15, 0xe9, + 0xb2, 0x4f, 0x6e, 0x54, 0x7a, 0xdf, 0xf9, 0x33, 0xda, 0x2c, 0xc2, 0x39, 0xf2, 0x11, 0x59, 0x1d, + 0x60, 0x9d, 0x8b, 0x9e, 0x14, 0x14, 0x83, 0x68, 0x56, 0x87, 0x38, 0x0e, 0xee, 0x4a, 0x73, 0xd8, + 0x92, 0x60, 0x7d, 0xe3, 0x7d, 0x86, 0xdd, 0x2b, 0xdd, 0xf6, 0xd2, 0x9a, 0xfb, 0xd0, 0xe8, 0x73, + 0x80, 0x40, 0x11, 0xa3, 0xd1, 0x97, 0xf4, 0x1f, 0xf4, 0x83, 
0x6a, 0xb3, 0x83, 0x0f, 0xa0, 0xa3, + 0x8f, 0xe5, 0x36, 0x2c, 0x7c, 0xdd, 0x1e, 0xd3, 0x48, 0x15, 0xb8, 0x6f, 0xed, 0x14, 0x60, 0x4e, + 0x61, 0xd3, 0x16, 0xfd, 0xd6, 0x82, 0xe1, 0xb9, 0x26, 0xb0, 0x0e, 0x08, 0xfb, 0x09, 0x9f, 0x48, + 0x51, 0x01, 0x5b, 0xc3, 0xb2, 0x49, 0x29, 0x1b, 0x9d, 0x4d, 0x3f, 0x9d, 0xf3, 0x87, 0x74, 0x70, + 0xba, 0x94, 0x11, 0xa5, 0xbb, 0x84, 0xb8, 0x65, 0xac, 0x8f, 0x6d, 0x24, 0x06, 0x56, 0x1f, 0xe1, + 0xc3, 0x47, 0xce, 0xe1, 0xb4, 0x55, 0x2c, 0x07, 0x7f, 0xfd, 0x2b, 0x57, 0xcd, 0x82, 0x96, 0x75, + 0xbd, 0xdc, 0x1b, 0x30, 0x59, 0x9f, 0x48, 0x4d, 0xa7, 0xd0, 0x15, 0x3f, 0x3c, 0x03, 0x1d, 0x99, + 0xaa, 0x94, 0x88, 0x52, 0x37, 0xd4, 0x94, 0x68, 0xb9, 0x65, 0xb5, 0x2a, 0x84, 0x61, 0xca, 0xaf, + 0xe0, 0x8a, 0x62, 0xdb, 0x64, 0x56, 0xca, 0x88, 0x66, 0x00, 0x2b, 0x3f, 0x63, 0x57, 0xc2, 0xd8, + 0x5c, 0x35, 0xa1, 0x1c, 0x36, 0xab, 0xeb, 0x50, 0x66, 0xc3, 0x5f, 0x07, 0x4a, 0xdf, 0x98, 0xc2, + 0x54, 0x8f, 0x23, 0x3e, 0xe2, 0xc7, 0x6b, 0xd9, 0x9c, 0xc5, 0xcd, 0x4c, 0x06, 0x21, 0x69, 0xf2, + 0xed, 0x9b, 0xce, 0xde, 0xf5, 0x60, 0x5a, 0x5b, 0x56, 0xc3, 0xf0, 0x43, 0xa2, 0x0c, 0x0c, 0xaa, + 0x48, 0x0e, 0x31, 0x59, 0xef, 0x31, 0x3a, 0x03, 0x11, 0x67, 0xb4, 0x3d, 0x01, 0x1c, 0xe8, 0xf8, + 0xf3, 0x3c, 0x99, 0x19, 0x53, 0x2e, 0x50, 0xc9, 0x4b, 0x7f, 0x35, 0x60, 0x51, 0xd2, 0xda, 0xd4, + 0xdf, 0x6d, 0x95, 0x52, 0xeb, 0x80, 0x61, 0x1e, 0xef, 0x2e, 0xba, 0x00, 0x1d, 0xd0, 0xb5, 0x4a, + 0xef, 0xe5, 0x26, 0xac, 0x00, 0x71, 0x7d, 0xe5, 0xd6, 0x07, 0xfb, 0x2b, 0x47, 0x64, 0xb0, 0xa5, + 0xc8, 0x1b, 0xe5, 0x83, 0x05, 0x36, 0x1b, 0xb8, 0xd7, 0x33, 0x4b, 0x78, 0x51, 0x76, 0x06, 0x5a, + 0x28, 0xef, 0x49, 0xdd, 0xd8, 0xed, 0x88, 0x58, 0x1d, 0xed, 0x1c, 0xc0, 0x6b, 0xff, 0xf3, 0x06, + 0xcb, 0x7b, 0xea, 0x67, 0xc5, 0x76, 0x3b, 0x15, 0xe2, 0x2b, 0x73, 0x82, 0x11, 0x2a, 0xed, 0xf5, + 0x3f, 0xa6, 0xed, 0xcc, 0xab, 0xd0, 0xd6, 0xba, 0xc9, 0x7f, 0xe9, 0xf7, 0xd3, 0x2b, 0x9e, 0x9b, + 0x39, 0x52, 0xb7, 0x2b, 0xd8, 0x24, 0xee, 0x42, 0xa8, 0xa7, 0xc6, 0x9a, 0x02, 0x5f, 0xd7, 0x42, + 0x60, 0x4c, 0x1a, 0x46, 0x11, 0xff, 0x90, 0x7f, 0x49, 0x4a, 0x9d, 0xd3, 0xaf, 0x8f, 0x94, 0x82, + 0x40, 0xf8, 0xae, 0x90, 0x52, 0xc1, 0x38, 0xa0, 0x19, 0x6d, 0x53, 0x8a, 0x5d, 0x3f, 0x08, 0xeb, + 0x17, 0xf6, 0x42, 0x24, 0xbb, 0x9f, 0x6a, 0x03, 0x1f, 0xe8, 0x11, 0xe6, 0xfd, 0xee, 0x4f, 0x40, + 0x08, 0x80, 0x14, 0x31, 0x6e, 0xc6, 0x5d, 0xed, 0xc7, 0x59, 0xbd, 0xfa, 0x72, 0xa0, 0x05, 0xc7, + 0xa1, 0x33, 0x91, 0x81, 0xe1, 0x56, 0xbb, 0x56, 0x89, 0x9c, 0x39, 0xf6, 0x78, 0x34, 0x06, 0x7d, + 0x22, 0x59, 0x86, 0xc7, 0xc3, 0xb6, 0x37, 0xb8, 0xa1, 0x5c, 0xe0, 0x3b, 0x8d, 0xf1, 0x66, 0x96, + 0x3a, 0x7e, 0x21, 0x76, 0x23, 0x7f, 0x81, 0x1b, 0x91, 0x6a, 0xbd, 0xf6, 0x2a, 0x6e, 0x15, 0x9d, + 0x57, 0xb1, 0xde, 0xb8, 0x2d, 0xa8, 0x67, 0x90, 0xa0, 0x2b, 0x99, 0xe8, 0xc6, 0x97, 0x33, 0x70, + 0x3e, 0x8d, 0x83, 0x4c, 0xf4, 0xeb, 0x33, 0x71, 0x7d, 0x6b, 0xa0, 0xad, 0x0a, 0x7e, 0xb5, 0x54, + 0x39, 0xcf, 0x94, 0xfb, 0xf5, 0x87, 0x82, 0xa8, 0x39, 0x7c, 0x1f, 0x51, 0x63, 0xb6, 0xf5, 0x58, + 0xa0, 0x8a, 0x0d, 0x7f, 0x25, 0x0c, 0x3a, 0x37, 0xc0, 0x49, 0x40, 0xe7, 0x5b, 0xb2, 0xdb, 0x1b, + 0x14, 0xe3, 0x1a, 0xa4, 0x13, 0x64, 0x8d, 0xb2, 0x36, 0x53, 0xec, 0x70, 0x8c, 0x0e, 0x8c, 0xe7, + 0x15, 0x50, 0x68, 0xef, 0x96, 0xd9, 0x63, 0x02, 0x8a, 0x9b, 0x58, 0xc4, 0xca, 0x29, 0x24, 0x03, + 0xdc, 0xc5, 0xed, 0x10, 0x83, 0x03, 0x5a, 0x74, 0x6c, 0x99, 0xef, 0xc8, 0x8c, 0x17, 0xaa, 0x7b, + 0x48, 0xc8, 0xe3, 0x10, 0xc9, 0xec, 0xb2, 0xcc, 0x4b, 0x85, 0x95, 0xb3, 0x29, 0x83, 0x4b, 0x82, + 0x15, 0x3d, 0x45, 0xa6, 0x4b, 0x9d, 0xf9, 0xc3, 0xcf, 0xd8, 0x3d, 0xea, 0x36, 0xd6, 
0x25, 0x64, + 0xf2, 0xbf, 0x68, 0x27, 0x02, 0x3a, 0x66, 0x52, 0x00, 0x41, 0x0f, 0x16, 0xb5, 0xf8, 0xdc, 0x66, + 0x0a, 0x94, 0x89, 0x13, 0x2f, 0x9c, 0x8c, 0x55, 0x79, 0x26, 0x0d, 0xc2, 0xea, 0xbf, 0x41, 0xf1, + 0x3c, 0x2f, 0x1a, 0x3b, 0x01, 0x33, 0xdb, 0xc7, 0xe4, 0x8b, 0xaa, 0x0e, 0x7e, 0x0f, 0x25, 0xec, + 0xb7, 0xa7, 0xe7, 0xb0, 0x7d, 0xe0, 0x4d, 0x11, 0x70, 0x86, 0xaf, 0xc1, 0x0d, 0xc7, 0xbd, 0xe2, + 0x46, 0xba, 0x62, 0x78, 0xce, 0x5c, 0xab, 0xaa, 0x67, 0x86, 0xe5, 0xd4, 0x87, 0xfb, 0xf6, 0xd7, + 0xcf, 0x09, 0x20, 0x98, 0x35, 0xcd, 0xa8, 0xcc, 0xee, 0xc5, 0x2d, 0xd1, 0xfb, 0x61, 0x34, 0xbb, + 0x24, 0xe2, 0xae, 0x67, 0xc2, 0xec, 0xc9, 0x4e, 0x14, 0x03, 0x99, 0x7c, 0x88, 0xda, 0x5a, 0x05, + 0xa3, 0x57, 0x45, 0xe7, 0x19, 0x55, 0x66, 0xde, 0x69, 0xe8, 0xa0, 0x16, 0x1e, 0x6f, 0x69, 0x39, + 0xec, 0x58, 0xfa, 0x6d, 0x4f, 0x56, 0x97, 0xfa, 0x82, 0x9b, 0xc3, 0x40, 0x48, 0x9b, 0x69, 0x83, + 0x13, 0x5d, 0x25, 0x5f, 0x44, 0x8f, 0x33, 0xf4, 0xca, 0xb3, 0x42, 0x34, 0x8f, 0x84, 0xe5, 0x32, + 0xc7, 0x7d, 0x05, 0xf8, 0xd1, 0xb5, 0x7e, 0xf5, 0x11, 0x5d, 0x53, 0x9c, 0xe6, 0x38, 0x2b, 0x6d, + 0x4b, 0xa8, 0x10, 0xa4, 0x53, 0xc9, 0x26, 0x23, 0xec, 0x47, 0xb3, 0x4e, 0x8c, 0x44, 0x28, 0x5f, + 0xfb, 0xff, 0xdc, 0xa5, 0x79, 0x2b, 0x38, 0xfb, 0x36, 0x8e, 0xa7, 0x0b, 0x1d, 0x98, 0x49, 0x92, + 0xc4, 0xb7, 0xa5, 0xc7, 0xf8, 0x2e, 0x8a, 0x72, 0x47, 0x34, 0x4c, 0x3f, 0x14, 0xbd, 0x91, 0x10, + 0xf9, 0x57, 0xff, 0xe2, 0xc4, 0xb7, 0x76, 0x1b, 0xfb, 0x76, 0xd6, 0xcd, 0xd7, 0xb8, 0x32, 0x50, + 0xa8, 0xb4, 0x7b, 0x1d, 0x3c, 0x89, 0xda, 0x7d, 0x48, 0xd2, 0x51, 0xe2, 0x37, 0xdb, 0x46, 0xc9, + 0xd7, 0xa5, 0xf7, 0x7e, 0x86, 0x73, 0x7b, 0x8a, 0x09, 0x7d, 0xd7, 0xc8, 0x1b, 0x26, 0x34, 0xfd, + 0xb5, 0xb7, 0xfe, 0x24, 0xbd, 0xe0, 0x35, 0x02, 0x37, 0xb7, 0x44, 0xb3, 0x1e, 0x85, 0xd2, 0xe5, + 0xf9, 0x22, 0x38, 0x4d, 0x4d, 0x68, 0xdf, 0x90, 0x8d, 0xb2, 0x7e, 0xa5, 0xff, 0x88, 0xda, 0x21, + 0x53, 0x91, 0x8b, 0xd8, 0x82, 0xaf, 0x79, 0x29, 0x57, 0x67, 0x99, 0x94, 0x9d, 0x5e, 0x6f, 0x0d, + 0xd0, 0xc0, 0x36, 0x2d, 0x96, 0x25, 0xf8, 0x64, 0x3f, 0xfb, 0x64, 0x4c, 0xa0, 0x14, 0x22, 0x9b, + 0xaf, 0xe1, 0xf8, 0xb4, 0x69, 0x49, 0x69, 0xdc, 0x20, 0xda, 0x9a, 0x58, 0xb5, 0x20, 0x3c, 0x41, + 0x56, 0x19, 0xbe, 0x40, 0x83, 0x6c, 0x1f, 0x33, 0x97, 0x19, 0x45, 0xb7, 0x35, 0x43, 0x4e, 0x49, + 0x85, 0xe4, 0xa7, 0xbe, 0xf1, 0x8a, 0xd4, 0xb2, 0xa3, 0x94, 0x7f, 0x9a, 0x57, 0x84, 0x53, 0xcd, + 0xd9, 0x4e, 0x1a, 0x61, 0xed, 0xeb, 0xcd, 0x8d, 0x43, 0x70, 0x7e, 0x75, 0x80, 0xbf, 0x65, 0xdd, + 0xa0, 0x1b, 0xd1, 0x11, 0x18, 0x8f, 0x5e, 0xba, 0xc6, 0x1c, 0x0b, 0x2c, 0xc0, 0xb1, 0x47, 0xa4, + 0x18, 0xf0, 0xb1, 0xa1, 0xd5, 0x5c, 0xd1, 0x6c, 0x3a, 0xf5, 0xf7, 0x58, 0xdf, 0xf5, 0x91, 0x8e, + 0xcf, 0x61, 0x57, 0xb1, 0x59, 0x2b, 0x70, 0xf4, 0x1a, 0x55, 0x82, 0x07, 0x00, 0x77, 0x60, 0x72, + 0x12, 0x12, 0xd7, 0x70, 0x40, 0x8e, 0x0b, 0xd4, 0xbe, 0x82, 0x33, 0x3d, 0xa0, 0xde, 0x86, 0x66, + 0x48, 0x85, 0x91, 0x67, 0xad, 0x47, 0xd6, 0xbf, 0xa8, 0x68, 0xf2, 0x1f, 0xdb, 0xb7, 0x37, 0xc4, + 0xe2, 0x21, 0x28, 0xda, 0x3f, 0xba, 0x9d, 0x3c, 0xae, 0xc3, 0xad, 0x7d, 0x07, 0xf8, 0xdf, 0xf5, + 0x7d, 0x5b, 0x30, 0x72, 0x49, 0x06, 0xe3, 0x8d, 0x81, 0x03, 0x76, 0xc6, 0x7b, 0x8a, 0xc2, 0xc8, + 0xa8, 0x41, 0xd1, 0xd7, 0xcd, 0x40, 0x67, 0x03, 0xc6, 0x92, 0x13, 0x88, 0xca, 0x29, 0x72, 0x28, + 0x36, 0xc7, 0x52, 0x70, 0x47, 0xca, 0xe6, 0xac, 0x39, 0x5c, 0x36, 0xdc, 0x89, 0xa2, 0xd6, 0xb9, + 0x77, 0x8d, 0xef, 0x8f, 0xe4, 0x87, 0x12, 0xaa, 0xdb, 0xf4, 0xb2, 0x3c, 0x0a, 0xac, 0x84, 0x3d, + 0x69, 0x5d, 0x95, 0xb1, 0x20, 0x23, 0x73, 0x20, 0xe9, 0x1b, 0x5e, 0x17, 0x82, 0xb8, 0x02, 0x0b, + 0xf8, 0x4a, 
0xff, 0xcc, 0xa0, 0xae, 0x56, 0x48, 0x6e, 0x5e, 0x2a, 0x47, 0xe4, 0x01, 0xe1, 0x97, + 0x77, 0x4e, 0xdc, 0xd4, 0xfe, 0x87, 0xdd, 0xad, 0xb6, 0xe0, 0xa0, 0x12, 0x77, 0x8b, 0x18, 0x9a, + 0x7a, 0xc3, 0x99, 0xb4, 0xf5, 0x07, 0xea, 0x7e, 0x07, 0xc4, 0x17, 0x98, 0x68, 0x58, 0xac, 0x24, + 0xea, 0xb2, 0xe9, 0x11, 0xc9, 0xa0, 0x8b, 0x4d, 0x6a, 0x09, 0x0b, 0x07, 0x9f, 0x27, 0xa6, 0x28, + 0x9e, 0xd9, 0xe0, 0x43, 0x4e, 0x3f, 0x0d, 0xb3, 0x10, 0xab, 0xf6, 0xb1, 0x3e, 0xac, 0xcf, 0x32, + 0x87, 0x0f, 0x54, 0x10, 0xa0, 0x45, 0xef, 0x2b, 0x24, 0x24, 0xbb, 0x17, 0xde, 0x8a, 0x35, 0x41, + 0xd4, 0x4e, 0xdb, 0x88, 0xd3, 0xa3, 0x9b, 0x1e, 0xa8, 0x1e, 0xdc, 0x58, 0xc3, 0x33, 0xe9, 0x27, + 0x1f, 0x86, 0xbc, 0x8a, 0xda, 0x02, 0xa8, 0xee, 0x8b, 0x45, 0x0e, 0x01, 0xb9, 0x78, 0xfa, 0x96, + 0xda, 0x22, 0x22, 0x9b, 0xbc, 0x6f, 0xa8, 0xdb, 0xac, 0x0b, 0x88, 0x11, 0xc1, 0x05, 0x5a, 0x71, + 0x3c, 0x74, 0xad, 0x2a, 0xe1, 0x8d, 0x95, 0x31, 0x1e, 0xfd, 0xa4, 0x73, 0xe1, 0x53, 0xd7, 0x5b, + 0x31, 0x69, 0x65, 0xeb, 0x0b, 0x29, 0xba, 0x45, 0x1f, 0x3f, 0x98, 0x0f, 0x0c, 0x85, 0xfe, 0x22, + 0x9c, 0x4b, 0x15, 0xba, 0x14, 0xd6, 0x90, 0x32, 0x7a, 0xb3, 0xe3, 0x29, 0xae, 0x27, 0xb4, 0x84, + 0x83, 0x08, 0xda, 0x82, 0x3f, 0x86, 0xe2, 0xcf, 0x89, 0x3e, 0xf1, 0xe9, 0xd2, 0x98, 0x34, 0x0f, + 0x61, 0x01, 0x5f, 0x09, 0xa2, 0xeb, 0x11, 0xee, 0x00, 0xfe, 0x8c, 0x0d, 0x26, 0x3e, 0xa2, 0xea, + 0x86, 0xb5, 0xc6, 0xdd, 0x40, 0xe9, 0x83, 0xb8, 0x88, 0xa2, 0xb9, 0x4e, 0x79, 0xd6, 0x6b, 0x0c, + 0xd5, 0x15, 0xa1, 0x8b, 0x67, 0xfd, 0xc6, 0xbd, 0x3d, 0xde, 0xac, 0x56, 0x5e, 0x0b, 0x35, 0xde, + 0x36, 0x26, 0x18, 0x87, 0xd2, 0xa8, 0xbe, 0x37, 0x69, 0x94, 0x31, 0x95, 0x20, 0xb8, 0x0f, 0x91, + 0x21, 0xe1, 0x4f, 0x89, 0xf9, 0x1d, 0xef, 0xef, 0x52, 0xa2, 0x2c, 0x49, 0xad, 0xe6, 0x6a, 0x12, + 0xfb, 0xf2, 0xce, 0xcf, 0xee, 0x83, 0x5f, 0x0c, 0xab, 0xd4, 0x8f, 0x2c, 0x18, 0x03, 0x38, 0x31, + 0x0c, 0x83, 0x77, 0xd0, 0x11, 0xcb, 0x38, 0xd5, 0x11, 0x19, 0x76, 0x64, 0xa8, 0x4e, 0xfe, 0xb3, + 0xef, 0xcf, 0x08, 0x29, 0xd4, 0x32, 0x50, 0xb5, 0x5a, 0xe0, 0x23, 0x16, 0x1d, 0x3a, 0xfd, 0x20, + 0x22, 0x53, 0x9a, 0x46, 0x0f, 0x13, 0xc1, 0xcd, 0xc7, 0xd7, 0x80, 0xf0, 0xd6, 0x72, 0x9b, 0x67, + 0x52, 0x57, 0x7e, 0xb1, 0xaf, 0x91, 0x92, 0x5d, 0xe2, 0xa8, 0x79, 0x13, 0xf3, 0x71, 0xc9, 0xe5, + 0xc5, 0xf5, 0x69, 0x55, 0xf9, 0x17, 0xf1, 0xc0, 0xab, 0x27, 0xb6, 0x25, 0xd7, 0x39, 0x86, 0x48, + 0x96, 0x72, 0xdc, 0xc7, 0x3a, 0x6f, 0xec, 0x0e, 0xce, 0x61, 0x39, 0xe1, 0x14, 0x46, 0xf5, 0x62, + 0xf6, 0xde, 0xa8, 0xee, 0x92, 0x15, 0x2d, 0x7e, 0x38, 0x61, 0xe2, 0x8d, 0xfc, 0xa0, 0x1b, 0xca, + 0xa3, 0x88, 0x0e, 0x5d, 0xf3, 0x4a, 0xee, 0x96, 0xdd, 0xa2, 0xc5, 0x07, 0x7e, 0xf1, 0xc9, 0xa1, + 0xc4, 0xc5, 0xdf, 0x09, 0x1b, 0x59, 0xca, 0xc6, 0xb3, 0x2e, 0x60, 0x68, 0xcc, 0x08, 0x65, 0x82, + 0xdf, 0x2c, 0x65, 0x1b, 0x88, 0xaf, 0xd7, 0x2e, 0x23, 0xa9, 0x5e, 0x0d, 0x07, 0xff, 0x90, 0xb6, + 0xae, 0xe1, 0x3f, 0x46, 0x63, 0x32, 0xb5, 0x70, 0x95, 0xf8, 0xca, 0x46, 0x21, 0xa4, 0xcd, 0x9f, + 0x19, 0x7a, 0xab, 0xe9, 0xd5, 0x63, 0x4b, 0x53, 0xd3, 0x62, 0xc9, 0x3d, 0x05, 0xf3, 0xc5, 0x26, + 0x8a, 0xff, 0x3b, 0x1f, 0x31, 0x9f, 0xf6, 0x4d, 0x68, 0xa8, 0xe2, 0x1d, 0xd7, 0x29, 0x98, 0x52, + 0xde, 0xda, 0x56, 0xdb, 0x41, 0xa9, 0x99, 0x90, 0xc9, 0x9d, 0x80, 0xf1, 0x0f, 0x8b, 0xac, 0x78, + 0xf9, 0x6e, 0x4e, 0x0e, 0x1a, 0xd1, 0x61, 0x40, 0x24, 0x0b, 0xfe, 0x6e, 0xf0, 0x2c, 0x5f, 0x69, + 0x75, 0xf3, 0xbe, 0x76, 0x18, 0x39, 0x73, 0x7f, 0x6a, 0xb1, 0x3d, 0x72, 0x68, 0x63, 0x16, 0x05, + 0xde, 0x80, 0x1e, 0xc1, 0x3e, 0x63, 0xf3, 0x9e, 0x8f, 0xd7, 0x53, 0x44, 0x06, 0x01, 0x72, 0x88, + 0x56, 0x6a, 0x98, 0xa4, 0xee, 0xc4, 
0x72, 0x9f, 0x24, 0x03, 0x8c, 0x61, 0xfa, 0x85, 0x47, 0x38, + 0xfd, 0x40, 0xe2, 0x65, 0xf0, 0x03, 0x4b, 0x59, 0x71, 0x89, 0x83, 0x09, 0xbe, 0xd4, 0x2f, 0xe4, + 0x7a, 0x00, 0x5b, 0xcf, 0x85, 0x7b, 0xf3, 0x40, 0xe8, 0xd8, 0x09, 0x71, 0xcb, 0xc1, 0xda, 0x6d, + 0x18, 0x70, 0x99, 0xb4, 0xd6, 0x17, 0x18, 0x52, 0xe7, 0x79, 0x8f, 0x47, 0xf9, 0x15, 0x3e, 0xf0, + 0xcd, 0x1c, 0x10, 0x9d, 0x28, 0xe8, 0x52, 0x00, 0xee, 0x0d, 0x67, 0x2b, 0x25, 0x91, 0x59, 0x9d, + 0x96, 0x92, 0xf5, 0x61, 0xde, 0xc7, 0x1d, 0xcc, 0x2f, 0x25, 0x78, 0x4e, 0xbb, 0x0f, 0xef, 0x71, + 0x9b, 0xac, 0x36, 0x68, 0xcc, 0x6a, 0xa5, 0x33, 0xd0, 0x12, 0x2d, 0xab, 0x24, 0x94, 0x67, 0xdc, + 0xfc, 0xfc, 0x79, 0xc4, 0x0e, 0x5b, 0xa2, 0xa7, 0x8f, 0xe4, 0x8b, 0xac, 0xd8, 0xb3, 0xa3, 0x88, + 0x47, 0xf1, 0x4b, 0xfe, 0xb3, 0x65, 0xca, 0x7a, 0x92, 0x87, 0x7f, 0x25, 0x58, 0x37, 0xf9, 0xaa, + 0x29, 0x9e, 0xfe, 0x16, 0x54, 0x75, 0x89, 0x33, 0x85, 0x0c, 0x56, 0xa5, 0xd0, 0x00, 0x7a, 0xf1, + 0xf0, 0x25, 0x1b, 0x1f, 0x92, 0xf3, 0x48, 0xf5, 0x52, 0xed, 0x03, 0xe0, 0x26, 0x9d, 0x21, 0x92, + 0xad, 0x01, 0x36, 0x0e, 0x08, 0x1f, 0xeb, 0xbb, 0x8d, 0xcb, 0x56, 0x18, 0x40, 0xed, 0x4e, 0x37, + 0xf0, 0x96, 0x3c, 0x48, 0x5b, 0x52, 0xc4, 0x3e, 0xc6, 0x07, 0x5f, 0xe7, 0xf4, 0x33, 0x12, 0x88, + 0x95, 0xca, 0xc1, 0x38, 0x63, 0xa1, 0xc4, 0xb2, 0x64, 0x07, 0x0b, 0x66, 0x0d, 0xae, 0x99, 0xf4, + 0xce, 0xd2, 0xad, 0x09, 0x40, 0x4c, 0x7a, 0xf4, 0x55, 0x12, 0x12, 0x91, 0xd4, 0x33, 0x40, 0xdb, + 0x03, 0x75, 0xc8, 0x76, 0x00, 0xef, 0x02, 0xd0, 0x2b, 0x5c, 0xfb, 0x02, 0x1e, 0xc8, 0x8a, 0xd0, + 0x08, 0x4b, 0x6a, 0x54, 0x77, 0x86, 0x75, 0xc8, 0xfa, 0x58, 0xb7, 0xb7, 0x38, 0x98, 0x4c, 0x37, + 0x2c, 0x66, 0x6a, 0xd3, 0x40, 0x11, 0xc6, 0x53, 0xf3, 0x4c, 0x69, 0x16, 0x81, 0x78, 0xb9, 0x38, + 0x98, 0xd9, 0x8e, 0xfe, 0xf9, 0x4c, 0x50, 0x24, 0x3f, 0xf2, 0xae, 0xf5, 0xf6, 0x30, 0x76, 0x89, + 0x68, 0x24, 0xfb, 0x01, 0x6b, 0x20, 0xd0, 0x16, 0x63, 0xb8, 0x82, 0x57, 0xa1, 0x97, 0x2d, 0xf4, + 0xa4, 0x3f, 0x3b, 0x2f, 0xce, 0x72, 0x26, 0xec, 0x12, 0x65, 0x9b, 0x54, 0x05, 0xf8, 0x27, 0x6a, + 0x45, 0x83, 0x99, 0xad, 0x28, 0x61, 0xdf, 0x67, 0x8a, 0xa5, 0x5e, 0x0b, 0xaa, 0xbd, 0x2d, 0x75, + 0xcd, 0xe5, 0x8c, 0xb9, 0x6d, 0x66, 0x6f, 0x49, 0x1a, 0x3b, 0xdb, 0xc1, 0xb8, 0x75, 0xe4, 0x71, + 0xa4, 0xa3, 0x58, 0xe8, 0x66, 0xcb, 0x7a, 0x29, 0xb5, 0xc0, 0x59, 0x7a, 0xaa, 0xf4, 0x59, 0xa1, + 0xa5, 0x4e, 0xde, 0x8e, 0xe4, 0xc0, 0x10, 0x70, 0x6d, 0x7e, 0xaf, 0xb7, 0x4a, 0x03, 0x69, 0x92, + 0x76, 0xbc, 0xf5, 0xcc, 0x9f, 0x69, 0x50, 0x65, 0x3b, 0xca, 0xa6, 0xf2, 0xde, 0xb3, 0x6e, 0xdf, + 0x47, 0x4d, 0x8f, 0xb4, 0x25, 0x0c, 0x2c, 0x23, 0xdc, 0xdb, 0xbf, 0xc7, 0x5f, 0xa5, 0xde, 0xfc, + 0x70, 0xb1, 0x50, 0xc4, 0x4f, 0x07, 0x7b, 0xbe, 0x84, 0xfd, 0xcb, 0x6b, 0xac, 0xff, 0x2d, 0x29, + 0x75, 0xa5, 0x18, 0xbb, 0x48, 0x2b, 0xc8, 0x15, 0x4c, 0xae, 0xa8, 0x23, 0x33, 0x71, 0x56, 0xc7, + 0xe7, 0xb1, 0x51, 0x1e, 0x15, 0x2d, 0x0f, 0xe8, 0x8a, 0x5b, 0x12, 0x1c, 0x15, 0x75, 0x3a, 0x3b, + 0xb7, 0x8e, 0x05, 0xfc, 0xce, 0x47, 0x91, 0x05, 0xde, 0x9e, 0xec, 0x68, 0x9d, 0x10, 0xff, 0xc6, + 0xb5, 0x4c, 0x10, 0x28, 0x1b, 0x82, 0x62, 0x50, 0x65, 0xb5, 0xc5, 0xa2, 0x7f, 0x38, 0xbc, 0x27, + 0x85, 0x8b, 0x75, 0xa3, 0xa0, 0x96, 0x4e, 0xed, 0xf0, 0x03, 0x53, 0x58, 0x5a, 0x2e, 0x6b, 0xa9, + 0xf5, 0x50, 0x75, 0x48, 0xc3, 0x14, 0xd3, 0xbd, 0xd0, 0x96, 0x16, 0x75, 0x0a, 0x5d, 0x7b, 0x78, + 0x17, 0x24, 0x56, 0x3b, 0x58, 0xb7, 0xa3, 0xf7, 0x94, 0xb0, 0x56, 0x36, 0x99, 0x23, 0x7e, 0x3d, + 0xcd, 0x33, 0xe0, 0x77, 0x8b, 0xc2, 0xf7, 0xd8, 0x8e, 0xa1, 0x31, 0x85, 0x9a, 0x63, 0x9c, 0xea, + 0xbf, 0x64, 0x61, 0x02, 0xad, 0x96, 0x97, 0x7d, 0x85, 0x3c, 
0x45, 0xc3, 0x24, 0xc9, 0xa3, 0xa2, + 0x69, 0xaf, 0x88, 0xd0, 0x11, 0x14, 0x7c, 0xb4, 0xf3, 0x68, 0xb1, 0x2c, 0x51, 0x77, 0xa7, 0x67, + 0x28, 0x9f, 0xc6, 0xb4, 0x2c, 0x7c, 0x45, 0x44, 0xc7, 0x19, 0x85, 0xc2, 0x27, 0x83, 0x8d, 0xea, + 0x60, 0x5f, 0xb7, 0x96, 0x84, 0x2e, 0x9c, 0x99, 0x10, 0x22, 0x83, 0x4d, 0x26, 0xbc, 0xbd, 0x02, + 0x8d, 0x59, 0x3f, 0xa6, 0x4e, 0xbf, 0xbb, 0x42, 0xed, 0x7b, 0xa7, 0xf3, 0xee, 0xe6, 0xad, 0x42, + 0xac, 0x39, 0x3f, 0xd8, 0xfc, 0xf1, 0x67, 0x4f, 0x1e, 0x73, 0xac, 0x1a, 0x7b, 0xb8, 0xfa, 0xac, + 0x50, 0x5a, 0x90, 0xa5, 0x9d, 0x2a, 0xf0, 0x0c, 0x51, 0x53, 0x55, 0x83, 0xc1, 0xc0, 0xb7, 0x2e, + 0xd8, 0x18, 0xe6, 0x53, 0xf5, 0xb9, 0x2d, 0x09, 0x28, 0x30, 0x8c, 0x43, 0x6c, 0x89, 0x03, 0x31, + 0xed, 0xb3, 0x3d, 0xf7, 0x9d, 0x7c, 0xd0, 0x22, 0x47, 0xd9, 0x4b, 0x0d, 0x4c, 0x5b, 0x6e, 0xff, + 0x6a, 0x0b, 0x03, 0x5f, 0xe0, 0xd3, 0x4c, 0x83, 0x80, 0xe0, 0x61, 0x75, 0x83, 0x27, 0xe9, 0x45, + 0x58, 0xa4, 0x2a, 0x5e, 0x9e, 0x79, 0x1e, 0xe8, 0x42, 0xd8, 0x66, 0x30, 0x8d, 0xd2, 0xd6, 0x86, + 0xcf, 0xe0, 0x3d, 0x0d, 0xaa, 0x16, 0x0d, 0x0a, 0x0b, 0x46, 0x6a, 0x69, 0xb0, 0x87, 0xda, 0x8b, + 0x75, 0xa3, 0x76, 0x16, 0xcc, 0x5a, 0x3d, 0x91, 0xec, 0x05, 0x1d, 0x0e, 0x50, 0x9c, 0x2e, 0xe8, + 0xf4, 0x4d, 0x15, 0x3e, 0x48, 0x76, 0x81, 0x8f, 0xb8, 0x8f, 0x45, 0x69, 0x4b, 0xb5, 0xd5, 0x08, + 0xc3, 0x32, 0x0b, 0x1e, 0x0b, 0xd9, 0x70, 0x4d, 0x33, 0x00, 0x53, 0xed, 0x61, 0x34, 0xc7, 0x03, + 0x3e, 0xbf, 0x3b, 0xbc, 0x8e, 0xb9, 0xb8, 0x64, 0x95, 0x26, 0x96, 0x8b, 0x1f, 0xb3, 0x07, 0x67, + 0x2b, 0x41, 0x68, 0xc2, 0x6c, 0x60, 0xaf, 0x8c, 0x43, 0xaf, 0xd2, 0xa1, 0x81, 0x11, 0xc9, 0x5e, + 0x06, 0xc0, 0x5d, 0x25, 0x78, 0x64, 0xc8, 0x77, 0xca, 0xd6, 0xa3, 0x9b, 0x43, 0xea, 0x2b, 0x35, + 0x8a, 0x61, 0xaa, 0x2d, 0xa4, 0x42, 0x35, 0x86, 0x9f, 0xab, 0x25, 0xe3, 0x54, 0xee, 0xe0, 0xa5, + 0x04, 0x5e, 0xad, 0x8f, 0x14, 0x9d, 0xf4, 0x29, 0x03, 0xa1, 0x01, 0xf3, 0x95, 0xb8, 0xa7, 0xd3, + 0x5e, 0x4c, 0x9f, 0xb9, 0x13, 0xae, 0xd1, 0xd4, 0x77, 0x73, 0x9c, 0x15, 0xb1, 0x05, 0x7e, 0x24, + 0xaf, 0xe4, 0x85, 0x62, 0x9d, 0xd0, 0x2c, 0xce, 0x40, 0x5f, 0x21, 0x1d, 0xa1, 0x0f, 0xb8, 0xb6, + 0x51, 0xb8, 0xef, 0x5e, 0xa8, 0x6b, 0x41, 0xbc, 0xd4, 0x8b, 0xe5, 0x3a, 0x49, 0x9c, 0x1e, 0xef, + 0x7a, 0x79, 0x6b, 0x42, 0x42, 0xcc, 0xf2, 0xd7, 0xe8, 0x63, 0xa3, 0xac, 0x71, 0x35, 0xa5, 0x98, + 0xdd, 0x92, 0x02, 0xea, 0x50, 0x73, 0xb9, 0xa4, 0xb3, 0x18, 0x8f, 0x81, 0x1a, 0x67, 0x50, 0x6c, + 0xf9, 0x19, 0xe0, 0x0e, 0x90, 0x2d, 0x2c, 0x73, 0x59, 0x5b, 0x5f, 0x10, 0x9f, 0xc0, 0xd2, 0xe2, + 0xf0, 0xab, 0x01, 0x68, 0x74, 0x5c, 0x7b, 0x63, 0x00, 0x21, 0x39, 0xe0, 0x37, 0x1c, 0xb7, 0x8d, + 0x03, 0xe8, 0xef, 0x6e, 0xe0, 0xc0, 0x6c, 0xac, 0x56, 0xfa, 0xa9, 0xdc, 0x9a, 0xcd, 0xc3, 0x85, + 0xe9, 0xaa, 0x2a, 0xf5, 0x14, 0x7e, 0x04, 0x22, 0xc6, 0x3e, 0x2a, 0x0c, 0x6b, 0x82, 0x56, 0xa2, + 0x69, 0x18, 0xe1, 0x96, 0xc1, 0x16, 0x8d, 0xb4, 0x8a, 0x55, 0xba, 0x58, 0xa4, 0xf9, 0xa4, 0x45, + 0xd2, 0xf8, 0x29, 0xe3, 0x52, 0x07, 0xc7, 0xcd, 0xb0, 0xeb, 0x7d, 0x1c, 0xee, 0x5f, 0xa9, 0xb1, + 0xeb, 0xb4, 0xd8, 0xb2, 0xd4, 0x68, 0x27, 0x2c, 0x99, 0xd3, 0xb3, 0x69, 0x9c, 0x34, 0x10, 0x6d, + 0x65, 0xd7, 0xa0, 0xab, 0x51, 0x58, 0xc9, 0xe9, 0x79, 0xd2, 0x4e, 0xca, 0x5c, 0xae, 0x9d, 0x3e, + 0x40, 0xef, 0x21, 0xd7, 0xe3, 0xc6, 0xc4, 0x11, 0x98, 0x94, 0xb6, 0x5e, 0x99, 0x1f, 0x19, 0xc1, + 0x63, 0xd1, 0xe4, 0xc7, 0xda, 0xfc, 0xae, 0x50, 0xea, 0xa8, 0xd5, 0x90, 0x75, 0x0c, 0x9f, 0x23, + 0x51, 0x27, 0xb2, 0xa7, 0xcd, 0x02, 0x91, 0x38, 0xcc, 0x87, 0x4c, 0x28, 0xc5, 0xce, 0xbf, 0x23, + 0x07, 0x1a, 0x0f, 0x07, 0x08, 0xcb, 0x27, 0x42, 0xa3, 0x5f, 0x01, 0x3b, 0x3b, 0x53, 
0xc9, 0x8b, + 0x65, 0x95, 0xbe, 0xb6, 0x80, 0xb6, 0x1f, 0xcd, 0xdf, 0x6c, 0x77, 0xc6, 0xad, 0x54, 0x45, 0xfd, + 0xf9, 0xc7, 0x3a, 0xa7, 0xa6, 0x04, 0xd9, 0xdb, 0xf4, 0x4f, 0x28, 0x0f, 0xa3, 0x6a, 0x7a, 0x3f, + 0xe1, 0xfe, 0xd1, 0x33, 0xe6, 0xb7, 0x13, 0xce, 0x5f, 0x82, 0x44, 0xae, 0x5a, 0xdf, 0x82, 0x27, + 0x4b, 0x20, 0x19, 0x46, 0x62, 0x4a, 0x5e, 0xe5, 0xa1, 0x4d, 0x58, 0x1b, 0x99, 0x3c, 0x02, 0xbc, + 0x43, 0xad, 0xf3, 0xad, 0xd0, 0x9f, 0xe8, 0xf5, 0x85, 0x57, 0x6e, 0xce, 0xfe, 0x28, 0x0d, 0xef, + 0xed, 0xb3, 0x1f, 0x58, 0x8a, 0x4e, 0xdf, 0xd9, 0x1f, 0xda, 0xe7, 0xc5, 0x6c, 0x0c, 0xd3, 0x1f, + 0xa6, 0x40, 0xc5, 0x9f, 0xaa, 0x20, 0x1f, 0x8b, 0x57, 0xbd, 0xfd, 0xb3, 0xeb, 0x8e, 0xcd, 0x9c, + 0xa3, 0xdf, 0x5c, 0xfd, 0xf3, 0xb9, 0xaf, 0x3f, 0xaa, 0xbd, 0x48, 0x0a, 0x03, 0xe5, 0xd3, 0x4f, + 0x93, 0xbb, 0x9d, 0x0b, 0x6f, 0x0d, 0xa6, 0x99, 0xf7, 0xaf, 0x1c, 0x00, 0x65, 0x1b, 0x7f, 0x11, + 0xb7, 0x58, 0x2e, 0x37, 0x81, 0x4a, 0x21, 0xd2, 0xc1, 0x7c, 0x1f, 0xab, 0xfa, 0x63, 0x18, 0xed, + 0xb8, 0x2a, 0xce, 0x19, 0xfc, 0x09, 0x02, 0x26, 0x62, 0xd2, 0x4e, 0x03, 0xae, 0x89, 0x53, 0xe4, + 0x4b, 0xc8, 0x01, 0xd1, 0xbc, 0x74, 0x1a, 0x45, 0x8a, 0xc5, 0x2e, 0xae, 0x7c, 0x80, 0x6b, 0x73, + 0xc6, 0xfc, 0x19, 0x42, 0x3c, 0xf3, 0x40, 0x2c, 0x9c, 0x92, 0x93, 0xed, 0x87, 0x64, 0x7f, 0xde, + 0x56, 0x86, 0x3c, 0x53, 0xb6, 0xbd, 0x87, 0x0a, 0xa9, 0x06, 0x37, 0x61, 0x77, 0x97, 0x70, 0x34, + 0xa9, 0x02, 0x1d, 0x61, 0x9e, 0x81, 0xc2, 0x7b, 0x84, 0xea, 0x9c, 0x84, 0x11, 0x12, 0xb1, 0x9d, + 0xef, 0xd6, 0xd0, 0xd6, 0x0b, 0x85, 0x7a, 0x11, 0x6b, 0x93, 0x4f, 0xa3, 0xaa, 0xed, 0xce, 0x05, + 0x64, 0x39, 0x66, 0x03, 0x08, 0x2f, 0xf3, 0xcc, 0x28, 0xe2, 0x49, 0x61, 0x91, 0xc9, 0xb1, 0xa8, + 0xc0, 0xb2, 0xda, 0xff, 0x3c, 0x97, 0xf0, 0x44, 0xce, 0xe0, 0x9b, 0xef, 0x23, 0x52, 0x0e, 0xd7, + 0xd6, 0xd2, 0xaf, 0xa6, 0x54, 0xda, 0x1f, 0x10, 0xc8, 0x86, 0x98, 0xec, 0x1a, 0xa6, 0x1d, 0x5d, + 0x54, 0xa2, 0xbe, 0xc7, 0xe9, 0xc7, 0xf2, 0x29, 0x10, 0xe0, 0x9c, 0x6d, 0xb0, 0x70, 0xe6, 0x8d, + 0xa1, 0xc1, 0x1d, 0x45, 0xe4, 0x0e, 0xb0, 0x90, 0x5c, 0xdb, 0xc4, 0x6e, 0xc2, 0x7a, 0xe4, 0xfb, + 0x7e, 0xf8, 0xdf, 0x21, 0x96, 0x88, 0x9d, 0x76, 0x2a, 0x14, 0x9d, 0xae, 0xdf, 0x03, 0x45, 0x79, + 0xc6, 0x52, 0xcd, 0x3c, 0x4c, 0xb1, 0x5a, 0x2a, 0xcb, 0x8c, 0x42, 0x89, 0xc3, 0xd1, 0xfb, 0x01, + 0x5c, 0xf4, 0xe6, 0x37, 0xb1, 0xe4, 0x9e, 0xc8, 0xbd, 0xfc, 0x71, 0xcb, 0xef, 0x4b, 0x71, 0x6e, + 0x16, 0x17, 0xbd, 0x47, 0x22, 0x7f, 0xff, 0x36, 0x02, 0x1b, 0xd3, 0x84, 0x29, 0xfd, 0xd4, 0x7f, + 0xde, 0x40, 0xfa, 0x83, 0x79, 0xd6, 0xc7, 0x29, 0x53, 0x46, 0xec, 0xc4, 0x8d, 0xdb, 0x55, 0x06, + 0x12, 0x63, 0x7e, 0x10, 0x02, 0x46, 0xbd, 0x56, 0x72, 0xb9, 0x5a, 0x90, 0x3f, 0xb6, 0x41, 0xf3, + 0x48, 0xdf, 0xc1, 0xd3, 0x81, 0xc4, 0xf1, 0xa0, 0x75, 0x91, 0x86, 0xd9, 0x14, 0x0b, 0x87, 0x1d, + 0x88, 0x1c, 0x64, 0x86, 0xe6, 0xdb, 0x0b, 0x05, 0x30, 0xa3, 0xe8, 0x2a, 0x1a, 0xf9, 0xe8, 0x44, + 0x65, 0x81, 0xdc, 0xfd, 0x47, 0x3e, 0xf7, 0xfb, 0x37, 0x55, 0x1d, 0xb9, 0xc7, 0x35, 0x64, 0x4d, + 0xa5, 0x0e, 0xda, 0xb4, 0xff, 0x73, 0x5e, 0xe8, 0xcc, 0x8d, 0x38, 0x5a, 0x69, 0xfd, 0x95, 0xd8, + 0x93, 0xb6, 0x15, 0xe6, 0xd1, 0x50, 0xfd, 0x79, 0x54, 0xf0, 0xfc, 0x1b, 0xa9, 0x5c, 0x33, 0x41, + 0x99, 0xe2, 0x03, 0x89, 0xc7, 0x1c, 0x47, 0x3e, 0x0e, 0x1f, 0x67, 0x07, 0xfe, 0x19, 0xfb, 0xd5, + 0x7f, 0xb6, 0x7b, 0x13, 0x72, 0xed, 0x59, 0x9c, 0xa7, 0x03, 0x7e, 0x44, 0xa3, 0x1a, 0x69, 0x24, + 0xe4, 0xc4, 0x34, 0xff, 0x02, 0x1e, 0x86, 0xbd, 0x43, 0x9b, 0x5b, 0x96, 0xfd, 0x3b, 0x9c, 0x2b, + 0x3a, 0xda, 0x4f, 0x22, 0x05, 0xb7, 0xde, 0x6d, 0x7b, 0xdf, 0xdc, 0x09, 0x98, 0x8d, 0xf2, 0x0e, + 0x60, 0x64, 
0xaa, 0x00, 0x90, 0x76, 0x53, 0x6e, 0x3a, 0xb9, 0x9e, 0x1e, 0x6e, 0x64, 0xad, 0xab, + 0x28, 0xa6, 0xfa, 0x85, 0x08, 0x08, 0x1d, 0xa1, 0x45, 0xff, 0x1e, 0x6c, 0x0e, 0x51, 0x19, 0xb7, + 0x35, 0xff, 0x45, 0xe2, 0x2d, 0x22, 0x99, 0xf8, 0x19, 0xf8, 0x35, 0xe6, 0xd0, 0x7f, 0x57, 0x18, + 0xce, 0x6d, 0xbc, 0xce, 0xf8, 0xf7, 0x68, 0xb3, 0x59, 0x01, 0x3d, 0x1a, 0xae, 0x29, 0xee, 0x3e, + 0x19, 0xab, 0xdc, 0x87, 0x73, 0x20, 0x3a, 0x62, 0x00, 0xce, 0x93, 0xc5, 0x5a, 0x00, 0xe7, 0x86, + 0x1d, 0x0c, 0x01, 0x7d, 0x48, 0xb6, 0xf7, 0x06, 0xe5, 0x64, 0x52, 0x8b, 0x49, 0x40, 0xbe, 0xad, + 0x21, 0xfc, 0x39, 0x7e, 0x20, 0x1d, 0xbb, 0xa7, 0x16, 0x26, 0x3b, 0x29, 0xae, 0xdc, 0x7c, 0xec, + 0xd0, 0x49, 0xfb, 0x1d, 0x29, 0xc4, 0x43, 0x24, 0x10, 0x07, 0x27, 0xfe, 0x8f, 0x2d, 0x8f, 0xcc, + 0xed, 0xc0, 0xdb, 0xd1, 0x07, 0xf2, 0x16, 0x27, 0xab, 0x7c, 0xda, 0x19, 0x3c, 0x30, 0x9d, 0x79, + 0xb7, 0xfb, 0x2e, 0xd3, 0x40, 0x56, 0x03, 0xc6, 0xb9, 0x16, 0x33, 0xc1, 0x88, 0xee, 0xc2, 0x1d, + 0x50, 0x51, 0xbe, 0xf6, 0xa3, 0x3e, 0x62, 0xc5, 0x99, 0x83, 0x09, 0x24, 0x68, 0x14, 0x99, 0x1a, + 0x90, 0xad, 0x03, 0xdc, 0xc3, 0x22, 0xf4, 0xd4, 0x79, 0x06, 0x86, 0x3c, 0x45, 0x17, 0x83, 0x11, + 0x4c, 0x3a, 0x07, 0xf5, 0xe8, 0xc1, 0x01, 0x68, 0x04, 0x38, 0x13, 0x86, 0x37, 0x1a, 0x3c, 0x45, + 0xfa, 0x7d, 0x52, 0x15, 0x80, 0x18, 0x93, 0x52, 0xed, 0x1a, 0xf9, 0xc7, 0x34, 0x4a, 0xfc, 0xc3, + 0x6e, 0x1d, 0x19, 0x45, 0x0c, 0xfe, 0x21, 0x91, 0xd7, 0xca, 0x6a, 0x40, 0x6f, 0x73, 0x1a, 0x92, + 0x30, 0x7f, 0xed, 0x89, 0x98, 0x8d, 0x5d, 0xa0, 0x5a, 0x75, 0x22, 0x36, 0x85, 0x4a, 0x72, 0x1d, + 0xba, 0xfb, 0x08, 0x89, 0x76, 0xb7, 0x01, 0x33, 0xdb, 0xa1, 0x3c, 0x1d, 0xb9, 0xf2, 0x83, 0xa9, + 0x48, 0x9b, 0x17, 0xe8, 0xef, 0x59, 0x4c, 0x64, 0x6c, 0x0d, 0x90, 0xb9, 0x2f, 0x80, 0xb2, 0x7f, + 0xd1, 0xfe, 0x85, 0xc8, 0x77, 0x4f, 0x26, 0x25, 0x2e, 0x7f, 0x51, 0x73, 0x62, 0xe7, 0xd2, 0x9f, + 0x1b, 0x16, 0xda, 0x71, 0xb0, 0x6b, 0x62, 0x25, 0xac, 0x12, 0x2f, 0xdc, 0xb6, 0xf2, 0x0e, 0x4e, + 0x8e, 0x7e, 0xcd, 0xdc, 0x72, 0x74, 0x13, 0x1d, 0x62, 0x39, 0x0a, 0xff, 0xea, 0x72, 0x2d, 0x13, + 0x94, 0x63, 0x66, 0x67, 0xcb, 0x0c, 0x3a, 0x54, 0x8c, 0xc7, 0x87, 0x26, 0x39, 0x0d, 0xa3, 0x64, + 0x85, 0x8c, 0x36, 0x35, 0xfc, 0x8b, 0x0f, 0x7f, 0x1a, 0x62, 0xd8, 0xe0, 0x97, 0x37, 0xef, 0x24, + 0x86, 0xb3, 0x19, 0xcf, 0x55, 0x19, 0x1f, 0x47, 0x59, 0x1a, 0x2d, 0xa6, 0x77, 0x3f, 0xa0, 0x1b, + 0xeb, 0x1d, 0x53, 0xfc, 0xfc, 0x47, 0x8b, 0xc6, 0x1f, 0x31, 0x3f, 0x1d, 0xa6, 0xeb, 0xfe, 0x2f, + 0x4c, 0xa1, 0x8d, 0x78, 0x99, 0xb2, 0xc8, 0xc4, 0x49, 0x64, 0x34, 0xf7, 0x98, 0xe0, 0x44, 0xda, + 0xd9, 0x3a, 0x1d, 0xd7, 0xf9, 0x7e, 0x2e, 0xc7, 0xf0, 0x7e, 0x35, 0xe5, 0xc7, 0xcd, 0x6b, 0x80, + 0xf2, 0x4e, 0x8f, 0x3b, 0x1b, 0xa7, 0x30, 0x2d, 0xce, 0x15, 0xb7, 0xc8, 0xb2, 0xbb, 0x1e, 0x47, + 0x55, 0xbb, 0xd0, 0x6d, 0xfd, 0x47, 0x36, 0xa6, 0x6f, 0x4a, 0xeb, 0x9d, 0xca, 0xfe, 0x3d, 0x1d, + 0xcb, 0xd6, 0xfb, 0x85, 0x47, 0x16, 0x41, 0x45, 0x25, 0xc9, 0xfa, 0x50, 0x97, 0x98, 0x8e, 0xe5, + 0xe1, 0xdd, 0x12, 0xce, 0x94, 0x8e, 0x2b, 0x61, 0xe4, 0xdf, 0xe8, 0xfd, 0x67, 0x67, 0xe0, 0x9a, + 0x40, 0x04, 0x59, 0x74, 0xb0, 0xeb, 0x58, 0xaa, 0x4f, 0x60, 0x1a, 0x2c, 0x94, 0xc4, 0xa0, 0x59, + 0xf2, 0x63, 0xcd, 0x03, 0x8c, 0x50, 0x2d, 0x62, 0xaf, 0x59, 0x69, 0x7f, 0x25, 0xea, 0xfd, 0x42, + 0x9e, 0xf3, 0x25, 0x36, 0xf6, 0x94, 0xa2, 0x20, 0x9e, 0x5e, 0x1f, 0x47, 0xdb, 0x4f, 0xc6, 0xad, + 0x80, 0x39, 0xa2, 0x58, 0x80, 0xa6, 0x9c, 0xd0, 0xb7, 0xba, 0x34, 0xe0, 0xc9, 0xba, 0x5e, 0xce, + 0xfa, 0xe7, 0x96, 0xd3, 0x59, 0xe2, 0xdc, 0x4f, 0x4c, 0xe6, 0x74, 0x51, 0xed, 0x4c, 0x89, 0x3f, + 0xcc, 0xeb, 0x35, 0x9f, 0x1a, 0xad, 
0x6b, 0x5d, 0xc0, 0x9e, 0x3d, 0xa6, 0xbf, 0x6a, 0x5e, 0xea, + 0x9f, 0x64, 0x85, 0xf2, 0x01, 0x45, 0xda, 0x5a, 0xfe, 0xdd, 0x1b, 0x6f, 0xa0, 0x90, 0xe3, 0x54, + 0xef, 0x83, 0x3a, 0xae, 0x95, 0x45, 0x3e, 0xa9, 0x3e, 0xf5, 0x1e, 0x5e, 0x38, 0x94, 0xab, 0x7a, + 0x68, 0x27, 0xd6, 0xed, 0x7a, 0x89, 0x19, 0xc6, 0x3a, 0x6d, 0x4f, 0xa5, 0x93, 0x57, 0x52, 0xa8, + 0xda, 0x3c, 0x7b, 0x7b, 0x04, 0x51, 0x2e, 0x42, 0xa9, 0x65, 0x5a, 0x3f, 0x49, 0x35, 0xc3, 0x76, + 0x7a, 0x4a, 0xcb, 0x9a, 0xd4, 0x69, 0xcb, 0x28, 0x0c, 0xa5, 0x49, 0xb5, 0xc8, 0xbe, 0xe4, 0x4e, + 0x7f, 0x44, 0xd5, 0x56, 0xb3, 0xf5, 0x8a, 0x15, 0x5e, 0x8f, 0xde, 0x4f, 0x8d, 0x96, 0x4a, 0x0c, + 0xc9, 0x16, 0x62, 0x20, 0xf1, 0xb6, 0x32, 0xde, 0x1c, 0x24, 0x50, 0x49, 0x93, 0xfa, 0x48, 0xd1, + 0x9a, 0x6e, 0x70, 0xaf, 0x7e, 0x4c, 0xc6, 0xdb, 0x19, 0xae, 0x69, 0xcb, 0x2e, 0xf2, 0xf3, 0xe2, + 0x40, 0x25, 0x3c, 0x9b, 0xc2, 0xf7, 0xe7, 0x45, 0x73, 0xbe, 0xb3, 0xa9, 0x04, 0x42, 0xb1, 0xe1, + 0x33, 0xa3, 0x04, 0x86, 0xd8, 0x21, 0x73, 0x4b, 0x19, 0x8c, 0xbb, 0xc4, 0x6a, 0x53, 0x41, 0x62, + 0x5d, 0xf5, 0xd0, 0xba, 0x00, 0x23, 0x7f, 0xf6, 0x1f, 0x4a, 0x4e, 0x69, 0x8f, 0x35, 0xee, 0xfd, + 0xe4, 0x60, 0xa5, 0x37, 0x89, 0xeb, 0x69, 0xe5, 0x96, 0x93, 0x76, 0x0a, 0xe6, 0x84, 0xce, 0x8e, + 0xf3, 0x80, 0x74, 0x1a, 0xe0, 0x3d, 0x25, 0xa5, 0xf9, 0xe4, 0x6c, 0xc6, 0xcb, 0x2a, 0x82, 0x71, + 0x1b, 0x62, 0x21, 0x0e, 0x49, 0xd1, 0xc3, 0x92, 0xaf, 0x34, 0xde, 0x03, 0xea, 0x6c, 0xf3, 0xd3, + 0x4c, 0x4c, 0x79, 0xbe, 0xd1, 0xbd, 0x3c, 0x27, 0x89, 0xd2, 0xa3, 0xfc, 0xfa, 0xd2, 0xc3, 0xd7, + 0xd5, 0x05, 0xcd, 0x7e, 0x45, 0xdd, 0x22, 0x24, 0x25, 0x38, 0xce, 0x7b, 0x91, 0x8e, 0xf5, 0x0b, + 0xbd, 0xf3, 0x6c, 0xa6, 0xc5, 0xde, 0x5b, 0x5d, 0xae, 0xe8, 0x49, 0x58, 0x3b, 0xaa, 0x1b, 0xec, + 0x06, 0x0a, 0xe8, 0xe7, 0x61, 0xcb, 0x8d, 0x59, 0xe7, 0x61, 0x85, 0x07, 0x80, 0x7b, 0xb8, 0x00, + 0x6a, 0xd6, 0x09, 0x9d, 0x94, 0x5b, 0xe2, 0x6d, 0x2e, 0x4b, 0x74, 0xb5, 0xcf, 0x9e, 0xd8, 0x21, + 0x0a, 0xd4, 0x75, 0xd6, 0xac, 0xa4, 0x86, 0x2c, 0x9c, 0x6e, 0xb1, 0xba, 0x9d, 0x26, 0x6e, 0x7c, + 0x44, 0x61, 0x14, 0x00, 0x88, 0x3f, 0xde, 0x8c, 0x34, 0x84, 0x26, 0x46, 0xdc, 0xb6, 0x15, 0x7b, + 0xf5, 0x6c, 0x0e, 0xf5, 0x8c, 0xc9, 0xf2, 0x93, 0xa2, 0xbd, 0x31, 0xd2, 0x30, 0x34, 0xf4, 0x8f, + 0x16, 0x2f, 0x82, 0xd9, 0x3b, 0xf2, 0xaf, 0x0a, 0x80, 0x99, 0xe7, 0x7a, 0x7c, 0x16, 0xba, 0x41, + 0x33, 0xee, 0xb1, 0x29, 0xc8, 0xaa, 0x4d, 0xd3, 0x88, 0x4c, 0x55, 0x93, 0xa2, 0xd9, 0x31, 0xc6, + 0xbf, 0xf4, 0xae, 0xe4, 0x76, 0x31, 0x80, 0x33, 0xad, 0x1f, 0xcf, 0xdf, 0xc5, 0xba, 0xd2, 0xd4, + 0x9e, 0x4f, 0x98, 0x2d, 0x5a, 0x3e, 0x07, 0xce, 0xc3, 0x91, 0x16, 0xbd, 0xcf, 0x98, 0x83, 0x95, + 0x35, 0xac, 0x58, 0x51, 0xcd, 0xc7, 0x48, 0x63, 0x0f, 0xfb, 0xf7, 0x15, 0x34, 0x07, 0x90, 0x7c, + 0xce, 0x8b, 0x86, 0xe5, 0x7a, 0xad, 0x54, 0x92, 0xaf, 0x11, 0x6d, 0x93, 0x45, 0x8d, 0xc3, 0xc6, + 0xbe, 0xb1, 0x21, 0x3a, 0x7e, 0xcd, 0x18, 0x33, 0xa6, 0x0d, 0xb4, 0x0e, 0xcb, 0x76, 0x5e, 0x66, + 0x7f, 0xa8, 0x93, 0xa2, 0xdd, 0x07, 0x87, 0xbd, 0x50, 0xd5, 0x17, 0x88, 0x27, 0x45, 0x61, 0x78, + 0xc0, 0xef, 0x77, 0x6c, 0xd6, 0x2c, 0x64, 0x91, 0x0d, 0x80, 0x7f, 0x61, 0x17, 0x91, 0xf7, 0xbc, + 0x35, 0xa8, 0x3b, 0x24, 0xb5, 0x0f, 0xf5, 0xa2, 0x9d, 0x7a, 0x7b, 0xcd, 0xe5, 0x9f, 0xe8, 0x96, + 0x3f, 0x10, 0xde, 0xa0, 0x58, 0x84, 0xf2, 0x45, 0x8c, 0x9d, 0x7d, 0x9a, 0x32, 0xd7, 0xfb, 0xb5, + 0x4e, 0xd5, 0x52, 0x4f, 0x49, 0x9e, 0xf9, 0x1c, 0x80, 0xaa, 0xa8, 0xd2, 0xd1, 0x71, 0x98, 0x03, + 0xa4, 0x3e, 0x47, 0xe4, 0xaa, 0xd6, 0xbf, 0xe1, 0x59, 0x27, 0x90, 0xb4, 0xba, 0xe5, 0xc5, 0xdc, + 0xc6, 0x30, 0xfb, 0xaa, 0xa1, 0xcb, 0xea, 0x94, 0x76, 0x11, 
0xd6, 0x26, 0xdd, 0x4e, 0xa8, 0x3f, + 0xe4, 0x0d, 0x4f, 0x33, 0xea, 0x42, 0xf6, 0x99, 0xcc, 0xce, 0xbe, 0xa3, 0xe1, 0x87, 0x9a, 0x02, + 0x70, 0xbc, 0x49, 0xfb, 0xca, 0xd7, 0x22, 0xa5, 0x47, 0x2c, 0x55, 0x7c, 0x49, 0x0d, 0x69, 0xae, + 0x99, 0x48, 0x25, 0xd3, 0xbb, 0xf5, 0x60, 0x40, 0x70, 0x9a, 0x11, 0xaf, 0xe5, 0x2d, 0xfc, 0x1f, + 0xcf, 0x01, 0x41, 0xe0, 0x8e, 0xd8, 0x1a, 0x2a, 0xb6, 0xd9, 0x7b, 0xc4, 0xb0, 0xf3, 0xd2, 0xe4, + 0xe5, 0x71, 0xbe, 0x0d, 0x6d, 0x06, 0x94, 0xaf, 0x89, 0x37, 0xbc, 0x2e, 0x29, 0xb0, 0x1c, 0x32, + 0x5c, 0xb0, 0x9e, 0x89, 0xc4, 0x76, 0xd7, 0xca, 0x19, 0x5d, 0x95, 0x10, 0x1e, 0xfd, 0x8a, 0x98, + 0x98, 0xc8, 0x97, 0xcc, 0xcb, 0x8b, 0xe3, 0x7a, 0xf3, 0xde, 0x27, 0x55, 0x85, 0x17, 0xfc, 0x2a, + 0x66, 0xd0, 0x6f, 0xd8, 0x18, 0xe2, 0xff, 0x37, 0xff, 0xcc, 0x15, 0xdd, 0x40, 0x51, 0xf8, 0x73, + 0x34, 0x66, 0x6b, 0x9d, 0x21, 0x45, 0x51, 0x1f, 0xda, 0x14, 0xf9, 0xd1, 0x6a, 0x1a, 0x11, 0xfe, + 0x47, 0x8c, 0x2c, 0xca, 0xa7, 0xf8, 0xb9, 0x15, 0x53, 0xe1, 0xb0, 0x3f, 0x5a, 0x62, 0x60, 0x78, + 0xe3, 0x62, 0x05, 0x99, 0x20, 0xdd, 0x73, 0xf1, 0xa6, 0x4b, 0x5f, 0x9d, 0xf6, 0xa0, 0x2b, 0x10, + 0x6e, 0x93, 0xec, 0x87, 0x71, 0xa9, 0x12, 0x43, 0x76, 0x6b, 0xcf, 0x82, 0x11, 0x92, 0x8c, 0x7c, + 0x48, 0xdc, 0xd7, 0x78, 0xa4, 0x68, 0x9a, 0x36, 0x9a, 0xbb, 0xc8, 0x91, 0xd8, 0x4b, 0xad, 0xdd, + 0x5f, 0xba, 0x69, 0x1d, 0xd8, 0x5f, 0xa3, 0x5e, 0x71, 0x97, 0x02, 0x8e, 0xc8, 0x15, 0x8a, 0x4f, + 0x0b, 0x6d, 0x0b, 0xf6, 0x06, 0xc0, 0x44, 0x14, 0x0d, 0x9e, 0x32, 0x55, 0x2e, 0xf8, 0x0f, 0xe7, + 0x36, 0x54, 0x2a, 0x26, 0x80, 0x54, 0x10, 0x3b, 0x91, 0x10, 0xe8, 0x03, 0xe1, 0x21, 0x68, 0x59, + 0x57, 0x56, 0x00, 0x05, 0xb7, 0x64, 0xd2, 0x96, 0xa7, 0x3a, 0xec, 0xe9, 0xd7, 0x1d, 0x62, 0x36, + 0x67, 0x03, 0x4f, 0xf0, 0xcf, 0xf8, 0xcf, 0xc6, 0x28, 0xa4, 0x8f, 0x7a, 0x67, 0x09, 0xd0, 0x4d, + 0xe1, 0x2f, 0x42, 0x1f, 0x42, 0xe0, 0x3d, 0xa6, 0x24, 0xcf, 0x13, 0x33, 0x61, 0x25, 0x76, 0x0c, + 0x1a, 0xbf, 0xfd, 0x63, 0x65, 0x22, 0x74, 0x7f, 0x38, 0x45, 0xf3, 0x06, 0xb2, 0x52, 0x94, 0x35, + 0x5f, 0x7b, 0xd2, 0xab, 0xd8, 0x76, 0xfd, 0xfb, 0x6f, 0x42, 0xd0, 0xc6, 0xd1, 0x55, 0x37, 0xa8, + 0xfc, 0xb1, 0xc4, 0x82, 0xc6, 0x02, 0x58, 0x00, 0xc2, 0x4f, 0x47, 0x21, 0x1a, 0x55, 0x0a, 0x7b, + 0xc0, 0x8b, 0xfe, 0xb5, 0x65, 0xe6, 0xbe, 0x8f, 0xc9, 0x00, 0x03, 0x6a, 0x2b, 0xa0, 0x92, 0x1d, + 0x22, 0xf4, 0xa4, 0x60, 0x51, 0xf8, 0xab, 0xe2, 0xfc, 0xfd, 0x7d, 0x59, 0xb7, 0x71, 0x44, 0xa5, + 0xd6, 0x51, 0x0e, 0xc3, 0x8b, 0xbc, 0xbd, 0xd6, 0x49, 0x97, 0xc1, 0x09, 0x12, 0x16, 0x01, 0xf1, + 0x7e, 0x0c, 0x4d, 0x93, 0xc8, 0x42, 0x16, 0x25, 0x39, 0xdd, 0xc4, 0x80, 0xee, 0x1c, 0xca, 0xcb, + 0x1c, 0xbb, 0xa1, 0xec, 0xd3, 0xc8, 0x53, 0x29, 0x82, 0x5a, 0xe3, 0x0e, 0x9b, 0x2d, 0xde, 0xe4, + 0xc4, 0xf5, 0xe9, 0x9b, 0x5d, 0x0a, 0xd2, 0x37, 0x40, 0x9a, 0x1c, 0x3c, 0x39, 0xa7, 0x39, 0x4d, + 0x31, 0x1d, 0x69, 0x20, 0x7f, 0x55, 0x80, 0x6f, 0x31, 0xe7, 0xc8, 0xbc, 0x9b, 0xf5, 0xa6, 0x95, + 0x3f, 0x8b, 0xe3, 0xca, 0x8a, 0x90, 0x6f, 0xa8, 0xa0, 0xf2, 0xc3, 0xd7, 0x6a, 0x5c, 0xba, 0x85, + 0xf6, 0x40, 0x16, 0xaa, 0xb9, 0x72, 0x17, 0xd3, 0x7d, 0xb1, 0x74, 0x7d, 0xde, 0x7e, 0x19, 0x74, + 0x18, 0x6c, 0x09, 0xf7, 0x3d, 0x10, 0xb5, 0x65, 0xa3, 0x1f, 0xe0, 0x32, 0xb5, 0xcf, 0x6e, 0xf0, + 0xf9, 0x5f, 0xd3, 0xe0, 0x7b, 0x25, 0xd9, 0x95, 0x9e, 0x50, 0x23, 0x3c, 0x64, 0xec, 0x06, 0x1a, + 0x6e, 0xb8, 0x76, 0xa4, 0xc1, 0x03, 0xe0, 0xd6, 0xf0, 0xd2, 0x1f, 0xe1, 0x9c, 0x45, 0x66, 0x56, + 0x5b, 0x88, 0x4c, 0x3f, 0xeb, 0xc9, 0xfa, 0x05, 0xed, 0xc1, 0x70, 0xef, 0xcd, 0xce, 0xc4, 0x5b, + 0x17, 0x86, 0x50, 0xe1, 0x2a, 0x50, 0xf1, 0xf2, 0xc4, 0x22, 0x19, 0x57, 0xec, 0xb2, 
0xd5, 0x9a, + 0x53, 0x3f, 0x0e, 0xbd, 0x9a, 0xf5, 0xa8, 0x70, 0x4a, 0x5d, 0x52, 0xff, 0x0a, 0xb1, 0xea, 0xc1, + 0xb6, 0x7c, 0x04, 0x40, 0xac, 0x25, 0x66, 0xb4, 0xe7, 0xaf, 0xd5, 0xe9, 0xa0, 0x61, 0x1e, 0x6d, + 0xa5, 0x8f, 0xf0, 0x38, 0x9f, 0x04, 0x01, 0x60, 0x0d, 0xa4, 0x18, 0x64, 0xaf, 0x28, 0x03, 0xce, + 0x0d, 0x32, 0x23, 0x71, 0xbf, 0x9e, 0x62, 0xd3, 0x73, 0xc9, 0x69, 0xe5, 0xd2, 0x51, 0xee, 0x12, + 0xac, 0xf3, 0x91, 0x5e, 0x40, 0x51, 0xba, 0x1f, 0x9f, 0x67, 0x77, 0xce, 0xe6, 0xab, 0xf3, 0x96, + 0x50, 0x5d, 0x44, 0x0d, 0xf0, 0x3b, 0x11, 0x3a, 0x29, 0xef, 0xd3, 0x0d, 0xa8, 0x6e, 0x39, 0xbf, + 0x48, 0x44, 0x32, 0xc0, 0xbb, 0xab, 0xca, 0x0c, 0xb7, 0x66, 0x9a, 0x77, 0x02, 0xff, 0x85, 0x00, + 0x8e, 0xad, 0x08, 0xc9, 0xbc, 0xb0, 0xe2, 0x5a, 0xb8, 0xd9, 0xb6, 0x65, 0x1d, 0x81, 0x57, 0xcc, + 0x91, 0x91, 0x47, 0x67, 0x80, 0x3e, 0x0f, 0x4f, 0x6a, 0xb2, 0xf8, 0xb5, 0xdc, 0x95, 0xa3, 0x23, + 0x23, 0xca, 0xb8, 0x4e, 0x69, 0xe9, 0xe4, 0xf7, 0x06, 0xe6, 0x4d, 0x49, 0x86, 0x01, 0xc4, 0xa4, + 0x17, 0x44, 0x46, 0xa8, 0xe4, 0x33, 0x76, 0x6f, 0xa9, 0xb4, 0x73, 0x14, 0x25, 0x07, 0x9d, 0x06, + 0xb1, 0xb0, 0x17, 0x76, 0x64, 0xda, 0x58, 0xe0, 0xcf, 0xad, 0xba, 0xb5, 0x89, 0xf4, 0x2e, 0xd0, + 0x2c, 0x6f, 0x9d, 0x5b, 0x73, 0xab, 0x91, 0xa9, 0x33, 0xe4, 0x15, 0x3f, 0xdd, 0xff, 0xa6, 0x5e, + 0x3a, 0xbc, 0x0d, 0x00, 0x51, 0x89, 0x1e, 0x63, 0x4c, 0xf3, 0x75, 0x0e, 0xbc, 0x29, 0x66, 0x11, + 0x9b, 0x30, 0x72, 0x63, 0x22, 0xfc, 0x5d, 0x2f, 0xf0, 0x6f, 0xb5, 0xfa, 0xc4, 0xe2, 0x86, 0xab, + 0x2d, 0xf3, 0x52, 0x3f, 0x98, 0xc1, 0xe4, 0xde, 0x59, 0x00, 0x50, 0x2a, 0x3b, 0x6a, 0x09, 0x24, + 0xf6, 0x29, 0x35, 0xfd, 0x66, 0xd8, 0x46, 0x9b, 0xd9, 0xf5, 0xb4, 0x35, 0x88, 0xce, 0xd5, 0x8e, + 0x8b, 0xca, 0x36, 0x48, 0x80, 0xdd, 0xbb, 0xe2, 0xd1, 0x1d, 0x73, 0x55, 0xd4, 0xc1, 0x09, 0x21, + 0xee, 0xd5, 0xf2, 0xcb, 0x4c, 0x11, 0xd4, 0x3d, 0xe2, 0x66, 0x4c, 0x4f, 0x4e, 0xa4, 0x98, 0xa6, + 0xc7, 0xcc, 0x0a, 0x60, 0x85, 0xa0, 0xd4, 0xfa, 0xe5, 0xcb, 0x0d, 0x3d, 0x95, 0x02, 0x2d, 0x47, + 0x32, 0x1a, 0x01, 0x19, 0x64, 0x6e, 0x95, 0x21, 0x04, 0x8d, 0xb6, 0x03, 0xcc, 0x4f, 0xeb, 0xdc, + 0xcf, 0xf6, 0x85, 0x95, 0x5f, 0xc3, 0xc4, 0xbe, 0x13, 0xc4, 0x7d, 0xa9, 0xfa, 0x7a, 0xc4, 0xe9, + 0x49, 0x53, 0xd7, 0xb9, 0x57, 0xce, 0x23, 0xa8, 0x8b, 0xc8, 0x45, 0x21, 0x67, 0x99, 0x94, 0x07, + 0x12, 0x54, 0x5c, 0x5d, 0x3b, 0x52, 0x05, 0x14, 0x5d, 0xf9, 0x13, 0xd9, 0xe9, 0x6f, 0xc3, 0x46, + 0xfd, 0xd7, 0xa2, 0x4a, 0xce, 0x80, 0x38, 0xc2, 0xb2, 0x39, 0x62, 0x0d, 0x1f, 0xe1, 0xfe, 0x59, + 0x5b, 0x4c, 0x03, 0x1b, 0x54, 0xe9, 0x1e, 0x29, 0x93, 0x9c, 0xba, 0xde, 0x17, 0x14, 0xb9, 0xe0, + 0x6b, 0x1a, 0xdc, 0x69, 0x14, 0x9b, 0x9f, 0x3a, 0x86, 0x8c, 0x57, 0x40, 0xbd, 0xd5, 0xd1, 0x14, + 0x2d, 0x44, 0x38, 0x8f, 0x5d, 0xd5, 0x43, 0x81, 0x43, 0x95, 0xe5, 0x92, 0xc6, 0x6f, 0x20, 0x43, + 0xc0, 0xa3, 0xc2, 0x35, 0x3b, 0x2f, 0x71, 0xa0, 0x9c, 0x2d, 0xb2, 0x96, 0xc4, 0x9f, 0xd1, 0x4a, + 0x28, 0xc9, 0xd8, 0xe6, 0x46, 0xa6, 0xec, 0x84, 0xe9, 0xe0, 0xc3, 0x6f, 0x37, 0x14, 0x3c, 0x8f, + 0x4c, 0x67, 0xb8, 0x8b, 0xed, 0xb9, 0x97, 0xa1, 0x06, 0xf1, 0x0c, 0x3e, 0xf1, 0x4c, 0x4c, 0xcf, + 0x80, 0x80, 0xcd, 0x4e, 0x9d, 0x66, 0x01, 0x0b, 0xb8, 0x0b, 0x83, 0xed, 0xf9, 0x5e, 0x2e, 0x18, + 0x5a, 0x9c, 0x0c, 0x43, 0xd1, 0x9a, 0x96, 0xe9, 0x31, 0x36, 0x50, 0xe1, 0x1c, 0x8e, 0x6c, 0x28, + 0x4c, 0x3e, 0x64, 0xec, 0xa6, 0x57, 0x3e, 0xa5, 0x46, 0x23, 0x82, 0x2e, 0x41, 0xc9, 0xb6, 0x7c, + 0x51, 0x1b, 0x2e, 0xfd, 0x40, 0x46, 0x5a, 0x51, 0x78, 0xb3, 0x34, 0xf1, 0x3e, 0xb2, 0x6e, 0xc2, + 0x73, 0x51, 0x5a, 0xf4, 0xf7, 0x39, 0x78, 0xec, 0x8d, 0xd4, 0xfc, 0xf3, 0x50, 0xa7, 0xdf, 0xd5, + 0x3c, 0x52, 
0x03, 0x67, 0xd9, 0x18, 0x43, 0x05, 0x48, 0xec, 0x14, 0xee, 0x0e, 0xee, 0x9b, 0x2d, + 0xa5, 0x1d, 0x60, 0xf2, 0xf7, 0xd0, 0x4a, 0xa1, 0x0e, 0xe4, 0x68, 0x87, 0xeb, 0xce, 0x7d, 0xbb, + 0x90, 0x0b, 0xa4, 0x50, 0x64, 0x48, 0x11, 0x6e, 0x7e, 0x2f, 0xae, 0x31, 0x33, 0x1a, 0x41, 0x18, + 0xbc, 0x8f, 0x0f, 0x30, 0x6a, 0x31, 0xc1, 0x9a, 0xde, 0x4f, 0x13, 0xd0, 0x84, 0x79, 0xb2, 0xf3, + 0x07, 0x43, 0x7a, 0xc6, 0x9e, 0x40, 0x40, 0xd6, 0xec, 0x30, 0x5c, 0x86, 0xe3, 0x58, 0xf3, 0x8b, + 0x35, 0x23, 0x6b, 0xe9, 0x15, 0x03, 0x11, 0x4e, 0x94, 0x22, 0xa8, 0x4d, 0x75, 0x58, 0xc8, 0xb0, + 0xe4, 0x32, 0xf0, 0xed, 0x35, 0x13, 0xb6, 0x09, 0x5b, 0xae, 0x0b, 0x69, 0x39, 0x4f, 0x9f, 0x82, + 0xbb, 0x74, 0x6d, 0xd2, 0xca, 0x18, 0x8d, 0x01, 0xce, 0xa6, 0x5a, 0x37, 0x18, 0x61, 0xdb, 0x61, + 0xa7, 0x2b, 0x95, 0x26, 0xe9, 0x50, 0x3d, 0xe3, 0x1b, 0xcb, 0x59, 0xa4, 0xc1, 0xb1, 0xc7, 0xba, + 0x97, 0x17, 0x54, 0xb3, 0xc4, 0x15, 0x25, 0x2b, 0xd8, 0xa0, 0xb0, 0x1f, 0x20, 0xda, 0x8d, 0x8d, + 0x4f, 0xac, 0x9c, 0xde, 0x56, 0x11, 0xd7, 0xdd, 0x3b, 0xc4, 0xee, 0xde, 0x2f, 0x6e, 0x77, 0x26, + 0xe8, 0xfb, 0xfa, 0x4e, 0xd7, 0x89, 0x0a, 0x15, 0x66, 0xa1, 0x68, 0x05, 0x3b, 0x6c, 0xfd, 0x19, + 0x1b, 0xdc, 0xda, 0x59, 0x14, 0xdc, 0x6b, 0xd0, 0x1e, 0xe1, 0xbc, 0xef, 0x72, 0xba, 0x09, 0xd3, + 0x62, 0x98, 0x68, 0xf6, 0xdc, 0xdc, 0x0a, 0xcd, 0x43, 0x48, 0x65, 0x1c, 0xd2, 0x12, 0xda, 0x62, + 0x31, 0xe2, 0x46, 0x6d, 0x00, 0x65, 0xd9, 0x6b, 0x61, 0xaf, 0x8d, 0x5c, 0xa0, 0x60, 0x84, 0x74, + 0x6c, 0x2a, 0x44, 0x7f, 0x1e, 0x27, 0x81, 0x89, 0xec, 0xa3, 0xb8, 0x67, 0x41, 0xe7, 0xca, 0x14, + 0x1d, 0xfe, 0x6a, 0x36, 0x96, 0x38, 0x11, 0x15, 0x02, 0x38, 0xdc, 0xba, 0xe8, 0x4e, 0x66, 0x99, + 0x51, 0xf7, 0x04, 0x37, 0x10, 0xd6, 0x0f, 0x48, 0x51, 0x07, 0xc9, 0x02, 0xda, 0x94, 0x27, 0x32, + 0xe2, 0x4b, 0x8c, 0x69, 0x3a, 0xe0, 0x28, 0x13, 0x02, 0x8e, 0x0a, 0x04, 0x39, 0x2e, 0x07, 0x32, + 0x06, 0x9f, 0x6e, 0xdd, 0xe7, 0x2a, 0x66, 0xe9, 0xd8, 0x0a, 0x86, 0x24, 0xf9, 0xeb, 0x8b, 0x60, + 0x51, 0xc2, 0x07, 0x99, 0x6b, 0xfe, 0xed, 0x28, 0x23, 0x6a, 0x42, 0xa6, 0x18, 0xf7, 0x17, 0xa1, + 0x5e, 0xec, 0xc3, 0xf4, 0xa1, 0x7a, 0x3f, 0xeb, 0xdf, 0xe4, 0xc7, 0x3c, 0xd5, 0xbc, 0xba, 0xb2, + 0xa5, 0xa5, 0xd4, 0x5d, 0x69, 0x27, 0x61, 0xb8, 0xd0, 0x93, 0x1e, 0x76, 0x0b, 0xeb, 0x41, 0xea, + 0xd8, 0x45, 0x5b, 0xc8, 0x15, 0x59, 0x90, 0xe5, 0x32, 0x8d, 0xc8, 0xdb, 0x08, 0x22, 0xc1, 0x43, + 0xa2, 0x24, 0x4b, 0x04, 0x8a, 0xd2, 0x9b, 0xeb, 0x08, 0x7b, 0x52, 0xcd, 0xde, 0xad, 0xe9, 0x56, + 0x5a, 0x42, 0xda, 0xfc, 0x57, 0x6d, 0x9b, 0xfb, 0x0b, 0x10, 0x0f, 0xeb, 0x5f, 0xd9, 0x17, 0xb1, + 0x7e, 0xba, 0xd1, 0x3d, 0x83, 0xed, 0x90, 0x8f, 0x8b, 0xb2, 0xd2, 0x53, 0x1c, 0xc7, 0xc0, 0xb1, + 0xd8, 0x27, 0x55, 0x02, 0xb1, 0x2e, 0x51, 0x99, 0x34, 0x3e, 0x56, 0x17, 0x05, 0xf6, 0x20, 0x65, + 0x45, 0x6a, 0x74, 0x26, 0x77, 0x63, 0xa3, 0x00, 0xce, 0x54, 0x41, 0x32, 0x63, 0xb1, 0xd7, 0x55, + 0x09, 0x20, 0x53, 0xb2, 0x02, 0x61, 0xec, 0x43, 0x3c, 0x73, 0x44, 0x47, 0x10, 0xcc, 0xcd, 0x91, + 0xae, 0xd9, 0xd3, 0x63, 0x6c, 0x78, 0x2a, 0xf8, 0x90, 0x89, 0xf9, 0xcb, 0x83, 0xb3, 0xcb, 0xdb, + 0x00, 0xa0, 0xb4, 0x94, 0x26, 0xfb, 0xee, 0x38, 0x99, 0x6b, 0xcf, 0xdc, 0x80, 0xdd, 0xbf, 0x0e, + 0x53, 0xb1, 0xaa, 0xc0, 0x02, 0x1a, 0xe2, 0x15, 0xc3, 0x89, 0x6b, 0x77, 0xca, 0xa5, 0x14, 0x57, + 0x68, 0xc4, 0x7e, 0x52, 0xfa, 0xa9, 0xc9, 0x81, 0xd8, 0x5f, 0xfc, 0x9c, 0x28, 0x49, 0x7e, 0x36, + 0xc9, 0x9d, 0xe9, 0xfc, 0x97, 0x82, 0x7b, 0x79, 0x11, 0xc2, 0xe7, 0x23, 0x7e, 0x77, 0x24, 0x07, + 0xa0, 0xef, 0x92, 0x93, 0xab, 0x13, 0x3b, 0x47, 0x6f, 0xd8, 0xc1, 0xab, 0x06, 0xee, 0x4e, 0xc8, + 0x98, 0xb5, 0x56, 0xd6, 0xac, 0x40, 
0xe0, 0x41, 0x73, 0x75, 0x11, 0x56, 0x59, 0x0d, 0xad, 0x57, + 0x6a, 0xd7, 0x97, 0xeb, 0x88, 0x21, 0xa7, 0xed, 0x2b, 0xcc, 0x91, 0x83, 0x43, 0xb3, 0xef, 0x33, + 0x85, 0x98, 0x6c, 0xaa, 0xcd, 0xcc, 0x12, 0x9d, 0x78, 0x8a, 0x84, 0x2f, 0x18, 0xb5, 0x48, 0x99, + 0x62, 0xe8, 0xce, 0x33, 0xe0, 0x90, 0x27, 0x4e, 0xb5, 0x9b, 0xbb, 0x38, 0xce, 0x6a, 0x09, 0x5b, + 0x95, 0x89, 0x8d, 0x22, 0x1c, 0x61, 0xe0, 0x87, 0xf5, 0xe3, 0x4e, 0x2e, 0x43, 0xf2, 0x67, 0x40, + 0xf9, 0x1e, 0x98, 0x47, 0x70, 0xa8, 0x1b, 0x4f, 0xed, 0xdb, 0x64, 0x99, 0x15, 0x1f, 0xa0, 0x5b, + 0x15, 0x06, 0x6f, 0x5c, 0xa7, 0x5a, 0x13, 0xcf, 0x98, 0x22, 0x02, 0x48, 0x65, 0xb7, 0xc2, 0x3d, + 0x85, 0x9e, 0x98, 0xe1, 0xf7, 0x05, 0xc1, 0x38, 0x60, 0x08, 0x84, 0xaf, 0x61, 0x78, 0xb7, 0x60, + 0x0e, 0x9d, 0xe5, 0xb2, 0x0a, 0xef, 0x7b, 0x66, 0x8d, 0xfd, 0x15, 0x80, 0x29, 0x18, 0x96, 0x02, + 0x89, 0x26, 0x57, 0xe1, 0x75, 0xbc, 0x6a, 0x68, 0xa0, 0xd8, 0x4c, 0x4e, 0xba, 0x05, 0x57, 0x8d, + 0x7e, 0x2b, 0xc2, 0x40, 0x67, 0xbe, 0xe4, 0xc7, 0x2a, 0x5b, 0x92, 0xcc, 0x6a, 0xd5, 0xb4, 0xbb, + 0xea, 0x0a, 0x13, 0x02, 0xd4, 0xf5, 0xf3, 0x90, 0x88, 0xfb, 0x47, 0x94, 0xea, 0xd8, 0x05, 0xd1, + 0xfc, 0x62, 0xd3, 0xbb, 0xc0, 0xab, 0x51, 0xac, 0x46, 0x6b, 0x30, 0x17, 0xf2, 0x61, 0x9c, 0x8b, + 0xf8, 0x21, 0x7d, 0x2a, 0x43, 0x3e, 0x3a, 0x16, 0x56, 0x3b, 0x2c, 0xdb, 0x5d, 0xa8, 0xde, 0xab, + 0xe4, 0x42, 0xcf, 0xde, 0x94, 0xe0, 0xec, 0xa6, 0x74, 0xed, 0xd8, 0xd3, 0x8d, 0xf3, 0xea, 0x90, + 0x16, 0xf5, 0x3e, 0x97, 0xa7, 0x74, 0xb1, 0xa2, 0x7e, 0x2b, 0x54, 0xe7, 0xcf, 0xfe, 0xa3, 0xf7, + 0x76, 0x08, 0x95, 0x8e, 0xf1, 0xe8, 0x60, 0xc5, 0xb7, 0x0c, 0x42, 0x91, 0x9a, 0x75, 0xba, 0xec, + 0x9b, 0xa0, 0x81, 0xf6, 0xe8, 0xa0, 0xd5, 0x19, 0x42, 0x2c, 0x20, 0xed, 0x43, 0xcf, 0x39, 0x2e, + 0xad, 0xb6, 0x6a, 0x43, 0xe1, 0x83, 0x88, 0x74, 0x37, 0xf8, 0xc2, 0x80, 0x7f, 0xcf, 0xe6, 0x41, + 0x00, 0xe8, 0x7a, 0xfc, 0x70, 0x3b, 0xe4, 0x62, 0xb3, 0x7f, 0xed, 0x0f, 0xa3, 0xc6, 0x00, 0x9d, + 0xbd, 0xe2, 0x62, 0x21, 0xea, 0x5f, 0x29, 0x1c, 0x62, 0x79, 0xbc, 0x26, 0xe8, 0x1b, 0x50, 0x31, + 0xa6, 0x6a, 0x29, 0x4f, 0x6e, 0xd0, 0xf2, 0x05, 0xbb, 0xca, 0x21, 0xde, 0x66, 0x25, 0x1a, 0x04, + 0x52, 0xb6, 0xef, 0x1c, 0x9e, 0x57, 0x58, 0xf2, 0x96, 0xe1, 0x1e, 0x25, 0x57, 0xe4, 0x3c, 0xdf, + 0x60, 0xb2, 0x2c, 0x2a, 0x62, 0x43, 0x5b, 0x08, 0x06, 0x89, 0x77, 0xdb, 0x87, 0xd1, 0x0e, 0xac, + 0xbf, 0x18, 0x54, 0xad, 0x06, 0x02, 0x11, 0x65, 0x0a, 0x93, 0xa4, 0x9d, 0xc8, 0x16, 0xe4, 0xc7, + 0x0f, 0x44, 0x39, 0x94, 0x17, 0x77, 0xab, 0xcd, 0xa9, 0x83, 0x7b, 0xe0, 0xf0, 0xa4, 0x69, 0x27, + 0x00, 0xc6, 0x73, 0x91, 0x81, 0x18, 0x80, 0xff, 0xda, 0xcc, 0xeb, 0xeb, 0xef, 0x7d, 0xf6, 0x2a, + 0x19, 0xb9, 0xa4, 0x05, 0x89, 0x2f, 0x76, 0xe2, 0xf0, 0x0c, 0x98, 0x68, 0xe0, 0x4a, 0xf4, 0xc5, + 0xfb, 0x45, 0x98, 0xe7, 0xa6, 0x03, 0x4a, 0xe0, 0xab, 0x56, 0xac, 0x79, 0x6a, 0xf2, 0x19, 0x23, + 0x18, 0x3b, 0xa3, 0xba, 0x5b, 0x31, 0x56, 0x3c, 0xb7, 0xc1, 0x24, 0x6c, 0xc0, 0x03, 0xc3, 0xdb, + 0xa3, 0x77, 0x47, 0x7b, 0xfa, 0xd2, 0xa5, 0xbd, 0x5e, 0xb5, 0x28, 0x9b, 0xd5, 0x5c, 0xa5, 0xe0, + 0xe3, 0x39, 0xee, 0x06, 0x6a, 0x07, 0x92, 0x62, 0x0f, 0x59, 0x4a, 0x5a, 0xbb, 0x68, 0x7f, 0x47, + 0xa6, 0x97, 0x2a, 0x11, 0x73, 0xe0, 0xe6, 0x64, 0x52, 0x4f, 0x98, 0xce, 0x93, 0x14, 0x55, 0x4f, + 0x2f, 0x3c, 0x44, 0x74, 0x41, 0x0a, 0x2e, 0x9a, 0xfa, 0x3c, 0xd1, 0x80, 0xa0, 0xba, 0x2a, 0x88, + 0x47, 0x6a, 0x35, 0x52, 0xf0, 0x48, 0x4f, 0x20, 0x5c, 0xd4, 0xa4, 0x14, 0xfc, 0xf0, 0x41, 0xce, + 0x9f, 0x81, 0x2e, 0x93, 0x72, 0x63, 0x55, 0xbb, 0x82, 0x7e, 0xe1, 0xe7, 0x9f, 0xa2, 0x35, 0x94, + 0x0a, 0x6d, 0xb5, 0xfc, 0xdf, 0x2f, 0xec, 0xa2, 0x65, 0x97, 
0x83, 0xf0, 0xf1, 0xce, 0xae, 0xb3, + 0xa9, 0xfa, 0x6c, 0x96, 0x2b, 0x33, 0x8b, 0x5f, 0x5b, 0xb3, 0x74, 0x8b, 0x73, 0x47, 0xb2, 0x7d, + 0xf3, 0xb8, 0x83, 0x0b, 0x32, 0x45, 0xe2, 0x51, 0xfe, 0xae, 0x42, 0x08, 0xef, 0xfe, 0xdd, 0x62, + 0xdb, 0x32, 0x24, 0xca, 0x25, 0x38, 0x4f, 0x65, 0xf5, 0x8e, 0x75, 0x64, 0x9a, 0xf6, 0x10, 0xa9, + 0xbb, 0x52, 0xcc, 0x45, 0x6e, 0x4e, 0xab, 0xad, 0xed, 0x98, 0x0e, 0x80, 0xb8, 0x11, 0x6b, 0x7c, + 0xda, 0xc6, 0x64, 0x1a, 0x16, 0xb3, 0x20, 0x50, 0x1a, 0xfb, 0xc9, 0xc0, 0x2e, 0x5a, 0x82, 0x33, + 0xab, 0xfd, 0x6d, 0xbd, 0xf9, 0xf7, 0x53, 0x57, 0x5f, 0x4d, 0xfa, 0x72, 0xb7, 0x79, 0xb0, 0xd9, + 0xd6, 0xc6, 0xc4, 0xa5, 0x79, 0x79, 0x9a, 0xf1, 0xaf, 0x5d, 0xa2, 0x9c, 0xdd, 0x45, 0x0e, 0x77, + 0x64, 0x8a, 0x2d, 0xa7, 0xc6, 0x51, 0x2c, 0xd8, 0x2f, 0x7d, 0x40, 0xc0, 0x20, 0x6c, 0x4f, 0x31, + 0xda, 0xa9, 0x16, 0xc7, 0x27, 0x75, 0x3c, 0xa9, 0x6b, 0x74, 0xb6, 0xf9, 0xe2, 0xf4, 0xc0, 0x7c, + 0x6d, 0xd8, 0x66, 0x21, 0x00, 0x6c, 0xbc, 0x3f, 0xb7, 0x04, 0x4d, 0x1e, 0x7f, 0x19, 0xd7, 0x64, + 0x4c, 0xe5, 0xb9, 0x4f, 0x0b, 0xb3, 0xff, 0xea, 0x4a, 0xc5, 0xcd, 0xe8, 0xea, 0x72, 0x99, 0xd3, + 0x88, 0xa3, 0x58, 0xfa, 0xfc, 0xa9, 0x02, 0xcd, 0x8e, 0xe3, 0x2c, 0x7e, 0xb7, 0x14, 0xde, 0xd6, + 0x3b, 0x0e, 0x84, 0x35, 0xd1, 0xd1, 0x65, 0x1a, 0xe4, 0x12, 0x22, 0xfe, 0xa0, 0x48, 0x66, 0x5b, + 0x74, 0x06, 0x3e, 0x7b, 0xd5, 0x91, 0x9e, 0x06, 0xc1, 0x8c, 0x07, 0x8c, 0xc9, 0xde, 0x7c, 0x3e, + 0x92, 0x49, 0xb2, 0xe5, 0xc9, 0x99, 0x36, 0xf4, 0xc5, 0x7a, 0xc2, 0x57, 0xf1, 0xef, 0x7c, 0xed, + 0x69, 0x4f, 0x8b, 0x2a, 0x19, 0x92, 0x8a, 0x0b, 0xa1, 0x15, 0x14, 0xd4, 0x8f, 0x59, 0x5c, 0xad, + 0x3c, 0xeb, 0x12, 0xcf, 0x55, 0x4e, 0x78, 0xca, 0x55, 0xc6, 0x8a, 0x94, 0x14, 0x5c, 0xcc, 0x9e, + 0x4a, 0x51, 0x88, 0x9c, 0x64, 0xae, 0xf0, 0x9f, 0xc5, 0xe2, 0xbf, 0xe5, 0x23, 0xab, 0xd4, 0x7e, + 0x80, 0x32, 0x12, 0x15, 0x84, 0xf6, 0x5a, 0xfc, 0x7b, 0xf9, 0x27, 0x2e, 0xdc, 0x97, 0xcf, 0x63, + 0x69, 0x86, 0x37, 0x77, 0xc6, 0x71, 0x35, 0xa1, 0x06, 0xea, 0xeb, 0xd7, 0x79, 0x8a, 0x5b, 0xc1, + 0x29, 0x82, 0x46, 0xb0, 0x3a, 0x20, 0x11, 0x72, 0xfa, 0x62, 0x1b, 0x66, 0x4c, 0xb3, 0x30, 0x22, + 0x65, 0x1a, 0x75, 0xf8, 0xe9, 0xbe, 0xef, 0xa1, 0xd1, 0x20, 0x0b, 0x40, 0xc8, 0x3b, 0x6c, 0x9c, + 0xd0, 0x87, 0x48, 0xb9, 0x07, 0x39, 0x5b, 0x1f, 0xd7, 0x10, 0x54, 0x16, 0xd8, 0xfb, 0xd0, 0xe5, + 0x04, 0x9d, 0x76, 0x5d, 0xbb, 0xd0, 0x7f, 0xb0, 0x6e, 0xbd, 0x31, 0xa2, 0x7b, 0x3d, 0x88, 0x7b, + 0x9a, 0xac, 0xcc, 0x45, 0x23, 0x68, 0xaf, 0x42, 0xae, 0x72, 0xad, 0x28, 0xa8, 0x91, 0xa2, 0x4c, + 0x2c, 0x95, 0x37, 0x04, 0x80, 0x51, 0x2c, 0x56, 0xb9, 0x2c, 0xc3, 0x50, 0x9e, 0xa4, 0x82, 0x41, + 0x17, 0x7c, 0x0b, 0x18, 0x5e, 0x7e, 0xf4, 0xa5, 0x3f, 0x26, 0xe8, 0xe4, 0x8f, 0x63, 0xa7, 0xa0, + 0x63, 0x64, 0xca, 0xce, 0x85, 0x93, 0x07, 0x57, 0x9e, 0xb3, 0x48, 0x32, 0x67, 0x0d, 0xea, 0x58, + 0x82, 0xc3, 0x1a, 0xd3, 0x66, 0xd7, 0xae, 0x0c, 0xfb, 0x91, 0x12, 0x8b, 0x96, 0x2c, 0xc8, 0x42, + 0x2d, 0xee, 0x8a, 0xc7, 0x00, 0x32, 0x6b, 0x06, 0xfd, 0x15, 0xc1, 0x5b, 0x05, 0xa1, 0xd5, 0x4b, + 0xda, 0x9f, 0x37, 0xda, 0x84, 0xdc, 0xf6, 0xf5, 0x48, 0xc2, 0x9c, 0x42, 0xbe, 0x1e, 0xc5, 0xf6, + 0xab, 0x79, 0x76, 0x5c, 0x4c, 0x72, 0x89, 0x67, 0x54, 0x75, 0x17, 0x3d, 0x36, 0x53, 0x9e, 0x98, + 0x93, 0x8d, 0x5a, 0x15, 0xb3, 0xaa, 0x65, 0x14, 0xb0, 0x60, 0x1e, 0xd8, 0xcb, 0xd5, 0x79, 0x66, + 0x65, 0xc0, 0x44, 0x13, 0xae, 0x75, 0x5c, 0xf4, 0x68, 0x41, 0x0b, 0x6c, 0x30, 0x47, 0xbf, 0x37, + 0x3f, 0x56, 0x38, 0x8a, 0x5c, 0x67, 0xd7, 0x7a, 0x95, 0xb2, 0xac, 0xfb, 0xc3, 0x70, 0x8e, 0x90, + 0x4e, 0x0f, 0xd7, 0xec, 0xc0, 0x38, 0x74, 0x79, 0x99, 0x2e, 0xa0, 0x9d, 0xb2, 0xe5, 
0xb4, 0xef, + 0x85, 0x2f, 0x20, 0x8b, 0xd4, 0x97, 0x75, 0x47, 0xcf, 0xbb, 0x0b, 0x66, 0x14, 0x28, 0x0d, 0x37, + 0x3d, 0x51, 0x27, 0xce, 0x09, 0xd8, 0x3f, 0x0d, 0x0b, 0x5c, 0x52, 0xda, 0x18, 0x5a, 0x4c, 0x23, + 0x4d, 0x9e, 0x71, 0xaf, 0xb5, 0x6a, 0xa1, 0x9b, 0xa5, 0x96, 0x85, 0x6c, 0x1b, 0xce, 0xa0, 0xad, + 0x6f, 0xdd, 0xde, 0x60, 0xd3, 0x7f, 0x5d, 0x98, 0x1a, 0x34, 0xad, 0x35, 0x13, 0x9e, 0x4f, 0x68, + 0xf4, 0x00, 0xb9, 0x7e, 0x8a, 0x66, 0x02, 0xda, 0x18, 0x6c, 0x18, 0x42, 0x3e, 0xd4, 0x49, 0x0f, + 0x2e, 0x36, 0xfd, 0x99, 0x30, 0x6a, 0x63, 0xcb, 0xa1, 0x81, 0x1e, 0x04, 0x9d, 0x08, 0xc4, 0x43, + 0x91, 0xf6, 0x5a, 0xbc, 0x1a, 0x95, 0xf2, 0xb8, 0x59, 0x52, 0x8d, 0x06, 0x9c, 0xad, 0x59, 0x50, + 0xfd, 0xf6, 0xe9, 0x82, 0x48, 0x94, 0x55, 0xb7, 0xe1, 0xd0, 0xc1, 0x05, 0xe2, 0xb4, 0x86, 0xd8, + 0x98, 0xe3, 0x03, 0x4a, 0xb2, 0x62, 0xa6, 0xe0, 0xdc, 0x93, 0x84, 0x2e, 0x1b, 0xc9, 0x5b, 0x33, + 0x39, 0xf6, 0xc1, 0xd0, 0x88, 0xb5, 0x9f, 0xb3, 0xec, 0x8a, 0x1c, 0x31, 0xcc, 0xc9, 0x32, 0xe9, + 0xd9, 0xde, 0xbf, 0xa0, 0xe1, 0xc9, 0xd9, 0x37, 0x1c, 0x7a, 0xec, 0x03, 0xf2, 0x5e, 0x87, 0x2c, + 0x48, 0xa3, 0x8e, 0xf9, 0xb7, 0xdf, 0x33, 0xd8, 0x96, 0xcc, 0x8f, 0xa5, 0x34, 0x15, 0xd8, 0xbc, + 0x6d, 0x7b, 0x1a, 0x46, 0x40, 0x9e, 0x84, 0xb6, 0x2e, 0xe3, 0x5c, 0x16, 0x15, 0xfa, 0x31, 0xb2, + 0x89, 0xdf, 0x75, 0x9a, 0x86, 0xb2, 0x7d, 0x8d, 0xd2, 0x18, 0x7f, 0x83, 0xf1, 0x45, 0x09, 0xc0, + 0x72, 0xdf, 0x56, 0xa0, 0xa4, 0xf1, 0x14, 0x1d, 0x0d, 0x88, 0x8f, 0xc5, 0xa8, 0x4a, 0xfd, 0x6e, + 0xd5, 0xe4, 0x65, 0xb1, 0x89, 0xfd, 0xda, 0x5b, 0x3a, 0x2b, 0x6d, 0x1e, 0x84, 0x1c, 0xd0, 0xb7, + 0xd8, 0xa0, 0xdf, 0x69, 0x3f, 0x00, 0xa6, 0x45, 0x07, 0x09, 0x1e, 0x22, 0x51, 0xfe, 0x94, 0x0f, + 0x6e, 0x18, 0x1d, 0x7e, 0x48, 0xe0, 0x0b, 0x33, 0x61, 0x4c, 0xcd, 0x39, 0xa9, 0xf1, 0xe9, 0x35, + 0xd7, 0xfd, 0xb5, 0x9d, 0x18, 0xab, 0x3d, 0x25, 0x67, 0x97, 0xd9, 0x1b, 0x00, 0x7c, 0x65, 0xc9, + 0x19, 0x45, 0xae, 0x3b, 0xf9, 0x03, 0x31, 0x80, 0x2f, 0x57, 0x7b, 0x35, 0x9c, 0x68, 0x0c, 0x29, + 0xb3, 0x60, 0x99, 0x8c, 0x1d, 0x39, 0x50, 0x02, 0xb5, 0x63, 0x64, 0x8e, 0x40, 0x56, 0xac, 0x9c, + 0x17, 0x5a, 0x78, 0xca, 0x60, 0x67, 0xf1, 0xca, 0xc2, 0x59, 0x5c, 0xc9, 0x21, 0x16, 0xd3, 0x4b, + 0xe4, 0xf7, 0x53, 0x1e, 0x1c, 0x8d, 0xbc, 0x86, 0x35, 0xea, 0xfe, 0xc1, 0x62, 0x69, 0xd1, 0x6d, + 0x01, 0xb9, 0xc4, 0x7b, 0xc4, 0xea, 0x81, 0xc7, 0xe3, 0x68, 0xaf, 0x07, 0xfb, 0xc2, 0x4d, 0x80, + 0xe4, 0x05, 0x66, 0x56, 0x1e, 0x36, 0x7d, 0xd0, 0x27, 0x57, 0x71, 0x57, 0x6c, 0x0f, 0x87, 0xe4, + 0x46, 0x59, 0x29, 0x98, 0x3c, 0xb0, 0x53, 0x6f, 0xab, 0xa2, 0xb7, 0xa8, 0xcc, 0x9a, 0x7e, 0x75, + 0xe6, 0x95, 0x3d, 0x07, 0xbb, 0x5c, 0xbe, 0x3d, 0xb9, 0x3f, 0x8d, 0x3a, 0x41, 0x4a, 0x6f, 0x74, + 0x14, 0xc6, 0xaa, 0x48, 0x20, 0x12, 0x47, 0xc1, 0x92, 0x97, 0x7d, 0x43, 0x53, 0x59, 0xf4, 0x2a, + 0x50, 0xe3, 0xc6, 0x9e, 0x5f, 0x9a, 0xc7, 0xaf, 0xf2, 0xa4, 0xee, 0x34, 0xc4, 0x67, 0x22, 0x2c, + 0x26, 0x12, 0x6e, 0xb9, 0x1b, 0x6e, 0xab, 0x62, 0xab, 0xd9, 0x12, 0x1c, 0x4f, 0x13, 0x5d, 0x4b, + 0x06, 0x05, 0xfe, 0x3a, 0xea, 0xd9, 0x52, 0xbf, 0x87, 0xba, 0x7a, 0x87, 0x52, 0x7f, 0x80, 0x0a, + 0x10, 0x65, 0x2d, 0x03, 0x4b, 0xaf, 0x63, 0xb0, 0x8a, 0xef, 0x57, 0x90, 0xef, 0xcc, 0xdd, 0x51, + 0x0a, 0x67, 0x31, 0x83, 0x95, 0x97, 0xdc, 0x43, 0x1e, 0x00, 0x13, 0x22, 0x2d, 0x60, 0xcd, 0x1c, + 0x7c, 0xff, 0x67, 0x90, 0x5c, 0xa2, 0xcb, 0x40, 0x15, 0xdf, 0x15, 0xfb, 0xaa, 0xca, 0x4a, 0x54, + 0xa7, 0xe0, 0x0c, 0x4a, 0xe6, 0xe7, 0x47, 0x13, 0x52, 0xd8, 0x3c, 0xcf, 0x42, 0xdc, 0x17, 0x58, + 0xe3, 0x79, 0xb8, 0xe6, 0x5b, 0xe3, 0xac, 0xd9, 0x49, 0xa5, 0x5f, 0xa4, 0x90, 0xe2, 0x6b, 0x27, + 0x14, 0x6d, 
0x05, 0xe1, 0x02, 0x67, 0x9c, 0xf1, 0x4f, 0xc7, 0xe7, 0xe6, 0x10, 0x60, 0x41, 0x97, + 0xec, 0xad, 0xe2, 0x28, 0x15, 0xe9, 0x64, 0xb2, 0xe1, 0x28, 0x81, 0xdf, 0x5d, 0x36, 0x24, 0x67, + 0x8e, 0x94, 0x0a, 0x46, 0x3d, 0x75, 0x86, 0x5a, 0x6d, 0x54, 0xe2, 0x81, 0xa3, 0xc8, 0x4a, 0xdc, + 0xed, 0xa2, 0xc6, 0xbd, 0x48, 0xa1, 0x4a, 0xfc, 0x77, 0x9e, 0x82, 0xb3, 0x83, 0xb3, 0x35, 0x9d, + 0x3a, 0xbd, 0xbb, 0xd6, 0x4d, 0xcf, 0xb5, 0x26, 0xaa, 0x32, 0xe7, 0x4b, 0x8a, 0x46, 0x94, 0x04, + 0x83, 0x00, 0xd5, 0x89, 0x5b, 0x19, 0x18, 0x69, 0x12, 0xa9, 0x53, 0x0d, 0x94, 0xf4, 0x7e, 0xe2, + 0x19, 0x28, 0x9c, 0xf0, 0x06, 0x30, 0xef, 0xcb, 0x14, 0xac, 0x41, 0xa9, 0x92, 0x78, 0xb6, 0x01, + 0xf1, 0x38, 0x3b, 0xd1, 0xa9, 0x43, 0x1c, 0x56, 0x4b, 0x6e, 0x43, 0x3e, 0x06, 0xfa, 0x02, 0x73, + 0xa2, 0xe7, 0xc0, 0x77, 0xad, 0x74, 0x13, 0xec, 0x49, 0xd3, 0x61, 0x9b, 0x79, 0x44, 0x3d, 0xac, + 0xdb, 0x9e, 0x0d, 0x4a, 0x85, 0x83, 0x9a, 0x89, 0xbf, 0xe5, 0x22, 0x98, 0xc7, 0x03, 0x0d, 0xf7, + 0x7c, 0x56, 0xf0, 0x21, 0x93, 0xb1, 0xcb, 0x4c, 0x20, 0x72, 0x1f, 0x7c, 0xc3, 0xb9, 0x1d, 0xe8, + 0x5f, 0x54, 0x50, 0x31, 0xd4, 0xe6, 0x38, 0x41, 0x8b, 0x79, 0xf4, 0xd4, 0x0d, 0xa8, 0xb3, 0x25, + 0x87, 0x64, 0x08, 0x11, 0x9f, 0x0e, 0x28, 0x9b, 0x9f, 0xb9, 0xf4, 0x19, 0x03, 0x40, 0x40, 0xbd, + 0xa3, 0x3d, 0x00, 0xeb, 0x8b, 0xba, 0x61, 0xce, 0x1e, 0x22, 0xbd, 0xb3, 0xb0, 0x7a, 0x6f, 0xc8, + 0x14, 0xd7, 0x53, 0x2d, 0xd6, 0xa0, 0x43, 0xc3, 0x52, 0x1b, 0x13, 0xf8, 0x74, 0x0d, 0x58, 0x0d, + 0x94, 0x7b, 0x24, 0x2a, 0x54, 0xb5, 0xe2, 0x70, 0xec, 0x21, 0x8b, 0x8a, 0x6e, 0xc0, 0x12, 0x18, + 0x9c, 0x1b, 0xc9, 0x11, 0xaf, 0xb2, 0x39, 0xca, 0xbc, 0xf8, 0x8a, 0x6f, 0x5b, 0x48, 0x17, 0x25, + 0x48, 0x37, 0x98, 0x0d, 0x41, 0x6c, 0x62, 0xd2, 0x67, 0x2f, 0x30, 0x9d, 0xee, 0x84, 0xde, 0xf3, + 0x9b, 0x51, 0x6c, 0x33, 0x4a, 0x74, 0x88, 0x3b, 0x4a, 0x2d, 0x32, 0x63, 0xe8, 0x84, 0xe1, 0x79, + 0xf2, 0x2c, 0xbd, 0xc8, 0xf3, 0xd9, 0xcf, 0x00, 0x08, 0x34, 0xc5, 0xfa, 0x73, 0xd0, 0xbf, 0x3b, + 0xe6, 0xc4, 0xa2, 0x45, 0x5e, 0xf7, 0xac, 0xfa, 0x4f, 0x79, 0x3f, 0xe1, 0xce, 0xa7, 0xdd, 0xdc, + 0xed, 0x53, 0xa8, 0x60, 0xca, 0xce, 0x6a, 0x7e, 0x0a, 0x12, 0x91, 0xfb, 0x8f, 0x76, 0x87, 0x28, + 0x89, 0x2d, 0x6a, 0x3a, 0x89, 0x7b, 0xb0, 0xf2, 0x87, 0xcc, 0x3f, 0x7b, 0xea, 0xc8, 0x93, 0xad, + 0x57, 0xe0, 0x4c, 0xfc, 0x3c, 0x70, 0x2f, 0x3e, 0xf8, 0xb2, 0x96, 0xc8, 0x29, 0x9c, 0xa1, 0xab, + 0x68, 0xc2, 0x2e, 0xa9, 0x1a, 0xc2, 0x00, 0x67, 0x4d, 0x8a, 0x1d, 0xd7, 0x4d, 0x7e, 0x47, 0xad, + 0x3e, 0x60, 0x4a, 0x17, 0x3a, 0xba, 0x8e, 0xb1, 0x18, 0xdf, 0xaf, 0xcb, 0x60, 0x76, 0xee, 0x66, + 0xd0, 0x7a, 0x61, 0x22, 0xe0, 0x13, 0xac, 0x5f, 0x2f, 0xb2, 0x79, 0xdb, 0xd9, 0x5e, 0xab, 0xbc, + 0xb3, 0x08, 0x79, 0x68, 0x49, 0xda, 0x55, 0x37, 0x1b, 0x2e, 0xcc, 0xaf, 0x75, 0x9d, 0x9f, 0x25, + 0x36, 0x4a, 0x6d, 0xfb, 0x9f, 0xa3, 0xca, 0x66, 0x2b, 0xb5, 0x7a, 0x09, 0xf3, 0xe4, 0xd7, 0x5a, + 0x59, 0x24, 0x87, 0xc0, 0x72, 0xe1, 0x35, 0xaf, 0xf5, 0x6b, 0x1e, 0x18, 0xd4, 0xf1, 0xf3, 0x43, + 0xa4, 0x73, 0x29, 0xc9, 0xd3, 0x7c, 0xf8, 0xc0, 0x8b, 0x25, 0xe7, 0xe5, 0xde, 0xfd, 0xed, 0xcd, + 0xcb, 0x6f, 0xae, 0x35, 0x63, 0x8d, 0x0a, 0x1f, 0xeb, 0xe4, 0xcc, 0xd5, 0x2c, 0x75, 0xaa, 0xe4, + 0xa7, 0x61, 0x40, 0xdd, 0x90, 0xfe, 0xd6, 0xe2, 0x10, 0x3b, 0xd6, 0x8c, 0x3c, 0x84, 0x87, 0xe2, + 0xf2, 0x7b, 0x59, 0xab, 0x41, 0x67, 0x6c, 0x7c, 0xca, 0xdd, 0x23, 0xf3, 0x22, 0x3b, 0x7c, 0x7e, + 0xc0, 0xe7, 0x6c, 0xa8, 0x20, 0x83, 0xf7, 0xea, 0x54, 0xaa, 0x17, 0x0a, 0xbd, 0xa7, 0x06, 0xef, + 0xe0, 0x46, 0xb9, 0x33, 0xd8, 0x1b, 0xca, 0x7d, 0xc7, 0x76, 0xe9, 0xa4, 0x4e, 0x53, 0xd6, 0xaf, + 0x53, 0x70, 0x62, 0xbd, 0xd2, 0x23, 
0x61, 0x22, 0x48, 0x34, 0x05, 0x0d, 0x29, 0xf4, 0x70, 0x8d, + 0x02, 0x5d, 0x1e, 0x14, 0xc1, 0x9b, 0xd3, 0x5a, 0xe7, 0xf8, 0x69, 0xda, 0x7f, 0x95, 0x4f, 0x43, + 0x2b, 0x8f, 0x0a, 0x3e, 0xaf, 0x59, 0x8f, 0x1f, 0x8a, 0xc1, 0xf2, 0x14, 0x5c, 0xb7, 0xc9, 0x2f, + 0x01, 0x3b, 0x70, 0x85, 0xd8, 0x21, 0xd1, 0x6a, 0xc4, 0xc2, 0x7a, 0x9a, 0xdf, 0xb9, 0xec, 0xa5, + 0x3d, 0xd4, 0xee, 0xb0, 0xac, 0xb2, 0x50, 0xcb, 0xae, 0xe4, 0xf2, 0xf9, 0xcd, 0x78, 0xb8, 0x6b, + 0x56, 0x02, 0x25, 0xd2, 0x21, 0x1f, 0xd3, 0x6f, 0xa1, 0x91, 0xa5, 0x15, 0x96, 0xb1, 0x61, 0x4a, + 0xdb, 0xd3, 0x7d, 0xd7, 0xed, 0x1a, 0x74, 0xb1, 0x01, 0x54, 0xfa, 0xa5, 0xff, 0x1c, 0x25, 0xb6, + 0x72, 0x21, 0x21, 0x3f, 0x2b, 0xf9, 0xde, 0xea, 0x12, 0x08, 0xdb, 0x84, 0xa8, 0x74, 0xb1, 0x95, + 0xf1, 0x75, 0x21, 0x0f, 0x05, 0x0e, 0x77, 0x16, 0x06, 0x84, 0xc2, 0x19, 0xed, 0xac, 0x8b, 0x61, + 0x4c, 0x32, 0x1d, 0xae, 0x47, 0x9e, 0xb3, 0x06, 0xb5, 0xe7, 0x31, 0x5c, 0x76, 0xfe, 0x22, 0xc8, + 0x48, 0x75, 0x19, 0x06, 0x06, 0x20, 0x9c, 0x7b, 0x67, 0xc9, 0xc9, 0x4c, 0x0d, 0x4a, 0x30, 0xae, + 0xaf, 0xb1, 0x9b, 0xee, 0x60, 0x3e, 0x4f, 0x5a, 0x4e, 0xa4, 0xdd, 0x13, 0x2b, 0x70, 0xcc, 0x51, + 0xcc, 0x6f, 0xe1, 0x36, 0x0f, 0x21, 0x7c, 0x83, 0xc6, 0xc1, 0x96, 0x17, 0x34, 0xcd, 0xdd, 0x49, + 0x75, 0x39, 0x81, 0x1d, 0x9b, 0xb1, 0x03, 0xe5, 0x5c, 0x11, 0xaf, 0xc8, 0x9f, 0x2b, 0xdd, 0x60, + 0x7c, 0x70, 0x12, 0x9d, 0x34, 0xba, 0x37, 0xcd, 0xfb, 0xdd, 0xcc, 0xed, 0x8c, 0xba, 0xdb, 0x35, + 0x47, 0x71, 0x04, 0x6d, 0x3b, 0xea, 0x29, 0xa1, 0x2f, 0xe5, 0x8a, 0x9e, 0xc4, 0x8f, 0x35, 0x4b, + 0x8d, 0xdc, 0x51, 0xbb, 0xc5, 0x54, 0x8a, 0x60, 0x8e, 0x40, 0xf0, 0x2f, 0x5f, 0x96, 0x11, 0xf8, + 0xb9, 0x23, 0x11, 0xca, 0xa0, 0xea, 0x39, 0x59, 0xb9, 0x5c, 0x6f, 0x98, 0xe4, 0xbc, 0xd1, 0x06, + 0xd9, 0xe2, 0x69, 0xd7, 0x11, 0x63, 0x47, 0x4c, 0x04, 0x9d, 0xf1, 0xf2, 0xdf, 0x8b, 0x78, 0x74, + 0xb8, 0x65, 0xfe, 0xf2, 0x64, 0xce, 0xe0, 0xca, 0xd0, 0x40, 0xd1, 0x1e, 0xa8, 0xc2, 0x06, 0x35, + 0xe7, 0x0b, 0xe5, 0x91, 0x79, 0x73, 0x99, 0xf8, 0x27, 0xc8, 0xea, 0x77, 0x22, 0x0c, 0x17, 0xe4, + 0x04, 0xbc, 0x96, 0x05, 0xf1, 0xbf, 0x3a, 0x4c, 0xab, 0xaf, 0x25, 0x04, 0x24, 0xfd, 0xac, 0x8a, + 0xc7, 0x3e, 0x52, 0xa1, 0x6e, 0x5c, 0x74, 0x6a, 0xaf, 0xd3, 0x7f, 0x7d, 0x22, 0x88, 0xac, 0x15, + 0xec, 0xe8, 0x17, 0x1d, 0x1d, 0xab, 0xda, 0xbd, 0x32, 0x99, 0x10, 0xa1, 0x5c, 0x2f, 0x09, 0x69, + 0x10, 0x98, 0x95, 0xa6, 0xb7, 0x39, 0x60, 0xfd, 0xe4, 0x55, 0x89, 0x56, 0xa6, 0xa3, 0x01, 0xf2, + 0x49, 0xca, 0xeb, 0x0f, 0xbc, 0x4b, 0x18, 0xd6, 0xe1, 0xba, 0xd7, 0x7b, 0xfd, 0x62, 0x95, 0xda, + 0xe1, 0x8b, 0x6e, 0xd2, 0x7f, 0xed, 0xd3, 0x27, 0x3a, 0xf8, 0xdc, 0x95, 0xb2, 0xfb, 0xc2, 0x03, + 0x1c, 0xa8, 0x93, 0xb1, 0xae, 0xdb, 0x1f, 0x3a, 0xff, 0xf8, 0xc6, 0x05, 0x65, 0x28, 0xa1, 0x2c, + 0xd6, 0xa3, 0x51, 0xf7, 0x28, 0x7f, 0xb0, 0x60, 0x09, 0x1a, 0xd6, 0xb2, 0x83, 0x4c, 0x19, 0xb9, + 0xe0, 0x4e, 0x34, 0x55, 0xab, 0xa2, 0x82, 0x1e, 0xf3, 0x9e, 0xcc, 0x3e, 0xd9, 0xf7, 0x67, 0x72, + 0xc7, 0xeb, 0xe6, 0x4e, 0xf8, 0x9a, 0x1a, 0x4d, 0x95, 0xf8, 0x05, 0xfa, 0x9a, 0xa2, 0x3a, 0xb5, + 0x8c, 0x08, 0x35, 0xaa, 0xbd, 0x9b, 0x72, 0x95, 0xc4, 0xca, 0x98, 0xd1, 0x83, 0xa7, 0x91, 0x65, + 0x26, 0x6a, 0x40, 0xdb, 0x74, 0x50, 0xa6, 0x11, 0x2d, 0x29, 0xce, 0x05, 0x4c, 0x79, 0x74, 0x1c, + 0x67, 0xb7, 0xa6, 0x3f, 0xe1, 0xca, 0x00, 0xaf, 0xa5, 0x01, 0xb6, 0x6a, 0xfa, 0x7e, 0x23, 0x41, + 0x46, 0x06, 0x44, 0x02, 0xf9, 0x3b, 0xc2, 0x2c, 0x09, 0xf4, 0x92, 0x31, 0xe4, 0xee, 0xd4, 0x1d, + 0xd2, 0x54, 0xdd, 0x9a, 0x62, 0xce, 0x0e, 0x0e, 0xb8, 0xa1, 0x57, 0x00, 0x7f, 0xcd, 0x6a, 0xdf, + 0x8e, 0x9e, 0xd6, 0x2b, 0xe7, 0xb0, 0x62, 0xd2, 0x32, 0xef, 
0x5c, 0x07, 0x05, 0x4c, 0x4c, 0x32, + 0x54, 0x5b, 0x07, 0xe3, 0x5b, 0xc5, 0xd8, 0x34, 0xe5, 0xa9, 0xb8, 0xc2, 0x89, 0xb4, 0xac, 0x63, + 0xf4, 0x10, 0x5a, 0x9c, 0xbf, 0xf3, 0x1e, 0x2f, 0x7b, 0x15, 0x3e, 0x6e, 0x4e, 0x7c, 0xb6, 0x44, + 0x04, 0x7d, 0xa5, 0x0c, 0x8e, 0x5d, 0x77, 0xb6, 0xb8, 0xd7, 0x3e, 0x2a, 0x89, 0x07, 0x9f, 0x1d, + 0x78, 0xa1, 0xd9, 0xc6, 0xb4, 0xf4, 0xf1, 0xe6, 0x7b, 0x76, 0x1f, 0xb4, 0xb5, 0x75, 0xa7, 0xc6, + 0x67, 0x99, 0xfe, 0x29, 0x7d, 0x69, 0xd1, 0x87, 0x6a, 0xda, 0x47, 0x68, 0xfd, 0xdf, 0xef, 0x3f, + 0x44, 0x7b, 0x47, 0x88, 0x99, 0xa2, 0xc3, 0x19, 0xeb, 0x90, 0xce, 0xb6, 0xcd, 0xc8, 0x1f, 0x9d, + 0x52, 0x14, 0x42, 0xbd, 0xdb, 0x8b, 0x39, 0x09, 0x26, 0x35, 0x6e, 0x0b, 0xbe, 0xda, 0xb5, 0x03, + 0x2a, 0x3e, 0x75, 0x96, 0x8b, 0x40, 0x6d, 0xb6, 0x87, 0xf4, 0x9a, 0xc5, 0xf8, 0x7e, 0x02, 0xaf, + 0xbf, 0x36, 0x74, 0x4b, 0x28, 0xfc, 0x22, 0xb9, 0xa0, 0x0a, 0x99, 0xd3, 0x79, 0x0c, 0x20, 0xd5, + 0x96, 0x2f, 0x32, 0x18, 0xad, 0x71, 0x2e, 0xb0, 0x04, 0x51, 0xfe, 0x56, 0x94, 0xd1, 0x47, 0xb4, + 0xa1, 0x6a, 0x21, 0xd6, 0x6a, 0x39, 0xf0, 0x24, 0x89, 0x62, 0x13, 0x80, 0xc9, 0x6a, 0x02, 0x66, + 0x3e, 0xc5, 0xfd, 0x61, 0x35, 0x19, 0x7e, 0x9b, 0xec, 0xc5, 0xec, 0x67, 0x4f, 0x26, 0x9b, 0x9b, + 0x81, 0x35, 0x6e, 0x1f, 0x6f, 0x05, 0x37, 0x5c, 0x0e, 0x09, 0x9c, 0xd3, 0x11, 0xed, 0xf8, 0xeb, + 0x5d, 0x25, 0x79, 0x01, 0x47, 0xad, 0x75, 0x7b, 0xcb, 0x7f, 0xa6, 0x3f, 0xbb, 0x1c, 0x39, 0x24, + 0x9c, 0x6a, 0x5c, 0x0d, 0x24, 0x10, 0x5d, 0x1f, 0x57, 0x51, 0x0d, 0x43, 0xcb, 0xbc, 0x7c, 0xb5, + 0xb6, 0xe9, 0x35, 0x83, 0x33, 0x5e, 0x2d, 0x44, 0xc6, 0x71, 0x71, 0xdd, 0x6b, 0x38, 0x37, 0x4b, + 0xd5, 0x5e, 0xa5, 0xcc, 0x37, 0xa6, 0x68, 0x92, 0xe9, 0xf5, 0x2d, 0x23, 0x5a, 0x19, 0x22, 0x83, + 0x14, 0x74, 0x53, 0x17, 0x1e, 0x07, 0x0d, 0x10, 0x43, 0x58, 0x6e, 0xd0, 0x20, 0xc1, 0xee, 0x56, + 0x22, 0xcf, 0xd9, 0x3f, 0x5d, 0x99, 0xe4, 0x63, 0xb3, 0x56, 0x00, 0x49, 0x31, 0x5a, 0xef, 0x34, + 0x0d, 0x60, 0xe3, 0x7e, 0x81, 0x06, 0xb4, 0x75, 0x7e, 0x89, 0xc5, 0xa9, 0xe8, 0xf3, 0x0c, 0x4d, + 0xbd, 0xad, 0xb3, 0x31, 0x2a, 0xe9, 0xa5, 0xa9, 0xc2, 0xb8, 0xad, 0xd2, 0x2d, 0x71, 0xd7, 0x1a, + 0x5a, 0x49, 0x5e, 0xc1, 0x37, 0xae, 0x0c, 0xdc, 0x56, 0x3f, 0x95, 0x46, 0x7b, 0x7e, 0xc7, 0xc8, + 0xcc, 0xd3, 0x12, 0x9b, 0xcc, 0x12, 0x55, 0x61, 0x21, 0xc0, 0xa2, 0x55, 0xf6, 0x24, 0x5d, 0xa3, + 0x60, 0x31, 0x80, 0x01, 0x8d, 0x8d, 0xd5, 0xdb, 0xb9, 0x38, 0xf0, 0xae, 0xa8, 0xa0, 0xe7, 0x4e, + 0x79, 0xb1, 0x9c, 0xac, 0x3a, 0xc5, 0x9b, 0x7e, 0x69, 0xb0, 0x7f, 0xfa, 0x91, 0x36, 0xa7, 0xc9, + 0xcc, 0x2e, 0x3a, 0x1d, 0xc6, 0x90, 0x6b, 0xed, 0x04, 0x1f, 0x4c, 0x07, 0xe6, 0x9d, 0x12, 0x88, + 0xf3, 0xd7, 0x58, 0xe4, 0x63, 0xd7, 0x04, 0x44, 0xdd, 0xc2, 0xb7, 0x50, 0x01, 0x2c, 0x45, 0x3b, + 0x69, 0x2e, 0x89, 0x6e, 0x5f, 0x38, 0x35, 0x4e, 0x2e, 0xa2, 0xda, 0x82, 0x88, 0x99, 0x5c, 0x26, + 0xd2, 0x8f, 0x1a, 0xe5, 0x0c, 0xef, 0x7f, 0x94, 0x27, 0x3f, 0xe7, 0xd5, 0x55, 0x57, 0xb6, 0x09, + 0x28, 0xb7, 0x03, 0x66, 0x45, 0x45, 0x25, 0xad, 0xdc, 0x87, 0xdb, 0x04, 0xee, 0x50, 0x11, 0xfe, + 0xd2, 0xc8, 0x6d, 0x6a, 0x03, 0x08, 0xd7, 0x48, 0x3d, 0x8f, 0xc3, 0xa2, 0xea, 0xe0, 0xf1, 0x8c, + 0x91, 0x87, 0xe3, 0xe9, 0xc6, 0x63, 0x64, 0x32, 0x0d, 0xbc, 0x7f, 0x49, 0x76, 0x5a, 0xc3, 0x70, + 0xee, 0x17, 0x33, 0x77, 0xb9, 0xd5, 0x4e, 0x6c, 0xe5, 0x8c, 0x32, 0xce, 0xad, 0x17, 0x64, 0xa9, + 0xd6, 0x21, 0x92, 0xd5, 0x4f, 0xd8, 0x22, 0xa2, 0x5e, 0x4f, 0x66, 0xef, 0xd4, 0xa9, 0x87, 0x94, + 0x71, 0xc3, 0xe7, 0x55, 0x0f, 0xf9, 0x6d, 0xf1, 0x7e, 0x1d, 0xa4, 0x60, 0xf4, 0x7e, 0x72, 0xa3, + 0x1d, 0xa8, 0x69, 0x3a, 0x2b, 0x04, 0x59, 0x61, 0xbf, 0xf5, 0xbb, 0x82, 0xba, 0x1e, 
0x7c, 0x40, + 0x27, 0x5d, 0xda, 0xa5, 0xdd, 0xf9, 0xd5, 0x93, 0xa3, 0xc3, 0xfe, 0x71, 0xf4, 0xc7, 0xfb, 0x04, + 0x07, 0xa0, 0xfd, 0x63, 0x96, 0x28, 0x47, 0x1e, 0x63, 0xc3, 0xcf, 0x22, 0x8f, 0x93, 0x10, 0x79, + 0x64, 0x6c, 0x6d, 0x5c, 0x21, 0x5a, 0x9b, 0x9e, 0xef, 0xba, 0xfa, 0x9d, 0x31, 0x82, 0xdc, 0xdf, + 0x06, 0xa8, 0xe7, 0x78, 0x20, 0xb8, 0x7c, 0x90, 0xd2, 0xb7, 0xb8, 0x53, 0x82, 0x49, 0x31, 0x00, + 0xdd, 0xb4, 0xbc, 0x9f, 0x14, 0x27, 0x61, 0x48, 0xcc, 0xfd, 0xa3, 0xdb, 0xa9, 0xc5, 0x34, 0x66, + 0x3c, 0xbd, 0x54, 0x7e, 0x3a, 0xd5, 0x11, 0x86, 0x86, 0x73, 0xe8, 0x74, 0x69, 0x1d, 0x45, 0x26, + 0x02, 0x83, 0xe3, 0x0e, 0x14, 0x08, 0xa0, 0x1e, 0x96, 0x1e, 0x58, 0x1a, 0x81, 0xfb, 0x58, 0xf7, + 0x36, 0x55, 0xa3, 0xcf, 0xa3, 0xc5, 0x06, 0xfa, 0xf0, 0x10, 0xe9, 0x32, 0x42, 0xa1, 0x72, 0x94, + 0x24, 0xf3, 0x75, 0x55, 0x4f, 0x88, 0x3b, 0x8f, 0x72, 0xb1, 0xfd, 0x60, 0xee, 0x90, 0x4a, 0x16, + 0x15, 0x15, 0x82, 0xd6, 0x31, 0xb9, 0x3b, 0xed, 0x6e, 0xc2, 0x7b, 0xd1, 0xe6, 0x1c, 0x08, 0xfb, + 0xb0, 0xb2, 0x01, 0x70, 0x1c, 0x2c, 0x0b, 0x3e, 0x11, 0xc4, 0xd2, 0x96, 0xf0, 0x18, 0x0f, 0xdc, + 0x9e, 0x83, 0x4b, 0xe9, 0x3c, 0xca, 0x97, 0x9c, 0x04, 0xdb, 0xa2, 0x06, 0x03, 0x56, 0xdf, 0x3e, + 0xb7, 0x47, 0xe9, 0x26, 0x39, 0xb8, 0x14, 0xd0, 0x24, 0x68, 0xc2, 0x5d, 0x8a, 0x12, 0xeb, 0x5e, + 0x0e, 0xf2, 0xe3, 0x8e, 0x16, 0xaa, 0x56, 0x40, 0x66, 0xea, 0x12, 0xc8, 0x57, 0xaf, 0x7a, 0xf7, + 0x68, 0xb6, 0x54, 0x7d, 0xa0, 0x57, 0x4b, 0xf9, 0x19, 0x8b, 0x6f, 0x94, 0x90, 0x4b, 0x2f, 0x0f, + 0x79, 0x21, 0xcd, 0x09, 0x44, 0x4d, 0x22, 0x52, 0xea, 0xdf, 0x61, 0x14, 0x3c, 0xe5, 0xbf, 0x56, + 0xee, 0x48, 0x07, 0x39, 0x88, 0x96, 0x9d, 0x9b, 0x94, 0xa2, 0x16, 0xba, 0x5e, 0x38, 0xb6, 0x81, + 0xe9, 0x82, 0xb1, 0x4f, 0x3d, 0x2d, 0xc4, 0xe0, 0xde, 0x48, 0x5b, 0x7b, 0x40, 0xe0, 0x2e, 0x1d, + 0x5f, 0x24, 0x6b, 0x7c, 0xa6, 0x01, 0xf4, 0xde, 0x37, 0xcf, 0xb6, 0x24, 0x82, 0x03, 0xb7, 0x2f, + 0x9b, 0x82, 0xd2, 0x08, 0xd0, 0xc0, 0x57, 0xc6, 0x90, 0xb5, 0x6d, 0xee, 0x5a, 0x25, 0x12, 0x58, + 0xd3, 0x66, 0xcf, 0xb8, 0x33, 0xe6, 0x8f, 0x64, 0xdc, 0x53, 0x11, 0xdd, 0x22, 0x7d, 0xf4, 0xb4, + 0x53, 0x90, 0x95, 0x18, 0xf6, 0x14, 0x45, 0xec, 0xd5, 0x04, 0xc4, 0x70, 0x52, 0xc0, 0x71, 0x36, + 0x41, 0xd7, 0x8f, 0x9c, 0x20, 0x11, 0x6c, 0x92, 0xb1, 0xb6, 0xc4, 0x7b, 0xf8, 0xda, 0xce, 0x75, + 0x29, 0xfc, 0xb8, 0xc2, 0xb7, 0x48, 0x87, 0x04, 0x4a, 0x22, 0xfd, 0x50, 0xe9, 0x95, 0xa7, 0xc5, + 0x81, 0xaf, 0x36, 0x85, 0xf0, 0x79, 0xdd, 0x31, 0x3f, 0xdf, 0xff, 0x9c, 0xa3, 0x51, 0x87, 0xe4, + 0x0a, 0x37, 0x06, 0x2c, 0xfe, 0x52, 0xaf, 0x01, 0x34, 0x96, 0xde, 0x8a, 0x7a, 0x28, 0x20, 0x2e, + 0xfe, 0x77, 0x28, 0xd6, 0xe3, 0xe0, 0x83, 0xc0, 0x41, 0x50, 0xcf, 0xe5, 0x68, 0x39, 0xe5, 0x5e, + 0xd6, 0x16, 0x6e, 0x67, 0x38, 0xe8, 0x11, 0xe7, 0x80, 0xcb, 0x29, 0x39, 0x72, 0x26, 0x21, 0x35, + 0x86, 0xfa, 0x90, 0xb2, 0x89, 0x16, 0xaa, 0xee, 0x40, 0x91, 0x6a, 0x55, 0xca, 0x56, 0x63, 0x66, + 0x2c, 0x07, 0x03, 0x21, 0xf9, 0x7b, 0xdc, 0x92, 0xba, 0x44, 0x2f, 0xee, 0x37, 0xd8, 0x83, 0x23, + 0x62, 0x16, 0x24, 0x79, 0xa5, 0x10, 0x84, 0xcb, 0x4e, 0x08, 0x0c, 0x3e, 0xaa, 0x5c, 0x79, 0xe6, + 0x5e, 0x5c, 0x9d, 0x36, 0xd0, 0x98, 0x67, 0x6f, 0x1c, 0x91, 0x9a, 0x35, 0x66, 0x59, 0x3f, 0xa5, + 0xb5, 0xab, 0xde, 0x2b, 0xa7, 0x9e, 0xaf, 0x5e, 0x17, 0x67, 0x8a, 0x59, 0xcf, 0x80, 0xb5, 0xdc, + 0xaf, 0x9f, 0x95, 0x85, 0x53, 0x4a, 0x34, 0xdb, 0x18, 0x37, 0xc3, 0xab, 0x1c, 0x3d, 0x2c, 0xfe, + 0xe1, 0x30, 0xca, 0x25, 0x0b, 0xdf, 0xa2, 0x6b, 0x0b, 0xd4, 0xb3, 0x5f, 0xa3, 0xc2, 0x2b, 0x34, + 0x2c, 0x5c, 0x61, 0x4a, 0xd9, 0xc3, 0x99, 0xb5, 0x60, 0xc2, 0xc1, 0x87, 0xcc, 0x2d, 0x72, 0x82, + 0x80, 0x35, 
0x05, 0xb0, 0x3f, 0x64, 0xd2, 0xfa, 0xe0, 0x45, 0x65, 0x60, 0xcd, 0x7d, 0x5b, 0x88, + 0xd9, 0xa3, 0x58, 0x41, 0xf9, 0x59, 0xfb, 0x86, 0xd9, 0x19, 0x8e, 0x3b, 0x09, 0x42, 0xf3, 0xdd, + 0x94, 0xf8, 0xad, 0xe6, 0x7b, 0xe3, 0x2d, 0x63, 0x83, 0x34, 0xfa, 0x27, 0x27, 0x10, 0xaa, 0xf7, + 0xa4, 0xa9, 0x99, 0xfd, 0x1c, 0x0b, 0xa9, 0xc1, 0xaa, 0x18, 0xfa, 0x3c, 0x09, 0xe5, 0x89, 0x54, + 0x8c, 0x93, 0x73, 0x26, 0xc8, 0x29, 0x84, 0xff, 0x89, 0x3c, 0x5c, 0x9d, 0x7f, 0xf4, 0xab, 0x16, + 0x74, 0x6f, 0x67, 0xc2, 0x96, 0x25, 0x95, 0xd9, 0xba, 0xc1, 0xc6, 0x8a, 0xf2, 0x50, 0xf6, 0xe9, + 0xcd, 0xef, 0xde, 0xcb, 0xf1, 0xa9, 0xcf, 0x0e, 0x7c, 0x2c, 0xba, 0x43, 0x55, 0x79, 0x4c, 0x59, + 0x68, 0x0b, 0x67, 0xb8, 0x64, 0x55, 0x2e, 0xb9, 0x79, 0xde, 0x7c, 0x79, 0x24, 0x77, 0xea, 0x8c, + 0xce, 0xc1, 0x92, 0xee, 0x7e, 0x0f, 0x81, 0x43, 0xf9, 0x82, 0x5c, 0x6e, 0x4f, 0xe6, 0x9a, 0x81, + 0xad, 0x39, 0x77, 0xf7, 0x85, 0x5a, 0xca, 0xd9, 0xac, 0xc0, 0x0c, 0xcc, 0x3f, 0xb1, 0x01, 0x7d, + 0x38, 0x97, 0x7c, 0x91, 0x43, 0xef, 0x07, 0xc2, 0xde, 0x84, 0x6f, 0x6d, 0xa0, 0x5c, 0x41, 0xfe, + 0x7b, 0x28, 0xd6, 0x8a, 0xab, 0x95, 0xed, 0xcc, 0x50, 0xf9, 0x0e, 0xd9, 0xb2, 0x1a, 0x9e, 0xc9, + 0x8b, 0xeb, 0x36, 0x4c, 0x91, 0xa4, 0x70, 0x73, 0x14, 0x41, 0x48, 0x1d, 0xe6, 0x1b, 0x3f, 0x5d, + 0xab, 0x54, 0xe8, 0x63, 0xa2, 0x27, 0x00, 0xad, 0xe2, 0x8b, 0x41, 0x31, 0x9e, 0xdf, 0x2f, 0x60, + 0x14, 0x93, 0xe0, 0x27, 0xeb, 0x97, 0xad, 0x28, 0x0b, 0x74, 0xba, 0xe8, 0x35, 0x6b, 0x3f, 0xa7, + 0x61, 0x1e, 0x38, 0xb3, 0x75, 0xa2, 0xe4, 0x82, 0xc4, 0x79, 0x58, 0x93, 0xe4, 0x10, 0xba, 0x11, + 0x82, 0x84, 0x9a, 0x25, 0x1e, 0x45, 0x35, 0xeb, 0x71, 0x20, 0x86, 0x84, 0x72, 0x16, 0x11, 0x74, + 0x11, 0xed, 0x2a, 0xe7, 0xbd, 0x64, 0x4a, 0xf4, 0xed, 0x9c, 0x22, 0xaf, 0xbf, 0x2c, 0x00, 0x63, + 0xed, 0x23, 0xce, 0x42, 0x51, 0x83, 0xad, 0xd9, 0x7d, 0xe5, 0x8c, 0xe6, 0xb0, 0xdc, 0xaa, 0x40, + 0x1a, 0x3d, 0x4b, 0xc9, 0xa8, 0xf7, 0x3b, 0x9e, 0x51, 0xc0, 0xf8, 0xd5, 0x95, 0xf2, 0x1a, 0x26, + 0x15, 0x5f, 0x04, 0x50, 0xb5, 0x18, 0xc7, 0x1d, 0xce, 0xd4, 0xb7, 0x70, 0x36, 0x1b, 0xdb, 0xcc, + 0x21, 0x6a, 0x12, 0xcd, 0xe8, 0xdd, 0xea, 0x2d, 0xef, 0xab, 0xf2, 0xdd, 0xa5, 0x3c, 0x23, 0x1f, + 0x8e, 0x7b, 0x4d, 0xf4, 0x8f, 0x31, 0x87, 0x02, 0xaa, 0x60, 0xae, 0xbe, 0x26, 0x1e, 0x2a, 0x90, + 0x75, 0xce, 0x6f, 0xc6, 0xdc, 0xbb, 0x86, 0xac, 0x94, 0x1b, 0xbc, 0x9b, 0x44, 0xc4, 0xa6, 0xd6, + 0x7b, 0xc2, 0x7e, 0xfa, 0xd0, 0xc3, 0x9b, 0x7c, 0xc6, 0xa0, 0xf1, 0x46, 0x0a, 0xfe, 0x7d, 0xec, + 0x78, 0xc6, 0xcc, 0x19, 0xe9, 0xba, 0x9d, 0x4c, 0x17, 0xe0, 0x61, 0x96, 0x86, 0xf0, 0x28, 0x07, + 0xb3, 0xf5, 0xb4, 0xbc, 0x7a, 0x37, 0x15, 0x65, 0x2c, 0x68, 0x79, 0x8f, 0xc6, 0xf8, 0xf3, 0x89, + 0x68, 0x76, 0xac, 0xa3, 0xe7, 0x0f, 0x08, 0x8f, 0xb4, 0x06, 0x38, 0x24, 0xa2, 0x89, 0xa3, 0xc7, + 0x4e, 0xe8, 0x43, 0x3f, 0xc6, 0x7e, 0xa3, 0x0c, 0xb9, 0x97, 0xf0, 0x03, 0xad, 0xb8, 0x7c, 0xae, + 0xdf, 0x39, 0x4f, 0xa1, 0x1f, 0x02, 0xe2, 0x85, 0xe4, 0x0f, 0x14, 0xe1, 0x70, 0x8d, 0xbe, 0x8f, + 0x1a, 0x0d, 0x4b, 0x59, 0x4d, 0x56, 0x4a, 0x71, 0x62, 0x97, 0x48, 0xd4, 0x68, 0x7d, 0xcc, 0xa4, + 0x96, 0x54, 0x85, 0x02, 0x5c, 0x22, 0xd9, 0x5d, 0xf5, 0xb5, 0x1e, 0x1f, 0xa6, 0x8d, 0x82, 0x3d, + 0x35, 0xf9, 0x8c, 0x28, 0x4b, 0x72, 0xdc, 0x8c, 0x2c, 0xf5, 0x47, 0x2d, 0x4a, 0xdb, 0xa1, 0x58, + 0xf2, 0xf9, 0x4f, 0x6b, 0x95, 0xd7, 0x2d, 0x2b, 0xfb, 0xd1, 0x94, 0xa6, 0x35, 0x31, 0x0b, 0x54, + 0xbf, 0xc8, 0x60, 0x0b, 0xee, 0x46, 0x78, 0x0f, 0xd4, 0xa4, 0x21, 0xe7, 0x0e, 0x30, 0xda, 0xc9, + 0xf6, 0xa5, 0xad, 0x84, 0xbe, 0xe6, 0x4c, 0xcd, 0x00, 0x4d, 0x12, 0xd7, 0x12, 0x5d, 0x3e, 0x00, + 0xf1, 0xd3, 0xd9, 0xb2, 0x90, 0x91, 
0xcf, 0xe9, 0x32, 0x06, 0x45, 0xfd, 0x45, 0xf1, 0x1f, 0x10, + 0xb6, 0x94, 0x27, 0x48, 0xe6, 0xd2, 0x38, 0xc0, 0x1a, 0xc6, 0x10, 0xc0, 0x6b, 0xc8, 0x50, 0x40, + 0x7c, 0x2e, 0x21, 0x7e, 0xca, 0x2d, 0xc4, 0xa3, 0xa6, 0x3e, 0x3b, 0x4d, 0x5c, 0x34, 0x82, 0xba, + 0x63, 0xf6, 0x73, 0x17, 0xfc, 0xe0, 0x11, 0x2c, 0x25, 0x1d, 0x9d, 0x54, 0x07, 0x29, 0xc5, 0x77, + 0x5c, 0x36, 0x97, 0x83, 0x64, 0x69, 0xeb, 0x8e, 0xf9, 0x49, 0xdd, 0x1c, 0xda, 0x02, 0x67, 0xef, + 0x2a, 0xaa, 0x2a, 0xde, 0xc0, 0x97, 0x47, 0x6c, 0xf4, 0xc3, 0x2b, 0x18, 0x9d, 0x72, 0xa9, 0x9b, + 0x7e, 0x40, 0x17, 0xa2, 0x3d, 0xa3, 0x51, 0xfa, 0x2d, 0xe4, 0x34, 0x23, 0x26, 0xf8, 0xcc, 0xe6, + 0xae, 0x05, 0x41, 0x79, 0xdd, 0xc5, 0x12, 0xca, 0x20, 0xb0, 0x6c, 0x35, 0x45, 0x0e, 0xa9, 0xec, + 0x67, 0x68, 0x87, 0x18, 0xf4, 0xa1, 0x8a, 0x17, 0x46, 0x46, 0xd0, 0x5c, 0x43, 0x08, 0x9a, 0xd3, + 0x91, 0x58, 0x50, 0xba, 0x81, 0x9b, 0x83, 0xdc, 0x1c, 0x97, 0xbd, 0xae, 0x41, 0x26, 0xf6, 0x53, + 0xec, 0xdf, 0x52, 0x00, 0xaa, 0x43, 0xaf, 0xd4, 0x74, 0x30, 0x24, 0x68, 0x3a, 0x51, 0xd1, 0x07, + 0xe5, 0xf8, 0x18, 0x6d, 0x7c, 0x25, 0xf2, 0xf3, 0x54, 0x65, 0x2d, 0xd7, 0xc2, 0x88, 0x4b, 0x7f, + 0xd0, 0x96, 0xb5, 0xbc, 0xb2, 0xfb, 0xed, 0x99, 0x4b, 0xa6, 0x01, 0x54, 0x64, 0xed, 0x8f, 0x80, + 0xe8, 0x4f, 0x02, 0xa0, 0x3e, 0xc2, 0x0a, 0x54, 0xc0, 0x76, 0x5f, 0x2d, 0xe2, 0xd2, 0x32, 0xd6, + 0xbe, 0xe8, 0xd1, 0xcf, 0x39, 0xf2, 0x04, 0x27, 0x4a, 0x59, 0x31, 0x23, 0xcf, 0x8c, 0xd2, 0xfc, + 0x87, 0xe4, 0xaa, 0x57, 0x1e, 0xd6, 0x90, 0x79, 0x5c, 0x68, 0x11, 0x41, 0xe8, 0x32, 0xfb, 0xa0, + 0x95, 0x74, 0x5b, 0x27, 0x41, 0xbc, 0x51, 0x41, 0xee, 0x55, 0x37, 0x3d, 0x0e, 0xc3, 0xdc, 0x1f, + 0x42, 0x43, 0xd3, 0x47, 0x78, 0xf6, 0x38, 0xa9, 0xdb, 0x31, 0x9b, 0xff, 0xde, 0x33, 0xa3, 0x89, + 0x13, 0x09, 0x9f, 0x2d, 0xc5, 0x85, 0xd6, 0xee, 0x82, 0x91, 0xc1, 0x8a, 0x1f, 0x89, 0x80, 0xe2, + 0x25, 0xb3, 0x3d, 0xa7, 0xb1, 0xa9, 0xea, 0x80, 0x17, 0xfe, 0x76, 0xa3, 0xdc, 0xfd, 0x22, 0x41, + 0xb8, 0x2a, 0xe1, 0xc3, 0x74, 0xe3, 0x43, 0x1d, 0xbc, 0x6f, 0xa6, 0xed, 0x46, 0x77, 0xfe, 0xee, + 0x2f, 0x86, 0x89, 0x74, 0x9d, 0x0b, 0xdf, 0x2a, 0x25, 0x20, 0xdc, 0x46, 0x83, 0x2d, 0x5d, 0x5c, + 0x65, 0xe9, 0xc0, 0xbf, 0xdc, 0x17, 0xc8, 0xf7, 0x97, 0x53, 0xf1, 0x9a, 0x25, 0xfc, 0xbd, 0x68, + 0x3b, 0xa5, 0x8c, 0x85, 0xac, 0x8e, 0x8c, 0xd8, 0xce, 0x1e, 0x6b, 0xca, 0x7c, 0x2c, 0x41, 0x72, + 0x3d, 0x58, 0x79, 0x86, 0x6d, 0x0a, 0xa1, 0xa6, 0x9a, 0xd8, 0x12, 0x40, 0x00, 0xb3, 0x08, 0x8f, + 0x8e, 0x31, 0xcb, 0x29, 0xed, 0x0a, 0x41, 0xf3, 0xde, 0x1f, 0x03, 0xec, 0xaf, 0x91, 0x06, 0x65, + 0x95, 0xb7, 0xdd, 0xa2, 0x9a, 0x30, 0x70, 0x0f, 0x4c, 0x83, 0xc0, 0x4e, 0x1e, 0x20, 0xec, 0x8b, + 0x07, 0x35, 0x83, 0xf7, 0x68, 0x45, 0x0e, 0x44, 0x2a, 0xab, 0xc8, 0xbf, 0x07, 0xbf, 0xa3, 0x1f, + 0x5b, 0xb3, 0x7a, 0x16, 0x55, 0x00, 0x26, 0x4b, 0x4a, 0xe1, 0x15, 0x5c, 0x38, 0xf6, 0xe4, 0xe2, + 0x0d, 0x04, 0xab, 0x4a, 0x14, 0x5f, 0x33, 0x0a, 0xd6, 0xc3, 0xd4, 0x88, 0x69, 0xc1, 0x8b, 0xf2, + 0xb4, 0x65, 0xab, 0xa5, 0x0e, 0x38, 0xd2, 0x7a, 0x5e, 0x8e, 0x7a, 0xca, 0x29, 0xc9, 0x3c, 0xb4, + 0xcd, 0x2f, 0x57, 0x55, 0x0f, 0x0a, 0xd1, 0x87, 0xb3, 0x66, 0xd7, 0xb6, 0xba, 0xb7, 0xb7, 0xa9, + 0xc4, 0x2a, 0xf5, 0xcb, 0xd9, 0x4a, 0x30, 0x41, 0xa9, 0xd3, 0x9d, 0x76, 0xdb, 0xfd, 0x38, 0x74, + 0xaa, 0x5c, 0x6e, 0x5b, 0xef, 0xe2, 0x96, 0x1e, 0x38, 0xbb, 0x9d, 0xe9, 0xe7, 0xbd, 0x0c, 0xb5, + 0x4e, 0xce, 0x6a, 0xfa, 0x3b, 0xf4, 0xc2, 0xc0, 0x96, 0xae, 0x96, 0xfe, 0xb4, 0x45, 0xd6, 0x49, + 0x94, 0x6d, 0x4e, 0xfe, 0xbe, 0xf4, 0xd4, 0xae, 0x7a, 0x9c, 0xef, 0x07, 0xfa, 0xb5, 0xd4, 0x18, + 0x22, 0xf1, 0x88, 0x83, 0x00, 0x6e, 0xe6, 0xf6, 0x58, 0xaa, 
0xab, 0x1b, 0x98, 0x6d, 0x75, 0xb5, + 0xce, 0x23, 0xbc, 0x27, 0xff, 0xf7, 0xbc, 0x93, 0xf7, 0xeb, 0xe8, 0x4f, 0xe0, 0x63, 0xc9, 0x8d, + 0xb8, 0x13, 0x86, 0x83, 0x2f, 0x5e, 0x9f, 0x73, 0x48, 0x3e, 0x01, 0x25, 0xfa, 0x6b, 0x67, 0xa4, + 0x97, 0x51, 0xe3, 0xb7, 0xf8, 0xef, 0xcb, 0x92, 0xd9, 0x55, 0x9e, 0x25, 0xe2, 0x42, 0x4e, 0x33, + 0x4d, 0x25, 0xa6, 0x5f, 0x2e, 0x00, 0x09, 0x47, 0xe7, 0x0d, 0xb4, 0x43, 0xa3, 0x9b, 0x9b, 0xed, + 0xe8, 0xe5, 0x6d, 0x1b, 0x64, 0xd2, 0xcf, 0x05, 0x78, 0x38, 0x5e, 0x55, 0x71, 0xe5, 0x36, 0x29, + 0x07, 0xc0, 0xcf, 0x40, 0x81, 0x07, 0x0b, 0x71, 0xf0, 0x5a, 0x65, 0x39, 0x1a, 0x04, 0xa7, 0xa2, + 0xcf, 0xee, 0xa3, 0x74, 0xeb, 0x22, 0x5c, 0x57, 0x20, 0x0a, 0xc3, 0xca, 0xaf, 0x65, 0x9d, 0xc0, + 0x0f, 0xb1, 0xb4, 0xc2, 0x7b, 0x4c, 0xa2, 0x0d, 0xfb, 0xa4, 0x06, 0x7e, 0x1d, 0xb0, 0x06, 0x46, + 0x46, 0xad, 0xe2, 0x12, 0x12, 0xd4, 0x2f, 0x30, 0x09, 0x90, 0xc2, 0x1e, 0xfc, 0xfa, 0x6d, 0xda, + 0x41, 0x7f, 0x48, 0x1b, 0x36, 0x90, 0x3e, 0x5b, 0x6b, 0xc3, 0x7f, 0xd9, 0x20, 0x77, 0x37, 0x2d, + 0x6a, 0x4e, 0x62, 0xef, 0xa6, 0x87, 0x85, 0x65, 0x85, 0x40, 0xdc, 0x1d, 0x4f, 0x55, 0x30, 0x24, + 0x2c, 0x08, 0xfb, 0x2a, 0x6a, 0xfc, 0x26, 0x81, 0xe7, 0x6a, 0x43, 0xa0, 0x0b, 0x0e, 0x86, 0x79, + 0x48, 0xcb, 0x17, 0x68, 0xba, 0xfd, 0x45, 0x62, 0xb8, 0x68, 0x17, 0x69, 0x34, 0x2b, 0x4b, 0x1a, + 0xcf, 0xa4, 0xdd, 0xb5, 0xe4, 0xa8, 0x94, 0x48, 0x40, 0x20, 0x64, 0xae, 0x72, 0x76, 0x23, 0xc2, + 0x89, 0x4b, 0x0d, 0xee, 0xd5, 0xd1, 0x1f, 0x22, 0xc1, 0x27, 0x85, 0x14, 0xa3, 0x04, 0x2c, 0x2d, + 0x16, 0xcf, 0xb1, 0xa8, 0xda, 0x91, 0x03, 0x8d, 0x72, 0xe2, 0x07, 0x24, 0x2d, 0x18, 0x15, 0xce, + 0xd9, 0xfb, 0x30, 0x0c, 0xd3, 0xe5, 0x6f, 0xbb, 0xd6, 0x07, 0x7a, 0x40, 0xa1, 0x97, 0x23, 0x03, + 0x80, 0x85, 0xed, 0x3e, 0x55, 0xdf, 0x92, 0xcf, 0x5d, 0x3a, 0x6b, 0x32, 0x42, 0xff, 0xf1, 0x11, + 0x55, 0x6a, 0x68, 0x5d, 0x93, 0x6e, 0x95, 0x28, 0x1c, 0x8e, 0xca, 0x08, 0x5d, 0x44, 0xb8, 0xf4, + 0x58, 0x7a, 0x29, 0xdd, 0x64, 0xf8, 0xe6, 0xcf, 0xfe, 0x51, 0xc6, 0x9a, 0x8d, 0x93, 0x75, 0x6b, + 0x72, 0x64, 0x02, 0x4d, 0x25, 0x32, 0x1a, 0x28, 0x92, 0xc0, 0xfb, 0xab, 0x9c, 0x0b, 0xf0, 0x3e, + 0x1f, 0x0d, 0xea, 0x06, 0x93, 0x2e, 0x38, 0x10, 0x23, 0x91, 0x12, 0xd5, 0xfd, 0x36, 0xdc, 0x38, + 0x30, 0x24, 0xbf, 0xca, 0x82, 0x0e, 0xc1, 0x6e, 0xad, 0x0e, 0x11, 0x2a, 0xac, 0xa6, 0xb4, 0x1a, + 0xc3, 0x29, 0x7d, 0x11, 0xb9, 0x01, 0x0e, 0xed, 0x08, 0x73, 0x04, 0xf2, 0x1d, 0xa1, 0xb7, 0x22, + 0x12, 0x97, 0xdc, 0xc7, 0x67, 0xdc, 0x6b, 0x3b, 0x5b, 0xbf, 0xc4, 0xd0, 0x64, 0x95, 0x8d, 0xb4, + 0x59, 0x4c, 0xff, 0x3f, 0xa6, 0x17, 0x36, 0x86, 0x2b, 0x7a, 0x0c, 0x40, 0x3c, 0x3a, 0xb3, 0x47, + 0xf6, 0x34, 0x8a, 0x22, 0x38, 0x47, 0xed, 0xf1, 0xcc, 0x61, 0xfc, 0x11, 0xcc, 0x04, 0x1c, 0x62, + 0x2b, 0x35, 0x05, 0x36, 0x8f, 0xcf, 0xde, 0xf3, 0xab, 0xea, 0xac, 0xc0, 0x3a, 0xaf, 0x59, 0xec, + 0xb3, 0x6e, 0x47, 0xb0, 0x42, 0x90, 0x07, 0x30, 0x0f, 0x12, 0xd0, 0xdd, 0xbb, 0xbc, 0x72, 0xda, + 0x14, 0xe2, 0x81, 0x9b, 0xaf, 0x02, 0x72, 0xc1, 0xdc, 0x6f, 0x48, 0xe7, 0xd0, 0x66, 0xbe, 0xb9, + 0x7a, 0xa0, 0xcf, 0xe1, 0x15, 0x47, 0x77, 0x5e, 0xce, 0xc8, 0x96, 0x0c, 0x63, 0xab, 0xde, 0x77, + 0x06, 0xd4, 0x23, 0x7b, 0x48, 0xaa, 0x0d, 0x89, 0x0c, 0xde, 0x37, 0x4b, 0xdd, 0x38, 0x23, 0x8e, + 0x0d, 0x72, 0xef, 0xa7, 0x6f, 0xd4, 0x04, 0x55, 0xa8, 0x2b, 0x0c, 0xd9, 0x46, 0xb0, 0x64, 0x47, + 0xb4, 0x3d, 0x63, 0x42, 0x64, 0xec, 0x6c, 0x49, 0x6f, 0xe1, 0x9a, 0xb6, 0xa9, 0xf2, 0x1b, 0x1e, + 0x2f, 0x61, 0x3c, 0xe1, 0xc9, 0xd2, 0xaa, 0xe3, 0x2d, 0xf1, 0xd2, 0xc4, 0xba, 0x7a, 0xf5, 0x19, + 0x08, 0x50, 0xa3, 0x0f, 0xd4, 0xc1, 0x5b, 0x3d, 0x27, 0x7c, 0x18, 0x7d, 0x86, 0x1d, 
0x25, 0xa4, + 0x7f, 0xf9, 0xe7, 0x4c, 0x13, 0x82, 0xd2, 0x20, 0xa5, 0x97, 0x48, 0x58, 0x2f, 0x34, 0x10, 0x4b, + 0x07, 0x13, 0x33, 0x7a, 0x77, 0x45, 0x09, 0xbd, 0xbe, 0xdc, 0x72, 0x31, 0x39, 0x7b, 0xb2, 0xe7, + 0xdb, 0xbc, 0x47, 0x1d, 0x3f, 0x5f, 0xa2, 0x69, 0xff, 0x5a, 0x33, 0x0d, 0xf5, 0x40, 0x37, 0x99, + 0x5d, 0x5c, 0xad, 0x04, 0x5f, 0x43, 0xc8, 0xa8, 0x05, 0xff, 0x86, 0xa3, 0xe6, 0x1b, 0xbb, 0xf6, + 0xe6, 0x48, 0x4c, 0xcd, 0x06, 0xcc, 0xc5, 0x94, 0x22, 0x1e, 0x21, 0xe6, 0x43, 0x42, 0x85, 0x93, + 0x7a, 0x6b, 0xfe, 0xe5, 0x2e, 0x5b, 0x81, 0xbf, 0x88, 0x37, 0xf3, 0xc5, 0x36, 0x18, 0x18, 0xfa, + 0x01, 0x79, 0x45, 0x6c, 0x05, 0x5c, 0x02, 0xb1, 0xca, 0xf6, 0x0b, 0x03, 0xa7, 0x92, 0x85, 0x1a, + 0xdf, 0x30, 0x0a, 0x29, 0x38, 0x48, 0x96, 0xb2, 0xd6, 0x43, 0xfb, 0xf8, 0x40, 0x08, 0xf6, 0x8b, + 0x7d, 0x01, 0x15, 0x5f, 0xb5, 0xd5, 0xe9, 0x63, 0x7c, 0x1b, 0x21, 0xeb, 0x55, 0xf6, 0xa5, 0xbc, + 0xae, 0x48, 0xa5, 0xc3, 0x8a, 0x6f, 0x28, 0xd8, 0x7a, 0x59, 0x5c, 0x8b, 0xd4, 0x34, 0xca, 0xd2, + 0xef, 0xa4, 0x23, 0x08, 0xc6, 0xb8, 0xbf, 0xa1, 0xd3, 0x78, 0xf8, 0xe9, 0x7d, 0x7a, 0x51, 0x1e, + 0x51, 0x54, 0x9a, 0x96, 0x31, 0xb7, 0x45, 0x21, 0xb9, 0xc5, 0xb0, 0x27, 0xac, 0x1e, 0x71, 0xa9, + 0x7c, 0x93, 0xba, 0x97, 0x68, 0xb7, 0x4f, 0x47, 0x50, 0xee, 0xda, 0x94, 0x9b, 0x0e, 0xee, 0x58, + 0x77, 0xf3, 0x96, 0x56, 0xe0, 0x91, 0xcb, 0xbd, 0x12, 0xb9, 0xe4, 0x85, 0x44, 0x1f, 0x77, 0x7c, + 0x23, 0xe6, 0xb0, 0x8a, 0x60, 0xc1, 0xb5, 0x8e, 0x7e, 0x56, 0x7a, 0x5f, 0x00, 0x19, 0x15, 0x50, + 0x47, 0x70, 0xcb, 0xbf, 0x77, 0xaa, 0xa4, 0xe3, 0x08, 0x94, 0x3e, 0x3b, 0x58, 0x83, 0x08, 0x68, + 0x34, 0xd7, 0x86, 0xb3, 0xfa, 0xc1, 0x6a, 0xf8, 0xbd, 0x79, 0x4d, 0x17, 0x2a, 0xb5, 0xe6, 0x2e, + 0xee, 0xde, 0x4f, 0xdb, 0x45, 0x6f, 0x4a, 0x39, 0x9f, 0x65, 0x84, 0x0a, 0x8c, 0x2d, 0x4a, 0xc0, + 0x34, 0x6e, 0xb0, 0x62, 0x49, 0xfd, 0x61, 0x65, 0x68, 0x67, 0x0e, 0x5e, 0xc6, 0xbc, 0x50, 0x12, + 0x33, 0xee, 0xb7, 0xf2, 0x42, 0x2d, 0x9d, 0x0d, 0xcd, 0x46, 0x0c, 0x5d, 0x9c, 0x8b, 0xb6, 0x53, + 0x4c, 0x9b, 0x49, 0xf8, 0xe3, 0xce, 0xaa, 0x82, 0x33, 0x41, 0x6f, 0x4a, 0x60, 0x23, 0x1c, 0x59, + 0x1f, 0x3b, 0x00, 0x0f, 0x53, 0x4e, 0x28, 0xf0, 0x03, 0xe1, 0x2f, 0xba, 0x8b, 0x32, 0x33, 0x21, + 0x25, 0x27, 0xa8, 0x2b, 0xd8, 0x8c, 0x8b, 0xeb, 0x22, 0x96, 0x1e, 0x59, 0x5a, 0x76, 0x21, 0x14, + 0x18, 0x8c, 0x10, 0x5a, 0x4d, 0x09, 0xc8, 0xaf, 0xfe, 0x41, 0x4f, 0x2c, 0xc3, 0xa2, 0x6a, 0x2b, + 0xc4, 0xf9, 0x72, 0xde, 0x1c, 0xc4, 0x30, 0x58, 0x88, 0x2e, 0xfb, 0xde, 0x05, 0x0a, 0xea, 0x14, + 0x45, 0x5d, 0x20, 0xea, 0x5c, 0xd0, 0x71, 0x96, 0x5d, 0x09, 0xc3, 0x84, 0x66, 0xa1, 0x1f, 0xe2, + 0x02, 0xb9, 0xfb, 0x98, 0x2f, 0x0a, 0x6b, 0x8f, 0x95, 0xac, 0x1c, 0x02, 0x63, 0x2f, 0x64, 0x8a, + 0x8d, 0x89, 0x16, 0xe5, 0x78, 0x1f, 0xdd, 0xe1, 0x5d, 0x4e, 0x89, 0x05, 0xe1, 0x0d, 0xdc, 0xbc, + 0xf4, 0xfa, 0xf7, 0x24, 0x8e, 0x80, 0x9c, 0x4b, 0x33, 0x29, 0x81, 0x82, 0xe8, 0x15, 0xd1, 0x85, + 0x2c, 0xe0, 0x14, 0x2a, 0x43, 0xfe, 0x58, 0xe8, 0x91, 0xca, 0xc6, 0xb9, 0x34, 0x7e, 0xdc, 0xd0, + 0xf3, 0xc0, 0x76, 0x3e, 0x0a, 0x3b, 0x63, 0xfb, 0xe5, 0x19, 0xe9, 0xcd, 0xf7, 0x89, 0x9e, 0xc3, + 0xbc, 0x0b, 0x06, 0xbc, 0xe9, 0x01, 0xd4, 0xa7, 0x83, 0x38, 0xb2, 0xc8, 0x71, 0xa7, 0x1c, 0xfb, + 0xbc, 0xdd, 0xf1, 0xb4, 0x87, 0x9f, 0xbc, 0x53, 0x11, 0x6d, 0x67, 0xdf, 0x03, 0x1e, 0x41, 0xd4, + 0x1b, 0x27, 0x3c, 0x9c, 0x4b, 0x9a, 0xac, 0xcc, 0x36, 0x44, 0x8c, 0x31, 0xa3, 0x15, 0x63, 0x28, + 0x47, 0xf0, 0xaf, 0x16, 0xd1, 0xa9, 0x1a, 0x5d, 0xc1, 0x83, 0x8f, 0x5a, 0x9b, 0x70, 0xf0, 0x11, + 0x98, 0xf2, 0x72, 0x3b, 0x99, 0x3d, 0x01, 0x80, 0x74, 0xcb, 0x33, 0xd2, 0x80, 0x26, 0xfd, 0x9a, + 0xba, 0x3e, 
0x72, 0x1a, 0xfa, 0xc1, 0xdc, 0x79, 0x22, 0x85, 0x57, 0xfa, 0x03, 0x92, 0xfe, 0xce, + 0x87, 0x9f, 0x96, 0xa3, 0x0a, 0x5f, 0x75, 0x31, 0x69, 0x50, 0x9c, 0x67, 0x4e, 0x4a, 0x6e, 0xa9, + 0xc7, 0x10, 0x8c, 0x75, 0xa2, 0xf6, 0x10, 0xd2, 0xf6, 0xb0, 0x64, 0x1f, 0x23, 0x3f, 0xc1, 0x24, + 0x12, 0xac, 0x4b, 0x75, 0xbd, 0x70, 0xba, 0x29, 0xc6, 0xd0, 0x83, 0x96, 0xcc, 0xdf, 0x0a, 0xef, + 0x02, 0xe3, 0x02, 0xe7, 0x41, 0x45, 0x78, 0x10, 0xdf, 0xfc, 0xbf, 0x70, 0x49, 0xb1, 0xa2, 0x88, + 0x1d, 0x36, 0x4c, 0x8f, 0x21, 0x70, 0x72, 0x99, 0x91, 0x86, 0xe0, 0x60, 0x93, 0x2c, 0xbc, 0x2f, + 0xd7, 0x06, 0x3c, 0x4b, 0xcf, 0x76, 0x2e, 0x60, 0xb1, 0x48, 0xc0, 0x28, 0x2a, 0xfa, 0x8f, 0x54, + 0x54, 0x44, 0x26, 0x70, 0x01, 0xd4, 0x5d, 0x7d, 0xb6, 0x48, 0x19, 0x17, 0x30, 0x87, 0xfb, 0xbe, + 0x3d, 0x8e, 0x15, 0xa8, 0x3c, 0xcb, 0x36, 0x8b, 0x01, 0x51, 0x9d, 0xad, 0xd8, 0xf7, 0xb6, 0xba, + 0x92, 0x32, 0xde, 0x16, 0xfb, 0x07, 0x71, 0xf9, 0x2d, 0x3d, 0x2e, 0xdc, 0xea, 0x6c, 0x6b, 0xe6, + 0xc7, 0xae, 0x2d, 0x38, 0x43, 0xe6, 0xd8, 0xcb, 0x10, 0x08, 0x54, 0x01, 0xc7, 0x21, 0xdc, 0xa7, + 0x1c, 0x8f, 0xcd, 0xe3, 0xa7, 0x79, 0x93, 0x4d, 0x89, 0xd1, 0x0e, 0x22, 0x96, 0x4c, 0xaf, 0x2c, + 0xe0, 0x16, 0x7b, 0x01, 0x6f, 0x04, 0xdb, 0x99, 0x33, 0x32, 0x63, 0x07, 0x19, 0xf8, 0x4c, 0x98, + 0xbf, 0x0e, 0x40, 0x2e, 0xcf, 0xc4, 0xaf, 0x46, 0x2b, 0x1b, 0x24, 0xcd, 0x9b, 0xe8, 0x48, 0x9a, + 0x25, 0x8e, 0xed, 0xe8, 0xf5, 0x98, 0x28, 0x2e, 0x08, 0xbe, 0x8f, 0xb6, 0xd3, 0xf1, 0x8c, 0xea, + 0x4b, 0x66, 0x75, 0x66, 0xfa, 0xf1, 0x75, 0x5c, 0x1c, 0xbb, 0x1f, 0x73, 0xe7, 0x63, 0xd5, 0x12, + 0xbb, 0xb2, 0x4d, 0x97, 0x94, 0x24, 0xc4, 0x73, 0x5d, 0x78, 0x18, 0x31, 0x7a, 0xac, 0x5c, 0x6f, + 0xfb, 0x9f, 0xd2, 0x39, 0xf3, 0x6e, 0x79, 0x3f, 0x60, 0x09, 0x87, 0x63, 0x72, 0xe7, 0xe6, 0x89, + 0x6c, 0x84, 0x5e, 0xb5, 0x46, 0xaa, 0xe1, 0x0f, 0x66, 0xaa, 0xce, 0x23, 0xa7, 0x35, 0x1d, 0x8a, + 0x32, 0xfd, 0x09, 0xf9, 0x6a, 0x52, 0xc5, 0x3a, 0x6e, 0x3b, 0xa3, 0x9d, 0x19, 0x5e, 0x2c, 0x00, + 0x5c, 0x7c, 0x89, 0x2b, 0x48, 0x94, 0x56, 0x2a, 0x98, 0x4a, 0x22, 0x5e, 0x23, 0x32, 0x8e, 0x41, + 0xed, 0x63, 0xee, 0x72, 0x97, 0xa3, 0xc7, 0xc5, 0xbd, 0x5d, 0x26, 0x81, 0x5c, 0x29, 0xbd, 0x09, + 0xfe, 0x19, 0x91, 0x75, 0x30, 0xd7, 0x5d, 0x99, 0x3b, 0x48, 0x70, 0x4f, 0x3b, 0xcf, 0xee, 0xd4, + 0xd4, 0x2b, 0x64, 0x5b, 0x0e, 0x03, 0x75, 0x9f, 0x50, 0x60, 0xa9, 0xeb, 0xac, 0xcd, 0x20, 0xb2, + 0x16, 0x50, 0xcb, 0x19, 0xd6, 0x92, 0xfb, 0x35, 0xe5, 0xb4, 0x42, 0x6b, 0x32, 0x27, 0xfb, 0x1b, + 0x18, 0x44, 0x8c, 0xb7, 0xbb, 0xc3, 0x6b, 0xc7, 0x91, 0x27, 0xb0, 0xe2, 0xe8, 0xa9, 0x42, 0xcc, + 0xeb, 0x92, 0x32, 0x06, 0x5b, 0x80, 0xe8, 0x8a, 0x5b, 0xfb, 0xe5, 0x9b, 0x82, 0x17, 0x27, 0x57, + 0xa0, 0xac, 0xdd, 0xd2, 0xea, 0x00, 0x86, 0x85, 0xfd, 0x6b, 0x89, 0xf5, 0x27, 0x7b, 0xc5, 0x22, + 0x3e, 0xa8, 0x72, 0x59, 0x47, 0x5b, 0x52, 0x5f, 0xd7, 0xb6, 0x20, 0xe8, 0xb5, 0xb7, 0x75, 0x4a, + 0xa8, 0x7a, 0x7f, 0xa4, 0x3b, 0x64, 0x40, 0x8f, 0x33, 0xdd, 0xc3, 0x90, 0xb8, 0xfd, 0x72, 0x7d, + 0xc7, 0x9b, 0xe8, 0xfb, 0x4e, 0xfc, 0x72, 0x47, 0xa9, 0xf3, 0x00, 0x4f, 0x77, 0x40, 0xab, 0x5b, + 0x64, 0x5e, 0xf7, 0x3d, 0x98, 0x28, 0x84, 0xe0, 0xcf, 0xdf, 0xd7, 0xec, 0x7c, 0x7d, 0x97, 0xfe, + 0x14, 0x6c, 0x04, 0xc9, 0x06, 0x88, 0x66, 0x2f, 0x52, 0x95, 0xf3, 0x7b, 0x44, 0x1f, 0x04, 0xbf, + 0xbf, 0x88, 0x1b, 0xfa, 0xd5, 0x6f, 0x4b, 0x77, 0x61, 0xb3, 0x55, 0x8d, 0xd2, 0xef, 0x3b, 0x0a, + 0x4d, 0x85, 0xb8, 0x50, 0xca, 0x0d, 0x94, 0xb0, 0x84, 0x30, 0xd4, 0x61, 0xeb, 0x1f, 0x9a, 0xe5, + 0x2d, 0x2e, 0xfa, 0x78, 0x3e, 0xc9, 0xae, 0xbd, 0xbb, 0x0a, 0xe3, 0x09, 0x56, 0x5a, 0x43, 0x88, + 0x62, 0x99, 0xa7, 0x89, 0xf5, 0x19, 
0xce, 0xa2, 0xeb, 0xe4, 0xd8, 0xfe, 0x99, 0xef, 0xf4, 0xb7, + 0x76, 0x2a, 0x49, 0xd1, 0x9a, 0x3e, 0x17, 0x5a, 0x78, 0x1f, 0x63, 0x8d, 0x91, 0x72, 0x95, 0xb5, + 0xf1, 0xb0, 0x38, 0x37, 0x9d, 0x93, 0xfa, 0x73, 0x10, 0x29, 0x06, 0x46, 0xfd, 0xbb, 0x6c, 0xda, + 0xe7, 0xb7, 0x69, 0x7d, 0x76, 0x85, 0xba, 0x3e, 0x0f, 0x50, 0xad, 0x6e, 0xde, 0xcc, 0xa8, 0xe5, + 0x3d, 0xd0, 0x66, 0x6c, 0xb9, 0x6b, 0xbf, 0x8b, 0xc4, 0x0d, 0xb1, 0x2b, 0x47, 0x5b, 0xd3, 0x8d, + 0x02, 0xbb, 0x12, 0xea, 0xcb, 0xe6, 0x13, 0x08, 0x31, 0x2d, 0x6b, 0x3b, 0x92, 0xaa, 0xb0, 0xd1, + 0xcc, 0x98, 0x15, 0x6a, 0x50, 0xdc, 0x62, 0x24, 0x9e, 0xfe, 0x53, 0xe5, 0xde, 0xee, 0xdf, 0x9e, + 0x80, 0xdd, 0xbd, 0x22, 0x85, 0xa5, 0xcb, 0x18, 0xcf, 0xad, 0x59, 0x9b, 0xfa, 0x27, 0x7e, 0x44, + 0x75, 0xd8, 0xd9, 0x64, 0x3f, 0x2d, 0xe1, 0x51, 0xa3, 0x99, 0xef, 0x00, 0x57, 0xed, 0x8c, 0xf3, + 0x0f, 0xb5, 0x01, 0x0e, 0xc9, 0xb4, 0x2d, 0x61, 0x2a, 0xd1, 0x7c, 0xa3, 0x5e, 0x99, 0xd0, 0x69, + 0x70, 0x87, 0xa4, 0xda, 0xa6, 0x41, 0x57, 0xa6, 0x70, 0x9b, 0x8e, 0xa8, 0x9f, 0xf6, 0x5f, 0xf1, + 0x90, 0x71, 0x99, 0x63, 0xf1, 0xf5, 0x0a, 0x9b, 0x03, 0x97, 0x5a, 0x85, 0x83, 0xc2, 0x74, 0x33, + 0xd7, 0x71, 0x0a, 0xc2, 0xbe, 0xc6, 0xde, 0xaa, 0x53, 0x32, 0x55, 0xbd, 0x01, 0xf5, 0x04, 0xdf, + 0x5c, 0x04, 0x60, 0xdb, 0x46, 0xde, 0x26, 0x14, 0x5d, 0x6d, 0xe6, 0x23, 0xc5, 0x13, 0x53, 0xd3, + 0xaa, 0x8a, 0xe0, 0x29, 0xb5, 0xa5, 0xc0, 0x4d, 0x64, 0x25, 0x34, 0x73, 0xc1, 0x7d, 0xb5, 0xbd, + 0x90, 0xcd, 0x51, 0xf7, 0x92, 0x3f, 0x58, 0xf5, 0x87, 0x3a, 0x26, 0x72, 0x50, 0x4a, 0x7a, 0x0e, + 0xb0, 0xa8, 0x28, 0x11, 0x34, 0xd4, 0x59, 0xb5, 0xe7, 0x83, 0xc5, 0xd5, 0x62, 0xaa, 0xf5, 0x8a, + 0x05, 0xf0, 0xdd, 0xd7, 0x9c, 0x33, 0x5c, 0xd8, 0xbf, 0x46, 0xb5, 0x4a, 0x31, 0x77, 0x75, 0x53, + 0xbe, 0x7d, 0xab, 0xbf, 0xa0, 0xcd, 0x37, 0x2f, 0x53, 0xc5, 0x18, 0x9c, 0x05, 0x3b, 0xb6, 0x15, + 0x47, 0x40, 0x26, 0x9b, 0xf0, 0x11, 0xbb, 0x40, 0xad, 0xde, 0x00, 0xdb, 0x35, 0x4a, 0x78, 0x29, + 0x23, 0x5f, 0x8b, 0xce, 0x6f, 0xa7, 0x68, 0x03, 0x63, 0x6d, 0x9e, 0xaf, 0xda, 0x01, 0x2e, 0x69, + 0xc4, 0x73, 0xef, 0x4d, 0xcf, 0xf1, 0x28, 0xf4, 0x30, 0xee, 0x6e, 0x41, 0x81, 0x67, 0xb7, 0x73, + 0x65, 0x88, 0x87, 0xbd, 0x1c, 0x12, 0xe2, 0xe2, 0x28, 0x95, 0x2a, 0x11, 0x56, 0x41, 0x6c, 0xab, + 0x2c, 0x84, 0x16, 0x1f, 0xfe, 0x38, 0xe6, 0xc4, 0xcd, 0x87, 0xc7, 0x78, 0xb6, 0x7e, 0xd2, 0x83, + 0x3d, 0x6a, 0xca, 0xe3, 0x98, 0x8b, 0xca, 0xe9, 0x5f, 0xbf, 0x79, 0x5c, 0xba, 0x84, 0xc6, 0x1a, + 0x51, 0xaf, 0xb4, 0x09, 0x4d, 0x2c, 0x9b, 0x13, 0xe4, 0x37, 0xfc, 0x49, 0x48, 0xa5, 0x4a, 0x27, + 0xe2, 0x2b, 0xda, 0xad, 0x89, 0xfb, 0x87, 0xc1, 0xec, 0x8a, 0xfb, 0xe2, 0xaf, 0x3c, 0xc1, 0xa3, + 0x51, 0x9d, 0xf1, 0x60, 0x3d, 0x6b, 0x36, 0x10, 0x1a, 0x35, 0xc3, 0x12, 0xc9, 0xba, 0xe4, 0xaf, + 0x63, 0x22, 0x12, 0x00, 0x20, 0xd2, 0x1d, 0x5c, 0xa5, 0xc4, 0x52, 0xd9, 0xd8, 0x9f, 0x4e, 0x91, + 0x75, 0x1c, 0xa7, 0xd8, 0xe1, 0x83, 0xda, 0x6b, 0xa3, 0x99, 0x3f, 0x82, 0x68, 0x4f, 0xe3, 0x9e, + 0xfc, 0x46, 0x6b, 0xfe, 0x64, 0x87, 0xf4, 0xc8, 0x11, 0x22, 0x73, 0xcf, 0xde, 0x17, 0xad, 0xf7, + 0xb7, 0xba, 0x36, 0xfb, 0xcf, 0xaa, 0x17, 0x4f, 0x91, 0x2f, 0xd6, 0xef, 0x19, 0xec, 0x09, 0x5a, + 0x1f, 0x4b, 0x3b, 0x83, 0x81, 0xb2, 0xa8, 0x97, 0x97, 0x7f, 0x86, 0xc4, 0xe0, 0xf4, 0x98, 0xf7, + 0x70, 0x28, 0x88, 0xcc, 0x5f, 0xab, 0xa6, 0xd6, 0x0c, 0x6d, 0x65, 0xc6, 0x9d, 0x66, 0x54, 0x94, + 0xa2, 0xf0, 0xef, 0xdd, 0x86, 0xa4, 0x78, 0xa6, 0x85, 0x90, 0x74, 0x99, 0xb4, 0xed, 0xfd, 0xb0, + 0x9e, 0x29, 0x54, 0x69, 0x88, 0x46, 0xec, 0xac, 0x72, 0x20, 0x90, 0x9c, 0x9b, 0x4b, 0xb9, 0xe8, + 0x9f, 0xb9, 0xda, 0xd6, 0xea, 0x36, 0x04, 0x85, 0xcc, 0x04, 
0x6f, 0x74, 0xad, 0x7f, 0xb2, 0x2b, + 0xe9, 0x98, 0x57, 0x00, 0xb6, 0x50, 0x52, 0x30, 0x06, 0x80, 0x31, 0xb4, 0x64, 0x7f, 0x04, 0xe2, + 0x34, 0x9a, 0xeb, 0xb1, 0x29, 0x39, 0x2c, 0xc9, 0x3d, 0x6f, 0x78, 0x11, 0x0b, 0x31, 0x78, 0x03, + 0x52, 0x75, 0xff, 0x92, 0x66, 0xbd, 0x29, 0x03, 0x10, 0x84, 0xd4, 0xda, 0xc5, 0x79, 0x07, 0x67, + 0xea, 0xee, 0x1e, 0x8a, 0x80, 0x9e, 0xa8, 0xd6, 0x44, 0xb3, 0x60, 0xf7, 0xa0, 0xc1, 0x85, 0x40, + 0x71, 0xc9, 0x04, 0xe4, 0x3e, 0x4c, 0x1f, 0xea, 0x7c, 0xd6, 0x72, 0x57, 0x32, 0x44, 0x59, 0x63, + 0x60, 0xc5, 0x5c, 0xe8, 0xda, 0x8c, 0x7c, 0x93, 0x49, 0x34, 0xd4, 0xf8, 0xb4, 0xad, 0xf8, 0xa9, + 0xa2, 0x6a, 0xd7, 0xfc, 0x04, 0x75, 0x9c, 0xc0, 0x64, 0x6d, 0x90, 0xe5, 0x77, 0xba, 0x94, 0x05, + 0x8d, 0x09, 0xee, 0xd5, 0xb2, 0x7b, 0xa8, 0x9e, 0x7b, 0x48, 0x43, 0x94, 0xa8, 0x9d, 0x21, 0x60, + 0x04, 0x6d, 0x69, 0xe9, 0x67, 0x03, 0x2f, 0x88, 0xab, 0x5c, 0x4d, 0x17, 0xca, 0xb6, 0x43, 0xa1, + 0x00, 0x8a, 0xa4, 0x10, 0x24, 0x1c, 0x35, 0xe4, 0xbd, 0x7b, 0x3d, 0xcd, 0x76, 0x4d, 0x6e, 0x4c, + 0x09, 0xf2, 0x39, 0x9d, 0x02, 0xe1, 0x55, 0x83, 0x65, 0x67, 0x74, 0x2a, 0x41, 0xd9, 0x8b, 0x64, + 0x26, 0x03, 0xd4, 0x9f, 0x6f, 0xc4, 0x00, 0xa3, 0x4b, 0xf8, 0xdd, 0x79, 0x2c, 0x93, 0x89, 0x40, + 0x1a, 0x0e, 0xf2, 0xd4, 0x09, 0x43, 0x81, 0x06, 0x8e, 0xbc, 0x71, 0xa8, 0xe7, 0x6a, 0x13, 0xde, + 0xc0, 0x16, 0x51, 0x7f, 0x9f, 0xd6, 0x2c, 0x47, 0xac, 0xdf, 0x93, 0x97, 0xa8, 0xe2, 0x79, 0xc7, + 0xb7, 0x4b, 0x99, 0xf4, 0x36, 0x75, 0x19, 0x7c, 0x92, 0x78, 0x34, 0x60, 0xa8, 0xc1, 0x66, 0x30, + 0x8f, 0x9e, 0xd6, 0xed, 0x17, 0xba, 0xb1, 0x4e, 0xd7, 0xcd, 0x45, 0x32, 0xaa, 0x85, 0x50, 0xab, + 0x85, 0xb6, 0xba, 0xd3, 0x47, 0x8d, 0x8c, 0x83, 0x89, 0xa3, 0xe6, 0x62, 0x5f, 0x1b, 0xbd, 0xda, + 0xf5, 0x30, 0x94, 0xcf, 0x84, 0x11, 0x95, 0xf5, 0xe4, 0x73, 0x22, 0x54, 0xe2, 0xbb, 0x82, 0x85, + 0x67, 0x85, 0xc6, 0x4d, 0xa9, 0x04, 0xe0, 0xf2, 0xf6, 0x36, 0xfe, 0x42, 0xb4, 0xc3, 0xa5, 0x08, + 0xff, 0x4e, 0xa3, 0xd4, 0xad, 0x69, 0x95, 0x0b, 0x45, 0x83, 0x7b, 0x99, 0xaa, 0xa5, 0x8b, 0xe4, + 0x57, 0x4f, 0xe9, 0xd8, 0x5e, 0xa8, 0x3d, 0x36, 0xa8, 0x48, 0x3e, 0x87, 0xec, 0x5c, 0x42, 0xfa, + 0x19, 0x18, 0xd7, 0x00, 0xbf, 0xa3, 0xc0, 0x39, 0x27, 0xba, 0x36, 0x36, 0xeb, 0xd4, 0x30, 0x80, + 0xe4, 0x00, 0x88, 0x7b, 0x89, 0x30, 0x3e, 0xdd, 0x25, 0xa7, 0x50, 0x1b, 0xa8, 0xf7, 0x07, 0x4e, + 0xf4, 0x30, 0x71, 0x10, 0xac, 0x59, 0x7b, 0xa0, 0x1f, 0x07, 0xbc, 0x67, 0x95, 0x6a, 0xfa, 0xca, + 0x4b, 0x1c, 0x08, 0xba, 0xb3, 0x58, 0x44, 0xc5, 0x40, 0xf1, 0xcf, 0xa6, 0x22, 0x98, 0xe2, 0x39, + 0xd1, 0x5c, 0x43, 0x98, 0xf5, 0x71, 0x6f, 0x02, 0xec, 0x19, 0xfa, 0x15, 0xf3, 0x6e, 0x04, 0x9f, + 0xc1, 0x7b, 0x57, 0x60, 0x04, 0xaa, 0x75, 0x78, 0x1a, 0xb7, 0x30, 0x06, 0xff, 0x39, 0x08, 0x3c, + 0x91, 0xc0, 0xca, 0x7f, 0x68, 0xa1, 0xaf, 0xa5, 0x36, 0x58, 0x08, 0x69, 0x52, 0x7e, 0x85, 0x4f, + 0x2d, 0xb7, 0x72, 0x2b, 0x9e, 0xef, 0x2a, 0x88, 0x8b, 0x34, 0x20, 0x37, 0xa3, 0x44, 0x71, 0x29, + 0x74, 0x03, 0xf1, 0x19, 0xaa, 0x6a, 0xa6, 0x85, 0x6c, 0x9c, 0x83, 0xc9, 0xf3, 0xfc, 0xb8, 0x65, + 0x4c, 0x09, 0xa7, 0xa3, 0xbd, 0x3c, 0xcb, 0x26, 0xd5, 0xb8, 0x2d, 0x9c, 0x94, 0x72, 0xe5, 0xbb, + 0x2d, 0xe1, 0x72, 0x22, 0x04, 0x05, 0xd1, 0xb9, 0xb1, 0xf5, 0x0e, 0xfe, 0x0a, 0x1e, 0xab, 0xb6, + 0x7d, 0xb7, 0x4c, 0x6f, 0x13, 0x9f, 0x69, 0x1e, 0x09, 0x7f, 0xab, 0x31, 0x4f, 0x72, 0x24, 0xae, + 0x42, 0x5c, 0x6e, 0x3f, 0xb0, 0xb6, 0x05, 0x6f, 0x86, 0x63, 0x74, 0xc0, 0xa9, 0xdd, 0xf1, 0x9a, + 0x29, 0x05, 0xa4, 0x69, 0x33, 0x32, 0x0b, 0x57, 0xca, 0x6b, 0x90, 0xcd, 0x05, 0x3a, 0x12, 0x8d, + 0x42, 0xf5, 0xd5, 0xb6, 0xaf, 0x1b, 0x05, 0x93, 0x0d, 0xe4, 0xd9, 0x2c, 0x48, 0x26, 
0x31, 0x53, + 0xd1, 0x22, 0x29, 0xe9, 0x7d, 0xc2, 0x4b, 0x71, 0x49, 0x84, 0x84, 0xa8, 0x1c, 0x2b, 0x73, 0x1b, + 0x1d, 0x4a, 0x96, 0xa9, 0x04, 0x2d, 0x7e, 0x79, 0x60, 0x9a, 0xd4, 0xae, 0x8e, 0x78, 0x61, 0x49, + 0x0e, 0x16, 0xc0, 0x2f, 0x74, 0xd7, 0xfd, 0x3c, 0x78, 0x9a, 0xa0, 0x82, 0x29, 0xda, 0x22, 0x53, + 0xf7, 0x11, 0x7e, 0xb9, 0xcb, 0x93, 0xc5, 0x5d, 0x8b, 0x34, 0x00, 0xa4, 0xfd, 0x26, 0x2f, 0x14, + 0x9c, 0x5a, 0x54, 0xc3, 0xbb, 0xf4, 0x39, 0x85, 0xf0, 0xfe, 0x95, 0xeb, 0xfd, 0x83, 0x8e, 0x86, + 0xe2, 0x3e, 0x97, 0xa9, 0x92, 0x4b, 0xa3, 0x6a, 0xe2, 0xc6, 0xf2, 0xa7, 0x87, 0x3e, 0x92, 0xd0, + 0x45, 0xc0, 0xbd, 0x96, 0x98, 0xbb, 0xb6, 0xa4, 0x24, 0xa9, 0x69, 0x1e, 0x88, 0x24, 0xf6, 0x7d, + 0x25, 0x81, 0x3a, 0xb5, 0x5f, 0xf8, 0x2e, 0x04, 0x7c, 0x35, 0xc1, 0x46, 0x8d, 0xb9, 0x53, 0xbc, + 0xd2, 0x79, 0xd8, 0x16, 0x0c, 0xf8, 0xb9, 0xc5, 0x0e, 0x8b, 0xf4, 0xdc, 0xf3, 0xfd, 0xe8, 0xc4, + 0xfd, 0x19, 0x5e, 0x75, 0xe8, 0xd1, 0x8c, 0x49, 0x72, 0xb9, 0x75, 0xbd, 0x97, 0x5f, 0x65, 0xe8, + 0x64, 0x25, 0x31, 0x5c, 0xd1, 0x67, 0x16, 0xab, 0x33, 0x8d, 0x29, 0xdb, 0x68, 0x7d, 0x75, 0xc3, + 0xe5, 0x90, 0x20, 0x94, 0x88, 0xea, 0xed, 0x22, 0x7c, 0xfd, 0x85, 0x35, 0xb0, 0xdc, 0x48, 0xe8, + 0x70, 0x15, 0x7a, 0xfb, 0xd8, 0x35, 0xb6, 0x4c, 0xe8, 0x0c, 0xcc, 0x4d, 0x0d, 0xbf, 0x80, 0xa3, + 0x3c, 0xc7, 0xa3, 0xd1, 0x5e, 0x6f, 0xd6, 0x45, 0xc0, 0xe7, 0xea, 0xd2, 0xca, 0x99, 0x01, 0x46, + 0x94, 0x70, 0x29, 0xa2, 0x1d, 0x35, 0x2f, 0xfb, 0xa2, 0xc9, 0xc9, 0xa4, 0x79, 0x05, 0x67, 0x85, + 0xfc, 0x71, 0xe7, 0x1b, 0xe8, 0x77, 0x3d, 0x45, 0xa7, 0xff, 0xb5, 0xeb, 0xa1, 0xe3, 0x75, 0xbe, + 0xb2, 0xe5, 0x8f, 0x93, 0x95, 0x81, 0xf9, 0xc7, 0x3e, 0x30, 0x2e, 0x84, 0x1f, 0x5c, 0xee, 0xf1, + 0x1f, 0xdd, 0x21, 0x45, 0x61, 0x28, 0x44, 0xf4, 0x80, 0xbf, 0xe5, 0x0d, 0x6a, 0x1b, 0x5b, 0x40, + 0xd5, 0x81, 0xef, 0x15, 0x13, 0x4c, 0x04, 0x96, 0xe3, 0x74, 0xdd, 0x9d, 0x4f, 0xfd, 0x5e, 0xa1, + 0x65, 0xfc, 0x92, 0xc9, 0xd1, 0xee, 0x59, 0x55, 0x28, 0x78, 0xeb, 0xee, 0x1e, 0x65, 0x4c, 0x4f, + 0x90, 0x80, 0x93, 0x20, 0x66, 0x53, 0x68, 0x66, 0x2f, 0x33, 0x53, 0x08, 0xed, 0x19, 0x2e, 0xe6, + 0x87, 0x32, 0x9c, 0x5f, 0xd2, 0x76, 0x57, 0xe2, 0x54, 0x00, 0x06, 0x1e, 0x3a, 0xc3, 0x76, 0x50, + 0x37, 0xa5, 0xfe, 0xee, 0x34, 0xa8, 0xb6, 0xab, 0xd9, 0x43, 0x52, 0xe1, 0x16, 0x46, 0x77, 0xad, + 0x01, 0xbe, 0x78, 0xe1, 0x6c, 0xe2, 0x08, 0xac, 0x10, 0xa8, 0x6f, 0x29, 0xdd, 0xc9, 0x06, 0x3f, + 0x46, 0xa0, 0x4d, 0x15, 0xce, 0xe9, 0xc3, 0x9e, 0x41, 0x26, 0x83, 0x46, 0xb5, 0x65, 0x52, 0xd7, + 0xf4, 0x4f, 0xca, 0x79, 0x28, 0x23, 0xb6, 0x5c, 0x62, 0x58, 0xf8, 0x59, 0xa8, 0x2f, 0x1b, 0xae, + 0xe5, 0x8a, 0x1b, 0x6a, 0xa4, 0x66, 0x28, 0x96, 0x87, 0xb4, 0x6d, 0x38, 0x4e, 0xa4, 0x57, 0x83, + 0xca, 0x1b, 0x25, 0xc7, 0xc9, 0x8c, 0x43, 0x41, 0x5a, 0x3d, 0x9a, 0xdf, 0x72, 0x00, 0xe8, 0x08, + 0x92, 0x15, 0x68, 0x22, 0x46, 0x25, 0xa6, 0xbe, 0x22, 0x48, 0x6a, 0xdf, 0x1e, 0x66, 0xa7, 0x48, + 0x3e, 0x0f, 0x41, 0x9d, 0xaa, 0x68, 0x7f, 0x57, 0xc1, 0x65, 0xf4, 0x4a, 0xfb, 0x4b, 0x6a, 0x0c, + 0x55, 0x5e, 0x0e, 0x55, 0x89, 0xf0, 0x71, 0xd1, 0x9e, 0xa3, 0x67, 0xa7, 0xf0, 0x81, 0x3d, 0xf5, + 0x2a, 0xcf, 0xd6, 0x12, 0x2c, 0xbb, 0xc8, 0x91, 0x33, 0x26, 0x72, 0x57, 0xe6, 0x94, 0x8d, 0xae, + 0x67, 0xd2, 0x36, 0x4d, 0xec, 0xf0, 0xcb, 0x95, 0x67, 0x66, 0xde, 0xba, 0x6d, 0x2c, 0xba, 0x1f, + 0x54, 0x86, 0x69, 0x4d, 0x71, 0xe1, 0x87, 0xda, 0x41, 0xb6, 0xcd, 0x22, 0x8b, 0xd9, 0x9a, 0xc3, + 0xaa, 0x64, 0x8a, 0x02, 0x49, 0x56, 0xd2, 0x56, 0xd3, 0x8e, 0x91, 0xb8, 0x1f, 0x0e, 0x93, 0xf7, + 0xfb, 0xd0, 0xda, 0xc0, 0xe8, 0x05, 0x6d, 0xb7, 0x78, 0xc7, 0x4a, 0xf4, 0x5c, 0xc9, 0x42, 0x2c, + 0x1d, 0x68, 
0x40, 0xb0, 0xd3, 0x8a, 0xff, 0xd2, 0x01, 0xda, 0x0b, 0x6e, 0x27, 0xc7, 0xee, 0x20, + 0x46, 0x94, 0xcc, 0x5b, 0x15, 0xf1, 0xac, 0x01, 0x2a, 0xbe, 0xff, 0xd0, 0x4b, 0xee, 0x90, 0x9c, + 0x86, 0xc5, 0x3c, 0x9e, 0xe2, 0xa8, 0xab, 0x16, 0x77, 0x2d, 0xdd, 0xfa, 0xe4, 0x66, 0x80, 0x66, + 0xe7, 0x3c, 0x15, 0x12, 0xd9, 0x19, 0x5e, 0xed, 0x9a, 0x59, 0x42, 0x6e, 0x79, 0x27, 0xc0, 0xe9, + 0xa6, 0xc0, 0x93, 0xfe, 0x55, 0x7a, 0x40, 0x4c, 0xc1, 0xf1, 0x97, 0xff, 0xeb, 0x21, 0x6c, 0x59, + 0x4f, 0x12, 0x3b, 0xf8, 0xbd, 0xc0, 0xfe, 0xbe, 0x5b, 0xfb, 0x56, 0xdd, 0x5a, 0xe4, 0x03, 0x3f, + 0x05, 0xb5, 0xf2, 0x5d, 0x0a, 0x84, 0xdf, 0xb1, 0x43, 0x2f, 0x57, 0x14, 0x07, 0x92, 0x31, 0x5e, + 0xcd, 0x15, 0x12, 0x31, 0x9f, 0xb5, 0x6c, 0xea, 0x3e, 0xde, 0xcf, 0xce, 0xeb, 0xef, 0xf7, 0x0b, + 0xcd, 0x71, 0x59, 0xd0, 0x71, 0x2c, 0x7b, 0x4b, 0x95, 0x73, 0xdd, 0xcc, 0x38, 0x51, 0x9c, 0x7b, + 0xe4, 0x44, 0x95, 0x2d, 0xa4, 0xbb, 0x40, 0xfb, 0x4b, 0xfa, 0x45, 0x2e, 0xdc, 0x6e, 0x29, 0xd7, + 0x52, 0x47, 0xff, 0x28, 0x28, 0x9d, 0x1a, 0x6e, 0x0a, 0x8e, 0x67, 0x03, 0x2b, 0x81, 0x21, 0xa5, + 0x9e, 0x00, 0x44, 0x87, 0xa0, 0x30, 0x0e, 0x7a, 0xaf, 0x12, 0xc1, 0x3b, 0x10, 0xea, 0xe2, 0x78, + 0xc3, 0x2a, 0xf8, 0x38, 0x6c, 0x06, 0x30, 0xf7, 0xf7, 0x89, 0x23, 0x69, 0x56, 0xc7, 0x88, 0x66, + 0x35, 0xd4, 0x0c, 0xdb, 0x07, 0xbf, 0x9a, 0x83, 0x38, 0x2d, 0xa5, 0xa2, 0xa2, 0x9d, 0xd0, 0x68, + 0x6b, 0x86, 0x1f, 0x23, 0x3c, 0xfe, 0x28, 0x17, 0xc3, 0x63, 0x70, 0x1a, 0x05, 0x50, 0x28, 0x40, + 0x3d, 0x6a, 0x55, 0x1e, 0xe5, 0xc0, 0x8d, 0xd9, 0xa2, 0xbb, 0xd4, 0x45, 0x70, 0x7b, 0xc3, 0x21, + 0x3d, 0x33, 0xae, 0x08, 0x63, 0x38, 0xf9, 0x81, 0x0c, 0x0b, 0xba, 0x62, 0xa4, 0x74, 0xe4, 0x2e, + 0x50, 0x2c, 0x61, 0x71, 0x7f, 0xc7, 0x90, 0x46, 0x85, 0x47, 0x38, 0xd3, 0xd1, 0x1f, 0x21, 0xec, + 0x09, 0x3c, 0xfa, 0xe3, 0x75, 0x6e, 0xa2, 0x03, 0xba, 0xf1, 0x3c, 0xaf, 0x04, 0xe1, 0xb5, 0xe4, + 0x7d, 0x38, 0xa7, 0x5d, 0x89, 0x56, 0xf4, 0x27, 0xc4, 0x86, 0x7e, 0x3a, 0xb6, 0xc4, 0x1d, 0x02, + 0xb6, 0x73, 0x9e, 0xa3, 0x91, 0x70, 0x87, 0x9e, 0x3a, 0xeb, 0x7b, 0x35, 0x2f, 0xed, 0x34, 0xd5, + 0x36, 0xbb, 0xf0, 0x2e, 0x65, 0x60, 0xea, 0x50, 0xde, 0xe1, 0x50, 0x3c, 0xfc, 0x19, 0xdb, 0x50, + 0x7e, 0xea, 0x82, 0x58, 0xbe, 0x7a, 0x97, 0x15, 0xd0, 0x41, 0x37, 0x70, 0x24, 0x66, 0x99, 0x98, + 0x7e, 0x88, 0x42, 0x7d, 0x84, 0x13, 0x40, 0xb9, 0xdf, 0xc0, 0x6a, 0xc7, 0x73, 0x76, 0x06, 0x9b, + 0xa2, 0x23, 0x7a, 0x92, 0x78, 0xb5, 0x67, 0xb3, 0x83, 0xf7, 0xc7, 0xb0, 0x94, 0xfe, 0xa6, 0xed, + 0x27, 0x76, 0x55, 0x83, 0x44, 0xb7, 0x52, 0x38, 0x57, 0x41, 0x98, 0xb5, 0x4f, 0x09, 0x22, 0xd7, + 0x20, 0x61, 0xb5, 0x3e, 0xaf, 0x82, 0x50, 0xde, 0x8b, 0x02, 0x69, 0xad, 0x4a, 0x3c, 0xfc, 0x68, + 0x21, 0x41, 0xa9, 0x83, 0xd3, 0xe5, 0xef, 0xa3, 0x97, 0x50, 0xac, 0x09, 0xe2, 0xe4, 0xf5, 0xba, + 0x7f, 0xab, 0xd5, 0xd1, 0xd6, 0x9b, 0xfd, 0x93, 0xf1, 0x54, 0xda, 0xb0, 0x2e, 0xe6, 0x71, 0xcb, + 0x3e, 0x94, 0x27, 0xa6, 0x3b, 0x23, 0xed, 0xfc, 0xeb, 0x93, 0x43, 0x7e, 0x7a, 0xfa, 0xfd, 0xd2, + 0xb5, 0x98, 0x01, 0x44, 0x59, 0xee, 0x35, 0x9a, 0x55, 0x0a, 0x8c, 0x69, 0x2e, 0x56, 0x0b, 0x19, + 0x68, 0x19, 0x5b, 0xb8, 0x01, 0x1f, 0x2e, 0xcc, 0x5c, 0x2b, 0xb4, 0xbd, 0x26, 0xa4, 0x69, 0x42, + 0x75, 0x71, 0xdb, 0x9f, 0x98, 0xfa, 0x30, 0xdb, 0x0b, 0x1e, 0x02, 0xe1, 0xd6, 0x22, 0xa8, 0xd6, + 0x16, 0x6f, 0x83, 0x26, 0xc3, 0x87, 0x9c, 0x7e, 0x91, 0xea, 0x21, 0x17, 0x50, 0xec, 0x8d, 0x33, + 0xaf, 0xf1, 0x5b, 0xa7, 0xd8, 0x96, 0x77, 0xb7, 0xad, 0x5b, 0x13, 0x08, 0x4b, 0x38, 0x64, 0x7e, + 0x6c, 0x7d, 0xde, 0x9c, 0xaa, 0x73, 0xc7, 0xfe, 0x05, 0xb0, 0x04, 0x42, 0x96, 0x7a, 0x88, 0x52, + 0xf1, 0x2b, 0xed, 0x4d, 0x09, 0x1b, 
0x32, 0xfc, 0x91, 0x94, 0x65, 0x6a, 0x8a, 0x32, 0x01, 0x2d, + 0x87, 0x39, 0x6e, 0x17, 0x8f, 0xdf, 0xc0, 0xbf, 0x6f, 0xf0, 0x14, 0x77, 0x7f, 0x7c, 0x8a, 0x9b, + 0x49, 0x3d, 0x23, 0xef, 0xa6, 0x95, 0xc8, 0x44, 0x55, 0xeb, 0x88, 0x9d, 0xd8, 0x8e, 0x9e, 0xca, + 0x30, 0x4e, 0x11, 0x5b, 0xf2, 0x7b, 0x2a, 0xde, 0x9f, 0xe3, 0x2f, 0xfe, 0x6a, 0x68, 0x56, 0x7b, + 0x6f, 0xf0, 0x82, 0xf3, 0xc1, 0x6a, 0x49, 0x20, 0x94, 0xf5, 0xc0, 0x81, 0xdf, 0xc7, 0x85, 0x32, + 0xf1, 0x4b, 0x68, 0x56, 0xa2, 0x5c, 0x38, 0x78, 0xef, 0xb9, 0x10, 0x64, 0xdd, 0xd2, 0x39, 0xa4, + 0x62, 0xcb, 0xa1, 0xf8, 0x81, 0x75, 0x3c, 0xd6, 0x53, 0x92, 0xb0, 0x77, 0x11, 0x95, 0x50, 0xb0, + 0xed, 0x17, 0xbd, 0xaf, 0x74, 0x5a, 0x18, 0x62, 0x0e, 0x4c, 0x18, 0x09, 0x59, 0x37, 0xe2, 0x3e, + 0x54, 0xfd, 0xc6, 0x9d, 0xf7, 0x37, 0x48, 0xe1, 0xc4, 0x65, 0xc5, 0xd9, 0xd1, 0xe1, 0xa3, 0xf2, + 0x48, 0x53, 0x85, 0x36, 0x9c, 0x54, 0x99, 0x41, 0x2a, 0x08, 0xde, 0xb8, 0x14, 0xce, 0x00, 0x59, + 0x2b, 0x06, 0x09, 0x15, 0x28, 0x5c, 0xcb, 0xeb, 0x7e, 0xc5, 0xa6, 0xea, 0x2f, 0xab, 0x98, 0x49, + 0x64, 0x2e, 0x65, 0x81, 0x30, 0x80, 0x4f, 0xee, 0x45, 0x96, 0xd8, 0xcf, 0xbb, 0x28, 0x55, 0xe2, + 0x66, 0x32, 0xd2, 0x2c, 0xd6, 0xd8, 0x9d, 0xa4, 0x19, 0xe3, 0xee, 0x97, 0x94, 0x17, 0x67, 0x89, + 0x77, 0x45, 0x8e, 0xc0, 0xef, 0xd6, 0xb9, 0xf3, 0x62, 0xc5, 0xb2, 0xdf, 0x81, 0x63, 0x4c, 0xae, + 0x39, 0x6d, 0x78, 0x08, 0x1c, 0x77, 0x3e, 0x48, 0xde, 0x01, 0x12, 0x73, 0x6a, 0x3c, 0xef, 0xc4, + 0x8b, 0x48, 0x4a, 0xc0, 0x53, 0x70, 0x42, 0xc3, 0xb2, 0x91, 0xea, 0x0a, 0x8b, 0x0a, 0xd2, 0x63, + 0x9a, 0xaf, 0x8b, 0x11, 0xd0, 0xc4, 0x96, 0xb2, 0x4f, 0xd5, 0x85, 0x3e, 0xb4, 0x57, 0x6b, 0xb2, + 0x48, 0xd3, 0x94, 0x09, 0x26, 0xfd, 0x4d, 0xe7, 0x19, 0x10, 0xa2, 0xfd, 0xd2, 0x76, 0x0a, 0xf6, + 0x0e, 0x7f, 0x2b, 0x25, 0x7f, 0x03, 0xd5, 0x36, 0x56, 0x72, 0xe5, 0x75, 0xec, 0x71, 0xdc, 0x2c, + 0x1a, 0x48, 0x18, 0x62, 0x1b, 0x4c, 0x82, 0x95, 0xbc, 0x81, 0x74, 0x01, 0x50, 0x5c, 0xd1, 0x26, + 0xb8, 0x15, 0xb7, 0xa3, 0x31, 0xbd, 0xe8, 0x44, 0xb0, 0x99, 0x67, 0x99, 0xd9, 0xc3, 0x59, 0x1b, + 0xfc, 0xa2, 0x5d, 0x58, 0x4d, 0x38, 0xac, 0x9c, 0x02, 0xeb, 0x68, 0xd2, 0xb3, 0xb7, 0x3b, 0xa3, + 0x23, 0x8a, 0x8f, 0x76, 0x0c, 0x15, 0xa1, 0x8e, 0xc0, 0xe0, 0x58, 0x9c, 0x88, 0xdd, 0x4c, 0x5a, + 0x94, 0x7c, 0x3c, 0x3e, 0x73, 0xf7, 0x99, 0x6f, 0x13, 0x29, 0xd9, 0x6a, 0x43, 0x0e, 0x9f, 0x2f, + 0x36, 0x3a, 0x99, 0x69, 0x17, 0x02, 0xa4, 0x6e, 0x89, 0xd8, 0xfa, 0xb9, 0xfd, 0x65, 0x66, 0x54, + 0x24, 0x7b, 0x7c, 0x70, 0xd0, 0x5b, 0xbb, 0xff, 0xf3, 0x42, 0x46, 0x53, 0x48, 0x31, 0xf4, 0x53, + 0x56, 0xa4, 0x19, 0x34, 0x77, 0xbc, 0x4c, 0xa2, 0x7b, 0x3f, 0x59, 0xc1, 0x12, 0x1a, 0x81, 0x8f, + 0xdb, 0xd3, 0x38, 0xdf, 0xd5, 0xd2, 0x6d, 0xf1, 0x08, 0xd5, 0xed, 0x79, 0x22, 0xa8, 0x50, 0x93, + 0x81, 0xb1, 0xd9, 0xb7, 0x4d, 0x2f, 0x36, 0x85, 0xd2, 0xab, 0x89, 0xac, 0xcd, 0xb3, 0xbf, 0x80, + 0x67, 0xae, 0x9e, 0x7b, 0x3c, 0xb4, 0x79, 0x52, 0xab, 0x3f, 0x14, 0x7e, 0x5b, 0xa8, 0x91, 0x73, + 0x9e, 0x27, 0x21, 0x19, 0x85, 0x4f, 0x86, 0xae, 0xf8, 0xb0, 0xd7, 0xea, 0x8c, 0xb3, 0xed, 0xba, + 0x26, 0xa7, 0x20, 0xb2, 0xa7, 0xb3, 0xae, 0x2e, 0xb2, 0x4a, 0x77, 0xde, 0x1a, 0xc4, 0x11, 0xe1, + 0xdb, 0x42, 0x92, 0x34, 0xbc, 0xdb, 0x43, 0x0f, 0xd5, 0xb7, 0x29, 0xe5, 0xd7, 0xfe, 0x93, 0xc6, + 0x29, 0x6f, 0x32, 0x56, 0xbf, 0x13, 0x0f, 0xb7, 0x4f, 0x28, 0x54, 0xd4, 0xac, 0xf6, 0xfb, 0xe1, + 0x41, 0x94, 0x93, 0x29, 0xc9, 0xda, 0x35, 0x7a, 0x6c, 0xd5, 0x6a, 0x2d, 0x61, 0x32, 0x76, 0xca, + 0x64, 0x55, 0xd5, 0xce, 0xfb, 0x10, 0x1e, 0xdd, 0x44, 0xd0, 0xaf, 0x3a, 0x4b, 0xeb, 0x6c, 0x8d, + 0x43, 0x89, 0x30, 0x7f, 0xcc, 0xa6, 0x73, 0x78, 0x07, 0xa4, 
0xea, 0x56, 0xed, 0x56, 0xb8, 0xa9, + 0xb7, 0xb0, 0x74, 0x4e, 0xaf, 0xaf, 0x64, 0x44, 0x97, 0x5b, 0x7b, 0xd8, 0x87, 0x37, 0x55, 0x7f, + 0x16, 0x5c, 0x52, 0xf8, 0x55, 0x83, 0x03, 0x87, 0x52, 0xb8, 0x52, 0xb9, 0xc5, 0x3f, 0xb2, 0x47, + 0x27, 0x70, 0x91, 0x02, 0xf2, 0xbc, 0x3a, 0x31, 0xd9, 0x65, 0xcf, 0x81, 0x53, 0xec, 0xc6, 0x48, + 0xa5, 0x42, 0xd1, 0xdd, 0xbb, 0xc1, 0x06, 0x26, 0x05, 0x37, 0xd6, 0x7a, 0xf3, 0xc6, 0x79, 0xe6, + 0x95, 0xeb, 0x2b, 0x62, 0x8f, 0xfc, 0x28, 0xfa, 0x4b, 0xaa, 0x8e, 0xce, 0xef, 0xbd, 0x97, 0x25, + 0xbc, 0x97, 0x20, 0x6f, 0x1b, 0x3d, 0xb3, 0x33, 0x83, 0x7d, 0x54, 0xeb, 0x1b, 0x48, 0xe4, 0xd2, + 0x19, 0x47, 0x61, 0x83, 0xbf, 0x42, 0xbe, 0xec, 0x86, 0xae, 0xa2, 0x09, 0x01, 0x42, 0x5c, 0xd8, + 0xc3, 0x53, 0x33, 0x7a, 0x01, 0x4b, 0x4f, 0x99, 0xc6, 0xbd, 0xa2, 0x2d, 0x11, 0x38, 0xbe, 0xac, + 0xc1, 0x6e, 0x3e, 0xba, 0x13, 0x50, 0x73, 0xf8, 0xd1, 0x8d, 0x1e, 0x0b, 0xb5, 0xed, 0x46, 0x7b, + 0x55, 0x25, 0x63, 0x92, 0x58, 0x50, 0x02, 0xc8, 0xb7, 0x9d, 0x65, 0x62, 0xb0, 0x15, 0x12, 0x15, + 0x98, 0x8c, 0xbc, 0x34, 0x21, 0x5e, 0x35, 0x5f, 0xc9, 0x00, 0x97, 0xcd, 0xab, 0xb6, 0x46, 0xc6, + 0x2e, 0xb8, 0xa3, 0x85, 0x65, 0xd8, 0x96, 0x7a, 0xa7, 0x76, 0x6d, 0x3c, 0x2d, 0x04, 0x61, 0xe4, + 0x6c, 0xd6, 0x3f, 0x6b, 0xfe, 0x02, 0x69, 0x56, 0x57, 0x07, 0xd6, 0x15, 0x07, 0x64, 0x24, 0x74, + 0x0d, 0xbe, 0xdf, 0x0f, 0x8e, 0x57, 0x82, 0x87, 0x07, 0x83, 0xc9, 0x92, 0x5d, 0xec, 0xcc, 0x12, + 0x97, 0x23, 0x2f, 0x31, 0xe5, 0xfc, 0x76, 0x39, 0xc7, 0xb9, 0xf4, 0x03, 0xe2, 0x97, 0xbc, 0x3e, + 0x79, 0x28, 0xff, 0x60, 0xd1, 0x4b, 0x2c, 0x67, 0xc3, 0xe0, 0xcf, 0xc0, 0xaa, 0x12, 0xcb, 0xac, + 0xef, 0xa8, 0xaf, 0xea, 0x1a, 0xc5, 0x99, 0xf5, 0x92, 0xe6, 0xc3, 0xfe, 0x4a, 0x9f, 0x76, 0x73, + 0x8b, 0x10, 0xb7, 0x38, 0x43, 0x9e, 0x18, 0x54, 0x58, 0xa2, 0xcd, 0xd6, 0xe6, 0xcf, 0x5c, 0x7a, + 0x8e, 0x2f, 0x00, 0x64, 0xb5, 0xd1, 0x37, 0x15, 0xf8, 0xc6, 0xd4, 0xec, 0x4a, 0x4e, 0xf7, 0x7b, + 0x2e, 0x7a, 0xd7, 0x97, 0xd1, 0xcc, 0xd3, 0x26, 0xcb, 0x17, 0x8b, 0x23, 0xa8, 0xd8, 0x41, 0x81, + 0x86, 0x22, 0xd6, 0xd3, 0x55, 0xe3, 0x93, 0x6b, 0x25, 0x6b, 0x24, 0x02, 0xde, 0x34, 0x4f, 0xc0, + 0x4d, 0x70, 0xe9, 0x3a, 0x58, 0x7b, 0x26, 0x24, 0x17, 0xf6, 0x30, 0x11, 0xde, 0xcf, 0x0e, 0x0c, + 0xdb, 0xcb, 0x94, 0x9a, 0x1f, 0x4e, 0x26, 0x3d, 0x8b, 0x31, 0xfd, 0x03, 0x1f, 0x08, 0x8a, 0xa6, + 0x14, 0x9d, 0x74, 0x61, 0xea, 0x0d, 0x5d, 0x6c, 0x95, 0x1d, 0x32, 0x78, 0x9e, 0xec, 0xad, 0x15, + 0xc1, 0xf5, 0x1b, 0xe6, 0xbf, 0x7e, 0xcf, 0xef, 0x24, 0xc1, 0x66, 0xee, 0x0a, 0xf5, 0x99, 0x18, + 0x46, 0x73, 0x93, 0x0c, 0xcf, 0x16, 0x5f, 0x43, 0x74, 0x9e, 0xfa, 0x0d, 0xc9, 0x85, 0xb1, 0x12, + 0xf4, 0x10, 0x04, 0xbf, 0x3e, 0xa4, 0x38, 0x32, 0x6a, 0xa4, 0xc7, 0x1a, 0x0a, 0xc6, 0x6f, 0x43, + 0x24, 0x51, 0x4a, 0x69, 0xe5, 0xaa, 0xf5, 0xbf, 0xfb, 0x59, 0x7d, 0x57, 0xef, 0x1b, 0xa3, 0x99, + 0x79, 0x94, 0xc4, 0xfb, 0xcc, 0x3f, 0x44, 0x35, 0xff, 0xce, 0x75, 0xfe, 0x9d, 0xa6, 0x99, 0xf3, + 0x65, 0x15, 0xbc, 0x74, 0xc2, 0x19, 0xbe, 0x57, 0x05, 0x1a, 0xdd, 0x68, 0x5c, 0xeb, 0x54, 0x8d, + 0x9c, 0x89, 0xc3, 0x5c, 0xd2, 0x15, 0x4d, 0x7b, 0x0e, 0x94, 0x24, 0xd9, 0x8e, 0x72, 0x5d, 0x22, + 0x05, 0x85, 0xef, 0x24, 0x89, 0x29, 0xb4, 0x8a, 0xfd, 0x2e, 0xf9, 0x0d, 0x13, 0xed, 0x50, 0xd4, + 0xb4, 0x0e, 0x02, 0xf7, 0x6b, 0x6a, 0x98, 0x33, 0xb2, 0xdd, 0x5a, 0xc5, 0xa6, 0x86, 0x1a, 0xb9, + 0xea, 0x97, 0x16, 0xb7, 0xd0, 0xb9, 0x9a, 0xb4, 0x29, 0x12, 0xf3, 0x47, 0x20, 0xc2, 0x41, 0x8b, + 0x26, 0x71, 0xfd, 0x94, 0x44, 0x24, 0xd8, 0xd1, 0x93, 0xd8, 0x87, 0x4b, 0x30, 0x19, 0x6d, 0x97, + 0x2c, 0x7d, 0x4c, 0x48, 0xe5, 0x5d, 0x3e, 0xaf, 0x9b, 0xd5, 0x7f, 0x7a, 0x69, 0x91, 
0x25, 0xba, + 0x58, 0x18, 0x5c, 0x2d, 0x36, 0x82, 0x4a, 0x4e, 0x8c, 0x81, 0xa9, 0x67, 0xf0, 0xec, 0xf3, 0x0d, + 0x28, 0x02, 0x49, 0xb3, 0xc5, 0xa8, 0x03, 0x04, 0xfd, 0x0a, 0x7e, 0xd8, 0xb3, 0x6e, 0xaf, 0xda, + 0x23, 0x9a, 0xed, 0x77, 0x31, 0x63, 0x79, 0x0c, 0xac, 0x13, 0x2c, 0xa7, 0xe3, 0x77, 0x03, 0x6f, + 0xc5, 0x8a, 0x34, 0xab, 0xbc, 0x12, 0x4b, 0xb5, 0x6d, 0xb5, 0x72, 0x1c, 0xfe, 0x4d, 0x44, 0x1f, + 0x4a, 0xdf, 0xc7, 0x3c, 0x77, 0xdb, 0xcd, 0xf7, 0x75, 0xdf, 0x6e, 0x4b, 0x33, 0xc0, 0xe2, 0x08, + 0x57, 0xe6, 0x40, 0x36, 0x31, 0xfd, 0x7c, 0x51, 0x61, 0x57, 0x9d, 0xf2, 0x02, 0xbd, 0xd1, 0x52, + 0x76, 0x7e, 0x88, 0xb0, 0x49, 0xed, 0x88, 0xd6, 0xb7, 0x37, 0x68, 0x04, 0xe8, 0xf3, 0x85, 0xcc, + 0x97, 0x2c, 0xf9, 0x71, 0xb9, 0xcb, 0x7f, 0xa8, 0x02, 0x4c, 0xe1, 0x9b, 0x2e, 0x3c, 0x9f, 0xc5, + 0xcf, 0x6e, 0xa9, 0xde, 0x7f, 0xb2, 0xfa, 0x1e, 0x9c, 0x10, 0xe1, 0x9f, 0x00, 0x6e, 0x55, 0xbc, + 0xa8, 0xc9, 0x79, 0x4a, 0x95, 0x8a, 0x80, 0x8c, 0xd2, 0x92, 0x57, 0x0f, 0xcd, 0x58, 0xe1, 0xae, + 0x82, 0x7d, 0x13, 0xd0, 0x13, 0xe1, 0x92, 0xa6, 0x80, 0x16, 0x5a, 0xa8, 0x95, 0xc3, 0x80, 0x02, + 0x70, 0x48, 0x4c, 0x10, 0xd5, 0x6c, 0xfa, 0x49, 0x20, 0x1f, 0xb0, 0x1e, 0x59, 0xe4, 0xa3, 0x0c, + 0xb7, 0xba, 0xd0, 0x25, 0x78, 0xb3, 0xe2, 0x4b, 0xd1, 0x35, 0x7d, 0xb0, 0x8f, 0xdd, 0x19, 0xa9, + 0xac, 0x85, 0xe6, 0xa3, 0xc7, 0x03, 0x14, 0x61, 0x3f, 0xb7, 0x1e, 0x90, 0x66, 0xae, 0x6a, 0xfb, + 0xc1, 0x58, 0xa2, 0x18, 0x6b, 0x66, 0xd5, 0x76, 0x8f, 0x16, 0xfc, 0xdf, 0x7a, 0x45, 0x4a, 0xe1, + 0xd5, 0x81, 0xb1, 0xb8, 0x7d, 0xd1, 0x98, 0xc8, 0x54, 0x42, 0xaa, 0x95, 0x16, 0xe3, 0xc7, 0x9c, + 0x00, 0x9f, 0x88, 0x77, 0xd7, 0x88, 0xd3, 0x30, 0x32, 0xf5, 0x7a, 0xd6, 0x06, 0x5f, 0xcc, 0xac, + 0xf3, 0x6f, 0xc2, 0xe3, 0xeb, 0x17, 0x69, 0xd1, 0x12, 0x0e, 0x5c, 0xa7, 0xd7, 0xfa, 0x8c, 0xe9, + 0x6a, 0xc2, 0x09, 0x8f, 0xd7, 0xc4, 0x93, 0x50, 0x62, 0xd4, 0xa1, 0xdc, 0x58, 0x11, 0x18, 0x60, + 0xcc, 0xcd, 0x44, 0x25, 0x94, 0x14, 0x17, 0xb4, 0x49, 0xca, 0x67, 0x39, 0x1c, 0x72, 0xf1, 0xba, + 0xbe, 0x4d, 0x69, 0x3b, 0x92, 0x09, 0xb1, 0x2b, 0x65, 0xa0, 0x6a, 0xfe, 0x54, 0x9a, 0x8f, 0x3b, + 0xe6, 0xa0, 0x64, 0x8c, 0x68, 0x43, 0x6a, 0x6a, 0xfc, 0xc7, 0x08, 0xed, 0x42, 0x75, 0x40, 0xbc, + 0x56, 0x5b, 0xe3, 0x4c, 0x58, 0xfd, 0xf0, 0x3c, 0xba, 0x08, 0xc7, 0x02, 0xf4, 0xfc, 0x7e, 0xf7, + 0x0f, 0x3c, 0xb9, 0x47, 0xaa, 0x62, 0xa5, 0x25, 0xfa, 0xf8, 0x28, 0x57, 0x4e, 0x51, 0x8a, 0xe4, + 0xe3, 0xab, 0x46, 0xd9, 0xf5, 0xc9, 0x32, 0x73, 0xa6, 0x54, 0x0c, 0x01, 0x2c, 0x55, 0xff, 0xfb, + 0xbc, 0x0c, 0x12, 0x9d, 0xd4, 0x87, 0x51, 0x30, 0xea, 0xb0, 0xc9, 0x37, 0xd5, 0x61, 0x82, 0x16, + 0x85, 0xcd, 0x6e, 0x85, 0xbc, 0x29, 0x68, 0x0b, 0x62, 0x9b, 0x06, 0xca, 0x9a, 0x86, 0xee, 0x53, + 0x6d, 0x86, 0x52, 0xec, 0xa4, 0x46, 0x94, 0x64, 0xf8, 0xc5, 0x12, 0x3f, 0xa7, 0xe1, 0x94, 0xc7, + 0x26, 0x98, 0x14, 0x9d, 0xf8, 0x19, 0xbf, 0x84, 0xca, 0x11, 0x4f, 0x35, 0xff, 0x55, 0x62, 0xc3, + 0x6a, 0x56, 0x9f, 0x79, 0xd3, 0xd7, 0xf7, 0xe4, 0x7a, 0xca, 0xa8, 0x86, 0x86, 0x77, 0xf8, 0x5b, + 0x89, 0x9a, 0x6e, 0x53, 0xc9, 0x9b, 0x89, 0x3c, 0x8b, 0x0b, 0x3b, 0xa4, 0x26, 0xc4, 0x5a, 0x77, + 0x11, 0x0a, 0xb7, 0x56, 0x5a, 0x90, 0x72, 0xa0, 0x69, 0x4e, 0x3b, 0x65, 0xf0, 0x80, 0xda, 0xfc, + 0x58, 0x25, 0x67, 0x2a, 0xd0, 0x85, 0x98, 0x83, 0x40, 0xac, 0xa2, 0xeb, 0x25, 0x82, 0xa7, 0x37, + 0x60, 0xd8, 0x08, 0x86, 0xf5, 0x8c, 0xd6, 0xc3, 0x84, 0xdb, 0x24, 0x89, 0x32, 0x5b, 0x80, 0xd3, + 0x72, 0xbc, 0x4f, 0xaa, 0x18, 0xf5, 0xe3, 0xca, 0xd6, 0x51, 0xe0, 0x8a, 0xa9, 0x9a, 0x18, 0xa3, + 0x90, 0xcc, 0x4c, 0x14, 0x64, 0x5b, 0x15, 0x0b, 0x0a, 0x23, 0xc8, 0xcd, 0xe0, 0xf8, 0x65, 0x04, + 0xb6, 0xb1, 
0x12, 0x7b, 0x99, 0x52, 0x25, 0x07, 0x2d, 0xb2, 0x9e, 0xb5, 0x17, 0x8b, 0xa2, 0x3f, + 0x69, 0xf9, 0xe8, 0xaf, 0x26, 0xaa, 0xfc, 0xa1, 0xe0, 0x5c, 0xb7, 0x0e, 0x0a, 0x8c, 0x99, 0xe1, + 0x1c, 0xe5, 0x20, 0xce, 0x2c, 0x77, 0xfa, 0xd6, 0x45, 0x7a, 0xa4, 0xbb, 0x76, 0x36, 0xcc, 0x05, + 0x0a, 0xf2, 0xdc, 0x44, 0xb4, 0x37, 0xd6, 0x80, 0x7f, 0x68, 0x19, 0x0f, 0x2b, 0x9d, 0xb3, 0x85, + 0x30, 0xd4, 0xbd, 0xcc, 0x70, 0x8f, 0xec, 0x0c, 0x66, 0xb9, 0x6a, 0xfa, 0xd9, 0x2a, 0xfd, 0x67, + 0x4f, 0xd2, 0x10, 0x89, 0x15, 0xc2, 0x3b, 0xa5, 0x89, 0xc3, 0x54, 0xc1, 0x3f, 0x65, 0x75, 0xa4, + 0xf9, 0x13, 0xc0, 0x40, 0x61, 0xf8, 0xec, 0x3e, 0xb6, 0xec, 0x0a, 0xd2, 0xf3, 0x22, 0x0b, 0xc8, + 0x12, 0xc7, 0xaa, 0xa7, 0x42, 0xdc, 0xb4, 0x0e, 0x95, 0xb0, 0xa4, 0xab, 0x1b, 0xaf, 0xcb, 0x8b, + 0x53, 0xec, 0xfb, 0x2a, 0x38, 0x17, 0xe1, 0x4e, 0xb2, 0xcb, 0xab, 0x87, 0x71, 0x08, 0x0f, 0x20, + 0x5d, 0xfd, 0x94, 0x95, 0x85, 0x0d, 0x16, 0x5f, 0x9e, 0x9d, 0x47, 0xe0, 0x9e, 0xbb, 0x1e, 0x5d, + 0xcd, 0xd2, 0xd3, 0x49, 0x4a, 0xf1, 0x94, 0x2b, 0xd7, 0x08, 0x8f, 0x03, 0xa1, 0x61, 0x1c, 0x6f, + 0x44, 0x46, 0x9f, 0x3c, 0xa1, 0x2a, 0xb5, 0x80, 0xc6, 0x58, 0x15, 0x75, 0xab, 0x9b, 0xc8, 0xce, + 0xe9, 0x2b, 0xe1, 0x3c, 0x3c, 0xef, 0x07, 0x46, 0x3b, 0xf9, 0xa3, 0xd5, 0x96, 0x1a, 0x25, 0x6d, + 0x89, 0x65, 0x6e, 0xcf, 0xa6, 0xb5, 0x93, 0xfb, 0xad, 0x54, 0x0f, 0xcd, 0x3e, 0x10, 0xc6, 0xe1, + 0x2a, 0x39, 0x2a, 0x83, 0x71, 0x49, 0x2e, 0x46, 0x16, 0x6f, 0x25, 0xbf, 0x7d, 0x50, 0x44, 0xe3, + 0xed, 0x27, 0xba, 0x61, 0x36, 0x17, 0xda, 0x1a, 0x61, 0x41, 0x6d, 0xd9, 0x80, 0x96, 0x35, 0xe9, + 0x2c, 0x08, 0xc3, 0xd3, 0x69, 0x9d, 0x27, 0xba, 0xc6, 0x40, 0x71, 0xcf, 0xa9, 0x25, 0x3d, 0x4d, + 0xee, 0x9a, 0x95, 0xa2, 0x83, 0x3a, 0xd7, 0xea, 0x8d, 0x23, 0x39, 0x5e, 0xcf, 0xf2, 0x04, 0xb3, + 0xdc, 0x8c, 0x2f, 0x45, 0x2c, 0x78, 0xcd, 0xdd, 0x1d, 0x56, 0x25, 0x9e, 0xf3, 0x9d, 0x58, 0xd6, + 0x7c, 0xd6, 0x85, 0xf9, 0xcb, 0x8c, 0x3e, 0xeb, 0x6d, 0xa3, 0x4e, 0xc9, 0x3f, 0xa0, 0x8e, 0x9f, + 0x95, 0x87, 0xa4, 0xc9, 0xd6, 0xcf, 0xba, 0x66, 0x90, 0x09, 0xa4, 0x02, 0xa9, 0x84, 0x09, 0xbe, + 0xf9, 0xb8, 0x0a, 0xd1, 0xb9, 0xc1, 0xdc, 0x34, 0xa0, 0xd5, 0xf1, 0x58, 0x64, 0x88, 0xd1, 0x81, + 0x9c, 0xbe, 0x74, 0x2a, 0x04, 0xc8, 0x5b, 0xdb, 0x21, 0x7f, 0x1b, 0xb9, 0x5a, 0x18, 0x21, 0x15, + 0xcd, 0x9f, 0x74, 0xbe, 0x55, 0x06, 0x6f, 0xad, 0xc6, 0x14, 0xb8, 0xf7, 0x3e, 0x13, 0x25, 0x7f, + 0xf4, 0x2f, 0x48, 0xd7, 0x07, 0xe7, 0xec, 0x8f, 0x19, 0x93, 0x9c, 0x12, 0x85, 0xf7, 0xcb, 0xc8, + 0xec, 0x9f, 0x4a, 0x1b, 0x79, 0x7c, 0xa9, 0x84, 0x81, 0xe8, 0x58, 0xeb, 0x73, 0x57, 0xf3, 0x2a, + 0x6d, 0x5a, 0xb4, 0x8d, 0x67, 0xe4, 0xd8, 0xb5, 0x28, 0x7b, 0xec, 0x80, 0xdf, 0xed, 0x96, 0x1f, + 0xff, 0x9e, 0x59, 0xf3, 0xe4, 0x54, 0x55, 0x4d, 0xd2, 0x08, 0x91, 0x37, 0xe0, 0xeb, 0x4b, 0x33, + 0x43, 0xe4, 0xa1, 0x93, 0xd1, 0xfc, 0xc8, 0x7d, 0xae, 0x46, 0xe2, 0xae, 0x98, 0x89, 0xf2, 0xa7, + 0xcd, 0x76, 0x1b, 0xfc, 0x5c, 0x1a, 0xf1, 0xbd, 0xf2, 0x73, 0xe0, 0xce, 0xda, 0x04, 0x17, 0x11, + 0x68, 0x11, 0xeb, 0xe0, 0x71, 0x4b, 0x56, 0x12, 0x5e, 0xd3, 0xa5, 0x3e, 0x3a, 0x6f, 0x7f, 0xf7, + 0x52, 0x64, 0x4c, 0x0c, 0x9f, 0x8b, 0x29, 0x92, 0xc9, 0xa4, 0xe6, 0x58, 0x05, 0xc3, 0xb6, 0xb2, + 0x97, 0xa1, 0x73, 0x02, 0x02, 0x5e, 0x74, 0xa4, 0xd3, 0xc4, 0x58, 0x7f, 0xab, 0xf4, 0xb2, 0x1f, + 0x9a, 0xb1, 0xbc, 0xe5, 0x3f, 0x62, 0x88, 0x94, 0xb2, 0x5d, 0x35, 0xef, 0xe5, 0xfd, 0x54, 0xe4, + 0x60, 0xd8, 0x78, 0x88, 0xcf, 0xa2, 0x51, 0x55, 0xbb, 0x41, 0x09, 0x83, 0x95, 0x78, 0x9c, 0x25, + 0xab, 0x9b, 0xf8, 0x4b, 0xf7, 0x81, 0x60, 0x22, 0xe9, 0x29, 0xce, 0xc8, 0x90, 0x46, 0x36, 0x63, + 0xbe, 0xf3, 0xd0, 0x6c, 0x33, 0x04, 
0x38, 0xf9, 0x2c, 0x0c, 0x17, 0x14, 0x43, 0xf9, 0x9d, 0xbb, + 0x24, 0x12, 0x1d, 0xbc, 0xfe, 0x71, 0x48, 0x4d, 0xfb, 0x27, 0xb8, 0xd6, 0x5e, 0xd8, 0x12, 0xf9, + 0xd9, 0xe6, 0x26, 0x87, 0x99, 0x8b, 0x79, 0xfb, 0x76, 0xaa, 0x4d, 0x8a, 0xf8, 0x3d, 0x96, 0x8e, + 0x73, 0x8d, 0x5c, 0xd9, 0x36, 0x1b, 0xe5, 0xf0, 0xee, 0xf9, 0xa8, 0x4d, 0xc7, 0xb4, 0xf4, 0x86, + 0x51, 0x37, 0x66, 0x74, 0xe8, 0x67, 0x0e, 0x15, 0x27, 0x47, 0x9a, 0x3a, 0x73, 0x3f, 0x60, 0xcc, + 0x67, 0x8f, 0xe6, 0xcd, 0x75, 0x59, 0xbe, 0xfa, 0x67, 0x78, 0x75, 0xf1, 0x17, 0x71, 0x5c, 0xad, + 0x53, 0xd5, 0x9b, 0x92, 0xca, 0x12, 0x6e, 0x69, 0x85, 0x20, 0x3f, 0x8e, 0x04, 0xdb, 0xb2, 0xca, + 0xeb, 0x60, 0x0a, 0x6f, 0x0d, 0x60, 0x0f, 0x1b, 0xa0, 0x49, 0x11, 0x01, 0xf4, 0x36, 0xc8, 0x3f, + 0x6a, 0xf4, 0xb8, 0xa7, 0xc1, 0x3f, 0xa4, 0x05, 0x36, 0x46, 0x40, 0xcb, 0x94, 0xf5, 0xd1, 0x26, + 0xd9, 0x2f, 0x39, 0x1e, 0x1f, 0x9e, 0xe9, 0x4a, 0xd0, 0xdd, 0x79, 0xd9, 0x36, 0x5a, 0x3c, 0xdd, + 0xf0, 0xbe, 0x98, 0xd7, 0xa0, 0xe4, 0xaf, 0xd5, 0xfe, 0x47, 0xc2, 0x1d, 0x9a, 0x51, 0xa8, 0x3a, + 0xc8, 0xdd, 0x62, 0xce, 0xc6, 0xbb, 0x6f, 0x8b, 0x60, 0xdf, 0xb6, 0x33, 0x01, 0xfe, 0x93, 0x41, + 0x62, 0x34, 0x7a, 0x7d, 0x06, 0x3a, 0x4d, 0x3b, 0xbe, 0xb4, 0xb9, 0xfe, 0xcc, 0x85, 0xed, 0x15, + 0x0c, 0x71, 0xa6, 0xae, 0xb3, 0xd2, 0x24, 0xf1, 0xcd, 0x59, 0x86, 0xa8, 0x7e, 0xe4, 0xda, 0xc0, + 0x38, 0x2c, 0x7d, 0xbc, 0x0e, 0xf3, 0x6b, 0x67, 0x0a, 0xb4, 0x1e, 0x71, 0x6f, 0x52, 0x8a, 0x77, + 0xe5, 0x55, 0xca, 0x6c, 0xcb, 0xab, 0xa6, 0xfa, 0x15, 0xa0, 0xaa, 0x4a, 0x70, 0x6f, 0xb5, 0x1f, + 0x48, 0x31, 0xcb, 0x14, 0x93, 0xbf, 0xd4, 0x38, 0x11, 0xf6, 0xe1, 0x9a, 0x1a, 0x6b, 0xde, 0x63, + 0x38, 0x08, 0xae, 0xb5, 0xec, 0x02, 0x2b, 0xcb, 0x46, 0x9e, 0xed, 0x51, 0x1b, 0x97, 0xc5, 0x8b, + 0xa4, 0x62, 0xe7, 0xb2, 0xc4, 0xb8, 0xba, 0x87, 0x6b, 0x41, 0xa4, 0x5e, 0xfe, 0xdc, 0x6c, 0xb5, + 0xbb, 0x12, 0x66, 0xfc, 0x9b, 0xdf, 0xbe, 0x1d, 0xf6, 0x5f, 0xfd, 0x4a, 0xaa, 0x9f, 0x64, 0xed, + 0x7c, 0x72, 0x72, 0xbf, 0x65, 0x6b, 0x23, 0x05, 0x3d, 0xdd, 0xf5, 0x8b, 0x0a, 0xa4, 0x12, 0x8b, + 0xa4, 0x45, 0x80, 0x7b, 0x1d, 0x0c, 0x01, 0x24, 0x60, 0xaa, 0xd1, 0xb3, 0xf1, 0x24, 0x4a, 0xcd, + 0x37, 0x3a, 0x1d, 0x82, 0x8e, 0x9f, 0xbe, 0x5b, 0xcf, 0x40, 0x38, 0xf5, 0x4b, 0x0d, 0x53, 0x9f, + 0x72, 0x2a, 0x76, 0xb9, 0x34, 0xdf, 0x60, 0x47, 0x0a, 0x52, 0xb8, 0x9d, 0xc3, 0xa5, 0xa4, 0x42, + 0xb2, 0xe2, 0xf0, 0xe6, 0x24, 0xb1, 0x91, 0x8c, 0x5f, 0x05, 0xb6, 0x00, 0xf3, 0x74, 0x57, 0xc0, + 0xa7, 0x2a, 0x44, 0x78, 0x17, 0xab, 0xa7, 0x83, 0xa5, 0xcc, 0x84, 0x53, 0x93, 0x1c, 0xa7, 0x67, + 0xbc, 0x89, 0xe2, 0xc7, 0x1a, 0x28, 0x0d, 0x07, 0x21, 0xfe, 0x50, 0x01, 0xbd, 0xf9, 0x2d, 0x08, + 0x42, 0x6f, 0xfb, 0x28, 0xc8, 0x57, 0x84, 0xf9, 0xd1, 0xa7, 0xb3, 0x7e, 0xc6, 0x63, 0x77, 0x13, + 0xb7, 0x12, 0x50, 0xc8, 0x5b, 0x35, 0x23, 0xf2, 0x86, 0x35, 0xe2, 0x10, 0xbb, 0x7f, 0xdf, 0x3c, + 0x4b, 0xe3, 0xce, 0x99, 0x6e, 0x08, 0x65, 0x18, 0x0b, 0x52, 0xb9, 0xd8, 0x62, 0x90, 0x16, 0x8f, + 0x3d, 0xe4, 0xcc, 0x9a, 0xe9, 0x4e, 0x5c, 0x7d, 0x5b, 0x7a, 0xda, 0x17, 0x9f, 0x73, 0x5b, 0xeb, + 0xcf, 0x4e, 0x3e, 0x5a, 0xde, 0xaa, 0xb5, 0x9d, 0x2c, 0x92, 0xea, 0x53, 0xad, 0x66, 0xf6, 0x67, + 0x65, 0x7d, 0x29, 0xaa, 0xb7, 0x3b, 0x1b, 0x22, 0xe6, 0xbe, 0x56, 0x3b, 0xdd, 0x5b, 0xd5, 0x50, + 0xc6, 0xe6, 0x2a, 0x3d, 0x3c, 0xa8, 0xea, 0x9a, 0xa3, 0x08, 0x7e, 0xc5, 0x43, 0xf6, 0x67, 0x0c, + 0xaf, 0x10, 0x2b, 0x58, 0xb7, 0xef, 0x1d, 0x0f, 0x35, 0x3e, 0xf9, 0x94, 0xd8, 0x39, 0x03, 0x31, + 0x8d, 0xf2, 0xe4, 0xd8, 0x6b, 0xdc, 0x9b, 0xa4, 0x7d, 0x5b, 0x79, 0x81, 0xb2, 0xb3, 0xfb, 0x9e, + 0x59, 0x95, 0xfd, 0x57, 0xf1, 0xdf, 0x0b, 0x40, 0xc5, 0xe8, 
0x08, 0x08, 0xbc, 0x9a, 0xb7, 0x0b, + 0x36, 0x80, 0xbc, 0x1d, 0x07, 0x63, 0xca, 0xa0, 0x71, 0xbf, 0x0e, 0xb8, 0x4f, 0xd8, 0x12, 0x25, + 0x84, 0x84, 0xc5, 0xb4, 0xb3, 0xa4, 0xc1, 0x2e, 0x5f, 0x92, 0x20, 0x50, 0xf4, 0x34, 0x40, 0x20, + 0x53, 0x84, 0x0c, 0x09, 0xd5, 0xb6, 0xd8, 0xb4, 0x6b, 0x93, 0x1f, 0xf1, 0x80, 0x6c, 0x5f, 0xa9, + 0x14, 0x56, 0xe7, 0x24, 0xa2, 0x6a, 0x62, 0x2d, 0x96, 0x9f, 0x48, 0x32, 0x2b, 0x99, 0x42, 0xb2, + 0xee, 0x07, 0x3a, 0x04, 0xe8, 0x20, 0x47, 0x6b, 0xd0, 0xa5, 0x0a, 0xb3, 0xd8, 0x11, 0x36, 0x70, + 0xc0, 0xa6, 0xd0, 0x9a, 0x94, 0xb6, 0x6b, 0xbe, 0x61, 0xd9, 0xc3, 0xf4, 0x46, 0x02, 0xbb, 0xf3, + 0xcf, 0xf0, 0xf7, 0x8b, 0x77, 0x9e, 0x71, 0x9a, 0xb9, 0x74, 0x57, 0x89, 0x45, 0x98, 0x9b, 0x2a, + 0x55, 0xf1, 0x85, 0xca, 0x4f, 0x35, 0xa6, 0x76, 0xb3, 0x6d, 0x8b, 0x76, 0xa8, 0xc7, 0xe5, 0x46, + 0x9d, 0x48, 0x3c, 0xdd, 0x0b, 0xd6, 0xb5, 0x40, 0x01, 0x3c, 0x57, 0xb5, 0xe0, 0xba, 0x11, 0xd4, + 0x21, 0xf9, 0x9f, 0xab, 0x7c, 0x53, 0x21, 0x2f, 0xc3, 0x60, 0xec, 0xaa, 0x40, 0x2b, 0x83, 0x59, + 0x3e, 0xf6, 0xbe, 0x8b, 0x29, 0x73, 0x3b, 0x4f, 0x45, 0x83, 0x39, 0xf6, 0x24, 0x25, 0xcc, 0x52, + 0x00, 0x7f, 0xce, 0x16, 0xb2, 0xf2, 0x58, 0x82, 0x92, 0xcc, 0x70, 0xf7, 0xdc, 0xc8, 0xa0, 0xe3, + 0xe7, 0x09, 0xd5, 0x35, 0x75, 0xd0, 0xca, 0x55, 0x79, 0x39, 0x19, 0xb5, 0x05, 0x25, 0xbd, 0x3a, + 0x93, 0x1f, 0x7f, 0x44, 0x85, 0x2e, 0x92, 0xb7, 0x04, 0xf9, 0x04, 0x53, 0x08, 0x2c, 0x32, 0xce, + 0x4e, 0x61, 0x40, 0x74, 0x05, 0xf4, 0x07, 0x26, 0xb8, 0x6b, 0x09, 0xd3, 0x45, 0xbd, 0x9b, 0x82, + 0xd4, 0xbd, 0x68, 0x76, 0x36, 0x78, 0xbd, 0x4c, 0xce, 0xb4, 0xd5, 0x1f, 0xae, 0x68, 0x81, 0x48, + 0x81, 0xe6, 0x48, 0x11, 0xdb, 0xa5, 0xfe, 0xa2, 0xc2, 0xaa, 0x83, 0xa1, 0x38, 0x60, 0xae, 0x2a, + 0x3b, 0x48, 0x9a, 0x1a, 0x71, 0x9a, 0xf6, 0x87, 0x02, 0x0c, 0xbd, 0xf4, 0xaf, 0x33, 0xb2, 0x1b, + 0x10, 0xe4, 0xde, 0xeb, 0x54, 0xdb, 0x8c, 0xc6, 0x95, 0x6d, 0x5f, 0x08, 0xee, 0xdb, 0x1e, 0x5f, + 0xef, 0x96, 0x50, 0xed, 0x7b, 0x14, 0x24, 0x11, 0x80, 0x8e, 0xdd, 0x57, 0xea, 0x19, 0xf0, 0x53, + 0xfb, 0xe7, 0xfe, 0x6f, 0xfd, 0x27, 0xe7, 0x66, 0xed, 0xaa, 0x68, 0x48, 0x3d, 0x91, 0x82, 0x25, + 0x1b, 0xd0, 0x43, 0xa5, 0x83, 0x2c, 0xc3, 0x83, 0xcf, 0x85, 0xce, 0x19, 0xc4, 0x65, 0xb6, 0x8f, + 0xd8, 0xfb, 0xdd, 0xe2, 0xa1, 0x31, 0xa2, 0x29, 0x03, 0x9c, 0xe1, 0x40, 0xfc, 0xdf, 0x6c, 0x0c, + 0xf1, 0xcb, 0xca, 0xb5, 0xc0, 0x65, 0xbc, 0xb3, 0x12, 0xa2, 0x0f, 0x55, 0x48, 0x0c, 0xf1, 0x50, + 0x69, 0x5c, 0x24, 0x60, 0x8d, 0xeb, 0x84, 0x1f, 0x9b, 0xfc, 0x0f, 0xbc, 0xfd, 0xe7, 0x36, 0x35, + 0xe1, 0x14, 0x2a, 0x33, 0x79, 0x4c, 0xf3, 0x42, 0x8c, 0xcb, 0xee, 0xbf, 0x3d, 0x0b, 0x89, 0x60, + 0x88, 0x66, 0x2b, 0x8e, 0xcb, 0x09, 0xcc, 0xe2, 0xa9, 0x8d, 0xd7, 0x49, 0x40, 0x7b, 0x9b, 0x53, + 0xca, 0x88, 0xcc, 0x91, 0x81, 0x47, 0x85, 0xcb, 0xa4, 0x6c, 0xf2, 0x40, 0xad, 0x39, 0x5a, 0x44, + 0xca, 0x99, 0x32, 0x0b, 0xf6, 0xcd, 0x55, 0x19, 0x45, 0x7a, 0x46, 0x61, 0x88, 0xb6, 0x9c, 0x74, + 0x78, 0xf1, 0x32, 0x55, 0x13, 0xd5, 0x77, 0xb8, 0x3a, 0x62, 0xeb, 0xc8, 0xbb, 0xbb, 0x30, 0x9f, + 0x9d, 0xd4, 0x19, 0x01, 0x72, 0x9e, 0x30, 0xa7, 0xd3, 0xdc, 0xe0, 0x35, 0xec, 0x54, 0xba, 0x7d, + 0xfd, 0xcd, 0xd5, 0x91, 0x4f, 0x38, 0x83, 0xa1, 0x50, 0x8d, 0x93, 0x12, 0x2b, 0x1d, 0x2d, 0x61, + 0xce, 0x41, 0xda, 0x26, 0xd3, 0x1d, 0xad, 0xe4, 0xeb, 0x56, 0x71, 0xe3, 0xbb, 0x15, 0x47, 0x62, + 0x7e, 0x60, 0xc0, 0x8b, 0xd7, 0xe7, 0xb4, 0xd6, 0xf3, 0xe7, 0x71, 0xb5, 0x6d, 0x36, 0x7a, 0x92, + 0x09, 0xb6, 0xc0, 0x9c, 0xa2, 0xa1, 0x85, 0x87, 0xc9, 0x82, 0x57, 0x7f, 0x07, 0xdd, 0x26, 0xfd, + 0x52, 0x8f, 0x5b, 0xad, 0xa3, 0x44, 0x8d, 0x70, 0x09, 0xdc, 0x5e, 0x8c, 0xfb, 0x87, 
0xf8, 0x05, + 0x58, 0x64, 0x01, 0x15, 0x3e, 0xb8, 0x5f, 0x6c, 0x85, 0xf4, 0xce, 0x6f, 0xd1, 0xa1, 0x7b, 0xf9, + 0xb5, 0x55, 0x53, 0x2f, 0x29, 0xca, 0xf9, 0xde, 0xb3, 0xed, 0x7a, 0x62, 0x69, 0x2a, 0xcd, 0x82, + 0xc2, 0xcd, 0x97, 0xaf, 0xc4, 0x43, 0x5a, 0x6d, 0xdd, 0x90, 0xb6, 0xf7, 0xcb, 0xfa, 0x69, 0xd9, + 0x92, 0x47, 0xbd, 0xd6, 0xce, 0x2b, 0x80, 0x0e, 0x5c, 0x77, 0x6d, 0xd4, 0x3d, 0xed, 0x89, 0x18, + 0x06, 0xde, 0x95, 0x36, 0xc3, 0xfc, 0x3d, 0x62, 0x51, 0xe0, 0x71, 0x7e, 0xc7, 0x84, 0xf2, 0xbc, + 0x5d, 0xad, 0x1a, 0x70, 0x8e, 0x70, 0x36, 0x2c, 0xbd, 0x10, 0xe3, 0x88, 0x06, 0xcb, 0xf5, 0xa0, + 0x21, 0x35, 0x59, 0xe0, 0xac, 0xa0, 0xd0, 0xc7, 0x48, 0xcb, 0xaf, 0x2a, 0x00, 0xca, 0xe6, 0x85, + 0x93, 0xf0, 0x69, 0x33, 0xdf, 0xb5, 0xeb, 0xcb, 0x78, 0xc9, 0x19, 0x57, 0xea, 0x59, 0xa7, 0xd8, + 0xd9, 0x82, 0x2d, 0xe8, 0xd9, 0xe3, 0x09, 0x9e, 0x48, 0x8e, 0xe3, 0x51, 0x27, 0x33, 0x9f, 0xa4, + 0x67, 0x8b, 0xa5, 0x80, 0xd5, 0x1c, 0xfa, 0x32, 0xf5, 0xf4, 0xdc, 0x6b, 0xee, 0x88, 0x52, 0x08, + 0xa8, 0xea, 0x42, 0x87, 0x60, 0x89, 0xe4, 0x35, 0xbf, 0x49, 0x4f, 0xf9, 0xaa, 0xb2, 0xeb, 0x6e, + 0x0e, 0x33, 0x7c, 0xa6, 0xef, 0xed, 0xb7, 0x4e, 0x78, 0x93, 0xd7, 0x7a, 0x00, 0x7e, 0xe5, 0x03, + 0xad, 0x9e, 0x80, 0xfa, 0x4e, 0x1e, 0x83, 0x8f, 0xb0, 0xea, 0x7f, 0xed, 0xc9, 0xad, 0x32, 0x5b, + 0xd2, 0x48, 0x70, 0xb4, 0x82, 0xd3, 0xe1, 0x86, 0xe9, 0x88, 0x30, 0x07, 0x8c, 0x4c, 0xb5, 0xa4, + 0x4d, 0x43, 0xf2, 0x66, 0x9d, 0x37, 0x4d, 0x2e, 0x67, 0xfc, 0x87, 0x39, 0x43, 0xad, 0x30, 0xd4, + 0xc6, 0xc1, 0x28, 0xff, 0xda, 0x47, 0x84, 0x7d, 0x59, 0x92, 0x8b, 0xdb, 0x4e, 0xff, 0x85, 0x8c, + 0xf8, 0x6f, 0x0d, 0x8c, 0x81, 0x33, 0xbc, 0xba, 0x54, 0x00, 0xcd, 0x10, 0xf8, 0xd9, 0x3a, 0x55, + 0x9d, 0xaa, 0x8a, 0x3e, 0x0a, 0xcf, 0xf8, 0xea, 0xc9, 0x56, 0x75, 0x96, 0x17, 0x67, 0xe7, 0x66, + 0x93, 0x75, 0xea, 0xb2, 0x5f, 0x78, 0xba, 0xfc, 0x15, 0x08, 0x65, 0x21, 0x6e, 0xa0, 0xf9, 0x66, + 0x08, 0xd8, 0xdc, 0xe2, 0x28, 0xf8, 0x81, 0x42, 0xe1, 0x79, 0x54, 0xbd, 0xbf, 0x4c, 0x94, 0xdf, + 0x86, 0x7b, 0x83, 0xb1, 0xf8, 0x9b, 0x4b, 0xbe, 0x85, 0x11, 0x62, 0x1c, 0x01, 0x7a, 0x0e, 0x30, + 0x7f, 0x49, 0x2d, 0xd5, 0xd2, 0x37, 0x4f, 0xa7, 0xe0, 0xf5, 0x2d, 0xcf, 0xb0, 0x62, 0x9a, 0xca, + 0x02, 0xd1, 0x34, 0x34, 0x64, 0x59, 0x33, 0x3a, 0x2d, 0x10, 0xfe, 0x0a, 0x79, 0xe8, 0x27, 0x6e, + 0x5c, 0x6f, 0x04, 0x28, 0x3b, 0xe6, 0x52, 0x86, 0x08, 0xbf, 0xa9, 0x74, 0x86, 0x80, 0x13, 0xea, + 0xc0, 0x61, 0xdf, 0xcd, 0x8e, 0x1d, 0x32, 0x98, 0xef, 0xe4, 0x42, 0x38, 0x3a, 0x07, 0x93, 0x5b, + 0x7f, 0xc9, 0xcd, 0xf8, 0x4d, 0x39, 0x2b, 0x53, 0x4d, 0xff, 0x14, 0x68, 0x51, 0x6f, 0x28, 0x35, + 0xb8, 0xcd, 0x02, 0x3c, 0xa9, 0x1b, 0xbe, 0x17, 0x61, 0x16, 0x68, 0x9c, 0xde, 0xcf, 0x34, 0xeb, + 0xc1, 0xdf, 0xb6, 0x18, 0x50, 0x19, 0x54, 0x56, 0x48, 0x2a, 0xb3, 0xc9, 0x88, 0xc8, 0x65, 0x2c, + 0x28, 0xe6, 0xf8, 0xe3, 0x6f, 0x7c, 0x96, 0xf9, 0xa5, 0x15, 0x1a, 0x81, 0xe0, 0x11, 0x01, 0x57, + 0xf5, 0xa4, 0x12, 0xbc, 0x67, 0x7c, 0xb6, 0x59, 0x98, 0x41, 0x2e, 0xcd, 0x26, 0x3c, 0x70, 0x71, + 0xec, 0x47, 0xf1, 0x90, 0x06, 0x02, 0x17, 0xba, 0xf0, 0x02, 0x08, 0xda, 0x22, 0xb0, 0x71, 0x33, + 0x38, 0x04, 0x2e, 0xa9, 0xc7, 0xde, 0xa0, 0x7d, 0x27, 0xc7, 0xb2, 0x5f, 0x03, 0xb8, 0x2c, 0x4b, + 0x81, 0x12, 0xb6, 0x9d, 0x2f, 0x9a, 0x64, 0x37, 0x2f, 0x92, 0xab, 0x6b, 0x3b, 0x7f, 0x71, 0xe7, + 0xa0, 0xd5, 0x80, 0xb2, 0x4a, 0xc4, 0xc5, 0xee, 0xcf, 0x17, 0xfd, 0x27, 0xa1, 0x0a, 0x19, 0xc7, + 0x32, 0xeb, 0x14, 0xbc, 0xe2, 0xdd, 0xe6, 0xa2, 0xa5, 0x95, 0xe1, 0x4a, 0xb0, 0xf7, 0x38, 0x78, + 0xd5, 0x26, 0x01, 0x75, 0x21, 0x76, 0x24, 0x09, 0x06, 0xd1, 0x35, 0x42, 0x85, 0x7b, 0x73, 0x35, + 0x04, 0x61, 
0x60, 0x4a, 0x3e, 0xe0, 0x9d, 0x08, 0x4f, 0x9f, 0x1d, 0xe3, 0xb2, 0x57, 0xe0, 0x0f, + 0x91, 0x12, 0xbd, 0xe3, 0xc3, 0xe0, 0x9f, 0xe1, 0x8a, 0xcf, 0x17, 0x07, 0x5d, 0xbf, 0x2c, 0x4c, + 0xe2, 0xdf, 0x86, 0xe6, 0x34, 0x57, 0xb9, 0x2e, 0x9f, 0x21, 0xd2, 0xd3, 0xd7, 0xb1, 0x10, 0xd6, + 0xe0, 0x78, 0xed, 0x4f, 0xd1, 0x5b, 0x28, 0xcc, 0xa9, 0xc4, 0x8b, 0xfc, 0x29, 0x79, 0x50, 0x85, + 0x50, 0xef, 0x6b, 0x0e, 0xe2, 0x8c, 0xd6, 0x99, 0x75, 0x18, 0xa0, 0x2b, 0xe5, 0x77, 0xd3, 0x1a, + 0x81, 0x77, 0x92, 0x12, 0x7e, 0x31, 0x1d, 0x87, 0x43, 0x09, 0x00, 0xa8, 0x56, 0xef, 0x6b, 0x68, + 0x10, 0x21, 0xc7, 0x2b, 0xc2, 0xa7, 0x5d, 0x83, 0x9f, 0x1a, 0x50, 0x90, 0x41, 0xb5, 0x22, 0xc9, + 0xf7, 0x4d, 0xce, 0x19, 0xd4, 0xa1, 0x11, 0xa8, 0xfa, 0x8c, 0x35, 0xfa, 0xeb, 0xa8, 0x3f, 0xb8, + 0x98, 0xcd, 0x3f, 0xf1, 0x15, 0xaa, 0xc6, 0xce, 0xdb, 0x22, 0x7c, 0xc3, 0xa3, 0x10, 0x7f, 0xbb, + 0x43, 0xad, 0xbb, 0x00, 0xc1, 0xa0, 0x8d, 0xc4, 0x73, 0x94, 0x31, 0x3c, 0x49, 0x1d, 0x52, 0xd1, + 0x87, 0x1a, 0xb5, 0x74, 0x59, 0xe7, 0xf0, 0x60, 0x7a, 0xd3, 0x27, 0xf4, 0x5d, 0x94, 0xdc, 0x62, + 0xbb, 0xb8, 0x49, 0xb1, 0x7f, 0x45, 0x63, 0x77, 0x91, 0x7d, 0x17, 0xbb, 0x2a, 0x8a, 0x20, 0x7b, + 0xc2, 0xca, 0xbf, 0x1a, 0x7e, 0x3b, 0x7e, 0x77, 0xa9, 0xe3, 0x78, 0x26, 0x37, 0xc1, 0xb1, 0x21, + 0x13, 0xb0, 0xda, 0x0e, 0xfa, 0x95, 0x36, 0x2c, 0xa9, 0x15, 0x29, 0xda, 0xff, 0x64, 0x1e, 0xb9, + 0xeb, 0xa9, 0xb8, 0x24, 0x1e, 0x55, 0xb5, 0xae, 0xb8, 0x5d, 0x72, 0x49, 0x54, 0x94, 0x2e, 0xd7, + 0xf4, 0xbc, 0x57, 0x4e, 0x89, 0xfa, 0x88, 0xb7, 0x66, 0x88, 0xb7, 0xb5, 0x4b, 0x40, 0xd0, 0xa1, + 0x55, 0x9c, 0x80, 0x41, 0x8a, 0xc3, 0x3b, 0xbc, 0xa8, 0x0e, 0xf9, 0xfe, 0xf8, 0x90, 0x73, 0x1e, + 0x3a, 0xa4, 0xad, 0x33, 0xcc, 0xda, 0x94, 0xfc, 0x6f, 0x7d, 0x9e, 0x74, 0x3f, 0x99, 0x2d, 0xbc, + 0xd4, 0x4b, 0x58, 0x29, 0x9a, 0xa6, 0x44, 0xa6, 0xb2, 0x10, 0xd9, 0xeb, 0x08, 0xf4, 0xd8, 0x21, + 0x33, 0x40, 0xb4, 0x0c, 0x12, 0xfe, 0x0e, 0x2e, 0xcf, 0xc4, 0x74, 0x43, 0xa6, 0x14, 0x09, 0x7d, + 0x30, 0xc3, 0x52, 0xac, 0x4a, 0x80, 0x86, 0xf2, 0x28, 0xe7, 0x8a, 0xbb, 0xca, 0xe2, 0x87, 0x31, + 0x86, 0x0a, 0xd6, 0x04, 0x9d, 0x67, 0x34, 0xb4, 0xe4, 0xc8, 0x26, 0xa6, 0xa4, 0x56, 0xa2, 0x48, + 0x6a, 0x22, 0x2a, 0x44, 0x52, 0x91, 0x63, 0xb7, 0xaa, 0xdb, 0x85, 0xff, 0xcd, 0x7f, 0x28, 0x1b, + 0x76, 0xb2, 0xf2, 0xd1, 0xa8, 0xb7, 0xd0, 0xf3, 0x6f, 0x7a, 0x9b, 0x1b, 0xb5, 0xb9, 0x73, 0xa0, + 0xb6, 0x60, 0xad, 0x5b, 0x68, 0xa8, 0xcf, 0x7b, 0x9a, 0x6f, 0xdc, 0x4e, 0x0f, 0xdc, 0xd3, 0xd4, + 0x97, 0x69, 0xfc, 0x86, 0xc4, 0x7a, 0xae, 0x87, 0x31, 0xd8, 0x66, 0x32, 0x35, 0x9d, 0xcc, 0x19, + 0x11, 0xb1, 0xe9, 0xa0, 0x6c, 0xe1, 0x28, 0xed, 0x95, 0x57, 0x0d, 0x63, 0x93, 0xd1, 0x1e, 0x3a, + 0x08, 0x93, 0xe8, 0x2b, 0x8b, 0x2a, 0x24, 0x5e, 0x40, 0x98, 0x84, 0x87, 0x50, 0x68, 0xf1, 0xb2, + 0x9d, 0x3c, 0x07, 0x9e, 0xd1, 0x63, 0x14, 0x28, 0x77, 0xfe, 0x5a, 0x5c, 0xf2, 0x24, 0x9d, 0x1d, + 0x91, 0x2a, 0x4b, 0x81, 0xe3, 0x4e, 0x1b, 0x19, 0xdf, 0xbe, 0xc3, 0x7d, 0x73, 0x26, 0x72, 0x1a, + 0x46, 0x77, 0xc4, 0x2e, 0x8b, 0x5a, 0xbc, 0xf6, 0x49, 0xf2, 0xc4, 0x5c, 0x62, 0xe5, 0x67, 0xbd, + 0x36, 0x9a, 0x65, 0x8f, 0x6d, 0x6c, 0x7b, 0xe1, 0xb5, 0x66, 0x39, 0xb7, 0x60, 0x09, 0x40, 0xab, + 0x55, 0x45, 0x38, 0x6d, 0x14, 0xf2, 0x38, 0x64, 0xdb, 0x5a, 0x46, 0xce, 0xa5, 0x21, 0x62, 0x86, + 0xd4, 0xc4, 0x25, 0xc3, 0xe8, 0x57, 0xdf, 0x3d, 0xc5, 0xbb, 0x1a, 0x3d, 0x99, 0x34, 0x16, 0x31, + 0x11, 0x4e, 0xb2, 0x10, 0x64, 0xcd, 0x69, 0xc9, 0xdc, 0x20, 0x32, 0xb8, 0xc4, 0x82, 0x52, 0xcd, + 0xa8, 0xdf, 0x5d, 0x04, 0x8d, 0x5b, 0xfe, 0xe8, 0x2c, 0x68, 0xe4, 0x09, 0xbb, 0x8f, 0xc1, 0xf6, + 0x10, 0x71, 0x53, 0xd6, 0x17, 0xa2, 
0x60, 0x80, 0x8d, 0xb9, 0xff, 0xd0, 0x83, 0xd1, 0x7c, 0x5b, + 0xcb, 0x4e, 0x26, 0x6b, 0xac, 0x36, 0xc7, 0xce, 0x0e, 0xab, 0xa3, 0x8c, 0x68, 0x25, 0x01, 0x82, + 0x96, 0xf5, 0xe9, 0x88, 0x78, 0xa7, 0x8b, 0x65, 0xdc, 0x90, 0xec, 0x61, 0xb3, 0xf2, 0x9a, 0xf6, + 0x91, 0xc3, 0x9f, 0x7a, 0x04, 0x0c, 0xd8, 0xa1, 0x18, 0x66, 0x4c, 0x7c, 0x59, 0xc0, 0x14, 0xc8, + 0xb1, 0xbf, 0x4f, 0xd0, 0xba, 0x22, 0x8c, 0x6f, 0x2a, 0x51, 0x1a, 0xe8, 0xef, 0xd7, 0x08, 0xbf, + 0x9b, 0x93, 0x01, 0x53, 0xe0, 0xdc, 0x6a, 0xe7, 0x98, 0xc5, 0xbf, 0xce, 0x43, 0x3c, 0xa9, 0x00, + 0xb6, 0xe5, 0xc6, 0xe8, 0x21, 0x87, 0x81, 0x23, 0x89, 0x96, 0xcb, 0xf1, 0x48, 0x7e, 0x10, 0x75, + 0x91, 0xb1, 0xcd, 0x49, 0x78, 0x73, 0x21, 0x25, 0x4e, 0x01, 0x5d, 0xda, 0x59, 0xde, 0x9b, 0xbb, + 0xf0, 0x5d, 0x85, 0x7f, 0xba, 0xdc, 0x12, 0x3b, 0x83, 0x19, 0xdc, 0x51, 0x39, 0x29, 0xdb, 0x61, + 0x90, 0xbc, 0x9c, 0x14, 0x59, 0xc0, 0x95, 0x81, 0xe5, 0xe1, 0xd1, 0x63, 0x4f, 0xcb, 0x0a, 0x02, + 0x61, 0x2b, 0xf1, 0xff, 0x81, 0x47, 0x9e, 0xed, 0x3c, 0xc1, 0x2d, 0xae, 0xf2, 0x7e, 0xe9, 0xd1, + 0xa5, 0x60, 0x6f, 0xe2, 0xdd, 0x14, 0x08, 0xcd, 0xa5, 0x9d, 0x3f, 0x26, 0x42, 0xb0, 0xe5, 0xaf, + 0x56, 0x00, 0xe4, 0x6a, 0xca, 0x00, 0x7d, 0x86, 0xf7, 0x06, 0xd3, 0xb9, 0x02, 0x9b, 0x6f, 0x7b, + 0xab, 0x79, 0x5a, 0x89, 0xa4, 0xb9, 0x90, 0x9e, 0x6a, 0xb9, 0x04, 0x97, 0xd2, 0xad, 0x60, 0xc1, + 0xfe, 0xa8, 0x0d, 0xcc, 0x5d, 0x7f, 0x79, 0x20, 0x79, 0x81, 0x88, 0x9f, 0xc3, 0x57, 0x01, 0xa1, + 0xa5, 0xcd, 0x38, 0x6c, 0xbd, 0x78, 0x1a, 0x07, 0xd1, 0x4e, 0x6c, 0x29, 0x35, 0x44, 0x53, 0x09, + 0xb1, 0x1e, 0x4f, 0x80, 0x3d, 0x84, 0x1c, 0xe3, 0x84, 0x80, 0x89, 0xc9, 0xb6, 0x2f, 0x5d, 0x40, + 0x10, 0xa9, 0x2d, 0x5d, 0x9d, 0x6d, 0xc1, 0x9a, 0x18, 0x63, 0x26, 0x35, 0x03, 0xc1, 0x84, 0x67, + 0xf6, 0x06, 0xc6, 0x74, 0xa8, 0x21, 0xe9, 0xd2, 0x49, 0x6b, 0xb3, 0x12, 0xb8, 0x1e, 0x16, 0xd4, + 0x73, 0x7c, 0xa5, 0x90, 0x82, 0x36, 0xf5, 0x74, 0x81, 0xf8, 0x19, 0x64, 0xf9, 0xda, 0x56, 0xe8, + 0x52, 0xca, 0xa2, 0x50, 0x7d, 0xd1, 0x0d, 0x7d, 0x5f, 0x18, 0x77, 0x45, 0x50, 0x89, 0x7e, 0xdd, + 0x06, 0xa8, 0xbd, 0x6e, 0x23, 0xd2, 0xc4, 0x56, 0xbb, 0x88, 0x20, 0xae, 0xa3, 0x13, 0x73, 0x64, + 0xe6, 0x85, 0xa2, 0xc7, 0x1f, 0x68, 0x22, 0xe8, 0xcc, 0xd0, 0x76, 0xa1, 0x05, 0xc5, 0xf8, 0x65, + 0x43, 0xb2, 0xa6, 0x55, 0x5a, 0x70, 0x3c, 0x70, 0xdb, 0x43, 0x7d, 0xe0, 0x95, 0x9d, 0x44, 0x5e, + 0x5c, 0x66, 0x51, 0x34, 0x01, 0x0a, 0xd6, 0xaf, 0xec, 0x3b, 0xbd, 0xf2, 0x71, 0x19, 0xe4, 0xdb, + 0x82, 0x5b, 0x13, 0x58, 0x98, 0x32, 0xee, 0x88, 0xde, 0x5f, 0xde, 0xdc, 0x13, 0x51, 0x52, 0xa4, + 0x25, 0x08, 0x43, 0xc0, 0xc6, 0x93, 0xbf, 0x63, 0x2a, 0xe4, 0xed, 0x96, 0xc9, 0xcb, 0x77, 0xbc, + 0xc8, 0x8f, 0x89, 0x86, 0x16, 0x62, 0x64, 0xd8, 0x79, 0xb9, 0x6d, 0xfa, 0xe2, 0x01, 0x74, 0x1f, + 0xf3, 0xb5, 0xd1, 0xc4, 0x41, 0x90, 0x75, 0xd4, 0x86, 0xa5, 0x51, 0x6a, 0x6d, 0x75, 0xd0, 0xfe, + 0x4a, 0x58, 0x6b, 0x5b, 0xa7, 0x4c, 0xa4, 0xf6, 0xb7, 0x5d, 0x9b, 0x4e, 0x04, 0xe8, 0xf9, 0xab, + 0x00, 0x9e, 0xfc, 0xeb, 0xa6, 0x4b, 0x50, 0x4a, 0x60, 0xf9, 0x1a, 0x2f, 0xbc, 0x0b, 0xe7, 0x67, + 0x98, 0x4b, 0xce, 0xd7, 0xc4, 0x20, 0x5e, 0x4a, 0x13, 0xe0, 0xc5, 0xa8, 0x1a, 0x59, 0xdb, 0xc2, + 0x96, 0xd5, 0x8d, 0x68, 0x63, 0x27, 0x9c, 0xe1, 0x07, 0x39, 0x2e, 0xb0, 0x53, 0x1c, 0xf4, 0xbf, + 0xf5, 0x75, 0x89, 0x06, 0xdb, 0xfd, 0x59, 0xe7, 0xf0, 0x39, 0xac, 0x4a, 0xf5, 0xc9, 0x3d, 0xe2, + 0x14, 0x93, 0x22, 0xb8, 0xf7, 0xf2, 0x5f, 0x43, 0x5a, 0x79, 0x08, 0xf9, 0xb9, 0xb2, 0x8e, 0xd1, + 0x0f, 0x9f, 0x04, 0xf1, 0x14, 0x5d, 0xd7, 0x08, 0x7e, 0x2a, 0xb4, 0xbe, 0x3b, 0x70, 0xa1, 0x82, + 0x0b, 0x5a, 0x8c, 0x2e, 0x17, 0x56, 0x0a, 0x51, 0x72, 0xa2, 
0x40, 0xbb, 0x1a, 0xfa, 0x2a, 0x82, + 0xb3, 0xd2, 0xf0, 0x31, 0xe3, 0x3f, 0x4c, 0x61, 0x95, 0x11, 0xa1, 0x64, 0x65, 0xed, 0x58, 0x51, + 0x95, 0xe8, 0x86, 0x07, 0x91, 0xe2, 0xed, 0x12, 0x80, 0x75, 0x87, 0x42, 0x06, 0xc5, 0x1c, 0xe0, + 0xb6, 0x4b, 0x52, 0xc7, 0x5c, 0x92, 0x62, 0x11, 0xf1, 0x4b, 0x99, 0x8d, 0x6f, 0x41, 0xe3, 0xfb, + 0x48, 0xb2, 0x09, 0xc4, 0xb1, 0x18, 0x0a, 0x60, 0xaf, 0xb6, 0xe6, 0xb8, 0xc3, 0x05, 0x73, 0xa3, + 0x56, 0xa6, 0x14, 0xe9, 0x5b, 0x65, 0xdf, 0x8d, 0x8f, 0x27, 0x57, 0xd3, 0x84, 0xd0, 0xde, 0xe7, + 0x81, 0x2c, 0x28, 0x9c, 0x9d, 0x50, 0x33, 0x13, 0xf0, 0xea, 0x04, 0x86, 0x43, 0x46, 0x77, 0x67, + 0x11, 0x67, 0x47, 0x75, 0xfe, 0x64, 0x34, 0xa9, 0x4c, 0x83, 0x8e, 0x64, 0x69, 0xd8, 0xfe, 0x6d, + 0x3b, 0x42, 0x7e, 0xe9, 0xf5, 0x1f, 0x13, 0x47, 0xd9, 0xd4, 0xfe, 0x65, 0xad, 0x42, 0xed, 0x30, + 0xe2, 0x00, 0x86, 0x1e, 0x50, 0xfd, 0xbd, 0xa3, 0xd4, 0xa7, 0x70, 0x52, 0x3a, 0x71, 0xb7, 0x9e, + 0x72, 0x11, 0x64, 0xe7, 0x03, 0x6e, 0x06, 0x61, 0x62, 0xf5, 0xac, 0x29, 0x72, 0x61, 0x1a, 0xa0, + 0x25, 0x96, 0xc5, 0xef, 0x9f, 0x20, 0x1c, 0x32, 0x9d, 0x92, 0xb9, 0x16, 0xd4, 0xab, 0x22, 0x1c, + 0xe5, 0xbf, 0x48, 0x98, 0xc8, 0x6e, 0x77, 0xf3, 0xce, 0x2e, 0x8a, 0x31, 0x4c, 0xaa, 0x18, 0x84, + 0x0b, 0xc1, 0x48, 0x30, 0x94, 0x1b, 0x89, 0xd6, 0x9c, 0xd7, 0xf5, 0xed, 0xa8, 0xe8, 0xcb, 0x5a, + 0x8c, 0xe3, 0x31, 0xbc, 0x3e, 0x8e, 0x18, 0x8d, 0x1a, 0x11, 0xd6, 0x1a, 0x45, 0x05, 0xeb, 0xf6, + 0xd3, 0xc0, 0x40, 0x49, 0x00, 0x1b, 0x96, 0x45, 0x32, 0x40, 0xae, 0x08, 0x35, 0x52, 0xd1, 0x77, + 0x4e, 0xb0, 0x75, 0xfe, 0xfb, 0xb5, 0xc7, 0x1a, 0xea, 0xd1, 0xb6, 0x33, 0x88, 0xd3, 0x37, 0x7a, + 0x5c, 0xf5, 0x08, 0x09, 0xf7, 0x0c, 0xff, 0x3d, 0x1c, 0x5c, 0xb0, 0x14, 0x00, 0x01, 0xa3, 0x44, + 0xeb, 0xac, 0xfd, 0x0e, 0xb6, 0x37, 0x53, 0x24, 0x3d, 0xce, 0x4c, 0xe0, 0x50, 0x40, 0x0d, 0x99, + 0x4b, 0x90, 0x82, 0xe7, 0xdb, 0xb0, 0xa0, 0x58, 0x71, 0x35, 0x5c, 0x49, 0xd1, 0x1c, 0x64, 0x8e, + 0x59, 0xe3, 0xd8, 0xda, 0x1b, 0xbd, 0xea, 0xc6, 0xcf, 0x3a, 0x0e, 0x2e, 0x02, 0xaf, 0x8d, 0x7c, + 0x20, 0x7e, 0x37, 0xe3, 0x55, 0x7f, 0x85, 0x21, 0x8f, 0xbf, 0xea, 0x5a, 0xf1, 0x7c, 0x11, 0xda, + 0x4c, 0xdf, 0xf0, 0xd4, 0x3d, 0x04, 0xce, 0xd2, 0x7b, 0xa8, 0x78, 0xd6, 0x7e, 0x5f, 0xbc, 0xbc, + 0x74, 0x5b, 0xbc, 0x21, 0xbf, 0xd2, 0x15, 0x6e, 0x5b, 0x9f, 0x71, 0x8e, 0xee, 0xf0, 0x7b, 0x15, + 0x19, 0x84, 0x81, 0xe7, 0x2f, 0x4b, 0xef, 0xb3, 0x85, 0x0c, 0xb5, 0x59, 0x92, 0x73, 0x44, 0x15, + 0x58, 0x16, 0x05, 0xec, 0x18, 0xa4, 0xca, 0xcb, 0x13, 0xb4, 0xbe, 0x31, 0x80, 0xdc, 0x78, 0x7f, + 0x19, 0x90, 0xf2, 0xd3, 0x49, 0x26, 0x35, 0x1a, 0xb0, 0x2c, 0x9d, 0xe0, 0xbc, 0xd4, 0x16, 0xc1, + 0x85, 0x48, 0x4b, 0x9a, 0x03, 0x1c, 0x4b, 0xc7, 0x8b, 0xc2, 0xd3, 0x81, 0x75, 0x28, 0x25, 0xcf, + 0x42, 0x49, 0xcb, 0xb5, 0x37, 0x9c, 0x3e, 0x92, 0x8c, 0xb8, 0x15, 0xac, 0xf5, 0x18, 0xfd, 0x47, + 0x18, 0xe3, 0x23, 0x4a, 0x2a, 0x1c, 0x19, 0xaf, 0x05, 0x85, 0x42, 0x52, 0xb1, 0xa5, 0x53, 0x9a, + 0xad, 0xe2, 0x3c, 0xb0, 0x6c, 0x39, 0xaa, 0x4b, 0xa8, 0xd2, 0xe4, 0x96, 0x58, 0x90, 0x42, 0x1f, + 0x6d, 0xd2, 0xee, 0x5e, 0x2d, 0x97, 0xc9, 0xf7, 0x0d, 0x4e, 0x46, 0xb2, 0x06, 0x0f, 0xbd, 0xf3, + 0x57, 0xc9, 0xce, 0x09, 0xd8, 0xd9, 0x53, 0xf9, 0x13, 0x91, 0xe1, 0x6c, 0x10, 0x4c, 0xea, 0x87, + 0xa5, 0x68, 0x57, 0x48, 0x98, 0x54, 0x3b, 0x54, 0x0c, 0x1a, 0xf2, 0xab, 0x15, 0xe4, 0x95, 0x38, + 0x74, 0xb4, 0x67, 0x52, 0xa5, 0xf1, 0x9a, 0xcf, 0x12, 0x1d, 0xd7, 0x44, 0xec, 0x3b, 0x6b, 0xe7, + 0x9d, 0xd0, 0x96, 0xcf, 0x33, 0x93, 0x79, 0x8f, 0x1b, 0xdf, 0xb7, 0x18, 0xb0, 0xe3, 0xf2, 0xd9, + 0x82, 0xf6, 0x17, 0xb7, 0xaf, 0x2c, 0xd5, 0x79, 0x31, 0x93, 0xfe, 0x68, 0xf1, 0x09, 
0xb9, 0x3a, + 0xaf, 0x47, 0x85, 0x3c, 0x80, 0x07, 0xd8, 0xbe, 0x32, 0x02, 0x9f, 0x27, 0xd5, 0x85, 0x29, 0xce, + 0xb6, 0x9b, 0xf6, 0x7f, 0xe3, 0x4f, 0x90, 0xf5, 0x8f, 0x5b, 0xa3, 0x6e, 0xa8, 0xb3, 0x46, 0xa6, + 0x0d, 0x5b, 0x51, 0x96, 0x7c, 0xfc, 0x6f, 0xfc, 0x92, 0xdc, 0xda, 0xd6, 0x54, 0x09, 0x02, 0xef, + 0xd2, 0xd5, 0xf6, 0x4e, 0x17, 0x1d, 0xc0, 0xd8, 0x9e, 0x2d, 0x30, 0x79, 0xea, 0x29, 0xcf, 0x97, + 0x30, 0x46, 0xae, 0xe2, 0x80, 0x80, 0xb4, 0x68, 0xae, 0x4d, 0xe0, 0x1b, 0x6b, 0x44, 0xd9, 0xe8, + 0x4e, 0x6b, 0x5a, 0x5a, 0x53, 0x9a, 0xec, 0x40, 0x0b, 0x8f, 0xc8, 0x01, 0x71, 0xe6, 0xa9, 0x1b, + 0x4c, 0x42, 0x2c, 0x2e, 0xda, 0x7d, 0xc2, 0xc8, 0x01, 0x12, 0x9f, 0x3e, 0xde, 0xe2, 0x76, 0xc4, + 0x33, 0x88, 0x1e, 0x48, 0x67, 0x6c, 0x2d, 0x84, 0xd6, 0x4a, 0x2a, 0x3e, 0xf3, 0x33, 0xb9, 0x43, + 0x48, 0x64, 0xbb, 0x95, 0x35, 0x39, 0x47, 0xb8, 0xff, 0xb7, 0x63, 0x80, 0x7f, 0xfe, 0xf9, 0xe7, + 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, + 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, + 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, + 0xf9, 0xe7, 0xff, 0x33, 0xfe, 0x1f, 0x02, 0x6b, 0xea, 0xe5, 0x00, 0x8c, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 35840, // uncompressed data size (bytes) + 24478, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x4e, 0x00, 0x62, 0x1d, 0x08, 0x13, 0x4c, 0xc4, 0x43, 0x69, + 0x20, 0x00, 0x00, 0x6e, 0x8b, 0xb6, 0xe9, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_dbg_tu10x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_sig_dbg_data[] = +{ + 0x42, 0x6d, 0x89, 0xe4, 0xc4, 0xbe, 0xa1, 0xcd, 0x38, 0x66, 0xf1, 0xfc, 0x68, 0x01, 0x7d, 0xf0, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_sig_dbg_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_prod_tu10x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_sig_prod_data[] = +{ + 0xcd, 0x10, 0x11, 0x8c, 0xd2, 0x97, 0x21, 0xe1, 0xbc, 0xcc, 0x59, 0x48, 0x8c, 0x99, 0x8f, 0xa1, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_sig_prod_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_tu10x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_patch_loc_data[] = +{ + 0x00, 0x62, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_tu10x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_tu10x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU102("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/load/g_booteruc_load_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU102_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU102_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU102_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterLoadUcode_TU102 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU102_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_TU102(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterLoadUcode_TU102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_TU116.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_TU116.c new file mode 100644 index 000000000..1300477da --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterLoadUcode_TU116.c @@ -0,0 +1,3455 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 35840 +// COMPRESSED SIZE (bytes): 24474 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_image_dbg_data[] = +{ + 0xed, 0xda, 0x53, 0xac, 0x28, 0x4d, 0x17, 0x30, 0xe8, 0x6d, 0x1b, 0x67, 0xdb, 0xb6, 0x6d, 0xdb, + 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0x3c, 0xdb, 0xb6, 0x6d, 0xdb, 0x36, 0xe6, 0xfd, 0xe7, 0x6e, + 0xbe, 0x9b, 0xc9, 0x5c, 0x4e, 0x72, 0x9e, 0xa4, 0xb2, 0x6a, 0x75, 0xd2, 0xbd, 0xfa, 0xa2, 0xba, + 0xd2, 0x55, 0x29, 0x09, 0x00, 0x84, 0x99, 0x0c, 0x80, 0x38, 0x00, 0x50, 0x80, 0xee, 0xa3, 0xfc, + 0xf7, 0x96, 0x1f, 0x40, 0x09, 0x00, 0xc4, 0xff, 0x73, 0x01, 0x02, 0x14, 0xe0, 0xff, 0x4e, 0x23, + 0x00, 0x00, 0x00, 0x7f, 0x92, 0x01, 0x7c, 0xe9, 0x00, 0x00, 0xde, 0x80, 0xde, 0x00, 0xde, 0x81, + 0x62, 0x00, 0x80, 0x00, 0x7a, 0x77, 0x76, 0x7f, 0x7f, 0x7f, 0xe1, 0x23, 0x00, 0x00, 0x01, 0x7e, + 0xf3, 0x40, 0x9a, 0x8e, 0x00, 0x98, 0xd2, 0xd7, 0x01, 0x9b, 0x92, 0x01, 0xa8, 0xd3, 0xd7, 0x81, + 0xfe, 0x0b, 0x24, 0xe9, 0xeb, 0xc0, 0xff, 0x05, 0xdc, 0xff, 0xee, 0x05, 0x48, 0xcf, 0x03, 0x6e, + 0xd9, 0x05, 0xee, 0xcd, 0xcb, 0xdf, 0x4d, 0xcf, 0x03, 0x4c, 0xcf, 0x00, 0x6a, 0xd9, 0x05, 0x6c, + 0x59, 0x07, 0xf2, 0x5d, 0x01, 0x00, 0x80, 0x67, 0x90, 0x00, 0x40, 0x78, 0xcd, 0x03, 0x80, 0xff, + 0xaf, 0x86, 0x19, 0xc8, 0xff, 0xe9, 0xc0, 0xb6, 0x67, 0x00, 0x00, 0x02, 0xfd, 0xd7, 0x7b, 0xe2, + 0x06, 0x8a, 0x03, 0x00, 0x00, 0x8b, 0xf8, 0xaf, 0xd4, 0x77, 0x1e, 0x68, 0xf7, 0xd3, 0xc7, 0x3b, + 0xd8, 0x1b, 0xf0, 0x7f, 0x4f, 0x03, 0x4a, 0x81, 0x87, 0xd8, 0xf9, 0x3f, 0x95, 0xbf, 0xdf, 0x41, + 0x23, 0x00, 0xf4, 0x01, 0xbe, 0x73, 0x41, 0x3a, 0xe3, 0x81, 0x5a, 0x93, 0x11, 0xfe, 0xcb, 0x7f, + 0x3e, 0x00, 0x7c, 0xff, 0x7b, 0x9f, 0x37, 0xa0, 0xee, 0xe2, 0x4f, 0xc0, 0x38, 0x2e, 0x6c, 0xc0, + 0x08, 0x7c, 0x2a, 0x80, 0xfc, 0xbc, 0x37, 0x80, 0xb8, 0xff, 0x62, 0xef, 0xef, 0x13, 0xfd, 0xd7, + 0x8f, 0x04, 0x60, 0x7e, 0x1e, 0xef, 0x1d, 0xc0, 0xff, 0x03, 0xe4, 0x9a, 0x0e, 0xe9, 0xe2, 0x68, + 0x49, 0x79, 0xe2, 0xba, 0x0a, 0xdb, 0x35, 0xa9, 0x08, 0x4e, 0x2e, 0xbf, 0x56, 0xab, 0xae, 0x84, + 0x7e, 0x99, 0x6d, 0xd4, 0x09, 0xd6, 0x21, 0x81, 0x69, 0xc1, 0x8f, 0x8b, 0xfa, 0x9c, 0x97, 0xb5, + 0x8e, 0xfd, 0xc0, 0xda, 0xd6, 0x26, 0x2a, 0x05, 0x0d, 0x81, 0xc6, 0x46, 0x1e, 0x42, 0x48, 0x0f, + 0x06, 0x2d, 0x49, 0xd9, 0xb5, 0x93, 0x5a, 0x14, 0xda, 0xa1, 0xe8, 0x0c, 0x20, 0x22, 0xbb, 0x2e, + 0xe1, 0x8c, 0x3b, 0x69, 0xfd, 0xdd, 0x70, 0x49, 0x01, 0x5d, 0x4e, 0x60, 0x2a, 0xe5, 0xb7, 0x61, + 0x1f, 0x77, 0xec, 0x52, 0x1d, 0x76, 0x9f, 0xd1, 0x1b, 0xae, 0x7f, 0x00, 0x99, 0x2d, 0xd6, 0x42, + 0x41, 0x7e, 0x37, 0xb7, 0xf7, 0x5e, 0xe4, 0x06, 0xf1, 0x91, 0x47, 0x39, 0xa4, 0xb4, 0x65, 0x71, + 0xd8, 0xe4, 0xf5, 0xd1, 0x00, 0x03, 0xfb, 0xa4, 0x29, 0xdb, 0xc9, 0x16, 0x44, 0x7c, 0x7e, 0x44, + 0x1d, 0xc5, 0x80, 0x30, 0x50, 0xe4, 0xc7, 0xc8, 0xf5, 0x5e, 0x57, 0x4d, 0x8a, 0xdd, 0x13, 0xa4, + 0xe1, 0x48, 0x7f, 0xa6, 0x6a, 0x8a, 0x4f, 0xdf, 0xe9, 0xb8, 0xec, 0xe6, 0x9e, 0xa1, 0xf4, 0xb6, + 0xe8, 0x18, 0x7a, 0xfb, 0x97, 0xd4, 0xda, 0x3c, 0x84, 0xfc, 0xbb, 0xf7, 0x4a, 0x77, 0x45, 0xcb, + 0x18, 0x49, 0x80, 0x81, 0x15, 0x4f, 0x49, 0x20, 0x36, 0xd6, 0x0a, 0x35, 0x59, 0x61, 0x39, 0x4d, + 0xc7, 0x4b, 0x9a, 0x8d, 0xc4, 0xa1, 0x61, 0xcc, 0x92, 0x8a, 0x0f, 0x83, 0x4d, 0x7d, 0xba, 0x88, + 0x9a, 0xdc, 0x9c, 0x92, 0x57, 0xc9, 0xf6, 0x64, 0x27, 0x77, 
0xfa, 0xe8, 0xc2, 0x50, 0x7c, 0x1b, + 0xde, 0x63, 0x86, 0xa4, 0x91, 0xed, 0x0d, 0xa3, 0x01, 0x67, 0x8d, 0x6f, 0xaf, 0x2b, 0x53, 0x61, + 0x2b, 0xe7, 0x1f, 0x2a, 0x0a, 0x80, 0x93, 0x85, 0xfb, 0x8f, 0x98, 0x5c, 0x7b, 0xaa, 0x46, 0xab, + 0x62, 0x8b, 0x27, 0x9f, 0xa8, 0x44, 0x07, 0x7c, 0xff, 0xdb, 0xfd, 0x81, 0x6a, 0x06, 0xb9, 0x99, + 0x2d, 0x46, 0xc5, 0xbf, 0x3b, 0x65, 0x5f, 0xe5, 0x39, 0x1a, 0x4c, 0x93, 0xcd, 0xc9, 0x36, 0x70, + 0x1c, 0xe9, 0xf9, 0x63, 0x4e, 0x94, 0x76, 0xf3, 0x68, 0xbc, 0xe2, 0xdc, 0xac, 0x8a, 0xc6, 0xf9, + 0xc1, 0x94, 0x8d, 0x66, 0xfb, 0xfc, 0x23, 0x78, 0x71, 0x4d, 0x4d, 0xb3, 0x6b, 0x5b, 0xd1, 0x28, + 0x14, 0x5b, 0x9a, 0xaa, 0x73, 0xae, 0xac, 0x58, 0x20, 0xfe, 0x86, 0x5f, 0xfe, 0x1f, 0x63, 0x6b, + 0x69, 0x0e, 0x24, 0xc0, 0xa4, 0x58, 0x08, 0xfc, 0x15, 0xc1, 0x73, 0xe1, 0x6f, 0xc9, 0x4b, 0x3b, + 0x39, 0xc7, 0xae, 0x48, 0x55, 0xb8, 0x4c, 0xbc, 0xbe, 0xf1, 0xde, 0xec, 0xf9, 0x23, 0x42, 0x72, + 0xca, 0xb3, 0x5c, 0xdb, 0x62, 0x71, 0x3d, 0x5f, 0x62, 0x12, 0x30, 0x4d, 0x73, 0x90, 0xc3, 0x33, + 0x4a, 0x9e, 0xbc, 0x7b, 0xab, 0xc1, 0x9a, 0x58, 0x2e, 0x13, 0x03, 0x54, 0xb1, 0x04, 0x0f, 0x46, + 0x75, 0x47, 0x00, 0x80, 0x66, 0x93, 0xc1, 0x27, 0xef, 0xd1, 0xcc, 0xdb, 0x38, 0xed, 0x72, 0xba, + 0x24, 0x7f, 0x09, 0xf2, 0x8f, 0x26, 0x96, 0xc1, 0xda, 0x3f, 0xfc, 0x3a, 0x12, 0xfd, 0x6a, 0xfc, + 0xf6, 0xde, 0x06, 0xbc, 0xba, 0x68, 0x4b, 0x2e, 0x2a, 0xee, 0x59, 0x20, 0x97, 0x10, 0x5d, 0x07, + 0x75, 0x27, 0x6f, 0xd8, 0x2b, 0x87, 0x4a, 0x6c, 0x23, 0xe6, 0x6d, 0xbd, 0x39, 0xb0, 0xf8, 0xa8, + 0x65, 0x7a, 0xd5, 0x62, 0x3c, 0xae, 0x53, 0xb1, 0xce, 0x39, 0xf8, 0x1b, 0x5d, 0x23, 0xb9, 0x7f, + 0xee, 0xf7, 0xa8, 0x4a, 0x6a, 0xef, 0x94, 0x59, 0x95, 0x9b, 0x2c, 0x07, 0x44, 0xd7, 0xa3, 0x40, + 0x21, 0xfd, 0x68, 0x33, 0x89, 0x28, 0xf4, 0xd4, 0xc2, 0x44, 0x73, 0x7b, 0x2d, 0xec, 0xea, 0xf0, + 0xed, 0xf4, 0xed, 0xdf, 0x73, 0xb8, 0x94, 0x83, 0x60, 0xf3, 0x7e, 0x22, 0x09, 0x98, 0xbf, 0xd0, + 0x66, 0x2c, 0xf4, 0x8c, 0x03, 0x96, 0xd1, 0xc5, 0xbe, 0x6a, 0x54, 0x80, 0x5d, 0x4d, 0x9a, 0x10, + 0x07, 0x93, 0x31, 0x97, 0xdf, 0xba, 0xeb, 0xcc, 0xcf, 0x0c, 0x14, 0xa7, 0xab, 0x48, 0x8b, 0x24, + 0x82, 0xe1, 0x67, 0x42, 0x52, 0x67, 0xb5, 0x28, 0x45, 0xbb, 0x71, 0xfd, 0xaa, 0x15, 0xda, 0x59, + 0xd5, 0xc3, 0x34, 0x6b, 0x07, 0xdd, 0x4e, 0xd4, 0xc9, 0x62, 0x93, 0x30, 0xd6, 0xa5, 0xa6, 0xf8, + 0xe9, 0xeb, 0xe4, 0x88, 0xf9, 0x66, 0x83, 0x10, 0x85, 0xfe, 0xe6, 0x7f, 0x23, 0x3d, 0x2c, 0x5a, + 0x4a, 0xfa, 0x77, 0x4d, 0x39, 0x3a, 0x82, 0xb2, 0xf2, 0x10, 0x72, 0x3d, 0x3f, 0x49, 0x54, 0x79, + 0x16, 0xc5, 0xc1, 0xae, 0xda, 0x8d, 0x44, 0x36, 0x1e, 0x32, 0x7c, 0xc1, 0xb0, 0x08, 0xf6, 0x3f, + 0x92, 0x12, 0x19, 0x2b, 0x1f, 0x8f, 0xd8, 0xdd, 0xb7, 0xa1, 0x36, 0xea, 0x5d, 0xef, 0xc4, 0x89, + 0x15, 0x44, 0x1a, 0x95, 0x85, 0xb0, 0xb9, 0x84, 0x57, 0x82, 0x39, 0x11, 0xaa, 0x85, 0x7a, 0x45, + 0x99, 0x60, 0xb2, 0xa7, 0x5e, 0xc2, 0x14, 0xae, 0x80, 0x90, 0x89, 0xd8, 0x54, 0xf4, 0x7d, 0xea, + 0xe9, 0xf5, 0x60, 0x69, 0x90, 0x52, 0x46, 0x56, 0xf1, 0x11, 0xa0, 0xe6, 0xd6, 0x77, 0xa0, 0xb7, + 0x1e, 0xf3, 0x72, 0xa2, 0xfb, 0xa3, 0xca, 0x23, 0x83, 0xa0, 0xc5, 0xbc, 0x9d, 0x20, 0xe4, 0x22, + 0xd0, 0xb7, 0xc7, 0x60, 0xc1, 0x87, 0xe0, 0x7c, 0xd0, 0x38, 0x78, 0x8b, 0xe4, 0x68, 0x89, 0xec, + 0x9e, 0xf2, 0xc9, 0x12, 0xc6, 0xfc, 0xa5, 0x0d, 0xa6, 0x34, 0xd0, 0xa9, 0x11, 0xcc, 0xc3, 0xb2, + 0x75, 0x22, 0x2b, 0x5c, 0x91, 0xed, 0xc1, 0xa6, 0x37, 0xc2, 0xc0, 0xb9, 0x7c, 0x19, 0x31, 0xaa, + 0x28, 0x3d, 0x5e, 0x20, 0xee, 0x20, 0x07, 0xdb, 0x43, 0x09, 0x81, 0x47, 0x1a, 0x69, 0x63, 0x37, + 0x9b, 0x2f, 0xba, 0xad, 0x07, 0x3a, 0x8d, 0xaa, 0x0b, 0xeb, 0xce, 0x8f, 0xb4, 0x40, 
0x16, 0x2c, + 0x9e, 0x91, 0x86, 0x8e, 0xcd, 0x69, 0xc9, 0xd2, 0x23, 0xb6, 0xe7, 0x66, 0xcd, 0xbe, 0xbb, 0x0c, + 0xd2, 0xba, 0x61, 0xf2, 0x80, 0x93, 0x61, 0xd9, 0x87, 0xf9, 0x3d, 0xef, 0xe0, 0x5c, 0x06, 0x0f, + 0xfe, 0x0b, 0x89, 0xd0, 0x27, 0x47, 0x50, 0x2f, 0xc8, 0x56, 0xd5, 0x5f, 0x39, 0x73, 0x23, 0xe9, + 0x91, 0x10, 0xf3, 0xde, 0x91, 0x2a, 0x9c, 0x75, 0x99, 0x04, 0xba, 0xca, 0xc2, 0x86, 0x15, 0x8c, + 0x23, 0x19, 0x42, 0x1a, 0x32, 0x87, 0xd3, 0x5c, 0x19, 0x27, 0xec, 0xa5, 0x37, 0x85, 0x49, 0xba, + 0x7e, 0xa6, 0x65, 0xa5, 0xae, 0x4a, 0xe4, 0xba, 0x65, 0xa6, 0xb1, 0x91, 0x51, 0xf5, 0xc4, 0x6f, + 0x08, 0x63, 0x27, 0x22, 0x97, 0x49, 0xd2, 0x70, 0x9d, 0x3d, 0xed, 0x87, 0x84, 0x09, 0xd5, 0x6f, + 0x2c, 0x0f, 0x7c, 0xbb, 0x95, 0x34, 0xf5, 0xd7, 0x73, 0x80, 0x52, 0x9b, 0x13, 0xa0, 0x8e, 0xfe, + 0x36, 0x4c, 0x32, 0x7c, 0xcc, 0x3f, 0xf2, 0x63, 0xf4, 0x61, 0x6e, 0x08, 0xb2, 0xfc, 0x6a, 0xe6, + 0x81, 0xe7, 0x0c, 0x5b, 0x5f, 0xe1, 0xe0, 0x2b, 0xdf, 0xab, 0xa4, 0x5c, 0xbf, 0xc4, 0x0c, 0x0a, + 0xb7, 0xcb, 0x51, 0x45, 0x2d, 0xa5, 0x78, 0x01, 0x4e, 0xad, 0x90, 0x32, 0xc3, 0xba, 0x9c, 0x13, + 0x10, 0xc8, 0x8c, 0x21, 0xff, 0x9b, 0x11, 0x0d, 0xbe, 0xe8, 0x7e, 0x1d, 0x4c, 0x84, 0x62, 0x84, + 0x51, 0xed, 0x7e, 0x6b, 0xa6, 0x0a, 0x1d, 0x82, 0xc9, 0xd9, 0x71, 0xc5, 0xba, 0x64, 0x57, 0x48, + 0x1a, 0x23, 0x9d, 0x9f, 0x65, 0xc5, 0xf2, 0x8b, 0x9e, 0x3c, 0xad, 0xd9, 0xcb, 0xa3, 0x56, 0x64, + 0x65, 0xc0, 0xe7, 0x78, 0xfb, 0x05, 0xde, 0x5d, 0xd9, 0x56, 0x09, 0x4e, 0xbc, 0xb5, 0x62, 0xef, + 0xc8, 0xb5, 0xde, 0x93, 0xd5, 0xe6, 0xa4, 0x53, 0x61, 0xaf, 0x16, 0x5d, 0x47, 0xf2, 0x02, 0xb8, + 0x60, 0x17, 0x71, 0x7d, 0xad, 0x7a, 0xaf, 0xa8, 0x93, 0xf0, 0x3a, 0x9b, 0x51, 0x11, 0x7c, 0x5e, + 0xef, 0xd3, 0xa1, 0x60, 0xa7, 0x77, 0xc3, 0x96, 0xaa, 0x70, 0xb5, 0x95, 0x5b, 0xd5, 0x04, 0x8e, + 0x9f, 0xc2, 0x72, 0xcb, 0x84, 0x19, 0xa8, 0x23, 0xa1, 0x3f, 0x16, 0x73, 0x2a, 0x62, 0x08, 0xe1, + 0xd4, 0x69, 0x8f, 0xff, 0x98, 0x57, 0x82, 0xe9, 0x1e, 0x80, 0x69, 0x82, 0xc1, 0xc9, 0x55, 0xf9, + 0x4a, 0xb3, 0x2b, 0xaa, 0xad, 0x1a, 0x78, 0x4d, 0x78, 0xd8, 0x08, 0x5f, 0x52, 0xa0, 0x99, 0x70, + 0x07, 0x0d, 0xbf, 0xdb, 0x34, 0x17, 0x60, 0x21, 0xd6, 0xd9, 0x14, 0x17, 0xbe, 0x1b, 0xfc, 0x57, + 0x6f, 0x89, 0x0b, 0xa1, 0x73, 0xd5, 0xf2, 0x96, 0x0f, 0x89, 0x7e, 0xcb, 0x64, 0x9e, 0x76, 0x5e, + 0x88, 0x6d, 0xc8, 0x4b, 0xac, 0xcc, 0x62, 0xa7, 0xeb, 0xcc, 0x25, 0x2b, 0x4a, 0x2e, 0xe2, 0xfb, + 0xb6, 0x8b, 0x28, 0xfa, 0xfc, 0x80, 0x85, 0x58, 0xf4, 0x14, 0xdf, 0x28, 0x62, 0x4b, 0x00, 0xe0, + 0xfa, 0xec, 0xc7, 0xeb, 0xf6, 0xd7, 0xee, 0xa0, 0x49, 0xae, 0xed, 0x54, 0xb0, 0x23, 0xcf, 0xdc, + 0x1a, 0xbe, 0x83, 0x4c, 0x99, 0x52, 0xa9, 0xf9, 0x0b, 0x69, 0x31, 0x51, 0x30, 0xd7, 0xc3, 0xb1, + 0xc0, 0x01, 0x3c, 0x68, 0x57, 0x0c, 0x9e, 0x6a, 0x28, 0xb7, 0xed, 0x00, 0x07, 0x69, 0xd9, 0xe0, + 0xe6, 0xc6, 0x0a, 0x9d, 0x93, 0xba, 0x9b, 0xfd, 0x54, 0x2c, 0xa2, 0xcc, 0x5c, 0x38, 0x14, 0xcb, + 0x71, 0x9b, 0x09, 0x19, 0xd3, 0x73, 0x8c, 0xaf, 0x23, 0x40, 0xc9, 0x73, 0xbc, 0x39, 0xa1, 0x8c, + 0x41, 0x03, 0x37, 0x67, 0x56, 0x7d, 0x84, 0xb0, 0x9c, 0x1d, 0x54, 0x2e, 0xce, 0xae, 0x29, 0x77, + 0xc7, 0xca, 0x97, 0xf3, 0x5c, 0x93, 0xb1, 0xcb, 0xfc, 0x98, 0x00, 0x05, 0xf2, 0x96, 0xd4, 0x2c, + 0xc6, 0x39, 0x10, 0x9b, 0x37, 0xaf, 0x77, 0x3c, 0x57, 0xee, 0xc9, 0x31, 0x01, 0x9a, 0xa6, 0xbf, + 0x21, 0x7d, 0x0f, 0x7e, 0xf8, 0x0d, 0x33, 0x51, 0x12, 0x14, 0x06, 0xbc, 0x16, 0x3f, 0x1f, 0x30, + 0x6b, 0xe7, 0xde, 0x63, 0xb3, 0x4a, 0x3b, 0x01, 0xd0, 0xd9, 0xe6, 0xe1, 0xdb, 0x56, 0xaf, 0xb6, + 0xba, 0x1e, 0x38, 0xb6, 0x0a, 0x0b, 0x3d, 0x35, 0x9c, 0x11, 0xd1, 0x7f, 0x49, 0x81, 0x9f, 0xc4, + 0x36, 0xe5, 
0x87, 0x86, 0x14, 0x36, 0x25, 0x30, 0x9b, 0xff, 0x87, 0x2c, 0xa6, 0xc4, 0x0a, 0x82, + 0x2b, 0xeb, 0x21, 0xf4, 0xb7, 0x76, 0xc5, 0x44, 0x78, 0xc9, 0x63, 0xe2, 0x08, 0xf8, 0xb9, 0x33, + 0x5c, 0x34, 0x6b, 0xf9, 0x9c, 0xb6, 0x52, 0x3c, 0xc0, 0xce, 0x5b, 0x1f, 0x19, 0x7e, 0x8e, 0x98, + 0x44, 0xd4, 0xfd, 0xc7, 0x04, 0x77, 0xd7, 0xfc, 0x8d, 0xc5, 0x5f, 0xa4, 0xce, 0x62, 0x41, 0x19, + 0x5a, 0xb7, 0x16, 0x80, 0xbd, 0x96, 0x70, 0xea, 0xe6, 0xee, 0x95, 0xe2, 0x54, 0x40, 0xef, 0x15, + 0x60, 0x00, 0xce, 0x2b, 0x0b, 0x06, 0xaa, 0xd5, 0xc8, 0x0f, 0x67, 0x67, 0xb9, 0x17, 0xa3, 0x88, + 0x81, 0x79, 0x7f, 0x92, 0xc7, 0xd5, 0xa8, 0x1e, 0x7b, 0xf2, 0x36, 0xad, 0x99, 0xf4, 0x99, 0x08, + 0x8a, 0x97, 0x47, 0x20, 0x52, 0xf0, 0x35, 0xd9, 0x86, 0x55, 0x1b, 0x0f, 0x89, 0x92, 0x2e, 0x50, + 0xf5, 0xda, 0x39, 0x33, 0x87, 0x93, 0x89, 0xc0, 0xf4, 0x5c, 0x2a, 0xe4, 0xc6, 0xd0, 0xb2, 0x04, + 0x4f, 0x11, 0xb8, 0x71, 0x82, 0x48, 0x46, 0x60, 0xfe, 0x05, 0x16, 0x19, 0x39, 0x0a, 0x46, 0x74, + 0xe7, 0x04, 0x8f, 0xeb, 0x6e, 0xe6, 0x9c, 0xda, 0x6b, 0x80, 0xd6, 0x86, 0x13, 0x4b, 0xbe, 0x52, + 0x17, 0xbe, 0x46, 0xf2, 0xdc, 0xf2, 0x1d, 0x79, 0xa2, 0x6c, 0x6e, 0xcb, 0xab, 0x52, 0x55, 0xe3, + 0xed, 0xe4, 0xab, 0x16, 0xbe, 0x15, 0x8a, 0xdd, 0x33, 0x8b, 0x6e, 0xc5, 0xe4, 0x41, 0x55, 0x67, + 0xdb, 0x91, 0x76, 0x52, 0xf7, 0xa1, 0xca, 0xc0, 0x50, 0xbe, 0x91, 0xe3, 0x4c, 0x05, 0xb6, 0xa7, + 0x96, 0x2f, 0x44, 0x29, 0xa3, 0xd3, 0x37, 0x5c, 0xb1, 0xb5, 0x2e, 0xa2, 0x2c, 0x6d, 0xc7, 0x32, + 0x40, 0x15, 0x56, 0xba, 0x38, 0xea, 0x95, 0x28, 0x5f, 0x7e, 0x0b, 0x51, 0x9c, 0xdf, 0xc6, 0xdf, + 0xb8, 0xe9, 0x33, 0xef, 0xbd, 0x08, 0xc6, 0xd2, 0x4c, 0x27, 0x21, 0x1b, 0x51, 0xf5, 0x90, 0xb3, + 0x3d, 0xcc, 0x1c, 0x93, 0x11, 0xf1, 0xa0, 0xed, 0x49, 0xb8, 0xec, 0xab, 0xfb, 0x99, 0x49, 0x41, + 0x17, 0xce, 0xe2, 0xba, 0xd8, 0x22, 0x56, 0xcc, 0x49, 0xbc, 0x52, 0xa3, 0x0e, 0x9f, 0x95, 0x0d, + 0x1a, 0xda, 0x4b, 0x52, 0x0e, 0x9a, 0x8e, 0x81, 0x8f, 0xd0, 0x6e, 0xb6, 0x6d, 0xcf, 0x33, 0x70, + 0x6f, 0x5b, 0x66, 0xc9, 0x57, 0xa7, 0x31, 0xd5, 0xec, 0xed, 0xb7, 0xe6, 0x7e, 0x7d, 0xc6, 0x49, + 0x45, 0x79, 0x6e, 0xa9, 0xfd, 0x89, 0xba, 0x54, 0xbd, 0x38, 0xb2, 0xb5, 0x3e, 0x3c, 0xbb, 0xf2, + 0x88, 0xa3, 0xa6, 0x99, 0x50, 0x99, 0x3b, 0x80, 0xf3, 0x5e, 0xd2, 0x14, 0xcf, 0xba, 0x7b, 0xff, + 0xf7, 0x3b, 0x4b, 0x41, 0x73, 0x58, 0xba, 0x4f, 0x22, 0x70, 0x3f, 0x3c, 0x9a, 0x0f, 0xc6, 0x92, + 0xf2, 0x9d, 0xe8, 0x05, 0xf7, 0xbd, 0xb8, 0x98, 0x50, 0x68, 0x88, 0x89, 0xe1, 0xfc, 0xa1, 0xd5, + 0x62, 0x5a, 0x4a, 0x69, 0xdb, 0x1e, 0x4e, 0x9c, 0x89, 0xe3, 0x6b, 0xa4, 0xad, 0x77, 0xd3, 0x2f, + 0x9a, 0xf1, 0x25, 0x16, 0x1c, 0x79, 0xb6, 0x40, 0xba, 0x23, 0xfe, 0x4f, 0x60, 0x63, 0x0b, 0x25, + 0x96, 0xa2, 0x20, 0x46, 0xbc, 0x07, 0x07, 0x71, 0x47, 0x4a, 0x01, 0xa9, 0x55, 0xa5, 0x50, 0xc3, + 0xaf, 0x17, 0x2f, 0xb5, 0x56, 0xb5, 0xd5, 0xb9, 0x04, 0xd1, 0x88, 0xc3, 0x3d, 0x24, 0xf1, 0x8e, + 0xaf, 0xe3, 0x02, 0x06, 0xe7, 0xaa, 0xf8, 0x59, 0x34, 0xa5, 0x6b, 0x7c, 0xa8, 0x84, 0x41, 0x61, + 0x11, 0x8e, 0x31, 0xad, 0xc9, 0x9c, 0x1f, 0x66, 0x5f, 0x31, 0x10, 0x2c, 0x93, 0x45, 0xf5, 0xcc, + 0xb0, 0x36, 0xfa, 0x33, 0x46, 0xc5, 0xc6, 0xaa, 0x97, 0xd6, 0xa4, 0xe7, 0x96, 0xf2, 0x2e, 0x7c, + 0x9e, 0x6b, 0x66, 0xfc, 0x25, 0xc4, 0x9f, 0x6b, 0x2b, 0x55, 0xae, 0x61, 0x4c, 0xc8, 0x94, 0x67, + 0x5c, 0x2a, 0x52, 0xef, 0xc0, 0x17, 0x46, 0xe0, 0xb8, 0x5f, 0xcf, 0x72, 0x02, 0xc9, 0x93, 0x59, + 0xb3, 0xc4, 0x7d, 0x8a, 0x33, 0x0c, 0x39, 0x0b, 0xd9, 0xfc, 0x8a, 0x25, 0xd6, 0xd2, 0x4a, 0x27, + 0x50, 0xca, 0x6b, 0x68, 0x98, 0xfc, 0xfe, 0xf8, 0x48, 0x01, 0x11, 0xac, 0x2a, 0x15, 0x98, 0x8f, + 0x12, 0xee, 0xe5, 0xac, 0x79, 0x2d, 
0x6e, 0x56, 0x54, 0x3f, 0xd3, 0x3d, 0x76, 0xab, 0x6e, 0x32, + 0xb5, 0x1b, 0x4c, 0x30, 0xfc, 0x28, 0x15, 0xbe, 0xd7, 0x77, 0x73, 0x65, 0xe5, 0x90, 0xa6, 0xd8, + 0xa9, 0x29, 0xc1, 0x12, 0xfc, 0x81, 0x39, 0x9d, 0x03, 0xb2, 0x57, 0x59, 0x2a, 0x16, 0x26, 0x04, + 0x7d, 0x1f, 0x92, 0xf4, 0xab, 0x06, 0xc9, 0xdc, 0x8b, 0xe1, 0xc7, 0x21, 0x8a, 0x8f, 0x38, 0x2a, + 0x60, 0xde, 0x10, 0x6a, 0xf9, 0xfb, 0x91, 0x0b, 0x87, 0x46, 0x78, 0xd2, 0x8f, 0xd6, 0x1c, 0x9a, + 0xe5, 0xef, 0x95, 0x1b, 0xec, 0xe7, 0xb8, 0xc6, 0x5e, 0xe6, 0x9d, 0x60, 0x8f, 0x6e, 0x70, 0xa4, + 0xb7, 0xe7, 0x46, 0xa4, 0xe9, 0xb8, 0xcc, 0xce, 0xf8, 0x8a, 0x53, 0x7d, 0xfb, 0xc0, 0x8e, 0x01, + 0x3b, 0x9d, 0xe6, 0x68, 0xf9, 0x97, 0xde, 0x9c, 0x5b, 0x1e, 0xdb, 0xb6, 0x5f, 0x25, 0x32, 0xa0, + 0xad, 0x2c, 0x2c, 0xdd, 0x16, 0x7a, 0xd3, 0x74, 0xe9, 0x10, 0x52, 0xdf, 0x55, 0xbc, 0x08, 0x58, + 0x65, 0x61, 0xab, 0x97, 0xe1, 0xfd, 0xfc, 0xa1, 0x20, 0x8f, 0xee, 0xb4, 0x1e, 0x67, 0x66, 0xf9, + 0x10, 0x35, 0x3c, 0xb4, 0x67, 0xa0, 0x61, 0x98, 0xa0, 0xd8, 0x61, 0xe6, 0x16, 0xb8, 0x7a, 0x1c, + 0x3f, 0xda, 0x58, 0x93, 0xa1, 0xcf, 0x26, 0x09, 0xcc, 0x30, 0xe4, 0x9e, 0x6c, 0xf3, 0x40, 0x3a, + 0xf9, 0x31, 0x4b, 0xb2, 0x8f, 0x26, 0xbc, 0xc8, 0x44, 0xa6, 0xed, 0xf1, 0xf9, 0x9b, 0x9f, 0x23, + 0xd6, 0x47, 0x48, 0x93, 0x1d, 0x92, 0x94, 0x64, 0x00, 0x7c, 0xfd, 0xfe, 0x4b, 0xc3, 0x53, 0x3c, + 0x3d, 0x4d, 0xba, 0xd6, 0x05, 0xb6, 0xa6, 0xb7, 0x5d, 0xd4, 0x53, 0x7f, 0x92, 0xf3, 0x4c, 0xf4, + 0x26, 0xdd, 0x8f, 0x27, 0x7f, 0x02, 0x99, 0x1e, 0x8f, 0x66, 0xd9, 0xbb, 0x81, 0x68, 0xde, 0xae, + 0xfa, 0x3c, 0xc6, 0x16, 0x4d, 0xe8, 0xea, 0xe8, 0xe8, 0x46, 0x0f, 0x26, 0x51, 0x98, 0x44, 0xc1, + 0xa8, 0x8f, 0x27, 0xbd, 0x69, 0xd5, 0x60, 0x46, 0xd2, 0xf1, 0x89, 0x3a, 0x9c, 0x91, 0x1f, 0x1e, + 0xbe, 0x3a, 0xa0, 0xb2, 0xe5, 0xfa, 0x0f, 0x1f, 0x88, 0x52, 0x66, 0x4c, 0xda, 0x38, 0x17, 0x55, + 0xf9, 0xe7, 0xeb, 0x31, 0x6f, 0x5c, 0xe5, 0x74, 0x89, 0xfd, 0xe5, 0x0a, 0x36, 0x2b, 0xd4, 0x2c, + 0xbc, 0xe8, 0xd9, 0x93, 0x3e, 0x5f, 0x85, 0x7a, 0x2f, 0x02, 0xc8, 0x1c, 0x3d, 0x26, 0x8f, 0x07, + 0xb2, 0x31, 0x0e, 0x78, 0x23, 0x7d, 0xe3, 0x76, 0x1c, 0x0a, 0x08, 0xf9, 0x2e, 0x6b, 0x37, 0x5a, + 0x69, 0x60, 0x66, 0x58, 0x7e, 0x10, 0x17, 0x7e, 0xa5, 0xf9, 0xf7, 0x51, 0x99, 0x7a, 0xd1, 0x29, + 0x44, 0xe5, 0x8e, 0x9f, 0x88, 0xdf, 0x90, 0x42, 0xfd, 0xf9, 0xbe, 0x74, 0xef, 0xa5, 0x5a, 0xed, + 0x62, 0x92, 0xf7, 0xd9, 0xc9, 0x71, 0xbf, 0x17, 0xa6, 0xdc, 0xbe, 0x81, 0x3f, 0xb2, 0xab, 0xcb, + 0x1e, 0x86, 0x0f, 0xc2, 0x69, 0x0a, 0x63, 0x8a, 0x81, 0x43, 0x68, 0xd3, 0xa4, 0xcf, 0x3b, 0xac, + 0x48, 0x1c, 0x03, 0xf3, 0xd0, 0x6a, 0xd6, 0x87, 0xb2, 0x01, 0x08, 0x47, 0x64, 0xba, 0x98, 0x14, + 0x25, 0xa2, 0x71, 0xeb, 0x06, 0xef, 0x0e, 0x93, 0x22, 0x20, 0x51, 0x5b, 0x63, 0x3e, 0x08, 0x43, + 0x40, 0x82, 0x20, 0x42, 0xdc, 0x0c, 0x86, 0xcb, 0x54, 0xbc, 0xb8, 0x8b, 0x79, 0x21, 0x03, 0x89, + 0x35, 0x36, 0x39, 0xab, 0x6d, 0xf7, 0x5f, 0x47, 0x3b, 0xe3, 0xfa, 0xf5, 0x9e, 0xd4, 0xb7, 0x48, + 0x2d, 0xa1, 0xab, 0x67, 0x7d, 0x3d, 0xc4, 0x0c, 0x03, 0xe6, 0xe8, 0xcc, 0xaf, 0xa6, 0xc8, 0x55, + 0x2d, 0x67, 0xb6, 0x46, 0xf7, 0x70, 0xcb, 0x1b, 0x1e, 0xc5, 0x87, 0x13, 0x51, 0x57, 0xd1, 0x36, + 0x29, 0xfd, 0x77, 0x17, 0x98, 0xf1, 0x61, 0x57, 0x97, 0xca, 0xb7, 0x15, 0x34, 0x53, 0xec, 0x5e, + 0x3f, 0x9a, 0xe5, 0x55, 0x01, 0xc3, 0xd8, 0xea, 0x5c, 0xb3, 0x0d, 0xfb, 0x8a, 0x82, 0x84, 0x8f, + 0x27, 0xea, 0x5c, 0x11, 0x34, 0x28, 0xbd, 0xfd, 0x53, 0x94, 0x24, 0x47, 0x4b, 0x13, 0x13, 0x18, + 0x9a, 0x8b, 0x88, 0xa6, 0x99, 0xe5, 0xee, 0x6d, 0x02, 0x93, 0xef, 0x7a, 0x51, 0x6a, 0xf6, 0x69, + 0xcc, 0xa6, 0x71, 0xad, 0x73, 0x82, 0x23, 0x54, 0x7f, 0xa4, 
0xad, 0x4d, 0xf4, 0xb4, 0xfb, 0x24, + 0x15, 0xdd, 0xef, 0xcf, 0x61, 0x02, 0xd9, 0x9b, 0xef, 0x10, 0x81, 0x43, 0x30, 0x0a, 0x67, 0xfe, + 0xe4, 0x4d, 0x47, 0x19, 0xa4, 0x10, 0x84, 0x06, 0x78, 0x9c, 0x41, 0x22, 0x7a, 0xf4, 0xb0, 0x34, + 0x9e, 0xd1, 0x23, 0x8c, 0xd7, 0x22, 0x25, 0xcc, 0x2c, 0xbf, 0xc6, 0x94, 0xbe, 0x11, 0x3a, 0x7e, + 0x0a, 0x00, 0x8a, 0xd4, 0xad, 0x51, 0x78, 0x69, 0x11, 0x56, 0xb1, 0xee, 0x52, 0x1b, 0xf7, 0xc9, + 0x7a, 0x79, 0x29, 0x34, 0xf3, 0x6f, 0x2b, 0xe8, 0x80, 0x52, 0x46, 0xa0, 0x7f, 0x6d, 0x0f, 0xf9, + 0x20, 0x0b, 0x56, 0x31, 0x26, 0x56, 0xfc, 0xb5, 0xbd, 0x50, 0x24, 0xc0, 0x1a, 0x83, 0x6e, 0x76, + 0x8d, 0x96, 0xbb, 0x1a, 0x23, 0xc7, 0x4a, 0xd1, 0x28, 0x5d, 0xf6, 0xda, 0x29, 0xa4, 0xa8, 0x99, + 0x62, 0xe1, 0xcb, 0xa1, 0xa0, 0xb0, 0x1c, 0x17, 0x67, 0xc9, 0x3d, 0xb9, 0xfd, 0x7a, 0x5e, 0x1e, + 0xf8, 0x42, 0x55, 0x6c, 0x22, 0x37, 0x00, 0xa2, 0xad, 0x2f, 0xc5, 0x31, 0x1b, 0x78, 0x4b, 0xf9, + 0xcc, 0x8a, 0x56, 0xbc, 0x99, 0xa3, 0x33, 0x91, 0x21, 0x57, 0xb9, 0x39, 0x84, 0xab, 0x1a, 0x71, + 0xa3, 0x7e, 0xd9, 0x8b, 0x74, 0x3c, 0x45, 0x86, 0x02, 0x91, 0x4a, 0xaf, 0x42, 0xf9, 0x09, 0xd8, + 0x26, 0x55, 0xec, 0x61, 0x52, 0xcb, 0x25, 0xdd, 0x82, 0x54, 0xe3, 0xf2, 0x3f, 0x1c, 0x94, 0x61, + 0x5f, 0x12, 0x4d, 0x2e, 0xc0, 0x5c, 0xae, 0x9f, 0x6e, 0x13, 0xf9, 0xc4, 0xed, 0xa2, 0xea, 0xa3, + 0x0c, 0x20, 0x3a, 0xce, 0x00, 0x56, 0xfa, 0x21, 0xef, 0x06, 0x2e, 0x6b, 0x1a, 0x5f, 0x1f, 0x74, + 0x1a, 0x6e, 0x30, 0x80, 0x26, 0x62, 0x3e, 0xf0, 0x42, 0x40, 0xfb, 0xc0, 0xa7, 0x69, 0x97, 0xd2, + 0xd3, 0x60, 0x19, 0x2b, 0x0e, 0xd0, 0x56, 0x2f, 0xa8, 0x98, 0xfb, 0xe6, 0xda, 0x3b, 0xd7, 0xfd, + 0x12, 0xbd, 0xcb, 0x94, 0xea, 0xc0, 0x3a, 0xee, 0x22, 0x2f, 0x89, 0x4b, 0xcf, 0x2e, 0xd0, 0x41, + 0x3f, 0x45, 0x19, 0x90, 0x18, 0xfb, 0x98, 0x7b, 0x5e, 0x96, 0xae, 0x2b, 0x49, 0xbe, 0xe1, 0x58, + 0xf0, 0x6b, 0x0c, 0xe8, 0xb7, 0xae, 0xaa, 0x02, 0x57, 0xef, 0x1b, 0x74, 0x17, 0x4b, 0x33, 0xd5, + 0xb9, 0x6d, 0xca, 0x11, 0xb4, 0x31, 0x4e, 0x3b, 0x2f, 0x91, 0x9f, 0x1d, 0xef, 0x6c, 0x96, 0x0d, + 0x04, 0x66, 0x56, 0x21, 0x81, 0xa4, 0x17, 0x83, 0xd7, 0x2a, 0x11, 0x11, 0x81, 0x89, 0x02, 0x5e, + 0x80, 0xc1, 0xe7, 0x75, 0x9f, 0x79, 0x64, 0x82, 0x29, 0x80, 0xa7, 0x9a, 0x95, 0xd0, 0x09, 0xc3, + 0x5b, 0xf7, 0x2f, 0x96, 0x66, 0xd2, 0x55, 0x62, 0x31, 0xd3, 0xd9, 0x80, 0xcc, 0x13, 0x7d, 0xbf, + 0xb6, 0xf0, 0x86, 0x17, 0xfe, 0xa5, 0xb7, 0x0b, 0xbf, 0x34, 0x8d, 0x89, 0x24, 0xc0, 0x90, 0x0f, + 0x78, 0xc0, 0xc6, 0x4b, 0x80, 0x83, 0x91, 0xf7, 0xdf, 0xaa, 0xaa, 0xad, 0x5f, 0x56, 0xa0, 0xe4, + 0xa1, 0x1a, 0x1d, 0x0f, 0x4b, 0xbe, 0x1d, 0xff, 0xdb, 0x81, 0x20, 0x68, 0x61, 0xa7, 0x53, 0x62, + 0xac, 0x82, 0x1b, 0x15, 0x50, 0x0d, 0x19, 0x54, 0x35, 0xd7, 0x33, 0x98, 0x34, 0x0e, 0xb1, 0x8c, + 0xc7, 0xab, 0x04, 0xad, 0xd8, 0x27, 0xb5, 0xf1, 0xe8, 0x06, 0x24, 0xcc, 0x6c, 0x1d, 0xac, 0xb7, + 0x5e, 0x2f, 0x39, 0xa6, 0x68, 0xc5, 0x25, 0x93, 0x9f, 0xcf, 0x79, 0x55, 0x23, 0xd1, 0xb4, 0x8d, + 0x91, 0xa2, 0xc0, 0x10, 0xbe, 0xac, 0x1b, 0x5f, 0xbe, 0xc1, 0x67, 0xd9, 0x8b, 0x31, 0xf7, 0x34, + 0xc9, 0xf1, 0xab, 0xcb, 0x99, 0x3d, 0xd5, 0x49, 0x09, 0xd6, 0x21, 0xd5, 0x99, 0x32, 0xea, 0x29, + 0xef, 0x4a, 0x71, 0x95, 0x66, 0x8f, 0x38, 0xc7, 0xac, 0x74, 0x10, 0x5f, 0x7f, 0x9d, 0xc1, 0x1a, + 0x6c, 0x6c, 0x78, 0x71, 0x80, 0x92, 0xd5, 0x6a, 0x40, 0x18, 0x60, 0x53, 0x9d, 0x5d, 0xcc, 0xff, + 0x02, 0x42, 0x4a, 0x7d, 0x2a, 0xcb, 0x02, 0x73, 0x29, 0xa2, 0x59, 0x9f, 0x75, 0x76, 0x31, 0x0e, + 0xca, 0x6a, 0xdf, 0x87, 0x99, 0xe4, 0x1b, 0x3e, 0x9c, 0x84, 0x9e, 0x35, 0xe3, 0xe0, 0x58, 0xec, + 0x19, 0xc8, 0xdf, 0x6d, 0xdc, 0x12, 0xd0, 0x41, 0x2d, 0x84, 0x36, 0x2d, 0x06, 0x82, 
0x28, 0x95, + 0x44, 0x39, 0x95, 0x10, 0xb0, 0xbb, 0x46, 0x2f, 0x49, 0x57, 0x8f, 0x52, 0x84, 0x39, 0xa1, 0x0a, + 0x84, 0x38, 0x09, 0x6f, 0x01, 0xb1, 0x05, 0x0e, 0x04, 0x0f, 0xfe, 0xa9, 0x3c, 0xf9, 0x20, 0xf1, + 0x32, 0xa9, 0x6f, 0xa6, 0x9e, 0xdf, 0x0c, 0x78, 0x80, 0x4f, 0x0c, 0x2c, 0xb8, 0x77, 0x67, 0x75, + 0x91, 0x39, 0x54, 0x4b, 0xb9, 0xf6, 0x41, 0x1a, 0x88, 0xb2, 0x27, 0x14, 0x4e, 0xd1, 0x85, 0x70, + 0xb1, 0x03, 0x8a, 0x8e, 0xbf, 0xf2, 0x98, 0x64, 0xd3, 0x40, 0x29, 0x03, 0xba, 0xcf, 0x08, 0x73, + 0x84, 0x8b, 0x1c, 0x6b, 0xf8, 0x5b, 0xdf, 0x15, 0x93, 0xc5, 0xa4, 0xb4, 0xf7, 0x41, 0x35, 0x1f, + 0xec, 0x7a, 0x2f, 0x6d, 0x3e, 0xfc, 0x7a, 0x92, 0x77, 0xc8, 0x7b, 0xb2, 0xa9, 0x68, 0xc1, 0x26, + 0x56, 0x18, 0x2b, 0x8a, 0x24, 0xbf, 0xe4, 0x46, 0x98, 0x6d, 0xb1, 0x5e, 0xb6, 0x60, 0x66, 0xa2, + 0x00, 0x62, 0x71, 0xc4, 0xcd, 0xaa, 0xa9, 0x75, 0x87, 0xe6, 0xaa, 0x46, 0xf8, 0x79, 0x92, 0x4f, + 0x9c, 0xae, 0x59, 0x93, 0x90, 0xc3, 0x50, 0x04, 0xa4, 0x79, 0x61, 0xa6, 0x2e, 0x33, 0x5c, 0xd7, + 0x16, 0x4c, 0xf9, 0x97, 0xf8, 0x72, 0x7b, 0xd0, 0x96, 0x56, 0xbe, 0x4a, 0x03, 0x8f, 0xa7, 0xbf, + 0xca, 0x20, 0x36, 0x09, 0xd2, 0x6b, 0x55, 0x5c, 0x0f, 0xa9, 0x68, 0x6a, 0x80, 0xf5, 0x38, 0x71, + 0x49, 0xee, 0x6f, 0xa7, 0xb0, 0xaf, 0xb3, 0x5e, 0xc5, 0x19, 0x06, 0x01, 0x47, 0xac, 0x52, 0x6e, + 0xdf, 0x9a, 0xfe, 0x9d, 0x08, 0xe1, 0xac, 0x9f, 0xb6, 0xa7, 0x6a, 0xbc, 0x51, 0x7c, 0xdd, 0x64, + 0x0f, 0x10, 0xe3, 0x84, 0x1f, 0x61, 0xd7, 0x11, 0xbf, 0x1d, 0xcb, 0x34, 0x78, 0xa9, 0xd9, 0x92, + 0xe9, 0xb9, 0x35, 0xa9, 0x47, 0x76, 0x2b, 0x56, 0x2e, 0x65, 0xa3, 0x4a, 0x35, 0x03, 0x3a, 0xbe, + 0x26, 0x6d, 0x3c, 0x58, 0x6a, 0x46, 0x5d, 0xa0, 0xd6, 0xe1, 0x0a, 0x00, 0xdc, 0xf9, 0x68, 0xe7, + 0x94, 0xef, 0xdf, 0xaf, 0x49, 0xb9, 0xf2, 0xdd, 0xb4, 0x69, 0x2e, 0x7b, 0x9f, 0x83, 0x0c, 0x00, + 0x0b, 0x14, 0x84, 0x48, 0x00, 0x85, 0x81, 0x41, 0x77, 0x63, 0x0a, 0x98, 0x23, 0xf4, 0x29, 0xad, + 0x7c, 0x6a, 0x72, 0x7b, 0xec, 0xf7, 0xbc, 0x0c, 0x19, 0x8f, 0x2e, 0xf7, 0xd9, 0x66, 0x8c, 0x03, + 0x99, 0xe9, 0xc4, 0xa0, 0xa7, 0xb0, 0xf4, 0x8e, 0xf0, 0x70, 0x5e, 0xd4, 0x4c, 0x88, 0x5c, 0xf6, + 0x54, 0x40, 0xe1, 0x64, 0x93, 0xbc, 0xef, 0x0c, 0x31, 0xd9, 0x56, 0x67, 0x17, 0x07, 0xb5, 0xef, + 0x1f, 0x24, 0x68, 0x11, 0xc8, 0x2b, 0xd1, 0x9d, 0x9f, 0x95, 0x80, 0xb6, 0xd6, 0x46, 0xaf, 0xee, + 0xc7, 0xb0, 0xf9, 0x35, 0xf8, 0x1e, 0xc3, 0x77, 0x4f, 0xd5, 0x6b, 0x12, 0xc6, 0x97, 0x1a, 0x46, + 0xd2, 0xa8, 0xbb, 0x81, 0x42, 0x66, 0x7a, 0xce, 0xb6, 0xa6, 0xee, 0xda, 0xac, 0x60, 0xf8, 0x12, + 0xf5, 0x98, 0xb6, 0xbf, 0x6a, 0x4f, 0x95, 0xfc, 0xcf, 0xec, 0xab, 0x95, 0x7c, 0xa9, 0x84, 0x46, + 0x4b, 0xa8, 0x1d, 0xa2, 0x42, 0xfa, 0xa0, 0x79, 0x72, 0x17, 0xa8, 0x89, 0x1b, 0xe9, 0xfc, 0x15, + 0xbb, 0x87, 0x44, 0x94, 0x15, 0xad, 0xbb, 0x4b, 0x1c, 0x6d, 0x91, 0x06, 0x93, 0x59, 0x1a, 0x95, + 0x1f, 0x5c, 0xf8, 0xdf, 0x8e, 0x7d, 0xe3, 0x21, 0xda, 0x46, 0x90, 0xb1, 0x28, 0xa0, 0x87, 0xe6, + 0x2e, 0xcb, 0xd9, 0xce, 0x9b, 0xf2, 0x0a, 0x3f, 0x6b, 0xa5, 0x3f, 0xb6, 0xff, 0x3b, 0xbf, 0xb3, + 0xaf, 0x8a, 0x9a, 0x8b, 0x7b, 0x74, 0x8d, 0x99, 0xca, 0xd5, 0x08, 0x2c, 0xde, 0x54, 0x37, 0xef, + 0x78, 0x45, 0x9c, 0x81, 0xf1, 0x47, 0xd8, 0x8d, 0xe6, 0xf8, 0xc5, 0x76, 0x76, 0xe7, 0xe9, 0x25, + 0x51, 0x14, 0x72, 0x2d, 0xcb, 0x09, 0x97, 0xc9, 0xa5, 0xaa, 0xee, 0x86, 0xaf, 0x5d, 0xff, 0xfa, + 0x14, 0x5e, 0x17, 0xbf, 0xaa, 0xe6, 0xa4, 0x63, 0xd3, 0x35, 0x6e, 0x73, 0x60, 0x7a, 0xd7, 0xf8, + 0x4c, 0xe6, 0x83, 0x2c, 0x7c, 0x8a, 0x1e, 0xc6, 0x59, 0x8b, 0x30, 0xe4, 0xfe, 0xba, 0x38, 0x15, + 0x3c, 0xcf, 0x37, 0xfc, 0x66, 0xe2, 0x40, 0xd9, 0x3a, 0x1d, 0x82, 0x6e, 0x66, 0xd9, 0x5a, 0x34, + 0x63, 0xe6, 
0x0b, 0xb0, 0x87, 0x92, 0xdd, 0x8d, 0xde, 0x7a, 0xe4, 0x2b, 0x6d, 0xd5, 0xf7, 0x73, + 0x16, 0x5b, 0x35, 0x33, 0x00, 0xce, 0xd0, 0x4f, 0x83, 0x55, 0x27, 0x4f, 0x3b, 0x83, 0x88, 0xb3, + 0x13, 0x0d, 0x0e, 0xc5, 0x17, 0x3a, 0x7a, 0x3a, 0x3b, 0x64, 0xf3, 0x6c, 0x3c, 0xb5, 0xee, 0x85, + 0xc1, 0x21, 0xfc, 0x69, 0xda, 0xc6, 0xd3, 0x80, 0x5c, 0xee, 0x02, 0x4e, 0xb7, 0xd9, 0xe8, 0x11, + 0x23, 0x63, 0x28, 0x6d, 0xe6, 0xf7, 0x80, 0xb3, 0xb6, 0xe0, 0x7b, 0x26, 0xae, 0x7a, 0x2d, 0x55, + 0x77, 0x67, 0x98, 0x76, 0xc5, 0x3a, 0xe9, 0x93, 0x78, 0xbd, 0xa8, 0x80, 0x85, 0xb7, 0x45, 0x62, + 0xbf, 0x94, 0x63, 0x17, 0x17, 0x92, 0x3b, 0xad, 0xeb, 0x4e, 0xae, 0x86, 0xdb, 0xa9, 0xe1, 0xec, + 0xb2, 0xed, 0x66, 0x34, 0xe0, 0x61, 0xb5, 0xcb, 0xbb, 0xe3, 0xda, 0x2d, 0xeb, 0xab, 0xd1, 0x08, + 0xb7, 0x7c, 0x5f, 0x84, 0xa6, 0xed, 0x32, 0x3f, 0xa8, 0xc3, 0x3d, 0xbd, 0xd3, 0x43, 0xb8, 0x80, + 0x13, 0x8e, 0x71, 0xbc, 0xa5, 0x55, 0xda, 0xc6, 0x24, 0xc1, 0x9b, 0x27, 0x49, 0x46, 0x99, 0x02, + 0xcc, 0x9e, 0x7b, 0xc1, 0xc8, 0x35, 0xa6, 0x57, 0x79, 0x98, 0xd5, 0xb1, 0xd7, 0x9b, 0x07, 0x12, + 0x01, 0x5a, 0x56, 0x28, 0x89, 0xdf, 0x83, 0x06, 0x05, 0x01, 0x32, 0x67, 0x61, 0xd8, 0xdc, 0x70, + 0xfe, 0x4d, 0xe8, 0x93, 0x3e, 0x7c, 0x4a, 0xf1, 0x8b, 0x39, 0xce, 0x5a, 0x4e, 0x6a, 0xa3, 0xfe, + 0xf0, 0xc9, 0x77, 0xb3, 0xb4, 0x35, 0x36, 0x3e, 0x54, 0xa2, 0xb1, 0xff, 0x9c, 0x92, 0x8d, 0xb1, + 0x07, 0x6a, 0xef, 0x0e, 0x2d, 0x90, 0xaf, 0x04, 0x9c, 0xfe, 0xdc, 0x7e, 0x44, 0xc4, 0x06, 0xcf, + 0xa4, 0x04, 0xd2, 0x6a, 0x31, 0xfd, 0x7d, 0xae, 0x7a, 0x8a, 0x54, 0xe1, 0x1a, 0xb6, 0x18, 0x4a, + 0xb9, 0x6d, 0xe7, 0xf1, 0xca, 0x2a, 0x9e, 0x2f, 0xe1, 0xf1, 0xa3, 0x07, 0x4d, 0x79, 0xed, 0x24, + 0x80, 0x78, 0x63, 0xb9, 0x2f, 0xab, 0xdc, 0xa4, 0xe3, 0xb7, 0x75, 0x56, 0x35, 0x12, 0xf5, 0xcb, + 0xfb, 0x4b, 0x8f, 0xbe, 0xb7, 0xc5, 0xf2, 0xa9, 0x6b, 0x7d, 0xce, 0x2f, 0x46, 0x83, 0x5d, 0x4b, + 0xe4, 0xac, 0x37, 0xb4, 0x2b, 0xa5, 0x38, 0x13, 0x9c, 0xd4, 0x04, 0xab, 0x04, 0x1a, 0xe6, 0xdc, + 0x32, 0x18, 0xe8, 0x63, 0x5c, 0xa3, 0x82, 0xb7, 0xb2, 0xd0, 0xfa, 0x42, 0x36, 0x96, 0x01, 0x83, + 0x93, 0x3d, 0x79, 0xe8, 0xec, 0x56, 0x5e, 0xba, 0x97, 0xf7, 0x61, 0x93, 0x1f, 0x9e, 0x43, 0xe6, + 0x93, 0xe4, 0xcc, 0x09, 0xe3, 0xf2, 0xbb, 0xb9, 0x55, 0x8b, 0xd7, 0x97, 0xff, 0x50, 0xd8, 0xc0, + 0x5a, 0x45, 0xc0, 0xdb, 0x4d, 0x4e, 0x26, 0x04, 0x15, 0xa9, 0xc6, 0xd0, 0x0b, 0xce, 0x08, 0xcf, + 0xb5, 0xea, 0xd1, 0x42, 0xde, 0xad, 0x98, 0xa3, 0x4d, 0x0c, 0xbd, 0x8b, 0x04, 0xf4, 0x75, 0xe9, + 0xda, 0xb1, 0x78, 0xbf, 0x00, 0x63, 0xb6, 0xa0, 0x50, 0xb4, 0xe2, 0xb3, 0x63, 0xdf, 0xbb, 0x80, + 0xfc, 0x07, 0x63, 0xd1, 0x9f, 0x7b, 0x9f, 0x95, 0x3e, 0x35, 0x3e, 0xbf, 0x61, 0x9c, 0xb1, 0xb0, + 0x3c, 0x90, 0xdd, 0xfd, 0xcd, 0x6e, 0xaa, 0xb7, 0x72, 0x55, 0x45, 0xe2, 0xc0, 0xb3, 0x98, 0xc4, + 0x52, 0xf2, 0x91, 0x76, 0x4c, 0x92, 0x61, 0x65, 0x07, 0x72, 0x42, 0x67, 0xbf, 0x72, 0xfa, 0xa3, + 0x21, 0x81, 0x8d, 0xb4, 0x20, 0xb4, 0xc8, 0x8e, 0x6b, 0x9a, 0xab, 0x95, 0x2a, 0x41, 0x54, 0xf9, + 0x7f, 0xbf, 0xc7, 0x25, 0xc7, 0x00, 0x0c, 0xf5, 0x41, 0x7d, 0x66, 0x3f, 0x29, 0x60, 0xc6, 0xd0, + 0xb1, 0xd5, 0x73, 0x51, 0x00, 0xf0, 0x5f, 0xb8, 0x99, 0xd8, 0x68, 0x6d, 0x89, 0xa1, 0x68, 0x78, + 0x78, 0x8e, 0xf8, 0x99, 0x68, 0x26, 0x66, 0x3f, 0x04, 0x43, 0x97, 0x99, 0xcf, 0xa7, 0x20, 0xf1, + 0xc7, 0x7d, 0xb6, 0xb6, 0xb4, 0xda, 0xf8, 0xbf, 0xd8, 0xf5, 0xd4, 0x9b, 0xf5, 0xf1, 0x57, 0xd3, + 0x13, 0xa9, 0x28, 0x0c, 0x9d, 0x5a, 0x2f, 0xa9, 0xa0, 0x2f, 0x86, 0xd7, 0x02, 0x45, 0x8c, 0x38, + 0xd8, 0x78, 0x84, 0xfc, 0x53, 0x3e, 0x24, 0x94, 0xde, 0xa3, 0xb9, 0x46, 0x4e, 0x26, 0xe6, 0x4b, + 0x66, 0xfd, 0xe0, 0x69, 0x8d, 0x82, 
0x02, 0x10, 0x6f, 0x14, 0x6e, 0xb5, 0xf8, 0xa9, 0x17, 0x28, + 0x16, 0x37, 0xd7, 0x10, 0x8c, 0x6d, 0xba, 0x7a, 0x1f, 0xb8, 0x1d, 0x32, 0x3f, 0xfe, 0x98, 0x58, + 0x97, 0xe8, 0x76, 0x0b, 0x90, 0x7a, 0x93, 0x65, 0xbf, 0x6b, 0x86, 0xaa, 0x21, 0x71, 0xf1, 0x2a, + 0xd3, 0xd9, 0xe8, 0x2e, 0xf7, 0x89, 0xc4, 0x39, 0x8d, 0xb0, 0xfc, 0x99, 0xdf, 0xdf, 0xba, 0xbb, + 0x80, 0xf7, 0x42, 0x20, 0xc8, 0x08, 0x72, 0x2a, 0x6f, 0x4e, 0xce, 0x56, 0xa2, 0x85, 0x0c, 0xcd, + 0xcf, 0xe4, 0xd3, 0xd7, 0x69, 0x1a, 0xca, 0x85, 0xa4, 0x12, 0x51, 0x5e, 0xab, 0x76, 0x92, 0x87, + 0x22, 0xf7, 0x5f, 0xd8, 0xc0, 0x77, 0x85, 0xde, 0x9a, 0xef, 0x09, 0x35, 0x99, 0x95, 0xdc, 0x0c, + 0xda, 0x90, 0x3b, 0x3d, 0xc0, 0x33, 0x07, 0x63, 0xb6, 0x95, 0x81, 0x72, 0xc1, 0xcc, 0x17, 0xaf, + 0x16, 0x8e, 0x5d, 0xcc, 0x4f, 0xc7, 0x24, 0x93, 0x27, 0xc9, 0x34, 0xe4, 0x39, 0x12, 0xa5, 0x4c, + 0x73, 0xf8, 0xc8, 0xfe, 0xae, 0xeb, 0xbf, 0x2f, 0x8d, 0xfb, 0x9c, 0x17, 0x0f, 0x10, 0xb5, 0x83, + 0xfe, 0xa3, 0xa4, 0x35, 0xe5, 0xc0, 0x46, 0x93, 0x4b, 0x69, 0x33, 0x6a, 0xa4, 0xf0, 0x6f, 0xb8, + 0x96, 0x89, 0xfb, 0x59, 0xcb, 0xd0, 0xed, 0xb2, 0x87, 0x5d, 0x1c, 0x72, 0x55, 0xa3, 0x69, 0xa1, + 0xba, 0x63, 0xcb, 0x19, 0x15, 0x43, 0x3d, 0x24, 0x91, 0x00, 0x4d, 0x24, 0x36, 0x14, 0x87, 0x1c, + 0x97, 0x97, 0x9b, 0x79, 0xa0, 0x7a, 0x06, 0x12, 0x52, 0xb4, 0xae, 0x09, 0x40, 0xfa, 0x3b, 0x3c, + 0x90, 0x20, 0x9c, 0xb2, 0x5f, 0x27, 0xc9, 0xa0, 0x49, 0xcc, 0xd7, 0x3d, 0xec, 0x73, 0xd4, 0xf6, + 0x93, 0xec, 0xbe, 0xb8, 0x09, 0x82, 0x4e, 0xc7, 0x69, 0x26, 0x67, 0xa6, 0x23, 0xc4, 0x3a, 0x53, + 0x45, 0x90, 0x66, 0x8b, 0x85, 0xb9, 0x98, 0x49, 0x29, 0xfe, 0xeb, 0x4e, 0xbd, 0x04, 0xf7, 0x51, + 0x93, 0x19, 0x09, 0x6e, 0x7d, 0xcc, 0x1c, 0x48, 0x43, 0x31, 0x7c, 0x1b, 0x39, 0x96, 0xc0, 0x87, + 0x43, 0xe1, 0x8d, 0x52, 0x3b, 0xac, 0xfe, 0x74, 0xe8, 0xcd, 0xd2, 0x8b, 0xde, 0x8d, 0x04, 0x23, + 0xa7, 0xdd, 0x73, 0xc4, 0x75, 0xae, 0xc5, 0xa0, 0xb0, 0xa4, 0x64, 0x0e, 0x2b, 0x52, 0x2b, 0x44, + 0x72, 0xb2, 0x54, 0x77, 0x05, 0xbd, 0x5f, 0x03, 0x45, 0x9d, 0x3e, 0x17, 0x22, 0xe5, 0x91, 0xd4, + 0x04, 0x64, 0x9f, 0x84, 0xc8, 0x9d, 0x03, 0x25, 0x66, 0xbe, 0x60, 0x62, 0x12, 0xc1, 0xa7, 0x97, + 0x1f, 0x6a, 0xca, 0x74, 0x48, 0xa4, 0xa3, 0x1e, 0x3d, 0xb6, 0xe9, 0x56, 0x21, 0x9b, 0x99, 0x84, + 0xbd, 0x30, 0x29, 0xee, 0x5e, 0x0b, 0x27, 0x28, 0x61, 0xd2, 0x25, 0x2d, 0xa5, 0x86, 0x85, 0x43, + 0x7f, 0x91, 0xc1, 0x9a, 0x72, 0xfa, 0x74, 0x80, 0x3c, 0xf8, 0x37, 0xd9, 0x60, 0xb3, 0x3c, 0x67, + 0xe6, 0x01, 0xab, 0x11, 0x25, 0xee, 0x8f, 0xa8, 0x2b, 0x39, 0xa9, 0x55, 0x37, 0x13, 0x08, 0x4f, + 0xe2, 0xaa, 0xfa, 0xb1, 0xcf, 0x19, 0x29, 0x8e, 0x0c, 0x74, 0xcd, 0xcb, 0x6c, 0x6d, 0x70, 0x9d, + 0xbd, 0xa0, 0xff, 0x88, 0xc2, 0xf2, 0xd9, 0x80, 0x2f, 0x6d, 0x2d, 0x55, 0xb8, 0x13, 0xbc, 0x10, + 0x63, 0xe2, 0xff, 0x61, 0xb8, 0x39, 0x64, 0xdd, 0x77, 0xe5, 0xb8, 0x25, 0xaa, 0x94, 0x5d, 0x0a, + 0x89, 0x9b, 0xea, 0x51, 0x71, 0x23, 0x7a, 0x64, 0xbc, 0x6d, 0x5e, 0x40, 0x0a, 0xad, 0xe9, 0x96, + 0x4f, 0x53, 0xb0, 0x27, 0x34, 0x47, 0x27, 0x5c, 0x10, 0x06, 0xc3, 0x12, 0xf7, 0x1c, 0xfd, 0x50, + 0x94, 0x3e, 0xb5, 0x35, 0xbc, 0xca, 0x1d, 0x83, 0xf9, 0x46, 0x00, 0x7b, 0xe7, 0xf8, 0x2b, 0x47, + 0xaf, 0x10, 0x2e, 0xbb, 0xe9, 0xfa, 0xbe, 0xbe, 0x50, 0xfe, 0x77, 0x39, 0x95, 0x11, 0x79, 0xf9, + 0x09, 0x8b, 0xa6, 0xa4, 0x57, 0xe0, 0x3e, 0x8d, 0x9f, 0x49, 0x14, 0x7b, 0x5c, 0xc1, 0xa2, 0x3b, + 0xc3, 0xa5, 0x9d, 0xe0, 0xf8, 0xdd, 0x4c, 0xb6, 0xc9, 0xa9, 0x06, 0x6b, 0xf0, 0x51, 0xfc, 0xd4, + 0x58, 0x3f, 0x78, 0x59, 0xc2, 0xc1, 0x3d, 0x6e, 0xb0, 0x3e, 0x18, 0x3b, 0x19, 0xef, 0x46, 0x38, + 0xa2, 0x83, 0x4d, 0xd5, 0x36, 0x24, 0x3e, 0x59, 0x93, 0x4b, 
0x08, 0xd5, 0xae, 0xde, 0xcf, 0xb3, + 0x30, 0x5d, 0x49, 0x32, 0xd0, 0x58, 0xc2, 0xdc, 0xc1, 0x7e, 0x46, 0x16, 0xd7, 0x43, 0x4c, 0x80, + 0xd5, 0x4b, 0xf3, 0x8f, 0x60, 0x25, 0x62, 0xdf, 0x35, 0xcc, 0x2d, 0x59, 0xb0, 0x74, 0xff, 0x78, + 0xc1, 0xf2, 0xb7, 0x80, 0x99, 0x4f, 0x47, 0x58, 0x8b, 0x56, 0x33, 0x0c, 0x9e, 0xfb, 0xfd, 0x13, + 0x65, 0x4e, 0xc0, 0xb1, 0x77, 0xff, 0x9c, 0xdb, 0x20, 0xd4, 0x3d, 0x65, 0xf5, 0x1f, 0x04, 0xe3, + 0x80, 0x50, 0xfb, 0xef, 0x0c, 0xfc, 0x29, 0x8d, 0x70, 0xbd, 0x9b, 0xa6, 0x78, 0x76, 0x68, 0x1b, + 0xf0, 0xfa, 0xf3, 0xc3, 0x72, 0x9c, 0x17, 0xc6, 0x75, 0x89, 0x02, 0xdc, 0xa7, 0x9c, 0x00, 0xf1, + 0x3d, 0x07, 0x3e, 0xb2, 0x02, 0x6d, 0xff, 0x87, 0x7a, 0x4c, 0x00, 0x2d, 0xf8, 0xb3, 0x24, 0xc4, + 0xc8, 0xdc, 0x3f, 0x6a, 0x3c, 0xa5, 0x12, 0x47, 0xe3, 0x2b, 0xf0, 0xd2, 0xe0, 0xad, 0xc2, 0x7f, + 0x55, 0x6d, 0x4d, 0x1f, 0xb0, 0x8e, 0x30, 0x39, 0xb7, 0xb8, 0x62, 0x73, 0xad, 0x9f, 0x9b, 0xf7, + 0x0e, 0x26, 0xf1, 0xe1, 0x16, 0xd5, 0x1f, 0x17, 0x02, 0xcc, 0x83, 0xa1, 0x6d, 0x98, 0xfe, 0x12, + 0xc2, 0x71, 0xb4, 0xc1, 0x54, 0x27, 0xfc, 0x28, 0x86, 0x8d, 0x89, 0x7c, 0xcc, 0xb1, 0xf0, 0xdf, + 0x17, 0xd1, 0x5a, 0x6d, 0xcf, 0x2f, 0xa9, 0x65, 0x46, 0x42, 0x30, 0xf1, 0xd8, 0xea, 0x93, 0x25, + 0xa5, 0x57, 0x46, 0xf9, 0xc6, 0x26, 0x37, 0x9a, 0xce, 0xd6, 0xba, 0xae, 0x7e, 0xd9, 0xf6, 0x0b, + 0x39, 0x0b, 0x52, 0xb2, 0x44, 0x62, 0x30, 0xa9, 0x66, 0x7a, 0x13, 0x8c, 0xa2, 0x4f, 0x60, 0x9d, + 0xda, 0x4c, 0x99, 0x23, 0x35, 0x8d, 0xcb, 0xd8, 0xb6, 0xc3, 0x97, 0xac, 0x3c, 0x8e, 0xfb, 0x74, + 0xe9, 0x6b, 0xd9, 0x9e, 0x24, 0x41, 0x3d, 0x02, 0x17, 0x15, 0x59, 0x63, 0x9c, 0x67, 0xdb, 0xfa, + 0x1e, 0x37, 0x7e, 0x34, 0x1f, 0x78, 0x62, 0x05, 0x22, 0xf3, 0x2d, 0xef, 0x14, 0x62, 0xc2, 0xfa, + 0x59, 0xe7, 0xff, 0x76, 0x58, 0x65, 0x36, 0x53, 0xb4, 0xda, 0x93, 0xc2, 0xa0, 0x68, 0xfd, 0xc4, + 0x42, 0xb5, 0xe3, 0xbd, 0x3c, 0xc9, 0x35, 0x35, 0x75, 0xf0, 0x43, 0xe0, 0x0c, 0xf7, 0x58, 0xb8, + 0xcb, 0xfb, 0x1f, 0xd6, 0x0e, 0x27, 0x62, 0x84, 0xee, 0x19, 0xaa, 0x68, 0x5a, 0x86, 0xb4, 0x4b, + 0x50, 0xb0, 0x09, 0x57, 0xf0, 0x9e, 0xf8, 0x0b, 0x2e, 0x97, 0x15, 0xf6, 0x98, 0x26, 0x23, 0xaf, + 0xd9, 0x47, 0xd1, 0x08, 0x1f, 0x73, 0xe9, 0xb2, 0xff, 0x71, 0x84, 0x2f, 0xa7, 0xf3, 0x74, 0xa7, + 0x01, 0xf1, 0x0c, 0xb8, 0xb7, 0x01, 0xbe, 0x4a, 0xb0, 0xef, 0x04, 0xd7, 0xd1, 0x3e, 0xc5, 0x39, + 0x4e, 0xf5, 0x96, 0xd1, 0x65, 0x75, 0xe3, 0xa5, 0x9c, 0x4d, 0xcf, 0x4a, 0x26, 0x91, 0x44, 0xae, + 0x87, 0x20, 0x6e, 0x42, 0x57, 0x18, 0x62, 0x17, 0x92, 0xb4, 0xf4, 0x60, 0x3e, 0x77, 0x95, 0x08, + 0xe5, 0x18, 0xe3, 0xe0, 0xa3, 0xa1, 0x2c, 0xf9, 0x88, 0x49, 0x6e, 0x94, 0xcf, 0x0c, 0x3a, 0x8c, + 0x98, 0x54, 0x3a, 0x87, 0x5b, 0xc6, 0xd9, 0x6f, 0x06, 0xfa, 0x91, 0x72, 0x09, 0x34, 0xdb, 0x23, + 0xa3, 0xd8, 0x84, 0x8b, 0x3f, 0x37, 0xf9, 0xde, 0x4d, 0x8a, 0xcb, 0x5c, 0xe9, 0x24, 0x31, 0x78, + 0x47, 0x24, 0xa3, 0x3c, 0xb3, 0xe3, 0xab, 0xeb, 0x4e, 0x83, 0x22, 0x90, 0x49, 0x48, 0x41, 0x9b, + 0xa9, 0xc1, 0x05, 0xf1, 0x26, 0x5a, 0x79, 0x2c, 0x25, 0x1c, 0xee, 0x7e, 0x37, 0xed, 0xc9, 0x22, + 0xc2, 0x65, 0x23, 0xb9, 0x3d, 0xd5, 0x17, 0x43, 0x0b, 0x66, 0x6f, 0x03, 0x2c, 0xf2, 0x0c, 0x00, + 0x30, 0x10, 0x63, 0xa1, 0x0f, 0x11, 0xbb, 0xad, 0x2f, 0xd9, 0x62, 0x73, 0x31, 0x10, 0xe9, 0x3b, + 0xa5, 0x95, 0xb9, 0x89, 0x19, 0xfc, 0x3d, 0xc3, 0x78, 0x1e, 0x8f, 0x26, 0x53, 0xc0, 0x1b, 0xaf, + 0xad, 0xe1, 0x3b, 0x98, 0xab, 0xe7, 0xd0, 0x09, 0x53, 0xee, 0xd8, 0x13, 0xd2, 0x08, 0x2b, 0xad, + 0x25, 0xec, 0x0b, 0xc8, 0xaf, 0xa8, 0xdf, 0x56, 0x38, 0x73, 0x16, 0x3d, 0x00, 0x5c, 0x29, 0x18, + 0xf5, 0x20, 0xe6, 0xbb, 0xe8, 0x42, 0x52, 0x20, 0xca, 0x8b, 0x7c, 0xc2, 0x2d, 0xf2, 
0xe2, 0xc4, + 0x53, 0xab, 0x26, 0x36, 0xf6, 0xdf, 0x68, 0x2b, 0x3b, 0xdb, 0x4e, 0x9f, 0x25, 0xfc, 0x8c, 0xa1, + 0x7c, 0xc1, 0x93, 0x48, 0xa4, 0xf3, 0x70, 0x7d, 0x1a, 0xd0, 0xd8, 0xe2, 0x15, 0xf4, 0x85, 0xd9, + 0xb0, 0xeb, 0x1a, 0x32, 0xd3, 0xd6, 0x57, 0x56, 0x69, 0x9c, 0x0c, 0xf1, 0xd0, 0xba, 0x32, 0xf3, + 0x20, 0x2d, 0x48, 0xa3, 0x8d, 0x08, 0xe0, 0x6a, 0xd3, 0x20, 0x9e, 0x43, 0x93, 0xd4, 0x78, 0xf8, + 0x8d, 0x3d, 0x97, 0xae, 0xce, 0x52, 0x0a, 0xdd, 0x73, 0x2f, 0x44, 0x1d, 0x4d, 0x9f, 0x2f, 0x92, + 0x2d, 0x22, 0x61, 0x2e, 0x74, 0x65, 0x39, 0x85, 0x36, 0xcd, 0x95, 0xa1, 0xad, 0xad, 0x5c, 0xfb, + 0x64, 0x18, 0x9e, 0x42, 0xbf, 0x38, 0x53, 0x66, 0x31, 0xeb, 0xc4, 0xeb, 0xd6, 0xff, 0xd5, 0x02, + 0x4b, 0x3a, 0xa2, 0xbe, 0x3c, 0x45, 0xf1, 0xe8, 0xa5, 0x02, 0xdc, 0x0e, 0x93, 0xaa, 0x2b, 0x94, + 0xa5, 0x7c, 0x0f, 0xe8, 0x02, 0xfa, 0xeb, 0x34, 0xfc, 0xbe, 0x40, 0x32, 0x13, 0x3c, 0x6e, 0x82, + 0x47, 0x03, 0xa5, 0x06, 0x25, 0x02, 0xcf, 0xf1, 0x2b, 0x48, 0xd0, 0x8b, 0xf7, 0x69, 0xf5, 0x98, + 0xdb, 0x05, 0xa3, 0xa5, 0x2d, 0xe2, 0x53, 0x8d, 0x97, 0x9f, 0xbd, 0xf3, 0x6d, 0xe1, 0x81, 0x6a, + 0x2c, 0x6d, 0x88, 0x7c, 0x42, 0x9c, 0x82, 0x97, 0x3a, 0xbe, 0x37, 0x53, 0x5c, 0xff, 0x00, 0x22, + 0xad, 0x3c, 0x4b, 0xc7, 0x8c, 0x9c, 0x88, 0x51, 0xa6, 0xdb, 0x7e, 0xc4, 0xf8, 0x34, 0x00, 0x11, + 0xaa, 0x65, 0xab, 0x6a, 0xc8, 0x41, 0xbb, 0xe4, 0xa2, 0x3a, 0xef, 0x2f, 0xc1, 0x5c, 0xb5, 0xc8, + 0xa1, 0xbd, 0x06, 0x81, 0x96, 0xb2, 0x20, 0xa1, 0x87, 0xa7, 0x76, 0x8e, 0x50, 0xb1, 0x7f, 0xd2, + 0x1c, 0x1c, 0x59, 0x9b, 0x7c, 0xbb, 0xf0, 0x79, 0x89, 0x90, 0x78, 0x8b, 0x43, 0x26, 0x30, 0x20, + 0xaa, 0xb5, 0x1a, 0x24, 0x90, 0xeb, 0x9a, 0x2e, 0x1d, 0x51, 0xb1, 0x27, 0x9b, 0xce, 0x83, 0x11, + 0x0b, 0x07, 0x13, 0x73, 0x07, 0x96, 0x65, 0x49, 0x9b, 0xf8, 0x6c, 0x4d, 0xb8, 0xe3, 0x15, 0x76, + 0x02, 0x89, 0x36, 0x60, 0xe8, 0xa1, 0x6b, 0x53, 0xfa, 0x2a, 0xcc, 0x9a, 0x18, 0xf1, 0x19, 0x2d, + 0x36, 0x75, 0x74, 0x84, 0xc2, 0x70, 0x1d, 0x02, 0xe8, 0x34, 0xa6, 0xbf, 0xb0, 0xbb, 0xda, 0xeb, + 0x9b, 0x6f, 0x37, 0x50, 0x97, 0x78, 0xe9, 0x6f, 0xba, 0xd2, 0x66, 0xf5, 0x50, 0xfa, 0x43, 0x04, + 0xbf, 0xfb, 0x8d, 0x42, 0x76, 0xe8, 0xcd, 0xf6, 0xa4, 0xdd, 0xba, 0xf9, 0xa4, 0x6c, 0x31, 0xba, + 0x9c, 0x46, 0xc3, 0x9a, 0x5d, 0xd4, 0xcc, 0xfe, 0x83, 0xa5, 0xc7, 0x12, 0x17, 0x6d, 0x23, 0x11, + 0xfb, 0xe0, 0x93, 0x7d, 0x71, 0xfe, 0x6c, 0x37, 0x6b, 0x8d, 0x8f, 0xc5, 0x07, 0x74, 0x4f, 0x0c, + 0xdf, 0x25, 0x79, 0x13, 0x23, 0xeb, 0x72, 0x34, 0xaf, 0x8f, 0x72, 0xfa, 0xf5, 0xe2, 0x02, 0x00, + 0xa2, 0xb9, 0x34, 0x62, 0xfc, 0x34, 0x38, 0x21, 0x1d, 0xb0, 0xd2, 0x24, 0x8e, 0xbf, 0x49, 0xef, + 0x74, 0xb0, 0xc9, 0xfc, 0x07, 0x47, 0xc0, 0x67, 0x39, 0x5c, 0x32, 0xce, 0x58, 0xc2, 0x67, 0xec, + 0x8c, 0x0f, 0xf9, 0xec, 0x37, 0x55, 0x20, 0x58, 0xb7, 0xcc, 0xd4, 0x7b, 0xb2, 0x7b, 0x2c, 0xde, + 0xa0, 0x36, 0xc4, 0x04, 0xda, 0xe7, 0x7c, 0x27, 0x46, 0x53, 0x3c, 0x1c, 0xb1, 0xc6, 0xf2, 0x64, + 0xe0, 0x3c, 0x8c, 0x1a, 0xcf, 0x75, 0xcf, 0x51, 0xc9, 0xb4, 0xf2, 0xb9, 0x2a, 0xd5, 0xb8, 0xa9, + 0xcc, 0x08, 0x1f, 0xa6, 0x06, 0x4e, 0xe9, 0x20, 0x9a, 0x7f, 0x02, 0x34, 0x3f, 0xeb, 0xe9, 0x37, + 0x17, 0x33, 0x1b, 0xd2, 0xb0, 0xc9, 0x3b, 0x01, 0x31, 0x79, 0x33, 0xa5, 0x8e, 0x1c, 0x02, 0xc4, + 0xce, 0xd8, 0x66, 0xe5, 0xa3, 0x0e, 0x2f, 0x1d, 0xed, 0x70, 0x4e, 0x9c, 0xc5, 0x12, 0x22, 0x1b, + 0xd7, 0x7f, 0x84, 0x66, 0xd4, 0x69, 0xab, 0x96, 0x76, 0x3d, 0x5f, 0x6c, 0x1e, 0x6c, 0x26, 0xe8, + 0x49, 0xd0, 0xe4, 0x23, 0x3a, 0x88, 0x17, 0xf0, 0x20, 0x9e, 0x50, 0x23, 0x31, 0x5a, 0x86, 0xfc, + 0xc5, 0x96, 0xd7, 0x06, 0xbe, 0x81, 0x8c, 0x5a, 0xf8, 0x73, 0xd2, 0xb0, 0x54, 0x42, 0x9b, 0x21, + 0xc6, 0x0d, 
0xad, 0x1f, 0xd8, 0x37, 0xc7, 0x6c, 0x3f, 0xfb, 0x76, 0xb2, 0x92, 0x32, 0x4e, 0xa5, + 0xff, 0x4e, 0xa2, 0xe6, 0xa0, 0x56, 0xd8, 0x06, 0x37, 0xd2, 0x28, 0x7e, 0xfe, 0x36, 0xb8, 0x0b, + 0x09, 0xd6, 0x6d, 0x30, 0x38, 0xba, 0xa0, 0x18, 0x7c, 0xad, 0x48, 0x22, 0xfb, 0xc5, 0x90, 0x7f, + 0x74, 0xcf, 0x83, 0x53, 0x0c, 0xd8, 0xfa, 0xd0, 0x4e, 0x81, 0xcd, 0xc5, 0x41, 0xe4, 0x1a, 0xab, + 0x33, 0x5e, 0x5a, 0xd7, 0xfb, 0x95, 0x45, 0xf2, 0x5e, 0x1a, 0x17, 0xbf, 0x72, 0x72, 0x50, 0xcd, + 0xb2, 0x3b, 0x46, 0x09, 0xd3, 0xed, 0x1d, 0x44, 0x62, 0xf6, 0x1e, 0x80, 0xb4, 0xfa, 0x8d, 0xe2, + 0xd1, 0xa1, 0xe8, 0x5b, 0x16, 0xf1, 0x6f, 0x08, 0x7d, 0x11, 0x26, 0xf6, 0xb1, 0x6c, 0xf6, 0x17, + 0xad, 0xd6, 0x6a, 0x97, 0xea, 0xc1, 0xbb, 0x42, 0xa4, 0x2c, 0x1d, 0x93, 0xb9, 0x38, 0xce, 0xaf, + 0xe4, 0xc7, 0x00, 0xf8, 0x7e, 0x33, 0x1f, 0x27, 0xf1, 0x5d, 0x2d, 0xea, 0x49, 0x02, 0x42, 0x4a, + 0xdf, 0xce, 0xbc, 0x65, 0x3d, 0x58, 0xb5, 0x12, 0xe4, 0x85, 0xbc, 0xe1, 0xf4, 0x0f, 0x73, 0x2c, + 0xa1, 0x7a, 0x82, 0x7b, 0x40, 0x99, 0x53, 0x8c, 0xc5, 0xb4, 0xa6, 0x3b, 0x5b, 0x98, 0xb0, 0x67, + 0x24, 0x94, 0x3e, 0x08, 0x9b, 0x0c, 0x06, 0x6e, 0x58, 0x0e, 0xe7, 0x52, 0xcf, 0x82, 0x66, 0x06, + 0xde, 0x08, 0x40, 0x7b, 0x75, 0xd5, 0xed, 0xc6, 0x42, 0xf6, 0x7a, 0xdc, 0xd3, 0x56, 0x8f, 0xcb, + 0xae, 0x1f, 0xb9, 0x87, 0x5e, 0x48, 0x93, 0xe4, 0xdc, 0x7b, 0x01, 0x3c, 0x31, 0x63, 0xa2, 0x62, + 0x78, 0xeb, 0x05, 0xd1, 0xed, 0x6f, 0xeb, 0x75, 0x07, 0x14, 0x29, 0x82, 0x41, 0x69, 0xfe, 0x6f, + 0x22, 0xf2, 0x86, 0x17, 0xdd, 0xbe, 0x53, 0xca, 0xf5, 0xc2, 0x9f, 0x83, 0x06, 0x70, 0x92, 0x5f, + 0x0a, 0xf8, 0x37, 0x9b, 0x14, 0xd7, 0xda, 0x5d, 0x8a, 0x77, 0x57, 0x8c, 0x3e, 0x5a, 0xf5, 0x13, + 0xa9, 0xdb, 0x55, 0xec, 0x38, 0x1f, 0x68, 0x34, 0x91, 0x72, 0xc0, 0x55, 0x60, 0xad, 0xec, 0x09, + 0xeb, 0x31, 0x6e, 0x3c, 0x63, 0x2d, 0x0a, 0x13, 0xd3, 0x57, 0x8c, 0x87, 0xb5, 0xf1, 0xcc, 0x7e, + 0x21, 0x89, 0xc7, 0x6e, 0x71, 0x09, 0x26, 0xc8, 0x6d, 0xa3, 0xd9, 0x28, 0x1b, 0xff, 0x7d, 0x54, + 0xfe, 0xd1, 0x0d, 0x43, 0x0b, 0x21, 0xa7, 0x8a, 0x67, 0xe4, 0xbd, 0xc1, 0x4b, 0x02, 0x06, 0xed, + 0x04, 0xcf, 0xc2, 0xdb, 0x2d, 0x37, 0x3d, 0x21, 0xcb, 0x64, 0x1f, 0xcb, 0x97, 0xe1, 0x25, 0x51, + 0xd1, 0xaa, 0xca, 0xc8, 0x64, 0x4b, 0x07, 0x3c, 0xc7, 0x2a, 0xd1, 0xcd, 0x6b, 0xa6, 0x6a, 0xbf, + 0xe2, 0x4a, 0xe0, 0xbb, 0x1e, 0xe6, 0xf2, 0xec, 0x70, 0xa4, 0xf2, 0xfb, 0xf9, 0x11, 0x5b, 0xfa, + 0xe4, 0xdf, 0x14, 0xc0, 0xd1, 0x4a, 0xaf, 0x5d, 0x24, 0x8f, 0xf9, 0xd6, 0xac, 0xcc, 0xc0, 0xfe, + 0x93, 0xf3, 0xc1, 0x89, 0xcc, 0x0e, 0x24, 0xa2, 0x18, 0x5d, 0x5c, 0xe8, 0xfa, 0x5e, 0x75, 0x3d, + 0x52, 0x5c, 0x63, 0x35, 0x6b, 0xb3, 0xfc, 0xc2, 0xa6, 0x6e, 0x73, 0xcd, 0x3d, 0x52, 0xa7, 0x2b, + 0xb2, 0x78, 0x26, 0xc0, 0x94, 0x07, 0xc1, 0x30, 0xc0, 0xf4, 0xa0, 0x43, 0x5e, 0x31, 0xcc, 0x9e, + 0x23, 0xa9, 0x83, 0x6c, 0x4d, 0x6b, 0x7e, 0xe3, 0x87, 0x74, 0xa8, 0x86, 0x9e, 0x3d, 0xb2, 0xbf, + 0xd7, 0x95, 0xc9, 0x4e, 0x1e, 0xb2, 0x87, 0x9f, 0x2c, 0x51, 0xd8, 0x18, 0xcd, 0xec, 0xd4, 0xee, + 0x7c, 0x89, 0xfe, 0x36, 0x06, 0x38, 0xf8, 0x4c, 0x62, 0xbd, 0x9f, 0xb1, 0xe6, 0xb5, 0xee, 0x07, + 0xb2, 0xcd, 0x6e, 0x00, 0x7e, 0xde, 0x6e, 0x89, 0xb8, 0x74, 0xe5, 0x3d, 0x11, 0x9c, 0x66, 0xac, + 0x59, 0x17, 0x47, 0x62, 0x90, 0x84, 0xdd, 0x74, 0x70, 0x5b, 0xcf, 0x73, 0xc2, 0xc8, 0x38, 0x07, + 0xf6, 0xf6, 0x77, 0x0b, 0x66, 0xb7, 0x7e, 0xdf, 0x6b, 0x52, 0xd1, 0x3a, 0xf7, 0x8e, 0x6b, 0x92, + 0x8d, 0x68, 0x63, 0xb5, 0x00, 0x2e, 0x1c, 0x27, 0xc6, 0xf5, 0xbc, 0xb8, 0xf4, 0xc3, 0x9d, 0x22, + 0xbd, 0xe2, 0xfd, 0xc2, 0xd4, 0x56, 0x7b, 0x2a, 0x86, 0x0f, 0x12, 0xd2, 0xab, 0xdb, 0xf5, 0x30, + 0x92, 0x11, 0x84, 0x38, 0xb2, 0x9c, 
0xab, 0x49, 0x30, 0x93, 0xef, 0x83, 0x82, 0xeb, 0x2e, 0xad, + 0x19, 0xd8, 0xa8, 0xbc, 0x7f, 0xca, 0xc4, 0x17, 0xac, 0x97, 0x01, 0xa8, 0x78, 0x9b, 0x82, 0x90, + 0x0e, 0x39, 0x05, 0xb1, 0x8d, 0x6d, 0xcd, 0x95, 0x39, 0xbf, 0xba, 0x15, 0x65, 0xd7, 0x02, 0x40, + 0x9f, 0xd2, 0x4d, 0xbd, 0xa0, 0xc1, 0xf6, 0xfc, 0x1f, 0x67, 0x2e, 0x82, 0xc6, 0x1f, 0x10, 0xd9, + 0x4f, 0xde, 0x6f, 0xb4, 0x28, 0xdf, 0x0b, 0x66, 0xd7, 0x12, 0x7a, 0xf8, 0x82, 0x2d, 0xd5, 0xe6, + 0x32, 0x0c, 0x64, 0x59, 0x75, 0x74, 0x18, 0x61, 0xda, 0xd4, 0x59, 0x3b, 0xfd, 0xae, 0x43, 0xa2, + 0x7f, 0x80, 0x78, 0xcf, 0x5e, 0x80, 0x6f, 0x31, 0xa0, 0x98, 0xae, 0x36, 0x1e, 0x95, 0x42, 0xa2, + 0x59, 0x19, 0xcd, 0x10, 0xe4, 0xbd, 0xab, 0x0b, 0xc0, 0x47, 0xa0, 0xe6, 0xed, 0x09, 0xc8, 0x48, + 0x56, 0x66, 0xc7, 0xe4, 0x0d, 0x37, 0x0c, 0xb6, 0x41, 0xdb, 0x3a, 0x36, 0xb4, 0x80, 0x56, 0x6e, + 0xb5, 0x23, 0x92, 0x24, 0xf6, 0x7e, 0xe8, 0xaf, 0xd5, 0x61, 0xe8, 0xa4, 0x40, 0x76, 0xf8, 0xc7, + 0x1d, 0xbd, 0xbc, 0xc7, 0xef, 0x33, 0xb7, 0xe6, 0x52, 0x79, 0x34, 0xb3, 0x10, 0x1c, 0x04, 0xbd, + 0x73, 0x57, 0x5a, 0xfa, 0xf9, 0xb4, 0x80, 0xa2, 0x31, 0x0d, 0x82, 0xfc, 0x81, 0x65, 0x63, 0xc3, + 0x46, 0x57, 0x56, 0x98, 0xbe, 0x14, 0x1b, 0x4a, 0x40, 0xe4, 0x08, 0x60, 0x1b, 0xad, 0xa0, 0xeb, + 0x2a, 0x3f, 0x31, 0x31, 0xad, 0x6e, 0x6f, 0x91, 0x3b, 0xba, 0xd7, 0x84, 0x61, 0x32, 0x20, 0xa5, + 0x17, 0xc9, 0xc6, 0x33, 0x51, 0xd0, 0x79, 0x12, 0x50, 0x80, 0xef, 0x05, 0x80, 0xbf, 0xbc, 0xfd, + 0x5c, 0xe9, 0x6b, 0x8d, 0x3b, 0x20, 0xa4, 0x84, 0x84, 0x16, 0x58, 0xfb, 0x72, 0x25, 0x9f, 0x72, + 0x8b, 0x95, 0xf0, 0xc2, 0x4b, 0x4e, 0x1f, 0x54, 0x00, 0x25, 0xd4, 0x18, 0x3e, 0xdf, 0x8a, 0x70, + 0x3e, 0x81, 0xca, 0xd8, 0xe4, 0x26, 0x1a, 0x47, 0xad, 0xf2, 0xbf, 0x57, 0x84, 0x59, 0x53, 0x6c, + 0xf7, 0xbf, 0x74, 0x91, 0x85, 0xc5, 0x56, 0x28, 0xa5, 0xc8, 0x39, 0x46, 0x7b, 0x44, 0xef, 0x08, + 0x6e, 0xaf, 0xea, 0x78, 0x56, 0x67, 0x16, 0xce, 0x99, 0x53, 0xc3, 0xfa, 0x3e, 0x74, 0x86, 0xaf, + 0xc7, 0x3a, 0x91, 0x04, 0xae, 0xc3, 0xe5, 0x13, 0x08, 0x63, 0x30, 0x2f, 0x59, 0xfa, 0x6b, 0x2f, + 0x8b, 0xca, 0x1b, 0x62, 0x4a, 0x29, 0x67, 0x94, 0x21, 0x2a, 0x39, 0x92, 0xe7, 0x9e, 0x52, 0x86, + 0x78, 0xd6, 0xdb, 0xb0, 0x3a, 0x7c, 0x9b, 0x56, 0x5a, 0x9e, 0xb6, 0x9f, 0xd4, 0xfc, 0x4a, 0x40, + 0xe4, 0x9f, 0x10, 0xfb, 0x22, 0x42, 0x69, 0x55, 0xf5, 0x70, 0xcd, 0xb4, 0x43, 0x25, 0xb7, 0x1f, + 0x4a, 0xdd, 0x9b, 0x99, 0x8e, 0xca, 0x7c, 0x8b, 0x3a, 0x9b, 0xbc, 0x91, 0x54, 0x9b, 0x09, 0x5a, + 0x34, 0xab, 0xb6, 0x06, 0x37, 0xa0, 0xd5, 0x4a, 0x16, 0x3c, 0x73, 0xca, 0x4e, 0x80, 0xfd, 0x4c, + 0x1d, 0x9a, 0xc4, 0x82, 0xfb, 0xe5, 0x3d, 0x90, 0x79, 0x21, 0x6e, 0x4a, 0xdc, 0xfa, 0x14, 0x03, + 0x4f, 0xc6, 0x36, 0xe4, 0x80, 0x2c, 0x17, 0x70, 0xd0, 0xf5, 0x12, 0xd0, 0x86, 0x68, 0x24, 0x19, + 0x97, 0xf4, 0xce, 0x1c, 0xd3, 0xdd, 0x77, 0xfc, 0x74, 0x2a, 0x7e, 0x63, 0xa8, 0x86, 0x5a, 0xc7, + 0x61, 0x13, 0x36, 0x7c, 0xc6, 0x1a, 0xbe, 0xeb, 0xd7, 0x5a, 0x11, 0xea, 0x1d, 0x27, 0xa2, 0x63, + 0xa6, 0xfb, 0x9f, 0xe0, 0xce, 0xc1, 0xc7, 0xe1, 0xa9, 0x43, 0xc7, 0x42, 0x38, 0xeb, 0x36, 0x05, + 0x9f, 0xc6, 0x3e, 0x4a, 0x31, 0x7d, 0x37, 0xef, 0xd6, 0xf8, 0xb7, 0x7e, 0xdd, 0x08, 0xaf, 0x47, + 0x03, 0xdb, 0xea, 0x7e, 0xb4, 0x4d, 0xe3, 0x4c, 0x5c, 0x56, 0x95, 0x3a, 0x07, 0x78, 0x9a, 0x2c, + 0x8a, 0x82, 0x30, 0x2b, 0x5a, 0x88, 0x90, 0xa3, 0xe6, 0x5f, 0x08, 0x61, 0xa0, 0x43, 0x97, 0xc6, + 0x1c, 0xb0, 0x99, 0x65, 0x58, 0x0f, 0x39, 0xe1, 0x64, 0xa4, 0x73, 0x14, 0x3e, 0x6d, 0x73, 0x5c, + 0x48, 0xc4, 0xf7, 0x9c, 0x61, 0x40, 0x4a, 0x33, 0x1d, 0x30, 0xdb, 0xb4, 0xc5, 0x75, 0xf6, 0xd8, + 0x4b, 0x88, 0x5c, 0x3a, 0xef, 0x80, 0xd9, 0x41, 0xd3, 0x64, 
0xe6, 0x8e, 0xfc, 0x3c, 0x14, 0x3d, + 0x6a, 0x02, 0xb5, 0x3a, 0xac, 0x87, 0x15, 0xfd, 0x44, 0xac, 0x7f, 0xc9, 0xda, 0xf5, 0xe3, 0xa8, + 0xd9, 0x55, 0x4c, 0xba, 0x6d, 0xab, 0xb6, 0xde, 0x1e, 0xf5, 0xc4, 0xc8, 0xa1, 0xf3, 0x2b, 0x3d, + 0x68, 0x4d, 0xa4, 0x81, 0xa4, 0xa4, 0xc7, 0xb1, 0x33, 0x52, 0xe7, 0xad, 0x13, 0x8e, 0x24, 0x83, + 0x8c, 0x3a, 0x0d, 0xcf, 0x10, 0xba, 0x27, 0x86, 0xe7, 0xf0, 0x29, 0x40, 0xdd, 0x84, 0x0a, 0x4b, + 0x67, 0x89, 0x99, 0xea, 0x04, 0xab, 0xb9, 0xc9, 0x8a, 0xab, 0xa3, 0x6d, 0x7f, 0x8b, 0xae, 0x22, + 0x62, 0x4a, 0xa6, 0x40, 0xa8, 0x39, 0x38, 0xaa, 0x06, 0xce, 0xba, 0xea, 0x04, 0x59, 0x73, 0x0e, + 0x0b, 0x73, 0x37, 0x7b, 0xea, 0xfe, 0x77, 0xfd, 0xf9, 0xf3, 0x97, 0x3f, 0xd7, 0x76, 0xd2, 0x9a, + 0xf4, 0x1d, 0x14, 0x39, 0xa5, 0x21, 0x23, 0x0c, 0x5f, 0x45, 0x43, 0xe1, 0xbb, 0xae, 0x34, 0xe4, + 0xed, 0xf3, 0x90, 0x1e, 0x72, 0x23, 0xfa, 0xf2, 0x6a, 0x5f, 0xa3, 0xbc, 0x27, 0x16, 0x6b, 0x7b, + 0xd0, 0x64, 0xef, 0xd3, 0x0c, 0xa3, 0xeb, 0x8f, 0xad, 0x02, 0x5b, 0x24, 0xcf, 0xa5, 0xf3, 0xf5, + 0xab, 0xb0, 0x9f, 0xd4, 0x79, 0x49, 0xc5, 0x94, 0x03, 0xdd, 0x60, 0x5c, 0x03, 0x02, 0xd1, 0xb7, + 0x61, 0x33, 0xa2, 0xc6, 0x5f, 0x11, 0x61, 0x5b, 0x7c, 0x2b, 0x26, 0x87, 0xd4, 0x1c, 0x65, 0x1b, + 0x09, 0xa4, 0xf9, 0x43, 0xec, 0xe3, 0x36, 0x7e, 0x21, 0x16, 0x59, 0xfe, 0x28, 0xf9, 0x67, 0x91, + 0x72, 0x01, 0x7a, 0x26, 0xe0, 0x9d, 0xbc, 0xfc, 0x4e, 0xdb, 0x96, 0x1b, 0x03, 0x13, 0x4d, 0xd4, + 0x5f, 0x9a, 0x96, 0x3a, 0xe4, 0x18, 0x6f, 0xc7, 0x13, 0xfe, 0x4f, 0xad, 0x18, 0x02, 0xd9, 0x63, + 0x2a, 0x9f, 0xe8, 0xe2, 0x0f, 0xca, 0x02, 0x9c, 0xae, 0x7c, 0xe4, 0x42, 0xf1, 0x1b, 0x2e, 0x56, + 0xaf, 0xa3, 0x91, 0x4a, 0x94, 0x08, 0x7b, 0xb9, 0xf7, 0x40, 0x18, 0x60, 0xef, 0x80, 0x23, 0x8e, + 0x9a, 0x5c, 0x2d, 0xcf, 0x1a, 0xae, 0x6f, 0x30, 0xcb, 0x47, 0x78, 0x4d, 0x7b, 0x86, 0x3d, 0x13, + 0x25, 0xad, 0x23, 0xe8, 0xb8, 0xef, 0x9a, 0x7e, 0xf7, 0x8e, 0xf7, 0xf5, 0x54, 0xe3, 0xa4, 0x02, + 0x2a, 0x3c, 0x9a, 0xca, 0x04, 0xea, 0x9e, 0x00, 0xef, 0x03, 0x65, 0xfc, 0xd3, 0xbf, 0xd1, 0x42, + 0xd9, 0xe8, 0x55, 0xfd, 0x0b, 0x81, 0xb6, 0xab, 0xd5, 0xc7, 0x0d, 0x91, 0xeb, 0x19, 0x6b, 0xe2, + 0x9d, 0x38, 0x53, 0x4a, 0x83, 0xbb, 0xc3, 0xd0, 0x58, 0x65, 0x23, 0x93, 0x05, 0xe6, 0xcd, 0xe1, + 0x7b, 0x91, 0x1d, 0x2c, 0xcf, 0x35, 0x66, 0x82, 0x75, 0x0e, 0x86, 0x95, 0x89, 0x8d, 0x48, 0xf4, + 0x03, 0x93, 0x6b, 0x23, 0x6b, 0xa3, 0xd7, 0x02, 0x60, 0xa7, 0x43, 0x4c, 0x26, 0xcb, 0xe9, 0xdb, + 0x09, 0x41, 0x50, 0xe4, 0x74, 0x97, 0x06, 0xed, 0x94, 0x39, 0x9d, 0xc7, 0x1a, 0x06, 0xb8, 0x77, + 0x60, 0x5f, 0xcc, 0x70, 0x3a, 0x9e, 0x95, 0x89, 0xd2, 0x4d, 0x6d, 0x60, 0xa2, 0x4a, 0x8d, 0x1f, + 0x91, 0x07, 0x3f, 0xc7, 0xf8, 0xcf, 0x06, 0xea, 0x3a, 0xd6, 0xfd, 0xd8, 0x47, 0xce, 0x04, 0x8e, + 0x1b, 0x0f, 0x4f, 0x9c, 0x75, 0x82, 0x92, 0xee, 0x9b, 0xb5, 0x74, 0x2e, 0x10, 0xaf, 0x05, 0x2b, + 0x3c, 0x34, 0x88, 0x8f, 0x8c, 0x8a, 0x55, 0x22, 0xd3, 0x1d, 0x5b, 0xf4, 0x94, 0xbc, 0x61, 0x20, + 0x3e, 0xe5, 0x94, 0xf0, 0x66, 0x87, 0x90, 0x99, 0x62, 0x43, 0x16, 0x3c, 0xad, 0x0e, 0x18, 0x7d, + 0x20, 0x65, 0x56, 0x2b, 0x49, 0x9a, 0xb1, 0x00, 0x79, 0x6f, 0x51, 0x75, 0xf1, 0xe2, 0xb5, 0x39, + 0x5e, 0xed, 0x6b, 0x1c, 0x63, 0xc1, 0x29, 0xe8, 0x39, 0x13, 0x41, 0x22, 0x8f, 0x7a, 0x2d, 0xd3, + 0xf2, 0x4e, 0x57, 0x5a, 0xe7, 0x88, 0x8a, 0xc9, 0xe2, 0x31, 0xf1, 0x15, 0xca, 0x3d, 0x59, 0x54, + 0xa5, 0x11, 0x64, 0x1b, 0x0e, 0xc0, 0x73, 0x6e, 0xc6, 0xe7, 0xad, 0x71, 0x16, 0xd6, 0xa8, 0x1a, + 0xfd, 0xe1, 0xf2, 0x94, 0x19, 0xf3, 0x5b, 0x70, 0x69, 0xa8, 0xcf, 0x2f, 0x19, 0xe4, 0x35, 0xcf, + 0xa0, 0x17, 0xc3, 0x78, 0x74, 0xc4, 0xec, 0x19, 0x6e, 0xc6, 0x33, 0xbe, 0x0e, 0x61, 
0x05, 0x2d, + 0x92, 0x03, 0xc1, 0x95, 0xd4, 0xb2, 0x45, 0xd7, 0x34, 0x01, 0xc6, 0x8b, 0x60, 0x1c, 0x43, 0x07, + 0xfe, 0xc0, 0x54, 0x51, 0x93, 0x77, 0x86, 0x85, 0xf9, 0x47, 0x06, 0x84, 0xb1, 0x03, 0x09, 0x81, + 0x5d, 0x3b, 0x2d, 0x97, 0x55, 0x00, 0xde, 0x11, 0xeb, 0x3f, 0x7d, 0x7a, 0xa5, 0xd7, 0x09, 0x79, + 0x55, 0x23, 0x6c, 0xbe, 0x95, 0x07, 0xc5, 0x23, 0xca, 0x9e, 0x2f, 0x17, 0xe6, 0x16, 0xdf, 0xe7, + 0xd3, 0x5c, 0x8e, 0xfb, 0x10, 0xf2, 0x9a, 0x77, 0x96, 0xd4, 0x1f, 0xdc, 0xa3, 0xbf, 0xb6, 0x6f, + 0x56, 0x34, 0x72, 0x95, 0xf2, 0x3e, 0x12, 0x00, 0x2b, 0xe5, 0xb5, 0x64, 0xdf, 0xfb, 0xca, 0x76, + 0x16, 0x12, 0x53, 0x45, 0x75, 0x88, 0xc2, 0xa4, 0xae, 0xa3, 0xf1, 0x71, 0xc8, 0x38, 0x87, 0xc9, + 0xc0, 0x75, 0x97, 0x90, 0x67, 0x5a, 0xd0, 0xdc, 0x0c, 0x76, 0x30, 0xef, 0x3e, 0x01, 0x9c, 0x30, + 0x53, 0x7c, 0x9d, 0x66, 0x89, 0x82, 0x38, 0x80, 0x74, 0xf8, 0xa8, 0x12, 0x5b, 0xaa, 0x54, 0x75, + 0xe7, 0xc5, 0xad, 0x75, 0xae, 0x36, 0x22, 0x2a, 0xa5, 0x04, 0x39, 0x8d, 0x37, 0xd2, 0x27, 0xf0, + 0x5d, 0x6f, 0x2c, 0xea, 0x52, 0xe7, 0xfb, 0x13, 0x7c, 0xee, 0xe9, 0xe0, 0xec, 0x2d, 0x26, 0xa6, + 0x07, 0xb0, 0x19, 0x9e, 0x8d, 0x23, 0x8f, 0xfd, 0x0a, 0x2d, 0xac, 0xf0, 0x51, 0xc5, 0x99, 0x4f, + 0xaa, 0x34, 0x86, 0xba, 0x89, 0x19, 0x18, 0x70, 0xb1, 0xca, 0xc1, 0x65, 0xec, 0x7f, 0xf5, 0x60, + 0xc8, 0x70, 0x3b, 0xf8, 0x63, 0xd5, 0x04, 0xd1, 0xf8, 0x04, 0x1d, 0xf9, 0x52, 0xcd, 0x78, 0x0c, + 0x2f, 0x4b, 0xa8, 0x3c, 0xcb, 0xdb, 0xc4, 0x55, 0xc4, 0xc7, 0x2d, 0xf1, 0xf0, 0x0d, 0xdd, 0xd1, + 0xe0, 0x38, 0xc1, 0x4e, 0x1c, 0x4b, 0x17, 0xee, 0x4a, 0x39, 0xae, 0x6e, 0xb9, 0x0e, 0x25, 0x79, + 0x94, 0xa0, 0x73, 0xc9, 0xee, 0xd9, 0x93, 0x82, 0xa2, 0x56, 0x20, 0xc4, 0x3a, 0x73, 0x35, 0x4a, + 0xc4, 0x3b, 0xcb, 0x81, 0x80, 0x53, 0x47, 0x25, 0x3b, 0x60, 0xbe, 0x2f, 0x9d, 0x89, 0x9a, 0x7e, + 0x39, 0xcf, 0xfd, 0xec, 0x50, 0x7f, 0xf6, 0x86, 0x85, 0xc8, 0x30, 0x8b, 0x1c, 0x1c, 0xab, 0x3c, + 0xce, 0x93, 0x66, 0x08, 0x6c, 0x95, 0x5d, 0x84, 0x3e, 0x8e, 0x8c, 0x6b, 0xa3, 0xea, 0xf4, 0x80, + 0xdf, 0xc5, 0x87, 0xd7, 0xef, 0xb7, 0xfc, 0xf1, 0x69, 0x6f, 0x4f, 0x48, 0xd3, 0x4b, 0x0b, 0x16, + 0x99, 0x53, 0xf2, 0x47, 0x81, 0x0e, 0x4d, 0x1f, 0x64, 0x28, 0xce, 0x8c, 0xd9, 0x57, 0x55, 0xaa, + 0xc4, 0x9b, 0x02, 0x27, 0xc2, 0x77, 0x12, 0x28, 0x8f, 0x23, 0x22, 0xe2, 0x27, 0xe6, 0x03, 0x37, + 0x6a, 0xdd, 0x00, 0xbd, 0xbf, 0x04, 0x99, 0x94, 0xce, 0x78, 0x16, 0x4a, 0xc9, 0xe2, 0xd4, 0x02, + 0x25, 0x93, 0xfd, 0xc9, 0x79, 0x22, 0x19, 0xf1, 0xdd, 0x9b, 0xd9, 0x30, 0xc8, 0xa7, 0x8d, 0x91, + 0x82, 0x3a, 0x37, 0xd6, 0xb6, 0x85, 0x83, 0x1e, 0x9e, 0x0b, 0x01, 0xb4, 0x15, 0x65, 0x76, 0x92, + 0xe1, 0x12, 0x09, 0x39, 0x78, 0x9c, 0x47, 0xdc, 0x3d, 0x29, 0x48, 0x05, 0x09, 0x2e, 0xf8, 0xd4, + 0xab, 0x5f, 0x7c, 0x66, 0x35, 0x6f, 0x96, 0x24, 0x24, 0x8e, 0x30, 0x7d, 0xc4, 0x0f, 0xdd, 0x13, + 0x7f, 0xbf, 0x75, 0x11, 0x7a, 0x4b, 0x66, 0xf4, 0x7d, 0x65, 0xc2, 0x0c, 0xc3, 0x4a, 0x64, 0x8a, + 0xa1, 0xf8, 0xf2, 0xc4, 0x88, 0x22, 0x44, 0x97, 0x78, 0x8d, 0x9c, 0xf2, 0xcc, 0xf3, 0xde, 0xcc, + 0x17, 0x42, 0xc0, 0x4c, 0x76, 0x4c, 0x80, 0xe9, 0x2d, 0x67, 0xc1, 0xd0, 0x99, 0x64, 0xa6, 0x53, + 0xab, 0x3a, 0x79, 0xfd, 0x4e, 0xed, 0x0f, 0x8e, 0xbe, 0x99, 0xd2, 0x61, 0xd7, 0xd1, 0x1e, 0x75, + 0x40, 0xec, 0xd8, 0x2f, 0xf5, 0x0a, 0xe3, 0x79, 0xdb, 0xa0, 0x0f, 0x6e, 0xd9, 0x1b, 0xe8, 0x25, + 0xd4, 0xcc, 0x73, 0xbd, 0xf4, 0xf5, 0x64, 0xd6, 0xe8, 0xf9, 0xb1, 0x9d, 0x14, 0xc8, 0x86, 0x22, + 0xe4, 0xa0, 0xab, 0x54, 0x76, 0x7a, 0xe3, 0x0c, 0xb0, 0x57, 0x0e, 0x8d, 0x03, 0x7e, 0x2b, 0x16, + 0xbf, 0x71, 0x94, 0x19, 0xb0, 0xdb, 0x62, 0x68, 0x10, 0xcc, 0x01, 0xec, 0x0f, 0x27, 0x2b, 0x34, + 0xbf, 0xb1, 
0x46, 0x28, 0x31, 0x89, 0x34, 0xe0, 0xc9, 0x08, 0x5c, 0x78, 0x5a, 0xf6, 0xd6, 0xd9, + 0xec, 0x62, 0x99, 0xf5, 0x4c, 0x5d, 0x2e, 0xbd, 0x69, 0x13, 0x85, 0x87, 0xea, 0x56, 0xce, 0x68, + 0xdb, 0x3b, 0x53, 0xab, 0xa6, 0xd3, 0xff, 0x84, 0x98, 0xbb, 0x90, 0xba, 0xa0, 0x01, 0x68, 0x03, + 0x88, 0xf6, 0x34, 0x10, 0xf4, 0x42, 0x7a, 0xa1, 0x7b, 0x21, 0x7a, 0x4c, 0x79, 0xce, 0xf8, 0x7e, + 0x55, 0x85, 0x8c, 0x83, 0x97, 0x61, 0x1b, 0xca, 0xe4, 0x08, 0x07, 0x59, 0x85, 0x06, 0xa2, 0x89, + 0xd2, 0x6e, 0x1a, 0x8f, 0x8b, 0x03, 0xac, 0x72, 0x56, 0x52, 0x42, 0xc6, 0x95, 0xa6, 0x29, 0x3e, + 0xfb, 0x94, 0xe0, 0x42, 0xe0, 0xbf, 0x00, 0xd5, 0xf0, 0x2c, 0x5a, 0x73, 0x4b, 0x03, 0x8a, 0x50, + 0xfa, 0x1b, 0x96, 0x36, 0x8c, 0xdf, 0x31, 0x3e, 0xaf, 0x38, 0x94, 0x61, 0x55, 0x1e, 0x24, 0xb3, + 0x81, 0x19, 0xb2, 0x5b, 0x90, 0x98, 0x2c, 0xe6, 0x97, 0x16, 0x23, 0x4a, 0xc8, 0x2d, 0x97, 0xaa, + 0x32, 0x92, 0xd1, 0xc0, 0xa9, 0x43, 0x60, 0x47, 0xb3, 0x4a, 0x56, 0xfe, 0xca, 0x73, 0x0c, 0xbf, + 0xb7, 0xe4, 0x20, 0x67, 0x27, 0x16, 0xe4, 0xaa, 0xa9, 0x5c, 0x9a, 0xa2, 0x76, 0x88, 0x63, 0x4c, + 0x67, 0x61, 0xb9, 0x23, 0xa8, 0x31, 0xab, 0x78, 0xf3, 0x62, 0x63, 0x8a, 0x23, 0x75, 0x7a, 0x81, + 0x8d, 0xa0, 0xbd, 0x00, 0x42, 0x81, 0x0a, 0xdd, 0xc0, 0xff, 0xba, 0x01, 0x56, 0xc9, 0x59, 0x6b, + 0xe6, 0x6b, 0x01, 0xa0, 0x34, 0x5d, 0xbf, 0xa2, 0x72, 0xca, 0xd1, 0x97, 0x57, 0x7b, 0xd8, 0xe9, + 0x09, 0x2b, 0xfd, 0x7a, 0x48, 0x39, 0xa2, 0x6b, 0xd6, 0x6d, 0xa7, 0xc2, 0x64, 0x22, 0x4a, 0x20, + 0x22, 0x50, 0x4c, 0x3b, 0x92, 0xe2, 0x4b, 0xc8, 0x74, 0x2b, 0xd3, 0x5a, 0x4e, 0xcf, 0xcb, 0x17, + 0x66, 0xfe, 0xe2, 0x33, 0x54, 0xfc, 0x5d, 0x68, 0x72, 0x58, 0xa7, 0x36, 0xfb, 0xe0, 0x45, 0xf0, + 0x01, 0x3f, 0xfd, 0xfa, 0xb0, 0xa2, 0x6a, 0xb2, 0xdd, 0x87, 0x7c, 0x96, 0x87, 0x46, 0xed, 0xb1, + 0x2e, 0x77, 0xd5, 0x37, 0xd6, 0xac, 0xba, 0x53, 0xac, 0xdd, 0xe8, 0xfb, 0x02, 0xf7, 0x81, 0x36, + 0xca, 0x03, 0xfa, 0xfc, 0xf6, 0xbb, 0xd0, 0xb2, 0x01, 0x79, 0x3b, 0x99, 0x4f, 0xf3, 0x27, 0x41, + 0xec, 0xc4, 0xa9, 0x5b, 0xae, 0xbd, 0x20, 0x7c, 0x2f, 0xf6, 0x4a, 0x94, 0x4a, 0xa3, 0x8d, 0x67, + 0xf3, 0xbd, 0x37, 0xdb, 0xb0, 0xae, 0x13, 0x01, 0xa4, 0x85, 0xff, 0x4e, 0xe0, 0xce, 0x0a, 0x44, + 0x7f, 0xbd, 0xd5, 0x62, 0x69, 0xea, 0x8d, 0xcb, 0x2c, 0x2e, 0x07, 0x1e, 0xdf, 0x72, 0x39, 0x7b, + 0x40, 0xd4, 0xba, 0x94, 0x70, 0xa2, 0x3b, 0xec, 0x00, 0xaa, 0x14, 0x10, 0x18, 0x2b, 0x22, 0x85, + 0x70, 0xd7, 0xf3, 0xbf, 0x1f, 0x4b, 0xc8, 0xff, 0x0b, 0x13, 0xfc, 0xc7, 0xdb, 0xf0, 0x1e, 0x01, + 0x55, 0x75, 0x82, 0x37, 0x76, 0xa6, 0x8c, 0x3b, 0xd6, 0xbb, 0xe6, 0x28, 0x82, 0xa4, 0xdb, 0xed, + 0x20, 0x81, 0xc0, 0x2e, 0x4e, 0x88, 0x0e, 0xa2, 0x0a, 0x94, 0x13, 0xcd, 0x82, 0x3c, 0x87, 0xa5, + 0x69, 0x2a, 0xad, 0xc7, 0x33, 0xb2, 0x88, 0x28, 0xd3, 0x36, 0xc3, 0x0d, 0xa5, 0xb1, 0x5d, 0x72, + 0x9a, 0x8e, 0xe0, 0x8d, 0x8d, 0x20, 0x0f, 0x0e, 0xc1, 0x89, 0x57, 0x3a, 0xff, 0xa0, 0xa1, 0x40, + 0x4a, 0xcb, 0x00, 0x74, 0x68, 0x65, 0x57, 0x0a, 0xc4, 0xa5, 0xbc, 0x09, 0x31, 0x17, 0x0a, 0x32, + 0x7e, 0xd8, 0xe9, 0x89, 0x27, 0xa4, 0x0b, 0x9f, 0xf1, 0x42, 0xe4, 0xbe, 0x2a, 0xd7, 0xf0, 0x20, + 0xf7, 0xdd, 0x03, 0x15, 0x5d, 0x51, 0xd7, 0x97, 0x1f, 0x30, 0xd5, 0x5c, 0xa9, 0x9d, 0xb8, 0x84, + 0x8e, 0xd2, 0xd7, 0x82, 0xd9, 0xe9, 0xa1, 0xc0, 0x83, 0xe3, 0x9e, 0x63, 0x43, 0xa4, 0x59, 0x30, + 0xaa, 0x53, 0x3b, 0x6c, 0x68, 0x55, 0x3d, 0xca, 0x7a, 0xb6, 0xf5, 0xcf, 0x8c, 0x42, 0x99, 0x83, + 0xb7, 0x7c, 0xaf, 0xed, 0xf6, 0x59, 0x71, 0xcb, 0x46, 0xdc, 0xc5, 0x5e, 0x5d, 0x2d, 0x57, 0x9d, + 0x8a, 0x01, 0xf2, 0xa5, 0x34, 0x2a, 0x78, 0x9c, 0x7e, 0x59, 0x3a, 0x46, 0xaf, 0x8d, 0xd0, 0xdf, + 0xbf, 0xe7, 0xfe, 0x58, 0xd0, 0xb9, 
0x13, 0x56, 0x0f, 0x57, 0xc3, 0xa2, 0x53, 0x7a, 0x2f, 0x0e, + 0x54, 0x96, 0x42, 0xfa, 0xb1, 0x9c, 0x9f, 0x47, 0xd4, 0x5a, 0xf9, 0x0a, 0x2f, 0x79, 0x1a, 0x23, + 0xfe, 0x68, 0xc7, 0xcf, 0x30, 0x49, 0x3d, 0x46, 0x08, 0x29, 0x7f, 0xec, 0xd6, 0xaa, 0x3e, 0xb2, + 0xe4, 0x84, 0x25, 0x52, 0xee, 0x38, 0x0c, 0xa8, 0x85, 0xa0, 0x0d, 0x0a, 0xc6, 0xda, 0x61, 0xaa, + 0x84, 0xf8, 0x47, 0x96, 0x35, 0x06, 0xf2, 0x4c, 0xae, 0x71, 0x49, 0x93, 0xa4, 0xb1, 0x52, 0x69, + 0x9d, 0xce, 0xc2, 0xa2, 0x46, 0xb4, 0xc5, 0x88, 0x47, 0x0a, 0xb9, 0x4a, 0xb9, 0x91, 0xda, 0x61, + 0xc1, 0xec, 0xf6, 0x67, 0xac, 0x9f, 0x01, 0xfd, 0x13, 0xb3, 0x4a, 0x47, 0x25, 0x32, 0xb9, 0xb7, + 0xc8, 0x45, 0xff, 0x2a, 0x26, 0x8c, 0xf4, 0x4e, 0x7c, 0x1c, 0x38, 0x51, 0x6f, 0x23, 0xfc, 0x06, + 0xbe, 0xbc, 0x26, 0xac, 0xe5, 0x4a, 0x1c, 0x6f, 0x3a, 0x2b, 0x30, 0x88, 0x26, 0x2e, 0xd1, 0x9b, + 0x71, 0x58, 0xc3, 0x74, 0x64, 0x12, 0x30, 0xcd, 0xd6, 0x28, 0x18, 0x65, 0xf8, 0xa5, 0x13, 0x86, + 0xb9, 0x41, 0x50, 0x0a, 0x1a, 0x1c, 0xd5, 0x9c, 0x5c, 0xc3, 0xba, 0xf1, 0xf7, 0xba, 0x8d, 0x92, + 0xc6, 0x2f, 0x2f, 0xe5, 0xf5, 0x81, 0xe3, 0x23, 0x09, 0xf1, 0xe9, 0x2e, 0x66, 0xd8, 0x53, 0x39, + 0x5b, 0xff, 0x5a, 0x15, 0x4f, 0x60, 0xe1, 0x52, 0xc2, 0x1c, 0x93, 0xd0, 0x93, 0xa4, 0xd7, 0x2d, + 0xf1, 0xcc, 0xda, 0x25, 0x78, 0x48, 0x03, 0x74, 0x63, 0x06, 0x43, 0x6e, 0x1e, 0xbd, 0xb7, 0x2f, + 0x56, 0x58, 0x75, 0x9e, 0x9c, 0x4d, 0xa8, 0x85, 0x5c, 0x9c, 0x3a, 0x2b, 0xaa, 0xdf, 0x7c, 0xed, + 0xdf, 0x5c, 0xb0, 0x01, 0x63, 0x71, 0x8f, 0xf5, 0xab, 0x15, 0x5f, 0xa2, 0xf2, 0x1b, 0x1d, 0x29, + 0x55, 0x64, 0x9e, 0x19, 0x59, 0x0a, 0x61, 0x47, 0x6e, 0xa1, 0xae, 0x7d, 0x83, 0xf8, 0x31, 0x59, + 0x49, 0x53, 0xd5, 0xa6, 0x09, 0xc9, 0xbe, 0x95, 0xb3, 0x0d, 0x54, 0xfb, 0xd8, 0xf8, 0xd6, 0x79, + 0x82, 0x00, 0x86, 0x4b, 0x14, 0x3f, 0xd2, 0x87, 0x9b, 0xd3, 0x79, 0x47, 0xfa, 0xf7, 0x37, 0x29, + 0x2d, 0xe5, 0xb3, 0xd9, 0x8b, 0x88, 0x15, 0xed, 0x99, 0x05, 0x5e, 0xee, 0xda, 0xd9, 0x6f, 0x6d, + 0x6a, 0xca, 0x87, 0xae, 0x06, 0xc9, 0x8b, 0x19, 0xdf, 0x86, 0x80, 0x2f, 0xce, 0xea, 0xe8, 0xa5, + 0x84, 0xee, 0x9e, 0x4d, 0x03, 0xb0, 0xfb, 0x56, 0x2f, 0x96, 0xc8, 0x6f, 0x0b, 0x62, 0xed, 0xc2, + 0xb0, 0x86, 0xb6, 0x5f, 0x68, 0x2a, 0x59, 0x1f, 0x68, 0x92, 0xea, 0xad, 0xb6, 0x75, 0xab, 0xd6, + 0xd1, 0x54, 0x40, 0xe9, 0x38, 0x06, 0xb6, 0xef, 0x63, 0x77, 0xd5, 0x0e, 0xa7, 0xb2, 0xbf, 0xa5, + 0x1a, 0x7e, 0x77, 0xfc, 0x71, 0x78, 0x37, 0xde, 0x66, 0x7b, 0x5b, 0x7e, 0xb1, 0xe2, 0xc1, 0xfc, + 0xd8, 0x78, 0xc2, 0xf3, 0xaf, 0xf1, 0x69, 0x80, 0xef, 0xd6, 0x7d, 0xec, 0x9c, 0xc6, 0xd0, 0xc7, + 0xab, 0xf1, 0x3b, 0xea, 0xa9, 0xa1, 0x8a, 0xd8, 0x5b, 0x99, 0x1b, 0x8d, 0x1e, 0xdc, 0xaf, 0x0b, + 0x54, 0xab, 0x1e, 0x1c, 0x84, 0x7a, 0x24, 0x92, 0x69, 0xf9, 0x04, 0xbb, 0xde, 0x14, 0x4c, 0x04, + 0xaf, 0xef, 0x95, 0xac, 0x8d, 0xe7, 0xd9, 0xc4, 0x0e, 0x08, 0x45, 0x8f, 0x9a, 0x24, 0xee, 0x95, + 0xc1, 0x4a, 0xed, 0xb1, 0x4e, 0x7f, 0x31, 0xdc, 0x7e, 0x6d, 0xfe, 0x88, 0x44, 0x18, 0x11, 0x6c, + 0x6c, 0xaa, 0x19, 0x99, 0xe1, 0x2a, 0x9d, 0x32, 0x92, 0xe6, 0xed, 0xb3, 0xea, 0xf3, 0xbe, 0x8e, + 0x60, 0xfc, 0x6e, 0x54, 0xc8, 0x28, 0xf6, 0x5e, 0x14, 0x50, 0x89, 0x6b, 0x43, 0x2b, 0x98, 0x2d, + 0x9d, 0xed, 0x16, 0x0f, 0xab, 0x38, 0xf0, 0xf4, 0xb5, 0x3e, 0x08, 0xbe, 0x93, 0xb8, 0x99, 0x1b, + 0xf3, 0xca, 0x28, 0x47, 0x49, 0x05, 0xb8, 0xc1, 0xeb, 0x87, 0x8d, 0x76, 0xe5, 0xcb, 0x21, 0xd9, + 0x5b, 0x65, 0xa8, 0x1e, 0xec, 0x74, 0x35, 0x2c, 0xaf, 0x36, 0xa2, 0xcd, 0x11, 0x34, 0x52, 0x28, + 0xe1, 0x33, 0xdc, 0xb3, 0x54, 0xaf, 0xd9, 0xc3, 0x19, 0x75, 0xe5, 0xc1, 0xba, 0x22, 0x44, 0x34, + 0xcb, 0x29, 0x4d, 0x3c, 0xa7, 0xce, 0x4d, 0x06, 0x0b, 0xe3, 
0x2b, 0xda, 0x50, 0x7f, 0x51, 0xbc, + 0x9b, 0x79, 0xcd, 0xc9, 0x55, 0xd3, 0x65, 0xe8, 0x09, 0x16, 0x08, 0x64, 0x83, 0xb4, 0x53, 0x30, + 0x24, 0x51, 0x36, 0xb1, 0x64, 0x20, 0xb3, 0xa4, 0xbb, 0x45, 0xfa, 0xc3, 0x72, 0xd3, 0x13, 0x9e, + 0xd1, 0xe7, 0x0e, 0xca, 0x70, 0xd5, 0x56, 0xe0, 0x87, 0x73, 0xf2, 0x0d, 0xfa, 0x43, 0x64, 0x36, + 0xa1, 0xb7, 0x92, 0x8d, 0x5d, 0x0b, 0x3b, 0x93, 0x48, 0xe0, 0xc7, 0x67, 0x13, 0xa3, 0x97, 0x30, + 0x50, 0x9b, 0x57, 0x86, 0x25, 0x14, 0x7a, 0xdd, 0x0a, 0xdb, 0x00, 0x9f, 0xb9, 0x27, 0x0a, 0xd5, + 0xb8, 0xab, 0xbd, 0x13, 0x69, 0x69, 0x25, 0xa5, 0xfe, 0x1d, 0x10, 0x51, 0x89, 0x56, 0xa5, 0xa3, + 0x59, 0x1f, 0xa3, 0x20, 0x59, 0x81, 0x15, 0xba, 0x6b, 0x44, 0xf3, 0xba, 0xd0, 0x3b, 0x89, 0x1b, + 0x9e, 0xf0, 0xe9, 0xe3, 0xeb, 0x3c, 0x49, 0x13, 0x6f, 0x86, 0xe9, 0xcc, 0x8a, 0x1f, 0x75, 0x4d, + 0xbc, 0xbb, 0x44, 0xd0, 0x38, 0x72, 0x1d, 0xf3, 0x66, 0x06, 0x1e, 0x20, 0x4a, 0x19, 0x19, 0x1e, + 0x84, 0x35, 0x85, 0x64, 0xb6, 0x32, 0x60, 0xc2, 0xbb, 0xff, 0x36, 0x40, 0xf0, 0x59, 0x6f, 0x80, + 0xfd, 0x9d, 0x2b, 0x09, 0xb1, 0xad, 0xed, 0xeb, 0xcb, 0xf9, 0xbe, 0xc4, 0x85, 0xb9, 0xff, 0xf0, + 0xbb, 0xd6, 0x66, 0x3c, 0xc0, 0x7a, 0xc4, 0x9d, 0x43, 0xc0, 0x81, 0xa5, 0x03, 0x7a, 0xab, 0xb2, + 0xca, 0x66, 0x17, 0x4f, 0x72, 0xfb, 0x2d, 0x9d, 0x3f, 0x15, 0xe5, 0x99, 0xf8, 0x00, 0x41, 0x54, + 0xd1, 0xa4, 0x80, 0x17, 0xa8, 0x31, 0x02, 0x96, 0xa6, 0x6d, 0x6d, 0x44, 0x80, 0x7d, 0xf7, 0xc1, + 0x11, 0xc0, 0x47, 0xb8, 0xf0, 0xbd, 0xbc, 0x75, 0xaf, 0x85, 0xaa, 0x3a, 0x85, 0x80, 0xfe, 0xe7, + 0x76, 0xe2, 0x56, 0xfa, 0xf1, 0x40, 0x4e, 0x4f, 0xec, 0x94, 0x27, 0x07, 0xd8, 0x1f, 0xdc, 0x4c, + 0x56, 0x2f, 0xa1, 0x8b, 0xfc, 0x50, 0x4b, 0x7d, 0x82, 0xf5, 0x06, 0xb5, 0x6a, 0xf3, 0xe2, 0x6f, + 0x11, 0x3f, 0xfe, 0xfb, 0x20, 0x39, 0xff, 0x7b, 0x2a, 0x54, 0x04, 0xd5, 0x5f, 0x27, 0xc8, 0x02, + 0x51, 0x46, 0x1a, 0xfe, 0x4d, 0xf5, 0x06, 0xd7, 0xca, 0xa9, 0x4f, 0x57, 0x4e, 0xea, 0x85, 0x05, + 0x71, 0x34, 0x15, 0x34, 0xbb, 0xe0, 0xea, 0x00, 0xae, 0x00, 0xf9, 0x5a, 0x41, 0x22, 0x02, 0xe6, + 0xf7, 0x31, 0x94, 0xdc, 0xfa, 0xba, 0x17, 0xce, 0xae, 0xbf, 0x78, 0xc6, 0x9b, 0x38, 0x28, 0x5f, + 0x64, 0xdf, 0x19, 0x6f, 0x50, 0xc8, 0xd1, 0x71, 0xd0, 0xd4, 0x3a, 0x5d, 0x91, 0x66, 0x01, 0x5f, + 0x7a, 0xd4, 0xe0, 0x24, 0x67, 0x18, 0x03, 0x77, 0xa9, 0x02, 0x9a, 0x32, 0xd9, 0x91, 0xce, 0xf0, + 0x20, 0xba, 0x99, 0xd9, 0x22, 0xbb, 0xf9, 0xbb, 0xd5, 0x90, 0x66, 0x05, 0xd2, 0x71, 0x13, 0xab, + 0xdd, 0x91, 0x8a, 0x1d, 0xb0, 0xc7, 0xc9, 0x29, 0xd3, 0x8f, 0x03, 0x85, 0x98, 0x30, 0x85, 0xc5, + 0x96, 0x40, 0xcf, 0x52, 0xff, 0xb5, 0xf6, 0xac, 0xa0, 0x7a, 0x09, 0x1c, 0x79, 0x59, 0x93, 0xe5, + 0xfa, 0x2f, 0x43, 0x02, 0xd0, 0x95, 0x89, 0x69, 0x5e, 0x85, 0x43, 0x76, 0xc0, 0x45, 0xe6, 0xb3, + 0x85, 0xee, 0x13, 0x4a, 0x78, 0x6b, 0xb8, 0xe7, 0xd0, 0x60, 0x29, 0xe8, 0x76, 0x14, 0x3f, 0x83, + 0xe7, 0x9d, 0x14, 0xbe, 0x44, 0x3d, 0x20, 0x90, 0x94, 0x8b, 0x46, 0xa8, 0xde, 0x13, 0xfe, 0x08, + 0xdb, 0x56, 0x96, 0x38, 0x01, 0x7b, 0xf6, 0xa5, 0xbf, 0x9c, 0x03, 0xdf, 0x70, 0xaf, 0x5f, 0xa2, + 0x6a, 0xe3, 0xfe, 0x4d, 0x05, 0xfa, 0x7f, 0x16, 0x48, 0x7a, 0x34, 0x10, 0xd9, 0xd3, 0xdf, 0x27, + 0x6f, 0x23, 0xf8, 0xca, 0xea, 0xfe, 0x5c, 0xbd, 0x2b, 0x05, 0x0d, 0xe4, 0x86, 0x45, 0x73, 0x25, + 0x68, 0x10, 0x23, 0x36, 0x47, 0x02, 0x3e, 0x37, 0x45, 0x8f, 0x81, 0x7d, 0x38, 0x28, 0xce, 0xca, + 0xfe, 0x35, 0xfb, 0x8e, 0xb6, 0xde, 0x22, 0x45, 0x77, 0xde, 0xe6, 0xb6, 0x39, 0xfd, 0xf1, 0x23, + 0x29, 0x97, 0x26, 0x7f, 0xc0, 0x70, 0x38, 0xb6, 0x0e, 0x34, 0x54, 0x1a, 0x59, 0x25, 0xec, 0x11, + 0xc2, 0x90, 0xa4, 0xe8, 0x82, 0xb3, 0xd1, 0x8a, 0xbb, 0xdc, 0xc5, 0xce, 0x0b, 0x12, 
0xa9, 0xd7, + 0xc4, 0xa1, 0xe7, 0x85, 0x6d, 0xab, 0x1e, 0x7c, 0x0c, 0x83, 0xdc, 0x71, 0x4b, 0xda, 0xea, 0xdd, + 0xd5, 0x9f, 0xf1, 0x78, 0x32, 0x1e, 0xe7, 0xfb, 0xa9, 0x91, 0xdd, 0x19, 0xc8, 0x0f, 0xea, 0x49, + 0x73, 0x34, 0xcc, 0xe2, 0x24, 0x76, 0x8f, 0xf3, 0x50, 0xe0, 0xb1, 0x94, 0xa9, 0xc0, 0x39, 0x06, + 0x46, 0x69, 0x2b, 0x33, 0x54, 0xd3, 0x8b, 0xad, 0x9f, 0x73, 0x8a, 0x27, 0x45, 0x82, 0xbc, 0xcf, + 0xa7, 0xc0, 0x47, 0x9d, 0x09, 0x09, 0xb0, 0x84, 0x1a, 0x7f, 0xe6, 0x8c, 0x18, 0x21, 0x17, 0x02, + 0x5e, 0x67, 0x1c, 0x72, 0x88, 0x92, 0x3b, 0x0e, 0xc2, 0x99, 0xd1, 0x9b, 0xa8, 0x66, 0x0c, 0xbb, + 0xd2, 0xc2, 0x9c, 0xc4, 0xfb, 0x83, 0x70, 0x84, 0xe2, 0xf9, 0x75, 0x5a, 0x98, 0x04, 0x1a, 0xf9, + 0xb8, 0x7e, 0xc3, 0xd7, 0xf7, 0xea, 0x83, 0xdd, 0xd7, 0x08, 0xa3, 0x44, 0x84, 0x30, 0x2a, 0xc0, + 0x24, 0x14, 0xee, 0x7d, 0x00, 0xaf, 0xdc, 0xe9, 0xeb, 0x4f, 0x6e, 0x14, 0x73, 0x6e, 0x08, 0xd4, + 0xc3, 0xa3, 0xe0, 0x9e, 0xbb, 0xd2, 0xe7, 0xc4, 0xe2, 0xaf, 0x47, 0x1f, 0x79, 0x3a, 0x8b, 0x28, + 0x5c, 0xdf, 0x21, 0x6e, 0x9b, 0x71, 0xa2, 0x52, 0x5a, 0xac, 0x15, 0xf6, 0x44, 0x2a, 0x1a, 0x2c, + 0xb1, 0x22, 0x20, 0x5b, 0xe4, 0x7e, 0x2e, 0x74, 0xfe, 0x1e, 0xe7, 0xab, 0x38, 0x9d, 0x54, 0x18, + 0x45, 0xb4, 0x8b, 0x10, 0x69, 0x72, 0x69, 0x10, 0x30, 0x8d, 0xcf, 0xc7, 0x59, 0xb6, 0x1d, 0x5f, + 0xc5, 0x55, 0x88, 0x07, 0xb8, 0x8a, 0x6e, 0xd4, 0x03, 0x8d, 0x0e, 0x01, 0x82, 0x57, 0xc8, 0x6a, + 0x60, 0x29, 0xb7, 0x35, 0x30, 0x1c, 0xfb, 0x65, 0x85, 0xf1, 0x05, 0xf7, 0x23, 0x4b, 0x67, 0x27, + 0x78, 0x22, 0xe0, 0xb1, 0x3b, 0x61, 0x77, 0x28, 0x00, 0xcd, 0xb0, 0x9d, 0x82, 0x46, 0x3c, 0xd1, + 0x34, 0x26, 0xe1, 0xac, 0xd1, 0x3c, 0x58, 0x4e, 0xec, 0xe2, 0x0e, 0xa2, 0x2c, 0xbc, 0x46, 0xd1, + 0xcc, 0xfa, 0x5a, 0xaa, 0x0d, 0x7d, 0xd9, 0x74, 0x38, 0x7f, 0x94, 0x31, 0x81, 0x25, 0x87, 0x26, + 0x6a, 0x52, 0x7b, 0xe3, 0xd6, 0xf6, 0x91, 0x22, 0xd5, 0x0b, 0x60, 0xbf, 0x15, 0x97, 0x8d, 0x68, + 0xde, 0x79, 0x8b, 0x79, 0x6a, 0x66, 0x35, 0x08, 0x68, 0x43, 0x97, 0xa1, 0xa9, 0x5c, 0x2a, 0x84, + 0x4f, 0x94, 0x58, 0x24, 0xe1, 0x95, 0x1d, 0xd6, 0xf9, 0x0b, 0x39, 0x76, 0xb8, 0x41, 0x2d, 0xb3, + 0x5a, 0xb5, 0x6b, 0x73, 0x8c, 0x73, 0x8d, 0xb5, 0x60, 0xc3, 0xf5, 0x9d, 0x4a, 0x7a, 0xb4, 0x3f, + 0x42, 0x71, 0x21, 0x50, 0x2e, 0x5f, 0xf5, 0x73, 0x4d, 0x5e, 0xea, 0x8e, 0x81, 0x29, 0x2a, 0x33, + 0x61, 0x8d, 0x9f, 0x15, 0x16, 0x58, 0xc8, 0x9e, 0x76, 0x0f, 0xa6, 0x27, 0x6a, 0x47, 0x93, 0x1d, + 0x60, 0xc7, 0x54, 0x97, 0xd2, 0x08, 0x42, 0xb7, 0x6c, 0x9e, 0x01, 0xb7, 0x71, 0x38, 0x95, 0x1c, + 0xde, 0x0f, 0x73, 0x15, 0x35, 0x0b, 0x60, 0x90, 0x30, 0x5d, 0xab, 0xd3, 0xed, 0x0c, 0xd0, 0x3a, + 0x3c, 0xc3, 0xeb, 0x66, 0x55, 0x3d, 0x4e, 0x4b, 0x60, 0xf1, 0xbe, 0xad, 0xe9, 0x94, 0x76, 0xa8, + 0x09, 0xee, 0xed, 0x1c, 0x14, 0xca, 0xc1, 0x62, 0x97, 0x07, 0x16, 0xd8, 0x1e, 0xf5, 0x36, 0x75, + 0x50, 0x0f, 0xa6, 0x35, 0xa7, 0x2b, 0xb0, 0xda, 0x83, 0x7f, 0x60, 0x30, 0x06, 0x02, 0xfb, 0xcc, + 0x95, 0x67, 0x5f, 0x6a, 0xe5, 0xf7, 0x51, 0xd0, 0xa9, 0x64, 0xcc, 0x98, 0x57, 0x90, 0x87, 0xf0, + 0x3a, 0x6c, 0x01, 0x85, 0x75, 0x0b, 0x28, 0xeb, 0x55, 0xc2, 0xab, 0x03, 0x61, 0xa2, 0xf7, 0x97, + 0x43, 0xa5, 0x29, 0x9d, 0x6a, 0x43, 0xd3, 0x86, 0x39, 0x52, 0x57, 0x2b, 0x3f, 0x01, 0xf4, 0x05, + 0xb8, 0x84, 0xc4, 0xb3, 0xdd, 0xf8, 0x20, 0x8e, 0x11, 0x09, 0x66, 0x4b, 0xc9, 0x52, 0x24, 0x3f, + 0x2d, 0x74, 0xde, 0x76, 0xda, 0x71, 0xa4, 0xd5, 0x71, 0x3c, 0x0d, 0x7f, 0xec, 0x5b, 0xe3, 0x54, + 0xf7, 0xd7, 0xe4, 0xe9, 0x64, 0x1b, 0x09, 0x30, 0xe3, 0xc7, 0xe5, 0x74, 0xca, 0xd2, 0x88, 0x66, + 0x6d, 0x62, 0x2b, 0xa2, 0x19, 0xff, 0xca, 0xad, 0x33, 0x61, 0x10, 0x8d, 0xfb, 0xcb, 0x34, 0x69, + 0x24, 0x13, 
0xe9, 0x91, 0x6b, 0x56, 0xf6, 0xc8, 0x60, 0x0e, 0x77, 0x5e, 0xdb, 0xf0, 0xc1, 0xef, + 0x56, 0x1f, 0xa5, 0x38, 0x77, 0xaa, 0x17, 0xed, 0x93, 0x4c, 0xe4, 0xcd, 0x89, 0xab, 0x1e, 0xa8, + 0xe4, 0x6c, 0x26, 0x28, 0xa3, 0x38, 0x99, 0x1b, 0xd3, 0xc0, 0xdf, 0x35, 0x2e, 0x5b, 0x79, 0x44, + 0xb7, 0xf3, 0x07, 0x60, 0x73, 0x8b, 0x4d, 0x65, 0xe3, 0x27, 0x69, 0x76, 0xcb, 0xe3, 0x3c, 0xb5, + 0xe9, 0x44, 0x9e, 0x0a, 0x4a, 0xcb, 0xc7, 0xac, 0x8b, 0x5e, 0xd1, 0x18, 0x77, 0x9a, 0x17, 0xa8, + 0xae, 0x22, 0x59, 0x38, 0x12, 0xb3, 0xa1, 0x5f, 0x1c, 0xa1, 0xf0, 0xb9, 0x2e, 0xd1, 0xca, 0xa8, + 0xb6, 0x3e, 0xcf, 0xfc, 0xf1, 0xe9, 0xdc, 0xe0, 0x7a, 0x13, 0x21, 0x44, 0xb5, 0xfa, 0xb8, 0x13, + 0xd3, 0xc9, 0x1b, 0x8d, 0x80, 0x12, 0xaa, 0x32, 0xc3, 0x47, 0x30, 0x8b, 0xf5, 0x02, 0xd8, 0x86, + 0xc0, 0x7d, 0x2e, 0xb8, 0x1a, 0x78, 0x9c, 0x5a, 0x59, 0x0f, 0x7d, 0x57, 0x75, 0xc6, 0xe3, 0x2c, + 0xf0, 0x49, 0x76, 0x85, 0x7c, 0x6b, 0x8d, 0x4d, 0xb1, 0x45, 0x35, 0x80, 0xc1, 0xff, 0xac, 0xbd, + 0xd4, 0x72, 0xdc, 0xb6, 0x14, 0xf0, 0xd1, 0x02, 0xdc, 0xa8, 0x27, 0xaa, 0x42, 0x15, 0xf5, 0xb4, + 0x59, 0x2c, 0xe6, 0xfe, 0xbb, 0x71, 0xee, 0xee, 0x28, 0x27, 0x4f, 0xd1, 0xe6, 0x3c, 0x36, 0x83, + 0xb2, 0x74, 0xb9, 0x7f, 0x1c, 0x6d, 0x3a, 0xfd, 0x73, 0xe7, 0x6b, 0x4b, 0x2f, 0x21, 0xde, 0x95, + 0x40, 0x2b, 0x31, 0x6d, 0x5b, 0x06, 0x36, 0x57, 0x42, 0x5d, 0x5a, 0x06, 0xa0, 0xc3, 0x5f, 0x8e, + 0x54, 0x97, 0x6d, 0x7f, 0x20, 0x00, 0x3f, 0x83, 0xa6, 0x60, 0x9f, 0xac, 0x91, 0xcf, 0x4d, 0x70, + 0x5e, 0xbc, 0xa1, 0x23, 0xbf, 0x02, 0x77, 0xc7, 0xca, 0x00, 0xb5, 0xde, 0x7a, 0xc9, 0x66, 0x85, + 0xd8, 0x40, 0x34, 0xef, 0xaf, 0xeb, 0x32, 0x77, 0xde, 0x34, 0x83, 0x6b, 0x4f, 0x24, 0x0a, 0x16, + 0x6b, 0x4b, 0xf6, 0xea, 0x8c, 0x7b, 0x1a, 0x40, 0x66, 0xf6, 0x6c, 0x6b, 0x85, 0x40, 0x8f, 0x3d, + 0x7d, 0xce, 0x9c, 0x5c, 0x4e, 0x73, 0x16, 0xe5, 0x8c, 0xfb, 0x91, 0xb5, 0xe8, 0xf6, 0x19, 0xd9, + 0x9b, 0xc1, 0x9f, 0x24, 0xde, 0x73, 0x5f, 0x82, 0xb5, 0xd1, 0x1d, 0xf2, 0x4b, 0x97, 0x8b, 0x6f, + 0x0c, 0xc7, 0x90, 0xd0, 0x4b, 0xce, 0xf8, 0xe3, 0x43, 0xe0, 0x06, 0x41, 0xbd, 0xda, 0xa1, 0x9e, + 0xb0, 0xbf, 0x16, 0xc2, 0x52, 0x56, 0xde, 0x64, 0x2e, 0x71, 0x19, 0x43, 0x70, 0x6c, 0xf0, 0x89, + 0x51, 0x3b, 0xc2, 0x8c, 0x19, 0x60, 0xa5, 0xd5, 0xc6, 0xbe, 0x09, 0x01, 0x73, 0x53, 0xaf, 0xf2, + 0x89, 0x9e, 0xaa, 0xe1, 0x8e, 0xde, 0x8c, 0x02, 0x6a, 0xd2, 0x35, 0xd1, 0x08, 0x7a, 0x34, 0xed, + 0x89, 0xc1, 0x84, 0x3b, 0x53, 0x3f, 0x7f, 0xbe, 0xda, 0x02, 0x1e, 0x1c, 0xd0, 0x79, 0xa4, 0x94, + 0xdf, 0x4c, 0x77, 0x79, 0xda, 0x9e, 0xa2, 0xca, 0xb7, 0x71, 0x33, 0x21, 0x80, 0x13, 0x00, 0x47, + 0x52, 0x1e, 0x2a, 0xfb, 0x35, 0x78, 0x69, 0xb5, 0xab, 0x71, 0xc1, 0x31, 0x68, 0x2d, 0xbf, 0xbd, + 0x64, 0x5d, 0x80, 0x69, 0x76, 0xbb, 0x92, 0x53, 0x71, 0x62, 0xb8, 0x66, 0x32, 0x0d, 0xd1, 0xa2, + 0x1d, 0x28, 0x77, 0x57, 0x43, 0x5b, 0xcb, 0xd3, 0x70, 0x67, 0x17, 0x7d, 0x3a, 0xef, 0x52, 0xa2, + 0x24, 0x22, 0xa9, 0xaf, 0x90, 0x82, 0x8b, 0xa3, 0xd7, 0xb2, 0x93, 0x9a, 0xfc, 0x44, 0x4f, 0x18, + 0xeb, 0x96, 0x33, 0xf9, 0xa5, 0xb2, 0x0d, 0x83, 0x09, 0x76, 0x83, 0x74, 0x45, 0x52, 0x2d, 0x58, + 0x33, 0xe8, 0x3f, 0xcb, 0xbf, 0xa0, 0xb2, 0x8f, 0x9d, 0xe5, 0xc2, 0x9f, 0x55, 0xbc, 0x97, 0xea, + 0xba, 0x90, 0x6f, 0x44, 0xc3, 0xc4, 0x5e, 0xe7, 0x41, 0xa3, 0xcf, 0x5f, 0x96, 0xed, 0xed, 0xca, + 0x4b, 0x5c, 0x2c, 0xcb, 0xd4, 0xc3, 0x94, 0x3a, 0xf3, 0x7f, 0x30, 0x4f, 0x06, 0xc7, 0xdb, 0xca, + 0x3a, 0x54, 0xbb, 0x04, 0xdc, 0xb2, 0x4d, 0x1b, 0xf9, 0xc3, 0x55, 0x27, 0xf2, 0xa0, 0x5d, 0x48, + 0x89, 0xea, 0x06, 0xa2, 0x0c, 0x04, 0xca, 0x71, 0x08, 0x28, 0xde, 0x5b, 0x88, 0xec, 0x5f, 0xed, + 0xce, 0x24, 0x05, 0xa5, 0xd2, 0xc0, 
0x81, 0x2d, 0xa9, 0x0a, 0xa2, 0x49, 0x7c, 0xf9, 0xce, 0x9a, + 0xe7, 0x59, 0x71, 0x8a, 0x81, 0xfc, 0x96, 0xcd, 0x6f, 0xbb, 0x91, 0x1f, 0x31, 0x8b, 0x47, 0x00, + 0xb3, 0xb6, 0x1b, 0xcf, 0x2d, 0x77, 0x88, 0xc8, 0xb0, 0x57, 0x90, 0x78, 0xaf, 0xfd, 0x62, 0x19, + 0xe6, 0x0c, 0xb5, 0x99, 0xd7, 0x0d, 0x3f, 0x95, 0xc0, 0x58, 0x27, 0x8a, 0xf6, 0x68, 0x7e, 0xb3, + 0x51, 0xfc, 0xed, 0x0b, 0x38, 0x32, 0x35, 0xa8, 0xb3, 0xae, 0x2b, 0xf0, 0x19, 0xe9, 0x2f, 0xfe, + 0x70, 0x1a, 0x76, 0xc5, 0x9e, 0x91, 0x19, 0x95, 0x1f, 0x39, 0x5d, 0xae, 0xac, 0x70, 0x2b, 0x73, + 0x3a, 0xb6, 0xfc, 0xb9, 0x5a, 0x26, 0x11, 0x55, 0x4b, 0xe9, 0x93, 0x43, 0xe0, 0x30, 0x94, 0x63, + 0x1d, 0x7f, 0x79, 0x6e, 0x23, 0x71, 0x75, 0x73, 0x49, 0x0e, 0x51, 0x64, 0x79, 0x45, 0x31, 0xad, + 0xb6, 0x4e, 0x3e, 0x13, 0x7e, 0x20, 0x94, 0xba, 0x6a, 0x43, 0xd6, 0x78, 0x6e, 0x03, 0xdc, 0xde, + 0x93, 0xe1, 0xf2, 0x83, 0xfd, 0xa3, 0x19, 0xf0, 0x3a, 0xd3, 0xc8, 0x03, 0xb3, 0xee, 0xaf, 0x47, + 0xc5, 0xc5, 0x53, 0xd5, 0xc9, 0x25, 0x31, 0x5c, 0xe8, 0xd1, 0x07, 0x1b, 0xd0, 0x62, 0xa2, 0x1a, + 0x0f, 0x74, 0xb7, 0xfc, 0xd9, 0xf0, 0xde, 0x82, 0x40, 0x3c, 0xf1, 0xc8, 0xb1, 0xda, 0x00, 0x3f, + 0xb4, 0x25, 0x0c, 0x19, 0xf5, 0xf4, 0xc4, 0xcb, 0xda, 0x7a, 0x1e, 0xd8, 0x78, 0xdb, 0x1f, 0xbd, + 0x1c, 0x6d, 0x33, 0x20, 0xc5, 0x16, 0x74, 0xb6, 0xb2, 0x74, 0x4f, 0x25, 0xfc, 0x47, 0x15, 0x12, + 0x5b, 0x20, 0xe0, 0x06, 0x1f, 0x9e, 0x4e, 0x9f, 0x8b, 0xf8, 0x6c, 0x88, 0x84, 0x7a, 0x3b, 0x37, + 0xbf, 0xa3, 0xc1, 0x7d, 0xe8, 0xf6, 0xf8, 0x00, 0x3d, 0xf9, 0xe2, 0x74, 0x37, 0x13, 0x91, 0x66, + 0xfd, 0xb0, 0xee, 0x76, 0x15, 0x59, 0x11, 0x4f, 0x62, 0x8b, 0xea, 0x8a, 0x6b, 0xa4, 0xa2, 0x5e, + 0x91, 0x5d, 0xb6, 0x13, 0x33, 0x9f, 0xc9, 0x5b, 0x49, 0x49, 0x67, 0x51, 0xc8, 0x03, 0xa7, 0xe8, + 0x1c, 0x2e, 0xac, 0x8a, 0xf5, 0xc2, 0xa5, 0xe5, 0xb8, 0xf2, 0xa2, 0x7a, 0x47, 0xc2, 0x1c, 0x50, + 0x9e, 0x88, 0x65, 0xc0, 0x14, 0x3a, 0x2c, 0x07, 0xd0, 0x40, 0x3c, 0xa4, 0x5c, 0xcb, 0x25, 0x4b, + 0xe5, 0xc2, 0xae, 0x6a, 0x4d, 0xfa, 0x6c, 0xb3, 0x92, 0x62, 0x6f, 0x8e, 0xf3, 0x26, 0x11, 0x2b, + 0x77, 0xc5, 0x66, 0xd1, 0xdd, 0xc7, 0x21, 0x03, 0xdf, 0x20, 0x1b, 0x8a, 0xb2, 0x54, 0xc2, 0x14, + 0x44, 0x7a, 0x3b, 0x6f, 0xfd, 0xfb, 0xb1, 0x74, 0xbc, 0xc8, 0x54, 0xd8, 0xae, 0x5b, 0xa8, 0xba, + 0x96, 0x2e, 0xd6, 0xad, 0xd0, 0xa3, 0xd4, 0x6e, 0x7c, 0x14, 0xe3, 0x5d, 0x86, 0x0f, 0xcc, 0xed, + 0x05, 0x08, 0xfb, 0x45, 0xba, 0xa7, 0x9e, 0xb3, 0xdd, 0xb7, 0x28, 0x35, 0x9d, 0x98, 0x3b, 0xdc, + 0x96, 0x83, 0xc4, 0xf9, 0x56, 0x1c, 0x43, 0xfe, 0xe9, 0xfa, 0xbe, 0x34, 0x9e, 0x1c, 0xf1, 0x62, + 0xff, 0x70, 0x69, 0x1b, 0xb7, 0x30, 0xc1, 0xc8, 0x89, 0x51, 0x44, 0xa7, 0x6d, 0x11, 0x17, 0x74, + 0x9b, 0xa6, 0x89, 0x4d, 0xed, 0xc1, 0xe0, 0xfe, 0xc2, 0xe2, 0xa7, 0x9f, 0x9d, 0xaf, 0x05, 0x2f, + 0x87, 0xea, 0x7a, 0x3f, 0x54, 0x6d, 0x9f, 0xd8, 0x55, 0x6e, 0x08, 0x49, 0x75, 0x4a, 0xac, 0x0f, + 0xf9, 0xb6, 0xde, 0xcc, 0xb9, 0xd0, 0xba, 0xcb, 0x9d, 0x32, 0x19, 0x17, 0x44, 0xba, 0xfe, 0xe2, + 0x10, 0xcd, 0xea, 0xf8, 0x64, 0x9c, 0xcd, 0x31, 0x62, 0xc0, 0xf3, 0x18, 0x10, 0x2c, 0x5f, 0xf9, + 0x36, 0x18, 0x45, 0x91, 0x56, 0x93, 0xa3, 0x63, 0x60, 0x43, 0x7a, 0x13, 0xe9, 0x20, 0xab, 0xb0, + 0xaf, 0x4d, 0xa6, 0xe1, 0x6e, 0x28, 0x02, 0x0c, 0xd6, 0x1b, 0x7d, 0xe8, 0x77, 0x6b, 0xd7, 0x37, + 0x37, 0x96, 0xa6, 0x24, 0xc3, 0x45, 0x85, 0x87, 0xb1, 0xa4, 0xcb, 0xcc, 0x81, 0x09, 0x67, 0xcc, + 0x6d, 0x63, 0x35, 0xa3, 0x62, 0x0c, 0x03, 0xeb, 0x0f, 0x1a, 0x00, 0x45, 0x22, 0x20, 0x40, 0x66, + 0x28, 0x3d, 0x83, 0x07, 0x26, 0x23, 0x64, 0x1b, 0x0a, 0x81, 0x39, 0xfa, 0x8e, 0xd8, 0x76, 0xf1, + 0x4e, 0x09, 0xe9, 0x97, 0x15, 0xb0, 0x2f, 0x8e, 0xda, 0x8f, 
0x39, 0xfa, 0x84, 0xef, 0x7b, 0x59, + 0x36, 0x9f, 0x07, 0x24, 0xee, 0x4a, 0x6e, 0x58, 0xd1, 0x06, 0x63, 0x1e, 0x4e, 0x34, 0x9c, 0xca, + 0x28, 0xb8, 0x93, 0xba, 0x07, 0x04, 0x7c, 0x40, 0x65, 0xc5, 0x66, 0x23, 0xfc, 0x8d, 0xac, 0x78, + 0xa5, 0x18, 0xe6, 0x31, 0x90, 0x67, 0x14, 0x34, 0xda, 0x8b, 0xa9, 0x84, 0xf9, 0x96, 0x4e, 0x37, + 0x5c, 0x3a, 0x7f, 0x8c, 0x28, 0x43, 0xe0, 0x18, 0xac, 0xde, 0x50, 0xc9, 0xff, 0xee, 0xff, 0x6c, + 0x1d, 0x8c, 0x33, 0xc0, 0xd4, 0x5b, 0x81, 0xee, 0xd2, 0x47, 0x50, 0x41, 0x3f, 0x6b, 0x9e, 0x3e, + 0x1d, 0x69, 0xf6, 0xa5, 0xb7, 0xa4, 0x0e, 0xac, 0x82, 0xc3, 0xdc, 0x12, 0x92, 0x8b, 0x5f, 0xde, + 0x95, 0xc3, 0x78, 0x1e, 0x62, 0x00, 0x97, 0xa7, 0x92, 0x87, 0x78, 0x7d, 0x18, 0xad, 0x0a, 0xa9, + 0x8f, 0xab, 0xcb, 0x50, 0xc6, 0xcb, 0x23, 0xff, 0x11, 0x97, 0x84, 0x94, 0x11, 0xa3, 0x15, 0x75, + 0xbe, 0x9b, 0xe1, 0xe2, 0x3d, 0x74, 0xfb, 0x89, 0x74, 0x52, 0x66, 0x01, 0xac, 0x1b, 0x44, 0xa3, + 0xf6, 0x95, 0x76, 0xe5, 0x15, 0xa0, 0x18, 0x7f, 0x84, 0xd8, 0x19, 0xcc, 0x1b, 0x18, 0xc1, 0x34, + 0xa9, 0xc8, 0x57, 0x7f, 0x68, 0x7b, 0x22, 0x32, 0x44, 0x52, 0x16, 0x48, 0x1b, 0xf5, 0xb6, 0x4e, + 0x7d, 0x59, 0x95, 0x28, 0x88, 0x95, 0x8c, 0x28, 0x3b, 0xc3, 0x31, 0x98, 0x0a, 0x0d, 0xdb, 0xa7, + 0x30, 0x80, 0x86, 0xe0, 0xef, 0x6d, 0x6a, 0x2e, 0xfd, 0xce, 0x12, 0xf9, 0xd1, 0x22, 0xa6, 0x32, + 0xc4, 0xc6, 0xa0, 0x87, 0x2b, 0xba, 0x4b, 0xd3, 0xf6, 0x82, 0x91, 0x97, 0x0c, 0x92, 0xaf, 0xbb, + 0x44, 0x04, 0x58, 0x2d, 0xae, 0xce, 0x88, 0x75, 0xfb, 0x4d, 0xef, 0x1e, 0x7a, 0xb3, 0x8e, 0x59, + 0x5b, 0x24, 0xfc, 0x0a, 0xd5, 0x67, 0xbc, 0x01, 0x6e, 0xbd, 0xa2, 0x65, 0x1a, 0x93, 0xa7, 0xce, + 0xbb, 0x17, 0x31, 0xe4, 0xfa, 0xc1, 0x89, 0x7c, 0xf7, 0x46, 0x32, 0x7a, 0xb3, 0x73, 0x32, 0x5d, + 0x7e, 0xbd, 0x23, 0x28, 0xcd, 0x26, 0x09, 0xde, 0x4b, 0xb4, 0x76, 0x10, 0x11, 0x35, 0xfe, 0x7b, + 0xa5, 0x0b, 0x61, 0xf4, 0xdd, 0x93, 0x6a, 0xfa, 0xae, 0xed, 0x6f, 0x45, 0x8e, 0x5b, 0x02, 0x7f, + 0x2e, 0xe2, 0xd5, 0x84, 0xbd, 0x8f, 0xbd, 0x7f, 0x01, 0xeb, 0x98, 0xb6, 0x2b, 0xc4, 0x67, 0xb8, + 0xa8, 0xca, 0x33, 0x78, 0x3f, 0x18, 0x78, 0x3b, 0xb3, 0x35, 0x6e, 0xbe, 0x5f, 0x67, 0xfa, 0x67, + 0xb2, 0xc1, 0xef, 0x03, 0x52, 0x46, 0x86, 0x16, 0x75, 0xf4, 0xc2, 0x05, 0x8f, 0xff, 0x6f, 0x3c, + 0x6d, 0xee, 0x64, 0xc7, 0x4f, 0xd8, 0x7a, 0xef, 0xe1, 0x0c, 0xd2, 0x6d, 0x31, 0x40, 0xce, 0x16, + 0xae, 0x59, 0xa4, 0x5b, 0x5a, 0xda, 0x8a, 0x88, 0x53, 0x41, 0xea, 0xc7, 0x0b, 0x11, 0x5d, 0xfc, + 0x92, 0x26, 0x29, 0x83, 0x15, 0xd5, 0x44, 0x75, 0x17, 0xbf, 0x1c, 0xb5, 0xc6, 0xdf, 0xdf, 0x15, + 0xd9, 0x08, 0xae, 0x05, 0x37, 0x9e, 0xe1, 0xb4, 0x11, 0x41, 0x29, 0xeb, 0x7c, 0x45, 0x67, 0xcd, + 0xbd, 0x4b, 0x7c, 0x98, 0x87, 0xbd, 0xbb, 0xbe, 0xc3, 0x07, 0x06, 0xbe, 0xfa, 0x31, 0xc6, 0x9d, + 0x40, 0xdc, 0x2e, 0x02, 0x5f, 0x8a, 0x22, 0xfd, 0x1d, 0x71, 0x68, 0x39, 0x69, 0x7f, 0x78, 0xd7, + 0xbe, 0x1a, 0x46, 0xb2, 0xb0, 0x8b, 0x27, 0x73, 0x48, 0x6f, 0x2f, 0x97, 0x35, 0x05, 0xad, 0xd3, + 0xd6, 0x74, 0x06, 0xb4, 0xa9, 0x97, 0xef, 0x68, 0xdf, 0x19, 0x1e, 0x00, 0x9b, 0xf3, 0xd4, 0x7a, + 0x25, 0x9b, 0x1c, 0xc5, 0x65, 0x35, 0xa9, 0x64, 0x00, 0x0e, 0xf6, 0x56, 0x73, 0x48, 0xf8, 0xe9, + 0x3c, 0xe9, 0x5d, 0x1a, 0x02, 0xbb, 0x44, 0xa6, 0x31, 0xd8, 0x3e, 0x19, 0x6c, 0x4a, 0xc5, 0x5b, + 0x50, 0xd3, 0x8f, 0xb8, 0x68, 0x85, 0x19, 0xbf, 0xd8, 0x35, 0x16, 0x0f, 0xca, 0x44, 0x69, 0x17, + 0x21, 0x93, 0xf8, 0x27, 0xae, 0xee, 0xdc, 0x81, 0x43, 0x57, 0xcb, 0x3b, 0x8f, 0x88, 0xfa, 0x15, + 0x31, 0xe5, 0xaa, 0x2a, 0x0f, 0xee, 0xfa, 0xe5, 0x38, 0x43, 0xee, 0x67, 0x1f, 0xbc, 0xe8, 0xbf, + 0xea, 0x41, 0x38, 0x66, 0x71, 0x2a, 0x78, 0x04, 0x3b, 0xbb, 0x1e, 0x27, 0x4c, 0x3b, 
0xe9, 0xcd, + 0xcb, 0x48, 0x0b, 0xab, 0x7f, 0x1b, 0x8a, 0x71, 0xd9, 0x8a, 0x66, 0xf7, 0x36, 0x09, 0x98, 0xfb, + 0xcd, 0x07, 0xc9, 0x26, 0x9f, 0x85, 0xac, 0x2d, 0xed, 0x54, 0xa1, 0x9b, 0xce, 0x4c, 0xca, 0xa6, + 0x5c, 0xa1, 0x08, 0x86, 0x8d, 0x5e, 0x5e, 0x6f, 0xf2, 0xc9, 0x82, 0x6c, 0x56, 0xa4, 0x93, 0x81, + 0xf9, 0xdf, 0x96, 0x4f, 0x3b, 0xe5, 0x23, 0xb1, 0x26, 0x06, 0x53, 0xec, 0xcb, 0x12, 0x49, 0xef, + 0x9d, 0xa5, 0x72, 0xf6, 0xdd, 0xf6, 0xfe, 0x12, 0xe0, 0xa9, 0x9e, 0x03, 0x95, 0xf5, 0x94, 0x97, + 0x15, 0x98, 0xe8, 0x9d, 0xb6, 0x7a, 0x04, 0xe6, 0xda, 0xca, 0x5c, 0x95, 0xaf, 0x80, 0x34, 0xf0, + 0x3a, 0xe2, 0xf6, 0x57, 0x79, 0xdb, 0xb7, 0x14, 0x6d, 0xa6, 0xc2, 0x13, 0x35, 0x5c, 0xf0, 0x7e, + 0x13, 0xc7, 0x17, 0x4f, 0x75, 0x99, 0xd2, 0xbe, 0xad, 0x2f, 0xd8, 0x29, 0xae, 0xaa, 0xa8, 0xbe, + 0x97, 0xcb, 0x39, 0x11, 0x5d, 0x7a, 0x07, 0x88, 0xf0, 0xa6, 0x37, 0xc4, 0xb3, 0x01, 0x0e, 0xa5, + 0xe0, 0x03, 0x3e, 0x5c, 0xc0, 0x58, 0x68, 0x65, 0xcf, 0x72, 0xc4, 0x64, 0x32, 0x16, 0x50, 0xe7, + 0x7a, 0xc6, 0x5a, 0xf3, 0xcc, 0x7f, 0xc7, 0x17, 0xca, 0xaa, 0x56, 0xfd, 0xa0, 0x79, 0x89, 0x14, + 0x49, 0xe0, 0x56, 0x45, 0x51, 0x32, 0x15, 0x13, 0x42, 0x91, 0x31, 0xad, 0x95, 0xdd, 0x39, 0x21, + 0x8c, 0x56, 0x36, 0x16, 0x31, 0x2c, 0x2a, 0xb8, 0x40, 0x4a, 0xc0, 0x6c, 0x66, 0x32, 0x06, 0x1b, + 0xb3, 0x91, 0x6e, 0x02, 0xec, 0xed, 0x13, 0x36, 0x25, 0x14, 0x63, 0x19, 0xe4, 0x7d, 0xbe, 0xed, + 0x50, 0x4d, 0xba, 0x83, 0x90, 0x22, 0x26, 0x06, 0x3a, 0xa2, 0x74, 0x52, 0xf7, 0x51, 0xbb, 0x94, + 0xd0, 0xba, 0xf0, 0xfb, 0xbd, 0xea, 0x4d, 0xfa, 0x6c, 0xb4, 0xde, 0x1f, 0xeb, 0x14, 0xb9, 0x66, + 0x83, 0x48, 0x82, 0xc2, 0xc6, 0x55, 0x4f, 0xe7, 0x9c, 0x9b, 0xf1, 0x9d, 0x66, 0x7e, 0x14, 0xe9, + 0x97, 0x19, 0x46, 0xa2, 0x4a, 0xdd, 0x7d, 0xd6, 0xd4, 0x19, 0xaf, 0xf7, 0x9f, 0x4e, 0xa4, 0xb2, + 0x87, 0xca, 0xe5, 0xc6, 0x14, 0x88, 0xa3, 0xf7, 0x73, 0x5c, 0x4e, 0xab, 0x3f, 0x12, 0xdd, 0x5f, + 0xfe, 0xc2, 0xd0, 0x66, 0xe9, 0x2a, 0xea, 0x50, 0xb9, 0x76, 0x41, 0xd2, 0x48, 0x83, 0xdc, 0xf9, + 0x13, 0x0f, 0x5b, 0x7d, 0x30, 0xbd, 0xb7, 0x6c, 0x82, 0x1a, 0x40, 0x63, 0xf8, 0x72, 0xed, 0x4d, + 0xd5, 0x98, 0xa0, 0x01, 0x34, 0x16, 0x46, 0x39, 0x5b, 0x07, 0xe3, 0xdb, 0xc5, 0x5e, 0x9b, 0xef, + 0x0b, 0xb9, 0xcc, 0x71, 0xe6, 0x32, 0x1b, 0x24, 0x6a, 0x38, 0x38, 0xaa, 0xe0, 0x05, 0x83, 0x1e, + 0x92, 0x21, 0x93, 0x9a, 0x9e, 0x94, 0x79, 0x18, 0x09, 0x96, 0xd3, 0x2b, 0x1a, 0x6d, 0x79, 0x82, + 0xbf, 0xf8, 0xaa, 0x67, 0xf6, 0x6b, 0x36, 0xa6, 0x7a, 0x1c, 0x50, 0x18, 0x37, 0xb9, 0x23, 0x1a, + 0xd7, 0xe5, 0x34, 0xb8, 0x1f, 0x34, 0x2a, 0x73, 0xe1, 0x60, 0x43, 0x63, 0x37, 0xf0, 0x08, 0x15, + 0xd9, 0x2f, 0x25, 0x3b, 0xdb, 0x78, 0x15, 0x15, 0x49, 0x00, 0xa8, 0x06, 0x23, 0x54, 0xe5, 0xf6, + 0x52, 0x12, 0x92, 0x97, 0xb0, 0x8c, 0x02, 0x9a, 0xab, 0xae, 0xdc, 0xbb, 0x50, 0x81, 0x33, 0xf8, + 0x45, 0xa2, 0x28, 0x3e, 0xfa, 0xa8, 0x21, 0x4d, 0x4a, 0xd8, 0x16, 0x39, 0x1a, 0xa8, 0xa6, 0xa9, + 0x89, 0xa3, 0x6d, 0xfc, 0x92, 0xd2, 0xdf, 0xe7, 0x5d, 0x99, 0xe1, 0x55, 0xd4, 0x17, 0x5b, 0xb8, + 0xd3, 0x82, 0x8d, 0xb2, 0x9b, 0x8d, 0xaa, 0x33, 0xc7, 0x72, 0x14, 0x77, 0xa1, 0x11, 0xe6, 0x0e, + 0x77, 0x65, 0x87, 0x7d, 0xc6, 0x8b, 0x67, 0x15, 0x07, 0x62, 0x70, 0x17, 0x06, 0xe8, 0x42, 0x21, + 0x72, 0x69, 0x16, 0x71, 0x31, 0xf6, 0x18, 0xdd, 0xe6, 0x37, 0xfe, 0x88, 0xb9, 0xac, 0x5a, 0x04, + 0x1c, 0x15, 0x55, 0xfc, 0x09, 0x98, 0x21, 0x1b, 0xf2, 0xed, 0xb6, 0xe8, 0xb5, 0x3e, 0x0b, 0xfc, + 0xce, 0x20, 0x3b, 0x89, 0x5a, 0x91, 0x3e, 0x58, 0xb6, 0x02, 0xaa, 0x95, 0xdf, 0x36, 0xad, 0xcd, + 0x0b, 0x75, 0x0e, 0x77, 0x93, 0x5e, 0xbc, 0x0a, 0xa8, 0xe8, 0xbd, 0x28, 0xa5, 0x74, 0xf4, 0xd0, + 0xfa, 0xbc, 
0x61, 0xc3, 0xe7, 0x8c, 0x9c, 0xed, 0x65, 0x05, 0x31, 0xdf, 0x94, 0x29, 0x5f, 0xfd, + 0xdc, 0xa7, 0xf0, 0x85, 0xc2, 0xde, 0x96, 0xad, 0x83, 0x9a, 0x9b, 0x48, 0xf6, 0x17, 0xba, 0x70, + 0x7a, 0xdb, 0x82, 0x17, 0x51, 0xda, 0x9b, 0x16, 0x00, 0x61, 0x4d, 0xda, 0x0b, 0x50, 0xaa, 0x99, + 0x08, 0x9f, 0xca, 0xb0, 0xdf, 0x47, 0xd5, 0x2a, 0x4f, 0x9e, 0x57, 0x08, 0xb4, 0x2c, 0x05, 0x28, + 0x06, 0x09, 0xb9, 0x68, 0x28, 0xa3, 0x39, 0x8f, 0x5e, 0xa9, 0xd5, 0x22, 0x68, 0xf7, 0xbb, 0x1b, + 0x3f, 0x6c, 0x0d, 0x07, 0xba, 0x9e, 0xc4, 0x9a, 0xc1, 0xfe, 0x15, 0x9d, 0xb5, 0xed, 0x72, 0xa6, + 0x59, 0x2b, 0x2a, 0x7e, 0x76, 0x13, 0x81, 0x83, 0xb8, 0x64, 0x5e, 0x28, 0x9d, 0x9c, 0x4c, 0x3b, + 0xb2, 0x3e, 0x18, 0xdf, 0x32, 0x7e, 0x9d, 0xb1, 0x78, 0xbd, 0xa3, 0xe8, 0x94, 0x4c, 0x8b, 0x3c, + 0xa8, 0xfe, 0x00, 0xf0, 0xbf, 0xd2, 0xc7, 0xfa, 0x62, 0x65, 0xfa, 0xe3, 0x08, 0xdf, 0xcb, 0x08, + 0xba, 0xf3, 0x2e, 0x24, 0x5c, 0x62, 0x20, 0x7a, 0x73, 0x05, 0x30, 0x9b, 0x52, 0xdd, 0xc2, 0x0f, + 0x3b, 0xa3, 0xa2, 0x4c, 0x93, 0x20, 0x3b, 0xe6, 0x1a, 0x79, 0x6a, 0x25, 0x54, 0x1f, 0x6e, 0x17, + 0xd9, 0x47, 0xb9, 0x1c, 0x03, 0x95, 0x8b, 0x6c, 0x3d, 0xc7, 0xe9, 0x0b, 0xd2, 0x61, 0x6c, 0x24, + 0x25, 0xcc, 0x90, 0x5b, 0xce, 0x78, 0xe6, 0x7b, 0x62, 0x93, 0x03, 0xce, 0x51, 0x69, 0x43, 0xe6, + 0x4e, 0x6b, 0x01, 0x18, 0xdf, 0x98, 0x91, 0x4a, 0x09, 0xc4, 0x07, 0xa1, 0xbe, 0xea, 0x54, 0x84, + 0x7b, 0x28, 0xab, 0x9a, 0xfb, 0x61, 0x55, 0xc4, 0x04, 0x39, 0x7a, 0x5d, 0x57, 0x73, 0x16, 0x09, + 0xe6, 0x37, 0x99, 0x94, 0x73, 0x1a, 0x72, 0xa4, 0x0d, 0x89, 0x7a, 0xf1, 0x3a, 0x99, 0x26, 0xd1, + 0xac, 0x02, 0x30, 0x7e, 0xa7, 0x63, 0x07, 0x75, 0xc2, 0xb0, 0x03, 0x6f, 0x5c, 0xcc, 0x17, 0xe2, + 0x0d, 0xb4, 0x58, 0x13, 0xf7, 0x03, 0x7c, 0xed, 0xcc, 0xd0, 0xc6, 0x60, 0x5e, 0xa4, 0x0f, 0x35, + 0x25, 0x27, 0x2a, 0x0f, 0x54, 0x91, 0x30, 0x2f, 0xed, 0xff, 0xe9, 0x8c, 0xe7, 0x8c, 0xfb, 0x59, + 0x74, 0x02, 0xf4, 0x49, 0x8e, 0x6c, 0x7e, 0xd6, 0xa5, 0xe3, 0xcf, 0x2b, 0x9d, 0x8c, 0x32, 0x97, + 0x76, 0xe9, 0x05, 0x12, 0x36, 0x2f, 0x5b, 0x19, 0x52, 0x7d, 0x78, 0xa1, 0xd5, 0x86, 0x0f, 0x01, + 0x10, 0x86, 0xf3, 0x9d, 0xd2, 0x5e, 0xab, 0xd3, 0xf3, 0xda, 0x21, 0x96, 0x58, 0x07, 0xc3, 0x02, + 0x22, 0x84, 0xf6, 0x60, 0x94, 0xf2, 0x15, 0xbe, 0x55, 0xa5, 0xab, 0x05, 0x10, 0xfc, 0xa4, 0xa2, + 0xb7, 0x9a, 0x3d, 0x41, 0xe5, 0xb0, 0x3f, 0xdf, 0xbc, 0x19, 0xf5, 0x84, 0xcc, 0xf8, 0x82, 0xcc, + 0x3d, 0x9b, 0x9e, 0xa7, 0xde, 0xc1, 0xc7, 0x71, 0xda, 0xf9, 0xa2, 0xb8, 0xe6, 0x53, 0x92, 0xf7, + 0x18, 0xb0, 0x7f, 0x58, 0x64, 0x87, 0x64, 0xf4, 0x57, 0x82, 0x26, 0xbe, 0xf6, 0x81, 0xa7, 0xfe, + 0x04, 0x45, 0x1a, 0x3f, 0x93, 0xf3, 0x39, 0x4c, 0x6d, 0x95, 0x80, 0x72, 0x1d, 0x72, 0xb1, 0x6e, + 0xc9, 0x44, 0xd0, 0x9d, 0xd6, 0x0e, 0x38, 0xff, 0x01, 0x79, 0x05, 0x77, 0x3b, 0xa9, 0xbf, 0x38, + 0x7f, 0x7f, 0x55, 0x47, 0x03, 0x26, 0x30, 0x3b, 0xbc, 0x0c, 0x80, 0x2f, 0x8e, 0x77, 0x7c, 0xdc, + 0xc5, 0x59, 0x9b, 0x8c, 0x1c, 0xfa, 0x0e, 0xd8, 0xe0, 0x4d, 0x38, 0x74, 0x13, 0x0e, 0x8a, 0x99, + 0xd6, 0x0f, 0x4a, 0xda, 0x75, 0xea, 0x77, 0xf5, 0xc2, 0xbd, 0x83, 0x3b, 0xb0, 0xe9, 0x09, 0x3c, + 0x36, 0x92, 0x5a, 0xda, 0x04, 0x62, 0x5e, 0x34, 0x79, 0xc8, 0x24, 0x6b, 0x05, 0x01, 0x9f, 0x58, + 0xea, 0x5a, 0x9f, 0x0f, 0x79, 0x96, 0x98, 0x68, 0x74, 0x21, 0xba, 0x5f, 0x39, 0x5a, 0xe8, 0x08, + 0xfd, 0x9a, 0x66, 0x98, 0x31, 0x18, 0xba, 0xf6, 0x8d, 0x8a, 0x20, 0xf9, 0x88, 0xf4, 0xf7, 0x3a, + 0x9c, 0x12, 0xae, 0xad, 0x9e, 0xae, 0x42, 0x70, 0x50, 0xb2, 0x0e, 0x5d, 0xee, 0xd1, 0x72, 0xe6, + 0xba, 0x1c, 0xc1, 0xd5, 0xff, 0x10, 0xd0, 0x04, 0x04, 0xfe, 0x2a, 0x4b, 0xe9, 0xa2, 0xed, 0xa7, + 0x4d, 0x61, 0xd4, 0x7d, 0x04, 0x32, 
0xcc, 0xe6, 0x9e, 0x01, 0x5c, 0xdb, 0x88, 0xdd, 0xdb, 0x0b, + 0x04, 0xb8, 0x37, 0x5c, 0x2d, 0x77, 0xe0, 0x3a, 0x0a, 0x9b, 0x92, 0xd2, 0x6f, 0x1c, 0x87, 0xb7, + 0x22, 0xaf, 0xaf, 0xa5, 0xdf, 0x25, 0x9d, 0x79, 0xa4, 0xc2, 0x8e, 0x57, 0x63, 0xc7, 0x15, 0x8d, + 0xa6, 0x47, 0xd1, 0xe4, 0x74, 0x81, 0xb7, 0x4a, 0xde, 0x9b, 0x02, 0xb8, 0x59, 0x72, 0x07, 0xe4, + 0x59, 0xad, 0x3f, 0xc5, 0x12, 0xd3, 0xe6, 0xa2, 0xba, 0x59, 0xa1, 0x88, 0x93, 0x11, 0xa4, 0xe5, + 0x25, 0x04, 0xc2, 0xc3, 0x27, 0x4f, 0xda, 0x21, 0x49, 0x60, 0x8e, 0x30, 0xae, 0x10, 0x6c, 0x01, + 0xe3, 0xaf, 0xb5, 0xb0, 0x58, 0xb2, 0xdd, 0x05, 0x72, 0xd2, 0x22, 0xa1, 0x43, 0x44, 0x14, 0xb4, + 0x4a, 0x84, 0x78, 0x06, 0xbe, 0xb5, 0x63, 0x49, 0x5b, 0x1d, 0x9e, 0xe0, 0x01, 0xd0, 0x86, 0x49, + 0xf3, 0x05, 0x9d, 0x38, 0x15, 0xf9, 0x42, 0x9c, 0xec, 0x35, 0x9e, 0xc8, 0xbc, 0x16, 0x9b, 0xa5, + 0xdc, 0xd8, 0xc2, 0x90, 0x91, 0x92, 0x2c, 0x0c, 0x69, 0x9a, 0xa6, 0xcd, 0xd9, 0xe8, 0xf0, 0x5a, + 0x23, 0xa3, 0xb1, 0xdb, 0xab, 0x3e, 0xfb, 0x83, 0x7b, 0x40, 0xfe, 0xfe, 0xfa, 0x32, 0x00, 0x3f, + 0x9f, 0x37, 0x72, 0x15, 0x17, 0x96, 0xd8, 0x25, 0xfc, 0x00, 0x74, 0x27, 0x1a, 0xd1, 0xfe, 0xde, + 0xb8, 0x75, 0x0f, 0xfb, 0xc1, 0xf7, 0x5e, 0xbe, 0xa6, 0x6f, 0xff, 0x23, 0xa6, 0x59, 0x58, 0xab, + 0x4a, 0x10, 0xa2, 0xdd, 0x52, 0x36, 0x35, 0x0a, 0x95, 0xf2, 0xd4, 0x33, 0xab, 0xba, 0xcc, 0xcf, + 0x33, 0xe9, 0x22, 0x6d, 0x0d, 0x6c, 0xc6, 0x3d, 0x22, 0x74, 0x9b, 0x41, 0x86, 0x77, 0x71, 0x54, + 0x1b, 0x97, 0x5e, 0x9b, 0xc7, 0x5c, 0x5f, 0x02, 0x91, 0x10, 0x30, 0x38, 0x3b, 0x5e, 0x8b, 0xcd, + 0xbe, 0x92, 0x32, 0x3e, 0x21, 0x71, 0xeb, 0xcf, 0x1b, 0x73, 0xa3, 0xa1, 0xa0, 0x24, 0x58, 0x06, + 0x28, 0x5b, 0x0a, 0x03, 0xbd, 0xfd, 0x37, 0x56, 0x84, 0xfe, 0x23, 0x10, 0xaf, 0xb3, 0x9c, 0xea, + 0x02, 0x97, 0xc9, 0xab, 0x02, 0x32, 0xd0, 0x2c, 0x11, 0x63, 0x08, 0xf7, 0xbb, 0x19, 0xfd, 0x9d, + 0x40, 0xe5, 0x58, 0x02, 0xdd, 0x24, 0x71, 0xac, 0xd5, 0xf7, 0xd3, 0x74, 0x78, 0x53, 0x13, 0xb2, + 0xee, 0x0f, 0x8a, 0x3e, 0xfa, 0x8c, 0xc1, 0x3a, 0x0b, 0x73, 0x6a, 0xe0, 0xab, 0x7a, 0xdb, 0xea, + 0x93, 0x49, 0xd6, 0xf4, 0x95, 0x82, 0x68, 0xcb, 0x64, 0x6c, 0x34, 0x49, 0x32, 0x06, 0xcb, 0xbd, + 0x4a, 0x10, 0x09, 0xe3, 0x74, 0x4e, 0xf4, 0xcf, 0x86, 0xc5, 0x0a, 0x78, 0x55, 0x90, 0x11, 0x51, + 0xf0, 0x1c, 0x79, 0x63, 0x9d, 0x77, 0xc5, 0x63, 0x92, 0x7e, 0x5b, 0x94, 0x44, 0x04, 0x1c, 0xf2, + 0x61, 0x15, 0x6f, 0xba, 0xf9, 0xe5, 0x6e, 0x47, 0x20, 0x45, 0x9e, 0xad, 0xb3, 0xf6, 0x16, 0x4f, + 0x10, 0x51, 0x6b, 0x1e, 0x88, 0xc5, 0x40, 0x48, 0x94, 0x9b, 0xf0, 0x6b, 0x5e, 0xd0, 0x33, 0xf1, + 0x43, 0xd9, 0xbb, 0xea, 0x4d, 0x47, 0xc6, 0xbd, 0x06, 0xe3, 0xe2, 0x5c, 0x4a, 0xde, 0x83, 0xe2, + 0x68, 0x96, 0xa9, 0x6b, 0xbf, 0x22, 0xbc, 0x3f, 0x89, 0x0d, 0xba, 0x3c, 0x02, 0x4b, 0x4c, 0x3b, + 0x38, 0x73, 0x06, 0x2f, 0x3e, 0xa6, 0x23, 0x6e, 0x8e, 0xde, 0xea, 0xa3, 0x9e, 0xb5, 0x75, 0xd9, + 0x9d, 0xaa, 0xcc, 0x0b, 0xac, 0xc5, 0x95, 0x2f, 0x0b, 0xa6, 0xfa, 0x7e, 0x02, 0xd4, 0x55, 0x49, + 0x86, 0xd2, 0x17, 0xb0, 0x1b, 0x2a, 0x30, 0x35, 0x67, 0x17, 0x82, 0x71, 0x97, 0x4a, 0xd8, 0x60, + 0x18, 0x2d, 0x4f, 0x87, 0x3e, 0x21, 0xb0, 0x8f, 0x22, 0xb3, 0xac, 0x30, 0xf0, 0x9f, 0xb7, 0x61, + 0x33, 0xc3, 0x24, 0x8f, 0x16, 0xef, 0x9a, 0xd7, 0x47, 0xc5, 0xf5, 0xee, 0x43, 0x5e, 0xc1, 0xb2, + 0xb8, 0x85, 0x35, 0xa2, 0xb6, 0xee, 0xde, 0x84, 0xa2, 0xa3, 0x95, 0x2b, 0x12, 0x0c, 0xeb, 0xc2, + 0x5c, 0x31, 0xe2, 0x3e, 0x53, 0x5a, 0xe5, 0x9b, 0x9d, 0x3e, 0x04, 0xdd, 0x9b, 0xa6, 0xeb, 0x25, + 0x5a, 0x2b, 0xd4, 0x5c, 0xfa, 0x52, 0x48, 0xfd, 0x86, 0xb3, 0x09, 0xab, 0xe7, 0xbc, 0x16, 0x55, + 0x5c, 0xd8, 0x8e, 0x04, 0x55, 0xf1, 0x81, 0xdb, 0x67, 0xd8, 
0x49, 0x31, 0x7a, 0x36, 0x6e, 0xfe, + 0x8d, 0x04, 0x51, 0xa7, 0xc5, 0x3f, 0x5a, 0x02, 0xd7, 0x19, 0x45, 0x7b, 0xb1, 0xa8, 0x07, 0x6d, + 0xf0, 0xf0, 0xa7, 0xe9, 0x84, 0x65, 0x06, 0x96, 0x49, 0xd2, 0x39, 0x0e, 0x41, 0x4e, 0x66, 0xd5, + 0x01, 0xae, 0x95, 0xb6, 0x9e, 0x5c, 0x32, 0xac, 0x99, 0x4f, 0x75, 0x39, 0xe8, 0x33, 0xe7, 0x16, + 0x1f, 0xed, 0x08, 0xf4, 0x36, 0x93, 0x58, 0x15, 0x5d, 0x59, 0xb1, 0x80, 0xfb, 0x62, 0x28, 0xcb, + 0x67, 0x7a, 0xee, 0xc7, 0x2f, 0xaa, 0x6f, 0x18, 0xf3, 0xa5, 0xf0, 0x69, 0x34, 0xe7, 0x9b, 0x70, + 0x53, 0x67, 0xdc, 0x23, 0x6b, 0xbd, 0xc3, 0x7e, 0x6a, 0x57, 0x52, 0x9d, 0x8a, 0xa5, 0x84, 0xf2, + 0x04, 0x44, 0x08, 0x73, 0x37, 0x3f, 0xad, 0xc5, 0x64, 0x9f, 0x96, 0xbf, 0xb0, 0x32, 0x6c, 0x1d, + 0x1d, 0x35, 0x75, 0xf3, 0x44, 0x08, 0x4d, 0x2d, 0x27, 0x2d, 0x74, 0x44, 0x9d, 0x6a, 0x39, 0x6b, + 0xe7, 0xca, 0x31, 0x87, 0x00, 0x8c, 0xdb, 0xc5, 0x0b, 0xb7, 0x26, 0x93, 0xdd, 0x67, 0x00, 0x7a, + 0x87, 0x62, 0xb6, 0x96, 0x2c, 0x0b, 0x8b, 0x36, 0x0a, 0x8e, 0x2d, 0xcf, 0xe5, 0xe9, 0x56, 0x75, + 0xea, 0x3c, 0x43, 0x25, 0x4c, 0x90, 0xc0, 0xe9, 0x97, 0xa7, 0x96, 0xfe, 0x95, 0xc3, 0x94, 0x77, + 0x6e, 0x96, 0x41, 0x22, 0x91, 0xbd, 0xe4, 0x86, 0x4c, 0x13, 0x79, 0xec, 0x24, 0xb8, 0xd5, 0xb0, + 0x6f, 0xe7, 0x5e, 0x1f, 0x82, 0xdb, 0xcf, 0x80, 0xf8, 0xd1, 0xc1, 0x4d, 0x77, 0x9f, 0x80, 0xcf, + 0x0b, 0x86, 0xeb, 0x87, 0xb1, 0xd1, 0x26, 0xc1, 0x36, 0xfa, 0x09, 0xb0, 0xcb, 0xe5, 0x1c, 0x59, + 0xb8, 0x0b, 0xac, 0xbe, 0xfb, 0xcb, 0x10, 0xe4, 0x6f, 0x3b, 0x50, 0xe1, 0x32, 0xd5, 0x23, 0xb4, + 0x78, 0x10, 0x5f, 0xba, 0xd9, 0x96, 0xde, 0x6a, 0x5d, 0x74, 0xa2, 0xe5, 0xab, 0x09, 0x83, 0x33, + 0x69, 0x84, 0xef, 0xf5, 0x1a, 0x44, 0xb2, 0x13, 0x47, 0xce, 0x7c, 0x09, 0x44, 0x4a, 0xa7, 0x36, + 0x1c, 0x53, 0x41, 0xad, 0x71, 0xac, 0xac, 0xdf, 0x07, 0xbf, 0x00, 0x3b, 0xf1, 0x33, 0xd7, 0xc3, + 0x23, 0xf4, 0x4f, 0xbe, 0xcf, 0xd2, 0x93, 0x72, 0xb4, 0x01, 0x99, 0x19, 0x96, 0xfd, 0x16, 0xb4, + 0x06, 0x15, 0x47, 0xbb, 0x5d, 0xe1, 0xea, 0x43, 0x78, 0x13, 0x95, 0x28, 0x3f, 0xbe, 0x85, 0x88, + 0xa0, 0x15, 0xd3, 0x01, 0x7d, 0x24, 0xf3, 0xea, 0x84, 0x3f, 0xfc, 0x8e, 0xcb, 0x66, 0x52, 0x66, + 0xbd, 0x5c, 0xcd, 0xfc, 0xc6, 0x57, 0x5e, 0xe6, 0xaf, 0xfb, 0x2f, 0x05, 0x8b, 0x34, 0x24, 0xe3, + 0xb6, 0xfb, 0x01, 0x72, 0x2f, 0x24, 0xf0, 0x82, 0xd2, 0x24, 0xef, 0xb3, 0x5e, 0x77, 0x5c, 0xf4, + 0xb9, 0x4f, 0x52, 0xed, 0x01, 0xba, 0x87, 0x40, 0xe6, 0x8c, 0x6e, 0x1e, 0x00, 0xfa, 0x70, 0x15, + 0x3d, 0xad, 0xb8, 0x08, 0xa6, 0x40, 0xde, 0xbd, 0x7f, 0x36, 0x5d, 0x1d, 0xb6, 0xf9, 0xd1, 0x4d, + 0x27, 0xbc, 0x5c, 0xfb, 0xe7, 0x76, 0xdb, 0x8e, 0xd2, 0x16, 0xeb, 0xcf, 0xdf, 0xe4, 0x7d, 0xa9, + 0xfa, 0x71, 0xbb, 0x86, 0x81, 0x0c, 0xd3, 0x0b, 0x0d, 0xb7, 0xb1, 0x25, 0x9f, 0xfd, 0xd6, 0x89, + 0x18, 0xa6, 0xbb, 0x6e, 0x28, 0x05, 0x79, 0x5d, 0x56, 0xa5, 0xae, 0xb6, 0x86, 0xed, 0x81, 0x9e, + 0xc8, 0xad, 0xf1, 0x54, 0xd8, 0x38, 0xe2, 0x96, 0x21, 0xcf, 0x77, 0x6f, 0xad, 0x76, 0x31, 0x6b, + 0xd9, 0x44, 0x7c, 0x43, 0x2e, 0xde, 0x85, 0x52, 0x50, 0x7c, 0xdd, 0x21, 0x9d, 0xc4, 0x28, 0xef, + 0x9d, 0x25, 0xac, 0x2d, 0xae, 0x52, 0xe6, 0xec, 0xf5, 0xe1, 0x09, 0x72, 0x56, 0x0c, 0x11, 0xfe, + 0xb0, 0x32, 0x9f, 0xca, 0x76, 0x02, 0xe5, 0x03, 0xb3, 0x49, 0xc2, 0x35, 0xdb, 0xc1, 0xc4, 0x2f, + 0x4b, 0x12, 0x1e, 0xb6, 0x1d, 0x4b, 0x68, 0x2d, 0xef, 0xbb, 0xad, 0x9f, 0xa2, 0x1a, 0xe3, 0x1c, + 0x6a, 0xb3, 0x7a, 0x78, 0xc6, 0xe5, 0x70, 0x76, 0x5f, 0xec, 0x07, 0xc8, 0x4f, 0x79, 0x89, 0x2c, + 0x31, 0xce, 0xea, 0xcb, 0x7e, 0x14, 0x9b, 0xdf, 0x80, 0xb1, 0x47, 0xd0, 0x5a, 0xd9, 0x89, 0x25, + 0x7d, 0x82, 0xc5, 0x32, 0x11, 0x9b, 0x65, 0xf0, 0x28, 0x3a, 0xde, 0xbf, 0x83, 0xfb, 
0xad, 0xf7, + 0x0d, 0x7b, 0x37, 0x12, 0xbd, 0x91, 0x0c, 0x91, 0xfb, 0x64, 0xfb, 0x99, 0x73, 0x12, 0x0e, 0xde, + 0xac, 0x49, 0xd0, 0xa3, 0x6f, 0x1c, 0x84, 0x6c, 0x07, 0xb9, 0x4f, 0x68, 0x8e, 0x5b, 0xcd, 0xf7, + 0xb8, 0xed, 0xa4, 0x85, 0x32, 0xa1, 0xd0, 0xf2, 0x7a, 0x84, 0x85, 0x2b, 0xfa, 0x18, 0x96, 0x2c, + 0x84, 0x71, 0x19, 0x05, 0x0e, 0x36, 0x0c, 0x93, 0x64, 0x1d, 0xab, 0x08, 0xd9, 0x2b, 0xb3, 0xf4, + 0x20, 0xf2, 0xd2, 0xcb, 0x7d, 0x7c, 0x72, 0x9a, 0x7c, 0x13, 0x28, 0xb8, 0xf2, 0xbe, 0x48, 0x99, + 0xf4, 0xab, 0x27, 0xc3, 0x74, 0x1b, 0x68, 0x02, 0x62, 0x9b, 0x3e, 0x56, 0xc9, 0xb0, 0x7b, 0x62, + 0x02, 0x19, 0x79, 0xdb, 0xed, 0x13, 0x19, 0xce, 0x7c, 0xda, 0xbf, 0x7f, 0xcc, 0x68, 0x7e, 0xde, + 0xfb, 0xe5, 0x1e, 0x93, 0xc8, 0x76, 0x06, 0x35, 0xf2, 0x60, 0x27, 0x50, 0x07, 0xd9, 0xa3, 0x5a, + 0x48, 0xdf, 0x2a, 0xcb, 0xef, 0x50, 0xd1, 0xc0, 0x4c, 0xbd, 0x23, 0x85, 0x21, 0xfe, 0x99, 0xb5, + 0x2f, 0x67, 0xb5, 0x65, 0x1e, 0x2a, 0x00, 0xb3, 0x29, 0x75, 0xb4, 0x4c, 0x00, 0xdc, 0x6c, 0xc9, + 0x35, 0x24, 0xed, 0x72, 0x64, 0x3b, 0xf7, 0x5e, 0x2c, 0x6e, 0xe0, 0x05, 0x3e, 0xd3, 0xc4, 0xc2, + 0xb2, 0x82, 0x2a, 0x47, 0x3f, 0x34, 0x3d, 0xe0, 0xf9, 0xc1, 0x05, 0x2b, 0xe7, 0x8b, 0xe9, 0x11, + 0x4e, 0xf2, 0xc9, 0x84, 0xa6, 0xe4, 0x22, 0x95, 0x92, 0xbe, 0xc0, 0xb1, 0x34, 0x1c, 0xa9, 0x35, + 0x6b, 0xe9, 0x2b, 0x95, 0x48, 0xb4, 0x29, 0x34, 0xf3, 0x08, 0x72, 0x66, 0x06, 0x90, 0x8f, 0x7e, + 0xa7, 0x33, 0x21, 0x88, 0xb0, 0xd0, 0xc1, 0x0c, 0x72, 0x00, 0x11, 0x54, 0xd4, 0x50, 0x10, 0x81, + 0x3a, 0x0e, 0x8f, 0x26, 0x57, 0x93, 0xe4, 0x4e, 0xc3, 0x0a, 0x24, 0x84, 0xa8, 0xa6, 0x53, 0xec, + 0xb4, 0x55, 0x3a, 0x0f, 0x25, 0xcf, 0x99, 0xb1, 0xb1, 0x71, 0xf5, 0x5f, 0xa6, 0xe9, 0xd8, 0x80, + 0xa2, 0x17, 0x91, 0x45, 0xcd, 0xf1, 0x0b, 0x61, 0xda, 0x6d, 0x69, 0x99, 0x4b, 0x8c, 0x6f, 0xbb, + 0xc9, 0x10, 0x83, 0x8b, 0xa2, 0xe8, 0xa7, 0x9e, 0x2e, 0x50, 0x18, 0x16, 0xf3, 0xfd, 0x39, 0xc5, + 0xb6, 0xcd, 0x4a, 0xcf, 0x36, 0x3c, 0x4d, 0x31, 0xde, 0xc9, 0x9d, 0x33, 0x51, 0x0b, 0x95, 0xf3, + 0x24, 0x61, 0x4a, 0x6e, 0xf6, 0x43, 0x77, 0xeb, 0x2c, 0x23, 0xcc, 0x4b, 0x49, 0x3d, 0x0f, 0x83, + 0x39, 0xb2, 0x72, 0xcb, 0x77, 0xa7, 0x5e, 0x86, 0xcf, 0x0d, 0x42, 0xa4, 0xee, 0x5c, 0x43, 0x07, + 0xfb, 0xa9, 0xd4, 0x1c, 0xcd, 0x97, 0xd4, 0x31, 0x2f, 0x0e, 0x5a, 0xe6, 0x7b, 0x43, 0xff, 0x4e, + 0x57, 0x35, 0xc8, 0x76, 0x98, 0xe4, 0xa0, 0xf5, 0xde, 0x46, 0x97, 0xd0, 0x1b, 0xc5, 0xcc, 0xe1, + 0x3e, 0x86, 0x1c, 0x87, 0x90, 0xd1, 0x12, 0x62, 0xc0, 0x93, 0x69, 0xbf, 0x1b, 0xe7, 0xfd, 0xb3, + 0xb9, 0x10, 0x1f, 0x6a, 0x79, 0xc1, 0x71, 0xf0, 0xbe, 0xd5, 0x08, 0x5e, 0xa7, 0x16, 0x74, 0x51, + 0xb3, 0x02, 0xcf, 0x3e, 0x22, 0xa6, 0x41, 0x3e, 0x68, 0xd5, 0xce, 0xee, 0x9a, 0xfd, 0x79, 0x01, + 0x5f, 0x22, 0x7d, 0xe3, 0x6c, 0xc7, 0x37, 0x59, 0x59, 0x44, 0x47, 0x89, 0x97, 0x20, 0x26, 0x9b, + 0x20, 0x70, 0xd2, 0x9f, 0x94, 0x80, 0x42, 0x4f, 0xc3, 0xac, 0x7b, 0xee, 0x62, 0xb7, 0x7b, 0x6d, + 0xd4, 0x11, 0x36, 0xf3, 0x61, 0xb7, 0x35, 0xea, 0x69, 0x25, 0x1b, 0x57, 0x0f, 0xbe, 0x6a, 0xee, + 0xc8, 0xc7, 0xd1, 0x00, 0x59, 0xe9, 0x2d, 0x9f, 0xf1, 0xc0, 0x3a, 0x21, 0x90, 0x62, 0xeb, 0xbe, + 0xe2, 0xa7, 0xe9, 0x86, 0x6e, 0xc6, 0x4f, 0x52, 0xe9, 0x06, 0x8e, 0xca, 0xa3, 0xb7, 0x92, 0xf2, + 0xb5, 0x27, 0xdd, 0xfc, 0xe8, 0xff, 0x2b, 0xa3, 0x72, 0x1c, 0x2f, 0x70, 0x7d, 0xda, 0x1c, 0xa5, + 0xf1, 0x9b, 0x79, 0xaa, 0x8d, 0x98, 0x49, 0x5b, 0xb4, 0x4c, 0x11, 0xf8, 0xd9, 0x0b, 0xf7, 0x75, + 0x83, 0xbd, 0x83, 0x51, 0x21, 0x23, 0x58, 0xf7, 0xf7, 0x2f, 0x58, 0xd7, 0x34, 0x89, 0xcb, 0xbc, + 0x82, 0x15, 0x03, 0xde, 0xd2, 0xe6, 0xe2, 0x89, 0x58, 0x01, 0xc6, 0xd5, 0xce, 0xfb, 0x51, 0xcb, + 0x72, 0x53, 
0x1c, 0x7a, 0xb2, 0xff, 0x25, 0xb1, 0x4f, 0xee, 0x83, 0xe4, 0xd8, 0xd1, 0xe0, 0xad, + 0x65, 0xb8, 0xcf, 0xd5, 0x3a, 0x12, 0x36, 0x63, 0xc2, 0x8b, 0x84, 0xb2, 0xb1, 0x50, 0x95, 0xe7, + 0x1d, 0x17, 0x71, 0x92, 0x20, 0x3e, 0xf6, 0x7d, 0x00, 0xdb, 0xc8, 0x9e, 0xa7, 0xad, 0x5e, 0x05, + 0x2c, 0xaa, 0x71, 0x1c, 0x83, 0xc5, 0x03, 0xcc, 0x96, 0x47, 0xff, 0x2c, 0x82, 0x9a, 0x1f, 0x6a, + 0xe3, 0x1d, 0xb2, 0xb7, 0x44, 0x02, 0xfc, 0x21, 0x91, 0x31, 0xbf, 0x29, 0xf2, 0xe1, 0xaf, 0xa0, + 0x85, 0x11, 0xc6, 0x36, 0xf8, 0x87, 0x21, 0x43, 0x93, 0x69, 0xe4, 0x5b, 0x3c, 0x45, 0xea, 0x04, + 0x88, 0x50, 0x46, 0x2a, 0x54, 0x8a, 0x94, 0xe6, 0x28, 0xa3, 0x23, 0xa0, 0xc3, 0xa3, 0x19, 0xb6, + 0x59, 0xbd, 0xb3, 0xab, 0xec, 0x86, 0xfe, 0x80, 0xf4, 0x63, 0x82, 0xd6, 0x9b, 0x99, 0xdf, 0xbb, + 0x3e, 0x73, 0x7e, 0xe7, 0xeb, 0xc9, 0x99, 0x32, 0x87, 0x98, 0x80, 0xd5, 0xae, 0xdc, 0x80, 0x74, + 0xfc, 0x0e, 0x08, 0x5e, 0xd9, 0x4c, 0x7f, 0x87, 0x7a, 0x18, 0x5c, 0xa6, 0x3d, 0x18, 0x98, 0x14, + 0x7e, 0xf2, 0x97, 0xb8, 0x65, 0xc7, 0x86, 0x04, 0xe1, 0xf4, 0xe6, 0x4e, 0x14, 0xc4, 0xc3, 0xc6, + 0x0f, 0x62, 0x57, 0x68, 0x62, 0xdd, 0x76, 0x08, 0x9a, 0x26, 0xea, 0x67, 0x1d, 0x87, 0x75, 0xe3, + 0xad, 0xab, 0xa4, 0x88, 0x06, 0x2d, 0x9f, 0x66, 0x64, 0x39, 0x7d, 0x4a, 0xbc, 0x7c, 0x5f, 0x7b, + 0x74, 0x46, 0x6b, 0xed, 0x17, 0x5e, 0x1d, 0xbc, 0xe2, 0x90, 0x37, 0x29, 0x70, 0x39, 0xc4, 0x68, + 0x87, 0x9c, 0x06, 0x3f, 0xca, 0xeb, 0x76, 0x12, 0x5b, 0x5b, 0x0c, 0x3f, 0x5b, 0x89, 0x01, 0x22, + 0x86, 0xa3, 0xe1, 0x6a, 0x99, 0xdc, 0x32, 0xe9, 0x8a, 0x96, 0xf8, 0xa9, 0xa8, 0x2e, 0xb0, 0x38, + 0x58, 0x32, 0x58, 0x21, 0xb6, 0x51, 0x96, 0x59, 0xcb, 0xeb, 0x7e, 0x50, 0x03, 0x3d, 0x0b, 0x29, + 0xd6, 0x68, 0x5d, 0xf3, 0xab, 0x49, 0xd8, 0x50, 0x64, 0x21, 0xd7, 0x8e, 0x54, 0x0a, 0x28, 0xb7, + 0x48, 0x74, 0xd4, 0xfd, 0x96, 0x6e, 0xd6, 0xb7, 0xda, 0x68, 0x67, 0x4c, 0x87, 0x8b, 0xef, 0x92, + 0x2d, 0x98, 0x8f, 0x1e, 0x2e, 0xb7, 0xf0, 0x49, 0x03, 0xa8, 0x8f, 0x7b, 0x3f, 0x73, 0x1b, 0x23, + 0xcf, 0x01, 0x80, 0x50, 0x5e, 0x59, 0x95, 0x08, 0x9d, 0x45, 0x55, 0xf0, 0x70, 0x8b, 0x51, 0x21, + 0x34, 0xb2, 0xf2, 0x7a, 0x6c, 0x43, 0xa2, 0x03, 0x12, 0xbe, 0x86, 0xdb, 0x95, 0xa7, 0x42, 0xfc, + 0x98, 0x27, 0xe4, 0xa0, 0xc1, 0x70, 0x76, 0x73, 0x27, 0x99, 0xb2, 0xb8, 0x86, 0x2e, 0xdb, 0xf9, + 0x88, 0x60, 0xd6, 0x19, 0xdf, 0xc9, 0xc8, 0x68, 0x56, 0x97, 0x0a, 0xd2, 0xa8, 0x9f, 0xed, 0xaa, + 0x9f, 0x35, 0x78, 0x14, 0xb6, 0x23, 0xf3, 0x2e, 0x10, 0xac, 0x1b, 0x2f, 0xf7, 0x62, 0x76, 0x3f, + 0x8b, 0xa5, 0xaa, 0x9d, 0x32, 0x25, 0x68, 0xee, 0x7d, 0x95, 0x68, 0xc5, 0x22, 0x4f, 0x98, 0xaf, + 0x5a, 0x31, 0xb9, 0x9a, 0xf6, 0x48, 0xbd, 0xbc, 0x66, 0xc0, 0x38, 0x06, 0x6d, 0x44, 0x5b, 0x89, + 0x4a, 0x55, 0x7e, 0x94, 0x79, 0x60, 0x84, 0x31, 0x51, 0x77, 0xbc, 0xd8, 0xd6, 0x0b, 0x79, 0x20, + 0x0d, 0x57, 0x7e, 0x73, 0x73, 0x9b, 0x70, 0xc2, 0x6e, 0x38, 0x23, 0x88, 0xe0, 0x46, 0x46, 0xd0, + 0x42, 0x5a, 0xcd, 0xbd, 0x35, 0x1d, 0xb4, 0xf1, 0x6e, 0xa3, 0x82, 0x33, 0x82, 0x38, 0xbf, 0x28, + 0x10, 0x01, 0x2c, 0xc4, 0x06, 0xb4, 0x75, 0xfe, 0x04, 0xe8, 0x6e, 0x17, 0x37, 0xce, 0x3b, 0x0f, + 0x08, 0x8e, 0xcd, 0x93, 0x87, 0x1e, 0x13, 0x5f, 0xab, 0x5d, 0x48, 0xca, 0x29, 0x4d, 0xf3, 0x1a, + 0x70, 0xb1, 0xb7, 0x70, 0x79, 0xc0, 0xdf, 0x3c, 0x9c, 0xe9, 0x40, 0xb1, 0x0e, 0x75, 0x1d, 0x6a, + 0x2a, 0x2f, 0x7b, 0xd7, 0xc9, 0x80, 0x6c, 0x3e, 0xbf, 0x4d, 0x9e, 0x3a, 0x5c, 0x44, 0xef, 0x45, + 0xa7, 0x93, 0x3d, 0xe3, 0xf3, 0x9e, 0x72, 0xe2, 0xdf, 0x2f, 0x74, 0x57, 0x88, 0xbd, 0x52, 0x22, + 0x45, 0x90, 0x64, 0x57, 0x48, 0x26, 0xc0, 0x42, 0xf0, 0xdc, 0xa5, 0x71, 0xdd, 0x96, 0x38, 0x9d, + 0x8d, 0xa1, 0x33, 0xdf, 0xcb, 0x7b, 
0x1b, 0xcd, 0x0a, 0x4b, 0x4d, 0x8c, 0x11, 0xe4, 0xbc, 0x96, + 0x99, 0x42, 0x7a, 0xf8, 0x70, 0xeb, 0x76, 0xad, 0x20, 0x1d, 0x3a, 0x4b, 0x2c, 0x9b, 0x20, 0x9f, + 0x41, 0x39, 0xfa, 0xcf, 0x54, 0x48, 0xfb, 0x19, 0x83, 0x88, 0xc0, 0x6f, 0x6f, 0xc2, 0x9f, 0xc2, + 0x54, 0xc4, 0x26, 0x41, 0x00, 0x33, 0xfb, 0xc5, 0x1f, 0x89, 0xf7, 0xd0, 0x53, 0xf4, 0xef, 0x07, + 0x9a, 0x44, 0x33, 0x18, 0x0e, 0x43, 0xe3, 0x72, 0xc3, 0x95, 0xc0, 0xb6, 0x58, 0xda, 0xdf, 0x46, + 0x61, 0x2b, 0x63, 0x4d, 0x4c, 0xa3, 0x8c, 0x15, 0x42, 0xd6, 0xf4, 0x9e, 0x65, 0x03, 0xe3, 0xf0, + 0x2e, 0xbc, 0x26, 0x23, 0x37, 0x7a, 0xe9, 0x52, 0x32, 0xa6, 0xc0, 0x1c, 0x72, 0x25, 0x92, 0x8e, + 0x4b, 0x21, 0x82, 0x46, 0xc5, 0x53, 0x5a, 0x55, 0xdd, 0x6c, 0x72, 0x0b, 0x03, 0x35, 0xfe, 0xab, + 0xab, 0x29, 0xec, 0x22, 0x3b, 0xd9, 0x2d, 0x38, 0x0f, 0x17, 0xea, 0xf3, 0xec, 0x59, 0x79, 0xbf, + 0xb5, 0x3c, 0xf2, 0x48, 0xda, 0x30, 0xb6, 0xc2, 0x12, 0xcd, 0x94, 0x7e, 0x9a, 0xfd, 0xca, 0x98, + 0x6f, 0xaf, 0xa8, 0x8b, 0x9b, 0x9d, 0x63, 0xa8, 0xcf, 0x32, 0x9e, 0x0a, 0x91, 0x91, 0xd0, 0xd5, + 0x88, 0x28, 0xf2, 0xd5, 0x95, 0x6a, 0x67, 0xbf, 0x26, 0x7c, 0xa0, 0xc4, 0xaa, 0x49, 0xb7, 0x88, + 0xb9, 0xe3, 0xfc, 0x41, 0xea, 0x38, 0xfa, 0xa6, 0xe5, 0x78, 0xb5, 0xdd, 0xd5, 0x18, 0x64, 0x36, + 0x37, 0xf6, 0x48, 0xa7, 0xa2, 0xcc, 0xe1, 0x48, 0x10, 0x67, 0x94, 0x28, 0x92, 0x6d, 0x91, 0x0a, + 0x9b, 0x08, 0xde, 0x18, 0x03, 0xe6, 0x1b, 0xcc, 0x2d, 0xd4, 0x3c, 0x86, 0x40, 0xa8, 0x0e, 0x58, + 0x9b, 0xda, 0xb2, 0xcd, 0x7b, 0x8f, 0x92, 0x83, 0xac, 0xda, 0x2d, 0x54, 0xa2, 0x12, 0xab, 0x4a, + 0x74, 0x87, 0x79, 0x1c, 0x0b, 0x5a, 0x52, 0x12, 0x8b, 0x7e, 0x59, 0xa0, 0x2a, 0x49, 0xd9, 0x71, + 0x71, 0x48, 0x92, 0x5e, 0x1f, 0x3f, 0x70, 0x4b, 0x9e, 0xd7, 0x63, 0x12, 0x2a, 0x91, 0x10, 0xc9, + 0xce, 0x35, 0x2c, 0x40, 0x91, 0x99, 0xf2, 0xb7, 0x35, 0x42, 0x3f, 0xf0, 0x5e, 0x6f, 0x8a, 0xe0, + 0x55, 0x13, 0xd1, 0xc3, 0x77, 0xe2, 0x10, 0xb2, 0x1d, 0xef, 0x62, 0xd8, 0x49, 0xc9, 0x8c, 0x31, + 0xa4, 0x06, 0x22, 0xc6, 0xb9, 0x70, 0x44, 0x1d, 0xba, 0x2a, 0x87, 0x2c, 0x39, 0xef, 0x67, 0x23, + 0xb5, 0x65, 0xb9, 0x9c, 0x1c, 0x8e, 0x71, 0xb6, 0xcd, 0xf8, 0x7e, 0x1f, 0xe3, 0x48, 0x25, 0xb0, + 0x5f, 0x7f, 0x67, 0xb4, 0x58, 0x2b, 0x28, 0x53, 0x42, 0xad, 0xd9, 0x2b, 0x00, 0x06, 0x13, 0x41, + 0x70, 0xd3, 0xbd, 0xc7, 0xdd, 0xba, 0x25, 0x7e, 0xc2, 0x1c, 0xcd, 0x5a, 0xb5, 0x79, 0xdf, 0x64, + 0xcb, 0x2a, 0x68, 0x15, 0xfa, 0x5c, 0x2a, 0x75, 0x9c, 0x76, 0x9e, 0x2f, 0x17, 0x6c, 0xcf, 0xb2, + 0xee, 0x85, 0x76, 0x39, 0xcb, 0xaf, 0x46, 0x03, 0x4a, 0x62, 0xb9, 0xca, 0x39, 0x1e, 0x1b, 0x4d, + 0xa0, 0x6b, 0xea, 0xe6, 0xce, 0x25, 0x10, 0x85, 0xb9, 0xbd, 0x35, 0xa8, 0x54, 0x11, 0xaa, 0xaa, + 0x98, 0x88, 0x31, 0x78, 0xf7, 0x50, 0x44, 0x2c, 0x2f, 0x09, 0x1b, 0xc6, 0xfc, 0xcd, 0xc1, 0x06, + 0x9f, 0x0a, 0xa4, 0x5b, 0x86, 0xaf, 0x75, 0x97, 0x19, 0xa8, 0x07, 0x70, 0xf7, 0xb9, 0x30, 0xd8, + 0xe6, 0x62, 0xa1, 0x9f, 0x6c, 0xe9, 0xa6, 0x78, 0x44, 0x98, 0x75, 0xdd, 0x42, 0x88, 0x60, 0x08, + 0xca, 0x4f, 0x0e, 0x22, 0x51, 0xc8, 0x5b, 0x10, 0xbf, 0x71, 0xfc, 0x6a, 0x1f, 0x8d, 0x52, 0x57, + 0x86, 0x39, 0xe9, 0xbb, 0x83, 0xfb, 0x72, 0xb7, 0x5d, 0xa2, 0xbc, 0x57, 0x8e, 0xa7, 0x45, 0x74, + 0x95, 0x8c, 0xf8, 0xf5, 0x50, 0xeb, 0x9a, 0x63, 0x99, 0x13, 0xb9, 0x91, 0x75, 0x59, 0x1c, 0xa6, + 0x74, 0x51, 0xe0, 0xa9, 0x4c, 0xd1, 0xfe, 0xbd, 0x90, 0x73, 0x33, 0x4c, 0x00, 0xbc, 0x4e, 0x69, + 0x00, 0x6a, 0x92, 0xdb, 0x40, 0x17, 0x76, 0xdf, 0xc2, 0xbf, 0x4d, 0x23, 0x77, 0x90, 0xb5, 0xdb, + 0xa0, 0xa4, 0xe4, 0xf6, 0x9d, 0xc1, 0x21, 0x49, 0xc0, 0xb3, 0xdd, 0x2d, 0x89, 0xb9, 0x5c, 0xd1, + 0x8d, 0xd1, 0xff, 0x3b, 0xbf, 0xaa, 0x72, 0x89, 0x80, 0x56, 
0x28, 0xee, 0x34, 0xc8, 0x14, 0x1c, + 0x83, 0x83, 0xf7, 0x5d, 0xf8, 0x9c, 0xf3, 0x43, 0x4e, 0x03, 0xe0, 0x21, 0x5f, 0x11, 0x31, 0xe8, + 0x4c, 0xab, 0x17, 0x4a, 0xc8, 0xc8, 0xb6, 0xbd, 0xc5, 0xe5, 0x20, 0x20, 0x73, 0xa5, 0x64, 0x9d, + 0x3a, 0xf2, 0x9e, 0x7c, 0xe9, 0xa1, 0x35, 0xc3, 0x7d, 0xe3, 0xf5, 0x73, 0xa3, 0x74, 0x5f, 0x75, + 0xbb, 0x17, 0x05, 0xf4, 0x08, 0x5e, 0x77, 0x8d, 0xb8, 0xf1, 0x09, 0x8b, 0xae, 0x6a, 0x80, 0xb1, + 0x84, 0xa7, 0x7f, 0x87, 0x8f, 0x49, 0x7b, 0x2e, 0x95, 0xe8, 0x00, 0xd7, 0x8b, 0xe0, 0xda, 0x5d, + 0x5c, 0xb2, 0xb7, 0xa0, 0xca, 0x15, 0xb0, 0x61, 0xfb, 0x54, 0xe6, 0x6e, 0x7a, 0xf2, 0x05, 0x0b, + 0x7a, 0x12, 0x43, 0xa7, 0xa7, 0x05, 0x4b, 0xdd, 0x7f, 0x7c, 0x3d, 0x80, 0x65, 0xee, 0xd7, 0x8e, + 0x2b, 0xd0, 0x0c, 0x8c, 0x3a, 0xa4, 0x0a, 0xf4, 0x80, 0x0b, 0x1e, 0xa3, 0xd5, 0x1e, 0x08, 0x12, + 0x9b, 0x75, 0xa5, 0x94, 0x66, 0x99, 0x6e, 0xe8, 0x59, 0xef, 0xa7, 0x85, 0xfa, 0x11, 0x8b, 0x76, + 0xf6, 0x3d, 0x48, 0x3d, 0xbc, 0x7f, 0xc3, 0x44, 0x39, 0x11, 0x05, 0x5a, 0xdd, 0xc6, 0x19, 0xb1, + 0x3c, 0x9b, 0x70, 0x7b, 0xda, 0xcb, 0xd7, 0x4e, 0xc0, 0x3a, 0x85, 0x91, 0xf7, 0x4c, 0x14, 0xf0, + 0x3a, 0xad, 0xb7, 0xf3, 0x89, 0xc9, 0xc2, 0xc9, 0xbf, 0x6e, 0x1e, 0xb6, 0x78, 0x6b, 0x27, 0xc9, + 0x30, 0x94, 0xe5, 0x0f, 0xdd, 0x75, 0x46, 0x37, 0xd7, 0x3a, 0xf6, 0xba, 0xa1, 0x44, 0x27, 0xf1, + 0xdb, 0x7f, 0x18, 0x89, 0x8d, 0xc2, 0x99, 0x1c, 0xe2, 0x74, 0x7b, 0xae, 0xc2, 0x66, 0x64, 0x2d, + 0xee, 0x9a, 0xce, 0xbb, 0x43, 0x01, 0x9f, 0xac, 0x7a, 0x78, 0x19, 0x57, 0x82, 0xda, 0x6a, 0x0f, + 0x48, 0xf6, 0x2b, 0x33, 0xb1, 0x68, 0xd1, 0x08, 0xb7, 0x24, 0xc5, 0xb9, 0x22, 0xef, 0xfa, 0x83, + 0x6e, 0xc2, 0x84, 0x99, 0x3d, 0x2e, 0x29, 0xa6, 0x6d, 0xa7, 0xe2, 0x42, 0x89, 0xc1, 0x28, 0x7a, + 0x1e, 0x0b, 0x98, 0x40, 0x7a, 0xf3, 0xfd, 0xfd, 0x2d, 0x25, 0xbe, 0x09, 0x08, 0x36, 0x2d, 0x27, + 0x4d, 0xe2, 0xf4, 0x8d, 0xce, 0x8d, 0x29, 0xb7, 0x2f, 0xa6, 0xcc, 0x5c, 0x8f, 0x16, 0xfd, 0x41, + 0x38, 0x1f, 0x93, 0xc9, 0xe3, 0xd3, 0x2c, 0xb9, 0x09, 0x72, 0x88, 0xbe, 0x62, 0xa5, 0xb8, 0x35, + 0x13, 0xdb, 0xe0, 0xf6, 0x6b, 0x18, 0x1b, 0x84, 0x9f, 0xba, 0xb0, 0x3b, 0x7b, 0x0d, 0x84, 0xe0, + 0xf2, 0xe4, 0x41, 0x90, 0x00, 0xc8, 0x57, 0x67, 0x77, 0x42, 0x67, 0xc0, 0xe0, 0x1a, 0x3f, 0xa0, + 0xfd, 0xf6, 0xa9, 0xfa, 0xb1, 0x42, 0xf5, 0xdd, 0x55, 0x03, 0xe8, 0x27, 0x66, 0x83, 0x55, 0x92, + 0x77, 0x00, 0xf4, 0xfe, 0x38, 0x8c, 0x77, 0x8c, 0x79, 0x6a, 0xda, 0xd8, 0x4a, 0x13, 0xda, 0xd9, + 0x4b, 0x9d, 0x65, 0x2a, 0xde, 0xef, 0xd1, 0x13, 0x18, 0x9e, 0x33, 0xf2, 0x08, 0xd7, 0xbe, 0x89, + 0x6b, 0xd1, 0xd2, 0x0b, 0xdc, 0x81, 0xcf, 0x18, 0xb9, 0x19, 0x67, 0xf2, 0xc5, 0xb1, 0xa8, 0xf2, + 0xc0, 0x31, 0x0e, 0xce, 0x3d, 0xd6, 0x90, 0x69, 0xe1, 0x4f, 0xf1, 0x8a, 0x9a, 0xeb, 0x9e, 0x88, + 0xee, 0xe1, 0x46, 0x59, 0x4d, 0x34, 0xc4, 0xda, 0xc0, 0xa7, 0x1f, 0x79, 0xb7, 0x05, 0x38, 0xfe, + 0xcd, 0x19, 0x41, 0x3b, 0xbe, 0x0e, 0x01, 0x98, 0x7d, 0xdf, 0x7e, 0xb1, 0x16, 0x6a, 0xf2, 0xf8, + 0xf3, 0xc2, 0x82, 0xdf, 0xeb, 0xca, 0xd8, 0xc2, 0xc2, 0x4a, 0x22, 0xef, 0x35, 0xa0, 0x16, 0xec, + 0x7c, 0x05, 0xd7, 0xb1, 0x76, 0x3c, 0x9a, 0x61, 0xb5, 0x0b, 0x67, 0xfe, 0x15, 0x53, 0x7b, 0xeb, + 0x51, 0xda, 0x83, 0x6a, 0xff, 0xb3, 0x05, 0x7d, 0x5b, 0xec, 0x0a, 0x52, 0xd5, 0xfb, 0x2d, 0xe0, + 0xbc, 0x3e, 0x57, 0xb6, 0xba, 0x5a, 0x06, 0x86, 0x25, 0xdb, 0xe6, 0x3d, 0x70, 0x5a, 0xfb, 0x9a, + 0x6b, 0x11, 0xfd, 0xb3, 0xd9, 0xfa, 0xdb, 0xf9, 0x28, 0x96, 0x1c, 0xa5, 0x21, 0xb5, 0x1e, 0x52, + 0xe4, 0x5d, 0xa9, 0x63, 0x05, 0xd3, 0x8b, 0x31, 0x23, 0x15, 0x0f, 0x11, 0xb4, 0x9e, 0x92, 0xfe, + 0x1e, 0xda, 0x46, 0xf9, 0x89, 0xa5, 0x8a, 0xc9, 0x34, 0xa0, 0x21, 0x32, 0xea, 0xe8, 
0xd4, 0xca, + 0x8c, 0x53, 0x46, 0xb2, 0x41, 0x4d, 0x14, 0x86, 0xda, 0x32, 0xb4, 0x63, 0xda, 0x2c, 0xe8, 0x79, + 0x0a, 0x88, 0xb5, 0x14, 0x25, 0xd1, 0xff, 0x7c, 0xf8, 0x67, 0xf2, 0x34, 0x32, 0xa4, 0x66, 0x36, + 0x00, 0x1c, 0x5d, 0xe6, 0xbc, 0x5a, 0x0f, 0x98, 0x90, 0x5c, 0x7f, 0x7b, 0x14, 0x7b, 0x9f, 0xb3, + 0x35, 0x3c, 0x40, 0x36, 0x92, 0xb4, 0xa3, 0xcf, 0x4e, 0xcf, 0x76, 0xc7, 0xd8, 0x84, 0x03, 0x3e, + 0x0c, 0x7f, 0x79, 0x13, 0xcc, 0x4e, 0x68, 0x11, 0x2c, 0xe1, 0x3b, 0x38, 0x21, 0xbf, 0x21, 0xb8, + 0x7d, 0x11, 0x36, 0xf0, 0xe9, 0xdc, 0x00, 0xda, 0x1c, 0x90, 0x9f, 0xa1, 0x20, 0xcd, 0x03, 0xcc, + 0x66, 0x1f, 0x08, 0xa6, 0xb4, 0x2c, 0x76, 0x8c, 0x91, 0xca, 0x84, 0x3f, 0xee, 0x02, 0x62, 0x1a, + 0x16, 0x1b, 0x02, 0x5a, 0xed, 0xc1, 0xae, 0x3a, 0xa8, 0x65, 0x17, 0xa1, 0xd4, 0x61, 0xec, 0x5e, + 0x2f, 0x33, 0x8a, 0xbf, 0xec, 0x84, 0xcd, 0x91, 0x0c, 0x21, 0xe0, 0x4c, 0x1d, 0x40, 0x3a, 0xa3, + 0x3e, 0x77, 0xe3, 0xbe, 0x9d, 0x31, 0xd2, 0xb4, 0xcc, 0xa3, 0xee, 0x84, 0x11, 0xd6, 0x58, 0xe3, + 0x19, 0x0f, 0x57, 0xd7, 0x87, 0x93, 0x0e, 0x1d, 0x2d, 0x27, 0x7f, 0x69, 0x6c, 0xf6, 0xdc, 0x42, + 0x49, 0x4a, 0xb2, 0xd3, 0x5c, 0x42, 0x1b, 0x0b, 0xf8, 0x2b, 0x10, 0x07, 0x13, 0x03, 0x23, 0x23, + 0x4e, 0xea, 0xae, 0x25, 0xa6, 0x02, 0x61, 0xad, 0x8a, 0x6b, 0x4c, 0x53, 0x76, 0xc1, 0x3a, 0x15, + 0x19, 0xf7, 0x09, 0x86, 0xca, 0xd3, 0xfc, 0xf0, 0x33, 0x35, 0xc5, 0xe6, 0xf6, 0x70, 0x5c, 0xec, + 0xbd, 0xb8, 0xa5, 0xbf, 0x4b, 0xfd, 0x27, 0xc1, 0xbf, 0x33, 0x9b, 0x8b, 0x2b, 0x33, 0x8a, 0x5c, + 0x3a, 0x6a, 0xf2, 0xe5, 0xd6, 0xc3, 0x74, 0x32, 0x15, 0x5a, 0xe5, 0x98, 0x78, 0x68, 0x96, 0x0f, + 0xf7, 0xb6, 0x79, 0xe5, 0x43, 0xb7, 0x1d, 0x10, 0x8a, 0x72, 0xa2, 0x42, 0x79, 0x5e, 0x94, 0x24, + 0xfa, 0xe9, 0xea, 0xc1, 0x63, 0x0c, 0x18, 0x9d, 0x96, 0x3f, 0x7e, 0x0e, 0x3f, 0xf8, 0x92, 0x89, + 0xd9, 0x58, 0x5a, 0x30, 0xd7, 0xa8, 0x44, 0x56, 0xa4, 0x77, 0xa9, 0x61, 0xee, 0x99, 0x93, 0xe1, + 0xaf, 0xa1, 0x6c, 0xfb, 0x59, 0xd3, 0x3d, 0x92, 0x9a, 0x7f, 0x56, 0xa8, 0xe0, 0xd8, 0xad, 0x28, + 0x52, 0x7c, 0xf1, 0xb3, 0xe4, 0xf8, 0x8a, 0x5e, 0x4c, 0x51, 0xb4, 0x59, 0xec, 0xdc, 0x94, 0xf2, + 0xde, 0x7c, 0x99, 0x15, 0x0a, 0xe2, 0xf9, 0x25, 0x29, 0x20, 0x51, 0x08, 0x0a, 0x5d, 0x3c, 0x9f, + 0x64, 0x8e, 0x85, 0x3f, 0xe6, 0x34, 0x94, 0xd5, 0x14, 0xd3, 0x17, 0xc7, 0xa0, 0xfb, 0x59, 0x4b, + 0x0c, 0x37, 0x77, 0x20, 0xeb, 0x45, 0x08, 0xce, 0xcb, 0xd9, 0xda, 0xfc, 0x3b, 0xd4, 0x83, 0xd0, + 0x7a, 0xcf, 0x90, 0xd4, 0x7a, 0xce, 0xe3, 0x9f, 0xf1, 0x1d, 0xf2, 0x25, 0x25, 0x69, 0x67, 0x40, + 0x41, 0x2b, 0x8c, 0xf5, 0xd0, 0x6a, 0x20, 0xda, 0xbe, 0x87, 0xb7, 0x59, 0xfe, 0x86, 0xfd, 0xa9, + 0x24, 0x9b, 0x1f, 0x43, 0x4c, 0x07, 0x56, 0x54, 0x99, 0x6f, 0x6a, 0x9d, 0xa8, 0xe6, 0x9f, 0x3d, + 0xc6, 0xeb, 0x5d, 0x0f, 0xd5, 0x67, 0xc8, 0xca, 0x29, 0xd7, 0x5e, 0x9e, 0xf5, 0xf2, 0x97, 0x7c, + 0x7b, 0x6d, 0x65, 0xf8, 0x79, 0x49, 0x07, 0x2a, 0xab, 0x35, 0xa5, 0x72, 0x49, 0x93, 0xa9, 0xb8, + 0xcd, 0x2d, 0xda, 0x0e, 0xbc, 0x0f, 0x4d, 0xbc, 0xbc, 0x9e, 0xb4, 0x1f, 0x29, 0x22, 0xf2, 0x80, + 0xbd, 0x0d, 0x33, 0xf6, 0x94, 0x59, 0x18, 0xa2, 0x2d, 0x48, 0x7f, 0x42, 0xae, 0x37, 0x99, 0x1e, + 0xe8, 0x79, 0xa8, 0x9a, 0xee, 0x8f, 0x7e, 0xe4, 0xd9, 0xd4, 0xb2, 0xaa, 0xca, 0x53, 0x32, 0x6f, + 0xbf, 0x2f, 0xdc, 0x87, 0xf3, 0x03, 0xab, 0x93, 0xb8, 0x5a, 0x1d, 0x65, 0x8c, 0x5a, 0x57, 0x03, + 0xbe, 0xd1, 0x35, 0xf6, 0xa4, 0x68, 0x61, 0x7d, 0x21, 0x56, 0x30, 0x5e, 0x3e, 0x5a, 0x25, 0xec, + 0xdb, 0x64, 0xa2, 0x1d, 0x24, 0x0e, 0xfa, 0xa2, 0x23, 0xe2, 0xb9, 0x8a, 0x23, 0x57, 0x32, 0x7a, + 0xf0, 0xba, 0x6e, 0xa1, 0xc1, 0x3a, 0xee, 0xe3, 0xaf, 0xbf, 0xc9, 0xe5, 0xca, 0xb8, 0x0a, 0x53, + 0xf4, 0x25, 
0x27, 0xd9, 0x50, 0xe4, 0x76, 0x10, 0x15, 0x10, 0xea, 0xca, 0x94, 0x3f, 0xc6, 0x01, + 0xd1, 0x66, 0xdb, 0x1f, 0x3a, 0x09, 0xe5, 0x7f, 0x2f, 0xb2, 0xf9, 0x15, 0xb0, 0x5e, 0x41, 0x63, + 0x00, 0x50, 0xfa, 0x15, 0x88, 0xb3, 0x8b, 0xce, 0xaf, 0xfe, 0x1c, 0x34, 0xae, 0xfc, 0xfa, 0x3d, + 0x7e, 0x9f, 0x41, 0xfe, 0xb1, 0xba, 0xe7, 0x35, 0x09, 0xe9, 0x35, 0x1e, 0x21, 0xbf, 0x7d, 0x00, + 0xc6, 0x26, 0xd0, 0x09, 0x92, 0x8f, 0xba, 0x00, 0xa5, 0x76, 0xcd, 0xa3, 0xf3, 0xdf, 0x07, 0x38, + 0xc7, 0x54, 0x51, 0xa4, 0xe2, 0x4b, 0xbf, 0xce, 0xfe, 0xb6, 0x1e, 0xab, 0x2a, 0x15, 0xb7, 0x73, + 0x6f, 0x61, 0x73, 0x27, 0x1d, 0x44, 0xb1, 0xfc, 0x83, 0xcf, 0x48, 0x83, 0x49, 0x50, 0x14, 0x67, + 0xdb, 0xfc, 0x62, 0x83, 0x7d, 0xda, 0x6c, 0x28, 0x97, 0x10, 0xb1, 0x6e, 0xca, 0x0c, 0x37, 0xa8, + 0x02, 0xf4, 0x79, 0x90, 0x35, 0xb1, 0x6e, 0x30, 0x0d, 0xae, 0x62, 0x79, 0xb9, 0xa3, 0x11, 0xa8, + 0x78, 0xff, 0xd9, 0x13, 0xf0, 0xa9, 0xf2, 0x53, 0x8f, 0x63, 0x19, 0xf9, 0x43, 0x70, 0x86, 0xbe, + 0x33, 0x89, 0x40, 0xc7, 0x49, 0xa6, 0x3c, 0xd1, 0x99, 0x75, 0x6f, 0x40, 0x65, 0xd1, 0xd6, 0x45, + 0x6b, 0x02, 0xa0, 0x08, 0x6a, 0xec, 0x6b, 0x72, 0xa3, 0xd0, 0xb5, 0xf3, 0x87, 0xc0, 0x9b, 0x45, + 0x9f, 0xb4, 0x61, 0x4c, 0xf3, 0x46, 0x7c, 0x9e, 0xfb, 0x46, 0xd6, 0x47, 0x0d, 0x6e, 0x9f, 0x43, + 0x87, 0x24, 0xc5, 0xf7, 0x0f, 0x2f, 0x0b, 0xf1, 0x73, 0x0e, 0xe4, 0xed, 0x38, 0xab, 0x75, 0xa0, + 0x7f, 0x6e, 0xf8, 0x88, 0xb2, 0x95, 0x61, 0x6e, 0xb8, 0xbb, 0xa9, 0x0d, 0xf0, 0xe3, 0xda, 0x31, + 0x9d, 0x3d, 0x37, 0x0b, 0xf8, 0x46, 0x79, 0xa4, 0xbf, 0xff, 0xb9, 0x81, 0xc3, 0x3d, 0xc1, 0xc2, + 0x68, 0x2d, 0x3e, 0x65, 0xff, 0xe8, 0xdf, 0x0a, 0x34, 0xac, 0xd9, 0xe2, 0x8b, 0xf5, 0xdc, 0x04, + 0x38, 0x1a, 0x04, 0x19, 0x25, 0xca, 0x33, 0xda, 0x7c, 0xb6, 0x36, 0x92, 0xf6, 0x39, 0x40, 0x58, + 0x1d, 0xed, 0x12, 0x89, 0xd1, 0x5b, 0xbb, 0x21, 0x31, 0x3f, 0xa3, 0x24, 0x71, 0xdf, 0x0d, 0x40, + 0xa5, 0xed, 0xe1, 0x3f, 0xdd, 0x0f, 0xd5, 0x52, 0x03, 0xdd, 0x48, 0x9e, 0xd9, 0x19, 0xee, 0x92, + 0x77, 0x69, 0xfb, 0xa0, 0x79, 0x22, 0xd2, 0x3e, 0xe9, 0x50, 0x3c, 0xfd, 0xa1, 0xe1, 0xac, 0xe9, + 0xde, 0xc9, 0xc1, 0x77, 0x2c, 0xb3, 0xfc, 0x7e, 0x55, 0x5b, 0xb7, 0x26, 0xf2, 0x94, 0x54, 0x75, + 0xb6, 0xa5, 0x19, 0x84, 0x95, 0xdf, 0x29, 0x79, 0xd5, 0x47, 0xec, 0xa1, 0x55, 0x67, 0x86, 0xc0, + 0x6e, 0xe9, 0xd2, 0x11, 0x10, 0xab, 0x65, 0xd2, 0x0f, 0xa1, 0xad, 0xe2, 0x30, 0x57, 0xea, 0x09, + 0x28, 0xf2, 0x3a, 0x12, 0x2f, 0x28, 0xbb, 0xfe, 0xb1, 0xf8, 0x9a, 0x03, 0x98, 0x45, 0x64, 0xfb, + 0xfb, 0x3e, 0xda, 0x18, 0xb2, 0x85, 0x1a, 0xf1, 0x17, 0x74, 0xa1, 0x16, 0xb3, 0x10, 0x3f, 0x32, + 0xbb, 0xd2, 0xfb, 0x27, 0x56, 0xea, 0x03, 0x41, 0x90, 0xf8, 0xa2, 0xf9, 0x0a, 0xfd, 0x59, 0xc0, + 0xc0, 0x44, 0x54, 0x11, 0x0f, 0x20, 0xc4, 0x47, 0xf3, 0x16, 0x30, 0x8d, 0xbd, 0xdc, 0x0a, 0xb1, + 0x5c, 0xf4, 0x22, 0xb3, 0x34, 0x68, 0xc8, 0x4d, 0x61, 0xed, 0xe4, 0xfa, 0x0c, 0x8b, 0x95, 0x9c, + 0xdf, 0x81, 0x02, 0x31, 0xbc, 0x3a, 0xaf, 0xb9, 0xec, 0xf9, 0x25, 0x51, 0x09, 0xa8, 0x06, 0x46, + 0x1b, 0xd0, 0xfe, 0x8b, 0x22, 0xdf, 0x46, 0xde, 0x31, 0x99, 0xa8, 0x03, 0x7f, 0x8a, 0x54, 0x73, + 0xe3, 0xc2, 0xf9, 0xd4, 0xf9, 0xac, 0x65, 0x7a, 0x78, 0x3c, 0x93, 0xbe, 0x91, 0xcf, 0x0b, 0xbf, + 0x47, 0xb4, 0x72, 0xd1, 0x12, 0xff, 0x46, 0x24, 0x6a, 0x62, 0x26, 0x7d, 0x0d, 0x92, 0x40, 0xde, + 0x32, 0x5c, 0xb9, 0x4f, 0xfb, 0x61, 0xb9, 0x25, 0xbe, 0x44, 0x00, 0xb7, 0xb8, 0x2b, 0x43, 0xf6, + 0xf5, 0x7c, 0x4e, 0x50, 0xcd, 0xfb, 0x83, 0x61, 0xbd, 0x74, 0x2e, 0x34, 0x6c, 0x10, 0x5b, 0x91, + 0x39, 0x03, 0x9d, 0xe1, 0xbd, 0x7c, 0x75, 0x1c, 0xbc, 0x5d, 0x8f, 0x57, 0x08, 0x70, 0xca, 0x18, + 0x6e, 0x95, 0xe4, 0x5d, 0x52, 0x2c, 
0xea, 0xbc, 0xa9, 0x5d, 0xa8, 0x99, 0xf1, 0x9d, 0xb7, 0x5b, + 0x74, 0x76, 0x98, 0xaf, 0xde, 0xc8, 0x30, 0x49, 0xca, 0x2a, 0x3b, 0x5b, 0xdf, 0x6b, 0x6f, 0xf8, + 0x07, 0xf4, 0x67, 0x57, 0x0b, 0x4f, 0xe2, 0x02, 0xac, 0x99, 0x94, 0xc5, 0x19, 0x19, 0x08, 0xb0, + 0xf6, 0x5d, 0xab, 0xdc, 0xc1, 0xb9, 0x72, 0xd1, 0xb8, 0xa4, 0xcc, 0x5f, 0x46, 0xec, 0x9f, 0x15, + 0x53, 0xc3, 0xf7, 0xb3, 0x67, 0x65, 0xf7, 0x12, 0x6d, 0x5e, 0x4c, 0x15, 0x68, 0x37, 0xfb, 0xca, + 0x89, 0x28, 0x17, 0x89, 0x0d, 0x5c, 0xd3, 0x6c, 0xda, 0xc0, 0xf6, 0xb2, 0x57, 0xcf, 0x6d, 0x1e, + 0x2d, 0x9d, 0x48, 0xa7, 0xcf, 0x7f, 0x14, 0xd5, 0xcc, 0x28, 0x23, 0x82, 0x18, 0x81, 0x08, 0x0c, + 0xfc, 0x6e, 0x57, 0xc2, 0xca, 0x13, 0xcb, 0xce, 0x5d, 0xac, 0x1c, 0x4f, 0x6f, 0x5c, 0xbb, 0x0f, + 0x02, 0x86, 0x45, 0x08, 0x4a, 0x1c, 0x88, 0x29, 0x34, 0xd2, 0xc5, 0x80, 0x89, 0xed, 0xda, 0x0e, + 0x98, 0xd7, 0x46, 0x94, 0x2c, 0xb8, 0x17, 0x21, 0xa9, 0x13, 0xae, 0x88, 0xd5, 0x61, 0xd5, 0xf1, + 0x7e, 0x09, 0x93, 0x3a, 0x84, 0xf4, 0x04, 0x14, 0xaf, 0xdb, 0x09, 0xd0, 0x19, 0xd7, 0xc4, 0xe4, + 0x34, 0xf4, 0x6e, 0xb4, 0x6f, 0x36, 0x5a, 0x83, 0x2a, 0x5f, 0x9f, 0xd4, 0x45, 0xad, 0x66, 0x5b, + 0xef, 0xfb, 0x66, 0xa8, 0x1a, 0xcf, 0xb6, 0x11, 0xfd, 0x6d, 0x63, 0xf1, 0x9a, 0x6e, 0x92, 0x10, + 0x53, 0x73, 0x69, 0x04, 0xb9, 0xad, 0xcc, 0x86, 0x8f, 0x0b, 0x73, 0x02, 0xe2, 0x3c, 0x59, 0xbe, + 0xd1, 0x3d, 0x37, 0xe0, 0x4b, 0xc1, 0xb7, 0x22, 0xaf, 0xb0, 0x08, 0xd0, 0xb3, 0x6f, 0x4b, 0x35, + 0xab, 0x9c, 0xe5, 0x30, 0xfa, 0x43, 0x62, 0xd5, 0x85, 0x48, 0x9c, 0x4d, 0xf0, 0x8a, 0x26, 0x9e, + 0x34, 0x6b, 0x21, 0xe8, 0xff, 0x57, 0x31, 0x41, 0x32, 0x78, 0x6c, 0x05, 0xc6, 0x3d, 0x5c, 0xd9, + 0x97, 0x69, 0xbb, 0x71, 0x3c, 0x4c, 0x4c, 0x09, 0x60, 0x56, 0xb9, 0xaa, 0x6b, 0x16, 0x94, 0x08, + 0x8e, 0x8a, 0x84, 0xa6, 0xf5, 0x31, 0xbb, 0xd2, 0xbc, 0xe2, 0x2b, 0x29, 0x69, 0x50, 0xc0, 0xb9, + 0x2d, 0x2c, 0xbe, 0x69, 0x55, 0x22, 0x72, 0x32, 0xc6, 0xf4, 0xaf, 0xea, 0x36, 0x9e, 0xd1, 0xe2, + 0xb0, 0xdb, 0x26, 0x47, 0x1e, 0xb8, 0x5c, 0x78, 0x9c, 0xe3, 0x69, 0x8f, 0x23, 0x2b, 0x98, 0xa8, + 0x9a, 0x92, 0x87, 0xb9, 0xa0, 0x19, 0xc4, 0x69, 0x82, 0x1e, 0xf4, 0x77, 0x6c, 0xb0, 0xb1, 0x58, + 0xe1, 0x3a, 0xf3, 0xa5, 0x1d, 0xf4, 0x9b, 0x5a, 0x6c, 0x81, 0xa7, 0xfc, 0x15, 0x46, 0x8c, 0x26, + 0x45, 0x85, 0x63, 0xde, 0x56, 0x28, 0x7e, 0x3c, 0xb2, 0x2c, 0x8e, 0xd4, 0x5f, 0x65, 0x8f, 0x2c, + 0xe6, 0x9b, 0xc2, 0x2e, 0x7b, 0xf9, 0x26, 0x47, 0x08, 0xbd, 0x63, 0x57, 0x7a, 0x02, 0xdb, 0x3f, + 0xb4, 0x83, 0x29, 0x43, 0x75, 0x39, 0xc9, 0xa2, 0x06, 0x8d, 0xd3, 0xd1, 0xfc, 0x4a, 0x3f, 0xb7, + 0x1f, 0x92, 0xb5, 0x8b, 0xdb, 0x19, 0x68, 0xc5, 0x7f, 0x9b, 0x42, 0x8a, 0xaf, 0x9b, 0x7e, 0x8c, + 0xbd, 0xfa, 0x06, 0x4c, 0x34, 0x46, 0xed, 0xb7, 0x43, 0x23, 0xa2, 0xb0, 0x4e, 0xdd, 0x02, 0xca, + 0x57, 0xd8, 0xd1, 0xad, 0x46, 0xe0, 0x7f, 0xb1, 0x31, 0x97, 0xf0, 0x4c, 0x9b, 0x85, 0x94, 0xa5, + 0x7b, 0x90, 0x14, 0x4d, 0xb3, 0x40, 0x57, 0x7d, 0x64, 0x03, 0xb6, 0x73, 0xdf, 0x96, 0x82, 0x48, + 0x63, 0xf4, 0x45, 0x59, 0x50, 0xbf, 0x08, 0x08, 0x3b, 0x17, 0xe7, 0x2c, 0xa9, 0xee, 0xc9, 0x24, + 0x55, 0x73, 0xef, 0x56, 0x81, 0x88, 0xa6, 0x58, 0x26, 0x44, 0x23, 0x57, 0xa0, 0xe4, 0x51, 0xe4, + 0xb9, 0x56, 0xd7, 0x15, 0xf0, 0xda, 0x78, 0x9e, 0xbe, 0x9f, 0x36, 0xc0, 0xdb, 0x84, 0x8b, 0x3b, + 0x2a, 0x0e, 0x36, 0x52, 0x2a, 0xf1, 0x70, 0xc7, 0xe3, 0x8a, 0x34, 0x4a, 0xe5, 0x69, 0x9f, 0xe4, + 0x21, 0x02, 0x46, 0x82, 0x80, 0x66, 0xf1, 0x23, 0xdc, 0xb1, 0xab, 0xa4, 0x16, 0xc7, 0x6a, 0xc0, + 0xe3, 0x3f, 0xfc, 0x7e, 0xfb, 0x86, 0x1c, 0xd9, 0x14, 0x27, 0x24, 0x2e, 0x77, 0x1f, 0x77, 0x76, + 0xe6, 0x95, 0xfa, 0xca, 0xe5, 0x86, 0xa9, 0x85, 0xb8, 0x61, 
0xdc, 0x48, 0xbb, 0x35, 0x37, 0x2a, + 0xf4, 0x55, 0x51, 0xb9, 0x21, 0x31, 0xa9, 0xe0, 0x9f, 0x7f, 0x3e, 0x15, 0x9e, 0xb1, 0x20, 0xaa, + 0xda, 0x84, 0xcd, 0xd6, 0xc1, 0x72, 0x99, 0x74, 0x92, 0xf0, 0x63, 0xe0, 0xc9, 0x26, 0xa7, 0xe4, + 0x63, 0x94, 0xa9, 0x92, 0x17, 0x6d, 0x4d, 0xec, 0xa3, 0xc3, 0xc9, 0x83, 0xb9, 0xee, 0x19, 0xfc, + 0xc6, 0x1e, 0x3c, 0xf1, 0x20, 0x60, 0x7b, 0xbb, 0x58, 0xdf, 0x85, 0xf6, 0x4a, 0xa0, 0x26, 0x2b, + 0xc1, 0x94, 0x4f, 0x3f, 0xd0, 0x6a, 0x9c, 0xb3, 0xd0, 0x25, 0xf1, 0x63, 0x9b, 0x3d, 0xa4, 0xc2, + 0x5b, 0xd1, 0xef, 0xa4, 0xfa, 0xb1, 0x34, 0x93, 0x14, 0x63, 0xea, 0x77, 0x2e, 0xb2, 0xf1, 0x88, + 0x88, 0x54, 0xf0, 0x71, 0x89, 0xd6, 0xc0, 0xdd, 0x8f, 0xde, 0xa8, 0xe9, 0x06, 0xe9, 0x38, 0xd4, + 0x50, 0x09, 0x89, 0x86, 0x2d, 0x03, 0x8d, 0xb1, 0xff, 0x77, 0xbd, 0xe0, 0x77, 0x4b, 0x6c, 0x36, + 0x64, 0xe9, 0x6e, 0x51, 0x5b, 0x38, 0x2e, 0xf2, 0xc1, 0xc4, 0x46, 0x64, 0xb8, 0xf2, 0x83, 0xa7, + 0xf8, 0x02, 0x78, 0xb2, 0x73, 0xd1, 0xb2, 0xf7, 0x31, 0x03, 0x9c, 0x2d, 0xd3, 0x00, 0x81, 0x78, + 0x50, 0x29, 0xf8, 0xce, 0x7f, 0xb9, 0xb8, 0x69, 0x07, 0xeb, 0xcc, 0x60, 0x91, 0x64, 0x3c, 0xed, + 0x70, 0x9c, 0xda, 0x94, 0x11, 0x2f, 0x7e, 0x71, 0x07, 0xd6, 0x4e, 0xb7, 0x56, 0x77, 0x33, 0x74, + 0xe6, 0x95, 0x62, 0xb6, 0xdd, 0x82, 0xe7, 0x0a, 0x61, 0xf6, 0x63, 0xae, 0xe0, 0xa8, 0x96, 0x75, + 0x8d, 0x89, 0x14, 0xb4, 0x71, 0x3e, 0x84, 0x26, 0x9d, 0x6f, 0x61, 0x66, 0x74, 0x32, 0x50, 0xff, + 0x76, 0x69, 0x90, 0x15, 0x06, 0xa0, 0x68, 0xd8, 0xfd, 0x91, 0x30, 0xfa, 0xff, 0xf6, 0xbf, 0xc8, + 0x38, 0xd0, 0xa8, 0x0e, 0xf6, 0x97, 0xfc, 0x5c, 0x50, 0x12, 0x29, 0x55, 0xe7, 0x9d, 0xa5, 0x6b, + 0x29, 0x11, 0x9f, 0x3e, 0x2b, 0xb7, 0xfd, 0x92, 0x80, 0xd8, 0xf3, 0xf0, 0x59, 0x70, 0x52, 0x71, + 0x76, 0xc1, 0x3e, 0xdb, 0x76, 0x06, 0x27, 0x7f, 0x8e, 0xc0, 0x9e, 0xb1, 0x29, 0x44, 0x83, 0xde, + 0x34, 0xe1, 0xb3, 0xd7, 0x6f, 0x1f, 0xd7, 0x11, 0x15, 0xd0, 0x0b, 0xc1, 0x2a, 0xa6, 0xbb, 0x59, + 0xe3, 0x34, 0x56, 0x8b, 0x58, 0x6a, 0x73, 0x64, 0xc2, 0x61, 0x60, 0xbb, 0x58, 0x86, 0x30, 0x07, + 0x1f, 0xd1, 0x8c, 0x87, 0x8a, 0x90, 0xa7, 0x3f, 0xb2, 0xc6, 0xf1, 0xaf, 0x44, 0x07, 0x14, 0x14, + 0x7b, 0x03, 0xcf, 0x68, 0xe9, 0x5f, 0x53, 0x56, 0xd0, 0x8c, 0x19, 0xe2, 0x07, 0x32, 0x61, 0x41, + 0x60, 0x5e, 0x35, 0x55, 0x46, 0x1f, 0xde, 0x4f, 0xae, 0x9c, 0x35, 0xba, 0x05, 0xfb, 0x23, 0x71, + 0x54, 0x13, 0x13, 0xe8, 0xbb, 0xbf, 0xdb, 0x51, 0x23, 0xe8, 0xa4, 0x33, 0x53, 0x46, 0xfc, 0x11, + 0xdb, 0x76, 0x21, 0x6e, 0xd4, 0xdb, 0x8e, 0x18, 0x3d, 0x64, 0x48, 0x1e, 0xa4, 0x73, 0xae, 0x67, + 0x30, 0x50, 0xf3, 0x8a, 0x97, 0xcb, 0xb0, 0xa1, 0xcb, 0x56, 0x0e, 0xd6, 0x61, 0x1e, 0x8f, 0x22, + 0x58, 0x4b, 0x41, 0xfc, 0x31, 0xe2, 0x7f, 0x14, 0xf8, 0x48, 0x55, 0x80, 0x40, 0x73, 0x85, 0x59, + 0xb6, 0x60, 0x12, 0x7c, 0x23, 0x71, 0xe9, 0x64, 0x59, 0x10, 0xa9, 0xd9, 0x3b, 0x26, 0xd3, 0x37, + 0x91, 0x90, 0x6b, 0x4c, 0xf1, 0x97, 0xf8, 0xe8, 0x07, 0x04, 0xe3, 0x65, 0xa1, 0xd3, 0xa4, 0x46, + 0x07, 0x90, 0x0d, 0xdb, 0x5f, 0x49, 0x43, 0xa0, 0xab, 0xaa, 0x6f, 0x5e, 0x21, 0x46, 0xa3, 0x9d, + 0x42, 0x61, 0xe6, 0x8a, 0x73, 0x60, 0x36, 0xb5, 0x18, 0x39, 0x92, 0x57, 0xc1, 0x34, 0xa7, 0xb1, + 0x05, 0x0b, 0x2d, 0xe4, 0xe1, 0xc6, 0xa7, 0xcc, 0x17, 0xf4, 0x92, 0x39, 0xa3, 0x0b, 0x63, 0x8b, + 0xcc, 0x17, 0x85, 0x5b, 0xa6, 0x18, 0x1d, 0xc9, 0x87, 0x3b, 0x81, 0xed, 0xf7, 0x41, 0x6d, 0x88, + 0xe0, 0xc8, 0xf9, 0xb4, 0xe7, 0x23, 0xf2, 0x5d, 0x38, 0xf5, 0xc4, 0xc5, 0x97, 0x02, 0x81, 0xdf, + 0xcc, 0xf1, 0xd7, 0xf6, 0x0d, 0x2d, 0x67, 0xaf, 0x40, 0x7e, 0xee, 0xef, 0xbb, 0x06, 0xe2, 0xe3, + 0xc8, 0xb6, 0x64, 0x46, 0x1f, 0x18, 0x03, 0x3b, 0xcb, 0x7a, 0x87, 0x81, 0x81, 0xb0, 
0x81, 0x37, + 0xb1, 0x88, 0x9a, 0x7a, 0xfc, 0x8f, 0xac, 0x42, 0x75, 0xca, 0xa3, 0x1c, 0x16, 0xea, 0xd0, 0x50, + 0x61, 0xd3, 0xab, 0x46, 0x3d, 0x3b, 0x73, 0x70, 0x9c, 0x42, 0xe8, 0x2a, 0x73, 0x81, 0x80, 0x9e, + 0xfa, 0x97, 0xa5, 0x4f, 0xfa, 0xe9, 0xaa, 0xbd, 0xbe, 0x01, 0x04, 0xf2, 0x6f, 0xea, 0x8e, 0x8f, + 0x68, 0x8d, 0x3f, 0x88, 0x8c, 0x3f, 0x9d, 0xa8, 0x35, 0x7e, 0xcd, 0xb7, 0x58, 0x8a, 0x27, 0x27, + 0x4a, 0xbb, 0xb6, 0xf4, 0x45, 0x51, 0x7e, 0x95, 0x40, 0xa8, 0x51, 0x1c, 0xe8, 0x4a, 0xc7, 0x1f, + 0x2d, 0x0b, 0xa0, 0x28, 0x76, 0x31, 0x0b, 0xac, 0x87, 0xd8, 0xe3, 0x51, 0x08, 0x88, 0x82, 0xc1, + 0xb6, 0xe8, 0x82, 0x6a, 0xaf, 0x65, 0x4e, 0xc3, 0x45, 0x37, 0xcb, 0xad, 0xe6, 0xbb, 0x4d, 0x4f, + 0x9f, 0x22, 0x54, 0x72, 0xe8, 0x85, 0x6c, 0x4d, 0x85, 0x9f, 0xc8, 0x6f, 0x19, 0xf6, 0x3d, 0x10, + 0xf1, 0xd3, 0x3e, 0x93, 0x1b, 0x55, 0x96, 0xea, 0xd0, 0x6e, 0x57, 0x39, 0xe4, 0x7b, 0x0a, 0xc6, + 0x94, 0xf1, 0x76, 0xbe, 0xdd, 0x32, 0x75, 0x55, 0x07, 0x29, 0x96, 0x4c, 0x45, 0xb0, 0x3e, 0x03, + 0x56, 0xef, 0x07, 0x33, 0x8d, 0xe6, 0xfb, 0x92, 0x91, 0xa7, 0x21, 0x83, 0xb0, 0xf7, 0x17, 0x22, + 0xdc, 0x87, 0x56, 0x40, 0x4e, 0xb6, 0x7a, 0xb9, 0xeb, 0xcf, 0x4a, 0x69, 0x8a, 0x54, 0xd6, 0xf4, + 0x3a, 0xa7, 0x4d, 0x02, 0x41, 0xec, 0x33, 0x07, 0x04, 0x7b, 0xe9, 0x56, 0x1c, 0x43, 0xca, 0x49, + 0x2c, 0xe7, 0x7b, 0xb3, 0xcb, 0x83, 0x41, 0x04, 0xf6, 0x3c, 0x5c, 0xda, 0x98, 0xe9, 0xba, 0x44, + 0x5e, 0x9d, 0x52, 0x28, 0x95, 0xe7, 0x2f, 0x89, 0x8b, 0x64, 0xe3, 0x21, 0xa1, 0xe7, 0x82, 0x7d, + 0xa4, 0x78, 0xb6, 0x7c, 0x03, 0xdc, 0x08, 0x12, 0x38, 0x15, 0x8a, 0x56, 0x47, 0x64, 0x5b, 0xe8, + 0xad, 0x5f, 0xb8, 0xb8, 0xfb, 0x03, 0x12, 0x13, 0xa9, 0x6d, 0xa2, 0x45, 0xfa, 0x8d, 0x9a, 0x43, + 0x0b, 0xe2, 0xed, 0x11, 0xba, 0x4d, 0x0c, 0xd6, 0xc4, 0xba, 0xbb, 0xfe, 0xf0, 0x78, 0x3f, 0xed, + 0x46, 0x83, 0xe9, 0x4c, 0x77, 0x37, 0xd7, 0xc2, 0x56, 0xf2, 0xe7, 0xa3, 0xbe, 0x94, 0x6c, 0x4f, + 0xad, 0x7c, 0x4f, 0x86, 0x8a, 0xe0, 0xf1, 0xe1, 0x99, 0xa0, 0x5a, 0xf9, 0xe5, 0xbf, 0x31, 0x69, + 0xd2, 0xa8, 0x56, 0xd9, 0x2c, 0x95, 0xaf, 0xd0, 0xb7, 0x36, 0x8e, 0x52, 0x60, 0x61, 0x08, 0xc6, + 0x8a, 0xd2, 0xa4, 0xdf, 0x92, 0x84, 0x97, 0x06, 0x8b, 0x6b, 0x5c, 0x5e, 0xb1, 0x73, 0xbc, 0x1b, + 0x07, 0xeb, 0x6e, 0xc5, 0xaa, 0x5d, 0x1c, 0xac, 0x2e, 0x22, 0x58, 0xe1, 0x34, 0x65, 0x35, 0x54, + 0x9f, 0xdd, 0x06, 0x5e, 0xd7, 0x0f, 0x6b, 0x96, 0xf3, 0x1d, 0x86, 0x7c, 0x06, 0x78, 0xea, 0x49, + 0x59, 0x27, 0xad, 0x03, 0x68, 0x35, 0x12, 0x77, 0xa0, 0xa3, 0x94, 0x4f, 0x6b, 0x95, 0x3d, 0xc9, + 0xa0, 0x0d, 0xee, 0x6b, 0x04, 0x9d, 0xdb, 0xc6, 0x7e, 0x0d, 0x59, 0xc0, 0x53, 0x21, 0x9f, 0x03, + 0xe9, 0x36, 0xdf, 0x0a, 0x00, 0xb8, 0x48, 0x4f, 0x1d, 0xa4, 0xd1, 0x93, 0xae, 0x3c, 0x56, 0x08, + 0x71, 0x00, 0xa4, 0xb9, 0x8e, 0x1e, 0xc2, 0xdb, 0x5d, 0xdf, 0x66, 0x8e, 0xc2, 0xd4, 0x56, 0xd4, + 0x5d, 0x52, 0xde, 0xa3, 0xde, 0x41, 0xbf, 0x79, 0x58, 0x24, 0x3d, 0x66, 0x8b, 0x3f, 0x90, 0x92, + 0xab, 0x86, 0x9a, 0x89, 0xcb, 0xc8, 0xc7, 0xe4, 0x56, 0xbe, 0x1e, 0x4e, 0x6b, 0xcd, 0x1e, 0x30, + 0xad, 0xea, 0x50, 0x83, 0xf8, 0xd1, 0x07, 0x17, 0x56, 0xf7, 0xbb, 0x43, 0x0b, 0x38, 0x03, 0xc6, + 0xf0, 0x13, 0x86, 0xdd, 0x45, 0x00, 0x57, 0x9d, 0x6a, 0xd0, 0xc9, 0xdd, 0x75, 0x05, 0xae, 0xaf, + 0xf4, 0x55, 0x0b, 0xab, 0x0b, 0xa3, 0xf0, 0x93, 0xcd, 0xb7, 0xe4, 0x34, 0xb7, 0xe6, 0x99, 0x17, + 0xeb, 0x40, 0x88, 0x57, 0x88, 0x2d, 0xb5, 0x82, 0x04, 0x78, 0xcf, 0x20, 0xb5, 0xd8, 0x90, 0x9c, + 0x30, 0xda, 0xbc, 0x12, 0x4c, 0xe3, 0x1b, 0x71, 0xa1, 0x08, 0xce, 0xd7, 0x73, 0x77, 0x19, 0x3d, + 0xf6, 0x30, 0x0d, 0x0c, 0xce, 0x42, 0x3c, 0xb8, 0x2c, 0x17, 0x61, 0x31, 0xee, 0x8a, 0xe1, 0x7d, + 0xbd, 0x6c, 
0x3c, 0x09, 0x58, 0xff, 0x39, 0x55, 0x42, 0x59, 0x27, 0x7f, 0x1e, 0x8e, 0xa7, 0x27, + 0x93, 0xb4, 0x06, 0xb9, 0x79, 0x0f, 0x6b, 0x78, 0x97, 0x21, 0x4c, 0x1c, 0xa2, 0xfd, 0x1a, 0x43, + 0xc2, 0x40, 0x87, 0x65, 0xc5, 0x47, 0xbd, 0x9b, 0xfc, 0x86, 0xe4, 0xce, 0x8d, 0x34, 0xac, 0x87, + 0x3c, 0x92, 0xaa, 0xe2, 0x35, 0xc0, 0x80, 0xa7, 0x53, 0xb8, 0x78, 0x14, 0xbb, 0x64, 0x0e, 0xbd, + 0x21, 0xf5, 0x3b, 0xb0, 0x4b, 0x62, 0x0e, 0xea, 0x93, 0x59, 0x2b, 0x1c, 0xd3, 0x8f, 0xf2, 0x04, + 0xca, 0x27, 0x93, 0x9b, 0xb9, 0xce, 0xdb, 0xf7, 0x81, 0xca, 0x16, 0x95, 0xcf, 0x38, 0xfe, 0x58, + 0x18, 0xe5, 0x4b, 0xc7, 0x47, 0x2d, 0xe3, 0x69, 0xc2, 0xd1, 0x30, 0x66, 0xaf, 0x4e, 0xce, 0x3c, + 0xee, 0x13, 0x28, 0x48, 0x21, 0x91, 0xa1, 0xbb, 0x5f, 0x2a, 0xc9, 0xd5, 0x3b, 0xec, 0x03, 0xaf, + 0xcc, 0x03, 0x6c, 0x4c, 0x38, 0x1a, 0x74, 0x58, 0x03, 0x76, 0xe7, 0xb1, 0xf1, 0x26, 0xb3, 0xf9, + 0x5b, 0x7e, 0xd4, 0x45, 0x6a, 0x87, 0x69, 0xd4, 0x46, 0xf8, 0xc1, 0x66, 0x35, 0x9a, 0x65, 0xb4, + 0x75, 0xdc, 0xe6, 0xfd, 0xd3, 0xe0, 0x21, 0xb7, 0x77, 0x4c, 0xd5, 0xcd, 0xcb, 0xfa, 0x24, 0x3e, + 0xcc, 0xd8, 0xfc, 0x98, 0x77, 0x2e, 0xdc, 0x2f, 0x0d, 0x4f, 0x13, 0x37, 0x11, 0x10, 0xb8, 0xdb, + 0x63, 0x1b, 0xeb, 0xe8, 0xe2, 0x17, 0x8d, 0x22, 0x52, 0x59, 0xe6, 0xfe, 0x4a, 0x88, 0xe1, 0x08, + 0x9b, 0xeb, 0x0b, 0xd1, 0xe4, 0x2f, 0xe7, 0x0f, 0x7e, 0xb6, 0xfa, 0x64, 0x5e, 0xc4, 0xcd, 0xff, + 0x35, 0x7e, 0x83, 0xdb, 0x9d, 0x51, 0x68, 0xa7, 0x2f, 0x27, 0x84, 0x28, 0x12, 0xde, 0xb6, 0xcc, + 0x19, 0x6b, 0xc6, 0x0e, 0x5e, 0x1b, 0xcf, 0x08, 0xf8, 0x39, 0x25, 0x8e, 0xe8, 0x9f, 0x62, 0xd7, + 0xd9, 0xc2, 0x49, 0x93, 0xb7, 0x7f, 0x6c, 0x79, 0x13, 0x2d, 0x5f, 0x87, 0x0c, 0xbc, 0x30, 0xac, + 0xbf, 0xe0, 0x38, 0xc6, 0x5e, 0x8d, 0x1e, 0x51, 0x3e, 0x7a, 0x82, 0x97, 0x4e, 0xea, 0x04, 0x87, + 0x00, 0x16, 0xef, 0x97, 0x15, 0x3f, 0x85, 0x32, 0x55, 0x36, 0xde, 0x87, 0x78, 0xbe, 0x83, 0x6b, + 0x5e, 0x50, 0x18, 0x53, 0x27, 0xf0, 0xc4, 0x8a, 0xea, 0xe0, 0x58, 0xc2, 0xb8, 0x8b, 0xf5, 0xe8, + 0x1f, 0x41, 0x7a, 0xae, 0xf3, 0xcd, 0xea, 0x0b, 0x08, 0x6c, 0x5c, 0x75, 0x40, 0xe2, 0xf2, 0x16, + 0xb2, 0x38, 0x99, 0xdf, 0x94, 0x6b, 0xcb, 0x94, 0x7a, 0x13, 0xbd, 0x80, 0xbd, 0xa2, 0xcb, 0x34, + 0x20, 0xcd, 0xa5, 0x07, 0xa7, 0x86, 0x6d, 0x3b, 0xe9, 0xf6, 0xc4, 0x89, 0x11, 0x94, 0x9a, 0x7b, + 0x3d, 0x19, 0x8c, 0x2b, 0xda, 0xad, 0x78, 0x2d, 0x25, 0x0e, 0xdc, 0xae, 0x54, 0xfc, 0xea, 0xbf, + 0xd0, 0x98, 0x84, 0x89, 0x03, 0xe7, 0x33, 0xfc, 0x76, 0x62, 0x88, 0xa2, 0x7b, 0x31, 0xa6, 0x6d, + 0x75, 0x81, 0x28, 0x9e, 0x7e, 0x1b, 0xeb, 0xce, 0x02, 0x4a, 0x73, 0xac, 0x65, 0xb5, 0xce, 0xbd, + 0x43, 0x19, 0xf1, 0xe9, 0x81, 0xc5, 0xb1, 0xc7, 0x13, 0xcb, 0xa6, 0x2c, 0x5f, 0xe9, 0xa9, 0x3b, + 0x96, 0x24, 0xfe, 0x83, 0x42, 0x67, 0x21, 0xf0, 0x04, 0x21, 0xaa, 0xa1, 0xd8, 0x1d, 0x43, 0xe3, + 0xbe, 0xf5, 0xb3, 0x64, 0x7d, 0x87, 0x0b, 0x1c, 0xe8, 0xf4, 0x77, 0x04, 0xfa, 0xcc, 0xd1, 0xd8, + 0x8d, 0x1c, 0x87, 0xbf, 0x2d, 0x0a, 0xb2, 0xb4, 0x98, 0xb4, 0x9b, 0xb3, 0x9c, 0xa3, 0xfa, 0xc6, + 0xd6, 0x33, 0xbf, 0x65, 0xd5, 0xca, 0x84, 0x1c, 0x06, 0x1d, 0x27, 0x53, 0x18, 0x3c, 0xc2, 0x9c, + 0xd9, 0x0d, 0x50, 0x2b, 0x38, 0x9b, 0x76, 0x76, 0x31, 0x56, 0x27, 0x08, 0x3c, 0x1e, 0x56, 0xf7, + 0xba, 0xbf, 0x6b, 0xa9, 0x7d, 0x36, 0x8c, 0x9d, 0x32, 0x06, 0xec, 0x9d, 0x3a, 0x49, 0x10, 0x77, + 0xd8, 0x8e, 0x4b, 0x5f, 0x50, 0xab, 0xc3, 0x4f, 0xe2, 0x5d, 0xfa, 0x28, 0xc7, 0x90, 0xaa, 0x64, + 0xee, 0xba, 0x3e, 0x28, 0x49, 0xaf, 0x32, 0xef, 0xb5, 0xbe, 0x19, 0x3a, 0x22, 0x59, 0xbb, 0xa8, + 0xaa, 0xed, 0x02, 0x53, 0xb6, 0x88, 0xe8, 0x61, 0xde, 0x58, 0x95, 0xf9, 0xb8, 0xb8, 0xcd, 0x7d, + 0xa9, 0xce, 0xd2, 0xd2, 0x62, 0x88, 
0xc4, 0xfc, 0x5d, 0x3d, 0x11, 0x72, 0x84, 0x71, 0x47, 0xc1, + 0x5d, 0x76, 0xa3, 0xa5, 0xdc, 0x87, 0x77, 0x7e, 0xa2, 0xef, 0x62, 0xd9, 0x45, 0x7e, 0x75, 0xfe, + 0x29, 0x22, 0x8d, 0x3b, 0xa3, 0x79, 0x20, 0xe4, 0x71, 0x52, 0x3a, 0x8c, 0x43, 0xc3, 0x17, 0xb6, + 0xa7, 0xa3, 0x36, 0xe0, 0x59, 0xe7, 0xe7, 0xac, 0x51, 0xe9, 0x06, 0x9c, 0x95, 0x19, 0x1c, 0x98, + 0x78, 0x37, 0xfb, 0xa4, 0x16, 0x6e, 0xd8, 0x06, 0x8f, 0xe9, 0x28, 0x87, 0x92, 0x9a, 0xde, 0xe4, + 0x0f, 0x38, 0xa7, 0x4d, 0xad, 0x3b, 0xff, 0xc9, 0x7b, 0xf7, 0x80, 0x27, 0xc3, 0xdf, 0xfc, 0x27, + 0xda, 0x11, 0x91, 0xc9, 0xbc, 0x4b, 0x5e, 0x63, 0x56, 0x87, 0xf3, 0xf4, 0xc1, 0xe7, 0xaf, 0x14, + 0xf8, 0xf4, 0xb4, 0x84, 0x1a, 0x10, 0x2f, 0xcf, 0x99, 0x5a, 0xb7, 0xea, 0x91, 0xf6, 0x5e, 0x5b, + 0x15, 0xac, 0xaf, 0x8d, 0xfe, 0x5c, 0x19, 0x7b, 0xae, 0x13, 0x3f, 0xba, 0x1f, 0x69, 0x76, 0xb1, + 0x80, 0x81, 0x91, 0x87, 0x56, 0x51, 0x67, 0x6f, 0xc2, 0xfd, 0x7c, 0x30, 0xc3, 0x79, 0xc7, 0x6e, + 0x76, 0x5e, 0x8f, 0x3a, 0xd4, 0x62, 0x55, 0x38, 0xd5, 0xc0, 0xd8, 0x65, 0x54, 0xe0, 0xfc, 0xb9, + 0xe5, 0x39, 0xd9, 0x1f, 0x52, 0x6f, 0x13, 0x0f, 0x4c, 0x04, 0xb8, 0x40, 0x58, 0x34, 0xe5, 0x54, + 0x1e, 0x65, 0x4f, 0xd4, 0xf3, 0xa1, 0x2e, 0xea, 0x8f, 0x55, 0x1b, 0x10, 0xdd, 0x7a, 0xe9, 0xce, + 0xe6, 0x41, 0xfe, 0x8e, 0x85, 0xfc, 0x34, 0x6d, 0x7b, 0x05, 0x2d, 0x6d, 0x2a, 0xb3, 0x05, 0xa0, + 0xb0, 0x41, 0x7b, 0x41, 0xff, 0xbe, 0xdb, 0xfb, 0xaf, 0x55, 0xf2, 0xcc, 0x02, 0xb0, 0x58, 0x41, + 0xbe, 0xee, 0x23, 0x0c, 0x79, 0x00, 0x37, 0x88, 0x3d, 0x8b, 0x27, 0xcb, 0xfd, 0xb8, 0xfc, 0x6f, + 0xf9, 0x33, 0xb1, 0xf7, 0x11, 0x06, 0x46, 0x23, 0x0f, 0xc0, 0xc1, 0x7d, 0xa7, 0xd7, 0x5d, 0x7a, + 0x7c, 0xe0, 0x59, 0x3e, 0x37, 0x1b, 0xfa, 0xea, 0x04, 0xb6, 0x4c, 0x5c, 0x42, 0xc2, 0x41, 0x46, + 0x34, 0x95, 0xe0, 0x16, 0xf7, 0x93, 0x9c, 0xdd, 0x9a, 0xa2, 0x3f, 0xc2, 0x34, 0xcf, 0x40, 0x09, + 0x77, 0xd3, 0x7a, 0x53, 0x8c, 0xe6, 0x38, 0xb0, 0x6a, 0xf9, 0xce, 0x0b, 0x46, 0x47, 0x3c, 0xe0, + 0x9e, 0xfa, 0x2d, 0x14, 0x22, 0x30, 0xe5, 0xe3, 0x10, 0x58, 0x89, 0xbc, 0x0a, 0xd1, 0x11, 0x03, + 0x19, 0x62, 0x09, 0x8e, 0xd1, 0xc0, 0xfb, 0x01, 0xe6, 0xe7, 0x6b, 0x35, 0x77, 0xfe, 0x94, 0xc2, + 0x05, 0x13, 0xf7, 0xa0, 0x3e, 0xa4, 0x7b, 0x82, 0xcb, 0x6c, 0x4b, 0x47, 0x65, 0x23, 0x36, 0x2e, + 0x66, 0x68, 0x48, 0x50, 0xa6, 0xde, 0x27, 0x31, 0xb2, 0x26, 0x5b, 0xba, 0x15, 0xd4, 0xd7, 0xf8, + 0x79, 0xa5, 0x56, 0xb0, 0xd9, 0x20, 0x5d, 0xfa, 0xc2, 0x01, 0x84, 0xc3, 0x5b, 0x9c, 0xfa, 0x62, + 0x2b, 0xe2, 0x81, 0xc4, 0x3e, 0x94, 0x93, 0x0c, 0x68, 0xad, 0x0d, 0x89, 0x76, 0xf2, 0x38, 0x1f, + 0x74, 0x12, 0x20, 0x88, 0x2a, 0x9a, 0xe9, 0x64, 0xc5, 0xa5, 0xe2, 0x9f, 0x69, 0x85, 0x70, 0x1c, + 0x18, 0x10, 0x1d, 0xa4, 0xbe, 0x5c, 0xa4, 0x5f, 0x05, 0xaa, 0xfa, 0x2f, 0xfc, 0x97, 0xc8, 0x26, + 0xb7, 0x2d, 0xab, 0xe8, 0x8f, 0xca, 0x8c, 0x7d, 0x88, 0xc6, 0xde, 0xf4, 0x5b, 0x46, 0x7b, 0xed, + 0xf8, 0xd9, 0x12, 0xf8, 0x92, 0x76, 0x3f, 0x6b, 0x19, 0x91, 0xe5, 0x58, 0xca, 0x01, 0x91, 0xbd, + 0x1d, 0x48, 0xcf, 0x32, 0x10, 0xc3, 0x1a, 0x4a, 0xce, 0x7d, 0xc8, 0xb7, 0x51, 0x5b, 0x27, 0xe7, + 0x94, 0xc0, 0x36, 0x64, 0x2e, 0xd5, 0x63, 0xf9, 0x92, 0x73, 0x28, 0x34, 0x24, 0x03, 0x2d, 0x8e, + 0xba, 0x8b, 0x5f, 0x25, 0x82, 0x0d, 0xdf, 0x0a, 0x35, 0x5a, 0x8e, 0x2f, 0xbf, 0x17, 0x1d, 0xd5, + 0x44, 0x11, 0xf7, 0x48, 0xbd, 0xba, 0xd9, 0x0a, 0xfa, 0x81, 0xf3, 0xc1, 0xb6, 0xfc, 0x60, 0x44, + 0x09, 0x3e, 0x84, 0x90, 0x21, 0x32, 0xd6, 0x89, 0x29, 0x64, 0xf4, 0xd4, 0x30, 0x12, 0x8b, 0x0f, + 0xbb, 0x6b, 0x10, 0xf8, 0xce, 0x0f, 0xa1, 0xef, 0xb0, 0xcc, 0x04, 0x52, 0x47, 0xd9, 0x4c, 0x0d, + 0xaa, 0xb5, 0x85, 0xbd, 0x64, 0x78, 0x2e, 0xe8, 0x8e, 0xf3, 
0xd9, 0x81, 0x55, 0x5a, 0x10, 0xb5, + 0x69, 0x05, 0x68, 0xfb, 0x3a, 0xe3, 0x6e, 0xd5, 0x74, 0x76, 0xa4, 0x17, 0x29, 0x38, 0xfa, 0xb9, + 0x7e, 0xde, 0x5b, 0x89, 0xbe, 0xa9, 0x3d, 0xba, 0x5c, 0xe0, 0x7d, 0x4e, 0x46, 0x1a, 0x0f, 0xa2, + 0x57, 0xdd, 0xbe, 0x94, 0xd3, 0x66, 0xda, 0xa4, 0x22, 0x66, 0x45, 0xa2, 0x9c, 0x6f, 0x20, 0x71, + 0x60, 0x88, 0x1b, 0xa4, 0xfb, 0xd3, 0x1a, 0xca, 0x6b, 0x3a, 0xbf, 0x72, 0x9d, 0x0c, 0x9e, 0xb4, + 0x82, 0x09, 0x9c, 0xc7, 0xbe, 0xa1, 0xad, 0xf7, 0xb8, 0xf0, 0x8a, 0xf6, 0x20, 0x7e, 0x94, 0x5d, + 0x27, 0x4a, 0x58, 0x46, 0x89, 0x65, 0xb9, 0xa6, 0x52, 0xad, 0x38, 0x85, 0x51, 0xad, 0xf4, 0x76, + 0x89, 0x57, 0x05, 0x1c, 0xeb, 0x18, 0xde, 0x87, 0x57, 0xbc, 0xa9, 0x1a, 0xd0, 0xd3, 0x87, 0xdf, + 0xd1, 0x6c, 0x51, 0x12, 0x3c, 0xec, 0xd4, 0xe1, 0x18, 0xd7, 0x0f, 0x2a, 0xcd, 0x81, 0x8a, 0xf0, + 0x39, 0xf3, 0x24, 0x52, 0xbe, 0xf6, 0x67, 0x45, 0xd1, 0xf9, 0xa4, 0xe1, 0xa3, 0x2b, 0xec, 0xb6, + 0x1e, 0x9e, 0x89, 0x60, 0xf5, 0xc3, 0xaf, 0x16, 0xda, 0x1f, 0x09, 0xc9, 0x5c, 0x56, 0xea, 0xe7, + 0x51, 0x5a, 0xa8, 0x09, 0x8e, 0x1d, 0x3b, 0xda, 0x6c, 0xa9, 0x1c, 0x99, 0xfc, 0xc6, 0xa3, 0x1e, + 0xa0, 0xbf, 0xe1, 0x63, 0x14, 0x5a, 0xfc, 0xb9, 0xe5, 0x49, 0xc1, 0x25, 0xe3, 0x9c, 0x9f, 0xf9, + 0x5e, 0xec, 0x00, 0xf7, 0x62, 0x68, 0xbe, 0xa4, 0xbb, 0x3a, 0x0c, 0x33, 0x30, 0x1c, 0x6d, 0xf8, + 0x2a, 0x06, 0x19, 0x6f, 0xe8, 0x57, 0xb6, 0x9d, 0xd8, 0x60, 0xdc, 0xaf, 0x4c, 0x9c, 0xfd, 0xa5, + 0xad, 0x47, 0x71, 0xa1, 0x8c, 0x64, 0x6e, 0xd0, 0x96, 0x60, 0x30, 0x96, 0x2e, 0x3e, 0x53, 0xd5, + 0xb6, 0x92, 0xfa, 0xd5, 0x9c, 0xdd, 0x06, 0xab, 0x00, 0xb6, 0x19, 0xaa, 0x98, 0xc1, 0x65, 0xee, + 0x84, 0xfb, 0x82, 0x14, 0x32, 0x07, 0x5b, 0xe5, 0xe6, 0xc5, 0x9e, 0x6a, 0xc6, 0xeb, 0x94, 0x54, + 0xd4, 0x09, 0x5c, 0x58, 0x6c, 0x4e, 0x82, 0x52, 0x45, 0x7e, 0xee, 0x95, 0xdc, 0x77, 0x89, 0xde, + 0xd1, 0x45, 0x75, 0xde, 0x70, 0xfb, 0x7d, 0xc2, 0x96, 0x0c, 0x37, 0x83, 0x28, 0x6a, 0xdc, 0x38, + 0x10, 0x12, 0xee, 0x66, 0x5a, 0x67, 0x50, 0x2c, 0x19, 0x05, 0x49, 0xbe, 0x6c, 0x00, 0x90, 0x37, + 0x89, 0x5f, 0x9c, 0x9e, 0x3d, 0xc4, 0xc3, 0x61, 0x6c, 0xe9, 0x63, 0xef, 0xe1, 0xf5, 0x75, 0x56, + 0x1e, 0xac, 0x18, 0xc2, 0x1f, 0xc5, 0x66, 0x2b, 0x78, 0xdc, 0xce, 0xd9, 0x4f, 0xee, 0x80, 0x78, + 0xe8, 0xa9, 0x23, 0xfc, 0xbc, 0xab, 0x24, 0x07, 0x38, 0xc5, 0xfd, 0x7b, 0xdc, 0x92, 0xeb, 0x99, + 0x0e, 0x8c, 0x0f, 0x9a, 0x88, 0xc7, 0x47, 0xd3, 0xbb, 0x4c, 0xb4, 0xd8, 0xb5, 0x14, 0x4b, 0x60, + 0x6f, 0x5d, 0x51, 0xc5, 0x9b, 0x28, 0xa9, 0x5f, 0x0e, 0x80, 0x29, 0x85, 0x04, 0xdb, 0xa0, 0xee, + 0xda, 0x14, 0x80, 0x80, 0x5d, 0xf7, 0x00, 0x49, 0x22, 0x22, 0x3e, 0x36, 0xc1, 0x72, 0x97, 0xce, + 0x73, 0x72, 0xf7, 0x54, 0xa4, 0xb1, 0x91, 0x1e, 0x33, 0xf2, 0xcf, 0xe7, 0xd3, 0x96, 0xa4, 0x6d, + 0xa0, 0xd7, 0x2d, 0xd5, 0xb5, 0x15, 0xb6, 0x61, 0x54, 0x26, 0xe6, 0x07, 0xfd, 0x73, 0x1f, 0xd2, + 0xe1, 0x5f, 0x35, 0x3d, 0xa3, 0x0d, 0xbf, 0x18, 0x10, 0x34, 0x50, 0xef, 0x6e, 0x6d, 0xd9, 0xab, + 0xfe, 0x75, 0xd4, 0x96, 0x3a, 0x36, 0xe8, 0xb6, 0xda, 0xaa, 0x19, 0x1c, 0xf6, 0x7e, 0x18, 0xe0, + 0x62, 0xd0, 0x73, 0x0d, 0xc6, 0xf1, 0xcd, 0xbb, 0x13, 0x39, 0xbf, 0x9a, 0x7e, 0x32, 0x88, 0x50, + 0x8b, 0xa3, 0x77, 0xf5, 0x64, 0x92, 0xa1, 0xb4, 0xbb, 0x4b, 0xe5, 0x1b, 0x99, 0x32, 0x86, 0x69, + 0xde, 0x27, 0x47, 0x0d, 0xda, 0x5d, 0xf1, 0xd0, 0x55, 0xdc, 0xb9, 0x4e, 0x1e, 0x1a, 0xde, 0x5b, + 0x55, 0x58, 0x05, 0x3b, 0x46, 0x53, 0x33, 0x6a, 0xd6, 0x31, 0x83, 0xbb, 0xe8, 0x33, 0xe3, 0x58, + 0xf2, 0x37, 0xe9, 0x46, 0x69, 0x00, 0xa3, 0x9d, 0x23, 0x95, 0x5b, 0xd7, 0x90, 0xd9, 0x64, 0x4d, + 0x08, 0x85, 0x11, 0x51, 0x4f, 0x1e, 0x88, 0x1a, 0x9b, 0xc4, 0xad, 0xa6, 0xe6, 0xaa, 
0xfc, 0xef, + 0x16, 0x62, 0xf0, 0xef, 0x58, 0x9e, 0xe9, 0xe1, 0x29, 0x9a, 0x28, 0x6e, 0x7d, 0xcd, 0xb7, 0x9a, + 0x35, 0x41, 0x0d, 0x91, 0xfb, 0xab, 0x18, 0x70, 0x5f, 0x9b, 0x3c, 0xa1, 0xac, 0x63, 0x3a, 0xb0, + 0x45, 0x09, 0x14, 0xd5, 0x16, 0xbe, 0x08, 0x20, 0x93, 0xab, 0x6a, 0x9c, 0xd2, 0x01, 0xda, 0xc6, + 0x2d, 0x24, 0x02, 0x4b, 0x6b, 0xb6, 0x8c, 0xb2, 0x44, 0xc5, 0xe3, 0x7f, 0x8b, 0xba, 0xd3, 0xb7, + 0x93, 0x69, 0xe5, 0x73, 0x95, 0x15, 0xdf, 0x8c, 0xba, 0xb3, 0x6b, 0xb6, 0x91, 0xcd, 0x64, 0x9c, + 0x3f, 0xc1, 0x97, 0xaa, 0x0d, 0x0c, 0x5c, 0xfe, 0x40, 0x63, 0xcc, 0x29, 0x3e, 0x8a, 0x61, 0x91, + 0x04, 0x05, 0x87, 0xa9, 0x18, 0x56, 0xba, 0x3a, 0xce, 0x97, 0x6a, 0xf6, 0x42, 0xf3, 0x36, 0x42, + 0x77, 0x52, 0xb5, 0x6e, 0xd8, 0x3b, 0xcb, 0x90, 0x6a, 0x67, 0x36, 0x2d, 0xf5, 0x83, 0xc3, 0xda, + 0x16, 0xa3, 0x35, 0x9e, 0x60, 0xad, 0x8e, 0x93, 0x8f, 0x3e, 0x89, 0x2f, 0xa3, 0x4a, 0xe6, 0x31, + 0xc6, 0x02, 0xa9, 0x06, 0x1a, 0x3c, 0x56, 0xed, 0xdb, 0xe7, 0x8f, 0x06, 0xd7, 0x17, 0x2a, 0x4d, + 0xe4, 0xb8, 0xfe, 0xb9, 0x5b, 0x4c, 0x5d, 0x26, 0x50, 0x80, 0x44, 0xeb, 0x56, 0x0a, 0x50, 0xa5, + 0x4f, 0x09, 0x26, 0x67, 0x1d, 0x05, 0xa5, 0xaf, 0x89, 0x31, 0x16, 0x31, 0x68, 0x18, 0x63, 0x1e, + 0x1b, 0x1d, 0x80, 0xe5, 0xb4, 0xeb, 0x46, 0x1b, 0x21, 0xd2, 0x2b, 0x20, 0xdb, 0x51, 0xb5, 0xfb, + 0xf3, 0x0b, 0x85, 0xe6, 0x66, 0xee, 0x33, 0x3d, 0x2a, 0x67, 0x0f, 0x88, 0xec, 0x35, 0xb9, 0xf6, + 0x57, 0x2f, 0x33, 0xe7, 0x7e, 0xad, 0x5c, 0x31, 0x2d, 0xf3, 0x0f, 0x0f, 0x2f, 0x78, 0x86, 0xc6, + 0xb0, 0x65, 0x74, 0xad, 0xb2, 0x84, 0x35, 0x71, 0x52, 0xb1, 0x36, 0xd0, 0x8a, 0xcf, 0x16, 0xd8, + 0x13, 0x63, 0x06, 0xba, 0x81, 0x2a, 0x50, 0x63, 0x93, 0x73, 0x71, 0x20, 0x24, 0x99, 0x49, 0x92, + 0x2e, 0x02, 0xa9, 0x65, 0x82, 0xc9, 0xf4, 0xa6, 0x72, 0xb9, 0xf9, 0xfd, 0xb5, 0x0c, 0xfc, 0x2d, + 0xf0, 0x94, 0x81, 0x36, 0x4c, 0xd4, 0x73, 0x31, 0x05, 0xbe, 0xb4, 0xcc, 0x3b, 0x8f, 0x76, 0x54, + 0xe2, 0x4a, 0x59, 0x4a, 0xa1, 0x9e, 0x61, 0xd3, 0xaf, 0xa8, 0x9a, 0xad, 0xa2, 0x5d, 0xe7, 0xd2, + 0x6e, 0x92, 0xef, 0x29, 0xa4, 0x02, 0x85, 0xe7, 0x3e, 0xcf, 0x1a, 0xab, 0x09, 0x04, 0x53, 0xbe, + 0x1e, 0xfb, 0xbb, 0xa7, 0x5f, 0xf7, 0x5c, 0xe9, 0xc3, 0x18, 0x5a, 0xfe, 0xec, 0x99, 0x91, 0x70, + 0xe4, 0xa2, 0xc2, 0xb5, 0x96, 0xc1, 0xbc, 0x5f, 0xdc, 0xae, 0xb6, 0xac, 0x4b, 0x78, 0xf3, 0x3c, + 0xee, 0xc4, 0xff, 0xee, 0x57, 0xeb, 0x15, 0x55, 0x4a, 0x94, 0xf9, 0x16, 0xe0, 0x26, 0x95, 0x75, + 0x09, 0xa5, 0xa0, 0x60, 0xb1, 0xe2, 0x66, 0x27, 0xd8, 0xac, 0x8f, 0xb2, 0x6f, 0x82, 0xc6, 0x60, + 0x93, 0x7d, 0x29, 0x42, 0xfc, 0xd2, 0x33, 0xb3, 0x8b, 0x4b, 0xba, 0x65, 0x0e, 0xf4, 0x38, 0xca, + 0x18, 0x07, 0xe6, 0x9c, 0x00, 0x3c, 0x65, 0x16, 0x5d, 0xb3, 0x4e, 0x2c, 0xb7, 0x35, 0x73, 0x21, + 0xdd, 0x36, 0xa9, 0x04, 0x70, 0x91, 0xd6, 0x60, 0x6d, 0xe5, 0xfc, 0xd1, 0xa8, 0x20, 0x22, 0xe3, + 0x50, 0xb0, 0xc9, 0xd4, 0x70, 0x82, 0x51, 0x25, 0xf0, 0x94, 0x48, 0x10, 0xd1, 0x95, 0xc1, 0x90, + 0xaa, 0x0d, 0xd6, 0xb4, 0xeb, 0xcc, 0x9e, 0xf9, 0x03, 0x02, 0xcc, 0xc4, 0x25, 0x02, 0x6e, 0x4a, + 0x72, 0xb1, 0x02, 0xaa, 0xf6, 0x69, 0x36, 0x1e, 0xb7, 0x08, 0x8c, 0xad, 0x0a, 0x51, 0x04, 0x62, + 0xdc, 0x7e, 0xd2, 0x50, 0xa4, 0x95, 0xc3, 0xaf, 0x24, 0x03, 0x54, 0x4c, 0x12, 0x40, 0x81, 0x6c, + 0x72, 0x67, 0x03, 0xbe, 0x14, 0x9a, 0x3b, 0xf3, 0x4a, 0xa4, 0x9f, 0x70, 0xc4, 0x7d, 0x5c, 0x16, + 0xf9, 0xba, 0x34, 0x30, 0x42, 0xff, 0xbf, 0xe7, 0x2b, 0xa5, 0xe3, 0xa2, 0x39, 0xbe, 0x71, 0x49, + 0x3b, 0x0b, 0x49, 0x23, 0xeb, 0x83, 0x28, 0xf1, 0x23, 0xc0, 0x12, 0xdf, 0xdf, 0x76, 0x71, 0x7f, + 0x9b, 0x44, 0xe2, 0xd8, 0xcf, 0x49, 0x43, 0x69, 0x62, 0x41, 0x3f, 0xa6, 0x56, 0x5d, 0xff, 0x98, + 0x89, 0x9d, 
0x32, 0xb0, 0x39, 0xd5, 0xa8, 0x2c, 0x5f, 0x0e, 0x1a, 0xfb, 0xe1, 0xad, 0xa8, 0x00, + 0x46, 0xe6, 0x8e, 0x89, 0xaf, 0xb8, 0xcc, 0x32, 0xc9, 0x1d, 0x00, 0x13, 0xda, 0x50, 0xa7, 0x0f, + 0xc0, 0x95, 0x55, 0x5d, 0x61, 0x16, 0xc5, 0x10, 0xac, 0xec, 0x97, 0x83, 0x36, 0x22, 0x1f, 0xd6, + 0x71, 0x0b, 0x8b, 0x44, 0xea, 0x08, 0x34, 0x38, 0xb6, 0x24, 0x49, 0x7c, 0x8a, 0xab, 0xd9, 0x5d, + 0x8a, 0x6b, 0xba, 0xe0, 0xe0, 0x29, 0x46, 0x32, 0xeb, 0xb4, 0xfa, 0xfc, 0x31, 0x60, 0xd0, 0xb4, + 0x06, 0xbc, 0x3b, 0x1e, 0xa2, 0x31, 0x5a, 0x71, 0x9e, 0x25, 0xc8, 0x74, 0x0c, 0x37, 0xc6, 0xe4, + 0x0b, 0x4e, 0x53, 0xf5, 0x7d, 0xf3, 0x5f, 0x76, 0xf7, 0x38, 0x0a, 0xe9, 0x74, 0xcc, 0x1f, 0x27, + 0xf3, 0xaf, 0x48, 0x10, 0xc0, 0xd7, 0xde, 0xc1, 0xfc, 0x7e, 0x14, 0xd1, 0x31, 0x44, 0x4d, 0x35, + 0x96, 0xd1, 0x6e, 0xd2, 0x82, 0x9d, 0xcc, 0x09, 0x3f, 0x26, 0xcb, 0xa5, 0x1c, 0xe4, 0xa5, 0x74, + 0x85, 0xe0, 0x38, 0x3a, 0x08, 0x0d, 0xae, 0x4e, 0xa5, 0x8a, 0x16, 0x1b, 0x5e, 0xbe, 0xf7, 0x77, + 0x85, 0x74, 0x7b, 0xc0, 0x28, 0x92, 0x76, 0x17, 0xf1, 0x35, 0xe3, 0x1c, 0x42, 0xd5, 0x2a, 0xa0, + 0xd6, 0xa1, 0xf8, 0x0c, 0xef, 0xaf, 0x96, 0x49, 0xe2, 0xd0, 0x10, 0xbc, 0x7c, 0x8a, 0x07, 0x6d, + 0x6a, 0x59, 0x8f, 0x5d, 0xfd, 0x61, 0xee, 0xcc, 0xd1, 0xc7, 0x6c, 0x98, 0x90, 0xc4, 0xbc, 0xa5, + 0xa4, 0x51, 0x06, 0xcb, 0xfa, 0x3d, 0xea, 0x0c, 0x73, 0x81, 0xe0, 0x7d, 0x83, 0x5d, 0x6a, 0x9d, + 0x62, 0xf4, 0x42, 0x9e, 0x0e, 0x6b, 0xa5, 0x16, 0x39, 0x52, 0x04, 0x05, 0xa6, 0x4f, 0xcf, 0x01, + 0xff, 0xfc, 0x4b, 0x0e, 0x1c, 0x10, 0x8f, 0x42, 0x95, 0x03, 0xec, 0x34, 0xf9, 0xf8, 0x18, 0x52, + 0x57, 0x4a, 0x8d, 0x16, 0x34, 0xbe, 0xff, 0x0e, 0xfa, 0x86, 0x87, 0xa5, 0xfb, 0xc9, 0x30, 0x17, + 0x5a, 0xd3, 0x24, 0x24, 0x43, 0x2f, 0x0b, 0x5b, 0x9b, 0xa2, 0x44, 0x67, 0x0c, 0xa6, 0x69, 0x83, + 0x58, 0x5b, 0x96, 0x8b, 0x4d, 0xcc, 0x5f, 0xe2, 0x80, 0x4e, 0xf0, 0xd8, 0x1e, 0x71, 0x14, 0x7d, + 0xbc, 0xab, 0x30, 0xce, 0x94, 0xd2, 0x87, 0xad, 0xe0, 0x57, 0xd1, 0x1a, 0x02, 0xfa, 0xdc, 0x2e, + 0xcf, 0x11, 0x68, 0xee, 0x86, 0xef, 0xb1, 0x0e, 0x10, 0xeb, 0x16, 0xa3, 0xc7, 0x2e, 0xa5, 0x54, + 0x2f, 0xe7, 0x49, 0x0d, 0x86, 0x2c, 0xf6, 0xba, 0x57, 0xc8, 0xaf, 0xa8, 0x8d, 0x29, 0xe8, 0xc4, + 0xa0, 0x9c, 0x9a, 0xb2, 0x12, 0xe2, 0x87, 0x7e, 0x4c, 0xd8, 0xef, 0xd0, 0x21, 0x8f, 0xc3, 0xed, + 0x68, 0xa7, 0x8c, 0x1c, 0xa3, 0x20, 0x1e, 0x6b, 0xb2, 0xa7, 0xc0, 0xc1, 0x88, 0x67, 0xa0, 0x4c, + 0xb1, 0x69, 0x04, 0xf7, 0xba, 0x1c, 0x53, 0xe8, 0x3e, 0x01, 0x74, 0x44, 0xd3, 0xf1, 0xdd, 0xc6, + 0x45, 0x24, 0x72, 0x74, 0xe9, 0x6d, 0x3a, 0x4a, 0x28, 0x23, 0x01, 0xf7, 0x65, 0x22, 0x0c, 0x9b, + 0xb3, 0xf1, 0x9a, 0xcf, 0xdc, 0xc7, 0xcb, 0x56, 0x3e, 0x22, 0x35, 0xc8, 0x93, 0xbf, 0x65, 0x9a, + 0xbb, 0xa5, 0x07, 0xaa, 0xe5, 0xa7, 0x8c, 0xf5, 0x71, 0x6e, 0x77, 0x8a, 0x86, 0x59, 0xbd, 0xe5, + 0x8f, 0x3e, 0x1c, 0xf5, 0x75, 0x74, 0xa3, 0x17, 0xff, 0x4a, 0xe5, 0x2a, 0x93, 0x1b, 0x83, 0xda, + 0x80, 0x35, 0x99, 0xf1, 0x41, 0xf8, 0x9b, 0xb0, 0xbf, 0xad, 0x27, 0x14, 0x9f, 0x7d, 0xb0, 0xac, + 0x49, 0x66, 0xcc, 0x4d, 0x28, 0xae, 0x8e, 0x96, 0xb5, 0x72, 0x2c, 0x34, 0x8a, 0xaa, 0xca, 0xb9, + 0xf8, 0x7c, 0xb1, 0xfb, 0x65, 0x3c, 0xc9, 0x1a, 0xdf, 0xb8, 0x0e, 0x66, 0xbd, 0x9b, 0x4a, 0xa7, + 0x25, 0xa9, 0x2e, 0x5d, 0xa3, 0x60, 0x7d, 0x4c, 0xc8, 0x23, 0xad, 0x84, 0x53, 0x15, 0xad, 0x5d, + 0xa6, 0x8e, 0x5f, 0xdc, 0xda, 0x34, 0x2b, 0x78, 0xc0, 0x80, 0xaf, 0x84, 0xde, 0x5a, 0x02, 0x33, + 0x29, 0xc5, 0xb9, 0x9f, 0x56, 0x39, 0x03, 0x7b, 0x68, 0x7e, 0x7c, 0xf8, 0x00, 0x03, 0x86, 0xae, + 0x43, 0xa2, 0xc4, 0x7a, 0x4e, 0xa5, 0xc1, 0x26, 0x77, 0x46, 0x9b, 0x7b, 0xcc, 0x24, 0xc6, 0x9d, + 0x5a, 0xdf, 0x02, 0x4e, 0xcf, 0x52, 
0x9b, 0xc8, 0x7d, 0xb2, 0x45, 0x2b, 0x00, 0xff, 0xdd, 0xf6, + 0x9d, 0x14, 0x64, 0xe8, 0x82, 0xc9, 0xfb, 0x74, 0x97, 0x4e, 0xb3, 0x66, 0x20, 0x90, 0xea, 0xf4, + 0xf7, 0x5c, 0xd5, 0xa6, 0x41, 0xdc, 0x8d, 0xa3, 0x97, 0x47, 0xa3, 0xbe, 0x1c, 0x55, 0xf2, 0x70, + 0xdf, 0xe3, 0x4a, 0xb4, 0xc7, 0x0a, 0x83, 0x13, 0xce, 0x82, 0xf9, 0xf3, 0x3c, 0xbb, 0x15, 0xab, + 0x91, 0x1b, 0x93, 0xa4, 0xb1, 0xe4, 0x4c, 0x45, 0x05, 0xfa, 0x7f, 0xcf, 0xd7, 0x3a, 0xf5, 0xaa, + 0xfb, 0x99, 0x97, 0x16, 0x39, 0xc0, 0x3f, 0xd0, 0x25, 0x89, 0x63, 0xc6, 0xd7, 0x04, 0x69, 0xb1, + 0xe8, 0x5a, 0xac, 0x98, 0xdf, 0xd2, 0xa5, 0x69, 0x8d, 0x1a, 0x7e, 0xba, 0xad, 0xfc, 0x79, 0x52, + 0xcb, 0x4c, 0x97, 0x62, 0xa1, 0x7a, 0x9c, 0x7c, 0x9c, 0x25, 0x5b, 0xf8, 0x42, 0x86, 0x83, 0x0f, + 0x39, 0xe4, 0x21, 0xe6, 0x4b, 0x62, 0x3c, 0x0c, 0x47, 0x0b, 0xc1, 0x23, 0xe2, 0x73, 0xfb, 0xd8, + 0xda, 0x61, 0x07, 0xc3, 0xc6, 0xe6, 0x9e, 0xf3, 0x36, 0xdc, 0x75, 0x30, 0x4d, 0x46, 0x35, 0x53, + 0x8a, 0x35, 0x5c, 0xb6, 0x57, 0x32, 0xd9, 0x1d, 0x4f, 0xda, 0x29, 0x13, 0x2c, 0xe9, 0xf7, 0x6f, + 0x8f, 0x92, 0xe3, 0xb6, 0x08, 0x3f, 0x9c, 0xca, 0x1e, 0xbf, 0xd2, 0x85, 0xc6, 0x10, 0x31, 0xd5, + 0x33, 0x65, 0xf4, 0x66, 0xe0, 0x0e, 0xc5, 0x7a, 0xe4, 0xf2, 0x46, 0x9e, 0xe7, 0x3d, 0x1f, 0x04, + 0x35, 0x2b, 0x13, 0xe7, 0x9e, 0x5c, 0xa1, 0x0a, 0x00, 0xa1, 0xc0, 0xa4, 0xf1, 0xe6, 0x88, 0x90, + 0x1c, 0x30, 0xea, 0x5c, 0x89, 0xea, 0xbd, 0x16, 0x11, 0x97, 0xd4, 0x6f, 0x6e, 0xb4, 0x01, 0x7e, + 0x03, 0xb6, 0xbe, 0x66, 0x7f, 0x98, 0x43, 0xaf, 0xa2, 0xb7, 0x80, 0x81, 0x48, 0x28, 0xa0, 0xee, + 0xf7, 0x1f, 0x73, 0x5c, 0x94, 0x18, 0xd7, 0x6c, 0xd4, 0x53, 0x8a, 0xfa, 0xd4, 0x00, 0x1a, 0x7d, + 0xf3, 0x08, 0x80, 0xb5, 0x47, 0x75, 0x1f, 0x5e, 0xc7, 0xa0, 0x86, 0xb2, 0xff, 0x15, 0x48, 0x6d, + 0xfd, 0x64, 0xcb, 0x4f, 0x9b, 0xc2, 0xf2, 0xf6, 0x40, 0x27, 0x58, 0xae, 0x96, 0x8f, 0x27, 0xcb, + 0xe8, 0x56, 0xc2, 0x06, 0x7d, 0xdc, 0xf5, 0x39, 0x3b, 0xb3, 0xb6, 0x37, 0x4c, 0x33, 0xae, 0xe9, + 0x3a, 0x9b, 0x36, 0x83, 0x53, 0x94, 0x54, 0xe4, 0xd8, 0x72, 0xfc, 0xf5, 0x34, 0xb3, 0x53, 0x7f, + 0x1d, 0x49, 0xf3, 0x17, 0xb7, 0xad, 0x26, 0x0a, 0xf4, 0xb7, 0x9b, 0xfd, 0x8b, 0x27, 0xa0, 0x49, + 0x3d, 0x60, 0xd2, 0xe2, 0x0e, 0x2c, 0xb5, 0xed, 0xf8, 0x43, 0xbd, 0x76, 0x51, 0xeb, 0xa5, 0xdd, + 0x75, 0xfc, 0x21, 0x2c, 0x99, 0xe9, 0x84, 0xa1, 0x5c, 0x00, 0xdd, 0x2e, 0xed, 0xc5, 0x88, 0x6e, + 0x0b, 0xe8, 0x31, 0x5a, 0x05, 0xad, 0xb3, 0xa1, 0x39, 0xeb, 0xaf, 0xcc, 0xbc, 0x66, 0xcb, 0x47, + 0xdf, 0xc7, 0xc5, 0x2b, 0x03, 0x9f, 0x56, 0xe5, 0x37, 0x6f, 0x38, 0x07, 0x83, 0xdd, 0x30, 0x6b, + 0xc9, 0x07, 0xfc, 0x5e, 0x5a, 0x8a, 0xc6, 0xfc, 0xe8, 0xc7, 0x78, 0x24, 0xfc, 0x2a, 0xdd, 0xd9, + 0x17, 0xd9, 0x81, 0x42, 0x08, 0x55, 0x77, 0x29, 0x2f, 0x53, 0x05, 0x81, 0x6a, 0x32, 0x6e, 0x38, + 0x95, 0x6c, 0x34, 0xcf, 0x2f, 0xad, 0x0e, 0x4c, 0xa7, 0x93, 0xb8, 0xb3, 0x92, 0x08, 0x8d, 0x2c, + 0xa5, 0xe2, 0xb9, 0x5b, 0x2e, 0xed, 0x26, 0x87, 0x75, 0xaf, 0x67, 0xec, 0x77, 0x8a, 0xb7, 0xfd, + 0xfc, 0x7e, 0x4c, 0xdd, 0xbc, 0x14, 0x11, 0x5b, 0xe9, 0x0b, 0xdb, 0x9d, 0xc6, 0x36, 0x94, 0xe6, + 0x4a, 0x77, 0x3a, 0xab, 0x79, 0x3b, 0x2b, 0x86, 0x1c, 0x80, 0x41, 0x3a, 0xd5, 0x4e, 0x8e, 0x77, + 0x8d, 0x81, 0xc4, 0xee, 0x2b, 0xd7, 0x2e, 0x90, 0xb8, 0x05, 0xb0, 0x84, 0x8d, 0x07, 0x58, 0x62, + 0x0a, 0xb8, 0x3a, 0x54, 0x80, 0x16, 0x0c, 0x5e, 0xc5, 0x0f, 0x3f, 0x7a, 0x25, 0xc0, 0x45, 0x21, + 0xf1, 0x4a, 0x38, 0x0b, 0xeb, 0xad, 0xee, 0xf8, 0x7c, 0x37, 0x9a, 0x2c, 0x3c, 0x98, 0xe7, 0xce, + 0x97, 0xb3, 0x26, 0x4b, 0x55, 0x2d, 0xda, 0xcc, 0x0e, 0xb8, 0xb0, 0xcf, 0x10, 0x13, 0x1c, 0x0e, + 0xbc, 0x80, 0x48, 0xf3, 0x74, 0x50, 0xf5, 0xeb, 0x42, 0x26, 
0x00, 0xe2, 0x6c, 0x9c, 0x57, 0x1f, + 0xc9, 0xd6, 0x7f, 0xa0, 0x64, 0x1f, 0xa9, 0xc5, 0x1d, 0x7f, 0x82, 0xfc, 0x3d, 0x33, 0x21, 0x62, + 0x3f, 0x87, 0x17, 0xee, 0x7f, 0x66, 0x85, 0x6f, 0xeb, 0x53, 0x26, 0x79, 0x76, 0x7b, 0x36, 0x20, + 0x3f, 0x2e, 0xd6, 0x73, 0x3c, 0x96, 0xa0, 0xea, 0xe0, 0x85, 0x83, 0x3b, 0x7b, 0x6c, 0xd8, 0x75, + 0x74, 0xe4, 0x16, 0x26, 0x66, 0xd8, 0x8e, 0xba, 0xf8, 0xb3, 0x6a, 0x18, 0x0e, 0x32, 0xc6, 0x72, + 0xcf, 0xa7, 0x4a, 0xd6, 0x7c, 0x82, 0xfb, 0xa3, 0xb5, 0xd6, 0xa2, 0x2a, 0xda, 0xff, 0xb4, 0x68, + 0xe4, 0xf5, 0x35, 0x09, 0x05, 0x87, 0xc0, 0x4c, 0xeb, 0x61, 0x38, 0xf3, 0x4d, 0xd8, 0xe8, 0x13, + 0x0e, 0xad, 0x98, 0x96, 0xa8, 0xc6, 0x00, 0x28, 0x7a, 0x1b, 0x0e, 0x73, 0xc0, 0xcf, 0x90, 0x7d, + 0xdd, 0xe5, 0xd2, 0x9a, 0xd7, 0xb1, 0xa3, 0x93, 0x62, 0x97, 0xc0, 0x05, 0x4f, 0x93, 0xe2, 0x65, + 0x9c, 0xf1, 0x72, 0x61, 0x07, 0xb6, 0xd0, 0x81, 0x80, 0x76, 0xe7, 0x65, 0xdb, 0xe3, 0xc0, 0x46, + 0x93, 0x48, 0x4e, 0x3b, 0x3a, 0x4c, 0x0a, 0x40, 0x6f, 0x63, 0x4c, 0x7b, 0xcc, 0x70, 0x25, 0x76, + 0x83, 0x5d, 0x9d, 0x6b, 0x7e, 0x50, 0x46, 0x35, 0x63, 0x48, 0xb3, 0x05, 0x55, 0x2d, 0x64, 0x09, + 0x95, 0xa2, 0x78, 0x32, 0x06, 0xac, 0x55, 0x5b, 0x30, 0x50, 0xeb, 0x35, 0xf8, 0x35, 0x2f, 0xd2, + 0x71, 0x6e, 0x87, 0x4d, 0xa9, 0x70, 0xae, 0x53, 0xbe, 0x5b, 0x87, 0x12, 0x18, 0xaa, 0xb1, 0x71, + 0xf2, 0x51, 0x89, 0xfc, 0xd8, 0x79, 0x51, 0xd0, 0xe4, 0x7c, 0x43, 0x98, 0xe6, 0x45, 0xff, 0x86, + 0xe9, 0xea, 0xfd, 0xdd, 0x48, 0xe4, 0x2a, 0xb2, 0x81, 0x41, 0x82, 0xdf, 0x06, 0xfa, 0xbf, 0xe3, + 0x9f, 0x16, 0x32, 0x8a, 0x77, 0xc1, 0x46, 0x3f, 0xbe, 0xae, 0xe7, 0x84, 0xc7, 0xd1, 0x4e, 0xa2, + 0xf4, 0x0b, 0x54, 0x45, 0x38, 0x0e, 0x4c, 0x03, 0x6d, 0x2c, 0xd6, 0xa2, 0x9a, 0x4f, 0x75, 0xcb, + 0x90, 0x93, 0xac, 0x7c, 0x5c, 0x57, 0xd1, 0x35, 0xa1, 0x7f, 0x1b, 0x7d, 0x83, 0xdd, 0x98, 0xa7, + 0x8d, 0x65, 0xe4, 0xce, 0x56, 0x5e, 0x3c, 0x9c, 0xe2, 0xb0, 0xdf, 0x9b, 0x0c, 0x2c, 0x4d, 0x59, + 0x26, 0x42, 0x5d, 0x62, 0x3e, 0x34, 0xfa, 0x35, 0x52, 0x49, 0x45, 0x1a, 0xb6, 0x34, 0xb5, 0x19, + 0x35, 0xe2, 0x76, 0x90, 0x12, 0xda, 0xa0, 0x99, 0x14, 0xe3, 0xa4, 0x08, 0x70, 0x5e, 0xfb, 0x38, + 0x28, 0x2e, 0x02, 0xb0, 0x11, 0xc4, 0xe1, 0xa4, 0xac, 0xb6, 0x1e, 0x34, 0x30, 0x2e, 0xfa, 0xf2, + 0x46, 0x72, 0xe7, 0xf1, 0x38, 0xca, 0x0d, 0xa0, 0x6d, 0x82, 0x29, 0xa7, 0xdd, 0xa0, 0xe6, 0x31, + 0x9c, 0xf8, 0x94, 0x08, 0xe8, 0x2e, 0x74, 0x29, 0xaa, 0x85, 0x88, 0xbc, 0xa1, 0x60, 0x3e, 0x19, + 0x40, 0xda, 0xa8, 0x03, 0xaa, 0xfe, 0x4c, 0x49, 0x47, 0x8d, 0xe7, 0x0a, 0x0d, 0x8f, 0x77, 0x89, + 0xa4, 0x80, 0xc2, 0xa3, 0x25, 0x92, 0xb0, 0xd2, 0xa4, 0x37, 0xdc, 0x0d, 0x71, 0xc8, 0xa1, 0x7f, + 0xd1, 0x42, 0x9f, 0xad, 0x8b, 0x38, 0xf3, 0xa4, 0xae, 0xc2, 0x71, 0xfe, 0xfa, 0x82, 0xab, 0xcf, + 0xcc, 0x40, 0xd0, 0xe9, 0xa3, 0x07, 0xff, 0x74, 0x0a, 0xbe, 0x8e, 0x4e, 0x25, 0x42, 0x60, 0x89, + 0x8d, 0x47, 0xe8, 0x58, 0x13, 0x94, 0xf2, 0x86, 0xd4, 0x36, 0x62, 0xeb, 0x65, 0x70, 0xf1, 0x8f, + 0x40, 0x61, 0xfd, 0xcd, 0x57, 0xa3, 0xb8, 0x90, 0xbe, 0xaa, 0xe4, 0x3c, 0x07, 0xcf, 0x5b, 0xc0, + 0x69, 0x65, 0xcd, 0x1a, 0x0f, 0x6a, 0xa9, 0x98, 0x0b, 0xf2, 0x26, 0x80, 0x49, 0xb7, 0xfd, 0x02, + 0x5e, 0x2f, 0x6b, 0x77, 0xf0, 0xdc, 0xa3, 0x84, 0x4f, 0x97, 0xae, 0x41, 0x23, 0x86, 0x60, 0xe2, + 0x5c, 0x31, 0x82, 0x97, 0x7e, 0x8a, 0x50, 0x7b, 0xab, 0x93, 0x08, 0xd6, 0xea, 0x46, 0x52, 0x56, + 0x91, 0x3e, 0x9a, 0x12, 0xee, 0x89, 0xc2, 0xf6, 0x79, 0x45, 0x14, 0xf0, 0x4b, 0xe5, 0xca, 0xe6, + 0x87, 0x66, 0x2d, 0x48, 0x2e, 0x8f, 0x2d, 0x4c, 0xa2, 0x80, 0xe9, 0x9e, 0x77, 0x17, 0xd8, 0x52, + 0xe2, 0xff, 0xd6, 0x37, 0xc1, 0x4a, 0xf0, 0x0e, 0xde, 0x11, 0xfa, 0xa0, 0xaa, 0x4d, 
0xb6, 0xa8, + 0xee, 0x13, 0x6d, 0xc8, 0x2c, 0x64, 0x93, 0xba, 0x0c, 0x2d, 0x2d, 0x33, 0xac, 0xf5, 0xbe, 0x73, + 0xef, 0x22, 0x9e, 0x54, 0xf3, 0x7b, 0x23, 0xae, 0xb1, 0xf8, 0xe5, 0x96, 0xc5, 0x94, 0x5a, 0xb9, + 0x8a, 0x6b, 0xec, 0x00, 0x27, 0x4e, 0xc3, 0x99, 0x5d, 0xe5, 0xf6, 0xc7, 0x08, 0xd6, 0xc1, 0xfe, + 0xe9, 0xbb, 0x9d, 0xeb, 0x44, 0x5a, 0x19, 0xc3, 0xe3, 0xc5, 0x92, 0xf7, 0x20, 0x08, 0xc8, 0xcd, + 0x3f, 0x6b, 0x9d, 0xf1, 0xa7, 0xc1, 0x9d, 0x08, 0x36, 0xb1, 0x9f, 0xec, 0xcf, 0xaa, 0x15, 0x63, + 0x91, 0x9d, 0xab, 0x6b, 0x6e, 0xd6, 0x29, 0xf9, 0x83, 0xf1, 0x8e, 0xbb, 0xeb, 0xc2, 0x1f, 0x96, + 0x3b, 0x46, 0x0a, 0x41, 0x7c, 0x96, 0x84, 0xbc, 0xd5, 0x4d, 0x0e, 0x55, 0x86, 0x09, 0x8e, 0x75, + 0x1d, 0xa7, 0xd4, 0xff, 0xff, 0x35, 0x07, 0xf8, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, + 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, + 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, + 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0xf9, 0xe7, 0x9f, 0x7f, 0xfe, 0x7f, 0xe3, + 0xff, 0x02, 0xe1, 0x4c, 0x5a, 0x95, 0x00, 0x8c, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 35840, // uncompressed data size (bytes) + 24474, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x4e, 0x00, 0x62, 0x1d, 0x08, 0x13, 0x4c, 0xc4, 0x43, 0x69, + 0x20, 0x00, 0x00, 0x6e, 0x8b, 0xb6, 0xe9, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 35840 +// COMPRESSED SIZE (bytes): 24475 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_image_prod_data[] = +{ + 0xed, 0xfb, 0x43, 0x90, 0x28, 0x5d, 0x17, 0x30, 0xe8, 0x96, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xb6, + 0x6d, 0xdb, 0xb6, 0x5d, 0xa7, 0x6c, 0xdb, 0xb6, 0x5d, 0xa7, 0x6c, 0xdb, 0x46, 0xbf, 0xff, 0x9d, + 0xdd, 0xaf, 0x07, 0x1d, 0x3d, 0xec, 0x88, 0xf3, 0x4c, 0xd6, 0x5e, 0x19, 0x91, 0xb9, 0x72, 0x90, + 0xb9, 0x23, 0xd7, 0x8e, 0xdc, 0x92, 0x00, 0x08, 0xb3, 0x99, 0x00, 0xf1, 0x00, 0xa0, 0x00, 0x3d, + 0x47, 0xf9, 0xef, 0x2d, 0x3f, 0x80, 0x92, 0x00, 0x88, 0xff, 0xe7, 0x00, 0x04, 0x28, 0xc0, 0xff, + 0x2f, 0x8d, 0x04, 0x00, 0x00, 0xfc, 0x49, 0x01, 0xf0, 0xa3, 0x07, 0x00, 0x78, 0x03, 0x7a, 0x03, + 0x78, 0x07, 0x8a, 0x05, 0x00, 0x02, 0xe8, 0xdb, 0xd9, 0xfd, 0xfd, 0xfd, 0x85, 0x8f, 0x04, 0x00, + 0x04, 0xf8, 0xcd, 0x03, 0x69, 0x3a, 0x02, 0x60, 0xce, 0x58, 0x07, 0x6c, 0x4a, 0x01, 0xa0, 0xc9, + 0x58, 0x07, 0xfa, 0x2f, 0x90, 0x66, 0xac, 0x03, 0xff, 0x17, 0xf0, 0xfe, 0x3b, 0x17, 0x20, 0x23, + 0x0f, 0xb8, 0x65, 0x17, 0xb8, 0x2f, 0x2f, 0x7f, 0x37, 0x23, 0x0f, 0x30, 0x23, 0x13, 0xa8, 0x65, + 0x17, 0xb0, 0x65, 0x1d, 0xc8, 0x6f, 0x15, 0x00, 0x00, 0x9e, 0x51, 0x12, 0x00, 0xe1, 0x35, 0x0f, + 0x00, 0xfe, 0xbf, 0x1a, 0xe6, 0x20, 0xff, 0x67, 0x00, 0xdb, 0x9e, 0x09, 0x00, 0x08, 0xf4, 0xdf, + 0xe8, 0x89, 0x07, 0x28, 0x1e, 0x00, 0x00, 0x2c, 0xf2, 0xbf, 0x52, 0xdf, 0x79, 0xa0, 0x3d, 0x4f, + 0x1f, 0xef, 0x60, 0x6f, 0xc0, 0xff, 0x5d, 0x0d, 0x28, 0x15, 0x1e, 0x62, 0xe7, 0xff, 0x54, 0xfe, + 0x7e, 0x07, 0x8d, 0x04, 0x30, 0x00, 0xf8, 0xfe, 0x03, 0xd2, 0x95, 0x00, 0xd4, 0x9a, 0x82, 0xf0, + 0x5f, 0xfe, 0xf3, 0x01, 0xe0, 0xf7, 0xdf, 0xfd, 0xbc, 0x01, 0xf5, 0x14, 0x7f, 0x02, 0xc6, 0x73, + 0xe3, 0x00, 0x46, 0x12, 0x50, 0x03, 0xe4, 0xe7, 0xbd, 0x01, 0xc4, 0xff, 0x17, 0xfb, 0x7e, 0x9f, + 0x18, 0xbe, 0x7e, 0x24, 0x01, 0xf3, 0xf3, 0xf8, 0xee, 0x00, 0xfe, 0xff, 0x48, 0xf5, 0x31, 0x71, + 0x42, 0xfe, 0xa5, 0xda, 0x63, 0x67, 0x51, 0xa6, 0xb5, 0xe3, 0x0e, 0x18, 0xcb, 0x54, 0x90, 0xaa, + 0x00, 0x95, 0x94, 0x1f, 0xcc, 0x2a, 0xe6, 0xc7, 0xd3, 0x1f, 0x7e, 0x71, 0x90, 0xee, 0xfa, 0x83, + 0xd4, 0xb5, 0x7e, 0xda, 0xe1, 0xc6, 0x75, 0x95, 0x27, 0x54, 0x3d, 0xfd, 0x67, 0xc0, 0xe3, 0x09, + 0x9b, 0xe8, 0xe5, 0xca, 0x6b, 0x5b, 0x27, 0xb5, 0x46, 0x22, 0x7c, 0x1f, 0x2e, 0xa8, 0x15, 0xca, + 0x29, 0xe1, 0x01, 0x66, 0x03, 0x90, 0x15, 0x85, 0xdd, 0x08, 0x87, 0x36, 0x43, 0x89, 0x84, 0xe2, + 0xa6, 0x7f, 0x0e, 0x06, 0x87, 0x6d, 0x91, 0x62, 0xed, 0x5e, 0x91, 0xe6, 0xa3, 0xde, 0x78, 0x77, + 0x3f, 0x2a, 0x38, 0x44, 0x86, 0x6b, 0xdb, 0x54, 0x84, 0x69, 0xe0, 0x31, 0x74, 0xd6, 0x00, 0xd2, + 0xdc, 0xcd, 0x89, 0xd1, 0x6d, 0xfa, 0x3e, 0xf6, 0xb0, 0xcb, 0xb8, 0x30, 0x37, 0x0a, 0xf3, 0x3b, + 0xdc, 0x1c, 0x58, 0xf0, 0xfd, 0xb5, 0x39, 0x9d, 0x49, 0x61, 0xbb, 0xc9, 0xd9, 0x69, 0x0a, 0xc4, + 0xfc, 0x33, 0x2e, 0x67, 0x52, 0xe8, 0x61, 0x26, 0x9e, 0xb1, 0x7c, 0x88, 0x83, 0x1f, 0x8e, 0x46, + 0x7a, 0x3c, 0xb2, 0x36, 0x24, 0x1e, 0x65, 0xb8, 0x41, 0x03, 0xc7, 0x0c, 0x13, 0x2f, 0x90, 0x4c, + 0x9a, 0x4e, 0x41, 0xb7, 0xbd, 0x5f, 0x82, 0x76, 0x8f, 0x54, 0xdb, 0xa2, 0x2c, 0xc8, 0xc8, 0xef, + 0x79, 0xde, 0x5d, 0xd2, 0x31, 0x7e, 0x86, 0xbd, 0xa4, 0xfd, 
0x82, 0xcc, 0x83, 0x83, 0x96, 0xc2, + 0xcc, 0x40, 0xf5, 0xa6, 0x1c, 0xd2, 0x12, 0xd6, 0x78, 0x77, 0x64, 0xa9, 0x14, 0xd6, 0xbf, 0xf7, + 0xe1, 0xf8, 0xd3, 0x63, 0xfd, 0x31, 0x9e, 0x9f, 0xb8, 0xd8, 0xf8, 0xb1, 0x98, 0xed, 0xf7, 0xcf, + 0x71, 0x47, 0x0e, 0x56, 0xda, 0x3e, 0xb4, 0xcd, 0xb0, 0x17, 0x10, 0x97, 0xa1, 0xd7, 0x20, 0x71, + 0x5a, 0x17, 0x4b, 0x40, 0xfb, 0xe8, 0xdb, 0x96, 0x13, 0x90, 0x25, 0x0a, 0x3d, 0x6a, 0xd4, 0xb4, + 0x46, 0x8f, 0xd9, 0x1a, 0x30, 0xf9, 0x35, 0x5b, 0x63, 0x02, 0xda, 0xdf, 0xc2, 0x7d, 0x82, 0x3e, + 0x6c, 0xfa, 0x91, 0xaf, 0xbf, 0xc0, 0x55, 0x10, 0x55, 0xe1, 0xbe, 0xbd, 0x2b, 0x22, 0xf2, 0xae, + 0xa4, 0x71, 0xa7, 0xf9, 0xca, 0xae, 0x9f, 0x84, 0xe4, 0x1b, 0x29, 0x6a, 0xbb, 0x79, 0x57, 0xd0, + 0x51, 0x85, 0xdc, 0xcc, 0x5c, 0x23, 0xaf, 0x9a, 0x03, 0xec, 0xf5, 0xd2, 0xe6, 0x13, 0x67, 0xc4, + 0xae, 0x8c, 0xf1, 0x94, 0x1d, 0xf9, 0x5e, 0x1f, 0x70, 0x08, 0x13, 0x6f, 0x86, 0x73, 0x6e, 0xaa, + 0x7b, 0xe9, 0xc7, 0x0d, 0xb9, 0xe2, 0xbf, 0xcc, 0x03, 0x9c, 0xa6, 0x0c, 0x8b, 0xad, 0x54, 0xd4, + 0xbd, 0x55, 0x7c, 0x5d, 0x3b, 0x88, 0x34, 0x82, 0xe3, 0x9e, 0xcf, 0x9d, 0xd6, 0xa4, 0x96, 0x44, + 0x36, 0xa0, 0x3a, 0x2b, 0x07, 0x53, 0x1b, 0x3e, 0xc3, 0x2c, 0xb2, 0x18, 0x70, 0x69, 0xbd, 0x11, + 0x1c, 0xf3, 0x96, 0xc3, 0xb1, 0x09, 0xc0, 0x76, 0x3e, 0x5c, 0xa5, 0x2c, 0xb0, 0xb4, 0x6f, 0xab, + 0xbf, 0x69, 0x15, 0xea, 0x9b, 0x73, 0x58, 0x5c, 0x77, 0x28, 0xe3, 0x01, 0x90, 0x72, 0x81, 0x94, + 0xbd, 0xee, 0xd9, 0x8c, 0x48, 0x04, 0x4f, 0x1e, 0x64, 0x83, 0x43, 0x4b, 0x05, 0x52, 0x2e, 0xb4, + 0x12, 0x8d, 0x32, 0xb6, 0x6c, 0x4d, 0x09, 0xcd, 0x05, 0x25, 0xc9, 0x11, 0x84, 0x4f, 0xa9, 0x9e, + 0x9a, 0x02, 0xe2, 0x0a, 0x87, 0x7f, 0xab, 0xb8, 0xd5, 0xdd, 0xf3, 0x29, 0xd7, 0xcb, 0x07, 0xf7, + 0x42, 0xe7, 0x6f, 0x80, 0xd2, 0x9b, 0x07, 0xa8, 0x46, 0x0c, 0x71, 0xb3, 0xcd, 0x22, 0x80, 0x51, + 0x0d, 0x48, 0x19, 0x2a, 0x48, 0x80, 0xb0, 0x53, 0x48, 0x5b, 0x7e, 0x07, 0x3d, 0xef, 0x78, 0x05, + 0xc2, 0xb3, 0x15, 0x1d, 0x11, 0xe8, 0x6e, 0xd6, 0x42, 0x89, 0xb5, 0x27, 0xa9, 0x31, 0x5a, 0x8c, + 0xe9, 0x11, 0xe7, 0xaa, 0xb0, 0xd2, 0x32, 0x8e, 0x7c, 0x45, 0x6b, 0x38, 0x8b, 0x9b, 0x6c, 0x1d, + 0x60, 0x19, 0x48, 0x1d, 0xb0, 0xad, 0x6e, 0xc1, 0x1d, 0x59, 0x38, 0x82, 0x40, 0x62, 0x1b, 0xdb, + 0x7a, 0xfd, 0x58, 0xad, 0x43, 0x43, 0xcf, 0xea, 0x14, 0x13, 0x6f, 0x67, 0x1a, 0xb5, 0x25, 0xe5, + 0x92, 0x58, 0x93, 0xc4, 0xc6, 0x7d, 0x84, 0xcf, 0xfd, 0x56, 0x4f, 0xea, 0xed, 0xf8, 0x3c, 0x71, + 0x12, 0x3f, 0xb5, 0xfb, 0x47, 0xa6, 0xdd, 0x11, 0xcc, 0x2e, 0xcd, 0xba, 0xda, 0x61, 0x0a, 0x02, + 0x55, 0x57, 0xe4, 0x6d, 0x07, 0x6a, 0xb9, 0x33, 0x45, 0xa0, 0xa4, 0x52, 0x78, 0x8c, 0x77, 0xb3, + 0xec, 0x2c, 0xcb, 0xd9, 0xaf, 0xd8, 0xf8, 0xdf, 0xf2, 0x6f, 0xa0, 0x37, 0xe7, 0xfe, 0x26, 0x16, + 0xdb, 0x8a, 0x85, 0xe1, 0x9a, 0x69, 0xa9, 0xf2, 0x18, 0x35, 0xb8, 0x25, 0xd0, 0x34, 0xaa, 0xcc, + 0x04, 0x67, 0xe9, 0xce, 0x87, 0x93, 0x03, 0x7b, 0x44, 0x06, 0xcf, 0x77, 0x56, 0x01, 0xd7, 0x61, + 0xb9, 0x3b, 0x6e, 0xf7, 0x03, 0x13, 0x4a, 0xf8, 0x44, 0xcd, 0x9a, 0x3d, 0x65, 0x51, 0xf3, 0x58, + 0x90, 0x64, 0x85, 0x95, 0xdf, 0xee, 0x32, 0xe9, 0x92, 0x21, 0x58, 0x27, 0x26, 0x6a, 0x58, 0x90, + 0x0a, 0x2a, 0x08, 0x60, 0x96, 0xc0, 0x9f, 0x8a, 0x55, 0x5c, 0x43, 0x29, 0x03, 0xe5, 0x4e, 0xe4, + 0x4f, 0xd3, 0x6d, 0xbb, 0x8d, 0xeb, 0x46, 0xf1, 0x26, 0x25, 0xba, 0x0b, 0xfd, 0x39, 0x35, 0x92, + 0x5b, 0x86, 0x5b, 0xae, 0xdf, 0x11, 0x7f, 0x28, 0x81, 0x98, 0xca, 0xa6, 0x04, 0xab, 0x2c, 0x42, + 0xc5, 0x21, 0x3e, 0x2c, 0x38, 0xfd, 0x8e, 0xb9, 0x6c, 0x3a, 0x46, 0xe8, 0x0d, 0x05, 0xa6, 0x73, + 0x1f, 0xf8, 0xb3, 0xde, 0xd8, 0x57, 0x5d, 0xfd, 0x27, 0x47, 0x40, 0x72, 0x43, 0xb4, 
0x3b, 0x39, + 0xa0, 0x43, 0xc7, 0x1c, 0x05, 0xf2, 0x64, 0xee, 0x17, 0xc1, 0x90, 0x9d, 0x24, 0xb5, 0xc4, 0xd1, + 0x39, 0xac, 0xa1, 0xe4, 0xf1, 0x65, 0xcc, 0x66, 0xe3, 0x16, 0xec, 0x26, 0x29, 0x31, 0x78, 0x9d, + 0x05, 0x92, 0xcb, 0xa1, 0xce, 0xa9, 0xc6, 0x38, 0xb3, 0x08, 0x27, 0x2f, 0xdd, 0x0a, 0x88, 0x95, + 0x83, 0xa6, 0x80, 0x43, 0x21, 0x90, 0x50, 0x62, 0xd6, 0xca, 0x2c, 0xcc, 0x91, 0x37, 0x55, 0x69, + 0x77, 0x4e, 0x09, 0xa3, 0x1f, 0x9a, 0x75, 0x63, 0xd1, 0x06, 0xbc, 0xf8, 0x8a, 0x41, 0x26, 0x62, + 0xd7, 0x9d, 0xfe, 0xf4, 0xa2, 0xee, 0x79, 0xaf, 0xd8, 0x5f, 0x9d, 0x9c, 0x26, 0x1c, 0x70, 0x87, + 0x70, 0x00, 0x7c, 0xc2, 0x6d, 0xb3, 0x37, 0x39, 0xe8, 0x62, 0xd4, 0x12, 0x55, 0x2a, 0x15, 0xfb, + 0x3d, 0x39, 0x4c, 0x00, 0x18, 0x36, 0xb6, 0xe7, 0x94, 0x19, 0x32, 0xef, 0x44, 0x2c, 0xd8, 0x8a, + 0x72, 0xa4, 0x69, 0x65, 0x50, 0x30, 0x7f, 0xac, 0x11, 0x75, 0x66, 0x99, 0x8a, 0x65, 0x6e, 0x49, + 0x65, 0xed, 0xb6, 0xa7, 0xc1, 0xef, 0xfb, 0xc6, 0xe2, 0x51, 0xa4, 0x85, 0x66, 0x7b, 0xd8, 0xef, + 0x3e, 0xe0, 0xd4, 0x3f, 0x61, 0x3c, 0x7a, 0xaa, 0x6e, 0xc4, 0x1c, 0x13, 0x03, 0x2d, 0x45, 0xe9, + 0x92, 0xd9, 0x36, 0xce, 0x41, 0xfe, 0xd5, 0x6e, 0x31, 0x80, 0x35, 0x9c, 0xa9, 0x63, 0xe6, 0xef, + 0xfc, 0x7a, 0x71, 0x39, 0xd2, 0x66, 0x71, 0x1d, 0x5d, 0x66, 0x39, 0x04, 0x3c, 0x43, 0x11, 0x9f, + 0x3c, 0xb9, 0x55, 0x0e, 0xe3, 0x9a, 0xb5, 0x60, 0x8d, 0x0e, 0x8c, 0xac, 0xf3, 0xaa, 0xe5, 0x8a, + 0x48, 0x4d, 0x09, 0x1b, 0xca, 0x00, 0x3b, 0x86, 0xe6, 0xdb, 0x67, 0x35, 0x84, 0xac, 0xae, 0x03, + 0xd2, 0x67, 0xc7, 0x28, 0xb9, 0x8f, 0x65, 0xc9, 0x14, 0xa9, 0x15, 0xe9, 0x65, 0x94, 0xf8, 0x52, + 0x03, 0x06, 0x90, 0x88, 0x72, 0x1c, 0xc2, 0x85, 0xe0, 0xe2, 0xd2, 0xfe, 0xfe, 0x68, 0x66, 0x0e, + 0xfb, 0x60, 0xab, 0x82, 0x27, 0x45, 0x39, 0x8a, 0x71, 0xbd, 0x83, 0xf3, 0x39, 0xad, 0x41, 0x00, + 0xee, 0x17, 0xee, 0x2c, 0xd4, 0x0b, 0x03, 0x00, 0xe5, 0x0f, 0xc7, 0x16, 0x70, 0xdf, 0x6c, 0x7e, + 0xbd, 0x3e, 0xd7, 0x51, 0x5f, 0xe7, 0xb5, 0xc6, 0x7e, 0x72, 0xae, 0x2b, 0x80, 0x89, 0x12, 0x8b, + 0x79, 0x86, 0x60, 0xed, 0xda, 0x97, 0x9e, 0x92, 0x61, 0x85, 0xe2, 0x5d, 0x9c, 0x2d, 0x90, 0x25, + 0x79, 0xd5, 0xe0, 0x7e, 0xcd, 0xda, 0x49, 0x7a, 0xf6, 0x93, 0x10, 0x67, 0xdc, 0xa4, 0x6d, 0xa3, + 0x87, 0xb2, 0x18, 0x26, 0x96, 0xcc, 0xcb, 0xc6, 0x4d, 0xd1, 0x23, 0x69, 0xde, 0xb3, 0x88, 0xe0, + 0x4c, 0x44, 0x56, 0x04, 0x68, 0x40, 0x1d, 0x5e, 0x53, 0x8b, 0x8a, 0xc2, 0xca, 0x54, 0x6a, 0x20, + 0x83, 0xfc, 0xd8, 0xeb, 0x81, 0x02, 0x98, 0xa7, 0xa0, 0x8c, 0x22, 0xeb, 0x57, 0x8e, 0x52, 0x38, + 0x6b, 0x11, 0x70, 0x08, 0x71, 0x37, 0xb9, 0xce, 0xa8, 0xe3, 0x46, 0x2c, 0x3d, 0x8e, 0x78, 0x8c, + 0x23, 0x30, 0x72, 0x0e, 0x1a, 0x50, 0xb2, 0xb3, 0x90, 0x75, 0x7e, 0xe5, 0x26, 0x9d, 0x34, 0x56, + 0x87, 0xd9, 0xf6, 0x32, 0xd6, 0xb5, 0xf5, 0xa1, 0x9b, 0xfe, 0xb4, 0x95, 0x47, 0xdc, 0xbb, 0xa0, + 0xe9, 0xd6, 0xa6, 0x04, 0x59, 0x7b, 0xa0, 0xe4, 0x47, 0xa4, 0x87, 0x01, 0xb1, 0x04, 0xa8, 0xbe, + 0xaf, 0xb8, 0x41, 0xee, 0x9c, 0xef, 0xe0, 0x26, 0x63, 0xf9, 0x9c, 0xf8, 0xb1, 0x89, 0x4f, 0xb7, + 0xc9, 0x77, 0x55, 0xe5, 0x79, 0x19, 0xc5, 0x1e, 0x1d, 0xf5, 0xfa, 0x0f, 0x2d, 0xbe, 0x94, 0xbd, + 0xfe, 0x4e, 0xfe, 0x1a, 0x1c, 0x1d, 0x51, 0x7b, 0xbd, 0x74, 0x24, 0x78, 0x66, 0x8a, 0xcd, 0xb4, + 0x8a, 0xa8, 0xb1, 0x10, 0xf0, 0x08, 0xfa, 0x72, 0x6e, 0x65, 0xc1, 0x5c, 0xcb, 0xc4, 0x06, 0xbc, + 0xf3, 0x60, 0x29, 0x95, 0x69, 0xa1, 0x4c, 0x5d, 0xdf, 0x72, 0xbd, 0xd5, 0x49, 0xb3, 0x7a, 0x8f, + 0x46, 0x02, 0xa4, 0x3c, 0x8e, 0xf9, 0x37, 0xdf, 0xbc, 0x32, 0x6c, 0xe2, 0x23, 0x74, 0xd4, 0x34, + 0xdb, 0x5c, 0xca, 0x0d, 0x21, 0xd6, 0x1b, 0x71, 0x59, 0x4f, 0x4b, 0x2c, 0x66, 0xf0, 0x38, 0xc2, + 0xd6, 0x02, 
0xab, 0x80, 0x74, 0x91, 0x6b, 0xc5, 0x62, 0xd9, 0xe2, 0xc7, 0x34, 0xc7, 0x70, 0x77, + 0xde, 0xe2, 0x5b, 0x11, 0xce, 0x46, 0x21, 0xcc, 0x24, 0xbe, 0xdb, 0x9e, 0x4a, 0xdc, 0x1d, 0x17, + 0x10, 0xc4, 0xb4, 0xe5, 0x97, 0xa6, 0x7e, 0x29, 0xa8, 0x24, 0x47, 0xca, 0xb5, 0x3e, 0xa1, 0x5e, + 0xb3, 0x8a, 0x31, 0xc5, 0x51, 0xfb, 0x38, 0xa1, 0xab, 0xcb, 0x57, 0x3c, 0xfa, 0xf7, 0xd9, 0x5a, + 0x1c, 0x84, 0x27, 0xf8, 0x8e, 0x94, 0x34, 0xa3, 0x51, 0x97, 0x18, 0xe6, 0xbc, 0x9f, 0xca, 0xee, + 0xee, 0x41, 0xe1, 0xc9, 0x56, 0x95, 0xec, 0xd3, 0x02, 0xdb, 0x82, 0xf9, 0xfb, 0xa7, 0x13, 0x1d, + 0x09, 0xa2, 0x16, 0xc2, 0x7b, 0xfd, 0xfd, 0xd6, 0x09, 0x20, 0x10, 0x0e, 0x12, 0xf8, 0x16, 0x87, + 0x06, 0xf3, 0x40, 0xc5, 0x7c, 0xf1, 0x0c, 0x7c, 0xde, 0x14, 0xec, 0x67, 0x0e, 0x3a, 0x93, 0x2b, + 0x02, 0x07, 0x4a, 0xc4, 0xc5, 0xc3, 0xc4, 0xe8, 0xd9, 0x3a, 0xc3, 0x99, 0x96, 0x0f, 0xb8, 0x05, + 0x7c, 0x94, 0x0e, 0xc6, 0x76, 0xdd, 0x09, 0xd9, 0xce, 0xa8, 0xea, 0x73, 0xa7, 0x3f, 0x22, 0x98, + 0xdf, 0x40, 0xa1, 0x07, 0x9c, 0xf3, 0xef, 0x9e, 0xa1, 0x69, 0x00, 0xf9, 0x1a, 0x1e, 0xc8, 0x44, + 0x80, 0xc5, 0x27, 0x0b, 0x01, 0x33, 0x42, 0x49, 0x62, 0x14, 0x04, 0xd8, 0x7a, 0x54, 0x65, 0x5a, + 0x42, 0x98, 0xf1, 0x90, 0x56, 0xc8, 0x51, 0x88, 0x93, 0x12, 0xde, 0xb1, 0x6d, 0x5f, 0x99, 0x70, + 0xe9, 0x57, 0x2e, 0x5e, 0x42, 0x88, 0x43, 0xf3, 0x8c, 0x27, 0x37, 0x17, 0x3b, 0x7f, 0xb5, 0x90, + 0xeb, 0x7e, 0x37, 0x9a, 0xb2, 0x3b, 0x7d, 0xbd, 0x8d, 0x65, 0x83, 0x7f, 0xf5, 0x9e, 0xbd, 0xbb, + 0x3a, 0x54, 0xf5, 0x73, 0x27, 0xb3, 0x91, 0x56, 0xcd, 0x25, 0x6a, 0xa0, 0x38, 0x91, 0x9c, 0x51, + 0x0e, 0xcc, 0x02, 0xb3, 0xc1, 0xd1, 0x86, 0xb0, 0x0b, 0x59, 0xfa, 0xc5, 0x1f, 0x19, 0x37, 0xab, + 0x3b, 0xdd, 0x19, 0xe3, 0x4c, 0xdd, 0xad, 0x27, 0x2d, 0xa2, 0x39, 0x31, 0x96, 0x4a, 0xf6, 0xda, + 0x35, 0x8e, 0xbd, 0xa1, 0xbf, 0x0c, 0xef, 0x28, 0xb8, 0xe1, 0x1f, 0x8f, 0x50, 0xd6, 0xc7, 0xa6, + 0x45, 0x29, 0xb9, 0xd4, 0xdf, 0x43, 0x50, 0xe7, 0x91, 0x18, 0x6c, 0xf2, 0x71, 0x32, 0x61, 0xe3, + 0xa6, 0x09, 0x24, 0xd0, 0x2f, 0x53, 0x89, 0x1d, 0x8a, 0xcd, 0x0e, 0x97, 0xe1, 0xfe, 0x74, 0x6b, + 0x37, 0xbb, 0x9e, 0xaf, 0xed, 0x57, 0x40, 0xc0, 0xc0, 0x25, 0x04, 0x70, 0x0e, 0xf9, 0x40, 0x98, + 0xdd, 0xf0, 0xeb, 0x32, 0x97, 0xaf, 0xe7, 0x63, 0xe6, 0xe3, 0x9b, 0x3a, 0x2a, 0xe3, 0xf3, 0x46, + 0xd5, 0x07, 0xee, 0x20, 0x80, 0x16, 0x18, 0x7a, 0xb6, 0x6f, 0x71, 0x8b, 0xa1, 0x00, 0xdf, 0xa8, + 0x12, 0x30, 0x6b, 0xa1, 0xf6, 0x06, 0xcb, 0xc7, 0x7b, 0x47, 0x8a, 0xbb, 0xaa, 0x5b, 0xbb, 0xb3, + 0xca, 0x6f, 0x05, 0x9d, 0x55, 0xe9, 0x30, 0xea, 0x6f, 0xbb, 0x8c, 0x72, 0xb4, 0xd3, 0xf3, 0x2e, + 0x6c, 0xd6, 0xee, 0x76, 0x30, 0x9b, 0x5f, 0xc1, 0xed, 0x24, 0x42, 0x1d, 0x04, 0x3f, 0xea, 0x8a, + 0xbf, 0xfb, 0xaf, 0x10, 0x9d, 0xbf, 0xc5, 0xb1, 0x40, 0x26, 0xaa, 0x7d, 0x5f, 0x5b, 0x28, 0xa9, + 0xe9, 0x4a, 0x5a, 0x86, 0x24, 0x67, 0x5b, 0x4c, 0xd4, 0x7d, 0xe9, 0xdb, 0x3a, 0x3a, 0x01, 0x60, + 0xea, 0x17, 0x50, 0xe7, 0x51, 0x1a, 0x32, 0x75, 0xe9, 0x98, 0x7f, 0x42, 0x6c, 0xf0, 0x74, 0x76, + 0x6c, 0x11, 0xcb, 0x06, 0x6d, 0x83, 0x05, 0x4a, 0x37, 0xc2, 0xf1, 0x59, 0x46, 0xb8, 0x3f, 0x2b, + 0x1e, 0x0e, 0xcb, 0x70, 0x65, 0xe7, 0x1b, 0xb3, 0xd0, 0x1f, 0x6d, 0x67, 0xdb, 0x32, 0x3a, 0x98, + 0x0b, 0xa3, 0x16, 0x0b, 0x5a, 0x81, 0x6d, 0x00, 0x0e, 0xc9, 0xb3, 0x6d, 0x9b, 0xdf, 0x7d, 0xa9, + 0xab, 0xa1, 0x19, 0xc0, 0x80, 0xc7, 0x6d, 0xf4, 0x37, 0x32, 0x5c, 0xa6, 0xdc, 0x7a, 0x2f, 0x88, + 0x9d, 0x9c, 0x1c, 0x57, 0x8f, 0x2b, 0xa4, 0xb0, 0x36, 0x51, 0x87, 0x34, 0x8d, 0x64, 0x69, 0x00, + 0x41, 0x49, 0x4d, 0x38, 0xdb, 0x68, 0x58, 0xfa, 0x75, 0x36, 0x54, 0x0c, 0xce, 0x0c, 0x2a, 0x90, + 0xd0, 0x5a, 0xbf, 0xda, 0x60, 0xb9, 
0x3f, 0xcf, 0xdb, 0x25, 0x95, 0x02, 0xf6, 0xae, 0x30, 0x92, + 0xef, 0x9c, 0x29, 0xd2, 0x2d, 0x14, 0x76, 0x47, 0x6e, 0x25, 0xa7, 0x78, 0x9f, 0xfd, 0x5f, 0xc7, + 0x6c, 0xc6, 0x89, 0x69, 0xf4, 0x59, 0xae, 0x08, 0x4a, 0x42, 0xb2, 0xde, 0x00, 0x6a, 0x8f, 0xbb, + 0xc4, 0x03, 0x4c, 0xcb, 0x00, 0xd5, 0x30, 0x67, 0xd1, 0x0d, 0x32, 0x88, 0x4b, 0x13, 0x82, 0xda, + 0x88, 0x25, 0x79, 0xdc, 0xf6, 0x12, 0x7e, 0xfe, 0xb2, 0xe0, 0x6a, 0x7c, 0x3c, 0x75, 0xfe, 0x15, + 0x6b, 0x30, 0xe3, 0xe2, 0xe7, 0x4c, 0x51, 0x36, 0x68, 0x0b, 0x36, 0xfd, 0x56, 0xf2, 0x44, 0x3e, + 0xa3, 0xc5, 0xa4, 0x93, 0xde, 0x87, 0xb4, 0xf1, 0x5e, 0x32, 0xcf, 0x6e, 0xfc, 0xfc, 0x2c, 0x4b, + 0x62, 0x9a, 0x55, 0xf4, 0xc3, 0x45, 0x75, 0x77, 0x39, 0xfc, 0x84, 0x40, 0x7f, 0x92, 0x2f, 0x84, + 0x52, 0x66, 0x90, 0xfa, 0x8d, 0x82, 0x75, 0x70, 0xf9, 0xa1, 0x51, 0x67, 0x11, 0xd1, 0xaa, 0xa5, + 0xea, 0x24, 0xa9, 0x28, 0xbc, 0xa0, 0x50, 0x8a, 0x5d, 0x69, 0x31, 0x35, 0x4b, 0xcd, 0x96, 0xf7, + 0x42, 0x21, 0x10, 0x70, 0x07, 0x93, 0x32, 0x31, 0x8c, 0x2a, 0x9d, 0xae, 0xd8, 0xdb, 0x01, 0x00, + 0xc9, 0x64, 0x45, 0x72, 0x94, 0x56, 0x92, 0x8d, 0x6a, 0x4a, 0xf5, 0x69, 0x3a, 0xc4, 0xfd, 0x60, + 0x4c, 0x4e, 0x57, 0xa1, 0x73, 0x82, 0xd3, 0x87, 0x5f, 0xe3, 0x58, 0xbb, 0xd4, 0x61, 0xb9, 0x11, + 0xb5, 0xd4, 0x6f, 0x1c, 0xe5, 0xba, 0x19, 0x8e, 0x33, 0x1f, 0x38, 0xb3, 0x4a, 0x1b, 0xc4, 0x4a, + 0x46, 0x8b, 0x47, 0xf4, 0x6b, 0xfb, 0xc9, 0xea, 0x6f, 0x33, 0x4e, 0xef, 0x47, 0x3e, 0xa9, 0xd3, + 0xa3, 0xc4, 0x82, 0x78, 0xfb, 0x0e, 0x5c, 0x4f, 0x30, 0xa2, 0xa0, 0xfa, 0x36, 0xf8, 0xd4, 0x8d, + 0x1f, 0x50, 0x2b, 0xd7, 0x26, 0x4f, 0x73, 0x91, 0xa4, 0x47, 0xdf, 0x1e, 0x2d, 0xee, 0x59, 0x95, + 0xfd, 0xfb, 0x25, 0xae, 0xed, 0x25, 0x75, 0x19, 0x3a, 0xad, 0x26, 0x39, 0xf8, 0xc5, 0xbc, 0xfa, + 0x39, 0xef, 0xa8, 0xc6, 0xef, 0x9f, 0x9f, 0xd1, 0x10, 0x70, 0xd1, 0x83, 0x72, 0x0a, 0x97, 0xa3, + 0xa5, 0x3f, 0x55, 0xc3, 0x53, 0x74, 0x6d, 0x61, 0x27, 0xf6, 0xca, 0xe7, 0x21, 0x1a, 0x39, 0xd8, + 0xe3, 0x35, 0xc9, 0x0f, 0x0c, 0xc4, 0xf4, 0xc2, 0x80, 0xfe, 0xce, 0x24, 0x34, 0x21, 0x66, 0xe8, + 0xb3, 0xc6, 0x75, 0xa0, 0x10, 0x7f, 0x65, 0xee, 0x2a, 0x90, 0xe3, 0x14, 0x8a, 0x02, 0x14, 0xbf, + 0xbf, 0xad, 0xd7, 0x39, 0x6a, 0x8b, 0x29, 0x73, 0x07, 0x84, 0x64, 0x27, 0xf4, 0x47, 0x28, 0x5f, + 0x49, 0x13, 0x4e, 0x6e, 0x19, 0xfe, 0x59, 0x47, 0x12, 0xb3, 0x7f, 0xa5, 0xd7, 0x0a, 0xb1, 0x32, + 0x80, 0x53, 0xde, 0xfb, 0xa7, 0x69, 0xb1, 0xee, 0x0a, 0xf6, 0xa3, 0x27, 0x86, 0xef, 0x78, 0x99, + 0xe3, 0x00, 0x97, 0x6c, 0x66, 0x9c, 0xbc, 0x2b, 0x90, 0xe4, 0x5a, 0xc2, 0x69, 0x59, 0xe6, 0x62, + 0x88, 0x97, 0x67, 0x48, 0x5c, 0x80, 0xef, 0xf4, 0xd6, 0xf0, 0x26, 0x1f, 0xb7, 0x13, 0x1e, 0xac, + 0x58, 0x0a, 0x25, 0xb0, 0xc9, 0xc1, 0x18, 0x90, 0xd4, 0xbc, 0x37, 0x01, 0x77, 0x41, 0x79, 0x43, + 0x3e, 0x82, 0x01, 0x74, 0xac, 0x1c, 0x40, 0x44, 0xe1, 0x03, 0x14, 0xaa, 0xad, 0xa8, 0x9e, 0x35, + 0x3a, 0x8d, 0x01, 0xc3, 0xc6, 0xec, 0x32, 0xb8, 0x28, 0x5e, 0x94, 0x92, 0xab, 0x0c, 0xca, 0x39, + 0x81, 0xfc, 0x67, 0xa1, 0x82, 0xa3, 0x99, 0xa0, 0xb2, 0x1a, 0x92, 0x1b, 0xdc, 0xad, 0x83, 0x53, + 0x13, 0x28, 0xb2, 0x05, 0x6a, 0x5d, 0x7a, 0xfa, 0x7e, 0xdd, 0x99, 0x93, 0xa2, 0xbd, 0x2c, 0x20, + 0x02, 0x64, 0x82, 0x6f, 0x16, 0xda, 0xa5, 0xaf, 0x3d, 0x07, 0x6b, 0xf3, 0x08, 0xfb, 0x5b, 0x7f, + 0xb4, 0x57, 0x38, 0xd5, 0x84, 0xa1, 0x2f, 0x96, 0x7d, 0x8f, 0xc3, 0xf9, 0x59, 0x04, 0x86, 0x6e, + 0x40, 0x8a, 0x6c, 0x0e, 0x54, 0xe1, 0x62, 0x99, 0x64, 0x09, 0x65, 0xc7, 0x5e, 0x89, 0xe9, 0xe7, + 0xbd, 0xc6, 0x32, 0x70, 0xc1, 0x21, 0x52, 0xb8, 0x53, 0x03, 0x03, 0x7f, 0xd0, 0xd6, 0xe6, 0x94, + 0xaf, 0xd6, 0x4c, 0x8b, 0xc0, 0x48, 0xa8, 0x10, 0xbe, 0x40, 
0xc9, 0x17, 0xee, 0x13, 0x41, 0x7e, + 0xd0, 0xde, 0x85, 0x37, 0xa4, 0x17, 0xe7, 0xba, 0x29, 0x7f, 0xc3, 0x31, 0x81, 0xcf, 0x41, 0x34, + 0x86, 0xb5, 0xb3, 0x6a, 0x38, 0x02, 0xcc, 0xc6, 0xac, 0x9d, 0xb4, 0x47, 0xc0, 0x62, 0x8f, 0x64, + 0xca, 0xd1, 0xe1, 0x6c, 0x4f, 0x00, 0x84, 0xac, 0xec, 0x32, 0x31, 0x66, 0x6e, 0x0a, 0xad, 0x1d, + 0x2c, 0xb9, 0x12, 0xcb, 0xe9, 0xf3, 0xc5, 0xc5, 0x0d, 0x35, 0xbe, 0x6a, 0xa1, 0x22, 0x85, 0x9f, + 0x92, 0xe6, 0x6f, 0xac, 0x85, 0x9e, 0x4a, 0x8f, 0x94, 0xa6, 0xd3, 0xda, 0x17, 0x6f, 0xeb, 0xae, + 0x19, 0xdc, 0xdf, 0x1d, 0xef, 0x21, 0x1a, 0x5a, 0x43, 0x8f, 0x7d, 0x96, 0xfc, 0x67, 0x8a, 0xcd, + 0x37, 0x78, 0x43, 0x46, 0xfe, 0x84, 0xb9, 0xb8, 0x4d, 0xcf, 0x6d, 0x95, 0xfb, 0x2a, 0xd5, 0x39, + 0x3b, 0x89, 0x8f, 0x43, 0xbb, 0xdf, 0xaf, 0x6c, 0x87, 0x2a, 0x0d, 0x15, 0xd8, 0xe0, 0xf0, 0x2a, + 0x84, 0xbc, 0xe9, 0x88, 0x5d, 0xaa, 0xd7, 0xac, 0xd6, 0x39, 0xe1, 0x4b, 0x65, 0xed, 0xe6, 0x0f, + 0x9f, 0xf8, 0x72, 0x44, 0x8d, 0x86, 0x9a, 0x39, 0x6d, 0x5f, 0x09, 0xa9, 0x04, 0x49, 0x0e, 0x94, + 0x0a, 0xf8, 0x32, 0x9c, 0xac, 0x94, 0xe3, 0x68, 0x85, 0x20, 0x87, 0xf4, 0x69, 0x76, 0x64, 0x3a, + 0xaf, 0x0c, 0x08, 0x41, 0x61, 0x70, 0x05, 0xea, 0x21, 0xe8, 0x6b, 0x51, 0xa7, 0xfa, 0x26, 0x46, + 0x24, 0x38, 0x88, 0xd8, 0x6d, 0xfa, 0x99, 0x56, 0x5e, 0x9c, 0xd4, 0x3c, 0xd8, 0x08, 0xb7, 0xe4, + 0x5e, 0xe1, 0x41, 0x23, 0x2a, 0x3c, 0x50, 0xf6, 0xb9, 0xbe, 0x83, 0x0a, 0x27, 0x75, 0x5b, 0xef, + 0xfa, 0x99, 0xfd, 0xfb, 0x79, 0x43, 0x27, 0xea, 0x1a, 0x3c, 0xa1, 0x74, 0x7f, 0x01, 0x8e, 0xd9, + 0x58, 0xab, 0xe3, 0x99, 0x9f, 0xa3, 0x3b, 0x26, 0x9a, 0x81, 0x44, 0x75, 0xc8, 0x3d, 0x4c, 0x13, + 0xdc, 0x57, 0x25, 0xe6, 0xb1, 0x7a, 0xcd, 0x96, 0xa6, 0xf5, 0x55, 0xb0, 0x2f, 0x69, 0xdb, 0x66, + 0x58, 0xae, 0x1c, 0xaa, 0x63, 0x7f, 0xfd, 0xab, 0x9b, 0x44, 0x8e, 0xed, 0xe7, 0xab, 0x30, 0xd5, + 0x36, 0xf0, 0x36, 0xd8, 0x03, 0x60, 0xec, 0x6b, 0x46, 0x6b, 0x74, 0x42, 0x48, 0xe4, 0x3a, 0xbb, + 0xe7, 0xf2, 0xd9, 0x4d, 0x98, 0xc8, 0x8f, 0xb0, 0x6f, 0x83, 0xb8, 0xeb, 0x68, 0x16, 0x79, 0xd7, + 0x9f, 0xe6, 0x9d, 0x88, 0x74, 0x57, 0x2b, 0xee, 0xb4, 0xc4, 0xd6, 0x9d, 0xb5, 0x64, 0x56, 0x6e, + 0x30, 0x8a, 0xb4, 0x59, 0x30, 0xc7, 0xa8, 0x81, 0xcf, 0x01, 0xc9, 0xc5, 0x4c, 0x9e, 0x75, 0xaa, + 0xb9, 0x96, 0x82, 0x05, 0x1b, 0xae, 0x2f, 0xf3, 0x28, 0x26, 0x45, 0xa2, 0xd1, 0x15, 0xe2, 0xa3, + 0xde, 0xf1, 0x07, 0x3e, 0x94, 0xf8, 0x0f, 0xc0, 0xba, 0xe9, 0x92, 0x2a, 0x0c, 0x35, 0xdb, 0x58, + 0x59, 0x64, 0xf3, 0x61, 0xf2, 0xe3, 0xf2, 0xe5, 0xf6, 0x9f, 0x26, 0xd6, 0x8e, 0xe0, 0xf0, 0xd5, + 0x72, 0x0a, 0x01, 0x8f, 0x48, 0xf3, 0x2f, 0x6e, 0x91, 0xdf, 0xd6, 0x49, 0x79, 0x65, 0x30, 0x5d, + 0x76, 0xe8, 0xfd, 0x6f, 0xc5, 0x48, 0x28, 0x99, 0xbf, 0xd8, 0x73, 0x86, 0x32, 0x44, 0xa3, 0xbb, + 0x63, 0x96, 0x54, 0x79, 0xb2, 0xdb, 0xf7, 0xdc, 0x5b, 0xfd, 0x1f, 0xfc, 0xab, 0x5e, 0x16, 0x47, + 0xa6, 0x26, 0x2a, 0x1d, 0xe2, 0x53, 0x4b, 0xb3, 0x4a, 0xf3, 0x8e, 0x7d, 0x8b, 0x99, 0xb6, 0x24, + 0x92, 0xc4, 0x21, 0x5f, 0xa3, 0x92, 0x7b, 0x75, 0xb3, 0x0d, 0x64, 0x30, 0xeb, 0x89, 0x18, 0x1b, + 0xe3, 0xaa, 0xcf, 0x9f, 0x76, 0x34, 0xe5, 0x21, 0x27, 0x64, 0x96, 0x21, 0x59, 0xc7, 0xa3, 0x71, + 0x25, 0x37, 0x94, 0x1b, 0x6b, 0xf6, 0xd8, 0x81, 0x23, 0x71, 0x37, 0x81, 0xe0, 0x77, 0x32, 0x09, + 0x5b, 0xe5, 0xfb, 0x29, 0x1a, 0x5e, 0xfc, 0x79, 0x3e, 0x5d, 0xd0, 0xee, 0x9f, 0xb0, 0xf8, 0x3c, + 0x63, 0x61, 0xcb, 0xa3, 0xec, 0x25, 0x65, 0x5c, 0x88, 0x07, 0xfd, 0xcf, 0x69, 0xbd, 0x7b, 0x89, + 0xc1, 0x95, 0x04, 0x59, 0x36, 0x8b, 0xad, 0x7a, 0x54, 0x36, 0xcb, 0x91, 0x99, 0x4a, 0x58, 0x17, + 0x38, 0x15, 0x10, 0xb4, 0xd1, 0x40, 0xc3, 0xea, 0xa4, 0x6f, 0xee, 0x59, 0x70, 0x11, 
0x19, 0xd8, + 0x4d, 0x83, 0x82, 0x30, 0x06, 0x45, 0x80, 0xba, 0xfa, 0xb1, 0xe0, 0x9d, 0x92, 0x89, 0x4c, 0x8f, + 0x2c, 0xe9, 0xf3, 0xec, 0x54, 0x53, 0x2f, 0x09, 0x1e, 0x1b, 0xba, 0xe8, 0xa3, 0xbc, 0x54, 0x2d, + 0xc6, 0x10, 0x6e, 0x9c, 0x57, 0xce, 0x0d, 0x7c, 0x01, 0xeb, 0x38, 0x9c, 0x90, 0xc4, 0x81, 0x36, + 0x9a, 0x55, 0xc6, 0x8a, 0x2d, 0x0a, 0xda, 0x26, 0x93, 0x79, 0xc6, 0x7a, 0x49, 0xf1, 0xfe, 0xd9, + 0x37, 0x5c, 0x20, 0xef, 0xd7, 0x14, 0x61, 0x1e, 0x09, 0xda, 0xb6, 0xe1, 0x3e, 0xbb, 0x1d, 0xba, + 0x9f, 0x6d, 0x3d, 0x85, 0x39, 0x6f, 0x42, 0x49, 0x73, 0xe4, 0xec, 0xb3, 0xcd, 0x24, 0x4f, 0x57, + 0x90, 0x59, 0xf6, 0x87, 0xec, 0x11, 0xd6, 0x84, 0x71, 0xcc, 0x20, 0xbf, 0xc9, 0x37, 0x2e, 0xf9, + 0xf2, 0xf4, 0x6a, 0x8e, 0xb7, 0x19, 0xfd, 0xcc, 0xce, 0x67, 0x69, 0xaa, 0xd8, 0x72, 0x6a, 0xd5, + 0x4f, 0x12, 0x3b, 0x31, 0x3b, 0xd3, 0xe1, 0xd1, 0x59, 0x0f, 0x6b, 0x84, 0x0e, 0x97, 0xc8, 0x94, + 0x8b, 0xa1, 0xde, 0xe0, 0x63, 0xac, 0x4e, 0x89, 0xc7, 0x37, 0x84, 0x1f, 0x8b, 0xdd, 0xae, 0x73, + 0x32, 0x93, 0x6c, 0xea, 0x78, 0x4e, 0xc4, 0x44, 0x58, 0xb6, 0x1a, 0xc6, 0xf3, 0x5c, 0xda, 0xc8, + 0xd8, 0xc2, 0x35, 0x15, 0x27, 0x04, 0xa8, 0xfc, 0xa4, 0x91, 0x0a, 0x1f, 0x25, 0x2d, 0x75, 0x2d, + 0xe5, 0x8c, 0x1c, 0xe4, 0xc4, 0xf4, 0x7e, 0x43, 0xa1, 0x35, 0x9a, 0xa8, 0xad, 0xac, 0x2e, 0xd4, + 0x6b, 0xb2, 0x06, 0x56, 0xb5, 0x0e, 0xa2, 0x91, 0x42, 0xf1, 0xb9, 0xad, 0x1f, 0xd3, 0x8d, 0xde, + 0xb0, 0xf5, 0xa0, 0xfa, 0xa0, 0x4d, 0xef, 0x2b, 0xfb, 0x0f, 0xc8, 0x01, 0x6b, 0x02, 0x90, 0x5b, + 0x2c, 0xd1, 0x39, 0x5d, 0xf9, 0xe5, 0x06, 0xc3, 0x9d, 0x3d, 0x7c, 0x30, 0xa9, 0x38, 0xb7, 0xf7, + 0x42, 0x3c, 0x16, 0x8f, 0xbe, 0x42, 0x76, 0x5c, 0xe0, 0x3a, 0x41, 0x36, 0x8d, 0xbd, 0x59, 0xe4, + 0x9e, 0xc7, 0xd1, 0x7a, 0xfe, 0x0a, 0x3d, 0x0b, 0xed, 0x91, 0xb5, 0x87, 0x8d, 0xa0, 0x15, 0x7a, + 0x93, 0x33, 0x4b, 0x2e, 0xc3, 0xa7, 0xf5, 0x15, 0x30, 0x10, 0xce, 0x8c, 0x8a, 0xdf, 0x84, 0xe0, + 0x9b, 0xb4, 0x4f, 0xfd, 0xe9, 0xb0, 0x39, 0xd3, 0xd1, 0xd3, 0xb3, 0xc8, 0x47, 0x0a, 0xa2, 0xfa, + 0xe6, 0x2d, 0xe7, 0x30, 0x33, 0x62, 0xc0, 0x3b, 0xe5, 0xba, 0x38, 0xee, 0x62, 0x33, 0x7e, 0xc5, + 0x51, 0xe4, 0x0b, 0x15, 0xe4, 0xc6, 0xd8, 0x95, 0xd3, 0x09, 0xc9, 0x1d, 0x3f, 0x3e, 0x50, 0x7f, + 0x48, 0x9e, 0x70, 0x21, 0xce, 0x43, 0x24, 0x41, 0xaf, 0x01, 0xbc, 0x1e, 0xff, 0xc8, 0x16, 0xd4, + 0x7e, 0xb9, 0x05, 0x02, 0x02, 0xa0, 0x1c, 0xdd, 0x2f, 0x84, 0x1f, 0x8e, 0x7b, 0xc7, 0x63, 0x50, + 0xef, 0x1f, 0x78, 0xc0, 0x60, 0xaf, 0x6e, 0x09, 0x67, 0xf9, 0xf9, 0x63, 0x5d, 0x17, 0x81, 0xce, + 0x72, 0xf0, 0x51, 0xf4, 0x2e, 0x7c, 0xce, 0x96, 0x14, 0x66, 0xf0, 0x72, 0x68, 0x7b, 0x21, 0xaf, + 0x2c, 0x34, 0x2b, 0xc1, 0x74, 0xd6, 0x48, 0x9b, 0xce, 0xd4, 0xf6, 0x29, 0x2e, 0x3e, 0xc3, 0xc9, + 0xe0, 0x00, 0xbf, 0x53, 0x71, 0xd0, 0x0d, 0x5c, 0x4e, 0x11, 0x4a, 0xbc, 0x48, 0x7d, 0x78, 0x38, + 0x34, 0x56, 0xed, 0x95, 0x5c, 0x76, 0xb2, 0x38, 0x0e, 0xba, 0x61, 0x57, 0x65, 0x5a, 0xa9, 0x4c, + 0xbe, 0x8e, 0x03, 0xbd, 0x23, 0xac, 0x34, 0x63, 0x85, 0x8f, 0x0d, 0x05, 0xee, 0x39, 0x2d, 0x93, + 0x0c, 0x75, 0x14, 0xa5, 0xf6, 0x29, 0x1d, 0xca, 0xec, 0x8b, 0xeb, 0x25, 0x3e, 0x90, 0x6c, 0xfc, + 0xef, 0xfc, 0x8e, 0xb1, 0x91, 0x90, 0x73, 0x9a, 0x3c, 0x2a, 0xde, 0x44, 0x4e, 0x6d, 0x3b, 0x6c, + 0xe1, 0x92, 0xcb, 0xa2, 0x8b, 0xf3, 0x9d, 0x58, 0xcb, 0x63, 0x03, 0xa5, 0x58, 0xce, 0x30, 0xa0, + 0x68, 0x4f, 0x7b, 0xb6, 0xca, 0x21, 0x99, 0x24, 0xe0, 0xdd, 0xf1, 0x05, 0x86, 0x02, 0xd0, 0xf8, + 0x92, 0xd3, 0x6e, 0xef, 0xc3, 0x32, 0xae, 0x6a, 0xea, 0x53, 0x08, 0x49, 0xd2, 0xe2, 0xce, 0x9d, + 0x24, 0x65, 0xea, 0xe2, 0xda, 0x96, 0x8f, 0xe9, 0x06, 0xcb, 0x24, 0xc9, 0xad, 0xbd, 0xdb, 0x44, + 0xc7, 0x70, 
0xf7, 0x00, 0x22, 0xf6, 0x1c, 0x9e, 0x7e, 0x82, 0x41, 0x7c, 0x7d, 0xbf, 0xb5, 0x44, + 0xca, 0x90, 0xac, 0xdc, 0xf0, 0x25, 0xba, 0x40, 0xa7, 0x51, 0x26, 0x82, 0x92, 0xf9, 0x0d, 0x1c, + 0xbc, 0x49, 0x0b, 0xee, 0x0e, 0xbd, 0x9b, 0x44, 0xa0, 0x81, 0x1e, 0xd1, 0x1e, 0x31, 0x34, 0x42, + 0xff, 0x6b, 0x80, 0x01, 0xa7, 0xa7, 0xdf, 0xb6, 0xde, 0xb5, 0x31, 0x2d, 0x65, 0x33, 0xa8, 0xfe, + 0x1d, 0x22, 0x08, 0x74, 0xd6, 0xfb, 0x5d, 0x91, 0x11, 0xae, 0xa2, 0xb6, 0x3f, 0x23, 0x75, 0x64, + 0x15, 0xbb, 0x67, 0xc8, 0x9c, 0x54, 0x1c, 0x53, 0xc5, 0x84, 0x85, 0x12, 0x8c, 0xee, 0xdb, 0xaa, + 0xf1, 0x15, 0x86, 0xe7, 0x62, 0xf5, 0xe7, 0x90, 0xf4, 0xe1, 0x28, 0x0f, 0x5a, 0x67, 0x5c, 0xd7, + 0xac, 0xa9, 0x27, 0xb0, 0xda, 0x9e, 0xb7, 0xca, 0xd9, 0x80, 0xc8, 0x75, 0x7f, 0x3f, 0xa6, 0xac, + 0xc4, 0x8d, 0xfb, 0xc6, 0xeb, 0x73, 0x82, 0x97, 0x99, 0xb8, 0x07, 0x86, 0x7a, 0x2a, 0x32, 0x99, + 0x3a, 0x7f, 0x81, 0x8c, 0x28, 0xf1, 0xce, 0x99, 0x51, 0x1b, 0xc1, 0x95, 0x4b, 0x46, 0xd0, 0xf2, + 0xa1, 0xc8, 0x5f, 0x4c, 0x9c, 0x12, 0x6e, 0x88, 0x50, 0x5d, 0x80, 0x9b, 0xcf, 0xc2, 0x35, 0xfc, + 0xba, 0xec, 0xcb, 0x9d, 0xae, 0x33, 0xb1, 0xc2, 0xf6, 0xde, 0xdc, 0x93, 0x7f, 0xe6, 0xb6, 0x45, + 0xf6, 0xf1, 0x23, 0x50, 0xd5, 0xf9, 0x5a, 0xaa, 0x2b, 0x88, 0x99, 0x14, 0xfb, 0x2d, 0xef, 0xdc, + 0x80, 0x87, 0x2c, 0x41, 0xbb, 0x4c, 0x86, 0x88, 0x71, 0x7c, 0xae, 0x13, 0xc8, 0x25, 0x8e, 0x7c, + 0x68, 0xa5, 0x3d, 0x5a, 0x54, 0x9a, 0x55, 0x63, 0x9b, 0xa7, 0x7e, 0xce, 0x4d, 0x21, 0x5d, 0x23, + 0x51, 0x6d, 0x89, 0x7a, 0xd4, 0xc9, 0x79, 0x0b, 0xe1, 0x84, 0x4c, 0xc0, 0x07, 0x8d, 0xca, 0x6c, + 0xc1, 0xbe, 0xb0, 0xda, 0x61, 0xb1, 0x15, 0xb7, 0xe8, 0x62, 0x46, 0x50, 0x9a, 0x3a, 0xcc, 0xd8, + 0x59, 0x48, 0x94, 0x15, 0x6b, 0x61, 0x7e, 0x8c, 0x74, 0xdc, 0x9a, 0x39, 0x1c, 0xda, 0x09, 0x90, + 0x39, 0x16, 0xc7, 0x66, 0x75, 0x65, 0xf4, 0x68, 0x2e, 0x0b, 0xe2, 0x45, 0x82, 0x3b, 0x1c, 0x86, + 0x25, 0xf7, 0x07, 0x84, 0x31, 0x1d, 0x5a, 0xbb, 0x7b, 0x0c, 0xa0, 0x0c, 0x9f, 0xc8, 0x65, 0x42, + 0x1a, 0xf8, 0x16, 0xec, 0x5e, 0x52, 0x0a, 0x70, 0xa6, 0x50, 0xe9, 0x17, 0xbb, 0x51, 0xc9, 0xab, + 0x7e, 0x7d, 0xf3, 0xbe, 0xbc, 0xa3, 0xdc, 0x13, 0x6b, 0xc9, 0xce, 0x0d, 0xe1, 0x97, 0xe0, 0x28, + 0x43, 0x09, 0x2d, 0xcb, 0x8e, 0xd8, 0x1d, 0x83, 0x98, 0x8a, 0x72, 0x20, 0x89, 0xaf, 0xb9, 0x80, + 0x71, 0xb8, 0xe4, 0x58, 0x7c, 0xa5, 0x55, 0xfa, 0x74, 0x15, 0x77, 0x9e, 0x2f, 0xa4, 0x5a, 0x67, + 0x44, 0x6f, 0x84, 0x0e, 0x64, 0x5a, 0x0c, 0xf0, 0x50, 0xe7, 0x33, 0x6f, 0x40, 0xc5, 0xc5, 0x04, + 0xd4, 0x75, 0x50, 0x07, 0x9a, 0xb4, 0xef, 0x70, 0x7c, 0x43, 0x40, 0x01, 0xde, 0xea, 0x80, 0x12, + 0x38, 0xe7, 0xea, 0xdb, 0x66, 0x23, 0x3c, 0x47, 0xc2, 0x98, 0x9e, 0x9d, 0xc1, 0x76, 0x47, 0x9d, + 0xe6, 0xc7, 0x4a, 0x62, 0x11, 0xca, 0x53, 0xbe, 0xad, 0x11, 0x39, 0x37, 0x96, 0xf8, 0x59, 0x12, + 0xe2, 0x23, 0xe3, 0xc0, 0x0a, 0xae, 0xac, 0x58, 0x52, 0x8c, 0x46, 0x2a, 0x8b, 0x55, 0xee, 0xed, + 0x8e, 0x97, 0x6e, 0x44, 0xcc, 0x22, 0xa4, 0x7e, 0xbd, 0xcc, 0xee, 0x39, 0x4d, 0x9a, 0x70, 0x38, + 0x88, 0x41, 0x45, 0x38, 0xc3, 0x9c, 0x49, 0xca, 0x83, 0x79, 0x03, 0xe5, 0xca, 0x3a, 0x2d, 0xcb, + 0xab, 0xa7, 0xf7, 0x7f, 0xdf, 0x47, 0x6a, 0x5a, 0x87, 0xf3, 0x61, 0x5b, 0x2f, 0x10, 0x03, 0x28, + 0xd4, 0xba, 0xb0, 0x13, 0xc8, 0x57, 0xef, 0x06, 0xf1, 0x5a, 0x8e, 0xd0, 0xd4, 0xe9, 0x07, 0xc5, + 0x18, 0x71, 0xe7, 0x25, 0x35, 0xe3, 0x33, 0x6c, 0x14, 0xc5, 0xcc, 0xad, 0xaf, 0x50, 0xd3, 0x6d, + 0x61, 0xe8, 0x5b, 0xa9, 0x20, 0xdf, 0x26, 0x07, 0x02, 0xaf, 0xc0, 0xe4, 0xee, 0x16, 0x01, 0x47, + 0x4c, 0x37, 0x8a, 0xce, 0x5b, 0x2e, 0xf9, 0xdd, 0x39, 0xb6, 0xed, 0xb1, 0x54, 0x42, 0x2d, 0x7f, + 0xbb, 0xfe, 0x4b, 0x04, 0x14, 0x89, 
0x64, 0x9e, 0x92, 0x6a, 0x18, 0x00, 0xae, 0x98, 0x69, 0x7e, + 0xc3, 0xfd, 0xec, 0xf9, 0x4a, 0xeb, 0x9a, 0x22, 0xe3, 0xca, 0x45, 0xef, 0x1f, 0x78, 0xd6, 0x2d, + 0x53, 0x43, 0x66, 0xe3, 0xa2, 0x1f, 0x78, 0x68, 0xad, 0x0d, 0xb9, 0x85, 0xbe, 0x30, 0x77, 0xde, + 0x33, 0xec, 0xf4, 0x46, 0x6e, 0x31, 0xc0, 0xe0, 0x9a, 0x5d, 0x8e, 0x78, 0xfc, 0xe3, 0x9b, 0x6c, + 0xc5, 0x13, 0xc5, 0x1a, 0x3b, 0x48, 0x63, 0x89, 0xc1, 0x43, 0x25, 0xb6, 0x49, 0x7c, 0x67, 0xfe, + 0xfb, 0xd0, 0xac, 0xc1, 0xc6, 0xf3, 0x43, 0x3e, 0x53, 0xa8, 0xfe, 0x9b, 0xdc, 0x2f, 0xc1, 0x66, + 0xa2, 0x48, 0xd5, 0xcf, 0xdf, 0x8f, 0xd0, 0xd4, 0xb6, 0xfb, 0xe9, 0xcf, 0xa1, 0x65, 0xb2, 0x5e, + 0x70, 0xfd, 0x36, 0xb1, 0x2f, 0x50, 0x8c, 0x96, 0x3c, 0x7d, 0x17, 0x86, 0x77, 0x50, 0x3c, 0xc1, + 0x4a, 0xa0, 0x0f, 0xb5, 0x3d, 0x8f, 0x4c, 0x49, 0x11, 0x12, 0xc9, 0x21, 0x9e, 0xaa, 0xd8, 0x55, + 0x4a, 0x95, 0xf2, 0x00, 0xcc, 0x4e, 0x63, 0x7b, 0xa0, 0x9c, 0x35, 0x94, 0x2b, 0x39, 0xef, 0x9f, + 0xb7, 0xe6, 0xe2, 0xd7, 0xb2, 0xfa, 0xef, 0x01, 0xf9, 0xeb, 0x02, 0xf9, 0xcb, 0x85, 0x9d, 0x51, + 0xdd, 0xfc, 0xbf, 0xfe, 0x48, 0xd5, 0xf2, 0x12, 0x4b, 0x83, 0xbf, 0x90, 0x6a, 0x4e, 0x04, 0x22, + 0x31, 0xf4, 0xb2, 0xf9, 0x77, 0x9b, 0xa6, 0xb9, 0xc2, 0xe5, 0x15, 0xc6, 0x76, 0x58, 0x04, 0xa2, + 0x68, 0xe6, 0xd5, 0x57, 0x19, 0x79, 0x65, 0x1b, 0xa9, 0xf1, 0x95, 0x26, 0x0f, 0x18, 0xc1, 0x85, + 0x78, 0x9e, 0x77, 0x6b, 0x62, 0x83, 0x9a, 0x62, 0xfd, 0x2f, 0xac, 0x22, 0x8d, 0xb6, 0x9e, 0x58, + 0x93, 0xea, 0xe3, 0x84, 0xec, 0xa2, 0x5e, 0x5f, 0xe1, 0xe9, 0x5c, 0x93, 0x18, 0x75, 0x1b, 0x4a, + 0xba, 0x3f, 0x45, 0xbc, 0xf6, 0xc3, 0xcd, 0x87, 0x26, 0xcd, 0x66, 0x42, 0x2c, 0xaf, 0xf3, 0x9d, + 0xba, 0x6a, 0x43, 0xfd, 0x51, 0x3c, 0xe9, 0xb2, 0xd7, 0xf0, 0xa5, 0xb3, 0xd5, 0x35, 0x5b, 0xed, + 0x78, 0x5e, 0x27, 0x86, 0x2d, 0xaa, 0x59, 0xb5, 0x32, 0x65, 0xdc, 0x84, 0x36, 0x1a, 0xb5, 0x3e, + 0x5c, 0x4c, 0x3f, 0xdf, 0x40, 0x0b, 0xe1, 0x8e, 0x42, 0xf0, 0xbc, 0x28, 0xa5, 0xfb, 0x87, 0x74, + 0xb4, 0x72, 0xb7, 0x49, 0x80, 0x2d, 0x50, 0xa1, 0x39, 0x0f, 0xb2, 0xe7, 0x84, 0x7b, 0x30, 0xd8, + 0x4c, 0xc7, 0x91, 0xef, 0xe9, 0x18, 0xa0, 0xee, 0x11, 0x6a, 0xe7, 0x12, 0xbd, 0xfb, 0x18, 0x1a, + 0xef, 0x0d, 0x20, 0x08, 0x75, 0xe4, 0xd0, 0x0a, 0x38, 0xd1, 0xf9, 0xb5, 0x8c, 0xb5, 0x5e, 0xf7, + 0x83, 0x78, 0xba, 0x3f, 0xd6, 0xc3, 0x46, 0x43, 0xc9, 0x96, 0xd3, 0xda, 0x0a, 0xca, 0xc5, 0x9c, + 0x09, 0x71, 0xd3, 0xe6, 0xbc, 0xe8, 0xd2, 0xb0, 0x44, 0xc7, 0xc2, 0xb3, 0x93, 0xfd, 0x0c, 0x31, + 0x80, 0x15, 0x57, 0xee, 0x1b, 0x3c, 0xba, 0x1a, 0x45, 0x0c, 0xc6, 0x58, 0x90, 0xde, 0xca, 0xe1, + 0x00, 0xec, 0x5d, 0x0e, 0xd0, 0x16, 0x5a, 0xef, 0x15, 0x07, 0xb3, 0xec, 0x55, 0x05, 0xd5, 0xb9, + 0x33, 0x48, 0x5c, 0xeb, 0x27, 0x10, 0xad, 0xc4, 0xb6, 0x6f, 0x3b, 0x57, 0x59, 0xe3, 0x05, 0x15, + 0x49, 0x70, 0x16, 0x12, 0x95, 0x18, 0xdf, 0xb6, 0xde, 0xfc, 0xa7, 0xf4, 0x18, 0x3f, 0x79, 0xfb, + 0x72, 0x4f, 0xd1, 0x38, 0x89, 0x92, 0xad, 0x67, 0x50, 0x64, 0x31, 0x6e, 0x2e, 0xcf, 0x49, 0x2b, + 0x77, 0x62, 0x3e, 0x4c, 0x66, 0xe2, 0x80, 0x11, 0xe8, 0x44, 0x05, 0xd8, 0x28, 0xe9, 0x6c, 0x0e, + 0x45, 0x2e, 0x47, 0x78, 0xfd, 0xa1, 0x9b, 0x82, 0x12, 0xe3, 0x62, 0x2d, 0x51, 0x19, 0x76, 0x6b, + 0x0d, 0x3b, 0x4c, 0xb6, 0x3d, 0x82, 0x5b, 0x54, 0x39, 0x7d, 0x3b, 0x3d, 0x09, 0x46, 0x13, 0x94, + 0x44, 0xf5, 0xf9, 0x4c, 0x78, 0x21, 0x1d, 0x36, 0xae, 0xc5, 0x96, 0xc8, 0xa8, 0x03, 0x57, 0x00, + 0x59, 0xe4, 0x3f, 0x4a, 0xe1, 0x46, 0xf3, 0x31, 0x92, 0x3f, 0xb5, 0xde, 0x5c, 0xc6, 0x83, 0x8e, + 0xec, 0x83, 0x41, 0x7e, 0xda, 0x45, 0x7e, 0x28, 0x36, 0x0b, 0x97, 0x3e, 0x1c, 0xad, 0x86, 0xd4, + 0xf4, 0xb0, 0x7c, 0x5b, 0xdd, 0x07, 0xc1, 0x9c, 0x39, 0x68, 
0xc9, 0x74, 0x01, 0x52, 0x57, 0x1a, + 0x0a, 0xa9, 0xf4, 0x1f, 0xa6, 0xe8, 0xe3, 0x95, 0xca, 0x83, 0x7c, 0x18, 0x07, 0x86, 0xbf, 0x68, + 0x32, 0x23, 0x97, 0xd8, 0x26, 0x8a, 0x27, 0x56, 0xda, 0x32, 0xa2, 0x85, 0x39, 0xc6, 0xa9, 0xd3, + 0xad, 0x1c, 0xc5, 0x6f, 0x62, 0xf1, 0xd7, 0x39, 0x09, 0xf0, 0x12, 0x81, 0x2a, 0x79, 0xbe, 0x05, + 0xed, 0x76, 0x35, 0x51, 0x4e, 0xef, 0x52, 0x23, 0x29, 0x5f, 0x4e, 0x85, 0x76, 0x9b, 0xcf, 0xe3, + 0x2a, 0x93, 0x87, 0x57, 0xbc, 0x84, 0x94, 0xd1, 0xe4, 0xe9, 0x59, 0x2b, 0x02, 0x25, 0x28, 0xc2, + 0x68, 0xaa, 0xe2, 0x3e, 0x06, 0x97, 0xe1, 0xad, 0x1f, 0x1f, 0x91, 0xb9, 0x87, 0xe4, 0x3f, 0x36, + 0x66, 0xf8, 0x04, 0xb8, 0x1a, 0x81, 0x01, 0x57, 0x26, 0x91, 0x3a, 0xa0, 0x9d, 0x4c, 0x17, 0x61, + 0x5f, 0x65, 0x97, 0x28, 0xb6, 0x39, 0x65, 0x63, 0xf0, 0xc7, 0x25, 0xc7, 0x04, 0x04, 0x71, 0x7c, + 0x2e, 0x54, 0xba, 0x99, 0xa5, 0x91, 0x07, 0x27, 0xeb, 0xbf, 0x95, 0xdf, 0x43, 0xe0, 0xbb, 0x25, + 0x40, 0x82, 0x1e, 0x08, 0x94, 0x25, 0x2f, 0x31, 0xb1, 0xc5, 0x0f, 0xdd, 0xd1, 0xe5, 0xf0, 0x8e, + 0x84, 0x5e, 0xed, 0xd2, 0x9c, 0x01, 0x4e, 0x83, 0x26, 0x0b, 0x9c, 0xab, 0xb0, 0xaa, 0x74, 0x46, + 0xda, 0x74, 0x62, 0x82, 0x49, 0x42, 0x5b, 0x76, 0x48, 0xb9, 0x19, 0x77, 0x27, 0xc2, 0x5c, 0x7c, + 0x5a, 0xde, 0xb8, 0xd9, 0x07, 0xce, 0xf6, 0x1c, 0x35, 0x72, 0x84, 0xa1, 0x8f, 0x64, 0x88, 0xa9, + 0x70, 0xc7, 0x0d, 0x51, 0x11, 0x9e, 0xf1, 0x51, 0x63, 0x25, 0x21, 0xe7, 0x6b, 0x30, 0x98, 0x92, + 0xf3, 0x36, 0xcd, 0x8c, 0x0b, 0xf7, 0x1f, 0xcb, 0xed, 0xa3, 0x8d, 0x49, 0xe4, 0x95, 0x3c, 0xaf, + 0x3c, 0xed, 0xfc, 0x7c, 0xdb, 0x83, 0xee, 0x61, 0xb7, 0x69, 0x9f, 0x42, 0x2b, 0xd7, 0x9f, 0xd8, + 0xb4, 0x9c, 0x22, 0x26, 0xf0, 0x68, 0xdb, 0x0c, 0xcd, 0x9a, 0xf6, 0x29, 0xdb, 0x42, 0x25, 0x61, + 0x52, 0x0e, 0xe5, 0x85, 0x21, 0xe8, 0x1f, 0x54, 0x8d, 0x8f, 0x29, 0x42, 0x87, 0xdf, 0x10, 0x32, + 0x63, 0xf9, 0x1a, 0x94, 0x04, 0xb6, 0xa7, 0x0f, 0xeb, 0xad, 0x11, 0xb4, 0x2b, 0xd3, 0x21, 0xa6, + 0x51, 0xbf, 0x10, 0x8f, 0x38, 0x87, 0x58, 0xf3, 0xa0, 0x41, 0x5a, 0xb7, 0xd3, 0xb5, 0x86, 0x84, + 0xba, 0x4e, 0x8a, 0x57, 0xe1, 0xf9, 0xbf, 0xa4, 0x45, 0x5a, 0x78, 0xcf, 0x0d, 0xf4, 0x17, 0xa3, + 0x58, 0x78, 0x83, 0x50, 0x25, 0x5c, 0x2e, 0x8e, 0x94, 0x97, 0xa9, 0x99, 0x4e, 0xe5, 0xa8, 0xa0, + 0x8a, 0x03, 0x3b, 0x0e, 0x02, 0xd7, 0x54, 0xc0, 0x99, 0xbf, 0xbe, 0xc4, 0x4a, 0xe9, 0x38, 0x20, + 0xc6, 0x28, 0x2d, 0x4c, 0xfe, 0x51, 0xf5, 0x02, 0xb3, 0x5b, 0x64, 0xaa, 0xad, 0x58, 0xf9, 0xe2, + 0x8d, 0x8c, 0xb2, 0x04, 0xe9, 0xb5, 0x60, 0x8e, 0x13, 0x6d, 0xf8, 0x34, 0x86, 0x22, 0xeb, 0x79, + 0x95, 0x1b, 0xe4, 0x8e, 0x0b, 0xd1, 0x7e, 0xdf, 0x76, 0x19, 0x03, 0xf6, 0x73, 0xc2, 0xee, 0x7c, + 0x12, 0xe2, 0x84, 0xdb, 0x8f, 0x38, 0xdb, 0x39, 0xeb, 0x2e, 0xd8, 0xaa, 0x79, 0xde, 0x56, 0xed, + 0x3f, 0xba, 0xa5, 0xd9, 0x76, 0xe6, 0x28, 0x82, 0x47, 0xf9, 0xd2, 0xd3, 0xce, 0x1e, 0x71, 0xe1, + 0xb7, 0x1e, 0x65, 0x04, 0x2b, 0xba, 0x4c, 0x56, 0x42, 0x0b, 0x1c, 0x2a, 0xeb, 0x78, 0x34, 0xfa, + 0x75, 0xcd, 0xd6, 0x57, 0xf6, 0x48, 0x18, 0xe5, 0xe9, 0xad, 0x3c, 0xca, 0x72, 0x49, 0x60, 0xc3, + 0x9d, 0xa4, 0xf7, 0x24, 0x6b, 0xee, 0x96, 0x19, 0xc5, 0xd7, 0xf0, 0x70, 0x5c, 0x11, 0x9b, 0x60, + 0xa8, 0x13, 0x4f, 0x69, 0x79, 0xf9, 0xa9, 0xe2, 0x87, 0x25, 0x87, 0x32, 0x7a, 0xe0, 0xee, 0xe7, + 0x7f, 0xd4, 0x97, 0xe3, 0xd2, 0xab, 0xdc, 0xf8, 0x7f, 0x01, 0xf3, 0x66, 0xcf, 0x85, 0xe0, 0x83, + 0xc5, 0xa3, 0xe1, 0xbf, 0xe2, 0x82, 0xb7, 0x1c, 0x1f, 0x8c, 0x15, 0x32, 0x88, 0x59, 0x76, 0xdf, + 0xa1, 0xc8, 0xf4, 0x05, 0x54, 0x56, 0x6d, 0x72, 0xe6, 0x2d, 0xd6, 0x32, 0x46, 0xfc, 0x64, 0xa7, + 0x40, 0xcb, 0xb5, 0x19, 0x0f, 0x72, 0x33, 0x71, 0xa1, 0x1c, 0x56, 0xfd, 0x2b, 0x3b, 
0x4e, 0x8e, + 0xcb, 0x76, 0x0b, 0xc5, 0x97, 0x45, 0x83, 0xfd, 0x27, 0x3b, 0x78, 0xa7, 0x25, 0x2a, 0x6a, 0xab, + 0x9d, 0x74, 0xe3, 0xf4, 0xbf, 0xa6, 0xcc, 0xa3, 0x54, 0x5c, 0xa1, 0xb2, 0x5f, 0xd5, 0xe5, 0x96, + 0xd8, 0x5b, 0xfc, 0x43, 0x42, 0x95, 0xcd, 0xc4, 0x53, 0x93, 0xc9, 0x75, 0xde, 0x8b, 0xbc, 0x5d, + 0x3d, 0x24, 0x9e, 0x63, 0x77, 0xee, 0x59, 0x70, 0xbc, 0xf9, 0xf6, 0x27, 0xec, 0x9a, 0x62, 0xb3, + 0xd0, 0x76, 0x0e, 0x87, 0x7e, 0x0a, 0xee, 0x85, 0x71, 0x69, 0x7d, 0x60, 0x50, 0x07, 0x32, 0x21, + 0x17, 0x2f, 0xb1, 0xe8, 0x12, 0x05, 0x94, 0xf4, 0xce, 0x28, 0x8d, 0xfc, 0xaa, 0x08, 0x78, 0x49, + 0x68, 0xeb, 0x48, 0x4b, 0x55, 0x8d, 0x3f, 0x5c, 0xc7, 0x72, 0xaf, 0x32, 0x6a, 0x43, 0x5f, 0x3a, + 0xf9, 0x28, 0x68, 0xe9, 0x19, 0x9d, 0xd4, 0x61, 0xf9, 0xbe, 0x63, 0x1a, 0x70, 0xd8, 0xa2, 0x38, + 0xd7, 0x59, 0xe7, 0x23, 0xee, 0x00, 0x3e, 0xf2, 0xbf, 0x95, 0x7b, 0x7f, 0x04, 0x04, 0x12, 0x07, + 0x85, 0x80, 0x8c, 0x6d, 0x30, 0xc5, 0x17, 0x5a, 0xff, 0xa4, 0xc3, 0xc0, 0x04, 0xd9, 0x57, 0x10, + 0x04, 0x1d, 0xac, 0x74, 0xad, 0x7a, 0xc4, 0x0e, 0x43, 0xb4, 0x75, 0x9f, 0xe7, 0x90, 0xf7, 0x7b, + 0xd2, 0x92, 0x89, 0x1f, 0xba, 0x3a, 0x1b, 0xf1, 0x7c, 0x25, 0xbf, 0x52, 0xd1, 0x18, 0x5c, 0x4d, + 0x62, 0x49, 0xc8, 0x9f, 0x1a, 0xee, 0xca, 0x8f, 0x0b, 0x90, 0xa1, 0x56, 0x0d, 0x3f, 0xc3, 0xd9, + 0x27, 0x2d, 0xd9, 0xe6, 0xe6, 0x5b, 0xe9, 0x19, 0x85, 0x96, 0x9b, 0xe7, 0xb0, 0x72, 0x46, 0x6a, + 0x86, 0xcf, 0x2c, 0x31, 0x6e, 0x93, 0xc3, 0x5c, 0x1c, 0x06, 0x33, 0x9f, 0xd1, 0xdc, 0x87, 0x4d, + 0x0c, 0xa2, 0x4c, 0x3e, 0x7b, 0x32, 0x5b, 0xfd, 0x56, 0x15, 0x5a, 0xdd, 0x45, 0xc2, 0x86, 0x43, + 0x3c, 0x12, 0x4f, 0x19, 0x44, 0x6d, 0xb5, 0x87, 0xe9, 0x30, 0xd2, 0x94, 0x20, 0xa1, 0x90, 0x25, + 0x5e, 0xc7, 0xa5, 0xe7, 0x67, 0x35, 0xba, 0xaa, 0x6e, 0x9e, 0xa7, 0x76, 0xec, 0x84, 0x05, 0xee, + 0xc9, 0x85, 0xa8, 0x5b, 0x7e, 0xf7, 0x9b, 0xe5, 0x81, 0xef, 0x1c, 0xd6, 0x83, 0x5c, 0x05, 0x7e, + 0x24, 0x5d, 0x5b, 0xe5, 0xcc, 0x88, 0xc9, 0x95, 0x73, 0xf9, 0xac, 0x1e, 0xf2, 0xd3, 0xe5, 0x1f, + 0x8e, 0x82, 0x83, 0x37, 0xed, 0xe6, 0xa0, 0x19, 0xee, 0x8f, 0x67, 0x02, 0x89, 0x7c, 0x5c, 0x4b, + 0x59, 0x8a, 0xe8, 0x00, 0xcd, 0x6e, 0xa2, 0x2b, 0x68, 0x87, 0xbe, 0xc7, 0xe0, 0xc5, 0x32, 0xf6, + 0xa1, 0xa0, 0xa5, 0xee, 0x79, 0x4a, 0x4b, 0x8e, 0x6a, 0x64, 0xe7, 0xfb, 0x69, 0x3d, 0x49, 0x13, + 0xe2, 0x69, 0xbe, 0xfe, 0x79, 0x5d, 0xcf, 0xc2, 0xbb, 0x1d, 0x58, 0x4e, 0x05, 0xa1, 0x06, 0x4e, + 0x1a, 0x46, 0x56, 0x70, 0x52, 0x54, 0xcb, 0x9f, 0x2d, 0x90, 0xc8, 0xbd, 0x5c, 0xaf, 0xc3, 0xef, + 0x5d, 0x07, 0x64, 0xe2, 0x32, 0xf3, 0xdc, 0xb7, 0xee, 0xda, 0x22, 0x96, 0x73, 0x82, 0xe3, 0xd7, + 0x18, 0x43, 0xc9, 0x0c, 0xad, 0x11, 0x4e, 0xdd, 0x9c, 0x04, 0xcb, 0xb7, 0x8c, 0x7a, 0x25, 0xa6, + 0x3f, 0x93, 0x61, 0x32, 0x4d, 0xd5, 0x93, 0x6c, 0x45, 0x8a, 0x5c, 0xf4, 0x2a, 0x3a, 0x6e, 0x47, + 0xd5, 0x88, 0x44, 0x18, 0xf1, 0x7e, 0x97, 0x31, 0x83, 0xc3, 0x01, 0x55, 0x42, 0xee, 0x81, 0x7a, + 0xee, 0x59, 0x82, 0xc3, 0x48, 0x38, 0x5d, 0x30, 0xd3, 0x04, 0x64, 0x14, 0x9c, 0x7a, 0x47, 0xa0, + 0xa8, 0x33, 0x3a, 0x2e, 0x3e, 0x81, 0xf6, 0xeb, 0x59, 0x2e, 0xc8, 0x7b, 0x73, 0xd3, 0xa8, 0x7c, + 0x2e, 0xb2, 0x26, 0x46, 0x17, 0x8e, 0xad, 0xd1, 0x27, 0x77, 0xd7, 0x77, 0x77, 0x81, 0x9b, 0x35, + 0x1a, 0xf5, 0x4f, 0xe8, 0x0c, 0x38, 0x55, 0x32, 0x30, 0x7c, 0x52, 0x89, 0xc9, 0x22, 0x19, 0x43, + 0x5d, 0x7b, 0xff, 0x56, 0xda, 0x60, 0xe2, 0xbb, 0xfd, 0xc4, 0xd5, 0x80, 0x3f, 0x66, 0x04, 0x83, + 0xaa, 0x81, 0x3c, 0xd8, 0x8a, 0xc8, 0x30, 0x9a, 0x6e, 0xcc, 0xa4, 0x5f, 0xec, 0x95, 0xed, 0x61, + 0x91, 0x1f, 0xc0, 0x50, 0xde, 0x2b, 0xdd, 0xd8, 0x92, 0xda, 0xe3, 0x6e, 0x56, 0x6e, 0xd0, 0xc0, + 0xb8, 0xd0, 
0x26, 0xac, 0x1d, 0xe7, 0x6e, 0x13, 0x51, 0x4b, 0xce, 0x13, 0x83, 0x59, 0x57, 0xbb, + 0xc7, 0x62, 0x76, 0x29, 0x0d, 0x66, 0x26, 0xe4, 0x61, 0xe7, 0xbd, 0x80, 0xc7, 0x55, 0x9d, 0x41, + 0xd6, 0xdc, 0x22, 0x0b, 0x26, 0xbe, 0x2e, 0xd6, 0x7c, 0x65, 0xd2, 0xf4, 0x4e, 0x09, 0xdb, 0x27, + 0x27, 0x10, 0xc3, 0x41, 0xc6, 0x6a, 0xae, 0xf4, 0xa4, 0xdb, 0x77, 0x8a, 0x0d, 0x6f, 0xb3, 0xeb, + 0x8d, 0x83, 0xdd, 0x68, 0x35, 0x0b, 0x7b, 0x14, 0x3d, 0xc3, 0x74, 0x20, 0xbc, 0xcf, 0xc4, 0x6a, + 0x6c, 0xbd, 0xf3, 0x7e, 0x89, 0xef, 0x2a, 0xc0, 0x84, 0xcd, 0x9d, 0xaf, 0x16, 0x45, 0x38, 0xd8, + 0x2c, 0x9e, 0x7c, 0xd3, 0xcd, 0x25, 0x1f, 0x39, 0x7e, 0x5f, 0x42, 0xaf, 0x79, 0x90, 0x31, 0xce, + 0xaa, 0xae, 0x58, 0x99, 0x80, 0x67, 0x32, 0x4f, 0x14, 0x99, 0x13, 0x14, 0xaa, 0xab, 0x9e, 0x6b, + 0x5a, 0xd4, 0xef, 0x25, 0x5a, 0xf9, 0xdd, 0x67, 0xa6, 0x91, 0xf6, 0xef, 0xa3, 0xf2, 0xf0, 0x9f, + 0x6e, 0x6c, 0x60, 0xfb, 0xe8, 0x34, 0xa6, 0xaa, 0x10, 0x34, 0x00, 0x0b, 0xc5, 0x85, 0x33, 0x21, + 0xba, 0x9a, 0xab, 0xe4, 0x98, 0xb2, 0x87, 0x4a, 0x53, 0x76, 0x28, 0x0c, 0xce, 0x82, 0x57, 0x84, + 0xe8, 0x59, 0x37, 0xd5, 0xe9, 0x14, 0xcd, 0xc2, 0xb7, 0x1c, 0xbc, 0xfa, 0xb9, 0x26, 0x1a, 0x91, + 0x34, 0x2e, 0xb1, 0xf4, 0x34, 0xb4, 0x9a, 0x10, 0xbf, 0x9d, 0x98, 0xe6, 0x68, 0x92, 0xd3, 0x70, + 0xd7, 0x29, 0x0a, 0x8e, 0xed, 0xe6, 0x7b, 0x11, 0x76, 0x12, 0xe8, 0x6f, 0x47, 0x59, 0x83, 0x12, + 0x0c, 0x1e, 0xb7, 0x75, 0xbd, 0x78, 0xaa, 0x45, 0x5c, 0x45, 0x8a, 0xea, 0x7f, 0x4f, 0xef, 0x92, + 0x5f, 0x59, 0x03, 0x60, 0x48, 0x3d, 0x73, 0x51, 0xcd, 0x96, 0x97, 0xe0, 0x59, 0x72, 0x96, 0xf5, + 0xcb, 0xf2, 0x2a, 0xe5, 0x66, 0x06, 0x44, 0x4a, 0x65, 0x37, 0x33, 0xb5, 0x97, 0x85, 0xb5, 0x08, + 0x80, 0x79, 0x1c, 0x45, 0x5b, 0x7b, 0x03, 0xa7, 0x01, 0xa5, 0xe6, 0xe2, 0xb2, 0x75, 0x3a, 0x99, + 0x27, 0xc5, 0x85, 0x3e, 0x7c, 0x79, 0xc9, 0xd0, 0x7b, 0x8c, 0x22, 0x9c, 0x97, 0x49, 0x15, 0x8d, + 0x3d, 0x34, 0xa4, 0xf3, 0x97, 0x55, 0xe1, 0xd3, 0x93, 0xa0, 0xa1, 0xae, 0xbc, 0xe6, 0xfc, 0xf6, + 0xce, 0x5f, 0x52, 0xc7, 0xa1, 0xd3, 0xb1, 0x16, 0xcc, 0x0b, 0xac, 0x4a, 0x58, 0x25, 0xc8, 0xc0, + 0x56, 0x49, 0x25, 0x2a, 0x0a, 0x51, 0x2d, 0x8e, 0x64, 0x9b, 0xec, 0x9e, 0x66, 0x69, 0xb1, 0xfb, + 0x62, 0x1d, 0xad, 0xfa, 0xa9, 0xd6, 0x1c, 0x5e, 0xc5, 0x8e, 0x1c, 0xb6, 0x65, 0xd0, 0x0d, 0x0a, + 0x29, 0xce, 0xa8, 0xb3, 0x35, 0x71, 0x0e, 0x58, 0x5d, 0xb0, 0x9a, 0xde, 0x05, 0x6e, 0xa3, 0xcd, + 0xd4, 0x62, 0xb6, 0xb8, 0x68, 0x83, 0x6f, 0x28, 0x13, 0x90, 0x7c, 0xd2, 0x5d, 0x3f, 0x39, 0x3e, + 0x74, 0x00, 0xf2, 0x2e, 0xe9, 0xb8, 0xf7, 0xf7, 0xf2, 0x63, 0xba, 0xb6, 0x95, 0x86, 0x87, 0xcf, + 0x7f, 0xae, 0x13, 0x68, 0xf4, 0xa9, 0x79, 0x99, 0xbe, 0x2b, 0x8a, 0xc8, 0x5c, 0xc4, 0xbf, 0x20, + 0x47, 0x55, 0xe7, 0xaa, 0x73, 0xc6, 0x69, 0xa7, 0x24, 0x99, 0x21, 0x2f, 0x93, 0x85, 0x7f, 0x44, + 0x64, 0xa8, 0x2d, 0x72, 0x13, 0x55, 0xa1, 0x98, 0x6c, 0xe7, 0x7f, 0xa6, 0x85, 0xf4, 0xdd, 0x98, + 0x2a, 0x2e, 0x83, 0xe5, 0x72, 0x9e, 0x1b, 0xa8, 0xc0, 0xad, 0x2c, 0xd6, 0x1f, 0xd7, 0x21, 0x40, + 0x2c, 0x94, 0xc8, 0x78, 0x94, 0x24, 0xba, 0x6a, 0xab, 0xb0, 0xb2, 0x7c, 0x84, 0xb0, 0x93, 0x63, + 0xba, 0x68, 0x3b, 0xfd, 0xb8, 0x2b, 0x25, 0x73, 0xce, 0x5c, 0x6b, 0xb9, 0x85, 0x38, 0x44, 0x1f, + 0xd0, 0x30, 0x89, 0x3c, 0x79, 0x07, 0xbd, 0xb1, 0xa8, 0x89, 0x02, 0x8d, 0x88, 0xe1, 0xd0, 0x62, + 0x6d, 0xcd, 0xdd, 0x6b, 0xba, 0x9e, 0x52, 0xa4, 0xed, 0x30, 0x7e, 0xa6, 0x36, 0xb0, 0x5e, 0x3f, + 0x34, 0x39, 0x4c, 0xc9, 0x99, 0x09, 0x65, 0x31, 0x65, 0xb1, 0x82, 0x37, 0xef, 0x6c, 0x30, 0xc1, + 0x88, 0xe7, 0xbc, 0x8a, 0x01, 0xf1, 0x4a, 0x07, 0xe5, 0x92, 0x14, 0x5f, 0x9d, 0xbf, 0x4b, 0xf1, + 0x95, 0x52, 0x99, 0x5d, 0x4c, 0xa7, 
0x83, 0x58, 0xbc, 0x14, 0x89, 0x90, 0x87, 0x18, 0xb0, 0x57, + 0x16, 0x70, 0x78, 0xca, 0x0c, 0xb2, 0x0a, 0x81, 0xf5, 0x82, 0x9e, 0xe6, 0x36, 0x1d, 0xa0, 0x34, + 0x04, 0x7b, 0x23, 0xc9, 0x5e, 0xd7, 0x71, 0x6d, 0x48, 0x60, 0x9d, 0x08, 0x58, 0x5f, 0x46, 0xed, + 0xa9, 0x17, 0xf8, 0x8d, 0xf9, 0x52, 0xc3, 0x74, 0xdf, 0x71, 0x21, 0xc2, 0x0b, 0x7c, 0xa4, 0x7d, + 0x96, 0xd2, 0x52, 0x6c, 0xb9, 0x33, 0x72, 0x8b, 0x1e, 0x07, 0x37, 0xa2, 0x79, 0x37, 0xa2, 0xfd, + 0xd1, 0x74, 0xa9, 0xef, 0x6e, 0xc9, 0xbc, 0x1f, 0x6c, 0x26, 0xa3, 0xa0, 0xda, 0x3c, 0x53, 0x7a, + 0x0d, 0xb1, 0x5d, 0xfc, 0x3b, 0x8c, 0x5f, 0xaa, 0xf0, 0xa8, 0x5e, 0x92, 0x3e, 0xa2, 0x80, 0xf4, + 0x50, 0xd6, 0x9d, 0x16, 0xa4, 0xdc, 0x3c, 0x8b, 0x4b, 0xeb, 0x7c, 0xb4, 0x0b, 0x00, 0x8a, 0x76, + 0x55, 0x24, 0xcf, 0xec, 0x39, 0xbb, 0xd8, 0x8f, 0xc0, 0x43, 0xf6, 0x44, 0xeb, 0x01, 0x1d, 0x6f, + 0x26, 0x38, 0xd1, 0x6a, 0xb6, 0xbf, 0xa2, 0xac, 0x51, 0x31, 0x90, 0x85, 0x1e, 0xf5, 0xc8, 0x00, + 0xff, 0x68, 0x33, 0x9f, 0x28, 0xcc, 0x55, 0x74, 0x51, 0xf3, 0x54, 0xd6, 0x4e, 0xc1, 0x43, 0xc5, + 0x28, 0x00, 0x7f, 0xd6, 0xd3, 0x3f, 0xe9, 0xd4, 0x5f, 0x47, 0x19, 0xe2, 0xad, 0x50, 0xe3, 0xd8, + 0xa3, 0x1d, 0xa1, 0x1b, 0x70, 0x30, 0x10, 0xaa, 0x1a, 0x78, 0xd6, 0x11, 0x26, 0x03, 0xac, 0xbf, + 0xd2, 0xeb, 0x62, 0x9e, 0x61, 0xa1, 0x5c, 0x98, 0x41, 0x39, 0x61, 0x5b, 0x5b, 0x18, 0xbf, 0x57, + 0x67, 0x6e, 0x03, 0x77, 0x42, 0x3a, 0x1f, 0xfd, 0x72, 0xd6, 0x1c, 0x8d, 0xc2, 0xf0, 0x28, 0xea, + 0xff, 0x68, 0x97, 0xde, 0x5e, 0xfd, 0xc4, 0xe2, 0x5f, 0xc5, 0x32, 0x6e, 0xc0, 0x33, 0xbf, 0x2d, + 0x06, 0x3e, 0x78, 0x58, 0x63, 0xf2, 0x07, 0x46, 0x43, 0x34, 0xb2, 0x5f, 0xe0, 0xd8, 0x8c, 0xe6, + 0x1a, 0x6d, 0x33, 0x0e, 0x01, 0x65, 0xae, 0x1c, 0x86, 0x64, 0x99, 0xc4, 0x2c, 0x5d, 0x49, 0x82, + 0xd8, 0x1e, 0x1b, 0x41, 0xbe, 0xbf, 0x41, 0x66, 0x1d, 0x6b, 0xc6, 0x7a, 0xcc, 0xfa, 0xe8, 0xed, + 0x23, 0x42, 0x97, 0xcd, 0x7b, 0xee, 0x93, 0x12, 0xa7, 0x81, 0x3c, 0x65, 0xb9, 0x9a, 0xd3, 0x8d, + 0x61, 0x48, 0x02, 0x98, 0xb4, 0x8c, 0x35, 0x27, 0xd4, 0x02, 0x3b, 0x29, 0xbc, 0x62, 0x5e, 0xdb, + 0x5a, 0xc3, 0x5a, 0x04, 0x51, 0x6d, 0xe3, 0x95, 0x72, 0x45, 0x91, 0xea, 0x73, 0xef, 0xd5, 0x4c, + 0x41, 0x09, 0x17, 0x3b, 0x11, 0xad, 0xc6, 0x78, 0x5a, 0x18, 0xe2, 0x4a, 0x41, 0xbd, 0xd6, 0xcc, + 0x26, 0xa3, 0xad, 0xfc, 0x80, 0xdd, 0xa5, 0x4b, 0x4b, 0x51, 0x3e, 0xb9, 0x13, 0x3d, 0x99, 0xda, + 0x30, 0xe4, 0xe5, 0x62, 0x3a, 0xfb, 0x14, 0x8d, 0x8f, 0x56, 0xe9, 0x37, 0xca, 0xd4, 0x76, 0xb5, + 0x0a, 0x6d, 0x43, 0x94, 0x63, 0xbc, 0x80, 0x81, 0x8f, 0x34, 0xa7, 0x21, 0x38, 0xd0, 0x2f, 0x93, + 0x2e, 0x67, 0x9b, 0x4c, 0xed, 0xae, 0xcb, 0x13, 0xe1, 0xf8, 0x3b, 0x53, 0xdb, 0x50, 0xb1, 0x87, + 0xd8, 0x69, 0x17, 0x36, 0x38, 0x8a, 0xdf, 0x30, 0xbe, 0xd9, 0x6f, 0x85, 0x71, 0xe7, 0x3d, 0xd3, + 0xc8, 0x70, 0x5e, 0x25, 0xf0, 0x34, 0x87, 0xef, 0x1c, 0x3e, 0x80, 0x6c, 0xe1, 0x0c, 0xd8, 0x63, + 0xbc, 0xf0, 0x9d, 0x3b, 0x61, 0x5c, 0xc7, 0xce, 0xfd, 0xec, 0x86, 0x96, 0xb5, 0xfb, 0xef, 0xf8, + 0x44, 0x85, 0x56, 0x2f, 0xa6, 0xf1, 0x29, 0x8d, 0x5a, 0x44, 0xe3, 0xe0, 0x86, 0xdd, 0xa7, 0x25, + 0xd7, 0x20, 0xd1, 0xf2, 0x66, 0xd8, 0x1f, 0xb3, 0x81, 0x35, 0x7e, 0xfb, 0xbe, 0x95, 0x28, 0xce, + 0x4c, 0xde, 0x95, 0xd8, 0x1b, 0xdc, 0xa9, 0x2b, 0xd2, 0x8a, 0x7e, 0x67, 0x90, 0xbe, 0xb8, 0x49, + 0x94, 0xf3, 0xa4, 0xd0, 0x7d, 0x13, 0xe7, 0xbe, 0x34, 0xfa, 0x26, 0x71, 0x3d, 0x47, 0x2e, 0x9b, + 0x0c, 0x78, 0xfc, 0x45, 0x05, 0xc1, 0x2f, 0x8a, 0xa4, 0xbf, 0xb9, 0x38, 0xc9, 0xa6, 0x5a, 0x81, + 0x49, 0x3c, 0xc5, 0x3a, 0x11, 0x41, 0xc0, 0x12, 0xfb, 0x2c, 0xe1, 0x16, 0xfa, 0x0e, 0xfb, 0xa4, + 0xd8, 0x3e, 0x6b, 0xd4, 0x79, 0x47, 0xc5, 0x29, 0xfe, 0xbb, 
0x63, 0x4b, 0x8c, 0x4f, 0xb3, 0x19, + 0x02, 0x30, 0x99, 0x21, 0xcf, 0x94, 0x82, 0xf1, 0x20, 0x4c, 0x00, 0x2c, 0xcb, 0x44, 0x3a, 0x36, + 0x2a, 0x86, 0xa4, 0x64, 0x93, 0x00, 0x02, 0xce, 0xf3, 0x10, 0x32, 0x9f, 0xed, 0xa5, 0x9e, 0x20, + 0x83, 0x60, 0x8e, 0xa2, 0xb0, 0xb2, 0x87, 0x3b, 0xce, 0x87, 0xba, 0x82, 0xc8, 0x99, 0xb3, 0x2b, + 0xf3, 0xe4, 0xb4, 0x98, 0xfa, 0x09, 0x6d, 0x6e, 0xbd, 0x14, 0x6f, 0xb9, 0xe6, 0xbb, 0xe4, 0x16, + 0xa2, 0x91, 0xbb, 0xf1, 0x74, 0xe4, 0xaa, 0x41, 0x9f, 0x8d, 0x06, 0x3c, 0x58, 0xca, 0xdb, 0x59, + 0x08, 0x94, 0x86, 0x01, 0x68, 0xe9, 0x99, 0x9f, 0xb6, 0x23, 0x29, 0x5b, 0xef, 0xad, 0xf3, 0xe6, + 0x96, 0x0e, 0x2b, 0x2c, 0x88, 0x6f, 0x59, 0x39, 0x6f, 0x38, 0x4b, 0x80, 0xa5, 0xc3, 0x54, 0x66, + 0x62, 0x59, 0x69, 0xce, 0x85, 0xf1, 0x08, 0xff, 0xdb, 0x7f, 0x5a, 0x86, 0x95, 0x0c, 0x0b, 0x46, + 0x0c, 0xaf, 0x8c, 0x2e, 0xc2, 0x03, 0x04, 0x8f, 0x1c, 0x0f, 0xe1, 0xb1, 0x0d, 0x57, 0xd8, 0xf5, + 0x6d, 0x5d, 0xfa, 0x78, 0xb1, 0xfa, 0x2a, 0xfe, 0x80, 0xce, 0xaf, 0x2e, 0x84, 0x1c, 0x50, 0x49, + 0x0a, 0x5f, 0x78, 0xd7, 0xc8, 0x1c, 0xbd, 0x44, 0x90, 0xf9, 0xf7, 0xe9, 0x25, 0x4e, 0x47, 0x7e, + 0xce, 0xe3, 0x44, 0x32, 0x47, 0xb5, 0x14, 0x1f, 0xa6, 0x4b, 0xfd, 0xee, 0xe0, 0xef, 0x08, 0x23, + 0xa1, 0x35, 0x97, 0xd6, 0xf0, 0x76, 0xac, 0xa4, 0xb9, 0x92, 0x18, 0x44, 0x33, 0x06, 0xd6, 0x22, + 0x0a, 0x98, 0x78, 0xd6, 0x5d, 0x25, 0x60, 0x26, 0x47, 0xfb, 0x58, 0xcc, 0xb4, 0x4b, 0xbd, 0xf1, + 0xe7, 0xee, 0xdd, 0x6b, 0x84, 0xaa, 0xe9, 0xd5, 0xc4, 0xe5, 0x38, 0x66, 0x0d, 0xbf, 0x6a, 0xa9, + 0x38, 0x15, 0x19, 0x4b, 0xc4, 0x23, 0xf1, 0xff, 0x80, 0xa7, 0x9c, 0x30, 0xc5, 0x0b, 0x76, 0x87, + 0x4e, 0x06, 0x15, 0x81, 0xa7, 0xa4, 0xef, 0xa5, 0xb9, 0x47, 0xcc, 0x7d, 0xe0, 0x2d, 0xf5, 0x4c, + 0xcc, 0xe6, 0x20, 0x08, 0x23, 0x36, 0xd6, 0x63, 0x97, 0x52, 0x31, 0xe9, 0xb5, 0xae, 0x1b, 0x04, + 0x53, 0x09, 0x62, 0xe5, 0x04, 0x5d, 0xad, 0x42, 0x33, 0x31, 0x93, 0xe2, 0x32, 0x47, 0x2c, 0x07, + 0x1a, 0x4b, 0xc3, 0x01, 0x09, 0xe2, 0x26, 0x6f, 0xc4, 0xf2, 0xcf, 0x1c, 0x9c, 0x0b, 0x93, 0x71, + 0x3f, 0x81, 0xde, 0x0a, 0x9b, 0xe9, 0xf5, 0x71, 0x48, 0x03, 0xd2, 0xe5, 0xb5, 0x9b, 0xab, 0x3c, + 0x7e, 0x78, 0xd4, 0x63, 0x7f, 0xc7, 0xfc, 0x39, 0x7d, 0xab, 0x81, 0x9f, 0x3e, 0x34, 0x52, 0x39, + 0xd6, 0x79, 0x67, 0xaa, 0x3b, 0x0d, 0x7c, 0x6c, 0xaa, 0xc8, 0xc6, 0x51, 0xc5, 0x85, 0x58, 0xf2, + 0x06, 0x67, 0xf4, 0x15, 0x3e, 0xb2, 0xa2, 0xc9, 0xd5, 0x44, 0x16, 0x1a, 0x3f, 0xd3, 0x0e, 0xc0, + 0x6c, 0x6a, 0x95, 0xfe, 0x06, 0x62, 0x91, 0x8d, 0xb8, 0xe9, 0xf9, 0x5b, 0x3a, 0x85, 0x99, 0x8a, + 0x7c, 0x97, 0x82, 0x90, 0xc6, 0xdf, 0xaf, 0x4e, 0x0d, 0x02, 0x73, 0x3b, 0x7c, 0x48, 0x1d, 0x34, + 0xdb, 0x33, 0x34, 0xc6, 0x38, 0x67, 0xee, 0x0d, 0xe3, 0x1d, 0xb6, 0x60, 0x33, 0xa3, 0x18, 0xb0, + 0x9a, 0xff, 0x79, 0x11, 0x17, 0x21, 0x1a, 0xc0, 0xb8, 0xd6, 0x31, 0x6b, 0x06, 0x93, 0x7a, 0x80, + 0x86, 0x2d, 0xd8, 0xc7, 0xcc, 0x36, 0xfc, 0x52, 0x1f, 0x54, 0x25, 0x8c, 0xf3, 0xb9, 0x7a, 0x36, + 0x44, 0x53, 0xba, 0x47, 0x2e, 0xbe, 0x86, 0x93, 0x0b, 0xb3, 0x69, 0x5e, 0xa0, 0x22, 0x39, 0x1f, + 0x9e, 0x90, 0x2a, 0xa1, 0x1a, 0x5e, 0x39, 0x32, 0x9f, 0x4a, 0x35, 0x2f, 0x7e, 0x81, 0xd2, 0xfe, + 0x27, 0x0b, 0xa7, 0xf7, 0x78, 0x52, 0xde, 0xe5, 0x52, 0x95, 0x61, 0x0d, 0x40, 0x9a, 0xae, 0x0d, + 0xbb, 0x0d, 0x03, 0x2b, 0x64, 0xd3, 0x0d, 0x73, 0x9b, 0x32, 0x19, 0xdd, 0xff, 0x32, 0x01, 0x0e, + 0xe3, 0xf8, 0x22, 0x7c, 0x61, 0x7c, 0x04, 0x9d, 0xc4, 0x6b, 0x09, 0xda, 0x0e, 0xf4, 0xa6, 0x64, + 0x62, 0x5d, 0xd4, 0xb9, 0xb5, 0x8d, 0xc0, 0x06, 0x56, 0x64, 0xd0, 0x02, 0xc1, 0xcd, 0x35, 0x9c, + 0x76, 0x85, 0xb6, 0x19, 0x38, 0x2c, 0x90, 0x94, 0x05, 0x38, 0x58, 0x50, 0xdb, 0xa0, 
0x53, 0x8d, + 0x9e, 0x5f, 0x3b, 0x38, 0x86, 0x88, 0xd6, 0x4d, 0xd9, 0xb8, 0x94, 0x95, 0x7f, 0x86, 0x73, 0x35, + 0xd9, 0x73, 0x6f, 0xb2, 0x64, 0x2d, 0x18, 0x54, 0x18, 0xea, 0x23, 0x2a, 0x37, 0xad, 0xa2, 0x69, + 0x0a, 0xe7, 0x1c, 0x08, 0xd1, 0x7c, 0xfc, 0x26, 0xad, 0xc6, 0x6d, 0x68, 0x56, 0xb4, 0xcc, 0xc0, + 0x50, 0xb7, 0xd3, 0xe3, 0xf1, 0x72, 0x2f, 0xde, 0x97, 0x0f, 0x28, 0x51, 0x4b, 0x50, 0xa9, 0xe3, + 0x95, 0xbd, 0xea, 0x89, 0xd5, 0x4d, 0x64, 0x61, 0x31, 0x93, 0xc4, 0x72, 0x15, 0x7e, 0xfd, 0x06, + 0x88, 0x2a, 0xd2, 0x7c, 0x10, 0xc6, 0x11, 0xb2, 0x70, 0x1d, 0x39, 0x46, 0x71, 0x46, 0x46, 0x3c, + 0x87, 0x74, 0x5c, 0xab, 0xd5, 0xac, 0xa3, 0x8c, 0xda, 0x4b, 0x8f, 0xc8, 0xa4, 0x26, 0xbc, 0xd1, + 0xab, 0x0f, 0x4c, 0x0b, 0xbb, 0x83, 0xdb, 0xe8, 0x2b, 0x0e, 0xc7, 0x88, 0x31, 0x5c, 0x39, 0xdb, + 0x0f, 0xf7, 0x91, 0xbe, 0xd3, 0xd8, 0xe9, 0x0a, 0x12, 0xc9, 0x96, 0xeb, 0xee, 0xe1, 0x1c, 0xf7, + 0x2f, 0xcb, 0x9b, 0x5d, 0x21, 0x0e, 0x17, 0x09, 0xcc, 0x2a, 0x68, 0xac, 0x8a, 0x56, 0xc0, 0xe5, + 0xd5, 0x55, 0xf1, 0x5d, 0x34, 0xee, 0xd9, 0xf4, 0x17, 0xaa, 0xbb, 0x38, 0x6c, 0x7e, 0x36, 0x04, + 0xf2, 0xea, 0xb8, 0x81, 0xf3, 0xc3, 0x67, 0x7b, 0xe8, 0x6a, 0x1c, 0x49, 0xee, 0x82, 0x3b, 0x11, + 0x32, 0xbc, 0xf4, 0xbb, 0x07, 0x9e, 0x87, 0xa7, 0x40, 0xc9, 0x34, 0x6f, 0x0d, 0xd4, 0x5b, 0xa5, + 0xa8, 0x9d, 0x32, 0xc1, 0x2b, 0x48, 0xe8, 0xaf, 0xd4, 0xdb, 0x16, 0x7a, 0xec, 0xf2, 0x2e, 0x22, + 0x55, 0xb0, 0x15, 0xeb, 0x42, 0x95, 0x0d, 0x6a, 0x70, 0x38, 0xf0, 0x08, 0xbf, 0x72, 0x13, 0x16, + 0xfd, 0xbc, 0x03, 0xda, 0x50, 0x71, 0xc1, 0x01, 0xb2, 0x12, 0x21, 0xe2, 0x47, 0x13, 0x6e, 0x54, + 0x63, 0x2d, 0x9c, 0x20, 0x4e, 0xc5, 0x5a, 0x90, 0x98, 0xb2, 0x47, 0xa2, 0xb1, 0x57, 0xce, 0x62, + 0xb5, 0x5b, 0x3a, 0x90, 0x80, 0x64, 0xd3, 0x56, 0x96, 0x99, 0xad, 0x3d, 0x8d, 0xdf, 0xca, 0xb8, + 0x97, 0x07, 0x27, 0x66, 0x74, 0x28, 0x2f, 0xfd, 0x5c, 0x1e, 0x48, 0x08, 0x14, 0x61, 0xc9, 0x69, + 0xd6, 0x8d, 0xf8, 0x6a, 0xf7, 0xd8, 0xbe, 0xa0, 0x41, 0xf2, 0x21, 0x27, 0x08, 0x78, 0x28, 0xf1, + 0xa5, 0x8b, 0xd6, 0x63, 0xd5, 0x4f, 0xa3, 0x17, 0xe7, 0x64, 0x29, 0x0c, 0x17, 0x5d, 0x38, 0xc8, + 0xe0, 0x0c, 0x70, 0x72, 0xd3, 0x78, 0xeb, 0x09, 0x87, 0xb7, 0x3f, 0x9d, 0x3d, 0xcf, 0x02, 0xe2, + 0xee, 0x76, 0x6d, 0x5a, 0xf0, 0x15, 0x09, 0xb5, 0x04, 0x7c, 0x58, 0x98, 0xbe, 0x44, 0x8d, 0xe6, + 0x4d, 0x79, 0xd1, 0x92, 0xe5, 0x04, 0x58, 0x37, 0xbe, 0xd8, 0x44, 0x9d, 0x05, 0x3b, 0x4a, 0x95, + 0x2b, 0x4e, 0x74, 0x11, 0x76, 0xff, 0x35, 0xb2, 0x19, 0x20, 0x7c, 0x22, 0xa5, 0x66, 0xf9, 0x44, + 0xf7, 0x80, 0x62, 0x71, 0x37, 0xcf, 0xd5, 0x72, 0x2c, 0x66, 0x6d, 0x28, 0xbd, 0x8a, 0x7a, 0x24, + 0x37, 0xca, 0xed, 0x8a, 0xfe, 0x4b, 0x98, 0x93, 0x5e, 0x00, 0x47, 0x18, 0xd3, 0xbe, 0xac, 0x88, + 0x9d, 0xd8, 0x2b, 0x6b, 0x69, 0x0e, 0x63, 0x6f, 0xbb, 0x03, 0xbc, 0xe1, 0x3b, 0x79, 0x4b, 0x90, + 0x97, 0x2f, 0x66, 0x72, 0x59, 0x0c, 0x53, 0xab, 0x81, 0x22, 0x17, 0x3e, 0x25, 0x19, 0xf0, 0x60, + 0x8c, 0xe1, 0x14, 0x62, 0x60, 0x60, 0x73, 0x26, 0x69, 0x46, 0x54, 0xc8, 0xa7, 0xc1, 0x53, 0xe9, + 0x0a, 0xe0, 0x3a, 0xa4, 0xdf, 0x57, 0x2c, 0x44, 0xe2, 0x3b, 0xce, 0x00, 0x7c, 0x13, 0x98, 0xcd, + 0xc4, 0xfb, 0x65, 0xe8, 0xdf, 0xee, 0x21, 0x9f, 0xf9, 0x79, 0x92, 0xe2, 0x63, 0x65, 0x57, 0xdb, + 0xd5, 0x8a, 0x98, 0x9d, 0x05, 0x9a, 0xa1, 0x10, 0xe2, 0xcb, 0xb5, 0xaf, 0x74, 0x11, 0xdb, 0x81, + 0x2b, 0xa9, 0x06, 0xa3, 0xc4, 0x89, 0xe8, 0xe4, 0x27, 0x43, 0x6e, 0xa0, 0xe5, 0xc9, 0xd9, 0x99, + 0xd5, 0x53, 0x3b, 0x96, 0x8e, 0xd7, 0xfa, 0xa7, 0xb2, 0x9e, 0x5f, 0x0b, 0xda, 0x04, 0x33, 0x80, + 0x0f, 0x00, 0x03, 0x6f, 0x32, 0xec, 0xa0, 0xc0, 0x9c, 0x98, 0x6d, 0x2e, 0x61, 0x8a, 0x17, 0x38, + 0xe8, 0x12, 
0xaf, 0xcf, 0x69, 0x04, 0xaf, 0x2f, 0xb3, 0x75, 0x33, 0xf5, 0x98, 0xb9, 0x6e, 0xe2, + 0x35, 0x26, 0x4b, 0xb1, 0xed, 0x83, 0x2f, 0x7a, 0x00, 0x0c, 0x33, 0x48, 0x8a, 0xd3, 0x81, 0x34, + 0xc1, 0x6a, 0x4b, 0xaf, 0xfc, 0x36, 0xeb, 0x27, 0x3b, 0xc8, 0x18, 0x12, 0xf9, 0xb3, 0x36, 0x26, + 0x34, 0xff, 0x78, 0xc4, 0x10, 0x9b, 0x3c, 0x85, 0xe4, 0x9f, 0x20, 0x63, 0xdb, 0x6d, 0x96, 0xcd, + 0xae, 0x94, 0x5d, 0x06, 0xed, 0x13, 0xfb, 0x89, 0xa3, 0xb3, 0x41, 0xca, 0x7d, 0x14, 0xd6, 0xe6, + 0x5f, 0x7b, 0x33, 0x40, 0xb7, 0x58, 0x29, 0x21, 0x3b, 0x05, 0x00, 0xc4, 0x7c, 0x1e, 0xef, 0x57, + 0x0f, 0x6e, 0x3b, 0x74, 0xb6, 0x77, 0xe1, 0x1e, 0x01, 0x52, 0xe9, 0x2a, 0x7d, 0x78, 0xca, 0x3b, + 0x8b, 0x4b, 0x5b, 0x59, 0x3c, 0x29, 0x60, 0x77, 0x00, 0xa9, 0x12, 0xeb, 0xa3, 0x18, 0x3e, 0xbd, + 0xbf, 0x4f, 0x86, 0x74, 0x99, 0xe3, 0x8a, 0x74, 0x5c, 0xf0, 0xc7, 0x0c, 0xe8, 0xd0, 0x74, 0xe9, + 0x6a, 0x83, 0x43, 0x23, 0xba, 0x26, 0xf9, 0x9f, 0x2a, 0x9e, 0xf4, 0x55, 0x6b, 0x7a, 0x81, 0x27, + 0xfb, 0x57, 0x57, 0x9d, 0x83, 0xe3, 0x49, 0x18, 0x62, 0x9e, 0xab, 0x43, 0x74, 0x15, 0x8c, 0xe9, + 0xaf, 0xc7, 0xae, 0x46, 0x22, 0x32, 0xcb, 0x7f, 0xcc, 0x68, 0xec, 0x96, 0xd5, 0xed, 0xb1, 0x61, + 0xc1, 0x69, 0x72, 0xec, 0x57, 0x32, 0xed, 0x18, 0x87, 0x6e, 0x5a, 0x15, 0xdd, 0x85, 0xef, 0x0b, + 0x53, 0xd9, 0xef, 0x46, 0xd1, 0xb0, 0x36, 0xaa, 0x9c, 0xd7, 0x78, 0xb6, 0xbe, 0xac, 0xf3, 0xfb, + 0xf5, 0x35, 0xe3, 0x98, 0xb1, 0x9d, 0x7e, 0x8b, 0xdd, 0x16, 0x73, 0x5b, 0xbe, 0x72, 0x03, 0x66, + 0x64, 0x2e, 0xc8, 0xd4, 0x62, 0x18, 0x28, 0x73, 0xbb, 0xd7, 0x2e, 0x6f, 0xa7, 0x19, 0x11, 0x50, + 0x26, 0x31, 0x53, 0xbc, 0xdd, 0xc6, 0xdf, 0x2c, 0x98, 0x9e, 0x01, 0x67, 0xcd, 0x87, 0xc4, 0x33, + 0x4c, 0xc2, 0x75, 0x43, 0x92, 0x97, 0x3b, 0x0c, 0x86, 0x73, 0x31, 0xb5, 0xfe, 0x2e, 0x39, 0xcd, + 0xec, 0x5b, 0x45, 0x95, 0xb3, 0xd1, 0x4d, 0x85, 0xff, 0x61, 0xb9, 0x00, 0xde, 0xdd, 0x73, 0x1b, + 0xee, 0xb6, 0x2b, 0x70, 0x8e, 0x93, 0x42, 0x8e, 0x73, 0x9a, 0x7b, 0x7c, 0x7c, 0x8d, 0x79, 0xd4, + 0x1d, 0x18, 0x50, 0x66, 0x8f, 0xf4, 0x2e, 0xa3, 0xa7, 0x58, 0x94, 0x52, 0x6e, 0x26, 0xe9, 0x0a, + 0x17, 0x1e, 0xcb, 0x8d, 0x1e, 0x32, 0x42, 0x93, 0x00, 0xb8, 0x02, 0x3d, 0x50, 0xf7, 0xdc, 0xd8, + 0x1d, 0x80, 0x72, 0xa9, 0x54, 0x31, 0x92, 0x09, 0x0e, 0x51, 0xd2, 0x37, 0xbc, 0xf5, 0x91, 0xf0, + 0xd2, 0xae, 0x04, 0xa7, 0xab, 0xcd, 0xe9, 0x1c, 0x6a, 0x5f, 0x99, 0x1d, 0x15, 0x44, 0xb6, 0x7d, + 0x3e, 0xff, 0x5c, 0xd4, 0x53, 0xbd, 0xae, 0xec, 0x5e, 0x45, 0xcc, 0xcd, 0x65, 0xad, 0x9a, 0x70, + 0x6a, 0x5b, 0xe7, 0xd6, 0x9a, 0x9f, 0x14, 0xb4, 0xfc, 0xc5, 0xb6, 0x40, 0x6f, 0x71, 0xf7, 0x67, + 0x34, 0xa8, 0x1c, 0x2a, 0x69, 0xbf, 0x1f, 0x78, 0x70, 0xec, 0x64, 0x53, 0x86, 0xac, 0xfc, 0xa5, + 0xab, 0x4e, 0xe4, 0xa8, 0x34, 0xf5, 0x89, 0x38, 0x1b, 0x04, 0x8c, 0x93, 0x78, 0x70, 0x2c, 0xbe, + 0x3a, 0x4a, 0xa1, 0xe0, 0xc5, 0xa1, 0x8d, 0xc3, 0x77, 0x47, 0x55, 0x26, 0x83, 0x2b, 0xd1, 0xda, + 0x9d, 0x43, 0x2b, 0x80, 0xb8, 0x6e, 0xbd, 0xd2, 0x9c, 0x20, 0xf5, 0x20, 0x8f, 0xb3, 0x12, 0x69, + 0x24, 0xf5, 0x10, 0x1c, 0x86, 0x27, 0x75, 0x07, 0xd2, 0x40, 0x20, 0x19, 0x48, 0x54, 0x89, 0x33, + 0xb5, 0x68, 0xc6, 0xe0, 0xf4, 0xbe, 0xee, 0xe1, 0x65, 0x72, 0x56, 0x26, 0xb0, 0x8e, 0xf3, 0x84, + 0xbf, 0x7b, 0x5f, 0x4f, 0x8c, 0x29, 0x9c, 0x30, 0x4b, 0x1e, 0xb3, 0x68, 0xb7, 0x9d, 0xef, 0x97, + 0x7d, 0x12, 0x8a, 0x2c, 0x5f, 0x61, 0x12, 0x46, 0x2d, 0x5a, 0xe7, 0x2d, 0x13, 0x08, 0x51, 0x95, + 0xba, 0x65, 0xa6, 0x36, 0x6d, 0xdc, 0x98, 0x01, 0x52, 0x7c, 0xf2, 0x32, 0x71, 0x6d, 0x02, 0x50, + 0x15, 0x69, 0x94, 0x16, 0xe4, 0x59, 0xf0, 0xe2, 0x3a, 0xb4, 0x2e, 0xaa, 0x55, 0x8c, 0x5e, 0x8a, + 0x0f, 0x4c, 0x55, 0x75, 0x1b, 0x20, 
0x9a, 0x9e, 0x32, 0x4b, 0x50, 0xac, 0x98, 0x96, 0x9e, 0x56, + 0xc3, 0x53, 0x48, 0xb0, 0x93, 0x53, 0xde, 0x8e, 0x6d, 0x06, 0x4d, 0x8d, 0xe5, 0x0c, 0x79, 0xaa, + 0xd0, 0x32, 0xc2, 0x92, 0x9b, 0xba, 0x55, 0x75, 0x35, 0xcb, 0x62, 0xe9, 0x0b, 0xd2, 0xbd, 0x86, + 0x3e, 0xfc, 0xd1, 0x29, 0x7b, 0xd5, 0x1a, 0x16, 0x74, 0x9c, 0xbe, 0xc5, 0x5c, 0x16, 0x30, 0x7a, + 0xec, 0x4d, 0xc6, 0xac, 0x7c, 0x78, 0x6b, 0x5e, 0x48, 0xd8, 0x9a, 0xf7, 0xe3, 0xa2, 0xa9, 0x1a, + 0xe4, 0x36, 0x2b, 0x5d, 0xa3, 0x1b, 0xa6, 0x57, 0x73, 0xaa, 0x6a, 0x3b, 0xd1, 0x9d, 0xdf, 0x9f, + 0x9f, 0x98, 0xfa, 0x05, 0xe3, 0xce, 0x66, 0x73, 0xf3, 0x1c, 0x2c, 0x12, 0x25, 0x90, 0x07, 0x52, + 0xc9, 0xc4, 0x22, 0xa5, 0xe6, 0x72, 0xc6, 0x8c, 0xdb, 0xf5, 0x68, 0xd0, 0xbd, 0xe1, 0xb1, 0x9c, + 0xaf, 0xa0, 0x9e, 0x42, 0x94, 0x27, 0x18, 0x8a, 0x1d, 0x9d, 0x93, 0x1b, 0x1f, 0xb5, 0x1b, 0x86, + 0xdc, 0x86, 0xcc, 0xb3, 0x3c, 0xde, 0xb5, 0x3c, 0x48, 0x18, 0x95, 0x81, 0x11, 0xfe, 0x48, 0x6c, + 0x61, 0xfd, 0xd4, 0x35, 0x7f, 0x26, 0xe9, 0x44, 0xfc, 0x51, 0x37, 0x79, 0xc7, 0x82, 0x72, 0xd5, + 0xee, 0x85, 0x7d, 0xa0, 0xf3, 0xb7, 0x9b, 0x8a, 0x04, 0x92, 0x3e, 0x28, 0xd4, 0xb6, 0xa4, 0xaa, + 0xb2, 0x59, 0x2a, 0x6b, 0x4d, 0xb9, 0x18, 0xb7, 0x8e, 0xa2, 0x7b, 0x20, 0x64, 0x9d, 0x23, 0x14, + 0x98, 0xb5, 0x8b, 0x29, 0xd1, 0xcb, 0x8f, 0x71, 0xdc, 0x7a, 0x1f, 0x48, 0x20, 0xdc, 0x62, 0x67, + 0x5e, 0x4b, 0x83, 0xe5, 0x5c, 0x4e, 0x95, 0x21, 0xb0, 0xac, 0xc3, 0xc8, 0xef, 0x69, 0x20, 0x51, + 0x6d, 0x53, 0x0d, 0xc9, 0xd1, 0x8a, 0x2e, 0x6a, 0xc4, 0x5a, 0xd5, 0xb9, 0x94, 0x3b, 0xa3, 0x2f, + 0x17, 0x41, 0x9a, 0xb7, 0x62, 0x1a, 0xe3, 0x0e, 0x29, 0x98, 0xbd, 0xb5, 0xd8, 0xd5, 0xfd, 0xa1, + 0x72, 0x27, 0x8b, 0x6e, 0xff, 0x49, 0x11, 0x46, 0x8e, 0x5b, 0x90, 0xf7, 0x4a, 0x51, 0x60, 0xd1, + 0x46, 0x36, 0x6d, 0x0e, 0xd1, 0x10, 0xfd, 0x5b, 0x41, 0xcc, 0x74, 0xeb, 0x19, 0x67, 0xfb, 0x99, + 0x52, 0x07, 0xfb, 0x31, 0xdd, 0xa6, 0xee, 0xcf, 0x08, 0xc7, 0xc8, 0x02, 0xcf, 0x02, 0x43, 0x86, + 0x4f, 0xa9, 0x9c, 0xbe, 0x0e, 0x2e, 0xd3, 0xfa, 0x89, 0xd3, 0x1e, 0x70, 0x01, 0xe4, 0x65, 0x1f, + 0x4a, 0xba, 0xbf, 0x30, 0x28, 0xa4, 0x7d, 0x8f, 0x79, 0x32, 0x56, 0x97, 0xa8, 0x23, 0x71, 0xc2, + 0xcd, 0xfb, 0x34, 0x5f, 0x3f, 0x4e, 0x41, 0x0b, 0x4f, 0xbb, 0xbc, 0x50, 0xe9, 0x97, 0x36, 0xf7, + 0x37, 0xb7, 0x14, 0x3e, 0xbe, 0x00, 0x6c, 0x9a, 0x23, 0x20, 0xee, 0x64, 0xd1, 0x64, 0x0e, 0x4c, + 0x56, 0xe2, 0xae, 0xca, 0x1e, 0xf9, 0x85, 0xb3, 0xbb, 0x12, 0x45, 0x44, 0x67, 0x60, 0x8c, 0x5c, + 0x16, 0xc9, 0xa1, 0x7e, 0x16, 0x28, 0x13, 0xa1, 0xc7, 0x01, 0xdd, 0x35, 0x58, 0x72, 0xbe, 0x5c, + 0x2b, 0xb7, 0x04, 0x90, 0xfd, 0xfc, 0xde, 0x1d, 0xdd, 0x1e, 0x5e, 0x13, 0x8f, 0xee, 0x1e, 0x13, + 0x3a, 0x01, 0x74, 0x83, 0xa6, 0x44, 0xd5, 0xbd, 0xc7, 0xdf, 0x0a, 0x8e, 0x79, 0xb3, 0xfd, 0x72, + 0x87, 0xa8, 0xf8, 0x89, 0x53, 0xd7, 0xeb, 0x82, 0x14, 0x3d, 0xcd, 0x29, 0x9e, 0x98, 0x6e, 0xec, + 0x03, 0xb4, 0xbf, 0x0f, 0x28, 0x0b, 0x65, 0x2e, 0x56, 0x16, 0xe8, 0x67, 0xa8, 0xb2, 0x59, 0xc4, + 0x70, 0x14, 0x55, 0x13, 0xff, 0x65, 0x13, 0x2c, 0x6f, 0x08, 0xfa, 0xb8, 0x78, 0x46, 0x8d, 0xf4, + 0xfe, 0x94, 0x35, 0xb9, 0x05, 0x94, 0xbf, 0x66, 0x56, 0x20, 0xa7, 0xef, 0x43, 0xe9, 0xe8, 0xdb, + 0xcb, 0xff, 0xc1, 0x39, 0x31, 0x79, 0xb4, 0x72, 0xb8, 0x96, 0xd0, 0x10, 0xe0, 0x4f, 0xed, 0x81, + 0xd4, 0xdc, 0x18, 0x41, 0xfe, 0x2b, 0x29, 0xf6, 0x67, 0xdd, 0x82, 0x74, 0x66, 0x76, 0xee, 0x70, + 0x94, 0x28, 0x55, 0x55, 0x83, 0x63, 0x05, 0x9b, 0x12, 0xba, 0x45, 0x20, 0xf4, 0xcb, 0x66, 0xac, + 0xf4, 0xd3, 0xee, 0x48, 0x7f, 0x10, 0x18, 0x8b, 0xce, 0xef, 0xc5, 0xad, 0x77, 0x2f, 0x46, 0xe0, + 0x76, 0x68, 0x87, 0x0e, 0x01, 0xb1, 0x64, 0xb8, 0x6b, 0x9c, 
0x2d, 0xd1, 0xd5, 0x27, 0x56, 0xe3, + 0x94, 0x96, 0x67, 0x27, 0xce, 0xde, 0x88, 0xdb, 0x92, 0x0e, 0x56, 0x15, 0x47, 0xcc, 0xdf, 0x98, + 0x3a, 0x4a, 0x40, 0xb6, 0x63, 0x9e, 0x14, 0x92, 0x17, 0xc5, 0xb4, 0xfd, 0x8d, 0x41, 0xf9, 0xb2, + 0x19, 0x6d, 0x03, 0x18, 0x55, 0x56, 0xcb, 0xaa, 0x94, 0xaa, 0x9a, 0xad, 0x1d, 0x88, 0xa2, 0x1d, + 0xe4, 0x9f, 0x41, 0xe4, 0xfd, 0xe0, 0x14, 0xd5, 0xc9, 0x36, 0xd5, 0x3f, 0x09, 0x2f, 0x66, 0x37, + 0xe3, 0x45, 0xe8, 0xb8, 0xa6, 0xa4, 0x7d, 0xbd, 0x43, 0xcf, 0x8b, 0xf2, 0xd4, 0xc2, 0xf5, 0x98, + 0xf1, 0x79, 0x84, 0xe0, 0x09, 0x5c, 0xe3, 0x90, 0xf1, 0xf7, 0x58, 0x5d, 0xd2, 0x75, 0x3e, 0xaf, + 0x50, 0x9e, 0xcf, 0xcf, 0x02, 0x39, 0xca, 0xf8, 0x91, 0x02, 0x14, 0x4d, 0xc9, 0x82, 0xef, 0xdc, + 0x2d, 0x64, 0xf8, 0x64, 0xa3, 0x43, 0x09, 0x16, 0xd7, 0xc0, 0xa4, 0xb9, 0xdb, 0xf9, 0x7d, 0x10, + 0x29, 0xac, 0xc0, 0x34, 0xa9, 0xca, 0x11, 0xf5, 0x66, 0xa3, 0x38, 0xfc, 0x94, 0x06, 0x84, 0x2e, + 0xd7, 0xdd, 0x66, 0xd9, 0xf8, 0x6a, 0xe4, 0xab, 0x69, 0x05, 0x22, 0x96, 0x43, 0x88, 0xa8, 0x78, + 0x42, 0x37, 0x05, 0x5d, 0xf1, 0xd3, 0x35, 0x51, 0x69, 0x62, 0x06, 0x1b, 0x41, 0x2a, 0x71, 0xee, + 0xf2, 0xb6, 0xcc, 0xae, 0xbc, 0xc3, 0x3f, 0x11, 0x3f, 0xed, 0xdd, 0x8c, 0xbd, 0x98, 0xa6, 0x0e, + 0x17, 0xc2, 0x5b, 0x3e, 0xfa, 0x24, 0x78, 0x35, 0xba, 0xb4, 0xd3, 0xc6, 0x14, 0x47, 0xbb, 0x6d, + 0x18, 0x6f, 0xef, 0x07, 0x79, 0xe1, 0x53, 0x03, 0x27, 0xf3, 0xc6, 0xb0, 0xd0, 0xe4, 0xde, 0x21, + 0x6c, 0x5a, 0x70, 0xc8, 0x85, 0x70, 0xb8, 0x16, 0x82, 0x90, 0x9e, 0x5b, 0xe3, 0x1d, 0xd4, 0x51, + 0x32, 0x31, 0x9a, 0x31, 0xec, 0xa3, 0xd5, 0xbd, 0xb5, 0xbe, 0xb8, 0xb0, 0x6f, 0xc9, 0x8b, 0x17, + 0x4d, 0xf5, 0x1e, 0x4b, 0xb1, 0x68, 0xd8, 0xc8, 0x12, 0xa5, 0xc0, 0xe3, 0xeb, 0x8c, 0x40, 0xb4, + 0xa4, 0x74, 0x27, 0x76, 0xd4, 0x3d, 0x41, 0x43, 0x4b, 0x1d, 0x8f, 0xf3, 0x0c, 0x0f, 0xb6, 0x5b, + 0x39, 0xf4, 0x8d, 0xce, 0xab, 0x42, 0x8e, 0xfc, 0x40, 0x13, 0x4d, 0x96, 0xbb, 0xe5, 0xcc, 0x4d, + 0x99, 0x20, 0x38, 0x24, 0x9a, 0x62, 0xc1, 0x7b, 0x57, 0xdb, 0x74, 0xf5, 0xa3, 0xde, 0x5b, 0x0d, + 0xdd, 0x9a, 0x9e, 0x1f, 0x84, 0x77, 0xf8, 0xca, 0xfb, 0x5b, 0x53, 0x1b, 0xbe, 0xc8, 0xca, 0x0e, + 0xc0, 0xd8, 0xd7, 0x83, 0x98, 0xb4, 0x3b, 0x97, 0x1b, 0xa7, 0x8d, 0x50, 0x7b, 0x5c, 0x81, 0x6d, + 0xe1, 0x5b, 0xf0, 0xe0, 0x30, 0x77, 0xa1, 0xd7, 0x97, 0xf2, 0xe2, 0x27, 0xd2, 0x92, 0x37, 0xde, + 0xd3, 0xea, 0xdb, 0x30, 0x19, 0xf2, 0xac, 0x65, 0x42, 0x17, 0xe0, 0xf0, 0x06, 0x37, 0xc0, 0x83, + 0x6b, 0x8b, 0x33, 0xf9, 0x40, 0x0b, 0xa6, 0x21, 0xa1, 0x52, 0xa2, 0x44, 0x0e, 0xea, 0x27, 0x5f, + 0x71, 0x01, 0x89, 0xb9, 0xab, 0x1e, 0xab, 0x7a, 0xad, 0x7a, 0x08, 0x94, 0x1d, 0xf7, 0x9e, 0x5c, + 0xc1, 0x38, 0x0b, 0xe6, 0xa8, 0x2f, 0xe4, 0xea, 0x25, 0x57, 0xfd, 0xae, 0x56, 0xcc, 0xe3, 0x7d, + 0xbe, 0x4e, 0x9d, 0x2e, 0x47, 0xb4, 0x0f, 0x86, 0xfc, 0xef, 0x05, 0x57, 0x01, 0x9d, 0x89, 0x92, + 0xb9, 0x8d, 0x3b, 0x25, 0xa0, 0x26, 0x91, 0xb9, 0xf9, 0x52, 0xc6, 0x1c, 0x85, 0x67, 0xc9, 0x2e, + 0xd3, 0x7b, 0x88, 0x67, 0x93, 0x2b, 0x01, 0x5d, 0x4d, 0xa3, 0x1f, 0x35, 0xa1, 0x96, 0xd5, 0x46, + 0x96, 0x61, 0x35, 0x9d, 0xaf, 0xeb, 0x3c, 0xfc, 0x7e, 0x3a, 0x17, 0x70, 0x68, 0xcf, 0xe9, 0x64, + 0x80, 0xba, 0x63, 0xc6, 0x69, 0x74, 0x2f, 0x89, 0xf5, 0xd4, 0x4a, 0xe1, 0xf1, 0x7f, 0x7a, 0xd5, + 0x76, 0x1e, 0x90, 0x1e, 0x25, 0x7f, 0xca, 0x1e, 0x06, 0x0d, 0x4e, 0x8f, 0x70, 0xd7, 0x3c, 0x79, + 0x05, 0x02, 0xc3, 0x69, 0x43, 0x81, 0x4c, 0x9d, 0x7a, 0x51, 0x65, 0x61, 0x4f, 0x09, 0x7c, 0xa6, + 0x7f, 0x52, 0xe4, 0x32, 0xd3, 0x65, 0xbc, 0x90, 0x07, 0xe6, 0xef, 0x8a, 0x76, 0x79, 0x78, 0xcc, + 0xbb, 0x27, 0x13, 0xee, 0xca, 0xa0, 0x49, 0x8c, 0xd7, 0x51, 0x20, 0xd9, 0xbc, 0xf8, 
0x00, 0x2b, + 0x4e, 0x87, 0x5a, 0xad, 0x4f, 0x20, 0x89, 0xa7, 0x43, 0x08, 0x5a, 0x26, 0xbf, 0x5a, 0x8b, 0xdf, + 0x45, 0x3d, 0x2f, 0xff, 0x42, 0xa1, 0x67, 0xe8, 0x8a, 0x74, 0xcd, 0xea, 0x46, 0x1b, 0xa2, 0xae, + 0x7a, 0x12, 0x25, 0xc5, 0x1f, 0x21, 0x47, 0x07, 0x64, 0x67, 0xd2, 0x2e, 0x31, 0x0d, 0x2a, 0x7a, + 0x69, 0xc2, 0xaf, 0x9d, 0x87, 0xe7, 0x09, 0xde, 0xfd, 0xe9, 0xd0, 0x32, 0x2f, 0x7f, 0xcc, 0xef, + 0x60, 0x66, 0x89, 0x93, 0xed, 0x99, 0xbf, 0x8c, 0x40, 0x25, 0x05, 0x90, 0x4d, 0x18, 0x59, 0x3d, + 0x49, 0xc3, 0xe2, 0x20, 0x3c, 0xe0, 0x9f, 0x83, 0x25, 0x32, 0x94, 0x17, 0x93, 0xae, 0x66, 0xde, + 0x85, 0x34, 0x2a, 0x40, 0x5e, 0x60, 0x96, 0x9f, 0x38, 0xa9, 0x1d, 0xf5, 0xad, 0x46, 0x75, 0xb4, + 0xc8, 0xcb, 0xfa, 0x82, 0x2e, 0xb6, 0x71, 0x87, 0x9b, 0x60, 0xab, 0xf0, 0x0d, 0x65, 0x17, 0x9a, + 0x90, 0x74, 0x47, 0xa4, 0x09, 0xc6, 0xef, 0x6d, 0x02, 0x85, 0x3e, 0x02, 0x15, 0xae, 0x4b, 0x07, + 0xff, 0xa1, 0xa7, 0xc0, 0x29, 0x78, 0x9c, 0xcd, 0xfb, 0xa1, 0xcf, 0xe8, 0x6d, 0xd0, 0xef, 0x19, + 0x86, 0x2d, 0x30, 0xa5, 0x8a, 0x0e, 0x08, 0xe3, 0x2f, 0x10, 0x14, 0xf3, 0xcf, 0x72, 0x45, 0x71, + 0xc2, 0x29, 0x02, 0xa0, 0x01, 0x43, 0xd7, 0x2d, 0xfb, 0xba, 0x2e, 0x14, 0x5b, 0x00, 0x77, 0x46, + 0xe7, 0xf7, 0x01, 0xe1, 0xe3, 0xb6, 0x71, 0xe3, 0xe9, 0x93, 0x8c, 0xce, 0xb9, 0xc2, 0x32, 0xf7, + 0xb4, 0xfc, 0xe2, 0x61, 0x0f, 0xda, 0x8f, 0x55, 0x3e, 0x79, 0xd7, 0xee, 0xce, 0x54, 0x84, 0xea, + 0xee, 0xfa, 0xe6, 0x8d, 0x9e, 0x39, 0x40, 0xa5, 0x34, 0xa3, 0x2d, 0x5f, 0xf8, 0x62, 0x5d, 0xdc, + 0x44, 0x7e, 0xdc, 0x9e, 0xe8, 0xbb, 0x61, 0x39, 0x30, 0x6c, 0x0a, 0x21, 0x9a, 0x5e, 0xea, 0xf1, + 0xec, 0x02, 0x11, 0x92, 0x75, 0x2a, 0x10, 0x37, 0x4e, 0x4e, 0x0f, 0x7a, 0x69, 0x6a, 0xee, 0xa7, + 0x78, 0xc0, 0xb9, 0x55, 0xe0, 0xc0, 0xfc, 0x82, 0xbc, 0x47, 0x5e, 0x9e, 0xb3, 0xa0, 0x16, 0xf6, + 0x6f, 0xd2, 0x9b, 0xf7, 0x16, 0x5b, 0xf3, 0x72, 0x44, 0xf9, 0x0a, 0x16, 0x91, 0x04, 0xbf, 0x0a, + 0x14, 0xb3, 0x78, 0x3f, 0x19, 0xb2, 0x8c, 0x85, 0x7d, 0xbd, 0x75, 0x83, 0x55, 0x5a, 0x93, 0x02, + 0xb6, 0x49, 0xcd, 0x44, 0x38, 0x52, 0xe2, 0x50, 0x93, 0x9b, 0xc0, 0x53, 0x9a, 0xab, 0x9a, 0xae, + 0x47, 0x44, 0x89, 0xcb, 0x27, 0xfe, 0x8d, 0xe7, 0xb3, 0xa1, 0x4f, 0xc8, 0x45, 0x07, 0x6b, 0x8c, + 0x23, 0x9b, 0x2a, 0x12, 0x65, 0x8b, 0x58, 0x95, 0xb5, 0x04, 0xa2, 0xf0, 0x15, 0xfd, 0x85, 0x0c, + 0xa3, 0xb4, 0xc2, 0xa3, 0x10, 0xa6, 0xd8, 0x3d, 0x93, 0xde, 0x5c, 0xbd, 0xda, 0x0a, 0x8a, 0xf0, + 0x3e, 0x6d, 0xdd, 0x80, 0x34, 0x67, 0x98, 0xc4, 0xed, 0x78, 0xab, 0xf9, 0x0a, 0x3a, 0x25, 0x1f, + 0xe6, 0xb0, 0xdd, 0xad, 0x8d, 0x07, 0x68, 0xdf, 0x81, 0x02, 0x63, 0x9f, 0x8d, 0x46, 0x1d, 0x3c, + 0xc2, 0xe1, 0x96, 0x0e, 0x61, 0x8c, 0x6b, 0xeb, 0x29, 0x1d, 0xce, 0xa7, 0xc8, 0x95, 0xb9, 0x8c, + 0xbc, 0x90, 0xf6, 0x1b, 0xfe, 0x49, 0x4d, 0xf2, 0xe1, 0xf1, 0x6f, 0xb4, 0xde, 0x79, 0xf8, 0xe2, + 0xf8, 0x56, 0x18, 0x24, 0xb9, 0x4d, 0x6b, 0x90, 0x3c, 0x81, 0x9e, 0xa4, 0xac, 0xe5, 0x54, 0xd8, + 0xa7, 0x63, 0xb9, 0xb4, 0xde, 0xff, 0xa1, 0xf7, 0xb8, 0xef, 0xec, 0x79, 0x0d, 0x35, 0xb1, 0x49, + 0x95, 0x52, 0xa1, 0x85, 0x09, 0x2d, 0xc3, 0xf3, 0x6c, 0xc5, 0xa4, 0x2d, 0x6e, 0x16, 0x9d, 0x65, + 0x41, 0x41, 0xe5, 0x5c, 0x12, 0x50, 0xd4, 0xd7, 0x68, 0xda, 0x3d, 0x81, 0xd7, 0x01, 0xe8, 0x32, + 0x33, 0x03, 0xae, 0xf3, 0x3f, 0xb5, 0xcb, 0xa7, 0xba, 0xbb, 0xec, 0x25, 0x7b, 0xed, 0x36, 0x43, + 0xb0, 0x6b, 0x70, 0xf4, 0x82, 0x9b, 0xbf, 0x56, 0x51, 0xa3, 0xc1, 0xee, 0x16, 0xc8, 0x05, 0x41, + 0xb6, 0xb4, 0x76, 0x6e, 0x90, 0x17, 0x67, 0x88, 0x9d, 0xab, 0x71, 0x3e, 0x93, 0xc9, 0xb1, 0xd5, + 0x35, 0x41, 0xac, 0x97, 0x1d, 0x50, 0xd1, 0x64, 0xa4, 0x90, 0x8e, 0x18, 0x43, 0x2f, 0x0c, 0x22, + 0xf6, 0xb7, 
0x61, 0xb4, 0xbc, 0x91, 0xf0, 0x3c, 0xb4, 0xbf, 0x48, 0x96, 0xe8, 0x32, 0x0f, 0x5b, + 0x57, 0x1e, 0x9c, 0x61, 0x38, 0x6f, 0x1f, 0x39, 0x7f, 0x91, 0x54, 0xb9, 0x30, 0xcd, 0xf8, 0x33, + 0xd0, 0x9b, 0x7e, 0x6f, 0xb3, 0x7a, 0x2e, 0x1a, 0x9a, 0x8e, 0xc8, 0x92, 0x14, 0x64, 0x28, 0x5b, + 0x28, 0x65, 0xdf, 0xab, 0x18, 0xfc, 0xe6, 0x1f, 0xac, 0x3c, 0xf2, 0xee, 0x18, 0xd0, 0x3d, 0xd0, + 0xfd, 0xcc, 0x3c, 0xb4, 0xb4, 0x20, 0x06, 0x1f, 0x80, 0x65, 0x38, 0x47, 0xfe, 0xcc, 0x71, 0xfa, + 0x8a, 0xd1, 0x3e, 0x47, 0x1a, 0xde, 0x6c, 0x8f, 0x0c, 0xaa, 0xb9, 0x8e, 0xf3, 0x1d, 0xec, 0xab, + 0xcf, 0x0f, 0x39, 0x62, 0x6a, 0xe6, 0x7d, 0x3e, 0x31, 0x3c, 0xed, 0x7b, 0xcf, 0x21, 0xef, 0x69, + 0x47, 0x01, 0xc1, 0x55, 0xf7, 0xca, 0x34, 0x17, 0x9a, 0x14, 0xf9, 0xdd, 0x65, 0x67, 0x6c, 0x1b, + 0xe5, 0x9d, 0x51, 0x4e, 0xd1, 0xe9, 0x9d, 0x8c, 0x57, 0x23, 0xb6, 0xbb, 0x83, 0xc3, 0x82, 0xb1, + 0xbe, 0x4d, 0x76, 0x68, 0xac, 0xee, 0x73, 0x9b, 0x47, 0x33, 0xa5, 0x34, 0xb4, 0xe0, 0xab, 0x5e, + 0xa1, 0x42, 0x64, 0x42, 0x7e, 0xdd, 0xcf, 0x3e, 0xa4, 0x6b, 0xfe, 0x39, 0x25, 0x13, 0xab, 0x4f, + 0x78, 0x9d, 0x7a, 0x2e, 0xfd, 0x51, 0x26, 0x3e, 0x7a, 0xeb, 0x59, 0x7b, 0x55, 0xdb, 0x62, 0xa7, + 0x11, 0x04, 0xa5, 0x01, 0xc7, 0x2f, 0x24, 0x8b, 0xa9, 0x10, 0x31, 0x0d, 0xc7, 0x9a, 0xe8, 0x78, + 0x01, 0x32, 0xbe, 0x25, 0x1c, 0x2d, 0xc9, 0xbe, 0x09, 0x7c, 0x15, 0x49, 0xef, 0x30, 0x5a, 0x8f, + 0x89, 0x80, 0x78, 0x28, 0xe5, 0x14, 0x77, 0x97, 0xc9, 0x3a, 0xf0, 0xa2, 0xe0, 0x23, 0x32, 0x19, + 0xd2, 0x52, 0xf1, 0xa4, 0x2b, 0xfc, 0x53, 0x49, 0xa6, 0x50, 0x1c, 0xf8, 0xee, 0x4f, 0x0b, 0x65, + 0xe7, 0x20, 0x7c, 0x03, 0xdc, 0xd6, 0x6f, 0xcf, 0x53, 0xd2, 0x61, 0x1f, 0x36, 0x57, 0x45, 0x14, + 0xb1, 0xd5, 0xaa, 0x4c, 0x21, 0x84, 0x43, 0xcd, 0x27, 0x6c, 0x40, 0xfa, 0x70, 0x0e, 0xe0, 0x46, + 0xed, 0x90, 0x75, 0xdc, 0x9f, 0xcd, 0x9c, 0xe7, 0x3e, 0xb6, 0x2c, 0xde, 0x8b, 0x1f, 0x22, 0x38, + 0x76, 0xc2, 0xf7, 0x5d, 0xa1, 0xfd, 0x71, 0x63, 0xae, 0xe0, 0xe7, 0x60, 0x11, 0xd9, 0xa9, 0x31, + 0x5c, 0x30, 0x66, 0xa8, 0x4f, 0x92, 0x04, 0xa1, 0x70, 0x1c, 0xd0, 0xd5, 0x43, 0x00, 0xe5, 0x7d, + 0x3c, 0x84, 0x8b, 0xa3, 0xfe, 0x7d, 0xec, 0xb4, 0x3f, 0x42, 0x48, 0xca, 0xc7, 0x05, 0x15, 0xbf, + 0x6c, 0xdc, 0x75, 0xc3, 0xbc, 0x16, 0xc6, 0xdb, 0xc2, 0xa7, 0x8a, 0x1a, 0xa5, 0xbd, 0x05, 0x75, + 0x54, 0xf0, 0x3c, 0x13, 0x4c, 0x74, 0xc9, 0xe1, 0x1e, 0x50, 0xbe, 0xfb, 0x66, 0x36, 0x09, 0x04, + 0x2c, 0x01, 0x22, 0x52, 0xeb, 0x69, 0xcb, 0xde, 0x14, 0x77, 0xa7, 0xe5, 0x7f, 0xec, 0x1d, 0x32, + 0xb9, 0xab, 0xb3, 0x2e, 0xa1, 0x93, 0x3f, 0xb6, 0x8a, 0xef, 0x2a, 0xe7, 0x0e, 0x59, 0xed, 0xa1, + 0xa7, 0xdb, 0xc9, 0x2c, 0x83, 0xcf, 0x44, 0x28, 0x96, 0x27, 0xb6, 0x8f, 0xee, 0x3a, 0xe2, 0x8e, + 0xce, 0x35, 0x3e, 0xd6, 0x44, 0xf9, 0x67, 0xdf, 0x0a, 0xbd, 0xc4, 0x1e, 0xa9, 0x12, 0xab, 0x8d, + 0x09, 0x80, 0xde, 0x07, 0x56, 0xa9, 0x8d, 0x07, 0x25, 0x38, 0xbb, 0x07, 0x38, 0x3b, 0xe1, 0xd5, + 0xc4, 0x0a, 0x7d, 0x42, 0x08, 0xe4, 0xfd, 0x96, 0x4f, 0x23, 0x18, 0x70, 0xc3, 0xc1, 0xab, 0x08, + 0x52, 0xb8, 0xa0, 0xa2, 0xa7, 0x03, 0x9f, 0xe6, 0x41, 0xaf, 0xd3, 0x5f, 0x6e, 0xb2, 0xba, 0x25, + 0x1c, 0xd1, 0x1b, 0x4a, 0x97, 0xe7, 0xa6, 0x3c, 0x63, 0x71, 0xef, 0x47, 0xc6, 0x29, 0x37, 0x41, + 0xa0, 0xe3, 0xc3, 0x1e, 0x78, 0xe1, 0x68, 0x83, 0x69, 0x38, 0x02, 0x0e, 0xc7, 0x9e, 0x19, 0x2a, + 0x15, 0x71, 0xbd, 0x88, 0x63, 0xf2, 0xe4, 0x2b, 0xc2, 0xeb, 0xfb, 0x35, 0x1d, 0x14, 0x79, 0xd2, + 0x1a, 0x7f, 0x6e, 0x81, 0x24, 0xb7, 0x11, 0x27, 0xf0, 0xb9, 0xb3, 0x97, 0x83, 0xed, 0x0b, 0x3d, + 0x4b, 0x0a, 0xc6, 0xa2, 0x7b, 0xb5, 0x78, 0xa3, 0x39, 0xe0, 0x3a, 0xd6, 0x95, 0x7e, 0x67, 0xcc, + 0xe5, 0x2d, 0x9f, 0xc8, 0x97, 0x63, 
0x3a, 0x30, 0x0e, 0x80, 0xe8, 0x0d, 0x37, 0x92, 0x64, 0xe4, + 0xf7, 0xec, 0x0d, 0xd3, 0xb3, 0xfa, 0xc9, 0xe7, 0x4d, 0xb6, 0x32, 0x5b, 0x65, 0x01, 0x88, 0xa3, + 0xd1, 0x31, 0x01, 0xc2, 0x3a, 0x6b, 0xed, 0x7b, 0x6a, 0x85, 0xde, 0x56, 0x49, 0x51, 0x59, 0x19, + 0x28, 0x31, 0xff, 0xee, 0x45, 0xf3, 0xfc, 0xb1, 0x24, 0x89, 0xe8, 0xe8, 0x4e, 0x71, 0xf1, 0x52, + 0x77, 0x8f, 0x2d, 0x08, 0x8a, 0x74, 0xe1, 0xb3, 0x7b, 0x90, 0x21, 0xf7, 0x90, 0x50, 0x78, 0xc7, + 0xe2, 0xe8, 0x10, 0x5d, 0x8c, 0x88, 0xcd, 0x40, 0xbc, 0x35, 0x1e, 0x39, 0x95, 0x46, 0x6f, 0x43, + 0xd3, 0xde, 0x11, 0x27, 0xa7, 0x85, 0x3e, 0x59, 0x36, 0x35, 0xc2, 0x4b, 0x8b, 0xcb, 0x78, 0xc6, + 0x77, 0xa3, 0x5c, 0x87, 0xd7, 0xb9, 0x75, 0xed, 0xce, 0xe9, 0xdd, 0x7e, 0xa7, 0x54, 0x4e, 0xbc, + 0x88, 0x1f, 0x58, 0x1f, 0x2e, 0x69, 0x97, 0x54, 0x0d, 0xf3, 0xa5, 0x2b, 0x7c, 0x7a, 0x7d, 0x71, + 0xe3, 0x93, 0x4e, 0x53, 0x65, 0xcf, 0x59, 0xc4, 0x01, 0x79, 0x21, 0x85, 0x4c, 0x04, 0x2d, 0xa6, + 0x9a, 0x34, 0x39, 0xcb, 0x89, 0x6c, 0x02, 0xb1, 0x7c, 0xe4, 0x5b, 0x83, 0xf8, 0x6a, 0x4a, 0x2e, + 0xb4, 0xad, 0x6a, 0x51, 0xf5, 0xc1, 0x84, 0x0c, 0xd4, 0x8b, 0x65, 0x72, 0x85, 0x1c, 0x22, 0x74, + 0x12, 0x05, 0x4e, 0x08, 0x27, 0xb8, 0xab, 0x4e, 0x28, 0x8c, 0xe0, 0xbd, 0x2b, 0x57, 0xb5, 0xd7, + 0xd1, 0x27, 0xfc, 0x39, 0xbc, 0x81, 0x5d, 0xcf, 0xaa, 0x68, 0xd7, 0xd2, 0xa6, 0x2c, 0xa5, 0xa5, + 0xa4, 0xf5, 0xd7, 0x50, 0xc9, 0x2f, 0x75, 0xbf, 0xbf, 0x24, 0x61, 0xee, 0xfd, 0xec, 0x43, 0x64, + 0xca, 0x2e, 0x40, 0xf7, 0x38, 0x11, 0x69, 0x31, 0x04, 0xc9, 0x10, 0x87, 0x50, 0x0b, 0x56, 0xc7, + 0xd2, 0x4b, 0xa7, 0x04, 0x56, 0xfc, 0xaf, 0x15, 0x84, 0x21, 0x5e, 0xfb, 0x4e, 0x77, 0x12, 0x24, + 0x9c, 0x9e, 0x35, 0x26, 0xd9, 0x2b, 0x81, 0x13, 0x3c, 0x6e, 0xcb, 0x45, 0x84, 0xfc, 0x28, 0x69, + 0xf3, 0x4c, 0x30, 0x70, 0xf2, 0x2f, 0xf6, 0xed, 0xdb, 0x55, 0x4c, 0x0d, 0x3b, 0x71, 0xa3, 0x21, + 0x4a, 0x3b, 0x62, 0x91, 0xc1, 0x76, 0x2b, 0x9a, 0x17, 0xa4, 0x66, 0xea, 0x4b, 0x9e, 0x44, 0x23, + 0xdb, 0x74, 0x30, 0x06, 0x86, 0x24, 0x77, 0xb9, 0xdf, 0x70, 0x4a, 0x8b, 0xde, 0x93, 0x1a, 0xf9, + 0x3a, 0x35, 0xe6, 0x79, 0x48, 0x05, 0x4b, 0x96, 0x6a, 0xcf, 0x33, 0xf7, 0x50, 0xcc, 0x34, 0xe6, + 0x7b, 0xe4, 0x0d, 0x1f, 0xda, 0x17, 0x1c, 0xd0, 0x88, 0xba, 0x02, 0x46, 0x83, 0x32, 0x84, 0x77, + 0xeb, 0x51, 0x64, 0x79, 0x9f, 0x55, 0xd0, 0xed, 0x60, 0x01, 0xab, 0x59, 0x85, 0x5a, 0x4e, 0xe5, + 0xf5, 0xe4, 0x4a, 0x49, 0xec, 0x01, 0xb1, 0x7b, 0xbe, 0xe4, 0x5f, 0x5d, 0x17, 0x23, 0xf0, 0xc5, + 0x40, 0xc1, 0x15, 0x5d, 0x71, 0xa5, 0xab, 0xc1, 0x75, 0xae, 0x4d, 0x65, 0x14, 0x72, 0x5c, 0x06, + 0xe6, 0x4a, 0x7b, 0x5f, 0x4b, 0xf8, 0x98, 0x09, 0x5a, 0x77, 0x90, 0x2d, 0x66, 0x1f, 0x47, 0x99, + 0xfa, 0xce, 0xda, 0x8b, 0x61, 0x26, 0x6b, 0xe5, 0xc9, 0x15, 0x13, 0xaf, 0x94, 0xd5, 0xcf, 0x7a, + 0xf5, 0xd5, 0x9c, 0x68, 0xa8, 0xc6, 0xf0, 0x05, 0x14, 0x8e, 0xfb, 0x71, 0xc1, 0x48, 0xd2, 0x9b, + 0x20, 0x62, 0xb4, 0xc8, 0xf0, 0xef, 0x94, 0x9c, 0x03, 0xc5, 0x00, 0x5c, 0x32, 0x7d, 0x89, 0x59, + 0xcb, 0x78, 0x90, 0x73, 0x03, 0x98, 0x41, 0x77, 0xae, 0xa8, 0xb6, 0x32, 0xec, 0x39, 0xf6, 0x4f, + 0x52, 0x8d, 0xcf, 0xf8, 0xf9, 0x8d, 0x24, 0x54, 0xf2, 0xca, 0x13, 0xfc, 0x3d, 0xc8, 0x51, 0x1b, + 0x06, 0x18, 0x08, 0x44, 0xdb, 0xb1, 0x50, 0xb8, 0x12, 0x29, 0x61, 0xfe, 0x2e, 0xe2, 0x6e, 0xd6, + 0xe2, 0xfa, 0x78, 0x6f, 0x46, 0x72, 0x76, 0x12, 0x2d, 0x80, 0x7c, 0xdf, 0xcb, 0x29, 0x41, 0x24, + 0xcb, 0x0a, 0xfa, 0x94, 0x39, 0xae, 0x69, 0x0f, 0x44, 0xfe, 0x8f, 0x1b, 0xe0, 0x5e, 0x00, 0x48, + 0x16, 0x9a, 0x9f, 0xa6, 0x26, 0xcf, 0x6c, 0xd2, 0xbc, 0xbf, 0x4d, 0x66, 0x3c, 0xe6, 0x9f, 0xba, + 0x8c, 0x98, 0xd4, 0x01, 0x80, 0x5b, 0x7e, 0x06, 0xce, 0xc1, 
0x8e, 0x06, 0x47, 0x3b, 0x07, 0xd7, + 0x38, 0xd4, 0xc2, 0xa6, 0x0a, 0xf6, 0x7f, 0x68, 0xeb, 0xe2, 0x56, 0x18, 0x3a, 0xf1, 0x65, 0x7a, + 0x42, 0x3b, 0xf1, 0x49, 0xd6, 0x34, 0xa3, 0xe3, 0x21, 0xcf, 0x77, 0x8f, 0x51, 0x3e, 0x17, 0xb9, + 0x6d, 0x28, 0x0a, 0x10, 0xfb, 0x41, 0x68, 0x3b, 0x79, 0xd1, 0x0e, 0x29, 0xfd, 0x31, 0x61, 0x94, + 0x16, 0xda, 0xfe, 0x68, 0x22, 0xbf, 0x8c, 0x0e, 0xd1, 0xba, 0xe7, 0x85, 0x52, 0x1b, 0x49, 0x08, + 0xa5, 0xf8, 0xed, 0xde, 0x56, 0x97, 0x1c, 0x1b, 0x7e, 0x5e, 0x92, 0xe1, 0x36, 0xfd, 0xef, 0xfa, + 0x8f, 0x08, 0x5c, 0x63, 0x37, 0x1c, 0x88, 0x62, 0xa1, 0xcb, 0xcf, 0x4a, 0xfc, 0x12, 0x74, 0x0b, + 0x01, 0x4f, 0x2a, 0x60, 0x3e, 0x05, 0x29, 0x6b, 0x79, 0x12, 0xeb, 0xe4, 0xcb, 0xb2, 0x8e, 0xcb, + 0x56, 0x64, 0x80, 0x69, 0xfe, 0x9c, 0xe2, 0x2f, 0xfe, 0x6b, 0x14, 0x95, 0x62, 0xa4, 0x6f, 0xc2, + 0x28, 0x89, 0x6c, 0x90, 0x1b, 0xa2, 0xaf, 0x13, 0x92, 0xde, 0xab, 0x8a, 0x4a, 0x16, 0x5e, 0x80, + 0xf0, 0x02, 0x0d, 0x29, 0x5a, 0xc9, 0xd8, 0xf3, 0x71, 0xc2, 0x2c, 0x33, 0x64, 0xee, 0x57, 0xd4, + 0x04, 0x9c, 0x3a, 0x8c, 0xa6, 0xc0, 0x14, 0xea, 0x6a, 0x70, 0x90, 0x68, 0x27, 0xce, 0xd0, 0x2b, + 0x44, 0x92, 0xfc, 0x16, 0xa6, 0x46, 0x31, 0x8b, 0x22, 0x6a, 0x6e, 0x35, 0x6b, 0xdf, 0x2a, 0x1b, + 0x80, 0xef, 0x18, 0x44, 0x70, 0xe2, 0xd9, 0x3b, 0xdd, 0x35, 0x79, 0xfd, 0xc4, 0xef, 0x6c, 0x66, + 0xbe, 0x22, 0xcf, 0xdc, 0xc6, 0xb8, 0x9e, 0x8d, 0xfe, 0xc7, 0x20, 0xfe, 0x8c, 0x8f, 0xc5, 0x27, + 0xc9, 0xd3, 0x90, 0x60, 0x16, 0x0e, 0x56, 0x19, 0x82, 0x93, 0x62, 0x54, 0x9a, 0x33, 0x3f, 0xf9, + 0x7c, 0x24, 0xee, 0x39, 0x67, 0x7a, 0x93, 0x92, 0x7a, 0xee, 0xc0, 0x82, 0x28, 0x16, 0xb6, 0x82, + 0x2e, 0xad, 0xa9, 0x53, 0x64, 0xa3, 0x80, 0x2e, 0xe0, 0xab, 0xe4, 0xb8, 0x13, 0x92, 0x81, 0xf1, + 0x5e, 0x5f, 0x1f, 0x74, 0xd2, 0x5b, 0xa5, 0x74, 0xf5, 0x0a, 0xe2, 0x8f, 0x4a, 0x1f, 0x8e, 0xbf, + 0x9e, 0x5d, 0x03, 0xdb, 0xb9, 0x93, 0x6e, 0x39, 0xa0, 0x45, 0x8b, 0xa6, 0xab, 0xcd, 0xd2, 0x1d, + 0x49, 0x4f, 0x81, 0xa4, 0x75, 0x31, 0x5c, 0xb6, 0x61, 0x14, 0xb0, 0xbb, 0xc4, 0x33, 0x65, 0xdd, + 0x67, 0x50, 0x6e, 0xea, 0xdd, 0x99, 0x21, 0xf3, 0x04, 0x04, 0x2b, 0x40, 0xd9, 0xfd, 0x2b, 0x48, + 0x05, 0x84, 0x6b, 0x6d, 0xb5, 0x86, 0x21, 0xb8, 0x62, 0x5b, 0xc7, 0xb6, 0xa5, 0xa6, 0x50, 0xd1, + 0x6e, 0x98, 0x96, 0x6d, 0x29, 0xec, 0x4a, 0x2b, 0xd0, 0x32, 0x58, 0x25, 0x81, 0xb6, 0x63, 0x07, + 0x4c, 0xc1, 0x33, 0x86, 0x68, 0x93, 0x28, 0x6e, 0x1f, 0x23, 0x2f, 0x6f, 0xe9, 0x5b, 0xce, 0x4c, + 0xf6, 0x9f, 0x26, 0x50, 0x62, 0xc4, 0xfe, 0xf9, 0x01, 0x71, 0x66, 0x1b, 0xfe, 0x98, 0xe4, 0x9e, + 0xb8, 0x1f, 0x8d, 0x01, 0x50, 0x92, 0x6f, 0x18, 0x97, 0x73, 0xf1, 0x86, 0xfd, 0x6c, 0x2c, 0x4f, + 0x45, 0x61, 0xd5, 0xa6, 0x08, 0x31, 0x95, 0x2a, 0xeb, 0x91, 0x77, 0xff, 0xa6, 0x72, 0x3a, 0xc9, + 0xde, 0x22, 0xd6, 0x1a, 0x1c, 0xd4, 0x25, 0xfa, 0xae, 0xc6, 0x38, 0x29, 0x27, 0xb2, 0xa4, 0x1d, + 0xa1, 0x43, 0xc6, 0x94, 0xf7, 0xfc, 0x90, 0x74, 0xca, 0xfb, 0x9a, 0x79, 0xc1, 0xe1, 0x1d, 0xe4, + 0x20, 0x02, 0xc9, 0xab, 0xb4, 0x7a, 0x58, 0x66, 0xb8, 0x08, 0x62, 0x7e, 0xc9, 0xa4, 0x82, 0x8a, + 0x1c, 0x56, 0xa1, 0x92, 0x72, 0x14, 0x4e, 0xf2, 0x17, 0x29, 0x58, 0x76, 0xc8, 0x28, 0x9b, 0xba, + 0x60, 0x39, 0x7c, 0x17, 0x60, 0x1e, 0x8f, 0x14, 0xe8, 0xa6, 0xe1, 0x27, 0x95, 0xbf, 0x1d, 0x8a, + 0xfd, 0x3f, 0xd0, 0x65, 0x57, 0xa7, 0x79, 0x75, 0xaf, 0x0a, 0x4b, 0x92, 0xe7, 0x44, 0xd7, 0x08, + 0xd8, 0x49, 0x89, 0x11, 0x6a, 0xf1, 0xca, 0x6d, 0xe6, 0xf7, 0xea, 0xc4, 0xce, 0x45, 0xc7, 0xf2, + 0x34, 0xaf, 0x3c, 0xc0, 0xc3, 0xa8, 0x6c, 0x2d, 0x98, 0xb3, 0x73, 0x2d, 0x7f, 0xff, 0x22, 0x9c, + 0x58, 0x26, 0x2c, 0x85, 0xe1, 0xce, 0x4a, 0xe0, 0x02, 0x1a, 0xf5, 0x00, 0xf3, 0x23, 
0xbe, 0xa7, + 0xbc, 0x24, 0xf1, 0xfc, 0x7e, 0xb0, 0x7b, 0x21, 0x54, 0x1c, 0x7d, 0xf2, 0x86, 0xb8, 0x96, 0xbc, + 0x79, 0xfd, 0xfa, 0xf5, 0x3e, 0x78, 0xfe, 0xb5, 0xdd, 0x27, 0x34, 0x24, 0x3c, 0x8a, 0xb4, 0x35, + 0x84, 0x58, 0xd2, 0x4d, 0x1f, 0x5f, 0xfe, 0x6b, 0x53, 0x81, 0xa8, 0x19, 0x22, 0xf5, 0xa8, 0x40, + 0x11, 0x71, 0x25, 0xa8, 0xfb, 0xae, 0xbb, 0xf0, 0xd8, 0x87, 0x13, 0x6e, 0x6c, 0xd6, 0xb1, 0x96, + 0x4a, 0x4a, 0xdf, 0xa6, 0x31, 0xb9, 0x02, 0xa7, 0xef, 0xd9, 0xbf, 0x5d, 0xc2, 0x0c, 0xb7, 0x43, + 0x4d, 0x19, 0x92, 0x26, 0x77, 0xbd, 0x24, 0xf4, 0xd7, 0xeb, 0x18, 0xac, 0x23, 0xbb, 0x43, 0x68, + 0xe3, 0x79, 0x2e, 0x67, 0x42, 0xfa, 0xab, 0x4c, 0x48, 0xf2, 0xfb, 0x24, 0xb3, 0xe6, 0xfd, 0xce, + 0x51, 0x9b, 0xc5, 0x6c, 0xac, 0x83, 0x7b, 0x48, 0xcb, 0x1c, 0x03, 0x04, 0x52, 0xdd, 0x84, 0xb9, + 0xa3, 0xa7, 0x9f, 0xaf, 0xb8, 0x3d, 0xda, 0x81, 0xae, 0x8d, 0xf4, 0x43, 0xe3, 0x55, 0xbb, 0x41, + 0xdc, 0x96, 0xbe, 0x1e, 0x69, 0xdf, 0xc2, 0x0d, 0xc1, 0x30, 0x1b, 0xe0, 0xcf, 0x22, 0x48, 0xb5, + 0x9a, 0xb9, 0x23, 0xea, 0x14, 0xb9, 0x08, 0x19, 0x50, 0x8c, 0xd7, 0xc4, 0xae, 0xb5, 0x49, 0xaf, + 0xe1, 0x8a, 0x77, 0x2c, 0x00, 0x4c, 0xfc, 0x0f, 0x14, 0xa0, 0x5a, 0xe1, 0xe6, 0x47, 0x7a, 0x14, + 0xa3, 0xd1, 0xe4, 0x0c, 0x7a, 0x7f, 0x1c, 0xab, 0xac, 0x67, 0xe6, 0xc7, 0xd8, 0x86, 0xfc, 0xec, + 0xfb, 0xe8, 0xad, 0x42, 0x1d, 0x64, 0xea, 0xbc, 0x11, 0x7a, 0x9e, 0x65, 0x9c, 0x1b, 0xe0, 0xc7, + 0xe1, 0xa3, 0x73, 0x8a, 0x61, 0xac, 0x08, 0x6c, 0x04, 0x65, 0xca, 0x7d, 0x14, 0x57, 0x77, 0x39, + 0x90, 0x90, 0x4b, 0x00, 0x1b, 0xee, 0xa2, 0x28, 0x77, 0x4b, 0x69, 0x97, 0x74, 0x0c, 0x9b, 0xda, + 0x0e, 0x06, 0x31, 0x49, 0x1c, 0x0d, 0x0e, 0x88, 0x34, 0x16, 0x86, 0x6b, 0x3a, 0x9c, 0x22, 0x0b, + 0xfe, 0x59, 0x12, 0xaa, 0x29, 0xe0, 0x77, 0xbc, 0x96, 0xda, 0x27, 0x30, 0x21, 0xa3, 0xc3, 0x02, + 0xc2, 0x23, 0x07, 0x75, 0x76, 0x9b, 0xe4, 0xb9, 0x75, 0x42, 0xb0, 0x8b, 0xd6, 0x0a, 0xc8, 0xdf, + 0xf7, 0x8a, 0x59, 0x75, 0x2c, 0xe5, 0xb3, 0xfe, 0xaa, 0x10, 0x30, 0x70, 0xbc, 0xf1, 0x75, 0x84, + 0x2c, 0x27, 0xc8, 0x0f, 0xac, 0x66, 0x1d, 0xd4, 0x11, 0x3e, 0x07, 0x1b, 0x9b, 0x11, 0x99, 0x54, + 0xf9, 0xe8, 0x63, 0xc6, 0x44, 0x01, 0x17, 0xbb, 0x1e, 0x9b, 0x23, 0xaf, 0xee, 0xc7, 0x4c, 0x6b, + 0x86, 0xb4, 0xb5, 0x00, 0xd2, 0x55, 0xd3, 0x2d, 0xca, 0x4a, 0x1e, 0xa3, 0x8f, 0xdc, 0x0f, 0xee, + 0x55, 0x16, 0x49, 0x6d, 0x88, 0x29, 0x28, 0xdb, 0x07, 0xe8, 0x3f, 0x4f, 0x0e, 0x9f, 0xad, 0x00, + 0x04, 0xa7, 0x11, 0xb3, 0x41, 0x31, 0xe7, 0x33, 0x64, 0x11, 0x3e, 0x99, 0x97, 0x86, 0x81, 0x1c, + 0x90, 0xbd, 0xb0, 0xea, 0x58, 0x1f, 0x17, 0xa4, 0x4b, 0x4c, 0xc7, 0xd6, 0xd3, 0x26, 0x5b, 0x75, + 0xbb, 0x5e, 0x2f, 0x11, 0x23, 0x31, 0x4e, 0x93, 0x76, 0x7c, 0xc8, 0xcf, 0x42, 0xda, 0x9e, 0xf5, + 0x11, 0x89, 0x8e, 0x5c, 0x87, 0x6d, 0xdb, 0xe5, 0x29, 0xf5, 0x71, 0xa6, 0xc1, 0x55, 0x7b, 0xcf, + 0x61, 0x93, 0xd6, 0xe8, 0x1a, 0x39, 0xb3, 0xd3, 0x54, 0xda, 0x86, 0x89, 0x46, 0x12, 0x43, 0x20, + 0xe6, 0xd8, 0x9c, 0xca, 0xbe, 0x50, 0x41, 0xcd, 0xf1, 0x3a, 0x38, 0xdb, 0x93, 0xbf, 0xb2, 0xf5, + 0xe7, 0x1d, 0x75, 0x1a, 0x7e, 0x0c, 0x3a, 0x55, 0x0f, 0x3e, 0xaa, 0x57, 0xdf, 0x70, 0x5b, 0x2b, + 0x4c, 0xfe, 0xe6, 0xb5, 0x24, 0x29, 0xf9, 0x32, 0x02, 0x92, 0x06, 0x79, 0x47, 0xed, 0xa6, 0xc0, + 0x24, 0xec, 0xc1, 0xf0, 0xbb, 0x13, 0x08, 0x11, 0x8e, 0x6f, 0x60, 0x47, 0x61, 0xe9, 0xb4, 0xf8, + 0x16, 0x3b, 0x03, 0x9f, 0x96, 0x3d, 0x63, 0x94, 0x7d, 0xe7, 0x73, 0x6b, 0x7e, 0x0a, 0xfe, 0xba, + 0x08, 0x5b, 0x37, 0x38, 0x79, 0x45, 0x83, 0xbc, 0x59, 0x99, 0x4a, 0x6a, 0x33, 0x66, 0x33, 0x64, + 0x83, 0xc6, 0x53, 0xaf, 0x7c, 0x20, 0x79, 0xab, 0x76, 0xcf, 0x1d, 0xd0, 0x87, 0x35, 0xc0, 0x25, + 0x1d, 0x55, 
0xf0, 0x9f, 0x96, 0x07, 0xf3, 0x6c, 0x90, 0x7e, 0x46, 0x50, 0x22, 0xaa, 0x00, 0xe7, + 0xdf, 0x68, 0xb6, 0x80, 0x0f, 0x28, 0x96, 0xf1, 0x1b, 0x93, 0x3a, 0x13, 0x89, 0x1e, 0xaf, 0x3e, + 0x8b, 0xcb, 0x92, 0x05, 0x06, 0x22, 0xcc, 0xe3, 0x4e, 0x26, 0xd1, 0xcd, 0x99, 0xa1, 0x8d, 0xed, + 0xa7, 0x61, 0x8f, 0x94, 0x19, 0xa8, 0xb9, 0x7c, 0x7d, 0x90, 0xb5, 0x82, 0x6c, 0x44, 0x26, 0x2e, + 0x61, 0x17, 0xee, 0x31, 0x8c, 0xc3, 0xe0, 0xfc, 0x80, 0x4e, 0xdd, 0x3d, 0x9a, 0xaa, 0x0d, 0x80, + 0xe7, 0x18, 0x38, 0x11, 0x7f, 0x12, 0xde, 0xcd, 0x76, 0xf8, 0xac, 0xfa, 0x22, 0x14, 0x4e, 0x55, + 0x28, 0x75, 0x9f, 0x91, 0x44, 0x3c, 0xd9, 0x99, 0x3b, 0x6b, 0x6f, 0x95, 0xf1, 0x78, 0xe9, 0x10, + 0x9e, 0x01, 0x1f, 0x37, 0x5a, 0xee, 0x4f, 0x1f, 0x05, 0xd0, 0x9a, 0x59, 0x4e, 0xc7, 0xbf, 0x21, + 0xe7, 0xfd, 0x11, 0x16, 0x2c, 0xfb, 0x4e, 0x9a, 0xda, 0x1c, 0xfe, 0xa0, 0xaf, 0x04, 0x22, 0xf3, + 0xdc, 0xa9, 0xea, 0xfe, 0x0d, 0x75, 0xfe, 0x66, 0x73, 0xbc, 0xe3, 0x9d, 0xd6, 0x6f, 0xad, 0xad, + 0x70, 0x6d, 0xb8, 0xac, 0xc2, 0x7d, 0x89, 0xc8, 0xb8, 0x72, 0xc9, 0x7b, 0x08, 0x84, 0x40, 0x30, + 0x47, 0xaa, 0xfe, 0xee, 0x93, 0xd6, 0xcc, 0xf0, 0xcb, 0xb1, 0xf7, 0x6b, 0x56, 0x6c, 0x90, 0x42, + 0x1c, 0xe9, 0xc4, 0x2e, 0xc1, 0xaf, 0x13, 0xb0, 0xdc, 0x3f, 0x98, 0x4b, 0x91, 0x65, 0x57, 0xcd, + 0x44, 0x7c, 0x03, 0x62, 0xd1, 0xdd, 0x47, 0x89, 0x71, 0x17, 0xda, 0x04, 0x07, 0xee, 0xa7, 0x3d, + 0x17, 0x65, 0xd0, 0x2e, 0xcc, 0x5c, 0xeb, 0x12, 0xe7, 0xad, 0x36, 0xd5, 0xb8, 0x4d, 0x01, 0x42, + 0x20, 0x5b, 0x10, 0x7b, 0x42, 0x31, 0x56, 0x57, 0xa6, 0xf2, 0x9d, 0x98, 0xc2, 0xb4, 0x0b, 0xcc, + 0xac, 0x51, 0x4f, 0x4a, 0xa8, 0x18, 0xe3, 0x54, 0xc9, 0x08, 0x4b, 0x4e, 0x55, 0x8a, 0x2a, 0x9c, + 0x7d, 0xbe, 0x04, 0x09, 0xd3, 0x7d, 0xc7, 0x1e, 0xf7, 0xaa, 0xef, 0x5b, 0xf5, 0x83, 0x6c, 0x42, + 0x29, 0x67, 0xe4, 0x84, 0xc3, 0x42, 0xa4, 0x40, 0x43, 0x4f, 0x37, 0x57, 0x28, 0x62, 0xbf, 0x28, + 0x80, 0xb2, 0x0d, 0x2a, 0x13, 0xa5, 0xd9, 0x3f, 0x48, 0x3e, 0xaa, 0xe4, 0x26, 0x49, 0xae, 0xdb, + 0x91, 0xdd, 0xa1, 0x5b, 0x0b, 0xd4, 0x0b, 0x2c, 0x48, 0x91, 0xf6, 0x04, 0xec, 0xec, 0x47, 0x3b, + 0x9f, 0x05, 0x59, 0x14, 0x27, 0xc6, 0x46, 0x01, 0xc6, 0x99, 0x38, 0x13, 0x62, 0x98, 0x10, 0xc6, + 0x80, 0xe8, 0x1b, 0x69, 0x16, 0x6a, 0xfa, 0xea, 0x46, 0x55, 0x50, 0x80, 0x30, 0x2a, 0xb5, 0x17, + 0x5a, 0x2b, 0xa5, 0xda, 0x92, 0x02, 0x44, 0xd3, 0x4d, 0xba, 0xdc, 0x12, 0x3e, 0xa2, 0x57, 0x6a, + 0xfa, 0xfc, 0xd1, 0x11, 0xfe, 0x2a, 0x54, 0x20, 0x10, 0x21, 0x4e, 0x9f, 0x0d, 0xde, 0x49, 0xd5, + 0x31, 0x23, 0x2c, 0x8d, 0x6e, 0xd2, 0x7c, 0x00, 0x35, 0x11, 0x0c, 0x74, 0x06, 0x19, 0x08, 0xec, + 0x7c, 0xee, 0x30, 0x7a, 0xae, 0x46, 0xa7, 0x24, 0x7a, 0x82, 0xff, 0xd9, 0x42, 0xd4, 0xaf, 0xf1, + 0x63, 0x86, 0x43, 0x9e, 0xe1, 0xbf, 0x9f, 0x57, 0x1c, 0x5c, 0xfd, 0x96, 0xee, 0xe3, 0x5e, 0xbf, + 0x5c, 0x39, 0x93, 0x04, 0x42, 0x49, 0x18, 0x53, 0x2a, 0x25, 0x70, 0xaf, 0xaf, 0x70, 0x4f, 0x93, + 0x50, 0x08, 0x56, 0xcb, 0x19, 0xb2, 0xa4, 0xc9, 0x18, 0x0c, 0xb2, 0x50, 0xb4, 0x79, 0x68, 0x7f, + 0x7d, 0x31, 0x7e, 0xf5, 0x34, 0xec, 0xaa, 0x6d, 0x37, 0x26, 0x88, 0x40, 0x31, 0xa3, 0x7b, 0xaa, + 0x42, 0x7c, 0xb7, 0xac, 0x77, 0xa2, 0xe5, 0x44, 0x6a, 0x9e, 0x02, 0x01, 0xdc, 0x5c, 0x95, 0xfb, + 0x8b, 0x0f, 0xca, 0x9c, 0xb6, 0x6a, 0x15, 0x10, 0x67, 0x44, 0x5c, 0x4a, 0x74, 0xdf, 0x09, 0x1c, + 0x7b, 0x5c, 0x73, 0xae, 0xcc, 0xdb, 0x14, 0xe9, 0x9b, 0x1f, 0x79, 0x78, 0x5c, 0x32, 0x96, 0x03, + 0x68, 0xbe, 0x7e, 0x25, 0x90, 0xd1, 0xb0, 0xa8, 0x98, 0x55, 0x84, 0x05, 0x91, 0x78, 0x3e, 0xa2, + 0x10, 0xc1, 0xa2, 0x74, 0x00, 0xc9, 0x41, 0x5a, 0xf9, 0x31, 0x27, 0x35, 0x75, 0x91, 0xba, 0x03, + 0xab, 0xe4, 0x43, 0x3b, 0x22, 0x5d, 
0x9f, 0xe6, 0xb9, 0x95, 0xa6, 0x0b, 0x74, 0x5f, 0xe3, 0x26, + 0x38, 0x8c, 0xee, 0xd3, 0x64, 0x03, 0xf4, 0x28, 0x98, 0xdf, 0xcd, 0xe6, 0x90, 0x9a, 0xb8, 0xef, + 0x35, 0x2b, 0xbd, 0xf8, 0x2c, 0x5b, 0x1f, 0xf9, 0xd7, 0xf5, 0x47, 0x08, 0x65, 0x2e, 0x95, 0xf6, + 0xfc, 0x07, 0x8a, 0x4c, 0x6e, 0x04, 0xc0, 0x96, 0x27, 0x06, 0xd3, 0x79, 0x5f, 0x61, 0x7a, 0x2b, + 0x0f, 0xf5, 0x62, 0xaf, 0x59, 0x95, 0xfb, 0x4f, 0x75, 0xbc, 0x78, 0x42, 0x2b, 0xe0, 0x31, 0x17, + 0xab, 0xe1, 0x03, 0x61, 0x86, 0x08, 0xea, 0xeb, 0x6b, 0xd3, 0xe5, 0xc1, 0x63, 0xb2, 0xb3, 0xa8, + 0x25, 0x43, 0xc1, 0x7a, 0x2c, 0x46, 0x42, 0x04, 0xa1, 0x74, 0x3e, 0x2b, 0x3a, 0x50, 0x9b, 0x2d, + 0x0c, 0x02, 0xba, 0x2b, 0x38, 0x20, 0xe7, 0x3e, 0x5f, 0x3d, 0x30, 0xd7, 0x29, 0x33, 0x3a, 0xf0, + 0x35, 0x3a, 0xb9, 0xd5, 0xad, 0x18, 0x05, 0x45, 0xf8, 0x1a, 0x82, 0xd8, 0x3a, 0x23, 0x0d, 0x1e, + 0x48, 0xf2, 0x46, 0x53, 0xfb, 0x56, 0x51, 0x1e, 0xbc, 0xc6, 0x6a, 0xbc, 0xc7, 0x32, 0xe0, 0xe3, + 0x69, 0x99, 0x21, 0xfe, 0xf3, 0xd4, 0x3c, 0x70, 0x22, 0x29, 0x44, 0x45, 0x26, 0xbc, 0x45, 0x7b, + 0x52, 0x48, 0xe9, 0x79, 0x92, 0x5b, 0x10, 0x71, 0x76, 0xbd, 0x76, 0x3d, 0x33, 0xab, 0xc3, 0xd1, + 0xea, 0xef, 0x08, 0x11, 0xc5, 0x79, 0x1f, 0x11, 0x45, 0x27, 0x88, 0xe1, 0xc0, 0x60, 0x74, 0xf0, + 0xf1, 0x22, 0xf3, 0x02, 0x53, 0xb9, 0xef, 0x78, 0x87, 0x17, 0x4d, 0x3d, 0xcf, 0x1f, 0x84, 0x13, + 0x8f, 0xd5, 0x97, 0xb0, 0x89, 0x30, 0x41, 0x65, 0x9c, 0xf7, 0xda, 0x77, 0x82, 0xec, 0x82, 0x0b, + 0x1e, 0xd6, 0x5b, 0xf2, 0xbf, 0x59, 0xa6, 0x97, 0xe6, 0xb0, 0x37, 0x60, 0x83, 0xd0, 0x4e, 0x85, + 0xf1, 0xc0, 0x38, 0xa8, 0x71, 0xed, 0x8c, 0x28, 0x6d, 0x90, 0x4a, 0x46, 0x63, 0xb6, 0x0a, 0x60, + 0xa5, 0xca, 0x72, 0xda, 0x09, 0x32, 0xbe, 0xbf, 0xce, 0x00, 0xa6, 0x5a, 0x10, 0x5e, 0x00, 0xcf, + 0x87, 0xd3, 0xbd, 0xdc, 0xc6, 0xa9, 0xb7, 0xcc, 0xdc, 0x16, 0x7a, 0x82, 0x44, 0x63, 0x84, 0x56, + 0x80, 0xca, 0xfa, 0xf7, 0x29, 0x38, 0xd7, 0x9a, 0x75, 0x40, 0x25, 0x41, 0xc6, 0x01, 0xea, 0x4b, + 0x6a, 0x75, 0x76, 0x68, 0x85, 0x7c, 0x65, 0x94, 0x49, 0x2c, 0x59, 0xf8, 0xe3, 0xc4, 0x2e, 0xc3, + 0xe5, 0x10, 0xe3, 0xa9, 0xf5, 0x8a, 0xb6, 0x1b, 0x49, 0x42, 0x0f, 0xde, 0x74, 0x95, 0xfa, 0xe4, + 0xe0, 0x68, 0x87, 0xd3, 0x77, 0x70, 0x79, 0x00, 0xb0, 0x08, 0x79, 0x4d, 0x55, 0x6c, 0x1e, 0x72, + 0xa0, 0x32, 0x32, 0xf1, 0x8c, 0xdb, 0x10, 0xf6, 0x9d, 0x3b, 0x06, 0x62, 0xd4, 0xe8, 0xdf, 0x1d, + 0x17, 0x0c, 0x3f, 0x43, 0xda, 0x5d, 0x66, 0x1a, 0x7b, 0xc9, 0x1d, 0xdf, 0x78, 0x4e, 0x02, 0xe9, + 0x18, 0x78, 0x53, 0x65, 0xe4, 0x67, 0xcf, 0x34, 0x3b, 0x5e, 0xfb, 0xa2, 0xe1, 0xa6, 0xac, 0x03, + 0x86, 0xc6, 0xfe, 0x13, 0xad, 0xee, 0xfa, 0xfd, 0x61, 0x52, 0xcd, 0x70, 0x21, 0x5f, 0xee, 0x61, + 0xfd, 0xa4, 0x80, 0xbb, 0x62, 0xb5, 0xe1, 0x1d, 0x34, 0xdd, 0x61, 0x39, 0x81, 0x14, 0x7a, 0x98, + 0x1a, 0x1a, 0x51, 0xa1, 0x7c, 0xc0, 0xc1, 0x41, 0x16, 0xdc, 0xab, 0xf2, 0xe0, 0xa8, 0x23, 0x48, + 0x36, 0x41, 0x7a, 0xe0, 0x85, 0x27, 0x33, 0x04, 0x70, 0x6b, 0x66, 0x4c, 0xd7, 0x87, 0x48, 0x16, + 0x8e, 0x79, 0x06, 0x7a, 0x26, 0xc0, 0xab, 0x9c, 0x26, 0x48, 0x5d, 0x90, 0xf2, 0x4d, 0xab, 0x81, + 0xca, 0xba, 0xfc, 0xd8, 0xeb, 0x8a, 0xdc, 0xbd, 0x2a, 0x6d, 0xad, 0xcc, 0xdc, 0xbf, 0x79, 0xc9, + 0xd3, 0x6d, 0x22, 0x45, 0x88, 0xbc, 0xde, 0xb6, 0xc7, 0x78, 0xd1, 0xcd, 0xa9, 0x5e, 0xb5, 0xbc, + 0x62, 0xdd, 0x2c, 0x2b, 0xe9, 0xae, 0x04, 0xb6, 0xb6, 0x4b, 0x71, 0x4b, 0xd9, 0x65, 0x67, 0x3d, + 0xc7, 0x97, 0x58, 0x01, 0x6e, 0x21, 0xa2, 0x28, 0xc8, 0x05, 0x1e, 0x88, 0x3c, 0xae, 0x68, 0xf6, + 0xed, 0x75, 0xb0, 0x66, 0x47, 0x63, 0x16, 0xe6, 0x1f, 0xd6, 0x18, 0xb3, 0x09, 0x73, 0x42, 0xab, + 0xc5, 0xc4, 0x78, 0x50, 0x31, 0x85, 0x87, 0x06, 0xcf, 0x08, 
0x01, 0x44, 0x29, 0xab, 0x68, 0x96, + 0x73, 0xd0, 0x2e, 0xdc, 0x3c, 0x46, 0x81, 0x87, 0x79, 0xcf, 0x0a, 0x85, 0xd4, 0x04, 0x03, 0x68, + 0x67, 0xbb, 0x8b, 0xbf, 0x20, 0xa3, 0x03, 0xf8, 0x96, 0xd3, 0x0e, 0xa2, 0xb6, 0x5e, 0xd9, 0x69, + 0xa3, 0x6d, 0xf6, 0xa2, 0xd7, 0xfa, 0x2a, 0xb2, 0xcb, 0x5d, 0x8e, 0x5e, 0x97, 0x94, 0xd1, 0x8e, + 0x1e, 0xb9, 0x67, 0xee, 0x02, 0x9f, 0x3f, 0x21, 0x47, 0xbe, 0x15, 0xda, 0x4e, 0x9d, 0x94, 0x2d, + 0xa5, 0x31, 0x6f, 0x60, 0xce, 0x88, 0x63, 0xce, 0xf3, 0x7e, 0x42, 0x54, 0x0c, 0x71, 0x1e, 0xd9, + 0x76, 0x2c, 0xfb, 0xe7, 0x50, 0xe7, 0x62, 0x18, 0x0b, 0x7e, 0x30, 0x06, 0xf5, 0x80, 0xcc, 0xb9, + 0xcb, 0xa6, 0xbb, 0x0c, 0x3b, 0x31, 0xed, 0x33, 0x42, 0x1d, 0x54, 0x85, 0xee, 0x09, 0xe8, 0x09, + 0xc4, 0xc4, 0x74, 0xef, 0x0f, 0xc4, 0x1f, 0x9d, 0x22, 0x4f, 0x8b, 0xb1, 0xdd, 0x27, 0xf6, 0x85, + 0xb2, 0xac, 0x00, 0x83, 0x90, 0x47, 0x8e, 0xc8, 0x55, 0x27, 0xfa, 0x16, 0x9b, 0x9b, 0xb7, 0x9b, + 0x0f, 0x70, 0x7c, 0xb6, 0x9d, 0xe9, 0x82, 0xcb, 0x77, 0xad, 0xfc, 0x2b, 0x90, 0xa5, 0xb4, 0x08, + 0xc5, 0xa5, 0x5b, 0x85, 0x3e, 0x95, 0x92, 0x95, 0x8b, 0xbe, 0x44, 0xd6, 0x31, 0x1b, 0x7b, 0x4c, + 0x57, 0x4a, 0xec, 0xef, 0xc0, 0x16, 0x24, 0x7c, 0x72, 0x99, 0xb4, 0x26, 0xc1, 0xd7, 0xf3, 0x61, + 0x6f, 0x60, 0x97, 0xf3, 0xeb, 0xe8, 0x2c, 0xef, 0x91, 0xc0, 0xcd, 0x35, 0x45, 0x75, 0xa3, 0x04, + 0x0c, 0xe1, 0x27, 0x76, 0xfb, 0xa4, 0xee, 0xf1, 0x56, 0x84, 0x54, 0x6d, 0x4c, 0x08, 0x67, 0x5c, + 0xb0, 0x79, 0x57, 0x72, 0x4e, 0x7a, 0x7f, 0x8a, 0x9b, 0x85, 0x7f, 0x92, 0xc3, 0x00, 0x29, 0x06, + 0x2e, 0xfa, 0x50, 0x4a, 0xb2, 0x79, 0x00, 0x07, 0x58, 0xdf, 0x7b, 0xdb, 0x63, 0xcd, 0x48, 0x4d, + 0x1e, 0xc1, 0x42, 0x35, 0x1f, 0xb8, 0xc1, 0xe7, 0xbd, 0x24, 0x78, 0xbb, 0x09, 0xc8, 0x96, 0x93, + 0x4f, 0xc9, 0xd3, 0xfb, 0xfb, 0x8e, 0x64, 0x47, 0x77, 0xb1, 0x29, 0xf2, 0xc7, 0xdc, 0xad, 0xd8, + 0xd9, 0xb9, 0xa5, 0xe2, 0xde, 0x4e, 0xcf, 0x1b, 0x70, 0xad, 0x4f, 0xa4, 0x05, 0xaf, 0x71, 0xf5, + 0xc8, 0x5d, 0x4e, 0x70, 0x10, 0xfc, 0x58, 0xb4, 0x34, 0xde, 0x6d, 0xc7, 0x89, 0xa3, 0x79, 0x85, + 0x19, 0xb9, 0xfe, 0xb8, 0x35, 0x65, 0x28, 0x03, 0x7c, 0x8c, 0xb4, 0x58, 0x1b, 0x88, 0x16, 0x48, + 0xe2, 0x23, 0x08, 0x17, 0x67, 0x0d, 0xb0, 0x2d, 0x6f, 0x5c, 0x5c, 0x21, 0xb8, 0xb8, 0xd6, 0xd3, + 0x0e, 0x60, 0xb2, 0x7f, 0x6b, 0x65, 0x4c, 0x90, 0x67, 0x77, 0xbd, 0x83, 0x62, 0xb0, 0xf9, 0x1f, + 0x0e, 0x9c, 0x09, 0x1d, 0xaf, 0x54, 0xd3, 0x15, 0x8c, 0x55, 0xb9, 0x89, 0xb4, 0x04, 0xfd, 0x52, + 0xb5, 0xbc, 0xa1, 0x84, 0x26, 0x63, 0xbe, 0xd0, 0xa0, 0x3c, 0xfa, 0x24, 0xcb, 0x56, 0x00, 0x52, + 0x6a, 0x9b, 0x7c, 0x9a, 0xd9, 0xf0, 0x7b, 0xb2, 0xb1, 0x62, 0xa2, 0x0d, 0x82, 0x07, 0xf2, 0xce, + 0xf7, 0xa5, 0x04, 0xc7, 0x9e, 0x32, 0x16, 0x18, 0xf7, 0x87, 0x8e, 0xb7, 0x02, 0x01, 0x27, 0x8d, + 0x7a, 0x3d, 0x89, 0xdf, 0x81, 0x90, 0x53, 0x99, 0xba, 0xe5, 0x37, 0xe6, 0x79, 0x1b, 0xfd, 0xb4, + 0x1d, 0x79, 0x79, 0x0f, 0xc1, 0x6b, 0xbf, 0xb2, 0x9e, 0xc9, 0xc5, 0xee, 0x23, 0xad, 0xfe, 0x96, + 0x77, 0xaf, 0x04, 0x9d, 0xba, 0xd6, 0x6d, 0x23, 0x88, 0x76, 0x01, 0x88, 0x9e, 0x20, 0xfe, 0xd7, + 0x1f, 0x60, 0xdc, 0xd0, 0xa4, 0x72, 0x7e, 0x50, 0x5a, 0xbb, 0xbb, 0x49, 0xa5, 0x41, 0xbc, 0x47, + 0x9e, 0x81, 0x0e, 0x85, 0x21, 0xea, 0x68, 0xd0, 0xbc, 0xb2, 0x0f, 0x9e, 0x7c, 0xa6, 0x73, 0x49, + 0xd0, 0xb2, 0x02, 0x5d, 0x69, 0x53, 0xcd, 0x91, 0xa2, 0xc4, 0x37, 0xf5, 0x9e, 0x0d, 0x25, 0x67, + 0x2e, 0xd2, 0x92, 0xea, 0xcf, 0x9b, 0x1e, 0x98, 0xe0, 0x73, 0x81, 0xbc, 0xd0, 0xee, 0xae, 0x75, + 0x9a, 0xe2, 0x26, 0xaa, 0x2b, 0xb2, 0x80, 0xe3, 0xd0, 0x82, 0xcd, 0x6a, 0x28, 0x44, 0xa4, 0x40, + 0x55, 0xe4, 0x68, 0x7b, 0xb1, 0x9d, 0xee, 0x19, 0x68, 0x54, 0xd9, 0x3b, 0x9b, 0xbd, 
0x5c, 0x14, + 0x75, 0x58, 0x43, 0x9d, 0x19, 0x37, 0x97, 0x67, 0xde, 0x7b, 0x8b, 0x5c, 0x2d, 0xab, 0x40, 0x22, + 0x8a, 0x31, 0x7e, 0x7c, 0x5d, 0xd7, 0x83, 0xb0, 0xc7, 0xf0, 0xbc, 0x61, 0xf2, 0xd6, 0xa2, 0x45, + 0x99, 0x8c, 0x28, 0xbf, 0x9a, 0xb1, 0xb7, 0x7c, 0xd0, 0x0f, 0x62, 0xa2, 0xa2, 0x1c, 0xfd, 0x0a, + 0xb2, 0x50, 0xeb, 0x9c, 0xef, 0x95, 0x26, 0x85, 0x44, 0x5d, 0x43, 0x2e, 0x5e, 0x0d, 0xec, 0x58, + 0x1b, 0x6f, 0xca, 0xd2, 0x6f, 0x0a, 0xe6, 0x56, 0x7d, 0x94, 0x1e, 0xc5, 0xa7, 0x3a, 0x76, 0xda, + 0x52, 0x57, 0x16, 0x8d, 0x23, 0x38, 0x2c, 0x23, 0x50, 0x91, 0x5b, 0x8f, 0x87, 0xf6, 0x30, 0x31, + 0xe0, 0x77, 0x71, 0x4c, 0xca, 0x7a, 0x97, 0x2b, 0x6c, 0x2f, 0x39, 0x81, 0x1b, 0x7b, 0x83, 0x45, + 0xf4, 0x85, 0x88, 0x06, 0x50, 0x3f, 0x4c, 0xd3, 0x1b, 0x86, 0x28, 0x53, 0x2d, 0xbd, 0x6b, 0x4c, + 0x19, 0x29, 0xf8, 0x7f, 0xca, 0x0d, 0xcf, 0xb3, 0x21, 0x72, 0x8c, 0xc1, 0x93, 0x44, 0xd6, 0xbf, + 0x18, 0x54, 0x2d, 0x85, 0x0d, 0x67, 0x3d, 0xab, 0x85, 0x33, 0x1f, 0xd5, 0x18, 0x16, 0x52, 0xe1, + 0xe1, 0x4c, 0x29, 0x54, 0x9f, 0xcb, 0x65, 0x77, 0x1d, 0xf6, 0xa6, 0x15, 0x6b, 0x9d, 0xf6, 0x7c, + 0xea, 0xf4, 0xb6, 0xf2, 0x5c, 0x11, 0x34, 0xed, 0x10, 0x4f, 0x88, 0x82, 0x5f, 0x6b, 0x3c, 0xf8, + 0x24, 0x01, 0x12, 0xd2, 0x42, 0x5d, 0xb3, 0x4f, 0xdd, 0xc1, 0x0a, 0x19, 0x49, 0x32, 0x3e, 0x75, + 0x2a, 0x61, 0x74, 0xd5, 0x18, 0x0a, 0x57, 0xd1, 0x61, 0x52, 0x2d, 0xc4, 0x6c, 0xd2, 0xb8, 0x51, + 0x7d, 0x88, 0xbc, 0x06, 0x68, 0xe7, 0x02, 0xfc, 0xda, 0xec, 0x62, 0x8b, 0x81, 0xa8, 0xd9, 0x27, + 0x46, 0x15, 0x79, 0x97, 0x3a, 0xc4, 0x89, 0x3e, 0x29, 0x2b, 0x60, 0x8b, 0xc8, 0x61, 0x76, 0x7c, + 0xab, 0x1b, 0xa5, 0x75, 0x38, 0x88, 0x18, 0xd2, 0x36, 0xa2, 0xe0, 0xdc, 0xfa, 0x09, 0x46, 0xf4, + 0xf1, 0x72, 0x23, 0x43, 0xd3, 0xcd, 0xbe, 0xd6, 0xd9, 0x27, 0x1d, 0x35, 0x03, 0x7d, 0x8d, 0x8e, + 0x35, 0xa9, 0x58, 0xc6, 0x2b, 0x25, 0x2d, 0xe7, 0xbd, 0x9e, 0x39, 0xa9, 0x94, 0xb4, 0xd7, 0x7a, + 0xf9, 0x7a, 0xef, 0x90, 0x94, 0xbf, 0x89, 0x79, 0x8b, 0xfe, 0x6e, 0x0b, 0x98, 0x83, 0x06, 0x61, + 0xc9, 0x26, 0xf9, 0xe9, 0x18, 0xbe, 0xa7, 0x4b, 0x3b, 0xb3, 0x02, 0xb9, 0xa3, 0x68, 0xbf, 0x71, + 0x8f, 0xf3, 0xb5, 0x3a, 0x73, 0x1a, 0xef, 0x8a, 0x73, 0xd2, 0xdd, 0x22, 0x86, 0x31, 0xfe, 0x2d, + 0x2c, 0xb2, 0x08, 0xab, 0x15, 0xbe, 0xb8, 0xa9, 0x35, 0x8c, 0xbf, 0xd1, 0x73, 0x11, 0x85, 0x25, + 0x0a, 0xab, 0xaa, 0x94, 0x16, 0x88, 0x21, 0x43, 0x1f, 0xa0, 0x9a, 0x43, 0x29, 0x4e, 0x53, 0xf8, + 0xbe, 0xe0, 0xcc, 0x10, 0x9d, 0x91, 0x4f, 0x9e, 0xda, 0x7a, 0xb9, 0x09, 0x69, 0x07, 0xc8, 0x6c, + 0xca, 0xaa, 0x5d, 0x10, 0x86, 0x37, 0xa3, 0x70, 0x89, 0x68, 0xec, 0x69, 0xcc, 0x92, 0x4f, 0x30, + 0x27, 0x42, 0x68, 0x02, 0x2c, 0xbc, 0xe0, 0xc8, 0x82, 0x9b, 0xc3, 0x4d, 0x63, 0x7a, 0xeb, 0x7b, + 0x54, 0x74, 0x2a, 0x5f, 0x16, 0x22, 0x00, 0x91, 0x38, 0xb6, 0x50, 0x3b, 0x3a, 0x76, 0x27, 0x2c, + 0x03, 0xb8, 0xe0, 0x9b, 0x77, 0x6f, 0x6d, 0x1a, 0x23, 0x22, 0x75, 0x2d, 0x2e, 0xb0, 0x0a, 0xe7, + 0x0e, 0x8d, 0xca, 0x8e, 0x42, 0x91, 0x30, 0x35, 0xae, 0x39, 0xee, 0xf7, 0x2e, 0xe9, 0xb6, 0x4a, + 0xae, 0xe1, 0x16, 0x38, 0x78, 0x2b, 0xdb, 0xc8, 0x88, 0xdb, 0x53, 0x4a, 0x39, 0x21, 0xd1, 0x67, + 0x50, 0x95, 0xa1, 0xe7, 0x60, 0x1f, 0xab, 0xab, 0xb4, 0x29, 0x4d, 0xd3, 0xae, 0xc7, 0xa2, 0x5b, + 0xa0, 0xf0, 0x03, 0x66, 0xcb, 0xf7, 0xeb, 0x6b, 0x4e, 0x50, 0x6f, 0xe3, 0xa6, 0xf6, 0xb4, 0x3d, + 0x8d, 0x1c, 0x1b, 0x83, 0xa5, 0xed, 0xc2, 0x2e, 0xc1, 0x22, 0x63, 0x03, 0x99, 0xc6, 0x72, 0xe5, + 0x7a, 0x3d, 0x06, 0xcf, 0x44, 0xf2, 0xb1, 0x36, 0xf8, 0x88, 0x31, 0x55, 0xf7, 0x23, 0xc1, 0x97, + 0x02, 0x67, 0xa3, 0xe7, 0x3c, 0x43, 0xb4, 0x4b, 0xc8, 0x76, 0xd8, 0xf2, 0x3a, 0x1d, 0x08, 0x68, + 0xfb, 0xaa, 
0x10, 0x0d, 0x95, 0x85, 0x65, 0x6a, 0xbd, 0x90, 0x4c, 0x94, 0x93, 0x7b, 0x93, 0xc2, + 0x10, 0x2e, 0x67, 0x7d, 0x7f, 0xee, 0xde, 0x3a, 0x11, 0xf5, 0x9d, 0xcb, 0xac, 0x4f, 0xf8, 0x44, + 0x64, 0xe1, 0x80, 0xc2, 0x3e, 0x61, 0x3e, 0x7e, 0xed, 0xcf, 0x8c, 0xe8, 0xac, 0x87, 0x78, 0x78, + 0xdf, 0xd0, 0xe3, 0xd0, 0x46, 0xeb, 0x69, 0xa2, 0x8a, 0xa6, 0x29, 0x72, 0xbb, 0x5c, 0xca, 0x98, + 0xf4, 0xe5, 0x54, 0xd4, 0x5d, 0xcd, 0xb2, 0x8f, 0x63, 0xa2, 0x96, 0xbe, 0x3f, 0x87, 0x78, 0xf3, + 0x98, 0x49, 0x7e, 0x0e, 0x8f, 0xb8, 0x96, 0x64, 0xe3, 0x57, 0xd9, 0x03, 0xa4, 0x23, 0x76, 0x45, + 0xbf, 0x96, 0x66, 0x23, 0xb0, 0xa1, 0xe6, 0xea, 0xe3, 0x60, 0x1f, 0x2a, 0x5c, 0x9e, 0x96, 0xe6, + 0xb5, 0xa1, 0x5d, 0x98, 0xc7, 0x67, 0x21, 0x69, 0x15, 0x81, 0x80, 0x68, 0x9d, 0x81, 0x3f, 0x90, + 0x83, 0x14, 0xac, 0x76, 0x48, 0x83, 0x64, 0x8f, 0x5e, 0x43, 0xee, 0xe8, 0x97, 0x0d, 0xd5, 0xc0, + 0x2f, 0xca, 0xf1, 0xad, 0xd8, 0x05, 0xc5, 0xac, 0x51, 0x71, 0xf7, 0x4d, 0xcd, 0x17, 0xb3, 0x7e, + 0x90, 0xc5, 0x2d, 0x1a, 0x39, 0x31, 0x7e, 0x19, 0x06, 0x66, 0xec, 0x63, 0x9d, 0x83, 0x24, 0xd4, + 0x38, 0x71, 0x4c, 0xae, 0xcd, 0x91, 0xc0, 0x97, 0x2c, 0xdd, 0xcb, 0x30, 0xfc, 0x54, 0x63, 0x11, + 0xa5, 0xe8, 0x1b, 0x0c, 0x9d, 0x61, 0xbf, 0xcc, 0x2b, 0xae, 0x8b, 0x16, 0x84, 0xb8, 0x9c, 0x78, + 0x11, 0x05, 0x17, 0xb2, 0xb6, 0x5f, 0x5f, 0xa1, 0xf2, 0x98, 0x92, 0x43, 0xd2, 0xfe, 0xc1, 0x04, + 0xcd, 0xe7, 0xac, 0x05, 0x8e, 0x31, 0x19, 0xfe, 0x4e, 0xd5, 0xd4, 0xd8, 0xb6, 0x9f, 0x8c, 0xc9, + 0xb3, 0x0a, 0x44, 0x26, 0x1c, 0xda, 0xe7, 0x1a, 0x31, 0xc1, 0x12, 0x94, 0x87, 0x94, 0x22, 0x6b, + 0xfc, 0x8c, 0x90, 0x46, 0x3d, 0x78, 0xf2, 0xd7, 0x7f, 0xd0, 0xcc, 0x5c, 0x10, 0x86, 0x55, 0x9f, + 0xb2, 0xba, 0x5b, 0xea, 0xac, 0x57, 0x47, 0x7d, 0xd8, 0x66, 0xdd, 0xc4, 0x6f, 0xe6, 0x53, 0xda, + 0x46, 0xa5, 0x48, 0x90, 0x78, 0xf9, 0xd9, 0x71, 0x06, 0x80, 0xd1, 0xe4, 0x2c, 0xbc, 0x29, 0xe5, + 0x7b, 0xaf, 0x31, 0xa6, 0x43, 0x3e, 0x41, 0xc0, 0xb7, 0x50, 0x3f, 0xc7, 0xa5, 0x46, 0xcc, 0xdc, + 0x71, 0xe2, 0x7e, 0xd7, 0xa9, 0xd3, 0x94, 0xea, 0x51, 0xd8, 0x70, 0xba, 0xdb, 0xac, 0xbd, 0xe7, + 0xbc, 0xb6, 0x37, 0xdd, 0xfe, 0x23, 0x77, 0x59, 0xa8, 0x69, 0xf1, 0xd5, 0xdb, 0x7d, 0x95, 0x29, + 0x22, 0x91, 0x66, 0x3a, 0x2f, 0xd5, 0x92, 0xc5, 0x13, 0x9f, 0xf9, 0x98, 0xa1, 0x1a, 0x9e, 0xe8, + 0xff, 0x3c, 0x18, 0x2b, 0xf6, 0x0c, 0xef, 0x5b, 0x67, 0x56, 0x9f, 0xc5, 0x2a, 0x31, 0xd3, 0x65, + 0x51, 0xb2, 0xf5, 0x60, 0x95, 0x64, 0xca, 0xa2, 0x52, 0xd4, 0xf3, 0x76, 0x16, 0x5a, 0x93, 0x1e, + 0x83, 0x50, 0x25, 0xc0, 0x3a, 0xf1, 0x00, 0x98, 0x12, 0x50, 0x2e, 0x1c, 0x85, 0xe6, 0x9c, 0x5e, + 0x74, 0xc5, 0x68, 0x21, 0x03, 0x83, 0x8b, 0x21, 0xf9, 0xcc, 0x42, 0x82, 0x60, 0x63, 0x8d, 0xac, + 0x09, 0xf7, 0x78, 0x37, 0x3a, 0xa4, 0x84, 0x5a, 0xeb, 0x0a, 0xbe, 0x50, 0x48, 0xb9, 0x1a, 0xf2, + 0xd7, 0xc2, 0x8f, 0x01, 0xaf, 0x92, 0xae, 0x7a, 0x5f, 0x58, 0xa5, 0xc4, 0xb8, 0xbc, 0xac, 0x57, + 0x5b, 0x5a, 0xbf, 0xed, 0x0e, 0xe3, 0x45, 0xab, 0x0f, 0x62, 0x9e, 0x00, 0xa4, 0xca, 0x9b, 0x7e, + 0xee, 0x22, 0x69, 0x03, 0x14, 0xe5, 0xfb, 0x50, 0x0b, 0xcb, 0x41, 0xdc, 0xdc, 0x17, 0xe5, 0xfe, + 0x54, 0xf5, 0x12, 0x6f, 0x02, 0xd2, 0x5c, 0x23, 0x4a, 0x4f, 0x99, 0x89, 0xe3, 0xab, 0x3b, 0x25, + 0xa4, 0xef, 0x0f, 0x04, 0x00, 0x15, 0x74, 0x31, 0xf0, 0xaf, 0xb4, 0x24, 0x0e, 0xe8, 0x5e, 0xc9, + 0x0c, 0x11, 0x36, 0x9f, 0x92, 0x4c, 0x71, 0xa8, 0xe9, 0x5e, 0x71, 0x1a, 0x54, 0xf3, 0xf7, 0x25, + 0x73, 0x67, 0x25, 0xb4, 0x9f, 0x58, 0x7b, 0x04, 0x69, 0xe1, 0x08, 0xda, 0x9a, 0x4c, 0x20, 0x27, + 0xcd, 0xb8, 0xfd, 0x24, 0xd0, 0x2b, 0x68, 0x04, 0x6c, 0xa6, 0x13, 0xe9, 0x6b, 0xc7, 0x4b, 0x95, + 0x40, 0x6b, 0x22, 0x05, 0xc3, 0xa5, 
0x3c, 0xa1, 0x9a, 0x4f, 0xfb, 0x92, 0xcf, 0x53, 0x40, 0xb0, + 0x63, 0x5e, 0xbc, 0x33, 0xa3, 0x86, 0xbf, 0x90, 0xc6, 0xfa, 0x5a, 0x03, 0xdf, 0x83, 0xb5, 0x20, + 0x40, 0x4f, 0x45, 0x90, 0xd0, 0x9b, 0x8d, 0x6f, 0x07, 0x9b, 0x17, 0xe2, 0xd5, 0x6a, 0xf8, 0x35, + 0xdd, 0xc1, 0x92, 0x56, 0x00, 0x49, 0xbb, 0x83, 0xf8, 0x81, 0xb6, 0x36, 0x82, 0x46, 0x49, 0xf1, + 0x54, 0x21, 0x0a, 0x47, 0x0e, 0xfa, 0x75, 0x72, 0xcf, 0x74, 0x9f, 0x08, 0x3c, 0x4b, 0x07, 0xe6, + 0x95, 0x86, 0x24, 0xc8, 0x6f, 0xd2, 0x1b, 0x77, 0x45, 0x74, 0x8c, 0xf9, 0x06, 0x8a, 0xdc, 0x11, + 0xdf, 0xc5, 0xb5, 0x2f, 0x28, 0xfa, 0x2d, 0xac, 0x36, 0xc5, 0xd2, 0xa8, 0x52, 0x00, 0xf1, 0x72, + 0xf6, 0x18, 0xba, 0xb1, 0x04, 0xe4, 0xd3, 0x72, 0xb7, 0xea, 0xe1, 0x25, 0x29, 0x1a, 0x8a, 0x72, + 0x75, 0x5c, 0x9b, 0xa2, 0xe7, 0x60, 0x7b, 0x68, 0x09, 0x41, 0x48, 0xde, 0x19, 0x74, 0xc4, 0xdb, + 0x60, 0xf9, 0x5a, 0xa7, 0xb1, 0xf0, 0x75, 0xcf, 0x1b, 0x03, 0x1e, 0x04, 0xbc, 0x0e, 0xec, 0x20, + 0xb1, 0xbd, 0x1e, 0xf0, 0xc0, 0xca, 0x82, 0x57, 0x0e, 0x70, 0x13, 0xdc, 0xc2, 0xfa, 0xd1, 0x69, + 0x76, 0x89, 0x9d, 0xbd, 0x8c, 0x23, 0x2c, 0xaa, 0x58, 0x58, 0x20, 0xc9, 0x6c, 0xc0, 0xac, 0xde, + 0x26, 0x0e, 0x77, 0x8f, 0xdf, 0x97, 0x3b, 0x1b, 0x23, 0x5d, 0x3e, 0xe8, 0xef, 0xb3, 0x78, 0x53, + 0x9b, 0x7f, 0x1b, 0xb7, 0x31, 0x1d, 0xe3, 0xba, 0xc0, 0x0b, 0x31, 0x48, 0x88, 0x6d, 0x7f, 0xe1, + 0xfc, 0x12, 0xd9, 0xb8, 0x7e, 0x44, 0xa8, 0x8f, 0x75, 0xd9, 0x89, 0x53, 0x4f, 0xa3, 0x98, 0x8b, + 0x6d, 0x9c, 0xfe, 0x81, 0xb9, 0x30, 0x83, 0xf9, 0x9d, 0x85, 0x73, 0xa2, 0x76, 0x82, 0x48, 0x33, + 0x4c, 0xdd, 0x0b, 0x39, 0xf3, 0x66, 0xf4, 0x7c, 0x39, 0x74, 0x62, 0xa2, 0x1b, 0x2b, 0xfc, 0xeb, + 0x97, 0xc2, 0x2f, 0x07, 0xca, 0xb3, 0x59, 0x41, 0xef, 0x3e, 0xf6, 0x24, 0x6a, 0x4d, 0x67, 0x6f, + 0x31, 0x86, 0xb1, 0xc6, 0x17, 0x06, 0xa7, 0x4f, 0x62, 0xa4, 0x79, 0x1c, 0xd2, 0x55, 0xfe, 0xac, + 0xfb, 0x91, 0xb2, 0x04, 0x4a, 0x16, 0x4f, 0x37, 0xd0, 0xa4, 0x51, 0x1f, 0x9c, 0x08, 0x25, 0x16, + 0x54, 0x39, 0xf3, 0x5e, 0xbc, 0x4f, 0xc1, 0x9b, 0x06, 0x1c, 0x19, 0xc9, 0x2f, 0x9c, 0x30, 0x47, + 0xdb, 0xf9, 0x13, 0xf9, 0xb9, 0x7f, 0x13, 0x75, 0xc3, 0xb3, 0xc5, 0xd4, 0x7c, 0x8e, 0x0d, 0xf3, + 0x34, 0x03, 0x5a, 0x62, 0x24, 0x59, 0xed, 0x02, 0x56, 0x94, 0x5e, 0x0e, 0x80, 0xe0, 0x2d, 0xd1, + 0x92, 0xc1, 0xb9, 0x18, 0x80, 0x01, 0x45, 0x15, 0x84, 0x81, 0x84, 0x9f, 0x19, 0x1e, 0xf6, 0xc4, + 0xb1, 0x98, 0x21, 0x57, 0xba, 0x18, 0x9f, 0x0e, 0x13, 0x8e, 0x8d, 0xe9, 0xd6, 0x05, 0x93, 0x4d, + 0xf9, 0x80, 0x07, 0x2b, 0x48, 0x22, 0x4d, 0x51, 0xa2, 0xfe, 0x80, 0x3e, 0x80, 0x18, 0xd6, 0x20, + 0x79, 0xae, 0x5b, 0x48, 0x8a, 0x61, 0x2a, 0x22, 0xf8, 0x64, 0xa9, 0xb4, 0xe4, 0x40, 0x0d, 0x4a, + 0xcf, 0x37, 0xd2, 0x9c, 0xc7, 0xbe, 0xde, 0x59, 0xe6, 0x7a, 0xf7, 0x13, 0x3f, 0x83, 0xc0, 0x17, + 0xe8, 0x0b, 0xc3, 0x5d, 0x5f, 0xda, 0xec, 0xb7, 0x57, 0x1f, 0x87, 0xed, 0x4c, 0xc1, 0x3d, 0xed, + 0xcd, 0x26, 0xcd, 0x25, 0x07, 0x8c, 0x66, 0x46, 0x0b, 0x22, 0x98, 0x9b, 0x71, 0x63, 0xff, 0x05, + 0x6b, 0x69, 0x72, 0xdb, 0x76, 0x55, 0xb2, 0x49, 0xd5, 0xdc, 0xe6, 0xaf, 0xa5, 0xed, 0x6a, 0x8a, + 0x6c, 0x8e, 0x5b, 0xa0, 0x34, 0xa0, 0x95, 0xf4, 0xae, 0x06, 0x47, 0x4c, 0x5b, 0x61, 0x65, 0x7d, + 0x46, 0xb7, 0x05, 0x57, 0xa5, 0xe2, 0x3b, 0xb0, 0x6b, 0x4f, 0x54, 0xae, 0xec, 0x25, 0x01, 0xca, + 0x94, 0x10, 0x19, 0x82, 0x13, 0x1b, 0x40, 0xed, 0x80, 0xa2, 0xee, 0xde, 0x77, 0xa0, 0x98, 0xaf, + 0x69, 0x05, 0xf4, 0x01, 0x28, 0x0f, 0x28, 0xf8, 0xf7, 0x40, 0xde, 0x2e, 0xba, 0x57, 0x7b, 0x86, + 0xff, 0xe7, 0x41, 0x76, 0xa2, 0x98, 0xf2, 0xaf, 0x85, 0x83, 0x12, 0x7e, 0x81, 0xd9, 0x8f, 0x24, + 0x9a, 0x54, 0xc0, 0x60, 0xc2, 0x5d, 0x8e, 0xc6, 0x91, 0x27, 
0x89, 0xb8, 0xda, 0x48, 0xc3, 0x99, + 0xfa, 0xeb, 0x1f, 0xde, 0xff, 0x9d, 0x5f, 0x89, 0x4d, 0x1c, 0xe7, 0xdc, 0xac, 0x31, 0x27, 0x17, + 0xa0, 0x0a, 0x8f, 0x7c, 0xa0, 0x4c, 0x3a, 0xca, 0x60, 0x63, 0x39, 0x75, 0x21, 0x44, 0xb9, 0x46, + 0x8b, 0x68, 0x13, 0x50, 0xf8, 0x60, 0x5d, 0xdb, 0x3a, 0xf3, 0xe9, 0x8b, 0xed, 0x7b, 0x8b, 0x9c, + 0x1a, 0x74, 0x0c, 0xee, 0x43, 0x25, 0x2f, 0x31, 0xce, 0xd0, 0xa2, 0xd6, 0x39, 0x83, 0x90, 0x6c, + 0x20, 0xf8, 0xcc, 0xfe, 0xa6, 0x9a, 0x61, 0x35, 0x9e, 0x4d, 0x0b, 0x4b, 0xc8, 0x41, 0xbd, 0x38, + 0x96, 0xfc, 0x4d, 0xaf, 0xc7, 0x4b, 0xda, 0x28, 0x22, 0x3c, 0x0d, 0x09, 0x77, 0x95, 0x1c, 0x53, + 0x96, 0x38, 0x94, 0x0c, 0x3c, 0x79, 0xfc, 0xf5, 0xa3, 0x31, 0xf8, 0xf0, 0xae, 0xa8, 0xee, 0x1a, + 0x09, 0x8f, 0x84, 0x8b, 0x23, 0x0f, 0xcc, 0x36, 0x20, 0xa0, 0xe9, 0x16, 0x0a, 0x56, 0x2c, 0xdf, + 0x17, 0xc2, 0xc5, 0x9d, 0xb7, 0xbf, 0x98, 0xce, 0x2c, 0x29, 0xb0, 0xf6, 0xa1, 0x05, 0xf2, 0x0f, + 0x9e, 0x68, 0xe0, 0x18, 0xba, 0xf3, 0x23, 0x8d, 0x19, 0xf5, 0x23, 0xf5, 0xb4, 0x81, 0xb0, 0x11, + 0x34, 0x10, 0xe7, 0xb7, 0xff, 0xdd, 0xe7, 0x5c, 0x1a, 0x85, 0x5a, 0xa2, 0x46, 0x84, 0xd2, 0xea, + 0x6a, 0xc8, 0x2c, 0xbf, 0xd0, 0xa6, 0x69, 0x55, 0x6f, 0x13, 0x62, 0x74, 0x4a, 0x66, 0xfc, 0xf0, + 0xc0, 0x4f, 0xb8, 0x09, 0xc9, 0x5c, 0x90, 0xd6, 0x36, 0x19, 0xaa, 0xa5, 0xdf, 0x89, 0x94, 0x85, + 0x82, 0x39, 0x61, 0x77, 0x9a, 0xd9, 0xdf, 0x35, 0xca, 0xf3, 0x16, 0x21, 0x85, 0xeb, 0xb6, 0xbe, + 0x22, 0xbc, 0x27, 0x65, 0xda, 0x22, 0xcf, 0x31, 0xf1, 0x29, 0x5e, 0xdf, 0xf3, 0x76, 0x1d, 0xa1, + 0x0a, 0x77, 0x4a, 0x39, 0xae, 0x36, 0xcb, 0x2b, 0xd8, 0xbb, 0xb3, 0x85, 0xed, 0x7a, 0xa4, 0x67, + 0x89, 0xa4, 0x92, 0x51, 0xa4, 0x34, 0xa5, 0xc1, 0x8e, 0xaa, 0xf2, 0xf2, 0x63, 0x01, 0x60, 0x2a, + 0xa8, 0x6e, 0x0d, 0x57, 0xf6, 0xd7, 0x8a, 0x6c, 0x81, 0x05, 0x32, 0xe8, 0x88, 0x02, 0x3e, 0x97, + 0x17, 0x46, 0x3c, 0xdd, 0x5c, 0xc2, 0xd2, 0xe2, 0x39, 0x31, 0xd4, 0x7c, 0x32, 0xe8, 0x98, 0x2d, + 0x51, 0xb2, 0x00, 0x91, 0xb1, 0x61, 0x6d, 0x7f, 0x1f, 0x30, 0x84, 0xb4, 0x26, 0xa1, 0xa2, 0x97, + 0xd2, 0x26, 0x16, 0xad, 0x15, 0x62, 0x64, 0x6e, 0x03, 0xe9, 0x42, 0x46, 0x00, 0xbc, 0x26, 0xdf, + 0xc2, 0x9a, 0xc7, 0x58, 0x58, 0xe5, 0x00, 0xe5, 0x3a, 0x45, 0x12, 0x7c, 0xd5, 0x72, 0xf8, 0x51, + 0xb2, 0xba, 0xc5, 0xa8, 0x1e, 0xd3, 0xf2, 0xbf, 0x5f, 0x74, 0xaf, 0xb5, 0xcf, 0xbf, 0xd7, 0x2b, + 0x5b, 0x82, 0x28, 0xf6, 0x31, 0xf9, 0x2c, 0xdb, 0xa4, 0x65, 0x8b, 0x87, 0xcc, 0x63, 0xf9, 0x08, + 0xfa, 0xa1, 0x75, 0x8e, 0x0b, 0x83, 0xfb, 0xd4, 0x2f, 0xbe, 0x21, 0x1b, 0x12, 0x15, 0x2b, 0x08, + 0xf0, 0xcf, 0xd4, 0x5d, 0xda, 0x36, 0xb4, 0xdb, 0x79, 0x03, 0x89, 0xe0, 0x9c, 0x0b, 0xe6, 0x89, + 0x0c, 0x9c, 0xab, 0x76, 0x13, 0x58, 0x14, 0x1f, 0x51, 0x5f, 0x18, 0x9f, 0x56, 0x24, 0xf0, 0xd0, + 0x1a, 0xb5, 0x2f, 0x46, 0xcd, 0xed, 0xfa, 0x1a, 0x00, 0xc8, 0xeb, 0xaf, 0xb0, 0xb3, 0x5a, 0xed, + 0xa3, 0x0a, 0xb1, 0x48, 0xf6, 0x5f, 0x0e, 0x68, 0x9e, 0x35, 0x9a, 0xff, 0x98, 0xb0, 0x2a, 0x8f, + 0x32, 0xa2, 0x2f, 0x5f, 0xe1, 0x3e, 0xa1, 0x62, 0xc2, 0x82, 0x29, 0x12, 0x89, 0xbe, 0x7b, 0xee, + 0x5a, 0x2c, 0xae, 0x9f, 0x22, 0xfa, 0x73, 0x03, 0x05, 0x1f, 0x63, 0xf2, 0xe2, 0xc8, 0x58, 0x5b, + 0xc2, 0xa4, 0xa7, 0x82, 0x3a, 0xdd, 0x6e, 0x47, 0x4e, 0x6e, 0xdf, 0xa5, 0x5e, 0x1c, 0x77, 0x61, + 0xdf, 0xd0, 0xb7, 0x44, 0x7d, 0xe9, 0x23, 0xf5, 0x3a, 0xef, 0xc5, 0x26, 0x9b, 0xd8, 0x27, 0x55, + 0x32, 0xca, 0xf5, 0xb2, 0x70, 0xf4, 0x04, 0xb0, 0x06, 0xaf, 0x51, 0xd7, 0xc0, 0x9e, 0x7a, 0x24, + 0x1a, 0x2d, 0xbb, 0x17, 0x74, 0xd7, 0x52, 0x2a, 0xf5, 0x36, 0xb0, 0x29, 0xad, 0xc9, 0x95, 0xa7, + 0x10, 0x91, 0x5e, 0x69, 0xab, 0x4b, 0xbf, 0x1f, 0x2f, 0xd0, 0xc1, 0x4b, 0x58, 0x10, 
0xef, 0xf4, + 0xe2, 0xe1, 0xb5, 0x69, 0x20, 0x82, 0xb4, 0xc5, 0xb5, 0x20, 0x83, 0x84, 0xb1, 0x12, 0xc6, 0xaa, + 0x11, 0x3a, 0x1f, 0xe5, 0x6e, 0xf8, 0xfa, 0x67, 0x2b, 0x3d, 0x64, 0x9d, 0x6d, 0x47, 0x99, 0xdf, + 0x43, 0x0a, 0xe8, 0x2a, 0x61, 0x2d, 0x89, 0xb4, 0x88, 0xa7, 0x01, 0x0e, 0x91, 0x6f, 0xb7, 0x69, + 0xf3, 0x3b, 0x35, 0x8e, 0xaa, 0xdf, 0x55, 0x85, 0x44, 0xa0, 0xdc, 0x31, 0xd4, 0x54, 0xc4, 0xa4, + 0x5a, 0x5e, 0x88, 0x7b, 0x8d, 0xb0, 0x30, 0x1f, 0xed, 0x33, 0xc9, 0xad, 0x5f, 0x82, 0xd2, 0xc7, + 0x35, 0x0a, 0x2e, 0xf5, 0xc6, 0x43, 0x9a, 0xd7, 0x13, 0x60, 0xf9, 0xeb, 0x0b, 0xd8, 0x99, 0xe0, + 0x2d, 0x4a, 0x6d, 0x9b, 0x79, 0xfa, 0xaa, 0x62, 0x86, 0xf7, 0xca, 0x82, 0x9b, 0x1a, 0x49, 0x8b, + 0x75, 0x0a, 0xdc, 0xd9, 0x1d, 0xac, 0x9a, 0x04, 0xf2, 0xe6, 0x41, 0x83, 0xad, 0x7a, 0xc0, 0x8b, + 0xaf, 0xa2, 0x45, 0x10, 0x48, 0x78, 0xf1, 0x93, 0x8e, 0xb3, 0x02, 0xc6, 0x21, 0xf7, 0x69, 0xfb, + 0xc2, 0x79, 0x35, 0x71, 0x40, 0x4b, 0x0b, 0x1a, 0xe0, 0x75, 0x89, 0xd3, 0x6b, 0x2e, 0x6e, 0x8d, + 0xa0, 0xc2, 0x86, 0x09, 0xff, 0xab, 0xe7, 0xd9, 0x9b, 0x7a, 0x23, 0xaa, 0x5c, 0xe8, 0x58, 0x16, + 0x43, 0xa4, 0xdc, 0xde, 0x79, 0xe3, 0xab, 0xbd, 0xab, 0x6d, 0x60, 0x75, 0xe2, 0xfa, 0x6a, 0xf2, + 0x5d, 0x10, 0xc2, 0x7a, 0xf5, 0xc5, 0x7e, 0xc9, 0xa0, 0xd3, 0xe8, 0x10, 0x07, 0x52, 0x76, 0x1d, + 0x81, 0x26, 0x24, 0xcd, 0x9f, 0xdc, 0x89, 0x0f, 0x6e, 0xc1, 0x12, 0x9c, 0xdc, 0x61, 0xc7, 0x67, + 0x0f, 0x6e, 0xd8, 0x66, 0x35, 0xc2, 0x61, 0x24, 0x7f, 0xee, 0x10, 0x4b, 0x9b, 0xd9, 0x55, 0xbb, + 0xb5, 0xbb, 0xf5, 0xf0, 0xed, 0x45, 0x28, 0xff, 0x2f, 0x80, 0xd2, 0x30, 0xb0, 0xc2, 0xca, 0xbc, + 0xdd, 0x03, 0x91, 0x91, 0xb9, 0xc9, 0xe5, 0x1e, 0x9e, 0x31, 0x0b, 0x88, 0x5c, 0x10, 0xe9, 0xaf, + 0x03, 0x23, 0x29, 0x29, 0x0b, 0xd5, 0x16, 0x1a, 0x32, 0xfa, 0xe6, 0xd5, 0xab, 0xa9, 0x5b, 0x25, + 0xac, 0xc4, 0x3e, 0x5f, 0xd4, 0xde, 0xc6, 0x87, 0xaa, 0x99, 0x68, 0xe9, 0xed, 0x0a, 0xae, 0x2e, + 0x77, 0xbb, 0x86, 0x82, 0x3a, 0xd6, 0x1d, 0x9c, 0x63, 0x0f, 0xc1, 0x13, 0x07, 0xf5, 0x1a, 0xfd, + 0x0a, 0x9b, 0xaf, 0xe2, 0x6b, 0x8e, 0xf5, 0x89, 0xc2, 0x6f, 0xb7, 0xde, 0x24, 0x2f, 0x3c, 0x82, + 0xe0, 0x59, 0x0b, 0x70, 0x8c, 0x17, 0xba, 0x5a, 0xee, 0x2a, 0xa3, 0x13, 0x03, 0x63, 0x11, 0x85, + 0x35, 0x18, 0x6d, 0xe0, 0xfd, 0xfc, 0x17, 0xad, 0x05, 0x72, 0x3b, 0x41, 0x3e, 0xe4, 0x67, 0x91, + 0x25, 0xda, 0x77, 0xf2, 0x57, 0x0f, 0x1f, 0x36, 0xe3, 0xbc, 0x16, 0x41, 0x4a, 0xe4, 0xe8, 0xac, + 0x57, 0xe6, 0x01, 0x05, 0x0c, 0x36, 0xae, 0x20, 0x5d, 0xa5, 0x6f, 0x88, 0x80, 0x05, 0x07, 0x54, + 0xf8, 0x22, 0xbb, 0xdd, 0xab, 0x0e, 0x18, 0x4f, 0x6d, 0xaa, 0xd8, 0x82, 0x28, 0xaf, 0x1e, 0x3c, + 0xd9, 0x17, 0x3c, 0xd8, 0x7b, 0xe2, 0x8a, 0xa0, 0x2b, 0x06, 0x73, 0x02, 0x3b, 0x34, 0x83, 0xb5, + 0xcc, 0xa6, 0xfc, 0x8a, 0xbb, 0xde, 0x00, 0x1b, 0x49, 0xcf, 0xef, 0x32, 0x1c, 0x35, 0xf6, 0xe0, + 0x5f, 0xc4, 0x90, 0x24, 0xcc, 0x5f, 0x51, 0x79, 0x03, 0x3d, 0x3d, 0xc0, 0x3c, 0xca, 0xe8, 0x20, + 0x5a, 0xbd, 0x77, 0x1f, 0x07, 0x32, 0x9c, 0xd7, 0x87, 0x3d, 0xfa, 0xe5, 0xfd, 0x4f, 0xb8, 0xa3, + 0xc9, 0x74, 0x15, 0xf8, 0x11, 0x80, 0x01, 0xa7, 0xe7, 0xaa, 0x4c, 0x01, 0x95, 0xb3, 0x27, 0xf3, + 0xe2, 0xb5, 0x6f, 0x87, 0x28, 0xa6, 0x3b, 0x86, 0x7b, 0xa3, 0x3f, 0x7f, 0x97, 0x43, 0xde, 0xdf, + 0xd3, 0x80, 0x67, 0x5d, 0xfc, 0x3c, 0xe2, 0x01, 0x08, 0x7b, 0x45, 0x39, 0xcf, 0xb5, 0x12, 0xf2, + 0x5e, 0x32, 0xa5, 0x07, 0x9c, 0xcd, 0x57, 0xb2, 0x9a, 0x68, 0xbc, 0x52, 0xc1, 0x09, 0xe6, 0x41, + 0xc3, 0x46, 0x12, 0x3a, 0xcf, 0xcb, 0xda, 0xe3, 0x62, 0x78, 0xec, 0xe0, 0x67, 0x69, 0xf7, 0xcd, + 0xaf, 0x60, 0xe5, 0xae, 0xa2, 0x51, 0x03, 0xcf, 0xcc, 0xdb, 0x63, 0x3b, 0xbc, 0xed, 0x8c, 0x58, + 0x4f, 0x67, 
0xf0, 0x6f, 0xd1, 0xc2, 0x6a, 0x03, 0xf2, 0x56, 0x15, 0x5f, 0xd4, 0xcc, 0x55, 0xfc, + 0x8c, 0xf2, 0xb6, 0x54, 0x89, 0x4a, 0x88, 0xff, 0x40, 0x1a, 0x9f, 0x86, 0xcc, 0x83, 0x9d, 0x4e, + 0xa3, 0xb4, 0xd1, 0x5d, 0x57, 0x0f, 0xc8, 0xff, 0x15, 0x58, 0x05, 0xe9, 0x49, 0x7a, 0x5e, 0x62, + 0x46, 0x20, 0xd9, 0x79, 0xd1, 0x63, 0xc0, 0x7c, 0xb3, 0x0e, 0x43, 0x7e, 0x02, 0x6c, 0x39, 0xeb, + 0x7e, 0x5a, 0x36, 0x72, 0xaa, 0xc7, 0xe6, 0x8f, 0x6d, 0xff, 0x52, 0x9b, 0xbf, 0x45, 0xab, 0xa3, + 0x9f, 0x69, 0x87, 0x55, 0x28, 0x6b, 0xaf, 0xa3, 0x82, 0x3f, 0xfc, 0x6c, 0x5d, 0x17, 0x38, 0x03, + 0x02, 0x0f, 0xdc, 0x51, 0xc8, 0x93, 0x0b, 0x56, 0x15, 0x36, 0x50, 0xbe, 0x64, 0xef, 0xec, 0xda, + 0x20, 0xd6, 0x28, 0x45, 0xd2, 0xae, 0xd0, 0x91, 0xf6, 0x70, 0xef, 0x31, 0x82, 0x85, 0xe2, 0xf3, + 0x90, 0x65, 0x6e, 0x12, 0xf2, 0x6c, 0xa5, 0xdc, 0x79, 0x88, 0xb9, 0xf7, 0xee, 0xf5, 0x24, 0x56, + 0x9e, 0x56, 0x64, 0x69, 0x45, 0x53, 0xd5, 0x35, 0x21, 0x98, 0x89, 0x2a, 0x78, 0x38, 0x3b, 0xd7, + 0x3f, 0xf6, 0x8f, 0x6e, 0x30, 0x99, 0x24, 0x36, 0xb3, 0x56, 0x46, 0x42, 0xdb, 0xee, 0xfc, 0x08, + 0x16, 0x05, 0xf3, 0x12, 0x30, 0x95, 0xa7, 0xcd, 0xc1, 0xd1, 0x32, 0x4b, 0xf5, 0x29, 0xd6, 0xcc, + 0xd5, 0x0b, 0x1e, 0x56, 0x12, 0xf5, 0x75, 0x23, 0xd3, 0x82, 0x89, 0x59, 0xce, 0x6e, 0x02, 0x8c, + 0xbf, 0xb4, 0x13, 0xe3, 0x39, 0xe0, 0x8a, 0x0b, 0x31, 0x0e, 0xa5, 0x95, 0xb5, 0xd6, 0x8f, 0xd3, + 0x78, 0xe5, 0xd5, 0xf5, 0xa9, 0xf2, 0xdf, 0xf7, 0x2e, 0xf5, 0x51, 0x59, 0x66, 0x01, 0xf7, 0x88, + 0x30, 0x15, 0x52, 0xc3, 0x0d, 0x99, 0x1f, 0xff, 0x60, 0x25, 0x21, 0x6c, 0xc5, 0x55, 0xef, 0x6c, + 0xe1, 0x41, 0x85, 0x2f, 0x2e, 0xec, 0xdc, 0x88, 0x4b, 0xc7, 0xed, 0x45, 0x8f, 0x4a, 0x2c, 0x34, + 0xd0, 0x95, 0xbd, 0xc1, 0x7d, 0xbb, 0xb2, 0x2e, 0x94, 0x70, 0xce, 0x93, 0x7c, 0x94, 0xf4, 0xd8, + 0x6f, 0xbc, 0xb0, 0xb3, 0xe7, 0xfe, 0xf6, 0xf2, 0xe5, 0x78, 0xac, 0x82, 0xc3, 0xf3, 0x8a, 0x00, + 0x99, 0x5d, 0xcd, 0x5e, 0x75, 0xee, 0x78, 0x6f, 0xab, 0x3d, 0x4d, 0x2d, 0xf5, 0xb1, 0x6f, 0x1c, + 0x66, 0xcf, 0x59, 0x40, 0x77, 0x58, 0xb3, 0x89, 0x25, 0x15, 0x5d, 0xa9, 0x5d, 0x78, 0xac, 0xf6, + 0x86, 0x88, 0x1d, 0xc9, 0x42, 0x71, 0x3c, 0x92, 0xe1, 0x7b, 0xd8, 0x1b, 0x77, 0xd9, 0x98, 0xd5, + 0xc2, 0x1e, 0x61, 0x0d, 0x76, 0x66, 0x52, 0x56, 0xfa, 0x9e, 0x9d, 0x76, 0xf4, 0x5e, 0x6c, 0x89, + 0xdd, 0xfc, 0xca, 0x1a, 0x40, 0x8d, 0x39, 0x41, 0xf7, 0x4e, 0xfe, 0x4d, 0x4d, 0x9a, 0x67, 0xbc, + 0xc4, 0x5f, 0xbe, 0xa7, 0xc5, 0x48, 0x43, 0xe8, 0xdc, 0xb8, 0x20, 0xd4, 0xec, 0x6d, 0xb8, 0x6c, + 0x62, 0xeb, 0x55, 0xa6, 0x51, 0x2d, 0x07, 0x08, 0xd7, 0x16, 0xe0, 0xdb, 0x64, 0x93, 0x3c, 0x41, + 0x39, 0x2b, 0x16, 0x01, 0xbf, 0x34, 0x26, 0x6c, 0xe5, 0x74, 0x99, 0x4a, 0x2e, 0xf1, 0x24, 0x27, + 0x7a, 0xac, 0xa7, 0x63, 0x1f, 0x25, 0x14, 0xc7, 0x61, 0x7c, 0x98, 0x84, 0x9a, 0xf7, 0x72, 0xf0, + 0xe8, 0x07, 0x5e, 0x27, 0x25, 0x85, 0x11, 0x5d, 0x23, 0x5c, 0xd1, 0x2b, 0x67, 0xab, 0xdc, 0x21, + 0x52, 0x9f, 0x80, 0xf2, 0x88, 0x60, 0x02, 0x80, 0x1a, 0xca, 0x3f, 0x2d, 0xb4, 0xd5, 0x42, 0xe7, + 0x98, 0x93, 0xf5, 0x61, 0x8b, 0x06, 0x2e, 0xdd, 0x12, 0x11, 0xbb, 0x86, 0x7f, 0xf4, 0xc4, 0x50, + 0xcd, 0x06, 0xd5, 0xa6, 0xca, 0x76, 0xae, 0x27, 0xe3, 0xd1, 0x80, 0x20, 0xf1, 0x54, 0x99, 0x26, + 0x20, 0x39, 0x8d, 0xfe, 0xe3, 0x2b, 0x94, 0xbb, 0x41, 0x7d, 0x9f, 0x06, 0xd7, 0x0f, 0x5c, 0x88, + 0xb4, 0x46, 0x8a, 0x9c, 0x9e, 0x16, 0x2b, 0x68, 0xea, 0xd0, 0x4e, 0xf3, 0xd2, 0x3f, 0x5f, 0x29, + 0xa3, 0xe5, 0x09, 0x78, 0xe7, 0x2c, 0x29, 0x0d, 0x4b, 0x7f, 0x1f, 0xca, 0x69, 0x24, 0xdd, 0x8d, + 0x0e, 0x17, 0xf3, 0xf6, 0x1e, 0x36, 0x18, 0x60, 0xad, 0x92, 0x03, 0xc1, 0xa9, 0x1a, 0xa9, 0x86, + 0x6c, 0xe8, 0xc4, 0x1a, 0x35, 0x22, 
0x45, 0xaa, 0x24, 0x22, 0x25, 0xe3, 0x58, 0xf1, 0xfc, 0x41, + 0xd5, 0x47, 0xe1, 0x51, 0x96, 0xa6, 0x3c, 0x6d, 0xa8, 0xdc, 0x33, 0xf8, 0xd3, 0x55, 0xad, 0xdf, + 0xca, 0xfc, 0x4b, 0xbe, 0x5a, 0x4e, 0x82, 0x57, 0xf0, 0xed, 0xc4, 0x50, 0x14, 0xbc, 0x16, 0x37, + 0x84, 0x48, 0x0e, 0xe2, 0x76, 0xe7, 0x73, 0x5f, 0xb9, 0xf0, 0x91, 0xae, 0x4a, 0x31, 0xdd, 0x50, + 0x45, 0x51, 0xc9, 0xa1, 0x7b, 0xc3, 0xca, 0x9e, 0xe9, 0xa5, 0x47, 0xfc, 0xe9, 0x69, 0x1f, 0xfe, + 0x84, 0x83, 0x69, 0xbe, 0xf3, 0x41, 0xdb, 0xe7, 0xdc, 0x2a, 0xf6, 0xf9, 0xe6, 0xe0, 0x1a, 0x1f, + 0xf5, 0x4e, 0xbd, 0xc1, 0x45, 0x96, 0x5b, 0xee, 0xaa, 0x01, 0x5f, 0x4e, 0xc0, 0xd0, 0x18, 0xef, + 0xad, 0x56, 0xc6, 0x33, 0x98, 0xfd, 0x35, 0xf1, 0x8c, 0x28, 0x19, 0x73, 0x7a, 0xd5, 0x85, 0xad, + 0x4e, 0xd9, 0xe9, 0xb4, 0xbc, 0xe3, 0x83, 0x0d, 0xe4, 0x79, 0x13, 0x0b, 0x3d, 0xa6, 0xbc, 0x09, + 0x6c, 0x3f, 0x3e, 0x8b, 0x3d, 0x11, 0x70, 0x86, 0x9b, 0xfc, 0x42, 0xf3, 0xb4, 0x44, 0x1d, 0x74, + 0x63, 0xf0, 0xc0, 0x72, 0x74, 0x49, 0xe8, 0x8e, 0x58, 0xc7, 0xb8, 0x00, 0x4d, 0x03, 0x90, 0x5c, + 0x61, 0x11, 0xb9, 0xcd, 0x6c, 0x6e, 0x7c, 0x43, 0x1b, 0x0b, 0xad, 0x1f, 0x3d, 0x81, 0x95, 0xea, + 0xf7, 0x86, 0x4d, 0x6e, 0x90, 0xd9, 0xe4, 0x5e, 0x48, 0xaf, 0x2d, 0xcd, 0x9b, 0x06, 0xb7, 0x82, + 0x7e, 0x6b, 0x5b, 0x82, 0x42, 0xc2, 0xfc, 0x74, 0x38, 0x53, 0xb6, 0x08, 0xb2, 0xde, 0x56, 0xf2, + 0x27, 0x28, 0x61, 0x1b, 0x11, 0xe3, 0x75, 0x88, 0xf1, 0x18, 0xee, 0xc3, 0xb8, 0x98, 0x7c, 0x3b, + 0xe9, 0x31, 0x8b, 0x20, 0x9e, 0x5e, 0x87, 0x59, 0x03, 0x0a, 0x16, 0x5e, 0xa0, 0x11, 0xfb, 0x53, + 0x2f, 0x3f, 0xed, 0xa2, 0xf4, 0xda, 0x1e, 0xe3, 0x66, 0x0e, 0x73, 0x06, 0xa8, 0x77, 0x14, 0x07, + 0x0e, 0x4e, 0xd0, 0xfc, 0x56, 0x72, 0xe0, 0xd0, 0xd0, 0xdd, 0x69, 0x2d, 0x3e, 0xe8, 0x46, 0x1d, + 0x6a, 0xdb, 0x6b, 0x2c, 0x40, 0x59, 0x60, 0x68, 0xee, 0x1e, 0x61, 0x03, 0xf2, 0xca, 0xf1, 0xb2, + 0x8a, 0xb3, 0xb9, 0x86, 0xac, 0x0e, 0xf0, 0xed, 0x0d, 0xc9, 0x99, 0x17, 0xe1, 0x1a, 0x29, 0xbf, + 0xee, 0xd0, 0x0b, 0xaf, 0x82, 0x0f, 0xbb, 0x8b, 0x8f, 0x3e, 0x23, 0xb4, 0x4f, 0x48, 0x89, 0x6b, + 0xc6, 0x60, 0x0b, 0xa1, 0x16, 0xb9, 0xe5, 0x44, 0x44, 0x96, 0xd6, 0x46, 0x10, 0xe4, 0x76, 0x35, + 0xd1, 0x25, 0x0c, 0xf4, 0xfc, 0x81, 0xfb, 0xc6, 0x8f, 0x4e, 0x57, 0xa3, 0xdc, 0x2f, 0x0b, 0xa5, + 0x17, 0x88, 0xd7, 0x4c, 0x94, 0x95, 0x25, 0x8b, 0xa6, 0x6b, 0xee, 0xd4, 0x7b, 0x24, 0x3e, 0xf2, + 0x09, 0xab, 0x53, 0xc8, 0xac, 0x16, 0x48, 0x46, 0x07, 0xe8, 0x29, 0x46, 0x29, 0x34, 0xbd, 0x9d, + 0x73, 0x7e, 0x82, 0x0f, 0xad, 0x94, 0x97, 0x18, 0x25, 0xca, 0xa2, 0xd1, 0x65, 0x67, 0x43, 0x6b, + 0x01, 0x61, 0xf4, 0x43, 0x91, 0x5b, 0x4f, 0x41, 0xbd, 0x89, 0x4e, 0x6f, 0xef, 0xdb, 0x40, 0xc7, + 0x77, 0x73, 0xca, 0x79, 0x30, 0x00, 0xbb, 0x2d, 0x46, 0x6e, 0x8d, 0x63, 0xd9, 0x45, 0x2e, 0x88, + 0x1e, 0xbe, 0xcb, 0xca, 0x77, 0x8f, 0xa9, 0x2c, 0x43, 0x9f, 0xc9, 0xeb, 0xc9, 0x26, 0xb5, 0x79, + 0xe7, 0xcd, 0xa2, 0xed, 0xdb, 0xa8, 0x55, 0x55, 0x6c, 0xac, 0xd0, 0x28, 0xc9, 0x41, 0xd2, 0x73, + 0x2d, 0xa1, 0x11, 0x12, 0x89, 0xb8, 0x37, 0xc3, 0xd4, 0xf6, 0x48, 0x0e, 0xa8, 0x23, 0xa3, 0x0e, + 0xe6, 0x71, 0x6e, 0x3b, 0xfe, 0xcc, 0x6e, 0x87, 0x2f, 0xf5, 0x2e, 0x20, 0x68, 0x65, 0x6a, 0x52, + 0x26, 0xb7, 0x26, 0xf8, 0x89, 0xfb, 0x60, 0x6c, 0x95, 0xba, 0xd2, 0x65, 0xcc, 0x1e, 0xb7, 0x87, + 0x58, 0xc1, 0x3b, 0xea, 0x0e, 0xa5, 0xee, 0xd6, 0x16, 0xf9, 0x02, 0xa0, 0xb5, 0xce, 0xf9, 0xeb, + 0x87, 0x8d, 0xa1, 0xd3, 0x4b, 0x12, 0xbd, 0x89, 0x83, 0x52, 0x0d, 0x70, 0xc3, 0x0f, 0x95, 0xc7, + 0x48, 0x87, 0x17, 0xba, 0xa7, 0x74, 0xca, 0xd4, 0xc2, 0x8a, 0xa9, 0x3f, 0x4a, 0x31, 0xa7, 0x55, + 0xe4, 0x34, 0x57, 0x8d, 0xba, 0xd0, 0xa8, 0x46, 0x69, 0x7f, 
0xb9, 0x32, 0x9a, 0xc4, 0x53, 0xd3, + 0x85, 0x3d, 0x6b, 0x6e, 0xf4, 0xc9, 0x17, 0xe5, 0xe8, 0x52, 0xd5, 0xde, 0x6e, 0x11, 0xfc, 0x52, + 0xc7, 0xd3, 0x7f, 0x67, 0xe7, 0x82, 0x18, 0x7f, 0xbb, 0xd9, 0x02, 0x66, 0x0d, 0xe9, 0x3b, 0x24, + 0xe4, 0x23, 0x71, 0x28, 0x14, 0xbb, 0x0b, 0x59, 0x92, 0x65, 0xa5, 0x90, 0xc6, 0xe3, 0x3b, 0x0b, + 0x9a, 0x37, 0xd3, 0x70, 0x74, 0x10, 0xc0, 0xf6, 0xc6, 0x8d, 0xbc, 0xa3, 0x49, 0xff, 0x7b, 0xbe, + 0xd5, 0x50, 0x98, 0x44, 0x92, 0xbf, 0x5b, 0xd1, 0x47, 0x4d, 0xc4, 0x74, 0xbd, 0x77, 0x2a, 0xb4, + 0x36, 0xb1, 0xc9, 0x75, 0x40, 0xd5, 0xf0, 0xce, 0x0d, 0xcf, 0xe8, 0xc6, 0x37, 0x46, 0x48, 0x88, + 0x00, 0xdb, 0x7a, 0x98, 0xb3, 0x2a, 0x1c, 0x44, 0xd9, 0x25, 0xa4, 0x2e, 0xbd, 0xb5, 0x55, 0xcd, + 0x3b, 0xaf, 0x56, 0xad, 0x9b, 0xd7, 0x95, 0xf5, 0x3d, 0xbc, 0x51, 0x3a, 0x8c, 0x4f, 0x62, 0x36, + 0x0d, 0xe2, 0xc3, 0xfa, 0x17, 0x7f, 0xf3, 0x77, 0xf2, 0x7f, 0xfb, 0x05, 0xb4, 0x05, 0x02, 0x1d, + 0x38, 0x89, 0x7d, 0xa3, 0x6e, 0x31, 0xd6, 0x7e, 0xff, 0x7a, 0x17, 0x44, 0x7b, 0x75, 0xf7, 0x6e, + 0x58, 0x61, 0xc3, 0x59, 0xa3, 0x1d, 0xc9, 0x20, 0x5c, 0xc0, 0xd8, 0xbd, 0x7a, 0x18, 0x47, 0xfb, + 0x6c, 0x58, 0xef, 0xd4, 0x61, 0x10, 0x58, 0xc6, 0xb0, 0x34, 0x0f, 0x92, 0xe1, 0xc6, 0x29, 0x87, + 0xe2, 0x19, 0xff, 0xf8, 0xaf, 0xdd, 0x92, 0x91, 0x4c, 0xfe, 0x7d, 0x87, 0xb4, 0xdb, 0xa3, 0xa1, + 0xab, 0x39, 0xb0, 0x92, 0x9b, 0xb3, 0xdd, 0xd9, 0x29, 0x1d, 0xca, 0x9c, 0x03, 0xba, 0xf1, 0xde, + 0xfc, 0x60, 0x98, 0x59, 0x22, 0xc2, 0x13, 0xdd, 0x56, 0x7a, 0xb5, 0x53, 0x22, 0x4f, 0x17, 0x6f, + 0xe6, 0xbf, 0x3b, 0x8b, 0x1e, 0xfa, 0xe1, 0xf7, 0x33, 0x68, 0x20, 0x36, 0xff, 0x4f, 0xdf, 0x8b, + 0xe4, 0x4d, 0xa3, 0xe5, 0x1d, 0xbf, 0x00, 0xfc, 0xc3, 0x3b, 0x4c, 0x26, 0x50, 0xb8, 0xb0, 0x81, + 0xea, 0x20, 0xe6, 0xe6, 0xf2, 0x62, 0xee, 0x26, 0x7f, 0xdc, 0xb5, 0x4e, 0x4c, 0x2c, 0x31, 0x8c, + 0xd6, 0x9d, 0xbe, 0x0b, 0x79, 0xd7, 0x7a, 0xb9, 0x8d, 0xfb, 0xd2, 0x8a, 0x8f, 0x06, 0x9a, 0x64, + 0x71, 0x1d, 0x7e, 0x37, 0xff, 0x00, 0xbe, 0xb9, 0x4f, 0x45, 0xb5, 0x30, 0x2a, 0x23, 0x99, 0xdf, + 0x91, 0xee, 0x31, 0x36, 0x1b, 0xd0, 0xaa, 0x1e, 0x30, 0x90, 0xde, 0x07, 0x34, 0x6a, 0xca, 0xfe, + 0x21, 0x89, 0x51, 0x70, 0x11, 0xdf, 0x0a, 0x4e, 0x6f, 0xc8, 0x37, 0x4c, 0xe7, 0x79, 0xf3, 0x7f, + 0xe1, 0x3f, 0xbc, 0x90, 0x09, 0xbe, 0x32, 0x77, 0xac, 0x62, 0xef, 0x40, 0x5f, 0x9a, 0x4b, 0xbe, + 0x14, 0x00, 0xaf, 0x81, 0xf4, 0xc9, 0x24, 0x16, 0x22, 0xe3, 0x48, 0x75, 0x7a, 0x75, 0x9e, 0xe1, + 0x2d, 0x9c, 0x29, 0x69, 0xa1, 0x0e, 0xd4, 0xcd, 0x3d, 0xa9, 0xf7, 0xe0, 0xec, 0xd0, 0x9f, 0x2b, + 0x72, 0x80, 0xda, 0xd5, 0xd0, 0x52, 0x4a, 0xeb, 0xa8, 0xb4, 0x97, 0x56, 0x67, 0x04, 0x76, 0xf9, + 0xca, 0xb0, 0xe2, 0xa2, 0xea, 0x96, 0x1a, 0xb2, 0xfc, 0x2d, 0x63, 0x1f, 0xb8, 0x2f, 0x86, 0xee, + 0x18, 0x86, 0xb3, 0x4e, 0x2c, 0xf5, 0x5c, 0x1e, 0x7a, 0x8a, 0x40, 0x3e, 0x36, 0xce, 0x40, 0x75, + 0x4b, 0x73, 0xcb, 0x41, 0xba, 0x5b, 0x04, 0x74, 0x4b, 0x65, 0xec, 0x88, 0x84, 0x1a, 0x63, 0x46, + 0xea, 0xeb, 0x62, 0x86, 0x16, 0x47, 0x55, 0x14, 0x87, 0xf8, 0xb3, 0x71, 0xab, 0xda, 0xbb, 0xc1, + 0xb1, 0x04, 0x7c, 0xdc, 0x30, 0x8d, 0x96, 0xa8, 0x21, 0x4d, 0xf6, 0xee, 0x38, 0x07, 0x1e, 0x80, + 0xba, 0xed, 0xe8, 0xba, 0xa4, 0x35, 0xaf, 0x7a, 0xaa, 0x70, 0x79, 0x0c, 0xed, 0x39, 0xea, 0x4a, + 0xe6, 0x8c, 0x39, 0xd9, 0xe7, 0x84, 0x0f, 0x9c, 0xa5, 0x44, 0x9a, 0xa6, 0xe9, 0x83, 0x94, 0x31, + 0x31, 0x01, 0x71, 0xf8, 0x08, 0xa8, 0x3e, 0x45, 0x8c, 0x5b, 0xbd, 0x2c, 0xcc, 0x59, 0x1c, 0x60, + 0x63, 0xeb, 0xe8, 0x4b, 0x33, 0x23, 0x26, 0xf5, 0xfe, 0x89, 0x6f, 0x90, 0xfb, 0x21, 0x54, 0x9a, + 0xa3, 0x09, 0xea, 0x1a, 0xb3, 0xd3, 0x90, 0x59, 0xab, 0xcd, 0x2f, 0x8c, 0xa7, 0x5c, 
0x91, 0xae, + 0xd9, 0xc6, 0xff, 0x12, 0xbb, 0x68, 0x53, 0x45, 0x57, 0x18, 0x0f, 0x38, 0x31, 0x80, 0xb0, 0x82, + 0x4a, 0x2d, 0x26, 0xe1, 0xd3, 0x9e, 0x65, 0xa6, 0xa6, 0x9d, 0xc4, 0x2a, 0xe3, 0x7d, 0x49, 0x0f, + 0xd5, 0x09, 0xee, 0xe5, 0xda, 0x37, 0xd4, 0xd7, 0xc3, 0xe6, 0xa8, 0x1c, 0xd9, 0x5b, 0x21, 0xc1, + 0xef, 0x55, 0x3c, 0x3e, 0xdf, 0x7a, 0xf9, 0x6a, 0xc9, 0x72, 0x63, 0xb5, 0xa1, 0x23, 0x5d, 0x5a, + 0xae, 0xb4, 0x3e, 0x99, 0xc4, 0x5a, 0x05, 0xd5, 0x80, 0xa0, 0x42, 0xab, 0x6b, 0xfb, 0x71, 0x24, + 0x15, 0x49, 0xe5, 0xf0, 0xfc, 0x4c, 0xf1, 0x6b, 0x1f, 0xa0, 0x5a, 0x8e, 0xce, 0xdd, 0xf4, 0x7a, + 0x53, 0x00, 0x1f, 0xda, 0x28, 0x54, 0xd0, 0x22, 0xe1, 0x2c, 0xf8, 0x50, 0x6b, 0x10, 0xbd, 0x8b, + 0xf1, 0xad, 0x70, 0xf6, 0x68, 0x53, 0xa9, 0x97, 0x78, 0xe5, 0xff, 0xf6, 0xa2, 0x7b, 0xad, 0x99, + 0x98, 0xa7, 0x78, 0xa0, 0xc6, 0xce, 0x64, 0x8d, 0xe5, 0x2e, 0x8e, 0x93, 0x27, 0x7b, 0xd3, 0x6b, + 0x3b, 0xbd, 0x72, 0x6d, 0xb7, 0x14, 0x08, 0x8b, 0xe2, 0x90, 0x1a, 0x2e, 0x92, 0x15, 0x55, 0xd5, + 0x5b, 0xb1, 0x0d, 0xdd, 0xc0, 0x5c, 0xac, 0x2b, 0x3f, 0xf5, 0xc2, 0xf4, 0x58, 0x35, 0xfd, 0x19, + 0x76, 0x9d, 0x75, 0x35, 0x71, 0xa8, 0xc6, 0x11, 0x01, 0x7a, 0xdc, 0x29, 0x5e, 0x04, 0x14, 0xaa, + 0x50, 0x2b, 0xe8, 0x4e, 0xb0, 0x3d, 0x5f, 0x52, 0xca, 0x79, 0x02, 0xf1, 0x7d, 0x63, 0x0f, 0xf1, + 0xcc, 0x01, 0x8b, 0x11, 0x1a, 0x04, 0x49, 0xa7, 0x93, 0x2f, 0x28, 0x6d, 0xbe, 0xbf, 0x5f, 0x89, + 0xcc, 0x30, 0x21, 0xcc, 0x50, 0x1c, 0xdc, 0xee, 0x16, 0xb5, 0xb5, 0x70, 0x8e, 0x66, 0xf5, 0xa6, + 0xf0, 0x55, 0x17, 0x37, 0xd6, 0x33, 0x3d, 0xdd, 0xff, 0x13, 0x05, 0x83, 0xbc, 0x37, 0xa3, 0x83, + 0x12, 0x25, 0xee, 0xe1, 0x27, 0x02, 0x26, 0xaf, 0x12, 0x91, 0x1b, 0x52, 0xad, 0x92, 0x4c, 0x4f, + 0x64, 0xc7, 0x81, 0x0d, 0x5a, 0xbd, 0xe4, 0x0e, 0xf0, 0x98, 0x3b, 0x28, 0x69, 0xbb, 0x78, 0x1b, + 0x94, 0xc6, 0xc7, 0x9d, 0x73, 0x01, 0x2a, 0xab, 0x95, 0x64, 0x2a, 0x30, 0xe8, 0x87, 0xf8, 0xa7, + 0xb0, 0x6d, 0x23, 0x69, 0x54, 0xab, 0xd0, 0x8e, 0x9d, 0x87, 0xb7, 0xee, 0xc8, 0xe0, 0xe8, 0x77, + 0xab, 0xa0, 0x46, 0x70, 0x6b, 0xbe, 0x24, 0x5f, 0x5c, 0x13, 0xbf, 0x14, 0x91, 0xae, 0x65, 0x45, + 0x27, 0xbf, 0x65, 0x92, 0xc8, 0xa9, 0xb2, 0xbd, 0x3e, 0xd1, 0xf2, 0x43, 0xb5, 0xb4, 0x22, 0x5c, + 0xc0, 0x4e, 0xeb, 0x29, 0x39, 0xa3, 0x73, 0x70, 0x9e, 0x73, 0xf2, 0x0d, 0x72, 0x29, 0x8d, 0x80, + 0xa9, 0x73, 0x8f, 0x3d, 0xc3, 0x15, 0xed, 0xa5, 0x4a, 0xd1, 0x5b, 0x71, 0xc4, 0x86, 0x41, 0xe8, + 0x91, 0x48, 0x93, 0x36, 0xa4, 0x61, 0x4a, 0xa2, 0x14, 0x9a, 0x78, 0x41, 0x7b, 0xb2, 0x22, 0xb0, + 0xa1, 0x00, 0xd2, 0x53, 0x2d, 0x80, 0xa7, 0xfe, 0x85, 0x7c, 0x05, 0x03, 0xe6, 0x01, 0xdc, 0x97, + 0x12, 0x29, 0xc5, 0xf8, 0xe7, 0xe7, 0x8e, 0x6b, 0xdb, 0x37, 0x69, 0xd5, 0xf6, 0x4b, 0xc4, 0x31, + 0xfa, 0x47, 0x4b, 0xdf, 0x74, 0x86, 0x72, 0xd3, 0x6b, 0xa0, 0xbd, 0x06, 0x5e, 0x36, 0x3f, 0xe1, + 0x6a, 0x7d, 0x53, 0xc8, 0x61, 0x9a, 0x5f, 0x28, 0x85, 0xd3, 0xdd, 0x50, 0x32, 0xe3, 0xd6, 0xf7, + 0x88, 0xa2, 0x30, 0xcd, 0xf5, 0xb1, 0x5f, 0x46, 0x90, 0x09, 0xbf, 0xcd, 0x53, 0x2c, 0xbd, 0x49, + 0x86, 0xee, 0x17, 0x5b, 0xfc, 0xc4, 0x6c, 0xa1, 0xcd, 0x6f, 0x2c, 0xc7, 0x4e, 0x65, 0x9d, 0x61, + 0xfc, 0x4e, 0x07, 0x93, 0x91, 0x35, 0xc9, 0x2d, 0xaa, 0x52, 0x23, 0x2d, 0x2f, 0xab, 0x06, 0xf4, + 0x6e, 0x8b, 0x87, 0x7a, 0xe0, 0x49, 0x90, 0x99, 0x3a, 0x28, 0x3c, 0xfb, 0x5d, 0x1c, 0x4b, 0x08, + 0x62, 0xf4, 0xa5, 0xd3, 0xd9, 0x19, 0xa8, 0x79, 0x67, 0x35, 0xe2, 0xfd, 0xd0, 0x5e, 0x00, 0xb0, + 0xe6, 0x2c, 0xdd, 0xd5, 0x70, 0x02, 0x55, 0xe6, 0xcf, 0xe2, 0xa2, 0xf7, 0xc4, 0xf6, 0xf4, 0xa4, + 0xfb, 0xef, 0x46, 0x00, 0x9f, 0x2d, 0xa8, 0xca, 0x0b, 0x19, 0x06, 0x17, 0x6a, 0x14, 0x18, 0xf1, + 0x3a, 0x9e, 
0x26, 0x0b, 0x6c, 0xb9, 0xf5, 0x03, 0xc5, 0xaa, 0xba, 0x8a, 0x72, 0xff, 0x5d, 0xcd, + 0xe2, 0x88, 0xb6, 0x84, 0x94, 0x7d, 0x5c, 0xbd, 0x4a, 0xdb, 0xc6, 0x98, 0x37, 0x4a, 0xfa, 0xcd, + 0xd2, 0x56, 0x5a, 0xe6, 0x1c, 0x98, 0x79, 0x94, 0xbc, 0x67, 0x14, 0xe4, 0x64, 0x04, 0x9c, 0xe8, + 0xd8, 0x35, 0x6b, 0x41, 0x0f, 0xfe, 0x96, 0xef, 0xdd, 0x29, 0x22, 0x2b, 0x9b, 0x51, 0x01, 0xbf, + 0xff, 0xcb, 0xd8, 0xbe, 0x9c, 0x57, 0xcb, 0x4f, 0x9a, 0x8b, 0x7f, 0x16, 0x94, 0x11, 0x40, 0x0c, + 0x17, 0x0d, 0x1e, 0xaf, 0xe4, 0x5f, 0xd1, 0xf3, 0xda, 0x65, 0x62, 0x4d, 0x7f, 0x87, 0x51, 0x4c, + 0xad, 0x94, 0x03, 0xb5, 0xbd, 0x0d, 0xf8, 0x7d, 0xad, 0x74, 0xb6, 0xaa, 0x9c, 0x0e, 0x1a, 0x56, + 0xdd, 0xd8, 0x21, 0xf6, 0x8a, 0x68, 0x59, 0x9d, 0xf4, 0x54, 0x42, 0x6d, 0x9f, 0x3a, 0xb8, 0x39, + 0x83, 0xe5, 0xf2, 0xe8, 0x29, 0xc2, 0x21, 0x45, 0x51, 0x53, 0xf1, 0x68, 0x86, 0x0d, 0x94, 0x29, + 0xdd, 0x48, 0x09, 0x15, 0x0b, 0x90, 0xd1, 0xa9, 0xf5, 0x93, 0x3b, 0x36, 0x35, 0xe1, 0x80, 0x9d, + 0xcf, 0xa0, 0xe9, 0xc2, 0xe6, 0x20, 0xf9, 0x1f, 0x0a, 0x46, 0xee, 0xb3, 0x48, 0x1c, 0x95, 0x9d, + 0xb0, 0xbc, 0x0e, 0xc4, 0xd1, 0xf3, 0x4f, 0xdc, 0x4c, 0x31, 0xd8, 0xb3, 0xe6, 0x11, 0x66, 0xf4, + 0x98, 0xad, 0x5a, 0x2a, 0x6b, 0xc9, 0x1c, 0xb6, 0x40, 0x44, 0xd1, 0x9c, 0xc7, 0x2e, 0x3d, 0x73, + 0xa0, 0xcd, 0x77, 0x4a, 0x8b, 0x46, 0xd7, 0x8f, 0xd8, 0x13, 0x4d, 0x89, 0xdf, 0x8c, 0xb5, 0xf6, + 0xaf, 0xed, 0x4a, 0xc9, 0xb5, 0xfd, 0x7e, 0x62, 0x6d, 0x12, 0xf2, 0x01, 0xda, 0x4f, 0xc6, 0xde, + 0x0f, 0x49, 0x9d, 0x7a, 0xc7, 0x99, 0xe3, 0xd0, 0xc6, 0xa8, 0x69, 0x74, 0x2c, 0x2f, 0xb1, 0x50, + 0x22, 0x97, 0x51, 0xec, 0x5d, 0x2f, 0xbd, 0x18, 0x15, 0x7b, 0x02, 0x54, 0x0b, 0x88, 0x4e, 0xfe, + 0x78, 0x0e, 0x04, 0x18, 0x42, 0xbf, 0x0c, 0xf8, 0xfb, 0xd9, 0x4d, 0xc6, 0x97, 0x4d, 0xa2, 0xaf, + 0x39, 0x5e, 0x26, 0xf3, 0x39, 0xbe, 0x46, 0xf0, 0x7d, 0x5e, 0x3a, 0xbd, 0xe0, 0x51, 0x95, 0x63, + 0xd0, 0x2a, 0x30, 0x4a, 0xc7, 0x57, 0xff, 0x57, 0xc7, 0xe8, 0x31, 0x0d, 0x29, 0x00, 0x1c, 0xff, + 0xfe, 0x24, 0x23, 0x99, 0x54, 0x71, 0x3e, 0x0f, 0x09, 0x8b, 0xab, 0xb0, 0x55, 0x74, 0xf4, 0x65, + 0x91, 0x63, 0xd6, 0x22, 0x62, 0x69, 0x76, 0xb1, 0x75, 0xac, 0x72, 0x62, 0xfd, 0xcb, 0x52, 0x6a, + 0x0f, 0x9f, 0xf0, 0xc8, 0x28, 0x41, 0x59, 0x31, 0x55, 0xe2, 0x7d, 0x70, 0x3d, 0x44, 0x3a, 0x20, + 0x2a, 0x2a, 0x32, 0x78, 0xc7, 0x1d, 0xd7, 0x92, 0x31, 0x33, 0x52, 0xef, 0x11, 0xf6, 0x25, 0xad, + 0x51, 0x98, 0x69, 0xef, 0xee, 0xed, 0xe6, 0xa7, 0x42, 0xd1, 0x47, 0xc5, 0x0e, 0xda, 0x30, 0xea, + 0xce, 0xe2, 0x7d, 0x4a, 0xaf, 0x39, 0xbe, 0xad, 0x30, 0x92, 0x36, 0xd5, 0xe9, 0x82, 0xdd, 0xb2, + 0xf3, 0x60, 0x7e, 0xd8, 0x76, 0x9a, 0xc4, 0xfe, 0x45, 0xc2, 0xdc, 0x53, 0x7a, 0x34, 0x26, 0x10, + 0x07, 0xd4, 0x10, 0x16, 0x78, 0x91, 0xc9, 0xcd, 0xfa, 0x2e, 0xd6, 0x01, 0x86, 0xa5, 0xbb, 0x34, + 0x4a, 0xac, 0xaf, 0x67, 0x87, 0xea, 0x7a, 0x91, 0x14, 0x71, 0xa6, 0x5b, 0x69, 0x35, 0xeb, 0x93, + 0x9f, 0x2c, 0x7b, 0xf3, 0xde, 0x77, 0x4e, 0x18, 0xf2, 0x23, 0x8f, 0x3a, 0xc6, 0x80, 0xb8, 0xe7, + 0xde, 0x0e, 0xef, 0x0b, 0x26, 0x05, 0xb9, 0x11, 0xc9, 0x41, 0x0a, 0x00, 0x34, 0x8a, 0x60, 0x50, + 0xb5, 0xbd, 0x6a, 0x4e, 0x3f, 0x78, 0x5f, 0x20, 0xf3, 0x5e, 0x43, 0xd5, 0x8e, 0x5d, 0x32, 0x1f, + 0xc9, 0x27, 0x63, 0xd3, 0xab, 0x7a, 0x02, 0xa9, 0xf9, 0xe7, 0xe7, 0xf5, 0x49, 0x6c, 0x7f, 0xfb, + 0x9c, 0x56, 0x49, 0x2f, 0x00, 0x66, 0x17, 0xe7, 0xa9, 0x17, 0x6e, 0x13, 0x86, 0x71, 0xfa, 0x10, + 0xa3, 0x7b, 0x0b, 0x83, 0xa5, 0x37, 0x8d, 0xfd, 0x45, 0x58, 0x2f, 0xc6, 0x80, 0x3b, 0x19, 0x68, + 0x30, 0x68, 0xf8, 0xfc, 0x84, 0x3c, 0xdc, 0x6b, 0x8e, 0x69, 0x2e, 0x12, 0x78, 0x06, 0x08, 0x27, + 0xc9, 0x35, 0x97, 0xe9, 0x31, 0xb4, 
0xd6, 0x56, 0x11, 0xb6, 0xcb, 0xdc, 0x2d, 0x7b, 0x92, 0xf7, + 0x65, 0x83, 0xa4, 0xcf, 0xdd, 0xf6, 0x47, 0x7c, 0x96, 0xa2, 0x73, 0xdc, 0x05, 0x05, 0x41, 0x3a, + 0xa2, 0x78, 0xae, 0x00, 0x50, 0xb9, 0x02, 0xa0, 0x5f, 0x68, 0x7c, 0x40, 0x37, 0x1f, 0x41, 0x78, + 0x1c, 0x6f, 0x9c, 0x1f, 0x7c, 0x89, 0xfa, 0xd5, 0xe2, 0x9f, 0xfc, 0x03, 0x8b, 0x75, 0xa2, 0x83, + 0xf3, 0x8e, 0xce, 0x97, 0x9c, 0x14, 0x70, 0x37, 0x9f, 0xbf, 0x61, 0x76, 0x41, 0xdf, 0x7f, 0xc8, + 0xd8, 0xcf, 0x14, 0x42, 0xa5, 0xfe, 0xda, 0x32, 0xf0, 0xa4, 0x95, 0xc4, 0xb0, 0xfa, 0x19, 0x39, + 0xdc, 0x41, 0xd6, 0xfe, 0xf1, 0x18, 0x93, 0xfa, 0x42, 0x80, 0x44, 0x63, 0x70, 0xf6, 0xf7, 0xa0, + 0xb3, 0x44, 0x48, 0x90, 0x59, 0xcd, 0x43, 0xa3, 0x3a, 0x36, 0x04, 0x1e, 0xf2, 0x29, 0x5b, 0x55, + 0xe6, 0x3b, 0xea, 0xdc, 0xb1, 0xd7, 0xfe, 0xae, 0x80, 0x4c, 0x10, 0x22, 0xa0, 0xea, 0xd5, 0x6c, + 0x2a, 0x8f, 0x12, 0x44, 0xf8, 0x96, 0x7d, 0xcf, 0x6a, 0x39, 0xec, 0x4c, 0xc6, 0xb7, 0xe1, 0xe2, + 0xf1, 0xe2, 0x1d, 0x03, 0xb6, 0x9b, 0x04, 0x27, 0x3c, 0x67, 0xf5, 0xc1, 0x13, 0x8c, 0xb8, 0x41, + 0x49, 0x02, 0x26, 0xa3, 0xac, 0x89, 0x27, 0x8c, 0xf3, 0x62, 0xb2, 0x17, 0x5d, 0x48, 0xbf, 0x7a, + 0xb4, 0xe2, 0x7b, 0x1d, 0xc7, 0x3d, 0x96, 0xab, 0x86, 0x71, 0x64, 0x0a, 0xee, 0xce, 0x27, 0xb6, + 0x3f, 0x1e, 0x84, 0xd6, 0x27, 0xc2, 0x7c, 0x14, 0x85, 0xd8, 0x9b, 0x0b, 0xb8, 0x7a, 0xdc, 0xd7, + 0xd8, 0x19, 0x5f, 0xe5, 0x0e, 0x4b, 0xf6, 0xfa, 0xe5, 0xd8, 0xa9, 0xa0, 0xe2, 0x2e, 0x34, 0x7f, + 0xf3, 0x25, 0x30, 0x20, 0x58, 0x4f, 0xf9, 0xfb, 0x0b, 0x2e, 0xb1, 0x6c, 0x78, 0x48, 0xca, 0xf5, + 0x7b, 0x7b, 0xb4, 0x60, 0x86, 0xff, 0x53, 0xca, 0xf6, 0x67, 0xac, 0x4b, 0xa3, 0x4a, 0x9b, 0x2e, + 0x30, 0xcd, 0x29, 0xf9, 0xc6, 0x59, 0x3c, 0xb9, 0xa4, 0xfe, 0xc6, 0x45, 0xe3, 0xb3, 0xf4, 0xc1, + 0xc7, 0xec, 0xb5, 0xaa, 0xab, 0x50, 0xe9, 0x40, 0xdd, 0x5d, 0x2a, 0xb7, 0x21, 0x6a, 0x76, 0x24, + 0x79, 0x73, 0x8d, 0x8d, 0x72, 0x26, 0x66, 0x74, 0x37, 0xb8, 0x34, 0x31, 0x2d, 0x65, 0x96, 0x38, + 0x18, 0x07, 0xe1, 0xed, 0x08, 0x07, 0xed, 0xdc, 0x84, 0x7a, 0x8b, 0x32, 0xda, 0xdd, 0xa5, 0x71, + 0xa6, 0xc7, 0x6b, 0x75, 0xae, 0x36, 0xa8, 0x50, 0x5c, 0x6d, 0xb9, 0x9e, 0x25, 0xa4, 0xad, 0xdc, + 0x7f, 0x35, 0x17, 0x7d, 0x6d, 0x9f, 0xdb, 0x49, 0x73, 0x43, 0xbb, 0x42, 0xfa, 0xa7, 0x9e, 0xc6, + 0xe2, 0x8f, 0xd6, 0x91, 0x2b, 0xca, 0x6c, 0x13, 0xf7, 0x74, 0xcc, 0xed, 0x37, 0x0f, 0xb4, 0x73, + 0xbb, 0x84, 0xe1, 0x51, 0x8a, 0x88, 0xda, 0x64, 0x84, 0x43, 0x12, 0xde, 0x5f, 0x34, 0x80, 0xaf, + 0x2f, 0xe7, 0x09, 0x0f, 0x16, 0x12, 0x6e, 0x25, 0x0d, 0xcd, 0xaf, 0xe5, 0xdb, 0xf3, 0xcd, 0x02, + 0xaa, 0x00, 0xb8, 0x44, 0x3b, 0xa5, 0xb3, 0xed, 0x12, 0xb6, 0x3a, 0x48, 0xe8, 0x43, 0xa4, 0x6c, + 0xf6, 0xf5, 0x5b, 0x32, 0xae, 0xd1, 0x11, 0xde, 0x92, 0x2b, 0xf0, 0x51, 0x05, 0xad, 0x84, 0x1e, + 0xb1, 0xab, 0xce, 0x6f, 0xfe, 0xbf, 0x87, 0x25, 0xd2, 0x6a, 0x96, 0x86, 0x1b, 0x38, 0x01, 0x79, + 0x65, 0xf4, 0xd5, 0x4d, 0x43, 0xdb, 0xef, 0xb4, 0xdb, 0x44, 0xb3, 0x9d, 0xc6, 0x5b, 0x53, 0x4b, + 0xb3, 0xbe, 0xeb, 0x2b, 0x17, 0xf1, 0x99, 0xeb, 0x58, 0x78, 0x07, 0x01, 0x4b, 0xdf, 0x7a, 0xd6, + 0x34, 0xd5, 0x12, 0xcc, 0xa0, 0xd5, 0x44, 0xde, 0xbd, 0xe0, 0xf1, 0x73, 0x2f, 0x3c, 0xea, 0x25, + 0x69, 0xae, 0x04, 0x26, 0x53, 0x97, 0x0b, 0x3a, 0xf3, 0xd7, 0xc4, 0xfd, 0xd7, 0x3e, 0x76, 0x56, + 0xa2, 0xcc, 0xb2, 0x22, 0x50, 0x36, 0x77, 0xcc, 0xc6, 0xaf, 0x28, 0xe9, 0x71, 0xce, 0x15, 0x7f, + 0x69, 0x68, 0xf2, 0x60, 0x6f, 0x94, 0xa6, 0xa2, 0xb7, 0x3c, 0x4a, 0x24, 0x95, 0xc2, 0x54, 0x53, + 0x99, 0x5b, 0x00, 0x2b, 0x6b, 0xc7, 0x45, 0x3a, 0x6a, 0x92, 0xf2, 0x1e, 0xc5, 0x30, 0xc3, 0x97, + 0xf1, 0x7c, 0x56, 0x6f, 0xda, 0x50, 0x86, 0x86, 0x6d, 0x11, 
0xc5, 0x9c, 0x22, 0xbb, 0x08, 0x55, + 0xb8, 0x4c, 0x45, 0xf4, 0x8c, 0x4d, 0x90, 0x79, 0x47, 0x4e, 0x77, 0x05, 0x53, 0x5d, 0xc5, 0x5a, + 0x1c, 0x29, 0xb1, 0xb5, 0x8d, 0xfe, 0x14, 0x21, 0x2a, 0xf1, 0xc9, 0xd0, 0x64, 0xbc, 0x3c, 0xce, + 0x9c, 0x6f, 0x0b, 0xd3, 0x33, 0xfb, 0xbb, 0xa3, 0x37, 0xd2, 0x77, 0x4f, 0x3f, 0xca, 0x02, 0x4e, + 0x1a, 0x88, 0x6d, 0xeb, 0x8b, 0x6b, 0x72, 0x85, 0xb2, 0x3a, 0x1d, 0x53, 0xcc, 0x67, 0x6c, 0xcf, + 0x86, 0xc6, 0x7a, 0x5f, 0xf6, 0x97, 0xd1, 0xed, 0x0c, 0xcb, 0x0d, 0x68, 0x08, 0x08, 0x5b, 0x18, + 0x79, 0xa0, 0xac, 0x10, 0x67, 0x59, 0x8a, 0x92, 0xe0, 0x3f, 0x3d, 0xe6, 0x7f, 0x15, 0xca, 0x7f, + 0xdb, 0x05, 0x16, 0xe7, 0x95, 0x5c, 0x90, 0xd4, 0x19, 0x23, 0xd3, 0xc2, 0x0d, 0x25, 0xd8, 0xb3, + 0xda, 0x81, 0xd7, 0xca, 0x6a, 0x98, 0x95, 0xb5, 0xca, 0xb1, 0x4e, 0xee, 0x6d, 0x09, 0xe5, 0x8c, + 0x70, 0xde, 0x1c, 0x45, 0x21, 0xe6, 0xeb, 0xe2, 0xaf, 0x7f, 0x8d, 0xbb, 0x74, 0xf1, 0xad, 0x7d, + 0x76, 0xd0, 0xae, 0xd6, 0xc3, 0xa3, 0xb7, 0x19, 0xf1, 0x73, 0x40, 0x5d, 0xd5, 0x03, 0x4a, 0xd9, + 0x9c, 0x37, 0xce, 0x76, 0x23, 0x33, 0x35, 0xcc, 0x04, 0xad, 0x82, 0xa5, 0x28, 0x53, 0xb2, 0x1d, + 0xfc, 0x36, 0x3d, 0x95, 0x65, 0xfb, 0x3f, 0x2b, 0x1f, 0xdc, 0xd3, 0x5f, 0xa9, 0x98, 0x29, 0xa0, + 0x43, 0x4e, 0x56, 0xce, 0x0f, 0xed, 0x4e, 0x26, 0x54, 0xa7, 0x62, 0x38, 0x69, 0xf8, 0xba, 0x3d, + 0x23, 0x98, 0x17, 0x3e, 0xcd, 0x32, 0x28, 0xf2, 0x52, 0x09, 0x9d, 0x50, 0xc9, 0x61, 0x02, 0x43, + 0xb1, 0xf8, 0x92, 0x6b, 0xec, 0x8e, 0xd4, 0x5e, 0x2c, 0xba, 0x49, 0x32, 0xf7, 0x12, 0xa8, 0xd6, + 0x6f, 0x05, 0xaa, 0x43, 0x25, 0x28, 0x8f, 0x6a, 0x54, 0x49, 0x26, 0x9a, 0x9a, 0x11, 0xc7, 0x2d, + 0x1f, 0x87, 0xa3, 0xc1, 0x74, 0xe0, 0x32, 0xbf, 0x36, 0x8b, 0x43, 0x78, 0x53, 0xf8, 0x09, 0x33, + 0xe9, 0x3d, 0x0d, 0x89, 0xe5, 0x04, 0x9e, 0x53, 0x2d, 0xab, 0xf2, 0xc0, 0xf6, 0x8a, 0x60, 0x71, + 0x1f, 0x5e, 0xe0, 0x71, 0xba, 0x51, 0xe5, 0x41, 0x76, 0x36, 0x29, 0xda, 0x3d, 0xcc, 0xe0, 0x45, + 0xce, 0x3c, 0x5e, 0x6e, 0x04, 0xa3, 0x87, 0xa6, 0x4d, 0x03, 0xe0, 0x82, 0xd0, 0x4b, 0x45, 0xea, + 0x97, 0x24, 0x90, 0x0f, 0xb5, 0x5f, 0xc8, 0xb5, 0x91, 0xf4, 0xce, 0x4d, 0x33, 0xd1, 0x07, 0x2e, + 0x85, 0x1d, 0x63, 0xf9, 0x91, 0xb7, 0x2b, 0x33, 0x5b, 0x4e, 0x5f, 0xf2, 0x39, 0xce, 0x86, 0xb8, + 0x62, 0xc0, 0xd4, 0x7e, 0x33, 0x5e, 0xf4, 0x99, 0x80, 0x24, 0x6f, 0x09, 0x93, 0x52, 0x21, 0x77, + 0x1e, 0x40, 0x07, 0xb6, 0xf4, 0x9a, 0xcf, 0x06, 0x80, 0x82, 0xc1, 0xf5, 0x4b, 0xad, 0x94, 0x7a, + 0xdd, 0xe6, 0xd1, 0xb2, 0xc4, 0x32, 0x30, 0x12, 0x22, 0x5f, 0xde, 0xd7, 0xdf, 0xea, 0x91, 0xb7, + 0x32, 0x28, 0x1b, 0x9c, 0x2f, 0x29, 0x65, 0x9a, 0x01, 0x4b, 0xc5, 0xdb, 0xf8, 0x51, 0xcc, 0x74, + 0x02, 0xe6, 0xd3, 0x8a, 0x7d, 0xfa, 0x1f, 0x9f, 0x1b, 0x1c, 0x40, 0x8e, 0x44, 0xd2, 0xc1, 0x27, + 0xfe, 0x5d, 0x6f, 0xfc, 0x83, 0x0e, 0xc3, 0x2f, 0x19, 0xf7, 0xd7, 0x65, 0x64, 0xc6, 0x59, 0x0e, + 0x90, 0x65, 0x3c, 0x20, 0xf1, 0x53, 0xcf, 0xdb, 0x42, 0x0f, 0xd0, 0x80, 0x97, 0xbe, 0x23, 0x10, + 0x5a, 0x89, 0x1a, 0x74, 0xa3, 0x9b, 0x68, 0x19, 0xe2, 0xcd, 0xb1, 0xf0, 0x83, 0x5f, 0x86, 0xc3, + 0x94, 0xfe, 0x89, 0x69, 0x9d, 0x7b, 0xaf, 0x83, 0x18, 0x84, 0x67, 0x28, 0xcd, 0x70, 0xa5, 0x43, + 0x7c, 0xbd, 0xde, 0xeb, 0xa2, 0x90, 0xdf, 0x54, 0x5e, 0xab, 0xbc, 0xb4, 0x51, 0xaa, 0xa8, 0xe8, + 0x3c, 0xb6, 0x5a, 0xc8, 0x09, 0x79, 0x53, 0xd0, 0xe6, 0xec, 0xca, 0x41, 0xae, 0x2a, 0x2c, 0x10, + 0x85, 0x1b, 0x60, 0x3a, 0xd5, 0xd6, 0xe0, 0xd2, 0xd5, 0x41, 0xf9, 0x13, 0x97, 0xdc, 0xfb, 0x63, + 0xfd, 0xbb, 0x49, 0x81, 0x4b, 0xfc, 0xfe, 0x46, 0x06, 0xa3, 0x22, 0xf5, 0xc5, 0x2a, 0x9e, 0x1d, + 0x53, 0x51, 0x7a, 0x10, 0x2e, 0xcf, 0x21, 0x58, 0xa0, 0x70, 0x71, 0x92, 0x4c, 0xe7, 
0x41, 0x33, + 0xc2, 0x96, 0x5f, 0x43, 0x2a, 0x87, 0xc0, 0x58, 0x08, 0x64, 0x06, 0x62, 0xdc, 0x55, 0x85, 0xbd, + 0xd5, 0xb3, 0x29, 0xaf, 0x2b, 0x1d, 0x4b, 0x0c, 0xbf, 0x2b, 0x2f, 0x3f, 0xb0, 0x30, 0x95, 0x05, + 0xe2, 0x10, 0xad, 0xaa, 0x56, 0x71, 0x67, 0x43, 0x79, 0xdb, 0x9a, 0x11, 0xe8, 0x9a, 0x6a, 0x12, + 0xf8, 0x66, 0x32, 0x25, 0x53, 0x2e, 0xca, 0xda, 0xb3, 0xd3, 0x4a, 0x54, 0x4f, 0xdf, 0x2d, 0x50, + 0x3b, 0x2e, 0x29, 0xca, 0x7b, 0xe4, 0x81, 0xda, 0x46, 0x0d, 0x27, 0xde, 0x99, 0xa9, 0xed, 0x4f, + 0xa6, 0x48, 0x5d, 0xe7, 0x73, 0x7e, 0x5c, 0xe8, 0x3b, 0x14, 0x1e, 0xdd, 0xf5, 0x39, 0x68, 0xd0, + 0x14, 0x5c, 0x64, 0x6e, 0xc9, 0x24, 0x29, 0x59, 0x84, 0xec, 0x3a, 0xed, 0xc2, 0x18, 0x28, 0x1c, + 0x99, 0xb8, 0x78, 0x17, 0xf5, 0x66, 0x25, 0xc2, 0xec, 0x45, 0xad, 0x35, 0x1f, 0x8e, 0x9c, 0x5a, + 0x11, 0x14, 0xf2, 0xb1, 0x7e, 0x66, 0x2d, 0x6c, 0xb7, 0x36, 0xcc, 0x1c, 0xaa, 0x11, 0x42, 0x6e, + 0x2e, 0x13, 0xa5, 0x7b, 0xb0, 0xf8, 0x25, 0x42, 0x5e, 0x70, 0x92, 0x42, 0xe4, 0xe3, 0xc8, 0xa3, + 0x07, 0x31, 0x7f, 0xd7, 0x4d, 0x10, 0x65, 0xb4, 0x83, 0x41, 0xa3, 0x1a, 0x0c, 0x38, 0xc4, 0x7d, + 0xa1, 0xfe, 0xb8, 0x91, 0x82, 0xa3, 0x2b, 0x1c, 0x66, 0x02, 0x87, 0xda, 0x80, 0x49, 0x14, 0xef, + 0x39, 0x7d, 0x0d, 0x36, 0xfb, 0xbb, 0xda, 0x10, 0x29, 0x80, 0xab, 0x9e, 0x6d, 0x5f, 0x90, 0x18, + 0xc3, 0x92, 0x22, 0xca, 0xaa, 0x12, 0xcd, 0x68, 0xef, 0xbe, 0xa2, 0x74, 0x36, 0x27, 0x33, 0x8f, + 0x2e, 0x9d, 0x08, 0x69, 0x01, 0xac, 0x88, 0x93, 0xec, 0xeb, 0x7d, 0x5d, 0xe3, 0x46, 0xd3, 0xe2, + 0xe5, 0x34, 0x5a, 0xe8, 0x4a, 0xb5, 0x7d, 0xc9, 0xaa, 0x11, 0x8c, 0x46, 0xab, 0xec, 0xaf, 0xa4, + 0xf4, 0xd0, 0x40, 0x99, 0x9a, 0xaf, 0x3e, 0xb4, 0xe0, 0xae, 0x50, 0x2c, 0xc4, 0xe0, 0x90, 0xa5, + 0x5c, 0x8f, 0x93, 0xe2, 0x3b, 0xce, 0x28, 0x53, 0xd6, 0xdf, 0xb0, 0xc2, 0xa3, 0x99, 0xc0, 0x34, + 0x1c, 0xda, 0x98, 0xad, 0x87, 0x67, 0xd5, 0x73, 0xc0, 0x54, 0x72, 0xb3, 0x96, 0xaa, 0xc1, 0xa0, + 0x9e, 0x3a, 0xee, 0x5d, 0x7b, 0xa8, 0x2d, 0x53, 0xaa, 0x9e, 0x77, 0x39, 0x23, 0x7e, 0x3e, 0x34, + 0xaa, 0xbe, 0xa2, 0x46, 0x88, 0x02, 0x88, 0x0f, 0xf2, 0x7c, 0x90, 0x17, 0xac, 0x70, 0xcc, 0x19, + 0x9c, 0xf9, 0x7d, 0x4c, 0x7c, 0x85, 0xf5, 0x3d, 0x54, 0x45, 0x84, 0xb9, 0x4d, 0x75, 0xaa, 0xd6, + 0x35, 0xb8, 0x3f, 0x33, 0x2f, 0x4a, 0x19, 0x6f, 0x3d, 0x30, 0x6c, 0xac, 0xe5, 0xa8, 0xe3, 0xd6, + 0x89, 0xb6, 0xc7, 0x9c, 0x72, 0x68, 0x06, 0x8c, 0xf4, 0xc7, 0x56, 0xda, 0x73, 0x68, 0x36, 0x6c, + 0x38, 0x22, 0xbd, 0xff, 0xbb, 0x5e, 0xcd, 0x61, 0xc1, 0xfd, 0x11, 0x88, 0x68, 0x2b, 0xad, 0xf2, + 0x78, 0x25, 0x9d, 0x43, 0x5c, 0x5e, 0x78, 0x28, 0x4b, 0x8e, 0xa6, 0xd7, 0xd0, 0x39, 0xa2, 0xb3, + 0xdf, 0x45, 0xa9, 0xfd, 0xa3, 0xd6, 0xd4, 0x08, 0x8f, 0x4b, 0x7c, 0x17, 0xe3, 0xc3, 0x6d, 0x99, + 0x53, 0x99, 0x5e, 0xaa, 0x7a, 0xdc, 0x46, 0x8c, 0xe8, 0x27, 0x63, 0xe7, 0x8d, 0x10, 0x6b, 0x65, + 0xbc, 0xad, 0xd1, 0x58, 0x34, 0x05, 0xe6, 0x59, 0xe3, 0x00, 0x45, 0x82, 0x6d, 0xd4, 0xe0, 0xe6, + 0x57, 0x53, 0x67, 0xd3, 0xf4, 0x1d, 0xe5, 0x78, 0xb8, 0xfe, 0x11, 0x25, 0x10, 0xaf, 0xb0, 0x21, + 0x95, 0xee, 0xec, 0x3d, 0x95, 0xde, 0xdf, 0x85, 0xd4, 0x95, 0xee, 0x16, 0xd2, 0x83, 0x30, 0xe4, + 0x94, 0x7a, 0x4c, 0xa5, 0xe6, 0x07, 0xc3, 0x7a, 0x22, 0xd5, 0x85, 0x47, 0xb1, 0xc3, 0x45, 0x89, + 0xda, 0x35, 0x03, 0x7f, 0xd3, 0xf7, 0x35, 0x39, 0xef, 0xa0, 0x30, 0xa7, 0x4c, 0x98, 0x18, 0xc4, + 0x76, 0x9d, 0x47, 0x85, 0xa5, 0xa7, 0x36, 0x19, 0x39, 0x3d, 0x2d, 0xec, 0xd9, 0x9d, 0x4b, 0x3b, + 0xe4, 0x86, 0x71, 0x22, 0x20, 0x95, 0xfd, 0xbf, 0xff, 0x57, 0xfa, 0x01, 0x43, 0xd1, 0x2a, 0x4b, + 0x7d, 0xdd, 0x47, 0x9d, 0xad, 0xdd, 0x7a, 0x2c, 0x95, 0xfe, 0x49, 0x12, 0x13, 0xca, 0x1a, 0xc1, + 0xb4, 0x0f, 
0x69, 0xeb, 0x1f, 0x80, 0xc3, 0xdc, 0x86, 0x15, 0xd2, 0x64, 0xd4, 0x50, 0xf7, 0xf2, + 0xeb, 0xb8, 0x64, 0x48, 0x06, 0xd1, 0x91, 0x46, 0x28, 0x9c, 0x21, 0x73, 0xe0, 0x04, 0x0b, 0x05, + 0x86, 0x0d, 0xe4, 0x2f, 0x45, 0xe1, 0xf3, 0x8a, 0x6b, 0xff, 0x20, 0xe1, 0xb3, 0x83, 0xd9, 0xd6, + 0x9e, 0x28, 0xd0, 0x94, 0x5e, 0x3c, 0x96, 0x38, 0x68, 0x46, 0x77, 0xee, 0x73, 0xf6, 0xa5, 0x19, + 0x88, 0x9f, 0xea, 0xdb, 0x6e, 0xfc, 0x61, 0x81, 0x6f, 0xdb, 0x22, 0xce, 0xda, 0x43, 0xb5, 0xc6, + 0x28, 0x95, 0xb1, 0xde, 0x4d, 0x5d, 0xdc, 0x48, 0x03, 0x59, 0x33, 0x49, 0xaf, 0xe6, 0xb4, 0x88, + 0xea, 0x40, 0xfc, 0x45, 0xae, 0x1f, 0x3b, 0xcd, 0xb7, 0x1a, 0xec, 0x2e, 0xf8, 0xe4, 0x5e, 0xb8, + 0x3d, 0x50, 0x43, 0x09, 0x01, 0x5e, 0xd6, 0x3b, 0xb5, 0x38, 0xd9, 0x11, 0xe9, 0x5e, 0x33, 0x20, + 0x13, 0xc6, 0x67, 0x77, 0x7d, 0x25, 0x76, 0xa8, 0x46, 0xbb, 0xed, 0x18, 0x1e, 0x40, 0xf9, 0x53, + 0xa7, 0x53, 0x8f, 0xa2, 0x5e, 0x1f, 0xd1, 0x35, 0xcd, 0xd1, 0x3a, 0x39, 0xa2, 0x08, 0xb5, 0x66, + 0x6b, 0xde, 0x15, 0x0f, 0xb8, 0xf1, 0x54, 0x47, 0x99, 0xc9, 0x40, 0xb5, 0xbe, 0x3f, 0x4a, 0xda, + 0x2f, 0x55, 0x34, 0xf6, 0x97, 0xcc, 0xe3, 0x0f, 0x5a, 0x8f, 0xb6, 0xf2, 0x24, 0x1a, 0xc9, 0x8a, + 0x35, 0xf8, 0x44, 0xb8, 0xd7, 0xe2, 0xb0, 0x64, 0xf8, 0x04, 0x42, 0x58, 0x79, 0x84, 0x74, 0xbd, + 0xd2, 0x4f, 0x9a, 0xfe, 0x67, 0xd2, 0xe7, 0x4c, 0x36, 0xee, 0x52, 0xee, 0xbb, 0x6f, 0x15, 0xa5, + 0xb8, 0x45, 0x33, 0xcc, 0x5c, 0x16, 0x51, 0xad, 0xf2, 0x47, 0x7e, 0x29, 0xe3, 0x30, 0xfc, 0xd5, + 0xad, 0x35, 0xaa, 0x79, 0xf4, 0x15, 0xcb, 0xbb, 0xae, 0xd1, 0x70, 0x5c, 0x9f, 0x3c, 0xb5, 0x73, + 0x74, 0x82, 0x4e, 0x83, 0x23, 0x5a, 0x4a, 0xe7, 0xa2, 0x6c, 0x2a, 0x7d, 0x7e, 0x8e, 0xa1, 0x4a, + 0x3d, 0xc5, 0x9f, 0xfd, 0xb4, 0x6d, 0x30, 0x4e, 0x9d, 0x1d, 0x6c, 0xab, 0x31, 0xe9, 0x97, 0xde, + 0x57, 0x45, 0xc2, 0xb1, 0xc5, 0x08, 0x72, 0xda, 0xe4, 0xc5, 0xab, 0xc0, 0x90, 0xbc, 0x81, 0xcb, + 0x68, 0xb1, 0xf5, 0xd6, 0x16, 0x5c, 0x30, 0x86, 0x76, 0xe8, 0x00, 0xb9, 0xc3, 0x90, 0x2b, 0x0b, + 0x63, 0x6a, 0xe1, 0xd4, 0xe6, 0x93, 0x24, 0x5c, 0xcf, 0x6a, 0x70, 0x62, 0xa7, 0x1d, 0x72, 0x8b, + 0xc4, 0x66, 0x2e, 0x9c, 0x0b, 0xca, 0x49, 0x77, 0xc9, 0x00, 0x66, 0x7c, 0x50, 0xb2, 0x67, 0x80, + 0xc2, 0x90, 0x25, 0x63, 0xe6, 0xd8, 0xbf, 0x99, 0xe1, 0x81, 0x0e, 0xd2, 0x1d, 0x50, 0xd7, 0xe6, + 0x0d, 0xd1, 0x85, 0x4d, 0x17, 0x2b, 0xb9, 0xc2, 0x84, 0x14, 0x6e, 0x0f, 0x11, 0x00, 0x83, 0x6a, + 0xf7, 0xdc, 0x86, 0x40, 0x5b, 0xf2, 0x09, 0x58, 0xae, 0xb0, 0x0d, 0xaa, 0x2a, 0xd9, 0xd5, 0x7b, + 0x85, 0x28, 0xf6, 0x48, 0xa6, 0x1d, 0xf6, 0xfb, 0xb2, 0xe6, 0x4c, 0x48, 0xf7, 0xed, 0xcd, 0x8d, + 0x40, 0xea, 0x30, 0xe5, 0xca, 0x01, 0x9a, 0x3b, 0x6a, 0xab, 0x79, 0x85, 0x56, 0xfb, 0xf5, 0xe6, + 0x25, 0x6b, 0x4c, 0x98, 0x21, 0xbc, 0x8b, 0xb9, 0x35, 0xc7, 0x28, 0x47, 0x7b, 0xb5, 0xad, 0x24, + 0x82, 0x85, 0x0d, 0x02, 0xfd, 0xd6, 0xa4, 0x8f, 0xb4, 0x2c, 0x44, 0x12, 0x61, 0xf3, 0x4e, 0x68, + 0x9b, 0x36, 0x55, 0x15, 0x37, 0x09, 0x8f, 0xe4, 0xe4, 0x7b, 0x1d, 0xd5, 0x56, 0xae, 0x2b, 0xf1, + 0x4b, 0x1d, 0x23, 0xed, 0x5a, 0x45, 0x97, 0x22, 0xf2, 0x48, 0xce, 0x86, 0xc7, 0xe8, 0x2f, 0xde, + 0x67, 0x82, 0x9f, 0x58, 0x31, 0xa6, 0xeb, 0x21, 0x9a, 0xec, 0xb6, 0xe7, 0x18, 0x59, 0xf3, 0xb0, + 0xa2, 0x86, 0x2e, 0x5c, 0xba, 0xaf, 0x54, 0xef, 0xdc, 0xe8, 0x09, 0x20, 0x34, 0xa0, 0xb0, 0x79, + 0xd5, 0x57, 0xae, 0x47, 0x8f, 0x7d, 0x79, 0x0d, 0xf1, 0x68, 0xac, 0xb0, 0x4c, 0xe9, 0x61, 0x54, + 0x4b, 0x58, 0xc9, 0x57, 0x3b, 0xe3, 0xe7, 0xe3, 0x4f, 0xc2, 0xbb, 0xe1, 0x4d, 0x9b, 0x76, 0x60, + 0x00, 0x47, 0x7a, 0xdf, 0xaf, 0x08, 0x7a, 0x65, 0xd0, 0x95, 0x1e, 0xe1, 0xac, 0xac, 0x35, 0xd8, + 0x13, 0x6f, 0x8b, 0x93, 0xbc, 0x3b, 
0xc3, 0x17, 0x29, 0xa4, 0x22, 0x93, 0xb6, 0xf3, 0xef, 0xe7, + 0x00, 0x15, 0x79, 0x27, 0x33, 0xed, 0x70, 0xae, 0x19, 0x54, 0xd7, 0x7b, 0xcd, 0x18, 0x26, 0xd6, + 0x8c, 0xb9, 0x42, 0x78, 0x21, 0xd4, 0x6a, 0xdc, 0x44, 0x4c, 0x10, 0x9d, 0x15, 0x9f, 0x52, 0x76, + 0xf8, 0x70, 0x39, 0x49, 0x83, 0x97, 0x28, 0xc8, 0x94, 0x24, 0x95, 0x7c, 0x2f, 0xb0, 0x94, 0xcf, + 0x03, 0xa5, 0x7f, 0xd7, 0xcf, 0x5d, 0x94, 0xe2, 0x44, 0x3e, 0x7a, 0xe1, 0x3f, 0x7f, 0x6a, 0xa3, + 0x82, 0x22, 0xfb, 0x7b, 0x19, 0x32, 0xcd, 0x07, 0x95, 0x7a, 0xfe, 0x6f, 0xfb, 0xab, 0x8f, 0xc0, + 0x68, 0xda, 0xdd, 0x22, 0x2d, 0xbe, 0xf4, 0x61, 0x30, 0x76, 0xff, 0xc6, 0x4f, 0xb9, 0x15, 0x86, + 0x3b, 0x53, 0x8c, 0x69, 0xd4, 0x3e, 0x64, 0x92, 0xd9, 0xaa, 0x28, 0xf6, 0xb3, 0xfd, 0xcd, 0xa0, + 0x32, 0xb8, 0x6e, 0x1d, 0x21, 0xa3, 0x99, 0x67, 0x66, 0xad, 0x3d, 0xc7, 0xaa, 0x5a, 0xe6, 0xa2, + 0x8d, 0xad, 0xd0, 0x7b, 0x0e, 0x3d, 0x0a, 0xc6, 0xf7, 0x7c, 0x79, 0x9e, 0x91, 0xa9, 0x6f, 0xcd, + 0xe9, 0xfd, 0x11, 0x85, 0x93, 0xa9, 0x1e, 0xff, 0xcb, 0xec, 0x4e, 0x8f, 0x3a, 0x6d, 0xed, 0xe6, + 0x87, 0x73, 0x92, 0x50, 0xe7, 0xc5, 0xce, 0x51, 0xfe, 0x61, 0xf4, 0xc6, 0x90, 0x53, 0x44, 0x89, + 0x5e, 0x1e, 0xde, 0xcd, 0xe5, 0x73, 0x73, 0x0b, 0xa4, 0xce, 0x19, 0xa8, 0x89, 0x8d, 0xae, 0x64, + 0xc8, 0xff, 0x6a, 0x07, 0x51, 0x1b, 0x87, 0x64, 0x1c, 0xf4, 0x2d, 0x65, 0x28, 0x22, 0x5b, 0x78, + 0x7b, 0x5c, 0xe8, 0xfc, 0x88, 0x0c, 0x74, 0xf6, 0xe1, 0x0d, 0x8d, 0x8e, 0xaa, 0x55, 0xb3, 0x37, + 0x9b, 0x05, 0x0e, 0x0d, 0xf3, 0x9a, 0x93, 0x63, 0xe3, 0xaf, 0x4d, 0xb5, 0xd0, 0xb0, 0x78, 0x18, + 0xbd, 0x09, 0x35, 0x06, 0x5f, 0xe1, 0x51, 0x24, 0x1f, 0x53, 0xe5, 0xbd, 0x33, 0x37, 0x6b, 0x05, + 0x23, 0x06, 0xcd, 0xe0, 0xc2, 0xb9, 0xc9, 0x27, 0xc4, 0x46, 0x36, 0x4d, 0xb3, 0x5c, 0xfc, 0x06, + 0x1f, 0x87, 0x2a, 0x45, 0xa6, 0x34, 0x65, 0xd8, 0x2a, 0x46, 0xb5, 0x82, 0xcf, 0x68, 0x69, 0xa2, + 0x22, 0x7c, 0xc5, 0xd2, 0x2d, 0xe9, 0x0d, 0xa7, 0xc0, 0xae, 0x0d, 0x9a, 0x19, 0xa3, 0xfa, 0xd8, + 0x08, 0xff, 0x91, 0x0c, 0xef, 0x6b, 0x27, 0xe4, 0x90, 0x26, 0x6a, 0xe7, 0xf1, 0x03, 0x2e, 0x5f, + 0x73, 0x2a, 0x1a, 0x3d, 0xe6, 0xf5, 0xbb, 0xfe, 0x36, 0x74, 0xc7, 0x32, 0xe0, 0xca, 0x3b, 0x7f, + 0x91, 0x5d, 0xde, 0x99, 0x25, 0x63, 0x9b, 0x5e, 0xd0, 0x04, 0x90, 0x6c, 0xc9, 0x01, 0xca, 0x79, + 0x4e, 0x16, 0x9c, 0x5e, 0x98, 0x49, 0x0d, 0x3b, 0x6f, 0x97, 0x60, 0xca, 0x09, 0x71, 0x78, 0x00, + 0xef, 0xbe, 0xab, 0x19, 0x92, 0xfc, 0xf9, 0xd1, 0xd3, 0x48, 0x3c, 0x5e, 0x2c, 0x48, 0x69, 0x42, + 0x14, 0x86, 0x16, 0x7c, 0x7f, 0x20, 0x47, 0x16, 0x85, 0x6a, 0x69, 0x09, 0x3a, 0x4f, 0x56, 0xc0, + 0x68, 0x3a, 0xa0, 0x76, 0x82, 0x8a, 0x04, 0x5b, 0x70, 0x50, 0x64, 0x4e, 0x7e, 0x3b, 0xa6, 0x9b, + 0x2b, 0x68, 0x15, 0xa8, 0x59, 0x60, 0x26, 0x3b, 0xd9, 0x27, 0xd3, 0x83, 0x80, 0x64, 0x59, 0x75, + 0x45, 0x7c, 0x2a, 0xac, 0xa9, 0x26, 0x93, 0xaa, 0xe2, 0x9d, 0x57, 0x8d, 0x74, 0x91, 0x46, 0x2a, + 0x48, 0xf9, 0x43, 0x72, 0x3d, 0x16, 0x45, 0x1b, 0xd5, 0x9c, 0x8c, 0xa4, 0xb3, 0x6d, 0xe2, 0x2e, + 0x17, 0xb3, 0x87, 0x1b, 0x08, 0x97, 0xe1, 0x73, 0x8c, 0xc2, 0xa7, 0xb6, 0xd6, 0x01, 0xa8, 0x10, + 0x2f, 0x04, 0x94, 0x26, 0x71, 0x37, 0xc7, 0x09, 0x64, 0x28, 0x18, 0x59, 0xb6, 0x1c, 0xf6, 0x4e, + 0xde, 0x64, 0xfd, 0x3a, 0xbc, 0x6d, 0xb4, 0xc1, 0xc2, 0x34, 0xed, 0xe5, 0xc6, 0x53, 0xd9, 0x3a, + 0x25, 0x8c, 0x1f, 0xeb, 0x23, 0x01, 0x35, 0x1a, 0xff, 0x86, 0x81, 0xf7, 0x38, 0xf1, 0x67, 0x18, + 0x9f, 0xea, 0x97, 0xb2, 0x82, 0x6e, 0x2e, 0x4f, 0x53, 0x50, 0x56, 0xdd, 0x5e, 0xce, 0x17, 0xbd, + 0x87, 0x81, 0xf3, 0x90, 0x4b, 0x98, 0xf7, 0x5c, 0x42, 0xf2, 0x4d, 0xe7, 0xeb, 0xa1, 0x70, 0xb1, + 0xf4, 0x55, 0x77, 0x59, 0xfd, 0x4a, 0xb2, 0xe2, 0xc3, 0x11, 
0x55, 0x9b, 0x5a, 0x40, 0xa7, 0x28, + 0x76, 0x8e, 0x6a, 0xe1, 0xef, 0x65, 0x36, 0x65, 0xde, 0x4e, 0x18, 0x62, 0xcc, 0x62, 0x95, 0xcc, + 0x8b, 0x98, 0x29, 0xa4, 0x0e, 0xbe, 0xbf, 0x66, 0x00, 0x7a, 0x72, 0x11, 0x5d, 0xa9, 0x4b, 0xc1, + 0xfb, 0x64, 0x9e, 0x74, 0x22, 0x6b, 0x0d, 0x72, 0x7a, 0x60, 0xb6, 0x6f, 0xbf, 0x6b, 0x72, 0xaa, + 0xa4, 0xbd, 0xa4, 0x29, 0x41, 0x1d, 0x03, 0xec, 0xd3, 0x70, 0x86, 0x24, 0x3f, 0xb5, 0xb2, 0x72, + 0xe2, 0x7a, 0x0b, 0x9c, 0x75, 0x22, 0xc9, 0xeb, 0x8f, 0x66, 0x4b, 0x9b, 0x27, 0x2f, 0x98, 0x08, + 0xa0, 0x42, 0x89, 0x5b, 0xdf, 0x89, 0xa3, 0x96, 0xd2, 0x0a, 0x32, 0x01, 0x58, 0x8a, 0xbf, 0x7d, + 0xea, 0xe6, 0xf9, 0xee, 0xd7, 0xac, 0xe0, 0x86, 0x0b, 0x2c, 0x80, 0xca, 0x5d, 0xd7, 0xb8, 0x9b, + 0x6d, 0x85, 0x21, 0xa1, 0x3e, 0x56, 0xfa, 0x23, 0x85, 0x0a, 0x3a, 0xb0, 0x9a, 0x83, 0xe2, 0x96, + 0x38, 0xac, 0x40, 0x7d, 0x15, 0x4d, 0x0d, 0x72, 0x5e, 0xca, 0xba, 0x05, 0xf1, 0x5d, 0xf0, 0xdc, + 0x18, 0x0b, 0x5e, 0xa3, 0x8e, 0x92, 0xc6, 0xf2, 0xbd, 0xb6, 0x2a, 0xcb, 0xb1, 0x19, 0x74, 0x7c, + 0xc9, 0x26, 0x51, 0xda, 0xec, 0x1d, 0x4a, 0xe9, 0xbe, 0xde, 0x79, 0xc6, 0x41, 0x75, 0xd8, 0x1e, + 0xb5, 0x8e, 0xc0, 0x73, 0x2f, 0x17, 0x56, 0x47, 0xde, 0xd3, 0xae, 0x06, 0xb5, 0x1b, 0xfd, 0x08, + 0xdc, 0xda, 0x7c, 0xe0, 0x72, 0xfe, 0x2d, 0x13, 0x1f, 0x44, 0x9b, 0xc2, 0x7b, 0x2c, 0x0e, 0x4a, + 0xc4, 0x55, 0x07, 0x60, 0x2b, 0xfc, 0xa9, 0xc1, 0xfa, 0x59, 0x25, 0x56, 0x38, 0xb2, 0x14, 0x50, + 0xf9, 0x23, 0x37, 0xa6, 0x86, 0x27, 0x31, 0x08, 0x10, 0x8c, 0x9e, 0x68, 0xf3, 0x4f, 0xca, 0xe8, + 0xeb, 0x8e, 0x5f, 0x0b, 0x11, 0xf7, 0xde, 0x8c, 0xd4, 0xd1, 0xbe, 0xa3, 0x36, 0xe6, 0xeb, 0xff, + 0x3e, 0xff, 0xe1, 0xeb, 0x31, 0x02, 0x1b, 0x05, 0x8f, 0xa0, 0x6f, 0xd4, 0x32, 0xb3, 0x34, 0xab, + 0xa6, 0x80, 0xce, 0xb4, 0xda, 0x7a, 0x5d, 0xb5, 0x9a, 0x64, 0x7d, 0xdf, 0x04, 0xca, 0x77, 0x71, + 0x19, 0xf5, 0xf5, 0xb2, 0x63, 0x89, 0xbe, 0xdf, 0x59, 0x5e, 0x75, 0xce, 0x9a, 0xdc, 0x7c, 0x39, + 0x7c, 0x57, 0x60, 0x5d, 0x14, 0x50, 0x26, 0x46, 0x5f, 0x9f, 0xc5, 0x3e, 0xe2, 0xbb, 0x23, 0xe9, + 0xda, 0x96, 0xcd, 0xf6, 0xe0, 0x97, 0x0b, 0x3d, 0xdc, 0x62, 0x24, 0xe1, 0xcc, 0xd7, 0x63, 0x47, + 0xeb, 0x2c, 0xac, 0xcc, 0x8b, 0x99, 0x7b, 0x8a, 0x22, 0xbd, 0x47, 0x26, 0xa7, 0x3e, 0xdd, 0xbd, + 0x32, 0x78, 0x70, 0xd8, 0xd9, 0x2b, 0xb6, 0xc5, 0x6f, 0x05, 0x99, 0xcd, 0xfe, 0x36, 0x9c, 0x4e, + 0x41, 0x47, 0x22, 0x9b, 0x4a, 0x93, 0x1e, 0x4d, 0x70, 0x9e, 0x49, 0x28, 0xda, 0x08, 0xf5, 0x52, + 0x2b, 0x11, 0x56, 0x14, 0x4d, 0x1d, 0x36, 0x63, 0x47, 0xf4, 0x24, 0x85, 0x84, 0x4d, 0x22, 0xa9, + 0x14, 0xd2, 0x89, 0xed, 0xa0, 0x2a, 0x94, 0x85, 0xca, 0xd7, 0xcb, 0x36, 0x6a, 0x15, 0xac, 0x98, + 0xc1, 0x2c, 0xe8, 0x87, 0xc1, 0xf2, 0x08, 0xde, 0xb8, 0x62, 0xd7, 0x7d, 0x05, 0xb1, 0x95, 0x56, + 0x87, 0xa7, 0x8d, 0x9c, 0xf2, 0x6d, 0x2d, 0x2d, 0x38, 0x44, 0x01, 0x1b, 0xf9, 0x71, 0xc3, 0xf0, + 0x53, 0xba, 0x64, 0x70, 0x3e, 0xed, 0x2d, 0xb6, 0xb3, 0x19, 0x2a, 0x3d, 0x02, 0x41, 0x46, 0x04, + 0xf6, 0x91, 0x17, 0x10, 0x1d, 0xdc, 0xd7, 0xc9, 0xfb, 0xac, 0xb5, 0x0a, 0x5c, 0x23, 0xab, 0x1e, + 0x38, 0x3d, 0xaa, 0x54, 0x1c, 0x7f, 0x4f, 0xb4, 0x77, 0x15, 0x45, 0xe5, 0x02, 0xe0, 0xd5, 0x26, + 0xe3, 0x54, 0x63, 0xe2, 0x74, 0x24, 0x63, 0xf5, 0x1e, 0x15, 0x17, 0xd4, 0x59, 0x00, 0xb3, 0x87, + 0xb2, 0xac, 0x7b, 0x61, 0xaf, 0x22, 0x7f, 0x48, 0xa3, 0x87, 0x14, 0x9f, 0xd3, 0xbb, 0xf9, 0x8e, + 0xf5, 0xe8, 0x09, 0xc7, 0x08, 0x97, 0x6f, 0x6b, 0x5c, 0x7b, 0x98, 0x15, 0x5e, 0x82, 0xd5, 0x9f, + 0x62, 0x51, 0xae, 0x22, 0x0c, 0x69, 0xd0, 0xbc, 0xff, 0xd2, 0x0a, 0x59, 0xd2, 0x78, 0x9d, 0xa5, + 0x42, 0xd0, 0xd0, 0x33, 0x2b, 0xce, 0xb3, 0xd2, 0xdd, 0xb2, 0x6f, 0xbf, 0x81, 0x0b, 
0x9f, 0x54, + 0xe8, 0x03, 0xfc, 0x6f, 0x7d, 0x9b, 0x64, 0xd9, 0x43, 0xc3, 0x59, 0x97, 0x1c, 0xd7, 0x3e, 0x7f, + 0x8f, 0x91, 0x09, 0x2b, 0x6f, 0x7d, 0xd7, 0xed, 0xe6, 0xb3, 0xc8, 0xec, 0xa0, 0x11, 0xca, 0x2e, + 0x25, 0x6a, 0x33, 0xfd, 0x30, 0xfd, 0x1e, 0x91, 0x1e, 0xa4, 0x5a, 0xc3, 0xac, 0x69, 0x34, 0xb9, + 0x15, 0xbb, 0xae, 0xaf, 0xf1, 0x7e, 0x66, 0x6a, 0xad, 0xb8, 0x6c, 0x69, 0xb9, 0xc1, 0x32, 0x7f, + 0x8a, 0xd3, 0x85, 0x52, 0x9a, 0xf3, 0xd0, 0x0c, 0xd4, 0x0a, 0x2c, 0x23, 0x23, 0x12, 0x4b, 0x07, + 0x9a, 0xde, 0xa8, 0xfd, 0xeb, 0xe0, 0x9f, 0x7c, 0x28, 0x5e, 0xd8, 0x23, 0xe5, 0x44, 0x3f, 0x2b, + 0x1c, 0xf7, 0xa7, 0xc2, 0x6b, 0xc2, 0xc2, 0x5b, 0xd2, 0x92, 0xa7, 0x71, 0x6f, 0x4e, 0xb0, 0x2e, + 0xeb, 0xf9, 0x25, 0x5c, 0xc6, 0xa4, 0x50, 0xfb, 0xbd, 0xa9, 0x28, 0x17, 0x77, 0x82, 0xb1, 0x3a, + 0xdb, 0xc6, 0xc7, 0xc8, 0x82, 0xff, 0xb7, 0x39, 0xc0, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, + 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, + 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, + 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xcf, 0x3f, 0xff, 0xfc, 0xf3, 0xff, + 0x19, 0xff, 0x17, 0xc3, 0x41, 0xce, 0x64, 0x00, 0x8c, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 35840, // uncompressed data size (bytes) + 24475, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x4e, 0x00, 0x62, 0x1d, 0x08, 0x13, 0x4c, 0xc4, 0x43, 0x69, + 0x20, 0x00, 0x00, 0x6e, 0x8b, 0xb6, 0xe9, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_dbg_tu11x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_sig_dbg_data[] = +{ + 0xfc, 0x18, 0x18, 0x86, 0xe5, 0x7d, 0x71, 0x36, 0xe0, 0x60, 0xde, 0x9c, 0x29, 0x7b, 0x20, 0x82, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_sig_dbg_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_prod_tu11x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_sig_prod_data[] = +{ + 0x1b, 0x87, 0x49, 0x74, 0xf3, 0xf1, 0xc7, 0xb0, 0xf1, 0x23, 0xb2, 0x1c, 0x47, 0x3f, 0xe2, 0xd3, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_sig_prod_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_tu11x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_patch_loc_data[] = +{ + 0x00, 0x62, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_tu11x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_load_sig_tu11x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterLoadUcode_TU116("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/load/g_booteruc_load_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterLoadUcode_TU116_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterLoadUcode_TU116_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterLoadUcode_TU116_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterLoadUcode_TU116 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterLoadUcode_TU116_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_TU116(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterLoadUcode_TU116; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_GA100.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_GA100.c new file mode 100644 index 000000000..c4e1562a8 --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_GA100.c @@ -0,0 +1,1371 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 9728 +// COMPRESSED SIZE (bytes): 7415 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_image_dbg_data[] = +{ + 0xed, 0x99, 0x45, 0x50, 0x1d, 0xda, 0xd2, 0x85, 0xcf, 0xc1, 0xdd, 0x35, 0x01, 0x0e, 0xee, 0x0e, + 0xc1, 0xdd, 0xdd, 0x83, 0xbb, 0xbb, 0x04, 0x87, 0xe0, 0xee, 0x4e, 0xf0, 0x83, 0xbb, 0x06, 0x08, + 0xee, 0xc1, 0xdd, 0xdd, 0xdd, 0x5d, 0x83, 0xfc, 0x79, 0xff, 0xf0, 0x8e, 0xde, 0xf8, 0xd5, 0xfd, + 0x26, 0x6b, 0xf5, 0x1e, 0x74, 0x75, 0xd5, 0xaa, 0xda, 0xd5, 0xbb, 0x76, 0x04, 0xc0, 0x1f, 0xf0, + 0x9e, 0x0c, 0xf0, 0x45, 0x05, 0x00, 0x9e, 0x20, 0x9e, 0x00, 0xcf, 0x10, 0x31, 0x00, 0x08, 0x40, + 0xd7, 0xc6, 0xe6, 0xc7, 0xc7, 0x07, 0x6a, 0x04, 0x00, 0x08, 0xf8, 0x00, 0x43, 0xd5, 0xef, 0x01, + 0xd8, 0xd2, 0x96, 0x81, 0xf5, 0xc9, 0x00, 0xfa, 0xb4, 0x65, 0x88, 0xbf, 0x42, 0x91, 0xb6, 0x0c, + 0xf9, 0x57, 0x88, 0x22, 0x00, 0x00, 0x40, 0x1a, 0x18, 0xb2, 0x71, 0x13, 0xb2, 0x0b, 0x9c, 0xbb, + 0x99, 0x06, 0x06, 0xa6, 0xa5, 0x43, 0x34, 0x6e, 0x02, 0x1b, 0x97, 0x21, 0x7c, 0xbb, 0x00, 0x00, + 0x54, 0x16, 0x69, 0x00, 0xda, 0x23, 0x18, 0xd0, 0x91, 0x1c, 0xf7, 0xb7, 0x65, 0xe3, 0x73, 0x40, + 0xe3, 0xb3, 0x7f, 0x04, 0x20, 0x18, 0xf6, 0xef, 0x11, 0x2a, 0x54, 0x73, 0x3a, 0x80, 0x10, 0xe2, + 0x3f, 0x0e, 0xfe, 0xaf, 0x03, 0xfe, 0xbf, 0x03, 0xfe, 0x75, 0x30, 0xff, 0x71, 0x7f, 0xdb, 0x42, + 0xa4, 0xa0, 0xc2, 0x6d, 0xfc, 0x67, 0x84, 0xb7, 0x67, 0xe8, 0x08, 0x00, 0x11, 0xe0, 0x2d, 0x07, + 0xaa, 0x2d, 0x1e, 0xe2, 0x57, 0x32, 0xda, 0xdf, 0xfa, 0xfd, 0x05, 0xe0, 0xfb, 0x77, 0xb0, 0x27, + 0x88, 0x8e, 0xc2, 0x3f, 0xc0, 0xb8, 0xd9, 0x1b, 0xa8, 0x08, 0x34, 0x58, 0x40, 0x2e, 0xf8, 0x09, + 0x10, 0xf7, 0x57, 0xbb, 0x3e, 0xee, 0x98, 0x5f, 0xdf, 0xa5, 0x81, 0xb9, 0x60, 0x81, 0x65, 0xc0, + 0x7f, 0x03, 0x41, 0xf9, 0xf0, 0x16, 0x0b, 0x96, 0x67, 0x70, 0xdf, 0x96, 0xac, 0xb1, 0x68, 0x13, + 0x63, 0xd0, 0xb3, 0x98, 0xe4, 0x13, 0xe1, 0x64, 0x2b, 0xb3, 0x8c, 0xd6, 0x3a, 0xca, 0xd5, 0xb2, + 0xb5, 0x50, 0x41, 0xd4, 0xbd, 0x99, 0x6a, 0x24, 0x7f, 0x57, 0x18, 0xa7, 0x6d, 0xd1, 0xe3, 0x19, + 0x8d, 0x9b, 0x5b, 0x70, 0xe3, 0x37, 0xbc, 0x99, 0x96, 0x21, 0xfe, 0x68, 0x39, 0x32, 0x42, 0x9b, + 0xa8, 0xa0, 0x21, 0x8e, 0x92, 0x80, 0xf2, 0x83, 0x0a, 0x58, 0xb3, 0x10, 0x37, 0xa1, 0x94, 0x6e, + 0xee, 0x13, 0x6c, 0x7c, 0xce, 0x31, 0x1c, 0xb2, 0x52, 0xa2, 0x4f, 0x92, 0x3f, 0x8c, 0xf3, 0x29, + 0x10, 0x26, 0x2c, 0x18, 0xbd, 0x37, 0xbd, 0x9a, 0x28, 0x83, 0xfc, 0x8f, 0x33, 0x2c, 0x11, 0x86, + 0x26, 0x1c, 0xce, 0x65, 0x09, 0x24, 0x87, 0x02, 0xf9, 0x2f, 0x81, 0x20, 0x19, 0x62, 0x94, 0xcb, + 0x17, 0xb2, 0xe3, 0xad, 0xc8, 0xad, 0xa6, 0x17, 0xd1, 0xf5, 0x59, 0xcd, 0xcc, 0xad, 0xb8, 0x1d, + 0x65, 0x77, 0x5b, 0xb6, 0x35, 0xad, 0xc6, 0x8d, 0xf1, 0xbd, 0x35, 0xda, 0x92, 0x3c, 0xde, 0x85, + 0x03, 0xca, 0xc4, 0xaa, 0xf5, 0xda, 0xc2, 0x08, 0x13, 0x01, 0x85, 0x7e, 0x6d, 0x1d, 0x30, 0x9b, + 0x54, 0x48, 0xed, 0xdc, 0xd1, 0xe5, 0x77, 0x07, 0x09, 0x33, 0x1d, 0xbb, 0xbe, 0x3d, 0x3a, 0x05, + 0x96, 0x21, 0x73, 0x98, 0x8e, 0x96, 0x89, 0x0d, 0x4b, 0xe6, 0xeb, 0xeb, 0xc3, 0x62, 0xf5, 0x10, + 0xd5, 0x43, 0x7d, 0x3b, 0xe3, 0x7b, 0x78, 0xfa, 0x05, 0xd6, 0xd1, 0xa7, 0x9e, 0x91, 0xc9, 0xe6, + 0xa0, 0x92, 0xb4, 0x58, 0xcb, 0x8c, 0x3f, 0xde, 0x75, 0x16, 0x3b, 0xbd, 0x86, 0x1c, 0xf8, 0xbc, + 0xe4, 0x5e, 0x4d, 0xa1, 0xe2, 0x8e, 0xc4, 0xda, 0x92, 
0xf8, 0xed, 0x9e, 0x76, 0x7d, 0x1f, 0x07, + 0xc9, 0x6f, 0x05, 0x2e, 0x35, 0xc9, 0x81, 0xa2, 0x4c, 0xa1, 0x25, 0x0f, 0x66, 0xc4, 0x44, 0xde, + 0xe8, 0x08, 0x7c, 0xb5, 0xe2, 0x66, 0x1d, 0xc7, 0x82, 0x23, 0xa1, 0xd8, 0x3d, 0x4d, 0xf5, 0xc9, + 0x77, 0x15, 0x24, 0x4f, 0x52, 0xb7, 0xef, 0xf6, 0x21, 0xaf, 0x8b, 0x03, 0x16, 0xf2, 0xb9, 0xfe, + 0x6b, 0xe5, 0x4c, 0x26, 0x1d, 0x57, 0x74, 0x8a, 0x1a, 0x40, 0x77, 0xf6, 0x21, 0xff, 0xe3, 0xdb, + 0xec, 0x63, 0xc7, 0xe1, 0xd9, 0x15, 0x7f, 0x3b, 0x7c, 0x96, 0xb7, 0xc5, 0xe7, 0x45, 0x27, 0x93, + 0x7e, 0xdc, 0xb2, 0x54, 0x15, 0x74, 0xc2, 0x36, 0x08, 0xd1, 0xc9, 0x01, 0xff, 0x36, 0xd0, 0xa5, + 0x3e, 0xd8, 0x07, 0x47, 0x8c, 0xf4, 0xed, 0xb1, 0xde, 0xd5, 0x5c, 0x1b, 0x77, 0x46, 0x61, 0x6f, + 0x64, 0xfc, 0x97, 0xee, 0xa4, 0xcd, 0x97, 0x36, 0x2d, 0xca, 0xbe, 0xb4, 0x34, 0xab, 0xf8, 0x20, + 0x7d, 0xaa, 0x2d, 0x9c, 0xc9, 0x3f, 0x50, 0x9a, 0x32, 0x6b, 0x70, 0x0d, 0xcc, 0x8e, 0x99, 0x41, + 0xb7, 0x0f, 0xa5, 0x45, 0xaf, 0xb9, 0x57, 0xbd, 0x3b, 0xb3, 0x22, 0x8e, 0x63, 0xe1, 0x0a, 0x8e, + 0x11, 0xd4, 0xef, 0x25, 0xfd, 0xbc, 0xd6, 0x54, 0x6a, 0x77, 0xb1, 0x29, 0x73, 0xb2, 0x6f, 0x12, + 0x8b, 0x45, 0xd0, 0x5e, 0xca, 0x1c, 0xcf, 0x6a, 0x93, 0x5c, 0x3e, 0xad, 0x36, 0xc3, 0x41, 0x1a, + 0xe8, 0x4a, 0xc6, 0x3b, 0x7b, 0x2f, 0xfe, 0xb0, 0x9d, 0x86, 0x9c, 0x7b, 0x29, 0x0a, 0xfb, 0x4b, + 0x36, 0x96, 0xa8, 0x01, 0x46, 0x98, 0x9a, 0xfb, 0xe0, 0x1b, 0x82, 0xf9, 0x7b, 0xfe, 0xeb, 0x1a, + 0x9d, 0x6a, 0x67, 0xab, 0xa8, 0x6a, 0x60, 0x16, 0x3a, 0xd9, 0x9f, 0x3f, 0xf9, 0x44, 0x71, 0xc5, + 0x9a, 0x44, 0x52, 0x55, 0x41, 0x3f, 0x6c, 0xb0, 0xbf, 0xed, 0x2e, 0x6a, 0xf7, 0x80, 0xca, 0xdf, + 0x78, 0x08, 0x30, 0x44, 0x90, 0x9a, 0xee, 0x03, 0xb0, 0x13, 0x46, 0xe7, 0xeb, 0x79, 0xd8, 0x82, + 0x43, 0xb3, 0xad, 0xb8, 0x3d, 0xe7, 0xdc, 0xc0, 0xea, 0x55, 0xd3, 0x09, 0x32, 0xab, 0xdf, 0x45, + 0x9c, 0x4a, 0x30, 0x17, 0x5a, 0xee, 0xdc, 0x99, 0xf4, 0xfc, 0x76, 0x78, 0x5c, 0x3c, 0x13, 0xc3, + 0x12, 0x9a, 0x13, 0xb1, 0x1b, 0x22, 0x9b, 0x8b, 0xe6, 0xa2, 0x6a, 0x3b, 0x6b, 0xdf, 0xce, 0x49, + 0xc3, 0x4d, 0xc3, 0x8d, 0x5c, 0x68, 0x71, 0x9a, 0x69, 0x06, 0x46, 0x36, 0x95, 0x5d, 0x38, 0xba, + 0x7e, 0xb6, 0xdd, 0x29, 0xee, 0xc0, 0xc7, 0x42, 0x54, 0x41, 0x61, 0x4f, 0xf6, 0x8e, 0xbd, 0xb9, + 0x90, 0xf7, 0x7c, 0x13, 0xad, 0x12, 0x57, 0xf2, 0x9f, 0xd9, 0xaf, 0x2b, 0x70, 0x6e, 0xd5, 0xcc, + 0xc6, 0x66, 0x05, 0xe3, 0xd2, 0x86, 0x87, 0x71, 0xef, 0xc2, 0x6c, 0x72, 0x5e, 0x61, 0xb6, 0x69, + 0xce, 0xf1, 0x63, 0x73, 0xe0, 0x36, 0x4d, 0x51, 0x9c, 0x6c, 0xf7, 0x34, 0x7d, 0xdf, 0xea, 0xd1, + 0x88, 0xf1, 0xbb, 0x52, 0xae, 0x18, 0x33, 0x69, 0xed, 0x6c, 0x7e, 0xc0, 0xa1, 0x0a, 0x61, 0x0f, + 0x63, 0x2b, 0x12, 0xff, 0x5e, 0x1e, 0x58, 0x09, 0x53, 0xdf, 0x36, 0xe5, 0x59, 0x03, 0xad, 0x9d, + 0x9c, 0x5b, 0xd9, 0xd9, 0x92, 0x88, 0x31, 0x07, 0x3c, 0x4d, 0x0b, 0x0e, 0xde, 0x8e, 0x58, 0x48, + 0x62, 0x93, 0x8b, 0x78, 0x4d, 0x74, 0xe5, 0xa4, 0x0d, 0x1a, 0x84, 0x9e, 0x48, 0x81, 0x2e, 0x9d, + 0x42, 0x78, 0x05, 0xe7, 0x37, 0x5e, 0xaf, 0x57, 0x1b, 0xf2, 0x43, 0xb7, 0x13, 0x21, 0xdf, 0x07, + 0xc8, 0x5f, 0x1c, 0x58, 0x23, 0x13, 0x7c, 0x3a, 0x0d, 0xce, 0xf2, 0xee, 0x22, 0x2d, 0x47, 0x25, + 0x7e, 0x50, 0xfa, 0x7d, 0x3a, 0x93, 0xce, 0xc6, 0xeb, 0x54, 0xe7, 0x72, 0x85, 0x30, 0x38, 0xe3, + 0x60, 0x1e, 0xbc, 0xcb, 0xc4, 0x3a, 0xee, 0xec, 0x68, 0x86, 0x88, 0x11, 0xcd, 0xa0, 0xed, 0xd6, + 0x95, 0x24, 0x31, 0x1d, 0xc5, 0x19, 0x02, 0x52, 0x85, 0x93, 0xa6, 0x42, 0xa7, 0xbc, 0xea, 0x47, + 0x98, 0xff, 0xed, 0x03, 0xa1, 0xf8, 0x8b, 0x74, 0x3a, 0xca, 0xd8, 0x82, 0xcb, 0x99, 0xc9, 0x10, + 0xc4, 0xfc, 0x73, 0x3b, 0xd9, 0xd4, 0x83, 0x19, 0xd5, 0x77, 0x16, 0x8d, 0xe0, 
0x77, 0x51, 0xdf, + 0x38, 0xcb, 0x52, 0xc2, 0x1a, 0xce, 0xd9, 0x61, 0xdc, 0x1b, 0xd1, 0x56, 0xb5, 0xc3, 0x18, 0x56, + 0x1d, 0x17, 0xe7, 0x46, 0xb3, 0x01, 0x21, 0x52, 0x19, 0x0c, 0xa9, 0xd0, 0xc8, 0x66, 0xd7, 0x61, + 0x0d, 0x26, 0xa4, 0x47, 0x28, 0x44, 0xab, 0x63, 0x4e, 0x25, 0x35, 0xd5, 0x31, 0xeb, 0xf2, 0x4b, + 0x43, 0xcf, 0x99, 0x35, 0x7d, 0x89, 0x3e, 0x17, 0x92, 0xf2, 0x1e, 0xce, 0xa5, 0xf2, 0x35, 0xb1, + 0xca, 0xd0, 0x21, 0x57, 0xc8, 0xed, 0x12, 0x1b, 0x89, 0x09, 0x9f, 0x94, 0x6c, 0xe3, 0xaa, 0x8a, + 0x42, 0xd1, 0x11, 0x50, 0x3d, 0x28, 0xc6, 0x1e, 0x91, 0xb5, 0xab, 0xfd, 0xcb, 0x9d, 0x7b, 0x85, + 0xec, 0x97, 0x9c, 0x0b, 0x7a, 0xc6, 0x9e, 0x21, 0x77, 0x9c, 0x56, 0xd3, 0x69, 0xcd, 0xb1, 0xfe, + 0x8a, 0xd2, 0x29, 0xab, 0x73, 0x8b, 0x37, 0x8d, 0xfe, 0xa6, 0xd9, 0x35, 0xbd, 0xc3, 0x4d, 0x3f, + 0x23, 0x69, 0xe8, 0x28, 0xf9, 0xc9, 0xce, 0xbc, 0x86, 0x91, 0xb7, 0x0b, 0xec, 0xf6, 0x3a, 0xfb, + 0x0d, 0x12, 0xa5, 0x72, 0x97, 0x8b, 0xcd, 0x4b, 0x3e, 0x49, 0x8c, 0x1c, 0xff, 0xbc, 0x7d, 0x94, + 0x4f, 0x1b, 0xb0, 0xb3, 0xf0, 0xbf, 0x77, 0xdc, 0x49, 0x29, 0xe7, 0x34, 0x76, 0xad, 0xfa, 0x4f, + 0xc5, 0xe2, 0x23, 0xbc, 0x4a, 0xa6, 0x36, 0xb5, 0x21, 0x48, 0x8b, 0x4b, 0xd6, 0x33, 0xca, 0xe4, + 0x99, 0x6d, 0xa9, 0x40, 0xba, 0x08, 0x19, 0xbb, 0x6a, 0xc9, 0x8b, 0x3c, 0xcb, 0xcc, 0x4d, 0xc3, + 0xcd, 0x5c, 0x0a, 0xcc, 0x9a, 0xf5, 0x19, 0x5c, 0x90, 0x7d, 0x2b, 0x74, 0x70, 0xf2, 0x9b, 0xec, + 0x51, 0x3d, 0x38, 0x9e, 0x2b, 0x5e, 0xf2, 0xec, 0xf6, 0x43, 0xb5, 0x2d, 0x43, 0x65, 0x85, 0x5b, + 0x6a, 0xf9, 0x47, 0xcf, 0x54, 0xca, 0x21, 0x47, 0x81, 0x0d, 0xab, 0xe6, 0xe8, 0x1c, 0xc5, 0x5c, + 0x0a, 0xdf, 0x57, 0x75, 0x11, 0x6b, 0xe9, 0xe6, 0xea, 0x33, 0xf0, 0xb0, 0x89, 0xba, 0x19, 0xfa, + 0x39, 0xe7, 0x10, 0xcd, 0x6e, 0xfc, 0x72, 0x0b, 0x81, 0x59, 0xb1, 0x10, 0x45, 0xb0, 0x71, 0xcb, + 0x82, 0xd1, 0xee, 0x4e, 0x7f, 0x52, 0xf1, 0x73, 0x60, 0xd9, 0x34, 0x3c, 0x86, 0x44, 0x8d, 0x59, + 0xc8, 0x8c, 0xa2, 0xf8, 0xb6, 0xd8, 0x05, 0xfc, 0x93, 0xf6, 0x54, 0x68, 0x89, 0xcf, 0x63, 0xd3, + 0xde, 0x7d, 0xc0, 0x81, 0xb2, 0x8f, 0x74, 0x77, 0x3b, 0x20, 0xa9, 0x73, 0x02, 0xad, 0x66, 0x79, + 0xa2, 0x8a, 0xe8, 0xe2, 0x74, 0xb5, 0xb1, 0x07, 0x46, 0x55, 0xbf, 0xd6, 0x8c, 0x76, 0x39, 0xbb, + 0x9d, 0x86, 0xe5, 0x54, 0xc4, 0xe4, 0x94, 0x5b, 0xf9, 0xb0, 0xcc, 0x31, 0x24, 0x64, 0xd1, 0x35, + 0x40, 0x4c, 0x0f, 0x81, 0x83, 0xa1, 0x78, 0xc7, 0x96, 0x28, 0x3a, 0x71, 0xf0, 0xba, 0x6c, 0x36, + 0xbd, 0x7a, 0xde, 0xb5, 0x84, 0xde, 0xed, 0x6a, 0xf2, 0xde, 0x01, 0x94, 0x37, 0x29, 0xfe, 0x66, + 0xa7, 0xef, 0x35, 0x1d, 0xa1, 0xc3, 0xb3, 0x8e, 0x95, 0xae, 0x98, 0x93, 0xa6, 0xc4, 0x1f, 0xce, + 0xea, 0xa5, 0x63, 0x45, 0x9e, 0xa5, 0x7a, 0xdb, 0xcf, 0xd5, 0xde, 0x33, 0xd5, 0xc3, 0x70, 0x2e, + 0x2f, 0xbc, 0x28, 0xf9, 0x9b, 0xf3, 0xb4, 0x11, 0xd1, 0x61, 0xbc, 0x56, 0xd2, 0x07, 0xc5, 0x34, + 0xfb, 0x9f, 0x05, 0xa9, 0xcd, 0x3b, 0x12, 0xdd, 0x53, 0xec, 0x07, 0xf3, 0xb4, 0x99, 0x33, 0xb4, + 0x1b, 0x50, 0xc8, 0x95, 0xed, 0xf2, 0xd7, 0x3f, 0x3e, 0x0b, 0x15, 0x0c, 0x7a, 0x47, 0x9e, 0x55, + 0xdc, 0x5a, 0xc9, 0x53, 0xef, 0xb9, 0x31, 0x93, 0xde, 0xb4, 0x99, 0x3c, 0xa6, 0x8d, 0x1c, 0x6e, + 0x26, 0xe8, 0x4a, 0x3a, 0xe5, 0xa5, 0xe4, 0xc3, 0xc4, 0x2b, 0xcc, 0xe6, 0x2d, 0x9a, 0xfb, 0xd2, + 0xd6, 0x43, 0x9c, 0x0f, 0xdc, 0x09, 0x51, 0xf1, 0x23, 0x14, 0x0d, 0xb4, 0xe1, 0x46, 0xd0, 0xf8, + 0x63, 0x37, 0x85, 0xeb, 0x0e, 0xfb, 0x81, 0x2b, 0x2f, 0xd3, 0xbb, 0x8a, 0x09, 0x68, 0x19, 0x74, + 0xa0, 0x72, 0x8c, 0xc2, 0x04, 0x5b, 0x09, 0xb2, 0x1a, 0x7b, 0xcd, 0xe4, 0x8d, 0x27, 0x19, 0x26, + 0x7a, 0xf5, 0x3a, 0xe0, 0x74, 0x85, 0x37, 0x68, 0x84, 0xbd, 0xb6, 0x1e, 0x27, 0xdb, 0xa6, 0x54, + 0x0f, 
0x36, 0xe4, 0x03, 0xcc, 0x7e, 0x50, 0x6e, 0xeb, 0xd5, 0x93, 0x9d, 0xd9, 0xe5, 0x2f, 0xdf, + 0x4c, 0x42, 0xad, 0x04, 0xc8, 0x2d, 0xbe, 0x8a, 0x43, 0x08, 0xa2, 0x3c, 0xc9, 0x0b, 0x6b, 0x34, + 0xa3, 0xe9, 0x49, 0xf7, 0x9a, 0x9e, 0xff, 0x8c, 0x99, 0xf6, 0xb3, 0x5a, 0x90, 0xdf, 0x88, 0x61, + 0x45, 0x1e, 0xc5, 0x1e, 0x08, 0x9a, 0x66, 0xd5, 0x20, 0x48, 0xfe, 0xec, 0xd4, 0x71, 0x46, 0xb6, + 0xf2, 0x13, 0xaf, 0x1b, 0x51, 0x70, 0x4e, 0x51, 0x88, 0xe1, 0x79, 0x4f, 0x3f, 0x9c, 0x10, 0xb7, + 0x90, 0xed, 0xa2, 0x28, 0xd0, 0x67, 0xae, 0xec, 0x16, 0xab, 0x1f, 0x9a, 0x1a, 0xb9, 0xa6, 0xf2, + 0x80, 0x25, 0xd7, 0x2d, 0x9c, 0xfa, 0x92, 0x27, 0xd1, 0x0a, 0x6b, 0x71, 0xb3, 0x3a, 0xf9, 0x17, + 0xff, 0x59, 0xd2, 0x74, 0x56, 0x4f, 0x51, 0x7a, 0xb7, 0x35, 0xd6, 0xa6, 0x9f, 0x1b, 0xf7, 0xd0, + 0x16, 0x8e, 0xf9, 0x79, 0x3f, 0xb7, 0x7f, 0x53, 0x45, 0x64, 0x49, 0xef, 0x72, 0x35, 0xff, 0x45, + 0xed, 0xa2, 0x80, 0x31, 0xc3, 0x88, 0xac, 0x4d, 0x4e, 0xf1, 0xc5, 0xf6, 0x35, 0xd8, 0x5b, 0x00, + 0xc7, 0xc1, 0x9b, 0xe5, 0x81, 0x11, 0xd5, 0x2c, 0xff, 0x9e, 0xc6, 0xfc, 0xac, 0x46, 0xc5, 0x22, + 0xe6, 0x75, 0xe2, 0xfa, 0xe2, 0xe4, 0x77, 0x98, 0xbb, 0x81, 0xb1, 0xdd, 0x8b, 0x32, 0x69, 0xb9, + 0x3d, 0xc8, 0xac, 0x2c, 0x10, 0x6c, 0x39, 0x6f, 0xa5, 0x8d, 0x11, 0x91, 0x67, 0x15, 0x5f, 0x34, + 0xc8, 0x10, 0x84, 0xa6, 0x59, 0x0e, 0x81, 0x93, 0x7f, 0x5e, 0x28, 0x33, 0xdb, 0xe9, 0xd0, 0xe3, + 0x83, 0x9d, 0xef, 0x9b, 0xe8, 0x39, 0xba, 0x2b, 0xa3, 0x0a, 0xea, 0x6c, 0xb0, 0x15, 0x18, 0x67, + 0xd9, 0x0d, 0x58, 0x8b, 0x76, 0x28, 0xf1, 0xf3, 0x4b, 0xf2, 0xc7, 0xf7, 0x54, 0x1d, 0xf4, 0xc6, + 0x9d, 0x00, 0xe9, 0x09, 0x91, 0x3c, 0x1f, 0xaf, 0x5a, 0xdf, 0x5c, 0xf1, 0xd7, 0xde, 0x66, 0x7e, + 0x56, 0x44, 0x20, 0x3b, 0xda, 0xcd, 0x2a, 0x83, 0x62, 0x0e, 0x88, 0x67, 0x1f, 0x55, 0x5f, 0xf7, + 0x29, 0x57, 0x52, 0xf8, 0x5a, 0x98, 0xa1, 0x00, 0x71, 0x52, 0x6c, 0x07, 0x1e, 0xb4, 0xcd, 0x5f, + 0x80, 0x0b, 0xea, 0x9f, 0x56, 0x75, 0x4f, 0x99, 0xff, 0x9a, 0x0a, 0xd9, 0xe7, 0x1b, 0xf3, 0x35, + 0xcd, 0xa7, 0xde, 0xc4, 0x54, 0xfb, 0xe6, 0x39, 0x71, 0x09, 0x81, 0xa9, 0x72, 0x93, 0x7f, 0x9c, + 0xc2, 0x01, 0xce, 0xc9, 0xc9, 0x71, 0x9f, 0xd2, 0xda, 0x63, 0xdd, 0x1a, 0x49, 0x1d, 0x6e, 0x77, + 0x03, 0xe5, 0x1d, 0x98, 0xff, 0x2e, 0x6a, 0x47, 0x60, 0x8d, 0x3b, 0xdf, 0x18, 0xc8, 0xf7, 0x68, + 0x3f, 0xa1, 0xc7, 0x41, 0xa7, 0x04, 0xb9, 0x5a, 0xaf, 0x67, 0x27, 0x8b, 0x69, 0x57, 0xd8, 0x0a, + 0x4a, 0x7d, 0x4f, 0x36, 0xef, 0xfa, 0xc0, 0xa6, 0xf9, 0x28, 0x88, 0xdf, 0x41, 0xe0, 0x1e, 0xa1, + 0x3c, 0x72, 0x77, 0x4c, 0x51, 0xd1, 0x56, 0x3c, 0xd1, 0xb6, 0x14, 0x5c, 0x09, 0xa9, 0x21, 0x70, + 0x3c, 0x02, 0x44, 0x9a, 0xc9, 0xb3, 0x47, 0x47, 0x7b, 0x49, 0xac, 0x43, 0x5d, 0xe5, 0x99, 0x79, + 0x2d, 0x05, 0x8d, 0xd2, 0x15, 0x94, 0xd8, 0x30, 0x16, 0xf6, 0x31, 0x3d, 0xbc, 0x4a, 0xdd, 0x46, + 0x60, 0xdb, 0xeb, 0x5e, 0x9a, 0x9b, 0x1b, 0x72, 0xb0, 0x29, 0xea, 0xba, 0x92, 0x25, 0xe0, 0x23, + 0x6b, 0xb3, 0x7f, 0x6e, 0x4f, 0x7c, 0x22, 0x26, 0x94, 0x3c, 0x74, 0x39, 0x19, 0x37, 0x63, 0x4f, + 0x02, 0x46, 0x7a, 0x32, 0xbc, 0xfd, 0x24, 0xee, 0xfd, 0xae, 0xc7, 0x16, 0xf2, 0xcd, 0xd1, 0x91, + 0x25, 0x9c, 0xb8, 0xdb, 0x03, 0x8f, 0x40, 0xe0, 0xb8, 0x30, 0x65, 0xa9, 0xd3, 0x1b, 0x99, 0x22, + 0xf0, 0x57, 0x03, 0x1c, 0x27, 0x57, 0x44, 0x9c, 0x91, 0xec, 0xd7, 0x7e, 0x4b, 0xc1, 0xd4, 0x58, + 0xa7, 0x34, 0x20, 0xb9, 0x21, 0x6e, 0x67, 0x43, 0xb9, 0x46, 0x91, 0x20, 0xea, 0x64, 0xf9, 0x0a, + 0xb7, 0x68, 0x5f, 0x6a, 0x8b, 0x21, 0xde, 0xd5, 0xe9, 0x88, 0xa1, 0x57, 0x6e, 0x71, 0xda, 0x71, + 0x85, 0x8d, 0x61, 0x16, 0x24, 0x82, 0x96, 0x56, 0x53, 0x16, 0xfe, 0xa0, 0xd0, 0xc4, 0x0e, 0x52, + 0x7e, 0x0c, 0x48, 0xb1, 0xe9, 
0xac, 0x0a, 0xbf, 0xa8, 0xde, 0x7c, 0xa2, 0x82, 0xe2, 0x17, 0xc6, + 0x6d, 0x2f, 0xc2, 0x84, 0xa6, 0x2a, 0xb3, 0xbd, 0x5e, 0xbc, 0xdd, 0xba, 0x64, 0x97, 0xc4, 0x85, + 0x3c, 0x5b, 0x0b, 0x39, 0xca, 0x22, 0x83, 0xff, 0xe7, 0x5f, 0xfb, 0x28, 0xbc, 0xec, 0xec, 0x1a, + 0xba, 0x57, 0xa1, 0x0c, 0x5e, 0x99, 0x89, 0x2d, 0x64, 0x64, 0xe5, 0xc1, 0x7e, 0xb8, 0x04, 0x07, + 0x3c, 0x0e, 0x5c, 0xf1, 0x16, 0xea, 0xb2, 0xbb, 0x6d, 0x0c, 0x48, 0x18, 0x86, 0x0e, 0x09, 0x5b, + 0x27, 0x84, 0xaa, 0x50, 0x19, 0x9a, 0x16, 0x3d, 0xa6, 0x4c, 0xc4, 0xc5, 0xbc, 0xc8, 0xf3, 0xfc, + 0xe7, 0xfc, 0x28, 0x3c, 0x35, 0xc7, 0xd7, 0x5a, 0x08, 0x49, 0x3a, 0x10, 0xfd, 0xc6, 0x6f, 0x89, + 0x73, 0x34, 0xbc, 0xb7, 0x10, 0x10, 0x2a, 0x7a, 0x1b, 0x4e, 0xe9, 0x37, 0xa8, 0x7a, 0x88, 0x03, + 0x72, 0xad, 0x01, 0x1b, 0x12, 0xf8, 0x6f, 0xf6, 0x24, 0xdf, 0x68, 0xba, 0xe4, 0x4f, 0x14, 0x09, + 0x72, 0xfc, 0x2f, 0x82, 0x6d, 0x79, 0xe0, 0x88, 0x0c, 0xb1, 0x21, 0x11, 0x8d, 0x52, 0xde, 0x26, + 0x47, 0xd5, 0x8e, 0x93, 0xa7, 0x1d, 0x39, 0xa3, 0xd8, 0x6a, 0xc3, 0xce, 0x13, 0xe3, 0xa2, 0x0f, + 0x90, 0x67, 0xef, 0xf6, 0x8e, 0xc3, 0x86, 0x90, 0x6c, 0x0a, 0xaa, 0xf9, 0x10, 0x0b, 0xd8, 0x8d, + 0xcb, 0xaa, 0xa4, 0x1b, 0x3f, 0x89, 0xa9, 0xb9, 0x15, 0x31, 0x95, 0xe2, 0x65, 0x7b, 0xca, 0x15, + 0x79, 0x4e, 0x29, 0x3b, 0xd0, 0x7f, 0xd3, 0xaf, 0x65, 0xdd, 0x11, 0x56, 0xd2, 0x0b, 0xd1, 0x03, + 0x74, 0xe9, 0x61, 0x72, 0xe4, 0xc8, 0xbc, 0xba, 0x51, 0x1f, 0x85, 0x59, 0xa6, 0x9f, 0xf5, 0xb1, + 0x2d, 0x6c, 0xf7, 0x6d, 0x5b, 0x66, 0xdf, 0x55, 0xd0, 0x93, 0x5b, 0x58, 0x10, 0xe3, 0x85, 0xe1, + 0x23, 0x04, 0xd4, 0x7d, 0x17, 0xd5, 0xa4, 0xdd, 0x31, 0xe5, 0xb8, 0x12, 0x21, 0x41, 0x6e, 0xef, + 0x0e, 0x15, 0x25, 0xc1, 0x29, 0x0a, 0xec, 0xe5, 0x1a, 0x29, 0x7e, 0x7a, 0x14, 0x70, 0xf1, 0xcf, + 0xa6, 0x3a, 0xc8, 0x78, 0xd6, 0x57, 0x1f, 0x11, 0xe9, 0x4b, 0xf0, 0x84, 0x8c, 0xc4, 0xc4, 0x67, + 0x7c, 0xd2, 0xd9, 0xd4, 0xff, 0x67, 0x3e, 0x5a, 0x20, 0x78, 0x87, 0x62, 0x21, 0x79, 0x05, 0x06, + 0x20, 0x94, 0xca, 0xad, 0xd7, 0x4f, 0xeb, 0xb6, 0x90, 0x23, 0xe5, 0xe0, 0x49, 0xd3, 0x47, 0x91, + 0xb8, 0x41, 0xc3, 0x29, 0xe6, 0xa9, 0x9c, 0xfb, 0x45, 0x9d, 0xbe, 0xe6, 0x2a, 0xea, 0x16, 0x31, + 0x65, 0x3c, 0x3a, 0x88, 0xc8, 0x5e, 0x4d, 0x4f, 0x2c, 0xd4, 0x07, 0xad, 0x8b, 0x5f, 0xf6, 0xc6, + 0xa3, 0xc2, 0x36, 0x49, 0x75, 0x1d, 0xae, 0xda, 0x04, 0x6c, 0xee, 0xf5, 0x70, 0xb5, 0x6c, 0xae, + 0x32, 0x37, 0x49, 0x84, 0xb1, 0xc5, 0xe5, 0xf3, 0x2e, 0x4a, 0x8f, 0xad, 0x5b, 0xb1, 0x9e, 0xf4, + 0xd0, 0x31, 0xb0, 0xb8, 0xe5, 0x66, 0xc7, 0x43, 0xf0, 0xad, 0x9f, 0x84, 0x08, 0x76, 0x59, 0xc3, + 0x1b, 0xcd, 0xb2, 0x9f, 0x9a, 0x9d, 0x17, 0x77, 0xd2, 0x4e, 0xcb, 0x10, 0xb6, 0x1a, 0xf3, 0x68, + 0xdf, 0x87, 0x7b, 0x7c, 0xa5, 0x24, 0xb7, 0x75, 0xd0, 0xcb, 0xfb, 0xe6, 0xc7, 0x46, 0x6a, 0x3d, + 0x5e, 0xf6, 0x7d, 0xd4, 0x0e, 0x64, 0x10, 0xde, 0xe7, 0x25, 0x2e, 0xc5, 0x4c, 0x05, 0xf0, 0x1b, + 0xc1, 0x9e, 0xc2, 0x5e, 0x7a, 0x1e, 0x3f, 0x64, 0x64, 0x40, 0xe1, 0x08, 0x23, 0x2d, 0xe6, 0xe4, + 0x0f, 0xa4, 0x2f, 0xb0, 0xc6, 0xfd, 0xac, 0x22, 0xc2, 0x5e, 0x3f, 0xcc, 0x87, 0xf1, 0xdf, 0xd6, + 0x6a, 0x64, 0x1b, 0x18, 0x6b, 0x42, 0xc1, 0x45, 0x1e, 0x04, 0x5e, 0x90, 0x2a, 0xbc, 0x02, 0xb1, + 0xb2, 0x33, 0x76, 0xb5, 0x1a, 0xfd, 0x5a, 0x8d, 0x6d, 0x42, 0xaf, 0xce, 0xe5, 0x02, 0x3e, 0xef, + 0x2e, 0x49, 0x81, 0x73, 0x8c, 0x6e, 0xb3, 0xd0, 0xe3, 0xf7, 0x13, 0xaa, 0x57, 0xab, 0xd6, 0x25, + 0xd6, 0x2b, 0xda, 0x69, 0x72, 0x34, 0xb8, 0x48, 0x7b, 0x87, 0x78, 0x06, 0x58, 0xcc, 0x26, 0x8d, + 0xae, 0x15, 0xd2, 0x25, 0x20, 0x6a, 0x1b, 0x6c, 0x3a, 0xd3, 0xdf, 0x9e, 0x83, 0x87, 0xda, 0xb9, + 0xfc, 0x51, 0xcd, 0x7f, 0x64, 0xf6, 0xae, 0x1f, 0x9e, 
0x0b, 0x87, 0x9e, 0xf9, 0xb8, 0x8b, 0xd7, + 0x69, 0x38, 0x02, 0xe6, 0x43, 0x12, 0xb8, 0xf9, 0xd5, 0xbb, 0xfa, 0xad, 0xf2, 0x3a, 0x0d, 0xd8, + 0x38, 0x77, 0xef, 0x4d, 0xbc, 0x92, 0x13, 0xae, 0x6a, 0x7d, 0x22, 0x8c, 0xe9, 0x4e, 0x9e, 0x99, + 0xf2, 0x2e, 0x1b, 0xc0, 0x5b, 0x40, 0xd9, 0x3c, 0xf1, 0x09, 0x97, 0xb2, 0x6b, 0xaa, 0x69, 0x5c, + 0xde, 0x80, 0x93, 0x9d, 0x3a, 0x76, 0xee, 0x72, 0x41, 0x91, 0x4a, 0x9c, 0x18, 0xb1, 0x37, 0x47, + 0xf4, 0x75, 0x4b, 0x7e, 0x9b, 0x92, 0x93, 0xe1, 0x9e, 0x8e, 0xaf, 0x3b, 0x92, 0xea, 0x8d, 0x23, + 0x8c, 0xef, 0x73, 0x70, 0x2f, 0xcc, 0x7e, 0xcd, 0xea, 0xa4, 0x8a, 0xe7, 0xf4, 0xf0, 0x21, 0x5d, + 0xf5, 0x41, 0x09, 0x07, 0xdf, 0x7b, 0xef, 0x09, 0x26, 0xfd, 0x8e, 0xf9, 0x89, 0xab, 0xf9, 0x01, + 0xb9, 0x72, 0x40, 0xa4, 0xa4, 0x39, 0xe7, 0x1d, 0x74, 0xcb, 0x09, 0x75, 0x49, 0x28, 0xc4, 0x38, + 0xa4, 0xe5, 0xad, 0xd2, 0x92, 0x05, 0x9d, 0x6a, 0xfd, 0xf7, 0xfe, 0xe7, 0xef, 0x0a, 0x3a, 0x91, + 0xad, 0xbd, 0xef, 0x6a, 0x6a, 0xac, 0x29, 0xa5, 0xeb, 0x33, 0x62, 0x4e, 0x71, 0x43, 0xfe, 0x19, + 0x06, 0x60, 0xa2, 0xaa, 0xb1, 0x12, 0xf8, 0xfb, 0xe8, 0x27, 0xb1, 0x1a, 0x74, 0x36, 0x6d, 0x95, + 0x3c, 0xc4, 0x9e, 0x54, 0xe0, 0x53, 0xe7, 0xef, 0x18, 0xd4, 0x98, 0xde, 0xe3, 0x57, 0xd9, 0xc1, + 0x5a, 0xd5, 0x12, 0x90, 0x91, 0x5c, 0xff, 0x34, 0x5e, 0xc0, 0x2d, 0x38, 0x4c, 0x52, 0x00, 0x07, + 0x64, 0xdb, 0x06, 0x66, 0x61, 0x9b, 0x13, 0x12, 0x52, 0x0e, 0x83, 0x06, 0x25, 0x83, 0x31, 0xda, + 0x5e, 0xe5, 0xb7, 0x43, 0x63, 0x0f, 0x65, 0xa8, 0x55, 0x20, 0xab, 0xdc, 0xaa, 0x6d, 0x5f, 0x93, + 0xa9, 0x1a, 0x83, 0xb6, 0xca, 0x23, 0xd2, 0xdc, 0x55, 0xed, 0x8d, 0x60, 0xd4, 0x16, 0xa8, 0x8c, + 0xa0, 0x4f, 0x8f, 0xf4, 0x5c, 0xfb, 0x52, 0xf1, 0x1d, 0xbe, 0x80, 0xe0, 0x3d, 0xc9, 0xcc, 0x43, + 0x9c, 0x75, 0x17, 0x97, 0xbf, 0x92, 0xc1, 0x8d, 0xf2, 0x98, 0x93, 0xb3, 0x52, 0x23, 0xa2, 0x7b, + 0x33, 0x11, 0xce, 0x7e, 0x85, 0xd2, 0xcd, 0x0b, 0x4e, 0x0a, 0x81, 0x94, 0xf8, 0xee, 0x40, 0x14, + 0x22, 0x7b, 0x91, 0xff, 0x99, 0xca, 0x95, 0x19, 0x96, 0x86, 0x23, 0xc4, 0xcc, 0xd9, 0xc2, 0xc8, + 0x50, 0x8f, 0xba, 0xbb, 0x4a, 0xee, 0xf2, 0x9c, 0x12, 0x80, 0xc9, 0x1e, 0xf0, 0x4c, 0x9e, 0x94, + 0x1c, 0x24, 0x24, 0x4c, 0xaa, 0xf3, 0x5c, 0x72, 0xc8, 0x2c, 0xa2, 0x0a, 0x7b, 0x2a, 0xaf, 0x05, + 0xb7, 0xea, 0xf8, 0xaa, 0x2f, 0xd4, 0x10, 0xd0, 0xcf, 0x27, 0x78, 0x01, 0x89, 0x1b, 0x94, 0xba, + 0x81, 0xbc, 0x43, 0x05, 0xe8, 0xb7, 0xb7, 0x9f, 0x65, 0xc0, 0xf1, 0x1c, 0x9d, 0x0e, 0xce, 0xb6, + 0x08, 0x39, 0x27, 0x11, 0x62, 0x5d, 0xdd, 0xa6, 0xe1, 0xdd, 0x22, 0x61, 0x07, 0x15, 0x37, 0x29, + 0xe6, 0xf0, 0xab, 0x2b, 0x6e, 0xf9, 0xe3, 0xc4, 0x11, 0x45, 0x58, 0xbb, 0x7d, 0x57, 0xb7, 0x71, + 0x0e, 0xd6, 0x92, 0x71, 0x0a, 0x23, 0xae, 0x0f, 0xcc, 0xb3, 0x98, 0x97, 0x80, 0x22, 0x49, 0xf2, + 0x2f, 0x01, 0x51, 0x91, 0x9a, 0xb2, 0x0b, 0x0b, 0x7e, 0x78, 0x35, 0x5d, 0x06, 0x24, 0x2d, 0x2d, + 0x80, 0x45, 0x74, 0x85, 0x38, 0xae, 0xe9, 0xa1, 0x59, 0xe4, 0xd1, 0x7e, 0xfa, 0xf6, 0xa8, 0x34, + 0x6f, 0xd9, 0x5e, 0x34, 0xe7, 0xdf, 0xf3, 0xb5, 0x34, 0xa3, 0x85, 0x6c, 0xf0, 0x7b, 0x1a, 0xe3, + 0x01, 0x53, 0x93, 0x8b, 0x70, 0xfb, 0x6a, 0xf3, 0xfd, 0x19, 0xe0, 0xc6, 0x6c, 0x64, 0x7c, 0xe2, + 0xe2, 0xb6, 0x33, 0x95, 0xe7, 0xac, 0x50, 0xa9, 0x53, 0x0e, 0x62, 0xec, 0x24, 0x1d, 0x07, 0x0f, + 0xca, 0x4e, 0xee, 0x5f, 0x86, 0x28, 0x9d, 0x09, 0x05, 0xc8, 0x4f, 0x4d, 0xa5, 0xdf, 0x05, 0x74, + 0x37, 0x86, 0x01, 0xb6, 0xdb, 0x9f, 0x6b, 0xbb, 0xa9, 0x0c, 0x12, 0xe0, 0xf5, 0x82, 0x07, 0x6f, + 0x84, 0x56, 0x6c, 0xa2, 0x59, 0x95, 0xe0, 0x1f, 0x42, 0x2f, 0xd4, 0x61, 0xb2, 0x94, 0x74, 0x4d, + 0x4c, 0x42, 0x0f, 0xad, 0x6d, 0x77, 0x5f, 0x8c, 0x8e, 0x84, 0xb3, 0x0c, 0x8d, 
0xd8, 0xa5, 0x02, + 0xf1, 0x5f, 0xe6, 0x08, 0x53, 0x33, 0x2b, 0x49, 0x3f, 0xfd, 0xbc, 0xa6, 0xff, 0x80, 0x6b, 0x99, + 0x10, 0xf1, 0x74, 0x5a, 0x97, 0xbe, 0xad, 0xf5, 0x61, 0xad, 0xb3, 0x15, 0x1f, 0x8b, 0xc1, 0xb4, + 0x41, 0x93, 0x02, 0x17, 0x58, 0xcf, 0x9d, 0x8c, 0x54, 0x5b, 0xe3, 0x76, 0x28, 0xbc, 0xb8, 0x04, + 0xb4, 0x05, 0xac, 0xb6, 0x9e, 0x3a, 0x72, 0x21, 0x40, 0x0b, 0x20, 0xe7, 0x76, 0x86, 0xda, 0xbe, + 0x66, 0xb0, 0x4f, 0x67, 0x05, 0x14, 0xd2, 0x0c, 0x6e, 0x23, 0x6e, 0xb4, 0xd4, 0x9b, 0x12, 0xa6, + 0xd8, 0x6f, 0x92, 0xaf, 0x51, 0xae, 0xa6, 0x0f, 0xa3, 0xd6, 0x49, 0x18, 0x07, 0x5a, 0xec, 0x5e, + 0x3c, 0x67, 0xac, 0xe2, 0xe0, 0x2c, 0xd5, 0x28, 0x09, 0x4c, 0x09, 0x2a, 0x48, 0xd4, 0xd8, 0xe9, + 0x99, 0xca, 0x38, 0x94, 0x0e, 0x68, 0xd2, 0xa6, 0x9d, 0xf3, 0x49, 0xa8, 0x31, 0xb1, 0x2f, 0x9f, + 0x69, 0xb0, 0x31, 0xa1, 0xea, 0x44, 0x8b, 0x3f, 0xfa, 0x99, 0xce, 0x91, 0x07, 0xc9, 0x9d, 0xbb, + 0xe2, 0xed, 0xca, 0x6b, 0xff, 0xa0, 0xa7, 0x3c, 0x1b, 0xec, 0x2b, 0x5f, 0xbc, 0x25, 0xb5, 0x97, + 0xf4, 0x88, 0xed, 0xb5, 0x02, 0x82, 0x0e, 0xb2, 0x76, 0x16, 0x84, 0x63, 0x9c, 0x8e, 0x53, 0x6f, + 0x6a, 0x22, 0x7e, 0x04, 0x5b, 0x3e, 0x8a, 0x41, 0x7c, 0xe6, 0x46, 0xd6, 0xe5, 0xa0, 0x00, 0x0d, + 0x8c, 0xac, 0x71, 0x66, 0x1a, 0xf7, 0xf5, 0xd5, 0x2c, 0xb8, 0x95, 0xc7, 0xd4, 0x99, 0xb0, 0xa4, + 0x31, 0xd0, 0x49, 0xbd, 0xc1, 0x63, 0xa3, 0xb0, 0xe9, 0x29, 0xb9, 0x5d, 0x82, 0xc6, 0xf0, 0xfd, + 0x8a, 0xd1, 0x7f, 0xc1, 0xc7, 0x14, 0xf2, 0x8a, 0xc2, 0xb7, 0x41, 0x01, 0xdb, 0x0a, 0x68, 0x89, + 0x4d, 0xbb, 0xa6, 0x91, 0xc1, 0xa9, 0xfb, 0xe4, 0xaa, 0x04, 0xbd, 0x57, 0x18, 0xd8, 0x22, 0x0f, + 0xfb, 0x19, 0x71, 0x54, 0x89, 0x32, 0x83, 0x18, 0x91, 0x8b, 0x32, 0xe2, 0x9f, 0xe2, 0xfe, 0x44, + 0xae, 0x26, 0x1d, 0x92, 0xba, 0xae, 0x23, 0xc1, 0x52, 0x04, 0xc0, 0xac, 0x89, 0x32, 0xbe, 0x39, + 0x98, 0x3f, 0x8d, 0x94, 0x1a, 0xfa, 0x20, 0xcd, 0x99, 0x41, 0x2f, 0x63, 0xc4, 0x19, 0x82, 0x1e, + 0x0a, 0x45, 0xfb, 0x30, 0x28, 0xf0, 0x86, 0x41, 0xf1, 0xe2, 0xbb, 0x3c, 0x63, 0x93, 0x22, 0x2c, + 0x82, 0xe4, 0xaf, 0x32, 0x3d, 0x2d, 0x6f, 0x13, 0x8d, 0xd8, 0x61, 0xdb, 0xa7, 0x82, 0x52, 0x82, + 0xf7, 0xc3, 0x55, 0xa5, 0xd7, 0x3f, 0x9e, 0x4f, 0x04, 0x89, 0xeb, 0xa2, 0xd8, 0x1f, 0x68, 0xb3, + 0x16, 0x8c, 0x9a, 0x17, 0xfd, 0x28, 0xda, 0x21, 0x71, 0x9b, 0xa5, 0x1a, 0x58, 0x5a, 0xa4, 0x99, + 0x0e, 0x10, 0x1e, 0x1e, 0x0c, 0x6a, 0xa3, 0xce, 0x58, 0x5f, 0x08, 0x4f, 0x54, 0x49, 0x67, 0x31, + 0x73, 0x38, 0xd7, 0x9e, 0xdb, 0x69, 0xe5, 0xbe, 0x80, 0x09, 0x94, 0xe1, 0x4a, 0x98, 0xe2, 0x5f, + 0xdc, 0x88, 0xf2, 0x1f, 0xb0, 0x2e, 0x90, 0x58, 0x2e, 0x32, 0xcc, 0xe7, 0xda, 0x20, 0x91, 0xd9, + 0x5e, 0x64, 0x59, 0xb9, 0x96, 0x53, 0x23, 0x07, 0xa9, 0xb9, 0x72, 0x26, 0x3e, 0xa8, 0xcb, 0x0a, + 0xf4, 0x3c, 0x62, 0x02, 0x56, 0xe7, 0x87, 0x42, 0xa1, 0xc8, 0x18, 0x30, 0xaf, 0x00, 0x6a, 0xb1, + 0xb1, 0xeb, 0xaa, 0xb0, 0xee, 0x8c, 0x7c, 0x05, 0x5b, 0xc8, 0xaf, 0xfc, 0xcb, 0x6f, 0x45, 0xc5, + 0xdd, 0x47, 0x49, 0xe6, 0x81, 0x2e, 0x5c, 0xbc, 0x5f, 0x5a, 0x37, 0xd6, 0xa8, 0x2a, 0xea, 0x1a, + 0x3c, 0x04, 0x29, 0x22, 0xeb, 0xd5, 0x25, 0xeb, 0x36, 0xb8, 0xf1, 0xdf, 0x8b, 0x23, 0xc3, 0x79, + 0xe0, 0x5c, 0x28, 0x49, 0x18, 0x8c, 0x11, 0xa2, 0xef, 0x6f, 0xed, 0x21, 0x8b, 0x2f, 0x02, 0xee, + 0x10, 0x8e, 0x9b, 0x04, 0x1f, 0x24, 0x06, 0x96, 0x68, 0xfd, 0xd6, 0x47, 0x78, 0x42, 0xa4, 0xc9, + 0x5f, 0x73, 0x14, 0xf7, 0x3c, 0x10, 0x38, 0xaf, 0x35, 0xbe, 0x7b, 0xed, 0x4c, 0x63, 0x5f, 0x86, + 0xa4, 0x5b, 0x99, 0xb8, 0xed, 0x44, 0x44, 0xeb, 0xf5, 0x54, 0x28, 0x7b, 0xd7, 0x8b, 0x2b, 0xf4, + 0x4b, 0x49, 0xe5, 0xdd, 0xd9, 0x9d, 0xee, 0xb7, 0x8a, 0x23, 0x16, 0x9e, 0x3c, 0x19, 0xbd, 0x00, + 0x70, 
0x76, 0x51, 0x91, 0x30, 0x5a, 0xb3, 0x0a, 0xa9, 0xc6, 0xdc, 0x30, 0x74, 0x70, 0x45, 0x7f, + 0xeb, 0xc0, 0xf5, 0x39, 0xc9, 0x63, 0x5d, 0x26, 0xf4, 0x99, 0x91, 0x26, 0xcd, 0x6c, 0xba, 0xce, + 0x5c, 0xe8, 0xe1, 0x71, 0xea, 0x60, 0xb2, 0x08, 0x70, 0x80, 0x8e, 0xd4, 0x41, 0xc7, 0xfb, 0x24, + 0x3c, 0x7b, 0xa3, 0xac, 0x05, 0x9e, 0xab, 0xa2, 0x5b, 0x37, 0x74, 0xda, 0x1b, 0x78, 0x41, 0x74, + 0x5d, 0xbf, 0x09, 0x29, 0x3b, 0xaf, 0x96, 0x61, 0x1b, 0x8b, 0xf5, 0x73, 0xf1, 0x9f, 0xb9, 0x80, + 0xa5, 0x85, 0x46, 0xdf, 0x02, 0x95, 0x78, 0x9d, 0x91, 0x61, 0xb0, 0xce, 0xc2, 0xc6, 0x8a, 0xc3, + 0xe0, 0xfb, 0x26, 0x4e, 0xb2, 0x19, 0x1e, 0xed, 0x0f, 0x9d, 0x58, 0xd8, 0x25, 0xe7, 0x92, 0x37, + 0xc5, 0xef, 0xb1, 0x36, 0x77, 0x35, 0xa9, 0xd5, 0xef, 0x62, 0xc7, 0x0f, 0x62, 0x5b, 0x8d, 0xef, + 0xe4, 0x6c, 0x02, 0xf9, 0xf5, 0x6f, 0x7e, 0xee, 0xf7, 0x78, 0xa1, 0x4d, 0x7d, 0x04, 0x98, 0x25, + 0x8b, 0x1d, 0xc5, 0xb6, 0xc6, 0x82, 0x06, 0xb4, 0x35, 0xe2, 0xec, 0xdf, 0x77, 0xe0, 0x22, 0x38, + 0x86, 0x30, 0x8d, 0x35, 0x64, 0x71, 0xa6, 0xe0, 0xc6, 0x2a, 0x2b, 0x09, 0x5e, 0x61, 0x46, 0xe0, + 0x27, 0xbe, 0x84, 0x8f, 0x40, 0x7f, 0xfa, 0xe7, 0xfd, 0xab, 0x4d, 0xee, 0x7e, 0x7b, 0x59, 0xc4, + 0x55, 0xde, 0xe2, 0xc6, 0xc5, 0xbf, 0xd5, 0x44, 0xc5, 0x04, 0x61, 0xb3, 0x98, 0x2a, 0x34, 0xf6, + 0xe4, 0xe4, 0x89, 0x24, 0xb7, 0x24, 0xe5, 0x38, 0xa0, 0xd6, 0xb1, 0x45, 0xe6, 0x85, 0xea, 0xec, + 0xc7, 0x12, 0x73, 0x79, 0xb8, 0xfc, 0x90, 0x41, 0x7a, 0x35, 0x01, 0x1c, 0xe8, 0xe1, 0xab, 0xd6, + 0x27, 0x2d, 0xe6, 0x32, 0xb0, 0x8e, 0x0a, 0x9a, 0x3b, 0x69, 0x3d, 0xfa, 0x76, 0x02, 0xc7, 0xf4, + 0x58, 0x68, 0x40, 0x6a, 0x6f, 0x70, 0xd5, 0x89, 0x52, 0xca, 0xfe, 0xf6, 0x15, 0x9b, 0xf2, 0x49, + 0x0f, 0xc9, 0xf7, 0x6c, 0x21, 0x3e, 0xce, 0x3c, 0x1f, 0xdb, 0x4a, 0xff, 0x47, 0xf1, 0x85, 0x73, + 0xbf, 0x67, 0xaa, 0x26, 0x15, 0x7a, 0xbf, 0x13, 0xd5, 0x0e, 0x5f, 0x6e, 0x75, 0xd9, 0x11, 0xcb, + 0x9d, 0x17, 0x4f, 0x4e, 0xbf, 0x05, 0x0d, 0x9d, 0xb9, 0x7a, 0x62, 0x08, 0xde, 0x99, 0x45, 0x07, + 0x1c, 0x34, 0xf7, 0x29, 0x12, 0xfb, 0xf6, 0x86, 0xf1, 0xc3, 0xc8, 0xd1, 0x64, 0x27, 0xde, 0x6f, + 0x87, 0x49, 0xab, 0x3b, 0x9c, 0xf0, 0xd9, 0x1b, 0x3c, 0xd4, 0xda, 0xcc, 0x02, 0xcc, 0xd1, 0x1f, + 0x71, 0xdf, 0xe1, 0xa2, 0x0d, 0x4b, 0x5f, 0x91, 0x26, 0xcd, 0x8b, 0x67, 0x05, 0xef, 0xef, 0x87, + 0xbe, 0xff, 0x51, 0x89, 0x92, 0xaa, 0x1f, 0xc2, 0x21, 0x3c, 0xb3, 0x73, 0xf0, 0x4e, 0xef, 0x8f, + 0x5a, 0x47, 0x85, 0x29, 0x9f, 0x97, 0xef, 0x24, 0x91, 0xc8, 0xe4, 0xb0, 0x6b, 0xb9, 0x23, 0x57, + 0x03, 0xb2, 0xe4, 0x30, 0x90, 0x29, 0xb8, 0xfe, 0xcc, 0x06, 0xa2, 0xb0, 0x40, 0xe1, 0x33, 0xaf, + 0xf9, 0x38, 0x7a, 0xee, 0xa3, 0x83, 0x34, 0x30, 0x55, 0xaf, 0x7c, 0xd9, 0x1c, 0x7b, 0x2d, 0x3f, + 0xa5, 0x67, 0x96, 0x8a, 0x0c, 0x38, 0xd9, 0x07, 0x39, 0xce, 0xc1, 0xf5, 0x93, 0xd1, 0xea, 0xda, + 0xc8, 0xc7, 0xaf, 0x34, 0x8b, 0x93, 0x12, 0xff, 0xfa, 0xac, 0x0e, 0x33, 0x96, 0x7b, 0x5e, 0x1d, + 0x1c, 0xc8, 0xd4, 0x7b, 0x25, 0x9a, 0x52, 0xb6, 0x0f, 0x6c, 0x3a, 0x31, 0x0f, 0x48, 0x8a, 0x1e, + 0xf7, 0x8e, 0x4d, 0x75, 0x19, 0x60, 0x78, 0x64, 0x7c, 0x03, 0xfb, 0x46, 0x79, 0xe0, 0x6f, 0x34, + 0xbc, 0x64, 0x4f, 0x38, 0xd9, 0x8b, 0xfb, 0x3b, 0xe0, 0x6a, 0x03, 0x9a, 0x11, 0x77, 0x28, 0xe6, + 0xcc, 0xb0, 0x0e, 0xc7, 0x28, 0x85, 0xcf, 0x6a, 0x4d, 0xeb, 0x52, 0xb2, 0x2b, 0x18, 0x35, 0xb8, + 0xd4, 0x15, 0x55, 0x1b, 0xc7, 0x94, 0x2f, 0xe2, 0xb7, 0xe5, 0xa4, 0x08, 0xe1, 0x5c, 0xa9, 0x5f, + 0x11, 0x36, 0x4d, 0xfa, 0xb6, 0xed, 0x0e, 0x87, 0x00, 0x17, 0x27, 0x47, 0x4b, 0xc7, 0xf0, 0x65, + 0x57, 0xe4, 0x6a, 0xce, 0x1e, 0x14, 0x27, 0xba, 0xb8, 0x70, 0x8c, 0x17, 0xc7, 0xad, 0x53, 0xf3, + 0x23, 0x71, 0x45, 0x41, 0x66, 
0xa2, 0xae, 0x35, 0x2c, 0x5f, 0xbe, 0x65, 0xdc, 0x0d, 0x36, 0x4e, + 0x89, 0xff, 0xf1, 0x1c, 0xea, 0x76, 0x16, 0x98, 0x14, 0x66, 0xfd, 0x8e, 0x89, 0x9b, 0xa7, 0x4f, + 0x91, 0x70, 0xdf, 0x75, 0xaf, 0x1d, 0x5d, 0x55, 0x11, 0x85, 0xf6, 0xb9, 0xe9, 0x94, 0x46, 0x2f, + 0x7b, 0xaf, 0xd9, 0x4c, 0xaa, 0x61, 0xd0, 0x9c, 0xa0, 0xfc, 0xdb, 0xaf, 0x47, 0x1a, 0xfb, 0x94, + 0x79, 0xf9, 0xc2, 0xa4, 0xaf, 0x69, 0x9a, 0x30, 0x9d, 0x0a, 0x1b, 0xa7, 0xc1, 0xbe, 0x1d, 0xd7, + 0x68, 0x84, 0x6b, 0x40, 0x69, 0x38, 0x91, 0xc5, 0xca, 0xd6, 0xbb, 0xc6, 0x69, 0x76, 0x24, 0x1a, + 0x59, 0x09, 0x5c, 0xc5, 0x3d, 0x26, 0x81, 0x46, 0x20, 0x29, 0x56, 0xab, 0x52, 0xae, 0xf9, 0x39, + 0xdb, 0xdb, 0x5b, 0x60, 0xce, 0x98, 0x5e, 0xe6, 0x35, 0xf0, 0x58, 0x00, 0x8c, 0xb0, 0x72, 0x1b, + 0x4e, 0x00, 0x0a, 0xd5, 0x18, 0xeb, 0x0f, 0xfe, 0x88, 0xe4, 0x1e, 0xd6, 0xea, 0xf3, 0xe7, 0x9d, + 0x0e, 0x08, 0xf2, 0x44, 0xcd, 0xd5, 0x45, 0x0c, 0xe0, 0x94, 0x51, 0x14, 0x3c, 0x61, 0x91, 0xb8, + 0x03, 0x26, 0xa5, 0xd5, 0xd1, 0x8a, 0x5b, 0xf1, 0x5c, 0x2d, 0xe7, 0x3e, 0x93, 0x98, 0x7a, 0xbd, + 0xea, 0x0a, 0x47, 0x63, 0xd0, 0x69, 0xf2, 0xa6, 0xc3, 0x37, 0x88, 0xde, 0xff, 0x82, 0xfa, 0x7e, + 0x5a, 0xb9, 0x98, 0x9f, 0xaa, 0x0f, 0x20, 0x00, 0x14, 0x34, 0x26, 0xe7, 0x1e, 0x10, 0x1f, 0x4d, + 0x43, 0x70, 0x63, 0x79, 0x27, 0xab, 0x5c, 0xbf, 0xbe, 0xc4, 0xb8, 0x07, 0x60, 0x72, 0xca, 0x57, + 0xa0, 0xfb, 0xc6, 0x84, 0xef, 0xe8, 0x8f, 0x94, 0xdc, 0x5c, 0xb5, 0x72, 0x87, 0xc3, 0x6b, 0x4e, + 0x40, 0x0e, 0x5f, 0x19, 0x66, 0x1f, 0x77, 0x2a, 0xae, 0xf3, 0x10, 0x7d, 0x49, 0x52, 0x0d, 0xe5, + 0xa7, 0x04, 0x10, 0x56, 0x02, 0x57, 0xd6, 0xab, 0x6a, 0x73, 0xa2, 0x6f, 0xdb, 0x42, 0x68, 0xd4, + 0xd6, 0x75, 0x17, 0x94, 0x91, 0xd1, 0xdd, 0xb8, 0xe7, 0x41, 0xcf, 0xc2, 0x87, 0xdb, 0xc7, 0x37, + 0x31, 0x9c, 0x62, 0xbc, 0x50, 0x42, 0x7e, 0xe7, 0x7d, 0xa2, 0x79, 0xd0, 0x4e, 0x06, 0x87, 0x50, + 0xc5, 0x30, 0xf9, 0xd3, 0x92, 0x6c, 0x6b, 0xd8, 0xdc, 0x13, 0xba, 0xb0, 0x59, 0x16, 0xe2, 0x6e, + 0x12, 0x63, 0x38, 0x38, 0x67, 0xf6, 0x75, 0x99, 0x4c, 0xfa, 0xe7, 0x39, 0xbf, 0xcc, 0xdf, 0x13, + 0x8a, 0xf1, 0x4f, 0x37, 0xa7, 0x73, 0x4f, 0xbf, 0x27, 0xca, 0x3a, 0x46, 0x56, 0xd8, 0x69, 0x0c, + 0x63, 0x25, 0x11, 0x8c, 0x87, 0x33, 0x69, 0x33, 0xb5, 0x5a, 0xf8, 0xd4, 0x33, 0xfe, 0xbc, 0xf3, + 0x72, 0x65, 0x9d, 0x25, 0x2d, 0x43, 0x6a, 0x69, 0x03, 0xce, 0xd1, 0xbf, 0x1e, 0x4f, 0xd4, 0x2c, + 0x69, 0x5f, 0x11, 0xb9, 0x32, 0x8c, 0xad, 0x75, 0xde, 0x6c, 0x25, 0x70, 0x36, 0xb7, 0x98, 0x07, + 0xfd, 0x5c, 0x0b, 0xc3, 0x72, 0x41, 0xba, 0x82, 0xe4, 0x6a, 0xca, 0xd8, 0x24, 0x27, 0x77, 0xf1, + 0xee, 0xd0, 0x95, 0x0e, 0x03, 0x0f, 0xc9, 0x0b, 0x1b, 0xef, 0xc3, 0xd2, 0x73, 0x57, 0xb8, 0x4f, + 0x3a, 0x8c, 0x67, 0x68, 0x31, 0xb9, 0x1a, 0x17, 0x58, 0x16, 0x1b, 0x1f, 0x8f, 0xa8, 0xfc, 0x66, + 0x2c, 0xe1, 0x77, 0x53, 0xab, 0x4b, 0x0d, 0x6a, 0x32, 0xd4, 0x7f, 0xe7, 0xac, 0x2f, 0x34, 0xf3, + 0xf5, 0x1d, 0x64, 0x16, 0x76, 0x4c, 0xeb, 0x97, 0x43, 0xb4, 0x7c, 0x24, 0x8f, 0x21, 0xe6, 0x33, + 0x0e, 0xcc, 0xae, 0x39, 0x36, 0xae, 0x2f, 0xb0, 0x93, 0x46, 0x61, 0xf8, 0xe5, 0x28, 0xda, 0xa7, + 0xd6, 0xf0, 0xe9, 0x90, 0x62, 0x8e, 0xde, 0xfe, 0x0f, 0x35, 0x9d, 0x99, 0x26, 0x9e, 0xf1, 0x71, + 0x41, 0x31, 0xdd, 0xe4, 0x73, 0xbc, 0xc3, 0x07, 0xf6, 0x2d, 0x75, 0x19, 0xf8, 0xec, 0x92, 0x21, + 0x32, 0xb0, 0x5f, 0x95, 0x7b, 0x34, 0x65, 0x80, 0x9c, 0x31, 0x65, 0x45, 0xf8, 0x0b, 0x2d, 0x43, + 0x6b, 0xdf, 0x04, 0x4e, 0xdf, 0x8e, 0xff, 0x97, 0xb4, 0xee, 0x82, 0x8b, 0x8e, 0xc2, 0x2e, 0xf0, + 0x6f, 0x35, 0xa9, 0xd1, 0xb2, 0xa1, 0xba, 0x9b, 0xcc, 0xad, 0x41, 0x47, 0xe0, 0xb5, 0x87, 0x5d, + 0x70, 0xe3, 0x94, 0x1b, 0x7e, 0xfe, 0xcf, 0x4e, 0x99, 
0x96, 0x0d, 0x1d, 0x13, 0xdf, 0x83, 0x23, + 0xd3, 0x46, 0x69, 0x31, 0xca, 0x0b, 0x18, 0xa7, 0x94, 0x67, 0x1b, 0x26, 0xcd, 0x27, 0x24, 0x15, + 0x4d, 0xcd, 0x5f, 0x3b, 0xdd, 0xdc, 0x85, 0x2c, 0x1a, 0x66, 0x5b, 0xbf, 0x12, 0x61, 0xbd, 0x89, + 0xfa, 0x9a, 0x29, 0xf7, 0xfe, 0xbc, 0xe3, 0x0d, 0x5c, 0xf3, 0x1c, 0x75, 0x1d, 0x3d, 0xbc, 0x0f, + 0x4d, 0x21, 0x24, 0x7d, 0x70, 0xf4, 0x06, 0xd6, 0xc2, 0xd0, 0x2e, 0x8c, 0xe9, 0x06, 0x6a, 0x5d, + 0x1e, 0x4f, 0x53, 0xff, 0x60, 0x7e, 0x31, 0x87, 0x5e, 0x90, 0xb1, 0xd6, 0xff, 0xf4, 0xa1, 0xee, + 0x78, 0x91, 0x4d, 0x50, 0x3e, 0xf7, 0xcd, 0xbd, 0x76, 0x27, 0xac, 0xc2, 0x91, 0xf3, 0xca, 0xb2, + 0x8e, 0x86, 0x10, 0xd5, 0xb1, 0x56, 0x54, 0x1b, 0x2c, 0x69, 0xf1, 0xab, 0x9b, 0xc1, 0x80, 0xa8, + 0x6d, 0x3f, 0x05, 0xbf, 0x1b, 0x7e, 0x15, 0x60, 0x90, 0xfc, 0xd3, 0x06, 0x72, 0x26, 0x03, 0xef, + 0x20, 0xf9, 0x98, 0xa8, 0xf8, 0xfc, 0x64, 0x68, 0xcd, 0x29, 0x98, 0x1f, 0xa5, 0x35, 0x8f, 0x35, + 0x87, 0x1d, 0x62, 0x8b, 0xaa, 0x84, 0x2a, 0x8c, 0x96, 0xfe, 0x97, 0xb0, 0x32, 0x89, 0x1c, 0x67, + 0x43, 0x66, 0xf4, 0x60, 0x2f, 0x69, 0xdd, 0x97, 0xaa, 0xc0, 0x4c, 0x9a, 0xbd, 0xf3, 0xe9, 0xd3, + 0xc1, 0xf1, 0x6e, 0xa3, 0x6e, 0xcc, 0xda, 0xc4, 0x61, 0xad, 0xf5, 0x32, 0xc5, 0xfb, 0x58, 0x40, + 0xdd, 0x6e, 0x68, 0x41, 0x67, 0xeb, 0x5c, 0x66, 0xa1, 0xff, 0xba, 0x32, 0xb3, 0x35, 0x53, 0x76, + 0xbb, 0xc1, 0x3e, 0x71, 0x36, 0x0c, 0x47, 0x1a, 0xc9, 0x02, 0xaa, 0xed, 0x3f, 0xf7, 0xc1, 0xe6, + 0x22, 0xf6, 0xa8, 0xc7, 0xc6, 0x37, 0xc6, 0x55, 0x93, 0x77, 0x08, 0x8d, 0xdf, 0xa3, 0x3b, 0x84, + 0x66, 0x5d, 0xa9, 0x9d, 0xe9, 0x2a, 0x75, 0x1c, 0x6a, 0x8e, 0x54, 0x9a, 0x8d, 0x87, 0xd0, 0x37, + 0xc6, 0x62, 0xe4, 0x63, 0xd3, 0x99, 0x4f, 0x4e, 0xf5, 0x5c, 0x97, 0x7e, 0xcb, 0x5f, 0x08, 0x34, + 0x59, 0xe6, 0x61, 0x45, 0x45, 0x46, 0x86, 0x3e, 0xa8, 0x1e, 0xec, 0xbf, 0xe2, 0x89, 0x74, 0xac, + 0x11, 0xbb, 0x75, 0xce, 0x27, 0xdd, 0x1c, 0xac, 0x12, 0xad, 0xfe, 0xce, 0x35, 0x69, 0xaa, 0x73, + 0xb8, 0x63, 0x18, 0xd8, 0x75, 0x7a, 0xba, 0x45, 0x47, 0x09, 0xb3, 0x7b, 0xff, 0xed, 0x64, 0x07, + 0xab, 0x17, 0xeb, 0x18, 0xdf, 0x97, 0x6c, 0xc2, 0x91, 0x38, 0x9d, 0x9d, 0x97, 0xbd, 0x91, 0xce, + 0x47, 0xec, 0x8e, 0x76, 0xbb, 0xbe, 0x74, 0xf8, 0x36, 0x85, 0xc9, 0x3c, 0x60, 0x75, 0xa7, 0x20, + 0xd4, 0x3c, 0xe7, 0x9c, 0xa6, 0xd0, 0x4e, 0xfe, 0xb9, 0x46, 0x63, 0x45, 0xe7, 0x82, 0xbd, 0x8c, + 0x56, 0x22, 0x12, 0x06, 0xc3, 0xe9, 0x90, 0xcf, 0xec, 0x89, 0xe8, 0x16, 0xe2, 0xef, 0x9e, 0xa7, + 0xf0, 0x0a, 0x85, 0x65, 0x34, 0xb8, 0xd6, 0xdd, 0xa1, 0x06, 0xfb, 0x7a, 0x7b, 0xc0, 0x68, 0xd9, + 0x92, 0xc3, 0x19, 0xc3, 0x7e, 0xa7, 0x99, 0x6a, 0x94, 0xd0, 0xc5, 0x78, 0x1e, 0x68, 0x8c, 0x41, + 0xf9, 0x28, 0x95, 0xbb, 0x08, 0x27, 0x19, 0x21, 0x1c, 0xf8, 0xea, 0x45, 0x90, 0xf2, 0x49, 0xff, + 0x71, 0x40, 0x5e, 0x9d, 0x51, 0x18, 0x37, 0xb1, 0x13, 0xc6, 0x77, 0x14, 0xe6, 0x7a, 0xe9, 0x07, + 0xc3, 0x44, 0x32, 0x2f, 0xfd, 0x4c, 0xff, 0x10, 0x40, 0xfd, 0x5e, 0x50, 0x2f, 0x19, 0x06, 0xb2, + 0xe3, 0xf2, 0xdc, 0xfb, 0xfa, 0xe6, 0x0b, 0x36, 0x1b, 0x39, 0xb5, 0x17, 0x9b, 0x7d, 0x88, 0x59, + 0x39, 0xf9, 0xc0, 0x71, 0x58, 0x64, 0x23, 0x1a, 0x81, 0x9d, 0x5c, 0x7c, 0xf9, 0x04, 0x9a, 0x93, + 0xda, 0x89, 0x18, 0xdb, 0xcc, 0xe4, 0xa7, 0x72, 0x3e, 0xf8, 0x05, 0x81, 0xbb, 0x58, 0x4a, 0x6c, + 0xf2, 0xc0, 0x98, 0x86, 0xcb, 0xe2, 0x76, 0x93, 0x3b, 0xe8, 0x90, 0x4e, 0x34, 0x99, 0x6d, 0xba, + 0x55, 0xbd, 0xed, 0x06, 0xed, 0xa0, 0xaf, 0x38, 0xdb, 0x31, 0xfc, 0xd4, 0x86, 0x99, 0xdb, 0x36, + 0xb2, 0x5a, 0x0b, 0xfb, 0x7f, 0x4c, 0xce, 0xc6, 0x06, 0xef, 0xf7, 0x61, 0xff, 0xb0, 0xd8, 0xa0, + 0xe8, 0x65, 0xf5, 0x6e, 0x8d, 0xf3, 0xde, 0x97, 0x08, 0xf2, 0x07, 0x19, 0xb7, 
0x0d, 0x1f, 0x46, + 0x34, 0xf1, 0xd0, 0x56, 0xd7, 0xd8, 0x28, 0xf0, 0x44, 0xdc, 0xd8, 0xf3, 0xf7, 0x47, 0x8c, 0x93, + 0xff, 0x33, 0x2f, 0x5c, 0x44, 0xaf, 0x5f, 0xbd, 0x75, 0x5d, 0x72, 0xe0, 0x1f, 0xcd, 0xac, 0xe6, + 0x29, 0xb5, 0x8b, 0x72, 0xeb, 0x61, 0x33, 0xea, 0x58, 0x48, 0xe3, 0x32, 0x2e, 0x02, 0x4c, 0x95, + 0x01, 0xb9, 0x96, 0x33, 0xae, 0x67, 0x1d, 0xcb, 0xd8, 0x45, 0x59, 0x56, 0x67, 0x0f, 0xad, 0xc3, + 0x21, 0xe7, 0xd4, 0xc8, 0x62, 0x32, 0x66, 0xbe, 0xa6, 0x56, 0xd4, 0xd1, 0xe3, 0x5a, 0x23, 0x42, + 0x25, 0x84, 0x0a, 0xb8, 0x05, 0x57, 0x88, 0xdf, 0x36, 0xf6, 0x92, 0xeb, 0x74, 0x7b, 0xf6, 0x28, + 0x00, 0xb6, 0xad, 0xf2, 0xf3, 0xac, 0xf9, 0x7c, 0x87, 0x09, 0x84, 0x55, 0xb6, 0xee, 0xe0, 0x22, + 0xd7, 0x7a, 0x03, 0xe4, 0xca, 0xf7, 0x65, 0xb4, 0xd8, 0x2b, 0x30, 0xbf, 0x6b, 0x47, 0x24, 0x6c, + 0xfb, 0x63, 0xba, 0x41, 0x64, 0x7f, 0x93, 0xcd, 0x2a, 0x8c, 0x48, 0xbe, 0x69, 0x5f, 0xe8, 0xda, + 0x71, 0x81, 0x3d, 0x86, 0x12, 0xd4, 0xfb, 0xa9, 0xf9, 0x0c, 0x5b, 0xb1, 0x7f, 0x0e, 0x11, 0xfe, + 0x3c, 0x96, 0x5f, 0x99, 0x00, 0xc2, 0x13, 0xf4, 0x01, 0xd1, 0xe9, 0x68, 0x7f, 0x8f, 0xb6, 0x16, + 0xdd, 0xc3, 0x5d, 0xd6, 0x93, 0xd4, 0xb3, 0x7c, 0x74, 0x85, 0x14, 0xdc, 0x24, 0x60, 0x24, 0x1b, + 0x6b, 0x18, 0x32, 0x0f, 0xe6, 0x0f, 0xcd, 0xa9, 0x93, 0xd3, 0xee, 0xb8, 0xb6, 0x18, 0x45, 0x35, + 0x40, 0xa5, 0x5d, 0x46, 0xdd, 0x8e, 0x3c, 0x77, 0x19, 0xe4, 0xc8, 0x59, 0x30, 0xf5, 0x49, 0xd5, + 0x30, 0x9a, 0xb2, 0x83, 0x0f, 0x67, 0x62, 0xb2, 0xdd, 0x73, 0x8b, 0x02, 0xa6, 0xa8, 0x9f, 0x4d, + 0xf1, 0x2d, 0xf6, 0xbd, 0x17, 0xfb, 0xe3, 0x0e, 0xd6, 0x3f, 0x5d, 0xd4, 0xd7, 0xc2, 0x5c, 0x89, + 0x66, 0xba, 0x5d, 0x49, 0xfb, 0x9d, 0xc9, 0xfd, 0x8b, 0x8d, 0x4b, 0x98, 0x4e, 0x3b, 0xc6, 0x2a, + 0x7d, 0x54, 0xa9, 0x42, 0xb2, 0xb1, 0xc1, 0xdb, 0xfb, 0xe8, 0x22, 0xaa, 0x00, 0xb7, 0x65, 0xfe, + 0xa5, 0xde, 0xf4, 0x54, 0x4f, 0x0e, 0xdd, 0x78, 0xb7, 0x70, 0xf0, 0xae, 0xd2, 0xc9, 0x87, 0x60, + 0xf7, 0x97, 0x1d, 0xe2, 0x42, 0xcb, 0x76, 0xc4, 0x2b, 0xd3, 0x27, 0x19, 0x7a, 0xf8, 0xf9, 0xaf, + 0xe9, 0x81, 0x16, 0xef, 0x17, 0x11, 0x35, 0x55, 0x24, 0x9f, 0xc6, 0x5d, 0x5a, 0x2b, 0x7d, 0x28, + 0x9f, 0xe8, 0xe6, 0xfd, 0x43, 0xa9, 0x88, 0x4a, 0x39, 0x95, 0xb8, 0xb9, 0xc5, 0x69, 0x57, 0x8f, + 0x67, 0x9f, 0x2c, 0xd2, 0x74, 0x87, 0x35, 0xc7, 0x35, 0x11, 0x8b, 0x1b, 0x19, 0x56, 0x34, 0x98, + 0x0e, 0x6d, 0x9c, 0x0e, 0x02, 0xd5, 0x64, 0x40, 0xf7, 0x09, 0xce, 0x47, 0x76, 0xe7, 0x0d, 0x8c, + 0x51, 0x24, 0xf1, 0x79, 0x48, 0xc4, 0xb2, 0x97, 0x16, 0x56, 0x7e, 0x15, 0x38, 0x10, 0x14, 0xb6, + 0x6e, 0x52, 0x59, 0xd7, 0xe2, 0xcd, 0xd0, 0x45, 0x17, 0x14, 0x09, 0xb9, 0xca, 0xce, 0x39, 0xea, + 0x9c, 0xbd, 0x0c, 0x4c, 0x31, 0xd0, 0x8a, 0xdf, 0x8b, 0x8c, 0x22, 0x3d, 0xc7, 0x56, 0x77, 0x84, + 0x44, 0x29, 0x2d, 0x83, 0x0c, 0x46, 0x4d, 0x3f, 0x4f, 0x44, 0x51, 0x20, 0xd1, 0xc9, 0x35, 0xa8, + 0x1b, 0x29, 0xc4, 0x27, 0xbf, 0x26, 0x95, 0x62, 0x73, 0x1f, 0x1b, 0x1f, 0x01, 0xb3, 0x46, 0xa3, + 0xb1, 0xab, 0xd1, 0x3d, 0xc6, 0x05, 0x04, 0x4a, 0x0b, 0x15, 0x92, 0x96, 0xc2, 0x96, 0xdb, 0x75, + 0xea, 0x8c, 0x52, 0x3e, 0x9f, 0xb5, 0x7d, 0x73, 0x7d, 0x18, 0x9f, 0x8d, 0x30, 0xe5, 0x89, 0xdd, + 0xf4, 0xa2, 0x61, 0x59, 0x2c, 0x1f, 0xce, 0x42, 0xae, 0x51, 0xdb, 0xa1, 0xf2, 0xad, 0xba, 0x1c, + 0x24, 0x43, 0xa8, 0x84, 0xbd, 0x73, 0x0a, 0x34, 0x3d, 0x09, 0x8b, 0xb9, 0x6f, 0x13, 0x06, 0xc0, + 0x55, 0xd8, 0x74, 0x1f, 0x46, 0x17, 0xb0, 0xf3, 0x0d, 0x17, 0x4f, 0x9b, 0x07, 0x40, 0xe1, 0xf2, + 0xd1, 0x5b, 0xd5, 0x8d, 0x90, 0x6c, 0x7f, 0x53, 0x51, 0xae, 0xd8, 0xb1, 0xc7, 0xf5, 0x11, 0xad, + 0xf7, 0xb9, 0x8e, 0x16, 0xf2, 0x41, 0xeb, 0xd4, 0xb2, 0x48, 0xe1, 0x61, 0x5b, 0x3e, 0xcc, 0xc8, + 0x0f, 
0x45, 0xe7, 0x2c, 0x3a, 0x9f, 0x61, 0x4b, 0x27, 0x9e, 0x55, 0x49, 0xbf, 0x18, 0x4a, 0x61, + 0xd3, 0x97, 0x94, 0x2d, 0x76, 0x7c, 0xa4, 0xab, 0x1a, 0x1e, 0x52, 0xa2, 0x81, 0xb5, 0xbd, 0xc8, + 0x99, 0x9b, 0xea, 0x9a, 0xee, 0xf8, 0xa9, 0x8a, 0x19, 0x17, 0xc5, 0x52, 0xb0, 0x14, 0x6b, 0x1e, + 0x42, 0x4a, 0xc5, 0x98, 0xbb, 0x4b, 0x84, 0x24, 0x19, 0xf4, 0x9c, 0x0e, 0xbe, 0x5f, 0x78, 0xd5, + 0x48, 0x46, 0x56, 0xdb, 0x7d, 0x7a, 0x3a, 0x32, 0xf3, 0x0a, 0x7e, 0xce, 0x9c, 0x6c, 0x29, 0x2a, + 0x8c, 0xa9, 0x09, 0x75, 0x7f, 0x24, 0x95, 0x3d, 0x02, 0x66, 0x5a, 0xd3, 0x38, 0xc3, 0xe1, 0x24, + 0xa5, 0x60, 0xb7, 0xe6, 0x45, 0xb4, 0x88, 0x40, 0x0a, 0x8e, 0x01, 0x25, 0xb7, 0x94, 0x75, 0xa3, + 0xa2, 0xd9, 0x21, 0x1f, 0x12, 0xcc, 0x6c, 0x63, 0x61, 0xd9, 0x13, 0xdc, 0x5d, 0xf2, 0xc6, 0xa5, + 0x87, 0xfd, 0x0e, 0x8e, 0x47, 0xa1, 0xd6, 0x5b, 0x83, 0x22, 0x36, 0xef, 0x87, 0x55, 0xde, 0xcd, + 0x5d, 0xc8, 0x14, 0xea, 0xbe, 0x12, 0x3e, 0x98, 0xb2, 0xdd, 0x8a, 0xb0, 0x10, 0xae, 0x74, 0xbf, + 0x00, 0xcc, 0x28, 0x14, 0xe9, 0xd5, 0xf3, 0x92, 0x30, 0xbc, 0xea, 0x1e, 0x04, 0x0b, 0xca, 0x9f, + 0xf7, 0x46, 0xda, 0x98, 0x51, 0x2e, 0xbc, 0x42, 0x48, 0x88, 0x77, 0x65, 0x4f, 0xce, 0x8a, 0xe4, + 0xbe, 0xee, 0x82, 0xe3, 0x63, 0x16, 0x6d, 0xb4, 0xdf, 0xa1, 0x7a, 0x0e, 0xa4, 0x54, 0xd3, 0x11, + 0xc4, 0x11, 0x8b, 0xe3, 0x16, 0x30, 0x72, 0x14, 0xf8, 0xb9, 0x7c, 0xe1, 0x6c, 0x30, 0xf1, 0xd7, + 0x6f, 0x23, 0x0e, 0x48, 0x4c, 0xd9, 0x5e, 0x2c, 0xbf, 0xa8, 0x27, 0x51, 0x2b, 0x2a, 0xaf, 0xc3, + 0xfa, 0x8b, 0xfd, 0xb9, 0x55, 0xff, 0x1c, 0x46, 0x12, 0xbb, 0x00, 0xed, 0xf3, 0x1b, 0xb1, 0x4e, + 0x65, 0xf2, 0xce, 0xe7, 0x02, 0x92, 0xe7, 0xb5, 0x92, 0x20, 0x70, 0xca, 0x85, 0x92, 0xd0, 0x83, + 0x12, 0xae, 0x44, 0x5c, 0x6c, 0x5a, 0x3a, 0x5a, 0xc8, 0xe5, 0xb1, 0x97, 0x00, 0x50, 0xe2, 0xd5, + 0x35, 0xf6, 0x7b, 0xf4, 0x9f, 0xf9, 0xa7, 0xc8, 0xf5, 0x49, 0x2b, 0xa1, 0x87, 0x76, 0x40, 0x6e, + 0x45, 0x18, 0x41, 0x05, 0xe9, 0xd6, 0x46, 0xfa, 0x93, 0x0a, 0x84, 0xfb, 0x73, 0x7e, 0x7c, 0xa5, + 0x2c, 0x1f, 0x29, 0xc4, 0xcf, 0x61, 0x75, 0x6a, 0xce, 0x06, 0xaf, 0x10, 0x52, 0x77, 0xa0, 0xbe, + 0xe0, 0x7f, 0x1a, 0xe3, 0x78, 0x38, 0xd3, 0x0b, 0x03, 0xb8, 0x8c, 0xa6, 0xc2, 0x79, 0x1a, 0x39, + 0xef, 0xc2, 0xcc, 0x39, 0x26, 0xa7, 0x8f, 0x3d, 0x26, 0xdd, 0x67, 0xa3, 0x70, 0x7a, 0x62, 0x6d, + 0x40, 0xb0, 0xb8, 0xb1, 0x20, 0xe7, 0x8f, 0x18, 0xe8, 0x6a, 0xed, 0x42, 0xc2, 0xe2, 0x38, 0x82, + 0x12, 0xbc, 0x39, 0x3e, 0xe1, 0xb0, 0x2b, 0xce, 0x91, 0xa8, 0xc6, 0x33, 0xa7, 0x91, 0xcf, 0x7a, + 0xf8, 0xbd, 0xf3, 0x49, 0x83, 0x25, 0x21, 0xc5, 0xfa, 0x75, 0x2c, 0xf8, 0x9a, 0x54, 0xa3, 0xee, + 0x41, 0x20, 0x63, 0xfd, 0x27, 0x9b, 0x9b, 0x97, 0xb4, 0x95, 0xbd, 0xa1, 0x28, 0xa9, 0x95, 0x07, + 0xb1, 0x1c, 0x46, 0xdf, 0x4c, 0x41, 0x4f, 0x2b, 0x67, 0x32, 0xc1, 0xee, 0x9f, 0x71, 0x05, 0xbc, + 0x59, 0xe9, 0x31, 0x7d, 0x1e, 0x03, 0x8c, 0xcf, 0xfe, 0x91, 0x25, 0xfc, 0xc4, 0xcf, 0x6e, 0xa7, + 0xa5, 0x0a, 0x3c, 0xe3, 0xd2, 0x1d, 0x65, 0xd4, 0x0a, 0x63, 0xb6, 0xd5, 0xfe, 0x25, 0x45, 0x59, + 0x0d, 0xbf, 0xe1, 0x36, 0x24, 0xc6, 0x6d, 0x90, 0x9d, 0xaf, 0xc2, 0x41, 0xf8, 0xa1, 0x1d, 0x7b, + 0x54, 0xa9, 0xd4, 0x16, 0x9b, 0x9f, 0x6b, 0x48, 0x5e, 0x21, 0x7a, 0xa4, 0xa8, 0x5c, 0xd9, 0x7d, + 0x7b, 0xa7, 0x7a, 0xfc, 0xbf, 0xcd, 0x18, 0x0f, 0xcc, 0x52, 0xce, 0xd2, 0x78, 0x05, 0xb1, 0xd6, + 0x6d, 0x1a, 0xa4, 0x9f, 0xbe, 0x0d, 0x66, 0x7f, 0xa0, 0x66, 0x4f, 0x1f, 0x7a, 0x88, 0xb6, 0xc4, + 0xd2, 0x3f, 0x8a, 0xa2, 0x08, 0x4d, 0x4f, 0x08, 0xba, 0x15, 0x5f, 0xd2, 0xda, 0x9b, 0xd4, 0xe6, + 0xff, 0x28, 0xcf, 0xd1, 0x9c, 0x10, 0xf5, 0x61, 0xff, 0x60, 0x8d, 0xda, 0x6a, 0xb1, 0x44, 0x55, + 0xb8, 0x98, 0x0a, 0x09, 0x20, 
0x95, 0x3f, 0x77, 0x2f, 0xdb, 0x52, 0xf5, 0xaf, 0xd4, 0x5e, 0xf1, + 0x94, 0x15, 0x36, 0xec, 0x98, 0x50, 0x29, 0x1a, 0xc9, 0x50, 0x6e, 0xdd, 0xd8, 0xcd, 0x90, 0x6f, + 0xd9, 0xb7, 0x2a, 0x19, 0x98, 0x19, 0x21, 0xef, 0x8e, 0xa2, 0xc9, 0x54, 0xfa, 0x7a, 0xa5, 0xb7, + 0x7b, 0x21, 0xe3, 0xb7, 0x8e, 0xee, 0x68, 0x75, 0xcb, 0xcb, 0x86, 0x16, 0xb6, 0x31, 0x7b, 0x76, + 0x40, 0x6f, 0x85, 0x74, 0x55, 0x13, 0xb7, 0xfc, 0x13, 0xe1, 0x83, 0xb8, 0x6c, 0x9d, 0xab, 0x66, + 0xa9, 0xb9, 0x1c, 0xce, 0x28, 0x15, 0x05, 0x89, 0xd9, 0x0f, 0x4f, 0xa4, 0xf7, 0x81, 0xd9, 0x95, + 0x36, 0x68, 0x0f, 0x8e, 0x3d, 0x72, 0x30, 0xda, 0xa3, 0x5a, 0x43, 0x1e, 0xca, 0xad, 0x34, 0xbd, + 0xcd, 0x81, 0x82, 0xab, 0x71, 0x90, 0xba, 0x03, 0xc7, 0xaa, 0x1b, 0x34, 0x55, 0xc4, 0xaa, 0x30, + 0x53, 0xf4, 0x6a, 0xc8, 0xe2, 0xbc, 0xfb, 0xc5, 0x68, 0xa4, 0x47, 0x7a, 0x4b, 0x5d, 0x25, 0x1d, + 0x9f, 0x37, 0xae, 0x2a, 0x33, 0x42, 0xd9, 0x1b, 0xca, 0xb9, 0x72, 0xc6, 0xf4, 0xc8, 0x2d, 0x30, + 0xea, 0x8b, 0xce, 0x3e, 0x30, 0x85, 0x55, 0x0b, 0x41, 0x2c, 0x87, 0xe0, 0xc4, 0xa1, 0xcb, 0x01, + 0xfd, 0xa6, 0x67, 0xa2, 0xc3, 0x1f, 0x98, 0x97, 0xab, 0xf3, 0x79, 0xed, 0xb1, 0x76, 0x48, 0x51, + 0xff, 0x63, 0x6c, 0x54, 0x2f, 0x7e, 0xc4, 0x90, 0xea, 0x95, 0xd5, 0xd0, 0x2a, 0xa6, 0x5c, 0x1f, + 0x4d, 0x8f, 0x6c, 0xdd, 0x7e, 0xa9, 0x36, 0x99, 0x3e, 0x78, 0x74, 0x19, 0xd9, 0x10, 0x3e, 0xb0, + 0x6a, 0x97, 0x61, 0xc1, 0xf9, 0x5e, 0xf8, 0x14, 0x6c, 0xcb, 0x8f, 0x2b, 0x67, 0xbb, 0x11, 0x90, + 0x36, 0x20, 0xe9, 0x5b, 0x3c, 0xd7, 0x4f, 0x04, 0x5b, 0x81, 0xea, 0x26, 0xe6, 0xc1, 0x6e, 0xa0, + 0x59, 0x80, 0x01, 0x03, 0x89, 0x27, 0xb8, 0xd9, 0x73, 0xb0, 0x5c, 0x7a, 0xb1, 0x88, 0xda, 0xe7, + 0x77, 0x13, 0xef, 0x8c, 0xb8, 0x39, 0xe0, 0xdf, 0xe7, 0x93, 0xec, 0x80, 0x5a, 0x43, 0x3f, 0x94, + 0xe0, 0x10, 0xa2, 0xbf, 0xed, 0x0c, 0xfb, 0xcf, 0xf7, 0x3a, 0x51, 0xe8, 0x79, 0xd6, 0x48, 0x42, + 0x74, 0x74, 0xf0, 0xb5, 0x39, 0xf7, 0x33, 0x69, 0xfa, 0x02, 0x8e, 0xaf, 0xee, 0xf7, 0x95, 0x39, + 0x7a, 0xa0, 0x4c, 0x52, 0xfe, 0xa2, 0x5b, 0xa8, 0x33, 0x97, 0xe9, 0x83, 0xd3, 0x45, 0x62, 0x27, + 0xbf, 0xee, 0xef, 0xaa, 0x06, 0xb2, 0x85, 0x32, 0x84, 0x1c, 0x7e, 0x5a, 0x81, 0x65, 0xc9, 0xe2, + 0xe7, 0x80, 0x6f, 0x63, 0x4d, 0xfa, 0x30, 0x9d, 0x3d, 0x80, 0x9f, 0xa7, 0x28, 0xe8, 0xdb, 0x3f, + 0x3f, 0x45, 0x3f, 0xb6, 0x2b, 0xb0, 0x2c, 0x42, 0x36, 0xcd, 0x01, 0x6e, 0x13, 0xa7, 0x58, 0xad, + 0x25, 0xd3, 0xaf, 0x94, 0x50, 0xbe, 0x31, 0x69, 0x7b, 0x4a, 0x9e, 0x31, 0x73, 0x32, 0x87, 0x4e, + 0xe5, 0x76, 0xb4, 0x1d, 0xe5, 0x98, 0x8d, 0x8d, 0xb1, 0x51, 0x1d, 0x7a, 0x16, 0x2c, 0x26, 0x51, + 0xf1, 0x54, 0x2c, 0xbf, 0x04, 0x7d, 0xf0, 0x76, 0xae, 0x9f, 0x6f, 0x1f, 0xfb, 0x6e, 0xca, 0x50, + 0x75, 0x89, 0x6e, 0xa4, 0xac, 0x7b, 0x80, 0x37, 0x29, 0xe4, 0xca, 0x7e, 0x10, 0x4d, 0xfe, 0xf8, + 0xc8, 0x5c, 0x2d, 0x64, 0x8d, 0x62, 0x0e, 0xbe, 0x76, 0x6f, 0xa7, 0xb1, 0x1b, 0xe0, 0xe3, 0xb9, + 0x01, 0x8c, 0xb8, 0x19, 0xd7, 0xec, 0xe7, 0xc9, 0x60, 0x4e, 0xce, 0x58, 0x2c, 0x43, 0x9e, 0x7d, + 0x38, 0x9d, 0x1b, 0x51, 0x5a, 0x33, 0x2c, 0xe0, 0x2d, 0x82, 0x9d, 0x7c, 0xb0, 0x92, 0x4f, 0x46, + 0x58, 0xfc, 0xae, 0x61, 0xf0, 0x55, 0x34, 0xeb, 0x83, 0xdc, 0xff, 0xd7, 0xeb, 0xff, 0xea, 0x93, + 0xf4, 0x5f, 0xfe, 0xe5, 0x5f, 0xfe, 0xe5, 0x5f, 0xfe, 0xe5, 0x5f, 0xfe, 0xa7, 0xf8, 0x3f, 0xb3, + 0x72, 0xcb, 0x72, 0x00, 0x26, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 9728, // uncompressed 
data size (bytes) + 7415, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x03, 0x62, 0x76, 0x08, 0x13, 0x4c, 0xc8, 0x42, 0x69, + 0x20, 0x00, 0x00, 0x64, 0x80, 0xce, 0x2d, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 9728 +// COMPRESSED SIZE (bytes): 7415 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_image_prod_data[] = +{ + 0xed, 0x99, 0x45, 0x50, 0x1d, 0xda, 0xd2, 0x85, 0x0f, 0xee, 0x16, 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, + 0x5d, 0x83, 0x13, 0x08, 0x0e, 0x41, 0x83, 0xbb, 0xbb, 0xbb, 0x3b, 0x27, 0x9c, 0xe0, 0xee, 0xee, + 0xee, 0x01, 0x0e, 0xee, 0x10, 0x34, 0x38, 0xfc, 0x79, 0xff, 0xf0, 0x8e, 0xde, 0xf8, 0xd5, 0xfd, + 0x26, 0x6b, 0xf5, 0x1e, 0x74, 0x75, 0xd5, 0xaa, 0xda, 0xd5, 0xbb, 0x76, 0x14, 0x20, 0x10, 0xf0, + 0x9e, 0x0a, 0xf0, 0x47, 0x03, 0x00, 0x1e, 0x21, 0x1f, 0x01, 0x4f, 0x90, 0x71, 0x00, 0x48, 0x40, + 0xcf, 0x16, 0xf8, 0xe3, 0xe3, 0x03, 0x2d, 0x0a, 0x00, 0x01, 0xf8, 0x28, 0x80, 0x6e, 0xd8, 0x07, + 0x70, 0x66, 0xae, 0x41, 0x34, 0xa4, 0x02, 0x98, 0x32, 0xd7, 0x20, 0xff, 0x0a, 0x75, 0xe6, 0x1a, + 0xd4, 0x5f, 0x21, 0x89, 0x02, 0x00, 0x00, 0x99, 0x05, 0x50, 0x4d, 0x60, 0xa8, 0x9e, 0x82, 0x42, + 0x70, 0x66, 0x01, 0x44, 0x66, 0x16, 0x64, 0x13, 0x18, 0xa2, 0x69, 0x0d, 0xd2, 0xbf, 0x07, 0x00, + 0x40, 0x63, 0x97, 0x07, 0xa0, 0x3f, 0x14, 0x00, 0xba, 0x52, 0x13, 0xfe, 0xb6, 0x6c, 0x7a, 0x0a, + 0x6a, 0x7a, 0x0a, 0x8c, 0x02, 0x84, 0xc2, 0xfd, 0x3d, 0x42, 0x83, 0x6e, 0xcd, 0x02, 0x10, 0x43, + 0xfe, 0xc7, 0x21, 0xfc, 0x75, 0x10, 0xff, 0xef, 0x20, 0xfe, 0x3a, 0xd8, 0xff, 0xb8, 0xbf, 0x6d, + 0x21, 0xd3, 0xd0, 0xe0, 0xb7, 0xfe, 0x33, 0xc2, 0xdb, 0x13, 0x4c, 0x14, 0x80, 0x04, 0xf0, 0x96, + 0x0f, 0xdd, 0x91, 0x08, 0xd9, 0x9c, 0x8a, 0xfe, 0xb7, 0x7e, 0x7f, 0x06, 0xf8, 0xff, 0x1d, 0xec, + 0x11, 0xb2, 0xab, 0xf4, 0x05, 0x22, 
0x61, 0xe1, 0x06, 0x3a, 0x0a, 0x1d, 0x0e, 0x50, 0x58, 0xf0, + 0x08, 0x48, 0xf8, 0xab, 0x3d, 0x1f, 0x77, 0x6c, 0xaf, 0xef, 0xf2, 0x10, 0x85, 0x05, 0x22, 0x6b, + 0x80, 0xff, 0x86, 0x70, 0x04, 0xa5, 0xf6, 0x0b, 0xaa, 0x2e, 0xa9, 0x21, 0x08, 0x33, 0x97, 0xcb, + 0x18, 0xbc, 0xba, 0x29, 0xcc, 0x0d, 0x5e, 0xa9, 0xc3, 0x9c, 0x00, 0xb8, 0x17, 0x52, 0x3a, 0x95, + 0x78, 0xa0, 0x68, 0x9e, 0x23, 0x2a, 0xc2, 0xd2, 0xc2, 0xee, 0x55, 0x04, 0xe3, 0xae, 0x14, 0x78, + 0xf1, 0x4d, 0xab, 0xf5, 0xe5, 0x86, 0x81, 0x52, 0x2d, 0x66, 0x6d, 0x15, 0xe7, 0x55, 0xcd, 0x3e, + 0xe0, 0xd0, 0x06, 0x16, 0x61, 0x5f, 0xb3, 0xfe, 0xfb, 0x3d, 0x69, 0x18, 0x53, 0x1a, 0xea, 0x39, + 0x65, 0xd5, 0xc9, 0xb9, 0xec, 0x36, 0x7c, 0xec, 0x06, 0x5f, 0x9f, 0x7a, 0x62, 0x08, 0x49, 0xf3, + 0xd5, 0x0d, 0xb4, 0xfb, 0x7d, 0xeb, 0xfe, 0xe2, 0x29, 0xe5, 0x81, 0x58, 0x1a, 0x2b, 0x3e, 0x97, + 0x43, 0x91, 0x11, 0x19, 0x0a, 0x7d, 0x94, 0x78, 0xeb, 0x64, 0xf7, 0x4d, 0xa3, 0xb3, 0x22, 0x71, + 0x56, 0xbd, 0xab, 0x2d, 0x33, 0x0c, 0x5b, 0x8e, 0x8d, 0x06, 0x1c, 0xa2, 0x28, 0x36, 0x8b, 0xe8, + 0xf1, 0x02, 0x34, 0x15, 0xe2, 0x49, 0x84, 0x4a, 0x5a, 0x23, 0xe3, 0x20, 0x99, 0x3d, 0xc7, 0x5b, + 0xc8, 0x6c, 0x94, 0x82, 0x9c, 0x36, 0xd4, 0xdd, 0x5a, 0x1f, 0x51, 0xd9, 0xa3, 0x22, 0xc6, 0xaf, + 0xb7, 0x84, 0xe3, 0x96, 0xa9, 0x42, 0x90, 0xea, 0x2c, 0x07, 0xc5, 0x0a, 0x95, 0x15, 0x11, 0xf3, + 0xe2, 0x77, 0xfd, 0x4f, 0x53, 0x80, 0xb0, 0xc0, 0x7b, 0x64, 0xc8, 0xe9, 0xb8, 0xbd, 0xe6, 0x84, + 0xe8, 0x75, 0x14, 0x76, 0x3f, 0x22, 0xe1, 0x85, 0xf7, 0x96, 0x51, 0xf4, 0x5e, 0xdf, 0x0a, 0x0c, + 0xca, 0x66, 0xa2, 0x31, 0xad, 0x95, 0x82, 0x49, 0x11, 0x24, 0xcf, 0x5e, 0xab, 0x0e, 0x35, 0x78, + 0x5e, 0x55, 0x25, 0x6c, 0x30, 0x0c, 0x66, 0x3d, 0x11, 0x57, 0xe3, 0xe9, 0x66, 0x65, 0xad, 0x48, + 0xa2, 0xa5, 0x3f, 0x4b, 0x4a, 0x46, 0x3a, 0x76, 0xa9, 0xe1, 0x10, 0x4b, 0x8e, 0xec, 0x5c, 0x51, + 0x40, 0x58, 0x9c, 0x9b, 0xf4, 0xce, 0x64, 0xe7, 0xe0, 0xf4, 0x89, 0x9d, 0x83, 0x53, 0x0d, 0x2d, + 0xdb, 0x3b, 0x14, 0xce, 0x52, 0x57, 0xc9, 0xc3, 0x31, 0x7e, 0x22, 0x6a, 0xae, 0x5f, 0x13, 0xb4, + 0x42, 0x14, 0x52, 0x22, 0x5c, 0x14, 0x5b, 0x7a, 0xf3, 0x75, 0xf4, 0xc0, 0x53, 0x89, 0x0d, 0x34, + 0x6d, 0x26, 0xfb, 0x97, 0x1b, 0x2e, 0x82, 0x62, 0x9c, 0x08, 0xfe, 0xa8, 0xab, 0x0b, 0x81, 0x94, + 0x42, 0x37, 0x59, 0xbc, 0x89, 0x2c, 0xd4, 0xfe, 0x51, 0xb6, 0xe5, 0x13, 0xa6, 0x1e, 0x66, 0x6e, + 0x4b, 0x09, 0x37, 0xa3, 0xaf, 0x6a, 0xb4, 0x7a, 0x92, 0x56, 0xed, 0xdf, 0x9b, 0x54, 0xcc, 0x4f, + 0xf0, 0x1e, 0x05, 0x0d, 0x68, 0x73, 0xaa, 0x61, 0xed, 0x93, 0x44, 0xe5, 0xf6, 0x0f, 0x63, 0x8d, + 0x30, 0x95, 0xb7, 0x8c, 0x60, 0x53, 0x17, 0xce, 0x66, 0xec, 0x10, 0x3f, 0x75, 0xe1, 0xc8, 0xb2, + 0x92, 0x79, 0x60, 0x60, 0xd7, 0xee, 0x28, 0xde, 0xa4, 0xd1, 0xae, 0x82, 0x8a, 0xf3, 0x3b, 0xab, + 0x0f, 0xda, 0xc9, 0x2c, 0x43, 0x93, 0x7a, 0x43, 0x77, 0x5a, 0x1c, 0x13, 0x77, 0xe3, 0xe9, 0x6a, + 0xb5, 0x67, 0xbf, 0x52, 0xdb, 0xb0, 0x72, 0x7b, 0x57, 0xe6, 0x05, 0x14, 0xdf, 0x5c, 0x09, 0xbe, + 0x49, 0x99, 0x6b, 0x00, 0x00, 0xc8, 0x7e, 0x6f, 0x08, 0x3d, 0xd8, 0x89, 0xf3, 0xd3, 0x84, 0xca, + 0x04, 0xbf, 0x55, 0xb0, 0x66, 0x24, 0x2b, 0x41, 0x38, 0xf1, 0xb6, 0x94, 0x25, 0xc8, 0x7a, 0xfa, + 0xa4, 0x7c, 0x16, 0x73, 0x3d, 0xeb, 0xa2, 0x96, 0x1d, 0x14, 0xb5, 0x36, 0xf1, 0x26, 0x77, 0x5a, + 0x22, 0x1a, 0xfe, 0xb5, 0x3a, 0xc9, 0xf5, 0xb2, 0x04, 0xc6, 0xe9, 0x84, 0xf0, 0x35, 0x62, 0x5f, + 0x77, 0x8c, 0x3c, 0xac, 0x49, 0x10, 0xc8, 0x1c, 0x6f, 0xbb, 0x5b, 0x35, 0x94, 0x74, 0x80, 0x1c, + 0x3d, 0x41, 0x29, 0x95, 0xcd, 0x29, 0x82, 0x0f, 0x87, 0x2c, 0xcf, 0x53, 0xdd, 0x37, 0x51, 0xd6, + 0xaf, 0xb8, 0x67, 0x10, 0x95, 0x5c, 0x8a, 0x43, 0xf6, 0xb0, 
0x0c, 0xb7, 0xb7, 0xca, 0x47, 0xcc, + 0xc7, 0xbd, 0xb2, 0xf9, 0x97, 0xe1, 0x28, 0x86, 0x82, 0xa1, 0xd6, 0x1b, 0x6d, 0x0b, 0x74, 0x30, + 0x5d, 0xdd, 0xdb, 0x82, 0x62, 0x14, 0x1d, 0x96, 0xf8, 0x52, 0x5d, 0xbf, 0x59, 0x15, 0x8e, 0x4e, + 0x1c, 0xad, 0x40, 0x89, 0xf9, 0x4b, 0x2b, 0x7c, 0xec, 0x43, 0x9b, 0x3b, 0xcd, 0x1f, 0xb1, 0xf3, + 0x77, 0x7d, 0x92, 0x3f, 0xd4, 0xb6, 0xd7, 0x06, 0x43, 0x73, 0xd4, 0x11, 0x92, 0x29, 0x98, 0xa6, + 0xab, 0xcb, 0x9c, 0xae, 0xbb, 0x7f, 0x56, 0x0c, 0xd1, 0x74, 0x10, 0x4c, 0x10, 0x84, 0x89, 0x62, + 0x2e, 0xbd, 0xce, 0x5c, 0x82, 0xb3, 0x30, 0x4f, 0xe2, 0x08, 0xbc, 0xa2, 0xcd, 0x68, 0x26, 0xa3, + 0x52, 0xa5, 0xcb, 0xe1, 0x51, 0x40, 0xdb, 0x95, 0xd0, 0x3b, 0xbc, 0xe9, 0x3f, 0x79, 0xa5, 0x39, + 0x5f, 0xcb, 0x11, 0x29, 0x9c, 0xe0, 0xff, 0xfc, 0xcc, 0x3d, 0xfc, 0x54, 0xcf, 0x8c, 0x30, 0x18, + 0xa4, 0xd0, 0xf1, 0x46, 0x4b, 0x75, 0x02, 0xc9, 0x9a, 0x98, 0x60, 0xcf, 0x25, 0x8b, 0x7a, 0x09, + 0x89, 0x57, 0x7b, 0x0c, 0x7f, 0xfb, 0x45, 0x90, 0x17, 0x23, 0x40, 0xba, 0xfa, 0x4d, 0xcf, 0x57, + 0xe0, 0xcb, 0x9f, 0x80, 0x9d, 0xb1, 0x61, 0xfe, 0x0e, 0xb0, 0x53, 0xe3, 0x2c, 0x37, 0x90, 0xc5, + 0x67, 0xc8, 0xf1, 0xcd, 0x1b, 0xce, 0xec, 0x7e, 0xf4, 0x29, 0x6f, 0x46, 0x63, 0xa2, 0xf7, 0x73, + 0xbe, 0x2a, 0x54, 0xac, 0x4f, 0xde, 0x59, 0xba, 0xb2, 0x64, 0xa4, 0xea, 0x18, 0xe2, 0xf7, 0xce, + 0x59, 0xa8, 0x69, 0xd7, 0x8f, 0x75, 0x3a, 0xfd, 0x91, 0x95, 0xc0, 0x50, 0x0d, 0xc6, 0xa9, 0xa3, + 0x96, 0x96, 0x85, 0xd8, 0x53, 0x17, 0xdd, 0x7b, 0x93, 0x68, 0xfc, 0x90, 0x0a, 0xde, 0x51, 0x23, + 0x47, 0x19, 0xd1, 0xd3, 0x85, 0x87, 0xbb, 0x0d, 0x7d, 0x30, 0x76, 0x12, 0x26, 0x24, 0x31, 0xdf, + 0xf6, 0x47, 0xa6, 0xd7, 0xf7, 0xdb, 0xe4, 0x9f, 0x14, 0x86, 0x05, 0x3f, 0xc7, 0x66, 0x79, 0x5c, + 0x23, 0xd3, 0x13, 0x65, 0x80, 0xf2, 0xfa, 0x34, 0x4a, 0x10, 0xfc, 0x44, 0x7b, 0xdf, 0xe7, 0xfa, + 0xab, 0x38, 0x54, 0x6f, 0xbe, 0xab, 0x9e, 0x70, 0x0a, 0x20, 0xd3, 0xee, 0xc9, 0x6f, 0x2b, 0x99, + 0x6d, 0x9c, 0x97, 0xc1, 0xcd, 0x34, 0x32, 0x3d, 0x97, 0xf8, 0xa6, 0x77, 0x58, 0x1a, 0x57, 0x4f, + 0x6d, 0x05, 0x18, 0x3b, 0x0d, 0x2f, 0x2d, 0x91, 0x23, 0xc6, 0x15, 0x99, 0x1c, 0x3d, 0x24, 0x9d, + 0xee, 0x1a, 0x74, 0x31, 0x66, 0xee, 0x9f, 0x9d, 0xfb, 0x74, 0x94, 0xce, 0x08, 0xf0, 0xe0, 0xdc, + 0x8a, 0x25, 0x66, 0xfb, 0x94, 0x58, 0x36, 0x2f, 0x7e, 0x53, 0xfc, 0xe8, 0x12, 0x28, 0x39, 0x65, + 0x41, 0x6a, 0x89, 0x7e, 0xdd, 0x45, 0x85, 0xd7, 0x1c, 0xb2, 0x22, 0xa1, 0xcc, 0x69, 0x8c, 0xe6, + 0x3c, 0xc6, 0x45, 0x69, 0xa8, 0xc7, 0xce, 0x76, 0xcf, 0x8e, 0xc3, 0xb7, 0x19, 0x49, 0x8b, 0x51, + 0xa9, 0x8c, 0x66, 0xf1, 0xc7, 0x59, 0x1a, 0xce, 0x10, 0x97, 0x76, 0x1a, 0x83, 0x4c, 0xfb, 0xe5, + 0x8d, 0x23, 0xfd, 0x2c, 0xa8, 0x45, 0x56, 0x58, 0xf5, 0x51, 0x3d, 0x4c, 0x23, 0x9f, 0xd5, 0xd2, + 0x43, 0x13, 0x23, 0xf0, 0xce, 0xd0, 0x39, 0xc6, 0xfa, 0x95, 0x00, 0xca, 0xc0, 0x5e, 0x18, 0xec, + 0x1a, 0x31, 0x45, 0x60, 0x8e, 0xbd, 0xdf, 0xcd, 0x24, 0xf8, 0xa4, 0x2a, 0xfd, 0x07, 0x77, 0xbd, + 0xbe, 0xdc, 0xfb, 0x21, 0x1d, 0x9c, 0x54, 0x61, 0xc5, 0x5b, 0x78, 0x62, 0x31, 0x54, 0x1d, 0x94, + 0x3e, 0x73, 0x62, 0xc4, 0xa0, 0x9b, 0xb0, 0xfa, 0x59, 0xaa, 0x5f, 0xbe, 0xb9, 0xd8, 0xa4, 0x95, + 0xeb, 0xfc, 0x90, 0xcc, 0x03, 0x76, 0x41, 0x5b, 0x66, 0xef, 0x6c, 0xc1, 0xe0, 0x37, 0x83, 0x4a, + 0x94, 0x97, 0x95, 0x0f, 0xbf, 0x8e, 0xa8, 0x90, 0xbe, 0x89, 0x0f, 0x3f, 0x27, 0xfe, 0xfb, 0xfd, + 0xfd, 0xae, 0xd2, 0x46, 0x7d, 0x74, 0x35, 0x9d, 0x02, 0x29, 0xa6, 0x0d, 0x46, 0xf7, 0xc7, 0x52, + 0x73, 0x94, 0xb6, 0x5b, 0xe2, 0x56, 0x82, 0x57, 0x99, 0x00, 0xdc, 0x4d, 0xde, 0xc1, 0x14, 0x30, + 0x20, 0x89, 0x91, 0x76, 0xca, 0x5a, 0xb8, 0x9c, 0x9e, 0xa8, 0x81, 0x8a, 0xfb, 0x30, 
0x24, 0x01, + 0x59, 0xe0, 0x9c, 0xad, 0x21, 0xef, 0xda, 0x2a, 0x0f, 0x8c, 0x15, 0x91, 0x7a, 0x60, 0x59, 0xc4, + 0x31, 0x6c, 0xae, 0x3e, 0xa8, 0x90, 0x9c, 0x70, 0x21, 0x36, 0xb9, 0xff, 0x79, 0xa1, 0xdc, 0xf2, + 0xdb, 0x60, 0xce, 0x45, 0x4d, 0x6a, 0x9b, 0x66, 0x76, 0x93, 0xd8, 0x5e, 0x59, 0x24, 0x6b, 0x34, + 0x87, 0x95, 0xe2, 0xc3, 0xa9, 0x75, 0xf8, 0xf0, 0x28, 0x67, 0xb9, 0x30, 0x26, 0x4c, 0xd6, 0xa2, + 0x80, 0x4c, 0xb0, 0xa6, 0xfc, 0x07, 0xbb, 0x06, 0xe5, 0x59, 0xf8, 0x0f, 0x03, 0x6a, 0x4f, 0x2d, + 0x1b, 0xce, 0xac, 0xee, 0xbe, 0xef, 0x24, 0xdc, 0xce, 0xae, 0xad, 0xcb, 0x0d, 0x9d, 0x4b, 0x27, + 0x28, 0xcb, 0xbc, 0x9b, 0xb9, 0x2d, 0x12, 0xe8, 0x0d, 0xfa, 0xd9, 0xf8, 0x39, 0x80, 0x7e, 0x90, + 0x37, 0xdd, 0x11, 0x24, 0xff, 0xdc, 0xf9, 0x73, 0xd5, 0xd4, 0x95, 0x65, 0xe9, 0x95, 0x5a, 0xd7, + 0x97, 0x97, 0x07, 0xe3, 0x72, 0x72, 0x2b, 0xfd, 0xaf, 0xa5, 0xb9, 0x2d, 0x85, 0x5c, 0xb6, 0xf3, + 0x40, 0xa2, 0x41, 0x0e, 0x79, 0x8c, 0xaf, 0x06, 0xfb, 0xa0, 0x54, 0x95, 0x25, 0x16, 0x5a, 0xfc, + 0x29, 0xec, 0x4c, 0xe1, 0x98, 0x5c, 0xd2, 0x88, 0x54, 0xdc, 0x60, 0x4b, 0xdf, 0x06, 0xf3, 0xd0, + 0x88, 0xcb, 0xc1, 0x0c, 0xaa, 0xcd, 0x05, 0x0d, 0xab, 0xd5, 0x60, 0xb4, 0x37, 0x83, 0xfa, 0x44, + 0xa0, 0xfe, 0x5b, 0x40, 0x21, 0x5f, 0x89, 0x81, 0x93, 0x07, 0x20, 0xa2, 0xb4, 0xb1, 0x50, 0x8d, + 0xbf, 0xe1, 0x57, 0xd5, 0x7e, 0xf9, 0x23, 0xed, 0x28, 0x45, 0x82, 0x1e, 0x66, 0xc3, 0x0d, 0x0c, + 0x79, 0x8d, 0x79, 0x6a, 0xfe, 0xee, 0x5b, 0x50, 0x65, 0xfb, 0x7c, 0x4e, 0x69, 0xbf, 0x9c, 0xe9, + 0x59, 0xe4, 0x61, 0x12, 0xd6, 0x70, 0xcc, 0xae, 0x4d, 0x98, 0xb7, 0x27, 0x96, 0x9b, 0xbd, 0x8a, + 0xee, 0xa9, 0x07, 0x65, 0xec, 0x0a, 0x26, 0xb6, 0xb7, 0x41, 0x70, 0x50, 0xfd, 0x3e, 0x56, 0xd2, + 0x54, 0xf9, 0x1c, 0x0e, 0x65, 0x5b, 0xf2, 0x76, 0x1d, 0x71, 0xf2, 0x0b, 0x95, 0xfa, 0x5b, 0x48, + 0x0a, 0x09, 0x09, 0xa5, 0x7c, 0x12, 0xc4, 0x0b, 0x73, 0x91, 0x29, 0xd9, 0xca, 0xbe, 0x0f, 0xd9, + 0x2e, 0xbc, 0x39, 0x07, 0x74, 0x55, 0xf4, 0xb2, 0x4d, 0x09, 0x3d, 0x96, 0xed, 0xd1, 0x43, 0x9f, + 0x8a, 0x4a, 0x8d, 0x86, 0x0a, 0xdd, 0x11, 0xa2, 0xf2, 0xd5, 0x66, 0xf9, 0xd1, 0x0b, 0xb0, 0x2d, + 0x8e, 0xc9, 0xff, 0x68, 0x8e, 0xb8, 0x5f, 0x4f, 0x74, 0xcb, 0xb4, 0x69, 0x46, 0x6b, 0x72, 0xa0, + 0xad, 0xdf, 0xbc, 0x34, 0x5e, 0x8b, 0xf7, 0x01, 0xf0, 0xfa, 0x35, 0x3b, 0x17, 0x3d, 0x2f, 0x59, + 0xc2, 0xe4, 0x75, 0x54, 0x67, 0x35, 0xd1, 0xde, 0xa9, 0xcc, 0x37, 0xa2, 0x06, 0x2c, 0xd4, 0x0a, + 0x1b, 0xa3, 0x71, 0x4c, 0x7f, 0x29, 0x7c, 0x4f, 0xee, 0xca, 0x66, 0x4c, 0x14, 0xfe, 0xc7, 0x09, + 0xbf, 0xa2, 0x6e, 0xcd, 0x75, 0xaf, 0xc7, 0x45, 0x27, 0xf0, 0xa6, 0x09, 0x1a, 0x2e, 0x50, 0x35, + 0x97, 0x6d, 0x1d, 0x92, 0xff, 0xc8, 0x75, 0xcb, 0x99, 0x87, 0x3d, 0x65, 0x61, 0xe1, 0x53, 0xdf, + 0x42, 0x36, 0x37, 0x7e, 0x2d, 0xd2, 0xa0, 0xac, 0x6f, 0xb4, 0x70, 0xc3, 0xfa, 0xae, 0xe1, 0x19, + 0x16, 0x45, 0xca, 0x3d, 0x92, 0x67, 0x5d, 0x72, 0x81, 0xd2, 0xc4, 0xc3, 0x2f, 0x2d, 0xb1, 0x81, + 0xb2, 0xaf, 0xf9, 0xee, 0x78, 0xf0, 0x2e, 0x56, 0xed, 0x91, 0x0c, 0x28, 0x5d, 0x1e, 0x71, 0x33, + 0x3c, 0x23, 0x8b, 0xae, 0xa8, 0x61, 0x7d, 0x3f, 0xe2, 0xee, 0xad, 0x30, 0x3d, 0xa0, 0x56, 0x8b, + 0xd8, 0x9e, 0xc2, 0xf8, 0xb9, 0x3a, 0xa4, 0x78, 0x6a, 0xb3, 0x47, 0xd9, 0x7b, 0x4f, 0x4a, 0x5e, + 0xca, 0xe6, 0xa0, 0xbe, 0xd1, 0xa9, 0xd2, 0x9f, 0x54, 0x49, 0x7e, 0x63, 0x80, 0x22, 0x0e, 0x59, + 0xd0, 0xd2, 0xbf, 0xd8, 0x77, 0x8d, 0x8e, 0x19, 0x52, 0xa2, 0x0e, 0xa3, 0xe0, 0x97, 0x87, 0xd8, + 0x85, 0x0c, 0xa7, 0xe2, 0x75, 0x1f, 0x60, 0xd8, 0x34, 0x2c, 0xe7, 0xce, 0xce, 0x4c, 0xf5, 0x93, + 0xe7, 0xfe, 0x46, 0x34, 0x82, 0x52, 0x68, 0x15, 0x63, 0xc6, 0x21, 0xc6, 0x7a, 0xd6, 0x2b, 0xe2, + 0x79, 0x5e, 
0x9c, 0xe6, 0x15, 0x56, 0x97, 0x0a, 0x0c, 0x5a, 0x4a, 0x15, 0x61, 0x44, 0x84, 0xaa, + 0xa8, 0xcf, 0xf1, 0x77, 0xe2, 0xcf, 0xd4, 0xf0, 0xeb, 0xdd, 0xb8, 0x61, 0x77, 0xe8, 0x7a, 0x3d, + 0x55, 0xf6, 0x87, 0x5a, 0x02, 0x07, 0x40, 0xce, 0x7b, 0x84, 0xaa, 0xff, 0xce, 0x56, 0x5e, 0xda, + 0x94, 0x51, 0x8a, 0x33, 0x56, 0x87, 0x32, 0x31, 0x0f, 0x1b, 0x18, 0xe0, 0xde, 0x60, 0xbe, 0x68, + 0xaa, 0x50, 0xe5, 0x41, 0x19, 0x66, 0x29, 0xce, 0x54, 0x4d, 0x09, 0xfd, 0x12, 0x9c, 0x23, 0x1c, + 0xbb, 0xc4, 0x1c, 0xb4, 0x5c, 0xfd, 0xcd, 0x94, 0xab, 0xc7, 0xba, 0x49, 0x82, 0xcc, 0xe6, 0xbb, + 0xa2, 0x94, 0x6a, 0x1c, 0x9b, 0xbb, 0x5c, 0x2d, 0xf4, 0xcd, 0x4b, 0x28, 0x9d, 0xe3, 0xf1, 0x66, + 0x5a, 0xe1, 0x3c, 0x8d, 0x6f, 0xfd, 0xf4, 0x56, 0x72, 0xfc, 0xe2, 0x49, 0x7b, 0xdc, 0x5a, 0xfc, + 0x97, 0x3d, 0x52, 0x43, 0xd1, 0xd7, 0xfe, 0xc7, 0x18, 0xdc, 0x58, 0x24, 0x96, 0x5a, 0x16, 0x9e, + 0xa2, 0x3e, 0xad, 0x21, 0x32, 0xbe, 0x48, 0xd1, 0xc3, 0xf6, 0x71, 0x6f, 0x2a, 0x86, 0x0d, 0xca, + 0x68, 0xcb, 0x58, 0xad, 0x48, 0x54, 0x63, 0x78, 0xf4, 0x86, 0x87, 0xc3, 0x2c, 0x7a, 0x22, 0xc1, + 0x80, 0x43, 0x62, 0xd2, 0xdc, 0x2d, 0x6b, 0x3e, 0xc5, 0x4a, 0x59, 0xdc, 0x18, 0x01, 0x71, 0x83, + 0x7b, 0x7d, 0x27, 0x29, 0xe6, 0x57, 0x93, 0x26, 0xb7, 0xb6, 0xf4, 0x5e, 0x46, 0x65, 0x17, 0x3e, + 0xfc, 0x3b, 0xa1, 0x55, 0xe8, 0xa2, 0xf9, 0xdb, 0x4f, 0xbb, 0x64, 0x3b, 0x40, 0xf6, 0xba, 0x49, + 0xf8, 0xbb, 0x52, 0x5c, 0x72, 0x29, 0x93, 0x9f, 0xc0, 0xe4, 0x6e, 0xb7, 0xe1, 0xf1, 0x28, 0xe3, + 0x88, 0x3d, 0x00, 0xcd, 0xd1, 0xc4, 0x9d, 0xeb, 0x63, 0xb2, 0xaa, 0x80, 0x2c, 0x36, 0xb9, 0x2d, + 0xc9, 0x03, 0x88, 0x01, 0x92, 0x42, 0x5c, 0x74, 0xef, 0x3d, 0x1f, 0xb9, 0x70, 0x15, 0x91, 0xf0, + 0x41, 0x5b, 0x8e, 0x12, 0x69, 0x59, 0x29, 0xe2, 0x96, 0x7a, 0x20, 0xe0, 0xf0, 0xeb, 0xaf, 0xd8, + 0xce, 0xe4, 0x69, 0x4c, 0x26, 0xa8, 0x0d, 0xb5, 0xe1, 0x95, 0x0d, 0xf9, 0x60, 0x47, 0x0a, 0x91, + 0xe8, 0x63, 0x0b, 0x44, 0xe5, 0xa9, 0x45, 0x6b, 0x81, 0x80, 0x50, 0x94, 0x82, 0x6e, 0x19, 0x78, + 0xe9, 0x8d, 0x62, 0x14, 0xbe, 0x99, 0xad, 0xa3, 0xf0, 0x41, 0x69, 0xfc, 0xf1, 0x35, 0x8b, 0x2f, + 0x0f, 0x9a, 0x32, 0x2e, 0x70, 0xfa, 0xbd, 0xcf, 0x87, 0x97, 0xa4, 0xb9, 0xd8, 0xbd, 0x0f, 0x71, + 0x6a, 0xbf, 0xdc, 0xa8, 0x94, 0x96, 0xf8, 0x66, 0x48, 0x75, 0xa3, 0x77, 0x97, 0xf0, 0xe0, 0x3e, + 0xf4, 0xa0, 0xb6, 0x41, 0xa0, 0x5b, 0x50, 0x8b, 0x0b, 0x91, 0xf6, 0x69, 0xbc, 0xcc, 0xca, 0x4b, + 0x77, 0xa9, 0xf3, 0x2a, 0xa4, 0x41, 0xb3, 0x9f, 0x49, 0x8c, 0xc0, 0xf6, 0xf0, 0xa7, 0xec, 0xad, + 0x65, 0xfc, 0xbd, 0xfd, 0xd4, 0x22, 0x15, 0xd6, 0x99, 0x17, 0xaa, 0x33, 0x84, 0x61, 0x1e, 0x39, + 0x49, 0x8e, 0x8b, 0xec, 0xa2, 0x5c, 0x81, 0xe9, 0xe4, 0x97, 0x6c, 0x94, 0x7d, 0xab, 0x3c, 0x41, + 0x05, 0x0b, 0x8b, 0x7a, 0x4f, 0xc5, 0x15, 0x79, 0x50, 0xf9, 0x63, 0x36, 0xd9, 0xce, 0x45, 0xcb, + 0xb7, 0xc6, 0xab, 0x7a, 0xf0, 0x23, 0xaf, 0xae, 0xce, 0xe9, 0x06, 0xb9, 0x3f, 0x16, 0x5f, 0x8a, + 0x2b, 0x0a, 0xb6, 0x8d, 0x8b, 0xcb, 0x9b, 0x91, 0x64, 0x84, 0x94, 0xdd, 0x66, 0xb2, 0xd2, 0x61, + 0x75, 0x31, 0x00, 0xea, 0x3a, 0x87, 0x0e, 0x63, 0xd3, 0x92, 0xc1, 0x9c, 0xf2, 0x73, 0x60, 0x69, + 0xfb, 0x0b, 0x25, 0x23, 0xff, 0x80, 0x6b, 0xec, 0xa0, 0x27, 0x9f, 0x18, 0x4a, 0x08, 0xda, 0xcf, + 0x4d, 0xdc, 0x19, 0xc9, 0x81, 0x81, 0x7a, 0xbe, 0x5b, 0x3a, 0x34, 0x29, 0x33, 0xe0, 0x38, 0x1d, + 0x52, 0xa1, 0x45, 0x64, 0x83, 0xe8, 0xd0, 0xf4, 0x0c, 0x4b, 0xae, 0x19, 0x81, 0xb2, 0xa2, 0x13, + 0xa5, 0xd2, 0x74, 0xef, 0x92, 0x85, 0x31, 0x9a, 0xd9, 0x86, 0xcc, 0xb3, 0xaa, 0x85, 0x55, 0x9a, + 0x5f, 0x72, 0x50, 0xe1, 0xf1, 0x58, 0xe0, 0x84, 0x2b, 0x37, 0x03, 0xb1, 0xce, 0x38, 0x3d, 0xef, + 0xa3, 0x6d, 0x56, 0xd2, 0xca, 0xcb, 
0x3f, 0xf3, 0x71, 0xc4, 0xec, 0xdf, 0x39, 0xdf, 0x6c, 0xe5, + 0x12, 0x22, 0xb5, 0x9b, 0x60, 0x2c, 0xd7, 0x8a, 0x89, 0xd1, 0xac, 0x1f, 0xfa, 0xe6, 0x31, 0xdb, + 0xcb, 0x32, 0x27, 0xb5, 0x0b, 0x7d, 0x06, 0x8f, 0xcb, 0x81, 0x90, 0xc8, 0x63, 0x5a, 0x70, 0x53, + 0x2b, 0x96, 0x88, 0xbd, 0x7f, 0xbb, 0xf7, 0x30, 0xf2, 0xea, 0xbf, 0x9b, 0x3e, 0x3d, 0x52, 0x51, + 0x1c, 0x38, 0xf0, 0x39, 0x93, 0x1b, 0xa5, 0x7c, 0x62, 0xe3, 0xdc, 0xcb, 0xed, 0x40, 0x8f, 0xe7, + 0x74, 0xc5, 0x2c, 0xed, 0xe2, 0x1e, 0x56, 0xd5, 0x4d, 0xc3, 0xab, 0x62, 0x79, 0x27, 0x32, 0x5f, + 0x1a, 0x3e, 0x63, 0x78, 0xef, 0x58, 0xdf, 0x79, 0x4b, 0x09, 0xd5, 0xc2, 0x84, 0x3b, 0x4a, 0x50, + 0x3e, 0x2d, 0x10, 0xa8, 0x7d, 0x48, 0x76, 0xdb, 0x77, 0xea, 0xc6, 0x6b, 0x65, 0xea, 0x11, 0x1f, + 0x7b, 0xbf, 0xcb, 0x40, 0x4c, 0xa8, 0x2d, 0x95, 0x01, 0xa4, 0x17, 0x29, 0xc9, 0xd1, 0x88, 0xa1, + 0xa2, 0x4e, 0x4f, 0xc1, 0x7c, 0x5b, 0x16, 0x03, 0x5b, 0x37, 0x3b, 0x44, 0xac, 0x89, 0x77, 0xc2, + 0x63, 0x79, 0x36, 0x53, 0xfe, 0x44, 0x27, 0xf1, 0xba, 0xe5, 0x9a, 0x7d, 0x2f, 0xb1, 0xb4, 0xcb, + 0x9c, 0xc8, 0x52, 0xdb, 0x4d, 0xe9, 0x43, 0x4a, 0xb7, 0x52, 0x6d, 0x80, 0xce, 0x91, 0x24, 0x7f, + 0x34, 0x60, 0x8e, 0xa3, 0xbe, 0x66, 0xfa, 0x13, 0x16, 0x43, 0x13, 0xc5, 0xb5, 0xbf, 0xe9, 0x79, + 0x39, 0xfd, 0x35, 0x6e, 0xc8, 0x15, 0x63, 0x2e, 0x72, 0xea, 0xcf, 0xab, 0x63, 0x5b, 0xc7, 0xa8, + 0xba, 0x39, 0x1a, 0x16, 0x4d, 0x4d, 0xb3, 0x87, 0x6e, 0x7d, 0x19, 0xac, 0xd4, 0x46, 0x75, 0xf3, + 0x74, 0x09, 0xcc, 0xdc, 0x76, 0x86, 0xcb, 0x33, 0xfd, 0x60, 0xf0, 0x13, 0x89, 0xe2, 0x2e, 0x9e, + 0x38, 0x3a, 0xe5, 0x78, 0x67, 0x05, 0xc9, 0x99, 0xa5, 0x46, 0x4a, 0x23, 0x47, 0x43, 0x43, 0x8a, + 0x49, 0xa4, 0x51, 0xbf, 0xa0, 0x68, 0x93, 0xa8, 0xf4, 0x60, 0xaa, 0x8c, 0x98, 0x15, 0xfd, 0x15, + 0xc2, 0x73, 0x03, 0xb2, 0x52, 0x32, 0xe9, 0x63, 0x59, 0x91, 0x07, 0xd8, 0xa0, 0x1c, 0xc7, 0xf8, + 0x5b, 0x48, 0x8a, 0xa1, 0xf5, 0xdb, 0xc4, 0xec, 0x1a, 0xfa, 0xdb, 0x31, 0xc9, 0x4f, 0x79, 0x16, + 0xda, 0xe7, 0x69, 0x8c, 0x71, 0xa3, 0x3e, 0xa7, 0x67, 0x35, 0x52, 0xdb, 0xd6, 0x7c, 0x33, 0x25, + 0x27, 0x47, 0x43, 0x24, 0x58, 0x4a, 0xb2, 0x94, 0x95, 0xf9, 0x3c, 0x26, 0xa4, 0x83, 0x1a, 0x85, + 0xf4, 0xbd, 0xe8, 0xad, 0x8d, 0x4b, 0xdb, 0xe8, 0x85, 0xf0, 0x78, 0xdd, 0x68, 0x88, 0x6d, 0xa2, + 0x82, 0x32, 0x78, 0x07, 0xd2, 0xa9, 0x5d, 0x63, 0x57, 0xd6, 0x95, 0x9b, 0x06, 0x7a, 0x38, 0x70, + 0x56, 0x50, 0xf8, 0xd6, 0xfc, 0xca, 0xf0, 0xb4, 0x91, 0x63, 0xea, 0x3b, 0x6a, 0x1a, 0xe3, 0xeb, + 0x79, 0x27, 0x71, 0xbd, 0x05, 0xda, 0xbb, 0xdd, 0x20, 0x47, 0xd1, 0x9d, 0xb0, 0x5e, 0x94, 0xff, + 0xae, 0xf6, 0x93, 0x8e, 0xfd, 0x20, 0x86, 0x40, 0x49, 0xf9, 0x0f, 0x04, 0x8c, 0x9e, 0x73, 0x73, + 0xcc, 0x3c, 0xee, 0xd0, 0xc4, 0xc0, 0x5d, 0xe5, 0xae, 0xf2, 0xef, 0xd7, 0x74, 0x4d, 0xf3, 0x58, + 0x86, 0xb7, 0xdc, 0x63, 0x7e, 0xd9, 0xb5, 0x32, 0xd2, 0x9b, 0xa2, 0xfe, 0x87, 0x05, 0xe8, 0x88, + 0xf5, 0x1a, 0x6a, 0xd4, 0x55, 0x93, 0x73, 0xa3, 0x98, 0xec, 0x0b, 0xf3, 0xdf, 0xac, 0x8a, 0xeb, + 0x0a, 0xb2, 0x9d, 0x69, 0xb2, 0x88, 0x83, 0xe8, 0xb6, 0xe5, 0x53, 0x83, 0x9e, 0x58, 0x02, 0xef, + 0x1e, 0x66, 0x3e, 0x9f, 0xa5, 0x6f, 0x7d, 0xf1, 0x0f, 0x6a, 0x86, 0x6e, 0xf2, 0x19, 0xe4, 0xa3, + 0x95, 0x28, 0x46, 0x7e, 0x33, 0xe3, 0x28, 0x46, 0x78, 0x42, 0x68, 0xdf, 0x00, 0x31, 0x3e, 0x11, + 0xe1, 0x16, 0x16, 0xf5, 0x69, 0xc7, 0x37, 0xe2, 0xdf, 0xcf, 0xe3, 0x08, 0x09, 0x22, 0xd0, 0x36, + 0x25, 0x47, 0xc9, 0x99, 0xd6, 0x13, 0x42, 0x3b, 0x28, 0xac, 0x27, 0x9d, 0xcb, 0x0b, 0xd4, 0xc5, + 0x26, 0x7e, 0x51, 0x7d, 0x90, 0x8d, 0xe6, 0x87, 0x4e, 0xb1, 0xa0, 0xc7, 0x21, 0xea, 0xdf, 0x72, + 0x46, 0x51, 0x37, 0x39, 0x35, 0xdd, 0x85, 0x28, 0x25, 0x31, 
0xa6, 0x27, 0x24, 0x59, 0x41, 0xc9, + 0x65, 0x93, 0x84, 0xff, 0xe5, 0xb0, 0x72, 0x6a, 0x74, 0xed, 0x14, 0x4a, 0xc4, 0x94, 0x01, 0x34, + 0xf0, 0xb5, 0xb5, 0x5d, 0x48, 0xbc, 0xd7, 0x68, 0xec, 0xab, 0x54, 0x53, 0xd9, 0x60, 0xd3, 0xf1, + 0x71, 0xb0, 0xe1, 0x8a, 0x21, 0xf2, 0xb4, 0x14, 0x3b, 0x72, 0x48, 0xef, 0x7f, 0x7c, 0x86, 0x7c, + 0x15, 0xb5, 0x36, 0x93, 0xf3, 0x90, 0x99, 0x73, 0xa0, 0x29, 0x21, 0x7f, 0xa1, 0xdf, 0xb0, 0xa9, + 0x75, 0x30, 0x36, 0x57, 0x95, 0x7a, 0x08, 0xd0, 0x27, 0x10, 0x1c, 0xb4, 0xdb, 0x26, 0x7c, 0x14, + 0xc2, 0x79, 0xd1, 0x77, 0x85, 0x40, 0xcf, 0xb2, 0x17, 0xdf, 0x9d, 0x16, 0x6b, 0x78, 0xc9, 0xde, + 0x2c, 0xe0, 0x9b, 0x44, 0x52, 0xd1, 0xd6, 0x52, 0x1e, 0x68, 0xa3, 0x7e, 0x75, 0xae, 0x54, 0xd7, + 0xb6, 0x4d, 0xa0, 0xb0, 0xbe, 0xc4, 0xdc, 0x28, 0xd0, 0x87, 0xef, 0xf2, 0xd7, 0x4a, 0x13, 0x8e, + 0xd6, 0x60, 0x60, 0x1a, 0x8f, 0x26, 0x28, 0xef, 0x28, 0xe1, 0x94, 0x44, 0x3a, 0x6d, 0x10, 0x83, + 0x11, 0xff, 0xd9, 0x63, 0x1b, 0x9a, 0x59, 0x0b, 0x1c, 0xf8, 0x26, 0x49, 0x14, 0x8e, 0x9d, 0x2b, + 0x7b, 0x32, 0x2d, 0x77, 0xf1, 0x85, 0xf3, 0x77, 0xfc, 0x33, 0x77, 0xf0, 0x58, 0xf4, 0xf1, 0x2c, + 0xfb, 0x1e, 0x95, 0x5d, 0x50, 0xac, 0xd6, 0x1e, 0x97, 0x6b, 0xe3, 0x23, 0xb2, 0x97, 0x26, 0xf5, + 0x04, 0x81, 0xb3, 0x42, 0xda, 0xe3, 0xcf, 0xe6, 0x37, 0x30, 0x0c, 0xec, 0xf8, 0xd9, 0x42, 0xaf, + 0x35, 0xd9, 0x0f, 0x80, 0x5f, 0x4b, 0x2b, 0x19, 0xa9, 0x30, 0xe8, 0x07, 0x9b, 0x1c, 0x13, 0x48, + 0x82, 0xf2, 0xc6, 0x9d, 0x28, 0x98, 0x60, 0xd9, 0xcd, 0xa2, 0xbd, 0x86, 0x99, 0x9b, 0x54, 0x4d, + 0xc8, 0x98, 0x15, 0xbb, 0x39, 0xa1, 0xdc, 0xfe, 0xb2, 0x39, 0x37, 0x9b, 0x2f, 0x69, 0x79, 0xf6, + 0x7d, 0xd4, 0x21, 0x16, 0xb9, 0xba, 0x73, 0x40, 0xb8, 0x46, 0x08, 0xb0, 0xfd, 0x22, 0xee, 0xc2, + 0x73, 0x98, 0x7d, 0x29, 0x14, 0x7d, 0x23, 0xc1, 0x0c, 0x43, 0x10, 0x33, 0x29, 0xdd, 0xfc, 0x07, + 0xd8, 0xc1, 0x0c, 0xae, 0x42, 0x16, 0xe3, 0x08, 0x40, 0xd8, 0x47, 0x8e, 0x2e, 0x81, 0x78, 0x20, + 0xcb, 0x3e, 0x1f, 0x6e, 0x9e, 0x83, 0xff, 0xda, 0x86, 0x1a, 0x94, 0xde, 0x5b, 0x91, 0x8f, 0xe9, + 0x4f, 0x2f, 0x01, 0x1f, 0x5c, 0x9e, 0x3b, 0x2d, 0x16, 0x53, 0xdb, 0xe3, 0xbb, 0x02, 0x35, 0x2e, + 0xf6, 0xec, 0x80, 0xa4, 0xc1, 0x96, 0xb0, 0xa5, 0x06, 0xff, 0x16, 0xbc, 0x57, 0xdc, 0xf4, 0x50, + 0x9e, 0x3a, 0x7d, 0xfd, 0x8e, 0xa9, 0xa7, 0xbd, 0x02, 0xa4, 0xd8, 0x57, 0xa1, 0x68, 0x40, 0x94, + 0xc5, 0xcd, 0x4b, 0x00, 0xd3, 0xe3, 0xce, 0x98, 0x17, 0x7c, 0xcf, 0x50, 0xa7, 0x57, 0xdd, 0x0a, + 0x75, 0xcb, 0xd7, 0x6c, 0xab, 0xfc, 0xf9, 0xfb, 0x47, 0xd4, 0xfc, 0x44, 0x0e, 0xf5, 0x26, 0x12, + 0x56, 0x6f, 0xee, 0xb6, 0xce, 0x5d, 0x37, 0x4e, 0xb8, 0x27, 0xf1, 0x53, 0xbd, 0xbc, 0xf5, 0xd3, + 0x0e, 0x48, 0xdf, 0x97, 0xd1, 0xaa, 0xcb, 0xf5, 0x07, 0xff, 0xd3, 0x3c, 0xd8, 0x98, 0xa8, 0xeb, + 0x6d, 0xc4, 0xe4, 0xab, 0x71, 0xdc, 0x2a, 0x2b, 0xfa, 0x6b, 0xdf, 0x5e, 0x77, 0x09, 0x46, 0xe3, + 0x3d, 0x9f, 0x64, 0xb4, 0x2f, 0x27, 0xb6, 0x23, 0xa2, 0xc9, 0x69, 0x56, 0x65, 0xa4, 0xbc, 0xa6, + 0xc4, 0xb9, 0xb8, 0x43, 0xbd, 0x27, 0x93, 0x3c, 0x87, 0x13, 0x69, 0x0a, 0x47, 0x80, 0x19, 0xfb, + 0x56, 0x6c, 0x25, 0x8e, 0x58, 0xc7, 0x31, 0x48, 0x3f, 0xe1, 0xcf, 0x5e, 0x28, 0x86, 0xeb, 0x0f, + 0x05, 0x82, 0x78, 0x5a, 0x7c, 0x4d, 0x28, 0x17, 0xb6, 0x52, 0x16, 0xba, 0x98, 0x06, 0xb4, 0xf8, + 0xe3, 0x12, 0x21, 0xc3, 0x9e, 0x97, 0xee, 0x5c, 0xea, 0xc4, 0xd9, 0x4d, 0x41, 0x36, 0x62, 0x30, + 0x76, 0xbf, 0x94, 0xc8, 0x1a, 0x22, 0x17, 0x2a, 0xb0, 0x63, 0xd3, 0x42, 0x4b, 0x0b, 0x82, 0xe3, + 0x99, 0xaa, 0xfb, 0xb7, 0xd5, 0xea, 0xdd, 0xae, 0x2d, 0xd7, 0xa9, 0x7b, 0xb8, 0xc0, 0xec, 0x8c, + 0x1f, 0x9e, 0xf0, 0x42, 0xcd, 0x14, 0x95, 0xaf, 0xc7, 0x56, 0xe2, 0x6a, 0xb9, 0x29, 
0xf2, 0x12, + 0x83, 0xa7, 0x8e, 0xc4, 0xd5, 0xc3, 0xb4, 0x36, 0xdc, 0x68, 0x55, 0x17, 0x9e, 0x6c, 0x2e, 0x94, + 0xf3, 0xae, 0xa4, 0x2b, 0x9e, 0x17, 0x75, 0xce, 0xc9, 0x41, 0x34, 0xe0, 0xe4, 0x45, 0xc8, 0x28, + 0x2c, 0xc1, 0xd1, 0x18, 0xf9, 0xee, 0xc5, 0x5b, 0x82, 0x79, 0xc7, 0x67, 0xfa, 0x8f, 0x11, 0xbf, + 0x72, 0x37, 0x9e, 0xf4, 0x68, 0xca, 0x9d, 0x1e, 0x4a, 0xf0, 0xe2, 0x76, 0x05, 0x32, 0xd3, 0xbe, + 0xcf, 0x52, 0xe0, 0x32, 0xea, 0xbb, 0xd5, 0x77, 0x3d, 0x87, 0xdf, 0x6f, 0xaa, 0x56, 0x82, 0xf6, + 0xea, 0x4d, 0x74, 0x5f, 0x3b, 0x35, 0x7e, 0xec, 0x41, 0x88, 0x5d, 0x7c, 0xa7, 0xab, 0x98, 0xe9, + 0x68, 0x91, 0x9b, 0x37, 0x9f, 0xd0, 0xe5, 0x62, 0x6f, 0xb4, 0xa7, 0x63, 0xea, 0xab, 0xb8, 0x69, + 0x07, 0x60, 0x8e, 0xca, 0x33, 0xeb, 0x55, 0xec, 0x70, 0x8b, 0x75, 0x97, 0x42, 0x2b, 0x18, 0x31, + 0x30, 0x29, 0x3e, 0x08, 0x4e, 0xc9, 0x56, 0x89, 0xc3, 0x0e, 0x65, 0xbf, 0x70, 0x8d, 0x88, 0xd6, + 0xc2, 0xbe, 0x46, 0x28, 0x73, 0x33, 0xd2, 0x2a, 0x5a, 0x05, 0x5f, 0x75, 0xf2, 0x1b, 0x40, 0x18, + 0x92, 0x63, 0xa1, 0x11, 0x66, 0xf5, 0xf4, 0x2e, 0x01, 0x48, 0xa0, 0x3f, 0x3f, 0xb1, 0x91, 0x31, + 0x9b, 0x08, 0x64, 0x5a, 0x92, 0xc1, 0x08, 0xc8, 0x89, 0x90, 0xd9, 0x75, 0xc7, 0x15, 0x7a, 0xd1, + 0x28, 0x93, 0xbc, 0x33, 0x1e, 0x0f, 0x09, 0xb3, 0x93, 0xc9, 0x8d, 0xe3, 0x57, 0xcb, 0xd4, 0x8f, + 0x22, 0x60, 0x69, 0x5c, 0xf2, 0xe9, 0xa5, 0xbb, 0xdc, 0xab, 0x8f, 0x0d, 0x3e, 0x86, 0x2d, 0x6e, + 0x9a, 0xb3, 0x0c, 0x09, 0x99, 0xa8, 0xd6, 0x93, 0x77, 0xa2, 0xe6, 0x69, 0x3f, 0xa5, 0x69, 0xaa, + 0xb9, 0xd3, 0xf7, 0x2a, 0xe2, 0x25, 0xc1, 0x15, 0x00, 0x69, 0xaa, 0x6c, 0x61, 0x85, 0x81, 0x63, + 0x3b, 0xbe, 0xa8, 0xb8, 0xcd, 0xcc, 0x2a, 0x2a, 0x06, 0x36, 0x51, 0x34, 0x53, 0xf0, 0x4c, 0x04, + 0xbf, 0xbb, 0xea, 0xdf, 0xf0, 0x4c, 0xa7, 0x04, 0x25, 0x09, 0x1c, 0x7d, 0x02, 0x7d, 0xf8, 0xec, + 0xfb, 0xc7, 0xb1, 0x3b, 0xf1, 0xd2, 0xc4, 0xa9, 0x09, 0x79, 0x7a, 0x20, 0x88, 0xe1, 0x8c, 0x86, + 0x82, 0xfc, 0xe1, 0xb7, 0xa8, 0x1d, 0xea, 0xce, 0xc2, 0x51, 0x12, 0x1d, 0x24, 0xbb, 0x53, 0xb1, + 0x68, 0xaf, 0x2c, 0x7d, 0x09, 0x76, 0x72, 0xab, 0xdf, 0x82, 0x52, 0xe1, 0x3c, 0xf6, 0x11, 0x5a, + 0xda, 0x7f, 0xdc, 0xdd, 0x14, 0x54, 0x7e, 0xcc, 0x2f, 0x08, 0x66, 0xdf, 0x84, 0x14, 0x71, 0x11, + 0x0a, 0x0b, 0x32, 0x5d, 0xa6, 0xaa, 0xa3, 0x07, 0x76, 0x6b, 0xe2, 0xcd, 0x40, 0x95, 0xc6, 0xe2, + 0xa9, 0xc4, 0xbf, 0x50, 0x44, 0x3b, 0x99, 0xeb, 0xd1, 0xd0, 0xa1, 0x31, 0x5b, 0x5f, 0xe0, 0x2f, + 0x85, 0x38, 0xbb, 0x83, 0xb6, 0xd0, 0xc6, 0xbb, 0x6a, 0x92, 0xe2, 0x3e, 0x6f, 0x8a, 0x27, 0xaa, + 0xb2, 0xbf, 0x57, 0x32, 0xc7, 0x65, 0x80, 0x62, 0xf3, 0x71, 0x66, 0x80, 0xfe, 0x1c, 0xfa, 0x82, + 0x67, 0x8a, 0xa3, 0x23, 0x71, 0x8c, 0x38, 0xf5, 0x8f, 0x27, 0x15, 0x99, 0x0e, 0x12, 0xe8, 0x8b, + 0xa9, 0xeb, 0x97, 0xb3, 0xb2, 0xc6, 0x28, 0x0d, 0x8f, 0xd4, 0xf7, 0x74, 0xbc, 0x71, 0xd3, 0xb8, + 0xb7, 0xea, 0x45, 0x78, 0xbb, 0xc5, 0x1e, 0x73, 0xe6, 0x22, 0xff, 0xbc, 0x7f, 0x17, 0xba, 0x7f, + 0xd7, 0x83, 0x42, 0x92, 0xca, 0x4f, 0xb0, 0x63, 0x64, 0xb2, 0xa2, 0xe5, 0xdf, 0x5b, 0x08, 0x7b, + 0x51, 0x6d, 0xc0, 0x7b, 0x4e, 0x49, 0xc6, 0x24, 0x61, 0x99, 0xc2, 0x5a, 0x65, 0xa2, 0x59, 0xfc, + 0xa2, 0x07, 0x9b, 0x63, 0x2d, 0xb0, 0x15, 0xc1, 0xd9, 0xe3, 0x41, 0x08, 0xf1, 0x39, 0xc3, 0x31, + 0xa8, 0x8f, 0xb1, 0x2d, 0x1a, 0x9a, 0x79, 0x7c, 0x36, 0x58, 0x7d, 0x3e, 0x68, 0xfa, 0xc1, 0xdd, + 0x21, 0x7a, 0xed, 0x8a, 0x28, 0x62, 0x7e, 0xc4, 0x56, 0xf0, 0xbf, 0x6c, 0x95, 0xb1, 0x60, 0x83, + 0x92, 0x80, 0x08, 0x7c, 0xf8, 0x51, 0xf1, 0x7c, 0x7a, 0xb7, 0x2c, 0xf4, 0x10, 0x3e, 0x25, 0x21, + 0x8d, 0x1d, 0x94, 0x2c, 0x1a, 0xf8, 0x18, 0x72, 0x2d, 0xe7, 0x1b, 0xfb, 0xb5, 0x7a, 0xfb, 0x3b, + 0xd4, 0x70, 
0xa2, 0x8a, 0xcd, 0x94, 0xf6, 0x5b, 0x38, 0x35, 0x7a, 0x9e, 0x66, 0x8c, 0x10, 0xfe, + 0x13, 0xdc, 0x76, 0x0d, 0xe6, 0x9b, 0xdf, 0xe0, 0x3d, 0x8b, 0x5d, 0xe2, 0x7a, 0x55, 0xde, 0xef, + 0x29, 0xf3, 0x13, 0xcd, 0xc2, 0x0d, 0xc9, 0xee, 0x27, 0x6d, 0xea, 0x5a, 0x80, 0xc6, 0x1e, 0xd5, + 0x8c, 0xa1, 0x33, 0xc1, 0x25, 0xc2, 0x1e, 0xd6, 0x01, 0xe9, 0x4b, 0x72, 0xc6, 0x1f, 0x34, 0x1d, + 0x70, 0x91, 0xbb, 0x0a, 0x2f, 0x5a, 0xa7, 0x38, 0x56, 0xf1, 0xfe, 0x5d, 0x91, 0x2e, 0x82, 0xd6, + 0xf8, 0x69, 0xfd, 0xec, 0x4a, 0x36, 0x22, 0x8e, 0xc7, 0x36, 0xb1, 0x51, 0xd6, 0xd1, 0xa4, 0x43, + 0x0d, 0x5c, 0xd6, 0x04, 0xf5, 0x26, 0x77, 0x7f, 0x1e, 0x3d, 0xeb, 0xe9, 0x26, 0xc0, 0x50, 0x86, + 0x1a, 0x34, 0xd6, 0x61, 0xab, 0xf9, 0xe7, 0x1f, 0xde, 0xca, 0x48, 0x1e, 0x24, 0x77, 0x5c, 0xe3, + 0xf6, 0x6a, 0x1d, 0xd9, 0x28, 0xe2, 0xc0, 0x92, 0xde, 0x05, 0x15, 0x21, 0x46, 0xe4, 0x42, 0xf6, + 0x60, 0x39, 0x2b, 0x06, 0x44, 0xa6, 0x6e, 0x4a, 0xed, 0x4d, 0x4f, 0x05, 0x74, 0xa9, 0x18, 0x22, + 0x03, 0x75, 0xdd, 0x78, 0xaa, 0x6b, 0x73, 0x51, 0x18, 0xbc, 0x48, 0xdd, 0x39, 0x98, 0xd7, 0x4b, + 0x29, 0xa9, 0xc6, 0xf2, 0x42, 0xc1, 0xce, 0x34, 0xa9, 0x7a, 0xa9, 0x10, 0xe4, 0xe5, 0x1f, 0x21, + 0x59, 0xe1, 0x12, 0x8c, 0xab, 0x81, 0xef, 0x21, 0xfa, 0x8f, 0xfc, 0x0d, 0x89, 0xd0, 0xd5, 0xde, + 0x9d, 0xc3, 0x79, 0x42, 0x4d, 0xbc, 0x5e, 0x0e, 0x5b, 0x17, 0xdb, 0xd8, 0x7a, 0xd9, 0x59, 0x30, + 0x6a, 0xd6, 0xf6, 0xb6, 0x4e, 0xbf, 0x0e, 0x7a, 0x99, 0x2b, 0xcf, 0x57, 0x34, 0x82, 0x4b, 0x21, + 0x37, 0x4a, 0xec, 0x29, 0x90, 0xa1, 0x9a, 0x42, 0xaa, 0x19, 0x0f, 0xd1, 0x42, 0xf2, 0xef, 0x00, + 0x38, 0x75, 0x36, 0x92, 0x19, 0x97, 0xdb, 0x06, 0x96, 0xda, 0x7f, 0x44, 0x6f, 0x8a, 0x8b, 0x9c, + 0x68, 0xc4, 0x82, 0x10, 0xf1, 0xfc, 0x36, 0xcb, 0x9a, 0xfb, 0xaa, 0x15, 0xd3, 0xbb, 0x0e, 0x3a, + 0x42, 0xa7, 0x5e, 0x28, 0x1d, 0xd0, 0xd5, 0x1e, 0x3e, 0xd5, 0x6f, 0xe4, 0xb0, 0xca, 0xb0, 0x7d, + 0x13, 0x7a, 0xfb, 0x70, 0xa9, 0xdc, 0xc2, 0x5e, 0x11, 0x5a, 0x64, 0xb7, 0xd9, 0x96, 0xb5, 0x1b, + 0xa4, 0x35, 0x34, 0x4e, 0xe2, 0x1b, 0xcf, 0xa7, 0xec, 0xf6, 0x04, 0x71, 0x9c, 0x62, 0x65, 0x14, + 0x92, 0x1d, 0xe6, 0x6c, 0x04, 0x13, 0x22, 0x68, 0x72, 0x5a, 0x5c, 0x75, 0x24, 0x76, 0x1d, 0x99, + 0x34, 0xac, 0x57, 0x94, 0x3c, 0x3c, 0x56, 0x0a, 0xa1, 0xa9, 0xf9, 0x4f, 0x0e, 0x4f, 0x8f, 0x83, + 0xdb, 0xcc, 0x12, 0x8e, 0x2f, 0x85, 0x92, 0x74, 0x83, 0xcb, 0x44, 0xcc, 0x8d, 0x6f, 0x69, 0xa2, + 0x8c, 0x82, 0xee, 0xb4, 0x79, 0xf2, 0x26, 0x09, 0x12, 0xf9, 0x96, 0x92, 0xa3, 0x4e, 0x85, 0x78, + 0xbc, 0xa1, 0xb8, 0x35, 0x9b, 0x92, 0xa4, 0x11, 0xb5, 0x40, 0xd7, 0xc9, 0xf3, 0xcb, 0x71, 0xc7, + 0x37, 0x26, 0x8d, 0x85, 0xb9, 0x4c, 0x08, 0x93, 0x72, 0x69, 0x6a, 0x84, 0xbf, 0x82, 0xd2, 0xb2, + 0x71, 0xf3, 0x66, 0x29, 0x8a, 0xe3, 0x5d, 0x4f, 0x43, 0xf7, 0x10, 0x22, 0x3a, 0x1c, 0x4f, 0xb2, + 0xf4, 0x3b, 0xda, 0x62, 0xa3, 0x97, 0x05, 0xab, 0x0a, 0x67, 0xcb, 0x94, 0x33, 0x7c, 0xf2, 0x1b, + 0x70, 0x93, 0x49, 0x73, 0x96, 0x67, 0x60, 0xc1, 0x74, 0xda, 0x15, 0x76, 0x0c, 0xb6, 0xef, 0x13, + 0x82, 0x98, 0xe7, 0x90, 0xc8, 0x3f, 0xe3, 0xef, 0x51, 0xca, 0x75, 0x62, 0x4a, 0x16, 0x59, 0xe1, + 0x3b, 0xd2, 0xb7, 0x69, 0xa5, 0xe1, 0x81, 0xe9, 0x98, 0x0f, 0x65, 0x7c, 0x84, 0x30, 0x29, 0x11, + 0x8a, 0xc4, 0x97, 0x60, 0xe2, 0x80, 0x97, 0xba, 0x69, 0x69, 0xdc, 0xb1, 0xb5, 0xe4, 0x2a, 0x14, + 0xd6, 0x75, 0x26, 0xa3, 0xf7, 0xf3, 0x8c, 0x3a, 0xff, 0x2c, 0xa0, 0x8d, 0x16, 0x8b, 0xa7, 0xee, + 0xb3, 0x50, 0xe9, 0xa1, 0x24, 0x0b, 0x43, 0x26, 0x37, 0x87, 0xac, 0xb8, 0x1f, 0x47, 0xee, 0xc0, + 0x18, 0xa9, 0x69, 0x94, 0x73, 0x02, 0x9d, 0x4f, 0xb7, 0xd7, 0xcf, 0xab, 0xc8, 0x0a, 0x29, 0xec, + 0x6a, 0x2c, 0xe4, 0x61, 0x77, 0x6f, 
0xb6, 0x00, 0x17, 0xf4, 0x3a, 0x10, 0xaf, 0x27, 0x6f, 0x5d, + 0xb4, 0x3f, 0xc8, 0x36, 0x9f, 0xdb, 0x92, 0x9b, 0xc4, 0xbc, 0xe5, 0x44, 0x80, 0x31, 0xbe, 0xe8, + 0x2d, 0x36, 0xec, 0x4d, 0x85, 0xc1, 0xb2, 0x08, 0x5b, 0x3f, 0xf3, 0x81, 0xc7, 0x58, 0x57, 0x3c, + 0x37, 0x5b, 0x9f, 0x2f, 0xa1, 0xc8, 0x81, 0x4b, 0x44, 0xf5, 0x2e, 0xd9, 0xce, 0x27, 0x7e, 0xe9, + 0xc3, 0xe1, 0x46, 0x9c, 0x99, 0x73, 0x45, 0xc8, 0x57, 0xbe, 0xe4, 0xeb, 0x11, 0x4e, 0x8d, 0x60, + 0xf6, 0xf1, 0x51, 0x0d, 0x22, 0x39, 0xec, 0x33, 0x04, 0x4b, 0xc7, 0xf9, 0x53, 0x7b, 0x16, 0x2a, + 0xd6, 0xf2, 0xb2, 0x96, 0x01, 0x11, 0x19, 0xbd, 0xc2, 0x0f, 0x8f, 0xfa, 0x82, 0x06, 0x39, 0x60, + 0xb3, 0x48, 0x41, 0x4b, 0xb0, 0x67, 0xd3, 0xc4, 0x15, 0xa4, 0x12, 0xb2, 0x44, 0x7d, 0x26, 0xc3, + 0x42, 0x59, 0xc4, 0x6b, 0xe1, 0xbb, 0x0a, 0xab, 0xd3, 0x80, 0x78, 0x97, 0x97, 0x99, 0x87, 0xfc, + 0x19, 0xd4, 0x40, 0xa7, 0xbe, 0x30, 0x18, 0x56, 0x94, 0x52, 0x5f, 0x1d, 0x95, 0xe3, 0xb8, 0x1b, + 0x41, 0xd5, 0xd0, 0xf7, 0x95, 0x56, 0x42, 0x81, 0xbd, 0xc9, 0x5f, 0x20, 0xb2, 0x6f, 0xa2, 0xe6, + 0x37, 0xcd, 0xbe, 0xff, 0x02, 0x99, 0x22, 0x5f, 0xb0, 0x9f, 0x6b, 0x35, 0x61, 0xd0, 0xaf, 0x10, + 0x3d, 0x34, 0xcc, 0x2e, 0x03, 0xdc, 0x82, 0x43, 0x1b, 0x9b, 0x86, 0xef, 0x03, 0x6c, 0x54, 0x9b, + 0x17, 0x33, 0x60, 0xd2, 0x41, 0xf5, 0xba, 0x40, 0x84, 0xcf, 0x6a, 0x4d, 0x96, 0x4e, 0x3a, 0x70, + 0xd9, 0x01, 0x65, 0xc4, 0xee, 0x35, 0x8b, 0x0b, 0x92, 0x1b, 0x96, 0x5f, 0x60, 0x7c, 0x2b, 0x68, + 0xf6, 0x27, 0x8b, 0xdd, 0xd6, 0x16, 0xf9, 0x8c, 0xfa, 0xc4, 0x47, 0xed, 0x09, 0xaa, 0x6d, 0x1f, + 0x5e, 0x82, 0x6c, 0x02, 0xd2, 0x1d, 0x2e, 0x77, 0xef, 0xad, 0xff, 0xbc, 0xdd, 0x92, 0x8b, 0xf5, + 0x19, 0xd0, 0x1a, 0xa1, 0x23, 0x6a, 0x14, 0xc9, 0x7c, 0x7a, 0x31, 0xb6, 0x52, 0x62, 0x22, 0xb8, + 0x48, 0x3b, 0xc0, 0x4f, 0x3e, 0x2e, 0xa4, 0x1f, 0xc9, 0x38, 0x6a, 0x7a, 0x88, 0x71, 0xb6, 0xda, + 0xf5, 0x58, 0x81, 0xdc, 0xdc, 0xcf, 0xed, 0xa0, 0xde, 0x76, 0x55, 0xeb, 0x5e, 0x67, 0xd0, 0x01, + 0xdf, 0x63, 0xc3, 0x97, 0xc7, 0x83, 0xd2, 0x1c, 0xd0, 0xe0, 0x12, 0x04, 0x58, 0xbd, 0x70, 0xcb, + 0xf2, 0x39, 0x7f, 0xd5, 0xc4, 0x53, 0x2b, 0x1c, 0x92, 0x23, 0x91, 0x8f, 0x5b, 0x98, 0x6a, 0xce, + 0x61, 0xd7, 0xea, 0x98, 0xea, 0x54, 0x1f, 0xab, 0xf5, 0xd5, 0x2b, 0xd5, 0x25, 0x61, 0x14, 0x59, + 0xb2, 0x7d, 0xa1, 0x38, 0x6e, 0xf2, 0xfd, 0x9d, 0x74, 0x55, 0xe5, 0x58, 0x8e, 0x2b, 0x48, 0xd5, + 0xd4, 0xbc, 0x29, 0xc6, 0x53, 0x9d, 0x8a, 0xb5, 0x7f, 0xd1, 0x5e, 0xb1, 0x74, 0x0c, 0x12, 0xa2, + 0x69, 0xd9, 0x30, 0xe7, 0xc5, 0x6c, 0xf3, 0xcd, 0xa5, 0xbf, 0x7d, 0xf4, 0x63, 0x0e, 0x64, 0x73, + 0x80, 0xab, 0x75, 0x12, 0x1e, 0x16, 0x60, 0x0d, 0xc2, 0x0f, 0x57, 0x37, 0x21, 0xd3, 0xa1, 0x83, + 0x3a, 0x5e, 0x9b, 0x12, 0x73, 0x59, 0x93, 0x04, 0x1f, 0x52, 0xaf, 0xe4, 0x8c, 0x80, 0x68, 0x88, + 0x36, 0xbb, 0x25, 0x78, 0x01, 0xac, 0x3e, 0xca, 0x03, 0xe8, 0xb8, 0x07, 0xfd, 0xb3, 0xab, 0x3f, + 0x03, 0x7b, 0xef, 0x24, 0x6d, 0xed, 0x61, 0x2a, 0xe6, 0x48, 0x56, 0x86, 0xa1, 0xa9, 0x7b, 0xfc, + 0x9b, 0xed, 0x45, 0xbd, 0x92, 0xf9, 0x4f, 0xe3, 0x68, 0xc9, 0x73, 0xc5, 0xf0, 0x5f, 0xdb, 0x4a, + 0x4a, 0xce, 0x5b, 0xa7, 0x6e, 0xf5, 0x98, 0xf4, 0x6e, 0xf1, 0x13, 0xf2, 0xfa, 0xff, 0xb9, 0x0f, + 0x0e, 0x63, 0x88, 0xd0, 0x84, 0xcf, 0x08, 0xeb, 0x54, 0x0e, 0x3e, 0x50, 0x54, 0x94, 0x08, 0xf0, + 0x87, 0xd9, 0x66, 0x0b, 0xaa, 0x36, 0xd3, 0xa9, 0x54, 0x9e, 0x47, 0x36, 0x1f, 0x71, 0x71, 0x68, + 0x5b, 0x28, 0xc8, 0xa8, 0x35, 0x6e, 0x69, 0x9d, 0xeb, 0x2f, 0xc6, 0x9f, 0x4b, 0x7e, 0x84, 0xa8, + 0xad, 0xf1, 0x1f, 0x96, 0x60, 0x5d, 0x15, 0xba, 0x2b, 0x08, 0x87, 0x68, 0x7e, 0x72, 0xe7, 0x41, + 0x36, 0x1b, 0x8f, 0x9e, 0x75, 0xb5, 0x18, 0x7c, 0x7f, 0x16, 
0x22, 0xe7, 0x98, 0x1e, 0xc4, 0x09, + 0xd5, 0x78, 0xad, 0x9e, 0x90, 0xe3, 0xcf, 0xf6, 0x22, 0x03, 0xc6, 0x82, 0x8b, 0x13, 0x5b, 0xd5, + 0xc1, 0x02, 0xbf, 0xe8, 0x40, 0xa1, 0x58, 0xb0, 0x13, 0xd3, 0x23, 0xc4, 0x17, 0x78, 0x91, 0x5e, + 0xb3, 0xa2, 0xa1, 0x6f, 0x44, 0xee, 0x15, 0x26, 0xdf, 0x5d, 0x84, 0xc2, 0x42, 0xc7, 0x3a, 0xf9, + 0xcd, 0x99, 0xbe, 0x4a, 0x2c, 0x11, 0x9a, 0xd5, 0x70, 0x71, 0xd1, 0x4b, 0x34, 0x54, 0xce, 0x59, + 0x9e, 0x44, 0x7f, 0x16, 0x97, 0x19, 0x27, 0xaf, 0x26, 0x23, 0x0b, 0xd0, 0x7d, 0xdc, 0xa4, 0xfa, + 0x0a, 0xcc, 0xee, 0x40, 0x97, 0xe7, 0xf9, 0xc3, 0xf6, 0xe5, 0xcf, 0x00, 0xcc, 0xa7, 0x04, 0x27, + 0xb6, 0x82, 0xd1, 0xdc, 0x7b, 0x47, 0x0e, 0x3e, 0xca, 0x92, 0x8c, 0x2d, 0xd7, 0xab, 0x58, 0x1b, + 0x8c, 0x7b, 0x56, 0x16, 0x6c, 0x7f, 0xe3, 0x0a, 0x90, 0x38, 0xfb, 0x9d, 0xba, 0xeb, 0x2f, 0x8d, + 0x15, 0x6a, 0xa6, 0x95, 0xaa, 0x72, 0x2f, 0xa4, 0xde, 0xe5, 0x49, 0x17, 0x0c, 0xa9, 0x5d, 0x97, + 0x58, 0x06, 0x1a, 0x82, 0x28, 0xde, 0xe3, 0xab, 0x15, 0xbd, 0x1d, 0xe7, 0x8a, 0x21, 0xfe, 0x97, + 0x5b, 0xb3, 0xf7, 0x9c, 0x06, 0x1d, 0xd3, 0xb7, 0x27, 0xb4, 0xd7, 0x65, 0xbf, 0x05, 0x37, 0x3d, + 0xbb, 0xc2, 0xc4, 0xa9, 0x38, 0x57, 0xd1, 0xf2, 0x2b, 0x67, 0x71, 0x19, 0x33, 0x4b, 0xf5, 0x91, + 0x87, 0x87, 0xdf, 0x96, 0x82, 0x72, 0x1e, 0xcd, 0xfe, 0x0c, 0x95, 0x95, 0x85, 0xe5, 0xc2, 0x8b, + 0x29, 0x1e, 0xb7, 0xb4, 0x81, 0xb7, 0xfb, 0x71, 0x05, 0x3e, 0xdd, 0x84, 0x4a, 0x83, 0x39, 0xe6, + 0xe3, 0x51, 0x18, 0x47, 0xf5, 0x3a, 0xbe, 0x4c, 0x1e, 0x50, 0x4d, 0x12, 0xc6, 0xee, 0x13, 0xc1, + 0x24, 0x14, 0x39, 0x5e, 0x21, 0x01, 0xe7, 0x06, 0x58, 0x80, 0x12, 0x13, 0x8b, 0x55, 0x5f, 0x17, + 0xb7, 0x83, 0x31, 0x20, 0xca, 0xcb, 0x8a, 0x40, 0x79, 0xce, 0x87, 0xb6, 0x11, 0xe9, 0x7d, 0x3d, + 0xfc, 0x3e, 0xdc, 0x4c, 0xd6, 0x2e, 0xa2, 0x83, 0xed, 0x37, 0x24, 0xd5, 0x61, 0xdd, 0x39, 0xba, + 0xa4, 0x7f, 0xe6, 0xe5, 0xb9, 0xaf, 0x71, 0x77, 0xc3, 0xee, 0x6c, 0x11, 0xa5, 0xeb, 0x75, 0xd7, + 0xa1, 0x09, 0xec, 0xcc, 0xa5, 0xf5, 0xad, 0x94, 0x4a, 0x8d, 0x27, 0x60, 0x8e, 0xca, 0x97, 0x73, + 0x16, 0x43, 0x29, 0xab, 0x42, 0x4d, 0xf5, 0xde, 0xdb, 0x22, 0xb4, 0xab, 0x19, 0x39, 0xba, 0xbc, + 0x78, 0x5a, 0xf2, 0x40, 0x1f, 0x92, 0xa0, 0xf5, 0xda, 0xea, 0x43, 0x35, 0xbf, 0x5e, 0xba, 0x15, + 0xfe, 0x28, 0xc2, 0xd1, 0x74, 0x90, 0xe6, 0x76, 0x33, 0x65, 0x49, 0x4b, 0x70, 0xba, 0x46, 0x8a, + 0x8a, 0x3f, 0x71, 0x54, 0xe6, 0xa4, 0x5a, 0x1b, 0x24, 0x29, 0xbb, 0x36, 0xa1, 0x78, 0x27, 0x0d, + 0xd8, 0xee, 0xf0, 0xab, 0x52, 0xf3, 0x49, 0x14, 0xf8, 0x06, 0x92, 0xc3, 0x75, 0x61, 0x67, 0x4f, + 0x5a, 0xb9, 0xe6, 0xb4, 0xb4, 0xdd, 0xd4, 0x3a, 0xd7, 0x32, 0xa2, 0x78, 0x7d, 0x73, 0x97, 0x3b, + 0xb1, 0xf6, 0x88, 0xfa, 0x63, 0x7c, 0xe5, 0x43, 0x75, 0x09, 0x2d, 0xb7, 0xaf, 0xdb, 0x85, 0xa9, + 0x1c, 0xbb, 0x45, 0x8c, 0xe0, 0x19, 0x17, 0xc3, 0x59, 0x43, 0x73, 0xb3, 0x73, 0x11, 0x7f, 0x05, + 0x0b, 0xf0, 0x70, 0x8b, 0xc5, 0x00, 0xda, 0xf3, 0xff, 0xcc, 0x39, 0x9b, 0x99, 0xe4, 0xfb, 0x1c, + 0x3b, 0x5c, 0xa4, 0x77, 0xce, 0x0c, 0x9f, 0xa4, 0x6a, 0x25, 0xee, 0xf9, 0xca, 0x70, 0x41, 0x17, + 0xd3, 0x9f, 0x61, 0xde, 0x83, 0x08, 0x4f, 0x29, 0xfa, 0x11, 0x5f, 0x98, 0xa8, 0x54, 0xf8, 0x79, + 0x4b, 0x1b, 0x28, 0x19, 0x29, 0xf0, 0x3d, 0x07, 0xdd, 0xa2, 0x98, 0xe9, 0x01, 0xa9, 0xa6, 0x05, + 0x0c, 0x6a, 0x65, 0x0f, 0xa6, 0x69, 0xbd, 0x1f, 0x5e, 0x37, 0xda, 0x9f, 0x61, 0xe1, 0xd9, 0x6e, + 0xe1, 0x75, 0xb4, 0x38, 0x58, 0x28, 0x32, 0xa1, 0xe9, 0x44, 0x78, 0xc6, 0x01, 0xaf, 0xc1, 0x13, + 0xc5, 0xce, 0x44, 0x7e, 0x52, 0x9d, 0xb8, 0xc8, 0x4d, 0xea, 0xb5, 0xcc, 0xff, 0x45, 0xd8, 0x2d, + 0x53, 0xef, 0x41, 0x30, 0xf2, 0x4c, 0xcf, 0xd4, 0x02, 0xf7, 0x1b, 0x5c, 0x40, 0xaf, 
0xbe, 0x98, + 0x71, 0x35, 0x9a, 0xf8, 0x64, 0x78, 0xf9, 0xa5, 0x58, 0xe2, 0x15, 0xe1, 0x03, 0xb9, 0x7d, 0x45, + 0xc1, 0x74, 0xa1, 0x4f, 0x8b, 0x7c, 0x37, 0x15, 0x22, 0x2c, 0x1a, 0x0e, 0xd3, 0x68, 0xfd, 0x86, + 0xc2, 0x44, 0x38, 0x0b, 0xc4, 0x57, 0x94, 0xcf, 0xe4, 0x8d, 0x9f, 0x91, 0xd6, 0xfd, 0x0c, 0x86, + 0x52, 0xfb, 0x66, 0x12, 0xbb, 0x48, 0x5f, 0x20, 0x90, 0x37, 0xd9, 0x46, 0x21, 0x6c, 0xbc, 0xac, + 0x8f, 0xe6, 0x2c, 0x78, 0x40, 0x7b, 0x45, 0x9e, 0x6e, 0xe9, 0xc1, 0xeb, 0x26, 0xde, 0x93, 0x5a, + 0x19, 0x9a, 0xc8, 0xa5, 0x89, 0xd9, 0xd7, 0x1a, 0x1e, 0xc2, 0xc7, 0x77, 0x6a, 0x94, 0xd9, 0x6c, + 0xde, 0x45, 0x45, 0xa6, 0xcc, 0x8c, 0xb6, 0xaf, 0xc4, 0x15, 0x92, 0x73, 0x6f, 0xe4, 0x25, 0x93, + 0xa9, 0x62, 0xe1, 0x1d, 0x1d, 0xae, 0x79, 0x40, 0x80, 0xe5, 0xef, 0x49, 0x6c, 0xf0, 0x48, 0x4d, + 0xe4, 0x08, 0x11, 0x63, 0x52, 0xe6, 0xff, 0x53, 0x7b, 0xf6, 0xce, 0x59, 0x1d, 0xd4, 0xd3, 0x3f, + 0x57, 0xd9, 0x10, 0x42, 0xaf, 0x44, 0x90, 0xd6, 0x3b, 0xbe, 0x06, 0xa9, 0xa7, 0x33, 0x55, 0x30, + 0x6d, 0xde, 0x9b, 0x37, 0xea, 0xd7, 0xb2, 0x90, 0x3e, 0xbe, 0x70, 0xe5, 0xdd, 0xfc, 0x07, 0xff, + 0x22, 0x13, 0xf1, 0xb6, 0x3a, 0xeb, 0x64, 0xf0, 0x87, 0x4b, 0xff, 0xd8, 0x76, 0x9e, 0x9e, 0x06, + 0xe9, 0xd1, 0x21, 0x7d, 0x92, 0x93, 0x86, 0x2e, 0xf7, 0x55, 0xde, 0x18, 0x2d, 0x7c, 0xc5, 0x5c, + 0x7e, 0xe5, 0x72, 0xd7, 0x6b, 0x2b, 0x63, 0x69, 0x41, 0xd8, 0x8e, 0x35, 0xa7, 0xa8, 0xa9, 0x50, + 0x31, 0x5f, 0x4f, 0x5f, 0x75, 0x7e, 0x52, 0x8a, 0x6d, 0x30, 0x14, 0x69, 0xe3, 0xdf, 0xf5, 0xe8, + 0xbb, 0x17, 0xb7, 0x47, 0x8e, 0x89, 0x81, 0xe5, 0xc1, 0xa4, 0xa8, 0x18, 0x2d, 0xd9, 0x2b, 0x3b, + 0x89, 0x3d, 0xce, 0xbc, 0x39, 0x7f, 0x2e, 0xc8, 0xec, 0x40, 0xbf, 0x45, 0x84, 0xf8, 0x14, 0xf2, + 0xfa, 0x22, 0x67, 0x92, 0xed, 0x31, 0x40, 0x72, 0xa5, 0x00, 0x3d, 0x14, 0x43, 0x4b, 0xeb, 0x11, + 0x6e, 0x09, 0x8e, 0x98, 0x1f, 0x86, 0x45, 0x07, 0x8a, 0x5b, 0x5c, 0x94, 0x5b, 0xaf, 0x8f, 0x81, + 0xb7, 0x0d, 0xdb, 0x25, 0x07, 0x62, 0xc1, 0x45, 0xf1, 0xaa, 0x6b, 0x21, 0xcb, 0x12, 0x1c, 0x04, + 0x17, 0x03, 0xb7, 0x23, 0x94, 0xe9, 0x1e, 0xb2, 0xe6, 0x40, 0x85, 0x5b, 0x32, 0xd2, 0x9a, 0xaf, + 0x6e, 0x5f, 0xb9, 0x6e, 0xd7, 0xea, 0xd7, 0xbc, 0xf7, 0x8b, 0x99, 0xa5, 0xa8, 0x43, 0x5c, 0x19, + 0x85, 0xd7, 0xd3, 0xf3, 0x95, 0x36, 0x1a, 0x5b, 0x05, 0x46, 0x5d, 0x6d, 0xe1, 0xc1, 0x27, 0xcf, + 0xfb, 0x0b, 0x86, 0x23, 0xda, 0xf6, 0xb6, 0x4c, 0x9b, 0xfe, 0x7a, 0x37, 0x10, 0xe1, 0xe2, 0x0e, + 0x65, 0xe4, 0x98, 0x63, 0xed, 0x7a, 0x2c, 0x27, 0x32, 0x15, 0x90, 0x69, 0x7d, 0x57, 0x32, 0xd6, + 0x2a, 0xbe, 0xc6, 0xb9, 0xee, 0x50, 0x63, 0x4b, 0x21, 0x7a, 0x5c, 0x91, 0xaf, 0xd7, 0xbf, 0x05, + 0xa5, 0xd4, 0x50, 0xdc, 0x96, 0x66, 0xce, 0xe7, 0xe3, 0x84, 0xc0, 0x27, 0x12, 0x9f, 0x29, 0x80, + 0xef, 0x9d, 0x5b, 0xa4, 0xa0, 0xe9, 0x79, 0xf5, 0x59, 0x4d, 0x8e, 0x45, 0xae, 0xd4, 0x99, 0x7e, + 0xd1, 0x90, 0x49, 0x86, 0xee, 0x47, 0x37, 0xfe, 0x9f, 0x3b, 0xb3, 0x85, 0x3d, 0x20, 0x1c, 0xbf, + 0x1c, 0x93, 0x24, 0x17, 0x4e, 0x4e, 0x90, 0xf8, 0x5e, 0x78, 0x81, 0xec, 0xc9, 0x02, 0x4f, 0x32, + 0x27, 0x74, 0xd1, 0xc8, 0xb0, 0xf0, 0xe9, 0xa9, 0x53, 0x52, 0x59, 0xe9, 0xab, 0xf0, 0xbd, 0xc3, + 0x7b, 0x4a, 0x3d, 0xae, 0xce, 0x38, 0x95, 0x6d, 0xe9, 0xdb, 0xc6, 0x76, 0x69, 0x4f, 0xab, 0xcd, + 0x99, 0xf4, 0x3a, 0xb4, 0x18, 0xc5, 0x6a, 0x42, 0x7b, 0x6f, 0xb8, 0x2f, 0xdb, 0xf3, 0xf7, 0x33, + 0x47, 0x07, 0x95, 0xc1, 0xbe, 0x44, 0xb4, 0x78, 0x4a, 0x0a, 0x8b, 0x0e, 0x0a, 0x03, 0xa6, 0x70, + 0x94, 0xa4, 0xcb, 0x39, 0xe1, 0x55, 0xea, 0x87, 0x91, 0x75, 0x3b, 0xb7, 0x8c, 0x04, 0x18, 0x20, + 0x23, 0x95, 0x13, 0xf5, 0x3f, 0xf3, 0x4f, 0x05, 0xb5, 0x56, 0xee, 0x75, 0x32, 0x3e, 0x2e, 0x9e, + 0xd6, 0xba, 
0xcd, 0x2a, 0x83, 0xcd, 0xa0, 0x47, 0x68, 0xc7, 0xef, 0xc7, 0x68, 0xc1, 0xd3, 0x70, + 0x62, 0xac, 0x8d, 0x24, 0xa0, 0x2b, 0xdc, 0xa6, 0x11, 0x4b, 0x32, 0x91, 0x97, 0x3c, 0xe5, 0x52, + 0xdc, 0xc9, 0x92, 0x9f, 0x16, 0x69, 0x9d, 0x2d, 0x54, 0x73, 0xa5, 0x29, 0xf1, 0x73, 0x89, 0x89, + 0x9a, 0xe1, 0xe3, 0xbc, 0xe9, 0x52, 0x30, 0x9c, 0x21, 0x57, 0xad, 0x97, 0x18, 0x3b, 0x79, 0x43, + 0x2f, 0xab, 0x3c, 0xa5, 0xcc, 0x44, 0x55, 0x94, 0xd7, 0x18, 0x94, 0x09, 0x61, 0x3a, 0x12, 0x27, + 0x6d, 0xd6, 0xe9, 0x76, 0xb6, 0x73, 0xf8, 0xd7, 0x7d, 0x74, 0x79, 0xd1, 0x63, 0x45, 0xa8, 0x67, + 0x07, 0x58, 0x68, 0xbf, 0x3b, 0x17, 0xef, 0x63, 0xab, 0x01, 0x5c, 0xd6, 0x4c, 0xf7, 0x2b, 0x37, + 0x24, 0x68, 0x37, 0xb2, 0x94, 0x64, 0xa0, 0x98, 0xce, 0xfb, 0x7d, 0x6e, 0x18, 0xd9, 0x9b, 0xb8, + 0xba, 0xe5, 0x57, 0x51, 0xec, 0xd2, 0xf7, 0x0f, 0x59, 0x6e, 0xa3, 0x82, 0x5a, 0x9c, 0xa7, 0x52, + 0x77, 0xb3, 0xc1, 0x47, 0xc4, 0xba, 0x88, 0x00, 0x4c, 0xd5, 0xa6, 0x38, 0x8a, 0xce, 0xde, 0xa9, + 0x95, 0x94, 0xf7, 0x8b, 0x1b, 0x77, 0xd4, 0x8c, 0x82, 0xb2, 0xc9, 0xa8, 0xb5, 0xc6, 0xb9, 0x21, + 0x21, 0xaf, 0xe8, 0xe5, 0xe1, 0xc7, 0x0e, 0xb5, 0xf6, 0x6c, 0xf0, 0xcd, 0x37, 0x23, 0xad, 0xf3, + 0xb5, 0x49, 0x3d, 0x30, 0x8f, 0xe4, 0xb3, 0x6e, 0x80, 0xe8, 0x3b, 0xdc, 0x62, 0x6a, 0xd6, 0x52, + 0xf9, 0xe6, 0x6e, 0x60, 0x1b, 0xd0, 0x50, 0x96, 0xc1, 0x6b, 0xc3, 0x5b, 0x11, 0xb9, 0xfd, 0x13, + 0x48, 0xa3, 0xa8, 0x4b, 0xe4, 0xd0, 0x7d, 0xc2, 0x7d, 0x11, 0x65, 0x99, 0x9e, 0xa6, 0x67, 0xf6, + 0x99, 0x7c, 0xa3, 0x1c, 0x64, 0xf6, 0x85, 0x18, 0x60, 0x4a, 0xc3, 0x6d, 0xf0, 0x83, 0x5c, 0x3e, + 0x86, 0xa1, 0xce, 0xe7, 0xb7, 0x05, 0xae, 0x06, 0x04, 0x17, 0x54, 0x81, 0xcc, 0x59, 0xf6, 0xfc, + 0xe0, 0x7c, 0xc7, 0xd4, 0xf3, 0x49, 0x75, 0xff, 0x7b, 0x0a, 0x27, 0xee, 0x62, 0xb2, 0x8a, 0xb4, + 0xc3, 0x47, 0xe5, 0x8f, 0x51, 0x91, 0x80, 0xaa, 0x81, 0xcc, 0xd5, 0x61, 0x01, 0xc4, 0x2f, 0x06, + 0x80, 0x31, 0x35, 0x43, 0x22, 0x83, 0x6e, 0xbf, 0x75, 0x6b, 0xb7, 0x5e, 0x20, 0x7e, 0x4a, 0xec, + 0x93, 0x61, 0xda, 0x89, 0xae, 0x37, 0x49, 0x7b, 0xba, 0xf6, 0xb3, 0x44, 0xaf, 0xcf, 0xf4, 0x4e, + 0x7f, 0xc3, 0x41, 0x8c, 0xe8, 0x22, 0x92, 0x12, 0x74, 0xdc, 0xb9, 0x2e, 0x9f, 0xb9, 0x65, 0x85, + 0x4c, 0x6d, 0x80, 0x81, 0x19, 0xd1, 0x13, 0x90, 0xe0, 0x10, 0x95, 0xfa, 0x07, 0x09, 0x95, 0xd6, + 0x8a, 0x34, 0x0f, 0x2d, 0x27, 0x8a, 0xb6, 0x2c, 0xe5, 0x50, 0xcc, 0x4b, 0x49, 0x0b, 0x5d, 0xe4, + 0xda, 0xe7, 0x78, 0x45, 0x67, 0xf2, 0xc6, 0xb6, 0xf6, 0x62, 0x36, 0xfe, 0x3c, 0xa7, 0x0b, 0x37, + 0x16, 0x1c, 0xea, 0xb1, 0x59, 0x9a, 0x8f, 0xab, 0x2d, 0xcc, 0x02, 0x6e, 0x03, 0xf2, 0x0b, 0xb5, + 0xf7, 0x3a, 0x98, 0xcf, 0x98, 0x62, 0xa6, 0x13, 0xee, 0x71, 0x24, 0xfc, 0x3a, 0x69, 0xd0, 0x4c, + 0x67, 0x39, 0xa5, 0xf9, 0x24, 0x3f, 0x21, 0xd2, 0xa2, 0xdf, 0xbf, 0xe7, 0x3a, 0xa7, 0x05, 0x4d, + 0x9a, 0x52, 0xc9, 0x14, 0x92, 0x3b, 0xbb, 0x13, 0xfa, 0x96, 0x81, 0xc6, 0x71, 0x00, 0x82, 0x11, + 0xec, 0xfb, 0xe6, 0xb9, 0xe6, 0xf4, 0x85, 0xe0, 0xd7, 0x77, 0xca, 0x0d, 0x64, 0x3b, 0x65, 0xd2, + 0xe9, 0x47, 0x95, 0x33, 0x4d, 0xdd, 0x6e, 0xfa, 0x00, 0x87, 0xee, 0xf1, 0xe4, 0xad, 0x7a, 0x80, + 0x96, 0xbc, 0x0f, 0x6c, 0xfb, 0xf1, 0x90, 0x02, 0xee, 0xbc, 0x50, 0x37, 0x62, 0xad, 0x9b, 0xe4, + 0x5d, 0xed, 0xb3, 0x58, 0x0b, 0x91, 0x9a, 0x74, 0x37, 0x61, 0x12, 0x1c, 0x7e, 0xce, 0x34, 0xc2, + 0x40, 0x00, 0x41, 0xa8, 0xb0, 0xf8, 0x31, 0xdf, 0xd9, 0x37, 0x37, 0xa2, 0x75, 0x8a, 0x1c, 0x4f, + 0x8d, 0xce, 0x7e, 0x12, 0x79, 0xe5, 0xa1, 0xb1, 0xe1, 0xe6, 0x3a, 0x7f, 0x32, 0x18, 0x68, 0x65, + 0xa9, 0xef, 0xcb, 0xfb, 0xcb, 0x42, 0xff, 0x7c, 0xaf, 0x8f, 0x9a, 0xc9, 0x04, 0x6d, 0x00, 0xfc, + 0xb5, 0x28, 0x43, 0x5d, 0xae, 0xde, 
0xe3, 0xb8, 0xbd, 0x16, 0xf2, 0x3e, 0xdf, 0xa6, 0x0d, 0x52, + 0xcf, 0x0a, 0xb5, 0x73, 0x73, 0x47, 0xf9, 0x7d, 0x4d, 0x81, 0x6f, 0xf1, 0xcb, 0xed, 0xe5, 0x89, + 0x9a, 0x5d, 0x59, 0x3e, 0x8c, 0x28, 0xfc, 0x61, 0x13, 0x95, 0xc7, 0x56, 0x5f, 0x26, 0x43, 0x2e, + 0x4c, 0xa3, 0xf9, 0x4d, 0x4f, 0x27, 0xcf, 0xb9, 0xb2, 0x45, 0x9a, 0x2c, 0x14, 0x46, 0x19, 0xa6, + 0x65, 0x81, 0x34, 0x9b, 0xe3, 0xe0, 0x24, 0x53, 0xdc, 0x67, 0xe6, 0xd9, 0x60, 0xa5, 0x2a, 0xff, + 0xb7, 0x43, 0xc2, 0x94, 0x10, 0xf9, 0x2c, 0x5c, 0x5a, 0x45, 0x6f, 0x04, 0xf5, 0xbd, 0xef, 0x3c, + 0x06, 0xdc, 0x3c, 0xe1, 0x0a, 0x43, 0xc3, 0x21, 0xba, 0x76, 0x1a, 0x49, 0x59, 0x2e, 0x89, 0x65, + 0xfb, 0x31, 0x3c, 0xca, 0x6b, 0x76, 0x98, 0x6e, 0x86, 0x30, 0xa4, 0x4d, 0xcc, 0xe2, 0xe7, 0xf4, + 0x93, 0x3e, 0x0d, 0x2f, 0x19, 0xf9, 0xa2, 0x5d, 0x27, 0x7b, 0x4d, 0x86, 0x0a, 0x3b, 0x84, 0x60, + 0xfc, 0x7d, 0xf2, 0xb7, 0x65, 0xe8, 0xcf, 0xc9, 0xad, 0x3b, 0xfd, 0x0a, 0x40, 0x91, 0x19, 0x5a, + 0x36, 0x5a, 0x0a, 0xda, 0x40, 0xe1, 0xd7, 0xb9, 0x97, 0xca, 0xeb, 0x80, 0xb1, 0x2e, 0x09, 0x26, + 0xfe, 0xf0, 0xd7, 0x4e, 0x9f, 0x9b, 0x26, 0xe2, 0x51, 0xbb, 0xb4, 0x12, 0xce, 0x53, 0xd7, 0xb6, + 0x26, 0x86, 0x6b, 0x5b, 0xd8, 0xcf, 0xa7, 0x3d, 0xb1, 0x45, 0xff, 0xeb, 0xf5, 0x7f, 0xf5, 0x49, + 0xfa, 0x2f, 0xff, 0xf2, 0x2f, 0xff, 0xf2, 0x2f, 0xff, 0xf2, 0x2f, 0xff, 0x53, 0xfc, 0x1f, 0x08, + 0x0f, 0xca, 0xf5, 0x00, 0x26, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 9728, // uncompressed data size (bytes) + 7415, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x03, 0x62, 0x76, 0x08, 0x13, 0x4c, 0xc8, 0x42, 0x69, + 0x20, 0x00, 0x00, 0x64, 0x80, 0xce, 0x2d, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_dbg_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 384 +// COMPRESSED SIZE (bytes): 397 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_sig_dbg_data[] = +{ + 0x01, 0x80, 0x01, 0x7f, 0xfe, 0x16, 0xc5, 0x11, 0xb9, 0xdd, 0xe0, 0xf6, 0xae, 0x5a, 0x47, 0x43, + 0xb6, 0x39, 0x2e, 0xdd, 0x01, 0xa7, 0x5d, 0x5f, 0x05, 0x75, 0xc8, 0xa8, 0xc9, 0xa5, 0xad, 0xde, + 0xf5, 0xe5, 0xa7, 0x0e, 0xfd, 0xd7, 0xa8, 0x85, 0xee, 0x53, 0xce, 0x9a, 0x04, 0x78, 0x5f, 0x79, + 0x7b, 0xd6, 0x56, 0xcf, 0xb0, 0x25, 0x23, 0x45, 0xe8, 0x78, 0xa8, 0x9b, 0x72, 0x31, 0xe4, 0x3c, + 0x16, 0x56, 0x5a, 0x01, 0x2c, 0xe9, 0xf6, 0xaa, 0x89, 0xb2, 0xf7, 0xb1, 0x1c, 0xa4, 0xeb, 0xb8, + 0xa1, 0x61, 0x72, 0xc8, 0xf1, 0xa0, 0x18, 0x41, 0x20, 0x26, 0xcf, 0xb7, 0xeb, 0x89, 0x6a, 0x44, + 0xb3, 0x0c, 0xe3, 0xee, 0x30, 0xf1, 0xd1, 0x2d, 0x34, 0xb5, 0x23, 0x69, 0xfc, 0x6c, 0xc6, 0xa2, + 0x27, 0x2f, 0xa5, 0x24, 0x05, 0x7a, 0x21, 0x6a, 0x18, 0xf2, 0x50, 0xd9, 0xfc, 0x0f, 0x92, 0xe1, + 0x96, 0xf9, 0x3d, 0xb4, 0x90, 0x14, 0x95, 0xb0, 0xd7, 0x04, 0xf1, 0xd4, 0xa3, 0xd0, 0x9e, 0x3f, + 0x56, 0x28, 0x6d, 0x9e, 0x87, 0x9d, 0xd7, 0xd0, 0x43, 0x0a, 0x0d, 0xce, 0x9e, 0xfe, 0xe8, 0x4f, + 0x4d, 0x62, 0xaa, 0xcd, 0xbb, 0x23, 0xc6, 0x0a, 0x31, 0xa3, 0x0b, 0x53, 0x68, 0x68, 0xe0, 0xbd, + 0x63, 0x7d, 0xa4, 0x01, 0xda, 0xd0, 0x9b, 0x0b, 0x0e, 0x55, 0x14, 0xfb, 0x16, 0x55, 0x6c, 0x0f, + 0x21, 0x9f, 0xc3, 0xb8, 0x44, 0x89, 0x8a, 0x4e, 0x71, 0x86, 0x3f, 0x83, 0x3b, 0x04, 0x65, 0xe5, + 0x46, 0xd6, 0x26, 0x54, 0x39, 0x4f, 0x51, 0xed, 0x79, 0xf7, 0x55, 0x3a, 0xe2, 0xcb, 0x4d, 0x9c, + 0xdf, 0x52, 0x25, 0xd8, 0x6a, 0x3a, 0xf9, 0x6e, 0x5f, 0x47, 0x8f, 0xb7, 0xa6, 0xac, 0xf8, 0x99, + 0x53, 0x52, 0xc6, 0xc9, 0xd8, 0xda, 0x49, 0x77, 0xcc, 0x76, 0x15, 0x2b, 0xd4, 0x45, 0xc7, 0xfb, + 0x21, 0xa9, 0x0e, 0x4a, 0x82, 0x23, 0x16, 0x22, 0x2d, 0xc0, 0x14, 0x96, 0xa2, 0x60, 0xa0, 0xf5, + 0xa6, 0xa8, 0x87, 0x09, 0xbc, 0xc7, 0x8e, 0x60, 0xb5, 0x3b, 0x65, 0xeb, 0xb4, 0x7b, 0x61, 0xaa, + 0xc0, 0xe0, 0x78, 0xbd, 0x69, 0xb7, 0x31, 0x91, 0xf9, 0x29, 0x31, 0xe6, 0x5b, 0x62, 0x17, 0xb7, + 0xc2, 0x35, 0x12, 0x30, 0x5f, 0xca, 0xf7, 0xac, 0x90, 0x38, 0x31, 0xc8, 0x81, 0x44, 0xbb, 0xce, + 0xd9, 0x82, 0xc4, 0xfb, 0x8c, 0x9d, 0xdc, 0x42, 0x66, 0xeb, 0x1a, 0xa2, 0xa3, 0x87, 0xad, 0x61, + 0x75, 0xc4, 0x44, 0x36, 0x15, 0x48, 0x53, 0x47, 0xbf, 0xfb, 0xa9, 0x32, 0xbb, 0x5c, 0x71, 0xb7, + 0x01, 0xcb, 0x6a, 0xeb, 0x08, 0x4e, 0x50, 0xc6, 0x8a, 0x92, 0x0e, 0xa0, 0xc0, 0x11, 0xea, 0x81, + 0x72, 0x32, 0x74, 0x70, 0x76, 0x2f, 0x3f, 0x04, 0xc6, 0x99, 0x5d, 0xe0, 0xcb, 0x55, 0xf9, 0x14, + 0x52, 0x5b, 0x6f, 0x7f, 0xb6, 0x2c, 0x6c, 0xa1, 0xb9, 0x80, 0x01, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 384, // uncompressed data size (bytes) + 397, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_sig_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? 
+ NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_prod_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 384 +// COMPRESSED SIZE (bytes): 397 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_sig_prod_data[] = +{ + 0x01, 0x80, 0x01, 0x7f, 0xfe, 0xf8, 0xeb, 0xf9, 0xc0, 0x24, 0x01, 0x91, 0x70, 0x0b, 0x79, 0x99, + 0x44, 0x86, 0x86, 0x99, 0xdf, 0xc1, 0xca, 0x6f, 0xc4, 0x11, 0x3a, 0xb4, 0xed, 0x0c, 0x6f, 0xea, + 0xeb, 0xa3, 0xa5, 0xe9, 0x0a, 0x5a, 0x40, 0xfe, 0x23, 0x29, 0x61, 0x95, 0x00, 0xeb, 0x3a, 0x88, + 0x35, 0xa6, 0xe5, 0x35, 0x73, 0xed, 0x38, 0x32, 0x04, 0x36, 0x9e, 0x3b, 0xd3, 0xa3, 0x04, 0xf8, + 0xc7, 0xf7, 0xb4, 0x58, 0xe2, 0x94, 0x70, 0x00, 0xfa, 0x67, 0x68, 0x5d, 0xab, 0xbc, 0xbd, 0xc5, + 0xa2, 0x33, 0x87, 0x00, 0x2b, 0x54, 0x78, 0xd6, 0x4f, 0xfa, 0xac, 0x36, 0xe0, 0x39, 0xc2, 0xb1, + 0xd5, 0xb4, 0xd1, 0xa3, 0xc8, 0xab, 0x81, 0x74, 0x49, 0xc2, 0x5d, 0xf7, 0xb2, 0x4f, 0x72, 0x64, + 0x03, 0x12, 0xf3, 0xfc, 0xa6, 0x76, 0xf3, 0x86, 0x13, 0xfd, 0x84, 0xb7, 0x64, 0x09, 0xc4, 0x52, + 0x78, 0xfd, 0x26, 0x0c, 0x16, 0x55, 0x3b, 0x1c, 0x1a, 0x86, 0x13, 0xb8, 0xe0, 0xfb, 0x47, 0x19, + 0x45, 0xc8, 0xc7, 0xfe, 0x4b, 0xc4, 0xe1, 0xbc, 0x43, 0xce, 0x97, 0x62, 0xc7, 0xa3, 0x40, 0x9d, + 0xdb, 0xc7, 0xc6, 0x0b, 0x93, 0xdf, 0xaf, 0x63, 0x71, 0x1f, 0x05, 0x38, 0xc3, 0x51, 0x76, 0x1c, + 0x6e, 0x19, 0x86, 0x60, 0xe8, 0xe2, 0xff, 0x9b, 0x72, 0x31, 0xd0, 0x62, 0xde, 0x96, 0x71, 0xf6, + 0x9b, 0xa9, 0x7c, 0xcd, 0x75, 0xac, 0xa3, 0x65, 0x57, 0x43, 0x71, 0x0f, 0xb4, 0x2b, 0x7e, 0xa8, + 0x18, 0x6a, 0x83, 0x04, 0x90, 0xc7, 0xef, 0xe0, 0xdc, 0xf1, 0x02, 0x7f, 0x70, 0x16, 0xe5, 0x4e, + 0xb4, 0xcc, 0x7c, 0x49, 0x30, 0x64, 0x95, 0x72, 0x50, 0x34, 0xe3, 0x9d, 0x11, 0xd9, 0xcc, 0xc9, + 0xb0, 0xd1, 0x78, 0x5a, 0x0f, 0x45, 0x41, 0x8d, 0xb3, 0xa9, 0xeb, 0x4e, 0x3b, 0xdd, 0xe9, 0x3d, + 0x28, 0xd8, 0x46, 0xfa, 0x6c, 0xcf, 0x3f, 0x09, 0xa7, 0x2c, 0xde, 0x44, 0x14, 0xd1, 0x83, 0x54, + 0xe8, 0x6c, 0xa6, 0xf3, 0x48, 0xe2, 0xad, 0xa6, 0xcb, 0x57, 0x46, 0x04, 0xdd, 0xf0, 0xaf, 0x03, + 0x9a, 0x69, 0x7f, 0x64, 0xe1, 0xe2, 0x05, 0x81, 0x1a, 0xb1, 0x79, 0x3b, 0x95, 0x12, 0x42, 0xb4, + 0x44, 0x3d, 0x62, 0x06, 0x4f, 0xc7, 0xe8, 0xdc, 0x58, 0xc2, 0xf2, 0x47, 0x86, 0x53, 0xf6, 0xc8, + 0x74, 0x14, 0xb6, 0x8b, 0x7e, 0x1b, 0x36, 0xfd, 0x70, 0x55, 0x84, 0xd6, 0x38, 0xd3, 0x65, 0x2b, + 0xf0, 0xac, 0x1d, 0x8b, 0x0b, 0x04, 0x8c, 0x06, 0x28, 0x2d, 0x23, 0x67, 0x2f, 0x24, 0x0c, 0x3c, + 0x5e, 0xf6, 0x28, 0x8c, 0x74, 0x19, 0x2b, 0xcd, 0x64, 0xcb, 0x7e, 0x91, 0x91, 0x30, 0x59, 0x55, + 0x6b, 0x22, 0x86, 0xf2, 0x83, 0xe3, 0xf1, 0x6e, 0x1d, 0x36, 0x30, 0x62, 0xd8, 0x8b, 0xe8, 0xe8, + 0xe4, 0xce, 0x6b, 0x2b, 0x9c, 0x32, 0x5e, 0xcc, 0x39, 0x80, 0x01, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 384, // uncompressed data size (bytes) + 397, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_sig_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_ga100_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_patch_loc_data[] = +{ + 0x00, 0x20, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_ga100_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_ga100_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA100("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/reload/g_booteruc_reload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA100_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA100_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA100_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterReloadUcode_GA100 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA100_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_GA100(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterReloadUcode_GA100; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_GA102.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_GA102.c new file mode 100644 index 000000000..8d62142a8 --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_GA102.c @@ -0,0 +1,1443 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 9728 +// COMPRESSED SIZE (bytes): 7779 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_image_dbg_data[] = +{ + 0xed, 0xd9, 0x53, 0x77, 0x1d, 0x80, 0xb6, 0x36, 0xe0, 0xa8, 0xb1, 0x6d, 0xb3, 0x61, 0x63, 0xa3, + 0xb1, 0x6d, 0x3b, 0x2b, 0xb6, 0xb9, 0x56, 0x9c, 0xc6, 0xb6, 0x6d, 0x63, 0xc5, 0x46, 0x63, 0xdb, + 0x69, 0x93, 0x34, 0xb6, 0xbf, 0xef, 0x7a, 0xff, 0x81, 0x33, 0xce, 0x38, 0xfb, 0xb9, 0x9b, 0xd7, + 0x73, 0xcc, 0x8b, 0xf7, 0x9d, 0x10, 0x10, 0xff, 0xb7, 0xc5, 0xab, 0xa2, 0x43, 0x75, 0xe1, 0xac, + 0xb4, 0x80, 0x79, 0x2c, 0xf4, 0xfc, 0xaa, 0x79, 0xa6, 0x82, 0x8b, 0xef, 0x9d, 0x49, 0xc1, 0x81, + 0x8c, 0xff, 0xcc, 0x06, 0xa5, 0x9f, 0xb3, 0x5f, 0xc0, 0x27, 0xe0, 0xd4, 0xb1, 0x58, 0xc6, 0xb0, + 0x4e, 0x84, 0x1a, 0xff, 0x34, 0xfb, 0x0b, 0xfa, 0x8d, 0xf6, 0x0d, 0x71, 0x00, 0x51, 0x32, 0x67, + 0x08, 0x42, 0x43, 0xb8, 0x70, 0x63, 0x2d, 0x91, 0xef, 0x7a, 0x79, 0x91, 0xea, 0xd9, 0x70, 0xcd, + 0xc9, 0xe1, 0x3f, 0x03, 0x71, 0x81, 0x24, 0x3c, 0x63, 0xbb, 0x11, 0xf0, 0x09, 0x82, 0xee, 0xb5, + 0x18, 0x2c, 0x9f, 0xc2, 0x60, 0xf4, 0xec, 0x62, 0xa0, 0xd2, 0x01, 0x35, 0x9a, 0xa4, 0x27, 0x22, + 0x3d, 0xb2, 0xe9, 0x0c, 0xaf, 0x2e, 0x7e, 0x43, 0x50, 0xb3, 0xc1, 0xba, 0x96, 0x2b, 0x85, 0x3b, + 0x21, 0x94, 0x2a, 0x7c, 0x2a, 0x41, 0x17, 0xb2, 0xf0, 0x41, 0x6b, 0xd9, 0xed, 0x3d, 0x66, 0x89, + 0xcd, 0x84, 0xf2, 0xdc, 0x2d, 0xac, 0x71, 0x09, 0xb8, 0x5c, 0x10, 0xf0, 0x3a, 0xab, 0x96, 0x15, + 0x00, 0xa1, 0x2e, 0xf9, 0x3c, 0x29, 0x8f, 0x69, 0xc1, 0x03, 0xfb, 0x87, 0xca, 0x31, 0x04, 0x43, + 0x8b, 0x84, 0x3c, 0x8f, 0x01, 0x53, 0x52, 0xfc, 0x81, 0x83, 0x8d, 0xd0, 0x67, 0x1e, 0xda, 0x89, + 0x1d, 0x65, 0xf9, 0x85, 0x18, 0x57, 0xa6, 0x7f, 0x7b, 0x95, 0x07, 0xa5, 0xa5, 0x3b, 0xf6, 0x96, + 0x14, 0xeb, 0x92, 0xcd, 0xa8, 0x53, 0xcd, 0x06, 0x20, 0x4d, 0x46, 0xd6, 0x31, 0x29, 0x58, 0x21, + 0x74, 0x9b, 0xdd, 0x8e, 0xc7, 0xd1, 0xd2, 0xf5, 0x1b, 0x7c, 0x18, 0x1d, 0xa9, 0xac, 0x51, 0xe6, + 0x0d, 0x20, 0x1d, 0x94, 0xb7, 0x9f, 0x3e, 0xf0, 0xa9, 0x49, 0x37, 0xac, 0x3f, 0xa5, 0x52, 0xa9, + 0xd5, 0x53, 0xbc, 0x5e, 0x06, 0xf9, 0x5d, 0xb8, 0x51, 0x36, 0x85, 0x04, 0x17, 0xcb, 0x42, 0x54, + 0x35, 0x01, 0x99, 0xd8, 0x70, 0x02, 0xdf, 0x7c, 0xc7, 0xef, 0xf0, 0x8e, 0x1e, 0x3a, 0x84, 0x60, + 0x4a, 0xd0, 0x48, 0x73, 0xb0, 0x53, 0xc2, 0x33, 0x1f, 0x5c, 0x51, 0xbb, 0x66, 0x88, 0x49, 0x76, + 0xd1, 0x2d, 0x97, 0x8f, 0x9c, 0xc5, 0xf2, 0xc2, 0xef, 0xbb, 0xe3, 0xac, 0xaa, 0x87, 0x16, 0xd5, + 0x1e, 0xf1, 0x79, 0xb3, 0xbe, 0xb0, 0xbf, 0x5d, 0x51, 0xd8, 0x26, 0x1a, 0xe8, 0x0a, 0xe5, 0xb2, + 0x2b, 0x66, 0xe7, 0x2a, 0x18, 0x9d, 0x5a, 0x02, 0xab, 0xe6, 0x7e, 0xa0, 0xe5, 0xa8, 0x7e, 0x3b, + 0x86, 0xcb, 0xe1, 0x3e, 0xab, 0x70, 0xf4, 0xe2, 0xcb, 0x08, 0xf5, 0x69, 0x0e, 0x2e, 0xb2, 0x18, + 0xbe, 0xf4, 0x51, 0x20, 0x28, 0x43, 0x97, 0xbb, 0x2b, 0x2b, 0xd4, 0x41, 0x52, 0xa5, 0xaf, 0x87, + 0x0b, 0xf6, 0xce, 0x6d, 0x37, 0x3e, 0x94, 0xab, 0xf6, 0x97, 0x91, 0x72, 0xbf, 0xd6, 0x4f, 0xa0, + 0x0b, 0x7d, 0x74, 0x79, 0x69, 0xd9, 0x58, 0x66, 0x66, 0xb1, 0x33, 0x43, 0x27, 0x74, 0xf5, 0x56, + 0x40, 0x69, 0x35, 0x77, 0xb3, 0x20, 0xd5, 0x7a, 0xbb, 0x55, 0xa8, 0x2e, 0xd5, 0xaa, 0x00, 0xfd, + 0x1d, 0x42, 0x86, 0x28, 0xa2, 0xdf, 0xa7, 0xf8, 0x8e, 0x34, 0x9c, 0x3a, 0x10, 0x36, 0x2b, 0x1a, + 0x17, 0x78, 0x71, 0xc7, 0xb2, 0xe0, 0xf1, 0x05, 0x7e, 
0x1d, 0x22, 0xd6, 0xea, 0x43, 0x8e, 0x92, + 0xed, 0xc8, 0xb7, 0x1e, 0x8a, 0x8a, 0xd8, 0xf8, 0x73, 0x5c, 0xee, 0xf1, 0xd3, 0x7e, 0x02, 0xa7, + 0x2c, 0xaa, 0xb6, 0xda, 0x63, 0x41, 0xbf, 0x99, 0x53, 0x62, 0x91, 0xb8, 0x6b, 0x09, 0x10, 0x23, + 0xe3, 0x5e, 0x9f, 0x0a, 0x69, 0xfa, 0xcc, 0x21, 0x45, 0xf4, 0x1b, 0x5a, 0x2c, 0xa4, 0xda, 0x12, + 0xc5, 0xfc, 0xe0, 0x8f, 0x93, 0xda, 0x1c, 0x65, 0xc4, 0x9e, 0x2b, 0x0b, 0xc4, 0x59, 0x38, 0xa7, + 0x3b, 0xfd, 0x8c, 0xe0, 0xfd, 0x77, 0x84, 0x34, 0xcf, 0x09, 0xd9, 0x97, 0xc5, 0x92, 0xba, 0xc6, + 0xe6, 0xb8, 0xb5, 0x45, 0x0d, 0x1e, 0x32, 0xbe, 0xf2, 0x21, 0xcb, 0x6f, 0x14, 0x6d, 0x87, 0x09, + 0x7d, 0x7f, 0x58, 0xb7, 0xa1, 0x15, 0xdd, 0x2e, 0xd9, 0xdf, 0x6a, 0xa3, 0xc3, 0x1d, 0x94, 0xb5, + 0xe5, 0xca, 0xbc, 0xf9, 0xc3, 0x54, 0x46, 0x11, 0xfc, 0x55, 0xfb, 0x3a, 0xf3, 0xb8, 0x46, 0x07, + 0x0b, 0xec, 0x31, 0xa3, 0x90, 0x49, 0xee, 0xae, 0x43, 0xc2, 0x33, 0x67, 0xc4, 0x31, 0xec, 0xb3, + 0x53, 0x95, 0x29, 0xb5, 0xab, 0xa3, 0x78, 0xc7, 0x20, 0x22, 0x1c, 0xc4, 0xc6, 0x36, 0x26, 0xc3, + 0x1b, 0x8c, 0x6d, 0x4a, 0x98, 0x5a, 0x87, 0x7d, 0x1e, 0x30, 0x45, 0x5c, 0xa4, 0x02, 0xd4, 0x4a, + 0x80, 0x65, 0x8d, 0x26, 0x29, 0xa9, 0x0c, 0x7e, 0x4f, 0x9b, 0x2b, 0xd2, 0x3d, 0x21, 0x58, 0x3c, + 0xe1, 0xd7, 0x98, 0x84, 0xce, 0x7d, 0x25, 0x4d, 0xec, 0x7f, 0x77, 0x44, 0x05, 0x30, 0x1a, 0xc3, + 0x17, 0x69, 0x14, 0xfa, 0x3a, 0xa0, 0x69, 0x07, 0xf1, 0x24, 0xc9, 0x35, 0xcb, 0x2f, 0x9a, 0x0f, + 0x69, 0x7c, 0x42, 0x2c, 0xfe, 0x3d, 0x17, 0xcf, 0xd7, 0x44, 0xe1, 0xc4, 0x0d, 0x28, 0x46, 0x32, + 0x62, 0xc3, 0xcf, 0x31, 0x0f, 0xc4, 0xc6, 0x36, 0x50, 0x60, 0x3d, 0xc0, 0xf3, 0xfc, 0x85, 0x99, + 0x37, 0xbc, 0xe7, 0x83, 0xbe, 0x6c, 0x25, 0x92, 0x5d, 0x88, 0xe7, 0x88, 0x5f, 0xd0, 0x74, 0x2e, + 0xdd, 0xbd, 0x6e, 0xea, 0x1a, 0xf9, 0x93, 0x9b, 0x40, 0x2f, 0xc6, 0xf5, 0xe0, 0x3a, 0xcf, 0x7a, + 0xfd, 0xf1, 0x38, 0xce, 0x93, 0xe7, 0x73, 0x43, 0x22, 0x33, 0x28, 0x31, 0x5d, 0xb6, 0x95, 0xe2, + 0x41, 0xc5, 0x2c, 0xa2, 0x05, 0xe9, 0xbd, 0x6a, 0x30, 0x46, 0x2f, 0xaa, 0xb2, 0x89, 0xa1, 0x20, + 0x90, 0xf7, 0x09, 0xd2, 0x7d, 0xc8, 0x02, 0xb6, 0xd6, 0x5c, 0xbe, 0x03, 0x28, 0x0e, 0x38, 0xa3, + 0xa2, 0x52, 0xf6, 0x7d, 0x7a, 0xcd, 0x62, 0x16, 0x31, 0x90, 0x62, 0xab, 0x2a, 0x8b, 0x0d, 0x15, + 0x16, 0xab, 0xc5, 0x17, 0xcb, 0xca, 0x86, 0x8d, 0xbc, 0xc9, 0x40, 0xf1, 0x1f, 0x67, 0xbf, 0x72, + 0xbc, 0xaa, 0xb0, 0x5c, 0xe5, 0x32, 0x97, 0x3e, 0x84, 0x44, 0x2f, 0xb8, 0x0a, 0xa5, 0x65, 0x68, + 0x44, 0x0c, 0x83, 0xd1, 0xea, 0x55, 0xbc, 0xc1, 0xd2, 0x7f, 0xd4, 0x29, 0x18, 0x9c, 0x4f, 0x91, + 0x25, 0x1f, 0x2f, 0xa7, 0xee, 0x7d, 0xff, 0xbd, 0x76, 0xca, 0xac, 0xba, 0xf0, 0xce, 0x37, 0xd0, + 0x80, 0xa6, 0x78, 0x49, 0xd0, 0xaa, 0x0a, 0xbe, 0xf9, 0xec, 0x46, 0x40, 0x2d, 0xdb, 0x77, 0xca, + 0xa0, 0x05, 0x60, 0xd3, 0x2e, 0x6b, 0x23, 0x7b, 0x18, 0xff, 0x94, 0x0b, 0x4a, 0x2e, 0xc8, 0x13, + 0x32, 0xce, 0x81, 0x96, 0xd8, 0x22, 0xe8, 0xe6, 0xed, 0x96, 0xf1, 0x52, 0xf9, 0xf7, 0x34, 0xf7, + 0x5d, 0xf9, 0xb9, 0xf1, 0xaf, 0x07, 0xa0, 0xfe, 0xd5, 0x2f, 0x95, 0x38, 0x48, 0x12, 0xa8, 0x19, + 0xb5, 0xc4, 0x2e, 0xb9, 0x39, 0x3c, 0xf1, 0x19, 0xb2, 0xbd, 0xfc, 0x13, 0xd1, 0x3e, 0x08, 0xba, + 0x03, 0xa6, 0xda, 0x64, 0xaf, 0xa7, 0x7b, 0x87, 0xfe, 0xe2, 0x2f, 0xe9, 0x4e, 0x80, 0xc0, 0xb7, + 0x9f, 0xd4, 0xc4, 0xe2, 0x6a, 0xb4, 0xfe, 0x5c, 0x96, 0xfe, 0x68, 0xb9, 0x2c, 0xf6, 0x8f, 0xd0, + 0xab, 0x19, 0xb8, 0x28, 0xba, 0x09, 0xbf, 0x38, 0x6a, 0x33, 0x71, 0xb8, 0xf9, 0x0b, 0xd6, 0x75, + 0x16, 0xca, 0x42, 0xe9, 0x06, 0xc0, 0x62, 0x74, 0x8b, 0x72, 0x90, 0x67, 0x99, 0x54, 0xaa, 0x36, + 0xd6, 0x01, 0x8a, 0x0d, 0xb2, 0x3d, 0x9e, 0x03, 0x18, 0x1c, 0x00, 0x86, 0x5b, 
0x93, 0x14, 0xa5, + 0x62, 0x59, 0xf0, 0xf2, 0xb8, 0x62, 0x33, 0xb8, 0xf7, 0x8a, 0xc5, 0x68, 0x50, 0xeb, 0x80, 0xe0, + 0x21, 0x7b, 0xa7, 0x08, 0x1e, 0x9d, 0x2f, 0xe5, 0x18, 0xf2, 0x3d, 0x87, 0x4e, 0x6b, 0xde, 0x33, + 0xa2, 0x04, 0xcc, 0xc8, 0x80, 0xf3, 0xfc, 0x5a, 0xcb, 0xb2, 0x9c, 0xbd, 0xc7, 0xd2, 0xda, 0x57, + 0xfa, 0x12, 0x10, 0xc1, 0x4b, 0x7c, 0x83, 0xd4, 0x8d, 0x6f, 0x06, 0xe3, 0xc3, 0x8a, 0xeb, 0x7f, + 0xbc, 0xae, 0x68, 0xbc, 0x71, 0x7d, 0x28, 0xb8, 0xcb, 0x5a, 0x32, 0x68, 0x97, 0xee, 0x0e, 0xdf, + 0xbb, 0x91, 0xac, 0x1f, 0x78, 0x7e, 0x93, 0x71, 0xa9, 0x09, 0x1a, 0x51, 0x3f, 0x37, 0x73, 0xd7, + 0xd8, 0x18, 0x5e, 0xc9, 0xc3, 0x22, 0xcc, 0x50, 0x3c, 0x94, 0x8c, 0x4b, 0xcc, 0x7e, 0x6f, 0x25, + 0xf5, 0x1d, 0xbb, 0x9a, 0x36, 0x77, 0xb5, 0x4b, 0x96, 0x1a, 0x7d, 0x9a, 0x58, 0xc9, 0xf3, 0x36, + 0x97, 0x75, 0x07, 0x10, 0xeb, 0xae, 0x0c, 0x05, 0xf3, 0x9d, 0x85, 0x42, 0x47, 0xf1, 0x97, 0x29, + 0x4f, 0x36, 0x7d, 0x85, 0xff, 0x39, 0xff, 0x59, 0x88, 0x24, 0xa6, 0xd6, 0x64, 0x69, 0x12, 0x20, + 0xda, 0x5e, 0xb1, 0xdd, 0x71, 0x1d, 0xc9, 0x3d, 0x26, 0x4d, 0x00, 0xd7, 0x65, 0x94, 0x9d, 0x6c, + 0x84, 0xed, 0xda, 0xe2, 0x5a, 0xab, 0x30, 0x09, 0xb8, 0x0e, 0x83, 0x74, 0x76, 0x54, 0x4d, 0x01, + 0xb4, 0x76, 0x0c, 0xe4, 0x34, 0xb3, 0xd8, 0xb6, 0x57, 0xd6, 0xab, 0x51, 0x97, 0x20, 0xca, 0xee, + 0x71, 0xea, 0x7b, 0x4a, 0x78, 0xa9, 0x1a, 0xa5, 0x89, 0xa5, 0x4f, 0x21, 0x99, 0x66, 0xcc, 0xcf, + 0x4c, 0x23, 0x36, 0xad, 0x68, 0xe0, 0x7d, 0xf0, 0xae, 0x00, 0x0d, 0x53, 0xa3, 0xa3, 0xe2, 0x15, + 0xfb, 0x85, 0x35, 0x58, 0x90, 0xb4, 0x4f, 0xab, 0x41, 0x28, 0xb7, 0x8f, 0x24, 0xa7, 0x2a, 0x96, + 0x68, 0x3d, 0xb0, 0xf0, 0xbc, 0x0f, 0xc5, 0x56, 0x92, 0xad, 0xab, 0xfb, 0x96, 0xdf, 0xa7, 0x3f, + 0x0e, 0xda, 0x04, 0x96, 0x25, 0x90, 0x1f, 0xd2, 0xf9, 0x33, 0xf6, 0x47, 0x16, 0xe8, 0x68, 0xe9, + 0x5b, 0xfb, 0xf0, 0x1a, 0x17, 0xa9, 0x29, 0xb1, 0xd0, 0xb4, 0xb9, 0x30, 0xee, 0xc7, 0x33, 0xf9, + 0xe9, 0xf1, 0x9f, 0xfa, 0x04, 0x19, 0x89, 0xd1, 0xed, 0x58, 0x30, 0x72, 0xbd, 0x68, 0x4b, 0xfe, + 0x71, 0xdd, 0x93, 0xb3, 0x40, 0x45, 0xa2, 0xbb, 0x43, 0x53, 0x87, 0x76, 0x66, 0x88, 0xb5, 0x21, + 0xf2, 0x58, 0x65, 0x4a, 0x63, 0x17, 0xf0, 0x26, 0x73, 0x9f, 0x21, 0xbe, 0x3a, 0xa3, 0x17, 0x37, + 0xd4, 0xf6, 0x3d, 0xde, 0x29, 0x75, 0x2e, 0xf0, 0xcb, 0x49, 0x3a, 0x25, 0xf9, 0x2d, 0x7d, 0x0a, + 0x1e, 0x0b, 0x45, 0x66, 0xdf, 0x71, 0x1e, 0x9d, 0xe3, 0x64, 0x89, 0x43, 0xa2, 0x36, 0xa0, 0xd7, + 0xe6, 0x28, 0x60, 0xe5, 0x6d, 0x53, 0x1d, 0xc8, 0x59, 0x4d, 0x36, 0x5d, 0x06, 0x1f, 0x89, 0x21, + 0x97, 0xc4, 0x2f, 0xb5, 0xe3, 0x8e, 0xf3, 0xb2, 0x75, 0xd1, 0xb5, 0x0f, 0x57, 0x6e, 0x75, 0xdd, + 0x49, 0x5f, 0x11, 0x33, 0x46, 0x2e, 0x7c, 0xe0, 0x91, 0xd0, 0x0a, 0x28, 0xd2, 0x1e, 0xc2, 0xd8, + 0xe6, 0xe3, 0x56, 0xcc, 0x97, 0x99, 0xbc, 0xbb, 0xfb, 0xfa, 0x5f, 0x53, 0x49, 0xdf, 0x13, 0x0d, + 0x69, 0xc0, 0x97, 0xb2, 0x11, 0x0e, 0x9c, 0x5b, 0x53, 0x1a, 0x91, 0x6f, 0xbc, 0x90, 0x46, 0x64, + 0x6c, 0xbe, 0x08, 0xfc, 0x9e, 0xb4, 0xdc, 0x1d, 0x38, 0x06, 0x55, 0xa7, 0xd9, 0x16, 0x6a, 0x5b, + 0x79, 0xdf, 0xcd, 0x7b, 0x5b, 0xb3, 0x5b, 0x43, 0xa6, 0x7a, 0x5b, 0x5b, 0xfa, 0x68, 0x69, 0x12, + 0x84, 0x27, 0x5e, 0x10, 0x44, 0x08, 0x39, 0x09, 0x30, 0x42, 0x28, 0x50, 0x2e, 0x34, 0xed, 0x2b, + 0x53, 0x6b, 0x20, 0x2c, 0x64, 0x54, 0xb2, 0x76, 0xc7, 0x1d, 0x41, 0xc2, 0x8b, 0x76, 0xba, 0xc6, + 0x6a, 0x41, 0x5c, 0xf5, 0xee, 0x12, 0xe6, 0xa7, 0xa3, 0xc7, 0x20, 0x0a, 0xf5, 0x17, 0xfd, 0xd8, + 0xd5, 0x78, 0xbd, 0x9c, 0x08, 0xf4, 0x72, 0x49, 0xbd, 0xa3, 0x6a, 0x08, 0x92, 0x74, 0x69, 0x2b, + 0xb1, 0xb9, 0x57, 0x8f, 0xcf, 0xf4, 0xcd, 0x30, 0x14, 0xff, 0x15, 0xcf, 0x4f, 0x8b, 0x5e, 0xde, + 0xc1, 
0x01, 0x2b, 0xac, 0xe9, 0xad, 0x1d, 0x4e, 0x5b, 0xef, 0x74, 0xf5, 0xfb, 0x52, 0xc5, 0x41, + 0x39, 0xe1, 0xfa, 0x6c, 0xa4, 0x0d, 0x6f, 0x7a, 0x73, 0x22, 0xa0, 0xfc, 0xb0, 0xb3, 0x98, 0xf9, + 0xb4, 0x69, 0xd4, 0xc9, 0xd6, 0xe1, 0xa0, 0xc2, 0x29, 0x6f, 0x1c, 0x3c, 0x32, 0x2f, 0xb3, 0x4e, + 0x12, 0x9f, 0xf3, 0x2e, 0xb6, 0xe0, 0x54, 0x93, 0x5d, 0x0b, 0xb6, 0xa6, 0x18, 0x2c, 0x76, 0x3d, + 0x54, 0xf0, 0x07, 0xe8, 0x85, 0x3b, 0x00, 0xb7, 0x24, 0xa9, 0xea, 0x65, 0xd7, 0xbe, 0x17, 0xb2, + 0xc4, 0xab, 0x8c, 0x77, 0xfd, 0xe2, 0xc1, 0x51, 0x46, 0x82, 0x53, 0xdc, 0x21, 0x02, 0x22, 0x53, + 0x3d, 0x35, 0xfe, 0xc5, 0xb1, 0x28, 0xea, 0x79, 0x50, 0x27, 0x7b, 0x00, 0x7f, 0x36, 0xd5, 0xd3, + 0x0a, 0x93, 0x9f, 0x84, 0x2a, 0x6f, 0x0d, 0x67, 0xc0, 0xa3, 0x7f, 0x48, 0xa8, 0x2c, 0x96, 0xf3, + 0x4f, 0x74, 0x0e, 0xf7, 0xfd, 0xc9, 0xe8, 0x45, 0x9a, 0xb8, 0x40, 0xc9, 0xa8, 0xae, 0x88, 0x51, + 0x25, 0x4e, 0xcf, 0x27, 0xfa, 0x70, 0xad, 0xfc, 0x8d, 0x14, 0xa0, 0xd6, 0x84, 0x32, 0x41, 0x66, + 0x4b, 0x64, 0xf3, 0xa1, 0x89, 0x1d, 0xcc, 0x9f, 0xcd, 0xf5, 0x09, 0xfa, 0x64, 0x44, 0x09, 0x4d, + 0x3b, 0x59, 0x1e, 0x51, 0x98, 0x16, 0x3b, 0x4e, 0x78, 0x2d, 0xca, 0xc2, 0x30, 0xb3, 0x2a, 0x65, + 0x9e, 0xca, 0x5c, 0xaa, 0xe8, 0xea, 0x47, 0xa6, 0x70, 0x71, 0xac, 0x29, 0x66, 0x2b, 0xc0, 0x58, + 0xfe, 0xd5, 0xf6, 0x02, 0x16, 0xb6, 0x3d, 0x3e, 0x36, 0xf1, 0x4f, 0x8c, 0x4a, 0xc3, 0xc4, 0x25, + 0x1d, 0x0b, 0xbb, 0x9b, 0x8a, 0x87, 0xcf, 0xf2, 0x21, 0xd4, 0x3d, 0x57, 0x7c, 0xaa, 0x42, 0x4c, + 0xf3, 0x5c, 0xfa, 0x9d, 0xbd, 0xd2, 0x64, 0x27, 0xe7, 0x86, 0x95, 0xb6, 0x89, 0xa1, 0x41, 0x96, + 0x98, 0xe0, 0x5a, 0xcc, 0x59, 0xde, 0xca, 0x29, 0x4e, 0x72, 0x75, 0x6b, 0x7d, 0xd2, 0x42, 0xbf, + 0x9b, 0x1c, 0x95, 0x0f, 0xfc, 0x7e, 0x15, 0x1e, 0x7f, 0xa5, 0xc8, 0xdc, 0x12, 0x66, 0xd1, 0xd0, + 0x83, 0x2f, 0x15, 0x59, 0x43, 0x20, 0x47, 0x99, 0xca, 0xd6, 0xb6, 0xaf, 0xf1, 0x49, 0xba, 0x0c, + 0x0b, 0x8b, 0xdf, 0x77, 0x5e, 0xd6, 0xec, 0x1d, 0x16, 0x4f, 0x24, 0x6a, 0x5a, 0x9e, 0x0d, 0x30, + 0xc2, 0x07, 0xc2, 0x99, 0xa7, 0xac, 0x79, 0xa9, 0xf0, 0xeb, 0x3d, 0xe0, 0x6e, 0x9f, 0x18, 0x54, + 0x51, 0xe0, 0xaf, 0xef, 0xd1, 0x45, 0x13, 0x9e, 0x37, 0x24, 0xd0, 0x9a, 0x0d, 0xb8, 0x84, 0x1c, + 0xd4, 0x92, 0x94, 0xac, 0x3f, 0xad, 0x62, 0x9a, 0x3c, 0x87, 0xa1, 0x89, 0xdb, 0x93, 0x2e, 0x11, + 0xea, 0xd1, 0x1a, 0x8a, 0x69, 0x52, 0x97, 0xab, 0xd2, 0x4a, 0xa1, 0xa5, 0xd4, 0xa7, 0x19, 0xe0, + 0x5b, 0x0b, 0x3a, 0x8c, 0x11, 0x35, 0x71, 0xa6, 0x40, 0x96, 0xc5, 0xec, 0x8c, 0x2b, 0xf9, 0x15, + 0x21, 0x58, 0x27, 0x17, 0x3d, 0x55, 0x01, 0x88, 0x44, 0x08, 0x75, 0x0d, 0xde, 0x42, 0xfe, 0x60, + 0x82, 0xb2, 0xae, 0x66, 0x8f, 0xb1, 0x55, 0xea, 0xf1, 0x6d, 0xd5, 0x0c, 0x31, 0x15, 0xd4, 0x32, + 0x1f, 0x5b, 0x67, 0xb6, 0x10, 0x12, 0x90, 0x53, 0x2d, 0x43, 0x67, 0x1b, 0xc9, 0x87, 0xff, 0x89, + 0x23, 0x4a, 0xfb, 0x98, 0x2b, 0x27, 0x9f, 0x13, 0xab, 0x65, 0x2c, 0x5e, 0x88, 0x82, 0x36, 0x86, + 0x8d, 0xd9, 0x9a, 0xc6, 0xa7, 0x1b, 0xeb, 0x68, 0xcd, 0x1e, 0x67, 0x5a, 0x31, 0x39, 0xd5, 0x05, + 0x58, 0x0a, 0x10, 0x5d, 0xb6, 0x95, 0xa6, 0x13, 0x50, 0x23, 0xaa, 0x1f, 0xe6, 0x73, 0x71, 0xc6, + 0x4b, 0x77, 0xdb, 0xde, 0x4b, 0xef, 0xd7, 0x79, 0xc2, 0x29, 0x8e, 0x8f, 0xa8, 0xd3, 0xcf, 0xe1, + 0xfd, 0xe5, 0x65, 0x3b, 0xd0, 0xfb, 0x2d, 0x97, 0xe4, 0xbf, 0x86, 0xa6, 0x76, 0xe9, 0xd1, 0x97, + 0x99, 0x7b, 0xcc, 0x75, 0xee, 0x02, 0x2b, 0x4d, 0xee, 0xa2, 0x7a, 0x56, 0x27, 0x44, 0x8c, 0x1a, + 0x09, 0xcc, 0x7e, 0x94, 0xbb, 0x53, 0x1b, 0x0e, 0x6a, 0x52, 0xc7, 0x50, 0x85, 0x2e, 0xcf, 0xb1, + 0x6a, 0x39, 0x40, 0xfd, 0x03, 0xbc, 0x79, 0x0e, 0x51, 0x45, 0xfd, 0xb3, 0xf0, 0xca, 0xca, 0xdc, + 0x65, 0x58, 0x7e, 0xdf, 0x4d, 
0xed, 0x6c, 0x36, 0x62, 0xc0, 0xf6, 0x16, 0x17, 0xf1, 0xb1, 0xed, + 0x41, 0xe3, 0x4d, 0xe6, 0x40, 0x0f, 0x16, 0xf3, 0x2f, 0x82, 0x7b, 0xae, 0x90, 0x4f, 0xc2, 0xe9, + 0x5e, 0x97, 0x1d, 0xb7, 0xbb, 0x90, 0x89, 0xd0, 0xf1, 0x44, 0x52, 0x63, 0xf7, 0x49, 0x2f, 0x8b, + 0xa7, 0x4d, 0xd0, 0x4f, 0xcd, 0x6f, 0x0f, 0x5c, 0xb7, 0x47, 0x75, 0x7b, 0x12, 0xe5, 0xde, 0x5d, + 0x52, 0x67, 0xe6, 0x89, 0x70, 0xd6, 0x66, 0xbe, 0xa6, 0xe5, 0xe3, 0x8c, 0xcf, 0xbf, 0x32, 0x8d, + 0xcf, 0x94, 0xad, 0x9e, 0x47, 0x2c, 0x3f, 0x47, 0xe4, 0xc4, 0x9b, 0x34, 0x8d, 0x3d, 0x44, 0x20, + 0xc1, 0x75, 0x40, 0xbb, 0xe4, 0x6f, 0xe9, 0x83, 0x3e, 0x6d, 0x2d, 0x77, 0x8c, 0x39, 0x55, 0xe5, + 0xbd, 0x11, 0x48, 0xd8, 0x59, 0xa1, 0xd5, 0xf8, 0xb4, 0xd2, 0x9e, 0x1e, 0xb6, 0xd6, 0xbf, 0xde, + 0x6b, 0x5a, 0x44, 0xff, 0xd8, 0x88, 0x34, 0x58, 0x46, 0x9f, 0x87, 0x68, 0xa6, 0x9c, 0xf6, 0xae, + 0xe1, 0x57, 0xcf, 0x28, 0x48, 0x47, 0x4a, 0x2a, 0xb1, 0x36, 0xf3, 0xb6, 0x53, 0x01, 0x9c, 0x7a, + 0xe7, 0x1d, 0x12, 0xe4, 0x4f, 0xf1, 0x18, 0x1b, 0x62, 0x62, 0xe8, 0x3b, 0xf6, 0x55, 0xdd, 0xca, + 0x82, 0xa1, 0xca, 0x4d, 0x94, 0xd6, 0xe9, 0x19, 0xc7, 0xcf, 0x30, 0xad, 0xb4, 0x65, 0xa9, 0x49, + 0x0d, 0xba, 0xe0, 0xb9, 0x6e, 0xbe, 0xa5, 0x9a, 0xd5, 0x3b, 0x6f, 0xee, 0xa5, 0xc4, 0x2d, 0x1d, + 0xe3, 0x7a, 0x6e, 0xe1, 0x90, 0x65, 0xd8, 0xaf, 0xfa, 0xae, 0xc1, 0x72, 0xca, 0x8d, 0x0a, 0x99, + 0x1f, 0xbd, 0xc2, 0x34, 0x00, 0x7b, 0xa6, 0xf1, 0xd5, 0xe5, 0x00, 0x37, 0x85, 0x39, 0x78, 0x13, + 0x31, 0xcd, 0x5f, 0x4f, 0x65, 0x03, 0xc9, 0x4c, 0xc1, 0xb0, 0x1c, 0x57, 0x7a, 0x7c, 0xa6, 0xe4, + 0x9a, 0xf7, 0x66, 0x89, 0x33, 0x7d, 0x42, 0x79, 0xe7, 0xc6, 0xc6, 0x4d, 0xf3, 0xa6, 0x4a, 0x50, + 0xcd, 0x89, 0xfe, 0x6f, 0xb6, 0x99, 0xa9, 0x93, 0xcf, 0xa2, 0xdb, 0x30, 0xf3, 0x0d, 0x1b, 0xb6, + 0x5e, 0xbc, 0x39, 0xea, 0x16, 0x1a, 0x62, 0x15, 0xe7, 0xa7, 0xd0, 0x26, 0x18, 0x95, 0x29, 0x8c, + 0xbf, 0x8c, 0xb9, 0x6c, 0x9c, 0x4b, 0x64, 0x5b, 0xab, 0x2d, 0xa3, 0xbb, 0x53, 0x7e, 0xe4, 0x6e, + 0xc9, 0x28, 0xb1, 0x0f, 0x5c, 0x11, 0x86, 0x6d, 0x67, 0x11, 0xcd, 0x72, 0x46, 0x06, 0x27, 0x75, + 0x85, 0x63, 0xa2, 0x24, 0x7d, 0x11, 0xcf, 0x0c, 0xe7, 0xfa, 0xd9, 0xc5, 0x1d, 0x37, 0x47, 0xd6, + 0x6b, 0x26, 0x14, 0xa0, 0x63, 0xf9, 0xb7, 0xce, 0x34, 0x30, 0xdb, 0xcc, 0xf2, 0x7d, 0x99, 0xfb, + 0x59, 0xbd, 0x0f, 0xfc, 0x88, 0xdb, 0x1d, 0x77, 0x26, 0x60, 0x5c, 0x5a, 0xa4, 0xb7, 0xa8, 0x01, + 0x3f, 0x70, 0x6e, 0x0a, 0x50, 0x7a, 0x53, 0x69, 0x5a, 0xd7, 0xed, 0x50, 0xae, 0x84, 0x01, 0x38, + 0x13, 0x53, 0xaf, 0xaa, 0x9d, 0x95, 0xd4, 0x2a, 0xd4, 0xa2, 0x68, 0x20, 0x0c, 0x1e, 0x0c, 0x4c, + 0xf6, 0x1a, 0xe3, 0xaa, 0x2d, 0x97, 0x86, 0x17, 0x6c, 0xbc, 0x9c, 0x18, 0xd7, 0x29, 0x79, 0x17, + 0xb9, 0x8e, 0x1c, 0x7c, 0xdc, 0xb4, 0x4e, 0xb4, 0x28, 0xd6, 0xaf, 0x1c, 0x9a, 0x24, 0xd4, 0x66, + 0x5a, 0x39, 0xae, 0x83, 0xf4, 0x26, 0x84, 0x47, 0x8a, 0x3f, 0x85, 0xac, 0x45, 0x64, 0xee, 0xa5, + 0x08, 0xd4, 0xa3, 0x8e, 0xfd, 0x04, 0xc7, 0x0a, 0x7d, 0xbf, 0x5a, 0x85, 0xc3, 0x4e, 0x41, 0x59, + 0x28, 0x71, 0xc1, 0xba, 0xbb, 0x99, 0xda, 0x41, 0xb7, 0x49, 0x27, 0x53, 0x59, 0x96, 0x00, 0x91, + 0x54, 0x1b, 0x58, 0xb5, 0x40, 0x66, 0x84, 0xfa, 0xca, 0x70, 0x57, 0x03, 0xef, 0x77, 0xea, 0x37, + 0x9c, 0xf1, 0xc1, 0x9c, 0xba, 0xcd, 0x84, 0xe4, 0x55, 0x51, 0xe4, 0xe9, 0x2a, 0xe8, 0xf5, 0x0d, + 0x88, 0x1a, 0x9b, 0x2d, 0x3d, 0x97, 0x96, 0x17, 0x25, 0x88, 0x63, 0xda, 0xb1, 0xbc, 0xa3, 0x1f, + 0xb7, 0x9d, 0x4a, 0xf4, 0xbd, 0x1f, 0xce, 0x53, 0x19, 0xff, 0x1e, 0xf3, 0xb6, 0x30, 0x09, 0x11, + 0x41, 0x65, 0x4f, 0x74, 0x61, 0xb8, 0x46, 0xed, 0x42, 0x6d, 0x04, 0xa0, 0x6e, 0x86, 0x73, 0xa5, + 0xeb, 0xe3, 0xb3, 0xdc, 0xb7, 0xc0, 0x48, 0xf4, 0x5b, 
0xd9, 0x4f, 0x07, 0x5f, 0x29, 0x39, 0xb9, + 0xd0, 0x7d, 0x25, 0xc9, 0xcd, 0xca, 0x7c, 0xa5, 0x9c, 0x87, 0x82, 0xca, 0x09, 0xba, 0xb5, 0xd3, + 0x72, 0x47, 0x72, 0x48, 0xbd, 0x22, 0x54, 0xae, 0x4c, 0x24, 0xdb, 0xb9, 0x65, 0x13, 0xd3, 0xd2, + 0x4a, 0xde, 0x09, 0x8f, 0xa1, 0x1c, 0xb1, 0xab, 0xa3, 0x5f, 0x4e, 0x98, 0x11, 0xc4, 0x43, 0x09, + 0x54, 0xc9, 0xb0, 0xc6, 0x4d, 0x4d, 0xf4, 0xdc, 0x2a, 0xdf, 0x69, 0x14, 0x87, 0xb8, 0xa8, 0xb5, + 0xdf, 0xe1, 0xd5, 0xba, 0x76, 0x1f, 0x32, 0x46, 0x20, 0x6b, 0x33, 0xab, 0xec, 0xb7, 0x54, 0xf6, + 0xfd, 0xeb, 0xc4, 0x4b, 0xce, 0xd5, 0x0e, 0xa6, 0x6e, 0x58, 0x27, 0x0e, 0x92, 0xeb, 0xdb, 0x16, + 0x21, 0x16, 0x7b, 0xd1, 0x15, 0xd9, 0x3f, 0xe5, 0x6b, 0x15, 0x74, 0xd2, 0xf3, 0xfa, 0x76, 0xf8, + 0x1d, 0xa8, 0x26, 0x0e, 0xa5, 0x5f, 0x87, 0x75, 0x1c, 0x04, 0x14, 0x41, 0xe8, 0x6d, 0x51, 0x9c, + 0x1c, 0x34, 0x69, 0x71, 0xb9, 0x6f, 0x5a, 0x6d, 0x25, 0xf1, 0xad, 0xc7, 0xb0, 0x1d, 0x58, 0x1b, + 0x41, 0xcb, 0xe6, 0xba, 0xb1, 0x0e, 0x9e, 0x8a, 0xd0, 0xa2, 0x0f, 0x32, 0x12, 0xa7, 0x42, 0x83, + 0xfe, 0xa2, 0x64, 0x07, 0xbd, 0xca, 0x80, 0x56, 0x51, 0x3f, 0x96, 0x1f, 0xac, 0x75, 0xc6, 0xcd, + 0x56, 0xd1, 0x64, 0x0b, 0x72, 0x56, 0x0f, 0xf4, 0xaf, 0x11, 0x39, 0xb0, 0xc0, 0x84, 0x70, 0xcc, + 0xa5, 0x09, 0x47, 0xd9, 0xba, 0x44, 0x4e, 0x20, 0x9c, 0x5c, 0xca, 0x03, 0x0b, 0xe9, 0xc2, 0x33, + 0x4c, 0xdb, 0xef, 0x2a, 0xb8, 0x84, 0x2f, 0xc6, 0x79, 0x5e, 0x74, 0x83, 0x32, 0x78, 0xfa, 0x0e, + 0x12, 0x1a, 0x24, 0x70, 0x7b, 0xe6, 0xa7, 0xfd, 0xf5, 0x42, 0xb5, 0x2a, 0x0a, 0x8c, 0x39, 0x1e, + 0x98, 0x40, 0x95, 0x61, 0x88, 0x5c, 0xb9, 0x54, 0x5f, 0x04, 0x15, 0x2f, 0xd6, 0x7a, 0x6c, 0xe1, + 0x23, 0xda, 0x21, 0xf1, 0xce, 0x5b, 0x3e, 0x1a, 0xcb, 0x78, 0x78, 0xcd, 0xc5, 0xe8, 0xbc, 0x4d, + 0x27, 0x57, 0x16, 0x86, 0x93, 0x38, 0x6e, 0xcf, 0x60, 0x00, 0xca, 0xd8, 0x58, 0x6a, 0xc2, 0x2b, + 0x3f, 0xe6, 0xe1, 0x74, 0x01, 0xcb, 0xcd, 0xaa, 0x58, 0xdc, 0x90, 0x49, 0xde, 0x5f, 0x34, 0x4e, + 0x80, 0xab, 0x4b, 0x24, 0x18, 0x3b, 0xac, 0xb7, 0x6d, 0x81, 0x4f, 0xfe, 0xdd, 0xb4, 0xf2, 0x69, + 0x4a, 0x0a, 0x6e, 0x78, 0xe4, 0x06, 0xac, 0x35, 0xe8, 0x96, 0xcf, 0x26, 0xa3, 0x93, 0x40, 0x52, + 0xd7, 0x3a, 0x11, 0x1c, 0x9f, 0xa3, 0xf2, 0x81, 0x2c, 0x97, 0x84, 0x68, 0x96, 0x97, 0xe8, 0x02, + 0xab, 0x74, 0x3b, 0x95, 0xc3, 0x77, 0xd3, 0xd5, 0x2d, 0xdb, 0x90, 0xa5, 0x10, 0xdd, 0x8c, 0xa3, + 0x3f, 0x13, 0xc4, 0x7a, 0xef, 0xc4, 0x45, 0xe5, 0x61, 0x32, 0xbe, 0x4e, 0xd9, 0x34, 0x4d, 0x07, + 0x82, 0x53, 0x01, 0x8d, 0x08, 0x7e, 0xaf, 0x95, 0x30, 0x6d, 0x31, 0x85, 0xba, 0x44, 0x17, 0xea, + 0x12, 0xdc, 0xf0, 0x0f, 0x47, 0xd8, 0xe6, 0xc6, 0xc0, 0x22, 0x90, 0x31, 0xb9, 0x14, 0x2e, 0xda, + 0x03, 0xf1, 0x41, 0x9c, 0xa0, 0x41, 0x4e, 0x9f, 0x3f, 0xfe, 0x65, 0x13, 0xe6, 0x4d, 0x97, 0xd6, + 0x8a, 0x3f, 0x4c, 0x88, 0x2a, 0xac, 0x7a, 0xfb, 0x2e, 0xd9, 0xb1, 0x77, 0x1d, 0x30, 0x7a, 0x69, + 0xc7, 0xf9, 0x77, 0xdc, 0x33, 0x1b, 0xc0, 0x0e, 0x40, 0x08, 0x25, 0x9c, 0x5a, 0xc6, 0x06, 0xfa, + 0xbc, 0x59, 0x68, 0xed, 0x36, 0x7b, 0x10, 0xe1, 0x96, 0xce, 0x2c, 0x2d, 0x62, 0x2d, 0x8c, 0x0f, + 0x97, 0x14, 0xcc, 0x17, 0x2e, 0x5b, 0x1a, 0x58, 0x00, 0x03, 0xd6, 0x3a, 0x3f, 0x11, 0xa5, 0x67, + 0x68, 0x31, 0xd5, 0x9d, 0xc2, 0x7e, 0xe7, 0x96, 0xa0, 0xb6, 0xaa, 0x45, 0xa2, 0xd4, 0x38, 0xcc, + 0xd7, 0x19, 0x37, 0x31, 0x21, 0xb6, 0xf6, 0x7c, 0x13, 0xca, 0x95, 0x09, 0x43, 0x15, 0x0a, 0xe3, + 0x0a, 0xe8, 0x5e, 0x1d, 0xb5, 0x6b, 0x8e, 0xef, 0xb1, 0x97, 0xd5, 0xc7, 0xd2, 0x73, 0x1b, 0x10, + 0xb7, 0x3e, 0x93, 0x98, 0x24, 0x34, 0x7c, 0xd0, 0x73, 0x57, 0xaa, 0x73, 0x57, 0x9a, 0xa4, 0x3c, + 0xa1, 0xd3, 0xf5, 0xe2, 0x74, 0x0b, 0x27, 0x1d, 0x57, 0xa9, 0x23, 0x80, 0xd0, 
0x7a, 0xe6, 0x68, + 0xf1, 0x71, 0xae, 0x88, 0x29, 0xf0, 0x7b, 0x4c, 0x35, 0x9b, 0x62, 0xe3, 0x8e, 0xd7, 0x3a, 0xe2, + 0x8c, 0xff, 0xcc, 0x61, 0x01, 0x07, 0x57, 0x0a, 0xb2, 0x29, 0x10, 0xf4, 0x1e, 0x7b, 0xaf, 0xb2, + 0xaf, 0xdb, 0x7c, 0x32, 0xbb, 0x6d, 0x82, 0x38, 0xa4, 0x77, 0x6d, 0x68, 0x50, 0x38, 0x31, 0xcc, + 0x97, 0x38, 0x80, 0xe7, 0x5c, 0x12, 0x7b, 0x21, 0x17, 0x37, 0xee, 0xeb, 0x1c, 0x58, 0x40, 0x32, + 0x61, 0x43, 0x3f, 0xe9, 0x92, 0x59, 0x3b, 0xd7, 0xe6, 0xa9, 0xb0, 0x3f, 0xfe, 0xcf, 0xac, 0xe0, + 0x8c, 0xb7, 0xf1, 0x68, 0x0d, 0x70, 0x74, 0xb7, 0xc8, 0x2f, 0xdc, 0x51, 0x13, 0xa7, 0xcc, 0xdb, + 0x32, 0xa8, 0x5f, 0x03, 0xe9, 0xf1, 0xfe, 0xc9, 0x80, 0x94, 0x95, 0x59, 0x7a, 0x29, 0x23, 0x58, + 0x82, 0x05, 0x4c, 0xed, 0xb4, 0xb0, 0x9e, 0xec, 0xa5, 0x27, 0x53, 0xd5, 0x68, 0xf7, 0xc5, 0x29, + 0x6b, 0x18, 0xc5, 0x72, 0x41, 0x28, 0xd9, 0xa5, 0x8b, 0x57, 0xa1, 0x5c, 0xf0, 0x82, 0xfd, 0x0b, + 0x54, 0xd9, 0x59, 0xa7, 0xe0, 0x41, 0x60, 0xfd, 0x55, 0x1f, 0xd8, 0xcc, 0x37, 0xbc, 0xa0, 0xe0, + 0x52, 0x73, 0x7b, 0x16, 0xca, 0x03, 0x49, 0x53, 0x5c, 0x14, 0x98, 0xb3, 0x60, 0x43, 0x5e, 0xcc, + 0x2d, 0xed, 0x6f, 0xcd, 0x94, 0x14, 0xb3, 0x61, 0x13, 0x3c, 0xc2, 0x2b, 0xca, 0x4e, 0xd9, 0xff, + 0xe7, 0x4d, 0x13, 0x92, 0xf2, 0x6f, 0x72, 0x0c, 0x29, 0xff, 0x78, 0x3b, 0x56, 0xd8, 0x9f, 0xd9, + 0x0f, 0xd9, 0x3a, 0x12, 0xbf, 0x59, 0xad, 0x88, 0xf0, 0x81, 0x63, 0xe1, 0x8e, 0x9d, 0x17, 0xeb, + 0x31, 0xd3, 0xfc, 0xb3, 0x81, 0xbb, 0x4e, 0xe9, 0x98, 0x0d, 0xa2, 0x3f, 0xf4, 0xe5, 0x79, 0x79, + 0xf8, 0xec, 0xec, 0x60, 0x34, 0x3a, 0x70, 0xd0, 0x21, 0xf2, 0x74, 0x31, 0xa0, 0xd0, 0x0e, 0xa1, + 0x87, 0x22, 0x57, 0x9d, 0xfb, 0x71, 0x94, 0xab, 0x38, 0x23, 0x3f, 0xf1, 0xdb, 0xa4, 0x53, 0x80, + 0xd3, 0x5c, 0x08, 0x4e, 0x64, 0x70, 0x6e, 0xb2, 0xb4, 0x6e, 0x23, 0xbf, 0x0a, 0x31, 0xea, 0x13, + 0xac, 0x7f, 0x3e, 0xd1, 0x21, 0x22, 0xac, 0x7c, 0xc7, 0x8e, 0x06, 0x55, 0x91, 0x77, 0x0a, 0xf3, + 0x0e, 0xc0, 0x31, 0xea, 0x69, 0x1f, 0xba, 0x29, 0x15, 0x25, 0x6b, 0x9c, 0xef, 0x46, 0x21, 0x26, + 0x09, 0x90, 0xdc, 0xb1, 0xe6, 0x26, 0x00, 0x1f, 0x79, 0x52, 0x0a, 0x77, 0x46, 0x1d, 0xd0, 0x8f, + 0xb0, 0x47, 0x06, 0xd6, 0xbc, 0x2c, 0x3c, 0xb9, 0x9b, 0x94, 0x6c, 0x3e, 0x40, 0x3a, 0xc7, 0x29, + 0x56, 0x48, 0x25, 0xd3, 0x24, 0xb8, 0x0b, 0xe6, 0x65, 0xc2, 0xfd, 0x37, 0x4c, 0x01, 0xbb, 0xb0, + 0x8e, 0x91, 0xb4, 0xfc, 0x5d, 0x74, 0x86, 0x6d, 0xad, 0x72, 0xd9, 0x85, 0xa7, 0x7f, 0xcb, 0x42, + 0x03, 0xe5, 0xfa, 0xe3, 0xdb, 0x14, 0xd9, 0x19, 0xe7, 0xbf, 0x98, 0x8b, 0x24, 0xb2, 0x82, 0x5d, + 0x1d, 0x5b, 0xeb, 0xc8, 0xde, 0x3c, 0xa0, 0xa9, 0xc0, 0x4e, 0x32, 0xcd, 0xf5, 0xcb, 0x9e, 0xc7, + 0x72, 0x2b, 0x6d, 0x35, 0x70, 0x6e, 0x86, 0xbd, 0xcb, 0xb3, 0xc5, 0x59, 0x6c, 0xc1, 0x95, 0xb8, + 0x89, 0x96, 0x2d, 0xcb, 0x33, 0x89, 0x27, 0x3e, 0xde, 0x59, 0x07, 0xc8, 0xbe, 0xc0, 0x86, 0xee, + 0xd9, 0x30, 0x73, 0xdf, 0xac, 0x21, 0x60, 0x24, 0x3d, 0x61, 0xc0, 0x80, 0x21, 0xde, 0xd2, 0x2a, + 0xe9, 0xc7, 0xbb, 0x73, 0xc7, 0xc0, 0x28, 0xed, 0x4e, 0x78, 0xb9, 0x10, 0x20, 0xe1, 0xed, 0xb8, + 0xaa, 0x92, 0x70, 0x18, 0x49, 0xe7, 0xdb, 0x92, 0x94, 0xc2, 0x34, 0xa6, 0x6d, 0x9d, 0x2a, 0x97, + 0xa8, 0xe1, 0xd4, 0xb4, 0xfe, 0x75, 0x95, 0xf2, 0xb1, 0x63, 0x11, 0x8c, 0xe6, 0x2a, 0xe8, 0xd3, + 0x96, 0x64, 0x98, 0x1f, 0x79, 0x26, 0xd3, 0xbb, 0x23, 0x11, 0xe7, 0x7b, 0xd9, 0x02, 0x18, 0xbd, + 0xe4, 0xbd, 0x1d, 0x59, 0x2c, 0xfd, 0x30, 0x60, 0x85, 0x90, 0x97, 0x7c, 0x27, 0xad, 0x25, 0xda, + 0xd4, 0xf1, 0x10, 0x3a, 0xc2, 0xec, 0xa9, 0x99, 0x38, 0xd1, 0xe9, 0xcf, 0xcc, 0x9e, 0xbd, 0xf4, + 0x88, 0xad, 0xcf, 0x95, 0x19, 0x36, 0x54, 0x28, 0x17, 0x44, 0x35, 0x53, 0x20, 0x86, 0xef, 0x20, + 0x59, 
0x88, 0xcf, 0xa5, 0xf0, 0x9b, 0x59, 0xac, 0xdb, 0xee, 0xd4, 0xa1, 0xd4, 0x29, 0x59, 0x6f, + 0x9b, 0x9e, 0x62, 0xbb, 0x5f, 0xde, 0x79, 0x0f, 0x43, 0xfa, 0xda, 0x18, 0xf5, 0xbb, 0x3b, 0xcc, + 0x1a, 0x03, 0x6d, 0x43, 0x33, 0x81, 0x0b, 0x91, 0x63, 0x77, 0x5e, 0xbe, 0x16, 0x03, 0x92, 0x3a, + 0x82, 0xba, 0xf2, 0xf1, 0x97, 0x88, 0xdf, 0x20, 0x45, 0x93, 0x4e, 0xe7, 0x4b, 0x56, 0x61, 0xab, + 0x3a, 0x52, 0x99, 0xa5, 0x0b, 0x20, 0x88, 0x0a, 0x18, 0xf3, 0x18, 0x9e, 0x7e, 0x06, 0xad, 0x3e, + 0xac, 0xc6, 0x75, 0x87, 0x32, 0x3d, 0x26, 0x2a, 0xb3, 0x71, 0x24, 0x61, 0xc2, 0x9a, 0xe9, 0xd7, + 0x77, 0x3b, 0xb4, 0x9b, 0x14, 0x8a, 0x61, 0x59, 0x27, 0x3d, 0x31, 0xe1, 0x59, 0xd3, 0x76, 0x0d, + 0x6e, 0xbb, 0xb5, 0xa2, 0xd9, 0xa6, 0xbe, 0x29, 0x2d, 0x79, 0xb5, 0x08, 0x4b, 0x67, 0xfb, 0x81, + 0x03, 0xe4, 0x5c, 0x0c, 0x1f, 0xc5, 0x7e, 0x7d, 0x28, 0xdf, 0x08, 0x61, 0x4a, 0x85, 0xcd, 0x7f, + 0x50, 0x4a, 0x1f, 0x22, 0xeb, 0xc8, 0x44, 0xe9, 0x85, 0x78, 0xf5, 0x55, 0x98, 0x86, 0xea, 0x19, + 0xb8, 0x66, 0xeb, 0x1f, 0x34, 0xd7, 0x5a, 0x7d, 0x97, 0x95, 0x0e, 0x4c, 0x64, 0x1e, 0xd2, 0x26, + 0x39, 0xf1, 0xb1, 0x46, 0xfb, 0x82, 0x96, 0x79, 0x11, 0x18, 0xc5, 0x88, 0x18, 0x8f, 0xe6, 0x5f, + 0x6e, 0x34, 0x9c, 0xf3, 0xe1, 0x3a, 0x18, 0xf4, 0xe1, 0x11, 0xac, 0xdc, 0x3b, 0x2a, 0xa7, 0x51, + 0xe3, 0x37, 0x18, 0x18, 0x35, 0x68, 0x18, 0x12, 0x38, 0xd0, 0x74, 0x0a, 0x70, 0xcb, 0x1d, 0xb7, + 0x0c, 0x65, 0x06, 0x07, 0x13, 0x0f, 0x8d, 0xb8, 0xdd, 0x45, 0xfb, 0xbd, 0xa9, 0x1e, 0x08, 0x55, + 0x0f, 0x2a, 0x33, 0x35, 0x56, 0x80, 0xb6, 0xa9, 0xf1, 0x9e, 0x24, 0xe6, 0xc0, 0x11, 0xf2, 0xea, + 0x1d, 0xa8, 0x24, 0xb2, 0xb8, 0xfd, 0xf7, 0x3a, 0x0b, 0x44, 0xc5, 0xf9, 0xf4, 0x21, 0x0a, 0xcf, + 0x6d, 0xe7, 0x07, 0x63, 0xc2, 0xce, 0x31, 0x6c, 0xc0, 0xa9, 0x49, 0x5d, 0x66, 0xcd, 0x73, 0x16, + 0x7b, 0x19, 0x37, 0x3d, 0xba, 0xc3, 0xa2, 0x3e, 0x9b, 0x42, 0x23, 0x26, 0x1a, 0x99, 0xc3, 0x10, + 0xe9, 0x05, 0x29, 0x57, 0x10, 0x88, 0x69, 0x03, 0xa3, 0xc1, 0xa1, 0x1c, 0xda, 0x62, 0xf0, 0xf5, + 0x15, 0x57, 0x3c, 0xec, 0x30, 0x57, 0x45, 0x63, 0x23, 0x65, 0xa3, 0x94, 0x5d, 0x7f, 0xb3, 0x35, + 0xfe, 0x69, 0x25, 0x94, 0x74, 0x5e, 0x24, 0x28, 0x7d, 0xe4, 0xea, 0xd6, 0x47, 0x7b, 0xd0, 0x9a, + 0x75, 0xb1, 0x03, 0x06, 0x15, 0xc2, 0x1b, 0xc2, 0x55, 0xdd, 0x31, 0x68, 0x26, 0xea, 0xe7, 0x4f, + 0xf8, 0x54, 0x9e, 0xe4, 0xbb, 0x2f, 0xa3, 0xcc, 0x3f, 0x62, 0x62, 0x98, 0xbf, 0xbf, 0x12, 0x5b, + 0xc6, 0xaf, 0x1f, 0x7b, 0x32, 0xd8, 0x67, 0x31, 0xd1, 0x25, 0xa9, 0x90, 0x38, 0x9a, 0xa5, 0x14, + 0x6d, 0x0f, 0xcc, 0xa6, 0x60, 0x43, 0x06, 0x8e, 0x72, 0x81, 0xe4, 0x44, 0x82, 0x60, 0x7b, 0x43, + 0xc8, 0x93, 0x19, 0xf1, 0xd0, 0x50, 0xc4, 0x1f, 0x3a, 0x06, 0xc8, 0x73, 0x8f, 0xe3, 0xd9, 0x03, + 0x54, 0x50, 0x7b, 0x91, 0x52, 0xe4, 0xf2, 0x75, 0xd9, 0x75, 0x07, 0xaa, 0xdd, 0x09, 0x66, 0x3f, + 0x69, 0x69, 0xc5, 0xfd, 0xac, 0xe4, 0x4e, 0xfc, 0x50, 0xb0, 0x1d, 0x1d, 0xd6, 0x5d, 0xb5, 0x18, + 0x2d, 0x23, 0x45, 0x20, 0xdd, 0xfe, 0x7f, 0x10, 0x7f, 0x3c, 0x71, 0x0c, 0xdb, 0xdd, 0xda, 0xfd, + 0x36, 0x02, 0x8c, 0x82, 0x4a, 0x21, 0x8d, 0x75, 0xe2, 0x7f, 0x92, 0x0c, 0x8f, 0xa0, 0xa4, 0xc1, + 0x43, 0xaf, 0x89, 0x57, 0x7f, 0x8e, 0x6f, 0x1b, 0x9e, 0x22, 0xdf, 0x6f, 0xcb, 0xec, 0x28, 0x19, + 0xf0, 0x7c, 0x17, 0x1f, 0x0d, 0x22, 0x89, 0x64, 0x73, 0xf8, 0xa8, 0xf4, 0x21, 0x0f, 0xd7, 0x81, + 0x1b, 0x9a, 0xef, 0x9c, 0x71, 0x33, 0x43, 0x8b, 0xfd, 0x9e, 0x1e, 0x68, 0x1b, 0x5d, 0xe5, 0x02, + 0x9b, 0x78, 0xb2, 0x3e, 0x45, 0x77, 0xd8, 0xa8, 0xf3, 0xf4, 0x86, 0xef, 0x0d, 0xec, 0x64, 0x29, + 0x22, 0x86, 0x8c, 0x3d, 0x82, 0xad, 0x68, 0x09, 0xc9, 0x59, 0xbf, 0xfd, 0xd6, 0x94, 0xe7, 0xcc, + 0x69, 0x11, 0xc8, 0x5a, 0xac, 
0xf1, 0x5e, 0xb0, 0xa8, 0xb5, 0xf8, 0x77, 0xfa, 0x36, 0x4b, 0x57, + 0xc4, 0xb8, 0x46, 0xe4, 0x08, 0xdd, 0xf8, 0x1e, 0x47, 0x4a, 0x6e, 0xa0, 0x0b, 0x32, 0x36, 0xbc, + 0x50, 0x94, 0xa6, 0xdd, 0x45, 0x8f, 0xf3, 0x84, 0x2f, 0xb8, 0x45, 0x4c, 0x34, 0x31, 0xbe, 0xbb, + 0x9f, 0x5b, 0x8d, 0xab, 0xbd, 0xf0, 0xec, 0xdb, 0x23, 0xeb, 0x34, 0x8c, 0xf4, 0x37, 0x62, 0xea, + 0xb5, 0xc7, 0x41, 0xa3, 0xb5, 0xe3, 0x3f, 0x4e, 0x75, 0xf3, 0x09, 0x07, 0xb0, 0x1b, 0xdf, 0x6f, + 0xbf, 0x31, 0xa7, 0xfb, 0xa3, 0x24, 0x53, 0xea, 0xe1, 0x99, 0xe1, 0xe0, 0x10, 0x2b, 0xba, 0x3b, + 0x2a, 0x42, 0x90, 0xfb, 0x2b, 0x78, 0x7f, 0x22, 0x23, 0x62, 0x6c, 0xad, 0xf5, 0xeb, 0xb3, 0xba, + 0x42, 0x68, 0x8c, 0xae, 0x01, 0x9a, 0xe7, 0xce, 0x29, 0xe4, 0x61, 0x84, 0x23, 0x1d, 0xc5, 0x93, + 0xc9, 0x89, 0xd2, 0xd3, 0x5e, 0x9a, 0x81, 0x77, 0x76, 0x6c, 0xc9, 0x88, 0x64, 0x4c, 0xa4, 0x8e, + 0x13, 0x05, 0xe9, 0x3f, 0x94, 0xdb, 0x5e, 0x7e, 0xec, 0x35, 0x5f, 0x77, 0x58, 0xee, 0x1a, 0x99, + 0xc7, 0xfc, 0xad, 0x74, 0x6f, 0x87, 0x28, 0xc6, 0xc6, 0xe4, 0x4c, 0x6f, 0x59, 0x30, 0x29, 0xc8, + 0x9d, 0x86, 0x8f, 0x42, 0xaf, 0xe5, 0xa4, 0x66, 0x21, 0xaa, 0xad, 0xc9, 0x81, 0x88, 0x5b, 0x6d, + 0x8f, 0xfa, 0xb9, 0x5b, 0x8f, 0x3f, 0x0d, 0xa7, 0xca, 0x91, 0x6b, 0x39, 0x0f, 0xd8, 0xab, 0xa0, + 0xff, 0x2b, 0xc1, 0x26, 0x03, 0xd5, 0xe2, 0x66, 0xcb, 0xa6, 0x3e, 0xc0, 0x34, 0xe2, 0x91, 0xf5, + 0xad, 0xb5, 0x27, 0xc4, 0x04, 0xb8, 0xb9, 0x35, 0xf4, 0x59, 0x6b, 0x89, 0x6d, 0xd8, 0xe7, 0x6c, + 0xa5, 0x3e, 0xf6, 0x9e, 0xc2, 0x59, 0xc5, 0x8f, 0xe3, 0xa7, 0x35, 0x5f, 0x72, 0xe7, 0x8a, 0x18, + 0x10, 0xd6, 0x7f, 0x96, 0x66, 0xa8, 0xf1, 0x75, 0xae, 0x81, 0xd2, 0xb7, 0x60, 0xc0, 0xb7, 0x7b, + 0x4c, 0x85, 0x2a, 0x7a, 0xa3, 0xd9, 0xe2, 0xfc, 0xb3, 0xc5, 0x5f, 0xac, 0x49, 0xef, 0xd3, 0xfe, + 0x37, 0xe7, 0xbd, 0xc7, 0x81, 0x46, 0x37, 0x24, 0xbb, 0x4a, 0x2f, 0x71, 0x62, 0x05, 0xb5, 0x5b, + 0xe2, 0xbd, 0x51, 0x45, 0x94, 0xac, 0x6e, 0xdf, 0xa6, 0x86, 0x87, 0x2f, 0x42, 0xb0, 0xc4, 0x1a, + 0xcf, 0x18, 0xc5, 0xc2, 0x83, 0xe0, 0x0e, 0x47, 0xe8, 0x2e, 0x22, 0x07, 0x68, 0x4c, 0x31, 0x8d, + 0x50, 0x88, 0x1a, 0x73, 0x35, 0x63, 0xd4, 0x82, 0xf2, 0x24, 0xbb, 0x5f, 0xcb, 0xb9, 0xd9, 0x71, + 0x5c, 0x1b, 0xdb, 0x85, 0x84, 0x98, 0x04, 0xbf, 0x08, 0x2c, 0x44, 0x3a, 0xba, 0x7b, 0xcd, 0x33, + 0x2a, 0x47, 0xe3, 0x92, 0x1b, 0x44, 0x26, 0xc6, 0x7a, 0xb8, 0x64, 0xf2, 0x2a, 0xdc, 0xc1, 0xe4, + 0x7f, 0x45, 0x4b, 0x1c, 0x9f, 0x7f, 0x2b, 0x47, 0x9e, 0xdc, 0xc8, 0xd3, 0x3c, 0x71, 0xf2, 0xaa, + 0x26, 0xca, 0x6d, 0x02, 0xef, 0x13, 0x32, 0xe9, 0xa6, 0x17, 0x73, 0x0a, 0x10, 0x0c, 0x2e, 0x8b, + 0x8d, 0x17, 0x34, 0xa8, 0xf4, 0x9f, 0x0d, 0x62, 0xca, 0x42, 0xf1, 0xa2, 0xc2, 0xa0, 0x93, 0xa7, + 0x28, 0x6d, 0xae, 0x3f, 0x30, 0xd9, 0x25, 0x78, 0x26, 0xe2, 0xaa, 0xf5, 0xd3, 0xd4, 0x0f, 0x12, + 0x9f, 0x03, 0xa1, 0x6d, 0x47, 0xaa, 0x57, 0x7b, 0xb0, 0x6a, 0x30, 0xc4, 0x15, 0x90, 0x98, 0x1f, + 0xb2, 0x54, 0xe1, 0x5e, 0x4c, 0x31, 0xf2, 0x9f, 0x74, 0x84, 0xfc, 0xe5, 0x5d, 0x3c, 0xef, 0x10, + 0xfb, 0xc8, 0x6c, 0xf7, 0x9c, 0x63, 0xf9, 0x05, 0xc3, 0x4e, 0x28, 0x17, 0x82, 0xaa, 0x41, 0x16, + 0xe9, 0xee, 0x81, 0x4c, 0x35, 0x2a, 0xea, 0xfb, 0xd8, 0x30, 0x4d, 0x24, 0x72, 0x7d, 0xf5, 0x33, + 0x02, 0xcc, 0x56, 0x6e, 0x24, 0xda, 0x67, 0x03, 0x7f, 0xa7, 0x87, 0xf5, 0x9c, 0x13, 0x52, 0xb8, + 0x11, 0xd5, 0x03, 0x7a, 0x65, 0x6a, 0xc5, 0x21, 0x5d, 0x99, 0x70, 0xc7, 0x35, 0x98, 0x13, 0x29, + 0x3e, 0x99, 0x83, 0x83, 0x53, 0xde, 0xb8, 0x90, 0x07, 0xd8, 0x5d, 0x2f, 0x30, 0xae, 0xc6, 0x3c, + 0xbd, 0xa8, 0x4d, 0x68, 0xb8, 0xfe, 0x46, 0x02, 0x61, 0xde, 0x6a, 0x71, 0x75, 0x2e, 0x77, 0xc2, + 0xc5, 0xdd, 0xbd, 0x98, 0x23, 0x59, 0x63, 0x11, 0x7e, 
0xf1, 0x33, 0x22, 0x66, 0x42, 0x2b, 0xae, + 0xc0, 0xaf, 0x47, 0xb2, 0xdc, 0x6b, 0xab, 0x5f, 0x59, 0x70, 0xad, 0x36, 0x33, 0x6c, 0xa0, 0xe4, + 0x87, 0x5c, 0x84, 0x4e, 0xb1, 0xab, 0xff, 0x31, 0x53, 0x2c, 0x2f, 0xaa, 0x7e, 0xfe, 0x1e, 0xb5, + 0x95, 0x68, 0x12, 0xe3, 0xec, 0x52, 0x5c, 0xc9, 0xb2, 0xa8, 0x4b, 0x7f, 0x74, 0xa1, 0x74, 0xf6, + 0xa7, 0x0e, 0x66, 0xe6, 0x39, 0xe9, 0x82, 0xcb, 0xa0, 0x4a, 0x83, 0xfc, 0xf1, 0x1e, 0x7d, 0x16, + 0xe7, 0x9e, 0xf6, 0x0a, 0xc8, 0x02, 0x10, 0x18, 0xfe, 0xf1, 0x6e, 0xee, 0x22, 0xcf, 0x79, 0xba, + 0xc1, 0x55, 0x43, 0x83, 0x85, 0xb1, 0x66, 0x82, 0xed, 0x57, 0x56, 0x6c, 0x7c, 0xab, 0x8a, 0x1d, + 0x56, 0x5a, 0x48, 0x89, 0x72, 0x4f, 0x9f, 0x79, 0x96, 0xe9, 0x71, 0xe4, 0xa2, 0xa9, 0x34, 0x8b, + 0xfc, 0x4c, 0x39, 0x40, 0xf6, 0xed, 0x8f, 0x0d, 0x3c, 0x4b, 0xeb, 0x2c, 0x34, 0x12, 0x2a, 0xac, + 0x72, 0x1b, 0x0d, 0x89, 0x90, 0x7d, 0xc7, 0xc5, 0x26, 0x67, 0x82, 0x7e, 0xcd, 0x15, 0x77, 0x68, + 0x57, 0x07, 0x0f, 0x62, 0xb9, 0x1d, 0xc4, 0x5b, 0x70, 0xde, 0xe5, 0x2f, 0xff, 0x53, 0x13, 0x87, + 0xfd, 0x48, 0x05, 0x5f, 0x38, 0x2f, 0xc5, 0x37, 0xf7, 0x8d, 0xfe, 0x2d, 0x6c, 0x24, 0x5d, 0x06, + 0xb7, 0xa1, 0x78, 0x97, 0xbd, 0xe7, 0xba, 0xf5, 0xef, 0xf5, 0x41, 0x35, 0x9e, 0x36, 0xa0, 0xe1, + 0x9b, 0x3c, 0x8d, 0xdc, 0xe6, 0xba, 0x14, 0x57, 0x33, 0x96, 0x91, 0x11, 0xe3, 0xdd, 0x62, 0x07, + 0x07, 0x25, 0x42, 0x53, 0x66, 0x6d, 0xb5, 0xd9, 0x0d, 0x9a, 0x2a, 0xdc, 0x2c, 0x61, 0xed, 0x5c, + 0xc9, 0x23, 0x5e, 0xcd, 0xd4, 0x21, 0xdb, 0x51, 0x45, 0x14, 0x16, 0x5b, 0x1d, 0xe0, 0x3d, 0xec, + 0x12, 0x32, 0x76, 0x7b, 0x65, 0xa0, 0xd7, 0x98, 0xab, 0xd6, 0x2b, 0xdc, 0x3a, 0x93, 0x75, 0xe8, + 0x13, 0x6d, 0x68, 0x3a, 0xf3, 0x3c, 0xeb, 0x04, 0x56, 0xfe, 0xf4, 0xc3, 0xa4, 0xa2, 0xdd, 0xe5, + 0x38, 0xf6, 0x30, 0x8e, 0xae, 0x69, 0x88, 0xe0, 0x34, 0xa4, 0xc7, 0x5c, 0x33, 0xac, 0x9d, 0x60, + 0xb7, 0x86, 0xaa, 0xfc, 0xfb, 0x53, 0x80, 0x2a, 0x3c, 0xb6, 0x76, 0x2b, 0x16, 0xbe, 0x37, 0x7b, + 0xe1, 0x15, 0x3d, 0xc6, 0xb5, 0x59, 0x1f, 0x1d, 0x87, 0x21, 0x05, 0x7f, 0xf1, 0x5e, 0x5e, 0x27, + 0x0c, 0xf3, 0x0f, 0xb2, 0x1d, 0x0a, 0xa3, 0x1b, 0x07, 0x29, 0x66, 0xdb, 0xaa, 0xf2, 0x73, 0x84, + 0xb4, 0x1b, 0x26, 0x3e, 0x89, 0x17, 0xf3, 0x10, 0xba, 0xc4, 0xaf, 0x41, 0xc8, 0x2a, 0x78, 0x45, + 0x6c, 0x6c, 0x2c, 0x6f, 0x96, 0xd2, 0x1b, 0x52, 0x4c, 0x76, 0x37, 0xff, 0xf2, 0x0d, 0x5f, 0x73, + 0x3a, 0x45, 0x63, 0x40, 0xd9, 0x31, 0x91, 0x39, 0xe5, 0x7f, 0xf6, 0x13, 0x77, 0xd1, 0x42, 0xcb, + 0xfa, 0xb9, 0x36, 0x91, 0x47, 0x36, 0x05, 0x7b, 0x91, 0xea, 0x96, 0x45, 0xe2, 0x8d, 0x06, 0xc1, + 0x9d, 0xe6, 0xf3, 0xd6, 0xeb, 0xa7, 0x47, 0x96, 0x37, 0xf8, 0xec, 0x0b, 0x94, 0x9e, 0x7a, 0x5c, + 0x90, 0xb6, 0x48, 0xc5, 0xb7, 0x94, 0x78, 0x76, 0xb9, 0x2b, 0xbd, 0x78, 0x99, 0x7a, 0xf2, 0x39, + 0x25, 0xf8, 0xa8, 0x41, 0x05, 0x5b, 0xc2, 0xd9, 0x07, 0x82, 0x45, 0x65, 0xa8, 0xb6, 0x8a, 0x91, + 0x85, 0x9f, 0x8a, 0xbb, 0x98, 0xd3, 0xd0, 0x29, 0x1d, 0xe7, 0x9e, 0xe3, 0xe8, 0x1e, 0xc2, 0x4d, + 0x50, 0xd5, 0x47, 0x93, 0x74, 0xe5, 0x5e, 0x05, 0xa6, 0x6b, 0x6b, 0xc1, 0x2a, 0x89, 0x78, 0x88, + 0xdb, 0xa0, 0xdb, 0x90, 0xba, 0x58, 0xba, 0xeb, 0x82, 0x1e, 0xaa, 0x94, 0xb1, 0x5c, 0xe5, 0xb5, + 0x52, 0x6d, 0x2d, 0x82, 0x3c, 0x1c, 0x26, 0x65, 0xc4, 0x64, 0xc1, 0x6c, 0xd7, 0xa7, 0x7f, 0x32, + 0x3e, 0x59, 0x09, 0x0a, 0x59, 0xc2, 0x23, 0xd4, 0xef, 0x91, 0x70, 0x2d, 0x4d, 0x56, 0x76, 0xfd, + 0x15, 0x77, 0x61, 0xb6, 0x56, 0xee, 0x43, 0x11, 0xfd, 0xe8, 0x3c, 0x5a, 0x38, 0x95, 0xa8, 0xec, + 0xce, 0xb5, 0x25, 0xff, 0xe0, 0x92, 0x7b, 0x98, 0x1d, 0x0f, 0xbc, 0x7e, 0x46, 0xc5, 0xd1, 0x42, + 0xfb, 0x6c, 0x38, 0xc1, 0x69, 0xab, 0xc6, 0xb7, 0x6a, 0x38, 0x4a, 0xaa, 0xd0, 
0x8c, 0xc3, 0xe9, + 0x32, 0xd3, 0xb2, 0x66, 0x25, 0x5c, 0xbc, 0x9b, 0x9f, 0x87, 0x54, 0x9a, 0x71, 0x54, 0x40, 0x21, + 0x2d, 0xc2, 0x88, 0x56, 0x4e, 0x72, 0x38, 0xcc, 0x13, 0x3e, 0x57, 0xf4, 0x38, 0x16, 0x32, 0xd9, + 0xc4, 0xbc, 0xf6, 0x22, 0x5a, 0x91, 0xc3, 0xfb, 0x35, 0xee, 0xd3, 0x88, 0xe2, 0x0c, 0x31, 0x58, + 0x61, 0xcd, 0xcc, 0x17, 0x59, 0x61, 0x47, 0xf2, 0x7d, 0x49, 0x8a, 0xbf, 0xe6, 0x2e, 0x88, 0x61, + 0x40, 0x32, 0xe2, 0xc4, 0xb9, 0x71, 0x66, 0x00, 0x98, 0x3d, 0x5a, 0xf5, 0x32, 0xa7, 0xf3, 0x76, + 0x51, 0x7b, 0x75, 0x9c, 0x2d, 0x30, 0xf6, 0x2b, 0xb0, 0xc3, 0x49, 0x8a, 0x99, 0xf1, 0x6a, 0x37, + 0x06, 0x54, 0xe7, 0x1b, 0xb5, 0x3c, 0xf1, 0x1f, 0x8a, 0x19, 0xd8, 0x39, 0x4b, 0xdf, 0xf6, 0x33, + 0x8d, 0x3b, 0x1d, 0xc1, 0x61, 0xcc, 0x11, 0x29, 0x83, 0x57, 0x75, 0x7f, 0x26, 0x35, 0x0e, 0x93, + 0x20, 0x38, 0xca, 0xeb, 0xbd, 0x05, 0x7d, 0xee, 0x4a, 0x51, 0xbf, 0x96, 0xdd, 0x19, 0x04, 0x56, + 0x43, 0xd8, 0xc3, 0x32, 0x2f, 0xae, 0x25, 0x28, 0xd7, 0x9f, 0xb5, 0x78, 0x87, 0x77, 0xe5, 0xe4, + 0xfc, 0x71, 0x5a, 0x25, 0x2e, 0xe6, 0x59, 0x1a, 0xa5, 0x10, 0x90, 0x57, 0x72, 0x4c, 0x13, 0xdc, + 0x76, 0x2f, 0xe2, 0x60, 0xdd, 0x2e, 0x7f, 0x8f, 0x29, 0xf5, 0x72, 0xd3, 0x8a, 0x12, 0xa0, 0x28, + 0xf3, 0xe9, 0xab, 0xef, 0x58, 0x41, 0xe7, 0xcd, 0x8b, 0x81, 0x26, 0xb5, 0x89, 0xde, 0xe5, 0x4b, + 0xa3, 0x19, 0x73, 0x88, 0x8e, 0x1a, 0x27, 0x37, 0xa0, 0xce, 0x20, 0xea, 0xd2, 0x55, 0x53, 0x94, + 0xd4, 0x24, 0x7d, 0x47, 0x3a, 0x4f, 0x21, 0x81, 0xba, 0xb7, 0x67, 0x5a, 0xbf, 0x7c, 0xb9, 0x10, + 0x4c, 0x8f, 0x97, 0x55, 0xec, 0xea, 0xa1, 0x9f, 0xec, 0xc2, 0xe5, 0x8b, 0xb8, 0xe5, 0xe5, 0x1e, + 0xeb, 0xa1, 0xb0, 0x5c, 0x3e, 0x5b, 0x3b, 0xd9, 0x0e, 0xa3, 0x26, 0x9b, 0x76, 0x08, 0xc8, 0xcb, + 0xbf, 0xd9, 0x16, 0xa5, 0x54, 0x01, 0xf5, 0x13, 0x25, 0x81, 0x29, 0x04, 0x22, 0x6b, 0xbf, 0x5d, + 0xd6, 0x1d, 0x5c, 0x3f, 0x6a, 0xe5, 0x04, 0x4d, 0xe3, 0xd5, 0x47, 0xc5, 0x7a, 0x2f, 0xc2, 0x12, + 0xdb, 0x92, 0xd9, 0x61, 0xfb, 0x6d, 0x18, 0xa6, 0x32, 0xdf, 0x9a, 0x59, 0x40, 0x6c, 0x73, 0x63, + 0xaf, 0x7b, 0xe3, 0x17, 0x0d, 0xcc, 0x0f, 0x4d, 0xea, 0x1a, 0x98, 0xe3, 0xff, 0xd9, 0x8f, 0x19, + 0xf2, 0x18, 0xfe, 0x92, 0x6c, 0x51, 0x7f, 0x9f, 0xae, 0x04, 0x72, 0x8f, 0xd5, 0x1a, 0xc0, 0x8f, + 0x33, 0xff, 0x0d, 0xeb, 0x7c, 0x1e, 0xb0, 0x99, 0x77, 0x00, 0x2d, 0x54, 0xf4, 0xe4, 0xd0, 0xe6, + 0x74, 0xfe, 0xbd, 0xa0, 0x64, 0xe9, 0x66, 0xb1, 0x0e, 0x1a, 0xe7, 0x69, 0x44, 0x6b, 0x50, 0x93, + 0xf6, 0xd2, 0xd1, 0x0a, 0x47, 0x03, 0x24, 0x5b, 0x99, 0x92, 0xcc, 0x10, 0x18, 0xba, 0x71, 0x6f, + 0x2d, 0x3f, 0x0d, 0x4d, 0xf6, 0x05, 0xdf, 0xec, 0x6f, 0x17, 0x34, 0xb8, 0x82, 0xcc, 0xcb, 0x27, + 0x12, 0x69, 0xf9, 0xfa, 0xe7, 0x42, 0x3b, 0xf8, 0xd7, 0x11, 0xca, 0xf4, 0x5f, 0xdc, 0x39, 0x49, + 0x05, 0x31, 0x31, 0xd0, 0x01, 0xd4, 0x1e, 0xbb, 0x07, 0x25, 0x0c, 0x00, 0xd6, 0xe2, 0xe6, 0x48, + 0x47, 0x6c, 0x6b, 0xc4, 0xf8, 0xa3, 0x7e, 0xe7, 0x3d, 0xda, 0xb3, 0x30, 0x26, 0xe1, 0xef, 0x18, + 0x8a, 0xc1, 0x13, 0xc5, 0x3f, 0xfd, 0x83, 0x77, 0x55, 0x7e, 0x83, 0x7f, 0xb7, 0x64, 0x57, 0x33, + 0x48, 0x1f, 0x17, 0xe8, 0x48, 0x0d, 0xc3, 0xcc, 0xd0, 0x18, 0xa5, 0x75, 0x4f, 0x97, 0xf6, 0x8c, + 0xa0, 0x95, 0x6f, 0xbf, 0x1c, 0xbd, 0x27, 0x66, 0xa3, 0x16, 0x01, 0xed, 0x73, 0xc3, 0x7b, 0x0c, + 0x45, 0x41, 0x2b, 0x8e, 0xd9, 0xee, 0xc2, 0xfb, 0x5e, 0x69, 0x03, 0xf8, 0x1e, 0x24, 0x97, 0xba, + 0x03, 0x4b, 0x2d, 0xf1, 0x76, 0x9f, 0x9a, 0x48, 0x6e, 0xae, 0x91, 0x22, 0x2d, 0x92, 0x62, 0xec, + 0xf4, 0x6a, 0xc7, 0x8e, 0x6e, 0xeb, 0xdb, 0x97, 0x82, 0xd6, 0x3b, 0x31, 0x51, 0x29, 0xd7, 0x65, + 0x79, 0xb3, 0x2a, 0x09, 0x4c, 0x53, 0xd5, 0x98, 0x23, 0x68, 0xf2, 0xaa, 0x0f, 0x76, 0xaa, 0x65, + 0x3f, 
0x2e, 0x92, 0x36, 0x8f, 0xfd, 0xd9, 0x48, 0x7e, 0x67, 0x87, 0xc3, 0x32, 0xba, 0x08, 0x92, + 0xcd, 0x67, 0xd2, 0x73, 0xda, 0xf9, 0xa6, 0x7e, 0x63, 0xec, 0xe0, 0x9a, 0xcc, 0xca, 0x91, 0x51, + 0x5e, 0xbf, 0xe7, 0x63, 0x7b, 0xa3, 0x13, 0x77, 0x43, 0x03, 0x21, 0x95, 0x93, 0xd8, 0xda, 0x47, + 0xdf, 0x5c, 0x22, 0xe2, 0x7a, 0xfb, 0x46, 0x97, 0x6a, 0x00, 0x18, 0x37, 0xd3, 0x3d, 0x2e, 0xec, + 0xf8, 0x38, 0xa2, 0x5f, 0x94, 0x8f, 0xd1, 0x9d, 0x96, 0xf3, 0xf5, 0xe7, 0xee, 0x9a, 0xd1, 0xfe, + 0xf3, 0x5e, 0x3f, 0x73, 0x3f, 0xe3, 0x0d, 0x52, 0x33, 0x44, 0x22, 0x92, 0xcc, 0xd8, 0xda, 0xa7, + 0xb2, 0xb4, 0xa3, 0x71, 0xef, 0x7d, 0x12, 0x73, 0x2f, 0xf2, 0x11, 0x5c, 0xe6, 0x0b, 0xba, 0x97, + 0x95, 0x33, 0x2d, 0x15, 0x0d, 0x1d, 0x37, 0x66, 0x97, 0xdf, 0x5f, 0x16, 0x42, 0x99, 0x4a, 0x03, + 0xd2, 0x6c, 0x5d, 0xf9, 0x1d, 0x98, 0xab, 0xe6, 0x5f, 0xa7, 0x33, 0x30, 0x1a, 0x80, 0xcd, 0xda, + 0x13, 0x26, 0x46, 0x7c, 0xcb, 0xa6, 0x70, 0x6d, 0xc6, 0x2e, 0x7e, 0xd6, 0xb5, 0x10, 0x77, 0x15, + 0xab, 0x21, 0x70, 0xf3, 0x5e, 0x3d, 0x4b, 0x43, 0x87, 0xf8, 0x82, 0x2a, 0xe4, 0x3f, 0x04, 0xf4, + 0x06, 0x5a, 0x35, 0x0e, 0x3f, 0x02, 0x2a, 0x03, 0x8c, 0x66, 0xe9, 0x21, 0xa3, 0xfa, 0x22, 0xe4, + 0x66, 0x63, 0xc1, 0xf3, 0x70, 0xbf, 0x93, 0x16, 0xb3, 0xcf, 0x1c, 0x7d, 0x49, 0xd7, 0x88, 0xff, + 0x31, 0xed, 0xc5, 0x0a, 0x6e, 0x60, 0x7d, 0x5f, 0x2f, 0x57, 0xe7, 0xe0, 0x0b, 0x5c, 0x49, 0xd8, + 0x39, 0xb7, 0x57, 0x26, 0x64, 0x14, 0x38, 0xea, 0x6d, 0xec, 0x25, 0x0d, 0xbb, 0x98, 0xdb, 0x59, + 0x1f, 0x4b, 0x15, 0x97, 0x56, 0x3e, 0xe6, 0x2d, 0xc3, 0x4d, 0xd2, 0x86, 0x31, 0x69, 0x73, 0x04, + 0x33, 0xb3, 0xc0, 0x2e, 0xfe, 0x35, 0xf1, 0x9b, 0xa9, 0x9a, 0x08, 0xc0, 0xa9, 0x99, 0x91, 0xb6, + 0x01, 0xba, 0x6b, 0x11, 0x98, 0x39, 0x0d, 0xbe, 0x3e, 0x54, 0x21, 0x97, 0x02, 0x79, 0x0e, 0xab, + 0xf7, 0x4e, 0x20, 0x00, 0xd9, 0x0f, 0x63, 0x7b, 0xfe, 0x2e, 0xda, 0xea, 0x28, 0x81, 0x89, 0x9a, + 0xbf, 0x30, 0x82, 0x1b, 0xea, 0x5c, 0x2d, 0x09, 0xfb, 0x03, 0x7e, 0x72, 0x40, 0x75, 0x30, 0x97, + 0x06, 0x20, 0x24, 0x28, 0xb4, 0x21, 0x35, 0x75, 0x9c, 0x6d, 0x1d, 0x66, 0xe6, 0xbc, 0x78, 0x26, + 0x19, 0xd9, 0x9c, 0x2a, 0xde, 0x95, 0x14, 0xa5, 0x98, 0x33, 0x57, 0x9f, 0xac, 0x0b, 0x4c, 0xf7, + 0xb6, 0xc2, 0x7c, 0xaa, 0xf8, 0xb8, 0x4c, 0x2f, 0x9e, 0xd1, 0x93, 0xff, 0x90, 0xf8, 0x8d, 0x99, + 0x3c, 0x51, 0xcd, 0x85, 0xb4, 0xfa, 0xcd, 0x85, 0x53, 0x63, 0x59, 0x1c, 0x06, 0x43, 0xb8, 0x26, + 0x65, 0xa1, 0x62, 0x9c, 0x20, 0xfc, 0x17, 0x9a, 0x8c, 0x62, 0x09, 0x34, 0x1f, 0xc9, 0x8a, 0xc4, + 0xc9, 0xa9, 0x1f, 0x1e, 0x11, 0xce, 0x47, 0xb1, 0x8b, 0x3a, 0xbf, 0x25, 0x74, 0x9c, 0xee, 0xf7, + 0xf5, 0xbb, 0xd2, 0x6d, 0x51, 0x6a, 0xd9, 0xdc, 0x4c, 0xfd, 0x6b, 0xb2, 0x4c, 0x08, 0xbc, 0x4a, + 0x22, 0x96, 0xe3, 0x70, 0x2e, 0x6d, 0x17, 0xe2, 0x0c, 0x05, 0xb7, 0x6a, 0x1d, 0x29, 0x29, 0xbe, + 0x1e, 0x7a, 0xa9, 0x02, 0xa1, 0xbb, 0x04, 0x1a, 0x0c, 0x84, 0x84, 0xfb, 0xa2, 0x25, 0xec, 0x7e, + 0xd8, 0x75, 0xf2, 0x34, 0xe9, 0xab, 0x20, 0x44, 0x5e, 0x25, 0x8a, 0x32, 0x5f, 0x62, 0x0b, 0x7a, + 0x89, 0xd8, 0x8d, 0x0b, 0xeb, 0x45, 0x5e, 0x7f, 0x6a, 0x58, 0x28, 0x92, 0x73, 0x37, 0x1f, 0xda, + 0x70, 0xc1, 0xae, 0x37, 0x5c, 0x73, 0x77, 0xb6, 0x99, 0x2e, 0xbc, 0xae, 0xa9, 0xcb, 0x36, 0xe6, + 0x51, 0x82, 0x18, 0x3d, 0x7a, 0x31, 0x95, 0x2e, 0xcc, 0x64, 0xa8, 0xc5, 0xa8, 0xc5, 0xe6, 0x19, + 0xd8, 0x0e, 0xb4, 0x6d, 0x5d, 0x74, 0x25, 0xb1, 0x10, 0x5b, 0x13, 0x8f, 0x47, 0x50, 0xa2, 0x59, + 0xff, 0x7b, 0xa8, 0xb0, 0xb4, 0x4c, 0xf4, 0x00, 0x79, 0x1a, 0x23, 0xee, 0xd4, 0x97, 0x05, 0xb4, + 0xbb, 0xeb, 0x2d, 0x51, 0x31, 0xb5, 0xc4, 0xb6, 0x22, 0xa9, 0x7a, 0x17, 0xc9, 0x11, 0xf0, 0x9b, + 0x70, 0x72, 0xfa, 0x3d, 0x37, 
0xda, 0x23, 0x47, 0x74, 0x7d, 0x57, 0x39, 0x48, 0xc8, 0x31, 0x6d, + 0xcc, 0x55, 0xf8, 0xe7, 0x89, 0x09, 0x33, 0x64, 0xf5, 0x68, 0xe8, 0x4a, 0x4e, 0x3d, 0x4e, 0x07, + 0xc2, 0x21, 0xdb, 0x7f, 0xee, 0x87, 0xd1, 0xe4, 0x1e, 0x53, 0x17, 0x0d, 0xed, 0x33, 0x81, 0x1a, + 0x91, 0xb1, 0x32, 0xeb, 0x2d, 0x66, 0x48, 0xb1, 0x57, 0x0f, 0x35, 0xc8, 0x76, 0x33, 0x48, 0xf7, + 0x4f, 0x17, 0xce, 0xab, 0xef, 0x90, 0xad, 0x64, 0x81, 0x7d, 0xe4, 0xdf, 0x5a, 0x8f, 0x62, 0x38, + 0x8d, 0xca, 0x6c, 0x50, 0x15, 0xf9, 0xdc, 0x07, 0xec, 0x0d, 0x52, 0x44, 0x81, 0xad, 0x0a, 0x3b, + 0x26, 0xd5, 0xa7, 0x31, 0x16, 0xb4, 0xf0, 0x2f, 0xbf, 0x82, 0x16, 0x55, 0x63, 0xad, 0x33, 0xfd, + 0x64, 0x8a, 0xab, 0x48, 0xc3, 0x53, 0xf5, 0x19, 0x8a, 0xeb, 0x13, 0x4f, 0xcc, 0xa4, 0x63, 0xa3, + 0xd9, 0x67, 0x9b, 0x51, 0x78, 0xb2, 0x42, 0x4e, 0xff, 0xad, 0xf2, 0x4c, 0xca, 0x29, 0x2d, 0x93, + 0xa7, 0xac, 0xf9, 0xc0, 0x60, 0x62, 0xa8, 0x61, 0x3f, 0x91, 0xe0, 0x23, 0x44, 0xa6, 0x91, 0x49, + 0x4f, 0xde, 0x3e, 0xb6, 0x26, 0xd0, 0x15, 0x91, 0xf6, 0x5c, 0xee, 0xf0, 0xcf, 0x4e, 0x7b, 0xd4, + 0x72, 0x1f, 0xeb, 0xc5, 0x0f, 0x7a, 0xc7, 0x3a, 0x53, 0xe7, 0x99, 0x76, 0x17, 0x60, 0xda, 0xfa, + 0x65, 0x0a, 0x42, 0x47, 0xf9, 0x2e, 0xc4, 0x15, 0x1a, 0x60, 0x37, 0x11, 0x66, 0x4c, 0xa1, 0xdd, + 0x7a, 0x13, 0xcd, 0xb9, 0x4f, 0xf1, 0xeb, 0x81, 0xcd, 0x85, 0x7b, 0xfc, 0x97, 0x20, 0x05, 0xf8, + 0x4b, 0x37, 0xa0, 0x6e, 0x27, 0x8b, 0xf5, 0x79, 0x46, 0x4e, 0x57, 0x59, 0xa7, 0x2d, 0xc5, 0xf9, + 0x4e, 0x87, 0xab, 0x3d, 0x6a, 0x28, 0x29, 0x5f, 0x4d, 0x5c, 0xf7, 0x75, 0x97, 0x7e, 0x7f, 0xb3, + 0x0f, 0x51, 0x7f, 0x16, 0x23, 0x7b, 0x1e, 0x55, 0xff, 0xbe, 0xb5, 0x12, 0x65, 0xda, 0x5f, 0x97, + 0x89, 0x42, 0x01, 0x17, 0x86, 0x39, 0x0a, 0x8e, 0xec, 0x6a, 0xbb, 0xd6, 0x34, 0x8c, 0x04, 0xa8, + 0x14, 0x4c, 0x26, 0xd4, 0x23, 0x53, 0x13, 0xe7, 0xd6, 0x1c, 0x70, 0xa2, 0x8b, 0x3b, 0x13, 0xcd, + 0x56, 0x12, 0xac, 0x73, 0x6f, 0xba, 0x0d, 0x40, 0x36, 0xf8, 0x21, 0x77, 0x7e, 0xd0, 0xe8, 0x24, + 0xe9, 0x5f, 0xb3, 0xc8, 0x4e, 0xdb, 0x07, 0x58, 0xae, 0xeb, 0x78, 0x76, 0x2d, 0x3e, 0xd8, 0x8e, + 0x6c, 0x5a, 0xb5, 0x1d, 0xfe, 0x01, 0x5b, 0xe7, 0x3d, 0x6d, 0xb5, 0x32, 0x31, 0xab, 0x91, 0xd8, + 0x83, 0xd5, 0x39, 0x2a, 0xe6, 0x42, 0xd4, 0x30, 0x9d, 0x6f, 0x22, 0xe7, 0x1c, 0xed, 0x85, 0x1b, + 0x40, 0x68, 0x7c, 0xcb, 0x5d, 0xdb, 0x41, 0xa4, 0x77, 0x5c, 0x48, 0xca, 0x10, 0x4a, 0x07, 0x46, + 0xe7, 0x45, 0xcf, 0x97, 0x26, 0x57, 0x93, 0x02, 0x52, 0xee, 0xe9, 0xea, 0xc2, 0x0a, 0x19, 0xfc, + 0x24, 0xe4, 0x64, 0x1c, 0x52, 0xc5, 0xac, 0xe7, 0x52, 0x12, 0x0d, 0xdc, 0x81, 0x1b, 0x4e, 0xff, + 0x14, 0x0e, 0xdf, 0x47, 0xf0, 0xf7, 0x74, 0x21, 0x82, 0xdb, 0x9c, 0x91, 0x43, 0xf4, 0x79, 0x85, + 0xf7, 0xe0, 0x40, 0xc5, 0x58, 0xa0, 0x38, 0x5a, 0xf2, 0xfb, 0xed, 0xaf, 0x32, 0xaa, 0x29, 0x1a, + 0x29, 0x40, 0x35, 0x27, 0x8f, 0x97, 0x20, 0xe4, 0x15, 0x59, 0xa2, 0x91, 0xc4, 0x1c, 0x92, 0x75, + 0xb2, 0x1c, 0xac, 0x7b, 0x1f, 0x27, 0xa1, 0x75, 0x90, 0x35, 0x67, 0x15, 0xfc, 0x6b, 0xf9, 0xa8, + 0x0b, 0x41, 0x0f, 0x82, 0x3b, 0x7e, 0xe3, 0xe4, 0x91, 0x9b, 0x99, 0x1c, 0x89, 0x04, 0xab, 0x8a, + 0xb7, 0x8a, 0x10, 0xe6, 0xd8, 0xf3, 0x3f, 0xf3, 0x2f, 0xaf, 0x69, 0xfa, 0x1d, 0xd2, 0x75, 0x27, + 0x87, 0xa5, 0x66, 0xb7, 0x48, 0xad, 0x63, 0x0f, 0x35, 0xdf, 0x52, 0x05, 0x1a, 0x07, 0xa7, 0x6d, + 0xae, 0x48, 0xb6, 0x93, 0x1c, 0x2f, 0x2b, 0xab, 0xe4, 0x92, 0xe1, 0xcd, 0x2e, 0x01, 0xef, 0xa2, + 0x03, 0x7e, 0x12, 0xbf, 0xd6, 0x60, 0x6f, 0xe1, 0xe0, 0x6c, 0x70, 0xd3, 0xe1, 0xdf, 0x4f, 0x34, + 0x30, 0x8e, 0x8d, 0x03, 0x86, 0x31, 0x83, 0xde, 0x55, 0xda, 0x5e, 0x9e, 0xf7, 0x88, 0xa3, 0xaa, + 0x08, 0x0e, 0x82, 0xc9, 0x7e, 0xf8, 0x6e, 0x04, 0xc6, 
0xd2, 0xda, 0x4f, 0xe9, 0x58, 0xb7, 0xeb, + 0x5f, 0xc3, 0x53, 0xf5, 0xdb, 0x10, 0xb9, 0xb3, 0x2f, 0x27, 0x23, 0xb9, 0x40, 0x3c, 0x16, 0x86, + 0xa8, 0x5c, 0x1e, 0x7c, 0xb7, 0x92, 0x58, 0x89, 0x45, 0x73, 0xaf, 0xf0, 0xe4, 0x05, 0x58, 0x94, + 0x4a, 0xa9, 0xa4, 0xea, 0xdd, 0x00, 0x7a, 0x5e, 0xc9, 0x12, 0xac, 0x11, 0xc3, 0x3d, 0x69, 0x70, + 0x98, 0x8e, 0xb9, 0x3c, 0x74, 0x72, 0xb1, 0x19, 0x8f, 0x75, 0x26, 0xfc, 0x87, 0x2a, 0xeb, 0xdf, + 0x27, 0xfe, 0xa3, 0x19, 0x6b, 0xec, 0x80, 0xb2, 0x15, 0x1a, 0x85, 0xed, 0xe7, 0xaa, 0xb7, 0x43, + 0x13, 0x62, 0xc3, 0x52, 0xf6, 0xc0, 0x15, 0x61, 0x38, 0xcc, 0x81, 0xe2, 0x61, 0x24, 0x4a, 0x17, + 0xe6, 0x6e, 0xa6, 0xd0, 0x2a, 0x48, 0x5b, 0xc1, 0x66, 0x09, 0xbb, 0x36, 0xf9, 0x1d, 0xca, 0xc0, + 0x4c, 0x19, 0x98, 0x02, 0xc3, 0xc1, 0xa2, 0xf9, 0x87, 0x4e, 0x49, 0x04, 0x2b, 0x67, 0x9a, 0x3d, + 0xc3, 0xce, 0x4d, 0x1e, 0xa2, 0x95, 0x45, 0xcc, 0xe9, 0x70, 0x92, 0x20, 0x1e, 0x2c, 0x67, 0x40, + 0x5f, 0xd0, 0xd9, 0x20, 0x93, 0x78, 0x87, 0x3a, 0x28, 0x06, 0xb6, 0x0e, 0x62, 0x3b, 0xe1, 0xf9, + 0x93, 0x10, 0x98, 0x27, 0x6f, 0x7a, 0x9f, 0xed, 0xe4, 0xd3, 0xe2, 0x5c, 0xe0, 0xa7, 0x12, 0x8f, + 0xb4, 0x78, 0x96, 0xb6, 0x24, 0xa1, 0x38, 0xbe, 0x11, 0x98, 0x7d, 0xc6, 0xc9, 0x5a, 0xa8, 0x9f, + 0xba, 0xbf, 0x3c, 0xba, 0x46, 0x79, 0xc5, 0x0e, 0xb7, 0xef, 0x8d, 0xb3, 0x45, 0xcc, 0xd6, 0xf9, + 0x03, 0x75, 0xbf, 0x32, 0x73, 0x78, 0xc5, 0x41, 0x70, 0xe5, 0x9d, 0xb9, 0x42, 0xbf, 0x2c, 0x21, + 0xd4, 0x64, 0x28, 0x62, 0x4c, 0xb0, 0xbb, 0xe2, 0xa9, 0x9b, 0xdf, 0x43, 0x02, 0x83, 0x49, 0xe4, + 0xb1, 0xe9, 0xa9, 0x71, 0x19, 0x8d, 0xa6, 0x39, 0xf1, 0x7d, 0x88, 0xe2, 0x28, 0xa0, 0xf3, 0x26, + 0xd8, 0x96, 0xa8, 0x8a, 0xe8, 0x59, 0xea, 0x37, 0xbe, 0x4b, 0x00, 0x76, 0xca, 0xd9, 0xe4, 0x8f, + 0xa0, 0x1b, 0x58, 0x02, 0x35, 0x9d, 0x66, 0x90, 0xd7, 0xff, 0xb6, 0xf9, 0x7f, 0xfa, 0xff, 0xfe, + 0x5f, 0xff, 0xf5, 0x5f, 0xff, 0xf5, 0x5f, 0xff, 0x33, 0xfe, 0x1f, 0x5e, 0xa8, 0x94, 0x98, 0x00, + 0x26, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 9728, // uncompressed data size (bytes) + 7779, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x56, 0x00, 0x62, 0x56, 0x08, 0x13, 0x4c, 0xc8, 0x43, 0x69, + 0x20, 0x00, 0x00, 0xfe, 0x9c, 0x72, 0x04, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 9728 +// COMPRESSED SIZE (bytes): 7780 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_image_prod_data[] = +{ + 0xed, 0xd9, 0xd3, 0x5b, 0x25, 0x00, 0xd7, 0x36, 0xf0, 0x6c, 0xdb, 0x98, 0x26, 0xdb, 0xd8, 0x35, + 0xd9, 0xd8, 0xd9, 0xd3, 0xc4, 0x9d, 0x6d, 0xdb, 0xdc, 0x61, 0xc7, 0xc9, 0x4d, 0xb6, 0x6d, 0x4e, + 0x9c, 0x6c, 0xbb, 0x26, 0xbb, 0xa9, 0xf7, 0x3d, 0x7e, 0xfe, 0x81, 0xf7, 0xfa, 0xae, 0xef, 0xf9, + 0x9d, 0xad, 0xe3, 0x75, 0xad, 0x83, 0xfb, 0x5e, 0x50, 0x50, 0xff, 0x7f, 0x4b, 0x8a, 0xa1, 0x47, + 0xce, 0x02, 0x89, 0x59, 0xa7, 0xee, 0x71, 0xa5, 0x8b, 0x47, 0xd2, 0x15, 0xee, 0xfc, 0x61, 0x47, + 0x0e, 0x85, 0x19, 0x74, 0x35, 0x54, 0x9f, 0x9f, 0xaf, 0xe7, 0xef, 0xc9, 0xdd, 0xdd, 0x2b, 0x89, + 0xa7, 0x39, 0x7f, 0xe5, 0xce, 0x29, 0x5a, 0xa0, 0x3a, 0x29, 0x6a, 0xba, 0x6f, 0x3d, 0x94, 0x73, + 0xc2, 0xdd, 0x0b, 0xa2, 0x2e, 0x43, 0x23, 0xfd, 0xd9, 0xbb, 0x35, 0x1f, 0x94, 0x27, 0xc6, 0xd6, + 0x52, 0xc6, 0x92, 0xf2, 0x94, 0x64, 0x3b, 0x8b, 0xfb, 0x29, 0xdb, 0xeb, 0x83, 0x87, 0x44, 0xa7, + 0xfd, 0x90, 0xac, 0x08, 0x5e, 0xe9, 0x20, 0xbb, 0xd1, 0xec, 0x88, 0x3a, 0xf3, 0xa4, 0xad, 0xdf, + 0x02, 0x78, 0x78, 0x7f, 0x9d, 0xa4, 0x76, 0x34, 0x7d, 0xab, 0x12, 0x4b, 0x01, 0x95, 0x23, 0x4c, + 0x8a, 0xac, 0xeb, 0x14, 0xf8, 0x7d, 0x8a, 0x6c, 0x43, 0xbb, 0xb3, 0xb5, 0x1a, 0xe0, 0x22, 0x33, + 0x2f, 0xcd, 0xc1, 0x53, 0x26, 0x64, 0x18, 0x14, 0xbc, 0xe3, 0xc9, 0xe5, 0xed, 0x36, 0x47, 0x03, + 0xed, 0x60, 0x63, 0x28, 0xc5, 0xec, 0x98, 0xe6, 0xb3, 0xa6, 0x5c, 0x1d, 0xc3, 0xef, 0xbf, 0x62, + 0x82, 0xaf, 0x2f, 0xbe, 0xa6, 0x2a, 0x72, 0x78, 0x44, 0xc7, 0x3c, 0x10, 0x78, 0x3a, 0xcc, 0x0f, + 0xf5, 0x9e, 0xfd, 0x00, 0xb2, 0x11, 0xcf, 0x96, 0x53, 0xe4, 0x28, 0x92, 0x2b, 0xd0, 0xc8, 0x8b, + 0xea, 0xfc, 0xd2, 0xbc, 0xcf, 0x53, 0x68, 0xb9, 0xe1, 0x52, 0xa7, 0x56, 0x68, 0x79, 0x7c, 0xe7, + 0x29, 0xdc, 0x70, 0xb6, 
0x64, 0xa3, 0x4b, 0xa4, 0x6d, 0x52, 0x69, 0x46, 0x25, 0xf5, 0x39, 0x8f, + 0xb4, 0x95, 0xec, 0x09, 0x21, 0xa2, 0x4a, 0x14, 0x55, 0xfa, 0x49, 0x1f, 0x9f, 0xdb, 0xe9, 0xa8, + 0xe3, 0x6f, 0x9e, 0xe9, 0x16, 0xa0, 0x18, 0x2f, 0x58, 0x2d, 0xa4, 0xc8, 0xfd, 0xf1, 0x4f, 0xeb, + 0x41, 0x4c, 0x0e, 0x75, 0x0b, 0xe2, 0x6a, 0x43, 0x91, 0x4f, 0x11, 0xf6, 0x56, 0x87, 0x14, 0x7f, + 0x3c, 0x67, 0xfc, 0x56, 0x05, 0x7c, 0x9a, 0xa9, 0x39, 0x43, 0xa9, 0xeb, 0x1c, 0x8b, 0x8b, 0x8c, + 0x95, 0x3a, 0xfc, 0xb4, 0xef, 0x5b, 0x49, 0x2f, 0x83, 0x1a, 0x0c, 0xbe, 0x1c, 0xa1, 0xd8, 0x58, + 0x1c, 0x27, 0x15, 0x1c, 0xa8, 0x24, 0x1f, 0xa5, 0x72, 0x66, 0xc3, 0x4e, 0xce, 0xc5, 0xa4, 0x12, + 0xbe, 0xaa, 0x8d, 0xdc, 0xa3, 0x22, 0xda, 0x27, 0x79, 0x19, 0x68, 0xe9, 0x26, 0x45, 0x9a, 0xfe, + 0xd5, 0xfc, 0xc7, 0x84, 0x76, 0x87, 0xa9, 0x57, 0x2d, 0x0a, 0xf3, 0xa9, 0xd5, 0xbe, 0xb2, 0xce, + 0x29, 0xc5, 0xa5, 0x81, 0x5a, 0x3f, 0x62, 0x44, 0x8a, 0xe7, 0xd2, 0x74, 0xc0, 0x43, 0x9e, 0x08, + 0xc7, 0x08, 0xaa, 0xa0, 0x99, 0x6c, 0xf7, 0xfb, 0x37, 0xfc, 0xa7, 0xf6, 0x23, 0xef, 0xad, 0x6a, + 0x90, 0x66, 0xbc, 0x6a, 0x7a, 0xb2, 0xe0, 0xec, 0x98, 0x4d, 0xb9, 0xd1, 0xaf, 0x06, 0xd4, 0x34, + 0x8c, 0x32, 0x0d, 0xb8, 0x52, 0x3e, 0x28, 0x20, 0xf8, 0xfc, 0xfe, 0x38, 0x48, 0x3e, 0xc8, 0x87, + 0x0b, 0x6b, 0x1e, 0xce, 0x8e, 0xb5, 0x1e, 0x9c, 0x57, 0x1e, 0x72, 0x41, 0xa0, 0x66, 0xc9, 0xe0, + 0x2d, 0xf0, 0x77, 0xc7, 0x0c, 0xa3, 0xa2, 0xc2, 0x2b, 0x4c, 0xe7, 0x2e, 0x50, 0xcd, 0xdf, 0x5e, + 0x08, 0x48, 0xc4, 0x05, 0xfa, 0xa1, 0xad, 0xc1, 0xd7, 0x9a, 0x37, 0x88, 0xcd, 0x28, 0x78, 0x54, + 0x2e, 0xfb, 0x8d, 0x04, 0x96, 0xe5, 0x72, 0xed, 0x56, 0x02, 0x1c, 0x57, 0x94, 0x4a, 0x7c, 0x65, + 0xb6, 0xec, 0x44, 0xf6, 0x95, 0x44, 0xbd, 0xe0, 0x25, 0x34, 0x93, 0xd0, 0x95, 0xc1, 0x32, 0x3c, + 0x90, 0x6c, 0x85, 0x01, 0xcf, 0xb7, 0xf4, 0xed, 0x8b, 0xbe, 0x01, 0x50, 0x68, 0xf2, 0x05, 0x54, + 0x96, 0x2e, 0x69, 0x6d, 0x35, 0xa7, 0xcf, 0x88, 0x43, 0x39, 0x5f, 0x16, 0xd3, 0xbf, 0x4b, 0xca, + 0xc7, 0x29, 0x6f, 0xdf, 0x3c, 0x9a, 0x59, 0x76, 0x96, 0xa3, 0x88, 0x8c, 0x97, 0x6b, 0xb8, 0xde, + 0x40, 0x8a, 0xba, 0xa4, 0xe1, 0xe9, 0x69, 0x8d, 0x39, 0x92, 0x3a, 0x7e, 0xe1, 0xc8, 0x29, 0x07, + 0x55, 0x6c, 0x88, 0xf1, 0xd2, 0x6e, 0xad, 0x64, 0x31, 0x1c, 0xc3, 0x77, 0xcf, 0x55, 0xe4, 0x46, + 0xd5, 0xd9, 0xb3, 0xe9, 0x89, 0x85, 0xb5, 0x51, 0x88, 0x6f, 0x51, 0x42, 0x9d, 0xbf, 0x94, 0x27, + 0xc7, 0x9f, 0x68, 0x68, 0x46, 0xd0, 0xeb, 0x2e, 0xbb, 0x10, 0x8b, 0x5b, 0x57, 0xf7, 0x02, 0xb3, + 0xb5, 0xb9, 0x23, 0x79, 0xbe, 0x64, 0xc1, 0xdd, 0x2c, 0x35, 0x4f, 0xbe, 0x9c, 0x63, 0x9a, 0x18, + 0x81, 0xb3, 0x6e, 0x39, 0x0f, 0x6c, 0xfa, 0x3e, 0x34, 0x9d, 0x67, 0xdf, 0x98, 0x63, 0x82, 0x19, + 0x6a, 0x41, 0x0f, 0xc1, 0x28, 0xc5, 0x19, 0xd1, 0xe4, 0xa9, 0xa9, 0x57, 0x9d, 0xf5, 0xc0, 0x2e, + 0xbf, 0xc9, 0x50, 0xeb, 0xfb, 0x93, 0xf2, 0x4c, 0x65, 0x30, 0x2a, 0xfa, 0x6f, 0xde, 0xc2, 0x8d, + 0xde, 0xbf, 0x9f, 0xf8, 0xdb, 0xb0, 0x89, 0x3f, 0x71, 0xda, 0x7b, 0x33, 0x7f, 0xf3, 0x6d, 0x4a, + 0x13, 0x79, 0xae, 0x8a, 0x58, 0x67, 0x83, 0x2c, 0xf2, 0x8d, 0xf1, 0xae, 0x88, 0x2d, 0xf5, 0xf3, + 0x94, 0x2d, 0x4b, 0x83, 0xf0, 0x03, 0xcf, 0x82, 0xf7, 0x09, 0x27, 0x2b, 0xe9, 0xcc, 0xa3, 0xd1, + 0x63, 0xc0, 0x6f, 0xb3, 0x9f, 0xee, 0x72, 0xe5, 0xd3, 0x33, 0x9f, 0xf1, 0xa1, 0x78, 0xfb, 0x06, + 0xd1, 0x5d, 0x51, 0x22, 0x28, 0x15, 0xd3, 0xd6, 0x1a, 0x24, 0xe3, 0x84, 0x65, 0xd7, 0xb6, 0xcf, + 0x64, 0xbd, 0x98, 0xea, 0xb6, 0x56, 0xc4, 0xcf, 0x7c, 0xab, 0xaf, 0xa5, 0x49, 0xce, 0xa6, 0xc0, + 0x26, 0xba, 0xaf, 0x51, 0xf1, 0x4c, 0x3a, 0xc6, 0xc5, 0xce, 0x0f, 0x05, 0xaa, 0x65, 0x4e, 0x4a, + 0x7a, 0x85, 0xd5, 0xfc, 0xa1, 0x86, 0xfc, 0x30, 
0x48, 0xfd, 0xf9, 0xbf, 0xd9, 0x97, 0x1c, 0xc2, + 0x7b, 0x5a, 0x77, 0x0c, 0x2c, 0xae, 0xb3, 0xb8, 0x70, 0x98, 0xa6, 0x95, 0xb6, 0xa3, 0xef, 0x50, + 0x1f, 0x03, 0x50, 0x94, 0xa6, 0x5f, 0x02, 0x3a, 0x5c, 0x2f, 0xa4, 0x7e, 0xd3, 0x17, 0x7d, 0x5f, + 0xdb, 0x36, 0x01, 0xfe, 0x76, 0xf7, 0x4f, 0x60, 0xa4, 0x29, 0xff, 0xa9, 0x49, 0x6b, 0xdd, 0x27, + 0xea, 0xd7, 0x81, 0xd9, 0x9a, 0x61, 0x8c, 0xef, 0xc9, 0x19, 0xca, 0xd3, 0xb0, 0xcc, 0xd6, 0xa6, + 0xd7, 0x72, 0xba, 0x22, 0x1b, 0x75, 0xe1, 0x36, 0xc2, 0x02, 0x19, 0x0e, 0x93, 0xd8, 0xc6, 0x08, + 0xce, 0x6f, 0x45, 0x42, 0x80, 0x52, 0x12, 0x49, 0xae, 0x3b, 0x23, 0x52, 0xce, 0x0d, 0x05, 0xc7, + 0x7e, 0x6a, 0x89, 0xdf, 0x60, 0x59, 0x0a, 0x32, 0x50, 0x1a, 0x89, 0x15, 0xbf, 0x98, 0x33, 0xf5, + 0x4c, 0xea, 0x41, 0x84, 0x38, 0xdd, 0x56, 0xc4, 0x8f, 0x76, 0x0c, 0xd5, 0xcf, 0x88, 0xef, 0x58, + 0xca, 0x65, 0xbb, 0xe0, 0xcd, 0x01, 0xf4, 0x83, 0x45, 0xdd, 0x31, 0xf1, 0xab, 0x37, 0x9c, 0x39, + 0x41, 0x93, 0x9e, 0xa3, 0x43, 0x2d, 0xbf, 0xfe, 0xad, 0x20, 0x9c, 0xd6, 0x9f, 0x8b, 0x9f, 0xb6, + 0xb1, 0x72, 0x69, 0x71, 0xc2, 0xa5, 0xb2, 0xda, 0x44, 0x8d, 0x79, 0x87, 0xf2, 0x07, 0xfb, 0x48, + 0x1c, 0x96, 0xa2, 0xce, 0x78, 0xa6, 0x40, 0x65, 0x0e, 0x6b, 0xf1, 0x7b, 0xb2, 0x24, 0x8d, 0x1c, + 0xdd, 0x3b, 0x29, 0xea, 0x33, 0xbd, 0x50, 0x22, 0xfe, 0xd0, 0x4d, 0x0c, 0x5c, 0xeb, 0xc1, 0xfb, + 0x25, 0x63, 0xbd, 0xe2, 0xb9, 0xc7, 0xd0, 0xb4, 0xba, 0x06, 0x98, 0x24, 0xc3, 0xb7, 0xe9, 0x41, + 0x40, 0x11, 0x37, 0x0f, 0x76, 0xd2, 0xa8, 0x09, 0x57, 0x57, 0xde, 0x84, 0xf3, 0x01, 0xb8, 0xfb, + 0x0b, 0xd9, 0xc4, 0x4e, 0x48, 0x70, 0x3d, 0x8f, 0x4e, 0xde, 0x0f, 0xcb, 0x98, 0x59, 0x0f, 0x43, + 0x59, 0xaa, 0x0e, 0x19, 0x5e, 0x6f, 0x6e, 0x42, 0x22, 0x3e, 0xa5, 0x49, 0xbf, 0xb2, 0xd2, 0x13, + 0xe7, 0xa1, 0x79, 0x6e, 0x45, 0x68, 0x54, 0x15, 0x88, 0xd8, 0x03, 0x48, 0x27, 0xa5, 0x8f, 0xce, + 0x2b, 0x4f, 0x05, 0xfa, 0x28, 0x26, 0x91, 0xdb, 0x2e, 0x85, 0xd4, 0xe1, 0x89, 0xd6, 0x5f, 0x1c, + 0xde, 0x58, 0x22, 0xc3, 0xd2, 0xec, 0x88, 0x90, 0xe7, 0xef, 0xb7, 0xea, 0x6f, 0x30, 0x4d, 0x2b, + 0x52, 0x69, 0x3f, 0x6e, 0x73, 0x6e, 0x59, 0xbd, 0xd3, 0x45, 0x4e, 0x8f, 0x8f, 0x39, 0xa9, 0x0e, + 0x6f, 0x84, 0xd6, 0xbe, 0x8a, 0xe8, 0x6f, 0x9d, 0x0c, 0x27, 0xf3, 0xac, 0x18, 0xac, 0x71, 0x3a, + 0x58, 0xd3, 0x51, 0xc7, 0x92, 0x7b, 0xc8, 0x06, 0x38, 0xca, 0x00, 0xf5, 0x23, 0xca, 0xc4, 0x71, + 0x81, 0x2f, 0xa8, 0xf5, 0x0a, 0x7e, 0x57, 0xa7, 0x9b, 0x83, 0x0b, 0x2b, 0xb8, 0x52, 0xac, 0x72, + 0x33, 0xe9, 0xbc, 0xd7, 0x79, 0xf3, 0xbe, 0xe4, 0xe4, 0xeb, 0xff, 0x78, 0x32, 0xda, 0x76, 0x14, + 0x09, 0xd7, 0xec, 0x4e, 0x49, 0x83, 0x7f, 0xcf, 0x6f, 0x9e, 0x3c, 0x34, 0xdb, 0xdc, 0x76, 0xca, + 0x2b, 0x84, 0x1b, 0x38, 0xc3, 0x8d, 0xb0, 0x3c, 0xb3, 0x20, 0x56, 0xad, 0x67, 0x0c, 0xaa, 0x62, + 0xfa, 0x09, 0xe4, 0x16, 0xb0, 0xe3, 0xad, 0x87, 0x52, 0xa0, 0x93, 0xbb, 0x91, 0xc2, 0x52, 0x1f, + 0x62, 0x06, 0xf2, 0x59, 0xe2, 0x43, 0xa1, 0x92, 0x32, 0x10, 0x4a, 0x1b, 0x5e, 0xdb, 0x8f, 0x55, + 0x69, 0xd1, 0x76, 0xad, 0xd3, 0x22, 0x3f, 0xac, 0x14, 0x2c, 0x74, 0x60, 0x44, 0xaf, 0x6b, 0x8d, + 0x8e, 0xbf, 0x2d, 0xc6, 0x01, 0x7f, 0x97, 0x2a, 0xd9, 0xcf, 0xb5, 0xf3, 0x7d, 0xa7, 0x4c, 0xa3, + 0x16, 0xe4, 0x6c, 0xbd, 0x2b, 0x17, 0xbd, 0xb4, 0xc4, 0x3c, 0x9b, 0xe4, 0xea, 0xc6, 0xb6, 0x45, + 0xa8, 0xd4, 0x0a, 0xc1, 0x19, 0xc6, 0xbe, 0x5c, 0x8b, 0xfd, 0xdd, 0x1c, 0x48, 0x75, 0x8d, 0x0c, + 0x9b, 0x21, 0xf8, 0x19, 0xf4, 0x5a, 0x3b, 0xad, 0x5d, 0xc2, 0xc4, 0x36, 0x39, 0xe2, 0x6d, 0x18, + 0xf6, 0x0a, 0x7c, 0xff, 0xd2, 0xe9, 0xd9, 0x45, 0xc3, 0x2f, 0x7d, 0xbb, 0xa5, 0xbf, 0x7c, 0x8e, + 0xb8, 0x41, 0xed, 0x67, 0xb9, 0x8e, 0x89, 0x50, 0x53, 0x1f, 0x76, 0xf2, 
0x74, 0x30, 0x24, 0xf0, + 0xd8, 0x21, 0x53, 0xed, 0x28, 0xa1, 0xd1, 0x4c, 0x20, 0x9d, 0x7a, 0x06, 0x83, 0x8d, 0xcd, 0x3b, + 0x7f, 0xdc, 0xc3, 0xa3, 0x83, 0x67, 0x6c, 0x14, 0x5c, 0x25, 0x2d, 0xa2, 0x6d, 0x19, 0x08, 0x3a, + 0xa4, 0x88, 0xf8, 0x9b, 0x92, 0x02, 0xf1, 0x8a, 0x65, 0x88, 0x62, 0x93, 0xed, 0x0b, 0xd9, 0x12, + 0x51, 0x67, 0xc2, 0xc8, 0x66, 0x6f, 0x9b, 0xbe, 0x4d, 0x30, 0x71, 0xae, 0x46, 0x71, 0xce, 0x0f, + 0xa5, 0x47, 0x70, 0xad, 0xa0, 0x4b, 0xa0, 0x5a, 0xb5, 0x96, 0x28, 0x5d, 0x39, 0x7f, 0x1e, 0x30, + 0x6b, 0xd8, 0xd5, 0x92, 0x8f, 0x7e, 0x19, 0x63, 0x85, 0x35, 0x91, 0xd2, 0x48, 0xe9, 0x05, 0x4d, + 0xfc, 0x24, 0x2e, 0x38, 0x9c, 0x22, 0x58, 0x32, 0x94, 0x68, 0x14, 0x73, 0x4b, 0xf7, 0x3c, 0xda, + 0x44, 0x94, 0x2e, 0xc8, 0xaa, 0xd5, 0x49, 0xc7, 0x32, 0x7a, 0x6c, 0x77, 0x1f, 0x03, 0xef, 0xf3, + 0x95, 0x67, 0x92, 0xc9, 0xcc, 0x1b, 0x3e, 0xbb, 0x89, 0x2c, 0xfa, 0x8c, 0xf2, 0xd7, 0x92, 0xb1, + 0x45, 0x8d, 0xf3, 0x60, 0xbf, 0x9a, 0x8f, 0xb8, 0xea, 0x73, 0x68, 0x6d, 0xf6, 0x26, 0x37, 0x00, + 0x45, 0x66, 0xe3, 0x1c, 0xb9, 0xc4, 0x33, 0x95, 0xa5, 0x49, 0x8c, 0x86, 0xb5, 0xfe, 0x48, 0x94, + 0x60, 0x54, 0x6d, 0x6b, 0xf3, 0x9d, 0x47, 0x51, 0x49, 0xe8, 0xb0, 0x07, 0xa3, 0x17, 0xfd, 0x92, + 0x9f, 0x2f, 0xb3, 0xc3, 0x5c, 0xaa, 0xc4, 0xfe, 0x72, 0x38, 0x7a, 0x19, 0xfa, 0xd4, 0x96, 0xfe, + 0x9a, 0x67, 0x00, 0x1a, 0x2f, 0x18, 0x1e, 0xfc, 0x5e, 0x5d, 0xc6, 0x0b, 0x00, 0x15, 0x55, 0xba, + 0xfc, 0xd9, 0x76, 0xe3, 0x90, 0xa6, 0xbd, 0x8e, 0xfe, 0x6b, 0xd2, 0xed, 0x37, 0xe7, 0x2f, 0xba, + 0x77, 0x3a, 0x06, 0x5f, 0xe3, 0x2b, 0x68, 0x56, 0xe4, 0xa4, 0x25, 0x52, 0x27, 0x73, 0x51, 0x32, + 0xde, 0x72, 0xe6, 0xe3, 0x83, 0x16, 0xab, 0x88, 0xee, 0xb5, 0x7e, 0xe4, 0x1c, 0x25, 0x7b, 0x90, + 0xc8, 0xcc, 0x06, 0x9d, 0xd9, 0x5c, 0x4c, 0x15, 0x49, 0x2c, 0x88, 0xa2, 0x52, 0x7d, 0x37, 0x40, + 0xe4, 0x9e, 0xd8, 0xda, 0x43, 0xfe, 0xe6, 0x91, 0x32, 0x79, 0x7c, 0x1b, 0x6c, 0xe8, 0x55, 0xd1, + 0x5a, 0x76, 0x5a, 0x42, 0x46, 0xff, 0x24, 0x94, 0x5d, 0x27, 0xaf, 0xfb, 0xc4, 0xfb, 0x0f, 0xf5, + 0xc6, 0x5b, 0x1a, 0x80, 0x25, 0xca, 0xd1, 0x54, 0x53, 0x99, 0xbc, 0xd9, 0xf0, 0x85, 0xef, 0x6f, + 0xe6, 0x1c, 0x65, 0x08, 0x57, 0xe8, 0xd4, 0xcf, 0xd8, 0x66, 0x40, 0xe5, 0xa3, 0xe0, 0x41, 0x08, + 0xf8, 0xe8, 0x93, 0xed, 0x51, 0x35, 0xe8, 0xa5, 0x80, 0x77, 0xd5, 0xaa, 0x38, 0x9d, 0x27, 0x7b, + 0xea, 0x42, 0x06, 0x08, 0xd0, 0x4f, 0x5d, 0xb7, 0x6c, 0x39, 0xc7, 0x2e, 0x95, 0x81, 0xcc, 0xcd, + 0x65, 0x96, 0xe5, 0x51, 0xf2, 0x25, 0xbd, 0xc4, 0xe4, 0xc8, 0x6d, 0x8e, 0x2b, 0x62, 0xfe, 0x86, + 0x73, 0x50, 0x00, 0xd9, 0xe5, 0x9a, 0x82, 0x65, 0x26, 0xbc, 0xf7, 0x3e, 0x10, 0x82, 0xae, 0xdf, + 0x50, 0x03, 0xe0, 0x75, 0x95, 0x7e, 0x94, 0xd6, 0x9e, 0xa4, 0xde, 0x1e, 0x2a, 0xd8, 0x99, 0xc1, + 0xbd, 0x92, 0xc9, 0x20, 0xa6, 0xbc, 0x18, 0xcf, 0xbf, 0x89, 0x44, 0xdf, 0x6a, 0xca, 0xa1, 0xdd, + 0xcc, 0xe6, 0x57, 0x14, 0x65, 0xcb, 0xdd, 0x5a, 0x0a, 0x9a, 0x52, 0x2e, 0x10, 0xee, 0x26, 0x7a, + 0x60, 0xfb, 0x2b, 0x53, 0x37, 0xab, 0x4b, 0x7f, 0x8c, 0x61, 0x4b, 0xef, 0x53, 0xba, 0x2c, 0x8c, + 0xef, 0xbb, 0xc6, 0x33, 0x7e, 0x52, 0xaa, 0xc8, 0x7b, 0xa9, 0xaa, 0x11, 0x43, 0x2e, 0xf7, 0x37, + 0x47, 0xec, 0x8c, 0x8f, 0xdc, 0x0c, 0x74, 0x97, 0x0a, 0xa9, 0xef, 0xb3, 0x10, 0x84, 0xdb, 0x37, + 0x69, 0xd8, 0x9f, 0xe2, 0xa3, 0xc5, 0x92, 0xf3, 0xbe, 0x3f, 0xa4, 0x2e, 0x27, 0x4d, 0xd1, 0x36, + 0xcb, 0xc5, 0x78, 0x90, 0xe8, 0xb7, 0x18, 0xe0, 0x43, 0x9c, 0xaa, 0x58, 0x6a, 0xf3, 0x9b, 0xb6, + 0x6e, 0x25, 0x1c, 0x27, 0xe6, 0xb8, 0x76, 0x21, 0x5c, 0xf4, 0xdb, 0x01, 0x86, 0x11, 0x37, 0xcb, + 0xa5, 0x2f, 0x5b, 0x79, 0xb9, 0xfe, 0x14, 0xec, 0x96, 0xbf, 0x11, 0x73, 0x07, 0x1f, 0xf5, 0x95, + 
0x47, 0x04, 0xcb, 0x99, 0xbd, 0x73, 0x78, 0xf1, 0x84, 0x04, 0x89, 0x52, 0x35, 0x01, 0x99, 0x0b, + 0xc8, 0x72, 0xac, 0x67, 0x12, 0xc2, 0x3c, 0xd9, 0x34, 0x42, 0xb8, 0x01, 0x7b, 0xc8, 0xed, 0x4d, + 0xcb, 0xe5, 0xfb, 0xa9, 0x03, 0x60, 0xec, 0x4b, 0xda, 0x98, 0x73, 0x44, 0x03, 0x56, 0xc5, 0x75, + 0xde, 0x80, 0x61, 0xaa, 0x64, 0xa7, 0x8c, 0x6e, 0x25, 0xc9, 0xad, 0xfb, 0xf1, 0xb9, 0x86, 0x7d, + 0x3e, 0x76, 0x24, 0x65, 0x54, 0xdc, 0xd9, 0x65, 0x4e, 0x7a, 0x91, 0x1a, 0xe8, 0x59, 0x53, 0x7b, + 0x6e, 0xb4, 0xb0, 0xd5, 0xb0, 0x37, 0xa9, 0x42, 0x3a, 0x58, 0x49, 0xd6, 0x10, 0xc2, 0x28, 0x37, + 0x2b, 0x22, 0xef, 0xa7, 0x79, 0xd1, 0xb5, 0x92, 0x7e, 0xfe, 0xf6, 0xdb, 0x18, 0x9b, 0x7f, 0x2d, + 0x9d, 0xfe, 0x57, 0xc0, 0xe1, 0x40, 0x14, 0x41, 0x20, 0x95, 0xbd, 0x4b, 0x0c, 0x25, 0xb1, 0x2d, + 0x99, 0x5e, 0x4f, 0xdd, 0x09, 0x85, 0xfe, 0xbd, 0xaa, 0xdf, 0x98, 0x96, 0x3e, 0x1e, 0xa8, 0x0a, + 0x13, 0x33, 0x8b, 0x3a, 0xe9, 0xb0, 0xb0, 0xcd, 0xfe, 0x65, 0x46, 0x4a, 0x67, 0x70, 0x19, 0x23, + 0x38, 0xfb, 0x59, 0xe1, 0x6f, 0x17, 0x68, 0x61, 0x98, 0x4f, 0xe1, 0xc4, 0x17, 0x86, 0x7d, 0xce, + 0x1b, 0xcc, 0x31, 0x74, 0x0c, 0x5b, 0x5e, 0x25, 0x37, 0x48, 0x8b, 0x9c, 0x9c, 0xb4, 0xad, 0x60, + 0xf8, 0xc0, 0x31, 0x26, 0xad, 0xdc, 0x48, 0x43, 0x67, 0xbe, 0x09, 0x7c, 0x22, 0xf1, 0xcb, 0xf4, + 0x42, 0xcb, 0x9b, 0xb7, 0x90, 0xad, 0xa1, 0x84, 0x0f, 0xcf, 0xcf, 0x78, 0x1f, 0x3a, 0xeb, 0x71, + 0xf9, 0xc8, 0x2e, 0x52, 0x85, 0xb2, 0x42, 0xf0, 0xf9, 0x27, 0xe9, 0xb7, 0xc1, 0x1c, 0xb3, 0xc3, + 0xd3, 0x40, 0xf7, 0x9d, 0xaa, 0x0a, 0x89, 0x1e, 0x8b, 0xd7, 0x21, 0xd5, 0xe7, 0x17, 0x90, 0x07, + 0x06, 0x54, 0x58, 0x2e, 0x62, 0xc0, 0xf4, 0xc4, 0xac, 0xc6, 0x21, 0x03, 0x46, 0xe1, 0x99, 0xf7, + 0x36, 0xce, 0xb8, 0xd3, 0x89, 0xeb, 0xe6, 0x1c, 0x3d, 0x96, 0x89, 0xc0, 0x14, 0x07, 0xf2, 0x0e, + 0xd9, 0xf4, 0x52, 0x85, 0xde, 0xbb, 0x94, 0x27, 0x7c, 0x6c, 0x45, 0x32, 0x05, 0xb4, 0xc9, 0x59, + 0x27, 0x42, 0x96, 0xbd, 0xfe, 0xcc, 0x9c, 0xe6, 0x19, 0x6f, 0x2d, 0x63, 0x81, 0xa5, 0x13, 0x3a, + 0x83, 0x02, 0x01, 0x9b, 0x67, 0xb7, 0xfe, 0x1e, 0x82, 0xb1, 0xf9, 0xdd, 0x07, 0x01, 0x16, 0x3b, + 0xc1, 0x27, 0x73, 0x6f, 0xd5, 0x69, 0x3d, 0xf1, 0x07, 0x6c, 0xc8, 0x80, 0x43, 0xdc, 0x88, 0xd2, + 0x4b, 0x97, 0x95, 0x90, 0x37, 0x02, 0xd5, 0x0c, 0x26, 0x35, 0xf4, 0x43, 0xc9, 0x11, 0x0b, 0x72, + 0x31, 0x43, 0x99, 0x30, 0x33, 0x1d, 0x6b, 0x86, 0x68, 0xde, 0x9c, 0xc4, 0x5e, 0xe9, 0xf1, 0x2d, + 0xb8, 0xdc, 0xe9, 0x06, 0xb6, 0x23, 0x48, 0x1b, 0x62, 0xeb, 0x82, 0xf4, 0x1c, 0x2b, 0xcb, 0x75, + 0x1c, 0x66, 0xa0, 0x8d, 0xd1, 0x76, 0x42, 0x1e, 0xb2, 0x05, 0x5b, 0x7f, 0xc9, 0x69, 0x9e, 0x03, + 0x39, 0x7c, 0xfa, 0xec, 0x58, 0x05, 0x7e, 0xf1, 0x81, 0x36, 0xa9, 0xf2, 0x73, 0x56, 0xea, 0xdf, + 0xe5, 0x63, 0x93, 0xe0, 0x59, 0xbe, 0x39, 0xe2, 0xf5, 0x5e, 0x86, 0x25, 0xe1, 0xb1, 0xf0, 0xdc, + 0x6f, 0x6d, 0x7f, 0x91, 0x09, 0x2e, 0x1e, 0x39, 0xb5, 0xea, 0x36, 0x53, 0xb8, 0x80, 0xc8, 0xfe, + 0xf3, 0x73, 0x25, 0xf7, 0xac, 0xc2, 0x0d, 0x78, 0x0b, 0x63, 0x9b, 0xc3, 0xba, 0xcf, 0xcc, 0xd7, + 0x1d, 0xa4, 0xc9, 0x0d, 0xaa, 0x81, 0x18, 0x51, 0x29, 0x3c, 0xcf, 0xb1, 0xda, 0x9d, 0xa4, 0x46, + 0x8a, 0xd2, 0xeb, 0xa8, 0xc1, 0x20, 0xdb, 0x4e, 0x8b, 0xd6, 0x66, 0x2e, 0xa0, 0xb2, 0xf9, 0x27, + 0xdf, 0xcd, 0x29, 0x25, 0x8d, 0xea, 0x89, 0x89, 0x27, 0x8b, 0x8d, 0x2d, 0x7b, 0xb7, 0x6e, 0x36, + 0xf6, 0x8c, 0x2b, 0x6c, 0x4d, 0xda, 0x0d, 0xb8, 0x87, 0xc9, 0xa6, 0xa6, 0x81, 0x38, 0x7c, 0x39, + 0xed, 0xf0, 0xd7, 0x7b, 0xe6, 0x39, 0x00, 0xcf, 0x75, 0xdf, 0xae, 0xf2, 0x90, 0xf9, 0xe3, 0xfa, + 0x5d, 0xdf, 0x29, 0x8b, 0xad, 0x88, 0xc8, 0x58, 0xf3, 0xc4, 0x80, 0x9e, 0xe2, 0x1c, 0xef, 0x61, + 0xe5, 0xa4, 0x52, 0x13, 
0xb7, 0x84, 0xbd, 0x7b, 0x9b, 0xa7, 0x8a, 0x01, 0x47, 0x25, 0x98, 0x7e, + 0xdb, 0x89, 0xf1, 0xee, 0x60, 0x4b, 0xb4, 0x61, 0x70, 0x6f, 0x6c, 0x93, 0xcb, 0x85, 0x48, 0x31, + 0x7c, 0xde, 0x72, 0xf2, 0x6f, 0xa5, 0x7b, 0x35, 0x25, 0xd8, 0x94, 0xb1, 0x64, 0x9f, 0xf1, 0x00, + 0x30, 0x10, 0xca, 0x10, 0xcf, 0x97, 0x99, 0x05, 0xa8, 0x36, 0x63, 0x79, 0xdf, 0x89, 0x59, 0x52, + 0x64, 0x98, 0xf5, 0x9b, 0xe8, 0x23, 0xc6, 0x03, 0x14, 0x21, 0x60, 0xca, 0x10, 0x6a, 0x5c, 0x04, + 0x83, 0xcd, 0xa7, 0x24, 0x96, 0x69, 0xfb, 0x03, 0x4f, 0x45, 0xba, 0x00, 0xfd, 0xa8, 0x51, 0x9c, + 0x77, 0xf5, 0xa6, 0x1c, 0x5e, 0xae, 0xb9, 0x46, 0xa4, 0x60, 0x11, 0x6e, 0x6c, 0x41, 0x88, 0x75, + 0x90, 0x6f, 0x7e, 0x12, 0xac, 0x5f, 0x3c, 0x95, 0xeb, 0x5f, 0x36, 0x0d, 0x5a, 0x2e, 0xeb, 0xec, + 0x7a, 0x9e, 0x22, 0x21, 0x13, 0xee, 0xe9, 0x19, 0x49, 0x6d, 0x56, 0xc8, 0xfd, 0x87, 0x2a, 0xdc, + 0x95, 0xd8, 0xc9, 0x10, 0x8f, 0x6f, 0x25, 0xcc, 0x42, 0xee, 0x10, 0x67, 0xbb, 0xd5, 0x7d, 0x2e, + 0x1b, 0xfe, 0x59, 0xaf, 0x6a, 0xa1, 0xe7, 0x5e, 0xa5, 0xfd, 0xc6, 0xa5, 0xe6, 0xe4, 0x20, 0x4b, + 0x86, 0x1a, 0x8a, 0x39, 0xde, 0x92, 0x70, 0x89, 0x42, 0xd4, 0xd0, 0x6e, 0x56, 0x79, 0xfd, 0x8f, + 0xb6, 0x40, 0x73, 0x21, 0x2d, 0x70, 0xa3, 0x21, 0xd6, 0x85, 0x3a, 0x5c, 0xe3, 0xcb, 0xc2, 0xe8, + 0x9d, 0xed, 0x0a, 0x4d, 0x1c, 0x68, 0x32, 0xda, 0x9a, 0x74, 0x21, 0xe7, 0x73, 0x29, 0xef, 0xdd, + 0xfb, 0xce, 0xd2, 0xc0, 0xd1, 0x2b, 0x99, 0x07, 0x28, 0xfe, 0xfd, 0x26, 0xa4, 0x42, 0x49, 0x12, + 0xec, 0xed, 0xee, 0x37, 0xba, 0x2d, 0xb4, 0x42, 0x5f, 0x46, 0xa3, 0x20, 0xf4, 0x4d, 0xb5, 0x79, + 0x69, 0x66, 0x2c, 0x93, 0x5a, 0x62, 0x29, 0x7d, 0xdd, 0x66, 0x61, 0x41, 0x48, 0xd5, 0xf3, 0x68, + 0x67, 0xcd, 0x9d, 0x70, 0x26, 0x3d, 0x13, 0x38, 0x10, 0x08, 0xb8, 0x49, 0x10, 0xe9, 0xea, 0x22, + 0x31, 0x8b, 0x62, 0x50, 0x13, 0x4c, 0xc1, 0x63, 0x99, 0x95, 0x09, 0x41, 0x39, 0xd5, 0x40, 0xa7, + 0x7f, 0x2f, 0xfb, 0x62, 0xfb, 0x55, 0xfc, 0xb8, 0xe5, 0xf2, 0x6e, 0x1e, 0x6e, 0xb1, 0x92, 0xae, + 0x0c, 0x10, 0x66, 0xb4, 0x21, 0x98, 0x2d, 0x9e, 0xd9, 0x4d, 0x83, 0xc4, 0xb0, 0xa6, 0x95, 0xc3, + 0xbb, 0x96, 0xba, 0x91, 0xdd, 0x45, 0x29, 0x60, 0x37, 0x82, 0xf8, 0x8d, 0x39, 0x29, 0xc4, 0xb4, + 0xae, 0x28, 0xf3, 0xa5, 0x20, 0x74, 0x0a, 0x05, 0xff, 0xb5, 0x23, 0xdc, 0x4b, 0xfe, 0xa7, 0xc4, + 0x37, 0x7f, 0xfb, 0xbb, 0x82, 0x01, 0xd8, 0xcd, 0x9a, 0x65, 0xe9, 0x18, 0x9b, 0x48, 0xa1, 0xd5, + 0xb4, 0x9b, 0x9d, 0xb0, 0x73, 0x88, 0xb6, 0x76, 0xda, 0x2c, 0x63, 0x2d, 0xf6, 0x6b, 0xf6, 0x16, + 0xba, 0x71, 0x1d, 0xc5, 0x6d, 0xc8, 0xcb, 0xd4, 0x1e, 0x23, 0xa9, 0x5a, 0x2a, 0xc7, 0x0f, 0xc3, + 0xa8, 0x38, 0x2c, 0xe2, 0x68, 0x36, 0x86, 0xec, 0x47, 0x19, 0xd8, 0x03, 0x8e, 0x76, 0xce, 0x5a, + 0x85, 0xbb, 0x2a, 0x36, 0x58, 0xb4, 0xe3, 0xbe, 0x60, 0x29, 0xbe, 0x61, 0x49, 0x86, 0x76, 0xe5, + 0x5a, 0x03, 0xce, 0xc0, 0x4a, 0x7f, 0xf6, 0x27, 0x50, 0x81, 0xc9, 0xb1, 0xfa, 0x94, 0x27, 0xd4, + 0x56, 0x7c, 0x7a, 0x40, 0x18, 0xf7, 0xaf, 0xae, 0x3e, 0xc7, 0x4f, 0xb4, 0x72, 0x76, 0x2e, 0x7c, + 0xed, 0x13, 0x14, 0xc0, 0xec, 0xe2, 0xab, 0xc7, 0x1c, 0xd7, 0x9e, 0x9d, 0xe2, 0x92, 0x7c, 0x28, + 0xd3, 0x2a, 0x16, 0x4b, 0x50, 0xd1, 0x57, 0x2b, 0x3e, 0x5e, 0x83, 0xee, 0xdf, 0xce, 0x60, 0x2f, + 0xf8, 0x6a, 0x3d, 0x1e, 0x89, 0x13, 0x7a, 0x9b, 0x8d, 0x0b, 0xec, 0xdd, 0x44, 0x76, 0xb7, 0x6a, + 0x42, 0xef, 0x56, 0x7b, 0xe8, 0xdf, 0x4a, 0xc3, 0x82, 0x63, 0x2e, 0x97, 0x9c, 0xba, 0x93, 0x72, + 0x6b, 0x5f, 0x60, 0xe8, 0x7b, 0xc8, 0x51, 0x37, 0xd5, 0x0f, 0xdb, 0x01, 0x16, 0x53, 0xcc, 0xa3, + 0xa7, 0x3f, 0x8d, 0x71, 0xa7, 0x14, 0x8f, 0xdb, 0x9e, 0xd8, 0xca, 0x24, 0x91, 0x3e, 0xeb, 0x84, + 0x59, 0x2d, 0x72, 0x9c, 0xfd, 0xfc, 0x9e, 0xed, 
0x31, 0xa4, 0x9f, 0x85, 0x5e, 0x80, 0x36, 0x77, + 0xfd, 0xe1, 0xea, 0x08, 0xda, 0x01, 0x5b, 0x60, 0x2b, 0x3c, 0xef, 0xe1, 0x3e, 0x2f, 0x91, 0x12, + 0x65, 0x33, 0x94, 0xc1, 0x33, 0x2c, 0x5e, 0xd3, 0xaa, 0x10, 0xe3, 0x19, 0x66, 0x41, 0x75, 0xa2, + 0x3f, 0xfb, 0x1f, 0xce, 0x17, 0x5a, 0x27, 0xb4, 0xb7, 0x98, 0x2b, 0xdd, 0xa4, 0x68, 0x37, 0x42, + 0x12, 0xac, 0x35, 0x28, 0x2b, 0xed, 0x8e, 0x0a, 0xb8, 0xee, 0x25, 0xa5, 0xc5, 0xe5, 0xaa, 0x87, + 0x2a, 0x04, 0x19, 0x7c, 0x7e, 0x21, 0x2b, 0xc8, 0x57, 0xb0, 0x01, 0x98, 0x97, 0xa5, 0x12, 0xde, + 0xba, 0x4e, 0x6b, 0xdc, 0xe2, 0xef, 0x97, 0xfd, 0x28, 0x69, 0x95, 0xe3, 0xcd, 0xa4, 0x95, 0xa2, + 0x6a, 0xbe, 0x85, 0xc8, 0xa8, 0x49, 0x21, 0xe6, 0x5b, 0xde, 0xe9, 0x2d, 0x59, 0xea, 0xc6, 0xf8, + 0xd3, 0x38, 0xcb, 0xef, 0xaf, 0x59, 0x25, 0xa4, 0xef, 0x36, 0x8a, 0x9e, 0x64, 0x4e, 0x61, 0x0f, + 0x92, 0xa7, 0xe4, 0x0b, 0x3d, 0x6d, 0xa1, 0x6a, 0xbb, 0x9b, 0xad, 0x9e, 0xaa, 0x94, 0xf1, 0x9d, + 0x00, 0x06, 0xea, 0x92, 0xb4, 0x24, 0x98, 0xc0, 0x4a, 0xe0, 0xb2, 0xe3, 0xc7, 0xb9, 0xa4, 0xa8, + 0xdf, 0x5a, 0x8a, 0xe5, 0x8d, 0x73, 0xba, 0x41, 0x80, 0x27, 0xa7, 0x1d, 0x9a, 0x76, 0x48, 0x48, + 0x58, 0x55, 0x51, 0x31, 0x6b, 0xbf, 0x5b, 0x69, 0x26, 0xcf, 0x2a, 0x85, 0xe0, 0xcd, 0xfb, 0x1f, + 0xb5, 0x7f, 0x3a, 0xd8, 0xdf, 0x71, 0x1b, 0x8d, 0xf0, 0xe2, 0x13, 0x0c, 0x95, 0xa8, 0x2c, 0xa4, + 0x08, 0xf4, 0x67, 0x6a, 0x94, 0x35, 0x11, 0x85, 0x43, 0x18, 0xce, 0x1f, 0x8c, 0xe3, 0xc9, 0xe4, + 0x4a, 0xd7, 0x94, 0xdd, 0xd9, 0x9f, 0xc6, 0x33, 0x39, 0xb3, 0x06, 0x45, 0xe0, 0xdf, 0x2f, 0xad, + 0xe6, 0x03, 0x2e, 0xac, 0x58, 0x5a, 0xf3, 0x99, 0xc4, 0xb1, 0x7d, 0xa1, 0x2b, 0x34, 0xae, 0x84, + 0x4d, 0xe0, 0x4a, 0xbf, 0xa2, 0x17, 0xb2, 0x46, 0xf3, 0x28, 0xb4, 0xa6, 0x80, 0x1a, 0xbb, 0xe9, + 0x29, 0xae, 0xca, 0xe1, 0x51, 0x60, 0x58, 0xe5, 0xa7, 0xad, 0x2f, 0x63, 0x72, 0x0d, 0xa8, 0x9d, + 0x0e, 0xe4, 0x6c, 0x87, 0x27, 0xde, 0x64, 0xc2, 0x10, 0xe1, 0x4c, 0x2a, 0xf6, 0xb4, 0x9d, 0x0f, + 0x93, 0x52, 0x5a, 0xea, 0xe5, 0x6c, 0x4d, 0xce, 0x8a, 0x0b, 0xe5, 0x7d, 0x64, 0xef, 0xa0, 0x1c, + 0xad, 0x1b, 0xb7, 0x8c, 0xb2, 0x30, 0x0c, 0xf8, 0x8d, 0x04, 0x0c, 0x07, 0xca, 0x61, 0xdc, 0x44, + 0x13, 0x5f, 0x42, 0x51, 0xf5, 0x05, 0xf9, 0x64, 0x7e, 0x3a, 0xaa, 0x4a, 0xae, 0xd3, 0x0a, 0x58, + 0x42, 0xa8, 0x36, 0x61, 0x3e, 0x0d, 0x12, 0x50, 0xb0, 0x82, 0x82, 0x68, 0x25, 0xfe, 0xb4, 0xe4, + 0x3c, 0xf6, 0xd2, 0xb3, 0xf1, 0xf8, 0x14, 0x70, 0xd5, 0x22, 0x24, 0xdb, 0x75, 0x8a, 0x9b, 0x66, + 0x97, 0x97, 0x8e, 0xd7, 0x5c, 0x7f, 0xe9, 0x47, 0x3d, 0xa5, 0x36, 0xbf, 0x9e, 0xb9, 0xd8, 0x4b, + 0x9d, 0x4d, 0x73, 0xa9, 0x8e, 0xb9, 0x30, 0x04, 0xe8, 0x50, 0xf4, 0xe3, 0x00, 0xc3, 0x30, 0x6a, + 0xa9, 0xb4, 0x55, 0x23, 0xa3, 0x48, 0xc8, 0x6c, 0x2b, 0x97, 0xe8, 0xe9, 0x8a, 0x54, 0x2b, 0xcd, + 0xb3, 0x38, 0xaa, 0xcb, 0x55, 0xa0, 0x36, 0x92, 0x7a, 0xfd, 0x5e, 0xd2, 0xb9, 0x0f, 0x43, 0x1b, + 0xd0, 0x91, 0xfa, 0x54, 0xce, 0x64, 0x8c, 0x6f, 0xc3, 0x53, 0x7e, 0xb7, 0xfa, 0x4e, 0x2c, 0x0f, + 0x5a, 0x2d, 0x0a, 0x55, 0xd6, 0x13, 0xc3, 0x3a, 0x84, 0x65, 0x20, 0x40, 0x6c, 0x3a, 0x00, 0xe8, + 0x31, 0x1e, 0x06, 0x1d, 0xe3, 0x0e, 0x3b, 0x5c, 0x6c, 0xc5, 0xc8, 0x29, 0x85, 0xfd, 0xb3, 0xa1, + 0x99, 0xe1, 0x2d, 0x57, 0xb7, 0x10, 0x5d, 0x39, 0x61, 0x3f, 0x0d, 0x12, 0xa6, 0xe0, 0xd1, 0x31, + 0x7e, 0x6b, 0x0c, 0x4b, 0x77, 0x05, 0xe7, 0x32, 0x1e, 0x91, 0x4a, 0x01, 0x13, 0x18, 0x43, 0xb4, + 0x55, 0xc4, 0xf2, 0xfb, 0x28, 0x89, 0x9e, 0xe9, 0x02, 0xdd, 0x46, 0x19, 0x6e, 0x66, 0x8b, 0x96, + 0xb0, 0x5c, 0x61, 0xc2, 0x52, 0x42, 0x4b, 0x06, 0x36, 0x52, 0x58, 0x6b, 0xa2, 0x1b, 0x0a, 0xc6, + 0xf7, 0x8e, 0x15, 0xdd, 0x93, 0x7b, 0xc5, 0x24, 0x20, 0x05, 0xed, 0x5c, 
0x3f, 0x89, 0x9e, 0xdf, + 0xe3, 0x96, 0xa4, 0x59, 0xe0, 0xc9, 0x7e, 0x99, 0x08, 0x3d, 0x9a, 0x0b, 0x7a, 0xa3, 0x7c, 0x0a, + 0xc2, 0x6a, 0xe9, 0x2b, 0xca, 0x9e, 0x6f, 0xaa, 0x74, 0xbe, 0x50, 0xb7, 0x70, 0x41, 0x69, 0xae, + 0x3c, 0x0d, 0x0e, 0xc9, 0x1e, 0xe6, 0xd8, 0x2f, 0x8a, 0x0e, 0x97, 0x83, 0xe1, 0x1f, 0x5c, 0x88, + 0x53, 0x03, 0x0a, 0x3f, 0x67, 0x07, 0x36, 0x84, 0x38, 0xe8, 0x7d, 0x1b, 0xf3, 0xc8, 0x1c, 0x3e, + 0xd7, 0xb8, 0xb6, 0x62, 0x23, 0xc1, 0x52, 0x5a, 0x08, 0x14, 0xa4, 0x6f, 0x0d, 0x3f, 0xdc, 0xba, + 0xa1, 0xee, 0xbf, 0x2a, 0x55, 0x3d, 0xb7, 0x9e, 0x4f, 0x9e, 0x0c, 0x21, 0x2e, 0x17, 0xde, 0xe3, + 0x69, 0x66, 0x69, 0x8f, 0x06, 0x1b, 0x2a, 0x4f, 0xaf, 0x51, 0xaa, 0x93, 0x19, 0x21, 0xdc, 0x0b, + 0x73, 0x34, 0x84, 0x7d, 0x93, 0xe1, 0xd0, 0x46, 0xf7, 0xb8, 0x7f, 0x56, 0xe8, 0xef, 0x61, 0x4a, + 0xdd, 0x73, 0x90, 0xbe, 0x47, 0x27, 0x0a, 0xe0, 0x9a, 0x3c, 0x06, 0x1a, 0x96, 0x39, 0xc0, 0x05, + 0x79, 0xad, 0x06, 0xda, 0x7d, 0xe7, 0xd4, 0x62, 0x2d, 0x37, 0xc7, 0xa0, 0x26, 0x1f, 0xd3, 0x92, + 0x44, 0xd7, 0x8a, 0x7e, 0x0e, 0xf8, 0x89, 0xec, 0x46, 0xdb, 0x27, 0x7f, 0x99, 0xe7, 0xb5, 0x0e, + 0xca, 0x92, 0x36, 0xa5, 0x1c, 0xd0, 0xcd, 0x0d, 0xc3, 0xf0, 0x72, 0x14, 0xb2, 0xcb, 0x51, 0x2f, + 0xec, 0x36, 0x05, 0xbb, 0x2e, 0x43, 0x39, 0x6b, 0xb6, 0x9a, 0x19, 0xa8, 0x49, 0xd3, 0x45, 0x0a, + 0xee, 0xd5, 0xb5, 0xb4, 0x6e, 0xc1, 0x3b, 0x76, 0xc2, 0x67, 0xf5, 0xed, 0x42, 0x5f, 0xbd, 0x80, + 0xb5, 0x29, 0x16, 0xa4, 0xef, 0xad, 0x67, 0xcf, 0x90, 0x40, 0x05, 0x23, 0xed, 0xb7, 0xe3, 0x69, + 0xbd, 0x16, 0x07, 0x9c, 0x7f, 0x7f, 0x8e, 0x89, 0xb6, 0x2d, 0x3c, 0x01, 0xa7, 0x13, 0x33, 0x2d, + 0xcf, 0xdd, 0xce, 0x07, 0xa5, 0x77, 0xc2, 0x51, 0x5d, 0xab, 0xc6, 0x77, 0xaa, 0x05, 0x79, 0xac, + 0xaa, 0x5f, 0x7a, 0xa5, 0xa9, 0x65, 0xfc, 0x58, 0xdf, 0xc1, 0x6b, 0xbe, 0xff, 0xe4, 0x68, 0x03, + 0x84, 0xbe, 0x6c, 0x64, 0x87, 0x20, 0x6d, 0xa4, 0x7e, 0x5b, 0x44, 0x20, 0x96, 0xd8, 0x56, 0xde, + 0xbf, 0xc3, 0xde, 0x56, 0x8d, 0xfd, 0x29, 0x86, 0x24, 0x25, 0x4d, 0xf7, 0x71, 0x4b, 0x6f, 0x31, + 0x24, 0x80, 0xfd, 0xad, 0xab, 0x12, 0xe6, 0xa1, 0xfa, 0x0c, 0xa1, 0xf4, 0x93, 0xc7, 0xf1, 0x60, + 0x8d, 0x6f, 0x49, 0x19, 0x35, 0x88, 0x95, 0x59, 0x2e, 0x3c, 0x1f, 0xb7, 0x07, 0x76, 0x06, 0x48, + 0xaf, 0x23, 0xb8, 0xa9, 0xcb, 0xde, 0x36, 0xf6, 0x5c, 0xd6, 0x32, 0xbf, 0xab, 0xb6, 0x46, 0x5d, + 0x0b, 0xc6, 0xe6, 0x75, 0x5b, 0x19, 0x9e, 0xa4, 0x71, 0x7d, 0x16, 0x48, 0x0d, 0xe4, 0xa9, 0xed, + 0xd9, 0xca, 0xc3, 0xf7, 0x9c, 0xe7, 0xd6, 0xb6, 0xd1, 0x3b, 0x8e, 0x09, 0x6d, 0x47, 0xc7, 0x11, + 0x9d, 0x78, 0x6a, 0xc7, 0x55, 0x5a, 0x08, 0xa9, 0x95, 0x7f, 0xd3, 0x82, 0x20, 0xd9, 0xdd, 0x21, + 0x54, 0x15, 0x45, 0x71, 0xab, 0x78, 0x4f, 0x3e, 0x56, 0xdb, 0xe9, 0x51, 0x30, 0x15, 0x72, 0x56, + 0xcc, 0xac, 0x0d, 0xdc, 0x37, 0x5d, 0x11, 0x11, 0xb2, 0xc6, 0x71, 0xf3, 0x3b, 0xab, 0xc3, 0xa4, + 0x19, 0x16, 0xfa, 0x97, 0x06, 0xbd, 0xb3, 0x0f, 0x25, 0x5c, 0x04, 0x40, 0x66, 0xed, 0x0c, 0x1a, + 0xb3, 0xf8, 0x35, 0x89, 0x90, 0x6f, 0x17, 0xe7, 0x82, 0x93, 0x82, 0x5d, 0x52, 0xb1, 0xdf, 0xb9, + 0xd6, 0x96, 0xe0, 0x5c, 0x96, 0xb2, 0xe2, 0xd1, 0x76, 0x07, 0xff, 0x37, 0xaf, 0xef, 0xb1, 0xf4, + 0xba, 0xd0, 0x7f, 0xf8, 0x4c, 0x1a, 0x5b, 0x95, 0xdf, 0x87, 0xc4, 0xad, 0xb6, 0x76, 0x99, 0x70, + 0x54, 0xbb, 0x46, 0xb3, 0xf1, 0xe9, 0x05, 0x23, 0xf8, 0xe7, 0xfd, 0x78, 0xcb, 0x01, 0x09, 0x7b, + 0x31, 0xde, 0xf8, 0x70, 0xd4, 0x41, 0x07, 0xab, 0x46, 0x08, 0xb8, 0xb9, 0x07, 0x88, 0x9f, 0x58, + 0xe4, 0x7b, 0xe9, 0x71, 0xeb, 0x0f, 0x3a, 0xaf, 0xb8, 0x6f, 0xde, 0xea, 0x0b, 0x6b, 0x3b, 0x77, + 0xa8, 0x3b, 0x23, 0x79, 0xe5, 0x7c, 0x45, 0x4b, 0x31, 0x05, 0x8a, 0xa3, 0xb0, 0xff, 0xab, 0x39, + 
0xe8, 0x17, 0x2f, 0x93, 0x34, 0xdb, 0xa6, 0x5b, 0x05, 0x4e, 0x2c, 0x10, 0x0a, 0x6f, 0x63, 0x84, + 0x33, 0x54, 0x2c, 0xd0, 0x3d, 0x1a, 0x08, 0xcb, 0xf6, 0xd0, 0xe0, 0x29, 0xe2, 0x92, 0x23, 0x74, + 0xa9, 0xc1, 0x8f, 0x48, 0x11, 0x5c, 0x1f, 0x8f, 0x45, 0x6a, 0x4f, 0xc7, 0xdc, 0x48, 0x34, 0x8c, + 0xe7, 0xa8, 0x05, 0x36, 0x24, 0x25, 0x1a, 0xc3, 0x9b, 0xe6, 0x6e, 0x6b, 0x3f, 0xc3, 0x4b, 0x64, + 0x52, 0xc9, 0x04, 0xaa, 0x9d, 0x5c, 0x99, 0x2a, 0x80, 0xe7, 0xc6, 0xf4, 0x3c, 0x0b, 0xc7, 0x89, + 0x29, 0xfe, 0x46, 0x45, 0x07, 0x0d, 0x01, 0xb0, 0xca, 0x87, 0x4c, 0x15, 0x00, 0x9c, 0x91, 0x8d, + 0x3c, 0xf2, 0xbd, 0x04, 0x6f, 0x26, 0xf0, 0x3f, 0x4a, 0xde, 0x17, 0xa6, 0xd3, 0xa0, 0xfe, 0xd5, + 0x06, 0x57, 0xb4, 0x1e, 0x14, 0x21, 0x38, 0xcf, 0x40, 0x60, 0x13, 0xce, 0xda, 0x4b, 0x77, 0x23, + 0x89, 0x50, 0xbc, 0x36, 0x99, 0xd3, 0x2b, 0x86, 0xe9, 0xd3, 0x02, 0xa9, 0x63, 0xe5, 0x48, 0xd6, + 0x08, 0x09, 0x02, 0x60, 0x5d, 0xba, 0xf8, 0x05, 0xc4, 0xbb, 0x36, 0xa2, 0xdd, 0x95, 0xac, 0x94, + 0x61, 0x92, 0xd8, 0x1c, 0x63, 0xdf, 0xd0, 0x25, 0x8a, 0x83, 0xe3, 0xd8, 0xcd, 0x12, 0x97, 0x01, + 0xa0, 0xac, 0xc2, 0xa5, 0x72, 0xa5, 0x24, 0xcc, 0x74, 0xd4, 0x49, 0xf1, 0xc9, 0x68, 0x30, 0x99, + 0xce, 0x78, 0xdf, 0x03, 0x6c, 0x81, 0xb0, 0x86, 0x75, 0x85, 0xc3, 0x03, 0x08, 0x64, 0xcc, 0x52, + 0xc0, 0x9a, 0xbe, 0x96, 0xe4, 0x59, 0x89, 0x3f, 0xcb, 0xc4, 0xf9, 0x63, 0xa2, 0x68, 0x4b, 0xeb, + 0xb6, 0x85, 0x3e, 0x8d, 0xf9, 0xc9, 0x23, 0xf8, 0x05, 0xb9, 0x14, 0x39, 0xe6, 0xdc, 0xff, 0xab, + 0x8e, 0xb9, 0x8e, 0x6d, 0xea, 0x93, 0xb0, 0x77, 0x10, 0xd3, 0x0f, 0x98, 0x42, 0xc1, 0xd2, 0x0c, + 0xc8, 0x2a, 0x6c, 0xfb, 0x3b, 0x21, 0x21, 0x04, 0x71, 0x31, 0x84, 0x7e, 0x95, 0x9a, 0x74, 0xea, + 0x4c, 0x48, 0x13, 0x63, 0x6d, 0x40, 0x78, 0x4d, 0xb8, 0x04, 0xc4, 0xde, 0xa4, 0xb6, 0xf3, 0x73, + 0xea, 0x06, 0x7e, 0xb1, 0x15, 0x9c, 0x65, 0xf8, 0x48, 0x7e, 0xfa, 0x53, 0x59, 0xc7, 0xc4, 0xe6, + 0x05, 0x84, 0xf4, 0xba, 0x4b, 0x83, 0xa0, 0x04, 0x1c, 0x50, 0xed, 0x6b, 0x47, 0xc5, 0x65, 0x8b, + 0xbc, 0x0d, 0xbb, 0x7b, 0xc2, 0x4b, 0x69, 0xc8, 0xae, 0x5f, 0x8a, 0x04, 0xca, 0x68, 0xdd, 0x57, + 0x41, 0x2b, 0x26, 0x75, 0xa5, 0x5d, 0x56, 0xf7, 0xe6, 0x38, 0x67, 0x84, 0x1d, 0xc4, 0x3b, 0x97, + 0x71, 0x7b, 0x54, 0x46, 0x53, 0x51, 0x07, 0x1f, 0x7f, 0xa1, 0x1f, 0xe9, 0x69, 0x80, 0x0f, 0xfb, + 0x9b, 0x0e, 0x97, 0xfb, 0x8b, 0x8d, 0x10, 0xe6, 0x14, 0x61, 0x62, 0xd1, 0x51, 0xf5, 0x8a, 0x3c, + 0xc6, 0x59, 0x1d, 0x87, 0x69, 0x8a, 0x94, 0xd6, 0xfb, 0x3e, 0xf6, 0xcf, 0x13, 0x6d, 0x77, 0x44, + 0x69, 0xb1, 0x6f, 0xa3, 0x47, 0x9b, 0x59, 0x00, 0x60, 0x55, 0x87, 0x5d, 0xcd, 0x1f, 0xae, 0x7f, + 0x4d, 0x4c, 0xc2, 0xd8, 0x8c, 0x56, 0xae, 0xde, 0xed, 0x07, 0x28, 0x53, 0x28, 0xd0, 0x12, 0x07, + 0x5e, 0x78, 0xd6, 0x65, 0xda, 0x39, 0x72, 0x30, 0xae, 0x4e, 0x62, 0xf2, 0x96, 0x44, 0xb2, 0x40, + 0xdd, 0xba, 0x9c, 0xdb, 0x42, 0xb8, 0x61, 0xcc, 0x7b, 0x3c, 0x6f, 0x35, 0x97, 0x2e, 0x6d, 0xad, + 0x5c, 0x54, 0xac, 0xc5, 0x0b, 0x65, 0xd7, 0x0f, 0x29, 0x54, 0xde, 0xc6, 0x79, 0x49, 0xbe, 0x4c, + 0xa4, 0x88, 0xa6, 0xcb, 0x8a, 0x62, 0x56, 0x62, 0x69, 0x0f, 0x89, 0xfd, 0xbe, 0x91, 0xe4, 0x90, + 0xaa, 0x10, 0x22, 0xb5, 0x66, 0x68, 0x8f, 0xfb, 0x2b, 0xac, 0x3e, 0xf1, 0x94, 0xa0, 0x33, 0x6e, + 0xee, 0x65, 0xa9, 0x2a, 0xad, 0xff, 0xc7, 0xa4, 0x33, 0xcf, 0xcc, 0xb6, 0xf6, 0x95, 0x6b, 0x66, + 0xbd, 0x1a, 0x7f, 0x84, 0xfd, 0x5f, 0x8d, 0xb4, 0x01, 0x5a, 0x78, 0xaf, 0xf4, 0x2d, 0xf9, 0x70, + 0xb6, 0x7d, 0xf8, 0x9d, 0xbe, 0xcd, 0xdf, 0x66, 0xdf, 0xc4, 0x36, 0xf9, 0x49, 0xfa, 0xf9, 0x2b, + 0x3b, 0xa9, 0x88, 0xb1, 0x87, 0xab, 0xb3, 0x28, 0x7a, 0xa1, 0x07, 0xdf, 0x55, 0x78, 0x93, 0xb6, + 0x8b, 0x99, 0xc4, 0x89, 
0xfb, 0x48, 0xc4, 0x6a, 0xdc, 0x1c, 0x43, 0xb3, 0x50, 0xb6, 0xc9, 0x2c, + 0xd8, 0x73, 0xf0, 0xbf, 0xab, 0xf4, 0x10, 0x8b, 0xb1, 0xc1, 0x0c, 0x27, 0xd5, 0x5f, 0x55, 0x40, + 0xc5, 0xe7, 0x70, 0xc7, 0x5f, 0xea, 0x3d, 0x09, 0x51, 0x15, 0x5b, 0x9e, 0x79, 0x37, 0xd9, 0xca, + 0x3a, 0xfd, 0x49, 0xd5, 0xaa, 0xe6, 0x87, 0x9f, 0xb1, 0x1d, 0xf3, 0x21, 0xf9, 0xc5, 0xf2, 0x75, + 0x72, 0xf7, 0x71, 0xd0, 0x44, 0x37, 0xa2, 0x95, 0x49, 0xb3, 0x54, 0xef, 0xf7, 0xa2, 0x8d, 0x35, + 0x62, 0xba, 0x87, 0xfb, 0xc6, 0xf4, 0x5d, 0x3b, 0x46, 0xeb, 0xc0, 0xbe, 0x4f, 0x8b, 0xd6, 0x10, + 0x25, 0xa3, 0x24, 0x44, 0x04, 0x07, 0xa1, 0x4b, 0x9f, 0x10, 0xbe, 0x45, 0x8c, 0xa4, 0x98, 0x79, + 0xe7, 0x83, 0xb6, 0xaa, 0x1d, 0x7b, 0x54, 0x5c, 0xa6, 0x0a, 0x6a, 0x8c, 0x20, 0x5f, 0xd0, 0x88, + 0xb2, 0x0b, 0xb2, 0x5c, 0x30, 0x13, 0xb3, 0xb4, 0x85, 0x5b, 0x6a, 0x1e, 0xd7, 0x68, 0x2d, 0x1b, + 0xec, 0xb7, 0x94, 0x80, 0xdb, 0x93, 0x04, 0xb7, 0x7a, 0x8d, 0x2c, 0x73, 0x34, 0xb0, 0xf7, 0xa3, + 0xfa, 0x70, 0xe2, 0x6d, 0xcb, 0xca, 0x7f, 0xe6, 0xf3, 0x03, 0x42, 0x71, 0xd1, 0x83, 0xc5, 0x6b, + 0x04, 0x69, 0x00, 0xb8, 0xc0, 0xed, 0xea, 0xdc, 0x89, 0x8b, 0xa9, 0x6f, 0x4f, 0xce, 0x89, 0x19, + 0xdf, 0x62, 0xbe, 0x83, 0x24, 0x34, 0x45, 0xd9, 0x29, 0x85, 0x7c, 0xd1, 0xf7, 0x2c, 0x2c, 0x9b, + 0x84, 0xd0, 0x11, 0x4e, 0xad, 0x3a, 0x4a, 0xfe, 0xd9, 0x6d, 0x69, 0xbf, 0xb8, 0xf4, 0xaa, 0x2e, + 0xa4, 0xf2, 0x21, 0x69, 0xa0, 0x82, 0x6c, 0x65, 0xa5, 0x6a, 0x0a, 0xf0, 0xb3, 0x8e, 0x9d, 0x48, + 0x29, 0xb3, 0x30, 0x9e, 0xc2, 0x4e, 0xf3, 0xdd, 0x09, 0x10, 0x91, 0x7a, 0xaa, 0x02, 0xa4, 0x82, + 0x3c, 0xad, 0x91, 0x17, 0x07, 0xd2, 0x7c, 0x35, 0x06, 0xaa, 0xe5, 0xad, 0x0d, 0x20, 0x32, 0x0d, + 0x41, 0x7a, 0xd0, 0xcc, 0xb0, 0xda, 0x8b, 0xd8, 0x95, 0x79, 0x70, 0x65, 0x79, 0x8f, 0xc8, 0xf4, + 0xa8, 0xde, 0x00, 0xac, 0x3d, 0xc1, 0xf8, 0x6d, 0x81, 0x73, 0x81, 0x91, 0x51, 0xb1, 0x30, 0x6b, + 0xbc, 0xa3, 0x6b, 0x1d, 0x04, 0x7a, 0xad, 0x2e, 0xaa, 0x7c, 0x4b, 0x59, 0x72, 0x9d, 0x3b, 0x11, + 0xa8, 0x68, 0xdf, 0x3c, 0x9c, 0x3c, 0xf6, 0xe0, 0x49, 0x86, 0x3b, 0xcc, 0xfc, 0x8c, 0xf0, 0xc7, + 0x7d, 0x46, 0x79, 0x14, 0x96, 0x0c, 0xcc, 0x9f, 0x7f, 0xc6, 0x6d, 0xfe, 0xc1, 0x8e, 0x4f, 0xa6, + 0x5c, 0xba, 0x61, 0x00, 0x96, 0x6f, 0x6b, 0x0d, 0xb6, 0x23, 0x79, 0xbb, 0x63, 0x9d, 0xc5, 0x6b, + 0x97, 0xb2, 0x53, 0x0d, 0x3d, 0x68, 0xca, 0x72, 0xaa, 0xaf, 0x0e, 0xd0, 0x3c, 0xa8, 0xc2, 0x7a, + 0xaa, 0x5a, 0xe7, 0x90, 0x41, 0xc8, 0xd0, 0x10, 0x89, 0xe2, 0xf6, 0x85, 0x77, 0x64, 0xfc, 0x1d, + 0xa8, 0xb1, 0x48, 0x1c, 0xaf, 0xeb, 0xed, 0x25, 0x1a, 0x55, 0x1e, 0x4d, 0xfb, 0x20, 0xc5, 0xd9, + 0xaa, 0xd8, 0xbc, 0x79, 0xfc, 0x2f, 0xea, 0x55, 0x64, 0x54, 0xd4, 0xb0, 0x54, 0xf6, 0x82, 0xbe, + 0xb2, 0xa7, 0xa1, 0x5a, 0x2e, 0x3b, 0xe0, 0xa6, 0x40, 0x90, 0xc6, 0x6f, 0xd2, 0xa5, 0xed, 0xd4, + 0x1e, 0x75, 0x0e, 0xf9, 0xd1, 0xd5, 0x45, 0x7c, 0x4d, 0xa1, 0xf0, 0x79, 0x34, 0xc3, 0x3f, 0xa5, + 0xc5, 0x74, 0xa1, 0xb7, 0x54, 0x83, 0xe2, 0x32, 0x53, 0xd0, 0x5b, 0xe5, 0x24, 0xe9, 0x2d, 0x0b, + 0xb8, 0xe6, 0x91, 0xd4, 0xa3, 0x87, 0xc8, 0xff, 0x28, 0x4a, 0xbf, 0xeb, 0x07, 0x14, 0xdc, 0x26, + 0x2d, 0x86, 0xd8, 0x49, 0xb5, 0x77, 0xe3, 0x61, 0xe2, 0x05, 0xc6, 0xcf, 0xd8, 0xbd, 0xa6, 0x16, + 0x94, 0xc4, 0x0b, 0x04, 0x59, 0x69, 0xcb, 0xa7, 0x9b, 0x00, 0x62, 0x36, 0x7f, 0xf7, 0xf2, 0x50, + 0xd6, 0x1c, 0x25, 0xcc, 0xcb, 0xab, 0x0a, 0x50, 0xb1, 0xb4, 0x8c, 0x84, 0xeb, 0xfd, 0xf9, 0xb9, + 0x3d, 0x20, 0x69, 0x27, 0x56, 0x8b, 0x2a, 0x63, 0xa6, 0xbd, 0x44, 0xc7, 0xd6, 0xda, 0xbd, 0x52, + 0x63, 0x1b, 0xe0, 0x39, 0xdb, 0xa0, 0x87, 0x5b, 0x8b, 0xa2, 0x91, 0xd1, 0xcb, 0xfa, 0xda, 0x01, + 0xb3, 0x96, 0xd6, 0x3c, 0x4f, 0x11, 0x8d, 0xad, 
0x0a, 0x14, 0x2b, 0xc4, 0xf8, 0x20, 0x5f, 0x1c, + 0x33, 0xd3, 0x5e, 0x40, 0xca, 0x81, 0x27, 0xa3, 0xad, 0x7c, 0xf0, 0xf8, 0x8d, 0x8a, 0xc3, 0x62, + 0x03, 0x16, 0xe9, 0xb9, 0x4d, 0xd3, 0xba, 0x65, 0x73, 0x0d, 0x64, 0xfa, 0xcf, 0x7e, 0xe2, 0x23, + 0xe0, 0x32, 0xa2, 0xdd, 0xdf, 0x11, 0x7b, 0x2c, 0x00, 0xfc, 0x32, 0x8b, 0x41, 0xfa, 0xbb, 0xdf, + 0x2a, 0x27, 0xa4, 0xc4, 0xb9, 0x92, 0x09, 0xc0, 0x78, 0xc8, 0xbc, 0x2f, 0x4f, 0x64, 0x71, 0x11, + 0xef, 0xc7, 0x5f, 0xcf, 0x35, 0x77, 0xe2, 0x74, 0xfc, 0x0f, 0xd2, 0x7e, 0x07, 0x87, 0x6f, 0x06, + 0x95, 0x5e, 0x20, 0x0b, 0xaa, 0xa6, 0x5c, 0x60, 0x84, 0x2e, 0x7d, 0x73, 0x2f, 0x2b, 0x70, 0xf5, + 0x67, 0x0a, 0x7a, 0xaf, 0xcf, 0xd6, 0x1e, 0x2e, 0x95, 0x06, 0x1a, 0x92, 0xfe, 0x55, 0x78, 0x25, + 0xcd, 0xe6, 0x7b, 0x71, 0xc3, 0xfb, 0x4a, 0x95, 0xd2, 0xb7, 0x69, 0x86, 0xcc, 0xbf, 0x4e, 0xef, + 0x85, 0xf0, 0xde, 0x68, 0x65, 0x76, 0x06, 0x45, 0xc6, 0xda, 0xd6, 0xb8, 0x38, 0x05, 0xd4, 0xb2, + 0xde, 0x22, 0x30, 0x43, 0x53, 0x5e, 0xe7, 0xbd, 0x94, 0x53, 0xc1, 0x34, 0x41, 0x88, 0x6a, 0xe7, + 0xe8, 0xe8, 0xce, 0x18, 0x6e, 0xbd, 0x25, 0x73, 0x91, 0x31, 0xf8, 0x28, 0x78, 0x7c, 0xf9, 0x7a, + 0x40, 0x9b, 0x24, 0x2b, 0x39, 0xe3, 0xd5, 0x14, 0xc3, 0x67, 0xf7, 0xae, 0xea, 0x84, 0x2c, 0x64, + 0x63, 0x72, 0xb9, 0x8f, 0xb1, 0xb7, 0x44, 0x1b, 0x97, 0xaa, 0x20, 0x5f, 0x34, 0x89, 0x7c, 0x7a, + 0x21, 0x71, 0xcc, 0xc0, 0x2e, 0xa4, 0xb0, 0x49, 0xe1, 0xb8, 0xb1, 0xc5, 0x3f, 0xb8, 0xad, 0x03, + 0x01, 0xc0, 0x65, 0x40, 0xca, 0xe8, 0xca, 0x39, 0x8f, 0x14, 0x9d, 0x01, 0x69, 0x6f, 0x0f, 0x7b, + 0xaf, 0x1c, 0x44, 0x14, 0x23, 0x86, 0xc6, 0xff, 0x3a, 0x6a, 0xe3, 0xf1, 0x7d, 0x84, 0x1d, 0x8b, + 0x4d, 0xc5, 0xae, 0x02, 0x56, 0x5e, 0xfa, 0xa8, 0x7f, 0x59, 0x32, 0x46, 0x96, 0xfe, 0xae, 0x1b, + 0xb2, 0x4c, 0xd5, 0xdc, 0x31, 0x4a, 0xf6, 0xa5, 0x13, 0x1e, 0x3f, 0x26, 0xcc, 0x0e, 0xd8, 0xc4, + 0xdc, 0xbd, 0x9c, 0xa1, 0x85, 0x18, 0x88, 0x4b, 0xb5, 0x4e, 0xef, 0x9c, 0x18, 0x9f, 0x3b, 0xed, + 0x8b, 0xea, 0xeb, 0x17, 0x55, 0x7d, 0xa7, 0xa1, 0x30, 0x44, 0x9a, 0x2a, 0xf2, 0xaa, 0x50, 0xac, + 0x31, 0xad, 0x6f, 0x9b, 0xf9, 0x4c, 0xe4, 0x2f, 0xad, 0x5f, 0xf9, 0xe2, 0x44, 0x2c, 0xbb, 0x33, + 0x8e, 0x7f, 0x40, 0xf0, 0xe5, 0x9b, 0x79, 0xef, 0xc2, 0xf6, 0x2c, 0x5e, 0xf8, 0xc6, 0x0d, 0xfd, + 0x9e, 0x79, 0x8c, 0xac, 0x8a, 0xce, 0x83, 0xf3, 0x91, 0x96, 0x7d, 0xea, 0x5e, 0x0f, 0x19, 0xbe, + 0x53, 0xea, 0xd3, 0xc9, 0xc0, 0x3f, 0x2e, 0x12, 0x26, 0x6b, 0xbf, 0x7f, 0x3c, 0xb1, 0xf0, 0x71, + 0x00, 0x0e, 0x71, 0x76, 0x7f, 0x34, 0x9b, 0x25, 0x4d, 0x42, 0x1a, 0xf2, 0xad, 0xd5, 0x91, 0x95, + 0xa8, 0x96, 0xa1, 0x73, 0xc7, 0x13, 0xe3, 0x5a, 0x6c, 0x2c, 0xde, 0x44, 0x08, 0xe9, 0x04, 0x3c, + 0x5c, 0xf2, 0x1c, 0x42, 0x9f, 0x1b, 0x8f, 0x4e, 0x21, 0xc9, 0xd9, 0x79, 0x57, 0xf8, 0xaa, 0x1a, + 0x91, 0x0a, 0x42, 0xa4, 0x0b, 0xa9, 0x8a, 0x7e, 0x8f, 0x48, 0x72, 0xa0, 0x5b, 0xcb, 0xc2, 0xf0, + 0x86, 0x2b, 0x25, 0xdc, 0xf5, 0x07, 0x23, 0x36, 0x07, 0x6f, 0x23, 0xa4, 0x6a, 0xe0, 0x86, 0x61, + 0x33, 0x6e, 0x3c, 0xb0, 0x33, 0x02, 0x8e, 0xee, 0xa1, 0x02, 0xf4, 0xaf, 0x46, 0x94, 0x32, 0x3a, + 0x8b, 0x2b, 0xf3, 0x7f, 0x23, 0x5c, 0x30, 0xb1, 0xec, 0x07, 0x3a, 0x29, 0x58, 0x68, 0xb4, 0x2b, + 0xf4, 0x64, 0xfe, 0x78, 0x9b, 0x24, 0x09, 0x9b, 0x9f, 0x42, 0x74, 0xd6, 0x6b, 0x6d, 0x68, 0x01, + 0x39, 0x1e, 0x6a, 0x42, 0x17, 0xb6, 0xe3, 0x13, 0x57, 0x29, 0x2b, 0x17, 0xfb, 0xb5, 0x32, 0x1f, + 0x86, 0x50, 0x45, 0x3e, 0x7b, 0x6a, 0x32, 0x27, 0x27, 0xe2, 0xbe, 0xdf, 0xd0, 0x19, 0xe1, 0xfd, + 0xed, 0x4f, 0xd3, 0x05, 0x92, 0x18, 0x62, 0x62, 0x06, 0xaf, 0xee, 0x92, 0xe0, 0xb6, 0xe6, 0x7f, + 0xf6, 0x63, 0xe4, 0xa5, 0xc4, 0x71, 0x0d, 0xd0, 0xda, 0xb4, 0xc7, 0x70, 
0x81, 0x24, 0xf4, 0xfd, + 0x3e, 0xbd, 0x6f, 0xe2, 0x3e, 0x61, 0x37, 0xc2, 0x3c, 0x71, 0x0d, 0xd2, 0xa7, 0x01, 0xe4, 0x1f, + 0x59, 0x84, 0xda, 0x44, 0x0d, 0x9e, 0xea, 0x47, 0xfd, 0x10, 0x07, 0x4d, 0xf6, 0x7b, 0x7a, 0xf8, + 0x84, 0xea, 0x4a, 0x0b, 0x2f, 0xbd, 0x9e, 0x28, 0x37, 0x8a, 0x47, 0x98, 0x1a, 0xc0, 0x2c, 0xc1, + 0xfb, 0xc8, 0x07, 0x42, 0x14, 0x71, 0x04, 0x5a, 0x08, 0x97, 0xa3, 0x04, 0x16, 0x70, 0x85, 0xf5, + 0x09, 0x82, 0x4f, 0x5b, 0x17, 0x9b, 0xbf, 0xe5, 0x92, 0x38, 0xe3, 0x9e, 0x02, 0xe5, 0x79, 0x40, + 0x8f, 0x67, 0x62, 0xc7, 0x39, 0x76, 0xf5, 0x33, 0x81, 0x31, 0x48, 0x32, 0xcd, 0x19, 0xcf, 0x58, + 0x70, 0x37, 0x7f, 0x55, 0x1a, 0x93, 0x56, 0x13, 0x66, 0x50, 0x6f, 0x18, 0x5e, 0x69, 0xee, 0x23, + 0xd8, 0x11, 0xf6, 0x85, 0x9f, 0x43, 0x4a, 0x7d, 0xc3, 0xe4, 0x6a, 0xf9, 0x8f, 0x15, 0x37, 0x3f, + 0x87, 0xc6, 0xe0, 0x64, 0xe6, 0xf1, 0x85, 0x08, 0x23, 0xff, 0x48, 0x00, 0x35, 0xd6, 0x18, 0xcb, + 0xb8, 0xbb, 0x4b, 0x81, 0x84, 0x8c, 0xdf, 0x67, 0x22, 0xe1, 0xb3, 0x5a, 0x61, 0x15, 0x8d, 0x15, + 0x55, 0x1c, 0x1c, 0xe5, 0xb6, 0x99, 0xa3, 0xd5, 0x29, 0x86, 0x56, 0x03, 0xc7, 0xa5, 0x35, 0x8e, + 0x1c, 0xfd, 0x57, 0x6c, 0xed, 0x6a, 0x4f, 0x1f, 0x66, 0x9e, 0x3f, 0x38, 0x65, 0xa3, 0xd4, 0x76, + 0x22, 0xe6, 0x5a, 0xc2, 0x62, 0xa1, 0x92, 0xdd, 0xd7, 0xb4, 0x7c, 0xa7, 0x13, 0xfb, 0x88, 0x94, + 0x50, 0x11, 0xab, 0xb5, 0x74, 0x94, 0x7c, 0x12, 0xbb, 0x87, 0xe4, 0x65, 0x5d, 0x77, 0x02, 0x0d, + 0xe9, 0x14, 0x4b, 0x44, 0x23, 0x80, 0x84, 0x70, 0x56, 0x5e, 0xf0, 0x21, 0x79, 0xa6, 0x3d, 0x19, + 0xc0, 0x99, 0x13, 0x0b, 0x01, 0x3d, 0x52, 0x7d, 0x22, 0x66, 0xd8, 0x2b, 0x7f, 0x00, 0xbb, 0x33, + 0xa2, 0x17, 0xa4, 0xa7, 0xbb, 0xcc, 0xe1, 0xf1, 0xba, 0xee, 0xc6, 0xa5, 0xb2, 0x20, 0x6c, 0xfd, + 0x22, 0x6c, 0x97, 0x21, 0xd6, 0x38, 0x37, 0x72, 0x38, 0x05, 0x1c, 0xd6, 0x4d, 0xf4, 0x42, 0x6d, + 0x11, 0x46, 0x35, 0x99, 0x14, 0xb2, 0x7e, 0xe9, 0x6e, 0x54, 0x9c, 0x8a, 0x15, 0x24, 0x49, 0x42, + 0x54, 0xfd, 0xcf, 0x7b, 0xd5, 0x4d, 0xfc, 0xcc, 0xe4, 0x5a, 0x2a, 0xa4, 0xf9, 0x92, 0x1a, 0x7f, + 0xd9, 0x47, 0xb4, 0x2b, 0x24, 0xf4, 0xe3, 0x97, 0xc9, 0x8d, 0x31, 0x10, 0xef, 0x8b, 0x7e, 0xae, + 0x43, 0xf3, 0x7d, 0x64, 0x5d, 0x92, 0xbf, 0xaf, 0x95, 0x94, 0x0d, 0xa5, 0x24, 0x59, 0xc0, 0x85, + 0x8f, 0x66, 0x3f, 0xb0, 0x5f, 0xe5, 0x5f, 0x38, 0xcd, 0xd7, 0xa8, 0x29, 0xe3, 0x95, 0x59, 0x05, + 0x4b, 0xcb, 0xb4, 0x0c, 0x32, 0xcd, 0x22, 0xdf, 0xe2, 0xf7, 0x42, 0x75, 0x0e, 0x6a, 0xd0, 0x2b, + 0x28, 0xc0, 0x27, 0xe5, 0xc7, 0xfa, 0xb4, 0x26, 0xda, 0x46, 0x40, 0xc5, 0x24, 0xed, 0xa2, 0xc4, + 0xa7, 0x7c, 0x93, 0x60, 0x9a, 0x0a, 0x27, 0x2e, 0x14, 0x8d, 0x5b, 0x56, 0x05, 0x53, 0xe2, 0xc4, + 0x62, 0xbf, 0xb7, 0x75, 0x7d, 0x04, 0x8d, 0x80, 0xa2, 0x0e, 0x18, 0x2f, 0xce, 0x16, 0xdd, 0xf4, + 0x74, 0x13, 0x74, 0x7e, 0x73, 0xb4, 0x73, 0xf8, 0x1c, 0x4f, 0xdb, 0x5c, 0x90, 0x00, 0xc8, 0x54, + 0x52, 0x99, 0xb8, 0x79, 0x46, 0x8b, 0x4f, 0x7d, 0x98, 0xff, 0xe6, 0xc7, 0xec, 0x0d, 0xe9, 0x24, + 0x6d, 0x7b, 0xad, 0xe6, 0x80, 0x28, 0xe2, 0xff, 0xe5, 0x82, 0xfb, 0x36, 0x98, 0xc0, 0x2c, 0xc8, + 0x8e, 0x9a, 0x44, 0xb2, 0x11, 0xac, 0x4e, 0x26, 0x0b, 0xa8, 0x9b, 0x56, 0x21, 0x6d, 0xfa, 0x50, + 0x17, 0xe8, 0xf3, 0x7d, 0xd2, 0xe3, 0xed, 0xc4, 0xac, 0x22, 0xa1, 0x5d, 0x5d, 0x8f, 0x31, 0x99, + 0xb4, 0x44, 0x2e, 0x97, 0x64, 0x59, 0x12, 0xcd, 0xbe, 0xaf, 0x28, 0x9a, 0x3a, 0xdf, 0xb7, 0x16, + 0x72, 0x6d, 0xed, 0x64, 0x79, 0x2b, 0x83, 0xa7, 0x50, 0xeb, 0xe0, 0x30, 0x26, 0xe0, 0x0e, 0x65, + 0x38, 0x28, 0xe9, 0x4a, 0x93, 0x47, 0x21, 0x0d, 0x6f, 0xf9, 0x9a, 0xf6, 0x7a, 0x97, 0x1e, 0x39, + 0x4b, 0xdc, 0xff, 0x66, 0xb6, 0x41, 0x80, 0x54, 0x60, 0x48, 0x84, 0xa6, 0x37, 0xe1, 0xba, 0xea, + 
0x5b, 0xc9, 0x21, 0xd1, 0x84, 0xb7, 0xe7, 0x31, 0xf2, 0xe2, 0x61, 0x24, 0xaf, 0x9e, 0x1c, 0xcb, + 0x27, 0x24, 0x4d, 0xe6, 0x1d, 0xb5, 0xa2, 0x7d, 0xc2, 0x85, 0xd9, 0xaa, 0x5d, 0xde, 0x13, 0xeb, + 0xbf, 0xef, 0x93, 0x70, 0x2e, 0x93, 0x1f, 0xe2, 0x7a, 0x5e, 0x8f, 0x8b, 0x55, 0x05, 0x0b, 0xf6, + 0x6f, 0xa1, 0x50, 0x74, 0xe6, 0x7b, 0x74, 0x40, 0xbb, 0x4f, 0x9a, 0xa0, 0xee, 0x73, 0x41, 0x5b, + 0xa6, 0xff, 0xf1, 0xa0, 0x83, 0x4c, 0x16, 0x5f, 0x70, 0x41, 0xbf, 0x26, 0xff, 0x82, 0xbc, 0x32, + 0xbd, 0x27, 0x8a, 0xfb, 0x46, 0x01, 0x33, 0xee, 0x9a, 0xfb, 0x95, 0x9b, 0x9d, 0x3f, 0xc1, 0xbd, + 0x59, 0x78, 0x27, 0x4e, 0x8c, 0x0c, 0x0d, 0x19, 0x00, 0x7d, 0x17, 0xa1, 0x10, 0x97, 0x08, 0x88, + 0x67, 0x69, 0x8f, 0xd8, 0x11, 0xbc, 0x8e, 0xf5, 0xfd, 0x95, 0x1d, 0xe6, 0x30, 0x42, 0x0f, 0xed, + 0x24, 0xe7, 0xde, 0x8d, 0xc6, 0xbf, 0x26, 0x4c, 0xee, 0xda, 0xed, 0xa1, 0x9f, 0x8d, 0x9d, 0xc2, + 0xd6, 0x15, 0x4e, 0x38, 0x51, 0xf7, 0x99, 0x35, 0x70, 0x5e, 0x03, 0xed, 0x62, 0x91, 0x34, 0x85, + 0x74, 0x81, 0x53, 0x6c, 0xe7, 0x94, 0xfe, 0x8a, 0xfb, 0x68, 0x88, 0x74, 0x78, 0x11, 0x2f, 0x95, + 0x3c, 0x59, 0x17, 0x9d, 0x70, 0x80, 0x11, 0x64, 0x09, 0x87, 0x73, 0x50, 0x62, 0xce, 0xbd, 0x0e, + 0xe6, 0x68, 0x7a, 0x20, 0xeb, 0x7f, 0x16, 0x2f, 0xf1, 0xc2, 0x24, 0xc3, 0xcf, 0x59, 0x28, 0x76, + 0xd3, 0xee, 0x9f, 0xd2, 0x88, 0xbf, 0x4b, 0x0a, 0x3d, 0x74, 0xff, 0x4b, 0xfa, 0x59, 0x74, 0xed, + 0x13, 0x49, 0xce, 0x02, 0x7b, 0x7b, 0x92, 0xb1, 0x87, 0x21, 0x1a, 0x77, 0xdc, 0x10, 0x7d, 0x0b, + 0xe3, 0x87, 0x0b, 0xf1, 0x1e, 0xd5, 0xcd, 0x53, 0xc4, 0x66, 0xb7, 0x57, 0x7e, 0x87, 0xd0, 0xd4, + 0x14, 0xd0, 0x80, 0x19, 0xfe, 0x73, 0x3f, 0xb2, 0xb6, 0xbb, 0x9d, 0x01, 0x32, 0x9e, 0x02, 0x3a, + 0x71, 0x33, 0xe0, 0xfb, 0xf0, 0x9a, 0x1a, 0x19, 0xd5, 0x88, 0x2e, 0xbb, 0x9c, 0xbe, 0x2c, 0xad, + 0xd5, 0xe3, 0x3c, 0x09, 0xe4, 0x46, 0xb0, 0x76, 0x4c, 0x5e, 0xa6, 0x5c, 0xd2, 0x17, 0xae, 0xac, + 0x22, 0x54, 0xc6, 0x49, 0x97, 0x5c, 0x94, 0x78, 0x09, 0xca, 0x1c, 0x0e, 0x5f, 0x7b, 0xa7, 0xc8, + 0x2a, 0x7f, 0xa5, 0x1a, 0x74, 0xce, 0x3c, 0xee, 0x58, 0xff, 0x15, 0x67, 0xff, 0x55, 0x04, 0x07, + 0x44, 0x5b, 0x03, 0x4c, 0x9d, 0xb9, 0x99, 0xd0, 0x6a, 0x4b, 0x6e, 0xfc, 0x77, 0x89, 0x68, 0x6a, + 0x70, 0x5c, 0x72, 0x67, 0x46, 0x28, 0x4f, 0x72, 0x18, 0x96, 0xa2, 0x62, 0xeb, 0xe4, 0xe5, 0x8a, + 0x82, 0xeb, 0xf1, 0xae, 0x52, 0x54, 0x0a, 0xd9, 0x38, 0x54, 0x79, 0x7c, 0xb0, 0x90, 0x37, 0x91, + 0x7f, 0xf3, 0x49, 0xa4, 0x48, 0x39, 0xcf, 0x77, 0x33, 0xa9, 0xf5, 0xe8, 0x0c, 0x03, 0xc0, 0xeb, + 0x33, 0xc6, 0x17, 0x0d, 0x16, 0x80, 0x5d, 0x54, 0xfe, 0xe6, 0x5c, 0x0a, 0x99, 0xb1, 0x31, 0x3b, + 0x8d, 0xa6, 0x49, 0x56, 0xd3, 0x86, 0x0d, 0x52, 0xfa, 0x45, 0x31, 0x15, 0x68, 0xa2, 0xfa, 0xbe, + 0xb6, 0x87, 0x99, 0xac, 0xc7, 0x83, 0x58, 0xa7, 0x4d, 0x08, 0xd1, 0xe9, 0x5e, 0xd7, 0x74, 0xa3, + 0x0c, 0x6d, 0xa2, 0xdd, 0x81, 0x3a, 0xf6, 0x2a, 0xd8, 0xd6, 0xdf, 0x75, 0x1f, 0x31, 0x96, 0x2f, + 0x36, 0xda, 0x42, 0x1c, 0x5a, 0x1a, 0x17, 0x3f, 0x99, 0xee, 0x9c, 0x5c, 0x4d, 0xc5, 0xc2, 0x9a, + 0x42, 0xbb, 0x68, 0x64, 0x22, 0xce, 0x44, 0x4f, 0xe3, 0xd0, 0x19, 0x7e, 0x7d, 0xa7, 0xbe, 0x17, + 0x81, 0x22, 0x10, 0x5b, 0x56, 0xd7, 0xa1, 0x47, 0x21, 0x37, 0x97, 0x9f, 0xab, 0x98, 0xb4, 0x89, + 0xa0, 0xcf, 0xc3, 0x23, 0xc2, 0x94, 0x68, 0xbf, 0x05, 0xee, 0x67, 0x51, 0x24, 0x24, 0xe8, 0x1e, + 0x3a, 0xd9, 0x79, 0xcd, 0xb8, 0xa4, 0x69, 0x0d, 0xa4, 0xf1, 0x00, 0x4a, 0x0e, 0xfd, 0x6b, 0x0a, + 0xbb, 0x01, 0x66, 0xe1, 0x9a, 0xbb, 0x9a, 0x77, 0x8d, 0xea, 0x14, 0x5c, 0x9d, 0x92, 0x80, 0x2f, + 0x5d, 0xd2, 0x39, 0xed, 0x88, 0x96, 0xa2, 0x92, 0xf7, 0x1f, 0xe7, 0x3d, 0xec, 0x5f, 0x8b, 0xff, + 0x44, 0x43, 0x2d, 0xb6, 
0xb8, 0xa9, 0x8e, 0x29, 0x5f, 0x68, 0x5c, 0x53, 0xb3, 0x68, 0xcf, 0xad, + 0x37, 0xd6, 0x2e, 0x82, 0x91, 0x12, 0x36, 0x84, 0xd6, 0xcc, 0x9b, 0x26, 0x57, 0x80, 0x7a, 0x44, + 0x99, 0xc1, 0x14, 0xb8, 0x6c, 0x55, 0xaa, 0x6b, 0xb6, 0xe4, 0xb7, 0xa8, 0xfa, 0x6d, 0xd1, 0xd4, + 0xeb, 0xb9, 0xad, 0xbc, 0xf0, 0x87, 0x86, 0x4e, 0xdd, 0x5e, 0x07, 0x54, 0xcc, 0x32, 0xeb, 0xdb, + 0x6f, 0x44, 0x34, 0xe0, 0xc6, 0x44, 0x38, 0x81, 0x74, 0x0c, 0xd3, 0x44, 0x5b, 0x66, 0x9c, 0xc8, + 0x1e, 0xa5, 0xf1, 0x05, 0xbe, 0x51, 0x9f, 0x9a, 0xd9, 0x8b, 0xcc, 0xe6, 0x30, 0xc6, 0x5f, 0x84, + 0xe3, 0x21, 0x3a, 0xfc, 0x49, 0xcd, 0xda, 0x7e, 0x4c, 0xed, 0xd2, 0xee, 0xd7, 0xba, 0x09, 0x4f, + 0xa3, 0x30, 0x75, 0x21, 0xbf, 0x22, 0xe0, 0x9f, 0x1f, 0x60, 0xb1, 0xf3, 0x2d, 0x95, 0x47, 0x91, + 0x64, 0x58, 0xf2, 0x20, 0x5e, 0xcf, 0x5f, 0x53, 0xd4, 0xaf, 0x75, 0x7e, 0x51, 0x13, 0xde, 0x1c, + 0x96, 0x7d, 0xea, 0x02, 0xc6, 0x4c, 0xe0, 0x3f, 0xf3, 0xaf, 0xf3, 0x6b, 0xfd, 0x6b, 0x52, 0x59, + 0x17, 0xad, 0x4a, 0x13, 0xe3, 0x9c, 0xa2, 0x37, 0x75, 0xdb, 0xd6, 0x11, 0xb6, 0x82, 0x4d, 0xd5, + 0x1e, 0xc1, 0x6b, 0xb5, 0x48, 0x10, 0x40, 0xa2, 0x64, 0x54, 0x39, 0x74, 0x77, 0x52, 0xd5, 0x7f, + 0x46, 0x9b, 0xb2, 0x8b, 0x9f, 0xbb, 0x43, 0xe9, 0x29, 0x5d, 0x60, 0xad, 0xc2, 0x41, 0xe4, 0x9e, + 0x81, 0xb8, 0xe8, 0xd1, 0x0c, 0x16, 0xe5, 0xd0, 0x13, 0xb1, 0x15, 0x9c, 0xb4, 0xdb, 0xf1, 0xe4, + 0xa7, 0xc6, 0xa9, 0x8d, 0x54, 0xd1, 0x3f, 0x37, 0x18, 0xb1, 0x2e, 0xbf, 0xdc, 0x80, 0x84, 0x6b, + 0x16, 0x8e, 0xd1, 0x1d, 0xb2, 0xdb, 0xd3, 0xba, 0x40, 0xbe, 0x41, 0x67, 0x8c, 0x3a, 0xa9, 0x79, + 0xa1, 0x46, 0x95, 0x17, 0x0f, 0x5b, 0x05, 0x2a, 0x7b, 0xab, 0xaa, 0x68, 0x56, 0x8d, 0x5f, 0x73, + 0x4b, 0x53, 0x0b, 0xe9, 0x6d, 0xd4, 0x04, 0xf1, 0x16, 0xce, 0x05, 0x5f, 0xfd, 0x7d, 0x94, 0x36, + 0xce, 0xea, 0x9b, 0x59, 0x37, 0xbd, 0x73, 0x5e, 0xfd, 0xe1, 0xeb, 0xee, 0xed, 0x7b, 0xef, 0xbe, + 0xe1, 0x2f, 0x3a, 0xa3, 0x5b, 0x5d, 0x21, 0x59, 0xa1, 0x66, 0x18, 0xbb, 0x41, 0x4e, 0x44, 0x61, + 0xd6, 0x3b, 0x3a, 0x28, 0xf9, 0x6a, 0x1b, 0x00, 0x40, 0x00, 0x7c, 0x84, 0x21, 0xa1, 0xbd, 0x05, + 0x44, 0xcf, 0xa3, 0xd8, 0xd3, 0x06, 0x70, 0xa2, 0x3f, 0xfb, 0xf5, 0xf6, 0x07, 0xf6, 0x39, 0x9f, + 0xa0, 0xc2, 0x1c, 0x04, 0x5c, 0xc8, 0x9c, 0xc9, 0xe0, 0x15, 0x1b, 0x57, 0x0b, 0x22, 0x1b, 0x4d, + 0x68, 0xce, 0x78, 0x9b, 0xf1, 0xf8, 0x8c, 0x1b, 0x5c, 0x4f, 0x71, 0x4f, 0xe8, 0x89, 0x89, 0xb6, + 0xd7, 0x5d, 0xaa, 0xec, 0x91, 0x47, 0xe7, 0xee, 0x7d, 0x57, 0xdb, 0x25, 0x14, 0x9d, 0x38, 0x62, + 0xfb, 0x7e, 0xf2, 0x22, 0x2f, 0x6c, 0x7f, 0xb2, 0x1e, 0x46, 0x95, 0x69, 0x8d, 0x2f, 0x09, 0x21, + 0x51, 0xa7, 0x0e, 0xb6, 0x11, 0x38, 0x48, 0x35, 0x11, 0xdf, 0xd2, 0x57, 0xaa, 0x59, 0x7c, 0xee, + 0xf1, 0x28, 0xfd, 0xde, 0xe0, 0xd6, 0xd7, 0xd6, 0x94, 0x6a, 0x89, 0x34, 0x35, 0xcf, 0xfd, 0x24, + 0x0c, 0xef, 0xa8, 0x48, 0xe2, 0xdb, 0xa1, 0x30, 0xde, 0xd6, 0x1c, 0x48, 0x9c, 0xe1, 0x25, 0x98, + 0x89, 0xec, 0x2a, 0xd5, 0x07, 0xff, 0xeb, 0x87, 0x82, 0x9f, 0xfb, 0xdc, 0x20, 0x01, 0xb9, 0x82, + 0x37, 0xd8, 0x13, 0x9b, 0x2e, 0x2f, 0x1b, 0x61, 0xe3, 0x7b, 0x56, 0xd2, 0xc1, 0x20, 0x9d, 0xdf, + 0x90, 0x43, 0x12, 0x9e, 0xdf, 0x51, 0xc1, 0x40, 0xd7, 0x42, 0xad, 0x04, 0xc2, 0x39, 0x74, 0x7b, + 0x86, 0x9d, 0xb6, 0xd2, 0x64, 0x38, 0x39, 0x43, 0x8f, 0xce, 0xff, 0x6b, 0xf3, 0xff, 0xf5, 0xff, + 0xfd, 0xbf, 0xfe, 0xeb, 0xbf, 0xfe, 0xeb, 0xbf, 0xfe, 0x6f, 0xfc, 0x0f, 0x8a, 0x6b, 0x73, 0xad, + 0x00, 0x26, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_image_prod_storage_pvt; +#endif // 
defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 9728, // uncompressed data size (bytes) + 7780, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x56, 0x00, 0x62, 0x56, 0x08, 0x13, 0x4c, 0xc8, 0x43, 0x69, + 0x20, 0x00, 0x00, 0xfe, 0x9c, 0x72, 0x04, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_dbg_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 768 +// COMPRESSED SIZE (bytes): 781 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_sig_dbg_data[] = +{ + 0x01, 0x00, 0x03, 0xff, 0xfc, 0x6b, 0xef, 0x6d, 0x57, 0x45, 0x00, 0x70, 0xde, 0x18, 0x97, 0x1f, + 0x61, 0x4f, 0xa5, 0x80, 0xfa, 0x38, 0xcd, 0x7d, 0xd5, 0xf4, 0xab, 0x86, 0x92, 0x13, 0x21, 0x9b, + 0x31, 0x7f, 0x0f, 0xd3, 0x12, 0x70, 0xdc, 0xca, 0x5b, 0xdd, 0x8a, 0xf4, 0x9c, 0xa0, 0x73, 0x43, + 0x52, 0x33, 0x9f, 0xd4, 0x28, 0xd9, 0xad, 0x77, 0xe5, 0xa4, 0xdf, 0x8c, 0xc0, 0x1b, 0x99, 0x1f, + 0x9d, 0x6a, 0xe4, 0x1b, 0x4d, 0x51, 0x42, 0x66, 0x1c, 0x4d, 0xfc, 0x66, 0x72, 0x44, 0x26, 0x69, + 0x73, 0xcc, 0x45, 0xb2, 0xbb, 0xa4, 0xd9, 0x79, 0x82, 0x86, 0xc2, 0x2f, 0x8a, 0x95, 0xf8, 0x38, + 0x68, 0x96, 0x67, 0xfa, 0x8d, 0xbe, 0xa3, 0xd9, 0x25, 0xc9, 0x4d, 0x92, 0xce, 0x88, 0xfb, 0x8f, + 0x02, 0x64, 0x5f, 0x98, 0x52, 0x0f, 0x18, 0x47, 0x6f, 0xc1, 0x5d, 0x99, 0xaa, 0xc7, 0xf9, 0x4f, + 0xba, 0x00, 0x1d, 0x27, 0x5d, 0xb9, 0xf4, 0x04, 0xfe, 0xd1, 0x6a, 0x3c, 0x07, 0xd8, 0xdb, 0xac, + 0xdc, 0xa3, 0x70, 0xe1, 0x98, 0x1d, 0x10, 0xe7, 0x84, 0xd3, 0xfb, 0xb8, 0xd9, 0xf7, 0x09, 0x0b, + 0x74, 0x31, 0xde, 0xb7, 0xf3, 0x43, 0x2e, 0xd6, 0xa7, 0x2c, 0x78, 0x0e, 0xb6, 0x5c, 0xe2, 0xac, + 0x09, 0x3b, 0x79, 0x2b, 0x62, 0xd3, 0x60, 0xf6, 0x70, 0x57, 0x46, 0x5d, 0xbd, 0x3a, 0xcc, 0xc9, + 0x8c, 0xd5, 0x85, 0x83, 0xd5, 0x2d, 0xeb, 0x04, 0x7c, 0xb9, 0xf4, 0x36, 0x5e, 0x43, 0xc5, 0x84, + 
0xa2, 0x31, 0x4e, 0x57, 0x89, 0xcd, 0x6e, 0x35, 0xca, 0x9c, 0xa4, 0xe0, 0x46, 0xeb, 0x54, 0x59, + 0xbe, 0x54, 0x48, 0xd3, 0x39, 0xbc, 0x07, 0x95, 0xe3, 0xa9, 0x59, 0x5c, 0xce, 0x2d, 0x0b, 0x34, + 0x6d, 0x87, 0x4c, 0x0e, 0xad, 0x22, 0x64, 0x88, 0x3d, 0xa0, 0x42, 0x7e, 0x9c, 0x90, 0x16, 0xd8, + 0x86, 0x57, 0x03, 0x65, 0x13, 0x78, 0x9a, 0x6b, 0xcf, 0x53, 0x9b, 0x42, 0x55, 0x0b, 0xab, 0x8d, + 0xda, 0x91, 0xd4, 0x99, 0x99, 0x57, 0x1d, 0x5c, 0xe6, 0x22, 0xec, 0x3a, 0x67, 0xe7, 0x85, 0xfe, + 0x5a, 0x4b, 0x26, 0x31, 0x7b, 0x2e, 0xc0, 0xb0, 0xfb, 0xb0, 0xdb, 0x44, 0x50, 0x90, 0x13, 0xa1, + 0x79, 0xca, 0x39, 0x8b, 0x7d, 0xa7, 0x0b, 0xad, 0x13, 0xac, 0x35, 0xdb, 0x83, 0xb1, 0xfc, 0x49, + 0xf8, 0x45, 0xe4, 0x21, 0xbb, 0x2e, 0xba, 0x6d, 0x79, 0xf3, 0xb7, 0x03, 0x70, 0x07, 0x18, 0x0c, + 0x45, 0xb9, 0xd6, 0x53, 0xf7, 0x52, 0x4b, 0x9e, 0xe0, 0x77, 0x49, 0x0a, 0xe9, 0xdc, 0x2b, 0x50, + 0xe1, 0xb6, 0xd9, 0xe3, 0xcd, 0x6b, 0xff, 0xab, 0x6e, 0x3e, 0x03, 0x85, 0x97, 0x7d, 0x50, 0xc2, + 0x74, 0x10, 0xaa, 0x66, 0xee, 0x20, 0x22, 0x50, 0x99, 0xaa, 0x23, 0xee, 0x36, 0x8f, 0x05, 0xab, + 0x61, 0xba, 0xcb, 0xfa, 0x6c, 0x2c, 0x2a, 0xe1, 0xf0, 0x42, 0x6b, 0xe5, 0x97, 0x6b, 0x9b, 0x88, + 0xb0, 0x1f, 0x61, 0x10, 0x8c, 0x93, 0x9b, 0x7b, 0x0a, 0x39, 0xbf, 0xcd, 0xa1, 0xde, 0x2e, 0x9d, + 0xcf, 0x76, 0xa3, 0x7b, 0x2b, 0x80, 0xbf, 0x13, 0x78, 0xd2, 0x09, 0xbf, 0xd5, 0xfa, 0x39, 0x1a, + 0x18, 0x7f, 0x64, 0xfd, 0x63, 0x12, 0x3b, 0x58, 0x6f, 0xff, 0xef, 0xc1, 0xe3, 0xb3, 0xbe, 0x4e, + 0xc1, 0xf8, 0xfd, 0x57, 0x26, 0x51, 0xde, 0x17, 0x42, 0xaa, 0x44, 0x1a, 0x70, 0xa5, 0xb9, 0x62, + 0x48, 0x4a, 0x6d, 0x7c, 0x69, 0x09, 0x41, 0x4a, 0x02, 0x90, 0x7c, 0x83, 0x79, 0xd4, 0xb5, 0x25, + 0x7c, 0xff, 0x8f, 0xfa, 0xfe, 0xa2, 0x99, 0xf3, 0x05, 0xe7, 0xab, 0x6d, 0x0f, 0x8f, 0xb2, 0xbe, + 0xc9, 0x58, 0xa1, 0x73, 0xf7, 0x63, 0x43, 0xef, 0x32, 0xa0, 0x3e, 0xd4, 0x58, 0x79, 0x13, 0x49, + 0x3a, 0x92, 0xb1, 0x76, 0xbf, 0x9b, 0x98, 0xa9, 0x94, 0xf1, 0xb8, 0xcc, 0xf1, 0x44, 0x8e, 0xbd, + 0xd3, 0x32, 0x2b, 0xe0, 0x7e, 0xa2, 0x02, 0xfd, 0x75, 0xdd, 0xcf, 0x47, 0x2e, 0x7f, 0x93, 0x73, + 0xb9, 0xff, 0x75, 0xbf, 0xdd, 0x86, 0x55, 0x6a, 0xe6, 0x64, 0x4d, 0x31, 0x9d, 0x72, 0x77, 0x5c, + 0xfe, 0x2d, 0x7e, 0x73, 0x2a, 0xfa, 0x46, 0xb1, 0x86, 0xdf, 0x76, 0x0d, 0x41, 0xf2, 0x92, 0xfd, + 0x0d, 0x72, 0xaf, 0xd5, 0x2c, 0x8b, 0xcb, 0x08, 0x6a, 0xcf, 0x8d, 0x40, 0xda, 0x95, 0x36, 0xe9, + 0x86, 0xfe, 0xc9, 0x1e, 0x9d, 0xf9, 0xa0, 0x26, 0x45, 0xf4, 0x40, 0xcc, 0xcb, 0x51, 0xcc, 0x52, + 0x71, 0x71, 0x80, 0x26, 0x38, 0x91, 0x40, 0xc6, 0xaf, 0x3e, 0x1b, 0x28, 0x1f, 0x36, 0x09, 0xf2, + 0x10, 0x61, 0xd3, 0x12, 0x4f, 0xd5, 0x6f, 0x37, 0xd5, 0x23, 0xb6, 0x9e, 0x25, 0x12, 0xa8, 0x2e, + 0xb1, 0xf3, 0x13, 0x8f, 0xed, 0x44, 0x90, 0xfe, 0x64, 0xf7, 0x96, 0xc1, 0xaf, 0x24, 0x83, 0xf0, + 0xcd, 0x62, 0xf1, 0xe5, 0xa3, 0xe1, 0x2e, 0x65, 0x47, 0xb8, 0xce, 0x6c, 0xfe, 0x2c, 0x50, 0xbd, + 0x7a, 0x29, 0x3b, 0xd0, 0xce, 0x93, 0x1a, 0x57, 0xb2, 0xbd, 0x19, 0xba, 0x43, 0x4d, 0x62, 0x22, + 0xbc, 0x29, 0xfe, 0xa1, 0x49, 0x1e, 0xd3, 0xc9, 0x91, 0x6a, 0xac, 0x31, 0x33, 0x4b, 0x9b, 0x08, + 0xc5, 0x25, 0xea, 0x42, 0xb2, 0x3b, 0x29, 0xb1, 0xf1, 0x4f, 0xc0, 0x77, 0x98, 0x10, 0x29, 0x2b, + 0xd7, 0x96, 0x64, 0x2c, 0x16, 0x05, 0xa9, 0xa3, 0xdd, 0x7c, 0x12, 0x39, 0x47, 0x94, 0x8b, 0x64, + 0x98, 0xfe, 0x7a, 0x2d, 0x63, 0x07, 0xa0, 0xf8, 0xcd, 0xe0, 0x00, 0x28, 0xcb, 0x94, 0xe1, 0x91, + 0xf6, 0x18, 0xfa, 0x34, 0xb2, 0xaf, 0x8b, 0xef, 0x21, 0x1a, 0x08, 0xcb, 0x05, 0x5e, 0xf0, 0x28, + 0x63, 0x67, 0x06, 0x6a, 0x3c, 0xe4, 0x88, 0x3e, 0x79, 0x00, 0x03, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + 
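Each BINDATA_STORAGE_PVT record in this file pairs one of these byte arrays with its uncompressed size, its compressed size, and a flag saying whether the payload is compressed; the BINDATA_ARCHIVE table further below then maps string names such as "sig_dbg" or "patch_loc" to those records, and kgspGetBinArchiveBooterReloadUcode_GA102() simply returns a pointer to that table. The sketch below is only an editorial illustration of that name-to-record lookup pattern, not part of the generated file and not the actual resman bindata API: the struct layouts, field names, and the exampleArchiveFind() helper are simplified stand-ins assumed for demonstration.

/* Illustrative sketch only; simplified stand-in types, not the driver's API. */
#include <stddef.h>
#include <string.h>
#include <stdio.h>

typedef struct
{
    size_t               uncompressedSize;  /* e.g. 768 for the sig_dbg blob above   */
    size_t               compressedSize;    /* e.g. 781 (compression can grow random data) */
    const unsigned char *pData;             /* pointer to the embedded byte array    */
    int                  bCompressed;       /* mirrors the "is pData compressed?" flag */
} ExampleStorage;

typedef struct
{
    const char           *name;             /* "image_prod", "sig_dbg", "patch_loc", ... */
    const ExampleStorage *pStorage;
} ExampleArchiveEntry;

typedef struct
{
    size_t                     entryNum;
    const ExampleArchiveEntry *entries;
} ExampleArchive;

/* Linear name lookup, analogous to selecting an entry such as "sig_prod"
 * from the archive table at the end of the generated file. */
static const ExampleStorage *
exampleArchiveFind(const ExampleArchive *pArchive, const char *name)
{
    for (size_t i = 0; i < pArchive->entryNum; i++)
    {
        if (strcmp(pArchive->entries[i].name, name) == 0)
            return pArchive->entries[i].pStorage;
    }
    return NULL;
}

int main(void)
{
    /* Tiny uncompressed entry, modeled on the 4-byte patch_loc blob. */
    static const unsigned char      dummy[4]  = { 0x10, 0x20, 0x00, 0x00 };
    static const ExampleStorage     patchLoc  = { 4, 4, dummy, 0 };
    static const ExampleArchiveEntry entries[] = { { "patch_loc", &patchLoc } };
    static const ExampleArchive      archive   = { 1, entries };

    const ExampleStorage *p = exampleArchiveFind(&archive, "patch_loc");
    if (p != NULL)
        printf("patch_loc: %zu byte(s), compressed=%d\n",
               p->uncompressedSize, p->bCompressed);
    return 0;
}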
+#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 768, // uncompressed data size (bytes) + 781, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_sig_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_prod_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 768 +// COMPRESSED SIZE (bytes): 420 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_sig_prod_data[] = +{ + 0x13, 0x92, 0x50, 0x3e, 0xa9, 0x2d, 0x79, 0x60, 0x61, 0xf2, 0x54, 0x46, 0x11, 0x9f, 0x88, 0x09, + 0x95, 0xbf, 0xb6, 0x29, 0x29, 0xda, 0x57, 0x79, 0x4c, 0x67, 0x99, 0x11, 0x1f, 0xa8, 0x5e, 0x5a, + 0xc2, 0xb7, 0x4c, 0xce, 0x8b, 0xe9, 0x5d, 0x75, 0x6e, 0x90, 0xf8, 0x16, 0x99, 0x68, 0xc1, 0x7e, + 0xce, 0x8f, 0x4f, 0x5f, 0x64, 0xcb, 0xbf, 0xb0, 0xb5, 0xf2, 0x10, 0x38, 0xf4, 0xa7, 0x54, 0xfe, + 0xd2, 0xef, 0x9a, 0x0b, 0x8b, 0x3e, 0xc6, 0x39, 0x1c, 0xc8, 0x77, 0x2e, 0xde, 0xfc, 0x5b, 0x7b, + 0x9b, 0x91, 0xce, 0xf2, 0xc3, 0x3b, 0xcd, 0x56, 0x95, 0xc5, 0xca, 0xf7, 0xba, 0xeb, 0x1e, 0x3d, + 0xa4, 0xb2, 0xd4, 0xe3, 0xcb, 0x1b, 0x59, 0x03, 0x5b, 0x81, 0xe4, 0x3d, 0x56, 0xc1, 0x9f, 0x44, + 0x9e, 0xd5, 0x3c, 0xf7, 0x94, 0x2a, 0x59, 0x9b, 0xf0, 0x70, 0xd3, 0xdd, 0x90, 0x6f, 0xb3, 0xe7, + 0x9f, 0x8a, 0x3d, 0x20, 0xcc, 0x2c, 0x1c, 0xb0, 0x72, 0x6d, 0x48, 0xc2, 0xbb, 0x8f, 0x5f, 0x9d, + 0xc3, 0x1e, 0x84, 0x2f, 0x33, 0x98, 0x7b, 0x48, 0xde, 0x7b, 0x9f, 0xb0, 0xd1, 0x11, 0xf5, 0xe7, + 0xdc, 0x55, 0x1b, 0x73, 0x02, 0x45, 0xb5, 0xd7, 0x78, 0xfb, 0xdc, 0xa9, 0x3d, 0xfe, 0x62, 0xfa, + 0xad, 0x80, 0x0e, 0x71, 0x56, 0xa7, 0xdc, 0x15, 0x77, 0xca, 0x7f, 0x4e, 0xde, 0xe4, 0xf1, 0x68, + 0x63, 0xf7, 0xe2, 0xa7, 0x8f, 0x55, 0x8a, 0x26, 0x1d, 0x16, 0xd1, 0xb4, 0xd1, 0xff, 0xb0, 0x33, + 0xe5, 0xfe, 0xc1, 0x55, 0xa9, 0x5a, 0xe9, 0x93, 0x9e, 0xfd, 0xbd, 0x21, 0xa8, 0x7b, 0x68, 0x45, + 0xce, 0xa7, 0x69, 0x92, 0xe9, 0x6b, 0x37, 0xe6, 0xbc, 0x5e, 0xb9, 0xb5, 0xb8, 0xb1, 0xbe, 0xb7, + 0xf3, 0x50, 0xbc, 0x55, 0xcb, 0x7b, 0x57, 0x11, 0x49, 0xdf, 0xd5, 0x5b, 0xed, 0x2f, 0x9f, 0x52, + 0xf3, 0x76, 0xf0, 0xba, 0x1e, 0xbf, 0x61, 0xed, 0xca, 0x30, 0x07, 0xc5, 0x05, 0xc7, 0x94, 0x17, + 0xe9, 0xfc, 0x5a, 0xb1, 0x6c, 0xd2, 0x54, 0x87, 0x3f, 0x17, 0x0b, 0x35, 0x53, 0x59, 0xae, 0xa4, + 0x1f, 0x5d, 0x6f, 0x26, 0x55, 0x5b, 0x79, 0xe4, 0xeb, 0xee, 0xd9, 0x5f, 0x8a, 0xb2, 0xe4, 0xf6, + 0x3d, 0x59, 0xf0, 0x4d, 0x71, 0xe6, 0x9c, 0x29, 0xfa, 0xbb, 0xa4, 0x9a, 0x7f, 0x70, 0x44, 0x18, + 0xa8, 0x3c, 0xdb, 0x39, 0x9d, 0xb9, 0x90, 0x47, 0x3d, 0x9e, 0xaf, 0x26, 0xf9, 0xb7, 0x93, 0xe8, + 0xaf, 0x2d, 0x4f, 0x37, 0xf9, 0x6f, 0x77, 0x90, 0xf1, 0x3a, 0xbc, 0xbb, 0x7a, 0xcb, 0x11, 0xa3, + 0x7d, 0xdd, 0x17, 0xde, 0x56, 0x56, 0x1f, 0x2a, 0xee, 0xe9, 0xda, 0x72, 0xdd, 0xf7, 0x45, 0xbd, + 0xc9, 0xcc, 0xab, 0xfb, 0x7b, 0xee, 0x9d, 0x9d, 0x19, 0xbb, 0x7a, 0x4d, 0xd1, 0xd3, 0x87, 0x0c, + 0x3b, 0x59, 0x64, 0x5e, 0x5f, 0x8a, 0x32, 0xf8, 0xd4, 0xf1, 0xe7, 0x6d, 0xba, 0x78, 0x40, 0xa6, + 
0xc8, 0xb3, 0xb8, 0x77, 0x1f, 0x94, 0x19, 0x46, 0xc1, 0x80, 0x02, 0x00, 0x92, 0xb4, 0x2e, 0x74, + 0x00, 0x03, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 768, // uncompressed data size (bytes) + 420, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_sig_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_ga10x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_patch_loc_data[] = +{ + 0x10, 0x20, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_ga10x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_ga10x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_patch_meta_data[] = +{ + 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_GA102("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/reload/g_booteruc_reload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_GA102_num_sigs_data[] = +{ + 0x02, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_GA102_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_GA102_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterReloadUcode_GA102 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_GA102_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_GA102(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterReloadUcode_GA102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_TU102.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_TU102.c new file mode 100644 index 000000000..e06f17f0a --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_TU102.c @@ -0,0 +1,1317 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8960 +// COMPRESSED SIZE (bytes): 7375 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_image_dbg_data[] = +{ + 0xed, 0x99, 0x45, 0x50, 0x1c, 0x5a, 0xb0, 0x86, 0x07, 0x77, 0x18, 0xdc, 0xdd, 0xdd, 0x82, 0x04, + 0x82, 0xbb, 0xcb, 0x10, 0x9c, 0xe0, 0x16, 0x82, 0x3b, 0x04, 0x77, 0x27, 0xb8, 0xc3, 0xe0, 0xae, + 0xc1, 0xdd, 0x1d, 0x02, 0x04, 0x19, 0xdc, 0xdd, 0xdd, 0x79, 0xf7, 0x6d, 0xef, 0xee, 0x6d, 0x5f, + 0xdd, 0x6f, 0xf3, 0xf7, 0xd9, 0x9c, 0xee, 0xea, 0x53, 0xf5, 0xf7, 0xa9, 0xea, 0x08, 0x80, 0x3f, + 0xe0, 0x3d, 0x09, 0xe0, 0x8b, 0x01, 0x00, 0x3c, 0x42, 0x3f, 0x02, 0x9e, 0xa0, 0x63, 0x00, 0xd0, + 0x80, 0xee, 0xf5, 0x8d, 0x8f, 0x8f, 0x0f, 0x8c, 0x08, 0x00, 0x14, 0xe0, 0x23, 0x17, 0xb6, 0x61, + 0x17, 0xc0, 0x9d, 0x06, 0x81, 0x6a, 0x48, 0x02, 0xb0, 0xa4, 0x41, 0xa0, 0xff, 0x11, 0xda, 0x34, + 0x08, 0xcc, 0x3f, 0x42, 0x16, 0x01, 0x00, 0x00, 0xd2, 0x72, 0x61, 0x1a, 0x37, 0x60, 0xba, 0x73, + 0xf3, 0x36, 0xd2, 0x72, 0xa1, 0xd2, 0xd2, 0xa1, 0x1b, 0x37, 0xa0, 0x1a, 0x21, 0xd0, 0xbe, 0x4d, + 0x00, 0x00, 0x06, 0xa7, 0x1c, 0x00, 0xf8, 0x90, 0x0b, 0xc0, 0x80, 0x8d, 0x00, 0x58, 0x22, 0xfc, + 0x6f, 0x80, 0xd4, 0x92, 0x0e, 0x80, 0x82, 0xfe, 0x27, 0xba, 0xfd, 0x02, 0x1d, 0x07, 0x00, 0xc0, + 0x47, 0xfc, 0x93, 0xea, 0x2d, 0x17, 0xae, 0xf3, 0xf6, 0xf9, 0x09, 0xfe, 0x11, 0xe6, 0x9f, 0xdb, + 0xa0, 0x93, 0x31, 0x10, 0xd7, 0xff, 0x37, 0xf3, 0xdb, 0x13, 0x5c, 0x04, 0x80, 0x14, 0xf0, 0x96, + 0x03, 0xdb, 0x1e, 0x0f, 0xdd, 0x94, 0x04, 0xfc, 0xe7, 0xfc, 0xfe, 0x0c, 0xf0, 0xfd, 0xa7, 0x9e, + 0x47, 0xe8, 0xce, 0xc2, 0x17, 0xa8, 0x38, 0xc5, 0x04, 0xa4, 0x08, 0x20, 0x1c, 0x20, 0x2f, 0xf7, + 0x11, 0x10, 0xf7, 0x8f, 0x76, 0x7f, 0xdc, 0x72, 0xbc, 0xbe, 0xcb, 0x41, 0xe5, 0xe5, 0x0a, 0xff, + 0x01, 0xfc, 0x9f, 0xf8, 0xc5, 0xfa, 0xca, 0x63, 0xdd, 0xc8, 0x39, 0xe8, 0x3f, 0x29, 0xcf, 0xab, + 0xfe, 0xab, 0x4b, 0xc6, 0x81, 0xe4, 0xf5, 0x86, 0xd4, 0x06, 0x48, 0xf6, 0xd2, 0x4d, 0x74, 0x6e, + 0x47, 0xd5, 0xe4, 0xcd, 0xa9, 0x9d, 0xbb, 0x54, 0x68, 0xeb, 0x79, 0xbd, 0x3e, 0x26, 0x39, 0x2a, + 0xa4, 0x56, 0xb0, 0x26, 0x93, 0x8e, 0xb9, 0x4f, 0x24, 0xc0, 0xfc, 0x41, 0xbf, 0x4e, 0xed, 0x82, + 0xa9, 0xc8, 0xef, 0x95, 0x59, 0xc2, 0xc5, 0x96, 0x60, 0xc0, 0xe7, 0x4c, 0xf8, 0xf1, 0x69, 0x1b, + 0x9e, 0xfa, 0xf8, 0x44, 0xa7, 0xb6, 0x14, 0x57, 0x7e, 0x67, 0xcb, 0xf3, 0x54, 0xba, 0xcb, 0x86, + 0x50, 0x00, 0xdd, 0xba, 0xec, 0xb0, 0x3b, 0x27, 0x14, 0xcd, 0xab, 0xe4, 0x31, 0x2f, 0x1b, 0x7c, + 0x66, 0x2d, 0x41, 0x36, 0x80, 0x7a, 0xa9, 0xfb, 0x5c, 0x98, 0x3e, 0x3f, 0xdd, 0x8a, 0xf2, 0x80, + 0x1f, 0x37, 0x13, 0x01, 0xf8, 0xb2, 0xd2, 0x1b, 0xcc, 0xe5, 0x54, 0x19, 0x7f, 0x71, 0x12, 0xe8, + 0x8b, 0x26, 0x06, 0x2a, 0x01, 0x2d, 0x69, 0xd2, 0x1b, 0xe9, 0xf6, 0xbf, 0x52, 0x6f, 0x49, 0x92, + 0x99, 0x0d, 0x5a, 0x4b, 0x51, 0x67, 0xff, 0x8e, 0x3c, 0xba, 0x10, 0xef, 0x29, 0xd9, 0x4e, 0x72, + 0x27, 0xd7, 0xaa, 0xc5, 0xd4, 0xb4, 0x14, 0x02, 0x1f, 0x98, 0xc8, 0x86, 0x99, 0xb3, 0x81, 0x88, + 0x9f, 0xb8, 0x08, 0x18, 0x04, 0x69, 0x68, 0x7a, 0xbe, 0x12, 0xde, 0xba, 0xc2, 0xcc, 0x81, 0x73, + 0x17, 0x90, 0x77, 0xf8, 0x2b, 0x73, 0xcc, 0xa9, 0x44, 0x31, 0xa5, 0xb5, 0x51, 0x85, 0x5b, 0x9e, + 0x17, 0xff, 0x28, 0x4b, 0xd6, 0xb4, 0xa8, 0x93, 0x49, 0x17, 0x29, 0x32, 0x09, 0x9e, 0xe9, 0xa0, + 0xfd, 0x29, 0xbd, 0xfa, 0x21, 0x1a, 0xa1, 0x42, 0x61, 
0x8b, 0xb2, 0xa9, 0x3f, 0x3c, 0x97, 0x99, + 0x7e, 0xe1, 0xb8, 0x03, 0x74, 0xea, 0x19, 0xe8, 0x1d, 0x81, 0x91, 0xd3, 0x98, 0x03, 0x2d, 0xd8, + 0x85, 0x54, 0x7b, 0xd2, 0xaf, 0x05, 0x2e, 0x06, 0xed, 0x05, 0x63, 0x48, 0xcc, 0xd1, 0x35, 0x88, + 0xf8, 0xd4, 0x74, 0xef, 0x4f, 0x53, 0x43, 0xc4, 0x9f, 0x86, 0x38, 0x41, 0x49, 0x38, 0xee, 0x78, + 0xd0, 0x08, 0xab, 0x1e, 0x61, 0x51, 0xcc, 0x73, 0x74, 0xed, 0x5a, 0xdc, 0x30, 0x5a, 0x67, 0xaa, + 0x9b, 0x9c, 0x0e, 0x50, 0xac, 0x76, 0xb6, 0xa1, 0x50, 0x25, 0x4f, 0x62, 0x68, 0x31, 0xe9, 0xf2, + 0x03, 0xb6, 0x47, 0x2b, 0xa5, 0xc4, 0x8b, 0x8c, 0x6d, 0xae, 0x30, 0x33, 0xe2, 0x26, 0xd8, 0xa5, + 0x55, 0xe2, 0x90, 0x50, 0x9b, 0x37, 0xc4, 0x5a, 0x83, 0x26, 0x92, 0xf6, 0xd2, 0x42, 0x5a, 0x7c, + 0xb6, 0x4c, 0xcb, 0xbc, 0xe0, 0xb8, 0xb1, 0x73, 0x2b, 0x18, 0x99, 0x48, 0x99, 0x1d, 0x80, 0x88, + 0x3a, 0xb9, 0x10, 0xeb, 0xe5, 0x78, 0x20, 0x3c, 0xba, 0x13, 0xe9, 0x03, 0xd6, 0xb9, 0xc4, 0xbd, + 0x4e, 0xd7, 0x6a, 0xb4, 0x96, 0x9d, 0x03, 0x6b, 0x41, 0x39, 0xf8, 0xca, 0x7c, 0x93, 0xfd, 0xba, + 0xa8, 0x6f, 0x60, 0xd8, 0x64, 0x66, 0x03, 0xcb, 0x7f, 0x54, 0xd7, 0x07, 0xce, 0x7e, 0xf4, 0x35, + 0xd0, 0x2d, 0xb7, 0x4a, 0x00, 0x29, 0x07, 0xe5, 0xd3, 0x20, 0x9f, 0x08, 0xa7, 0x00, 0xf6, 0x0a, + 0x44, 0x36, 0x24, 0x2f, 0xd2, 0x1a, 0x5b, 0xbc, 0x70, 0x4a, 0x11, 0x25, 0x36, 0xbb, 0x31, 0xd3, + 0x90, 0xc6, 0xa1, 0xad, 0xe0, 0x5a, 0xaa, 0x7d, 0x57, 0x7d, 0x53, 0x81, 0x98, 0x4a, 0x47, 0xd7, + 0x9b, 0xa8, 0x33, 0x25, 0xdd, 0xaf, 0x44, 0xa8, 0xa2, 0x55, 0xcf, 0x44, 0xb6, 0x8c, 0x37, 0xa2, + 0xaa, 0x43, 0x93, 0x35, 0x71, 0x69, 0x03, 0xb6, 0x0e, 0xed, 0x6a, 0xd2, 0xeb, 0x81, 0x56, 0x27, + 0x26, 0x59, 0x2a, 0xf4, 0xd1, 0xbd, 0x2d, 0x9d, 0xe9, 0xce, 0x8d, 0x6b, 0xfc, 0xd6, 0x14, 0xe7, + 0x1b, 0x02, 0x9f, 0x57, 0xf4, 0xcc, 0x8b, 0x72, 0x3e, 0xf3, 0x35, 0xbb, 0xb9, 0xf3, 0x9f, 0x01, + 0x10, 0xef, 0x5d, 0xb2, 0x77, 0x61, 0xbe, 0x32, 0xa5, 0xe0, 0x30, 0x3c, 0xc8, 0x8c, 0x29, 0x5b, + 0xa7, 0xd5, 0xbb, 0x24, 0x6d, 0xfe, 0x13, 0x03, 0xe5, 0xd1, 0x4a, 0xfa, 0xaf, 0x91, 0x19, 0x4f, + 0xd5, 0x75, 0x80, 0xb9, 0xbb, 0xc2, 0x00, 0x02, 0xaa, 0xeb, 0x88, 0xc9, 0xce, 0x93, 0x5f, 0xd1, + 0x6b, 0x04, 0x1e, 0x55, 0x09, 0xb0, 0xda, 0xf6, 0xb4, 0xff, 0x26, 0xe2, 0x3b, 0xd9, 0x64, 0x80, + 0xb5, 0xcd, 0x79, 0x3d, 0x83, 0x49, 0xad, 0x4f, 0xbe, 0x10, 0xdf, 0xf5, 0xc8, 0x04, 0x12, 0xe6, + 0x64, 0x56, 0x35, 0x01, 0x21, 0xcb, 0xc0, 0xdf, 0x43, 0x46, 0xed, 0x50, 0x83, 0x83, 0x57, 0x2f, + 0x4e, 0x72, 0xdf, 0x45, 0xbd, 0x85, 0x12, 0x23, 0x97, 0x97, 0xbb, 0x0c, 0xdf, 0x2d, 0x40, 0x66, + 0xe0, 0x66, 0x98, 0xd8, 0x00, 0x61, 0xe7, 0x02, 0x5b, 0x2f, 0xdc, 0x6e, 0x38, 0xd2, 0xa4, 0x98, + 0xab, 0x77, 0xb7, 0x6a, 0x59, 0x70, 0x4a, 0x6c, 0x9c, 0x50, 0x21, 0x35, 0x7a, 0x83, 0xc8, 0x11, + 0x74, 0x7e, 0x12, 0x16, 0xa8, 0x4d, 0x69, 0xca, 0x69, 0xfd, 0xee, 0xfe, 0xac, 0xe7, 0xde, 0x54, + 0x5f, 0x92, 0x00, 0x11, 0x7d, 0xf9, 0x93, 0x89, 0x8b, 0xff, 0x5d, 0xa9, 0x6c, 0xf0, 0x24, 0xd3, + 0x83, 0x89, 0x72, 0x24, 0x66, 0x98, 0x7f, 0xca, 0xd2, 0xaf, 0xf5, 0x49, 0xd5, 0x96, 0xab, 0x85, + 0x86, 0x3d, 0xc9, 0xb4, 0x47, 0x04, 0xf4, 0x4b, 0xab, 0x73, 0x02, 0x24, 0x21, 0x93, 0xda, 0x0e, + 0x1c, 0xfc, 0xf7, 0x26, 0x6e, 0x83, 0x7c, 0xb1, 0x12, 0xa8, 0xfe, 0xa1, 0x96, 0x51, 0x50, 0x94, + 0xe7, 0x97, 0x89, 0xb9, 0xcb, 0x1a, 0xd3, 0x30, 0x30, 0x22, 0x44, 0x14, 0x01, 0x9e, 0x76, 0x8c, + 0x18, 0x0b, 0xc0, 0x95, 0x25, 0xe5, 0xf3, 0xd8, 0x33, 0x17, 0x13, 0xbf, 0xcc, 0x52, 0x5d, 0xa4, + 0xf5, 0xf6, 0x32, 0x10, 0xf5, 0x68, 0x5c, 0x24, 0xfd, 0xac, 0x28, 0xa6, 0xf3, 0x3d, 0xa2, 0xe1, + 0x15, 0xd8, 0xe8, 0xc9, 0x6a, 0x2f, 0x39, 0xc1, 0x38, 0x63, 0x9d, 0x94, 0xe4, 
0x1f, 0x7b, 0x13, + 0x08, 0x35, 0xd9, 0xa8, 0xd7, 0x4d, 0x55, 0xc8, 0x4a, 0x83, 0x4f, 0x80, 0x26, 0x72, 0x14, 0xb5, + 0xec, 0x4d, 0xcd, 0x52, 0xe4, 0xfc, 0xfd, 0x55, 0x3f, 0xd3, 0x38, 0x5c, 0x0a, 0xb5, 0x91, 0x79, + 0x5d, 0x62, 0x87, 0xce, 0xb0, 0x91, 0x7b, 0xe5, 0x4b, 0xb3, 0xd1, 0x61, 0x02, 0x06, 0x84, 0x57, + 0xe3, 0x75, 0x72, 0x04, 0x58, 0xe4, 0x84, 0x6a, 0x67, 0x2a, 0x78, 0xe5, 0xae, 0xb1, 0xfd, 0x3b, + 0x9e, 0x25, 0x97, 0x66, 0xac, 0x70, 0xc0, 0x04, 0xf1, 0x36, 0xb9, 0x56, 0xb5, 0x78, 0x91, 0x0c, + 0x18, 0x5a, 0x52, 0x93, 0xb3, 0x4e, 0x71, 0x88, 0xe5, 0x33, 0x7c, 0x53, 0xec, 0xeb, 0x1c, 0x9e, + 0xf4, 0xa4, 0x5b, 0x5f, 0x7c, 0x4c, 0x4f, 0x7c, 0x9e, 0x38, 0xcf, 0x6c, 0xee, 0x1a, 0x24, 0x57, + 0x52, 0xe6, 0x81, 0xd0, 0xfe, 0x2b, 0xae, 0x6e, 0x9a, 0x8a, 0xcc, 0xc5, 0x1e, 0x2a, 0xa5, 0x32, + 0x44, 0x2a, 0xcd, 0x57, 0x96, 0x48, 0xc4, 0xd9, 0x7b, 0x4c, 0x92, 0xdd, 0x9f, 0x6c, 0xe7, 0x76, + 0x9b, 0x83, 0x5c, 0xf1, 0xb3, 0xf7, 0x13, 0x25, 0xd9, 0xdd, 0x6c, 0x1d, 0xfb, 0xa3, 0xe7, 0x7e, + 0x38, 0xe5, 0x5f, 0x58, 0x34, 0x88, 0xed, 0x0f, 0xec, 0x62, 0x19, 0xff, 0x05, 0xff, 0x92, 0xfd, + 0x9b, 0x8b, 0xd8, 0xb0, 0x7c, 0xb4, 0xbd, 0x95, 0x29, 0x56, 0x0c, 0xf6, 0x5c, 0x13, 0xd7, 0xc5, + 0xd0, 0x33, 0x55, 0xbf, 0x08, 0x10, 0x1c, 0xb9, 0x5b, 0xfc, 0xfd, 0x9c, 0x33, 0xa8, 0x51, 0xb5, + 0x43, 0xff, 0x97, 0xaf, 0xf8, 0xe6, 0x4a, 0x2e, 0x07, 0x90, 0x48, 0x8d, 0x99, 0x18, 0x4a, 0x98, + 0x5e, 0x20, 0xb8, 0x47, 0x1f, 0x18, 0xe8, 0x72, 0x88, 0x1e, 0x65, 0xf1, 0x22, 0x3b, 0xca, 0xb4, + 0x92, 0x11, 0x19, 0x3f, 0xd8, 0x11, 0xaf, 0x36, 0x98, 0x81, 0x89, 0xcb, 0xbd, 0xdc, 0x45, 0x1d, + 0x74, 0x2b, 0xee, 0xcd, 0xbf, 0x89, 0xae, 0x64, 0x57, 0x17, 0x8b, 0xc1, 0x56, 0x17, 0xef, 0x55, + 0xec, 0x89, 0xc4, 0x1e, 0xe7, 0xcb, 0x25, 0x10, 0xe7, 0x28, 0x60, 0x90, 0xc7, 0xbe, 0x29, 0x45, + 0x15, 0x88, 0x5b, 0x91, 0x31, 0x3a, 0xc9, 0xb1, 0x5c, 0xfa, 0x9b, 0x17, 0xce, 0x1c, 0xc6, 0x7b, + 0x4b, 0x66, 0x29, 0xc8, 0x05, 0xf6, 0xc8, 0xeb, 0x1a, 0x5a, 0x1f, 0x98, 0x1d, 0x54, 0xef, 0xf0, + 0x6a, 0x28, 0xd8, 0xf4, 0x2d, 0x15, 0xa8, 0x67, 0x7c, 0xcc, 0x78, 0x3e, 0xb0, 0x13, 0xeb, 0x04, + 0xad, 0x34, 0x65, 0xcc, 0x40, 0xe2, 0xfe, 0x48, 0x9f, 0xb3, 0xd4, 0x54, 0x35, 0x99, 0x58, 0x23, + 0xaf, 0xec, 0xfe, 0x19, 0x4e, 0x60, 0xb5, 0xff, 0xe3, 0x07, 0xbc, 0x0f, 0x1e, 0x1f, 0x30, 0x32, + 0x55, 0x90, 0x8d, 0xfd, 0x13, 0xe9, 0xbd, 0xeb, 0x82, 0x48, 0x08, 0x60, 0x50, 0x36, 0x82, 0x5c, + 0x91, 0xe5, 0x38, 0xeb, 0xc4, 0x79, 0xa4, 0xa1, 0x50, 0xa4, 0x09, 0x6d, 0xc5, 0xd0, 0x9a, 0xf4, + 0x10, 0xaa, 0x2f, 0x2d, 0xd0, 0xf4, 0xb7, 0x74, 0x6d, 0xea, 0x2b, 0x7e, 0x4f, 0x0e, 0xd7, 0x6f, + 0x19, 0x07, 0xc8, 0x24, 0x9d, 0x14, 0x03, 0xca, 0xc7, 0x99, 0x8a, 0x27, 0x04, 0xb8, 0xf3, 0xfd, + 0x42, 0xcb, 0x74, 0xe4, 0x9d, 0x18, 0x73, 0x76, 0xb8, 0x42, 0xd4, 0x02, 0x20, 0xd8, 0x41, 0x0b, + 0x0b, 0x07, 0xfa, 0x9d, 0x1f, 0x5c, 0x0a, 0xf9, 0x9a, 0x60, 0xb1, 0x37, 0x41, 0x7a, 0x8f, 0xf2, + 0x6a, 0x15, 0x6b, 0x5e, 0x61, 0xf3, 0x98, 0x65, 0xbf, 0xdb, 0x39, 0x5b, 0x30, 0x27, 0xae, 0x61, + 0x5e, 0x7e, 0x11, 0x09, 0x4c, 0x00, 0x20, 0x29, 0x59, 0x69, 0x49, 0x72, 0x22, 0x70, 0x13, 0x60, + 0x66, 0xb5, 0x78, 0x5f, 0x6c, 0x9b, 0x50, 0x98, 0xd1, 0x97, 0xde, 0xb5, 0xe2, 0x1a, 0x9d, 0xc5, + 0x9e, 0x3c, 0xa0, 0xca, 0xf9, 0x48, 0x7f, 0x79, 0x14, 0x36, 0x77, 0x1f, 0x01, 0x02, 0xac, 0xe1, + 0x42, 0x2e, 0xea, 0xc4, 0x27, 0x35, 0xd5, 0xcb, 0x23, 0x79, 0x22, 0xe7, 0x89, 0x7e, 0xca, 0xb3, + 0xdc, 0xa2, 0xbc, 0xc8, 0x6d, 0xa2, 0xea, 0x8b, 0x5f, 0x22, 0xa7, 0x86, 0x86, 0x99, 0xfd, 0x91, + 0x5d, 0x9b, 0x61, 0xd3, 0x8c, 0x5f, 0xa0, 0xfe, 0x63, 0x4c, 0x5a, 0x7a, 0xa4, 0x10, 0x03, 0xd2, + 0xc7, 
0x1d, 0x36, 0x47, 0x52, 0xfc, 0xf5, 0xf1, 0xde, 0xb2, 0xb3, 0x30, 0xce, 0xd2, 0xe9, 0x66, + 0x63, 0xe3, 0x55, 0x87, 0x67, 0xdd, 0xa8, 0x8e, 0x68, 0x1c, 0x14, 0xfd, 0x59, 0x29, 0xa5, 0x48, + 0x11, 0x9c, 0xbc, 0x55, 0x35, 0x74, 0xd3, 0xf7, 0x9e, 0xa2, 0x09, 0x7d, 0x2c, 0xa3, 0xa0, 0x46, + 0x96, 0x04, 0x26, 0xbf, 0xcd, 0x61, 0x44, 0x4b, 0xc5, 0x86, 0x56, 0x11, 0x27, 0x92, 0xed, 0xd9, + 0x7e, 0x1f, 0xdc, 0x6b, 0x14, 0xff, 0xe6, 0xc3, 0x5d, 0x65, 0x7c, 0x5a, 0x2d, 0x1c, 0x25, 0x96, + 0x4d, 0xff, 0x3a, 0x12, 0x4f, 0xe4, 0x16, 0xcd, 0x35, 0xcf, 0x8f, 0xce, 0x98, 0xff, 0x85, 0x2b, + 0xf6, 0xc5, 0xe9, 0x7c, 0xae, 0x9d, 0x00, 0x85, 0x5b, 0x8a, 0x4a, 0x49, 0xe3, 0xf1, 0x7c, 0x46, + 0x1f, 0xad, 0x36, 0xbf, 0x1b, 0x8e, 0x31, 0xcd, 0x32, 0x23, 0x4b, 0xd0, 0x5a, 0x0d, 0x9b, 0x6f, + 0xa0, 0x92, 0xc5, 0xae, 0x85, 0xcf, 0x7b, 0xbe, 0xc1, 0xd1, 0x75, 0x84, 0xca, 0xeb, 0x06, 0x09, + 0xcf, 0xbd, 0x11, 0xec, 0x98, 0x66, 0xc5, 0xe1, 0x29, 0x5d, 0xcc, 0xbe, 0x06, 0x54, 0x0e, 0x4f, + 0xc4, 0x73, 0x65, 0x65, 0x68, 0xdd, 0x31, 0x01, 0xec, 0xb6, 0x2e, 0x19, 0xe8, 0xa4, 0xda, 0x0a, + 0xf4, 0x95, 0xea, 0xcd, 0x5f, 0xa2, 0xe1, 0x28, 0x50, 0x4e, 0x2b, 0x8b, 0xae, 0x7e, 0x8c, 0xe0, + 0xba, 0xc6, 0x0a, 0x7b, 0x2d, 0x6c, 0x1d, 0xb6, 0xca, 0x92, 0x8a, 0x91, 0x31, 0x42, 0xba, 0xfb, + 0xa1, 0x54, 0x7f, 0x6f, 0xba, 0x17, 0xc4, 0xc4, 0xcf, 0xf2, 0x75, 0x0a, 0xdc, 0x26, 0x1c, 0x63, + 0xd9, 0xb0, 0x91, 0x18, 0xbb, 0xaa, 0x77, 0x11, 0x3b, 0xc1, 0x42, 0x72, 0x00, 0x73, 0xe4, 0x07, + 0xec, 0x7f, 0xe1, 0xc3, 0x56, 0x2a, 0x0f, 0x73, 0xc1, 0xea, 0x36, 0xc8, 0xd9, 0x11, 0x76, 0x6e, + 0xe4, 0x67, 0x94, 0x1b, 0x4d, 0x50, 0xcd, 0xda, 0x5f, 0xcb, 0xb3, 0x99, 0xdc, 0xeb, 0xc4, 0xbb, + 0x33, 0x13, 0x55, 0xe1, 0x19, 0xfd, 0x47, 0xc7, 0x90, 0x7b, 0x94, 0x06, 0x70, 0x2a, 0xdd, 0x9a, + 0xaf, 0xd5, 0x7e, 0x4b, 0xef, 0xef, 0xbb, 0xae, 0x87, 0xb7, 0xb1, 0xbc, 0x86, 0x96, 0x80, 0x82, + 0x7b, 0x68, 0x77, 0xa4, 0x6b, 0x35, 0xc9, 0xa9, 0xd2, 0xb1, 0x15, 0x58, 0x47, 0xdb, 0xdf, 0xcc, + 0x46, 0x71, 0xd4, 0xe6, 0x38, 0x95, 0xc8, 0x15, 0xbd, 0x34, 0x27, 0xcd, 0x1f, 0xea, 0x5f, 0xc8, + 0x6f, 0x4f, 0xbe, 0x5b, 0x05, 0x94, 0x04, 0xbe, 0xf9, 0xe5, 0x44, 0xb7, 0x98, 0xb8, 0xf6, 0x25, + 0x79, 0x92, 0xba, 0xff, 0xda, 0xba, 0xdb, 0x5c, 0x3f, 0x7d, 0x77, 0x13, 0x98, 0x41, 0x0f, 0xc8, + 0xd7, 0x89, 0x34, 0x8c, 0xaa, 0xd7, 0xd4, 0xcd, 0xfc, 0x67, 0xec, 0x74, 0xea, 0xb3, 0x99, 0x4e, + 0x36, 0x95, 0x77, 0xf4, 0xe3, 0x66, 0x87, 0x91, 0x62, 0x7d, 0xd2, 0xc1, 0x70, 0x59, 0x67, 0x9a, + 0x19, 0x49, 0x3d, 0x43, 0xf1, 0x56, 0x42, 0xc3, 0x23, 0xe0, 0x64, 0x49, 0xe9, 0x9d, 0x53, 0x99, + 0x75, 0xb7, 0xfe, 0xbd, 0xb7, 0xf3, 0x3c, 0x1c, 0x8c, 0x1b, 0xc2, 0x64, 0x68, 0x1e, 0x4d, 0xdd, + 0x07, 0x65, 0x16, 0x07, 0x80, 0xbf, 0x41, 0x0b, 0x3c, 0xef, 0x8d, 0x80, 0x2b, 0xac, 0x5f, 0x92, + 0xf2, 0x7b, 0x98, 0x12, 0xd0, 0x38, 0x69, 0xb6, 0x1a, 0xe2, 0xd0, 0x64, 0xf7, 0x01, 0xa3, 0xd1, + 0x1c, 0xf2, 0xe4, 0x4f, 0xa4, 0xc1, 0x1a, 0x75, 0xd5, 0xa5, 0x79, 0x6d, 0x7c, 0x0b, 0x65, 0x53, + 0x4b, 0x50, 0xce, 0xbf, 0x75, 0x28, 0xc7, 0x41, 0x98, 0xe1, 0x19, 0x89, 0xc9, 0x48, 0xab, 0xf4, + 0xc7, 0xc3, 0xa6, 0x9e, 0x89, 0x47, 0xf9, 0x26, 0xc6, 0xe9, 0x24, 0x5c, 0x83, 0x79, 0x66, 0x64, + 0x96, 0xff, 0xa9, 0xdb, 0x69, 0x1d, 0x1c, 0xd8, 0x87, 0x90, 0x35, 0x8f, 0x84, 0xa2, 0xb4, 0xce, + 0x18, 0x0c, 0xaa, 0xf7, 0xce, 0x32, 0xa0, 0xaa, 0x7b, 0xca, 0x4d, 0x3c, 0xac, 0xf5, 0x79, 0x81, + 0xea, 0x4e, 0x24, 0xb2, 0x66, 0xec, 0xec, 0xaf, 0x04, 0x4b, 0x72, 0xb7, 0x8e, 0x70, 0x0c, 0xd4, + 0xf2, 0x4a, 0x92, 0x30, 0x78, 0xca, 0xb3, 0x38, 0x49, 0x5f, 0xd9, 0x3b, 0xe6, 0x84, 0xac, 0xc9, + 0xe7, 0xc5, 0xd7, 0x6f, 0x38, 
0xc9, 0xfe, 0xdc, 0x42, 0xea, 0x56, 0xb0, 0xbc, 0xff, 0x3a, 0xdc, + 0xa5, 0x34, 0x70, 0xe9, 0x38, 0xac, 0xa7, 0xd3, 0x95, 0x13, 0x27, 0x1a, 0x72, 0x83, 0xe6, 0x5b, + 0x45, 0x40, 0xfc, 0xd3, 0x3f, 0xe2, 0xb1, 0xbc, 0xd4, 0x20, 0x42, 0x95, 0x3e, 0x1e, 0xed, 0xf8, + 0x47, 0xaf, 0xed, 0xc7, 0xda, 0xfb, 0xa1, 0xdc, 0xc1, 0xc0, 0x88, 0x27, 0x3f, 0x03, 0xbf, 0x19, + 0xda, 0xaa, 0xea, 0x6a, 0x8f, 0x19, 0x36, 0xb3, 0x93, 0xbc, 0x5d, 0x72, 0xe2, 0x66, 0x4a, 0x91, + 0xcd, 0x2c, 0xb4, 0xd2, 0x3d, 0x2a, 0x82, 0x3e, 0xb7, 0xc6, 0x30, 0x9c, 0xf3, 0xa9, 0x93, 0x99, + 0x4a, 0xc1, 0x47, 0x2d, 0xab, 0xfd, 0x1f, 0x64, 0x91, 0xe5, 0x87, 0x1e, 0x52, 0x34, 0xaa, 0x89, + 0x92, 0xab, 0xa7, 0xfb, 0xf0, 0xed, 0x9a, 0x14, 0xaa, 0x9d, 0xfe, 0xc5, 0xb1, 0xba, 0x2a, 0x59, + 0xd3, 0x44, 0xd7, 0x08, 0x9e, 0xd0, 0x9d, 0xc8, 0x32, 0x4b, 0x46, 0xbf, 0x21, 0xf7, 0x1c, 0x82, + 0x46, 0x28, 0x45, 0x26, 0x3c, 0x77, 0xbe, 0xec, 0x8b, 0x39, 0x4f, 0xde, 0xe4, 0xc0, 0x88, 0xbd, + 0x91, 0x4b, 0x67, 0x8e, 0x4b, 0x99, 0x10, 0x00, 0x16, 0x5b, 0xab, 0xcb, 0x6d, 0xad, 0xe7, 0x25, + 0x0d, 0xf8, 0x01, 0x7f, 0xfb, 0x6f, 0xa7, 0x78, 0x08, 0x2f, 0x74, 0xca, 0xbb, 0xd7, 0x69, 0x95, + 0xe5, 0x58, 0x59, 0xfc, 0x18, 0x22, 0x23, 0xe5, 0x27, 0x08, 0x3a, 0xf6, 0xc6, 0x2d, 0x96, 0x07, + 0x95, 0x1d, 0xb7, 0x19, 0xd8, 0xbe, 0xc5, 0xc2, 0xd6, 0x08, 0x4d, 0x58, 0x92, 0xda, 0x6a, 0x19, + 0xa6, 0x81, 0x28, 0xf3, 0xf3, 0xa6, 0x6c, 0x77, 0x29, 0x19, 0xac, 0x11, 0xb8, 0x3a, 0x71, 0x4a, + 0x51, 0x6c, 0x33, 0x4e, 0xaf, 0x82, 0x4e, 0xa3, 0x2d, 0x6f, 0xdc, 0x5d, 0xa4, 0xba, 0x06, 0xbe, + 0xc9, 0x6b, 0x11, 0xe8, 0xd4, 0x38, 0x7e, 0xe6, 0x9a, 0xb5, 0x5c, 0x32, 0x3d, 0xb6, 0xcf, 0x56, + 0x5d, 0x8a, 0xb3, 0x61, 0x36, 0x34, 0x04, 0x2e, 0x3f, 0x11, 0xce, 0x3c, 0xfb, 0x8a, 0xa4, 0x75, + 0x37, 0x20, 0xc9, 0x2d, 0xa2, 0xc8, 0x88, 0x47, 0xac, 0x4f, 0x92, 0x7f, 0xb1, 0x4b, 0x40, 0x84, + 0x97, 0xb9, 0xfd, 0xef, 0xf7, 0x59, 0x21, 0xa3, 0x49, 0x17, 0x31, 0x4a, 0x07, 0x6b, 0xec, 0xcd, + 0x03, 0xda, 0x7e, 0xc8, 0xa2, 0x26, 0xad, 0xbe, 0xc7, 0x95, 0x74, 0xd3, 0x68, 0x78, 0x19, 0xa7, + 0x39, 0xf9, 0x75, 0x9d, 0xae, 0x5d, 0xa1, 0x55, 0xec, 0xb8, 0x62, 0x7d, 0x5b, 0x8e, 0x6a, 0x6e, + 0xc5, 0xe1, 0x9d, 0x6e, 0xc7, 0xe2, 0xd3, 0x29, 0xc9, 0xaa, 0x4f, 0x1d, 0xe2, 0xd0, 0xbf, 0x33, + 0x8a, 0x0b, 0xdd, 0x9d, 0xa5, 0xfe, 0x2b, 0xdc, 0xd2, 0x60, 0x68, 0xe0, 0xfc, 0x60, 0x06, 0x01, + 0x5d, 0x36, 0x4e, 0xfa, 0x11, 0xf5, 0xcf, 0x88, 0x06, 0x85, 0x97, 0xc7, 0x80, 0x75, 0xa9, 0xe8, + 0xa4, 0xfe, 0x6c, 0x42, 0xf1, 0xe6, 0xb0, 0xb6, 0x28, 0x67, 0x51, 0xbd, 0x88, 0x8f, 0x7a, 0xa0, + 0xa2, 0x4b, 0x9d, 0xe7, 0x07, 0x69, 0xe4, 0x39, 0x8c, 0x9f, 0xea, 0x20, 0x61, 0x0a, 0x57, 0xdc, + 0x1c, 0x1c, 0x7f, 0xfe, 0xc2, 0x1c, 0x2d, 0x2a, 0x94, 0x59, 0x76, 0xce, 0x6f, 0x97, 0x08, 0x28, + 0x16, 0x96, 0x5b, 0xe4, 0x96, 0xd9, 0xbf, 0x4d, 0x2f, 0x4e, 0xcc, 0x3b, 0x86, 0x58, 0x1b, 0xef, + 0x3a, 0x4c, 0xeb, 0x08, 0x7b, 0x0a, 0xf0, 0x0a, 0x21, 0x36, 0xc0, 0x16, 0x1c, 0xb3, 0x39, 0x3a, + 0x0f, 0x87, 0x73, 0x2e, 0xca, 0x35, 0x1a, 0x53, 0xb8, 0x9d, 0xb9, 0x1c, 0xd8, 0x1d, 0x77, 0xd2, + 0xa9, 0x25, 0x44, 0x1d, 0x9b, 0x8c, 0xfa, 0xd7, 0x39, 0xc7, 0x4e, 0x7c, 0x94, 0x14, 0x9f, 0xc8, + 0xcc, 0xf9, 0x21, 0x27, 0x63, 0xad, 0x18, 0xf1, 0xd3, 0xc6, 0x2c, 0xdc, 0x3d, 0xa5, 0x3d, 0x44, + 0xd7, 0xf0, 0x5c, 0x07, 0xf3, 0xde, 0xb9, 0x2a, 0x36, 0x73, 0x22, 0xd1, 0x2c, 0x8b, 0xd0, 0x97, + 0x54, 0x10, 0xba, 0xbe, 0x21, 0x34, 0xfa, 0x28, 0x92, 0x76, 0xe1, 0xe1, 0x20, 0x25, 0xbd, 0xa0, + 0x44, 0x5e, 0x61, 0xd5, 0x39, 0x72, 0x56, 0x29, 0x51, 0x5c, 0xae, 0xd7, 0xcb, 0x5a, 0xb4, 0x4c, + 0x1c, 0x11, 0x8b, 0xca, 0xaa, 0x40, 0xe2, 0x25, 0x7b, 
0xa2, 0xa3, 0xe1, 0x4f, 0xf9, 0xe2, 0x68, + 0xe2, 0x64, 0x04, 0xfb, 0x03, 0xe7, 0x62, 0x44, 0x88, 0xa6, 0xbd, 0xb0, 0x46, 0xd0, 0xa6, 0x64, + 0x43, 0x74, 0x63, 0xd4, 0xa7, 0x63, 0x2a, 0x10, 0x7c, 0x8b, 0x1c, 0xd3, 0x91, 0x05, 0x6f, 0xf5, + 0xd0, 0x0b, 0xce, 0x23, 0x66, 0x47, 0xd1, 0xda, 0x8a, 0x89, 0x46, 0x30, 0xda, 0x1e, 0x61, 0x74, + 0x11, 0x24, 0x90, 0x61, 0x56, 0xaa, 0xb4, 0x04, 0x36, 0xd6, 0x2f, 0xa2, 0x2e, 0xed, 0xd6, 0xf8, + 0xc7, 0x3d, 0x0c, 0x5b, 0x1f, 0x34, 0x24, 0x30, 0xc3, 0xe4, 0x37, 0xcc, 0x66, 0x57, 0x65, 0xf4, + 0xab, 0x72, 0xf8, 0x4a, 0xc4, 0xac, 0x04, 0x57, 0x96, 0xf0, 0x2b, 0x18, 0x07, 0x30, 0xbf, 0xb4, + 0x27, 0x40, 0xc7, 0xad, 0x8e, 0xdc, 0x3c, 0x5a, 0x0c, 0x4a, 0x39, 0xb8, 0xd9, 0x0a, 0xd1, 0x1e, + 0x46, 0xef, 0x11, 0x18, 0x95, 0xb1, 0x08, 0xa6, 0xa2, 0xbf, 0x12, 0xb6, 0x07, 0x8f, 0xc7, 0xfd, + 0xf6, 0xc2, 0x2e, 0x57, 0xa6, 0xa0, 0xf3, 0x61, 0x69, 0x10, 0xea, 0x01, 0xb9, 0xe4, 0x4f, 0x7d, + 0x3e, 0x00, 0xb4, 0x9c, 0x96, 0x0f, 0x02, 0x90, 0x79, 0x18, 0xe1, 0x51, 0x40, 0x35, 0x69, 0x53, + 0xdf, 0xa1, 0x3b, 0xc2, 0x14, 0xde, 0x86, 0x6e, 0xca, 0xc6, 0x34, 0xa6, 0x36, 0xc0, 0x94, 0xdf, + 0x63, 0x5d, 0x02, 0x8e, 0x16, 0x0d, 0xaa, 0xe2, 0x2b, 0xe0, 0x7c, 0x6d, 0x82, 0xf2, 0x63, 0x46, + 0x6a, 0x58, 0x73, 0x73, 0xa4, 0xfc, 0xec, 0xc9, 0xca, 0xc4, 0xe3, 0x51, 0xef, 0x46, 0xbf, 0x6a, + 0xcb, 0x6e, 0x49, 0x57, 0xa6, 0xa5, 0xb9, 0x59, 0x6a, 0x0f, 0x7e, 0x40, 0x3d, 0x12, 0x01, 0xca, + 0x62, 0xda, 0x26, 0x70, 0xe6, 0x37, 0xe3, 0x5c, 0x60, 0x26, 0xf0, 0x0c, 0x48, 0xb9, 0x12, 0x87, + 0xe9, 0xdd, 0x6e, 0x74, 0xdd, 0x77, 0x19, 0x46, 0x4b, 0x35, 0xf0, 0x71, 0xbb, 0x0e, 0x2c, 0x51, + 0x97, 0x79, 0x9e, 0x2f, 0xb0, 0x0e, 0xc4, 0x97, 0xba, 0xf2, 0x71, 0xc4, 0x97, 0x29, 0xb2, 0x20, + 0xe1, 0xc9, 0x8c, 0xd8, 0xa1, 0xc6, 0x71, 0x65, 0x80, 0x4f, 0xf5, 0x74, 0xd8, 0x1e, 0x03, 0x2b, + 0xd2, 0xdd, 0xb7, 0x30, 0x03, 0x21, 0x02, 0xa8, 0x25, 0xef, 0x34, 0x67, 0x94, 0xf0, 0x86, 0x29, + 0x14, 0x05, 0x2a, 0x95, 0x42, 0xc6, 0x0a, 0x0b, 0x44, 0xf2, 0x0f, 0xe5, 0x9e, 0x75, 0xd0, 0x15, + 0xb5, 0xcc, 0x75, 0x18, 0xc5, 0x7c, 0x85, 0x3f, 0x05, 0xc1, 0x25, 0x87, 0xd4, 0xd5, 0xb7, 0x4d, + 0x46, 0x18, 0x51, 0xc8, 0xe4, 0x1c, 0x86, 0x9f, 0x7f, 0xfe, 0x88, 0xb3, 0xa5, 0x1d, 0x8a, 0x4f, + 0xe5, 0xa2, 0x24, 0x2b, 0xff, 0x48, 0x1c, 0x45, 0x9c, 0x27, 0x17, 0x06, 0xa1, 0xa8, 0xf7, 0x28, + 0x2c, 0xdf, 0xb8, 0x43, 0x4b, 0x82, 0x57, 0xa8, 0xa2, 0x99, 0x6b, 0x8c, 0xe2, 0x11, 0xcc, 0x9d, + 0x28, 0x4d, 0x26, 0x10, 0x08, 0xba, 0xb0, 0x19, 0x65, 0xc6, 0x5d, 0x8b, 0x47, 0x9a, 0x6c, 0xf8, + 0x92, 0x85, 0xd5, 0xee, 0x99, 0xb5, 0x17, 0x3c, 0xc9, 0x96, 0xe4, 0x28, 0x24, 0xb5, 0xb7, 0x82, + 0xac, 0x4e, 0xe6, 0x66, 0x05, 0xdd, 0x93, 0x23, 0xba, 0xc6, 0x53, 0x05, 0x03, 0xbc, 0x0a, 0x2e, + 0x46, 0xb7, 0xad, 0xb0, 0x8a, 0xd2, 0x6d, 0x9f, 0xe8, 0x53, 0x6f, 0x41, 0xe4, 0xd5, 0xca, 0x85, + 0x43, 0x01, 0x7a, 0x19, 0x11, 0x27, 0x86, 0x82, 0x29, 0x90, 0x4d, 0xbf, 0x3e, 0xbd, 0xcb, 0xef, + 0xb1, 0x3f, 0x41, 0xda, 0xd1, 0xa7, 0x4a, 0x4f, 0x10, 0x42, 0x46, 0xd9, 0x42, 0xa6, 0x4e, 0xd4, + 0x55, 0xae, 0x41, 0x67, 0xed, 0x29, 0x47, 0xa7, 0x42, 0xaf, 0x40, 0xd1, 0x4e, 0xb3, 0xc6, 0x80, + 0x36, 0x16, 0x09, 0xfc, 0x49, 0x4c, 0x09, 0xec, 0xfa, 0xb0, 0xaf, 0x87, 0xc1, 0xcc, 0x50, 0x69, + 0xb4, 0xa4, 0x9a, 0x91, 0x1b, 0x51, 0xdb, 0x33, 0x81, 0xfe, 0x41, 0x8d, 0x91, 0x9d, 0x3b, 0xfc, + 0xc7, 0x16, 0x0c, 0xa9, 0x81, 0xbe, 0x31, 0xab, 0x0d, 0xde, 0xe4, 0xc1, 0x04, 0x7c, 0xc6, 0x48, + 0xc1, 0xde, 0xbe, 0x9d, 0x6c, 0xf1, 0xd2, 0xc5, 0x61, 0x18, 0xe0, 0xe6, 0x46, 0xd0, 0xa9, 0xa6, + 0xe8, 0x47, 0x0b, 0x0c, 0x25, 0x1d, 0x9a, 0x26, 0xe7, 0xfa, 0xbc, 0x9d, 0x49, 
0x46, 0xaf, 0xc7, + 0xe6, 0x27, 0x0f, 0x3b, 0xb4, 0x22, 0xf1, 0xc3, 0xb1, 0x7d, 0x98, 0xf1, 0x17, 0x58, 0x47, 0x5d, + 0xb2, 0x28, 0xfc, 0x1e, 0xdf, 0x54, 0x53, 0x46, 0x43, 0x71, 0x92, 0x75, 0xf5, 0x3a, 0x2b, 0xb6, + 0x9f, 0xb8, 0xd7, 0x27, 0x4c, 0x71, 0xc2, 0x9d, 0x06, 0x72, 0x40, 0xef, 0x8d, 0xd9, 0x36, 0xcf, + 0x4e, 0xf6, 0xbb, 0xce, 0xf7, 0xd8, 0xe4, 0x6c, 0x7e, 0xf7, 0x51, 0x82, 0x1a, 0x9e, 0x90, 0x4c, + 0x3a, 0x37, 0x6a, 0x3c, 0x1b, 0xd3, 0x1c, 0xe9, 0x43, 0x8a, 0x6b, 0x79, 0xae, 0xee, 0x68, 0xf8, + 0x5b, 0x34, 0x16, 0x21, 0x21, 0x55, 0xbf, 0xce, 0xc4, 0xb3, 0x85, 0x63, 0xc8, 0x8b, 0xa7, 0x5c, + 0xb8, 0x95, 0x01, 0x6f, 0x8c, 0x34, 0x38, 0x31, 0x95, 0x42, 0xfe, 0xb0, 0xe3, 0x36, 0x83, 0x19, + 0xb0, 0x0b, 0x61, 0xf7, 0x7b, 0x1c, 0x97, 0x2f, 0xbc, 0x04, 0xa3, 0xb2, 0x3c, 0xdc, 0xa1, 0xb9, + 0xcc, 0xda, 0x6a, 0x98, 0x29, 0xc3, 0x82, 0xb4, 0x62, 0xf8, 0x7a, 0x22, 0x25, 0x5e, 0x88, 0xa9, + 0xf8, 0x29, 0xbf, 0x49, 0x71, 0x29, 0xc6, 0x25, 0x05, 0x41, 0x94, 0xb0, 0x9f, 0xa6, 0xbb, 0xf8, + 0xc6, 0x78, 0xe5, 0x46, 0x30, 0x3d, 0x45, 0xcb, 0x76, 0x9d, 0x65, 0x22, 0x7b, 0x86, 0x62, 0x9a, + 0x29, 0xbd, 0x08, 0x9d, 0x79, 0xad, 0x13, 0xcd, 0x59, 0x28, 0x3d, 0x30, 0xed, 0x92, 0x48, 0x64, + 0x7c, 0x83, 0x82, 0xda, 0xa4, 0x8b, 0x8a, 0xf2, 0xdc, 0x02, 0x82, 0x8e, 0x18, 0x50, 0x31, 0xbb, + 0x67, 0x91, 0x3e, 0x27, 0xae, 0x3d, 0x5b, 0x41, 0xfd, 0xb0, 0x15, 0x55, 0x63, 0xc2, 0x4a, 0x58, + 0x80, 0xa4, 0x0c, 0xad, 0xcd, 0xc0, 0xe6, 0x55, 0x2b, 0x33, 0xcb, 0x8d, 0x89, 0xc2, 0x38, 0xca, + 0x6a, 0x70, 0xf1, 0x3c, 0xe9, 0xee, 0x6b, 0x06, 0xd8, 0xb5, 0x8d, 0x73, 0xb0, 0xae, 0x62, 0xaa, + 0x9a, 0x7f, 0x7f, 0x1b, 0x16, 0x8f, 0xbe, 0xc0, 0xc5, 0x20, 0xe2, 0xfe, 0xa2, 0x5c, 0xdc, 0x3e, + 0x44, 0xc9, 0xaf, 0xfa, 0x42, 0xc4, 0x6e, 0xae, 0xb4, 0xfa, 0x1b, 0xdc, 0xea, 0xf7, 0x92, 0x05, + 0xcf, 0xc8, 0xf3, 0xeb, 0xc4, 0x2b, 0xbf, 0xd8, 0xfe, 0x41, 0x84, 0xd3, 0xc0, 0x0b, 0x57, 0xb9, + 0x8e, 0xab, 0x9d, 0x64, 0x90, 0xae, 0xad, 0x5f, 0xf4, 0x4b, 0xb8, 0x36, 0x52, 0x74, 0x1a, 0x52, + 0xa3, 0x5f, 0x0d, 0xc3, 0xe0, 0x71, 0xd8, 0xdf, 0xf2, 0xf4, 0x99, 0xee, 0x5a, 0x07, 0x99, 0x15, + 0x4b, 0x37, 0xf7, 0x48, 0x59, 0xd7, 0xd2, 0x7c, 0xd8, 0x89, 0xe1, 0xde, 0x9c, 0x82, 0x28, 0xe5, + 0xaa, 0x31, 0x81, 0x46, 0x5b, 0xee, 0x56, 0x3d, 0xe8, 0xc8, 0x77, 0x15, 0xfd, 0xf2, 0xc0, 0xb7, + 0x29, 0xf7, 0x47, 0x2f, 0x45, 0xdf, 0x01, 0x12, 0xd4, 0x36, 0x41, 0xd7, 0x87, 0xe5, 0x9f, 0xd3, + 0x74, 0xea, 0xae, 0xaf, 0x2e, 0x72, 0x93, 0xe5, 0x5c, 0x7a, 0xa3, 0x28, 0xb3, 0xa8, 0x60, 0x1b, + 0xae, 0x5d, 0x10, 0x12, 0x12, 0x54, 0xcb, 0x52, 0xf2, 0xda, 0x94, 0x48, 0x1c, 0x01, 0x0f, 0xfd, + 0x58, 0x6e, 0x6d, 0x85, 0xd0, 0x98, 0xa2, 0x52, 0x44, 0x1a, 0x23, 0x52, 0xe9, 0xa2, 0x1b, 0xe1, + 0xe2, 0x9d, 0xdf, 0x27, 0x1d, 0x1d, 0x9d, 0xcb, 0xe6, 0x95, 0x2c, 0x9d, 0x18, 0x8c, 0xc1, 0x41, + 0x13, 0xe6, 0xd2, 0xdf, 0x6a, 0x4c, 0x19, 0xe7, 0xdd, 0xd2, 0xa5, 0x77, 0xba, 0x60, 0x7c, 0xea, + 0xa9, 0xf2, 0x8e, 0xe1, 0xcf, 0x11, 0x6a, 0x51, 0xc1, 0x52, 0x86, 0x5b, 0x21, 0x0d, 0x52, 0x43, + 0x5a, 0xa7, 0x0f, 0xa4, 0x27, 0x9f, 0x68, 0xbe, 0xd6, 0x67, 0xb0, 0xfc, 0xe4, 0xaf, 0x91, 0xd6, + 0x4d, 0x9c, 0xdd, 0x6f, 0x88, 0xdf, 0x8b, 0x1f, 0x7c, 0xcb, 0x01, 0xa5, 0xc5, 0xe1, 0x73, 0x7c, + 0x1f, 0xdf, 0x40, 0xec, 0x06, 0xf2, 0x5e, 0x79, 0x90, 0x79, 0x72, 0x1c, 0xe5, 0x5b, 0x9c, 0xa4, + 0xd4, 0x87, 0x8c, 0x1a, 0xee, 0x18, 0xa1, 0xe1, 0x31, 0x0a, 0x64, 0x14, 0xf8, 0x6b, 0xc7, 0x35, + 0xcb, 0xbe, 0x83, 0xa5, 0xd8, 0xe7, 0x82, 0xf0, 0x64, 0x6b, 0x4e, 0xe4, 0x9f, 0xf2, 0x30, 0x5a, + 0xa3, 0xa4, 0xf2, 0xc9, 0xb4, 0x90, 0x18, 0x57, 0x1e, 0x27, 0x27, 0xff, 0x02, 0x61, 0xc2, 0x69, + 0xf9, 
0x24, 0x36, 0x7f, 0xf4, 0x7e, 0x84, 0x9b, 0xca, 0xdd, 0x97, 0xc9, 0xf3, 0x8c, 0x6e, 0xe8, + 0xc4, 0xab, 0xbd, 0xad, 0x00, 0x95, 0x45, 0x1d, 0x19, 0x23, 0xaf, 0x3a, 0x7a, 0x96, 0x0d, 0x94, + 0xb5, 0xff, 0xd2, 0xc8, 0x35, 0xac, 0x57, 0x04, 0x87, 0x75, 0xc5, 0x60, 0xfa, 0xe3, 0xc8, 0xde, + 0xd0, 0x8f, 0x90, 0xdb, 0x6e, 0xc0, 0xa2, 0x4d, 0x1f, 0x10, 0xc6, 0x86, 0x13, 0x19, 0x32, 0xf9, + 0x04, 0xf4, 0x46, 0x21, 0xf0, 0x60, 0x9b, 0x50, 0xbf, 0xb6, 0x25, 0x14, 0x98, 0x19, 0xce, 0x1b, + 0xd4, 0x86, 0x6a, 0x16, 0x2c, 0xfc, 0xca, 0xa0, 0x2a, 0xfe, 0xdd, 0x33, 0xc7, 0x03, 0x1b, 0xc7, + 0xfb, 0x80, 0x21, 0x0c, 0xa6, 0x36, 0xaa, 0x30, 0xd6, 0x7b, 0x93, 0xe5, 0xd9, 0xb6, 0x5f, 0x76, + 0xa8, 0x05, 0xa3, 0x2b, 0x4a, 0x8f, 0xf3, 0xdf, 0x34, 0xf4, 0x45, 0x93, 0xec, 0xba, 0x1f, 0xcb, + 0xbd, 0x6e, 0xea, 0xf8, 0x1f, 0xc6, 0x26, 0xf2, 0x54, 0xa4, 0x39, 0xbd, 0x70, 0xb0, 0xd5, 0xce, + 0x9f, 0xf1, 0x3a, 0x1d, 0xc1, 0x2f, 0x72, 0x29, 0x1c, 0x27, 0x11, 0x0b, 0x94, 0xe1, 0x24, 0x37, + 0x5d, 0xb1, 0xe7, 0x75, 0xac, 0x5e, 0x9e, 0x8f, 0xf4, 0x29, 0x19, 0xfa, 0x96, 0x5f, 0x6e, 0xd9, + 0x08, 0x00, 0x83, 0xa9, 0xb9, 0xff, 0xed, 0xbf, 0x0c, 0x98, 0xc3, 0x9d, 0xe8, 0x62, 0x9a, 0x9c, + 0x87, 0xa0, 0x48, 0x90, 0xb6, 0x63, 0x1b, 0xbf, 0xaa, 0xb5, 0xa1, 0x32, 0xdd, 0x23, 0xd6, 0x88, + 0x0c, 0x28, 0xf6, 0x57, 0x19, 0x67, 0x6a, 0x6b, 0x9f, 0x16, 0x31, 0x3a, 0x27, 0xac, 0x0a, 0xf1, + 0xa1, 0x02, 0x31, 0x3e, 0x67, 0x76, 0x64, 0xe7, 0x65, 0x01, 0x4a, 0x85, 0x5b, 0x37, 0xfb, 0xdb, + 0x5d, 0x65, 0x67, 0x83, 0xf8, 0xcb, 0xb0, 0xce, 0xbd, 0xef, 0x83, 0xd0, 0xb1, 0xce, 0xe8, 0x21, + 0x9f, 0xe1, 0x42, 0xed, 0x39, 0x06, 0xbf, 0x27, 0x23, 0x65, 0xd1, 0x0c, 0xdc, 0x34, 0xd7, 0xa7, + 0xc0, 0x1c, 0x62, 0x43, 0x2b, 0x3c, 0x84, 0x7a, 0xe3, 0x7b, 0x5a, 0x41, 0xc0, 0xfa, 0x65, 0x1a, + 0xc2, 0x6f, 0x7b, 0xe8, 0x5d, 0xa2, 0x25, 0x9f, 0x2c, 0xab, 0x5b, 0xc8, 0xad, 0x44, 0x93, 0x34, + 0xf5, 0x95, 0x39, 0xa1, 0xb4, 0xb9, 0x79, 0x48, 0x48, 0x09, 0x0f, 0x6d, 0x14, 0x57, 0x0d, 0xb2, + 0xaa, 0xec, 0x80, 0x5b, 0x78, 0x57, 0x90, 0x2e, 0x34, 0xb4, 0x41, 0xf1, 0x98, 0x9d, 0xc8, 0xe3, + 0x51, 0xad, 0x0b, 0xd5, 0x71, 0x56, 0x10, 0xca, 0x5f, 0xaf, 0xba, 0xdd, 0xef, 0xcc, 0xc1, 0x06, + 0xc9, 0x0a, 0xa5, 0x90, 0x23, 0x58, 0xca, 0xa2, 0xe4, 0x68, 0x8c, 0x00, 0x9a, 0x0a, 0x1e, 0x44, + 0x05, 0x5c, 0x7d, 0xbc, 0xe0, 0xab, 0x6a, 0x7c, 0x6b, 0x8f, 0xa0, 0xab, 0x77, 0x08, 0xdd, 0x4e, + 0x60, 0xaa, 0x6e, 0x96, 0x5e, 0x30, 0xd9, 0xaf, 0x47, 0x05, 0xee, 0x8b, 0xf1, 0x5f, 0x25, 0xf3, + 0xb4, 0xed, 0xd8, 0x36, 0x42, 0x27, 0x32, 0xc2, 0x3d, 0x75, 0xf1, 0x46, 0xdb, 0x61, 0x38, 0x93, + 0x82, 0x9e, 0x29, 0xef, 0x4a, 0x4a, 0xf2, 0x54, 0x3b, 0x8a, 0x74, 0x3b, 0x76, 0x6f, 0xba, 0x8a, + 0x33, 0xcd, 0x42, 0x4d, 0x5a, 0xd7, 0x80, 0x6a, 0x0d, 0x7f, 0xd5, 0xec, 0x41, 0xe1, 0x90, 0x1e, + 0xf6, 0x2d, 0x7e, 0x8b, 0x8e, 0x5b, 0xbd, 0xf0, 0x72, 0x68, 0xc5, 0x46, 0xe7, 0xc2, 0x21, 0xc4, + 0x9b, 0xe9, 0xdf, 0x94, 0x81, 0x65, 0xa6, 0xc7, 0x2a, 0x44, 0x5a, 0x76, 0x9a, 0x72, 0xf7, 0xbb, + 0xa0, 0xd0, 0xb9, 0x9f, 0xa7, 0x24, 0x5b, 0x2e, 0xd7, 0x91, 0x03, 0x2c, 0xe0, 0xf6, 0x6b, 0xb7, + 0xde, 0x95, 0x0f, 0x4c, 0x24, 0xb0, 0xf1, 0x1c, 0x2f, 0x3f, 0x40, 0xa3, 0x26, 0xb1, 0x6b, 0x25, + 0x6e, 0x98, 0x47, 0xa9, 0x6c, 0x48, 0x8c, 0x18, 0xe7, 0xe2, 0x30, 0x70, 0xa5, 0x0e, 0xc1, 0x00, + 0x75, 0xa9, 0x70, 0xe8, 0x19, 0x65, 0x71, 0x17, 0x48, 0x0f, 0x2b, 0x18, 0xd2, 0xde, 0x96, 0xda, + 0xb8, 0xcb, 0x72, 0xc5, 0xc4, 0x17, 0x5e, 0xe5, 0xdb, 0xb8, 0x47, 0x4f, 0xa0, 0x61, 0xf0, 0x24, + 0x40, 0x44, 0x62, 0x4d, 0x30, 0x3a, 0xe1, 0x5b, 0xb5, 0x8e, 0xe3, 0xf0, 0xa0, 0xb9, 0xd1, 0x49, + 0xd2, 0x35, 0x61, 0x39, 0xee, 
0x6f, 0xfc, 0x05, 0x0e, 0x49, 0x75, 0xdd, 0x8e, 0x47, 0xde, 0xe6, + 0xb8, 0x8b, 0x3f, 0xbf, 0x6f, 0x98, 0x39, 0x75, 0x4f, 0x49, 0x51, 0xa1, 0x3b, 0x5b, 0xcc, 0x99, + 0xb5, 0xb1, 0x37, 0x60, 0x4f, 0x0d, 0x2e, 0xa7, 0xf8, 0xec, 0x59, 0xdd, 0xf5, 0xa3, 0xa5, 0xef, + 0x59, 0xf1, 0xcd, 0x2e, 0x46, 0x13, 0xab, 0x05, 0x7a, 0x72, 0x4f, 0xf3, 0xf5, 0xc7, 0xaf, 0x5e, + 0x98, 0xa8, 0x7a, 0x71, 0xcc, 0xfe, 0x03, 0x8d, 0xcf, 0x4e, 0x51, 0xd9, 0x89, 0xe1, 0x7f, 0xe7, + 0xac, 0x37, 0xcf, 0xa1, 0x15, 0xa3, 0x32, 0xf0, 0x0a, 0x0a, 0x7f, 0x9a, 0xe7, 0xe3, 0x54, 0x96, + 0xbb, 0x87, 0xda, 0x05, 0xf8, 0xd3, 0x8f, 0xe3, 0x2a, 0xaa, 0x03, 0x0d, 0xa4, 0x41, 0x5e, 0x14, + 0xad, 0xb7, 0xad, 0xe8, 0xf9, 0xcc, 0x6a, 0xb8, 0xd8, 0x94, 0xe8, 0x85, 0x5d, 0x63, 0x4f, 0xda, + 0x69, 0x1e, 0x9d, 0x35, 0x02, 0x89, 0xdc, 0x52, 0x05, 0x7f, 0x6a, 0xdf, 0xfe, 0x22, 0xa5, 0xc9, + 0x94, 0x89, 0x2e, 0x60, 0x24, 0xf6, 0x5e, 0xb8, 0x34, 0x65, 0x27, 0x2f, 0x6b, 0xd2, 0x42, 0x58, + 0x68, 0x58, 0xbd, 0x74, 0xc4, 0xbe, 0x46, 0x5f, 0xe6, 0x7c, 0x19, 0xe3, 0xd6, 0x2f, 0x85, 0x86, + 0x0b, 0x4b, 0x1e, 0xbb, 0xe5, 0x7e, 0x74, 0x0c, 0xf8, 0x56, 0x2b, 0x14, 0x48, 0x5b, 0x71, 0x6f, + 0x54, 0x5f, 0x3c, 0x75, 0x0a, 0x43, 0xf4, 0x97, 0x52, 0x8c, 0x59, 0xe6, 0x9a, 0xe2, 0x79, 0x2f, + 0x7c, 0x38, 0x2f, 0xe6, 0x68, 0x7d, 0x6e, 0xdd, 0xab, 0xe1, 0xd8, 0x26, 0x96, 0x28, 0x74, 0x5e, + 0x65, 0x4e, 0x80, 0xd2, 0xfc, 0xa3, 0xe3, 0x90, 0x60, 0x20, 0xf7, 0x91, 0xc7, 0x51, 0x76, 0xf8, + 0x4f, 0x3c, 0x71, 0x3b, 0x7c, 0x82, 0xec, 0x7a, 0x06, 0x68, 0x57, 0x25, 0x85, 0x57, 0x31, 0x29, + 0xf5, 0x47, 0xc0, 0xb1, 0xf7, 0x86, 0x21, 0x1f, 0x5a, 0xda, 0x50, 0xc1, 0xa6, 0xd2, 0x76, 0xc4, + 0x0e, 0xaa, 0x06, 0xa5, 0x89, 0x96, 0x02, 0x8a, 0xe4, 0x3a, 0x32, 0x9c, 0x32, 0xfb, 0xec, 0x67, + 0x83, 0x97, 0xd8, 0x93, 0xa2, 0x22, 0xd7, 0x30, 0x62, 0xd1, 0x21, 0x09, 0x6f, 0xc9, 0x7b, 0x14, + 0x93, 0x51, 0x3f, 0x33, 0x78, 0xce, 0x51, 0x8b, 0x63, 0x9e, 0x11, 0x64, 0x44, 0xf6, 0x11, 0xad, + 0xe5, 0x35, 0x36, 0x3a, 0x3c, 0x3b, 0xb7, 0xa9, 0x28, 0xb8, 0x44, 0xab, 0x29, 0xc3, 0x91, 0x8c, + 0xc6, 0x35, 0x4d, 0x55, 0xc4, 0x47, 0x14, 0xea, 0x4e, 0xb7, 0xb7, 0x45, 0x6b, 0x1f, 0x81, 0x5e, + 0x03, 0x73, 0x80, 0x5b, 0x3f, 0x29, 0xd1, 0x44, 0x61, 0x77, 0x20, 0x25, 0xbb, 0x3e, 0x1a, 0x76, + 0x54, 0x37, 0x1f, 0xec, 0x5b, 0x36, 0x29, 0x7e, 0xb3, 0x06, 0x13, 0x7f, 0x39, 0xb1, 0x0a, 0x5e, + 0x03, 0x2d, 0xf6, 0xbb, 0xba, 0x76, 0xcc, 0x09, 0xbf, 0xa9, 0x46, 0x5d, 0x84, 0x89, 0x23, 0x28, + 0xc4, 0x7e, 0x88, 0xec, 0x54, 0xea, 0x31, 0x14, 0x88, 0x27, 0xc3, 0x61, 0xf5, 0xb5, 0x7b, 0x41, + 0x5b, 0x64, 0x3c, 0x4d, 0x5e, 0x6e, 0x03, 0x43, 0x0f, 0x41, 0xa5, 0xdd, 0xf4, 0x59, 0xb4, 0xdf, + 0xd1, 0x61, 0x3e, 0x10, 0x11, 0xd5, 0x76, 0x92, 0xbd, 0x30, 0xc8, 0x37, 0xaf, 0xe7, 0x45, 0x01, + 0xb9, 0x83, 0x5b, 0x99, 0x0c, 0x22, 0xed, 0xf0, 0x6c, 0x9b, 0x9c, 0xe8, 0x05, 0x63, 0x75, 0xbf, + 0xc0, 0x45, 0xbf, 0x35, 0x3d, 0xbc, 0xf3, 0x57, 0xeb, 0x98, 0x84, 0xe8, 0x5c, 0x3b, 0xde, 0x32, + 0xc4, 0x84, 0xe1, 0x9e, 0xca, 0x1d, 0x91, 0x5f, 0xa6, 0x38, 0xcc, 0xf9, 0x26, 0x70, 0xf7, 0x00, + 0xd7, 0x7d, 0xd5, 0xf3, 0x9e, 0xe9, 0x39, 0x29, 0xfa, 0xdc, 0xc6, 0x16, 0xeb, 0xa6, 0x30, 0x23, + 0xde, 0x7d, 0x76, 0xc5, 0xf2, 0x4a, 0x56, 0x19, 0xf1, 0x4d, 0x08, 0xd0, 0xea, 0x11, 0xf3, 0x68, + 0x85, 0x8d, 0xf8, 0x1c, 0x26, 0x36, 0x5b, 0x2d, 0x65, 0x43, 0x77, 0x16, 0xdb, 0x33, 0x26, 0xa5, + 0x94, 0x8e, 0x09, 0x36, 0x43, 0x9b, 0x23, 0x9d, 0x7b, 0xb2, 0xe1, 0x27, 0x46, 0xd7, 0xb1, 0xef, + 0x6d, 0x47, 0x09, 0x09, 0x5f, 0xe6, 0x09, 0x23, 0x55, 0x2c, 0xb7, 0xc3, 0xca, 0x48, 0x64, 0x04, + 0x50, 0xd8, 0x82, 0xf2, 0x7b, 0xb5, 0x56, 0xeb, 0xeb, 
0x86, 0x0e, 0x7e, 0xf2, 0xc0, 0xde, 0x7e, + 0x4b, 0xe2, 0xfe, 0xb0, 0x64, 0xb9, 0x17, 0x08, 0x63, 0x8b, 0x0d, 0xd5, 0xab, 0x37, 0x9c, 0x45, + 0xcd, 0x15, 0x38, 0x3f, 0xbb, 0xd6, 0x78, 0x8b, 0x94, 0xd4, 0x9d, 0xb4, 0x5e, 0x37, 0x44, 0x16, + 0xcb, 0x1d, 0x38, 0xc9, 0xd4, 0x57, 0xe9, 0xc3, 0x3b, 0x2a, 0x28, 0x6b, 0x0f, 0xa7, 0x18, 0xac, + 0xe5, 0xfc, 0x2c, 0x73, 0x9e, 0xa2, 0xef, 0x51, 0x12, 0xe7, 0x2f, 0xef, 0x7f, 0x7d, 0xf2, 0x77, + 0xed, 0xcb, 0x18, 0xa1, 0x3a, 0x74, 0x7e, 0xa0, 0xf7, 0x33, 0xc9, 0xc6, 0xcf, 0x42, 0x20, 0x1c, + 0x20, 0x50, 0x9a, 0x04, 0xd0, 0xdb, 0xe7, 0xd5, 0x1b, 0x50, 0xab, 0xed, 0x42, 0x59, 0xb8, 0xfb, + 0x50, 0x21, 0x8d, 0x97, 0x7a, 0x92, 0x38, 0x5b, 0xc2, 0xd5, 0x26, 0x29, 0xf3, 0x63, 0x52, 0x22, + 0x06, 0x81, 0x35, 0xb7, 0xbb, 0x45, 0x0d, 0xc2, 0x58, 0x4b, 0x8c, 0x42, 0xa1, 0x8c, 0xec, 0xd9, + 0xca, 0xa8, 0x52, 0x75, 0xe1, 0xc2, 0x5d, 0xa1, 0xa1, 0xbc, 0x76, 0x24, 0xd8, 0x9e, 0x01, 0x62, + 0x0c, 0xd8, 0x86, 0x91, 0x47, 0x9b, 0xff, 0x52, 0x5f, 0x2b, 0x33, 0x06, 0xa3, 0x7d, 0xb1, 0xbf, + 0x6a, 0x2a, 0x20, 0xcf, 0x19, 0xef, 0x5b, 0x08, 0xcc, 0xe9, 0x5b, 0x0f, 0x18, 0x27, 0x8e, 0x6e, + 0x58, 0x9b, 0x47, 0x8a, 0x9c, 0x19, 0x2c, 0x2a, 0x01, 0xc0, 0xad, 0x7d, 0xb7, 0xef, 0xfd, 0xd6, + 0xb6, 0x43, 0xd1, 0x99, 0x6b, 0xc8, 0xb0, 0xf8, 0xb9, 0xc3, 0xfb, 0xd8, 0x50, 0xab, 0x96, 0xe7, + 0x6a, 0xc2, 0xd7, 0x89, 0xac, 0xc6, 0x71, 0xaf, 0x46, 0x31, 0x1b, 0x5f, 0xce, 0x45, 0x87, 0x1a, + 0xe9, 0x57, 0x2c, 0x87, 0x76, 0xce, 0xd6, 0x82, 0x78, 0xfc, 0x0a, 0x51, 0xd7, 0x26, 0x2b, 0xf1, + 0x93, 0x5f, 0x86, 0xc2, 0x8e, 0x5d, 0x93, 0x54, 0x2b, 0x15, 0xff, 0xf3, 0xf4, 0x7c, 0x22, 0xfa, + 0xf3, 0xf1, 0x83, 0xbf, 0xc7, 0xf8, 0xa5, 0xac, 0x36, 0x92, 0x1e, 0x01, 0x4c, 0x3f, 0xaa, 0x4e, + 0x8a, 0xe1, 0xc4, 0xd1, 0x14, 0x70, 0xce, 0xf9, 0x9b, 0x0e, 0x76, 0xae, 0x35, 0x4b, 0x9e, 0xb1, + 0x06, 0xe9, 0x2b, 0x5c, 0xd8, 0x02, 0x19, 0xf2, 0x27, 0xa4, 0xe4, 0x6a, 0xcc, 0x57, 0xba, 0xeb, + 0xd0, 0x80, 0xd5, 0x64, 0x50, 0x7c, 0x7c, 0x1f, 0xe7, 0x84, 0xe0, 0x2c, 0xf6, 0x3d, 0xdf, 0xb6, + 0x5f, 0xfd, 0xda, 0x13, 0x46, 0xa6, 0x5b, 0x24, 0xf3, 0xdc, 0x85, 0xf2, 0x95, 0x67, 0x29, 0xe3, + 0x16, 0x24, 0xf5, 0x0a, 0x06, 0x9a, 0x51, 0xa3, 0xb8, 0x6f, 0xd4, 0xb3, 0x8d, 0xc5, 0x27, 0x42, + 0xbe, 0xe8, 0x5e, 0x84, 0xe5, 0x00, 0x3e, 0x48, 0x56, 0x40, 0xef, 0x7e, 0xfa, 0xa4, 0x02, 0x27, + 0x4d, 0x1f, 0xdc, 0x69, 0x85, 0xf8, 0xe1, 0x92, 0x98, 0x88, 0x45, 0x9d, 0x5a, 0x45, 0x47, 0x28, + 0xf7, 0xed, 0x1b, 0x65, 0xd5, 0x2e, 0xe6, 0x8e, 0x00, 0xd5, 0x7e, 0x85, 0x97, 0x98, 0x6c, 0x3f, + 0xba, 0x40, 0xff, 0xd5, 0xaf, 0x79, 0x7d, 0xc9, 0x5e, 0xaf, 0x6c, 0xba, 0x94, 0x5b, 0x12, 0xc8, + 0x3e, 0x69, 0xa5, 0xf9, 0xf3, 0xce, 0xa5, 0xe5, 0x59, 0xcf, 0x2e, 0x50, 0x02, 0xa5, 0xf6, 0x70, + 0x0a, 0xa6, 0x44, 0x02, 0xb7, 0x91, 0x61, 0x2a, 0x33, 0xa0, 0x7c, 0xc5, 0x33, 0xc4, 0xac, 0x39, + 0x38, 0x36, 0x5e, 0xb3, 0x9d, 0x78, 0x64, 0x1f, 0x37, 0x02, 0x09, 0xcc, 0x46, 0x92, 0x37, 0x6f, + 0x70, 0x93, 0x10, 0x3b, 0x95, 0xd0, 0xc6, 0x6b, 0x04, 0xbb, 0x28, 0x8e, 0x89, 0x16, 0xf5, 0x73, + 0x55, 0xac, 0xee, 0x11, 0x77, 0x21, 0x73, 0xa8, 0x93, 0x44, 0x38, 0x7a, 0x24, 0xca, 0xab, 0x4d, + 0x02, 0xdd, 0x9b, 0xe2, 0x97, 0xfa, 0x84, 0xb9, 0x58, 0x17, 0x97, 0xf8, 0x97, 0x56, 0x9c, 0x60, + 0xab, 0xdf, 0x37, 0x1a, 0x95, 0x7c, 0xb1, 0x4c, 0x93, 0xff, 0xf6, 0x83, 0x26, 0x1c, 0xe6, 0xd2, + 0x36, 0xcf, 0xfd, 0x28, 0xc7, 0x14, 0x32, 0x62, 0xc6, 0x6f, 0xa2, 0x07, 0xc2, 0xe2, 0x2f, 0xc9, + 0x9f, 0xcf, 0x70, 0x0a, 0x7c, 0x04, 0xeb, 0x2c, 0x11, 0xbf, 0x2f, 0x5f, 0xc9, 0x09, 0x9b, 0x1b, + 0x64, 0xe6, 0x42, 0x91, 0x89, 0xfd, 0xdd, 0x1b, 0xfc, 0x73, 0x3d, 0x36, 0xa8, 
0x36, 0x1b, 0xfc, + 0x47, 0xf3, 0x47, 0xf3, 0x0c, 0xab, 0xc4, 0x1f, 0x99, 0x1c, 0x03, 0xe9, 0x70, 0x95, 0x18, 0xc6, + 0x47, 0xf9, 0x5b, 0x6f, 0xce, 0xf6, 0x1b, 0x48, 0xdb, 0xe7, 0x73, 0x6c, 0x46, 0x05, 0xf4, 0x62, + 0x1d, 0xa1, 0x5d, 0xcf, 0xa8, 0x61, 0x0b, 0x85, 0xe9, 0x0c, 0x19, 0xa6, 0x2f, 0x83, 0x60, 0xe3, + 0x0a, 0x5a, 0xb8, 0x10, 0xa2, 0x59, 0x88, 0xf8, 0x7a, 0x47, 0xe4, 0xa0, 0xad, 0x28, 0xdf, 0x27, + 0x71, 0x4d, 0x9d, 0x15, 0x8e, 0x94, 0xbd, 0x3a, 0x84, 0xa5, 0x6c, 0x54, 0x79, 0xaa, 0x66, 0x5d, + 0xda, 0x77, 0xe4, 0xe0, 0x91, 0xd4, 0x6a, 0xeb, 0xf0, 0xe8, 0x52, 0x9d, 0x23, 0x81, 0x41, 0x39, + 0x4e, 0x52, 0xb7, 0x55, 0x31, 0xf5, 0xf4, 0xcc, 0x8d, 0xaa, 0x99, 0x3f, 0x2f, 0x7a, 0x6b, 0xe9, + 0x62, 0x08, 0xf9, 0xd2, 0xf9, 0x19, 0xe7, 0x77, 0xca, 0x7d, 0x73, 0xa5, 0x34, 0x8b, 0x7b, 0x7d, + 0xdd, 0xf1, 0x0b, 0x51, 0xad, 0xa3, 0xf3, 0x81, 0x31, 0x04, 0x4d, 0x59, 0xd1, 0xe0, 0xc8, 0xab, + 0x93, 0x8c, 0x68, 0xc7, 0x11, 0x07, 0x0c, 0x7f, 0x86, 0xcb, 0x94, 0x89, 0x1c, 0xa7, 0xe8, 0x11, + 0x0e, 0x50, 0xab, 0x86, 0x32, 0xd4, 0x4e, 0x37, 0x8d, 0x0c, 0x6e, 0x69, 0x33, 0x00, 0xa8, 0xd9, + 0x1d, 0xc6, 0xe4, 0x54, 0xb4, 0xfc, 0xf2, 0x49, 0xe5, 0x17, 0x19, 0x60, 0x6c, 0xb9, 0x63, 0x7e, + 0x38, 0x21, 0xe0, 0x1a, 0xdf, 0x65, 0x43, 0x18, 0x37, 0x30, 0xaa, 0xd3, 0x73, 0xbf, 0xfa, 0x66, + 0x10, 0xe4, 0xa4, 0xd0, 0xe0, 0x57, 0x06, 0x5a, 0x64, 0x3d, 0x56, 0x31, 0x12, 0x11, 0x07, 0x1d, + 0xb4, 0xe8, 0xe5, 0xf3, 0x96, 0x89, 0x91, 0x78, 0x65, 0xdf, 0xd8, 0x1e, 0x10, 0xe1, 0xf2, 0x8c, + 0x85, 0x49, 0xa9, 0xa9, 0x5c, 0xc7, 0xac, 0xca, 0xd8, 0x94, 0x73, 0xad, 0x18, 0xcd, 0xf5, 0xf9, + 0x84, 0x8c, 0xa2, 0x7f, 0x43, 0xf3, 0x43, 0x30, 0x4a, 0xc1, 0xd2, 0x64, 0x5d, 0x6c, 0x82, 0xae, + 0x65, 0x17, 0x92, 0x7f, 0x68, 0xa8, 0x9b, 0x30, 0x24, 0x19, 0xeb, 0x28, 0xeb, 0xd4, 0x67, 0x5a, + 0x7d, 0xd3, 0xcf, 0xa1, 0x5e, 0x28, 0xaa, 0xbd, 0x4e, 0x69, 0x5d, 0x70, 0x15, 0xe5, 0x35, 0xb6, + 0xf0, 0x7b, 0xf5, 0xe4, 0xcc, 0x8b, 0xb1, 0x3e, 0x25, 0xd3, 0x98, 0x29, 0x24, 0x9e, 0x42, 0x5c, + 0xe3, 0x96, 0x9d, 0xe8, 0xe4, 0xe6, 0xe1, 0x6b, 0x84, 0x79, 0x43, 0xd6, 0xb4, 0x5c, 0xfa, 0x35, + 0x11, 0x74, 0x47, 0x36, 0x0a, 0x6c, 0x56, 0xb7, 0x4b, 0x25, 0xb1, 0xaf, 0x45, 0x1e, 0x0c, 0x3e, + 0x82, 0xc2, 0xc9, 0x68, 0x2b, 0xba, 0x7d, 0x80, 0x85, 0x62, 0xa0, 0xb9, 0x63, 0xa7, 0x20, 0x3c, + 0x0b, 0x54, 0x10, 0x0f, 0x93, 0x51, 0xa9, 0xc6, 0xe1, 0xb8, 0x2a, 0xa4, 0x1a, 0xa7, 0x75, 0x12, + 0xb0, 0xe2, 0x42, 0xe5, 0xb6, 0xe1, 0xc8, 0x18, 0xac, 0x51, 0x50, 0xa9, 0x81, 0x89, 0xd2, 0x9b, + 0x4c, 0x1d, 0xca, 0x5b, 0x13, 0xc7, 0x1c, 0xd0, 0xc6, 0xf3, 0x44, 0x54, 0x59, 0x4e, 0xaf, 0x11, + 0x64, 0x0a, 0x9b, 0x28, 0x77, 0x9a, 0xab, 0x8a, 0x3b, 0xc0, 0xd2, 0xff, 0x02, 0x63, 0xef, 0x54, + 0xf7, 0x90, 0x76, 0x8c, 0xce, 0x42, 0x08, 0xdf, 0xfd, 0xf5, 0xe7, 0x82, 0x44, 0x92, 0x68, 0x55, + 0xfa, 0xda, 0x06, 0xca, 0xd7, 0x9b, 0x77, 0x62, 0x5e, 0x54, 0xfb, 0x01, 0xa8, 0x7f, 0xcf, 0x3f, + 0x79, 0x9d, 0x9d, 0x4d, 0x60, 0xc7, 0x52, 0xa7, 0x65, 0xa4, 0x72, 0xd3, 0x81, 0x83, 0x3c, 0x21, + 0x33, 0x6f, 0xbe, 0xdd, 0x7c, 0x91, 0xe3, 0xb0, 0x30, 0xa7, 0x7d, 0x56, 0x59, 0x31, 0x16, 0x9c, + 0x29, 0x51, 0x5f, 0xe7, 0x57, 0xa0, 0x68, 0x99, 0xca, 0x5d, 0xc8, 0x0d, 0x2d, 0xaa, 0xfc, 0xe1, + 0xe6, 0x31, 0xfc, 0x49, 0x89, 0x8e, 0x1f, 0xb7, 0xf5, 0xfe, 0x51, 0xdc, 0x79, 0xb9, 0xe0, 0x41, + 0xc1, 0x70, 0xdc, 0xb6, 0x33, 0x99, 0xb1, 0xd5, 0x63, 0xf1, 0xec, 0xb3, 0xb8, 0x5b, 0x5c, 0x8b, + 0x67, 0x59, 0xf6, 0x51, 0x98, 0x74, 0xbb, 0x0e, 0xf4, 0xe8, 0x9b, 0xfe, 0x93, 0xe3, 0x82, 0x7c, + 0xcf, 0x68, 0x51, 0x79, 0x53, 0x6e, 0xd1, 0x50, 0x89, 0x81, 0x29, 0xe0, 0x63, 0x62, 0xe3, 0xdc, + 0x67, 
0x2d, 0xc6, 0x48, 0x0e, 0x5a, 0x9a, 0x31, 0xe4, 0xea, 0xb5, 0x40, 0xff, 0x7c, 0xdc, 0x6b, + 0x09, 0x4e, 0xbb, 0x88, 0x9f, 0x7e, 0x8b, 0xa3, 0xd6, 0x60, 0xc4, 0x3c, 0xd4, 0x81, 0xa1, 0x0b, + 0xaf, 0x0e, 0xa6, 0xfd, 0x0e, 0x66, 0xf6, 0xdb, 0x69, 0x2c, 0x07, 0x63, 0x25, 0xfe, 0x85, 0x7e, + 0x6d, 0x7a, 0xfb, 0xf1, 0x04, 0x9d, 0x34, 0x4d, 0xa5, 0xf4, 0x18, 0xbc, 0x45, 0x17, 0x0b, 0x99, + 0x01, 0x7e, 0x64, 0xf1, 0x50, 0x32, 0xc7, 0x5e, 0x31, 0xec, 0x24, 0x01, 0x69, 0xb2, 0xe2, 0x5a, + 0x60, 0x39, 0xf1, 0x9a, 0xc3, 0xaa, 0x7d, 0x8b, 0x48, 0x41, 0x7d, 0x95, 0x14, 0x7f, 0x63, 0x70, + 0xef, 0xae, 0xb0, 0x01, 0x94, 0xc4, 0x39, 0x0e, 0xbb, 0xc1, 0x18, 0xbf, 0x57, 0x03, 0x59, 0x84, + 0x6a, 0x31, 0x01, 0xcb, 0x60, 0xbc, 0xb9, 0xd7, 0xc0, 0x47, 0x69, 0xa0, 0x13, 0x96, 0xfe, 0x7d, + 0x76, 0x60, 0x0a, 0xd3, 0x9b, 0x49, 0xcf, 0x97, 0xd8, 0xbf, 0x35, 0x24, 0x3d, 0xa6, 0x8f, 0x64, + 0x61, 0x03, 0x76, 0x91, 0xb6, 0xfa, 0x16, 0xc6, 0x07, 0x2c, 0x82, 0xca, 0x33, 0xf3, 0x07, 0xd8, + 0xff, 0xfe, 0x2f, 0xff, 0xbb, 0x3f, 0x20, 0x76, 0x33, 0x78, 0x5d, 0x5b, 0xcc, 0xea, 0x92, 0x1c, + 0x12, 0x1a, 0xd1, 0x22, 0x47, 0x02, 0xca, 0x9b, 0x96, 0x88, 0xdd, 0xe8, 0xbd, 0x0d, 0xb5, 0x51, + 0xee, 0xe4, 0x1e, 0x12, 0x35, 0xbf, 0x9b, 0xf3, 0xaf, 0x87, 0x22, 0x54, 0x95, 0x4c, 0x04, 0x26, + 0x85, 0x3f, 0x33, 0xb4, 0xff, 0x9e, 0x9f, 0x16, 0xc8, 0xe0, 0xfd, 0x59, 0x1b, 0x31, 0xd7, 0x0d, + 0x4d, 0xfe, 0xb3, 0xa5, 0x9e, 0xcd, 0x26, 0xaa, 0xae, 0x4f, 0x24, 0xee, 0xcc, 0xa9, 0x91, 0x92, + 0xbc, 0x10, 0xe2, 0x47, 0xb0, 0x47, 0x95, 0xe7, 0x80, 0x2c, 0x74, 0xb0, 0x5e, 0xfc, 0x75, 0x94, + 0xde, 0xdb, 0xd3, 0x03, 0xd6, 0x22, 0xb4, 0x5e, 0xc8, 0x7f, 0x2f, 0x95, 0x93, 0x58, 0xdd, 0x44, + 0x9c, 0xa8, 0x19, 0x3a, 0x9a, 0x06, 0x36, 0x9b, 0x40, 0xd8, 0xdd, 0xfd, 0x1b, 0x4d, 0xc7, 0x5b, + 0xbf, 0xca, 0x41, 0xac, 0x5d, 0xbf, 0xe4, 0xf7, 0x57, 0xff, 0xbc, 0x22, 0x07, 0x2a, 0x26, 0x41, + 0x21, 0x76, 0x0b, 0x69, 0x52, 0x07, 0x27, 0x38, 0xbc, 0xa7, 0x35, 0x66, 0x8b, 0xbb, 0x21, 0x10, + 0x7c, 0x19, 0xa0, 0xf6, 0x1d, 0x3c, 0xe8, 0x62, 0x32, 0x58, 0x33, 0xa9, 0xa6, 0x4e, 0x7d, 0x92, + 0xa6, 0x83, 0xdb, 0x68, 0xdc, 0x70, 0x94, 0xb2, 0x3f, 0xac, 0x8d, 0x3f, 0x6d, 0xa9, 0x83, 0xf8, + 0x7d, 0x34, 0x51, 0x31, 0x45, 0x0e, 0x9b, 0x0b, 0x40, 0x71, 0x8e, 0x77, 0xf6, 0xa2, 0x0a, 0x61, + 0xf3, 0xc0, 0x23, 0x67, 0xf8, 0x21, 0xb0, 0xb1, 0x23, 0x12, 0x34, 0x9a, 0x16, 0x9a, 0x93, 0x40, + 0x37, 0xd0, 0xdf, 0x19, 0x5e, 0x8d, 0xa5, 0x5b, 0x52, 0xec, 0xdb, 0x0f, 0xfa, 0xee, 0x06, 0xbd, + 0x03, 0xab, 0x9f, 0x7e, 0x01, 0x4c, 0x1e, 0x98, 0xa9, 0x25, 0x88, 0x91, 0x7a, 0xa3, 0xbc, 0xa1, + 0x53, 0x6b, 0x51, 0x95, 0x96, 0xef, 0xed, 0x52, 0xe9, 0x44, 0xe9, 0xd4, 0xc7, 0x23, 0xdd, 0x8e, + 0xc3, 0xf7, 0xa2, 0x9f, 0xf7, 0x24, 0xbf, 0xd4, 0x83, 0xdf, 0x53, 0x08, 0x0e, 0x79, 0x7a, 0x32, + 0x24, 0x79, 0x4a, 0xb2, 0x48, 0x83, 0x39, 0x94, 0x63, 0x41, 0x3c, 0x6b, 0x38, 0xfa, 0x4d, 0x60, + 0x68, 0x64, 0x9d, 0x15, 0xb0, 0xed, 0x72, 0x44, 0x23, 0xf0, 0x90, 0x7b, 0xa1, 0x60, 0xf0, 0x07, + 0x14, 0x6d, 0x6b, 0x29, 0x7f, 0x94, 0x03, 0x01, 0xe0, 0x74, 0xaa, 0x8a, 0x63, 0x36, 0xee, 0x7b, + 0xa8, 0x75, 0xfe, 0x05, 0x46, 0x7d, 0x3e, 0xb1, 0x8e, 0x23, 0x64, 0x9e, 0x1f, 0x88, 0x8a, 0x7f, + 0x1a, 0xfd, 0xc1, 0x66, 0xa0, 0xe7, 0x52, 0xbf, 0x49, 0xca, 0xf8, 0x67, 0xe7, 0xf3, 0x57, 0x20, + 0x96, 0xce, 0xda, 0x31, 0xba, 0x38, 0x70, 0x91, 0x84, 0xde, 0xe1, 0xd4, 0x1d, 0xe6, 0x23, 0x24, + 0x17, 0x58, 0x2f, 0xeb, 0x1e, 0x43, 0xad, 0xf1, 0x45, 0x80, 0xfc, 0xca, 0x23, 0x4d, 0x89, 0x63, + 0xac, 0xac, 0xa4, 0xfd, 0x38, 0x47, 0xa4, 0x7c, 0x39, 0xad, 0x8f, 0x28, 0x34, 0xce, 0xcd, 0x7f, + 0x3b, 0xa2, 0xf9, 0x02, 0xb6, 
0x42, 0x5e, 0x2c, 0x8c, 0xf6, 0xd3, 0x4e, 0x9c, 0x52, 0xa6, 0xaa, + 0x28, 0x2a, 0x75, 0x25, 0x31, 0x3b, 0xbd, 0xd9, 0xa6, 0xcf, 0x8e, 0xb6, 0xba, 0x72, 0x37, 0x2a, + 0xab, 0xba, 0x38, 0x54, 0x66, 0xb8, 0xb7, 0x36, 0xca, 0x25, 0x02, 0x13, 0xca, 0x11, 0x8a, 0x4a, + 0x57, 0xcd, 0xc0, 0x24, 0xe0, 0xe0, 0x7d, 0xd4, 0x64, 0x22, 0xc9, 0x74, 0x46, 0x54, 0x51, 0x1b, + 0x86, 0xdd, 0x20, 0x61, 0x7d, 0x21, 0x01, 0x3d, 0x17, 0xd3, 0x9c, 0x7d, 0x27, 0xd4, 0x17, 0xa8, + 0xc2, 0x63, 0x75, 0x38, 0xad, 0xca, 0xf1, 0xf0, 0xb6, 0xd9, 0x2d, 0x11, 0xc9, 0xd9, 0x46, 0x59, + 0x4d, 0x04, 0x43, 0xf8, 0x49, 0x42, 0x2f, 0xa6, 0x3b, 0x58, 0x47, 0x9d, 0x61, 0x8a, 0xf7, 0x87, + 0x94, 0x17, 0xb4, 0xf9, 0x65, 0xdb, 0x5f, 0x83, 0x30, 0xf8, 0x8a, 0x2b, 0x05, 0x60, 0xaf, 0x01, + 0xf8, 0x3c, 0x13, 0x7f, 0x26, 0x71, 0x18, 0xaa, 0xc8, 0x02, 0x6a, 0x71, 0x00, 0x8c, 0x1f, 0x8c, + 0x50, 0x21, 0x8a, 0x31, 0x7c, 0x2a, 0x26, 0xc8, 0x28, 0xf7, 0x40, 0x35, 0x8d, 0xf6, 0x53, 0xdb, + 0xe9, 0xee, 0x72, 0x38, 0x49, 0xb2, 0x3f, 0xd7, 0x43, 0x24, 0x7b, 0x49, 0x99, 0xb9, 0x52, 0xfa, + 0x37, 0x3f, 0xd4, 0xec, 0x16, 0x67, 0x4c, 0x2a, 0xde, 0x79, 0xd9, 0xac, 0xc7, 0x5b, 0x53, 0xae, + 0x6a, 0xd3, 0x80, 0x5a, 0xc0, 0xa2, 0x59, 0xb9, 0x43, 0x87, 0xc8, 0x9d, 0x0f, 0xb5, 0x0a, 0x72, + 0x33, 0x93, 0x40, 0x31, 0x9d, 0x76, 0xe6, 0xc9, 0xa7, 0xf7, 0xb4, 0xd5, 0xc0, 0x5d, 0x6e, 0x75, + 0x8b, 0xed, 0x98, 0x81, 0x93, 0x0f, 0x63, 0x6b, 0xd4, 0x13, 0x06, 0x4a, 0x1f, 0x86, 0xe3, 0xdf, + 0x05, 0xf5, 0x5c, 0x8a, 0x2a, 0x51, 0x79, 0x89, 0xde, 0xfe, 0x0f, 0x93, 0x29, 0xf5, 0x77, 0xaf, + 0xb4, 0xf6, 0x88, 0x02, 0xfe, 0x52, 0x79, 0x87, 0xa4, 0xd0, 0x9c, 0x90, 0x73, 0xb7, 0xa9, 0xa3, + 0xe4, 0x37, 0xbb, 0xd5, 0xfd, 0x85, 0x10, 0x78, 0x7e, 0xb8, 0xe6, 0x21, 0x16, 0x23, 0x74, 0xc1, + 0x54, 0xc6, 0x50, 0xa5, 0x61, 0xbd, 0xc5, 0xb9, 0x75, 0x6f, 0x4f, 0xe6, 0x82, 0x4b, 0xcd, 0x52, + 0x7d, 0x08, 0xd9, 0x78, 0x4f, 0xa9, 0xf9, 0x52, 0x40, 0xa8, 0xd9, 0x37, 0xe9, 0x97, 0xce, 0x8f, + 0xd8, 0x40, 0x8f, 0x59, 0xdc, 0xac, 0x0f, 0x17, 0xea, 0x55, 0x79, 0x18, 0xbe, 0x91, 0xf4, 0x35, + 0xe4, 0x68, 0xd3, 0xa2, 0x1c, 0x41, 0x31, 0xb1, 0xf3, 0x04, 0x42, 0x63, 0x2c, 0xc8, 0x12, 0xf9, + 0x24, 0x17, 0xad, 0x19, 0x66, 0x8c, 0xaf, 0x29, 0xaf, 0x22, 0x15, 0x83, 0xdb, 0x38, 0x5b, 0xce, + 0x0e, 0xf9, 0x97, 0x1e, 0x24, 0x19, 0x0b, 0x0f, 0x2f, 0x16, 0xda, 0x09, 0x77, 0xbe, 0x31, 0x6d, + 0x63, 0x12, 0x38, 0xc5, 0x57, 0x8c, 0x96, 0xf4, 0x2a, 0xcf, 0x6e, 0x3e, 0x12, 0x2c, 0xea, 0x49, + 0xb8, 0x08, 0xec, 0x84, 0x20, 0x52, 0x2d, 0xa0, 0x89, 0xcf, 0x66, 0xa3, 0xb3, 0xfc, 0xb1, 0xed, + 0x8a, 0x3e, 0x36, 0x9c, 0xba, 0xab, 0xfb, 0xf9, 0x77, 0x6e, 0xe1, 0x18, 0x32, 0xa1, 0x1f, 0x62, + 0x1c, 0xed, 0xb0, 0x9f, 0xbf, 0x71, 0x0d, 0xf5, 0xb2, 0x01, 0x26, 0x72, 0xd6, 0xff, 0xc0, 0x89, + 0xad, 0xbb, 0xf2, 0xb5, 0x57, 0x46, 0x1f, 0xd2, 0x1c, 0x26, 0x62, 0xaf, 0x76, 0x32, 0x20, 0x97, + 0x10, 0x21, 0x10, 0x9f, 0xc5, 0x81, 0x7a, 0xff, 0x6f, 0xdb, 0xc2, 0xff, 0xf8, 0x8f, 0xff, 0xf8, + 0x8f, 0xff, 0xf8, 0xff, 0xc2, 0xff, 0x00, 0xf0, 0xd7, 0x5e, 0x38, 0x00, 0x23, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8960, // uncompressed data size (bytes) + 7375, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? 
+ NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x05, 0x62, 0x56, 0x08, 0x13, 0x4c, 0xc8, 0x40, 0x69, + 0x20, 0x00, 0x00, 0x8e, 0xa1, 0x42, 0xb2, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8960 +// COMPRESSED SIZE (bytes): 7373 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_image_prod_data[] = +{ + 0xed, 0x99, 0x45, 0x50, 0x1c, 0x5a, 0xb0, 0x86, 0x07, 0x87, 0xe0, 0xee, 0xee, 0x3e, 0xe8, 0xe0, + 0x92, 0xe0, 0x1a, 0x5c, 0x82, 0xbb, 0xbb, 0x4b, 0x70, 0x27, 0x10, 0x7c, 0x70, 0x19, 0x24, 0x40, + 0x80, 0xe0, 0xee, 0xee, 0x1e, 0xdc, 0x61, 0x82, 0x7b, 0x70, 0x79, 0xf7, 0x6d, 0xef, 0xee, 0x6d, + 0x5f, 0xdd, 0x6f, 0xf3, 0xf7, 0xd9, 0x74, 0x77, 0xf5, 0xa9, 0xee, 0x73, 0xaa, 0x3a, 0x06, 0x10, + 0x0c, 0x78, 0x4b, 0x05, 0x7c, 0xc5, 0x04, 0x00, 0x1e, 0x60, 0x1f, 0x00, 0x8f, 0xb0, 0xdf, 0x00, + 0xb0, 0x80, 0xee, 0xcd, 0xad, 0xf7, 0xf7, 0x77, 0xcc, 0x18, 0x00, 0x0c, 0xe0, 0x3d, 0x1f, 0xbe, + 0x7e, 0x1f, 0xc0, 0x03, 0x5e, 0x81, 0xa9, 0x4f, 0x05, 0xb0, 0x81, 0x57, 0x60, 0xff, 0x11, 0x7a, + 0xf0, 0x0a, 0xdc, 0x3f, 0x42, 0x11, 0x03, 0x00, 0x00, 0xc0, 0xf9, 0x70, 0x8d, 0x5b, 0x70, 0xdd, + 0xf9, 0x05, 0x5b, 0xe0, 0x7c, 0x18, 0x70, 0x26, 0x6c, 0xe3, 0x16, 0x4c, 0xe3, 0x0a, 0xec, 0xd7, + 0x26, 0x00, 0x00, 0x93, 0x4b, 0x1e, 0x80, 0x75, 0x9f, 0x0f, 0xc0, 0x84, 0x8f, 0x01, 0x58, 0x23, + 0xfd, 0xaf, 0x81, 0xd2, 0x92, 0x09, 0x80, 0x81, 0xfd, 0xc7, 0xba, 0x15, 0x81, 0x4d, 0x04, 0x00, + 0x10, 0x63, 0xfe, 0x09, 0xf5, 0x9a, 0x8f, 0xd0, 0x79, 0xfb, 0xf4, 0x88, 0xf8, 0x00, 0xf7, 0x8f, + 0x37, 0xd8, 0x34, 0x4c, 0xe4, 0xcd, 0xff, 0x8d, 0xfc, 0xfa, 0x88, 0x10, 0x03, 0x20, 0x07, 0xbc, + 0xe6, 0xc1, 0xb7, 0x7f, 0x87, 0x6d, 0x4a, 0xc5, 0xfa, 0xe7, 0xfc, 0xf6, 0x04, 0xf8, 0xfa, 0x4f, + 0x3e, 0x0f, 0xb0, 0x9d, 0xc5, 0xcf, 0x30, 0x89, 0x4a, 0xc9, 0x28, 0x31, 0x58, 0x08, 0x80, 0x82, + 0xfc, 0x07, 0x40, 0xe2, 0x3f, 0xda, 0xfd, 0x7e, 0x0b, 0x7c, 0x79, 0x93, 0x87, 0x29, 0xc8, 0x17, + 0x9b, 0x01, 0xfc, 0x9f, 0x18, 0x01, 0x0f, 0x35, 0xe6, 0xd4, 0xf4, 0xea, 
0x3c, 0x2f, 0x7c, 0x80, + 0xb6, 0xd2, 0x23, 0xcd, 0xa9, 0xb2, 0x23, 0xd5, 0xb6, 0x66, 0x61, 0x8b, 0x2e, 0xc9, 0xbd, 0x31, + 0x90, 0x78, 0x44, 0x87, 0x24, 0x9f, 0x86, 0x3b, 0xed, 0xbe, 0xed, 0x2f, 0xaa, 0x86, 0xcc, 0x1b, + 0x01, 0xc3, 0xbc, 0xa3, 0xb8, 0x86, 0xd8, 0xb9, 0xd7, 0x5d, 0xe9, 0xb7, 0x56, 0x6c, 0x1a, 0x2e, + 0x4e, 0x75, 0x18, 0x74, 0x67, 0x8b, 0x1a, 0xfb, 0x7d, 0x93, 0xb9, 0xaf, 0x26, 0xa4, 0x16, 0xc3, + 0x94, 0x95, 0x72, 0x60, 0x9d, 0x68, 0xc3, 0x7b, 0xfe, 0xc0, 0x87, 0x5f, 0x56, 0xc1, 0xfb, 0xf4, + 0xe8, 0x40, 0x5e, 0x76, 0x68, 0x78, 0x2d, 0x85, 0xd3, 0xf3, 0x3f, 0x66, 0xe0, 0xd6, 0x6a, 0xe5, + 0x27, 0xc7, 0xda, 0x15, 0xba, 0xb1, 0x7a, 0x82, 0x9a, 0xf2, 0x32, 0x33, 0xf1, 0xd4, 0xef, 0xdc, + 0x80, 0x34, 0xbf, 0xa6, 0xbe, 0xc4, 0x9f, 0x2c, 0x56, 0x44, 0xe7, 0x2e, 0xf2, 0xc3, 0x1b, 0x9f, + 0x64, 0x05, 0x3a, 0xd6, 0x8e, 0x64, 0xfc, 0x90, 0x0b, 0xd7, 0xfb, 0x69, 0x34, 0xd1, 0x6a, 0xaa, + 0x81, 0x92, 0x4e, 0x4b, 0x94, 0xd2, 0xf2, 0xc4, 0xcf, 0xb8, 0xa8, 0xb6, 0x0f, 0xff, 0x41, 0x57, + 0xc4, 0xf5, 0xf1, 0x73, 0x0e, 0x52, 0xcd, 0x81, 0x6d, 0xf9, 0x47, 0xb1, 0xb9, 0xda, 0x6f, 0x46, + 0xa8, 0x34, 0xed, 0xe6, 0x1f, 0x05, 0x70, 0x78, 0x82, 0x94, 0x56, 0x7d, 0x33, 0xc3, 0xda, 0xab, + 0xaa, 0x4d, 0xe0, 0xc9, 0x67, 0x4b, 0x7f, 0xf9, 0x72, 0x4a, 0x7e, 0xeb, 0x28, 0x22, 0x69, 0x18, + 0x31, 0x53, 0x25, 0x79, 0x2b, 0xb5, 0x4e, 0x55, 0x10, 0xce, 0x0f, 0x92, 0x6a, 0x43, 0x45, 0x05, + 0x55, 0xe9, 0x59, 0x3d, 0xfc, 0x49, 0x62, 0x06, 0xe3, 0x79, 0x46, 0x86, 0x7d, 0x44, 0x7f, 0x95, + 0x83, 0xcd, 0x98, 0x0a, 0xb7, 0x89, 0x5a, 0xd2, 0xa0, 0x5a, 0x95, 0xa0, 0x83, 0x17, 0x77, 0xab, + 0x61, 0x67, 0x80, 0x95, 0x30, 0x7b, 0x62, 0xb0, 0x60, 0x3f, 0xa0, 0xc6, 0x5a, 0xc2, 0xf4, 0x78, + 0x54, 0xf9, 0x61, 0x1d, 0x82, 0x2b, 0x65, 0xd7, 0xaa, 0xc7, 0x2f, 0x80, 0x51, 0x99, 0x28, 0xe6, + 0xa2, 0x02, 0x7e, 0x71, 0x87, 0xa6, 0x78, 0x6a, 0xf4, 0x44, 0xc3, 0xd2, 0x55, 0x15, 0x62, 0x70, + 0x47, 0xf5, 0x28, 0xed, 0x74, 0xb1, 0x94, 0xbe, 0x9c, 0x02, 0xa7, 0x74, 0x0d, 0xe4, 0x8d, 0x94, + 0xe1, 0x32, 0x52, 0x83, 0xee, 0x86, 0xe5, 0x84, 0x0c, 0x97, 0x86, 0x28, 0x39, 0x9c, 0xc6, 0x86, + 0xcb, 0x73, 0xfc, 0xa9, 0x86, 0xfa, 0x4e, 0x73, 0xd9, 0x32, 0xdd, 0xbd, 0xbe, 0x87, 0x97, 0xfb, + 0x04, 0x01, 0xfd, 0x66, 0x1b, 0x6c, 0xad, 0x32, 0xb4, 0x17, 0x25, 0x93, 0x47, 0x64, 0xea, 0x73, + 0x01, 0xa8, 0x90, 0x8a, 0xb5, 0x99, 0x55, 0x05, 0xb9, 0x4e, 0xb0, 0xd5, 0xde, 0x64, 0x9d, 0x83, + 0xf3, 0x27, 0x90, 0x78, 0x9c, 0x12, 0xce, 0x1e, 0xe2, 0xfb, 0x78, 0x85, 0xe5, 0xe2, 0x96, 0xeb, + 0x48, 0xe7, 0xa4, 0x53, 0x1e, 0x87, 0xac, 0x38, 0xbf, 0x42, 0xf9, 0xc4, 0x2e, 0x69, 0x94, 0xf8, + 0xca, 0x0b, 0xf7, 0x2a, 0x6c, 0x77, 0x05, 0x73, 0x52, 0xdc, 0x15, 0x2e, 0x1c, 0x7f, 0x40, 0x4c, + 0x36, 0x71, 0x86, 0xa5, 0xe4, 0x63, 0xfa, 0x59, 0x98, 0xed, 0x5e, 0x0a, 0xc1, 0x50, 0xa4, 0x70, + 0x59, 0x63, 0xc2, 0x8e, 0xdd, 0xb2, 0xbc, 0xa9, 0xc4, 0x05, 0x77, 0xd1, 0xdc, 0xd5, 0xee, 0x27, + 0xab, 0x32, 0xf3, 0x2b, 0x21, 0xc5, 0xdf, 0x05, 0x44, 0x85, 0xc3, 0xda, 0x80, 0x33, 0xdf, 0xe0, + 0x84, 0x96, 0xec, 0x86, 0x82, 0xbf, 0xde, 0x1d, 0x81, 0x72, 0xc9, 0xe4, 0xbd, 0x3b, 0x7d, 0x75, + 0x56, 0xd6, 0xcc, 0xe9, 0xee, 0x28, 0x78, 0xfe, 0xb9, 0xc9, 0x4b, 0x4b, 0x7b, 0xba, 0xdd, 0x7a, + 0x44, 0x33, 0xea, 0x93, 0xdd, 0x2b, 0x00, 0x79, 0x78, 0xb1, 0xab, 0x18, 0x9a, 0x02, 0x57, 0x24, + 0x6c, 0x7e, 0x25, 0xae, 0xe2, 0xd5, 0xd3, 0x2c, 0x6b, 0xde, 0xc1, 0x88, 0x0c, 0xea, 0x12, 0xab, + 0x51, 0x79, 0x10, 0x79, 0x60, 0x0b, 0x2f, 0xb3, 0x0c, 0x31, 0x6b, 0x91, 0x40, 0x5f, 0xac, 0x96, + 0x93, 0xb7, 0x07, 0x2e, 0x6d, 0x6d, 0x9b, 0xc4, 0x75, 0x37, 0x68, 0x04, 0x5f, 0x7b, 0xf3, 0x6b, + 
0x7a, 0x61, 0xba, 0xd2, 0x7d, 0x2a, 0xb9, 0xaf, 0xd6, 0x7e, 0x05, 0xc2, 0xf8, 0xf7, 0xc7, 0xbc, + 0xc9, 0x4c, 0x5d, 0x16, 0x9f, 0xfe, 0x7b, 0xd5, 0x1f, 0xca, 0x7f, 0xbf, 0x36, 0xbc, 0xee, 0x4c, + 0xa0, 0x9e, 0x04, 0x1d, 0xa7, 0x39, 0x3f, 0xc8, 0x39, 0x44, 0x70, 0x0d, 0xfe, 0x08, 0xbd, 0xcd, + 0xda, 0x58, 0x63, 0x95, 0xb2, 0xea, 0xd1, 0xd5, 0x0f, 0x08, 0xe4, 0x62, 0x27, 0xdd, 0x1e, 0x4b, + 0xd8, 0x5b, 0x77, 0xd4, 0xa9, 0x8d, 0xf0, 0x98, 0xa8, 0xe5, 0xff, 0x29, 0xce, 0x30, 0x7b, 0x85, + 0xff, 0x3e, 0x5f, 0x42, 0xee, 0x34, 0xaf, 0x67, 0x2e, 0x99, 0x44, 0x8a, 0x86, 0x89, 0xc5, 0x03, + 0xb1, 0x3b, 0xb4, 0x32, 0x91, 0x9f, 0x87, 0x4e, 0xad, 0xed, 0x45, 0x7a, 0xbd, 0x76, 0x24, 0x3f, + 0x7a, 0x9c, 0x97, 0x5c, 0xfe, 0xc8, 0xe3, 0x04, 0x7e, 0x4f, 0x9e, 0x22, 0x54, 0x31, 0x29, 0x0b, + 0xfb, 0xac, 0xcf, 0xa7, 0xc9, 0xe7, 0xbc, 0x6b, 0xa7, 0xe7, 0xeb, 0xf2, 0x14, 0xc3, 0x3f, 0x6a, + 0x35, 0x5a, 0x8d, 0xb4, 0x5d, 0x4b, 0xa4, 0xf2, 0x6e, 0x50, 0xf7, 0x9e, 0xf0, 0xf2, 0x68, 0xf8, + 0x28, 0x31, 0xb7, 0x96, 0xc9, 0xe0, 0x6e, 0xb0, 0x1e, 0x35, 0xcd, 0xd4, 0x9a, 0xe0, 0xd5, 0x16, + 0x0e, 0x13, 0xdb, 0x86, 0x5e, 0x18, 0xbc, 0xdd, 0xe8, 0x84, 0xce, 0x1e, 0xac, 0x5e, 0xed, 0x18, + 0x3a, 0x06, 0xa4, 0x29, 0x7d, 0xea, 0x7e, 0x24, 0xe3, 0xd0, 0xf6, 0xc9, 0x80, 0xc5, 0x6a, 0x40, + 0xd3, 0x17, 0x48, 0xe7, 0x94, 0xd1, 0xb4, 0x27, 0xde, 0x04, 0x56, 0x00, 0x8a, 0x94, 0xec, 0x35, + 0x2c, 0x71, 0x76, 0x29, 0x71, 0xe6, 0x79, 0x0c, 0x76, 0x2d, 0xcc, 0xcf, 0x31, 0x65, 0x03, 0x97, + 0xce, 0x95, 0xb2, 0x5e, 0xaf, 0x4a, 0xe4, 0xe2, 0xa1, 0x9f, 0x31, 0x14, 0x7a, 0xad, 0x41, 0xdc, + 0xdf, 0xa4, 0xaa, 0x4b, 0x2e, 0x70, 0x3c, 0x4c, 0xa1, 0xcb, 0x5d, 0x05, 0xfa, 0x7d, 0x7f, 0xdf, + 0xe9, 0xef, 0x52, 0x87, 0x1c, 0xec, 0x9e, 0x32, 0x65, 0x37, 0x65, 0x27, 0x32, 0x07, 0x18, 0xaa, + 0xae, 0xe2, 0x66, 0xce, 0x37, 0xe6, 0xaf, 0xd1, 0x1b, 0x42, 0x13, 0x75, 0xf3, 0xb7, 0x17, 0xe0, + 0x2b, 0xd4, 0xe9, 0x0e, 0x53, 0x40, 0x1d, 0x1d, 0x0d, 0xe8, 0xb5, 0x75, 0x16, 0xbf, 0x5b, 0x08, + 0xae, 0xca, 0x04, 0x65, 0xef, 0xdc, 0x35, 0x52, 0xa2, 0x72, 0x68, 0x61, 0xbd, 0x68, 0x7c, 0xe5, + 0xfd, 0xb3, 0xfa, 0x55, 0xca, 0xc0, 0xbf, 0x09, 0xa8, 0xe0, 0x75, 0x74, 0x9f, 0xf8, 0x4a, 0x03, + 0x7d, 0xca, 0x4a, 0x76, 0x1c, 0xbc, 0x34, 0x5c, 0x69, 0x56, 0x49, 0x6a, 0x44, 0xf7, 0x3a, 0xef, + 0xd5, 0xff, 0x64, 0xfb, 0x0e, 0x63, 0xa7, 0xfb, 0xb4, 0x47, 0x3e, 0xc6, 0xf8, 0xf0, 0x8e, 0x81, + 0x71, 0x85, 0x0f, 0x81, 0x5e, 0x62, 0x2d, 0x21, 0x95, 0xcc, 0x8b, 0x9e, 0xde, 0xde, 0x86, 0xbb, + 0x04, 0x9a, 0x0f, 0x60, 0x7e, 0x72, 0x21, 0x19, 0x30, 0x29, 0x43, 0xad, 0x87, 0x64, 0x62, 0x80, + 0xff, 0x96, 0xaa, 0x52, 0xd0, 0xf7, 0xdc, 0x7b, 0xf1, 0x19, 0xb5, 0x21, 0x45, 0xff, 0x9e, 0xcb, + 0xdc, 0xb8, 0x81, 0xd5, 0x14, 0x84, 0x49, 0x54, 0x55, 0x51, 0xf7, 0xbc, 0xb7, 0xb4, 0x68, 0x6a, + 0x5f, 0xd3, 0xd0, 0xda, 0x52, 0xcc, 0xb6, 0x2b, 0x9c, 0x19, 0xb2, 0xce, 0x61, 0xe3, 0x52, 0xdf, + 0x0a, 0xce, 0x08, 0x31, 0xa9, 0x7c, 0x1f, 0x5b, 0x7b, 0x23, 0x69, 0x45, 0x99, 0xea, 0xb3, 0xd3, + 0xbe, 0xb3, 0x11, 0x8d, 0x85, 0xc3, 0xb5, 0x6d, 0x96, 0xd2, 0xce, 0x18, 0xe9, 0xbc, 0xca, 0x99, + 0x5e, 0xed, 0xd6, 0x02, 0xef, 0xf8, 0x80, 0x42, 0x94, 0xc1, 0xe1, 0x27, 0xc3, 0x9d, 0xab, 0x9a, + 0x1e, 0xb2, 0x19, 0xfa, 0xa1, 0x58, 0x7b, 0x60, 0xc6, 0x75, 0x1c, 0xbd, 0xa2, 0xe3, 0xe5, 0xf0, + 0x80, 0xcf, 0xdf, 0x4f, 0xae, 0x79, 0x3d, 0xcb, 0x83, 0xf7, 0x8d, 0x91, 0x48, 0xef, 0x57, 0x27, + 0xb7, 0x22, 0x91, 0xe1, 0xdb, 0x6e, 0x9e, 0x34, 0xcb, 0x02, 0x57, 0x7e, 0x96, 0xb4, 0x83, 0x27, + 0x56, 0xae, 0x77, 0xdc, 0xf8, 0xfd, 0xaf, 0x8f, 0x9f, 0x03, 0xbe, 0x57, 0x92, 0x56, 0xf7, 0x5a, + 0xe2, 0x8c, 0xf7, 0xe0, 
0xa4, 0xd2, 0x16, 0xad, 0x2b, 0x79, 0xdf, 0x8b, 0xc2, 0xe0, 0x9a, 0xc2, + 0x77, 0xc5, 0x00, 0x57, 0x70, 0x98, 0x0d, 0x39, 0x9c, 0x74, 0x0e, 0x92, 0x1f, 0xa5, 0x7a, 0x96, + 0x51, 0xdf, 0xbc, 0xd1, 0xc9, 0x8e, 0x99, 0x42, 0x7e, 0x16, 0x02, 0xb1, 0x09, 0xf9, 0x38, 0x5f, + 0x1a, 0xb8, 0x51, 0x2f, 0x77, 0xe9, 0x59, 0xe6, 0x64, 0xe1, 0xc8, 0xcf, 0x79, 0x80, 0xfa, 0xa0, + 0xb2, 0xb9, 0x62, 0x9c, 0xed, 0xdb, 0x60, 0x9e, 0xf5, 0xdb, 0x9f, 0x6f, 0x6f, 0x4d, 0x9a, 0x7d, + 0x0a, 0x61, 0x0a, 0x56, 0x1d, 0xb3, 0xd3, 0x6c, 0xfd, 0xcb, 0x38, 0xc3, 0xba, 0x75, 0xa0, 0x13, + 0xdb, 0xf3, 0xbb, 0xa4, 0x91, 0x0e, 0x8d, 0xe5, 0xa2, 0xbb, 0x09, 0x1b, 0x03, 0x64, 0x7f, 0x42, + 0x4d, 0x19, 0x02, 0x68, 0x6e, 0x70, 0x90, 0xb4, 0xd0, 0x4e, 0x01, 0x52, 0xeb, 0xd9, 0xd1, 0xc8, + 0x02, 0x52, 0x86, 0x73, 0xe9, 0x50, 0x53, 0xf3, 0x94, 0x23, 0x1b, 0xd5, 0x18, 0xdd, 0x82, 0xc7, + 0x08, 0x23, 0xc2, 0xb2, 0x46, 0x1d, 0x43, 0xb2, 0xe7, 0x89, 0x58, 0x5c, 0x60, 0x1d, 0x86, 0x9b, + 0x9b, 0xe7, 0xad, 0xd5, 0x26, 0x81, 0x7b, 0xa4, 0xf1, 0x44, 0x41, 0x22, 0x96, 0xda, 0x2f, 0x7b, + 0x4b, 0x3e, 0x4e, 0x61, 0xcc, 0xef, 0x49, 0xcd, 0x07, 0x1d, 0x7e, 0x61, 0x6e, 0x85, 0x24, 0x7a, + 0xa7, 0x2b, 0x48, 0xfa, 0x3b, 0x00, 0x51, 0x38, 0x2f, 0xe0, 0xfb, 0x9b, 0x3a, 0xe3, 0xdb, 0xe0, + 0x21, 0x8b, 0x53, 0xc1, 0xd9, 0x8a, 0x5d, 0xbe, 0x4e, 0x25, 0x1c, 0x0b, 0x88, 0xa0, 0x20, 0x58, + 0x90, 0xe1, 0x65, 0x4a, 0xbe, 0x29, 0xe3, 0x70, 0x90, 0x3f, 0x2f, 0x9b, 0x74, 0x4c, 0xc8, 0x7c, + 0xe3, 0x2d, 0xe7, 0x91, 0xa9, 0x68, 0x56, 0x71, 0x42, 0xe9, 0xf9, 0x2b, 0x7b, 0xdf, 0x40, 0xa9, + 0xf1, 0x0c, 0xc4, 0x6f, 0xec, 0xd0, 0x1f, 0x54, 0x41, 0x08, 0xe5, 0xbc, 0xf4, 0xc3, 0x37, 0x18, + 0x85, 0x47, 0x7c, 0x7f, 0xc3, 0x75, 0x8e, 0x86, 0x93, 0x14, 0xc9, 0xb5, 0xb9, 0x91, 0x6e, 0x75, + 0xc3, 0x9b, 0xbe, 0x99, 0x11, 0x61, 0x73, 0xe5, 0xd0, 0xa1, 0x7d, 0xfe, 0x48, 0xa7, 0x6d, 0x9e, + 0xd8, 0x77, 0xdb, 0x86, 0x8a, 0x47, 0x70, 0xfa, 0x07, 0x53, 0x38, 0xfd, 0xab, 0xe0, 0x07, 0xce, + 0x86, 0x50, 0x94, 0x7a, 0x45, 0x7e, 0x4c, 0xcd, 0x8e, 0x1f, 0x59, 0xe2, 0x46, 0x97, 0xdd, 0xfa, + 0xb1, 0x8a, 0x5f, 0x5d, 0xcc, 0xc5, 0xf4, 0x93, 0xf0, 0xe6, 0x1d, 0xf5, 0xaf, 0x07, 0x07, 0x62, + 0xc7, 0x1e, 0x98, 0x07, 0x93, 0x34, 0xca, 0x66, 0xbc, 0x2d, 0xb0, 0xa9, 0xbf, 0xf6, 0x03, 0xcb, + 0xb0, 0x3c, 0xa6, 0xa0, 0x13, 0x1d, 0x5f, 0x7a, 0xce, 0x63, 0xec, 0x58, 0x54, 0xbe, 0xea, 0x0f, + 0xd2, 0x27, 0x8d, 0xeb, 0x57, 0xff, 0xe6, 0xdd, 0x63, 0xc5, 0x49, 0xab, 0x58, 0xc4, 0x3c, 0xfe, + 0xf9, 0x4a, 0x77, 0x37, 0xb5, 0x72, 0x69, 0x81, 0xfb, 0x88, 0xa0, 0x6d, 0xf2, 0x95, 0xda, 0xea, + 0x23, 0x1b, 0xd6, 0x0e, 0x4a, 0xd8, 0x95, 0xbf, 0xd3, 0xbc, 0xbb, 0x06, 0x48, 0xd0, 0x82, 0xa6, + 0x88, 0x7b, 0x5d, 0xdc, 0x8b, 0x9d, 0x37, 0x35, 0x71, 0xa8, 0x91, 0x18, 0x05, 0x68, 0xbd, 0x18, + 0xe6, 0x9f, 0xf6, 0x21, 0x82, 0xd8, 0xc9, 0x5e, 0x2b, 0x0e, 0x8a, 0x0d, 0x78, 0x33, 0xb0, 0x3d, + 0x42, 0x87, 0x5c, 0x9e, 0x9e, 0x4e, 0xc0, 0x60, 0x06, 0xe4, 0xc3, 0x0e, 0x8e, 0xa4, 0xc7, 0xfa, + 0xe7, 0x73, 0x96, 0x28, 0x9e, 0x47, 0x20, 0x19, 0xaa, 0x60, 0x3c, 0xe3, 0x7d, 0xa2, 0x12, 0x08, + 0x27, 0x6e, 0x2b, 0x98, 0xe4, 0xa9, 0x51, 0x6e, 0xd7, 0x22, 0xa0, 0x0d, 0xe4, 0x40, 0x45, 0xeb, + 0xc3, 0x17, 0x7b, 0xd7, 0x41, 0x0a, 0xe6, 0xe7, 0xa5, 0xfd, 0x69, 0x83, 0xb9, 0xa0, 0x43, 0x1a, + 0x78, 0xf2, 0x8a, 0x83, 0xea, 0x6c, 0xd5, 0x00, 0x64, 0xa3, 0xaf, 0x23, 0x38, 0x75, 0x11, 0xd1, + 0x9c, 0x64, 0x3c, 0x1f, 0xa7, 0xc0, 0x80, 0xca, 0x31, 0xe1, 0x12, 0x87, 0xe7, 0x58, 0xb6, 0x1b, + 0x45, 0x08, 0x7f, 0x95, 0xcf, 0x68, 0x56, 0x9c, 0x64, 0xc5, 0x96, 0xe1, 0xc7, 0x27, 0xba, 0xf7, + 0xa3, 0xc3, 0x9d, 0xb5, 0xc8, 0x49, 0x95, 0x7e, 
0xeb, 0xd3, 0x30, 0x3d, 0x95, 0x67, 0x2b, 0x1e, + 0x86, 0x0b, 0xa6, 0xa3, 0x2c, 0x59, 0xe2, 0x98, 0xe9, 0xee, 0x2e, 0xa8, 0x7a, 0xaa, 0xbc, 0x3d, + 0x2e, 0xc1, 0x03, 0x62, 0x06, 0x5a, 0x43, 0x1a, 0xa9, 0x10, 0x94, 0x40, 0xce, 0xa4, 0x7d, 0x23, + 0x34, 0xe4, 0x0c, 0x90, 0x4c, 0x0b, 0xd6, 0x5a, 0xf7, 0x75, 0x2d, 0xcd, 0xed, 0x4b, 0xea, 0x0d, + 0xc1, 0x93, 0xde, 0xdd, 0xc1, 0xdc, 0x06, 0x30, 0x1c, 0xdc, 0x50, 0xe4, 0x43, 0x06, 0x9a, 0x83, + 0x2f, 0xf4, 0x91, 0x67, 0x1f, 0x7a, 0x1c, 0xea, 0xcb, 0x8d, 0x16, 0x74, 0x33, 0xe5, 0x98, 0x4b, + 0x55, 0x10, 0x44, 0xb4, 0xc1, 0x4b, 0xc4, 0x55, 0x6b, 0xb0, 0xfc, 0xde, 0xde, 0x1f, 0x24, 0xdd, + 0x18, 0xee, 0x1e, 0x6f, 0x7a, 0x7f, 0x0e, 0x93, 0x5f, 0xf0, 0x4e, 0x9f, 0x32, 0x65, 0x48, 0x2c, + 0x7c, 0x73, 0xbb, 0x64, 0xeb, 0x88, 0x13, 0x82, 0x8b, 0x5b, 0x6c, 0x46, 0x35, 0xbd, 0x60, 0xcf, + 0xd2, 0x1c, 0x11, 0x98, 0x38, 0xad, 0xb5, 0x73, 0x27, 0xfa, 0xa5, 0x7d, 0xe7, 0xa3, 0xdc, 0x13, + 0xb8, 0xeb, 0x58, 0xb1, 0x54, 0xe0, 0x30, 0x64, 0x1a, 0xf7, 0xd0, 0x48, 0x05, 0x33, 0x74, 0x12, + 0xc9, 0x08, 0x16, 0xab, 0x34, 0xda, 0x44, 0x06, 0xe5, 0x33, 0x8a, 0xff, 0xfe, 0xd9, 0x6c, 0x40, + 0xbe, 0x19, 0x7d, 0x3e, 0x12, 0x81, 0x8d, 0xa9, 0x41, 0x63, 0x8a, 0x44, 0x62, 0x43, 0x8a, 0x2f, + 0x67, 0x7e, 0x81, 0x62, 0x3e, 0xba, 0x62, 0xee, 0x05, 0xad, 0x9f, 0x3b, 0x6b, 0x86, 0x64, 0xc7, + 0x1d, 0x5e, 0xae, 0x91, 0x97, 0xf0, 0x4c, 0xf2, 0x08, 0x29, 0xce, 0x5a, 0x6f, 0x09, 0x79, 0xf1, + 0x56, 0x24, 0xb5, 0xbc, 0xdc, 0x58, 0x79, 0x5f, 0x0a, 0xec, 0x13, 0x80, 0xd7, 0x1e, 0x67, 0x52, + 0xf2, 0x49, 0x0d, 0xfd, 0xbe, 0xcd, 0x68, 0xa3, 0x59, 0x14, 0xc3, 0xfa, 0x41, 0x4d, 0x8c, 0xb4, + 0x80, 0x99, 0xa5, 0xe1, 0x5e, 0x1e, 0xba, 0xcf, 0x8b, 0x71, 0xdf, 0x8d, 0xee, 0xfe, 0x4a, 0xe3, + 0x11, 0xfd, 0x77, 0x1a, 0x58, 0x13, 0x96, 0xd8, 0x64, 0x82, 0xa5, 0xba, 0xb1, 0x19, 0xfc, 0xcb, + 0x46, 0xc5, 0x9c, 0x5d, 0x1d, 0x22, 0xbf, 0x8d, 0x2f, 0x3f, 0xc0, 0x1e, 0x5f, 0x2c, 0x33, 0x3e, + 0x15, 0x68, 0xb6, 0xe7, 0xa3, 0x38, 0x6d, 0x6a, 0x1d, 0x01, 0x39, 0xb5, 0x0e, 0x71, 0xf9, 0xc0, + 0x0c, 0x07, 0x8f, 0xb1, 0x76, 0x98, 0xbe, 0xe4, 0x74, 0xd2, 0x6c, 0x3c, 0x89, 0xf9, 0xa1, 0x23, + 0x50, 0xed, 0x35, 0x1e, 0xa8, 0x4c, 0x09, 0xec, 0x64, 0xf9, 0x90, 0xf0, 0xcb, 0xc3, 0x34, 0x34, + 0x4d, 0x8d, 0xb0, 0xb4, 0xd8, 0x33, 0x54, 0x41, 0x25, 0x37, 0x5a, 0x30, 0x66, 0x6a, 0x3d, 0xad, + 0x4d, 0xe5, 0xb7, 0xc7, 0xc4, 0x0e, 0x62, 0x95, 0x9b, 0x6a, 0x5e, 0x7e, 0x7f, 0xb0, 0x37, 0x3e, + 0x08, 0x49, 0xc8, 0x8c, 0x8b, 0x80, 0x4c, 0x34, 0x23, 0xd4, 0xb3, 0x0a, 0xae, 0x21, 0xd4, 0xee, + 0x79, 0x19, 0x11, 0x8d, 0x87, 0x9a, 0x0b, 0xaf, 0x55, 0x51, 0x20, 0xce, 0xec, 0xb0, 0xb5, 0x94, + 0x09, 0xe9, 0xdd, 0xec, 0x94, 0x93, 0x47, 0xdb, 0x4d, 0xe0, 0x38, 0xe8, 0xf6, 0x0f, 0xdd, 0x62, + 0x14, 0xac, 0x8b, 0x87, 0x86, 0x22, 0x08, 0x42, 0xa7, 0x3a, 0x5a, 0xd3, 0x38, 0xdf, 0xc4, 0xa9, + 0xed, 0xf8, 0x89, 0x68, 0x9a, 0xb9, 0x62, 0xbd, 0xe6, 0x4b, 0x7a, 0xdd, 0x8f, 0x34, 0xa2, 0x58, + 0xb1, 0x19, 0x42, 0x3d, 0xac, 0xd4, 0xb0, 0x0c, 0x91, 0x66, 0x4a, 0x43, 0x29, 0x4f, 0x2b, 0x34, + 0xfe, 0x20, 0x86, 0x24, 0x8f, 0xdd, 0xdf, 0x22, 0x11, 0xbc, 0x82, 0xc7, 0x7d, 0x36, 0xcb, 0x4a, + 0x0c, 0x99, 0xd7, 0x44, 0xa8, 0xad, 0x5d, 0x95, 0x49, 0x89, 0x7c, 0x1c, 0xb8, 0xdf, 0x8a, 0x1f, + 0x33, 0x2e, 0x96, 0x16, 0x63, 0x78, 0xeb, 0xcb, 0x11, 0xab, 0x6e, 0x24, 0xd3, 0xc8, 0x8b, 0x7e, + 0x83, 0x7a, 0xa8, 0x2e, 0x90, 0xfe, 0x7d, 0x3f, 0x27, 0x68, 0x07, 0xd6, 0xc4, 0xb2, 0x0f, 0x3a, + 0x85, 0x52, 0xfd, 0x9f, 0xc1, 0x6d, 0x47, 0x2a, 0xf8, 0xc6, 0x10, 0x11, 0x5a, 0xfd, 0x10, 0x2d, + 0xbb, 0x72, 0x88, 0x9d, 0x63, 0x91, 0xaf, 0xc6, 0xb0, 0x3e, 0x5c, 0xcf, 
0x03, 0x54, 0x2b, 0xde, + 0x1e, 0xdd, 0x6d, 0xac, 0x9d, 0x9b, 0x66, 0xa6, 0x01, 0x2d, 0x3e, 0xca, 0x8c, 0x8f, 0x21, 0x8f, + 0x7e, 0xd6, 0x3e, 0x28, 0xd5, 0xe1, 0xe7, 0x13, 0x9b, 0xcc, 0xe5, 0x80, 0xa3, 0x3e, 0x24, 0x1e, + 0x12, 0xa5, 0x5c, 0x1b, 0xfb, 0xdc, 0x6e, 0xfa, 0x71, 0xb0, 0x41, 0x38, 0x06, 0xd8, 0x8b, 0x65, + 0x5e, 0x83, 0xde, 0x80, 0x24, 0x88, 0x56, 0xd6, 0x4a, 0x98, 0xb3, 0x3d, 0x49, 0x41, 0xe2, 0xe6, + 0x23, 0xf2, 0x13, 0x3b, 0x3e, 0x68, 0x32, 0x86, 0x88, 0x24, 0xf9, 0xe5, 0xe2, 0xa2, 0xb4, 0x96, + 0x3a, 0xeb, 0x06, 0xb2, 0xea, 0x75, 0x27, 0x27, 0xa1, 0x77, 0x8a, 0x65, 0x4f, 0x8d, 0x8b, 0xf7, + 0x23, 0x67, 0xce, 0x21, 0x80, 0x8c, 0x99, 0x5e, 0x7b, 0xfd, 0x03, 0x5f, 0x67, 0x3a, 0xac, 0x3b, + 0x99, 0x0d, 0x26, 0xd1, 0x95, 0x94, 0xb7, 0x6c, 0xc4, 0x9d, 0x2c, 0x93, 0x7b, 0xea, 0x44, 0xcb, + 0x58, 0x74, 0x58, 0x7b, 0x2f, 0xe0, 0xd2, 0xa3, 0xe8, 0x6e, 0x17, 0x8b, 0xb2, 0x97, 0xfb, 0xed, + 0x5c, 0x14, 0xce, 0x31, 0xca, 0x1c, 0x4c, 0xd5, 0xaf, 0xd1, 0xdd, 0x56, 0xa4, 0xa1, 0x49, 0xc2, + 0x9a, 0x40, 0x54, 0x3d, 0x6d, 0x0a, 0x9f, 0x9b, 0xd3, 0x66, 0x67, 0x51, 0x73, 0x16, 0x05, 0x15, + 0x52, 0xbc, 0xac, 0xc5, 0x9f, 0xda, 0x93, 0x41, 0x25, 0xbf, 0x61, 0x93, 0x1e, 0x14, 0xc7, 0xc6, + 0x1a, 0x0d, 0xb3, 0x38, 0x0d, 0x1f, 0x7f, 0x99, 0xac, 0x09, 0xbf, 0x6d, 0x62, 0x0f, 0x28, 0x14, + 0x69, 0x13, 0xa2, 0xd7, 0xcd, 0xfe, 0x98, 0x4e, 0xb2, 0x8d, 0x0e, 0xe9, 0xac, 0xd4, 0x2f, 0xe2, + 0x74, 0x40, 0x9b, 0x79, 0x7b, 0x9d, 0xf5, 0x8f, 0x0e, 0xfb, 0xbe, 0x75, 0xdb, 0x5b, 0x7f, 0x06, + 0x9d, 0x8e, 0x7d, 0x91, 0x3b, 0x53, 0x13, 0xd3, 0xa6, 0xdc, 0x2e, 0x1e, 0xa4, 0x96, 0xf4, 0xf5, + 0xd6, 0xce, 0xda, 0xb8, 0x47, 0x53, 0x68, 0x7d, 0x1d, 0x7e, 0x70, 0x1c, 0xf9, 0xf0, 0xcd, 0x50, + 0xa8, 0x71, 0x61, 0xe7, 0xaf, 0xc5, 0xf7, 0x8f, 0x31, 0xdd, 0x9e, 0x97, 0x96, 0xf1, 0x27, 0x31, + 0x73, 0x34, 0x32, 0x0d, 0x7f, 0xa4, 0xf9, 0xdd, 0xf6, 0xe1, 0x80, 0x86, 0xa3, 0x68, 0xd9, 0xfb, + 0x04, 0x14, 0x1a, 0x1d, 0x4d, 0xdc, 0x43, 0x98, 0x4d, 0x26, 0xbb, 0xaf, 0x94, 0x09, 0x0b, 0xd6, + 0xb4, 0xde, 0x47, 0x67, 0x3f, 0x2a, 0x49, 0xbc, 0xf9, 0x2f, 0xbb, 0x65, 0x7d, 0xee, 0x6f, 0x42, + 0xbf, 0xb9, 0xf1, 0xe7, 0x7b, 0xa0, 0x7f, 0x12, 0x07, 0x03, 0xd9, 0xd1, 0xce, 0x7b, 0x5d, 0xa9, + 0x29, 0xa6, 0x13, 0xde, 0x3f, 0xc7, 0xbe, 0x07, 0x48, 0x1c, 0x7d, 0x70, 0xd7, 0x0e, 0x1c, 0x4e, + 0x40, 0x7a, 0x18, 0x75, 0xbf, 0x8a, 0x13, 0xd1, 0xe3, 0x43, 0x76, 0x06, 0xcb, 0x22, 0xed, 0xd2, + 0x22, 0x3e, 0x98, 0x42, 0xa2, 0x62, 0x0c, 0x0d, 0x10, 0x9f, 0xbf, 0x71, 0x8c, 0x39, 0xff, 0x66, + 0x26, 0xb5, 0xe7, 0x33, 0xd8, 0x28, 0x7a, 0x79, 0x52, 0xc6, 0xaa, 0xda, 0xa1, 0x61, 0xc0, 0xac, + 0x11, 0xb6, 0x1e, 0xe9, 0x1b, 0x64, 0x65, 0xa6, 0xbd, 0xa2, 0xd1, 0x8a, 0x0d, 0x40, 0xe5, 0x02, + 0x4a, 0x24, 0xf5, 0x1b, 0x4e, 0x1d, 0xd8, 0xf3, 0x0f, 0x6b, 0x3a, 0x1a, 0x33, 0x67, 0x0b, 0x2c, + 0xae, 0x20, 0x80, 0xde, 0xe9, 0x1e, 0x6a, 0x07, 0x5c, 0xc8, 0x58, 0x43, 0x51, 0x8f, 0x99, 0xc7, + 0x16, 0xbf, 0x3e, 0x56, 0x76, 0xf5, 0x27, 0xc6, 0x2a, 0xf5, 0x8e, 0xa3, 0xa3, 0xbd, 0xe8, 0xc9, + 0xde, 0x2f, 0x6e, 0xf1, 0x48, 0xde, 0x37, 0x1a, 0x79, 0x17, 0x21, 0xd7, 0x94, 0x4c, 0x3e, 0x94, + 0x72, 0xb0, 0xf6, 0xc0, 0x37, 0x17, 0x76, 0x09, 0x88, 0xf2, 0x64, 0x9f, 0x78, 0x7d, 0x65, 0x4c, + 0x21, 0xdd, 0xd4, 0xb1, 0x5c, 0x54, 0x29, 0x71, 0xb7, 0x94, 0x45, 0xcb, 0xf3, 0xbe, 0xcd, 0xef, + 0xc6, 0x75, 0x4d, 0x1d, 0x25, 0xda, 0x97, 0x31, 0x58, 0xcb, 0xc1, 0xdd, 0x03, 0x5c, 0x55, 0x30, + 0x0d, 0x49, 0xa6, 0x58, 0xd8, 0x97, 0x79, 0x3e, 0x43, 0x5d, 0xc4, 0xae, 0x9c, 0x37, 0xb6, 0x7d, + 0x59, 0x27, 0x3a, 0xc7, 0x32, 0x14, 0x93, 0xe5, 0xb0, 0x38, 0x51, 0xdb, 0xb9, 0x2e, 0x85, 0xd8, + 
0xc8, 0xe0, 0xf6, 0xac, 0x0a, 0xd1, 0xdb, 0x98, 0x73, 0x39, 0xd9, 0x2b, 0xe6, 0xd7, 0xfc, 0xc5, + 0x6e, 0x38, 0xd5, 0x3e, 0xa4, 0xd0, 0x9d, 0x04, 0x97, 0x73, 0x09, 0xdd, 0x8e, 0xc2, 0x8f, 0xf5, + 0x9a, 0xf2, 0x08, 0xbf, 0xea, 0x13, 0x2c, 0x7d, 0x72, 0x56, 0x39, 0x42, 0x26, 0xc5, 0xa9, 0xf3, + 0xaf, 0x0c, 0xa9, 0xc0, 0x87, 0x1f, 0x6c, 0x3a, 0x73, 0x5e, 0xb9, 0x7c, 0x3f, 0xb4, 0xac, 0x4f, + 0xb1, 0xd9, 0x3f, 0x3d, 0x4b, 0x4e, 0xa9, 0x89, 0x44, 0x81, 0x63, 0xaa, 0x93, 0x0f, 0xa2, 0xfd, + 0x18, 0x58, 0x94, 0xc4, 0xd6, 0xde, 0x12, 0x5e, 0xbc, 0x05, 0xd9, 0x6f, 0x21, 0x61, 0x09, 0x76, + 0xa5, 0x95, 0xe9, 0x49, 0x40, 0xc8, 0xaa, 0x2e, 0xd4, 0xae, 0x72, 0x4e, 0x93, 0x00, 0x11, 0x7d, + 0x2c, 0x27, 0x97, 0x56, 0xa9, 0x59, 0x94, 0xe8, 0x39, 0x9f, 0x05, 0xcf, 0x28, 0x2b, 0x0e, 0x5a, + 0x7e, 0x4d, 0x30, 0xc3, 0xaf, 0xa1, 0xf4, 0xda, 0xc4, 0xdc, 0x70, 0xf9, 0xed, 0xbd, 0xff, 0x24, + 0xe2, 0x2b, 0x99, 0x38, 0xb2, 0xa1, 0xe8, 0x25, 0xf3, 0x2b, 0x39, 0x5b, 0xe2, 0xce, 0x30, 0x05, + 0xc6, 0x3d, 0x9b, 0x19, 0x16, 0xb5, 0xbf, 0xb8, 0x49, 0xd7, 0xea, 0x9b, 0x9a, 0x9a, 0x87, 0xe1, + 0x78, 0xc5, 0x00, 0x06, 0x1a, 0xd7, 0x0c, 0xb1, 0xd9, 0xa0, 0xfc, 0x2c, 0xc5, 0x65, 0x59, 0x45, + 0x73, 0xf3, 0xc9, 0x67, 0xed, 0x67, 0x4c, 0x30, 0x15, 0xea, 0xc5, 0x6a, 0x72, 0xc7, 0x12, 0x1f, + 0x73, 0x77, 0xf1, 0x15, 0x42, 0x32, 0x15, 0xd4, 0xe1, 0x08, 0xe2, 0xc5, 0xde, 0x84, 0xec, 0x2d, + 0xe1, 0xa2, 0xc6, 0xc8, 0x1c, 0x64, 0xbb, 0x4a, 0x8c, 0x35, 0x55, 0xff, 0xcc, 0x54, 0x29, 0xc5, + 0x7f, 0xdf, 0x99, 0x8e, 0xa2, 0x77, 0x45, 0x0f, 0x3a, 0xbf, 0xf3, 0xf0, 0x20, 0x19, 0x3b, 0x8e, + 0x1a, 0x1d, 0x55, 0x84, 0x2e, 0x33, 0x6d, 0x53, 0x7d, 0x90, 0xa6, 0xf4, 0x34, 0x29, 0xb0, 0x70, + 0x0b, 0xd3, 0xd4, 0x8b, 0xa0, 0x09, 0x86, 0xa2, 0x60, 0x56, 0x8b, 0xe9, 0x29, 0xc1, 0xd1, 0x19, + 0x71, 0xec, 0x79, 0x9c, 0x04, 0x25, 0x04, 0x5b, 0x59, 0xc0, 0xf9, 0x36, 0x93, 0xe6, 0x01, 0x58, + 0x45, 0x69, 0x6a, 0x60, 0x72, 0xfe, 0x14, 0x2e, 0x1c, 0x60, 0xdc, 0xf1, 0x20, 0x99, 0xa6, 0x1c, + 0x5a, 0xe9, 0x9f, 0x0f, 0x1e, 0x08, 0xb8, 0x00, 0xaf, 0x44, 0x65, 0xcb, 0xb7, 0x79, 0x7b, 0x6e, + 0x14, 0x37, 0x65, 0x09, 0xe3, 0xa8, 0xc6, 0x19, 0xbd, 0xfe, 0x8e, 0x0d, 0x29, 0x8b, 0x57, 0xeb, + 0xa2, 0x80, 0xd4, 0x61, 0x4f, 0xf5, 0x52, 0xf0, 0xef, 0x3a, 0x11, 0xd2, 0x02, 0xc9, 0xd2, 0x47, + 0x0f, 0x62, 0x95, 0x0e, 0xc1, 0x43, 0xcc, 0x27, 0x3a, 0x94, 0x5b, 0x14, 0x5a, 0x5b, 0xb9, 0xb2, + 0xa3, 0x51, 0xf8, 0x5e, 0x55, 0x5d, 0x9e, 0x51, 0x6d, 0x2b, 0x64, 0x85, 0xe9, 0xb6, 0xfa, 0xc3, + 0x81, 0x44, 0x33, 0x1a, 0x3d, 0x23, 0x58, 0x97, 0xc1, 0xa5, 0xee, 0xa1, 0x14, 0xcd, 0xfb, 0x7b, + 0xad, 0x2c, 0x76, 0x5a, 0x1b, 0xaa, 0x14, 0x3e, 0x68, 0x91, 0xc4, 0x34, 0x2e, 0xdf, 0xf1, 0xe7, + 0xae, 0xe6, 0x76, 0xb6, 0x8a, 0x49, 0x8b, 0x78, 0xe7, 0xd7, 0x78, 0x1c, 0x74, 0xde, 0x74, 0xbb, + 0x33, 0x28, 0x97, 0x1e, 0xce, 0x2d, 0xfa, 0xb9, 0x9e, 0xc7, 0xd1, 0xa6, 0x89, 0xfb, 0xf9, 0x3a, + 0x75, 0xcf, 0xc0, 0x68, 0x2f, 0xf9, 0x4b, 0xa2, 0x8f, 0xea, 0x91, 0xe2, 0x78, 0x14, 0x9c, 0x86, + 0x48, 0xd3, 0x49, 0x30, 0xa7, 0x5a, 0x51, 0x12, 0x4e, 0x8d, 0x4a, 0xb5, 0x6c, 0x7f, 0xca, 0x04, + 0x53, 0x1c, 0x0f, 0x06, 0x8f, 0x60, 0xbc, 0x90, 0xcd, 0xd3, 0x33, 0x68, 0x45, 0xc4, 0x4a, 0x48, + 0x43, 0x92, 0x52, 0xa1, 0xda, 0x6c, 0x6c, 0x59, 0x7a, 0x69, 0xd8, 0xa8, 0x77, 0xa5, 0x0e, 0x0b, + 0xc0, 0x85, 0x64, 0xdd, 0xa1, 0x49, 0xbb, 0x29, 0x72, 0x50, 0xba, 0x24, 0x39, 0xb1, 0x40, 0x27, + 0xc5, 0x3a, 0xd8, 0xde, 0x6c, 0x76, 0x37, 0x35, 0x5c, 0xdc, 0xce, 0x4c, 0x2b, 0x67, 0x66, 0xaf, + 0x53, 0xcf, 0x37, 0x1d, 0x6c, 0x5f, 0x09, 0x27, 0xbc, 0xca, 0xfb, 0x93, 0xe7, 0x80, 0xf1, 0x07, + 0x29, 0x19, 0x98, 0x17, 
0x62, 0x8c, 0x44, 0x26, 0xc4, 0xff, 0x25, 0xa7, 0xe5, 0x4b, 0xc7, 0x8e, + 0xfa, 0x30, 0x38, 0xf4, 0x69, 0x44, 0xfe, 0xbd, 0x08, 0xba, 0xa0, 0x28, 0x7c, 0xd1, 0x61, 0x68, + 0x41, 0x48, 0x0d, 0x1f, 0x22, 0x99, 0x05, 0xa3, 0x73, 0x4a, 0x21, 0xc7, 0x53, 0x4a, 0xcb, 0x7a, + 0xe1, 0xb3, 0xe0, 0xb9, 0x91, 0x14, 0x9a, 0x49, 0x31, 0xac, 0xc7, 0xc8, 0xc0, 0xdb, 0x9e, 0x7f, + 0x2f, 0xde, 0xbc, 0xf3, 0x67, 0x29, 0x51, 0xad, 0xe2, 0x45, 0x53, 0x29, 0x7e, 0xd0, 0x83, 0xb1, + 0x3b, 0xc1, 0xaf, 0x39, 0x78, 0x62, 0x8a, 0x67, 0x0a, 0x8b, 0x20, 0x86, 0xba, 0xd5, 0xf4, 0xa6, + 0xec, 0xfb, 0x0d, 0xc4, 0x29, 0xaa, 0x22, 0x95, 0x66, 0x6e, 0x00, 0x87, 0xb1, 0xe5, 0x19, 0xfb, + 0x5d, 0x78, 0x5c, 0x87, 0xc0, 0x38, 0x25, 0x7d, 0xf9, 0x57, 0x5a, 0xeb, 0xa8, 0xff, 0xe9, 0xa2, + 0x40, 0x09, 0x7e, 0xb5, 0x39, 0x7a, 0x11, 0x8c, 0x15, 0xc6, 0xbe, 0x07, 0x6c, 0xa0, 0xf8, 0x1b, + 0xb2, 0xc4, 0x1b, 0x57, 0x65, 0xfd, 0xa9, 0xba, 0x79, 0x52, 0xe6, 0xd4, 0x26, 0xec, 0xcd, 0x4f, + 0x36, 0x71, 0x93, 0x15, 0x2c, 0xc1, 0xf4, 0xd5, 0x76, 0x78, 0x7d, 0xbb, 0x38, 0x54, 0x5b, 0x5f, + 0xa1, 0xfb, 0x56, 0x72, 0x63, 0xa5, 0x93, 0x4b, 0xb5, 0x1f, 0x20, 0xd7, 0x2f, 0x14, 0x9d, 0xd1, + 0xda, 0x79, 0xec, 0x1d, 0x10, 0xfc, 0xce, 0xb5, 0xf7, 0xf6, 0x5f, 0x4e, 0xe2, 0x4e, 0x49, 0x47, + 0xba, 0x53, 0x93, 0x2b, 0x9c, 0xf9, 0x28, 0x28, 0xa1, 0x48, 0xa6, 0x11, 0x15, 0xf0, 0x59, 0x1e, + 0xd0, 0x8e, 0x6b, 0xb8, 0xc2, 0x9c, 0xfc, 0xed, 0x2b, 0xd2, 0x29, 0x71, 0x12, 0xc1, 0x14, 0x5b, + 0xda, 0x58, 0x4c, 0x17, 0xed, 0xd7, 0x76, 0x8f, 0x6e, 0x6c, 0x07, 0xc2, 0xf4, 0xc9, 0xa3, 0x4b, + 0x01, 0xef, 0x64, 0x81, 0xee, 0x18, 0x39, 0x4a, 0x2d, 0x8b, 0xb5, 0x4d, 0x01, 0x6a, 0xe4, 0x67, + 0x32, 0xcd, 0x9d, 0x56, 0x2c, 0x22, 0x51, 0x50, 0x1c, 0xb7, 0xbb, 0x9d, 0x5d, 0x43, 0x8f, 0x6d, + 0x9f, 0xb9, 0x5b, 0xa7, 0x7c, 0xe6, 0xf0, 0xc3, 0x48, 0x8b, 0x03, 0xdc, 0xec, 0x85, 0x03, 0xe0, + 0xae, 0x32, 0x77, 0x97, 0xb8, 0x55, 0xde, 0x33, 0xc8, 0x48, 0x1f, 0x95, 0xa5, 0xca, 0x94, 0x32, + 0xe7, 0x71, 0xe4, 0xb6, 0x41, 0xe1, 0x0a, 0x68, 0x3e, 0xc2, 0xb6, 0x3e, 0xda, 0xf4, 0x73, 0x33, + 0x48, 0x88, 0x9e, 0x19, 0x3d, 0x64, 0x68, 0xaf, 0x3a, 0x54, 0x53, 0x6d, 0xe1, 0x06, 0x8f, 0xde, + 0xdc, 0xfe, 0xb0, 0xec, 0xc8, 0xde, 0x18, 0x4c, 0x1d, 0x5b, 0x20, 0x1a, 0x52, 0xdf, 0xfb, 0x00, + 0x28, 0x79, 0xe3, 0x14, 0x25, 0x97, 0xfa, 0x75, 0x46, 0xc4, 0x2c, 0xf4, 0xb0, 0xb5, 0x75, 0xed, + 0xda, 0xb9, 0x6a, 0x2d, 0xb1, 0x87, 0x77, 0x32, 0x86, 0xdf, 0x1f, 0x98, 0xba, 0x5e, 0xbd, 0xa4, + 0x19, 0xcb, 0x6a, 0xab, 0x02, 0xcd, 0xe9, 0x1c, 0x5b, 0x6b, 0xcb, 0x96, 0x12, 0xb7, 0x49, 0xa3, + 0xc6, 0x21, 0x43, 0x83, 0x89, 0x8f, 0xc1, 0xfd, 0xf7, 0xfc, 0xc5, 0x02, 0xa0, 0x8f, 0xca, 0x8a, + 0xb4, 0x2e, 0x51, 0x77, 0xaa, 0x7c, 0xc6, 0xa7, 0x6a, 0x6d, 0x5e, 0x43, 0x17, 0x8a, 0xf0, 0x47, + 0x98, 0x86, 0xb6, 0xbd, 0x6f, 0x36, 0x78, 0xb7, 0x64, 0xce, 0x66, 0xe5, 0xd9, 0x95, 0xf3, 0x46, + 0xcb, 0xd2, 0xe3, 0x3f, 0x1e, 0xcb, 0xee, 0x15, 0x9b, 0xdd, 0x2c, 0xaa, 0x96, 0x3c, 0x81, 0x16, + 0xf1, 0x0e, 0x4c, 0x0d, 0x65, 0xbf, 0x01, 0x25, 0x43, 0x62, 0x6d, 0xd7, 0x30, 0x31, 0x53, 0xe3, + 0x1d, 0x34, 0xa2, 0x91, 0x1f, 0xa6, 0x7f, 0xf4, 0xb4, 0xdb, 0x3a, 0x21, 0x9a, 0x4d, 0xa5, 0x76, + 0x2d, 0xdc, 0xa3, 0x06, 0x3f, 0xd8, 0x5b, 0xa0, 0x14, 0x78, 0x17, 0x06, 0x18, 0x13, 0xa2, 0xb7, + 0x4d, 0xab, 0x24, 0x35, 0xab, 0xb0, 0x6f, 0xc4, 0x13, 0xd8, 0x12, 0xc0, 0xcb, 0x29, 0x7b, 0x51, + 0xcb, 0xaf, 0x39, 0xd1, 0x3a, 0x32, 0x7f, 0x82, 0x42, 0x1c, 0xfb, 0xf3, 0xe7, 0x71, 0x8c, 0xa9, + 0xce, 0x69, 0xdc, 0x86, 0x73, 0x90, 0xdd, 0xb3, 0x32, 0x73, 0x94, 0xae, 0x13, 0x31, 0xb2, 0x5c, + 0x22, 0xaf, 0x72, 0xb6, 0x37, 0x28, 0xa3, 0x04, 
0x83, 0x12, 0x03, 0x68, 0x0e, 0xa1, 0x97, 0xcc, + 0xca, 0x58, 0x4f, 0x33, 0x03, 0xa4, 0x67, 0xfe, 0x94, 0xb0, 0x48, 0x06, 0xbe, 0x8f, 0x53, 0x5c, + 0x87, 0x39, 0xa2, 0xc1, 0x4e, 0xbc, 0xaa, 0x86, 0xca, 0xf0, 0x17, 0x0a, 0x3a, 0x67, 0x3e, 0xe9, + 0x8a, 0xb1, 0x6a, 0x4a, 0x05, 0x05, 0x5d, 0x20, 0xd3, 0x26, 0xf7, 0x3a, 0xbf, 0xdf, 0xa0, 0xb6, + 0xb6, 0x9c, 0x99, 0x82, 0xdb, 0xdd, 0xde, 0xf2, 0x0d, 0xae, 0x27, 0xc5, 0xcf, 0xb5, 0xd6, 0x98, + 0xd2, 0xe9, 0x92, 0x5a, 0x96, 0xab, 0x1c, 0x35, 0x5a, 0x9d, 0x0b, 0xbd, 0xa7, 0xc6, 0xda, 0x8b, + 0x11, 0x1a, 0x47, 0xd4, 0x7b, 0xdc, 0xa2, 0xba, 0x36, 0x8a, 0x32, 0xe7, 0x95, 0xe3, 0x8b, 0x72, + 0x1f, 0x1e, 0xca, 0x38, 0x0c, 0xda, 0x9b, 0x99, 0xb6, 0x59, 0x84, 0x0d, 0xe9, 0xb6, 0x5f, 0x16, + 0x28, 0xdf, 0xad, 0x66, 0x46, 0xf9, 0x66, 0x14, 0x2a, 0xe2, 0x7b, 0x40, 0xd1, 0xcb, 0xd4, 0x0e, + 0x1d, 0x22, 0x65, 0xeb, 0xcc, 0x4e, 0xec, 0x2b, 0x47, 0x0c, 0x46, 0xc5, 0x8c, 0x62, 0x0e, 0x82, + 0x4f, 0x88, 0x5b, 0x99, 0x77, 0x2f, 0x5d, 0xbd, 0x65, 0xf1, 0xd9, 0xeb, 0x85, 0x89, 0x4d, 0xd9, + 0xd3, 0x8b, 0x4e, 0x8f, 0x7a, 0xa8, 0x7f, 0xa9, 0xdc, 0x3c, 0xfc, 0x43, 0xaf, 0xc8, 0xbc, 0x9d, + 0xff, 0x44, 0x42, 0x18, 0xf1, 0x74, 0x20, 0xfe, 0xb7, 0x95, 0x5e, 0x28, 0x22, 0x20, 0xd6, 0x24, + 0x55, 0xd5, 0x12, 0xbd, 0xa2, 0x11, 0xa4, 0xee, 0xfb, 0x28, 0x22, 0x08, 0x76, 0x47, 0xa1, 0xda, + 0x0a, 0x5a, 0xf3, 0xef, 0x87, 0xfb, 0xa6, 0x89, 0x78, 0x2a, 0xe1, 0x66, 0xa1, 0x76, 0x79, 0x2d, + 0x00, 0x29, 0x67, 0x1b, 0x87, 0x17, 0xd7, 0xf8, 0x30, 0x3e, 0x71, 0x66, 0xa9, 0xfd, 0x52, 0x8d, + 0xdd, 0xc4, 0xed, 0x2a, 0xbb, 0x32, 0x05, 0x7a, 0xba, 0x82, 0x7e, 0xcc, 0xaf, 0xb3, 0xd7, 0xd3, + 0x4e, 0x95, 0xa0, 0xc7, 0x05, 0xdc, 0x6b, 0xa5, 0x56, 0x81, 0x82, 0xd3, 0x61, 0xe8, 0x50, 0xca, + 0xaa, 0x51, 0xfd, 0x78, 0x38, 0x44, 0x25, 0x3a, 0xbd, 0xcd, 0x97, 0x35, 0x01, 0x1f, 0x40, 0x60, + 0xb4, 0x7d, 0x58, 0xdb, 0x3c, 0x91, 0x26, 0xc1, 0xeb, 0xd3, 0x70, 0x9e, 0x49, 0x1b, 0x7a, 0x8d, + 0x9f, 0x7c, 0x8f, 0x31, 0xeb, 0x03, 0xd1, 0x13, 0x98, 0xaf, 0x3c, 0x4a, 0x67, 0xae, 0xf5, 0x02, + 0x67, 0xd8, 0x68, 0x19, 0xea, 0xf2, 0xb7, 0x1f, 0xe4, 0x8f, 0xa5, 0xd0, 0x42, 0x6c, 0xaf, 0xe9, + 0xe6, 0x5b, 0xd2, 0x6a, 0x73, 0x6a, 0x83, 0x44, 0xcb, 0x8b, 0xec, 0x6a, 0x2f, 0xd9, 0x84, 0x52, + 0xe7, 0xbd, 0x79, 0xcb, 0xb1, 0x02, 0x9e, 0xe5, 0x8f, 0xb7, 0x52, 0x09, 0x8e, 0xc9, 0x7f, 0x0b, + 0x44, 0x99, 0x08, 0xcc, 0xdc, 0x2a, 0x74, 0xba, 0x77, 0x19, 0x11, 0xeb, 0x93, 0x0d, 0xa0, 0x91, + 0xa4, 0xd8, 0x23, 0x3f, 0x10, 0x0e, 0xe8, 0xe8, 0xf7, 0xe0, 0xb4, 0x7a, 0x23, 0x56, 0xc2, 0x95, + 0xff, 0xa5, 0x6a, 0x93, 0x61, 0x10, 0x24, 0x8a, 0x33, 0x21, 0xa8, 0x46, 0x02, 0x0b, 0x47, 0x30, + 0xb3, 0x25, 0x82, 0xb1, 0x25, 0x37, 0x6b, 0x03, 0x1c, 0x89, 0x72, 0xea, 0x4d, 0x21, 0xb6, 0x68, + 0x35, 0x4a, 0x28, 0xf2, 0x7d, 0x69, 0x43, 0x7b, 0x86, 0xb8, 0x9c, 0x33, 0xdf, 0xae, 0xd4, 0x1f, + 0x7d, 0xbf, 0x90, 0xdd, 0xd7, 0x17, 0x3c, 0xef, 0x50, 0xe6, 0x2b, 0x10, 0x6b, 0x07, 0xae, 0x3b, + 0x38, 0x5d, 0xe7, 0xe3, 0xf2, 0x92, 0xab, 0xe6, 0xfb, 0x8d, 0x8c, 0x35, 0x9d, 0xf6, 0x6e, 0x16, + 0x26, 0x32, 0xe4, 0x12, 0x9f, 0x78, 0x95, 0xff, 0xd3, 0x19, 0xeb, 0xb2, 0x7f, 0x23, 0xc2, 0x15, + 0xae, 0x38, 0x0e, 0x52, 0xa5, 0x1e, 0x41, 0x2a, 0xba, 0x4d, 0x4e, 0xa4, 0x97, 0xad, 0xe3, 0xfd, + 0x18, 0xfd, 0xb3, 0xe2, 0xcc, 0x59, 0xe4, 0xd9, 0x78, 0x30, 0xf6, 0x3d, 0x4f, 0xcc, 0x0a, 0xb8, + 0x7c, 0x4e, 0x39, 0x4a, 0xda, 0x1f, 0x0e, 0x04, 0xb9, 0xaf, 0xcf, 0x6d, 0xab, 0xa2, 0x7e, 0xba, + 0xe3, 0x63, 0x37, 0x25, 0x77, 0x40, 0xd1, 0xe6, 0x36, 0x1d, 0xe2, 0x36, 0x96, 0x30, 0x83, 0x6b, + 0xa8, 0xd4, 0x79, 0xdf, 0x11, 0xb9, 0xa1, 0xa0, 0x45, 0x17, 0x8f, 0x7e, 
0x2f, 0x05, 0xdc, 0x04, + 0x60, 0x90, 0xd5, 0xb2, 0xe3, 0xb0, 0x01, 0xf0, 0x2a, 0x88, 0xf6, 0xfe, 0xfe, 0x8c, 0xb7, 0x6c, + 0xa9, 0x0e, 0xdf, 0xb4, 0xb5, 0x19, 0xa3, 0x13, 0x8c, 0x45, 0xce, 0x39, 0x52, 0xeb, 0x17, 0x6d, + 0x96, 0xa8, 0xc4, 0xef, 0x35, 0x21, 0xcc, 0x2d, 0x69, 0x7d, 0xfc, 0x63, 0x7b, 0x97, 0xfb, 0x37, + 0x62, 0xbf, 0x58, 0xd3, 0x41, 0xfa, 0xb3, 0x3b, 0x98, 0x10, 0x01, 0xab, 0x7f, 0x44, 0x15, 0xee, + 0x5a, 0xb2, 0x1c, 0x0c, 0x0d, 0x76, 0x13, 0xbe, 0x6a, 0x12, 0x54, 0x14, 0xa8, 0x44, 0x0c, 0x56, + 0x1a, 0xbb, 0x64, 0x43, 0xe8, 0x39, 0x32, 0x49, 0x1c, 0xf3, 0x0a, 0x1e, 0x50, 0x6b, 0xfa, 0x08, + 0x88, 0x4d, 0x06, 0x81, 0xb2, 0x0a, 0x51, 0x6d, 0x98, 0x66, 0x5f, 0x13, 0x2f, 0x3b, 0xcd, 0x4e, + 0xfc, 0x94, 0xd5, 0x3e, 0xbf, 0x04, 0xd3, 0xc8, 0x84, 0x7a, 0x09, 0x1f, 0xb3, 0xdd, 0x14, 0xb5, + 0x02, 0xc7, 0xc7, 0x76, 0xb2, 0x25, 0x5c, 0xeb, 0xea, 0xad, 0x1e, 0xfc, 0x07, 0x12, 0x92, 0xfe, + 0xb8, 0xf2, 0x69, 0x7f, 0xfb, 0xa6, 0xc2, 0x9f, 0xc3, 0xd3, 0x6c, 0xce, 0x89, 0x3c, 0x48, 0xe4, + 0x3d, 0x46, 0x98, 0xd0, 0x4d, 0xc4, 0x76, 0x3d, 0xc3, 0x28, 0x78, 0x86, 0x12, 0x87, 0x52, 0x8f, + 0xa7, 0x93, 0x49, 0xff, 0x5a, 0x64, 0xba, 0xef, 0x5e, 0xc1, 0x78, 0x49, 0xa3, 0x19, 0x03, 0xdf, + 0x03, 0xfb, 0x47, 0x7b, 0xf7, 0x37, 0xec, 0x0c, 0x4e, 0xf5, 0x55, 0xb0, 0xf2, 0xc5, 0x79, 0xb0, + 0x5b, 0x23, 0x45, 0xbb, 0xf6, 0x97, 0x85, 0xae, 0x93, 0x2f, 0xb1, 0x3a, 0x68, 0x18, 0x45, 0x08, + 0x9b, 0x5a, 0xad, 0x82, 0x05, 0xcf, 0x75, 0x72, 0xcc, 0x58, 0x48, 0xf2, 0xc1, 0xce, 0xad, 0xf2, + 0xf6, 0xb3, 0xcc, 0xc6, 0x12, 0x97, 0x6a, 0xf5, 0xb9, 0xe6, 0x18, 0x91, 0xf7, 0x6a, 0xf0, 0xf2, + 0x9c, 0x34, 0xc9, 0x3d, 0xf1, 0x6a, 0xe5, 0x8f, 0xf6, 0xab, 0x1c, 0x2c, 0x7a, 0x2f, 0xc8, 0xa6, + 0x97, 0xbb, 0x7b, 0xcc, 0xbf, 0x1b, 0x80, 0x0c, 0xd7, 0xab, 0x51, 0x2c, 0x00, 0x05, 0xc1, 0x52, + 0x79, 0x5d, 0xfa, 0x19, 0xa2, 0xca, 0x38, 0x0f, 0xbf, 0xc4, 0x23, 0x7e, 0xaf, 0x27, 0xbc, 0xed, + 0xa7, 0x49, 0xce, 0x80, 0x31, 0x4b, 0xc2, 0xb3, 0x15, 0x2d, 0x41, 0xf4, 0xec, 0x73, 0x80, 0x17, + 0x5d, 0xce, 0x78, 0xac, 0x55, 0xbc, 0x0b, 0xd1, 0x55, 0x2d, 0x05, 0xe8, 0xc5, 0xc6, 0x60, 0x69, + 0x53, 0xf5, 0x3e, 0xa2, 0xc7, 0x94, 0xb6, 0xa8, 0x2a, 0x2a, 0x14, 0xcb, 0x89, 0x3a, 0x35, 0xb2, + 0x25, 0x2a, 0x3d, 0x72, 0x5c, 0x92, 0x1f, 0xd1, 0x9e, 0x23, 0x8c, 0xf6, 0xcc, 0x29, 0xf0, 0x51, + 0x00, 0xc2, 0x54, 0xf8, 0x34, 0x76, 0xff, 0xdd, 0xa9, 0xb7, 0x4e, 0x2d, 0x1e, 0xf8, 0x9e, 0xd0, + 0x78, 0x8a, 0xb0, 0xac, 0x74, 0x22, 0x52, 0xcd, 0xca, 0xb7, 0xd6, 0x3f, 0xc9, 0x5e, 0x38, 0xdf, + 0xbd, 0x2f, 0x4b, 0x7a, 0x64, 0x5d, 0xc6, 0x83, 0xa8, 0xfa, 0x2a, 0x6b, 0x5f, 0xed, 0x89, 0x46, + 0x3d, 0xd3, 0x1d, 0x70, 0xde, 0x86, 0x1d, 0xd8, 0xce, 0x8d, 0xca, 0xa1, 0x97, 0xb5, 0x64, 0xc6, + 0x2e, 0xb0, 0x7b, 0x30, 0x23, 0x4a, 0xa8, 0xb4, 0x92, 0x72, 0x58, 0x4c, 0x36, 0x6b, 0x3b, 0x70, + 0x6a, 0xe0, 0x4d, 0x7f, 0xa9, 0x58, 0xcc, 0xe0, 0xd9, 0xf8, 0x57, 0x7a, 0xa5, 0xc4, 0xd1, 0x20, + 0x40, 0xb4, 0x8c, 0xbc, 0x41, 0x25, 0xaf, 0xb5, 0x2f, 0x49, 0x0a, 0xff, 0x28, 0x06, 0xdd, 0x83, + 0x54, 0xcf, 0x75, 0x04, 0xd1, 0x64, 0x18, 0xa7, 0x5f, 0xd4, 0x49, 0x7c, 0xf3, 0xc6, 0xa5, 0xe7, + 0xef, 0x54, 0xbf, 0x99, 0xfb, 0x9c, 0xc1, 0xa3, 0x1f, 0x90, 0xd5, 0x7d, 0x46, 0x0f, 0x44, 0x73, + 0xe1, 0xca, 0x85, 0x85, 0x5a, 0x6c, 0x6e, 0xe6, 0x48, 0x7e, 0x0c, 0x67, 0x1d, 0xb1, 0x16, 0x91, + 0x2c, 0x46, 0x98, 0xd8, 0x55, 0x8c, 0x14, 0x7e, 0x31, 0x7d, 0xe9, 0xa2, 0x3f, 0x88, 0xf0, 0x54, + 0x6d, 0xfb, 0x9e, 0xc3, 0x88, 0x29, 0x4b, 0x67, 0x76, 0xc8, 0x31, 0x87, 0x36, 0x79, 0x69, 0x9a, + 0x3a, 0xb6, 0xd2, 0x6d, 0xe5, 0xff, 0x87, 0x95, 0x76, 0xd6, 0xae, 0x2a, 0x57, 0x29, 0xbb, 0x69, + 
0xe5, 0x5b, 0x42, 0x11, 0x82, 0xc6, 0x06, 0x14, 0xbe, 0x02, 0x43, 0xe7, 0x7c, 0x6f, 0x8d, 0xf5, + 0xe4, 0x87, 0xfb, 0x8f, 0x31, 0x23, 0x0c, 0x2b, 0x71, 0xd3, 0xa3, 0x19, 0x56, 0xcd, 0x31, 0xc6, + 0xd4, 0x7c, 0x2b, 0x03, 0xc9, 0x2d, 0xb3, 0xc2, 0xa4, 0x30, 0xd4, 0x2e, 0x63, 0x55, 0xf6, 0x7d, + 0xc9, 0x9a, 0xef, 0x08, 0xac, 0xed, 0xc8, 0xdf, 0x74, 0xff, 0x90, 0xaa, 0x9c, 0x3c, 0xfa, 0x54, + 0x1e, 0x28, 0x80, 0x3c, 0x27, 0x05, 0x03, 0x0f, 0x04, 0x00, 0xe2, 0x64, 0xaa, 0x42, 0x0a, 0x64, + 0xee, 0xb3, 0x7f, 0xbb, 0xab, 0xad, 0x25, 0x11, 0x1c, 0x71, 0x4e, 0x42, 0xae, 0x31, 0xc5, 0x68, + 0x8a, 0x20, 0xc4, 0x9c, 0x41, 0x49, 0x88, 0x67, 0x42, 0xe6, 0x68, 0xec, 0x15, 0x7b, 0x63, 0x0d, + 0xe1, 0x39, 0x84, 0xee, 0x3a, 0xc3, 0x3c, 0xa5, 0xb8, 0x78, 0x09, 0x31, 0xdb, 0x93, 0x73, 0xb5, + 0x5a, 0x75, 0x7d, 0x1b, 0x36, 0xdc, 0x92, 0x53, 0xaf, 0xcb, 0xf7, 0x51, 0xd1, 0x39, 0x3d, 0x81, + 0x4f, 0xcb, 0xfc, 0xe2, 0x1b, 0xba, 0x75, 0x22, 0xcb, 0x99, 0x6b, 0xf5, 0x8e, 0xef, 0x33, 0x4d, + 0x0d, 0xed, 0x79, 0x51, 0xcd, 0xb6, 0xcb, 0x89, 0xc7, 0xfa, 0x40, 0x42, 0x3b, 0xc0, 0xba, 0x21, + 0x4d, 0x9c, 0xf5, 0xa6, 0xd9, 0x74, 0x5f, 0xaf, 0xfe, 0xbf, 0xe7, 0x01, 0x0a, 0x3d, 0x54, 0xc8, + 0xf7, 0x0b, 0x16, 0x5c, 0x29, 0x0d, 0xbe, 0xa0, 0x3a, 0xf9, 0x1b, 0x4c, 0x3d, 0xfc, 0x12, 0xe8, + 0x14, 0x87, 0x4e, 0x12, 0xf5, 0xba, 0x93, 0x36, 0x9b, 0x0d, 0x04, 0x26, 0xbe, 0xde, 0x9c, 0x36, + 0x44, 0x79, 0x47, 0x94, 0xe6, 0xfa, 0xc5, 0x33, 0x7b, 0xd7, 0xd7, 0x36, 0xc9, 0xa9, 0xea, 0xb0, + 0x90, 0xf3, 0x03, 0x90, 0xac, 0xc7, 0x8f, 0xef, 0x9c, 0x69, 0x24, 0xad, 0x2e, 0x40, 0xbb, 0xb5, + 0x00, 0x2e, 0x87, 0xbe, 0x3b, 0x84, 0x87, 0x32, 0xa1, 0xe6, 0x4f, 0x28, 0x84, 0x2a, 0x16, 0x95, + 0x86, 0xd1, 0x89, 0xe0, 0xa8, 0xab, 0x7f, 0x5c, 0x8e, 0x3e, 0xee, 0x32, 0xa1, 0xb9, 0x40, 0x65, + 0x0c, 0xf5, 0xa0, 0x36, 0xe3, 0xa5, 0x20, 0x23, 0xd8, 0x12, 0x9b, 0x90, 0x23, 0x40, 0xf3, 0x99, + 0xd0, 0xa7, 0xb1, 0x61, 0xc6, 0x9e, 0x14, 0xd0, 0xc3, 0x07, 0xc9, 0x24, 0x4c, 0x54, 0xcf, 0xd3, + 0x4e, 0x2c, 0x22, 0x2b, 0x56, 0x3c, 0xd4, 0x4a, 0xb3, 0xaf, 0x7a, 0x1c, 0x47, 0x03, 0x88, 0xe6, + 0x7e, 0x2d, 0x71, 0xc1, 0x99, 0x4c, 0xb7, 0x06, 0xac, 0x93, 0xed, 0x66, 0x8a, 0x12, 0x1f, 0xf8, + 0x89, 0x03, 0xcb, 0x74, 0xe8, 0x59, 0x29, 0xb3, 0x8e, 0xbd, 0x47, 0x25, 0xc0, 0xbe, 0xa6, 0x87, + 0x76, 0xf6, 0x09, 0x04, 0x44, 0x81, 0x24, 0xd1, 0xfc, 0x20, 0x08, 0xe5, 0xf5, 0xa6, 0x85, 0x72, + 0xde, 0xc2, 0xef, 0xa4, 0xab, 0x59, 0x3b, 0x17, 0xad, 0x0b, 0x4e, 0x50, 0x66, 0x63, 0xab, 0x85, + 0xe4, 0x27, 0xa7, 0x0c, 0x2b, 0x57, 0xf3, 0x95, 0x05, 0x7c, 0x79, 0x4f, 0xce, 0x11, 0x47, 0x3a, + 0x68, 0x9a, 0xf1, 0x9c, 0xb1, 0x56, 0x6e, 0xb8, 0xfa, 0x52, 0x96, 0xe0, 0x9e, 0x47, 0xf9, 0xa7, + 0x49, 0xf3, 0x8a, 0x6b, 0x62, 0x81, 0xa9, 0xc9, 0xa1, 0xe4, 0xac, 0xdb, 0x8a, 0xb8, 0x61, 0x52, + 0x68, 0x89, 0xe1, 0x00, 0xdd, 0x14, 0x8f, 0x14, 0xb2, 0xe4, 0x10, 0x08, 0x53, 0xe2, 0xeb, 0xe0, + 0x16, 0x91, 0x26, 0x77, 0x06, 0xb7, 0x63, 0x80, 0x29, 0xff, 0xed, 0x3d, 0x7c, 0x22, 0x57, 0x1b, + 0x76, 0x8b, 0x2f, 0xa5, 0xf9, 0x2d, 0x8c, 0x16, 0xd3, 0x88, 0xad, 0x7c, 0xc6, 0xa9, 0x90, 0x3f, + 0x31, 0x68, 0x74, 0x79, 0x80, 0xaa, 0x64, 0x29, 0x4c, 0xc0, 0x3d, 0x9b, 0xca, 0x78, 0xe9, 0x0e, + 0x17, 0xe4, 0x79, 0xb7, 0x65, 0xe3, 0x6a, 0x5e, 0xda, 0x31, 0xda, 0x9c, 0x80, 0x8a, 0xb7, 0xc8, + 0xa4, 0xf8, 0xab, 0xbe, 0x91, 0xaf, 0xc9, 0x02, 0x1b, 0x91, 0x95, 0xc7, 0x22, 0x4c, 0x22, 0x4d, + 0xb5, 0x4d, 0xf3, 0x84, 0x77, 0xfd, 0xc4, 0xee, 0x55, 0x82, 0xba, 0xc5, 0x3f, 0xe1, 0x8c, 0xa2, + 0x4a, 0xee, 0x88, 0x2c, 0x9e, 0xba, 0x50, 0x3c, 0x5f, 0x4b, 0xb8, 0x01, 0x27, 0x87, 0x6c, 0x61, + 0x8e, 0x42, 0x7b, 0x9d, 
0xe4, 0xa4, 0xad, 0xcb, 0xa5, 0x6e, 0x80, 0x37, 0x7e, 0x78, 0xed, 0x6d, + 0x8b, 0x9f, 0x85, 0x88, 0x87, 0x8d, 0x8d, 0xad, 0x90, 0x97, 0xff, 0x0d, 0x57, 0xcb, 0x2a, 0x8d, + 0x9d, 0xaa, 0x29, 0x6e, 0x88, 0x9d, 0x97, 0x11, 0x4e, 0x64, 0x8f, 0xa8, 0xf0, 0xf4, 0xd9, 0x48, + 0x31, 0xb0, 0x3e, 0x1d, 0xa5, 0xf2, 0x7b, 0x60, 0x80, 0xac, 0x8c, 0x6c, 0xe3, 0xdf, 0x2e, 0x3e, + 0xc2, 0x8e, 0x85, 0x98, 0xf2, 0xdb, 0xbe, 0x45, 0x41, 0xc2, 0x98, 0xc8, 0xd8, 0x9a, 0x9d, 0x5a, + 0xca, 0xfe, 0xaa, 0xaf, 0xea, 0xb5, 0xb3, 0xa6, 0xd8, 0x5b, 0xe3, 0x28, 0xd1, 0x32, 0xa1, 0x3b, + 0x28, 0xd6, 0xc1, 0xc0, 0x51, 0x59, 0xc9, 0x6f, 0xe9, 0x3a, 0x9f, 0x5b, 0x16, 0x34, 0x62, 0x31, + 0x80, 0xec, 0x21, 0x1a, 0xb5, 0x18, 0x2f, 0x5e, 0x76, 0xbf, 0x9d, 0xfe, 0xfd, 0xfe, 0xe5, 0x91, + 0xc3, 0xb1, 0xd4, 0x0a, 0x11, 0x7f, 0x0a, 0x9b, 0xbf, 0x0e, 0x19, 0x4d, 0xd1, 0xcc, 0x87, 0x3f, + 0xd1, 0x64, 0x13, 0x89, 0xae, 0x9b, 0xde, 0x12, 0x64, 0x04, 0x1a, 0x00, 0x66, 0xa1, 0xea, 0xf4, + 0xe3, 0x25, 0x35, 0xb0, 0x79, 0x13, 0x6c, 0x5c, 0xce, 0x47, 0x52, 0x9f, 0xe3, 0x7e, 0xce, 0x23, + 0x8a, 0x9b, 0x95, 0xa5, 0xda, 0xc8, 0x8d, 0x80, 0xbe, 0x32, 0x22, 0x19, 0x88, 0x0a, 0xbc, 0x18, + 0xa0, 0xda, 0x58, 0x1e, 0x57, 0x11, 0xcf, 0x13, 0x7f, 0x29, 0x49, 0x7d, 0x2e, 0x54, 0x46, 0x7e, + 0x4b, 0x3e, 0x61, 0x67, 0xf9, 0xa2, 0x93, 0xf6, 0xe3, 0x81, 0x21, 0x41, 0x0b, 0x17, 0xee, 0x11, + 0x81, 0xb1, 0xd2, 0xc7, 0xf6, 0xa3, 0x4f, 0x29, 0xf2, 0x57, 0xb8, 0x5b, 0x00, 0x46, 0x07, 0xba, + 0x84, 0x67, 0xc1, 0x3c, 0x03, 0x0e, 0xa1, 0x74, 0x79, 0x89, 0x93, 0x98, 0x24, 0x5e, 0x7f, 0x2c, + 0x93, 0x6e, 0x5b, 0xe3, 0xce, 0xe0, 0xa0, 0x55, 0xf8, 0x16, 0xd4, 0x43, 0x45, 0x51, 0x9b, 0xfe, + 0x9e, 0xf2, 0x6c, 0x23, 0xba, 0xe9, 0xdd, 0x25, 0x96, 0x81, 0x17, 0x15, 0x1f, 0xae, 0xaf, 0x3e, + 0xbd, 0x46, 0xfa, 0x49, 0xda, 0x3a, 0x4a, 0xa9, 0xb7, 0xe0, 0x58, 0xa5, 0x0c, 0xaf, 0xa5, 0x76, + 0x45, 0x5f, 0x82, 0xb7, 0x22, 0x94, 0x72, 0x9b, 0x8a, 0x79, 0xd7, 0x5e, 0x2e, 0x22, 0x67, 0xea, + 0xfe, 0x23, 0x13, 0xbb, 0xbe, 0x9f, 0x8b, 0xa2, 0x9b, 0x89, 0x91, 0x56, 0x61, 0xa2, 0xf2, 0x76, + 0x8b, 0xc8, 0x62, 0xa8, 0xff, 0x18, 0xbb, 0x8d, 0xcf, 0x3d, 0xc5, 0x44, 0xa1, 0x81, 0x87, 0x63, + 0xb5, 0xfe, 0xdc, 0x7b, 0x23, 0xb3, 0x39, 0x7d, 0xfb, 0xe1, 0x59, 0xf2, 0xa6, 0xbe, 0xb9, 0x39, + 0x93, 0x3e, 0x71, 0x11, 0x05, 0xc3, 0x31, 0x20, 0x72, 0xe1, 0xe0, 0x2e, 0xf7, 0x28, 0x70, 0xb5, + 0x33, 0xd2, 0xb2, 0x4f, 0x50, 0x81, 0xc6, 0x67, 0x90, 0x35, 0x81, 0xad, 0xcd, 0xb9, 0xfa, 0xdf, + 0xff, 0xe5, 0x7f, 0xd7, 0x47, 0x35, 0x8c, 0xbc, 0x44, 0x8b, 0x06, 0x51, 0x63, 0xf7, 0xa3, 0x3a, + 0x2b, 0xb5, 0x4f, 0x6d, 0x38, 0xa1, 0x0d, 0x8f, 0x42, 0x5f, 0x3e, 0xe0, 0x1b, 0x52, 0x0a, 0xb6, + 0xd0, 0x50, 0x41, 0x39, 0xde, 0x0b, 0x1b, 0x1d, 0x32, 0x05, 0x8a, 0xee, 0xdf, 0x86, 0xd1, 0x41, + 0xc4, 0x18, 0xd0, 0x6b, 0x95, 0x32, 0xc4, 0x3f, 0xa3, 0x75, 0x0c, 0x7d, 0xd0, 0x36, 0x56, 0x9c, + 0xcc, 0xf3, 0x97, 0xe4, 0xb9, 0xb2, 0x55, 0x96, 0x64, 0x2f, 0x6d, 0x6c, 0x08, 0x66, 0x67, 0xd0, + 0xda, 0x4e, 0xc2, 0x54, 0xb5, 0xc9, 0x3e, 0xa5, 0x27, 0x02, 0x79, 0xec, 0x0f, 0x3b, 0xe4, 0xa1, + 0xea, 0x2f, 0x72, 0x9e, 0xae, 0xaf, 0x6d, 0xf2, 0x17, 0x1c, 0x2a, 0xd6, 0x9c, 0x9a, 0x9e, 0x20, + 0xe1, 0x33, 0xe9, 0xad, 0x79, 0xcf, 0xe4, 0xf4, 0xe7, 0x11, 0x92, 0x61, 0x8f, 0xba, 0x0c, 0x74, + 0x2a, 0x4b, 0x4c, 0x22, 0x63, 0xe3, 0x83, 0x0c, 0x6c, 0x22, 0x9f, 0x79, 0x25, 0x9f, 0xcc, 0x6b, + 0xbf, 0x5e, 0x14, 0x8a, 0xd2, 0xb9, 0xd6, 0x37, 0xc9, 0x54, 0xc4, 0xef, 0x59, 0x20, 0xf5, 0x4b, + 0xf5, 0xc2, 0x2f, 0xea, 0x38, 0x8e, 0x77, 0x31, 0x05, 0xab, 0xe0, 0xed, 0x07, 0xff, 0x56, 0x38, + 0x6f, 0xa3, 0xf9, 0xb7, 0xf9, 0xa9, 0x18, 0xf4, 
0x41, 0xf7, 0xf7, 0x19, 0xa7, 0xf9, 0x77, 0x63, + 0xd9, 0xfd, 0x41, 0xd3, 0xac, 0xdc, 0xc8, 0x27, 0xb8, 0xe7, 0x57, 0x9d, 0x2c, 0xc2, 0x52, 0x6c, + 0xd1, 0x8b, 0x2b, 0x78, 0xa0, 0xec, 0xba, 0xea, 0x68, 0x9d, 0x6d, 0x2f, 0x4c, 0xc0, 0xe7, 0x0e, + 0x91, 0xe9, 0xd2, 0xd0, 0xc0, 0xae, 0x3e, 0x55, 0x4a, 0x6c, 0x90, 0xc5, 0x4d, 0x08, 0xc4, 0x23, + 0x88, 0x24, 0x39, 0x32, 0x02, 0xb9, 0xd0, 0x8f, 0x23, 0x2a, 0x4a, 0x8f, 0xf6, 0x05, 0x02, 0x39, + 0x33, 0x5e, 0x6d, 0x82, 0x38, 0x62, 0x49, 0x37, 0xc1, 0xd7, 0x2e, 0x77, 0xfe, 0x18, 0xec, 0xe6, + 0x32, 0xe9, 0xab, 0xff, 0x3c, 0xab, 0xc4, 0xd3, 0x2b, 0xeb, 0xe6, 0xcb, 0x4e, 0x49, 0xb0, 0x5a, + 0x40, 0x60, 0xa7, 0x0b, 0x34, 0xef, 0xfc, 0x23, 0x91, 0x92, 0xab, 0x4d, 0x2e, 0x31, 0x52, 0x75, + 0x69, 0x20, 0x92, 0x88, 0x46, 0xdf, 0xbc, 0x4c, 0x1c, 0x2a, 0x6b, 0x55, 0x9e, 0x86, 0xdd, 0x5f, + 0x3b, 0x6a, 0x8e, 0x51, 0x8c, 0x4b, 0xdd, 0x0e, 0xc3, 0x7e, 0xeb, 0x9e, 0x6d, 0x23, 0x26, 0x54, + 0x48, 0x31, 0x1c, 0x35, 0xb5, 0x23, 0x15, 0x68, 0x29, 0xb4, 0x68, 0x95, 0xb3, 0xcc, 0xb8, 0xc9, + 0x6f, 0x81, 0x9a, 0xe5, 0xe8, 0x7f, 0xd1, 0x7b, 0xab, 0x8a, 0x27, 0xdb, 0x05, 0x46, 0x39, 0xaa, + 0x42, 0x83, 0x45, 0x11, 0x7d, 0xdf, 0x1e, 0x68, 0x85, 0x4e, 0x67, 0x5e, 0x26, 0x87, 0x19, 0x2b, + 0xf3, 0x17, 0x86, 0xab, 0xbd, 0x24, 0xf1, 0xf0, 0xee, 0x6a, 0x7d, 0x9a, 0xbb, 0x45, 0x19, 0x67, + 0x82, 0x9c, 0x58, 0x32, 0xa3, 0x97, 0x0e, 0x1d, 0x16, 0xf2, 0xd9, 0x2f, 0x9e, 0x48, 0x93, 0x0d, + 0xa8, 0xc3, 0x12, 0x3a, 0xb8, 0x33, 0xf0, 0x31, 0x12, 0xe7, 0x93, 0xfa, 0xbf, 0x2b, 0x3d, 0xef, + 0x7f, 0x7f, 0xcb, 0x21, 0xa5, 0x44, 0xfe, 0x93, 0x2d, 0x9b, 0xa7, 0xff, 0xa7, 0xd1, 0x4f, 0x9b, + 0xcb, 0x6b, 0x0d, 0x56, 0x53, 0xa0, 0x3c, 0xbd, 0xe6, 0xfc, 0xa6, 0x64, 0x8a, 0x2e, 0x3a, 0xca, + 0x57, 0xd4, 0xc5, 0xa8, 0x77, 0x50, 0xd2, 0x49, 0x63, 0x56, 0xd6, 0xbf, 0x75, 0xe2, 0xd0, 0x0a, + 0x56, 0x59, 0x41, 0x3a, 0x75, 0x7d, 0x52, 0xaf, 0x23, 0xc5, 0x6e, 0x11, 0x71, 0x30, 0x2a, 0x8b, + 0x0c, 0x7e, 0x64, 0x0b, 0x50, 0xf6, 0xda, 0x86, 0x0d, 0x4f, 0x5c, 0x2e, 0x95, 0x1e, 0xf1, 0x74, + 0x62, 0xed, 0x46, 0x1f, 0x8f, 0x94, 0x2c, 0x17, 0x85, 0xed, 0x57, 0x84, 0x44, 0x7d, 0xd3, 0xe9, + 0x2c, 0x61, 0xd2, 0xe1, 0x56, 0x1f, 0x04, 0x0f, 0xf8, 0x0e, 0x12, 0x54, 0x66, 0xdb, 0x20, 0xae, + 0x25, 0xef, 0x20, 0x60, 0x5b, 0x0a, 0x50, 0x8e, 0xa4, 0x22, 0xaf, 0x34, 0x4a, 0x5b, 0x41, 0x94, + 0x8a, 0x67, 0x09, 0xa1, 0x3e, 0x0d, 0x98, 0x8c, 0x95, 0x96, 0x37, 0xc2, 0xa4, 0x9d, 0x4c, 0x6b, + 0x26, 0xfe, 0x93, 0x50, 0xbc, 0x90, 0xf3, 0x4b, 0x62, 0x93, 0x1f, 0x2a, 0x1d, 0x70, 0xc0, 0xfe, + 0xad, 0x93, 0xc0, 0x44, 0x22, 0xa8, 0xb6, 0x73, 0x33, 0xc3, 0x32, 0xec, 0xf1, 0xe7, 0x72, 0xcf, + 0xea, 0xe8, 0xf6, 0xad, 0x20, 0xad, 0x39, 0xe1, 0x0d, 0x67, 0x63, 0x3c, 0x2e, 0x70, 0xec, 0xb5, + 0x11, 0x55, 0x38, 0xc9, 0x7f, 0x80, 0xcf, 0xae, 0x0c, 0x93, 0x4b, 0x7d, 0xcb, 0x87, 0x86, 0xea, + 0x05, 0xf7, 0x8c, 0xcc, 0x99, 0xe4, 0x8d, 0xf7, 0xaf, 0xeb, 0x5f, 0x89, 0x58, 0xa3, 0xb1, 0x2e, + 0xde, 0x0d, 0xbb, 0xc0, 0x27, 0xb4, 0xc6, 0xe9, 0x45, 0x33, 0x5f, 0xc8, 0xe3, 0x58, 0x2f, 0x5a, + 0xdf, 0x94, 0x57, 0x1b, 0xf2, 0x68, 0xeb, 0xd7, 0x58, 0x5e, 0x7f, 0x04, 0xdc, 0x06, 0x68, 0x65, + 0x3f, 0x1b, 0x01, 0x92, 0x45, 0x2e, 0x27, 0x8b, 0xbd, 0xc3, 0xe4, 0x9f, 0x24, 0xee, 0x7c, 0x12, + 0xcb, 0x2e, 0x51, 0x62, 0xa4, 0x34, 0x3e, 0xa1, 0x49, 0x9b, 0xf1, 0x86, 0x0f, 0x0b, 0xfc, 0x29, + 0x0b, 0xd7, 0xaf, 0x70, 0x07, 0xb4, 0xa9, 0x88, 0xab, 0x86, 0x6c, 0xd4, 0xfa, 0x64, 0x61, 0x0a, + 0x27, 0x7e, 0x16, 0xfe, 0x5c, 0x94, 0x8e, 0x08, 0xb6, 0x76, 0x09, 0xc1, 0x3f, 0x49, 0x22, 0x3f, + 0x39, 0x7d, 0x38, 0x62, 0x9b, 0x0d, 0x58, 0x31, 0x59, 0xd9, 0x7f, 0x1f, 
0x51, 0x4e, 0x92, 0xc0, + 0xba, 0x36, 0x15, 0x54, 0xdc, 0x12, 0x23, 0xb6, 0x4a, 0x8c, 0x36, 0x90, 0x51, 0x2a, 0x24, 0x9d, + 0x3b, 0xa8, 0x21, 0xe3, 0x15, 0x86, 0x58, 0x32, 0x59, 0xd0, 0xda, 0xaa, 0xcb, 0x61, 0x7e, 0x70, + 0x2e, 0x1f, 0xbe, 0x18, 0xfd, 0x7e, 0x53, 0x96, 0x3d, 0xae, 0x39, 0x91, 0x2d, 0x42, 0xed, 0xa5, + 0xb2, 0x79, 0xdf, 0xf3, 0xd6, 0xff, 0xb8, 0x1e, 0x1a, 0x6c, 0x9b, 0x13, 0xb2, 0x77, 0x14, 0x55, + 0x72, 0xdc, 0x77, 0xf7, 0x40, 0x5a, 0xce, 0x33, 0x28, 0x1a, 0xe9, 0x13, 0x76, 0x59, 0x97, 0xa5, + 0x78, 0x4b, 0x71, 0x85, 0x2f, 0x97, 0x57, 0xbe, 0x35, 0x46, 0x02, 0x2a, 0xb0, 0xde, 0x8b, 0x61, + 0x18, 0xcc, 0x8b, 0x4c, 0x28, 0x6e, 0xae, 0x64, 0x15, 0xb7, 0x3d, 0xfc, 0xbc, 0x87, 0x82, 0xeb, + 0xf2, 0x7b, 0xf9, 0xb3, 0x3b, 0xc6, 0xff, 0x6d, 0x5b, 0xf8, 0x1f, 0xff, 0xf1, 0x1f, 0xff, 0xf1, + 0x1f, 0xff, 0x5f, 0xf8, 0x1f, 0xab, 0x20, 0x01, 0xd7, 0x00, 0x23, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8960, // uncompressed data size (bytes) + 7373, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x05, 0x62, 0x56, 0x08, 0x13, 0x4c, 0xc8, 0x40, 0x69, + 0x20, 0x00, 0x00, 0x8e, 0xa1, 0x42, 0xb2, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_dbg_tu10x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_sig_dbg_data[] = +{ + 0x0f, 0xa5, 0x47, 0x59, 0x9d, 0xc1, 0xb5, 0x03, 0x7b, 0x20, 0x5f, 0xab, 0xc1, 0x83, 0x57, 0xe7, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_sig_dbg_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_prod_tu10x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_sig_prod_data[] = +{ + 0x41, 0x2d, 0x5e, 0x94, 0xd5, 0x69, 0xef, 0xfc, 0x89, 0xc1, 0x6d, 0x07, 0xb7, 0xdd, 0xbb, 0x96, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_sig_prod_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_tu10x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_patch_loc_data[] = +{ + 0x00, 0x1f, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_tu10x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_tu10x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? 
+ NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU102("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/reload/g_booteruc_reload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU102_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU102_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU102_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterReloadUcode_TU102 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU102_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_TU102(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterReloadUcode_TU102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_TU116.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_TU116.c new file mode 100644 index 000000000..63fd8cd6b --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterReloadUcode_TU116.c @@ -0,0 +1,1317 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! */ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8960 +// COMPRESSED SIZE (bytes): 7364 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_image_dbg_data[] = +{ + 0xed, 0x99, 0x45, 0x50, 0x1c, 0x0a, 0x90, 0x86, 0x87, 0x61, 0xb0, 0xe0, 0x0c, 0xee, 0x4e, 0xf0, + 0x00, 0xc1, 0x9d, 0xe0, 0x0e, 0x43, 0x90, 0x00, 0xc1, 0x83, 0x05, 0x82, 0x04, 0x1b, 0xdc, 0xdd, + 0xdd, 0xdd, 0x5d, 0x83, 0xbb, 0x6b, 0x70, 0x97, 0x20, 0x09, 0x10, 0x9c, 0x01, 0x06, 0xdf, 0xb7, + 0xd7, 0x77, 0xdb, 0xeb, 0xd6, 0xfb, 0x2e, 0x7f, 0xf7, 0xa5, 0xbb, 0xab, 0xba, 0xaa, 0x0f, 0x7f, + 0x87, 0x03, 0xfc, 0x00, 0x2f, 0x49, 0x00, 0x1f, 0x6c, 0x00, 0x00, 0x0e, 0x84, 0x03, 0xee, 0x81, + 0xd1, 0x00, 0x20, 0xa0, 0x67, 0x6b, 0xfb, 0xf5, 0xf5, 0x15, 0x3b, 0x1c, 0x80, 0x00, 0x78, 0xcd, + 0x05, 0x35, 0xee, 0x03, 0x78, 0xd2, 0x56, 0x11, 0x1a, 0x93, 0x00, 0x6c, 0x69, 0xab, 0xc0, 0x7f, + 0x84, 0x21, 0x6d, 0x15, 0xf1, 0x1f, 0xa1, 0x0c, 0x07, 0x00, 0x00, 0x69, 0xb9, 0x88, 0xcd, 0xdb, + 0x88, 0x3d, 0xb9, 0x79, 0xdb, 0x69, 0xb9, 0x08, 0x69, 0xe9, 0xc0, 0xe6, 0x6d, 0x84, 0xe6, 0x55, + 0xa0, 0x4f, 0x0b, 0x00, 0x80, 0xcd, 0xa5, 0x00, 0xc0, 0xb9, 0xcb, 0x05, 0x60, 0x83, 0xc2, 0x01, + 0x96, 0x28, 0xff, 0x1b, 0xa0, 0xb5, 0xa6, 0x03, 0x10, 0x80, 0xff, 0x44, 0x30, 0x11, 0x60, 0x2c, + 0x00, 0x80, 0x1c, 0xfe, 0x4f, 0xab, 0xe7, 0x5c, 0xa4, 0x2e, 0xd8, 0xc3, 0x3d, 0x32, 0x1c, 0xf1, + 0x9f, 0x6a, 0xc0, 0x64, 0x6c, 0xd4, 0xad, 0xff, 0xed, 0xfc, 0x7c, 0x8f, 0x14, 0x0e, 0xa0, 0x00, + 0x3c, 0xe7, 0x80, 0x3a, 0xe2, 0x80, 0x2d, 0x49, 0x38, 0xff, 0xe4, 0x2f, 0x0f, 0x00, 0x9f, 0x7f, + 0xe6, 0x81, 0x03, 0xbb, 0x8a, 0x1e, 0x11, 0x62, 0x21, 0xed, 0xe8, 0xe1, 0x38, 0x48, 0x80, 0xbc, + 0x5c, 0x38, 0x20, 0xf6, 0x1f, 0xed, 0x79, 0x85, 0xbd, 0x7b, 0x7a, 0x51, 0x40, 0xc8, 0xcb, 0x15, + 0xfb, 0x09, 0xf8, 0x3f, 0x11, 0xcf, 0xfe, 0xc4, 0x6b, 0xdd, 0xcc, 0x35, 0xe4, 0x37, 0xa5, 0xc8, + 0xa7, 0x19, 0xdf, 0x2d, 0xe7, 0x40, 0xfe, 0x74, 0x4d, 0x61, 0x8b, 0x43, 0xf9, 0xd8, 0x43, 0x7a, + 0x66, 0x4f, 0xdb, 0xe2, 0xc9, 0xa5, 0x9b, 0xbb, 0x52, 0x64, 0xe7, 0x71, 0xb5, 0x35, 0x2e, 0x3d, + 0x26, 0xaa, 0x51, 0xb8, 0x29, 0x97, 0x8e, 
0xfb, 0x9b, 0x54, 0x88, 0xf5, 0x95, 0x69, 0x8b, 0xee, + 0x3b, 0xae, 0xb2, 0x00, 0x34, 0xb3, 0x94, 0x9b, 0x23, 0xc1, 0x90, 0xdf, 0x99, 0xe4, 0xf5, 0xfd, + 0x2e, 0x32, 0xdd, 0xf1, 0x5f, 0xbd, 0xba, 0x32, 0x02, 0xc5, 0xbd, 0x5f, 0x1e, 0x27, 0xb2, 0xdd, + 0xb6, 0x24, 0x42, 0x58, 0xd6, 0xe5, 0x87, 0x3d, 0x39, 0x21, 0x98, 0xd0, 0x52, 0x78, 0x5e, 0x76, + 0xfe, 0xa9, 0xf5, 0x07, 0xca, 0x41, 0x8c, 0x8b, 0x4f, 0x0f, 0x45, 0xe9, 0x8b, 0x33, 0x6d, 0xe8, + 0x77, 0x44, 0xb1, 0xb3, 0xe1, 0x00, 0x91, 0xf5, 0xbe, 0x20, 0x6e, 0xa7, 0xaa, 0xb8, 0xf3, 0xbf, + 0x01, 0x3e, 0x98, 0x92, 0x90, 0x52, 0xc8, 0x8a, 0x36, 0x93, 0xf1, 0xa7, 0x81, 0x27, 0xba, 0x5f, + 0xd2, 0x94, 0xe6, 0x43, 0xd6, 0x32, 0x74, 0xd9, 0x4d, 0x11, 0x47, 0xe7, 0x52, 0xbd, 0xa5, 0xbb, + 0x49, 0x6e, 0x54, 0x3a, 0x75, 0xb8, 0xda, 0x96, 0xa2, 0xf9, 0x7f, 0x4c, 0xe5, 0x43, 0x2d, 0x38, + 0x20, 0x64, 0xf7, 0xdc, 0xc4, 0x6f, 0x85, 0xe9, 0xe9, 0x7b, 0x3f, 0x92, 0xc0, 0x5c, 0x10, 0xe7, + 0xf3, 0x73, 0x97, 0xde, 0xec, 0x09, 0x54, 0xe5, 0x58, 0xd0, 0x4a, 0xe0, 0xca, 0xea, 0x62, 0x88, + 0xb5, 0x3e, 0x2c, 0xff, 0x54, 0x95, 0xae, 0x6d, 0xd5, 0xa4, 0x94, 0x2d, 0x56, 0x66, 0x11, 0x3e, + 0xd5, 0xc3, 0xfc, 0x59, 0x76, 0xf9, 0x55, 0x22, 0x5c, 0x8d, 0xda, 0x0e, 0x7d, 0xc7, 0x60, 0x64, + 0x3e, 0x33, 0xfd, 0xdc, 0x71, 0x0f, 0xc7, 0xa9, 0x77, 0xb0, 0x6f, 0x14, 0x51, 0x41, 0x6b, 0x1e, + 0xb2, 0x64, 0x1f, 0x5c, 0xe3, 0xc1, 0xb4, 0x19, 0xb0, 0x1c, 0x78, 0x10, 0x84, 0xfd, 0x61, 0x9e, + 0xb1, 0x51, 0xdc, 0xbb, 0xb6, 0xe7, 0xf7, 0x0c, 0xdd, 0xaa, 0xd4, 0xfd, 0x30, 0x17, 0x24, 0x09, + 0xdf, 0x8d, 0x10, 0x88, 0xb2, 0xe1, 0x1e, 0x1a, 0xc9, 0x3a, 0xcf, 0xd8, 0xa1, 0xc3, 0x83, 0xa8, + 0x73, 0xaa, 0xbe, 0xc3, 0xe5, 0x80, 0xc0, 0x6e, 0x6f, 0x17, 0x82, 0x50, 0x7a, 0x2f, 0x89, 0x19, + 0x9d, 0xae, 0x38, 0x68, 0x77, 0xb4, 0x5e, 0x46, 0xb6, 0xcc, 0xdc, 0xee, 0x82, 0x38, 0x2b, 0x65, + 0x0a, 0x2e, 0xab, 0x96, 0x5a, 0x0d, 0xb1, 0x7d, 0x46, 0xad, 0x33, 0x6c, 0x21, 0xef, 0x28, 0x2b, + 0x62, 0x20, 0xe2, 0xc8, 0xb4, 0xcc, 0x0b, 0x8a, 0x1d, 0x3f, 0xb3, 0x42, 0x94, 0x8b, 0x90, 0xdb, + 0x03, 0x88, 0x6b, 0x52, 0x89, 0xb2, 0x5f, 0x4c, 0x04, 0x20, 0x63, 0x39, 0x51, 0xdc, 0xe1, 0x9d, + 0x7d, 0xb8, 0xd5, 0xeb, 0xde, 0x88, 0xd2, 0xb1, 0x77, 0x60, 0x2f, 0xac, 0xc8, 0xbf, 0xb4, 0xd8, + 0xe1, 0xbc, 0x2a, 0xee, 0x1f, 0x1c, 0x31, 0x9d, 0xdd, 0xc6, 0xf3, 0x1b, 0xfb, 0xe4, 0x8d, 0xf4, + 0x6d, 0xec, 0x29, 0xc0, 0x35, 0xb7, 0x5a, 0x08, 0x2d, 0x07, 0xfd, 0xfd, 0x10, 0xbf, 0x38, 0x97, + 0x10, 0x78, 0x7d, 0x55, 0x3e, 0x38, 0x2f, 0xc2, 0x1a, 0x2c, 0x55, 0x34, 0xad, 0x8c, 0x1e, 0x93, + 0xdd, 0x9c, 0x69, 0x44, 0xef, 0xd0, 0x5e, 0x78, 0x25, 0xd3, 0xb1, 0xaf, 0xb9, 0xa3, 0x44, 0x46, + 0xab, 0xf7, 0xc9, 0x93, 0xb4, 0x2b, 0x25, 0xdd, 0xb7, 0x54, 0xb4, 0xb2, 0x4d, 0xdf, 0x54, 0xbe, + 0x9c, 0x2f, 0xbc, 0xba, 0x53, 0x9b, 0x3d, 0x71, 0x65, 0x1b, 0x54, 0x8f, 0x79, 0x39, 0x05, 0xbd, + 0x63, 0xd0, 0x8b, 0x4e, 0x96, 0x09, 0x81, 0xbb, 0xb5, 0xa7, 0xb3, 0xdc, 0xb8, 0x72, 0x4f, 0xc0, + 0xcc, 0xf0, 0x3f, 0xa3, 0xf0, 0x43, 0xa3, 0x66, 0x1f, 0x55, 0x0b, 0x58, 0xaf, 0x38, 0x2d, 0x9c, + 0x7f, 0x0e, 0x42, 0xf8, 0x6e, 0x92, 0x3d, 0x8b, 0x0a, 0x54, 0x69, 0x84, 0x47, 0x90, 0x21, 0xe6, + 0x2c, 0xd9, 0x7a, 0x6d, 0x9e, 0xa5, 0x69, 0x8b, 0xef, 0xdf, 0xd2, 0x1c, 0xad, 0xa7, 0xc7, 0x8f, + 0xce, 0x7a, 0xa8, 0x6f, 0x01, 0x2c, 0xdc, 0x94, 0x06, 0x51, 0x30, 0x5c, 0x46, 0x4d, 0xf7, 0xee, + 0x7d, 0x8b, 0x9f, 0xc2, 0x09, 0x69, 0x4b, 0x71, 0x6a, 0xec, 0x4e, 0x06, 0xae, 0xc3, 0x6d, 0x28, + 0xa7, 0xfc, 0xad, 0x6d, 0xcf, 0x1a, 0xde, 0x9a, 0xd6, 0x79, 0x17, 0x88, 0xf2, 0x5f, 0x8d, 0x4e, + 0xa2, 0xe1, 0x4e, 0x65, 0xd5, 0x10, 0x93, 0xb0, 0x0d, 0x2e, 0x1c, 
0x32, 0xeb, 0x86, 0x18, 0xfe, + 0x79, 0x82, 0x72, 0x51, 0xf9, 0x2c, 0xeb, 0x2f, 0x95, 0x1a, 0x7f, 0x7f, 0xbc, 0xc9, 0xf0, 0xf9, + 0x05, 0xc8, 0x0c, 0xd8, 0x09, 0x95, 0x1c, 0x24, 0xe9, 0x5a, 0xe2, 0xe8, 0x43, 0xda, 0x0f, 0x43, + 0x9b, 0x92, 0x74, 0xf1, 0xec, 0x51, 0x2f, 0x0f, 0x4a, 0x89, 0x89, 0x15, 0x2d, 0xa2, 0xc3, 0x6a, + 0x14, 0x3f, 0x02, 0x16, 0x24, 0xe1, 0x41, 0xda, 0x55, 0xa6, 0x9d, 0xb6, 0x6e, 0x6e, 0x4f, 0x7b, + 0x6f, 0xcd, 0x0c, 0xa4, 0x89, 0x51, 0xb1, 0xd6, 0xde, 0x9b, 0x7e, 0xf7, 0xbb, 0x29, 0x93, 0x0f, + 0x9a, 0x62, 0xb9, 0x33, 0x55, 0x8d, 0xc0, 0x0d, 0xf5, 0x4b, 0x59, 0x89, 0xdf, 0x9a, 0x52, 0x6f, + 0xbd, 0x5c, 0x6a, 0x3c, 0x90, 0x4e, 0x83, 0xa3, 0x60, 0x5d, 0x58, 0x9d, 0x11, 0xa3, 0x89, 0x9a, + 0xd6, 0x75, 0xe2, 0x13, 0xbd, 0xb4, 0xf0, 0x18, 0x16, 0x48, 0x96, 0x22, 0x0c, 0x0c, 0xb7, 0x8e, + 0x41, 0x22, 0x3d, 0x44, 0x26, 0xe7, 0x2f, 0x6a, 0xcd, 0x42, 0xf3, 0x51, 0x57, 0x25, 0x50, 0x90, + 0x19, 0xc6, 0xc9, 0xf0, 0x00, 0xdc, 0x59, 0x32, 0xde, 0xf0, 0xde, 0xf9, 0xe8, 0xb8, 0x35, 0xb6, + 0x9a, 0x62, 0x9d, 0xe7, 0xc7, 0xc1, 0x48, 0xb8, 0x49, 0xb1, 0xec, 0x83, 0xb2, 0xa4, 0x9e, 0x4d, + 0x78, 0xe3, 0x13, 0x4e, 0xb3, 0x07, 0xfb, 0x37, 0xe9, 0x49, 0xe6, 0x59, 0xeb, 0xa4, 0x24, 0xbf, + 0x98, 0xeb, 0x00, 0x84, 0xa9, 0x66, 0xfd, 0x1e, 0xda, 0x22, 0x76, 0x7a, 0x22, 0x62, 0x4c, 0xf1, + 0xa3, 0xc8, 0x35, 0x4f, 0x3a, 0xb6, 0x62, 0x67, 0x9b, 0x27, 0x83, 0x4c, 0x93, 0x30, 0x19, 0x8c, + 0x66, 0xd6, 0xad, 0x0f, 0x7b, 0x8c, 0x46, 0xcd, 0x3c, 0xeb, 0x22, 0x3f, 0x8c, 0x0f, 0x13, 0xb0, + 0x57, 0xf9, 0xb4, 0x9e, 0xa6, 0x46, 0x71, 0x8a, 0x9d, 0x30, 0xec, 0xcd, 0x84, 0x2f, 0xdd, 0xb4, + 0x76, 0x9b, 0xe2, 0xd8, 0x72, 0xe9, 0xc7, 0x8b, 0x06, 0x4d, 0x51, 0x61, 0xc9, 0x75, 0xea, 0x25, + 0xcb, 0x94, 0x38, 0x21, 0xa5, 0xb5, 0x39, 0x5b, 0xd4, 0x87, 0x78, 0xde, 0x23, 0xd7, 0x25, 0x3e, + 0xce, 0x61, 0x49, 0xf7, 0x9f, 0x1a, 0x4a, 0x8e, 0x99, 0xc8, 0xce, 0x12, 0x17, 0x59, 0x2d, 0x5c, + 0x02, 0x15, 0x4a, 0xcb, 0xdd, 0x2f, 0xcc, 0x3e, 0xc7, 0x4f, 0x33, 0x76, 0x53, 0x63, 0x14, 0xf6, + 0x3d, 0xb7, 0xf8, 0xba, 0xa1, 0xf8, 0xc8, 0x93, 0x8a, 0x3b, 0x7b, 0x8e, 0x4b, 0x73, 0xfa, 0x51, + 0xee, 0xc1, 0x76, 0xdf, 0x51, 0x29, 0x0b, 0x7a, 0xde, 0xd3, 0x50, 0xde, 0xcc, 0xd5, 0x73, 0xc2, + 0x3d, 0x7e, 0x87, 0xd1, 0x2c, 0x80, 0x30, 0x57, 0xed, 0xbe, 0x82, 0x4b, 0xe4, 0xfc, 0x96, 0xfc, + 0x4a, 0x7f, 0x5f, 0x9f, 0xc7, 0x38, 0x27, 0x4d, 0x57, 0x2b, 0x67, 0xb5, 0x33, 0xa5, 0x55, 0xec, + 0x93, 0x6c, 0xd6, 0x53, 0x6e, 0xf8, 0x86, 0x43, 0x90, 0xa8, 0x5c, 0xe3, 0x6e, 0xe7, 0x9d, 0x21, + 0xcd, 0xea, 0x9d, 0x06, 0xf1, 0x0d, 0x34, 0x39, 0x2d, 0x77, 0x5b, 0xbe, 0x44, 0x19, 0x42, 0xbd, + 0x62, 0x6a, 0x1e, 0x89, 0x59, 0x42, 0x38, 0xd2, 0xb2, 0xc2, 0xef, 0x02, 0x3b, 0xf3, 0x25, 0xfa, + 0xd1, 0x2c, 0x52, 0xbb, 0x73, 0xe2, 0x86, 0x3a, 0xe3, 0x34, 0x86, 0x32, 0x70, 0x09, 0x78, 0xd6, + 0xba, 0xe9, 0x02, 0x61, 0x52, 0x9e, 0x02, 0x3b, 0x58, 0x2a, 0xf6, 0xf5, 0x31, 0xd8, 0x1c, 0xf5, + 0x71, 0xd0, 0x12, 0x0f, 0x34, 0x6b, 0x64, 0xfa, 0xfb, 0x8b, 0x89, 0xac, 0xf4, 0xb4, 0x72, 0xe6, + 0x27, 0x61, 0xd2, 0x03, 0xaf, 0xed, 0x04, 0x0b, 0xf7, 0x27, 0x1c, 0x07, 0xe3, 0x00, 0x3e, 0x1e, + 0x0d, 0x37, 0x3e, 0xda, 0x65, 0x59, 0x5e, 0x5a, 0x07, 0xba, 0xc5, 0x25, 0xd1, 0x21, 0xbc, 0x1b, + 0x9d, 0xae, 0xb7, 0xe1, 0x31, 0x2d, 0x9f, 0x53, 0x71, 0xf4, 0x4d, 0x8e, 0x99, 0xcf, 0x06, 0xf7, + 0x62, 0x9c, 0x80, 0xff, 0xac, 0xf6, 0xc1, 0xe1, 0x25, 0xe1, 0x8c, 0xc7, 0x3f, 0x72, 0xad, 0x40, + 0x04, 0xb6, 0x89, 0xa3, 0x24, 0x88, 0x24, 0xb4, 0x31, 0xf0, 0xfa, 0x15, 0xd9, 0x9b, 0x90, 0x1f, + 0x27, 0x22, 0x55, 0x98, 0x43, 0xf0, 0x60, 0x73, 0x41, 0xd9, 0x51, 0x38, 0x2e, 0x23, 0xc3, 
0x95, + 0xbc, 0xfe, 0xb7, 0x86, 0x77, 0xd6, 0x5f, 0xe7, 0xd1, 0xc6, 0x22, 0xf1, 0x16, 0xcc, 0x75, 0x23, + 0x6b, 0x8a, 0x43, 0x84, 0xfe, 0x56, 0xfe, 0xae, 0xc2, 0x1d, 0xbe, 0xf2, 0x57, 0x6f, 0x73, 0x43, + 0x02, 0x35, 0xe7, 0xb7, 0x1f, 0xa7, 0x18, 0x65, 0xde, 0xa2, 0xbf, 0x9e, 0xaa, 0x79, 0xac, 0xe2, + 0xec, 0xd9, 0x9c, 0xeb, 0x98, 0x8d, 0xbe, 0x90, 0xe1, 0xce, 0x8d, 0x54, 0x4a, 0x7c, 0x01, 0x08, + 0x77, 0x32, 0x80, 0x90, 0x20, 0x4d, 0x05, 0x41, 0x65, 0xab, 0x1f, 0x13, 0xbe, 0x1c, 0x4c, 0x52, + 0xdc, 0xa2, 0x3f, 0x59, 0xc5, 0x58, 0x54, 0xda, 0xc2, 0xb3, 0xbe, 0xed, 0x77, 0xcd, 0x15, 0xce, + 0x4b, 0x69, 0x59, 0x54, 0x9c, 0x47, 0xe0, 0x24, 0x00, 0xd0, 0x54, 0xac, 0x74, 0xa4, 0xb9, 0x50, + 0x78, 0x88, 0x71, 0xb3, 0x5a, 0x3d, 0xcf, 0x77, 0x4d, 0xa9, 0xcd, 0x99, 0xca, 0x6e, 0xda, 0x08, + 0x8c, 0x4f, 0x63, 0xfe, 0xde, 0x61, 0x28, 0x78, 0xcb, 0x8a, 0xc0, 0xc5, 0x2c, 0xdc, 0x46, 0x71, + 0x00, 0xd6, 0x48, 0xc1, 0xe7, 0xf5, 0x52, 0x53, 0xda, 0x9a, 0x15, 0x11, 0xbc, 0x11, 0x8b, 0xa4, + 0x5e, 0x8a, 0x6c, 0x30, 0xf4, 0x47, 0x85, 0x1d, 0x0c, 0x03, 0xa9, 0x8b, 0x37, 0xa9, 0x21, 0xa1, + 0xe6, 0x3f, 0xe5, 0x37, 0x67, 0x39, 0xb4, 0xe3, 0x96, 0xe8, 0x7e, 0x9a, 0x50, 0x94, 0x1d, 0x29, + 0x45, 0x43, 0x0c, 0x08, 0x46, 0x2c, 0xd0, 0x94, 0xe3, 0x5f, 0x5f, 0x5a, 0xf7, 0x96, 0x26, 0xd8, + 0xba, 0x5c, 0x6d, 0x6d, 0xa1, 0xf5, 0x84, 0xd6, 0xcd, 0x9a, 0xa8, 0x26, 0x81, 0x51, 0x82, 0x2a, + 0x29, 0xc5, 0xca, 0xf9, 0xc9, 0xbf, 0xaa, 0x87, 0xaf, 0xfb, 0x5f, 0x52, 0xb4, 0x81, 0xc7, 0x72, + 0x4a, 0x1a, 0x94, 0x49, 0xf9, 0x54, 0xb0, 0x1c, 0x66, 0xcc, 0x54, 0x30, 0x50, 0x4d, 0x8a, 0x54, + 0xbe, 0x77, 0xf7, 0x65, 0xe8, 0xa0, 0x59, 0xea, 0xb3, 0x37, 0x4f, 0xb5, 0xc9, 0x49, 0x8d, 0x58, + 0xa4, 0x64, 0x36, 0xd3, 0xd3, 0x68, 0x1c, 0xa9, 0x6b, 0x14, 0xf7, 0xa2, 0x00, 0x16, 0x73, 0x81, + 0x08, 0x77, 0xcc, 0xa3, 0xd3, 0xd9, 0x7c, 0x07, 0x31, 0x3a, 0x8f, 0x0c, 0xad, 0x8a, 0x16, 0xfc, + 0x6c, 0xd6, 0x00, 0xb3, 0xee, 0x1e, 0xde, 0x31, 0x86, 0x31, 0xf9, 0x0d, 0x58, 0xbb, 0x78, 0x1e, + 0xc7, 0xde, 0x83, 0x21, 0x6b, 0xdf, 0xca, 0xef, 0xb9, 0xd8, 0xe8, 0xe8, 0x32, 0x4a, 0x0b, 0xbd, + 0x46, 0x23, 0x74, 0x6b, 0x9e, 0x9c, 0x85, 0x81, 0x96, 0x01, 0x78, 0xd4, 0xa4, 0x19, 0x7e, 0x27, + 0xbb, 0x20, 0xcd, 0x3e, 0xb9, 0x45, 0xa6, 0x6b, 0x4c, 0x0c, 0xb8, 0xc2, 0x20, 0x86, 0x6a, 0xd4, + 0x48, 0x80, 0xfc, 0x90, 0xfe, 0xe2, 0x05, 0x26, 0xbe, 0x12, 0xcd, 0x8c, 0xaa, 0xc4, 0xc6, 0xeb, + 0x28, 0x81, 0x4b, 0x8c, 0x18, 0x74, 0xe9, 0xd7, 0x61, 0x9b, 0x3c, 0x85, 0x24, 0x25, 0xf3, 0x6a, + 0xcf, 0x00, 0x82, 0x7a, 0xd3, 0x8e, 0x5b, 0x61, 0x74, 0xdc, 0x1c, 0x7f, 0x97, 0x10, 0x2c, 0xe1, + 0x18, 0xcf, 0x96, 0x83, 0xdc, 0xc4, 0x45, 0xb3, 0x9b, 0xcc, 0x09, 0xb4, 0x9a, 0x03, 0x98, 0xa7, + 0xfa, 0xc3, 0xb9, 0x80, 0x1c, 0xba, 0x5e, 0x75, 0x98, 0x9b, 0xaf, 0x69, 0xfb, 0x26, 0x3b, 0xdc, + 0xde, 0x95, 0xea, 0xf4, 0xfe, 0x44, 0x3c, 0x99, 0x7c, 0x6b, 0xc4, 0x47, 0xe6, 0x0e, 0x0e, 0xea, + 0x51, 0xb6, 0x3d, 0x52, 0x47, 0x66, 0xf6, 0x1b, 0x1b, 0x7f, 0xd3, 0xab, 0x32, 0x88, 0x5f, 0xe5, + 0xfa, 0xe3, 0x4a, 0xa3, 0x49, 0xf6, 0xf7, 0x6f, 0x97, 0xad, 0xb0, 0x76, 0xb6, 0xa7, 0x90, 0x52, + 0x48, 0x50, 0x2f, 0xc3, 0x9e, 0x6c, 0x9d, 0x36, 0x15, 0x6d, 0x3a, 0x58, 0x89, 0x7d, 0xac, 0xe3, + 0xd9, 0x7c, 0x0c, 0x5f, 0x61, 0xfb, 0x70, 0xb2, 0xf2, 0xc1, 0xdd, 0x77, 0xbd, 0x47, 0xa7, 0xe1, + 0x43, 0xe0, 0x3d, 0xc6, 0x8f, 0x7c, 0x37, 0x27, 0x25, 0x1e, 0xde, 0x49, 0x1d, 0xa5, 0x78, 0x52, + 0xeb, 0x38, 0x38, 0x8f, 0x5b, 0xfc, 0xaf, 0x9b, 0x9d, 0xad, 0x93, 0x17, 0x57, 0xa1, 0x59, 0x2c, + 0xff, 0x02, 0xbd, 0x08, 0x92, 0x3b, 0x4e, 0x8e, 0x01, 0x91, 0x89, 0x33, 0x97, 0xaa, 0x3e, 0xe5, + 0xe3, 0x36, 0x30, 
0x57, 0xe7, 0x00, 0x41, 0x76, 0x28, 0x05, 0xde, 0x7b, 0x3d, 0xec, 0xef, 0x5b, + 0x2c, 0xb3, 0xa3, 0xa9, 0xa7, 0xe8, 0x9e, 0x2a, 0x98, 0x84, 0xc4, 0x5c, 0x6c, 0x29, 0x7d, 0xf3, + 0x6a, 0x73, 0x6e, 0xd6, 0x61, 0x91, 0xb0, 0x34, 0xcb, 0xdc, 0x55, 0xd7, 0x6c, 0xa8, 0x3c, 0xd1, + 0xae, 0xfe, 0x2c, 0x5e, 0x2c, 0x00, 0xf9, 0x1a, 0x33, 0xe0, 0xac, 0x2f, 0x1c, 0xa9, 0xa8, 0x61, + 0x45, 0xc6, 0xf7, 0xae, 0x42, 0x7f, 0x9d, 0xda, 0x25, 0xc7, 0x71, 0xe8, 0xf1, 0x3e, 0xd2, 0x7c, + 0xf6, 0x5d, 0x6b, 0x62, 0xc1, 0x64, 0x1a, 0xc8, 0xb8, 0xbb, 0x3e, 0x0d, 0xba, 0xfd, 0x39, 0x84, + 0x43, 0x23, 0x41, 0xb5, 0x00, 0xe6, 0x50, 0x81, 0x8f, 0x32, 0xcb, 0x3b, 0x1a, 0x9d, 0x91, 0x56, + 0xe5, 0x47, 0x08, 0xa6, 0x9b, 0x8d, 0x43, 0xff, 0x2c, 0xc9, 0xe5, 0x24, 0x56, 0x8b, 0x7b, 0x6a, + 0x6c, 0x5e, 0xf0, 0xbe, 0xc7, 0x69, 0x2b, 0x3f, 0xa0, 0x1f, 0x25, 0x6b, 0x11, 0x0d, 0x5d, 0x65, + 0x8b, 0x39, 0x08, 0x42, 0x75, 0xb7, 0x22, 0xfb, 0xf9, 0x87, 0xb4, 0x34, 0x55, 0x4c, 0x50, 0x51, + 0x8b, 0xb6, 0xe0, 0x34, 0xa9, 0x35, 0x73, 0xd7, 0x40, 0x55, 0xbe, 0x34, 0x4f, 0xdb, 0xe8, 0xbb, + 0xc1, 0x3a, 0x3e, 0x69, 0xf2, 0xb7, 0x1e, 0x8a, 0x6c, 0x4e, 0xb2, 0x97, 0xdf, 0x1c, 0x73, 0x82, + 0x37, 0x15, 0xf3, 0xe2, 0x1a, 0xb6, 0x9d, 0xe4, 0xbd, 0x7e, 0xa1, 0xf5, 0x28, 0x59, 0xde, 0x7e, + 0x1c, 0xe9, 0x56, 0x19, 0xbc, 0x70, 0x1c, 0xd1, 0xd7, 0xeb, 0xce, 0x89, 0x95, 0x08, 0xbe, 0xc6, + 0xf4, 0xa9, 0x26, 0x26, 0xf3, 0xf2, 0x0b, 0x87, 0x57, 0x94, 0x19, 0x86, 0xab, 0x33, 0xc5, 0x61, + 0x1e, 0x7f, 0xed, 0xb3, 0xeb, 0xcb, 0x99, 0x78, 0x2b, 0x66, 0xf0, 0x52, 0xf4, 0x0b, 0x21, 0xc1, + 0x2f, 0x02, 0x75, 0x1e, 0x75, 0xa3, 0xd7, 0x1c, 0xcc, 0xea, 0xa4, 0x68, 0x9f, 0x9c, 0xb8, 0x93, + 0x52, 0x6c, 0x3b, 0x07, 0x54, 0xb9, 0xc5, 0x40, 0x31, 0xe0, 0xd1, 0x1a, 0x41, 0x72, 0x3e, 0x71, + 0x32, 0x57, 0x2b, 0x7c, 0x45, 0x2b, 0xef, 0x5e, 0x53, 0x61, 0x11, 0xfc, 0x9e, 0xac, 0x85, 0x4b, + 0xe7, 0x12, 0x6d, 0xc6, 0x71, 0x1b, 0xb6, 0x5b, 0x9b, 0x42, 0xbb, 0x37, 0xb0, 0x3c, 0x5e, 0x5f, + 0x2d, 0x6f, 0x96, 0xe8, 0x12, 0xce, 0x1b, 0xb2, 0x17, 0x51, 0x6e, 0xc9, 0xec, 0x3b, 0xec, 0x96, + 0x43, 0xdc, 0x8c, 0xa0, 0xcc, 0x42, 0xe8, 0xc6, 0x9f, 0x7d, 0x3e, 0xef, 0xc1, 0x97, 0x1c, 0x10, + 0x7e, 0x30, 0x7a, 0xe1, 0xfc, 0xee, 0x42, 0x2e, 0x18, 0x80, 0xc7, 0xd1, 0xf6, 0x1d, 0x56, 0xe7, + 0x71, 0x41, 0x9f, 0x7f, 0x47, 0xb4, 0xbb, 0xd0, 0x25, 0x15, 0xcc, 0x07, 0x4c, 0x79, 0x81, 0x9e, + 0x54, 0x5b, 0x8e, 0x97, 0xc7, 0x8d, 0xa3, 0x32, 0xd3, 0xbc, 0x5f, 0xc5, 0x02, 0x6f, 0xc3, 0xf0, + 0xdc, 0x69, 0xed, 0x79, 0xcc, 0xf3, 0xbf, 0xb5, 0x7e, 0xb1, 0x33, 0xc6, 0x14, 0x93, 0xa6, 0xb3, + 0x5a, 0x43, 0x6c, 0x24, 0xcd, 0x14, 0xdc, 0x91, 0xef, 0x29, 0xa3, 0x04, 0x19, 0xe7, 0xd7, 0x24, + 0x4e, 0x2b, 0x4b, 0xee, 0xc4, 0xea, 0x57, 0x32, 0x6a, 0xb5, 0xe7, 0x4d, 0xb8, 0x89, 0xd7, 0xd4, + 0x22, 0xb7, 0x40, 0x97, 0x71, 0x9c, 0x9a, 0x27, 0x4e, 0x5d, 0xb2, 0xd6, 0x4a, 0x67, 0xc6, 0x7f, + 0x73, 0xd4, 0x94, 0xe1, 0x6f, 0x9b, 0x0f, 0x0f, 0xe7, 0x57, 0xfc, 0x15, 0xcb, 0x3c, 0xfd, 0x88, + 0xa6, 0x73, 0x33, 0x28, 0x5d, 0x6d, 0xfa, 0x3e, 0x46, 0xda, 0x7b, 0x9c, 0xb7, 0x52, 0x7d, 0x37, + 0x25, 0xbe, 0x08, 0x8c, 0xfe, 0xef, 0xfd, 0xac, 0x53, 0xd2, 0xa7, 0x8b, 0x1b, 0xa7, 0xe7, 0x6b, + 0x1d, 0x2c, 0x02, 0xda, 0xbf, 0xca, 0x63, 0x24, 0x6d, 0xbc, 0xc4, 0x96, 0xf6, 0xd0, 0x6b, 0x41, + 0x4d, 0xd2, 0x9c, 0x7c, 0xbb, 0x4f, 0x36, 0x2f, 0x31, 0x2b, 0xf7, 0x5c, 0xf0, 0x3e, 0xaf, 0x45, + 0xfe, 0x68, 0xc3, 0xe7, 0x9b, 0xe9, 0xc0, 0xe3, 0xd7, 0x2b, 0xcd, 0x6a, 0x48, 0x1d, 0x7e, 0x67, + 0x70, 0x63, 0x1c, 0x1b, 0xb2, 0x3f, 0x47, 0xb7, 0x20, 0xd6, 0xda, 0x68, 0x64, 0xe8, 0x7c, 0x67, + 0xbe, 0x0a, 0xb9, 0x68, 0x9e, 0xf2, 0x25, 
0x1d, 0x98, 0x95, 0x08, 0x0c, 0xab, 0x88, 0xce, 0xff, + 0x44, 0xcb, 0x28, 0xf3, 0x73, 0x07, 0x81, 0x0f, 0x18, 0xf8, 0xb8, 0xdb, 0x29, 0xec, 0xfa, 0x7b, + 0xff, 0x3a, 0x8b, 0xa9, 0x4e, 0xa3, 0x15, 0x59, 0x2b, 0xcf, 0x61, 0xe2, 0x44, 0x0f, 0x0d, 0x57, + 0xac, 0xf2, 0xfa, 0xcf, 0xb1, 0xa0, 0x08, 0x6b, 0x94, 0x84, 0x68, 0x66, 0xf9, 0x99, 0x80, 0x7d, + 0x22, 0xa0, 0x44, 0x4c, 0x61, 0x99, 0x47, 0x8e, 0xff, 0x23, 0xab, 0x8f, 0x4b, 0x22, 0x1a, 0x69, + 0x62, 0xd6, 0xa7, 0x0d, 0xa2, 0xea, 0xf6, 0x25, 0x21, 0x3e, 0x51, 0xd4, 0x46, 0x50, 0xe1, 0x31, + 0x87, 0xa3, 0xf3, 0x48, 0x18, 0xd7, 0xb2, 0x82, 0x3c, 0x3d, 0xc4, 0x33, 0x09, 0xd5, 0x5b, 0xea, + 0x89, 0x7c, 0xb4, 0x83, 0x02, 0x6c, 0x67, 0xae, 0x94, 0xcf, 0x19, 0x14, 0x86, 0xf5, 0x15, 0xae, + 0x42, 0x50, 0xf5, 0x85, 0xd2, 0xcc, 0x14, 0x15, 0xda, 0x39, 0x92, 0xad, 0x12, 0xcb, 0xf2, 0x42, + 0xc7, 0x4e, 0x5b, 0x9c, 0x49, 0x67, 0x15, 0x76, 0xe3, 0xa2, 0xfc, 0x83, 0x0b, 0x8d, 0x7e, 0x4d, + 0x9c, 0xa9, 0xb4, 0x92, 0xc4, 0xe5, 0x19, 0xa5, 0xd9, 0x5b, 0x99, 0xa2, 0x9b, 0x10, 0x1f, 0x2d, + 0xe9, 0x11, 0x3d, 0xe2, 0x12, 0xaf, 0xde, 0x51, 0x12, 0x17, 0x76, 0xf5, 0xf4, 0xf7, 0x88, 0x9e, + 0x79, 0xa8, 0xdc, 0x4c, 0xc0, 0xc3, 0xab, 0x74, 0x35, 0x4e, 0xba, 0x37, 0x2a, 0x0a, 0xf9, 0x84, + 0x3f, 0x96, 0x3e, 0x56, 0x4e, 0x78, 0x20, 0x60, 0x3e, 0x5a, 0x9c, 0x74, 0x06, 0x8a, 0x37, 0x8a, + 0x39, 0x2d, 0x1f, 0xfc, 0x29, 0x5a, 0x73, 0x66, 0xe7, 0xa5, 0x8d, 0xe3, 0x2a, 0x24, 0x37, 0x99, + 0x87, 0x1f, 0xf3, 0xb5, 0xc5, 0x42, 0xea, 0x28, 0xc2, 0x0b, 0x6b, 0x78, 0x9d, 0xbe, 0x8c, 0xf8, + 0x30, 0x46, 0x46, 0x0a, 0x7c, 0xc8, 0x10, 0x54, 0xad, 0x23, 0xb4, 0xbd, 0x75, 0x1e, 0x79, 0x61, + 0xbf, 0x29, 0x30, 0xe1, 0x6e, 0xd4, 0x76, 0xa7, 0x0e, 0xc2, 0x37, 0xe9, 0xab, 0xd4, 0x08, 0xe8, + 0xf1, 0x21, 0x34, 0x03, 0x48, 0x72, 0xc4, 0x5a, 0x09, 0xaf, 0xaf, 0x10, 0x55, 0x32, 0x0f, 0xe2, + 0x8a, 0x74, 0x24, 0x00, 0x63, 0x37, 0x46, 0xaf, 0xe1, 0x5f, 0x86, 0x64, 0x1c, 0x5c, 0xed, 0x44, + 0x19, 0x0e, 0xa3, 0x0e, 0x88, 0x8d, 0xcb, 0xd9, 0x84, 0x53, 0xb1, 0x9e, 0x48, 0x3a, 0x82, 0x26, + 0x62, 0x9b, 0xa0, 0xe0, 0x0a, 0x55, 0x6a, 0x46, 0x6f, 0xb6, 0x46, 0xd1, 0x5e, 0xc8, 0xf7, 0x82, + 0x69, 0xc1, 0x3f, 0x80, 0xd6, 0x93, 0x8a, 0x92, 0xc1, 0xf1, 0xce, 0x17, 0x44, 0x35, 0xa6, 0x42, + 0xf7, 0xaa, 0xec, 0xa7, 0x1b, 0x8c, 0xf4, 0xe7, 0xe1, 0xeb, 0xf2, 0x71, 0xad, 0xe9, 0xed, 0x7c, + 0x1a, 0x9b, 0x98, 0xef, 0xfe, 0x47, 0xcb, 0x86, 0xd5, 0x71, 0x95, 0x48, 0x3e, 0xb6, 0x81, 0x05, + 0xd1, 0xa3, 0xb5, 0xec, 0xb9, 0x39, 0x32, 0xbe, 0xdf, 0x28, 0xcb, 0xa5, 0xe2, 0x30, 0x6e, 0xc6, + 0x3e, 0xea, 0xca, 0xff, 0x92, 0xad, 0x4a, 0x4b, 0x73, 0xb5, 0xd4, 0x1d, 0x7a, 0x45, 0x80, 0x93, + 0x02, 0xca, 0xa3, 0xdb, 0x27, 0xf1, 0x17, 0x05, 0x63, 0xd3, 0x71, 0x70, 0x82, 0x86, 0x30, 0xe9, + 0xb1, 0x7a, 0xf4, 0xa3, 0x0a, 0x1e, 0x2f, 0xf7, 0xdf, 0x8e, 0x95, 0x69, 0x11, 0x11, 0x74, 0xff, + 0xb1, 0xc4, 0x58, 0xe3, 0x7d, 0x38, 0xc7, 0xfb, 0x23, 0xb5, 0xd2, 0x5d, 0x80, 0x2f, 0xb5, 0x46, + 0x9d, 0xb5, 0x1a, 0x96, 0xcc, 0x0c, 0x0e, 0x31, 0x89, 0x2d, 0x07, 0xbc, 0x6f, 0x60, 0x04, 0xbb, + 0x0f, 0xae, 0xcb, 0xf6, 0xc0, 0x10, 0x07, 0x83, 0x85, 0x30, 0x4a, 0x5f, 0xe8, 0x4f, 0x69, 0x90, + 0x8d, 0x52, 0xa8, 0x0b, 0xd5, 0xaa, 0x44, 0x4d, 0x94, 0x96, 0x48, 0x15, 0xef, 0x2a, 0x3c, 0xea, + 0x81, 0x95, 0x75, 0xac, 0xf5, 0xd8, 0x25, 0xfc, 0x45, 0x5e, 0xc2, 0xf9, 0xa5, 0x87, 0x74, 0x35, + 0xb0, 0x16, 0x63, 0xec, 0xc8, 0x37, 0x54, 0xef, 0x8c, 0x04, 0xbd, 0xbe, 0xc6, 0xda, 0x31, 0x0c, + 0xc7, 0xa5, 0x72, 0xd3, 0x50, 0x56, 0xbc, 0x26, 0x8e, 0xa1, 0x2e, 0x52, 0x89, 0x41, 0xd0, 0x35, + 0x7b, 0x95, 0xd6, 0xae, 0xdd, 0x80, 0xd2, 0x4d, 0x21, 0xcb, 0xb5, 
0xbe, 0x02, 0x3b, 0x6c, 0xfc, + 0xc3, 0x13, 0xe5, 0xc4, 0x53, 0x57, 0x44, 0xdd, 0x60, 0x66, 0xb9, 0x09, 0x97, 0x92, 0xd1, 0x16, + 0x5b, 0xfe, 0x64, 0x31, 0x8d, 0x5b, 0x56, 0xdd, 0x25, 0x0f, 0xca, 0x15, 0x05, 0x6a, 0x69, 0xdd, + 0x5f, 0x81, 0x56, 0x7f, 0xe7, 0xe7, 0x84, 0xdd, 0x92, 0xc3, 0xbb, 0x27, 0x52, 0x85, 0xfd, 0xa1, + 0x85, 0xe7, 0x63, 0xbb, 0x56, 0x78, 0xc5, 0xe9, 0x76, 0xf7, 0x4c, 0xa9, 0x30, 0x08, 0x55, 0x8d, + 0x6a, 0xd1, 0xb0, 0xbf, 0x7e, 0x46, 0xf8, 0x5f, 0x23, 0xe1, 0x94, 0xd5, 0x1d, 0xdf, 0x7e, 0xfd, + 0x0b, 0x9b, 0x18, 0x2f, 0x88, 0x6e, 0xd4, 0x89, 0xca, 0xfd, 0x2a, 0x09, 0xb3, 0x7c, 0x11, 0x4b, + 0x17, 0xc6, 0x06, 0xf7, 0x90, 0xb3, 0xee, 0xb4, 0xa3, 0x53, 0x11, 0x34, 0x40, 0xa2, 0xcb, 0xbc, + 0xd9, 0xbf, 0x9d, 0xed, 0x03, 0xd1, 0x14, 0xee, 0x07, 0x70, 0x43, 0xe8, 0xc7, 0xc3, 0x20, 0x56, + 0x84, 0x34, 0x06, 0x0a, 0xed, 0x88, 0xed, 0xc8, 0xdd, 0xd9, 0x00, 0xbf, 0xc0, 0xe6, 0x88, 0xae, + 0x3d, 0x81, 0xe3, 0x2f, 0x6f, 0x53, 0x03, 0x7c, 0xa2, 0x37, 0x1a, 0x3d, 0xa9, 0x82, 0x88, 0xf9, + 0x4d, 0xd0, 0x82, 0x3c, 0x7d, 0xba, 0x38, 0xe2, 0x64, 0x4b, 0x42, 0xb1, 0xf3, 0x7f, 0x34, 0x43, + 0x4e, 0xb4, 0x25, 0x5e, 0x5b, 0x11, 0x69, 0x18, 0x31, 0xb5, 0xb9, 0xb6, 0x16, 0xed, 0x4d, 0x33, + 0xfa, 0xdc, 0x77, 0xde, 0xbb, 0xdb, 0x63, 0x16, 0x4b, 0x1d, 0x8e, 0xff, 0x46, 0x9c, 0x78, 0x04, + 0x39, 0x7e, 0xa2, 0x8c, 0x24, 0xea, 0xf5, 0x49, 0x35, 0x63, 0x36, 0x92, 0x22, 0xdf, 0xd2, 0xac, + 0xb7, 0xe2, 0xf0, 0x22, 0xb8, 0xfa, 0xcb, 0x02, 0x3b, 0x77, 0x95, 0x1a, 0xdd, 0x42, 0x44, 0x60, + 0x5d, 0xad, 0x7e, 0xa8, 0x62, 0xf1, 0xf9, 0x1a, 0x93, 0x9c, 0x2d, 0xe0, 0x36, 0x46, 0x5c, 0xcb, + 0x1b, 0x9c, 0xc9, 0xe8, 0x4a, 0x47, 0x68, 0x3b, 0xe8, 0x55, 0x91, 0x3b, 0xe7, 0x3b, 0x0c, 0xdc, + 0x2c, 0xbd, 0xf3, 0xb5, 0x20, 0x93, 0x43, 0x6f, 0x4f, 0x9d, 0xe0, 0xed, 0xeb, 0x60, 0xc3, 0xe4, + 0x18, 0x0f, 0x20, 0x6c, 0x53, 0xfe, 0x38, 0x2c, 0x9b, 0x9f, 0x98, 0x4a, 0xad, 0x78, 0xd8, 0x09, + 0xcb, 0x60, 0x05, 0xec, 0xaf, 0x72, 0xfa, 0xc2, 0x27, 0x14, 0x8b, 0x2e, 0xf2, 0x31, 0xd8, 0xee, + 0x6e, 0x30, 0xbf, 0xcf, 0xd9, 0x69, 0x99, 0xab, 0x82, 0x20, 0x3a, 0xd1, 0xfc, 0xbd, 0x11, 0x1f, + 0x1e, 0xc9, 0x68, 0x05, 0x68, 0x3e, 0xcb, 0x38, 0xe8, 0x7a, 0x51, 0xb7, 0xfe, 0xe8, 0x10, 0xb4, + 0x04, 0xce, 0xa0, 0xb4, 0xb7, 0xd0, 0xba, 0xe0, 0x7a, 0x48, 0x94, 0xef, 0x3b, 0xcb, 0x45, 0xf4, + 0x0e, 0x47, 0xff, 0xa0, 0x81, 0x92, 0x38, 0x37, 0x52, 0x2d, 0xea, 0xca, 0x87, 0xf5, 0x1e, 0x5b, + 0x50, 0x8e, 0x0d, 0x89, 0x1c, 0x13, 0xa6, 0x25, 0x24, 0x77, 0xa0, 0x6d, 0x15, 0xa7, 0xdc, 0xb1, + 0x48, 0xac, 0x27, 0xc4, 0x37, 0xe4, 0x93, 0x6e, 0x3e, 0x58, 0x21, 0x7c, 0xb5, 0x93, 0xd0, 0x60, + 0xc1, 0x4b, 0x58, 0x5a, 0x4d, 0x19, 0xde, 0xcc, 0xe1, 0x3e, 0x12, 0x7b, 0x7a, 0x5d, 0x62, 0x8d, + 0xe1, 0xc8, 0xfc, 0xbc, 0x1e, 0x1c, 0xea, 0xf5, 0xe9, 0xb7, 0xb6, 0xbf, 0x7d, 0xfb, 0xc4, 0x3b, + 0xf6, 0x0d, 0x5c, 0x75, 0x0b, 0x9b, 0xe7, 0x11, 0xa9, 0xa8, 0x73, 0x02, 0x6c, 0x52, 0x1e, 0x11, + 0xd5, 0x92, 0x8e, 0x61, 0x1a, 0x01, 0xf5, 0xc7, 0x6f, 0xc7, 0x88, 0x14, 0x59, 0xc6, 0xa2, 0x2d, + 0xb6, 0x6e, 0x03, 0xaf, 0xd3, 0xc5, 0x5a, 0x18, 0xd0, 0x82, 0x92, 0x6f, 0x77, 0xe2, 0x5c, 0x86, + 0x50, 0x02, 0xd5, 0x7a, 0xee, 0x0e, 0xf2, 0xa1, 0x8f, 0x9d, 0x42, 0x12, 0x62, 0x41, 0x1f, 0x17, + 0x8f, 0x11, 0xd8, 0x90, 0x29, 0x99, 0x48, 0x77, 0x8e, 0x43, 0x17, 0x2a, 0xd2, 0x67, 0x7b, 0xea, + 0x1c, 0xe4, 0xd6, 0x2d, 0x5d, 0xdd, 0x22, 0xe4, 0x5d, 0xca, 0x0a, 0x40, 0x93, 0x23, 0x7d, 0x39, + 0x85, 0x91, 0xaa, 0xd5, 0xe3, 0x42, 0xcd, 0x76, 0x5c, 0xb5, 0x45, 0x6e, 0xef, 0x0a, 0x84, 0xa6, + 0x2a, 0xa9, 0xeb, 0xa2, 0x7c, 0x16, 0xde, 0x3b, 0xfa, 0x0c, 0x92, 0x63, 0xb4, 0x0b, 0xbb, 
0xdc, + 0xad, 0x79, 0xcd, 0x30, 0x6a, 0xba, 0x3c, 0x7d, 0x1f, 0x44, 0x22, 0x55, 0x8d, 0x22, 0x2b, 0x9d, + 0xbb, 0x4e, 0x7e, 0xa9, 0xde, 0x69, 0x61, 0xe8, 0x78, 0x78, 0x3e, 0xbc, 0x84, 0xc6, 0xe6, 0x0a, + 0x1f, 0x2c, 0x5d, 0xba, 0x4d, 0x96, 0xa0, 0xb5, 0xbb, 0x76, 0x0a, 0x54, 0xef, 0x7c, 0x7b, 0xa5, + 0xc9, 0x26, 0xd7, 0xc6, 0x9d, 0xb6, 0x93, 0x61, 0xc5, 0x85, 0x73, 0xb5, 0x57, 0x8d, 0xa2, 0xb2, + 0x90, 0xd3, 0xfb, 0x50, 0x8b, 0x37, 0x15, 0x58, 0xb2, 0x64, 0x9c, 0xf5, 0xc8, 0x96, 0xdd, 0x7c, + 0xca, 0x27, 0xa2, 0x9b, 0xae, 0xe8, 0x1c, 0x11, 0x0c, 0xd7, 0x88, 0x0c, 0x92, 0x31, 0xfa, 0x15, + 0xdc, 0x28, 0x33, 0xac, 0x73, 0x72, 0x47, 0xf1, 0xd7, 0x20, 0xde, 0x33, 0xe3, 0x5a, 0x73, 0x4d, + 0x5f, 0x77, 0xc5, 0x99, 0xb7, 0x2c, 0x44, 0x63, 0x6a, 0xa3, 0x10, 0x89, 0xf1, 0xb9, 0xd9, 0x31, + 0x6e, 0x3d, 0x30, 0xdc, 0x35, 0xfa, 0x21, 0xa9, 0xea, 0xe4, 0x3e, 0xfb, 0x6b, 0xb3, 0xd3, 0x5e, + 0xd0, 0xf6, 0x7c, 0xa7, 0x93, 0xfb, 0x87, 0x5b, 0x16, 0xd8, 0xf7, 0x1f, 0x76, 0xd9, 0x20, 0xda, + 0x38, 0x8f, 0x09, 0x02, 0x13, 0xf6, 0xe5, 0xf5, 0xbc, 0xf9, 0x40, 0x42, 0xf9, 0xda, 0xbf, 0x8a, + 0xf7, 0x79, 0xd8, 0x6d, 0x91, 0x32, 0x05, 0x94, 0x3a, 0x68, 0xcc, 0xeb, 0xf0, 0xa9, 0xa9, 0x05, + 0x1c, 0xc4, 0x30, 0x06, 0xfe, 0x0f, 0x3b, 0x5f, 0xfb, 0x5e, 0xc3, 0xcc, 0x14, 0x6e, 0xcb, 0x15, + 0x79, 0xc7, 0xb6, 0xf5, 0xe2, 0x34, 0x9e, 0xd7, 0x71, 0x54, 0x25, 0x1c, 0x99, 0x23, 0x2e, 0x3b, + 0x7b, 0xd7, 0x0c, 0x55, 0x75, 0x17, 0xe8, 0x15, 0x1a, 0xb7, 0x2a, 0x83, 0x42, 0xbb, 0xa3, 0x71, + 0xfd, 0xf0, 0xe5, 0xaf, 0x99, 0x46, 0xa9, 0xec, 0xb6, 0x43, 0x59, 0x5c, 0xb4, 0xaa, 0x94, 0xb9, + 0xf5, 0x73, 0xb8, 0x0e, 0xcf, 0x9a, 0x68, 0xda, 0x94, 0xc0, 0xa6, 0x74, 0x4f, 0xed, 0x09, 0x85, + 0xe6, 0x46, 0x8b, 0x86, 0x75, 0x21, 0xda, 0x85, 0x4b, 0xf1, 0x19, 0xb4, 0x25, 0x4d, 0xbd, 0xf3, + 0xbc, 0xa0, 0x58, 0xbe, 0x3b, 0x6c, 0xb1, 0x7c, 0xba, 0x5e, 0x10, 0x33, 0xed, 0xf4, 0x90, 0x6f, + 0x0e, 0x6e, 0x29, 0x67, 0x61, 0x7f, 0x6c, 0xb9, 0xcf, 0x84, 0xc0, 0x75, 0x63, 0x7f, 0x14, 0xf9, + 0xbe, 0xdb, 0xb1, 0xc2, 0xd3, 0x8e, 0x9e, 0xdf, 0x61, 0x4c, 0x22, 0x6f, 0x65, 0x9a, 0xd3, 0xe3, + 0x3b, 0x8e, 0xba, 0xc5, 0x53, 0x3e, 0xa7, 0x23, 0x64, 0x0e, 0x99, 0x26, 0x4f, 0x10, 0x83, 0x1f, + 0x7d, 0x87, 0x43, 0x34, 0xb7, 0xf4, 0x1f, 0x12, 0xd8, 0xc5, 0xd9, 0x68, 0xbf, 0x8a, 0x91, 0x4f, + 0xc5, 0xc5, 0x2f, 0x5b, 0x21, 0x9c, 0x20, 0x3a, 0x9e, 0x7f, 0xdf, 0xdf, 0xb7, 0xb8, 0x23, 0x5d, + 0x58, 0x92, 0xda, 0x5c, 0x87, 0x90, 0x08, 0x88, 0xae, 0x63, 0xbb, 0x00, 0x65, 0x78, 0x81, 0x8f, + 0x44, 0x2f, 0xe9, 0xa0, 0x87, 0x2b, 0xcd, 0x8b, 0x36, 0xf7, 0xf0, 0xc7, 0x7e, 0x1d, 0x32, 0x2c, + 0x2e, 0x90, 0x1a, 0xd9, 0xa1, 0x12, 0x19, 0x11, 0x57, 0x76, 0x44, 0x57, 0xc6, 0x8c, 0xe6, 0xc8, + 0x55, 0xc7, 0x07, 0xd3, 0xfd, 0xf7, 0xdf, 0x48, 0xdf, 0x79, 0xf5, 0x35, 0xdc, 0xfa, 0xdc, 0x89, + 0x1e, 0xeb, 0x8d, 0x1d, 0xf2, 0x1b, 0x2d, 0xd5, 0x9d, 0x61, 0x0b, 0x78, 0x30, 0xd3, 0x14, 0xcf, + 0x22, 0xcd, 0x70, 0xbf, 0x0f, 0xc8, 0x21, 0x33, 0xb2, 0x22, 0x44, 0x69, 0xa8, 0x48, 0x86, 0xda, + 0xe5, 0xa1, 0xb7, 0x2a, 0x73, 0x9e, 0x74, 0x26, 0x00, 0x3b, 0xbc, 0x89, 0xb3, 0xac, 0x60, 0xab, + 0xb0, 0x0f, 0x2d, 0xb2, 0x74, 0x97, 0x16, 0x24, 0xb2, 0x16, 0x16, 0xc1, 0xc1, 0xa5, 0xbc, 0x0c, + 0x91, 0xdc, 0xb5, 0x6f, 0xd4, 0xe5, 0x07, 0x5d, 0xc3, 0xba, 0x03, 0x3f, 0x01, 0x81, 0x86, 0x25, + 0xe3, 0xf6, 0xe2, 0xf0, 0xa3, 0xba, 0xef, 0xb4, 0xc7, 0x59, 0x81, 0xe8, 0x0b, 0xd0, 0xfa, 0x7d, + 0x1b, 0xd6, 0x20, 0xc3, 0x64, 0xa5, 0xb2, 0xd5, 0x23, 0x10, 0x4d, 0xf1, 0xd7, 0xec, 0x67, 0x50, + 0x9e, 0x7c, 0xdb, 0x63, 0x67, 0x8a, 0x5d, 0x59, 0xf0, 0xfb, 0x1a, 0xc3, 0x99, 0xe7, 0x1f, 0x46, + 0x8b, 0xb5, 0x53, 
0x52, 0xac, 0x35, 0x28, 0x83, 0x76, 0x86, 0x1a, 0xae, 0x70, 0x25, 0x9e, 0xf3, + 0x89, 0xf8, 0xd2, 0x45, 0x86, 0x0e, 0xb0, 0xad, 0xe8, 0x5f, 0x39, 0xb1, 0xde, 0xfa, 0x38, 0xe3, + 0xdd, 0x50, 0xfc, 0x29, 0x61, 0x8f, 0x94, 0x17, 0x15, 0x15, 0x45, 0x5a, 0xc3, 0xb9, 0xbc, 0xac, + 0xaa, 0xa3, 0x80, 0xe6, 0x8f, 0x07, 0xf1, 0x30, 0xb1, 0x5a, 0x11, 0x98, 0x96, 0x9f, 0x7a, 0xf6, + 0x90, 0x58, 0x70, 0x2f, 0xe7, 0x2f, 0x81, 0x2f, 0x9d, 0x30, 0xfd, 0xb0, 0x0a, 0xa0, 0x72, 0xb3, + 0x73, 0xd1, 0x30, 0xea, 0xf5, 0x4c, 0x13, 0x4d, 0x40, 0xb9, 0xd9, 0xf1, 0x8f, 0xdd, 0x78, 0x86, + 0x11, 0x18, 0x12, 0x59, 0x90, 0x6f, 0x40, 0x87, 0xa9, 0xc7, 0x8d, 0xd4, 0x55, 0xc4, 0x20, 0x5b, + 0x7e, 0xc7, 0x95, 0x6b, 0xdf, 0xfa, 0x2b, 0x2e, 0x5a, 0xbe, 0xc9, 0x3c, 0x9f, 0x00, 0x40, 0xab, + 0x36, 0xb1, 0x7b, 0x3d, 0x76, 0x84, 0x57, 0xa5, 0x7c, 0x58, 0x92, 0x0c, 0xff, 0xfc, 0x30, 0x60, + 0xbd, 0x1e, 0xc5, 0x10, 0x63, 0xa5, 0x68, 0xf8, 0x01, 0x7d, 0x79, 0x1f, 0x87, 0x09, 0x24, 0x1c, + 0xdc, 0xd1, 0x9e, 0xda, 0xbc, 0xcf, 0x76, 0xc9, 0xc2, 0x1f, 0x56, 0xfd, 0x7e, 0x28, 0xc4, 0x4c, + 0x8e, 0xa3, 0x50, 0x1a, 0xbf, 0x0e, 0x54, 0x0b, 0x76, 0x89, 0x50, 0xab, 0xde, 0xc2, 0x77, 0xb8, + 0xd3, 0xde, 0xee, 0x22, 0xef, 0x9e, 0xb4, 0x9c, 0xf0, 0x33, 0x11, 0x41, 0x42, 0x53, 0xdf, 0xb2, + 0xe7, 0x55, 0xb4, 0x3d, 0xee, 0x16, 0x28, 0xe8, 0x1f, 0x61, 0x4d, 0x3d, 0x50, 0x51, 0x56, 0xea, + 0xc9, 0x96, 0x74, 0x66, 0x6f, 0xee, 0xf3, 0x3f, 0xd0, 0x40, 0xca, 0x29, 0x39, 0x7d, 0xd0, 0x74, + 0x79, 0x6d, 0xed, 0x7f, 0x50, 0x7e, 0xb6, 0x8f, 0xd6, 0xc6, 0x6b, 0x05, 0x4e, 0x1d, 0x68, 0x3f, + 0x7d, 0x8d, 0xef, 0x43, 0x8c, 0x6c, 0x90, 0xc2, 0x1d, 0xf8, 0xa3, 0xf5, 0x67, 0x58, 0xe6, 0xeb, + 0x6d, 0x63, 0x0e, 0x03, 0x67, 0x68, 0x03, 0x8b, 0x64, 0x9d, 0x9f, 0x6b, 0x61, 0x91, 0x97, 0x45, + 0x01, 0x7e, 0x55, 0x85, 0x5b, 0x88, 0xbd, 0xbf, 0x1f, 0xd3, 0x04, 0x81, 0xb2, 0x26, 0x8e, 0xa1, + 0x2c, 0x04, 0x4a, 0xdd, 0x06, 0x6b, 0xc3, 0x2a, 0x60, 0xd5, 0x20, 0x00, 0xd3, 0x60, 0x15, 0x75, + 0x8f, 0xdf, 0xeb, 0xa6, 0xb9, 0x77, 0xd5, 0x0a, 0x25, 0xf2, 0xc8, 0x14, 0xfe, 0xac, 0x7b, 0x5e, + 0x40, 0x4b, 0x93, 0x2b, 0x97, 0x58, 0xc2, 0x4e, 0xec, 0x3b, 0xff, 0xde, 0x92, 0x9d, 0xbc, 0xa6, + 0xcd, 0xb0, 0xca, 0x46, 0xcf, 0x0e, 0xd5, 0x93, 0xfc, 0x18, 0x75, 0x91, 0x23, 0x32, 0xce, 0x63, + 0x50, 0x06, 0x44, 0x0a, 0x4d, 0x1e, 0x87, 0xf1, 0xc0, 0x1d, 0xfd, 0x69, 0x6f, 0x6d, 0x76, 0xf2, + 0xa8, 0x6a, 0x25, 0x0c, 0x69, 0xd2, 0xd0, 0x8a, 0xb4, 0x07, 0xe8, 0x25, 0x59, 0xe5, 0xae, 0xa8, + 0x1f, 0x0e, 0xc2, 0x46, 0xf2, 0xa2, 0x8f, 0xb6, 0xe6, 0xb7, 0xa0, 0x8d, 0xc7, 0xb6, 0x31, 0xa4, + 0x21, 0x8b, 0x6a, 0xf3, 0x42, 0x34, 0x16, 0xaf, 0x9d, 0x87, 0xc4, 0x83, 0xb9, 0x70, 0x5e, 0x47, + 0xf9, 0x91, 0x9f, 0x71, 0x64, 0x1d, 0xc8, 0x09, 0xf2, 0x5b, 0x19, 0x90, 0x7d, 0xb5, 0x14, 0x3e, + 0xe5, 0xa4, 0xd4, 0xaf, 0xfe, 0xc7, 0x9e, 0xdb, 0x46, 0xfc, 0x98, 0x69, 0xc3, 0x85, 0x3b, 0x2a, + 0xbb, 0xe1, 0x7b, 0x18, 0x5a, 0x34, 0xa6, 0x3a, 0x4a, 0xe8, 0xd2, 0x5b, 0x6f, 0x90, 0x54, 0x39, + 0xe7, 0x04, 0x0d, 0x1f, 0x63, 0xfe, 0x16, 0x17, 0xbb, 0x84, 0x92, 0x8d, 0x45, 0xa5, 0x44, 0x7e, + 0xa7, 0x20, 0xde, 0x5e, 0x33, 0x9b, 0x00, 0xe3, 0x07, 0xd9, 0x4c, 0xf0, 0x8e, 0xbe, 0x41, 0xe5, + 0x1c, 0xd5, 0x59, 0xdb, 0xe4, 0x60, 0x24, 0xb4, 0x77, 0x9d, 0x8e, 0xdc, 0x00, 0x8c, 0xf1, 0x70, + 0x6b, 0xa2, 0x88, 0x9f, 0x32, 0x39, 0xa5, 0xc6, 0x9f, 0x6b, 0xd0, 0xb9, 0x3e, 0x2f, 0x5b, 0x7b, + 0x0b, 0xf5, 0x19, 0x5a, 0x00, 0x5c, 0x07, 0x28, 0x48, 0x27, 0x8b, 0x7a, 0x02, 0x68, 0x38, 0x0d, + 0x30, 0xc1, 0x91, 0x3d, 0xfc, 0xa0, 0xe7, 0x6c, 0x0a, 0xa2, 0x1f, 0x5a, 0x2c, 0x02, 0x15, 0x64, + 0x6a, 0x84, 0x8d, 0x0c, 0xe0, 0x17, 0x4d, 
0xdd, 0xe8, 0xbf, 0x02, 0x66, 0x5a, 0xf5, 0xe1, 0xa6, + 0x8e, 0x90, 0xe0, 0x6f, 0xc3, 0x94, 0x27, 0x32, 0xf0, 0x10, 0x1c, 0x42, 0xb9, 0x77, 0x56, 0x1f, + 0x7b, 0x96, 0x74, 0xc5, 0x27, 0xd2, 0x14, 0x15, 0xb6, 0xb1, 0xf5, 0x51, 0xd4, 0x3a, 0xcc, 0x1e, + 0x24, 0x06, 0x1c, 0x1d, 0x16, 0x03, 0x50, 0x31, 0xec, 0xa6, 0x38, 0x8b, 0x02, 0x7d, 0xf2, 0x7a, + 0x1f, 0x95, 0xde, 0x74, 0xf2, 0xa8, 0x52, 0xae, 0xca, 0x3a, 0x3c, 0xd8, 0x25, 0x27, 0x42, 0x11, + 0xad, 0x6e, 0x97, 0xb8, 0x99, 0x7e, 0xcd, 0x8c, 0xec, 0x2d, 0xe8, 0x1c, 0x93, 0x93, 0x9e, 0xe9, + 0xc6, 0x59, 0x06, 0x9b, 0xbe, 0xbd, 0xa5, 0x75, 0x43, 0x15, 0x90, 0x2b, 0x09, 0x75, 0xbe, 0x0e, + 0xd8, 0xff, 0x43, 0xe0, 0xb6, 0xe1, 0x71, 0xcb, 0xf2, 0x90, 0x14, 0x75, 0x66, 0x6b, 0x87, 0x77, + 0x5d, 0x94, 0x11, 0xe7, 0x36, 0xb7, 0x6e, 0x79, 0x29, 0xaf, 0x8a, 0xfa, 0x2c, 0x0a, 0x68, 0x73, + 0x8f, 0x86, 0x5b, 0x81, 0x51, 0x1f, 0x42, 0x25, 0xe7, 0x6a, 0x64, 0x6c, 0x19, 0x4f, 0x63, 0x7a, + 0xc7, 0x65, 0x54, 0xd2, 0x71, 0xf3, 0xcd, 0x31, 0xe7, 0x29, 0xe6, 0xef, 0x6d, 0x05, 0xc8, 0xb0, + 0xf4, 0xbe, 0xf5, 0x75, 0xa0, 0x07, 0x87, 0xad, 0xf1, 0x86, 0x52, 0x28, 0x57, 0xd8, 0xe3, 0x65, + 0x24, 0x32, 0x03, 0xa8, 0xed, 0x20, 0x05, 0x7d, 0x3a, 0x1b, 0x0d, 0xf5, 0xc3, 0x7f, 0xbc, 0x78, + 0x41, 0xb0, 0xcf, 0x49, 0x3c, 0xaf, 0x96, 0x6c, 0xb7, 0x42, 0xa1, 0x1c, 0x31, 0x21, 0xfa, 0x0d, + 0x46, 0x73, 0x18, 0xb9, 0x42, 0x67, 0xa7, 0x57, 0x5a, 0xcf, 0x11, 0x9c, 0xed, 0x81, 0x27, 0x66, + 0x61, 0x26, 0xa7, 0xe9, 0xbc, 0x22, 0x21, 0x47, 0x9e, 0x9e, 0x0b, 0x47, 0x85, 0xe5, 0x1d, 0x61, + 0xd4, 0x43, 0x75, 0x5c, 0x82, 0x72, 0x67, 0x29, 0x06, 0xee, 0xa5, 0xb1, 0x7e, 0x8a, 0x7e, 0x57, + 0x7f, 0x17, 0x36, 0x45, 0xc6, 0x49, 0x34, 0x81, 0x05, 0x01, 0x9e, 0x1c, 0x92, 0x44, 0xc2, 0xb1, + 0xec, 0x26, 0x35, 0x0b, 0x30, 0x4a, 0x14, 0x1f, 0x1e, 0x0d, 0xda, 0x31, 0xac, 0x0f, 0xf1, 0x42, + 0xd3, 0x1b, 0x0e, 0xb1, 0x21, 0x86, 0x3a, 0x06, 0x4b, 0xfa, 0x94, 0xd2, 0xdc, 0xf4, 0x55, 0xfe, + 0xed, 0x3b, 0xea, 0x2b, 0x47, 0xaf, 0x8b, 0x35, 0x62, 0xd6, 0x68, 0x75, 0x64, 0xe8, 0xd4, 0xaa, + 0x6f, 0x3c, 0xda, 0x98, 0xd5, 0xaa, 0xcf, 0xbf, 0xf3, 0x54, 0x6a, 0x79, 0xe0, 0xed, 0x29, 0x08, + 0x3f, 0x60, 0x6e, 0x2b, 0x8f, 0x13, 0x03, 0xdb, 0x88, 0xda, 0x9e, 0xea, 0xe4, 0xc6, 0x11, 0x75, + 0xcf, 0x7f, 0x6f, 0x98, 0x09, 0x29, 0x72, 0xc5, 0xf9, 0x14, 0xe1, 0x6c, 0x91, 0xb4, 0x1a, 0x08, + 0x2e, 0x01, 0xae, 0x25, 0x77, 0xda, 0x73, 0x4c, 0x91, 0xc9, 0xb0, 0x00, 0x48, 0x9b, 0x36, 0xdf, + 0xfa, 0x3e, 0xb7, 0xef, 0x51, 0x77, 0xe5, 0x1a, 0xbd, 0x5d, 0x16, 0xec, 0xf4, 0x3c, 0x36, 0xd2, + 0xa9, 0xe3, 0xbd, 0x9c, 0xf4, 0x71, 0xa2, 0xac, 0x75, 0x3c, 0xa8, 0x55, 0xce, 0x26, 0x52, 0xf8, + 0xae, 0x47, 0x87, 0x16, 0x1f, 0xf3, 0x4e, 0x37, 0xe7, 0xd7, 0x92, 0x54, 0xdc, 0x3a, 0x69, 0xf7, + 0x0e, 0x3b, 0xd9, 0xbd, 0x6f, 0x86, 0xd2, 0x9e, 0x7d, 0x8b, 0x4c, 0x1b, 0xad, 0xc0, 0xc3, 0xcc, + 0x62, 0x22, 0xd6, 0xc3, 0xf1, 0x9d, 0x9f, 0xfb, 0xc4, 0x85, 0xbc, 0x2e, 0x9a, 0x3e, 0x31, 0xe2, + 0x00, 0x86, 0x5e, 0x8a, 0xd1, 0xe4, 0xd1, 0x34, 0xce, 0xbc, 0xf3, 0x67, 0x3d, 0x70, 0xae, 0x35, + 0x5b, 0x9e, 0x89, 0x16, 0xc5, 0x13, 0x52, 0xe8, 0x12, 0xe5, 0x9b, 0xf7, 0x68, 0xc9, 0x35, 0xb8, + 0x4f, 0x8c, 0x57, 0x21, 0xfe, 0x1b, 0xc9, 0x90, 0xb8, 0xb8, 0x7e, 0xae, 0x49, 0xe1, 0x39, 0xf0, + 0x2d, 0xff, 0xae, 0x6f, 0xc3, 0xe6, 0x3d, 0x76, 0xa6, 0x6b, 0x04, 0xeb, 0xfc, 0xb9, 0xea, 0xa5, + 0x47, 0x19, 0xf3, 0xaf, 0xd5, 0xd4, 0x4b, 0x44, 0x20, 0xb3, 0x56, 0x49, 0xff, 0x98, 0x47, 0x3b, + 0x9b, 0x77, 0xb8, 0x62, 0xf1, 0xad, 0x38, 0xdb, 0x1f, 0xe4, 0x40, 0x79, 0x21, 0xfd, 0xdb, 0x99, + 0xbf, 0x95, 0xf8, 0x69, 0x06, 0xf9, 0x5d, 0x56, 0xa8, 0xaf, 0xdf, 
0x13, 0x13, 0xf1, 0xe8, 0x52, + 0xab, 0x19, 0x49, 0x14, 0x3e, 0x7f, 0xa6, 0xa9, 0xde, 0xc7, 0xdd, 0x13, 0xa2, 0xfd, 0x5d, 0x09, + 0x95, 0x94, 0x1f, 0xc0, 0x12, 0x1a, 0xb8, 0x8c, 0x5f, 0x34, 0x90, 0xee, 0x83, 0x66, 0x33, 0xa6, + 0xc0, 0xc8, 0x57, 0x7f, 0x53, 0x54, 0x59, 0x3c, 0xec, 0x5d, 0x58, 0x9e, 0xce, 0x88, 0x7a, 0x49, + 0x80, 0x65, 0x5a, 0x7f, 0x7c, 0x31, 0x06, 0x81, 0x66, 0x30, 0xe6, 0x40, 0xfe, 0x15, 0xeb, 0x1e, + 0xc1, 0xe6, 0x3f, 0x82, 0x62, 0xe2, 0xb4, 0x3b, 0xc8, 0x46, 0x7f, 0x13, 0x08, 0xe4, 0xec, 0x55, + 0x3e, 0x7b, 0x04, 0x67, 0x38, 0xa7, 0xf9, 0x69, 0x3c, 0xba, 0x45, 0xef, 0x80, 0x96, 0xa5, 0x70, + 0x31, 0x23, 0xbd, 0x36, 0x24, 0xeb, 0xe1, 0x04, 0x4b, 0x99, 0xc3, 0x5d, 0xe4, 0x62, 0x51, 0xa3, + 0x91, 0xd0, 0xf6, 0x0f, 0x58, 0x9e, 0xd4, 0xf1, 0x9a, 0x93, 0x16, 0x92, 0xdd, 0xdc, 0x52, 0x22, + 0x6d, 0xf8, 0x41, 0x56, 0x4d, 0xd7, 0x5a, 0x55, 0xfc, 0x31, 0x2c, 0x53, 0xff, 0xbe, 0x07, 0x2d, + 0xf8, 0xac, 0x65, 0xed, 0x1e, 0xbf, 0x23, 0x1d, 0x53, 0x28, 0xc9, 0x98, 0x3f, 0x4b, 0xfc, 0x99, + 0xb3, 0x88, 0x35, 0xbb, 0xc2, 0xe4, 0x8a, 0x6d, 0xbc, 0x79, 0x8d, 0xc4, 0x5d, 0x59, 0xc6, 0x29, + 0x98, 0xcd, 0x54, 0x46, 0x6d, 0x1f, 0xfe, 0x2e, 0xe7, 0xab, 0x9e, 0x17, 0xac, 0x63, 0x65, 0x50, + 0x53, 0xb8, 0x6a, 0x4a, 0x4c, 0x7f, 0xef, 0x27, 0xe0, 0x30, 0xa6, 0xc6, 0xd5, 0xc1, 0x19, 0x44, + 0xf8, 0x63, 0xa9, 0x0c, 0x10, 0xdb, 0x9e, 0xca, 0x2a, 0x16, 0xa9, 0x42, 0x86, 0x83, 0x0a, 0xcf, + 0x78, 0x3f, 0xc1, 0x16, 0x8e, 0xf2, 0xc8, 0xa4, 0x9f, 0x69, 0x15, 0x38, 0xf2, 0x92, 0xca, 0x80, + 0x84, 0x96, 0xa7, 0x8b, 0x09, 0x95, 0x1f, 0xee, 0xbc, 0x3a, 0xb2, 0x9b, 0x96, 0xb6, 0x79, 0x5c, + 0x79, 0xfa, 0x08, 0xf2, 0x91, 0x38, 0x75, 0x27, 0xff, 0xae, 0x5a, 0xf3, 0xa5, 0x4b, 0xf1, 0xdb, + 0xbf, 0xe7, 0x19, 0xea, 0xf9, 0x83, 0x5b, 0x4a, 0x22, 0xa4, 0xdd, 0xee, 0x35, 0xaa, 0xa2, 0x4f, + 0xed, 0x31, 0xf7, 0xed, 0xa7, 0x96, 0x0e, 0x21, 0x66, 0x0b, 0x05, 0x5d, 0x2a, 0x38, 0x53, 0x22, + 0x32, 0x9b, 0x09, 0xef, 0xf3, 0xe1, 0x79, 0x14, 0x7e, 0x58, 0x13, 0x6a, 0xe5, 0x63, 0x75, 0x9e, + 0x8d, 0xb1, 0x20, 0x9d, 0x3e, 0x42, 0xef, 0x99, 0x6c, 0x83, 0xc2, 0x78, 0x2b, 0x99, 0x8d, 0x81, + 0xc5, 0x60, 0x30, 0x73, 0x65, 0xd0, 0x24, 0x8f, 0xdd, 0xbb, 0xd5, 0xfc, 0xfb, 0xce, 0x8f, 0xd7, + 0xe7, 0x3a, 0x23, 0xb4, 0x7a, 0x0b, 0xae, 0xe9, 0x0e, 0x9a, 0x8f, 0xbd, 0xf0, 0xc0, 0xc6, 0x3a, + 0xeb, 0xad, 0x13, 0x79, 0xcd, 0xb1, 0x5a, 0x31, 0xd6, 0x9d, 0xf5, 0x7e, 0xcf, 0x6d, 0xf1, 0x9d, + 0x81, 0xa5, 0x67, 0x8e, 0x9f, 0x60, 0x46, 0xca, 0xf7, 0xa2, 0xc3, 0xa6, 0x1a, 0x4d, 0xfa, 0xa0, + 0xd9, 0x3a, 0x03, 0x2b, 0x8b, 0xfc, 0x6e, 0x20, 0x3a, 0x37, 0x71, 0x49, 0x21, 0x4d, 0x37, 0x3b, + 0xf3, 0xfc, 0xf3, 0xcf, 0x0f, 0x74, 0xcb, 0x39, 0x3a, 0x46, 0x05, 0xc6, 0x56, 0x90, 0x27, 0xd8, + 0x89, 0xf7, 0xe2, 0x8b, 0x7b, 0x24, 0x38, 0x98, 0xdf, 0x29, 0x9d, 0x4d, 0xc1, 0x70, 0x49, 0x0c, + 0xe5, 0x0d, 0x47, 0xb4, 0xfb, 0x79, 0xa5, 0x8e, 0x4a, 0xf5, 0x8c, 0x4a, 0x77, 0x1d, 0x62, 0x85, + 0xbe, 0x3a, 0x63, 0xef, 0x9a, 0xae, 0x5d, 0x85, 0x3b, 0x47, 0xb2, 0x2d, 0xa5, 0xd8, 0x01, 0x21, + 0xdc, 0x9a, 0x0b, 0x27, 0xf5, 0xab, 0xc7, 0x28, 0x84, 0xc0, 0xbb, 0x1b, 0xea, 0x1f, 0x16, 0xef, + 0x7c, 0xff, 0xaa, 0xa1, 0x86, 0x28, 0x53, 0x73, 0xeb, 0xfb, 0x3a, 0x75, 0x17, 0xe8, 0xe7, 0x1d, + 0x52, 0x6d, 0xbc, 0x97, 0x1b, 0x30, 0x38, 0x35, 0x4a, 0x82, 0xde, 0x32, 0xf6, 0x26, 0x2c, 0xab, + 0x89, 0xea, 0xf9, 0x38, 0x41, 0xe9, 0x4f, 0x1f, 0x79, 0xcd, 0xf5, 0x26, 0xb1, 0x50, 0xae, 0xba, + 0x66, 0xfe, 0x8e, 0xb5, 0x09, 0x68, 0x27, 0xf2, 0xef, 0x97, 0x11, 0x5b, 0xa7, 0xb5, 0xd7, 0x99, + 0xbc, 0xc3, 0xa5, 0xa3, 0x38, 0xab, 0xf6, 0xfc, 0x57, 0xa5, 0x95, 0x59, 0xb6, 0x37, 0x5a, 
0xd1, + 0x9d, 0xc3, 0x1b, 0x3c, 0x84, 0xc9, 0xc1, 0x49, 0x24, 0xc3, 0xc7, 0x7b, 0x9a, 0xf3, 0xa0, 0x7e, + 0x8c, 0x43, 0x25, 0x8a, 0xaa, 0x20, 0x2a, 0xba, 0xac, 0xd3, 0x4d, 0xcf, 0x73, 0xef, 0x2f, 0xe8, + 0x5d, 0x14, 0xdf, 0x28, 0x7f, 0x02, 0x74, 0x82, 0x7b, 0x6a, 0xf9, 0x68, 0x9a, 0xfc, 0x9f, 0x7c, + 0xd5, 0xd3, 0x86, 0x57, 0xc2, 0xf4, 0xbf, 0x64, 0x3f, 0x87, 0x10, 0xb4, 0x5e, 0x24, 0x56, 0x38, + 0x70, 0x46, 0xb3, 0x0e, 0x28, 0x58, 0x15, 0x64, 0xa0, 0x14, 0x82, 0x0b, 0xe5, 0xf8, 0x13, 0x27, + 0x54, 0x0b, 0x24, 0x28, 0x94, 0x50, 0x94, 0x16, 0xf2, 0x26, 0xd1, 0xf4, 0x2a, 0x28, 0x71, 0x6b, + 0x2e, 0xc1, 0x4f, 0x60, 0xbc, 0xc1, 0xce, 0xbf, 0x0c, 0x8b, 0x5a, 0x1b, 0x0b, 0xcd, 0x4a, 0xc3, + 0xf0, 0x71, 0x63, 0x05, 0x57, 0x7e, 0x45, 0xff, 0x06, 0xfe, 0xaf, 0xfd, 0xe3, 0x53, 0xe7, 0xf8, + 0x6f, 0x51, 0xc4, 0xfe, 0xed, 0xe7, 0x76, 0x28, 0x24, 0x1c, 0x0b, 0x7d, 0x10, 0xdd, 0x05, 0xbf, + 0xa5, 0x46, 0xbf, 0x80, 0x06, 0x44, 0xf7, 0x5a, 0x71, 0x91, 0xa1, 0xaa, 0x75, 0x4c, 0x1d, 0x41, + 0xba, 0xea, 0x2f, 0x89, 0xda, 0x1a, 0x4b, 0x76, 0x75, 0x77, 0xa0, 0x4c, 0x08, 0x3f, 0xd1, 0x82, + 0x9c, 0x16, 0x3d, 0xd4, 0xe9, 0xea, 0xd4, 0x88, 0xe7, 0x8f, 0x26, 0xc3, 0x70, 0xae, 0xa4, 0xa9, + 0x36, 0xf3, 0x86, 0xb6, 0x42, 0x87, 0xf9, 0xe0, 0x9d, 0x70, 0x95, 0x16, 0x98, 0xeb, 0xe5, 0x66, + 0xfc, 0x13, 0x59, 0x3f, 0x13, 0x27, 0x23, 0x2b, 0x74, 0xfa, 0xdd, 0xdb, 0x5d, 0xed, 0xb9, 0x41, + 0x16, 0x55, 0x4c, 0xa0, 0x1f, 0xff, 0x93, 0x96, 0xc4, 0xcd, 0x87, 0xac, 0x9c, 0xa9, 0x46, 0x1f, + 0x5e, 0x18, 0xe6, 0xcf, 0x23, 0x9b, 0x94, 0xb8, 0x08, 0x8b, 0xf3, 0x3a, 0x2e, 0x89, 0xb5, 0x27, + 0xac, 0x90, 0xe1, 0xd9, 0x4e, 0xe2, 0x9f, 0xc5, 0x50, 0x43, 0x95, 0xcc, 0xf6, 0x54, 0x4c, 0x72, + 0xa7, 0x78, 0x91, 0xac, 0x31, 0xe5, 0xd8, 0xba, 0x6f, 0x24, 0xda, 0x44, 0x27, 0xc9, 0x64, 0x1b, + 0xbd, 0x92, 0xd0, 0x4a, 0xe5, 0x48, 0xf3, 0x5f, 0xc3, 0x01, 0xae, 0xb2, 0x6e, 0x79, 0x4d, 0xdc, + 0x32, 0x5a, 0x31, 0x6d, 0xcf, 0x2c, 0xdd, 0xb8, 0x9b, 0x77, 0x57, 0x2c, 0x42, 0x61, 0xf4, 0xcd, + 0x10, 0xd6, 0x7c, 0x17, 0x81, 0x14, 0xc2, 0x55, 0x0b, 0xd7, 0x91, 0x62, 0x9b, 0x4c, 0x2c, 0x63, + 0xf1, 0x44, 0xbb, 0xf8, 0x95, 0x33, 0x41, 0xa5, 0xeb, 0xbc, 0x4b, 0xeb, 0xca, 0xd1, 0x1a, 0x78, + 0x09, 0x01, 0xc7, 0x23, 0x99, 0xe6, 0x62, 0x44, 0xf8, 0x2a, 0xa8, 0x65, 0x33, 0x46, 0xcf, 0xa7, + 0x42, 0x67, 0xb2, 0xec, 0xa0, 0xa4, 0xfa, 0xb9, 0xab, 0x96, 0xf0, 0x13, 0xbf, 0x8d, 0x67, 0xb2, + 0xaa, 0xbb, 0x9e, 0xfc, 0xa6, 0x0e, 0x44, 0x11, 0x18, 0xd8, 0x5f, 0x16, 0xf8, 0xf4, 0x13, 0xfc, + 0x0b, 0x39, 0xb5, 0xea, 0xe2, 0x5b, 0x83, 0xa8, 0xdf, 0x41, 0x2a, 0x17, 0x99, 0xa6, 0xa9, 0x14, + 0xe9, 0x0f, 0x60, 0x14, 0x3d, 0x88, 0xc1, 0xaa, 0x42, 0x60, 0x4c, 0x94, 0xa5, 0xbe, 0x5c, 0x4d, + 0xea, 0xa1, 0x56, 0xc3, 0x95, 0x8c, 0x06, 0x88, 0xe7, 0x7a, 0xc5, 0x43, 0x5a, 0xc5, 0x5b, 0x06, + 0xc3, 0xde, 0x36, 0x11, 0x21, 0x10, 0x18, 0xa1, 0x99, 0x0f, 0x45, 0x6b, 0x05, 0x53, 0xde, 0x57, + 0x5a, 0x49, 0xe8, 0xe7, 0x9b, 0xfd, 0xdb, 0xcf, 0xe4, 0x74, 0x8b, 0x56, 0xcf, 0x1f, 0x80, 0xcf, + 0x35, 0xf8, 0xf9, 0x9b, 0xc5, 0x88, 0x5f, 0x0e, 0x52, 0x5a, 0x58, 0x76, 0x77, 0x87, 0x66, 0x70, + 0x0c, 0xea, 0x52, 0xff, 0xea, 0x73, 0x3d, 0x9b, 0x84, 0x84, 0x61, 0x47, 0x74, 0xd9, 0x88, 0x2e, + 0xd6, 0xa4, 0x15, 0x32, 0xbd, 0xf5, 0x1f, 0x20, 0x66, 0xdd, 0xbb, 0xd8, 0x88, 0x45, 0xae, 0x6d, + 0x8b, 0x94, 0xac, 0x7d, 0xe8, 0x15, 0x11, 0xb7, 0x82, 0xa2, 0x63, 0x7e, 0xc5, 0xbc, 0xfa, 0x8d, + 0x85, 0x34, 0x92, 0x15, 0x7c, 0x64, 0x9f, 0x11, 0xa3, 0xa9, 0xf0, 0x85, 0xb5, 0x99, 0x92, 0xf2, + 0x25, 0xca, 0x9b, 0x33, 0x3e, 0x1b, 0x39, 0xf4, 0x94, 0x0d, 0x52, 0xff, 0xa1, 0xc1, 0xe6, 0x3e, + 0x3a, 0x63, 0x52, 
0x86, 0x27, 0xc1, 0x5f, 0xa7, 0x84, 0xd0, 0x0d, 0xf7, 0xbe, 0xa0, 0x7f, 0x50, + 0xff, 0xde, 0x38, 0x5e, 0x78, 0xeb, 0x8e, 0x7d, 0x8e, 0x21, 0xdc, 0xad, 0x31, 0xff, 0x41, 0x98, + 0x6e, 0xdb, 0xc8, 0xcb, 0x18, 0xd7, 0x4d, 0xf5, 0xe7, 0xa7, 0xd2, 0x12, 0x9f, 0x01, 0x88, 0x8d, + 0x2b, 0x70, 0x0f, 0x64, 0x90, 0x7e, 0x8e, 0x93, 0x4c, 0xa7, 0x8d, 0x68, 0xfe, 0x91, 0x7c, 0x33, + 0x1f, 0x6c, 0xa1, 0x1b, 0xb5, 0x8b, 0x4e, 0xcb, 0x31, 0x8e, 0xef, 0x70, 0x63, 0xc6, 0x5d, 0x4c, + 0xae, 0x6d, 0x7f, 0x49, 0xc5, 0xa7, 0xf4, 0x9a, 0x77, 0xb1, 0xb8, 0x8a, 0x24, 0x74, 0x0c, 0x24, + 0x99, 0x2d, 0x5c, 0x51, 0xdb, 0x17, 0x4a, 0xa5, 0x2e, 0x21, 0x94, 0x54, 0x88, 0x9a, 0xa2, 0xb0, + 0xa3, 0x77, 0xbf, 0x75, 0xe2, 0x43, 0x32, 0x84, 0x55, 0x24, 0xae, 0x87, 0x97, 0xb1, 0x60, 0x67, + 0x27, 0x57, 0x09, 0xa6, 0x4b, 0xce, 0xd0, 0x40, 0x82, 0xb4, 0x70, 0xf6, 0x8b, 0x92, 0x50, 0x95, + 0x12, 0xa9, 0xad, 0x46, 0x32, 0x49, 0x2e, 0xac, 0x44, 0x11, 0x95, 0x07, 0xb1, 0xfa, 0xbc, 0x87, + 0xa7, 0xca, 0x5c, 0x8d, 0x52, 0x46, 0x1e, 0xed, 0x6e, 0xc7, 0x4a, 0x76, 0xf6, 0x15, 0xfe, 0x1c, + 0xff, 0x05, 0xf3, 0x13, 0x0c, 0xb2, 0x5b, 0xe1, 0x5b, 0xca, 0x93, 0x01, 0xa4, 0x4c, 0x6c, 0xa9, + 0x6b, 0x1d, 0x0d, 0xd2, 0x38, 0xcc, 0x55, 0xa0, 0xcb, 0x79, 0x79, 0x10, 0x19, 0xf4, 0x12, 0x73, + 0xcc, 0x15, 0x81, 0x74, 0x6e, 0x8b, 0xe9, 0x8b, 0xd8, 0x75, 0x3a, 0x08, 0x7d, 0xaf, 0x40, 0xc0, + 0xe6, 0x67, 0x18, 0xb4, 0xb1, 0x79, 0x8e, 0xd3, 0x4a, 0xd2, 0xdf, 0x8c, 0xb9, 0xca, 0x51, 0x16, + 0x5e, 0xe4, 0x6e, 0x29, 0xfb, 0x3e, 0xcc, 0xa4, 0x0a, 0x93, 0x48, 0x8a, 0xa3, 0xbe, 0x31, 0x31, + 0x8a, 0x92, 0x54, 0x72, 0xf7, 0x9e, 0x3b, 0xd0, 0xf0, 0xc0, 0xef, 0xf5, 0xed, 0x23, 0xf9, 0xd0, + 0xb7, 0x0b, 0xf1, 0x9b, 0x1c, 0x25, 0xa7, 0xcb, 0x7f, 0xfb, 0xd3, 0x9a, 0x2d, 0xc3, 0x28, 0x6f, + 0xbc, 0x6a, 0xf9, 0x50, 0x64, 0x6d, 0xc1, 0x33, 0xfe, 0x0b, 0x33, 0x0c, 0x3c, 0x6e, 0x52, 0xe7, + 0x6e, 0xaa, 0xb7, 0x84, 0xe1, 0x87, 0x20, 0xf5, 0x19, 0x97, 0x02, 0x88, 0x25, 0xba, 0xc3, 0x87, + 0x8b, 0x8a, 0x8b, 0xa2, 0x9b, 0xf8, 0x2d, 0x4e, 0x9f, 0xe5, 0x29, 0x3a, 0x2a, 0xd4, 0xf4, 0x9d, + 0xa6, 0xf3, 0x8e, 0x53, 0xe5, 0x84, 0x6c, 0x97, 0x57, 0xe7, 0xe8, 0xc1, 0x1a, 0x48, 0xdb, 0xa5, + 0xe8, 0x14, 0x99, 0x96, 0x96, 0x3e, 0xe1, 0x56, 0xac, 0xd5, 0x2c, 0x2d, 0x11, 0x05, 0xc8, 0xbd, + 0xfa, 0xb7, 0xdf, 0x79, 0x38, 0xa5, 0x14, 0x7f, 0x51, 0x47, 0x63, 0x87, 0x32, 0x46, 0x74, 0x89, + 0x26, 0xd4, 0x36, 0xe2, 0x36, 0x8f, 0xee, 0xc7, 0xd2, 0x7d, 0x44, 0xd7, 0x29, 0x2d, 0x7a, 0xf2, + 0xf7, 0x91, 0x0b, 0x41, 0x2c, 0x2f, 0x95, 0xd2, 0x65, 0xd3, 0x4f, 0xef, 0x04, 0x26, 0x49, 0xe2, + 0x3e, 0x04, 0x79, 0x37, 0x88, 0x6a, 0xbd, 0x50, 0x3c, 0x5f, 0x7d, 0xba, 0x7a, 0x79, 0xf3, 0x33, + 0x64, 0x62, 0x11, 0xa7, 0x13, 0x7b, 0x52, 0x06, 0xa9, 0xbc, 0xc3, 0xdd, 0x3c, 0xf6, 0x72, 0x60, + 0x75, 0xd1, 0x39, 0xa2, 0xe3, 0x31, 0x95, 0xb5, 0xfb, 0x8f, 0x84, 0xff, 0x4f, 0xdb, 0x3d, 0x12, + 0xd2, 0x26, 0x57, 0xec, 0xd8, 0xb9, 0x54, 0xf8, 0x08, 0x3a, 0xcf, 0xca, 0x8a, 0x01, 0x00, 0x1f, + 0xb8, 0x3e, 0x52, 0x64, 0xb4, 0x4d, 0x6b, 0x38, 0x9b, 0x30, 0x74, 0xbc, 0xd9, 0x5a, 0xdd, 0x95, + 0x86, 0xf8, 0x63, 0x3f, 0x89, 0x5d, 0xec, 0x0b, 0x81, 0x24, 0xcf, 0xa9, 0xd1, 0x69, 0xa4, 0x43, + 0x93, 0x50, 0x87, 0x0a, 0xda, 0xfc, 0x06, 0xcb, 0x55, 0xf9, 0x94, 0x43, 0x0b, 0x51, 0xe6, 0xea, + 0x97, 0x07, 0x82, 0x9a, 0x7d, 0x57, 0xb1, 0xa1, 0xe9, 0xbe, 0xfe, 0x5e, 0xdc, 0x34, 0x92, 0xc8, + 0x31, 0x55, 0x27, 0x41, 0x14, 0x6a, 0xf5, 0xa6, 0x6a, 0xae, 0x32, 0x24, 0xc2, 0x18, 0xb1, 0xc7, + 0x2c, 0x39, 0x0e, 0x14, 0x5d, 0x0e, 0x06, 0x2e, 0xd6, 0x40, 0x61, 0x95, 0x6b, 0x12, 0xc9, 0x1e, + 0x43, 0x44, 0x99, 0x77, 0x13, 0x79, 0xa2, 
0x01, 0x63, 0x2f, 0x12, 0x0a, 0xe4, 0x5d, 0xeb, 0x9d, + 0xd7, 0x5a, 0x83, 0x4a, 0xa9, 0x7c, 0xb8, 0x9f, 0xbc, 0x1b, 0x90, 0x9c, 0xe4, 0xfc, 0x8e, 0x3c, + 0x92, 0xc5, 0x36, 0x78, 0x11, 0xcf, 0xa0, 0x53, 0xc5, 0x06, 0x53, 0xa5, 0xb3, 0x71, 0x63, 0x93, + 0x07, 0x96, 0x4a, 0x67, 0xcc, 0x7f, 0xce, 0x5b, 0xa5, 0xf2, 0x6c, 0x93, 0xe9, 0x0f, 0x13, 0xc1, + 0x1b, 0x5d, 0x6f, 0xdf, 0x94, 0x92, 0xa2, 0x90, 0x9d, 0xc6, 0x42, 0xfa, 0xfe, 0x6f, 0xdf, 0xc2, + 0xff, 0xf8, 0x8f, 0xff, 0xf8, 0x8f, 0xff, 0xf8, 0xff, 0xc2, 0xff, 0x00, 0x07, 0x8b, 0xf4, 0x1e, + 0x00, 0x23, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8960, // uncompressed data size (bytes) + 7364, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x05, 0x62, 0x56, 0x08, 0x13, 0x4c, 0xc8, 0x40, 0x69, + 0x20, 0x00, 0x00, 0x8e, 0xa1, 0x42, 0xb2, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8960 +// COMPRESSED SIZE (bytes): 7364 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_image_prod_data[] = +{ + 0xed, 0x99, 0x45, 0x54, 0x1c, 0x5a, 0x90, 0x86, 0x9b, 0xc6, 0xdd, 0xdd, 0x09, 0x0e, 0xc1, 0x09, + 0x0e, 0xc1, 0xdd, 0x82, 0x3b, 0xc1, 0xdd, 0x42, 0xd0, 0x40, 0xd0, 0x40, 0xe3, 0x0e, 0x8d, 0x35, + 0xee, 0xee, 0xd0, 0xb8, 0x04, 0xf7, 0xe0, 0x0e, 0x8d, 0x04, 0x27, 0x38, 0x41, 0xe6, 0xcd, 0xf6, + 0xed, 0x66, 0x3b, 0xe7, 0x7d, 0x9b, 0xbf, 0xee, 0xa6, 0xaa, 0xce, 0xa9, 0x73, 0x6b, 0xf1, 0x17, + 0x08, 0x10, 0x04, 0x78, 0x4d, 0x01, 0x04, 0x62, 0x03, 0x00, 0x0f, 0xc0, 0x07, 0xc0, 0x23, 0x30, + 0x16, 0x00, 0x04, 0xf4, 0x6c, 0x6e, 0xbd, 0xbd, 0xbd, 0x61, 0x83, 0x00, 0x70, 0x80, 0x37, 0x08, + 0x42, 0x23, 0x0c, 0xc0, 0x9b, 0xb1, 0x02, 0xd7, 0x98, 0x02, 0x60, 0xcf, 0x58, 0x01, 0xfe, 0x23, + 0x0c, 0x19, 0x2b, 0xf0, 0xff, 0x08, 0x15, 0x08, 0x00, 0x00, 0x64, 0x40, 0xe0, 0x9b, 0xb7, 0xe0, + 0x7b, 0x20, 0x79, 0x5b, 0x19, 0x10, 0xb8, 0x0c, 0x30, 0xb0, 0x79, 0x0b, 0xae, 0x79, 0x05, 0x18, + 0xd8, 0x02, 0x00, 0x60, 0x73, 0x2b, 0x02, 0x70, 0xee, 0x21, 0x00, 0x6c, 0x04, 0x10, 0xc0, 0x16, + 0xf9, 0x7f, 0x03, 0xd4, 0x36, 0x30, 0x00, 0x0e, 0xf8, 0x4f, 0x74, 0x23, 0x0a, 0x8c, 0x07, 0x00, + 0x90, 0x40, 0xff, 0x94, 0x7a, 0x81, 0x20, 0x76, 0xdd, 0x3c, 0x3d, 0x22, 0x3d, 0xc0, 0xff, 0x93, + 0x0d, 0x98, 0x8a, 0x8d, 0xb2, 0xf9, 0xbf, 0x95, 0x5f, 0x1e, 0x11, 0x41, 0x00, 0x4a, 0xc0, 0x4b, + 0x2e, 0x02, 0x34, 0x01, 0xd8, 0x92, 0x82, 0xf3, 0xcf, 0xfb, 0xf5, 0x09, 0x10, 0xf8, 0x4f, 0x3f, + 0x0f, 0xc0, 0xae, 0xa2, 0xbf, 0x70, 0xf1, 0x5a, 0x1d, 0xe8, 0x20, 0x1c, 0x44, 0x40, 0x1e, 0xe4, + 0x01, 0x10, 0xff, 0x8f, 0xf6, 0xbc, 0xdd, 0x70, 0x3d, 0xbf, 0x2a, 0xc2, 0xe5, 0x41, 0xc4, 0x67, + 0x00, 0xff, 0x27, 0x08, 0x44, 0x68, 0xb7, 0x78, 0x7d, 0xfe, 0x06, 0x6d, 0x95, 0xdc, 0x49, 0xb5, + 0x51, 0x34, 0x7e, 0x3a, 0x74, 0xd2, 0x94, 0xb0, 0xa7, 0x3a, 0x48, 0x76, 0x11, 0x21, 0xfa, 0xdd, + 0xcf, 0x19, 0x69, 0x56, 0xe2, 0x63, 0xdd, 0xfa, 0x0b, 0xe5, 0x91, 0xcf, 0xbf, 0x30, 0xd1, 0x1a, + 0x61, 0xad, 0xd9, 0x97, 0xf8, 0x79, 0x68, 0x51, 0x5d, 0xbc, 0xbc, 0xf5, 0xcc, 0xd9, 0x70, 0x3d, + 0x74, 0x00, 0xa5, 0x9f, 0x77, 0x3e, 0xbf, 0x95, 0xa1, 0x26, 0x16, 0x41, 0x13, 0x72, 0x92, 0x8c, + 0x66, 0x00, 0x67, 0x2e, 0x7e, 0x87, 0xc9, 0x72, 0xdb, 0x57, 0xb2, 0xb4, 0xf2, 0xdb, 0xf3, 0x53, + 0x9c, 0xcc, 0x53, 0x37, 0x01, 0x33, 0x94, 0xf7, 0xc5, 0x06, 0x24, 0x54, 0xe3, 0x9c, 0xbc, 0x87, + 0x4f, 0x39, 0x38, 0x07, 0x12, 0xdd, 0x56, 0x15, 0x5b, 0x1d, 0x98, 0x7b, 0xf0, 0xe1, 0xf4, 0xac, + 0xb7, 0x96, 0xfc, 0xe2, 0x4b, 0x0e, 0xfb, 0xf6, 0xe1, 0xd0, 0xda, 0xd1, 0x24, 0x47, 0xbb, 0xbb, + 0x4f, 0x4b, 0x75, 0x44, 0x64, 0x09, 0x6f, 0xae, 0xf3, 0xbe, 0x71, 0xb2, 0x07, 0x07, 0xf2, 0x7c, + 0x2f, 0xaa, 0x52, 0xb9, 0x3d, 0x59, 0xc0, 0x60, 0x1b, 0xe4, 0xc2, 0x84, 0x2d, 0x3f, 0x99, 0x36, + 0x74, 0xf7, 0xea, 0x91, 0x59, 0x18, 0x90, 0xb0, 0x2f, 0x3f, 0xfc, 0x19, 0xb7, 0xd3, 0xa8, 0x8b, + 0x41, 0xe9, 0x61, 0xa9, 0x65, 0x75, 0x1f, 0x8b, 0xa1, 0x4c, 0x22, 0x26, 0x7c, 0x66, 0xd0, 0x65, + 0xbe, 0x9c, 0xe4, 0xe3, 0x17, 0x82, 0x4a, 0xb2, 0x50, 0x3a, 0x23, 0x8a, 0x32, 0xe3, 0xf2, 0x57, + 0xb8, 0xab, 0x53, 0x1f, 0x3c, 0x31, 0x5f, 0x3b, 0xef, 
0xb3, 0x98, 0xb7, 0xe4, 0x8e, 0x77, 0x86, + 0x2c, 0x10, 0x7b, 0xc7, 0x3f, 0x4a, 0xc0, 0x19, 0x74, 0xf7, 0x46, 0xb5, 0x20, 0xa6, 0xa1, 0x93, + 0x75, 0xbd, 0xec, 0x6a, 0x4a, 0xae, 0x8f, 0xea, 0x86, 0x40, 0x64, 0x1d, 0x38, 0x6c, 0x4c, 0x41, + 0x75, 0x41, 0x5c, 0x75, 0xbe, 0x0a, 0x90, 0x41, 0xb6, 0x02, 0x4b, 0x8a, 0xf8, 0x28, 0xcf, 0x57, + 0x7b, 0xf1, 0x88, 0x81, 0x92, 0x93, 0xbc, 0x29, 0x8a, 0x69, 0xa5, 0xd1, 0xca, 0xee, 0xe5, 0x9b, + 0x4b, 0x86, 0x09, 0x96, 0x95, 0x4f, 0xf8, 0xdd, 0x09, 0x8a, 0x56, 0x41, 0xea, 0xed, 0xd0, 0x79, + 0x36, 0x9c, 0xa1, 0xd2, 0xc4, 0xd7, 0x6b, 0x7a, 0x6e, 0xc5, 0xef, 0xa5, 0x17, 0x3d, 0x56, 0xec, + 0x4b, 0x93, 0x2a, 0x3f, 0x35, 0x28, 0x02, 0x71, 0xaa, 0x64, 0xeb, 0xb5, 0xf2, 0x97, 0xa8, 0x93, + 0xcb, 0x3e, 0x78, 0xba, 0xb4, 0xb6, 0x48, 0x60, 0xdc, 0x67, 0xed, 0x2d, 0xe7, 0x6b, 0x2a, 0xb5, + 0x06, 0x75, 0x0a, 0x8e, 0x51, 0xc9, 0xac, 0x68, 0x26, 0x0b, 0xfe, 0xd5, 0x8e, 0x05, 0x7b, 0xad, + 0xab, 0xd2, 0xb4, 0x1e, 0x85, 0x66, 0xc7, 0xfa, 0xff, 0xf1, 0x37, 0x20, 0x61, 0xc0, 0xfc, 0x7c, + 0xe9, 0xbe, 0x3b, 0x7a, 0x6d, 0x45, 0xc7, 0x82, 0x5c, 0xf2, 0x91, 0xed, 0xe7, 0x69, 0x29, 0x83, + 0x9d, 0x37, 0x32, 0x73, 0xfe, 0x40, 0x1f, 0x15, 0xba, 0x47, 0xef, 0x06, 0xb4, 0xdb, 0x95, 0xe8, + 0xf4, 0x5a, 0xc9, 0x16, 0xd3, 0xc5, 0x2e, 0xe7, 0xbd, 0x54, 0x5e, 0xe4, 0x0c, 0xa5, 0xc4, 0xd8, + 0xf5, 0xfd, 0xb0, 0xa6, 0x43, 0xb1, 0xfb, 0x97, 0x1f, 0xf5, 0xc5, 0xd7, 0x77, 0xc3, 0x4e, 0xab, + 0x17, 0x48, 0xe7, 0x49, 0x89, 0xe4, 0x4f, 0x82, 0x58, 0xfe, 0x1a, 0x18, 0x8f, 0xa7, 0xc2, 0x1a, + 0x5e, 0xaf, 0x35, 0x2c, 0xc3, 0xc2, 0xcd, 0xfe, 0x3e, 0x4a, 0xef, 0x26, 0x63, 0x10, 0xac, 0xc1, + 0xe3, 0xaa, 0xd4, 0x7b, 0x64, 0xeb, 0x5f, 0x27, 0x34, 0x8e, 0x35, 0x59, 0x04, 0xac, 0x29, 0x62, + 0x62, 0x0c, 0x95, 0x13, 0x75, 0x19, 0x84, 0x29, 0x9d, 0x69, 0xe0, 0x8f, 0xc9, 0x76, 0x6b, 0xb2, + 0xa9, 0x3b, 0xca, 0xfa, 0xb6, 0xc4, 0x53, 0x12, 0x1f, 0x36, 0x53, 0x6c, 0xac, 0xeb, 0x24, 0xa8, + 0x83, 0xdf, 0x4e, 0x5e, 0x12, 0x1e, 0x33, 0xda, 0x33, 0xba, 0x2b, 0x4d, 0xf3, 0x4b, 0x35, 0x3f, + 0x1a, 0x37, 0xa6, 0xab, 0xa8, 0x20, 0xa0, 0x1b, 0x9e, 0x66, 0xef, 0xb6, 0x4c, 0xa6, 0x88, 0x2c, + 0x8c, 0xeb, 0xe5, 0x3a, 0x7f, 0xbe, 0x89, 0xb6, 0xb5, 0x9d, 0x58, 0xb8, 0xa6, 0x13, 0x58, 0xd1, + 0x9e, 0x2f, 0xb7, 0xeb, 0xc0, 0xaa, 0xcc, 0x7f, 0xe1, 0x1d, 0xdf, 0x8d, 0xa5, 0xd0, 0x9c, 0x34, + 0xb3, 0xb3, 0x26, 0x2d, 0x13, 0x1c, 0xfd, 0x26, 0x44, 0x75, 0x4d, 0x2c, 0xf3, 0x79, 0x2a, 0x79, + 0xf4, 0x43, 0x27, 0x42, 0xf2, 0xed, 0xd8, 0xc9, 0x5f, 0x04, 0xf7, 0x27, 0x57, 0xd4, 0x8e, 0x9b, + 0xd8, 0xf3, 0xfe, 0x5c, 0x13, 0xaf, 0x68, 0xd6, 0xd6, 0xab, 0xe8, 0x18, 0x01, 0x4e, 0x94, 0x34, + 0x41, 0x43, 0xaa, 0x26, 0x11, 0x94, 0x48, 0x1c, 0x4f, 0x8a, 0xea, 0xfb, 0xb3, 0x18, 0xbe, 0xb6, + 0x98, 0x05, 0xb7, 0xda, 0x45, 0x7b, 0x63, 0x6f, 0x56, 0x65, 0xa0, 0x34, 0x40, 0x2c, 0x20, 0x06, + 0x74, 0xd5, 0xea, 0x83, 0x47, 0x5e, 0x15, 0x27, 0xe6, 0xa3, 0xef, 0x0a, 0xf8, 0x80, 0x09, 0x6a, + 0x38, 0x98, 0x6e, 0x18, 0x22, 0x55, 0x1c, 0xa8, 0x93, 0xac, 0x1d, 0x91, 0xe3, 0x14, 0x9d, 0x78, + 0x57, 0xb5, 0x0b, 0xf9, 0xf6, 0x27, 0x1d, 0xb8, 0x32, 0x08, 0x65, 0x47, 0xeb, 0x49, 0x5b, 0x25, + 0x06, 0xc6, 0xf1, 0x2a, 0xdb, 0x76, 0xbd, 0x46, 0xa5, 0xa0, 0x3d, 0xb8, 0xcb, 0x2a, 0x96, 0xa5, + 0x34, 0xe4, 0xc5, 0x57, 0xa0, 0x41, 0x43, 0x7e, 0xbc, 0xe1, 0xa1, 0x25, 0xb8, 0x38, 0x15, 0x8a, + 0xc2, 0x5e, 0x6f, 0xe8, 0x79, 0x25, 0x1a, 0x8f, 0x44, 0x98, 0xa5, 0xfe, 0xaa, 0x84, 0x2d, 0xa9, + 0x2e, 0x1b, 0x9f, 0x90, 0xde, 0x54, 0x6b, 0x16, 0xd1, 0x6d, 0x59, 0x13, 0xe1, 0xf8, 0xd3, 0x5b, + 0xcc, 0x0d, 0xa7, 0xbc, 0x2c, 0xfc, 0xf2, 0x64, 0x53, 0x6f, 0x48, 0xe1, 0xd1, 
0x0b, 0xf0, 0xb1, + 0x5a, 0x5f, 0xb5, 0xcf, 0xcc, 0xec, 0x79, 0x91, 0x25, 0x72, 0xbb, 0x21, 0x59, 0xd1, 0xdb, 0x5a, + 0x65, 0x9a, 0xb8, 0xb3, 0x96, 0x57, 0xd0, 0xec, 0x66, 0x0a, 0xe6, 0x0c, 0xa8, 0xcb, 0x17, 0x8d, + 0xd8, 0x8e, 0xd3, 0x07, 0xce, 0x2d, 0x2d, 0x8f, 0x79, 0x16, 0x32, 0xb5, 0x20, 0x13, 0xd6, 0x2b, + 0xd5, 0x36, 0xfd, 0xac, 0xe8, 0x41, 0x98, 0xaa, 0x3a, 0x21, 0xc8, 0x54, 0xc1, 0x89, 0xcf, 0x20, + 0xdc, 0xf9, 0x5d, 0xb5, 0x0b, 0x8e, 0x60, 0x62, 0xe3, 0x5f, 0x92, 0x23, 0xf7, 0x00, 0x86, 0x58, + 0xb8, 0x2a, 0x1d, 0x7d, 0x11, 0x91, 0x9a, 0xae, 0xaf, 0x1f, 0x25, 0x44, 0x57, 0x26, 0x39, 0x61, + 0xc2, 0x3c, 0x8d, 0x4c, 0x49, 0x78, 0x1d, 0x5c, 0xbc, 0x74, 0x3f, 0x92, 0x5a, 0x61, 0x6d, 0xb6, + 0x2c, 0xe1, 0xd9, 0xbf, 0x87, 0xd8, 0xc7, 0xcc, 0x4d, 0x55, 0xc8, 0x86, 0x8a, 0x99, 0x8e, 0x60, + 0x2a, 0x5c, 0x1c, 0x23, 0xf3, 0x6c, 0x0a, 0xd7, 0xf6, 0x8f, 0x9c, 0xbe, 0x97, 0x49, 0x7d, 0x46, + 0xba, 0xf3, 0xc6, 0x9c, 0x7d, 0xd0, 0xa9, 0xa3, 0x10, 0xab, 0x78, 0x8c, 0x62, 0x63, 0x43, 0x9a, + 0x31, 0x73, 0x34, 0xcc, 0xcf, 0x24, 0xe3, 0x61, 0xf1, 0xbb, 0x91, 0xc3, 0x14, 0x3b, 0x33, 0x7f, + 0x8e, 0xae, 0xf6, 0x78, 0xed, 0xe0, 0x6e, 0x8e, 0xcd, 0x3b, 0x9b, 0x86, 0x36, 0xe1, 0x42, 0x86, + 0xb5, 0x66, 0xeb, 0xbc, 0x35, 0x53, 0xbc, 0xf2, 0x19, 0x24, 0xfe, 0x3c, 0x03, 0xf4, 0x76, 0xee, + 0x26, 0x79, 0x48, 0x97, 0x34, 0xd1, 0x1a, 0xe0, 0xee, 0xe6, 0xcc, 0x94, 0xb8, 0x44, 0x37, 0xad, + 0x67, 0xb4, 0x60, 0x8c, 0xfe, 0xdf, 0x24, 0xcd, 0xe5, 0x84, 0xc3, 0x78, 0x26, 0x4e, 0x42, 0x0e, + 0x06, 0xf3, 0xf1, 0xa3, 0xf9, 0x32, 0x53, 0xe1, 0x9f, 0x09, 0x58, 0x9a, 0x9d, 0xc3, 0x03, 0x8b, + 0x01, 0xc0, 0x3b, 0xb8, 0xac, 0xf4, 0x6a, 0xb1, 0xab, 0x4c, 0xf7, 0x4f, 0x09, 0x75, 0x98, 0x98, + 0x95, 0x64, 0x86, 0xbd, 0x5f, 0x53, 0x3a, 0xfb, 0xad, 0x8c, 0x60, 0x93, 0xb2, 0xca, 0x4e, 0xe5, + 0x52, 0xf1, 0x9a, 0x3f, 0x93, 0xda, 0x69, 0x4f, 0x31, 0x9b, 0xc2, 0xca, 0x3e, 0xe2, 0xcc, 0x46, + 0x1a, 0xe1, 0x72, 0x59, 0xdb, 0xab, 0x1d, 0x12, 0x53, 0xde, 0xae, 0x21, 0x78, 0x42, 0x5d, 0xb3, + 0x96, 0x82, 0x94, 0x82, 0xc3, 0x3e, 0x8d, 0xbb, 0x5a, 0xd3, 0xd1, 0x9d, 0x3f, 0xc4, 0xb6, 0xe7, + 0xab, 0xae, 0xe0, 0x36, 0x8a, 0x0f, 0x5f, 0xeb, 0x9f, 0x37, 0xc0, 0x2c, 0x03, 0x8d, 0xec, 0x8b, + 0x11, 0xd9, 0x33, 0xfc, 0x30, 0x39, 0x44, 0x90, 0xe1, 0xe7, 0x4f, 0x2e, 0x42, 0xda, 0x91, 0x07, + 0x19, 0x19, 0x0d, 0xd4, 0xec, 0x19, 0x8a, 0xd5, 0x43, 0xb3, 0xc6, 0x05, 0x5c, 0x37, 0x77, 0xef, + 0x1e, 0xb6, 0x6b, 0x88, 0xeb, 0x69, 0x4b, 0xfd, 0x83, 0x0b, 0xdf, 0x29, 0xef, 0xc1, 0x48, 0xeb, + 0xb1, 0x84, 0x3a, 0x1b, 0x3a, 0xa9, 0xc5, 0xac, 0x6c, 0x87, 0x7e, 0x50, 0x13, 0x31, 0x3c, 0x0c, + 0x98, 0xfd, 0x44, 0x0a, 0x78, 0xcf, 0xd6, 0x87, 0xe7, 0xe1, 0x62, 0x0c, 0xa6, 0x29, 0x3f, 0x63, + 0xa4, 0xfc, 0xd6, 0xc7, 0x98, 0x21, 0xdf, 0x64, 0x60, 0x29, 0xc5, 0xa6, 0xa6, 0xfb, 0xc0, 0xa5, + 0x17, 0x8c, 0xbe, 0x70, 0x15, 0xa4, 0xe0, 0x22, 0x85, 0xfa, 0x37, 0x78, 0x16, 0xeb, 0xb4, 0x17, + 0x92, 0x7d, 0xff, 0x92, 0x82, 0xf1, 0x72, 0x54, 0x18, 0x36, 0x52, 0x0c, 0xa9, 0xa6, 0x3d, 0xd8, + 0xbb, 0xda, 0xc4, 0xa1, 0xa4, 0x6e, 0x29, 0x72, 0xf2, 0x2c, 0x21, 0x81, 0xbb, 0x55, 0x07, 0xf0, + 0x6a, 0x5e, 0xfb, 0xbb, 0xb2, 0x2a, 0x4b, 0xda, 0x4b, 0x5b, 0xa9, 0x54, 0xdf, 0x87, 0x9c, 0x5d, + 0x7e, 0xb1, 0xe6, 0x3e, 0xec, 0xeb, 0xac, 0x2e, 0x8d, 0xc5, 0x71, 0x19, 0x99, 0x92, 0x11, 0xc0, + 0xc2, 0xbe, 0x5e, 0x17, 0xb4, 0x8e, 0x4f, 0x73, 0x2c, 0x23, 0x45, 0x97, 0xa6, 0x8c, 0x72, 0x3d, + 0x0e, 0x26, 0x8a, 0x7a, 0x7f, 0x9b, 0x2c, 0xca, 0x99, 0xea, 0x17, 0x8b, 0x6e, 0x96, 0x5c, 0x91, + 0x31, 0x08, 0x2a, 0xbe, 0x76, 0x88, 0xed, 0xcf, 0xcf, 0x42, 0x48, 0x45, 0x44, 0xe9, 0xf7, 0x4b, + 0x90, 
0x8e, 0x09, 0x41, 0xed, 0x54, 0xe4, 0x1f, 0xc5, 0x0b, 0x68, 0x4d, 0x2e, 0x64, 0x8e, 0x24, + 0xd1, 0xf4, 0x59, 0x63, 0xdd, 0x00, 0xe1, 0x51, 0xab, 0xfa, 0x55, 0xbf, 0x22, 0x4b, 0x4f, 0xf2, + 0x7c, 0x07, 0xe7, 0xe7, 0xf6, 0x68, 0x51, 0x85, 0xc9, 0x64, 0x39, 0xfc, 0x7e, 0xbc, 0x0f, 0xae, + 0x55, 0x1b, 0x8e, 0x41, 0x74, 0x8b, 0x15, 0xf1, 0x4a, 0xb2, 0xc4, 0x65, 0xc5, 0x75, 0xbc, 0x0a, + 0xc2, 0xf4, 0xbd, 0x28, 0xfb, 0x00, 0xfc, 0xed, 0xce, 0xe6, 0xf8, 0x7e, 0x73, 0xaa, 0x99, 0xed, + 0x5b, 0x3b, 0xfb, 0xd5, 0xd0, 0x84, 0x0b, 0xca, 0x0b, 0x62, 0x8e, 0xc5, 0xf6, 0xe1, 0xfd, 0x8e, + 0x2d, 0x6e, 0x80, 0x04, 0xfb, 0xc4, 0x08, 0x66, 0x4e, 0x5d, 0xa4, 0x06, 0x5d, 0xaf, 0xeb, 0x79, + 0xc1, 0x61, 0xaf, 0x72, 0x16, 0xcf, 0xab, 0xa3, 0x4d, 0xf7, 0xe0, 0xda, 0xf4, 0x2f, 0xb2, 0x51, + 0xf4, 0xe9, 0x1f, 0xad, 0xda, 0x0d, 0xaf, 0xb3, 0xc5, 0x8b, 0x17, 0xb3, 0xc1, 0x7f, 0xc6, 0x3e, + 0xcc, 0x93, 0x4f, 0x6d, 0xec, 0x64, 0xcf, 0x3a, 0xd7, 0x79, 0x20, 0x4c, 0x98, 0x1e, 0x90, 0xc8, + 0xdc, 0xe9, 0x28, 0x29, 0xea, 0x52, 0x88, 0x44, 0xd6, 0xb8, 0xc3, 0xe1, 0xb8, 0x62, 0x52, 0xa7, + 0x17, 0x50, 0xbb, 0x3b, 0x36, 0x24, 0x76, 0xc5, 0x58, 0x3b, 0x06, 0xeb, 0xa5, 0x6f, 0x4d, 0xfe, + 0x72, 0x6a, 0xcd, 0xf0, 0xe9, 0x90, 0x63, 0xa4, 0x94, 0x4f, 0xa7, 0x25, 0x5a, 0x6b, 0xc3, 0xde, + 0x2c, 0x30, 0x32, 0x34, 0xa0, 0x84, 0xb4, 0x05, 0x4c, 0xce, 0x7f, 0xe9, 0x63, 0xa4, 0x6a, 0xab, + 0xca, 0xc9, 0x60, 0x56, 0x9f, 0xe0, 0x5e, 0x85, 0x63, 0xa3, 0xe1, 0x2f, 0x4b, 0x4b, 0x7a, 0x87, + 0x66, 0x71, 0x21, 0x6b, 0x98, 0x57, 0xe0, 0x2d, 0x37, 0x16, 0x15, 0x74, 0xce, 0xff, 0x06, 0x75, + 0x66, 0xfe, 0xf8, 0x3a, 0xa6, 0x90, 0x3e, 0xb7, 0xfb, 0x9d, 0x87, 0x9c, 0x95, 0x10, 0x1c, 0x76, + 0xa0, 0x02, 0x77, 0xf6, 0x15, 0xb2, 0xe5, 0xe5, 0x21, 0xfb, 0xf5, 0x35, 0x10, 0x4f, 0x67, 0x9a, + 0x73, 0x70, 0xa5, 0x41, 0x8e, 0xdd, 0x14, 0x50, 0x8e, 0x41, 0x2b, 0x7f, 0x5a, 0x6e, 0x97, 0x35, + 0x04, 0x4e, 0x39, 0x1e, 0x50, 0xbe, 0xd0, 0x8c, 0xc9, 0xd8, 0xab, 0x37, 0xa2, 0x09, 0x7a, 0xf0, + 0x0a, 0xf9, 0xe8, 0x86, 0xb2, 0xd5, 0x98, 0xce, 0x8d, 0x1c, 0x94, 0x32, 0x3b, 0xdc, 0x0d, 0x52, + 0x69, 0x4d, 0xf7, 0xcc, 0xbf, 0x45, 0x76, 0x89, 0xbc, 0x07, 0xc9, 0xcc, 0xc1, 0x1b, 0x1d, 0xc6, + 0x37, 0xc7, 0xa7, 0xb9, 0x44, 0x77, 0x92, 0x5f, 0xb9, 0xb5, 0x5b, 0xa4, 0x74, 0x3a, 0x9b, 0x88, + 0x39, 0x85, 0x4e, 0x17, 0x3f, 0xfa, 0x43, 0x60, 0x23, 0x93, 0x4e, 0x4e, 0x68, 0xbb, 0xf4, 0x6a, + 0xd9, 0xb3, 0x35, 0xab, 0x0e, 0x31, 0xc1, 0x27, 0x7f, 0x74, 0xf3, 0x48, 0x45, 0x7b, 0x6b, 0x3c, + 0x9d, 0x32, 0x0f, 0x9d, 0x07, 0x48, 0x2b, 0x4c, 0xb7, 0x52, 0x86, 0x9b, 0x9d, 0x8c, 0xab, 0x47, + 0xe6, 0xe5, 0x79, 0xf6, 0xe1, 0x34, 0x31, 0x72, 0x32, 0x85, 0x4e, 0x55, 0x50, 0x3c, 0x16, 0xa0, + 0xf0, 0x2a, 0xe6, 0x61, 0x05, 0x3d, 0x68, 0xc6, 0x97, 0x3f, 0x94, 0xf6, 0x9a, 0xcf, 0xef, 0x06, + 0x48, 0x82, 0xff, 0x60, 0x39, 0x14, 0xf9, 0xbb, 0x1c, 0xd9, 0x14, 0x9d, 0x60, 0x09, 0xc4, 0xd7, + 0xf9, 0x98, 0x75, 0x47, 0xb9, 0xe7, 0x94, 0x1c, 0x0e, 0xd2, 0xe4, 0x5b, 0xf1, 0xac, 0x33, 0xe0, + 0x10, 0x34, 0x61, 0xdd, 0xbb, 0x6e, 0xa4, 0xb5, 0xb1, 0x48, 0x2a, 0x6b, 0x7b, 0xfb, 0xff, 0x0e, + 0x2a, 0xbc, 0x78, 0xb6, 0x89, 0x36, 0xf0, 0x59, 0xc5, 0x7a, 0x76, 0x2e, 0x1e, 0xcb, 0xc7, 0xd9, + 0x92, 0x9e, 0x76, 0x59, 0x3b, 0xe4, 0x40, 0xec, 0x40, 0xe9, 0x23, 0xcb, 0x4f, 0xf7, 0x51, 0xc2, + 0xd4, 0x17, 0x88, 0x39, 0x63, 0x35, 0xd9, 0x92, 0xb5, 0x43, 0x71, 0xf2, 0x6a, 0xa1, 0x79, 0x77, + 0xc2, 0x1c, 0x38, 0x36, 0xd1, 0xa4, 0xc4, 0x49, 0x61, 0xc7, 0x29, 0x73, 0xf9, 0x95, 0x12, 0x9c, + 0xc6, 0x5a, 0xeb, 0x1a, 0x67, 0xfc, 0x42, 0x82, 0x85, 0xe2, 0x73, 0x4a, 0x94, 0xf9, 0xc8, 0xa6, + 0x51, 0x0a, 0xfe, 0x36, 0x7f, 
0x94, 0xdc, 0x29, 0x94, 0x1e, 0xc1, 0x43, 0xac, 0x5f, 0x9c, 0xfc, + 0xb7, 0x0e, 0x6f, 0x40, 0x63, 0x80, 0x8c, 0x2f, 0x4c, 0x21, 0xb9, 0xe8, 0x10, 0x5a, 0xcb, 0x47, + 0x55, 0x34, 0xc0, 0x73, 0xac, 0x0f, 0x0f, 0xad, 0x67, 0xa0, 0xdc, 0x51, 0x35, 0x66, 0xfe, 0xae, + 0x16, 0x41, 0xe5, 0x69, 0xc5, 0x18, 0x3c, 0x6e, 0x9e, 0x70, 0xc8, 0xa4, 0x27, 0x84, 0xfe, 0x89, + 0xe5, 0xf6, 0x8c, 0x49, 0x6a, 0xae, 0x7e, 0x1d, 0x5d, 0x02, 0xf2, 0x2e, 0x87, 0xaa, 0xe0, 0x82, + 0x36, 0x2f, 0x8f, 0x73, 0x6f, 0x4b, 0x3a, 0xf0, 0x1a, 0x1e, 0x2a, 0xdf, 0x40, 0x11, 0xfb, 0x4d, + 0x26, 0xd0, 0x14, 0x80, 0xc8, 0xb1, 0x67, 0xd1, 0xf2, 0x32, 0x63, 0xf8, 0x53, 0x66, 0x4c, 0x83, + 0xa7, 0x22, 0x3d, 0xa5, 0x34, 0x46, 0x9d, 0xdd, 0x4f, 0xf4, 0x73, 0x1f, 0xc4, 0xc6, 0xff, 0xd3, + 0x87, 0xa0, 0x78, 0x9f, 0x7b, 0x1f, 0xc5, 0x2c, 0x05, 0xa7, 0x60, 0xfc, 0xbc, 0x81, 0x66, 0x37, + 0x65, 0x00, 0xdf, 0xd9, 0xa5, 0x41, 0xfc, 0xa2, 0xd5, 0x59, 0x75, 0xee, 0x8e, 0xc9, 0xd3, 0xc4, + 0x3b, 0x17, 0xad, 0xd7, 0x55, 0x2c, 0x44, 0x54, 0x10, 0xb4, 0x1d, 0xd7, 0x28, 0x65, 0xe1, 0x89, + 0x0e, 0x4d, 0x6d, 0x06, 0x77, 0x9a, 0x7b, 0x80, 0xcc, 0x86, 0x1a, 0xd1, 0x20, 0x2c, 0x0b, 0x25, + 0x10, 0x37, 0x5e, 0xf0, 0xe8, 0xf4, 0xf9, 0x39, 0x79, 0x92, 0xd0, 0x96, 0xfa, 0x3c, 0xf5, 0xc8, + 0x3f, 0x5f, 0xe7, 0x57, 0x6c, 0x22, 0xa5, 0xf2, 0xed, 0xc3, 0x9c, 0x10, 0x61, 0xf6, 0x89, 0x0c, + 0x26, 0x3b, 0x87, 0x4b, 0x59, 0xbd, 0x31, 0xa5, 0xb6, 0xa4, 0xd0, 0x0b, 0x30, 0xd1, 0x59, 0xca, + 0xf8, 0xa2, 0x4d, 0xb2, 0x58, 0x7b, 0x69, 0xe5, 0x90, 0x73, 0x11, 0x53, 0x43, 0x66, 0x79, 0x73, + 0xd0, 0x8f, 0xa2, 0x21, 0xf2, 0x59, 0xf9, 0x47, 0x81, 0x1a, 0xff, 0x01, 0x19, 0x95, 0x04, 0x95, + 0x32, 0x51, 0x89, 0xd2, 0x9f, 0x38, 0xd5, 0xf9, 0x1f, 0x62, 0x52, 0xeb, 0xe3, 0x67, 0x93, 0x48, + 0xe5, 0xa3, 0x61, 0x53, 0xcd, 0x2b, 0x08, 0x5b, 0xec, 0xbd, 0x27, 0x63, 0x89, 0xe5, 0xea, 0x72, + 0x9d, 0xc4, 0xca, 0x28, 0xc5, 0xa8, 0x97, 0x36, 0xe5, 0xd5, 0x81, 0x87, 0x24, 0x29, 0x79, 0xf7, + 0xa8, 0x4f, 0xac, 0xab, 0x54, 0x14, 0x01, 0xff, 0x9e, 0x0f, 0x58, 0x45, 0xfc, 0x80, 0x8a, 0x3a, + 0xa7, 0x33, 0x18, 0x75, 0x23, 0x7d, 0x7c, 0x3f, 0x2e, 0x07, 0xa5, 0x03, 0x9c, 0x33, 0x40, 0x6e, + 0x53, 0x9d, 0xd3, 0x6e, 0x9b, 0x26, 0xc0, 0x45, 0x8a, 0xd1, 0xeb, 0x30, 0xf2, 0x7e, 0x7f, 0x64, + 0x73, 0x45, 0xb7, 0x80, 0x24, 0xeb, 0x7b, 0x1c, 0x5e, 0x6d, 0x4d, 0xcf, 0x2f, 0xce, 0xcf, 0xa8, + 0x1b, 0x25, 0xdd, 0xd7, 0xfc, 0x89, 0xc8, 0x47, 0xa2, 0xd2, 0x6b, 0x28, 0x96, 0x05, 0x77, 0xf1, + 0x8a, 0x58, 0x9c, 0xbb, 0xd0, 0x20, 0x79, 0x09, 0xbd, 0xcf, 0x98, 0xbd, 0xd0, 0xb4, 0x69, 0xd2, + 0x8c, 0x4b, 0x9c, 0xb4, 0x71, 0x1b, 0x33, 0xf8, 0x52, 0xac, 0xab, 0x3f, 0xd5, 0x20, 0x0b, 0x6b, + 0x7d, 0xb4, 0xac, 0x05, 0x0a, 0xe9, 0xa4, 0x72, 0xce, 0xa7, 0xcf, 0xbe, 0x34, 0xf2, 0xad, 0xd2, + 0xc3, 0x09, 0x63, 0xea, 0xd4, 0x5f, 0xfb, 0xac, 0xa5, 0x35, 0x29, 0x2f, 0x08, 0xd4, 0x4e, 0x16, + 0xa0, 0x2a, 0xe6, 0x91, 0x45, 0xfe, 0xdf, 0x0d, 0x35, 0x2c, 0x04, 0x23, 0xec, 0xfa, 0x2b, 0x65, + 0xd1, 0x06, 0xeb, 0x54, 0x02, 0x86, 0x84, 0x1a, 0x54, 0xbc, 0x82, 0x05, 0xf6, 0x04, 0xbd, 0xd4, + 0x7e, 0x28, 0x30, 0x63, 0x0b, 0xc9, 0x9a, 0x7e, 0x0f, 0x4e, 0x02, 0x62, 0xdb, 0x3b, 0x0c, 0xc7, + 0x99, 0x56, 0xe6, 0xb4, 0xbd, 0x83, 0x25, 0x26, 0x55, 0x09, 0xbb, 0xfc, 0x5a, 0x6f, 0x6e, 0x37, + 0xff, 0xca, 0xd5, 0x70, 0x94, 0xc1, 0x28, 0xc1, 0x2b, 0x48, 0x4a, 0x37, 0xe5, 0xc1, 0xde, 0xf6, + 0xe5, 0x43, 0xae, 0x33, 0xc4, 0x9b, 0x39, 0xd0, 0x82, 0x1b, 0x87, 0xac, 0x96, 0xd6, 0x76, 0xd5, + 0x37, 0x32, 0xa2, 0x5a, 0x4c, 0xb8, 0x27, 0x2e, 0x60, 0x54, 0x93, 0xd7, 0xac, 0xee, 0x81, 0x91, + 0x79, 0x18, 0x91, 0xe4, 0x54, 0x0d, 0x8f, 0x9f, 0xbd, 
0x61, 0xc1, 0xc0, 0x5d, 0xd5, 0x95, 0xd8, + 0xb5, 0x51, 0xb0, 0x94, 0xb9, 0x34, 0xaf, 0xa1, 0x2c, 0xaf, 0x8f, 0x10, 0x85, 0xc0, 0xf3, 0xaa, + 0x4c, 0xa5, 0xf5, 0x00, 0xf8, 0xf8, 0xb7, 0x9f, 0xc6, 0xa9, 0xfa, 0x7b, 0x8b, 0x9e, 0xeb, 0x00, + 0x89, 0x67, 0x08, 0x48, 0x0a, 0x3e, 0xe9, 0x20, 0xd1, 0x7d, 0xa9, 0x68, 0x70, 0x84, 0x94, 0x51, + 0x5a, 0xc0, 0x7c, 0xf2, 0x70, 0xde, 0x0a, 0xdc, 0xd8, 0xdc, 0x35, 0x1e, 0x8e, 0x0d, 0xc6, 0x9b, + 0xcb, 0x5c, 0xed, 0xec, 0xb1, 0xdf, 0x25, 0x29, 0x0f, 0xce, 0xbd, 0x3f, 0x9c, 0x5b, 0x2c, 0xc0, + 0x61, 0xfe, 0xe4, 0x97, 0xd8, 0xbb, 0xf1, 0xf9, 0xd4, 0x3a, 0x79, 0x6b, 0xd9, 0x44, 0xaa, 0x30, + 0x7c, 0x0e, 0xee, 0x19, 0x6a, 0x75, 0x32, 0xe6, 0x10, 0xf3, 0x81, 0x80, 0xb1, 0xdd, 0x88, 0xa5, + 0xcc, 0xba, 0x61, 0xc2, 0x96, 0x31, 0x33, 0x2c, 0xb1, 0x7f, 0x0e, 0xca, 0x15, 0xfd, 0xb5, 0x32, + 0xb0, 0x85, 0x2c, 0x2e, 0xb5, 0x35, 0x44, 0x0b, 0x17, 0x67, 0xdb, 0x2d, 0xa1, 0xee, 0xe3, 0x27, + 0xff, 0xb2, 0x1f, 0xe7, 0x0d, 0x3c, 0x27, 0xe3, 0x42, 0x82, 0xb3, 0x87, 0x40, 0x67, 0x89, 0x67, + 0x74, 0x62, 0xc8, 0xd9, 0x3c, 0xac, 0x60, 0x0a, 0xec, 0xed, 0xbf, 0xeb, 0xce, 0x2c, 0xbd, 0x6b, + 0xf5, 0x93, 0x9f, 0x24, 0x35, 0x36, 0x21, 0xbf, 0x08, 0x86, 0x03, 0x58, 0xb0, 0x2a, 0x7f, 0x12, + 0xd6, 0xbc, 0x10, 0xe9, 0x12, 0xd1, 0x47, 0xcf, 0x81, 0x31, 0x24, 0x2a, 0x77, 0x2b, 0x65, 0xf3, + 0x88, 0xef, 0xde, 0x9f, 0x17, 0x49, 0xe4, 0x6a, 0x2f, 0xcd, 0x9c, 0xb2, 0x21, 0xb3, 0xc3, 0xa8, + 0xc3, 0x19, 0xbf, 0x44, 0xc8, 0xe2, 0x94, 0x81, 0x4b, 0x21, 0xc7, 0x63, 0xe8, 0x43, 0xb0, 0x9b, + 0x72, 0xdd, 0xd5, 0xf1, 0xda, 0xc4, 0xa0, 0xf4, 0xf7, 0x34, 0x1c, 0x3f, 0xc8, 0x11, 0x57, 0x56, + 0x85, 0x40, 0x13, 0xb6, 0x69, 0x41, 0xb8, 0x4f, 0x8b, 0x64, 0x14, 0xdf, 0x45, 0xc7, 0x85, 0xc6, + 0xc7, 0x4c, 0x58, 0x11, 0xf7, 0x11, 0x4f, 0xe2, 0x74, 0xa2, 0x23, 0x70, 0xc9, 0xf9, 0xd2, 0xfc, + 0x2c, 0x7d, 0x6e, 0xe3, 0x95, 0x93, 0xdf, 0x1f, 0xe1, 0x6f, 0x4b, 0x58, 0x53, 0x96, 0xa1, 0xe3, + 0x60, 0x7c, 0xd6, 0xcc, 0xc2, 0x87, 0x48, 0x28, 0x84, 0x22, 0x22, 0xc5, 0x60, 0xb4, 0x9f, 0xdc, + 0xca, 0xd4, 0x5e, 0xe3, 0x19, 0xb6, 0x4f, 0xb3, 0x8c, 0x55, 0x52, 0x57, 0x34, 0x4e, 0x12, 0xd8, + 0xf0, 0x9b, 0x3b, 0x1f, 0xe9, 0x65, 0xe4, 0xde, 0x18, 0x4e, 0x79, 0x42, 0x94, 0x3b, 0x2c, 0x18, + 0x25, 0x95, 0x86, 0x77, 0x38, 0xfb, 0x23, 0xc5, 0x17, 0xd3, 0xbc, 0xc4, 0xa9, 0x30, 0x23, 0x76, + 0xc0, 0x62, 0x3b, 0x55, 0xc8, 0xdd, 0xae, 0x0c, 0xaa, 0x79, 0x30, 0x6b, 0x2b, 0xed, 0x76, 0xf9, + 0xf1, 0xf9, 0xa9, 0x4f, 0x33, 0xee, 0x3d, 0x73, 0x60, 0xe7, 0xf7, 0xa8, 0xb3, 0x6a, 0x2f, 0x7b, + 0x56, 0xe0, 0x3c, 0x67, 0x4c, 0x26, 0x3c, 0xc7, 0x4c, 0x12, 0xd6, 0x24, 0x3b, 0xd2, 0x2d, 0xd6, + 0x93, 0x85, 0xee, 0x62, 0xc2, 0x4d, 0xa4, 0x81, 0x9f, 0xa3, 0xfd, 0xd2, 0xc8, 0x37, 0x6c, 0x74, + 0x7b, 0x36, 0x6c, 0x2e, 0x28, 0x70, 0xb9, 0x48, 0x96, 0x9e, 0xc3, 0xed, 0x60, 0x78, 0x2f, 0xee, + 0x04, 0xee, 0x81, 0x76, 0x98, 0x9b, 0x9d, 0x16, 0x7f, 0x03, 0xf0, 0x5a, 0x98, 0x0f, 0xfe, 0xcd, + 0x05, 0xd6, 0x10, 0x1c, 0x0f, 0xbb, 0x50, 0x09, 0x2a, 0x90, 0xb3, 0x7d, 0xb0, 0xae, 0xd5, 0xa5, + 0xee, 0xcf, 0x15, 0x67, 0xc2, 0xda, 0xcb, 0x65, 0xba, 0xbb, 0xd1, 0x43, 0x32, 0x22, 0x31, 0xd6, + 0x04, 0x43, 0xa8, 0x75, 0x04, 0x10, 0x5c, 0x1f, 0x15, 0xc8, 0x1b, 0xfb, 0x25, 0x31, 0xcc, 0x0b, + 0xe1, 0x4c, 0xcc, 0x62, 0x91, 0x3b, 0x14, 0x13, 0x79, 0x70, 0x1a, 0x63, 0x12, 0xa3, 0xde, 0x8b, + 0x50, 0xec, 0x53, 0xb6, 0xf8, 0x44, 0x38, 0x7b, 0x2d, 0xcc, 0x55, 0x77, 0x8c, 0x72, 0x8a, 0x3b, + 0xc5, 0x61, 0x08, 0x03, 0x95, 0x29, 0xf8, 0xf4, 0xea, 0xae, 0x25, 0x13, 0x32, 0x96, 0x67, 0x5e, + 0xed, 0xb9, 0x76, 0xf2, 0x8c, 0xc2, 0x9e, 0x8f, 0x86, 0x7f, 0xfa, 0xba, 0xcb, 
0xff, 0xae, 0x33, + 0x51, 0x34, 0x8b, 0x13, 0xfe, 0x5e, 0xe6, 0x6f, 0x0c, 0x2d, 0x97, 0xc6, 0xaf, 0x58, 0x76, 0x6d, + 0x7a, 0x1b, 0x62, 0x70, 0xb1, 0xcb, 0xc8, 0xd0, 0x02, 0x02, 0x9c, 0xb3, 0xce, 0xb1, 0x9e, 0x53, + 0x65, 0x50, 0xc7, 0xe4, 0xe3, 0xa3, 0x26, 0xbb, 0x80, 0x8d, 0x2f, 0x0d, 0x0e, 0x7f, 0x9a, 0xd6, + 0xfb, 0x82, 0x85, 0x67, 0xfc, 0x65, 0xdf, 0xc8, 0xab, 0xe7, 0x68, 0xd8, 0xa4, 0x94, 0x55, 0x50, + 0x4f, 0xc4, 0x05, 0xb5, 0xbf, 0x99, 0xd6, 0x74, 0x81, 0x1e, 0x5f, 0x65, 0x60, 0x0c, 0x83, 0x0b, + 0xdf, 0xa4, 0x7f, 0x0a, 0x59, 0xd2, 0xd2, 0x68, 0xd0, 0x67, 0x99, 0x2f, 0xab, 0xf5, 0x0f, 0x5a, + 0x6c, 0xd7, 0x25, 0xfb, 0x33, 0xbd, 0x6a, 0x57, 0x47, 0xbd, 0xcf, 0x4e, 0x17, 0x91, 0x7c, 0xc9, + 0x7d, 0x40, 0x28, 0xa7, 0x7b, 0x1c, 0x60, 0x31, 0xb1, 0x52, 0xec, 0x72, 0x32, 0x55, 0x04, 0x70, + 0x0f, 0xc7, 0x0b, 0x81, 0xe1, 0xeb, 0x94, 0x43, 0x3a, 0x0b, 0x01, 0x18, 0xcb, 0x0f, 0x5c, 0xc7, + 0x8f, 0x2c, 0xb3, 0x79, 0x2f, 0x6d, 0x91, 0x08, 0x92, 0x03, 0x89, 0x67, 0x4c, 0xbb, 0xc8, 0x14, + 0xe1, 0x83, 0x97, 0x62, 0x91, 0xcd, 0xe9, 0x07, 0x0c, 0x67, 0x06, 0x0b, 0x31, 0x00, 0xa0, 0x5e, + 0xf4, 0x44, 0xa8, 0x37, 0x97, 0x2b, 0xcf, 0x6d, 0x67, 0xa3, 0xba, 0xa4, 0xab, 0xe7, 0x56, 0xa2, + 0x5e, 0x0c, 0x2a, 0xe5, 0x03, 0x4c, 0xac, 0x03, 0x3f, 0x7a, 0x38, 0x1d, 0x43, 0xc6, 0x21, 0xf6, + 0x2f, 0x96, 0x66, 0x94, 0xfd, 0xf3, 0x19, 0xa1, 0x87, 0x13, 0xb4, 0x29, 0x77, 0x18, 0xda, 0xa0, + 0xd8, 0xff, 0x6d, 0x93, 0xd5, 0x63, 0xf6, 0x35, 0x59, 0xc8, 0xf0, 0x55, 0x94, 0x93, 0xb4, 0x00, + 0xb9, 0xf1, 0x6a, 0x78, 0x92, 0x58, 0x6d, 0xcb, 0xf6, 0x25, 0x28, 0x54, 0xcc, 0x3c, 0x3b, 0xbd, + 0xab, 0xca, 0x4a, 0xa8, 0xe4, 0xe0, 0xae, 0x6a, 0xeb, 0xdb, 0xc7, 0x67, 0xdc, 0xe5, 0xe8, 0xf5, + 0x3d, 0x0a, 0xcd, 0x86, 0xc4, 0x0d, 0xdb, 0x2a, 0x22, 0xe7, 0xc7, 0xba, 0x3e, 0x90, 0x67, 0x68, + 0xcb, 0x5b, 0x5e, 0x36, 0xb9, 0x1c, 0xfe, 0x50, 0x46, 0x41, 0xb3, 0x6c, 0x6e, 0x5d, 0xd8, 0x8c, + 0x3d, 0xf7, 0x53, 0xc6, 0x39, 0xc1, 0x51, 0x99, 0xe8, 0x86, 0xf3, 0xd8, 0x82, 0xf0, 0xba, 0x92, + 0xba, 0x1d, 0x00, 0x14, 0xc9, 0x12, 0x8c, 0xaf, 0xb1, 0xff, 0x8a, 0xb5, 0x0a, 0xd8, 0x5a, 0x86, + 0x44, 0xb5, 0xd8, 0xfb, 0xf1, 0x4e, 0x7e, 0xac, 0xc7, 0x7b, 0xd5, 0xa7, 0x2e, 0x01, 0x7f, 0xa9, + 0xc2, 0x15, 0x6f, 0xfb, 0x5a, 0x1a, 0xfc, 0x85, 0xaa, 0xf8, 0xe4, 0xa5, 0xf0, 0x2e, 0xa9, 0x4b, + 0xd2, 0x0e, 0xa7, 0xcb, 0x4d, 0x58, 0x7c, 0x7f, 0x8a, 0xf7, 0x67, 0xe5, 0xbe, 0x5d, 0xd4, 0x02, + 0x8a, 0x2c, 0xf3, 0xb6, 0xf9, 0xbd, 0x63, 0xcd, 0x27, 0x6b, 0x3b, 0x23, 0xfa, 0x6a, 0x2b, 0x51, + 0x4b, 0xf2, 0xf3, 0x1f, 0x39, 0xc6, 0xd4, 0x51, 0xcf, 0xef, 0x0b, 0x74, 0xf1, 0x98, 0x37, 0x87, + 0xd6, 0x4c, 0x93, 0xd7, 0xde, 0xb6, 0x1b, 0x92, 0xbc, 0xbb, 0x45, 0x44, 0xb0, 0x61, 0x13, 0x3b, + 0xa8, 0x1b, 0xe7, 0x62, 0x45, 0xaa, 0xd1, 0xf6, 0xe0, 0x49, 0x8d, 0xd6, 0x03, 0xba, 0x02, 0xb9, + 0x61, 0x9f, 0x16, 0xb9, 0xae, 0xbd, 0x4f, 0xe1, 0x1c, 0xcd, 0xc3, 0x33, 0xcd, 0x51, 0x21, 0xe7, + 0x44, 0xa7, 0x1a, 0x24, 0xc0, 0x1a, 0x42, 0x81, 0xc1, 0xaa, 0x08, 0x0c, 0x49, 0xd5, 0x87, 0x5b, + 0xfb, 0xb0, 0x98, 0x92, 0x49, 0xe6, 0xb1, 0xe0, 0xb5, 0xbd, 0xaf, 0xc9, 0x78, 0x07, 0xb5, 0x3b, + 0x60, 0xa9, 0x6f, 0xb5, 0xa2, 0xe5, 0x1c, 0xbd, 0x42, 0xe1, 0x0e, 0x4e, 0x76, 0x06, 0x4c, 0x1c, + 0x23, 0x2b, 0xcb, 0x18, 0x29, 0xbf, 0x30, 0x82, 0x7a, 0xd3, 0x33, 0x7b, 0xc9, 0xca, 0xb9, 0x29, + 0xdb, 0xf7, 0xfb, 0x1e, 0x03, 0x1f, 0x07, 0xc5, 0xb8, 0xb4, 0x4d, 0x4d, 0xc3, 0xfd, 0x07, 0xc4, + 0x46, 0x0d, 0x18, 0xcb, 0x79, 0x9e, 0x46, 0xb5, 0xeb, 0x8e, 0x13, 0x62, 0xef, 0xcf, 0xa1, 0xda, + 0x1e, 0x9e, 0xef, 0x50, 0x87, 0x0a, 0xd1, 0x25, 0x38, 0xb7, 0x83, 0x85, 0x06, 0x15, 0xc1, 0x79, + 0x1b, 
0x14, 0x77, 0x3c, 0x21, 0x72, 0x7e, 0x79, 0x7d, 0xb0, 0x22, 0x9d, 0xcd, 0x62, 0x71, 0xa9, + 0x15, 0xda, 0xad, 0x57, 0x01, 0x64, 0x7a, 0xa5, 0x92, 0xb3, 0x31, 0xa1, 0x4d, 0x46, 0x24, 0x75, + 0x4f, 0x73, 0x77, 0x61, 0xd4, 0x10, 0x0f, 0x69, 0x52, 0x39, 0xe9, 0xb0, 0xb7, 0x5f, 0xed, 0xcf, + 0x82, 0x9c, 0xb2, 0x71, 0xac, 0x2f, 0x3b, 0xea, 0xab, 0xf4, 0x30, 0x29, 0x14, 0x45, 0xdc, 0x59, + 0xaf, 0xba, 0xf9, 0x3e, 0x75, 0x4c, 0x2e, 0x7f, 0x1f, 0xec, 0x2c, 0xc1, 0x8a, 0x86, 0xb2, 0xfa, + 0xcf, 0xac, 0x1a, 0x9b, 0x6f, 0xea, 0xef, 0x6a, 0xc7, 0x9d, 0x15, 0x16, 0x85, 0x13, 0xb8, 0xda, + 0xf8, 0x3a, 0x41, 0xcc, 0x8d, 0xe1, 0xd2, 0x05, 0xf4, 0xe5, 0x95, 0x43, 0x22, 0x94, 0x44, 0x3f, + 0xf0, 0x31, 0x84, 0x56, 0x38, 0x17, 0x53, 0xb0, 0x26, 0x17, 0xa5, 0x8f, 0xcb, 0x3a, 0x4e, 0x6d, + 0x6a, 0xaa, 0x8f, 0xcd, 0x74, 0x61, 0xa2, 0xd3, 0x08, 0x69, 0xaf, 0x21, 0x43, 0xbf, 0xb8, 0x8a, + 0x7d, 0x05, 0x9b, 0x19, 0xdb, 0xc9, 0x1e, 0xe7, 0x0f, 0xd6, 0x66, 0xc5, 0xea, 0xb2, 0x05, 0x8c, + 0xbe, 0x56, 0xcb, 0xea, 0xc1, 0x9f, 0xcb, 0xab, 0x1d, 0x72, 0x77, 0xa2, 0xbc, 0x72, 0x8e, 0x0c, + 0x95, 0x8e, 0x7c, 0x84, 0x0f, 0xe3, 0xbd, 0x8d, 0x81, 0xaf, 0xcf, 0x80, 0xca, 0x85, 0xbc, 0x69, + 0x0e, 0xc5, 0x12, 0xd0, 0x39, 0xdd, 0x10, 0xbe, 0x04, 0xfc, 0x7b, 0xff, 0x92, 0x13, 0xb4, 0x87, + 0xbb, 0xdc, 0xb7, 0xfd, 0x51, 0xe0, 0xed, 0x5e, 0xcb, 0x5f, 0xa1, 0xd2, 0x0d, 0x77, 0x18, 0x63, + 0xaf, 0x28, 0xfa, 0x50, 0x2c, 0x3a, 0xee, 0xed, 0xa2, 0x3a, 0x51, 0x3b, 0x9c, 0x90, 0x16, 0xd5, + 0x76, 0xda, 0x84, 0xf3, 0xa6, 0x18, 0x2a, 0x78, 0x4a, 0x71, 0xf6, 0x63, 0xcd, 0x42, 0x7b, 0x73, + 0x1a, 0xd1, 0xb7, 0x93, 0x86, 0x50, 0xd1, 0xf4, 0x81, 0x68, 0x0a, 0x98, 0xfc, 0x1a, 0xd1, 0xc4, + 0x85, 0xd7, 0xa4, 0x0e, 0x77, 0x1a, 0xea, 0x6d, 0x9c, 0x7b, 0x39, 0x96, 0xfb, 0xac, 0x3b, 0xc4, + 0xd1, 0x94, 0xbf, 0x5a, 0x1b, 0xa4, 0xb1, 0xee, 0xc7, 0x3e, 0xc1, 0x1f, 0x33, 0xf1, 0xd7, 0x01, + 0x0c, 0x82, 0x4b, 0x42, 0x61, 0xdf, 0x67, 0xd8, 0xe8, 0x3f, 0x41, 0x51, 0xce, 0xe1, 0x23, 0x82, + 0x06, 0xe9, 0x9b, 0xf1, 0xa4, 0x33, 0x5a, 0x09, 0xc7, 0x5d, 0x47, 0xc9, 0x54, 0xc6, 0x51, 0xa4, + 0x2f, 0x6a, 0x2a, 0x5a, 0x75, 0xbd, 0xa0, 0x9d, 0x99, 0x10, 0xdc, 0xf1, 0xa1, 0xac, 0xe7, 0xf7, + 0xa5, 0x52, 0x30, 0xba, 0x45, 0x59, 0x26, 0x04, 0xd0, 0xd2, 0x04, 0x04, 0x4c, 0xef, 0x8b, 0xe8, + 0xce, 0x7d, 0xe5, 0x48, 0x2c, 0xa1, 0xd9, 0x54, 0xbd, 0xf8, 0x6d, 0x11, 0x56, 0x3c, 0x6b, 0x95, + 0x63, 0x41, 0x15, 0x7b, 0xfc, 0x64, 0xf9, 0x11, 0x66, 0x1b, 0xac, 0x1f, 0x40, 0xa0, 0xc0, 0x7e, + 0x31, 0x81, 0x5c, 0xbf, 0x0c, 0xf7, 0xd8, 0x59, 0xd1, 0x67, 0x4a, 0xeb, 0x2a, 0xea, 0xee, 0xda, + 0x65, 0x92, 0x15, 0x42, 0x14, 0x10, 0x1c, 0xd4, 0x48, 0x29, 0xec, 0x7b, 0xbb, 0x9d, 0x94, 0x97, + 0xde, 0x51, 0x9d, 0xbf, 0xf7, 0x78, 0x2e, 0xf9, 0xb3, 0x04, 0x1c, 0xab, 0x5f, 0x4c, 0xe1, 0xbe, + 0xec, 0x5b, 0xec, 0xbf, 0x3c, 0x87, 0xe4, 0xe5, 0xfb, 0x09, 0x56, 0x49, 0xf6, 0x46, 0x6a, 0x38, + 0x18, 0x18, 0x41, 0xf8, 0xe7, 0xb3, 0x6a, 0xf9, 0x22, 0x7b, 0xb8, 0xca, 0x15, 0x9e, 0x32, 0x06, + 0x64, 0x35, 0xc5, 0x17, 0xed, 0x3e, 0x91, 0xf0, 0x49, 0x2e, 0xa9, 0xb5, 0x2a, 0x51, 0xfe, 0xc3, + 0xfc, 0x34, 0x17, 0x89, 0xc6, 0x9b, 0xaa, 0x63, 0x29, 0x8c, 0xec, 0xbd, 0xa0, 0x01, 0x7e, 0xc9, + 0x4b, 0x82, 0x90, 0x1f, 0xe4, 0x77, 0x6f, 0x06, 0x2f, 0x7b, 0x07, 0x9f, 0xd7, 0x9e, 0x50, 0x04, + 0xa1, 0xfb, 0x8a, 0x63, 0xe9, 0xd6, 0xaf, 0xc2, 0x80, 0xa4, 0x9c, 0x35, 0xb7, 0x60, 0xaf, 0xb7, + 0x57, 0xe2, 0xfc, 0xeb, 0x26, 0x74, 0x21, 0xb6, 0xa2, 0xb8, 0xe8, 0x84, 0x42, 0x5b, 0x1c, 0xa1, + 0x85, 0xc0, 0xcc, 0x83, 0x91, 0xb8, 0x00, 0x7b, 0x70, 0x81, 0x8b, 0x41, 0x17, 0x62, 0xa9, 0xea, + 0x9d, 0x4d, 0x88, 0x7b, 0x7b, 
0x1f, 0x89, 0xf3, 0x8e, 0x8f, 0x98, 0x90, 0xd7, 0x32, 0x11, 0xc5, + 0x8a, 0x02, 0x93, 0x5e, 0x74, 0x60, 0x63, 0xee, 0xb0, 0xf9, 0xa7, 0x2e, 0x86, 0xc1, 0xc5, 0x9a, + 0x1e, 0x12, 0x2f, 0x23, 0x09, 0xf2, 0x0f, 0xfc, 0x91, 0xd8, 0x6e, 0x97, 0xaf, 0x54, 0x5c, 0xbd, + 0x7a, 0xa2, 0xa2, 0xc6, 0x8e, 0xbb, 0xc4, 0xb8, 0xf5, 0x5b, 0x7a, 0xd4, 0x24, 0x15, 0x3c, 0x2e, + 0x01, 0xa9, 0x65, 0xcd, 0x47, 0xa0, 0xc1, 0x16, 0xaf, 0xec, 0xcb, 0xba, 0x25, 0x12, 0x84, 0x0b, + 0x95, 0xaa, 0x62, 0x69, 0x60, 0x92, 0x52, 0x6c, 0xf7, 0xf8, 0x05, 0x7d, 0x46, 0x96, 0xdc, 0xc0, + 0x96, 0xcb, 0x68, 0xff, 0xe8, 0x0d, 0x40, 0xe2, 0x50, 0x23, 0x88, 0x30, 0x3e, 0x05, 0x48, 0xa3, + 0xe3, 0xd4, 0xa8, 0xdd, 0x28, 0x58, 0x12, 0x50, 0x6b, 0x56, 0xa3, 0x9b, 0xd7, 0xd8, 0x1d, 0x8c, + 0x90, 0xc5, 0x51, 0x19, 0x5a, 0xd4, 0xb0, 0xa7, 0xc7, 0x16, 0x26, 0xb1, 0x15, 0x49, 0xd7, 0xb6, + 0xff, 0xe5, 0x6f, 0xe5, 0xf3, 0x15, 0xcf, 0x74, 0x91, 0xa5, 0x2b, 0x5f, 0x84, 0x39, 0x4f, 0x25, + 0x9d, 0x87, 0x02, 0x8b, 0xd0, 0x87, 0x86, 0xf4, 0x97, 0x34, 0x79, 0xb7, 0x6b, 0x1b, 0x0f, 0x4a, + 0x09, 0x14, 0x4f, 0xf3, 0x2c, 0x8a, 0x3d, 0xe9, 0x0b, 0xf7, 0xaf, 0x37, 0x30, 0xc9, 0xe6, 0xb9, + 0x6c, 0xc6, 0x35, 0x2a, 0xcb, 0xa4, 0xe6, 0x4b, 0x89, 0xd2, 0xa5, 0x8f, 0xec, 0x02, 0x33, 0x27, + 0x55, 0x19, 0x59, 0xdc, 0xd5, 0x1d, 0xe6, 0x18, 0xa6, 0xdc, 0x49, 0xc3, 0xde, 0xf8, 0xde, 0xb5, + 0x2c, 0x7e, 0xf3, 0xd7, 0x8d, 0xad, 0x66, 0x22, 0x71, 0xc5, 0x4c, 0x5d, 0x8b, 0x6c, 0xbf, 0x22, + 0x49, 0x77, 0xbf, 0x20, 0x36, 0x95, 0x3d, 0x59, 0x17, 0x73, 0x12, 0x0c, 0x09, 0x9a, 0x7f, 0xaf, + 0xf2, 0x5a, 0x82, 0xc0, 0xf4, 0xc7, 0x7f, 0x52, 0x88, 0xea, 0x65, 0x92, 0xa7, 0x6d, 0x98, 0xb5, + 0x91, 0xdb, 0xb4, 0x84, 0x7c, 0x7f, 0x00, 0x84, 0x53, 0xcf, 0x94, 0x14, 0x4d, 0x72, 0xe0, 0x83, + 0x15, 0x35, 0x56, 0x47, 0xd0, 0xaa, 0x37, 0x2b, 0x22, 0x54, 0xb3, 0xf0, 0x78, 0x42, 0x99, 0x5f, + 0x63, 0x55, 0x0d, 0x8f, 0xa6, 0x67, 0x30, 0x90, 0x32, 0x6c, 0x9b, 0x24, 0x56, 0x53, 0xa3, 0x64, + 0xdd, 0xbb, 0x41, 0xc7, 0x5a, 0x7b, 0xf2, 0xe6, 0xfc, 0x01, 0x9c, 0x55, 0xa0, 0x2b, 0xe4, 0x20, + 0x57, 0x2a, 0xcc, 0x27, 0x14, 0x32, 0xab, 0x55, 0x56, 0x5b, 0x85, 0x12, 0xf6, 0xb2, 0xaa, 0x95, + 0xa6, 0x59, 0x62, 0xcf, 0xca, 0xd0, 0xee, 0xd4, 0x95, 0xe6, 0xd5, 0x59, 0x44, 0xc2, 0x1d, 0x37, + 0x61, 0xb8, 0xec, 0x5c, 0x57, 0x46, 0x53, 0x1e, 0xe1, 0x9c, 0xbd, 0x16, 0xa6, 0x59, 0x67, 0xad, + 0x8f, 0x56, 0xbc, 0x95, 0x25, 0x84, 0x6e, 0x68, 0x13, 0x79, 0x1a, 0x38, 0xdc, 0xd5, 0x78, 0x34, + 0x83, 0x99, 0xba, 0x89, 0x56, 0x16, 0xe1, 0xe1, 0x2b, 0xaf, 0x65, 0x3c, 0x38, 0x93, 0xec, 0x24, + 0xf5, 0xb4, 0x93, 0x41, 0x8c, 0x5c, 0x62, 0x67, 0xb9, 0xb8, 0xcc, 0x95, 0xdb, 0x86, 0xb1, 0x53, + 0xb8, 0xbe, 0xfe, 0x6f, 0x18, 0xad, 0x49, 0x6c, 0xb3, 0x9d, 0xa9, 0x11, 0x08, 0x6a, 0xbe, 0xe9, + 0x12, 0x24, 0x94, 0x67, 0x5e, 0xa5, 0x55, 0xfe, 0x41, 0x93, 0x2e, 0xb5, 0x4e, 0xa2, 0xac, 0xb9, + 0x3e, 0x9a, 0x5a, 0xff, 0x7c, 0xf8, 0x84, 0x95, 0x85, 0xec, 0xb8, 0x0f, 0xa3, 0xb3, 0x13, 0x88, + 0xe5, 0x53, 0x75, 0x59, 0x73, 0xb3, 0xdc, 0x76, 0x34, 0x95, 0x31, 0xe6, 0xde, 0xd2, 0x4d, 0x1e, + 0x42, 0xde, 0x47, 0xd1, 0x85, 0xc1, 0x7b, 0x90, 0x2b, 0x70, 0x33, 0x79, 0x0b, 0x87, 0x29, 0x92, + 0xf3, 0x9c, 0x2d, 0xd6, 0x66, 0x20, 0x36, 0x2a, 0x3d, 0xeb, 0xb6, 0x05, 0x17, 0x41, 0x71, 0x43, + 0xcd, 0x6f, 0x44, 0x39, 0xaa, 0x70, 0xa7, 0xbf, 0x81, 0x26, 0x42, 0x89, 0x18, 0xe7, 0xa6, 0x58, + 0xeb, 0xf9, 0x2c, 0xab, 0x22, 0x3e, 0x3f, 0x7c, 0x4e, 0xb7, 0x98, 0x65, 0x1d, 0xc9, 0x1e, 0x54, + 0x8f, 0xdc, 0x8b, 0x65, 0x46, 0xd6, 0x26, 0xe4, 0x5f, 0xe0, 0x65, 0x76, 0xc4, 0x42, 0xe1, 0x45, + 0x95, 0xb8, 0x53, 0xdf, 0xd0, 0xdf, 0x2d, 0x50, 0xf7, 
0xaa, 0x32, 0x5b, 0xd6, 0x6e, 0xf6, 0xc6, + 0x5c, 0x15, 0xe0, 0xe7, 0xe7, 0x3a, 0xe8, 0x89, 0xc6, 0xbb, 0x9c, 0x5f, 0xce, 0x49, 0x7b, 0x43, + 0x96, 0xac, 0xdf, 0xaf, 0xa4, 0xad, 0xcb, 0xb6, 0x2a, 0x62, 0x95, 0xfd, 0xce, 0x13, 0x6d, 0xf5, + 0xc4, 0x02, 0xc2, 0x8c, 0x5b, 0x45, 0xf7, 0x21, 0x3d, 0xc3, 0xc2, 0x12, 0xaa, 0x89, 0xa0, 0x37, + 0x0b, 0x85, 0x3b, 0x18, 0x24, 0xc0, 0x3a, 0xcd, 0x2d, 0xc9, 0x11, 0x8a, 0xf6, 0x45, 0x40, 0x0b, + 0xcc, 0x41, 0x5b, 0xb7, 0xbd, 0x5c, 0x4e, 0xde, 0x6b, 0xb4, 0xb2, 0x95, 0x24, 0xd5, 0xac, 0xd1, + 0xa9, 0x04, 0xc7, 0x67, 0xd2, 0x6e, 0xce, 0x91, 0x1a, 0x9e, 0xd9, 0x8a, 0xc3, 0x3d, 0x66, 0x26, + 0xdd, 0xbb, 0xef, 0xc9, 0xe0, 0xb8, 0xdc, 0x32, 0x58, 0x16, 0xf3, 0xc3, 0x33, 0x8d, 0x30, 0xd3, + 0x0d, 0x47, 0xb0, 0x1f, 0x7a, 0xe6, 0x6c, 0xaa, 0xc7, 0xf2, 0x75, 0xea, 0xd7, 0x17, 0x98, 0x8a, + 0x9a, 0xe5, 0x6d, 0x60, 0xe1, 0xc5, 0x90, 0x3b, 0x8d, 0x4e, 0x68, 0xf9, 0xcf, 0x0e, 0x1f, 0xcb, + 0xd0, 0x01, 0x60, 0x94, 0x3e, 0x9e, 0x72, 0x46, 0x00, 0x02, 0x06, 0xa8, 0x29, 0x92, 0xcf, 0x34, + 0x51, 0x5c, 0xe5, 0x5d, 0x0d, 0xe6, 0x0d, 0x09, 0xae, 0x54, 0x66, 0x92, 0xae, 0xa6, 0x9e, 0x72, + 0x40, 0xc2, 0x3c, 0x8a, 0xd7, 0xd9, 0x75, 0x83, 0x16, 0xe5, 0x21, 0xbd, 0xd7, 0xb5, 0xcf, 0x50, + 0x17, 0xa7, 0x4e, 0x82, 0x71, 0x5e, 0xd7, 0x21, 0x64, 0x5b, 0x1d, 0xdf, 0xd6, 0x82, 0x77, 0xed, + 0x8c, 0x4b, 0x69, 0x54, 0x4c, 0x67, 0x27, 0xd5, 0xf1, 0xe1, 0x29, 0xd9, 0x74, 0x4c, 0x3d, 0xb0, + 0x37, 0x4c, 0x72, 0xe6, 0x2b, 0x94, 0x7a, 0xca, 0x2d, 0xf6, 0xe3, 0x7c, 0xeb, 0xf7, 0xed, 0xa9, + 0x91, 0x80, 0x2b, 0x35, 0x23, 0x18, 0xbc, 0x86, 0x10, 0x6e, 0xcd, 0x77, 0x0a, 0x68, 0x90, 0x14, + 0xfc, 0x16, 0x3a, 0x9d, 0x0b, 0xdb, 0x93, 0x74, 0x00, 0x47, 0x84, 0x9a, 0x3b, 0xff, 0x06, 0x5a, + 0xca, 0xe0, 0xae, 0x6c, 0xe3, 0xaf, 0x7e, 0x06, 0x93, 0x60, 0x7e, 0x9e, 0x4d, 0xf5, 0x8c, 0xe1, + 0x30, 0x16, 0x37, 0x84, 0x3a, 0x27, 0x83, 0x97, 0xa7, 0xe0, 0xe7, 0x73, 0xb8, 0x9f, 0xdc, 0x06, + 0xd3, 0x27, 0xe3, 0xde, 0x05, 0xe5, 0x61, 0xc7, 0x1d, 0xdf, 0x82, 0xcc, 0x3e, 0x9f, 0x9f, 0x73, + 0x4c, 0x4d, 0x72, 0x82, 0x23, 0xfd, 0xe3, 0x9d, 0x83, 0x50, 0x25, 0x1d, 0xb7, 0x72, 0xec, 0x31, + 0xd1, 0x0c, 0x59, 0xcd, 0x07, 0xb9, 0xfa, 0xab, 0x9a, 0x5f, 0xac, 0x70, 0xda, 0x77, 0x37, 0xd8, + 0x0a, 0x08, 0x29, 0x32, 0xe2, 0x36, 0xf2, 0x25, 0xa0, 0xdd, 0x2b, 0x72, 0x46, 0x08, 0xb8, 0xa1, + 0x45, 0xec, 0x43, 0x62, 0x6b, 0x8c, 0xec, 0xe6, 0x04, 0x46, 0x7e, 0xdc, 0xd1, 0x6d, 0x5b, 0x9b, + 0xc5, 0xd8, 0xb1, 0x87, 0x7f, 0x94, 0xdd, 0x69, 0xaa, 0x3d, 0xea, 0x5c, 0x33, 0x47, 0x50, 0x36, + 0x30, 0xb1, 0xfe, 0x1a, 0x4f, 0xe9, 0xd5, 0x2a, 0x1c, 0xee, 0x67, 0x46, 0x06, 0x5c, 0xa2, 0xc7, + 0x44, 0xbb, 0x6a, 0x15, 0x7d, 0x74, 0x5c, 0x26, 0x2a, 0x19, 0xf2, 0x17, 0x24, 0x9c, 0xf8, 0x7c, + 0x72, 0x2e, 0x58, 0x6c, 0x2c, 0x23, 0x96, 0x18, 0xde, 0x42, 0x88, 0xa8, 0x7a, 0xac, 0xd2, 0x4d, + 0xf7, 0xac, 0xee, 0x90, 0x0c, 0x8f, 0x46, 0x23, 0x45, 0xf6, 0x71, 0xb1, 0xb4, 0xd5, 0xeb, 0xda, + 0x3c, 0x0c, 0xa3, 0x5f, 0x15, 0xc3, 0xa1, 0xdb, 0x26, 0x69, 0x10, 0x5d, 0x2c, 0x11, 0xed, 0x84, + 0x76, 0x2e, 0x8a, 0xcd, 0x63, 0x4e, 0xff, 0x7e, 0x03, 0xe9, 0x46, 0x1b, 0xc1, 0x69, 0xad, 0x1f, + 0xd1, 0x57, 0x07, 0x5e, 0xed, 0x6d, 0x2a, 0xa1, 0xb4, 0xc9, 0x18, 0x7e, 0x5f, 0x47, 0xbb, 0x02, + 0xf5, 0x61, 0xcc, 0xc4, 0x93, 0x89, 0x04, 0xaa, 0xbb, 0x2c, 0x16, 0x74, 0x17, 0xf6, 0xab, 0xd3, + 0xb1, 0x9f, 0xd6, 0xf7, 0xef, 0x2d, 0xd9, 0xad, 0x84, 0x47, 0xf3, 0xc2, 0xda, 0xb7, 0x7f, 0xef, + 0x83, 0xe6, 0xbe, 0x4f, 0x79, 0x31, 0xbf, 0x45, 0xc5, 0x3e, 0x2b, 0x84, 0x9f, 0x73, 0x99, 0x88, + 0xc4, 0x2c, 0xee, 0xf3, 0x14, 0xea, 0x33, 0x53, 0x98, 0x51, 0xcb, 0x0b, 0xaf, 
0x4c, 0x60, 0x3a, + 0x3a, 0xc0, 0xd5, 0x79, 0x35, 0x54, 0x22, 0xf0, 0xb9, 0x49, 0xc4, 0xc6, 0xee, 0x9e, 0x5a, 0x59, + 0x7a, 0xb3, 0x17, 0xae, 0xfe, 0x98, 0xfd, 0xf9, 0xd8, 0xd7, 0x4c, 0x9e, 0x47, 0x38, 0x74, 0x3b, + 0xea, 0x5b, 0x17, 0xc2, 0xd2, 0x29, 0x1d, 0x69, 0x53, 0x28, 0x47, 0xe6, 0x07, 0x5b, 0xe5, 0xfe, + 0xaa, 0xe4, 0x5b, 0x67, 0xe6, 0x1a, 0xbe, 0x6f, 0x25, 0x02, 0x9c, 0x25, 0xfd, 0xb8, 0x16, 0xfb, + 0xcb, 0x6b, 0xc5, 0x16, 0x4d, 0xe8, 0x5a, 0x29, 0xeb, 0x25, 0xdd, 0x15, 0xa7, 0xd9, 0x36, 0xb6, + 0x7a, 0x94, 0xea, 0xaa, 0x5c, 0x7d, 0xbe, 0x79, 0x57, 0x89, 0x8a, 0x3a, 0x72, 0x07, 0x55, 0x50, + 0xcb, 0xf2, 0xdf, 0xfd, 0xe0, 0x09, 0xd2, 0x86, 0xb3, 0xad, 0x60, 0x6a, 0xd1, 0x81, 0xbd, 0x84, + 0x83, 0xf2, 0xe6, 0x66, 0x16, 0x07, 0x06, 0x09, 0x0b, 0x68, 0x44, 0x14, 0x6c, 0xa6, 0x38, 0xa7, + 0x61, 0xc3, 0x07, 0xa1, 0xb2, 0x32, 0x3c, 0x54, 0xef, 0x4a, 0x21, 0x81, 0xc2, 0x02, 0xef, 0x96, + 0x35, 0x15, 0x2f, 0x32, 0xa5, 0x84, 0x0c, 0x77, 0x75, 0xd5, 0xa3, 0xb5, 0x5a, 0x56, 0xdb, 0x2a, + 0xaf, 0xad, 0x80, 0x0e, 0xf4, 0x47, 0x52, 0x39, 0x0a, 0x41, 0x8d, 0x95, 0xb1, 0x05, 0xd1, 0x3d, + 0x13, 0xf8, 0x57, 0x2d, 0x73, 0x18, 0xcc, 0x8a, 0xe2, 0x0c, 0x48, 0xd6, 0x48, 0x8e, 0x3d, 0xd3, + 0x02, 0x2d, 0x8e, 0x74, 0x9f, 0xfd, 0x9e, 0x6b, 0x08, 0x0e, 0x66, 0xe1, 0x4c, 0x79, 0x3e, 0x55, + 0xcc, 0x4a, 0xfe, 0xa9, 0x28, 0x75, 0xa9, 0x5d, 0xab, 0xd4, 0x3a, 0xec, 0xde, 0x75, 0xad, 0x5f, + 0x99, 0x35, 0x69, 0xeb, 0xd1, 0x07, 0x6b, 0x0a, 0x5c, 0xbc, 0x9e, 0x37, 0x62, 0xd6, 0xb6, 0xb4, + 0xe6, 0x29, 0x75, 0xe7, 0x10, 0x28, 0x0f, 0xe9, 0xf9, 0xf8, 0xe1, 0xfb, 0x6f, 0x05, 0x50, 0x13, + 0xbb, 0xdd, 0x75, 0xa8, 0x49, 0x13, 0xee, 0x47, 0xa0, 0x88, 0xbe, 0xc0, 0x9b, 0x33, 0x85, 0x3a, + 0x0d, 0xbb, 0x38, 0x8a, 0x56, 0x47, 0xff, 0x40, 0x49, 0x8d, 0xc7, 0x66, 0xa8, 0xf4, 0xfa, 0xb1, + 0x88, 0x57, 0xb9, 0x6b, 0xdf, 0x20, 0x14, 0xd0, 0x87, 0x08, 0x3b, 0xfe, 0xf3, 0xeb, 0x1b, 0xb8, + 0x30, 0x21, 0x8a, 0x41, 0xf9, 0x50, 0x57, 0xf2, 0x1d, 0xf9, 0x1b, 0x59, 0x23, 0x5b, 0x06, 0x72, + 0x30, 0x92, 0x44, 0x18, 0x71, 0xbe, 0x66, 0xe8, 0x91, 0x26, 0x10, 0x17, 0xed, 0x4d, 0x15, 0xfc, + 0x36, 0x62, 0xa1, 0x32, 0xba, 0xee, 0x2e, 0xa8, 0x5d, 0x2a, 0x5b, 0x3b, 0x5f, 0xa5, 0xff, 0xda, + 0xef, 0x0d, 0xdf, 0xe2, 0xed, 0x97, 0xce, 0xb5, 0xb8, 0x8b, 0x0a, 0x07, 0x72, 0xbe, 0xd2, 0x5a, + 0xf9, 0xfd, 0xba, 0x47, 0x9c, 0x0d, 0x5f, 0xce, 0x53, 0xb6, 0x0d, 0xe1, 0x3f, 0x4c, 0x34, 0x95, + 0x04, 0x10, 0x07, 0x59, 0x43, 0x55, 0x1b, 0xc7, 0x84, 0xe3, 0x15, 0x9a, 0x17, 0x08, 0xe6, 0x0b, + 0xd7, 0x0f, 0xae, 0xfc, 0x7a, 0x48, 0xcd, 0x92, 0x0e, 0x3c, 0x03, 0x35, 0xf7, 0x19, 0x1c, 0xbf, + 0x6f, 0x16, 0x60, 0xd7, 0xe0, 0xb1, 0xe8, 0xc8, 0x35, 0x0e, 0x72, 0x8c, 0x2a, 0xa7, 0xdc, 0x3c, + 0xcd, 0xe9, 0xfb, 0x5d, 0xdf, 0xae, 0xbd, 0x4e, 0xb5, 0xe4, 0xc5, 0xb9, 0x6f, 0x08, 0xbd, 0x40, + 0xe8, 0x0c, 0x93, 0x4f, 0x7b, 0x8f, 0x48, 0x9a, 0x55, 0x05, 0x84, 0xff, 0x1c, 0x87, 0x11, 0x14, + 0x5a, 0xcf, 0x29, 0x71, 0x1a, 0x67, 0xc7, 0x5d, 0x28, 0xe2, 0xa5, 0x50, 0xfb, 0xdc, 0x77, 0x31, + 0xe7, 0x13, 0xac, 0x7a, 0xba, 0xd3, 0x2e, 0xe0, 0xff, 0xa6, 0xe6, 0x6a, 0x10, 0x76, 0x2e, 0x35, + 0x9f, 0xc8, 0x55, 0xbc, 0x7f, 0x17, 0xc7, 0xb9, 0xfd, 0x85, 0x68, 0xb9, 0xe0, 0xd3, 0x90, 0x85, + 0x83, 0xd4, 0x71, 0x01, 0x90, 0x3c, 0x78, 0x82, 0x33, 0x4f, 0x85, 0x37, 0x68, 0xfe, 0x05, 0x24, + 0x95, 0x68, 0x5e, 0xf3, 0xf1, 0xdf, 0x7e, 0x6e, 0xb6, 0x70, 0x70, 0xf5, 0x68, 0x6b, 0x1c, 0x39, + 0xb7, 0x8c, 0xae, 0x7f, 0x2d, 0x8d, 0xb8, 0xbf, 0x3d, 0x50, 0xa5, 0x83, 0x40, 0x26, 0x73, 0x75, + 0x20, 0xbc, 0x26, 0x35, 0xaa, 0xec, 0x26, 0x30, 0xb5, 0xd4, 0xbc, 0xa2, 0xc9, 0x9d, 0x52, 0xd8, + 0xb2, 
0x45, 0xf4, 0xb5, 0x45, 0xf6, 0x16, 0x6f, 0x13, 0x87, 0x44, 0x8a, 0x42, 0x83, 0x9b, 0x3c, + 0xc2, 0x0b, 0x69, 0xc7, 0xd9, 0x0e, 0x78, 0x2a, 0xc1, 0x97, 0xa8, 0xd6, 0x12, 0x63, 0xcf, 0x71, + 0x3c, 0x25, 0x82, 0x6e, 0x3a, 0xf9, 0x50, 0xc9, 0xd8, 0x5a, 0x28, 0x77, 0x75, 0x4b, 0x46, 0xcd, + 0xc7, 0xaa, 0x7c, 0x66, 0xa6, 0xe2, 0x4f, 0x7c, 0x79, 0xb6, 0xf0, 0x9b, 0x59, 0xee, 0xdb, 0x96, + 0xea, 0x95, 0x74, 0x43, 0x75, 0x6d, 0xdd, 0x86, 0xab, 0x9f, 0x49, 0x7c, 0x0a, 0xb3, 0x3c, 0xc2, + 0x26, 0xbd, 0xe7, 0x2a, 0xd2, 0x68, 0x21, 0x06, 0x31, 0x26, 0x66, 0xbc, 0x0b, 0x3e, 0x7e, 0x1c, + 0x17, 0xad, 0x93, 0xec, 0xf4, 0x15, 0x5a, 0x14, 0xc1, 0x42, 0x27, 0x77, 0x3c, 0xf3, 0x95, 0xc8, + 0x1e, 0x59, 0x75, 0x95, 0x68, 0x38, 0x5a, 0x35, 0x46, 0xed, 0x6f, 0xb6, 0xd2, 0xb6, 0x6c, 0xe8, + 0x52, 0xaa, 0x8e, 0x4e, 0x5e, 0xc1, 0x17, 0xe1, 0x68, 0x69, 0x13, 0xf4, 0x88, 0x37, 0xfa, 0x94, + 0xcb, 0x42, 0xa5, 0x36, 0xaa, 0xf7, 0x18, 0x55, 0xb1, 0xd2, 0x24, 0x8e, 0x15, 0xe8, 0xa7, 0x4b, + 0x45, 0x13, 0x6f, 0xf2, 0xae, 0x7a, 0xf5, 0x17, 0xa8, 0x39, 0xc8, 0x7c, 0x76, 0xf8, 0xcb, 0xf6, + 0x8c, 0x3c, 0xe5, 0xf1, 0x78, 0xad, 0x4c, 0xbe, 0x4e, 0xd8, 0x15, 0xda, 0xe5, 0x8a, 0x0b, 0xda, + 0x23, 0x7a, 0xdc, 0xab, 0x6c, 0xa1, 0xf8, 0xdd, 0xac, 0x92, 0x41, 0x80, 0xd6, 0x9f, 0x80, 0x5f, + 0x06, 0x0b, 0x13, 0x04, 0x39, 0xe9, 0xc0, 0xf1, 0xae, 0x07, 0xa1, 0xb5, 0x08, 0x5f, 0xf7, 0x6b, + 0x19, 0x90, 0x6c, 0x55, 0x52, 0xc9, 0xb0, 0x24, 0x13, 0xbb, 0xe0, 0x4e, 0x8f, 0x0e, 0x56, 0xe5, + 0xc4, 0x0e, 0xef, 0xf5, 0x46, 0x67, 0xdb, 0xf2, 0x8a, 0xa6, 0x9f, 0x02, 0x81, 0x81, 0x8d, 0x51, + 0xc0, 0xfe, 0x80, 0xcf, 0xb6, 0xa3, 0x99, 0xf9, 0x71, 0x47, 0x26, 0x94, 0xb8, 0x1d, 0x8f, 0xab, + 0x5e, 0xe4, 0x7e, 0xd3, 0xab, 0xad, 0xd1, 0xba, 0xfe, 0xbd, 0x9d, 0xb6, 0x98, 0xb3, 0xfc, 0x91, + 0x62, 0xbc, 0xa9, 0xeb, 0xf3, 0x25, 0xca, 0xd3, 0xbf, 0xfd, 0x4c, 0x10, 0x24, 0x70, 0xd1, 0x72, + 0xf3, 0x7c, 0xe6, 0x95, 0x49, 0xe0, 0xd4, 0x9b, 0x7d, 0x2e, 0xac, 0x01, 0x39, 0x02, 0xca, 0x0a, + 0xc6, 0x95, 0x03, 0xe9, 0x3d, 0x59, 0x22, 0x31, 0xe1, 0xf5, 0x74, 0x70, 0x40, 0x2e, 0xb3, 0xe8, + 0x5f, 0x79, 0x24, 0x77, 0x7d, 0x42, 0x1e, 0x7b, 0xea, 0xf5, 0xbe, 0xd1, 0xe3, 0x38, 0x6e, 0x1f, + 0x7b, 0x5f, 0x4c, 0xc7, 0x3f, 0x9e, 0x80, 0x7c, 0x44, 0xa3, 0xb7, 0x60, 0x36, 0x3e, 0x96, 0xad, + 0x08, 0xc4, 0xc8, 0x24, 0x4f, 0x6c, 0x85, 0x3f, 0x0a, 0x66, 0xb0, 0x11, 0xad, 0x61, 0xa4, 0xda, + 0xd3, 0xf1, 0x70, 0x15, 0x2b, 0x42, 0xac, 0x6c, 0x7d, 0xa0, 0xbe, 0x85, 0xee, 0x15, 0x9e, 0x2b, + 0xa8, 0x5a, 0xd2, 0xd1, 0x08, 0x58, 0xbf, 0xde, 0xca, 0xf7, 0x31, 0xb4, 0xaa, 0x94, 0xad, 0xf0, + 0x7a, 0x00, 0xa3, 0xa7, 0xf3, 0x9b, 0xc0, 0x91, 0xc4, 0x97, 0x5f, 0x37, 0x69, 0x5f, 0xa8, 0x1a, + 0x5c, 0x89, 0x2f, 0xa9, 0x33, 0x6b, 0x44, 0x9b, 0x1c, 0xe6, 0x9c, 0x5c, 0xf3, 0xaa, 0x2f, 0xe4, + 0xfd, 0x0d, 0x52, 0x18, 0xa6, 0x16, 0x94, 0x38, 0xde, 0xf3, 0x97, 0xe8, 0xa5, 0xf0, 0x5d, 0xf5, + 0x0a, 0xa3, 0xbc, 0xf1, 0x3e, 0x9c, 0xde, 0x4d, 0x47, 0x4f, 0x8f, 0xf1, 0x7c, 0xd4, 0xee, 0x57, + 0xa5, 0x1c, 0x12, 0x38, 0x09, 0x3e, 0x32, 0xfb, 0x6a, 0x32, 0xae, 0x6a, 0x58, 0xeb, 0x63, 0x69, + 0x58, 0xef, 0xe6, 0xb9, 0x83, 0xbd, 0x8f, 0x11, 0x87, 0xb8, 0x8b, 0x91, 0x59, 0xa5, 0x02, 0x0d, + 0xb5, 0x23, 0x7a, 0x6a, 0xd5, 0xaa, 0xc2, 0x65, 0x0a, 0x4b, 0xcf, 0x97, 0x3e, 0xe2, 0xd1, 0x9f, + 0x5c, 0x5c, 0xdb, 0x72, 0x22, 0x73, 0x8e, 0xbb, 0xbc, 0xfa, 0x2e, 0xc3, 0xa6, 0x4a, 0xc1, 0x12, + 0x47, 0x98, 0xf2, 0x57, 0x9d, 0x62, 0x3b, 0xd4, 0xda, 0x73, 0x8e, 0xf0, 0x3e, 0xca, 0x14, 0x35, + 0x86, 0xb7, 0xe7, 0xaf, 0x74, 0xce, 0x40, 0x2a, 0xa1, 0x05, 0x77, 0x5f, 0xf4, 0xd2, 0x6f, 0xf2, + 0xd8, 0x20, 0x27, 0x7c, 0xa9, 
0x81, 0xfe, 0x13, 0xb3, 0x08, 0x92, 0xc4, 0xb1, 0xcd, 0x7e, 0xe2, + 0xda, 0xa3, 0x2c, 0x1f, 0xba, 0xd8, 0x39, 0xa6, 0x12, 0x9b, 0xf9, 0xfd, 0x23, 0x14, 0xad, 0x8b, + 0xc1, 0x55, 0xd9, 0xa1, 0xdc, 0xc8, 0xac, 0xe4, 0xc6, 0x61, 0xe9, 0x9f, 0x7c, 0x3d, 0x27, 0x2f, + 0x10, 0x16, 0x79, 0x78, 0x5d, 0x30, 0x3a, 0xbf, 0x1d, 0x61, 0x6a, 0xc9, 0x9b, 0xe0, 0x6b, 0x9d, + 0xe0, 0x43, 0x6c, 0xf6, 0xc4, 0xf7, 0xad, 0x6a, 0x91, 0x97, 0x4b, 0x2b, 0x16, 0xd5, 0x93, 0x8a, + 0x9d, 0xfa, 0x15, 0xc6, 0x63, 0x4d, 0x9d, 0x29, 0x4c, 0x3d, 0x86, 0x0a, 0x56, 0x1f, 0xe6, 0x3f, + 0x83, 0x01, 0x4c, 0xf7, 0x59, 0x68, 0x08, 0xa8, 0xe1, 0xa3, 0x71, 0xff, 0xf6, 0xa7, 0x4f, 0x8f, + 0x48, 0x2f, 0x64, 0xf3, 0x87, 0x90, 0x35, 0xae, 0xd8, 0x27, 0x71, 0x21, 0xbf, 0xf0, 0x9a, 0x96, + 0x15, 0x90, 0x7c, 0x8f, 0xe8, 0xe1, 0xe8, 0x0d, 0xe7, 0x86, 0xf1, 0x24, 0xda, 0xf7, 0x7e, 0x9f, + 0xc0, 0xb1, 0xf5, 0x0b, 0x4e, 0x70, 0x35, 0xf3, 0x5b, 0xc2, 0xe9, 0x36, 0x83, 0x0d, 0x8e, 0x50, + 0xb0, 0x98, 0x36, 0x40, 0x7e, 0x69, 0xd8, 0x7d, 0x21, 0x39, 0x78, 0xfe, 0x24, 0xe9, 0xe9, 0x50, + 0x42, 0x2e, 0x87, 0xc9, 0xc2, 0x5d, 0x1c, 0xf1, 0xa5, 0xc7, 0x5b, 0x0f, 0x56, 0xce, 0x64, 0xcd, + 0xe3, 0xb3, 0xdf, 0x02, 0x4e, 0x73, 0x16, 0xa4, 0xb4, 0x59, 0xd1, 0x80, 0x9b, 0x41, 0x18, 0x0f, + 0xba, 0xed, 0x80, 0x91, 0x7c, 0x67, 0x82, 0xd8, 0x26, 0x70, 0x7e, 0x2f, 0x3d, 0xde, 0x37, 0xc5, + 0x03, 0xc0, 0x3a, 0x8f, 0x37, 0x60, 0x14, 0x5b, 0xe4, 0x9d, 0xe3, 0xfd, 0x55, 0xa7, 0x46, 0x79, + 0xd2, 0x49, 0xd5, 0x7f, 0xc6, 0x23, 0x4b, 0x45, 0x74, 0x76, 0xda, 0x2d, 0x68, 0x7e, 0xed, 0x93, + 0xed, 0x79, 0xb1, 0xdf, 0x8c, 0x75, 0x91, 0x33, 0x86, 0x3d, 0x29, 0x7d, 0xbd, 0x53, 0x3e, 0xc1, + 0x63, 0xa9, 0x8e, 0x19, 0xc6, 0x22, 0xbf, 0x88, 0x1f, 0xc5, 0xdc, 0xb5, 0x0e, 0x10, 0x56, 0xc6, + 0x30, 0xad, 0x2d, 0x93, 0x56, 0xa3, 0x4c, 0x07, 0xa5, 0xad, 0xea, 0xc8, 0x4d, 0xa3, 0xd8, 0xb7, + 0x04, 0x0b, 0x73, 0x4d, 0x7f, 0xc5, 0xb7, 0xfa, 0x00, 0x4a, 0xb4, 0xe2, 0x5f, 0xe6, 0xc3, 0x5a, + 0x8e, 0x45, 0x10, 0xd8, 0xac, 0x4c, 0x05, 0x3d, 0x7d, 0x5a, 0x82, 0xbe, 0x7d, 0x88, 0x8b, 0x56, + 0xdf, 0x18, 0xdf, 0x4f, 0x13, 0x8f, 0x74, 0x69, 0x43, 0xa9, 0x1b, 0xae, 0x0c, 0x4f, 0x65, 0x1c, + 0x65, 0x27, 0x84, 0xd6, 0x60, 0x9a, 0x3b, 0x86, 0xd2, 0x92, 0x41, 0xff, 0x50, 0xc2, 0x9c, 0x1b, + 0x92, 0x2a, 0x8b, 0x4e, 0xec, 0x5b, 0x1c, 0xdb, 0xa4, 0x6b, 0x89, 0x94, 0xb3, 0xf6, 0x42, 0x89, + 0x6b, 0x91, 0x26, 0x15, 0x16, 0xf0, 0xd7, 0x51, 0x6f, 0x75, 0x1d, 0x11, 0x55, 0x51, 0x5a, 0xe3, + 0x0f, 0xd6, 0x89, 0xc3, 0x0a, 0x27, 0xaa, 0x04, 0x0c, 0xa8, 0xfa, 0xf2, 0x48, 0x67, 0x5d, 0xd4, + 0x76, 0xab, 0x71, 0xe1, 0x48, 0xe5, 0x95, 0x41, 0x44, 0x80, 0x40, 0xd9, 0x9a, 0x7b, 0x36, 0x83, + 0x6e, 0xc6, 0xbb, 0x7b, 0x3d, 0x4f, 0x9d, 0xd2, 0x36, 0x2a, 0x62, 0xfa, 0x32, 0x85, 0x9f, 0x69, + 0x31, 0xcb, 0xc2, 0x3d, 0x7a, 0xc3, 0xbf, 0x71, 0x01, 0x53, 0xb6, 0xe4, 0x3a, 0x49, 0xe1, 0xae, + 0xdb, 0xe3, 0x51, 0x9e, 0xde, 0x34, 0xc3, 0xb5, 0xce, 0x0d, 0xd7, 0xa0, 0xfc, 0xff, 0xdb, 0xb5, + 0xf0, 0x3f, 0xfe, 0xe3, 0x3f, 0xfe, 0xe3, 0x3f, 0xfe, 0xbf, 0xf0, 0x3f, 0xa9, 0x90, 0xad, 0xcd, + 0x00, 0x23, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8960, // uncompressed data size (bytes) + 7364, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x05, 0x62, 0x56, 0x08, 0x13, 0x4c, 0xc8, 0x40, 0x69, + 0x20, 0x00, 0x00, 0x8e, 0xa1, 0x42, 0xb2, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_dbg_tu11x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_sig_dbg_data[] = +{ + 0x1a, 0x34, 0xb6, 0x32, 0x75, 0x24, 0x15, 0x41, 0x5d, 0x9f, 0xa8, 0xaf, 0xca, 0xe0, 0x2c, 0x73, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_sig_dbg_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_prod_tu11x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_sig_prod_data[] = +{ + 0xab, 0xbb, 0x13, 0xff, 0x34, 0x6c, 0xc6, 0x0b, 0x42, 0x7b, 0xdf, 0x2b, 0x79, 0x59, 0x10, 0x26, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_sig_prod_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_tu11x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_patch_loc_data[] = +{ + 0x00, 0x1f, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_tu11x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_reload_sig_tu11x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterReloadUcode_TU116("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/reload/g_booteruc_reload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterReloadUcode_TU116_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterReloadUcode_TU116_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterReloadUcode_TU116_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterReloadUcode_TU116 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterReloadUcode_TU116_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_TU116(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterReloadUcode_TU116; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA100.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA100.c new file mode 100644 index 000000000..96f695ace --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA100.c @@ -0,0 +1,1297 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8960 +// COMPRESSED SIZE (bytes): 6817 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_image_dbg_data[] = +{ + 0xed, 0x99, 0x45, 0x50, 0x1c, 0x0a, 0xb3, 0xb6, 0x87, 0x21, 0x38, 0x41, 0x83, 0xbb, 0xbb, 0x0e, + 0xee, 0xee, 0xee, 0x04, 0x09, 0x1e, 0x34, 0x68, 0x70, 0x77, 0x77, 0x19, 0x64, 0x70, 0x77, 0x87, + 0x41, 0x83, 0x6b, 0x90, 0x40, 0xd0, 0xe0, 0xee, 0x10, 0xdc, 0xef, 0xf9, 0x16, 0xff, 0xe6, 0x5b, + 0xdd, 0xbb, 0xfd, 0xeb, 0x3c, 0x9b, 0xf7, 0xed, 0x5e, 0x74, 0xf5, 0xa6, 0xab, 0xba, 0xba, 0xa3, + 0x00, 0x00, 0x98, 0xb7, 0x54, 0x80, 0x3f, 0x3a, 0x00, 0xf0, 0x00, 0x7c, 0x00, 0x3c, 0x02, 0xe3, + 0x00, 0x40, 0x40, 0xef, 0xfa, 0xc6, 0xfb, 0xfb, 0x3b, 0x7a, 0x14, 0x00, 0x06, 0xf0, 0x9e, 0xf7, + 0xa1, 0x69, 0x17, 0x00, 0x02, 0x2f, 0xc3, 0x34, 0xa5, 0x02, 0x98, 0xc1, 0xcb, 0xc0, 0x7f, 0x84, + 0x06, 0xbc, 0x0c, 0xfb, 0x8f, 0x90, 0x46, 0x01, 0x00, 0x00, 0x70, 0x1e, 0x6c, 0xcb, 0x06, 0x6c, + 0x6f, 0x5e, 0xfe, 0x06, 0x38, 0x0f, 0x06, 0x9c, 0x09, 0x6c, 0xd9, 0x80, 0x69, 0x59, 0x06, 0xfa, + 0xf7, 0x02, 0x00, 0xe8, 0x1c, 0xf2, 0x00, 0x8c, 0xfb, 0x3c, 0x40, 0x77, 0x6a, 0xc2, 0x3f, 0x25, + 0x5b, 0x1e, 0x83, 0x5a, 0x1e, 0x03, 0xa3, 0x00, 0xa1, 0x1f, 0xfe, 0x49, 0xa1, 0xc3, 0xb4, 0x67, + 0x02, 0x48, 0x80, 0xff, 0x71, 0xb0, 0xff, 0x38, 0x18, 0xe0, 0xff, 0xcb, 0xc1, 0xff, 0xc7, 0xfd, + 0x53, 0x16, 0x98, 0x86, 0x8e, 0xb8, 0xfe, 0x9f, 0x16, 0x5e, 0x1f, 0xe1, 0xa2, 0x00, 0x44, 0x80, + 0xd7, 0xdc, 0x0f, 0x9d, 0x89, 0xc0, 0xd6, 0x54, 0x8c, 0x7f, 0xe2, 0xb7, 0x27, 0x80, 0xff, 0x3f, + 0x8d, 0x3d, 0x00, 0xbb, 0x8b, 0x9f, 0x61, 0x12, 0x5e, 0x33, 0xd0, 0xa3, 0xc8, 0x11, 0x00, 0xf9, + 0x79, 0x0f, 0x80, 0x84, 0x7f, 0xb4, 0xf7, 0xfd, 0x86, 0xfd, 0xe5, 0x4d, 0x1e, 0x26, 0x3f, 0x4f, + 0x64, 0x19, 0xf0, 0xbf, 0x01, 0xef, 0xc3, 0xc9, 0x05, 0xd1, 0x0b, 0x4a, 0xe2, 0xdb, 0x84, 0xdc, + 0x0e, 0x6e, 0x2e, 0x48, 0x23, 0xb0, 0xe8, 0xc6, 0x89, 0xb4, 0xd3, 0x9f, 0xe9, 0xd4, 0xac, 0x5f, + 0xf6, 0x21, 0xfb, 0xb1, 0x73, 0xbf, 0x33, 0x6d, 0x24, 0x8e, 0x29, 0x0c, 0x8a, 0x54, 0xed, 0x9b, + 0x6e, 0x7f, 0x46, 0xbf, 0xd2, 0xb6, 0x22, 0x69, 0x43, 0x94, 0xc2, 0x15, 0x84, 0x54, 0x1f, 0x2e, + 0xd2, 0x50, 0x43, 0xe4, 0xbd, 0x5c, 0x56, 0xa8, 0x7e, 0x3c, 0x58, 0xbd, 0xbf, 0x73, 0x6a, 0x28, + 0x29, 0x98, 0x8c, 0xf7, 0xc5, 0x6e, 0xa8, 0x73, 0x1f, 0x49, 0xff, 0x52, 0x1c, 0x9e, 0x5f, 0xa9, + 0x3f, 0x7a, 0x7a, 0xce, 0x5f, 0x65, 0x9b, 0x1a, 0x5d, 0xda, 0x1d, 0x99, 0x1e, 0xd5, 0x74, 0x8a, + 0x4f, 0x1f, 0xbf, 0x3e, 0xa0, 0xc9, 0x70, 0x59, 0xc7, 0x85, 0xc2, 0x8d, 0x10, 0xa8, 0x8e, 0x98, + 0x46, 0xd0, 0x81, 0x2a, 0xb2, 0xdd, 0x52, 0xfa, 0xf7, 0x06, 0xab, 0xd8, 0x7a, 0x4c, 0x75, 0xe6, + 0x2f, 0xfc, 0x97, 0xe2, 0xce, 0x93, 0xa0, 0xf4, 0x77, 0x0b, 0xa4, 0x8f, 0xd9, 0xae, 0xe5, 0x3b, + 0x37, 0xbe, 0x25, 0x01, 0x57, 0x49, 0x7d, 0x35, 0xec, 0x81, 0xb3, 0xc3, 0x4b, 0xb0, 0x86, 0xf6, + 0x05, 0x87, 0xee, 0xf8, 0x51, 0x4a, 0x7b, 0x5f, 0x18, 0x7e, 0x07, 0x57, 0x61, 0xd1, 0xcd, 0x87, + 0xa9, 0x68, 0x05, 0xe7, 0xd1, 0x28, 0x55, 0x6b, 0xc8, 0x27, 0x20, 0x4c, 0x68, 0xa7, 0xe9, 0x97, + 0xc2, 0xc2, 0xe3, 0x99, 0x33, 0xb4, 0x2c, 0x8a, 0x6b, 0x8d, 0xc4, 0x36, 0xcc, 0x79, 0x2f, 0x3c, + 0xda, 0xa7, 0x9e, 0x87, 0x81, 0x9c, 0x13, 0x83, 0x96, 0x46, 0x0a, 0x04, 0x6a, 0xa3, 0x73, 0xf2, + 0x18, 0x58, 0xaa, 0xd4, 0x9e, 0x1e, 0x99, 0x88, 0x2c, 
0xf3, 0x4f, 0xcf, 0x6e, 0x33, 0x26, 0xbf, + 0xb6, 0xfb, 0x68, 0xb5, 0xdd, 0x1b, 0x75, 0xd3, 0x88, 0x1f, 0x70, 0x71, 0xf2, 0xc1, 0x97, 0xba, + 0xc7, 0x64, 0x5d, 0x72, 0x19, 0x18, 0x65, 0xed, 0x4e, 0x6a, 0x72, 0x8f, 0x05, 0xb9, 0x03, 0x17, + 0x17, 0xb0, 0xf7, 0xbc, 0x65, 0x40, 0xce, 0x29, 0x4d, 0xb3, 0x3c, 0xb4, 0xf6, 0x6c, 0x34, 0xa4, + 0x4b, 0x35, 0xf2, 0xfe, 0xfe, 0x1b, 0x61, 0xc3, 0xb3, 0x7f, 0x1e, 0xe7, 0x48, 0x7b, 0x37, 0x87, + 0x19, 0x82, 0x5f, 0x25, 0x4a, 0xa9, 0x7a, 0x51, 0x4c, 0xb8, 0x03, 0x53, 0xba, 0x24, 0xef, 0xb5, + 0x5b, 0x8b, 0x5a, 0x15, 0x16, 0x65, 0x49, 0x4c, 0x44, 0x07, 0xce, 0xb6, 0x10, 0xf5, 0xc9, 0xa7, + 0xee, 0x45, 0xd9, 0x91, 0x4c, 0x3e, 0x8f, 0x55, 0x1a, 0xec, 0xcb, 0xf3, 0xe9, 0x86, 0xff, 0x02, + 0xd6, 0xa8, 0x2a, 0xde, 0x67, 0x94, 0x16, 0xe8, 0xa1, 0xa8, 0xd3, 0xd7, 0x21, 0x04, 0x7a, 0x42, + 0xda, 0xbe, 0xec, 0x28, 0x54, 0xd5, 0xad, 0xce, 0xad, 0x7a, 0xba, 0x87, 0xb7, 0x09, 0xb7, 0x31, + 0x61, 0x1c, 0x55, 0x92, 0x98, 0xb1, 0x7e, 0x82, 0x93, 0x25, 0xe1, 0x14, 0x94, 0x4e, 0x2d, 0x34, + 0xe8, 0xbd, 0x51, 0xae, 0x88, 0x69, 0xa9, 0x67, 0xe7, 0xc1, 0xca, 0xa7, 0x83, 0x4c, 0x90, 0xc4, + 0x11, 0xb0, 0x1e, 0x2f, 0xc0, 0xf5, 0xf6, 0xdd, 0x06, 0x0f, 0x18, 0x85, 0x22, 0x0b, 0x59, 0x6e, + 0xa7, 0x72, 0x77, 0xbd, 0xd3, 0xed, 0xe6, 0x81, 0xc5, 0x12, 0x5a, 0x6d, 0xf9, 0xdf, 0x10, 0xee, + 0x40, 0x39, 0xc6, 0x98, 0x18, 0x09, 0x5b, 0xf3, 0x22, 0x3e, 0x2a, 0x2e, 0xae, 0x47, 0x41, 0x4f, + 0x0c, 0x13, 0xf3, 0xdd, 0x91, 0x86, 0xf3, 0x65, 0xe6, 0x1c, 0xcb, 0xcc, 0x1e, 0x4a, 0x93, 0x2c, + 0xe0, 0x43, 0xe1, 0xf5, 0x20, 0xe5, 0x8c, 0x6d, 0x73, 0xcf, 0xcc, 0xc7, 0xc2, 0x21, 0x5f, 0x4a, + 0xfe, 0x88, 0x79, 0xfc, 0x6e, 0x0a, 0x85, 0x2d, 0x63, 0xb9, 0x8c, 0x49, 0xdd, 0xf7, 0x4c, 0x12, + 0xfc, 0x45, 0x69, 0x98, 0x4a, 0xc2, 0xba, 0x9c, 0x8a, 0xec, 0x53, 0xb2, 0xa3, 0x8d, 0xbf, 0x02, + 0xa9, 0x6f, 0x6f, 0x8d, 0x6f, 0xef, 0xd0, 0xa7, 0x3b, 0x95, 0x26, 0xce, 0x39, 0xe6, 0xd2, 0x08, + 0x7d, 0x16, 0x46, 0xba, 0x45, 0x7e, 0xf4, 0x7d, 0xc6, 0x7c, 0x82, 0x3f, 0x34, 0x44, 0x7e, 0xd0, + 0x60, 0x53, 0xfc, 0xe0, 0x78, 0x12, 0xf0, 0xef, 0xde, 0x9a, 0x9d, 0x95, 0xcc, 0x6e, 0xeb, 0x10, + 0x5b, 0xb3, 0x58, 0xf1, 0x0a, 0xc7, 0xd6, 0x8d, 0x99, 0xaa, 0xe1, 0x2d, 0x77, 0xe5, 0xb5, 0xfc, + 0xf1, 0x66, 0xb2, 0x5f, 0x33, 0xd2, 0x4f, 0x2f, 0xc9, 0x81, 0x6a, 0xbe, 0xc1, 0x63, 0xc3, 0xf2, + 0x43, 0x9e, 0x56, 0x71, 0x30, 0xef, 0x93, 0x18, 0x51, 0xae, 0x37, 0x56, 0x93, 0x44, 0xd7, 0x68, + 0xc2, 0xd5, 0x85, 0x7c, 0xb1, 0xe7, 0x4a, 0xf3, 0x97, 0xe8, 0x65, 0xf3, 0xde, 0x2e, 0x90, 0xd8, + 0xfa, 0xb2, 0xab, 0x2f, 0x6e, 0x37, 0x15, 0xc8, 0x85, 0xbb, 0xdf, 0x3b, 0x46, 0x7e, 0x9b, 0xbd, + 0x8c, 0xd7, 0x1e, 0x35, 0x55, 0x2e, 0x27, 0xf6, 0xc7, 0x8e, 0x94, 0xa2, 0x60, 0x77, 0x2f, 0x76, + 0xa5, 0x18, 0x0e, 0xff, 0x36, 0x49, 0x62, 0x2d, 0x6b, 0x5d, 0x67, 0x51, 0xda, 0x62, 0x71, 0xe0, + 0xf9, 0xa6, 0xca, 0x88, 0x8f, 0xad, 0x67, 0x69, 0x73, 0x70, 0xdb, 0x97, 0xc4, 0x2f, 0x51, 0x42, + 0xf5, 0xc7, 0x03, 0x64, 0x46, 0xf5, 0x90, 0x17, 0x64, 0xbe, 0x6c, 0x42, 0xa5, 0x3a, 0x53, 0x48, + 0x31, 0xbf, 0xd5, 0x63, 0x78, 0x85, 0x23, 0xa5, 0xbf, 0x67, 0x6c, 0x78, 0xb5, 0xbd, 0x2c, 0xef, + 0xe1, 0x78, 0xf0, 0x62, 0x8f, 0x63, 0xba, 0x45, 0xb9, 0xf9, 0xca, 0xd7, 0x67, 0xd5, 0x6a, 0xc3, + 0xff, 0x47, 0xc6, 0x7a, 0x4a, 0xa6, 0xff, 0x16, 0xd7, 0x2b, 0xfc, 0xd3, 0x70, 0xd1, 0xc7, 0xe3, + 0x50, 0xff, 0x0f, 0xc2, 0xf6, 0x72, 0x87, 0x6d, 0x38, 0xf1, 0xcf, 0x28, 0x79, 0x8e, 0x5a, 0x74, + 0x28, 0x25, 0x51, 0xa3, 0xea, 0x4e, 0x66, 0xdb, 0xbb, 0x4f, 0x81, 0x08, 0x3d, 0x26, 0xbc, 0xbb, + 0x69, 0x2a, 0x7b, 0x4b, 0x76, 0xd6, 0x36, 0x0b, 0x67, 0xdf, 0x9c, 0x07, 0x36, 
0xda, 0x7c, 0x25, + 0x7f, 0xa2, 0x09, 0xd1, 0x43, 0x44, 0xc8, 0x07, 0xbe, 0xd7, 0x20, 0x12, 0x8a, 0x79, 0xea, 0x9f, + 0x0a, 0xc7, 0xa8, 0xa3, 0x72, 0x9b, 0xc1, 0x36, 0x04, 0x36, 0xd0, 0x0d, 0xe5, 0x8c, 0x5d, 0x41, + 0xe4, 0xbf, 0x47, 0x29, 0xe8, 0x5b, 0xa0, 0x92, 0xa4, 0x70, 0xe2, 0xf0, 0x01, 0xd0, 0x45, 0x87, + 0x45, 0x22, 0xcd, 0xeb, 0x5c, 0x29, 0xe8, 0xb0, 0x8a, 0x06, 0x6c, 0xef, 0x63, 0x12, 0xf0, 0x93, + 0x01, 0x5b, 0xc6, 0xc7, 0xc4, 0x3b, 0x1f, 0x1a, 0x16, 0x9b, 0x44, 0x2e, 0x21, 0x46, 0x89, 0x79, + 0x6d, 0x32, 0x43, 0xe3, 0xe1, 0x2a, 0x29, 0x32, 0x1c, 0x74, 0x9e, 0x49, 0xfd, 0xd8, 0x6b, 0xe0, + 0x8c, 0xe0, 0x66, 0x0e, 0x3d, 0xfb, 0xe3, 0xa8, 0x56, 0xb4, 0x1f, 0xe8, 0x31, 0xe8, 0x64, 0x2c, + 0xc5, 0x0d, 0xa3, 0x4c, 0xf0, 0x23, 0x6e, 0x65, 0x54, 0x84, 0x73, 0x97, 0xab, 0xb5, 0xae, 0xd0, + 0xab, 0x69, 0x4f, 0x39, 0x85, 0xf9, 0x19, 0x8d, 0x31, 0x57, 0xef, 0x70, 0x74, 0xef, 0xd2, 0x25, + 0x83, 0x15, 0xe4, 0xda, 0x9d, 0xaf, 0x13, 0x21, 0xd3, 0x81, 0x34, 0xfa, 0x45, 0x2b, 0xdd, 0x1b, + 0xfa, 0x41, 0x12, 0x9d, 0x60, 0x27, 0x03, 0x8a, 0x59, 0x83, 0x72, 0x46, 0x2d, 0x65, 0x2a, 0x15, + 0xc7, 0xf9, 0x12, 0xc1, 0xee, 0x17, 0x15, 0xc1, 0xbb, 0x02, 0x0d, 0x39, 0x46, 0xa6, 0x6a, 0xc2, + 0x0e, 0x9a, 0x01, 0xf5, 0x31, 0x63, 0xbb, 0x45, 0xbe, 0x4e, 0x39, 0x20, 0x2c, 0x66, 0x2c, 0x49, + 0x2a, 0x33, 0xaf, 0xeb, 0xf7, 0x0c, 0x32, 0xab, 0xc9, 0x7e, 0x63, 0xcb, 0xb1, 0x70, 0xd3, 0x13, + 0x7d, 0x68, 0x52, 0x3e, 0xd8, 0x15, 0x85, 0x45, 0x30, 0x00, 0xe6, 0xfb, 0xd0, 0x30, 0xe2, 0x85, + 0x97, 0x15, 0x67, 0xa1, 0x46, 0x55, 0xe0, 0x54, 0x8b, 0x5c, 0xa8, 0xd1, 0x61, 0xaa, 0x54, 0x7b, + 0x2b, 0xf2, 0xa5, 0x5b, 0xcd, 0xb9, 0x8d, 0xea, 0x30, 0xec, 0x67, 0xc7, 0x03, 0xeb, 0x03, 0x56, + 0xe9, 0x3a, 0x9b, 0x7a, 0x27, 0x4c, 0x7f, 0x88, 0x3c, 0x37, 0xa7, 0x8c, 0x20, 0x26, 0x32, 0xff, + 0x36, 0x36, 0xa9, 0x28, 0xbf, 0x2e, 0xa9, 0xec, 0x4a, 0x8a, 0x8d, 0x99, 0x14, 0x2c, 0x7c, 0xb0, + 0x57, 0xce, 0xb2, 0x3b, 0xbd, 0x53, 0xb4, 0xf5, 0xb3, 0xf7, 0xb6, 0x04, 0x89, 0x92, 0xbd, 0x54, + 0x36, 0x38, 0x19, 0xe4, 0x90, 0x7f, 0x84, 0x68, 0x64, 0x6c, 0x43, 0x85, 0x91, 0xe3, 0x30, 0xc1, + 0x9e, 0x62, 0xc5, 0x90, 0x79, 0xf7, 0x84, 0xf5, 0x7c, 0xbf, 0x3f, 0x08, 0xc0, 0x49, 0x9a, 0xaa, + 0xe8, 0x2c, 0x81, 0x94, 0xf2, 0x7a, 0x4a, 0x1d, 0x23, 0xc1, 0x6e, 0x05, 0x99, 0x56, 0x43, 0x39, + 0xae, 0x9d, 0xe7, 0x9d, 0xc4, 0xba, 0xd2, 0xd7, 0x41, 0x7b, 0x2f, 0xa3, 0x40, 0x61, 0x48, 0xe0, + 0x43, 0x1c, 0x76, 0x27, 0x3e, 0xc2, 0x7d, 0x44, 0x69, 0xbe, 0xbb, 0x17, 0x81, 0x6f, 0xb4, 0xca, + 0x0e, 0x0a, 0xd7, 0x46, 0x24, 0x5c, 0xe7, 0xca, 0x0e, 0x45, 0xa3, 0x9b, 0xbd, 0x00, 0x69, 0x87, + 0x41, 0x36, 0x57, 0xb7, 0x76, 0xbc, 0x3b, 0xe1, 0xa6, 0xb5, 0x52, 0xa0, 0x14, 0x11, 0x33, 0x90, + 0xf6, 0x2c, 0xdc, 0x1f, 0x99, 0xbe, 0xbb, 0x1e, 0xfe, 0x59, 0xd1, 0xbf, 0xf8, 0xca, 0xb6, 0xbe, + 0xe7, 0xd3, 0x27, 0x45, 0xd9, 0xc8, 0x66, 0xb1, 0x6f, 0xd0, 0xa9, 0x49, 0x9f, 0x20, 0xac, 0x7d, + 0x42, 0x22, 0xf6, 0xe9, 0x4b, 0x99, 0xd2, 0xab, 0x0f, 0xb0, 0xf2, 0xeb, 0x4e, 0x66, 0xa3, 0xf3, + 0xd7, 0xca, 0x96, 0xd8, 0xc6, 0xc4, 0x42, 0x2a, 0x63, 0xf1, 0x57, 0xde, 0x5c, 0x0e, 0xee, 0x5b, + 0x11, 0xc8, 0x53, 0x07, 0xe0, 0xb0, 0xc5, 0xa8, 0x1f, 0x54, 0x84, 0xf9, 0x5e, 0x6a, 0xc6, 0x08, + 0xd3, 0x4d, 0xb9, 0xd0, 0xd6, 0x6a, 0x97, 0x5c, 0xbb, 0x1c, 0x67, 0x36, 0x9d, 0x30, 0x51, 0xfc, + 0xee, 0x18, 0xf6, 0x39, 0x93, 0x78, 0x66, 0x3b, 0x55, 0x6b, 0x35, 0x87, 0xc2, 0x51, 0x0c, 0xc2, + 0xdd, 0x4e, 0x9a, 0xbc, 0xb9, 0xcb, 0x18, 0x27, 0x0f, 0x4e, 0xb4, 0x86, 0xf1, 0x9e, 0xf1, 0x94, + 0x86, 0xa4, 0x50, 0xba, 0x5e, 0xee, 0xf8, 0x50, 0x94, 0x64, 0xaf, 0xaf, 0x4d, 0xba, 0x63, 0xac, + 0x08, 
0x02, 0xef, 0x41, 0xa9, 0x16, 0xe3, 0x1b, 0xfb, 0x3c, 0x63, 0xbb, 0x8f, 0xbd, 0xe2, 0x43, + 0x27, 0xa4, 0xea, 0xb4, 0x99, 0xc4, 0x8b, 0xda, 0x0d, 0x6e, 0xb7, 0x36, 0x59, 0xe2, 0xd6, 0xd3, + 0xa2, 0x13, 0xd0, 0xb7, 0x07, 0x04, 0x25, 0x74, 0xe7, 0xde, 0x95, 0x36, 0xcf, 0x0c, 0x4f, 0xd7, + 0x57, 0xc9, 0x3d, 0xee, 0x22, 0x6b, 0x1a, 0x2a, 0x79, 0x13, 0x5e, 0xfe, 0x9a, 0x78, 0x4c, 0xff, + 0x7a, 0xfd, 0x87, 0x97, 0xea, 0xc0, 0x0e, 0x2e, 0xde, 0x85, 0x0b, 0x63, 0x22, 0xe5, 0xab, 0x3f, + 0xa8, 0x2d, 0xc2, 0x8a, 0x2d, 0x02, 0xdf, 0x9d, 0xc4, 0xb8, 0x37, 0xd3, 0x36, 0x1d, 0x0a, 0xed, + 0x73, 0x04, 0xd2, 0x6d, 0xa6, 0x14, 0x9d, 0xa8, 0xd9, 0xda, 0x42, 0x1a, 0x40, 0x9a, 0x77, 0x99, + 0x94, 0x0a, 0x25, 0x67, 0x96, 0x05, 0x71, 0xf5, 0xd9, 0xc1, 0xad, 0x0a, 0xe4, 0xe8, 0x14, 0xcd, + 0x5b, 0x74, 0xb5, 0x88, 0xe4, 0x97, 0x07, 0xa4, 0x7e, 0x1c, 0x8a, 0x59, 0xdb, 0xf0, 0x04, 0xc5, + 0xfe, 0x3b, 0xac, 0x3e, 0x9d, 0x0b, 0x99, 0x22, 0xd9, 0xa7, 0x82, 0xc6, 0x6f, 0x94, 0xdc, 0x0d, + 0xf9, 0x27, 0xc7, 0x4d, 0xc4, 0x78, 0xd1, 0x37, 0x86, 0x81, 0x83, 0xcd, 0x24, 0x25, 0x44, 0xb0, + 0x25, 0xc7, 0xd8, 0xef, 0xbf, 0xe6, 0xaa, 0x5b, 0x53, 0x3a, 0x0e, 0xa4, 0xab, 0x14, 0xe1, 0x3a, + 0xcc, 0x14, 0x03, 0xf1, 0x2e, 0x25, 0x98, 0x41, 0x9c, 0xc9, 0xef, 0x4b, 0x61, 0xda, 0x4e, 0x95, + 0xcf, 0xfc, 0x5b, 0xbf, 0x87, 0x53, 0x03, 0x11, 0x6f, 0x13, 0x7f, 0x7d, 0xb3, 0xf1, 0x85, 0xf6, + 0x92, 0x1c, 0xcc, 0x7c, 0xf9, 0xdb, 0x4b, 0xab, 0xa1, 0x9d, 0xcf, 0x9a, 0xc4, 0x56, 0xf5, 0xa0, + 0x7a, 0x82, 0x75, 0xf0, 0xcd, 0x70, 0xa2, 0x18, 0xd8, 0x95, 0x72, 0x89, 0xd2, 0x03, 0x66, 0xfb, + 0x89, 0x1b, 0x2c, 0x07, 0x32, 0x03, 0x52, 0x87, 0xbb, 0xe3, 0x32, 0x67, 0x89, 0x59, 0xe8, 0x0b, + 0x7c, 0x35, 0xf0, 0xda, 0x43, 0x0c, 0xb6, 0xa4, 0x0f, 0x1e, 0x62, 0xad, 0x28, 0x3b, 0xc2, 0x8e, + 0xea, 0x85, 0xc0, 0xf7, 0x88, 0x12, 0xaa, 0x1c, 0x46, 0xee, 0x3a, 0xe9, 0x4c, 0x79, 0x3f, 0xaa, + 0x9b, 0x33, 0x88, 0x08, 0x01, 0x8c, 0xc2, 0x93, 0xf2, 0x9b, 0xe0, 0x08, 0x19, 0x0f, 0x02, 0x95, + 0x7b, 0x28, 0xa4, 0x98, 0x38, 0x09, 0x84, 0x82, 0x60, 0x07, 0xc0, 0x9d, 0x74, 0xe2, 0x1a, 0x70, + 0x70, 0xd7, 0x20, 0x84, 0x53, 0xc4, 0x8d, 0xca, 0xdf, 0x5b, 0x1b, 0xd0, 0x38, 0x9a, 0xfd, 0xbd, + 0x1d, 0x39, 0xaa, 0x8d, 0x00, 0x5e, 0xd2, 0xec, 0xc1, 0xa4, 0x67, 0x9f, 0x3c, 0x0c, 0x16, 0xd8, + 0xb3, 0x2f, 0xbf, 0x23, 0x1e, 0x28, 0xec, 0x8a, 0xe5, 0x04, 0x84, 0x85, 0x28, 0xbc, 0xa8, 0x6d, + 0x36, 0x7c, 0xfd, 0xfe, 0x90, 0x29, 0x1d, 0x33, 0x4c, 0x94, 0x26, 0x52, 0xb3, 0x0d, 0x64, 0xdd, + 0xde, 0x8f, 0xd9, 0xed, 0xa0, 0xb2, 0x2e, 0xe2, 0x47, 0x1a, 0xd0, 0xf1, 0x6d, 0x2a, 0x87, 0x00, + 0x76, 0x1a, 0xa0, 0x7a, 0x9e, 0xdf, 0xd2, 0x69, 0x28, 0x7f, 0x18, 0x1f, 0x98, 0x81, 0xb1, 0x89, + 0x5f, 0xc2, 0x17, 0x82, 0x39, 0xd3, 0x12, 0x4e, 0x83, 0xd7, 0x2b, 0x81, 0xa7, 0x20, 0x0e, 0x78, + 0xe7, 0x41, 0x71, 0x5c, 0xc6, 0x51, 0xf1, 0x5c, 0x3b, 0x3c, 0x11, 0xd6, 0xc0, 0x19, 0xe1, 0x54, + 0x21, 0x6b, 0x17, 0x1a, 0xbc, 0x15, 0x04, 0x5a, 0xb7, 0x5b, 0x77, 0xfe, 0xde, 0x65, 0xb2, 0x90, + 0xda, 0xc9, 0xdf, 0xb2, 0x48, 0xeb, 0x43, 0xdb, 0x4d, 0x2e, 0xbe, 0x0a, 0x93, 0xdc, 0x0e, 0xd0, + 0x07, 0x8f, 0x5c, 0x68, 0x22, 0x94, 0x23, 0xa2, 0xad, 0x79, 0xf7, 0x80, 0xcc, 0xd4, 0x54, 0x4c, + 0x52, 0x94, 0x5a, 0x5e, 0x50, 0x7e, 0x97, 0x67, 0x6c, 0x55, 0x51, 0xc5, 0xe7, 0x09, 0x57, 0xd3, + 0xcb, 0x9f, 0x1a, 0x44, 0x30, 0xd0, 0x90, 0x49, 0xbf, 0xd5, 0x2b, 0x36, 0x73, 0xca, 0x13, 0xd5, + 0x9b, 0x06, 0xba, 0x98, 0xad, 0x42, 0xba, 0x28, 0xc2, 0xbc, 0x43, 0x3e, 0x28, 0xe0, 0xdc, 0x13, + 0x8b, 0xa0, 0xf5, 0xba, 0x7b, 0xda, 0xac, 0x3c, 0x06, 0xca, 0x7d, 0x53, 0x31, 0x81, 0x5e, 0x94, + 0x5a, 0xca, 0x64, 0x5e, 0x64, 
0x11, 0x9a, 0x72, 0x9a, 0x58, 0x75, 0x03, 0x1b, 0xc6, 0x84, 0x23, + 0xd8, 0x68, 0xd8, 0xa6, 0xe6, 0x2d, 0x15, 0x4d, 0xae, 0x75, 0x29, 0xc7, 0x37, 0x50, 0x00, 0x7d, + 0xde, 0x93, 0x38, 0x63, 0x06, 0x76, 0xfc, 0x7b, 0xe2, 0xb9, 0x0e, 0x2f, 0xad, 0x44, 0x1e, 0xa8, + 0x56, 0x42, 0xba, 0x7f, 0x88, 0x48, 0x07, 0xd5, 0x4d, 0x1e, 0xf1, 0xf0, 0xa1, 0xce, 0xa1, 0xdd, + 0xc5, 0x73, 0x0c, 0x4f, 0x75, 0x56, 0x30, 0xb3, 0xdd, 0x69, 0x81, 0x4e, 0xaf, 0x06, 0xdd, 0xe6, + 0x05, 0x08, 0x5c, 0xad, 0xef, 0x2c, 0x55, 0x12, 0x1e, 0x17, 0x04, 0x12, 0xf8, 0xbe, 0x06, 0x27, + 0x58, 0xc3, 0x97, 0xe7, 0xc4, 0x97, 0xe9, 0x24, 0x2b, 0xba, 0x5c, 0xb9, 0xe4, 0xd6, 0x56, 0x0f, + 0x51, 0x9c, 0xfc, 0xb1, 0xb9, 0x50, 0x1c, 0xdd, 0x07, 0x89, 0x3a, 0xad, 0x9b, 0x85, 0x22, 0x1e, + 0x8b, 0xe9, 0x1a, 0xe7, 0xaa, 0x68, 0xf3, 0xcd, 0x29, 0xa8, 0x57, 0x6a, 0x74, 0x20, 0x3e, 0x95, + 0x17, 0x4c, 0x05, 0x3b, 0x8e, 0x30, 0x98, 0x4f, 0x15, 0x33, 0x63, 0xbd, 0xa9, 0x5e, 0x73, 0xac, + 0x00, 0xe9, 0x88, 0xd7, 0x5e, 0x0f, 0x94, 0x0c, 0x76, 0x08, 0x02, 0xd7, 0x92, 0xc7, 0x4f, 0x29, + 0x12, 0x45, 0xf3, 0x8d, 0x9e, 0x11, 0x78, 0x77, 0x34, 0x65, 0x11, 0xee, 0xa6, 0x5d, 0xcb, 0x73, + 0xc8, 0x7f, 0x27, 0x82, 0x9d, 0x36, 0x1d, 0xa0, 0xc3, 0x0f, 0x22, 0x9e, 0x88, 0x64, 0x3b, 0xd7, + 0x12, 0xd2, 0x70, 0x2c, 0x0f, 0x10, 0x83, 0x42, 0x0f, 0x3d, 0xb8, 0x6d, 0xae, 0x33, 0x68, 0x4f, + 0xe0, 0x86, 0x53, 0x48, 0xb6, 0xd1, 0x7b, 0x0b, 0x01, 0x24, 0x3a, 0x51, 0x99, 0x24, 0xda, 0x98, + 0x60, 0x18, 0x65, 0xed, 0x84, 0xc4, 0x85, 0x7e, 0x57, 0x0b, 0x1a, 0x34, 0xe6, 0xd5, 0xd6, 0x56, + 0x00, 0x1d, 0x23, 0x63, 0xd9, 0xb5, 0x02, 0x9c, 0x47, 0xed, 0x88, 0xae, 0xa8, 0x0d, 0xc7, 0xda, + 0xef, 0xed, 0x67, 0x57, 0x9b, 0x81, 0xcd, 0xa2, 0x37, 0x94, 0xd4, 0x38, 0x53, 0x3f, 0x6c, 0x56, + 0x5c, 0xa6, 0xae, 0xfc, 0x1e, 0xef, 0x89, 0xec, 0xd3, 0xc4, 0x39, 0xa5, 0x1e, 0xed, 0xf1, 0x85, + 0x5d, 0xb2, 0xd6, 0x3f, 0x29, 0x90, 0x6a, 0xeb, 0x22, 0x72, 0x88, 0xec, 0x9e, 0x5f, 0xef, 0x79, + 0x99, 0xfb, 0x77, 0xf5, 0x0b, 0x8e, 0xc5, 0x2b, 0xe1, 0x5c, 0xab, 0x60, 0x2a, 0xce, 0x29, 0xf3, + 0x6a, 0x65, 0x3f, 0xaf, 0xa6, 0x67, 0xa7, 0x00, 0x7a, 0xbb, 0x10, 0xd8, 0x29, 0x96, 0x10, 0xe4, + 0x5c, 0xcb, 0x3e, 0xf2, 0x73, 0x97, 0x90, 0x70, 0x99, 0x83, 0x74, 0xf8, 0x86, 0x74, 0xb0, 0xe2, + 0x72, 0x5c, 0x2e, 0x06, 0x13, 0xb7, 0x9e, 0x57, 0xf8, 0xb7, 0x90, 0x11, 0x1f, 0xa5, 0x00, 0x77, + 0x7b, 0x71, 0xab, 0xe9, 0x8f, 0x8c, 0xbd, 0x89, 0x5b, 0xae, 0x8e, 0x03, 0xe7, 0xc4, 0x0d, 0xdc, + 0x33, 0xe2, 0x1f, 0x16, 0x31, 0xc5, 0xd8, 0x88, 0x50, 0xf5, 0x2b, 0x81, 0x3d, 0x7d, 0xe6, 0xd1, + 0x34, 0xb2, 0x08, 0x68, 0xdb, 0x11, 0xd9, 0x34, 0xd0, 0xbd, 0x6e, 0x1d, 0x3a, 0x38, 0x87, 0x43, + 0xa3, 0xb5, 0xc9, 0xd3, 0x10, 0x15, 0x38, 0x51, 0xca, 0x71, 0x8c, 0xdd, 0xe4, 0xeb, 0x89, 0x8a, + 0x80, 0x92, 0x77, 0x6f, 0x86, 0xe2, 0xe7, 0xc8, 0x04, 0x47, 0xf3, 0x35, 0xf6, 0x25, 0xe4, 0x29, + 0x42, 0x59, 0x3a, 0x24, 0xfb, 0xc8, 0xb6, 0xb6, 0x46, 0x5b, 0x8c, 0xc4, 0x20, 0x53, 0x16, 0xb1, + 0x76, 0xbc, 0x9d, 0x5f, 0x62, 0x89, 0xa7, 0x9b, 0xe1, 0x02, 0x0d, 0x71, 0x8b, 0xcb, 0xdb, 0x2b, + 0x8a, 0x29, 0x32, 0x07, 0x67, 0xe0, 0x79, 0xe0, 0xe0, 0x15, 0xc2, 0xbe, 0xd4, 0x24, 0x78, 0x5c, + 0xb7, 0xdd, 0xb7, 0xa5, 0x58, 0xf7, 0x57, 0x6d, 0x31, 0xfa, 0xea, 0xde, 0x69, 0x4c, 0x78, 0xfa, + 0x39, 0xa5, 0xc5, 0x77, 0xea, 0x16, 0xc1, 0x2c, 0x3c, 0x1d, 0xd0, 0xd0, 0xad, 0x01, 0x16, 0x63, + 0xd5, 0x82, 0xa9, 0x19, 0xe5, 0xaf, 0x78, 0x9b, 0xc1, 0x12, 0xb9, 0xed, 0x17, 0x01, 0xca, 0xc8, + 0x91, 0x4f, 0xe8, 0xa3, 0xe0, 0x65, 0x02, 0xe6, 0x01, 0x17, 0xc6, 0x6f, 0x81, 0x6c, 0x5e, 0x61, + 0x42, 0x9f, 0x89, 0x6d, 0xdf, 0x6c, 0x8c, 0x2c, 0x7e, 
0x1e, 0x97, 0x3d, 0x12, 0x77, 0x31, 0xbc, + 0x83, 0x49, 0xbc, 0xed, 0xcd, 0xb9, 0x34, 0x1b, 0x97, 0xf0, 0xc9, 0x5e, 0x71, 0x5d, 0x8c, 0x2a, + 0x32, 0xf3, 0xf4, 0xe3, 0x59, 0x6b, 0x0b, 0x5b, 0xc6, 0xdc, 0x46, 0xff, 0xb0, 0x9d, 0xf3, 0xfc, + 0x09, 0x59, 0x46, 0x0a, 0xb8, 0xd7, 0x94, 0x40, 0x5e, 0x1c, 0x19, 0x7d, 0x7b, 0x1e, 0xa3, 0x11, + 0x78, 0x87, 0xb2, 0x5d, 0xb6, 0xe6, 0xf1, 0x1c, 0x8d, 0xc6, 0x76, 0x08, 0x86, 0x46, 0x2c, 0x7e, + 0x36, 0x60, 0x34, 0x5b, 0x59, 0x64, 0x30, 0x69, 0x0d, 0x5a, 0x5b, 0x99, 0x38, 0xd7, 0xce, 0x91, + 0xbd, 0x5c, 0x42, 0xf0, 0xdc, 0xc6, 0x79, 0x10, 0x8a, 0x48, 0x87, 0x3c, 0xb3, 0x09, 0x95, 0x5b, + 0x97, 0x54, 0x0a, 0x9a, 0xc0, 0xd8, 0x9d, 0x7c, 0x4d, 0x9a, 0xe5, 0x63, 0x68, 0x2c, 0x7e, 0xf1, + 0x8f, 0x29, 0x3a, 0x7f, 0xb5, 0xf5, 0xab, 0xba, 0xed, 0x44, 0x73, 0x5e, 0xb3, 0xe5, 0xda, 0x4b, + 0x24, 0xf6, 0x62, 0xc9, 0xad, 0xaf, 0x49, 0x15, 0x2e, 0x4d, 0xfb, 0x34, 0xcf, 0x2e, 0xf2, 0x31, + 0xed, 0xe7, 0x9a, 0x84, 0x83, 0xd6, 0x23, 0x1d, 0x2f, 0x22, 0xea, 0x61, 0xe0, 0xa3, 0xe5, 0x9e, + 0xee, 0xb2, 0x45, 0xff, 0xe9, 0x1f, 0xd5, 0xb5, 0x4c, 0xb7, 0xde, 0x55, 0x8b, 0x45, 0x46, 0xb2, + 0x6c, 0x69, 0x61, 0x8d, 0xff, 0x9e, 0x37, 0x19, 0xa1, 0xa7, 0x85, 0xf4, 0x4a, 0xf5, 0x00, 0xfb, + 0xda, 0xa2, 0x7b, 0x21, 0x5b, 0x4c, 0x59, 0x58, 0xce, 0x8e, 0x3b, 0xb6, 0xb0, 0xe4, 0x82, 0xe4, + 0x1e, 0x66, 0x12, 0x5e, 0x41, 0x64, 0x26, 0x24, 0xde, 0x55, 0x72, 0x0b, 0x58, 0x59, 0x87, 0x78, + 0x0d, 0x43, 0xd4, 0x31, 0xae, 0xe2, 0xfe, 0x56, 0x06, 0xf3, 0x0f, 0xfc, 0x41, 0xc6, 0x4c, 0xeb, + 0x4d, 0xeb, 0xc8, 0xee, 0x4c, 0x73, 0x71, 0xa7, 0xed, 0x29, 0x11, 0x37, 0xdb, 0x7f, 0x4c, 0xfa, + 0x4c, 0x9e, 0x1f, 0x29, 0xc7, 0x80, 0xc8, 0x9e, 0x01, 0x98, 0x24, 0x67, 0x17, 0x6b, 0x55, 0x54, + 0xbc, 0xb1, 0xf3, 0x62, 0x2a, 0x39, 0x25, 0x90, 0x41, 0x9f, 0xe7, 0x9d, 0x34, 0x7e, 0x35, 0x76, + 0xb5, 0x0a, 0xdc, 0xca, 0x59, 0x2d, 0x28, 0x05, 0xad, 0x5b, 0x5f, 0xa4, 0x81, 0xc2, 0xf4, 0x83, + 0xba, 0x84, 0xc5, 0xab, 0x22, 0x7f, 0xe5, 0xc7, 0xc9, 0xfe, 0x41, 0x46, 0x94, 0x12, 0xfe, 0x18, + 0xc1, 0xa8, 0xed, 0xbf, 0xfc, 0x77, 0xc8, 0x35, 0x2c, 0x29, 0xa1, 0xdc, 0xe5, 0x6f, 0xf3, 0xbb, + 0xa3, 0x7b, 0xa5, 0x42, 0xdf, 0xe4, 0x79, 0xec, 0x45, 0x1b, 0x32, 0xd6, 0x89, 0x67, 0xa4, 0x42, + 0x60, 0xb9, 0xa7, 0x78, 0x7e, 0x62, 0xd4, 0x78, 0x82, 0x54, 0x3b, 0x3c, 0xd5, 0x34, 0x39, 0xb8, + 0xbc, 0x95, 0x10, 0x4c, 0xf0, 0x1b, 0x63, 0xd9, 0x8b, 0xae, 0xb8, 0xbf, 0xe5, 0x2a, 0x9d, 0x47, + 0xea, 0xc7, 0xfc, 0x42, 0xb5, 0x3a, 0xc0, 0x88, 0xe9, 0xd3, 0xca, 0x5e, 0x2f, 0xa7, 0x4f, 0x43, + 0xa9, 0x1c, 0x43, 0xd0, 0xe3, 0x7b, 0xa8, 0x17, 0x71, 0xcc, 0xe0, 0x80, 0x94, 0xc9, 0xde, 0x3b, + 0x38, 0x9d, 0x65, 0x18, 0xae, 0xf0, 0x0b, 0xe0, 0xc2, 0x35, 0x19, 0xfb, 0xe3, 0x98, 0x40, 0xa8, + 0xad, 0x19, 0x67, 0x0d, 0x60, 0x6b, 0x04, 0xe2, 0xd5, 0x2d, 0xa4, 0xbd, 0xe9, 0x0e, 0x78, 0x87, + 0xbf, 0x60, 0x7b, 0x63, 0x43, 0x6e, 0x65, 0xb2, 0xde, 0x93, 0x3a, 0x71, 0x1e, 0x05, 0x6b, 0xab, + 0x56, 0xd7, 0x31, 0x08, 0xee, 0x34, 0x47, 0xad, 0x38, 0xf3, 0x38, 0xde, 0x7a, 0x04, 0x7f, 0xfa, + 0x89, 0x19, 0xef, 0x8b, 0x7f, 0x16, 0x4a, 0x35, 0xe5, 0x2c, 0x4f, 0xc6, 0x95, 0xa7, 0x6f, 0x58, + 0x4d, 0x3c, 0xa6, 0xc1, 0xaa, 0xb2, 0x46, 0x27, 0x60, 0x29, 0x11, 0x75, 0xd5, 0x6b, 0xde, 0xa7, + 0x9e, 0x27, 0x43, 0xba, 0x27, 0xb3, 0x9a, 0x4a, 0x0d, 0xe9, 0xa7, 0xaa, 0x2d, 0x0f, 0x93, 0xe8, + 0x25, 0x2d, 0x43, 0x31, 0x82, 0xf3, 0x09, 0xba, 0x3c, 0x38, 0x3b, 0x9c, 0xa9, 0x3b, 0x79, 0xe5, + 0xb1, 0x92, 0xea, 0x4e, 0x8d, 0xfb, 0x8c, 0x50, 0x5b, 0xa4, 0xc9, 0xa0, 0xee, 0xad, 0xc2, 0xbc, + 0xef, 0xb6, 0x30, 0xf4, 0xf5, 0x22, 0x2d, 0x5c, 0xba, 0xd6, 0xe8, 0xcc, 0x6c, 
0xea, 0x4e, 0xed, + 0x29, 0xa4, 0xb3, 0x77, 0x3c, 0xbc, 0xff, 0xe9, 0xb3, 0x7e, 0xc2, 0xa9, 0x75, 0x16, 0x49, 0x66, + 0x31, 0x48, 0x30, 0xd2, 0x62, 0xa8, 0x80, 0x41, 0x7c, 0x50, 0x3e, 0xae, 0xfd, 0x1b, 0xc9, 0x63, + 0x5b, 0x04, 0x17, 0xf9, 0x50, 0x84, 0x2b, 0xcc, 0x2b, 0x66, 0xc0, 0xe1, 0xf5, 0x12, 0x19, 0xfb, + 0x5b, 0x47, 0x3f, 0xbe, 0xc0, 0xf6, 0x27, 0x6b, 0xff, 0x16, 0x6f, 0x39, 0xe5, 0x3b, 0xc9, 0x59, + 0x84, 0xea, 0x2e, 0x4c, 0x44, 0x6b, 0x2f, 0x4c, 0x1d, 0x9e, 0x8c, 0xba, 0x92, 0x3e, 0x21, 0x35, + 0xcd, 0x23, 0x18, 0x5b, 0x84, 0x4d, 0x02, 0x06, 0x9e, 0xec, 0xe4, 0xad, 0x92, 0x9d, 0x23, 0xde, + 0xa0, 0xc6, 0x3d, 0x2f, 0xd4, 0xf9, 0x15, 0xdc, 0x0f, 0x03, 0x12, 0x59, 0xd3, 0xd2, 0x35, 0xbf, + 0xc7, 0x22, 0xcc, 0xa3, 0x0f, 0x83, 0xee, 0xb8, 0xd0, 0xaf, 0x60, 0xae, 0xd9, 0x73, 0xd3, 0x12, + 0x1a, 0xad, 0xcd, 0x69, 0xef, 0x9b, 0x5a, 0x1e, 0x0e, 0x2a, 0x5c, 0x1c, 0x74, 0xf0, 0xff, 0xba, + 0x98, 0xc2, 0x78, 0x46, 0xbb, 0xf8, 0x47, 0x55, 0xae, 0x7b, 0x9d, 0xab, 0x9c, 0x94, 0xdf, 0xb2, + 0x53, 0x21, 0xbd, 0xd9, 0x2f, 0xf1, 0x75, 0x5e, 0x87, 0xfb, 0x25, 0x6b, 0xa8, 0xda, 0xa9, 0x43, + 0x0d, 0x4f, 0xc4, 0xa6, 0xb8, 0xb5, 0x7f, 0x16, 0xb2, 0xe4, 0x84, 0x72, 0x16, 0x01, 0x80, 0x3f, + 0xc9, 0xf7, 0x61, 0xcb, 0xbd, 0x75, 0xcf, 0x02, 0x5b, 0x98, 0x9c, 0xc7, 0x6b, 0xb0, 0xd7, 0xd2, + 0xab, 0x0f, 0x13, 0x8f, 0x07, 0x45, 0xe3, 0xaa, 0x6a, 0x51, 0x6f, 0x09, 0x6c, 0x3e, 0x6c, 0xf0, + 0x49, 0xba, 0x2f, 0x3f, 0xae, 0xa8, 0xfe, 0x44, 0x91, 0x2c, 0x84, 0xf9, 0xbb, 0x51, 0xfb, 0x48, + 0x4a, 0xee, 0x6e, 0x25, 0xc4, 0x53, 0xfa, 0x69, 0x6d, 0x4e, 0xe8, 0x82, 0x8e, 0x2b, 0x96, 0x3e, + 0xd6, 0x94, 0xba, 0xe6, 0x95, 0xa4, 0x97, 0xe4, 0x06, 0x3e, 0xa2, 0xfd, 0x65, 0x0a, 0xce, 0xdc, + 0x35, 0x1a, 0x3c, 0x84, 0xad, 0x15, 0xa4, 0xa0, 0x30, 0x60, 0x95, 0x51, 0x4b, 0x8c, 0xe8, 0x4e, + 0xc4, 0x7a, 0xd7, 0x40, 0x9a, 0x04, 0xa6, 0x3c, 0x1d, 0x12, 0x1f, 0x8e, 0xbe, 0x48, 0xe4, 0x2c, + 0x3d, 0x99, 0x56, 0x76, 0x47, 0x41, 0x75, 0x45, 0x5a, 0xa5, 0xf7, 0xf8, 0xe8, 0x8b, 0xe6, 0x91, + 0x66, 0x25, 0xc8, 0x93, 0x15, 0x29, 0x18, 0xc3, 0x5a, 0x2b, 0xc5, 0x4f, 0x2e, 0xb7, 0x44, 0x7a, + 0x4c, 0xa1, 0xe6, 0x19, 0xeb, 0xbc, 0xcc, 0x20, 0x71, 0x42, 0x05, 0x1e, 0xd3, 0x65, 0x59, 0xd4, + 0xaa, 0xcb, 0xf9, 0xac, 0xb9, 0x24, 0xf8, 0xa5, 0xcc, 0x0f, 0x24, 0xf5, 0x18, 0x0e, 0x26, 0x87, + 0x04, 0xaf, 0x86, 0x4a, 0x4f, 0xde, 0x2b, 0x5a, 0xee, 0x74, 0x53, 0x12, 0x3a, 0x14, 0xd1, 0xd3, + 0xcb, 0xe2, 0x9c, 0xb3, 0x9d, 0x0f, 0xe2, 0x7d, 0x17, 0x6b, 0x12, 0xbf, 0xab, 0x4c, 0xdd, 0xdd, + 0xd4, 0x44, 0xe0, 0x2e, 0x1e, 0xdb, 0x5b, 0xe7, 0xfd, 0xfb, 0x0e, 0xe7, 0x3b, 0xd4, 0x01, 0xac, + 0x32, 0xbd, 0x99, 0x33, 0xf2, 0x20, 0x9b, 0xfa, 0x9b, 0xac, 0x8c, 0x7d, 0x89, 0xfd, 0x14, 0xc5, + 0x91, 0x1f, 0xdf, 0xbf, 0x2c, 0x29, 0xd6, 0x62, 0xae, 0xe4, 0xc4, 0x9e, 0x43, 0xb1, 0xbf, 0xd6, + 0xa4, 0xad, 0x6a, 0x1d, 0xff, 0x52, 0x8c, 0x0b, 0x3f, 0x0a, 0x0d, 0xc6, 0x45, 0xc9, 0xe0, 0x0e, + 0xae, 0x47, 0xc8, 0x32, 0x9d, 0x51, 0xc0, 0x32, 0x18, 0x97, 0xeb, 0x3f, 0x5b, 0xeb, 0xf0, 0x7d, + 0xbe, 0x9c, 0xb5, 0x32, 0x4d, 0xd9, 0x95, 0x0e, 0xb9, 0x91, 0xb9, 0xaa, 0x60, 0x39, 0xe3, 0xd7, + 0x12, 0x21, 0x1f, 0x32, 0x9d, 0xc1, 0xf6, 0xc7, 0x3f, 0xb7, 0x63, 0x19, 0x6b, 0xd5, 0xa4, 0x49, + 0x17, 0x28, 0x41, 0xf6, 0x31, 0xa1, 0x2f, 0xd5, 0x1d, 0xcd, 0x92, 0xf2, 0x58, 0x9e, 0xc6, 0x5f, + 0xba, 0xcf, 0xf3, 0xf2, 0xbb, 0xb4, 0xc6, 0xc2, 0x66, 0x96, 0x37, 0x7f, 0x28, 0xc3, 0x6e, 0xde, + 0x7d, 0xf9, 0xc5, 0xd9, 0xc1, 0xf2, 0xc0, 0xd6, 0x0d, 0xa1, 0x7c, 0x33, 0xc2, 0xca, 0x90, 0x43, + 0x34, 0x81, 0xfd, 0x6a, 0x80, 0x2b, 0x79, 0x60, 0x35, 0xee, 0x5c, 0x2b, 0xc6, 0xc3, 0xc9, 0xa9, + 0xd3, 
0x38, 0xb5, 0xee, 0x58, 0xde, 0xda, 0x1b, 0xc6, 0xee, 0xe2, 0x4e, 0xa4, 0x4c, 0xc2, 0x8a, + 0xf6, 0x17, 0x2d, 0xd9, 0x91, 0x3f, 0x17, 0xcd, 0xca, 0xb5, 0x64, 0xa3, 0xd5, 0x9b, 0x27, 0x5a, + 0x9c, 0xaa, 0x7c, 0x43, 0x06, 0x3d, 0x6f, 0x26, 0xdd, 0xa7, 0x8c, 0x27, 0xe6, 0x36, 0x08, 0x96, + 0xb6, 0x50, 0xdb, 0x24, 0x66, 0xb6, 0xe3, 0x65, 0x79, 0x41, 0xe1, 0x6c, 0xe1, 0x17, 0xfa, 0x70, + 0x6b, 0xe2, 0x8b, 0xb1, 0x1a, 0xbd, 0x2f, 0xb0, 0xcf, 0x9e, 0x4f, 0x2d, 0x57, 0xfc, 0xd3, 0xb1, + 0xef, 0x17, 0x01, 0xe1, 0x56, 0x6a, 0xaa, 0x2b, 0x84, 0xa2, 0x66, 0xff, 0xa5, 0x76, 0x02, 0x55, + 0xe3, 0x08, 0x1f, 0xf8, 0xf5, 0xae, 0x72, 0xdd, 0x14, 0x63, 0xc9, 0x2e, 0x38, 0xe6, 0x8e, 0x19, + 0xc2, 0x93, 0x8b, 0xfe, 0x87, 0x1f, 0xf8, 0xd7, 0xf7, 0xa4, 0xdc, 0x29, 0x9a, 0xe1, 0xc2, 0xb4, + 0x00, 0x92, 0x6a, 0x98, 0x95, 0xb5, 0x9a, 0xfa, 0x5c, 0x84, 0xfd, 0x42, 0x70, 0x72, 0x73, 0x33, + 0xa6, 0x82, 0xa7, 0x63, 0xfb, 0x95, 0x4e, 0x68, 0x57, 0xeb, 0x20, 0x6d, 0x7d, 0x6d, 0x29, 0x5f, + 0x88, 0xae, 0x57, 0x85, 0xcf, 0x82, 0xe7, 0x3b, 0x79, 0x18, 0x5e, 0xa1, 0x49, 0x76, 0xd4, 0xf4, + 0x0b, 0x6d, 0x46, 0x13, 0xb2, 0x1e, 0xa5, 0x08, 0x3e, 0x18, 0xde, 0x7f, 0x39, 0x4c, 0xf2, 0x0e, + 0x74, 0x4f, 0xfa, 0xf4, 0xc9, 0x72, 0xf0, 0x9e, 0x15, 0x39, 0xef, 0x8d, 0xb8, 0xf7, 0xbd, 0xe3, + 0x74, 0x53, 0xcb, 0x0d, 0x12, 0x26, 0x05, 0x1b, 0x38, 0x73, 0x7d, 0xbe, 0x9f, 0x07, 0x2d, 0xa5, + 0xa2, 0x5f, 0x96, 0x5b, 0x92, 0xee, 0x83, 0x45, 0x31, 0x01, 0x60, 0x8b, 0x43, 0x48, 0x23, 0x37, + 0xd7, 0xcd, 0xf0, 0x88, 0x88, 0x1a, 0x76, 0xc9, 0x85, 0x4e, 0xf4, 0xe0, 0xcd, 0x5b, 0x24, 0x18, + 0x44, 0xe2, 0xc4, 0x1d, 0x6e, 0x33, 0xb4, 0x53, 0xac, 0x00, 0x5e, 0xf8, 0x65, 0xd5, 0x4d, 0x15, + 0x8c, 0x50, 0x9e, 0x1f, 0x1f, 0xf0, 0x92, 0x7a, 0x86, 0x30, 0xc1, 0xa6, 0xfa, 0xc4, 0xef, 0x22, + 0x67, 0x74, 0xba, 0x11, 0x1b, 0x02, 0xfa, 0x32, 0x51, 0x9d, 0x63, 0x69, 0xa7, 0x4d, 0x4a, 0xc5, + 0x58, 0x46, 0xda, 0xc2, 0xd2, 0xfe, 0x38, 0x18, 0x2c, 0xdb, 0x61, 0x69, 0x3d, 0xf1, 0x8a, 0xfa, + 0x7a, 0x2c, 0x3a, 0xeb, 0x6f, 0x30, 0xef, 0xe3, 0xde, 0x18, 0xd3, 0x69, 0x45, 0x66, 0x36, 0x47, + 0xc4, 0xfe, 0x74, 0xf9, 0xd1, 0x8c, 0xd1, 0x93, 0xaa, 0x2e, 0x0e, 0x6e, 0x8d, 0x34, 0xd2, 0x6d, + 0xc7, 0xe7, 0x28, 0xf2, 0x92, 0xa9, 0xae, 0x84, 0x16, 0x29, 0xda, 0x7a, 0x78, 0xc2, 0xfe, 0xbe, + 0xb0, 0x1c, 0x81, 0xe6, 0x19, 0x42, 0x7b, 0x9d, 0x61, 0x7c, 0x42, 0x87, 0x33, 0xb9, 0xdb, 0x84, + 0x36, 0x3f, 0xdf, 0x7d, 0xf3, 0x0e, 0x76, 0x73, 0x5a, 0x93, 0x5e, 0x1a, 0x56, 0x24, 0x95, 0x1e, + 0x05, 0xb3, 0x1d, 0x8a, 0xdf, 0x55, 0x1d, 0x65, 0xf2, 0x46, 0x2f, 0x9f, 0x3b, 0x53, 0xd1, 0x91, + 0xf8, 0xb8, 0x7a, 0x27, 0x1e, 0x29, 0xc1, 0xd6, 0xf3, 0x86, 0xbf, 0x76, 0xcd, 0xa6, 0x2e, 0x5c, + 0xb8, 0x6a, 0x0d, 0x4e, 0xd9, 0x30, 0x89, 0x60, 0x9a, 0x94, 0x20, 0xd6, 0x8a, 0x53, 0x4b, 0x16, + 0x79, 0x6a, 0x05, 0x28, 0x6b, 0xb2, 0x2c, 0x20, 0x45, 0x20, 0xcf, 0x8a, 0xf2, 0x13, 0x70, 0xd8, + 0x21, 0xa2, 0xcd, 0x09, 0x27, 0x32, 0xa3, 0xe8, 0x17, 0x24, 0x2e, 0x3f, 0xab, 0x3d, 0xe7, 0x2b, + 0x3f, 0xa6, 0xb3, 0x2d, 0x2e, 0x70, 0xc2, 0x68, 0xb6, 0x13, 0x0a, 0xf8, 0xd8, 0xd6, 0x97, 0x7f, + 0xcc, 0xfe, 0xf9, 0x7c, 0x4b, 0x6a, 0xb3, 0x70, 0x7f, 0x03, 0xae, 0x61, 0x3d, 0x9e, 0x32, 0x0e, + 0x34, 0x64, 0x59, 0xe5, 0x2d, 0xc5, 0x29, 0x01, 0xe2, 0x39, 0xc8, 0xc7, 0xc9, 0xf6, 0x54, 0x63, + 0xd6, 0xb8, 0x34, 0x64, 0xff, 0xde, 0x79, 0xd8, 0x63, 0xcd, 0x37, 0x6c, 0x03, 0xe7, 0xee, 0xd1, + 0x16, 0x4b, 0x1c, 0xc6, 0xf9, 0x71, 0x50, 0xd1, 0x21, 0x45, 0x07, 0xa4, 0xda, 0x63, 0xda, 0xeb, + 0x9f, 0x9b, 0x1b, 0xf4, 0x68, 0x59, 0xa9, 0x84, 0x30, 0x15, 0x85, 0x2a, 0x4f, 0x72, 0x78, 0x25, + 0x1d, 0x61, 0xb5, 0x71, 0xa2, 
0xf9, 0x95, 0xd8, 0xb1, 0x84, 0xab, 0x1f, 0xe4, 0x61, 0xf5, 0x40, + 0x73, 0x54, 0xdf, 0x3a, 0xdc, 0x83, 0xfb, 0x70, 0x82, 0x13, 0x1c, 0x81, 0x57, 0xbf, 0xa7, 0x28, + 0x58, 0xeb, 0xb6, 0xca, 0x78, 0x80, 0xfd, 0x35, 0xf1, 0x56, 0x2a, 0xce, 0xe0, 0xc8, 0xde, 0x63, + 0xe3, 0x62, 0x50, 0x38, 0xfd, 0xcf, 0x0d, 0x23, 0x88, 0x51, 0x3b, 0x1c, 0x6a, 0x5e, 0x70, 0x68, + 0x85, 0x2a, 0x4c, 0xbf, 0xc7, 0x5a, 0xbb, 0xda, 0xfe, 0xe7, 0x2f, 0x88, 0x36, 0xb4, 0x4f, 0xc9, + 0x02, 0x43, 0x6b, 0xcd, 0x67, 0xfb, 0x83, 0xa7, 0x65, 0x04, 0x5b, 0xd6, 0xe8, 0x76, 0x42, 0x54, + 0xfb, 0x14, 0xde, 0x98, 0x04, 0x20, 0x4d, 0xcf, 0x8a, 0xe0, 0x3c, 0x4f, 0x21, 0x08, 0x85, 0x38, + 0xdc, 0x6a, 0xd6, 0x00, 0xf2, 0x4e, 0x1b, 0xa2, 0x82, 0x19, 0xd5, 0x74, 0x72, 0x54, 0xb6, 0xa2, + 0xc5, 0x56, 0xd6, 0xd4, 0x49, 0x3b, 0xd1, 0xa5, 0x52, 0xdc, 0xb8, 0x87, 0x56, 0x01, 0x59, 0xfb, + 0x05, 0xef, 0xe7, 0x9d, 0x59, 0x44, 0x2c, 0x56, 0xa4, 0x79, 0x9f, 0x9f, 0x93, 0xfc, 0x35, 0xcc, + 0x24, 0xc8, 0x30, 0x22, 0xbd, 0x23, 0x8f, 0xd1, 0x2c, 0xef, 0x8d, 0x2b, 0x9f, 0xcf, 0x31, 0xe5, + 0x44, 0x89, 0x2b, 0xc6, 0x42, 0x64, 0x9f, 0x3c, 0xdc, 0xab, 0x0b, 0x45, 0xd8, 0x68, 0xb0, 0x0b, + 0x4c, 0x34, 0xdc, 0x89, 0xba, 0x98, 0xb3, 0xcc, 0x26, 0x91, 0xa0, 0x35, 0xe9, 0xb8, 0x1f, 0x3a, + 0xaa, 0xf4, 0x9d, 0xd8, 0x9d, 0xbe, 0xc6, 0xdf, 0xe7, 0x77, 0xae, 0x66, 0xbc, 0xcd, 0x69, 0x92, + 0x28, 0xdb, 0xc1, 0x62, 0x12, 0xfc, 0xa5, 0xb3, 0xed, 0x6b, 0x1b, 0x6d, 0xa6, 0x2b, 0x95, 0x9f, + 0xbf, 0xf8, 0x03, 0xe6, 0x10, 0xfc, 0x72, 0x6e, 0x57, 0x92, 0x32, 0xde, 0xd3, 0xa2, 0xc9, 0x24, + 0x7e, 0xd4, 0xe2, 0x57, 0x49, 0x39, 0xef, 0x2c, 0xea, 0x53, 0x55, 0x54, 0xb1, 0xe3, 0xf4, 0x82, + 0x62, 0x6f, 0x39, 0x74, 0x22, 0x24, 0x9f, 0x8b, 0x90, 0x26, 0x5b, 0x6d, 0xc4, 0x18, 0xc7, 0x97, + 0x1f, 0x3e, 0xbf, 0xa7, 0x6c, 0x1c, 0xba, 0x5d, 0x40, 0x17, 0x77, 0x2a, 0x4c, 0x03, 0x7e, 0x7c, + 0x66, 0xb8, 0xcc, 0xac, 0x16, 0x21, 0x36, 0xef, 0x3b, 0x28, 0x59, 0x3e, 0xed, 0x95, 0x69, 0x44, + 0x2f, 0x05, 0x3b, 0x57, 0x06, 0xdd, 0x1c, 0x9f, 0xf5, 0x95, 0x63, 0xb8, 0x15, 0xec, 0x99, 0xe1, + 0xc6, 0x8f, 0x7e, 0x47, 0xd9, 0x74, 0x77, 0x34, 0x66, 0xcb, 0x18, 0x5d, 0xdc, 0x70, 0xa4, 0x03, + 0xb6, 0xdb, 0xfb, 0x13, 0xd1, 0x10, 0x2f, 0x40, 0x8e, 0x35, 0xde, 0x48, 0x95, 0xbb, 0x89, 0xf0, + 0xe4, 0xf5, 0xd5, 0x23, 0x70, 0xb2, 0xd8, 0xc4, 0xa2, 0xa2, 0xaa, 0xba, 0x0b, 0xfa, 0xd5, 0xbf, + 0xd9, 0xa9, 0x0e, 0xb4, 0x7a, 0xec, 0xdc, 0x5a, 0x2a, 0x44, 0x18, 0xe1, 0x1d, 0xa6, 0x56, 0x2c, + 0x0f, 0x53, 0xcb, 0x69, 0x45, 0x96, 0x68, 0x70, 0xf8, 0x20, 0xd7, 0x02, 0xbe, 0xe9, 0xca, 0x9e, + 0x8b, 0x69, 0xce, 0x69, 0x51, 0xab, 0xa7, 0xe4, 0xcd, 0xb8, 0xed, 0xf8, 0xa5, 0xdb, 0x3f, 0x18, + 0x4c, 0x31, 0x80, 0xc7, 0x6e, 0x4c, 0xfd, 0xd1, 0x51, 0xe2, 0x6e, 0xf8, 0xea, 0xd5, 0x27, 0xce, + 0xe6, 0x8d, 0x16, 0xc8, 0x94, 0x68, 0x61, 0xd2, 0x33, 0x79, 0xb3, 0xbb, 0x6b, 0x4c, 0x3d, 0xf5, + 0x49, 0x86, 0x76, 0x63, 0x73, 0x3b, 0xfb, 0xed, 0x17, 0xda, 0x45, 0xab, 0x6c, 0xd4, 0xea, 0x21, + 0x03, 0xfa, 0xe5, 0xc2, 0xc5, 0x13, 0xeb, 0x52, 0x0f, 0x93, 0xe9, 0x38, 0x46, 0x06, 0x85, 0xf8, + 0x6f, 0x74, 0x91, 0x6c, 0x1b, 0x53, 0x7e, 0x1c, 0x6d, 0xbc, 0x18, 0x30, 0x18, 0x85, 0xfe, 0xf2, + 0x79, 0x5d, 0x67, 0xa8, 0x9b, 0xa9, 0x6d, 0x77, 0x41, 0x8b, 0xdd, 0x64, 0xb0, 0x87, 0x98, 0xd9, + 0x82, 0xdc, 0xbf, 0x9c, 0xe9, 0x71, 0x5e, 0x3f, 0x12, 0xc1, 0x7a, 0xbe, 0x07, 0x2b, 0xbc, 0x0f, + 0x0a, 0x85, 0xb0, 0x84, 0x31, 0xd3, 0xbf, 0x3d, 0x4d, 0xfa, 0xeb, 0xd8, 0xf8, 0xf9, 0x65, 0x3c, + 0x70, 0x20, 0x5a, 0xe2, 0x92, 0xcb, 0x37, 0x6d, 0x4e, 0xb7, 0x76, 0xfb, 0xce, 0xa5, 0xc8, 0x45, + 0x30, 0x53, 0xb1, 0xd6, 0x3e, 0x1d, 0xc8, 0x02, 0x08, 
0x91, 0x49, 0x03, 0x47, 0x51, 0x5c, 0x96, + 0x61, 0x8d, 0x0f, 0x85, 0x7f, 0x9c, 0x0c, 0x41, 0xfa, 0xe6, 0x77, 0x16, 0xed, 0xf2, 0xbf, 0x27, + 0xe7, 0xa6, 0xdf, 0x13, 0xf2, 0x62, 0x22, 0x9e, 0x4c, 0x7e, 0xe1, 0xbd, 0xd8, 0x20, 0x9e, 0x2c, + 0x92, 0xd1, 0xca, 0x88, 0x03, 0x29, 0x55, 0xb9, 0xb9, 0x64, 0x20, 0x2c, 0xc1, 0xd4, 0xff, 0xa1, + 0x12, 0x42, 0x1f, 0xa7, 0xcf, 0x3b, 0xd7, 0x55, 0x84, 0x9c, 0x2e, 0xfe, 0x46, 0x94, 0x3e, 0xaa, + 0x16, 0x26, 0xed, 0x4c, 0x9d, 0xd4, 0x4f, 0x11, 0xc4, 0xc8, 0x99, 0x4b, 0x89, 0x9a, 0x89, 0xc9, + 0xf9, 0xae, 0x59, 0xdf, 0xc9, 0xc9, 0x99, 0xc1, 0x18, 0x79, 0xf5, 0x65, 0xd0, 0xa2, 0x96, 0x4f, + 0x11, 0xeb, 0xaa, 0xf6, 0x25, 0x84, 0x09, 0x28, 0xc0, 0xed, 0x17, 0x89, 0x44, 0xe7, 0xf2, 0x78, + 0xf8, 0xcf, 0x70, 0x32, 0xfa, 0x8c, 0x1b, 0x26, 0x67, 0x98, 0x9d, 0x71, 0xf6, 0xf1, 0x87, 0xfd, + 0x3e, 0x45, 0xfe, 0x82, 0x05, 0x27, 0x34, 0x65, 0x44, 0x2c, 0xbe, 0xb2, 0xc8, 0xa4, 0x50, 0x46, + 0xaf, 0x00, 0x37, 0x9c, 0x27, 0x0f, 0x6a, 0x0e, 0x63, 0xab, 0x5b, 0x15, 0x6a, 0xf0, 0x8a, 0xfc, + 0x81, 0x18, 0x43, 0xe4, 0x69, 0x39, 0xc2, 0xde, 0xd0, 0xb3, 0x72, 0x17, 0x5a, 0x64, 0x21, 0x96, + 0x28, 0x8c, 0x9e, 0x1a, 0xc6, 0x49, 0xe0, 0xc1, 0x27, 0x48, 0x95, 0xd3, 0xd9, 0xd7, 0xe4, 0x7e, + 0xc4, 0x6b, 0x42, 0x3d, 0x11, 0xbf, 0x01, 0x22, 0x0d, 0x20, 0x55, 0xd3, 0x09, 0xe2, 0xb5, 0x8b, + 0x2d, 0x12, 0x1a, 0xf2, 0x0d, 0x3e, 0x6f, 0x11, 0x66, 0xee, 0x4d, 0xc0, 0xd7, 0x0a, 0x04, 0x3f, + 0x41, 0x1a, 0x3d, 0x21, 0xd5, 0x4b, 0x44, 0x4a, 0x5f, 0xd7, 0x94, 0xed, 0x0f, 0x5e, 0x58, 0x35, + 0x74, 0x99, 0x25, 0xd3, 0x5b, 0x08, 0x26, 0xa8, 0xb2, 0xc9, 0x93, 0xf0, 0x3e, 0xbc, 0x44, 0xfd, + 0x45, 0x2c, 0xcd, 0x44, 0xe5, 0x7c, 0xc0, 0xe5, 0x68, 0xba, 0x70, 0x0d, 0x65, 0xb6, 0x5b, 0xfb, + 0x9c, 0x2c, 0x59, 0x65, 0xb7, 0x26, 0x87, 0x4f, 0xf8, 0xd7, 0xcf, 0x8b, 0xb7, 0x6e, 0x60, 0x4e, + 0x08, 0x37, 0x33, 0xb2, 0xaa, 0xae, 0xe6, 0x4c, 0x4c, 0xb7, 0x91, 0xf0, 0xe7, 0x73, 0x8c, 0x4d, + 0xb0, 0x63, 0x07, 0x77, 0xca, 0x96, 0xf0, 0xcd, 0x01, 0x07, 0x99, 0xdf, 0xed, 0x70, 0x11, 0xb4, + 0xa1, 0x7b, 0xe3, 0xa9, 0x41, 0xb2, 0x30, 0x78, 0xa8, 0x19, 0xd5, 0x44, 0xeb, 0x52, 0x6b, 0x70, + 0xda, 0xe1, 0x0e, 0x26, 0x1e, 0x4a, 0xb3, 0x51, 0xe6, 0xeb, 0x4d, 0xb3, 0x55, 0x56, 0x98, 0x18, + 0xe6, 0xd1, 0x42, 0x8d, 0xe6, 0xf4, 0xa6, 0x10, 0x43, 0x24, 0x21, 0xe9, 0xe6, 0x22, 0x1a, 0x04, + 0x83, 0x94, 0x20, 0x44, 0x6c, 0xda, 0x7c, 0x04, 0x57, 0xa4, 0xe6, 0x2b, 0xab, 0x4b, 0x8c, 0x0a, + 0xed, 0x1d, 0x78, 0xa4, 0x6b, 0x0e, 0x74, 0x74, 0xc5, 0x6d, 0x33, 0xf4, 0x8d, 0xba, 0x56, 0xfc, + 0xdc, 0xd3, 0xfe, 0x65, 0x95, 0x97, 0x7f, 0x7f, 0x88, 0x6b, 0x24, 0x28, 0xb5, 0x7b, 0x76, 0x2b, + 0xfc, 0xdb, 0x78, 0x5f, 0x0e, 0x96, 0xda, 0x65, 0xc5, 0x83, 0x2c, 0x13, 0xea, 0xa5, 0x46, 0x8d, + 0x06, 0xa9, 0xc8, 0x7e, 0x38, 0x47, 0xf6, 0x94, 0xdd, 0x42, 0x42, 0xfd, 0x6e, 0x11, 0x24, 0x6f, + 0x2d, 0xfd, 0xc7, 0x12, 0x4c, 0xac, 0xdf, 0x26, 0xa9, 0x47, 0xd3, 0x82, 0x9a, 0x79, 0x60, 0x83, + 0x06, 0x50, 0xe9, 0x15, 0x56, 0x47, 0x78, 0x5d, 0xf7, 0xaa, 0xd3, 0xdc, 0x15, 0x90, 0x9a, 0x2c, + 0xf6, 0xd6, 0x96, 0xa2, 0x3a, 0x05, 0x6a, 0x0c, 0x93, 0x17, 0x7a, 0x59, 0x90, 0x4c, 0xed, 0xf2, + 0x8c, 0x85, 0x12, 0xc6, 0xa9, 0x58, 0x55, 0x45, 0x54, 0x81, 0xa7, 0x7d, 0xe5, 0xed, 0x7d, 0x9f, + 0x3a, 0x9d, 0xde, 0xd5, 0x26, 0xac, 0x31, 0xb5, 0xc9, 0xf7, 0xe9, 0x8b, 0x57, 0x4c, 0x16, 0x27, + 0x9d, 0x08, 0x4a, 0x3c, 0x6d, 0xe9, 0x98, 0xae, 0x02, 0x15, 0x98, 0x09, 0x61, 0xfa, 0x90, 0x2a, + 0xd5, 0xba, 0xc2, 0x83, 0x9b, 0xc9, 0xc7, 0xc3, 0x62, 0x60, 0x24, 0x70, 0x33, 0x02, 0xa6, 0x2e, + 0xdd, 0xa3, 0xdf, 0xea, 0x51, 0x19, 0xf7, 0x69, 0x09, 0x02, 0xa4, 0x62, 0x3a, 
0xfb, 0xd8, 0x27, + 0x39, 0xfa, 0xdb, 0xfd, 0x1b, 0xad, 0xce, 0xd9, 0x1c, 0xbb, 0xa2, 0x9c, 0x4e, 0x46, 0x6c, 0xa5, + 0x14, 0x37, 0xe9, 0x9f, 0xda, 0x32, 0x91, 0x40, 0x13, 0x72, 0x2c, 0xa1, 0x7f, 0xdc, 0x7f, 0x6e, + 0xb3, 0xd2, 0xcf, 0x89, 0xf2, 0xea, 0x50, 0x74, 0xdf, 0xf0, 0xa0, 0xdf, 0x81, 0xa2, 0xeb, 0x3a, + 0xca, 0x42, 0x84, 0xbf, 0xa5, 0x8f, 0xb8, 0x88, 0x48, 0xec, 0x9b, 0xb0, 0xc0, 0x54, 0x0d, 0x87, + 0x2e, 0xe4, 0xd4, 0xe1, 0xb4, 0x23, 0xed, 0xb0, 0x07, 0x17, 0x5a, 0xae, 0xff, 0x4d, 0x66, 0x28, + 0x97, 0x7b, 0xa9, 0x8a, 0xdb, 0x7a, 0xd9, 0xbc, 0x58, 0x5e, 0x2d, 0x1c, 0x87, 0x69, 0xec, 0x79, + 0x4b, 0xf2, 0xd2, 0xfc, 0x4b, 0x4d, 0xdc, 0x52, 0xe0, 0xcf, 0x0b, 0x13, 0x2c, 0xe8, 0xfa, 0xa3, + 0xd2, 0xef, 0xfb, 0x02, 0xb2, 0xed, 0x26, 0xc5, 0xf8, 0xd8, 0xbc, 0x5e, 0xba, 0x7c, 0x05, 0x86, + 0xdb, 0x9a, 0x3e, 0x6c, 0xb4, 0xb8, 0x67, 0x27, 0x0e, 0x08, 0x17, 0xf4, 0xdb, 0xa5, 0x67, 0xb6, + 0xe1, 0xe9, 0x21, 0x8f, 0xe5, 0xd4, 0x69, 0x9f, 0xfb, 0xea, 0xd8, 0x8b, 0x74, 0x4d, 0x23, 0x10, + 0xab, 0x83, 0xdf, 0xd7, 0x45, 0xc0, 0x9e, 0x15, 0xaf, 0x75, 0xc9, 0xdc, 0x9f, 0x90, 0x35, 0x6e, + 0x82, 0xed, 0x71, 0x93, 0xa9, 0xec, 0x7a, 0x77, 0x4c, 0x6d, 0xff, 0x7b, 0xba, 0x1a, 0x6f, 0x9a, + 0x56, 0xba, 0x4b, 0xdb, 0xd7, 0x9f, 0x2f, 0x7c, 0xc9, 0x49, 0x9b, 0xba, 0x1c, 0xb6, 0xda, 0xd8, + 0x5f, 0x3e, 0x6b, 0xc7, 0x63, 0x44, 0x4b, 0x63, 0xd2, 0x6f, 0x41, 0x27, 0x3e, 0x8d, 0xd9, 0xe4, + 0x21, 0x95, 0xfb, 0x8d, 0x8b, 0x2c, 0xb1, 0xdf, 0x94, 0x29, 0x48, 0xe8, 0xb5, 0x96, 0x10, 0x59, + 0x6f, 0x21, 0xe1, 0x3d, 0x28, 0x2e, 0x85, 0xdc, 0x11, 0xb8, 0x82, 0x73, 0x0d, 0x40, 0xf7, 0xba, + 0xc1, 0x0b, 0x65, 0x31, 0xfb, 0x52, 0x4f, 0xf9, 0xe8, 0x54, 0x2e, 0xc6, 0xe8, 0xb7, 0x8f, 0x81, + 0xa9, 0x77, 0x6d, 0xa6, 0x14, 0x01, 0x27, 0x1c, 0x8c, 0xff, 0xd7, 0x7b, 0xf3, 0xc9, 0xca, 0x7e, + 0x7a, 0xff, 0x37, 0x3c, 0x5a, 0xf4, 0xb2, 0x21, 0x7d, 0x25, 0x9f, 0xe6, 0x6b, 0xef, 0xd8, 0x0c, + 0x20, 0x26, 0x1e, 0xcf, 0xad, 0x54, 0x4a, 0x42, 0x77, 0xdf, 0xbb, 0x24, 0x64, 0xe4, 0xee, 0x17, + 0x98, 0x36, 0xb3, 0x68, 0x09, 0x4f, 0xda, 0x42, 0x0f, 0x5d, 0x6c, 0x53, 0xd5, 0x13, 0x33, 0x38, + 0xfd, 0xb3, 0xf4, 0x7d, 0xbb, 0xce, 0xad, 0x79, 0x58, 0x76, 0x69, 0x9f, 0x43, 0xe0, 0xf8, 0x66, + 0xfb, 0x80, 0x3e, 0xa5, 0x72, 0x04, 0x7e, 0x66, 0xe6, 0x28, 0xc6, 0xf8, 0xcd, 0xf8, 0xdc, 0xc2, + 0x2b, 0x42, 0xbf, 0xd8, 0x07, 0x66, 0xb3, 0x3e, 0x2a, 0x10, 0xd4, 0x5b, 0xb1, 0x08, 0xc4, 0xed, + 0xe9, 0xf3, 0x84, 0xe7, 0x9b, 0x5d, 0xdf, 0xe6, 0x4a, 0xf8, 0x5b, 0xf0, 0x82, 0x6a, 0x80, 0xf8, + 0xb3, 0x79, 0xa8, 0xb0, 0x21, 0x7f, 0x16, 0xa9, 0x57, 0x4b, 0x2f, 0x93, 0x7d, 0x28, 0xf6, 0x90, + 0xf4, 0xc0, 0x8d, 0xc2, 0x58, 0xe0, 0x87, 0xe5, 0x0a, 0xff, 0x2f, 0x23, 0x2f, 0x9c, 0xd2, 0x94, + 0xf6, 0x8c, 0x4f, 0x10, 0x21, 0xbd, 0x8c, 0x60, 0xb6, 0xbb, 0x2d, 0x4b, 0x4d, 0x23, 0xf9, 0x71, + 0xa3, 0x9a, 0xe3, 0x89, 0xdb, 0x79, 0x98, 0xee, 0xd0, 0xc4, 0xaf, 0x27, 0xe5, 0xf0, 0xa0, 0xab, + 0xc2, 0xfd, 0x71, 0xb8, 0xbe, 0xd4, 0x33, 0xc9, 0xf6, 0x77, 0xc0, 0xdc, 0x4e, 0xdd, 0x47, 0xd5, + 0x43, 0x6c, 0x2d, 0xa4, 0x4f, 0x8c, 0xdd, 0x05, 0x13, 0x4a, 0xac, 0xde, 0x84, 0xcc, 0xe4, 0xd5, + 0x75, 0xf8, 0xce, 0x20, 0x4a, 0xdc, 0x8e, 0xdb, 0x55, 0x84, 0x8e, 0x56, 0x1c, 0xfc, 0x38, 0x49, + 0x1a, 0x0f, 0x66, 0x83, 0x1d, 0x09, 0x45, 0x71, 0x05, 0xf7, 0x9e, 0x53, 0x32, 0x3b, 0xd1, 0x78, + 0x06, 0x50, 0x0b, 0x37, 0x86, 0x9f, 0x42, 0xbb, 0xe1, 0xee, 0x26, 0xf3, 0x86, 0xff, 0x2c, 0x78, + 0xec, 0x67, 0x13, 0x4e, 0x09, 0xa1, 0x4c, 0x81, 0x60, 0xac, 0x1e, 0xa8, 0x52, 0xc8, 0xef, 0xf8, + 0x61, 0x18, 0xcb, 0x27, 0x41, 0x1f, 0x60, 0x47, 0xa7, 0x37, 0xe2, 0x77, 0xee, 0x98, 0xf8, 0xe9, + 0xfd, 
0xab, 0xcf, 0xe5, 0x45, 0xc1, 0x2b, 0xae, 0x3f, 0x95, 0x21, 0xa3, 0x74, 0xdb, 0x4e, 0x13, + 0x5a, 0x68, 0x39, 0xf9, 0x41, 0x10, 0x65, 0xf9, 0xed, 0x27, 0x83, 0xd8, 0x5d, 0x76, 0x84, 0x23, + 0x2f, 0x21, 0xfe, 0x2a, 0x32, 0x5c, 0xce, 0xf0, 0xc8, 0x72, 0x91, 0x2d, 0x6b, 0xea, 0x5c, 0xd2, + 0xaa, 0x09, 0x09, 0x17, 0x66, 0x9c, 0xa6, 0x3d, 0x04, 0x4b, 0x76, 0xa1, 0xcd, 0x03, 0xfd, 0x28, + 0x89, 0x5f, 0x5c, 0x4b, 0x23, 0xef, 0xd7, 0x2e, 0x4f, 0x3b, 0xda, 0xf7, 0x72, 0x7b, 0xb6, 0x02, + 0x33, 0x66, 0x81, 0x29, 0x97, 0x62, 0x40, 0xe8, 0x13, 0xa2, 0xce, 0xae, 0x7f, 0xb7, 0xa9, 0xed, + 0x77, 0x02, 0x5a, 0xaa, 0xa9, 0x4c, 0x48, 0x8d, 0xf0, 0x37, 0xc2, 0x73, 0x7a, 0xcf, 0xda, 0xd2, + 0x23, 0x06, 0xe7, 0xd1, 0x08, 0xc8, 0x51, 0x19, 0x9e, 0xb5, 0x51, 0xae, 0x04, 0xc5, 0x12, 0xb2, + 0x85, 0x15, 0xfb, 0x33, 0x49, 0x4d, 0xa3, 0x5a, 0x3c, 0xc2, 0x79, 0x2a, 0x89, 0x4a, 0xa2, 0x1d, + 0xb8, 0x22, 0xcd, 0x97, 0x02, 0x65, 0xd2, 0x5b, 0x74, 0x60, 0x9a, 0xef, 0x47, 0xcc, 0x81, 0x8b, + 0x01, 0x7b, 0x25, 0x11, 0x2c, 0xc4, 0x5d, 0x5c, 0x63, 0x4a, 0x30, 0x37, 0x0b, 0xad, 0xe8, 0xe3, + 0x47, 0x2c, 0x35, 0x0e, 0x80, 0x51, 0x9d, 0x13, 0x16, 0x7a, 0x36, 0xf1, 0xec, 0x58, 0xfa, 0xaf, + 0x12, 0x45, 0xc9, 0x73, 0xed, 0xb6, 0x74, 0x32, 0x3b, 0x76, 0x31, 0xe5, 0x88, 0xff, 0xde, 0x4f, + 0xd9, 0xe7, 0x80, 0xb9, 0x19, 0x4a, 0x0b, 0x9d, 0xb4, 0x73, 0x7f, 0x2e, 0xd1, 0xee, 0x56, 0x4e, + 0xbe, 0x5b, 0xf7, 0x41, 0xa7, 0xe2, 0x79, 0x04, 0x5d, 0x61, 0x85, 0xda, 0x46, 0x02, 0x71, 0x23, + 0x16, 0xde, 0x98, 0xd5, 0x79, 0xc0, 0xbd, 0xc2, 0x08, 0x92, 0xf3, 0xe7, 0x8e, 0x58, 0x3c, 0xbe, + 0x9d, 0x06, 0x69, 0xc8, 0x7e, 0x1f, 0xf9, 0x35, 0xf7, 0x38, 0x04, 0x4e, 0x66, 0xc1, 0x43, 0xc8, + 0xcd, 0x18, 0xf8, 0xa6, 0xeb, 0x71, 0x4c, 0x32, 0x6b, 0x1b, 0xfb, 0xfb, 0xac, 0x76, 0xa9, 0x0a, + 0x9d, 0x01, 0x8b, 0x34, 0x78, 0x44, 0xc2, 0x95, 0x05, 0x6a, 0x8d, 0xf9, 0x91, 0x8e, 0x66, 0x87, + 0xf0, 0x64, 0xe5, 0x44, 0x0f, 0xcf, 0xec, 0xe6, 0x73, 0xc2, 0xa9, 0xe2, 0x5b, 0x1e, 0xbd, 0xf6, + 0xc0, 0x01, 0xe4, 0x4a, 0x85, 0x53, 0xf2, 0xbe, 0xf8, 0x0b, 0x14, 0xc2, 0x17, 0xab, 0xb6, 0x4f, + 0xff, 0xbd, 0x1f, 0x14, 0x51, 0xb5, 0x78, 0x94, 0x7b, 0xf5, 0x3d, 0x50, 0x97, 0x1a, 0xc2, 0x0a, + 0x9e, 0x73, 0xff, 0xe9, 0x2e, 0x7a, 0x96, 0xd4, 0xf5, 0x36, 0x92, 0xe2, 0x0e, 0x7e, 0x25, 0xae, + 0x32, 0x37, 0x35, 0x69, 0x92, 0xd3, 0x7b, 0x31, 0x4e, 0x45, 0xd0, 0x5e, 0x44, 0x83, 0xad, 0xfe, + 0xdc, 0x6c, 0xc5, 0x05, 0xd1, 0xbf, 0x95, 0xb9, 0x16, 0x40, 0xe0, 0xb7, 0xd7, 0x77, 0x00, 0x0a, + 0x9d, 0xc3, 0xd3, 0x74, 0xdf, 0xeb, 0x80, 0x47, 0x91, 0xe7, 0xcc, 0x7f, 0x92, 0xc5, 0x3a, 0x8a, + 0x90, 0xd3, 0x91, 0x18, 0xe7, 0xd1, 0x74, 0x1b, 0x10, 0x14, 0x84, 0x9f, 0x16, 0x1c, 0x4c, 0x52, + 0x2a, 0x41, 0x46, 0xe6, 0x91, 0x6c, 0x14, 0x26, 0x86, 0xf3, 0xcf, 0x85, 0x89, 0x8c, 0x50, 0xbf, + 0x29, 0x38, 0xe2, 0x53, 0x6c, 0x29, 0x7f, 0xb4, 0xcf, 0x3a, 0x3f, 0xf1, 0xd9, 0x35, 0x7b, 0x51, + 0xfd, 0xb2, 0x76, 0xe3, 0xec, 0xf7, 0x29, 0xf5, 0x78, 0x9c, 0x23, 0xe0, 0x0a, 0x9e, 0x40, 0x43, + 0xaf, 0x29, 0xc4, 0xe3, 0xff, 0x1a, 0xff, 0xaf, 0x9e, 0x44, 0xff, 0xf2, 0x2f, 0xff, 0xf2, 0x2f, + 0xff, 0xf2, 0x2f, 0xff, 0xf2, 0xff, 0x15, 0xff, 0x03, 0x55, 0x5b, 0xe9, 0x1d, 0x00, 0x23, 0x00, + 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8960, // uncompressed data size (bytes) + 6817, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_image_dbg_data, 
// compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x06, 0x62, 0x0e, 0x08, 0x13, 0x4c, 0x48, 0x41, 0x69, + 0x20, 0x00, 0x00, 0x6b, 0x3e, 0x38, 0x7d, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8960 +// COMPRESSED SIZE (bytes): 6823 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_image_prod_data[] = +{ + 0xed, 0x99, 0x45, 0x54, 0x1b, 0x0c, 0xd3, 0xb6, 0x43, 0xb0, 0x04, 0x0f, 0x52, 0x5c, 0x8a, 0xbb, + 0x53, 0xac, 0xb8, 0x3b, 0x45, 0x0a, 0x45, 0x8b, 0x7b, 0x71, 0x28, 0xee, 0xee, 0x45, 0x83, 0x5b, + 0xd1, 0xd0, 0x42, 0x83, 0x15, 0x8a, 0x6b, 0x81, 0xe2, 0xee, 0xa5, 0xc5, 0x82, 0x4b, 0x43, 0x91, + 0xc0, 0xf7, 0xbc, 0x8b, 0x7f, 0xf3, 0xae, 0xbe, 0x6f, 0xfb, 0x9f, 0xe7, 0xda, 0xdc, 0xf7, 0xcc, + 0x62, 0xce, 0x6c, 0xe6, 0x9c, 0x39, 0x33, 0x09, 0x00, 0x00, 0xda, 0x63, 0x16, 0x20, 0x94, 0x10, + 0x00, 0xf8, 0x0b, 0xfc, 0x0b, 0xb8, 0x05, 0xa6, 0x00, 0x80, 0x80, 0x9e, 0xcd, 0xad, 0xa7, 0xa7, + 0x27, 0xc2, 0x04, 0x00, 0x1a, 0xe0, 0xa9, 0x04, 0x03, 0xfe, 0x1b, 0x20, 0x94, 0xb7, 0x82, 0x06, + 0xcf, 0x02, 0x70, 0xe7, 0xad, 0x00, 0xff, 0x11, 0x96, 0xbc, 0x15, 0xf4, 0x7f, 0x84, 0x2e, 0x01, + 0x00, 0x00, 0xe4, 0x95, 0xa0, 0xb7, 0x6c, 0xa1, 0xf7, 0x94, 0x94, 0x6e, 0xe5, 0x95, 0xa0, 0xe5, + 0x41, 0x81, 0x2d, 0x5b, 0x68, 0x2d, 0x2b, 0xc0, 0xd0, 0x1e, 0x00, 0x80, 0x50, 0x40, 0x0d, 0x40, + 0x74, 0x53, 0x02, 0xe8, 0xca, 0x4a, 0xfb, 0xa7, 0x64, 0xcb, 0x6d, 0x44, 0xcb, 0x6d, 0x78, 0x02, + 0x20, 0x1a, 0xe3, 0x9f, 0x14, 0x21, 0x5a, 0x3b, 0x14, 0x40, 0x0b, 0xfc, 0x8f, 0x43, 0xff, 0xc7, + 0xa1, 0x01, 0xff, 0x5f, 0x0e, 0xeb, 0x3f, 0xee, 0x9f, 0xb2, 0xc0, 0x6c, 0x42, 0xd0, 0xe6, 0x7f, + 0x5a, 0x40, 0xdd, 0x62, 0x26, 0x00, 0xa8, 0x01, 0xa8, 0x62, 0x8c, 0xce, 0x74, 0x60, 0x6b, 0x16, + 0xd1, 0x3f, 0xf1, 0xe3, 0x1d, 0x20, 0xf4, 0x9f, 0xc6, 0xfe, 0x02, 0xbb, 0x2a, 0xef, 0xd1, 0xd2, + 0x50, 0xb9, 0x84, 0x09, 0x0c, 0xd8, 0x80, 0xd2, 
0x92, 0xbf, 0x80, 0xb4, 0x7f, 0xb4, 0xe7, 0xe9, + 0x9a, 0xff, 0xe1, 0x51, 0x0d, 0xad, 0xb4, 0x44, 0x7a, 0x05, 0xf0, 0xbf, 0xa1, 0xeb, 0xd2, 0x9e, + 0xad, 0x14, 0x33, 0xa0, 0xb3, 0xac, 0xcb, 0x20, 0x7b, 0x8d, 0x30, 0xc3, 0x5a, 0xb6, 0xa5, 0x75, + 0x81, 0x26, 0x56, 0xb0, 0x1c, 0x85, 0xf7, 0x98, 0x59, 0xd5, 0x34, 0xa8, 0x94, 0xfe, 0xce, 0xd4, + 0x23, 0x48, 0x6e, 0x28, 0x99, 0x73, 0xce, 0x81, 0xd0, 0x13, 0x51, 0x6e, 0xaa, 0xa3, 0x9b, 0x9d, + 0xbb, 0x45, 0xd6, 0x0b, 0xf2, 0x8c, 0xcc, 0x43, 0x22, 0xee, 0xb2, 0xd9, 0x58, 0xae, 0xd8, 0x5c, + 0x7c, 0xb6, 0xc6, 0x1b, 0x45, 0xdc, 0x4b, 0x87, 0x0e, 0x2b, 0xcb, 0xf8, 0xd0, 0x6a, 0x31, 0x8e, + 0x19, 0xcb, 0xa5, 0x82, 0x5c, 0x27, 0x7d, 0x28, 0xc0, 0x4c, 0xab, 0xe0, 0x94, 0xc6, 0x56, 0xc7, + 0x9d, 0x1e, 0x71, 0xa1, 0xc0, 0xf3, 0x0e, 0x78, 0x34, 0xb8, 0x91, 0xa3, 0x11, 0xa1, 0x56, 0xdc, + 0x21, 0x16, 0xbe, 0xc1, 0xd1, 0x6f, 0xf3, 0xd8, 0x19, 0xfd, 0x32, 0xcb, 0x5e, 0x35, 0x43, 0x22, + 0x7f, 0x6b, 0x16, 0x70, 0x60, 0x01, 0x8a, 0x81, 0x15, 0xba, 0x24, 0x8b, 0xde, 0x2e, 0xb4, 0x19, + 0xdf, 0x8c, 0x63, 0x28, 0xa9, 0x3e, 0xcc, 0x07, 0xcf, 0x54, 0x6d, 0xbd, 0x68, 0x33, 0xe4, 0xc0, + 0xe4, 0xe0, 0xb4, 0xa5, 0xc8, 0x39, 0xc4, 0x43, 0x42, 0xf6, 0x71, 0x9e, 0x09, 0x8b, 0x42, 0xd6, + 0x98, 0x9a, 0xb5, 0x11, 0x4e, 0x54, 0x4f, 0xc6, 0xfb, 0xfc, 0x85, 0x73, 0xf4, 0xc0, 0x31, 0xef, + 0xf4, 0x85, 0xd9, 0x51, 0xfe, 0xb2, 0x9e, 0x24, 0x7f, 0x69, 0xfe, 0x51, 0x4d, 0xed, 0xd1, 0x3f, + 0x5e, 0x24, 0xb3, 0x3e, 0x7d, 0x84, 0x83, 0xd7, 0xba, 0x40, 0x36, 0x8b, 0x23, 0xba, 0xfd, 0xc5, + 0xf7, 0x82, 0x42, 0xb9, 0xce, 0xcb, 0xdb, 0x4f, 0x45, 0x0f, 0x7d, 0xc1, 0x0d, 0xaa, 0xd7, 0x10, + 0x65, 0x2d, 0x0f, 0x9a, 0xd9, 0x5f, 0xfe, 0xec, 0xef, 0x73, 0x16, 0xe3, 0x27, 0xd5, 0xb7, 0xc5, + 0xce, 0x7b, 0xb0, 0xcb, 0xd8, 0x3a, 0x30, 0xb7, 0x43, 0xca, 0x81, 0x82, 0x2f, 0xef, 0xd7, 0x26, + 0xa2, 0xc7, 0x61, 0xbd, 0xf5, 0x0f, 0x4a, 0xbd, 0xeb, 0x6b, 0xd1, 0xc6, 0x25, 0xbd, 0xfe, 0x37, + 0xf1, 0xed, 0x41, 0xa1, 0xcd, 0x86, 0xa8, 0xb7, 0x61, 0xb5, 0x07, 0x41, 0xdd, 0xb5, 0xef, 0x60, + 0x53, 0xe9, 0xc7, 0x75, 0x24, 0x8c, 0xd0, 0x2f, 0xa9, 0x6d, 0x93, 0x08, 0x67, 0xfa, 0x4d, 0x1b, + 0x38, 0xd4, 0xe7, 0xe2, 0x80, 0x85, 0xfd, 0x8d, 0x6a, 0x23, 0xef, 0x87, 0x06, 0x66, 0x1e, 0x24, + 0xc4, 0x8b, 0xe0, 0xd5, 0x5d, 0xe6, 0x51, 0x95, 0xd8, 0xa7, 0xb6, 0x39, 0x57, 0x3b, 0x2c, 0xa1, + 0x40, 0x6d, 0x61, 0xd8, 0xa6, 0x3e, 0xdb, 0xd6, 0xc3, 0x71, 0x53, 0x50, 0x19, 0x0a, 0xe6, 0xa1, + 0xfd, 0xeb, 0x7b, 0xd6, 0xb7, 0x1e, 0x7d, 0x7e, 0x5d, 0x34, 0x9e, 0xd9, 0x56, 0xc7, 0x1a, 0x53, + 0x7d, 0x08, 0x31, 0xe3, 0x38, 0xe1, 0x36, 0xc5, 0xed, 0x3d, 0xfa, 0xd0, 0xe1, 0x80, 0x53, 0xc5, + 0x14, 0x83, 0xd9, 0x37, 0xf0, 0x9d, 0x5a, 0x32, 0x1a, 0x65, 0xa2, 0x7b, 0x8d, 0xde, 0x75, 0x92, + 0x0e, 0xb7, 0x5d, 0xf9, 0xf3, 0x9f, 0x43, 0xb2, 0x90, 0x12, 0x14, 0x99, 0x34, 0x8d, 0xe9, 0x2a, + 0xb9, 0xca, 0x22, 0x8d, 0xc8, 0x3a, 0x95, 0xd0, 0xb0, 0xca, 0x41, 0xa9, 0x80, 0xaa, 0x6a, 0x55, + 0xc0, 0x13, 0xdb, 0x7b, 0xd2, 0xb0, 0xea, 0x32, 0xe2, 0xde, 0xc0, 0x5e, 0x43, 0xb6, 0xaf, 0x88, + 0x99, 0x39, 0xdb, 0xca, 0xfe, 0x49, 0x7c, 0xd2, 0x44, 0xed, 0x29, 0xd7, 0x9a, 0xc3, 0x22, 0xca, + 0x93, 0xa7, 0xe2, 0xe4, 0xe7, 0x39, 0x5b, 0x9c, 0x6d, 0x32, 0x32, 0x32, 0xcc, 0x13, 0x61, 0x38, + 0x16, 0xb7, 0x49, 0x58, 0xf3, 0x8e, 0x31, 0xb8, 0xb3, 0xc5, 0xc7, 0x8b, 0x6f, 0x13, 0x42, 0x39, + 0x02, 0x05, 0x2f, 0xbd, 0x59, 0xc4, 0x15, 0xa1, 0x75, 0xfe, 0x57, 0x1f, 0x07, 0x59, 0x94, 0x1a, + 0xd9, 0xb2, 0xdf, 0xf3, 0x12, 0xab, 0xcb, 0xf7, 0xe0, 0x0e, 0xcd, 0x6f, 0xd0, 0x4e, 0xbd, 0x15, + 0x88, 0x59, 0x70, 0xac, 0xac, 0x2f, 0xf9, 0xe1, 0x37, 0xe6, 0xc8, 0x3f, 
0xc9, 0xb4, 0x3d, 0x60, + 0x03, 0x9f, 0x78, 0xc0, 0x3a, 0x05, 0x0f, 0x49, 0x8c, 0xfd, 0x7c, 0xb9, 0x3c, 0x8d, 0xa5, 0x11, + 0x24, 0xaf, 0xce, 0x55, 0x53, 0x29, 0xdf, 0x6b, 0x28, 0xa3, 0x7d, 0x86, 0x37, 0xe8, 0x44, 0xe8, + 0xff, 0x32, 0xc4, 0xd7, 0x04, 0x04, 0xd7, 0x46, 0x56, 0xbd, 0xfb, 0x5b, 0x66, 0xca, 0xd8, 0xf0, + 0xe2, 0x78, 0x49, 0x5a, 0xd9, 0x0b, 0xeb, 0xb5, 0xd8, 0x28, 0xd8, 0x93, 0x87, 0x58, 0xb0, 0xdf, + 0x7e, 0x4b, 0x62, 0x42, 0xb0, 0xec, 0x70, 0xee, 0xaf, 0xd9, 0x34, 0x7a, 0xcd, 0x1b, 0x1b, 0x2f, + 0x9c, 0x7a, 0x1b, 0x27, 0xaf, 0x72, 0x12, 0xfa, 0xb2, 0xaf, 0x5c, 0xab, 0x8f, 0x82, 0xa9, 0x4d, + 0xdd, 0xaa, 0x37, 0x09, 0x6b, 0x42, 0xf0, 0x26, 0x30, 0xa5, 0xa6, 0x17, 0xde, 0x32, 0xc3, 0x3a, + 0x7d, 0xcc, 0xcf, 0x97, 0x35, 0x19, 0x46, 0x96, 0xcb, 0xdf, 0x25, 0x91, 0x2a, 0x6e, 0x68, 0xcf, + 0x7b, 0x9c, 0xec, 0x76, 0x4d, 0x90, 0x27, 0x68, 0x93, 0x85, 0x16, 0xa5, 0x1a, 0x7a, 0xde, 0xca, + 0x3f, 0xfc, 0xdf, 0x2c, 0xbe, 0x1c, 0x37, 0x93, 0x57, 0xfe, 0x91, 0x30, 0xa9, 0x65, 0x67, 0x58, + 0xa8, 0x0b, 0x0b, 0xd1, 0x3a, 0x54, 0xd6, 0xaf, 0xf9, 0x3d, 0xf2, 0xd6, 0x0b, 0x9d, 0x11, 0xfc, + 0x30, 0x4e, 0x48, 0x97, 0xee, 0x7b, 0xea, 0x75, 0xc4, 0x43, 0xe9, 0x5c, 0x5c, 0xda, 0x2b, 0xf4, + 0x69, 0xfc, 0x14, 0xa8, 0x9c, 0xaf, 0xb2, 0xf9, 0x66, 0x92, 0xdb, 0xb0, 0x41, 0xcc, 0x22, 0x2e, + 0x17, 0x2d, 0xfb, 0xbd, 0x67, 0x15, 0x5e, 0x1a, 0x9f, 0xdb, 0x8b, 0xa1, 0x5a, 0x0d, 0x29, 0x36, + 0x24, 0x0b, 0x53, 0x58, 0xeb, 0x6b, 0x9d, 0x0e, 0xb7, 0xb8, 0xb6, 0xf8, 0x78, 0x00, 0x00, 0x2f, + 0x04, 0x05, 0xee, 0x21, 0x4b, 0x9f, 0x9d, 0xa4, 0xd6, 0xa2, 0x3a, 0xd3, 0x16, 0x05, 0x74, 0xe3, + 0x0e, 0x9a, 0x32, 0xf8, 0x02, 0x05, 0xf7, 0x5f, 0xce, 0xa0, 0x5b, 0x91, 0x28, 0xb7, 0xba, 0x8a, + 0xde, 0x99, 0xd2, 0xde, 0x43, 0x9b, 0x7f, 0x34, 0x37, 0x9c, 0x15, 0xe5, 0xb6, 0x67, 0x4b, 0xba, + 0xab, 0x3f, 0xce, 0x33, 0x13, 0x64, 0x7c, 0x12, 0x18, 0x95, 0x4c, 0x17, 0x48, 0xa6, 0x21, 0x62, + 0x23, 0x4c, 0x6a, 0x74, 0x18, 0x66, 0xab, 0x46, 0xa5, 0x9d, 0x33, 0x26, 0x9d, 0xa3, 0x76, 0x52, + 0xc6, 0x18, 0xdd, 0x97, 0x1c, 0x88, 0x48, 0x58, 0xe4, 0x5c, 0x9f, 0xd4, 0x35, 0xfb, 0xb6, 0x9e, + 0x9a, 0xf5, 0x34, 0x90, 0x4f, 0x6a, 0xd6, 0xf5, 0x29, 0x3c, 0x93, 0xcd, 0xaf, 0x42, 0x71, 0x97, + 0x97, 0xd3, 0xca, 0xbb, 0x87, 0x43, 0x6a, 0x39, 0x04, 0x3a, 0x1c, 0xe5, 0x48, 0x89, 0x26, 0xe4, + 0xf0, 0x8f, 0xe0, 0x91, 0xf3, 0xcf, 0xb7, 0x99, 0x40, 0xe4, 0xa5, 0x68, 0x4a, 0x62, 0xab, 0x1c, + 0x33, 0x59, 0x73, 0x47, 0xcf, 0x1f, 0xfe, 0xca, 0xb8, 0x0a, 0xa1, 0xba, 0xd2, 0x1d, 0x1d, 0x71, + 0xb0, 0xb0, 0xe7, 0xd1, 0xea, 0x38, 0x9f, 0x7f, 0x7a, 0x01, 0x28, 0x97, 0xa4, 0xff, 0xb0, 0x91, + 0xe3, 0x5a, 0x29, 0x87, 0xac, 0xa1, 0xd3, 0x2c, 0x17, 0x79, 0x6b, 0x40, 0x8e, 0x6f, 0x36, 0x7d, + 0x04, 0x1f, 0xe5, 0xe7, 0xfc, 0xc2, 0x56, 0x2f, 0x1a, 0x5d, 0x5c, 0x92, 0x46, 0xe9, 0x60, 0x16, + 0xec, 0x31, 0x29, 0x5c, 0xe7, 0xac, 0xa2, 0x29, 0xdc, 0xfa, 0x40, 0x17, 0x72, 0x5b, 0xba, 0x24, + 0x29, 0xf1, 0x64, 0x63, 0x2e, 0xff, 0x21, 0x4f, 0x29, 0x8a, 0x84, 0x5e, 0x5b, 0x00, 0x40, 0xbb, + 0xa6, 0xff, 0x60, 0x81, 0x4f, 0x07, 0x3b, 0xf5, 0xc7, 0xbf, 0xc3, 0x92, 0x43, 0xe3, 0xc4, 0x72, + 0xf1, 0x96, 0x8b, 0x3b, 0x2b, 0xef, 0xbc, 0xad, 0xf7, 0xb3, 0x82, 0x10, 0xa5, 0x7a, 0xeb, 0x60, + 0xec, 0x95, 0x9a, 0x3f, 0x36, 0x8a, 0x0f, 0x39, 0xc6, 0x86, 0x83, 0x99, 0x20, 0xce, 0x9c, 0xb0, + 0xe7, 0x32, 0x94, 0xe9, 0x15, 0xf5, 0x8e, 0xec, 0x0f, 0x44, 0xd2, 0xbf, 0xc2, 0xcb, 0xcf, 0xe0, + 0x09, 0x73, 0x75, 0x7b, 0x3e, 0xd9, 0x7d, 0x70, 0xf1, 0xc0, 0x7d, 0x8a, 0x94, 0xc5, 0xef, 0x2d, + 0x72, 0x6d, 0x6f, 0x45, 0xf7, 0x78, 0x3f, 0x2c, 0x0c, 0x43, 0x2d, 0x5f, 0xb4, 0x5f, 0x68, 0x98, + 
0xd6, 0x0e, 0xfa, 0xeb, 0xf3, 0x3e, 0x90, 0x9e, 0x9f, 0xc1, 0x6a, 0x8e, 0xd6, 0x8e, 0x1e, 0x1f, + 0x18, 0x06, 0x98, 0xa6, 0x7f, 0xaa, 0x99, 0x87, 0xa9, 0x37, 0x96, 0x5e, 0x3a, 0x7f, 0xc7, 0xf4, + 0x78, 0x84, 0xc2, 0xcf, 0xcf, 0x2c, 0x30, 0x31, 0x4d, 0xb2, 0x02, 0x27, 0x2e, 0x52, 0x1b, 0x7c, + 0xc2, 0x15, 0x7f, 0xb4, 0x30, 0xbd, 0xba, 0xad, 0x1d, 0x0b, 0x4d, 0x03, 0x83, 0xec, 0x7e, 0x6e, + 0x92, 0x54, 0x1b, 0x77, 0xaf, 0x75, 0x65, 0x99, 0xf4, 0x60, 0x88, 0x52, 0x47, 0x21, 0x8b, 0xa9, + 0x98, 0xaf, 0x95, 0x29, 0x18, 0x42, 0x28, 0x92, 0x25, 0xeb, 0x59, 0xed, 0xaf, 0x09, 0x26, 0xde, + 0x7e, 0xab, 0x2f, 0x03, 0x9d, 0xa2, 0x7d, 0x26, 0x20, 0xbe, 0x8e, 0xa8, 0xe5, 0x29, 0xf9, 0xf5, + 0x59, 0x52, 0x92, 0xfa, 0xc0, 0x41, 0x28, 0x4d, 0x19, 0xba, 0x2c, 0x33, 0xbe, 0x39, 0x7e, 0x77, + 0x24, 0x2c, 0x59, 0x5f, 0xac, 0xa5, 0xd3, 0x97, 0x7a, 0x66, 0xa1, 0x76, 0x60, 0xb1, 0x12, 0x9f, + 0xdd, 0x7c, 0x4f, 0xe8, 0x78, 0xf5, 0x67, 0x6c, 0xbd, 0x05, 0x31, 0x72, 0x92, 0x32, 0xdb, 0xb4, + 0xdc, 0x51, 0x3d, 0x2f, 0xf1, 0x2e, 0x05, 0x8c, 0x88, 0x07, 0x2c, 0x5a, 0x38, 0x7b, 0x00, 0x02, + 0xdb, 0xc7, 0x54, 0x65, 0x56, 0xee, 0xae, 0x0e, 0xa5, 0xff, 0xe0, 0x23, 0x56, 0x8d, 0x93, 0x8b, + 0x74, 0xb9, 0x3f, 0x9b, 0x39, 0xed, 0x46, 0x88, 0xc3, 0x39, 0xe5, 0x0d, 0x38, 0xf9, 0x0c, 0xc6, + 0x32, 0x23, 0x21, 0x14, 0xd4, 0xac, 0x7b, 0x88, 0xfb, 0x17, 0xea, 0x9d, 0xc6, 0xf0, 0xfe, 0x6a, + 0x05, 0xb1, 0xec, 0x98, 0xd6, 0x98, 0x9f, 0x8e, 0xe9, 0x82, 0xfe, 0xc5, 0x0d, 0x1c, 0xc1, 0xdb, + 0x7c, 0xdb, 0x3f, 0x29, 0x75, 0xf4, 0xe8, 0x36, 0x56, 0x07, 0x56, 0xc3, 0xdd, 0xd4, 0x77, 0x4d, + 0x14, 0x0c, 0x4e, 0x8e, 0xdb, 0x82, 0xe6, 0xcf, 0x89, 0xf7, 0xb4, 0x7a, 0x1b, 0xea, 0x3e, 0xa0, + 0xd2, 0xb8, 0x3a, 0x4e, 0xd7, 0x43, 0x23, 0x89, 0xc9, 0xae, 0x72, 0xa1, 0x94, 0x5a, 0x64, 0x79, + 0x6f, 0xa8, 0x8a, 0xb2, 0xb1, 0xa9, 0x70, 0xfe, 0x66, 0xc6, 0xdf, 0xd5, 0xd4, 0x18, 0x06, 0x04, + 0x3a, 0xbc, 0xab, 0xfa, 0x70, 0xc5, 0x79, 0xd1, 0xb1, 0xd7, 0xdf, 0x48, 0xb3, 0xf2, 0x6a, 0x71, + 0x71, 0xde, 0xbf, 0x97, 0x26, 0xaf, 0xfa, 0x18, 0x0f, 0x48, 0x3e, 0xdf, 0xf3, 0x0e, 0x9d, 0x16, + 0xd3, 0xa7, 0x91, 0xc1, 0x8f, 0xa7, 0xd0, 0x29, 0xd6, 0x68, 0x28, 0x5f, 0xea, 0xa6, 0xd8, 0xdf, + 0xf8, 0x4a, 0x0b, 0xed, 0xa0, 0x3c, 0x47, 0x88, 0x6a, 0x17, 0x16, 0xa4, 0x51, 0x54, 0x16, 0x3d, + 0xf7, 0xfd, 0x8f, 0x4b, 0x42, 0xf4, 0x08, 0xc6, 0xcc, 0x10, 0x96, 0x49, 0xb6, 0xeb, 0x00, 0x3d, + 0x26, 0x2c, 0x94, 0x06, 0x95, 0x1f, 0x30, 0x3b, 0xa9, 0xa7, 0xf2, 0x41, 0xb7, 0x62, 0x89, 0x82, + 0x77, 0xd6, 0x81, 0xf8, 0x24, 0x29, 0xc5, 0x2f, 0x03, 0x3d, 0x0e, 0x3f, 0x35, 0xda, 0x2c, 0x9d, + 0xe9, 0x8a, 0xee, 0x5b, 0x4a, 0x08, 0x7f, 0xf8, 0xeb, 0x8b, 0xad, 0x19, 0xb6, 0xe6, 0x83, 0x01, + 0xbf, 0xb8, 0xc7, 0x8d, 0x7c, 0xfc, 0x8c, 0x30, 0xde, 0xf5, 0x85, 0x18, 0x6f, 0xa9, 0xf9, 0x27, + 0x2e, 0x45, 0x2f, 0x89, 0xbd, 0x4c, 0x86, 0x73, 0x59, 0x23, 0xaa, 0x8c, 0x31, 0x90, 0xd1, 0x02, + 0xaa, 0xaf, 0xd3, 0x37, 0xce, 0x45, 0xbf, 0xf3, 0x62, 0x6b, 0xcb, 0xa7, 0x7f, 0x04, 0x69, 0x71, + 0x65, 0xe8, 0x06, 0x9b, 0x70, 0x7f, 0x9b, 0x90, 0x48, 0x18, 0x2f, 0x05, 0xbd, 0x17, 0x8b, 0xc0, + 0x0a, 0x40, 0x76, 0x20, 0x76, 0x86, 0x32, 0xca, 0x7f, 0x1f, 0x13, 0x56, 0xf8, 0x10, 0x1c, 0x91, + 0x58, 0xbe, 0x52, 0x37, 0x86, 0xdd, 0xac, 0xc0, 0x6e, 0x07, 0x50, 0xef, 0x9c, 0x2c, 0xa5, 0x5a, + 0xca, 0x4f, 0xb4, 0xd2, 0xc7, 0xb2, 0xd2, 0xf7, 0x76, 0x8e, 0x77, 0x81, 0x49, 0x57, 0x7e, 0xb5, + 0x5d, 0xa6, 0xbc, 0x29, 0xb8, 0xc9, 0xcc, 0xfe, 0x09, 0x62, 0x4f, 0x0f, 0x2b, 0xfc, 0x43, 0xe6, + 0x4f, 0x07, 0xee, 0x85, 0x96, 0x11, 0xdf, 0x5d, 0xb4, 0xc3, 0x65, 0xb9, 0x18, 0x5f, 0xb0, 0x34, + 0xfa, 0x8a, 0x5f, 0x9f, 
0x60, 0x40, 0x58, 0x1b, 0x62, 0x9c, 0xeb, 0x8c, 0x6d, 0x05, 0xbf, 0x66, + 0xe6, 0x5f, 0x78, 0x4b, 0x2c, 0xf6, 0x81, 0x16, 0x3b, 0xcb, 0xde, 0xad, 0x55, 0x4f, 0x40, 0x61, + 0x5d, 0x15, 0xee, 0xa1, 0xa9, 0xd6, 0x95, 0xd6, 0x70, 0xc7, 0x79, 0x87, 0x2e, 0x5b, 0x85, 0x4c, + 0x0d, 0xe1, 0xbc, 0x2d, 0xe5, 0x80, 0xb4, 0x0e, 0xe1, 0x33, 0xc3, 0xad, 0xa6, 0xe0, 0x30, 0x53, + 0x8b, 0xa1, 0xdf, 0x01, 0xcd, 0x7f, 0xf1, 0xab, 0x8c, 0x7e, 0x24, 0xb5, 0x5c, 0x57, 0xa0, 0xef, + 0x83, 0x20, 0xef, 0x1e, 0xa6, 0x8a, 0xe7, 0xbb, 0xf4, 0x51, 0xeb, 0xae, 0xfa, 0x15, 0xf3, 0x82, + 0x34, 0xbf, 0x77, 0xd0, 0xd5, 0xc5, 0x26, 0x0e, 0xe4, 0x87, 0xeb, 0x56, 0xdc, 0x2b, 0x52, 0xff, + 0x34, 0x25, 0x2a, 0x61, 0xa1, 0x82, 0x82, 0xec, 0x1b, 0x3e, 0x1f, 0xdd, 0x8b, 0xfb, 0x0a, 0xca, + 0x0f, 0x48, 0x8e, 0xe0, 0xfb, 0x7c, 0x99, 0x05, 0xfb, 0xdf, 0xd0, 0x12, 0x88, 0xf5, 0x47, 0x83, + 0xc6, 0xe4, 0xaf, 0x05, 0xa4, 0x5f, 0x47, 0x85, 0x1e, 0x01, 0xc8, 0x2a, 0xbd, 0x1e, 0x71, 0x70, + 0x05, 0x41, 0xe5, 0xd9, 0x41, 0x3c, 0x85, 0xd3, 0xe7, 0x8f, 0x17, 0x38, 0xc8, 0xb6, 0x54, 0x3a, + 0xe4, 0xc7, 0x3d, 0x77, 0xe2, 0x95, 0x0b, 0xdd, 0x0a, 0xa4, 0xc0, 0x02, 0x5e, 0xaa, 0xb8, 0x55, + 0x2a, 0x0f, 0x58, 0x33, 0xe7, 0xc7, 0xa8, 0xd6, 0x33, 0xda, 0x46, 0x26, 0xe5, 0x2b, 0xf7, 0x30, + 0xa0, 0x2d, 0x67, 0x5f, 0x85, 0xa4, 0xab, 0x71, 0x04, 0xc3, 0x10, 0x76, 0x55, 0xe9, 0xc2, 0x5c, + 0xe3, 0xd0, 0x0f, 0x4a, 0xbc, 0x06, 0xd6, 0x2f, 0x00, 0x71, 0x74, 0xb6, 0x1d, 0x83, 0x30, 0xb2, + 0x1f, 0x7e, 0x10, 0x54, 0x22, 0xda, 0x2c, 0x51, 0x7b, 0x39, 0x63, 0x8b, 0x08, 0x2a, 0xc8, 0x96, + 0x3d, 0xa7, 0x89, 0x5e, 0x98, 0x00, 0x23, 0x5c, 0x96, 0x46, 0xd1, 0xec, 0x5d, 0x89, 0x2b, 0x8a, + 0x2e, 0x2e, 0x57, 0xa7, 0xde, 0x62, 0x56, 0x41, 0x95, 0xdf, 0xec, 0x88, 0xe1, 0x7d, 0x79, 0x66, + 0x65, 0xd0, 0xde, 0x98, 0x9a, 0xb6, 0x51, 0xce, 0x98, 0xe6, 0x54, 0x3e, 0xff, 0x50, 0xa3, 0x27, + 0x79, 0x4b, 0xb7, 0xe8, 0x65, 0xe9, 0xd7, 0xa6, 0xd4, 0xb1, 0x3a, 0x42, 0x1e, 0xb5, 0xa3, 0x61, + 0x34, 0x18, 0xcd, 0x86, 0xcc, 0xa8, 0xcf, 0xaa, 0x20, 0xbe, 0x8b, 0xae, 0xdd, 0x37, 0x4e, 0x9c, + 0x3e, 0x1b, 0xe3, 0x52, 0x36, 0x03, 0x0c, 0x1a, 0x62, 0xc7, 0x68, 0x8c, 0x22, 0x02, 0xf7, 0xd9, + 0x60, 0x85, 0x1e, 0x1e, 0x9d, 0x25, 0x4b, 0xfb, 0x46, 0x2f, 0xa3, 0xdd, 0x3a, 0xc1, 0x6b, 0xa7, + 0xb7, 0xca, 0x83, 0x66, 0xc8, 0x58, 0x4b, 0xf6, 0xd8, 0x12, 0x9e, 0xe1, 0xba, 0x0d, 0x3f, 0x3a, + 0xc9, 0xc6, 0x69, 0x9c, 0xe4, 0xa8, 0x34, 0xfb, 0x96, 0x88, 0x9a, 0x54, 0xa2, 0xad, 0x24, 0x3b, + 0xd1, 0x3b, 0x6e, 0xb9, 0x16, 0xc5, 0xc2, 0xad, 0xd8, 0x1b, 0x28, 0x9c, 0xf5, 0x31, 0xd7, 0x96, + 0xcb, 0x9e, 0xbc, 0x42, 0x10, 0x89, 0xef, 0xc4, 0x7f, 0x82, 0xc4, 0xde, 0xef, 0xe6, 0xec, 0xd7, + 0x6f, 0xe2, 0xc6, 0xfb, 0xc5, 0x3d, 0x30, 0x51, 0xf0, 0x48, 0xc9, 0x4a, 0xe6, 0x82, 0x12, 0x35, + 0x6c, 0xbc, 0x53, 0x5d, 0x63, 0x78, 0x69, 0xf3, 0x5a, 0x63, 0x11, 0x85, 0x73, 0xe4, 0xa3, 0x35, + 0x9f, 0x72, 0xc9, 0x7a, 0x13, 0xd4, 0xcc, 0x3d, 0xd4, 0xaa, 0xa7, 0x91, 0x76, 0x81, 0x4f, 0x24, + 0x40, 0xa1, 0x2e, 0x6e, 0x8f, 0xfb, 0x4a, 0xce, 0xf4, 0x89, 0xa3, 0x58, 0xf5, 0x9c, 0x85, 0x67, + 0x5a, 0x02, 0xfb, 0x38, 0x76, 0xd8, 0x32, 0xd9, 0x7f, 0x16, 0x3b, 0x21, 0xf7, 0x55, 0x56, 0xd9, + 0xf7, 0x7d, 0xa3, 0xb0, 0x4c, 0xa1, 0x95, 0xd8, 0xa5, 0xaa, 0x41, 0xde, 0xa2, 0x46, 0xbb, 0x64, + 0xf7, 0x17, 0xaa, 0x92, 0xe2, 0xd2, 0x32, 0x0e, 0x32, 0x3a, 0x7d, 0xa5, 0x01, 0xfc, 0xcf, 0x11, + 0xd6, 0x2e, 0x65, 0x51, 0x3a, 0xdc, 0x72, 0x9d, 0x57, 0x34, 0x33, 0x2b, 0x04, 0x1f, 0x95, 0x82, + 0x41, 0x69, 0x9a, 0xab, 0x0e, 0x83, 0x3c, 0x56, 0x9c, 0x6e, 0xf2, 0xa3, 0x38, 0xc3, 0x51, 0x61, + 0xf1, 0xa7, 0xad, 0x79, 0xb8, 0x13, 0x03, 0xa0, 
0x32, 0x87, 0x93, 0x3a, 0x2a, 0x99, 0x42, 0x94, + 0xcf, 0xe8, 0x34, 0xb5, 0x30, 0x53, 0xd6, 0x86, 0x24, 0x7f, 0xcd, 0x6b, 0x03, 0xa1, 0xaa, 0xcc, + 0x56, 0xef, 0x72, 0xe5, 0xf7, 0x94, 0x58, 0x5f, 0x78, 0xb7, 0x21, 0x2e, 0x5e, 0x1c, 0xc9, 0xf1, + 0x70, 0x7b, 0x5d, 0x4e, 0xef, 0x9e, 0x67, 0xf3, 0x1f, 0xda, 0x4c, 0xfe, 0xcc, 0x6e, 0xf6, 0x81, + 0x6a, 0xfb, 0x53, 0x38, 0x3f, 0xc1, 0xa9, 0x7d, 0xb1, 0x3f, 0xda, 0x0d, 0x1b, 0xa7, 0xbd, 0xf7, + 0x1e, 0xec, 0x58, 0xdc, 0x97, 0x24, 0x3f, 0x43, 0xa4, 0x23, 0x42, 0x3b, 0xe7, 0x19, 0x3f, 0xfa, + 0x29, 0x13, 0x1c, 0x68, 0x31, 0xe7, 0x04, 0xad, 0xb0, 0xf6, 0x84, 0x8d, 0x33, 0xb0, 0x18, 0x51, + 0x76, 0x90, 0x4d, 0x2d, 0xa1, 0x7d, 0x27, 0x27, 0xcd, 0x78, 0x72, 0xc1, 0x5b, 0x27, 0xcf, 0xd5, + 0xd0, 0xb7, 0x1c, 0xc1, 0x49, 0x28, 0x41, 0xa1, 0x3a, 0xe3, 0x2b, 0x2b, 0x69, 0xd4, 0x66, 0x24, + 0x58, 0x36, 0x5b, 0xde, 0x3a, 0xb8, 0x3d, 0x91, 0x24, 0x55, 0x0b, 0xe3, 0x93, 0x3e, 0xf2, 0x27, + 0xbd, 0x43, 0x02, 0x41, 0x98, 0x46, 0xc7, 0x7e, 0x61, 0xa1, 0x42, 0x74, 0x22, 0x6d, 0x49, 0x94, + 0x0d, 0xb1, 0xe4, 0x4f, 0xf3, 0xac, 0xa5, 0x0b, 0x18, 0x2f, 0xad, 0xed, 0x44, 0xb6, 0xec, 0x67, + 0x6c, 0x57, 0x1b, 0x7f, 0x68, 0xc6, 0x4a, 0x8f, 0xfc, 0xe1, 0x0d, 0x3a, 0xfd, 0x56, 0x7f, 0x8a, + 0x49, 0x0d, 0x12, 0xf9, 0xf5, 0xcc, 0xda, 0x15, 0xda, 0x8f, 0xb8, 0x5a, 0x14, 0x1f, 0x9f, 0xee, + 0x27, 0x7c, 0xed, 0x34, 0xc0, 0x2d, 0x6f, 0x4f, 0x5e, 0xf7, 0x38, 0xa3, 0xc0, 0x67, 0xd1, 0x5c, + 0xa1, 0x6f, 0xe2, 0xd2, 0x66, 0xf6, 0xce, 0x33, 0x6f, 0x45, 0xc3, 0x73, 0x8e, 0x45, 0x56, 0xf4, + 0x72, 0x69, 0xdc, 0xdc, 0x6e, 0x71, 0x2a, 0x97, 0x6d, 0x79, 0x90, 0x3a, 0xb7, 0x3d, 0x89, 0x77, + 0x59, 0xc7, 0x57, 0x25, 0x50, 0x99, 0x47, 0xd5, 0xf4, 0x80, 0x56, 0x42, 0x87, 0xaf, 0x43, 0x4a, + 0xab, 0xc8, 0xab, 0x60, 0xef, 0x17, 0xab, 0xb2, 0xda, 0xd6, 0xcd, 0x6a, 0x03, 0xa4, 0x65, 0x2c, + 0x21, 0x95, 0x2a, 0xf8, 0xf6, 0xc7, 0x6b, 0xc7, 0x01, 0xf3, 0x12, 0xef, 0x45, 0x74, 0x9b, 0xa5, + 0xcc, 0xad, 0x21, 0x92, 0x0f, 0xb6, 0x5f, 0x42, 0x48, 0x76, 0xd1, 0x54, 0x70, 0x75, 0x41, 0x0b, + 0xca, 0x80, 0x34, 0x36, 0x21, 0x08, 0x89, 0x78, 0x71, 0x7b, 0x10, 0xf6, 0xde, 0xd4, 0x75, 0x9b, + 0xf5, 0x32, 0x66, 0x7d, 0x3e, 0x9e, 0xe6, 0x4f, 0x29, 0xeb, 0x70, 0x66, 0x02, 0x65, 0x5b, 0xbe, + 0x35, 0xf2, 0x1f, 0xa9, 0x02, 0x58, 0x24, 0x5d, 0x92, 0xba, 0x8c, 0x3d, 0xe0, 0xe0, 0xf2, 0x0f, + 0xf9, 0x86, 0x66, 0x6e, 0x11, 0xbc, 0x95, 0xbf, 0x85, 0x9e, 0x51, 0x0a, 0xd4, 0xcc, 0xc7, 0xa8, + 0x03, 0x35, 0x82, 0xfc, 0x40, 0x5e, 0x7e, 0xd6, 0xb4, 0x26, 0xf7, 0x07, 0x26, 0x91, 0x91, 0xb5, + 0x7a, 0x14, 0x74, 0xdc, 0x21, 0xca, 0x1f, 0x73, 0x2f, 0x15, 0x39, 0x6c, 0x3e, 0x7b, 0x7e, 0x9d, + 0xa4, 0xec, 0xe8, 0x9f, 0x90, 0x5d, 0x41, 0x56, 0x82, 0x10, 0x37, 0x15, 0x01, 0x52, 0x53, 0xa5, + 0x3e, 0xd9, 0x39, 0x44, 0x25, 0x78, 0xe3, 0x04, 0x10, 0x2a, 0x0d, 0x03, 0x8c, 0xff, 0x9e, 0xb7, + 0x3d, 0x6c, 0xf5, 0xed, 0x5e, 0x62, 0x24, 0xcb, 0x7c, 0x9b, 0x48, 0x04, 0x71, 0x2d, 0xd5, 0xcc, + 0x88, 0xe4, 0x6c, 0x43, 0xe0, 0xe8, 0xb3, 0x58, 0x1e, 0xa7, 0xaa, 0xd5, 0x65, 0xb3, 0x98, 0x82, + 0x8b, 0x60, 0x5b, 0x33, 0xfa, 0xde, 0x5b, 0xfc, 0x8f, 0xd9, 0x12, 0x47, 0x2e, 0x47, 0xd9, 0x30, + 0xc6, 0xd4, 0x0e, 0x1c, 0x8e, 0x49, 0xea, 0x42, 0xf9, 0xe4, 0x47, 0xcf, 0xe4, 0xf5, 0xc6, 0x82, + 0xfb, 0x8f, 0x08, 0x51, 0xa0, 0xc3, 0x12, 0x70, 0x4d, 0x5f, 0x43, 0xe7, 0x9b, 0xb3, 0x10, 0x3a, + 0x81, 0x59, 0x84, 0x4e, 0xa2, 0x56, 0x42, 0xa9, 0x29, 0xc3, 0x64, 0xbc, 0x76, 0x66, 0xcc, 0x06, + 0xb2, 0x2f, 0x81, 0x00, 0xe2, 0x37, 0x14, 0x41, 0x6f, 0xd1, 0x94, 0xdd, 0x11, 0x2b, 0x13, 0xd7, + 0xdc, 0xee, 0x22, 0x7f, 0x80, 0xb9, 0x24, 0xf9, 0x8b, 0x72, 0x27, 0xf4, 
0xe0, 0xbe, 0xf4, 0xcf, + 0x5d, 0xf5, 0xc2, 0xa0, 0xfd, 0x31, 0x8a, 0xaf, 0xd0, 0x58, 0x2b, 0x55, 0xb8, 0x3b, 0x97, 0xd8, + 0xec, 0x11, 0xf8, 0xc6, 0x03, 0x67, 0xf0, 0x42, 0xe3, 0x61, 0xfe, 0xac, 0xe7, 0xd0, 0xd4, 0x00, + 0xc8, 0x0d, 0xf0, 0xbb, 0x7d, 0xd9, 0x12, 0x4a, 0x24, 0x2f, 0xe8, 0x44, 0x69, 0xf8, 0x96, 0xe5, + 0x82, 0xa5, 0xe9, 0x7c, 0x7e, 0x98, 0x09, 0xd3, 0x06, 0xfb, 0x67, 0xae, 0xee, 0xaa, 0xe1, 0xd1, + 0xc1, 0xfa, 0x93, 0xca, 0x71, 0x64, 0x8c, 0xde, 0x8b, 0x0d, 0xae, 0xa7, 0xdf, 0x79, 0x21, 0xc9, + 0xfd, 0xb8, 0x7f, 0xa2, 0x20, 0x6e, 0x30, 0x2e, 0x02, 0xb8, 0xa2, 0x68, 0xbb, 0x93, 0x9a, 0xe8, + 0x18, 0x98, 0x59, 0x40, 0x8a, 0xab, 0xb0, 0xb8, 0x88, 0xdb, 0x93, 0x94, 0xba, 0x04, 0x44, 0x87, + 0x8e, 0xe5, 0x43, 0xbb, 0xce, 0x90, 0xf1, 0x59, 0x0e, 0x72, 0xf3, 0x15, 0x77, 0x0c, 0x1e, 0xa2, + 0x06, 0x52, 0xd9, 0xdb, 0x79, 0x3d, 0x1d, 0x23, 0x52, 0xb5, 0x68, 0x03, 0x55, 0x18, 0xff, 0x2e, + 0x15, 0xc6, 0xc3, 0xf7, 0x67, 0x60, 0x9f, 0xac, 0x8c, 0x9e, 0x7f, 0xdc, 0xd3, 0x23, 0x1b, 0x5a, + 0x7b, 0xac, 0x66, 0xe1, 0x2f, 0xa3, 0x9f, 0xea, 0x51, 0xf4, 0x1e, 0x9b, 0xf6, 0x57, 0xb6, 0x72, + 0x4e, 0xc9, 0x0b, 0xdf, 0x8a, 0x3b, 0x7c, 0x01, 0x5e, 0x39, 0x7a, 0x41, 0x97, 0xbf, 0x6b, 0xb2, + 0xc0, 0x76, 0x2b, 0x00, 0xb6, 0x7a, 0x8d, 0x1a, 0x57, 0x27, 0x91, 0x42, 0x6a, 0x21, 0x8f, 0x5f, + 0x54, 0x0c, 0xc5, 0x9c, 0x02, 0xd6, 0x22, 0xf5, 0xf3, 0xf6, 0xdd, 0x4f, 0x1a, 0x63, 0x51, 0xef, + 0x6b, 0x4b, 0xa3, 0x14, 0x71, 0x13, 0x7d, 0x3c, 0xf8, 0x92, 0x9b, 0x7b, 0x8d, 0x78, 0x3f, 0x9d, + 0x75, 0xde, 0x58, 0xe8, 0xfc, 0xbd, 0x2d, 0x1f, 0xf1, 0x1b, 0xd9, 0x60, 0x32, 0x0b, 0x21, 0xf5, + 0x15, 0x56, 0xe4, 0xac, 0x46, 0xf1, 0xa7, 0x99, 0x8e, 0x71, 0xec, 0xff, 0xf2, 0x5f, 0x92, 0x4e, + 0xf3, 0x4a, 0xe5, 0xc5, 0x33, 0xfd, 0xaa, 0x10, 0xb5, 0x65, 0x84, 0xeb, 0x90, 0xa0, 0x67, 0xe2, + 0x68, 0x55, 0xdf, 0x18, 0x64, 0xd9, 0xa1, 0xee, 0x72, 0x92, 0x76, 0x35, 0x46, 0x7b, 0x3e, 0xb6, + 0x01, 0x7e, 0x43, 0x7f, 0x25, 0x86, 0x2f, 0x5b, 0x5c, 0x35, 0xf3, 0x4c, 0x03, 0x57, 0x09, 0xe4, + 0x33, 0x24, 0x6e, 0x34, 0x06, 0xf7, 0x9b, 0x20, 0x4e, 0x0c, 0xc9, 0x1a, 0x1a, 0x29, 0xa6, 0x27, + 0x79, 0x3c, 0xf3, 0x61, 0x79, 0x88, 0x63, 0xea, 0x4d, 0x67, 0xd2, 0xda, 0xed, 0xb7, 0x16, 0x82, + 0xae, 0x27, 0x87, 0x09, 0xe8, 0x7f, 0x7f, 0x73, 0x7b, 0x00, 0xba, 0xb7, 0x17, 0xa9, 0x59, 0xf6, + 0x0d, 0x50, 0x32, 0x35, 0x3f, 0xd5, 0xee, 0x79, 0xc9, 0xf1, 0xb9, 0x10, 0x3e, 0x9e, 0xfd, 0x77, + 0x15, 0x06, 0x40, 0x56, 0x92, 0x09, 0xc1, 0x96, 0xf4, 0x35, 0x2d, 0xc3, 0xa2, 0x77, 0x86, 0x1a, + 0x72, 0xe7, 0x8c, 0x97, 0xfb, 0xf3, 0x17, 0xeb, 0x1b, 0x8c, 0x15, 0x8b, 0xbf, 0xf0, 0xcf, 0x97, + 0x55, 0xa4, 0xaa, 0x04, 0x9b, 0x33, 0x63, 0x2b, 0xc0, 0x5a, 0x7b, 0x41, 0x1c, 0xe7, 0x9f, 0x8f, + 0x28, 0x36, 0x09, 0x9f, 0xf3, 0x6d, 0xbc, 0x83, 0x34, 0x07, 0x6e, 0x40, 0x89, 0x0e, 0xd6, 0x11, + 0x8a, 0x30, 0x19, 0x7c, 0xf5, 0x46, 0x37, 0xa8, 0xeb, 0x92, 0x39, 0x8b, 0xb2, 0x88, 0x44, 0xa2, + 0x31, 0x41, 0x9b, 0xa6, 0xa1, 0x62, 0x51, 0x9e, 0x4b, 0xd2, 0x74, 0x31, 0xa9, 0xd5, 0x6b, 0x21, + 0xf6, 0xdf, 0xa8, 0xf4, 0x67, 0x3d, 0x9d, 0x98, 0x57, 0x84, 0x8c, 0x50, 0x53, 0x66, 0xda, 0x87, + 0xe9, 0x9a, 0x12, 0x2a, 0x97, 0xc4, 0xbb, 0x1a, 0xc5, 0x8c, 0x09, 0x12, 0xee, 0x0a, 0xaf, 0x84, + 0x93, 0xf5, 0xda, 0xd4, 0x4c, 0x2b, 0xa1, 0x70, 0xf1, 0xec, 0x12, 0xff, 0xee, 0xcb, 0x26, 0x95, + 0xd9, 0x40, 0xbd, 0x79, 0xd8, 0xf7, 0x41, 0xa8, 0xed, 0x4b, 0xd2, 0xfe, 0x1e, 0xae, 0x94, 0x40, + 0xe1, 0x8d, 0xcd, 0x28, 0xe2, 0xd7, 0x11, 0x6e, 0x5d, 0xcc, 0xee, 0x21, 0xd1, 0x39, 0xf1, 0x54, + 0xc8, 0x05, 0xe1, 0xa8, 0x71, 0x5e, 0x2a, 0x50, 0x85, 0x18, 0x77, 0xc7, 0xf2, 0xf0, 0x7a, 0x58, + 
0x6b, 0x43, 0x35, 0x24, 0x52, 0x1d, 0xfb, 0xe6, 0xb0, 0xbf, 0xb8, 0xeb, 0xb7, 0x9b, 0x91, 0x1e, + 0xd1, 0x57, 0x9f, 0x97, 0x0c, 0xc4, 0xcd, 0x9c, 0x63, 0x02, 0x8e, 0x8f, 0xe7, 0x30, 0x5e, 0x86, + 0x10, 0xb7, 0x1f, 0x36, 0xe2, 0xbb, 0x3b, 0x38, 0xb5, 0x1b, 0x79, 0x74, 0xbb, 0x50, 0x4e, 0x62, + 0xd5, 0x31, 0x5e, 0x35, 0x45, 0xf6, 0xee, 0x7a, 0xc2, 0x2c, 0x5b, 0xe2, 0xb3, 0x22, 0x26, 0x02, + 0x04, 0x38, 0x8a, 0xae, 0xd4, 0xb0, 0xe1, 0xaf, 0x3c, 0x8e, 0x5f, 0x18, 0x97, 0xd7, 0x72, 0xe4, + 0x7a, 0xfe, 0xaf, 0xeb, 0x8f, 0xf9, 0x6f, 0x03, 0x08, 0xd8, 0x7e, 0x11, 0xc0, 0x1e, 0xd5, 0xcf, + 0x55, 0xaf, 0x94, 0x73, 0xd5, 0x32, 0x17, 0xad, 0x2e, 0x20, 0x5c, 0x15, 0x8b, 0xed, 0xbf, 0x39, + 0x04, 0x99, 0xf4, 0x70, 0xda, 0x17, 0x8c, 0x24, 0xae, 0x4f, 0xa1, 0x96, 0x66, 0x0f, 0xdf, 0x0a, + 0x82, 0x8c, 0x3f, 0x67, 0xb5, 0xe4, 0x8f, 0x7d, 0xa6, 0x7c, 0x7d, 0xa2, 0xb9, 0x4e, 0xd1, 0xb3, + 0xd2, 0x11, 0x27, 0x5f, 0x50, 0x44, 0x65, 0x9d, 0x1f, 0x1f, 0x67, 0x61, 0x0e, 0xbe, 0x11, 0xf9, + 0x0c, 0xc1, 0xe8, 0xf1, 0xac, 0xf4, 0x75, 0xfc, 0x72, 0x12, 0x55, 0x50, 0xef, 0x8a, 0xc6, 0x61, + 0x11, 0x36, 0x6a, 0x27, 0xe8, 0xfc, 0x64, 0xe9, 0x08, 0x26, 0x89, 0x6a, 0x2f, 0x95, 0x0b, 0x3d, + 0x9b, 0x8b, 0xa4, 0xbc, 0xd3, 0x6f, 0xa4, 0x22, 0x01, 0x6d, 0xf8, 0x3c, 0x52, 0x80, 0xd3, 0xa6, + 0x7d, 0x45, 0xfc, 0x7d, 0x3f, 0xd1, 0x3c, 0x0b, 0xb8, 0x95, 0xf9, 0x88, 0x0b, 0xe4, 0x8f, 0xd5, + 0x2f, 0x2f, 0x00, 0x77, 0x44, 0xe1, 0x1b, 0xf6, 0x8d, 0xc8, 0x17, 0xdf, 0xde, 0xa3, 0x56, 0xfd, + 0x2f, 0xee, 0x0f, 0x60, 0x77, 0x87, 0x2a, 0x2e, 0x6a, 0xa5, 0x08, 0x1b, 0x19, 0xab, 0xb0, 0xaf, + 0xa4, 0x47, 0x94, 0x7f, 0x19, 0x2d, 0xe4, 0xa2, 0x4b, 0xdf, 0x09, 0x32, 0x56, 0xb0, 0x69, 0x50, + 0x9a, 0x45, 0xa3, 0x3b, 0x57, 0x3f, 0xef, 0xf7, 0xbd, 0x7f, 0xc4, 0xca, 0x75, 0xca, 0x13, 0x62, + 0x60, 0x9b, 0x6d, 0xa2, 0xde, 0xf3, 0x0d, 0x85, 0xc5, 0x1d, 0xa8, 0x2b, 0xd9, 0x8f, 0xe2, 0x96, + 0x64, 0x0d, 0x2b, 0x5c, 0x66, 0x9d, 0x01, 0xd5, 0xe3, 0xd0, 0x6a, 0x72, 0xc0, 0x29, 0x6b, 0xb7, + 0x14, 0xab, 0xf5, 0x55, 0xa6, 0x1a, 0xe0, 0xfd, 0x28, 0xd7, 0xdf, 0x83, 0x03, 0x41, 0x67, 0x6f, + 0x00, 0xbf, 0x52, 0xc9, 0x9a, 0x81, 0x2a, 0xe5, 0xbe, 0x1e, 0x4f, 0xc4, 0xac, 0x4e, 0xb8, 0x68, + 0x82, 0x63, 0x3b, 0x91, 0x25, 0x23, 0xc3, 0x18, 0x79, 0x22, 0x4f, 0xcc, 0xe8, 0xd1, 0xc3, 0x63, + 0x9d, 0xa0, 0x27, 0xf7, 0xa5, 0xba, 0x64, 0xc4, 0x62, 0x7c, 0x89, 0xdb, 0x15, 0x9d, 0x95, 0xd5, + 0x87, 0x5a, 0x7c, 0xbe, 0x55, 0x6e, 0xf3, 0xc7, 0xe3, 0xdc, 0xa6, 0x50, 0x68, 0xbd, 0x93, 0x81, + 0x23, 0xbb, 0x30, 0xc5, 0xf9, 0xf2, 0x6a, 0x6d, 0x38, 0xa6, 0x0b, 0x37, 0xeb, 0x77, 0xe2, 0x46, + 0xdd, 0xcd, 0x38, 0x60, 0xa9, 0xa3, 0x7a, 0x0a, 0x1c, 0x1d, 0xa4, 0x94, 0xf6, 0xcb, 0x6d, 0xba, + 0x34, 0xff, 0x4e, 0xd4, 0xaa, 0x2e, 0x4c, 0xcc, 0x65, 0x74, 0x33, 0x0f, 0x73, 0xd4, 0xf3, 0x55, + 0x5a, 0xad, 0x7e, 0xc7, 0x57, 0x3c, 0x73, 0x5b, 0x39, 0x50, 0x6f, 0x49, 0xf1, 0x33, 0x0f, 0xfb, + 0x84, 0xc2, 0xc5, 0xdc, 0x07, 0x16, 0xa7, 0x70, 0x8c, 0x37, 0x4e, 0x1f, 0xf9, 0x75, 0x9b, 0xa5, + 0xd5, 0x33, 0x89, 0xb5, 0x05, 0xf2, 0x70, 0xee, 0x09, 0xc6, 0xb5, 0xf4, 0xf5, 0x26, 0x72, 0x4d, + 0x79, 0x52, 0x67, 0xe9, 0x73, 0xce, 0xe1, 0x1e, 0xc1, 0xef, 0x5d, 0x31, 0x8f, 0xd7, 0xe9, 0xae, + 0x8c, 0xe9, 0x7f, 0x43, 0x4f, 0x76, 0xac, 0xe5, 0x6e, 0x5d, 0x56, 0x8f, 0x23, 0x9b, 0x67, 0x2a, + 0xfb, 0x2a, 0x6e, 0x99, 0x1e, 0xc1, 0x54, 0x93, 0x57, 0xb6, 0xd7, 0xd7, 0x68, 0x18, 0xfa, 0x4d, + 0xe5, 0xc2, 0xfd, 0xe2, 0xe6, 0x70, 0xf7, 0xb5, 0xc6, 0x2f, 0x6b, 0x15, 0xdf, 0xad, 0x82, 0x3b, + 0x45, 0x13, 0xb7, 0x21, 0x6e, 0x8f, 0xf4, 0x24, 0x4b, 0x2d, 0x50, 0xc2, 0x9b, 0x99, 0x71, 0x8c, + 0xe0, 0x45, 0x4c, 0x61, 
0x2a, 0x0d, 0xf9, 0xa5, 0x91, 0x26, 0x43, 0x8b, 0x61, 0x3a, 0xcc, 0x9f, + 0xdb, 0x41, 0xb8, 0xdf, 0x8e, 0xbc, 0x8b, 0xc2, 0x09, 0xa5, 0xc8, 0x7f, 0x28, 0xdc, 0x25, 0x04, + 0x15, 0xff, 0xa1, 0xa5, 0x73, 0xa4, 0x65, 0x0a, 0xe9, 0xf0, 0x10, 0xad, 0xcd, 0x0c, 0x04, 0x8a, + 0x63, 0xf7, 0xff, 0x72, 0xa7, 0x99, 0x09, 0xf0, 0x71, 0x96, 0x6f, 0x84, 0x8c, 0x23, 0xc4, 0x84, + 0xdc, 0x26, 0x4b, 0xd8, 0x30, 0x04, 0x07, 0xfa, 0x0c, 0x4a, 0x7d, 0xeb, 0x24, 0x42, 0xc2, 0x9d, + 0x47, 0x2f, 0x44, 0xc8, 0x4e, 0xcc, 0x31, 0x2a, 0x9c, 0x6e, 0x71, 0xae, 0x83, 0x1d, 0x08, 0x75, + 0x5f, 0x0c, 0xc1, 0x8e, 0xb3, 0xc1, 0x72, 0x3c, 0x81, 0xe3, 0x6e, 0x92, 0xe4, 0x83, 0xc7, 0xd9, + 0xf7, 0xdb, 0x5b, 0x30, 0xa8, 0x6c, 0xc8, 0x55, 0xb8, 0x4e, 0xf5, 0x82, 0x81, 0xbd, 0x54, 0x01, + 0x3a, 0x3f, 0x27, 0x61, 0x23, 0xdf, 0xbd, 0x57, 0x84, 0x50, 0x81, 0x5f, 0xcc, 0x49, 0x65, 0xbb, + 0x36, 0xba, 0x69, 0x49, 0x60, 0x6f, 0x62, 0xae, 0x73, 0x1e, 0xb9, 0xbc, 0x7c, 0x24, 0xda, 0x89, + 0x49, 0x0f, 0xd0, 0x3d, 0x34, 0x71, 0x25, 0xa1, 0x5a, 0xe0, 0x7d, 0x97, 0x04, 0xf1, 0xc1, 0xda, + 0xd1, 0xb4, 0x53, 0x5f, 0xb0, 0x6a, 0x5b, 0x92, 0x39, 0x9b, 0x14, 0x40, 0xcc, 0x7a, 0xb7, 0xa6, + 0x2d, 0xc5, 0x52, 0x91, 0x4b, 0x66, 0xbd, 0x56, 0x68, 0x76, 0xed, 0x90, 0xbf, 0x6c, 0x2d, 0xa0, + 0x5b, 0x77, 0xe5, 0x49, 0xb5, 0xa3, 0x66, 0x75, 0x47, 0xa4, 0x72, 0xdd, 0x0a, 0x39, 0xee, 0x0c, + 0x62, 0x13, 0xbf, 0xfd, 0xe8, 0x5d, 0xe8, 0x69, 0x44, 0xb1, 0xc8, 0xe0, 0xf2, 0xe6, 0x25, 0xb7, + 0x5e, 0x8d, 0xd1, 0x03, 0xdc, 0x34, 0x0f, 0xbc, 0x79, 0xc4, 0x69, 0xda, 0x8a, 0xd1, 0x57, 0xb7, + 0x6c, 0x47, 0xd5, 0xfe, 0x78, 0x69, 0x8d, 0x81, 0xf9, 0x2a, 0xf9, 0x17, 0x65, 0x8c, 0xf4, 0xca, + 0xb2, 0x2b, 0x68, 0x21, 0xd8, 0x51, 0x78, 0x6f, 0x1f, 0x91, 0x77, 0xc8, 0x40, 0x09, 0x16, 0xe0, + 0x55, 0x77, 0xe5, 0x6d, 0x4c, 0x31, 0x1c, 0x18, 0x5f, 0xe2, 0x2a, 0xef, 0x29, 0x4e, 0x5d, 0x0e, + 0xe9, 0xdd, 0x6b, 0x2f, 0x0f, 0x43, 0x94, 0x55, 0x13, 0xf9, 0x04, 0x11, 0x03, 0x02, 0x8f, 0xac, + 0x11, 0x55, 0x4e, 0x32, 0xa4, 0xd4, 0xf4, 0x68, 0x39, 0x81, 0x4b, 0x3f, 0x2a, 0xbd, 0x8a, 0x1b, + 0x62, 0x53, 0xa4, 0xa5, 0xb8, 0xa2, 0x1b, 0x48, 0x69, 0xdf, 0xbe, 0x8b, 0xc8, 0xd1, 0x3f, 0x97, + 0x65, 0x3f, 0x5b, 0xd9, 0xe8, 0xc8, 0xbd, 0x2d, 0x92, 0xe8, 0xa3, 0xf8, 0x22, 0x43, 0x18, 0x3e, + 0x6a, 0xf1, 0xd0, 0x06, 0x67, 0x01, 0x1f, 0x62, 0xad, 0x4f, 0x4f, 0x93, 0x58, 0xca, 0x60, 0x83, + 0xa4, 0x7f, 0x16, 0x49, 0x25, 0x65, 0xe5, 0x93, 0xfa, 0x60, 0x7f, 0x49, 0xb1, 0x6e, 0xd1, 0x74, + 0x50, 0x16, 0xa1, 0x53, 0xa1, 0xc6, 0x50, 0xa9, 0x5a, 0x38, 0x20, 0x8f, 0x28, 0xb9, 0x3b, 0x5a, + 0x26, 0xf9, 0x58, 0x4d, 0xf2, 0x3c, 0xc3, 0x59, 0x65, 0xc8, 0x67, 0xbc, 0x53, 0x95, 0x84, 0xd8, + 0x32, 0x6a, 0x44, 0xb7, 0x7b, 0x27, 0xc1, 0x23, 0xb4, 0x66, 0x5c, 0x5b, 0xd6, 0xfa, 0x35, 0xfe, + 0xc9, 0x0b, 0x8b, 0xb1, 0xcb, 0x55, 0x46, 0xd8, 0x55, 0xb2, 0x80, 0xfc, 0xfa, 0x70, 0xf8, 0xf5, + 0x34, 0xae, 0x46, 0x2b, 0x95, 0xa6, 0xd0, 0x6a, 0xd2, 0xa6, 0x1d, 0x5e, 0x67, 0xce, 0x60, 0x22, + 0x34, 0x19, 0xf2, 0x67, 0x67, 0xde, 0xc4, 0xf6, 0x78, 0x78, 0x3e, 0xf1, 0x92, 0xe2, 0x51, 0x40, + 0x59, 0x18, 0xae, 0x75, 0x0b, 0x64, 0x57, 0x0f, 0x1a, 0x7a, 0x6c, 0xe4, 0xe3, 0x22, 0xdb, 0x72, + 0x17, 0x26, 0xc2, 0x5c, 0x1a, 0xa5, 0x77, 0x2c, 0xb0, 0x13, 0x11, 0xa4, 0x9a, 0x43, 0xfc, 0x54, + 0x26, 0x5b, 0x21, 0xfe, 0xe3, 0x99, 0x29, 0x9c, 0x98, 0x52, 0x88, 0x1a, 0x82, 0x4a, 0x9f, 0x2d, + 0xd1, 0x90, 0x43, 0x62, 0xe8, 0x26, 0x54, 0x87, 0xa3, 0x5c, 0x01, 0x30, 0x2a, 0x13, 0x97, 0xd3, + 0x5a, 0xe7, 0xcc, 0xcb, 0x17, 0x56, 0x49, 0x1c, 0x3f, 0x13, 0x04, 0x81, 0xb2, 0xce, 0x33, 0x88, + 0x81, 0xcf, 0x3a, 0xba, 0xaf, 0x68, 0xe7, 0xa4, 
0x8e, 0x26, 0x16, 0xda, 0x51, 0xb4, 0x1a, 0xcf, + 0x59, 0x69, 0x54, 0xed, 0xb1, 0xf2, 0xe4, 0x78, 0xac, 0x6b, 0x25, 0x31, 0x8b, 0xb3, 0x5f, 0x1f, + 0xbf, 0x2e, 0xdd, 0xb5, 0xa5, 0xd7, 0x2f, 0x99, 0xb2, 0x5a, 0xad, 0x28, 0x1f, 0x61, 0xa1, 0xe8, + 0x1e, 0xbe, 0x5b, 0x97, 0xef, 0x79, 0x17, 0xe8, 0xd6, 0xe1, 0x46, 0xbb, 0x06, 0x4a, 0x7e, 0xe5, + 0xda, 0xc9, 0x97, 0x30, 0x7a, 0xe4, 0x1d, 0xb2, 0xb2, 0xb7, 0x58, 0x14, 0xe6, 0x96, 0x89, 0x9d, + 0x49, 0x45, 0x96, 0xb5, 0x47, 0x3a, 0x76, 0xac, 0x58, 0x91, 0xc2, 0xde, 0xfa, 0xb5, 0x99, 0xf2, + 0x53, 0xc5, 0x15, 0x34, 0x38, 0x93, 0x55, 0x78, 0x2c, 0x20, 0x36, 0x1c, 0xc7, 0x2c, 0xec, 0xfd, + 0x68, 0x92, 0xde, 0x3e, 0x37, 0x87, 0xf9, 0x3a, 0x37, 0xa6, 0x95, 0x6b, 0xe2, 0x1e, 0x85, 0xb6, + 0xb9, 0x70, 0x53, 0x28, 0xa6, 0xc0, 0x7c, 0x54, 0xfa, 0xeb, 0x78, 0x3a, 0x51, 0x7b, 0x2d, 0xf6, + 0x09, 0x6f, 0xa4, 0x34, 0x72, 0x65, 0x5b, 0xa5, 0x40, 0x2b, 0x58, 0x69, 0x78, 0x80, 0xc9, 0xc1, + 0xdc, 0x68, 0x56, 0xe6, 0x70, 0x3f, 0x5e, 0x0d, 0x37, 0xa2, 0x45, 0x67, 0xa5, 0x82, 0x3c, 0xd0, + 0x56, 0x98, 0x2f, 0x47, 0xfc, 0x9b, 0x81, 0x67, 0xa4, 0x7e, 0x51, 0xe9, 0xac, 0xfd, 0x31, 0xf1, + 0xde, 0x95, 0x69, 0xea, 0x94, 0xb5, 0xe7, 0x2c, 0xe1, 0x91, 0xa6, 0xac, 0x24, 0x4b, 0xbd, 0x6a, + 0xbc, 0x55, 0x75, 0x68, 0x29, 0x44, 0x5f, 0x44, 0x92, 0x0d, 0xcc, 0xd9, 0x3a, 0x9b, 0xef, 0x14, + 0xa9, 0xf6, 0x3e, 0x0c, 0xcc, 0x6d, 0xd3, 0x96, 0x5d, 0xa3, 0x2f, 0x7b, 0x9d, 0x26, 0x52, 0x62, + 0x44, 0xbd, 0xca, 0x08, 0x1d, 0xfa, 0x63, 0x95, 0x2c, 0xd8, 0x95, 0x5e, 0xf7, 0xab, 0xab, 0x2d, + 0x86, 0xa0, 0xda, 0x37, 0xcb, 0x62, 0x45, 0xd2, 0x14, 0x45, 0x29, 0x3d, 0x0e, 0xbe, 0xfe, 0xf4, + 0x24, 0xe6, 0x6c, 0xc9, 0xbe, 0x67, 0x3d, 0x9b, 0xc6, 0xff, 0xad, 0x38, 0xfe, 0xcc, 0x6b, 0xf6, + 0x0a, 0xad, 0x1b, 0xf6, 0xc9, 0x3c, 0x22, 0x5d, 0x70, 0x8a, 0x0a, 0xf9, 0x14, 0xea, 0xd4, 0xe6, + 0x26, 0xa5, 0xe5, 0xdd, 0xa4, 0xfc, 0xe0, 0xe1, 0x3e, 0xdb, 0xd9, 0x09, 0x7e, 0x9e, 0x23, 0xbf, + 0x75, 0x10, 0xf7, 0x8a, 0xd4, 0x69, 0xa8, 0x85, 0x71, 0x25, 0xea, 0xbe, 0xb7, 0xf5, 0x05, 0xad, + 0x4f, 0xec, 0xa5, 0x31, 0xba, 0xf6, 0x7a, 0x59, 0x4c, 0xdb, 0x33, 0xd2, 0x4f, 0x90, 0x95, 0xf4, + 0x7d, 0xa7, 0xa1, 0x94, 0x1c, 0x13, 0xc6, 0x9f, 0x0b, 0x9f, 0x21, 0x95, 0x7e, 0x69, 0x83, 0x67, + 0xfd, 0x0f, 0xab, 0x36, 0xd1, 0xa5, 0x67, 0xe6, 0xd5, 0xe9, 0xae, 0x86, 0x4d, 0x3c, 0x95, 0x3c, + 0x9e, 0xec, 0x34, 0x02, 0xa7, 0x3f, 0xf4, 0x4f, 0xd9, 0xa2, 0x7c, 0x8b, 0x15, 0x1c, 0xd1, 0x1e, + 0x5b, 0x23, 0x60, 0xed, 0xfb, 0x05, 0x1b, 0x58, 0xe6, 0x7f, 0x2d, 0xa4, 0xc0, 0x6e, 0x99, 0xa9, + 0x18, 0x8d, 0x25, 0x2f, 0x29, 0x58, 0x68, 0x5c, 0x97, 0x20, 0x3d, 0x22, 0xa7, 0x9b, 0xa1, 0xa1, + 0x78, 0x7d, 0x14, 0x94, 0x1c, 0x65, 0x2c, 0x4d, 0x04, 0x19, 0x16, 0xe6, 0xcd, 0x4f, 0xe7, 0xec, + 0x2a, 0x0a, 0x30, 0xcf, 0x21, 0xbd, 0xb3, 0x0e, 0xb3, 0xca, 0x2f, 0xdf, 0x30, 0x3a, 0xf4, 0xaa, + 0xf5, 0xb6, 0x70, 0x97, 0xd9, 0xce, 0xd3, 0xd6, 0xed, 0x4d, 0xd4, 0xb2, 0x70, 0xb8, 0x06, 0x1f, + 0xcb, 0x7a, 0xb3, 0x19, 0x6f, 0x17, 0xc4, 0x0b, 0x8a, 0x40, 0x13, 0x8d, 0xdb, 0x7a, 0x8d, 0x11, + 0x22, 0x46, 0x54, 0xcc, 0x65, 0xeb, 0x88, 0x9b, 0xba, 0xce, 0x70, 0x58, 0x72, 0x83, 0x55, 0x46, + 0xd0, 0xc2, 0x9b, 0xa0, 0x9c, 0x08, 0x0c, 0x22, 0x54, 0xff, 0xc7, 0x29, 0xdb, 0x2f, 0x24, 0x6b, + 0x23, 0x6f, 0xfc, 0xae, 0x2e, 0x33, 0xdd, 0xe0, 0x74, 0x76, 0xf3, 0x56, 0xdd, 0xac, 0x9f, 0x82, + 0xde, 0x00, 0x21, 0x86, 0x50, 0x31, 0xbf, 0x4c, 0x8b, 0x6f, 0x88, 0x72, 0xb1, 0xd0, 0xf4, 0x9a, + 0x92, 0x08, 0x92, 0x76, 0x2a, 0xd6, 0x2a, 0x0f, 0x89, 0xc3, 0xfe, 0x93, 0x3c, 0x86, 0x8e, 0xb8, + 0x92, 0x0b, 0xeb, 0xb0, 0x56, 0x51, 0xc0, 0xcf, 0x9b, 0xe0, 0xa9, 0x57, 
0xcb, 0xe6, 0x9d, 0x74, + 0x51, 0xb2, 0x87, 0x41, 0xab, 0x5c, 0x44, 0x80, 0x8f, 0xb4, 0x6c, 0x81, 0xab, 0x2f, 0x76, 0x95, + 0x3d, 0x6c, 0xd8, 0x5f, 0x09, 0x65, 0x2c, 0xd1, 0x24, 0xc8, 0x5f, 0x26, 0x34, 0x0b, 0x7e, 0x70, + 0x3c, 0xe3, 0xbd, 0xde, 0x4f, 0xdf, 0xae, 0x11, 0x1c, 0x28, 0xb0, 0x12, 0x0a, 0x55, 0x8e, 0x29, + 0x25, 0xe1, 0xd2, 0xf2, 0x2e, 0xe3, 0xfd, 0x58, 0x26, 0x32, 0x33, 0xe1, 0xee, 0x05, 0x75, 0x76, + 0xa0, 0xc0, 0x69, 0x19, 0x9a, 0x32, 0x39, 0x3e, 0x19, 0x95, 0xd4, 0xe6, 0x28, 0x82, 0xe3, 0xd7, + 0x90, 0x00, 0x2e, 0x1f, 0xf0, 0x7e, 0x0c, 0x58, 0xdd, 0xd1, 0xac, 0x92, 0xc8, 0xf9, 0xfd, 0x36, + 0xbe, 0x3e, 0xd2, 0x4d, 0x1b, 0x6d, 0x93, 0xaa, 0x7e, 0xc6, 0xfd, 0x61, 0x8d, 0x0d, 0xae, 0xbb, + 0x6d, 0xc7, 0x6f, 0xca, 0xbd, 0xa3, 0x51, 0x8b, 0x99, 0x67, 0x5c, 0x23, 0x7d, 0x75, 0x2d, 0x59, + 0x64, 0x2a, 0xf2, 0x96, 0xb7, 0xc7, 0x9a, 0x1d, 0x6a, 0x30, 0x51, 0xb7, 0x47, 0xb3, 0x59, 0xa6, + 0xf3, 0x81, 0xba, 0x1e, 0x5a, 0xf6, 0x7d, 0xd9, 0x75, 0xfc, 0x0c, 0x12, 0x77, 0x6f, 0xc6, 0x3c, + 0x75, 0xaa, 0xda, 0x2b, 0xc2, 0xbc, 0x7e, 0x20, 0xa2, 0x90, 0xd9, 0x2d, 0xc1, 0xda, 0x6b, 0x39, + 0x23, 0x90, 0x92, 0x6f, 0x07, 0x7c, 0xe8, 0x0b, 0x0e, 0xcd, 0x63, 0xb8, 0x50, 0x1a, 0x95, 0xd9, + 0x9b, 0x12, 0xa0, 0x50, 0xbe, 0x0d, 0x17, 0x5f, 0x3f, 0x0d, 0x53, 0xc0, 0x68, 0xca, 0x18, 0xf8, + 0x86, 0x6e, 0xfa, 0xd8, 0x4d, 0x41, 0x25, 0x3a, 0x5c, 0x1b, 0xf6, 0x79, 0x21, 0xc3, 0x5c, 0xb5, + 0x69, 0xbd, 0x96, 0x49, 0x67, 0xa4, 0xa2, 0x71, 0xf1, 0xa1, 0x9b, 0xfd, 0x17, 0x8c, 0x94, 0x51, + 0x7a, 0x91, 0x3b, 0x78, 0x2b, 0x7f, 0x7f, 0x68, 0x37, 0x43, 0xfc, 0xe6, 0x71, 0xa6, 0x47, 0x49, + 0x63, 0x64, 0xd5, 0x66, 0x79, 0x34, 0xac, 0xc6, 0xe4, 0x0d, 0xd5, 0xb9, 0x3f, 0x22, 0xf7, 0xc6, + 0x79, 0x5d, 0x3c, 0xb6, 0xef, 0x26, 0x43, 0x6c, 0x1b, 0xbd, 0x22, 0x54, 0x64, 0xe6, 0xc6, 0xdb, + 0xd7, 0x05, 0xef, 0xf1, 0x84, 0xde, 0xb3, 0x8b, 0x29, 0xbe, 0xd1, 0x0d, 0xbc, 0x8f, 0x66, 0xfa, + 0x6e, 0x4e, 0xc3, 0x7b, 0xd5, 0x59, 0xa2, 0x3e, 0xbc, 0xa7, 0x9c, 0xfe, 0x15, 0x2f, 0x32, 0x1b, + 0x30, 0x0d, 0xc2, 0x76, 0xb4, 0x31, 0x37, 0x0b, 0x42, 0x38, 0xbc, 0xd0, 0x4a, 0x6d, 0xbd, 0x9f, + 0xcc, 0xac, 0xc1, 0x0e, 0x17, 0xa4, 0xbe, 0x17, 0xb1, 0x92, 0x6f, 0xe6, 0x6f, 0xa7, 0x6b, 0x92, + 0x33, 0xef, 0x67, 0x5b, 0x51, 0x1c, 0xfc, 0x92, 0xd5, 0x94, 0x69, 0x1f, 0x26, 0x81, 0x3b, 0x7f, + 0x24, 0x96, 0x77, 0x65, 0xe8, 0xa4, 0x74, 0xaf, 0x18, 0xd7, 0x2b, 0x32, 0xea, 0xb1, 0xbb, 0x6b, + 0x3f, 0x4d, 0x1a, 0x2b, 0x50, 0x6d, 0x95, 0x43, 0x85, 0xab, 0xdc, 0xb7, 0x9e, 0x76, 0xe2, 0xe9, + 0xd8, 0x08, 0x8f, 0x7b, 0xee, 0x5e, 0xc3, 0xd9, 0x34, 0x3a, 0x9a, 0xe5, 0xdf, 0xab, 0x22, 0x32, + 0xe2, 0x8c, 0xfb, 0x37, 0x87, 0xe3, 0x9c, 0x32, 0xf0, 0xe6, 0x9e, 0x1a, 0xf7, 0xbe, 0xc2, 0xf5, + 0x4d, 0xc8, 0x49, 0x6f, 0x61, 0xb9, 0x15, 0x93, 0xb4, 0x6f, 0x9c, 0xf9, 0xf4, 0xec, 0xce, 0xa9, + 0xe1, 0x42, 0x6c, 0x9c, 0x34, 0x0a, 0x83, 0x1e, 0xca, 0x4d, 0x88, 0x1c, 0x41, 0x93, 0x6d, 0xff, + 0xd6, 0xff, 0x43, 0xd1, 0x1d, 0xa5, 0xc2, 0x69, 0x3d, 0x6b, 0x35, 0x9e, 0xa3, 0x2d, 0xec, 0x3d, + 0x97, 0x6e, 0x87, 0x25, 0x67, 0xc7, 0xab, 0x5a, 0x87, 0xb0, 0x70, 0x04, 0x39, 0x2e, 0x97, 0xf4, + 0xf5, 0x1d, 0x04, 0xed, 0x31, 0x25, 0x19, 0xa6, 0x21, 0xaf, 0xc2, 0x04, 0xba, 0x29, 0x95, 0x02, + 0x36, 0x4a, 0x09, 0x64, 0xa9, 0x6e, 0x29, 0x56, 0x14, 0x32, 0x12, 0xe8, 0x9f, 0x12, 0x92, 0x67, + 0x65, 0x65, 0x90, 0x42, 0x16, 0x9c, 0x0a, 0x4b, 0x97, 0x17, 0x63, 0x05, 0xe2, 0x93, 0xca, 0x9e, + 0xff, 0xd7, 0x7b, 0x73, 0x43, 0x00, 0xd9, 0x76, 0x56, 0x17, 0x7b, 0x92, 0x8a, 0xab, 0x58, 0x21, + 0x4d, 0x59, 0x48, 0xf3, 0x62, 0x89, 0x80, 0xab, 0x7f, 0x30, 0xcb, 0x84, 0xf8, 0xf4, 0x64, 0xc9, + 
0x8d, 0x20, 0x9b, 0xfb, 0x15, 0xae, 0x7b, 0xb2, 0x44, 0x18, 0x80, 0xfe, 0x34, 0x39, 0x35, 0xe2, + 0xa5, 0x0e, 0xa9, 0x58, 0xf7, 0xe5, 0x5b, 0xee, 0xe4, 0x3b, 0x49, 0x13, 0xda, 0xef, 0xeb, 0x79, + 0xea, 0x97, 0xf2, 0x3f, 0xcd, 0x25, 0x5a, 0x73, 0x85, 0xd4, 0xb6, 0x1d, 0x24, 0x03, 0x49, 0x82, + 0x3b, 0x04, 0xa2, 0x0b, 0x56, 0x5d, 0x6e, 0x41, 0x8d, 0xb6, 0x89, 0xe6, 0x88, 0xa9, 0xdb, 0x85, + 0x22, 0x8e, 0xf4, 0x3f, 0xd4, 0x59, 0xf1, 0xc0, 0x4b, 0x47, 0xa6, 0x8c, 0xa2, 0xb7, 0x23, 0x76, + 0xfc, 0x82, 0x03, 0x3c, 0x3e, 0x76, 0x46, 0xa5, 0xbb, 0x4a, 0x94, 0xb5, 0x0e, 0xe5, 0xad, 0xad, + 0xe4, 0x0c, 0x06, 0x73, 0x29, 0xc6, 0x63, 0x53, 0x45, 0x05, 0xc3, 0x95, 0x37, 0x4a, 0xf7, 0x1e, + 0xfa, 0x5d, 0x63, 0x39, 0x4b, 0x91, 0x9c, 0xb8, 0xb9, 0xcc, 0x38, 0x3b, 0xa1, 0xae, 0x43, 0x5d, + 0x78, 0x64, 0xea, 0xbf, 0xe5, 0xb9, 0xce, 0xc7, 0xb4, 0xdc, 0x97, 0x0a, 0xe4, 0x10, 0x13, 0xf9, + 0xfc, 0x56, 0x11, 0x05, 0x85, 0x7b, 0xde, 0xd2, 0x8c, 0xaf, 0xd7, 0x38, 0x4f, 0xbf, 0x7e, 0xfe, + 0x53, 0x08, 0xa7, 0x48, 0xf1, 0xcf, 0x0d, 0x55, 0x65, 0x3a, 0x42, 0xfe, 0x1e, 0x97, 0x3f, 0xfc, + 0xf4, 0xf6, 0x78, 0xfa, 0x7b, 0xc8, 0x22, 0x0b, 0xf3, 0x46, 0x50, 0x71, 0xe8, 0x07, 0x71, 0x34, + 0x66, 0xf0, 0x54, 0xed, 0x58, 0x40, 0x1b, 0x84, 0xe8, 0x42, 0x17, 0x27, 0xd6, 0xa4, 0x04, 0xcb, + 0xe0, 0x3c, 0x96, 0x6e, 0x49, 0xa3, 0x13, 0x58, 0x4d, 0x75, 0x73, 0x38, 0x1c, 0x6a, 0x31, 0xdd, + 0x0f, 0x0d, 0xb4, 0x6a, 0x50, 0x8b, 0xcf, 0x10, 0x75, 0x2f, 0x83, 0x64, 0x99, 0xb7, 0x8f, 0x2e, + 0x08, 0xf2, 0xe9, 0xdf, 0xe3, 0x8c, 0x5e, 0x5d, 0x83, 0x4d, 0xea, 0xbe, 0xa5, 0x3e, 0xdb, 0xbf, + 0xd9, 0xaf, 0x59, 0x79, 0xfe, 0x64, 0xd3, 0x6e, 0xe7, 0x23, 0xda, 0x0a, 0x27, 0xfd, 0x3a, 0xb5, + 0x10, 0xe1, 0xf5, 0x7e, 0xa6, 0x54, 0x55, 0xd3, 0x87, 0x20, 0xf3, 0xdc, 0xa5, 0x17, 0x1c, 0x0d, + 0xc2, 0x8b, 0x83, 0x79, 0xfb, 0x03, 0xa6, 0xea, 0xf1, 0x84, 0xb3, 0xe1, 0x73, 0xb4, 0x85, 0x1b, + 0xad, 0x48, 0x62, 0x1d, 0xa9, 0xc3, 0x6f, 0x35, 0x9b, 0x79, 0x32, 0x2a, 0x42, 0xa9, 0xd1, 0xbb, + 0x97, 0x87, 0x6e, 0x1f, 0xcc, 0xe5, 0x8c, 0xa2, 0x9c, 0x90, 0x37, 0x43, 0x11, 0x4c, 0xa6, 0xec, + 0x01, 0xc2, 0xa2, 0xe5, 0xe4, 0x36, 0x7d, 0xe3, 0x06, 0xa7, 0x37, 0xfe, 0x24, 0xb9, 0x97, 0x03, + 0x15, 0xbe, 0x3e, 0xb7, 0xe7, 0x46, 0x14, 0x6f, 0xb4, 0xfb, 0x31, 0x82, 0xcb, 0x97, 0x8e, 0xbc, + 0xf1, 0xd1, 0x7a, 0x0e, 0xa9, 0xcf, 0x56, 0xa4, 0x02, 0xba, 0x3f, 0x57, 0xf9, 0xe8, 0x0e, 0xcb, + 0x7d, 0xea, 0x2f, 0x20, 0xf1, 0x3e, 0xbb, 0x3d, 0x9d, 0x49, 0x4e, 0xd4, 0x12, 0x7a, 0xb1, 0xac, + 0x43, 0xd9, 0x17, 0xd9, 0xd7, 0xa4, 0x5f, 0x1b, 0x87, 0x8c, 0xfb, 0xf8, 0x21, 0x67, 0x37, 0x10, + 0xbc, 0xc8, 0xcd, 0x43, 0x30, 0x4b, 0xa8, 0x10, 0xec, 0x74, 0x86, 0x5b, 0x61, 0x98, 0x40, 0x4b, + 0x51, 0x61, 0x0f, 0x07, 0x27, 0x15, 0x5d, 0x18, 0x17, 0xcd, 0xcd, 0x52, 0x7a, 0xa8, 0x08, 0x24, + 0x65, 0x68, 0x8c, 0x77, 0xea, 0x2b, 0x74, 0x5b, 0xf8, 0x79, 0x5e, 0xb0, 0xaf, 0x2e, 0x7a, 0x46, + 0x43, 0x8e, 0x8f, 0x2d, 0xff, 0x7b, 0x3f, 0x35, 0x3f, 0xb1, 0xba, 0xcc, 0x97, 0x9f, 0x4b, 0x35, + 0x08, 0x87, 0x9d, 0x66, 0xfa, 0xb5, 0xf2, 0x05, 0xa9, 0x44, 0xe1, 0x79, 0x28, 0xd6, 0x84, 0x85, + 0x6f, 0xb5, 0x29, 0xf9, 0xb3, 0x85, 0xcb, 0x73, 0x3b, 0xae, 0x04, 0xc5, 0xe2, 0xfa, 0x5e, 0xae, + 0x42, 0xa9, 0x32, 0x76, 0x9b, 0xf2, 0xb2, 0xf5, 0x32, 0xc9, 0x10, 0xdc, 0xa7, 0x27, 0xcf, 0xbc, + 0xca, 0x89, 0x97, 0x57, 0x31, 0x24, 0x89, 0x03, 0x9a, 0x95, 0xe9, 0x59, 0xf4, 0xe8, 0xd7, 0xe0, + 0x34, 0xb9, 0x11, 0xe4, 0x61, 0x92, 0x1a, 0x6a, 0xde, 0xfc, 0x7c, 0x5f, 0xd7, 0x7a, 0xcd, 0x05, + 0x35, 0x98, 0x77, 0x86, 0x20, 0xb4, 0x9c, 0x03, 0xa4, 0x56, 0x7c, 0xf9, 0x8a, 0x53, 0xfa, 0xdf, + 0x1f, 0x1c, 0x59, 0x09, 
0xbf, 0xe7, 0xd0, 0xb9, 0xbd, 0x36, 0xd3, 0xdc, 0x31, 0xa7, 0x40, 0xc1, + 0x67, 0x48, 0x57, 0xb3, 0x43, 0x14, 0x1f, 0xff, 0x7b, 0x3f, 0xd0, 0x34, 0xfb, 0x14, 0x0b, 0xd7, + 0x9c, 0xe6, 0x25, 0xc3, 0xa9, 0xc8, 0x5b, 0x3c, 0xb4, 0xbe, 0x3d, 0x7f, 0xae, 0x31, 0x42, 0x3a, + 0xc5, 0x47, 0x0d, 0x41, 0x0e, 0xc7, 0x70, 0xe3, 0x2a, 0x2a, 0xe0, 0xc6, 0x14, 0x70, 0x84, 0xb2, + 0xb6, 0xd2, 0xa2, 0x08, 0xfc, 0x9b, 0xbd, 0xa3, 0x5a, 0xd4, 0x54, 0x3a, 0x36, 0x49, 0xed, 0xdf, + 0xc9, 0xa7, 0x7f, 0xa2, 0x8b, 0xd4, 0x42, 0xf5, 0xcb, 0x86, 0x9c, 0x36, 0x49, 0xb0, 0x7f, 0x18, + 0x66, 0x42, 0x40, 0x98, 0xd0, 0x15, 0x51, 0x20, 0x75, 0x07, 0xd7, 0x96, 0x06, 0x75, 0xca, 0xc2, + 0xa7, 0xeb, 0x67, 0x79, 0x0e, 0x78, 0x0a, 0xbc, 0x97, 0x04, 0xd4, 0x0c, 0xc3, 0xa3, 0x2d, 0xea, + 0x8a, 0x73, 0x9f, 0xad, 0x8e, 0xdc, 0xb2, 0x88, 0x18, 0xbe, 0xc9, 0x6a, 0xa9, 0x01, 0x03, 0xcd, + 0xe6, 0x08, 0xda, 0xbd, 0x07, 0x2f, 0xa9, 0x12, 0x7c, 0xc3, 0xb3, 0x2b, 0x84, 0x10, 0xde, 0x5f, + 0x5b, 0x38, 0x2f, 0x9c, 0xb1, 0x4c, 0x11, 0x3d, 0xc9, 0x65, 0xff, 0xd7, 0xf8, 0x7f, 0xf5, 0x24, + 0xfa, 0x97, 0x7f, 0xf9, 0x97, 0x7f, 0xf9, 0x97, 0x7f, 0xf9, 0x97, 0xff, 0xaf, 0xf8, 0x1f, 0xee, + 0x48, 0x8f, 0x20, 0x00, 0x23, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8960, // uncompressed data size (bytes) + 6823, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x06, 0x62, 0x0e, 0x08, 0x13, 0x4c, 0x48, 0x41, 0x69, + 0x20, 0x00, 0x00, 0x6b, 0x3e, 0x38, 0x7d, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_dbg_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 384 +// COMPRESSED SIZE (bytes): 397 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_sig_dbg_data[] = +{ + 0x01, 0x80, 0x01, 0x7f, 0xfe, 0x3f, 0x1c, 0x05, 0x53, 0xf9, 0x05, 0x6c, 0x56, 0x8b, 0x5d, 0xaf, + 0xb8, 0x82, 0x7d, 0xc7, 0x76, 0xcc, 0x6e, 0x87, 0x43, 0x65, 0xfe, 0x0e, 0x2e, 0x01, 0x85, 0x48, + 0x1c, 0xb9, 0xa6, 0xdf, 0xae, 0x1c, 0x81, 0x50, 0x39, 0x2f, 0x35, 0xca, 0x6f, 0x7b, 0x54, 0xef, + 0x04, 0x3d, 0x1a, 0x3c, 0xd9, 0x5e, 0x1f, 0xa6, 0x39, 0xd2, 0x1e, 0x2e, 0xdd, 0xd6, 0x73, 0x49, + 0xc2, 0x44, 0xaf, 0x0b, 0x4b, 0x1c, 0xab, 0xea, 0xef, 0x8d, 0x25, 0xc4, 0xc3, 0x5e, 0xd7, 0x81, + 0x09, 0x5c, 0x9f, 0x83, 0x3b, 0x9b, 0xda, 0xe3, 0xcb, 0x98, 0x1a, 0x05, 0x6f, 0x87, 0x07, 0x41, + 0x24, 0xf6, 0x0c, 0x22, 0x3e, 0x37, 0xfa, 0x3c, 0xa7, 0xda, 0x02, 0xaf, 0x24, 0xac, 0xc1, 0xe0, + 0x94, 0xb3, 0x68, 0x0f, 0x77, 0x35, 0x76, 0xa5, 0x68, 0x21, 0xc9, 0x22, 0x21, 0xff, 0xd3, 0x77, + 0xfa, 0xdb, 0x55, 0x42, 0x7d, 0x1e, 0x59, 0xf5, 0x23, 0x62, 0xab, 0xcf, 0xe2, 0x7b, 0xf9, 0x8b, + 0xec, 0xc4, 0xcb, 0x0d, 0xe0, 0x62, 0xb0, 0x9f, 0xd8, 0x42, 0xaf, 0x69, 0xe2, 0x29, 0xdd, 0xcd, + 0x68, 0xc6, 0x7c, 0x20, 0xd9, 0x41, 0x6a, 0xf4, 0x2d, 0x1e, 0xe9, 0xb4, 0x5a, 0x27, 0x87, 0x3b, + 0xfb, 0x87, 0xdb, 0x42, 0x30, 0xf5, 0xb6, 0xd4, 0xbf, 0x22, 0x33, 0x75, 0x65, 0x0f, 0xfc, 0xfd, + 0xd9, 0x6a, 0x1e, 0x39, 0x82, 0xe7, 0x75, 0xac, 0xb2, 0x53, 0x0c, 0x4e, 0xcb, 0x37, 0x5f, 0x32, + 0xf2, 0xc2, 0x0f, 0xf6, 0x79, 0x2c, 0xa9, 0x28, 0x1d, 0x84, 0xb5, 0xf3, 0xbc, 0xf9, 0x8c, 0xf4, + 0x1f, 0x9b, 0x58, 0xe4, 0x69, 0x4c, 0x56, 0x74, 0xa3, 0xb1, 0x17, 0xae, 0xbc, 0x64, 0x64, 0xdc, + 0x00, 0x2d, 0xd3, 0xa1, 0x69, 0xeb, 0xbe, 0x9b, 0x12, 0x08, 0x04, 0x92, 0xba, 0x9c, 0x23, 0x56, + 0xd8, 0x95, 0xae, 0xea, 0xf2, 0x4b, 0x32, 0x6c, 0x55, 0xb5, 0x56, 0x2c, 0x86, 0xb0, 0x7b, 0x04, + 0x8d, 0xb4, 0xb8, 0x38, 0xdd, 0x27, 0xd0, 0x83, 0x77, 0xe8, 0x5b, 0x7d, 0x6d, 0x26, 0x89, 0xc0, + 0x11, 0x72, 0xb1, 0xbe, 0xc6, 0x45, 0xff, 0xa8, 0x71, 0xb4, 0xba, 0x96, 0x78, 0xf2, 0x9d, 0xbb, + 0x96, 0x9c, 0xcf, 0xe8, 0xb6, 0x12, 0x25, 0xe2, 0x6c, 0x2a, 0x1f, 0x3a, 0xc6, 0x48, 0x09, 0x82, + 0x79, 0x9e, 0x7c, 0x2c, 0x62, 0xf2, 0xae, 0xc0, 0xb7, 0x7a, 0x97, 0x74, 0xec, 0x26, 0x10, 0x42, + 0xa1, 0x76, 0x79, 0x67, 0xee, 0x9e, 0x39, 0x3f, 0x32, 0x17, 0x0b, 0x0f, 0xee, 0xd4, 0xa2, 0xe4, + 0xea, 0xa8, 0xe0, 0x73, 0x8d, 0x81, 0x25, 0xf8, 0x9e, 0xff, 0x30, 0xd1, 0xe8, 0x2a, 0x11, 0x68, + 0x06, 0x8e, 0xcd, 0x26, 0xd3, 0x56, 0x31, 0xce, 0xb8, 0x00, 0xab, 0x52, 0xca, 0xbd, 0x7b, 0xb5, + 0x4f, 0xc2, 0x3e, 0xa1, 0xbf, 0x88, 0x0c, 0xa6, 0xd0, 0x80, 0x01, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 384, // uncompressed data size (bytes) + 397, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_sig_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? 
+ NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_prod_ga100 +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 384 +// COMPRESSED SIZE (bytes): 397 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_sig_prod_data[] = +{ + 0x01, 0x80, 0x01, 0x7f, 0xfe, 0xf1, 0x3b, 0x42, 0xb1, 0xf2, 0xda, 0xaa, 0x9e, 0x07, 0x8c, 0x0f, + 0x9f, 0x8d, 0xb8, 0xb7, 0x2d, 0x59, 0xd0, 0x9e, 0xd3, 0xd2, 0x4a, 0xec, 0x0f, 0x8c, 0x84, 0xe0, + 0xd0, 0x81, 0xba, 0x92, 0xc9, 0xf5, 0x4c, 0xa7, 0x9b, 0x3c, 0x7e, 0xa7, 0xb9, 0x61, 0x6c, 0x55, + 0xcc, 0xef, 0xcf, 0xfb, 0x2d, 0x46, 0x1d, 0x08, 0xf7, 0x56, 0xa6, 0x8f, 0x28, 0xd9, 0xc4, 0x69, + 0x92, 0x48, 0x11, 0xb0, 0xad, 0x01, 0xe9, 0x95, 0x0f, 0x62, 0x95, 0xdd, 0x0c, 0xbb, 0xac, 0x02, + 0x65, 0x2d, 0x29, 0x7a, 0x98, 0xcd, 0x36, 0xdd, 0x01, 0xb1, 0x22, 0xba, 0x20, 0xad, 0x90, 0x01, + 0xe1, 0x57, 0x5a, 0x07, 0x6f, 0x8d, 0xaa, 0xa1, 0x8f, 0x00, 0xaa, 0xa9, 0x95, 0xae, 0xbe, 0xa6, + 0xa9, 0x37, 0xeb, 0x29, 0x8f, 0x52, 0x33, 0x31, 0x4b, 0xef, 0x97, 0x98, 0x20, 0x62, 0x38, 0x81, + 0x6d, 0x34, 0xb8, 0x0f, 0x12, 0x72, 0x1f, 0x50, 0xbc, 0xdc, 0x29, 0x16, 0xe9, 0x01, 0xb1, 0xbf, + 0xad, 0x96, 0x25, 0x7c, 0xa1, 0xc6, 0x63, 0x5e, 0xe6, 0x45, 0x9a, 0x99, 0xf6, 0x5e, 0x2d, 0x95, + 0xcb, 0xd6, 0x49, 0x4c, 0x8a, 0x97, 0xea, 0x51, 0x07, 0xe2, 0x67, 0xf6, 0xca, 0x20, 0xda, 0x59, + 0x17, 0x71, 0x26, 0x0b, 0xdc, 0xd6, 0x1a, 0x84, 0x01, 0xca, 0xed, 0xea, 0x5c, 0x82, 0x1d, 0xb4, + 0x91, 0x9f, 0xf1, 0x25, 0xf4, 0x96, 0x9c, 0xee, 0xe7, 0xc9, 0x8e, 0xe5, 0xfe, 0x0e, 0x4d, 0x3d, + 0xdd, 0x05, 0xb8, 0xac, 0x15, 0x1f, 0x7a, 0x8e, 0xc0, 0x16, 0xfd, 0x39, 0xbc, 0xc6, 0x1b, 0x10, + 0x63, 0x06, 0xa9, 0xe9, 0x1f, 0x55, 0xd8, 0x94, 0xb8, 0xd8, 0x17, 0x19, 0xfb, 0x0b, 0x8c, 0xa2, + 0x77, 0xf5, 0xf6, 0x43, 0x6b, 0xa1, 0x64, 0x43, 0xe5, 0xee, 0x91, 0x12, 0x23, 0x2a, 0x2d, 0x7a, + 0x06, 0x07, 0x38, 0x55, 0xa9, 0x6e, 0x54, 0x6a, 0x05, 0xfe, 0x0a, 0x8f, 0x86, 0x8b, 0x82, 0xbb, + 0xd4, 0x3a, 0x4b, 0x08, 0xbc, 0x8d, 0x25, 0x2d, 0x63, 0x09, 0x26, 0x40, 0x58, 0x46, 0x15, 0x35, + 0x5a, 0xbe, 0x86, 0x57, 0x76, 0xf5, 0x3d, 0xd7, 0x96, 0x47, 0x44, 0xa1, 0x8f, 0x06, 0xb4, 0x5c, + 0x6a, 0x74, 0x67, 0xbc, 0xfa, 0x9d, 0x7a, 0xe1, 0xac, 0xa1, 0x17, 0x97, 0xb5, 0xf7, 0x46, 0x7f, + 0x15, 0xc8, 0x12, 0x7c, 0xeb, 0x58, 0x8e, 0xcc, 0xe9, 0x7a, 0x3e, 0x9b, 0x20, 0xcf, 0xad, 0x37, + 0xd0, 0x74, 0x3c, 0xc9, 0x70, 0xe3, 0x81, 0x85, 0x93, 0xe0, 0x5a, 0x7b, 0x0f, 0x41, 0xbb, 0xd4, + 0xe6, 0x37, 0xe3, 0xf7, 0xf8, 0xa5, 0xdb, 0x87, 0xed, 0xd4, 0x92, 0x2b, 0xba, 0x6f, 0x53, 0xb4, + 0x03, 0x81, 0xda, 0x26, 0x7d, 0xa4, 0x4a, 0x14, 0xc0, 0x96, 0x25, 0x2c, 0x41, 0xd9, 0x8a, 0x7d, + 0xd9, 0x53, 0x5b, 0x7f, 0x18, 0x1f, 0x15, 0x0e, 0x63, 0x80, 0x01, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 384, // uncompressed data size (bytes) + 397, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_sig_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_ga100_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_patch_loc_data[] = +{ + 0x00, 0x1d, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_ga100_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_ga100_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA100("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga100/unload/g_booteruc_unload_ga100_ga100_rsa3k_1_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA100_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA100_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA100_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterUnloadUcode_GA100 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA100_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_GA100(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterUnloadUcode_GA100; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c new file mode 100644 index 000000000..c2bfa9681 --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_GA102.c @@ -0,0 +1,1391 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 9216 +// COMPRESSED SIZE (bytes): 7365 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_image_dbg_data[] = +{ + 0xed, 0xd9, 0x45, 0x50, 0x1c, 0x80, 0xb2, 0xa8, 0x61, 0xdc, 0x35, 0xb8, 0xbb, 0x3b, 0x04, 0x4d, + 0x70, 0x77, 0x06, 0xf7, 0xe0, 0x16, 0x1c, 0x32, 0x10, 0x82, 0x13, 0x7c, 0xf0, 0xc1, 0x25, 0xb8, + 0x0d, 0xee, 0x3e, 0xb8, 0x07, 0x77, 0x09, 0xee, 0xee, 0x0e, 0xef, 0xad, 0xcf, 0xfa, 0x56, 0xdd, + 0xc5, 0x3d, 0xdf, 0xae, 0xd7, 0x5d, 0xf5, 0x2f, 0xba, 0x61, 0x60, 0xfe, 0x6f, 0xfb, 0x42, 0x93, + 0x3d, 0x52, 0xe5, 0xe7, 0x01, 0x0a, 0x75, 0x8e, 0xd0, 0x8e, 0x55, 0xba, 0x6c, 0x0d, 0x2c, 0xb8, + 0x75, 0xa5, 0x6c, 0xf7, 0x67, 0x3b, 0xb5, 0xe8, 0x95, 0x7f, 0xcc, 0x7c, 0x6a, 0xdf, 0x6f, 0x4f, + 0x1e, 0x8a, 0x61, 0x0b, 0x6d, 0x45, 0xad, 0xf0, 0x4d, 0x71, 0x3c, 0x63, 0x5e, 0x69, 0x5e, 0x91, + 0xb6, 0x27, 0x4b, 0xfa, 0x1c, 0x84, 0x5a, 0x13, 0x26, 0x56, 0x5b, 0x49, 0xe6, 0xb3, 0x5c, 0x92, + 0x0f, 0x38, 0xee, 0xaf, 0xd8, 0xdf, 0x39, 0x35, 0x96, 0xfe, 0x92, 0x48, 0xf4, 0xed, 0xfb, 0x40, + 0xfb, 0x3e, 0xaa, 0xc1, 0xa5, 0x24, 0x92, 0xb0, 0x4a, 0x6f, 0xd4, 0xe4, 0xac, 0xbf, 0xda, 0x36, + 0x3d, 0xb6, 0x2c, 0x10, 0x8d, 0x19, 0xc3, 0x7c, 0x42, 0xc8, 0x80, 0xb8, 0x26, 0xa0, 0xde, 0x78, + 0x59, 0xd7, 0x9d, 0xc6, 0x93, 0x14, 0x0e, 0x80, 0x92, 0x4c, 0xd2, 0x86, 0x21, 0xb6, 0xdd, 0x58, + 0x7c, 0x7d, 0xfb, 0xa9, 0xd0, 0x6e, 0x44, 0x7d, 0xea, 0x1a, 0xe9, 0x5b, 0x61, 0xfb, 0x49, 0x50, + 0xca, 0x87, 0x15, 0x2a, 0x66, 0xa6, 0x47, 0xe9, 0xce, 0xad, 0x6f, 0x51, 0xc0, 0x55, 0x42, 0x4f, + 0x25, 0x77, 0xe0, 0xf4, 0xe0, 0x12, 0xbc, 0xb1, 0x63, 0xde, 0x21, 0x90, 0x38, 0x72, 0x6b, 0x45, + 0x72, 0x8d, 0x2c, 0x4a, 0x09, 0xee, 0x7c, 0xac, 0x3f, 0xf7, 0xfe, 0x41, 0x46, 0x5a, 0xa5, 0x42, + 0x53, 0x31, 0x0e, 0x79, 0x4c, 0x27, 0xd9, 0xa0, 0x18, 0x1e, 0x89, 0xc8, 0x92, 0xa5, 0x71, 0x51, + 0x52, 0x7b, 0x08, 0x54, 0x3b, 0xeb, 0xb3, 0xf0, 0xe4, 0x08, 0x3e, 0x0f, 0xe5, 0x73, 0x8b, 0x0f, + 0x5a, 0x1a, 0xca, 0x13, 0xa9, 0x8a, 0xca, 0xfa, 0xc3, 0xc2, 0x01, 0xd1, 0x78, 0x7e, 0x62, 0x23, + 0xb3, 0xce, 0x3d, 0x3d, 0xbb, 0x4b, 0x1d, 0xb7, 0x6d, 0xf9, 0xa5, 0xdd, 0xfc, 0x60, 0xd2, 0xc9, + 0x20, 0x79, 0xf0, 0x99, 0x57, 0x08, 0xa9, 0x18, 0x18, 0x9d, 0x71, 0xf9, 0xd9, 0xc8, 0x24, 0x63, + 0x77, 0x5c, 0x8b, 0x7f, 0x24, 0x08, 0x08, 0xb7, 0xb8, 0x80, 0xb7, 0xe7, 0x23, 0xc7, 0xe7, 0x96, + 0x54, 0x3f, 0x2d, 0xc0, 0xe8, 0xc8, 0xc5, 0x40, 0xb9, 0x54, 0xa9, 0xe8, 0xef, 0xbf, 0x11, 0x3a, + 0x38, 0xbd, 0xf6, 0x34, 0x4b, 0x09, 0xdd, 0x1c, 0x64, 0x09, 0x7e, 0x93, 0x2a, 0xa6, 0x83, 0xa2, + 0x9b, 0xf1, 0x07, 0x26, 0x75, 0x48, 0x3f, 0xe8, 0x34, 0x15, 0x34, 0x29, 0x2d, 0xca, 0x53, 0x98, + 0x89, 0xf7, 0x9d, 0x6d, 0xa1, 0x18, 0x50, 0x4f, 0x3c, 0x88, 0x73, 0xa3, 0x9a, 0x19, 0x8e, 0x94, + 0x1b, 0xed, 0x2b, 0x0a, 0xe9, 0x85, 0xcd, 0xc0, 0x55, 0xaa, 0x2b, 0x3f, 0xa4, 0x16, 0xe7, 0xe9, + 0xa3, 0x03, 0x98, 0xab, 0x91, 0x03, 0xbd, 0xb3, 0x9b, 0xbf, 0xed, 0x28, 0x41, 0xaa, 0x57, 0x67, + 0x57, 0xbd, 0x81, 0x61, 0xcd, 0xa2, 0xcd, 0x6c, 0x38, 0x47, 0xe5, 0x14, 0x16, 0x9c, 0xf8, 0x88, + 0xf2, 0x14, 0xbc, 0x5f, 0x64, 0xc1, 0xf9, 0x46, 0xd0, 0x5b, 0xd5, 0xb2, 0xe8, 0xc6, 0x1a, 0x6e, + 0x81, 0x4f, 0xb9, 0x4c, 0xd9, 0x63, 0x14, 0x31, 0x24, 0x9c, 0xc7, 0x0b, 0x88, 0xd0, 0x9e, 0xbb, + 0xe0, 0x3e, 0x93, 0xdf, 0x68, 0x5f, 0xad, 0xb7, 0xc1, 
0xfc, 0x1d, 0x1f, 0x4c, 0xc3, 0x11, 0x96, + 0x4e, 0xba, 0xfa, 0xde, 0xd2, 0x53, 0x18, 0xec, 0x2f, 0x9e, 0x84, 0x28, 0xde, 0x71, 0x5b, 0x73, + 0x62, 0xbf, 0xd4, 0xdc, 0x3d, 0x8e, 0x82, 0x9e, 0x59, 0xc6, 0xe6, 0x3a, 0x23, 0x8c, 0xe7, 0x4a, + 0x2c, 0x79, 0x96, 0xd9, 0xbd, 0x54, 0xc6, 0x39, 0xd2, 0x0e, 0x45, 0xff, 0x05, 0xa9, 0xa6, 0x6e, + 0x5b, 0x7a, 0xa7, 0x3f, 0xe5, 0x0f, 0xf8, 0xd2, 0x0a, 0x87, 0xcf, 0x11, 0x77, 0xd2, 0x28, 0x6d, + 0x99, 0x2a, 0xa4, 0x8e, 0xeb, 0x7d, 0xa4, 0x53, 0x10, 0x2f, 0xca, 0xc2, 0x96, 0x93, 0x56, 0x67, + 0x95, 0x65, 0x9e, 0x52, 0x1d, 0x6d, 0x5c, 0x8b, 0x80, 0xdf, 0xdf, 0xeb, 0xde, 0x3f, 0x5a, 0x9f, + 0xef, 0xd5, 0xea, 0x79, 0x67, 0xd9, 0x8b, 0xc3, 0x0d, 0x38, 0x58, 0x99, 0x16, 0x85, 0xb1, 0xf7, + 0x59, 0x73, 0x49, 0xd6, 0x18, 0xc8, 0xfc, 0x5a, 0x83, 0xcd, 0x89, 0x83, 0x63, 0x29, 0xd2, 0xe6, + 0xa1, 0x95, 0x3b, 0x2b, 0xe9, 0x9d, 0x76, 0x21, 0x0e, 0x16, 0x20, 0xc9, 0x32, 0x97, 0xa6, 0x8d, + 0x29, 0xc8, 0xe0, 0x16, 0x50, 0x75, 0x3d, 0x77, 0xb4, 0x81, 0x6a, 0x66, 0x4a, 0xf6, 0xf9, 0x35, + 0x31, 0x50, 0xc3, 0x37, 0x78, 0x64, 0x50, 0x71, 0xc0, 0xdb, 0x26, 0x06, 0xf6, 0x63, 0x1c, 0x27, + 0xd2, 0xe3, 0xd6, 0x66, 0x9c, 0xec, 0x06, 0x4b, 0xb4, 0x22, 0x5f, 0x08, 0x74, 0xae, 0x32, 0x77, + 0x89, 0x5d, 0x32, 0xe7, 0xe3, 0x9e, 0x0d, 0xaa, 0x29, 0xb9, 0xfa, 0xe6, 0x79, 0x5b, 0x86, 0x96, + 0xbf, 0xfb, 0xa3, 0x6d, 0x68, 0xde, 0xe2, 0x75, 0xb4, 0xea, 0xa8, 0xbe, 0x7c, 0x39, 0xbe, 0x17, + 0x34, 0x54, 0x8c, 0x8e, 0xd7, 0xb9, 0xd8, 0x91, 0x64, 0x3c, 0x38, 0x6f, 0x96, 0xc0, 0x59, 0xd2, + 0xf4, 0x8f, 0x43, 0x65, 0x8b, 0xc3, 0x49, 0xc0, 0x59, 0x9d, 0x95, 0x18, 0x4f, 0xdf, 0xda, 0xfe, + 0xe0, 0xae, 0x27, 0x41, 0x58, 0xaa, 0x88, 0x6e, 0xcd, 0x8b, 0xcf, 0x82, 0xee, 0xf1, 0x4f, 0x90, + 0xe5, 0xb2, 0x19, 0x9d, 0xfa, 0x54, 0x3e, 0xcd, 0xdc, 0x56, 0x97, 0xf1, 0x15, 0x81, 0x8c, 0xc1, + 0x9e, 0xa9, 0xf1, 0xd5, 0xf6, 0xb2, 0xa2, 0x97, 0xcb, 0xc1, 0xab, 0x23, 0x81, 0xf9, 0x16, 0xed, + 0xe6, 0x9b, 0x50, 0x8f, 0x4d, 0x93, 0xbd, 0xf0, 0x9a, 0x9c, 0xdd, 0x84, 0x5c, 0xef, 0x1d, 0xe1, + 0xcf, 0x30, 0xfc, 0xc1, 0x02, 0xcc, 0xe3, 0xdf, 0xfe, 0x08, 0xa2, 0x8e, 0x0a, 0x87, 0xcd, 0x04, + 0xb1, 0x2f, 0xe8, 0x7f, 0x5c, 0xb4, 0x99, 0xd0, 0x8b, 0x22, 0x87, 0x01, 0xae, 0x16, 0xdb, 0xbb, + 0xcf, 0x81, 0xc8, 0x5d, 0x66, 0x82, 0xbb, 0xc9, 0x6a, 0x7b, 0x4b, 0xdf, 0xed, 0x8a, 0x8a, 0xc2, + 0x43, 0xab, 0x11, 0xad, 0x38, 0x11, 0x0d, 0x5b, 0x52, 0xa2, 0xd5, 0xd5, 0x93, 0xa8, 0xfb, 0x7e, + 0x54, 0xa2, 0x90, 0x4a, 0x78, 0x1b, 0x9c, 0x8a, 0x46, 0x03, 0x30, 0xf8, 0x2d, 0xe0, 0x6b, 0x03, + 0x6b, 0x99, 0x06, 0xb2, 0x46, 0xae, 0xb2, 0x15, 0x7f, 0x44, 0x2a, 0x19, 0x58, 0xad, 0x4b, 0x62, + 0x80, 0x5b, 0x37, 0x0e, 0x04, 0xb4, 0x10, 0xe0, 0x1c, 0xbb, 0x4c, 0xcb, 0x0e, 0x77, 0x0b, 0x0f, + 0x34, 0xc3, 0x57, 0x67, 0x91, 0x7a, 0x8e, 0x13, 0x89, 0x96, 0x08, 0x19, 0xbc, 0xcd, 0x0c, 0xcc, + 0xb0, 0xca, 0x9e, 0xd4, 0xc2, 0x7d, 0x36, 0x0b, 0x3f, 0xe7, 0xba, 0xd9, 0x13, 0x24, 0xc9, 0xf1, + 0x30, 0x79, 0x27, 0xf4, 0xe2, 0xad, 0xa7, 0xa5, 0x06, 0x37, 0xf0, 0xe8, 0x3b, 0x1e, 0x47, 0x36, + 0x61, 0x75, 0x63, 0x47, 0x63, 0x53, 0x71, 0x14, 0xd6, 0x0e, 0xb3, 0x21, 0x0d, 0xc9, 0xe8, 0x06, + 0xe8, 0x3e, 0x69, 0xc0, 0x37, 0x9b, 0xd1, 0x89, 0x0a, 0x47, 0xe1, 0x63, 0xa4, 0x7c, 0xad, 0xf7, + 0x0e, 0x68, 0x65, 0xfb, 0x35, 0x19, 0xc5, 0x7e, 0x52, 0xce, 0x8d, 0xd0, 0x2e, 0x01, 0x8f, 0x36, + 0x95, 0x78, 0x06, 0xc1, 0xb1, 0x0d, 0xf0, 0xb3, 0x65, 0x3a, 0x2a, 0x0f, 0x95, 0x99, 0x1a, 0x32, + 0x7b, 0x15, 0x86, 0xda, 0xb3, 0x88, 0xb1, 0xc4, 0x26, 0xa0, 0xf3, 0x2f, 0xe0, 0x21, 0xe6, 0x68, + 0xb1, 0x1d, 0xb7, 0xda, 0x3e, 0xa2, 0x0a, 0xfc, 0x70, 0x78, 0xc5, 0x45, 0xc5, 
0x0d, 0xf1, 0xcc, + 0x1b, 0xb2, 0xea, 0x19, 0xba, 0x61, 0x10, 0xc8, 0xf4, 0xdd, 0x25, 0x39, 0x84, 0x60, 0xab, 0x50, + 0xaa, 0xba, 0x42, 0x75, 0xf1, 0x3d, 0x94, 0xf6, 0x57, 0x01, 0x6e, 0x11, 0xbc, 0x71, 0x1c, 0x5c, + 0x4a, 0xb8, 0x49, 0x16, 0xd2, 0x42, 0xfa, 0x0d, 0x16, 0x92, 0x77, 0x2a, 0x7c, 0x17, 0x5a, 0x28, + 0xac, 0x6f, 0x9e, 0xf7, 0xfa, 0xb6, 0x6a, 0x9e, 0x55, 0x6a, 0xf0, 0x99, 0x6a, 0x23, 0xba, 0xe6, + 0x76, 0xa4, 0xf5, 0x4c, 0xa5, 0xc8, 0x25, 0x15, 0xa4, 0x55, 0x3c, 0x68, 0xc1, 0x02, 0x7d, 0x76, + 0x98, 0x4a, 0xe6, 0x0d, 0xad, 0x14, 0x7f, 0x9b, 0x02, 0x2e, 0xbb, 0x0a, 0xf4, 0x93, 0xed, 0x7c, + 0xd2, 0xf5, 0xe4, 0xfd, 0x9c, 0x63, 0x91, 0x79, 0x4d, 0xa1, 0x5a, 0x05, 0xc8, 0x73, 0x63, 0x8b, + 0x11, 0x8a, 0x19, 0x4d, 0x64, 0xf3, 0xc5, 0x26, 0x27, 0x86, 0x9c, 0xa3, 0xda, 0x13, 0x18, 0x58, + 0xa5, 0x0a, 0x22, 0x5a, 0xf9, 0xdc, 0xfc, 0x22, 0x5f, 0x64, 0x9b, 0xba, 0x93, 0x92, 0xf1, 0xf8, + 0x62, 0xd5, 0x88, 0x40, 0x5e, 0xc1, 0x5a, 0xf6, 0x31, 0xda, 0x30, 0x59, 0x93, 0xa4, 0x8f, 0x34, + 0x15, 0x6e, 0x45, 0x5f, 0x62, 0x97, 0xe7, 0xfb, 0x92, 0x3b, 0x7f, 0xa1, 0xc1, 0x21, 0x19, 0x98, + 0x2e, 0x9a, 0x52, 0x54, 0x2c, 0x50, 0xcb, 0x5f, 0xb6, 0x17, 0xa2, 0xc7, 0xad, 0x9f, 0x27, 0x06, + 0x78, 0x49, 0x26, 0xed, 0xae, 0x7e, 0x04, 0xd7, 0xa5, 0x06, 0x5e, 0xc0, 0xb5, 0x9e, 0xd4, 0x77, + 0x3a, 0x46, 0xe8, 0x71, 0x93, 0x2d, 0x32, 0xc0, 0xb4, 0xb9, 0xce, 0xcf, 0x05, 0x35, 0xed, 0x4b, + 0x20, 0x9d, 0x52, 0x68, 0x36, 0xa3, 0x7d, 0x19, 0xf4, 0x7a, 0xfb, 0xf2, 0xb7, 0x04, 0x4f, 0x1b, + 0x22, 0xc6, 0x16, 0xe0, 0x12, 0xe1, 0x3c, 0x00, 0x2b, 0xc7, 0x93, 0x2b, 0xcd, 0xc6, 0xd9, 0xd5, + 0x31, 0x38, 0x87, 0x81, 0x54, 0x12, 0xc2, 0x46, 0xd3, 0xac, 0x37, 0xc3, 0x4c, 0xc2, 0xca, 0xb5, + 0xc3, 0xae, 0x82, 0x03, 0xe3, 0x72, 0x92, 0x01, 0x33, 0x69, 0xfe, 0xc8, 0x38, 0x6b, 0xa2, 0xe4, + 0xd3, 0x5b, 0xf6, 0xd9, 0x92, 0x92, 0x3f, 0x87, 0x89, 0x5b, 0x84, 0x6e, 0xac, 0x40, 0x10, 0xbe, + 0x9c, 0xa1, 0xb4, 0x5d, 0xc8, 0x5f, 0xa9, 0x53, 0x4b, 0x3c, 0x67, 0xfd, 0x0a, 0x5a, 0x24, 0xf1, + 0x92, 0x05, 0x5a, 0x9b, 0xb8, 0x67, 0xf1, 0xb6, 0xcb, 0x0a, 0xef, 0x4f, 0xed, 0x25, 0x08, 0xdb, + 0x17, 0xc5, 0x2c, 0xd2, 0x27, 0x1a, 0xa9, 0xe4, 0x22, 0x5f, 0x98, 0xff, 0x20, 0x31, 0x64, 0x5d, + 0x93, 0xac, 0x14, 0xc7, 0xf0, 0x43, 0x3b, 0x20, 0xd9, 0xfb, 0x97, 0xaa, 0xb6, 0x81, 0x81, 0xab, + 0x7e, 0xaf, 0x83, 0xd4, 0x4b, 0xce, 0xd4, 0x4d, 0x43, 0x7b, 0x68, 0xbd, 0x81, 0x88, 0x39, 0x12, + 0x7e, 0x5e, 0x6f, 0x1d, 0x6a, 0x1f, 0x1d, 0xaf, 0xd1, 0x2c, 0x92, 0x12, 0xd8, 0xb7, 0xa3, 0x0d, + 0xc4, 0x63, 0x09, 0xf4, 0xc4, 0x33, 0xec, 0xb3, 0x42, 0xb2, 0x7e, 0x70, 0xdf, 0x79, 0xd5, 0x8c, + 0xb1, 0x31, 0xfc, 0xe4, 0xe6, 0x1e, 0x0e, 0x53, 0x9f, 0xb4, 0x96, 0x40, 0x75, 0x7c, 0x9a, 0xff, + 0x53, 0xbe, 0x87, 0x41, 0x37, 0xf0, 0xaa, 0xff, 0x3a, 0x27, 0x37, 0xc4, 0x9d, 0x58, 0xad, 0xfb, + 0x2f, 0x57, 0x6a, 0xf8, 0xda, 0xbe, 0x75, 0x5b, 0x9a, 0x90, 0x4a, 0xab, 0x7d, 0x75, 0x4c, 0x8e, + 0x29, 0x9e, 0xed, 0xe9, 0xe0, 0x74, 0xf9, 0x61, 0xe8, 0xf6, 0xd4, 0xf4, 0x50, 0xc9, 0x56, 0x91, + 0x9e, 0x5f, 0x30, 0xd9, 0x5c, 0x6e, 0x61, 0xe6, 0x3e, 0x7e, 0xe3, 0xeb, 0xa6, 0x27, 0x08, 0x8e, + 0x77, 0x9c, 0x96, 0xa9, 0x47, 0x52, 0x95, 0x15, 0x2a, 0x8b, 0xa5, 0x74, 0x8e, 0xa6, 0x94, 0xb4, + 0x51, 0x64, 0xa5, 0x2e, 0xf2, 0xfa, 0x13, 0xef, 0xdf, 0x11, 0x65, 0x97, 0x2a, 0xeb, 0xae, 0x62, + 0x77, 0xad, 0x26, 0x76, 0xa5, 0xb6, 0x7a, 0xf7, 0x3e, 0x1a, 0x65, 0xb3, 0x62, 0xe6, 0xcf, 0x1a, + 0xc2, 0xce, 0x1f, 0xcd, 0xe5, 0x1c, 0xc5, 0x08, 0xef, 0x23, 0xef, 0xa6, 0xad, 0xd0, 0xdf, 0xed, + 0x94, 0x8a, 0x90, 0xbf, 0x6f, 0x36, 0x7f, 0xc1, 0xef, 0x70, 0xf0, 0x68, 0x21, 0x52, 0x1f, 0x9c, + 0x71, 
0x58, 0x21, 0x63, 0xef, 0xa7, 0x01, 0x51, 0x49, 0x40, 0x8e, 0x58, 0xae, 0x91, 0xc2, 0x7a, + 0xb1, 0x92, 0xf3, 0x71, 0xa7, 0xaf, 0x2f, 0x9c, 0xa6, 0xc6, 0x3f, 0xf5, 0x56, 0x08, 0x37, 0xb4, + 0xfd, 0xa2, 0x1a, 0x6d, 0x3b, 0x3d, 0xeb, 0x90, 0x01, 0xeb, 0x78, 0xee, 0x7f, 0xf1, 0xf0, 0x70, + 0x3c, 0x3e, 0xb8, 0x94, 0xbd, 0x00, 0xec, 0x1b, 0x4f, 0xd8, 0x79, 0x5d, 0x03, 0x4c, 0xc0, 0xee, + 0xeb, 0xf4, 0xc7, 0xf5, 0x83, 0x3b, 0x8d, 0xa2, 0xc5, 0xe6, 0x9c, 0x17, 0xd8, 0xbf, 0x4f, 0x59, + 0xd6, 0xbf, 0x1b, 0x27, 0x23, 0x16, 0x31, 0xb2, 0xab, 0x30, 0x5a, 0xfa, 0xf2, 0x24, 0xff, 0x73, + 0x3f, 0xb1, 0x60, 0x71, 0x4b, 0xd5, 0x06, 0x42, 0xa4, 0xbc, 0x0a, 0x63, 0x41, 0x44, 0x28, 0x0c, + 0x1b, 0x53, 0xa4, 0xaa, 0x6a, 0x7c, 0x30, 0xc9, 0x67, 0x2f, 0x0c, 0x72, 0x72, 0x35, 0x90, 0x54, + 0xb4, 0xc3, 0xc3, 0x3d, 0x79, 0x5a, 0x7e, 0xc5, 0xa1, 0xc5, 0x95, 0xb3, 0x9f, 0xbd, 0xf5, 0x73, + 0x5c, 0xae, 0xa1, 0x9a, 0x7a, 0x8f, 0x83, 0xf6, 0x36, 0xbf, 0x72, 0xba, 0x8b, 0x49, 0x7d, 0x83, + 0xc9, 0xd6, 0x12, 0x5b, 0x23, 0xd8, 0xe9, 0xac, 0x36, 0xc3, 0x61, 0x99, 0x96, 0x73, 0x05, 0x6f, + 0xbf, 0x79, 0x10, 0xad, 0xf5, 0xf3, 0xd0, 0x57, 0x06, 0x01, 0xb9, 0xb8, 0x1a, 0x20, 0xe2, 0x78, + 0xf2, 0x76, 0x81, 0x20, 0xcf, 0x6b, 0x58, 0xa2, 0xbe, 0x3a, 0xa7, 0x33, 0x63, 0x5b, 0x73, 0x16, + 0x11, 0x27, 0x95, 0x65, 0xc8, 0xd8, 0x83, 0xd6, 0x38, 0xcf, 0xe1, 0xa3, 0x35, 0x51, 0xaf, 0x00, + 0x68, 0xa4, 0x9b, 0x85, 0x8c, 0x7a, 0xb0, 0xfe, 0x1e, 0x32, 0xa2, 0xbc, 0x24, 0x51, 0xe5, 0xee, + 0xa8, 0x68, 0x22, 0x1d, 0x41, 0xf0, 0xda, 0xa8, 0x05, 0xf4, 0x36, 0x0e, 0x90, 0x3e, 0xf6, 0x9d, + 0x16, 0xfc, 0xf6, 0xd4, 0x40, 0xbb, 0x80, 0x79, 0xc7, 0x9e, 0x18, 0x61, 0xd5, 0xf9, 0x98, 0x4f, + 0xb4, 0x7a, 0x0e, 0x78, 0x5a, 0xb3, 0xd0, 0x84, 0xe1, 0xdf, 0xaa, 0x04, 0x0f, 0x0a, 0xce, 0xbe, + 0x11, 0x97, 0x7d, 0x6c, 0xd5, 0xcd, 0xf9, 0xcd, 0x5c, 0x62, 0xe5, 0x8f, 0xe0, 0x64, 0x90, 0xea, + 0x9c, 0x51, 0x3f, 0x76, 0xc0, 0x6d, 0x43, 0x94, 0x7f, 0x7b, 0x32, 0x9c, 0x03, 0x5c, 0xeb, 0x07, + 0x48, 0x54, 0x34, 0x58, 0x0c, 0x0d, 0x91, 0x38, 0xd4, 0x47, 0xa4, 0x57, 0x2f, 0xbc, 0x4d, 0xd1, + 0xa5, 0x06, 0x85, 0x42, 0x3e, 0x69, 0xdf, 0x8b, 0xe7, 0xfe, 0x58, 0x97, 0xed, 0x48, 0x39, 0x7e, + 0x2e, 0xff, 0xf2, 0xe2, 0x1a, 0x76, 0xed, 0xfd, 0xcd, 0x10, 0x0d, 0x14, 0x17, 0x50, 0x9b, 0xf0, + 0xad, 0x3a, 0xe0, 0x2f, 0x13, 0xd6, 0x7b, 0x63, 0x55, 0xd6, 0xe3, 0x93, 0xaf, 0xd1, 0xca, 0x6c, + 0x20, 0xda, 0xa1, 0x27, 0x6f, 0xe8, 0xa2, 0x5e, 0x3c, 0x33, 0xc3, 0xc3, 0xb6, 0xa4, 0x07, 0xed, + 0xd7, 0x3f, 0x23, 0x67, 0x44, 0x79, 0x51, 0xc5, 0xed, 0xbf, 0x7f, 0xda, 0xe4, 0x4c, 0xde, 0x94, + 0xcf, 0x27, 0xd6, 0xba, 0x4b, 0xbc, 0x8f, 0x27, 0xd1, 0x62, 0x9b, 0xb5, 0x1a, 0x7a, 0x88, 0x8e, + 0xa3, 0x8c, 0xca, 0xfe, 0x0a, 0xf5, 0xdb, 0xfd, 0x22, 0x70, 0x45, 0x6d, 0x9c, 0x19, 0x03, 0x37, + 0x67, 0x6b, 0x7b, 0xb9, 0xd5, 0xa3, 0xfb, 0xa7, 0x5b, 0x26, 0x4e, 0x26, 0x40, 0x1d, 0x01, 0x72, + 0xa7, 0x95, 0x0f, 0x63, 0x5d, 0xeb, 0x25, 0x39, 0x64, 0x7d, 0x4c, 0x59, 0x7e, 0xf8, 0x0f, 0xac, + 0xeb, 0x53, 0x46, 0x94, 0xfc, 0x49, 0x51, 0x8a, 0x03, 0xf3, 0xf2, 0xfd, 0x3b, 0xda, 0x00, 0x1b, + 0x9b, 0x71, 0x8c, 0xeb, 0x6c, 0x3b, 0x0f, 0x4a, 0x7b, 0xc3, 0x59, 0xd6, 0x13, 0x42, 0x87, 0x69, + 0x9d, 0x28, 0x4e, 0xf8, 0xc7, 0xd6, 0x84, 0x83, 0x10, 0xb5, 0x55, 0x31, 0x83, 0x45, 0x8a, 0x09, + 0x35, 0x75, 0x9c, 0xe9, 0xd2, 0x64, 0x2a, 0x06, 0x85, 0x23, 0xe1, 0x1c, 0xee, 0x36, 0x15, 0xd0, + 0x69, 0x10, 0x2d, 0xff, 0x8e, 0xbb, 0x5b, 0x88, 0x1c, 0x71, 0x3c, 0x44, 0x46, 0xad, 0x6f, 0xd9, + 0xc3, 0x21, 0x4a, 0x6e, 0x97, 0xe8, 0x4f, 0xb0, 0x73, 0xe5, 0xb2, 0x59, 0xd7, 0x0d, 0x8e, 0x9d, + 0xb5, 0x42, 0x5c, 0x25, 0x0d, 
0x71, 0xf9, 0xe4, 0x4e, 0x8f, 0xac, 0xd6, 0xa5, 0xc8, 0xf4, 0x87, + 0xa1, 0xa5, 0x1e, 0x3f, 0xbd, 0x84, 0xbf, 0x9e, 0x0a, 0x45, 0xf1, 0x05, 0x96, 0x8e, 0x0a, 0x62, + 0x50, 0x76, 0xb7, 0xa1, 0x86, 0x21, 0xac, 0xf4, 0xad, 0xc4, 0xe0, 0x59, 0x0f, 0xa2, 0x87, 0xd6, + 0xfd, 0x91, 0x03, 0x61, 0x3d, 0xbc, 0x48, 0x72, 0xc5, 0x61, 0x6a, 0x9b, 0xe8, 0x03, 0xc7, 0xa8, + 0xf8, 0xb0, 0xdf, 0x62, 0x3c, 0x28, 0x0a, 0xf7, 0x41, 0x76, 0xf8, 0xdf, 0x4f, 0x87, 0xf6, 0xee, + 0xc9, 0x84, 0x0d, 0xc3, 0x0c, 0x45, 0x9e, 0xd6, 0xfc, 0x1c, 0xcd, 0x98, 0x09, 0xf2, 0x6f, 0x2e, + 0xa1, 0xd3, 0x0f, 0x34, 0x0c, 0x68, 0x7c, 0xd4, 0xfa, 0xc5, 0xfb, 0x2e, 0x3b, 0x4c, 0xa3, 0x7f, + 0x2e, 0xe0, 0xa7, 0xba, 0xef, 0x40, 0x7c, 0xb4, 0x51, 0xb3, 0xcd, 0xe9, 0xe9, 0x62, 0x2b, 0xbc, + 0x13, 0x00, 0x39, 0x08, 0xab, 0xd5, 0x48, 0x36, 0xa9, 0x53, 0x8d, 0x5b, 0xfb, 0xc0, 0xe2, 0xbc, + 0x5d, 0x34, 0x0c, 0x8c, 0x19, 0x07, 0xff, 0xbd, 0xa2, 0xba, 0x59, 0xb0, 0x2b, 0x7f, 0x19, 0x1f, + 0x41, 0xec, 0xb8, 0x07, 0x60, 0x0f, 0xf7, 0x78, 0x9d, 0x8b, 0x43, 0xb2, 0x11, 0x5c, 0x8d, 0x49, + 0x3c, 0x12, 0xea, 0x88, 0x23, 0x4e, 0xf2, 0x9b, 0xbc, 0x31, 0xe6, 0x61, 0xe8, 0x57, 0xd4, 0xae, + 0x30, 0xf7, 0x0a, 0x81, 0x71, 0x1c, 0xe6, 0x6e, 0x47, 0x63, 0x31, 0x9c, 0x1f, 0x99, 0x39, 0x29, + 0xfe, 0xc6, 0xfa, 0x0d, 0x9b, 0x13, 0x49, 0xf3, 0x28, 0x4d, 0xa7, 0x44, 0x06, 0x5b, 0x79, 0x82, + 0x4c, 0x2e, 0x6f, 0x06, 0x47, 0xa4, 0x54, 0x5f, 0xdd, 0xa7, 0x0b, 0x76, 0xaa, 0xbc, 0x7f, 0x72, + 0x7e, 0xfe, 0x99, 0xab, 0x05, 0x1e, 0x7a, 0x55, 0x2e, 0x49, 0x63, 0x15, 0x95, 0xf7, 0x3a, 0xfb, + 0x75, 0x57, 0x0f, 0xc9, 0xa3, 0x16, 0x6f, 0x26, 0xf5, 0x11, 0x32, 0xb7, 0x55, 0x47, 0x62, 0xf2, + 0x34, 0x0b, 0xd2, 0x8b, 0x15, 0x2e, 0xc8, 0x41, 0x63, 0x52, 0x64, 0xa4, 0xb2, 0x0f, 0x3f, 0x46, + 0x1b, 0x3b, 0xd7, 0xe9, 0xc9, 0x6a, 0xe9, 0x7b, 0x41, 0xe8, 0xb9, 0x69, 0xca, 0xf3, 0xe2, 0xe9, + 0xb2, 0x55, 0xe2, 0x8e, 0x03, 0xc4, 0x8d, 0x1e, 0x0b, 0xdb, 0xda, 0x77, 0x8c, 0xf2, 0xea, 0x78, + 0x72, 0x67, 0xde, 0x14, 0x95, 0xa9, 0xff, 0xf4, 0xe7, 0x21, 0xfb, 0x36, 0x9c, 0xcb, 0x37, 0x93, + 0xa1, 0x19, 0x0b, 0xc6, 0x87, 0x12, 0x46, 0xe8, 0xef, 0x74, 0x30, 0x70, 0xaa, 0xdc, 0x4f, 0xed, + 0x10, 0x8f, 0x8a, 0xc8, 0x20, 0xb5, 0x0d, 0x7e, 0xed, 0x7f, 0xd4, 0x87, 0xd1, 0xe5, 0x55, 0x67, + 0xac, 0x2d, 0xc5, 0x62, 0x65, 0x2a, 0x51, 0xdc, 0x9e, 0x47, 0x4f, 0xac, 0xf7, 0xe8, 0x22, 0xf5, + 0x06, 0x31, 0xcf, 0xbb, 0x2b, 0xb5, 0xde, 0xf0, 0x32, 0x84, 0x47, 0xcd, 0x21, 0xcd, 0x03, 0x0d, + 0xde, 0x7c, 0xbb, 0xfb, 0xd8, 0xa5, 0xce, 0x94, 0x7f, 0xff, 0x3d, 0xa1, 0x63, 0x55, 0x4b, 0x67, + 0x6f, 0xca, 0x2f, 0xbc, 0xcc, 0x07, 0x41, 0xed, 0xb1, 0x66, 0x11, 0x9d, 0x84, 0x6b, 0x58, 0xf2, + 0x5f, 0x7f, 0xa1, 0x3b, 0xbb, 0xb6, 0xc8, 0xaa, 0x70, 0x50, 0x3c, 0x16, 0x48, 0x57, 0x0c, 0x1d, + 0x3a, 0xe7, 0xc7, 0xa9, 0xd9, 0x17, 0x84, 0x2e, 0xe0, 0x32, 0x81, 0x37, 0x94, 0x9e, 0x33, 0x9b, + 0xde, 0xe2, 0xad, 0x25, 0x48, 0x80, 0x11, 0xbb, 0x19, 0xbe, 0xf9, 0x95, 0x81, 0xef, 0xd2, 0x04, + 0x10, 0x42, 0x8f, 0xf6, 0x53, 0xfa, 0x96, 0xc7, 0xb6, 0x59, 0x3c, 0x2c, 0x9e, 0xd1, 0x50, 0x86, + 0x65, 0xb3, 0x7f, 0xf1, 0xb6, 0xe4, 0x3d, 0xd6, 0x79, 0x58, 0xb0, 0x8d, 0x86, 0x0a, 0xd0, 0x1d, + 0xfa, 0xdf, 0xb4, 0x8b, 0xb0, 0x2f, 0xbc, 0xb3, 0x82, 0xa8, 0x89, 0x8c, 0x6c, 0x9a, 0xdf, 0x6c, + 0xe9, 0x6f, 0x83, 0x86, 0x19, 0x92, 0x31, 0xda, 0x87, 0x4a, 0xb2, 0xd0, 0x11, 0xc5, 0xe9, 0xd3, + 0xd5, 0x77, 0x7e, 0x03, 0xf2, 0xb6, 0xe4, 0x63, 0xa1, 0xa8, 0x09, 0x39, 0xad, 0x11, 0x0d, 0x5a, + 0x33, 0xc0, 0x78, 0x12, 0x05, 0x66, 0xb2, 0x12, 0x2a, 0x19, 0xdc, 0x03, 0xbd, 0x83, 0x11, 0x0a, + 0x7d, 0x9f, 0xf7, 0x57, 0xe7, 0x38, 0x5c, 0xee, 0xc1, 
0x2a, 0x3d, 0xd3, 0xea, 0x5b, 0xe0, 0xd9, + 0x20, 0x3a, 0xbd, 0xcb, 0xdc, 0x97, 0x35, 0x32, 0x5a, 0xee, 0xd1, 0x4d, 0xe5, 0x64, 0xe5, 0xfa, + 0xf0, 0x69, 0x2c, 0x3e, 0x13, 0x0d, 0xe0, 0x05, 0xd5, 0x38, 0x49, 0x40, 0x31, 0x6e, 0x13, 0x6a, + 0x4c, 0xe1, 0xb7, 0x90, 0x80, 0x55, 0xd5, 0x99, 0xbd, 0x1e, 0xfa, 0xa9, 0xc7, 0x48, 0xc1, 0x13, + 0x7f, 0xa2, 0x8d, 0xa5, 0x3f, 0x5e, 0x33, 0x62, 0x04, 0x23, 0xf1, 0xd7, 0x49, 0xe3, 0x02, 0x18, + 0x13, 0x66, 0x3f, 0x0b, 0x12, 0xb9, 0x4b, 0x6f, 0x76, 0xa9, 0xde, 0xee, 0x5c, 0xbf, 0x2e, 0x7a, + 0x05, 0xa1, 0x08, 0x7d, 0x87, 0x31, 0xe4, 0xe0, 0x0c, 0xf0, 0x3b, 0x8e, 0x5c, 0x53, 0x5c, 0x96, + 0xcf, 0x73, 0xbb, 0xfc, 0x07, 0x4a, 0xfb, 0x92, 0x08, 0x77, 0x93, 0x7c, 0xdf, 0x3f, 0x64, 0xd2, + 0x7e, 0xeb, 0x85, 0xfe, 0x87, 0xa1, 0xaa, 0x55, 0x57, 0x23, 0x95, 0x96, 0xd1, 0xc2, 0x2a, 0x81, + 0xfb, 0x53, 0x10, 0x9d, 0x8a, 0x4e, 0xe1, 0xed, 0xa3, 0x0c, 0xfb, 0x14, 0xc5, 0x47, 0x22, 0xc3, + 0xa8, 0x95, 0x9b, 0xd9, 0x0c, 0xc0, 0xa8, 0x6e, 0x22, 0xff, 0xe7, 0xb1, 0xf6, 0x0e, 0x77, 0xe3, + 0xc7, 0x8f, 0x69, 0xb5, 0x0c, 0x46, 0xea, 0xa8, 0x57, 0x75, 0x4a, 0x76, 0x00, 0x69, 0xf9, 0x69, + 0xf0, 0x69, 0xbf, 0x30, 0x12, 0x8f, 0xa0, 0xeb, 0xc4, 0xa3, 0x27, 0x13, 0x16, 0xb4, 0x32, 0x2a, + 0xbe, 0x66, 0xa1, 0x00, 0x15, 0xc5, 0xc0, 0xe5, 0x21, 0xa8, 0x9d, 0xf9, 0x96, 0x0b, 0x88, 0xc4, + 0x8a, 0x52, 0x68, 0x87, 0xac, 0x32, 0xf7, 0x57, 0xbf, 0xd0, 0x7d, 0xd0, 0xce, 0x85, 0x72, 0xdb, + 0x49, 0x79, 0xe1, 0xd3, 0x56, 0x9b, 0xa1, 0x6f, 0xa6, 0x04, 0x34, 0x08, 0x53, 0x35, 0x05, 0x89, + 0xb4, 0x1d, 0x9a, 0x43, 0x38, 0x40, 0x6f, 0x9f, 0x9d, 0x68, 0xec, 0xc0, 0x4f, 0x60, 0x03, 0x75, + 0x1c, 0x49, 0x7d, 0xdb, 0x74, 0xe9, 0x4f, 0x25, 0x29, 0x4d, 0x6f, 0x2d, 0x3e, 0x7e, 0x09, 0xb4, + 0x69, 0x4a, 0x3f, 0x88, 0xf3, 0x93, 0x57, 0x16, 0xa3, 0xfa, 0xfa, 0x16, 0x6d, 0xba, 0x2c, 0x05, + 0x72, 0x12, 0x48, 0x9d, 0x98, 0xdc, 0x25, 0x53, 0x1f, 0x1c, 0xf2, 0x84, 0x73, 0x6c, 0xa4, 0x54, + 0xfb, 0x3d, 0x5b, 0x77, 0x29, 0x97, 0x39, 0x36, 0xcc, 0x23, 0xff, 0x50, 0x83, 0xad, 0xc9, 0xf8, + 0xbb, 0xce, 0xd6, 0x4a, 0x95, 0x00, 0xe0, 0xfb, 0x37, 0x2b, 0x8e, 0x01, 0xcd, 0x6d, 0x8d, 0x02, + 0xab, 0xb5, 0xd0, 0x59, 0xbc, 0xdb, 0x67, 0xe1, 0x15, 0x55, 0xd2, 0x5e, 0xc2, 0x15, 0xea, 0x71, + 0x3d, 0xe8, 0x5f, 0xe5, 0xcc, 0x68, 0xfb, 0x4f, 0x04, 0x23, 0x0f, 0x18, 0xf7, 0x6e, 0x90, 0x95, + 0x10, 0x04, 0xd0, 0x30, 0xcb, 0xe4, 0xad, 0x2b, 0x27, 0x28, 0xfa, 0xe3, 0x98, 0xf2, 0xb1, 0xa6, + 0x0c, 0x4f, 0x5e, 0x24, 0x71, 0x22, 0xa1, 0x35, 0xa0, 0xaf, 0xe1, 0xb6, 0x9d, 0xd8, 0x18, 0x1e, + 0xa6, 0x45, 0x64, 0x08, 0xe7, 0x0c, 0x18, 0x64, 0xe2, 0x87, 0x3f, 0x3d, 0xad, 0x95, 0x7a, 0xaf, + 0xd4, 0x83, 0x9d, 0x99, 0x2e, 0xe1, 0xb5, 0xf7, 0x8b, 0x1d, 0x65, 0xb4, 0x3f, 0x6e, 0x4d, 0xc3, + 0xfd, 0x3a, 0xb6, 0x9d, 0xbc, 0x2c, 0xed, 0xf4, 0xe7, 0xf5, 0x82, 0xb4, 0x9b, 0xee, 0x52, 0x28, + 0x2a, 0xfc, 0x52, 0x8e, 0xd8, 0xef, 0x21, 0x37, 0xd1, 0x91, 0xe2, 0x18, 0x8a, 0x86, 0x32, 0x71, + 0x30, 0xb0, 0xfa, 0x7d, 0xd2, 0xcd, 0x88, 0x96, 0x2b, 0x29, 0xfb, 0x8c, 0x15, 0xda, 0x95, 0x17, + 0x92, 0x5b, 0xc9, 0x32, 0xf5, 0xed, 0xb0, 0xb5, 0x7a, 0xe7, 0x7b, 0x78, 0x73, 0xa6, 0xa0, 0x04, + 0x02, 0x7e, 0x63, 0x85, 0x32, 0x04, 0x36, 0x1b, 0x69, 0xb6, 0x79, 0x54, 0x25, 0x54, 0x12, 0xf6, + 0x83, 0x85, 0x11, 0x53, 0xf3, 0xd1, 0x0d, 0x77, 0x1f, 0xd4, 0xc3, 0x79, 0x0b, 0x38, 0xc4, 0x72, + 0xfc, 0xb5, 0x38, 0xb6, 0x91, 0xa8, 0xda, 0xe9, 0x37, 0xaa, 0x64, 0x13, 0x18, 0x9f, 0x57, 0xcb, + 0xae, 0x08, 0x2a, 0xde, 0x02, 0xa1, 0x25, 0x08, 0xdd, 0xc8, 0x71, 0x83, 0xe6, 0x3f, 0x5f, 0xf9, + 0x61, 0x14, 0x18, 0xed, 0x63, 0x55, 0x42, 0x2b, 0xaf, 0xe7, 0x80, 0x55, 0x14, 
0x56, 0x2a, 0x7e, + 0xee, 0x0c, 0xec, 0x82, 0x98, 0xcb, 0x76, 0xc9, 0x12, 0x1a, 0xf6, 0x57, 0x84, 0x5f, 0x74, 0x0d, + 0xb8, 0x34, 0x29, 0xe3, 0x73, 0x8e, 0x59, 0xe3, 0xa5, 0x48, 0x01, 0xb3, 0x44, 0x6a, 0x00, 0x7a, + 0xd9, 0xc6, 0x0d, 0x81, 0xdc, 0xdc, 0x3c, 0xe6, 0x3b, 0xa2, 0x8f, 0x41, 0x2e, 0x5f, 0xa5, 0x23, + 0xab, 0xb3, 0xa8, 0x42, 0x04, 0xb4, 0x95, 0xea, 0xa5, 0x34, 0x83, 0x88, 0x92, 0x70, 0x0c, 0x3b, + 0x5f, 0xb2, 0xf5, 0x3c, 0xbc, 0x1a, 0x16, 0x15, 0xce, 0xe3, 0x67, 0xc9, 0x22, 0xda, 0x15, 0xa0, + 0x1f, 0x96, 0x01, 0x4d, 0x08, 0xbd, 0xc6, 0x32, 0x56, 0xba, 0x22, 0x44, 0x9a, 0x11, 0xdd, 0x51, + 0x22, 0xc4, 0x9c, 0x77, 0x1a, 0xc2, 0xb7, 0x0b, 0x01, 0xcd, 0xc3, 0x6a, 0x32, 0x2f, 0xfb, 0x66, + 0x34, 0xe6, 0x6c, 0x74, 0x09, 0x19, 0x89, 0x51, 0x37, 0x2f, 0x31, 0xe7, 0x4e, 0x56, 0xea, 0x9e, + 0x4b, 0xc9, 0x81, 0x3c, 0x39, 0xf4, 0xf8, 0xac, 0x45, 0xfe, 0x8e, 0x11, 0xcc, 0xb0, 0xa8, 0x8f, + 0xbe, 0xb5, 0x11, 0xdc, 0xf5, 0xf2, 0xad, 0x9a, 0x17, 0xaf, 0xf5, 0x1e, 0x15, 0x10, 0x09, 0x4a, + 0x62, 0x52, 0x08, 0x0a, 0xb6, 0x95, 0x6a, 0x61, 0x75, 0xe9, 0x60, 0xb6, 0x2a, 0xbf, 0x69, 0xed, + 0x7e, 0xff, 0x1b, 0xf7, 0x13, 0x59, 0x5c, 0xe3, 0xd6, 0x87, 0xe8, 0x09, 0xda, 0xbf, 0x28, 0x5a, + 0xe1, 0x4a, 0x59, 0x0e, 0xdd, 0xe3, 0x72, 0x3a, 0x62, 0x98, 0xd7, 0x60, 0x1d, 0xa9, 0xcf, 0xda, + 0x65, 0xb1, 0x50, 0xbd, 0x5d, 0xd5, 0xb3, 0x2a, 0x6b, 0x02, 0x9f, 0xb2, 0xdc, 0x6a, 0x71, 0x56, + 0x3b, 0x64, 0x2e, 0x5c, 0x48, 0xa9, 0x1a, 0x0d, 0x48, 0x0a, 0x36, 0x4a, 0xcb, 0x91, 0xd8, 0xe9, + 0x79, 0x49, 0xda, 0x1f, 0x99, 0x22, 0x64, 0x27, 0x23, 0xb1, 0x9b, 0x61, 0xab, 0xa6, 0x42, 0x6a, + 0x80, 0x4b, 0x98, 0x22, 0xca, 0x5a, 0x33, 0x1c, 0x36, 0x64, 0x04, 0x5a, 0xee, 0xab, 0xe0, 0x11, + 0x2a, 0x18, 0x6e, 0x7b, 0x2e, 0xc3, 0xfb, 0x79, 0x4f, 0x31, 0x08, 0xd0, 0x93, 0x06, 0x3d, 0xa6, + 0x84, 0x12, 0x3c, 0x9f, 0xe2, 0xab, 0x32, 0xd6, 0x75, 0xfc, 0xf2, 0x38, 0x02, 0x6b, 0x41, 0x41, + 0xde, 0x66, 0xdc, 0xdc, 0x72, 0x53, 0xe9, 0xb6, 0x50, 0xfe, 0x44, 0x0c, 0x93, 0x1f, 0x6a, 0xe9, + 0xbd, 0x4a, 0xba, 0xff, 0x0d, 0xd6, 0xf3, 0xd7, 0xbc, 0x07, 0xe9, 0xde, 0x18, 0x0e, 0xfb, 0xca, + 0x43, 0x90, 0x1d, 0xd9, 0x1a, 0x40, 0x47, 0xaf, 0xf0, 0x06, 0x1e, 0xb9, 0x9f, 0x05, 0x69, 0xf4, + 0x5d, 0x67, 0x72, 0x7a, 0x2c, 0xfe, 0xfa, 0xdc, 0xb0, 0x3a, 0x76, 0x29, 0x8a, 0xc6, 0x93, 0xc5, + 0xf3, 0x62, 0x41, 0x89, 0xbc, 0x2a, 0x51, 0x33, 0xe6, 0x6a, 0xac, 0x32, 0x25, 0xe4, 0x77, 0xd0, + 0xa6, 0x1d, 0xd4, 0x86, 0xe5, 0xc7, 0x4b, 0xa6, 0x85, 0xc2, 0x57, 0xd2, 0x7a, 0xc4, 0x31, 0xa3, + 0xd3, 0x2f, 0xd3, 0x28, 0x31, 0x07, 0xd6, 0x7c, 0x37, 0x85, 0x58, 0x71, 0x67, 0x25, 0x14, 0x70, + 0xe8, 0x24, 0xc4, 0xdd, 0xe3, 0x3e, 0x0f, 0x02, 0x97, 0x9c, 0x64, 0x8f, 0xd3, 0xa1, 0xde, 0x32, + 0xad, 0xff, 0xd9, 0x5f, 0x57, 0xbf, 0x96, 0xf2, 0x06, 0xb0, 0x39, 0xe1, 0xb8, 0x6e, 0x78, 0x01, + 0x91, 0x0d, 0x6f, 0xcb, 0xa4, 0x06, 0x08, 0x49, 0x6a, 0x53, 0x93, 0x59, 0x95, 0x34, 0xf0, 0xed, + 0x1d, 0xb2, 0x75, 0x9c, 0xb0, 0x73, 0xcd, 0xad, 0xd5, 0x06, 0x79, 0x34, 0x04, 0x57, 0xf3, 0x65, + 0x4a, 0xf9, 0xcb, 0x96, 0x23, 0x89, 0xf1, 0x4b, 0x51, 0xce, 0x24, 0x1e, 0xba, 0x14, 0xfe, 0x96, + 0x47, 0x1c, 0x5a, 0x9f, 0xd0, 0x65, 0x19, 0x2a, 0x40, 0x07, 0xd4, 0xfb, 0xde, 0x7f, 0xc6, 0x7d, + 0x54, 0xfb, 0xf5, 0xf6, 0x9f, 0xbd, 0xe0, 0x84, 0xc0, 0x7f, 0xb5, 0x80, 0xfd, 0xd7, 0x2c, 0xa4, + 0x55, 0xa0, 0xa9, 0x10, 0x42, 0x96, 0x3f, 0xcd, 0xa3, 0x03, 0x3d, 0x62, 0xca, 0x05, 0xa3, 0xb2, + 0x15, 0x27, 0x81, 0xb8, 0xd7, 0xe9, 0xaa, 0xe9, 0x0a, 0x8a, 0xd3, 0x9d, 0x4f, 0x7e, 0x91, 0x70, + 0xbf, 0x32, 0x62, 0x25, 0xce, 0x34, 0x9d, 0x11, 0x12, 0xfa, 0x6d, 0x15, 0x09, 0xce, 0x07, 0xae, + 0xbb, 
0xa7, 0x39, 0x5b, 0x83, 0xa7, 0xd5, 0xa2, 0x7f, 0xeb, 0xcc, 0x83, 0x75, 0xfa, 0x60, 0x86, + 0x3e, 0xde, 0xf9, 0x2a, 0x18, 0x98, 0xa4, 0x7d, 0x8e, 0xb5, 0x34, 0x85, 0xc5, 0xb1, 0x86, 0xed, + 0x80, 0x5e, 0x8e, 0x29, 0x80, 0x93, 0x93, 0xa4, 0xd6, 0x0c, 0x5c, 0x07, 0xaa, 0x52, 0x97, 0x74, + 0x7e, 0x20, 0x31, 0x8e, 0xe5, 0x95, 0x50, 0x32, 0xb1, 0xea, 0xfc, 0x69, 0x64, 0x05, 0x74, 0xf2, + 0x34, 0xe4, 0x4a, 0xbe, 0xd2, 0x81, 0xb3, 0x65, 0x3a, 0xf0, 0xc6, 0x52, 0x2b, 0x7b, 0xba, 0xee, + 0xed, 0x5d, 0x6d, 0x91, 0xe3, 0xd2, 0x2a, 0x47, 0x9f, 0x3f, 0x2a, 0x95, 0xec, 0x63, 0xbf, 0x3d, + 0xbb, 0x6e, 0xb3, 0x5e, 0x33, 0xb4, 0xe6, 0xc8, 0xd1, 0x4e, 0x3b, 0x5d, 0x14, 0xbf, 0xac, 0x0f, + 0xe0, 0x67, 0x86, 0xe5, 0xc6, 0xf8, 0x03, 0x86, 0x7d, 0x9f, 0xbb, 0x3c, 0x37, 0xe2, 0x7a, 0xb5, + 0x05, 0x3d, 0x48, 0x85, 0x29, 0xd7, 0x73, 0xf6, 0x4d, 0xe5, 0x0a, 0x01, 0x3b, 0x04, 0xbb, 0x37, + 0x96, 0x35, 0xcc, 0x44, 0x23, 0x2e, 0xfe, 0xe4, 0xc4, 0x85, 0x80, 0xce, 0x29, 0x53, 0x9b, 0xf0, + 0x76, 0xd2, 0x34, 0x54, 0x0f, 0xb1, 0x84, 0xca, 0xb3, 0x59, 0x44, 0xa9, 0xbc, 0x57, 0x15, 0x3f, + 0xb9, 0x1a, 0x5e, 0x23, 0x89, 0xf8, 0x7a, 0x25, 0xd5, 0xdb, 0x93, 0x1f, 0x4f, 0x41, 0x02, 0x45, + 0xdf, 0xcf, 0xaf, 0x69, 0xa5, 0x4b, 0xd5, 0xe8, 0x97, 0xa5, 0x67, 0xda, 0xd2, 0x92, 0x43, 0x48, + 0x2d, 0x28, 0x64, 0xeb, 0x97, 0xd6, 0x27, 0xd8, 0xac, 0xe0, 0xe1, 0x70, 0xe5, 0x54, 0x0b, 0x8d, + 0xc1, 0xa8, 0x6b, 0x02, 0x58, 0xc5, 0x54, 0x00, 0x58, 0x4d, 0x53, 0xd6, 0x67, 0xa0, 0xf5, 0x08, + 0x36, 0xd8, 0xe0, 0xfa, 0x58, 0x57, 0x3b, 0x74, 0xef, 0x0f, 0xe0, 0x29, 0x54, 0xed, 0x61, 0x09, + 0x3a, 0x2d, 0x79, 0x7a, 0x27, 0xa0, 0xe7, 0x62, 0x85, 0x31, 0x31, 0x9a, 0xe1, 0xcd, 0x99, 0x81, + 0xf1, 0x62, 0x7b, 0x3e, 0xb1, 0x72, 0xef, 0x3b, 0xe3, 0x63, 0x5b, 0xd5, 0xa0, 0xb7, 0xcf, 0x57, + 0xc1, 0xe1, 0x16, 0xad, 0x55, 0xac, 0x7c, 0xfb, 0xd7, 0x56, 0x55, 0x17, 0x3c, 0x28, 0xc6, 0xa9, + 0x0e, 0x1e, 0x5a, 0xbc, 0x76, 0x96, 0x6f, 0x79, 0x75, 0xe8, 0x15, 0x8a, 0x44, 0xe9, 0x73, 0xc6, + 0x83, 0xe9, 0xd4, 0xdb, 0x97, 0xb5, 0xae, 0x56, 0x11, 0xd0, 0xcf, 0x01, 0x1b, 0xd0, 0x16, 0x35, + 0x58, 0xf2, 0x91, 0x3a, 0xfb, 0x38, 0x8b, 0x84, 0xba, 0x6c, 0x89, 0xcb, 0x60, 0x3a, 0x6d, 0xb9, + 0xd5, 0x09, 0xe6, 0x7d, 0xc8, 0xb0, 0xf7, 0xd5, 0x85, 0x44, 0x4b, 0xe2, 0xbe, 0x47, 0x11, 0x25, + 0x22, 0x36, 0x21, 0xc7, 0x97, 0xb3, 0x2a, 0x03, 0xf0, 0xeb, 0x49, 0x70, 0x8e, 0x2e, 0x2d, 0x30, + 0xb9, 0x26, 0x2a, 0xac, 0x63, 0x75, 0xea, 0x07, 0x01, 0x47, 0x96, 0xaf, 0x5e, 0x0f, 0xc6, 0xd1, + 0xc8, 0x94, 0x6b, 0xa5, 0x2b, 0xff, 0xd9, 0x97, 0x96, 0xfe, 0x29, 0x09, 0xde, 0xbe, 0xdc, 0xb5, + 0xb7, 0xa1, 0xd7, 0xfb, 0xd5, 0x89, 0xaa, 0x9f, 0x8b, 0x30, 0x99, 0x9f, 0x22, 0xdb, 0xa2, 0x5a, + 0x33, 0x67, 0xce, 0x8c, 0x77, 0x69, 0x03, 0xee, 0x74, 0x90, 0x5b, 0xf4, 0xc4, 0xce, 0x1f, 0xb3, + 0x0a, 0x3d, 0x1b, 0x9a, 0xd3, 0xdc, 0x74, 0x2c, 0xe4, 0x5a, 0xb9, 0x84, 0x85, 0xa5, 0xad, 0x48, + 0xc6, 0xa4, 0xce, 0x28, 0x66, 0x79, 0xa1, 0x53, 0xaf, 0xd7, 0xdf, 0x7f, 0x10, 0xa7, 0xd8, 0xcf, + 0x7f, 0x82, 0xbd, 0x8c, 0x66, 0x94, 0x8a, 0x97, 0x0c, 0xb6, 0x29, 0x61, 0xaa, 0xf0, 0x40, 0xa0, + 0xd9, 0x25, 0xc7, 0x71, 0x82, 0x1d, 0x9d, 0x5b, 0x53, 0x2f, 0x00, 0x6f, 0xa0, 0x8a, 0xca, 0x12, + 0x37, 0xdd, 0x4b, 0x91, 0xd4, 0xf1, 0x50, 0x4a, 0x1e, 0x4d, 0xc8, 0xcd, 0x99, 0x48, 0xfe, 0xc4, + 0x4c, 0x5a, 0x82, 0xc9, 0x04, 0x2f, 0xd0, 0xa4, 0xdb, 0x1b, 0x59, 0x6e, 0x8f, 0x23, 0x44, 0xd6, + 0xe7, 0x4f, 0x16, 0xa6, 0x10, 0xa6, 0x96, 0xd3, 0x09, 0xc3, 0x58, 0xf1, 0x2d, 0x8a, 0x0a, 0xdc, + 0xc3, 0x8f, 0x97, 0x11, 0xc7, 0x07, 0x03, 0x6d, 0x08, 0x9c, 0xab, 0x05, 0xa0, 0xbf, 0x9c, 0xf6, + 0x16, 0x9c, 0x8b, 0x4b, 0x86, 
0xdd, 0x14, 0x3a, 0x92, 0x7f, 0xf0, 0xfe, 0xa1, 0xd1, 0xb5, 0x1b, + 0xf0, 0x3d, 0xa2, 0xd9, 0xad, 0x38, 0x5e, 0xcf, 0x52, 0xb8, 0xfa, 0xae, 0xf1, 0x75, 0x98, 0x0b, + 0xc6, 0x5a, 0x4f, 0x7e, 0xd2, 0xb7, 0xed, 0x9c, 0x30, 0xb6, 0xd7, 0x03, 0x8e, 0x12, 0x26, 0x98, + 0x54, 0x9e, 0x81, 0x17, 0xe8, 0xdf, 0x50, 0xb5, 0x94, 0xa1, 0x6b, 0x47, 0x56, 0xc7, 0x5e, 0x0a, + 0x49, 0x48, 0x9b, 0x23, 0x24, 0xec, 0x39, 0xc0, 0x43, 0x52, 0xf9, 0x4a, 0x34, 0xb3, 0xed, 0x61, + 0x32, 0x69, 0xb0, 0xf7, 0x63, 0xd1, 0xb1, 0x2f, 0x69, 0x03, 0x07, 0xeb, 0x3b, 0xfd, 0xb8, 0xc5, + 0xe4, 0xa9, 0x42, 0x59, 0xd5, 0x70, 0x75, 0xfc, 0xf1, 0xa1, 0x1f, 0x6d, 0xcf, 0xef, 0xa0, 0xb9, + 0xae, 0x3d, 0xb9, 0xc9, 0x8f, 0xb8, 0x44, 0x9a, 0x1f, 0xe7, 0x1d, 0xbc, 0xe4, 0x5e, 0xa8, 0xa9, + 0xa1, 0x93, 0x86, 0x8b, 0xdf, 0x07, 0x44, 0x60, 0x1b, 0x15, 0xfa, 0xf5, 0xef, 0xe4, 0x20, 0x32, + 0x47, 0x07, 0x21, 0x14, 0xd9, 0x74, 0x72, 0xce, 0xdd, 0xdd, 0x96, 0x8e, 0x99, 0xfe, 0xdb, 0xe7, + 0x76, 0x30, 0x25, 0x9d, 0x61, 0xce, 0xb2, 0xbe, 0x43, 0x7c, 0x41, 0x54, 0xe1, 0xb0, 0xb2, 0xc7, + 0x2f, 0xfc, 0x2f, 0xd3, 0xbe, 0x77, 0x45, 0x6c, 0x6a, 0x82, 0x34, 0x8e, 0xc6, 0xe5, 0x18, 0xb6, + 0x98, 0xb2, 0x88, 0xc3, 0x29, 0x25, 0xa7, 0x29, 0x81, 0x43, 0xe2, 0xa4, 0x4a, 0x83, 0x2c, 0xd4, + 0x68, 0xb9, 0xc3, 0x9f, 0x20, 0x29, 0x0e, 0x89, 0x0c, 0xdf, 0x2b, 0x57, 0xf8, 0xfa, 0xaa, 0x52, + 0x64, 0xfc, 0x23, 0x18, 0x0e, 0x27, 0x08, 0x46, 0xa2, 0x69, 0x3e, 0xdd, 0x88, 0x96, 0x01, 0xbe, + 0x96, 0x5f, 0x68, 0x7f, 0xc3, 0xd3, 0xac, 0x83, 0x2f, 0x06, 0x5e, 0xce, 0xb3, 0x93, 0x09, 0x73, + 0xea, 0x8a, 0x54, 0x5c, 0xaa, 0xa2, 0xc6, 0xac, 0x60, 0x09, 0x75, 0x3f, 0xb9, 0x6f, 0x19, 0x32, + 0x94, 0xaa, 0xa7, 0x36, 0xee, 0xf6, 0x15, 0xe3, 0xb2, 0x32, 0xb7, 0x0e, 0xff, 0x5b, 0x66, 0xa8, + 0x4d, 0x2d, 0xba, 0xdd, 0x95, 0xee, 0x81, 0x55, 0x9f, 0x22, 0xde, 0x0b, 0xa9, 0x91, 0xc9, 0x0e, + 0xd9, 0xf7, 0xe2, 0xeb, 0x92, 0xc9, 0xbc, 0x13, 0xa5, 0xf9, 0xe6, 0x27, 0x6f, 0x2b, 0x17, 0x57, + 0x87, 0x23, 0x49, 0xc2, 0x94, 0xce, 0x97, 0x76, 0xc3, 0xb0, 0x68, 0xc9, 0x87, 0x5a, 0x9b, 0x9d, + 0xc7, 0x8c, 0xca, 0xde, 0x55, 0x93, 0xe6, 0xec, 0xf4, 0xb5, 0x66, 0x86, 0x7e, 0xca, 0xa5, 0xe9, + 0x50, 0x5f, 0xc7, 0xcb, 0x64, 0x73, 0x3e, 0x49, 0xa7, 0x05, 0x3e, 0x86, 0xfa, 0xa5, 0xac, 0x64, + 0xd9, 0x35, 0xc7, 0x29, 0x23, 0x65, 0x23, 0x3b, 0x3f, 0x16, 0x95, 0xc6, 0x06, 0xbc, 0x50, 0x10, + 0xb9, 0xda, 0x76, 0xdb, 0x05, 0xc8, 0x57, 0x75, 0x73, 0x3a, 0x5b, 0x27, 0x9b, 0x6e, 0xd8, 0xd2, + 0x8c, 0x81, 0xf7, 0x4b, 0x4c, 0xc6, 0x93, 0x24, 0x18, 0x71, 0xc4, 0xb3, 0xd6, 0xee, 0x44, 0x89, + 0xd6, 0xfc, 0x79, 0x79, 0xa0, 0xc5, 0x64, 0x12, 0xbd, 0x6c, 0xf6, 0xb9, 0x85, 0x7d, 0x1e, 0xaf, + 0xc2, 0xa1, 0x82, 0x20, 0xf0, 0xe3, 0x64, 0x43, 0xfe, 0x6a, 0x11, 0x87, 0x52, 0xad, 0x3d, 0x4d, + 0xf0, 0x01, 0x61, 0x05, 0x36, 0xd6, 0x27, 0x13, 0x05, 0xe6, 0x22, 0xf0, 0x8f, 0xe8, 0x65, 0x62, + 0x8c, 0x32, 0x1f, 0xd8, 0x65, 0x4a, 0x7f, 0x93, 0xee, 0x37, 0xd4, 0xc7, 0xb8, 0xf9, 0x7d, 0xcc, + 0xeb, 0x15, 0xb7, 0x42, 0xf7, 0x36, 0x55, 0x03, 0x40, 0x06, 0x27, 0x7f, 0x74, 0x0d, 0xe8, 0x8e, + 0x91, 0xf5, 0xf4, 0xbf, 0xba, 0x03, 0x9d, 0xd0, 0xdd, 0xe0, 0x5f, 0xba, 0x87, 0x51, 0xda, 0x4f, + 0xdc, 0x32, 0x65, 0xd6, 0x5c, 0xe7, 0x6f, 0xcc, 0xc9, 0xc1, 0x7a, 0x18, 0xdf, 0xf0, 0x57, 0x6e, + 0x9e, 0xaa, 0x27, 0x7f, 0x55, 0xcc, 0x3b, 0x77, 0x3d, 0x9f, 0x77, 0xd5, 0xd5, 0x92, 0x7c, 0x53, + 0x7e, 0xee, 0xd8, 0x9e, 0x47, 0x51, 0x83, 0xab, 0xb5, 0xb7, 0xf1, 0x62, 0x72, 0x99, 0xb8, 0xbf, + 0x1f, 0x7e, 0x45, 0x04, 0x3d, 0x65, 0x00, 0x91, 0xc7, 0xb4, 0xde, 0xbd, 0xbf, 0xb6, 0x4a, 0x2b, + 0x72, 0x31, 0x52, 0xe1, 0x31, 0x6a, 0xff, 0xf9, 0xb5, 
0xac, 0xe9, 0x24, 0xb1, 0x88, 0x7a, 0x87, + 0xb4, 0x23, 0x9e, 0x6b, 0xe0, 0xca, 0x9a, 0x70, 0xa5, 0x35, 0xd8, 0x07, 0xc7, 0x08, 0x5f, 0x47, + 0xea, 0x28, 0x5f, 0x2f, 0xc3, 0xc5, 0xa7, 0xf0, 0xda, 0x10, 0x62, 0x8e, 0xd0, 0xbd, 0xb5, 0xaf, + 0xc7, 0x18, 0xe5, 0xea, 0x6d, 0x88, 0xc9, 0x38, 0x0f, 0x2c, 0x14, 0x18, 0xc2, 0x9d, 0xe3, 0x44, + 0x8d, 0xac, 0xf6, 0xc5, 0x10, 0x15, 0xf0, 0x96, 0x27, 0x54, 0x73, 0xd5, 0x48, 0xed, 0xb5, 0x48, + 0x26, 0xcd, 0xb9, 0xa8, 0x6d, 0xf7, 0xb8, 0x7e, 0x41, 0xb7, 0x46, 0xef, 0xa1, 0x69, 0xbd, 0x77, + 0x42, 0x67, 0xf4, 0x73, 0xe9, 0xbb, 0xc8, 0x47, 0xf6, 0xe5, 0x01, 0x4f, 0x60, 0x2d, 0x7c, 0xa4, + 0xd5, 0xd1, 0x46, 0x43, 0x29, 0xd5, 0xea, 0x55, 0x94, 0xaf, 0x05, 0x2e, 0x63, 0xbf, 0x6b, 0x06, + 0x79, 0x2e, 0x32, 0xeb, 0xe3, 0x77, 0xad, 0xe7, 0x73, 0x30, 0xe3, 0x0f, 0x80, 0x1d, 0xda, 0x84, + 0xab, 0x2d, 0x90, 0x37, 0xbd, 0x8b, 0x6f, 0x38, 0x54, 0xba, 0x1e, 0x6f, 0x94, 0x22, 0x96, 0xa6, + 0x28, 0x73, 0xa7, 0x58, 0x6e, 0xe0, 0x22, 0x5c, 0xfb, 0xf2, 0x46, 0xb1, 0x94, 0xeb, 0x28, 0x42, + 0x7a, 0x51, 0x67, 0x04, 0xe9, 0x0e, 0xcd, 0x4b, 0x50, 0xfb, 0x2e, 0x7c, 0x0e, 0x50, 0xf5, 0x2e, + 0xfd, 0x85, 0xa1, 0xfa, 0x53, 0xe6, 0x38, 0xa7, 0x70, 0x85, 0x92, 0x22, 0xd4, 0xa5, 0x14, 0x16, + 0x19, 0xfa, 0xa5, 0x5f, 0x1a, 0xa9, 0xcc, 0x80, 0xa6, 0x52, 0xe5, 0x16, 0xb8, 0xef, 0x80, 0xbe, + 0x3e, 0xc3, 0x81, 0x96, 0xbe, 0xb4, 0x3b, 0x5a, 0xe5, 0xe4, 0x37, 0x75, 0x37, 0x42, 0x36, 0x8d, + 0x75, 0xd2, 0x0b, 0x4c, 0xda, 0x33, 0x36, 0x71, 0x0b, 0x44, 0xfe, 0xc2, 0x4e, 0x26, 0x1e, 0x18, + 0xbc, 0x45, 0x0e, 0x8e, 0xfd, 0x2a, 0xaf, 0xc9, 0xa8, 0xd9, 0x37, 0x7b, 0x8b, 0x0e, 0x5d, 0xc5, + 0xb6, 0xcf, 0x76, 0xb0, 0xc4, 0xec, 0xeb, 0x6f, 0x1d, 0xf7, 0xb6, 0xc6, 0x7a, 0x93, 0x40, 0x5f, + 0xd9, 0x37, 0xd8, 0x30, 0x69, 0x72, 0xdb, 0x84, 0x08, 0x29, 0x74, 0xec, 0x25, 0xca, 0x33, 0xdf, + 0x68, 0x52, 0x4b, 0x6d, 0x1c, 0x7b, 0x9e, 0xde, 0x3d, 0x26, 0x85, 0x0a, 0x00, 0x2f, 0x31, 0x1c, + 0x89, 0xb0, 0xc0, 0x0f, 0x77, 0x92, 0xb7, 0x3a, 0xe4, 0x58, 0x6b, 0x5e, 0x88, 0x8f, 0xee, 0xdc, + 0x00, 0x91, 0xac, 0x72, 0x79, 0x57, 0xb6, 0xfc, 0x86, 0xb0, 0xa7, 0xa3, 0x7b, 0x82, 0xd5, 0x6f, + 0x8a, 0xef, 0x9a, 0xcf, 0x2a, 0xcc, 0x88, 0x73, 0xfc, 0xa7, 0x90, 0xdc, 0xa0, 0xce, 0x40, 0x10, + 0x9b, 0xcb, 0x7a, 0x8b, 0x0d, 0x81, 0xe9, 0x86, 0x39, 0x05, 0xa0, 0xf0, 0xba, 0xb9, 0x7e, 0x0c, + 0xd6, 0x75, 0xea, 0x6e, 0xb3, 0xab, 0xb9, 0xcb, 0x29, 0x9c, 0x03, 0xc0, 0x94, 0xa6, 0x94, 0xe3, + 0x5a, 0xfc, 0x06, 0xbe, 0x10, 0x93, 0xb1, 0x9a, 0x0c, 0x62, 0x65, 0x96, 0x70, 0x9f, 0x2f, 0x3a, + 0x46, 0xc8, 0x31, 0x4b, 0x95, 0x96, 0x11, 0xa1, 0xf1, 0xc5, 0x4e, 0x0a, 0x45, 0x81, 0xd6, 0xd5, + 0x39, 0xfd, 0x0b, 0xeb, 0x97, 0x6b, 0xd4, 0x45, 0x9d, 0xa8, 0xd5, 0x59, 0xd7, 0x73, 0x6c, 0x93, + 0x80, 0xe3, 0xde, 0xf8, 0xc4, 0x84, 0x12, 0xb8, 0x32, 0x0c, 0xdd, 0x36, 0xd7, 0x18, 0x95, 0x6e, + 0xc2, 0x97, 0x4e, 0x77, 0x39, 0x09, 0x44, 0xe0, 0x56, 0xcf, 0x1b, 0x6f, 0x8b, 0xa9, 0xe8, 0x64, + 0x34, 0xa8, 0x3b, 0x44, 0x70, 0x7a, 0x26, 0xfc, 0x54, 0xa0, 0xb3, 0xf9, 0x9b, 0xbb, 0x8e, 0x14, + 0x02, 0x87, 0x27, 0xaf, 0xbb, 0xd4, 0xdd, 0x7f, 0x83, 0xdc, 0x23, 0x4a, 0x7e, 0xfd, 0x0a, 0xa8, + 0xed, 0xe2, 0xeb, 0xfb, 0xe3, 0x3d, 0xb8, 0x23, 0xcf, 0x3d, 0x80, 0xeb, 0x43, 0xed, 0xa7, 0x70, + 0x0c, 0x9f, 0xa0, 0xc8, 0xa9, 0x14, 0x8a, 0xbf, 0x53, 0xd7, 0x5a, 0x4a, 0x4d, 0xff, 0x79, 0xa2, + 0xa0, 0xdd, 0x69, 0xe2, 0x80, 0x1b, 0x5f, 0xe4, 0xfb, 0x59, 0x15, 0x95, 0x26, 0x5b, 0x73, 0xe5, + 0xed, 0x84, 0xfa, 0x08, 0x6f, 0xb1, 0x43, 0x63, 0x29, 0xb1, 0xd8, 0xfd, 0x86, 0x04, 0x45, 0x39, + 0xda, 0x35, 0x43, 0x16, 0x8d, 0x5d, 0xd8, 0x3e, 0xb0, 0x66, 0x87, 0x68, 0x63, 
0x91, 0x12, 0x29, + 0x28, 0x91, 0x09, 0xb4, 0x4e, 0xeb, 0x19, 0x68, 0xc5, 0x1c, 0x32, 0x52, 0x76, 0x1c, 0xfa, 0xf6, + 0x39, 0x9c, 0xb9, 0x03, 0x25, 0x47, 0xfd, 0xfb, 0x2e, 0x82, 0xa1, 0xe2, 0xff, 0xef, 0xd0, 0xcf, + 0x79, 0x1f, 0x0b, 0xc2, 0xfc, 0xa6, 0x07, 0x88, 0x86, 0xf9, 0xc6, 0xe3, 0xd8, 0xd0, 0xdb, 0x49, + 0xc5, 0xe4, 0xc7, 0x3d, 0x41, 0xab, 0x5c, 0x50, 0x22, 0xd9, 0x6a, 0xf6, 0x9f, 0x1e, 0x09, 0xd2, + 0x54, 0x51, 0xd4, 0xd2, 0x0c, 0xf0, 0x3e, 0x29, 0x26, 0x47, 0x2d, 0x63, 0x74, 0xea, 0x50, 0x9e, + 0x96, 0xa6, 0xed, 0x63, 0x36, 0x32, 0x43, 0x42, 0x3e, 0x5e, 0x3f, 0xbb, 0x42, 0xf2, 0x6c, 0x9b, + 0xc7, 0xd7, 0x8f, 0x78, 0x04, 0x27, 0xb0, 0x5b, 0x94, 0xb2, 0xf1, 0x6c, 0x39, 0x16, 0xd3, 0xa4, + 0x2a, 0xff, 0x94, 0x39, 0x9e, 0x38, 0xf2, 0xcf, 0x17, 0xb3, 0xce, 0xd7, 0x12, 0x8d, 0xae, 0x6a, + 0xdd, 0x26, 0x21, 0x39, 0xff, 0xd2, 0xe5, 0x70, 0xac, 0x06, 0xf4, 0xc6, 0x9a, 0x7f, 0xa1, 0x4f, + 0x92, 0x03, 0x91, 0x8f, 0x32, 0xb5, 0x13, 0x50, 0xeb, 0x30, 0xba, 0x7c, 0x32, 0x8d, 0xad, 0xb8, + 0x68, 0x39, 0x39, 0x98, 0x00, 0x3b, 0x8a, 0xc4, 0xd8, 0x0a, 0x4c, 0x3a, 0x10, 0x99, 0xcc, 0xed, + 0xfe, 0x95, 0xe5, 0x41, 0xb1, 0xc8, 0x02, 0x1b, 0x64, 0x6f, 0xb8, 0x86, 0xf6, 0xfc, 0x3a, 0x3e, + 0x53, 0xc9, 0x02, 0x2d, 0x51, 0x7d, 0x0f, 0x35, 0xe0, 0x08, 0x2a, 0x29, 0xd0, 0xaf, 0xf8, 0x34, + 0xde, 0x75, 0x08, 0xd1, 0x1d, 0xf5, 0x95, 0xa5, 0x17, 0xc5, 0xcf, 0xb6, 0x1e, 0x08, 0xf1, 0x8a, + 0x01, 0x07, 0x9f, 0x94, 0x4a, 0x92, 0xdf, 0x4e, 0xf6, 0x77, 0xec, 0x99, 0x3d, 0x98, 0xcf, 0x75, + 0x0c, 0x7a, 0xbd, 0xa7, 0x8b, 0x58, 0xc2, 0x0c, 0x0a, 0xb4, 0xbe, 0x87, 0x00, 0x3f, 0x7d, 0x78, + 0xd6, 0x74, 0x04, 0x7c, 0x14, 0xc9, 0x47, 0x01, 0x9b, 0x1d, 0xc4, 0xb5, 0x74, 0xcc, 0xf6, 0x2c, + 0x4c, 0xd2, 0x1f, 0x9b, 0x7b, 0x1a, 0xba, 0xaf, 0x03, 0xcd, 0x20, 0x4e, 0xd6, 0x0a, 0xab, 0xb1, + 0x9e, 0x57, 0xa4, 0x3d, 0xab, 0xc0, 0x2c, 0x65, 0x32, 0x4a, 0x4c, 0x31, 0x11, 0xa4, 0x12, 0xf3, + 0xb3, 0x3a, 0x0e, 0x74, 0x7d, 0xe7, 0xee, 0x69, 0x86, 0x2d, 0xba, 0xfa, 0x4b, 0x9f, 0xa7, 0x31, + 0xec, 0xa6, 0x3b, 0x1d, 0x20, 0xc5, 0x18, 0x65, 0xbc, 0xda, 0x18, 0x57, 0x18, 0x78, 0x5f, 0xa2, + 0x0d, 0xeb, 0x2a, 0xeb, 0x75, 0x52, 0x0f, 0x79, 0x8b, 0x2b, 0x15, 0x34, 0x9d, 0xe4, 0xc7, 0x55, + 0xaf, 0x2b, 0xad, 0x17, 0x4c, 0xc8, 0xb2, 0x90, 0x69, 0x60, 0xb2, 0x7a, 0x41, 0x7b, 0xb0, 0x6f, + 0xb5, 0xa4, 0x7b, 0xbf, 0x19, 0x85, 0xd9, 0xd7, 0x61, 0x86, 0x84, 0x1b, 0x9d, 0x6b, 0x3d, 0x97, + 0xf1, 0x76, 0x3f, 0x66, 0x80, 0xff, 0xf0, 0x08, 0x22, 0x7b, 0x97, 0x44, 0xaa, 0x4c, 0x58, 0x5f, + 0x72, 0xd5, 0x66, 0x33, 0x1d, 0x3e, 0xc9, 0xfc, 0xe2, 0xf7, 0x8d, 0x9c, 0x5a, 0x52, 0x96, 0x6b, + 0xdd, 0xc2, 0xa8, 0x49, 0xfe, 0x39, 0xfa, 0x8b, 0x17, 0x58, 0x33, 0xdf, 0xc4, 0xed, 0xf8, 0x85, + 0xf9, 0xfd, 0x02, 0x7b, 0x88, 0xa8, 0x19, 0x1b, 0x96, 0xad, 0x5c, 0xba, 0xc1, 0x5e, 0x87, 0xd1, + 0xe4, 0x7d, 0xce, 0x47, 0x5f, 0x41, 0x73, 0x44, 0x97, 0xc4, 0xc2, 0xa5, 0xf7, 0xf9, 0xae, 0x1c, + 0xa3, 0x28, 0x58, 0x70, 0x07, 0xb2, 0xb9, 0x6f, 0xde, 0x55, 0x75, 0xa0, 0x79, 0x48, 0x5d, 0x52, + 0xfb, 0x3a, 0xd4, 0xfe, 0x91, 0xf0, 0x62, 0x61, 0x48, 0x9b, 0x0d, 0x90, 0xcb, 0x0b, 0x8a, 0x82, + 0x80, 0xe9, 0x59, 0xd7, 0xf2, 0xfc, 0x8c, 0x0f, 0x99, 0xf0, 0xaf, 0xcd, 0x84, 0x94, 0x11, 0x56, + 0x19, 0x0d, 0x4e, 0x9c, 0xab, 0xa7, 0x76, 0x8f, 0x54, 0x6f, 0x0a, 0xf3, 0xc1, 0x96, 0x1c, 0x90, + 0xfd, 0xe5, 0x2f, 0x7f, 0xbb, 0x1a, 0x11, 0xa4, 0x58, 0x51, 0xee, 0x63, 0xd9, 0x33, 0x3e, 0x43, + 0x85, 0xdf, 0xe6, 0x0e, 0x2c, 0x1e, 0x4d, 0x2b, 0x48, 0x6a, 0xe2, 0x16, 0xcf, 0x33, 0xdd, 0xe5, + 0x32, 0x65, 0x64, 0x98, 0x35, 0x0a, 0xd6, 0x46, 0x66, 0xec, 0x3a, 0xcd, 0xb1, 0xbe, 0xa8, 0x9d, + 0x3d, 
0x9b, 0xcd, 0x66, 0x0c, 0x32, 0x2e, 0x10, 0x91, 0x11, 0xbc, 0x15, 0xb8, 0x69, 0x89, 0x58, + 0xc3, 0xc7, 0x1a, 0xb0, 0x2e, 0xdf, 0x14, 0xad, 0x4b, 0xd0, 0x2b, 0x66, 0xa7, 0x1b, 0x5d, 0x52, + 0xa5, 0xc3, 0x10, 0x95, 0x91, 0x71, 0xee, 0x85, 0xf1, 0xeb, 0xb9, 0x91, 0xa7, 0xa9, 0x78, 0x40, + 0xf4, 0xe5, 0xe4, 0x84, 0x3b, 0x99, 0xe5, 0x72, 0x45, 0x6f, 0xe2, 0x19, 0x70, 0x51, 0xe3, 0x6f, + 0xf3, 0xe7, 0xf0, 0xbb, 0x91, 0x96, 0xa9, 0x53, 0xe4, 0x2f, 0x02, 0xd0, 0x84, 0xd4, 0x68, 0x8a, + 0x7d, 0xc8, 0xad, 0x98, 0x65, 0x62, 0x56, 0xf6, 0xb2, 0x7e, 0xa9, 0xd0, 0x91, 0xdd, 0x84, 0x7a, + 0x39, 0x8a, 0x80, 0x24, 0xe3, 0xbe, 0x0d, 0xf2, 0xb1, 0x30, 0x85, 0xcd, 0x9e, 0x07, 0x75, 0xd5, + 0xf0, 0x5a, 0x96, 0x3e, 0x3a, 0xb4, 0x19, 0xfe, 0xe7, 0xbd, 0xb7, 0x15, 0x33, 0xc4, 0xa7, 0x2a, + 0xb6, 0xba, 0x57, 0x43, 0x9f, 0xe0, 0xc8, 0x86, 0x02, 0x83, 0xe1, 0x4c, 0x1f, 0xf1, 0xfe, 0x9b, + 0x21, 0x8e, 0x98, 0x6d, 0x6f, 0x8a, 0x4b, 0xaa, 0x43, 0x0c, 0x60, 0x4e, 0x7b, 0x01, 0x76, 0xa1, + 0x80, 0xeb, 0x9f, 0xa6, 0xc1, 0xb9, 0x10, 0x9e, 0x31, 0xda, 0x39, 0x3e, 0x1b, 0xe8, 0x67, 0xa7, + 0x08, 0xd3, 0x89, 0x43, 0xa3, 0xc6, 0x30, 0x3f, 0x89, 0xa9, 0xaf, 0x2b, 0xd1, 0x5d, 0xc7, 0xc1, + 0x63, 0xb7, 0x72, 0x93, 0x47, 0x95, 0x30, 0x63, 0x20, 0x39, 0x75, 0xa9, 0x48, 0x2d, 0xb6, 0xa2, + 0xcd, 0x1e, 0xb7, 0x84, 0xce, 0x72, 0x4b, 0x86, 0xd2, 0xec, 0x6a, 0xdd, 0x85, 0x6f, 0x7d, 0x1f, + 0x51, 0xf7, 0x88, 0x3c, 0xe5, 0xb1, 0x5c, 0xd6, 0xb6, 0x9e, 0x41, 0xf0, 0xac, 0x45, 0x6d, 0xe4, + 0xf3, 0x23, 0x13, 0xe2, 0x90, 0x49, 0xe1, 0x0b, 0x73, 0x86, 0x73, 0x74, 0xce, 0x0f, 0xa1, 0xcb, + 0x49, 0xcb, 0x0c, 0x5a, 0xff, 0x35, 0x3b, 0x3f, 0xe3, 0xab, 0x8c, 0xc1, 0x6d, 0x34, 0x34, 0xfc, + 0xa9, 0xf1, 0x42, 0x66, 0x5a, 0xca, 0xc9, 0xf0, 0xda, 0x42, 0x75, 0xe4, 0x2e, 0x5e, 0x1a, 0x14, + 0x6e, 0xfe, 0x3b, 0x5a, 0x7d, 0xb2, 0x83, 0x48, 0xa2, 0x8a, 0xc5, 0xa7, 0x5b, 0x37, 0x51, 0xd8, + 0x40, 0xa3, 0xfb, 0x47, 0xcf, 0xe7, 0x78, 0xe7, 0xee, 0x59, 0x12, 0x65, 0x1d, 0x57, 0x43, 0xbd, + 0xe5, 0x71, 0x83, 0xaa, 0x12, 0x9e, 0x6d, 0x26, 0x0b, 0x90, 0x23, 0xe5, 0x05, 0x06, 0x26, 0x75, + 0xc8, 0x52, 0x5b, 0xe4, 0x93, 0xff, 0x9b, 0xc7, 0x8e, 0xe7, 0xf3, 0x1a, 0xba, 0xea, 0x8f, 0xfe, + 0x9c, 0xc1, 0x89, 0x25, 0xec, 0x83, 0x81, 0xca, 0x65, 0x74, 0xf6, 0xae, 0x04, 0xff, 0xfe, 0x17, + 0x42, 0xfa, 0x72, 0xad, 0xe4, 0x79, 0x96, 0xa9, 0xba, 0x11, 0x09, 0x06, 0xf1, 0x4f, 0xa8, 0x0a, + 0x79, 0xad, 0xb3, 0xbe, 0x8e, 0x1b, 0x4c, 0xfe, 0x4a, 0x1d, 0x9f, 0x69, 0x8f, 0x29, 0x7b, 0x63, + 0x65, 0x18, 0x24, 0x88, 0x7b, 0x34, 0xa5, 0x22, 0xf7, 0x25, 0x88, 0x4e, 0xcc, 0x5d, 0x3b, 0x0b, + 0x74, 0x35, 0x13, 0x55, 0xc1, 0xe3, 0xce, 0x29, 0x5e, 0x35, 0xd1, 0x0b, 0x54, 0x40, 0x2f, 0x50, + 0xed, 0xdd, 0xbe, 0xda, 0xf0, 0x9c, 0x25, 0xcf, 0xb6, 0x90, 0xcc, 0xcc, 0x74, 0xc4, 0x9d, 0x21, + 0x6d, 0xbd, 0xe6, 0x54, 0xf7, 0x26, 0x23, 0x82, 0x82, 0xed, 0x6e, 0x3f, 0xa5, 0x58, 0x2c, 0xe2, + 0xa4, 0x11, 0x90, 0xce, 0xa3, 0x41, 0xbc, 0xb7, 0xfa, 0xc0, 0x83, 0x9b, 0x57, 0xfa, 0x14, 0x4c, + 0x35, 0xdb, 0x23, 0x31, 0xb8, 0x09, 0x07, 0x1c, 0xbb, 0x87, 0xd9, 0xdb, 0xa1, 0xcf, 0x58, 0x81, + 0xba, 0xeb, 0x80, 0x58, 0xbc, 0x60, 0x5f, 0x48, 0x73, 0xcf, 0x2e, 0x3f, 0x91, 0xb8, 0x1e, 0x6c, + 0x01, 0x88, 0x18, 0xe0, 0x00, 0x11, 0xdb, 0x5f, 0x92, 0x95, 0x1b, 0x40, 0x24, 0xbd, 0xbe, 0x09, + 0xad, 0xc8, 0xde, 0xcc, 0x49, 0x9f, 0xf2, 0xed, 0xe9, 0x60, 0x4d, 0x9e, 0x38, 0x92, 0xda, 0x02, + 0xc5, 0x8c, 0xfe, 0xbe, 0x5c, 0xfb, 0x56, 0xf9, 0xae, 0x06, 0x5b, 0xd1, 0x3a, 0x0f, 0x4c, 0x5e, + 0x12, 0xd2, 0x62, 0x71, 0x18, 0x72, 0xa6, 0x7f, 0x8e, 0x65, 0x86, 0xf0, 0x22, 0x49, 0x8b, 0x27, + 0x67, 0x81, 0x04, 0x00, 0xd8, 
0x3c, 0x57, 0xf0, 0xf7, 0xe2, 0x5a, 0x2f, 0x0e, 0x1e, 0x24, 0xcb, + 0x85, 0x34, 0xfe, 0xf1, 0x40, 0x3e, 0xfd, 0x0e, 0x55, 0x0a, 0x69, 0x70, 0x41, 0xdf, 0x4b, 0xed, + 0x33, 0x12, 0x3e, 0x2a, 0x54, 0xbf, 0x90, 0x46, 0x2f, 0x93, 0xe1, 0xc2, 0x81, 0x83, 0xaf, 0xc0, + 0x69, 0x27, 0x1a, 0x1f, 0x54, 0x87, 0x19, 0xf9, 0x2e, 0x0a, 0xd7, 0x9a, 0x26, 0x29, 0x29, 0xed, + 0xb6, 0x74, 0x57, 0xf0, 0x54, 0xcd, 0x62, 0xf8, 0x58, 0x7b, 0xfa, 0xe5, 0x2f, 0x3a, 0x54, 0xc4, + 0x68, 0x44, 0x68, 0xc9, 0x3b, 0xde, 0x84, 0x38, 0x85, 0x50, 0x05, 0xf7, 0x8f, 0xf7, 0x10, 0x25, + 0x71, 0xc1, 0x2f, 0xb5, 0x19, 0x10, 0x8b, 0x36, 0x6f, 0xfb, 0x0a, 0x5a, 0xb0, 0xfb, 0x07, 0x1e, + 0xb5, 0x03, 0xda, 0xc7, 0xfe, 0x50, 0x23, 0x20, 0xeb, 0xaf, 0xa3, 0x49, 0x48, 0x0e, 0x0b, 0x83, + 0x09, 0xc3, 0x2e, 0x12, 0xbf, 0xf6, 0x7e, 0xc5, 0xdc, 0xb3, 0x7a, 0xe1, 0xae, 0x0f, 0x49, 0xce, + 0x4f, 0xa1, 0xf5, 0x5c, 0x52, 0xc9, 0xa7, 0x7d, 0x86, 0x0c, 0x73, 0x85, 0x6b, 0xff, 0xe1, 0x72, + 0x2c, 0x36, 0xeb, 0xb8, 0x28, 0x9d, 0xf9, 0x3d, 0x9c, 0x98, 0xcd, 0x6c, 0x7b, 0xe3, 0x87, 0xdf, + 0x5b, 0xca, 0xea, 0xaf, 0xad, 0x99, 0x62, 0x88, 0xe2, 0xc8, 0xfa, 0xd2, 0x3f, 0x30, 0x90, 0x58, + 0xff, 0x30, 0xe9, 0x7f, 0xbb, 0x6d, 0x0c, 0x27, 0x0c, 0x9e, 0x0e, 0x24, 0x1f, 0xb4, 0x54, 0xbc, + 0x2d, 0x73, 0x36, 0x33, 0x4e, 0x2a, 0xc2, 0xcc, 0x18, 0x42, 0xd5, 0x7e, 0xb0, 0x99, 0x9c, 0x92, + 0x24, 0x31, 0xa0, 0x42, 0xa5, 0x5c, 0xcc, 0xd2, 0xa1, 0xf4, 0x88, 0xc6, 0x35, 0xa9, 0x2c, 0x54, + 0xf0, 0xab, 0x7a, 0x6f, 0x3e, 0x3b, 0xe8, 0xf7, 0x09, 0x2b, 0x4d, 0x9f, 0xda, 0x48, 0x1a, 0x51, + 0x1e, 0xfc, 0x5b, 0x40, 0x57, 0x89, 0xd5, 0xaa, 0x12, 0x11, 0x8f, 0x9e, 0x4d, 0x18, 0xe0, 0xa1, + 0x98, 0x79, 0x5e, 0x12, 0xfc, 0x0e, 0x61, 0x41, 0xd5, 0x06, 0xd6, 0xc5, 0xde, 0x66, 0xa0, 0xda, + 0x86, 0xdc, 0xc4, 0x2a, 0x7e, 0x96, 0x16, 0xc3, 0x9d, 0xd2, 0xa0, 0xe1, 0xf6, 0x98, 0x96, 0xb4, + 0x72, 0xb9, 0x12, 0x32, 0xe6, 0x23, 0x86, 0x68, 0x75, 0x62, 0xc0, 0x36, 0x72, 0xed, 0x43, 0xc6, + 0x6a, 0xb0, 0x56, 0xbc, 0x9c, 0xf2, 0xa8, 0x18, 0x65, 0xbd, 0xc3, 0x9f, 0x2d, 0x1c, 0x74, 0x84, + 0xca, 0x11, 0x99, 0x84, 0x07, 0xc4, 0xd0, 0x6d, 0xd1, 0x0d, 0xac, 0x80, 0xb1, 0x19, 0x6c, 0xd4, + 0x2b, 0xb6, 0xf2, 0x0d, 0x7d, 0x4e, 0x7f, 0xfd, 0xdc, 0xf4, 0x05, 0xd3, 0x7f, 0x25, 0xdc, 0x87, + 0xdd, 0xdc, 0xe0, 0x18, 0x9d, 0xb3, 0xb7, 0xef, 0xb8, 0x4a, 0x1b, 0x6b, 0xc7, 0xa1, 0x27, 0x65, + 0xe4, 0x10, 0x7e, 0x77, 0x4b, 0x6f, 0x5c, 0x6a, 0xb5, 0x99, 0x79, 0x6e, 0xd5, 0x5a, 0xfb, 0xbe, + 0x41, 0xe5, 0x5b, 0x8c, 0x0c, 0xbd, 0xe6, 0x51, 0xed, 0xa4, 0x0d, 0x7e, 0xd2, 0x26, 0x26, 0x94, + 0x41, 0xb9, 0xa1, 0x8d, 0x43, 0x47, 0xa5, 0x4b, 0x6e, 0xf1, 0x6c, 0x00, 0xf5, 0x9e, 0x53, 0xb0, + 0x7c, 0x35, 0x1a, 0x1e, 0xd3, 0xd7, 0xf0, 0x71, 0xef, 0x79, 0x77, 0xf3, 0xc3, 0x07, 0x1f, 0x8f, + 0xf2, 0x04, 0x5c, 0x21, 0x91, 0x68, 0xea, 0xd7, 0x87, 0x78, 0xfd, 0x4f, 0xcf, 0xff, 0xdb, 0xff, + 0xd7, 0xff, 0xfa, 0xaf, 0xff, 0xfa, 0xaf, 0xff, 0xfa, 0xdf, 0xf1, 0xff, 0x00, 0xc1, 0x4d, 0x92, + 0x49, 0x00, 0x24, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 9216, // uncompressed data size (bytes) + 7365, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x03, 0x62, 0x56, 0x08, 0x13, 0x4c, 0xc8, 0x42, 0x69, + 0x20, 0x00, 0x00, 0x8d, 0x53, 0x58, 0xc0, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 9216 +// COMPRESSED SIZE (bytes): 7363 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_image_prod_data[] = +{ + 0xed, 0xd9, 0x43, 0x74, 0x1d, 0x00, 0xb7, 0xb0, 0xe1, 0xd8, 0x8d, 0x6d, 0x9e, 0x18, 0x0d, 0x1b, + 0xe3, 0xc4, 0xb6, 0x9d, 0x34, 0x3e, 0xf1, 0x09, 0x1a, 0xdb, 0xb6, 0xcd, 0x13, 0xdb, 0x36, 0x1a, + 0x9b, 0x4d, 0x83, 0xc6, 0x6a, 0xdc, 0x24, 0xff, 0x3f, 0xfe, 0xc6, 0x77, 0xad, 0x3b, 0xb8, 0xdf, + 0x33, 0xdb, 0xe3, 0xbd, 0xd6, 0x3b, 0xd8, 0x1b, 0x0a, 0xea, 0xff, 0xb6, 0xee, 0x7b, 0x53, 0x48, + 0x2a, 0xd5, 0x21, 0x2f, 0xce, 0x82, 0x19, 0xe8, 0x43, 0x9f, 0x6a, 0x74, 0x7f, 0x91, 0x1d, 0x39, + 0x04, 0x66, 0xd8, 0xcd, 0x50, 0x7d, 0x69, 0xa9, 0x91, 0xaf, 0x2f, 0xff, 0xd7, 0x41, 0x59, 0x1c, + 0xcd, 0xf9, 0x2b, 0x57, 0x6e, 0xc9, 0x32, 0xe5, 0x9f, 0x92, 0x96, 0xbf, 0xed, 0x47, 0xb2, 0xce, + 0x38, 0x07, 0x81, 0x54, 0x95, 0x68, 0x24, 0x79, 0xfd, 0xbb, 0x4b, 0x81, 0x05, 0x62, 0x6c, 0x6d, + 0x95, 0x2c, 0x29, 0x4f, 0x89, 0xf6, 0xf3, 0x38, 0x9f, 0x32, 0xfd, 0x3f, 0x70, 0x91, 0x00, 0xda, + 0x0f, 0xc9, 0x0a, 0x49, 0xeb, 0x5d, 0xa4, 0xb7, 0x9a, 0x5d, 0x91, 0x67, 0x9e, 0x74, 0x8d, 0xbb, + 0x82, 0x1e, 0xde, 0xb4, 0x33, 0x54, 0x4e, 0xe6, 0x6f, 0x35, 0x62, 0x29, 0x56, 0x55, 0x08, 0x33, + 0xc2, 0x5b, 0x3a, 0x45, 0xbe, 0x9f, 0xc2, 0x7b, 0xd0, 0x60, 0xb6, 0x76, 0x03, 0x1c, 0x64, 0xe6, + 0xd5, 0x05, 0x78, 0x8a, 0xf8, 0x4c, 0x83, 0xa2, 0x7f, 0xb8, 0xb2, 0x05, 0x51, 0xe4, 0xb6, 0x77, + 0x87, 0xdb, 0x7c, 0xb5, 0xf2, 0x13, 0xc0, 0xaa, 0x9a, 0x6d, 0x70, 0x70, 0x77, 0x12, 0x9f, 0x88, + 0xca, 0xae, 0x82, 0x8b, 0xa8, 0x76, 0xd5, 0xe1, 0x3c, 0x64, 0x30, 0x06, 0x61, 0x4f, 0xdb, 0x05, + 0x64, 0xde, 0x58, 0xe1, 0xda, 0x7d, 0xbd, 0x7a, 0xec, 0x71, 0x31, 0xeb, 0x70, 0xd8, 0x5a, 0x7a, + 0x3a, 0xfa, 0xb5, 0x6a, 0x5d, 0x16, 0x95, 0xae, 0x0e, 0x2a, 0xb7, 0x48, 0x75, 0xf8, 0xa7, 0x0a, + 0xe4, 0xb2, 0xa5, 0x50, 
0xba, 0x1f, 0xd7, 0xdc, 0x5b, 0x46, 0x21, 0xc5, 0x37, 0x37, 0x14, 0x6d, + 0xe6, 0xeb, 0xc9, 0x1f, 0x25, 0x1e, 0x97, 0x92, 0x75, 0x15, 0x83, 0xab, 0x5f, 0x9c, 0x20, 0x69, + 0xce, 0x7b, 0x69, 0xd2, 0x66, 0x33, 0xec, 0xc5, 0xf2, 0xf6, 0xb0, 0xd2, 0xb5, 0x92, 0xe7, 0x70, + 0x6e, 0x14, 0x70, 0x47, 0xb1, 0xe4, 0x26, 0x40, 0x16, 0xdb, 0xfa, 0xa0, 0x17, 0x9a, 0xc3, 0x9a, + 0xf3, 0x23, 0xb6, 0x5c, 0x3c, 0xc2, 0xe0, 0x19, 0x3c, 0x6d, 0x19, 0x8d, 0xab, 0x3c, 0x32, 0x01, + 0x51, 0xb9, 0x85, 0xe5, 0x1a, 0xfb, 0xbe, 0x33, 0x35, 0x64, 0xe8, 0x21, 0x4a, 0xf7, 0x97, 0x87, + 0xd7, 0x4e, 0xc8, 0xdd, 0x4b, 0x6d, 0x71, 0x76, 0x37, 0xd2, 0x73, 0x72, 0x5b, 0xdd, 0x66, 0xb6, + 0x1d, 0x26, 0x39, 0xf8, 0xdf, 0xcf, 0x6a, 0xb8, 0x49, 0xde, 0x74, 0xd1, 0x76, 0xb2, 0x6a, 0x0e, + 0x2f, 0x54, 0xcd, 0x41, 0x4e, 0x3a, 0x0c, 0xff, 0x68, 0x70, 0x38, 0xc1, 0x23, 0x6a, 0xa3, 0x7d, + 0x1e, 0xcb, 0xc3, 0xa5, 0x00, 0x77, 0xa1, 0xd2, 0x99, 0x4d, 0x7b, 0xa3, 0x87, 0x1c, 0x21, 0xb6, + 0x31, 0x54, 0x51, 0x2b, 0xe9, 0x2f, 0x23, 0x11, 0xbc, 0xa7, 0xce, 0xe3, 0xa0, 0x71, 0xbd, 0x7a, + 0xb3, 0x26, 0xfe, 0x8b, 0xc2, 0x6a, 0xc8, 0xfa, 0x88, 0x91, 0xf2, 0xab, 0x11, 0xba, 0x86, 0x6c, + 0x97, 0x38, 0x46, 0x8d, 0xe3, 0xc9, 0x3e, 0xc6, 0x5e, 0x83, 0x26, 0x7d, 0xb5, 0x8a, 0x0e, 0x01, + 0xc1, 0x91, 0xbe, 0xa6, 0x16, 0x7d, 0x9b, 0x05, 0xde, 0x2b, 0xe0, 0x99, 0x92, 0xaa, 0x39, 0x27, + 0xc9, 0x21, 0x1d, 0x4c, 0xfb, 0xb0, 0xf4, 0xf7, 0x23, 0xaa, 0x7a, 0x73, 0x9b, 0x2e, 0x06, 0xae, + 0x2d, 0x50, 0x39, 0xe4, 0x7c, 0x30, 0xaa, 0x2f, 0x1e, 0xee, 0x7d, 0x6f, 0xf7, 0x9b, 0xa2, 0x75, + 0xf3, 0x8b, 0x06, 0xe7, 0xa0, 0xa6, 0x70, 0xd6, 0xb5, 0x0e, 0x83, 0x78, 0x3a, 0xa8, 0xa9, 0x46, + 0x69, 0xc9, 0x50, 0x29, 0xd8, 0x9c, 0x65, 0x45, 0xe8, 0xce, 0x9f, 0x43, 0x86, 0xe0, 0x6f, 0x49, + 0x5a, 0xda, 0xef, 0x11, 0xf9, 0xd4, 0x80, 0xc7, 0xba, 0x8e, 0xfb, 0xba, 0x8a, 0xda, 0x28, 0x32, + 0x8b, 0x36, 0x2e, 0xed, 0xd3, 0x42, 0x77, 0x88, 0x8f, 0x99, 0x8b, 0xa6, 0xeb, 0x27, 0x78, 0x9f, + 0xba, 0x77, 0x2b, 0x05, 0x4f, 0x02, 0x93, 0x2d, 0x20, 0xd3, 0x5d, 0xe7, 0x29, 0x9f, 0x47, 0x4d, + 0xfb, 0x4f, 0xa4, 0x55, 0x05, 0x2d, 0xc9, 0xfd, 0x22, 0x6a, 0x41, 0xa8, 0xb5, 0x58, 0xad, 0x85, + 0x29, 0x5c, 0x80, 0x91, 0x74, 0xc3, 0xd4, 0x9e, 0x3a, 0x71, 0xc0, 0x8b, 0x65, 0xeb, 0x4d, 0x9d, + 0x9e, 0xba, 0xd2, 0x85, 0xd8, 0xa4, 0xcf, 0xd5, 0x84, 0x9e, 0x55, 0x4e, 0x71, 0x65, 0xf4, 0xa9, + 0xc6, 0xb2, 0xc2, 0x5a, 0xd8, 0xdf, 0xd4, 0xe4, 0x19, 0x00, 0x9d, 0x29, 0x7e, 0x4d, 0x43, 0x08, + 0xe4, 0x6b, 0x64, 0x2e, 0x36, 0x15, 0xb9, 0x40, 0xa7, 0x05, 0x42, 0xb0, 0xba, 0x78, 0xd4, 0x87, + 0x6d, 0xc1, 0x3e, 0x29, 0xd5, 0x52, 0x48, 0x14, 0x96, 0x80, 0xc9, 0x5d, 0xa2, 0x9a, 0x47, 0xcb, + 0xae, 0xf5, 0x9e, 0xc8, 0xe6, 0x99, 0x44, 0x7c, 0x58, 0xc7, 0xcf, 0x8b, 0x05, 0x02, 0x41, 0x0f, + 0xf8, 0xde, 0xf2, 0x10, 0xae, 0x38, 0x76, 0x20, 0x37, 0xa9, 0xb2, 0x0a, 0x48, 0x36, 0x2e, 0xdb, + 0xa6, 0x2f, 0x20, 0x44, 0x22, 0x3c, 0x62, 0xbe, 0xa1, 0x48, 0x36, 0xd6, 0xc7, 0x14, 0x85, 0x35, + 0x13, 0xc7, 0x0c, 0xbe, 0x53, 0x97, 0xf1, 0xe3, 0xa2, 0xc1, 0x5b, 0xe4, 0xa7, 0x48, 0x2f, 0xd5, + 0xb5, 0x88, 0x3d, 0x56, 0x36, 0x25, 0xdf, 0xb5, 0x30, 0xbd, 0x75, 0x31, 0x61, 0x0a, 0x00, 0x81, + 0x34, 0xb2, 0xcf, 0x60, 0x44, 0xe5, 0x87, 0xd1, 0x0a, 0x86, 0xb4, 0xeb, 0xb3, 0x3a, 0x51, 0xa8, + 0xd3, 0x4e, 0xee, 0x67, 0xb5, 0x6f, 0xa2, 0xde, 0x5a, 0x51, 0x98, 0xc8, 0xd5, 0xe6, 0xb1, 0x17, + 0xb9, 0x35, 0xe1, 0x8e, 0xbc, 0x20, 0x53, 0x09, 0x2b, 0xbf, 0x1d, 0x9b, 0x66, 0x46, 0xaf, 0x41, + 0x77, 0xcb, 0x8b, 0x8d, 0xab, 0x95, 0xb8, 0x1c, 0xb5, 0x3f, 0xf8, 0x76, 0xed, 0xdf, 0x64, 0xea, + 0x51, 0xa0, 0xaa, 0x5b, 0x55, 0xc4, 0x78, 0x05, 
0xfc, 0xef, 0x53, 0x34, 0xc9, 0x48, 0x42, 0xbd, + 0x20, 0x03, 0xfa, 0xfa, 0x6d, 0xe6, 0x6b, 0x31, 0xf5, 0x38, 0x53, 0xdf, 0x7b, 0xa2, 0x3b, 0x44, + 0x02, 0x95, 0xe4, 0x88, 0xec, 0xf7, 0x57, 0x7d, 0x96, 0x4d, 0x14, 0x85, 0x5b, 0xb2, 0x1d, 0xc5, + 0x62, 0x9c, 0xcf, 0x8e, 0xde, 0xe9, 0xf4, 0x92, 0xc6, 0x80, 0x2c, 0xb9, 0xde, 0x63, 0xb7, 0x1d, + 0xf3, 0x9b, 0x44, 0x17, 0xea, 0x06, 0x94, 0xa5, 0xc1, 0xa5, 0xc7, 0xbc, 0x71, 0x69, 0x6e, 0x6b, + 0x7a, 0x44, 0xcd, 0x11, 0x89, 0x3f, 0x23, 0xb1, 0x98, 0x25, 0x63, 0x42, 0xed, 0x06, 0x7d, 0xf4, + 0x6c, 0xfd, 0x4a, 0x4c, 0xf4, 0x3c, 0xd9, 0xbf, 0x51, 0xab, 0x34, 0x30, 0x63, 0x35, 0xd0, 0x90, + 0x2b, 0xd2, 0x1c, 0x01, 0x6d, 0x95, 0x3d, 0xe2, 0x95, 0x74, 0xd1, 0x24, 0xba, 0xee, 0x2c, 0xf6, + 0x67, 0x63, 0xd0, 0xdd, 0xfd, 0xf5, 0x66, 0x18, 0x86, 0xb5, 0x3c, 0xdd, 0xf9, 0x5c, 0x0c, 0x42, + 0x3f, 0x83, 0x3a, 0x3e, 0xe5, 0xb3, 0xa7, 0xb0, 0x89, 0x69, 0x3a, 0x0f, 0xcb, 0xc3, 0x1d, 0x72, + 0xcd, 0xb3, 0x74, 0xf1, 0x59, 0xfe, 0x01, 0x2c, 0x5e, 0x5e, 0x23, 0x68, 0x13, 0x49, 0x00, 0xe3, + 0xa6, 0x4e, 0xed, 0xfc, 0x6d, 0xda, 0x0e, 0x83, 0xc7, 0x2f, 0x42, 0xf8, 0x68, 0x2b, 0x85, 0x25, + 0xaa, 0x01, 0x11, 0xc1, 0x98, 0x50, 0xf7, 0x88, 0x43, 0x1a, 0x36, 0x69, 0x5b, 0x6b, 0x72, 0x93, + 0x3f, 0x3d, 0x05, 0x30, 0xa3, 0xeb, 0xf5, 0x3e, 0x8f, 0x64, 0xdd, 0x16, 0xf4, 0xa3, 0x0d, 0x92, + 0x5d, 0xb0, 0x6d, 0x2f, 0x05, 0xb6, 0x86, 0xae, 0xa9, 0x14, 0x05, 0x23, 0x24, 0xfd, 0x6e, 0x2e, + 0x67, 0xa0, 0x1b, 0x1d, 0x07, 0x12, 0x7a, 0xdd, 0xc8, 0x2a, 0xf7, 0x9a, 0x39, 0x73, 0xba, 0x73, + 0xe3, 0xfe, 0x45, 0x9b, 0x79, 0x15, 0x7c, 0x8e, 0x1c, 0x56, 0x3f, 0xf3, 0x2f, 0xf1, 0x8e, 0xdb, + 0x90, 0xab, 0x45, 0x98, 0x3c, 0xa7, 0xe0, 0x46, 0x27, 0x96, 0x0e, 0x45, 0x51, 0x15, 0x21, 0x59, + 0x20, 0x0a, 0x63, 0x3e, 0xad, 0x32, 0x96, 0x99, 0xeb, 0x7f, 0xd2, 0x0b, 0xf5, 0xb2, 0xbd, 0xc6, + 0xeb, 0x66, 0x6e, 0x29, 0xd4, 0x67, 0x5a, 0x7a, 0x47, 0x5e, 0x36, 0x8a, 0x01, 0x93, 0xe7, 0xa0, + 0x2b, 0xe0, 0x84, 0x5d, 0x57, 0xe5, 0x5b, 0x19, 0x0d, 0xf7, 0x89, 0x47, 0xcd, 0x62, 0xfb, 0x0f, + 0xa0, 0x1e, 0xe8, 0xd0, 0x48, 0x54, 0x24, 0xd9, 0x44, 0x0c, 0x28, 0x47, 0x3e, 0x93, 0x69, 0xd7, + 0x00, 0x82, 0x4d, 0xab, 0xab, 0x64, 0x18, 0x42, 0x23, 0x0e, 0x9e, 0x91, 0xb1, 0xc6, 0x97, 0x81, + 0xd0, 0x44, 0x89, 0x26, 0xc3, 0x90, 0x75, 0x5a, 0xc4, 0x77, 0x5e, 0xb6, 0x30, 0xe0, 0xd3, 0xb5, + 0x08, 0x4e, 0x68, 0xd5, 0x5e, 0xa6, 0x96, 0x8f, 0xab, 0xdc, 0xa5, 0x66, 0xf2, 0x6c, 0x62, 0x9b, + 0xa4, 0xeb, 0x90, 0x53, 0xdc, 0x10, 0x54, 0xa8, 0x93, 0xa1, 0xb7, 0xbc, 0x89, 0x5e, 0x4d, 0xae, + 0xe7, 0x79, 0xa2, 0x5a, 0x71, 0xcd, 0x80, 0xfd, 0x78, 0x8c, 0x4e, 0xce, 0x10, 0x19, 0x9d, 0x45, + 0xae, 0x03, 0xf3, 0x38, 0xd7, 0x62, 0xf5, 0xb5, 0x3c, 0xd9, 0x8a, 0x4a, 0x03, 0x35, 0x65, 0xd9, + 0x94, 0x67, 0xbc, 0xb9, 0x6c, 0xb3, 0xad, 0x95, 0x45, 0x31, 0xfb, 0xdd, 0x61, 0xce, 0x81, 0x48, + 0x7a, 0xf1, 0xd2, 0x42, 0x2e, 0x7e, 0xc7, 0x5e, 0xf8, 0x74, 0x7c, 0xee, 0x2e, 0xc4, 0xdf, 0x58, + 0x74, 0x83, 0x95, 0xdf, 0xf0, 0x49, 0xd4, 0xe3, 0x92, 0x38, 0xae, 0x68, 0x44, 0xa7, 0x3f, 0xda, + 0x89, 0x05, 0xa2, 0x88, 0xe4, 0x0c, 0xb6, 0xb5, 0x97, 0x4a, 0x99, 0x3e, 0x2f, 0x5b, 0x70, 0x3c, + 0xe1, 0x85, 0xb7, 0xd5, 0xc8, 0xe0, 0xff, 0xd8, 0x2d, 0x42, 0xfc, 0x09, 0x48, 0x8d, 0x0c, 0xf9, + 0x26, 0x81, 0xa5, 0xe8, 0x4a, 0xaa, 0xb4, 0xf3, 0xb8, 0x56, 0xa7, 0x70, 0x1e, 0xa7, 0x08, 0x2d, + 0x78, 0x29, 0x20, 0xaf, 0xc8, 0x3b, 0x7c, 0xf9, 0xfe, 0xc5, 0xb6, 0x34, 0xd4, 0xff, 0x59, 0x33, + 0xe7, 0x5f, 0x6f, 0x32, 0xcc, 0x47, 0xb4, 0xe9, 0x39, 0x61, 0x82, 0x0d, 0x5d, 0x34, 0x81, 0xd5, + 0x38, 0xdb, 0xfe, 0x0c, 0x0b, 0xbf, 0xe6, 0x9c, 0x29, 0x47, 0x44, 0xa8, 
0x46, 0xc2, 0xca, 0x67, + 0xaf, 0x0f, 0xcf, 0xe5, 0x8d, 0xc6, 0x34, 0xab, 0xd5, 0x92, 0xdc, 0x6b, 0xf7, 0x03, 0x49, 0x4c, + 0xb7, 0x75, 0xb8, 0x1f, 0x89, 0x47, 0xd2, 0x85, 0xba, 0xe2, 0xd5, 0x3e, 0xeb, 0xf9, 0xb8, 0xf4, + 0xc7, 0x2d, 0x6f, 0x14, 0x24, 0xf4, 0xb8, 0x60, 0x2a, 0x01, 0x52, 0x89, 0x77, 0x98, 0x9f, 0x66, + 0x56, 0x9a, 0x1b, 0xfb, 0x60, 0x39, 0x8e, 0x2a, 0xc9, 0x05, 0x4c, 0xa4, 0xa6, 0xac, 0xf3, 0xc1, + 0xdd, 0x7b, 0x66, 0x34, 0xd0, 0x59, 0x69, 0x1b, 0x1b, 0xc5, 0xeb, 0x52, 0x92, 0xb0, 0xd7, 0x12, + 0xc9, 0xc6, 0xa3, 0x7a, 0x82, 0x42, 0x42, 0x05, 0x59, 0x9a, 0x56, 0x6e, 0xc8, 0xf5, 0x55, 0xc0, + 0xe6, 0xb0, 0xf9, 0x03, 0x38, 0x3b, 0x23, 0xba, 0xb6, 0xd6, 0x66, 0xfc, 0x7a, 0xe1, 0xd9, 0xa6, + 0x31, 0xd2, 0x84, 0x82, 0x76, 0x5b, 0x01, 0x93, 0x81, 0xeb, 0x8d, 0xf8, 0xf0, 0x95, 0x29, 0x7e, + 0xeb, 0x14, 0x64, 0xfe, 0xb4, 0x54, 0x90, 0xbb, 0x26, 0xb1, 0x94, 0x24, 0x45, 0xab, 0x11, 0x7f, + 0x28, 0xe2, 0x79, 0xb8, 0x40, 0xa5, 0x14, 0xb5, 0x3f, 0x81, 0xe7, 0xa9, 0xae, 0x5d, 0x9a, 0xc6, + 0x2f, 0xf1, 0x18, 0x3d, 0xa9, 0x5b, 0x8f, 0x3f, 0x88, 0x67, 0xb4, 0xde, 0xbc, 0xd7, 0x5b, 0x6f, + 0xef, 0xea, 0x76, 0x59, 0xca, 0x6a, 0x06, 0xee, 0x99, 0x83, 0x05, 0xb4, 0x7e, 0x7d, 0xaa, 0xd8, + 0x11, 0x24, 0x78, 0x3e, 0x25, 0x89, 0xfa, 0xc5, 0x8b, 0x22, 0x45, 0x49, 0x8b, 0xb5, 0x84, 0x90, + 0x7f, 0x4b, 0xe8, 0xba, 0x2f, 0x02, 0xb5, 0x2d, 0xf7, 0xbd, 0xdc, 0xf8, 0x90, 0x68, 0x33, 0x24, + 0xb5, 0x71, 0x31, 0x6d, 0x6d, 0x07, 0xb8, 0x39, 0xfd, 0x7e, 0xf3, 0x50, 0xdd, 0xbc, 0x87, 0x48, + 0x15, 0x20, 0x7b, 0x2a, 0xb5, 0x21, 0x94, 0x7d, 0x8b, 0xfb, 0xdd, 0xbf, 0x34, 0xfc, 0xda, 0xe9, + 0x15, 0x27, 0x93, 0x9e, 0xdf, 0x9e, 0x1d, 0x05, 0xe8, 0x39, 0xf0, 0x1d, 0x07, 0xd9, 0x02, 0x7c, + 0xd3, 0xeb, 0x0f, 0x9f, 0xc0, 0xa7, 0x76, 0x16, 0x01, 0x0b, 0x5d, 0xe5, 0x17, 0xe1, 0x68, 0xae, + 0x0d, 0x26, 0x53, 0x21, 0x74, 0xc9, 0x5e, 0xed, 0xb5, 0xb7, 0xe7, 0x28, 0x07, 0x98, 0x1c, 0x82, + 0x6a, 0x5f, 0x18, 0x72, 0xdb, 0x42, 0xfa, 0x40, 0x59, 0x43, 0xb7, 0xff, 0xb9, 0x1f, 0x93, 0x16, + 0xa0, 0x9d, 0x37, 0x53, 0xb1, 0x31, 0x63, 0xd7, 0x23, 0x73, 0x83, 0xff, 0x79, 0xd8, 0x57, 0x0f, + 0x60, 0xbb, 0x1e, 0xf5, 0xfe, 0x48, 0x31, 0x2b, 0xe8, 0xd0, 0xed, 0x60, 0xd0, 0xf2, 0x4a, 0xc6, + 0xf5, 0x54, 0x26, 0x97, 0x41, 0x12, 0xb7, 0x89, 0xc7, 0xc2, 0x78, 0x58, 0x0d, 0x9a, 0xb7, 0x46, + 0x34, 0xa1, 0x92, 0x57, 0x33, 0x08, 0xab, 0xa2, 0x46, 0x27, 0x49, 0xfe, 0xf5, 0xbd, 0x8e, 0xe4, + 0x19, 0x90, 0xc7, 0x36, 0x8a, 0x3c, 0xf5, 0x4b, 0xcc, 0x83, 0xf5, 0x5a, 0xdd, 0xa5, 0x39, 0x0b, + 0xf0, 0x65, 0x50, 0xea, 0x25, 0xea, 0x28, 0xde, 0xf1, 0x8d, 0x8e, 0x93, 0x7d, 0xc9, 0x03, 0xea, + 0xdf, 0xe2, 0xa9, 0x78, 0xd4, 0x3c, 0x38, 0xc2, 0xe1, 0x90, 0x43, 0xcd, 0x44, 0xc7, 0x0c, 0x1c, + 0xde, 0x7f, 0xd9, 0xd1, 0x6f, 0x76, 0xa1, 0x2e, 0x8f, 0x18, 0x25, 0x92, 0xa3, 0x3b, 0x97, 0xfb, + 0xa2, 0x75, 0xe6, 0xd6, 0x4e, 0x2e, 0x45, 0x1a, 0x2e, 0x3e, 0xc2, 0x66, 0xe1, 0x47, 0x22, 0xa6, + 0x7c, 0x6b, 0x2a, 0x26, 0xd2, 0x86, 0x46, 0xa6, 0x2f, 0x82, 0x33, 0x09, 0x42, 0x15, 0x7a, 0xaf, + 0x24, 0xb6, 0x25, 0x52, 0x4f, 0x0d, 0x3a, 0x51, 0x7b, 0x1c, 0x07, 0x36, 0xbd, 0xa6, 0x0d, 0x7f, + 0x9d, 0x52, 0x31, 0x2b, 0x51, 0x15, 0xb8, 0x7e, 0x9f, 0xd1, 0x82, 0x76, 0x6e, 0x5f, 0x99, 0xe0, + 0x81, 0xc3, 0x33, 0x74, 0x74, 0x6e, 0xfe, 0xce, 0xd5, 0x60, 0x2a, 0x39, 0x7d, 0x53, 0xff, 0xea, + 0xcc, 0x24, 0xae, 0xfd, 0xf4, 0x44, 0x1d, 0x68, 0x2d, 0xd1, 0xb1, 0xff, 0xd5, 0xff, 0x53, 0xf3, + 0xb5, 0x97, 0xb8, 0xc6, 0x01, 0x3b, 0x6a, 0xf2, 0xe2, 0x60, 0xe6, 0xfa, 0xa9, 0xf7, 0x7e, 0x32, + 0xe7, 0x41, 0x87, 0x6f, 0x26, 0x75, 0x4e, 0x56, 0xb5, 0x74, 0x95, 0xed, 0xb9, 0x75, 0x75, 0x9d, + 
0x2b, 0x6e, 0xcc, 0xb2, 0x9a, 0x23, 0x72, 0x37, 0xef, 0x75, 0x58, 0x32, 0x71, 0x04, 0x2f, 0x41, + 0x3a, 0x89, 0x38, 0x50, 0xc4, 0xbd, 0xcc, 0x35, 0x67, 0x68, 0xe4, 0xb9, 0x74, 0xce, 0x76, 0x48, + 0x83, 0x35, 0xcd, 0xe6, 0xa8, 0xe4, 0x44, 0xcf, 0x67, 0x91, 0xb0, 0x87, 0x5f, 0xef, 0x92, 0x3a, + 0xaf, 0xf2, 0x04, 0x9a, 0x4f, 0x5c, 0xe6, 0xf5, 0x88, 0x19, 0xfd, 0x28, 0xd5, 0x41, 0x7e, 0xf9, + 0x1e, 0x60, 0x22, 0x34, 0x8e, 0x1f, 0xdd, 0xc2, 0x72, 0xb9, 0x56, 0x51, 0xb2, 0x62, 0x86, 0x08, + 0x5a, 0xe0, 0x28, 0x6e, 0xff, 0x53, 0x8a, 0x10, 0x99, 0x45, 0xb3, 0x45, 0xbc, 0xaa, 0x01, 0x2e, + 0xe4, 0x6a, 0x20, 0xb2, 0xa5, 0xb3, 0xec, 0x1f, 0x81, 0xe6, 0xc6, 0x0c, 0xce, 0x61, 0x23, 0x0b, + 0x74, 0x7e, 0x69, 0xb8, 0xf8, 0x1d, 0xd8, 0x10, 0x2f, 0x63, 0x9c, 0x91, 0xe4, 0xdd, 0x98, 0xe7, + 0x9e, 0xcf, 0xc7, 0xae, 0xf3, 0xcb, 0x46, 0xd9, 0xb6, 0xee, 0x6e, 0x44, 0xa9, 0xe5, 0xe4, 0x9a, + 0x50, 0x48, 0x81, 0xd0, 0x05, 0x45, 0x2d, 0xa9, 0x8a, 0xcf, 0x3a, 0x4f, 0x89, 0xf0, 0xb1, 0x9b, + 0x72, 0x07, 0xb2, 0xb4, 0x2e, 0xaa, 0x0d, 0xcb, 0x4f, 0xc5, 0xec, 0xf2, 0x32, 0x18, 0xa0, 0x98, + 0xf9, 0x7b, 0xe0, 0x2d, 0x46, 0xe6, 0xa0, 0x88, 0x8d, 0x79, 0x85, 0x79, 0x3c, 0xef, 0xc2, 0x8e, + 0x52, 0xec, 0x09, 0x5a, 0x78, 0xf8, 0x49, 0xe3, 0xf5, 0x59, 0xdf, 0xf0, 0xc4, 0x96, 0x4f, 0x42, + 0x79, 0xbd, 0x8b, 0xc2, 0x8f, 0x28, 0x92, 0xce, 0xb3, 0x18, 0x48, 0x10, 0xac, 0x87, 0x95, 0x42, + 0x42, 0xc2, 0x42, 0xc1, 0x1d, 0xb1, 0x52, 0xb1, 0x83, 0x2b, 0x72, 0xf9, 0x61, 0x4d, 0x16, 0xd6, + 0x4e, 0xca, 0xc8, 0x7a, 0x27, 0x67, 0xd3, 0xcb, 0xb3, 0xf6, 0xa4, 0x54, 0x9c, 0x7e, 0x6a, 0xb1, + 0xb9, 0x65, 0x5d, 0x87, 0x58, 0x29, 0x63, 0x44, 0x00, 0x3b, 0x7d, 0x1a, 0x67, 0x96, 0xb8, 0xbe, + 0x8d, 0x50, 0xe8, 0x3b, 0xa4, 0x8e, 0xcb, 0x7b, 0x5f, 0xbe, 0x61, 0xf4, 0x05, 0xd1, 0xa9, 0x34, + 0x2d, 0xe7, 0x77, 0x3b, 0x16, 0x35, 0x08, 0xa6, 0x35, 0xae, 0x12, 0x6d, 0x7c, 0x52, 0xb2, 0x5e, + 0x5f, 0x4d, 0xdf, 0x9f, 0x11, 0xe7, 0xa0, 0xf9, 0x49, 0x5b, 0x1e, 0x6a, 0xae, 0x4b, 0xd4, 0xa5, + 0xca, 0xc3, 0x53, 0xa5, 0xdd, 0x41, 0x4f, 0xe6, 0xc9, 0x4f, 0x2d, 0xc7, 0x96, 0x6a, 0x83, 0x02, + 0xa2, 0xbd, 0x03, 0xff, 0xd1, 0xa1, 0x96, 0xd9, 0x4b, 0x17, 0x2e, 0x1c, 0x87, 0x6d, 0x07, 0xc4, + 0xa8, 0xdb, 0x41, 0x29, 0x93, 0xbf, 0x49, 0x47, 0xff, 0xda, 0x7b, 0x95, 0xbe, 0xe4, 0x3b, 0x74, + 0xc0, 0x03, 0xea, 0xae, 0x2f, 0x7d, 0x9b, 0xfc, 0x4b, 0x19, 0x50, 0xd2, 0x3f, 0xec, 0x39, 0x48, + 0xb5, 0x29, 0xe1, 0xad, 0x43, 0x7b, 0xd2, 0x24, 0x4e, 0x45, 0x39, 0xa0, 0xa1, 0xb3, 0x03, 0xcb, + 0xd8, 0xbf, 0x1e, 0x62, 0x16, 0x2e, 0x36, 0x8c, 0xbd, 0xac, 0xf8, 0xeb, 0x6b, 0x2f, 0x84, 0x9f, + 0xdb, 0xf4, 0xba, 0xa9, 0xdd, 0xc3, 0xa8, 0x51, 0x37, 0x3e, 0x58, 0xfa, 0x85, 0x2c, 0x22, 0xc0, + 0x03, 0x5d, 0x49, 0x64, 0x9b, 0xa6, 0x59, 0x60, 0x40, 0x6d, 0x7f, 0x55, 0x0d, 0xe2, 0x42, 0xd6, + 0x2d, 0x97, 0xd0, 0xee, 0x5c, 0xf9, 0xab, 0x66, 0xbb, 0xdc, 0x5a, 0xa3, 0xe6, 0x2c, 0x6d, 0x32, + 0xe2, 0xb6, 0x68, 0xe4, 0xd3, 0xf2, 0xbc, 0xec, 0xc4, 0xea, 0x07, 0x98, 0xb2, 0x6d, 0xa2, 0x5e, + 0x34, 0x6c, 0xbc, 0x3f, 0xf7, 0x28, 0xcf, 0x84, 0xac, 0xf3, 0x7b, 0xb3, 0x6a, 0x55, 0xcd, 0x04, + 0x4f, 0x52, 0xe1, 0xba, 0x38, 0xf6, 0x5f, 0xfc, 0xa5, 0xcc, 0xac, 0x9e, 0xce, 0x73, 0x9a, 0x0b, + 0x49, 0x59, 0xd6, 0x78, 0x90, 0xcd, 0xb8, 0x44, 0x5a, 0x36, 0x7c, 0x6e, 0xc3, 0x38, 0xa4, 0xff, + 0xe6, 0x00, 0x30, 0xf9, 0x4d, 0x78, 0x1f, 0xba, 0x61, 0xf2, 0xc1, 0x2c, 0x62, 0xfb, 0x88, 0xb2, + 0xa7, 0xc5, 0x13, 0x9d, 0xe2, 0xc7, 0xa4, 0x27, 0xf9, 0xc7, 0x27, 0x66, 0x30, 0xf1, 0xb8, 0xba, + 0xee, 0x57, 0xdc, 0x28, 0xbb, 0x1e, 0xef, 0x23, 0x24, 0xe5, 0x51, 0x01, 0xec, 0x2c, 0xc3, 0xc7, + 0x09, 0x0d, 0x24, 0xd8, 
0x37, 0xb7, 0xae, 0x78, 0x7b, 0x37, 0x84, 0xd6, 0x9e, 0x41, 0x3b, 0xee, + 0xa7, 0xf0, 0x97, 0x13, 0x67, 0x91, 0x9c, 0x9f, 0x97, 0x19, 0x34, 0x86, 0xa7, 0x7a, 0x02, 0x68, + 0x16, 0x9b, 0x86, 0x9f, 0xfd, 0xf4, 0x7a, 0xbc, 0xcc, 0x24, 0x93, 0xb9, 0x8d, 0x5a, 0x98, 0x42, + 0x39, 0x83, 0xc6, 0x67, 0x95, 0xf3, 0x33, 0x24, 0xc0, 0xbc, 0x72, 0x3e, 0xf5, 0xce, 0x3c, 0x1c, + 0xaf, 0x7e, 0x42, 0x91, 0xde, 0x8b, 0xed, 0x8f, 0xf1, 0x34, 0x5f, 0x4d, 0x2a, 0x4a, 0x2f, 0x6b, + 0x07, 0x20, 0xe4, 0xf6, 0x09, 0xee, 0x2a, 0x78, 0xa0, 0xd1, 0x45, 0xca, 0x4a, 0x06, 0xd3, 0x41, + 0xbe, 0x8e, 0x3b, 0x5d, 0x1b, 0xb7, 0xaa, 0x96, 0x4f, 0xfb, 0x39, 0xd7, 0xd4, 0xdd, 0x2c, 0x3a, + 0x82, 0x80, 0xc3, 0xe6, 0xef, 0xa4, 0x4b, 0xc7, 0x12, 0x07, 0x23, 0xd1, 0x97, 0x51, 0xb7, 0xe6, + 0xf4, 0xee, 0x36, 0x3c, 0xcc, 0x70, 0x50, 0x47, 0x5e, 0x4c, 0x47, 0xe7, 0xc8, 0xc2, 0xcc, 0xd7, + 0xa6, 0x09, 0x7d, 0xf1, 0x36, 0x36, 0xfa, 0xd5, 0x09, 0xbb, 0xb1, 0x15, 0x42, 0xe7, 0x7d, 0x71, + 0xc3, 0x3f, 0xe8, 0x96, 0x5e, 0xaa, 0x21, 0xd8, 0x7f, 0xcc, 0x11, 0x45, 0xbe, 0x31, 0x33, 0xa5, + 0xca, 0xdc, 0xd0, 0xc1, 0x48, 0xf9, 0x37, 0xb8, 0x10, 0xf7, 0x50, 0x5f, 0x62, 0xe9, 0xbe, 0xb0, + 0x89, 0xac, 0xa1, 0x54, 0xd4, 0x1b, 0x7b, 0x72, 0xad, 0xfa, 0xa5, 0x37, 0xe5, 0xd9, 0x6b, 0x54, + 0x65, 0x5a, 0xcd, 0xc9, 0x8a, 0xca, 0x07, 0x23, 0x36, 0x86, 0x5d, 0xc7, 0x2c, 0x1c, 0x66, 0x17, + 0xd0, 0x59, 0xd2, 0x2a, 0x03, 0x3b, 0xd0, 0xef, 0x68, 0x8f, 0xe7, 0x94, 0xc3, 0x30, 0xe3, 0xec, + 0xee, 0xe7, 0xdf, 0x6e, 0xd9, 0x83, 0x76, 0x57, 0xdd, 0x27, 0xa6, 0x76, 0xd3, 0xa0, 0x36, 0xba, + 0x9f, 0x9f, 0x60, 0xae, 0xce, 0x66, 0x89, 0xe1, 0x92, 0x1f, 0x1b, 0xce, 0x09, 0x50, 0x1b, 0xe0, + 0xe5, 0x51, 0xbd, 0x89, 0x1b, 0x17, 0xdc, 0x9b, 0x8f, 0x46, 0x9c, 0x0b, 0x4b, 0x99, 0x7a, 0x31, + 0x76, 0x24, 0x7e, 0x99, 0xf6, 0x4c, 0x32, 0xdf, 0x5f, 0x77, 0xc2, 0xad, 0x58, 0x83, 0xb2, 0x3c, + 0x7a, 0xee, 0x6b, 0x5f, 0xdf, 0x32, 0x96, 0xfc, 0xc6, 0xdf, 0x7d, 0x10, 0x73, 0xac, 0xaf, 0x9c, + 0x0b, 0x7a, 0xcf, 0x16, 0x45, 0xd4, 0x83, 0x22, 0x8d, 0x36, 0xe7, 0x58, 0xdc, 0x75, 0xca, 0x00, + 0xc3, 0xf9, 0x36, 0x0a, 0x13, 0xa7, 0xe7, 0x5f, 0x05, 0xa7, 0x3d, 0xc0, 0x71, 0xd9, 0xc3, 0x55, + 0x24, 0xe3, 0x89, 0x40, 0x9a, 0xda, 0x0a, 0xed, 0x4f, 0xf1, 0xab, 0xe8, 0x65, 0xab, 0xd7, 0x9c, + 0xca, 0x6e, 0xb5, 0x56, 0xc5, 0x92, 0x1c, 0xae, 0x6c, 0x1e, 0x02, 0xcc, 0x5c, 0x74, 0x0f, 0x88, + 0x09, 0xb9, 0xc3, 0x32, 0xd1, 0x29, 0x24, 0xc3, 0x0c, 0x43, 0x7b, 0x14, 0xd2, 0xb6, 0x44, 0x69, + 0x69, 0x13, 0xb3, 0x41, 0xa7, 0x3a, 0x60, 0x2d, 0xd1, 0x2d, 0x32, 0x28, 0xb6, 0x50, 0xdc, 0x15, + 0x6a, 0x08, 0x57, 0xc8, 0x00, 0xab, 0x74, 0x15, 0x9f, 0x12, 0x73, 0x3e, 0x87, 0xcf, 0x4a, 0x7f, + 0xce, 0x72, 0xd8, 0x03, 0xd3, 0xab, 0x25, 0xf3, 0xc6, 0xa3, 0x6a, 0xc8, 0x5a, 0x98, 0x56, 0xfb, + 0x83, 0x5e, 0xd0, 0x7a, 0x6f, 0xd5, 0x78, 0x57, 0x3e, 0x7c, 0xce, 0x90, 0x5f, 0x4c, 0xee, 0x75, + 0x7c, 0x58, 0x1e, 0x41, 0xf9, 0x47, 0x11, 0x0d, 0x47, 0x80, 0x5a, 0xc5, 0xe6, 0x3c, 0xa0, 0x41, + 0x9d, 0x0f, 0xa9, 0x58, 0x13, 0x6b, 0xb5, 0xaf, 0xb0, 0x5c, 0x86, 0x82, 0x8d, 0xfa, 0x6e, 0x7a, + 0xc2, 0x1d, 0xbe, 0x8a, 0x2b, 0x49, 0x88, 0x04, 0x9f, 0x4e, 0x26, 0x3b, 0x70, 0xbb, 0x7a, 0x3e, + 0x44, 0x4c, 0xae, 0x60, 0x91, 0x7e, 0x3d, 0x75, 0xb8, 0x7c, 0x3e, 0xc7, 0x61, 0x67, 0x59, 0x55, + 0x82, 0xb9, 0xf1, 0x49, 0xd2, 0x4f, 0x55, 0xdc, 0x52, 0x4c, 0xa5, 0x05, 0x91, 0xde, 0x52, 0x5e, + 0x28, 0xaa, 0x0a, 0x09, 0x30, 0xc0, 0xd4, 0x8a, 0x89, 0x39, 0x56, 0x8b, 0xbb, 0xc9, 0xdc, 0xae, + 0x4a, 0xe3, 0x08, 0x2a, 0x0a, 0x38, 0xa8, 0x46, 0xfb, 0x74, 0x84, 0x45, 0x5d, 0xa5, 0xc2, 0x8c, + 0x7d, 0x70, 0x2f, 0x8f, 0x0d, 0x74, 0x16, 0x89, 
0xbc, 0x1b, 0xff, 0xb4, 0x80, 0xc2, 0x78, 0x86, + 0xad, 0xc7, 0x17, 0xa9, 0x82, 0x7c, 0xee, 0xed, 0x9c, 0x9d, 0xf1, 0x74, 0x9e, 0x37, 0xb2, 0xd7, + 0xba, 0xfb, 0x16, 0x88, 0xff, 0x34, 0x79, 0xc3, 0x13, 0x84, 0x16, 0x77, 0x72, 0x28, 0x58, 0xeb, + 0xc8, 0x60, 0x78, 0x30, 0xcd, 0x8a, 0x5e, 0x83, 0x56, 0x8d, 0x8a, 0xc9, 0x37, 0x4e, 0xe6, 0x9b, + 0xa3, 0x59, 0xe4, 0xd3, 0x00, 0x95, 0xe3, 0xb3, 0x25, 0x58, 0x73, 0x96, 0xe7, 0x22, 0x6a, 0x48, + 0xd6, 0x37, 0x1e, 0xf2, 0x7c, 0x8e, 0xfd, 0x79, 0xa3, 0x4f, 0x44, 0xdb, 0xa4, 0xa7, 0xdc, 0x33, + 0xfb, 0x5b, 0xad, 0x9a, 0x19, 0xe8, 0x88, 0x68, 0x98, 0x67, 0xf2, 0x99, 0x3b, 0x61, 0x3b, 0xff, + 0xc3, 0xa7, 0x63, 0xcf, 0xfc, 0xb9, 0xcd, 0xc2, 0xf6, 0xf7, 0xa1, 0x13, 0xf5, 0x07, 0xe7, 0x62, + 0xf5, 0x49, 0xaf, 0xee, 0x91, 0x48, 0xa3, 0xd7, 0xef, 0xc7, 0x91, 0x71, 0x14, 0x48, 0xec, 0x99, + 0x50, 0xd0, 0x68, 0x34, 0x32, 0xda, 0xd6, 0x4c, 0x37, 0x68, 0xdd, 0x9d, 0xdf, 0x06, 0x61, 0x21, + 0xd1, 0x1a, 0x45, 0xd7, 0xba, 0x18, 0xf0, 0xb7, 0xea, 0x56, 0xf8, 0x31, 0x22, 0x5b, 0x65, 0x26, + 0x52, 0xeb, 0x1e, 0xa6, 0xae, 0x89, 0x5f, 0xa7, 0x5f, 0x70, 0xf4, 0x82, 0x48, 0xf5, 0xb3, 0x1b, + 0x03, 0x9d, 0x91, 0xdf, 0xfd, 0xdf, 0x5f, 0x0d, 0x94, 0x65, 0x99, 0x68, 0x73, 0x5e, 0x2e, 0xb2, + 0x4b, 0x5c, 0x79, 0xf1, 0x2c, 0x72, 0xfa, 0x1a, 0x62, 0xf9, 0x13, 0x62, 0x01, 0x41, 0x3e, 0xaf, + 0x6b, 0xd7, 0xde, 0x56, 0xe9, 0x2f, 0x53, 0xd9, 0x87, 0x3e, 0x05, 0xf2, 0x36, 0x38, 0xd7, 0x6c, + 0xae, 0x3d, 0xc3, 0x92, 0xa8, 0x1b, 0x38, 0x74, 0xd7, 0x72, 0xfc, 0x31, 0xa3, 0x43, 0x32, 0xd5, + 0xed, 0x33, 0x9b, 0x06, 0x3f, 0xcd, 0x7f, 0xec, 0x0c, 0x04, 0x7b, 0x6b, 0xed, 0x70, 0xb3, 0x30, + 0xdc, 0xf2, 0x47, 0xc1, 0x51, 0x35, 0x3c, 0x64, 0xec, 0x14, 0x9d, 0xf7, 0xd9, 0xe5, 0x9d, 0x12, + 0xad, 0xcd, 0xa4, 0x42, 0x02, 0x1c, 0xca, 0x9e, 0x39, 0x08, 0x0d, 0x73, 0x30, 0x9f, 0xd0, 0x7f, + 0x76, 0x59, 0xed, 0xa6, 0xc6, 0xae, 0x09, 0xe7, 0xa2, 0x3f, 0x4f, 0x20, 0xa3, 0x46, 0x53, 0x20, + 0x61, 0x86, 0xd5, 0x37, 0xa2, 0xa6, 0x11, 0x62, 0x29, 0x9b, 0xfe, 0x12, 0x44, 0x65, 0x50, 0xdf, + 0x77, 0x0b, 0x30, 0x25, 0x97, 0x05, 0x7c, 0xdd, 0xcc, 0xa7, 0xe8, 0x6f, 0xf1, 0xa1, 0xa0, 0xbc, + 0x57, 0xd9, 0x1b, 0x43, 0x1e, 0xf1, 0xcc, 0xd0, 0x7d, 0xdb, 0x0c, 0x5c, 0xe8, 0xcb, 0xcb, 0xab, + 0xa1, 0x5b, 0x2d, 0x72, 0x45, 0x3c, 0xbd, 0xd3, 0xcc, 0xd2, 0x3d, 0x67, 0x71, 0x73, 0x44, 0x75, + 0x8b, 0x73, 0x35, 0x97, 0x61, 0x1b, 0x4c, 0xd8, 0x73, 0x6c, 0xad, 0x80, 0x86, 0xa5, 0x05, 0xa6, + 0x58, 0xd9, 0x76, 0x1b, 0x78, 0x3c, 0x24, 0x38, 0xc8, 0x76, 0x75, 0xbd, 0x97, 0xa4, 0xda, 0xa0, + 0xdc, 0xb9, 0x30, 0xd5, 0x67, 0xe2, 0x54, 0x17, 0x72, 0xcc, 0xed, 0xa2, 0x84, 0x98, 0xf1, 0x4b, + 0x9e, 0xb7, 0x9e, 0xb2, 0x67, 0x1b, 0xc1, 0x67, 0xc8, 0x47, 0x80, 0xcd, 0x50, 0x05, 0x16, 0x5b, + 0xe6, 0xf1, 0xba, 0xe9, 0x0f, 0x6e, 0x8b, 0x65, 0xca, 0x6a, 0xc1, 0x48, 0x0c, 0x23, 0xd9, 0xca, + 0x45, 0x69, 0x11, 0xce, 0xc4, 0xa5, 0x2b, 0xd3, 0xd6, 0xfe, 0xab, 0xc4, 0x4a, 0xbe, 0xb4, 0x96, + 0x2b, 0x38, 0x09, 0x1a, 0x9f, 0xc5, 0x33, 0x3e, 0x1e, 0x49, 0xe3, 0xf3, 0x35, 0x25, 0x03, 0xbe, + 0x08, 0x0a, 0x22, 0x13, 0xa7, 0x78, 0xa3, 0xed, 0x31, 0x13, 0x41, 0x5d, 0xe4, 0xd8, 0xac, 0xa7, + 0x85, 0x57, 0xcb, 0x64, 0xc3, 0xda, 0x5e, 0x69, 0x77, 0x66, 0xb7, 0xc7, 0xd4, 0x48, 0x5c, 0x9d, + 0x92, 0x61, 0xc7, 0x5d, 0xa0, 0x52, 0x7c, 0x66, 0x86, 0x2b, 0x97, 0x90, 0x13, 0x1e, 0x30, 0x6d, + 0xcf, 0x3d, 0xd1, 0x81, 0x73, 0xa1, 0xa0, 0x11, 0x78, 0x9a, 0x0c, 0xb3, 0x56, 0x64, 0x66, 0xd6, + 0xbd, 0x73, 0x20, 0x1f, 0x63, 0x4e, 0x7b, 0x48, 0xbe, 0xac, 0x26, 0x84, 0xe4, 0xf4, 0xb8, 0xa5, + 0xb7, 0x99, 0xc4, 0x47, 0x0a, 0x1b, 0xab, 0xef, 0x8b, 0x39, 0x59, 0x06, 
0x86, 0x3b, 0xba, 0xed, + 0xc0, 0xa5, 0x87, 0x30, 0xa0, 0x56, 0x57, 0xd1, 0x3e, 0xf2, 0x3b, 0xaa, 0x25, 0x65, 0x32, 0xa7, + 0x8a, 0x7c, 0x91, 0xc2, 0x44, 0x00, 0x2a, 0x1d, 0xa7, 0x3d, 0x15, 0x34, 0xdd, 0xb6, 0x88, 0x5e, + 0x3a, 0x16, 0x89, 0x29, 0x33, 0x48, 0x7d, 0x1e, 0xe2, 0x0d, 0xbd, 0x2e, 0x55, 0x6d, 0x8d, 0xa6, + 0x41, 0xfb, 0x76, 0x25, 0xb3, 0x9b, 0xeb, 0x8d, 0xf1, 0x70, 0x84, 0x5e, 0xc1, 0xc2, 0xbc, 0xe4, + 0x07, 0x2c, 0x09, 0x0c, 0x93, 0x44, 0xa6, 0x16, 0x01, 0x4c, 0xdd, 0xb2, 0x16, 0xfe, 0x67, 0x7f, + 0x25, 0x5e, 0x05, 0xf2, 0xf9, 0x27, 0x09, 0xcb, 0xee, 0x7e, 0x80, 0xff, 0xd8, 0x61, 0x94, 0xf0, + 0xdf, 0x3d, 0x7f, 0xad, 0xa6, 0x23, 0x0f, 0x6c, 0x07, 0xac, 0xfe, 0xc5, 0x87, 0x4d, 0x29, 0xfa, + 0x46, 0xc0, 0x88, 0x9d, 0xb6, 0x97, 0xbc, 0x94, 0x48, 0xd7, 0xcc, 0xed, 0xc8, 0xb2, 0xb9, 0xdf, + 0x5b, 0x26, 0xee, 0x34, 0xc4, 0xf6, 0x3b, 0x7d, 0x06, 0x3b, 0x04, 0x31, 0xe2, 0xf5, 0x8a, 0x04, + 0xba, 0x3c, 0x24, 0x9d, 0xb2, 0x94, 0xe9, 0x39, 0x1e, 0xb3, 0x3c, 0xec, 0x79, 0xe3, 0x42, 0xdf, + 0xf1, 0x3f, 0x7b, 0x71, 0x0c, 0xe8, 0x00, 0x22, 0x58, 0x92, 0xd4, 0x54, 0x6d, 0xd2, 0x88, 0xf2, + 0x59, 0x56, 0x08, 0x66, 0xda, 0xcb, 0xeb, 0xbd, 0x7e, 0x18, 0x6a, 0x7e, 0xdb, 0xf7, 0x57, 0xc4, + 0x81, 0x5e, 0xa3, 0xfd, 0x89, 0xbc, 0x73, 0x03, 0xa4, 0xd2, 0x40, 0xc3, 0xe4, 0x50, 0xb8, 0xa6, + 0x50, 0xc4, 0x85, 0x8a, 0xd5, 0xf7, 0xef, 0x94, 0xef, 0xe3, 0x75, 0x77, 0x57, 0xa8, 0x9a, 0xde, + 0x71, 0xb6, 0x03, 0x8f, 0x0f, 0xce, 0x30, 0xdc, 0x60, 0x89, 0xa3, 0xbf, 0x74, 0x4f, 0x61, 0x7a, + 0x5b, 0x00, 0xcf, 0xb8, 0x49, 0x4f, 0x23, 0xa7, 0x67, 0x7d, 0xcc, 0x82, 0x36, 0xfa, 0xeb, 0x63, + 0x10, 0x7b, 0x59, 0x64, 0x0c, 0x5d, 0x1d, 0x40, 0xde, 0x27, 0xf7, 0x04, 0xc0, 0x2c, 0xa8, 0xd6, + 0xa5, 0x23, 0x4b, 0xa2, 0xe2, 0xb7, 0xf0, 0x1b, 0x37, 0xbf, 0x1f, 0x2d, 0x41, 0x9d, 0x81, 0x4c, + 0xa5, 0x30, 0x4a, 0x16, 0xe6, 0xf0, 0xb0, 0xb3, 0xa8, 0x67, 0xf0, 0x84, 0xd1, 0xd0, 0x7f, 0x53, + 0xf5, 0xcc, 0xbb, 0x72, 0x10, 0x4c, 0x50, 0x88, 0x73, 0xcf, 0xe3, 0x24, 0xf6, 0xa1, 0xbf, 0x46, + 0xf1, 0x46, 0x66, 0x12, 0xcc, 0x5d, 0x6c, 0xc3, 0x7c, 0xc6, 0x02, 0x16, 0x1d, 0x4e, 0x82, 0x8b, + 0x24, 0xfa, 0x6b, 0xa5, 0xf7, 0xee, 0x39, 0xba, 0x90, 0x7b, 0x5e, 0xd3, 0x3b, 0x62, 0xa0, 0xfb, + 0x97, 0xbe, 0x1c, 0x2e, 0xcb, 0x37, 0x59, 0xc1, 0x49, 0x36, 0x30, 0x48, 0x62, 0xfc, 0x9e, 0x0d, + 0x55, 0x3d, 0x8c, 0x15, 0x18, 0x93, 0x7a, 0x11, 0xe0, 0x6e, 0x65, 0x1f, 0x97, 0x5b, 0x3a, 0x64, + 0xf2, 0x0c, 0x91, 0xc2, 0xdf, 0xbe, 0xb8, 0x2f, 0x9d, 0x3e, 0xa3, 0x0a, 0xf7, 0x1b, 0x49, 0xa0, + 0xaa, 0xb4, 0x4c, 0x3f, 0xb9, 0x8a, 0xe8, 0xc2, 0x24, 0xa6, 0x9f, 0x61, 0xcf, 0x1a, 0x92, 0x68, + 0x46, 0xad, 0xd7, 0x66, 0xd3, 0x94, 0x30, 0x1f, 0x72, 0x48, 0x5a, 0xa4, 0xe1, 0x0d, 0xbf, 0xb3, + 0x29, 0x57, 0xda, 0x1b, 0x3b, 0xeb, 0x01, 0x0f, 0x96, 0xc8, 0xfd, 0x5a, 0xfb, 0x18, 0x23, 0x27, + 0x56, 0x9e, 0x2a, 0x1e, 0xdd, 0x76, 0xa3, 0x80, 0xbb, 0xf9, 0x60, 0xc4, 0xe2, 0xe0, 0x69, 0x4e, + 0xaf, 0x19, 0xba, 0x65, 0x8c, 0xe6, 0x09, 0x92, 0x1a, 0x23, 0x71, 0xfe, 0x12, 0xe0, 0xf0, 0xda, + 0xf7, 0x5b, 0x87, 0x4b, 0xc9, 0xcb, 0xca, 0x8d, 0x2a, 0x0d, 0xe6, 0xcf, 0xa0, 0x79, 0x70, 0x00, + 0x75, 0x28, 0x03, 0x8c, 0x8c, 0x21, 0x21, 0x4b, 0x08, 0xf3, 0x89, 0x44, 0xf2, 0x61, 0x8c, 0xd1, + 0x4e, 0xd9, 0xdb, 0x69, 0x49, 0xc5, 0xc1, 0xa3, 0xac, 0xd3, 0x2d, 0x24, 0x22, 0x3e, 0xd1, 0x2b, + 0x9f, 0x1d, 0xca, 0x37, 0xdf, 0x81, 0x21, 0xd3, 0x43, 0xa8, 0x27, 0x60, 0xd3, 0x29, 0xc3, 0x60, + 0xc1, 0x14, 0x31, 0xc3, 0xeb, 0x38, 0xa6, 0xdc, 0xd6, 0xf6, 0xbb, 0x91, 0xd0, 0xfb, 0x3d, 0xe5, + 0xe1, 0xbb, 0xdc, 0x46, 0xf8, 0x00, 0x99, 0x4f, 0x74, 0xa1, 0xf5, 0x6a, 0x39, 0x95, 0xc5, 0x01, + 
0x64, 0x09, 0x07, 0xa3, 0x2f, 0xcf, 0x09, 0x0f, 0x1d, 0x0b, 0x0b, 0x4e, 0x88, 0x69, 0x18, 0x18, + 0xfd, 0x25, 0xf7, 0x5b, 0x43, 0x26, 0xf3, 0x32, 0xf2, 0x76, 0x84, 0xc3, 0x1d, 0x91, 0x72, 0x09, + 0x97, 0x85, 0x8f, 0x0d, 0x6e, 0x29, 0x87, 0xac, 0x54, 0x81, 0x70, 0xcb, 0xb3, 0xcf, 0x80, 0x38, + 0x39, 0xb8, 0x86, 0xed, 0x3f, 0xfb, 0xe2, 0x92, 0x27, 0x6c, 0x6b, 0x9e, 0xea, 0x6d, 0x04, 0xe4, + 0x6d, 0x5a, 0x77, 0x57, 0x7d, 0x20, 0xe1, 0x8b, 0x7d, 0xc8, 0x17, 0x83, 0x82, 0xd5, 0xcc, 0xc0, + 0x07, 0xf4, 0xe7, 0xb6, 0x78, 0x65, 0x03, 0x65, 0xdd, 0x8b, 0xb2, 0x2d, 0xd2, 0xf5, 0x85, 0xce, + 0xb8, 0x34, 0x6d, 0xfa, 0x4e, 0x1b, 0xf0, 0x92, 0x74, 0x50, 0xe0, 0x09, 0x7a, 0x29, 0xed, 0x5c, + 0xc5, 0x57, 0x58, 0xd0, 0xf5, 0x17, 0xa3, 0x9a, 0xa4, 0xbc, 0xca, 0x9f, 0x26, 0x5b, 0x59, 0xa1, + 0xe9, 0xbb, 0x02, 0x53, 0x51, 0xdb, 0xd6, 0xb4, 0x08, 0x24, 0x97, 0x9d, 0xfd, 0xe3, 0xa4, 0x24, + 0xc2, 0x7b, 0xb2, 0x60, 0xf3, 0x12, 0xe3, 0x83, 0xee, 0xba, 0x76, 0xed, 0xd5, 0xb1, 0x81, 0x21, + 0x31, 0x35, 0x29, 0xa1, 0x4b, 0xb4, 0xdf, 0xad, 0xc4, 0x09, 0x29, 0x91, 0x56, 0x8a, 0x3b, 0x7a, + 0x4f, 0x8c, 0xab, 0xf8, 0x02, 0x2d, 0x32, 0x0d, 0xf1, 0x48, 0xe8, 0xbc, 0x12, 0xb6, 0xc0, 0x61, + 0x28, 0xf9, 0x73, 0x31, 0xa3, 0x1e, 0x57, 0x15, 0x74, 0xbc, 0x61, 0x17, 0x3e, 0xb9, 0x1d, 0xba, + 0x43, 0x77, 0xb7, 0xfb, 0x93, 0x19, 0x95, 0xac, 0x4b, 0x3e, 0x82, 0xdc, 0x25, 0xf8, 0xa4, 0x21, + 0x6d, 0x37, 0x69, 0xd6, 0x8f, 0x0f, 0xed, 0x41, 0x21, 0xeb, 0x8f, 0x81, 0x4f, 0x3a, 0x22, 0x5a, + 0xe6, 0x47, 0xa5, 0x10, 0x8d, 0x0a, 0x03, 0xdd, 0x47, 0xfd, 0xea, 0xf8, 0x7c, 0xb1, 0xef, 0xcd, + 0xa1, 0x36, 0xd0, 0xab, 0xfa, 0xdc, 0xc4, 0x09, 0x2e, 0x84, 0x3f, 0x0d, 0xc5, 0xb8, 0x74, 0xfc, + 0xc1, 0x34, 0xcf, 0x48, 0x94, 0x66, 0x36, 0xa3, 0xcd, 0xac, 0xf6, 0x21, 0x27, 0x13, 0xec, 0x64, + 0x05, 0xb1, 0x0f, 0x12, 0x09, 0x71, 0xe9, 0x27, 0xe4, 0xb1, 0xef, 0x83, 0xf2, 0x29, 0x40, 0xd1, + 0x68, 0x83, 0x18, 0xf5, 0xc5, 0x07, 0xb7, 0x0b, 0x61, 0x42, 0x0b, 0x5b, 0xd2, 0x34, 0xd4, 0x14, + 0xf3, 0x50, 0xae, 0x3c, 0x2b, 0x0e, 0xb5, 0xd2, 0x1f, 0x79, 0x04, 0xe1, 0xfb, 0x54, 0xd8, 0x8a, + 0xbf, 0x4f, 0xb7, 0x59, 0x46, 0x8d, 0x74, 0xf7, 0xf2, 0x3c, 0x92, 0xfe, 0xbd, 0x78, 0x59, 0xfc, + 0xbb, 0xe0, 0x8b, 0x8c, 0x61, 0x34, 0x9a, 0x17, 0xcd, 0xea, 0x96, 0xc7, 0x0d, 0xed, 0x19, 0xce, + 0xf9, 0x50, 0x2d, 0xd0, 0x40, 0xed, 0x5b, 0xf5, 0x65, 0xb9, 0x60, 0x1e, 0x01, 0xa2, 0xb4, 0xf9, + 0x7c, 0x8e, 0xf8, 0x0d, 0x04, 0x63, 0x96, 0xf8, 0xbf, 0xae, 0x8e, 0x9d, 0x76, 0xed, 0x05, 0xa9, + 0x24, 0x16, 0x6b, 0xdb, 0xe3, 0xc9, 0xdf, 0x4f, 0x00, 0x7e, 0xf9, 0x48, 0xf9, 0xf8, 0x17, 0xd4, + 0x24, 0xc4, 0xd8, 0x77, 0x9d, 0xc2, 0xd2, 0x78, 0x21, 0xcc, 0x29, 0x64, 0x46, 0x98, 0xdf, 0x00, + 0x4f, 0xdc, 0x14, 0xdd, 0xfd, 0xe5, 0x86, 0x4b, 0x7b, 0x90, 0xd3, 0xed, 0x67, 0xda, 0xe5, 0xfb, + 0xd0, 0x77, 0x78, 0x7b, 0xe9, 0x6c, 0x55, 0xaf, 0x8e, 0x13, 0xca, 0x53, 0xf0, 0x2f, 0xc3, 0x9a, + 0x3c, 0x53, 0xa6, 0x6b, 0x29, 0x6b, 0x4c, 0xd2, 0x9a, 0x63, 0x93, 0x35, 0x46, 0x7d, 0x8e, 0x2b, + 0x40, 0xd8, 0x71, 0x4e, 0xdb, 0xad, 0x16, 0x9a, 0x21, 0x70, 0xf2, 0xb9, 0x56, 0xaf, 0x60, 0x29, + 0x57, 0x7b, 0x78, 0xe6, 0xf2, 0x75, 0x6d, 0x7e, 0x7e, 0xf3, 0x67, 0x64, 0xee, 0x11, 0x7b, 0x4a, + 0x3e, 0xcc, 0x16, 0x1f, 0x9c, 0x6a, 0xa7, 0x7a, 0x12, 0x27, 0xe4, 0x4b, 0x1c, 0x1e, 0xe4, 0xd0, + 0xcd, 0xdd, 0x2c, 0x90, 0x44, 0xb8, 0x4e, 0x90, 0xc0, 0x8b, 0x3b, 0xd4, 0x98, 0xf9, 0xf1, 0xd2, + 0xcd, 0xd1, 0x73, 0xdf, 0xd8, 0x89, 0x25, 0x1b, 0xfc, 0x25, 0xcc, 0xda, 0x96, 0x60, 0xa8, 0x8a, + 0xcd, 0x11, 0xae, 0x94, 0x19, 0xd5, 0x9b, 0x6c, 0x86, 0xfa, 0x0c, 0x39, 0x21, 0xcb, 0x8e, 0x49, + 0xb8, 0xcc, 0x64, 0xe8, 
0xbc, 0x91, 0x74, 0x47, 0x9f, 0xb0, 0xa8, 0x79, 0xdb, 0x9a, 0x77, 0xfa, + 0x9b, 0x68, 0xba, 0x27, 0x8c, 0x14, 0x34, 0xa1, 0x8a, 0x29, 0xde, 0xa7, 0x57, 0xd2, 0x52, 0x3c, + 0xc0, 0xb5, 0x71, 0x0c, 0xcf, 0x21, 0x11, 0xb5, 0x75, 0x6b, 0x25, 0xac, 0x2e, 0x43, 0x9b, 0xf2, + 0x49, 0x2e, 0xb4, 0xc3, 0x9b, 0xd9, 0xe5, 0x07, 0x58, 0xe3, 0xde, 0x76, 0xd0, 0xc8, 0x9a, 0xa6, + 0xba, 0x39, 0xcc, 0xe6, 0xeb, 0x92, 0x12, 0x72, 0x81, 0xd2, 0xce, 0x78, 0xb1, 0xa4, 0xe8, 0x56, + 0x6a, 0xd0, 0xfa, 0x9d, 0x02, 0xf1, 0xa2, 0xee, 0xd1, 0x2c, 0xb9, 0x7f, 0x79, 0xc0, 0x92, 0xf4, + 0x55, 0x8a, 0xe2, 0x1f, 0xf2, 0xa5, 0xe3, 0x00, 0x52, 0xd7, 0x10, 0xfd, 0xcf, 0x87, 0x34, 0x8f, + 0x0e, 0x65, 0x1a, 0x37, 0xe5, 0x28, 0x05, 0xa9, 0x37, 0xd9, 0xdf, 0x0c, 0x6d, 0x1e, 0x6d, 0x77, + 0xbb, 0xc7, 0x11, 0x0b, 0x43, 0xa8, 0x68, 0x5c, 0x76, 0x7b, 0x6b, 0x2b, 0x8f, 0x6c, 0x69, 0x49, + 0xbf, 0x67, 0x8d, 0x1e, 0xae, 0x49, 0x6c, 0xfc, 0x95, 0xe9, 0x64, 0x05, 0x0e, 0x6c, 0x9b, 0x32, + 0x27, 0x7d, 0xb9, 0x26, 0x33, 0x73, 0x18, 0xd0, 0x17, 0xb7, 0x40, 0x41, 0x4d, 0xd6, 0xe4, 0x8c, + 0x26, 0x6b, 0xcd, 0xec, 0xa0, 0x02, 0x4e, 0x56, 0x00, 0x5a, 0xc1, 0xce, 0x09, 0x4c, 0xec, 0xb3, + 0xa0, 0xd0, 0x1d, 0x7e, 0xbe, 0x62, 0x9e, 0xc4, 0xbc, 0x24, 0xe7, 0x5e, 0x40, 0x9b, 0xc5, 0x0b, + 0xe0, 0xa4, 0x0f, 0x97, 0xc1, 0x98, 0xb7, 0xd9, 0xbc, 0x9a, 0xef, 0x3b, 0xfb, 0x12, 0x11, 0x7b, + 0xac, 0xb3, 0xb4, 0x39, 0x7d, 0x08, 0x16, 0x0d, 0xc1, 0x93, 0x7c, 0x40, 0x5f, 0xcd, 0x0f, 0x35, + 0x99, 0x33, 0x27, 0x68, 0x27, 0xa2, 0xc3, 0xe3, 0x4e, 0x48, 0x66, 0x60, 0xde, 0x29, 0x39, 0x98, + 0x87, 0x38, 0xa9, 0xa7, 0x27, 0xa9, 0x6d, 0x28, 0x3f, 0x28, 0x22, 0x21, 0xa4, 0xd0, 0x14, 0xf9, + 0x81, 0xd0, 0x4e, 0x52, 0x71, 0xca, 0x06, 0xdd, 0x2e, 0x3e, 0x8a, 0x87, 0x32, 0x32, 0x6a, 0xe7, + 0xd7, 0x0e, 0xba, 0xb9, 0x1a, 0x4b, 0x5e, 0x82, 0x9d, 0x93, 0x70, 0x4a, 0xf0, 0x83, 0x16, 0x02, + 0xf9, 0x1b, 0x77, 0x48, 0x30, 0x2e, 0x20, 0x0f, 0xac, 0x76, 0x96, 0xa4, 0x30, 0xf7, 0x0e, 0x74, + 0xeb, 0x97, 0xbc, 0xb5, 0x98, 0xfb, 0xf3, 0xf7, 0xec, 0xb2, 0xf1, 0xb1, 0xb0, 0x31, 0xaa, 0xb5, + 0xa3, 0xc8, 0xd7, 0x33, 0x40, 0xac, 0x9b, 0x2a, 0x91, 0x38, 0x7a, 0x78, 0x4b, 0x02, 0x72, 0xd6, + 0xd5, 0x79, 0x49, 0x50, 0x5d, 0x3e, 0x9d, 0xa5, 0xde, 0x24, 0xca, 0xdc, 0x50, 0xe4, 0x66, 0xb0, + 0x2d, 0x97, 0x50, 0x20, 0x91, 0xc5, 0x3a, 0xff, 0x61, 0xd6, 0xec, 0x12, 0xa7, 0x4d, 0xc9, 0x69, + 0x00, 0x79, 0xb4, 0x94, 0xfb, 0xb6, 0x9c, 0x82, 0xf5, 0xbd, 0x44, 0x99, 0x77, 0x50, 0x88, 0xca, + 0x92, 0x56, 0x1a, 0x7a, 0x4d, 0xf6, 0xd8, 0x1b, 0x11, 0x91, 0xc3, 0xb9, 0x44, 0xd7, 0x8e, 0x44, + 0x51, 0x50, 0xee, 0x7e, 0x1f, 0xa3, 0xf5, 0xe6, 0x17, 0x2e, 0xcf, 0x5d, 0x31, 0x53, 0x11, 0xeb, + 0x2f, 0xa9, 0x53, 0x4b, 0x6e, 0x97, 0x48, 0x88, 0x4b, 0x6d, 0x13, 0xa2, 0x5a, 0x7b, 0x02, 0xbb, + 0x03, 0xd2, 0x5a, 0x80, 0xe2, 0xd3, 0xc2, 0x28, 0x52, 0xcb, 0x48, 0xcc, 0xab, 0x08, 0xac, 0xc1, + 0x2f, 0xc2, 0xc2, 0x0c, 0x01, 0x83, 0x3b, 0x97, 0x19, 0x0c, 0xc7, 0xdd, 0xb0, 0x0c, 0x09, 0x1b, + 0xe5, 0x3e, 0x49, 0xf6, 0x43, 0x8e, 0x18, 0xbb, 0xbe, 0xca, 0x9c, 0x01, 0xa8, 0xd5, 0x93, 0xf5, + 0xa2, 0x35, 0x5a, 0x8c, 0x84, 0xf8, 0x0b, 0x18, 0x02, 0x13, 0xc2, 0x3f, 0xcd, 0xee, 0xed, 0xaf, + 0x46, 0x67, 0xd6, 0x62, 0x1a, 0xc1, 0x55, 0xf8, 0x9e, 0x35, 0xca, 0xa0, 0x35, 0xad, 0xb2, 0x08, + 0x47, 0xcf, 0x5f, 0x9f, 0x1f, 0x9d, 0x6c, 0x0a, 0xcd, 0xa9, 0x7d, 0x89, 0xdd, 0x98, 0x1e, 0xb1, + 0x09, 0xd5, 0x1f, 0x7b, 0xd1, 0x5c, 0xda, 0xbe, 0xb0, 0x34, 0xc8, 0xce, 0x1d, 0x60, 0xed, 0x89, + 0xc0, 0x8e, 0x21, 0x29, 0x03, 0xaf, 0xac, 0x31, 0xf5, 0xd8, 0xd2, 0xcc, 0x7f, 0xb7, 0x1e, 0x99, + 0xba, 0x4e, 0xdf, 0x63, 0x36, 0x02, 0x19, 0x57, 
0x9c, 0xa0, 0x14, 0xb5, 0x9c, 0x98, 0xf9, 0x74, + 0x02, 0x62, 0x69, 0xf5, 0xbf, 0x32, 0x43, 0xcc, 0x6d, 0xb1, 0x25, 0x5b, 0x6e, 0xbf, 0xca, 0x54, + 0x4e, 0x5b, 0xfe, 0x19, 0xf0, 0x2f, 0xee, 0x06, 0x72, 0x77, 0x43, 0x4e, 0xeb, 0xac, 0xce, 0x1d, + 0x75, 0x79, 0x47, 0x07, 0x90, 0x6f, 0x84, 0xfb, 0x68, 0xbc, 0xa0, 0xaa, 0x56, 0x3b, 0x6d, 0x76, + 0x0b, 0x60, 0x8f, 0x06, 0xb8, 0xdb, 0x27, 0x52, 0x75, 0x5a, 0xb7, 0x5d, 0x69, 0x59, 0x0c, 0xc7, + 0xae, 0xcd, 0xcb, 0xd6, 0xbe, 0x89, 0x73, 0xc3, 0x03, 0xa3, 0x33, 0xbf, 0xc7, 0xc7, 0x4b, 0x3c, + 0x88, 0x7d, 0xce, 0x20, 0xaa, 0xd2, 0xa7, 0x76, 0x05, 0x96, 0x66, 0x4e, 0x8b, 0xdb, 0xc7, 0x28, + 0x6a, 0xdc, 0x9c, 0x74, 0xf1, 0x3d, 0x1b, 0x5d, 0xf0, 0x84, 0x12, 0x3b, 0x5d, 0xb8, 0x3b, 0x5f, + 0x17, 0xc7, 0x5a, 0xdb, 0x84, 0xb1, 0xae, 0xa6, 0xe3, 0x4e, 0x24, 0x83, 0x17, 0x6b, 0xad, 0xba, + 0x9e, 0xd0, 0x34, 0x61, 0xc7, 0xa3, 0xe6, 0x4e, 0x9e, 0x1b, 0xa3, 0x45, 0xcd, 0x84, 0x1c, 0x5a, + 0x8a, 0x5a, 0xaa, 0x0b, 0x99, 0x89, 0x9e, 0xe4, 0x2b, 0xb9, 0xbe, 0x70, 0x9f, 0xc8, 0xae, 0xbf, + 0x53, 0xa7, 0x4e, 0xef, 0x60, 0xb9, 0x6d, 0x18, 0xa7, 0xc3, 0x73, 0xb1, 0xf1, 0x11, 0x86, 0xc7, + 0x6b, 0x3e, 0xa7, 0xd6, 0x93, 0x69, 0xc7, 0xf8, 0xc9, 0xc8, 0xb9, 0x74, 0x8d, 0x62, 0x14, 0x16, + 0xa1, 0x38, 0x22, 0xb2, 0x63, 0xdb, 0x45, 0xee, 0x82, 0xe5, 0x84, 0x13, 0x78, 0xa3, 0x14, 0xec, + 0xac, 0xa5, 0x1d, 0xeb, 0x3b, 0x23, 0xaa, 0xfe, 0x54, 0x1a, 0x7e, 0xce, 0x64, 0xdc, 0x55, 0x17, + 0x5d, 0x7b, 0xec, 0xe0, 0x09, 0x79, 0x9e, 0xb9, 0xdf, 0x24, 0x37, 0xfb, 0x54, 0x7a, 0x16, 0xfc, + 0x0d, 0xa1, 0x44, 0xd7, 0x46, 0x2d, 0x5d, 0x97, 0x78, 0xb3, 0x3c, 0xf9, 0x32, 0x6c, 0x61, 0x0c, + 0xab, 0x9e, 0x1c, 0xae, 0x1a, 0x2c, 0x28, 0x57, 0x94, 0x7f, 0x1f, 0x0a, 0x8f, 0xc3, 0x43, 0xcb, + 0x65, 0xc2, 0xf4, 0x5d, 0xcc, 0xb7, 0xd1, 0x2a, 0x9c, 0x80, 0x1a, 0x17, 0xb8, 0xf1, 0xb5, 0x3e, + 0x29, 0x5d, 0x9b, 0xc6, 0x82, 0x82, 0x64, 0xc8, 0x7d, 0xbb, 0xd8, 0x19, 0x74, 0x27, 0x0d, 0xff, + 0xaa, 0x33, 0x3b, 0x5c, 0x17, 0x52, 0xad, 0x8b, 0xea, 0x1f, 0x6d, 0x90, 0x5c, 0xe3, 0xc7, 0xb3, + 0xae, 0x44, 0x24, 0x7b, 0xb0, 0x00, 0xe3, 0x4e, 0x68, 0xbf, 0x30, 0x19, 0x11, 0xff, 0xc2, 0x19, + 0xce, 0x49, 0x0a, 0xd7, 0x7f, 0x91, 0xdd, 0xf7, 0x14, 0xd2, 0x1d, 0x31, 0xbb, 0x83, 0xe8, 0xaf, + 0x97, 0xe6, 0xce, 0x2c, 0x61, 0x23, 0x85, 0x54, 0x77, 0xd6, 0xd0, 0x13, 0x38, 0x19, 0x9b, 0xb2, + 0xa2, 0x7a, 0x16, 0x54, 0x3d, 0x4a, 0x18, 0xaa, 0xb2, 0xa8, 0xc1, 0xbc, 0xdf, 0x37, 0x20, 0xac, + 0xc5, 0xfd, 0x32, 0x29, 0x89, 0xaa, 0x75, 0x95, 0x36, 0x37, 0x67, 0xeb, 0x4d, 0xb1, 0xd5, 0xaa, + 0x6c, 0xc9, 0xb7, 0x01, 0x2e, 0x55, 0x39, 0x94, 0x46, 0xed, 0xd8, 0x73, 0x6d, 0x77, 0xd8, 0x10, + 0x19, 0x1e, 0x2c, 0x3c, 0x21, 0x32, 0x3b, 0x1e, 0x68, 0xbe, 0x69, 0x90, 0x9a, 0xb6, 0x72, 0xb8, + 0xc1, 0x8a, 0xf1, 0x21, 0x8f, 0x76, 0xaf, 0xed, 0x46, 0x20, 0x8b, 0x9f, 0xb1, 0x5b, 0x00, 0x5c, + 0x08, 0xb8, 0x24, 0x59, 0xd7, 0x72, 0xfa, 0xe8, 0xa1, 0xd1, 0x3e, 0xa1, 0x69, 0x1f, 0xa1, 0xf3, + 0xba, 0x23, 0xcd, 0x2e, 0x1d, 0x2a, 0xe2, 0xdb, 0x79, 0xcb, 0x19, 0x8e, 0xb8, 0x79, 0x66, 0xbd, + 0xb6, 0xf0, 0xff, 0xf9, 0x50, 0xdc, 0xb6, 0x2a, 0x48, 0xb0, 0xad, 0xe1, 0x11, 0x45, 0x59, 0xbd, + 0x97, 0x7b, 0x7e, 0xb2, 0xd8, 0x03, 0xba, 0x55, 0x16, 0xce, 0xf7, 0xfa, 0x48, 0xe6, 0x97, 0x59, + 0xfe, 0x44, 0xa9, 0xa7, 0x65, 0x48, 0x1a, 0xc2, 0x84, 0x27, 0x3c, 0x8a, 0x9c, 0x9d, 0x02, 0x71, + 0x6f, 0x1a, 0x3c, 0xc7, 0xe0, 0x23, 0xa8, 0xc0, 0x17, 0x9d, 0x86, 0x74, 0x4d, 0xbd, 0xe9, 0x77, + 0xc4, 0x7a, 0x7d, 0x80, 0xdf, 0x4c, 0x1a, 0xa1, 0x6c, 0xc5, 0xfb, 0x4c, 0xd2, 0x3d, 0xdc, 0x92, + 0x18, 0xbb, 0x0c, 0x50, 0x17, 0xbb, 0x2b, 0x49, 0x15, 0x57, 0xb4, 0x91, 
0x34, 0xf0, 0xa8, 0x42, + 0x29, 0xac, 0x29, 0xd6, 0x6c, 0x8c, 0xf8, 0xbd, 0xba, 0x87, 0x49, 0x25, 0x3f, 0x96, 0x7b, 0x18, + 0x9f, 0x66, 0x88, 0x37, 0xb0, 0x2a, 0xa2, 0xf0, 0x23, 0xf7, 0xf1, 0xa7, 0xdf, 0x6b, 0x70, 0x2c, + 0xdb, 0x28, 0xd9, 0x93, 0xdc, 0xe7, 0x6e, 0xbd, 0x36, 0x0b, 0x78, 0x4c, 0x34, 0xdf, 0xf1, 0x16, + 0x4a, 0x3c, 0x12, 0x98, 0x79, 0x70, 0x41, 0x0d, 0x09, 0x12, 0x51, 0x29, 0x13, 0x51, 0xfb, 0x3b, + 0xb9, 0x1c, 0xa7, 0x37, 0xd3, 0xde, 0x65, 0x5d, 0xbb, 0x1a, 0x2b, 0x0e, 0xed, 0x2c, 0x50, 0x73, + 0xce, 0xb3, 0x9d, 0x7c, 0x16, 0xf5, 0xfe, 0xc0, 0x23, 0x48, 0xc7, 0x67, 0xa3, 0x4b, 0x0c, 0xfe, + 0xc5, 0x81, 0x9f, 0x6c, 0xfd, 0x96, 0x1e, 0x4f, 0x37, 0x46, 0x0d, 0x31, 0xdd, 0xc6, 0x56, 0xb4, + 0xd9, 0x8c, 0x35, 0x72, 0x15, 0xd0, 0xa4, 0x7e, 0x13, 0xf7, 0x42, 0xe4, 0x3b, 0xe5, 0xbb, 0x40, + 0xf8, 0xcb, 0x9d, 0x02, 0x8b, 0xa8, 0xb1, 0x6b, 0x1a, 0x2f, 0xf1, 0xbb, 0x4a, 0x62, 0x4d, 0x5f, + 0x52, 0x35, 0xa9, 0x70, 0xc8, 0xb8, 0x88, 0x68, 0x32, 0xd1, 0x38, 0x73, 0x1e, 0x5f, 0xdd, 0xff, + 0x0f, 0x57, 0xec, 0x0e, 0x2a, 0xc6, 0x36, 0x3e, 0x52, 0x91, 0x21, 0x21, 0x9a, 0xde, 0x94, 0xdb, + 0x86, 0x0f, 0x84, 0x43, 0xa2, 0xe5, 0xe1, 0x60, 0x60, 0xf5, 0xac, 0x24, 0x49, 0x53, 0x2d, 0x3a, + 0xc0, 0x34, 0x75, 0x46, 0x82, 0x4b, 0xde, 0x2c, 0x75, 0xa8, 0x7f, 0x8f, 0x17, 0x18, 0xd9, 0x1e, + 0x72, 0x65, 0xb1, 0x58, 0x68, 0x10, 0x32, 0x31, 0xc9, 0x39, 0x22, 0xfe, 0x6c, 0xf5, 0x11, 0xdb, + 0x88, 0xa5, 0xb4, 0x0f, 0xa5, 0xca, 0xfb, 0xe8, 0x88, 0xf6, 0x37, 0x71, 0x8a, 0x6a, 0xc0, 0x15, + 0x6d, 0x8d, 0xde, 0xe4, 0x41, 0x07, 0x99, 0x34, 0xae, 0xe8, 0x82, 0x7e, 0x53, 0xee, 0x05, 0x79, + 0xfd, 0xe7, 0x81, 0x28, 0xce, 0x1b, 0x39, 0xcc, 0xa4, 0x5b, 0x3e, 0x2d, 0x17, 0x3b, 0x5f, 0x3c, + 0xb8, 0x55, 0x68, 0x3f, 0x56, 0x8c, 0x14, 0x0d, 0x59, 0x10, 0xfa, 0x3e, 0x5c, 0x3e, 0x36, 0x41, + 0x30, 0x8e, 0xa5, 0x33, 0x7c, 0x9f, 0xff, 0x26, 0xc6, 0xa7, 0x3c, 0x27, 0xd4, 0x71, 0x8c, 0x1e, + 0xda, 0x59, 0x16, 0xdc, 0x8b, 0x66, 0x29, 0x97, 0x74, 0x4a, 0xc0, 0x30, 0x8e, 0x71, 0x8f, 0xe0, + 0xfb, 0x43, 0x58, 0x46, 0x31, 0xb3, 0x85, 0xc2, 0x1e, 0xd9, 0x07, 0x82, 0xbd, 0xea, 0x28, 0xc4, + 0x78, 0xa4, 0x79, 0xa6, 0xa0, 0xf4, 0x9f, 0xf7, 0x5e, 0xda, 0x61, 0x4e, 0xab, 0x60, 0xf7, 0x67, + 0x18, 0xf7, 0x48, 0x50, 0x2e, 0x6d, 0xbb, 0xfd, 0x99, 0x79, 0x74, 0xac, 0xf3, 0x44, 0xb7, 0xa8, + 0xca, 0xab, 0x40, 0x16, 0x37, 0xd5, 0x7b, 0xc4, 0xb5, 0x22, 0x58, 0xcf, 0x89, 0x59, 0xd7, 0x97, + 0xd1, 0x71, 0x1b, 0x3f, 0x77, 0x96, 0x7a, 0x89, 0xf9, 0xdf, 0x56, 0x00, 0xf7, 0xca, 0x6e, 0xe6, + 0xf7, 0xf7, 0x53, 0xb3, 0x5a, 0xbf, 0x18, 0x17, 0x1d, 0x05, 0xee, 0x6a, 0x6d, 0x62, 0x7f, 0x15, + 0x7c, 0x3a, 0x59, 0xbc, 0x22, 0x3f, 0xcd, 0x1d, 0xf4, 0x2f, 0x25, 0xcb, 0x13, 0x13, 0x5e, 0x1b, + 0x71, 0xd1, 0x64, 0xdf, 0x28, 0xd6, 0xfa, 0x85, 0xa0, 0x1c, 0x2b, 0x90, 0xd1, 0xd6, 0x95, 0x1f, + 0x90, 0xd0, 0x16, 0xf8, 0xa3, 0x99, 0x4e, 0x7d, 0xcd, 0xc8, 0x83, 0xed, 0xca, 0x78, 0x91, 0x36, + 0xbf, 0xb8, 0x86, 0x66, 0xd5, 0x0f, 0x4f, 0x63, 0x6a, 0xf1, 0xfb, 0x3d, 0xe5, 0x97, 0xbb, 0xd0, + 0xec, 0xfa, 0xe5, 0x57, 0x0a, 0x67, 0x23, 0xed, 0x66, 0xf5, 0x9b, 0xaf, 0x48, 0x74, 0x0d, 0xe5, + 0xb9, 0x10, 0x81, 0x4d, 0xd5, 0xc7, 0x0a, 0x4a, 0x57, 0x53, 0xe9, 0x1a, 0x8f, 0xe5, 0x5b, 0x4e, + 0x3e, 0x2a, 0x1f, 0x6a, 0xb0, 0x1a, 0x6a, 0x8a, 0x10, 0xcc, 0x76, 0x64, 0x71, 0xee, 0x19, 0xef, + 0x88, 0x94, 0x6e, 0x2d, 0xf1, 0xeb, 0x16, 0x8d, 0x67, 0x7b, 0xb8, 0x16, 0x15, 0x14, 0x3b, 0xbf, + 0xbb, 0xa6, 0x15, 0x30, 0x05, 0x64, 0xb7, 0x06, 0x94, 0x93, 0x5a, 0x25, 0xb0, 0xb9, 0x38, 0xca, + 0xc5, 0xc2, 0xb3, 0xf4, 0xf2, 0x02, 0xf9, 0xcf, 0xcf, 0x0f, 0xfa, 0xa3, 0x9a, 0xe1, 0x68, 0x82, + 
0x29, 0x5b, 0x96, 0xd4, 0x6a, 0x5c, 0x96, 0x83, 0xd9, 0x5f, 0x9a, 0x01, 0xdb, 0x4f, 0x14, 0x07, + 0xc8, 0x4d, 0x93, 0x70, 0xc3, 0x71, 0x8d, 0x57, 0xcf, 0x44, 0x02, 0x86, 0xc6, 0xf6, 0x97, 0xe9, + 0x3f, 0x7e, 0x2d, 0x53, 0x10, 0xa1, 0x5d, 0x96, 0x1a, 0xe6, 0x29, 0x6c, 0x97, 0xf1, 0xae, 0x18, + 0xa1, 0x46, 0x31, 0x49, 0x6b, 0xd4, 0x62, 0xdc, 0x95, 0xc7, 0x40, 0x7a, 0x97, 0x86, 0x85, 0x2f, + 0x87, 0xa6, 0x74, 0xd3, 0xa0, 0xb6, 0xeb, 0x17, 0x30, 0xff, 0x25, 0xc4, 0xe3, 0xa1, 0x6d, 0x9e, + 0xfd, 0xce, 0xf7, 0x3b, 0x8b, 0xa4, 0xca, 0x96, 0x52, 0x91, 0x18, 0xc4, 0xe4, 0x8d, 0xdf, 0xc9, + 0x09, 0x79, 0xe3, 0x8d, 0x79, 0x73, 0x7d, 0x94, 0xf0, 0xc6, 0xd1, 0xb1, 0x8f, 0xb4, 0x75, 0xe8, + 0x61, 0x92, 0x6e, 0xe7, 0x5e, 0x00, 0xe2, 0xde, 0xe9, 0x36, 0xae, 0x6f, 0x9a, 0xe1, 0xa1, 0x91, + 0x80, 0x5c, 0xb4, 0x7d, 0xd2, 0x6e, 0xd6, 0xa6, 0xee, 0x5e, 0x03, 0x57, 0x98, 0xe1, 0x32, 0x84, + 0x4d, 0xee, 0xf4, 0xff, 0xa5, 0x8d, 0x77, 0x97, 0x8d, 0x1d, 0x77, 0x69, 0x11, 0xe9, 0xdc, 0xbb, + 0x85, 0x73, 0x71, 0xcf, 0x4c, 0x7d, 0xb1, 0x8a, 0x63, 0xfc, 0x2e, 0xd4, 0xcd, 0x3d, 0xc4, 0xba, + 0x5a, 0xb2, 0x6f, 0xbc, 0x9f, 0x25, 0x0d, 0x6f, 0xd6, 0x4f, 0x30, 0x6d, 0x13, 0x54, 0x11, 0xf8, + 0x94, 0xde, 0xe4, 0xf9, 0xbb, 0xb0, 0x50, 0xc8, 0x6e, 0xd6, 0x2f, 0x92, 0x36, 0x29, 0x4a, 0x06, + 0x20, 0x33, 0x16, 0x8b, 0x51, 0x3b, 0xd4, 0xc6, 0xf3, 0x37, 0x83, 0x77, 0x0c, 0xd4, 0x8d, 0xda, + 0x02, 0x60, 0x84, 0x99, 0xaf, 0x5c, 0x2c, 0x98, 0x6c, 0xb5, 0xcb, 0xed, 0x78, 0x4f, 0x79, 0x70, + 0x44, 0xfd, 0x59, 0x46, 0xb1, 0x27, 0x64, 0x68, 0x8b, 0x98, 0x19, 0x06, 0xac, 0xa9, 0xa6, 0xde, + 0xed, 0xd0, 0xad, 0x14, 0xc8, 0x5c, 0x0e, 0xb0, 0xf0, 0x6a, 0x71, 0x61, 0xe5, 0x8e, 0xb5, 0x7f, + 0xca, 0x57, 0x7f, 0x3c, 0xb0, 0xa0, 0xb5, 0x37, 0x08, 0x4c, 0x71, 0xa0, 0x36, 0x75, 0xfd, 0xed, + 0x89, 0x24, 0xf8, 0x80, 0x7f, 0x14, 0xa4, 0x6b, 0x8a, 0xbc, 0x3e, 0xb2, 0x22, 0xd9, 0x85, 0xe2, + 0x1b, 0x98, 0xfe, 0x53, 0xd1, 0x2b, 0x9f, 0x2d, 0x4c, 0xf3, 0x69, 0x7a, 0x63, 0xf9, 0xea, 0xf3, + 0x5d, 0x7d, 0x04, 0xe7, 0x05, 0x9c, 0x82, 0x94, 0x20, 0x3e, 0xd8, 0x3b, 0xb1, 0x20, 0x92, 0xa4, + 0x7b, 0xda, 0xf8, 0x78, 0x62, 0x07, 0x7c, 0xa1, 0xd5, 0xb5, 0xa8, 0x9b, 0x1d, 0xb7, 0x80, 0x7a, + 0x6f, 0x94, 0xd5, 0xdf, 0x84, 0x98, 0xde, 0x36, 0x18, 0x70, 0xb6, 0xd2, 0x86, 0x75, 0xac, 0xce, + 0x8a, 0xed, 0x84, 0x3e, 0xe9, 0xac, 0xd7, 0x1a, 0xbb, 0x2b, 0x78, 0xfa, 0x83, 0xc8, 0x29, 0x36, + 0x65, 0x06, 0x6e, 0x4e, 0xea, 0xc8, 0x12, 0x06, 0x6d, 0x85, 0xb9, 0x08, 0xd8, 0xf4, 0x66, 0xab, + 0x02, 0x5d, 0x33, 0xac, 0x34, 0xe0, 0x7a, 0x43, 0xf9, 0x7c, 0x27, 0x54, 0x43, 0x16, 0x8f, 0xa6, + 0x33, 0x2c, 0x87, 0x56, 0xbf, 0x94, 0x34, 0xf0, 0xe3, 0x71, 0x4a, 0x3a, 0x9a, 0x1e, 0xf2, 0x5a, + 0xb8, 0x75, 0xf4, 0x6b, 0x83, 0xa3, 0x77, 0xd0, 0xd2, 0xb1, 0x70, 0x7f, 0x25, 0xe6, 0xbc, 0x1e, + 0xf9, 0x31, 0xd9, 0x6f, 0x94, 0x0e, 0x55, 0x09, 0x6c, 0x7d, 0x78, 0x9e, 0xfd, 0xb0, 0x1e, 0xfd, + 0x6a, 0xdd, 0x34, 0xfc, 0xe4, 0xbc, 0x70, 0x58, 0xdf, 0xeb, 0xe4, 0xcd, 0x46, 0x57, 0xf8, 0xc5, + 0x88, 0xe4, 0x25, 0x45, 0xd9, 0xd4, 0x55, 0x3c, 0x8e, 0x83, 0xbb, 0x59, 0x09, 0xfa, 0x45, 0x19, + 0x5b, 0x8d, 0x2f, 0xbd, 0x8e, 0x32, 0x51, 0x86, 0x32, 0xf0, 0xa8, 0xc3, 0x27, 0x3d, 0x69, 0xc4, + 0x2b, 0xc5, 0xeb, 0x4e, 0x73, 0x25, 0x87, 0x73, 0x1d, 0xd4, 0x27, 0xee, 0xf2, 0x6d, 0x12, 0x66, + 0xc3, 0x09, 0x87, 0xdd, 0x4f, 0xa4, 0x1c, 0x67, 0x1b, 0x4d, 0x92, 0x91, 0x2e, 0x5d, 0x04, 0xb4, + 0x92, 0xf9, 0xb0, 0xff, 0x00, 0x07, 0x82, 0x62, 0x70, 0xbf, 0x46, 0x0d, 0x65, 0xdb, 0x73, 0x06, + 0xe6, 0x6f, 0x77, 0x20, 0xad, 0xfa, 0x92, 0xa2, 0xb3, 0x02, 0x9b, 0x01, 0x96, 0x58, 0xd1, 0x3b, + 0xc1, 0xec, 0xe5, 0x7f, 
0xe7, 0x12, 0x30, 0xbd, 0x73, 0x82, 0x79, 0x45, 0x46, 0xa2, 0x40, 0xc1, + 0x12, 0x0e, 0xe5, 0x1d, 0x9e, 0x2c, 0x3f, 0x4b, 0xb6, 0xba, 0x58, 0x02, 0xa3, 0x8d, 0x8e, 0xc1, + 0xb9, 0xb7, 0xb3, 0x1b, 0xb3, 0xbd, 0x20, 0x8f, 0xd8, 0x10, 0x18, 0xf0, 0xbf, 0x39, 0x00, 0x41, + 0x8e, 0xe9, 0xb7, 0x86, 0x3d, 0x49, 0x52, 0xc7, 0x06, 0x4c, 0x35, 0x89, 0xa6, 0x15, 0x47, 0x59, + 0x66, 0xf5, 0x8e, 0xfe, 0x96, 0x8d, 0x3c, 0xc4, 0x4e, 0xe7, 0x2e, 0x96, 0x70, 0xe2, 0x63, 0x15, + 0x3b, 0xfb, 0x4b, 0x7f, 0xf8, 0x8b, 0xfb, 0xf5, 0xf6, 0x7a, 0x09, 0x84, 0x73, 0xe8, 0xce, 0x4c, + 0x07, 0x6d, 0xc5, 0x99, 0x30, 0x32, 0x86, 0x3e, 0x9d, 0xff, 0xe9, 0xf9, 0x7f, 0xfb, 0xff, 0xfa, + 0x5f, 0xff, 0xf5, 0x5f, 0xff, 0xf5, 0x5f, 0xff, 0x3b, 0xfe, 0x1f, 0xd7, 0x21, 0x70, 0x8e, 0x00, + 0x24, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 9216, // uncompressed data size (bytes) + 7363, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x03, 0x62, 0x56, 0x08, 0x13, 0x4c, 0xc8, 0x42, 0x69, + 0x20, 0x00, 0x00, 0x8d, 0x53, 0x58, 0xc0, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_dbg_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 768 +// COMPRESSED SIZE (bytes): 781 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_sig_dbg_data[] = +{ + 0x01, 0x00, 0x03, 0xff, 0xfc, 0x0c, 0x62, 0x10, 0xd7, 0x88, 0x37, 0x32, 0x49, 0x2b, 0x50, 0xe2, + 0xcb, 0xcf, 0x76, 0x35, 0xce, 0xea, 0x5b, 0xbb, 0x5c, 0xc3, 0x43, 0x6c, 0x42, 0xf0, 0xf1, 0xc9, + 0xc3, 0x42, 0x39, 0x70, 0xa5, 0x33, 0xbb, 0x98, 0xdb, 0x81, 0x87, 0xca, 0xd8, 0x80, 0x41, 0x04, + 0x1e, 0x4c, 0x39, 0x39, 0xc8, 0x56, 0x9f, 0xa1, 0x4b, 0x96, 0xf9, 0x33, 0xb1, 0x82, 0x4a, 0xb0, + 0x0b, 0xe9, 0x76, 0x26, 0x35, 0x3a, 0xfe, 0xd8, 0x88, 0xd8, 0x0f, 0x59, 0xc4, 0xf5, 0xe7, 0x3d, + 0x10, 0xe0, 0x33, 0x15, 0x92, 0x59, 0xb7, 0x63, 0xa6, 0x66, 0xe9, 0xc6, 0x4d, 0x50, 0x0c, 0x05, + 0x9a, 0x70, 0x19, 0x09, 0xe3, 0x36, 0x18, 0x91, 0x20, 0x2e, 0x84, 0x6c, 0x5d, 0x55, 0x70, 0x91, + 0xce, 0x5a, 0xd0, 0x26, 0x3c, 0x57, 0x7d, 0xbd, 0x41, 0x00, 0xeb, 0x3e, 0x0a, 0x7b, 0xe3, 0xe3, + 0x30, 0x32, 0xf1, 0x1f, 0x4f, 0xbb, 0xa4, 0x0a, 0x4a, 0x3c, 0x7c, 0x8e, 0xba, 0x86, 0x7a, 0xec, + 0x82, 0xae, 0xa2, 0x0b, 0xab, 0x99, 0x63, 0xcd, 0x50, 0x97, 0x7b, 0x3d, 0xce, 0x9d, 0x93, 0x7a, + 0x0f, 0x1e, 0x02, 0x55, 0xca, 0x10, 0x5a, 0x67, 0x37, 0x62, 0xf9, 0x43, 0xeb, 0x83, 0x87, 0x14, + 0x30, 0xe2, 0xea, 0xf3, 0x08, 0x4a, 0x6b, 0xb0, 0x0b, 0x38, 0x58, 0x3d, 0xc1, 0x91, 0xa1, 0x6e, + 0xe4, 0x7a, 0xbd, 0xfe, 0x20, 0x44, 0x74, 0x2a, 0x1e, 0xeb, 0x6a, 0xee, 0xfd, 0x04, 0x68, 0xe6, + 0xcf, 0xd5, 0x3c, 0xa6, 0x53, 0x05, 0xf6, 0x41, 0x7f, 0x6c, 0x22, 0x9d, 0x47, 0xbb, 0xc4, 0xac, + 0x8c, 0x66, 0x68, 0x8a, 0x5d, 0x4e, 0x78, 0x13, 0x10, 0xe2, 0x2c, 0xe1, 0x22, 0x41, 0x7e, 0xd2, + 0x17, 0xd8, 0x20, 0x9b, 0x74, 0xa0, 0x24, 0xfa, 0x07, 0x46, 0x90, 0x68, 0x4a, 0xfc, 0x98, 0x45, + 0x49, 0x8f, 0xee, 0x43, 0xff, 0xff, 0x6b, 0x73, 0x89, 0xe7, 0xdc, 0x0d, 0xb9, 0xb3, 0xf6, 0xad, + 0xb3, 0xe0, 0x80, 0xaf, 0x3f, 0x29, 0xf8, 0xff, 0xf2, 0xfd, 0x0a, 0x9c, 0x1f, 0xcb, 0x23, 0xe4, + 0x4c, 0x0e, 0xa4, 0xe4, 0xe3, 0xa9, 0xf4, 0xd3, 0x92, 0x92, 0xf9, 0xd9, 0xd0, 0xe7, 0xb9, 0x3f, + 0xda, 0x27, 0xb8, 0x5e, 0xaa, 0xf0, 0xdb, 0xbe, 0x28, 0x90, 0x70, 0xc6, 0xe9, 0x1d, 0x60, 0xb1, + 0xad, 0x9f, 0x57, 0x65, 0x60, 0x82, 0x84, 0xda, 0xf3, 0x44, 0x35, 0x1d, 0x72, 0xbc, 0xae, 0x6a, + 0xbe, 0x31, 0x82, 0x0f, 0x57, 0x9a, 0x85, 0x97, 0x67, 0x4a, 0xd2, 0x9d, 0x23, 0x0b, 0x35, 0x84, + 0x0c, 0x76, 0xf5, 0x72, 0xb4, 0x5d, 0x1f, 0x78, 0xc0, 0x9c, 0x8c, 0xd0, 0xd2, 0xcc, 0x4e, 0xd0, + 0x38, 0xc8, 0x1c, 0xa2, 0xae, 0x98, 0x81, 0x86, 0xcf, 0xcb, 0x49, 0x5e, 0x87, 0xeb, 0xef, 0xcb, + 0x3a, 0x00, 0x3a, 0x0d, 0x2e, 0x7c, 0xba, 0x3e, 0x8a, 0xfd, 0x2a, 0x91, 0xd6, 0xa1, 0xb1, 0xa3, + 0x3e, 0xf6, 0xf4, 0x14, 0xd8, 0x9e, 0xbe, 0xad, 0x92, 0x53, 0x92, 0xff, 0x78, 0x1a, 0x42, 0xad, + 0x76, 0x62, 0x01, 0x7c, 0x4e, 0x39, 0xce, 0x37, 0xa9, 0x81, 0xfe, 0xca, 0x3c, 0xd6, 0x2f, 0x46, + 0x75, 0x66, 0x0d, 0xc6, 0x6b, 0xb2, 0xec, 0xa2, 0x6b, 0x6a, 0x60, 0x23, 0xb9, 0xe9, 0xce, 0xfc, + 0x55, 0x7a, 0x29, 0x2f, 0x49, 0xb1, 0x34, 0x64, 0x55, 0x1b, 0x70, 0xe3, 0xd9, 0x87, 0x36, 0x61, + 0xe8, 0xec, 0xac, 0x73, 0x7a, 0x2a, 0x33, 0x6f, 0x35, 0x2f, 0xfd, 0x29, 0xc9, 0xfe, 0xb3, 0x5b, + 0xcf, 0xb0, 0xd1, 0xca, 0xf9, 0x39, 0x1c, 
0xe3, 0x2f, 0xdf, 0x98, 0x70, 0xdc, 0x86, 0x70, 0x45, + 0xc1, 0x55, 0xf6, 0x9e, 0xbf, 0x20, 0x7e, 0x79, 0x92, 0xa0, 0x03, 0x65, 0x41, 0xae, 0x72, 0x90, + 0x66, 0xfb, 0xb2, 0xb4, 0x5a, 0xd2, 0x50, 0x65, 0x68, 0x62, 0x8c, 0xaf, 0x30, 0x2e, 0xa6, 0xf1, + 0x9a, 0x73, 0x1a, 0x35, 0xe6, 0xcd, 0xcb, 0x68, 0x33, 0x9e, 0x30, 0x68, 0xab, 0x99, 0x2e, 0xce, + 0x67, 0x78, 0xb4, 0xb3, 0xd9, 0x48, 0x56, 0x74, 0xc5, 0x6a, 0x9b, 0x1b, 0xd7, 0x3b, 0xea, 0x49, + 0x6b, 0xaf, 0x59, 0xe1, 0x02, 0xfc, 0xb2, 0x3c, 0x26, 0x38, 0x9a, 0xc3, 0x0a, 0x17, 0xb8, 0xab, + 0xa6, 0x71, 0x74, 0x7f, 0xc1, 0xc9, 0xb4, 0x3d, 0x37, 0xc8, 0xdd, 0x28, 0xbb, 0x37, 0xc6, 0x07, + 0xbb, 0x8d, 0x71, 0xf1, 0x1b, 0x25, 0x08, 0x54, 0x31, 0x90, 0x47, 0xa1, 0x4c, 0xb1, 0xe4, 0x6c, + 0x83, 0xdc, 0xa6, 0xef, 0x14, 0xe8, 0xcb, 0x49, 0x96, 0x0c, 0x13, 0x4c, 0xd3, 0xb1, 0x99, 0xed, + 0x05, 0x5e, 0x13, 0xf8, 0xf5, 0x86, 0x6a, 0x2b, 0x46, 0x75, 0xa2, 0xd3, 0xb2, 0x1d, 0x12, 0x01, + 0x8c, 0x0b, 0xc4, 0xd4, 0x76, 0xe0, 0x79, 0xe4, 0x15, 0x4d, 0x92, 0xf2, 0x68, 0xdd, 0xa9, 0x79, + 0x89, 0x82, 0xf8, 0x90, 0x18, 0xd6, 0xc8, 0x89, 0x1f, 0x2f, 0xd3, 0x96, 0xaf, 0x4c, 0x2d, 0x65, + 0xc8, 0x75, 0x41, 0x97, 0x08, 0x54, 0x5a, 0xad, 0xe3, 0x8d, 0xa2, 0x6e, 0x50, 0x1c, 0x79, 0xbe, + 0x7c, 0x46, 0xa8, 0xb1, 0xf3, 0x9b, 0x1c, 0xae, 0x3e, 0x6c, 0x07, 0xa1, 0xb4, 0xd4, 0x6e, 0x2d, + 0xb6, 0xc6, 0x55, 0xa8, 0x8c, 0xfd, 0x3c, 0xee, 0xc4, 0xca, 0x8a, 0x39, 0xbf, 0x5f, 0xe5, 0xbb, + 0xdc, 0xe4, 0xf2, 0xf3, 0x30, 0x16, 0xd9, 0xee, 0xa7, 0x8e, 0x8e, 0x67, 0x38, 0x9d, 0x8d, 0xba, + 0x72, 0x7e, 0xd1, 0x62, 0xde, 0x1b, 0xaa, 0xec, 0x3c, 0x26, 0xfc, 0x6d, 0xe0, 0x41, 0xc8, 0xa4, + 0xdc, 0x5f, 0xf8, 0x28, 0xa2, 0x1d, 0x51, 0x0a, 0x75, 0xed, 0x78, 0x41, 0x42, 0xb0, 0xb1, 0x88, + 0x74, 0x4a, 0xcb, 0x70, 0x79, 0x60, 0x65, 0x4b, 0x38, 0x00, 0x03, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 768, // uncompressed data size (bytes) + 781, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_sig_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_prod_ga10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 768 +// COMPRESSED SIZE (bytes): 418 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_sig_prod_data[] = +{ + 0x4b, 0x3b, 0x68, 0xf7, 0x2e, 0x61, 0xcb, 0x73, 0xf1, 0x99, 0x52, 0xc1, 0xb6, 0xcf, 0x56, 0x7c, + 0xaf, 0x38, 0x79, 0x7a, 0xc1, 0x85, 0xff, 0x12, 0x52, 0xf6, 0x2b, 0x34, 0xc2, 0x56, 0x05, 0x87, + 0xf4, 0x2b, 0xf0, 0xd8, 0xad, 0x2b, 0xed, 0x8f, 0x0d, 0xf4, 0x75, 0x76, 0xae, 0xdc, 0x2b, 0xbc, + 0x22, 0xbd, 0x6d, 0xbd, 0x86, 0xcd, 0xc2, 0x00, 0x17, 0xaf, 0x48, 0xb6, 0x46, 0xc3, 0x2d, 0x7f, + 0x4c, 0xf2, 0xff, 0xbd, 0x3c, 0x91, 0x51, 0x23, 0xf1, 0xa5, 0xae, 0x7e, 0xe6, 0x7f, 0x33, 0x1e, + 0x6f, 0xd1, 0x87, 0xb6, 0xd7, 0x34, 0x52, 0xcf, 0x6b, 0xeb, 0xf7, 0x4a, 0x04, 0x9b, 0xad, 0x98, + 0x16, 0xc1, 0xed, 0xc2, 0xc4, 0xf6, 0x2e, 0xff, 0x88, 0x81, 0xd0, 0xda, 0x1b, 0x07, 0x9f, 0x09, + 0x9b, 0xd4, 0x49, 0x2a, 0x2a, 0x5c, 0x6e, 0xdd, 0x1a, 0x6f, 0xf2, 0x48, 0x40, 0x5a, 0x67, 0xe3, + 0x0f, 0x96, 0xea, 0x24, 0xc1, 0xbf, 0x6d, 0x7d, 0x8b, 0x6f, 0x29, 0x3a, 0x7d, 0xd6, 0xd9, 0xb1, + 0xe8, 0x92, 0xb9, 0xa9, 0xe4, 0xdc, 0x2b, 0x13, 0x77, 0xf3, 0xbf, 0x49, 0xd5, 0x4a, 0x3f, 0xe3, + 0xe2, 0xd2, 0xd6, 0x13, 0xf0, 0xec, 0x7f, 0xd2, 0xf5, 0x9a, 0xbf, 0x97, 0x64, 0x16, 0xee, 0x3b, + 0xa9, 0x10, 0xe7, 0x6c, 0x56, 0xbe, 0x37, 0xd1, 0xff, 0xb8, 0xdc, 0x6a, 0xcd, 0x3c, 0xbd, 0xfd, + 0x7b, 0x7e, 0x7d, 0xde, 0x94, 0xbe, 0xaf, 0xb3, 0x69, 0x09, 0x6b, 0xfc, 0xae, 0x3b, 0xab, 0x16, + 0x38, 0x96, 0x27, 0x94, 0x4d, 0xba, 0x52, 0xf6, 0xd1, 0x76, 0xe5, 0x26, 0xd7, 0xa6, 0x3d, 0x7a, + 0xc2, 0x5e, 0x3e, 0xa5, 0xe9, 0x6d, 0xbd, 0x8f, 0x3c, 0x5d, 0xad, 0x58, 0x32, 0xdb, 0x5b, 0xcd, + 0x3e, 0x3f, 0x3c, 0x13, 0x7f, 0xc6, 0xb7, 0x62, 0x3e, 0xbf, 0xb9, 0xd7, 0xab, 0xc2, 0x7a, 0xc5, + 0x3d, 0x26, 0x12, 0xe6, 0x9f, 0x4a, 0xc4, 0xe2, 0x77, 0xbb, 0x85, 0x33, 0xb8, 0xcf, 0xeb, 0xed, + 0xf9, 0x33, 0x37, 0xe5, 0x09, 0x53, 0xd3, 0x9d, 0xda, 0xa0, 0xa8, 0x2b, 0x49, 0x47, 0x55, 0xe7, + 0x2d, 0x3d, 0x2e, 0x63, 0xc9, 0xc7, 0xe5, 0x64, 0x70, 0xf0, 0xdb, 0x93, 0x5f, 0x2f, 0xd8, 0x6f, + 0xde, 0x3d, 0xdd, 0x9b, 0x3c, 0xaf, 0xe1, 0xd7, 0x43, 0xd5, 0xec, 0x97, 0xd1, 0xd6, 0xe9, 0x3f, + 0x37, 0x1e, 0xfb, 0xb7, 0x59, 0x73, 0xa2, 0xd6, 0x26, 0x09, 0x77, 0xc6, 0x1d, 0xbf, 0x83, 0x65, + 0x6a, 0xfa, 0x97, 0xd4, 0xad, 0xf6, 0xef, 0x38, 0x6b, 0x12, 0xdc, 0x72, 0x48, 0x33, 0xf7, 0x88, + 0xdc, 0x9d, 0x1b, 0x01, 0x2b, 0x04, 0x37, 0xe5, 0x15, 0x1f, 0x71, 0x58, 0x7a, 0xcc, 0xc9, 0x72, + 0x76, 0xfa, 0xca, 0x6b, 0xd7, 0x97, 0x3e, 0x7f, 0xc1, 0x7b, 0x29, 0xf1, 0xdf, 0xbc, 0xcf, 0x29, + 0xce, 0x33, 0xa5, 0x0c, 0xfb, 0xf5, 0x1e, 0x0a, 0x27, 0x49, 0x15, 0xfa, 0x45, 0xbc, 0x38, 0xaf, + 0x52, 0xe5, 0x1d, 0xce, 0x30, 0x0a, 0x06, 0x14, 0x00, 0x00, 0xa0, 0x3f, 0xc7, 0x35, 0x00, 0x03, + 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 768, // uncompressed data size (bytes) + 418, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_sig_prod_data, // 
compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_ga10x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_patch_loc_data[] = +{ + 0x10, 0x1e, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_ga10x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_ga10x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_patch_meta_data[] = +{ + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_GA102("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/ga10x/unload/g_booteruc_unload_ga10x_ga102_rsa3k_0_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_GA102_num_sigs_data[] = +{ + 0x02, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_GA102_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_GA102_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterUnloadUcode_GA102 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_GA102_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_GA102(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterUnloadUcode_GA102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU102.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU102.c new file mode 100644 index 000000000..0ff75f7ce --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU102.c @@ -0,0 +1,1245 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8448 +// COMPRESSED SIZE (bytes): 6785 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_image_dbg_data[] = +{ + 0xed, 0x99, 0x45, 0x54, 0x1c, 0x5a, 0xb0, 0xb5, 0x1b, 0x77, 0x0d, 0x4e, 0x70, 0x77, 0xd7, 0x60, + 0x41, 0x83, 0x5b, 0x08, 0x4e, 0xe3, 0x04, 0x08, 0x4e, 0xd0, 0xd0, 0xd0, 0xb8, 0x04, 0x77, 0x6d, + 0x5c, 0x82, 0xbb, 0xbb, 0x4b, 0x1a, 0x77, 0x77, 0x82, 0x5b, 0xa0, 0xf1, 0x77, 0xdf, 0xf4, 0xce, + 0xde, 0xe0, 0x9f, 0xfc, 0xeb, 0x7e, 0x93, 0x5d, 0x7b, 0x72, 0xea, 0x0c, 0xaa, 0xd6, 0xda, 0x67, + 0x9d, 0x08, 0x00, 0x00, 0xee, 0x35, 0x09, 0x00, 0xc2, 0x01, 0x00, 0x60, 0xf0, 0x30, 0xc0, 0x03, + 0x7c, 0x34, 0x00, 0x1e, 0xd0, 0xbd, 0xb1, 0xf9, 0xf6, 0xf6, 0x86, 0x13, 0x01, 0x80, 0x03, 0xbc, + 0x41, 0x10, 0xeb, 0xf7, 0x00, 0x7c, 0x69, 0xcb, 0x70, 0xf5, 0x49, 0x00, 0xf6, 0xb4, 0x65, 0xf8, + 0x7f, 0x84, 0x21, 0x6d, 0x19, 0xe1, 0x1f, 0xa1, 0x8c, 0x00, 0x00, 0x00, 0x69, 0x10, 0x84, 0xc6, + 0x4d, 0x84, 0x6e, 0x48, 0xee, 0x66, 0x1a, 0x04, 0x2e, 0x2d, 0x1d, 0xbe, 0x71, 0x13, 0xae, 0x71, + 0x19, 0x1e, 0xd4, 0x04, 0x00, 0xe0, 0xf0, 0x28, 0x02, 0x70, 0xef, 0x21, 0x00, 0x1c, 0xb8, 0x08, + 0x80, 0x0d, 0xe2, 0xff, 0x16, 0x58, 0x2d, 0xe9, 0x00, 0x38, 0xf8, 0x7f, 0xaa, 0x5b, 0x31, 0xf8, + 0x58, 0x00, 0x00, 0x39, 0xe2, 0x9f, 0x56, 0x2f, 0x10, 0xa4, 0xce, 0xdb, 0xc7, 0x07, 0x64, 0x18, + 0xc2, 0x3f, 0xa7, 0xc1, 0x27, 0xe3, 0xa0, 0x6e, 0xfc, 0x6f, 0xe7, 0x97, 0x07, 0xa4, 0x08, 0x00, + 0x39, 0xe0, 0x25, 0x07, 0xb1, 0x3d, 0x0e, 0xbe, 0x29, 0x09, 0xf7, 0x1f, 0xff, 0xfa, 0x08, 0x00, + 0xfd, 0x73, 0x1f, 0x18, 0x7c, 0x67, 0xe1, 0x13, 0x5c, 0x6c, 0xbb, 0x08, 0x5a, 0x04, 0x35, 0x12, + 0x20, 0x17, 0x02, 0x03, 0xc4, 0xfe, 0xa3, 0xdd, 0x6f, 0xb7, 0xdc, 0xcf, 0xaf, 0x8a, 0x70, 0xb9, + 0x10, 0x89, 0x29, 0xc0, 0xff, 0x09, 0xb4, 0x25, 0x63, 0xc6, 0xd9, 0xe1, 0xa2, 0xd2, 0x84, 0x65, + 0x1d, 0xa1, 0x33, 0x46, 0x39, 0x8a, 0x1c, 0x29, 0xc3, 0x26, 0x13, 0x45, 0xd3, 0x12, 0xc7, 0xa8, + 0x03, 0xf2, 0x5d, 0x1a, 0xeb, 0xbc, 0xd7, 0xef, 0x7a, 0x53, 0xbe, 0xdf, 0x8c, 0x9d, 0xfb, 0x96, + 0xd6, 0x56, 0x09, 0x59, 0x38, 0x68, 0xf4, 0x57, 0x20, 0xb8, 0x21, 0x5d, 0xa4, 0x9c, 0x0c, 0x25, + 0x67, 0x6e, 0xba, 0x51, 0xc4, 0xbb, 0xf2, 0x50, 0x38, 0x3c, 0x61, 0x13, 0x5a, 0xa8, 0x17, 0x63, + 0xcd, 0xe5, 0x60, 0x51, 0x1e, 0x57, 0x76, 0x60, 0x0a, 0xeb, 0x8b, 0x79, 0x8f, 0x58, 0xcc, 0x5c, + 0xf5, 0xfb, 0x1e, 0x0b, 0x18, 0x65, 0x00, 0x98, 0xc9, 0x91, 0x7c, 0x26, 0x2f, 0xb7, 0x53, 0xec, + 0xc7, 0x56, 0xe4, 0x0a, 0xfd, 0x9e, 0x77, 0x29, 0x9a, 0x8a, 0x5d, 0x61, 0xd8, 0xf8, 0xd9, 0x5e, + 0x1f, 0x8f, 0xf0, 0xb8, 0xb5, 0xd0, 0xc1, 0x1a, 0x6a, 0x5c, 0x6e, 0x44, 0x35, 0x4b, 0x9f, 0x2c, + 0x7c, 0xe4, 0xe3, 0xd0, 0xd9, 0x56, 0x47, 0x65, 0xb2, 0xd3, 0x2d, 0x9a, 0xf9, 0x50, 0x6f, 0xc6, + 0x97, 0x64, 0xbf, 0x9e, 0xc3, 0x51, 0xb5, 0xd5, 0x2d, 0x73, 0x95, 0x75, 0xf9, 0x11, 0x92, 0x96, + 0x67, 0xe5, 0xa5, 0x69, 0x54, 0x8d, 0xad, 0x39, 0x91, 0x48, 0xf1, 0x81, 0xb2, 0x40, 0x14, 0xd3, + 0xb8, 0x63, 0x83, 0xc9, 0xde, 0xc8, 0x05, 0x76, 0xa6, 0xfc, 0x52, 0x8e, 0xb6, 0xbb, 0x54, 0x07, + 0xc5, 0x5d, 0xf3, 0xe8, 0x39, 0x1d, 0x3f, 0x1e, 0x87, 0x9a, 0x34, 0x39, 0x5d, 0xf5, 0x29, 0x6d, + 0xdf, 0xa2, 0xf5, 0xf1, 0x76, 0xb1, 0xb4, 0xe1, 0x99, 0x81, 0xb8, 0x66, 0xaa, 0x9b, 0x74, 0x25, + 0x0b, 0xc7, 0x73, 0x5e, 0x33, 0xd1, 0x4a, 0x50, 0xb7, 
0x07, 0x5f, 0x7e, 0x93, 0x28, 0x11, 0x1b, + 0x0b, 0xe0, 0x60, 0xe6, 0xea, 0x31, 0x3a, 0xc7, 0x99, 0xad, 0xce, 0xbe, 0xd0, 0xf6, 0xd6, 0x2f, + 0x2a, 0xc1, 0x85, 0x3a, 0xe0, 0x62, 0xbb, 0xaf, 0x82, 0x47, 0x1d, 0xba, 0xc6, 0xab, 0xd5, 0xba, + 0x51, 0xf2, 0x5c, 0x9a, 0xad, 0xcf, 0x37, 0xde, 0x90, 0xe4, 0x80, 0x2d, 0x92, 0x96, 0x3b, 0xe2, + 0xc6, 0xea, 0x34, 0x4d, 0x2c, 0xf1, 0x49, 0x4c, 0x50, 0xcb, 0x32, 0x37, 0x98, 0xb5, 0xce, 0x66, + 0x5b, 0x6a, 0x88, 0x2a, 0xb6, 0xbe, 0x7e, 0x72, 0x69, 0xed, 0x27, 0x01, 0xcb, 0x9a, 0xc1, 0x97, + 0x29, 0x0f, 0x41, 0x72, 0xc4, 0x00, 0xf3, 0xe7, 0x80, 0xc7, 0x91, 0xa5, 0x54, 0x17, 0x06, 0x04, + 0x46, 0x72, 0x5c, 0xa9, 0xb2, 0xe0, 0xa9, 0x70, 0x58, 0xd2, 0xdc, 0x46, 0xf6, 0xbe, 0x07, 0x7e, + 0x39, 0x25, 0x9f, 0x04, 0x28, 0xee, 0x87, 0x70, 0xee, 0x90, 0x8c, 0xfa, 0xe7, 0xc9, 0x0f, 0xeb, + 0x0a, 0xb1, 0x5d, 0xcf, 0x0a, 0x8a, 0x98, 0xf5, 0x53, 0x68, 0x83, 0x50, 0x6d, 0x1f, 0x89, 0xad, + 0xc5, 0x60, 0x03, 0xf2, 0xef, 0x63, 0x7d, 0x6c, 0x31, 0x34, 0xd7, 0x16, 0xd5, 0x7b, 0x00, 0x38, + 0x9b, 0xf1, 0xe0, 0x83, 0x87, 0x9f, 0xfc, 0xeb, 0x14, 0x2d, 0xea, 0x26, 0x0c, 0xad, 0x34, 0xb9, + 0x7b, 0x63, 0xf3, 0xc8, 0x2d, 0x8f, 0xfe, 0x6d, 0x09, 0xfe, 0x95, 0xfe, 0x5b, 0xb0, 0x3e, 0xdf, + 0x0e, 0xce, 0xa2, 0xe3, 0xb2, 0xab, 0x76, 0xe4, 0x2c, 0x17, 0xb1, 0x0e, 0x57, 0x60, 0x68, 0x34, + 0x5b, 0x67, 0x48, 0xe7, 0x9e, 0x9a, 0xe3, 0x0f, 0x11, 0x72, 0x49, 0x76, 0xd5, 0x6e, 0xdd, 0x68, + 0xef, 0xb3, 0x14, 0xf2, 0x3f, 0x22, 0x52, 0x75, 0x1e, 0x91, 0x62, 0xaf, 0x57, 0x5b, 0x6c, 0x45, + 0x55, 0x97, 0x9f, 0x05, 0x3f, 0xd7, 0xdb, 0xf5, 0xc9, 0x2f, 0x47, 0x21, 0xa1, 0xf9, 0x73, 0x66, + 0xd0, 0xb1, 0x00, 0x75, 0xc3, 0xe4, 0x73, 0xba, 0x6d, 0x9d, 0xaa, 0xa9, 0x9d, 0x4c, 0x9d, 0x1f, + 0xb2, 0x3f, 0x7c, 0x0e, 0xc2, 0x82, 0xbc, 0x46, 0xd2, 0xf0, 0x3f, 0x13, 0x43, 0x6d, 0x81, 0x96, + 0xe0, 0x79, 0x12, 0x85, 0xe7, 0x4a, 0x1d, 0x04, 0x0f, 0xab, 0x7a, 0xbc, 0xfe, 0x24, 0xb2, 0x79, + 0x58, 0x67, 0xf5, 0xd7, 0x23, 0x33, 0xad, 0xdf, 0xe5, 0x8c, 0x05, 0x8a, 0xc1, 0x38, 0xd0, 0x90, + 0x94, 0x49, 0x43, 0x56, 0xf9, 0x4e, 0x4a, 0xff, 0x0a, 0xcd, 0x16, 0x41, 0xbd, 0x30, 0x83, 0xaa, + 0x7e, 0xcf, 0x03, 0x3d, 0xa6, 0x98, 0x44, 0xd2, 0x65, 0xe5, 0x09, 0x29, 0xee, 0x6a, 0x75, 0x7a, + 0xc9, 0xc9, 0x20, 0x3c, 0x99, 0xd6, 0xdc, 0x17, 0xfc, 0xeb, 0x59, 0x3b, 0xa5, 0x80, 0x8e, 0x09, + 0x57, 0x57, 0x24, 0x41, 0x71, 0xda, 0x0f, 0x7f, 0x6f, 0x15, 0xcb, 0x7d, 0x0a, 0x62, 0xb1, 0x16, + 0x9d, 0x86, 0x22, 0xeb, 0x76, 0x79, 0x9e, 0x31, 0x05, 0xa4, 0x7b, 0x6f, 0x18, 0xe9, 0x2c, 0x3f, + 0xef, 0x0f, 0x39, 0x5d, 0x35, 0x13, 0xd6, 0x01, 0xcf, 0x36, 0x62, 0x15, 0xf2, 0x22, 0x2d, 0x4a, + 0x42, 0x84, 0xbe, 0x87, 0xff, 0x42, 0x76, 0xa3, 0xd5, 0x0d, 0xf5, 0x8d, 0xb2, 0x22, 0x13, 0x4e, + 0x39, 0xc1, 0xcc, 0x5f, 0x40, 0x64, 0x92, 0x73, 0x28, 0xeb, 0x79, 0x02, 0x02, 0xbb, 0xc8, 0xf5, + 0x19, 0x59, 0x23, 0xcb, 0x25, 0x69, 0x08, 0x73, 0x6a, 0xda, 0x48, 0x9a, 0xf6, 0x25, 0x44, 0x49, + 0x02, 0x08, 0x35, 0xde, 0x21, 0x12, 0x47, 0xc3, 0x0e, 0x70, 0x73, 0x70, 0xb9, 0x5b, 0xa2, 0xc9, + 0x71, 0x42, 0x28, 0x1e, 0x39, 0x5c, 0x7e, 0xc8, 0x65, 0x1b, 0xca, 0x6d, 0x1e, 0x4a, 0xaa, 0xd1, + 0x46, 0xb7, 0x72, 0x06, 0xb3, 0x9a, 0x19, 0x57, 0x4a, 0x43, 0xc8, 0x1d, 0xdd, 0x98, 0xf2, 0x17, + 0xd4, 0xba, 0xde, 0x73, 0x5b, 0x90, 0x52, 0x9c, 0xdc, 0x0d, 0x59, 0x94, 0x15, 0xef, 0xcf, 0xd0, + 0xb7, 0x31, 0x23, 0x6f, 0x11, 0x84, 0xe0, 0xe0, 0x0f, 0x35, 0x0b, 0xdb, 0x4c, 0x17, 0x5c, 0xd4, + 0xc0, 0x1f, 0x46, 0x55, 0x87, 0x75, 0xe6, 0x46, 0xda, 0xbe, 0x93, 0x2e, 0x84, 0xaa, 0x60, 0xc4, + 0x64, 0x37, 0x66, 0x02, 0xe9, 0x9d, 0xd9, 0xdf, 0xed, 0x15, 0xed, 0x26, 0x5d, 
0x43, 0x97, 0x09, + 0x82, 0xdd, 0xbc, 0x24, 0x6b, 0x1d, 0xff, 0xb4, 0x89, 0xce, 0xa3, 0x04, 0xb7, 0xa2, 0xe1, 0xf6, + 0xa8, 0xd3, 0xd4, 0x48, 0x0b, 0x95, 0xf7, 0xfe, 0x82, 0x9e, 0x2b, 0x79, 0xc7, 0x47, 0x3f, 0xb4, + 0xa5, 0xc8, 0x66, 0x9e, 0x24, 0x22, 0x95, 0xe6, 0x18, 0x44, 0xff, 0x56, 0xe9, 0xf2, 0x74, 0xee, + 0x08, 0xc1, 0xe9, 0x3f, 0xa2, 0xa8, 0xf1, 0x05, 0x4e, 0x90, 0x31, 0x2e, 0x1f, 0x19, 0xda, 0xeb, + 0x7d, 0xa1, 0xf3, 0x58, 0xb3, 0xd1, 0x2f, 0x35, 0x16, 0x97, 0x4b, 0x6f, 0xa6, 0xb8, 0x91, 0x03, + 0x9b, 0x1e, 0xe4, 0xcc, 0x32, 0xec, 0xf9, 0xe3, 0x93, 0xa1, 0xf7, 0x5a, 0x6a, 0x20, 0x5c, 0xac, + 0x25, 0x4e, 0xb4, 0xfa, 0xf4, 0xb1, 0x1a, 0x89, 0x02, 0xaa, 0xb9, 0x2f, 0xc2, 0x94, 0xc2, 0x47, + 0x02, 0x22, 0x1f, 0x87, 0xaf, 0xa7, 0x06, 0xd0, 0x4a, 0x4f, 0xa1, 0xd7, 0xe2, 0x47, 0xef, 0x4d, + 0x35, 0x77, 0x9e, 0x73, 0x7d, 0x8b, 0xd8, 0x68, 0xcd, 0x50, 0x7b, 0x52, 0x75, 0x28, 0xa7, 0x2c, + 0xe6, 0x5b, 0x10, 0xd1, 0x16, 0x84, 0x76, 0x19, 0x66, 0xcd, 0xb1, 0x3f, 0x5f, 0xf6, 0xff, 0xe2, + 0xef, 0x23, 0x45, 0xe7, 0xe9, 0x1e, 0x69, 0xc3, 0x03, 0x96, 0x02, 0xb8, 0x6d, 0x8d, 0xad, 0x56, + 0x89, 0xa1, 0x1c, 0x72, 0x4a, 0x8b, 0x04, 0x34, 0xaa, 0x6a, 0xe2, 0x08, 0x99, 0x67, 0xb2, 0xb8, + 0xf0, 0xb6, 0x06, 0x23, 0x81, 0x48, 0xf8, 0x3b, 0xe5, 0x6e, 0xc7, 0xac, 0xed, 0x70, 0x15, 0xec, + 0xcb, 0x8a, 0xb3, 0x86, 0xa1, 0xbe, 0xef, 0xa4, 0xea, 0xca, 0xc5, 0xf2, 0x13, 0xee, 0x41, 0xcf, + 0xb4, 0x43, 0x2c, 0x19, 0xab, 0x32, 0xc2, 0x7c, 0xbf, 0xb0, 0x34, 0x73, 0x2c, 0x78, 0x4d, 0xd4, + 0xd4, 0x6e, 0xf4, 0xad, 0xba, 0x16, 0xba, 0x32, 0x74, 0x02, 0xae, 0xf6, 0x31, 0xb2, 0xed, 0x72, + 0x6d, 0x9d, 0x50, 0xce, 0x79, 0x22, 0x80, 0xee, 0xa2, 0x74, 0x60, 0x51, 0x3b, 0x65, 0x9a, 0xda, + 0x38, 0x23, 0x47, 0x96, 0x80, 0xb8, 0x5b, 0x53, 0xc6, 0x1f, 0x6e, 0x29, 0x81, 0x33, 0x90, 0x42, + 0x63, 0x13, 0xda, 0x73, 0x3a, 0x01, 0xbd, 0x72, 0xb8, 0xa5, 0x7e, 0xb7, 0x5d, 0x79, 0xf2, 0xc4, + 0x14, 0x66, 0x56, 0xf5, 0x91, 0x14, 0x10, 0x95, 0x34, 0x82, 0x2f, 0x19, 0xf0, 0x29, 0xab, 0xcf, + 0x77, 0x53, 0x4a, 0x21, 0xde, 0x3d, 0xcd, 0xfe, 0xca, 0x0a, 0xad, 0x4d, 0x5a, 0x7d, 0xe9, 0x22, + 0xd6, 0xa7, 0x17, 0x17, 0xae, 0x37, 0xc5, 0x52, 0x9f, 0xa6, 0xbe, 0x5e, 0xe1, 0x47, 0xfd, 0xa2, + 0x37, 0xe2, 0x57, 0x83, 0xa8, 0xa7, 0xb6, 0x44, 0xd4, 0x99, 0x7e, 0x50, 0x11, 0xf5, 0xc4, 0x97, + 0x3c, 0x96, 0x7a, 0x46, 0x13, 0x8f, 0x72, 0x3f, 0x59, 0x09, 0x5d, 0xc8, 0x7e, 0xe6, 0x34, 0x9e, + 0xfc, 0x17, 0xa7, 0xcb, 0x83, 0xd1, 0x89, 0xbf, 0xbc, 0x0d, 0x11, 0x14, 0x53, 0x25, 0xac, 0x07, + 0x56, 0x27, 0xa4, 0x36, 0x15, 0xa2, 0xa0, 0xb2, 0x83, 0xe1, 0x76, 0xdf, 0xcc, 0x87, 0x02, 0xaf, + 0x8d, 0x45, 0x68, 0x88, 0x52, 0x5a, 0x1b, 0x9b, 0xf6, 0xb8, 0x87, 0xed, 0x66, 0x06, 0xfc, 0x21, + 0x14, 0x2e, 0x5e, 0xa6, 0x7e, 0x6a, 0x80, 0x1c, 0xbb, 0x97, 0xdd, 0x56, 0x49, 0x0f, 0x39, 0x21, + 0x40, 0xed, 0x58, 0x50, 0x31, 0x98, 0x00, 0x77, 0x25, 0xf2, 0x6a, 0x44, 0x69, 0x2f, 0x51, 0x0b, + 0x38, 0xd0, 0x24, 0x1a, 0x1f, 0xd8, 0xa2, 0x0e, 0x35, 0x0a, 0x61, 0x20, 0x19, 0x92, 0xdc, 0xf2, + 0xe3, 0x3c, 0x3f, 0x0e, 0x71, 0x88, 0x68, 0xc0, 0x7a, 0x11, 0x3e, 0xe4, 0xb0, 0x97, 0x36, 0x68, + 0xbe, 0xfb, 0x6a, 0x28, 0x90, 0xce, 0x6e, 0x47, 0x24, 0x5d, 0x63, 0x79, 0x12, 0xc0, 0xa2, 0xc3, + 0x4d, 0x98, 0xbf, 0xab, 0xee, 0x51, 0x09, 0xc4, 0x03, 0xc6, 0x95, 0xce, 0xb5, 0x59, 0x98, 0x72, + 0xb5, 0x3d, 0x17, 0x0b, 0xbb, 0x2e, 0xcf, 0x35, 0xbc, 0xaf, 0xe9, 0x58, 0xdc, 0xb2, 0x60, 0xe9, + 0x87, 0xf8, 0x4c, 0x2f, 0xa9, 0xfc, 0x7c, 0xe5, 0x13, 0x55, 0x9f, 0x89, 0x30, 0x7b, 0x67, 0x64, + 0xb3, 0x99, 0x2d, 0x84, 0xb7, 0xd8, 0xa0, 0xcb, 0x0c, 0x29, 0x30, 0x15, 0x20, 0xe0, 0x0a, 0x75, + 0x1c, 
0x2c, 0x79, 0x83, 0x25, 0x2b, 0x20, 0xe4, 0x81, 0x8d, 0x67, 0xd0, 0xd2, 0x4f, 0x8b, 0xc9, + 0x62, 0x34, 0xdb, 0xf7, 0x73, 0x92, 0x39, 0x4b, 0x1a, 0x0c, 0x6e, 0x51, 0x4a, 0xf9, 0x70, 0x76, + 0xee, 0xe8, 0x27, 0xf0, 0x87, 0x08, 0xe6, 0x4e, 0x9b, 0x49, 0x8e, 0xf0, 0x62, 0x47, 0xaf, 0x48, + 0xdd, 0x98, 0x0b, 0xa8, 0xa0, 0x81, 0x5c, 0xf2, 0x12, 0xe7, 0x82, 0x32, 0x7e, 0xcd, 0xd2, 0xcb, + 0xbf, 0x1c, 0x5d, 0xe4, 0xf8, 0x99, 0xde, 0x26, 0x08, 0xba, 0x71, 0x4d, 0x8b, 0x7d, 0xb2, 0xef, + 0xb0, 0x03, 0xd0, 0xdb, 0xa5, 0x23, 0x6a, 0x31, 0xee, 0xb2, 0x04, 0xfd, 0x26, 0xdc, 0x9c, 0x7e, + 0x3a, 0x92, 0x92, 0x8b, 0xac, 0xa3, 0x70, 0x5e, 0x25, 0x4b, 0xd8, 0xa5, 0x2f, 0x32, 0xb9, 0x42, + 0x39, 0x43, 0x1a, 0xee, 0x59, 0x07, 0xbf, 0x44, 0xd8, 0x16, 0x76, 0x05, 0x3b, 0x5c, 0x2f, 0x82, + 0x7e, 0xd9, 0xe7, 0x23, 0xdc, 0xbb, 0x5f, 0x99, 0xdb, 0x73, 0xd2, 0x89, 0xf1, 0x94, 0x15, 0x97, + 0xe2, 0xec, 0x32, 0x71, 0x4a, 0xf6, 0x20, 0xaf, 0x82, 0x5e, 0x23, 0x74, 0x30, 0x8d, 0x17, 0xb5, + 0x3d, 0x92, 0xd5, 0x44, 0x75, 0x08, 0xee, 0xc5, 0x6d, 0x10, 0x45, 0xe9, 0xfc, 0xb5, 0x5c, 0x1c, + 0x4b, 0x08, 0x70, 0x8f, 0x72, 0x27, 0x46, 0x0c, 0xa7, 0xc8, 0x85, 0xa6, 0x35, 0x31, 0x28, 0x0a, + 0x49, 0x97, 0xb1, 0x16, 0x18, 0xc8, 0xf5, 0xcc, 0x16, 0x89, 0xc5, 0xe3, 0x92, 0xb7, 0xda, 0xe4, + 0x84, 0x6a, 0xcf, 0x4e, 0x36, 0xfc, 0xac, 0x7c, 0xfc, 0xea, 0xb9, 0xe6, 0xee, 0xf6, 0x49, 0xe7, + 0x0c, 0x58, 0xca, 0x1d, 0x90, 0x40, 0x77, 0xae, 0x42, 0x99, 0x35, 0xfe, 0x59, 0xdc, 0xf1, 0x59, + 0x4e, 0xed, 0x2e, 0xbd, 0x2f, 0xea, 0x32, 0xf6, 0x5f, 0x09, 0xb5, 0xcb, 0x16, 0xcb, 0xff, 0x3c, + 0x39, 0xf5, 0x87, 0x2e, 0x64, 0xba, 0x51, 0x9c, 0xa5, 0x34, 0xed, 0x2d, 0x01, 0x23, 0x22, 0xc4, + 0x68, 0x1f, 0x28, 0x55, 0x29, 0xe3, 0xd2, 0xb3, 0x91, 0xde, 0xef, 0x12, 0xcf, 0xd6, 0xd1, 0xc5, + 0x94, 0x85, 0x5a, 0x6b, 0xad, 0x77, 0x08, 0xdb, 0x48, 0x2a, 0x69, 0x20, 0xd1, 0xea, 0xe0, 0xb3, + 0x20, 0x4c, 0x5c, 0x93, 0x07, 0x17, 0x0e, 0x4d, 0x0b, 0x8d, 0xcb, 0x23, 0xb4, 0xb4, 0x87, 0x6e, + 0xaf, 0x5c, 0x55, 0xe1, 0x2f, 0x14, 0xb2, 0x32, 0x9d, 0xe3, 0xd2, 0xd2, 0x3b, 0x01, 0x06, 0xaf, + 0x49, 0x1b, 0xff, 0x92, 0x37, 0x53, 0x36, 0x35, 0x58, 0x0d, 0x0b, 0xd7, 0x83, 0x89, 0xa8, 0xcc, + 0x97, 0xf6, 0xb4, 0x46, 0xf6, 0x07, 0x19, 0xb9, 0xe8, 0xaa, 0x4d, 0xf9, 0x9f, 0x3d, 0xae, 0x2c, + 0xeb, 0x68, 0xe3, 0x74, 0xfd, 0x58, 0x89, 0x8c, 0xf2, 0x2d, 0x22, 0x79, 0x3c, 0x75, 0x6a, 0xde, + 0xd5, 0x46, 0x45, 0x3e, 0xbe, 0x5c, 0x20, 0x5b, 0xd5, 0xad, 0x59, 0xd3, 0x1a, 0x03, 0x0e, 0x5c, + 0x29, 0xdb, 0x12, 0x19, 0x7e, 0x22, 0xc3, 0xd2, 0x5e, 0x62, 0xdf, 0x6f, 0x5b, 0x33, 0xbc, 0x4e, + 0x0d, 0xcd, 0xaa, 0x60, 0xb4, 0x34, 0x94, 0xa8, 0x68, 0x6c, 0x40, 0x39, 0xcf, 0x5d, 0xbf, 0xe9, + 0x5c, 0xa8, 0xf6, 0xfb, 0xef, 0xd3, 0xe2, 0x4a, 0x6a, 0x33, 0x15, 0xd6, 0x66, 0x91, 0xde, 0x06, + 0xeb, 0xe9, 0xfe, 0xe8, 0x1b, 0x17, 0x97, 0x63, 0x27, 0x56, 0xaf, 0x58, 0xb6, 0x0a, 0xc7, 0xd0, + 0xeb, 0x8d, 0xb3, 0x61, 0x90, 0xb5, 0x60, 0x7f, 0xdb, 0xaf, 0xc3, 0x21, 0x0e, 0x1c, 0xfc, 0xf6, + 0x05, 0x80, 0x86, 0xce, 0xfa, 0x09, 0x5a, 0x61, 0x3b, 0x52, 0xa7, 0xc0, 0xa1, 0xb1, 0xf4, 0x34, + 0x28, 0x5f, 0xf8, 0xcb, 0xdd, 0xda, 0x97, 0xe6, 0x7e, 0x8a, 0x27, 0x33, 0x33, 0x81, 0xfc, 0xf8, + 0x65, 0xbe, 0x6f, 0xc6, 0x95, 0xc3, 0xdb, 0xad, 0xf0, 0x12, 0x73, 0xae, 0xb2, 0x85, 0x7c, 0x78, + 0xc5, 0x00, 0x12, 0xee, 0xe0, 0x6e, 0xf2, 0x6d, 0xf8, 0x26, 0xd0, 0x82, 0xd5, 0x88, 0xbd, 0x74, + 0x6e, 0xf8, 0xc7, 0x02, 0xf8, 0xa3, 0xd7, 0xf2, 0x04, 0x01, 0x1c, 0xc3, 0xe3, 0x75, 0x9c, 0x83, + 0x15, 0x9f, 0x35, 0xce, 0x59, 0xa0, 0xb9, 0xf1, 0x7e, 0x40, 0xb6, 0xe2, 0xdf, 0xa0, 0x6e, 0x8a, + 0xc0, 0xf0, 0xa2, 0x39, 0x69, 
0x2b, 0xff, 0x2e, 0x44, 0x1f, 0xbd, 0xe4, 0x62, 0x2f, 0x8c, 0x03, + 0xf9, 0xda, 0x0c, 0x53, 0x3e, 0xec, 0x17, 0x1a, 0x31, 0x2f, 0xc5, 0xef, 0xbb, 0xd0, 0x5d, 0x31, + 0x55, 0xf5, 0xf9, 0x71, 0xfb, 0xa3, 0x7d, 0xca, 0x30, 0x58, 0x66, 0xe7, 0x75, 0xa5, 0x71, 0xe6, + 0x4a, 0x10, 0xe7, 0xf7, 0x8a, 0x35, 0x98, 0xef, 0x18, 0x8f, 0x9a, 0xde, 0x5b, 0xf8, 0x7a, 0x92, + 0xbd, 0xf0, 0x6a, 0x4b, 0x45, 0xfb, 0x68, 0x90, 0xc2, 0xb7, 0x3f, 0x15, 0x55, 0xb0, 0xb6, 0xf1, + 0x1c, 0x95, 0x6d, 0x86, 0xa0, 0x82, 0x17, 0x01, 0xa9, 0xd2, 0x0a, 0xdc, 0x67, 0xc1, 0x07, 0x95, + 0x94, 0x1c, 0x74, 0xfa, 0x0f, 0xe9, 0xaf, 0xad, 0x79, 0x06, 0x71, 0x35, 0x18, 0xc1, 0xb8, 0x68, + 0x1f, 0x7d, 0x84, 0x95, 0xef, 0xe6, 0x2e, 0x1f, 0x85, 0xb6, 0x39, 0x07, 0xbb, 0xe6, 0x7d, 0xdc, + 0x6a, 0xf3, 0x87, 0xd4, 0x6c, 0x44, 0x25, 0x0c, 0x81, 0x51, 0xfa, 0x54, 0xdf, 0x47, 0x94, 0x87, + 0x8e, 0xc3, 0x67, 0x9d, 0xd0, 0xa3, 0xbd, 0x01, 0x0d, 0xba, 0xe9, 0xae, 0x12, 0xab, 0x49, 0xae, + 0xd3, 0x5e, 0x6d, 0xe6, 0x90, 0x55, 0xa6, 0xfe, 0x21, 0x52, 0xf0, 0x06, 0x88, 0xea, 0xa3, 0x42, + 0x52, 0xb7, 0x50, 0x36, 0xd6, 0xe5, 0xf3, 0x71, 0x12, 0x61, 0x6a, 0x73, 0x74, 0xe0, 0xee, 0x42, + 0x79, 0x6b, 0x18, 0x24, 0x3f, 0x0f, 0xdf, 0xee, 0x78, 0xf5, 0xa6, 0xe6, 0x40, 0x52, 0x73, 0x00, + 0x97, 0x20, 0xa5, 0xae, 0xfb, 0x7d, 0xb4, 0x8f, 0x5a, 0x79, 0x0b, 0xef, 0xa0, 0x60, 0x24, 0xb0, + 0x63, 0x0c, 0xca, 0x4b, 0x39, 0x12, 0x08, 0xe4, 0x1e, 0xc4, 0x34, 0x84, 0xdd, 0x3c, 0xf5, 0xd0, + 0xa0, 0x56, 0xe1, 0x16, 0x72, 0x94, 0x2f, 0x8b, 0x2e, 0xb1, 0x78, 0x0d, 0x16, 0xa3, 0x68, 0x8e, + 0x8c, 0xe6, 0x5a, 0x09, 0x09, 0xb5, 0xe0, 0xe5, 0xa3, 0xfb, 0xf4, 0x5c, 0xa5, 0xda, 0xc7, 0xa7, + 0x0c, 0xea, 0xf5, 0xd9, 0x0b, 0xb0, 0x20, 0x2a, 0x7d, 0x2c, 0x38, 0xf4, 0x3f, 0xdc, 0x78, 0xad, + 0xb8, 0x38, 0xc1, 0x4e, 0xc5, 0x47, 0xc5, 0x2f, 0xd9, 0x79, 0x2a, 0x02, 0x2a, 0xe0, 0xb9, 0x88, + 0xb9, 0x26, 0x54, 0xfa, 0xd1, 0xeb, 0x92, 0xb9, 0x14, 0xb6, 0xec, 0xab, 0xba, 0xad, 0x95, 0x1e, + 0xf5, 0x2e, 0xd4, 0x9f, 0x5a, 0x5f, 0xcd, 0xdc, 0xf4, 0x2d, 0xd5, 0x48, 0xa4, 0x42, 0x36, 0xcc, + 0xbc, 0xac, 0xfa, 0xf3, 0xe6, 0xd2, 0xba, 0x2a, 0xd3, 0xd8, 0x81, 0x3d, 0x5f, 0x85, 0x76, 0x8d, + 0x40, 0xf2, 0x9a, 0xf2, 0x5e, 0xf4, 0x34, 0x4e, 0x5d, 0x6d, 0xec, 0x49, 0x37, 0xe1, 0x2b, 0x7b, + 0x3f, 0xd6, 0x92, 0x27, 0xc4, 0xe8, 0xd5, 0x75, 0x94, 0x9b, 0x82, 0xfe, 0xde, 0x6f, 0x22, 0xa7, + 0xba, 0x57, 0x9c, 0xfe, 0x70, 0x88, 0x27, 0xbd, 0x5d, 0xe8, 0x90, 0xc2, 0xea, 0xfd, 0x4d, 0x78, + 0x8e, 0xc4, 0xc3, 0x43, 0xc0, 0x6d, 0xf5, 0xda, 0x60, 0x28, 0x3d, 0x89, 0x8a, 0x52, 0xa5, 0xdd, + 0x8a, 0xfa, 0x61, 0x84, 0x68, 0x8e, 0x56, 0xe8, 0x1a, 0xb2, 0x2c, 0x20, 0x61, 0x72, 0x5a, 0x16, + 0x82, 0xc8, 0xfc, 0x91, 0x98, 0x5f, 0x90, 0xa3, 0xf3, 0xa0, 0x92, 0xe5, 0xf6, 0xa1, 0x90, 0x80, + 0x28, 0x25, 0xf7, 0xd4, 0xad, 0x50, 0xd6, 0x62, 0x7f, 0x5a, 0xef, 0x73, 0x06, 0x33, 0xc9, 0x4b, + 0x86, 0x44, 0xf1, 0x72, 0x3b, 0xe5, 0xf3, 0xf0, 0xe1, 0xfc, 0x5a, 0x67, 0xfc, 0xe0, 0xdf, 0x9d, + 0xeb, 0x68, 0x9e, 0x7d, 0xed, 0x94, 0x9d, 0xa5, 0xb3, 0xe8, 0x7a, 0x92, 0x74, 0x81, 0x82, 0xec, + 0xee, 0x5a, 0xd1, 0xd8, 0xa9, 0x14, 0x94, 0x90, 0x46, 0x51, 0x55, 0x83, 0x4f, 0xe6, 0xed, 0xfe, + 0xc4, 0x1e, 0x8f, 0x53, 0x52, 0xe4, 0xed, 0xe3, 0x66, 0x96, 0x7a, 0xe6, 0xdf, 0x39, 0x97, 0x32, + 0x24, 0x52, 0xc2, 0xae, 0xa9, 0xb1, 0x7c, 0xc8, 0x74, 0x71, 0xe8, 0xb1, 0x65, 0x15, 0xd5, 0x96, + 0x66, 0xe7, 0x90, 0xb2, 0xa7, 0xf9, 0xe8, 0xd0, 0xc5, 0x3d, 0x8b, 0xdb, 0xa0, 0x22, 0x70, 0x08, + 0x99, 0xc1, 0xd7, 0x9a, 0xab, 0x35, 0x48, 0xcd, 0x41, 0x72, 0x73, 0xd8, 0x85, 0xe3, 0xc9, 0x65, + 0xa2, 0x04, 0x23, 0x78, 0x5d, 0x96, 0x80, 0x87, 0x5b, 
0x4e, 0x3d, 0x4a, 0x53, 0xf5, 0xcf, 0xde, + 0x0e, 0xeb, 0x17, 0x0e, 0x1d, 0x24, 0xf4, 0x74, 0x26, 0x56, 0x46, 0x59, 0x67, 0x33, 0x6d, 0x66, + 0x2a, 0x44, 0x7c, 0xaa, 0x46, 0xeb, 0x93, 0xbf, 0x9c, 0x0f, 0x2b, 0x03, 0x3c, 0x51, 0x5b, 0x89, + 0xdb, 0xb9, 0x70, 0xa3, 0x22, 0xf0, 0x8a, 0x31, 0x7a, 0xee, 0x9e, 0xad, 0xae, 0xac, 0xd9, 0xa7, + 0xcf, 0x9b, 0x26, 0x62, 0x55, 0x08, 0x2d, 0xbf, 0x5b, 0x34, 0xdf, 0x7a, 0xc6, 0x80, 0x4f, 0x93, + 0x3c, 0xba, 0xe5, 0xce, 0xda, 0x19, 0xdf, 0x6c, 0xdf, 0xd9, 0x62, 0x9e, 0xe6, 0xe6, 0x1e, 0x36, + 0xf2, 0x25, 0x5d, 0x80, 0x0b, 0x7f, 0x72, 0xce, 0xfb, 0x15, 0x57, 0x0b, 0x39, 0x21, 0x52, 0x11, + 0x73, 0xb6, 0xf4, 0xc6, 0xcb, 0xd8, 0xbe, 0xa6, 0x8b, 0x98, 0xf0, 0x74, 0x4d, 0x7f, 0xed, 0x80, + 0x9d, 0xbb, 0xec, 0x56, 0x59, 0x83, 0xa7, 0xcf, 0x0c, 0x4b, 0x68, 0x04, 0x5f, 0x72, 0x20, 0xd8, + 0x6c, 0x24, 0x37, 0x44, 0x03, 0x09, 0x3b, 0x1a, 0x13, 0xa8, 0xa6, 0xb0, 0x34, 0x54, 0x0d, 0xa4, + 0x37, 0x5b, 0xd1, 0x4e, 0x3b, 0xa4, 0xf4, 0xf7, 0xa8, 0xa0, 0x51, 0x21, 0xa3, 0xf9, 0xe6, 0xe7, + 0x2e, 0x06, 0xcc, 0x73, 0x1b, 0x54, 0xce, 0xa0, 0xfc, 0x06, 0xde, 0x08, 0xdf, 0x8c, 0x9e, 0x9c, + 0x2a, 0xa3, 0xae, 0x01, 0xa0, 0xe6, 0xbf, 0xf7, 0xed, 0x2a, 0xaf, 0x2d, 0xca, 0xbb, 0x6f, 0x97, + 0xad, 0x61, 0x8f, 0xb9, 0x0c, 0x7a, 0x81, 0x7f, 0x81, 0xe2, 0x08, 0x2b, 0x68, 0xcd, 0x4e, 0xb8, + 0x29, 0xe6, 0x78, 0xb6, 0x1a, 0x03, 0xeb, 0x7f, 0x89, 0x2b, 0x1a, 0x7b, 0xf9, 0xd2, 0xee, 0xc3, + 0xfb, 0x49, 0xf3, 0x90, 0x56, 0x2b, 0x16, 0xcd, 0xd7, 0x93, 0xf1, 0xd3, 0xa7, 0xfb, 0x62, 0x46, + 0x80, 0xb7, 0xbb, 0x92, 0x99, 0x23, 0x98, 0xae, 0x85, 0x08, 0x0d, 0x74, 0x17, 0x45, 0x84, 0x81, + 0x9d, 0x4a, 0xbf, 0xd6, 0x8d, 0x62, 0xfe, 0xa7, 0xf6, 0xe9, 0xe0, 0xd5, 0x46, 0xa7, 0xab, 0xd0, + 0xc3, 0xd1, 0x0d, 0xb3, 0x8d, 0xea, 0x3e, 0xdb, 0xfe, 0x69, 0xf4, 0x66, 0xbc, 0x18, 0xc1, 0x74, + 0xd2, 0x64, 0x80, 0xc7, 0xfb, 0x99, 0x31, 0x6a, 0x6b, 0x4a, 0x0b, 0x9b, 0x60, 0x9f, 0x66, 0x9d, + 0x45, 0xcc, 0xe2, 0xc2, 0xde, 0xe8, 0xb8, 0x35, 0x1b, 0x8c, 0xb1, 0x72, 0xf9, 0x1c, 0xca, 0x17, + 0x48, 0x22, 0x4c, 0x40, 0xf5, 0x26, 0xde, 0xfd, 0x59, 0x7d, 0x9c, 0xea, 0x42, 0xfb, 0xd7, 0xca, + 0xcd, 0x56, 0x57, 0xf4, 0xc3, 0x69, 0x0d, 0x47, 0xec, 0x02, 0xc7, 0x7a, 0x66, 0x3a, 0xfe, 0xba, + 0xf6, 0x8e, 0xb4, 0xf8, 0xa5, 0xf5, 0x98, 0x70, 0x1c, 0x99, 0xc3, 0x53, 0xc2, 0xfe, 0xa2, 0x5a, + 0xbe, 0xaa, 0xc8, 0x6c, 0xde, 0xab, 0xca, 0xf9, 0x02, 0x72, 0x36, 0x65, 0x18, 0x64, 0xdd, 0x75, + 0x2c, 0xd5, 0x18, 0xc9, 0x6e, 0x3b, 0x37, 0x08, 0x01, 0xe7, 0x52, 0x77, 0xa0, 0xd7, 0xec, 0xfd, + 0x2d, 0x55, 0x96, 0xf8, 0x53, 0x6c, 0x7d, 0x92, 0xc4, 0xf8, 0x5a, 0x6f, 0xeb, 0x7a, 0xc4, 0xa7, + 0x75, 0x31, 0x7d, 0x12, 0x57, 0xe1, 0xd1, 0x35, 0xd6, 0xc5, 0x8a, 0x76, 0xac, 0x0a, 0x66, 0xd4, + 0x33, 0xe4, 0x9d, 0x7c, 0xdc, 0x20, 0xf8, 0xcc, 0x8c, 0xab, 0xaa, 0xc8, 0x4f, 0xc9, 0x2b, 0x45, + 0xf2, 0x37, 0x98, 0x64, 0xb8, 0x1b, 0x5c, 0xdb, 0xb3, 0xc2, 0x32, 0x28, 0x2a, 0xe9, 0xe8, 0xc7, + 0xfe, 0x10, 0x8a, 0x9c, 0x50, 0x69, 0x6a, 0x82, 0xd7, 0x38, 0xe7, 0xaa, 0x9f, 0x8b, 0x0d, 0xd3, + 0x15, 0xb3, 0xe6, 0x8b, 0xdc, 0xaa, 0xe7, 0x89, 0x3d, 0x62, 0xbe, 0x0d, 0xbf, 0x89, 0x7b, 0x2e, + 0x94, 0x7e, 0x26, 0x86, 0x92, 0xbf, 0xd2, 0x00, 0x7b, 0xb1, 0x15, 0xec, 0x5e, 0x7d, 0xff, 0x0a, + 0x74, 0xf9, 0x26, 0xd2, 0xc8, 0x8f, 0x94, 0x02, 0xb0, 0x7d, 0xde, 0xf2, 0xbb, 0xcf, 0xe5, 0x99, + 0x54, 0x6b, 0x3a, 0x1b, 0xbf, 0x34, 0xb9, 0x8c, 0x15, 0xd9, 0xec, 0xd2, 0x77, 0x17, 0x70, 0xbe, + 0x46, 0x15, 0x66, 0xad, 0x16, 0x4a, 0x52, 0x84, 0xb4, 0xea, 0xfd, 0xac, 0x92, 0xcc, 0x30, 0x2d, + 0x70, 0xaf, 0xec, 0x76, 0x87, 0x6a, 0xb2, 0xd8, 0xfb, 0xbb, 0x82, 0xd4, 0xcd, 
0x57, 0x33, 0x33, + 0x24, 0xad, 0x6d, 0x62, 0x42, 0xf7, 0xf4, 0xd6, 0xc7, 0x3c, 0x7e, 0x7e, 0xe8, 0x53, 0xff, 0x30, + 0xfb, 0x55, 0x4b, 0x13, 0x82, 0x35, 0x40, 0x25, 0x4d, 0xbc, 0x08, 0xaa, 0x71, 0xb0, 0x7c, 0x8f, + 0x4d, 0x17, 0x01, 0x67, 0xd6, 0x94, 0x9a, 0xd9, 0xef, 0x71, 0x26, 0x7d, 0x60, 0xe6, 0x3e, 0xfc, + 0xc3, 0x24, 0x45, 0xa6, 0xd8, 0x8f, 0xf5, 0x36, 0x95, 0x04, 0x9a, 0x3a, 0x4d, 0x8a, 0xf2, 0xfb, + 0x19, 0x50, 0x18, 0x6e, 0xde, 0x15, 0xf9, 0x51, 0x44, 0x90, 0x98, 0x0c, 0xd4, 0xec, 0x5a, 0xb6, + 0x2a, 0xfe, 0xed, 0x40, 0xfc, 0x52, 0x17, 0x4f, 0xfb, 0xca, 0xeb, 0x45, 0xaf, 0xec, 0x46, 0xa2, + 0xc0, 0x9f, 0xf0, 0x49, 0x61, 0xfc, 0x8c, 0x7a, 0xfe, 0xcf, 0x37, 0x8d, 0x05, 0x3e, 0x2c, 0x87, + 0xf4, 0xd6, 0x3a, 0x74, 0xdb, 0x87, 0x5c, 0x55, 0x54, 0x67, 0xa4, 0xcd, 0xad, 0xd8, 0x1d, 0xc2, + 0x27, 0x0a, 0xff, 0x96, 0xaf, 0xe5, 0xa5, 0x53, 0x5b, 0xf3, 0x9c, 0x91, 0x67, 0xe7, 0x6e, 0x93, + 0x1a, 0x52, 0x7c, 0x98, 0x20, 0xb5, 0xbb, 0xa8, 0x46, 0xa0, 0x26, 0x1e, 0xe6, 0x87, 0x1a, 0x03, + 0x02, 0x5e, 0x37, 0xa9, 0x0e, 0xee, 0x8f, 0x62, 0x16, 0x36, 0x3e, 0xae, 0xef, 0x45, 0x3c, 0xbf, + 0x13, 0xd7, 0x93, 0xb5, 0x90, 0xd0, 0x63, 0xbb, 0xab, 0x26, 0xdc, 0x5e, 0x30, 0x25, 0x05, 0x8a, + 0x19, 0x06, 0x3f, 0x05, 0x24, 0xb9, 0x79, 0x2a, 0xa1, 0xf2, 0xa2, 0xa0, 0xdf, 0xf6, 0x8d, 0xa2, + 0xdd, 0xf7, 0x7d, 0xc1, 0x67, 0xa2, 0x7b, 0x96, 0x1c, 0xe9, 0x55, 0xcb, 0xd4, 0x4d, 0x0b, 0x34, + 0xac, 0xb4, 0xf7, 0xdb, 0x10, 0xfc, 0xfd, 0x6e, 0x0f, 0x3d, 0x0c, 0x88, 0xe5, 0x7d, 0x2a, 0x6d, + 0xdf, 0x69, 0xab, 0x4f, 0xe0, 0x6f, 0x03, 0xcf, 0x3c, 0x1c, 0x82, 0x06, 0x52, 0x59, 0xfb, 0xd0, + 0x58, 0x21, 0x15, 0xf6, 0x87, 0xf0, 0xdd, 0x2c, 0x78, 0x52, 0x3a, 0x0c, 0xc3, 0x8a, 0xe2, 0x09, + 0x49, 0xf5, 0xf4, 0x56, 0x89, 0xce, 0xbc, 0xd2, 0x17, 0x99, 0x24, 0x53, 0x36, 0xd4, 0xf6, 0x97, + 0x76, 0x53, 0x16, 0xb6, 0x2b, 0x11, 0x4d, 0x41, 0x22, 0xc9, 0xe6, 0x23, 0x6f, 0x8c, 0x78, 0x95, + 0xd2, 0x08, 0x8b, 0x3f, 0x84, 0x65, 0xa0, 0xb6, 0x7a, 0x24, 0xce, 0x6e, 0x4f, 0x35, 0x32, 0xa0, + 0x45, 0xd1, 0xcf, 0x25, 0x70, 0x11, 0xb4, 0xba, 0x9d, 0x2d, 0x89, 0x41, 0xae, 0x98, 0x40, 0xdb, + 0x17, 0x2c, 0xf5, 0x54, 0xd1, 0x27, 0xa1, 0xca, 0x80, 0xe9, 0xd8, 0x5b, 0xd8, 0x89, 0xc9, 0x37, + 0xa5, 0x86, 0x3e, 0x26, 0x99, 0x43, 0x1d, 0xa7, 0xd2, 0xab, 0x72, 0xf3, 0xbe, 0x5b, 0x12, 0xbb, + 0x1c, 0xf6, 0x70, 0xa1, 0x4e, 0x66, 0x55, 0xeb, 0x27, 0x7b, 0xbe, 0x2c, 0x0b, 0x9d, 0xa6, 0x6c, + 0xbf, 0x17, 0x9a, 0x9e, 0x25, 0x27, 0xb8, 0x0b, 0xed, 0x15, 0x44, 0x99, 0x97, 0x08, 0x1a, 0xfd, + 0xab, 0x53, 0xfa, 0x5b, 0x1b, 0x5c, 0x28, 0x0e, 0xa9, 0xfd, 0xee, 0xbf, 0x7c, 0x23, 0x76, 0xab, + 0x1b, 0x56, 0xee, 0x86, 0x88, 0x47, 0x29, 0xdd, 0x14, 0x13, 0xf8, 0xef, 0x7c, 0x62, 0x27, 0xe2, + 0x52, 0xcc, 0x8d, 0x1a, 0xe4, 0x6d, 0x5e, 0x93, 0x16, 0xcd, 0x16, 0x16, 0x44, 0x53, 0x65, 0x7d, + 0x13, 0x1e, 0x3d, 0xdf, 0x6f, 0x1b, 0xdc, 0x73, 0x94, 0x5c, 0x4b, 0x08, 0xf5, 0xae, 0x9b, 0x04, + 0xf8, 0xba, 0x7b, 0x7d, 0x40, 0x16, 0x55, 0xe9, 0x7b, 0xa2, 0xea, 0x00, 0x3b, 0xc4, 0x87, 0x21, + 0xb5, 0x55, 0xc5, 0xed, 0x48, 0x7d, 0x87, 0xdb, 0x78, 0x71, 0x1d, 0x4a, 0x18, 0x56, 0x3a, 0xf8, + 0x04, 0x37, 0x1e, 0x5a, 0xc2, 0x8b, 0xd0, 0xff, 0xa8, 0xf9, 0x97, 0xb1, 0x78, 0x9b, 0x85, 0x6c, + 0x15, 0xdf, 0xaf, 0x94, 0xba, 0x1a, 0x38, 0x2f, 0x79, 0x60, 0xed, 0x31, 0x3b, 0xe4, 0xe2, 0x48, + 0x3d, 0xf2, 0x4c, 0xd0, 0x05, 0x15, 0xb7, 0x33, 0x4b, 0xcf, 0x41, 0xa4, 0x3c, 0x03, 0x9f, 0x55, + 0x34, 0x3f, 0x55, 0x8e, 0x4b, 0x94, 0xe6, 0xb3, 0xeb, 0x28, 0xb6, 0xf7, 0xea, 0xfe, 0x13, 0x57, + 0x6e, 0x43, 0xd8, 0x28, 0x21, 0xdc, 0x52, 0xf3, 0xa3, 0xab, 0xc7, 0x17, 0xae, 0xb4, 0xf8, 0xc1, + 0xa7, 
0xd6, 0xbc, 0x85, 0x4e, 0x33, 0x4d, 0xec, 0xa6, 0x43, 0xad, 0x23, 0x29, 0x65, 0x52, 0x33, + 0xc8, 0x00, 0xf9, 0x33, 0x90, 0x27, 0xc2, 0xd4, 0x49, 0x78, 0xe7, 0x89, 0x6a, 0x35, 0x3a, 0xd1, + 0x11, 0xf6, 0x64, 0x43, 0xc4, 0x67, 0x38, 0x5f, 0x79, 0x84, 0x15, 0x9d, 0xd9, 0xaf, 0x89, 0xbf, + 0x2f, 0x86, 0x09, 0x92, 0xb1, 0xb0, 0x5c, 0x9f, 0x75, 0xd7, 0x7b, 0xb0, 0xa9, 0xfe, 0x3d, 0x7f, + 0x4d, 0x21, 0x81, 0x4f, 0x19, 0xec, 0xb5, 0x69, 0x5a, 0x61, 0xd1, 0xc4, 0xb9, 0xd1, 0xae, 0x6c, + 0x69, 0xf1, 0xc3, 0xa0, 0xe1, 0xfa, 0x38, 0x5d, 0xa7, 0x1c, 0x11, 0x0e, 0xc3, 0x0b, 0x32, 0x04, + 0xed, 0x3d, 0xf5, 0x14, 0x41, 0x95, 0xa4, 0xd4, 0x6f, 0xe0, 0x63, 0xdf, 0x4d, 0xa0, 0x10, 0x16, + 0x3f, 0x36, 0xc0, 0x0c, 0xb8, 0xaa, 0xa9, 0xe1, 0x95, 0x74, 0xde, 0xdc, 0x9a, 0xd4, 0xa0, 0x93, + 0x77, 0xc8, 0xe8, 0xae, 0xe9, 0xfb, 0xf1, 0xf1, 0xf0, 0x02, 0xde, 0xc6, 0x09, 0x1b, 0x72, 0xf4, + 0xa7, 0x30, 0x33, 0x32, 0xde, 0x05, 0xbd, 0x09, 0x27, 0xb3, 0xc2, 0x77, 0x74, 0xed, 0xb5, 0x4c, + 0x60, 0x04, 0x1d, 0x95, 0x6b, 0x44, 0x77, 0x65, 0x9d, 0x93, 0x91, 0xc8, 0xd1, 0x03, 0x1a, 0x25, + 0x80, 0x2c, 0x7f, 0xd2, 0x58, 0xc7, 0x27, 0xfb, 0x28, 0x46, 0xa5, 0x9a, 0xbd, 0x65, 0xdf, 0x63, + 0xe2, 0xdc, 0x61, 0xec, 0x99, 0xe7, 0x36, 0x32, 0xcf, 0x6c, 0x16, 0x8a, 0x10, 0xed, 0xee, 0xe2, + 0xb0, 0x8b, 0xed, 0xe7, 0x04, 0xc5, 0x34, 0x9e, 0xfc, 0xe9, 0x69, 0x4d, 0x75, 0xa0, 0xa7, 0x61, + 0xd0, 0x63, 0x40, 0xf5, 0xc3, 0x43, 0x3e, 0xe7, 0x13, 0x0a, 0xcb, 0x2f, 0x74, 0x7b, 0x1d, 0xaf, + 0xf8, 0xa5, 0x5c, 0x73, 0xd6, 0x82, 0x99, 0xa6, 0x77, 0xc7, 0xa0, 0xdb, 0x7e, 0x86, 0x0b, 0x96, + 0x80, 0xda, 0x1d, 0x8e, 0x4f, 0x83, 0x55, 0x7c, 0xb3, 0x31, 0xa8, 0xc3, 0x08, 0x83, 0x5b, 0x1b, + 0x90, 0x3f, 0xb9, 0xea, 0x1c, 0x6b, 0xaf, 0x00, 0x4e, 0x56, 0xd1, 0x95, 0xb4, 0xca, 0x20, 0xa6, + 0x8d, 0x85, 0x78, 0x26, 0x04, 0x76, 0xe4, 0x6c, 0xd2, 0x72, 0xae, 0x92, 0xb0, 0x8a, 0x42, 0xd5, + 0x83, 0xfe, 0x43, 0x92, 0x8b, 0x65, 0xe9, 0xbf, 0x8d, 0xaa, 0x69, 0xdc, 0xbe, 0x73, 0x41, 0xac, + 0x5e, 0x75, 0x7f, 0x28, 0xc0, 0xfc, 0x68, 0x03, 0x22, 0x98, 0x86, 0xe8, 0x94, 0x9a, 0x28, 0xd7, + 0xfb, 0x23, 0x6c, 0xc5, 0xe3, 0x1a, 0xd7, 0xbf, 0x2a, 0x1e, 0x75, 0x24, 0x70, 0x5b, 0x87, 0xb2, + 0x5e, 0x25, 0xdb, 0x36, 0x6b, 0xdb, 0xa2, 0xf1, 0x4a, 0x92, 0xe1, 0xab, 0xfd, 0x57, 0x2f, 0x17, + 0xe7, 0x1d, 0x14, 0x19, 0xe0, 0xf0, 0x5f, 0x0b, 0x47, 0x52, 0xe3, 0xbe, 0x38, 0xaa, 0x7e, 0x1b, + 0xfa, 0xb9, 0x60, 0x38, 0x0e, 0x1f, 0x5f, 0xaa, 0x83, 0x89, 0xe2, 0xf3, 0x33, 0x10, 0x69, 0x01, + 0xc5, 0xca, 0x47, 0x24, 0xe3, 0x82, 0x89, 0xc1, 0x43, 0x0f, 0x35, 0xb8, 0xee, 0x43, 0x0f, 0xeb, + 0xaf, 0x0c, 0x8f, 0x7d, 0xf2, 0x23, 0x9a, 0xbe, 0x1a, 0x42, 0xc1, 0x1d, 0xf9, 0xbb, 0x1a, 0x3f, + 0x29, 0x6a, 0x42, 0xc5, 0xb5, 0x2f, 0x6c, 0xd5, 0x7f, 0x0a, 0x9b, 0xaa, 0x3d, 0x1c, 0xe4, 0x74, + 0x8a, 0x69, 0xb2, 0xeb, 0xce, 0x55, 0x0e, 0x70, 0x3a, 0x60, 0x02, 0x7a, 0xca, 0x7f, 0xb6, 0xc7, + 0x24, 0x19, 0xf2, 0x90, 0x8d, 0xe9, 0x5f, 0x6c, 0x77, 0x92, 0x9b, 0xa2, 0x12, 0x76, 0x10, 0x42, + 0x8d, 0xb8, 0x1b, 0xc9, 0x22, 0xfc, 0x44, 0xe2, 0x95, 0x1b, 0x13, 0xb3, 0x19, 0xc8, 0xe5, 0x1e, + 0x92, 0x16, 0x53, 0x92, 0x13, 0x36, 0x87, 0xeb, 0x09, 0xd9, 0xe3, 0x87, 0x38, 0xcb, 0x3e, 0x52, + 0x3e, 0x0a, 0x36, 0x37, 0x2e, 0x60, 0x32, 0x9e, 0xdf, 0xaa, 0x54, 0x7b, 0xc2, 0xea, 0xf9, 0xc4, + 0x80, 0xe7, 0x67, 0xcd, 0xc9, 0x82, 0xa1, 0x4b, 0x2d, 0x14, 0xa5, 0x4e, 0x30, 0xd5, 0xb2, 0x4b, + 0x1f, 0x88, 0xe0, 0xf5, 0x89, 0x75, 0xa8, 0x74, 0xa4, 0x41, 0xff, 0x9c, 0xca, 0x48, 0x51, 0x87, + 0x39, 0x7f, 0x5f, 0x70, 0x90, 0x2c, 0x4a, 0xde, 0x7a, 0xff, 0xef, 0x69, 0x2b, 0xe6, 0xd9, 0x4e, + 0x65, 0x3a, 0xde, 0x1d, 0x14, 
0xf5, 0x14, 0xd0, 0x31, 0x9c, 0xb6, 0x49, 0x9a, 0xea, 0xd5, 0xbb, + 0xf4, 0xae, 0xe5, 0x9f, 0x8f, 0xdf, 0x0c, 0xec, 0xf1, 0x69, 0xea, 0xe9, 0x39, 0xd2, 0x36, 0xd1, + 0xa3, 0xf7, 0x2c, 0xab, 0xa2, 0xaa, 0x81, 0x08, 0x0f, 0x9f, 0x06, 0x97, 0xaf, 0xd6, 0x02, 0xa8, + 0x39, 0x2f, 0x0e, 0x18, 0xd3, 0x8a, 0xf3, 0x66, 0xf9, 0x93, 0x73, 0x87, 0xe1, 0x77, 0xad, 0xaf, + 0x55, 0xa3, 0x63, 0x0d, 0x17, 0x61, 0xe1, 0x71, 0x92, 0x38, 0xaf, 0xa2, 0x84, 0x79, 0xa7, 0xe6, + 0xa1, 0x11, 0x11, 0x71, 0x63, 0xd3, 0x81, 0x2b, 0x55, 0x9e, 0x70, 0x3f, 0x9e, 0xa5, 0xf9, 0xb4, + 0x8f, 0xa2, 0x99, 0x6d, 0xec, 0x8d, 0x55, 0x05, 0xa5, 0x1c, 0xc4, 0xb7, 0xbb, 0xd7, 0xcc, 0x62, + 0xcf, 0x9d, 0x0e, 0x6f, 0x6e, 0xd4, 0xec, 0x51, 0x1e, 0xed, 0xc1, 0xf8, 0xb1, 0x8e, 0xc7, 0x3d, + 0x44, 0x27, 0xcc, 0x9a, 0xdc, 0xf5, 0x19, 0x08, 0xb8, 0xa7, 0xe3, 0xc6, 0x6f, 0xe1, 0x71, 0x1a, + 0xfa, 0x63, 0x16, 0x7f, 0x69, 0x9c, 0xce, 0x9a, 0xc4, 0xef, 0xd5, 0x99, 0x4c, 0xc3, 0x8b, 0x54, + 0x82, 0xd4, 0x64, 0xa0, 0xd5, 0xa5, 0x90, 0xcf, 0xa3, 0x39, 0x48, 0x9a, 0xd2, 0xe3, 0x4d, 0xa7, + 0x86, 0x21, 0x5c, 0x3c, 0x27, 0x2e, 0xaa, 0xbb, 0x39, 0xef, 0x50, 0xb4, 0x28, 0x61, 0xcd, 0x0f, + 0x17, 0x2b, 0xed, 0x2e, 0x3e, 0x1e, 0xbb, 0xc2, 0xe4, 0x14, 0xa9, 0x9e, 0xf2, 0x67, 0xa9, 0x23, + 0xc2, 0x1c, 0x01, 0x71, 0x4b, 0xde, 0xaf, 0xa2, 0x79, 0x59, 0x1d, 0x4c, 0x1b, 0xc4, 0x6e, 0x1a, + 0x9e, 0xd5, 0x18, 0x1e, 0xb3, 0x9a, 0x25, 0x07, 0xc4, 0x89, 0x4b, 0x46, 0x48, 0xa6, 0xdb, 0xc4, + 0xfa, 0xa4, 0xa6, 0x79, 0x3d, 0x38, 0x3d, 0x3a, 0xb9, 0xdc, 0x70, 0x97, 0x89, 0x6a, 0x96, 0x2a, + 0x97, 0xed, 0x7a, 0x7e, 0x46, 0x1b, 0x91, 0x87, 0xc7, 0x83, 0xaa, 0xc2, 0x7f, 0xcc, 0x9c, 0x14, + 0xd2, 0x64, 0xfd, 0xc9, 0xfa, 0x41, 0x97, 0x07, 0x06, 0xdb, 0x63, 0x34, 0x9e, 0x55, 0x1a, 0xc8, + 0x22, 0x57, 0x34, 0xc6, 0x9d, 0xc8, 0xc1, 0x27, 0x85, 0x95, 0xcc, 0x31, 0xe3, 0xfa, 0x46, 0x62, + 0x9b, 0x6e, 0xa9, 0x52, 0xbf, 0xdc, 0x87, 0xe7, 0x73, 0x98, 0x34, 0x49, 0xfb, 0xa7, 0x78, 0x28, + 0x71, 0x93, 0x01, 0x6f, 0xc9, 0xca, 0x21, 0x96, 0xbe, 0xf8, 0x0b, 0x9f, 0x6a, 0x59, 0x57, 0x3f, + 0xf2, 0x09, 0x43, 0x44, 0x46, 0x97, 0x01, 0xa6, 0xd3, 0x45, 0xc0, 0xf5, 0x6e, 0xe6, 0x5b, 0xb0, + 0x64, 0xca, 0x5f, 0xd7, 0xf4, 0x88, 0xe4, 0x8a, 0x1c, 0x24, 0x05, 0x0f, 0x3f, 0xf2, 0x83, 0x0d, + 0x18, 0xfe, 0xd2, 0xcc, 0x8d, 0x85, 0xdf, 0x2b, 0x93, 0xcd, 0x36, 0xac, 0x6d, 0x33, 0xe6, 0x86, + 0x41, 0x12, 0x6d, 0x85, 0x4e, 0x13, 0x6f, 0xc7, 0x19, 0xdd, 0x46, 0x87, 0xec, 0x37, 0x5f, 0x08, + 0xc3, 0x29, 0xa9, 0xf7, 0x36, 0x0b, 0xe1, 0xa1, 0x55, 0xbd, 0x67, 0x79, 0x3b, 0xe3, 0xbb, 0x43, + 0xe4, 0xab, 0x20, 0x07, 0xcb, 0x95, 0x34, 0x29, 0xae, 0x2b, 0x8c, 0x09, 0x72, 0xf6, 0x66, 0x19, + 0xee, 0xe4, 0xf3, 0x72, 0x2f, 0x7b, 0xa0, 0x73, 0x0d, 0x4d, 0xe6, 0xb1, 0x82, 0xa7, 0x40, 0x1c, + 0xf3, 0x16, 0x86, 0x64, 0x1a, 0x56, 0xe1, 0xcc, 0xc9, 0x9e, 0x25, 0x70, 0xf2, 0x5c, 0xe0, 0x9c, + 0x2d, 0x5a, 0xa5, 0x0c, 0xad, 0x70, 0xe7, 0x6d, 0xa9, 0x97, 0x30, 0xeb, 0xb6, 0x0f, 0x74, 0x4b, + 0xf0, 0x7b, 0x64, 0x98, 0x13, 0xd5, 0xa9, 0x29, 0x06, 0x99, 0xf7, 0xca, 0x98, 0x9a, 0x17, 0xbd, + 0x8a, 0xb0, 0x2e, 0x96, 0xfb, 0xd9, 0xc6, 0x96, 0x1a, 0x82, 0x1f, 0xb9, 0xa3, 0x57, 0x8e, 0x19, + 0x79, 0x5b, 0x69, 0x14, 0x86, 0x97, 0x2f, 0x65, 0x04, 0x46, 0xad, 0x33, 0x8b, 0x6d, 0x03, 0xbc, + 0x44, 0x4f, 0xa2, 0x19, 0x80, 0x69, 0x52, 0xa4, 0xe8, 0x18, 0x41, 0xad, 0xca, 0x98, 0xad, 0x74, + 0x8e, 0xe1, 0x56, 0xa4, 0x1e, 0x18, 0x3f, 0x32, 0x87, 0xe6, 0x1d, 0xc0, 0xcf, 0x52, 0x34, 0xd9, + 0x98, 0x23, 0xfb, 0x71, 0xf8, 0xa8, 0xc7, 0x87, 0xc7, 0xab, 0xd3, 0xc5, 0xee, 0xe5, 0x9f, 0xee, + 0x0f, 0x72, 0x3d, 0x11, 0xb7, 0xc0, 0x51, 0x7b, 0x19, 
0x6d, 0xaf, 0x50, 0x87, 0xc6, 0x66, 0x7e, + 0xb1, 0xcf, 0x65, 0xe7, 0xf0, 0x7e, 0xbf, 0x59, 0x2b, 0x4f, 0xeb, 0x8a, 0x20, 0x61, 0xf8, 0x6e, + 0x1a, 0x3d, 0x08, 0xc5, 0x2e, 0xe4, 0x28, 0xb5, 0x79, 0x8e, 0xf9, 0xd4, 0xf0, 0xd5, 0x4b, 0x29, + 0x94, 0x57, 0x05, 0x27, 0xbd, 0x36, 0xdb, 0xa8, 0xec, 0x8f, 0xb7, 0xb9, 0x2a, 0x44, 0x04, 0xaf, + 0xcb, 0xf9, 0x03, 0xf7, 0x57, 0x08, 0x07, 0xa8, 0xd3, 0xfe, 0x0c, 0x8f, 0x34, 0xa2, 0x9e, 0x93, + 0xd2, 0x96, 0xe0, 0x9b, 0x1b, 0x8a, 0x24, 0x9f, 0x88, 0xde, 0xf6, 0x44, 0xfe, 0x82, 0x1b, 0xae, + 0x88, 0xf6, 0xe4, 0xc4, 0x45, 0x79, 0x65, 0x4b, 0xf3, 0xf2, 0xca, 0x88, 0x69, 0x8c, 0xc6, 0x91, + 0x1f, 0x11, 0xbd, 0x89, 0x35, 0xab, 0x70, 0x54, 0xaf, 0x8c, 0xfc, 0xd7, 0x15, 0xc6, 0x3a, 0xe9, + 0x78, 0x48, 0xfc, 0x39, 0x0a, 0x26, 0xf6, 0x43, 0xf8, 0x02, 0xb1, 0x79, 0x5c, 0x82, 0x92, 0xce, + 0x1f, 0x8c, 0xa9, 0x72, 0x41, 0x6c, 0x12, 0x14, 0x24, 0x7b, 0x6e, 0x6b, 0x06, 0xb9, 0x2b, 0x49, + 0xc4, 0x72, 0x48, 0x29, 0xca, 0x76, 0xe2, 0xb9, 0xed, 0x25, 0x94, 0xba, 0x32, 0x8a, 0x8c, 0xcf, + 0xa4, 0xb8, 0x8e, 0xde, 0x16, 0xd5, 0xdf, 0x3b, 0x31, 0x23, 0x45, 0xf6, 0x4d, 0xcc, 0xfa, 0xb3, + 0xc5, 0x57, 0xa4, 0x3f, 0xcf, 0x8a, 0xe1, 0x5d, 0x1d, 0x19, 0x98, 0xa5, 0xeb, 0x83, 0xc6, 0x39, + 0xc1, 0xfb, 0xc6, 0x6f, 0x8b, 0xef, 0x14, 0x72, 0x90, 0x1c, 0xb5, 0x71, 0x1a, 0x85, 0x7e, 0xa9, + 0x4d, 0x75, 0x7e, 0x4c, 0xcc, 0xf0, 0x19, 0x98, 0x60, 0x2c, 0x9c, 0xea, 0x0d, 0x04, 0x5f, 0xfd, + 0xc9, 0xdb, 0x83, 0x7b, 0xbb, 0xb6, 0x48, 0xa7, 0x7c, 0x16, 0xa2, 0xd2, 0x81, 0x9f, 0xea, 0x3f, + 0x95, 0xd3, 0x3d, 0x8a, 0xc3, 0x5d, 0x23, 0x6b, 0xc1, 0xc0, 0x30, 0xea, 0x47, 0x9a, 0xe7, 0x77, + 0xe3, 0xbd, 0x70, 0x1e, 0x25, 0x2d, 0x47, 0x2c, 0xd3, 0xd7, 0xfd, 0xad, 0xdc, 0xdd, 0xe4, 0x5a, + 0x9e, 0x9b, 0xc2, 0xe6, 0x65, 0x7f, 0x98, 0xb2, 0x48, 0xf4, 0x7b, 0xfc, 0x80, 0xac, 0xe8, 0x7c, + 0x80, 0xc9, 0x99, 0x46, 0x20, 0x75, 0xf6, 0xa7, 0x66, 0xc1, 0x3e, 0xf0, 0x14, 0x19, 0xff, 0xf5, + 0xf5, 0xdf, 0xef, 0xa9, 0x5e, 0xce, 0xb5, 0x7b, 0x87, 0xf0, 0xcb, 0x64, 0x7c, 0x05, 0x91, 0x2d, + 0x4b, 0xf8, 0x13, 0x1f, 0x4b, 0xef, 0xac, 0x90, 0x4f, 0x8f, 0xe4, 0xa4, 0x60, 0x14, 0xc4, 0xe3, + 0x8f, 0x5c, 0x23, 0x74, 0x87, 0xc9, 0x39, 0x3c, 0x5a, 0xc7, 0xfe, 0x9a, 0x6b, 0x26, 0xcd, 0x06, + 0x73, 0x24, 0xbd, 0xb1, 0x18, 0x02, 0x67, 0x2c, 0x7c, 0xf4, 0xf2, 0x9a, 0x59, 0x1b, 0xca, 0x44, + 0xcb, 0xca, 0x87, 0x29, 0x06, 0x48, 0xa7, 0xe0, 0x0a, 0xdc, 0xd9, 0x12, 0x8b, 0x6b, 0xcc, 0xf8, + 0xe0, 0xeb, 0x2f, 0x27, 0x77, 0x29, 0xd7, 0xbd, 0xc9, 0x52, 0xa1, 0xc8, 0x7f, 0x9b, 0xdc, 0xc9, + 0x9a, 0xde, 0x9c, 0xf5, 0x82, 0x08, 0x10, 0x15, 0xfc, 0x49, 0x0c, 0x3f, 0x74, 0xcd, 0x71, 0x4e, + 0x58, 0x02, 0xe7, 0xeb, 0x41, 0xd8, 0xca, 0x13, 0x60, 0x1c, 0x69, 0xc9, 0x44, 0x0a, 0x04, 0xd1, + 0xf9, 0xf7, 0xaa, 0x4a, 0x7e, 0xe8, 0x96, 0x64, 0xa4, 0x7d, 0xc3, 0x47, 0x5b, 0x8c, 0xb3, 0xf6, + 0x27, 0x47, 0x73, 0x68, 0x22, 0x24, 0x93, 0x89, 0xee, 0xa5, 0xea, 0x85, 0xe5, 0x5a, 0x05, 0xd8, + 0xa2, 0x8f, 0xed, 0x1f, 0xbd, 0xa7, 0x5f, 0x0f, 0xd5, 0xd8, 0xd5, 0x3c, 0x8d, 0x72, 0xd1, 0x82, + 0x24, 0x1c, 0x26, 0xbe, 0xad, 0x31, 0xde, 0xd2, 0xd9, 0x4d, 0x56, 0xd0, 0xa3, 0xaa, 0x31, 0x8c, + 0x2f, 0xdc, 0x2f, 0x6f, 0xa4, 0xd5, 0x6d, 0xe5, 0x3b, 0x52, 0x6f, 0xb6, 0x2c, 0x67, 0x76, 0x49, + 0xe7, 0x45, 0x6e, 0x6d, 0x29, 0x9c, 0xfd, 0x79, 0x08, 0x7c, 0x70, 0x3a, 0xf8, 0xec, 0xf6, 0xbc, + 0xd3, 0xa6, 0xb7, 0xb8, 0xcc, 0xc6, 0x7e, 0xc9, 0xf5, 0x8c, 0x0f, 0x5d, 0x85, 0xed, 0x4f, 0x65, + 0x6d, 0xee, 0x3b, 0x3e, 0xb7, 0xf4, 0x80, 0xb4, 0x2a, 0xb9, 0xa8, 0x53, 0x55, 0xde, 0x65, 0x09, + 0x8f, 0x6f, 0x55, 0x3d, 0xcd, 0x29, 0x7b, 0x2b, 0x8b, 0x82, 0xd9, 0xe7, 0x55, 
0x9b, 0x78, 0x2a, + 0xb8, 0x07, 0x86, 0x1d, 0xdc, 0x2f, 0x4a, 0x98, 0x22, 0x64, 0x1c, 0xc7, 0x4a, 0xce, 0x84, 0x98, + 0x10, 0xf9, 0xbe, 0x4e, 0xef, 0x9d, 0x6f, 0x69, 0xbf, 0x2c, 0x6d, 0xa5, 0x18, 0xed, 0x28, 0x73, + 0x74, 0x9c, 0xf9, 0x8c, 0x6a, 0x94, 0x18, 0xeb, 0x33, 0xfd, 0xca, 0x5a, 0x43, 0x8c, 0x0d, 0x7f, + 0x46, 0xc5, 0x3b, 0xfc, 0x7e, 0xd7, 0x92, 0x6e, 0x99, 0x5e, 0x40, 0x91, 0xcc, 0xcb, 0xb4, 0xfc, + 0xfd, 0x62, 0x7c, 0x61, 0x69, 0xc4, 0x2f, 0xa1, 0x44, 0x17, 0x7a, 0x78, 0xb6, 0x8f, 0xc6, 0x01, + 0x11, 0xc5, 0xf5, 0x15, 0x4a, 0xb1, 0x9e, 0x45, 0x27, 0x8d, 0xd9, 0x97, 0xab, 0x37, 0x05, 0xef, + 0x48, 0xd3, 0x7f, 0x5c, 0xf3, 0x4f, 0x02, 0x67, 0xc9, 0x67, 0x55, 0xca, 0xb7, 0x33, 0x7f, 0x33, + 0xa3, 0xbd, 0xf2, 0x26, 0x97, 0xde, 0x40, 0x49, 0xe5, 0x9b, 0x1a, 0x3b, 0x5a, 0x67, 0xb6, 0x8b, + 0xf7, 0xca, 0x08, 0x63, 0x6f, 0x72, 0x50, 0xdb, 0xea, 0x7f, 0xe8, 0xc5, 0x0c, 0x04, 0x34, 0xb8, + 0x59, 0xef, 0x82, 0x1b, 0x75, 0xb4, 0x41, 0xfd, 0x65, 0xfa, 0x6d, 0xb6, 0x9d, 0x47, 0x0c, 0xfa, + 0xf7, 0x01, 0x93, 0x5d, 0xd3, 0x77, 0xc5, 0x40, 0x2c, 0x63, 0x30, 0x8e, 0xf5, 0xef, 0xdf, 0x94, + 0x6b, 0xec, 0xe4, 0x9e, 0x90, 0x40, 0xa1, 0x71, 0x74, 0xe4, 0x95, 0x62, 0xab, 0xa3, 0x3f, 0xc9, + 0xd7, 0xe6, 0x2a, 0x9c, 0xb0, 0xc2, 0x7a, 0x3c, 0x81, 0x7c, 0xcb, 0x19, 0xe8, 0x49, 0x2a, 0xbe, + 0x6e, 0xd7, 0xd0, 0x5e, 0x8d, 0xac, 0x33, 0x22, 0x83, 0x98, 0x87, 0xb9, 0x93, 0x5d, 0x0a, 0xd8, + 0x60, 0x33, 0xf6, 0xef, 0x39, 0x46, 0xca, 0x94, 0xf3, 0x0e, 0xdc, 0x36, 0xc9, 0xd8, 0xc4, 0x5d, + 0x67, 0xfb, 0xb8, 0x96, 0xd8, 0xe7, 0xa4, 0xac, 0x7e, 0x89, 0x30, 0x9e, 0xca, 0x70, 0x59, 0x00, + 0x72, 0x7d, 0xb2, 0x21, 0xd7, 0x98, 0xc7, 0x4e, 0x52, 0xef, 0x1d, 0xd9, 0x75, 0xf3, 0x91, 0x76, + 0x96, 0x30, 0x4a, 0x7b, 0x68, 0xea, 0xdc, 0xb4, 0x01, 0x6b, 0xd9, 0xcc, 0x72, 0xf0, 0x77, 0x94, + 0x66, 0xd7, 0xd0, 0xc3, 0x85, 0x85, 0x07, 0x75, 0x57, 0xbf, 0x01, 0x6d, 0xe0, 0xfd, 0x29, 0x24, + 0x84, 0xe8, 0x80, 0xcc, 0x48, 0x93, 0xff, 0xed, 0x52, 0x3a, 0x57, 0xdb, 0xba, 0xab, 0x92, 0xaf, + 0x82, 0x4a, 0x3e, 0xf9, 0xdf, 0x79, 0x7e, 0xcb, 0x7b, 0x1a, 0x03, 0x2b, 0x3e, 0xa3, 0x10, 0x53, + 0xd8, 0x40, 0xe4, 0xe2, 0x87, 0x96, 0x23, 0x9b, 0xfe, 0xf9, 0xc7, 0xac, 0x50, 0x63, 0x19, 0x25, + 0xd4, 0x55, 0x9e, 0x23, 0x47, 0xe5, 0xd5, 0xce, 0x3a, 0xe3, 0x80, 0xa4, 0x33, 0x56, 0xb8, 0x7d, + 0x40, 0xc1, 0x30, 0x3e, 0x3c, 0x91, 0xa2, 0x79, 0x83, 0x6d, 0xb6, 0xff, 0x01, 0xac, 0x78, 0xf0, + 0xec, 0x00, 0xfa, 0xc1, 0x67, 0xe1, 0x70, 0xe6, 0x21, 0xc0, 0xd6, 0x78, 0xf6, 0x9b, 0x0c, 0xa3, + 0xcc, 0xbe, 0x7f, 0x9e, 0x20, 0x7d, 0x6c, 0xa5, 0x01, 0x66, 0xc3, 0x89, 0xd6, 0x95, 0x79, 0x86, + 0x9b, 0xc5, 0xd0, 0x24, 0xc1, 0x1a, 0xf9, 0xfd, 0x66, 0xfe, 0x4f, 0x39, 0x19, 0x08, 0xbd, 0x3d, + 0x81, 0x19, 0x9b, 0x26, 0x16, 0x91, 0x89, 0x55, 0x02, 0x1f, 0xa6, 0x7e, 0xf0, 0x57, 0xd0, 0xc8, + 0x2e, 0x90, 0x92, 0x56, 0xd8, 0x27, 0x80, 0x52, 0x71, 0x58, 0x00, 0xc4, 0xef, 0x78, 0xef, 0xf7, + 0xb7, 0x86, 0xb4, 0xcb, 0x39, 0xef, 0x0b, 0x30, 0x61, 0x34, 0x8a, 0x97, 0x4a, 0x03, 0xc5, 0x96, + 0xe1, 0xe3, 0xaf, 0x30, 0xdc, 0xf4, 0xb0, 0xec, 0x5f, 0xeb, 0xab, 0xed, 0xd2, 0xc5, 0xf9, 0xa4, + 0x7d, 0x03, 0x09, 0x08, 0x4b, 0xf1, 0x3f, 0x60, 0x1a, 0xdd, 0x9f, 0xac, 0x50, 0x26, 0x16, 0x31, + 0x73, 0x8e, 0xcb, 0xd1, 0x25, 0x4d, 0xf8, 0x95, 0xba, 0xc2, 0xd4, 0x59, 0x2a, 0x1a, 0x97, 0x61, + 0xd9, 0xcb, 0x62, 0xea, 0x0b, 0xdc, 0x63, 0x40, 0x49, 0xad, 0xd9, 0x70, 0x45, 0x6d, 0xb5, 0xa5, + 0x63, 0xc1, 0x76, 0xa5, 0x61, 0x6d, 0xe1, 0x25, 0x3d, 0x7d, 0x5f, 0x9f, 0xc3, 0x4e, 0xa0, 0x2d, + 0x6a, 0xd1, 0x47, 0x21, 0xdc, 0x37, 0x21, 0x5d, 0xd1, 0xd3, 0x88, 0x38, 0x2f, 0xae, 0x2d, 0x3f, + 0x71, 
0xb7, 0xa0, 0x73, 0xa8, 0xe2, 0x8c, 0xc7, 0xf0, 0x71, 0x45, 0x6c, 0x2f, 0x74, 0x7a, 0x41, + 0xc4, 0x52, 0x35, 0x1e, 0x84, 0x6c, 0x19, 0x88, 0x4c, 0xd8, 0x11, 0xe5, 0x9b, 0x7f, 0x35, 0x20, + 0x15, 0x80, 0xf0, 0x8e, 0x83, 0xf1, 0xf4, 0x73, 0xa3, 0xe0, 0x74, 0xe5, 0x9f, 0xe8, 0xce, 0x46, + 0xcd, 0x9a, 0xab, 0xaf, 0x3a, 0x47, 0xf9, 0x6a, 0x24, 0x79, 0x43, 0xbe, 0x30, 0xf9, 0x8a, 0x90, + 0x40, 0x8f, 0xb8, 0x0d, 0xfc, 0xbc, 0x22, 0x4f, 0xa8, 0x1f, 0x7a, 0x2b, 0xe3, 0x8b, 0x35, 0x2f, + 0x66, 0x36, 0x2b, 0xb3, 0x6d, 0x40, 0xd2, 0x8c, 0x93, 0xdb, 0x14, 0x25, 0x81, 0x15, 0x65, 0x85, + 0x81, 0xe9, 0x32, 0x0e, 0x7d, 0x63, 0xb0, 0xf9, 0xc4, 0x23, 0x7b, 0x2c, 0x36, 0xde, 0x0b, 0x7e, + 0xbb, 0x7d, 0xf8, 0xb9, 0xae, 0xe5, 0x8c, 0xe5, 0x83, 0xa9, 0x8d, 0x26, 0x1e, 0x5d, 0xa5, 0x6a, + 0xd8, 0xa6, 0xec, 0x0d, 0x8f, 0xda, 0x46, 0x41, 0xa9, 0xd6, 0x40, 0x60, 0x9e, 0xf5, 0x62, 0x5e, + 0x6c, 0x15, 0x02, 0x16, 0xfa, 0x73, 0x84, 0xc5, 0x9c, 0xd4, 0x8a, 0x84, 0x64, 0x66, 0xa3, 0x35, + 0x96, 0x0e, 0xcc, 0xe4, 0x56, 0x5c, 0x47, 0xf7, 0x53, 0x0e, 0x1a, 0x72, 0xb2, 0xe9, 0x57, 0xda, + 0xf7, 0xf5, 0x81, 0xd5, 0xea, 0xae, 0x60, 0xf7, 0x79, 0x37, 0xd4, 0x9c, 0x2a, 0x88, 0xe8, 0xa1, + 0x36, 0x23, 0x2f, 0x2c, 0xf4, 0xdd, 0xe0, 0x68, 0xce, 0x20, 0xaa, 0xc0, 0x96, 0x42, 0x8a, 0x9f, + 0xcf, 0xaa, 0x92, 0x4c, 0xb5, 0xda, 0x9a, 0x0c, 0x51, 0x39, 0x1c, 0x6a, 0x47, 0xd5, 0xfa, 0x9d, + 0x4b, 0x31, 0xfe, 0x42, 0xa4, 0xb5, 0xfa, 0x20, 0xba, 0x5d, 0x31, 0x75, 0x62, 0xcc, 0x6c, 0x1f, + 0xf5, 0x32, 0x73, 0x9d, 0x94, 0x52, 0x9f, 0xae, 0x38, 0x1e, 0xcc, 0x7a, 0x77, 0x0e, 0xb8, 0x68, + 0xef, 0x1b, 0x16, 0xd3, 0x39, 0x78, 0x3d, 0xf6, 0x45, 0xc5, 0x93, 0xa8, 0x41, 0x80, 0xce, 0x69, + 0xcb, 0x24, 0x97, 0xe7, 0x5e, 0x3c, 0xd3, 0x8d, 0x3a, 0x7a, 0x10, 0x49, 0x7f, 0x44, 0xd4, 0xc0, + 0x5a, 0x20, 0x7d, 0x51, 0x59, 0x3c, 0x24, 0x5c, 0x6a, 0xf6, 0x35, 0x54, 0x93, 0x8e, 0xa3, 0x21, + 0x28, 0x86, 0x05, 0x86, 0x47, 0x98, 0x9c, 0x63, 0xe3, 0x2b, 0xdb, 0x63, 0xdb, 0x8f, 0x17, 0xed, + 0xdc, 0xaf, 0xef, 0xc2, 0x82, 0x9e, 0x77, 0x7b, 0xec, 0xc2, 0x2b, 0x85, 0x2d, 0xb9, 0x48, 0x98, + 0x2d, 0xfb, 0x80, 0x22, 0xbb, 0xaf, 0xf1, 0xc2, 0x0b, 0xbd, 0xed, 0xe7, 0x0f, 0xeb, 0x6c, 0xeb, + 0x24, 0x60, 0xcf, 0x8b, 0x0e, 0xe2, 0xc5, 0x58, 0xd5, 0x81, 0x96, 0xc5, 0xf8, 0xe0, 0xff, 0xbe, + 0xa2, 0x25, 0xfe, 0xde, 0xfc, 0x46, 0xd0, 0x1a, 0x46, 0x40, 0xf2, 0xe9, 0x78, 0x0e, 0x5f, 0x92, + 0x1e, 0x52, 0x15, 0x59, 0xd0, 0x8e, 0xd6, 0xc9, 0x92, 0x8c, 0x05, 0x8d, 0x96, 0xde, 0xbd, 0x57, + 0xf4, 0x73, 0xfb, 0x38, 0xa5, 0x2c, 0x46, 0x37, 0x80, 0x2e, 0x77, 0x8c, 0xc4, 0x86, 0x78, 0x27, + 0x20, 0x50, 0xee, 0xb6, 0x5a, 0x5a, 0xab, 0xc2, 0x49, 0x09, 0xe1, 0xa4, 0x1e, 0xba, 0x6c, 0x6f, + 0xaa, 0xc0, 0xa0, 0x26, 0x1b, 0x8e, 0xa2, 0x3f, 0x4a, 0x7c, 0xb7, 0xd6, 0xc9, 0x8c, 0x5e, 0x42, + 0x86, 0x42, 0x7e, 0x16, 0xab, 0xdd, 0xfb, 0xff, 0xda, 0xff, 0xdf, 0x7e, 0x8b, 0xfe, 0xe3, 0x3f, + 0xfe, 0xe3, 0x3f, 0xfe, 0xe3, 0xff, 0x17, 0xfe, 0x07, 0x45, 0xde, 0x41, 0xd0, 0x00, 0x21, 0x00, + 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8448, // uncompressed data size (bytes) + 6785, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x06, 0x62, 0x36, 0x08, 0x13, 0x4c, 0x48, 0x41, 0x69, + 0x20, 0x00, 0x00, 0x37, 0x0f, 0x4b, 0x90, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8448 +// COMPRESSED SIZE (bytes): 6785 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_image_prod_data[] = +{ + 0xed, 0x99, 0x45, 0x54, 0x1c, 0x0a, 0xac, 0xf7, 0x07, 0x77, 0xf7, 0xe2, 0xee, 0xee, 0x1d, 0xac, + 0xb8, 0xfb, 0x40, 0x71, 0x28, 0xee, 0x5a, 0x1c, 0x5a, 0x1c, 0x06, 0x2d, 0x3a, 0x14, 0xb7, 0xe2, + 0x30, 0x0c, 0xee, 0x5e, 0x4a, 0xa1, 0xb8, 0xbb, 0xb6, 0x78, 0xd1, 0xc1, 0xe1, 0xdd, 0xb7, 0xbd, + 0xbb, 0xb7, 0xf8, 0x36, 0xdf, 0xb9, 0xbf, 0xcd, 0x3f, 0xd9, 0x24, 0x59, 0x24, 0xe7, 0x24, 0x27, + 0x71, 0x00, 0x00, 0xc2, 0x4b, 0x3a, 0x20, 0x14, 0x0f, 0x00, 0xb8, 0x43, 0xbc, 0x03, 0xdc, 0x23, + 0x26, 0x02, 0x10, 0x01, 0xbd, 0x1b, 0x9b, 0xaf, 0xaf, 0xaf, 0x78, 0x71, 0x00, 0x04, 0xc0, 0x6b, + 0x01, 0x72, 0xe3, 0x1e, 0x40, 0x10, 0xb2, 0x8c, 0xd0, 0x98, 0x0e, 0xe0, 0x82, 0x2c, 0x23, 0xfe, + 0x23, 0xcc, 0x90, 0x65, 0xa4, 0x7f, 0x84, 0x26, 0x0e, 0x00, 0x00, 0x40, 0x0a, 0x90, 0x9a, 0x37, + 0x91, 0x7a, 0x0b, 0x0a, 0x37, 0x21, 0x05, 0x08, 0x90, 0x6c, 0xc4, 0xe6, 0x4d, 0x84, 0xe6, 0x65, + 0xc4, 0xd0, 0x16, 0x00, 0x00, 0x8f, 0x5f, 0x05, 0x80, 0x7f, 0x5b, 0x00, 0xc0, 0x43, 0x88, 0x03, + 0xd8, 0x23, 0xff, 0xaf, 0x81, 0xd3, 0x96, 0x0d, 0x40, 0x40, 0xfc, 0xc7, 0xba, 0x96, 0x40, 0x4c, + 0x06, 0x00, 0x50, 0xe3, 0xfe, 0x49, 0xf5, 0x5c, 0x80, 0xd2, 0x7d, 0xfd, 0x70, 0x8f, 0x7a, 0x87, + 0xf4, 0x4f, 0x34, 0xc4, 0x0c, 0x3c, 0xf4, 0x8d, 0xff, 0xcd, 0xfc, 0x7c, 0x8f, 0x12, 0x07, 0xa0, + 0x04, 0x3c, 0xe7, 0x23, 0x77, 0xa6, 0x20, 0xb6, 0xa4, 0xe3, 0xff, 0xe3, 0xbf, 0x3c, 0x00, 0x42, + 0xff, 0xa9, 0xe7, 0x0e, 0xb1, 0xbb, 0xf4, 0x11, 0x21, 0xb9, 0x53, 0x1c, 0x23, 0x8e, 0x0e, 0x05, + 0x50, 0x58, 0x70, 0x07, 0x48, 0xfe, 0x47, 0x7b, 0x5f, 0xaf, 0xf9, 0x9e, 0x5e, 0x54, 0x10, 0x0a, + 0x0b, 0xa4, 0xa6, 0x00, 0xff, 0x27, 0xbe, 0x47, 0x88, 0x2c, 0xa5, 0x41, 0x42, 0xec, 0x45, 0x88, + 0x97, 0x49, 0x12, 0xf9, 
0x97, 0xb8, 0xb3, 0xee, 0x05, 0x1a, 0x65, 0x2e, 0xe4, 0x56, 0x4e, 0x95, + 0x07, 0x87, 0xbc, 0x3e, 0x97, 0x85, 0x10, 0x73, 0x72, 0xf7, 0xd0, 0x4b, 0xfa, 0x79, 0x94, 0x62, + 0x25, 0x7d, 0x79, 0x3f, 0x56, 0x59, 0xfa, 0xc7, 0x38, 0xb0, 0xe3, 0x62, 0x38, 0xca, 0xd5, 0xb8, + 0x42, 0x72, 0x01, 0xc9, 0xd5, 0x50, 0xe3, 0x37, 0xa9, 0x21, 0x4f, 0x2c, 0x3d, 0xc7, 0x10, 0x5c, + 0xeb, 0xa1, 0x4a, 0x8b, 0xf7, 0x26, 0xd4, 0x9a, 0xcd, 0x98, 0xa2, 0x98, 0x86, 0xe5, 0x08, 0x10, + 0x3d, 0xb7, 0x0f, 0xcf, 0x3b, 0x0c, 0xa9, 0x7e, 0x51, 0x1e, 0x25, 0xa2, 0xe1, 0x67, 0xe3, 0x30, + 0x30, 0xe2, 0x77, 0x15, 0xde, 0x47, 0xeb, 0xcd, 0x90, 0x10, 0x22, 0xda, 0xd8, 0xec, 0x3c, 0x39, + 0xa7, 0x10, 0x54, 0x51, 0x5c, 0x0f, 0x0f, 0x1f, 0xc8, 0xc7, 0x8f, 0x52, 0x46, 0x7c, 0xa0, 0x63, + 0x47, 0x38, 0xc2, 0xf8, 0xaa, 0x15, 0x97, 0x22, 0xdb, 0x91, 0xc9, 0x22, 0x23, 0x1e, 0x61, 0x61, + 0x9e, 0x1b, 0x69, 0x43, 0x5c, 0xef, 0xea, 0xdb, 0xa8, 0x47, 0xee, 0xc9, 0x4c, 0xe0, 0xd0, 0x43, + 0x7f, 0xde, 0x8a, 0x73, 0x5f, 0x25, 0x9e, 0x4c, 0x69, 0x2b, 0x2e, 0x34, 0x9f, 0xb9, 0xc4, 0x08, + 0x55, 0x53, 0xef, 0xb3, 0x89, 0xf2, 0xdb, 0x78, 0xaa, 0x56, 0xc3, 0x82, 0x85, 0xb0, 0xd4, 0x48, + 0x6f, 0x2f, 0xd0, 0x5a, 0x0f, 0xb9, 0xf6, 0x9b, 0xfe, 0x14, 0x56, 0x2f, 0xcd, 0x7d, 0x0d, 0xa4, + 0x61, 0xfa, 0x60, 0x0b, 0xb7, 0xbb, 0x82, 0x5e, 0x1e, 0x92, 0x23, 0x49, 0xdf, 0xda, 0xa7, 0xff, + 0x7d, 0x96, 0x43, 0xca, 0x40, 0x58, 0x1b, 0xc2, 0x98, 0xc8, 0x64, 0x7f, 0x8e, 0xf8, 0x44, 0x5b, + 0x3a, 0xdc, 0xdd, 0xff, 0x57, 0x6a, 0xa2, 0x31, 0xa6, 0xa4, 0xae, 0x66, 0xa2, 0xa7, 0xa1, 0xdb, + 0xe2, 0xa1, 0xf3, 0x27, 0xd3, 0x03, 0x82, 0xfa, 0xcb, 0x20, 0xee, 0x6d, 0x39, 0x66, 0x49, 0x13, + 0xa9, 0x61, 0x8c, 0xcf, 0x5a, 0x48, 0x15, 0x60, 0xf9, 0x07, 0x0b, 0xab, 0x15, 0xe1, 0xe2, 0xef, + 0x54, 0xed, 0x10, 0x6a, 0x8a, 0x50, 0xbb, 0x3d, 0x3f, 0x6f, 0x3f, 0xfc, 0xb3, 0xfd, 0x56, 0x1f, + 0xc3, 0xe8, 0xaf, 0x2c, 0xcc, 0x42, 0x3d, 0x4b, 0xcc, 0x76, 0x2d, 0xf7, 0x59, 0xa9, 0xb8, 0x09, + 0x3a, 0x0d, 0xef, 0x05, 0xf6, 0x9e, 0x54, 0xb4, 0xc9, 0x94, 0xc9, 0x53, 0x0f, 0x45, 0x47, 0x06, + 0x17, 0x1d, 0x87, 0x8c, 0x4c, 0xba, 0xcc, 0xe2, 0x34, 0x9c, 0x5a, 0xbb, 0x39, 0xc5, 0x3e, 0x05, + 0x6f, 0x5e, 0xd8, 0x10, 0x9b, 0x87, 0xf9, 0xba, 0x21, 0xc8, 0xa4, 0xc7, 0x79, 0x07, 0x51, 0x6c, + 0x70, 0xa4, 0x61, 0x4f, 0xaf, 0xa0, 0xe4, 0x7d, 0xa9, 0xdd, 0x27, 0x4a, 0x6b, 0x1b, 0xb8, 0xec, + 0xb0, 0xd7, 0x04, 0x3e, 0x95, 0x53, 0xf8, 0x4f, 0xb7, 0x77, 0x3c, 0xa6, 0x76, 0x0e, 0x1c, 0x34, + 0xa6, 0x23, 0x2e, 0x9a, 0x5d, 0x64, 0x1c, 0x48, 0x9f, 0xa0, 0x16, 0x5d, 0xfd, 0x9d, 0x01, 0x7f, + 0x1c, 0x59, 0xd3, 0x48, 0x43, 0x8f, 0x9a, 0xfa, 0x0c, 0x17, 0x34, 0x44, 0x5b, 0xd3, 0x73, 0xf8, + 0x99, 0x0b, 0x73, 0x66, 0x0f, 0x25, 0xcd, 0x4c, 0xcf, 0x4a, 0x74, 0xfe, 0x05, 0x74, 0x37, 0x9d, + 0xb8, 0xea, 0xbb, 0xd7, 0xe5, 0x91, 0xe3, 0x75, 0x08, 0xa3, 0x1c, 0x99, 0x09, 0xf0, 0x67, 0x99, + 0x15, 0xff, 0x39, 0xdb, 0x92, 0x86, 0x41, 0xff, 0xec, 0x35, 0x27, 0x8f, 0xa0, 0x9a, 0xfb, 0xd8, + 0xeb, 0x8b, 0x46, 0x57, 0x6d, 0x55, 0x90, 0x36, 0x47, 0x4b, 0x71, 0x9d, 0xbf, 0x52, 0x7a, 0x33, + 0x5f, 0x78, 0xbf, 0x54, 0xd4, 0x93, 0x23, 0x17, 0x20, 0xeb, 0xe0, 0x8c, 0x64, 0x94, 0x8a, 0x9d, + 0x31, 0xf0, 0x33, 0x9e, 0x89, 0xe5, 0x57, 0xde, 0x0c, 0x35, 0xd5, 0x45, 0x1e, 0x69, 0x7e, 0x8d, + 0x88, 0x1e, 0x88, 0x24, 0x17, 0x43, 0x8a, 0x0a, 0x58, 0x07, 0xb9, 0xbc, 0x55, 0x83, 0x0e, 0x29, + 0x05, 0x33, 0x0a, 0x1e, 0x19, 0x65, 0x4c, 0x17, 0xef, 0xc7, 0xca, 0xf5, 0x0b, 0xac, 0x2c, 0xe5, + 0xc5, 0xb3, 0x0e, 0xb6, 0xda, 0x77, 0x24, 0x01, 0xdd, 0x81, 0x1c, 0xe8, 0x7c, 0x26, 0x34, 0x13, + 0xe4, 0xa9, 0x06, 0x8e, 0x4f, 0xd0, 0x5f, 0xf7, 
0xe8, 0x29, 0xdc, 0x51, 0x55, 0x51, 0x63, 0x3f, + 0x27, 0xdf, 0x7c, 0xfe, 0xdb, 0x45, 0x0d, 0xe1, 0x59, 0x81, 0xb3, 0x39, 0xbb, 0x10, 0xa8, 0x29, + 0x64, 0x66, 0x08, 0x8c, 0x77, 0x23, 0xc8, 0xd3, 0x34, 0x2e, 0x7d, 0x4c, 0xef, 0x4a, 0x87, 0x5f, + 0x75, 0xd7, 0xd2, 0x42, 0xca, 0x14, 0x4d, 0x82, 0x9e, 0x19, 0x7f, 0x64, 0x0e, 0xbd, 0x8d, 0xc1, + 0xc6, 0x8d, 0x11, 0xe5, 0x8c, 0x36, 0x8a, 0x0e, 0x33, 0x74, 0x15, 0xfd, 0x4a, 0xe4, 0xda, 0xd7, + 0x2d, 0x21, 0xb1, 0x8d, 0xe3, 0xe5, 0x87, 0x4e, 0xae, 0x96, 0xe2, 0xfa, 0x56, 0x21, 0xa6, 0x79, + 0x60, 0xbd, 0x10, 0x0f, 0xad, 0x40, 0x40, 0xb6, 0x9e, 0x64, 0x2e, 0xef, 0xf3, 0xe4, 0xb4, 0x6c, + 0x95, 0x19, 0xab, 0x9e, 0x23, 0x40, 0x11, 0x3c, 0xfb, 0x92, 0x6e, 0xdb, 0x5a, 0x69, 0x75, 0xe5, + 0xa2, 0x68, 0x76, 0x82, 0x79, 0x7b, 0xbc, 0x9f, 0x28, 0xbd, 0x64, 0x42, 0xd6, 0x56, 0xb9, 0xd1, + 0xdd, 0x71, 0xfb, 0xf2, 0x52, 0xd2, 0xf2, 0x15, 0x97, 0x4f, 0x0f, 0xe5, 0x76, 0x2a, 0x53, 0xb7, + 0xfa, 0x2f, 0x1c, 0x44, 0xa4, 0x57, 0x17, 0xab, 0xf5, 0x88, 0xc7, 0xdc, 0x48, 0x9b, 0xb2, 0x5f, + 0xbb, 0xf8, 0xd5, 0x7a, 0x3f, 0xab, 0x20, 0x2f, 0xb7, 0x15, 0x11, 0x2b, 0x9b, 0x72, 0x90, 0x67, + 0x10, 0x84, 0x22, 0x75, 0x06, 0x0a, 0xc2, 0xed, 0xce, 0x91, 0xca, 0x18, 0x5e, 0x1c, 0x3a, 0x47, + 0x9c, 0x5e, 0x33, 0xda, 0x38, 0x5c, 0x52, 0xbd, 0xcf, 0x3c, 0x8d, 0x70, 0xdc, 0x4d, 0x23, 0xfd, + 0x1e, 0x0d, 0x2c, 0x6f, 0x4e, 0xda, 0x76, 0xda, 0xfa, 0x10, 0x55, 0x17, 0x59, 0x39, 0x54, 0x3f, + 0x86, 0x5d, 0x14, 0xe3, 0x24, 0x45, 0xd4, 0x76, 0xa1, 0xc3, 0xef, 0xdd, 0xf5, 0xcd, 0x68, 0x01, + 0x66, 0xef, 0x57, 0xf7, 0xdc, 0x3a, 0x7e, 0x9a, 0x7b, 0x5d, 0xf3, 0x4e, 0xb5, 0x4d, 0xbb, 0xd0, + 0x22, 0x20, 0xcd, 0x0d, 0x18, 0xfc, 0xfd, 0xea, 0xb2, 0x3e, 0x4b, 0xf7, 0x46, 0x9c, 0xb8, 0x18, + 0xa1, 0xc6, 0x73, 0x5f, 0xe4, 0xf7, 0x5b, 0xea, 0xcc, 0x1d, 0x39, 0x63, 0x0f, 0x27, 0x76, 0xe6, + 0xfc, 0x05, 0xef, 0x6c, 0x6e, 0xdb, 0x91, 0x18, 0xc4, 0xd4, 0xda, 0x6c, 0x50, 0xa8, 0x1e, 0xb0, + 0xf2, 0xed, 0xb4, 0xe1, 0xcb, 0xae, 0x11, 0x39, 0xe4, 0x3c, 0x61, 0xca, 0x1a, 0x27, 0x50, 0xdf, + 0x2b, 0x19, 0xe5, 0xa8, 0x88, 0x0a, 0x55, 0xd0, 0x00, 0x2e, 0x22, 0xba, 0x91, 0x5e, 0xb0, 0xd0, + 0x90, 0x22, 0x4e, 0xce, 0x1a, 0x77, 0x6b, 0xd1, 0xda, 0xe1, 0x36, 0xac, 0x64, 0x2e, 0xf0, 0x00, + 0x86, 0xad, 0xcf, 0x11, 0x06, 0x67, 0x2b, 0x58, 0x79, 0x9c, 0xee, 0xa1, 0xc8, 0x91, 0xf2, 0x62, + 0xa3, 0x6c, 0x10, 0x29, 0xed, 0x4b, 0x97, 0x71, 0x36, 0xcd, 0x51, 0x29, 0x06, 0x21, 0x0a, 0x05, + 0xe3, 0xb7, 0xbc, 0x2d, 0xb3, 0x37, 0xff, 0x6e, 0x5b, 0x1a, 0xa3, 0x37, 0xc6, 0xe5, 0x05, 0xb6, + 0x5d, 0xdd, 0x57, 0x13, 0x59, 0x92, 0xe0, 0x35, 0x61, 0xcb, 0x01, 0x7c, 0xe7, 0xd3, 0xdb, 0xdf, + 0xd2, 0xdb, 0xb4, 0xa4, 0xb0, 0xc3, 0x8b, 0x72, 0x46, 0xe8, 0x95, 0x87, 0xe3, 0xef, 0x91, 0x97, + 0xe6, 0x8f, 0x4c, 0xc0, 0xb1, 0x69, 0x8a, 0xf8, 0xbd, 0xb2, 0xbc, 0xd2, 0x7f, 0x25, 0x3d, 0xb3, + 0x38, 0x33, 0x92, 0x3c, 0x7d, 0x8e, 0x4e, 0x37, 0x51, 0xd0, 0xe9, 0xc5, 0x29, 0x75, 0x26, 0x7f, + 0xae, 0x9c, 0x8e, 0x19, 0xbd, 0xbf, 0xa7, 0xc1, 0xec, 0xac, 0xe9, 0xf9, 0x46, 0x6a, 0x8c, 0x86, + 0x94, 0x84, 0xa4, 0xc2, 0x6a, 0xa4, 0x26, 0xe0, 0x4a, 0x75, 0xc2, 0x48, 0x2b, 0xfc, 0x51, 0x51, + 0x57, 0x12, 0xf3, 0x79, 0xbf, 0xfb, 0x0d, 0x17, 0x41, 0x58, 0xe0, 0x50, 0x82, 0x46, 0x27, 0xa2, + 0xbf, 0x91, 0x48, 0x10, 0x09, 0xcc, 0x45, 0x3c, 0xfc, 0x99, 0x59, 0xc1, 0x10, 0x2c, 0x21, 0xa6, + 0x55, 0xa4, 0x27, 0x98, 0xd5, 0xb9, 0x03, 0xcf, 0xdb, 0xeb, 0x58, 0x1a, 0x77, 0x5e, 0x90, 0xf0, + 0x31, 0x16, 0x30, 0xdf, 0xca, 0x28, 0xe0, 0xe3, 0x9b, 0x1b, 0x0e, 0x60, 0x4d, 0xa6, 0xa3, 0x1c, + 0x65, 0x74, 0x77, 0xf4, 0x43, 0x8f, 0xdd, 0xe5, 0x37, 0xcd, 0xb6, 0x0a, 
0x3b, 0x7a, 0x23, 0xcf, + 0x2b, 0x39, 0x3c, 0xca, 0x12, 0xe1, 0xbc, 0x2f, 0xd1, 0xe3, 0x04, 0x13, 0xc4, 0x7b, 0x92, 0xf1, + 0x68, 0xf6, 0x67, 0xe2, 0xcb, 0xf5, 0x81, 0xa7, 0x0b, 0xbc, 0xb4, 0x5c, 0xa3, 0xd1, 0x62, 0x6f, + 0x99, 0xd0, 0xeb, 0x3d, 0x7d, 0x90, 0x9f, 0x69, 0x14, 0x8f, 0x63, 0x0e, 0x9b, 0xa8, 0x1e, 0xb9, + 0x59, 0x39, 0x00, 0x7f, 0xc1, 0xf3, 0x75, 0x47, 0x79, 0x53, 0xdb, 0x49, 0x80, 0xd1, 0x15, 0xba, + 0xc3, 0xde, 0x26, 0x31, 0x2a, 0xcb, 0x5e, 0x5e, 0xe3, 0x9d, 0x71, 0xd9, 0x92, 0xaa, 0x69, 0xcd, + 0x28, 0x95, 0x78, 0x48, 0x57, 0x13, 0x42, 0x4f, 0x3c, 0x8e, 0x16, 0x9e, 0xd0, 0x47, 0xb7, 0x86, + 0x90, 0x18, 0x75, 0x98, 0x22, 0xc4, 0x47, 0xbb, 0x3b, 0xbf, 0x94, 0xbf, 0xeb, 0x6f, 0x08, 0x8c, + 0x85, 0xdd, 0xa0, 0x9d, 0x72, 0x32, 0xde, 0xe7, 0xbb, 0x17, 0xac, 0x56, 0xe0, 0xdd, 0x03, 0xb2, + 0x2b, 0x38, 0x5a, 0x27, 0x2e, 0x57, 0x31, 0xd5, 0xc4, 0xd2, 0xcc, 0xe9, 0xf9, 0x38, 0x3d, 0x27, + 0x1c, 0x43, 0x2c, 0x51, 0x37, 0xcd, 0x74, 0xec, 0x05, 0x82, 0x7b, 0x43, 0xc6, 0xb1, 0xdd, 0x9a, + 0x5b, 0x84, 0x2c, 0x33, 0x99, 0xc6, 0x22, 0xbe, 0xc4, 0x9e, 0x83, 0xb9, 0xf6, 0x5f, 0x5b, 0x34, + 0x5c, 0xfa, 0xea, 0x1e, 0x59, 0x94, 0xe8, 0x49, 0x03, 0xf2, 0x41, 0x77, 0xb0, 0xe2, 0xf8, 0xb3, + 0x9f, 0xf5, 0x80, 0x7d, 0xda, 0x9c, 0xf9, 0xcb, 0x39, 0xab, 0x92, 0x7d, 0xaa, 0x58, 0x60, 0x15, + 0xf2, 0x87, 0x61, 0xba, 0xf6, 0x3b, 0xe1, 0x2c, 0xfa, 0xd5, 0x3f, 0xb1, 0xb6, 0xe5, 0x96, 0xb9, + 0xbe, 0xee, 0x1c, 0xf4, 0xdd, 0xcc, 0x91, 0x21, 0x34, 0x4f, 0x1e, 0x76, 0xe8, 0x3c, 0x1a, 0x3d, + 0x10, 0x2d, 0xcb, 0x4f, 0x9f, 0xa5, 0x8f, 0x58, 0x82, 0x88, 0x89, 0x84, 0x31, 0x25, 0x25, 0x5b, + 0xa9, 0x7e, 0x33, 0xbb, 0xb7, 0xeb, 0xe1, 0xc5, 0xcd, 0x36, 0xfc, 0x5a, 0x0d, 0x6a, 0x00, 0x18, + 0xb3, 0x3a, 0x20, 0x94, 0x2c, 0x73, 0x88, 0x9f, 0xa4, 0x26, 0xec, 0x46, 0xa5, 0xb1, 0xd4, 0x85, + 0xa8, 0x14, 0x27, 0x6a, 0x59, 0xae, 0xfb, 0xf6, 0x59, 0x91, 0x21, 0xd3, 0xc7, 0x37, 0xa1, 0x3c, + 0x25, 0xc1, 0xe3, 0x65, 0xf4, 0x2a, 0xb6, 0xf9, 0x4d, 0x96, 0xfc, 0x04, 0x0e, 0x04, 0x88, 0x4e, + 0x9a, 0x5c, 0xf8, 0xd3, 0x2b, 0x6b, 0xc5, 0x89, 0x7d, 0x70, 0x73, 0xd2, 0xea, 0x07, 0xaa, 0x87, + 0xa5, 0x41, 0xed, 0x21, 0x93, 0x6c, 0xdd, 0x1b, 0x77, 0x77, 0x1d, 0x75, 0x1f, 0xf1, 0xb3, 0xe8, + 0x16, 0x00, 0xc9, 0x05, 0x63, 0x80, 0xa7, 0xc9, 0x00, 0xad, 0x5d, 0xa4, 0xcf, 0xcc, 0xde, 0x74, + 0x76, 0xb3, 0x7f, 0x4e, 0x76, 0x54, 0xaa, 0x59, 0x8e, 0xc2, 0x66, 0x39, 0x26, 0x79, 0xcc, 0x6d, + 0x6a, 0x6b, 0x38, 0x27, 0x58, 0x83, 0x0f, 0x5f, 0x92, 0x56, 0xef, 0xa3, 0xe1, 0x6a, 0xf1, 0x72, + 0x87, 0xf8, 0xc8, 0xf1, 0x5f, 0x90, 0x3e, 0x8c, 0xcc, 0xd9, 0xb0, 0x69, 0x43, 0x27, 0x30, 0x87, + 0x27, 0xdc, 0x33, 0x50, 0xdd, 0xd3, 0x88, 0x19, 0xae, 0x6f, 0x0e, 0x44, 0x77, 0x90, 0xdf, 0x83, + 0x32, 0xdf, 0x31, 0xb5, 0x98, 0xcc, 0x05, 0x20, 0x61, 0x9c, 0xc7, 0x18, 0x4b, 0xa5, 0x62, 0x86, + 0x1f, 0x13, 0xfd, 0x60, 0x27, 0x91, 0x6f, 0xf1, 0x72, 0xb3, 0xd9, 0xd4, 0x70, 0x96, 0xd8, 0x98, + 0x9f, 0x9c, 0xc2, 0x9a, 0xea, 0x69, 0x6f, 0xf9, 0xb4, 0xa0, 0x02, 0xf5, 0xa1, 0x08, 0x6a, 0x71, + 0x51, 0x8a, 0x76, 0x1f, 0x5f, 0x8f, 0x6a, 0x96, 0xbf, 0x19, 0xe0, 0xa1, 0xa0, 0xe7, 0x96, 0x49, + 0xbb, 0x09, 0x28, 0xca, 0x61, 0x3f, 0x1b, 0xe9, 0xa9, 0x79, 0xcc, 0x48, 0xcd, 0xb3, 0x33, 0xbd, + 0x35, 0x5c, 0x93, 0x91, 0xcb, 0xb2, 0x7f, 0xa6, 0xb1, 0x81, 0x54, 0x45, 0xa1, 0x99, 0xba, 0xae, + 0xe9, 0x71, 0x6a, 0x0a, 0x72, 0xf8, 0xb8, 0xc2, 0xa9, 0x79, 0x3d, 0x9d, 0x3d, 0xaf, 0x70, 0x76, + 0xc2, 0x27, 0x9c, 0x07, 0x7d, 0x1b, 0x6e, 0x76, 0x0c, 0xc4, 0x62, 0xd9, 0x37, 0x2b, 0x1c, 0xb0, + 0x56, 0x03, 0x9f, 0x22, 0xab, 0x58, 0x97, 0x07, 0x6f, 0xfd, 0x36, 0x74, 0xab, 0xe3, 0xb5, 0x73, + 
0xbd, 0xcc, 0x17, 0x95, 0x4f, 0x3d, 0x80, 0x79, 0x27, 0xc1, 0x9f, 0x68, 0xbe, 0x78, 0x3d, 0xb7, + 0x53, 0xe1, 0xf8, 0x3e, 0x8b, 0x28, 0x10, 0xf4, 0xee, 0x84, 0x1a, 0xd1, 0xf6, 0xa7, 0x6c, 0x8d, + 0xc2, 0x31, 0x11, 0xfd, 0x97, 0x20, 0x78, 0xd1, 0x04, 0xd7, 0xe1, 0x2a, 0xf7, 0x81, 0x9f, 0x61, + 0x36, 0x92, 0x90, 0xdb, 0x40, 0xe1, 0x1c, 0x11, 0xad, 0x74, 0xde, 0xb9, 0x32, 0x26, 0x07, 0xdf, + 0x67, 0x73, 0xcc, 0x54, 0xc9, 0x19, 0x32, 0xf2, 0x23, 0x96, 0x13, 0x1e, 0x62, 0x26, 0xaf, 0x19, + 0xa2, 0xb5, 0x8b, 0x17, 0xd1, 0x06, 0x12, 0x3e, 0xf7, 0xab, 0x98, 0xeb, 0xb4, 0x28, 0x83, 0xf4, + 0x21, 0x7f, 0xb1, 0xb1, 0x19, 0xbc, 0xa0, 0xee, 0x5f, 0x79, 0xcc, 0xf7, 0xf3, 0xf9, 0xe9, 0x04, + 0xb8, 0x79, 0x37, 0x0b, 0xc2, 0xd2, 0x8e, 0x67, 0x91, 0xfd, 0x61, 0x22, 0xb6, 0x77, 0x51, 0xd7, + 0x0f, 0x4e, 0x82, 0x0d, 0x44, 0xe5, 0x0f, 0x73, 0x6a, 0xa8, 0xa2, 0x72, 0x3c, 0x6f, 0xa0, 0xd0, + 0x1a, 0xfb, 0xf7, 0xa2, 0x31, 0x8f, 0x45, 0x8e, 0xba, 0x93, 0xf5, 0xc4, 0x36, 0xb4, 0x36, 0x3e, + 0x66, 0x35, 0x6f, 0x88, 0x7a, 0x12, 0xee, 0x4d, 0x97, 0x0f, 0xc1, 0xfb, 0x0e, 0x79, 0xc3, 0xd2, + 0xef, 0x24, 0xbb, 0xef, 0x07, 0x7d, 0x62, 0x5d, 0x20, 0x08, 0x1a, 0xa3, 0xae, 0x3b, 0x16, 0x18, + 0xa3, 0xaa, 0x51, 0xbc, 0x54, 0x2b, 0x4d, 0x8b, 0xc2, 0x1f, 0xde, 0x75, 0x22, 0xec, 0xbd, 0xff, + 0xf3, 0x9c, 0xa7, 0x7e, 0x27, 0x0b, 0x6d, 0x7d, 0xee, 0xe5, 0x24, 0x78, 0xef, 0x7c, 0xaa, 0x6d, + 0x3e, 0x2e, 0xa7, 0x06, 0xd3, 0x7c, 0x39, 0x22, 0x5b, 0x9a, 0x23, 0x9c, 0x99, 0x3c, 0x7a, 0x6f, + 0xab, 0x3b, 0x3c, 0x06, 0x70, 0xfc, 0x8e, 0x0e, 0x59, 0xa3, 0x08, 0x4e, 0xd4, 0xe5, 0xe2, 0xd0, + 0xa1, 0xa7, 0xc3, 0xb9, 0x31, 0xa2, 0x2c, 0x4a, 0xe2, 0x65, 0xe3, 0x88, 0xe8, 0x5d, 0x05, 0x15, + 0x3e, 0x48, 0x29, 0xcb, 0x3f, 0x71, 0x81, 0x2c, 0x3c, 0x90, 0x2b, 0x4f, 0x38, 0x15, 0x60, 0xb3, + 0xda, 0xbb, 0x5d, 0xe0, 0xbd, 0xfa, 0xcf, 0xc4, 0xc3, 0xfd, 0xeb, 0x1d, 0xed, 0xd0, 0x73, 0x0b, + 0xfa, 0xc6, 0x2e, 0x69, 0xf3, 0xe5, 0x22, 0xc5, 0x4f, 0x13, 0x47, 0x4d, 0x1e, 0xef, 0x34, 0x0f, + 0x32, 0x2c, 0xea, 0xde, 0xcb, 0xea, 0x41, 0x48, 0x50, 0xed, 0xea, 0xad, 0x5e, 0xd1, 0x51, 0x68, + 0x23, 0xa0, 0xec, 0xc1, 0xf7, 0xa5, 0xc6, 0x56, 0x0e, 0xd5, 0xf1, 0xdb, 0x49, 0x01, 0x1a, 0x9c, + 0x86, 0x52, 0x1f, 0xe4, 0xce, 0xc1, 0x1e, 0xcb, 0x3d, 0x25, 0xdb, 0xc7, 0x78, 0x21, 0x51, 0x84, + 0x17, 0xe7, 0x55, 0xf7, 0x06, 0xc0, 0x93, 0x57, 0xe2, 0xb6, 0x0d, 0xea, 0x70, 0xca, 0xc8, 0x5e, + 0xc3, 0xb0, 0x4c, 0x01, 0xbf, 0xaf, 0xe0, 0x14, 0x03, 0xd2, 0x12, 0x0a, 0xb7, 0xeb, 0xd7, 0x42, + 0x68, 0x6d, 0xf0, 0x66, 0xc6, 0x46, 0xb8, 0xfc, 0x90, 0x28, 0x9d, 0xdf, 0xbd, 0x8a, 0x06, 0x28, + 0xd2, 0xc2, 0x65, 0x0e, 0x04, 0x2b, 0x8f, 0x58, 0x8a, 0xfa, 0x63, 0x5c, 0xdd, 0xba, 0xcd, 0x51, + 0xbc, 0x5c, 0x1a, 0x9f, 0xcf, 0xfb, 0x27, 0xe5, 0x08, 0x28, 0x1b, 0x53, 0x5e, 0x7d, 0xf0, 0x34, + 0x5a, 0x85, 0x18, 0x0b, 0xf5, 0xe1, 0xe2, 0x60, 0xb4, 0xc7, 0x25, 0x1c, 0xf0, 0xb7, 0x11, 0x5e, + 0x54, 0x20, 0x56, 0xb5, 0xb5, 0xa0, 0x7f, 0xcd, 0x3d, 0x5c, 0xab, 0xf7, 0xc7, 0xd6, 0xfc, 0x8b, + 0x58, 0x2e, 0xc0, 0x58, 0x5e, 0x94, 0xd1, 0xe8, 0x39, 0xeb, 0x99, 0xb5, 0xd2, 0x9e, 0xdb, 0x2e, + 0xf7, 0xd2, 0xc3, 0xf9, 0xb8, 0xae, 0x68, 0x75, 0x4f, 0xb5, 0x4b, 0x2e, 0x92, 0x87, 0xff, 0xbb, + 0xf6, 0xcc, 0x04, 0x4b, 0x61, 0x55, 0x48, 0xb3, 0xab, 0x31, 0x9c, 0x35, 0x93, 0x07, 0xcb, 0xd2, + 0xf3, 0xbb, 0x9a, 0xd3, 0x07, 0x80, 0x2d, 0xcc, 0xb6, 0xb2, 0xf3, 0xe8, 0xe9, 0x47, 0x68, 0xa3, + 0xd7, 0x48, 0xb3, 0xf8, 0xa2, 0x13, 0xab, 0x70, 0xe7, 0xea, 0x3e, 0xb1, 0xbb, 0x8f, 0x30, 0xa7, + 0x1c, 0xbd, 0x81, 0xd9, 0xb5, 0x6c, 0xe7, 0xf6, 0x88, 0x5b, 0xf6, 0x47, 0xc7, 0x68, 0xc0, 0xa5, + 0x21, 0x3f, 0x05, 0x92, 
0x5a, 0x7d, 0xb5, 0x75, 0x1c, 0x26, 0x39, 0x4f, 0xc2, 0x16, 0x5a, 0x16, + 0x45, 0xe7, 0xfa, 0x6e, 0x93, 0x1a, 0x5a, 0x92, 0x7e, 0xca, 0x37, 0x79, 0x30, 0x50, 0xc5, 0x25, + 0x2f, 0x5e, 0xc0, 0xe9, 0x09, 0xa3, 0x0c, 0xb5, 0xc7, 0x20, 0xc6, 0xa4, 0xdb, 0x2e, 0x35, 0x55, + 0x86, 0x2f, 0x28, 0x28, 0xcc, 0x46, 0xaa, 0x3c, 0xbd, 0x69, 0xa3, 0x37, 0x6c, 0x0e, 0x62, 0x26, + 0x62, 0x3f, 0x26, 0x3c, 0x63, 0x5d, 0x35, 0x4b, 0x1d, 0x1c, 0x1f, 0x66, 0x70, 0x8a, 0x67, 0xc9, + 0x4d, 0x1d, 0x48, 0x68, 0x10, 0x2d, 0x0d, 0xe9, 0xd6, 0x6f, 0x6b, 0x33, 0xd9, 0x6b, 0x1a, 0x47, + 0xa4, 0xc4, 0x4a, 0xc6, 0xab, 0x1d, 0x3c, 0x5a, 0xb5, 0x3c, 0xc9, 0xcf, 0xd9, 0x24, 0xda, 0xa7, + 0xc3, 0xe4, 0xa4, 0xb8, 0x1a, 0xae, 0x62, 0x98, 0x83, 0x37, 0xee, 0x59, 0xeb, 0xb5, 0x46, 0x5b, + 0x67, 0xfd, 0xa2, 0x2d, 0x80, 0xed, 0x94, 0xeb, 0x5e, 0xa4, 0x68, 0x0e, 0xc2, 0xef, 0xec, 0xfa, + 0x2f, 0xec, 0xd1, 0xaa, 0x53, 0x25, 0x27, 0x1a, 0xf8, 0x91, 0x66, 0x34, 0x5d, 0xae, 0x2c, 0x0f, + 0x53, 0x9a, 0xd2, 0x3d, 0x12, 0x31, 0xcf, 0x13, 0xbd, 0xa2, 0xe4, 0xcf, 0x04, 0x01, 0x78, 0x3f, + 0xdd, 0x16, 0x17, 0x1c, 0x85, 0xd0, 0x0c, 0x7e, 0xc4, 0x40, 0x7b, 0x78, 0x66, 0x0e, 0xaa, 0x67, + 0x44, 0x66, 0x36, 0x5d, 0xde, 0x75, 0xb1, 0xf1, 0x7c, 0x7d, 0x1b, 0xb8, 0x62, 0x2a, 0xe8, 0xdc, + 0xd1, 0x66, 0x4b, 0x92, 0x67, 0xc3, 0x4e, 0xff, 0x5e, 0x08, 0x6c, 0x7e, 0x40, 0x74, 0xa6, 0x6d, + 0x51, 0x5a, 0xa6, 0x2d, 0x4e, 0x02, 0x8b, 0x9a, 0x12, 0x4c, 0x21, 0xb4, 0xc7, 0x7c, 0x95, 0x29, + 0x73, 0xac, 0xd9, 0x37, 0x27, 0x02, 0x84, 0xcf, 0x01, 0x9d, 0x4d, 0x70, 0xb1, 0x26, 0x40, 0x46, + 0x24, 0x4d, 0x3d, 0xc4, 0xba, 0x7d, 0x7c, 0xe9, 0x51, 0x7f, 0x86, 0x52, 0x33, 0x44, 0xea, 0x27, + 0xc6, 0xe8, 0xd7, 0x1b, 0xeb, 0xa4, 0xd2, 0x88, 0x86, 0x85, 0x54, 0x9e, 0xc8, 0xc9, 0x01, 0x84, + 0x98, 0x28, 0x90, 0x6f, 0x6d, 0xcd, 0xb7, 0xe2, 0xb2, 0x63, 0xc0, 0x1a, 0x0c, 0xd6, 0x79, 0x97, + 0x93, 0xc3, 0x76, 0xad, 0xb5, 0x90, 0x4d, 0xdd, 0x20, 0x83, 0x63, 0x1a, 0x0c, 0xd3, 0x42, 0x2d, + 0x9e, 0x2a, 0xe4, 0x9f, 0xc8, 0xee, 0xb9, 0x07, 0x51, 0x6e, 0xc3, 0x46, 0x19, 0x65, 0x56, 0x1d, + 0xb4, 0x1f, 0x45, 0xe4, 0xda, 0x05, 0xeb, 0xca, 0x2c, 0xce, 0xf8, 0x35, 0xa8, 0xe4, 0x0c, 0x9d, + 0x19, 0x5c, 0xfb, 0xb7, 0xdc, 0x80, 0xcd, 0x05, 0xcd, 0xb4, 0x74, 0xa4, 0x90, 0x52, 0xf6, 0x55, + 0xb7, 0x7e, 0xa5, 0x92, 0xb9, 0x45, 0x41, 0xa4, 0xf4, 0x66, 0x18, 0x7e, 0x72, 0x02, 0xab, 0xf2, + 0xc5, 0xeb, 0x75, 0x53, 0x0c, 0xd2, 0xd3, 0xbc, 0x76, 0xb0, 0xe7, 0xc7, 0xa5, 0x0b, 0x45, 0xcc, + 0x22, 0x15, 0xda, 0x2b, 0xa7, 0x76, 0x0d, 0x83, 0x2a, 0x62, 0xef, 0x7b, 0xda, 0xe0, 0x8b, 0xfc, + 0x18, 0xd2, 0x11, 0xbc, 0xd4, 0xd6, 0x7f, 0xcf, 0x5b, 0x12, 0xc2, 0x2f, 0xbd, 0x24, 0xca, 0x5f, + 0xcd, 0x28, 0xcc, 0xdd, 0x2e, 0xfb, 0x6d, 0x09, 0xb7, 0xe4, 0x42, 0x33, 0x12, 0xdd, 0x5a, 0xd4, + 0x69, 0xb3, 0x40, 0x8a, 0x94, 0x9e, 0x09, 0xa6, 0x17, 0xbb, 0xd4, 0x9c, 0xf1, 0xa7, 0x96, 0x72, + 0xe9, 0xa3, 0x0a, 0x12, 0x60, 0x5f, 0x05, 0x6b, 0xe1, 0x70, 0xde, 0xa6, 0xd0, 0x71, 0xd6, 0x10, + 0x24, 0xa2, 0xde, 0xf5, 0x19, 0xc8, 0x92, 0xdb, 0x16, 0xb2, 0x66, 0x9a, 0x1d, 0xd2, 0xc0, 0x5a, + 0x05, 0xe0, 0xca, 0xa9, 0x32, 0x7c, 0xe2, 0x29, 0x98, 0xd6, 0xbc, 0xb4, 0xbc, 0x48, 0xd3, 0x70, + 0x22, 0x36, 0xaa, 0xb1, 0x5d, 0xdc, 0xbc, 0x4c, 0xeb, 0x0d, 0xb5, 0xde, 0x7c, 0x27, 0x53, 0xb2, + 0xd8, 0xf4, 0xed, 0xc8, 0xf5, 0xc0, 0xa3, 0x74, 0xa2, 0x22, 0x3f, 0x93, 0x6f, 0x8f, 0x8b, 0xee, + 0x52, 0xd6, 0x9f, 0x87, 0x4c, 0xac, 0xe7, 0xeb, 0x9c, 0x3e, 0xe0, 0x9c, 0xd1, 0x6c, 0x50, 0xed, + 0x0b, 0xa9, 0x41, 0x62, 0x80, 0xa0, 0x91, 0xb8, 0xa5, 0x7d, 0x95, 0x54, 0x3a, 0xb1, 0x32, 0x8f, + 0x60, 0x6a, 0xe1, 0xd4, 0x67, 0x9f, 0xd5, 0x6c, 
0xfe, 0x0a, 0xd1, 0x0c, 0xe8, 0xbc, 0x12, 0xfb, + 0x17, 0xed, 0x69, 0x2d, 0x8a, 0x82, 0x9d, 0x4f, 0x58, 0xa1, 0xcf, 0xf3, 0xe8, 0xdd, 0xbe, 0x66, + 0x76, 0xe5, 0xf1, 0x2b, 0x4d, 0x34, 0xeb, 0x30, 0x10, 0xde, 0x43, 0x13, 0x7d, 0xa4, 0x54, 0x8b, + 0x40, 0x9a, 0xf1, 0x9e, 0x75, 0x6c, 0xa6, 0x9f, 0x1f, 0xe3, 0x87, 0x6a, 0x4e, 0x72, 0xd5, 0x0d, + 0x50, 0xe0, 0x2a, 0xb7, 0xa4, 0x95, 0xdc, 0xcd, 0x4b, 0xd6, 0xb0, 0x18, 0xbb, 0x41, 0xf5, 0xb6, + 0xf1, 0xf3, 0xd9, 0xfc, 0xaf, 0x7a, 0x71, 0x34, 0x9b, 0x1a, 0x98, 0x61, 0x0d, 0x99, 0xf2, 0x84, + 0x54, 0x6d, 0x0e, 0x74, 0xd2, 0xee, 0x6d, 0xf4, 0xcc, 0x72, 0x0b, 0x6e, 0xfb, 0xef, 0x37, 0x0f, + 0xbf, 0x7e, 0x61, 0x8a, 0x52, 0x44, 0xb3, 0x91, 0x03, 0xd3, 0xec, 0xbd, 0xbf, 0x71, 0x8b, 0xd1, + 0x3c, 0x98, 0x7f, 0x31, 0x6c, 0x35, 0xd2, 0x63, 0x56, 0x82, 0x02, 0xa0, 0x03, 0x4f, 0x1c, 0x09, + 0xe8, 0x44, 0xed, 0xfe, 0x00, 0xbc, 0x86, 0xe2, 0x54, 0xe6, 0x1e, 0xe9, 0x45, 0xc6, 0xc2, 0xd9, + 0xa8, 0xd4, 0xd4, 0x39, 0x4f, 0xc7, 0x39, 0xa7, 0xdc, 0x29, 0xb7, 0x00, 0xfc, 0xa1, 0xce, 0xbc, + 0x23, 0x33, 0xb1, 0x85, 0xc8, 0x2c, 0xc9, 0x8b, 0xf4, 0xaf, 0xca, 0x3a, 0x46, 0x79, 0x38, 0x4e, + 0x43, 0x57, 0x9c, 0xb5, 0x7c, 0x63, 0xf8, 0xde, 0x6c, 0x5d, 0xbc, 0x5f, 0xc0, 0x65, 0x83, 0x0d, + 0xb8, 0xd3, 0x53, 0x16, 0xb8, 0xa7, 0xb6, 0x0c, 0xea, 0x7d, 0x26, 0x4f, 0xa9, 0x47, 0xf9, 0x11, + 0x56, 0x64, 0xe1, 0xa6, 0xd2, 0x62, 0x5a, 0x7d, 0x25, 0x9d, 0x9e, 0xab, 0xbf, 0x43, 0x3d, 0x92, + 0x04, 0xee, 0xd5, 0xfc, 0xfe, 0x5c, 0xaf, 0x32, 0x4d, 0x2e, 0xd7, 0x0a, 0xf0, 0x5f, 0x9e, 0x49, + 0x48, 0x9f, 0xfa, 0xaa, 0x65, 0xea, 0x1e, 0xef, 0x31, 0xa0, 0xfe, 0xc5, 0xe6, 0xfb, 0x61, 0x0f, + 0x27, 0x3c, 0xdf, 0x50, 0x02, 0x85, 0x5c, 0x56, 0x20, 0xe7, 0xef, 0x22, 0x09, 0x06, 0x7f, 0xe4, + 0xdb, 0xfe, 0x56, 0x79, 0xc4, 0x33, 0xa9, 0x3f, 0x85, 0x6b, 0xbc, 0xd5, 0x88, 0x8a, 0x9c, 0xe8, + 0xa7, 0x96, 0x91, 0x13, 0xdb, 0x5a, 0x54, 0x5c, 0x7a, 0x87, 0xe7, 0xd3, 0x1b, 0xf2, 0x07, 0xd9, + 0xed, 0x84, 0x57, 0x21, 0xb8, 0xb1, 0x61, 0xb0, 0x60, 0x57, 0x63, 0x8a, 0x3a, 0xd0, 0xd0, 0x0c, + 0x92, 0x3f, 0x09, 0x93, 0xd2, 0xb3, 0x35, 0xa4, 0x1b, 0x05, 0xe1, 0xe9, 0xf3, 0xee, 0x9f, 0x02, + 0x85, 0x38, 0x66, 0xb8, 0xb8, 0xb9, 0xf5, 0x83, 0x75, 0x2a, 0xf4, 0x03, 0x40, 0xf5, 0x1f, 0x5c, + 0x61, 0x59, 0xdf, 0x27, 0xa3, 0x07, 0xc6, 0xbe, 0x73, 0x96, 0xf3, 0xa9, 0xdc, 0x1b, 0x0d, 0xc4, + 0x24, 0xdc, 0x8b, 0x21, 0xc7, 0xcf, 0x43, 0xa0, 0x61, 0xa8, 0xee, 0x00, 0x2a, 0x34, 0xc4, 0x00, + 0xa0, 0x90, 0xa4, 0x85, 0xef, 0x93, 0x19, 0x18, 0xaf, 0x37, 0xa8, 0xf5, 0x4e, 0x1a, 0x52, 0x93, + 0x1e, 0x66, 0xb4, 0xd6, 0xb5, 0xf5, 0x35, 0x7c, 0x00, 0x45, 0x52, 0xca, 0xb2, 0x77, 0x7b, 0xad, + 0x9f, 0x03, 0xc4, 0xde, 0x1f, 0x1d, 0x84, 0xc9, 0x94, 0x0e, 0xe2, 0xd2, 0x8b, 0x2b, 0x49, 0x79, + 0x82, 0x7c, 0x40, 0x1e, 0x3d, 0x30, 0x32, 0x16, 0xd0, 0xcf, 0xd3, 0xbc, 0x3b, 0xd8, 0x90, 0x47, + 0x8d, 0x8f, 0xef, 0x1f, 0x84, 0x4a, 0x55, 0x3e, 0x8b, 0xa8, 0x5a, 0xaa, 0x5e, 0x58, 0x4a, 0x76, + 0xdd, 0x3a, 0x01, 0x91, 0x57, 0x23, 0xb1, 0x4f, 0xec, 0x71, 0x71, 0x13, 0xb0, 0x7e, 0x28, 0x10, + 0x9c, 0x2f, 0x75, 0xd3, 0x1d, 0xb2, 0x78, 0xe4, 0xb1, 0x29, 0xa3, 0xe3, 0x4e, 0x4f, 0x2c, 0x40, + 0x32, 0xe5, 0xd4, 0x71, 0xcf, 0xd3, 0xe6, 0xfe, 0x5e, 0xb1, 0x80, 0x71, 0x6c, 0xf1, 0x99, 0x0c, + 0xed, 0x55, 0x56, 0xc8, 0xd5, 0x1e, 0xc1, 0x41, 0x8a, 0xa0, 0x25, 0x86, 0x50, 0xee, 0x89, 0x0a, + 0x8a, 0xc3, 0x29, 0x9d, 0x9e, 0xb9, 0x9c, 0x04, 0x53, 0xac, 0x33, 0x1e, 0x34, 0xc2, 0xd5, 0xeb, + 0x51, 0x1d, 0x0f, 0x67, 0x39, 0xcf, 0xba, 0xbb, 0xc9, 0xa7, 0xc8, 0xf3, 0x0a, 0x8a, 0x4f, 0xd0, + 0x6d, 0x03, 0x23, 0x56, 0x52, 0xf0, 0x71, 0x58, 0xf5, 0x49, 0x04, 0xee, 
0xa1, 0x8b, 0xc8, 0xab, + 0xb8, 0x98, 0x2e, 0xce, 0x2d, 0x98, 0xe0, 0xb1, 0xa2, 0xad, 0xc1, 0xab, 0x63, 0x3a, 0xb4, 0x3b, + 0xd6, 0xd2, 0x63, 0x5b, 0x2b, 0x2c, 0xf5, 0x5c, 0x74, 0xde, 0x00, 0xfd, 0xba, 0xc4, 0x7b, 0x4a, + 0x2d, 0xee, 0xcc, 0x68, 0xa2, 0x88, 0xad, 0x77, 0xe8, 0x33, 0x5a, 0xd1, 0xd2, 0x4a, 0x31, 0xc0, + 0xcd, 0x38, 0xb9, 0x65, 0x54, 0x28, 0xd3, 0x15, 0x84, 0xb9, 0xb4, 0xdc, 0x47, 0x40, 0xc4, 0x21, + 0x54, 0x07, 0x93, 0x54, 0xdd, 0x33, 0xac, 0xfa, 0xf7, 0x7e, 0x02, 0x13, 0x39, 0x7b, 0x7d, 0x58, + 0xca, 0xa2, 0xfe, 0xb0, 0xc4, 0x82, 0x74, 0x4b, 0xef, 0x0d, 0x04, 0x8e, 0x98, 0x63, 0xeb, 0x9c, + 0x8c, 0xf2, 0x2a, 0x0d, 0x33, 0x9a, 0xdb, 0xf7, 0x04, 0xd0, 0x27, 0x34, 0xe8, 0x98, 0xfa, 0xa2, + 0xab, 0x5e, 0xa7, 0x67, 0xe6, 0x1a, 0x3b, 0x00, 0x4b, 0xaf, 0x45, 0xc3, 0x78, 0xb0, 0x67, 0x0c, + 0xea, 0x2d, 0xc9, 0x33, 0x91, 0xf6, 0x94, 0x36, 0x9c, 0x0b, 0x46, 0x22, 0x67, 0x6a, 0x9e, 0xa2, + 0xf2, 0xd1, 0x58, 0x9a, 0x2e, 0xbd, 0x59, 0x82, 0x23, 0x20, 0x05, 0xcb, 0x6f, 0x6d, 0x7b, 0x1a, + 0xa3, 0x6f, 0x3d, 0x6f, 0x39, 0x6c, 0xb9, 0xb2, 0xea, 0x12, 0xb5, 0xe3, 0x25, 0x72, 0xaa, 0x3e, + 0x3a, 0xe5, 0x73, 0x6b, 0x33, 0x9d, 0x9f, 0x2c, 0x3d, 0xae, 0x73, 0x24, 0x9d, 0x20, 0x23, 0xe9, + 0xee, 0x34, 0x85, 0xd5, 0x6c, 0xca, 0x3e, 0x71, 0x3f, 0xbb, 0xac, 0x2b, 0xfd, 0x46, 0x9d, 0xc7, + 0x31, 0x57, 0x97, 0x02, 0x4d, 0x82, 0x07, 0x3e, 0xcd, 0xaf, 0x02, 0xdb, 0xe4, 0xf0, 0xc0, 0x3d, + 0x84, 0x8d, 0x5e, 0xa1, 0xf2, 0x1b, 0x07, 0x07, 0x64, 0x91, 0xe7, 0x50, 0x76, 0xdb, 0x6a, 0x81, + 0xaa, 0x4a, 0x8d, 0xae, 0x13, 0x04, 0x8f, 0x9f, 0x47, 0x3f, 0xa5, 0xfb, 0x4b, 0x23, 0xf8, 0x8d, + 0xe0, 0x9b, 0x0a, 0x04, 0xb3, 0x17, 0x98, 0xcd, 0x17, 0x58, 0x6e, 0x48, 0xd5, 0x7e, 0x1f, 0x3e, + 0x71, 0x82, 0x45, 0xa9, 0x3c, 0x0e, 0x3d, 0xf8, 0xcb, 0x11, 0xff, 0xdd, 0x7f, 0x11, 0x3e, 0x53, + 0x82, 0xef, 0x44, 0x92, 0x66, 0x44, 0x3d, 0x83, 0x14, 0x9b, 0xb5, 0x8b, 0xbe, 0xd9, 0x0d, 0x16, + 0x59, 0xed, 0x05, 0x41, 0xdb, 0xa6, 0x9d, 0x14, 0x7f, 0xec, 0x31, 0x53, 0x6b, 0xfe, 0xf8, 0xd9, + 0x72, 0xd2, 0xbf, 0x51, 0x94, 0xcc, 0x92, 0x47, 0x71, 0xec, 0x5b, 0x39, 0xfd, 0x97, 0x5b, 0x7c, + 0x12, 0x9e, 0xa1, 0x70, 0xe7, 0xd8, 0xe7, 0x56, 0xae, 0xca, 0x69, 0x0c, 0x8e, 0x03, 0xd5, 0x71, + 0x6d, 0x2a, 0x12, 0x84, 0x60, 0x0b, 0xf4, 0x87, 0xd9, 0x4f, 0x57, 0xfb, 0x8f, 0x73, 0x8d, 0xe8, + 0x04, 0x2d, 0xf8, 0xab, 0x13, 0xb8, 0x08, 0x77, 0x05, 0x96, 0xc2, 0x67, 0x93, 0x2f, 0xcf, 0x1c, + 0x6b, 0x63, 0x6a, 0xbc, 0x49, 0x08, 0x5a, 0x52, 0xbb, 0x7e, 0x63, 0xcf, 0x6c, 0x64, 0x84, 0xda, + 0x91, 0x6a, 0xd7, 0x35, 0x19, 0x30, 0x54, 0x0f, 0x1b, 0x93, 0xb3, 0x48, 0x61, 0x36, 0x98, 0xc2, + 0xdd, 0x35, 0x5c, 0x3d, 0xdf, 0x51, 0x49, 0x31, 0x36, 0x2c, 0xf6, 0x3d, 0xbd, 0xe9, 0xb0, 0x9d, + 0x71, 0x0d, 0x4a, 0x9e, 0xa9, 0x88, 0x56, 0x2b, 0x8d, 0xde, 0xc7, 0xd5, 0xf4, 0x7f, 0x74, 0x54, + 0x86, 0x07, 0x84, 0x99, 0x16, 0xb9, 0xdf, 0x83, 0xa9, 0xa4, 0x70, 0xdd, 0xb9, 0x9e, 0x77, 0xf4, + 0xe6, 0x8e, 0x99, 0xcf, 0x24, 0xee, 0xb1, 0x3a, 0x63, 0x02, 0xcd, 0x0e, 0xfc, 0xcd, 0xf9, 0x77, + 0x11, 0x4b, 0x7c, 0x06, 0xee, 0x3b, 0xf5, 0x60, 0xda, 0x56, 0x17, 0x61, 0x04, 0x3b, 0x33, 0x28, + 0x31, 0x77, 0x41, 0x1b, 0xfc, 0xaf, 0x92, 0x5a, 0x29, 0xbb, 0xc9, 0xca, 0x5b, 0xfb, 0xb5, 0x35, + 0x61, 0x92, 0x75, 0xd8, 0xdb, 0x32, 0xcd, 0x5b, 0x02, 0x38, 0xf6, 0xdd, 0xea, 0x22, 0xc1, 0x64, + 0x13, 0x0e, 0x11, 0xc5, 0x42, 0x28, 0x0b, 0x8c, 0x59, 0x50, 0x11, 0x6d, 0x2a, 0xf6, 0xf5, 0x6f, + 0x66, 0x18, 0x20, 0x7b, 0xfa, 0xec, 0xfb, 0xac, 0x24, 0x33, 0xa4, 0x77, 0x1f, 0xb3, 0x41, 0x7a, + 0x24, 0x85, 0x24, 0xd0, 0xf7, 0x98, 0xd3, 0x36, 0xeb, 0x0c, 0xde, 0xc7, 0xa6, 0xe7, 0x4b, 0xc3, + 
0x52, 0x0e, 0xb3, 0x4c, 0x6f, 0xc6, 0x95, 0xac, 0xdd, 0xa8, 0x22, 0xbf, 0xa7, 0x8f, 0x0d, 0x70, + 0x84, 0xf4, 0x83, 0xbd, 0xab, 0x4d, 0x1e, 0x7f, 0xb8, 0xfd, 0xad, 0x59, 0xe2, 0x49, 0x58, 0xff, + 0xd9, 0x1c, 0x6a, 0x85, 0x4c, 0x32, 0x5e, 0x5f, 0xe0, 0xab, 0x72, 0x3a, 0xa4, 0x8b, 0xda, 0xf4, + 0x3c, 0x24, 0xfe, 0xad, 0x9a, 0x4c, 0x33, 0xf1, 0x15, 0xe9, 0x98, 0x31, 0x1e, 0x01, 0x19, 0x31, + 0x88, 0x91, 0x7e, 0xd5, 0x89, 0x01, 0x3d, 0xdd, 0x23, 0x3f, 0x9a, 0xaf, 0xf0, 0x68, 0x55, 0x8d, + 0x7f, 0x80, 0x20, 0x1d, 0x4e, 0x20, 0x80, 0x99, 0x71, 0x20, 0x16, 0xa2, 0x0e, 0x39, 0x24, 0xd6, + 0xf7, 0x32, 0x87, 0x29, 0xc7, 0xc2, 0xc5, 0x88, 0x78, 0x92, 0x91, 0x1e, 0xd7, 0xf6, 0x0f, 0xce, + 0x4b, 0xc5, 0xfc, 0xa7, 0xa9, 0x20, 0x33, 0xa4, 0xb7, 0x05, 0x69, 0x52, 0xb2, 0xc7, 0xc4, 0xeb, + 0xa4, 0x2b, 0xb3, 0x0c, 0x2b, 0x05, 0x73, 0xed, 0xbf, 0x91, 0x1e, 0x10, 0xa1, 0xb7, 0xa8, 0xd1, + 0xc3, 0x0e, 0x74, 0xc8, 0x91, 0xca, 0xaa, 0x50, 0x6a, 0x12, 0xde, 0x74, 0xcb, 0x5b, 0xcc, 0xe2, + 0xbe, 0xc9, 0x3d, 0xb2, 0x4b, 0xcc, 0x5c, 0x86, 0x79, 0x6f, 0x73, 0xfd, 0x7e, 0x29, 0xdd, 0x96, + 0x68, 0xa8, 0x29, 0x16, 0x5a, 0x19, 0x85, 0x3f, 0x5b, 0xf0, 0x29, 0x23, 0x2d, 0xaa, 0x95, 0xc0, + 0xbc, 0x7b, 0x55, 0xf2, 0x43, 0xb3, 0x83, 0x1b, 0xde, 0x89, 0xdc, 0xbb, 0xea, 0xbc, 0xe6, 0xa7, + 0x6d, 0x2a, 0x16, 0xae, 0x89, 0x1b, 0x1d, 0x38, 0x2d, 0xc5, 0x57, 0x96, 0x15, 0xaf, 0xce, 0xce, + 0x41, 0x75, 0xeb, 0xce, 0x03, 0xf3, 0x6d, 0xdf, 0x03, 0x7a, 0x73, 0xf4, 0xdd, 0xd2, 0xaf, 0x10, + 0x17, 0xac, 0x0a, 0xb7, 0xf7, 0xbe, 0x78, 0xeb, 0x43, 0xce, 0x25, 0x9d, 0xbe, 0x66, 0x5e, 0xd8, + 0x2f, 0xad, 0x53, 0x2f, 0xeb, 0x47, 0x2e, 0x9f, 0x61, 0x3f, 0xb7, 0x7d, 0xd0, 0x2f, 0xca, 0x8d, + 0x96, 0xc1, 0x7f, 0x7e, 0x37, 0x75, 0x1d, 0x06, 0xe7, 0x5f, 0xa1, 0xf8, 0x2a, 0xf6, 0xa6, 0xf9, + 0xc4, 0x2c, 0xdf, 0x6e, 0x45, 0xbd, 0xdb, 0x5e, 0xa3, 0xb5, 0x7f, 0xf7, 0x0d, 0x76, 0xcd, 0xfa, + 0xe7, 0x15, 0xb6, 0xd2, 0xf0, 0x34, 0x6b, 0x90, 0x3f, 0xa0, 0x82, 0x3f, 0xef, 0x38, 0x0c, 0x1f, + 0x35, 0xcc, 0x69, 0xbe, 0x4a, 0xc4, 0x80, 0xb0, 0x71, 0x6b, 0xf6, 0xeb, 0x83, 0x1e, 0xe9, 0x28, + 0x3a, 0xb6, 0xf8, 0x4e, 0xfd, 0x28, 0x87, 0xde, 0xea, 0x97, 0xf0, 0x74, 0x06, 0x87, 0x47, 0x71, + 0x81, 0xd7, 0xa5, 0xbb, 0x1c, 0xf4, 0x79, 0xed, 0xe2, 0x7c, 0x89, 0x32, 0x33, 0xff, 0x14, 0x26, + 0xb0, 0x14, 0x8c, 0x29, 0xbb, 0x47, 0x34, 0x60, 0x33, 0x12, 0xbf, 0x57, 0xe6, 0xec, 0x3b, 0x8e, + 0x53, 0x5b, 0x24, 0x2f, 0xb9, 0x25, 0x2b, 0xde, 0xd4, 0xac, 0x5c, 0xe2, 0xf7, 0x34, 0x38, 0xe7, + 0xfa, 0x20, 0xe6, 0x41, 0x17, 0xbf, 0x6a, 0x3e, 0xde, 0xa1, 0x2e, 0x84, 0xcc, 0xc1, 0xb1, 0x8a, + 0xa3, 0x77, 0xf0, 0x3c, 0xb7, 0x91, 0xc7, 0x98, 0xe0, 0x71, 0x44, 0xc8, 0xce, 0xeb, 0x85, 0x1f, + 0x01, 0xb3, 0x29, 0x72, 0xdd, 0x5d, 0x89, 0x62, 0xb0, 0xd4, 0x90, 0xcb, 0x9e, 0xfc, 0xa1, 0x52, + 0x1d, 0x21, 0xea, 0x14, 0x6c, 0xcd, 0xd5, 0x95, 0xac, 0xde, 0xcc, 0x2b, 0x0c, 0x59, 0xe9, 0x6b, + 0x0c, 0x3a, 0x08, 0x8a, 0xd6, 0xfb, 0xea, 0x15, 0x3b, 0xee, 0xa5, 0xf4, 0xcd, 0x78, 0xbf, 0x0e, + 0x8f, 0xad, 0x75, 0xa6, 0x4c, 0xc9, 0x76, 0x40, 0x4e, 0x7b, 0xfc, 0xf6, 0x69, 0x89, 0x92, 0xa0, + 0xcd, 0xf2, 0xb7, 0x75, 0x4d, 0xb5, 0x2f, 0xe1, 0xa0, 0x29, 0xbb, 0x56, 0x30, 0xc5, 0x71, 0xf3, + 0x41, 0x29, 0x7b, 0xc9, 0x66, 0x53, 0x6e, 0xac, 0x01, 0x34, 0xf5, 0xf2, 0x3b, 0xad, 0xa8, 0x39, + 0xc0, 0x45, 0xce, 0xb0, 0x81, 0x7b, 0x8b, 0x87, 0xf1, 0xc3, 0x0d, 0xfb, 0xf3, 0x43, 0xa7, 0x6c, + 0x1b, 0xfb, 0x44, 0xff, 0x6e, 0x76, 0x25, 0x34, 0xd2, 0x2b, 0x69, 0x74, 0x6d, 0xec, 0x64, 0x8d, + 0x47, 0xc7, 0x0e, 0xa4, 0x82, 0x02, 0xe3, 0xc5, 0x42, 0x93, 0x03, 0x56, 0x76, 0x84, 0xa6, 0x3d, + 0x89, 0x46, 0x2f, 0xb5, 
0x06, 0xba, 0x3e, 0x00, 0xf1, 0xaa, 0xcc, 0x68, 0x38, 0xb6, 0x92, 0x0a, + 0x98, 0x3e, 0x05, 0x55, 0x4a, 0x1c, 0x32, 0x26, 0x09, 0xa1, 0x9e, 0x05, 0x55, 0x39, 0x50, 0x7c, + 0x8a, 0xf4, 0x7f, 0x76, 0x91, 0x94, 0x05, 0x7d, 0x90, 0x54, 0x1e, 0xcc, 0xbb, 0xa8, 0xe2, 0x8d, + 0xdb, 0x95, 0x91, 0x9f, 0x17, 0xc9, 0x24, 0xb8, 0x64, 0x38, 0x21, 0xd5, 0x1c, 0x10, 0xf6, 0x13, + 0xa4, 0xf1, 0x3b, 0xd6, 0x50, 0xb9, 0x9e, 0x9d, 0x8e, 0x0e, 0xef, 0x73, 0xe2, 0x9f, 0x6f, 0x90, + 0x7a, 0x8c, 0x82, 0x15, 0xa6, 0x7f, 0x7c, 0x16, 0x09, 0xcd, 0xcc, 0xc7, 0xa8, 0x43, 0x32, 0x5b, + 0xf5, 0x45, 0x6d, 0x8b, 0x9c, 0xc5, 0x2a, 0xd8, 0x27, 0x3e, 0x88, 0xe3, 0xc9, 0xd5, 0x6a, 0xfc, + 0xc2, 0x76, 0xd9, 0x84, 0x12, 0x59, 0x25, 0xf5, 0xbc, 0xe2, 0x72, 0x09, 0x34, 0xcd, 0xc7, 0xe1, + 0xf5, 0xe5, 0x3f, 0x66, 0x71, 0xf9, 0x78, 0x68, 0xb9, 0x84, 0xf4, 0xc3, 0x5c, 0x39, 0xf0, 0xb7, + 0x98, 0x22, 0x31, 0x68, 0xfc, 0x0c, 0x51, 0x7b, 0x74, 0x0c, 0x27, 0xc4, 0x36, 0x9b, 0x26, 0x98, + 0x70, 0x22, 0x0b, 0x4f, 0xaa, 0xfd, 0x67, 0x0c, 0x95, 0x01, 0x7b, 0x40, 0xa3, 0xf6, 0xc0, 0x7b, + 0xa7, 0x17, 0xbc, 0xd1, 0xc6, 0x5b, 0x06, 0xe1, 0xd6, 0x24, 0xa8, 0x55, 0x7b, 0x84, 0x3e, 0xbc, + 0x33, 0xe9, 0x37, 0x7d, 0x6d, 0xa6, 0xdf, 0x4e, 0x3a, 0x6a, 0x46, 0xf7, 0xc7, 0xf6, 0xad, 0x77, + 0x15, 0x49, 0x6d, 0x93, 0xb3, 0x62, 0x8d, 0xb2, 0xc4, 0xb1, 0xd3, 0xbf, 0xa1, 0x6b, 0x94, 0xc8, + 0x01, 0x83, 0x8a, 0x06, 0xdc, 0x60, 0x25, 0x88, 0x3a, 0xd6, 0x90, 0x86, 0xf8, 0x2d, 0xfa, 0xcb, + 0xdf, 0xea, 0x9b, 0xf6, 0x8e, 0xc2, 0x3f, 0xcb, 0xc2, 0xdb, 0x7e, 0x49, 0x23, 0xfc, 0xb0, 0xd5, + 0x8d, 0x92, 0x3d, 0x6b, 0xfe, 0x9a, 0x60, 0xce, 0x11, 0xd3, 0xd2, 0x24, 0x13, 0x58, 0x4b, 0xb3, + 0x4d, 0x57, 0xc2, 0x52, 0xe0, 0xb4, 0xdb, 0xea, 0xe9, 0x6b, 0x80, 0xf4, 0x22, 0xfe, 0x64, 0x41, + 0xbf, 0x72, 0xc7, 0x67, 0x76, 0x31, 0xae, 0xac, 0xbe, 0x94, 0x05, 0x1b, 0x1d, 0xed, 0xda, 0xbb, + 0xaa, 0x3b, 0x4a, 0x34, 0xe6, 0x1b, 0xee, 0x90, 0xf2, 0x10, 0xac, 0x7a, 0xeb, 0x20, 0x60, 0xaf, + 0x76, 0xbf, 0xe6, 0x28, 0x7b, 0xca, 0x8c, 0x8e, 0xe2, 0xdd, 0x8f, 0xe2, 0x45, 0xea, 0xb3, 0x19, + 0xf2, 0x4c, 0x13, 0x35, 0x38, 0x5a, 0xb4, 0xfc, 0x5f, 0x26, 0x39, 0x34, 0x86, 0x03, 0x10, 0xb3, + 0x6f, 0xf6, 0x8e, 0x97, 0xe8, 0xab, 0xf9, 0xc3, 0x25, 0xda, 0xe2, 0x5f, 0xdd, 0xc7, 0xe5, 0xc0, + 0xfa, 0xc3, 0x1e, 0x86, 0xa3, 0xe5, 0xd1, 0x6c, 0x21, 0x31, 0x84, 0xf1, 0xb4, 0x71, 0x4a, 0x52, + 0xb7, 0x96, 0x50, 0x65, 0x66, 0x66, 0xb8, 0x8b, 0x08, 0x9a, 0xe3, 0xb8, 0x40, 0x78, 0x46, 0x73, + 0x24, 0x76, 0xba, 0x31, 0x8e, 0xfd, 0x5f, 0xbf, 0x90, 0xe2, 0xa5, 0xc4, 0x31, 0x03, 0xc2, 0x8f, + 0xa4, 0xf9, 0xf1, 0x4b, 0x4f, 0x69, 0x6f, 0x87, 0xae, 0x93, 0xd0, 0x67, 0xe0, 0x37, 0x2d, 0x62, + 0xfd, 0x9a, 0x34, 0xb0, 0x7e, 0x65, 0x96, 0xde, 0x08, 0xbb, 0xed, 0xc6, 0x0c, 0xa7, 0xe8, 0x6b, + 0x29, 0xe9, 0x4f, 0xb9, 0x4a, 0xc7, 0xa3, 0x6f, 0xa4, 0x52, 0xb2, 0x1e, 0x46, 0xc0, 0xce, 0xee, + 0x8e, 0xa2, 0x9e, 0xaa, 0xaa, 0xc1, 0x8e, 0x42, 0x78, 0x71, 0x7b, 0x2e, 0x3d, 0xfc, 0x85, 0xa9, + 0xd8, 0xe4, 0xc1, 0x37, 0xc1, 0x94, 0x14, 0x02, 0x77, 0xf6, 0xe0, 0x39, 0x4f, 0xa4, 0x9f, 0xba, + 0x7c, 0xa7, 0x7a, 0xc4, 0x33, 0xb6, 0xd0, 0x6d, 0xa6, 0x5b, 0xa4, 0xc6, 0xc5, 0xd3, 0x25, 0xa2, + 0xf1, 0x85, 0xc3, 0x71, 0x7d, 0xf8, 0x05, 0xf4, 0x73, 0x74, 0x41, 0x08, 0x15, 0x7a, 0xbd, 0x79, + 0xc3, 0x76, 0x35, 0xd3, 0x3c, 0x45, 0x36, 0xe8, 0xf8, 0x98, 0x71, 0x46, 0xee, 0xe6, 0xdf, 0xf7, + 0xd4, 0xed, 0xea, 0x5a, 0xc9, 0x55, 0x03, 0xbd, 0xf5, 0x75, 0xd9, 0x46, 0xa8, 0xb7, 0x34, 0x3b, + 0x04, 0x13, 0xd1, 0x3a, 0x99, 0x06, 0x98, 0x18, 0x60, 0xd3, 0x10, 0x7f, 0x8b, 0x5b, 0x42, 0x51, + 0xdf, 0xac, 0xeb, 0x47, 0xd6, 0x5c, 0x6d, 0xaf, 
0x76, 0x1b, 0xb9, 0x78, 0x2d, 0x91, 0x3d, 0x76, + 0xf6, 0x8c, 0xd8, 0xb4, 0x83, 0xd3, 0xac, 0x3a, 0xe5, 0x2d, 0x01, 0x13, 0x96, 0xe6, 0x42, 0x78, + 0x17, 0xce, 0xf4, 0x25, 0xa6, 0x37, 0x95, 0xbc, 0x91, 0x33, 0xfd, 0x97, 0x62, 0xa1, 0xf8, 0x30, + 0xef, 0x1d, 0xd6, 0x20, 0xc2, 0x03, 0x56, 0x55, 0x6e, 0x20, 0x37, 0x25, 0xc8, 0xf9, 0x4a, 0x22, + 0xd5, 0x91, 0x96, 0x85, 0x1c, 0x1c, 0x09, 0x42, 0xb0, 0x53, 0x2d, 0x57, 0x21, 0x95, 0x55, 0x4d, + 0x20, 0xff, 0x7d, 0xa8, 0x52, 0xa0, 0xe9, 0x3b, 0xbf, 0x21, 0x81, 0x5f, 0xf6, 0xc7, 0xe1, 0x5b, + 0xef, 0x18, 0x3b, 0x86, 0x60, 0xdf, 0x1b, 0x4c, 0xbf, 0x5a, 0xfe, 0x23, 0x3f, 0xf7, 0x6b, 0xba, + 0x4f, 0x55, 0x92, 0x3b, 0xce, 0x1e, 0x0d, 0x6f, 0xde, 0xb4, 0xf7, 0xe1, 0xde, 0x65, 0xae, 0x28, + 0xf9, 0x25, 0xd9, 0x28, 0xa4, 0x1f, 0x78, 0x1a, 0x78, 0xf9, 0x38, 0x39, 0x59, 0x7a, 0x8a, 0xeb, + 0x07, 0x30, 0xb3, 0x2d, 0x42, 0xd5, 0x6a, 0x5c, 0x74, 0xad, 0x51, 0xfb, 0xbc, 0x12, 0x58, 0x64, + 0x2d, 0x34, 0x7a, 0xd0, 0x23, 0xca, 0x92, 0x13, 0x3b, 0xbe, 0x31, 0xe4, 0xa2, 0x49, 0x64, 0xfb, + 0xf7, 0xf4, 0x8e, 0x4b, 0x7a, 0xd7, 0xaf, 0x22, 0x35, 0x53, 0x59, 0xa0, 0x8b, 0x77, 0xcb, 0x50, + 0xea, 0xc6, 0x67, 0x77, 0x80, 0x99, 0x73, 0x96, 0xb5, 0x47, 0x95, 0xb7, 0xaf, 0xc2, 0x28, 0xe2, + 0x35, 0x4a, 0xde, 0xbf, 0x1a, 0x85, 0xdd, 0x4b, 0xdc, 0x51, 0xdc, 0xd3, 0xdf, 0x49, 0x0a, 0xe5, + 0x6c, 0x51, 0x53, 0xdf, 0x57, 0xbd, 0x92, 0x6b, 0xd8, 0x0a, 0xdb, 0x71, 0xb5, 0xd5, 0x04, 0xad, + 0xa7, 0x34, 0x4b, 0x26, 0x7e, 0x7c, 0x3d, 0x1c, 0x3f, 0x40, 0xfe, 0x7c, 0x5a, 0x8c, 0x80, 0xce, + 0xd6, 0x3f, 0xd1, 0xad, 0xd0, 0xd7, 0xb1, 0xb1, 0xf7, 0xa1, 0x79, 0x78, 0xbb, 0xb9, 0x2e, 0xcd, + 0x2d, 0x58, 0xfd, 0x8b, 0x0b, 0x0a, 0xaa, 0xff, 0xe2, 0xcd, 0xf6, 0xd3, 0x0f, 0x5c, 0x01, 0x91, + 0x2d, 0xff, 0xed, 0xfe, 0xc2, 0x37, 0x98, 0x3b, 0xf1, 0x45, 0x16, 0xed, 0x8d, 0x2b, 0x03, 0x77, + 0x60, 0xf9, 0xbb, 0x0a, 0xef, 0x2a, 0xd6, 0xc5, 0x5d, 0xb4, 0x1d, 0xec, 0x2f, 0x1b, 0x5c, 0x19, + 0xae, 0x45, 0x03, 0xe0, 0xa0, 0x92, 0x03, 0x54, 0xc8, 0xae, 0xf2, 0xfa, 0xc9, 0xe0, 0x54, 0x52, + 0x52, 0x3b, 0xd4, 0x90, 0xd6, 0xd5, 0x25, 0xff, 0xc9, 0xff, 0xf3, 0xa1, 0x07, 0x86, 0x98, 0x55, + 0xfe, 0x40, 0x64, 0x09, 0xc0, 0x6e, 0xdb, 0xe0, 0x9b, 0xfc, 0xce, 0xe5, 0x6e, 0x9c, 0x98, 0x46, + 0x6b, 0xed, 0xf7, 0xa0, 0x2a, 0xc4, 0x12, 0xa4, 0xa4, 0x9a, 0x1f, 0x46, 0x07, 0x2f, 0x0b, 0xb4, + 0x76, 0x5b, 0x74, 0x91, 0xbf, 0x2b, 0x71, 0x68, 0x92, 0xd3, 0x0a, 0x8e, 0x3a, 0x3e, 0x8e, 0x95, + 0x58, 0x7c, 0xec, 0x07, 0x91, 0xf1, 0xff, 0x7a, 0x94, 0x31, 0xee, 0x36, 0x8c, 0x23, 0x1d, 0x36, + 0x7a, 0x78, 0xfe, 0x71, 0x5d, 0x66, 0xf7, 0x9e, 0xb9, 0xc9, 0x1c, 0xc9, 0x3b, 0xe9, 0x18, 0x46, + 0xd5, 0x8e, 0xf1, 0xf5, 0xa6, 0xe2, 0x8f, 0x2a, 0xad, 0xb6, 0x5e, 0xc5, 0x26, 0xe1, 0xc2, 0x3a, + 0x78, 0xf1, 0xb9, 0xec, 0x2b, 0xa1, 0xa8, 0x6a, 0xe2, 0xcc, 0x8d, 0x06, 0x33, 0x33, 0x1e, 0xcf, + 0x5a, 0x43, 0xa5, 0x4c, 0x14, 0x65, 0x75, 0x87, 0xd8, 0x95, 0x80, 0xa4, 0xa9, 0x0b, 0xba, 0x32, + 0xc3, 0x8d, 0xda, 0x2f, 0xeb, 0xec, 0x7c, 0x0e, 0xff, 0xb8, 0x29, 0x1c, 0x84, 0x5a, 0x2e, 0x87, + 0xe4, 0x67, 0xa9, 0xef, 0x8f, 0x0e, 0x2e, 0x0a, 0xdb, 0x7e, 0x57, 0xa3, 0x91, 0x8b, 0x80, 0x8d, + 0x0c, 0x25, 0xd2, 0xbe, 0x04, 0xfc, 0xe2, 0xd7, 0xec, 0x30, 0xf7, 0xc2, 0x66, 0x0b, 0x45, 0x75, + 0xf2, 0x25, 0x3a, 0x0e, 0x23, 0xcf, 0xf7, 0x11, 0x06, 0x5e, 0x5a, 0xa3, 0x7a, 0xdb, 0x5a, 0x4d, + 0x8c, 0xd6, 0x89, 0x6d, 0xc9, 0xab, 0x72, 0x93, 0x57, 0xd3, 0xf5, 0x09, 0x3f, 0x89, 0xbd, 0x5f, + 0x53, 0x95, 0x63, 0xfe, 0xbd, 0xcf, 0x0f, 0x0d, 0x7f, 0x33, 0xcd, 0xca, 0x2d, 0x1f, 0xa1, 0xfe, + 0x55, 0x56, 0x28, 0xb5, 0x95, 0x5a, 0xb6, 0xa7, 0x0d, 0x93, 0xf9, 0x6b, 
0x19, 0xe3, 0x0a, 0x32, + 0xda, 0xce, 0xe2, 0x13, 0x92, 0x9e, 0x89, 0x46, 0x91, 0xf2, 0x8a, 0xc0, 0x40, 0xf4, 0xc7, 0xb3, + 0xcd, 0x43, 0x6f, 0x93, 0x99, 0x73, 0x06, 0x52, 0x32, 0xad, 0x5f, 0x95, 0x4e, 0xe9, 0x08, 0x46, + 0x67, 0x57, 0xb8, 0x7d, 0x33, 0x75, 0x84, 0x12, 0xc8, 0xe0, 0x45, 0x7f, 0x96, 0x8f, 0x07, 0x0a, + 0x28, 0x63, 0xef, 0xc1, 0x0f, 0xae, 0x74, 0x55, 0x68, 0x14, 0x93, 0x52, 0xd0, 0x73, 0x75, 0x62, + 0xc5, 0x2a, 0xfa, 0xa8, 0x0a, 0x1f, 0xd1, 0x29, 0x1a, 0x95, 0xc6, 0xa0, 0x77, 0x0c, 0x3e, 0x28, + 0xce, 0x7e, 0xf9, 0x76, 0x0b, 0x8a, 0xa7, 0x5c, 0xcd, 0xd9, 0x58, 0x9f, 0x28, 0x12, 0x55, 0x09, + 0xa3, 0x09, 0x58, 0x07, 0xb4, 0xad, 0x79, 0xd1, 0xd1, 0xdf, 0x22, 0x48, 0xb0, 0x84, 0xac, 0xee, + 0xc6, 0x3e, 0xf0, 0xc6, 0xe8, 0x33, 0x0c, 0x2a, 0xfb, 0x14, 0x6f, 0x2b, 0x4f, 0x14, 0xa5, 0xaa, + 0x84, 0x0e, 0x0e, 0x90, 0x3d, 0x30, 0x8b, 0x69, 0xd8, 0x2d, 0xe6, 0x10, 0xe3, 0xdb, 0xd4, 0x93, + 0x92, 0x71, 0xab, 0x8f, 0xbb, 0x51, 0xee, 0x95, 0xac, 0xe6, 0x04, 0x32, 0x92, 0x4e, 0xd7, 0x32, + 0xd6, 0x99, 0xd7, 0xfc, 0x40, 0x01, 0xd8, 0x1d, 0x24, 0xc0, 0xc8, 0x37, 0x7a, 0xa4, 0xe9, 0x3c, + 0xc4, 0x75, 0xa0, 0x29, 0xf4, 0xd2, 0xaa, 0xd1, 0xb5, 0x4e, 0xc6, 0x6a, 0x64, 0xb8, 0x8e, 0x56, + 0xb9, 0xe3, 0xa2, 0x9d, 0x05, 0xef, 0xe0, 0x2c, 0x3f, 0x69, 0xcf, 0x40, 0xc5, 0x99, 0x8a, 0xac, + 0x14, 0xc0, 0x6f, 0x5e, 0x31, 0x97, 0x24, 0x48, 0x6a, 0x11, 0xce, 0xbe, 0x6c, 0x75, 0x44, 0xf8, + 0x3d, 0xf3, 0xb3, 0xb4, 0xaa, 0xf8, 0xb9, 0x1a, 0x1f, 0x6f, 0x89, 0x45, 0x97, 0xcf, 0x81, 0x76, + 0x1a, 0x47, 0x21, 0x48, 0x63, 0xdc, 0xcc, 0xad, 0x49, 0x63, 0xda, 0xd7, 0x5a, 0x4c, 0xe4, 0xdc, + 0xd3, 0x71, 0xe1, 0xda, 0xa8, 0xf2, 0xf8, 0x18, 0xc1, 0xe6, 0xed, 0x8f, 0x9e, 0x59, 0xb8, 0x2c, + 0x8e, 0xe3, 0xbc, 0x45, 0x3b, 0xf7, 0xf8, 0xa2, 0xd4, 0xf9, 0x47, 0x0d, 0xa5, 0xa1, 0xe1, 0x7c, + 0x38, 0x7d, 0x51, 0x32, 0xce, 0xb4, 0x99, 0xa5, 0xfc, 0x5f, 0x0e, 0xde, 0x1c, 0xde, 0xac, 0xf4, + 0x52, 0x58, 0x22, 0xa5, 0x35, 0x62, 0x84, 0xec, 0x8f, 0x52, 0x94, 0x2a, 0xd9, 0x8e, 0x28, 0xe4, + 0x97, 0x5e, 0xaa, 0x64, 0x8e, 0x55, 0xfa, 0x47, 0xd1, 0xe0, 0xf3, 0x14, 0xf8, 0xc2, 0xa3, 0x64, + 0xfa, 0x59, 0xe3, 0x78, 0x3a, 0x15, 0xf8, 0xf7, 0xd9, 0x4d, 0xb6, 0x45, 0x2d, 0x56, 0x13, 0x23, + 0x88, 0xc2, 0x6c, 0x26, 0x75, 0xf8, 0xa2, 0xe8, 0xdf, 0x60, 0x68, 0x23, 0x0b, 0x6c, 0x19, 0x20, + 0x3e, 0x2c, 0x80, 0x51, 0x36, 0xac, 0x79, 0x93, 0x08, 0xf3, 0x84, 0x07, 0xd9, 0xca, 0x1f, 0x0f, + 0xf8, 0xf8, 0x75, 0x3f, 0x9c, 0x1b, 0x0e, 0xdb, 0xa5, 0x9a, 0x0c, 0xf9, 0x2f, 0xa4, 0xa5, 0xf9, + 0xd5, 0xd8, 0x03, 0x86, 0x80, 0x59, 0x2c, 0x68, 0x35, 0x7f, 0x34, 0xb3, 0xe2, 0x27, 0xda, 0x25, + 0xd9, 0x5c, 0x78, 0x38, 0x1c, 0xb7, 0x09, 0x38, 0x96, 0xfd, 0x7f, 0xa9, 0xdb, 0xc0, 0xbf, 0x96, + 0x94, 0x09, 0xef, 0xfe, 0xb2, 0x19, 0xce, 0x24, 0xb0, 0x77, 0xc7, 0xb0, 0x84, 0xd0, 0xa3, 0x59, + 0x57, 0xef, 0x0c, 0x47, 0x38, 0xef, 0x95, 0x9a, 0x79, 0x0b, 0x77, 0x44, 0x7f, 0xc7, 0x58, 0x96, + 0xc5, 0x96, 0x53, 0x9a, 0x35, 0x44, 0x53, 0xfe, 0x4b, 0x0e, 0x79, 0x7d, 0x0b, 0x6b, 0xa4, 0xc1, + 0x2a, 0x9c, 0x65, 0xd0, 0x70, 0xc7, 0xcd, 0x1f, 0x6c, 0x5a, 0x27, 0xe9, 0x0b, 0x99, 0x6d, 0x3c, + 0x09, 0xa2, 0xa4, 0xf1, 0xcf, 0x44, 0x30, 0x3a, 0x74, 0x0c, 0x1d, 0xe7, 0x51, 0x67, 0x47, 0x56, + 0x29, 0x57, 0xe1, 0x3c, 0x74, 0x8c, 0xb8, 0x64, 0x08, 0x8f, 0xc2, 0x63, 0x44, 0xb0, 0x0e, 0xd5, + 0x6c, 0x2d, 0x22, 0x24, 0x7c, 0x9d, 0x79, 0xab, 0xbf, 0x35, 0x1e, 0x6b, 0x40, 0xfd, 0x18, 0x06, + 0x20, 0x4b, 0xd4, 0x98, 0x30, 0xdf, 0x8c, 0x0a, 0x52, 0xbd, 0x53, 0xc8, 0x65, 0x75, 0x47, 0x71, + 0x54, 0xb6, 0xaf, 0x1f, 0x48, 0x17, 0xf5, 0x32, 0x2d, 0x7e, 0x48, 0xcd, 0x7d, 0x78, 0x57, 0x6d, + 
0x37, 0x02, 0x84, 0x48, 0x9d, 0xfc, 0x58, 0xa7, 0x2b, 0x9d, 0xb8, 0xab, 0xf1, 0x41, 0xf1, 0xac, + 0xde, 0xc9, 0x8e, 0x3c, 0x4d, 0x2b, 0x6c, 0x44, 0xd7, 0xd7, 0x56, 0x7a, 0x08, 0x93, 0x34, 0x69, + 0xe6, 0x56, 0x41, 0x89, 0xe2, 0xdc, 0xdd, 0xea, 0x1e, 0xf1, 0x39, 0x05, 0x83, 0xb7, 0x18, 0x99, + 0xcc, 0x76, 0x86, 0xbb, 0x43, 0x50, 0xf1, 0x36, 0x72, 0x7b, 0x33, 0xc2, 0xbc, 0xf5, 0x9b, 0x6f, + 0xe4, 0x6a, 0x3d, 0x9d, 0xc5, 0x3b, 0x84, 0xd5, 0x6f, 0x8b, 0x5f, 0x75, 0xed, 0x33, 0xd4, 0x3a, + 0x82, 0x50, 0x4a, 0x85, 0x12, 0xc2, 0x5a, 0xab, 0x39, 0xa5, 0x1d, 0x0f, 0xb4, 0x77, 0x31, 0x88, + 0x3c, 0x16, 0x96, 0xb4, 0xbd, 0x71, 0xff, 0x5f, 0xfb, 0xff, 0xb7, 0x6f, 0xd1, 0x7f, 0xfc, 0xc7, + 0x7f, 0xfc, 0xc7, 0x7f, 0xfc, 0xff, 0xc2, 0xff, 0x00, 0x09, 0x6c, 0x5a, 0x9f, 0x00, 0x21, 0x00, + 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8448, // uncompressed data size (bytes) + 6785, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x06, 0x62, 0x36, 0x08, 0x13, 0x4c, 0x48, 0x41, 0x69, + 0x20, 0x00, 0x00, 0x37, 0x0f, 0x4b, 0x90, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_dbg_tu10x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_sig_dbg_data[] = +{ + 0x1d, 0x5d, 0xcc, 0x87, 0xb8, 0x39, 0xaf, 0xfe, 0x80, 0xb7, 0x76, 0x7e, 0x93, 0x93, 0x30, 0xd1, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_sig_dbg_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_prod_tu10x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_sig_prod_data[] = +{ + 0xe4, 0x53, 0xfd, 0x5f, 0x0f, 0xf8, 0x70, 0x57, 0x0e, 0xfb, 0x23, 0x4f, 0xbe, 0xe7, 0xc2, 0x95, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_sig_prod_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_tu10x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_patch_loc_data[] = +{ + 0x00, 0x1d, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_tu10x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_tu10x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? 
+ NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU102("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu10x/unload/g_booteruc_unload_tu10x_tu102_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU102_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU102_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU102_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterUnloadUcode_TU102 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU102_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_TU102(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterUnloadUcode_TU102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU116.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU116.c new file mode 100644 index 000000000..1b2e6ffd7 --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveBooterUnloadUcode_TU116.c @@ -0,0 +1,1245 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! */ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8448 +// COMPRESSED SIZE (bytes): 6800 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_image_dbg_data[] = +{ + 0xed, 0x99, 0x45, 0x54, 0x1b, 0x00, 0xb3, 0xb6, 0x13, 0xdc, 0x09, 0x92, 0xe2, 0x50, 0x9c, 0xe0, + 0xee, 0x56, 0xb4, 0x78, 0x29, 0xc5, 0x4b, 0x71, 0x28, 0xa5, 0x48, 0x71, 0x29, 0xee, 0x50, 0xa4, + 0xb8, 0x14, 0x12, 0x5c, 0x82, 0xbb, 0x7b, 0x21, 0x14, 0x08, 0x5e, 0xdc, 0xdd, 0x8b, 0x14, 0x97, + 0xdb, 0xbb, 0xfd, 0x76, 0x77, 0xf1, 0x6f, 0xfe, 0xf3, 0x3d, 0x9b, 0x77, 0x66, 0x33, 0x33, 0x8b, + 0x99, 0x73, 0xde, 0x73, 0x26, 0x0a, 0x00, 0x00, 0x3e, 0x25, 0x03, 0xfc, 0x09, 0x01, 0x80, 0x1b, + 0x94, 0x1b, 0xc0, 0x2d, 0x4a, 0x2c, 0x00, 0x05, 0xd0, 0xb5, 0xb2, 0xfa, 0xfc, 0xfc, 0x4c, 0x18, + 0x05, 0x00, 0x02, 0x9e, 0xa1, 0x68, 0x75, 0x5b, 0x00, 0x81, 0xf4, 0x79, 0x60, 0x5d, 0x32, 0x80, + 0x33, 0x7d, 0x1e, 0xe5, 0x9f, 0x30, 0xa7, 0xcf, 0xa3, 0xfe, 0x13, 0xda, 0x28, 0x00, 0x00, 0x90, + 0x0e, 0x45, 0x6d, 0x58, 0x45, 0xed, 0x82, 0xc2, 0x56, 0xd3, 0xa1, 0xc0, 0xf4, 0x0c, 0x94, 0x86, + 0x55, 0x60, 0xc3, 0x3c, 0x8a, 0x7f, 0x23, 0x00, 0x40, 0xc8, 0xa7, 0x02, 0x00, 0x5d, 0x43, 0x01, + 0x84, 0xc0, 0x28, 0x80, 0x2d, 0xda, 0xff, 0x06, 0xf8, 0xcd, 0x19, 0x00, 0x20, 0xca, 0xbf, 0xe8, + 0x52, 0x12, 0x25, 0x1e, 0x00, 0xc0, 0x88, 0xfa, 0xd7, 0xea, 0x11, 0x8a, 0xde, 0x71, 0x79, 0x77, + 0x8b, 0x71, 0x83, 0xfa, 0xaf, 0x1a, 0x4a, 0x0a, 0x21, 0xd6, 0xca, 0xff, 0x76, 0x7e, 0xbc, 0x45, + 0x8f, 0x02, 0x50, 0x01, 0x1e, 0x73, 0xd0, 0xda, 0x12, 0x50, 0x1a, 0x93, 0x41, 0xff, 0xf2, 0xa7, + 0x3b, 0x80, 0xff, 0xbf, 0x79, 0x6e, 0x50, 0x3a, 0x0a, 0xee, 0x81, 0xf1, 0x48, 0x6c, 0xc2, 0xa8, + 0x97, 0xe8, 0x00, 0x18, 0xf4, 0x06, 0x10, 0xff, 0x4f, 0xbb, 0x9e, 0x2f, 0x79, 0x1f, 0x9e, 0x54, + 0x80, 0x30, 0xa8, 0xf4, 0x38, 0xe0, 0xff, 0x04, 0xf6, 0x9c, 0x09, 0xcb, 0xd4, 0x60, 0x61, 0x49, + 0xe2, 0xbc, 0xae, 0xc8, 0x31, 0x8b, 0x22, 0x4d, 0x8e, 0xac, 0x71, 0xe3, 0x07, 0x15, 0xb3, 0x62, + 0xc7, 0x98, 0x1d, 0xaa, 0x4d, 0x7a, 0x9b, 0xdc, 0x27, 0x77, 0x83, 0x71, 0xdf, 0xcf, 0x26, 0xce, + 0xbd, 0x73, 0x4b, 0x8b, 0x60, 0x08, 0x17, 
0xbd, 0xe1, 0x02, 0x14, 0x14, 0xd6, 0x49, 0xc1, 0xcd, + 0x5c, 0x7c, 0xec, 0xaa, 0x1f, 0x43, 0xb6, 0xa9, 0x84, 0x04, 0x12, 0x89, 0x7e, 0x60, 0x40, 0x7a, + 0xb1, 0x54, 0xff, 0xf9, 0x59, 0x98, 0xcb, 0x93, 0x1d, 0x9c, 0xca, 0xfe, 0x68, 0xd1, 0x2d, 0x19, + 0x37, 0x5d, 0x45, 0xdd, 0x6d, 0x79, 0x43, 0x1b, 0x18, 0xc4, 0xea, 0x48, 0x35, 0x99, 0x0b, 0xeb, + 0x90, 0xfc, 0xba, 0x16, 0xbd, 0xc0, 0xb4, 0xe5, 0x5d, 0x82, 0xad, 0xfe, 0xa9, 0x20, 0x62, 0xf8, + 0x78, 0xab, 0x97, 0x4f, 0x74, 0xd8, 0x46, 0x64, 0x67, 0x09, 0x2b, 0x01, 0x16, 0x55, 0x05, 0xe9, + 0x55, 0x40, 0x89, 0xbe, 0x1b, 0x38, 0x5e, 0x6b, 0xaf, 0x48, 0x71, 0xba, 0xc4, 0xb6, 0x18, 0xe8, + 0xc9, 0xd4, 0x4b, 0xf1, 0xeb, 0xde, 0x1d, 0xd2, 0x5c, 0x5c, 0xb3, 0x50, 0x5f, 0x56, 0x42, 0x90, + 0x37, 0x3f, 0xa8, 0xcd, 0x4d, 0x60, 0xbd, 0x59, 0x9b, 0x16, 0x8b, 0x96, 0xea, 0x2f, 0x0d, 0xc6, + 0x34, 0x4b, 0x38, 0x30, 0x1a, 0xeb, 0x89, 0xfe, 0xcd, 0xc9, 0x9a, 0x57, 0xc2, 0xd5, 0x7a, 0x95, + 0xe6, 0xa0, 0xb2, 0x69, 0x11, 0x3b, 0xad, 0xeb, 0xc7, 0xe7, 0x50, 0x9d, 0xae, 0xa8, 0xaf, 0x35, + 0xae, 0xe3, 0x5b, 0xb8, 0x3c, 0xdc, 0x26, 0x99, 0x3e, 0x38, 0xd9, 0x9f, 0xd0, 0x44, 0x77, 0x91, + 0xa1, 0x6a, 0xe9, 0x78, 0xc2, 0x6f, 0x2e, 0x5e, 0xe1, 0xdf, 0xe5, 0x21, 0x90, 0xd7, 0x28, 0xfe, + 0x82, 0x03, 0x02, 0xd8, 0x99, 0x3c, 0xbb, 0x8b, 0xcd, 0x71, 0xe6, 0xa8, 0xb5, 0x2f, 0xb0, 0xbb, + 0xf4, 0x8b, 0x49, 0xfc, 0xf2, 0x32, 0xf0, 0x74, 0xbd, 0xb7, 0x9c, 0x4f, 0x0b, 0xb9, 0xc4, 0xff, + 0xb6, 0x65, 0xa5, 0xf8, 0xa1, 0x24, 0xdb, 0x50, 0x60, 0xb8, 0x3e, 0xd9, 0x81, 0x40, 0x2c, 0x1d, + 0x86, 0x70, 0x65, 0x77, 0x9a, 0x20, 0x93, 0x7e, 0x2d, 0x29, 0xfc, 0xd6, 0x0a, 0x16, 0xca, 0x5e, + 0x6b, 0xbb, 0x2e, 0x3b, 0x40, 0x17, 0x5f, 0x57, 0x37, 0x36, 0xb7, 0xf4, 0x8d, 0x14, 0xb2, 0x64, + 0xa4, 0x37, 0xee, 0x21, 0x4c, 0x85, 0x16, 0x68, 0xf1, 0x10, 0x78, 0x87, 0x98, 0x4b, 0xfb, 0xc2, + 0x8c, 0xca, 0x42, 0x05, 0x92, 0x2d, 0x0d, 0x1d, 0x8f, 0xbc, 0x49, 0x9e, 0x5e, 0xc9, 0xde, 0xf6, + 0x20, 0x86, 0xd3, 0x0a, 0x48, 0xfb, 0x27, 0x7c, 0x15, 0x85, 0x0d, 0xc8, 0x6b, 0xbd, 0x1b, 0x93, + 0x58, 0x56, 0x8e, 0xef, 0x7c, 0x50, 0x56, 0xc1, 0xab, 0x1b, 0xc7, 0xfe, 0x89, 0xd4, 0xf1, 0x91, + 0x5e, 0x9b, 0x0d, 0x35, 0xa2, 0x72, 0xff, 0xd5, 0xcb, 0x11, 0x47, 0x7f, 0x6e, 0x59, 0xb5, 0x05, + 0x00, 0xda, 0x0e, 0x87, 0xee, 0xdc, 0x7e, 0x13, 0x5c, 0xa6, 0x69, 0xd6, 0xfa, 0xc0, 0xdc, 0x42, + 0x0f, 0xdb, 0xfa, 0x35, 0x83, 0xd1, 0x7c, 0x17, 0xd0, 0x9a, 0x18, 0x50, 0x11, 0xb0, 0x76, 0xd3, + 0xeb, 0xdb, 0xce, 0x5d, 0x78, 0x50, 0x7a, 0xd6, 0x86, 0xf1, 0xe3, 0x8b, 0x64, 0xbb, 0x8b, 0x69, + 0x78, 0x2c, 0x47, 0x47, 0x58, 0xc7, 0x96, 0xa6, 0xe3, 0x57, 0x31, 0x2a, 0x19, 0x4e, 0x8d, 0x2e, + 0xfd, 0x58, 0xef, 0xe3, 0x54, 0xaa, 0x7d, 0x31, 0xd9, 0x5a, 0x8f, 0x68, 0xc9, 0xa7, 0xb3, 0x35, + 0x8e, 0xc2, 0xca, 0x3f, 0xef, 0x84, 0xdf, 0xd5, 0x7d, 0xea, 0x55, 0x9a, 0x8f, 0x41, 0xc7, 0x0e, + 0xe0, 0xce, 0x64, 0x84, 0x98, 0xea, 0x47, 0x28, 0xe5, 0x74, 0xd9, 0x39, 0x55, 0xbd, 0x74, 0x32, + 0x73, 0xbe, 0xcd, 0x96, 0x78, 0x17, 0x82, 0x0f, 0x7d, 0x8a, 0xa6, 0x17, 0x7c, 0x20, 0x43, 0xda, + 0x99, 0x5a, 0x05, 0xcd, 0x90, 0x2b, 0x3f, 0x54, 0xe8, 0xa2, 0x7a, 0x58, 0xd7, 0x11, 0xf5, 0x25, + 0x53, 0xce, 0xdc, 0x74, 0x54, 0x7d, 0xdc, 0x33, 0x7f, 0x3b, 0x0a, 0x67, 0xc9, 0x57, 0x09, 0x25, + 0x44, 0x86, 0xa5, 0x8e, 0x19, 0xb3, 0x2b, 0x75, 0xd0, 0x06, 0x94, 0x6b, 0x37, 0x0b, 0x1b, 0x44, + 0x18, 0x55, 0xf6, 0x79, 0xee, 0x18, 0xb0, 0xc6, 0x25, 0x51, 0xcc, 0xab, 0x8d, 0xc8, 0xf2, 0x56, + 0x69, 0x31, 0xc9, 0x8c, 0x85, 0x10, 0xc9, 0xb7, 0xc0, 0x1e, 0x89, 0xcf, 0xa7, 0x3e, 0xa9, 0x06, + 0xb6, 0x8f, 0xb8, 0xb8, 0xa0, 0x0b, 0x4b, 0x31, 0x48, 0xfc, 0xbd, 
0x54, 0x81, 0xfb, 0xe4, 0xc7, + 0xe3, 0xcf, 0x3a, 0x0d, 0x44, 0xd7, 0x6e, 0xf2, 0x3d, 0xe0, 0x09, 0xc9, 0xf5, 0x5c, 0xb0, 0x30, + 0x5a, 0xbd, 0xdb, 0x1e, 0x70, 0x3a, 0x6b, 0x02, 0xd7, 0x9a, 0x1e, 0xaf, 0xc4, 0x2b, 0xe7, 0x46, + 0x5b, 0x16, 0x87, 0x89, 0xb8, 0x47, 0x96, 0x61, 0xb8, 0x32, 0xe8, 0x87, 0xfb, 0xc6, 0x58, 0x53, + 0x8a, 0xa6, 0x1e, 0xe2, 0xe5, 0xfd, 0x46, 0x63, 0x55, 0x74, 0x28, 0xed, 0xbe, 0x37, 0x35, 0xed, + 0xa4, 0x32, 0x64, 0x61, 0x8f, 0x86, 0xcb, 0xd0, 0x83, 0x73, 0xaa, 0x5b, 0xc9, 0x1b, 0xb7, 0xa5, + 0xc5, 0xc9, 0x03, 0xc1, 0x6f, 0x48, 0xd0, 0xc8, 0x62, 0x6f, 0x76, 0x40, 0x39, 0x20, 0xde, 0xe6, + 0x58, 0x2a, 0xc2, 0x30, 0x9a, 0x3b, 0xae, 0x2f, 0x5f, 0x15, 0xb3, 0x8d, 0x15, 0x57, 0x77, 0x65, + 0x34, 0x19, 0x62, 0x5b, 0xb8, 0x43, 0xd9, 0xcd, 0x4d, 0x2a, 0xe4, 0xa0, 0x54, 0x8e, 0xae, 0xac, + 0x79, 0xbf, 0x35, 0x3b, 0xa9, 0x79, 0x2d, 0x29, 0x68, 0x0e, 0xaf, 0x06, 0x2c, 0x4b, 0x8b, 0xb6, + 0x27, 0x99, 0x5a, 0xd9, 0x30, 0xd6, 0x48, 0xc3, 0x08, 0x89, 0x07, 0x9a, 0x44, 0x6d, 0x27, 0xf2, + 0x4f, 0xab, 0x51, 0x76, 0x63, 0xaa, 0x22, 0x3a, 0x60, 0xd1, 0x76, 0x24, 0x72, 0x05, 0x48, 0x75, + 0xdc, 0xb8, 0xec, 0x86, 0x2c, 0x53, 0x26, 0x67, 0x4e, 0x92, 0xad, 0xc2, 0xcd, 0xe4, 0x73, 0xe4, + 0x3c, 0x69, 0xa8, 0xab, 0x97, 0x4c, 0x8d, 0xe3, 0x7e, 0xab, 0xf8, 0x0c, 0x66, 0x68, 0x0b, 0x36, + 0xa8, 0x5b, 0x8b, 0xbe, 0x5a, 0x4e, 0x04, 0xde, 0x53, 0x86, 0x3c, 0x51, 0xf5, 0xfe, 0x1e, 0x7b, + 0xdb, 0x9a, 0xaa, 0x90, 0x75, 0x98, 0x84, 0x5e, 0x92, 0x63, 0x14, 0x3b, 0xaa, 0xde, 0xe9, 0xe9, + 0xdc, 0x1e, 0x46, 0xd8, 0xb7, 0x47, 0x53, 0xed, 0x6b, 0x3a, 0x42, 0xc9, 0x32, 0xbf, 0x67, 0x6c, + 0x6f, 0xa0, 0xc7, 0xe8, 0xb1, 0x64, 0x6b, 0x58, 0x62, 0x22, 0xa5, 0x98, 0xd1, 0x44, 0x73, 0xa1, + 0x18, 0x64, 0xb6, 0x93, 0x33, 0xc5, 0xbc, 0x15, 0x40, 0x4c, 0x89, 0xd3, 0x63, 0xf5, 0x06, 0xf5, + 0x74, 0x29, 0x69, 0xa4, 0xc5, 0xa7, 0x97, 0xfd, 0xbd, 0x38, 0xa0, 0x8a, 0xf7, 0x34, 0x42, 0x35, + 0x12, 0x11, 0x18, 0x7d, 0x37, 0x78, 0x3e, 0xde, 0x8f, 0x5d, 0x72, 0x84, 0x3c, 0x97, 0xda, 0xa3, + 0x36, 0xd3, 0xde, 0x78, 0x80, 0xf9, 0x16, 0xae, 0x9b, 0x78, 0xc6, 0xf4, 0xa2, 0xde, 0x71, 0xb3, + 0x9e, 0xce, 0x3e, 0x2f, 0x4b, 0x4f, 0xfc, 0xda, 0x64, 0x9e, 0xb2, 0x20, 0x78, 0xf7, 0xa7, 0xaf, + 0x4c, 0xb0, 0x97, 0x02, 0x87, 0xaf, 0x0b, 0xd1, 0x4a, 0x64, 0x5a, 0x02, 0xe0, 0xb5, 0x33, 0xb1, + 0x5e, 0x24, 0x43, 0x72, 0x29, 0xaa, 0xce, 0x92, 0xd2, 0x6b, 0x68, 0x4a, 0xa1, 0x66, 0x1d, 0x2b, + 0x80, 0x50, 0xec, 0x8c, 0x10, 0xc1, 0xe8, 0xc4, 0x1b, 0x70, 0xd7, 0x03, 0xf6, 0x36, 0x60, 0x39, + 0xe7, 0xbc, 0xca, 0x94, 0x71, 0xb8, 0xef, 0x42, 0x5a, 0x5c, 0xce, 0xd3, 0x8f, 0xae, 0x8d, 0xe7, + 0x55, 0x92, 0x89, 0x01, 0x76, 0xf7, 0x69, 0x53, 0x67, 0x32, 0x77, 0x46, 0x0f, 0x87, 0x49, 0xc6, + 0xaf, 0x6e, 0xaa, 0xcc, 0x1f, 0x75, 0x23, 0x3a, 0x33, 0x75, 0x03, 0xcf, 0xb6, 0x71, 0xb3, 0x3f, + 0xc1, 0xec, 0x9c, 0x30, 0x4f, 0xf8, 0xa2, 0x5a, 0xeb, 0xc3, 0x0f, 0x92, 0xdc, 0xca, 0xe9, 0xe3, + 0x15, 0xc8, 0xbe, 0x7a, 0x6e, 0x84, 0xd7, 0x5e, 0x9a, 0xb1, 0x7c, 0x75, 0x4d, 0x0d, 0x9e, 0x84, + 0x16, 0x98, 0x7c, 0x60, 0x38, 0x61, 0x14, 0x32, 0x80, 0x03, 0xe7, 0xfa, 0x5c, 0x37, 0x95, 0xa8, + 0x92, 0x52, 0xd9, 0xd8, 0xb5, 0x10, 0xa9, 0xfd, 0xce, 0xe7, 0xcc, 0xcc, 0xc6, 0xa4, 0x10, 0x84, + 0x03, 0x1f, 0xeb, 0x94, 0x42, 0xc1, 0x50, 0xba, 0xfd, 0x99, 0x35, 0x76, 0xab, 0x9c, 0xd6, 0xdc, + 0x69, 0xbc, 0x4f, 0x0f, 0x08, 0xd8, 0xb3, 0x2a, 0xef, 0xb6, 0x66, 0x68, 0xec, 0x12, 0x8d, 0x62, + 0xe4, 0x72, 0x63, 0x33, 0x46, 0x71, 0xd1, 0x9a, 0x84, 0x35, 0xd9, 0xe7, 0x5f, 0xf8, 0x72, 0x44, + 0x2f, 0x17, 0x52, 0xc7, 0xf2, 0xc1, 0x03, 0xee, 0xa7, 0x20, 0xad, 0x0f, 0xdd, 0xce, 0x9a, 
0x20, + 0x52, 0xd2, 0x73, 0xfa, 0xb3, 0x33, 0x34, 0xf2, 0x97, 0xbf, 0x3e, 0x8a, 0x66, 0xbc, 0x98, 0x7d, + 0xc7, 0xfa, 0x90, 0xc2, 0xb6, 0x5c, 0xfc, 0x43, 0x95, 0xae, 0x6e, 0x96, 0xbc, 0x68, 0x6d, 0x1a, + 0xbc, 0x63, 0x35, 0xcf, 0xf2, 0x4e, 0xb5, 0x95, 0x43, 0x67, 0xd8, 0xc3, 0x6e, 0x35, 0x13, 0x65, + 0x17, 0x09, 0xfc, 0x2e, 0x5f, 0x37, 0xde, 0x4f, 0x45, 0xd0, 0xc3, 0x69, 0xa7, 0x6a, 0x80, 0x91, + 0x18, 0xa8, 0x79, 0x20, 0xac, 0x12, 0x4a, 0x0a, 0x5a, 0x88, 0x3e, 0x43, 0xa8, 0x6e, 0x25, 0xbd, + 0x35, 0xed, 0x6f, 0x14, 0xff, 0x1e, 0xdc, 0xac, 0x85, 0x7c, 0x1f, 0xc6, 0x4c, 0x3e, 0x20, 0xb3, + 0xe6, 0xc7, 0x7d, 0x72, 0x10, 0xe6, 0x10, 0x55, 0x8f, 0xff, 0x28, 0xba, 0xcb, 0x65, 0x2f, 0x67, + 0xd4, 0x74, 0xf5, 0xd1, 0x58, 0x28, 0x83, 0xf3, 0xd3, 0x0b, 0xb9, 0x6a, 0xab, 0xc3, 0x40, 0x88, + 0x2e, 0x2f, 0x38, 0x6f, 0x53, 0xcb, 0x23, 0x41, 0x17, 0xe7, 0xaa, 0xf4, 0x12, 0x7e, 0x4f, 0xd7, + 0x3b, 0xf1, 0xe5, 0xeb, 0xbc, 0x1b, 0xf3, 0xfc, 0x74, 0x3d, 0x75, 0x75, 0xfb, 0xec, 0x9a, 0x25, + 0xa4, 0x0f, 0xea, 0x33, 0x31, 0xa7, 0x8e, 0xb8, 0xdb, 0x60, 0x2c, 0x36, 0xdb, 0x69, 0x5f, 0x52, + 0xca, 0xe4, 0x18, 0xc3, 0xf9, 0xbc, 0x5e, 0xaf, 0xcf, 0x06, 0xcd, 0x37, 0x13, 0x22, 0xe5, 0x09, + 0x77, 0xfc, 0x59, 0xfc, 0x7c, 0x93, 0xa2, 0x8c, 0x9a, 0x1b, 0x64, 0x32, 0x89, 0x9d, 0x71, 0x54, + 0x44, 0x19, 0xa7, 0xdd, 0xb6, 0x9d, 0x93, 0xc2, 0x5d, 0x5c, 0x6f, 0x74, 0x89, 0x59, 0x22, 0x40, + 0xb8, 0x71, 0xc5, 0x34, 0x42, 0x3c, 0x40, 0x3a, 0x7d, 0xd4, 0x44, 0xbe, 0x47, 0x14, 0x3f, 0x74, + 0x46, 0xe1, 0xca, 0x96, 0x4f, 0x87, 0x0c, 0xe6, 0x51, 0x92, 0x3e, 0x11, 0x96, 0xf7, 0x6b, 0x92, + 0x9b, 0x2f, 0x73, 0xfc, 0xa2, 0x28, 0xc8, 0xfa, 0x3c, 0x42, 0xda, 0x05, 0x32, 0x2b, 0xf2, 0xc9, + 0xbe, 0x22, 0x08, 0xc4, 0x69, 0x93, 0x8b, 0xaa, 0xc1, 0xbd, 0xfa, 0x21, 0xec, 0x37, 0xe2, 0xea, + 0xf4, 0xcd, 0x91, 0x82, 0x4a, 0x6c, 0x19, 0x93, 0xfb, 0x2c, 0x45, 0xfa, 0x53, 0xc6, 0x2c, 0xab, + 0x0b, 0x92, 0x3b, 0xac, 0xfe, 0x9a, 0xfd, 0xa7, 0x5e, 0x94, 0x5d, 0x41, 0x67, 0xa8, 0xc3, 0xf9, + 0xac, 0x7f, 0x99, 0x7d, 0x1e, 0xea, 0xb5, 0xdb, 0x99, 0x85, 0x3d, 0x37, 0xa3, 0x24, 0x5f, 0x69, + 0x51, 0x09, 0xe1, 0x26, 0x2b, 0xb7, 0x4c, 0x37, 0xc6, 0xa2, 0xff, 0x53, 0x94, 0x2e, 0x9e, 0xc9, + 0xac, 0x8e, 0x47, 0x8a, 0xa6, 0xb8, 0x2e, 0xe9, 0xb5, 0x94, 0x2d, 0x9a, 0x38, 0x63, 0xc0, 0xdb, + 0x2f, 0x8e, 0xc5, 0xa4, 0xa0, 0x3d, 0xd8, 0x4e, 0x55, 0xa2, 0xff, 0x0d, 0x11, 0xfb, 0xdf, 0x44, + 0xc3, 0x8f, 0xed, 0x10, 0xeb, 0xbc, 0x26, 0x2a, 0x03, 0xf3, 0x59, 0x32, 0xa9, 0x84, 0x94, 0xb5, + 0x56, 0x45, 0x91, 0x9a, 0xe3, 0xc3, 0x15, 0x3f, 0x6b, 0x1f, 0xbf, 0x3a, 0x9e, 0xe9, 0xab, 0x6d, + 0x8a, 0x69, 0x23, 0x08, 0xdc, 0x01, 0xdd, 0xff, 0xca, 0x45, 0x24, 0xab, 0x3a, 0xe0, 0x07, 0xef, + 0xf7, 0x1f, 0x4e, 0x6d, 0x5f, 0x7a, 0x1e, 0xb5, 0xe4, 0xed, 0x3f, 0x82, 0x75, 0x4a, 0x67, 0xe1, + 0xfb, 0xf7, 0x4e, 0x7d, 0xe1, 0xbf, 0xb3, 0x5c, 0x69, 0x8e, 0x53, 0x1b, 0xb7, 0xe6, 0x4c, 0xa3, + 0xa2, 0x24, 0x19, 0x6e, 0x69, 0x35, 0x68, 0x13, 0x32, 0xb2, 0xd1, 0xa9, 0x37, 0xc9, 0xa6, 0x6a, + 0x19, 0xe3, 0x4a, 0xc3, 0x6d, 0xde, 0x2e, 0xb7, 0x8b, 0xda, 0xca, 0xa8, 0xbe, 0x41, 0x67, 0xd0, + 0x25, 0x86, 0xa0, 0x8e, 0x9c, 0x53, 0x85, 0x6a, 0xf6, 0xcf, 0xce, 0xa2, 0xa3, 0x2b, 0xa9, 0x95, + 0x65, 0x49, 0xfd, 0x04, 0x41, 0x93, 0xe7, 0x0b, 0xd8, 0x59, 0x4f, 0x40, 0x0c, 0x4c, 0x4e, 0x80, + 0x9f, 0xe7, 0x14, 0x0d, 0x7f, 0xa9, 0x9a, 0x68, 0x1b, 0xeb, 0xad, 0x07, 0x45, 0xeb, 0x82, 0x5e, + 0xd0, 0x59, 0xcc, 0x6d, 0xbd, 0x45, 0x6c, 0xeb, 0x7d, 0xe3, 0x5f, 0x6e, 0xa3, 0xc6, 0x2f, 0xfb, + 0xd5, 0x8b, 0xf9, 0x5e, 0xfb, 0xd3, 0x3e, 0x0b, 0xfb, 0x8b, 0xf7, 0x79, 0x96, 0xd1, 0x7c, 0x9e, + 0xba, 0xd5, 0x24, 
0x35, 0x31, 0xd1, 0x77, 0x8f, 0x74, 0xed, 0xfa, 0x92, 0xa9, 0xa8, 0xaa, 0x56, + 0x16, 0x7e, 0xc7, 0x79, 0x21, 0x31, 0x81, 0x59, 0x35, 0x8f, 0x56, 0xef, 0x04, 0xb0, 0x18, 0x97, + 0x31, 0xe2, 0xc3, 0x10, 0xee, 0x3c, 0xc9, 0x4c, 0x68, 0x17, 0x69, 0x6a, 0xf0, 0xe5, 0x89, 0x56, + 0x75, 0x6a, 0x9e, 0x03, 0x9a, 0xb7, 0xf2, 0x1b, 0x0c, 0x20, 0x19, 0x1d, 0xd6, 0x82, 0x9a, 0x1f, + 0x14, 0x97, 0xa1, 0x06, 0xfa, 0x5f, 0x7b, 0x87, 0xa5, 0x14, 0x39, 0xc9, 0xb4, 0xca, 0xe7, 0xad, + 0x23, 0x71, 0x0d, 0x7a, 0x12, 0x6c, 0x99, 0x15, 0xf8, 0xc3, 0x17, 0xb8, 0x6b, 0xd7, 0xac, 0x6f, + 0x9d, 0x2b, 0x75, 0x0f, 0x6f, 0x3e, 0x8a, 0x67, 0x22, 0xcb, 0xed, 0x10, 0xb5, 0xca, 0x5c, 0x6f, + 0xe6, 0xee, 0x7f, 0x2a, 0x15, 0x94, 0xb9, 0xd9, 0xf8, 0xd2, 0x5f, 0x8f, 0xf3, 0x65, 0x65, 0x25, + 0x52, 0x1d, 0x3c, 0xce, 0xf4, 0x4e, 0xba, 0x70, 0x79, 0x1e, 0x59, 0x75, 0x2b, 0x39, 0x21, 0x23, + 0xc2, 0x5c, 0x6c, 0x69, 0x69, 0xea, 0x45, 0x9c, 0x57, 0x92, 0xa6, 0xaf, 0x72, 0x15, 0x13, 0x4a, + 0x8a, 0x88, 0xce, 0x33, 0xc3, 0xcc, 0x0b, 0x3e, 0x79, 0xcd, 0x8f, 0x90, 0x02, 0x99, 0xef, 0xce, + 0x13, 0x1c, 0xac, 0x05, 0x6c, 0x08, 0x8f, 0x83, 0x6b, 0xeb, 0x2a, 0x6a, 0x7c, 0xe6, 0x26, 0x81, + 0x89, 0x5c, 0xed, 0x5d, 0x31, 0x04, 0x44, 0x83, 0x01, 0x9d, 0x68, 0x3e, 0x06, 0x29, 0x45, 0x5e, + 0xb8, 0x3b, 0x4a, 0x35, 0x99, 0x66, 0x02, 0x04, 0x8f, 0xf4, 0x92, 0x5e, 0x2a, 0xee, 0x9b, 0xc8, + 0x4d, 0x49, 0x0d, 0xad, 0x99, 0x61, 0xfb, 0xbd, 0x6d, 0xda, 0x88, 0x9b, 0xac, 0x8e, 0xf3, 0x0a, + 0x93, 0xac, 0x85, 0x10, 0x6e, 0xf7, 0xf2, 0xa5, 0x1b, 0xdf, 0x5f, 0x7c, 0x9a, 0x06, 0xcf, 0x91, + 0xcb, 0xc9, 0xf6, 0xa2, 0x8b, 0xcd, 0xe5, 0x6d, 0x10, 0x14, 0x1e, 0x47, 0x9b, 0xd4, 0x1e, 0xd1, + 0xec, 0xfa, 0xba, 0xb3, 0xc5, 0x4e, 0x7a, 0xfb, 0x47, 0x21, 0xd9, 0x92, 0x72, 0xd0, 0x83, 0xf0, + 0xad, 0x7a, 0x6a, 0x0e, 0x0e, 0x93, 0x44, 0xc6, 0x53, 0x4b, 0xae, 0x51, 0x42, 0x35, 0x6e, 0x28, + 0x08, 0xfb, 0x95, 0x8f, 0xa8, 0xda, 0xd5, 0xf4, 0x9f, 0x3b, 0x91, 0x75, 0xee, 0x9f, 0x9d, 0x33, + 0x3e, 0xae, 0x35, 0x79, 0x03, 0x9a, 0xb6, 0xe2, 0xd2, 0xc6, 0xa6, 0x31, 0x86, 0x74, 0xee, 0x08, + 0xb5, 0x81, 0x83, 0xc8, 0x29, 0x27, 0x9c, 0x58, 0x0a, 0x27, 0x32, 0x34, 0x26, 0x02, 0x19, 0x6c, + 0x8d, 0x13, 0x3f, 0xf3, 0x96, 0x9d, 0x06, 0x0a, 0xd6, 0xbe, 0x01, 0x8a, 0xa0, 0x15, 0x7f, 0xba, + 0x57, 0xca, 0xc9, 0x5d, 0x22, 0xd9, 0xf8, 0x7f, 0x1e, 0x0e, 0x92, 0xc1, 0x69, 0x4d, 0xb1, 0xc1, + 0x9b, 0xbf, 0xe1, 0x2d, 0x11, 0xd0, 0xbc, 0x5c, 0xe2, 0x4f, 0x07, 0x8b, 0x17, 0xd5, 0x3b, 0x32, + 0xda, 0xfd, 0x20, 0xd2, 0xd4, 0xda, 0x2e, 0xea, 0x58, 0x1f, 0x4d, 0x78, 0x33, 0xff, 0x4f, 0xe1, + 0x68, 0xd3, 0xf6, 0x5f, 0x48, 0x7e, 0x5a, 0x44, 0xb0, 0x29, 0xef, 0x4f, 0x3c, 0xe3, 0x9b, 0x8b, + 0xfb, 0x6e, 0x7a, 0xac, 0x4a, 0x50, 0x01, 0x17, 0x7c, 0x5e, 0x7c, 0x0e, 0xe2, 0xf5, 0xb3, 0x08, + 0x53, 0x1b, 0x31, 0x04, 0xb3, 0x16, 0x11, 0x69, 0x26, 0xca, 0xc3, 0xf1, 0xe9, 0x3e, 0x4b, 0xb3, + 0xff, 0x9e, 0xfa, 0xd3, 0xa0, 0xd7, 0x5e, 0x08, 0x82, 0xa6, 0xfa, 0x2a, 0x7f, 0x37, 0x60, 0x77, + 0xe5, 0xa9, 0xfc, 0xf4, 0x90, 0x20, 0x8d, 0xb8, 0x15, 0xfa, 0x63, 0x54, 0xf5, 0x45, 0xdd, 0xa7, + 0xe2, 0x6c, 0xa7, 0x87, 0x95, 0x0d, 0x33, 0x39, 0x7d, 0xca, 0x2f, 0x05, 0xcd, 0xdb, 0x1a, 0xae, + 0x4b, 0x25, 0x7b, 0x3d, 0xbf, 0xeb, 0x8e, 0x6c, 0xce, 0x26, 0x2f, 0x7a, 0xe7, 0xaa, 0xa5, 0xd3, + 0xa0, 0x2b, 0xe6, 0x5e, 0xd6, 0x7d, 0xb9, 0xd3, 0xe9, 0x9d, 0x15, 0xe9, 0x9c, 0xa6, 0xdd, 0x1f, + 0x45, 0x36, 0xdf, 0xfb, 0x2b, 0x69, 0x2b, 0x79, 0x31, 0xd1, 0x3b, 0x75, 0xb6, 0x72, 0x26, 0x5f, + 0x44, 0x2e, 0x6c, 0x7d, 0x5d, 0x4a, 0x19, 0x91, 0x64, 0xd2, 0xd2, 0x55, 0x6b, 0x0c, 0xf9, 0x7b, + 0xbd, 0x8a, 0x91, 0xe6, 0x56, 0x7e, 0xf4, 
0xd5, 0xe1, 0x3b, 0xc5, 0xe5, 0xef, 0x76, 0x59, 0xfc, + 0x9e, 0x51, 0xf0, 0x09, 0x3a, 0x1f, 0x1f, 0x29, 0xaf, 0xf5, 0x53, 0xbd, 0xb1, 0xdc, 0x18, 0x16, + 0x66, 0xa5, 0x4e, 0x0b, 0x96, 0x04, 0xe2, 0xc5, 0x34, 0x83, 0xc8, 0x39, 0x74, 0x5e, 0x48, 0xfa, + 0xc3, 0x51, 0x69, 0x18, 0x1a, 0xdb, 0x2b, 0x32, 0x41, 0x61, 0xae, 0x8e, 0x9d, 0x0a, 0xc8, 0xe5, + 0x6d, 0x01, 0xe9, 0x8b, 0x54, 0xd8, 0x91, 0x6b, 0x81, 0x82, 0xe5, 0xf6, 0x84, 0xc1, 0xbb, 0x4c, + 0x36, 0xf2, 0xc7, 0x4c, 0xe9, 0xa2, 0xf9, 0x36, 0xda, 0x87, 0xc1, 0xdd, 0x99, 0xa5, 0x8e, 0xef, + 0x3f, 0xff, 0x6e, 0x9c, 0xc7, 0xf2, 0x6d, 0xeb, 0xa4, 0x6e, 0xcc, 0x1d, 0xc7, 0xd6, 0x91, 0x67, + 0x08, 0xe5, 0x67, 0x77, 0xd5, 0x88, 0xc7, 0x8f, 0xa7, 0x62, 0x86, 0x35, 0x88, 0x6b, 0x18, 0xbd, + 0xb6, 0x68, 0x0b, 0x20, 0xf3, 0xb8, 0x1b, 0x97, 0xa5, 0x6a, 0x1b, 0x36, 0xb7, 0x32, 0xb0, 0x70, + 0xe7, 0x9e, 0xcb, 0x94, 0x4e, 0x8d, 0x38, 0x7f, 0x89, 0xff, 0x23, 0xcd, 0x19, 0xdd, 0x63, 0x18, + 0x68, 0x6c, 0x7f, 0x08, 0x37, 0x49, 0xb8, 0x37, 0x7c, 0xc5, 0x88, 0x23, 0xe5, 0x59, 0xd4, 0x8a, + 0x14, 0x03, 0xa2, 0x66, 0x85, 0x9e, 0x6b, 0x2f, 0x56, 0xd3, 0xd9, 0xb8, 0xa5, 0x44, 0x4d, 0x93, + 0x7b, 0xd6, 0x14, 0xe4, 0xb3, 0xb6, 0xa6, 0xb9, 0x20, 0x88, 0x28, 0xf1, 0x87, 0xf7, 0x8b, 0x47, + 0x6d, 0x50, 0x01, 0xc4, 0x66, 0x15, 0x67, 0xb8, 0xb1, 0x4c, 0x8c, 0x1f, 0xac, 0xdf, 0xff, 0x38, + 0x9e, 0x6c, 0x35, 0x57, 0x7f, 0x21, 0xa0, 0xf1, 0x7e, 0x79, 0xac, 0xcc, 0x79, 0xb7, 0x22, 0xd0, + 0x13, 0xab, 0x85, 0xac, 0x8d, 0x07, 0x14, 0x13, 0x45, 0x54, 0x84, 0xdb, 0x7d, 0xf5, 0x60, 0x7d, + 0x66, 0xc3, 0x39, 0x71, 0xd2, 0x38, 0x12, 0xaf, 0x0e, 0xbe, 0xae, 0x9e, 0x1b, 0xa2, 0xde, 0xc5, + 0x39, 0x44, 0x84, 0x41, 0x58, 0xac, 0x4d, 0x5f, 0x59, 0x3e, 0xdb, 0x91, 0xd8, 0xe1, 0x1d, 0xc1, + 0x60, 0xbb, 0x0d, 0x02, 0xc9, 0xa7, 0x41, 0x05, 0xdf, 0xb2, 0xbe, 0x4d, 0xc2, 0x93, 0x45, 0x21, + 0x38, 0x76, 0x07, 0x01, 0x44, 0x7e, 0x7d, 0x58, 0x06, 0x0d, 0x44, 0xa3, 0x09, 0x5b, 0xbf, 0x8b, + 0xf9, 0xee, 0x21, 0xa8, 0x76, 0xbe, 0xea, 0xe4, 0xd8, 0x13, 0xc7, 0xc6, 0xc5, 0xf4, 0xc2, 0x8f, + 0x39, 0x50, 0x02, 0x0e, 0xf2, 0x8b, 0x17, 0xfd, 0x89, 0x8a, 0xb0, 0xf3, 0x61, 0x2f, 0xc3, 0x28, + 0x86, 0xde, 0x55, 0xdd, 0xc3, 0xfe, 0x91, 0x18, 0x9b, 0x0c, 0x6a, 0x2c, 0xff, 0x21, 0x91, 0xf7, + 0x33, 0x4d, 0x0f, 0x9d, 0xcc, 0x78, 0x27, 0xb6, 0x58, 0x52, 0x6a, 0x22, 0xa5, 0xbc, 0xae, 0xe0, + 0xa7, 0x5b, 0xd3, 0x83, 0x9b, 0x4f, 0xd0, 0x97, 0x88, 0xff, 0xbc, 0xb7, 0xb3, 0xdc, 0xd6, 0x18, + 0xef, 0xde, 0x4d, 0x8e, 0xfa, 0x2d, 0xb6, 0x52, 0xe4, 0x29, 0xf1, 0xa9, 0x38, 0x8a, 0x84, 0x50, + 0x26, 0xf8, 0x55, 0x30, 0x93, 0x13, 0xe1, 0x32, 0xf9, 0x4e, 0xfc, 0x6d, 0x42, 0xe1, 0xaf, 0x47, + 0xbd, 0x36, 0x1f, 0xfe, 0xd7, 0xda, 0xbb, 0x0c, 0x6f, 0xe3, 0xb1, 0x7d, 0xb7, 0x31, 0xbb, 0x86, + 0xd7, 0xae, 0x3e, 0x9e, 0x86, 0xfb, 0x08, 0xf2, 0x18, 0xae, 0x56, 0x0f, 0xbf, 0xc0, 0xf6, 0xbf, + 0x8a, 0x79, 0x81, 0x4b, 0x90, 0xc6, 0xb4, 0xd4, 0x85, 0x69, 0xb1, 0x5f, 0x73, 0xbf, 0xf3, 0x64, + 0xab, 0xdb, 0x59, 0xe0, 0xe1, 0xe8, 0x8a, 0xd7, 0x4a, 0x77, 0x9d, 0x6d, 0xdf, 0x4b, 0x5f, 0x62, + 0x00, 0x27, 0xe3, 0xf5, 0xcf, 0xe0, 0xb0, 0x7d, 0xa0, 0xb2, 0xb5, 0x28, 0xbd, 0xfe, 0x73, 0x4b, + 0x09, 0x86, 0x61, 0x59, 0x7e, 0xf7, 0x29, 0x6a, 0x8f, 0x1d, 0x25, 0x25, 0xbe, 0xa6, 0x19, 0xd8, + 0xb0, 0xba, 0x35, 0xab, 0x15, 0xdb, 0x68, 0x57, 0xc6, 0xc7, 0xb8, 0x4c, 0x0a, 0x32, 0x5a, 0x4b, + 0x55, 0x1c, 0x88, 0x45, 0xa3, 0xd6, 0xd1, 0x63, 0x38, 0xf7, 0x59, 0x7e, 0xfe, 0xcd, 0xb5, 0x9c, + 0x95, 0x41, 0xbc, 0xac, 0xb3, 0x21, 0x27, 0xf5, 0xc7, 0xe6, 0x97, 0x68, 0x82, 0x4a, 0xa6, 0xdb, + 0x10, 0xfa, 0x05, 0xac, 0x43, 0x91, 0x86, 0x71, 0x13, 0x50, 0x43, 
0xcf, 0xa7, 0xa1, 0x9c, 0x39, + 0x1e, 0xb0, 0xbf, 0xec, 0x6c, 0xcc, 0xbd, 0x2d, 0x2f, 0x43, 0xf2, 0xee, 0xf0, 0x51, 0x27, 0xd4, + 0x42, 0x54, 0x42, 0xbb, 0x8d, 0x18, 0x9b, 0xcd, 0x09, 0x8e, 0x11, 0x34, 0x03, 0x29, 0xf1, 0xae, + 0x28, 0x77, 0x01, 0x46, 0xf7, 0x97, 0xb6, 0xdf, 0xff, 0x98, 0x02, 0xfc, 0x9d, 0x2d, 0x6f, 0xc3, + 0x2f, 0x67, 0xc3, 0x3a, 0xc6, 0xd8, 0xc8, 0x03, 0x85, 0xa0, 0x64, 0x65, 0x9e, 0x55, 0x46, 0xbf, + 0x4e, 0x59, 0x28, 0x54, 0xba, 0xc0, 0xa3, 0x04, 0xad, 0xf0, 0xac, 0x4f, 0x89, 0xca, 0x63, 0xaa, + 0x67, 0xe0, 0x1c, 0x04, 0x40, 0x69, 0x72, 0xc2, 0xe5, 0x5e, 0x92, 0x3e, 0x25, 0x38, 0x57, 0x7e, + 0x9b, 0xad, 0x9f, 0x28, 0x9f, 0xb2, 0x98, 0xe5, 0xd5, 0x38, 0x49, 0xea, 0x96, 0xf4, 0xad, 0x1f, + 0x25, 0xeb, 0x3e, 0x55, 0xfd, 0x96, 0x14, 0x4e, 0xf5, 0x44, 0x6f, 0xda, 0x43, 0xa0, 0xfc, 0xe9, + 0xc9, 0xf7, 0xaf, 0x50, 0xa7, 0x6f, 0x12, 0xbd, 0x12, 0xa2, 0x04, 0x40, 0x40, 0xbf, 0x49, 0x99, + 0xd8, 0x1a, 0x4f, 0x11, 0xd9, 0x4d, 0x0b, 0xce, 0xc0, 0x04, 0x5f, 0x5a, 0xda, 0x6e, 0x32, 0x75, + 0xe5, 0x73, 0x3f, 0xc5, 0x14, 0xfc, 0x58, 0x2c, 0x90, 0xa1, 0x09, 0x6b, 0x31, 0xf8, 0x56, 0x29, + 0x93, 0x69, 0x96, 0xef, 0x56, 0xd1, 0xe5, 0x86, 0xd4, 0x86, 0xd8, 0x07, 0x78, 0x56, 0x5e, 0xe9, + 0xa2, 0x4e, 0x70, 0x3f, 0xbf, 0xd9, 0xfe, 0x2c, 0x10, 0xc5, 0x7f, 0xdb, 0xe6, 0xf1, 0x4d, 0xa2, + 0x57, 0x4b, 0x62, 0xea, 0xe3, 0x5b, 0x6d, 0x28, 0x7e, 0x3f, 0x9d, 0x1c, 0xd9, 0xac, 0x7f, 0xb5, + 0x83, 0x15, 0x35, 0x01, 0x63, 0x14, 0xd0, 0xbc, 0x31, 0x2d, 0xab, 0xcf, 0xa3, 0x84, 0xd2, 0x35, + 0x1f, 0xbe, 0x1c, 0x7d, 0xd9, 0x3a, 0x68, 0x36, 0x1d, 0xd9, 0xdc, 0x4a, 0x86, 0x4c, 0x9b, 0xa0, + 0xc0, 0x1c, 0x7d, 0x00, 0x14, 0x44, 0x5a, 0x74, 0x46, 0xbf, 0x12, 0x13, 0x96, 0x94, 0x6b, 0x91, + 0x63, 0xdb, 0xb1, 0xd9, 0xc1, 0x84, 0xd0, 0xcc, 0x0b, 0x15, 0xa8, 0xbb, 0x7b, 0x3d, 0x1a, 0x94, + 0x5e, 0x48, 0xe7, 0x07, 0x80, 0xef, 0x95, 0x87, 0x8f, 0x5f, 0xce, 0xec, 0x3b, 0x3b, 0xb1, 0xae, + 0x24, 0xdb, 0x1b, 0xd9, 0x2c, 0x31, 0xf6, 0x84, 0x65, 0xf3, 0x9b, 0xd5, 0xa1, 0xaf, 0xae, 0xc5, + 0x6f, 0x80, 0xef, 0x69, 0x02, 0x9a, 0x3f, 0xc2, 0x4b, 0xc6, 0xd7, 0x66, 0x9e, 0x68, 0x54, 0xd6, + 0x15, 0x66, 0x14, 0x9a, 0x20, 0x5b, 0x21, 0xe7, 0xa1, 0x7c, 0x6d, 0xc4, 0xda, 0x44, 0x78, 0x12, + 0xd5, 0x46, 0xa4, 0xfc, 0xae, 0xb2, 0xed, 0xbc, 0xaf, 0x24, 0x2d, 0x6d, 0x7d, 0x5c, 0xa8, 0xc5, + 0x3c, 0xdd, 0xc9, 0xea, 0x28, 0x9b, 0xc9, 0x99, 0x08, 0xdc, 0x34, 0x12, 0xcb, 0xc4, 0x52, 0xb8, + 0xed, 0x95, 0x1e, 0x4d, 0x51, 0x8d, 0x82, 0x76, 0x99, 0xb5, 0xd1, 0x41, 0x98, 0x38, 0x97, 0xbd, + 0x43, 0xd8, 0xd7, 0xbd, 0x7a, 0xc4, 0xac, 0x8c, 0x0f, 0x32, 0x88, 0x1e, 0xcd, 0x2c, 0xfd, 0xf4, + 0x60, 0xe3, 0x0a, 0x7b, 0xbf, 0x15, 0xe1, 0x51, 0x92, 0x2d, 0x9c, 0x08, 0x53, 0x7c, 0xef, 0x23, + 0x39, 0xfb, 0x0e, 0x3b, 0x43, 0xd2, 0x00, 0x5b, 0x14, 0xb6, 0xc1, 0x30, 0x6c, 0x7f, 0xf5, 0x25, + 0x89, 0x86, 0x72, 0xd9, 0x88, 0x7d, 0x30, 0xc9, 0x54, 0xd0, 0x98, 0x9c, 0x40, 0x43, 0x17, 0xe1, + 0xbb, 0x4d, 0x4d, 0x69, 0xfa, 0x4d, 0xac, 0x82, 0x21, 0xa4, 0x96, 0x62, 0x33, 0xc3, 0x9e, 0x79, + 0xdf, 0x9d, 0x9b, 0x57, 0xb7, 0xe2, 0x2d, 0xbd, 0x49, 0x09, 0x68, 0xe0, 0x8e, 0x3f, 0x4e, 0xaa, + 0x52, 0x15, 0x01, 0x09, 0x80, 0x42, 0xfa, 0x6b, 0xaa, 0x10, 0x09, 0x9f, 0xb6, 0x34, 0xa2, 0x03, + 0x9b, 0x55, 0xfc, 0xbe, 0x04, 0xcf, 0xfa, 0x2f, 0xae, 0x67, 0xcb, 0xa8, 0x6d, 0xda, 0xa9, 0xa3, + 0xeb, 0xc4, 0xe5, 0xb2, 0x4b, 0xdf, 0x25, 0x14, 0x0f, 0xab, 0x54, 0xf6, 0x14, 0x74, 0xe0, 0x09, + 0x8c, 0x6b, 0xe2, 0xfc, 0x92, 0xc9, 0x79, 0x99, 0xa0, 0xde, 0xa3, 0x7e, 0x41, 0xdd, 0x25, 0x43, + 0x00, 0xbf, 0xb9, 0x3d, 0xd5, 0xa2, 0xb4, 0xae, 0xf1, 0x53, 0x38, 0x99, 0x57, 0x40, 0x4e, 
0xd0, + 0xb6, 0x5d, 0x8b, 0x4c, 0x4c, 0x51, 0x91, 0x5e, 0x85, 0xf7, 0x08, 0x63, 0xce, 0x48, 0x87, 0x0c, + 0xfd, 0xd5, 0x2d, 0x19, 0xd5, 0x09, 0x2a, 0x90, 0x82, 0xd6, 0xb8, 0x07, 0xcc, 0x5f, 0x48, 0x5e, + 0xea, 0x47, 0xc0, 0x5d, 0xd1, 0x88, 0x68, 0xe5, 0x1a, 0xe3, 0x82, 0xff, 0xd3, 0x9f, 0x7c, 0x12, + 0xfb, 0x52, 0xc4, 0x8b, 0x15, 0xe2, 0x6d, 0x51, 0x9d, 0x1e, 0xcb, 0x11, 0x11, 0x42, 0x5f, 0x69, + 0x73, 0x11, 0x19, 0x3b, 0xd3, 0x67, 0x17, 0xda, 0xbd, 0x97, 0x52, 0x03, 0x46, 0x7a, 0xd7, 0x8e, + 0x01, 0x7c, 0xdd, 0xbc, 0x24, 0x30, 0xc4, 0xd5, 0x7b, 0xef, 0xe9, 0xda, 0x83, 0x1c, 0xbe, 0x47, + 0xa0, 0xb7, 0x56, 0x26, 0x6c, 0xc8, 0xba, 0x03, 0x57, 0x1e, 0x5d, 0x06, 0x12, 0x07, 0x55, 0x77, + 0x5e, 0x03, 0x87, 0xc3, 0x8b, 0xf9, 0x51, 0xfb, 0xee, 0xb4, 0xff, 0xb2, 0x14, 0xad, 0x43, 0x28, + 0x17, 0x89, 0xfd, 0x4a, 0x5e, 0x56, 0x99, 0xce, 0xc8, 0xec, 0xd8, 0x78, 0x4c, 0x0d, 0xbc, 0xc6, + 0x01, 0x91, 0xef, 0x55, 0x10, 0xc6, 0x85, 0x6b, 0xf1, 0x8f, 0x11, 0xf3, 0x14, 0x33, 0x06, 0x1d, + 0x97, 0x37, 0xdd, 0x57, 0x0c, 0x4b, 0x97, 0xe4, 0x71, 0xea, 0xaa, 0xb4, 0xf5, 0xe8, 0xff, 0xb3, + 0x2b, 0x97, 0x61, 0x1c, 0xb4, 0x50, 0x5e, 0xd9, 0x99, 0xa1, 0xc5, 0x83, 0x53, 0x17, 0x06, 0xe2, + 0xd0, 0x23, 0x1b, 0xfe, 0x02, 0xa7, 0xc9, 0x46, 0x4e, 0xb3, 0x81, 0x16, 0x44, 0x6a, 0xa9, 0xec, + 0x24, 0x06, 0x40, 0xe9, 0xd8, 0xdf, 0x13, 0x75, 0xfc, 0x30, 0xb2, 0xe3, 0x50, 0xa3, 0x0a, 0xe7, + 0xc5, 0x1e, 0xc1, 0x58, 0x7d, 0xd4, 0x3b, 0xa0, 0xaf, 0x12, 0xea, 0x82, 0xee, 0xd4, 0x9e, 0x17, + 0xa1, 0xb5, 0xee, 0xb1, 0x24, 0x5d, 0xea, 0xc0, 0x6b, 0x75, 0xc3, 0x62, 0x41, 0xa3, 0xff, 0xdc, + 0xbf, 0xc6, 0xb0, 0xe0, 0xfb, 0x4c, 0xce, 0x9a, 0xf4, 0xb7, 0x11, 0xb1, 0x64, 0xb0, 0x58, 0x17, + 0x8e, 0xf4, 0xef, 0x83, 0xfe, 0x83, 0x75, 0x09, 0xfa, 0x4e, 0x39, 0x62, 0x5c, 0xc6, 0xa7, 0x94, + 0xa8, 0x3a, 0x5b, 0x5a, 0xa9, 0xc2, 0xea, 0xc9, 0x69, 0x9f, 0x83, 0x0e, 0x7c, 0x57, 0x4d, 0x45, + 0xf0, 0x05, 0x09, 0x00, 0xe6, 0xa6, 0x8b, 0xda, 0x6f, 0xbc, 0x92, 0x4f, 0x9a, 0x5a, 0x92, 0xeb, + 0x75, 0x73, 0x77, 0x59, 0xdc, 0xb4, 0x7d, 0x5f, 0xdd, 0xed, 0x9e, 0xa2, 0xd8, 0x3a, 0x11, 0x40, + 0xf7, 0x28, 0xbf, 0x42, 0x4e, 0x73, 0xce, 0x5f, 0x0a, 0xdc, 0x6a, 0xce, 0xb7, 0x51, 0xd4, 0xfa, + 0x11, 0x09, 0x21, 0x70, 0xb0, 0x78, 0x10, 0xfa, 0x0b, 0xcb, 0xdc, 0x2c, 0x2f, 0x1c, 0x3d, 0x90, + 0x31, 0x15, 0xba, 0x53, 0xbc, 0x0b, 0x5a, 0xcc, 0x64, 0x16, 0x3e, 0x6f, 0x00, 0x7a, 0xf6, 0x49, + 0x1c, 0x1f, 0x9c, 0xdb, 0x4d, 0x3c, 0x73, 0x5d, 0x11, 0x33, 0x6c, 0xe6, 0xe1, 0xa8, 0xb1, 0x6e, + 0x5f, 0xa6, 0x85, 0x69, 0xee, 0x1b, 0x65, 0x14, 0x4f, 0x8c, 0xed, 0x28, 0x0d, 0x9d, 0x0c, 0xe6, + 0xaa, 0x8d, 0xba, 0x8d, 0xe8, 0xbe, 0x7a, 0x28, 0xe5, 0xbc, 0xc6, 0x84, 0x94, 0xe1, 0xd8, 0xeb, + 0x7a, 0x7d, 0x9f, 0x83, 0x59, 0xb0, 0xe7, 0x4f, 0x36, 0x92, 0x1c, 0xf8, 0x5f, 0xf6, 0x31, 0x9f, + 0x42, 0xf8, 0xbc, 0xad, 0x24, 0xcf, 0xbe, 0x9c, 0x03, 0x69, 0x81, 0xe4, 0x3e, 0x36, 0xdf, 0x21, + 0x14, 0xd0, 0x7d, 0x98, 0x16, 0xd7, 0xd2, 0x13, 0x80, 0x9b, 0x5d, 0x7c, 0x21, 0xbd, 0x22, 0x84, + 0xb5, 0xc2, 0x16, 0x99, 0xa5, 0x08, 0x03, 0xbe, 0x09, 0x34, 0x94, 0xb3, 0x35, 0x0f, 0x51, 0x80, + 0xec, 0xf4, 0xed, 0x92, 0x9f, 0xce, 0xcb, 0xfd, 0x6d, 0xd0, 0x48, 0xe7, 0xf5, 0x9d, 0x0e, 0x61, + 0xf7, 0xaa, 0xdd, 0xa7, 0x09, 0x12, 0xc4, 0xee, 0x17, 0xc3, 0x33, 0xc6, 0xa1, 0xd5, 0xc6, 0x3c, + 0xdf, 0x46, 0x70, 0x14, 0x0d, 0xbf, 0x39, 0x2f, 0x2b, 0xbf, 0xd3, 0x95, 0x06, 0xb5, 0x0c, 0xfc, + 0x78, 0x92, 0x69, 0x5d, 0xad, 0x69, 0x8d, 0x25, 0x2a, 0x4e, 0x41, 0xa9, 0x0a, 0x58, 0xfc, 0x33, + 0x3b, 0xe3, 0xa0, 0xc2, 0x0c, 0x24, 0x7e, 0x2a, 0x40, 0xa4, 0x25, 0xe8, 0x39, 0x6a, 0x7c, 0x1e, + 0xf8, 0xf6, 0xdb, 
0x78, 0x18, 0xe5, 0x7b, 0x89, 0x2e, 0x1e, 0xa6, 0xcf, 0xb7, 0x60, 0xf4, 0xdf, + 0x98, 0xcc, 0x5c, 0xb2, 0x60, 0xe9, 0xdc, 0x7b, 0xac, 0x77, 0xb8, 0x75, 0xd0, 0xb7, 0xbb, 0x4e, + 0x3d, 0x99, 0x1e, 0xdb, 0x54, 0x7b, 0xf4, 0xbd, 0xd5, 0x60, 0xe1, 0x0d, 0xa5, 0xab, 0x6a, 0x3f, + 0xd9, 0x97, 0x60, 0x95, 0x25, 0x3d, 0x8e, 0xaa, 0xfd, 0x82, 0xc6, 0x2a, 0x0f, 0x07, 0x45, 0xdd, + 0x22, 0xfa, 0xec, 0xda, 0x13, 0xf5, 0x1d, 0xc2, 0xf6, 0x1b, 0x21, 0x03, 0xb5, 0xfd, 0xf5, 0x5f, + 0x32, 0xcc, 0xb9, 0x18, 0x26, 0x4c, 0x8f, 0x76, 0x1b, 0x29, 0x8d, 0x31, 0x89, 0x1b, 0xa8, 0xe1, + 0xef, 0x7f, 0xbb, 0x0e, 0xaf, 0x0a, 0x11, 0x0b, 0x45, 0xde, 0xcf, 0x19, 0xfc, 0x02, 0x2d, 0x38, + 0xa9, 0xcc, 0xa6, 0xa6, 0x24, 0xae, 0x0e, 0xd6, 0x81, 0x39, 0xbf, 0x0f, 0x70, 0x97, 0xbe, 0xa2, + 0xbd, 0x13, 0x6e, 0x6a, 0xf8, 0x8d, 0xc7, 0x72, 0x72, 0xa9, 0x5e, 0xe5, 0x79, 0x53, 0x27, 0x20, + 0x69, 0x7a, 0x72, 0xdc, 0x94, 0x22, 0x1c, 0x3e, 0xd7, 0x4c, 0x53, 0xe2, 0x74, 0xa3, 0x51, 0xfa, + 0xc7, 0x07, 0x2a, 0x7c, 0x7e, 0x68, 0x13, 0x2e, 0x17, 0x6d, 0xd4, 0x37, 0xad, 0x8e, 0x28, 0x6c, + 0xb7, 0x10, 0xec, 0x0d, 0x0d, 0x51, 0xc0, 0xcc, 0x5d, 0xee, 0x1b, 0x9d, 0xb0, 0x66, 0x9b, 0xea, + 0x50, 0xd3, 0xac, 0xb6, 0x2d, 0x79, 0x3d, 0x63, 0x6e, 0xa3, 0x46, 0x96, 0x4c, 0x3d, 0x3b, 0x96, + 0x50, 0xde, 0x39, 0xff, 0xed, 0xee, 0xb3, 0x91, 0x3d, 0x31, 0x7d, 0x1d, 0x13, 0x57, 0xfa, 0x2a, + 0x4e, 0xec, 0x96, 0x55, 0x65, 0x4c, 0x95, 0x29, 0xea, 0xed, 0xeb, 0x9f, 0xf3, 0x67, 0x4b, 0x81, + 0x2f, 0xb9, 0x4f, 0x77, 0x58, 0xd2, 0x8b, 0x72, 0xa7, 0x04, 0x53, 0x60, 0x83, 0x28, 0x9b, 0x36, + 0xe7, 0x1a, 0xb1, 0xf1, 0xc6, 0xb3, 0x37, 0x91, 0x09, 0x32, 0x84, 0x4f, 0xe2, 0xe0, 0xdc, 0x23, + 0x8b, 0xf0, 0xa8, 0xa8, 0x84, 0x5f, 0x13, 0xc1, 0x0b, 0x95, 0x9e, 0xc0, 0xaf, 0x0f, 0x72, 0x02, + 0x3a, 0x7b, 0x74, 0x47, 0x38, 0x94, 0x6d, 0x4c, 0x12, 0x65, 0x6e, 0x10, 0xf7, 0x60, 0xa3, 0x16, + 0x3b, 0x22, 0xa7, 0xdd, 0x8b, 0x0b, 0x4d, 0x7b, 0xcc, 0x3b, 0xfb, 0x20, 0xe2, 0x78, 0xc7, 0x83, + 0xee, 0x17, 0x87, 0x6c, 0xda, 0xbc, 0x75, 0x99, 0xa8, 0xa0, 0xa3, 0x61, 0x93, 0xe7, 0xc8, 0x84, + 0x37, 0x86, 0xbf, 0x2c, 0xff, 0xd2, 0x3b, 0x1d, 0x37, 0x4a, 0x5d, 0x6b, 0xb1, 0x9a, 0x45, 0x16, + 0xaa, 0x87, 0x68, 0xca, 0x23, 0xab, 0x4a, 0xa0, 0xef, 0x86, 0x72, 0xd0, 0xb5, 0xe5, 0x86, 0x1b, + 0x8f, 0x8c, 0x31, 0xdb, 0xdb, 0x26, 0x21, 0x64, 0x65, 0xe2, 0x8c, 0x15, 0x67, 0xe9, 0x6f, 0xfc, + 0x3e, 0x8a, 0x2f, 0xb4, 0x7d, 0xf1, 0xf1, 0xd8, 0x14, 0xa5, 0xa2, 0x49, 0xf3, 0x54, 0x3a, 0x4e, + 0x43, 0x88, 0x72, 0x05, 0x26, 0xcc, 0x79, 0x3f, 0x89, 0xe7, 0xfe, 0x68, 0x67, 0x5d, 0x21, 0x73, + 0x7d, 0xe3, 0x59, 0x85, 0xeb, 0x31, 0xa5, 0x5d, 0xbc, 0x43, 0x96, 0x34, 0xf7, 0x1e, 0xdd, 0x6c, + 0x9d, 0xcc, 0x90, 0xc2, 0x2c, 0xb7, 0x9b, 0xb0, 0x5b, 0x17, 0xc6, 0x0b, 0xfc, 0x93, 0xa4, 0x69, + 0xa5, 0xfe, 0xa7, 0xcd, 0xc0, 0xef, 0xfd, 0x4a, 0xf4, 0xee, 0xc1, 0x4f, 0x0d, 0xd1, 0x7d, 0x73, + 0x27, 0xe5, 0xf4, 0x3a, 0x6a, 0xb1, 0xe5, 0x07, 0xe0, 0x93, 0x15, 0x88, 0x6e, 0xde, 0xaa, 0xda, + 0xdf, 0x85, 0xf9, 0x8c, 0xde, 0xa4, 0x03, 0x23, 0xf4, 0xb0, 0xa0, 0x82, 0x2d, 0x6e, 0xd8, 0xf0, + 0xbd, 0xe4, 0xaa, 0x6b, 0x9a, 0x6c, 0x99, 0xdb, 0xe0, 0x4c, 0x0e, 0xab, 0x36, 0x45, 0xdf, 0x38, + 0x1f, 0x2d, 0x28, 0x05, 0xf0, 0x9c, 0xa2, 0x16, 0x66, 0xe5, 0x4b, 0xfc, 0xfb, 0x75, 0x0d, 0xfb, + 0xe2, 0x2b, 0x01, 0x51, 0xa8, 0xd8, 0xd0, 0x3c, 0xc0, 0x6c, 0xa2, 0xd0, 0x74, 0xb9, 0x8b, 0xed, + 0x32, 0x48, 0x86, 0xbb, 0x23, 0xf5, 0x12, 0x4b, 0x4f, 0x57, 0x1d, 0x1d, 0x74, 0x3f, 0x70, 0x88, + 0x83, 0x03, 0x20, 0x9e, 0x9b, 0xbc, 0xb0, 0xf4, 0x7b, 0x62, 0xb5, 0x5d, 0xbf, 0x69, 0x5d, 0x8d, + 0xbb, 0x60, 0x96, 0xc1, 0x5e, 0x60, 0xd4, 
0x26, 0xda, 0x70, 0xc6, 0xb1, 0xd5, 0xa5, 0x1c, 0x15, + 0x08, 0x63, 0x3e, 0xa2, 0xf0, 0x5e, 0x87, 0x80, 0x77, 0xad, 0xeb, 0x3c, 0xe1, 0x6d, 0x2c, 0x24, + 0xbb, 0x18, 0x67, 0x21, 0x0e, 0x56, 0x0b, 0xe9, 0xb2, 0x3c, 0x67, 0xb8, 0x23, 0x54, 0x9c, 0x4d, + 0xf2, 0xbc, 0x29, 0x27, 0x70, 0x2f, 0x7b, 0x53, 0xe7, 0x6a, 0xfa, 0xac, 0x03, 0x65, 0x4f, 0xa1, + 0x04, 0xb6, 0x35, 0x5c, 0x99, 0x74, 0xfc, 0x82, 0xc9, 0xc3, 0x2d, 0x2b, 0xd3, 0xb1, 0x13, 0xa1, + 0x13, 0x8e, 0x58, 0xf5, 0x52, 0xec, 0x82, 0x8d, 0xe7, 0xb9, 0x1e, 0xf0, 0x8f, 0xcb, 0x5e, 0xff, + 0x4b, 0xd2, 0x59, 0x93, 0xa1, 0xcf, 0xdd, 0x49, 0x67, 0xd3, 0x79, 0x0d, 0xaf, 0xd0, 0x32, 0x01, + 0xf2, 0x86, 0xe0, 0xda, 0x78, 0xde, 0x07, 0x5b, 0xbb, 0x97, 0x50, 0xe2, 0xe8, 0x0d, 0x03, 0x38, + 0x5e, 0xf4, 0x65, 0xc5, 0xfb, 0x08, 0xa2, 0x3c, 0xd9, 0xf7, 0x41, 0x58, 0xb5, 0xe6, 0xf1, 0xad, + 0x80, 0xc7, 0xd8, 0x31, 0x6c, 0xa3, 0x20, 0xfa, 0x54, 0x59, 0x46, 0x16, 0xff, 0x16, 0x35, 0xbc, + 0x16, 0x46, 0xc7, 0x48, 0x6b, 0x0a, 0x0f, 0xdc, 0xaf, 0x59, 0x03, 0x33, 0x0e, 0x41, 0x0f, 0xb2, + 0xf4, 0xd9, 0x78, 0x88, 0xed, 0x04, 0x62, 0xac, 0x83, 0xdd, 0x83, 0xc5, 0x89, 0x22, 0x37, 0xf8, + 0xeb, 0xeb, 0x1d, 0x98, 0x27, 0xda, 0x5a, 0x50, 0xcc, 0x56, 0x66, 0xeb, 0x13, 0xd2, 0xa1, 0xa1, + 0x49, 0x50, 0xf2, 0x5d, 0xe9, 0x09, 0x8a, 0xdf, 0x28, 0x7b, 0xc5, 0x51, 0x6d, 0x21, 0x34, 0x82, + 0xd8, 0xf5, 0x4d, 0x37, 0x6a, 0xd1, 0x17, 0x2a, 0xcc, 0x9a, 0x5c, 0xc7, 0xbc, 0x97, 0x28, 0x55, + 0x73, 0xa9, 0xb4, 0x67, 0xf9, 0x87, 0x3d, 0xb6, 0xeb, 0x58, 0x9c, 0x77, 0x97, 0x30, 0xf5, 0x17, + 0xa4, 0x4f, 0xf3, 0x79, 0xfd, 0xd7, 0x67, 0xa8, 0x3b, 0x58, 0x13, 0x01, 0xcc, 0x77, 0xf4, 0xe2, + 0x9e, 0x63, 0x72, 0x56, 0x41, 0x17, 0x17, 0x34, 0xc9, 0x3e, 0x51, 0x3d, 0x6d, 0x49, 0x82, 0xf9, + 0x17, 0x3c, 0x51, 0x6d, 0x29, 0x49, 0xb3, 0x4a, 0x6a, 0x56, 0x16, 0xf0, 0x8a, 0xa8, 0x09, 0xdc, + 0x06, 0xc4, 0xd7, 0xa8, 0x9e, 0xa4, 0xea, 0x45, 0x20, 0xdd, 0x13, 0x8b, 0xe0, 0x79, 0xb9, 0x89, + 0x6e, 0x06, 0x11, 0xba, 0x60, 0x8e, 0xf2, 0x07, 0xfb, 0x01, 0x62, 0xa1, 0xf8, 0x5c, 0x1e, 0x61, + 0x19, 0x67, 0x09, 0x13, 0x3a, 0x98, 0x3f, 0x87, 0x34, 0x0d, 0xf9, 0x96, 0xeb, 0x92, 0x11, 0x6c, + 0x21, 0x99, 0x4c, 0x11, 0x3d, 0x55, 0xed, 0x93, 0x14, 0xac, 0xad, 0x98, 0x56, 0x5f, 0x5e, 0x85, + 0xe5, 0x81, 0x02, 0xe4, 0xe8, 0x6d, 0x59, 0xe5, 0xde, 0x81, 0x17, 0x2d, 0xb6, 0xfd, 0xc1, 0xbc, + 0x2f, 0xdb, 0x29, 0x96, 0x7e, 0xf2, 0x1b, 0x3a, 0x63, 0xe3, 0x64, 0x75, 0x62, 0x4a, 0x9f, 0xcc, + 0xb7, 0xb0, 0xa0, 0x6d, 0x93, 0xe7, 0x59, 0x12, 0xe5, 0x1c, 0x74, 0x47, 0x1d, 0xc2, 0x06, 0x91, + 0x32, 0xcd, 0xf1, 0x8e, 0x57, 0x49, 0x99, 0x3e, 0xfd, 0x23, 0x2c, 0x05, 0xe3, 0x3d, 0xc1, 0x41, + 0x67, 0xfb, 0xb9, 0x5b, 0xc0, 0xe7, 0x73, 0xcb, 0x0c, 0xda, 0x07, 0x11, 0x3a, 0x5d, 0x94, 0xf1, + 0xbe, 0xa3, 0x49, 0x7c, 0xa5, 0x0c, 0x16, 0x5e, 0x1c, 0x86, 0xd7, 0xf8, 0x7a, 0x81, 0xe9, 0x76, + 0x5e, 0x22, 0xfc, 0xa7, 0xce, 0x43, 0x14, 0x70, 0xb4, 0x52, 0x43, 0xfd, 0x51, 0xb5, 0xae, 0x46, + 0x17, 0xb8, 0x3b, 0x47, 0x22, 0xa5, 0x1e, 0xe9, 0xc7, 0x55, 0xad, 0xb1, 0xfc, 0x07, 0xad, 0xce, + 0x4c, 0x66, 0x2f, 0x6f, 0x5d, 0x3e, 0xd3, 0xac, 0xd7, 0x54, 0xde, 0x8e, 0xcd, 0xf9, 0xb7, 0x41, + 0x3e, 0x64, 0x1b, 0xce, 0xfd, 0xb7, 0x7f, 0x47, 0x68, 0x84, 0x3e, 0xbb, 0xbf, 0x39, 0xaa, 0xd4, + 0xea, 0xd5, 0xeb, 0xd6, 0x5e, 0xd8, 0x0b, 0xad, 0xdb, 0x18, 0xae, 0x22, 0x10, 0x65, 0x1f, 0x95, + 0x98, 0x8d, 0xf6, 0x97, 0x1c, 0x12, 0xe3, 0xa3, 0xe8, 0x14, 0xd4, 0xc3, 0x1c, 0x4a, 0x94, 0x67, + 0x8b, 0x52, 0x31, 0xa7, 0x3a, 0x98, 0xf5, 0xfe, 0x2d, 0xba, 0x3e, 0x77, 0xf6, 0x2b, 0x34, 0x54, + 0xb5, 0x5b, 0xd4, 0x1e, 0x0c, 0x66, 0x5a, 0xd7, 0x77, 0x47, 0x9c, 
0x4e, 0x2c, 0x9f, 0x04, 0x15, + 0xfe, 0x10, 0xd6, 0xdc, 0xbc, 0x12, 0xe3, 0x39, 0x90, 0x51, 0xa7, 0x76, 0xff, 0xd8, 0xca, 0x56, + 0x97, 0xe6, 0xab, 0x85, 0x9b, 0xa5, 0x4f, 0xd0, 0x33, 0x5a, 0xcc, 0xdb, 0x27, 0xfb, 0x0d, 0x27, + 0xb8, 0x8a, 0xe2, 0x9e, 0x4a, 0x86, 0x3f, 0x48, 0xf5, 0x2d, 0x79, 0x8c, 0x9a, 0x87, 0xb4, 0xd4, + 0x56, 0xd1, 0x54, 0xaf, 0x33, 0x12, 0x12, 0xea, 0x94, 0xac, 0xcf, 0x44, 0x7f, 0xd8, 0x50, 0x35, + 0x44, 0xf3, 0x2c, 0xee, 0xdf, 0x63, 0x81, 0x32, 0x11, 0x77, 0xd5, 0x22, 0x38, 0x60, 0x22, 0xf8, + 0x5c, 0xd1, 0x71, 0x7a, 0x36, 0x4c, 0x9a, 0x17, 0x21, 0x94, 0x1d, 0xf1, 0x11, 0x9e, 0x93, 0x3c, + 0xe3, 0xbb, 0xee, 0xae, 0xbd, 0x08, 0xc8, 0xf7, 0x51, 0x95, 0xc2, 0xc3, 0xc2, 0xc5, 0x43, 0x2e, + 0xe9, 0x4d, 0xb3, 0xe4, 0x93, 0x27, 0xf3, 0x06, 0xd7, 0x99, 0x2a, 0xeb, 0x81, 0x69, 0x45, 0x47, + 0x53, 0xd9, 0x5a, 0xb8, 0x1f, 0x86, 0xc2, 0xe8, 0x0c, 0x96, 0x95, 0x9e, 0x44, 0xc9, 0x62, 0x0a, + 0xde, 0x07, 0xae, 0x5d, 0xbf, 0xbd, 0x74, 0xe3, 0xd0, 0xa8, 0x06, 0xa6, 0xac, 0x83, 0xf1, 0x61, + 0xd9, 0x84, 0xab, 0x0b, 0x1f, 0x56, 0x3c, 0x3f, 0x3d, 0x6e, 0x09, 0x2a, 0xa0, 0xed, 0xe1, 0x57, + 0x80, 0x8c, 0x0e, 0xc8, 0xe6, 0xe5, 0x33, 0x48, 0xcc, 0xed, 0x98, 0x56, 0xb4, 0xd0, 0x08, 0x57, + 0x23, 0x3b, 0xbe, 0x32, 0x16, 0x38, 0x57, 0x32, 0xbb, 0x53, 0xef, 0x4c, 0x05, 0xe7, 0x70, 0xda, + 0xee, 0x81, 0x67, 0xa1, 0xfb, 0x24, 0xde, 0xd1, 0xf3, 0x27, 0x97, 0x91, 0x98, 0xc7, 0x47, 0x01, + 0x11, 0x77, 0x88, 0x46, 0x82, 0xc2, 0x8e, 0xe4, 0xf6, 0x58, 0x56, 0x05, 0x86, 0x87, 0xfc, 0x47, + 0x77, 0x81, 0x25, 0xe5, 0x9a, 0x77, 0xf8, 0x8b, 0xb8, 0x9b, 0x1d, 0xdd, 0x3c, 0xf7, 0x19, 0x3a, + 0x3f, 0x6b, 0xff, 0xd3, 0xbf, 0x5a, 0xff, 0xe4, 0xb5, 0x37, 0x03, 0xd3, 0x2c, 0x4e, 0xb0, 0xa4, + 0x7a, 0x1a, 0xdc, 0xed, 0x79, 0xb6, 0x7a, 0xaa, 0x59, 0xbc, 0xfa, 0xf8, 0xe7, 0x69, 0x53, 0xdf, + 0xcd, 0x98, 0x51, 0x63, 0xc1, 0xf4, 0xf1, 0xcc, 0xee, 0x13, 0xde, 0x22, 0xbe, 0x65, 0x47, 0x78, + 0xf5, 0x85, 0x10, 0xe1, 0x0a, 0x90, 0x68, 0x06, 0x2e, 0x18, 0x2a, 0x8d, 0x78, 0x2d, 0x56, 0x4c, + 0x7f, 0xda, 0x22, 0x74, 0xe1, 0x4c, 0x41, 0x44, 0x38, 0x68, 0x8b, 0xf7, 0xe8, 0x50, 0x09, 0x8d, + 0xaf, 0xea, 0x50, 0xbb, 0x60, 0x67, 0x2b, 0x39, 0x0c, 0x59, 0xae, 0xaf, 0xf8, 0xb8, 0x61, 0x8f, + 0xfa, 0x72, 0x88, 0xcf, 0x93, 0x97, 0x5b, 0xdc, 0x93, 0xc8, 0x8c, 0x10, 0x94, 0x1f, 0x5a, 0x75, + 0x02, 0xe6, 0x64, 0x0d, 0x67, 0xe6, 0xa1, 0x6a, 0x85, 0x3f, 0xca, 0x27, 0xe6, 0xd6, 0x09, 0xf4, + 0xb0, 0xea, 0x6b, 0x2e, 0xa6, 0x86, 0xb1, 0x0e, 0xa4, 0x61, 0x0e, 0x5f, 0x24, 0x2a, 0xb6, 0x9c, + 0x2e, 0x87, 0x38, 0x7c, 0x08, 0xb9, 0x7f, 0x6e, 0x20, 0xd3, 0x17, 0x0b, 0x17, 0x25, 0x76, 0x24, + 0xec, 0xdd, 0xe2, 0xf6, 0x0b, 0x94, 0xc7, 0x10, 0x6a, 0x07, 0x5e, 0x52, 0x3c, 0x98, 0x30, 0x8a, + 0x83, 0xd0, 0xb0, 0x7e, 0x57, 0x3e, 0xe4, 0x54, 0x65, 0x4f, 0x19, 0x1f, 0x39, 0x72, 0xb2, 0x32, + 0xcf, 0xdd, 0x6b, 0xbd, 0xef, 0x38, 0xbc, 0x39, 0xde, 0x01, 0x35, 0x51, 0xfe, 0x4e, 0x69, 0x3c, + 0x0c, 0x45, 0x54, 0x6c, 0x45, 0x44, 0x36, 0xe6, 0x91, 0x7c, 0xa1, 0x6b, 0x3d, 0x37, 0xbc, 0xee, + 0xff, 0xfa, 0x37, 0x17, 0xae, 0xc7, 0x48, 0x7b, 0x8e, 0x1f, 0x6d, 0x3a, 0x84, 0x29, 0xa4, 0x7c, + 0x79, 0x96, 0xe0, 0xcd, 0x52, 0x6e, 0x80, 0x35, 0xf4, 0x3c, 0xee, 0xb8, 0xa5, 0x40, 0x6e, 0x13, + 0x1d, 0xcd, 0x29, 0xf4, 0xf4, 0xf4, 0xe4, 0x3c, 0x10, 0x71, 0x8c, 0x1e, 0xaf, 0xc2, 0xa0, 0xae, + 0x4e, 0x02, 0x02, 0xcb, 0x64, 0x6b, 0x36, 0x45, 0x96, 0xbf, 0x19, 0xcd, 0x9a, 0x23, 0x41, 0x06, + 0xea, 0x19, 0x46, 0xd7, 0xbd, 0x19, 0x95, 0x7c, 0xca, 0xa3, 0x96, 0x96, 0x48, 0xc8, 0xf0, 0x94, + 0xae, 0x92, 0xa2, 0x20, 0x9b, 0x2a, 0x68, 0x46, 0xbc, 0xed, 0xc8, 0x65, 0x0e, 0x22, 0x79, 
0xab, + 0xa0, 0xb0, 0xb3, 0x27, 0x40, 0x69, 0xf5, 0x82, 0xd0, 0xd6, 0x85, 0x81, 0x2a, 0x67, 0xb0, 0xee, + 0x9b, 0x7f, 0xfc, 0xa3, 0x1c, 0x6a, 0x60, 0x27, 0x17, 0xc1, 0x2c, 0x7e, 0x33, 0x1e, 0x95, 0x8d, + 0x36, 0x2b, 0x9d, 0x5b, 0xeb, 0xfb, 0xe2, 0x5e, 0x0c, 0xa5, 0x44, 0x34, 0xec, 0xf5, 0xbb, 0x6f, + 0x84, 0x25, 0xfa, 0x66, 0x43, 0x70, 0xcc, 0x41, 0x63, 0x5a, 0xe5, 0xbf, 0x57, 0x51, 0x78, 0xb3, + 0x90, 0x43, 0xcd, 0x45, 0xab, 0xa9, 0xae, 0x61, 0x2a, 0x23, 0xec, 0x40, 0xdb, 0x91, 0xd0, 0xed, + 0x67, 0xb0, 0x76, 0x8a, 0x81, 0xda, 0x53, 0x5c, 0xd4, 0x3b, 0xcf, 0x9e, 0xd5, 0x28, 0xe7, 0xf6, + 0x24, 0x22, 0x0f, 0xba, 0xd6, 0x42, 0xe2, 0xc0, 0x8e, 0xa4, 0xb7, 0x65, 0x16, 0x62, 0xf0, 0x26, + 0x57, 0x14, 0x61, 0xa4, 0x14, 0x3f, 0xad, 0xca, 0xa0, 0x90, 0xbf, 0xa0, 0xe3, 0xb5, 0xdf, 0xdf, + 0x6a, 0x8a, 0x4e, 0xe7, 0x5c, 0x3d, 0x35, 0xbd, 0x2f, 0xdc, 0x3b, 0xe1, 0xe1, 0x86, 0x6c, 0xbc, + 0xb6, 0x63, 0x29, 0xf6, 0xc2, 0x81, 0x22, 0x13, 0xdd, 0xbb, 0x8c, 0xb1, 0x3f, 0x79, 0x94, 0xb0, + 0x16, 0xeb, 0x12, 0xd0, 0x3a, 0x7b, 0x81, 0xfb, 0xe2, 0xc7, 0xbd, 0xea, 0xf2, 0x7e, 0x87, 0xe1, + 0xbc, 0x66, 0x72, 0xaa, 0xc9, 0x29, 0x21, 0xb7, 0x12, 0x71, 0x6c, 0x34, 0x33, 0x84, 0xff, 0xec, + 0x79, 0x0d, 0x05, 0xe5, 0x72, 0x0b, 0x94, 0xc3, 0x6a, 0x78, 0x0c, 0x66, 0x78, 0x67, 0xa1, 0x36, + 0x20, 0xca, 0xf0, 0x7a, 0x3f, 0x48, 0x0a, 0x89, 0x6d, 0x0a, 0xc1, 0x85, 0x56, 0x3a, 0x6d, 0x96, + 0x6d, 0xb3, 0xae, 0xf2, 0xfb, 0x74, 0x3c, 0x47, 0x62, 0xa2, 0x9b, 0xe7, 0xf3, 0x81, 0x63, 0x6b, + 0x7c, 0x8b, 0xb8, 0x88, 0x7f, 0x0d, 0x91, 0xc3, 0xa6, 0x25, 0x4d, 0xf5, 0x9b, 0x1c, 0x85, 0x16, + 0x33, 0x03, 0x4d, 0x13, 0xd8, 0xef, 0xf3, 0x39, 0x83, 0x52, 0x88, 0xb5, 0x62, 0xd0, 0xb3, 0x28, + 0x46, 0x8b, 0x7a, 0x04, 0x32, 0xb7, 0xda, 0xe5, 0x28, 0x0f, 0xb6, 0x13, 0x46, 0x32, 0x4e, 0x02, + 0xb5, 0xce, 0x70, 0x07, 0x3f, 0x17, 0x5d, 0xab, 0xa1, 0x08, 0xc9, 0xd4, 0x2c, 0xca, 0x5f, 0x7d, + 0xf4, 0x5f, 0x8a, 0xf7, 0x2e, 0xe7, 0xfb, 0xf0, 0x2e, 0xf4, 0xb1, 0x8c, 0x48, 0x41, 0xbe, 0xd6, + 0x0d, 0x95, 0xb7, 0x9e, 0x3a, 0xa0, 0xc5, 0xd2, 0x11, 0xc2, 0x8b, 0x2b, 0x55, 0x99, 0x1a, 0x17, + 0xa2, 0xc0, 0x14, 0x36, 0x19, 0xce, 0x7b, 0xaf, 0x4b, 0x9c, 0xa3, 0x60, 0x21, 0x20, 0x72, 0xe4, + 0x31, 0x0d, 0xb3, 0xe2, 0x18, 0xb2, 0x46, 0xf7, 0x58, 0x79, 0x95, 0xff, 0x6c, 0x4b, 0x49, 0xb7, + 0xe0, 0x7e, 0xc2, 0x9a, 0x21, 0xe0, 0x04, 0x17, 0xb3, 0x4c, 0x0f, 0x53, 0xb4, 0xa8, 0x9a, 0x21, + 0xa9, 0x80, 0x30, 0xb6, 0x1d, 0x9a, 0x0d, 0x88, 0xa8, 0x3f, 0x2a, 0xd3, 0xa3, 0xe2, 0x99, 0xc7, + 0xc8, 0xa2, 0x03, 0x71, 0x3d, 0x93, 0x8d, 0xd5, 0x8c, 0x50, 0x22, 0x54, 0x7b, 0x4a, 0x0a, 0x3c, + 0x0b, 0xcb, 0x46, 0x6a, 0x25, 0xbd, 0x62, 0x21, 0x36, 0x3c, 0x8d, 0xa3, 0xde, 0xeb, 0x6c, 0xc9, + 0xa4, 0x44, 0xf1, 0x27, 0x63, 0x17, 0x44, 0x12, 0xb4, 0xef, 0x1d, 0xe3, 0x3a, 0xec, 0x3c, 0x51, + 0xa6, 0x98, 0x22, 0xa6, 0x5b, 0xea, 0x50, 0xf0, 0x98, 0xbc, 0xd9, 0xa8, 0x3c, 0xfb, 0x6a, 0x71, + 0xb1, 0xf1, 0x4f, 0x23, 0xd6, 0x20, 0x3c, 0xc1, 0x8b, 0x0f, 0xbf, 0x77, 0x56, 0x86, 0xb9, 0x14, + 0xdc, 0x79, 0x6c, 0x77, 0x23, 0x3f, 0x6b, 0x6a, 0x64, 0xcb, 0xa8, 0x2b, 0x01, 0x8e, 0x46, 0x9b, + 0xac, 0x5e, 0x97, 0xe6, 0xf8, 0x82, 0x4d, 0xb6, 0x28, 0x5b, 0x05, 0xc2, 0x16, 0x8a, 0xb9, 0xeb, + 0x37, 0xfd, 0xbc, 0xb2, 0x44, 0x21, 0x2e, 0x6c, 0xdb, 0xca, 0x2b, 0xd2, 0x84, 0xc3, 0x1a, 0xdc, + 0x38, 0xf5, 0xed, 0x2a, 0x22, 0x27, 0xfa, 0x13, 0x37, 0x74, 0xed, 0x90, 0x43, 0xda, 0x4b, 0xe4, + 0xe8, 0xb9, 0x44, 0xca, 0x48, 0x59, 0xb8, 0x6f, 0x1f, 0xde, 0xb1, 0x47, 0x98, 0xba, 0x03, 0xf8, + 0x50, 0x50, 0x44, 0xba, 0x7f, 0xc2, 0x60, 0xde, 0xab, 0xb0, 0x45, 0x9e, 0x17, 0xd3, 0x83, 0xa9, + 0xdc, 0x33, 0x94, 
0x6b, 0xc5, 0x8e, 0xa6, 0xa7, 0x89, 0x01, 0xf5, 0xf2, 0xf8, 0x5e, 0x3c, 0x5a, + 0x92, 0xb8, 0x4e, 0x10, 0x82, 0x6a, 0xe3, 0x95, 0x04, 0xab, 0xd7, 0x51, 0x9a, 0x43, 0x0f, 0x77, + 0x58, 0x52, 0xe8, 0xfc, 0x24, 0xae, 0x11, 0xa7, 0x77, 0xca, 0x18, 0x6d, 0xb6, 0x7f, 0x09, 0xe0, + 0x0c, 0xb1, 0x4c, 0x8c, 0x3d, 0xed, 0x61, 0xae, 0xe8, 0x05, 0x6d, 0xa1, 0xf7, 0xf8, 0x32, 0xbc, + 0x28, 0x66, 0x72, 0x0b, 0x64, 0xda, 0x4b, 0x22, 0x59, 0xea, 0x60, 0xc3, 0x29, 0xa6, 0xc4, 0xa4, + 0x3a, 0x8e, 0xd7, 0xe9, 0xf9, 0x7f, 0x9d, 0xff, 0xdf, 0xbe, 0x45, 0xff, 0xe5, 0xbf, 0xfc, 0x97, + 0xff, 0xf2, 0x5f, 0xfe, 0x7f, 0xe1, 0x7f, 0x00, 0x46, 0x9f, 0x82, 0x51, 0x00, 0x21, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8448, // uncompressed data size (bytes) + 6800, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("header_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_dbg.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_header_dbg_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x06, 0x62, 0x36, 0x08, 0x13, 0x4c, 0x48, 0x41, 0x69, + 0x20, 0x00, 0x00, 0x37, 0x0f, 0x4b, 0x90, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_header_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_header_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("image_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_data_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 8448 +// COMPRESSED SIZE (bytes): 6797 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_image_prod_data[] = +{ + 0xed, 0x99, 0x45, 0x54, 0x1c, 0xd0, 0xb2, 0xae, 0xbb, 0x1b, 0x1a, 0x82, 0xbb, 0x43, 0x70, 0x77, + 0x77, 0x97, 0xe0, 0x6e, 0xc1, 0x1d, 0x1a, 0x77, 0x08, 0xc1, 0xdd, 0xb5, 0x81, 0xc6, 0xdd, 0x82, + 0x05, 0x08, 0x1e, 0xdc, 0x83, 0x43, 0xf0, 0xe0, 0x16, 0x82, 0x04, 0x77, 0xb9, 0xb9, 0xd3, 0x33, + 0xbb, 0x83, 0x37, 0x79, 0xeb, 0x7c, 0x93, 0xbf, 0xfe, 0xc9, 0xae, 0x1a, 0xd4, 0xde, 0xab, 0xf6, + 0xaa, 0x18, 0x00, 0x00, 0xf8, 0x0a, 0x05, 0x04, 0x62, 0x00, 0x00, 0xf7, 0xa0, 0x7b, 0xc0, 0x03, + 0x28, 0x01, 0x00, 0x02, 0xf4, 0x6c, 0x6c, 0xbe, 0xbd, 0xbd, 0x61, 0xc4, 0x00, 0x80, 0x80, 0xb7, + 0x02, 0xf8, 0xa6, 0x3d, 0x00, 0x37, 0x6c, 0x05, 0xd8, 0x04, 0x05, 0xb0, 0xc0, 0x56, 0x40, 0xff, + 0x84, 0x16, 0xb6, 0x02, 0xf7, 0x4f, 0xde, 0xc7, 0x00, 0x00, 0x00, 0x58, 0x01, 0x5c, 0xf3, 0x26, + 0x5c, 0x4f, 0x41, 0xe1, 0x26, 0xac, 0x00, 0x08, 0xcb, 0x02, 0x35, 0x6f, 0x02, 0x9b, 0x57, 0x40, + 0x81, 0x2d, 0x00, 0x00, 0x06, 0xa7, 0x02, 0x00, 0xf3, 0xae, 0x00, 0x80, 0x01, 0x8c, 0x01, 0xd8, + 0xc2, 0xff, 0x6f, 0x80, 0xd6, 0x96, 0x05, 0x00, 0x82, 0xfe, 0x45, 0xd7, 0x22, 0xa0, 0x24, 0x00, + 0x00, 0x21, 0xe6, 0x5f, 0xaa, 0x97, 0x02, 0x70, 0xd7, 0xf5, 0xe3, 0x03, 0xc2, 0x3d, 0xdc, 0xbf, + 0xd3, 0x40, 0xe9, 0x18, 0xef, 0x36, 0xfe, 0x37, 0xf3, 0xcb, 0x03, 0x38, 0x06, 0x40, 0x02, 0x78, + 0xc9, 0x87, 0xef, 0x4c, 0x06, 0xb5, 0x40, 0x31, 0xff, 0xf9, 0xd7, 0x47, 0x40, 0xe0, 0xbf, 0x7a, + 0xee, 0x41, 0x5d, 0xa5, 0x4f, 0xc0, 0xa4, 0x29, 0x24, 0x8c, 0x18, 0x0a, 0x30, 0xa0, 0xb0, 0xe0, + 0x1e, 0x90, 0xf4, 0x4f, 0x7b, 0xde, 0xae, 0x39, 0x9e, 0x5f, 0x15, 0x80, 0x85, 0x05, 0x62, 0x33, + 0x80, 0xff, 0x13, 0x8a, 0x3d, 0x5c, 0x82, 0x48, 0x4b, 0x4c, 0x5b, 0xfc, 0x3c, 0x5a, 0xac, 0xce, + 0xc2, 0xc1, 0x23, 0x59, 0xea, 0x8a, 0x55, 0x60, 0x05, 0xb5, 0xfe, 0xec, 0x52, 0xf1, 0xf7, 0xa6, + 0x83, 0xb7, 0xae, 0x4a, 0x9d, 0xf9, 0xd8, 0x9d, 0x2b, 0x87, 0xed, 0xde, 0x42, 0x27, 0x05, 0x52, + 0x35, 0x13, 0xf9, 0x7d, 0xbe, 0xd7, 0x24, 0x54, 0xb7, 0x27, 0x7e, 0xeb, 0x46, 0xe9, 0xb5, 0xf2, + 0x51, 0xdb, 0xe8, 0xa1, 0x2d, 0xc8, 0xee, 0xc9, 0x97, 0xa8, 0xab, 0x40, 0x5e, 0x5c, 0x7e, 0x0b, + 0x52, 0x56, 0x98, 0x26, 0x0d, 0xc3, 0x59, 0xef, 0x34, 0x2a, 0x29, 0xdf, 0x1c, 0xc3, 0xf2, 0x85, + 0x06, 0xcb, 0xe3, 0x57, 0xcb, 0xcd, 0xed, 0xd8, 0xb0, 0x70, 0x65, 0xa1, 0x75, 0x6b, 0x19, 0xae, + 0xbe, 0xab, 0x88, 0x29, 0x33, 0x24, 0x5b, 0x6f, 0x77, 0x4e, 0xef, 0x89, 0x8b, 0x84, 0xdd, 0x4e, + 0xcb, 0xe2, 0xbc, 0x58, 0xa2, 0x97, 0x28, 0x5b, 0x38, 0xc9, 0x87, 0xbb, 0x6f, 0x99, 0x5c, 0xea, + 0xeb, 0x4d, 0x1e, 0xee, 0xe3, 0xf0, 0xb6, 0x4f, 0x89, 0xb9, 0x3f, 0xa4, 0x2e, 0x27, 0x93, 0x38, + 0x2b, 0x07, 0x04, 0xc4, 0xd1, 0x59, 0x94, 0x46, 0x63, 0xea, 0xc2, 0x93, 0x70, 0x07, 0x1b, 0xf4, + 0x49, 0x6d, 0x88, 0xf4, 0x30, 0xa8, 0x1a, 0x28, 0xd5, 0x00, 0x63, 0x3c, 0x03, 0x55, 0xcc, 0x09, + 0x0b, 0x57, 0xfd, 0xa1, 0x16, 0x81, 0x37, 0x33, 0x3e, 0x0a, 0x6e, 0x49, 0x93, 0xfc, 0x65, 0x6d, + 0x7f, 0xe8, 0x7c, 0x05, 0x58, 0x19, 0x6c, 0xcc, 0x74, 0xce, 0x2a, 0x91, 0x20, 0x68, 0x96, 0x9b, + 0x43, 0xf3, 0xe5, 0x68, 0x41, 0xdd, 0x97, 0xfb, 0x4f, 
0xbe, 0x2b, 0x57, 0x49, 0xe2, 0xd4, 0xa5, + 0x96, 0x57, 0xa5, 0x7c, 0x6f, 0xf9, 0xfb, 0xed, 0xb9, 0xc4, 0x19, 0xdb, 0x28, 0x8e, 0x83, 0x7e, + 0x20, 0x21, 0x73, 0xbf, 0x7e, 0xea, 0x8c, 0x4e, 0x9e, 0xe0, 0xb6, 0xe1, 0xfb, 0x5f, 0xee, 0x20, + 0x08, 0x2e, 0x3b, 0x5e, 0xec, 0x84, 0x7e, 0x97, 0xcd, 0x32, 0x1c, 0xfd, 0x29, 0x5f, 0x63, 0x32, + 0xfe, 0x52, 0xf1, 0x36, 0x45, 0x0f, 0x09, 0xfb, 0xd0, 0xf3, 0x12, 0x5c, 0xf5, 0xbb, 0xea, 0xa8, + 0x80, 0xee, 0x05, 0x19, 0x35, 0x2f, 0xda, 0xc4, 0xc3, 0x42, 0x2d, 0xaf, 0x27, 0x4a, 0xfa, 0x55, + 0xa8, 0xee, 0x66, 0xc1, 0x09, 0x4a, 0x6c, 0xb1, 0x30, 0xb7, 0xd0, 0xd0, 0xdd, 0xc7, 0x3e, 0xfe, + 0xaf, 0x4a, 0xb6, 0x63, 0xbf, 0xa9, 0xbd, 0x38, 0x93, 0x18, 0xdb, 0x0b, 0xfd, 0x1e, 0xd1, 0x31, + 0xc7, 0xee, 0xcd, 0xa7, 0xbd, 0x75, 0xb6, 0x32, 0xf7, 0x1b, 0xf2, 0x3e, 0xbc, 0x71, 0xf7, 0x09, + 0x5a, 0x73, 0xcc, 0xb5, 0x30, 0x31, 0x77, 0x57, 0x8b, 0x75, 0x6e, 0x60, 0xb1, 0x48, 0x8e, 0x7e, + 0xba, 0xe9, 0x70, 0xa0, 0x85, 0x50, 0x39, 0x82, 0x8d, 0x16, 0x76, 0xc6, 0x57, 0xfd, 0x07, 0x79, + 0x54, 0x08, 0xd1, 0x33, 0xba, 0xa3, 0x05, 0x66, 0x20, 0x83, 0x09, 0xc9, 0x70, 0xce, 0xfe, 0x42, + 0xe5, 0x3c, 0x68, 0xac, 0xf7, 0x8b, 0x6f, 0x19, 0x55, 0x7a, 0x6b, 0xd3, 0xc4, 0x42, 0xe7, 0xb8, + 0xa3, 0xc1, 0x48, 0xaa, 0x21, 0x8c, 0xdd, 0x3e, 0x39, 0x9c, 0xd8, 0x14, 0xd7, 0xbe, 0x74, 0xfd, + 0x03, 0xf3, 0x45, 0x6e, 0x1d, 0xa9, 0x3a, 0xe5, 0xe6, 0x3e, 0x6a, 0x68, 0xd4, 0x2f, 0x86, 0x8d, + 0xe1, 0x04, 0x5d, 0xd7, 0xe9, 0x9f, 0x3e, 0x4a, 0x7c, 0x50, 0xdf, 0x5d, 0xaa, 0x16, 0xd6, 0xf3, + 0x29, 0x64, 0x5c, 0xa9, 0xec, 0xdf, 0x8a, 0x98, 0x39, 0x03, 0x65, 0x7e, 0xeb, 0x63, 0x1a, 0x32, + 0x27, 0xcb, 0xb1, 0x89, 0x86, 0xc3, 0x33, 0xa3, 0xe5, 0xa8, 0xa2, 0x01, 0x91, 0x40, 0x69, 0x9b, + 0xdf, 0xfa, 0x3f, 0x9d, 0x8b, 0x4a, 0x44, 0xe5, 0x68, 0xb8, 0x61, 0xb2, 0x9d, 0x2d, 0x47, 0x50, + 0x1b, 0xfe, 0x60, 0x26, 0x6c, 0xb6, 0x64, 0xc7, 0x9a, 0xae, 0x26, 0xce, 0x80, 0xed, 0x05, 0xc8, + 0xd6, 0x0a, 0x96, 0x36, 0x39, 0xb9, 0xa2, 0x71, 0xd0, 0xba, 0x92, 0xd8, 0xff, 0x45, 0x74, 0xe3, + 0x11, 0x08, 0x85, 0x4f, 0x68, 0x6c, 0xed, 0x0e, 0xe4, 0x75, 0x88, 0x88, 0xf8, 0x5d, 0xaf, 0x91, + 0xf2, 0x77, 0xb2, 0xd1, 0x50, 0x41, 0x35, 0x95, 0x37, 0x55, 0x6e, 0xd4, 0x5f, 0xfa, 0x28, 0x70, + 0xf1, 0xe0, 0xab, 0xb3, 0xd8, 0x27, 0xd2, 0x94, 0x27, 0xc3, 0xde, 0xa1, 0x09, 0x1f, 0x4c, 0x9b, + 0x63, 0x9b, 0xce, 0x36, 0xd4, 0xa2, 0x10, 0x9d, 0x05, 0x35, 0xa7, 0xbe, 0x3a, 0xe1, 0x2c, 0x19, + 0x64, 0x45, 0xa8, 0x88, 0xe5, 0x66, 0xb6, 0x33, 0xa4, 0x92, 0xff, 0x84, 0x4f, 0xfc, 0xbe, 0xf0, + 0x0c, 0x65, 0x6d, 0x7a, 0x29, 0xa5, 0xd4, 0x49, 0x2e, 0x92, 0xcc, 0xc6, 0x40, 0x47, 0xc4, 0xa2, + 0xc1, 0x3b, 0x99, 0xa5, 0x51, 0x7b, 0x32, 0x2c, 0xdb, 0x78, 0x26, 0x8a, 0x1e, 0xce, 0x37, 0x26, + 0x6c, 0xa6, 0xa3, 0x1a, 0xae, 0x9e, 0xd8, 0xc7, 0x6d, 0xbd, 0x17, 0x8e, 0x52, 0xde, 0x3f, 0x76, + 0xca, 0xe6, 0x3a, 0x9b, 0x59, 0xc4, 0x8e, 0x26, 0x3b, 0xb2, 0xf2, 0xef, 0x26, 0x97, 0x31, 0x1d, + 0xa6, 0x43, 0x4a, 0x8a, 0xbe, 0x1d, 0xad, 0xa4, 0xba, 0x04, 0x51, 0x19, 0xc6, 0xa1, 0x5c, 0xd5, + 0x29, 0xb2, 0x16, 0x28, 0x6f, 0xac, 0x70, 0x82, 0x6d, 0xdd, 0x7b, 0xd4, 0x38, 0x56, 0x34, 0x35, + 0xc5, 0x47, 0x2a, 0xff, 0xd0, 0x42, 0xf5, 0x27, 0xfa, 0xcf, 0xaf, 0x1b, 0x1f, 0x2e, 0xcb, 0xde, + 0x8d, 0xe7, 0x30, 0x22, 0x7c, 0x29, 0xc1, 0x23, 0xa5, 0xde, 0x2d, 0x3a, 0x7c, 0x89, 0x3f, 0x6e, + 0xda, 0xc7, 0xec, 0x8e, 0x80, 0x63, 0xf3, 0xe9, 0xcb, 0x5f, 0x69, 0xec, 0xa9, 0xa9, 0xc9, 0xcf, + 0x95, 0x50, 0x58, 0x95, 0xfd, 0x9e, 0x16, 0xdc, 0x6e, 0x64, 0x8b, 0x8b, 0x74, 0x30, 0xfd, 0x86, + 0x69, 0xce, 0x4f, 0x93, 0x5e, 0xe6, 0xe6, 0x11, 0xd9, 0x50, 0x76, 0x75, 0x3b, 
0xe2, 0xb8, 0xfa, + 0x17, 0xe1, 0x2c, 0x35, 0x05, 0x9a, 0x62, 0xe1, 0x9c, 0x18, 0x37, 0xff, 0xb7, 0xcb, 0x1d, 0x7c, + 0x19, 0xe2, 0x3b, 0x5f, 0x00, 0xe2, 0x15, 0x60, 0x29, 0x12, 0x50, 0x0f, 0xa1, 0x94, 0x9f, 0xb2, + 0xb7, 0x89, 0x74, 0x13, 0x4d, 0xd7, 0xdc, 0x9c, 0xd6, 0x24, 0xec, 0x45, 0xe1, 0x5d, 0x9d, 0x73, + 0x44, 0x2c, 0x3d, 0xe1, 0x50, 0x0e, 0x06, 0xa8, 0xb9, 0x16, 0x3b, 0xb7, 0x09, 0xdc, 0x38, 0x36, + 0x5e, 0x8e, 0x9d, 0x4b, 0x01, 0x37, 0x28, 0xfb, 0x10, 0xc7, 0xbc, 0xd7, 0xba, 0xd3, 0x42, 0xff, + 0x0c, 0x43, 0xf0, 0x14, 0x9f, 0x1a, 0x9a, 0x0f, 0xa6, 0xeb, 0x84, 0x1b, 0x22, 0x60, 0x13, 0x5f, + 0x5d, 0x2b, 0xf7, 0x1f, 0xc3, 0xec, 0x19, 0x87, 0x9a, 0x16, 0xfa, 0x25, 0x0b, 0x47, 0x1a, 0xf1, + 0x26, 0x7f, 0x32, 0xf1, 0x4c, 0xcf, 0x6b, 0x2f, 0xff, 0xed, 0x6a, 0x08, 0x7c, 0x39, 0xb3, 0xbb, + 0x92, 0x69, 0x66, 0x59, 0x1f, 0x0c, 0xbc, 0x08, 0x3e, 0x0c, 0x4a, 0x1e, 0x8d, 0x1b, 0xaf, 0x1f, + 0xb2, 0x25, 0x22, 0xc4, 0x3f, 0xa7, 0x83, 0xf4, 0xd5, 0x23, 0x70, 0xc0, 0x9b, 0xec, 0xa0, 0xb3, + 0xe6, 0x05, 0x5a, 0x98, 0x42, 0xe1, 0xda, 0xa9, 0xee, 0xc6, 0x25, 0xe1, 0x47, 0x08, 0x95, 0x64, + 0x3a, 0x07, 0x45, 0x49, 0x13, 0x16, 0x38, 0x81, 0x7a, 0x2a, 0xed, 0x20, 0xde, 0x61, 0x47, 0xa3, + 0xdb, 0xe5, 0x65, 0xf4, 0x11, 0x40, 0x67, 0x2e, 0xf0, 0x6b, 0x59, 0x48, 0xb8, 0xb8, 0x9b, 0x5f, + 0xd0, 0xc1, 0xcf, 0xdb, 0xd5, 0x16, 0x6a, 0x77, 0x31, 0x42, 0xa9, 0x2c, 0x2b, 0xb2, 0xe8, 0x9f, + 0xd3, 0xe4, 0xb8, 0xcf, 0xba, 0x3f, 0x1e, 0x07, 0xc0, 0xd5, 0x86, 0x4f, 0x94, 0xdc, 0x68, 0xf4, + 0xee, 0x5a, 0x67, 0x2b, 0xbe, 0x29, 0xce, 0x45, 0x62, 0xb3, 0xc4, 0xd4, 0xc3, 0xc8, 0x53, 0xee, + 0xdb, 0xe8, 0xad, 0x41, 0x9b, 0xae, 0xc6, 0x01, 0x6f, 0x1d, 0x75, 0x31, 0x2b, 0xe1, 0xc1, 0xbb, + 0xd0, 0x47, 0x80, 0x51, 0x98, 0x44, 0xb5, 0x58, 0x1c, 0x38, 0x0f, 0x1a, 0xd4, 0x11, 0xc9, 0x0d, + 0xc7, 0x2b, 0x2f, 0x7c, 0x71, 0x32, 0x10, 0xba, 0x4c, 0x72, 0x8c, 0xd5, 0xf9, 0x7b, 0xf9, 0x1c, + 0x44, 0xbc, 0x0b, 0xed, 0x19, 0x26, 0x04, 0x1e, 0x80, 0xab, 0x8f, 0x00, 0x28, 0x0a, 0x42, 0xc8, + 0xc9, 0xca, 0x46, 0xe1, 0x65, 0x50, 0x21, 0xe2, 0xc7, 0xe6, 0xd8, 0x5b, 0x8a, 0x18, 0x4f, 0x0c, + 0x43, 0xbc, 0xab, 0x60, 0x57, 0xd3, 0x8f, 0x58, 0x8c, 0x09, 0xa4, 0xb3, 0x42, 0x9f, 0xab, 0xb9, + 0x58, 0x69, 0xc8, 0x7d, 0x65, 0xd0, 0x97, 0xf3, 0x8f, 0x14, 0x1f, 0xbb, 0x75, 0xaa, 0x53, 0x2f, + 0x76, 0x23, 0xfa, 0xc1, 0x95, 0x6b, 0xd8, 0xc7, 0x82, 0x2d, 0x76, 0x98, 0xa2, 0x34, 0x5e, 0xa5, + 0xb5, 0x63, 0x55, 0x0f, 0x61, 0xdc, 0x84, 0xbe, 0x60, 0xd1, 0x66, 0xe4, 0x12, 0xa1, 0xa0, 0x62, + 0x2d, 0x21, 0x74, 0x22, 0x31, 0xf6, 0x84, 0x75, 0x14, 0xc5, 0xb7, 0xd9, 0x7d, 0x88, 0x16, 0xff, + 0x61, 0x97, 0x81, 0xa8, 0x69, 0xab, 0x0f, 0x39, 0xb2, 0xd1, 0x0c, 0xfe, 0xbe, 0xde, 0x96, 0x58, + 0xe6, 0x49, 0x67, 0xff, 0x3e, 0xab, 0x44, 0x87, 0xd3, 0x45, 0xd7, 0x4e, 0xc0, 0x60, 0x47, 0xe2, + 0xf3, 0x57, 0x3b, 0xb3, 0x0a, 0xc3, 0xc4, 0x15, 0xb9, 0x24, 0x81, 0x5f, 0x4a, 0x10, 0xdc, 0xa7, + 0xa1, 0x2d, 0x43, 0xfa, 0x19, 0x58, 0xe0, 0xb1, 0x37, 0x47, 0x77, 0x41, 0xe9, 0xa9, 0xa6, 0x1d, + 0x7f, 0x5a, 0xeb, 0xa0, 0xb5, 0xfb, 0x07, 0xa4, 0xc4, 0xb1, 0xe1, 0x7d, 0x4c, 0xe3, 0x38, 0x06, + 0x99, 0xec, 0x84, 0xef, 0x13, 0xaf, 0x5f, 0xdf, 0x4b, 0x04, 0x45, 0x80, 0x3d, 0xc5, 0xbd, 0x6e, + 0xd7, 0x5e, 0x73, 0xd8, 0x99, 0x2b, 0x38, 0x06, 0xf3, 0x5d, 0x7b, 0x60, 0xc3, 0xaf, 0x47, 0x49, + 0xd7, 0xdc, 0x67, 0xaf, 0xc4, 0x97, 0x39, 0x4d, 0xa3, 0x17, 0x59, 0x3f, 0xb0, 0x89, 0xbd, 0xad, + 0xea, 0x50, 0x6b, 0x86, 0x78, 0xa0, 0xa2, 0x66, 0x23, 0xc5, 0x49, 0x7b, 0x3c, 0x26, 0xa4, 0xae, + 0x9f, 0xd8, 0xe2, 0xa7, 0x7b, 0x62, 0x0b, 0x60, 0x67, 0x62, 0x4d, 0x4d, 0xc9, 0xdc, 0xde, 0x41, + 0x4e, 
0xb4, 0x54, 0x07, 0xf1, 0xbe, 0x16, 0x79, 0x7b, 0x56, 0x1d, 0x90, 0xec, 0xd1, 0x05, 0xab, + 0xdb, 0x6f, 0x5b, 0x75, 0x9a, 0xf9, 0x85, 0xf3, 0x9f, 0x73, 0x8b, 0x22, 0xd9, 0x7f, 0x90, 0x61, + 0xe5, 0xf9, 0x52, 0xb3, 0x34, 0x36, 0x39, 0xaa, 0x89, 0x12, 0xa3, 0x85, 0x7f, 0x11, 0xad, 0xd1, + 0x63, 0xe7, 0x30, 0x0f, 0x7e, 0x38, 0x91, 0xa6, 0x20, 0x54, 0x60, 0x30, 0xc3, 0x79, 0x58, 0xea, + 0x6d, 0x61, 0xf8, 0x58, 0xc3, 0xc1, 0xe1, 0xe6, 0x9b, 0xd6, 0xea, 0x79, 0x2c, 0x23, 0x57, 0xd7, + 0xdb, 0xbc, 0xb3, 0x0d, 0x1f, 0x6d, 0xf4, 0x36, 0x69, 0x40, 0x55, 0x2c, 0x23, 0x5d, 0xc2, 0x7e, + 0xe7, 0x13, 0x31, 0xd5, 0x95, 0xe0, 0x6c, 0xd0, 0x50, 0x63, 0x36, 0xcb, 0x1c, 0x8e, 0xf4, 0xba, + 0x27, 0xe1, 0x00, 0xff, 0x48, 0xf2, 0x0d, 0x3b, 0xd5, 0x03, 0xa4, 0xeb, 0x02, 0xce, 0x89, 0x64, + 0x48, 0xd2, 0xd5, 0x91, 0x5f, 0x75, 0xd1, 0x51, 0x20, 0x47, 0x39, 0x39, 0x44, 0x12, 0x6c, 0x77, + 0x10, 0xcd, 0x0b, 0x19, 0x2b, 0x74, 0xa5, 0x4b, 0x56, 0x9a, 0xda, 0x09, 0xbd, 0x00, 0x96, 0x7c, + 0xe0, 0x72, 0xf3, 0xba, 0x0e, 0xb3, 0x7e, 0x59, 0xad, 0x8e, 0x58, 0x93, 0x36, 0x2a, 0x3d, 0x70, + 0xc9, 0xe9, 0x8e, 0x54, 0x4b, 0xa2, 0xca, 0x57, 0xaa, 0xbb, 0x93, 0xa2, 0xef, 0xde, 0xf5, 0xa5, + 0xb5, 0xfa, 0xf2, 0x63, 0xfa, 0x9b, 0xf4, 0x55, 0x0d, 0x32, 0x4d, 0xa0, 0xeb, 0x14, 0x4c, 0x21, + 0xd7, 0x45, 0x54, 0x2d, 0x77, 0x6d, 0xc2, 0x5b, 0xd7, 0xc8, 0x8d, 0x8c, 0x80, 0x01, 0xd3, 0x6a, + 0xd3, 0x2e, 0x1f, 0x73, 0x35, 0x7d, 0x5d, 0x68, 0x89, 0xd2, 0xa0, 0x96, 0x64, 0x02, 0xde, 0x49, + 0x69, 0x9e, 0x75, 0xcf, 0xfa, 0xcd, 0x9c, 0xbc, 0xd9, 0x44, 0xf9, 0x4e, 0x9d, 0x73, 0x5a, 0x14, + 0x9b, 0xe9, 0x37, 0x03, 0xac, 0x36, 0xd1, 0x04, 0xf7, 0xb3, 0x42, 0xdf, 0xbe, 0x3d, 0x0d, 0xb7, + 0xc0, 0xd0, 0xef, 0x83, 0xfc, 0xc9, 0x3a, 0x7d, 0xca, 0xad, 0x57, 0x2d, 0xa4, 0xf8, 0xca, 0x8a, + 0x52, 0xa1, 0x23, 0xba, 0x9e, 0x89, 0x5e, 0x89, 0x21, 0x74, 0x32, 0x9c, 0xe7, 0x54, 0x97, 0x08, + 0xf6, 0x8b, 0xfd, 0xb4, 0xd8, 0x71, 0x5a, 0xe7, 0xc2, 0x65, 0xb9, 0xac, 0x10, 0x2f, 0x3e, 0xba, + 0x67, 0xe4, 0x67, 0xed, 0x6b, 0x8d, 0x9e, 0x25, 0xcb, 0x0d, 0x69, 0x9f, 0xcb, 0x48, 0xf7, 0x47, + 0x0d, 0x53, 0xac, 0xd5, 0x73, 0xfc, 0xe9, 0xec, 0x23, 0x11, 0x4e, 0xec, 0x46, 0x38, 0xb7, 0xf9, + 0x03, 0xf4, 0x96, 0x1a, 0x4d, 0xb7, 0x3f, 0xd8, 0x2c, 0x55, 0x6b, 0x3c, 0x9e, 0xa4, 0xe1, 0x0c, + 0xbd, 0x8f, 0xae, 0xd6, 0xae, 0x25, 0x49, 0x03, 0xbb, 0x09, 0x58, 0x4c, 0x79, 0x7e, 0x84, 0x86, + 0x89, 0x9b, 0xca, 0x33, 0xfe, 0xb3, 0xcd, 0xc9, 0x6a, 0xf0, 0x51, 0x26, 0xa6, 0x7f, 0x84, 0x64, + 0x7b, 0x03, 0xc2, 0x16, 0x71, 0xc2, 0x16, 0x5e, 0xf3, 0x40, 0x91, 0xb1, 0xa1, 0xce, 0xfc, 0x21, + 0xe4, 0x81, 0x75, 0x79, 0xae, 0x95, 0xc0, 0xa8, 0xb2, 0x2b, 0x3c, 0xba, 0xa2, 0xe7, 0xc1, 0x69, + 0x7a, 0x0e, 0x64, 0x27, 0x57, 0x13, 0x45, 0xff, 0xc3, 0xc1, 0xa3, 0x5a, 0x54, 0xee, 0x51, 0x97, + 0x90, 0x5e, 0x9e, 0x31, 0x35, 0x23, 0xd4, 0x7b, 0x87, 0xbd, 0x98, 0x84, 0xd1, 0x32, 0xd6, 0x26, + 0x2e, 0x22, 0x10, 0xa4, 0x27, 0x8f, 0x8a, 0x04, 0x2e, 0x64, 0xb8, 0x1f, 0x9b, 0xa2, 0x56, 0x20, + 0xf5, 0xac, 0x56, 0x1f, 0xa8, 0x89, 0x64, 0x5a, 0xe0, 0x3f, 0xdd, 0xea, 0x55, 0x1e, 0x34, 0xcb, + 0x40, 0x8d, 0x34, 0x00, 0xe2, 0xf7, 0x44, 0xd5, 0x26, 0xe6, 0xed, 0x20, 0x0b, 0x22, 0x79, 0xdd, + 0x26, 0x61, 0xe5, 0xd7, 0xd8, 0xb1, 0xcc, 0x68, 0xb0, 0x4c, 0x93, 0x16, 0xc4, 0xda, 0x82, 0xf1, + 0x06, 0x48, 0x40, 0x8b, 0xbf, 0x04, 0xe8, 0xb6, 0x30, 0xc5, 0xd7, 0x42, 0xdc, 0xa9, 0x39, 0x8e, + 0xcb, 0x26, 0xd5, 0x9e, 0x8d, 0x28, 0x35, 0xe4, 0x10, 0x37, 0x2a, 0x1c, 0x6a, 0x71, 0x81, 0x55, + 0x76, 0x0f, 0xa1, 0x8b, 0xc5, 0x7b, 0x18, 0x8a, 0x81, 0x72, 0x58, 0xfe, 0x10, 0xe2, 0x76, 0x79, + 0x02, 0x2a, 0x49, 0xaf, 0x68, 
0x9b, 0x19, 0xcf, 0x0e, 0xf1, 0x01, 0x51, 0x4d, 0xd4, 0xa2, 0x2c, + 0x67, 0x3c, 0x52, 0x75, 0x4b, 0x5d, 0x41, 0xf7, 0xaa, 0x65, 0x0c, 0xa0, 0x06, 0xeb, 0x45, 0xe1, + 0x71, 0xd2, 0x36, 0x66, 0x7d, 0xab, 0xae, 0xb5, 0x74, 0xbe, 0xb5, 0xb0, 0xec, 0x0a, 0x68, 0x9b, + 0xe1, 0x85, 0x73, 0x5b, 0xa5, 0xd8, 0xf9, 0x63, 0x97, 0x60, 0x68, 0x83, 0x51, 0x9b, 0x25, 0x08, + 0x9b, 0x22, 0x9b, 0xc6, 0x58, 0xc2, 0x70, 0x63, 0xf6, 0x15, 0x86, 0x0b, 0xb6, 0x0f, 0xb4, 0xe2, + 0x95, 0xba, 0xd9, 0x9a, 0x19, 0xb4, 0x55, 0x40, 0x35, 0xbe, 0x83, 0xb2, 0xfa, 0x34, 0x85, 0xa1, + 0xbb, 0xda, 0x7c, 0x37, 0xd3, 0xed, 0xa3, 0x81, 0x8e, 0x2b, 0xe8, 0x46, 0xcb, 0x56, 0xae, 0x15, + 0x3a, 0x27, 0x0d, 0x26, 0x00, 0x5d, 0xcd, 0x93, 0xdf, 0xb5, 0x0e, 0x17, 0x24, 0xe6, 0x8a, 0xdf, + 0x9f, 0x31, 0x24, 0xfe, 0x00, 0xfb, 0x8a, 0x41, 0xfa, 0x8c, 0x5f, 0x38, 0x5b, 0xb8, 0x07, 0x5c, + 0xda, 0xad, 0x7f, 0x16, 0x83, 0xf1, 0x0c, 0xc6, 0xb0, 0xdd, 0xbc, 0x3e, 0x53, 0xa3, 0x7b, 0x3a, + 0x34, 0x5e, 0x55, 0x86, 0xce, 0xab, 0x50, 0xd5, 0x2b, 0xf7, 0x05, 0xdb, 0x72, 0x4f, 0x3a, 0x3a, + 0x44, 0xeb, 0xde, 0xb5, 0x9f, 0x69, 0x75, 0x88, 0x5e, 0xbb, 0x84, 0xa5, 0x5f, 0xef, 0x6f, 0xb5, + 0xef, 0xc3, 0xb9, 0x58, 0xea, 0xe2, 0xec, 0xa4, 0x8d, 0x6e, 0x11, 0xba, 0xbd, 0xaf, 0x82, 0x9b, + 0x53, 0x01, 0x61, 0x5a, 0x39, 0x78, 0x23, 0x1f, 0xd8, 0xbc, 0x87, 0x6a, 0xcf, 0x9a, 0x80, 0x76, + 0x0a, 0x15, 0xaf, 0x6a, 0x87, 0xc8, 0x70, 0x41, 0x90, 0x8a, 0xe4, 0x2e, 0x7e, 0x97, 0x7e, 0x80, + 0x14, 0x13, 0x61, 0xb8, 0x9b, 0x4b, 0x37, 0x1b, 0xde, 0x36, 0x55, 0xd6, 0xc8, 0xba, 0x6a, 0x7c, + 0xdc, 0xe0, 0xb6, 0xe0, 0x25, 0x02, 0x38, 0xd7, 0xd1, 0x01, 0x9f, 0xe1, 0xea, 0x6e, 0x7d, 0x53, + 0xb7, 0x1b, 0x48, 0xec, 0x27, 0xba, 0xfc, 0x4c, 0xb5, 0x4b, 0x4d, 0x57, 0xef, 0x09, 0x2d, 0xf5, + 0x71, 0xe2, 0x1f, 0x31, 0x9e, 0xc6, 0x9d, 0x42, 0xa3, 0x45, 0x29, 0xea, 0xdd, 0x8c, 0xb0, 0x45, + 0x90, 0xdf, 0x10, 0x29, 0x21, 0xd0, 0xc0, 0x25, 0x56, 0x84, 0xb0, 0xb5, 0xef, 0x18, 0xf3, 0xec, + 0x7e, 0xb2, 0x15, 0x5e, 0xf8, 0x2b, 0xab, 0x6e, 0x20, 0xba, 0x1d, 0xb4, 0x37, 0x3c, 0x59, 0x52, + 0x14, 0x48, 0xc9, 0xe0, 0x3a, 0x51, 0x9a, 0x0d, 0x63, 0xc7, 0xb3, 0xfe, 0x34, 0x9f, 0xf0, 0xcc, + 0x6c, 0x48, 0x4f, 0xef, 0x1f, 0x8f, 0x61, 0x5f, 0x4f, 0xc6, 0x5d, 0x59, 0x16, 0xb7, 0x0b, 0x37, + 0x41, 0xe9, 0x6c, 0x47, 0xa4, 0x12, 0x50, 0xf5, 0x0b, 0xe4, 0xef, 0xef, 0xf7, 0xea, 0xe7, 0x5a, + 0x97, 0x58, 0xaf, 0x46, 0xfe, 0xdd, 0xe8, 0xcc, 0x49, 0xe0, 0xe7, 0x40, 0x51, 0x77, 0x3d, 0x05, + 0x63, 0x51, 0xa9, 0xc2, 0xeb, 0xc4, 0xd8, 0xe0, 0x6e, 0x7c, 0xdc, 0x95, 0xd3, 0xfc, 0xcb, 0x28, + 0x4d, 0x50, 0x66, 0x64, 0x1f, 0x35, 0x9d, 0xbe, 0x09, 0x41, 0xb1, 0xd4, 0x9b, 0x27, 0x57, 0x97, + 0xfd, 0xab, 0x94, 0x8b, 0xbb, 0xd0, 0x70, 0x93, 0x9b, 0x76, 0x97, 0x87, 0x3d, 0x27, 0x41, 0xe3, + 0x7a, 0x77, 0xab, 0x4b, 0xd0, 0xd8, 0x01, 0x18, 0x89, 0x5a, 0xf5, 0xe9, 0x5e, 0xea, 0x88, 0xbf, + 0xad, 0xf1, 0x20, 0xb8, 0x02, 0x39, 0x35, 0x0a, 0x6f, 0x5f, 0x2c, 0xf2, 0x63, 0x64, 0x90, 0x37, + 0x61, 0xac, 0xcd, 0x77, 0xaf, 0x0b, 0xd2, 0x4e, 0xc3, 0xc4, 0x61, 0xe4, 0xdc, 0x20, 0xb3, 0xac, + 0x54, 0xe6, 0xc6, 0x9c, 0x74, 0x9f, 0x36, 0x46, 0x2f, 0x32, 0x57, 0xd0, 0xd5, 0x1f, 0x9f, 0x44, + 0x3f, 0x82, 0xc0, 0x32, 0x8d, 0xd6, 0x90, 0x91, 0x6e, 0x26, 0x99, 0x50, 0xb1, 0x14, 0xff, 0xf5, + 0x54, 0x3b, 0x89, 0x00, 0xb4, 0xa9, 0x26, 0xaa, 0xf2, 0x72, 0xa9, 0xa9, 0x05, 0xd3, 0xb8, 0x94, + 0xa0, 0xb8, 0x29, 0x91, 0x49, 0x49, 0x3d, 0x1d, 0xb5, 0x9a, 0x65, 0x2f, 0x99, 0x08, 0x87, 0xfa, + 0x8e, 0xaa, 0x97, 0xbd, 0x13, 0x80, 0xe8, 0x24, 0x4f, 0xc0, 0x7e, 0x3f, 0x60, 0x73, 0x79, 0xe2, + 0xc2, 0x26, 0xd5, 0x50, 0xd4, 0xe8, 0xb7, 0x26, 0xcd, 
0x59, 0x25, 0x3d, 0xee, 0x26, 0x59, 0x4f, + 0x2f, 0x8a, 0x22, 0x8a, 0xd2, 0x60, 0x33, 0x81, 0xc8, 0xc0, 0xbd, 0xdf, 0xaf, 0x20, 0xcd, 0xe1, + 0xd6, 0x02, 0x33, 0x46, 0x20, 0x51, 0xcf, 0x2f, 0xa7, 0xc6, 0xb2, 0x3e, 0x8e, 0x2e, 0x05, 0xe0, + 0x22, 0xca, 0x79, 0x8f, 0xe4, 0xe5, 0xb3, 0xeb, 0xc1, 0x73, 0x6c, 0xb1, 0xe6, 0x22, 0x10, 0xf7, + 0xc2, 0x83, 0x89, 0xf0, 0xdc, 0xd4, 0xb4, 0x6f, 0x80, 0x39, 0x67, 0xb6, 0xe5, 0x92, 0x39, 0xe2, + 0xa1, 0x23, 0xc7, 0xf4, 0x94, 0xcb, 0xf3, 0xeb, 0xb7, 0x3d, 0xa7, 0xfe, 0xd0, 0xd1, 0x94, 0xdb, + 0xc8, 0x86, 0x0b, 0xd5, 0xab, 0x51, 0x15, 0x25, 0xa5, 0x73, 0x26, 0x74, 0x06, 0xae, 0xd9, 0x46, + 0xff, 0xfd, 0x24, 0x3a, 0xa2, 0xd6, 0xe7, 0xe4, 0x7a, 0x7b, 0x78, 0x96, 0xf7, 0x67, 0xd1, 0x8c, + 0xae, 0x88, 0xf0, 0x19, 0x57, 0xd5, 0xed, 0xf3, 0x35, 0x58, 0x9b, 0xab, 0x56, 0x92, 0xca, 0x16, + 0x64, 0x8b, 0xe5, 0xf5, 0xf1, 0x42, 0xf4, 0x2d, 0x7c, 0x5f, 0x83, 0xe6, 0x87, 0xa2, 0xea, 0xb2, + 0xdf, 0x6d, 0x6c, 0xb3, 0x21, 0xf1, 0x1e, 0xf8, 0x46, 0x07, 0xe0, 0x49, 0x1c, 0x7a, 0x44, 0x11, + 0x16, 0x1d, 0xe9, 0x0c, 0xf2, 0x2f, 0x4d, 0x84, 0xb6, 0xac, 0x64, 0xdf, 0x46, 0x95, 0x3e, 0x79, + 0x0a, 0xfc, 0x9e, 0xfb, 0x30, 0x2e, 0x6a, 0x11, 0x33, 0x6a, 0x3f, 0x26, 0x22, 0x62, 0x39, 0x22, + 0xb5, 0x34, 0xf0, 0x9f, 0xf7, 0xad, 0xde, 0xbf, 0x63, 0x23, 0xf3, 0x4b, 0x64, 0x87, 0xf1, 0x49, + 0x80, 0xb0, 0xab, 0x41, 0x99, 0x25, 0x6a, 0xa5, 0xe9, 0xce, 0xb7, 0x2e, 0x2e, 0x28, 0x26, 0x0a, + 0x5e, 0x92, 0x80, 0x32, 0x97, 0xd5, 0x1a, 0x2e, 0xfc, 0xdc, 0xf4, 0xcb, 0x79, 0xd5, 0x8a, 0xa7, + 0xc9, 0xae, 0xfe, 0x54, 0x69, 0x59, 0x64, 0xd0, 0x4c, 0x39, 0x4e, 0x81, 0xda, 0xa8, 0x8b, 0x9a, + 0x66, 0xb7, 0x92, 0xab, 0x40, 0x05, 0xd2, 0x83, 0xce, 0xcf, 0x71, 0x1b, 0xd6, 0x36, 0x23, 0x4e, + 0x9a, 0xaf, 0x55, 0x5f, 0xb2, 0x05, 0x4c, 0x52, 0xf8, 0x3d, 0x4f, 0x3d, 0xb1, 0x7a, 0xd0, 0xb8, + 0x85, 0x54, 0x83, 0x28, 0x8b, 0xdb, 0x65, 0x64, 0xdf, 0xdf, 0x15, 0xd9, 0x87, 0xe1, 0xbd, 0x90, + 0xfe, 0x2a, 0x49, 0x55, 0x25, 0xa4, 0x81, 0xff, 0x86, 0xc4, 0xa0, 0x97, 0x90, 0x17, 0x05, 0xdb, + 0x72, 0x94, 0x17, 0x14, 0x18, 0x47, 0x28, 0xdc, 0xde, 0x72, 0x5d, 0xf1, 0xe1, 0x1d, 0xef, 0xa5, + 0xe4, 0xba, 0x66, 0x04, 0x4e, 0xd4, 0x8b, 0xac, 0x7b, 0xa8, 0xaf, 0x6f, 0x07, 0x2e, 0x4d, 0x84, + 0xac, 0x6a, 0x88, 0x83, 0x67, 0x03, 0xfa, 0x09, 0x83, 0x6b, 0x53, 0x6d, 0x40, 0x6b, 0x48, 0xae, + 0x58, 0x39, 0xf8, 0x56, 0xd7, 0xe5, 0x00, 0x22, 0x17, 0xf3, 0xf3, 0x41, 0x44, 0x68, 0x32, 0xbb, + 0xcd, 0x75, 0xb5, 0xc7, 0xe9, 0x0b, 0x89, 0x7f, 0x19, 0x9f, 0x42, 0x92, 0xd1, 0x97, 0xcb, 0x4c, + 0x55, 0x79, 0x82, 0xd2, 0x13, 0x90, 0xda, 0x39, 0xa1, 0x3e, 0xa4, 0xb1, 0xf5, 0x69, 0xf5, 0xd9, + 0x81, 0xe5, 0xd8, 0xe9, 0x58, 0xcf, 0x1b, 0x8a, 0x6e, 0xcc, 0xf3, 0xa8, 0x31, 0xdc, 0x50, 0x9b, + 0x89, 0x71, 0x61, 0x85, 0x23, 0x28, 0x77, 0x61, 0xb5, 0xc9, 0xb2, 0xf2, 0x40, 0xb1, 0x7a, 0xfc, + 0x2e, 0x6a, 0x16, 0xad, 0xb7, 0xc3, 0xa3, 0x92, 0x85, 0xa8, 0x19, 0xe5, 0xcf, 0x82, 0xf3, 0x53, + 0x5e, 0xc2, 0x65, 0x4b, 0x80, 0x40, 0xb5, 0x31, 0xba, 0x4b, 0xc7, 0xef, 0x37, 0x20, 0x8f, 0xc3, + 0xdb, 0x91, 0xa3, 0x3e, 0xff, 0xc6, 0xee, 0x8f, 0x2a, 0x16, 0x77, 0x0a, 0xe5, 0x77, 0xb4, 0x21, + 0x90, 0x0e, 0x1c, 0xa0, 0xb3, 0xfa, 0x0e, 0x1b, 0xf2, 0x84, 0x9f, 0x93, 0xd2, 0x57, 0xab, 0xdf, + 0x41, 0x65, 0xa4, 0x33, 0xf6, 0xb3, 0xdc, 0x8c, 0x7c, 0xc0, 0x3c, 0x8d, 0x01, 0xb2, 0x2e, 0xb7, + 0x22, 0xbe, 0xe8, 0xa9, 0xf7, 0x05, 0xa6, 0x6e, 0x79, 0x01, 0x2d, 0x8e, 0x8e, 0xcd, 0x61, 0x94, + 0x19, 0x52, 0x0d, 0x6c, 0x3f, 0xd1, 0x11, 0x91, 0x05, 0x83, 0xcd, 0x18, 0x38, 0xf6, 0xf3, 0x99, + 0xfb, 0xbe, 0xf7, 0xbc, 0xc5, 0x8a, 0x8b, 0x13, 0xe5, 0x44, 0x22, 0xa8, 0x4f, 
0x29, 0x13, 0xd2, + 0xa0, 0x61, 0x1e, 0x3e, 0x87, 0xcd, 0x39, 0x0f, 0x4b, 0x91, 0xe4, 0xb7, 0x1a, 0x91, 0x5b, 0xf9, + 0xfb, 0x36, 0xc5, 0x51, 0xd9, 0x85, 0xcc, 0x3f, 0x6b, 0xa4, 0xba, 0x09, 0x84, 0x08, 0x4b, 0x6c, + 0x28, 0x51, 0xa4, 0x21, 0x99, 0x03, 0x58, 0xd0, 0xf4, 0xa5, 0x82, 0x75, 0x95, 0x18, 0x88, 0x0e, + 0x13, 0x19, 0xcc, 0xeb, 0x6c, 0xae, 0x51, 0x03, 0x0f, 0x52, 0x59, 0x65, 0x26, 0xbf, 0x16, 0x78, + 0xa0, 0xa4, 0x47, 0x90, 0x4e, 0x75, 0xdd, 0xab, 0x8e, 0xf9, 0x51, 0x5b, 0x34, 0x93, 0xfb, 0x1b, + 0x3a, 0x11, 0x12, 0x8f, 0x68, 0x9d, 0x40, 0x3e, 0xd5, 0x17, 0x28, 0x3d, 0x5e, 0x64, 0x20, 0x81, + 0xe1, 0x0b, 0x82, 0x29, 0xc5, 0xdf, 0x8f, 0x89, 0x4a, 0x7a, 0x6e, 0x6a, 0x39, 0x7a, 0x5f, 0x2d, + 0x51, 0x6d, 0x08, 0x57, 0x67, 0x38, 0x7d, 0xb9, 0x42, 0x99, 0xba, 0x38, 0xcf, 0x1b, 0x18, 0x2a, + 0x21, 0x1e, 0x93, 0xcb, 0x61, 0xd7, 0xd9, 0x90, 0x30, 0x0a, 0xa5, 0x99, 0x29, 0x8b, 0xb6, 0xad, + 0x24, 0x3a, 0x91, 0x4b, 0x77, 0x3c, 0x66, 0xf0, 0x95, 0x92, 0x9b, 0x6e, 0x88, 0xb8, 0x20, 0xda, + 0x8e, 0x42, 0x88, 0x39, 0xad, 0x26, 0xd8, 0xd1, 0x27, 0xbd, 0x8e, 0x79, 0xa1, 0x50, 0x86, 0xcb, + 0x55, 0xfb, 0x2b, 0x46, 0x19, 0x42, 0xd3, 0x06, 0xa2, 0x8c, 0x2b, 0xa1, 0x20, 0x42, 0x20, 0x68, + 0x83, 0x62, 0x39, 0xd0, 0x84, 0x66, 0xd3, 0xf9, 0xe7, 0xd9, 0xab, 0x0f, 0xcf, 0x9c, 0xd1, 0xf3, + 0xe8, 0xea, 0xde, 0x54, 0x5f, 0xc9, 0x75, 0x3f, 0x1c, 0xfe, 0x70, 0x23, 0x9e, 0x61, 0xa5, 0x5b, + 0x86, 0x37, 0x85, 0xa0, 0xe6, 0xda, 0x97, 0x0b, 0x31, 0x58, 0x33, 0x1d, 0x52, 0x9d, 0x2f, 0xee, + 0xb1, 0x3f, 0x1d, 0xf9, 0x7e, 0xfc, 0x03, 0xe3, 0x66, 0xd8, 0x8a, 0x78, 0xb0, 0x97, 0x88, 0xb8, + 0x6d, 0x39, 0x13, 0xee, 0x49, 0xee, 0x4a, 0xc1, 0xfc, 0x0c, 0x5c, 0xb6, 0xfb, 0xa8, 0xa5, 0xc2, + 0x0b, 0xba, 0x9f, 0x80, 0x03, 0x70, 0x78, 0x04, 0x26, 0xfd, 0x0a, 0xe7, 0x52, 0x8c, 0x64, 0x6a, + 0xec, 0x82, 0xa0, 0xda, 0xbb, 0xb0, 0x32, 0x5e, 0xb4, 0x1c, 0xf1, 0x30, 0x47, 0xa7, 0x7a, 0xa7, + 0x81, 0x76, 0x2d, 0x58, 0xb2, 0xe2, 0xfc, 0x41, 0x00, 0xcd, 0xe7, 0x14, 0x93, 0x19, 0xec, 0xe3, + 0x00, 0x7c, 0xd9, 0xd8, 0x4a, 0xf5, 0x6d, 0x1a, 0x46, 0xac, 0x51, 0xf9, 0x6f, 0x39, 0xf5, 0x13, + 0x54, 0x1b, 0x76, 0x57, 0x74, 0x00, 0x1c, 0xd4, 0x6d, 0x46, 0x73, 0xe8, 0xa9, 0x84, 0x8b, 0x79, + 0xd7, 0xd3, 0x4b, 0x86, 0xc4, 0x37, 0x36, 0x24, 0x24, 0xeb, 0xbb, 0x2e, 0xf7, 0x85, 0x92, 0xb9, + 0xfc, 0xe6, 0xbc, 0x9b, 0x41, 0x31, 0xae, 0x41, 0x91, 0xb5, 0xc8, 0xd2, 0x80, 0xc1, 0x0d, 0x8d, + 0xbb, 0x74, 0x2e, 0xc2, 0x37, 0x1e, 0x8c, 0xf3, 0x14, 0xc8, 0xe8, 0x9a, 0x1f, 0x42, 0xb5, 0xba, + 0x71, 0xe7, 0xbb, 0xaa, 0xef, 0x10, 0xb0, 0x5e, 0x5c, 0x8d, 0x25, 0xc2, 0x4b, 0x66, 0xcb, 0x9c, + 0x36, 0x4b, 0xda, 0x74, 0xff, 0xc0, 0x5e, 0xbd, 0xc2, 0xdd, 0xa6, 0x2c, 0xb3, 0x97, 0x65, 0x39, + 0xd9, 0x50, 0xad, 0x4b, 0x0e, 0xda, 0x7f, 0xce, 0x27, 0x73, 0xee, 0x7d, 0x23, 0xe7, 0xac, 0x4c, + 0x34, 0x55, 0x8a, 0xb5, 0x50, 0xf7, 0x30, 0xc3, 0x5c, 0x00, 0x55, 0x21, 0x16, 0x0d, 0xfb, 0xf8, + 0xdf, 0x8b, 0xad, 0xde, 0xfd, 0xcc, 0xf0, 0xe1, 0x79, 0x4f, 0x1d, 0x12, 0x5d, 0x07, 0x8b, 0x0d, + 0x6a, 0xc9, 0x38, 0x01, 0xb0, 0xaf, 0x11, 0xc2, 0x76, 0xa6, 0x16, 0x79, 0xd0, 0xb8, 0xa6, 0x9e, + 0x5a, 0xc9, 0x34, 0xe4, 0x7d, 0xc7, 0x52, 0x4a, 0x0d, 0x24, 0x35, 0xae, 0x26, 0x4d, 0xc5, 0xc9, + 0x43, 0xf2, 0xe4, 0xf0, 0xb1, 0xb3, 0xf2, 0xfd, 0x0c, 0x03, 0x06, 0x61, 0xcc, 0x23, 0x46, 0x06, + 0x0c, 0x8f, 0x8a, 0x72, 0x5a, 0xf3, 0xe3, 0x0c, 0x9b, 0x07, 0x75, 0x9c, 0xa2, 0xc7, 0x42, 0x62, + 0xac, 0x8c, 0xd8, 0x84, 0x49, 0x90, 0x51, 0xd8, 0x00, 0x16, 0x1c, 0xd9, 0x44, 0xf1, 0x14, 0x9b, + 0x09, 0x74, 0x47, 0x46, 0x88, 0x4a, 0x00, 0x01, 0x34, 0x90, 0x90, 0xc2, 0xa0, 0xb1, 0x52, 0xae, + 0xd7, 
0x4c, 0x79, 0xc4, 0x9b, 0x4a, 0xff, 0x4d, 0x2f, 0x5b, 0x08, 0x83, 0xd2, 0x4c, 0xdd, 0xa0, + 0x2d, 0x7b, 0xc1, 0x3f, 0xc5, 0xec, 0x87, 0x30, 0x2b, 0x1d, 0x61, 0x5b, 0xf4, 0x94, 0xd4, 0xa4, + 0xc6, 0x67, 0xaf, 0x6f, 0x58, 0xb2, 0xb6, 0x1a, 0xcb, 0x4d, 0xc7, 0xd7, 0xa9, 0xd5, 0xa6, 0x18, + 0xa3, 0xe7, 0x4e, 0xd0, 0x76, 0x7d, 0xc9, 0xb7, 0x3c, 0xfd, 0x7a, 0xf2, 0xd3, 0xe7, 0x9d, 0x5f, + 0xef, 0xd4, 0x5e, 0xaa, 0x8d, 0xf5, 0x2e, 0xed, 0x0b, 0xfe, 0xb3, 0xff, 0x9e, 0xe9, 0xd7, 0x1e, + 0xc2, 0x53, 0xf2, 0x2f, 0x92, 0xfe, 0x4a, 0xe6, 0x14, 0x30, 0x6a, 0x21, 0x05, 0xd0, 0x0d, 0x50, + 0xdf, 0x16, 0xae, 0xed, 0x8c, 0x5a, 0x6c, 0x31, 0xdf, 0x30, 0xe1, 0xa7, 0xad, 0x9b, 0xb6, 0x91, + 0xd8, 0xb4, 0x84, 0x06, 0xdd, 0x03, 0x22, 0xc8, 0x67, 0xca, 0x4b, 0x27, 0xbf, 0x19, 0xd1, 0x8f, + 0xd5, 0xb2, 0xf0, 0x89, 0xb2, 0x9f, 0xa9, 0xcd, 0xef, 0x8e, 0x26, 0x4c, 0xf0, 0xca, 0x29, 0xdc, + 0x82, 0x01, 0x76, 0x1c, 0x09, 0x5a, 0xa4, 0x17, 0xcb, 0xa9, 0xf2, 0xc3, 0x5f, 0xbc, 0x12, 0x91, + 0x1f, 0x31, 0x94, 0xef, 0x6a, 0xbc, 0x76, 0x4c, 0xc4, 0x46, 0x98, 0x53, 0x2f, 0x10, 0x83, 0x5d, + 0xde, 0xa3, 0x3d, 0xbe, 0x23, 0xb6, 0x5a, 0x65, 0xb2, 0x95, 0x2f, 0x67, 0x99, 0xc3, 0xa1, 0x12, + 0xb8, 0x6c, 0xec, 0x9f, 0x49, 0x1f, 0x28, 0xae, 0x61, 0x70, 0xb0, 0xd0, 0x0f, 0x8d, 0xbc, 0x90, + 0xe9, 0xc9, 0x19, 0xa6, 0x56, 0x47, 0x32, 0x6e, 0xd3, 0x40, 0x13, 0xaf, 0x71, 0x38, 0x2d, 0x09, + 0xa1, 0x94, 0xe4, 0xf3, 0xce, 0x63, 0x45, 0x1c, 0x49, 0xab, 0xbc, 0x24, 0x4b, 0x21, 0xbb, 0xa8, + 0x1d, 0x2f, 0xd4, 0xe6, 0x7a, 0xc6, 0x92, 0x88, 0x47, 0xbc, 0xf1, 0x5f, 0xf5, 0x18, 0xff, 0xf2, + 0x22, 0x60, 0x68, 0xae, 0x4d, 0x17, 0x52, 0xf9, 0x81, 0x98, 0x36, 0xd9, 0xe1, 0x37, 0x12, 0xc6, + 0x80, 0x29, 0x19, 0xe7, 0xf1, 0xbd, 0x95, 0xc6, 0xef, 0xe8, 0x63, 0xd8, 0x5b, 0x4c, 0x5e, 0x76, + 0xae, 0x98, 0xce, 0x7b, 0xcf, 0xae, 0x18, 0x26, 0x59, 0xbb, 0x38, 0x4e, 0xc1, 0xa1, 0xdc, 0xe6, + 0x87, 0x1a, 0x2d, 0x4c, 0x14, 0x71, 0x2b, 0xe8, 0x02, 0xf9, 0x81, 0xfb, 0x43, 0x6b, 0xe0, 0xca, + 0x97, 0xac, 0x06, 0x21, 0x4c, 0x25, 0xc1, 0x7a, 0x0c, 0x5b, 0x5a, 0x54, 0x93, 0x29, 0xec, 0x14, + 0xcf, 0x99, 0xda, 0xae, 0x8c, 0x8e, 0x4b, 0x95, 0xc4, 0x77, 0x03, 0x13, 0xb7, 0x06, 0x44, 0x7e, + 0x83, 0x24, 0xc2, 0x95, 0x58, 0x5e, 0x8e, 0x64, 0x0d, 0xcb, 0x56, 0xfc, 0xcc, 0x14, 0xf8, 0xec, + 0x53, 0x23, 0x3b, 0x38, 0x99, 0x6e, 0x34, 0x3a, 0xdf, 0xb3, 0xf5, 0xeb, 0x57, 0xd3, 0xd5, 0x00, + 0x3d, 0x7c, 0xf8, 0xac, 0x56, 0x44, 0xb6, 0xce, 0x1c, 0x86, 0x1c, 0x07, 0x69, 0x39, 0x73, 0x4e, + 0x2d, 0x56, 0xf0, 0xd2, 0x45, 0x49, 0x6b, 0x24, 0x4d, 0xde, 0x06, 0x30, 0xb9, 0xd9, 0x1a, 0x1e, + 0x1c, 0xeb, 0x97, 0x15, 0x72, 0xee, 0x3f, 0xcc, 0x76, 0x6a, 0xcb, 0xc2, 0x4f, 0x39, 0x30, 0x4f, + 0x1f, 0x7d, 0x16, 0x14, 0xcf, 0x4d, 0x98, 0xf5, 0x07, 0x00, 0xb2, 0x0f, 0x4d, 0x82, 0xb6, 0x41, + 0xf0, 0xf1, 0xc9, 0x49, 0xe9, 0x79, 0x1c, 0xd9, 0xef, 0x89, 0x67, 0x3c, 0x9f, 0x0f, 0x68, 0x85, + 0x39, 0xef, 0x70, 0x16, 0x47, 0xcd, 0x3c, 0x2e, 0x9f, 0xda, 0x22, 0x16, 0x13, 0xc5, 0x16, 0x48, + 0x7b, 0x3e, 0xff, 0x51, 0x09, 0xd6, 0xfd, 0xfe, 0x45, 0xf1, 0x11, 0xd5, 0x2c, 0x0f, 0x91, 0x92, + 0x77, 0x8b, 0x2d, 0x0b, 0xa4, 0x24, 0x50, 0x11, 0x56, 0x87, 0x80, 0x6e, 0x0c, 0xe9, 0x45, 0x65, + 0x03, 0x48, 0xbf, 0x26, 0xba, 0xff, 0x34, 0x31, 0xeb, 0xf0, 0x68, 0x6c, 0x0a, 0x31, 0x80, 0xe2, + 0xf3, 0x3c, 0x7c, 0xc9, 0x7a, 0xa8, 0xb5, 0x38, 0x4d, 0xc7, 0xed, 0xfb, 0x5e, 0xce, 0x62, 0xfd, + 0xa0, 0xc0, 0x7c, 0x8c, 0x5e, 0xc4, 0x2e, 0x07, 0xda, 0x3d, 0x84, 0xf8, 0x7e, 0x46, 0x4b, 0x87, + 0x71, 0x3c, 0x72, 0x52, 0x9b, 0x60, 0x2c, 0x10, 0x60, 0xc9, 0x77, 0xb8, 0xaf, 0x9f, 0x71, 0x26, + 0xad, 0xeb, 0xf5, 0x0c, 0x93, 
0x57, 0x6b, 0x6c, 0x1d, 0xd1, 0x68, 0xe4, 0x14, 0x67, 0xdd, 0x28, + 0xe8, 0x44, 0x4a, 0xbe, 0xfa, 0xd0, 0xf6, 0x33, 0xec, 0x1e, 0xac, 0xcb, 0xe9, 0x35, 0xd2, 0x8c, + 0x18, 0xca, 0xf3, 0x75, 0xbb, 0x12, 0x4d, 0x77, 0x25, 0x02, 0x19, 0x78, 0x64, 0x50, 0x85, 0xd8, + 0xa2, 0xe0, 0xfb, 0x27, 0xe1, 0xd2, 0x9a, 0x46, 0x5b, 0x0e, 0x04, 0x3e, 0x07, 0xb1, 0xb9, 0x88, + 0xcc, 0x62, 0x6d, 0xae, 0xd7, 0x65, 0x84, 0x9d, 0xd0, 0x30, 0xcb, 0x63, 0x44, 0x52, 0x2a, 0xd6, + 0xe0, 0xc2, 0xb6, 0x10, 0xdd, 0xc1, 0x0f, 0x60, 0xfb, 0x2b, 0x29, 0x59, 0x85, 0x7a, 0x3b, 0x7e, + 0xdc, 0x6a, 0x2f, 0xd2, 0xb8, 0x12, 0x92, 0xde, 0x53, 0x1c, 0x1b, 0x20, 0x06, 0x75, 0x0f, 0x79, + 0xdb, 0x11, 0x77, 0xdf, 0x8c, 0xec, 0xe3, 0xc3, 0xa3, 0x81, 0x7f, 0x87, 0x4c, 0x86, 0xdc, 0xc2, + 0x4c, 0x84, 0x46, 0xa6, 0x6e, 0xac, 0xd4, 0x5f, 0x97, 0xe7, 0xdb, 0x68, 0xb3, 0x46, 0x7c, 0x7d, + 0xe0, 0x6d, 0xf4, 0x4f, 0x21, 0x0e, 0x66, 0xa2, 0x7d, 0x95, 0xa8, 0x41, 0x30, 0xa7, 0x9d, 0x76, + 0xda, 0xa7, 0xc5, 0x02, 0x6b, 0x5a, 0x42, 0x7b, 0x68, 0x5d, 0xe1, 0x2b, 0xbf, 0xb0, 0x5d, 0x46, + 0xfc, 0x37, 0x61, 0x44, 0x94, 0x59, 0x37, 0x80, 0x85, 0xe7, 0x66, 0x80, 0x8c, 0x67, 0x31, 0xe2, + 0x8d, 0xab, 0xe2, 0x3d, 0xeb, 0x5e, 0x5a, 0x84, 0x8e, 0x70, 0x56, 0xa4, 0x36, 0xc1, 0xbb, 0xf6, + 0x10, 0x52, 0x52, 0x4b, 0x3f, 0xed, 0x9a, 0x0c, 0x53, 0x68, 0x09, 0xd6, 0x51, 0x3b, 0x08, 0xbb, + 0xdd, 0x15, 0xae, 0x2c, 0xb1, 0x92, 0x82, 0x8c, 0x2a, 0x5f, 0xf1, 0xb9, 0xbd, 0xb4, 0x55, 0x6d, + 0xc3, 0x7f, 0x87, 0x97, 0x69, 0x34, 0x93, 0xe5, 0x53, 0x75, 0x8f, 0xfe, 0xb4, 0x90, 0x17, 0x24, + 0x9f, 0x8b, 0xc4, 0xc8, 0xe2, 0x46, 0xae, 0x76, 0xe5, 0x76, 0xda, 0x5a, 0xe3, 0x7e, 0x7a, 0x9b, + 0xc3, 0xf3, 0x2c, 0x0d, 0x52, 0xca, 0x85, 0x1d, 0x0f, 0xc4, 0xc8, 0xc6, 0x1b, 0x68, 0xed, 0x4a, + 0x9e, 0xe0, 0xde, 0xc3, 0x21, 0x34, 0x51, 0x1e, 0x0f, 0x0c, 0xa9, 0x09, 0x8e, 0x93, 0xf9, 0x04, + 0x71, 0x8b, 0xc3, 0xd6, 0x32, 0xc3, 0x8c, 0x0a, 0x8a, 0x7a, 0x98, 0xa1, 0x4c, 0x75, 0x3a, 0xa4, + 0x71, 0x74, 0xf2, 0xdc, 0x0e, 0x59, 0xdb, 0x74, 0x1e, 0xfc, 0xf5, 0x43, 0x9e, 0xaa, 0xd7, 0xea, + 0x19, 0x7f, 0xaa, 0x3d, 0x96, 0x05, 0xc1, 0x0d, 0xf1, 0x73, 0xb2, 0x08, 0x37, 0x79, 0xce, 0xa6, + 0x16, 0x59, 0x60, 0x8c, 0xfe, 0x9f, 0xef, 0xf7, 0x7b, 0x95, 0x4e, 0xa2, 0xde, 0xd2, 0x19, 0x8e, + 0x11, 0xf7, 0xa3, 0x6e, 0xea, 0x32, 0xe5, 0x5f, 0xd3, 0x1c, 0x6c, 0x2a, 0xf3, 0xe7, 0x16, 0xdf, + 0x71, 0xaf, 0xb9, 0x81, 0x4a, 0xf1, 0x30, 0xd9, 0x2f, 0x5e, 0x70, 0x82, 0xf8, 0x07, 0x39, 0x24, + 0x16, 0x02, 0x78, 0xc4, 0x29, 0x24, 0xad, 0xc8, 0xb2, 0x2c, 0xf6, 0x37, 0xa1, 0x53, 0x6c, 0x67, + 0x26, 0x69, 0xa0, 0x87, 0x5e, 0x86, 0x11, 0x4b, 0xa9, 0x85, 0xbb, 0x0e, 0xb9, 0x58, 0x2b, 0x0d, + 0x0e, 0x1b, 0xb1, 0x01, 0x22, 0x50, 0xa9, 0x04, 0xff, 0xf8, 0x7b, 0xb6, 0xb9, 0x51, 0xe5, 0xdd, + 0x1c, 0x21, 0x09, 0x77, 0x12, 0x0a, 0x2e, 0x29, 0x84, 0x82, 0xcc, 0xd5, 0x26, 0xa9, 0x84, 0xdc, + 0xf7, 0xd0, 0x10, 0x0a, 0x21, 0xb1, 0xd7, 0x0f, 0xfa, 0x1a, 0xc6, 0xa6, 0x24, 0x96, 0x49, 0x25, + 0xcd, 0x84, 0x53, 0x0b, 0x2b, 0xab, 0x20, 0x81, 0x87, 0x12, 0x83, 0x93, 0x51, 0x69, 0xe7, 0x33, + 0xa8, 0x70, 0x3e, 0x4b, 0xc2, 0xb6, 0x5c, 0xb6, 0xea, 0x48, 0xee, 0x2f, 0x13, 0xd4, 0xc0, 0xc9, + 0x37, 0x3e, 0xc7, 0xe4, 0x2c, 0xb3, 0x11, 0x7c, 0x15, 0xc5, 0x57, 0xbc, 0x9d, 0x8c, 0xb6, 0x5f, + 0x5d, 0xb6, 0x5b, 0x90, 0x2b, 0x9a, 0x7a, 0xf7, 0x53, 0x38, 0x82, 0x41, 0x94, 0xca, 0x67, 0xc9, + 0xce, 0x76, 0xa4, 0x33, 0xeb, 0x8a, 0x45, 0xaf, 0x44, 0x91, 0x34, 0x3c, 0xb9, 0x4f, 0x2e, 0x9d, + 0xf6, 0xde, 0x2d, 0xe1, 0x6b, 0xb9, 0x59, 0xec, 0x03, 0xce, 0x8b, 0x7b, 0x8d, 0xd4, 0x4d, 0xd7, + 0x73, 0xb2, 0x04, 0x8b, 0x96, 0x9d, 0xab, 0x1c, 0x1c, 
0xa0, 0x88, 0x70, 0x66, 0x69, 0xa6, 0x9a, + 0x50, 0xf9, 0xa2, 0x2d, 0xe6, 0x99, 0x23, 0x27, 0x74, 0x46, 0x2f, 0x94, 0x7b, 0x64, 0x40, 0x69, + 0x06, 0xcf, 0xcb, 0xeb, 0x09, 0x7e, 0x88, 0x6d, 0xd3, 0xc3, 0x28, 0x98, 0x13, 0x3c, 0x06, 0x2f, + 0xb5, 0xfc, 0x9e, 0xf1, 0x5a, 0x5d, 0x6b, 0x66, 0xcc, 0x29, 0x48, 0xfe, 0xb3, 0x1a, 0xd9, 0x57, + 0x1e, 0x4d, 0x73, 0xb5, 0xe4, 0x95, 0x74, 0x6b, 0x76, 0xda, 0x66, 0x25, 0x0c, 0xad, 0xd8, 0xad, + 0x31, 0xe2, 0x14, 0x52, 0xa5, 0x5c, 0x2c, 0xfd, 0xdd, 0xf8, 0x3f, 0x08, 0x19, 0x11, 0x74, 0xa7, + 0x8b, 0xcd, 0x3f, 0xa1, 0x91, 0x64, 0x98, 0x18, 0xb5, 0xc6, 0x84, 0x46, 0xa0, 0xe8, 0xc1, 0x14, + 0x0a, 0x77, 0xc1, 0xdb, 0xfd, 0xb0, 0x30, 0x2f, 0x9a, 0xae, 0x4e, 0x0c, 0x2f, 0xe8, 0x2e, 0x5a, + 0x79, 0x65, 0x01, 0x6f, 0x0f, 0xa1, 0x30, 0xee, 0xc5, 0xef, 0x37, 0x5b, 0xd4, 0x63, 0xba, 0xf1, + 0x94, 0xba, 0xaa, 0x4c, 0xf5, 0x52, 0xe7, 0x4b, 0x6a, 0x7c, 0xcb, 0x59, 0xa3, 0xbf, 0xd6, 0x50, + 0x31, 0x98, 0x4a, 0xf6, 0xa2, 0x4b, 0x8f, 0xec, 0x22, 0xe1, 0x42, 0x75, 0xa0, 0xac, 0x97, 0xa5, + 0x0b, 0x57, 0x2f, 0x1d, 0xad, 0xee, 0x7c, 0xc4, 0x70, 0x0a, 0x77, 0x52, 0x52, 0x5d, 0x09, 0x59, + 0xc7, 0x4a, 0x46, 0x31, 0x8f, 0x2f, 0x75, 0xd1, 0x06, 0xcf, 0x16, 0x3d, 0xf1, 0x26, 0xb8, 0x61, + 0xb7, 0xf4, 0x4f, 0x48, 0x27, 0x42, 0x51, 0x50, 0x8d, 0xf1, 0xfa, 0x73, 0x85, 0x29, 0xce, 0x31, + 0xf0, 0x0f, 0x96, 0x68, 0x11, 0x6d, 0xaf, 0x6c, 0x8f, 0xae, 0x4f, 0xe8, 0xb6, 0x7f, 0xab, 0x01, + 0xe3, 0xef, 0x9b, 0x1b, 0x2a, 0x64, 0x8d, 0xbb, 0xe3, 0x5c, 0xb9, 0x5a, 0x8c, 0xf6, 0x04, 0xa3, + 0x86, 0x90, 0x56, 0xf6, 0x24, 0x9c, 0xfd, 0x69, 0x5a, 0xad, 0x23, 0xa1, 0x27, 0x3c, 0x29, 0xeb, + 0x23, 0x2f, 0x96, 0xf3, 0xad, 0x6c, 0x9f, 0x42, 0x6d, 0xc1, 0xe5, 0x64, 0x6e, 0xfa, 0x50, 0x30, + 0xb2, 0x24, 0x91, 0xb8, 0x62, 0x29, 0x73, 0x7f, 0xcb, 0xeb, 0x4a, 0x49, 0xe3, 0xe8, 0x29, 0x19, + 0x96, 0x56, 0x64, 0x93, 0x7d, 0xa8, 0xa5, 0x1a, 0x5c, 0xb7, 0x55, 0x44, 0x84, 0xfc, 0x01, 0x6e, + 0x01, 0x9f, 0xfc, 0x68, 0xa8, 0x07, 0xd7, 0x7e, 0xc5, 0x53, 0x50, 0x80, 0xfa, 0x67, 0x36, 0xb0, + 0x8c, 0xa8, 0x4f, 0x50, 0x37, 0x62, 0x8c, 0xc8, 0x2c, 0x95, 0xac, 0x69, 0xbb, 0xb8, 0xd7, 0x23, + 0x1b, 0xf9, 0x73, 0x1c, 0xb2, 0xde, 0xdb, 0x93, 0xeb, 0xc0, 0xd1, 0x27, 0x2f, 0x98, 0x0f, 0xd5, + 0x31, 0x5e, 0xe5, 0x74, 0xc1, 0xf9, 0x81, 0xa8, 0x35, 0x9a, 0x27, 0x59, 0xd6, 0x8a, 0x61, 0xba, + 0xac, 0x2b, 0xa3, 0x15, 0x8a, 0xa0, 0x04, 0xfa, 0x62, 0x17, 0x4c, 0xe0, 0xb9, 0xc7, 0x6e, 0x9a, + 0xe8, 0xa2, 0x68, 0xcc, 0xa1, 0x90, 0x4e, 0x44, 0x8a, 0x93, 0xa4, 0xe8, 0x76, 0xe1, 0x6e, 0x87, + 0xfc, 0xb1, 0x34, 0x33, 0x20, 0x2b, 0x1c, 0x66, 0xdc, 0x4e, 0xc8, 0x32, 0x2b, 0x9f, 0x14, 0xa0, + 0x89, 0x72, 0xdf, 0x53, 0x16, 0x17, 0x6f, 0x6a, 0x7c, 0xd7, 0x60, 0x20, 0x39, 0x52, 0x00, 0xde, + 0xc5, 0xab, 0x4e, 0x5d, 0x33, 0x29, 0x01, 0x2d, 0x0b, 0xc4, 0x6c, 0xa6, 0xd6, 0x66, 0xce, 0xb2, + 0x92, 0xfe, 0x9a, 0x8d, 0x2b, 0xc0, 0x60, 0x47, 0x8d, 0x1c, 0xb0, 0xce, 0x57, 0x19, 0xdb, 0xf8, + 0x59, 0xed, 0x85, 0xf3, 0x81, 0xdf, 0xfb, 0x57, 0x1c, 0xda, 0x67, 0xd9, 0xfa, 0xac, 0x6a, 0x11, + 0x43, 0x7b, 0x39, 0x01, 0xd9, 0x6a, 0xd3, 0xa1, 0x5c, 0x78, 0x5c, 0x1e, 0x71, 0xa6, 0xfd, 0xbf, + 0xcd, 0xfa, 0xe5, 0x41, 0x7a, 0x46, 0x2c, 0xaf, 0x7f, 0x86, 0xc3, 0x96, 0x35, 0xec, 0xb5, 0xe1, + 0x3f, 0xa8, 0xa8, 0xda, 0x36, 0x27, 0x7d, 0xfc, 0x0a, 0x22, 0xc7, 0x4f, 0x14, 0xaf, 0xf1, 0xd1, + 0xcb, 0xad, 0x2e, 0x3e, 0x49, 0x55, 0x82, 0x0d, 0x86, 0x8e, 0x18, 0x34, 0xb0, 0x6a, 0x68, 0x48, + 0xce, 0xc8, 0x72, 0x82, 0xb9, 0x92, 0xfe, 0x04, 0xc2, 0x74, 0x04, 0x38, 0xa7, 0xba, 0xd7, 0xf2, + 0x95, 0x5f, 0x38, 0x29, 0xb7, 0xd6, 0x76, 0xb2, 0x49, 0x77, 0xab, 0x96, 0xda, 
0xd3, 0x26, 0x6c, + 0x3c, 0x0e, 0x96, 0xde, 0xe5, 0x53, 0xaa, 0x34, 0xcc, 0xa0, 0x31, 0xd4, 0xe0, 0x8c, 0x6f, 0xdf, + 0xa7, 0xe3, 0x09, 0xd4, 0xdd, 0x67, 0xcd, 0xbc, 0xfd, 0x6d, 0xe8, 0xc9, 0xd8, 0x55, 0x4a, 0x2f, + 0xfe, 0xf7, 0x18, 0xae, 0x12, 0x05, 0x34, 0xe9, 0x4b, 0xfd, 0x25, 0x15, 0xcc, 0xc5, 0x4f, 0xd1, + 0x04, 0xbe, 0xbe, 0x8b, 0xbd, 0xaa, 0xf6, 0xfb, 0x98, 0x1c, 0x9a, 0xf5, 0xad, 0x76, 0xd2, 0x31, + 0x5c, 0x61, 0xe3, 0x90, 0xc5, 0x3b, 0xe3, 0xf6, 0x28, 0x56, 0x45, 0x29, 0x23, 0xfe, 0x3f, 0xe7, + 0x57, 0x1c, 0x6a, 0x89, 0x28, 0x3a, 0x51, 0x1f, 0xba, 0x03, 0x6d, 0xd7, 0xe7, 0x95, 0x84, 0x85, + 0xa5, 0xf5, 0xeb, 0x59, 0xa8, 0xce, 0xd0, 0x9e, 0x71, 0xc5, 0x8a, 0x5a, 0x99, 0xd0, 0x3c, 0x9f, + 0xdf, 0x5d, 0x39, 0x51, 0x29, 0x7b, 0x3a, 0xdc, 0x61, 0x5f, 0x42, 0x87, 0xb0, 0xd0, 0x90, 0xce, + 0x5d, 0x32, 0x32, 0x3d, 0x03, 0x0f, 0xb8, 0x0d, 0x98, 0x16, 0xdb, 0x8e, 0xb7, 0x8c, 0xb4, 0x20, + 0x47, 0xf6, 0x5c, 0x31, 0x78, 0x29, 0x96, 0x9e, 0x42, 0x8a, 0x15, 0xe2, 0x41, 0x36, 0x4e, 0xa9, + 0x94, 0xf3, 0xe2, 0x2c, 0xc7, 0x2d, 0x21, 0x5a, 0xf0, 0x44, 0x65, 0x6d, 0x6a, 0x62, 0xae, 0x54, + 0x94, 0xe1, 0x43, 0x8e, 0x86, 0xbf, 0x89, 0x1d, 0x2c, 0x0e, 0xfe, 0x79, 0xf4, 0x3a, 0x7b, 0xdd, + 0xb3, 0x60, 0x23, 0x39, 0x84, 0x55, 0xf6, 0xed, 0x63, 0xb4, 0xb8, 0x76, 0x6b, 0x49, 0x89, 0x15, + 0x21, 0x9c, 0xf7, 0xe5, 0xb5, 0x12, 0x5a, 0x37, 0x7a, 0x91, 0xcb, 0xb2, 0xd5, 0x99, 0x44, 0xe9, + 0xc1, 0x87, 0xce, 0xf5, 0x01, 0x80, 0x51, 0xa1, 0xce, 0xbd, 0xb6, 0xd4, 0xa7, 0xcd, 0x53, 0x0a, + 0x8e, 0x19, 0x0d, 0x31, 0x07, 0xd7, 0xcd, 0xcc, 0x78, 0x4c, 0xa3, 0x50, 0x98, 0x37, 0x6f, 0x8d, + 0xbe, 0x1b, 0x81, 0x79, 0x5d, 0xeb, 0x48, 0x39, 0xac, 0x85, 0xfb, 0x79, 0x78, 0x29, 0xfb, 0x4b, + 0xe4, 0xc7, 0x3f, 0x3c, 0x08, 0xae, 0xb5, 0x33, 0xc1, 0x2c, 0x81, 0x32, 0x2f, 0x37, 0xa6, 0x40, + 0xd7, 0x78, 0xc2, 0xae, 0x01, 0xce, 0x9a, 0x86, 0x6b, 0xf6, 0x63, 0x27, 0x69, 0xd0, 0xa6, 0x20, + 0x9a, 0x8a, 0x57, 0x7a, 0x4f, 0x82, 0xc0, 0x5e, 0x4e, 0x2a, 0x6a, 0x86, 0xe7, 0xf4, 0xa2, 0xbb, + 0xbe, 0xa2, 0x18, 0x44, 0x5b, 0xa9, 0xe0, 0x8b, 0xbe, 0x51, 0xed, 0xfb, 0x89, 0x0f, 0xcb, 0xcc, + 0x4c, 0x54, 0x29, 0x14, 0x19, 0x88, 0x61, 0x3f, 0x57, 0x8f, 0xe9, 0xe1, 0xf6, 0x10, 0x34, 0x7e, + 0x8d, 0x26, 0xb0, 0x01, 0x90, 0x32, 0x56, 0x7e, 0xb6, 0x6d, 0x06, 0xa1, 0x94, 0x83, 0x16, 0xc5, + 0x52, 0xd3, 0x9a, 0x57, 0x4a, 0xb4, 0xb1, 0x5a, 0x31, 0x54, 0x53, 0xe0, 0xe0, 0x73, 0x1a, 0x77, + 0x26, 0x1d, 0x1a, 0x4a, 0x0a, 0x75, 0xd3, 0x0d, 0xef, 0xb2, 0x7e, 0x74, 0xe5, 0x9d, 0x70, 0x3e, + 0xda, 0xc1, 0x50, 0x33, 0xda, 0xe9, 0xcb, 0xbd, 0x80, 0x94, 0xd2, 0x09, 0x16, 0x6c, 0x0b, 0x71, + 0x38, 0x7d, 0x5d, 0xdf, 0xf7, 0x6c, 0x33, 0x27, 0xd4, 0x27, 0x89, 0xe3, 0x60, 0x15, 0x68, 0xaf, + 0xaa, 0xcb, 0x70, 0x19, 0xe7, 0x25, 0x9c, 0x49, 0x3a, 0x8c, 0x55, 0xcc, 0xe9, 0xd9, 0x62, 0x9f, + 0x27, 0x15, 0xa4, 0xff, 0x86, 0x2d, 0xb8, 0x3f, 0x26, 0xd0, 0x56, 0xb4, 0x92, 0x5e, 0xf8, 0xe4, + 0x94, 0xcf, 0x8d, 0x1a, 0x4e, 0x08, 0xc3, 0x4e, 0xce, 0x28, 0x44, 0x2d, 0xc0, 0xcb, 0x36, 0x78, + 0x14, 0xdd, 0x0f, 0x2a, 0x7f, 0x93, 0x6c, 0xaa, 0xa9, 0xab, 0x86, 0x70, 0x9d, 0xaa, 0xc2, 0xac, + 0xd2, 0xc9, 0x9e, 0x96, 0x0f, 0xc2, 0x65, 0x6a, 0xd0, 0xc4, 0x97, 0x66, 0xc2, 0x78, 0xef, 0xbd, + 0x84, 0xb1, 0x30, 0xae, 0x30, 0x45, 0x47, 0xa2, 0xdf, 0xd9, 0x4f, 0x36, 0xc9, 0xaf, 0x96, 0xcd, + 0xac, 0x59, 0x6f, 0x6a, 0xa0, 0x6a, 0xdd, 0x90, 0xa7, 0x3f, 0x79, 0xfe, 0xb6, 0x20, 0x4f, 0xf1, + 0xa7, 0x30, 0x6d, 0x27, 0x13, 0xfc, 0xf7, 0x73, 0xa1, 0xf9, 0xbc, 0xc4, 0x25, 0xad, 0x70, 0xd6, + 0x6c, 0xa4, 0x94, 0x26, 0x37, 0x21, 0x5d, 0x6f, 0x0a, 0x6f, 0xfd, 0x0c, 0xf3, 0xc0, 0xe2, 0xcf, + 0x20, 
0x14, 0x7a, 0xb5, 0xd8, 0xd8, 0xf8, 0xdc, 0x60, 0x54, 0xef, 0xc7, 0x69, 0x4d, 0xd3, 0xae, + 0xb9, 0x18, 0xd3, 0xa0, 0x5c, 0xdb, 0xdb, 0x31, 0xe7, 0xd3, 0xe6, 0xac, 0xc2, 0xb4, 0xbb, 0xa4, + 0x1f, 0xe2, 0x80, 0xdc, 0xe6, 0x64, 0x7e, 0xea, 0xb2, 0xe5, 0x71, 0x54, 0xd1, 0x82, 0xc0, 0x0b, + 0x1f, 0x30, 0xc2, 0x91, 0x16, 0xf4, 0xeb, 0x06, 0x97, 0xa7, 0x73, 0xec, 0x18, 0xfe, 0xe8, 0xf6, + 0xbb, 0x5c, 0x7f, 0x4d, 0x3f, 0xda, 0x95, 0x2f, 0xad, 0xaa, 0xf3, 0xfa, 0x49, 0xf5, 0xe9, 0x83, + 0x66, 0xd3, 0x38, 0x8f, 0xd3, 0xb1, 0x79, 0x64, 0xb8, 0x9d, 0x35, 0x75, 0xce, 0x90, 0xee, 0x09, + 0x5d, 0x88, 0x79, 0x11, 0x24, 0x62, 0xa0, 0xfc, 0x88, 0xc7, 0x20, 0xe1, 0xf5, 0x88, 0x3d, 0xc0, + 0x8e, 0xa6, 0xe8, 0xc6, 0x94, 0x85, 0x34, 0xe8, 0x21, 0x4f, 0x67, 0x06, 0x3d, 0x79, 0x65, 0x71, + 0x40, 0x66, 0x1f, 0xc9, 0xef, 0x22, 0x0b, 0x21, 0x25, 0x3f, 0xb5, 0x0b, 0xc5, 0x69, 0xf6, 0xad, + 0x73, 0x99, 0x68, 0x66, 0x7f, 0x54, 0x39, 0xa7, 0xa5, 0xff, 0xc7, 0x43, 0x60, 0xc6, 0x58, 0xad, + 0x08, 0xdf, 0xfd, 0x99, 0xc6, 0xae, 0xc5, 0x99, 0x3b, 0x79, 0xc3, 0x6a, 0x61, 0x36, 0x7f, 0x17, + 0x05, 0x84, 0x3f, 0xe8, 0xf4, 0x27, 0xd2, 0x06, 0xaf, 0xf6, 0x22, 0xe2, 0x6b, 0x46, 0x00, 0x44, + 0x15, 0x29, 0x2a, 0x9e, 0x34, 0xde, 0x8f, 0x20, 0x52, 0x05, 0xb7, 0x3f, 0x80, 0xf7, 0x34, 0xfe, + 0x6e, 0xfe, 0xfc, 0xa6, 0xc9, 0x38, 0x9b, 0x0e, 0x1c, 0x87, 0x7d, 0xca, 0x6e, 0x88, 0xee, 0xfb, + 0xc7, 0x97, 0xfd, 0xc2, 0x92, 0xdd, 0xe4, 0x09, 0x83, 0xd8, 0xe1, 0xae, 0x48, 0xaa, 0xe9, 0xb2, + 0x39, 0x2e, 0xc2, 0x15, 0x7b, 0xd3, 0xcb, 0xbd, 0xd6, 0xa1, 0xd5, 0x66, 0x0d, 0x47, 0xbd, 0x7b, + 0x10, 0x71, 0x03, 0xfa, 0xc3, 0xc0, 0x50, 0x77, 0x67, 0x5e, 0x42, 0x8d, 0xc1, 0x36, 0xb5, 0x72, + 0x68, 0x64, 0xbe, 0x5d, 0xb5, 0x87, 0x9c, 0x4b, 0xce, 0x2c, 0xca, 0x07, 0xd9, 0x47, 0x20, 0xbf, + 0x94, 0x65, 0x8b, 0x11, 0xb2, 0x27, 0xc7, 0x69, 0x4a, 0xba, 0xdb, 0xff, 0xe4, 0x18, 0x7b, 0x43, + 0x17, 0xcc, 0x17, 0x98, 0xae, 0x4a, 0x93, 0xe6, 0x02, 0xca, 0xf7, 0x96, 0x05, 0x11, 0xa0, 0x10, + 0xad, 0x63, 0xc3, 0x63, 0xa0, 0x35, 0xb4, 0x25, 0xa8, 0x3b, 0xe5, 0x25, 0x7f, 0xaf, 0x4f, 0x8c, + 0x16, 0xbc, 0x69, 0x9d, 0xd7, 0xff, 0x70, 0xd8, 0x78, 0x1e, 0x8f, 0x3c, 0x9b, 0x1a, 0x8c, 0x10, + 0xaa, 0xd9, 0xc0, 0x9a, 0x10, 0x16, 0xc8, 0xd2, 0x0a, 0x87, 0x18, 0x5a, 0x45, 0x28, 0x6c, 0xf4, + 0x8a, 0xea, 0xdb, 0x7b, 0x18, 0x76, 0xe1, 0x1f, 0x49, 0x7a, 0x7a, 0xb1, 0xc3, 0x6b, 0xfe, 0x29, + 0x04, 0x33, 0xf9, 0x9e, 0x85, 0x68, 0x62, 0x1a, 0x22, 0x8c, 0x77, 0x50, 0x37, 0xe5, 0xc4, 0x50, + 0x1a, 0xfb, 0x11, 0xc9, 0x0f, 0x07, 0x5c, 0xf7, 0x16, 0x90, 0x36, 0xef, 0x42, 0xdf, 0xe1, 0x85, + 0xc3, 0xfc, 0xb9, 0x18, 0xfa, 0x51, 0x02, 0x0b, 0xe1, 0x8c, 0xaa, 0x62, 0xc0, 0xde, 0xe8, 0xec, + 0x33, 0xdb, 0xdc, 0x99, 0x50, 0xa9, 0xb1, 0x22, 0x51, 0xbf, 0x6a, 0x24, 0x32, 0xc5, 0x76, 0x9e, + 0xb2, 0xe3, 0x53, 0xc5, 0x52, 0x60, 0x9a, 0x77, 0x46, 0x84, 0x93, 0x9c, 0x34, 0x55, 0xbc, 0xe7, + 0xd9, 0xa7, 0xd4, 0x08, 0x97, 0xad, 0xf1, 0x58, 0x4f, 0x1f, 0x8a, 0x91, 0x7a, 0xa7, 0xc6, 0xab, + 0x98, 0xa2, 0xff, 0xd7, 0xfe, 0xff, 0xb6, 0x2d, 0xfa, 0x2f, 0xff, 0xe5, 0xbf, 0xfc, 0x97, 0xff, + 0xf2, 0xff, 0x0b, 0xff, 0x03, 0x6e, 0xd2, 0x16, 0x00, 0x00, 0x21, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 8448, // uncompressed data size (bytes) + 6797, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_image_prod_data, // compressed data 
pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("header_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_prod.h +// FILE TYPE: TEXT +// VAR NAME: booter_ucode_header_tu11x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 36 +// COMPRESSED SIZE (bytes): 27 +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_header_prod_data[] = +{ + 0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x06, 0x62, 0x36, 0x08, 0x13, 0x4c, 0x48, 0x41, 0x69, + 0x20, 0x00, 0x00, 0x37, 0x0f, 0x4b, 0x90, 0x24, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_header_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 36, // uncompressed data size (bytes) + 27, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_header_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("sig_dbg") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_dbg_tu11x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_sig_dbg_data[] = +{ + 0x6a, 0x5e, 0x0c, 0x35, 0x0d, 0x7e, 0x26, 0x93, 0xd3, 0x96, 0x82, 0x5e, 0xcc, 0x37, 0xe3, 0xc6, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_sig_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_sig_dbg_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("sig_prod") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_prod_tu11x +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 16 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_sig_prod_data[] = +{ + 0x37, 0x5b, 0xb4, 0x8f, 0x0f, 0x86, 0x44, 0xb7, 0xba, 0xdf, 0xb7, 0xbb, 0x72, 0xe4, 0xec, 0x9a, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_sig_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 16, // uncompressed data size (bytes) + 16, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_sig_prod_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("patch_loc") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_tu11x_patch_location +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_patch_loc_data[] = +{ + 0x00, 0x1d, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_patch_loc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_patch_loc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("patch_sig") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_tu11x_patch_signature +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_patch_sig_data[] = +{ + 0x00, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_patch_sig_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_patch_sig_data, // compressed data pointer + NV_FALSE, // is pData compressed? 
+ NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("patch_meta") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: booter_unload_sig_tu11x_patch_meta_data +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_patch_meta_data[] = +{ + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_patch_meta_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12, // uncompressed data size (bytes) + 12, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_patch_meta_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveBooterUnloadUcode_TU116("num_sigs") +// FILE NAME: kernel/inc/gsprm/bin/booter/tu11x/unload/g_booteruc_unload_tu11x_tu116_aes_sig.h +// FILE TYPE: TEXT +// VAR NAME: num_sigs_per_ucode +// COMPRESSION: NO +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4 +// COMPRESSED SIZE (bytes): N/A +// +static BINDATA_CONST NvU8 kgspBinArchiveBooterUnloadUcode_TU116_num_sigs_data[] = +{ + 0x01, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveBooterUnloadUcode_TU116_num_sigs_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4, // uncompressed data size (bytes) + 4, // compressed data size (bytes) + kgspBinArchiveBooterUnloadUcode_TU116_num_sigs_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveBooterUnloadUcode_TU116 = +{ + 10, // entryNum + { + // entries[] : { "name", pBinStorage } + { "image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_image_dbg_storage_pvt }, + { "header_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_header_dbg_storage_pvt }, + { "image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_image_prod_storage_pvt }, + { "header_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_header_prod_storage_pvt }, + { "sig_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_sig_dbg_storage_pvt }, + { "sig_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_sig_prod_storage_pvt }, + { "patch_loc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_patch_loc_storage_pvt }, + { "patch_sig" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_patch_sig_storage_pvt }, + { "patch_meta" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_patch_meta_storage_pvt }, + { "num_sigs" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveBooterUnloadUcode_TU116_num_sigs_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_TU116(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveBooterUnloadUcode_TU116; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_GA100.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_GA100.c new file mode 100644 index 000000000..dff2bd6aa --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_GA100.c @@ -0,0 +1,174 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveGspRmBoot_GA100("ucode_image") +// FILE NAME: kernel/inc/gsprm/bin/g_gsprm_ga100_riscv_image.bin +// FILE TYPE: BINARY +// VAR NAME: N/A +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4096 +// COMPRESSED SIZE (bytes): 811 +// +static BINDATA_CONST NvU8 kgspBinArchiveGspRmBoot_GA100_ucode_image_data[] = +{ + 0xed, 0x53, 0x31, 0x4c, 0x14, 0x41, 0x14, 0xfd, 0x33, 0x73, 0xbb, 0xb3, 0x84, 0x0b, 0xde, 0x65, + 0x21, 0x8b, 0x60, 0x82, 0x66, 0x4d, 0xb0, 0x24, 0xb9, 0xe3, 0x16, 0x09, 0xc5, 0xc5, 0x42, 0x2c, + 0xd5, 0xc4, 0x60, 0xa7, 0x71, 0x15, 0x8d, 0x0d, 0x8d, 0xe6, 0xda, 0xc3, 0x15, 0xd6, 0x10, 0x1b, + 0xdd, 0xe4, 0xce, 0x04, 0x13, 0xa9, 0xc0, 0xca, 0x98, 0x2c, 0xb2, 0x24, 0x34, 0x16, 0xca, 0x15, + 0x36, 0x26, 0x46, 0xcf, 0xc2, 0x42, 0x5d, 0xe5, 0x28, 0x2c, 0x40, 0xe5, 0xb8, 0x20, 0x72, 0xfe, + 0xdd, 0x9d, 0xc3, 0xc2, 0xda, 0x6e, 0x5f, 0xb2, 0x79, 0x3b, 0xff, 0xbf, 0xf9, 0x7f, 0xfe, 0x9f, + 0xf9, 0xbd, 0x24, 0x05, 0x6a, 0x9a, 0x50, 0x95, 0x90, 0xfc, 0x04, 0x14, 0x61, 0x02, 0x00, 0xca, + 0x1c, 0xc0, 0xb1, 0x2b, 0x23, 0x96, 0xc7, 0x01, 0xed, 0x3b, 0xfa, 0x48, 0x3a, 0x61, 0x2d, 0x70, + 0xd0, 0x87, 0x26, 0x13, 0x7a, 0xf6, 0x5e, 0x42, 0x2d, 0x7b, 0xd4, 0x3c, 0xc1, 0xa9, 0xd0, 0x0d, + 0x07, 0xba, 0xc0, 0xef, 0x6c, 0x6b, 0x60, 0x96, 0x38, 0x94, 0x31, 0x46, 0xad, 0xf8, 0xfc, 0x57, + 0xe4, 0x1f, 0x3d, 0xde, 0xf2, 0xab, 0xe5, 0x02, 0xf5, 0xcf, 0xf2, 0x3d, 0xe1, 0xef, 0x13, 0xfb, + 0x8d, 0x30, 0x8f, 0x34, 0x90, 0x54, 0xbb, 0x6e, 0x32, 0x6b, 0x9e, 0xb7, 0xf2, 0xe7, 0x02, 0xfb, + 0xec, 0xbb, 0x93, 0xe0, 0x94, 0x56, 0x81, 0xe1, 0x7e, 0x4d, 0x06, 0x50, 0xe5, 0xd5, 0x41, 0xd7, + 0x9e, 0x07, 0x96, 0x93, 0xc1, 0x7a, 0x66, 0x1f, 0xe8, 0xe5, 0x1c, 0xfc, 0x07, 0x2f, 0x1b, 0xf8, + 0x8f, 0xbe, 0x14, 0xf8, 0xf7, 0x5f, 0x36, 0x84, 0x2e, 0x13, 0x69, 0x42, 0x7f, 0x9d, 0x2d, 0xf3, + 0x36, 0xff, 0xf2, 0xfb, 0x3a, 0xea, 0x68, 0xef, 0xd8, 0x28, 0xa8, 0x5a, 0x05, 0x3c, 0x99, 0x42, + 0x86, 0x57, 0xc1, 0x95, 0x96, 0xc1, 0x1f, 0xf7, 0x7e, 0x5a, 0x4b, 0xb6, 0xc2, 0x96, 0x38, 0xf7, + 0x3f, 0xae, 0xfe, 0xb4, 0x16, 0x6d, 0x92, 0x99, 0x2e, 0x81, 0x7f, 0xbd, 0xf2, 0x5d, 0xe5, 0xaf, + 0x7f, 0xa8, 0x5a, 0x81, 0x05, 0x71, 0x59, 0xee, 0x64, 0x56, 0xe5, 0xdb, 0x4d, 0x4f, 0xd9, 0x6d, + 0x66, 0xf8, 0x1a, 0x38, 0xef, 0xbb, 0x89, 0xb9, 0xbe, 0x8e, 0x71, 0xe6, 0xf3, 0xfe, 0x65, 0xbe, + 0xe9, 0x7f, 0xfe, 0xba, 0xc1, 0x3c, 0x3b, 0xa9, 0x26, 0xb0, 0x9e, 0xce, 0x1b, 0x4c, 0xe5, 0x1a, + 0x64, 0x6e, 0x4f, 0x83, 0xbe, 0x58, 0x4b, 0x3a, 0xd2, 0x24, 0xd6, 0x99, 0x10, 0xfd, 0xc9, 0xb7, + 0xf9, 0x49, 0xe9, 0x9b, 0x95, 0x9b, 0x04, 0xbf, 0x51, 0xfa, 0xe6, 0xf0, 0x14, 0x18, 0x9c, 0x80, + 0x53, 0xe6, 0x54, 0x1f, 0x22, 0xa0, 0x8f, 0x7c, 0x02, 0x7d, 0x80, 0x50, 0x3d, 0xbb, 0x89, 0xeb, + 0x4f, 0x54, 0x1f, 0x21, 0xd4, 0x9d, 0x9e, 0xc6, 0xfd, 0x84, 0xd4, 0x8a, 0x32, 0x30, 0x43, 0xa2, + 0x8e, 0xbc, 0x81, 0x79, 0xbf, 0x82, 0x65, 0x70, 0x6a, 0xee, 0xd6, 0x29, 0x33, 0x6c, 0x70, 0xed, + 0x1a, 0xf6, 0x8e, 0x37, 0x5d, 0xbe, 0x05, 0xd6, 0x12, 0x07, 0xc3, 0x7e, 0x75, 0x46, 0xe5, 0xd5, + 0x4d, 0x55, 0x02, 0x30, 0x1f, 0xae, 0x81, 0xe5, 0x61, 0x9f, 0xa4, 0x14, 0xe8, 0x4f, 0xb7, 0xe0, + 0x6a, 0x11, 0xfb, 0x84, 0xf6, 0x80, 0xad, 0xdc, 0x14, 0xb1, 0xce, 0x4f, 0xe1, 0x79, 0x51, 0x37, + 0xae, 0x91, 0x7d, 0xfb, 0xd8, 0x15, 0xc6, 0x72, 0x12, 0x5d, 0xb1, 0x6b, 0x34, 0x38, 0x9b, 0x53, + 0xe5, 0xd4, 0xb5, 0x2b, 0x10, 0xc4, 0xc7, 0x5e, 0x11, 0xf3, 0xcb, 0x2a, 0x65, 0xcb, 0x36, 0xcd, + 0x28, 0x32, 0x31, 0xf7, 0x64, 0xd2, 0x8a, 0x8f, 0x9a, 0xbc, 0x6b, 0x57, 0xf7, 0xf3, 0xf4, 0x62, + 0x1f, 
0x26, 0x36, 0x1e, 0xed, 0x44, 0x77, 0x6b, 0xf7, 0x84, 0x77, 0xce, 0xa1, 0x4d, 0x9f, 0xdb, + 0xbf, 0xef, 0x83, 0x81, 0x4d, 0x9f, 0xab, 0x89, 0xf5, 0xe8, 0xc1, 0x48, 0x93, 0x4f, 0xff, 0xb5, + 0xd9, 0xdd, 0x91, 0xed, 0x74, 0xa8, 0x0b, 0xe2, 0x3a, 0x9a, 0x1c, 0x9d, 0xa9, 0xb6, 0x00, 0x6e, + 0x6d, 0x0b, 0xdf, 0xe0, 0xf6, 0x01, 0x73, 0x26, 0x7c, 0xb7, 0x4d, 0x3d, 0x9b, 0x16, 0xbd, 0x86, + 0x37, 0x9a, 0x82, 0xb5, 0x2a, 0x4a, 0x17, 0x1b, 0x52, 0xc0, 0x19, 0x9b, 0x82, 0x59, 0x1b, 0x73, + 0x1d, 0x6e, 0x88, 0xb8, 0xbc, 0x33, 0x8c, 0x7b, 0x6e, 0x8a, 0xce, 0xe2, 0x5b, 0xd3, 0xe7, 0x16, + 0x40, 0x2d, 0x68, 0xe0, 0x15, 0x00, 0xdc, 0x7a, 0x1d, 0x3c, 0xbc, 0x17, 0x13, 0xb8, 0x8c, 0xba, + 0x4b, 0xee, 0x5a, 0x1d, 0xa2, 0x58, 0xa7, 0xd2, 0x0e, 0xee, 0xd5, 0x18, 0xfe, 0x33, 0x96, 0xd6, + 0x3a, 0x90, 0x3b, 0x3a, 0xd2, 0x38, 0x43, 0x94, 0xf5, 0x4b, 0xe1, 0x7e, 0xff, 0x10, 0xdf, 0x63, + 0x06, 0xc3, 0x3c, 0x1a, 0x61, 0x46, 0x07, 0xf2, 0x36, 0x7e, 0x6f, 0x83, 0xdc, 0xa9, 0x95, 0x3b, + 0x1e, 0x98, 0x33, 0x15, 0x60, 0x83, 0x0a, 0xf6, 0xa5, 0xef, 0x37, 0xc6, 0xbe, 0x38, 0xb1, 0x71, + 0x61, 0x27, 0xa8, 0x49, 0xd4, 0xdf, 0xce, 0x82, 0xb9, 0xe9, 0x0f, 0xe6, 0xaa, 0x1f, 0xfc, 0xbb, + 0x38, 0x37, 0xc2, 0xde, 0x9a, 0xa7, 0xd9, 0x6a, 0x7f, 0x34, 0x73, 0xff, 0xd6, 0xfb, 0x22, 0x88, + 0x13, 0x40, 0x49, 0xa4, 0x03, 0x3a, 0x9c, 0x4d, 0xdc, 0x0a, 0xb9, 0x51, 0x8c, 0xd6, 0x47, 0x1f, + 0xf7, 0x85, 0x7c, 0x4c, 0xf0, 0x9c, 0x80, 0x7b, 0xb1, 0xc7, 0x38, 0xf2, 0x64, 0xf8, 0x43, 0x92, + 0x08, 0x9d, 0x1c, 0x71, 0x42, 0xb0, 0x22, 0x38, 0x25, 0xfc, 0xc7, 0x04, 0x77, 0x0b, 0xee, 0x14, + 0x7c, 0x48, 0xf0, 0x47, 0x1a, 0x31, 0x08, 0x74, 0x0a, 0x6e, 0x0a, 0xb4, 0xec, 0x90, 0x12, 0x1f, + 0xe2, 0x5a, 0x3b, 0xc4, 0x88, 0x11, 0x23, 0x46, 0x8c, 0x18, 0x31, 0x62, 0xc4, 0x88, 0x11, 0xe3, + 0x3f, 0xe0, 0x0f, 0xe3, 0x2e, 0x5e, 0xd3, 0x00, 0x10, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveGspRmBoot_GA100_ucode_image_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4096, // uncompressed data size (bytes) + 811, // compressed data size (bytes) + kgspBinArchiveGspRmBoot_GA100_ucode_image_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveGspRmBoot_GA100("ucode_desc") +// FILE NAME: kernel/inc/gsprm/bin/g_gsprm_ga100_riscv_desc.bin +// FILE TYPE: BINARY +// VAR NAME: N/A +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 76 +// COMPRESSED SIZE (bytes): 21 +// +static BINDATA_CONST NvU8 kgspBinArchiveGspRmBoot_GA100_ucode_desc_data[] = +{ + 0x63, 0x61, 0x80, 0x80, 0x0e, 0x16, 0x08, 0x16, 0x60, 0x20, 0x1f, 0x00, 0x00, 0xf5, 0x7a, 0xd2, + 0x3d, 0x4c, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveGspRmBoot_GA100_ucode_desc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 76, // uncompressed data size (bytes) + 21, // compressed data size (bytes) + kgspBinArchiveGspRmBoot_GA100_ucode_desc_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? 
+ NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveGspRmBoot_GA100 = +{ + 2, // entryNum + { + // entries[] : { "name", pBinStorage } + { "ucode_image" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveGspRmBoot_GA100_ucode_image_storage_pvt }, + { "ucode_desc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveGspRmBoot_GA100_ucode_desc_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveGspRmBoot_GA100(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveGspRmBoot_GA100; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_GA102.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_GA102.c new file mode 100644 index 000000000..6e38a9929 --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_GA102.c @@ -0,0 +1,1112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveGspRmBoot_GA102("ucode_image_dbg") +// FILE NAME: kernel/inc/gsprm/bin/g_gsprm_skbl_dbg_ga102_riscv_image.bin +// FILE TYPE: BINARY +// VAR NAME: N/A +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12288 +// COMPRESSED SIZE (bytes): 7345 +// +static BINDATA_CONST NvU8 kgspBinArchiveGspRmBoot_GA102_ucode_image_dbg_data[] = +{ + 0xed, 0x97, 0x65, 0x54, 0x94, 0x5d, 0xd7, 0xc7, 0x27, 0x80, 0x19, 0x42, 0xba, 0x04, 0x14, 0x18, + 0x3a, 0x14, 0x44, 0xba, 0x43, 0x4a, 0xa4, 0x04, 0x01, 0xa5, 0x19, 0x4a, 0x40, 0xba, 0x41, 0x7a, + 0x18, 0xba, 0x06, 0x18, 0x90, 0x92, 0x14, 0x50, 0xba, 0xbb, 0x43, 0xa4, 0x14, 0x90, 0x90, 0x6e, + 0x04, 0xa4, 0x1b, 0xf4, 0xd5, 0x1b, 0xee, 0xe7, 0x7d, 0x3e, 0xbe, 0xdf, 0x5f, 0xff, 0x6b, 0xcd, + 0xfa, 0xcd, 0xb5, 0xf7, 0xbe, 0xce, 0x3e, 0x7b, 0xaf, 0x7d, 0xce, 0x5a, 0xd7, 0x44, 0x8f, 0xf4, + 0xfb, 0x72, 0x86, 0xdb, 0x91, 0x6c, 0x82, 0x0e, 0xf4, 0x52, 0x5f, 0xd5, 0x7e, 0x95, 0xb9, 0x79, + 0x7f, 0x9e, 0x64, 0x13, 0xed, 0x90, 0x6a, 0x4d, 0xc9, 0xfe, 0xea, 0x7b, 0x9a, 0x25, 0xb1, 0x12, + 0xa6, 0x89, 0x75, 0x72, 0x26, 0xcf, 0xda, 0xc4, 0xa3, 0x52, 0x98, 0xbe, 0x4a, 0x96, 0x61, 0x55, + 0x98, 0xcf, 0xca, 0x38, 0x43, 0xcb, 0xfd, 0x24, 0x41, 0xc4, 0x1e, 0x3d, 0x8f, 0x68, 0xed, 0x64, + 0xfb, 0x25, 0x99, 0x43, 0xae, 0xe4, 0xa6, 0x3b, 0x7e, 0xdf, 0xf4, 0xf2, 0x6b, 0x81, 0x62, 0xa3, + 0x4e, 0x0a, 0x05, 0x71, 0x85, 0x80, 0x91, 0x40, 0x38, 0x7d, 0x10, 0x30, 0x5b, 0x24, 0xb7, 0x2b, + 0x82, 0x9c, 0x29, 0x70, 0xd7, 0x42, 0x57, 0xf9, 0x47, 0xa8, 0x92, 0x5a, 0xfc, 0xec, 0xda, 0xaa, + 0x0a, 0x54, 0x6c, 0xb9, 0xa9, 0x27, 0x81, 0x50, 0xca, 0xf4, 0xdd, 0x9b, 0x23, 0xd2, 0x82, 0xe9, + 0xa7, 0xee, 0xf6, 0x30, 0xf1, 0xaa, 0xfa, 0x48, 0x2f, 0xb3, 0x15, 0x07, 0xe2, 0xb1, 0xa9, 0xe8, + 0xbe, 0x20, 0x94, 0x16, 0x65, 0xfe, 0x9c, 0x1d, 0xe9, 0x89, 0x03, 0x4c, 0x8c, 0x67, 0x13, 0xa1, + 0xba, 0xba, 0x7e, 0xac, 0xa7, 0xcd, 0x5d, 0x72, 0xa6, 0xf9, 0x3e, 0xeb, 0xd6, 0x34, 0xa5, 0x9b, + 0x05, 0x0f, 0xd4, 0x8e, 0x25, 0x16, 0x39, 0xfc, 0x62, 0xb0, 0x89, 0x03, 0x55, 0x2f, 0xf2, 0x6a, + 0x2a, 0x5f, 0xda, 0xb0, 0x47, 0xb6, 0xe5, 0x1e, 0xf5, 0x8f, 0xe8, 0xf2, 0x85, 0x00, 0x64, 0x7e, + 0x5f, 0x81, 0x1b, 0xf3, 0x78, 0x4a, 0x3c, 0x04, 0xac, 0x82, 0x8f, 0x2d, 0x50, 0xe3, 0xcb, 0x6e, + 0x66, 0x39, 0x44, 0xca, 0xbb, 0x19, 0x24, 0xe3, 0xba, 0xfd, 0x36, 0x5f, 0x0b, 0x0d, 0x02, 0x82, + 0x8c, 0x46, 0xc5, 0x07, 0x79, 0xe8, 0x32, 0x68, 0x65, 0xcb, 0x47, 0x45, 0x66, 0xbb, 0x86, 0x7a, + 0x95, 0xcf, 0xe4, 0x33, 0xcb, 0x1b, 0xf0, 0x23, 0x67, 0xc3, 0x8b, 0x4d, 0xfb, 0x2a, 0x9e, 0x6c, + 0x84, 0x34, 0x31, 0xea, 0x3b, 0x50, 0xdf, 0x9b, 0x0f, 0x0d, 0xb8, 0x1c, 0x8c, 0x3d, 0x94, 0x9d, + 0xac, 0x22, 0x36, 0x6f, 0x75, 0x0c, 0xaf, 0x65, 0x2f, 0xca, 0x0b, 0x76, 0x50, 0x16, 0x93, 0x6d, + 0x35, 0x7e, 0x59, 0x55, 0x92, 0xa3, 0x59, 0x79, 0xb5, 0x55, 0x2c, 0x50, 0xf6, 0x5c, 0xa0, 0x6c, + 0x8a, 0x36, 0x66, 0x7b, 0x0f, 0x33, 0x76, 0xfd, 0xf9, 0x07, 0x37, 0xc1, 0x3a, 0xdd, 0x07, 0x95, + 0x49, 0x0b, 0x1f, 0x1a, 0x30, 0xbf, 0x13, 0x06, 0x7b, 0x64, 0x64, 0x24, 0x87, 0xcc, 0x3c, 0x1b, + 0xc2, 0x6f, 0xce, 0x88, 0xde, 0x20, 0x82, 0x46, 0xeb, 0x18, 0x30, 0x57, 0x7d, 0x12, 0x09, 0x1e, + 0x7e, 0x47, 0xee, 0xc2, 0xf9, 0xc3, 0x1c, 0x99, 0xd8, 0x5a, 0x43, 0x42, 0x77, 0xa9, 0x79, 0x99, + 0xf6, 0x78, 0x45, 0xbe, 0x8f, 0x8a, 0xc9, 0xfd, 0xc8, 0xc8, 0x66, 0x90, 0x90, 0x6c, 0x87, 0x56, + 0x69, 0x31, 0x6d, 0x3f, 0x73, 0x7e, 0x7c, 0x29, 0xf7, 0x49, 0x34, 0xf6, 0x52, 0xf2, 0xea, 0x1c, + 0x65, 0x9e, 0xaa, 0xfd, 0xda, 0xf0, 0x52, 0xb6, 0x2c, 0xb0, 0xf0, 0x45, 0x65, 0x76, 0x16, 
0xc2, + 0xa1, 0x35, 0xaa, 0xfc, 0x6e, 0x6b, 0x22, 0x72, 0x98, 0x8a, 0xa7, 0xec, 0x3b, 0x9a, 0x69, 0x29, + 0x25, 0x9a, 0x74, 0xb5, 0xdd, 0xd4, 0x8e, 0x71, 0x46, 0xf0, 0xdd, 0x53, 0x95, 0x06, 0xca, 0xb7, + 0x13, 0xbf, 0xb6, 0xb9, 0xf9, 0x69, 0xb0, 0xd5, 0x06, 0x34, 0x29, 0xe3, 0xea, 0x78, 0x83, 0x31, + 0xf7, 0xcd, 0x5c, 0x52, 0x9c, 0x06, 0xb4, 0xbf, 0x15, 0xb0, 0x98, 0x11, 0x08, 0x3a, 0x57, 0xe0, + 0x84, 0xdd, 0x99, 0x2d, 0xe2, 0x30, 0x4d, 0x90, 0x62, 0x36, 0x5e, 0xcb, 0x92, 0xf5, 0xc3, 0x89, + 0x9d, 0x0d, 0x4d, 0x28, 0x79, 0x33, 0x9e, 0x3d, 0x02, 0xf0, 0x0c, 0x61, 0x9f, 0xb3, 0x4f, 0x5b, + 0x7a, 0x56, 0x9c, 0xa2, 0xd8, 0x1c, 0xf0, 0x3a, 0xb8, 0xe3, 0x2e, 0x15, 0x94, 0xc2, 0x1d, 0x5b, + 0x7b, 0x97, 0x9f, 0xd8, 0x6a, 0xe1, 0xe7, 0x5e, 0x22, 0x7b, 0xb8, 0x78, 0xe7, 0xc1, 0xa5, 0x6d, + 0x54, 0x40, 0xde, 0x6b, 0xe9, 0xe7, 0x89, 0x82, 0xd9, 0xe2, 0xe4, 0x19, 0xf1, 0x0c, 0x9a, 0x15, + 0x0d, 0x46, 0x15, 0x3c, 0xce, 0x6a, 0x98, 0x92, 0x87, 0x28, 0x3e, 0x7b, 0xb4, 0x6f, 0x1a, 0xdc, + 0x54, 0x2d, 0xf9, 0xad, 0x55, 0xb6, 0x22, 0xc6, 0x30, 0x94, 0xb2, 0xc6, 0x16, 0x5b, 0xed, 0xc7, + 0xcf, 0x3a, 0x8e, 0x28, 0x7a, 0xe0, 0x0a, 0x48, 0x71, 0xdb, 0x69, 0x94, 0xb9, 0xa8, 0xce, 0x92, + 0xd3, 0xdc, 0xb7, 0xfb, 0xfe, 0xf9, 0x8c, 0x58, 0xf2, 0xa2, 0xb1, 0x67, 0x5e, 0xc7, 0x0b, 0x16, + 0x86, 0x8f, 0xea, 0x73, 0x1f, 0x92, 0x85, 0x3a, 0xe5, 0x67, 0x52, 0xe7, 0x79, 0xcf, 0x32, 0x51, + 0x7a, 0x06, 0x22, 0x63, 0x0b, 0x5b, 0xfe, 0x8e, 0x0f, 0x7b, 0x87, 0xf6, 0x6f, 0xb1, 0x93, 0xba, + 0xe9, 0x5b, 0x2b, 0xb5, 0x53, 0x7b, 0x37, 0x60, 0xaa, 0xc1, 0x98, 0x88, 0x90, 0x53, 0x1b, 0x38, + 0x53, 0x27, 0x15, 0xd2, 0x57, 0xc3, 0xac, 0x5a, 0x53, 0xa4, 0xc2, 0xd8, 0x15, 0x47, 0xcb, 0x57, + 0x6d, 0xa6, 0xa4, 0x69, 0x12, 0xb8, 0x76, 0x6f, 0xf1, 0xb0, 0xa7, 0xd4, 0x9e, 0x07, 0x3c, 0x31, + 0xf8, 0x05, 0x8a, 0x18, 0x66, 0xf4, 0xaa, 0x5e, 0x3b, 0xdf, 0x0c, 0xd6, 0xeb, 0x64, 0xa9, 0x18, + 0x5e, 0xf3, 0xac, 0x29, 0x2a, 0xa7, 0xd6, 0x8b, 0xbd, 0xd3, 0xf1, 0x64, 0xa3, 0x1a, 0xa3, 0x28, + 0x4d, 0x4a, 0x1c, 0xd5, 0xc9, 0xb1, 0xc2, 0xea, 0xb9, 0x23, 0xa0, 0xe0, 0x50, 0x73, 0x5a, 0x7a, + 0xa9, 0xc3, 0xd8, 0x24, 0xdf, 0x3f, 0x18, 0x07, 0x0c, 0x14, 0x50, 0x61, 0x32, 0x2c, 0xb2, 0x0c, + 0x8e, 0x9c, 0x7e, 0xac, 0x0a, 0x0a, 0xee, 0x76, 0xd3, 0x20, 0x0d, 0x4d, 0x99, 0x7c, 0xdb, 0xef, + 0x85, 0x0c, 0x4e, 0x74, 0x6e, 0xa8, 0xc7, 0x48, 0x9c, 0xae, 0x5e, 0x7a, 0x24, 0x29, 0x73, 0xdb, + 0x73, 0x6b, 0x31, 0x9a, 0x4c, 0x54, 0xfc, 0xce, 0xa4, 0x36, 0xe7, 0x1c, 0xff, 0x8b, 0xb8, 0xf3, + 0x23, 0x9d, 0xbb, 0x3f, 0x11, 0xb9, 0x6f, 0xcf, 0x13, 0xf1, 0x23, 0xe7, 0xc5, 0x71, 0xae, 0x6a, + 0x35, 0x84, 0xc7, 0xbf, 0x54, 0x79, 0xa4, 0x60, 0x00, 0xe2, 0xf9, 0xe8, 0x81, 0xd3, 0x9d, 0xad, + 0x52, 0xb9, 0x12, 0x55, 0x08, 0x62, 0x43, 0x1d, 0xf5, 0xb8, 0x63, 0xec, 0xd5, 0xd5, 0xfa, 0x5b, + 0xd2, 0xb1, 0x13, 0x3e, 0xc9, 0x1b, 0x09, 0xd4, 0x5e, 0xa1, 0x5e, 0x59, 0xd2, 0x0f, 0xba, 0x97, + 0x64, 0xe7, 0x64, 0xba, 0xef, 0xda, 0x3d, 0x59, 0x4d, 0x0b, 0xaa, 0x8a, 0x12, 0x24, 0xf2, 0xd8, + 0xb6, 0x5a, 0xae, 0xb8, 0xcd, 0xc6, 0xfd, 0x9e, 0x51, 0x3f, 0xd2, 0x9a, 0x5d, 0x17, 0x28, 0xa2, + 0xb6, 0xba, 0x0a, 0xc2, 0x7c, 0xa8, 0x7c, 0xea, 0xc3, 0xdf, 0x0a, 0x82, 0x0f, 0xbb, 0xc7, 0x2d, + 0x26, 0x4e, 0xf3, 0x66, 0x7c, 0x64, 0xad, 0x3f, 0xc1, 0x0b, 0xd8, 0xd5, 0xde, 0xaf, 0x3a, 0xaa, + 0x91, 0xa7, 0x8e, 0x42, 0x1b, 0xe2, 0xa6, 0x10, 0xc5, 0x1e, 0xcd, 0xb4, 0x8c, 0x7b, 0x4c, 0x51, + 0x62, 0x94, 0xa5, 0x85, 0x4b, 0x64, 0xbe, 0xed, 0xb0, 0xa3, 0x09, 0x6d, 0x8c, 0x98, 0x52, 0xfa, + 0xa9, 0xf6, 0xd5, 0xae, 0x4f, 0xbb, 0x8d, 0xfc, 0x14, 0xb2, 0x7d, 0xde, 0xc6, 0x09, 0x99, 0x53, + 0x77, 0x4f, 0x26, 
0xdd, 0x11, 0xc5, 0xf5, 0x2b, 0x09, 0xc0, 0x4b, 0x95, 0xe8, 0xec, 0x48, 0x96, + 0xf3, 0x2f, 0x0e, 0x82, 0xda, 0x38, 0x62, 0xa9, 0x60, 0xda, 0x6f, 0x4e, 0xf8, 0x7a, 0xa7, 0xe7, + 0x71, 0x58, 0xcc, 0xa0, 0xc8, 0x1b, 0xa7, 0x2c, 0x76, 0x8e, 0x1a, 0x06, 0x2a, 0xd2, 0x9f, 0xe6, + 0x5f, 0x9a, 0xa4, 0x3e, 0x6c, 0x29, 0x43, 0x53, 0x8c, 0x2e, 0xd7, 0x6f, 0xf1, 0x59, 0x14, 0x1b, + 0x13, 0x4c, 0xd5, 0xe7, 0xac, 0xda, 0xcc, 0x41, 0xcc, 0x87, 0x4a, 0xa0, 0x38, 0x99, 0x3e, 0xaf, + 0x3c, 0x64, 0xa7, 0xc6, 0xf6, 0x44, 0xe2, 0xe2, 0x1f, 0xd2, 0x79, 0xf8, 0xbf, 0x4c, 0x5b, 0x68, + 0x1e, 0x1b, 0x21, 0x39, 0xf7, 0x1b, 0x10, 0xc2, 0x0f, 0xb2, 0x1d, 0xdb, 0xc8, 0x56, 0x94, 0xe6, + 0x93, 0xc0, 0x71, 0x84, 0x8e, 0x9e, 0x62, 0xf3, 0x44, 0x4b, 0xd2, 0x01, 0x0e, 0x5d, 0x4f, 0xdf, + 0xf4, 0xb5, 0xaa, 0x2f, 0x62, 0xab, 0xa5, 0x48, 0x7d, 0xd0, 0x89, 0xd5, 0xfc, 0xf1, 0xcb, 0x48, + 0xdd, 0xc7, 0x5e, 0x7a, 0x67, 0xfb, 0xd6, 0xa4, 0xcb, 0x64, 0x59, 0x4b, 0x64, 0xf4, 0x63, 0x3f, + 0x30, 0xad, 0xee, 0xe9, 0x9c, 0xab, 0x3e, 0x5b, 0xe8, 0x19, 0x72, 0x8f, 0x4f, 0x22, 0x44, 0xab, + 0x65, 0x06, 0x1c, 0xfa, 0x91, 0x76, 0xbe, 0xbb, 0x1f, 0x08, 0x0b, 0xc6, 0xeb, 0xd7, 0x9c, 0xb9, + 0xbd, 0x31, 0x79, 0x45, 0x62, 0xc1, 0xe5, 0x71, 0xbf, 0xb0, 0xb0, 0x16, 0x9a, 0xd4, 0x75, 0xaf, + 0x90, 0x00, 0x71, 0xae, 0x91, 0x73, 0x6e, 0x75, 0xce, 0x43, 0xda, 0xee, 0x85, 0x73, 0xa6, 0xee, + 0x49, 0xf4, 0xde, 0xe0, 0x83, 0x55, 0x80, 0xfe, 0x95, 0x2f, 0x5b, 0x68, 0x7b, 0xa2, 0xd3, 0x74, + 0x32, 0x21, 0xa7, 0xc2, 0x60, 0xf1, 0xb7, 0xfa, 0xfd, 0xf5, 0xd7, 0xba, 0x12, 0x5e, 0xab, 0x06, + 0xde, 0x22, 0xd2, 0x9f, 0x2f, 0x0e, 0xbb, 0xb7, 0xc2, 0xcf, 0x75, 0x5a, 0xef, 0xf5, 0xcd, 0x3c, + 0x81, 0x98, 0x88, 0xf0, 0xe5, 0x71, 0xb8, 0x8d, 0x2b, 0x60, 0xa7, 0x3b, 0x61, 0x04, 0xfa, 0xd2, + 0xc5, 0xa2, 0xf6, 0x89, 0xa9, 0xa1, 0x03, 0x0b, 0xac, 0x1d, 0x6e, 0x0e, 0x9a, 0x17, 0x9d, 0x33, + 0x4b, 0x61, 0xb9, 0x41, 0xae, 0x44, 0x12, 0x5a, 0xac, 0xdf, 0x38, 0x8f, 0xf2, 0xaa, 0xb4, 0x18, + 0x2e, 0x08, 0x33, 0xe5, 0x4c, 0x93, 0x03, 0x1d, 0x64, 0x3b, 0x5c, 0x6d, 0x6a, 0xd5, 0x81, 0xa7, + 0x6b, 0x0f, 0xeb, 0x49, 0x29, 0x8b, 0xfb, 0x7e, 0xc8, 0xb2, 0xc0, 0x88, 0x5e, 0xb7, 0x6c, 0xf6, + 0xd1, 0xcb, 0x18, 0xe7, 0x9b, 0xd9, 0x6a, 0xbf, 0x38, 0xa0, 0xab, 0x18, 0xbb, 0x8a, 0xdd, 0x6c, + 0xfb, 0xba, 0x62, 0xde, 0xc6, 0x23, 0x24, 0xf3, 0xc1, 0xac, 0x20, 0x40, 0xda, 0x8c, 0x1d, 0x17, + 0x21, 0x27, 0x56, 0x0b, 0xaf, 0xeb, 0xf8, 0x1a, 0x62, 0xa6, 0x2f, 0xf3, 0xa9, 0x9b, 0x03, 0xfb, + 0x96, 0xf4, 0x43, 0x17, 0xc6, 0x62, 0x84, 0x13, 0x6d, 0x15, 0xb9, 0xfb, 0xc1, 0x30, 0xbb, 0xb0, + 0x53, 0x14, 0xa9, 0xd8, 0x42, 0x3c, 0xd5, 0x98, 0x60, 0x59, 0xcd, 0x73, 0xa1, 0xbb, 0xa2, 0xf7, + 0x4b, 0x9c, 0xa3, 0x9e, 0x86, 0x62, 0xd2, 0xd9, 0x4a, 0x2d, 0x54, 0x16, 0x45, 0x91, 0x1c, 0x0c, + 0xde, 0x8e, 0x25, 0xe6, 0xb3, 0x25, 0xfb, 0x71, 0x7c, 0x7c, 0x58, 0x14, 0xf3, 0x51, 0x40, 0xb2, + 0x05, 0x9f, 0x6d, 0x20, 0x94, 0x93, 0x17, 0x1f, 0xa9, 0xfa, 0xa1, 0xaf, 0xc9, 0x59, 0x78, 0x98, + 0x45, 0x0e, 0x44, 0xf6, 0xa3, 0x1c, 0x74, 0x4b, 0xb2, 0xd0, 0x59, 0x0f, 0xea, 0xad, 0xf2, 0xf4, + 0x4e, 0x63, 0xd6, 0xbe, 0x5c, 0xe3, 0x67, 0x23, 0x9b, 0xea, 0xb3, 0x79, 0xe7, 0x96, 0x38, 0x2b, + 0x82, 0xa5, 0x97, 0x58, 0xf6, 0x26, 0xc2, 0x58, 0x1d, 0x3a, 0x4a, 0x38, 0x67, 0xd5, 0x25, 0xda, + 0xb2, 0xdf, 0x64, 0x77, 0x68, 0x1e, 0x90, 0xdd, 0xcf, 0x27, 0xe6, 0x4d, 0xdf, 0x29, 0xf1, 0x12, + 0x55, 0x00, 0xea, 0xf2, 0x25, 0xd1, 0x95, 0xf4, 0x64, 0xf4, 0xec, 0xeb, 0x71, 0xac, 0xf5, 0x96, + 0xd5, 0xd2, 0x92, 0x10, 0x48, 0x9b, 0xfa, 0x46, 0xef, 0x65, 0x66, 0x24, 0xb1, 0xc9, 0xd1, 0x84, + 0xdf, 0x21, 0xf3, 0xce, 0x78, 0x6e, 0xa7, 
0xc0, 0x68, 0x97, 0x50, 0xda, 0x41, 0x4d, 0x0d, 0x04, + 0x58, 0xae, 0xea, 0xa5, 0xed, 0x9a, 0xfe, 0x8a, 0xab, 0x4d, 0x7b, 0xc2, 0xa1, 0xde, 0x19, 0x2e, + 0xaa, 0xb1, 0x00, 0xe4, 0xde, 0x7e, 0x14, 0xe3, 0xb8, 0x81, 0xbb, 0x35, 0x3e, 0xd7, 0xb1, 0x0c, + 0x87, 0x3e, 0x65, 0x7c, 0x9e, 0x1d, 0x35, 0xd4, 0xf2, 0x74, 0x1d, 0xb7, 0x3c, 0x46, 0x29, 0x88, + 0x4d, 0xc8, 0xc6, 0x03, 0x1b, 0x71, 0x3a, 0x98, 0x04, 0xae, 0x5c, 0x69, 0x90, 0xac, 0x68, 0x53, + 0x0e, 0x02, 0x62, 0x2d, 0xd4, 0x53, 0x49, 0xc9, 0xd4, 0xe3, 0x66, 0x42, 0x50, 0x4d, 0xd0, 0x03, + 0xd6, 0x58, 0x63, 0xf5, 0xa4, 0x81, 0x9c, 0x34, 0x49, 0xaf, 0x3d, 0xef, 0x93, 0x8b, 0xa2, 0x33, + 0xb6, 0x4f, 0xd1, 0xf2, 0xaa, 0xb6, 0xe1, 0xe0, 0x8c, 0x39, 0x78, 0x86, 0x54, 0x8e, 0xd3, 0x87, + 0x8e, 0x58, 0x5c, 0x2f, 0xbe, 0x4c, 0xb7, 0x9c, 0x41, 0xa4, 0xf8, 0x64, 0x90, 0x67, 0x23, 0x96, + 0x82, 0x56, 0x29, 0xeb, 0xa1, 0xa1, 0xb6, 0x6c, 0xcf, 0xfe, 0xfe, 0x82, 0x54, 0xa5, 0x87, 0x7f, + 0x77, 0xf3, 0xe3, 0x10, 0xa4, 0xa6, 0x78, 0x8c, 0xb6, 0x4b, 0xf8, 0x66, 0xad, 0x29, 0xa8, 0xc5, + 0x8d, 0x58, 0x4f, 0x96, 0x5b, 0x7e, 0x5c, 0xb3, 0x34, 0xcf, 0xe1, 0x9e, 0xc6, 0x14, 0xa4, 0xcd, + 0x60, 0x0f, 0x33, 0x28, 0xc4, 0x1c, 0x23, 0xd8, 0xc2, 0x1d, 0x28, 0xfa, 0xc1, 0x88, 0x29, 0xe0, + 0xd6, 0x44, 0x4a, 0x37, 0x91, 0x8e, 0x1d, 0xa2, 0x24, 0x98, 0xeb, 0x47, 0x70, 0xd4, 0x6d, 0x36, + 0x94, 0xa4, 0x35, 0xf3, 0x8a, 0xe7, 0x62, 0xfe, 0x29, 0x08, 0x71, 0x20, 0x60, 0x2b, 0x90, 0xcc, + 0x12, 0x95, 0xaf, 0x87, 0x2e, 0x2b, 0xc1, 0xdb, 0xdc, 0x6b, 0x53, 0x7e, 0x8d, 0x9f, 0xa3, 0xa1, + 0x76, 0x86, 0x3f, 0x20, 0xbc, 0x49, 0x35, 0xb8, 0xf2, 0x40, 0x56, 0xbb, 0xa5, 0xe0, 0xfb, 0x7e, + 0xdf, 0x2a, 0x97, 0x71, 0xff, 0x0a, 0xf1, 0x9e, 0xb3, 0xf1, 0x89, 0x97, 0x94, 0x8d, 0xde, 0x04, + 0xb7, 0x68, 0x61, 0xaa, 0x40, 0xd9, 0xcf, 0x1a, 0xa2, 0xf5, 0x33, 0x98, 0xfa, 0x56, 0x91, 0x2f, + 0x62, 0x65, 0x4d, 0xd9, 0x49, 0x57, 0x6f, 0xd0, 0xb3, 0x9a, 0x2d, 0x20, 0xee, 0x76, 0xb6, 0xdf, + 0xda, 0x4c, 0xf7, 0x74, 0xc9, 0xf3, 0xc6, 0x04, 0x09, 0xff, 0xdd, 0x15, 0x42, 0x42, 0x20, 0x83, + 0xd2, 0x43, 0xdb, 0x8e, 0xb0, 0xfa, 0x9c, 0xf4, 0x65, 0x0d, 0x7b, 0xd5, 0xdb, 0xc6, 0x77, 0xa6, + 0x8b, 0x80, 0x17, 0x4c, 0xbd, 0xe6, 0x67, 0x12, 0x62, 0x79, 0xef, 0x90, 0xa5, 0x0a, 0xbb, 0x86, + 0xd9, 0x0e, 0x6d, 0xa4, 0xa8, 0xba, 0x43, 0xb1, 0xec, 0xf7, 0xc2, 0xb0, 0x85, 0xc8, 0x5b, 0x84, + 0xa5, 0x4b, 0xa4, 0x86, 0xd8, 0x6d, 0x04, 0xa1, 0xfc, 0x19, 0xf5, 0xb3, 0xf1, 0x26, 0x9f, 0xa8, + 0xf2, 0xae, 0xbe, 0x9a, 0xea, 0xbe, 0x9d, 0x1a, 0x0d, 0xf7, 0x7c, 0xaf, 0x46, 0xb0, 0x23, 0x5f, + 0x05, 0x0b, 0x5f, 0x0c, 0x9a, 0x58, 0x74, 0x74, 0xd9, 0x61, 0xc9, 0x7f, 0x48, 0x26, 0x18, 0x76, + 0x3f, 0x3f, 0xbd, 0x4e, 0xa1, 0x45, 0x17, 0xcd, 0x4b, 0xd3, 0xd1, 0xd3, 0x2b, 0x36, 0xff, 0x03, + 0xac, 0x3e, 0xdb, 0x15, 0x86, 0x72, 0x53, 0x0e, 0x2c, 0xae, 0x0c, 0xa7, 0x28, 0x03, 0x07, 0x54, + 0x93, 0xa9, 0x2e, 0x97, 0xea, 0xde, 0xe6, 0xb6, 0xd8, 0x08, 0x8c, 0x1f, 0x59, 0x28, 0x1a, 0xa8, + 0x26, 0xf7, 0xe4, 0x59, 0x18, 0xaf, 0xec, 0x06, 0xb8, 0xd9, 0x72, 0x9a, 0x84, 0xb4, 0x78, 0x3a, + 0xbb, 0xa4, 0xc0, 0xac, 0x09, 0x9c, 0x84, 0xb5, 0x78, 0xde, 0xa4, 0xd4, 0x93, 0xe3, 0x0a, 0x7d, + 0x99, 0x1c, 0x64, 0x80, 0xc7, 0x97, 0xc3, 0x12, 0xd7, 0x53, 0x9a, 0xfc, 0x99, 0xc6, 0x9c, 0x77, + 0xa3, 0x36, 0x4e, 0x0d, 0xe5, 0xcb, 0xb4, 0xcb, 0x59, 0xd6, 0x23, 0x4d, 0x0b, 0x5e, 0x98, 0xeb, + 0x3a, 0x46, 0x5b, 0x39, 0x0f, 0x58, 0xf1, 0x30, 0xf7, 0xef, 0x1b, 0xca, 0x9f, 0x09, 0x25, 0xda, + 0x5a, 0x7e, 0x34, 0x40, 0x28, 0x80, 0x9f, 0x79, 0x9c, 0x6e, 0x1d, 0xa4, 0xd7, 0xe5, 0x93, 0xaa, + 0x9b, 0xef, 0x3e, 0x1c, 0x5a, 0x50, 0xf5, 0xd5, 0x75, 0xa2, 0x11, 
0xf9, 0x1c, 0x3f, 0xa8, 0x1e, + 0x45, 0x89, 0xe9, 0x13, 0x78, 0xde, 0x7e, 0x34, 0x64, 0xe6, 0x4a, 0x9d, 0x16, 0xac, 0x8a, 0x8b, + 0x36, 0xe2, 0x4c, 0x3b, 0xd0, 0x63, 0x33, 0xb4, 0x22, 0x54, 0x4b, 0x15, 0x1c, 0xaa, 0x2d, 0x67, + 0xf1, 0x1b, 0xf7, 0xaa, 0xc7, 0x88, 0x22, 0xc7, 0xa3, 0x27, 0x3a, 0xba, 0xaf, 0xbf, 0x3b, 0x72, + 0x8a, 0x3a, 0x7c, 0x79, 0x27, 0xfb, 0x49, 0x2a, 0x46, 0x64, 0x34, 0xd7, 0xe2, 0x48, 0xc2, 0xf2, + 0xfe, 0x0e, 0x46, 0xec, 0xb7, 0x41, 0x88, 0x62, 0xf9, 0x17, 0xdc, 0x59, 0xca, 0xc8, 0xfc, 0x4e, + 0x19, 0x6b, 0x22, 0xae, 0x60, 0x36, 0x95, 0x9e, 0x86, 0x03, 0x4a, 0xa5, 0x37, 0x26, 0x05, 0xac, + 0xd5, 0x09, 0x5c, 0x4c, 0xcc, 0x09, 0x44, 0x3f, 0x7b, 0xa5, 0xdb, 0x23, 0x9c, 0x41, 0x80, 0x3b, + 0x8e, 0x65, 0x24, 0x75, 0xa4, 0x0b, 0xe3, 0xc9, 0xca, 0x07, 0xc8, 0x8c, 0x97, 0x84, 0xa9, 0xd4, + 0xb4, 0x3e, 0x5f, 0xee, 0x7f, 0x8e, 0xe8, 0xe3, 0xb6, 0xec, 0x98, 0xc5, 0x97, 0x20, 0x12, 0xf4, + 0x49, 0x15, 0x21, 0x16, 0x49, 0xf2, 0x0d, 0xf5, 0x5b, 0x88, 0x8a, 0x8c, 0xab, 0x29, 0x29, 0xe8, + 0x29, 0x14, 0xe4, 0x4b, 0x3a, 0x28, 0x09, 0x7c, 0x75, 0xbb, 0xe7, 0xde, 0xe3, 0xf9, 0x2f, 0x98, + 0x33, 0xef, 0x5e, 0xc8, 0xc3, 0x88, 0x24, 0xe8, 0xa3, 0x58, 0xdf, 0x11, 0xaa, 0x37, 0x6c, 0x9e, + 0x23, 0x50, 0x0f, 0x39, 0x62, 0x9c, 0xe9, 0x56, 0x1f, 0x16, 0xbf, 0x7b, 0x1a, 0x0a, 0x0b, 0xe5, + 0x04, 0xe9, 0x21, 0x25, 0xa4, 0x04, 0x52, 0x84, 0x22, 0x98, 0x54, 0x2e, 0x7a, 0xcd, 0x03, 0x47, + 0x7c, 0x64, 0x85, 0xba, 0x8b, 0x8d, 0x1d, 0x1f, 0x8e, 0xf1, 0xcc, 0xba, 0xec, 0x5b, 0xad, 0x2d, + 0x21, 0x24, 0xcd, 0x3f, 0x9f, 0xbd, 0xcf, 0x94, 0xf3, 0xb3, 0x12, 0x11, 0x7c, 0x42, 0xbe, 0xc0, + 0x1c, 0xaf, 0x9d, 0xad, 0xde, 0xa1, 0xa9, 0x5d, 0xa9, 0x38, 0x52, 0xc4, 0x4a, 0x96, 0xcf, 0x22, + 0xf1, 0x2c, 0xe6, 0xf1, 0x4c, 0x56, 0x2e, 0x4b, 0x6e, 0x91, 0x89, 0xc9, 0xc7, 0xbb, 0x0b, 0xef, + 0x24, 0x1c, 0x02, 0xf2, 0x1e, 0x2d, 0x0e, 0x57, 0xf2, 0xdf, 0xff, 0x91, 0x11, 0xc6, 0xee, 0x82, + 0xef, 0xff, 0xea, 0xfc, 0x42, 0x74, 0x59, 0xc9, 0x56, 0xbb, 0x9f, 0xc5, 0xe5, 0x6d, 0xad, 0x99, + 0xc0, 0xf3, 0x1c, 0x07, 0xa4, 0xa0, 0xfa, 0x2e, 0x1b, 0xca, 0x2b, 0x99, 0x97, 0x9a, 0x6a, 0x32, + 0x6b, 0x9c, 0x30, 0x3a, 0xb2, 0x63, 0x65, 0xf7, 0x5b, 0xd2, 0xfc, 0xcf, 0xcd, 0xb5, 0xf9, 0x24, + 0x8c, 0xdd, 0xa4, 0x16, 0xa6, 0x29, 0x9f, 0x10, 0x9d, 0xe3, 0xf5, 0x18, 0x4b, 0xd3, 0x21, 0x12, + 0xe6, 0xa0, 0xc5, 0xa5, 0xde, 0x5b, 0xef, 0x7f, 0xc5, 0xc7, 0xb0, 0x57, 0x7e, 0x04, 0x0b, 0x60, + 0x21, 0x54, 0x75, 0x2e, 0x0a, 0x17, 0xd9, 0x72, 0x15, 0x5f, 0xad, 0x2d, 0xda, 0xff, 0x20, 0xea, + 0x78, 0xf9, 0x01, 0xff, 0xd0, 0x28, 0xbb, 0x44, 0xa0, 0xd3, 0x89, 0x17, 0x00, 0x1a, 0xe5, 0x23, + 0x27, 0x53, 0x6e, 0xaf, 0xb4, 0x14, 0x2f, 0x1c, 0xf0, 0x64, 0xa0, 0x25, 0xe3, 0x2f, 0xce, 0xea, + 0x5b, 0x3e, 0x0a, 0x4f, 0xad, 0xf5, 0x12, 0x3c, 0xb6, 0x1a, 0xd2, 0x09, 0x64, 0x36, 0x3f, 0x3f, + 0x8a, 0xef, 0xe9, 0x62, 0x90, 0x14, 0x26, 0xa1, 0x1a, 0x7a, 0xc5, 0x9d, 0xa0, 0x84, 0xcd, 0xb5, + 0xc0, 0x38, 0x8c, 0x2e, 0x39, 0xf8, 0x2e, 0xf1, 0x12, 0x16, 0x0c, 0x7d, 0xaf, 0xd3, 0xf4, 0x69, + 0xbe, 0x80, 0x32, 0x77, 0x29, 0xe0, 0x28, 0xb3, 0xd1, 0x60, 0x9a, 0x4c, 0xb5, 0xc3, 0xed, 0x7b, + 0x93, 0x4d, 0x3a, 0x1b, 0x19, 0xa9, 0x8a, 0x22, 0x6c, 0xf3, 0x65, 0xba, 0x7a, 0x5b, 0x62, 0xd8, + 0xd7, 0x9f, 0x3a, 0x54, 0x6c, 0x29, 0x54, 0x3c, 0x6f, 0x2f, 0x86, 0x05, 0x7b, 0xf6, 0xeb, 0x43, + 0xce, 0x4a, 0x6f, 0xa5, 0xe3, 0xe9, 0x60, 0x32, 0x86, 0x86, 0x2c, 0xb1, 0xe1, 0x8a, 0x3c, 0x96, + 0x54, 0xf1, 0x63, 0x54, 0xab, 0x16, 0x19, 0x31, 0xc8, 0x93, 0x56, 0x97, 0x20, 0xaf, 0x1d, 0x56, + 0xc4, 0xc8, 0x55, 0x38, 0x5d, 0x89, 0xf3, 0x21, 0xbb, 0xfb, 0x91, 0x85, 0x68, 0x7b, 0x65, 
0x3c, + 0x76, 0xe5, 0x8b, 0x59, 0xf8, 0xd7, 0xd7, 0xf7, 0x8b, 0x44, 0x56, 0x0d, 0x36, 0x67, 0x0d, 0x45, + 0x1f, 0x1b, 0x02, 0xed, 0x2a, 0xaa, 0x3b, 0xe3, 0x4e, 0x1a, 0x78, 0xc8, 0xea, 0x4a, 0x43, 0xd5, + 0x2f, 0x1e, 0xa1, 0xb5, 0x9d, 0xee, 0xc2, 0x62, 0x4e, 0x06, 0xef, 0x30, 0xfb, 0x3c, 0x1a, 0x54, + 0xf7, 0x32, 0xed, 0xe8, 0x1d, 0x22, 0x5c, 0x5d, 0xc7, 0x0f, 0x18, 0x0d, 0x49, 0x18, 0x69, 0x1d, + 0x5d, 0xcd, 0x85, 0xce, 0x22, 0x29, 0xcf, 0xc3, 0x40, 0xbd, 0xaf, 0x17, 0x7d, 0xc5, 0xe4, 0xdd, + 0xfc, 0x02, 0xab, 0x5a, 0x9b, 0x01, 0xb3, 0xf8, 0x90, 0x96, 0xdd, 0x12, 0xb5, 0xb2, 0x7b, 0x82, + 0x3b, 0xb1, 0x8a, 0x3a, 0x26, 0xf0, 0xb7, 0x45, 0x3f, 0xd3, 0x7a, 0x1c, 0xaf, 0x0c, 0xa0, 0xe3, + 0x31, 0x6d, 0xca, 0xcf, 0xaa, 0x97, 0x60, 0xb7, 0xf9, 0xc2, 0x34, 0xec, 0x54, 0x92, 0x04, 0x4f, + 0x69, 0xb0, 0x9e, 0x74, 0xdb, 0x26, 0x9f, 0xe8, 0x75, 0x95, 0xb7, 0x76, 0xdc, 0x31, 0xe6, 0x90, + 0x4e, 0xaa, 0x3e, 0x7e, 0xe4, 0x3a, 0x6e, 0xc4, 0xc8, 0xd0, 0xdd, 0x6b, 0x93, 0x33, 0x38, 0xbf, + 0x3c, 0x94, 0x39, 0xdd, 0x17, 0x8a, 0xa5, 0xf9, 0xec, 0x28, 0x38, 0xd2, 0x2b, 0xb0, 0x9c, 0x79, + 0xe1, 0x24, 0x3f, 0x24, 0x50, 0xf8, 0x92, 0xec, 0xa9, 0x8b, 0xcf, 0xb3, 0x95, 0xe6, 0x17, 0x63, + 0x63, 0x68, 0x70, 0x70, 0x12, 0x27, 0x5e, 0xcc, 0x42, 0xde, 0x36, 0xea, 0xc1, 0x5e, 0x46, 0xfe, + 0xb0, 0x50, 0x96, 0xa8, 0x2d, 0xbe, 0xfb, 0x01, 0x8f, 0x83, 0x86, 0x79, 0x10, 0x52, 0x32, 0xdb, + 0xda, 0x15, 0x61, 0x4f, 0xb1, 0x42, 0xb1, 0xc8, 0xbf, 0x26, 0x62, 0xdf, 0x03, 0x23, 0xb2, 0x9a, + 0x73, 0xee, 0x1a, 0x6c, 0x9e, 0x75, 0xac, 0x92, 0x04, 0x7c, 0xa2, 0x77, 0x58, 0x3f, 0x73, 0x2c, + 0x3a, 0xfb, 0x55, 0xd6, 0x2b, 0x95, 0xeb, 0x46, 0x9f, 0x6f, 0x73, 0x14, 0xd4, 0x61, 0x6b, 0x54, + 0x2c, 0x8d, 0xa1, 0x92, 0x38, 0x58, 0x57, 0xfa, 0x7a, 0x00, 0x76, 0x9f, 0x37, 0xa8, 0xfc, 0xd8, + 0x22, 0x30, 0xd0, 0x9c, 0x40, 0x21, 0x97, 0xf0, 0xb0, 0x34, 0x8b, 0x77, 0x9d, 0x8c, 0x46, 0x89, + 0x70, 0x92, 0xfe, 0xa7, 0x11, 0xbd, 0x33, 0x41, 0xd8, 0x2b, 0x7e, 0xbb, 0x21, 0x3e, 0xd6, 0xd3, + 0x67, 0x55, 0xf9, 0x42, 0xce, 0x86, 0x9a, 0x71, 0xa8, 0x24, 0xf5, 0xa0, 0x55, 0x4e, 0x70, 0xff, + 0x6a, 0xbb, 0xc6, 0x2a, 0x87, 0x7d, 0x33, 0xa5, 0x47, 0x33, 0x19, 0x99, 0xac, 0x34, 0x2e, 0x7e, + 0xfb, 0xee, 0x72, 0x26, 0xaf, 0x9f, 0x57, 0xad, 0x1d, 0x67, 0x2c, 0x05, 0xea, 0xaa, 0x40, 0x4a, + 0xa1, 0x91, 0x34, 0xca, 0x32, 0x5c, 0x91, 0x29, 0xc2, 0xd8, 0x2f, 0xd3, 0xb2, 0x59, 0x47, 0xa6, + 0x55, 0xb1, 0x77, 0x1f, 0xaf, 0xe2, 0x84, 0x51, 0xf9, 0x03, 0x44, 0x6b, 0x03, 0x4e, 0xbf, 0x87, + 0x5d, 0x10, 0xad, 0xbe, 0xc5, 0xce, 0x68, 0x51, 0x0e, 0xb4, 0x1c, 0x17, 0x7a, 0xaf, 0xab, 0xc1, + 0xed, 0x72, 0x5e, 0xcd, 0xf8, 0x9a, 0xa1, 0xf6, 0xac, 0xb9, 0xe1, 0x50, 0xb1, 0x92, 0xeb, 0xa7, + 0x0e, 0xb1, 0x72, 0x3c, 0x7f, 0x31, 0xdd, 0x0b, 0xff, 0x12, 0x27, 0x44, 0x98, 0xb9, 0x01, 0xd7, + 0xbd, 0xa9, 0x04, 0xbd, 0xef, 0xe3, 0xa3, 0x94, 0x71, 0xbd, 0x58, 0xcf, 0xcb, 0xfa, 0x42, 0xbd, + 0x33, 0xbd, 0xfb, 0xcf, 0x84, 0xbf, 0xd4, 0x20, 0xcd, 0x00, 0x48, 0xe3, 0x83, 0xf9, 0x1e, 0xff, + 0x96, 0xf9, 0x8a, 0x4f, 0x4d, 0xb7, 0xcd, 0x84, 0xd2, 0xf5, 0x72, 0x4a, 0xe5, 0x5f, 0x1c, 0xe2, + 0xda, 0x78, 0x08, 0xa1, 0x09, 0x00, 0xd3, 0x0e, 0xa2, 0x9d, 0x69, 0x88, 0x28, 0xdf, 0x31, 0xd6, + 0xc5, 0x8d, 0xcb, 0x74, 0xec, 0x15, 0x54, 0xa2, 0xaa, 0x92, 0xd4, 0xfb, 0x07, 0x38, 0x36, 0x06, + 0xbe, 0xa5, 0x6f, 0x1d, 0x19, 0xd8, 0x0f, 0xa4, 0xbe, 0xc6, 0xbf, 0xc0, 0xb4, 0x00, 0xce, 0x7b, + 0xc7, 0x8e, 0x2e, 0x94, 0xac, 0xe1, 0xd0, 0x9c, 0x2b, 0xeb, 0xaa, 0x6d, 0x38, 0x08, 0x89, 0x35, + 0x0b, 0x0c, 0x7d, 0xf7, 0xb4, 0xd6, 0xd2, 0x6a, 0xd9, 0xb2, 0xf8, 0xe5, 0x2d, 0x0f, 0xed, 0x14, + 0xb6, 0xb6, 0xdd, 
0xe4, 0x81, 0x5e, 0xc9, 0xb9, 0x1c, 0xb1, 0x1d, 0x8e, 0x5c, 0xb0, 0x7d, 0x30, + 0xa0, 0x8c, 0xe5, 0xc7, 0x5c, 0xc8, 0x75, 0x32, 0xa9, 0x99, 0x87, 0xc4, 0x2d, 0x8d, 0xb3, 0x32, + 0x0c, 0x8c, 0x3b, 0xb2, 0xb9, 0xd1, 0x58, 0x59, 0xb9, 0x7e, 0xe8, 0x42, 0xac, 0x04, 0xd8, 0xc1, + 0x72, 0x58, 0xdf, 0xeb, 0x6d, 0x71, 0xcc, 0x13, 0x06, 0x9d, 0x84, 0x67, 0xdd, 0x65, 0xde, 0xd1, + 0x44, 0x31, 0xfc, 0xda, 0x20, 0x84, 0x2d, 0x7f, 0xc4, 0xcd, 0xf1, 0x8d, 0x62, 0xc6, 0xc1, 0x4b, + 0x7f, 0x1b, 0xef, 0xe1, 0xac, 0xad, 0xc6, 0xc8, 0xa9, 0x98, 0x62, 0x5c, 0x96, 0x56, 0xd2, 0x1d, + 0xae, 0xc0, 0xb0, 0xa1, 0xc1, 0xa7, 0xc2, 0x0a, 0xc8, 0x16, 0x53, 0xb0, 0x8f, 0x7c, 0xd1, 0xf5, + 0x85, 0x9e, 0x57, 0x6d, 0x57, 0xd5, 0xff, 0x64, 0xd0, 0x6c, 0xfa, 0x34, 0x28, 0x6e, 0xf4, 0xe5, + 0xaf, 0xb4, 0xfd, 0xca, 0x9c, 0x86, 0x54, 0x6c, 0x2e, 0xe6, 0xc3, 0x5e, 0xf1, 0x10, 0xb5, 0xc7, + 0x03, 0xdd, 0x7d, 0x56, 0x22, 0x1c, 0x97, 0xd8, 0x73, 0xbd, 0x05, 0xfb, 0x8f, 0x51, 0xe4, 0xfc, + 0x92, 0x8d, 0xf6, 0xc0, 0x85, 0x3b, 0xd8, 0x4a, 0x8a, 0x05, 0x92, 0x8a, 0x05, 0xdb, 0x54, 0x54, + 0x3f, 0xc4, 0x6c, 0x30, 0x9c, 0x19, 0xba, 0xb6, 0x87, 0xac, 0x46, 0x36, 0x06, 0xe5, 0xdd, 0xd5, + 0xbe, 0x46, 0x94, 0x83, 0x22, 0x0f, 0x1d, 0x11, 0x6b, 0xdf, 0xf0, 0x57, 0x8f, 0x0b, 0x8e, 0x2d, + 0x85, 0x4a, 0xec, 0x98, 0x6d, 0xc1, 0x79, 0x79, 0x25, 0x78, 0xc0, 0x4d, 0xed, 0xd0, 0xac, 0xdd, + 0x6a, 0x92, 0x0b, 0xff, 0xac, 0xd7, 0x55, 0xa6, 0x4f, 0x46, 0x9f, 0x7d, 0x2c, 0xdc, 0x0a, 0x46, + 0x0e, 0x91, 0x70, 0x6f, 0xaa, 0x17, 0xb6, 0xc4, 0x0c, 0x0f, 0xfe, 0x72, 0x3c, 0x23, 0xb3, 0xb1, + 0xe6, 0xd7, 0x7a, 0x71, 0xc6, 0x8e, 0x37, 0xff, 0xb9, 0xd3, 0x2d, 0x2e, 0xeb, 0x72, 0xfc, 0xbc, + 0x03, 0xf5, 0x50, 0xe0, 0xbd, 0xf6, 0x26, 0x35, 0x58, 0x75, 0x81, 0x9b, 0xed, 0x3c, 0x10, 0xb6, + 0x93, 0xc9, 0x57, 0xe4, 0x82, 0x41, 0xad, 0xe2, 0xdb, 0x54, 0x88, 0x8a, 0xee, 0x2e, 0x10, 0x9a, + 0xd8, 0x8a, 0x1f, 0x55, 0x7b, 0x60, 0x28, 0x5d, 0x5f, 0x78, 0x54, 0xac, 0xdf, 0x51, 0x15, 0xe1, + 0x4e, 0x81, 0xd6, 0xbd, 0x0a, 0x47, 0x8f, 0x24, 0x13, 0xdb, 0x3c, 0x08, 0x3b, 0x36, 0x5c, 0xc6, + 0x26, 0x59, 0x2c, 0x4c, 0x56, 0xe5, 0xdf, 0x9b, 0xd0, 0x59, 0xa6, 0x69, 0x78, 0x45, 0x44, 0x7b, + 0x4a, 0x1d, 0xa9, 0x79, 0x1f, 0x3c, 0x60, 0x6d, 0xa4, 0x33, 0xca, 0x7d, 0x86, 0x7d, 0xd7, 0xfa, + 0x55, 0xa1, 0xdb, 0x20, 0x58, 0xff, 0x53, 0xef, 0xd5, 0x44, 0xbd, 0x77, 0x81, 0x5a, 0x88, 0xdd, + 0xb1, 0x59, 0xc6, 0xa5, 0x57, 0x89, 0x01, 0x85, 0x9f, 0xa5, 0xca, 0x59, 0xda, 0x60, 0x00, 0x93, + 0xe5, 0x7d, 0xc6, 0x01, 0x8b, 0x55, 0x69, 0xb8, 0xf5, 0x1b, 0x2a, 0x87, 0x23, 0x5a, 0xf5, 0x12, + 0xe0, 0xe0, 0x1b, 0xc2, 0x6f, 0x19, 0xdb, 0xab, 0x09, 0xfd, 0xd1, 0x13, 0xf2, 0xef, 0x13, 0x37, + 0x1b, 0x6b, 0xd0, 0xfe, 0xc8, 0x3d, 0x33, 0x88, 0x73, 0xe1, 0x23, 0x98, 0xf0, 0xb0, 0xdb, 0xb7, + 0x0d, 0xbe, 0x8f, 0x52, 0x45, 0x44, 0x09, 0x93, 0x3a, 0xe2, 0xbd, 0xe3, 0xd1, 0xab, 0xee, 0x71, + 0x09, 0xfa, 0xd5, 0xbb, 0xad, 0xce, 0x7d, 0xde, 0xa5, 0x9d, 0xba, 0x03, 0x91, 0xa3, 0x9d, 0xe3, + 0xda, 0xf9, 0x94, 0x81, 0x95, 0x67, 0x19, 0x24, 0x04, 0x7b, 0x96, 0x0f, 0xda, 0xa7, 0x25, 0xa4, + 0x57, 0x19, 0x86, 0xc5, 0x93, 0x15, 0xa0, 0xa6, 0x23, 0x43, 0x95, 0xf9, 0xfd, 0xe7, 0x07, 0x2a, + 0x16, 0x22, 0x24, 0x41, 0xd2, 0x55, 0x4e, 0x6c, 0xa7, 0x23, 0xc5, 0xad, 0x73, 0x8b, 0xf1, 0x38, + 0x5f, 0x60, 0x52, 0x61, 0x19, 0x1c, 0xc3, 0xf3, 0x7b, 0x66, 0x25, 0xba, 0xd3, 0x2b, 0x62, 0x2f, + 0x73, 0x97, 0x9f, 0xbe, 0x9a, 0xc3, 0x99, 0x21, 0x10, 0x42, 0x37, 0x43, 0x2d, 0xce, 0x2c, 0xcf, + 0xc5, 0x66, 0xa7, 0x8d, 0x0b, 0x48, 0x49, 0xe4, 0x86, 0x68, 0x07, 0x0e, 0xbf, 0xc1, 0xee, 0xed, + 0xe7, 0x94, 0xd2, 0x06, 0x4f, 0xce, 0x19, 
0xc8, 0x2d, 0x36, 0xb6, 0x8e, 0x48, 0x93, 0xd7, 0x5c, + 0x3e, 0xf2, 0xed, 0xcc, 0x02, 0xd8, 0xfa, 0x04, 0x28, 0x1a, 0xc5, 0x12, 0x35, 0xdc, 0xdb, 0x7a, + 0xba, 0xba, 0x8b, 0xa5, 0xdc, 0x5b, 0x35, 0xb6, 0x64, 0xe0, 0x80, 0x0e, 0x91, 0x31, 0xe9, 0xce, + 0xe9, 0xc1, 0x6b, 0x92, 0x58, 0xf3, 0x48, 0x9e, 0xd2, 0xda, 0xd5, 0x8d, 0x73, 0xa5, 0x50, 0xa5, + 0x3c, 0xc5, 0x66, 0x0f, 0x71, 0x6b, 0xbe, 0xa3, 0xe5, 0xf5, 0xad, 0x3c, 0x2c, 0x33, 0x60, 0xec, + 0x69, 0x20, 0xb4, 0x21, 0xbe, 0x7f, 0x5b, 0xd8, 0x8f, 0x0f, 0xc6, 0x2a, 0x12, 0x12, 0x49, 0x8d, + 0xb5, 0xab, 0x01, 0xbd, 0x64, 0xf2, 0x99, 0x13, 0xb8, 0x42, 0xad, 0x3d, 0x6d, 0x54, 0x7a, 0x1c, + 0xa3, 0x55, 0x30, 0x5a, 0x44, 0x6a, 0x11, 0x0b, 0x64, 0x71, 0xea, 0x66, 0x55, 0xdb, 0x33, 0x5e, + 0xf6, 0x70, 0x75, 0x38, 0x04, 0xcf, 0xe4, 0x4b, 0x49, 0xdf, 0x61, 0x1c, 0xaf, 0xc5, 0x65, 0x8c, + 0x65, 0xd3, 0x22, 0xb0, 0xa8, 0x2d, 0x17, 0xeb, 0x15, 0x0c, 0xd1, 0x23, 0x1e, 0xd3, 0x3b, 0x23, + 0x59, 0xd9, 0x29, 0x67, 0x4b, 0x5e, 0x7a, 0x5e, 0x65, 0x4f, 0x8e, 0xc7, 0x11, 0xf1, 0x4d, 0xe7, + 0x43, 0x2b, 0x0f, 0x32, 0x6b, 0x5c, 0x8d, 0x66, 0xf7, 0x2a, 0xb0, 0xe7, 0xfc, 0x4b, 0xe9, 0xf6, + 0x32, 0x37, 0x66, 0x95, 0x8c, 0xf4, 0x45, 0x3c, 0xf4, 0x4c, 0xf4, 0x4e, 0x41, 0x2f, 0xbf, 0x84, + 0xb5, 0x67, 0xdf, 0x13, 0x7e, 0xe5, 0x0c, 0xd6, 0xd6, 0xc4, 0xe8, 0x94, 0x8c, 0xcc, 0x90, 0x12, + 0xfa, 0xe5, 0x85, 0x00, 0x6f, 0xf4, 0x09, 0x4f, 0x09, 0x79, 0x74, 0xc7, 0xd7, 0xd4, 0xbd, 0xb3, + 0xe9, 0x98, 0x49, 0x91, 0x1d, 0x67, 0xbb, 0x34, 0x93, 0x1a, 0xd9, 0xe5, 0x3c, 0x46, 0x82, 0xc4, + 0xc3, 0xd6, 0xc1, 0x7a, 0xe4, 0xa2, 0x7d, 0x77, 0xbe, 0xf3, 0xa7, 0xf8, 0xe9, 0x92, 0x68, 0x35, + 0xce, 0xb3, 0x55, 0x33, 0xb9, 0xe1, 0xe9, 0xe8, 0x81, 0x3d, 0x3d, 0x1c, 0xf5, 0xae, 0x09, 0x90, + 0x9e, 0xfd, 0x50, 0xc5, 0x5e, 0xf1, 0x47, 0x1d, 0x22, 0xb1, 0x17, 0x6a, 0x0b, 0x85, 0xb7, 0x63, + 0x15, 0xc4, 0xd4, 0xe4, 0x2c, 0xdc, 0x65, 0x47, 0x67, 0x29, 0x5d, 0xc3, 0x9d, 0xa9, 0xd5, 0x7d, + 0x48, 0x1f, 0x90, 0x42, 0x55, 0xd5, 0x8e, 0x07, 0x49, 0x47, 0xcb, 0x40, 0x62, 0xe2, 0xc5, 0x05, + 0x3f, 0x95, 0x28, 0x8c, 0x05, 0x3e, 0x88, 0xda, 0x4a, 0xab, 0x87, 0xd3, 0x6d, 0x7c, 0xa6, 0x3b, + 0x09, 0x95, 0xf2, 0xcf, 0xba, 0xbf, 0xcd, 0x24, 0x00, 0x92, 0xce, 0xb5, 0x42, 0xe6, 0x46, 0xb7, + 0x63, 0x2c, 0xea, 0xa0, 0x0d, 0xf2, 0xf0, 0x6e, 0x91, 0x7c, 0xe4, 0xfb, 0x1e, 0x73, 0xcb, 0xd9, + 0x59, 0x49, 0xa4, 0x41, 0x86, 0x7e, 0x88, 0x4f, 0x1e, 0x7d, 0x5e, 0xb9, 0x9e, 0x59, 0xe7, 0x80, + 0xd7, 0xe4, 0xe1, 0x40, 0x65, 0xd1, 0x7a, 0x29, 0x01, 0xfa, 0x3a, 0x0a, 0x78, 0x06, 0x0e, 0xf2, + 0x99, 0xa3, 0xc1, 0x37, 0xe9, 0x35, 0x75, 0xd9, 0x0a, 0x4b, 0xc7, 0x88, 0x7c, 0x53, 0x70, 0xb6, + 0x5b, 0xa1, 0x4c, 0x3a, 0x46, 0xea, 0xc9, 0xd7, 0x96, 0x27, 0x10, 0xd1, 0xed, 0x01, 0x24, 0xc4, + 0x1c, 0xe7, 0x72, 0xbe, 0x77, 0xa4, 0x0d, 0x8e, 0x96, 0xd5, 0x10, 0x3b, 0x5e, 0x0c, 0x6f, 0x6d, + 0xfe, 0xd2, 0x72, 0x28, 0xc2, 0x61, 0x1c, 0x3a, 0x7e, 0xb0, 0xea, 0x0f, 0x4b, 0x14, 0x37, 0x0c, + 0xaf, 0x8b, 0x7d, 0xeb, 0xc3, 0xc7, 0x30, 0xb9, 0xcf, 0xff, 0xd3, 0x5e, 0xda, 0x01, 0xe8, 0x88, + 0x9b, 0x2d, 0x4a, 0xa0, 0x61, 0xbe, 0xdd, 0x4f, 0xd1, 0xd1, 0x2e, 0x87, 0x5a, 0xba, 0xbb, 0xcd, + 0xca, 0x07, 0x4f, 0x5b, 0xee, 0x91, 0x18, 0x03, 0xbc, 0x3b, 0x35, 0x62, 0x16, 0xee, 0xd2, 0xf8, + 0x46, 0xc1, 0x3f, 0x59, 0x9b, 0x7d, 0x78, 0x1c, 0x36, 0x5f, 0x06, 0xf9, 0x8c, 0x39, 0x5c, 0xb3, + 0x35, 0x12, 0x8b, 0x60, 0x90, 0x34, 0x36, 0x52, 0x3b, 0xa2, 0x3a, 0xf7, 0x6a, 0xa0, 0x75, 0xc0, + 0xf5, 0x9b, 0xed, 0x6f, 0xca, 0x32, 0x21, 0x26, 0xee, 0x7e, 0x37, 0xef, 0x86, 0x29, 0x45, 0x18, + 0x37, 0x1b, 0x4b, 0xa6, 0xcc, 0x60, 0x45, 0x67, 0x63, 0x3d, 0x9c, 
0x30, 0x02, 0x3b, 0x1b, 0x8e, + 0x37, 0x14, 0x9d, 0xc4, 0xe1, 0x7b, 0x39, 0xe2, 0x46, 0xec, 0xa8, 0x57, 0x12, 0xc2, 0x44, 0x77, + 0xc1, 0xba, 0x9e, 0x97, 0x7e, 0x41, 0x32, 0x63, 0x7c, 0xcb, 0xcc, 0xef, 0x76, 0x33, 0xb5, 0xdf, + 0x3b, 0x6d, 0x77, 0xc9, 0x4e, 0x12, 0xd4, 0x70, 0x76, 0x87, 0x4b, 0x79, 0x40, 0xb3, 0x55, 0x94, + 0xb1, 0xe2, 0xc4, 0x63, 0xb3, 0x32, 0x75, 0xc4, 0xc7, 0x38, 0x4f, 0x50, 0x48, 0xb5, 0x60, 0xbe, + 0xc7, 0x9e, 0xae, 0x57, 0x0e, 0xfb, 0x08, 0x61, 0x17, 0xcf, 0x21, 0xe0, 0x7c, 0x59, 0xa2, 0xc0, + 0x66, 0x7c, 0x8d, 0x38, 0x33, 0x50, 0x6f, 0xc5, 0xc7, 0x2a, 0xbb, 0x52, 0x3f, 0x3d, 0x22, 0x0b, + 0x42, 0x70, 0x2e, 0xe6, 0xde, 0xa2, 0x6f, 0x12, 0xdc, 0x32, 0x52, 0x7d, 0x27, 0xbe, 0xad, 0xe4, + 0xea, 0xc4, 0xc2, 0x8b, 0x40, 0xa0, 0xae, 0x0b, 0xa4, 0xbb, 0xfe, 0x93, 0x3b, 0x1b, 0x4f, 0xbf, + 0x2a, 0xd6, 0xd1, 0x08, 0xea, 0x72, 0x29, 0x64, 0x76, 0x77, 0x68, 0x9e, 0xba, 0xd7, 0xdd, 0x22, + 0xe2, 0x1b, 0x00, 0x8c, 0x41, 0x88, 0x25, 0xe2, 0xb5, 0xdb, 0xcf, 0x6d, 0xaf, 0x15, 0xaa, 0x5d, + 0xf4, 0x78, 0xf2, 0x16, 0x5e, 0x13, 0xbd, 0x80, 0x7b, 0x99, 0xb1, 0x98, 0xab, 0x1c, 0xaf, 0xbd, + 0x89, 0xff, 0xe6, 0xf3, 0x93, 0x66, 0x84, 0x7d, 0x63, 0xa1, 0x01, 0xdf, 0x74, 0xc7, 0xed, 0x82, + 0x43, 0x01, 0x44, 0x16, 0x84, 0x44, 0xd4, 0xf2, 0xb8, 0x6a, 0x90, 0x36, 0xf1, 0xb6, 0x87, 0x2a, + 0x66, 0x58, 0x58, 0xe5, 0x63, 0xd3, 0xf7, 0x89, 0x4e, 0x99, 0xd9, 0x1d, 0x29, 0x49, 0x19, 0xae, + 0x5f, 0x1c, 0xa8, 0xd5, 0xbe, 0x86, 0x56, 0x11, 0xb0, 0x28, 0xb5, 0xdf, 0x36, 0xf3, 0xd1, 0xb7, + 0xc0, 0x3a, 0x63, 0x01, 0x77, 0x7e, 0x4e, 0x1c, 0x1e, 0xbc, 0x9a, 0xd1, 0x46, 0xcd, 0x29, 0x44, + 0xbc, 0xde, 0x11, 0x1c, 0xab, 0x6f, 0x7e, 0xc9, 0x05, 0x7c, 0x69, 0x8c, 0x65, 0xf6, 0xde, 0x66, + 0x51, 0xcb, 0x32, 0x96, 0x5d, 0x87, 0xf1, 0x33, 0x75, 0xad, 0xe8, 0xc7, 0x83, 0x1a, 0x56, 0x73, + 0xf8, 0x33, 0x13, 0xae, 0x54, 0x40, 0x17, 0x13, 0x18, 0x4d, 0xe6, 0xf6, 0xc3, 0x2d, 0x82, 0x34, + 0x87, 0x38, 0xae, 0x10, 0xb5, 0x4b, 0x7b, 0x35, 0x68, 0x8d, 0x45, 0xb7, 0x8a, 0xf0, 0x9e, 0x25, + 0x40, 0x00, 0x26, 0x3a, 0x88, 0xa5, 0x42, 0xaf, 0xb4, 0x69, 0xb5, 0x8f, 0xa9, 0xae, 0x18, 0x5f, + 0x24, 0x27, 0x5a, 0xf3, 0x7e, 0xc1, 0xa7, 0x0e, 0xec, 0x7f, 0x63, 0xc9, 0x7b, 0xb0, 0xf1, 0xdc, + 0x7a, 0xc2, 0xfb, 0x2b, 0x07, 0x11, 0x67, 0x9d, 0x4e, 0x63, 0x74, 0x63, 0x70, 0xf6, 0xab, 0x19, + 0xcd, 0x37, 0xfc, 0x5e, 0x39, 0x26, 0x87, 0xc3, 0xda, 0xc7, 0x92, 0x01, 0x66, 0x72, 0xe1, 0x46, + 0xc7, 0xeb, 0x5b, 0x03, 0x7c, 0xe5, 0x57, 0x78, 0x2a, 0x3b, 0xb9, 0xf2, 0xbe, 0xb2, 0xe4, 0x3e, + 0xaa, 0xcc, 0xb2, 0xd3, 0x69, 0x71, 0x77, 0x4f, 0x68, 0x02, 0x74, 0x00, 0x76, 0xf4, 0x84, 0xdf, + 0xdb, 0xe5, 0x9f, 0x95, 0x37, 0x3e, 0xf6, 0xe3, 0x39, 0x30, 0x2d, 0x76, 0x83, 0x91, 0xf1, 0x0d, + 0xa6, 0x6a, 0xbd, 0x7e, 0x34, 0x90, 0x6b, 0x87, 0x13, 0xd8, 0x14, 0x3b, 0xe8, 0x30, 0x82, 0x04, + 0x2c, 0xa4, 0x16, 0x30, 0xcd, 0xa6, 0x11, 0xbe, 0x33, 0xc8, 0x09, 0xb2, 0xc6, 0xfc, 0x94, 0x6f, + 0xd9, 0xc1, 0x5a, 0x3e, 0xb0, 0x2d, 0x2a, 0x7d, 0x4f, 0x22, 0x9f, 0xcb, 0xd5, 0x6d, 0xae, 0x80, + 0x4c, 0x5c, 0xb8, 0x54, 0xc8, 0x33, 0x64, 0x4e, 0xb0, 0xde, 0xcc, 0xc7, 0xa4, 0x2c, 0x6e, 0x2f, + 0xba, 0xf0, 0x62, 0xb4, 0xc7, 0xfb, 0x28, 0xd4, 0x90, 0xd9, 0xf7, 0xc9, 0x52, 0x67, 0x36, 0xf5, + 0x22, 0x5d, 0x67, 0x8e, 0xb2, 0xe8, 0x9b, 0xc6, 0x3d, 0x47, 0x99, 0x92, 0x64, 0x7f, 0x9b, 0x96, + 0x49, 0x1e, 0x53, 0xa3, 0xf7, 0x8f, 0x96, 0x86, 0xc5, 0xcb, 0xcc, 0xf6, 0x6c, 0x95, 0xa8, 0xe7, + 0xbc, 0x6d, 0x03, 0xcb, 0x2a, 0x1f, 0x16, 0x67, 0x14, 0x98, 0xc6, 0x74, 0x65, 0x61, 0x72, 0xf7, + 0x75, 0x3f, 0x6d, 0x95, 0xbb, 0x2d, 0x7c, 0xd9, 0x4b, 0xa1, 0xc9, 0x34, 0x1c, 0x31, 0xb0, 
0xb5, + 0x17, 0x3c, 0x2f, 0xde, 0x3a, 0x97, 0x09, 0xd9, 0x7c, 0x52, 0xcc, 0x27, 0xac, 0x44, 0xf4, 0x3c, + 0x3b, 0x12, 0xf4, 0x3d, 0xe7, 0xf2, 0xbe, 0x50, 0x9f, 0xa2, 0xa4, 0xb2, 0xf0, 0x6e, 0xa7, 0x94, + 0x5c, 0xd6, 0x40, 0xd5, 0xef, 0xef, 0xd7, 0x8f, 0x56, 0x73, 0xd8, 0x11, 0x8b, 0x7e, 0x84, 0x32, + 0xda, 0x4b, 0x54, 0x47, 0x69, 0x8e, 0x5a, 0x9d, 0xd5, 0xe1, 0xa2, 0xe5, 0x03, 0x95, 0x33, 0xba, + 0x45, 0xbb, 0xa0, 0xac, 0x67, 0xa5, 0x2a, 0x8a, 0x10, 0x7b, 0xdf, 0x1f, 0x33, 0x42, 0x3d, 0xd1, + 0x72, 0x04, 0xc4, 0xe0, 0x91, 0x99, 0xc5, 0x11, 0xf8, 0x19, 0x4d, 0x35, 0xba, 0xf7, 0x8c, 0xff, + 0x90, 0x5d, 0x31, 0xba, 0xde, 0x39, 0x85, 0x53, 0xd3, 0x1e, 0xe7, 0x96, 0xe2, 0x6b, 0xbf, 0x63, + 0xec, 0x1d, 0x4b, 0x0e, 0x55, 0x81, 0x18, 0xc1, 0x45, 0x45, 0xd5, 0x5a, 0x12, 0x5d, 0xd3, 0x7e, + 0x6c, 0x9d, 0x0c, 0xd5, 0x09, 0x29, 0xfe, 0x07, 0xf4, 0x4e, 0x1f, 0xf1, 0xe5, 0xc4, 0x01, 0xb8, + 0xe5, 0x82, 0x69, 0x9f, 0x9d, 0xf4, 0x05, 0xc5, 0x85, 0x10, 0xde, 0x03, 0x9f, 0x99, 0xef, 0xfd, + 0x5c, 0x9e, 0x77, 0x56, 0xb1, 0xd0, 0x6a, 0x2f, 0x0a, 0x53, 0x90, 0x29, 0x83, 0x46, 0xb9, 0x97, + 0xee, 0x0e, 0xf3, 0x11, 0x0d, 0x85, 0x47, 0x6a, 0xd3, 0x1f, 0xd3, 0xc4, 0x6d, 0xa4, 0xd9, 0x36, + 0x5b, 0x06, 0x68, 0xdf, 0x8f, 0xa1, 0x52, 0x48, 0x96, 0x08, 0x78, 0xb5, 0x5b, 0xfd, 0xfd, 0x16, + 0x83, 0x22, 0x3a, 0xec, 0xb2, 0x45, 0x57, 0x61, 0x2e, 0x7a, 0xf2, 0x03, 0x23, 0xe1, 0xad, 0x37, + 0xd4, 0x01, 0xf6, 0xfd, 0x34, 0xc3, 0x1e, 0xec, 0x70, 0x33, 0x69, 0xfb, 0x2e, 0x2d, 0x6b, 0xdd, + 0xfa, 0x0d, 0x3c, 0xa8, 0xf4, 0x17, 0xe5, 0x79, 0x08, 0xc6, 0x34, 0xcb, 0x7c, 0x3e, 0xb9, 0x19, + 0xae, 0xc8, 0xb6, 0x4d, 0x2a, 0xd1, 0x8a, 0x20, 0x25, 0x53, 0xea, 0xac, 0x7e, 0x21, 0x74, 0x67, + 0x15, 0xd1, 0x85, 0xcc, 0xad, 0x29, 0xfd, 0x1c, 0xe6, 0xda, 0xc9, 0x5d, 0xf1, 0x40, 0xb9, 0x38, + 0xae, 0xbd, 0xec, 0xf8, 0x70, 0xc4, 0x5b, 0xc0, 0x51, 0xd5, 0x52, 0xc1, 0xdf, 0x44, 0xdf, 0xd9, + 0x26, 0x41, 0x4e, 0xba, 0x96, 0x9e, 0x37, 0xcd, 0xf1, 0x4e, 0xb2, 0xfb, 0x10, 0x18, 0x49, 0xf6, + 0x78, 0xa3, 0xeb, 0x93, 0x36, 0xf4, 0xc9, 0x5d, 0x6a, 0x9b, 0x2f, 0x7e, 0x66, 0xe7, 0x25, 0x0d, + 0x23, 0x58, 0x14, 0xa9, 0x4d, 0x5b, 0x51, 0xcc, 0xc6, 0xcc, 0x1f, 0x17, 0x57, 0x02, 0xa3, 0x43, + 0x0b, 0xe4, 0x0e, 0xb6, 0x5d, 0x9f, 0x86, 0x7e, 0xc9, 0x12, 0xaa, 0x7a, 0x65, 0x55, 0x81, 0x5b, + 0xd3, 0x78, 0xd9, 0x49, 0xa0, 0xe6, 0x18, 0x65, 0xf3, 0x83, 0x86, 0x6b, 0x76, 0xee, 0xee, 0xc6, + 0x92, 0xbc, 0xd2, 0xf6, 0x87, 0x7e, 0x8e, 0x6c, 0x05, 0xe3, 0xdb, 0x3b, 0x10, 0xe8, 0x79, 0x13, + 0x19, 0xc1, 0x42, 0xcb, 0xba, 0xeb, 0x4b, 0x59, 0x57, 0x0c, 0x51, 0x63, 0xbe, 0x4f, 0xed, 0xcb, + 0xba, 0xf7, 0x6f, 0xe5, 0x5e, 0xe6, 0xe0, 0x4e, 0x3b, 0x93, 0x57, 0x23, 0xc2, 0xf3, 0xc4, 0xfd, + 0x5a, 0xa0, 0xa6, 0xca, 0xd3, 0x88, 0x38, 0x10, 0x36, 0x43, 0xd0, 0xb6, 0xb9, 0x07, 0x80, 0x45, + 0x93, 0x80, 0x57, 0x3d, 0xf9, 0xc9, 0xd9, 0xe5, 0x15, 0x96, 0x8a, 0x7d, 0x0c, 0x1e, 0xff, 0x92, + 0x4f, 0xe1, 0xb3, 0xcf, 0xf7, 0xbc, 0xc7, 0x92, 0xfb, 0x4d, 0xc3, 0x7e, 0xb1, 0x0b, 0x7b, 0xf7, + 0x60, 0xc8, 0x39, 0x4d, 0xff, 0x82, 0xf5, 0x98, 0xbe, 0x8b, 0xfc, 0x52, 0xbf, 0xbe, 0xea, 0x82, + 0x4c, 0x7e, 0xc7, 0xd0, 0xcf, 0xf2, 0x46, 0xaa, 0x66, 0x36, 0x63, 0xfc, 0xab, 0x07, 0xd5, 0xb7, + 0x3e, 0x6b, 0x3f, 0xbb, 0xd1, 0xe7, 0xc3, 0xac, 0x1a, 0x77, 0x71, 0x5c, 0x1b, 0x88, 0x5f, 0x13, + 0x5f, 0x20, 0x0c, 0x03, 0x36, 0xdf, 0x83, 0x11, 0x9e, 0x36, 0x3e, 0xc9, 0x8b, 0xc8, 0x79, 0x2f, + 0xc7, 0xd6, 0x64, 0xd3, 0x97, 0x39, 0xa6, 0x61, 0xc3, 0x8d, 0x69, 0xe8, 0x72, 0x9b, 0xb5, 0x64, + 0x3e, 0x71, 0x2c, 0x3b, 0x7d, 0xa3, 0x45, 0x4a, 0x25, 0x75, 0x85, 0xee, 0xd1, 0x33, 0x19, 0x8d, + 0x10, 0x1c, 0xd8, 
0x43, 0xc2, 0xb1, 0xdc, 0x7b, 0x4d, 0x8b, 0xcf, 0xd8, 0xf4, 0xfa, 0x0b, 0xa0, + 0x56, 0x62, 0x86, 0x39, 0xeb, 0xdf, 0x5d, 0xea, 0xe6, 0x75, 0x5e, 0x4c, 0xad, 0x82, 0x61, 0xcc, + 0xcb, 0x9f, 0xe3, 0x51, 0x58, 0x03, 0x1d, 0xad, 0x88, 0x5a, 0xd2, 0x92, 0x07, 0xba, 0x5f, 0xed, + 0x0f, 0xbe, 0x80, 0xda, 0xcc, 0x85, 0x91, 0xe4, 0x77, 0x69, 0x69, 0x3d, 0x58, 0x3c, 0x11, 0x1e, + 0xaf, 0x2d, 0x8c, 0xda, 0xdd, 0x9e, 0xcb, 0x42, 0x92, 0x0a, 0xf9, 0xcd, 0x89, 0xd9, 0x54, 0x36, + 0x9a, 0xd7, 0x40, 0xd1, 0x7b, 0x88, 0x0e, 0xde, 0x2e, 0x4b, 0xd3, 0x33, 0xd0, 0xa7, 0xad, 0x78, + 0xfe, 0x9f, 0x05, 0xbd, 0x57, 0x9b, 0x96, 0x29, 0xe9, 0xa1, 0xdb, 0x25, 0xad, 0x8a, 0xdf, 0x9c, + 0xfd, 0xa5, 0x3f, 0x47, 0x71, 0x3c, 0x19, 0x7e, 0x9a, 0xf2, 0x35, 0x71, 0x93, 0x26, 0xfc, 0x74, + 0x66, 0xa8, 0xfe, 0x53, 0x8b, 0xb0, 0xbf, 0x6e, 0xe9, 0x03, 0xae, 0x77, 0x9b, 0x34, 0x53, 0xdb, + 0x79, 0x0a, 0x65, 0x53, 0xd4, 0x2c, 0x73, 0x9a, 0x2f, 0xdb, 0xb0, 0xbf, 0x9f, 0x8a, 0xf3, 0x43, + 0xdf, 0x75, 0x75, 0xbc, 0x19, 0x3c, 0x23, 0x72, 0xd5, 0x10, 0xc7, 0x18, 0x83, 0xa4, 0xbf, 0xf9, + 0x0e, 0x17, 0x9f, 0x0c, 0x80, 0x31, 0xc0, 0x12, 0x6c, 0xc5, 0x1e, 0x3f, 0x93, 0x0c, 0x6b, 0x26, + 0x7a, 0xe4, 0x91, 0x4b, 0x36, 0x90, 0xa2, 0xc9, 0xb0, 0x68, 0x7e, 0x11, 0x6b, 0x22, 0x51, 0xaf, + 0x27, 0x8b, 0xbd, 0xc0, 0xa5, 0x89, 0x8b, 0x17, 0x7c, 0x6f, 0x95, 0xab, 0xa9, 0xf7, 0xe9, 0x6b, + 0x4c, 0x1c, 0xc3, 0xed, 0xe3, 0x5f, 0x2f, 0x5e, 0x15, 0xd5, 0xc9, 0x5f, 0xa5, 0xcf, 0x50, 0x3e, + 0x5b, 0x71, 0xbc, 0x35, 0x1c, 0x98, 0x48, 0x98, 0x3f, 0x1c, 0xe2, 0x27, 0x38, 0xd9, 0xee, 0xc5, + 0x30, 0xd6, 0x30, 0xa8, 0xb2, 0x2a, 0x08, 0x8d, 0x73, 0x61, 0x32, 0xd4, 0x80, 0x1e, 0xac, 0x0b, + 0x69, 0xb5, 0x35, 0xb3, 0x29, 0xc0, 0x35, 0xf4, 0xa5, 0x23, 0x06, 0x26, 0x67, 0x1c, 0xe7, 0x0a, + 0x17, 0xc5, 0xea, 0x6a, 0x4c, 0x13, 0xe2, 0xa5, 0x9f, 0xbf, 0x12, 0xce, 0xf1, 0xa5, 0xc4, 0x1c, + 0x58, 0x4f, 0x0b, 0xa3, 0x0f, 0xe0, 0x0f, 0xc7, 0x7d, 0x95, 0xee, 0x50, 0x69, 0xb9, 0x4f, 0x35, + 0xc1, 0xcb, 0x88, 0x07, 0xb0, 0x12, 0xe6, 0x87, 0x46, 0x6b, 0x98, 0x71, 0x22, 0x9c, 0xa5, 0xb0, + 0xea, 0x7a, 0xab, 0x29, 0x01, 0x23, 0x6a, 0x9d, 0xa8, 0x99, 0x72, 0xcd, 0x45, 0xba, 0x46, 0x6a, + 0x25, 0xc7, 0xe7, 0x46, 0x9a, 0x39, 0x52, 0x75, 0x7b, 0x8e, 0x79, 0xe3, 0x7a, 0x2d, 0xef, 0xf2, + 0xbe, 0x44, 0x3f, 0x66, 0x6e, 0x37, 0xf1, 0x74, 0xe4, 0x36, 0x3a, 0xe3, 0x82, 0xf3, 0x75, 0x76, + 0x95, 0x16, 0x81, 0xd8, 0xd3, 0x5f, 0x08, 0x05, 0xeb, 0x95, 0x7e, 0xda, 0x9d, 0x31, 0x88, 0xb4, + 0x9d, 0xe8, 0x28, 0x0e, 0x93, 0x46, 0x20, 0x63, 0x0e, 0xac, 0xe4, 0x1a, 0x07, 0xa6, 0x1c, 0xce, + 0xfd, 0xc3, 0x8a, 0x47, 0x7f, 0xe2, 0x62, 0xc9, 0xe0, 0x55, 0x70, 0x25, 0x2c, 0xa5, 0x89, 0x83, + 0x1c, 0xc4, 0xb7, 0x70, 0xd8, 0x8c, 0xe6, 0x2e, 0xa2, 0x46, 0x1f, 0x90, 0xa6, 0x32, 0x6e, 0x2f, + 0xea, 0xba, 0x1f, 0xc9, 0x32, 0x70, 0x03, 0xba, 0x3e, 0xd5, 0x78, 0x91, 0xbd, 0x0f, 0x73, 0x33, + 0x51, 0x34, 0x21, 0x66, 0x7d, 0xb9, 0xf3, 0x36, 0xfb, 0x13, 0xa0, 0x8d, 0xe4, 0x39, 0x32, 0xa6, + 0xc5, 0x94, 0xec, 0x4d, 0xb8, 0xa2, 0x6d, 0x5e, 0xde, 0x07, 0xec, 0xe8, 0x31, 0xf3, 0x22, 0x6f, + 0x1a, 0x16, 0xe6, 0x67, 0xfe, 0x2b, 0x69, 0x24, 0x4d, 0x12, 0x4e, 0x24, 0x77, 0x5f, 0x41, 0x7e, + 0x50, 0x39, 0x1d, 0x4d, 0x7e, 0x7a, 0xac, 0x3b, 0x05, 0xf8, 0xab, 0xbf, 0xfa, 0xab, 0xbf, 0xfa, + 0xab, 0xbf, 0xfa, 0xab, 0xff, 0x77, 0x12, 0x20, 0xa2, 0x04, 0x50, 0x03, 0x81, 0x92, 0x76, 0x80, + 0x79, 0x20, 0x1a, 0x02, 0x00, 0x04, 0xd6, 0xc8, 0xeb, 0xa4, 0xc2, 0xa5, 0x88, 0xc4, 0x97, 0x33, + 0x15, 0x80, 0x0a, 0x4e, 0xbf, 0x23, 0x56, 0x5a, 0x00, 0x80, 0x3b, 0x0e, 0x59, 0xeb, 0x14, 0x18, + 0x00, 0x00, 0x98, 0xd7, 0x54, 0x8b, 0xc6, 
0x28, 0x78, 0x2b, 0x6f, 0x59, 0xfc, 0x12, 0x0d, 0x02, + 0x00, 0x50, 0x01, 0xb0, 0x2b, 0xa7, 0x98, 0xa7, 0x84, 0xe8, 0xdf, 0x71, 0x6b, 0x7e, 0x46, 0x02, + 0x7f, 0x56, 0x40, 0x21, 0xdf, 0xc9, 0xa7, 0xc2, 0xa1, 0xf3, 0xa9, 0x32, 0x24, 0xe8, 0x1a, 0x10, + 0x5c, 0x05, 0x02, 0xb8, 0xb6, 0xf6, 0xc8, 0xa5, 0xc2, 0x53, 0x65, 0x10, 0xe1, 0xa1, 0x6b, 0xd7, + 0xd1, 0x7e, 0x67, 0x37, 0x76, 0xd9, 0x3f, 0x76, 0x12, 0xb4, 0x1b, 0x68, 0x49, 0x13, 0xf2, 0xf3, + 0xda, 0x47, 0x47, 0x7d, 0xed, 0x43, 0x3e, 0x4a, 0x85, 0x93, 0x60, 0x72, 0x73, 0x6b, 0x91, 0xc5, + 0xcb, 0x5c, 0x5b, 0xd6, 0x64, 0x52, 0xe1, 0x29, 0x5f, 0xe5, 0x00, 0xe5, 0x58, 0x49, 0x32, 0x14, + 0x58, 0x00, 0x00, 0x09, 0x96, 0x9c, 0x4c, 0x05, 0x32, 0x0f, 0x40, 0x68, 0xb2, 0xe5, 0x0b, 0x64, + 0x59, 0x42, 0x75, 0x9d, 0x47, 0x58, 0x60, 0xca, 0x2d, 0x65, 0x74, 0x9d, 0x5e, 0x7b, 0x57, 0xa5, + 0x08, 0x4d, 0x22, 0xe0, 0x4b, 0xe8, 0xae, 0x53, 0x70, 0x2d, 0x04, 0x7b, 0x09, 0x3e, 0x71, 0x1a, + 0xe1, 0x46, 0xad, 0x2d, 0x0f, 0xe0, 0x81, 0xd4, 0x60, 0x81, 0x00, 0xfc, 0xe8, 0xf6, 0x84, 0xa5, + 0x97, 0x35, 0xc7, 0xa3, 0x16, 0xa5, 0x5e, 0x4b, 0x2b, 0xdd, 0xc7, 0x41, 0x76, 0x0f, 0x83, 0x13, + 0x01, 0x4b, 0x46, 0x3d, 0xc7, 0x24, 0x90, 0xe1, 0x21, 0x2d, 0x8a, 0x3f, 0x6b, 0x80, 0xf9, 0xf3, + 0xe4, 0x5f, 0x53, 0x5c, 0xb9, 0xf3, 0xa0, 0x51, 0x13, 0xb5, 0x00, 0xf8, 0xfa, 0x3a, 0xe0, 0x21, + 0x24, 0x4f, 0x72, 0xc9, 0x16, 0x72, 0xb0, 0xb4, 0xb5, 0xb2, 0x7f, 0x6a, 0x41, 0x82, 0xf1, 0x7b, + 0x7f, 0xa4, 0x98, 0x10, 0xfe, 0xf8, 0xd3, 0xb5, 0x30, 0x04, 0x0c, 0x71, 0x5d, 0x81, 0x24, 0x14, + 0x7b, 0x26, 0xc0, 0x7c, 0xe9, 0x22, 0x71, 0xb7, 0x86, 0x82, 0x10, 0x20, 0xbe, 0x3c, 0x66, 0x21, + 0xbe, 0x5e, 0x43, 0x41, 0x09, 0x10, 0xdf, 0x6d, 0xb2, 0x00, 0x6d, 0x89, 0x1f, 0x82, 0x4e, 0xb3, + 0x12, 0x59, 0x01, 0x01, 0x89, 0x84, 0x0e, 0xe8, 0xdf, 0x39, 0x02, 0xab, 0x57, 0x25, 0x93, 0x4c, + 0x80, 0x44, 0xec, 0x89, 0xc2, 0xcb, 0x94, 0x8e, 0x58, 0x5b, 0x62, 0x0a, 0x85, 0x48, 0xa0, 0x2c, + 0xdc, 0x71, 0x1d, 0x04, 0x35, 0x67, 0x47, 0x93, 0x60, 0x42, 0x7e, 0x15, 0xa2, 0x63, 0xe0, 0x02, + 0xc8, 0x4f, 0xea, 0x24, 0x90, 0xf1, 0xbd, 0xdf, 0x1e, 0xb2, 0x55, 0x40, 0xaa, 0x05, 0xa6, 0x6c, + 0xea, 0x02, 0xcc, 0x22, 0xd5, 0x04, 0x8e, 0x75, 0x02, 0xb8, 0xe9, 0xdb, 0xfd, 0x45, 0x23, 0x4c, + 0xe3, 0x00, 0x3f, 0x8a, 0xdf, 0xbd, 0x02, 0x0b, 0xbc, 0x13, 0xa1, 0x81, 0x13, 0xbe, 0x02, 0x6b, + 0x20, 0xc0, 0x40, 0x22, 0xac, 0x2d, 0xf1, 0x65, 0x7f, 0x39, 0xf8, 0xb2, 0x39, 0x00, 0x28, 0x4b, + 0x96, 0x1f, 0xa8, 0x6d, 0x06, 0x06, 0x0b, 0x62, 0x82, 0x1a, 0x90, 0xe3, 0xa0, 0x00, 0x0a, 0x7f, + 0x54, 0x3b, 0xba, 0x10, 0xfd, 0xbb, 0x3f, 0x40, 0xb8, 0x19, 0x16, 0x38, 0xb0, 0x16, 0x09, 0x6a, + 0x4f, 0x82, 0xdb, 0x93, 0x83, 0xff, 0x64, 0xa8, 0x40, 0x42, 0xa4, 0xca, 0xd1, 0xa9, 0x0b, 0x59, + 0xe6, 0x34, 0x70, 0x78, 0xc8, 0xf1, 0x4d, 0x9e, 0xb7, 0x2c, 0x08, 0xe6, 0x67, 0x2d, 0xd7, 0xb9, + 0xae, 0xa7, 0x66, 0x8d, 0x37, 0x09, 0xfe, 0x27, 0x8b, 0xf0, 0xf2, 0x9f, 0xdc, 0x24, 0x90, 0x35, + 0x76, 0x4a, 0x38, 0x0a, 0x0b, 0x80, 0xcd, 0x40, 0x07, 0x01, 0x5c, 0x5b, 0xe4, 0x7f, 0x5b, 0x48, + 0x3b, 0xae, 0xff, 0x43, 0xfe, 0xf1, 0x4a, 0x12, 0xfd, 0xfb, 0x8c, 0x64, 0xa3, 0x84, 0x27, 0xc8, + 0x91, 0x76, 0xfc, 0xae, 0x06, 0xfe, 0xbf, 0xd5, 0x48, 0xd2, 0xdf, 0x54, 0x03, 0xfd, 0x5d, 0x8d, + 0x60, 0xef, 0xfd, 0x40, 0x01, 0x28, 0xe0, 0xba, 0x12, 0x14, 0x05, 0x16, 0xc8, 0x1f, 0xf5, 0x31, + 0x7a, 0x20, 0x1a, 0x75, 0x72, 0x42, 0x10, 0xda, 0x73, 0x33, 0x7f, 0xa3, 0xe8, 0xdf, 0x91, 0xa8, + 0x10, 0x56, 0xa6, 0xc0, 0x5a, 0x28, 0x00, 0xa5, 0x8d, 0x00, 0xf8, 0xb3, 0x30, 0xbc, 0x3d, 0xbd, + 0x99, 0x3f, 0x79, 0xc6, 0x54, 0x38, 0x7d, 0x6c, 0x4a, 0x62, 0x37, 
0x20, 0xa4, 0x03, 0x33, 0x3c, + 0xcd, 0xe2, 0x2a, 0xaa, 0xcd, 0xe2, 0x41, 0x0f, 0x0a, 0x09, 0x31, 0x99, 0x89, 0xba, 0x7e, 0x2f, + 0x1f, 0xe6, 0x2f, 0x4f, 0x81, 0xff, 0x7b, 0x3f, 0xf8, 0x51, 0x30, 0xf4, 0x6f, 0xa2, 0xa2, 0xa2, + 0x60, 0x0f, 0xf2, 0x29, 0xa5, 0x30, 0xc3, 0x5d, 0x7e, 0x81, 0x05, 0xf0, 0x01, 0x0c, 0x74, 0x16, + 0x00, 0x70, 0x0d, 0x3e, 0x80, 0xa6, 0x83, 0xb4, 0x2d, 0x05, 0x09, 0x21, 0x14, 0x09, 0xac, 0x01, + 0xc0, 0x43, 0x7a, 0x00, 0xe0, 0x2a, 0x28, 0x60, 0xa3, 0xe5, 0xf7, 0x4a, 0x46, 0xfa, 0x2d, 0x30, + 0x8b, 0x3f, 0xbb, 0xfc, 0xdf, 0x1a, 0x00, 0x64, 0xff, 0xdd, 0x25, 0x24, 0xec, 0xdf, 0x2e, 0x89, + 0x23, 0xaf, 0x77, 0x65, 0x41, 0x1d, 0x0f, 0x4f, 0x95, 0x0e, 0x0d, 0x3f, 0x9a, 0xbb, 0x99, 0x6f, + 0xea, 0x3f, 0xa7, 0x21, 0x65, 0x9c, 0x05, 0xf0, 0xfb, 0xa4, 0xdc, 0xd4, 0x35, 0xdf, 0xfd, 0x4f, + 0xf7, 0x83, 0xff, 0xed, 0x7e, 0x2b, 0xc1, 0x7f, 0xaf, 0x29, 0x7f, 0x27, 0x09, 0x2e, 0xe4, 0x80, + 0x75, 0x29, 0x7c, 0x78, 0xd3, 0x67, 0x4a, 0x4a, 0x78, 0x93, 0x1c, 0xa6, 0xf9, 0xbf, 0xbd, 0xed, + 0xa1, 0xa0, 0x84, 0xd7, 0x60, 0x11, 0x00, 0x50, 0x50, 0x20, 0xf0, 0x3f, 0xfd, 0xa7, 0x20, 0x85, + 0xdf, 0xb1, 0xa0, 0x86, 0x40, 0xec, 0x93, 0xda, 0xaf, 0x2d, 0x16, 0xe4, 0xa4, 0xf0, 0x1a, 0xcc, + 0xf5, 0x5f, 0x0d, 0x88, 0x5a, 0x40, 0x4c, 0xfb, 0x9f, 0x99, 0x44, 0x05, 0xe7, 0x91, 0xc5, 0x98, + 0x90, 0x40, 0x01, 0xe4, 0x78, 0xed, 0xd7, 0x67, 0x89, 0x99, 0x0c, 0x6a, 0xc2, 0x40, 0x87, 0x09, + 0x14, 0x08, 0xc1, 0x20, 0xbc, 0x26, 0xf8, 0x1f, 0xf2, 0x23, 0x6e, 0xe2, 0x49, 0xc0, 0x15, 0x58, + 0x00, 0x30, 0x2b, 0x18, 0x40, 0xe2, 0x4e, 0x09, 0x58, 0xa2, 0x81, 0xfe, 0x24, 0x34, 0x11, 0x08, + 0xd1, 0xe4, 0x84, 0x1a, 0x33, 0xd0, 0x61, 0xfd, 0x79, 0xeb, 0xd1, 0x35, 0x1f, 0xaa, 0xfd, 0x21, + 0x5e, 0x1b, 0x0a, 0x13, 0x80, 0x87, 0xd7, 0x1e, 0x63, 0x42, 0x2a, 0x8d, 0x08, 0x3b, 0xfa, 0x49, + 0x09, 0xff, 0xd3, 0xc7, 0x3f, 0xf3, 0x02, 0x73, 0xfb, 0xef, 0x29, 0x98, 0x07, 0x2c, 0xda, 0xd3, + 0xff, 0xa7, 0x5a, 0x0b, 0xfc, 0x54, 0xf8, 0xf5, 0x0c, 0xdc, 0x4c, 0x7c, 0xd5, 0xf5, 0x3d, 0x06, + 0xc5, 0x20, 0xfa, 0x03, 0x3a, 0x5e, 0x8c, 0x80, 0x7f, 0x78, 0xea, 0x77, 0xfd, 0xcc, 0x98, 0x4f, + 0xfb, 0x0f, 0x59, 0x6f, 0xf8, 0xf6, 0x46, 0x15, 0x46, 0x54, 0x02, 0xf4, 0xc5, 0xc2, 0xd3, 0x78, + 0xc0, 0x9b, 0x38, 0xac, 0x6b, 0x62, 0xdc, 0x10, 0x7a, 0x43, 0xc2, 0x1b, 0x3f, 0xeb, 0x0d, 0x29, + 0x6f, 0x48, 0x7a, 0x43, 0x9a, 0x1b, 0xce, 0x83, 0xae, 0x09, 0xc0, 0xbc, 0x59, 0xe7, 0x86, 0x9c, + 0xff, 0x3e, 0xdf, 0xec, 0xcf, 0x9e, 0xec, 0x9a, 0xce, 0x37, 0x74, 0xbf, 0xe1, 0x7f, 0x2e, 0x64, + 0x52, 0xca, 0x7f, 0xc0, 0x0c, 0x20, 0xfc, 0x87, 0xbf, 0x6e, 0xf4, 0x1f, 0x3f, 0xf7, 0xcd, 0xef, + 0xb7, 0x24, 0x71, 0xfe, 0xef, 0xf7, 0xfc, 0x5f, 0xfd, 0xd5, 0x5f, 0xfd, 0xd5, 0x5f, 0xfd, 0xd5, + 0x5f, 0xfd, 0xd5, 0x5f, 0xfd, 0xd5, 0x1f, 0xfd, 0x0f, 0xb1, 0xc8, 0xdf, 0x7d, 0x00, 0x30, 0x00, + 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveGspRmBoot_GA102_ucode_image_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12288, // uncompressed data size (bytes) + 7345, // compressed data size (bytes) + kgspBinArchiveGspRmBoot_GA102_ucode_image_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveGspRmBoot_GA102("ucode_desc_dbg") +// FILE NAME: kernel/inc/gsprm/bin/g_gsprm_skbl_dbg_ga102_riscv_desc.bin +// FILE TYPE: BINARY +// VAR NAME: N/A +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 76 +// COMPRESSED SIZE (bytes): 40 +// +static BINDATA_CONST NvU8 kgspBinArchiveGspRmBoot_GA102_ucode_desc_dbg_data[] = +{ + 0x63, 0x61, 0x00, 0x02, 0x05, 0x06, 0x86, 0x0d, 0xac, 0x40, 0xac, 0xca, 0xc0, 0x20, 0xc0, 0x80, + 0x05, 0x70, 0x40, 0x31, 0x33, 0x10, 0x73, 0x03, 0x31, 0x2f, 0x03, 0x03, 0x23, 0x16, 0x75, 0x00, + 0x8d, 0x26, 0xa8, 0x4f, 0x4c, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveGspRmBoot_GA102_ucode_desc_dbg_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 76, // uncompressed data size (bytes) + 40, // compressed data size (bytes) + kgspBinArchiveGspRmBoot_GA102_ucode_desc_dbg_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveGspRmBoot_GA102("ucode_image_prod") +// FILE NAME: kernel/inc/gsprm/bin/g_gsprm_skbl_prod_ga102_riscv_image.bin +// FILE TYPE: BINARY +// VAR NAME: N/A +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 12288 +// COMPRESSED SIZE (bytes): 7340 +// +static BINDATA_CONST NvU8 kgspBinArchiveGspRmBoot_GA102_ucode_image_prod_data[] = +{ + 0xed, 0x94, 0x75, 0x54, 0xd4, 0x5d, 0xb7, 0xc7, 0x27, 0x80, 0x01, 0xa4, 0xbb, 0x5b, 0x1a, 0xa4, + 0xbb, 0x5b, 0x1a, 0x41, 0x42, 0x9a, 0x01, 0x86, 0x52, 0x4a, 0xba, 0x73, 0xe8, 0x8e, 0x91, 0x50, + 0xba, 0x41, 0x1a, 0x29, 0x69, 0x09, 0x01, 0x25, 0xa4, 0xa4, 0x5b, 0x4a, 0x52, 0x42, 0x81, 0xab, + 0x0f, 0x3c, 0xef, 0x7d, 0xff, 0xbc, 0xff, 0x5f, 0xbf, 0x6b, 0xcd, 0xfa, 0xcc, 0xd9, 0x7b, 0xff, + 0xce, 0x3e, 0x7b, 0xaf, 0x7d, 0x8e, 0x12, 0xcd, 0x1b, 0xbf, 0xa8, 0x92, 0x66, 0x03, 0xcd, 0xf8, + 0x8a, 0x67, 0xc8, 0x5b, 0x74, 0xb8, 0xad, 0x3c, 0x82, 0xb9, 0xd4, 0x2b, 0x3c, 0xde, 0xd4, 0xfb, + 0xc2, 0x0b, 0x2d, 0x92, 0x07, 0x6f, 0x2a, 0xf3, 0x59, 0x91, 0xe8, 0x62, 0x9d, 0xf7, 0x7b, 0x6e, + 0x6f, 0xce, 0x21, 0xce, 0x2b, 0xfe, 0xe7, 0xb2, 0xd9, 0x2f, 0xc0, 0x53, 0x59, 0x61, 0x3d, 0x8c, + 0x97, 0x25, 0x0f, 0x20, 0x06, 0x4f, 0xa8, 0xd0, 0x08, 0x65, 0x2a, 0x9d, 0x78, 0xce, 0x15, 0x11, + 0x7b, 0x2a, 0x56, 0xc7, 0x0d, 0x27, 0xc0, 0x6e, 0x75, 0x92, 0x5a, 0x63, 0x1a, 0x59, 0xd5, 0x87, + 0xcd, 0x51, 0xf6, 0x0d, 0x2a, 0x79, 0xc8, 0xdb, 0x39, 0xda, 0x01, 0x18, 0x8a, 0x95, 0x21, 0x2c, + 0x30, 0xb8, 0x4a, 0x85, 0x1f, 0xcb, 0x3b, 0xaf, 0x51, 0xb5, 0x78, 0xa3, 0x97, 0x1a, 0xd0, 0xa3, + 0xcc, 0x89, 0x14, 0x74, 0x0a, 0x9d, 0x12, 0x52, 0xd4, 0xef, 0x92, 0x1f, 0x56, 0xd4, 0x50, 0xd8, + 0x0f, 0xd3, 0x84, 0x14, 0x7e, 0x6e, 0x99, 0xe0, 0x38, 0xfa, 0xef, 0x6d, 0x83, 0x0a, 0xa3, 0xc0, + 0xbd, 0x7c, 0x55, 0x15, 0xf2, 0x1f, 0x9c, 0x6c, 0x48, 0x58, 0xdd, 0x93, 0x64, 0x53, 0x7a, 0x34, + 0x9a, 0xd6, 0xd5, 0x71, 0x95, 0x91, 0x74, 0x84, 0xac, 0x2c, 0xc7, 0x28, 0x30, 0xc6, 0x02, 0xb6, + 0x57, 0x52, 0xdd, 0x7e, 0x0c, 0x8c, 0xde, 0xf6, 0xcc, 0x3d, 0xcf, 0x0c, 0xa6, 0x0e, 0x00, 0x92, + 0x65, 0x88, 0x5c, 0xaa, 
0x8f, 0x68, 0x6d, 0x71, 0x1c, 0x0a, 0x14, 0x0a, 0x82, 0x8f, 0x64, 0xc8, + 0xe7, 0x93, 0x95, 0x08, 0x52, 0x5a, 0x9f, 0x26, 0x4d, 0x3d, 0x80, 0x93, 0x85, 0xd8, 0x4f, 0x7e, + 0x0b, 0x54, 0xd0, 0xb4, 0x77, 0x73, 0x2d, 0x3d, 0xd6, 0xb6, 0xcd, 0x59, 0xa3, 0xb1, 0xab, 0xdb, + 0xf3, 0x38, 0x8f, 0xb1, 0x7b, 0xfc, 0xa3, 0xa3, 0x2d, 0x38, 0xd9, 0xe3, 0x4b, 0xfe, 0xe5, 0x34, + 0xb1, 0x53, 0xfc, 0x2b, 0x78, 0xd5, 0x29, 0x6c, 0x47, 0xf0, 0x08, 0x69, 0x23, 0x34, 0x9a, 0x1c, + 0xf6, 0x79, 0x7b, 0xf6, 0x81, 0xb1, 0xf5, 0xc7, 0x86, 0x29, 0xb2, 0xa4, 0x4f, 0x23, 0xb4, 0x82, + 0x32, 0x65, 0x92, 0x92, 0xae, 0xe7, 0x6c, 0x8c, 0x75, 0x4c, 0xb9, 0x2e, 0xa6, 0xf9, 0x85, 0x33, + 0x20, 0xc2, 0xf3, 0x4b, 0xb9, 0x24, 0xd7, 0xb8, 0xc5, 0x93, 0x95, 0x26, 0x99, 0xae, 0xbc, 0x51, + 0x8f, 0x21, 0x5f, 0x98, 0x6c, 0x66, 0x94, 0xa6, 0xc5, 0xd7, 0x0e, 0xb0, 0xba, 0x9b, 0x03, 0x2c, + 0x27, 0x41, 0xd7, 0xa4, 0x33, 0xb2, 0xdd, 0xd3, 0xdc, 0xa4, 0xd9, 0xe2, 0xe1, 0x31, 0x8e, 0x15, + 0x25, 0x53, 0xbc, 0xec, 0x18, 0x06, 0x62, 0xb3, 0xdb, 0xaa, 0x0d, 0x65, 0x7e, 0x8d, 0x79, 0x66, + 0x42, 0x0c, 0xd4, 0x9d, 0xe3, 0x6a, 0xc8, 0xec, 0x18, 0x51, 0xbf, 0xd8, 0xc1, 0x59, 0x19, 0x12, + 0xd7, 0xfd, 0x15, 0xc8, 0xf9, 0x42, 0x5c, 0x99, 0xed, 0x0b, 0xed, 0x07, 0xc2, 0x46, 0xb0, 0xe3, + 0x46, 0xa0, 0xd9, 0x7e, 0x25, 0xed, 0x3a, 0xfb, 0xd4, 0x27, 0xf2, 0x79, 0x24, 0x18, 0x8b, 0xba, + 0xc0, 0xcb, 0xd5, 0xe5, 0xa6, 0x6f, 0x23, 0x5f, 0x97, 0x2c, 0x93, 0x0a, 0x75, 0x5d, 0x60, 0xe9, + 0xc5, 0x3e, 0x40, 0x21, 0xe5, 0xc1, 0xf1, 0xd1, 0xae, 0xca, 0x79, 0x6c, 0xb4, 0xe3, 0x43, 0x33, + 0xe6, 0x5b, 0x75, 0x9b, 0x57, 0xee, 0x93, 0x32, 0x33, 0x1f, 0x5b, 0x7e, 0x85, 0xd1, 0x1b, 0xe5, + 0xcf, 0xda, 0xce, 0xb7, 0xf5, 0x32, 0x1b, 0x87, 0xc5, 0xc8, 0xf8, 0xe2, 0x2f, 0xf7, 0x55, 0x30, + 0xa7, 0x60, 0x09, 0xb2, 0xed, 0x7d, 0x56, 0x36, 0x6c, 0xa1, 0x1d, 0xb0, 0x7b, 0x56, 0xa1, 0x0a, + 0xaa, 0x4a, 0xf5, 0x7a, 0xc6, 0x5b, 0x4e, 0x8d, 0x60, 0x3a, 0x20, 0x6e, 0x35, 0x6d, 0x6e, 0xa9, + 0xeb, 0x56, 0xe6, 0x99, 0xcf, 0x2e, 0xc1, 0x63, 0xda, 0xb8, 0xd1, 0x05, 0x31, 0xf1, 0xe3, 0xf4, + 0x14, 0x40, 0x14, 0x3e, 0xa6, 0x7c, 0x08, 0xf6, 0x9e, 0x95, 0xc4, 0x51, 0xc1, 0xb3, 0xe5, 0x6b, + 0x9c, 0x59, 0xa6, 0x56, 0xfe, 0x96, 0xa1, 0xa4, 0x64, 0x2f, 0x6e, 0xbb, 0xd2, 0x50, 0x8f, 0x4a, + 0x7f, 0xda, 0x23, 0xdc, 0x7b, 0xb3, 0x32, 0x0b, 0x43, 0x9e, 0xec, 0x7c, 0xdc, 0xeb, 0xf2, 0xb6, + 0xfd, 0xc1, 0x73, 0x01, 0x5f, 0x30, 0xe2, 0xfc, 0x63, 0x69, 0xd9, 0xdb, 0x94, 0x5d, 0x6f, 0xba, + 0xb6, 0xde, 0x48, 0xf1, 0xb1, 0xfd, 0x52, 0xf1, 0x0a, 0xbc, 0x02, 0x8e, 0x25, 0xd0, 0xe6, 0x14, + 0x27, 0x55, 0xed, 0x4b, 0xdb, 0x2d, 0x7c, 0x52, 0xa2, 0x2c, 0xc7, 0x5f, 0xbc, 0x2f, 0xd3, 0x9e, + 0xdf, 0xa2, 0xf9, 0x61, 0xe6, 0xb1, 0x01, 0xfc, 0xe2, 0x03, 0xf9, 0xec, 0x84, 0x83, 0x94, 0x97, + 0x61, 0x3a, 0xa2, 0x15, 0xaf, 0xf5, 0xa7, 0x8d, 0x83, 0x1d, 0x8c, 0xe5, 0x2e, 0xfc, 0xe7, 0xe2, + 0xda, 0x9a, 0xa8, 0xa3, 0x0a, 0xf5, 0x41, 0xd9, 0x75, 0x6f, 0x88, 0xf6, 0x88, 0xcc, 0x94, 0x49, + 0xc2, 0xdd, 0xf7, 0xa2, 0xfc, 0x50, 0xb1, 0x19, 0xe6, 0x35, 0x4d, 0x49, 0xaf, 0x10, 0xb9, 0x1e, + 0x11, 0x12, 0x4b, 0x5b, 0x21, 0xf6, 0x0e, 0xaf, 0x6e, 0xd9, 0xe9, 0xc8, 0x3e, 0x27, 0xcc, 0x7c, + 0xe7, 0x93, 0xb3, 0x6e, 0xc8, 0x5c, 0x84, 0x5e, 0x19, 0xa5, 0x2a, 0x33, 0xa1, 0x15, 0xed, 0x4c, + 0x5c, 0xa3, 0x92, 0xd3, 0xc9, 0xd5, 0x01, 0x4b, 0xbf, 0x91, 0x9f, 0xb8, 0x9b, 0x37, 0x22, 0x4b, + 0x5a, 0xcf, 0x58, 0x0c, 0x36, 0x1a, 0xf1, 0xdd, 0x68, 0x5c, 0x25, 0x2e, 0x73, 0xbc, 0x14, 0x2f, + 0x6c, 0x13, 0xd1, 0xee, 0x3f, 0x1d, 0xfe, 0x35, 0x62, 0x27, 0x59, 0x8b, 0x38, 0x04, 0xfc, 0x6a, + 0x08, 0xd0, 0x52, 0xcd, 0x04, 0x75, 0x1c, 0xba, 
0x63, 0xbf, 0x12, 0xd0, 0xff, 0x11, 0xe3, 0x3c, + 0x1a, 0x25, 0xf4, 0x70, 0x49, 0x5e, 0xc1, 0xa0, 0x50, 0x35, 0x23, 0x46, 0xab, 0xc6, 0xcb, 0x2f, + 0x63, 0x34, 0x91, 0x8a, 0x4e, 0xae, 0x75, 0x51, 0xe7, 0xb4, 0xca, 0x99, 0x5f, 0x6c, 0x34, 0x10, + 0xf5, 0x00, 0xbb, 0xd1, 0x7f, 0x15, 0xe2, 0x92, 0x55, 0x6f, 0xe0, 0xfe, 0xf8, 0xa2, 0x78, 0x6e, + 0x20, 0xdc, 0xb0, 0x60, 0xd1, 0x98, 0x15, 0x01, 0x5c, 0x20, 0x8b, 0xe5, 0x08, 0xa4, 0x5d, 0x17, + 0x02, 0x59, 0x99, 0x21, 0x09, 0x46, 0x66, 0x3d, 0x52, 0xe7, 0x1d, 0x83, 0x92, 0xdb, 0x1c, 0xbd, + 0x8d, 0x9a, 0xf3, 0x7c, 0x40, 0x44, 0x1c, 0x50, 0xb8, 0x12, 0xe0, 0x6b, 0x9e, 0xff, 0xa6, 0x71, + 0x32, 0x81, 0xa6, 0xcf, 0x32, 0xc0, 0x60, 0xb0, 0x30, 0xaa, 0x83, 0x78, 0xe8, 0x95, 0xb0, 0x38, + 0x26, 0x1d, 0x63, 0xf1, 0xf4, 0x0b, 0x3d, 0x90, 0xaf, 0xdc, 0x84, 0x54, 0xf7, 0x9b, 0x3a, 0x1d, + 0xe3, 0x23, 0x2c, 0x8f, 0x0f, 0xe9, 0x9a, 0x25, 0x39, 0x69, 0x72, 0x9a, 0xad, 0xaa, 0xd1, 0x63, + 0x71, 0xa4, 0x75, 0x9f, 0x92, 0x50, 0x98, 0xe0, 0x14, 0xd2, 0x9b, 0x4e, 0xf4, 0xf8, 0x93, 0x41, + 0xe7, 0xd5, 0x56, 0x6c, 0x98, 0x14, 0x79, 0x03, 0x93, 0x56, 0x43, 0xfa, 0x62, 0x82, 0x88, 0xfd, + 0x4f, 0x3b, 0xb1, 0x9e, 0x1f, 0x96, 0xa7, 0x4c, 0xd6, 0x6c, 0x1f, 0xfe, 0x4c, 0x12, 0x45, 0xa3, + 0x9a, 0x73, 0x91, 0xca, 0xf6, 0x14, 0xe8, 0xaf, 0x3b, 0x4e, 0x25, 0x1d, 0x14, 0xcb, 0x5c, 0x17, + 0x5b, 0xb4, 0x76, 0xa6, 0x06, 0xf6, 0xc4, 0x7d, 0x08, 0xf5, 0x85, 0x8f, 0xa0, 0x2f, 0x75, 0x8b, + 0xc5, 0x95, 0x55, 0x8f, 0xbf, 0xba, 0xb0, 0x7f, 0x13, 0x5e, 0x50, 0xfa, 0xd2, 0xbc, 0x82, 0xad, + 0x57, 0x27, 0x7d, 0x09, 0xd5, 0x2f, 0xad, 0x88, 0xcd, 0x00, 0x69, 0x26, 0x47, 0x61, 0x76, 0x31, + 0xb7, 0x79, 0xf8, 0xe9, 0xc6, 0xd1, 0x61, 0xb5, 0xcd, 0x33, 0x04, 0xb7, 0xfa, 0xa3, 0x42, 0x9d, + 0x66, 0x0c, 0xa0, 0x3c, 0x49, 0x48, 0x8f, 0xe1, 0xd7, 0x72, 0xac, 0x58, 0xbd, 0x4b, 0x44, 0x7e, + 0xbc, 0x56, 0x8d, 0x0c, 0xe5, 0x21, 0xd6, 0xdb, 0xb3, 0xe2, 0x52, 0xf4, 0x1d, 0xb7, 0x57, 0x55, + 0xa6, 0xdf, 0xf1, 0x72, 0x2b, 0x74, 0x4d, 0x17, 0x13, 0xbc, 0xd8, 0x97, 0x02, 0x3b, 0x1f, 0xba, + 0x42, 0xa0, 0x47, 0xc3, 0x7d, 0x9c, 0xb3, 0x73, 0xca, 0x12, 0xdb, 0x71, 0x75, 0x7e, 0xbd, 0x3b, + 0x7c, 0x57, 0xaf, 0x32, 0x43, 0x7d, 0xf0, 0xfa, 0x75, 0x2b, 0x34, 0x53, 0x6a, 0x91, 0x56, 0xb6, + 0xb2, 0xa8, 0xbf, 0x27, 0x6d, 0x70, 0xdf, 0x8a, 0x5f, 0x95, 0xa8, 0xd6, 0x35, 0x85, 0xdb, 0xab, + 0x75, 0x8e, 0x33, 0xea, 0x9c, 0xf8, 0x26, 0x3d, 0x9f, 0xcb, 0xf5, 0x4e, 0x3c, 0x24, 0xd0, 0x76, + 0xd1, 0x3b, 0x32, 0xb0, 0x8d, 0x0f, 0xf5, 0x7b, 0x85, 0x5f, 0x61, 0xb3, 0x38, 0xdf, 0x94, 0x20, + 0x13, 0x9b, 0xa1, 0xe1, 0x3c, 0x27, 0xbd, 0x55, 0xd0, 0x98, 0xb5, 0xa7, 0xc9, 0x3d, 0xf6, 0xb0, + 0x55, 0x8a, 0xf1, 0x9c, 0x7f, 0x88, 0x4d, 0x6c, 0x55, 0x7e, 0x96, 0x99, 0xa8, 0x3c, 0xdf, 0x8a, + 0xbe, 0x0e, 0xff, 0xfb, 0x79, 0x6f, 0xad, 0x5f, 0xef, 0x6b, 0xab, 0x2d, 0xe5, 0x3c, 0x43, 0x76, + 0xd8, 0x8c, 0x2f, 0xec, 0x6b, 0x82, 0xd9, 0x60, 0x73, 0x93, 0x70, 0x1e, 0x43, 0xcc, 0x33, 0xa5, + 0xd3, 0x67, 0x8d, 0x6b, 0x5c, 0xbb, 0xb4, 0x16, 0x79, 0x9c, 0x3f, 0x5a, 0x57, 0x47, 0x4d, 0xcf, + 0xe7, 0x1d, 0x90, 0x7b, 0xd5, 0xa7, 0x4c, 0x58, 0x52, 0xa8, 0xa4, 0x98, 0x84, 0xc4, 0xd9, 0xcb, + 0x3a, 0x9e, 0x8c, 0x6b, 0x9e, 0x4c, 0x7b, 0x93, 0xb9, 0xf4, 0x8e, 0xa5, 0xe1, 0x2e, 0xfe, 0xac, + 0x83, 0x75, 0x14, 0xc4, 0x91, 0x3b, 0x42, 0x1f, 0x84, 0x1b, 0x39, 0xeb, 0xbd, 0xf6, 0x7d, 0x52, + 0xc8, 0xb8, 0x8f, 0x09, 0xc1, 0xb3, 0xac, 0x45, 0x44, 0x48, 0x2c, 0x33, 0x48, 0x4a, 0x35, 0x2b, + 0x2c, 0x23, 0xdb, 0xc9, 0x52, 0xac, 0xba, 0xc5, 0x7a, 0x6f, 0x65, 0x76, 0xfd, 0x62, 0xff, 0x46, + 0xba, 0xcc, 0x54, 0xbd, 0x6c, 0xce, 0xb3, 0xf8, 0xfc, 0x95, 0xe4, 0x17, 
0x0e, 0x3a, 0x6b, 0x32, + 0x4b, 0xe7, 0xf9, 0x1f, 0xb5, 0x7b, 0x86, 0x91, 0xd4, 0xd3, 0x98, 0x36, 0xe4, 0x0f, 0xd3, 0x10, + 0x5c, 0xe1, 0x1d, 0x76, 0x3a, 0x2d, 0x31, 0x1d, 0xb7, 0x1f, 0xa8, 0xb2, 0xf8, 0xa6, 0x95, 0xfb, + 0x1b, 0xfa, 0xe5, 0x7b, 0x43, 0x6a, 0xa9, 0x38, 0x44, 0x4a, 0xd1, 0x2d, 0x6e, 0x18, 0x66, 0x57, + 0x62, 0x59, 0xce, 0xb8, 0x67, 0xf7, 0x29, 0x7f, 0x58, 0xa9, 0xd7, 0xa6, 0x36, 0x44, 0x85, 0x7c, + 0xad, 0x99, 0xc8, 0x80, 0xe3, 0xe9, 0x49, 0xf0, 0x79, 0x04, 0xa3, 0x07, 0x68, 0x89, 0x06, 0x2b, + 0x96, 0x4e, 0xe1, 0x3d, 0x8f, 0xa8, 0x0c, 0x4f, 0xe6, 0x4c, 0xd9, 0xea, 0x3d, 0x96, 0x56, 0x6a, + 0xa4, 0x37, 0xf8, 0xb5, 0x60, 0xac, 0xf6, 0xdd, 0x25, 0x3e, 0xf6, 0xb3, 0xb8, 0xdb, 0xa6, 0x26, + 0xe5, 0x7b, 0x41, 0x0d, 0xbe, 0x2a, 0x71, 0x61, 0x8b, 0x6b, 0xcb, 0x15, 0x69, 0x48, 0xd5, 0xfe, + 0x0f, 0xd7, 0x5a, 0x32, 0xfe, 0x35, 0x13, 0x85, 0xce, 0xb2, 0xef, 0xc4, 0xab, 0x78, 0xd1, 0x9a, + 0x94, 0x89, 0x73, 0x45, 0xf3, 0x81, 0x32, 0x94, 0xc7, 0x0f, 0x2e, 0xa5, 0xb4, 0xa9, 0x3b, 0x9e, + 0x51, 0xa7, 0x26, 0x06, 0xf7, 0xe4, 0x64, 0xc7, 0x32, 0xd7, 0x1e, 0x90, 0xa9, 0x30, 0xb8, 0x4e, + 0xb7, 0x8f, 0x08, 0xb5, 0x50, 0xf9, 0x67, 0x7b, 0x08, 0xe8, 0xbf, 0x07, 0xd5, 0xf6, 0x79, 0x27, + 0x72, 0x66, 0x04, 0xb8, 0x91, 0x7b, 0xb9, 0x32, 0xaf, 0xb6, 0x2c, 0x92, 0x71, 0x16, 0xba, 0xce, + 0x78, 0x3c, 0xdd, 0xa8, 0x76, 0xbd, 0x66, 0x90, 0xea, 0x9a, 0xe7, 0x68, 0x14, 0x14, 0xba, 0x70, + 0xc0, 0xc1, 0x60, 0xf5, 0x24, 0x7a, 0xdf, 0x2f, 0x79, 0x2e, 0x45, 0x68, 0x34, 0x48, 0x11, 0xac, + 0x26, 0x4b, 0x89, 0xa8, 0x53, 0xd5, 0xe4, 0x71, 0x12, 0x4f, 0xc6, 0x9b, 0x17, 0x53, 0xfd, 0xf6, + 0x4e, 0x0d, 0x2b, 0x64, 0x2f, 0x56, 0xb4, 0x14, 0xe5, 0x91, 0x19, 0x0e, 0x57, 0x03, 0x7b, 0x8a, + 0xd6, 0x8c, 0x9a, 0xa4, 0x55, 0x4f, 0x4d, 0x09, 0xf9, 0xd4, 0xd9, 0x44, 0x28, 0xb6, 0x3a, 0x42, + 0x2d, 0xf7, 0x6b, 0x5d, 0xf2, 0xed, 0xe9, 0xc0, 0xb6, 0x4c, 0xf7, 0xc8, 0xd1, 0x82, 0x2f, 0x67, + 0x63, 0xa4, 0xcb, 0x16, 0x98, 0x7c, 0x7a, 0xd1, 0x3c, 0x2a, 0x3a, 0x52, 0xf4, 0x70, 0xb5, 0xa6, + 0xc6, 0xe7, 0x63, 0x62, 0x4c, 0x3f, 0xeb, 0x9b, 0x6d, 0xda, 0xbe, 0x81, 0xf5, 0xcf, 0x5e, 0xaf, + 0xb4, 0xad, 0xb2, 0xad, 0xdd, 0xbd, 0x37, 0xd0, 0x32, 0xc8, 0x1f, 0x09, 0x00, 0x8c, 0xf6, 0x15, + 0x77, 0x04, 0xed, 0x9f, 0x46, 0x7a, 0x7e, 0xf5, 0xfe, 0xc0, 0xc9, 0x2a, 0xb7, 0x0b, 0x7a, 0x9f, + 0x16, 0x15, 0xd4, 0x16, 0xcf, 0xab, 0x06, 0x2c, 0x27, 0x39, 0xf3, 0x4e, 0x63, 0xa3, 0x79, 0xae, + 0xa8, 0x13, 0x09, 0xb4, 0xae, 0xe9, 0x44, 0x9e, 0xfe, 0x72, 0x8d, 0xa4, 0x31, 0x8e, 0xc4, 0xe4, + 0xb3, 0x15, 0x1d, 0x61, 0x61, 0xb2, 0x88, 0x43, 0x9d, 0x0c, 0x42, 0xb7, 0x13, 0x6f, 0x99, 0x1e, + 0xbe, 0x12, 0x54, 0x07, 0x37, 0x4b, 0xc3, 0xde, 0x72, 0x81, 0xe9, 0x27, 0x6b, 0x2a, 0x51, 0xa8, + 0x9d, 0x0d, 0x4f, 0x17, 0xe4, 0x0e, 0x7c, 0xa9, 0xda, 0x74, 0xc4, 0xf7, 0x25, 0x76, 0xaa, 0x52, + 0xde, 0xbd, 0xd9, 0xd0, 0xf2, 0x83, 0x52, 0xf5, 0x0b, 0xb4, 0x18, 0x02, 0x83, 0xf5, 0xbf, 0x1d, + 0x71, 0x88, 0x45, 0xb8, 0xaf, 0xeb, 0xae, 0x85, 0x44, 0x6b, 0x79, 0x8e, 0x79, 0xa8, 0x8d, 0x62, + 0x68, 0xa9, 0x9a, 0x0e, 0x08, 0xc7, 0xd7, 0xbc, 0x52, 0x82, 0xac, 0x5d, 0x9a, 0x72, 0xa1, 0xd4, + 0x6b, 0xe5, 0xb4, 0xd8, 0x30, 0x61, 0x98, 0xf1, 0x72, 0x45, 0xe1, 0xea, 0xaf, 0xa1, 0xbf, 0xee, + 0x69, 0x2f, 0x5f, 0xb6, 0x1c, 0x7a, 0x98, 0x6d, 0x51, 0x2c, 0xda, 0xfa, 0x71, 0x96, 0x58, 0x8b, + 0x06, 0x9e, 0x08, 0xde, 0x2b, 0xed, 0xa1, 0x7f, 0xb7, 0x6f, 0xfc, 0xd5, 0x78, 0xe7, 0xb1, 0x99, + 0x5d, 0xa5, 0x49, 0x25, 0x0d, 0x98, 0xbf, 0x52, 0xd0, 0x06, 0x39, 0xb9, 0xff, 0x1c, 0x53, 0xa8, + 0x42, 0xab, 0xf8, 0x63, 0xd8, 0x9c, 0xd7, 0x5b, 0x72, 0xab, 0xcd, 0xf3, 0x77, 0x18, 0x8e, 0xe9, + 
0xfa, 0xd5, 0xed, 0x43, 0x9b, 0x11, 0x09, 0x7a, 0x8b, 0x04, 0xf2, 0x71, 0x4f, 0x2c, 0x9f, 0x76, + 0xe4, 0x15, 0xb1, 0x0e, 0x79, 0xcb, 0x0f, 0xb5, 0x8c, 0xb8, 0xc6, 0x0c, 0x99, 0xe1, 0x28, 0x39, + 0x08, 0x8d, 0x3e, 0x81, 0x78, 0x8e, 0xd7, 0x45, 0x33, 0x29, 0xc9, 0x86, 0x7e, 0x61, 0xbb, 0xe6, + 0xf9, 0x5c, 0x28, 0x63, 0xdb, 0x69, 0xbe, 0x4b, 0x35, 0x39, 0xe3, 0xc1, 0x40, 0xf7, 0x4c, 0xae, + 0xa8, 0xa8, 0x67, 0xcc, 0x1f, 0xf3, 0x27, 0x3c, 0xbb, 0xa0, 0xeb, 0x82, 0x61, 0x93, 0xd1, 0xe6, + 0xfd, 0xf9, 0x40, 0x0f, 0x83, 0x98, 0x5d, 0xe6, 0x4d, 0x80, 0x59, 0x90, 0x74, 0xd3, 0xe3, 0xb0, + 0x5a, 0x56, 0xcd, 0xd8, 0x17, 0x6c, 0x0f, 0xca, 0x2a, 0x6f, 0x97, 0x65, 0x6b, 0xdf, 0xbc, 0x05, + 0x37, 0x7f, 0xbf, 0xae, 0x8b, 0xa2, 0x37, 0xb4, 0xa3, 0x44, 0x19, 0xa4, 0xd7, 0x2e, 0xcd, 0x03, + 0x20, 0xce, 0x42, 0x2e, 0xd3, 0xbc, 0xa2, 0x78, 0x34, 0x3b, 0x06, 0xb4, 0x0c, 0xc8, 0x7a, 0xe5, + 0xe4, 0x97, 0x3e, 0x28, 0xd6, 0x24, 0xeb, 0xc9, 0xf3, 0xa4, 0x9d, 0x35, 0x9d, 0xba, 0x11, 0x0d, + 0xb4, 0xd1, 0xc6, 0x2d, 0x4c, 0xda, 0x6a, 0xb0, 0xc8, 0xd3, 0xb8, 0x73, 0x44, 0x9c, 0xf9, 0x04, + 0xf5, 0xbf, 0xf4, 0xef, 0xc4, 0x72, 0x61, 0xc8, 0xa8, 0x3a, 0x6a, 0x0c, 0x47, 0x17, 0x28, 0x23, + 0x80, 0x53, 0xa2, 0xa1, 0x2b, 0x35, 0x5c, 0xdc, 0x3e, 0xb7, 0xdf, 0xde, 0x96, 0x57, 0xad, 0xa8, + 0xdd, 0x6a, 0xcd, 0x85, 0x4f, 0x43, 0xb3, 0x24, 0xcb, 0xd9, 0x38, 0x1c, 0xd7, 0xd0, 0x9d, 0xb7, + 0xae, 0x0b, 0x3e, 0x65, 0xab, 0x08, 0xf4, 0xa7, 0x0a, 0xac, 0x95, 0x6a, 0xc8, 0x2d, 0x9e, 0x82, + 0x83, 0x6e, 0xbd, 0x8e, 0xa2, 0xa2, 0x33, 0x01, 0x5c, 0x4c, 0xe3, 0x81, 0xdc, 0x45, 0xd5, 0xf8, + 0x22, 0xce, 0xae, 0xd0, 0x7d, 0x97, 0x40, 0x4b, 0xe9, 0x56, 0x18, 0xba, 0xe6, 0x3c, 0x3b, 0xc5, + 0xec, 0x27, 0x39, 0xdb, 0x5f, 0x7c, 0x99, 0xd1, 0xbb, 0x33, 0x30, 0xc7, 0x22, 0xa2, 0x1e, 0x9b, + 0xda, 0x54, 0xb7, 0x17, 0xb9, 0x39, 0x7e, 0x9c, 0x07, 0x34, 0xf0, 0x82, 0x4b, 0x82, 0xfc, 0xec, + 0xd7, 0x91, 0xb8, 0xf4, 0x4f, 0xd5, 0x38, 0x38, 0x7d, 0x77, 0x60, 0xa8, 0xef, 0x4e, 0x42, 0x8c, + 0x34, 0xda, 0x11, 0x05, 0x5e, 0xa4, 0x0d, 0x44, 0x1e, 0xae, 0xf3, 0x9f, 0x29, 0x4e, 0x0b, 0x0f, + 0x1c, 0x1d, 0x2f, 0x9c, 0xb0, 0x85, 0xae, 0x04, 0xd0, 0x2d, 0xbf, 0x6a, 0xed, 0xc7, 0x97, 0x21, + 0x63, 0x38, 0x1f, 0x34, 0x81, 0xbe, 0x2a, 0xa5, 0x17, 0x76, 0x1d, 0x99, 0x35, 0xf1, 0xc1, 0xb4, + 0x4d, 0x73, 0xf5, 0x2a, 0x47, 0x76, 0x10, 0x29, 0x2a, 0xeb, 0xd5, 0x48, 0xbc, 0x58, 0x96, 0xa4, + 0x2c, 0xe3, 0x8e, 0xdd, 0x8d, 0xfe, 0xc7, 0xf9, 0x43, 0x46, 0x96, 0x6d, 0x54, 0x87, 0xb2, 0x5f, + 0x86, 0xd1, 0xab, 0x15, 0xf3, 0x88, 0x1f, 0x12, 0xd7, 0xb6, 0xcd, 0x1b, 0xdf, 0x84, 0xac, 0x89, + 0x7d, 0xe2, 0xea, 0x5d, 0xbe, 0x8e, 0x28, 0x8f, 0xad, 0xe2, 0x3e, 0x3f, 0x3a, 0xdd, 0xee, 0x0f, + 0x1f, 0x8c, 0x2f, 0xba, 0xad, 0xa4, 0x41, 0xda, 0xdd, 0xd0, 0x1f, 0xa5, 0x4f, 0x57, 0xed, 0x1f, + 0x3f, 0x6d, 0xc6, 0x4c, 0xa1, 0xf7, 0x68, 0xd4, 0x0c, 0x79, 0xc9, 0xa4, 0x29, 0xe5, 0xe5, 0xb3, + 0xf9, 0x24, 0x06, 0xcf, 0xfa, 0x79, 0xd3, 0x0c, 0x88, 0x45, 0x7d, 0xf0, 0xe6, 0xdd, 0x04, 0x89, + 0xe8, 0x88, 0xeb, 0x19, 0xf5, 0x9a, 0x61, 0x88, 0xee, 0x91, 0xad, 0xc1, 0x22, 0xdd, 0xf7, 0x9c, + 0xbd, 0xf5, 0x54, 0xb3, 0x18, 0xaa, 0xe7, 0x1a, 0xbd, 0x25, 0xe9, 0x55, 0x6a, 0x0e, 0x70, 0xf2, + 0xf5, 0x47, 0xa2, 0xcf, 0x8d, 0x4c, 0x94, 0xd7, 0xc8, 0x15, 0x19, 0xf9, 0x80, 0xb4, 0x65, 0x97, + 0xc2, 0x12, 0xdb, 0x33, 0x8f, 0xe8, 0xaa, 0xa1, 0x09, 0x88, 0x0b, 0xee, 0x67, 0xad, 0x48, 0x30, + 0x4b, 0xa3, 0xe8, 0xcd, 0x5a, 0xed, 0x88, 0xe6, 0xb9, 0x49, 0xc2, 0xe9, 0x8c, 0xa3, 0x64, 0xc2, + 0x46, 0x64, 0xee, 0x50, 0x9a, 0xf9, 0x8b, 0x36, 0x6e, 0x63, 0x35, 0x94, 0x92, 0x0a, 0x56, 0xe4, + 0x39, 0x23, 0x63, 0x8c, 
0x51, 0x06, 0x97, 0x8d, 0x3c, 0x9f, 0xd6, 0xe3, 0x0b, 0x6c, 0x9b, 0x03, + 0x53, 0xaf, 0xd2, 0x8f, 0x09, 0xda, 0x06, 0xfd, 0x52, 0xd2, 0x5e, 0xc0, 0x1a, 0x64, 0xb5, 0xd2, + 0xb8, 0x07, 0x3a, 0xd1, 0x23, 0x88, 0x1f, 0xa1, 0x7a, 0x46, 0x82, 0xa2, 0x25, 0xac, 0xdd, 0x3c, + 0xbb, 0x11, 0xf9, 0x18, 0x29, 0x49, 0x1d, 0xe2, 0x04, 0xcb, 0x1f, 0x2c, 0x83, 0xe8, 0xa4, 0xa7, + 0xe6, 0x52, 0x4d, 0xa7, 0x7e, 0x92, 0x32, 0x91, 0xa5, 0xee, 0xbe, 0x3f, 0x84, 0xf5, 0xfa, 0x8d, + 0x2f, 0xae, 0xa9, 0x31, 0xb2, 0x44, 0xef, 0xa7, 0x33, 0x05, 0xcc, 0x77, 0xa9, 0x91, 0x7d, 0xc4, + 0x2b, 0x63, 0x3b, 0x20, 0x91, 0x37, 0xa4, 0x0a, 0xc8, 0xe5, 0x7a, 0xce, 0xfa, 0x6d, 0x67, 0xab, + 0xcf, 0xa8, 0xba, 0xad, 0x7b, 0xdc, 0xb6, 0x32, 0x4d, 0xba, 0x91, 0x34, 0xc7, 0xce, 0x2c, 0xd2, + 0xf1, 0x29, 0xa1, 0x7f, 0x84, 0xf0, 0x4c, 0x99, 0x04, 0xcf, 0xaf, 0x67, 0x5e, 0xfe, 0xce, 0x48, + 0x91, 0xf9, 0x55, 0x51, 0x6d, 0xd5, 0x43, 0xc1, 0x4e, 0xee, 0xfd, 0x59, 0xd7, 0xef, 0x0d, 0x5a, + 0x84, 0xc3, 0x13, 0xd1, 0xd6, 0x41, 0xbe, 0x3e, 0x11, 0x87, 0x6c, 0xf9, 0x39, 0x69, 0x07, 0x52, + 0x19, 0xb5, 0x10, 0x2b, 0xba, 0xc0, 0x02, 0x42, 0xc5, 0xf1, 0x65, 0x06, 0x94, 0x9a, 0xcf, 0x9b, + 0xc5, 0xa8, 0x8c, 0x37, 0xc2, 0xb0, 0xd2, 0x7a, 0x33, 0x2a, 0x1f, 0x6d, 0x4b, 0xa8, 0x91, 0x30, + 0xff, 0x8c, 0x74, 0x09, 0x93, 0xa0, 0x93, 0x3a, 0xde, 0x4b, 0xaa, 0x16, 0x0d, 0xe6, 0x28, 0xdd, + 0x1c, 0x6f, 0x82, 0x57, 0xf5, 0x96, 0xbd, 0x02, 0x8c, 0x0f, 0x70, 0x3c, 0xc8, 0x57, 0x95, 0xc3, + 0xb4, 0x99, 0xb6, 0xf2, 0x6c, 0xe7, 0xf0, 0xa5, 0x1c, 0x23, 0xca, 0x9f, 0x60, 0x9f, 0xb0, 0x9e, + 0xe0, 0x82, 0xb3, 0xba, 0xfc, 0x27, 0x6d, 0x95, 0x93, 0x30, 0x9f, 0xa2, 0x5d, 0x2e, 0x9c, 0x50, + 0x88, 0x8e, 0x68, 0x34, 0xca, 0xc4, 0x52, 0x79, 0x59, 0x88, 0x47, 0x9c, 0x53, 0xe7, 0x09, 0xb4, + 0x11, 0x97, 0x3d, 0xe8, 0xad, 0x6e, 0x7b, 0xb0, 0xdd, 0x3b, 0x21, 0xa0, 0x84, 0xed, 0x33, 0xd2, + 0x65, 0xe4, 0xcd, 0x30, 0x4d, 0xab, 0x3b, 0xd9, 0x31, 0x2f, 0xfa, 0xf4, 0xda, 0x26, 0xee, 0x93, + 0xee, 0x4d, 0x5e, 0xde, 0x27, 0xca, 0x26, 0x65, 0x6f, 0xd4, 0xbe, 0xaa, 0xdd, 0x8f, 0xfc, 0x01, + 0x1a, 0xe9, 0x05, 0xde, 0x49, 0xaf, 0x8c, 0x58, 0x65, 0x9e, 0x29, 0x0c, 0x81, 0x15, 0x5e, 0x5e, + 0x83, 0x77, 0x3f, 0xd2, 0x7e, 0xa9, 0xf8, 0xfa, 0xa8, 0xd4, 0x83, 0xe6, 0x35, 0x32, 0xc7, 0xf9, + 0xd5, 0x15, 0xb3, 0x3e, 0x37, 0x0c, 0x79, 0x1a, 0x4f, 0xf6, 0x98, 0x6a, 0xe5, 0xdc, 0x19, 0xe6, + 0x5c, 0xce, 0x08, 0xa5, 0x05, 0xb0, 0x80, 0x7d, 0xa6, 0x49, 0x1d, 0xea, 0xbe, 0x94, 0xac, 0x72, + 0x7b, 0x16, 0x65, 0xd8, 0x8b, 0x39, 0xd8, 0x17, 0xec, 0x69, 0x3c, 0xeb, 0x51, 0x19, 0x3c, 0x55, + 0xcd, 0xf5, 0x5a, 0x21, 0x7c, 0x6d, 0x96, 0x34, 0x3f, 0x36, 0xdf, 0x80, 0xa1, 0x32, 0xba, 0xeb, + 0xe5, 0xde, 0xf1, 0xca, 0x70, 0x71, 0x65, 0xf9, 0x78, 0x77, 0x79, 0xc5, 0xd9, 0x4e, 0x10, 0x0b, + 0x1a, 0xa7, 0x95, 0x6c, 0x18, 0x96, 0xd2, 0x1d, 0x69, 0x15, 0xe8, 0x10, 0x38, 0x63, 0x49, 0xe8, + 0x13, 0x9e, 0xb0, 0x7d, 0xd8, 0xae, 0xbe, 0x5e, 0x35, 0xcd, 0x3d, 0xcb, 0x19, 0x48, 0xb4, 0xbc, + 0x7e, 0xb6, 0xaa, 0x32, 0x4c, 0xdb, 0x32, 0xa6, 0x2a, 0xfb, 0xe2, 0xd1, 0xf5, 0x63, 0xb5, 0xd2, + 0x9d, 0x96, 0xac, 0xd5, 0x66, 0x79, 0x5e, 0xfc, 0x6e, 0x96, 0x4c, 0xae, 0xfa, 0x4a, 0x7c, 0xe5, + 0x03, 0x4c, 0x0f, 0x33, 0xeb, 0x02, 0x71, 0x46, 0x79, 0x57, 0x27, 0x9a, 0x7a, 0xab, 0x2f, 0x13, + 0x29, 0x76, 0xdb, 0xea, 0x55, 0x68, 0xf2, 0x69, 0x36, 0xa6, 0x25, 0xcb, 0x7b, 0xda, 0x0c, 0x5e, + 0x91, 0x1f, 0xd0, 0x8b, 0x12, 0x45, 0x68, 0xf6, 0x98, 0x37, 0x31, 0x7d, 0x28, 0xe7, 0x28, 0x37, + 0xd9, 0x7c, 0x19, 0xa3, 0xec, 0x38, 0x94, 0x94, 0x7d, 0xa8, 0x75, 0xdf, 0xa8, 0x1b, 0x49, 0xda, + 0x79, 0x2e, 0x9a, 0x9f, 0xd4, 0xeb, 0x38, 0x65, 
0x8c, 0x49, 0xf2, 0x38, 0x4a, 0x07, 0xc3, 0x81, + 0x4b, 0x8d, 0x0c, 0xdf, 0x77, 0xc4, 0xda, 0x4d, 0xad, 0x8e, 0x2b, 0x6f, 0x8b, 0xb1, 0x14, 0xf5, + 0x79, 0x12, 0xbe, 0x01, 0x13, 0x8f, 0xa7, 0xad, 0x21, 0x57, 0x58, 0x03, 0xbe, 0x38, 0x92, 0x51, + 0x86, 0x65, 0x4c, 0x5e, 0xa0, 0x59, 0xaa, 0x27, 0xa9, 0x1e, 0x91, 0xe1, 0x5e, 0x93, 0x27, 0xea, + 0xd7, 0xa9, 0x2a, 0x0d, 0xc6, 0xd8, 0xfa, 0xa7, 0xe7, 0xf4, 0xf3, 0xd9, 0xca, 0x37, 0xd3, 0x83, + 0xac, 0xf9, 0xb8, 0x2e, 0x45, 0xc7, 0x33, 0x91, 0x6e, 0xd9, 0x19, 0xbb, 0x26, 0x3f, 0x96, 0xc3, + 0xfa, 0x11, 0xf0, 0xe5, 0xf7, 0xa2, 0xd4, 0xc9, 0xce, 0x76, 0xe1, 0x82, 0x48, 0x7a, 0x34, 0xed, + 0x56, 0xd9, 0x2c, 0x94, 0x48, 0x9f, 0xcd, 0x81, 0x86, 0x56, 0x68, 0x38, 0x87, 0xd8, 0x55, 0x5c, + 0x87, 0x4e, 0xe9, 0xac, 0x61, 0xe5, 0xaa, 0x92, 0x35, 0x5f, 0x28, 0xfd, 0xa6, 0xde, 0x01, 0x34, + 0xe2, 0xe8, 0x2a, 0xad, 0xa2, 0x49, 0x75, 0x0c, 0xf1, 0x2d, 0x97, 0xc6, 0xc2, 0x6c, 0x3b, 0x57, + 0x61, 0x1a, 0xfc, 0x92, 0xd2, 0xd8, 0x2f, 0x0c, 0xc4, 0xcf, 0x67, 0x0c, 0xd0, 0xc5, 0x11, 0x6a, + 0x19, 0xc9, 0x12, 0xc5, 0xc9, 0x8f, 0x8d, 0x67, 0x37, 0xa5, 0x11, 0x33, 0xe4, 0x7d, 0x99, 0xc0, + 0xb8, 0xc3, 0xd6, 0xe6, 0x57, 0x2b, 0x5d, 0xe6, 0xf5, 0x0d, 0x22, 0x66, 0x4b, 0x82, 0xe5, 0x38, + 0x45, 0xd2, 0x9a, 0x0c, 0x31, 0x1b, 0xa3, 0x24, 0xa1, 0xb1, 0x10, 0xc0, 0x4b, 0x1f, 0x75, 0x58, + 0x06, 0x09, 0xfc, 0x38, 0xe7, 0xca, 0x16, 0x4c, 0xa4, 0xab, 0x76, 0x6d, 0xb0, 0x79, 0xb6, 0x67, + 0x60, 0xd6, 0x7a, 0x9d, 0x1c, 0x9b, 0x50, 0x2b, 0x3f, 0x6c, 0x21, 0x3f, 0x38, 0x6f, 0xd6, 0xc8, + 0xd4, 0xe5, 0x4b, 0x9a, 0xcf, 0x90, 0xc3, 0xac, 0x01, 0x74, 0xb5, 0x53, 0x33, 0xab, 0x36, 0x29, + 0x8f, 0x06, 0xc6, 0xf2, 0x4a, 0x5b, 0x60, 0x0f, 0xe8, 0x8f, 0x5c, 0x86, 0x7a, 0x46, 0x45, 0xa0, + 0x9d, 0xc8, 0x07, 0x46, 0x02, 0x0e, 0x06, 0xa5, 0xf5, 0x36, 0xcf, 0xea, 0x92, 0xd5, 0xed, 0x3e, + 0x19, 0x6f, 0x70, 0x18, 0xcd, 0x93, 0x56, 0xde, 0x5e, 0x15, 0xb6, 0x5e, 0x2c, 0xb0, 0x53, 0x99, + 0xc8, 0xe4, 0x6f, 0xa1, 0xfa, 0x1a, 0x84, 0x8d, 0xc8, 0x50, 0x85, 0x1a, 0x1f, 0x4c, 0xbb, 0x13, + 0xf1, 0x78, 0xec, 0xd2, 0xa2, 0x99, 0xbf, 0x8f, 0xd5, 0x2a, 0x2d, 0x18, 0x46, 0x5b, 0xa6, 0x2b, + 0x39, 0x21, 0x24, 0xda, 0xfc, 0x36, 0x7b, 0x8c, 0x83, 0x7d, 0x5b, 0xfa, 0x42, 0xdf, 0x4f, 0x0d, + 0xb9, 0x80, 0x3d, 0x62, 0x99, 0xed, 0xec, 0x63, 0x7a, 0xd8, 0x3b, 0xbd, 0x08, 0x55, 0xfc, 0xde, + 0xdb, 0x52, 0x4b, 0x4f, 0x61, 0x4d, 0xd8, 0x09, 0xcd, 0xcd, 0xb9, 0xc6, 0xe7, 0x91, 0x85, 0xdb, + 0x8a, 0xe8, 0xe9, 0x94, 0x36, 0xea, 0x9a, 0xcf, 0xea, 0x61, 0x99, 0x26, 0x95, 0x35, 0xc4, 0x46, + 0x05, 0xd3, 0x28, 0x61, 0x58, 0x84, 0x56, 0x35, 0x39, 0x37, 0xac, 0xb5, 0x4c, 0x0b, 0xde, 0x37, + 0xf6, 0xf6, 0x7b, 0x94, 0xd4, 0x1f, 0x38, 0xd2, 0x65, 0x5f, 0x48, 0x29, 0x2a, 0x8c, 0xd6, 0xf4, + 0x9f, 0xda, 0x72, 0x38, 0xdb, 0xe3, 0xee, 0xe2, 0x72, 0x5c, 0xbf, 0x4e, 0x28, 0x6a, 0x55, 0xe2, + 0xcc, 0x62, 0x1d, 0x7f, 0x4d, 0xbd, 0x26, 0x43, 0xad, 0x91, 0x74, 0xd8, 0xcc, 0x8b, 0x49, 0x17, + 0xb0, 0xa1, 0x95, 0xb4, 0x00, 0x37, 0x57, 0x7e, 0x38, 0xdf, 0xb9, 0xb7, 0xfb, 0xa8, 0xa9, 0xda, + 0xa5, 0x4c, 0x43, 0x20, 0xcd, 0xe1, 0xd7, 0x7e, 0x16, 0x7d, 0x1c, 0xc8, 0xa9, 0x99, 0x62, 0xd0, + 0x08, 0x38, 0xbe, 0xe1, 0x54, 0xd9, 0x62, 0x34, 0xed, 0xa9, 0xb1, 0xd6, 0x64, 0xdb, 0xf6, 0x21, + 0xf1, 0x29, 0xfe, 0xd7, 0x22, 0xc7, 0xcf, 0x3a, 0xa6, 0x4e, 0x2b, 0x6a, 0xa1, 0x35, 0xe0, 0x8e, + 0x3c, 0xb5, 0xd7, 0xc5, 0x6a, 0xab, 0x52, 0x9e, 0x1c, 0x42, 0x2b, 0xf2, 0x2f, 0x26, 0xf5, 0xa5, + 0x25, 0xc4, 0x44, 0x57, 0xf7, 0xde, 0x5e, 0x0b, 0x9a, 0x3f, 0x95, 0xee, 0xa4, 0x38, 0x28, 0x0e, + 0x60, 0x8d, 0x1e, 0xdc, 0x3f, 0xf1, 0x21, 0x1a, 0x42, 0x29, 0x09, 0x20, 
0x98, 0xa8, 0x7f, 0xe6, + 0x19, 0x05, 0xfb, 0x92, 0x62, 0x67, 0x0b, 0x06, 0xcf, 0xc9, 0x5a, 0xae, 0xe2, 0x85, 0x63, 0x42, + 0x80, 0xf8, 0x2e, 0xc4, 0x4c, 0x22, 0x9d, 0xc2, 0x4e, 0x5c, 0x1e, 0xb1, 0xb5, 0x16, 0xa9, 0x91, + 0xd8, 0x12, 0x72, 0x2b, 0xab, 0x3b, 0xa1, 0xab, 0x79, 0x94, 0xad, 0x92, 0x9e, 0x81, 0x1a, 0x1d, + 0x2d, 0xd6, 0xac, 0x19, 0xc3, 0xdc, 0x02, 0xbc, 0x62, 0x29, 0xb6, 0xab, 0x96, 0x42, 0xc6, 0x6b, + 0x02, 0x9c, 0x87, 0xc9, 0x8a, 0x6b, 0x76, 0x8f, 0xb6, 0xc2, 0x08, 0xbd, 0xb9, 0x43, 0xeb, 0x8c, + 0xe0, 0xb2, 0x78, 0xdb, 0xa9, 0x9a, 0x63, 0xe9, 0x42, 0xf3, 0x19, 0x43, 0x81, 0xec, 0xaa, 0x90, + 0x8c, 0xb1, 0x39, 0x2b, 0x44, 0x7f, 0xda, 0x83, 0x98, 0xcc, 0xa6, 0xf4, 0x97, 0x11, 0xa5, 0xef, + 0x51, 0x5b, 0x8d, 0x5a, 0xcb, 0x4e, 0x87, 0x86, 0x29, 0x91, 0xad, 0x33, 0x5f, 0xb0, 0xa1, 0x3d, + 0x16, 0xc9, 0x99, 0x79, 0x48, 0x9a, 0x20, 0x40, 0xfb, 0x92, 0xc6, 0x7b, 0x1e, 0xb4, 0xa3, 0x60, + 0xcb, 0xba, 0x24, 0x3f, 0xa2, 0x04, 0x6d, 0x32, 0x97, 0xf8, 0xfa, 0x61, 0x19, 0xd5, 0xef, 0xdd, + 0x57, 0xf3, 0x66, 0x8c, 0xf7, 0xaf, 0xe1, 0x0d, 0x49, 0xf3, 0x48, 0x3a, 0x05, 0x78, 0x45, 0x3f, + 0x3b, 0x39, 0x3d, 0xb1, 0x12, 0xeb, 0x0e, 0x19, 0xd8, 0x61, 0x11, 0x41, 0xae, 0xe8, 0xc8, 0x5a, + 0x1c, 0x9b, 0xdf, 0xae, 0xe8, 0x07, 0x8c, 0x49, 0x1e, 0x69, 0x81, 0x0f, 0x56, 0x4a, 0x4c, 0xa4, + 0x9a, 0x39, 0x65, 0x90, 0x98, 0x7a, 0x4a, 0xdb, 0x0a, 0x5a, 0x65, 0x59, 0x9e, 0xe0, 0x33, 0x55, + 0xd6, 0x05, 0x7c, 0x31, 0x49, 0x92, 0x6e, 0x88, 0x2f, 0x1d, 0xab, 0x10, 0xd5, 0x9e, 0x4c, 0xd5, + 0x6f, 0xb7, 0x8e, 0x3c, 0xc3, 0x88, 0x2f, 0x8e, 0xae, 0xde, 0x9b, 0x4e, 0x1a, 0x70, 0xb7, 0xee, + 0x38, 0x26, 0x34, 0x22, 0x77, 0xef, 0x59, 0x18, 0x6d, 0x13, 0x99, 0x6a, 0xd1, 0x78, 0xbe, 0xa1, + 0x97, 0x8e, 0xc1, 0x2b, 0x82, 0xd1, 0x1c, 0x8e, 0x49, 0x6e, 0x6c, 0x43, 0x35, 0xac, 0x30, 0x1c, + 0xd8, 0xce, 0xbc, 0x9c, 0x22, 0x99, 0xd8, 0x02, 0xdc, 0x88, 0x3b, 0x38, 0x7e, 0x88, 0xb1, 0xcb, + 0x6a, 0x1c, 0x4c, 0x8b, 0xcb, 0x2d, 0xad, 0x41, 0x36, 0xd9, 0x1c, 0xc6, 0xf3, 0x1a, 0xe2, 0x5a, + 0x40, 0xca, 0x56, 0xbe, 0xc2, 0x26, 0xce, 0x4f, 0x98, 0x07, 0xed, 0xd5, 0x66, 0x87, 0x5b, 0xb4, + 0xce, 0xa4, 0x31, 0x9b, 0xcf, 0x31, 0x28, 0xa9, 0x25, 0x09, 0x5d, 0x2d, 0x3a, 0x96, 0x2e, 0x5b, + 0xdb, 0x39, 0x72, 0x58, 0x12, 0xeb, 0xb9, 0xb9, 0xed, 0x15, 0x4a, 0xc6, 0xda, 0xe6, 0x91, 0x03, + 0xeb, 0xb0, 0xae, 0x17, 0x43, 0x25, 0x0a, 0xdf, 0x46, 0x33, 0x3a, 0xba, 0x66, 0xbb, 0xea, 0x20, + 0xc6, 0xc2, 0xa3, 0x74, 0x39, 0xc7, 0x16, 0xc2, 0x2f, 0x48, 0xfd, 0xbe, 0xa6, 0x17, 0xd3, 0xe3, + 0x8d, 0xf6, 0x49, 0x24, 0x42, 0xec, 0x62, 0x2a, 0x4f, 0x47, 0xdf, 0xf4, 0x47, 0x64, 0x91, 0xba, + 0x68, 0xb1, 0xf1, 0x8e, 0x48, 0x8b, 0x47, 0x84, 0x15, 0x75, 0x08, 0x9c, 0x65, 0x31, 0x8c, 0x6d, + 0xe4, 0x90, 0xd7, 0xf4, 0x52, 0xb1, 0x15, 0x76, 0xb2, 0xc8, 0x26, 0xbc, 0x95, 0x25, 0xba, 0x79, + 0x48, 0x09, 0x9f, 0xfc, 0xf5, 0xa4, 0x2c, 0xd1, 0xf6, 0xc8, 0xba, 0x6a, 0x12, 0x14, 0x39, 0x4c, + 0x1b, 0x73, 0x09, 0xc5, 0xb5, 0xde, 0xdd, 0x5e, 0x4e, 0x18, 0x47, 0xda, 0xb0, 0xc1, 0xea, 0x9f, + 0x37, 0xab, 0x1d, 0xac, 0xbe, 0x80, 0xe1, 0x70, 0xd5, 0x2d, 0x35, 0xbf, 0x57, 0xb9, 0x4a, 0x52, + 0x91, 0xe3, 0x46, 0xd9, 0x5c, 0xea, 0xd0, 0x62, 0x75, 0x0e, 0x7e, 0x67, 0xc0, 0xe2, 0xf0, 0x11, + 0x49, 0xba, 0x34, 0xf7, 0x59, 0x5d, 0x50, 0x65, 0xcd, 0x2c, 0x14, 0xe1, 0xdc, 0xbd, 0x44, 0x5e, + 0x11, 0xaf, 0xab, 0x22, 0xd2, 0xa3, 0xc2, 0x62, 0xdf, 0x1b, 0xd2, 0x68, 0xb9, 0xca, 0x9e, 0x3d, + 0x84, 0xb7, 0xe8, 0xc4, 0x9e, 0xc2, 0xa2, 0x48, 0xb0, 0x13, 0xf6, 0xcd, 0x10, 0x0f, 0x59, 0xe7, + 0xb5, 0x3b, 0x96, 0x4f, 0xd9, 0x7b, 0xb0, 0x3b, 0x8f, 0x4b, 0x90, 0x1c, 0xdf, 0xd7, 0xb0, 0xc1, + 
0xf4, 0x4c, 0xee, 0x50, 0xd3, 0x6a, 0x53, 0x81, 0x56, 0x9e, 0x4b, 0xf0, 0xb6, 0xb2, 0xe0, 0x3c, + 0x54, 0x85, 0x90, 0xd4, 0x3b, 0x1e, 0x4f, 0x87, 0x35, 0x7c, 0x9d, 0x86, 0xf5, 0xf6, 0x73, 0x7a, + 0x4a, 0xe8, 0xab, 0xcf, 0xde, 0x42, 0x88, 0x8c, 0xb2, 0xd5, 0x3e, 0x9f, 0x87, 0x64, 0x97, 0xe1, + 0xcf, 0x2e, 0xc1, 0x32, 0x62, 0xbd, 0x31, 0xea, 0x06, 0xd9, 0x91, 0xe2, 0x51, 0x86, 0x69, 0xdc, + 0x46, 0x61, 0xc7, 0x8c, 0xfe, 0xd6, 0x4d, 0xb5, 0x9b, 0x95, 0xf6, 0x13, 0x14, 0x8b, 0xd2, 0x8b, + 0x38, 0x9c, 0xe0, 0xa1, 0xd3, 0xc7, 0xea, 0x1f, 0x7b, 0xad, 0x2b, 0x0c, 0x0b, 0x99, 0x2f, 0xcd, + 0x44, 0xf8, 0x59, 0xec, 0x3b, 0x6b, 0xb3, 0xfc, 0x9d, 0x12, 0x29, 0x16, 0x5b, 0xb4, 0x1c, 0xf0, + 0x14, 0x1c, 0x63, 0x05, 0x56, 0x9e, 0x30, 0x3a, 0xbd, 0x3e, 0x90, 0x35, 0xc4, 0x41, 0x3e, 0x6a, + 0x05, 0x14, 0x12, 0x65, 0xcf, 0xe2, 0x88, 0xac, 0x98, 0x17, 0x77, 0xea, 0x5d, 0x3f, 0xa9, 0xc4, + 0x70, 0xbb, 0x72, 0x7c, 0x9e, 0x53, 0xcc, 0x5c, 0x6b, 0x64, 0x26, 0x2b, 0x2e, 0xd6, 0x1c, 0x88, + 0xfd, 0xe9, 0x23, 0xc9, 0x58, 0xf1, 0x71, 0x7e, 0xc5, 0x6c, 0xe6, 0x1c, 0x8f, 0x39, 0xae, 0x37, + 0xa6, 0x9b, 0xf2, 0xf0, 0xad, 0x21, 0x50, 0xb9, 0x6c, 0xdd, 0x85, 0xfc, 0xa5, 0xeb, 0x23, 0xd9, + 0x46, 0xa2, 0xb7, 0xad, 0xd6, 0xf6, 0xad, 0x51, 0xea, 0xe8, 0xd2, 0xef, 0xf3, 0x4a, 0x7c, 0xfb, + 0x43, 0x72, 0xdd, 0xcd, 0xcb, 0xb0, 0xf7, 0x92, 0xf1, 0xd4, 0x18, 0xde, 0x6c, 0xe2, 0xdc, 0x4a, + 0x26, 0xbb, 0xf5, 0x9e, 0x71, 0x0b, 0x8d, 0x4d, 0x3d, 0x6d, 0xe5, 0x5d, 0xbe, 0x5d, 0x5a, 0xae, + 0x9f, 0x6c, 0x6e, 0xf0, 0x0c, 0x1d, 0xab, 0x19, 0x7d, 0xcc, 0xbd, 0x24, 0xc9, 0xc7, 0xcb, 0x2b, + 0xd9, 0x72, 0xb2, 0x9f, 0x38, 0x35, 0xf1, 0xc1, 0xd9, 0x52, 0xcf, 0x58, 0x97, 0x5e, 0x22, 0x6f, + 0xf0, 0xd6, 0xdb, 0x0d, 0x43, 0x18, 0x17, 0x21, 0x1c, 0x1a, 0x1e, 0x03, 0xc0, 0x48, 0x98, 0xdf, + 0xee, 0x4c, 0xd8, 0x5c, 0x73, 0x7e, 0xd0, 0x33, 0x4a, 0x63, 0xa4, 0x94, 0xdc, 0x1b, 0x9f, 0x42, + 0x22, 0xfb, 0xc3, 0x75, 0x7c, 0x76, 0xcc, 0x5f, 0x63, 0x5b, 0xaf, 0xb4, 0x3b, 0xd4, 0xd4, 0x61, + 0x08, 0x2c, 0x96, 0xcf, 0xb5, 0x95, 0x4c, 0x93, 0x5e, 0xfe, 0xe9, 0x35, 0xb0, 0x0c, 0x32, 0x1c, + 0xd5, 0x97, 0x21, 0x4e, 0xc7, 0x7c, 0xda, 0x49, 0xa0, 0xbe, 0x8b, 0xc4, 0x23, 0xbc, 0x03, 0xf8, + 0xbe, 0xca, 0x45, 0xa3, 0xd1, 0x2d, 0x4a, 0xed, 0xf9, 0x7a, 0x76, 0x9f, 0x9b, 0x0f, 0xfd, 0xe6, + 0x05, 0xe9, 0xf4, 0x08, 0x27, 0x32, 0xfc, 0x79, 0x29, 0xe8, 0x14, 0x8d, 0x66, 0x97, 0xd8, 0xa1, + 0x7e, 0x5c, 0xe2, 0xec, 0xd4, 0x95, 0x64, 0x1d, 0x96, 0x12, 0xc1, 0x59, 0x57, 0x77, 0xaa, 0x15, + 0xb7, 0xd4, 0x73, 0x61, 0x82, 0x87, 0xb7, 0xd1, 0xa2, 0xbd, 0x68, 0x71, 0xcb, 0x93, 0xbd, 0x6c, + 0xb7, 0x32, 0x80, 0x23, 0x9c, 0xff, 0xc8, 0x9e, 0x59, 0xef, 0xc9, 0x99, 0x43, 0xaa, 0x70, 0x9f, + 0xb5, 0x2b, 0xf9, 0x42, 0x42, 0xfd, 0xd7, 0x34, 0xc5, 0x01, 0x8b, 0x7e, 0xea, 0xef, 0x6e, 0x1c, + 0xaa, 0x10, 0x5b, 0x14, 0xf1, 0x7c, 0x82, 0x1b, 0x9e, 0x4c, 0x84, 0x41, 0x52, 0x5b, 0x7d, 0xb0, + 0x73, 0x10, 0x2d, 0x6f, 0x48, 0x62, 0x11, 0xe8, 0x98, 0x44, 0x55, 0xdb, 0x00, 0xda, 0xc5, 0x93, + 0x3b, 0x66, 0x5b, 0x6e, 0xd3, 0x41, 0x7e, 0x72, 0xb0, 0xb0, 0x4e, 0xa1, 0xc0, 0x6c, 0x10, 0x1f, + 0xbd, 0xb2, 0x72, 0xec, 0xbd, 0x61, 0x44, 0x2d, 0xa4, 0x63, 0x95, 0xf8, 0x99, 0x8d, 0xbe, 0x64, + 0x7d, 0xff, 0xed, 0x56, 0x46, 0x5b, 0x4b, 0xd0, 0x27, 0xa7, 0x06, 0xbc, 0x3e, 0xbe, 0xf1, 0x24, + 0x91, 0xb9, 0xb7, 0x71, 0x30, 0x0c, 0x9e, 0xc4, 0xe4, 0x68, 0xea, 0x85, 0x9c, 0x4a, 0xac, 0x21, + 0xac, 0x2f, 0xfc, 0xd5, 0xc1, 0x9c, 0x27, 0x4b, 0x37, 0xa1, 0xb3, 0x7e, 0x59, 0x1f, 0xc1, 0xf4, + 0xee, 0x0f, 0x15, 0x1b, 0xc4, 0x9e, 0x44, 0xaf, 0x2d, 0x81, 0x1e, 0x04, 0x7e, 0x11, 0xb5, 0x5f, + 0xe8, 0x4a, 0x74, 0x17, 
0xea, 0x30, 0xd2, 0xca, 0x24, 0xe7, 0x6a, 0xd9, 0x67, 0xa8, 0x75, 0x76, + 0xfb, 0x79, 0xdc, 0x91, 0xa4, 0x64, 0x22, 0x45, 0xe1, 0x39, 0xb4, 0xc3, 0xce, 0x76, 0x91, 0x49, + 0x36, 0x1f, 0xc2, 0xa8, 0x85, 0xa7, 0x65, 0x3c, 0x61, 0xf5, 0x72, 0xdc, 0xe6, 0xea, 0xad, 0xa6, + 0x48, 0x25, 0xe7, 0x9c, 0xf0, 0x0a, 0x8f, 0x51, 0xce, 0xb2, 0x3b, 0x75, 0x6b, 0xb9, 0xd7, 0x11, + 0xc5, 0x29, 0xd1, 0x04, 0x4c, 0x09, 0x90, 0xa6, 0x10, 0x86, 0x71, 0x2e, 0xfc, 0xa3, 0xaf, 0x6f, + 0xf2, 0x39, 0xb5, 0xd4, 0x95, 0x62, 0x98, 0x64, 0x6d, 0xa5, 0x72, 0x62, 0x9e, 0x54, 0x8d, 0x9d, + 0x3f, 0xfd, 0x34, 0xb3, 0xce, 0x72, 0x83, 0x94, 0xb6, 0x06, 0xa7, 0x52, 0x64, 0x36, 0x96, 0x7f, + 0x20, 0x2e, 0x9f, 0x44, 0x7e, 0xa7, 0xea, 0xf6, 0x23, 0xb6, 0x74, 0xbd, 0xee, 0x5c, 0x5c, 0x05, + 0xad, 0x38, 0x83, 0xb1, 0x6b, 0xd9, 0x17, 0x74, 0xa1, 0x15, 0x18, 0x24, 0xcd, 0xbd, 0x0b, 0x57, + 0x6f, 0x1e, 0xd0, 0xe5, 0x2d, 0x46, 0xa9, 0x48, 0xfb, 0x34, 0x24, 0xdb, 0xf1, 0xfb, 0x62, 0xc2, + 0x56, 0x4c, 0x6e, 0x9b, 0x6f, 0x7b, 0x4c, 0x10, 0x8a, 0xaf, 0xf8, 0xc9, 0xcf, 0x5a, 0x2a, 0x0f, + 0x74, 0xac, 0x97, 0xae, 0x86, 0x6f, 0x0f, 0x67, 0xfb, 0xce, 0x85, 0xeb, 0x23, 0xeb, 0x52, 0x23, + 0xed, 0x30, 0x99, 0xd1, 0x77, 0x3c, 0x70, 0x06, 0x3e, 0x4e, 0x6c, 0x09, 0x80, 0x04, 0x14, 0x52, + 0x34, 0x90, 0xdc, 0x10, 0x62, 0xe1, 0x62, 0xd3, 0x2c, 0x35, 0x9e, 0x1a, 0x4a, 0xd8, 0xe5, 0x1a, + 0x2f, 0x9a, 0x84, 0x3e, 0xf9, 0x65, 0x8a, 0xf5, 0xe3, 0x87, 0xc7, 0x88, 0x7f, 0x58, 0x12, 0x54, + 0x6c, 0x58, 0xbe, 0xa6, 0x93, 0xcf, 0x34, 0xed, 0xf1, 0x20, 0xea, 0x42, 0x8d, 0xd7, 0xe7, 0x74, + 0x22, 0x25, 0x4c, 0xc5, 0xf6, 0x69, 0xa2, 0x54, 0x63, 0xef, 0x27, 0x0a, 0xdc, 0x53, 0x0c, 0xb2, + 0x29, 0xb0, 0x5b, 0x2d, 0x99, 0xbf, 0x6e, 0xd1, 0x54, 0x81, 0xc8, 0xf7, 0x1b, 0x0a, 0x1c, 0x8a, + 0xc5, 0xfa, 0xb7, 0x0b, 0x38, 0x88, 0x0f, 0x3e, 0xe4, 0x7d, 0x00, 0xeb, 0x26, 0x63, 0x84, 0xc9, + 0xc4, 0xa5, 0xe4, 0x90, 0x81, 0xb1, 0x8d, 0x96, 0xc9, 0x92, 0x75, 0xd3, 0xc2, 0x0d, 0xd5, 0xa7, + 0x93, 0x54, 0xbe, 0x77, 0x52, 0xa3, 0x29, 0xa6, 0x13, 0x4a, 0x90, 0x55, 0x57, 0x64, 0xb8, 0x9a, + 0x5b, 0xaf, 0x58, 0x7e, 0x27, 0x7f, 0xea, 0x16, 0xae, 0x89, 0x36, 0x99, 0xfe, 0xac, 0xe9, 0xa3, + 0x53, 0x0e, 0x62, 0xc3, 0xbb, 0x48, 0xe7, 0x5b, 0x89, 0xbb, 0x0a, 0xf6, 0x43, 0x14, 0xb9, 0x07, + 0x8e, 0x48, 0x63, 0x05, 0x6f, 0xfd, 0x8b, 0x73, 0x75, 0xdd, 0xda, 0x1a, 0x58, 0x1b, 0x0e, 0x76, + 0x41, 0x6d, 0xda, 0xd6, 0xfc, 0x91, 0xe9, 0x89, 0x5b, 0x30, 0xca, 0xc8, 0xc8, 0x9f, 0x3f, 0x50, + 0x95, 0xed, 0x32, 0xb5, 0x96, 0x14, 0xea, 0xab, 0x8f, 0xb4, 0xd8, 0x1b, 0xd2, 0xaa, 0xaa, 0xa7, + 0x6a, 0x45, 0xf6, 0x89, 0x14, 0x5d, 0x7f, 0xe8, 0xc2, 0x75, 0xb9, 0x79, 0x0b, 0x79, 0x22, 0x02, + 0xa6, 0x86, 0x70, 0x83, 0x18, 0xf7, 0x9c, 0x0a, 0x1f, 0x0e, 0x53, 0xa7, 0xe5, 0x30, 0xf2, 0x07, + 0x99, 0xbc, 0x6c, 0x15, 0xe2, 0x85, 0xa6, 0x9a, 0x3f, 0x7e, 0x34, 0xbf, 0x2f, 0x12, 0xe7, 0x53, + 0x17, 0x5e, 0x13, 0x83, 0x35, 0xd0, 0x01, 0x90, 0x7e, 0x83, 0x8d, 0x9b, 0xf0, 0xbc, 0x69, 0xfe, + 0x0c, 0x04, 0xcf, 0xe1, 0xc2, 0x18, 0xad, 0xed, 0xbc, 0x0a, 0x0c, 0x60, 0x49, 0x4f, 0x45, 0xba, + 0x69, 0x67, 0xb7, 0xc1, 0xd7, 0x6c, 0xcb, 0x4c, 0xbc, 0x90, 0x54, 0x2d, 0x0c, 0xa7, 0x62, 0xb3, + 0x66, 0x64, 0x8e, 0xcb, 0x8d, 0xbc, 0x5d, 0x55, 0xae, 0xa7, 0xd7, 0x57, 0x93, 0x8a, 0x42, 0x4e, + 0xf1, 0x1f, 0xe2, 0xa5, 0x2e, 0xcf, 0x5c, 0x02, 0x0f, 0x87, 0xcb, 0xd3, 0xe7, 0xb0, 0x0f, 0x44, + 0x32, 0x85, 0x4e, 0x0f, 0xbe, 0x10, 0xb2, 0x72, 0x08, 0x0c, 0x71, 0x25, 0x7b, 0x6c, 0x9d, 0x61, + 0xf3, 0x82, 0xa5, 0x0e, 0x9d, 0x84, 0x0b, 0x55, 0x7d, 0x4b, 0xb5, 0xd7, 0x53, 0x8b, 0xf5, 0x8a, + 0x86, 0x85, 0x44, 0xe2, 0x93, 0xce, 0xf4, 0xe7, 
0x37, 0xf1, 0xef, 0x82, 0x2b, 0x11, 0x9a, 0xf9, + 0x99, 0xe7, 0xc5, 0xfc, 0xca, 0xfc, 0x91, 0x4d, 0x43, 0xb6, 0xb2, 0x50, 0x20, 0x4b, 0x9b, 0x9f, + 0xc7, 0x71, 0x82, 0xf9, 0x08, 0x7e, 0x31, 0x82, 0xb0, 0x5d, 0x1b, 0x9d, 0xec, 0x8b, 0x73, 0x99, + 0x19, 0x88, 0x5c, 0x05, 0xff, 0x75, 0x86, 0xab, 0x46, 0x8a, 0x2f, 0x1c, 0xec, 0x9d, 0x3e, 0x84, + 0x02, 0x21, 0x49, 0x7b, 0xe2, 0xf5, 0xfe, 0x8c, 0xff, 0xd8, 0x69, 0x3b, 0x7a, 0x02, 0x3c, 0x33, + 0x89, 0xe9, 0x6a, 0x0d, 0x99, 0x3b, 0x61, 0x94, 0xdf, 0xea, 0x55, 0x10, 0x99, 0xbc, 0x34, 0x3a, + 0xab, 0x27, 0x70, 0x6d, 0xae, 0x42, 0xab, 0xae, 0xdd, 0x50, 0xda, 0xe6, 0xa8, 0xa0, 0x6b, 0xde, + 0xfb, 0xb2, 0x4d, 0xd7, 0x6a, 0x02, 0xc7, 0x2f, 0xcb, 0x12, 0x60, 0xd0, 0xd1, 0x2f, 0xf6, 0x23, + 0xd0, 0x90, 0xe3, 0x62, 0xc5, 0x41, 0xc7, 0x3b, 0x60, 0xd9, 0x9c, 0x79, 0x43, 0xda, 0x10, 0x4b, + 0xc2, 0xfa, 0x33, 0xcb, 0xee, 0xcc, 0x6e, 0x0a, 0xad, 0xb6, 0xdb, 0x1a, 0x2b, 0x58, 0x9b, 0x11, + 0x41, 0xf8, 0xe2, 0x67, 0x48, 0xb5, 0x46, 0x2c, 0xad, 0x00, 0x06, 0xf6, 0x93, 0xc8, 0x1a, 0x1e, + 0xc1, 0xde, 0x76, 0xbe, 0x2e, 0x3b, 0x3c, 0x09, 0x59, 0xd3, 0x56, 0x84, 0xdf, 0x1b, 0x5f, 0x5c, + 0x8b, 0x32, 0xb1, 0xe2, 0x97, 0xd3, 0x56, 0xac, 0xfc, 0xd8, 0x4f, 0xbc, 0xc9, 0xbe, 0xd9, 0x0f, + 0x35, 0xdb, 0x95, 0x09, 0x4c, 0x52, 0x66, 0x01, 0x2e, 0xa9, 0x04, 0x90, 0x46, 0xdb, 0xbe, 0xad, + 0x32, 0x7d, 0xa2, 0xc1, 0xa5, 0x68, 0xf5, 0x0f, 0x2b, 0x1f, 0xe1, 0x53, 0x31, 0x03, 0xe6, 0x8c, + 0x17, 0x9b, 0xea, 0x1d, 0xb0, 0xc3, 0x52, 0x59, 0x5a, 0xaa, 0x39, 0x5f, 0x85, 0x1e, 0xbe, 0xa8, + 0x4f, 0x53, 0x8f, 0xd2, 0x70, 0x74, 0xf0, 0xa1, 0x29, 0x93, 0xe4, 0x06, 0x2e, 0x29, 0x0d, 0x07, + 0xf6, 0x17, 0xf6, 0x5d, 0xef, 0x39, 0x05, 0xb9, 0x04, 0x08, 0x57, 0x71, 0xe2, 0x9c, 0x82, 0xd4, + 0x94, 0xcd, 0x87, 0xda, 0x94, 0x68, 0xc7, 0x2e, 0xdb, 0xb6, 0xaa, 0x11, 0x5e, 0x74, 0xf4, 0xc2, + 0x3f, 0x1e, 0x1e, 0x8a, 0x7d, 0x5a, 0xc3, 0xd7, 0x9f, 0x1c, 0x7f, 0xcd, 0x19, 0x7e, 0xfe, 0xe0, + 0xa7, 0x03, 0x2b, 0xdc, 0xae, 0x81, 0x16, 0x2d, 0x55, 0xef, 0x23, 0x3a, 0x4e, 0x14, 0x65, 0x16, + 0xa2, 0x5e, 0xe8, 0xe8, 0xbd, 0x31, 0x27, 0xbd, 0x07, 0xf6, 0x8d, 0xbd, 0x91, 0xe7, 0xcf, 0xbc, + 0xa0, 0x14, 0x4c, 0x0e, 0xd4, 0x0c, 0xaf, 0x77, 0xed, 0x05, 0x8b, 0xca, 0xdf, 0xe6, 0x88, 0x0e, + 0x40, 0xc4, 0x70, 0x37, 0xdf, 0x2c, 0x92, 0xa0, 0x38, 0x5d, 0x31, 0x76, 0x87, 0x63, 0xe2, 0xb7, + 0xa5, 0x73, 0xfa, 0xbd, 0x55, 0x2c, 0xcb, 0x09, 0x8f, 0xea, 0x1e, 0xbb, 0x19, 0xf6, 0x94, 0x73, + 0xcd, 0x07, 0xc3, 0x37, 0x50, 0xa4, 0x91, 0xbe, 0x7e, 0xb7, 0xb1, 0x32, 0xf9, 0x61, 0x25, 0x43, + 0x17, 0xab, 0x33, 0x21, 0x9a, 0x2d, 0xc6, 0xa1, 0x9b, 0xa8, 0xf5, 0xbc, 0x9c, 0xb2, 0x06, 0x8f, + 0x9f, 0x8f, 0x9f, 0x44, 0xc7, 0xc2, 0xa7, 0xc0, 0xc5, 0x7b, 0x2f, 0x35, 0xff, 0x79, 0xf6, 0x93, + 0x09, 0xda, 0x5b, 0x03, 0x28, 0x5c, 0x8e, 0xd9, 0x87, 0xae, 0x5d, 0xfe, 0x40, 0x36, 0x41, 0x5a, + 0xf2, 0xbc, 0x02, 0xdd, 0xdf, 0x7b, 0x43, 0x1b, 0x93, 0x49, 0x77, 0xe2, 0x2c, 0x68, 0x98, 0x84, + 0xf0, 0x73, 0x7f, 0x34, 0x74, 0xbd, 0x33, 0x87, 0xa8, 0x25, 0xd6, 0x54, 0x73, 0xd5, 0x69, 0xaa, + 0xcc, 0x8c, 0xea, 0x42, 0xd5, 0xc8, 0xd3, 0x69, 0x87, 0xc3, 0xce, 0xc3, 0x23, 0x46, 0xbb, 0xca, + 0xd3, 0x52, 0x31, 0x93, 0xcc, 0xc3, 0xd7, 0xc6, 0x58, 0x69, 0x29, 0xce, 0x23, 0xf7, 0xe5, 0xa8, + 0x94, 0xf3, 0x95, 0x16, 0x5e, 0x39, 0x85, 0x16, 0xdb, 0x0c, 0x5e, 0xd9, 0x46, 0xbc, 0xdb, 0x25, + 0xf6, 0x96, 0xae, 0x3f, 0x49, 0x80, 0xcc, 0x84, 0x83, 0x7a, 0xf6, 0xb0, 0x1e, 0xff, 0xf4, 0xb9, + 0x03, 0x06, 0xae, 0x12, 0x56, 0x5e, 0x08, 0x46, 0x5b, 0x2d, 0xe9, 0xab, 0x85, 0xc7, 0x67, 0x91, + 0x5f, 0x94, 0xf9, 0x12, 0x20, 0xef, 0x29, 0x98, 0x72, 0xbe, 0x0d, 0xe4, 
0x86, 0xc4, 0xd5, 0x2c, + 0x66, 0xe4, 0x6c, 0x08, 0x89, 0xe4, 0xaf, 0x2f, 0xf2, 0xfd, 0x22, 0x11, 0x7d, 0x56, 0xd5, 0xe6, + 0x4a, 0xb5, 0x86, 0xe9, 0x3d, 0xd2, 0xd4, 0xb0, 0x76, 0xfb, 0x33, 0xd9, 0x56, 0xd6, 0x6c, 0xee, + 0x69, 0xb3, 0x57, 0x0c, 0x3e, 0x8c, 0xda, 0xc8, 0x74, 0xc8, 0x5f, 0x57, 0x19, 0x4b, 0x2d, 0xf4, + 0xe2, 0x29, 0x97, 0x4f, 0x57, 0x89, 0x6b, 0x17, 0x29, 0x8f, 0x28, 0x51, 0x7b, 0x45, 0xe5, 0xce, + 0xb0, 0x33, 0xf9, 0xb4, 0x43, 0x97, 0x3d, 0x6a, 0xa9, 0x15, 0x69, 0x48, 0x12, 0xe1, 0x17, 0x83, + 0x8d, 0x3c, 0xbd, 0xa8, 0xcf, 0xed, 0xc4, 0x66, 0x39, 0xe6, 0xf6, 0x19, 0x4c, 0xf4, 0x5d, 0xc0, + 0x9d, 0xe6, 0x11, 0xdd, 0x13, 0x7d, 0x9b, 0xbc, 0xed, 0xf9, 0x5f, 0x92, 0xbb, 0x22, 0x62, 0x98, + 0xf3, 0xef, 0xb6, 0x27, 0x9e, 0xd8, 0xf7, 0x4d, 0x81, 0x49, 0x84, 0xa8, 0xcd, 0x94, 0x14, 0x65, + 0xf7, 0x04, 0x87, 0x23, 0xa8, 0xd4, 0xf6, 0x08, 0xd6, 0x0c, 0xaa, 0xa2, 0x35, 0x9f, 0x6a, 0x65, + 0x5d, 0xfd, 0x10, 0x07, 0xac, 0xbc, 0xb0, 0x43, 0xf8, 0x47, 0x68, 0xd5, 0x8e, 0xa2, 0x6b, 0x03, + 0xfe, 0xea, 0xaf, 0xfe, 0xea, 0xaf, 0xfe, 0xea, 0xaf, 0xfe, 0xea, 0xff, 0x9d, 0x04, 0x71, 0x49, + 0x00, 0xe4, 0x40, 0xa0, 0x94, 0x23, 0x60, 0x19, 0x98, 0x0e, 0x01, 0x00, 0x42, 0x9a, 0x14, 0xf5, + 0xb3, 0xa0, 0xd2, 0xb8, 0x12, 0xeb, 0xb9, 0x4a, 0x40, 0x25, 0xd7, 0xdf, 0x11, 0x1b, 0x1d, 0x00, + 0x00, 0xa5, 0x73, 0xde, 0x36, 0x31, 0x12, 0x00, 0x00, 0xe6, 0xb3, 0xd4, 0xa5, 0x30, 0x0b, 0xdf, + 0x2b, 0x5a, 0x97, 0xf8, 0x95, 0x0e, 0x02, 0x00, 0x92, 0x83, 0xe9, 0xae, 0x5d, 0x13, 0x9e, 0xe0, + 0xa4, 0xff, 0x8e, 0xdb, 0x0a, 0x34, 0x13, 0xfc, 0xb3, 0x43, 0x32, 0xbc, 0x58, 0x31, 0x0b, 0x8a, + 0xba, 0x9c, 0x25, 0x8b, 0x9f, 0xde, 0x04, 0x82, 0xaa, 0x43, 0x00, 0x77, 0xd6, 0x7e, 0x85, 0x2c, + 0x68, 0x96, 0x6c, 0x58, 0x74, 0xe4, 0xd6, 0x5d, 0x74, 0xe0, 0xe5, 0xbd, 0x5d, 0xfe, 0x8f, 0x1d, + 0x3f, 0xdd, 0x03, 0xb4, 0xa6, 0x03, 0xb9, 0xb9, 0xf3, 0xd1, 0x90, 0xdf, 0xf9, 0xe0, 0x72, 0x59, + 0x50, 0x7c, 0x64, 0x2e, 0x2e, 0x5d, 0xc2, 0x14, 0xd9, 0x3b, 0xcb, 0x96, 0x6c, 0x16, 0x34, 0x73, + 0x4a, 0x01, 0x50, 0x8b, 0x82, 0x90, 0x25, 0x46, 0x01, 0x00, 0xf0, 0x51, 0x14, 0x64, 0xeb, 0xe0, + 0x45, 0x00, 0x1c, 0x8b, 0xbd, 0x00, 0x20, 0xd3, 0x5a, 0x72, 0xdf, 0x55, 0x0c, 0x0c, 0x59, 0x61, + 0xed, 0x4d, 0xdf, 0xc5, 0x9d, 0x77, 0x53, 0x1a, 0xc7, 0x22, 0x06, 0xba, 0x96, 0xde, 0x77, 0x01, + 0x7e, 0x07, 0x41, 0x5b, 0x83, 0xce, 0x5c, 0xc4, 0x78, 0x90, 0xeb, 0x29, 0x02, 0x78, 0x20, 0x4d, + 0x28, 0x20, 0x80, 0x40, 0x7a, 0x77, 0xea, 0x9a, 0x6d, 0xd3, 0x8f, 0x49, 0x58, 0xb5, 0xcf, 0xda, + 0xc6, 0x87, 0x1f, 0xa1, 0x8e, 0xbc, 0xe1, 0x69, 0x80, 0x35, 0xb3, 0xfe, 0x1f, 0xf8, 0x90, 0xcf, + 0x9f, 0x74, 0x89, 0xff, 0xec, 0x01, 0x16, 0x28, 0x52, 0xf4, 0x23, 0xbe, 0xf6, 0xe4, 0x49, 0x4f, + 0x9e, 0x79, 0x07, 0x80, 0x6e, 0x6f, 0x03, 0x78, 0x21, 0x45, 0x52, 0x6b, 0x2f, 0x20, 0x27, 0x6b, + 0x7b, 0x1b, 0xc7, 0x17, 0x30, 0x7c, 0xa4, 0xdf, 0xe7, 0x23, 0x40, 0x86, 0x08, 0xa4, 0x5c, 0x6c, + 0x45, 0x85, 0xd1, 0x85, 0xdd, 0x55, 0x20, 0x85, 0x8a, 0xb6, 0x10, 0x6c, 0xbd, 0xf6, 0x33, 0xed, + 0xb0, 0x89, 0x18, 0x07, 0x20, 0xb1, 0xfe, 0x05, 0x26, 0xb1, 0xdd, 0x44, 0x4c, 0x02, 0x90, 0x38, + 0x6c, 0x87, 0x81, 0xf6, 0x24, 0x4e, 0x41, 0x17, 0x79, 0x69, 0xcc, 0x80, 0xe0, 0x34, 0x1c, 0xe7, + 0xf4, 0xdf, 0x39, 0x42, 0x1a, 0x37, 0xa5, 0x10, 0x16, 0x40, 0x5c, 0xd6, 0x34, 0x91, 0x75, 0x12, + 0x17, 0x94, 0x3d, 0x71, 0xa5, 0x32, 0x38, 0x50, 0x1e, 0xea, 0xb2, 0x0d, 0x42, 0xb5, 0x66, 0x4d, + 0xc7, 0x47, 0x86, 0xdc, 0x96, 0xa5, 0x27, 0x40, 0x05, 0xe1, 0x1f, 0xb5, 0xf0, 0x21, 0xd3, 0x47, + 0xbf, 0x3d, 0x84, 0x9b, 0x80, 0x2c, 0x18, 0xb2, 0x7c, 0xd6, 0x0a, 0x1d, 0x2c, 0xcb, 0x02, 0x8a, + 
0x72, 0x0e, 0xb8, 0xef, 0x1b, 0xe7, 0xaa, 0x19, 0xb2, 0x79, 0x70, 0x20, 0xf1, 0xef, 0x5e, 0x81, + 0x05, 0x8b, 0x45, 0x29, 0xa0, 0x38, 0xcf, 0xc1, 0xda, 0x61, 0x60, 0x20, 0x2e, 0xca, 0x9e, 0xc4, + 0x7a, 0x90, 0x02, 0x74, 0xdd, 0x1a, 0x00, 0x94, 0x27, 0x2c, 0x09, 0xd1, 0xb3, 0x02, 0x83, 0x85, + 0x90, 0x41, 0xad, 0xf0, 0x69, 0x50, 0x30, 0x71, 0x50, 0x72, 0x77, 0x7a, 0x59, 0xfa, 0xef, 0xfe, + 0x00, 0xa1, 0x56, 0x28, 0xe0, 0x90, 0x77, 0x70, 0x50, 0x37, 0x02, 0xea, 0x44, 0x04, 0xfe, 0x93, + 0xa1, 0x0e, 0x0e, 0x91, 0xae, 0x4d, 0xcf, 0x5a, 0xc9, 0xb3, 0xa6, 0x80, 0x42, 0x23, 0x7e, 0xdc, + 0xe7, 0xc9, 0x61, 0x0a, 0x63, 0x7c, 0xda, 0x71, 0x97, 0xeb, 0x6e, 0x6a, 0xb6, 0xf8, 0x10, 0xd0, + 0x3f, 0x59, 0x44, 0xd6, 0xff, 0xe4, 0xc6, 0x87, 0x6c, 0xb1, 0x92, 0x40, 0x93, 0x51, 0x00, 0x68, + 0xf4, 0x34, 0x10, 0xc0, 0x9d, 0x45, 0xf1, 0xb7, 0x85, 0xa0, 0xe7, 0xee, 0x3f, 0xe4, 0x1f, 0xaf, + 0x14, 0xee, 0xbf, 0x6b, 0x38, 0x0b, 0x09, 0x34, 0x55, 0x81, 0xa0, 0xe7, 0x77, 0x35, 0xd0, 0xff, + 0xad, 0x46, 0x8a, 0xf6, 0xbe, 0x1a, 0xd4, 0xdf, 0xd5, 0x08, 0x0d, 0x70, 0x86, 0x08, 0xa2, 0x02, + 0xee, 0x2a, 0x49, 0x26, 0x46, 0x01, 0x05, 0x25, 0x0f, 0xc6, 0x8f, 0xc4, 0x27, 0x9f, 0x9f, 0x63, + 0x47, 0xf6, 0xdf, 0xcf, 0xdf, 0x64, 0xfa, 0xef, 0xc8, 0xe4, 0x08, 0xe6, 0x87, 0x21, 0xef, 0x50, + 0x01, 0xc9, 0x7a, 0x61, 0x80, 0x20, 0x26, 0xfa, 0x9c, 0x8b, 0xfb, 0xf9, 0x53, 0x64, 0xc8, 0x82, + 0xd2, 0x26, 0x66, 0xa6, 0x7d, 0x00, 0x44, 0xf4, 0x20, 0x47, 0x67, 0xc3, 0xae, 0xe3, 0xba, 0x60, + 0xdc, 0xfd, 0xc9, 0x70, 0x88, 0xc5, 0x42, 0xdc, 0xdd, 0x77, 0x25, 0x74, 0x41, 0x8a, 0xc4, 0x58, + 0xbf, 0xcf, 0x83, 0x15, 0x47, 0x97, 0xfe, 0x9b, 0xc9, 0x71, 0x71, 0x74, 0xdc, 0x25, 0x24, 0xd2, + 0xc8, 0xd1, 0x6e, 0xb7, 0x60, 0x41, 0x2c, 0x00, 0x3d, 0x0d, 0x0c, 0x00, 0x6e, 0xc2, 0x02, 0x50, + 0xf4, 0x10, 0x74, 0x65, 0xc2, 0x21, 0x38, 0xa2, 0x21, 0x4d, 0x00, 0x68, 0x44, 0x3f, 0x00, 0xdc, + 0x80, 0x0a, 0xf8, 0xd6, 0xf1, 0x7b, 0x27, 0x33, 0xe3, 0x0e, 0x3a, 0xd8, 0x9f, 0x53, 0xfe, 0x6f, + 0x0d, 0x00, 0xc2, 0xff, 0xee, 0x12, 0x9c, 0xee, 0xdf, 0x2e, 0x49, 0xc0, 0xef, 0x4e, 0x05, 0x23, + 0x4f, 0x81, 0x66, 0xc9, 0x44, 0x46, 0x9f, 0x2d, 0xdd, 0xcf, 0x37, 0xf9, 0x9f, 0xdb, 0x90, 0x39, + 0xcd, 0x04, 0xf8, 0x7d, 0x53, 0xee, 0xeb, 0x5a, 0xfe, 0xf0, 0x4f, 0xf7, 0xc3, 0xff, 0xed, 0x7e, + 0x27, 0xf6, 0x7f, 0xef, 0xa9, 0x48, 0x89, 0x80, 0x0a, 0x3b, 0xa3, 0xfc, 0x12, 0x39, 0xbd, 0xef, + 0x33, 0x09, 0x09, 0xb4, 0x5d, 0x01, 0xd9, 0xfa, 0xdf, 0xde, 0xf6, 0x13, 0x93, 0x40, 0x9b, 0x50, + 0xb0, 0x01, 0xc9, 0xa8, 0x40, 0xe0, 0x7f, 0xfa, 0x4f, 0x4c, 0x00, 0xa5, 0x84, 0x91, 0x43, 0x20, + 0x4e, 0x88, 0xee, 0x3b, 0x0b, 0x8c, 0x88, 0x00, 0xda, 0x84, 0xbc, 0x7d, 0xdb, 0x1a, 0xf6, 0x0e, + 0x90, 0xd0, 0xfd, 0x67, 0x26, 0x93, 0xc3, 0x8b, 0x08, 0x13, 0x2c, 0xf0, 0x51, 0x01, 0x44, 0x18, + 0xdd, 0x77, 0x77, 0x89, 0x91, 0x10, 0xd5, 0x82, 0x9e, 0x06, 0x19, 0x28, 0x18, 0x81, 0x84, 0x73, + 0x47, 0xf0, 0x3f, 0x14, 0x08, 0xbb, 0x8f, 0xc7, 0x07, 0xd7, 0xa1, 0x00, 0xc0, 0xcc, 0x60, 0x00, + 0xbe, 0x27, 0x09, 0x60, 0x8d, 0x02, 0xf5, 0x06, 0xc7, 0x42, 0x30, 0x42, 0x87, 0x1d, 0xd5, 0x9c, + 0x9e, 0x06, 0xe5, 0xcf, 0x57, 0x72, 0x77, 0xe4, 0xd5, 0xfc, 0x43, 0x8c, 0xae, 0x64, 0x64, 0x00, + 0x06, 0x46, 0x77, 0x82, 0x05, 0x81, 0x4c, 0x58, 0xd4, 0xd9, 0x0d, 0x09, 0xf4, 0x4f, 0x1f, 0xff, + 0xcc, 0x0b, 0x9d, 0xc7, 0x7f, 0x4f, 0xc1, 0x32, 0x60, 0xd5, 0x89, 0xf6, 0x3f, 0xd5, 0xc2, 0xb0, + 0xb2, 0xa0, 0x77, 0x33, 0x70, 0x3f, 0xf1, 0x0d, 0x77, 0xef, 0x18, 0x2a, 0x12, 0xee, 0x1f, 0xd0, + 0xf0, 0x21, 0x05, 0xff, 0xc3, 0x8b, 0xc0, 0xbb, 0x35, 0x43, 0x09, 0xf5, 0x3f, 0x64, 0xbe, 0x67, + 0xce, 0xbd, 0xea, 0xcc, 
0xc8, 0x04, 0x69, 0xab, 0x44, 0xe6, 0x31, 0x80, 0xf7, 0x71, 0x28, 0x77, + 0x44, 0xba, 0x27, 0xea, 0x3d, 0x71, 0xee, 0xfd, 0xcc, 0xf7, 0x24, 0xb9, 0x27, 0xc1, 0x3d, 0x29, + 0xee, 0xb9, 0x0c, 0xba, 0x23, 0x00, 0xf9, 0x7e, 0x9f, 0x7b, 0xb2, 0xff, 0xbb, 0xbe, 0x3f, 0x9f, + 0x13, 0xe1, 0x1d, 0x5f, 0xde, 0xd3, 0xf3, 0x9e, 0xff, 0x79, 0x90, 0x09, 0x48, 0xfe, 0x01, 0x23, + 0x00, 0xe7, 0x1f, 0xde, 0xde, 0xeb, 0x3f, 0x7e, 0xae, 0xfb, 0xdf, 0x6f, 0x49, 0xa1, 0xff, 0xdf, + 0xdf, 0xf9, 0xbf, 0xfa, 0xab, 0xbf, 0xfa, 0xab, 0xbf, 0xfa, 0xab, 0xbf, 0xfa, 0xab, 0xbf, 0xfa, + 0xab, 0x3f, 0xfa, 0x1f, 0xa2, 0x48, 0xac, 0x48, 0x00, 0x30, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveGspRmBoot_GA102_ucode_image_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 12288, // uncompressed data size (bytes) + 7340, // compressed data size (bytes) + kgspBinArchiveGspRmBoot_GA102_ucode_image_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveGspRmBoot_GA102("ucode_desc_prod") +// FILE NAME: kernel/inc/gsprm/bin/g_gsprm_skbl_prod_ga102_riscv_desc.bin +// FILE TYPE: BINARY +// VAR NAME: N/A +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 76 +// COMPRESSED SIZE (bytes): 40 +// +static BINDATA_CONST NvU8 kgspBinArchiveGspRmBoot_GA102_ucode_desc_prod_data[] = +{ + 0x63, 0x61, 0x00, 0x02, 0x05, 0x06, 0x86, 0x0d, 0xac, 0x40, 0xac, 0xca, 0xc0, 0x20, 0xc0, 0x80, + 0x05, 0x70, 0x40, 0x31, 0x33, 0x10, 0x73, 0x03, 0x31, 0x2f, 0x03, 0x03, 0x23, 0x16, 0x75, 0x00, + 0x8d, 0x26, 0xa8, 0x4f, 0x4c, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveGspRmBoot_GA102_ucode_desc_prod_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 76, // uncompressed data size (bytes) + 40, // compressed data size (bytes) + kgspBinArchiveGspRmBoot_GA102_ucode_desc_prod_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveGspRmBoot_GA102 = +{ + 4, // entryNum + { + // entries[] : { "name", pBinStorage } + { "ucode_image_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveGspRmBoot_GA102_ucode_image_dbg_storage_pvt }, + { "ucode_desc_dbg" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveGspRmBoot_GA102_ucode_desc_dbg_storage_pvt }, + { "ucode_image_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveGspRmBoot_GA102_ucode_image_prod_storage_pvt }, + { "ucode_desc_prod" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveGspRmBoot_GA102_ucode_desc_prod_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveGspRmBoot_GA102(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveGspRmBoot_GA102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_TU102.c b/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_TU102.c new file mode 100644 index 000000000..cd9ca7c07 --- /dev/null +++ b/src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_TU102.c @@ -0,0 +1,174 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveGspRmBoot_TU102("ucode_image") +// FILE NAME: kernel/inc/gsprm/bin/g_gsprm_tu10x_riscv_image.bin +// FILE TYPE: BINARY +// VAR NAME: N/A +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 4096 +// COMPRESSED SIZE (bytes): 811 +// +static BINDATA_CONST NvU8 kgspBinArchiveGspRmBoot_TU102_ucode_image_data[] = +{ + 0xed, 0x53, 0x31, 0x6c, 0xd3, 0x40, 0x14, 0xfd, 0x77, 0x17, 0xfb, 0x5c, 0x11, 0xb5, 0x89, 0x1c, + 0xe4, 0x42, 0x91, 0x0a, 0x32, 0x52, 0x3b, 0x22, 0x25, 0x8d, 0x0b, 0xea, 0x10, 0x31, 0xb4, 0x8c, + 0x80, 0x84, 0xca, 0x06, 0xc2, 0xa5, 0x20, 0x96, 0x2e, 0xa0, 0xae, 0xa9, 0x4c, 0x6b, 0x84, 0x58, + 0xc0, 0x52, 0x82, 0x14, 0x24, 0x3a, 0xb5, 0x4c, 0x08, 0xc9, 0xa5, 0xae, 0xc4, 0xc2, 0x00, 0xcd, + 0xc0, 0x82, 0x84, 0x20, 0x0c, 0x0c, 0x2d, 0xa6, 0x4d, 0x07, 0x86, 0x16, 0x68, 0x1a, 0x15, 0x68, + 0xf8, 0xb6, 0x2f, 0x65, 0x60, 0x66, 0xf3, 0x93, 0xac, 0xe7, 0xfb, 0xff, 0xdd, 0xff, 0xf7, 0xff, + 0xdd, 0xef, 0x21, 0x29, 0x50, 0xd3, 0x84, 0xaa, 0x84, 0x14, 0x26, 0xa0, 0x08, 0x13, 0x00, 0x50, + 0xe6, 0x00, 0x8e, 0x5d, 0x1d, 0xb2, 0x3c, 0x0e, 0x68, 0xdf, 0xd5, 0x87, 0xd2, 0x09, 0x6b, 0x9e, + 0x83, 0x3e, 0x38, 0x95, 0xd0, 0x73, 0xf7, 0x13, 0x6a, 0xd9, 0xa3, 0xe6, 0x69, 0x4e, 0x85, 0xee, + 0x54, 0xa0, 0x0b, 0xfc, 0xce, 0x8e, 0x06, 0x66, 0x89, 0x43, 0x19, 0x63, 0xd4, 0x8b, 0x2f, 0x7f, + 0x46, 0xfe, 0x91, 0x93, 0x6d, 0xbf, 0x5a, 0x9e, 0xa4, 0xfe, 0x79, 0xbe, 0x27, 0xfc, 0xbd, 0x62, + 0xbf, 0x11, 0xe6, 0x91, 0x4e, 0x24, 0xd5, 0x83, 0xb7, 0x98, 0x35, 0xc7, 0xdb, 0xf9, 0xf3, 0x81, + 0xbd, 0xf2, 0x61, 0x18, 0x9c, 0xd2, 0x32, 0x30, 0xdc, 0xaf, 0xc9, 0x00, 0xaa, 0xbc, 0x3c, 0xe0, + 0xda, 0x73, 0xc0, 0xf2, 0x32, 0x58, 0xcf, 0xed, 0xae, 0x1e, 0xce, 0xc1, 0x7f, 0xf8, 0xba, 0x89, + 0xff, 0xe8, 0x4b, 0x81, 0xff, 0xe0, 0x75, 0x53, 0xe8, 0xb2, 0x91, 0x26, 0xf4, 0x37, 0xd8, 0x12, + 0xef, 0xf0, 0xc7, 0x3e, 0x36, 0x50, 0x47, 0x7b, 0x46, 0x47, 0x40, 0xd5, 0xaa, 0xe0, 0xc9, 0x14, + 0xb2, 0xbc, 0x06, 0xae, 0xb4, 0x04, 0xfe, 0xb8, 0xf7, 0xc3, 0x5a, 0xb4, 0x15, 0xb6, 0xc8, 0xb9, + 0xbf, 0xb2, 0xfc, 0xc3, 0x5a, 0xb0, 0x49, 0x76, 0xa6, 0x04, 0xfe, 0x8d, 0xea, 0x37, 0x95, 0xbf, + 0xfd, 0xae, 0x6a, 0x93, 0x2c, 0x88, 0xcb, 0xf2, 0xc3, 0x39, 0x95, 0xef, 0xb4, 0x3c, 0xe5, 0x57, + 0x2b, 0xcb, 0xd7, 0xc1, 0xf9, 0xd8, 0x4d, 0xcc, 0x8d, 0x0d, 0x8c, 0x33, 0x57, 0xf0, 0xc7, 0xf8, + 0x96, 0xff, 0x79, 0x6d, 0x93, 0x79, 0x76, 0x52, 0x4d, 0x60, 0x3d, 0x99, 0x9b, 0x4c, 0xe5, 0x1a, + 0x64, 0x6f, 0xcf, 0x80, 0xbe, 0x50, 0x4f, 0x3a, 0xd2, 0x14, 0xd6, 0x99, 0x10, 0xfd, 0x29, 0x74, + 0xf8, 0x49, 0xe9, 0xab, 0x95, 0x9f, 0x02, 0xbf, 0x59, 0xfa, 0xea, 0xf0, 0x14, 0x18, 0x9c, 0x80, + 0x53, 0xe6, 0x54, 0x1f, 0x24, 0xa0, 0x0f, 0xad, 0x82, 0x7e, 0x82, 0x50, 0x3d, 0xb7, 0x85, 0xeb, + 0x55, 0xaa, 0x0f, 0x11, 0xea, 0xce, 0xcc, 0xe0, 0x7e, 0x42, 0xea, 0x45, 0x19, 0x98, 0x21, 0x51, + 0x47, 0xde, 0xc4, 0xbc, 0x6b, 0x60, 0x19, 0x9c, 0x9a, 0xbf, 0x1a, 0x94, 0x19, 0x36, 0xb8, 0x76, + 0x1d, 0x7b, 0xc7, 0x5b, 0x2e, 0xdf, 0x06, 0x6b, 0x91, 0x83, 0x61, 0xbf, 0x39, 0xa7, 0xf2, 0xda, + 0x96, 0x2a, 0x01, 0x98, 0x8f, 0xd6, 0xc1, 0xf2, 0xb0, 0x4f, 0x52, 0x0a, 0xf4, 0x67, 0xdb, 0x70, + 0xad, 0x88, 0x7d, 0x42, 0x7b, 0xc0, 0x56, 0x7e, 0x9a, 0x58, 0x17, 0xa7, 0xf1, 0xbc, 0xa8, 0x1b, + 0xd7, 0xc8, 0xbe, 0x7d, 0xf4, 0x2a, 0x63, 0x79, 0x89, 0xbe, 0xb0, 0xeb, 0x34, 0x38, 0x9b, 0x53, + 0xe3, 0xd4, 0xb5, 0xab, 0x10, 0xc4, 0xc7, 0x5e, 0x11, 0xf3, 0xcb, 0x32, 0x65, 0x4b, 0x36, 0xcd, + 0x2a, 0x32, 0x31, 0xf7, 0x64, 0xd2, 0x8e, 0x8f, 0x9a, 0x82, 0x6b, 0xd7, 0xf6, 0xf3, 0xf4, 0x60, + 0x1f, 
0x26, 0x36, 0x1f, 0xef, 0x46, 0x77, 0x6b, 0x1f, 0x0e, 0xef, 0x9c, 0x43, 0x87, 0x3e, 0xbb, + 0x7f, 0xdf, 0x87, 0x02, 0x9b, 0x3e, 0x5b, 0x17, 0xeb, 0x91, 0x43, 0x91, 0xa6, 0x90, 0xfe, 0x6b, + 0xb3, 0xbb, 0x23, 0xdb, 0xd9, 0x50, 0x17, 0xc4, 0x75, 0x34, 0x39, 0x3a, 0x53, 0x7d, 0x1e, 0xdc, + 0xfa, 0x36, 0xbe, 0xc1, 0x9d, 0x2e, 0xf3, 0x6e, 0xf8, 0x6e, 0x5b, 0x7a, 0x2e, 0x2d, 0x7a, 0x0d, + 0xef, 0x34, 0x05, 0x6b, 0x55, 0x94, 0x83, 0x6c, 0x50, 0x01, 0x67, 0x74, 0x1a, 0x2a, 0x36, 0xe6, + 0x3a, 0xda, 0x14, 0x71, 0x79, 0x26, 0x8c, 0x7b, 0x61, 0x9a, 0x56, 0xf0, 0xad, 0xe9, 0xb3, 0xf3, + 0xa0, 0x4e, 0x6a, 0xe0, 0x4d, 0x02, 0xb8, 0x8d, 0x06, 0x78, 0x78, 0x2f, 0x26, 0x70, 0x19, 0x75, + 0x57, 0xdc, 0xf5, 0x06, 0x44, 0xb1, 0xce, 0xa4, 0x1d, 0xdc, 0xab, 0x31, 0xfc, 0x67, 0x2c, 0xad, + 0x75, 0x22, 0x77, 0x76, 0xa6, 0x71, 0x86, 0x28, 0xeb, 0x93, 0xc2, 0xfd, 0xfe, 0x11, 0xbe, 0xc7, + 0x0c, 0x86, 0x79, 0x34, 0xc2, 0x8c, 0x4e, 0xe4, 0x1d, 0xfc, 0xde, 0x07, 0xb9, 0x53, 0x2f, 0xee, + 0x78, 0x60, 0xde, 0xad, 0x02, 0x1b, 0x50, 0xb0, 0x2f, 0xbd, 0xbf, 0x31, 0xf6, 0xe5, 0x89, 0xcd, + 0x4b, 0xbb, 0x41, 0x4d, 0xa2, 0xfe, 0x03, 0x2c, 0x98, 0x9b, 0xbe, 0x60, 0xae, 0xfa, 0xc0, 0xbf, + 0x87, 0x73, 0x23, 0xec, 0xed, 0x79, 0xaa, 0xd4, 0xfa, 0xa2, 0x99, 0xfb, 0xb7, 0xde, 0x57, 0x41, + 0x9c, 0x00, 0x4a, 0x22, 0x1d, 0xd0, 0xd1, 0x5c, 0x86, 0x86, 0xdc, 0x2c, 0x46, 0xeb, 0xe3, 0x4f, + 0x7a, 0x43, 0xee, 0x17, 0x3c, 0x2b, 0xe0, 0x5e, 0x3e, 0x6c, 0x1c, 0x7b, 0x7a, 0xea, 0x53, 0x92, + 0x08, 0x9d, 0x1c, 0x71, 0x42, 0xb0, 0x22, 0x38, 0x25, 0xfc, 0xfd, 0x82, 0xbb, 0x05, 0x67, 0x04, + 0x1f, 0x11, 0xbc, 0x42, 0x23, 0x06, 0x81, 0x8c, 0xe0, 0x96, 0x40, 0xdb, 0x0e, 0x29, 0xf1, 0x21, + 0xae, 0x1f, 0x80, 0x18, 0x31, 0x62, 0xc4, 0x88, 0x11, 0x23, 0x46, 0x8c, 0x18, 0x31, 0x62, 0xfc, + 0x07, 0xfc, 0x01, 0x6a, 0x8a, 0x29, 0x3b, 0x00, 0x10, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveGspRmBoot_TU102_ucode_image_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 4096, // uncompressed data size (bytes) + 811, // compressed data size (bytes) + kgspBinArchiveGspRmBoot_TU102_ucode_image_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: kgspGetBinArchiveGspRmBoot_TU102("ucode_desc") +// FILE NAME: kernel/inc/gsprm/bin/g_gsprm_tu10x_riscv_desc.bin +// FILE TYPE: BINARY +// VAR NAME: N/A +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 76 +// COMPRESSED SIZE (bytes): 21 +// +static BINDATA_CONST NvU8 kgspBinArchiveGspRmBoot_TU102_ucode_desc_data[] = +{ + 0x63, 0x61, 0x80, 0x80, 0x0e, 0x16, 0x08, 0x16, 0x60, 0x20, 0x1f, 0x00, 0x00, 0xf5, 0x7a, 0xd2, + 0x3d, 0x4c, 0x00, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT kgspBinArchiveGspRmBoot_TU102_ucode_desc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 76, // uncompressed data size (bytes) + 21, // compressed data size (bytes) + kgspBinArchiveGspRmBoot_TU102_ucode_desc_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? 
+ NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __kgspGetBinArchiveGspRmBoot_TU102 = +{ + 2, // entryNum + { + // entries[] : { "name", pBinStorage } + { "ucode_image" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveGspRmBoot_TU102_ucode_image_storage_pvt }, + { "ucode_desc" , (const PBINDATA_STORAGE) &g_bindata_pvt.kgspBinArchiveGspRmBoot_TU102_ucode_desc_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *kgspGetBinArchiveGspRmBoot_TU102(struct KernelGsp *pKernelGsp) +{ + return &__kgspGetBinArchiveGspRmBoot_TU102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_bindata_ksec2GetBinArchiveBlUcode_TU102.c b/src/nvidia/generated/g_bindata_ksec2GetBinArchiveBlUcode_TU102.c new file mode 100644 index 000000000..011591088 --- /dev/null +++ b/src/nvidia/generated/g_bindata_ksec2GetBinArchiveBlUcode_TU102.c @@ -0,0 +1,147 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT! 
*/ + + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: ksec2GetBinArchiveBlUcode_TU102("ucode_image") +// FILE NAME: kernel/inc/sec2/bin/g_sec2_bl_gp10x.h +// FILE TYPE: TEXT +// VAR NAME: sec2_bl_gp10x +// COMPRESSION: YES +// COMPLEX_STRUCT: NO +// DATA SIZE (bytes): 768 +// COMPRESSED SIZE (bytes): 303 +// +static BINDATA_CONST NvU8 ksec2BinArchiveBlUcode_TU102_ucode_image_data[] = +{ + 0xed, 0x8f, 0xbf, 0x4e, 0xc2, 0x50, 0x14, 0xc6, 0x3f, 0x4a, 0x8b, 0xb7, 0x52, 0xa1, 0xad, 0x0e, + 0x24, 0x4c, 0x3e, 0x42, 0x77, 0xd3, 0xb8, 0xfa, 0x06, 0xba, 0xf9, 0x0e, 0xee, 0xee, 0x2c, 0x5a, + 0x93, 0x1a, 0x6f, 0x41, 0x5a, 0x07, 0xe2, 0x03, 0x40, 0x52, 0x19, 0x7a, 0x47, 0x66, 0x1d, 0x70, + 0xf4, 0xcf, 0xe2, 0xc0, 0x04, 0x26, 0x57, 0xd3, 0x96, 0xf4, 0xea, 0xad, 0x38, 0x1a, 0x67, 0x07, + 0xcf, 0x70, 0xbe, 0xe4, 0xcb, 0x39, 0xdf, 0xf9, 0x9d, 0x3b, 0xec, 0x02, 0x42, 0x05, 0x1b, 0x1c, + 0x9b, 0x05, 0x52, 0xa5, 0x03, 0x80, 0x86, 0x3a, 0xed, 0xaf, 0x67, 0x36, 0xed, 0xd9, 0xf1, 0x0b, + 0xf1, 0x9f, 0x5a, 0xf4, 0xb2, 0x59, 0x08, 0x8d, 0x76, 0x37, 0xe9, 0xd9, 0x16, 0xf5, 0x1a, 0xe2, + 0x1d, 0xf1, 0x3d, 0x11, 0x8f, 0xf8, 0x1a, 0x8e, 0x88, 0x9f, 0x11, 0x5f, 0x18, 0x45, 0xa8, 0x15, + 0x91, 0x26, 0x7a, 0xdf, 0xae, 0x41, 0x83, 0x3a, 0x9b, 0x26, 0xd1, 0xc2, 0x3d, 0x2a, 0x90, 0xf7, + 0xd5, 0x71, 0x80, 0x0a, 0xc6, 0x0f, 0xb2, 0x5d, 0x47, 0x9c, 0xbc, 0xa6, 0x6b, 0x23, 0xe0, 0x90, + 0xef, 0x28, 0xa7, 0x40, 0xad, 0x63, 0x02, 0x72, 0x97, 0xf1, 0x3c, 0xab, 0xa5, 0x55, 0xb9, 0x5f, + 0x11, 0xdd, 0x55, 0x4c, 0xb8, 0x31, 0x9c, 0x25, 0xcf, 0x0b, 0xf7, 0xe6, 0xe7, 0x8c, 0xd5, 0xa9, + 0x06, 0x0d, 0xf4, 0x98, 0x9b, 0xf1, 0x39, 0x49, 0x42, 0x4f, 0xf6, 0x12, 0xa3, 0xfb, 0x2b, 0x9c, + 0x49, 0x43, 0x2b, 0x7e, 0x23, 0xfe, 0xac, 0x55, 0xba, 0x17, 0x90, 0x6f, 0x8a, 0x39, 0x18, 0x77, + 0xb7, 0x05, 0x4e, 0x24, 0x50, 0x22, 0xbc, 0x3c, 0xd3, 0xc6, 0x1f, 0xe5, 0xa5, 0x25, 0x27, 0xf3, + 0xb4, 0xaa, 0x93, 0xdb, 0x00, 0x0b, 0x9f, 0xf0, 0x7a, 0x5b, 0x12, 0x57, 0xf6, 0x3c, 0xe4, 0x11, + 0x9a, 0x8a, 0xbe, 0x5f, 0x0a, 0xd1, 0x55, 0x29, 0xa3, 0x2b, 0x18, 0xc3, 0x76, 0x36, 0x71, 0x0f, + 0x04, 0xb2, 0x89, 0x7c, 0xcf, 0xe6, 0x8e, 0xc9, 0x1d, 0x8b, 0x3b, 0x36, 0xf3, 0x25, 0x10, 0x1b, + 0x2c, 0x2d, 0xfc, 0xd7, 0xdf, 0xaa, 0x4f, 0x55, 0x99, 0xb8, 0xc4, 0x00, 0x03, 0x00, 0x00, +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT ksec2BinArchiveBlUcode_TU102_ucode_image_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + 768, // uncompressed data size (bytes) + 303, // compressed data size (bytes) + ksec2BinArchiveBlUcode_TU102_ucode_image_data, // compressed data pointer + NV_TRUE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? 
(Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_DATA) +// +// FUNCTION: ksec2GetBinArchiveBlUcode_TU102("ucode_desc") +// FILE NAME: kernel/inc/sec2/bin/g_sec2_bl_gp10x.h +// FILE TYPE: TEXT +// VAR NAME: sec2_bl_gp10x_desc +// COMPRESSION: NO +// COMPLEX_STRUCT: YES +// DATA SIZE (bytes): sizeof(ksec2BinArchiveBlUcode_TU102_ucode_desc_data) +// COMPRESSED SIZE (bytes): N/A +// +static const RM_FLCN_BL_DESC ksec2BinArchiveBlUcode_TU102_ucode_desc_data = { + 0xfd, + 0, + { + 0x0, + 0x200, + 0x200, + 0x100 + } +}; +#endif // defined(BINDATA_INCLUDE_DATA) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) +BINDATA_STORAGE_PVT ksec2BinArchiveBlUcode_TU102_ucode_desc_storage_pvt; +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DECL) + +#if defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) +{ + sizeof(ksec2BinArchiveBlUcode_TU102_ucode_desc_data), // uncompressed data size (bytes) + sizeof(ksec2BinArchiveBlUcode_TU102_ucode_desc_data), // compressed data size (bytes) + &ksec2BinArchiveBlUcode_TU102_ucode_desc_data, // compressed data pointer + NV_FALSE, // is pData compressed? + NV_TRUE, // contain information for file overriding? + NV_FALSE, // is the data referenced during load? (Only valid when BINDATA_IS_MUTABLE is true) +}, +#endif // defined(BINDATA_INCLUDE_STORAGE_PVT_DEFN) + + +#if defined(BINDATA_INCLUDE_ARCHIVE) +// +// Bindata Archive structure +// +static const BINDATA_ARCHIVE __ksec2GetBinArchiveBlUcode_TU102 = +{ + 2, // entryNum + { + // entries[] : { "name", pBinStorage } + { "ucode_image" , (const PBINDATA_STORAGE) &g_bindata_pvt.ksec2BinArchiveBlUcode_TU102_ucode_image_storage_pvt }, + { "ucode_desc" , (const PBINDATA_STORAGE) &g_bindata_pvt.ksec2BinArchiveBlUcode_TU102_ucode_desc_storage_pvt }, + } +}; + +#endif // defined(BINDATA_INCLUDE_ARCHIVE) + + + +#if defined(BINDATA_INCLUDE_FUNCTION) +const BINDATA_ARCHIVE *ksec2GetBinArchiveBlUcode_TU102(struct KernelSec2 *pKernelSec2) +{ + return &__ksec2GetBinArchiveBlUcode_TU102; +} +#endif // defined(BINDATA_INCLUDE_FUNCTION) + + + + + diff --git a/src/nvidia/generated/g_channel_descendant_nvoc.c b/src/nvidia/generated/g_channel_descendant_nvoc.c new file mode 100644 index 000000000..8bbcdf94f --- /dev/null +++ b/src/nvidia/generated/g_channel_descendant_nvoc.c @@ -0,0 +1,409 @@ +#define NVOC_CHANNEL_DESCENDANT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_channel_descendant_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x43d7c4 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_funcTable_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct 
RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, PARAM_TO_ENGDESC_FUNCTION * arg_pParamToEngDescFn); +void __nvoc_init_dataField_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ChannelDescendant; + +static const struct NVOC_RTTI __nvoc_rtti_ChannelDescendant_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ChannelDescendant, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_ChannelDescendant_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ChannelDescendant, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ChannelDescendant_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ChannelDescendant, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ChannelDescendant_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ChannelDescendant, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ChannelDescendant_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ChannelDescendant, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ChannelDescendant_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ChannelDescendant, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ChannelDescendant_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ChannelDescendant, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ChannelDescendant_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ChannelDescendant, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_ChannelDescendant = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_ChannelDescendant_ChannelDescendant, + &__nvoc_rtti_ChannelDescendant_Notifier, + &__nvoc_rtti_ChannelDescendant_INotifier, + &__nvoc_rtti_ChannelDescendant_GpuResource, + &__nvoc_rtti_ChannelDescendant_RmResource, + &__nvoc_rtti_ChannelDescendant_RmResourceCommon, + &__nvoc_rtti_ChannelDescendant_RsResource, + &__nvoc_rtti_ChannelDescendant_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ChannelDescendant), + /*classId=*/ classId(ChannelDescendant), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ChannelDescendant", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ChannelDescendant, + /*pCastInfo=*/ &__nvoc_castinfo_ChannelDescendant, + /*pExportInfo=*/ &__nvoc_export_info_ChannelDescendant +}; + +static NV_STATUS __nvoc_thunk_ChannelDescendant_rmresCheckMemInterUnmap(struct RmResource *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return 
chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) - __nvoc_rtti_ChannelDescendant_RmResource.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_chandesShareCallback(struct ChannelDescendant *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ChannelDescendant_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_chandesMapTo(struct ChannelDescendant *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_chandesGetOrAllocNotifShare(struct ChannelDescendant *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ChannelDescendant_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_GpuResource_chandesGetMapAddrSpace(struct ChannelDescendant *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ChannelDescendant_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_chandesSetNotificationShare(struct ChannelDescendant *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ChannelDescendant_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_chandesGetRefCount(struct ChannelDescendant *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_chandesAddAdditionalDependants(struct RsClient *pClient, struct ChannelDescendant *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_chandesControl_Prologue(struct ChannelDescendant *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_chandesGetRegBaseOffsetAndSize(struct ChannelDescendant *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ChannelDescendant_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_chandesInternalControlForward(struct ChannelDescendant *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ChannelDescendant_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_chandesUnmapFrom(struct ChannelDescendant *pResource, 
RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_chandesControl_Epilogue(struct ChannelDescendant *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_chandesControlLookup(struct ChannelDescendant *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_chandesGetInternalObjectHandle(struct ChannelDescendant *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ChannelDescendant_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_chandesControl(struct ChannelDescendant *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ChannelDescendant_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_chandesUnmap(struct ChannelDescendant *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ChannelDescendant_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_chandesGetMemInterMapParams(struct ChannelDescendant *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ChannelDescendant_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_chandesGetMemoryMappingDescriptor(struct ChannelDescendant *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ChannelDescendant_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_chandesControlFilter(struct ChannelDescendant *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_chandesUnregisterEvent(struct ChannelDescendant *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ChannelDescendant_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_chandesCanCopy(struct ChannelDescendant *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_chandesPreDestruct(struct ChannelDescendant *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_ChannelDescendant_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_chandesGetNotificationListPtr(struct ChannelDescendant *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ChannelDescendant_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_chandesGetNotificationShare(struct ChannelDescendant *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ChannelDescendant_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_chandesMap(struct ChannelDescendant *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ChannelDescendant_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_chandesAccessCallback(struct ChannelDescendant *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ChannelDescendant_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_ChannelDescendant = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_ChannelDescendant(ChannelDescendant *pThis) { + __nvoc_chandesDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ChannelDescendant(ChannelDescendant *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, PARAM_TO_ENGDESC_FUNCTION * arg_pParamToEngDescFn) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ChannelDescendant_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_ChannelDescendant_fail_Notifier; + __nvoc_init_dataField_ChannelDescendant(pThis, pRmhalspecowner); + + status = __nvoc_chandesConstruct(pThis, arg_pCallContext, arg_pParams, arg_pParamToEngDescFn); + if (status != NV_OK) goto __nvoc_ctor_ChannelDescendant_fail__init; + goto __nvoc_ctor_ChannelDescendant_exit; // Success + +__nvoc_ctor_ChannelDescendant_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_ChannelDescendant_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_ChannelDescendant_fail_GpuResource: 
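/*
 * Editorial annotation (not part of the generated file): the fail_* labels
 * above unwind base-class construction in reverse order. A failure in
 * __nvoc_chandesConstruct destroys the Notifier base and then the GpuResource
 * base; a Notifier constructor failure destroys only the GpuResource base; a
 * GpuResource constructor failure has nothing to unwind and falls through to
 * the common exit label below.
 */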
+__nvoc_ctor_ChannelDescendant_exit: + + return status; +} + +static void __nvoc_init_funcTable_ChannelDescendant_1(ChannelDescendant *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__chandesGetSwMethods__ = &chandesGetSwMethods_IMPL; + + pThis->__chandesIsSwMethodStalling__ = &chandesIsSwMethodStalling_IMPL; + + pThis->__chandesCheckMemInterUnmap__ = &chandesCheckMemInterUnmap_IMPL; + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__rmresCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_rmresCheckMemInterUnmap; + + pThis->__chandesShareCallback__ = &__nvoc_thunk_GpuResource_chandesShareCallback; + + pThis->__chandesMapTo__ = &__nvoc_thunk_RsResource_chandesMapTo; + + pThis->__chandesGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_chandesGetOrAllocNotifShare; + + pThis->__chandesGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_chandesGetMapAddrSpace; + + pThis->__chandesSetNotificationShare__ = &__nvoc_thunk_Notifier_chandesSetNotificationShare; + + pThis->__chandesGetRefCount__ = &__nvoc_thunk_RsResource_chandesGetRefCount; + + pThis->__chandesAddAdditionalDependants__ = &__nvoc_thunk_RsResource_chandesAddAdditionalDependants; + + pThis->__chandesControl_Prologue__ = &__nvoc_thunk_RmResource_chandesControl_Prologue; + + pThis->__chandesGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_chandesGetRegBaseOffsetAndSize; + + pThis->__chandesInternalControlForward__ = &__nvoc_thunk_GpuResource_chandesInternalControlForward; + + pThis->__chandesUnmapFrom__ = &__nvoc_thunk_RsResource_chandesUnmapFrom; + + pThis->__chandesControl_Epilogue__ = &__nvoc_thunk_RmResource_chandesControl_Epilogue; + + pThis->__chandesControlLookup__ = &__nvoc_thunk_RsResource_chandesControlLookup; + + pThis->__chandesGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_chandesGetInternalObjectHandle; + + pThis->__chandesControl__ = &__nvoc_thunk_GpuResource_chandesControl; + + pThis->__chandesUnmap__ = &__nvoc_thunk_GpuResource_chandesUnmap; + + pThis->__chandesGetMemInterMapParams__ = &__nvoc_thunk_RmResource_chandesGetMemInterMapParams; + + pThis->__chandesGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_chandesGetMemoryMappingDescriptor; + + pThis->__chandesControlFilter__ = &__nvoc_thunk_RsResource_chandesControlFilter; + + pThis->__chandesUnregisterEvent__ = &__nvoc_thunk_Notifier_chandesUnregisterEvent; + + pThis->__chandesCanCopy__ = &__nvoc_thunk_RsResource_chandesCanCopy; + + pThis->__chandesPreDestruct__ = &__nvoc_thunk_RsResource_chandesPreDestruct; + + pThis->__chandesGetNotificationListPtr__ = &__nvoc_thunk_Notifier_chandesGetNotificationListPtr; + + pThis->__chandesGetNotificationShare__ = &__nvoc_thunk_Notifier_chandesGetNotificationShare; + + pThis->__chandesMap__ = &__nvoc_thunk_GpuResource_chandesMap; + + pThis->__chandesAccessCallback__ = &__nvoc_thunk_RmResource_chandesAccessCallback; +} + +void __nvoc_init_funcTable_ChannelDescendant(ChannelDescendant *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_ChannelDescendant_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_ChannelDescendant(ChannelDescendant *pThis, RmHalspecOwner 
*pRmhalspecowner) { + pThis->__nvoc_pbase_ChannelDescendant = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_ChannelDescendant(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_ChannelDescendant(ChannelDescendant **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, PARAM_TO_ENGDESC_FUNCTION * arg_pParamToEngDescFn) { + NV_STATUS status; + Object *pParentObj; + ChannelDescendant *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(ChannelDescendant)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(ChannelDescendant)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ChannelDescendant); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_ChannelDescendant(pThis, pRmhalspecowner); + status = __nvoc_ctor_ChannelDescendant(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams, arg_pParamToEngDescFn); + if (status != NV_OK) goto __nvoc_objCreate_ChannelDescendant_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_ChannelDescendant_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ChannelDescendant(ChannelDescendant **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + PARAM_TO_ENGDESC_FUNCTION * arg_pParamToEngDescFn = va_arg(args, PARAM_TO_ENGDESC_FUNCTION *); + + status = __nvoc_objCreate_ChannelDescendant(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams, arg_pParamToEngDescFn); + + return status; +} + diff --git a/src/nvidia/generated/g_channel_descendant_nvoc.h b/src/nvidia/generated/g_channel_descendant_nvoc.h new file mode 100644 index 000000000..2d9f1db87 --- /dev/null +++ b/src/nvidia/generated/g_channel_descendant_nvoc.h @@ -0,0 +1,388 @@ +#ifndef _G_CHANNEL_DESCENDANT_NVOC_H_ +#define _G_CHANNEL_DESCENDANT_NVOC_H_ +#include "nvoc/runtime.h" + 
+#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_channel_descendant_nvoc.h" + +#ifndef _CHANNEL_DESCENDANT_H_ +#define _CHANNEL_DESCENDANT_H_ + +#include "core/core.h" +#include "rmapi/event.h" + +#include "containers/btree.h" +#include "resserv/rs_resource.h" +#include "gpu/gpu_resource.h" +#include "gpu/gpu_resource_desc.h" +#include "kernel/gpu/gpu_halspec.h" + +struct ChannelDescendant; + +#ifndef __NVOC_CLASS_ChannelDescendant_TYPEDEF__ +#define __NVOC_CLASS_ChannelDescendant_TYPEDEF__ +typedef struct ChannelDescendant ChannelDescendant; +#endif /* __NVOC_CLASS_ChannelDescendant_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ChannelDescendant +#define __nvoc_class_id_ChannelDescendant 0x43d7c4 +#endif /* __nvoc_class_id_ChannelDescendant */ + + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + + +/*! + * Definitions for SW methods (emulation of pushbuffer methods in SW) + */ +typedef struct _METHOD METHOD, *PMETHOD; + +typedef NV_STATUS (*METHODPROC)(OBJGPU *, struct ChannelDescendant *, PMETHOD, NvU32, NvV32); + +struct _METHOD +{ + METHODPROC Proc; + NvU32 Low; + NvU32 High; +}; + +/*! + * Determines id engine that should handle the resource allocation. Used when + * there are multiple engines that support the same class id (e.g.: Copy + * Engine). + */ +typedef ENGDESCRIPTOR PARAM_TO_ENGDESC_FUNCTION(OBJGPU *pGpu, NvU32 externalClassId, + void *pAllocParams); + +/*! 
+ * Abstract base class for descendants of XXX_CHANNEL_DMA (Channel) + */ +#ifdef NVOC_CHANNEL_DESCENDANT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct ChannelDescendant { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + NV_STATUS (*__chandesGetSwMethods__)(struct ChannelDescendant *, METHOD **, NvU32 *); + NvBool (*__chandesIsSwMethodStalling__)(struct ChannelDescendant *, NvU32); + NV_STATUS (*__chandesCheckMemInterUnmap__)(struct ChannelDescendant *, NvBool); + NvBool (*__chandesShareCallback__)(struct ChannelDescendant *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__chandesMapTo__)(struct ChannelDescendant *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__chandesGetOrAllocNotifShare__)(struct ChannelDescendant *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__chandesGetMapAddrSpace__)(struct ChannelDescendant *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__chandesSetNotificationShare__)(struct ChannelDescendant *, struct NotifShare *); + NvU32 (*__chandesGetRefCount__)(struct ChannelDescendant *); + void (*__chandesAddAdditionalDependants__)(struct RsClient *, struct ChannelDescendant *, RsResourceRef *); + NV_STATUS (*__chandesControl_Prologue__)(struct ChannelDescendant *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__chandesGetRegBaseOffsetAndSize__)(struct ChannelDescendant *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__chandesInternalControlForward__)(struct ChannelDescendant *, NvU32, void *, NvU32); + NV_STATUS (*__chandesUnmapFrom__)(struct ChannelDescendant *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__chandesControl_Epilogue__)(struct ChannelDescendant *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__chandesControlLookup__)(struct ChannelDescendant *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__chandesGetInternalObjectHandle__)(struct ChannelDescendant *); + NV_STATUS (*__chandesControl__)(struct ChannelDescendant *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__chandesUnmap__)(struct ChannelDescendant *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__chandesGetMemInterMapParams__)(struct ChannelDescendant *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__chandesGetMemoryMappingDescriptor__)(struct ChannelDescendant *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__chandesControlFilter__)(struct ChannelDescendant *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__chandesUnregisterEvent__)(struct ChannelDescendant *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__chandesCanCopy__)(struct ChannelDescendant *); + void (*__chandesPreDestruct__)(struct ChannelDescendant *); + PEVENTNOTIFICATION *(*__chandesGetNotificationListPtr__)(struct ChannelDescendant *); + struct NotifShare *(*__chandesGetNotificationShare__)(struct ChannelDescendant *); + NV_STATUS (*__chandesMap__)(struct 
ChannelDescendant *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__chandesAccessCallback__)(struct ChannelDescendant *, struct RsClient *, void *, RsAccessRight); + struct KernelChannel *pKernelChannel; + GPU_RESOURCE_DESC resourceDesc; + NvU16 classID; + NvU32 notifyAction; + NvBool bNotifyTrigger; +}; + +#ifndef __NVOC_CLASS_ChannelDescendant_TYPEDEF__ +#define __NVOC_CLASS_ChannelDescendant_TYPEDEF__ +typedef struct ChannelDescendant ChannelDescendant; +#endif /* __NVOC_CLASS_ChannelDescendant_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ChannelDescendant +#define __nvoc_class_id_ChannelDescendant 0x43d7c4 +#endif /* __nvoc_class_id_ChannelDescendant */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +#define __staticCast_ChannelDescendant(pThis) \ + ((pThis)->__nvoc_pbase_ChannelDescendant) + +#ifdef __nvoc_channel_descendant_h_disabled +#define __dynamicCast_ChannelDescendant(pThis) ((ChannelDescendant*)NULL) +#else //__nvoc_channel_descendant_h_disabled +#define __dynamicCast_ChannelDescendant(pThis) \ + ((ChannelDescendant*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ChannelDescendant))) +#endif //__nvoc_channel_descendant_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_ChannelDescendant(ChannelDescendant**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ChannelDescendant(ChannelDescendant**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, PARAM_TO_ENGDESC_FUNCTION * arg_pParamToEngDescFn); +#define __objCreate_ChannelDescendant(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams, arg_pParamToEngDescFn) \ + __nvoc_objCreate_ChannelDescendant((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams, arg_pParamToEngDescFn) + +#define chandesGetSwMethods(pChannelDescendant, ppMethods, pNumMethods) chandesGetSwMethods_DISPATCH(pChannelDescendant, ppMethods, pNumMethods) +#define chandesIsSwMethodStalling(pChannelDescendant, hHandle) chandesIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define chandesCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) chandesCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define chandesShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) chandesShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define chandesMapTo(pResource, pParams) chandesMapTo_DISPATCH(pResource, pParams) +#define chandesGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) chandesGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define chandesGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) chandesGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define chandesSetNotificationShare(pNotifier, pNotifShare) chandesSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define chandesGetRefCount(pResource) chandesGetRefCount_DISPATCH(pResource) +#define chandesAddAdditionalDependants(pClient, pResource, pReference) chandesAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define chandesControl_Prologue(pResource, pCallContext, pParams) chandesControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define chandesGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) chandesGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define 
chandesInternalControlForward(pGpuResource, command, pParams, size) chandesInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define chandesUnmapFrom(pResource, pParams) chandesUnmapFrom_DISPATCH(pResource, pParams) +#define chandesControl_Epilogue(pResource, pCallContext, pParams) chandesControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define chandesControlLookup(pResource, pParams, ppEntry) chandesControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define chandesGetInternalObjectHandle(pGpuResource) chandesGetInternalObjectHandle_DISPATCH(pGpuResource) +#define chandesControl(pGpuResource, pCallContext, pParams) chandesControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define chandesUnmap(pGpuResource, pCallContext, pCpuMapping) chandesUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define chandesGetMemInterMapParams(pRmResource, pParams) chandesGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define chandesGetMemoryMappingDescriptor(pRmResource, ppMemDesc) chandesGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define chandesControlFilter(pResource, pCallContext, pParams) chandesControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define chandesUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) chandesUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define chandesCanCopy(pResource) chandesCanCopy_DISPATCH(pResource) +#define chandesPreDestruct(pResource) chandesPreDestruct_DISPATCH(pResource) +#define chandesGetNotificationListPtr(pNotifier) chandesGetNotificationListPtr_DISPATCH(pNotifier) +#define chandesGetNotificationShare(pNotifier) chandesGetNotificationShare_DISPATCH(pNotifier) +#define chandesMap(pGpuResource, pCallContext, pParams, pCpuMapping) chandesMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define chandesAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) chandesAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline void chandesIsolateOnDestruct_b3696a(struct ChannelDescendant *pChannelDescendant) { + return; +} + +#ifdef __nvoc_channel_descendant_h_disabled +static inline void chandesIsolateOnDestruct(struct ChannelDescendant *pChannelDescendant) { + NV_ASSERT_FAILED_PRECOMP("ChannelDescendant was disabled!"); +} +#else //__nvoc_channel_descendant_h_disabled +#define chandesIsolateOnDestruct(pChannelDescendant) chandesIsolateOnDestruct_b3696a(pChannelDescendant) +#endif //__nvoc_channel_descendant_h_disabled + +#define chandesIsolateOnDestruct_HAL(pChannelDescendant) chandesIsolateOnDestruct(pChannelDescendant) + +static inline void chandesDestroy_b3696a(struct ChannelDescendant *pChannelDescendant) { + return; +} + +#ifdef __nvoc_channel_descendant_h_disabled +static inline void chandesDestroy(struct ChannelDescendant *pChannelDescendant) { + NV_ASSERT_FAILED_PRECOMP("ChannelDescendant was disabled!"); +} +#else //__nvoc_channel_descendant_h_disabled +#define chandesDestroy(pChannelDescendant) chandesDestroy_b3696a(pChannelDescendant) +#endif //__nvoc_channel_descendant_h_disabled + +#define chandesDestroy_HAL(pChannelDescendant) chandesDestroy(pChannelDescendant) + +NV_STATUS chandesGetSwMethods_IMPL(struct ChannelDescendant *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods); + +static inline NV_STATUS chandesGetSwMethods_DISPATCH(struct ChannelDescendant *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return 
pChannelDescendant->__chandesGetSwMethods__(pChannelDescendant, ppMethods, pNumMethods); +} + +NvBool chandesIsSwMethodStalling_IMPL(struct ChannelDescendant *pChannelDescendant, NvU32 hHandle); + +static inline NvBool chandesIsSwMethodStalling_DISPATCH(struct ChannelDescendant *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__chandesIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +NV_STATUS chandesCheckMemInterUnmap_IMPL(struct ChannelDescendant *pChannelDescendant, NvBool bSubdeviceHandleProvided); + +static inline NV_STATUS chandesCheckMemInterUnmap_DISPATCH(struct ChannelDescendant *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__chandesCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool chandesShareCallback_DISPATCH(struct ChannelDescendant *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__chandesShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS chandesMapTo_DISPATCH(struct ChannelDescendant *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__chandesMapTo__(pResource, pParams); +} + +static inline NV_STATUS chandesGetOrAllocNotifShare_DISPATCH(struct ChannelDescendant *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__chandesGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS chandesGetMapAddrSpace_DISPATCH(struct ChannelDescendant *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__chandesGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void chandesSetNotificationShare_DISPATCH(struct ChannelDescendant *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__chandesSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 chandesGetRefCount_DISPATCH(struct ChannelDescendant *pResource) { + return pResource->__chandesGetRefCount__(pResource); +} + +static inline void chandesAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ChannelDescendant *pResource, RsResourceRef *pReference) { + pResource->__chandesAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS chandesControl_Prologue_DISPATCH(struct ChannelDescendant *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__chandesControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS chandesGetRegBaseOffsetAndSize_DISPATCH(struct ChannelDescendant *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__chandesGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS chandesInternalControlForward_DISPATCH(struct ChannelDescendant *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__chandesInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS chandesUnmapFrom_DISPATCH(struct ChannelDescendant *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__chandesUnmapFrom__(pResource, pParams); +} + +static inline void chandesControl_Epilogue_DISPATCH(struct ChannelDescendant *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__chandesControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS chandesControlLookup_DISPATCH(struct ChannelDescendant *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__chandesControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle chandesGetInternalObjectHandle_DISPATCH(struct ChannelDescendant *pGpuResource) { + return pGpuResource->__chandesGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS chandesControl_DISPATCH(struct ChannelDescendant *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__chandesControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS chandesUnmap_DISPATCH(struct ChannelDescendant *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__chandesUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS chandesGetMemInterMapParams_DISPATCH(struct ChannelDescendant *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__chandesGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS chandesGetMemoryMappingDescriptor_DISPATCH(struct ChannelDescendant *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__chandesGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS chandesControlFilter_DISPATCH(struct ChannelDescendant *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__chandesControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS chandesUnregisterEvent_DISPATCH(struct ChannelDescendant *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__chandesUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool chandesCanCopy_DISPATCH(struct ChannelDescendant *pResource) { + return pResource->__chandesCanCopy__(pResource); +} + +static inline void chandesPreDestruct_DISPATCH(struct ChannelDescendant *pResource) { + pResource->__chandesPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *chandesGetNotificationListPtr_DISPATCH(struct ChannelDescendant *pNotifier) { + return pNotifier->__chandesGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *chandesGetNotificationShare_DISPATCH(struct ChannelDescendant *pNotifier) { + return pNotifier->__chandesGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS chandesMap_DISPATCH(struct ChannelDescendant *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__chandesMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool chandesAccessCallback_DISPATCH(struct ChannelDescendant *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__chandesAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS chandesConstruct_IMPL(struct ChannelDescendant *arg_pChannelDescendant, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams, PARAM_TO_ENGDESC_FUNCTION *arg_pParamToEngDescFn); 
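/*
 * Editorial annotation (not part of the generated header): the __nvoc_chandes*
 * macros below simply alias the _IMPL constructor and destructor so that the
 * generated code in g_channel_descendant_nvoc.c (see __nvoc_ctor_ChannelDescendant
 * and __nvoc_dtor_ChannelDescendant earlier in this diff) can call them by their
 * NVOC-internal names.
 */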
+#define __nvoc_chandesConstruct(arg_pChannelDescendant, arg_pCallContext, arg_pParams, arg_pParamToEngDescFn) chandesConstruct_IMPL(arg_pChannelDescendant, arg_pCallContext, arg_pParams, arg_pParamToEngDescFn) +void chandesDestruct_IMPL(struct ChannelDescendant *pChannelDescendant); +#define __nvoc_chandesDestruct(pChannelDescendant) chandesDestruct_IMPL(pChannelDescendant) +#undef PRIVATE_FIELD + + +//--------------------------------------------------------------------------- +// +// Method prototypes. +// +//--------------------------------------------------------------------------- + +NV_STATUS mthdNoOperation(OBJGPU *, struct ChannelDescendant *, PMETHOD, NvU32, NvU32); + +#endif // _CHANNEL_DESCENDANT_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CHANNEL_DESCENDANT_NVOC_H_ diff --git a/src/nvidia/generated/g_chips2halspec.h b/src/nvidia/generated/g_chips2halspec.h new file mode 100644 index 000000000..7fdb06871 --- /dev/null +++ b/src/nvidia/generated/g_chips2halspec.h @@ -0,0 +1,3 @@ + +#include "g_chips2halspec_nvoc.h" + diff --git a/src/nvidia/generated/g_chips2halspec_nvoc.c b/src/nvidia/generated/g_chips2halspec_nvoc.c new file mode 100644 index 000000000..6bb8d21ed --- /dev/null +++ b/src/nvidia/generated/g_chips2halspec_nvoc.c @@ -0,0 +1,105 @@ +#define NVOC_CHIPS2HALSPEC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_chips2halspec_nvoc.h" + +void __nvoc_init_halspec_ChipHal(ChipHal *pChipHal, NvU32 arch, NvU32 impl, NvU32 hidrev) +{ + // TU102 + if(arch == 0x16 && impl == 0x2) + { + pChipHal->__nvoc_HalVarIdx = 37; + } + // TU104 + else if(arch == 0x16 && impl == 0x4) + { + pChipHal->__nvoc_HalVarIdx = 38; + } + // TU106 + else if(arch == 0x16 && impl == 0x6) + { + pChipHal->__nvoc_HalVarIdx = 39; + } + // TU116 + else if(arch == 0x16 && impl == 0x8) + { + pChipHal->__nvoc_HalVarIdx = 40; + } + // TU117 + else if(arch == 0x16 && impl == 0x7) + { + pChipHal->__nvoc_HalVarIdx = 41; + } + // GA100 + else if(arch == 0x17 && impl == 0x0) + { + pChipHal->__nvoc_HalVarIdx = 42; + } + // GA102 + else if(arch == 0x17 && impl == 0x2) + { + pChipHal->__nvoc_HalVarIdx = 43; + } + // GA103 + else if(arch == 0x17 && impl == 0x3) + { + pChipHal->__nvoc_HalVarIdx = 44; + } + // GA104 + else if(arch == 0x17 && impl == 0x4) + { + pChipHal->__nvoc_HalVarIdx = 45; + } + // GA106 + else if(arch == 0x17 && impl == 0x6) + { + pChipHal->__nvoc_HalVarIdx = 46; + } + // GA107 + else if(arch == 0x17 && impl == 0x7) + { + pChipHal->__nvoc_HalVarIdx = 47; + } +} + +void __nvoc_init_halspec_RmVariantHal(RmVariantHal *pRmVariantHal, RM_RUNTIME_VARIANT rmVariant) +{ + // PF_KERNEL_ONLY + if(rmVariant == 0x2) + { + pRmVariantHal->__nvoc_HalVarIdx = 1; + } +} + +void __nvoc_init_halspec_DispIpHal(DispIpHal *pDispIpHal, NvU32 ipver) +{ + // DISPv0400 + if(ipver == 0x4000000) + { + pDispIpHal->__nvoc_HalVarIdx = 10; + } + // DISPv0401 + else if(ipver == 0x4010000) + { + pDispIpHal->__nvoc_HalVarIdx = 11; + } + // DISPv0000 + else if(ipver == 0x0) + { + pDispIpHal->__nvoc_HalVarIdx = 15; + } +} + +void __nvoc_init_halspec_DpuIpHal(DpuIpHal *pDpuIpHal, NvU32 ipver) +{ + // DPUv0000 + if(ipver == 0x0) + { + pDpuIpHal->__nvoc_HalVarIdx = 5; + } +} + diff --git a/src/nvidia/generated/g_chips2halspec_nvoc.h b/src/nvidia/generated/g_chips2halspec_nvoc.h new file mode 100644 index 000000000..a03cd49de --- /dev/null +++ 
b/src/nvidia/generated/g_chips2halspec_nvoc.h @@ -0,0 +1,120 @@ +#ifndef _G_CHIPS2HALSPEC_NVOC_H_ +#define _G_CHIPS2HALSPEC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "g_chips2halspec_nvoc.h" + +#ifndef _CHIPS_2_HALSPEC_H_ +#define _CHIPS_2_HALSPEC_H_ + +#include "nvtypes.h" +#include "rmconfig.h" + +// Several WARs that only visible by NVOC compiler + +#define GPUHAL_ARCH(x) NV_PMC_BOOT_0_ARCHITECTURE_##x +#define GPUHAL_IMPL(x) NV_PMC_BOOT_0_IMPLEMENTATION_##x + +// Create alias 'group' to provide a concise syntax +#define group variant_group + +// Use in hal block to indicate that the function isn't wried to any enabled chips +#define __disabled__ false + +struct ChipHal { + unsigned short __nvoc_HalVarIdx; +}; +typedef struct ChipHal ChipHal; +void __nvoc_init_halspec_ChipHal(ChipHal*, NvU32, NvU32, NvU32); + +/* + * RM Runtime Variant Halspec + * + * One group of Hal Variants that presents two perspectives: + * + * Operating Environment Perspective: VF / PF / UCODE + * VF | PF | UCODE = true + * VF & PF & UCODE = false + * + * VF : RM is running in VGPU Guest environment. Equals to IS_VIRTUAL(pGpu) + * PF : RM is running in Host/Baremetal in standard PCIE environment + * UCODE : RM is running on microcontroller + * + * Functionality-Based Perspective: KERNEL_ONLY / PHYSICAL_ONLY / MONOLITHIC + * KERNEL_ONLY | PHYSICAL_ONLY | MONOLITHIC = true + * KERNEL_ONLY & PHYSICAL_ONLY & MONOLITHIC = false + * + * KERNEL_ONLY : RM does not own HW. The physical part is offloaded to Ucode. + * PHYSICAL_ONLY : RM owns HW but does not expose services to RM Clients + * MONOLITHIC : RM owns both the interface to the client and the underlying HW. + * + * Note: GSP Client "IS_GSP_CLIENT(pGpu) maps to "PF_KERNEL_ONLY" + * DCE Client maps to "PF_KERNEL_ONLY & T234D" + * + * + * HAL Variants + * +--------+ +----------------+ + * | VF | <-----| VF |--+ + * +--------+ +----------------+ | +---------------+ + * |--> | KERNEL_ONLY | + * +----------------+ | +---------------+ + * +--| PF_KERNEL_ONLY |--+ + * +--------+ | +----------------+ + * | PF | <--| + * +--------+ | +----------------+ +---------------+ + * +--| PF_MONOLITHIC |-----> | MONOLITHIC | + * +----------------+ +---------------+ + * + * +--------+ +----------------+ +---------------+ + * | UCODE | <-----| UCODE |-----> | PHYSICAL_ONLY | + * +--------+ +----------------+ +---------------+ + * + * */ +typedef enum _RM_RUNTIME_VARIANT { + RM_RUNTIME_VARIANT_VF = 1, + RM_RUNTIME_VARIANT_PF_KERNEL_ONLY = 2, + RM_RUNTIME_VARIANT_PF_MONOLITHIC = 3, + RM_RUNTIME_VARIANT_UCODE = 4, +} RM_RUNTIME_VARIANT; + +struct RmVariantHal { + unsigned short __nvoc_HalVarIdx; +}; +typedef struct RmVariantHal RmVariantHal; +void __nvoc_init_halspec_RmVariantHal(RmVariantHal*, RM_RUNTIME_VARIANT); + +/* DISP IP versions */ +struct DispIpHal { + unsigned short __nvoc_HalVarIdx; +}; +typedef struct DispIpHal DispIpHal; +void __nvoc_init_halspec_DispIpHal(DispIpHal*, NvU32); + +/* The 'delete' rules for DispIpHal and ChipHal */ +// delete DISPv0400 & ~(TU102 | TU104 | TU106 | TU116 | TU117); +// delete ~DISPv0400 & (TU102 | TU104 | TU106 | TU116 | TU117); +// delete DISPv0401 & ~(GA102 | GA103 | GA104 | GA106 | GA107); +// delete ~DISPv0401 & (GA102 | GA103 | GA104 | GA106 | GA107); + + +/* DPU IP versions */ +struct DpuIpHal { + unsigned short __nvoc_HalVarIdx; +}; +typedef struct DpuIpHal DpuIpHal; +void __nvoc_init_halspec_DpuIpHal(DpuIpHal*, NvU32); + +/* The 'delete' rules for DpuIpHal and ChipHal */ + + +#undef group 
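
The halspec initializers declared here reduce chip identification to a single variant index: __nvoc_init_halspec_ChipHal() keys off the architecture and implementation fields of NV_PMC_BOOT_0, while the runtime-variant and IP-version specs take their selectors directly. A hedged usage sketch, assuming only the functions and types declared in this header; the GA100 values and the wrapper function are illustrative, not taken from the driver.

/* Illustrative only: driving the generated halspec initializers for the GA100 case
 * handled in g_chips2halspec_nvoc.c above. */
#include "g_chips2halspec_nvoc.h"

static void initHalSpecsForGa100(void)
{
    ChipHal      chipHal;
    RmVariantHal rmVariantHal;
    DispIpHal    dispIpHal;
    DpuIpHal     dpuIpHal;

    /* arch 0x17, impl 0x0 selects the GA100 variant (index 42 in the table above) */
    __nvoc_init_halspec_ChipHal(&chipHal, 0x17, 0x0, 0x0);

    /* a GSP-client style build runs the kernel-only PF variant (index 1) */
    __nvoc_init_halspec_RmVariantHal(&rmVariantHal, RM_RUNTIME_VARIANT_PF_KERNEL_ONLY);

    /* GA100 has no display engine, so DISP and DPU IP versions fall back to 0 */
    __nvoc_init_halspec_DispIpHal(&dispIpHal, 0x0);
    __nvoc_init_halspec_DpuIpHal(&dpuIpHal, 0x0);
}

Collapsing many compile-time chip configurations into one small __nvoc_HalVarIdx lets the generated code pick HAL function variants with a single table lookup at runtime.
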
+#endif /* _CHIPS_2_HALSPEC_H_ */ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CHIPS2HALSPEC_NVOC_H_ diff --git a/src/nvidia/generated/g_chipset_nvoc.c b/src/nvidia/generated/g_chipset_nvoc.c new file mode 100644 index 000000000..e3ddddf7b --- /dev/null +++ b/src/nvidia/generated/g_chipset_nvoc.c @@ -0,0 +1,155 @@ +#define NVOC_CHIPSET_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_chipset_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x547dbb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJCL; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJCL(OBJCL*); +void __nvoc_init_funcTable_OBJCL(OBJCL*); +NV_STATUS __nvoc_ctor_OBJCL(OBJCL*); +void __nvoc_init_dataField_OBJCL(OBJCL*); +void __nvoc_dtor_OBJCL(OBJCL*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJCL; + +static const struct NVOC_RTTI __nvoc_rtti_OBJCL_OBJCL = { + /*pClassDef=*/ &__nvoc_class_def_OBJCL, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJCL, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJCL_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJCL, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJCL = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJCL_OBJCL, + &__nvoc_rtti_OBJCL_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJCL = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJCL), + /*classId=*/ classId(OBJCL), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJCL", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJCL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJCL, + /*pExportInfo=*/ &__nvoc_export_info_OBJCL +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJCL = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJCL(OBJCL *pThis) { + __nvoc_clDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJCL(OBJCL *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + pThis->setProperty(pThis, PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE, ((NvBool)(0 != 0))); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJCL(OBJCL *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJCL_fail_Object; + __nvoc_init_dataField_OBJCL(pThis); + + status = __nvoc_clConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJCL_fail__init; + goto __nvoc_ctor_OBJCL_exit; // Success + +__nvoc_ctor_OBJCL_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJCL_fail_Object: +__nvoc_ctor_OBJCL_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJCL_1(OBJCL *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJCL(OBJCL *pThis) { + __nvoc_init_funcTable_OBJCL_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJCL(OBJCL *pThis) { + pThis->__nvoc_pbase_OBJCL = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJCL(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJCL(OBJCL 
**ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJCL *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJCL)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJCL)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJCL); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJCL(pThis); + status = __nvoc_ctor_OBJCL(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJCL_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJCL_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJCL(OBJCL **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJCL(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_chipset_nvoc.h b/src/nvidia/generated/g_chipset_nvoc.h new file mode 100644 index 000000000..0233c99d7 --- /dev/null +++ b/src/nvidia/generated/g_chipset_nvoc.h @@ -0,0 +1,1070 @@ +#ifndef _G_CHIPSET_NVOC_H_ +#define _G_CHIPSET_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_chipset_nvoc.h" + +#ifndef CHIPSET_H +#define CHIPSET_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for the Core Logic Object. 
* +* * +\***************************************************************************/ + +#include "platform/hwbc.h" + +// forward declare PcieAerCapability struct +struct PcieAerCapability; + +// forward declaration of PexL1SubstateCapability +struct PexL1SubstateCapability; + +// PCIe Enhanced Configuration space structure +typedef struct PCIECONFIGSPACEBASE PCIECONFIGSPACEBASE; +typedef struct PCIECONFIGSPACEBASE *PPCIECONFIGSPACEBASE; +struct PCIECONFIGSPACEBASE +{ + RmPhysAddr baseAddress; + NvU32 domain; + NvU8 startBusNumber; + NvU8 endBusNumber; + PPCIECONFIGSPACEBASE next; +}; + +// Seen in both nvagp.c and elsewhere +#define PCI_MAX_DOMAINS 65536 +#define PCI_MAX_BUSES 256 +#define PCI_MAX_DEVICES 32 +#define PCI_CLASS_DISPLAY_DEV 0x03 +#define PCI_COMMAND 0x04 +#define PCI_BASE_ADDRESS_0 0x10 /* Aperture Base */ + +#define PCI_VENDOR_ID_AMD 0x1022 +#define PCI_VENDOR_ID_ALI 0x10B9 +#define PCI_VENDOR_ID_NVIDIA 0x10DE + +#define CL_MAX_LINK_WIDTH(p) ((p & 0x3f0) >> 4) // Max Link width is 9:4 +// CL_IS_ROOT_PORT() returns NV_TRUE if root port of PCI-E Root Complex +// Device/Port type is 23:20 +// Root port is 0100b +#define CL_IS_ROOT_PORT(p) (((p & 0xf00000) >> 20) == 0x4) +#define CL_AVG_LINK_WIDTH 8 +#define CL_DAGWOOD_LINK_WIDTH 8 +#define CL_SINGLE_DW_LINK_WIDTH 8 + +// +// Offset arguments to Pcie[Read|WriteRoot]PortConfigReg +// + +// PCI Express capability +#define CL_PCIE_BEGIN 0x0100 +#define CL_PCIE_CAP (CL_PCIE_BEGIN + 0x00) +#define CL_PCIE_DEV_CAP (CL_PCIE_BEGIN + 0x04) +#define CL_PCIE_DEV_CTRL_STATUS (CL_PCIE_BEGIN + 0x08) +#define CL_PCIE_LINK_CAP (CL_PCIE_BEGIN + 0x0C) +#define CL_PCIE_LINK_CTRL_STATUS (CL_PCIE_BEGIN + 0x10) +#define CL_PCIE_SLOT_CAP (CL_PCIE_BEGIN + 0x14) +#define CL_PCIE_SLOT_CTRL_STATUS (CL_PCIE_BEGIN + 0x18) +#define CL_PCIE_ROOT_CTRL_RSVDP (CL_PCIE_BEGIN + 0x1C) +#define CL_PCIE_ROOT_STATUS (CL_PCIE_BEGIN + 0x20) +#define CL_PCIE_DEV_CAP_2 (CL_PCIE_BEGIN + 0x24) +#define CL_PCIE_DEV_CTRL_2 (CL_PCIE_BEGIN + 0x28) +#define CL_PCIE_END (CL_PCIE_BEGIN + 0x2C) + +// PCI Express Capabilities +#define CL_PCIE_CAP_SLOT NVBIT(24) + +// PCI Express Link Control ASPM Control Bits +#define CL_PCIE_LINK_CTRL_STATUS_ASPM_L0S_BIT NVBIT(0) +#define CL_PCIE_LINK_CTRL_STATUS_ASPM_L1_BIT NVBIT(1) +#define CL_PCIE_LINK_CTRL_STATUS_ASPM_MASK \ + (CL_PCIE_LINK_CTRL_STATUS_ASPM_L0S_BIT|CL_PCIE_LINK_CTRL_STATUS_ASPM_L1_BIT) + +// PCI Express Link control ASPM capability Bits +#define CL_PCIE_LINK_CAP_ASPM_L0S_BIT NVBIT(10) +#define CL_PCIE_LINK_CAP_ASPM_L1_BIT NVBIT(11) + +// PCI Express Slot Capabilities +#define CL_PCIE_SLOT_CAP_HOTPLUG_SURPRISE NVBIT(5) +#define CL_PCIE_SLOT_CAP_HOTPLUG_CAPABLE NVBIT(6) + +// +// CL_DEVICE_CONTROL_STATUS bits for hal +// +// From PCI-E manual +#define CL_PCIE_DEVICE_CONTROL_STATUS_CORR_ERROR_DETECTED NVBIT(16) +#define CL_PCIE_DEVICE_CONTROL_STATUS_NON_FATAL_ERROR_DETECTED NVBIT(17) +#define CL_PCIE_DEVICE_CONTROL_STATUS_FATAL_ERROR_DETECTED NVBIT(18) +#define CL_PCIE_DEVICE_CONTROL_STATUS_UNSUPP_REQUEST_DETECTED NVBIT(19) + +// PCI Express Latency Tolerance Reporting Capability Bit +#define CL_PCIE_DEV_CAP_2_LTR_SUPPORTED_BIT NVBIT(11) +#define CL_PCIE_DEV_CTRL_2_LTR_ENABLED_BIT NVBIT(10) + +// Advanced Error Reporting capability +#define CL_AER_BEGIN 0x0200 +#define CL_AER_CAP (CL_AER_BEGIN + 0x00) +#define CL_AER_UNCORRECTABLE_STATUS (CL_AER_BEGIN + 0x04) +#define CL_AER_UNCORRECTABLE_MASK (CL_AER_BEGIN + 0x08) +#define CL_AER_UNCORRECTABLE_SEVERITY (CL_AER_BEGIN + 0x0C) +#define CL_AER_CORRECTABLE_STATUS (CL_AER_BEGIN + 
0x10) +#define CL_AER_CORRECTABLE_MASK (CL_AER_BEGIN + 0x14) +#define CL_AER_ADVANCED_CAP_CONTROL (CL_AER_BEGIN + 0x18) +#define CL_AER_HEADER_LOG (CL_AER_BEGIN + 0x1C) +#define CL_AER_ROOT_ERROR_COMMAND (CL_AER_BEGIN + 0x2C) +#define CL_AER_ROOT_ERROR_STATUS (CL_AER_BEGIN + 0x30) +#define CL_AER_ERROR_SOURCE (CL_AER_BEGIN + 0x34) +#define CL_AER_END (CL_AER_BEGIN + 0x34) + +#define CL_IS_L0_SUPPORTED(p) (((p) & CL_PCIE_LINK_CAP_ASPM_L0S_BIT)) +#define CL_IS_L1_SUPPORTED(p) (((p) & CL_PCIE_LINK_CAP_ASPM_L1_BIT)) + +#define CL_IS_LTR_PORT_SUPPORTED(p) (((p) & CL_PCIE_DEV_CAP_2_LTR_SUPPORTED_BIT)) +#define CL_IS_LTR_PORT_ENABLED(p) (((p) & CL_PCIE_DEV_CTRL_2_LTR_ENABLED_BIT)) + +// +// This defines PCI-E Advanced Error Reporting Capability structure per PCI-E manual +// (refer to section 7.10 of PCI Express Base Specification, v1.1) +// +typedef struct PcieAerCapability +{ + NvU32 PexEnhCapHeader; // (+0x00) PCI-E Enhanced Capability Header + NvU32 UncorrErrStatusReg; // (+0x04) Uncorrectable Error Status Register + NvU32 UncorrErrMaskReg; // (+0x08) Uncorrectable Error Mask Register + NvU32 UncorrErrSeverityReg; // (+0x0C) Uncorrectable Error Severity Register + NvU32 CorrErrStatusReg; // (+0x10) Correctable Error Status Register + NvU32 CorrErrMaskReg; // (+0x14) Correctable Error Mask Register + NvU32 AEcapCrtlReg; // (+0x18) Advanced Error Capability and Control Register + struct { // (+0x1C) Header Log Register + NvU32 Header[4]; // (+0x1C-0x2B) + } HeaderLogReg; + NvU32 RootErrCmd; // (+0x2C) Root Error Command + NvU32 RooErrStatus; // (+0x30) Root Error Status + NvU32 ErrSrcReg; // (+0x34) Error Source Register (Correctable Err Src Id + Err Src Id) +} PcieAerCapability, *PPcieAerCapability; + +// Virtual Channel Capability +#define CL_VC_BEGIN 0x0300 +#define CL_VC_RESOURCE_CTRL_0 (CL_VC_BEGIN + 0x14) +#define CL_VC_END (CL_VC_BEGIN + 0x1C) + +typedef struct +{ + NvU16 deviceID; // deviceID + NvU16 vendorID; // vendorID + NvU16 subdeviceID; // subsystem deviceID + NvU16 subvendorID; // subsystem vendorID + NvU8 revisionID; // revision ID +} BUSINFO; + +// L1 PM substates Capability +#define CL_L1_SS_BEGIN 0x0400 +#define CL_L1_SS_CAP_HDR (CL_L1_SS_BEGIN + 0x00) +#define CL_L1_SS_CAP_REG (CL_L1_SS_BEGIN + 0x04) +#define CL_L1_SS_CTRL1_REG (CL_L1_SS_BEGIN + 0x08) +#define CL_L1_SS_CTRL2_REG (CL_L1_SS_BEGIN + 0x0C) +#define CL_L1_SS_END CL_L1_SS_CTRL2_REG + +// +// This defines PCI-E L1 PM Substates Extended Capability structure per PCI-E manual +// (refer to section 7.xx of ECN_L1_PM_Substates_with_CLKREQ_31_May_2013_Rev10a.pdf +// +typedef struct PexL1SubstateCapability +{ + NvU32 PexEnhCapHeader; // (+0x00) PCI-E Enhanced Capability Header + NvU32 Capabilities; // (+0x04) L1 PM Substates capabilities Register + NvU32 Control1Reg; // (+0x08) L1 PM Substates Control1 Register + NvU32 Control2Reg; // (+0x0C) L1 PM Substates Control2 Register +} PexL1SubstateCapability, *PPexL1SubstateCapability; + +typedef struct BUSTOPOLOGYINFO BUSTOPOLOGYINFO; +typedef struct BUSTOPOLOGYINFO *PBUSTOPOLOGYINFO; +struct BUSTOPOLOGYINFO +{ + PBUSTOPOLOGYINFO next; + void *handle; + BUSINFO busInfo; + NvU32 domain; + NvU8 bus, device, func, secBus; + NvU16 pciSubBaseClass; + NvBool bVgaAdapter; +}; + +typedef struct GspSystemInfo GspSystemInfo; + +#ifdef NVOC_CHIPSET_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJCL { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object 
*__nvoc_pbase_Object; + struct OBJCL *__nvoc_pbase_OBJCL; + NvBool PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE; + NvBool PDB_PROP_CL_DISABLE_BR03_FLOW_CONTROL; + NvBool PDB_PROP_CL_ASLM_SUPPORTS_NV_LINK_UPGRADE; + NvBool PDB_PROP_CL_ASLM_SUPPORTS_FAST_LINK_UPGRADE; + NvBool PDB_PROP_CL_ASLM_SUPPORTS_HOT_RESET; + NvBool PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE; + NvBool PDB_PROP_CL_FORCE_SNOOP_READS_AND_WRITES_WAR_BUG_410390; + NvBool PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST; + NvBool PDB_PROP_CL_ASPM_L0S_CHIPSET_DISABLED; + NvBool PDB_PROP_CL_ASPM_L1_CHIPSET_DISABLED; + NvBool PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY; + NvBool PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY; + NvBool PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED; + NvBool PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED_GEFORCE; + NvBool PDB_PROP_CL_EXTENDED_TAG_FIELD_NOT_CAPABLE; + NvBool PDB_PROP_CL_NOSNOOP_NOT_CAPABLE; + NvBool PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE; + NvBool PDB_PROP_CL_PCIE_FORCE_GEN2_ENABLE; + NvBool PDB_PROP_CL_PCIE_GEN2_AT_LESS_THAN_X16_DISABLED; + NvBool PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR; + NvBool PDB_PROP_CL_INTEL_CPU_ROOTPORT1_NEEDS_H57_WAR; + NvBool PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ; + NvBool PDB_PROP_CL_ON_PCIE_GEN3_PATSBURG; + NvBool PDB_PROP_CL_IS_EXTERNAL_GPU; + NvBool PDB_PROP_CL_ALLOW_PCIE_GEN3_ON_PATSBURG_WITH_IVBE_CPU; + NvBool PDB_PROP_CL_BUG_999673_P2P_ARBITRARY_SPLIT_WAR; + NvBool PDB_PROP_CL_UPSTREAM_LTR_SUPPORTED; + NvBool PDB_PROP_CL_BUG_1340801_DISABLE_GEN3_ON_GIGABYTE_SNIPER_3; + NvBool PDB_PROP_CL_BUG_1681803_WAR_DISABLE_MSCG; + NvBool PDB_PROP_CL_ON_HASWELL_HOST_BRIDGE; + NvBool PDB_PROP_CL_PCIE_NON_COHERENT_USE_TC0_ONLY; + NvBool PDB_PROP_CL_UNSUPPORTED_CHIPSET; + NvBool PDB_PROP_CL_IS_CHIPSET_IO_COHERENT; + NvBool PDB_PROP_CL_DISABLE_IOMAP_WC; + NvBool PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE; + NBADDR NBAddr; + NvBool EnteredRecoverySinceErrorsLastChecked; + struct OBJHWBC *pHWBC; + NvU32 br04HwbcCount; + NBADDR FHBAddr; + BUSINFO FHBBusInfo; + NvU32 Chipset; + NvU32 ChipsetSliBondType; + NvBool ChipsetInitialized; + PPCIECONFIGSPACEBASE pPcieConfigSpaceBase; + NBADDR chipsetIDBusAddr; + BUSINFO chipsetIDInfo; + PBUSTOPOLOGYINFO pBusTopologyInfo; +}; + +#ifndef __NVOC_CLASS_OBJCL_TYPEDEF__ +#define __NVOC_CLASS_OBJCL_TYPEDEF__ +typedef struct OBJCL OBJCL; +#endif /* __NVOC_CLASS_OBJCL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCL +#define __nvoc_class_id_OBJCL 0x547dbb +#endif /* __nvoc_class_id_OBJCL */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJCL; + +#define __staticCast_OBJCL(pThis) \ + ((pThis)->__nvoc_pbase_OBJCL) + +#ifdef __nvoc_chipset_h_disabled +#define __dynamicCast_OBJCL(pThis) ((OBJCL*)NULL) +#else //__nvoc_chipset_h_disabled +#define __dynamicCast_OBJCL(pThis) \ + ((OBJCL*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJCL))) +#endif //__nvoc_chipset_h_disabled + +#define PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ_BASE_CAST +#define PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ_BASE_NAME PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ +#define PDB_PROP_CL_EXTENDED_TAG_FIELD_NOT_CAPABLE_BASE_CAST +#define PDB_PROP_CL_EXTENDED_TAG_FIELD_NOT_CAPABLE_BASE_NAME PDB_PROP_CL_EXTENDED_TAG_FIELD_NOT_CAPABLE +#define PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED_GEFORCE_BASE_CAST +#define PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED_GEFORCE_BASE_NAME PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED_GEFORCE +#define PDB_PROP_CL_UPSTREAM_LTR_SUPPORTED_BASE_CAST +#define PDB_PROP_CL_UPSTREAM_LTR_SUPPORTED_BASE_NAME 
PDB_PROP_CL_UPSTREAM_LTR_SUPPORTED +#define PDB_PROP_CL_BUG_1681803_WAR_DISABLE_MSCG_BASE_CAST +#define PDB_PROP_CL_BUG_1681803_WAR_DISABLE_MSCG_BASE_NAME PDB_PROP_CL_BUG_1681803_WAR_DISABLE_MSCG +#define PDB_PROP_CL_ON_PCIE_GEN3_PATSBURG_BASE_CAST +#define PDB_PROP_CL_ON_PCIE_GEN3_PATSBURG_BASE_NAME PDB_PROP_CL_ON_PCIE_GEN3_PATSBURG +#define PDB_PROP_CL_ASLM_SUPPORTS_NV_LINK_UPGRADE_BASE_CAST +#define PDB_PROP_CL_ASLM_SUPPORTS_NV_LINK_UPGRADE_BASE_NAME PDB_PROP_CL_ASLM_SUPPORTS_NV_LINK_UPGRADE +#define PDB_PROP_CL_ASPM_L1_CHIPSET_DISABLED_BASE_CAST +#define PDB_PROP_CL_ASPM_L1_CHIPSET_DISABLED_BASE_NAME PDB_PROP_CL_ASPM_L1_CHIPSET_DISABLED +#define PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE_BASE_CAST +#define PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE_BASE_NAME PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE +#define PDB_PROP_CL_PCIE_GEN2_AT_LESS_THAN_X16_DISABLED_BASE_CAST +#define PDB_PROP_CL_PCIE_GEN2_AT_LESS_THAN_X16_DISABLED_BASE_NAME PDB_PROP_CL_PCIE_GEN2_AT_LESS_THAN_X16_DISABLED +#define PDB_PROP_CL_ALLOW_PCIE_GEN3_ON_PATSBURG_WITH_IVBE_CPU_BASE_CAST +#define PDB_PROP_CL_ALLOW_PCIE_GEN3_ON_PATSBURG_WITH_IVBE_CPU_BASE_NAME PDB_PROP_CL_ALLOW_PCIE_GEN3_ON_PATSBURG_WITH_IVBE_CPU +#define PDB_PROP_CL_UNSUPPORTED_CHIPSET_BASE_CAST +#define PDB_PROP_CL_UNSUPPORTED_CHIPSET_BASE_NAME PDB_PROP_CL_UNSUPPORTED_CHIPSET +#define PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE_BASE_CAST +#define PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE_BASE_NAME PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE +#define PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST_BASE_CAST +#define PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST_BASE_NAME PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST +#define PDB_PROP_CL_BUG_999673_P2P_ARBITRARY_SPLIT_WAR_BASE_CAST +#define PDB_PROP_CL_BUG_999673_P2P_ARBITRARY_SPLIT_WAR_BASE_NAME PDB_PROP_CL_BUG_999673_P2P_ARBITRARY_SPLIT_WAR +#define PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY_BASE_CAST +#define PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY_BASE_NAME PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY +#define PDB_PROP_CL_PCIE_FORCE_GEN2_ENABLE_BASE_CAST +#define PDB_PROP_CL_PCIE_FORCE_GEN2_ENABLE_BASE_NAME PDB_PROP_CL_PCIE_FORCE_GEN2_ENABLE +#define PDB_PROP_CL_DISABLE_BR03_FLOW_CONTROL_BASE_CAST +#define PDB_PROP_CL_DISABLE_BR03_FLOW_CONTROL_BASE_NAME PDB_PROP_CL_DISABLE_BR03_FLOW_CONTROL +#define PDB_PROP_CL_DISABLE_IOMAP_WC_BASE_CAST +#define PDB_PROP_CL_DISABLE_IOMAP_WC_BASE_NAME PDB_PROP_CL_DISABLE_IOMAP_WC +#define PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE_BASE_CAST +#define PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE_BASE_NAME PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE +#define PDB_PROP_CL_NOSNOOP_NOT_CAPABLE_BASE_CAST +#define PDB_PROP_CL_NOSNOOP_NOT_CAPABLE_BASE_NAME PDB_PROP_CL_NOSNOOP_NOT_CAPABLE +#define PDB_PROP_CL_ASPM_L0S_CHIPSET_DISABLED_BASE_CAST +#define PDB_PROP_CL_ASPM_L0S_CHIPSET_DISABLED_BASE_NAME PDB_PROP_CL_ASPM_L0S_CHIPSET_DISABLED +#define PDB_PROP_CL_INTEL_CPU_ROOTPORT1_NEEDS_H57_WAR_BASE_CAST +#define PDB_PROP_CL_INTEL_CPU_ROOTPORT1_NEEDS_H57_WAR_BASE_NAME PDB_PROP_CL_INTEL_CPU_ROOTPORT1_NEEDS_H57_WAR +#define PDB_PROP_CL_BUG_1340801_DISABLE_GEN3_ON_GIGABYTE_SNIPER_3_BASE_CAST +#define PDB_PROP_CL_BUG_1340801_DISABLE_GEN3_ON_GIGABYTE_SNIPER_3_BASE_NAME PDB_PROP_CL_BUG_1340801_DISABLE_GEN3_ON_GIGABYTE_SNIPER_3 +#define PDB_PROP_CL_ASLM_SUPPORTS_HOT_RESET_BASE_CAST +#define PDB_PROP_CL_ASLM_SUPPORTS_HOT_RESET_BASE_NAME PDB_PROP_CL_ASLM_SUPPORTS_HOT_RESET +#define PDB_PROP_CL_IS_EXTERNAL_GPU_BASE_CAST +#define PDB_PROP_CL_IS_EXTERNAL_GPU_BASE_NAME PDB_PROP_CL_IS_EXTERNAL_GPU +#define 
PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR_BASE_CAST +#define PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR_BASE_NAME PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR +#define PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED_BASE_CAST +#define PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED_BASE_NAME PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED +#define PDB_PROP_CL_FORCE_SNOOP_READS_AND_WRITES_WAR_BUG_410390_BASE_CAST +#define PDB_PROP_CL_FORCE_SNOOP_READS_AND_WRITES_WAR_BUG_410390_BASE_NAME PDB_PROP_CL_FORCE_SNOOP_READS_AND_WRITES_WAR_BUG_410390 +#define PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY_BASE_CAST +#define PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY_BASE_NAME PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY +#define PDB_PROP_CL_ON_HASWELL_HOST_BRIDGE_BASE_CAST +#define PDB_PROP_CL_ON_HASWELL_HOST_BRIDGE_BASE_NAME PDB_PROP_CL_ON_HASWELL_HOST_BRIDGE +#define PDB_PROP_CL_PCIE_NON_COHERENT_USE_TC0_ONLY_BASE_CAST +#define PDB_PROP_CL_PCIE_NON_COHERENT_USE_TC0_ONLY_BASE_NAME PDB_PROP_CL_PCIE_NON_COHERENT_USE_TC0_ONLY +#define PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE_BASE_CAST +#define PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE_BASE_NAME PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE +#define PDB_PROP_CL_IS_CHIPSET_IO_COHERENT_BASE_CAST +#define PDB_PROP_CL_IS_CHIPSET_IO_COHERENT_BASE_NAME PDB_PROP_CL_IS_CHIPSET_IO_COHERENT +#define PDB_PROP_CL_ASLM_SUPPORTS_FAST_LINK_UPGRADE_BASE_CAST +#define PDB_PROP_CL_ASLM_SUPPORTS_FAST_LINK_UPGRADE_BASE_NAME PDB_PROP_CL_ASLM_SUPPORTS_FAST_LINK_UPGRADE + +NV_STATUS __nvoc_objCreateDynamic_OBJCL(OBJCL**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJCL(OBJCL**, Dynamic*, NvU32); +#define __objCreate_OBJCL(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJCL((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS clInit_IMPL(struct OBJGPU *arg0, struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clInit(struct OBJGPU *arg0, struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clInit(arg0, pCl) clInit_IMPL(arg0, pCl) +#endif //__nvoc_chipset_h_disabled + +#define clInit_HAL(arg0, pCl) clInit(arg0, pCl) + +void clUpdateConfig_IMPL(struct OBJGPU *arg0, struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline void clUpdateConfig(struct OBJGPU *arg0, struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clUpdateConfig(arg0, pCl) clUpdateConfig_IMPL(arg0, pCl) +#endif //__nvoc_chipset_h_disabled + +#define clUpdateConfig_HAL(arg0, pCl) clUpdateConfig(arg0, pCl) + +NV_STATUS clTeardown_IMPL(struct OBJGPU *arg0, struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clTeardown(struct OBJGPU *arg0, struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clTeardown(arg0, pCl) clTeardown_IMPL(arg0, pCl) +#endif //__nvoc_chipset_h_disabled + +#define clTeardown_HAL(arg0, pCl) clTeardown(arg0, pCl) + +void clInitPropertiesFromRegistry_IMPL(struct OBJGPU *arg0, struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline void clInitPropertiesFromRegistry(struct OBJGPU *arg0, struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clInitPropertiesFromRegistry(arg0, pCl) clInitPropertiesFromRegistry_IMPL(arg0, pCl) +#endif //__nvoc_chipset_h_disabled + +#define 
clInitPropertiesFromRegistry_HAL(arg0, pCl) clInitPropertiesFromRegistry(arg0, pCl) + +NV_STATUS clGetFHBHandle_IMPL(struct OBJCL *arg0, void **arg1, NvU16 *arg2, NvU16 *arg3); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clGetFHBHandle(struct OBJCL *arg0, void **arg1, NvU16 *arg2, NvU16 *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clGetFHBHandle(arg0, arg1, arg2, arg3) clGetFHBHandle_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_chipset_h_disabled + +#define clGetFHBHandle_HAL(arg0, arg1, arg2, arg3) clGetFHBHandle(arg0, arg1, arg2, arg3) + +NvU32 clInitMappingPciBusDevice_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1); + +#ifdef __nvoc_chipset_h_disabled +static inline NvU32 clInitMappingPciBusDevice(struct OBJGPU *arg0, struct OBJCL *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return 0; +} +#else //__nvoc_chipset_h_disabled +#define clInitMappingPciBusDevice(arg0, arg1) clInitMappingPciBusDevice_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#define clInitMappingPciBusDevice_HAL(arg0, arg1) clInitMappingPciBusDevice(arg0, arg1) + +NV_STATUS clFindFHBAndGetChipsetInfoIndex_IMPL(struct OBJCL *arg0, NvU16 *arg1); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clFindFHBAndGetChipsetInfoIndex(struct OBJCL *arg0, NvU16 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clFindFHBAndGetChipsetInfoIndex(arg0, arg1) clFindFHBAndGetChipsetInfoIndex_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#define clFindFHBAndGetChipsetInfoIndex_HAL(arg0, arg1) clFindFHBAndGetChipsetInfoIndex(arg0, arg1) + +NV_STATUS clInitPcie_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clInitPcie(struct OBJGPU *arg0, struct OBJCL *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clInitPcie(arg0, arg1) clInitPcie_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#define clInitPcie_HAL(arg0, arg1) clInitPcie(arg0, arg1) + +void clUpdatePcieConfig_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1); + +#ifdef __nvoc_chipset_h_disabled +static inline void clUpdatePcieConfig(struct OBJGPU *arg0, struct OBJCL *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clUpdatePcieConfig(arg0, arg1) clUpdatePcieConfig_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#define clUpdatePcieConfig_HAL(arg0, arg1) clUpdatePcieConfig(arg0, arg1) + +NV_STATUS clTeardownPcie_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clTeardownPcie(struct OBJGPU *arg0, struct OBJCL *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clTeardownPcie(arg0, arg1) clTeardownPcie_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#define clTeardownPcie_HAL(arg0, arg1) clTeardownPcie(arg0, arg1) + +NV_STATUS clPcieReadPortConfigReg_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, PORTDATA *arg2, NvU32 arg3, NvU32 *arg4); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieReadPortConfigReg(struct OBJGPU *arg0, struct OBJCL *arg1, PORTDATA *arg2, NvU32 arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} 
+#else //__nvoc_chipset_h_disabled +#define clPcieReadPortConfigReg(arg0, arg1, arg2, arg3, arg4) clPcieReadPortConfigReg_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_chipset_h_disabled + +#define clPcieReadPortConfigReg_HAL(arg0, arg1, arg2, arg3, arg4) clPcieReadPortConfigReg(arg0, arg1, arg2, arg3, arg4) + +NV_STATUS clPcieWriteRootPortConfigReg_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, NvU32 arg2, NvU32 arg3); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieWriteRootPortConfigReg(struct OBJGPU *arg0, struct OBJCL *arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieWriteRootPortConfigReg(arg0, arg1, arg2, arg3) clPcieWriteRootPortConfigReg_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_chipset_h_disabled + +#define clPcieWriteRootPortConfigReg_HAL(arg0, arg1, arg2, arg3) clPcieWriteRootPortConfigReg(arg0, arg1, arg2, arg3) + +NV_STATUS clPcieReadAerCapability_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, struct PcieAerCapability *arg2); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieReadAerCapability(struct OBJGPU *arg0, struct OBJCL *arg1, struct PcieAerCapability *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieReadAerCapability(arg0, arg1, arg2) clPcieReadAerCapability_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +#define clPcieReadAerCapability_HAL(arg0, arg1, arg2) clPcieReadAerCapability(arg0, arg1, arg2) + +NV_STATUS clPcieReadL1SsCapability_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, struct PexL1SubstateCapability *arg2); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieReadL1SsCapability(struct OBJGPU *arg0, struct OBJCL *arg1, struct PexL1SubstateCapability *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieReadL1SsCapability(arg0, arg1, arg2) clPcieReadL1SsCapability_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +#define clPcieReadL1SsCapability_HAL(arg0, arg1, arg2) clPcieReadL1SsCapability(arg0, arg1, arg2) + +NV_STATUS clPcieReadDevCtrlStatus_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, NvU32 *arg2, NvU32 *arg3); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieReadDevCtrlStatus(struct OBJGPU *arg0, struct OBJCL *arg1, NvU32 *arg2, NvU32 *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieReadDevCtrlStatus(arg0, arg1, arg2, arg3) clPcieReadDevCtrlStatus_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_chipset_h_disabled + +#define clPcieReadDevCtrlStatus_HAL(arg0, arg1, arg2, arg3) clPcieReadDevCtrlStatus(arg0, arg1, arg2, arg3) + +NV_STATUS clPcieClearDevCtrlStatus_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, NvU32 *arg2); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieClearDevCtrlStatus(struct OBJGPU *arg0, struct OBJCL *arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieClearDevCtrlStatus(arg0, arg1, arg2) clPcieClearDevCtrlStatus_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +#define clPcieClearDevCtrlStatus_HAL(arg0, arg1, arg2) clPcieClearDevCtrlStatus(arg0, arg1, arg2) + +NvU16 clPcieReadWord_IMPL(struct OBJCL *arg0, NvU32 
arg1, NvU8 arg2, NvU8 arg3, NvU8 arg4, NvU32 arg5); + +#ifdef __nvoc_chipset_h_disabled +static inline NvU16 clPcieReadWord(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 arg3, NvU8 arg4, NvU32 arg5) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return 0; +} +#else //__nvoc_chipset_h_disabled +#define clPcieReadWord(arg0, arg1, arg2, arg3, arg4, arg5) clPcieReadWord_IMPL(arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_chipset_h_disabled + +#define clPcieReadWord_HAL(arg0, arg1, arg2, arg3, arg4, arg5) clPcieReadWord(arg0, arg1, arg2, arg3, arg4, arg5) + +NvU32 clPcieReadDword_IMPL(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 arg3, NvU8 arg4, NvU32 arg5); + +#ifdef __nvoc_chipset_h_disabled +static inline NvU32 clPcieReadDword(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 arg3, NvU8 arg4, NvU32 arg5) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return 0; +} +#else //__nvoc_chipset_h_disabled +#define clPcieReadDword(arg0, arg1, arg2, arg3, arg4, arg5) clPcieReadDword_IMPL(arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_chipset_h_disabled + +#define clPcieReadDword_HAL(arg0, arg1, arg2, arg3, arg4, arg5) clPcieReadDword(arg0, arg1, arg2, arg3, arg4, arg5) + +void clPcieWriteWord_IMPL(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 arg3, NvU8 arg4, NvU32 arg5, NvU16 arg6); + +#ifdef __nvoc_chipset_h_disabled +static inline void clPcieWriteWord(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 arg3, NvU8 arg4, NvU32 arg5, NvU16 arg6) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clPcieWriteWord(arg0, arg1, arg2, arg3, arg4, arg5, arg6) clPcieWriteWord_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6) +#endif //__nvoc_chipset_h_disabled + +#define clPcieWriteWord_HAL(arg0, arg1, arg2, arg3, arg4, arg5, arg6) clPcieWriteWord(arg0, arg1, arg2, arg3, arg4, arg5, arg6) + +void clPcieWriteDword_IMPL(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 arg3, NvU8 arg4, NvU32 arg5, NvU32 arg6); + +#ifdef __nvoc_chipset_h_disabled +static inline void clPcieWriteDword(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 arg3, NvU8 arg4, NvU32 arg5, NvU32 arg6) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clPcieWriteDword(arg0, arg1, arg2, arg3, arg4, arg5, arg6) clPcieWriteDword_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6) +#endif //__nvoc_chipset_h_disabled + +#define clPcieWriteDword_HAL(arg0, arg1, arg2, arg3, arg4, arg5, arg6) clPcieWriteDword(arg0, arg1, arg2, arg3, arg4, arg5, arg6) + +NvBool clFindBR04_IMPL(POBJGPU *pGpus, NvU32 NumGpus, NvBool flat, NvU32 devId, struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline NvBool clFindBR04(POBJGPU *pGpus, NvU32 NumGpus, NvBool flat, NvU32 devId, struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_FALSE; +} +#else //__nvoc_chipset_h_disabled +#define clFindBR04(pGpus, NumGpus, flat, devId, pCl) clFindBR04_IMPL(pGpus, NumGpus, flat, devId, pCl) +#endif //__nvoc_chipset_h_disabled + +#define clFindBR04_HAL(pGpus, NumGpus, flat, devId, pCl) clFindBR04(pGpus, NumGpus, flat, devId, pCl) + +NV_STATUS clResumeBridge_IMPL(struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clResumeBridge(struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clResumeBridge(pCl) clResumeBridge_IMPL(pCl) +#endif //__nvoc_chipset_h_disabled + +#define clResumeBridge_HAL(pCl) 
clResumeBridge(pCl) + +NV_STATUS clChangeUpstreamBusSpeed_IMPL(NvU8 primaryBus, struct OBJCL *pCl, NvU32 cmd); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clChangeUpstreamBusSpeed(NvU8 primaryBus, struct OBJCL *pCl, NvU32 cmd) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clChangeUpstreamBusSpeed(primaryBus, pCl, cmd) clChangeUpstreamBusSpeed_IMPL(primaryBus, pCl, cmd) +#endif //__nvoc_chipset_h_disabled + +#define clChangeUpstreamBusSpeed_HAL(primaryBus, pCl, cmd) clChangeUpstreamBusSpeed(primaryBus, pCl, cmd) + +NV_STATUS clGetUpstreamBusSpeed_IMPL(NvU8 primaryBus, struct OBJCL *pCl, NvU32 *speed); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clGetUpstreamBusSpeed(NvU8 primaryBus, struct OBJCL *pCl, NvU32 *speed) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clGetUpstreamBusSpeed(primaryBus, pCl, speed) clGetUpstreamBusSpeed_IMPL(primaryBus, pCl, speed) +#endif //__nvoc_chipset_h_disabled + +#define clGetUpstreamBusSpeed_HAL(primaryBus, pCl, speed) clGetUpstreamBusSpeed(primaryBus, pCl, speed) + +NV_STATUS clHWBCGetUpstreamBAR0_IMPL(NvU8 primaryBus, struct OBJCL *pCl, RmPhysAddr *pBAR0); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clHWBCGetUpstreamBAR0(NvU8 primaryBus, struct OBJCL *pCl, RmPhysAddr *pBAR0) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clHWBCGetUpstreamBAR0(primaryBus, pCl, pBAR0) clHWBCGetUpstreamBAR0_IMPL(primaryBus, pCl, pBAR0) +#endif //__nvoc_chipset_h_disabled + +#define clHWBCGetUpstreamBAR0_HAL(primaryBus, pCl, pBAR0) clHWBCGetUpstreamBAR0(primaryBus, pCl, pBAR0) + +void *clFindP2PBrdg_IMPL(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 *arg3, NvU8 *arg4, NvU8 *arg5, NvU16 *arg6, NvU16 *arg7); + +#ifdef __nvoc_chipset_h_disabled +static inline void *clFindP2PBrdg(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2, NvU8 *arg3, NvU8 *arg4, NvU8 *arg5, NvU16 *arg6, NvU16 *arg7) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NULL; +} +#else //__nvoc_chipset_h_disabled +#define clFindP2PBrdg(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) clFindP2PBrdg_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +#endif //__nvoc_chipset_h_disabled + +#define clFindP2PBrdg_HAL(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) clFindP2PBrdg(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + +void *clFindBrdgUpstreamPort_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, NvBool arg2, NvU8 *arg3, NvU8 *arg4, NvU8 *arg5, NvU16 *arg6, NvU16 *arg7, NvU8 *arg8); + +#ifdef __nvoc_chipset_h_disabled +static inline void *clFindBrdgUpstreamPort(struct OBJGPU *arg0, struct OBJCL *arg1, NvBool arg2, NvU8 *arg3, NvU8 *arg4, NvU8 *arg5, NvU16 *arg6, NvU16 *arg7, NvU8 *arg8) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NULL; +} +#else //__nvoc_chipset_h_disabled +#define clFindBrdgUpstreamPort(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) clFindBrdgUpstreamPort_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +#endif //__nvoc_chipset_h_disabled + +#define clFindBrdgUpstreamPort_HAL(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) clFindBrdgUpstreamPort(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + +NV_STATUS clSetPortPcieCapOffset_IMPL(struct OBJCL *arg0, void *arg1, NvU32 *arg2); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS 
clSetPortPcieCapOffset(struct OBJCL *arg0, void *arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clSetPortPcieCapOffset(arg0, arg1, arg2) clSetPortPcieCapOffset_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +#define clSetPortPcieCapOffset_HAL(arg0, arg1, arg2) clSetPortPcieCapOffset(arg0, arg1, arg2) + +NV_STATUS clGetRsdtXsdtTablesAddr_IMPL(struct OBJCL *arg0, NvU32 *arg1, NvU64 *arg2); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clGetRsdtXsdtTablesAddr(struct OBJCL *arg0, NvU32 *arg1, NvU64 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clGetRsdtXsdtTablesAddr(arg0, arg1, arg2) clGetRsdtXsdtTablesAddr_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +#define clGetRsdtXsdtTablesAddr_HAL(arg0, arg1, arg2) clGetRsdtXsdtTablesAddr(arg0, arg1, arg2) + +NvBool clGetMcfgTableFromOS_IMPL(struct OBJCL *arg0, struct OBJOS *arg1, void **arg2, NvU32 *arg3); + +#ifdef __nvoc_chipset_h_disabled +static inline NvBool clGetMcfgTableFromOS(struct OBJCL *arg0, struct OBJOS *arg1, void **arg2, NvU32 *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_FALSE; +} +#else //__nvoc_chipset_h_disabled +#define clGetMcfgTableFromOS(arg0, arg1, arg2, arg3) clGetMcfgTableFromOS_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_chipset_h_disabled + +#define clGetMcfgTableFromOS_HAL(arg0, arg1, arg2, arg3) clGetMcfgTableFromOS(arg0, arg1, arg2, arg3) + +NvU64 clScanForTable_IMPL(struct OBJCL *arg0, struct OBJOS *arg1, NvU64 arg2, NvU64 arg3, NvU32 arg4); + +#ifdef __nvoc_chipset_h_disabled +static inline NvU64 clScanForTable(struct OBJCL *arg0, struct OBJOS *arg1, NvU64 arg2, NvU64 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return 0; +} +#else //__nvoc_chipset_h_disabled +#define clScanForTable(arg0, arg1, arg2, arg3, arg4) clScanForTable_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_chipset_h_disabled + +#define clScanForTable_HAL(arg0, arg1, arg2, arg3, arg4) clScanForTable(arg0, arg1, arg2, arg3, arg4) + +NV_STATUS clStorePcieConfigSpaceBaseFromMcfg_IMPL(struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clStorePcieConfigSpaceBaseFromMcfg(struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clStorePcieConfigSpaceBaseFromMcfg(pCl) clStorePcieConfigSpaceBaseFromMcfg_IMPL(pCl) +#endif //__nvoc_chipset_h_disabled + +#define clStorePcieConfigSpaceBaseFromMcfg_HAL(pCl) clStorePcieConfigSpaceBaseFromMcfg(pCl) + +NV_STATUS clInsertPcieConfigSpaceBase_IMPL(struct OBJCL *arg0, RmPhysAddr arg1, NvU32 arg2, NvU8 arg3, NvU8 arg4); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clInsertPcieConfigSpaceBase(struct OBJCL *arg0, RmPhysAddr arg1, NvU32 arg2, NvU8 arg3, NvU8 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clInsertPcieConfigSpaceBase(arg0, arg1, arg2, arg3, arg4) clInsertPcieConfigSpaceBase_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_chipset_h_disabled + +#define clInsertPcieConfigSpaceBase_HAL(arg0, arg1, arg2, arg3, arg4) clInsertPcieConfigSpaceBase(arg0, arg1, arg2, arg3, arg4) + +RmPhysAddr clFindPcieConfigSpaceBase_IMPL(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2); + +#ifdef 
__nvoc_chipset_h_disabled +static inline RmPhysAddr clFindPcieConfigSpaceBase(struct OBJCL *arg0, NvU32 arg1, NvU8 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_chipset_h_disabled +#define clFindPcieConfigSpaceBase(arg0, arg1, arg2) clFindPcieConfigSpaceBase_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +#define clFindPcieConfigSpaceBase_HAL(arg0, arg1, arg2) clFindPcieConfigSpaceBase(arg0, arg1, arg2) + +void clFreePcieConfigSpaceBase_IMPL(struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline void clFreePcieConfigSpaceBase(struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clFreePcieConfigSpaceBase(pCl) clFreePcieConfigSpaceBase_IMPL(pCl) +#endif //__nvoc_chipset_h_disabled + +#define clFreePcieConfigSpaceBase_HAL(pCl) clFreePcieConfigSpaceBase(pCl) + +NV_STATUS clInitDeviceInfo_IMPL(struct OBJCL *arg0, struct OBJGPU *arg1); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clInitDeviceInfo(struct OBJCL *arg0, struct OBJGPU *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clInitDeviceInfo(arg0, arg1) clInitDeviceInfo_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#define clInitDeviceInfo_HAL(arg0, arg1) clInitDeviceInfo(arg0, arg1) + +void clCountBR_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, NvU8 *arg2, NvU8 *arg3, NvU8 *arg4); + +#ifdef __nvoc_chipset_h_disabled +static inline void clCountBR(struct OBJGPU *arg0, struct OBJCL *arg1, NvU8 *arg2, NvU8 *arg3, NvU8 *arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clCountBR(arg0, arg1, arg2, arg3, arg4) clCountBR_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_chipset_h_disabled + +#define clCountBR_HAL(arg0, arg1, arg2, arg3, arg4) clCountBR(arg0, arg1, arg2, arg3, arg4) + +void clFindCommonBR_IMPL(struct OBJGPU *pGpu1, struct OBJGPU *pGpu2, struct OBJCL *pCl, NvU8 *pBR03Bus, NvU8 *pBR04Bus, NvU8 *pPLXBus, NvBool bScanAll); + +#ifdef __nvoc_chipset_h_disabled +static inline void clFindCommonBR(struct OBJGPU *pGpu1, struct OBJGPU *pGpu2, struct OBJCL *pCl, NvU8 *pBR03Bus, NvU8 *pBR04Bus, NvU8 *pPLXBus, NvBool bScanAll) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clFindCommonBR(pGpu1, pGpu2, pCl, pBR03Bus, pBR04Bus, pPLXBus, bScanAll) clFindCommonBR_IMPL(pGpu1, pGpu2, pCl, pBR03Bus, pBR04Bus, pPLXBus, bScanAll) +#endif //__nvoc_chipset_h_disabled + +#define clFindCommonBR_HAL(pGpu1, pGpu2, pCl, pBR03Bus, pBR04Bus, pPLXBus, bScanAll) clFindCommonBR(pGpu1, pGpu2, pCl, pBR03Bus, pBR04Bus, pPLXBus, bScanAll) + +void clFindCommonDownstreamBR_IMPL(struct OBJGPU *pGpu1, struct OBJGPU *pGpu2, struct OBJCL *pCl, NvU8 *pPciSwitchBus); + +#ifdef __nvoc_chipset_h_disabled +static inline void clFindCommonDownstreamBR(struct OBJGPU *pGpu1, struct OBJGPU *pGpu2, struct OBJCL *pCl, NvU8 *pPciSwitchBus) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clFindCommonDownstreamBR(pGpu1, pGpu2, pCl, pPciSwitchBus) clFindCommonDownstreamBR_IMPL(pGpu1, pGpu2, pCl, pPciSwitchBus) +#endif //__nvoc_chipset_h_disabled + +#define clFindCommonDownstreamBR_HAL(pGpu1, pGpu2, pCl, pPciSwitchBus) clFindCommonDownstreamBR(pGpu1, pGpu2, pCl, pPciSwitchBus) + +void clFindBR_IMPL(struct OBJGPU *pGpu, 
struct OBJCL *pCl, NvU8 *pBR03Bus, NvU8 *pBR04Bus, NvBool *pBRNotBR04A03, NvBool *pNoUnsupportedBRFound, NvBool *pNoOnboardBR04, NvU8 *pPLXBus); + +#ifdef __nvoc_chipset_h_disabled +static inline void clFindBR(struct OBJGPU *pGpu, struct OBJCL *pCl, NvU8 *pBR03Bus, NvU8 *pBR04Bus, NvBool *pBRNotBR04A03, NvBool *pNoUnsupportedBRFound, NvBool *pNoOnboardBR04, NvU8 *pPLXBus) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clFindBR(pGpu, pCl, pBR03Bus, pBR04Bus, pBRNotBR04A03, pNoUnsupportedBRFound, pNoOnboardBR04, pPLXBus) clFindBR_IMPL(pGpu, pCl, pBR03Bus, pBR04Bus, pBRNotBR04A03, pNoUnsupportedBRFound, pNoOnboardBR04, pPLXBus) +#endif //__nvoc_chipset_h_disabled + +#define clFindBR_HAL(pGpu, pCl, pBR03Bus, pBR04Bus, pBRNotBR04A03, pNoUnsupportedBRFound, pNoOnboardBR04, pPLXBus) clFindBR(pGpu, pCl, pBR03Bus, pBR04Bus, pBRNotBR04A03, pNoUnsupportedBRFound, pNoOnboardBR04, pPLXBus) + +void clSearchBR04_IMPL(struct OBJCL *pCl, NvU8 *pBR04BusArray, NvU8 *pBR04RevArray, NvU8 *pBR04Count); + +#ifdef __nvoc_chipset_h_disabled +static inline void clSearchBR04(struct OBJCL *pCl, NvU8 *pBR04BusArray, NvU8 *pBR04RevArray, NvU8 *pBR04Count) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clSearchBR04(pCl, pBR04BusArray, pBR04RevArray, pBR04Count) clSearchBR04_IMPL(pCl, pBR04BusArray, pBR04RevArray, pBR04Count) +#endif //__nvoc_chipset_h_disabled + +#define clSearchBR04_HAL(pCl, pBR04BusArray, pBR04RevArray, pBR04Count) clSearchBR04(pCl, pBR04BusArray, pBR04RevArray, pBR04Count) + +NV_STATUS clPcieGetMaxCapableLinkWidth_IMPL(struct OBJCL *pCl, struct OBJGPU *pGpu, NvU32 *maxCapableLinkWidth); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieGetMaxCapableLinkWidth(struct OBJCL *pCl, struct OBJGPU *pGpu, NvU32 *maxCapableLinkWidth) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieGetMaxCapableLinkWidth(pCl, pGpu, maxCapableLinkWidth) clPcieGetMaxCapableLinkWidth_IMPL(pCl, pGpu, maxCapableLinkWidth) +#endif //__nvoc_chipset_h_disabled + +#define clPcieGetMaxCapableLinkWidth_HAL(pCl, pGpu, maxCapableLinkWidth) clPcieGetMaxCapableLinkWidth(pCl, pGpu, maxCapableLinkWidth) + +NV_STATUS clPcieIsRelaxedOrderingSafe_IMPL(struct OBJCL *pCl, struct OBJGPU *pGpu, NvBool *result); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieIsRelaxedOrderingSafe(struct OBJCL *pCl, struct OBJGPU *pGpu, NvBool *result) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieIsRelaxedOrderingSafe(pCl, pGpu, result) clPcieIsRelaxedOrderingSafe_IMPL(pCl, pGpu, result) +#endif //__nvoc_chipset_h_disabled + +#define clPcieIsRelaxedOrderingSafe_HAL(pCl, pGpu, result) clPcieIsRelaxedOrderingSafe(pCl, pGpu, result) + +NV_STATUS clStoreBusTopologyCache_IMPL(struct OBJCL *pCl, NvU32 secDomain, NvU16 secBus); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clStoreBusTopologyCache(struct OBJCL *pCl, NvU32 secDomain, NvU16 secBus) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clStoreBusTopologyCache(pCl, secDomain, secBus) clStoreBusTopologyCache_IMPL(pCl, secDomain, secBus) +#endif //__nvoc_chipset_h_disabled + +#define clStoreBusTopologyCache_HAL(pCl, secDomain, secBus) clStoreBusTopologyCache(pCl, secDomain, secBus) + +void 
clFreeBusTopologyCache_IMPL(struct OBJCL *pCl); + +#ifdef __nvoc_chipset_h_disabled +static inline void clFreeBusTopologyCache(struct OBJCL *pCl) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clFreeBusTopologyCache(pCl) clFreeBusTopologyCache_IMPL(pCl) +#endif //__nvoc_chipset_h_disabled + +#define clFreeBusTopologyCache_HAL(pCl) clFreeBusTopologyCache(pCl) + +NvBool clAreGpusBehindSameBridge_IMPL(struct OBJCL *pCl, struct OBJGPU *pGpu1, struct OBJGPU *pGpu2); + +#ifdef __nvoc_chipset_h_disabled +static inline NvBool clAreGpusBehindSameBridge(struct OBJCL *pCl, struct OBJGPU *pGpu1, struct OBJGPU *pGpu2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_FALSE; +} +#else //__nvoc_chipset_h_disabled +#define clAreGpusBehindSameBridge(pCl, pGpu1, pGpu2) clAreGpusBehindSameBridge_IMPL(pCl, pGpu1, pGpu2) +#endif //__nvoc_chipset_h_disabled + +#define clAreGpusBehindSameBridge_HAL(pCl, pGpu1, pGpu2) clAreGpusBehindSameBridge(pCl, pGpu1, pGpu2) + +NvBool clIsL1MaskEnabledForUpstreamPort_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1); + +#ifdef __nvoc_chipset_h_disabled +static inline NvBool clIsL1MaskEnabledForUpstreamPort(struct OBJGPU *arg0, struct OBJCL *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_FALSE; +} +#else //__nvoc_chipset_h_disabled +#define clIsL1MaskEnabledForUpstreamPort(arg0, arg1) clIsL1MaskEnabledForUpstreamPort_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#define clIsL1MaskEnabledForUpstreamPort_HAL(arg0, arg1) clIsL1MaskEnabledForUpstreamPort(arg0, arg1) + +NV_STATUS clControlL0sL1LinkControlUpstreamPort_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, NvBool arg2); + +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clControlL0sL1LinkControlUpstreamPort(struct OBJGPU *arg0, struct OBJCL *arg1, NvBool arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clControlL0sL1LinkControlUpstreamPort(arg0, arg1, arg2) clControlL0sL1LinkControlUpstreamPort_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +#define clControlL0sL1LinkControlUpstreamPort_HAL(arg0, arg1, arg2) clControlL0sL1LinkControlUpstreamPort(arg0, arg1, arg2) + +NvBool clRootportNeedsNosnoopWAR_FWCLIENT(struct OBJGPU *arg0, struct OBJCL *arg1); + +#ifdef __nvoc_chipset_h_disabled +static inline NvBool clRootportNeedsNosnoopWAR(struct OBJGPU *arg0, struct OBJCL *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_FALSE; +} +#else //__nvoc_chipset_h_disabled +#define clRootportNeedsNosnoopWAR(arg0, arg1) clRootportNeedsNosnoopWAR_FWCLIENT(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#define clRootportNeedsNosnoopWAR_HAL(arg0, arg1) clRootportNeedsNosnoopWAR(arg0, arg1) + +NV_STATUS clConstruct_IMPL(struct OBJCL *arg_pCl); +#define __nvoc_clConstruct(arg_pCl) clConstruct_IMPL(arg_pCl) +void clDestruct_IMPL(struct OBJCL *pCl); +#define __nvoc_clDestruct(pCl) clDestruct_IMPL(pCl) +NvBool clUpstreamVgaDecodeEnabled_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1); +#ifdef __nvoc_chipset_h_disabled +static inline NvBool clUpstreamVgaDecodeEnabled(struct OBJGPU *arg0, struct OBJCL *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_FALSE; +} +#else //__nvoc_chipset_h_disabled +#define clUpstreamVgaDecodeEnabled(arg0, arg1) clUpstreamVgaDecodeEnabled_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +NV_STATUS clPcieGetRootGenSpeed_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, 
NvU8 *arg2); +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieGetRootGenSpeed(struct OBJGPU *arg0, struct OBJCL *arg1, NvU8 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieGetRootGenSpeed(arg0, arg1, arg2) clPcieGetRootGenSpeed_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +NV_STATUS clPcieGetDownstreamPortLinkCap2_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, NvU32 *arg2); +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clPcieGetDownstreamPortLinkCap2(struct OBJGPU *arg0, struct OBJCL *arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clPcieGetDownstreamPortLinkCap2(arg0, arg1, arg2) clPcieGetDownstreamPortLinkCap2_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +NV_STATUS clCheckUpstreamLtrSupport_IMPL(struct OBJGPU *arg0, struct OBJCL *arg1, NvBool *arg2); +#ifdef __nvoc_chipset_h_disabled +static inline NV_STATUS clCheckUpstreamLtrSupport(struct OBJGPU *arg0, struct OBJCL *arg1, NvBool *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_chipset_h_disabled +#define clCheckUpstreamLtrSupport(arg0, arg1, arg2) clCheckUpstreamLtrSupport_IMPL(arg0, arg1, arg2) +#endif //__nvoc_chipset_h_disabled + +void clSyncWithGsp_IMPL(struct OBJCL *arg0, GspSystemInfo *arg1); +#ifdef __nvoc_chipset_h_disabled +static inline void clSyncWithGsp(struct OBJCL *arg0, GspSystemInfo *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJCL was disabled!"); +} +#else //__nvoc_chipset_h_disabled +#define clSyncWithGsp(arg0, arg1) clSyncWithGsp_IMPL(arg0, arg1) +#endif //__nvoc_chipset_h_disabled + +#undef PRIVATE_FIELD + + +#endif // CHIPSET_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CHIPSET_NVOC_H_ diff --git a/src/nvidia/generated/g_client_nvoc.c b/src/nvidia/generated/g_client_nvoc.c new file mode 100644 index 000000000..99ed93996 --- /dev/null +++ b/src/nvidia/generated/g_client_nvoc.c @@ -0,0 +1,385 @@ +#define NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_client_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x21d236 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_UserInfo(UserInfo*); +void __nvoc_init_funcTable_UserInfo(UserInfo*); +NV_STATUS __nvoc_ctor_UserInfo(UserInfo*); +void __nvoc_init_dataField_UserInfo(UserInfo*); +void __nvoc_dtor_UserInfo(UserInfo*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_UserInfo; + +static const struct NVOC_RTTI __nvoc_rtti_UserInfo_UserInfo = { + /*pClassDef=*/ &__nvoc_class_def_UserInfo, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_UserInfo, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserInfo_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserInfo, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserInfo_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserInfo, 
__nvoc_base_RsShared), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_UserInfo = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_UserInfo_UserInfo, + &__nvoc_rtti_UserInfo_RsShared, + &__nvoc_rtti_UserInfo_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo = +{ + /*classInfo=*/ { + /*size=*/ sizeof(UserInfo), + /*classId=*/ classId(UserInfo), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "UserInfo", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_UserInfo, + /*pCastInfo=*/ &__nvoc_castinfo_UserInfo, + /*pExportInfo=*/ &__nvoc_export_info_UserInfo +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_UserInfo = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_UserInfo(UserInfo *pThis) { + __nvoc_userinfoDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_UserInfo(UserInfo *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_UserInfo(UserInfo *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail_RsShared; + __nvoc_init_dataField_UserInfo(pThis); + + status = __nvoc_userinfoConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail__init; + goto __nvoc_ctor_UserInfo_exit; // Success + +__nvoc_ctor_UserInfo_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_UserInfo_fail_RsShared: +__nvoc_ctor_UserInfo_exit: + + return status; +} + +static void __nvoc_init_funcTable_UserInfo_1(UserInfo *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_UserInfo(UserInfo *pThis) { + __nvoc_init_funcTable_UserInfo_1(pThis); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_UserInfo(UserInfo *pThis) { + pThis->__nvoc_pbase_UserInfo = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_UserInfo(pThis); +} + +NV_STATUS __nvoc_objCreate_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + UserInfo *pThis; + + pThis = portMemAllocNonPaged(sizeof(UserInfo)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(UserInfo)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_UserInfo); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_UserInfo(pThis); + status = __nvoc_ctor_UserInfo(pThis); + if (status != NV_OK) goto __nvoc_objCreate_UserInfo_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_UserInfo_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_UserInfo(ppThis, pParent, createFlags); + + return status; +} + +#ifdef DEBUG 
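/*
 * Debug-only uniqueness guard: each generated translation unit defines a global
 * char named after its NVOC class id (here 0xb23d83, the RmClient class id), so
 * an accidental collision between two class ids would presumably surface as a
 * multiply-defined symbol at link time rather than as silent runtime misbehavior.
 */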
+char __nvoc_class_id_uniqueness_check_0xb23d83 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; + +void __nvoc_init_RmClient(RmClient*); +void __nvoc_init_funcTable_RmClient(RmClient*); +NV_STATUS __nvoc_ctor_RmClient(RmClient*, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RmClient(RmClient*); +void __nvoc_dtor_RmClient(RmClient*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClient; + +static const struct NVOC_RTTI __nvoc_rtti_RmClient_RmClient = { + /*pClassDef=*/ &__nvoc_class_def_RmClient, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmClient, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClient_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClient, __nvoc_base_RsClient.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClient_RsClient = { + /*pClassDef=*/ &__nvoc_class_def_RsClient, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClient, __nvoc_base_RsClient), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmClient = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_RmClient_RmClient, + &__nvoc_rtti_RmClient_RsClient, + &__nvoc_rtti_RmClient_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmClient), + /*classId=*/ classId(RmClient), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmClient", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmClient, + /*pCastInfo=*/ &__nvoc_castinfo_RmClient, + /*pExportInfo=*/ &__nvoc_export_info_RmClient +}; + +static NV_STATUS __nvoc_thunk_RmClient_clientValidate(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return rmclientValidate((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pSecInfo); +} + +static NV_STATUS __nvoc_thunk_RmClient_clientFreeResource(struct RsClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return rmclientFreeResource((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pServer, pParams); +} + +static NV_STATUS __nvoc_thunk_RmClient_clientInterMap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return rmclientInterMap((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pMapperRef, pMappableRef, pParams); +} + +static void __nvoc_thunk_RmClient_clientInterUnmap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + rmclientInterUnmap((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pMapperRef, pParams); +} + +static NV_STATUS __nvoc_thunk_RmClient_clientPostProcessPendingFreeList(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef) { + return rmclientPostProcessPendingFreeList((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), ppFirstLowPriRef); +} + +static NV_STATUS __nvoc_thunk_RsClient_rmclientDestructResourceRef(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) { + 
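    /*
     * Base-class thunk: convert the derived RmClient pointer to its embedded
     * RsClient base by adding the offset recorded in the RTTI table, then
     * forward to the default RsClient implementation.
     */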
return clientDestructResourceRef((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pServer, pResourceRef); +} + +static NV_STATUS __nvoc_thunk_RsClient_rmclientValidateNewResourceHandle(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) { + return clientValidateNewResourceHandle((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), hResource, bRestrict); +} + +static NV_STATUS __nvoc_thunk_RsClient_rmclientShareResource(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return clientShareResource((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pResourceRef, pSharePolicy, pCallContext); +} + +static NV_STATUS __nvoc_thunk_RsClient_rmclientUnmapMemory(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return clientUnmapMemory((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClient = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsClient(RsClient*); +void __nvoc_dtor_RmClient(RmClient *pThis) { + __nvoc_rmclientDestruct(pThis); + __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmClient(RmClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsClient(RsClient* , struct PORT_MEM_ALLOCATOR *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RmClient(RmClient *pThis, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsClient(&pThis->__nvoc_base_RsClient, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClient_fail_RsClient; + __nvoc_init_dataField_RmClient(pThis); + + status = __nvoc_rmclientConstruct(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClient_fail__init; + goto __nvoc_ctor_RmClient_exit; // Success + +__nvoc_ctor_RmClient_fail__init: + __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient); +__nvoc_ctor_RmClient_fail_RsClient: +__nvoc_ctor_RmClient_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmClient_1(RmClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__rmclientValidate__ = &rmclientValidate_IMPL; + + pThis->__rmclientFreeResource__ = &rmclientFreeResource_IMPL; + + pThis->__rmclientInterMap__ = &rmclientInterMap_IMPL; + + pThis->__rmclientInterUnmap__ = &rmclientInterUnmap_IMPL; + + pThis->__rmclientPostProcessPendingFreeList__ = &rmclientPostProcessPendingFreeList_IMPL; + + pThis->__nvoc_base_RsClient.__clientValidate__ = &__nvoc_thunk_RmClient_clientValidate; + + pThis->__nvoc_base_RsClient.__clientFreeResource__ = &__nvoc_thunk_RmClient_clientFreeResource; + + pThis->__nvoc_base_RsClient.__clientInterMap__ = &__nvoc_thunk_RmClient_clientInterMap; + + pThis->__nvoc_base_RsClient.__clientInterUnmap__ = &__nvoc_thunk_RmClient_clientInterUnmap; + + pThis->__nvoc_base_RsClient.__clientPostProcessPendingFreeList__ = &__nvoc_thunk_RmClient_clientPostProcessPendingFreeList; + + pThis->__rmclientDestructResourceRef__ = &__nvoc_thunk_RsClient_rmclientDestructResourceRef; + + 
pThis->__rmclientValidateNewResourceHandle__ = &__nvoc_thunk_RsClient_rmclientValidateNewResourceHandle; + + pThis->__rmclientShareResource__ = &__nvoc_thunk_RsClient_rmclientShareResource; + + pThis->__rmclientUnmapMemory__ = &__nvoc_thunk_RsClient_rmclientUnmapMemory; +} + +void __nvoc_init_funcTable_RmClient(RmClient *pThis) { + __nvoc_init_funcTable_RmClient_1(pThis); +} + +void __nvoc_init_RsClient(RsClient*); +void __nvoc_init_RmClient(RmClient *pThis) { + pThis->__nvoc_pbase_RmClient = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsClient.__nvoc_base_Object; + pThis->__nvoc_pbase_RsClient = &pThis->__nvoc_base_RsClient; + __nvoc_init_RsClient(&pThis->__nvoc_base_RsClient); + __nvoc_init_funcTable_RmClient(pThis); +} + +NV_STATUS __nvoc_objCreate_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RmClient *pThis; + + pThis = portMemAllocNonPaged(sizeof(RmClient)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RmClient)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmClient); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsClient.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsClient.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RmClient(pThis); + status = __nvoc_ctor_RmClient(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RmClient_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RmClient_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct PORT_MEM_ALLOCATOR * arg_pAllocator = va_arg(args, struct PORT_MEM_ALLOCATOR *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RmClient(ppThis, pParent, createFlags, arg_pAllocator, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_client_nvoc.h b/src/nvidia/generated/g_client_nvoc.h new file mode 100644 index 000000000..499e23af5 --- /dev/null +++ b/src/nvidia/generated/g_client_nvoc.h @@ -0,0 +1,323 @@ +#ifndef _G_CLIENT_NVOC_H_ +#define _G_CLIENT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_client_nvoc.h" + +#ifndef _CLIENT_H_ +#define _CLIENT_H_ + +#include "ctrl/ctrl0000/ctrl0000proc.h" // NV_PROC_NAME_MAX_LENGTH +#include "containers/btree.h" +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_client.h" +#include "rmapi/resource.h" +#include "rmapi/event.h" +#include "nvsecurityinfo.h" + +// event information definitions +typedef struct _def_client_system_event_info CLI_SYSTEM_EVENT_INFO, *PCLI_SYSTEM_EVENT_INFO; + +/** + * This ref-counted object is shared by all clients that were registered under + * the same user and is used to identify clients from the same user. + */ +#ifdef NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct UserInfo { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsShared __nvoc_base_RsShared; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + struct UserInfo *__nvoc_pbase_UserInfo; + PUID_TOKEN pUidToken; +}; + +#ifndef __NVOC_CLASS_UserInfo_TYPEDEF__ +#define __NVOC_CLASS_UserInfo_TYPEDEF__ +typedef struct UserInfo UserInfo; +#endif /* __NVOC_CLASS_UserInfo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserInfo +#define __nvoc_class_id_UserInfo 0x21d236 +#endif /* __nvoc_class_id_UserInfo */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo; + +#define __staticCast_UserInfo(pThis) \ + ((pThis)->__nvoc_pbase_UserInfo) + +#ifdef __nvoc_client_h_disabled +#define __dynamicCast_UserInfo(pThis) ((UserInfo*)NULL) +#else //__nvoc_client_h_disabled +#define __dynamicCast_UserInfo(pThis) \ + ((UserInfo*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(UserInfo))) +#endif //__nvoc_client_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_UserInfo(UserInfo**, Dynamic*, NvU32); +#define __objCreate_UserInfo(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_UserInfo((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS userinfoConstruct_IMPL(struct UserInfo *arg_pUserInfo); +#define __nvoc_userinfoConstruct(arg_pUserInfo) userinfoConstruct_IMPL(arg_pUserInfo) +void userinfoDestruct_IMPL(struct UserInfo *pUserInfo); +#define __nvoc_userinfoDestruct(pUserInfo) userinfoDestruct_IMPL(pUserInfo) +#undef PRIVATE_FIELD + + +// Flags for RmClient +#define RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT 0x00000001 +#define RMAPI_CLIENT_FLAG_DELETE_PENDING 0x00000002 + +// Values for client debugger state +#define RMAPI_CLIENT_DEBUGGER_STATE_NOT_SET 0x00000000 +#define RMAPI_CLIENT_DEBUGGER_STATE_COMPUTE_ACTIVE 0x00000001 +#define RMAPI_CLIENT_DEBUGGER_STATE_DEBUG_ACTIVE 0x00000002 + +#ifdef NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmClient { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsClient __nvoc_base_RsClient; + struct Object *__nvoc_pbase_Object; + struct RsClient *__nvoc_pbase_RsClient; + struct RmClient 
*__nvoc_pbase_RmClient; + NV_STATUS (*__rmclientValidate__)(struct RmClient *, const API_SECURITY_INFO *); + NV_STATUS (*__rmclientFreeResource__)(struct RmClient *, struct RsServer *, struct RS_RES_FREE_PARAMS_INTERNAL *); + NV_STATUS (*__rmclientInterMap__)(struct RmClient *, struct RsResourceRef *, struct RsResourceRef *, struct RS_INTER_MAP_PARAMS *); + void (*__rmclientInterUnmap__)(struct RmClient *, struct RsResourceRef *, struct RS_INTER_UNMAP_PARAMS *); + NV_STATUS (*__rmclientPostProcessPendingFreeList__)(struct RmClient *, struct RsResourceRef **); + NV_STATUS (*__rmclientDestructResourceRef__)(struct RmClient *, RsServer *, struct RsResourceRef *); + NV_STATUS (*__rmclientValidateNewResourceHandle__)(struct RmClient *, NvHandle, NvBool); + NV_STATUS (*__rmclientShareResource__)(struct RmClient *, struct RsResourceRef *, RS_SHARE_POLICY *, struct CALL_CONTEXT *); + NV_STATUS (*__rmclientUnmapMemory__)(struct RmClient *, struct RsResourceRef *, struct RS_LOCK_INFO *, struct RsCpuMapping **, API_SECURITY_INFO *); + RS_PRIV_LEVEL cachedPrivilege; + NvBool bIsRootNonPriv; + NvU32 ProcID; + NvU32 SubProcessID; + char SubProcessName[100]; + NvBool bIsSubProcessDisabled; + NvU32 Flags; + NvU32 ClientDebuggerState; + void *pOSInfo; + char name[100]; + CLI_SYSTEM_EVENT_INFO CliSysEventInfo; + PSECURITY_TOKEN pSecurityToken; + struct UserInfo *pUserInfo; + NvBool bIsClientVirtualMode; + PNODE pCliSyncGpuBoostTree; +}; + +#ifndef __NVOC_CLASS_RmClient_TYPEDEF__ +#define __NVOC_CLASS_RmClient_TYPEDEF__ +typedef struct RmClient RmClient; +#endif /* __NVOC_CLASS_RmClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmClient +#define __nvoc_class_id_RmClient 0xb23d83 +#endif /* __nvoc_class_id_RmClient */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient; + +#define __staticCast_RmClient(pThis) \ + ((pThis)->__nvoc_pbase_RmClient) + +#ifdef __nvoc_client_h_disabled +#define __dynamicCast_RmClient(pThis) ((RmClient*)NULL) +#else //__nvoc_client_h_disabled +#define __dynamicCast_RmClient(pThis) \ + ((RmClient*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmClient))) +#endif //__nvoc_client_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmClient(RmClient**, Dynamic*, NvU32, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RmClient(ppNewObj, pParent, createFlags, arg_pAllocator, arg_pParams) \ + __nvoc_objCreate_RmClient((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pAllocator, arg_pParams) + +#define rmclientValidate(pClient, pSecInfo) rmclientValidate_DISPATCH(pClient, pSecInfo) +#define rmclientFreeResource(pClient, pServer, pParams) rmclientFreeResource_DISPATCH(pClient, pServer, pParams) +#define rmclientInterMap(pClient, pMapperRef, pMappableRef, pParams) rmclientInterMap_DISPATCH(pClient, pMapperRef, pMappableRef, pParams) +#define rmclientInterUnmap(pClient, pMapperRef, pParams) rmclientInterUnmap_DISPATCH(pClient, pMapperRef, pParams) +#define rmclientPostProcessPendingFreeList(pClient, ppFirstLowPriRef) rmclientPostProcessPendingFreeList_DISPATCH(pClient, ppFirstLowPriRef) +#define rmclientDestructResourceRef(pClient, pServer, pResourceRef) rmclientDestructResourceRef_DISPATCH(pClient, pServer, pResourceRef) +#define rmclientValidateNewResourceHandle(pClient, hResource, bRestrict) rmclientValidateNewResourceHandle_DISPATCH(pClient, hResource, bRestrict) +#define rmclientShareResource(pClient, 
pResourceRef, pSharePolicy, pCallContext) rmclientShareResource_DISPATCH(pClient, pResourceRef, pSharePolicy, pCallContext) +#define rmclientUnmapMemory(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) rmclientUnmapMemory_DISPATCH(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) +NV_STATUS rmclientValidate_IMPL(struct RmClient *pClient, const API_SECURITY_INFO *pSecInfo); + +static inline NV_STATUS rmclientValidate_DISPATCH(struct RmClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return pClient->__rmclientValidate__(pClient, pSecInfo); +} + +NV_STATUS rmclientFreeResource_IMPL(struct RmClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS rmclientFreeResource_DISPATCH(struct RmClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return pClient->__rmclientFreeResource__(pClient, pServer, pParams); +} + +NV_STATUS rmclientInterMap_IMPL(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS rmclientInterMap_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return pClient->__rmclientInterMap__(pClient, pMapperRef, pMappableRef, pParams); +} + +void rmclientInterUnmap_IMPL(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams); + +static inline void rmclientInterUnmap_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + pClient->__rmclientInterUnmap__(pClient, pMapperRef, pParams); +} + +NV_STATUS rmclientPostProcessPendingFreeList_IMPL(struct RmClient *pClient, struct RsResourceRef **ppFirstLowPriRef); + +static inline NV_STATUS rmclientPostProcessPendingFreeList_DISPATCH(struct RmClient *pClient, struct RsResourceRef **ppFirstLowPriRef) { + return pClient->__rmclientPostProcessPendingFreeList__(pClient, ppFirstLowPriRef); +} + +static inline NV_STATUS rmclientDestructResourceRef_DISPATCH(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) { + return pClient->__rmclientDestructResourceRef__(pClient, pServer, pResourceRef); +} + +static inline NV_STATUS rmclientValidateNewResourceHandle_DISPATCH(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) { + return pClient->__rmclientValidateNewResourceHandle__(pClient, hResource, bRestrict); +} + +static inline NV_STATUS rmclientShareResource_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return pClient->__rmclientShareResource__(pClient, pResourceRef, pSharePolicy, pCallContext); +} + +static inline NV_STATUS rmclientUnmapMemory_DISPATCH(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return pClient->__rmclientUnmapMemory__(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +NV_STATUS rmclientConstruct_IMPL(struct RmClient *arg_pClient, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_rmclientConstruct(arg_pClient, arg_pAllocator, arg_pParams) rmclientConstruct_IMPL(arg_pClient, arg_pAllocator, arg_pParams) +void rmclientDestruct_IMPL(struct RmClient *pClient); +#define __nvoc_rmclientDestruct(pClient) 
rmclientDestruct_IMPL(pClient) +RS_PRIV_LEVEL rmclientGetCachedPrivilege_IMPL(struct RmClient *pClient); +#ifdef __nvoc_client_h_disabled +static inline RS_PRIV_LEVEL rmclientGetCachedPrivilege(struct RmClient *pClient) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + RS_PRIV_LEVEL ret; + portMemSet(&ret, 0, sizeof(RS_PRIV_LEVEL)); + return ret; +} +#else //__nvoc_client_h_disabled +#define rmclientGetCachedPrivilege(pClient) rmclientGetCachedPrivilege_IMPL(pClient) +#endif //__nvoc_client_h_disabled + +NvBool rmclientIsAdmin_IMPL(struct RmClient *pClient, RS_PRIV_LEVEL privLevel); +#ifdef __nvoc_client_h_disabled +static inline NvBool rmclientIsAdmin(struct RmClient *pClient, RS_PRIV_LEVEL privLevel) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NV_FALSE; +} +#else //__nvoc_client_h_disabled +#define rmclientIsAdmin(pClient, privLevel) rmclientIsAdmin_IMPL(pClient, privLevel) +#endif //__nvoc_client_h_disabled + +void rmclientSetClientFlags_IMPL(struct RmClient *pClient, NvU32 clientFlags); +#ifdef __nvoc_client_h_disabled +static inline void rmclientSetClientFlags(struct RmClient *pClient, NvU32 clientFlags) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); +} +#else //__nvoc_client_h_disabled +#define rmclientSetClientFlags(pClient, clientFlags) rmclientSetClientFlags_IMPL(pClient, clientFlags) +#endif //__nvoc_client_h_disabled + +void *rmclientGetSecurityToken_IMPL(struct RmClient *pClient); +#ifdef __nvoc_client_h_disabled +static inline void *rmclientGetSecurityToken(struct RmClient *pClient) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NULL; +} +#else //__nvoc_client_h_disabled +#define rmclientGetSecurityToken(pClient) rmclientGetSecurityToken_IMPL(pClient) +#endif //__nvoc_client_h_disabled + +NvBool rmclientIsCapableOrAdmin_IMPL(struct RmClient *pClient, NvU32 capability, RS_PRIV_LEVEL privLevel); +#ifdef __nvoc_client_h_disabled +static inline NvBool rmclientIsCapableOrAdmin(struct RmClient *pClient, NvU32 capability, RS_PRIV_LEVEL privLevel) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NV_FALSE; +} +#else //__nvoc_client_h_disabled +#define rmclientIsCapableOrAdmin(pClient, capability, privLevel) rmclientIsCapableOrAdmin_IMPL(pClient, capability, privLevel) +#endif //__nvoc_client_h_disabled + +NvBool rmclientIsCapable_IMPL(struct RmClient *pClient, NvU32 capability); +#ifdef __nvoc_client_h_disabled +static inline NvBool rmclientIsCapable(struct RmClient *pClient, NvU32 capability) { + NV_ASSERT_FAILED_PRECOMP("RmClient was disabled!"); + return NV_FALSE; +} +#else //__nvoc_client_h_disabled +#define rmclientIsCapable(pClient, capability) rmclientIsCapable_IMPL(pClient, capability) +#endif //__nvoc_client_h_disabled + +#undef PRIVATE_FIELD + + +MAKE_LIST(RmClientList, RmClient*); +extern RmClientList g_clientListBehindGpusLock; +MAKE_LIST(UserInfoList, UserInfo*); +extern UserInfoList g_userInfoList; + + +// +// Convenience rmclientXxxByHandle util macros. Ideally, code operates on +// pClient directly instead of hClient but providing these for compatibility +// to hClient-heavy code. 
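// For example (illustrative only, not part of the generated header), code that
// only holds an hClient could do:
//
//     RS_PRIV_LEVEL level = rmclientGetCachedPrivilegeByHandle(hClient);
//     NvBool bAdmin = rmclientIsAdminByHandle(hClient, level);
//
// instead of first resolving the RmClient pointer and calling
// rmclientGetCachedPrivilege()/rmclientIsAdmin() on it directly.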
+// +RS_PRIV_LEVEL rmclientGetCachedPrivilegeByHandle(NvHandle hClient); +NvBool rmclientIsAdminByHandle(NvHandle hClient, RS_PRIV_LEVEL privLevel); +NvBool rmclientSetClientFlagsByHandle(NvHandle hClient, NvU32 clientFlags); +void rmclientPromoteDebuggerStateByHandle(NvHandle hClient, NvU32 newMinimumState); +void *rmclientGetSecurityTokenByHandle(NvHandle hClient); +NV_STATUS rmclientUserClientSecurityCheckByHandle(NvHandle hClient, const API_SECURITY_INFO *pSecInfo); +NvBool rmclientIsCapableOrAdminByHandle(NvHandle hClient, NvU32 capability, RS_PRIV_LEVEL privLevel); +NvBool rmclientIsCapableByHandle(NvHandle hClient, NvU32 capability); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CLIENT_NVOC_H_ diff --git a/src/nvidia/generated/g_client_resource_nvoc.c b/src/nvidia/generated/g_client_resource_nvoc.c new file mode 100644 index 000000000..217aa4107 --- /dev/null +++ b/src/nvidia/generated/g_client_resource_nvoc.c @@ -0,0 +1,1763 @@ +#define NVOC_CLIENT_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_client_resource_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x37a701 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_RmClientResource(RmClientResource*); +void __nvoc_init_funcTable_RmClientResource(RmClientResource*); +NV_STATUS __nvoc_ctor_RmClientResource(RmClientResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RmClientResource(RmClientResource*); +void __nvoc_dtor_RmClientResource(RmClientResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClientResource; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_RmClientResource = { + /*pClassDef=*/ &__nvoc_class_def_RmClientResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmClientResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_RsClientResource = { + /*pClassDef=*/ &__nvoc_class_def_RsClientResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_RsClientResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_RmResourceCommon), +}; + +static const struct 
NVOC_RTTI __nvoc_rtti_RmClientResource_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmClientResource_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmClientResource, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmClientResource = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_RmClientResource_RmClientResource, + &__nvoc_rtti_RmClientResource_Notifier, + &__nvoc_rtti_RmClientResource_INotifier, + &__nvoc_rtti_RmClientResource_RmResourceCommon, + &__nvoc_rtti_RmClientResource_RsClientResource, + &__nvoc_rtti_RmClientResource_RsResource, + &__nvoc_rtti_RmClientResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmClientResource), + /*classId=*/ classId(RmClientResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmClientResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmClientResource, + /*pCastInfo=*/ &__nvoc_castinfo_RmClientResource, + /*pExportInfo=*/ &__nvoc_export_info_RmClientResource +}; + +static NvBool __nvoc_thunk_RmClientResource_resAccessCallback(struct RsResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return cliresAccessCallback((struct RmClientResource *)(((unsigned char *)pRmCliRes) - __nvoc_rtti_RmClientResource_RsResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NvBool __nvoc_thunk_RmClientResource_resShareCallback(struct RsResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return cliresShareCallback((struct RmClientResource *)(((unsigned char *)pRmCliRes) - __nvoc_rtti_RmClientResource_RsResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresControl(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresUnmap(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresMapTo(struct RmClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pParams); +} + +static void __nvoc_thunk_Notifier_cliresSetNotificationShare(struct RmClientResource *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresControlFilter(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char 
*)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_cliresAddAdditionalDependants(struct RsClient *pClient, struct RmClientResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_cliresGetRefCount(struct RmClientResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_cliresUnregisterEvent(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_cliresCanCopy(struct RmClientResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresControl_Prologue(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl_Prologue((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_cliresPreDestruct(struct RmClientResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresUnmapFrom(struct RmClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_cliresGetNotificationListPtr(struct RmClientResource *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset)); +} + +static void __nvoc_thunk_RsResource_cliresControl_Epilogue(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + resControl_Epilogue((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_cliresGetNotificationShare(struct RmClientResource *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresControlLookup(struct RmClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmClientResource_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_cliresMap(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_RmClientResource_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_cliresGetOrAllocNotifShare(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_RmClientResource_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_RmClientResource[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetCpuInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x102u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetCpuInfo" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4013u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetChipsetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4013u) + /*flags=*/ 0x4013u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x104u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetChipsetInfo" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemSetMemorySize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x107u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemSetMemorySize" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetClassList_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x108u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetClassList" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemNotifyEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x110u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemNotifyEvent" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetPlatformType_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + 
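    /*
     * Each entry in this export table ties an NV0000 control command (methodId)
     * to its handler (pFunc), export flags, required access right, parameter
     * structure size, and owning class.
     */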
/*accessRight=*/0x0u, + /*methodId=*/ 0x111u, + /*paramSize=*/ sizeof(NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetPlatformType" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x121u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemDebugCtrlRmMsg" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetHwbcInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x124u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetHwbcInfo" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetP2pCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x127u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetP2pCaps" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetP2pCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x12bu, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetP2pCapsV2" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetPerfSensorCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x12cu, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetPerfSensorCounters" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetExtendedPerfSensorCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x12eu, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetExtendedPerfSensorCounters" +#endif + }, + { /* [12] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetGpusPowerStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x134u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetGpusPowerStatus" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetPrivilegedStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x135u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetPrivilegedStatus" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetFabricStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x136u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetFabricStatus" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetRmInstanceId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x139u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetRmInstanceId" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetP2pCapsMatrix_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13au, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetP2pCapsMatrix" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13cu, + /*paramSize=*/ sizeof(NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemSyncExternalFabricMgmt" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13du, + /*paramSize=*/ 
sizeof(NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetClientDatabaseInfo" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetBuildVersionV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x13eu, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetBuildVersionV2" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSystemGetFeatures_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x1f0u, + /*paramSize=*/ sizeof(NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSystemGetFeatures" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetAttachedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x201u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetAttachedIds" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetIdInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x202u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetIdInfo" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetInitStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x203u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetInitStatus" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetDeviceIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x204u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetDeviceIds" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetIdInfoV2_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x205u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetIdInfoV2" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetProbedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x214u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetProbedIds" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuAttachIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x215u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_ATTACH_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAttachIds" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuDetachIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x216u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_DETACH_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuDetachIds" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetPciInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x21bu, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetPciInfo" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetSvmSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x240u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetSvmSize" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetUuidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x274u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetUuidInfo" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
cliresCtrlCmdGpuGetUuidFromGpuId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x275u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetUuidFromGpuId" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuModifyGpuDrainState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x278u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuModifyGpuDrainState" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuQueryGpuDrainState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x279u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuQueryGpuDrainState" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuGetMemOpEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x27bu, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuGetMemOpEnable" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuDisableNvlinkInit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x281u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuDisableNvlinkInit" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdLegacyConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x282u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdLegacyConfig" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdIdleChannels_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x283u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdIdleChannels" +#endif + }, + { /* [39] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGsyncGetAttachedIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x301u, + /*paramSize=*/ sizeof(NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGsyncGetAttachedIds" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGsyncGetIdInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x302u, + /*paramSize=*/ sizeof(NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGsyncGetIdInfo" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdEventSetNotification_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x501u, + /*paramSize=*/ sizeof(NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdEventSetNotification" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdEventGetSystemEventStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x502u, + /*paramSize=*/ sizeof(NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdEventGetSystemEventStatus" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdNvdGetDumpSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x601u, + /*paramSize=*/ sizeof(NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdNvdGetDumpSize" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdNvdGetDump_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x602u, + /*paramSize=*/ sizeof(NV0000_CTRL_NVD_GET_DUMP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdNvdGetDump" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdNvdGetTimestamp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x603u, + /*paramSize=*/ sizeof(NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + 
/*func=*/ "cliresCtrlCmdNvdGetTimestamp" +#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdNvdGetNvlogInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x604u, + /*paramSize=*/ sizeof(NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdNvdGetNvlogInfo" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdNvdGetNvlogBufferInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x605u, + /*paramSize=*/ sizeof(NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdNvdGetNvlogBufferInfo" +#endif + }, + { /* [48] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdNvdGetNvlog_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x606u, + /*paramSize=*/ sizeof(NV0000_CTRL_NVD_GET_NVLOG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdNvdGetNvlog" +#endif + }, + { /* [49] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdNvdGetRcerrRpt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x607u, + /*paramSize=*/ sizeof(NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdNvdGetRcerrRpt" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSetSubProcessID_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x901u, + /*paramSize=*/ sizeof(NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSetSubProcessID" +#endif + }, + { /* [51] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x902u, + /*paramSize=*/ sizeof(NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdDisableSubProcessUserdIsolation" +#endif + }, + { /* [52] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSyncGpuBoostInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa01u, + /*paramSize=*/ sizeof(NV0000_SYNC_GPU_BOOST_INFO_PARAMS), + 
/*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSyncGpuBoostInfo" +#endif + }, + { /* [53] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSyncGpuBoostGroupCreate_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*flags=*/ 0x5u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa02u, + /*paramSize=*/ sizeof(NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSyncGpuBoostGroupCreate" +#endif + }, + { /* [54] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSyncGpuBoostGroupDestroy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*flags=*/ 0x5u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa03u, + /*paramSize=*/ sizeof(NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSyncGpuBoostGroupDestroy" +#endif + }, + { /* [55] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdSyncGpuBoostGroupInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa04u, + /*paramSize=*/ sizeof(NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdSyncGpuBoostGroupInfo" +#endif + }, + { /* [56] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuAcctSetAccountingState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb01u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAcctSetAccountingState" +#endif + }, + { /* [57] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuAcctGetAccountingState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb02u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAcctGetAccountingState" +#endif + }, + { /* [58] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuAcctGetProcAccountingInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb03u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAcctGetProcAccountingInfo" +#endif + }, + { /* [59] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
cliresCtrlCmdGpuAcctGetAccountingPids_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb04u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAcctGetAccountingPids" +#endif + }, + { /* [60] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdGpuAcctClearAccountingData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb05u, + /*paramSize=*/ sizeof(NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdGpuAcctClearAccountingData" +#endif + }, + { /* [61] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetAddrSpaceType_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd01u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetAddrSpaceType" +#endif + }, + { /* [62] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetHandleInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd02u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetHandleInfo" +#endif + }, + { /* [63] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetAccessRights_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd03u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetAccessRights" +#endif + }, + { /* [64] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientSetInheritedSharePolicy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd04u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientSetInheritedSharePolicy" +#endif + }, + { /* [65] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientGetChildHandle_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd05u, + /*paramSize=*/ sizeof(NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientGetChildHandle" +#endif + }, + { /* [66] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdClientShareObject_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xd06u, + /*paramSize=*/ sizeof(NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdClientShareObject" +#endif + }, + { /* [67] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixFlushUserCache_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d02u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixFlushUserCache" +#endif + }, + { /* [68] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixExportObjectToFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d05u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixExportObjectToFd" +#endif + }, + { /* [69] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixImportObjectFromFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d06u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixImportObjectFromFd" +#endif + }, + { /* [70] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d08u, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixGetExportObjectInfo" +#endif + }, + { /* [71] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d0au, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixCreateExportObjectFd" +#endif + }, + { /* [72] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixExportObjectsToFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + 
/*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d0bu, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixExportObjectsToFd" +#endif + }, + { /* [73] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3d0cu, + /*paramSize=*/ sizeof(NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_RmClientResource.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cliresCtrlCmdOsUnixImportObjectsFromFd" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClientResource = +{ + /*numEntries=*/ 74, + /*pExportEntries=*/ __nvoc_exported_method_def_RmClientResource +}; + +void __nvoc_dtor_RsClientResource(RsClientResource*); +void __nvoc_dtor_RmResourceCommon(RmResourceCommon*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_RmClientResource(RmClientResource *pThis) { + __nvoc_cliresDestruct(pThis); + __nvoc_dtor_RsClientResource(&pThis->__nvoc_base_RsClientResource); + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmClientResource(RmClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon* ); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_RmClientResource(RmClientResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsClientResource(&pThis->__nvoc_base_RsClientResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_RsClientResource; + status = __nvoc_ctor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_RmResourceCommon; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail_Notifier; + __nvoc_init_dataField_RmClientResource(pThis); + + status = __nvoc_cliresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmClientResource_fail__init; + goto __nvoc_ctor_RmClientResource_exit; // Success + +__nvoc_ctor_RmClientResource_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_RmClientResource_fail_Notifier: + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); +__nvoc_ctor_RmClientResource_fail_RmResourceCommon: + __nvoc_dtor_RsClientResource(&pThis->__nvoc_base_RsClientResource); +__nvoc_ctor_RmClientResource_fail_RsClientResource: +__nvoc_ctor_RmClientResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmClientResource_1(RmClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__cliresAccessCallback__ = &cliresAccessCallback_IMPL; + + pThis->__cliresShareCallback__ = &cliresShareCallback_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + 
pThis->__cliresCtrlCmdSystemGetCpuInfo__ = &cliresCtrlCmdSystemGetCpuInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetFeatures__ = &cliresCtrlCmdSystemGetFeatures_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdSystemGetBuildVersionV2__ = &cliresCtrlCmdSystemGetBuildVersionV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4013u) + pThis->__cliresCtrlCmdSystemGetChipsetInfo__ = &cliresCtrlCmdSystemGetChipsetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__cliresCtrlCmdSystemSetMemorySize__ = &cliresCtrlCmdSystemSetMemorySize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetClassList__ = &cliresCtrlCmdSystemGetClassList_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemNotifyEvent__ = &cliresCtrlCmdSystemNotifyEvent_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdSystemGetPlatformType__ = &cliresCtrlCmdSystemGetPlatformType_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemDebugCtrlRmMsg__ = &cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetHwbcInfo__ = &cliresCtrlCmdSystemGetHwbcInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdSystemGetP2pCaps__ = &cliresCtrlCmdSystemGetP2pCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdSystemGetP2pCapsV2__ = &cliresCtrlCmdSystemGetP2pCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdSystemGetP2pCapsMatrix__ = &cliresCtrlCmdSystemGetP2pCapsMatrix_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetPerfSensorCounters__ = &cliresCtrlCmdSystemGetPerfSensorCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetExtendedPerfSensorCounters__ = &cliresCtrlCmdSystemGetExtendedPerfSensorCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdSystemGetGpusPowerStatus__ = &cliresCtrlCmdSystemGetGpusPowerStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSystemGetPrivilegedStatus__ = &cliresCtrlCmdSystemGetPrivilegedStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdSystemGetFabricStatus__ = &cliresCtrlCmdSystemGetFabricStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__cliresCtrlCmdSystemGetRmInstanceId__ = &cliresCtrlCmdSystemGetRmInstanceId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__cliresCtrlCmdSystemGetClientDatabaseInfo__ = &cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdNvdGetDumpSize__ = &cliresCtrlCmdNvdGetDumpSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdNvdGetDump__ = &cliresCtrlCmdNvdGetDump_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + pThis->__cliresCtrlCmdNvdGetTimestamp__ = &cliresCtrlCmdNvdGetTimestamp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__cliresCtrlCmdNvdGetNvlogInfo__ = 
&cliresCtrlCmdNvdGetNvlogInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__cliresCtrlCmdNvdGetNvlogBufferInfo__ = &cliresCtrlCmdNvdGetNvlogBufferInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__cliresCtrlCmdNvdGetNvlog__ = &cliresCtrlCmdNvdGetNvlog_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdNvdGetRcerrRpt__ = &cliresCtrlCmdNvdGetRcerrRpt_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdClientGetAddrSpaceType__ = &cliresCtrlCmdClientGetAddrSpaceType_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientGetHandleInfo__ = &cliresCtrlCmdClientGetHandleInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientGetAccessRights__ = &cliresCtrlCmdClientGetAccessRights_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientSetInheritedSharePolicy__ = &cliresCtrlCmdClientSetInheritedSharePolicy_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientShareObject__ = &cliresCtrlCmdClientShareObject_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdClientGetChildHandle__ = &cliresCtrlCmdClientGetChildHandle_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdGpuGetAttachedIds__ = &cliresCtrlCmdGpuGetAttachedIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdGpuGetIdInfo__ = &cliresCtrlCmdGpuGetIdInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdGpuGetIdInfoV2__ = &cliresCtrlCmdGpuGetIdInfoV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdGpuGetInitStatus__ = &cliresCtrlCmdGpuGetInitStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__cliresCtrlCmdGpuGetDeviceIds__ = &cliresCtrlCmdGpuGetDeviceIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdGpuGetProbedIds__ = &cliresCtrlCmdGpuGetProbedIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdGpuAttachIds__ = &cliresCtrlCmdGpuAttachIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdGpuDetachIds__ = &cliresCtrlCmdGpuDetachIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuGetSvmSize__ = &cliresCtrlCmdGpuGetSvmSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__cliresCtrlCmdGpuGetPciInfo__ = &cliresCtrlCmdGpuGetPciInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuGetUuidInfo__ = &cliresCtrlCmdGpuGetUuidInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuGetUuidFromGpuId__ = &cliresCtrlCmdGpuGetUuidFromGpuId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__cliresCtrlCmdGpuModifyGpuDrainState__ = &cliresCtrlCmdGpuModifyGpuDrainState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdGpuQueryGpuDrainState__ = &cliresCtrlCmdGpuQueryGpuDrainState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdGpuGetMemOpEnable__ = &cliresCtrlCmdGpuGetMemOpEnable_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__cliresCtrlCmdGpuDisableNvlinkInit__ = &cliresCtrlCmdGpuDisableNvlinkInit_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdLegacyConfig__ = &cliresCtrlCmdLegacyConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdIdleChannels__ = &cliresCtrlCmdIdleChannels_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGsyncGetAttachedIds__ = &cliresCtrlCmdGsyncGetAttachedIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGsyncGetIdInfo__ = &cliresCtrlCmdGsyncGetIdInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdEventSetNotification__ = &cliresCtrlCmdEventSetNotification_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdEventGetSystemEventStatus__ = &cliresCtrlCmdEventGetSystemEventStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixExportObjectToFd__ = &cliresCtrlCmdOsUnixExportObjectToFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixImportObjectFromFd__ = &cliresCtrlCmdOsUnixImportObjectFromFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixGetExportObjectInfo__ = &cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixCreateExportObjectFd__ = &cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixExportObjectsToFd__ = &cliresCtrlCmdOsUnixExportObjectsToFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__cliresCtrlCmdOsUnixImportObjectsFromFd__ = &cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdOsUnixFlushUserCache__ = &cliresCtrlCmdOsUnixFlushUserCache_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__cliresCtrlCmdGpuAcctSetAccountingState__ = &cliresCtrlCmdGpuAcctSetAccountingState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuAcctGetAccountingState__ = &cliresCtrlCmdGpuAcctGetAccountingState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuAcctGetProcAccountingInfo__ = &cliresCtrlCmdGpuAcctGetProcAccountingInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdGpuAcctGetAccountingPids__ = &cliresCtrlCmdGpuAcctGetAccountingPids_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__cliresCtrlCmdGpuAcctClearAccountingData__ = &cliresCtrlCmdGpuAcctClearAccountingData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdSetSubProcessID__ = &cliresCtrlCmdSetSubProcessID_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cliresCtrlCmdDisableSubProcessUserdIsolation__ = &cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdSyncGpuBoostInfo__ = &cliresCtrlCmdSyncGpuBoostInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + pThis->__cliresCtrlCmdSyncGpuBoostGroupCreate__ = &cliresCtrlCmdSyncGpuBoostGroupCreate_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + pThis->__cliresCtrlCmdSyncGpuBoostGroupDestroy__ = &cliresCtrlCmdSyncGpuBoostGroupDestroy_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__cliresCtrlCmdSyncGpuBoostGroupInfo__ = &cliresCtrlCmdSyncGpuBoostGroupInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__cliresCtrlCmdSystemSyncExternalFabricMgmt__ = &cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL; +#endif + + pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__resAccessCallback__ = &__nvoc_thunk_RmClientResource_resAccessCallback; + + pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__resShareCallback__ = &__nvoc_thunk_RmClientResource_resShareCallback; + + pThis->__cliresControl__ = &__nvoc_thunk_RsResource_cliresControl; + + pThis->__cliresUnmap__ = &__nvoc_thunk_RsResource_cliresUnmap; + + pThis->__cliresMapTo__ = &__nvoc_thunk_RsResource_cliresMapTo; + + pThis->__cliresSetNotificationShare__ = &__nvoc_thunk_Notifier_cliresSetNotificationShare; + + pThis->__cliresControlFilter__ = &__nvoc_thunk_RsResource_cliresControlFilter; + + pThis->__cliresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_cliresAddAdditionalDependants; + + pThis->__cliresGetRefCount__ = &__nvoc_thunk_RsResource_cliresGetRefCount; + + pThis->__cliresUnregisterEvent__ = &__nvoc_thunk_Notifier_cliresUnregisterEvent; + + pThis->__cliresCanCopy__ = &__nvoc_thunk_RsResource_cliresCanCopy; + + pThis->__cliresControl_Prologue__ = &__nvoc_thunk_RsResource_cliresControl_Prologue; + + pThis->__cliresPreDestruct__ = &__nvoc_thunk_RsResource_cliresPreDestruct; + + pThis->__cliresUnmapFrom__ = &__nvoc_thunk_RsResource_cliresUnmapFrom; + + pThis->__cliresGetNotificationListPtr__ = &__nvoc_thunk_Notifier_cliresGetNotificationListPtr; + + pThis->__cliresControl_Epilogue__ = &__nvoc_thunk_RsResource_cliresControl_Epilogue; + + pThis->__cliresGetNotificationShare__ = &__nvoc_thunk_Notifier_cliresGetNotificationShare; + + pThis->__cliresControlLookup__ = &__nvoc_thunk_RsResource_cliresControlLookup; + + pThis->__cliresMap__ = &__nvoc_thunk_RsResource_cliresMap; + + pThis->__cliresGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_cliresGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_RmClientResource(RmClientResource *pThis) { + __nvoc_init_funcTable_RmClientResource_1(pThis); +} + +void __nvoc_init_RsClientResource(RsClientResource*); +void __nvoc_init_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_RmClientResource(RmClientResource *pThis) { + pThis->__nvoc_pbase_RmClientResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RsClientResource = &pThis->__nvoc_base_RsClientResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_RsClientResource(&pThis->__nvoc_base_RsClientResource); + __nvoc_init_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_RmClientResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct 
RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RmClientResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(RmClientResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RmClientResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmClientResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsClientResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RmClientResource(pThis); + status = __nvoc_ctor_RmClientResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RmClientResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RmClientResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmClientResource(RmClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RmClientResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_client_resource_nvoc.h b/src/nvidia/generated/g_client_resource_nvoc.h new file mode 100644 index 000000000..b5e0a98a2 --- /dev/null +++ b/src/nvidia/generated/g_client_resource_nvoc.h @@ -0,0 +1,843 @@ +#ifndef _G_CLIENT_RESOURCE_NVOC_H_ +#define _G_CLIENT_RESOURCE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_client_resource_nvoc.h" + +#ifndef _CLIENT_RESOURCE_H_ +#define _CLIENT_RESOURCE_H_ + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_client.h" +#include "rmapi/resource.h" +#include "rmapi/event.h" +#include "rmapi/control.h" + +#include "ctrl/ctrl0000/ctrl0000gpu.h" +#include "ctrl/ctrl0000/ctrl0000gpuacct.h" +#include "ctrl/ctrl0000/ctrl0000gsync.h" +#include "ctrl/ctrl0000/ctrl0000diag.h" +#include "ctrl/ctrl0000/ctrl0000event.h" +#include "ctrl/ctrl0000/ctrl0000nvd.h" +#include "ctrl/ctrl0000/ctrl0000proc.h" +#include "ctrl/ctrl0000/ctrl0000syncgpuboost.h" +#include "ctrl/ctrl0000/ctrl0000gspc.h" +#include "ctrl/ctrl0000/ctrl0000vgpu.h" +#include "ctrl/ctrl0000/ctrl0000client.h" + +/* include appropriate os-specific command header */ +#if defined(NV_UNIX) || defined(NV_QNX) +#include "ctrl/ctrl0000/ctrl0000unix.h" +#endif + +#ifdef NVOC_CLIENT_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmClientResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsClientResource __nvoc_base_RsClientResource; + struct RmResourceCommon __nvoc_base_RmResourceCommon; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RsClientResource *__nvoc_pbase_RsClientResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct RmClientResource *__nvoc_pbase_RmClientResource; + NvBool (*__cliresAccessCallback__)(struct RmClientResource *, struct RsClient *, void *, RsAccessRight); + NvBool (*__cliresShareCallback__)(struct RmClientResource *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__cliresCtrlCmdSystemGetCpuInfo__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetFeatures__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetBuildVersionV2__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetChipsetInfo__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemSetMemorySize__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetClassList__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemNotifyEvent__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetPlatformType__)(struct RmClientResource *, NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemDebugCtrlRmMsg__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetHwbcInfo__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetP2pCaps__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetP2pCapsV2__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetP2pCapsMatrix__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetPerfSensorCounters__)(struct RmClientResource *, 
NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetExtendedPerfSensorCounters__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetGpusPowerStatus__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetPrivilegedStatus__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetFabricStatus__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetRmInstanceId__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemGetClientDatabaseInfo__)(struct RmClientResource *, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdNvdGetDumpSize__)(struct RmClientResource *, NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdNvdGetDump__)(struct RmClientResource *, NV0000_CTRL_NVD_GET_DUMP_PARAMS *); + NV_STATUS (*__cliresCtrlCmdNvdGetTimestamp__)(struct RmClientResource *, NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS *); + NV_STATUS (*__cliresCtrlCmdNvdGetNvlogInfo__)(struct RmClientResource *, NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdNvdGetNvlogBufferInfo__)(struct RmClientResource *, NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdNvdGetNvlog__)(struct RmClientResource *, NV0000_CTRL_NVD_GET_NVLOG_PARAMS *); + NV_STATUS (*__cliresCtrlCmdNvdGetRcerrRpt__)(struct RmClientResource *, NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientGetAddrSpaceType__)(struct RmClientResource *, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientGetHandleInfo__)(struct RmClientResource *, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientGetAccessRights__)(struct RmClientResource *, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientSetInheritedSharePolicy__)(struct RmClientResource *, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientShareObject__)(struct RmClientResource *, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *); + NV_STATUS (*__cliresCtrlCmdClientGetChildHandle__)(struct RmClientResource *, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetAttachedIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetIdInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetIdInfoV2__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetInitStatus__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetDeviceIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetProbedIds__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuAttachIds__)(struct RmClientResource *, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuDetachIds__)(struct RmClientResource *, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetSvmSize__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *); + NV_STATUS 
(*__cliresCtrlCmdGpuGetPciInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetUuidInfo__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetUuidFromGpuId__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuModifyGpuDrainState__)(struct RmClientResource *, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuQueryGpuDrainState__)(struct RmClientResource *, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuGetMemOpEnable__)(struct RmClientResource *, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuDisableNvlinkInit__)(struct RmClientResource *, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *); + NV_STATUS (*__cliresCtrlCmdLegacyConfig__)(struct RmClientResource *, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *); + NV_STATUS (*__cliresCtrlCmdIdleChannels__)(struct RmClientResource *, NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGsyncGetAttachedIds__)(struct RmClientResource *, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGsyncGetIdInfo__)(struct RmClientResource *, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdEventSetNotification__)(struct RmClientResource *, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *); + NV_STATUS (*__cliresCtrlCmdEventGetSystemEventStatus__)(struct RmClientResource *, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixExportObjectToFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixImportObjectFromFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixGetExportObjectInfo__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixCreateExportObjectFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixExportObjectsToFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixImportObjectsFromFd__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *); + NV_STATUS (*__cliresCtrlCmdOsUnixFlushUserCache__)(struct RmClientResource *, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuAcctSetAccountingState__)(struct RmClientResource *, NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuAcctGetAccountingState__)(struct RmClientResource *, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuAcctGetProcAccountingInfo__)(struct RmClientResource *, NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuAcctGetAccountingPids__)(struct RmClientResource *, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS *); + NV_STATUS (*__cliresCtrlCmdGpuAcctClearAccountingData__)(struct RmClientResource *, NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSetSubProcessID__)(struct RmClientResource *, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *); + NV_STATUS (*__cliresCtrlCmdDisableSubProcessUserdIsolation__)(struct RmClientResource *, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSyncGpuBoostInfo__)(struct RmClientResource 
*, NV0000_SYNC_GPU_BOOST_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupCreate__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupDestroy__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSyncGpuBoostGroupInfo__)(struct RmClientResource *, NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *); + NV_STATUS (*__cliresCtrlCmdSystemSyncExternalFabricMgmt__)(struct RmClientResource *, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *); + NV_STATUS (*__cliresControl__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__cliresUnmap__)(struct RmClientResource *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__cliresMapTo__)(struct RmClientResource *, RS_RES_MAP_TO_PARAMS *); + void (*__cliresSetNotificationShare__)(struct RmClientResource *, struct NotifShare *); + NV_STATUS (*__cliresControlFilter__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__cliresAddAdditionalDependants__)(struct RsClient *, struct RmClientResource *, RsResourceRef *); + NvU32 (*__cliresGetRefCount__)(struct RmClientResource *); + NV_STATUS (*__cliresUnregisterEvent__)(struct RmClientResource *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__cliresCanCopy__)(struct RmClientResource *); + NV_STATUS (*__cliresControl_Prologue__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__cliresPreDestruct__)(struct RmClientResource *); + NV_STATUS (*__cliresUnmapFrom__)(struct RmClientResource *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__cliresGetNotificationListPtr__)(struct RmClientResource *); + void (*__cliresControl_Epilogue__)(struct RmClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__cliresGetNotificationShare__)(struct RmClientResource *); + NV_STATUS (*__cliresControlLookup__)(struct RmClientResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__cliresMap__)(struct RmClientResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__cliresGetOrAllocNotifShare__)(struct RmClientResource *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_RmClientResource_TYPEDEF__ +#define __NVOC_CLASS_RmClientResource_TYPEDEF__ +typedef struct RmClientResource RmClientResource; +#endif /* __NVOC_CLASS_RmClientResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmClientResource +#define __nvoc_class_id_RmClientResource 0x37a701 +#endif /* __nvoc_class_id_RmClientResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClientResource; + +#define __staticCast_RmClientResource(pThis) \ + ((pThis)->__nvoc_pbase_RmClientResource) + +#ifdef __nvoc_client_resource_h_disabled +#define __dynamicCast_RmClientResource(pThis) ((RmClientResource*)NULL) +#else //__nvoc_client_resource_h_disabled +#define __dynamicCast_RmClientResource(pThis) \ + ((RmClientResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmClientResource))) +#endif //__nvoc_client_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmClientResource(RmClientResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmClientResource(RmClientResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); 
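/*
 * A minimal usage sketch of the control-call dispatch pattern declared above
 * (illustrative only and guarded out with #if 0; the function and local names
 * below are hypothetical, not produced by NVOC). The cliresCtrlCmd* macros
 * defined further down expand to the corresponding *_DISPATCH helpers, which
 * forward the call through the per-object function pointers installed by
 * __nvoc_init_funcTable_RmClientResource_1() -- normally the matching _IMPL
 * routine, unless the method's export flag disabled it at build time.
 */
#if 0
static NV_STATUS exampleQueryAttachedGpus(RmClientResource *pCliRes)
{
    NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS params;
    portMemSet(&params, 0, sizeof(params));

    /* Expands to cliresCtrlCmdGpuGetAttachedIds_DISPATCH(pCliRes, &params),
     * which calls pCliRes->__cliresCtrlCmdGpuGetAttachedIds__ -- by default
     * cliresCtrlCmdGpuGetAttachedIds_IMPL, per the 0x811u-guarded assignment
     * in the generated .c file. */
    return cliresCtrlCmdGpuGetAttachedIds(pCliRes, &params);
}
#endif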
+#define __objCreate_RmClientResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RmClientResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define cliresAccessCallback(pRmCliRes, pInvokingClient, pAllocParams, accessRight) cliresAccessCallback_DISPATCH(pRmCliRes, pInvokingClient, pAllocParams, accessRight) +#define cliresShareCallback(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy) cliresShareCallback_DISPATCH(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy) +#define cliresCtrlCmdSystemGetCpuInfo(pRmCliRes, pCpuInfoParams) cliresCtrlCmdSystemGetCpuInfo_DISPATCH(pRmCliRes, pCpuInfoParams) +#define cliresCtrlCmdSystemGetFeatures(pRmCliRes, pParams) cliresCtrlCmdSystemGetFeatures_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetBuildVersionV2(pRmCliRes, pParams) cliresCtrlCmdSystemGetBuildVersionV2_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetChipsetInfo(pRmCliRes, pChipsetInfo) cliresCtrlCmdSystemGetChipsetInfo_DISPATCH(pRmCliRes, pChipsetInfo) +#define cliresCtrlCmdSystemSetMemorySize(pRmCliRes, pParams) cliresCtrlCmdSystemSetMemorySize_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetClassList(pRmCliRes, pParams) cliresCtrlCmdSystemGetClassList_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemNotifyEvent(pRmCliRes, pParams) cliresCtrlCmdSystemNotifyEvent_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetPlatformType(pRmCliRes, pSysParams) cliresCtrlCmdSystemGetPlatformType_DISPATCH(pRmCliRes, pSysParams) +#define cliresCtrlCmdSystemDebugCtrlRmMsg(pRmCliRes, pParams) cliresCtrlCmdSystemDebugCtrlRmMsg_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetHwbcInfo(pRmCliRes, pParams) cliresCtrlCmdSystemGetHwbcInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetP2pCaps(pRmCliRes, pP2PParams) cliresCtrlCmdSystemGetP2pCaps_DISPATCH(pRmCliRes, pP2PParams) +#define cliresCtrlCmdSystemGetP2pCapsV2(pRmCliRes, pP2PParams) cliresCtrlCmdSystemGetP2pCapsV2_DISPATCH(pRmCliRes, pP2PParams) +#define cliresCtrlCmdSystemGetP2pCapsMatrix(pRmCliRes, pP2PParams) cliresCtrlCmdSystemGetP2pCapsMatrix_DISPATCH(pRmCliRes, pP2PParams) +#define cliresCtrlCmdSystemGetPerfSensorCounters(pRmCliRes, pParams) cliresCtrlCmdSystemGetPerfSensorCounters_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetExtendedPerfSensorCounters(pRmCliRes, pParams) cliresCtrlCmdSystemGetExtendedPerfSensorCounters_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetGpusPowerStatus(pRmCliRes, pGpusPowerStatus) cliresCtrlCmdSystemGetGpusPowerStatus_DISPATCH(pRmCliRes, pGpusPowerStatus) +#define cliresCtrlCmdSystemGetPrivilegedStatus(pRmCliRes, pParams) cliresCtrlCmdSystemGetPrivilegedStatus_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetFabricStatus(pRmCliRes, pParams) cliresCtrlCmdSystemGetFabricStatus_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemGetRmInstanceId(pRmCliRes, pRmInstanceIdParams) cliresCtrlCmdSystemGetRmInstanceId_DISPATCH(pRmCliRes, pRmInstanceIdParams) +#define cliresCtrlCmdSystemGetClientDatabaseInfo(pRmCliRes, pParams) cliresCtrlCmdSystemGetClientDatabaseInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdNvdGetDumpSize(pRmCliRes, pDumpSizeParams) cliresCtrlCmdNvdGetDumpSize_DISPATCH(pRmCliRes, pDumpSizeParams) +#define cliresCtrlCmdNvdGetDump(pRmCliRes, pDumpParams) cliresCtrlCmdNvdGetDump_DISPATCH(pRmCliRes, pDumpParams) +#define cliresCtrlCmdNvdGetTimestamp(pRmCliRes, pTimestampParams) 
cliresCtrlCmdNvdGetTimestamp_DISPATCH(pRmCliRes, pTimestampParams) +#define cliresCtrlCmdNvdGetNvlogInfo(pRmCliRes, pParams) cliresCtrlCmdNvdGetNvlogInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdNvdGetNvlogBufferInfo(pRmCliRes, pParams) cliresCtrlCmdNvdGetNvlogBufferInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdNvdGetNvlog(pRmCliRes, pParams) cliresCtrlCmdNvdGetNvlog_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdNvdGetRcerrRpt(pRmCliRes, pReportParams) cliresCtrlCmdNvdGetRcerrRpt_DISPATCH(pRmCliRes, pReportParams) +#define cliresCtrlCmdClientGetAddrSpaceType(pRmCliRes, pParams) cliresCtrlCmdClientGetAddrSpaceType_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientGetHandleInfo(pRmCliRes, pParams) cliresCtrlCmdClientGetHandleInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientGetAccessRights(pRmCliRes, pParams) cliresCtrlCmdClientGetAccessRights_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientSetInheritedSharePolicy(pRmCliRes, pParams) cliresCtrlCmdClientSetInheritedSharePolicy_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientShareObject(pRmCliRes, pParams) cliresCtrlCmdClientShareObject_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdClientGetChildHandle(pRmCliRes, pParams) cliresCtrlCmdClientGetChildHandle_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuGetAttachedIds(pRmCliRes, pGpuAttachedIds) cliresCtrlCmdGpuGetAttachedIds_DISPATCH(pRmCliRes, pGpuAttachedIds) +#define cliresCtrlCmdGpuGetIdInfo(pRmCliRes, pGpuIdInfoParams) cliresCtrlCmdGpuGetIdInfo_DISPATCH(pRmCliRes, pGpuIdInfoParams) +#define cliresCtrlCmdGpuGetIdInfoV2(pRmCliRes, pGpuIdInfoParams) cliresCtrlCmdGpuGetIdInfoV2_DISPATCH(pRmCliRes, pGpuIdInfoParams) +#define cliresCtrlCmdGpuGetInitStatus(pRmCliRes, pGpuInitStatusParams) cliresCtrlCmdGpuGetInitStatus_DISPATCH(pRmCliRes, pGpuInitStatusParams) +#define cliresCtrlCmdGpuGetDeviceIds(pRmCliRes, pDeviceIdsParams) cliresCtrlCmdGpuGetDeviceIds_DISPATCH(pRmCliRes, pDeviceIdsParams) +#define cliresCtrlCmdGpuGetProbedIds(pRmCliRes, pGpuProbedIds) cliresCtrlCmdGpuGetProbedIds_DISPATCH(pRmCliRes, pGpuProbedIds) +#define cliresCtrlCmdGpuAttachIds(pRmCliRes, pGpuAttachIds) cliresCtrlCmdGpuAttachIds_DISPATCH(pRmCliRes, pGpuAttachIds) +#define cliresCtrlCmdGpuDetachIds(pRmCliRes, pGpuDetachIds) cliresCtrlCmdGpuDetachIds_DISPATCH(pRmCliRes, pGpuDetachIds) +#define cliresCtrlCmdGpuGetSvmSize(pRmCliRes, pSvmSizeGetParams) cliresCtrlCmdGpuGetSvmSize_DISPATCH(pRmCliRes, pSvmSizeGetParams) +#define cliresCtrlCmdGpuGetPciInfo(pRmCliRes, pPciInfoParams) cliresCtrlCmdGpuGetPciInfo_DISPATCH(pRmCliRes, pPciInfoParams) +#define cliresCtrlCmdGpuGetUuidInfo(pRmCliRes, pParams) cliresCtrlCmdGpuGetUuidInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuGetUuidFromGpuId(pRmCliRes, pParams) cliresCtrlCmdGpuGetUuidFromGpuId_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuModifyGpuDrainState(pRmCliRes, pParams) cliresCtrlCmdGpuModifyGpuDrainState_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuQueryGpuDrainState(pRmCliRes, pParams) cliresCtrlCmdGpuQueryGpuDrainState_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuGetMemOpEnable(pRmCliRes, pMemOpEnableParams) cliresCtrlCmdGpuGetMemOpEnable_DISPATCH(pRmCliRes, pMemOpEnableParams) +#define cliresCtrlCmdGpuDisableNvlinkInit(pRmCliRes, pParams) cliresCtrlCmdGpuDisableNvlinkInit_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdLegacyConfig(pRmCliRes, pParams) cliresCtrlCmdLegacyConfig_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdIdleChannels(pRmCliRes, pParams) 
cliresCtrlCmdIdleChannels_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGsyncGetAttachedIds(pRmCliRes, pGsyncAttachedIds) cliresCtrlCmdGsyncGetAttachedIds_DISPATCH(pRmCliRes, pGsyncAttachedIds) +#define cliresCtrlCmdGsyncGetIdInfo(pRmCliRes, pGsyncIdInfoParams) cliresCtrlCmdGsyncGetIdInfo_DISPATCH(pRmCliRes, pGsyncIdInfoParams) +#define cliresCtrlCmdEventSetNotification(pRmCliRes, pEventSetNotificationParams) cliresCtrlCmdEventSetNotification_DISPATCH(pRmCliRes, pEventSetNotificationParams) +#define cliresCtrlCmdEventGetSystemEventStatus(pRmCliRes, pSystemEventStatusParams) cliresCtrlCmdEventGetSystemEventStatus_DISPATCH(pRmCliRes, pSystemEventStatusParams) +#define cliresCtrlCmdOsUnixExportObjectToFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixExportObjectToFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixImportObjectFromFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixImportObjectFromFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixGetExportObjectInfo(pRmCliRes, pParams) cliresCtrlCmdOsUnixGetExportObjectInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixCreateExportObjectFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixCreateExportObjectFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixExportObjectsToFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixExportObjectsToFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixImportObjectsFromFd(pRmCliRes, pParams) cliresCtrlCmdOsUnixImportObjectsFromFd_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdOsUnixFlushUserCache(pRmCliRes, pAddressSpaceParams) cliresCtrlCmdOsUnixFlushUserCache_DISPATCH(pRmCliRes, pAddressSpaceParams) +#define cliresCtrlCmdGpuAcctSetAccountingState(pRmCliRes, pParams) cliresCtrlCmdGpuAcctSetAccountingState_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuAcctGetAccountingState(pRmCliRes, pParams) cliresCtrlCmdGpuAcctGetAccountingState_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdGpuAcctGetProcAccountingInfo(pRmCliRes, pAcctInfoParams) cliresCtrlCmdGpuAcctGetProcAccountingInfo_DISPATCH(pRmCliRes, pAcctInfoParams) +#define cliresCtrlCmdGpuAcctGetAccountingPids(pRmCliRes, pAcctPidsParams) cliresCtrlCmdGpuAcctGetAccountingPids_DISPATCH(pRmCliRes, pAcctPidsParams) +#define cliresCtrlCmdGpuAcctClearAccountingData(pRmCliRes, pParams) cliresCtrlCmdGpuAcctClearAccountingData_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSetSubProcessID(pRmCliRes, pParams) cliresCtrlCmdSetSubProcessID_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdDisableSubProcessUserdIsolation(pRmCliRes, pParams) cliresCtrlCmdDisableSubProcessUserdIsolation_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSyncGpuBoostInfo(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSyncGpuBoostGroupCreate(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupCreate_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSyncGpuBoostGroupDestroy(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupDestroy_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSyncGpuBoostGroupInfo(pRmCliRes, pParams) cliresCtrlCmdSyncGpuBoostGroupInfo_DISPATCH(pRmCliRes, pParams) +#define cliresCtrlCmdSystemSyncExternalFabricMgmt(pRmCliRes, pExtFabricMgmtParams) cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(pRmCliRes, pExtFabricMgmtParams) +#define cliresControl(pResource, pCallContext, pParams) cliresControl_DISPATCH(pResource, pCallContext, pParams) +#define cliresUnmap(pResource, pCallContext, pCpuMapping) cliresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define cliresMapTo(pResource, 
pParams) cliresMapTo_DISPATCH(pResource, pParams) +#define cliresSetNotificationShare(pNotifier, pNotifShare) cliresSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define cliresControlFilter(pResource, pCallContext, pParams) cliresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define cliresAddAdditionalDependants(pClient, pResource, pReference) cliresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define cliresGetRefCount(pResource) cliresGetRefCount_DISPATCH(pResource) +#define cliresUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) cliresUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define cliresCanCopy(pResource) cliresCanCopy_DISPATCH(pResource) +#define cliresControl_Prologue(pResource, pCallContext, pParams) cliresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define cliresPreDestruct(pResource) cliresPreDestruct_DISPATCH(pResource) +#define cliresUnmapFrom(pResource, pParams) cliresUnmapFrom_DISPATCH(pResource, pParams) +#define cliresGetNotificationListPtr(pNotifier) cliresGetNotificationListPtr_DISPATCH(pNotifier) +#define cliresControl_Epilogue(pResource, pCallContext, pParams) cliresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define cliresGetNotificationShare(pNotifier) cliresGetNotificationShare_DISPATCH(pNotifier) +#define cliresControlLookup(pResource, pParams, ppEntry) cliresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define cliresMap(pResource, pCallContext, pParams, pCpuMapping) cliresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define cliresGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) cliresGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NvBool cliresAccessCallback_IMPL(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + +static inline NvBool cliresAccessCallback_DISPATCH(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pRmCliRes->__cliresAccessCallback__(pRmCliRes, pInvokingClient, pAllocParams, accessRight); +} + +NvBool cliresShareCallback_IMPL(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +static inline NvBool cliresShareCallback_DISPATCH(struct RmClientResource *pRmCliRes, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pRmCliRes->__cliresShareCallback__(pRmCliRes, pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS cliresCtrlCmdSystemGetCpuInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetCpuInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetCpuInfo__(pRmCliRes, pCpuInfoParams); +} + +NV_STATUS cliresCtrlCmdSystemGetFeatures_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetFeatures_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetFeatures__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetBuildVersionV2_IMPL(struct 
RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetBuildVersionV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetBuildVersionV2__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetChipsetInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS *pChipsetInfo); + +static inline NV_STATUS cliresCtrlCmdSystemGetChipsetInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS *pChipsetInfo) { + return pRmCliRes->__cliresCtrlCmdSystemGetChipsetInfo__(pRmCliRes, pChipsetInfo); +} + +NV_STATUS cliresCtrlCmdSystemSetMemorySize_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemSetMemorySize_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemSetMemorySize__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetClassList_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetClassList_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetClassList__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemNotifyEvent_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemNotifyEvent_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemNotifyEvent__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetPlatformType_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS *pSysParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetPlatformType_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS *pSysParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetPlatformType__(pRmCliRes, pSysParams); +} + +NV_STATUS cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemDebugCtrlRmMsg_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemDebugCtrlRmMsg__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetHwbcInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetHwbcInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetHwbcInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetP2pCaps_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS *pP2PParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetP2pCaps_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS *pP2PParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetP2pCaps__(pRmCliRes, pP2PParams); +} + +NV_STATUS cliresCtrlCmdSystemGetP2pCapsV2_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS *pP2PParams); + +static 
inline NV_STATUS cliresCtrlCmdSystemGetP2pCapsV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS *pP2PParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetP2pCapsV2__(pRmCliRes, pP2PParams); +} + +NV_STATUS cliresCtrlCmdSystemGetP2pCapsMatrix_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS *pP2PParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetP2pCapsMatrix_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS *pP2PParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetP2pCapsMatrix__(pRmCliRes, pP2PParams); +} + +NV_STATUS cliresCtrlCmdSystemGetPerfSensorCounters_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetPerfSensorCounters_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetPerfSensorCounters__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetExtendedPerfSensorCounters_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetExtendedPerfSensorCounters_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetExtendedPerfSensorCounters__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetGpusPowerStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS *pGpusPowerStatus); + +static inline NV_STATUS cliresCtrlCmdSystemGetGpusPowerStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS *pGpusPowerStatus) { + return pRmCliRes->__cliresCtrlCmdSystemGetGpusPowerStatus__(pRmCliRes, pGpusPowerStatus); +} + +NV_STATUS cliresCtrlCmdSystemGetPrivilegedStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetPrivilegedStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetPrivilegedStatus__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetFabricStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetFabricStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetFabricStatus__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemGetRmInstanceId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetRmInstanceId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams) { + return pRmCliRes->__cliresCtrlCmdSystemGetRmInstanceId__(pRmCliRes, pRmInstanceIdParams); +} + +NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams) { + return 
pRmCliRes->__cliresCtrlCmdSystemGetClientDatabaseInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdNvdGetDumpSize_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS *pDumpSizeParams); + +static inline NV_STATUS cliresCtrlCmdNvdGetDumpSize_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS *pDumpSizeParams) { + return pRmCliRes->__cliresCtrlCmdNvdGetDumpSize__(pRmCliRes, pDumpSizeParams); +} + +NV_STATUS cliresCtrlCmdNvdGetDump_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_DUMP_PARAMS *pDumpParams); + +static inline NV_STATUS cliresCtrlCmdNvdGetDump_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_DUMP_PARAMS *pDumpParams) { + return pRmCliRes->__cliresCtrlCmdNvdGetDump__(pRmCliRes, pDumpParams); +} + +NV_STATUS cliresCtrlCmdNvdGetTimestamp_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS *pTimestampParams); + +static inline NV_STATUS cliresCtrlCmdNvdGetTimestamp_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS *pTimestampParams) { + return pRmCliRes->__cliresCtrlCmdNvdGetTimestamp__(pRmCliRes, pTimestampParams); +} + +NV_STATUS cliresCtrlCmdNvdGetNvlogInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdNvdGetNvlogInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdNvdGetNvlogInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdNvdGetNvlogBufferInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdNvdGetNvlogBufferInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdNvdGetNvlogBufferInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdNvdGetNvlog_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_NVLOG_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdNvdGetNvlog_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_NVD_GET_NVLOG_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdNvdGetNvlog__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdNvdGetRcerrRpt_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS *pReportParams); + +static inline NV_STATUS cliresCtrlCmdNvdGetRcerrRpt_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS *pReportParams) { + return pRmCliRes->__cliresCtrlCmdNvdGetRcerrRpt__(pRmCliRes, pReportParams); +} + +NV_STATUS cliresCtrlCmdClientGetAddrSpaceType_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdClientGetAddrSpaceType_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientGetAddrSpaceType__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdClientGetHandleInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdClientGetHandleInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientGetHandleInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdClientGetAccessRights_IMPL(struct RmClientResource *pRmCliRes, 
NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdClientGetAccessRights_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientGetAccessRights__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdClientSetInheritedSharePolicy_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdClientSetInheritedSharePolicy_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientSetInheritedSharePolicy__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdClientShareObject_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdClientShareObject_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientShareObject__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdClientGetChildHandle_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdClientGetChildHandle_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdClientGetChildHandle__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdGpuGetAttachedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds); + +static inline NV_STATUS cliresCtrlCmdGpuGetAttachedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds) { + return pRmCliRes->__cliresCtrlCmdGpuGetAttachedIds__(pRmCliRes, pGpuAttachedIds); +} + +NV_STATUS cliresCtrlCmdGpuGetIdInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetIdInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetIdInfo__(pRmCliRes, pGpuIdInfoParams); +} + +NV_STATUS cliresCtrlCmdGpuGetIdInfoV2_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetIdInfoV2_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetIdInfoV2__(pRmCliRes, pGpuIdInfoParams); +} + +NV_STATUS cliresCtrlCmdGpuGetInitStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetInitStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetInitStatus__(pRmCliRes, pGpuInitStatusParams); +} + +NV_STATUS cliresCtrlCmdGpuGetDeviceIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetDeviceIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetDeviceIds__(pRmCliRes, pDeviceIdsParams); +} + +NV_STATUS cliresCtrlCmdGpuGetProbedIds_IMPL(struct RmClientResource *pRmCliRes, 
NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds); + +static inline NV_STATUS cliresCtrlCmdGpuGetProbedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds) { + return pRmCliRes->__cliresCtrlCmdGpuGetProbedIds__(pRmCliRes, pGpuProbedIds); +} + +NV_STATUS cliresCtrlCmdGpuAttachIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds); + +static inline NV_STATUS cliresCtrlCmdGpuAttachIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds) { + return pRmCliRes->__cliresCtrlCmdGpuAttachIds__(pRmCliRes, pGpuAttachIds); +} + +NV_STATUS cliresCtrlCmdGpuDetachIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds); + +static inline NV_STATUS cliresCtrlCmdGpuDetachIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds) { + return pRmCliRes->__cliresCtrlCmdGpuDetachIds__(pRmCliRes, pGpuDetachIds); +} + +NV_STATUS cliresCtrlCmdGpuGetSvmSize_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *pSvmSizeGetParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetSvmSize_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *pSvmSizeGetParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetSvmSize__(pRmCliRes, pSvmSizeGetParams); +} + +NV_STATUS cliresCtrlCmdGpuGetPciInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetPciInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetPciInfo__(pRmCliRes, pPciInfoParams); +} + +NV_STATUS cliresCtrlCmdGpuGetUuidInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetUuidInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetUuidInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdGpuGetUuidFromGpuId_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetUuidFromGpuId_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetUuidFromGpuId__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdGpuModifyGpuDrainState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdGpuModifyGpuDrainState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuModifyGpuDrainState__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdGpuQueryGpuDrainState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdGpuQueryGpuDrainState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuQueryGpuDrainState__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdGpuGetMemOpEnable_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams); + +static inline NV_STATUS cliresCtrlCmdGpuGetMemOpEnable_DISPATCH(struct RmClientResource *pRmCliRes, 
NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams) { + return pRmCliRes->__cliresCtrlCmdGpuGetMemOpEnable__(pRmCliRes, pMemOpEnableParams); +} + +NV_STATUS cliresCtrlCmdGpuDisableNvlinkInit_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdGpuDisableNvlinkInit_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuDisableNvlinkInit__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdLegacyConfig_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdLegacyConfig_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdLegacyConfig__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdIdleChannels_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdIdleChannels_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdIdleChannels__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdGsyncGetAttachedIds_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds); + +static inline NV_STATUS cliresCtrlCmdGsyncGetAttachedIds_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds) { + return pRmCliRes->__cliresCtrlCmdGsyncGetAttachedIds__(pRmCliRes, pGsyncAttachedIds); +} + +NV_STATUS cliresCtrlCmdGsyncGetIdInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams); + +static inline NV_STATUS cliresCtrlCmdGsyncGetIdInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams) { + return pRmCliRes->__cliresCtrlCmdGsyncGetIdInfo__(pRmCliRes, pGsyncIdInfoParams); +} + +NV_STATUS cliresCtrlCmdEventSetNotification_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams); + +static inline NV_STATUS cliresCtrlCmdEventSetNotification_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams) { + return pRmCliRes->__cliresCtrlCmdEventSetNotification__(pRmCliRes, pEventSetNotificationParams); +} + +NV_STATUS cliresCtrlCmdEventGetSystemEventStatus_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *pSystemEventStatusParams); + +static inline NV_STATUS cliresCtrlCmdEventGetSystemEventStatus_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *pSystemEventStatusParams) { + return pRmCliRes->__cliresCtrlCmdEventGetSystemEventStatus__(pRmCliRes, pSystemEventStatusParams); +} + +NV_STATUS cliresCtrlCmdOsUnixExportObjectToFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdOsUnixExportObjectToFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixExportObjectToFd__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdOsUnixImportObjectFromFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams); + +static inline NV_STATUS 
cliresCtrlCmdOsUnixImportObjectFromFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECT_FROM_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixImportObjectFromFd__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdOsUnixGetExportObjectInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdOsUnixGetExportObjectInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_GET_EXPORT_OBJECT_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixGetExportObjectInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdOsUnixCreateExportObjectFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdOsUnixCreateExportObjectFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_CREATE_EXPORT_OBJECT_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixCreateExportObjectFd__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdOsUnixExportObjectsToFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdOsUnixExportObjectsToFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_EXPORT_OBJECTS_TO_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixExportObjectsToFd__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdOsUnixImportObjectsFromFd_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdOsUnixImportObjectsFromFd_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_IMPORT_OBJECTS_FROM_FD_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixImportObjectsFromFd__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdOsUnixFlushUserCache_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams); + +static inline NV_STATUS cliresCtrlCmdOsUnixFlushUserCache_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_OS_UNIX_FLUSH_USER_CACHE_PARAMS *pAddressSpaceParams) { + return pRmCliRes->__cliresCtrlCmdOsUnixFlushUserCache__(pRmCliRes, pAddressSpaceParams); +} + +NV_STATUS cliresCtrlCmdGpuAcctSetAccountingState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdGpuAcctSetAccountingState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuAcctSetAccountingState__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdGpuAcctGetAccountingState_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdGpuAcctGetAccountingState_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuAcctGetAccountingState__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdGpuAcctGetProcAccountingInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS *pAcctInfoParams); + +static inline NV_STATUS cliresCtrlCmdGpuAcctGetProcAccountingInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS *pAcctInfoParams) { + return pRmCliRes->__cliresCtrlCmdGpuAcctGetProcAccountingInfo__(pRmCliRes, 
pAcctInfoParams); +} + +NV_STATUS cliresCtrlCmdGpuAcctGetAccountingPids_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS *pAcctPidsParams); + +static inline NV_STATUS cliresCtrlCmdGpuAcctGetAccountingPids_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS *pAcctPidsParams) { + return pRmCliRes->__cliresCtrlCmdGpuAcctGetAccountingPids__(pRmCliRes, pAcctPidsParams); +} + +NV_STATUS cliresCtrlCmdGpuAcctClearAccountingData_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdGpuAcctClearAccountingData_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdGpuAcctClearAccountingData__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSetSubProcessID_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSetSubProcessID_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSetSubProcessID__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdDisableSubProcessUserdIsolation_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdDisableSubProcessUserdIsolation__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSyncGpuBoostInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_SYNC_GPU_BOOST_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSyncGpuBoostInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_SYNC_GPU_BOOST_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSyncGpuBoostInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSyncGpuBoostGroupCreate_IMPL(struct RmClientResource *pRmCliRes, NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSyncGpuBoostGroupCreate_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSyncGpuBoostGroupCreate__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSyncGpuBoostGroupDestroy_IMPL(struct RmClientResource *pRmCliRes, NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSyncGpuBoostGroupDestroy_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSyncGpuBoostGroupDestroy__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSyncGpuBoostGroupInfo_IMPL(struct RmClientResource *pRmCliRes, NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *pParams); + +static inline NV_STATUS cliresCtrlCmdSyncGpuBoostGroupInfo_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *pParams) { + return pRmCliRes->__cliresCtrlCmdSyncGpuBoostGroupInfo__(pRmCliRes, pParams); +} + +NV_STATUS cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams); + +static inline NV_STATUS cliresCtrlCmdSystemSyncExternalFabricMgmt_DISPATCH(struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS 
*pExtFabricMgmtParams) { + return pRmCliRes->__cliresCtrlCmdSystemSyncExternalFabricMgmt__(pRmCliRes, pExtFabricMgmtParams); +} + +static inline NV_STATUS cliresControl_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__cliresControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS cliresUnmap_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__cliresUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS cliresMapTo_DISPATCH(struct RmClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__cliresMapTo__(pResource, pParams); +} + +static inline void cliresSetNotificationShare_DISPATCH(struct RmClientResource *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__cliresSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS cliresControlFilter_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__cliresControlFilter__(pResource, pCallContext, pParams); +} + +static inline void cliresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RmClientResource *pResource, RsResourceRef *pReference) { + pResource->__cliresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 cliresGetRefCount_DISPATCH(struct RmClientResource *pResource) { + return pResource->__cliresGetRefCount__(pResource); +} + +static inline NV_STATUS cliresUnregisterEvent_DISPATCH(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__cliresUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool cliresCanCopy_DISPATCH(struct RmClientResource *pResource) { + return pResource->__cliresCanCopy__(pResource); +} + +static inline NV_STATUS cliresControl_Prologue_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__cliresControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void cliresPreDestruct_DISPATCH(struct RmClientResource *pResource) { + pResource->__cliresPreDestruct__(pResource); +} + +static inline NV_STATUS cliresUnmapFrom_DISPATCH(struct RmClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__cliresUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *cliresGetNotificationListPtr_DISPATCH(struct RmClientResource *pNotifier) { + return pNotifier->__cliresGetNotificationListPtr__(pNotifier); +} + +static inline void cliresControl_Epilogue_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__cliresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline struct NotifShare *cliresGetNotificationShare_DISPATCH(struct RmClientResource *pNotifier) { + return pNotifier->__cliresGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS cliresControlLookup_DISPATCH(struct RmClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__cliresControlLookup__(pResource, pParams, ppEntry); +} + +static inline 
NV_STATUS cliresMap_DISPATCH(struct RmClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__cliresMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS cliresGetOrAllocNotifShare_DISPATCH(struct RmClientResource *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__cliresGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS cliresConstruct_IMPL(struct RmClientResource *arg_pRmCliRes, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_cliresConstruct(arg_pRmCliRes, arg_pCallContext, arg_pParams) cliresConstruct_IMPL(arg_pRmCliRes, arg_pCallContext, arg_pParams) +void cliresDestruct_IMPL(struct RmClientResource *pRmCliRes); +#define __nvoc_cliresDestruct(pRmCliRes) cliresDestruct_IMPL(pRmCliRes) +#undef PRIVATE_FIELD + + +NV_STATUS CliGetSystemP2pCaps(NvU32 *gpuIds, + NvU32 gpuCount, + NvU32 *p2pCaps, + NvU32 *p2pOptimalReadCEs, + NvU32 *p2pOptimalWriteCEs, + NvU8 *p2pCapsStatus, + NvU32 *pBusPeerIds); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CLIENT_RESOURCE_NVOC_H_ diff --git a/src/nvidia/generated/g_compute_instance_subscription_nvoc.c b/src/nvidia/generated/g_compute_instance_subscription_nvoc.c new file mode 100644 index 000000000..8bee30fe8 --- /dev/null +++ b/src/nvidia/generated/g_compute_instance_subscription_nvoc.c @@ -0,0 +1,358 @@ +#define NVOC_COMPUTE_INSTANCE_SUBSCRIPTION_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_compute_instance_subscription_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xd1f238 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ComputeInstanceSubscription; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_ComputeInstanceSubscription(ComputeInstanceSubscription*); +void __nvoc_init_funcTable_ComputeInstanceSubscription(ComputeInstanceSubscription*); +NV_STATUS __nvoc_ctor_ComputeInstanceSubscription(ComputeInstanceSubscription*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_ComputeInstanceSubscription(ComputeInstanceSubscription*); +void __nvoc_dtor_ComputeInstanceSubscription(ComputeInstanceSubscription*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ComputeInstanceSubscription; + +static const struct NVOC_RTTI __nvoc_rtti_ComputeInstanceSubscription_ComputeInstanceSubscription = { + /*pClassDef=*/ &__nvoc_class_def_ComputeInstanceSubscription, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ComputeInstanceSubscription, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_ComputeInstanceSubscription_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ComputeInstanceSubscription, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct 
NVOC_RTTI __nvoc_rtti_ComputeInstanceSubscription_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ComputeInstanceSubscription, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ComputeInstanceSubscription_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ComputeInstanceSubscription, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ComputeInstanceSubscription_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ComputeInstanceSubscription, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ComputeInstanceSubscription_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ComputeInstanceSubscription, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_ComputeInstanceSubscription = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_ComputeInstanceSubscription_ComputeInstanceSubscription, + &__nvoc_rtti_ComputeInstanceSubscription_GpuResource, + &__nvoc_rtti_ComputeInstanceSubscription_RmResource, + &__nvoc_rtti_ComputeInstanceSubscription_RmResourceCommon, + &__nvoc_rtti_ComputeInstanceSubscription_RsResource, + &__nvoc_rtti_ComputeInstanceSubscription_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_ComputeInstanceSubscription = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ComputeInstanceSubscription), + /*classId=*/ classId(ComputeInstanceSubscription), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ComputeInstanceSubscription", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ComputeInstanceSubscription, + /*pCastInfo=*/ &__nvoc_castinfo_ComputeInstanceSubscription, + /*pExportInfo=*/ &__nvoc_export_info_ComputeInstanceSubscription +}; + +static NvBool __nvoc_thunk_ComputeInstanceSubscription_resCanCopy(struct RsResource *arg0) { + return cisubscriptionCanCopy((struct ComputeInstanceSubscription *)(((unsigned char *)arg0) - __nvoc_rtti_ComputeInstanceSubscription_RsResource.offset)); +} + +static NvBool __nvoc_thunk_GpuResource_cisubscriptionShareCallback(struct ComputeInstanceSubscription *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ComputeInstanceSubscription_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_cisubscriptionControl(struct ComputeInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ComputeInstanceSubscription_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_cisubscriptionUnmap(struct ComputeInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + 
__nvoc_rtti_ComputeInstanceSubscription_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_cisubscriptionGetMemInterMapParams(struct ComputeInstanceSubscription *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ComputeInstanceSubscription_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_cisubscriptionGetMemoryMappingDescriptor(struct ComputeInstanceSubscription *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ComputeInstanceSubscription_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_cisubscriptionGetMapAddrSpace(struct ComputeInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ComputeInstanceSubscription_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_cisubscriptionGetInternalObjectHandle(struct ComputeInstanceSubscription *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ComputeInstanceSubscription_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_cisubscriptionControlFilter(struct ComputeInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_cisubscriptionAddAdditionalDependants(struct RsClient *pClient, struct ComputeInstanceSubscription *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_cisubscriptionGetRefCount(struct ComputeInstanceSubscription *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_cisubscriptionCheckMemInterUnmap(struct ComputeInstanceSubscription *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ComputeInstanceSubscription_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_cisubscriptionMapTo(struct ComputeInstanceSubscription *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_cisubscriptionControl_Prologue(struct ComputeInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_cisubscriptionGetRegBaseOffsetAndSize(struct ComputeInstanceSubscription 
*pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ComputeInstanceSubscription_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_cisubscriptionInternalControlForward(struct ComputeInstanceSubscription *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ComputeInstanceSubscription_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_cisubscriptionPreDestruct(struct ComputeInstanceSubscription *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_cisubscriptionUnmapFrom(struct ComputeInstanceSubscription *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_cisubscriptionControl_Epilogue(struct ComputeInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_cisubscriptionControlLookup(struct ComputeInstanceSubscription *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_cisubscriptionMap(struct ComputeInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ComputeInstanceSubscription_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_cisubscriptionAccessCallback(struct ComputeInstanceSubscription *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ComputeInstanceSubscription_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ComputeInstanceSubscription[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) cisubscriptionCtrlCmdGetUuid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc6380101u, + /*paramSize=*/ sizeof(NVC638_CTRL_GET_UUID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ComputeInstanceSubscription.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "cisubscriptionCtrlCmdGetUuid" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_ComputeInstanceSubscription = +{ + /*numEntries=*/ 1, + 
/*pExportEntries=*/ __nvoc_exported_method_def_ComputeInstanceSubscription +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_ComputeInstanceSubscription(ComputeInstanceSubscription *pThis) { + __nvoc_cisubscriptionDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ComputeInstanceSubscription(ComputeInstanceSubscription *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_ComputeInstanceSubscription(ComputeInstanceSubscription *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ComputeInstanceSubscription_fail_GpuResource; + __nvoc_init_dataField_ComputeInstanceSubscription(pThis); + + status = __nvoc_cisubscriptionConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ComputeInstanceSubscription_fail__init; + goto __nvoc_ctor_ComputeInstanceSubscription_exit; // Success + +__nvoc_ctor_ComputeInstanceSubscription_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_ComputeInstanceSubscription_fail_GpuResource: +__nvoc_ctor_ComputeInstanceSubscription_exit: + + return status; +} + +static void __nvoc_init_funcTable_ComputeInstanceSubscription_1(ComputeInstanceSubscription *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__cisubscriptionCanCopy__ = &cisubscriptionCanCopy_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__cisubscriptionCtrlCmdGetUuid__ = &cisubscriptionCtrlCmdGetUuid_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_ComputeInstanceSubscription_resCanCopy; + + pThis->__cisubscriptionShareCallback__ = &__nvoc_thunk_GpuResource_cisubscriptionShareCallback; + + pThis->__cisubscriptionControl__ = &__nvoc_thunk_GpuResource_cisubscriptionControl; + + pThis->__cisubscriptionUnmap__ = &__nvoc_thunk_GpuResource_cisubscriptionUnmap; + + pThis->__cisubscriptionGetMemInterMapParams__ = &__nvoc_thunk_RmResource_cisubscriptionGetMemInterMapParams; + + pThis->__cisubscriptionGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_cisubscriptionGetMemoryMappingDescriptor; + + pThis->__cisubscriptionGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_cisubscriptionGetMapAddrSpace; + + pThis->__cisubscriptionGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_cisubscriptionGetInternalObjectHandle; + + pThis->__cisubscriptionControlFilter__ = &__nvoc_thunk_RsResource_cisubscriptionControlFilter; + + pThis->__cisubscriptionAddAdditionalDependants__ = &__nvoc_thunk_RsResource_cisubscriptionAddAdditionalDependants; + + pThis->__cisubscriptionGetRefCount__ = &__nvoc_thunk_RsResource_cisubscriptionGetRefCount; + + pThis->__cisubscriptionCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_cisubscriptionCheckMemInterUnmap; + + pThis->__cisubscriptionMapTo__ = &__nvoc_thunk_RsResource_cisubscriptionMapTo; + + pThis->__cisubscriptionControl_Prologue__ = &__nvoc_thunk_RmResource_cisubscriptionControl_Prologue; + + pThis->__cisubscriptionGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_cisubscriptionGetRegBaseOffsetAndSize; + + pThis->__cisubscriptionInternalControlForward__ = 
&__nvoc_thunk_GpuResource_cisubscriptionInternalControlForward; + + pThis->__cisubscriptionPreDestruct__ = &__nvoc_thunk_RsResource_cisubscriptionPreDestruct; + + pThis->__cisubscriptionUnmapFrom__ = &__nvoc_thunk_RsResource_cisubscriptionUnmapFrom; + + pThis->__cisubscriptionControl_Epilogue__ = &__nvoc_thunk_RmResource_cisubscriptionControl_Epilogue; + + pThis->__cisubscriptionControlLookup__ = &__nvoc_thunk_RsResource_cisubscriptionControlLookup; + + pThis->__cisubscriptionMap__ = &__nvoc_thunk_GpuResource_cisubscriptionMap; + + pThis->__cisubscriptionAccessCallback__ = &__nvoc_thunk_RmResource_cisubscriptionAccessCallback; +} + +void __nvoc_init_funcTable_ComputeInstanceSubscription(ComputeInstanceSubscription *pThis) { + __nvoc_init_funcTable_ComputeInstanceSubscription_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_ComputeInstanceSubscription(ComputeInstanceSubscription *pThis) { + pThis->__nvoc_pbase_ComputeInstanceSubscription = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_ComputeInstanceSubscription(pThis); +} + +NV_STATUS __nvoc_objCreate_ComputeInstanceSubscription(ComputeInstanceSubscription **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + ComputeInstanceSubscription *pThis; + + pThis = portMemAllocNonPaged(sizeof(ComputeInstanceSubscription)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(ComputeInstanceSubscription)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ComputeInstanceSubscription); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_ComputeInstanceSubscription(pThis); + status = __nvoc_ctor_ComputeInstanceSubscription(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_ComputeInstanceSubscription_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_ComputeInstanceSubscription_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ComputeInstanceSubscription(ComputeInstanceSubscription **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_ComputeInstanceSubscription(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git 
a/src/nvidia/generated/g_compute_instance_subscription_nvoc.h b/src/nvidia/generated/g_compute_instance_subscription_nvoc.h new file mode 100644 index 000000000..d81d4e390 --- /dev/null +++ b/src/nvidia/generated/g_compute_instance_subscription_nvoc.h @@ -0,0 +1,266 @@ +#ifndef _G_COMPUTE_INSTANCE_SUBSCRIPTION_NVOC_H_ +#define _G_COMPUTE_INSTANCE_SUBSCRIPTION_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing MIG compute instance subscriptions + * + *****************************************************************************/ + +#include "g_compute_instance_subscription_nvoc.h" + +#ifndef COMPUTE_INSTANCE_SUBSCRIPTION_H +#define COMPUTE_INSTANCE_SUBSCRIPTION_H + +#include "class/clc638.h" +#include "ctrl/ctrlc638.h" +#include "gpu/gpu_resource.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +#ifdef NVOC_COMPUTE_INSTANCE_SUBSCRIPTION_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct ComputeInstanceSubscription { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct ComputeInstanceSubscription *__nvoc_pbase_ComputeInstanceSubscription; + NvBool (*__cisubscriptionCanCopy__)(struct ComputeInstanceSubscription *); + NV_STATUS (*__cisubscriptionCtrlCmdGetUuid__)(struct ComputeInstanceSubscription *, NVC638_CTRL_GET_UUID_PARAMS *); + NvBool (*__cisubscriptionShareCallback__)(struct ComputeInstanceSubscription *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__cisubscriptionControl__)(struct ComputeInstanceSubscription *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__cisubscriptionUnmap__)(struct 
ComputeInstanceSubscription *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__cisubscriptionGetMemInterMapParams__)(struct ComputeInstanceSubscription *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__cisubscriptionGetMemoryMappingDescriptor__)(struct ComputeInstanceSubscription *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__cisubscriptionGetMapAddrSpace__)(struct ComputeInstanceSubscription *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__cisubscriptionGetInternalObjectHandle__)(struct ComputeInstanceSubscription *); + NV_STATUS (*__cisubscriptionControlFilter__)(struct ComputeInstanceSubscription *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__cisubscriptionAddAdditionalDependants__)(struct RsClient *, struct ComputeInstanceSubscription *, RsResourceRef *); + NvU32 (*__cisubscriptionGetRefCount__)(struct ComputeInstanceSubscription *); + NV_STATUS (*__cisubscriptionCheckMemInterUnmap__)(struct ComputeInstanceSubscription *, NvBool); + NV_STATUS (*__cisubscriptionMapTo__)(struct ComputeInstanceSubscription *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__cisubscriptionControl_Prologue__)(struct ComputeInstanceSubscription *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__cisubscriptionGetRegBaseOffsetAndSize__)(struct ComputeInstanceSubscription *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__cisubscriptionInternalControlForward__)(struct ComputeInstanceSubscription *, NvU32, void *, NvU32); + void (*__cisubscriptionPreDestruct__)(struct ComputeInstanceSubscription *); + NV_STATUS (*__cisubscriptionUnmapFrom__)(struct ComputeInstanceSubscription *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__cisubscriptionControl_Epilogue__)(struct ComputeInstanceSubscription *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__cisubscriptionControlLookup__)(struct ComputeInstanceSubscription *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__cisubscriptionMap__)(struct ComputeInstanceSubscription *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__cisubscriptionAccessCallback__)(struct ComputeInstanceSubscription *, struct RsClient *, void *, RsAccessRight); + MIG_COMPUTE_INSTANCE *pMIGComputeInstance; + NvU64 dupedCapDescriptor; +}; + +#ifndef __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ +#define __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ +typedef struct ComputeInstanceSubscription ComputeInstanceSubscription; +#endif /* __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ComputeInstanceSubscription +#define __nvoc_class_id_ComputeInstanceSubscription 0xd1f238 +#endif /* __nvoc_class_id_ComputeInstanceSubscription */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ComputeInstanceSubscription; + +#define __staticCast_ComputeInstanceSubscription(pThis) \ + ((pThis)->__nvoc_pbase_ComputeInstanceSubscription) + +#ifdef __nvoc_compute_instance_subscription_h_disabled +#define __dynamicCast_ComputeInstanceSubscription(pThis) ((ComputeInstanceSubscription*)NULL) +#else //__nvoc_compute_instance_subscription_h_disabled +#define __dynamicCast_ComputeInstanceSubscription(pThis) \ + ((ComputeInstanceSubscription*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ComputeInstanceSubscription))) +#endif //__nvoc_compute_instance_subscription_h_disabled + + +NV_STATUS 
__nvoc_objCreateDynamic_ComputeInstanceSubscription(ComputeInstanceSubscription**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ComputeInstanceSubscription(ComputeInstanceSubscription**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_ComputeInstanceSubscription(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_ComputeInstanceSubscription((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define cisubscriptionCanCopy(arg0) cisubscriptionCanCopy_DISPATCH(arg0) +#define cisubscriptionCtrlCmdGetUuid(arg0, arg1) cisubscriptionCtrlCmdGetUuid_DISPATCH(arg0, arg1) +#define cisubscriptionShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) cisubscriptionShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define cisubscriptionControl(pGpuResource, pCallContext, pParams) cisubscriptionControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define cisubscriptionUnmap(pGpuResource, pCallContext, pCpuMapping) cisubscriptionUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define cisubscriptionGetMemInterMapParams(pRmResource, pParams) cisubscriptionGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define cisubscriptionGetMemoryMappingDescriptor(pRmResource, ppMemDesc) cisubscriptionGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define cisubscriptionGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) cisubscriptionGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define cisubscriptionGetInternalObjectHandle(pGpuResource) cisubscriptionGetInternalObjectHandle_DISPATCH(pGpuResource) +#define cisubscriptionControlFilter(pResource, pCallContext, pParams) cisubscriptionControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define cisubscriptionAddAdditionalDependants(pClient, pResource, pReference) cisubscriptionAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define cisubscriptionGetRefCount(pResource) cisubscriptionGetRefCount_DISPATCH(pResource) +#define cisubscriptionCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) cisubscriptionCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define cisubscriptionMapTo(pResource, pParams) cisubscriptionMapTo_DISPATCH(pResource, pParams) +#define cisubscriptionControl_Prologue(pResource, pCallContext, pParams) cisubscriptionControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define cisubscriptionGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) cisubscriptionGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define cisubscriptionInternalControlForward(pGpuResource, command, pParams, size) cisubscriptionInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define cisubscriptionPreDestruct(pResource) cisubscriptionPreDestruct_DISPATCH(pResource) +#define cisubscriptionUnmapFrom(pResource, pParams) cisubscriptionUnmapFrom_DISPATCH(pResource, pParams) +#define cisubscriptionControl_Epilogue(pResource, pCallContext, pParams) cisubscriptionControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define cisubscriptionControlLookup(pResource, pParams, ppEntry) cisubscriptionControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define cisubscriptionMap(pGpuResource, pCallContext, pParams, pCpuMapping) cisubscriptionMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define 
cisubscriptionAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) cisubscriptionAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool cisubscriptionCanCopy_IMPL(struct ComputeInstanceSubscription *arg0); + +static inline NvBool cisubscriptionCanCopy_DISPATCH(struct ComputeInstanceSubscription *arg0) { + return arg0->__cisubscriptionCanCopy__(arg0); +} + +NV_STATUS cisubscriptionCtrlCmdGetUuid_IMPL(struct ComputeInstanceSubscription *arg0, NVC638_CTRL_GET_UUID_PARAMS *arg1); + +static inline NV_STATUS cisubscriptionCtrlCmdGetUuid_DISPATCH(struct ComputeInstanceSubscription *arg0, NVC638_CTRL_GET_UUID_PARAMS *arg1) { + return arg0->__cisubscriptionCtrlCmdGetUuid__(arg0, arg1); +} + +static inline NvBool cisubscriptionShareCallback_DISPATCH(struct ComputeInstanceSubscription *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__cisubscriptionShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS cisubscriptionControl_DISPATCH(struct ComputeInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__cisubscriptionControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS cisubscriptionUnmap_DISPATCH(struct ComputeInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__cisubscriptionUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS cisubscriptionGetMemInterMapParams_DISPATCH(struct ComputeInstanceSubscription *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__cisubscriptionGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS cisubscriptionGetMemoryMappingDescriptor_DISPATCH(struct ComputeInstanceSubscription *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__cisubscriptionGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS cisubscriptionGetMapAddrSpace_DISPATCH(struct ComputeInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__cisubscriptionGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle cisubscriptionGetInternalObjectHandle_DISPATCH(struct ComputeInstanceSubscription *pGpuResource) { + return pGpuResource->__cisubscriptionGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS cisubscriptionControlFilter_DISPATCH(struct ComputeInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__cisubscriptionControlFilter__(pResource, pCallContext, pParams); +} + +static inline void cisubscriptionAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ComputeInstanceSubscription *pResource, RsResourceRef *pReference) { + pResource->__cisubscriptionAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 cisubscriptionGetRefCount_DISPATCH(struct ComputeInstanceSubscription *pResource) { + return pResource->__cisubscriptionGetRefCount__(pResource); +} + +static inline NV_STATUS cisubscriptionCheckMemInterUnmap_DISPATCH(struct ComputeInstanceSubscription *pRmResource, NvBool bSubdeviceHandleProvided) { + return 
pRmResource->__cisubscriptionCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS cisubscriptionMapTo_DISPATCH(struct ComputeInstanceSubscription *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__cisubscriptionMapTo__(pResource, pParams); +} + +static inline NV_STATUS cisubscriptionControl_Prologue_DISPATCH(struct ComputeInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__cisubscriptionControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS cisubscriptionGetRegBaseOffsetAndSize_DISPATCH(struct ComputeInstanceSubscription *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__cisubscriptionGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS cisubscriptionInternalControlForward_DISPATCH(struct ComputeInstanceSubscription *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__cisubscriptionInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void cisubscriptionPreDestruct_DISPATCH(struct ComputeInstanceSubscription *pResource) { + pResource->__cisubscriptionPreDestruct__(pResource); +} + +static inline NV_STATUS cisubscriptionUnmapFrom_DISPATCH(struct ComputeInstanceSubscription *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__cisubscriptionUnmapFrom__(pResource, pParams); +} + +static inline void cisubscriptionControl_Epilogue_DISPATCH(struct ComputeInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__cisubscriptionControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS cisubscriptionControlLookup_DISPATCH(struct ComputeInstanceSubscription *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__cisubscriptionControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS cisubscriptionMap_DISPATCH(struct ComputeInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__cisubscriptionMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool cisubscriptionAccessCallback_DISPATCH(struct ComputeInstanceSubscription *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__cisubscriptionAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS cisubscriptionGetComputeInstanceSubscription_IMPL(struct RsClient *arg0, NvHandle arg1, struct ComputeInstanceSubscription **arg2); +#define cisubscriptionGetComputeInstanceSubscription(arg0, arg1, arg2) cisubscriptionGetComputeInstanceSubscription_IMPL(arg0, arg1, arg2) +NV_STATUS cisubscriptionConstruct_IMPL(struct ComputeInstanceSubscription *arg_pComputeInstanceSubscription, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_cisubscriptionConstruct(arg_pComputeInstanceSubscription, arg_pCallContext, arg_pParams) cisubscriptionConstruct_IMPL(arg_pComputeInstanceSubscription, arg_pCallContext, arg_pParams) +NV_STATUS cisubscriptionCopyConstruct_IMPL(struct ComputeInstanceSubscription *arg0, CALL_CONTEXT *arg1, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg2); +#ifdef 
__nvoc_compute_instance_subscription_h_disabled +static inline NV_STATUS cisubscriptionCopyConstruct(struct ComputeInstanceSubscription *arg0, CALL_CONTEXT *arg1, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg2) { + NV_ASSERT_FAILED_PRECOMP("ComputeInstanceSubscription was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_compute_instance_subscription_h_disabled +#define cisubscriptionCopyConstruct(arg0, arg1, arg2) cisubscriptionCopyConstruct_IMPL(arg0, arg1, arg2) +#endif //__nvoc_compute_instance_subscription_h_disabled + +void cisubscriptionDestruct_IMPL(struct ComputeInstanceSubscription *arg0); +#define __nvoc_cisubscriptionDestruct(arg0) cisubscriptionDestruct_IMPL(arg0) +#undef PRIVATE_FIELD + + +#endif // COMPUTE_INSTANCE_SUBSCRIPTION_H + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_COMPUTE_INSTANCE_SUBSCRIPTION_NVOC_H_ diff --git a/src/nvidia/generated/g_console_mem_nvoc.c b/src/nvidia/generated/g_console_mem_nvoc.c new file mode 100644 index 000000000..1dafe3169 --- /dev/null +++ b/src/nvidia/generated/g_console_mem_nvoc.c @@ -0,0 +1,323 @@ +#define NVOC_CONSOLE_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_console_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xaac69e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ConsoleMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_ConsoleMemory(ConsoleMemory*); +void __nvoc_init_funcTable_ConsoleMemory(ConsoleMemory*); +NV_STATUS __nvoc_ctor_ConsoleMemory(ConsoleMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_ConsoleMemory(ConsoleMemory*); +void __nvoc_dtor_ConsoleMemory(ConsoleMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ConsoleMemory; + +static const struct NVOC_RTTI __nvoc_rtti_ConsoleMemory_ConsoleMemory = { + /*pClassDef=*/ &__nvoc_class_def_ConsoleMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ConsoleMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_ConsoleMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ConsoleMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ConsoleMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ConsoleMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ConsoleMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ConsoleMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ConsoleMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ConsoleMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + 
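
[Editor's note] For context on the generated code above and below: each NVOC_RTTI entry records the byte offset of a base-class sub-object inside the derived struct, and the generated __nvoc_thunk_* functions convert between derived and base pointers by adding or subtracting that offset before forwarding the call. The following standalone C sketch only illustrates that offset-based casting idea; the struct and function names here are hypothetical and are not part of the NVIDIA sources.

    /* Minimal sketch of offset-based base/derived pointer adjustment,
     * similar in spirit to NVOC_RTTI offsets and the generated thunks. */
    #include <stddef.h>
    #include <stdio.h>

    struct Base    { int baseField; };
    struct Derived { int derivedField; struct Base base; };

    /* Offset of the Base sub-object inside Derived, as an RTTI entry would record. */
    static const size_t kBaseOffset = offsetof(struct Derived, base);

    static struct Base *derivedToBase(struct Derived *d)
    {
        /* "+ offset" direction: forwarding a call to a base-class implementation. */
        return (struct Base *)((unsigned char *)d + kBaseOffset);
    }

    static struct Derived *baseToDerived(struct Base *b)
    {
        /* "- offset" direction: a base-class slot overridden by the derived class. */
        return (struct Derived *)((unsigned char *)b - kBaseOffset);
    }

    int main(void)
    {
        struct Derived d = { .derivedField = 7, .base = { .baseField = 42 } };
        struct Base *pBase = derivedToBase(&d);
        printf("%d %d\n", pBase->baseField, baseToDerived(pBase)->derivedField); /* prints: 42 7 */
        return 0;
    }
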
+static const struct NVOC_RTTI __nvoc_rtti_ConsoleMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ConsoleMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_ConsoleMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_ConsoleMemory_ConsoleMemory, + &__nvoc_rtti_ConsoleMemory_Memory, + &__nvoc_rtti_ConsoleMemory_RmResource, + &__nvoc_rtti_ConsoleMemory_RmResourceCommon, + &__nvoc_rtti_ConsoleMemory_RsResource, + &__nvoc_rtti_ConsoleMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_ConsoleMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ConsoleMemory), + /*classId=*/ classId(ConsoleMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ConsoleMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ConsoleMemory, + /*pCastInfo=*/ &__nvoc_castinfo_ConsoleMemory, + /*pExportInfo=*/ &__nvoc_export_info_ConsoleMemory +}; + +static NvBool __nvoc_thunk_ConsoleMemory_resCanCopy(struct RsResource *pConsoleMemory) { + return conmemCanCopy((struct ConsoleMemory *)(((unsigned char *)pConsoleMemory) - __nvoc_rtti_ConsoleMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemCheckMemInterUnmap(struct ConsoleMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemControl(struct ConsoleMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemUnmap(struct ConsoleMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemGetMemInterMapParams(struct ConsoleMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemGetMemoryMappingDescriptor(struct ConsoleMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemGetMapAddrSpace(struct ConsoleMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_conmemShareCallback(struct ConsoleMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_conmemControlFilter(struct ConsoleMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL 
*pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_conmemAddAdditionalDependants(struct RsClient *pClient, struct ConsoleMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_conmemGetRefCount(struct ConsoleMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_conmemMapTo(struct ConsoleMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_conmemControl_Prologue(struct ConsoleMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemIsReady(struct ConsoleMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemCheckCopyPermissions(struct ConsoleMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_conmemPreDestruct(struct ConsoleMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_conmemUnmapFrom(struct ConsoleMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_conmemControl_Epilogue(struct ConsoleMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_conmemControlLookup(struct ConsoleMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_conmemMap(struct ConsoleMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_ConsoleMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_conmemAccessCallback(struct ConsoleMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ConsoleMemory_RmResource.offset), 
pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_ConsoleMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_ConsoleMemory(ConsoleMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ConsoleMemory(ConsoleMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_ConsoleMemory(ConsoleMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ConsoleMemory_fail_Memory; + __nvoc_init_dataField_ConsoleMemory(pThis); + + status = __nvoc_conmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ConsoleMemory_fail__init; + goto __nvoc_ctor_ConsoleMemory_exit; // Success + +__nvoc_ctor_ConsoleMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_ConsoleMemory_fail_Memory: +__nvoc_ctor_ConsoleMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_ConsoleMemory_1(ConsoleMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__conmemCanCopy__ = &conmemCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_ConsoleMemory_resCanCopy; + + pThis->__conmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_conmemCheckMemInterUnmap; + + pThis->__conmemControl__ = &__nvoc_thunk_Memory_conmemControl; + + pThis->__conmemUnmap__ = &__nvoc_thunk_Memory_conmemUnmap; + + pThis->__conmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_conmemGetMemInterMapParams; + + pThis->__conmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_conmemGetMemoryMappingDescriptor; + + pThis->__conmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_conmemGetMapAddrSpace; + + pThis->__conmemShareCallback__ = &__nvoc_thunk_RmResource_conmemShareCallback; + + pThis->__conmemControlFilter__ = &__nvoc_thunk_RsResource_conmemControlFilter; + + pThis->__conmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_conmemAddAdditionalDependants; + + pThis->__conmemGetRefCount__ = &__nvoc_thunk_RsResource_conmemGetRefCount; + + pThis->__conmemMapTo__ = &__nvoc_thunk_RsResource_conmemMapTo; + + pThis->__conmemControl_Prologue__ = &__nvoc_thunk_RmResource_conmemControl_Prologue; + + pThis->__conmemIsReady__ = &__nvoc_thunk_Memory_conmemIsReady; + + pThis->__conmemCheckCopyPermissions__ = &__nvoc_thunk_Memory_conmemCheckCopyPermissions; + + pThis->__conmemPreDestruct__ = &__nvoc_thunk_RsResource_conmemPreDestruct; + + pThis->__conmemUnmapFrom__ = &__nvoc_thunk_RsResource_conmemUnmapFrom; + + pThis->__conmemControl_Epilogue__ = &__nvoc_thunk_RmResource_conmemControl_Epilogue; + + pThis->__conmemControlLookup__ = &__nvoc_thunk_RsResource_conmemControlLookup; + + pThis->__conmemMap__ = &__nvoc_thunk_Memory_conmemMap; + + pThis->__conmemAccessCallback__ = &__nvoc_thunk_RmResource_conmemAccessCallback; +} + +void __nvoc_init_funcTable_ConsoleMemory(ConsoleMemory *pThis) { + __nvoc_init_funcTable_ConsoleMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_ConsoleMemory(ConsoleMemory *pThis) { + pThis->__nvoc_pbase_ConsoleMemory = pThis; + pThis->__nvoc_pbase_Object = 
&pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_ConsoleMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_ConsoleMemory(ConsoleMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + ConsoleMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(ConsoleMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(ConsoleMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ConsoleMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_ConsoleMemory(pThis); + status = __nvoc_ctor_ConsoleMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_ConsoleMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_ConsoleMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ConsoleMemory(ConsoleMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_ConsoleMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_console_mem_nvoc.h b/src/nvidia/generated/g_console_mem_nvoc.h new file mode 100644 index 000000000..b55113bbc --- /dev/null +++ b/src/nvidia/generated/g_console_mem_nvoc.h @@ -0,0 +1,224 @@ +#ifndef _G_CONSOLE_MEM_NVOC_H_ +#define _G_CONSOLE_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_console_mem_nvoc.h" + +#ifndef _CONSOLE_MEMORY_H_ +#define _CONSOLE_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! + * This class is used to create hMemory referencing reserved console memory + */ +#ifdef NVOC_CONSOLE_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct ConsoleMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct ConsoleMemory *__nvoc_pbase_ConsoleMemory; + NvBool (*__conmemCanCopy__)(struct ConsoleMemory *); + NV_STATUS (*__conmemCheckMemInterUnmap__)(struct ConsoleMemory *, NvBool); + NV_STATUS (*__conmemControl__)(struct ConsoleMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__conmemUnmap__)(struct ConsoleMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__conmemGetMemInterMapParams__)(struct ConsoleMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__conmemGetMemoryMappingDescriptor__)(struct ConsoleMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__conmemGetMapAddrSpace__)(struct ConsoleMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__conmemShareCallback__)(struct ConsoleMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__conmemControlFilter__)(struct ConsoleMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__conmemAddAdditionalDependants__)(struct RsClient *, struct ConsoleMemory *, RsResourceRef *); + NvU32 (*__conmemGetRefCount__)(struct ConsoleMemory *); + NV_STATUS (*__conmemMapTo__)(struct ConsoleMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__conmemControl_Prologue__)(struct ConsoleMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__conmemIsReady__)(struct ConsoleMemory *); + NV_STATUS (*__conmemCheckCopyPermissions__)(struct ConsoleMemory *, struct OBJGPU *, NvHandle); + void (*__conmemPreDestruct__)(struct ConsoleMemory *); + NV_STATUS (*__conmemUnmapFrom__)(struct ConsoleMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__conmemControl_Epilogue__)(struct ConsoleMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__conmemControlLookup__)(struct ConsoleMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__conmemMap__)(struct ConsoleMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__conmemAccessCallback__)(struct ConsoleMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_ConsoleMemory_TYPEDEF__ +#define __NVOC_CLASS_ConsoleMemory_TYPEDEF__ +typedef struct ConsoleMemory ConsoleMemory; +#endif /* __NVOC_CLASS_ConsoleMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ConsoleMemory +#define __nvoc_class_id_ConsoleMemory 
0xaac69e +#endif /* __nvoc_class_id_ConsoleMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ConsoleMemory; + +#define __staticCast_ConsoleMemory(pThis) \ + ((pThis)->__nvoc_pbase_ConsoleMemory) + +#ifdef __nvoc_console_mem_h_disabled +#define __dynamicCast_ConsoleMemory(pThis) ((ConsoleMemory*)NULL) +#else //__nvoc_console_mem_h_disabled +#define __dynamicCast_ConsoleMemory(pThis) \ + ((ConsoleMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ConsoleMemory))) +#endif //__nvoc_console_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_ConsoleMemory(ConsoleMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ConsoleMemory(ConsoleMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_ConsoleMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_ConsoleMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define conmemCanCopy(pConsoleMemory) conmemCanCopy_DISPATCH(pConsoleMemory) +#define conmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) conmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define conmemControl(pMemory, pCallContext, pParams) conmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define conmemUnmap(pMemory, pCallContext, pCpuMapping) conmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define conmemGetMemInterMapParams(pMemory, pParams) conmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define conmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) conmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define conmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) conmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define conmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) conmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define conmemControlFilter(pResource, pCallContext, pParams) conmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define conmemAddAdditionalDependants(pClient, pResource, pReference) conmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define conmemGetRefCount(pResource) conmemGetRefCount_DISPATCH(pResource) +#define conmemMapTo(pResource, pParams) conmemMapTo_DISPATCH(pResource, pParams) +#define conmemControl_Prologue(pResource, pCallContext, pParams) conmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define conmemIsReady(pMemory) conmemIsReady_DISPATCH(pMemory) +#define conmemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) conmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define conmemPreDestruct(pResource) conmemPreDestruct_DISPATCH(pResource) +#define conmemUnmapFrom(pResource, pParams) conmemUnmapFrom_DISPATCH(pResource, pParams) +#define conmemControl_Epilogue(pResource, pCallContext, pParams) conmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define conmemControlLookup(pResource, pParams, ppEntry) conmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define conmemMap(pMemory, pCallContext, pParams, pCpuMapping) conmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define conmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) conmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool conmemCanCopy_IMPL(struct ConsoleMemory 
*pConsoleMemory); + +static inline NvBool conmemCanCopy_DISPATCH(struct ConsoleMemory *pConsoleMemory) { + return pConsoleMemory->__conmemCanCopy__(pConsoleMemory); +} + +static inline NV_STATUS conmemCheckMemInterUnmap_DISPATCH(struct ConsoleMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__conmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS conmemControl_DISPATCH(struct ConsoleMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__conmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS conmemUnmap_DISPATCH(struct ConsoleMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__conmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS conmemGetMemInterMapParams_DISPATCH(struct ConsoleMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__conmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS conmemGetMemoryMappingDescriptor_DISPATCH(struct ConsoleMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__conmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS conmemGetMapAddrSpace_DISPATCH(struct ConsoleMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__conmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool conmemShareCallback_DISPATCH(struct ConsoleMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__conmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS conmemControlFilter_DISPATCH(struct ConsoleMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__conmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void conmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ConsoleMemory *pResource, RsResourceRef *pReference) { + pResource->__conmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 conmemGetRefCount_DISPATCH(struct ConsoleMemory *pResource) { + return pResource->__conmemGetRefCount__(pResource); +} + +static inline NV_STATUS conmemMapTo_DISPATCH(struct ConsoleMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__conmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS conmemControl_Prologue_DISPATCH(struct ConsoleMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__conmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS conmemIsReady_DISPATCH(struct ConsoleMemory *pMemory) { + return pMemory->__conmemIsReady__(pMemory); +} + +static inline NV_STATUS conmemCheckCopyPermissions_DISPATCH(struct ConsoleMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__conmemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void conmemPreDestruct_DISPATCH(struct ConsoleMemory *pResource) { + pResource->__conmemPreDestruct__(pResource); +} + +static inline NV_STATUS conmemUnmapFrom_DISPATCH(struct ConsoleMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__conmemUnmapFrom__(pResource, pParams); +} + +static inline void 
conmemControl_Epilogue_DISPATCH(struct ConsoleMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__conmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS conmemControlLookup_DISPATCH(struct ConsoleMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__conmemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS conmemMap_DISPATCH(struct ConsoleMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__conmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool conmemAccessCallback_DISPATCH(struct ConsoleMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__conmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS conmemConstruct_IMPL(struct ConsoleMemory *arg_pConsoleMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_conmemConstruct(arg_pConsoleMemory, arg_pCallContext, arg_pParams) conmemConstruct_IMPL(arg_pConsoleMemory, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CONSOLE_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_context_dma_nvoc.c b/src/nvidia/generated/g_context_dma_nvoc.c new file mode 100644 index 000000000..f686355af --- /dev/null +++ b/src/nvidia/generated/g_context_dma_nvoc.c @@ -0,0 +1,427 @@ +#define NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_context_dma_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x88441b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_ContextDma(ContextDma*); +void __nvoc_init_funcTable_ContextDma(ContextDma*); +NV_STATUS __nvoc_ctor_ContextDma(ContextDma*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_ContextDma(ContextDma*); +void __nvoc_dtor_ContextDma(ContextDma*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ContextDma; + +static const struct NVOC_RTTI __nvoc_rtti_ContextDma_ContextDma = { + /*pClassDef=*/ &__nvoc_class_def_ContextDma, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ContextDma, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_ContextDma_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ContextDma, 
__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ContextDma_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ContextDma_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_ContextDma = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_ContextDma_ContextDma, + &__nvoc_rtti_ContextDma_Notifier, + &__nvoc_rtti_ContextDma_INotifier, + &__nvoc_rtti_ContextDma_RmResource, + &__nvoc_rtti_ContextDma_RmResourceCommon, + &__nvoc_rtti_ContextDma_RsResource, + &__nvoc_rtti_ContextDma_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ContextDma), + /*classId=*/ classId(ContextDma), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ContextDma", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ContextDma, + /*pCastInfo=*/ &__nvoc_castinfo_ContextDma, + /*pExportInfo=*/ &__nvoc_export_info_ContextDma +}; + +static NV_STATUS __nvoc_thunk_ContextDma_resMapTo(struct RsResource *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) { + return ctxdmaMapTo((struct ContextDma *)(((unsigned char *)pContextDma) - __nvoc_rtti_ContextDma_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_ContextDma_resUnmapFrom(struct RsResource *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) { + return ctxdmaUnmapFrom((struct ContextDma *)(((unsigned char *)pContextDma) - __nvoc_rtti_ContextDma_RsResource.offset), pParams); +} + +static NvBool __nvoc_thunk_RmResource_ctxdmaShareCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_ctxdmaCheckMemInterUnmap(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_RmResource_ctxdmaAccessCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_ctxdmaGetMemInterMapParams(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource 
*)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), ppMemDesc); +} + +static void __nvoc_thunk_Notifier_ctxdmaSetNotificationShare(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControl(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControlFilter(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_ctxdmaGetRefCount(struct ContextDma *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_ctxdmaUnregisterEvent(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_ctxdmaUnmap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pCpuMapping); +} + +static NvBool __nvoc_thunk_RsResource_ctxdmaCanCopy(struct ContextDma *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_ctxdmaControl_Prologue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_ctxdmaAddAdditionalDependants(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_ctxdmaPreDestruct(struct ContextDma *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_ctxdmaGetNotificationListPtr(struct ContextDma *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset)); +} + +static void 
__nvoc_thunk_RmResource_ctxdmaControl_Epilogue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pCallContext, pParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_ctxdmaGetNotificationShare(struct ContextDma *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControlLookup(struct ContextDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_ctxdmaMap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_ctxdmaGetOrAllocNotifShare(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ContextDma[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUpdateContextdma_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20101u, + /*paramSize=*/ sizeof(NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ctxdmaCtrlCmdUpdateContextdma" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdBindContextdma_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20102u, + /*paramSize=*/ sizeof(NV0002_CTRL_BIND_CONTEXTDMA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ctxdmaCtrlCmdBindContextdma" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUnbindContextdma_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20103u, + /*paramSize=*/ sizeof(NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ctxdmaCtrlCmdUnbindContextdma" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_ContextDma = +{ + /*numEntries=*/ 3, + /*pExportEntries=*/ __nvoc_exported_method_def_ContextDma +}; + +void __nvoc_dtor_RmResource(RmResource*); +void 
__nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_ContextDma(ContextDma *pThis) { + __nvoc_ctxdmaDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ContextDma(ContextDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_ContextDma(ContextDma *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_RmResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_Notifier; + __nvoc_init_dataField_ContextDma(pThis); + + status = __nvoc_ctxdmaConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail__init; + goto __nvoc_ctor_ContextDma_exit; // Success + +__nvoc_ctor_ContextDma_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_ContextDma_fail_Notifier: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_ContextDma_fail_RmResource: +__nvoc_ctor_ContextDma_exit: + + return status; +} + +static void __nvoc_init_funcTable_ContextDma_1(ContextDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__ctxdmaValidate__ = &ctxdmaValidate_IMPL; + + pThis->__ctxdmaGetKernelVA__ = &ctxdmaGetKernelVA_IMPL; + + pThis->__ctxdmaMapTo__ = &ctxdmaMapTo_IMPL; + + pThis->__ctxdmaUnmapFrom__ = &ctxdmaUnmapFrom_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__ctxdmaCtrlCmdUpdateContextdma__ = &ctxdmaCtrlCmdUpdateContextdma_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ctxdmaCtrlCmdBindContextdma__ = &ctxdmaCtrlCmdBindContextdma_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ctxdmaCtrlCmdUnbindContextdma__ = &ctxdmaCtrlCmdUnbindContextdma_IMPL; +#endif + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMapTo__ = &__nvoc_thunk_ContextDma_resMapTo; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmapFrom__ = &__nvoc_thunk_ContextDma_resUnmapFrom; + + pThis->__ctxdmaShareCallback__ = &__nvoc_thunk_RmResource_ctxdmaShareCallback; + + pThis->__ctxdmaCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_ctxdmaCheckMemInterUnmap; + + pThis->__ctxdmaAccessCallback__ = &__nvoc_thunk_RmResource_ctxdmaAccessCallback; + + pThis->__ctxdmaGetMemInterMapParams__ = &__nvoc_thunk_RmResource_ctxdmaGetMemInterMapParams; + + pThis->__ctxdmaGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor; + + pThis->__ctxdmaSetNotificationShare__ = &__nvoc_thunk_Notifier_ctxdmaSetNotificationShare; + + pThis->__ctxdmaControl__ = &__nvoc_thunk_RsResource_ctxdmaControl; + + pThis->__ctxdmaControlFilter__ = &__nvoc_thunk_RsResource_ctxdmaControlFilter; + + pThis->__ctxdmaGetRefCount__ = &__nvoc_thunk_RsResource_ctxdmaGetRefCount; + + pThis->__ctxdmaUnregisterEvent__ = &__nvoc_thunk_Notifier_ctxdmaUnregisterEvent; + + pThis->__ctxdmaUnmap__ = &__nvoc_thunk_RsResource_ctxdmaUnmap; + + pThis->__ctxdmaCanCopy__ = &__nvoc_thunk_RsResource_ctxdmaCanCopy; + + 
pThis->__ctxdmaControl_Prologue__ = &__nvoc_thunk_RmResource_ctxdmaControl_Prologue; + + pThis->__ctxdmaAddAdditionalDependants__ = &__nvoc_thunk_RsResource_ctxdmaAddAdditionalDependants; + + pThis->__ctxdmaPreDestruct__ = &__nvoc_thunk_RsResource_ctxdmaPreDestruct; + + pThis->__ctxdmaGetNotificationListPtr__ = &__nvoc_thunk_Notifier_ctxdmaGetNotificationListPtr; + + pThis->__ctxdmaControl_Epilogue__ = &__nvoc_thunk_RmResource_ctxdmaControl_Epilogue; + + pThis->__ctxdmaGetNotificationShare__ = &__nvoc_thunk_Notifier_ctxdmaGetNotificationShare; + + pThis->__ctxdmaControlLookup__ = &__nvoc_thunk_RsResource_ctxdmaControlLookup; + + pThis->__ctxdmaMap__ = &__nvoc_thunk_RsResource_ctxdmaMap; + + pThis->__ctxdmaGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_ctxdmaGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_ContextDma(ContextDma *pThis) { + __nvoc_init_funcTable_ContextDma_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_ContextDma(ContextDma *pThis) { + pThis->__nvoc_pbase_ContextDma = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_ContextDma(pThis); +} + +NV_STATUS __nvoc_objCreate_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + ContextDma *pThis; + + pThis = portMemAllocNonPaged(sizeof(ContextDma)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(ContextDma)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ContextDma); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_ContextDma(pThis); + status = __nvoc_ctor_ContextDma(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_ContextDma_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_ContextDma_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_ContextDma(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_context_dma_nvoc.h b/src/nvidia/generated/g_context_dma_nvoc.h new file mode 100644 index 000000000..77e92cdca --- 
/dev/null +++ b/src/nvidia/generated/g_context_dma_nvoc.h @@ -0,0 +1,356 @@ +#ifndef _G_CONTEXT_DMA_NVOC_H_ +#define _G_CONTEXT_DMA_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_context_dma_nvoc.h" + +#ifndef CONTEXT_DMA_H +#define CONTEXT_DMA_H + +#include "core/core.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "rmapi/resource.h" +#include "rmapi/event.h" +#include "ctrl/ctrl0002.h" +#include "rmapi/control.h" // for macro RMCTRL_EXPORT etc. +#include "nvlimits.h" + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +/*! 
+ * RM internal class representing NV01_CONTEXT_DMA + */ +#ifdef NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct ContextDma { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ContextDma *__nvoc_pbase_ContextDma; + NV_STATUS (*__ctxdmaValidate__)(struct ContextDma *, NvU64, NvU64); + NV_STATUS (*__ctxdmaGetKernelVA__)(struct ContextDma *, NvU64, NvU64, void **, NvU32); + NV_STATUS (*__ctxdmaMapTo__)(struct ContextDma *, struct RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__ctxdmaUnmapFrom__)(struct ContextDma *, struct RS_RES_UNMAP_FROM_PARAMS *); + NV_STATUS (*__ctxdmaCtrlCmdUpdateContextdma__)(struct ContextDma *, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *); + NV_STATUS (*__ctxdmaCtrlCmdBindContextdma__)(struct ContextDma *, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *); + NV_STATUS (*__ctxdmaCtrlCmdUnbindContextdma__)(struct ContextDma *, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *); + NvBool (*__ctxdmaShareCallback__)(struct ContextDma *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__ctxdmaCheckMemInterUnmap__)(struct ContextDma *, NvBool); + NvBool (*__ctxdmaAccessCallback__)(struct ContextDma *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__ctxdmaGetMemInterMapParams__)(struct ContextDma *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__ctxdmaGetMemoryMappingDescriptor__)(struct ContextDma *, struct MEMORY_DESCRIPTOR **); + void (*__ctxdmaSetNotificationShare__)(struct ContextDma *, struct NotifShare *); + NV_STATUS (*__ctxdmaControl__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ctxdmaControlFilter__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__ctxdmaGetRefCount__)(struct ContextDma *); + NV_STATUS (*__ctxdmaUnregisterEvent__)(struct ContextDma *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__ctxdmaUnmap__)(struct ContextDma *, struct CALL_CONTEXT *, RsCpuMapping *); + NvBool (*__ctxdmaCanCopy__)(struct ContextDma *); + NV_STATUS (*__ctxdmaControl_Prologue__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__ctxdmaAddAdditionalDependants__)(struct RsClient *, struct ContextDma *, RsResourceRef *); + void (*__ctxdmaPreDestruct__)(struct ContextDma *); + PEVENTNOTIFICATION *(*__ctxdmaGetNotificationListPtr__)(struct ContextDma *); + void (*__ctxdmaControl_Epilogue__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__ctxdmaGetNotificationShare__)(struct ContextDma *); + NV_STATUS (*__ctxdmaControlLookup__)(struct ContextDma *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__ctxdmaMap__)(struct ContextDma *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__ctxdmaGetOrAllocNotifShare__)(struct ContextDma *, NvHandle, NvHandle, struct NotifShare **); + NvU32 Class; + NvU32 Flags; + NvBool bReadOnly; + NvU32 CacheSnoop; + NvU32 Type; + NvU64 Limit; + NV_ADDRESS_SPACE AddressSpace; + NvBool bUnicast; + void 
*KernelVAddr[8]; + void *KernelPriv; + NvU64 FbAperture[8]; + NvU64 FbApertureLen[8]; + struct Memory *pMemory; + struct MEMORY_DESCRIPTOR *pMemDesc; + NvU32 Instance[8]; + NvU32 InstRefCount[8]; + struct OBJGPU *pGpu; + struct Device *pDevice; +}; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma; + +#define __staticCast_ContextDma(pThis) \ + ((pThis)->__nvoc_pbase_ContextDma) + +#ifdef __nvoc_context_dma_h_disabled +#define __dynamicCast_ContextDma(pThis) ((ContextDma*)NULL) +#else //__nvoc_context_dma_h_disabled +#define __dynamicCast_ContextDma(pThis) \ + ((ContextDma*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ContextDma))) +#endif //__nvoc_context_dma_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ContextDma(ContextDma**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_ContextDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_ContextDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define ctxdmaValidate(pContextDma, start, len) ctxdmaValidate_DISPATCH(pContextDma, start, len) +#define ctxdmaGetKernelVA(pContextDma, start, len, arg0, VA_idx) ctxdmaGetKernelVA_DISPATCH(pContextDma, start, len, arg0, VA_idx) +#define ctxdmaMapTo(pContextDma, pParams) ctxdmaMapTo_DISPATCH(pContextDma, pParams) +#define ctxdmaUnmapFrom(pContextDma, pParams) ctxdmaUnmapFrom_DISPATCH(pContextDma, pParams) +#define ctxdmaCtrlCmdUpdateContextdma(pContextDma, pUpdateCtxtDmaParams) ctxdmaCtrlCmdUpdateContextdma_DISPATCH(pContextDma, pUpdateCtxtDmaParams) +#define ctxdmaCtrlCmdBindContextdma(pContextDma, pBindCtxtDmaParams) ctxdmaCtrlCmdBindContextdma_DISPATCH(pContextDma, pBindCtxtDmaParams) +#define ctxdmaCtrlCmdUnbindContextdma(pContextDma, pUnbindCtxtDmaParams) ctxdmaCtrlCmdUnbindContextdma_DISPATCH(pContextDma, pUnbindCtxtDmaParams) +#define ctxdmaShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) ctxdmaShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define ctxdmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) ctxdmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define ctxdmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) ctxdmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define ctxdmaGetMemInterMapParams(pRmResource, pParams) ctxdmaGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define ctxdmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) ctxdmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define ctxdmaSetNotificationShare(pNotifier, pNotifShare) ctxdmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define ctxdmaControl(pResource, pCallContext, pParams) ctxdmaControl_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaControlFilter(pResource, pCallContext, pParams) ctxdmaControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaGetRefCount(pResource) ctxdmaGetRefCount_DISPATCH(pResource) +#define ctxdmaUnregisterEvent(pNotifier, hNotifierClient, 
hNotifierResource, hEventClient, hEvent) ctxdmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define ctxdmaUnmap(pResource, pCallContext, pCpuMapping) ctxdmaUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define ctxdmaCanCopy(pResource) ctxdmaCanCopy_DISPATCH(pResource) +#define ctxdmaControl_Prologue(pResource, pCallContext, pParams) ctxdmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaAddAdditionalDependants(pClient, pResource, pReference) ctxdmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define ctxdmaPreDestruct(pResource) ctxdmaPreDestruct_DISPATCH(pResource) +#define ctxdmaGetNotificationListPtr(pNotifier) ctxdmaGetNotificationListPtr_DISPATCH(pNotifier) +#define ctxdmaControl_Epilogue(pResource, pCallContext, pParams) ctxdmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define ctxdmaGetNotificationShare(pNotifier) ctxdmaGetNotificationShare_DISPATCH(pNotifier) +#define ctxdmaControlLookup(pResource, pParams, ppEntry) ctxdmaControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define ctxdmaMap(pResource, pCallContext, pParams, pCpuMapping) ctxdmaMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define ctxdmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) ctxdmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS ctxdmaValidate_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len); + +static inline NV_STATUS ctxdmaValidate_DISPATCH(struct ContextDma *pContextDma, NvU64 start, NvU64 len) { + return pContextDma->__ctxdmaValidate__(pContextDma, start, len); +} + +NV_STATUS ctxdmaGetKernelVA_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg0, NvU32 VA_idx); + +static inline NV_STATUS ctxdmaGetKernelVA_DISPATCH(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg0, NvU32 VA_idx) { + return pContextDma->__ctxdmaGetKernelVA__(pContextDma, start, len, arg0, VA_idx); +} + +NV_STATUS ctxdmaMapTo_IMPL(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams); + +static inline NV_STATUS ctxdmaMapTo_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) { + return pContextDma->__ctxdmaMapTo__(pContextDma, pParams); +} + +NV_STATUS ctxdmaUnmapFrom_IMPL(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams); + +static inline NV_STATUS ctxdmaUnmapFrom_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pContextDma->__ctxdmaUnmapFrom__(pContextDma, pParams); +} + +NV_STATUS ctxdmaCtrlCmdUpdateContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams); + +static inline NV_STATUS ctxdmaCtrlCmdUpdateContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams) { + return pContextDma->__ctxdmaCtrlCmdUpdateContextdma__(pContextDma, pUpdateCtxtDmaParams); +} + +NV_STATUS ctxdmaCtrlCmdBindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams); + +static inline NV_STATUS ctxdmaCtrlCmdBindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams) { + return pContextDma->__ctxdmaCtrlCmdBindContextdma__(pContextDma, pBindCtxtDmaParams); +} + +NV_STATUS ctxdmaCtrlCmdUnbindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS 
*pUnbindCtxtDmaParams); + +static inline NV_STATUS ctxdmaCtrlCmdUnbindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams) { + return pContextDma->__ctxdmaCtrlCmdUnbindContextdma__(pContextDma, pUnbindCtxtDmaParams); +} + +static inline NvBool ctxdmaShareCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__ctxdmaShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS ctxdmaCheckMemInterUnmap_DISPATCH(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__ctxdmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NvBool ctxdmaAccessCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__ctxdmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS ctxdmaGetMemInterMapParams_DISPATCH(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__ctxdmaGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS ctxdmaGetMemoryMappingDescriptor_DISPATCH(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__ctxdmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline void ctxdmaSetNotificationShare_DISPATCH(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__ctxdmaSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS ctxdmaControl_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__ctxdmaControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ctxdmaControlFilter_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__ctxdmaControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 ctxdmaGetRefCount_DISPATCH(struct ContextDma *pResource) { + return pResource->__ctxdmaGetRefCount__(pResource); +} + +static inline NV_STATUS ctxdmaUnregisterEvent_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__ctxdmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS ctxdmaUnmap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__ctxdmaUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool ctxdmaCanCopy_DISPATCH(struct ContextDma *pResource) { + return pResource->__ctxdmaCanCopy__(pResource); +} + +static inline NV_STATUS ctxdmaControl_Prologue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__ctxdmaControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void ctxdmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) { + pResource->__ctxdmaAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void ctxdmaPreDestruct_DISPATCH(struct ContextDma *pResource) { + 
pResource->__ctxdmaPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *ctxdmaGetNotificationListPtr_DISPATCH(struct ContextDma *pNotifier) { + return pNotifier->__ctxdmaGetNotificationListPtr__(pNotifier); +} + +static inline void ctxdmaControl_Epilogue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__ctxdmaControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline struct NotifShare *ctxdmaGetNotificationShare_DISPATCH(struct ContextDma *pNotifier) { + return pNotifier->__ctxdmaGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS ctxdmaControlLookup_DISPATCH(struct ContextDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__ctxdmaControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS ctxdmaMap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__ctxdmaMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS ctxdmaGetOrAllocNotifShare_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__ctxdmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS ctxdmaConstruct_IMPL(struct ContextDma *arg_pCtxdma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_ctxdmaConstruct(arg_pCtxdma, arg_pCallContext, arg_pParams) ctxdmaConstruct_IMPL(arg_pCtxdma, arg_pCallContext, arg_pParams) +void ctxdmaDestruct_IMPL(struct ContextDma *pCtxdma); +#define __nvoc_ctxdmaDestruct(pCtxdma) ctxdmaDestruct_IMPL(pCtxdma) +NvBool ctxdmaIsBound_IMPL(struct ContextDma *pContextDma); +#ifdef __nvoc_context_dma_h_disabled +static inline NvBool ctxdmaIsBound(struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("ContextDma was disabled!"); + return NV_FALSE; +} +#else //__nvoc_context_dma_h_disabled +#define ctxdmaIsBound(pContextDma) ctxdmaIsBound_IMPL(pContextDma) +#endif //__nvoc_context_dma_h_disabled + +NV_STATUS ctxdmaGetByHandle_IMPL(struct RsClient *pClient, NvHandle hContextDma, struct ContextDma **arg0); +#define ctxdmaGetByHandle(pClient, hContextDma, arg0) ctxdmaGetByHandle_IMPL(pClient, hContextDma, arg0) +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +#if RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS == 1 + +/** + * @warning This function is deprecated! Please use ctxdmaGetByHandle. 
+ */ +NV_STATUS CliGetContextDma(NvHandle hClient, NvHandle hContextDma, struct ContextDma **); + +#endif + +#endif /* CONTEXT_DMA_H */ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_CONTEXT_DMA_NVOC_H_ diff --git a/src/nvidia/generated/g_dbgbuffer_nvoc.c b/src/nvidia/generated/g_dbgbuffer_nvoc.c new file mode 100644 index 000000000..9be61cb84 --- /dev/null +++ b/src/nvidia/generated/g_dbgbuffer_nvoc.c @@ -0,0 +1,336 @@ +#define NVOC_DBGBUFFER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_dbgbuffer_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x5e7a1b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DebugBufferApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_DebugBufferApi(DebugBufferApi*); +void __nvoc_init_funcTable_DebugBufferApi(DebugBufferApi*); +NV_STATUS __nvoc_ctor_DebugBufferApi(DebugBufferApi*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DebugBufferApi(DebugBufferApi*); +void __nvoc_dtor_DebugBufferApi(DebugBufferApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DebugBufferApi; + +static const struct NVOC_RTTI __nvoc_rtti_DebugBufferApi_DebugBufferApi = { + /*pClassDef=*/ &__nvoc_class_def_DebugBufferApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DebugBufferApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DebugBufferApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DebugBufferApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DebugBufferApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DebugBufferApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DebugBufferApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DebugBufferApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DebugBufferApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DebugBufferApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DebugBufferApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DebugBufferApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DebugBufferApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_DebugBufferApi_DebugBufferApi, + &__nvoc_rtti_DebugBufferApi_GpuResource, + &__nvoc_rtti_DebugBufferApi_RmResource, + &__nvoc_rtti_DebugBufferApi_RmResourceCommon, + &__nvoc_rtti_DebugBufferApi_RsResource, + &__nvoc_rtti_DebugBufferApi_Object, + }, 
+}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DebugBufferApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DebugBufferApi), + /*classId=*/ classId(DebugBufferApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DebugBufferApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DebugBufferApi, + /*pCastInfo=*/ &__nvoc_castinfo_DebugBufferApi, + /*pExportInfo=*/ &__nvoc_export_info_DebugBufferApi +}; + +static NV_STATUS __nvoc_thunk_DebugBufferApi_gpuresMap(struct GpuResource *pDebugBufferApi, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return dbgbufMap((struct DebugBufferApi *)(((unsigned char *)pDebugBufferApi) - __nvoc_rtti_DebugBufferApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DebugBufferApi_gpuresUnmap(struct GpuResource *pDebugBufferApi, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return dbgbufUnmap((struct DebugBufferApi *)(((unsigned char *)pDebugBufferApi) - __nvoc_rtti_DebugBufferApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DebugBufferApi_gpuresGetMapAddrSpace(struct GpuResource *pDebugBufferApi, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return dbgbufGetMapAddrSpace((struct DebugBufferApi *)(((unsigned char *)pDebugBufferApi) - __nvoc_rtti_DebugBufferApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NV_STATUS __nvoc_thunk_DebugBufferApi_rmresGetMemoryMappingDescriptor(struct RmResource *pDebugBufferApi, MEMORY_DESCRIPTOR **ppMemDesc) { + return dbgbufGetMemoryMappingDescriptor((struct DebugBufferApi *)(((unsigned char *)pDebugBufferApi) - __nvoc_rtti_DebugBufferApi_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_GpuResource_dbgbufShareCallback(struct DebugBufferApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DebugBufferApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dbgbufControl(struct DebugBufferApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DebugBufferApi_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dbgbufGetMemInterMapParams(struct DebugBufferApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DebugBufferApi_RmResource.offset), pParams); +} + +static NvHandle __nvoc_thunk_GpuResource_dbgbufGetInternalObjectHandle(struct DebugBufferApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DebugBufferApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dbgbufControlFilter(struct DebugBufferApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_dbgbufAddAdditionalDependants(struct RsClient *pClient, struct 
DebugBufferApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_dbgbufGetRefCount(struct DebugBufferApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_dbgbufCheckMemInterUnmap(struct DebugBufferApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DebugBufferApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_dbgbufMapTo(struct DebugBufferApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dbgbufControl_Prologue(struct DebugBufferApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dbgbufGetRegBaseOffsetAndSize(struct DebugBufferApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DebugBufferApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_dbgbufCanCopy(struct DebugBufferApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dbgbufInternalControlForward(struct DebugBufferApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DebugBufferApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_dbgbufPreDestruct(struct DebugBufferApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dbgbufUnmapFrom(struct DebugBufferApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dbgbufControl_Epilogue(struct DebugBufferApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dbgbufControlLookup(struct DebugBufferApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RsResource.offset), pParams, ppEntry); +} + +static NvBool __nvoc_thunk_RmResource_dbgbufAccessCallback(struct DebugBufferApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + 
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DebugBufferApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DebugBufferApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_DebugBufferApi(DebugBufferApi *pThis) { + __nvoc_dbgbufDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DebugBufferApi(DebugBufferApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DebugBufferApi(DebugBufferApi *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DebugBufferApi_fail_GpuResource; + __nvoc_init_dataField_DebugBufferApi(pThis); + + status = __nvoc_dbgbufConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DebugBufferApi_fail__init; + goto __nvoc_ctor_DebugBufferApi_exit; // Success + +__nvoc_ctor_DebugBufferApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DebugBufferApi_fail_GpuResource: +__nvoc_ctor_DebugBufferApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_DebugBufferApi_1(DebugBufferApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dbgbufMap__ = &dbgbufMap_IMPL; + + pThis->__dbgbufUnmap__ = &dbgbufUnmap_IMPL; + + pThis->__dbgbufGetMapAddrSpace__ = &dbgbufGetMapAddrSpace_IMPL; + + pThis->__dbgbufGetMemoryMappingDescriptor__ = &dbgbufGetMemoryMappingDescriptor_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_DebugBufferApi_gpuresMap; + + pThis->__nvoc_base_GpuResource.__gpuresUnmap__ = &__nvoc_thunk_DebugBufferApi_gpuresUnmap; + + pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_DebugBufferApi_gpuresGetMapAddrSpace; + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__rmresGetMemoryMappingDescriptor__ = &__nvoc_thunk_DebugBufferApi_rmresGetMemoryMappingDescriptor; + + pThis->__dbgbufShareCallback__ = &__nvoc_thunk_GpuResource_dbgbufShareCallback; + + pThis->__dbgbufControl__ = &__nvoc_thunk_GpuResource_dbgbufControl; + + pThis->__dbgbufGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dbgbufGetMemInterMapParams; + + pThis->__dbgbufGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dbgbufGetInternalObjectHandle; + + pThis->__dbgbufControlFilter__ = &__nvoc_thunk_RsResource_dbgbufControlFilter; + + pThis->__dbgbufAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dbgbufAddAdditionalDependants; + + pThis->__dbgbufGetRefCount__ = &__nvoc_thunk_RsResource_dbgbufGetRefCount; + + pThis->__dbgbufCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dbgbufCheckMemInterUnmap; + + pThis->__dbgbufMapTo__ = &__nvoc_thunk_RsResource_dbgbufMapTo; + + pThis->__dbgbufControl_Prologue__ = &__nvoc_thunk_RmResource_dbgbufControl_Prologue; + + pThis->__dbgbufGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_dbgbufGetRegBaseOffsetAndSize; + + pThis->__dbgbufCanCopy__ = &__nvoc_thunk_RsResource_dbgbufCanCopy; + + pThis->__dbgbufInternalControlForward__ = &__nvoc_thunk_GpuResource_dbgbufInternalControlForward; + + pThis->__dbgbufPreDestruct__ = 
&__nvoc_thunk_RsResource_dbgbufPreDestruct; + + pThis->__dbgbufUnmapFrom__ = &__nvoc_thunk_RsResource_dbgbufUnmapFrom; + + pThis->__dbgbufControl_Epilogue__ = &__nvoc_thunk_RmResource_dbgbufControl_Epilogue; + + pThis->__dbgbufControlLookup__ = &__nvoc_thunk_RsResource_dbgbufControlLookup; + + pThis->__dbgbufAccessCallback__ = &__nvoc_thunk_RmResource_dbgbufAccessCallback; +} + +void __nvoc_init_funcTable_DebugBufferApi(DebugBufferApi *pThis) { + __nvoc_init_funcTable_DebugBufferApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_DebugBufferApi(DebugBufferApi *pThis) { + pThis->__nvoc_pbase_DebugBufferApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_DebugBufferApi(pThis); +} + +NV_STATUS __nvoc_objCreate_DebugBufferApi(DebugBufferApi **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DebugBufferApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(DebugBufferApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DebugBufferApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DebugBufferApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DebugBufferApi(pThis); + status = __nvoc_ctor_DebugBufferApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DebugBufferApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DebugBufferApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DebugBufferApi(DebugBufferApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DebugBufferApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_dbgbuffer_nvoc.h b/src/nvidia/generated/g_dbgbuffer_nvoc.h new file mode 100644 index 000000000..d087c40f4 --- /dev/null +++ b/src/nvidia/generated/g_dbgbuffer_nvoc.h @@ -0,0 +1,247 @@ +#ifndef _G_DBGBUFFER_NVOC_H_ +#define _G_DBGBUFFER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_dbgbuffer_nvoc.h" + +#ifndef _DBGBUFFER_H_ +#define _DBGBUFFER_H_ + +#include "core/core.h" +#include "gpu/gpu_resource.h" +#include "class/cl00db.h" +#include "diagnostics/nv_debug_dump.h" + +// **************************************************************************** +// Type definitions +// **************************************************************************** + + +/*! + * RM internal class representing NV40_DEBUG_BUFFER + */ +#ifdef NVOC_DBGBUFFER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DebugBufferApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct DebugBufferApi *__nvoc_pbase_DebugBufferApi; + NV_STATUS (*__dbgbufMap__)(struct DebugBufferApi *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dbgbufUnmap__)(struct DebugBufferApi *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__dbgbufGetMapAddrSpace__)(struct DebugBufferApi *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__dbgbufGetMemoryMappingDescriptor__)(struct DebugBufferApi *, MEMORY_DESCRIPTOR **); + NvBool (*__dbgbufShareCallback__)(struct DebugBufferApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dbgbufControl__)(struct DebugBufferApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dbgbufGetMemInterMapParams__)(struct DebugBufferApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NvHandle (*__dbgbufGetInternalObjectHandle__)(struct DebugBufferApi *); + NV_STATUS (*__dbgbufControlFilter__)(struct DebugBufferApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__dbgbufAddAdditionalDependants__)(struct RsClient *, struct DebugBufferApi *, RsResourceRef *); + NvU32 (*__dbgbufGetRefCount__)(struct DebugBufferApi *); + NV_STATUS (*__dbgbufCheckMemInterUnmap__)(struct DebugBufferApi *, NvBool); + NV_STATUS (*__dbgbufMapTo__)(struct DebugBufferApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dbgbufControl_Prologue__)(struct DebugBufferApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + 
NV_STATUS (*__dbgbufGetRegBaseOffsetAndSize__)(struct DebugBufferApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__dbgbufCanCopy__)(struct DebugBufferApi *); + NV_STATUS (*__dbgbufInternalControlForward__)(struct DebugBufferApi *, NvU32, void *, NvU32); + void (*__dbgbufPreDestruct__)(struct DebugBufferApi *); + NV_STATUS (*__dbgbufUnmapFrom__)(struct DebugBufferApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dbgbufControl_Epilogue__)(struct DebugBufferApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dbgbufControlLookup__)(struct DebugBufferApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvBool (*__dbgbufAccessCallback__)(struct DebugBufferApi *, struct RsClient *, void *, RsAccessRight); + MEMORY_DESCRIPTOR *pMemDesc; +}; + +#ifndef __NVOC_CLASS_DebugBufferApi_TYPEDEF__ +#define __NVOC_CLASS_DebugBufferApi_TYPEDEF__ +typedef struct DebugBufferApi DebugBufferApi; +#endif /* __NVOC_CLASS_DebugBufferApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DebugBufferApi +#define __nvoc_class_id_DebugBufferApi 0x5e7a1b +#endif /* __nvoc_class_id_DebugBufferApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DebugBufferApi; + +#define __staticCast_DebugBufferApi(pThis) \ + ((pThis)->__nvoc_pbase_DebugBufferApi) + +#ifdef __nvoc_dbgbuffer_h_disabled +#define __dynamicCast_DebugBufferApi(pThis) ((DebugBufferApi*)NULL) +#else //__nvoc_dbgbuffer_h_disabled +#define __dynamicCast_DebugBufferApi(pThis) \ + ((DebugBufferApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DebugBufferApi))) +#endif //__nvoc_dbgbuffer_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DebugBufferApi(DebugBufferApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DebugBufferApi(DebugBufferApi**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DebugBufferApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DebugBufferApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dbgbufMap(pDebugBufferApi, pCallContext, pParams, pCpuMapping) dbgbufMap_DISPATCH(pDebugBufferApi, pCallContext, pParams, pCpuMapping) +#define dbgbufUnmap(pDebugBufferApi, pCallContext, pCpuMapping) dbgbufUnmap_DISPATCH(pDebugBufferApi, pCallContext, pCpuMapping) +#define dbgbufGetMapAddrSpace(pDebugBufferApi, pCallContext, mapFlags, pAddrSpace) dbgbufGetMapAddrSpace_DISPATCH(pDebugBufferApi, pCallContext, mapFlags, pAddrSpace) +#define dbgbufGetMemoryMappingDescriptor(pDebugBufferApi, ppMemDesc) dbgbufGetMemoryMappingDescriptor_DISPATCH(pDebugBufferApi, ppMemDesc) +#define dbgbufShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dbgbufShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dbgbufControl(pGpuResource, pCallContext, pParams) dbgbufControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dbgbufGetMemInterMapParams(pRmResource, pParams) dbgbufGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dbgbufGetInternalObjectHandle(pGpuResource) dbgbufGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dbgbufControlFilter(pResource, pCallContext, pParams) dbgbufControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dbgbufAddAdditionalDependants(pClient, pResource, pReference) dbgbufAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dbgbufGetRefCount(pResource) 
dbgbufGetRefCount_DISPATCH(pResource) +#define dbgbufCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dbgbufCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dbgbufMapTo(pResource, pParams) dbgbufMapTo_DISPATCH(pResource, pParams) +#define dbgbufControl_Prologue(pResource, pCallContext, pParams) dbgbufControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dbgbufGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) dbgbufGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define dbgbufCanCopy(pResource) dbgbufCanCopy_DISPATCH(pResource) +#define dbgbufInternalControlForward(pGpuResource, command, pParams, size) dbgbufInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dbgbufPreDestruct(pResource) dbgbufPreDestruct_DISPATCH(pResource) +#define dbgbufUnmapFrom(pResource, pParams) dbgbufUnmapFrom_DISPATCH(pResource, pParams) +#define dbgbufControl_Epilogue(pResource, pCallContext, pParams) dbgbufControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dbgbufControlLookup(pResource, pParams, ppEntry) dbgbufControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dbgbufAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dbgbufAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS dbgbufMap_IMPL(struct DebugBufferApi *pDebugBufferApi, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS dbgbufMap_DISPATCH(struct DebugBufferApi *pDebugBufferApi, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pDebugBufferApi->__dbgbufMap__(pDebugBufferApi, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS dbgbufUnmap_IMPL(struct DebugBufferApi *pDebugBufferApi, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS dbgbufUnmap_DISPATCH(struct DebugBufferApi *pDebugBufferApi, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pDebugBufferApi->__dbgbufUnmap__(pDebugBufferApi, pCallContext, pCpuMapping); +} + +NV_STATUS dbgbufGetMapAddrSpace_IMPL(struct DebugBufferApi *pDebugBufferApi, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS dbgbufGetMapAddrSpace_DISPATCH(struct DebugBufferApi *pDebugBufferApi, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pDebugBufferApi->__dbgbufGetMapAddrSpace__(pDebugBufferApi, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS dbgbufGetMemoryMappingDescriptor_IMPL(struct DebugBufferApi *pDebugBufferApi, MEMORY_DESCRIPTOR **ppMemDesc); + +static inline NV_STATUS dbgbufGetMemoryMappingDescriptor_DISPATCH(struct DebugBufferApi *pDebugBufferApi, MEMORY_DESCRIPTOR **ppMemDesc) { + return pDebugBufferApi->__dbgbufGetMemoryMappingDescriptor__(pDebugBufferApi, ppMemDesc); +} + +static inline NvBool dbgbufShareCallback_DISPATCH(struct DebugBufferApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dbgbufShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dbgbufControl_DISPATCH(struct DebugBufferApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dbgbufControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS 
dbgbufGetMemInterMapParams_DISPATCH(struct DebugBufferApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dbgbufGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NvHandle dbgbufGetInternalObjectHandle_DISPATCH(struct DebugBufferApi *pGpuResource) { + return pGpuResource->__dbgbufGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dbgbufControlFilter_DISPATCH(struct DebugBufferApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dbgbufControlFilter__(pResource, pCallContext, pParams); +} + +static inline void dbgbufAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DebugBufferApi *pResource, RsResourceRef *pReference) { + pResource->__dbgbufAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 dbgbufGetRefCount_DISPATCH(struct DebugBufferApi *pResource) { + return pResource->__dbgbufGetRefCount__(pResource); +} + +static inline NV_STATUS dbgbufCheckMemInterUnmap_DISPATCH(struct DebugBufferApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dbgbufCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dbgbufMapTo_DISPATCH(struct DebugBufferApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dbgbufMapTo__(pResource, pParams); +} + +static inline NV_STATUS dbgbufControl_Prologue_DISPATCH(struct DebugBufferApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dbgbufControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dbgbufGetRegBaseOffsetAndSize_DISPATCH(struct DebugBufferApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__dbgbufGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool dbgbufCanCopy_DISPATCH(struct DebugBufferApi *pResource) { + return pResource->__dbgbufCanCopy__(pResource); +} + +static inline NV_STATUS dbgbufInternalControlForward_DISPATCH(struct DebugBufferApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dbgbufInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void dbgbufPreDestruct_DISPATCH(struct DebugBufferApi *pResource) { + pResource->__dbgbufPreDestruct__(pResource); +} + +static inline NV_STATUS dbgbufUnmapFrom_DISPATCH(struct DebugBufferApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dbgbufUnmapFrom__(pResource, pParams); +} + +static inline void dbgbufControl_Epilogue_DISPATCH(struct DebugBufferApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dbgbufControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dbgbufControlLookup_DISPATCH(struct DebugBufferApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dbgbufControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvBool dbgbufAccessCallback_DISPATCH(struct DebugBufferApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dbgbufAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dbgbufConstruct_IMPL(struct DebugBufferApi *arg_pDebugBufferApi, CALL_CONTEXT *arg_pCallContext, struct 
RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dbgbufConstruct(arg_pDebugBufferApi, arg_pCallContext, arg_pParams) dbgbufConstruct_IMPL(arg_pDebugBufferApi, arg_pCallContext, arg_pParams) +void dbgbufDestruct_IMPL(struct DebugBufferApi *pDebugBufferApi); +#define __nvoc_dbgbufDestruct(pDebugBufferApi) dbgbufDestruct_IMPL(pDebugBufferApi) +#undef PRIVATE_FIELD + + +#endif // _DBGBUFFER_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DBGBUFFER_NVOC_H_ diff --git a/src/nvidia/generated/g_dce_client_nvoc.h b/src/nvidia/generated/g_dce_client_nvoc.h new file mode 100644 index 000000000..29147d09b --- /dev/null +++ b/src/nvidia/generated/g_dce_client_nvoc.h @@ -0,0 +1,285 @@ +#ifndef _G_DCE_CLIENT_NVOC_H_ +#define _G_DCE_CLIENT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_dce_client_nvoc.h" + +#ifndef _DCE_CLIENT_H_ +#define _DCE_CLIENT_H_ + +/*! + * @file dce_client.h + * @brief Provides definitions for all DceClient data structures and interfaces. + */ + +#include "gpu/eng_state.h" +#include "core/core.h" +#include "objrpc.h" +#include "os/dce_rm_client_ipc.h" + +/*! + * Temporary alias of DceClient to OBJDCECLIENTRM + */ +#define DceClient OBJDCECLIENTRM + +/*! + * Defines the structure used to contain all generic information related to + * the DceClient. 
+ */ +#ifdef NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJDCECLIENTRM { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct OBJDCECLIENTRM *__nvoc_pbase_OBJDCECLIENTRM; + NV_STATUS (*__dceclientConstructEngine__)(struct OBJGPU *, struct OBJDCECLIENTRM *, ENGDESCRIPTOR); + void (*__dceclientStateDestroy__)(struct OBJGPU *, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientReconcileTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *); + NV_STATUS (*__dceclientStateLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStateUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStateInitLocked__)(POBJGPU, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientStatePreLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStatePostUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStatePreUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientStateInitUnlocked__)(POBJGPU, struct OBJDCECLIENTRM *); + void (*__dceclientInitMissing__)(POBJGPU, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientStatePreInitLocked__)(POBJGPU, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientStatePreInitUnlocked__)(POBJGPU, struct OBJDCECLIENTRM *); + NV_STATUS (*__dceclientGetTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *); + NV_STATUS (*__dceclientCompareTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *, void *); + void (*__dceclientFreeTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *); + NV_STATUS (*__dceclientStatePostLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32); + NV_STATUS (*__dceclientAllocTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void **); + NV_STATUS (*__dceclientSetTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *); + NvBool (*__dceclientIsPresent__)(POBJGPU, struct OBJDCECLIENTRM *); + struct OBJRPC *pRpc; + NvU32 clientId[2]; +}; + +#ifndef __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +#define __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +typedef struct OBJDCECLIENTRM OBJDCECLIENTRM; +#endif /* __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCECLIENTRM +#define __nvoc_class_id_OBJDCECLIENTRM 0x61649c +#endif /* __nvoc_class_id_OBJDCECLIENTRM */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM; + +#define __staticCast_OBJDCECLIENTRM(pThis) \ + ((pThis)->__nvoc_pbase_OBJDCECLIENTRM) + +#ifdef __nvoc_dce_client_h_disabled +#define __dynamicCast_OBJDCECLIENTRM(pThis) ((OBJDCECLIENTRM*)NULL) +#else //__nvoc_dce_client_h_disabled +#define __dynamicCast_OBJDCECLIENTRM(pThis) \ + ((OBJDCECLIENTRM*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJDCECLIENTRM))) +#endif //__nvoc_dce_client_h_disabled + +#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32); +#define __objCreate_OBJDCECLIENTRM(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJDCECLIENTRM((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define dceclientConstructEngine(arg0, arg1, arg2) dceclientConstructEngine_DISPATCH(arg0, arg1, arg2) +#define dceclientStateDestroy(arg0, arg1) dceclientStateDestroy_DISPATCH(arg0, arg1) +#define dceclientReconcileTunableState(pGpu, pEngstate, pTunableState) dceclientReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dceclientStateLoad(pGpu, pEngstate, arg0) dceclientStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientStateUnload(pGpu, pEngstate, arg0) dceclientStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientStateInitLocked(pGpu, pEngstate) dceclientStateInitLocked_DISPATCH(pGpu, pEngstate) +#define dceclientStatePreLoad(pGpu, pEngstate, arg0) dceclientStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientStatePostUnload(pGpu, pEngstate, arg0) dceclientStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientStatePreUnload(pGpu, pEngstate, arg0) dceclientStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientStateInitUnlocked(pGpu, pEngstate) dceclientStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define dceclientInitMissing(pGpu, pEngstate) dceclientInitMissing_DISPATCH(pGpu, pEngstate) +#define dceclientStatePreInitLocked(pGpu, pEngstate) dceclientStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define dceclientStatePreInitUnlocked(pGpu, pEngstate) dceclientStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define dceclientGetTunableState(pGpu, pEngstate, pTunableState) dceclientGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dceclientCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) dceclientCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define dceclientFreeTunableState(pGpu, pEngstate, pTunableState) dceclientFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dceclientStatePostLoad(pGpu, pEngstate, arg0) dceclientStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define dceclientAllocTunableState(pGpu, pEngstate, ppTunableState) dceclientAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define dceclientSetTunableState(pGpu, pEngstate, pTunableState) dceclientSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dceclientIsPresent(pGpu, pEngstate) dceclientIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS dceclientConstructEngine_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, ENGDESCRIPTOR arg2); + +static inline NV_STATUS dceclientConstructEngine_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, ENGDESCRIPTOR arg2) { + return arg1->__dceclientConstructEngine__(arg0, arg1, arg2); +} + +void dceclientStateDestroy_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1); + +static inline void dceclientStateDestroy_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1) { + arg1->__dceclientStateDestroy__(arg0, arg1); +} + +static inline NV_STATUS dceclientReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return pEngstate->__dceclientReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS 
dceclientStateLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientStateUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientStateInitLocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void dceclientInitMissing_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + pEngstate->__dceclientInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dceclientGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return pEngstate->__dceclientGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS dceclientCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__dceclientCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void dceclientFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + pEngstate->__dceclientFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS dceclientStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) { + return pEngstate->__dceclientStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dceclientAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void **ppTunableState) { + return pEngstate->__dceclientAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS dceclientSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) { + return pEngstate->__dceclientSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool dceclientIsPresent_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) { + return pEngstate->__dceclientIsPresent__(pGpu, pEngstate); +} + +NV_STATUS dceclientInitRpcInfra_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1); +#ifdef __nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientInitRpcInfra(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1) { + 
NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientInitRpcInfra(arg0, arg1) dceclientInitRpcInfra_IMPL(arg0, arg1) +#endif //__nvoc_dce_client_h_disabled + +void dceclientDeinitRpcInfra_IMPL(struct OBJDCECLIENTRM *arg0); +#ifdef __nvoc_dce_client_h_disabled +static inline void dceclientDeinitRpcInfra(struct OBJDCECLIENTRM *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); +} +#else //__nvoc_dce_client_h_disabled +#define dceclientDeinitRpcInfra(arg0) dceclientDeinitRpcInfra_IMPL(arg0) +#endif //__nvoc_dce_client_h_disabled + +NV_STATUS dceclientDceRmInit_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvBool arg2); +#ifdef __nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientDceRmInit(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvBool arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientDceRmInit(arg0, arg1, arg2) dceclientDceRmInit_IMPL(arg0, arg1, arg2) +#endif //__nvoc_dce_client_h_disabled + +NV_STATUS dceclientSendRpc_IMPL(struct OBJDCECLIENTRM *arg0, void *arg1, NvU32 arg2); +#ifdef __nvoc_dce_client_h_disabled +static inline NV_STATUS dceclientSendRpc(struct OBJDCECLIENTRM *arg0, void *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_dce_client_h_disabled +#define dceclientSendRpc(arg0, arg1, arg2) dceclientSendRpc_IMPL(arg0, arg1, arg2) +#endif //__nvoc_dce_client_h_disabled + +#undef PRIVATE_FIELD + + +NV_STATUS rpcRmApiControl_dce(RM_API *pRmApi, + NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParamStructPtr, + NvU32 paramsSize); +NV_STATUS rpcRmApiAlloc_dce(RM_API *pRmApi, + NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, + void *pAllocParams); +NV_STATUS rpcRmApiDupObject_dce(RM_API *pRmApi, NvHandle hClient, + NvHandle hParent, NvHandle *phObject, NvHandle hClientSrc, + NvHandle hObjectSrc, NvU32 flags); +NV_STATUS rpcRmApiFree_dce(RM_API *pRmApi, NvHandle hClient, NvHandle hObject); +NV_STATUS rpcDceRmInit_dce(RM_API *pRmApi, NvBool bInit); +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DCE_CLIENT_NVOC_H_ diff --git a/src/nvidia/generated/g_deferred_api_nvoc.c b/src/nvidia/generated/g_deferred_api_nvoc.c new file mode 100644 index 000000000..cbbb6e212 --- /dev/null +++ b/src/nvidia/generated/g_deferred_api_nvoc.c @@ -0,0 +1,475 @@ +#define NVOC_DEFERRED_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_deferred_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x8ea933 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DeferredApiObject; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void 
__nvoc_init_DeferredApiObject(DeferredApiObject*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DeferredApiObject(DeferredApiObject*); +NV_STATUS __nvoc_ctor_DeferredApiObject(DeferredApiObject*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DeferredApiObject(DeferredApiObject*); +void __nvoc_dtor_DeferredApiObject(DeferredApiObject*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DeferredApiObject; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_DeferredApiObject = { + /*pClassDef=*/ &__nvoc_class_def_DeferredApiObject, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DeferredApiObject, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DeferredApiObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DeferredApiObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DeferredApiObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DeferredApiObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DeferredApiObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DeferredApiObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DeferredApiObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DeferredApiObject_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DeferredApiObject, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DeferredApiObject = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_DeferredApiObject_DeferredApiObject, + &__nvoc_rtti_DeferredApiObject_ChannelDescendant, + &__nvoc_rtti_DeferredApiObject_Notifier, + &__nvoc_rtti_DeferredApiObject_INotifier, + &__nvoc_rtti_DeferredApiObject_GpuResource, + &__nvoc_rtti_DeferredApiObject_RmResource, + &__nvoc_rtti_DeferredApiObject_RmResourceCommon, + 
&__nvoc_rtti_DeferredApiObject_RsResource, + &__nvoc_rtti_DeferredApiObject_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DeferredApiObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DeferredApiObject), + /*classId=*/ classId(DeferredApiObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DeferredApiObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DeferredApiObject, + /*pCastInfo=*/ &__nvoc_castinfo_DeferredApiObject, + /*pExportInfo=*/ &__nvoc_export_info_DeferredApiObject +}; + +static NV_STATUS __nvoc_thunk_DeferredApiObject_chandesGetSwMethods(struct ChannelDescendant *pDeferredApi, METHOD **ppMethods, NvU32 *pNumMethods) { + return defapiGetSwMethods((struct DeferredApiObject *)(((unsigned char *)pDeferredApi) - __nvoc_rtti_DeferredApiObject_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NvBool __nvoc_thunk_DeferredApiObject_chandesIsSwMethodStalling(struct ChannelDescendant *pDeferredApi, NvU32 hDeferredApi) { + return defapiIsSwMethodStalling((struct DeferredApiObject *)(((unsigned char *)pDeferredApi) - __nvoc_rtti_DeferredApiObject_ChannelDescendant.offset), hDeferredApi); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_defapiCheckMemInterUnmap(struct DeferredApiObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_DeferredApiObject_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_defapiShareCallback(struct DeferredApiObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DeferredApiObject_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_defapiAccessCallback(struct DeferredApiObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_defapiMapTo(struct DeferredApiObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_defapiGetMapAddrSpace(struct DeferredApiObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DeferredApiObject_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_defapiSetNotificationShare(struct DeferredApiObject *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DeferredApiObject_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_defapiGetRefCount(struct DeferredApiObject *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_defapiAddAdditionalDependants(struct RsClient *pClient, 
struct DeferredApiObject *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_defapiControl_Prologue(struct DeferredApiObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_defapiGetRegBaseOffsetAndSize(struct DeferredApiObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DeferredApiObject_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_defapiInternalControlForward(struct DeferredApiObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DeferredApiObject_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_defapiUnmapFrom(struct DeferredApiObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_defapiControl_Epilogue(struct DeferredApiObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_defapiControlLookup(struct DeferredApiObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_defapiGetInternalObjectHandle(struct DeferredApiObject *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DeferredApiObject_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_defapiControl(struct DeferredApiObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DeferredApiObject_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_defapiUnmap(struct DeferredApiObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DeferredApiObject_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_defapiGetMemInterMapParams(struct DeferredApiObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DeferredApiObject_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_defapiGetMemoryMappingDescriptor(struct DeferredApiObject 
*pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DeferredApiObject_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_defapiControlFilter(struct DeferredApiObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_defapiUnregisterEvent(struct DeferredApiObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DeferredApiObject_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_defapiCanCopy(struct DeferredApiObject *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_defapiPreDestruct(struct DeferredApiObject *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DeferredApiObject_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_defapiGetNotificationListPtr(struct DeferredApiObject *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DeferredApiObject_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_defapiGetNotificationShare(struct DeferredApiObject *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DeferredApiObject_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_defapiMap(struct DeferredApiObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DeferredApiObject_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_defapiGetOrAllocNotifShare(struct DeferredApiObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DeferredApiObject_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DeferredApiObject[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) defapiCtrlCmdDeferredApi_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50800101u, + /*paramSize=*/ sizeof(NV5080_CTRL_DEFERRED_API_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DeferredApiObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "defapiCtrlCmdDeferredApi" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) defapiCtrlCmdRemoveApi_IMPL, +#endif 
// NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50800102u, + /*paramSize=*/ sizeof(NV5080_CTRL_REMOVE_API_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DeferredApiObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "defapiCtrlCmdRemoveApi" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) defapiCtrlCmdDeferredApiV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50800103u, + /*paramSize=*/ sizeof(NV5080_CTRL_DEFERRED_API_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DeferredApiObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "defapiCtrlCmdDeferredApiV2" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DeferredApiObject = +{ + /*numEntries=*/ 3, + /*pExportEntries=*/ __nvoc_exported_method_def_DeferredApiObject +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_DeferredApiObject(DeferredApiObject *pThis) { + __nvoc_defapiDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DeferredApiObject(DeferredApiObject *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, PARAM_TO_ENGDESC_FUNCTION *); +NV_STATUS __nvoc_ctor_DeferredApiObject(DeferredApiObject *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, ((void *)0)); + if (status != NV_OK) goto __nvoc_ctor_DeferredApiObject_fail_ChannelDescendant; + __nvoc_init_dataField_DeferredApiObject(pThis); + + status = __nvoc_defapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DeferredApiObject_fail__init; + goto __nvoc_ctor_DeferredApiObject_exit; // Success + +__nvoc_ctor_DeferredApiObject_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_DeferredApiObject_fail_ChannelDescendant: +__nvoc_ctor_DeferredApiObject_exit: + + return status; +} + +static void __nvoc_init_funcTable_DeferredApiObject_1(DeferredApiObject *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__defapiGetSwMethods__ = &defapiGetSwMethods_IMPL; + + pThis->__defapiIsSwMethodStalling__ = &defapiIsSwMethodStalling_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__defapiCtrlCmdDeferredApi__ = &defapiCtrlCmdDeferredApi_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__defapiCtrlCmdDeferredApiV2__ = &defapiCtrlCmdDeferredApiV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__defapiCtrlCmdRemoveApi__ = &defapiCtrlCmdRemoveApi_IMPL; +#endif + + pThis->__nvoc_base_ChannelDescendant.__chandesGetSwMethods__ = &__nvoc_thunk_DeferredApiObject_chandesGetSwMethods; + + pThis->__nvoc_base_ChannelDescendant.__chandesIsSwMethodStalling__ = &__nvoc_thunk_DeferredApiObject_chandesIsSwMethodStalling; + + pThis->__defapiCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_defapiCheckMemInterUnmap; + + pThis->__defapiShareCallback__ = 
&__nvoc_thunk_GpuResource_defapiShareCallback; + + pThis->__defapiAccessCallback__ = &__nvoc_thunk_RmResource_defapiAccessCallback; + + pThis->__defapiMapTo__ = &__nvoc_thunk_RsResource_defapiMapTo; + + pThis->__defapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_defapiGetMapAddrSpace; + + pThis->__defapiSetNotificationShare__ = &__nvoc_thunk_Notifier_defapiSetNotificationShare; + + pThis->__defapiGetRefCount__ = &__nvoc_thunk_RsResource_defapiGetRefCount; + + pThis->__defapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_defapiAddAdditionalDependants; + + pThis->__defapiControl_Prologue__ = &__nvoc_thunk_RmResource_defapiControl_Prologue; + + pThis->__defapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_defapiGetRegBaseOffsetAndSize; + + pThis->__defapiInternalControlForward__ = &__nvoc_thunk_GpuResource_defapiInternalControlForward; + + pThis->__defapiUnmapFrom__ = &__nvoc_thunk_RsResource_defapiUnmapFrom; + + pThis->__defapiControl_Epilogue__ = &__nvoc_thunk_RmResource_defapiControl_Epilogue; + + pThis->__defapiControlLookup__ = &__nvoc_thunk_RsResource_defapiControlLookup; + + pThis->__defapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_defapiGetInternalObjectHandle; + + pThis->__defapiControl__ = &__nvoc_thunk_GpuResource_defapiControl; + + pThis->__defapiUnmap__ = &__nvoc_thunk_GpuResource_defapiUnmap; + + pThis->__defapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_defapiGetMemInterMapParams; + + pThis->__defapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_defapiGetMemoryMappingDescriptor; + + pThis->__defapiControlFilter__ = &__nvoc_thunk_RsResource_defapiControlFilter; + + pThis->__defapiUnregisterEvent__ = &__nvoc_thunk_Notifier_defapiUnregisterEvent; + + pThis->__defapiCanCopy__ = &__nvoc_thunk_RsResource_defapiCanCopy; + + pThis->__defapiPreDestruct__ = &__nvoc_thunk_RsResource_defapiPreDestruct; + + pThis->__defapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_defapiGetNotificationListPtr; + + pThis->__defapiGetNotificationShare__ = &__nvoc_thunk_Notifier_defapiGetNotificationShare; + + pThis->__defapiMap__ = &__nvoc_thunk_GpuResource_defapiMap; + + pThis->__defapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_defapiGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DeferredApiObject(DeferredApiObject *pThis) { + __nvoc_init_funcTable_DeferredApiObject_1(pThis); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_DeferredApiObject(DeferredApiObject *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DeferredApiObject = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + 
__nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_DeferredApiObject(pThis); +} + +NV_STATUS __nvoc_objCreate_DeferredApiObject(DeferredApiObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DeferredApiObject *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DeferredApiObject)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DeferredApiObject)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DeferredApiObject); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DeferredApiObject(pThis, pRmhalspecowner); + status = __nvoc_ctor_DeferredApiObject(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DeferredApiObject_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DeferredApiObject_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DeferredApiObject(DeferredApiObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DeferredApiObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_deferred_api_nvoc.h b/src/nvidia/generated/g_deferred_api_nvoc.h new file mode 100644 index 000000000..0e2c3eca9 --- /dev/null +++ b/src/nvidia/generated/g_deferred_api_nvoc.h @@ -0,0 +1,344 @@ +#ifndef _G_DEFERRED_API_NVOC_H_ +#define _G_DEFERRED_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_deferred_api_nvoc.h" + +#ifndef DEFERRED_API_H +#define DEFERRED_API_H 1 + +#include "core/core.h" +#include "kernel/gpu/fifo/channel_descendant.h" +#include "rmapi/control.h" +#include "ctrl/ctrl5080.h" +#include "nvsecurityinfo.h" + +#define DEFERRED_API_INFO_FLAGS_HAS_EXECUTED 0x00000001 +#define DEFERRED_API_INFO_FLAGS_HAS_TLB_FLUSHED 0x00000002 +#define DEFERRED_API_INFO_FLAGS_HAS_PRIVATE_DATA_ALLOC 0x00000004 + +typedef struct _def_deferred_api_info +{ + NODE Node; + NvHandle Handle; + NvHandle Client; + NvU32 Flags; // see DEFERRED_API_INFO_FLAGS_* defines + RS_PRIV_LEVEL privLevel; // privilege level of the client that initiated deferred call. + void * pDeferredApiInfo; + NvP64 pDeferredPrivateData; +} DEFERRED_API_INFO, *PDEFERRED_API_INFO; + +// RS-TODO: Delete. Keeping old typedef for transition. +typedef struct DeferredApiObject *PDEFERRED_API_OBJECT; + +#ifndef __NVOC_CLASS_DeferredApiObject_TYPEDEF__ +#define __NVOC_CLASS_DeferredApiObject_TYPEDEF__ +typedef struct DeferredApiObject DeferredApiObject; +#endif /* __NVOC_CLASS_DeferredApiObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DeferredApiObject +#define __nvoc_class_id_DeferredApiObject 0x8ea933 +#endif /* __nvoc_class_id_DeferredApiObject */ + + + +NV_STATUS Class5080GetDeferredApiInfo( + PDEFERRED_API_OBJECT pDeferredApiObject, + NvHandle hDeferredApi, + PDEFERRED_API_INFO *ppCliDeferredApi +); + + +/*! 
+ * RM internal class representing NV50_DEFERRED_API_CLASS + */ +#ifdef NVOC_DEFERRED_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DeferredApiObject { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct DeferredApiObject *__nvoc_pbase_DeferredApiObject; + NV_STATUS (*__defapiGetSwMethods__)(struct DeferredApiObject *, METHOD **, NvU32 *); + NvBool (*__defapiIsSwMethodStalling__)(struct DeferredApiObject *, NvU32); + NV_STATUS (*__defapiCtrlCmdDeferredApi__)(struct DeferredApiObject *, NV5080_CTRL_DEFERRED_API_PARAMS *); + NV_STATUS (*__defapiCtrlCmdDeferredApiV2__)(struct DeferredApiObject *, NV5080_CTRL_DEFERRED_API_V2_PARAMS *); + NV_STATUS (*__defapiCtrlCmdRemoveApi__)(struct DeferredApiObject *, NV5080_CTRL_REMOVE_API_PARAMS *); + NV_STATUS (*__defapiCheckMemInterUnmap__)(struct DeferredApiObject *, NvBool); + NvBool (*__defapiShareCallback__)(struct DeferredApiObject *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__defapiAccessCallback__)(struct DeferredApiObject *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__defapiMapTo__)(struct DeferredApiObject *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__defapiGetMapAddrSpace__)(struct DeferredApiObject *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__defapiSetNotificationShare__)(struct DeferredApiObject *, struct NotifShare *); + NvU32 (*__defapiGetRefCount__)(struct DeferredApiObject *); + void (*__defapiAddAdditionalDependants__)(struct RsClient *, struct DeferredApiObject *, RsResourceRef *); + NV_STATUS (*__defapiControl_Prologue__)(struct DeferredApiObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__defapiGetRegBaseOffsetAndSize__)(struct DeferredApiObject *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__defapiInternalControlForward__)(struct DeferredApiObject *, NvU32, void *, NvU32); + NV_STATUS (*__defapiUnmapFrom__)(struct DeferredApiObject *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__defapiControl_Epilogue__)(struct DeferredApiObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__defapiControlLookup__)(struct DeferredApiObject *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__defapiGetInternalObjectHandle__)(struct DeferredApiObject *); + NV_STATUS (*__defapiControl__)(struct DeferredApiObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__defapiUnmap__)(struct DeferredApiObject *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__defapiGetMemInterMapParams__)(struct DeferredApiObject *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__defapiGetMemoryMappingDescriptor__)(struct DeferredApiObject *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__defapiControlFilter__)(struct DeferredApiObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__defapiUnregisterEvent__)(struct DeferredApiObject *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool 
(*__defapiCanCopy__)(struct DeferredApiObject *); + void (*__defapiPreDestruct__)(struct DeferredApiObject *); + PEVENTNOTIFICATION *(*__defapiGetNotificationListPtr__)(struct DeferredApiObject *); + struct NotifShare *(*__defapiGetNotificationShare__)(struct DeferredApiObject *); + NV_STATUS (*__defapiMap__)(struct DeferredApiObject *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__defapiGetOrAllocNotifShare__)(struct DeferredApiObject *, NvHandle, NvHandle, struct NotifShare **); + PNODE DeferredApiList; + NvU32 NumWaitingOnTLBFlush; +}; + +#ifndef __NVOC_CLASS_DeferredApiObject_TYPEDEF__ +#define __NVOC_CLASS_DeferredApiObject_TYPEDEF__ +typedef struct DeferredApiObject DeferredApiObject; +#endif /* __NVOC_CLASS_DeferredApiObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DeferredApiObject +#define __nvoc_class_id_DeferredApiObject 0x8ea933 +#endif /* __nvoc_class_id_DeferredApiObject */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DeferredApiObject; + +#define __staticCast_DeferredApiObject(pThis) \ + ((pThis)->__nvoc_pbase_DeferredApiObject) + +#ifdef __nvoc_deferred_api_h_disabled +#define __dynamicCast_DeferredApiObject(pThis) ((DeferredApiObject*)NULL) +#else //__nvoc_deferred_api_h_disabled +#define __dynamicCast_DeferredApiObject(pThis) \ + ((DeferredApiObject*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DeferredApiObject))) +#endif //__nvoc_deferred_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DeferredApiObject(DeferredApiObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DeferredApiObject(DeferredApiObject**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DeferredApiObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DeferredApiObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define defapiGetSwMethods(pDeferredApi, ppMethods, pNumMethods) defapiGetSwMethods_DISPATCH(pDeferredApi, ppMethods, pNumMethods) +#define defapiIsSwMethodStalling(pDeferredApi, hDeferredApi) defapiIsSwMethodStalling_DISPATCH(pDeferredApi, hDeferredApi) +#define defapiCtrlCmdDeferredApi(pDeferredApiObj, pDeferredApi) defapiCtrlCmdDeferredApi_DISPATCH(pDeferredApiObj, pDeferredApi) +#define defapiCtrlCmdDeferredApiV2(pDeferredApiObj, pDeferredApi) defapiCtrlCmdDeferredApiV2_DISPATCH(pDeferredApiObj, pDeferredApi) +#define defapiCtrlCmdRemoveApi(pDeferredApiObj, pRemoveApi) defapiCtrlCmdRemoveApi_DISPATCH(pDeferredApiObj, pRemoveApi) +#define defapiCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) defapiCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define defapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) defapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define defapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) defapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define defapiMapTo(pResource, pParams) defapiMapTo_DISPATCH(pResource, pParams) +#define defapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) defapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define defapiSetNotificationShare(pNotifier, pNotifShare) defapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define defapiGetRefCount(pResource) 
defapiGetRefCount_DISPATCH(pResource) +#define defapiAddAdditionalDependants(pClient, pResource, pReference) defapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define defapiControl_Prologue(pResource, pCallContext, pParams) defapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define defapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) defapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define defapiInternalControlForward(pGpuResource, command, pParams, size) defapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define defapiUnmapFrom(pResource, pParams) defapiUnmapFrom_DISPATCH(pResource, pParams) +#define defapiControl_Epilogue(pResource, pCallContext, pParams) defapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define defapiControlLookup(pResource, pParams, ppEntry) defapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define defapiGetInternalObjectHandle(pGpuResource) defapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define defapiControl(pGpuResource, pCallContext, pParams) defapiControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define defapiUnmap(pGpuResource, pCallContext, pCpuMapping) defapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define defapiGetMemInterMapParams(pRmResource, pParams) defapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define defapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) defapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define defapiControlFilter(pResource, pCallContext, pParams) defapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define defapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) defapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define defapiCanCopy(pResource) defapiCanCopy_DISPATCH(pResource) +#define defapiPreDestruct(pResource) defapiPreDestruct_DISPATCH(pResource) +#define defapiGetNotificationListPtr(pNotifier) defapiGetNotificationListPtr_DISPATCH(pNotifier) +#define defapiGetNotificationShare(pNotifier) defapiGetNotificationShare_DISPATCH(pNotifier) +#define defapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) defapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define defapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) defapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS defapiGetSwMethods_IMPL(struct DeferredApiObject *pDeferredApi, METHOD **ppMethods, NvU32 *pNumMethods); + +static inline NV_STATUS defapiGetSwMethods_DISPATCH(struct DeferredApiObject *pDeferredApi, METHOD **ppMethods, NvU32 *pNumMethods) { + return pDeferredApi->__defapiGetSwMethods__(pDeferredApi, ppMethods, pNumMethods); +} + +NvBool defapiIsSwMethodStalling_IMPL(struct DeferredApiObject *pDeferredApi, NvU32 hDeferredApi); + +static inline NvBool defapiIsSwMethodStalling_DISPATCH(struct DeferredApiObject *pDeferredApi, NvU32 hDeferredApi) { + return pDeferredApi->__defapiIsSwMethodStalling__(pDeferredApi, hDeferredApi); +} + +NV_STATUS defapiCtrlCmdDeferredApi_IMPL(struct DeferredApiObject *pDeferredApiObj, NV5080_CTRL_DEFERRED_API_PARAMS *pDeferredApi); + +static inline NV_STATUS defapiCtrlCmdDeferredApi_DISPATCH(struct DeferredApiObject *pDeferredApiObj, NV5080_CTRL_DEFERRED_API_PARAMS *pDeferredApi) { + return pDeferredApiObj->__defapiCtrlCmdDeferredApi__(pDeferredApiObj, 
pDeferredApi); +} + +NV_STATUS defapiCtrlCmdDeferredApiV2_IMPL(struct DeferredApiObject *pDeferredApiObj, NV5080_CTRL_DEFERRED_API_V2_PARAMS *pDeferredApi); + +static inline NV_STATUS defapiCtrlCmdDeferredApiV2_DISPATCH(struct DeferredApiObject *pDeferredApiObj, NV5080_CTRL_DEFERRED_API_V2_PARAMS *pDeferredApi) { + return pDeferredApiObj->__defapiCtrlCmdDeferredApiV2__(pDeferredApiObj, pDeferredApi); +} + +NV_STATUS defapiCtrlCmdRemoveApi_IMPL(struct DeferredApiObject *pDeferredApiObj, NV5080_CTRL_REMOVE_API_PARAMS *pRemoveApi); + +static inline NV_STATUS defapiCtrlCmdRemoveApi_DISPATCH(struct DeferredApiObject *pDeferredApiObj, NV5080_CTRL_REMOVE_API_PARAMS *pRemoveApi) { + return pDeferredApiObj->__defapiCtrlCmdRemoveApi__(pDeferredApiObj, pRemoveApi); +} + +static inline NV_STATUS defapiCheckMemInterUnmap_DISPATCH(struct DeferredApiObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__defapiCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool defapiShareCallback_DISPATCH(struct DeferredApiObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__defapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool defapiAccessCallback_DISPATCH(struct DeferredApiObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__defapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS defapiMapTo_DISPATCH(struct DeferredApiObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__defapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS defapiGetMapAddrSpace_DISPATCH(struct DeferredApiObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__defapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void defapiSetNotificationShare_DISPATCH(struct DeferredApiObject *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__defapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 defapiGetRefCount_DISPATCH(struct DeferredApiObject *pResource) { + return pResource->__defapiGetRefCount__(pResource); +} + +static inline void defapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DeferredApiObject *pResource, RsResourceRef *pReference) { + pResource->__defapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS defapiControl_Prologue_DISPATCH(struct DeferredApiObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__defapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS defapiGetRegBaseOffsetAndSize_DISPATCH(struct DeferredApiObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__defapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS defapiInternalControlForward_DISPATCH(struct DeferredApiObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__defapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS defapiUnmapFrom_DISPATCH(struct DeferredApiObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return 
pResource->__defapiUnmapFrom__(pResource, pParams); +} + +static inline void defapiControl_Epilogue_DISPATCH(struct DeferredApiObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__defapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS defapiControlLookup_DISPATCH(struct DeferredApiObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__defapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle defapiGetInternalObjectHandle_DISPATCH(struct DeferredApiObject *pGpuResource) { + return pGpuResource->__defapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS defapiControl_DISPATCH(struct DeferredApiObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__defapiControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS defapiUnmap_DISPATCH(struct DeferredApiObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__defapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS defapiGetMemInterMapParams_DISPATCH(struct DeferredApiObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__defapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS defapiGetMemoryMappingDescriptor_DISPATCH(struct DeferredApiObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__defapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS defapiControlFilter_DISPATCH(struct DeferredApiObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__defapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS defapiUnregisterEvent_DISPATCH(struct DeferredApiObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__defapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool defapiCanCopy_DISPATCH(struct DeferredApiObject *pResource) { + return pResource->__defapiCanCopy__(pResource); +} + +static inline void defapiPreDestruct_DISPATCH(struct DeferredApiObject *pResource) { + pResource->__defapiPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *defapiGetNotificationListPtr_DISPATCH(struct DeferredApiObject *pNotifier) { + return pNotifier->__defapiGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *defapiGetNotificationShare_DISPATCH(struct DeferredApiObject *pNotifier) { + return pNotifier->__defapiGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS defapiMap_DISPATCH(struct DeferredApiObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__defapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS defapiGetOrAllocNotifShare_DISPATCH(struct DeferredApiObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__defapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS defapiConstruct_IMPL(struct 
DeferredApiObject *arg_pDeferredApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_defapiConstruct(arg_pDeferredApi, arg_pCallContext, arg_pParams) defapiConstruct_IMPL(arg_pDeferredApi, arg_pCallContext, arg_pParams) +void defapiDestruct_IMPL(struct DeferredApiObject *pDeferredApi); +#define __nvoc_defapiDestruct(pDeferredApi) defapiDestruct_IMPL(pDeferredApi) +#undef PRIVATE_FIELD + + +#endif // DEFERRED_API_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DEFERRED_API_NVOC_H_ diff --git a/src/nvidia/generated/g_device_nvoc.c b/src/nvidia/generated/g_device_nvoc.c new file mode 100644 index 000000000..e713760f8 --- /dev/null +++ b/src/nvidia/generated/g_device_nvoc.c @@ -0,0 +1,1424 @@ +#define NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_device_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe0ac20 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_Device(Device*); +void __nvoc_init_funcTable_Device(Device*); +NV_STATUS __nvoc_ctor_Device(Device*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Device(Device*); +void __nvoc_dtor_Device(Device*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Device; + +static const struct NVOC_RTTI __nvoc_rtti_Device_Device = { + /*pClassDef=*/ &__nvoc_class_def_Device, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Device, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Device_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Device = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_Device_Device, + &__nvoc_rtti_Device_GpuResource, + &__nvoc_rtti_Device_RmResource, + &__nvoc_rtti_Device_RmResourceCommon, + 
&__nvoc_rtti_Device_RsResource, + &__nvoc_rtti_Device_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Device = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Device), + /*classId=*/ classId(Device), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Device", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Device, + /*pCastInfo=*/ &__nvoc_castinfo_Device, + /*pExportInfo=*/ &__nvoc_export_info_Device +}; + +static NV_STATUS __nvoc_thunk_Device_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return deviceControl((struct Device *)(((unsigned char *)pResource) - __nvoc_rtti_Device_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Device_gpuresInternalControlForward(struct GpuResource *pDevice, NvU32 command, void *pParams, NvU32 size) { + return deviceInternalControlForward((struct Device *)(((unsigned char *)pDevice) - __nvoc_rtti_Device_GpuResource.offset), command, pParams, size); +} + +static NvBool __nvoc_thunk_GpuResource_deviceShareCallback(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_deviceUnmap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_deviceGetMemInterMapParams(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_deviceGetMemoryMappingDescriptor(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_deviceGetMapAddrSpace(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_deviceGetInternalObjectHandle(struct Device *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_deviceControlFilter(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_deviceAddAdditionalDependants(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pReference); +} + +static 
NvU32 __nvoc_thunk_RsResource_deviceGetRefCount(struct Device *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_deviceCheckMemInterUnmap(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_deviceMapTo(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_deviceControl_Prologue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_deviceGetRegBaseOffsetAndSize(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_deviceCanCopy(struct Device *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_devicePreDestruct(struct Device *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_deviceUnmapFrom(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_deviceControl_Epilogue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_deviceControlLookup(struct Device *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_deviceMap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_deviceAccessCallback(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[] = +{ 
+ { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdBifReset_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800102u, + /*paramSize=*/ sizeof(NV0080_CTRL_BIF_RESET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdBifReset" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdBifGetDmaBaseSysmemAddr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800103u, + /*paramSize=*/ sizeof(NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdBifGetDmaBaseSysmemAddr" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdBifAspmFeatureSupported_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800104u, + /*paramSize=*/ sizeof(NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdBifAspmFeatureSupported" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800201u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetClasslist" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetNumSubdevices_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800280u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetNumSubdevices" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*flags=*/ 0x5u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800287u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuModifyGpuSwStatePersistence" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800288u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuQueryGpuSwStatePersistence" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetVirtualizationMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800289u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetVirtualizationMode" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetSparseTextureComputeMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80028cu, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetSparseTextureComputeMode" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuSetSparseTextureComputeMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80028du, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuSetSparseTextureComputeMode" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetVgxCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80028eu, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetVgxCaps" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslistV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800292u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetClasslistV2" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800293u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetFindSubDeviceHandle" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetBrandCaps_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*flags=*/ 0x211u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800294u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuGetBrandCaps" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x800296u, + /*paramSize=*/ sizeof(NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdGpuSetVgpuVfBar1Size" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdKGrGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*flags=*/ 0x812u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801102u, + /*paramSize=*/ sizeof(NV0080_CTRL_GR_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdKGrGetCaps" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdKGrGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801104u, + /*paramSize=*/ sizeof(NV0080_CTRL_GR_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdKGrGetInfo" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdKGrGetTpcPartitionMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801107u, + /*paramSize=*/ sizeof(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdKGrGetTpcPartitionMode" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdKGrSetTpcPartitionMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801108u, + /*paramSize=*/ sizeof(NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdKGrSetTpcPartitionMode" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdKGrGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*flags=*/ 0x812u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801109u, + /*paramSize=*/ sizeof(NV0080_CTRL_GR_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdKGrGetCapsV2" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
deviceCtrlCmdKGrGetInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801110u, + /*paramSize=*/ sizeof(NV0080_CTRL_GR_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdKGrGetInfoV2" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFbGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801301u, + /*paramSize=*/ sizeof(NV0080_CTRL_FB_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFbGetCaps" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFbGetCompbitStoreInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801306u, + /*paramSize=*/ sizeof(NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFbGetCompbitStoreInfo" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFbGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801307u, + /*paramSize=*/ sizeof(NV0080_CTRL_FB_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFbGetCapsV2" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdHostGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801401u, + /*paramSize=*/ sizeof(NV0080_CTRL_HOST_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdHostGetCaps" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdHostGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801402u, + /*paramSize=*/ sizeof(NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdHostGetCapsV2" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801701u, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoGetCaps" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoStartSelectedChannels_IMPL, 
+#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801705u, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoStartSelectedChannels" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoGetEngineContextProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801707u, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoGetEngineContextProperties" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoGetChannelList_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80170du, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoGetChannelList" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2211u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoGetLatencyBufferSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2211u) + /*flags=*/ 0x2211u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80170eu, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoGetLatencyBufferSize" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoSetChannelProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80170fu, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoSetChannelProperties" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoStopRunlist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801711u, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoStopRunlist" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoStartRunlist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801712u, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_START_RUNLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoStartRunlist" +#endif + }, + { /* [34] 
*/ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801713u, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoGetCapsV2" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdFifoIdleChannels_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801714u, + /*paramSize=*/ sizeof(NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdFifoIdleChannels" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaGetPteInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801801u, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaGetPteInfo" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaFlush_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801805u, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_FLUSH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaFlush" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaAdvSchedGetVaCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801806u, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaAdvSchedGetVaCaps" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaGetPdeInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801809u, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaGetPdeInfo" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaSetPteInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80180au, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaSetPteInfo" +#endif + }, + { /* [41] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaInvalidateTLB_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80180cu, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaInvalidateTLB" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80180du, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaGetCaps" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaSetVASpaceSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80180eu, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaSetVASpaceSize" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaUpdatePde2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x80180fu, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaUpdatePde2" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaEnablePrivilegedRange_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801810u, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaEnablePrivilegedRange" +#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaSetDefaultVASpace_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801812u, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaSetDefaultVASpace" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaSetPageDirectory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801813u, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaSetPageDirectory" 
+#endif + }, + { /* [48] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdDmaUnsetPageDirectory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801814u, + /*paramSize=*/ sizeof(NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdDmaUnsetPageDirectory" +#endif + }, + { /* [49] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdMsencGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801b01u, + /*paramSize=*/ sizeof(NV0080_CTRL_MSENC_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdMsencGetCaps" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdBspGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801c02u, + /*paramSize=*/ sizeof(NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdBspGetCapsV2" +#endif + }, + { /* [51] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTSwitch_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*flags=*/ 0x1u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801e01u, + /*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdOsUnixVTSwitch" +#endif + }, + { /* [52] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTGetFBInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*flags=*/ 0x1u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801e02u, + /*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo" +#endif + }, + { /* [53] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdNvjpgGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x801f02u, + /*paramSize=*/ sizeof(NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdNvjpgGetCapsV2" +#endif + }, + { /* [54] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdInternalPerfCudaLimitDisable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x802004u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdInternalPerfCudaLimitDisable" +#endif + }, + { /* 
[55] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x802006u, + /*paramSize=*/ sizeof(NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount" +#endif + }, + { /* [56] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) deviceCtrlCmdInternalPerfCudaLimitSetControl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe10u) + /*flags=*/ 0xe10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x802009u, + /*paramSize=*/ sizeof(NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "deviceCtrlCmdInternalPerfCudaLimitSetControl" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Device = +{ + /*numEntries=*/ 57, + /*pExportEntries=*/ __nvoc_exported_method_def_Device +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Device(Device *pThis) { + __nvoc_deviceDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Device(Device *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Device(Device *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Device_fail_GpuResource; + __nvoc_init_dataField_Device(pThis); + + status = __nvoc_deviceConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Device_fail__init; + goto __nvoc_ctor_Device_exit; // Success + +__nvoc_ctor_Device_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_Device_fail_GpuResource: +__nvoc_ctor_Device_exit: + + return status; +} + +static void __nvoc_init_funcTable_Device_1(Device *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__deviceControl__ = &deviceControl_IMPL; + + pThis->__deviceInternalControlForward__ = &deviceInternalControlForward_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__deviceCtrlCmdBifReset__ = &deviceCtrlCmdBifReset_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__deviceCtrlCmdBifGetDmaBaseSysmemAddr__ = &deviceCtrlCmdBifGetDmaBaseSysmemAddr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__deviceCtrlCmdBifAspmFeatureSupported__ = &deviceCtrlCmdBifAspmFeatureSupported_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdDmaGetPteInfo__ = &deviceCtrlCmdDmaGetPteInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdDmaUpdatePde2__ = &deviceCtrlCmdDmaUpdatePde2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__deviceCtrlCmdDmaSetPageDirectory__ = &deviceCtrlCmdDmaSetPageDirectory_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__deviceCtrlCmdDmaUnsetPageDirectory__ = &deviceCtrlCmdDmaUnsetPageDirectory_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__deviceCtrlCmdDmaFlush__ = &deviceCtrlCmdDmaFlush_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdDmaAdvSchedGetVaCaps__ = &deviceCtrlCmdDmaAdvSchedGetVaCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdDmaGetPdeInfo__ = &deviceCtrlCmdDmaGetPdeInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdDmaSetPteInfo__ = &deviceCtrlCmdDmaSetPteInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdDmaInvalidateTLB__ = &deviceCtrlCmdDmaInvalidateTLB_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdDmaGetCaps__ = &deviceCtrlCmdDmaGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdDmaSetVASpaceSize__ = &deviceCtrlCmdDmaSetVASpaceSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdDmaEnablePrivilegedRange__ = &deviceCtrlCmdDmaEnablePrivilegedRange_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__deviceCtrlCmdDmaSetDefaultVASpace__ = &deviceCtrlCmdDmaSetDefaultVASpace_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + pThis->__deviceCtrlCmdKGrGetCaps__ = &deviceCtrlCmdKGrGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + pThis->__deviceCtrlCmdKGrGetCapsV2__ = &deviceCtrlCmdKGrGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__deviceCtrlCmdKGrGetInfo__ = &deviceCtrlCmdKGrGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdKGrGetInfoV2__ = &deviceCtrlCmdKGrGetInfoV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__deviceCtrlCmdKGrGetTpcPartitionMode__ = &deviceCtrlCmdKGrGetTpcPartitionMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__deviceCtrlCmdKGrSetTpcPartitionMode__ = &deviceCtrlCmdKGrSetTpcPartitionMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__deviceCtrlCmdFbGetCompbitStoreInfo__ = &deviceCtrlCmdFbGetCompbitStoreInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdFbGetCaps__ = &deviceCtrlCmdFbGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdFbGetCapsV2__ = &deviceCtrlCmdFbGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__deviceCtrlCmdFifoGetCaps__ = &deviceCtrlCmdFifoGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__deviceCtrlCmdFifoGetCapsV2__ = &deviceCtrlCmdFifoGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__deviceCtrlCmdFifoStartSelectedChannels__ = &deviceCtrlCmdFifoStartSelectedChannels_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__deviceCtrlCmdFifoGetEngineContextProperties__ = &deviceCtrlCmdFifoGetEngineContextProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__deviceCtrlCmdFifoStopRunlist__ = &deviceCtrlCmdFifoStopRunlist_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__deviceCtrlCmdFifoStartRunlist__ = 
&deviceCtrlCmdFifoStartRunlist_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__deviceCtrlCmdFifoGetChannelList__ = &deviceCtrlCmdFifoGetChannelList_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2211u) + pThis->__deviceCtrlCmdFifoGetLatencyBufferSize__ = &deviceCtrlCmdFifoGetLatencyBufferSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__deviceCtrlCmdFifoSetChannelProperties__ = &deviceCtrlCmdFifoSetChannelProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__deviceCtrlCmdFifoIdleChannels__ = &deviceCtrlCmdFifoIdleChannels_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__deviceCtrlCmdHostGetCaps__ = &deviceCtrlCmdHostGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__deviceCtrlCmdHostGetCapsV2__ = &deviceCtrlCmdHostGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe10u) + pThis->__deviceCtrlCmdInternalPerfCudaLimitSetControl__ = &deviceCtrlCmdInternalPerfCudaLimitSetControl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__deviceCtrlCmdInternalPerfCudaLimitDisable__ = &deviceCtrlCmdInternalPerfCudaLimitDisable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount__ = &deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + pThis->__deviceCtrlCmdGpuGetClasslist__ = &deviceCtrlCmdGpuGetClasslist_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + pThis->__deviceCtrlCmdGpuGetClasslistV2__ = &deviceCtrlCmdGpuGetClasslistV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__deviceCtrlCmdGpuGetNumSubdevices__ = &deviceCtrlCmdGpuGetNumSubdevices_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + pThis->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__ = &deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__ = &deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__deviceCtrlCmdGpuGetVirtualizationMode__ = &deviceCtrlCmdGpuGetVirtualizationMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__deviceCtrlCmdGpuSetVgpuVfBar1Size__ = &deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdGpuGetSparseTextureComputeMode__ = &deviceCtrlCmdGpuGetSparseTextureComputeMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdGpuSetSparseTextureComputeMode__ = &deviceCtrlCmdGpuSetSparseTextureComputeMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__deviceCtrlCmdGpuGetVgxCaps__ = &deviceCtrlCmdGpuGetVgxCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + pThis->__deviceCtrlCmdGpuGetBrandCaps__ = &deviceCtrlCmdGpuGetBrandCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__deviceCtrlCmdGpuGetFindSubDeviceHandle__ = &deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__deviceCtrlCmdMsencGetCaps__ = &deviceCtrlCmdMsencGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__deviceCtrlCmdBspGetCapsV2__ 
= &deviceCtrlCmdBspGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__deviceCtrlCmdNvjpgGetCapsV2__ = &deviceCtrlCmdNvjpgGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + pThis->__deviceCtrlCmdOsUnixVTSwitch__ = &deviceCtrlCmdOsUnixVTSwitch_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + pThis->__deviceCtrlCmdOsUnixVTGetFBInfo__ = &deviceCtrlCmdOsUnixVTGetFBInfo_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_Device_gpuresControl; + + pThis->__nvoc_base_GpuResource.__gpuresInternalControlForward__ = &__nvoc_thunk_Device_gpuresInternalControlForward; + + pThis->__deviceShareCallback__ = &__nvoc_thunk_GpuResource_deviceShareCallback; + + pThis->__deviceUnmap__ = &__nvoc_thunk_GpuResource_deviceUnmap; + + pThis->__deviceGetMemInterMapParams__ = &__nvoc_thunk_RmResource_deviceGetMemInterMapParams; + + pThis->__deviceGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_deviceGetMemoryMappingDescriptor; + + pThis->__deviceGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_deviceGetMapAddrSpace; + + pThis->__deviceGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_deviceGetInternalObjectHandle; + + pThis->__deviceControlFilter__ = &__nvoc_thunk_RsResource_deviceControlFilter; + + pThis->__deviceAddAdditionalDependants__ = &__nvoc_thunk_RsResource_deviceAddAdditionalDependants; + + pThis->__deviceGetRefCount__ = &__nvoc_thunk_RsResource_deviceGetRefCount; + + pThis->__deviceCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_deviceCheckMemInterUnmap; + + pThis->__deviceMapTo__ = &__nvoc_thunk_RsResource_deviceMapTo; + + pThis->__deviceControl_Prologue__ = &__nvoc_thunk_RmResource_deviceControl_Prologue; + + pThis->__deviceGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_deviceGetRegBaseOffsetAndSize; + + pThis->__deviceCanCopy__ = &__nvoc_thunk_RsResource_deviceCanCopy; + + pThis->__devicePreDestruct__ = &__nvoc_thunk_RsResource_devicePreDestruct; + + pThis->__deviceUnmapFrom__ = &__nvoc_thunk_RsResource_deviceUnmapFrom; + + pThis->__deviceControl_Epilogue__ = &__nvoc_thunk_RmResource_deviceControl_Epilogue; + + pThis->__deviceControlLookup__ = &__nvoc_thunk_RsResource_deviceControlLookup; + + pThis->__deviceMap__ = &__nvoc_thunk_GpuResource_deviceMap; + + pThis->__deviceAccessCallback__ = &__nvoc_thunk_RmResource_deviceAccessCallback; +} + +void __nvoc_init_funcTable_Device(Device *pThis) { + __nvoc_init_funcTable_Device_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Device(Device *pThis) { + pThis->__nvoc_pbase_Device = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_Device(pThis); +} + +NV_STATUS __nvoc_objCreate_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Device *pThis; + + pThis = portMemAllocNonPaged(sizeof(Device)); + if (pThis 
== NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Device)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Device); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Device(pThis); + status = __nvoc_ctor_Device(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Device_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Device_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Device(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_device_nvoc.h b/src/nvidia/generated/g_device_nvoc.h new file mode 100644 index 000000000..d21e89d58 --- /dev/null +++ b/src/nvidia/generated/g_device_nvoc.h @@ -0,0 +1,840 @@ +#ifndef _G_DEVICE_NVOC_H_ +#define _G_DEVICE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_device_nvoc.h" + +#ifndef _DEVICE_H_ +#define _DEVICE_H_ + +#include "core/core.h" + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "nvoc/utility.h" +#include "resserv/rs_resource.h" +#include "rmapi/control.h" +#include "containers/btree.h" + +#include "gpu/gpu_resource.h" +#include "mem_mgr/vaspace.h" + +#include "ctrl/ctrl0080.h" // rmcontrol params + +// Forward declaration +struct HOST_VGPU_DEVICE; +struct OBJVASPACE; + +#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +typedef struct OBJVASPACE OBJVASPACE; +#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVASPACE +#define __nvoc_class_id_OBJVASPACE 0x6c347f +#endif /* __nvoc_class_id_OBJVASPACE */ + + + +// TODO: Remove this after adding KERNEL_HOST_VGPU_DEVICE +typedef struct HOST_VGPU_DEVICE KERNEL_HOST_VGPU_DEVICE; + +/** + * A device consists of one or more GPUs. Devices provide broadcast + * semantics; that is, operations involving a device are applied to all GPUs + * in the device. + */ +#ifdef NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Device { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct Device *__nvoc_pbase_Device; + NV_STATUS (*__deviceControl__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__deviceInternalControlForward__)(struct Device *, NvU32, void *, NvU32); + NV_STATUS (*__deviceCtrlCmdBifReset__)(struct Device *, NV0080_CTRL_BIF_RESET_PARAMS *); + NV_STATUS (*__deviceCtrlCmdBifGetDmaBaseSysmemAddr__)(struct Device *, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *); + NV_STATUS (*__deviceCtrlCmdBifAspmFeatureSupported__)(struct Device *, NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaGetPteInfo__)(struct Device *, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaUpdatePde2__)(struct Device *, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaSetPageDirectory__)(struct Device *, NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaUnsetPageDirectory__)(struct Device *, NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaFlush__)(struct Device *, NV0080_CTRL_DMA_FLUSH_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaAdvSchedGetVaCaps__)(struct Device *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaGetPdeInfo__)(struct Device *, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaSetPteInfo__)(struct Device *, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaInvalidateTLB__)(struct Device *, NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaGetCaps__)(struct Device *, NV0080_CTRL_DMA_GET_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaSetVASpaceSize__)(struct Device *, NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaEnablePrivilegedRange__)(struct Device *, NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdDmaSetDefaultVASpace__)(struct Device *, NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *); + NV_STATUS 
(*__deviceCtrlCmdKGrGetCaps__)(struct Device *, NV0080_CTRL_GR_GET_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdKGrGetCapsV2__)(struct Device *, NV0080_CTRL_GR_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdKGrGetInfo__)(struct Device *, NV0080_CTRL_GR_GET_INFO_PARAMS *); + NV_STATUS (*__deviceCtrlCmdKGrGetInfoV2__)(struct Device *, NV0080_CTRL_GR_GET_INFO_V2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdKGrGetTpcPartitionMode__)(struct Device *, NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdKGrSetTpcPartitionMode__)(struct Device *, NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFbGetCompbitStoreInfo__)(struct Device *, NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFbGetCaps__)(struct Device *, NV0080_CTRL_FB_GET_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFbGetCapsV2__)(struct Device *, NV0080_CTRL_FB_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoGetCaps__)(struct Device *, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoGetCapsV2__)(struct Device *, NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoStartSelectedChannels__)(struct Device *, NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoGetEngineContextProperties__)(struct Device *, NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoStopRunlist__)(struct Device *, NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoStartRunlist__)(struct Device *, NV0080_CTRL_FIFO_START_RUNLIST_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoGetChannelList__)(struct Device *, NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoGetLatencyBufferSize__)(struct Device *, NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoSetChannelProperties__)(struct Device *, NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS *); + NV_STATUS (*__deviceCtrlCmdFifoIdleChannels__)(struct Device *, NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdHostGetCaps__)(struct Device *, NV0080_CTRL_HOST_GET_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdHostGetCapsV2__)(struct Device *, NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdInternalPerfCudaLimitSetControl__)(struct Device *, NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS *); + NV_STATUS (*__deviceCtrlCmdInternalPerfCudaLimitDisable__)(struct Device *); + NV_STATUS (*__deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount__)(struct Device *, NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetClasslist__)(struct Device *, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetClasslistV2__)(struct Device *, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetNumSubdevices__)(struct Device *, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuModifyGpuSwStatePersistence__)(struct Device *, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuQueryGpuSwStatePersistence__)(struct Device *, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetVirtualizationMode__)(struct Device *, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuSetVgpuVfBar1Size__)(struct Device *, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetSparseTextureComputeMode__)(struct Device 
*, NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuSetSparseTextureComputeMode__)(struct Device *, NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetVgxCaps__)(struct Device *, NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetBrandCaps__)(struct Device *, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdGpuGetFindSubDeviceHandle__)(struct Device *, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *); + NV_STATUS (*__deviceCtrlCmdMsencGetCaps__)(struct Device *, NV0080_CTRL_MSENC_GET_CAPS_PARAMS *); + NV_STATUS (*__deviceCtrlCmdBspGetCapsV2__)(struct Device *, NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2 *); + NV_STATUS (*__deviceCtrlCmdNvjpgGetCapsV2__)(struct Device *, NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__deviceCtrlCmdOsUnixVTSwitch__)(struct Device *, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *); + NV_STATUS (*__deviceCtrlCmdOsUnixVTGetFBInfo__)(struct Device *, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *); + NvBool (*__deviceShareCallback__)(struct Device *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__deviceUnmap__)(struct Device *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__deviceGetMemInterMapParams__)(struct Device *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__deviceGetMemoryMappingDescriptor__)(struct Device *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__deviceGetMapAddrSpace__)(struct Device *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__deviceGetInternalObjectHandle__)(struct Device *); + NV_STATUS (*__deviceControlFilter__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__deviceAddAdditionalDependants__)(struct RsClient *, struct Device *, RsResourceRef *); + NvU32 (*__deviceGetRefCount__)(struct Device *); + NV_STATUS (*__deviceCheckMemInterUnmap__)(struct Device *, NvBool); + NV_STATUS (*__deviceMapTo__)(struct Device *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__deviceControl_Prologue__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__deviceGetRegBaseOffsetAndSize__)(struct Device *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__deviceCanCopy__)(struct Device *); + void (*__devicePreDestruct__)(struct Device *); + NV_STATUS (*__deviceUnmapFrom__)(struct Device *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__deviceControl_Epilogue__)(struct Device *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__deviceControlLookup__)(struct Device *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__deviceMap__)(struct Device *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__deviceAccessCallback__)(struct Device *, struct RsClient *, void *, RsAccessRight); + NvU32 deviceInst; + NvU32 PerfReqCnt; + PNODE DevMemoryTable; + NvBool bSliGpuBoostSyncActivate; + NvBool bPerfOptpActive; + NvU32 nPerfOptpRefCnt; + NvU32 nCudaLimitRefCnt; + struct OBJVASPACE *pVASpace; + NvHandle hClientShare; + NvHandle hTargetClient; + NvHandle hTargetDevice; + NvU32 deviceAllocFlags; + NvU32 deviceInternalAllocFlags; + NvU64 vaStartInternal; + NvU64 vaLimitInternal; + NvU64 vaSize; + NvU32 vaMode; + struct HOST_VGPU_DEVICE *pHostVgpuDevice; + KERNEL_HOST_VGPU_DEVICE *pKernelHostVgpuDevice; +}; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct 
Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device; + +#define __staticCast_Device(pThis) \ + ((pThis)->__nvoc_pbase_Device) + +#ifdef __nvoc_device_h_disabled +#define __dynamicCast_Device(pThis) ((Device*)NULL) +#else //__nvoc_device_h_disabled +#define __dynamicCast_Device(pThis) \ + ((Device*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Device))) +#endif //__nvoc_device_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Device(Device**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Device(Device**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Device(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Device((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define deviceControl(pResource, pCallContext, pParams) deviceControl_DISPATCH(pResource, pCallContext, pParams) +#define deviceInternalControlForward(pDevice, command, pParams, size) deviceInternalControlForward_DISPATCH(pDevice, command, pParams, size) +#define deviceCtrlCmdBifReset(pDevice, pBifResetParams) deviceCtrlCmdBifReset_DISPATCH(pDevice, pBifResetParams) +#define deviceCtrlCmdBifGetDmaBaseSysmemAddr(pDevice, pBifDmaBaseSysmemParams) deviceCtrlCmdBifGetDmaBaseSysmemAddr_DISPATCH(pDevice, pBifDmaBaseSysmemParams) +#define deviceCtrlCmdBifAspmFeatureSupported(pDevice, pBifAspmParams) deviceCtrlCmdBifAspmFeatureSupported_DISPATCH(pDevice, pBifAspmParams) +#define deviceCtrlCmdDmaGetPteInfo(pDevice, pParams) deviceCtrlCmdDmaGetPteInfo_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaUpdatePde2(pDevice, pParams) deviceCtrlCmdDmaUpdatePde2_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaSetPageDirectory(pDevice, pParams) deviceCtrlCmdDmaSetPageDirectory_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaUnsetPageDirectory(pDevice, pParams) deviceCtrlCmdDmaUnsetPageDirectory_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaFlush(pDevice, flushParams) deviceCtrlCmdDmaFlush_DISPATCH(pDevice, flushParams) +#define deviceCtrlCmdDmaAdvSchedGetVaCaps(pDevice, pParams) deviceCtrlCmdDmaAdvSchedGetVaCaps_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaGetPdeInfo(pDevice, pParams) deviceCtrlCmdDmaGetPdeInfo_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaSetPteInfo(pDevice, pParams) deviceCtrlCmdDmaSetPteInfo_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaInvalidateTLB(pDevice, pParams) deviceCtrlCmdDmaInvalidateTLB_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaGetCaps(pDevice, pDmaCapsParams) deviceCtrlCmdDmaGetCaps_DISPATCH(pDevice, pDmaCapsParams) +#define deviceCtrlCmdDmaSetVASpaceSize(pDevice, pParams) deviceCtrlCmdDmaSetVASpaceSize_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaEnablePrivilegedRange(pDevice, pParams) deviceCtrlCmdDmaEnablePrivilegedRange_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdDmaSetDefaultVASpace(pDevice, pParams) deviceCtrlCmdDmaSetDefaultVASpace_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdKGrGetCaps(pDevice, pParams) deviceCtrlCmdKGrGetCaps_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdKGrGetCapsV2(pDevice, pParams) deviceCtrlCmdKGrGetCapsV2_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdKGrGetInfo(pDevice, pParams) deviceCtrlCmdKGrGetInfo_DISPATCH(pDevice, pParams) +#define 
deviceCtrlCmdKGrGetInfoV2(pDevice, pParams) deviceCtrlCmdKGrGetInfoV2_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdKGrGetTpcPartitionMode(pDevice, pParams) deviceCtrlCmdKGrGetTpcPartitionMode_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdKGrSetTpcPartitionMode(pDevice, pParams) deviceCtrlCmdKGrSetTpcPartitionMode_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdFbGetCompbitStoreInfo(pDevice, pCompbitStoreParams) deviceCtrlCmdFbGetCompbitStoreInfo_DISPATCH(pDevice, pCompbitStoreParams) +#define deviceCtrlCmdFbGetCaps(pDevice, pFbCapsParams) deviceCtrlCmdFbGetCaps_DISPATCH(pDevice, pFbCapsParams) +#define deviceCtrlCmdFbGetCapsV2(pDevice, pFbCapsParams) deviceCtrlCmdFbGetCapsV2_DISPATCH(pDevice, pFbCapsParams) +#define deviceCtrlCmdFifoGetCaps(pDevice, pFifoCapsParams) deviceCtrlCmdFifoGetCaps_DISPATCH(pDevice, pFifoCapsParams) +#define deviceCtrlCmdFifoGetCapsV2(pDevice, pFifoCapsParams) deviceCtrlCmdFifoGetCapsV2_DISPATCH(pDevice, pFifoCapsParams) +#define deviceCtrlCmdFifoStartSelectedChannels(pDevice, pStartSel) deviceCtrlCmdFifoStartSelectedChannels_DISPATCH(pDevice, pStartSel) +#define deviceCtrlCmdFifoGetEngineContextProperties(pDevice, pParams) deviceCtrlCmdFifoGetEngineContextProperties_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdFifoStopRunlist(pDevice, pStopRunlistParams) deviceCtrlCmdFifoStopRunlist_DISPATCH(pDevice, pStopRunlistParams) +#define deviceCtrlCmdFifoStartRunlist(pDevice, pStartRunlistParams) deviceCtrlCmdFifoStartRunlist_DISPATCH(pDevice, pStartRunlistParams) +#define deviceCtrlCmdFifoGetChannelList(pDevice, pChannelParams) deviceCtrlCmdFifoGetChannelList_DISPATCH(pDevice, pChannelParams) +#define deviceCtrlCmdFifoGetLatencyBufferSize(pDevice, pGetLatencyBufferSizeParams) deviceCtrlCmdFifoGetLatencyBufferSize_DISPATCH(pDevice, pGetLatencyBufferSizeParams) +#define deviceCtrlCmdFifoSetChannelProperties(pDevice, pSetChannelPropertiesParams) deviceCtrlCmdFifoSetChannelProperties_DISPATCH(pDevice, pSetChannelPropertiesParams) +#define deviceCtrlCmdFifoIdleChannels(pDevice, pParams) deviceCtrlCmdFifoIdleChannels_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdHostGetCaps(pDevice, pHostCapsParams) deviceCtrlCmdHostGetCaps_DISPATCH(pDevice, pHostCapsParams) +#define deviceCtrlCmdHostGetCapsV2(pDevice, pHostCapsParamsV2) deviceCtrlCmdHostGetCapsV2_DISPATCH(pDevice, pHostCapsParamsV2) +#define deviceCtrlCmdInternalPerfCudaLimitSetControl(pDevice, pParams) deviceCtrlCmdInternalPerfCudaLimitSetControl_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdInternalPerfCudaLimitDisable(pDevice) deviceCtrlCmdInternalPerfCudaLimitDisable_DISPATCH(pDevice) +#define deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount(pDevice, pParams) deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetClasslist(pDevice, pClassListParams) deviceCtrlCmdGpuGetClasslist_DISPATCH(pDevice, pClassListParams) +#define deviceCtrlCmdGpuGetClasslistV2(pDevice, pParams) deviceCtrlCmdGpuGetClasslistV2_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetNumSubdevices(pDevice, pSubDeviceCountParams) deviceCtrlCmdGpuGetNumSubdevices_DISPATCH(pDevice, pSubDeviceCountParams) +#define deviceCtrlCmdGpuModifyGpuSwStatePersistence(pDevice, pParams) deviceCtrlCmdGpuModifyGpuSwStatePersistence_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuQueryGpuSwStatePersistence(pDevice, pParams) deviceCtrlCmdGpuQueryGpuSwStatePersistence_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetVirtualizationMode(pDevice, pParams) 
deviceCtrlCmdGpuGetVirtualizationMode_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuSetVgpuVfBar1Size(pDevice, pParams) deviceCtrlCmdGpuSetVgpuVfBar1Size_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetSparseTextureComputeMode(pDevice, pModeParams) deviceCtrlCmdGpuGetSparseTextureComputeMode_DISPATCH(pDevice, pModeParams) +#define deviceCtrlCmdGpuSetSparseTextureComputeMode(pDevice, pModeParams) deviceCtrlCmdGpuSetSparseTextureComputeMode_DISPATCH(pDevice, pModeParams) +#define deviceCtrlCmdGpuGetVgxCaps(pDevice, pParams) deviceCtrlCmdGpuGetVgxCaps_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetBrandCaps(pDevice, pParams) deviceCtrlCmdGpuGetBrandCaps_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdGpuGetFindSubDeviceHandle(pDevice, pParams) deviceCtrlCmdGpuGetFindSubDeviceHandle_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdMsencGetCaps(pDevice, pMsencCapsParams) deviceCtrlCmdMsencGetCaps_DISPATCH(pDevice, pMsencCapsParams) +#define deviceCtrlCmdBspGetCapsV2(pDevice, pBspCapParams) deviceCtrlCmdBspGetCapsV2_DISPATCH(pDevice, pBspCapParams) +#define deviceCtrlCmdNvjpgGetCapsV2(pDevice, pNvjpgCapsParams) deviceCtrlCmdNvjpgGetCapsV2_DISPATCH(pDevice, pNvjpgCapsParams) +#define deviceCtrlCmdOsUnixVTSwitch(pDevice, pParams) deviceCtrlCmdOsUnixVTSwitch_DISPATCH(pDevice, pParams) +#define deviceCtrlCmdOsUnixVTGetFBInfo(pDevice, pParams) deviceCtrlCmdOsUnixVTGetFBInfo_DISPATCH(pDevice, pParams) +#define deviceShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) deviceShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define deviceUnmap(pGpuResource, pCallContext, pCpuMapping) deviceUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define deviceGetMemInterMapParams(pRmResource, pParams) deviceGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define deviceGetMemoryMappingDescriptor(pRmResource, ppMemDesc) deviceGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define deviceGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) deviceGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define deviceGetInternalObjectHandle(pGpuResource) deviceGetInternalObjectHandle_DISPATCH(pGpuResource) +#define deviceControlFilter(pResource, pCallContext, pParams) deviceControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define deviceAddAdditionalDependants(pClient, pResource, pReference) deviceAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define deviceGetRefCount(pResource) deviceGetRefCount_DISPATCH(pResource) +#define deviceCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) deviceCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define deviceMapTo(pResource, pParams) deviceMapTo_DISPATCH(pResource, pParams) +#define deviceControl_Prologue(pResource, pCallContext, pParams) deviceControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define deviceGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) deviceGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define deviceCanCopy(pResource) deviceCanCopy_DISPATCH(pResource) +#define devicePreDestruct(pResource) devicePreDestruct_DISPATCH(pResource) +#define deviceUnmapFrom(pResource, pParams) deviceUnmapFrom_DISPATCH(pResource, pParams) +#define deviceControl_Epilogue(pResource, pCallContext, pParams) deviceControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define deviceControlLookup(pResource, pParams, ppEntry) 
deviceControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define deviceMap(pGpuResource, pCallContext, pParams, pCpuMapping) deviceMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define deviceAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) deviceAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS deviceControl_IMPL(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS deviceControl_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__deviceControl__(pResource, pCallContext, pParams); +} + +NV_STATUS deviceInternalControlForward_IMPL(struct Device *pDevice, NvU32 command, void *pParams, NvU32 size); + +static inline NV_STATUS deviceInternalControlForward_DISPATCH(struct Device *pDevice, NvU32 command, void *pParams, NvU32 size) { + return pDevice->__deviceInternalControlForward__(pDevice, command, pParams, size); +} + +NV_STATUS deviceCtrlCmdBifReset_IMPL(struct Device *pDevice, NV0080_CTRL_BIF_RESET_PARAMS *pBifResetParams); + +static inline NV_STATUS deviceCtrlCmdBifReset_DISPATCH(struct Device *pDevice, NV0080_CTRL_BIF_RESET_PARAMS *pBifResetParams) { + return pDevice->__deviceCtrlCmdBifReset__(pDevice, pBifResetParams); +} + +NV_STATUS deviceCtrlCmdBifGetDmaBaseSysmemAddr_IMPL(struct Device *pDevice, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *pBifDmaBaseSysmemParams); + +static inline NV_STATUS deviceCtrlCmdBifGetDmaBaseSysmemAddr_DISPATCH(struct Device *pDevice, NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS *pBifDmaBaseSysmemParams) { + return pDevice->__deviceCtrlCmdBifGetDmaBaseSysmemAddr__(pDevice, pBifDmaBaseSysmemParams); +} + +NV_STATUS deviceCtrlCmdBifAspmFeatureSupported_IMPL(struct Device *pDevice, NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS *pBifAspmParams); + +static inline NV_STATUS deviceCtrlCmdBifAspmFeatureSupported_DISPATCH(struct Device *pDevice, NV0080_CTRL_BIF_SET_ASPM_FEATURE_PARAMS *pBifAspmParams) { + return pDevice->__deviceCtrlCmdBifAspmFeatureSupported__(pDevice, pBifAspmParams); +} + +NV_STATUS deviceCtrlCmdDmaGetPteInfo_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaGetPteInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaGetPteInfo__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaUpdatePde2_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaUpdatePde2_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaUpdatePde2__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaSetPageDirectory_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaSetPageDirectory_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaSetPageDirectory__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaUnsetPageDirectory_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaUnsetPageDirectory_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaUnsetPageDirectory__(pDevice, pParams); 
+} + +NV_STATUS deviceCtrlCmdDmaFlush_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_FLUSH_PARAMS *flushParams); + +static inline NV_STATUS deviceCtrlCmdDmaFlush_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_FLUSH_PARAMS *flushParams) { + return pDevice->__deviceCtrlCmdDmaFlush__(pDevice, flushParams); +} + +NV_STATUS deviceCtrlCmdDmaAdvSchedGetVaCaps_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaAdvSchedGetVaCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaAdvSchedGetVaCaps__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaGetPdeInfo_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaGetPdeInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaGetPdeInfo__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaSetPteInfo_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaSetPteInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaSetPteInfo__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaInvalidateTLB_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaInvalidateTLB_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaInvalidateTLB__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaGetCaps_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_GET_CAPS_PARAMS *pDmaCapsParams); + +static inline NV_STATUS deviceCtrlCmdDmaGetCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_GET_CAPS_PARAMS *pDmaCapsParams) { + return pDevice->__deviceCtrlCmdDmaGetCaps__(pDevice, pDmaCapsParams); +} + +NV_STATUS deviceCtrlCmdDmaSetVASpaceSize_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaSetVASpaceSize_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaSetVASpaceSize__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaEnablePrivilegedRange_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaEnablePrivilegedRange_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaEnablePrivilegedRange__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdDmaSetDefaultVASpace_IMPL(struct Device *pDevice, NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdDmaSetDefaultVASpace_DISPATCH(struct Device *pDevice, NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdDmaSetDefaultVASpace__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdKGrGetCaps_IMPL(struct Device *pDevice, NV0080_CTRL_GR_GET_CAPS_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdKGrGetCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_GR_GET_CAPS_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdKGrGetCaps__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdKGrGetCapsV2_IMPL(struct Device *pDevice, NV0080_CTRL_GR_GET_CAPS_V2_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdKGrGetCapsV2_DISPATCH(struct Device *pDevice, 
NV0080_CTRL_GR_GET_CAPS_V2_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdKGrGetCapsV2__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdKGrGetInfo_IMPL(struct Device *pDevice, NV0080_CTRL_GR_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdKGrGetInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_GR_GET_INFO_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdKGrGetInfo__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdKGrGetInfoV2_IMPL(struct Device *pDevice, NV0080_CTRL_GR_GET_INFO_V2_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdKGrGetInfoV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_GR_GET_INFO_V2_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdKGrGetInfoV2__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdKGrGetTpcPartitionMode_IMPL(struct Device *pDevice, NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdKGrGetTpcPartitionMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdKGrGetTpcPartitionMode__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdKGrSetTpcPartitionMode_IMPL(struct Device *pDevice, NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdKGrSetTpcPartitionMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GR_TPC_PARTITION_MODE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdKGrSetTpcPartitionMode__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdFbGetCompbitStoreInfo_IMPL(struct Device *pDevice, NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS *pCompbitStoreParams); + +static inline NV_STATUS deviceCtrlCmdFbGetCompbitStoreInfo_DISPATCH(struct Device *pDevice, NV0080_CTRL_FB_GET_COMPBIT_STORE_INFO_PARAMS *pCompbitStoreParams) { + return pDevice->__deviceCtrlCmdFbGetCompbitStoreInfo__(pDevice, pCompbitStoreParams); +} + +NV_STATUS deviceCtrlCmdFbGetCaps_IMPL(struct Device *pDevice, NV0080_CTRL_FB_GET_CAPS_PARAMS *pFbCapsParams); + +static inline NV_STATUS deviceCtrlCmdFbGetCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_FB_GET_CAPS_PARAMS *pFbCapsParams) { + return pDevice->__deviceCtrlCmdFbGetCaps__(pDevice, pFbCapsParams); +} + +NV_STATUS deviceCtrlCmdFbGetCapsV2_IMPL(struct Device *pDevice, NV0080_CTRL_FB_GET_CAPS_V2_PARAMS *pFbCapsParams); + +static inline NV_STATUS deviceCtrlCmdFbGetCapsV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_FB_GET_CAPS_V2_PARAMS *pFbCapsParams) { + return pDevice->__deviceCtrlCmdFbGetCapsV2__(pDevice, pFbCapsParams); +} + +NV_STATUS deviceCtrlCmdFifoGetCaps_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pFifoCapsParams); + +static inline NV_STATUS deviceCtrlCmdFifoGetCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pFifoCapsParams) { + return pDevice->__deviceCtrlCmdFifoGetCaps__(pDevice, pFifoCapsParams); +} + +NV_STATUS deviceCtrlCmdFifoGetCapsV2_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS *pFifoCapsParams); + +static inline NV_STATUS deviceCtrlCmdFifoGetCapsV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS *pFifoCapsParams) { + return pDevice->__deviceCtrlCmdFifoGetCapsV2__(pDevice, pFifoCapsParams); +} + +NV_STATUS deviceCtrlCmdFifoStartSelectedChannels_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *pStartSel); + +static inline NV_STATUS deviceCtrlCmdFifoStartSelectedChannels_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *pStartSel) { + return pDevice->__deviceCtrlCmdFifoStartSelectedChannels__(pDevice, 
pStartSel); +} + +NV_STATUS deviceCtrlCmdFifoGetEngineContextProperties_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdFifoGetEngineContextProperties_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdFifoGetEngineContextProperties__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdFifoStopRunlist_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS *pStopRunlistParams); + +static inline NV_STATUS deviceCtrlCmdFifoStopRunlist_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_STOP_RUNLIST_PARAMS *pStopRunlistParams) { + return pDevice->__deviceCtrlCmdFifoStopRunlist__(pDevice, pStopRunlistParams); +} + +NV_STATUS deviceCtrlCmdFifoStartRunlist_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_START_RUNLIST_PARAMS *pStartRunlistParams); + +static inline NV_STATUS deviceCtrlCmdFifoStartRunlist_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_START_RUNLIST_PARAMS *pStartRunlistParams) { + return pDevice->__deviceCtrlCmdFifoStartRunlist__(pDevice, pStartRunlistParams); +} + +NV_STATUS deviceCtrlCmdFifoGetChannelList_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *pChannelParams); + +static inline NV_STATUS deviceCtrlCmdFifoGetChannelList_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *pChannelParams) { + return pDevice->__deviceCtrlCmdFifoGetChannelList__(pDevice, pChannelParams); +} + +NV_STATUS deviceCtrlCmdFifoGetLatencyBufferSize_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS *pGetLatencyBufferSizeParams); + +static inline NV_STATUS deviceCtrlCmdFifoGetLatencyBufferSize_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_GET_LATENCY_BUFFER_SIZE_PARAMS *pGetLatencyBufferSizeParams) { + return pDevice->__deviceCtrlCmdFifoGetLatencyBufferSize__(pDevice, pGetLatencyBufferSizeParams); +} + +NV_STATUS deviceCtrlCmdFifoSetChannelProperties_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS *pSetChannelPropertiesParams); + +static inline NV_STATUS deviceCtrlCmdFifoSetChannelProperties_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_SET_CHANNEL_PROPERTIES_PARAMS *pSetChannelPropertiesParams) { + return pDevice->__deviceCtrlCmdFifoSetChannelProperties__(pDevice, pSetChannelPropertiesParams); +} + +NV_STATUS deviceCtrlCmdFifoIdleChannels_IMPL(struct Device *pDevice, NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdFifoIdleChannels_DISPATCH(struct Device *pDevice, NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdFifoIdleChannels__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdHostGetCaps_IMPL(struct Device *pDevice, NV0080_CTRL_HOST_GET_CAPS_PARAMS *pHostCapsParams); + +static inline NV_STATUS deviceCtrlCmdHostGetCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_HOST_GET_CAPS_PARAMS *pHostCapsParams) { + return pDevice->__deviceCtrlCmdHostGetCaps__(pDevice, pHostCapsParams); +} + +NV_STATUS deviceCtrlCmdHostGetCapsV2_IMPL(struct Device *pDevice, NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS *pHostCapsParamsV2); + +static inline NV_STATUS deviceCtrlCmdHostGetCapsV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS *pHostCapsParamsV2) { + return pDevice->__deviceCtrlCmdHostGetCapsV2__(pDevice, pHostCapsParamsV2); +} + +NV_STATUS deviceCtrlCmdInternalPerfCudaLimitSetControl_IMPL(struct Device *pDevice, NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS 
*pParams); + +static inline NV_STATUS deviceCtrlCmdInternalPerfCudaLimitSetControl_DISPATCH(struct Device *pDevice, NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdInternalPerfCudaLimitSetControl__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdInternalPerfCudaLimitDisable_IMPL(struct Device *pDevice); + +static inline NV_STATUS deviceCtrlCmdInternalPerfCudaLimitDisable_DISPATCH(struct Device *pDevice) { + return pDevice->__deviceCtrlCmdInternalPerfCudaLimitDisable__(pDevice); +} + +NV_STATUS deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount_IMPL(struct Device *pDevice, NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount_DISPATCH(struct Device *pDevice, NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdInternalPerfGetUnderpoweredGpuCount__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetClasslist_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetClasslist_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams) { + return pDevice->__deviceCtrlCmdGpuGetClasslist__(pDevice, pClassListParams); +} + +NV_STATUS deviceCtrlCmdGpuGetClasslistV2_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetClasslistV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetClasslistV2__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetNumSubdevices_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetNumSubdevices_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams) { + return pDevice->__deviceCtrlCmdGpuGetNumSubdevices__(pDevice, pSubDeviceCountParams); +} + +NV_STATUS deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuModifyGpuSwStatePersistence_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuQueryGpuSwStatePersistence_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetVirtualizationMode_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetVirtualizationMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetVirtualizationMode__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuSetVgpuVfBar1Size_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams) { + return 
pDevice->__deviceCtrlCmdGpuSetVgpuVfBar1Size__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetSparseTextureComputeMode_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *pModeParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetSparseTextureComputeMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *pModeParams) { + return pDevice->__deviceCtrlCmdGpuGetSparseTextureComputeMode__(pDevice, pModeParams); +} + +NV_STATUS deviceCtrlCmdGpuSetSparseTextureComputeMode_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *pModeParams); + +static inline NV_STATUS deviceCtrlCmdGpuSetSparseTextureComputeMode_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *pModeParams) { + return pDevice->__deviceCtrlCmdGpuSetSparseTextureComputeMode__(pDevice, pModeParams); +} + +NV_STATUS deviceCtrlCmdGpuGetVgxCaps_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetVgxCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetVgxCaps__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetBrandCaps_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetBrandCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdGpuGetBrandCaps__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL(struct Device *pDevice, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams); + +static inline NV_STATUS deviceCtrlCmdGpuGetFindSubDeviceHandle_DISPATCH(struct Device *pDevice, NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams) { + return pDevice->__deviceCtrlCmdGpuGetFindSubDeviceHandle__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdMsencGetCaps_IMPL(struct Device *pDevice, NV0080_CTRL_MSENC_GET_CAPS_PARAMS *pMsencCapsParams); + +static inline NV_STATUS deviceCtrlCmdMsencGetCaps_DISPATCH(struct Device *pDevice, NV0080_CTRL_MSENC_GET_CAPS_PARAMS *pMsencCapsParams) { + return pDevice->__deviceCtrlCmdMsencGetCaps__(pDevice, pMsencCapsParams); +} + +NV_STATUS deviceCtrlCmdBspGetCapsV2_IMPL(struct Device *pDevice, NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2 *pBspCapParams); + +static inline NV_STATUS deviceCtrlCmdBspGetCapsV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2 *pBspCapParams) { + return pDevice->__deviceCtrlCmdBspGetCapsV2__(pDevice, pBspCapParams); +} + +NV_STATUS deviceCtrlCmdNvjpgGetCapsV2_IMPL(struct Device *pDevice, NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS *pNvjpgCapsParams); + +static inline NV_STATUS deviceCtrlCmdNvjpgGetCapsV2_DISPATCH(struct Device *pDevice, NV0080_CTRL_NVJPG_GET_CAPS_V2_PARAMS *pNvjpgCapsParams) { + return pDevice->__deviceCtrlCmdNvjpgGetCapsV2__(pDevice, pNvjpgCapsParams); +} + +NV_STATUS deviceCtrlCmdOsUnixVTSwitch_IMPL(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdOsUnixVTSwitch_DISPATCH(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdOsUnixVTSwitch__(pDevice, pParams); +} + +NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_IMPL(struct Device *pDevice, NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams); + +static inline NV_STATUS deviceCtrlCmdOsUnixVTGetFBInfo_DISPATCH(struct Device *pDevice, 
NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS *pParams) { + return pDevice->__deviceCtrlCmdOsUnixVTGetFBInfo__(pDevice, pParams); +} + +static inline NvBool deviceShareCallback_DISPATCH(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__deviceShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS deviceUnmap_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__deviceUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS deviceGetMemInterMapParams_DISPATCH(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__deviceGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS deviceGetMemoryMappingDescriptor_DISPATCH(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__deviceGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS deviceGetMapAddrSpace_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__deviceGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle deviceGetInternalObjectHandle_DISPATCH(struct Device *pGpuResource) { + return pGpuResource->__deviceGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS deviceControlFilter_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__deviceControlFilter__(pResource, pCallContext, pParams); +} + +static inline void deviceAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) { + pResource->__deviceAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 deviceGetRefCount_DISPATCH(struct Device *pResource) { + return pResource->__deviceGetRefCount__(pResource); +} + +static inline NV_STATUS deviceCheckMemInterUnmap_DISPATCH(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__deviceCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS deviceMapTo_DISPATCH(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__deviceMapTo__(pResource, pParams); +} + +static inline NV_STATUS deviceControl_Prologue_DISPATCH(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__deviceControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS deviceGetRegBaseOffsetAndSize_DISPATCH(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__deviceGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool deviceCanCopy_DISPATCH(struct Device *pResource) { + return pResource->__deviceCanCopy__(pResource); +} + +static inline void devicePreDestruct_DISPATCH(struct Device *pResource) { + pResource->__devicePreDestruct__(pResource); +} + +static inline NV_STATUS deviceUnmapFrom_DISPATCH(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__deviceUnmapFrom__(pResource, pParams); +} + +static inline void deviceControl_Epilogue_DISPATCH(struct Device *pResource, struct 
CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__deviceControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS deviceControlLookup_DISPATCH(struct Device *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__deviceControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS deviceMap_DISPATCH(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__deviceMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool deviceAccessCallback_DISPATCH(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__deviceAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS deviceConstruct_IMPL(struct Device *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_deviceConstruct(arg_pResource, arg_pCallContext, arg_pParams) deviceConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void deviceDestruct_IMPL(struct Device *pResource); +#define __nvoc_deviceDestruct(pResource) deviceDestruct_IMPL(pResource) +NV_STATUS deviceInit_IMPL(struct Device *pDevice, struct CALL_CONTEXT *pCallContext, NvHandle hClient, NvHandle hDevice, NvU32 deviceInst, NvHandle hClientShare, NvHandle hTargetClient, NvHandle hTargetDevice, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 allocFlags, NvU32 vaMode); +#ifdef __nvoc_device_h_disabled +static inline NV_STATUS deviceInit(struct Device *pDevice, struct CALL_CONTEXT *pCallContext, NvHandle hClient, NvHandle hDevice, NvU32 deviceInst, NvHandle hClientShare, NvHandle hTargetClient, NvHandle hTargetDevice, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 allocFlags, NvU32 vaMode) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_device_h_disabled +#define deviceInit(pDevice, pCallContext, hClient, hDevice, deviceInst, hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, allocFlags, vaMode) deviceInit_IMPL(pDevice, pCallContext, hClient, hDevice, deviceInst, hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, allocFlags, vaMode) +#endif //__nvoc_device_h_disabled + +NV_STATUS deviceGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDevice, struct Device **ppDevice); +#define deviceGetByHandle(pClient, hDevice, ppDevice) deviceGetByHandle_IMPL(pClient, hDevice, ppDevice) +NV_STATUS deviceGetByInstance_IMPL(struct RsClient *pClient, NvU32 deviceInstance, struct Device **ppDevice); +#define deviceGetByInstance(pClient, deviceInstance, ppDevice) deviceGetByInstance_IMPL(pClient, deviceInstance, ppDevice) +NV_STATUS deviceGetByGpu_IMPL(struct RsClient *pClient, struct OBJGPU *pGpu, NvBool bAnyInGroup, struct Device **ppDevice); +#define deviceGetByGpu(pClient, pGpu, bAnyInGroup, ppDevice) deviceGetByGpu_IMPL(pClient, pGpu, bAnyInGroup, ppDevice) +NV_STATUS deviceGetDefaultVASpace_IMPL(struct Device *pDevice, struct OBJVASPACE **ppVAS); +#ifdef __nvoc_device_h_disabled +static inline NV_STATUS deviceGetDefaultVASpace(struct Device *pDevice, struct OBJVASPACE **ppVAS) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_device_h_disabled +#define deviceGetDefaultVASpace(pDevice, ppVAS) deviceGetDefaultVASpace_IMPL(pDevice, ppVAS) +#endif //__nvoc_device_h_disabled + +NV_STATUS deviceSetClientShare_IMPL(struct Device *pDevice, NvHandle hClientShare, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 deviceAllocFlags); +#ifdef __nvoc_device_h_disabled +static inline NV_STATUS deviceSetClientShare(struct Device *pDevice, NvHandle hClientShare, NvU64 vaSize, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 deviceAllocFlags) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_device_h_disabled +#define deviceSetClientShare(pDevice, hClientShare, vaSize, vaStartInternal, vaLimitInternal, deviceAllocFlags) deviceSetClientShare_IMPL(pDevice, hClientShare, vaSize, vaStartInternal, vaLimitInternal, deviceAllocFlags) +#endif //__nvoc_device_h_disabled + +void deviceRemoveFromClientShare_IMPL(struct Device *pDevice); +#ifdef __nvoc_device_h_disabled +static inline void deviceRemoveFromClientShare(struct Device *pDevice) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); +} +#else //__nvoc_device_h_disabled +#define deviceRemoveFromClientShare(pDevice) deviceRemoveFromClientShare_IMPL(pDevice) +#endif //__nvoc_device_h_disabled + +NV_STATUS deviceSetDefaultVASpace_IMPL(struct Device *pDevice, NvHandle hVASpace); +#ifdef __nvoc_device_h_disabled +static inline NV_STATUS deviceSetDefaultVASpace(struct Device *pDevice, NvHandle hVASpace) { + NV_ASSERT_FAILED_PRECOMP("Device was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_device_h_disabled +#define deviceSetDefaultVASpace(pDevice, hVASpace) deviceSetDefaultVASpace_IMPL(pDevice, hVASpace) +#endif //__nvoc_device_h_disabled + +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +/** + * WARNING: This function is deprecated! Please use deviceGetByHandle. + */ +struct Device *CliGetDeviceInfo(NvHandle, NvHandle); + +/** + * WARNING: This function is deprecated and use is *strongly* discouraged + * (especially for new code!) + * + * From the function name (CliSetGpuContext) it appears as a simple accessor but + * violates expectations by modifying the SLI BC threadstate (calls to + * GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully managed + * by the caller. + * + * Instead of using this routine, please use deviceGetByHandle then call + * GPU_RES_GET_GPU, GPU_RES_GET_GPUGRP, GPU_RES_SET_THREAD_BC_STATE as needed. + * + * Note that GPU_RES_GET_GPU supports returning a pGpu for both pDevice, + * pSubdevice, the base pResource type, and any resource that inherits from + * GpuResource. That is, instead of using CliSetGpuContext or + * CliSetSubDeviceContext, please use following pattern to look up the pGpu: + * + * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource) + * + * To set the threadstate, please use: + * + * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource); + */ +NV_STATUS CliSetGpuContext(NvHandle, NvHandle, OBJGPU **, struct OBJGPUGRP **); + +/** + * WARNING: This function is deprecated! Please use gpuGetByRef() + */ +OBJGPU *CliGetGpuFromContext(RsResourceRef *pContextRef, NvBool *pbBroadcast); + +/** + * WARNING: This function is deprecated! 
Please use gpuGetByHandle() + */ +OBJGPU *CliGetGpuFromHandle(NvHandle hClient, NvHandle hResource, NvBool *pbBroadcast); + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DEVICE_NVOC_H_ diff --git a/src/nvidia/generated/g_disp_capabilities_nvoc.c b/src/nvidia/generated/g_disp_capabilities_nvoc.c new file mode 100644 index 000000000..3a417eb7e --- /dev/null +++ b/src/nvidia/generated/g_disp_capabilities_nvoc.c @@ -0,0 +1,329 @@ +#define NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_capabilities_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x99db3e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_DispCapabilities(DispCapabilities*); +void __nvoc_init_funcTable_DispCapabilities(DispCapabilities*); +NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispCapabilities(DispCapabilities*); +void __nvoc_dtor_DispCapabilities(DispCapabilities*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCapabilities; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_DispCapabilities = { + /*pClassDef=*/ &__nvoc_class_def_DispCapabilities, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispCapabilities, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispCapabilities = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_DispCapabilities_DispCapabilities, + &__nvoc_rtti_DispCapabilities_GpuResource, + &__nvoc_rtti_DispCapabilities_RmResource, + 
&__nvoc_rtti_DispCapabilities_RmResourceCommon, + &__nvoc_rtti_DispCapabilities_RsResource, + &__nvoc_rtti_DispCapabilities_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispCapabilities), + /*classId=*/ classId(DispCapabilities), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispCapabilities", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispCapabilities, + /*pCastInfo=*/ &__nvoc_castinfo_DispCapabilities, + /*pExportInfo=*/ &__nvoc_export_info_DispCapabilities +}; + +static NV_STATUS __nvoc_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispcapGetRegBaseOffsetAndSize((struct DispCapabilities *)(((unsigned char *)pDispCapabilities) - __nvoc_rtti_DispCapabilities_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_GpuResource_dispcapShareCallback(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapControl(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapUnmap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcapGetMemInterMapParams(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcapGetMemoryMappingDescriptor(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapGetMapAddrSpace(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_dispcapGetInternalObjectHandle(struct DispCapabilities *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcapControlFilter(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_DispCapabilities_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_dispcapAddAdditionalDependants(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_dispcapGetRefCount(struct DispCapabilities *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcapCheckMemInterUnmap(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcapMapTo(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcapControl_Prologue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_dispcapCanCopy(struct DispCapabilities *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapInternalControlForward(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_dispcapPreDestruct(struct DispCapabilities *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcapUnmapFrom(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispcapControl_Epilogue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcapControlLookup(struct DispCapabilities *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispcapMap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + 
__nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispcapAccessCallback(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCapabilities = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_DispCapabilities(DispCapabilities *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispCapabilities(DispCapabilities *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail_GpuResource; + __nvoc_init_dataField_DispCapabilities(pThis); + + status = __nvoc_dispcapConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail__init; + goto __nvoc_ctor_DispCapabilities_exit; // Success + +__nvoc_ctor_DispCapabilities_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DispCapabilities_fail_GpuResource: +__nvoc_ctor_DispCapabilities_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispCapabilities_1(DispCapabilities *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispcapGetRegBaseOffsetAndSize__ = &dispcapGetRegBaseOffsetAndSize_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize; + + pThis->__dispcapShareCallback__ = &__nvoc_thunk_GpuResource_dispcapShareCallback; + + pThis->__dispcapControl__ = &__nvoc_thunk_GpuResource_dispcapControl; + + pThis->__dispcapUnmap__ = &__nvoc_thunk_GpuResource_dispcapUnmap; + + pThis->__dispcapGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispcapGetMemInterMapParams; + + pThis->__dispcapGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispcapGetMemoryMappingDescriptor; + + pThis->__dispcapGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispcapGetMapAddrSpace; + + pThis->__dispcapGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispcapGetInternalObjectHandle; + + pThis->__dispcapControlFilter__ = &__nvoc_thunk_RsResource_dispcapControlFilter; + + pThis->__dispcapAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispcapAddAdditionalDependants; + + pThis->__dispcapGetRefCount__ = &__nvoc_thunk_RsResource_dispcapGetRefCount; + + pThis->__dispcapCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispcapCheckMemInterUnmap; + + pThis->__dispcapMapTo__ = &__nvoc_thunk_RsResource_dispcapMapTo; + + pThis->__dispcapControl_Prologue__ = &__nvoc_thunk_RmResource_dispcapControl_Prologue; + + pThis->__dispcapCanCopy__ = &__nvoc_thunk_RsResource_dispcapCanCopy; + + pThis->__dispcapInternalControlForward__ = &__nvoc_thunk_GpuResource_dispcapInternalControlForward; + + pThis->__dispcapPreDestruct__ = 
&__nvoc_thunk_RsResource_dispcapPreDestruct; + + pThis->__dispcapUnmapFrom__ = &__nvoc_thunk_RsResource_dispcapUnmapFrom; + + pThis->__dispcapControl_Epilogue__ = &__nvoc_thunk_RmResource_dispcapControl_Epilogue; + + pThis->__dispcapControlLookup__ = &__nvoc_thunk_RsResource_dispcapControlLookup; + + pThis->__dispcapMap__ = &__nvoc_thunk_GpuResource_dispcapMap; + + pThis->__dispcapAccessCallback__ = &__nvoc_thunk_RmResource_dispcapAccessCallback; +} + +void __nvoc_init_funcTable_DispCapabilities(DispCapabilities *pThis) { + __nvoc_init_funcTable_DispCapabilities_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_DispCapabilities(DispCapabilities *pThis) { + pThis->__nvoc_pbase_DispCapabilities = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_DispCapabilities(pThis); +} + +NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispCapabilities *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispCapabilities)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispCapabilities)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispCapabilities); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispCapabilities(pThis); + status = __nvoc_ctor_DispCapabilities(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispCapabilities_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispCapabilities_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispCapabilities(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_disp_capabilities_nvoc.h b/src/nvidia/generated/g_disp_capabilities_nvoc.h new file mode 100644 index 000000000..185980a24 --- /dev/null +++ b/src/nvidia/generated/g_disp_capabilities_nvoc.h @@ -0,0 +1,239 @@ +#ifndef _G_DISP_CAPABILITIES_NVOC_H_ +#define _G_DISP_CAPABILITIES_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* 
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispCapabilities class. +* +******************************************************************************/ + +#include "g_disp_capabilities_nvoc.h" + +#ifndef DISP_CAPABILITIES_H +#define DISP_CAPABILITIES_H + +#include "gpu/gpu_resource.h" + +/*! + * RM internal class representing NVXXXX_DISP_CAPABILITIES + */ +#ifdef NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispCapabilities { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct DispCapabilities *__nvoc_pbase_DispCapabilities; + NV_STATUS (*__dispcapGetRegBaseOffsetAndSize__)(struct DispCapabilities *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__dispcapShareCallback__)(struct DispCapabilities *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispcapControl__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispcapUnmap__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispcapGetMemInterMapParams__)(struct DispCapabilities *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispcapGetMemoryMappingDescriptor__)(struct DispCapabilities *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispcapGetMapAddrSpace__)(struct DispCapabilities *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__dispcapGetInternalObjectHandle__)(struct DispCapabilities *); + NV_STATUS (*__dispcapControlFilter__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__dispcapAddAdditionalDependants__)(struct RsClient *, struct DispCapabilities *, RsResourceRef *); + NvU32 (*__dispcapGetRefCount__)(struct DispCapabilities *); + NV_STATUS (*__dispcapCheckMemInterUnmap__)(struct DispCapabilities *, NvBool); + NV_STATUS (*__dispcapMapTo__)(struct 
DispCapabilities *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispcapControl_Prologue__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispcapCanCopy__)(struct DispCapabilities *); + NV_STATUS (*__dispcapInternalControlForward__)(struct DispCapabilities *, NvU32, void *, NvU32); + void (*__dispcapPreDestruct__)(struct DispCapabilities *); + NV_STATUS (*__dispcapUnmapFrom__)(struct DispCapabilities *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispcapControl_Epilogue__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispcapControlLookup__)(struct DispCapabilities *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispcapMap__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispcapAccessCallback__)(struct DispCapabilities *, struct RsClient *, void *, RsAccessRight); + NvU32 ControlOffset; + NvU32 ControlLength; +}; + +#ifndef __NVOC_CLASS_DispCapabilities_TYPEDEF__ +#define __NVOC_CLASS_DispCapabilities_TYPEDEF__ +typedef struct DispCapabilities DispCapabilities; +#endif /* __NVOC_CLASS_DispCapabilities_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCapabilities +#define __nvoc_class_id_DispCapabilities 0x99db3e +#endif /* __nvoc_class_id_DispCapabilities */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities; + +#define __staticCast_DispCapabilities(pThis) \ + ((pThis)->__nvoc_pbase_DispCapabilities) + +#ifdef __nvoc_disp_capabilities_h_disabled +#define __dynamicCast_DispCapabilities(pThis) ((DispCapabilities*)NULL) +#else //__nvoc_disp_capabilities_h_disabled +#define __dynamicCast_DispCapabilities(pThis) \ + ((DispCapabilities*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispCapabilities))) +#endif //__nvoc_disp_capabilities_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispCapabilities(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispCapabilities((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispcapGetRegBaseOffsetAndSize(pDispCapabilities, pGpu, pOffset, pSize) dispcapGetRegBaseOffsetAndSize_DISPATCH(pDispCapabilities, pGpu, pOffset, pSize) +#define dispcapShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispcapShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispcapControl(pGpuResource, pCallContext, pParams) dispcapControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispcapUnmap(pGpuResource, pCallContext, pCpuMapping) dispcapUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispcapGetMemInterMapParams(pRmResource, pParams) dispcapGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispcapGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispcapGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispcapGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispcapGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispcapGetInternalObjectHandle(pGpuResource) dispcapGetInternalObjectHandle_DISPATCH(pGpuResource) 
+#define dispcapControlFilter(pResource, pCallContext, pParams) dispcapControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispcapAddAdditionalDependants(pClient, pResource, pReference) dispcapAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispcapGetRefCount(pResource) dispcapGetRefCount_DISPATCH(pResource) +#define dispcapCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispcapCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispcapMapTo(pResource, pParams) dispcapMapTo_DISPATCH(pResource, pParams) +#define dispcapControl_Prologue(pResource, pCallContext, pParams) dispcapControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispcapCanCopy(pResource) dispcapCanCopy_DISPATCH(pResource) +#define dispcapInternalControlForward(pGpuResource, command, pParams, size) dispcapInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispcapPreDestruct(pResource) dispcapPreDestruct_DISPATCH(pResource) +#define dispcapUnmapFrom(pResource, pParams) dispcapUnmapFrom_DISPATCH(pResource, pParams) +#define dispcapControl_Epilogue(pResource, pCallContext, pParams) dispcapControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispcapControlLookup(pResource, pParams, ppEntry) dispcapControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispcapMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispcapMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispcapAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispcapAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS dispcapGetRegBaseOffsetAndSize_IMPL(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS dispcapGetRegBaseOffsetAndSize_DISPATCH(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispCapabilities->__dispcapGetRegBaseOffsetAndSize__(pDispCapabilities, pGpu, pOffset, pSize); +} + +static inline NvBool dispcapShareCallback_DISPATCH(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispcapShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispcapControl_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispcapControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispcapUnmap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispcapUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispcapGetMemInterMapParams_DISPATCH(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispcapGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispcapGetMemoryMappingDescriptor_DISPATCH(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispcapGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispcapGetMapAddrSpace_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return 
pGpuResource->__dispcapGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle dispcapGetInternalObjectHandle_DISPATCH(struct DispCapabilities *pGpuResource) { + return pGpuResource->__dispcapGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispcapControlFilter_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispcapControlFilter__(pResource, pCallContext, pParams); +} + +static inline void dispcapAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) { + pResource->__dispcapAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 dispcapGetRefCount_DISPATCH(struct DispCapabilities *pResource) { + return pResource->__dispcapGetRefCount__(pResource); +} + +static inline NV_STATUS dispcapCheckMemInterUnmap_DISPATCH(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispcapCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispcapMapTo_DISPATCH(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispcapMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispcapControl_Prologue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispcapControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispcapCanCopy_DISPATCH(struct DispCapabilities *pResource) { + return pResource->__dispcapCanCopy__(pResource); +} + +static inline NV_STATUS dispcapInternalControlForward_DISPATCH(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispcapInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void dispcapPreDestruct_DISPATCH(struct DispCapabilities *pResource) { + pResource->__dispcapPreDestruct__(pResource); +} + +static inline NV_STATUS dispcapUnmapFrom_DISPATCH(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispcapUnmapFrom__(pResource, pParams); +} + +static inline void dispcapControl_Epilogue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispcapControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispcapControlLookup_DISPATCH(struct DispCapabilities *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispcapControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispcapMap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispcapMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispcapAccessCallback_DISPATCH(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispcapAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispcapConstruct_IMPL(struct DispCapabilities *arg_pDispCapabilities, struct CALL_CONTEXT *arg_pCallContext, struct 
RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispcapConstruct(arg_pDispCapabilities, arg_pCallContext, arg_pParams) dispcapConstruct_IMPL(arg_pDispCapabilities, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // DISP_CAPABILITIES_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_CAPABILITIES_NVOC_H_ diff --git a/src/nvidia/generated/g_disp_channel_nvoc.c b/src/nvidia/generated/g_disp_channel_nvoc.c new file mode 100644 index 000000000..e925b3cc9 --- /dev/null +++ b/src/nvidia/generated/g_disp_channel_nvoc.c @@ -0,0 +1,1146 @@ +#define NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_channel_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xbd2ff3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_DispChannel(DispChannel*); +void __nvoc_init_funcTable_DispChannel(DispChannel*); +NV_STATUS __nvoc_ctor_DispChannel(DispChannel*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma); +void __nvoc_init_dataField_DispChannel(DispChannel*); +void __nvoc_dtor_DispChannel(DispChannel*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannel; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_DispChannel = { + /*pClassDef=*/ &__nvoc_class_def_DispChannel, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannel, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannel_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannel, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispChannel = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DispChannel_DispChannel, + &__nvoc_rtti_DispChannel_Notifier, + &__nvoc_rtti_DispChannel_INotifier, + &__nvoc_rtti_DispChannel_GpuResource, + &__nvoc_rtti_DispChannel_RmResource, + &__nvoc_rtti_DispChannel_RmResourceCommon, + &__nvoc_rtti_DispChannel_RsResource, + &__nvoc_rtti_DispChannel_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispChannel), + /*classId=*/ classId(DispChannel), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispChannel", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannel, + /*pCastInfo=*/ &__nvoc_castinfo_DispChannel, + /*pExportInfo=*/ &__nvoc_export_info_DispChannel +}; + +static NV_STATUS __nvoc_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *)pDispChannel) - __nvoc_rtti_DispChannel_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_GpuResource_dispchnShareCallback(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnMapTo(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchnGetOrAllocNotifShare(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnCheckMemInterUnmap(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannel_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnGetMapAddrSpace(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_dispchnSetNotificationShare(struct DispChannel *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_dispchnGetRefCount(struct DispChannel 
*pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchnAddAdditionalDependants(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnControl_Prologue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnInternalControlForward(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnUnmapFrom(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispchnControl_Epilogue(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnControlLookup(struct DispChannel *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_dispchnGetInternalObjectHandle(struct DispChannel *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnControl(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnUnmap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnGetMemInterMapParams(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannel_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnGetMemoryMappingDescriptor(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannel_RmResource.offset), ppMemDesc); +} + +static NV_STATUS 
__nvoc_thunk_RsResource_dispchnControlFilter(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchnUnregisterEvent(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_dispchnCanCopy(struct DispChannel *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchnPreDestruct(struct DispChannel *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispchnGetNotificationListPtr(struct DispChannel *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispchnGetNotificationShare(struct DispChannel *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannel_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnMap(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannel_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispchnAccessCallback(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannel_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannel = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_DispChannel(DispChannel *pThis) { + __nvoc_dispchnDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispChannel(DispChannel *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_DispChannel(DispChannel *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail_Notifier; + 
__nvoc_init_dataField_DispChannel(pThis); + + status = __nvoc_dispchnConstruct(pThis, arg_pCallContext, arg_pParams, arg_isDma); + if (status != NV_OK) goto __nvoc_ctor_DispChannel_fail__init; + goto __nvoc_ctor_DispChannel_exit; // Success + +__nvoc_ctor_DispChannel_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_DispChannel_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DispChannel_fail_GpuResource: +__nvoc_ctor_DispChannel_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispChannel_1(DispChannel *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispchnGetRegBaseOffsetAndSize__ = &dispchnGetRegBaseOffsetAndSize_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispChannel_gpuresGetRegBaseOffsetAndSize; + + pThis->__dispchnShareCallback__ = &__nvoc_thunk_GpuResource_dispchnShareCallback; + + pThis->__dispchnMapTo__ = &__nvoc_thunk_RsResource_dispchnMapTo; + + pThis->__dispchnGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispchnGetOrAllocNotifShare; + + pThis->__dispchnCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispchnCheckMemInterUnmap; + + pThis->__dispchnGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispchnGetMapAddrSpace; + + pThis->__dispchnSetNotificationShare__ = &__nvoc_thunk_Notifier_dispchnSetNotificationShare; + + pThis->__dispchnGetRefCount__ = &__nvoc_thunk_RsResource_dispchnGetRefCount; + + pThis->__dispchnAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispchnAddAdditionalDependants; + + pThis->__dispchnControl_Prologue__ = &__nvoc_thunk_RmResource_dispchnControl_Prologue; + + pThis->__dispchnInternalControlForward__ = &__nvoc_thunk_GpuResource_dispchnInternalControlForward; + + pThis->__dispchnUnmapFrom__ = &__nvoc_thunk_RsResource_dispchnUnmapFrom; + + pThis->__dispchnControl_Epilogue__ = &__nvoc_thunk_RmResource_dispchnControl_Epilogue; + + pThis->__dispchnControlLookup__ = &__nvoc_thunk_RsResource_dispchnControlLookup; + + pThis->__dispchnGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispchnGetInternalObjectHandle; + + pThis->__dispchnControl__ = &__nvoc_thunk_GpuResource_dispchnControl; + + pThis->__dispchnUnmap__ = &__nvoc_thunk_GpuResource_dispchnUnmap; + + pThis->__dispchnGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispchnGetMemInterMapParams; + + pThis->__dispchnGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispchnGetMemoryMappingDescriptor; + + pThis->__dispchnControlFilter__ = &__nvoc_thunk_RsResource_dispchnControlFilter; + + pThis->__dispchnUnregisterEvent__ = &__nvoc_thunk_Notifier_dispchnUnregisterEvent; + + pThis->__dispchnCanCopy__ = &__nvoc_thunk_RsResource_dispchnCanCopy; + + pThis->__dispchnPreDestruct__ = &__nvoc_thunk_RsResource_dispchnPreDestruct; + + pThis->__dispchnGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispchnGetNotificationListPtr; + + pThis->__dispchnGetNotificationShare__ = &__nvoc_thunk_Notifier_dispchnGetNotificationShare; + + pThis->__dispchnMap__ = &__nvoc_thunk_GpuResource_dispchnMap; + + pThis->__dispchnAccessCallback__ = &__nvoc_thunk_RmResource_dispchnAccessCallback; +} + +void __nvoc_init_funcTable_DispChannel(DispChannel *pThis) { + __nvoc_init_funcTable_DispChannel_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_DispChannel(DispChannel *pThis) { + pThis->__nvoc_pbase_DispChannel = pThis; + pThis->__nvoc_pbase_Object = 
&pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_DispChannel(pThis); +} + +NV_STATUS __nvoc_objCreate_DispChannel(DispChannel **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma) { + NV_STATUS status; + Object *pParentObj; + DispChannel *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispChannel)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispChannel)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispChannel); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispChannel(pThis); + status = __nvoc_ctor_DispChannel(pThis, arg_pCallContext, arg_pParams, arg_isDma); + if (status != NV_OK) goto __nvoc_objCreate_DispChannel_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispChannel_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispChannel(DispChannel **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + NvU32 arg_isDma = va_arg(args, NvU32); + + status = __nvoc_objCreate_DispChannel(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams, arg_isDma); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x10dec3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +void __nvoc_init_DispChannelPio(DispChannelPio*); +void __nvoc_init_funcTable_DispChannelPio(DispChannelPio*); +NV_STATUS __nvoc_ctor_DispChannelPio(DispChannelPio*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void 
__nvoc_init_dataField_DispChannelPio(DispChannelPio*); +void __nvoc_dtor_DispChannelPio(DispChannelPio*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannelPio; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_DispChannelPio = { + /*pClassDef=*/ &__nvoc_class_def_DispChannelPio, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannelPio, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelPio_DispChannel = { + /*pClassDef=*/ &__nvoc_class_def_DispChannel, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelPio, __nvoc_base_DispChannel), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispChannelPio = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_DispChannelPio_DispChannelPio, + &__nvoc_rtti_DispChannelPio_DispChannel, + &__nvoc_rtti_DispChannelPio_Notifier, + &__nvoc_rtti_DispChannelPio_INotifier, + &__nvoc_rtti_DispChannelPio_GpuResource, + &__nvoc_rtti_DispChannelPio_RmResource, + &__nvoc_rtti_DispChannelPio_RmResourceCommon, + &__nvoc_rtti_DispChannelPio_RsResource, + &__nvoc_rtti_DispChannelPio_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispChannelPio), + /*classId=*/ classId(DispChannelPio), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispChannelPio", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannelPio, + /*pCastInfo=*/ &__nvoc_castinfo_DispChannelPio, + 
/*pExportInfo=*/ &__nvoc_export_info_DispChannelPio +}; + +static NvBool __nvoc_thunk_GpuResource_dispchnpioShareCallback(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnpioMapTo(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchnpioGetOrAllocNotifShare(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnpioCheckMemInterUnmap(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioGetMapAddrSpace(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_dispchnpioSetNotificationShare(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_dispchnpioGetRefCount(struct DispChannelPio *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchnpioAddAdditionalDependants(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnpioControl_Prologue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_DispChannel_dispchnpioGetRegBaseOffsetAndSize(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *)pDispChannel) + __nvoc_rtti_DispChannelPio_DispChannel.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioInternalControlForward(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + 
__nvoc_rtti_DispChannelPio_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnpioUnmapFrom(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispchnpioControl_Epilogue(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnpioControlLookup(struct DispChannelPio *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_dispchnpioGetInternalObjectHandle(struct DispChannelPio *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioControl(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioUnmap(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnpioGetMemInterMapParams(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchnpioGetMemoryMappingDescriptor(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchnpioControlFilter(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchnpioUnregisterEvent(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_dispchnpioCanCopy(struct DispChannelPio *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset)); +} + +static void 
__nvoc_thunk_RsResource_dispchnpioPreDestruct(struct DispChannelPio *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispchnpioGetNotificationListPtr(struct DispChannelPio *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispchnpioGetNotificationShare(struct DispChannelPio *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelPio_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchnpioMap(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelPio_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispchnpioAccessCallback(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelPio_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannelPio = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_DispChannel(DispChannel*); +void __nvoc_dtor_DispChannelPio(DispChannelPio *pThis) { + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispChannelPio(DispChannelPio *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DispChannel(DispChannel* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, NvU32); +NV_STATUS __nvoc_ctor_DispChannelPio(DispChannelPio *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DispChannel(&pThis->__nvoc_base_DispChannel, arg_pCallContext, arg_pParams, ((NvBool)(0 != 0))); + if (status != NV_OK) goto __nvoc_ctor_DispChannelPio_fail_DispChannel; + __nvoc_init_dataField_DispChannelPio(pThis); + + status = __nvoc_dispchnpioConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispChannelPio_fail__init; + goto __nvoc_ctor_DispChannelPio_exit; // Success + +__nvoc_ctor_DispChannelPio_fail__init: + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); +__nvoc_ctor_DispChannelPio_fail_DispChannel: +__nvoc_ctor_DispChannelPio_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispChannelPio_1(DispChannelPio *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispchnpioShareCallback__ = &__nvoc_thunk_GpuResource_dispchnpioShareCallback; + + pThis->__dispchnpioMapTo__ = &__nvoc_thunk_RsResource_dispchnpioMapTo; + + pThis->__dispchnpioGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispchnpioGetOrAllocNotifShare; + + pThis->__dispchnpioCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispchnpioCheckMemInterUnmap; + + pThis->__dispchnpioGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispchnpioGetMapAddrSpace; + + pThis->__dispchnpioSetNotificationShare__ = &__nvoc_thunk_Notifier_dispchnpioSetNotificationShare; + + pThis->__dispchnpioGetRefCount__ = 
&__nvoc_thunk_RsResource_dispchnpioGetRefCount; + + pThis->__dispchnpioAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispchnpioAddAdditionalDependants; + + pThis->__dispchnpioControl_Prologue__ = &__nvoc_thunk_RmResource_dispchnpioControl_Prologue; + + pThis->__dispchnpioGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispChannel_dispchnpioGetRegBaseOffsetAndSize; + + pThis->__dispchnpioInternalControlForward__ = &__nvoc_thunk_GpuResource_dispchnpioInternalControlForward; + + pThis->__dispchnpioUnmapFrom__ = &__nvoc_thunk_RsResource_dispchnpioUnmapFrom; + + pThis->__dispchnpioControl_Epilogue__ = &__nvoc_thunk_RmResource_dispchnpioControl_Epilogue; + + pThis->__dispchnpioControlLookup__ = &__nvoc_thunk_RsResource_dispchnpioControlLookup; + + pThis->__dispchnpioGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispchnpioGetInternalObjectHandle; + + pThis->__dispchnpioControl__ = &__nvoc_thunk_GpuResource_dispchnpioControl; + + pThis->__dispchnpioUnmap__ = &__nvoc_thunk_GpuResource_dispchnpioUnmap; + + pThis->__dispchnpioGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispchnpioGetMemInterMapParams; + + pThis->__dispchnpioGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispchnpioGetMemoryMappingDescriptor; + + pThis->__dispchnpioControlFilter__ = &__nvoc_thunk_RsResource_dispchnpioControlFilter; + + pThis->__dispchnpioUnregisterEvent__ = &__nvoc_thunk_Notifier_dispchnpioUnregisterEvent; + + pThis->__dispchnpioCanCopy__ = &__nvoc_thunk_RsResource_dispchnpioCanCopy; + + pThis->__dispchnpioPreDestruct__ = &__nvoc_thunk_RsResource_dispchnpioPreDestruct; + + pThis->__dispchnpioGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispchnpioGetNotificationListPtr; + + pThis->__dispchnpioGetNotificationShare__ = &__nvoc_thunk_Notifier_dispchnpioGetNotificationShare; + + pThis->__dispchnpioMap__ = &__nvoc_thunk_GpuResource_dispchnpioMap; + + pThis->__dispchnpioAccessCallback__ = &__nvoc_thunk_RmResource_dispchnpioAccessCallback; +} + +void __nvoc_init_funcTable_DispChannelPio(DispChannelPio *pThis) { + __nvoc_init_funcTable_DispChannelPio_1(pThis); +} + +void __nvoc_init_DispChannel(DispChannel*); +void __nvoc_init_DispChannelPio(DispChannelPio *pThis) { + pThis->__nvoc_pbase_DispChannelPio = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DispChannel = &pThis->__nvoc_base_DispChannel; + __nvoc_init_DispChannel(&pThis->__nvoc_base_DispChannel); + __nvoc_init_funcTable_DispChannelPio(pThis); +} + +NV_STATUS __nvoc_objCreate_DispChannelPio(DispChannelPio **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispChannelPio *pThis; + + pThis = 
portMemAllocNonPaged(sizeof(DispChannelPio)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispChannelPio)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispChannelPio); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispChannelPio(pThis); + status = __nvoc_ctor_DispChannelPio(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispChannelPio_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispChannelPio_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispChannelPio(DispChannelPio **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispChannelPio(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xfe3d2e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +void __nvoc_init_DispChannelDma(DispChannelDma*); +void __nvoc_init_funcTable_DispChannelDma(DispChannelDma*); +NV_STATUS __nvoc_ctor_DispChannelDma(DispChannelDma*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispChannelDma(DispChannelDma*); +void __nvoc_dtor_DispChannelDma(DispChannelDma*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannelDma; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_DispChannelDma = { + /*pClassDef=*/ &__nvoc_class_def_DispChannelDma, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispChannelDma, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_RmResourceCommon = 
{ + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispChannelDma_DispChannel = { + /*pClassDef=*/ &__nvoc_class_def_DispChannel, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispChannelDma, __nvoc_base_DispChannel), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispChannelDma = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_DispChannelDma_DispChannelDma, + &__nvoc_rtti_DispChannelDma_DispChannel, + &__nvoc_rtti_DispChannelDma_Notifier, + &__nvoc_rtti_DispChannelDma_INotifier, + &__nvoc_rtti_DispChannelDma_GpuResource, + &__nvoc_rtti_DispChannelDma_RmResource, + &__nvoc_rtti_DispChannelDma_RmResourceCommon, + &__nvoc_rtti_DispChannelDma_RsResource, + &__nvoc_rtti_DispChannelDma_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispChannelDma), + /*classId=*/ classId(DispChannelDma), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispChannelDma", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispChannelDma, + /*pCastInfo=*/ &__nvoc_castinfo_DispChannelDma, + /*pExportInfo=*/ &__nvoc_export_info_DispChannelDma +}; + +static NvBool __nvoc_thunk_GpuResource_dispchndmaShareCallback(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchndmaMapTo(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchndmaGetOrAllocNotifShare(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static 
NV_STATUS __nvoc_thunk_RmResource_dispchndmaCheckMemInterUnmap(struct DispChannelDma *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaGetMapAddrSpace(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_dispchndmaSetNotificationShare(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_dispchndmaGetRefCount(struct DispChannelDma *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchndmaAddAdditionalDependants(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchndmaControl_Prologue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_DispChannel_dispchndmaGetRegBaseOffsetAndSize(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispchnGetRegBaseOffsetAndSize((struct DispChannel *)(((unsigned char *)pDispChannel) + __nvoc_rtti_DispChannelDma_DispChannel.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaInternalControlForward(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchndmaUnmapFrom(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispchndmaControl_Epilogue(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchndmaControlLookup(struct DispChannelDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_dispchndmaGetInternalObjectHandle(struct 
DispChannelDma *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaControl(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaUnmap(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchndmaGetMemInterMapParams(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispchndmaGetMemoryMappingDescriptor(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispchndmaControlFilter(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispchndmaUnregisterEvent(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_dispchndmaCanCopy(struct DispChannelDma *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispchndmaPreDestruct(struct DispChannelDma *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispchndmaGetNotificationListPtr(struct DispChannelDma *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispchndmaGetNotificationShare(struct DispChannelDma *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispChannelDma_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispchndmaMap(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispChannelDma_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool 
__nvoc_thunk_RmResource_dispchndmaAccessCallback(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispChannelDma_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispChannelDma = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_DispChannel(DispChannel*); +void __nvoc_dtor_DispChannelDma(DispChannelDma *pThis) { + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispChannelDma(DispChannelDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DispChannel(DispChannel* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, NvU32); +NV_STATUS __nvoc_ctor_DispChannelDma(DispChannelDma *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DispChannel(&pThis->__nvoc_base_DispChannel, arg_pCallContext, arg_pParams, ((NvBool)(0 == 0))); + if (status != NV_OK) goto __nvoc_ctor_DispChannelDma_fail_DispChannel; + __nvoc_init_dataField_DispChannelDma(pThis); + + status = __nvoc_dispchndmaConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispChannelDma_fail__init; + goto __nvoc_ctor_DispChannelDma_exit; // Success + +__nvoc_ctor_DispChannelDma_fail__init: + __nvoc_dtor_DispChannel(&pThis->__nvoc_base_DispChannel); +__nvoc_ctor_DispChannelDma_fail_DispChannel: +__nvoc_ctor_DispChannelDma_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispChannelDma_1(DispChannelDma *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispchndmaShareCallback__ = &__nvoc_thunk_GpuResource_dispchndmaShareCallback; + + pThis->__dispchndmaMapTo__ = &__nvoc_thunk_RsResource_dispchndmaMapTo; + + pThis->__dispchndmaGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispchndmaGetOrAllocNotifShare; + + pThis->__dispchndmaCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispchndmaCheckMemInterUnmap; + + pThis->__dispchndmaGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispchndmaGetMapAddrSpace; + + pThis->__dispchndmaSetNotificationShare__ = &__nvoc_thunk_Notifier_dispchndmaSetNotificationShare; + + pThis->__dispchndmaGetRefCount__ = &__nvoc_thunk_RsResource_dispchndmaGetRefCount; + + pThis->__dispchndmaAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispchndmaAddAdditionalDependants; + + pThis->__dispchndmaControl_Prologue__ = &__nvoc_thunk_RmResource_dispchndmaControl_Prologue; + + pThis->__dispchndmaGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispChannel_dispchndmaGetRegBaseOffsetAndSize; + + pThis->__dispchndmaInternalControlForward__ = &__nvoc_thunk_GpuResource_dispchndmaInternalControlForward; + + pThis->__dispchndmaUnmapFrom__ = &__nvoc_thunk_RsResource_dispchndmaUnmapFrom; + + pThis->__dispchndmaControl_Epilogue__ = &__nvoc_thunk_RmResource_dispchndmaControl_Epilogue; + + pThis->__dispchndmaControlLookup__ = &__nvoc_thunk_RsResource_dispchndmaControlLookup; + + pThis->__dispchndmaGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispchndmaGetInternalObjectHandle; + + pThis->__dispchndmaControl__ = &__nvoc_thunk_GpuResource_dispchndmaControl; + + pThis->__dispchndmaUnmap__ = &__nvoc_thunk_GpuResource_dispchndmaUnmap; + + pThis->__dispchndmaGetMemInterMapParams__ = 
&__nvoc_thunk_RmResource_dispchndmaGetMemInterMapParams; + + pThis->__dispchndmaGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispchndmaGetMemoryMappingDescriptor; + + pThis->__dispchndmaControlFilter__ = &__nvoc_thunk_RsResource_dispchndmaControlFilter; + + pThis->__dispchndmaUnregisterEvent__ = &__nvoc_thunk_Notifier_dispchndmaUnregisterEvent; + + pThis->__dispchndmaCanCopy__ = &__nvoc_thunk_RsResource_dispchndmaCanCopy; + + pThis->__dispchndmaPreDestruct__ = &__nvoc_thunk_RsResource_dispchndmaPreDestruct; + + pThis->__dispchndmaGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispchndmaGetNotificationListPtr; + + pThis->__dispchndmaGetNotificationShare__ = &__nvoc_thunk_Notifier_dispchndmaGetNotificationShare; + + pThis->__dispchndmaMap__ = &__nvoc_thunk_GpuResource_dispchndmaMap; + + pThis->__dispchndmaAccessCallback__ = &__nvoc_thunk_RmResource_dispchndmaAccessCallback; +} + +void __nvoc_init_funcTable_DispChannelDma(DispChannelDma *pThis) { + __nvoc_init_funcTable_DispChannelDma_1(pThis); +} + +void __nvoc_init_DispChannel(DispChannel*); +void __nvoc_init_DispChannelDma(DispChannelDma *pThis) { + pThis->__nvoc_pbase_DispChannelDma = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispChannel.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DispChannel = &pThis->__nvoc_base_DispChannel; + __nvoc_init_DispChannel(&pThis->__nvoc_base_DispChannel); + __nvoc_init_funcTable_DispChannelDma(pThis); +} + +NV_STATUS __nvoc_objCreate_DispChannelDma(DispChannelDma **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispChannelDma *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispChannelDma)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispChannelDma)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispChannelDma); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DispChannel.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispChannelDma(pThis); + status = __nvoc_ctor_DispChannelDma(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispChannelDma_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispChannelDma_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS 
__nvoc_objCreateDynamic_DispChannelDma(DispChannelDma **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispChannelDma(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_disp_channel_nvoc.h b/src/nvidia/generated/g_disp_channel_nvoc.h new file mode 100644 index 000000000..53ef73b2e --- /dev/null +++ b/src/nvidia/generated/g_disp_channel_nvoc.h @@ -0,0 +1,781 @@ +#ifndef _G_DISP_CHANNEL_NVOC_H_ +#define _G_DISP_CHANNEL_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispChannel and its derived classes. +* +******************************************************************************/ + +#include "g_disp_channel_nvoc.h" + +#ifndef DISP_CHANNEL_H +#define DISP_CHANNEL_H + +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + +struct DispObject; + +#ifndef __NVOC_CLASS_DispObject_TYPEDEF__ +#define __NVOC_CLASS_DispObject_TYPEDEF__ +typedef struct DispObject DispObject; +#endif /* __NVOC_CLASS_DispObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispObject +#define __nvoc_class_id_DispObject 0x999839 +#endif /* __nvoc_class_id_DispObject */ + + + +/*! 
+ * Base class for display channels + */ +#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispChannel { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DispChannel *__nvoc_pbase_DispChannel; + NV_STATUS (*__dispchnGetRegBaseOffsetAndSize__)(struct DispChannel *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__dispchnShareCallback__)(struct DispChannel *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispchnMapTo__)(struct DispChannel *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispchnGetOrAllocNotifShare__)(struct DispChannel *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__dispchnCheckMemInterUnmap__)(struct DispChannel *, NvBool); + NV_STATUS (*__dispchnGetMapAddrSpace__)(struct DispChannel *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__dispchnSetNotificationShare__)(struct DispChannel *, struct NotifShare *); + NvU32 (*__dispchnGetRefCount__)(struct DispChannel *); + void (*__dispchnAddAdditionalDependants__)(struct RsClient *, struct DispChannel *, RsResourceRef *); + NV_STATUS (*__dispchnControl_Prologue__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnInternalControlForward__)(struct DispChannel *, NvU32, void *, NvU32); + NV_STATUS (*__dispchnUnmapFrom__)(struct DispChannel *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispchnControl_Epilogue__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnControlLookup__)(struct DispChannel *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__dispchnGetInternalObjectHandle__)(struct DispChannel *); + NV_STATUS (*__dispchnControl__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnUnmap__)(struct DispChannel *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispchnGetMemInterMapParams__)(struct DispChannel *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispchnGetMemoryMappingDescriptor__)(struct DispChannel *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispchnControlFilter__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnUnregisterEvent__)(struct DispChannel *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__dispchnCanCopy__)(struct DispChannel *); + void (*__dispchnPreDestruct__)(struct DispChannel *); + PEVENTNOTIFICATION *(*__dispchnGetNotificationListPtr__)(struct DispChannel *); + struct NotifShare *(*__dispchnGetNotificationShare__)(struct DispChannel *); + NV_STATUS (*__dispchnMap__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispchnAccessCallback__)(struct DispChannel *, struct RsClient *, void *, RsAccessRight); + struct DispObject *pDispObject; + NvU32 DispClass; + NvU32 InstanceNumber; + NvP64 pControl; + NvP64 pPriv; + NvU32 ControlOffset; + NvU32 ControlLength; + NvBool bIsDma; +}; + 
+#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel; + +#define __staticCast_DispChannel(pThis) \ + ((pThis)->__nvoc_pbase_DispChannel) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannel(pThis) ((DispChannel*)NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannel(pThis) \ + ((DispChannel*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannel))) +#endif //__nvoc_disp_channel_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispChannel(DispChannel**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannel(DispChannel**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma); +#define __objCreate_DispChannel(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams, arg_isDma) \ + __nvoc_objCreate_DispChannel((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams, arg_isDma) + +#define dispchnGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchnShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchnMapTo(pResource, pParams) dispchnMapTo_DISPATCH(pResource, pParams) +#define dispchnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define dispchnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchnGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchnSetNotificationShare(pNotifier, pNotifShare) dispchnSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchnGetRefCount(pResource) dispchnGetRefCount_DISPATCH(pResource) +#define dispchnAddAdditionalDependants(pClient, pResource, pReference) dispchnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchnControl_Prologue(pResource, pCallContext, pParams) dispchnControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnInternalControlForward(pGpuResource, command, pParams, size) dispchnInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchnUnmapFrom(pResource, pParams) dispchnUnmapFrom_DISPATCH(pResource, pParams) +#define dispchnControl_Epilogue(pResource, pCallContext, pParams) dispchnControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnControlLookup(pResource, pParams, ppEntry) dispchnControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispchnGetInternalObjectHandle(pGpuResource) dispchnGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchnControl(pGpuResource, pCallContext, pParams) dispchnControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispchnUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define 
dispchnGetMemInterMapParams(pRmResource, pParams) dispchnGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispchnControlFilter(pResource, pCallContext, pParams) dispchnControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchnCanCopy(pResource) dispchnCanCopy_DISPATCH(pResource) +#define dispchnPreDestruct(pResource) dispchnPreDestruct_DISPATCH(pResource) +#define dispchnGetNotificationListPtr(pNotifier) dispchnGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchnGetNotificationShare(pNotifier) dispchnGetNotificationShare_DISPATCH(pNotifier) +#define dispchnMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS dispchnGetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS dispchnGetRegBaseOffsetAndSize_DISPATCH(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__dispchnGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NvBool dispchnShareCallback_DISPATCH(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispchnShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchnMapTo_DISPATCH(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispchnMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchnGetOrAllocNotifShare_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispchnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS dispchnCheckMemInterUnmap_DISPATCH(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispchnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchnGetMapAddrSpace_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispchnGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void dispchnSetNotificationShare_DISPATCH(struct DispChannel *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispchnSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 dispchnGetRefCount_DISPATCH(struct DispChannel *pResource) { + return pResource->__dispchnGetRefCount__(pResource); +} + +static inline void dispchnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference) { + pResource->__dispchnAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS dispchnControl_Prologue_DISPATCH(struct DispChannel *pResource, 
struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchnControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnInternalControlForward_DISPATCH(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispchnInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS dispchnUnmapFrom_DISPATCH(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispchnUnmapFrom__(pResource, pParams); +} + +static inline void dispchnControl_Epilogue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispchnControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnControlLookup_DISPATCH(struct DispChannel *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispchnControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle dispchnGetInternalObjectHandle_DISPATCH(struct DispChannel *pGpuResource) { + return pGpuResource->__dispchnGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispchnControl_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispchnControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnUnmap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchnUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispchnGetMemInterMapParams_DISPATCH(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispchnGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispchnGetMemoryMappingDescriptor_DISPATCH(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispchnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchnControlFilter_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchnControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnUnregisterEvent_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispchnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool dispchnCanCopy_DISPATCH(struct DispChannel *pResource) { + return pResource->__dispchnCanCopy__(pResource); +} + +static inline void dispchnPreDestruct_DISPATCH(struct DispChannel *pResource) { + pResource->__dispchnPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *dispchnGetNotificationListPtr_DISPATCH(struct DispChannel *pNotifier) { + return pNotifier->__dispchnGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *dispchnGetNotificationShare_DISPATCH(struct DispChannel *pNotifier) { + return pNotifier->__dispchnGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispchnMap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS 
*pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchnMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispchnAccessCallback_DISPATCH(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispchnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispchnConstruct_IMPL(struct DispChannel *arg_pDispChannel, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams, NvU32 arg_isDma); +#define __nvoc_dispchnConstruct(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma) dispchnConstruct_IMPL(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma) +void dispchnDestruct_IMPL(struct DispChannel *pDispChannel); +#define __nvoc_dispchnDestruct(pDispChannel) dispchnDestruct_IMPL(pDispChannel) +void dispchnSetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu); +#ifdef __nvoc_disp_channel_h_disabled +static inline void dispchnSetRegBaseOffsetAndSize(struct DispChannel *pDispChannel, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!"); +} +#else //__nvoc_disp_channel_h_disabled +#define dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu) dispchnSetRegBaseOffsetAndSize_IMPL(pDispChannel, pGpu) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS dispchnGrabChannel_IMPL(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms); +#ifdef __nvoc_disp_channel_h_disabled +static inline NV_STATUS dispchnGrabChannel(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms) { + NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_channel_h_disabled +#define dispchnGrabChannel(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms) dispchnGrabChannel_IMPL(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS dispchnBindCtx_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, struct ContextDma *pContextDma); +#ifdef __nvoc_disp_channel_h_disabled +static inline NV_STATUS dispchnBindCtx(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_channel_h_disabled +#define dispchnBindCtx(pDispChannel, pGpu, pContextDma) dispchnBindCtx_IMPL(pDispChannel, pGpu, pContextDma) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS dispchnUnbindCtx_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, struct ContextDma *pContextDma); +#ifdef __nvoc_disp_channel_h_disabled +static inline NV_STATUS dispchnUnbindCtx(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_channel_h_disabled +#define dispchnUnbindCtx(pDispChannel, pGpu, pContextDma) dispchnUnbindCtx_IMPL(pDispChannel, pGpu, pContextDma) +#endif //__nvoc_disp_channel_h_disabled + +NV_STATUS dispchnGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDisplayChannel, struct DispChannel **ppDispChannel); +#define dispchnGetByHandle(pClient, hDisplayChannel, ppDispChannel) dispchnGetByHandle_IMPL(pClient, hDisplayChannel, ppDispChannel) +#undef 
PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_XXX_CHANNEL_PIO + */ +#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispChannelPio { + const struct NVOC_RTTI *__nvoc_rtti; + struct DispChannel __nvoc_base_DispChannel; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DispChannel *__nvoc_pbase_DispChannel; + struct DispChannelPio *__nvoc_pbase_DispChannelPio; + NvBool (*__dispchnpioShareCallback__)(struct DispChannelPio *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispchnpioMapTo__)(struct DispChannelPio *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispchnpioGetOrAllocNotifShare__)(struct DispChannelPio *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__dispchnpioCheckMemInterUnmap__)(struct DispChannelPio *, NvBool); + NV_STATUS (*__dispchnpioGetMapAddrSpace__)(struct DispChannelPio *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__dispchnpioSetNotificationShare__)(struct DispChannelPio *, struct NotifShare *); + NvU32 (*__dispchnpioGetRefCount__)(struct DispChannelPio *); + void (*__dispchnpioAddAdditionalDependants__)(struct RsClient *, struct DispChannelPio *, RsResourceRef *); + NV_STATUS (*__dispchnpioControl_Prologue__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnpioGetRegBaseOffsetAndSize__)(struct DispChannelPio *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__dispchnpioInternalControlForward__)(struct DispChannelPio *, NvU32, void *, NvU32); + NV_STATUS (*__dispchnpioUnmapFrom__)(struct DispChannelPio *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispchnpioControl_Epilogue__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnpioControlLookup__)(struct DispChannelPio *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__dispchnpioGetInternalObjectHandle__)(struct DispChannelPio *); + NV_STATUS (*__dispchnpioControl__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnpioUnmap__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispchnpioGetMemInterMapParams__)(struct DispChannelPio *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispchnpioGetMemoryMappingDescriptor__)(struct DispChannelPio *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispchnpioControlFilter__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchnpioUnregisterEvent__)(struct DispChannelPio *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__dispchnpioCanCopy__)(struct DispChannelPio *); + void (*__dispchnpioPreDestruct__)(struct DispChannelPio *); + PEVENTNOTIFICATION *(*__dispchnpioGetNotificationListPtr__)(struct DispChannelPio *); + struct NotifShare *(*__dispchnpioGetNotificationShare__)(struct DispChannelPio *); + NV_STATUS (*__dispchnpioMap__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispchnpioAccessCallback__)(struct DispChannelPio *, 
struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_DispChannelPio_TYPEDEF__ +#define __NVOC_CLASS_DispChannelPio_TYPEDEF__ +typedef struct DispChannelPio DispChannelPio; +#endif /* __NVOC_CLASS_DispChannelPio_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelPio +#define __nvoc_class_id_DispChannelPio 0x10dec3 +#endif /* __nvoc_class_id_DispChannelPio */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio; + +#define __staticCast_DispChannelPio(pThis) \ + ((pThis)->__nvoc_pbase_DispChannelPio) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelPio(pThis) ((DispChannelPio*)NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelPio(pThis) \ + ((DispChannelPio*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelPio))) +#endif //__nvoc_disp_channel_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispChannelPio(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispChannelPio((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispchnpioShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnpioShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchnpioMapTo(pResource, pParams) dispchnpioMapTo_DISPATCH(pResource, pParams) +#define dispchnpioGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnpioGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define dispchnpioCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnpioCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchnpioGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnpioGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchnpioSetNotificationShare(pNotifier, pNotifShare) dispchnpioSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchnpioGetRefCount(pResource) dispchnpioGetRefCount_DISPATCH(pResource) +#define dispchnpioAddAdditionalDependants(pClient, pResource, pReference) dispchnpioAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchnpioControl_Prologue(pResource, pCallContext, pParams) dispchnpioControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnpioGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchnpioInternalControlForward(pGpuResource, command, pParams, size) dispchnpioInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchnpioUnmapFrom(pResource, pParams) dispchnpioUnmapFrom_DISPATCH(pResource, pParams) +#define dispchnpioControl_Epilogue(pResource, pCallContext, pParams) dispchnpioControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioControlLookup(pResource, pParams, ppEntry) dispchnpioControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispchnpioGetInternalObjectHandle(pGpuResource) dispchnpioGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchnpioControl(pGpuResource, pCallContext, pParams) 
dispchnpioControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispchnpioUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnpioUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispchnpioGetMemInterMapParams(pRmResource, pParams) dispchnpioGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchnpioGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnpioGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispchnpioControlFilter(pResource, pCallContext, pParams) dispchnpioControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchnpioUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnpioUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchnpioCanCopy(pResource) dispchnpioCanCopy_DISPATCH(pResource) +#define dispchnpioPreDestruct(pResource) dispchnpioPreDestruct_DISPATCH(pResource) +#define dispchnpioGetNotificationListPtr(pNotifier) dispchnpioGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchnpioGetNotificationShare(pNotifier) dispchnpioGetNotificationShare_DISPATCH(pNotifier) +#define dispchnpioMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnpioMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchnpioAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnpioAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool dispchnpioShareCallback_DISPATCH(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispchnpioShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchnpioMapTo_DISPATCH(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispchnpioMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchnpioGetOrAllocNotifShare_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispchnpioGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS dispchnpioCheckMemInterUnmap_DISPATCH(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispchnpioCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchnpioGetMapAddrSpace_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispchnpioGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void dispchnpioSetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispchnpioSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 dispchnpioGetRefCount_DISPATCH(struct DispChannelPio *pResource) { + return pResource->__dispchnpioGetRefCount__(pResource); +} + +static inline void dispchnpioAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference) { + pResource->__dispchnpioAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS dispchnpioControl_Prologue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchnpioControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__dispchnpioGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispchnpioInternalControlForward_DISPATCH(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispchnpioInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS dispchnpioUnmapFrom_DISPATCH(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispchnpioUnmapFrom__(pResource, pParams); +} + +static inline void dispchnpioControl_Epilogue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispchnpioControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioControlLookup_DISPATCH(struct DispChannelPio *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispchnpioControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle dispchnpioGetInternalObjectHandle_DISPATCH(struct DispChannelPio *pGpuResource) { + return pGpuResource->__dispchnpioGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispchnpioControl_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispchnpioControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioUnmap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchnpioUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispchnpioGetMemInterMapParams_DISPATCH(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispchnpioGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispchnpioGetMemoryMappingDescriptor_DISPATCH(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispchnpioGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchnpioControlFilter_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchnpioControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchnpioUnregisterEvent_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispchnpioUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool dispchnpioCanCopy_DISPATCH(struct DispChannelPio *pResource) { + return pResource->__dispchnpioCanCopy__(pResource); +} + +static inline void dispchnpioPreDestruct_DISPATCH(struct DispChannelPio *pResource) { + pResource->__dispchnpioPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *dispchnpioGetNotificationListPtr_DISPATCH(struct DispChannelPio *pNotifier) { + return 
pNotifier->__dispchnpioGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *dispchnpioGetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier) { + return pNotifier->__dispchnpioGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispchnpioMap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchnpioMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispchnpioAccessCallback_DISPATCH(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispchnpioAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispchnpioConstruct_IMPL(struct DispChannelPio *arg_pDispChannelPio, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispchnpioConstruct(arg_pDispChannelPio, arg_pCallContext, arg_pParams) dispchnpioConstruct_IMPL(arg_pDispChannelPio, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_XXX_CHANNEL_DMA + */ +#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispChannelDma { + const struct NVOC_RTTI *__nvoc_rtti; + struct DispChannel __nvoc_base_DispChannel; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DispChannel *__nvoc_pbase_DispChannel; + struct DispChannelDma *__nvoc_pbase_DispChannelDma; + NvBool (*__dispchndmaShareCallback__)(struct DispChannelDma *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispchndmaMapTo__)(struct DispChannelDma *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispchndmaGetOrAllocNotifShare__)(struct DispChannelDma *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__dispchndmaCheckMemInterUnmap__)(struct DispChannelDma *, NvBool); + NV_STATUS (*__dispchndmaGetMapAddrSpace__)(struct DispChannelDma *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__dispchndmaSetNotificationShare__)(struct DispChannelDma *, struct NotifShare *); + NvU32 (*__dispchndmaGetRefCount__)(struct DispChannelDma *); + void (*__dispchndmaAddAdditionalDependants__)(struct RsClient *, struct DispChannelDma *, RsResourceRef *); + NV_STATUS (*__dispchndmaControl_Prologue__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchndmaGetRegBaseOffsetAndSize__)(struct DispChannelDma *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__dispchndmaInternalControlForward__)(struct DispChannelDma *, NvU32, void *, NvU32); + NV_STATUS (*__dispchndmaUnmapFrom__)(struct DispChannelDma *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispchndmaControl_Epilogue__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchndmaControlLookup__)(struct DispChannelDma *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__dispchndmaGetInternalObjectHandle__)(struct DispChannelDma *); + NV_STATUS 
(*__dispchndmaControl__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchndmaUnmap__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispchndmaGetMemInterMapParams__)(struct DispChannelDma *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispchndmaGetMemoryMappingDescriptor__)(struct DispChannelDma *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispchndmaControlFilter__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispchndmaUnregisterEvent__)(struct DispChannelDma *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__dispchndmaCanCopy__)(struct DispChannelDma *); + void (*__dispchndmaPreDestruct__)(struct DispChannelDma *); + PEVENTNOTIFICATION *(*__dispchndmaGetNotificationListPtr__)(struct DispChannelDma *); + struct NotifShare *(*__dispchndmaGetNotificationShare__)(struct DispChannelDma *); + NV_STATUS (*__dispchndmaMap__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispchndmaAccessCallback__)(struct DispChannelDma *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_DispChannelDma_TYPEDEF__ +#define __NVOC_CLASS_DispChannelDma_TYPEDEF__ +typedef struct DispChannelDma DispChannelDma; +#endif /* __NVOC_CLASS_DispChannelDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelDma +#define __nvoc_class_id_DispChannelDma 0xfe3d2e +#endif /* __nvoc_class_id_DispChannelDma */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma; + +#define __staticCast_DispChannelDma(pThis) \ + ((pThis)->__nvoc_pbase_DispChannelDma) + +#ifdef __nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelDma(pThis) ((DispChannelDma*)NULL) +#else //__nvoc_disp_channel_h_disabled +#define __dynamicCast_DispChannelDma(pThis) \ + ((DispChannelDma*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelDma))) +#endif //__nvoc_disp_channel_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispChannelDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispChannelDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispchndmaShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchndmaShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispchndmaMapTo(pResource, pParams) dispchndmaMapTo_DISPATCH(pResource, pParams) +#define dispchndmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchndmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define dispchndmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchndmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispchndmaGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchndmaGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispchndmaSetNotificationShare(pNotifier, pNotifShare) dispchndmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispchndmaGetRefCount(pResource) 
dispchndmaGetRefCount_DISPATCH(pResource) +#define dispchndmaAddAdditionalDependants(pClient, pResource, pReference) dispchndmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispchndmaControl_Prologue(pResource, pCallContext, pParams) dispchndmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchndmaGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize) +#define dispchndmaInternalControlForward(pGpuResource, command, pParams, size) dispchndmaInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispchndmaUnmapFrom(pResource, pParams) dispchndmaUnmapFrom_DISPATCH(pResource, pParams) +#define dispchndmaControl_Epilogue(pResource, pCallContext, pParams) dispchndmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaControlLookup(pResource, pParams, ppEntry) dispchndmaControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispchndmaGetInternalObjectHandle(pGpuResource) dispchndmaGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispchndmaControl(pGpuResource, pCallContext, pParams) dispchndmaControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispchndmaUnmap(pGpuResource, pCallContext, pCpuMapping) dispchndmaUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispchndmaGetMemInterMapParams(pRmResource, pParams) dispchndmaGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispchndmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchndmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispchndmaControlFilter(pResource, pCallContext, pParams) dispchndmaControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispchndmaUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchndmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispchndmaCanCopy(pResource) dispchndmaCanCopy_DISPATCH(pResource) +#define dispchndmaPreDestruct(pResource) dispchndmaPreDestruct_DISPATCH(pResource) +#define dispchndmaGetNotificationListPtr(pNotifier) dispchndmaGetNotificationListPtr_DISPATCH(pNotifier) +#define dispchndmaGetNotificationShare(pNotifier) dispchndmaGetNotificationShare_DISPATCH(pNotifier) +#define dispchndmaMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchndmaMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispchndmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchndmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool dispchndmaShareCallback_DISPATCH(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispchndmaShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispchndmaMapTo_DISPATCH(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispchndmaMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispchndmaGetOrAllocNotifShare_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispchndmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS dispchndmaCheckMemInterUnmap_DISPATCH(struct 
DispChannelDma *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispchndmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispchndmaGetMapAddrSpace_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispchndmaGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void dispchndmaSetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispchndmaSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 dispchndmaGetRefCount_DISPATCH(struct DispChannelDma *pResource) { + return pResource->__dispchndmaGetRefCount__(pResource); +} + +static inline void dispchndmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference) { + pResource->__dispchndmaAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS dispchndmaControl_Prologue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchndmaControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispChannel->__dispchndmaGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispchndmaInternalControlForward_DISPATCH(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispchndmaInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS dispchndmaUnmapFrom_DISPATCH(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispchndmaUnmapFrom__(pResource, pParams); +} + +static inline void dispchndmaControl_Epilogue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispchndmaControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaControlLookup_DISPATCH(struct DispChannelDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispchndmaControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle dispchndmaGetInternalObjectHandle_DISPATCH(struct DispChannelDma *pGpuResource) { + return pGpuResource->__dispchndmaGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispchndmaControl_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispchndmaControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaUnmap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchndmaUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispchndmaGetMemInterMapParams_DISPATCH(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispchndmaGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS 
dispchndmaGetMemoryMappingDescriptor_DISPATCH(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispchndmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispchndmaControlFilter_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispchndmaControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispchndmaUnregisterEvent_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispchndmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool dispchndmaCanCopy_DISPATCH(struct DispChannelDma *pResource) { + return pResource->__dispchndmaCanCopy__(pResource); +} + +static inline void dispchndmaPreDestruct_DISPATCH(struct DispChannelDma *pResource) { + pResource->__dispchndmaPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *dispchndmaGetNotificationListPtr_DISPATCH(struct DispChannelDma *pNotifier) { + return pNotifier->__dispchndmaGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *dispchndmaGetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier) { + return pNotifier->__dispchndmaGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispchndmaMap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispchndmaMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispchndmaAccessCallback_DISPATCH(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispchndmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispchndmaConstruct_IMPL(struct DispChannelDma *arg_pDispChannelDma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispchndmaConstruct(arg_pDispChannelDma, arg_pCallContext, arg_pParams) dispchndmaConstruct_IMPL(arg_pDispChannelDma, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // DISP_CHANNEL_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_CHANNEL_NVOC_H_ diff --git a/src/nvidia/generated/g_disp_inst_mem_nvoc.c b/src/nvidia/generated/g_disp_inst_mem_nvoc.c new file mode 100644 index 000000000..a813df6ce --- /dev/null +++ b/src/nvidia/generated/g_disp_inst_mem_nvoc.c @@ -0,0 +1,260 @@ +#define NVOC_DISP_INST_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_inst_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x8223e2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_DisplayInstanceMemory(DisplayInstanceMemory*, RmHalspecOwner* ); +void __nvoc_init_dataField_DisplayInstanceMemory(DisplayInstanceMemory*, 
RmHalspecOwner* ); +void __nvoc_dtor_DisplayInstanceMemory(DisplayInstanceMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayInstanceMemory; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayInstanceMemory_DisplayInstanceMemory = { + /*pClassDef=*/ &__nvoc_class_def_DisplayInstanceMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DisplayInstanceMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayInstanceMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayInstanceMemory, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DisplayInstanceMemory = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_DisplayInstanceMemory_DisplayInstanceMemory, + &__nvoc_rtti_DisplayInstanceMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DisplayInstanceMemory), + /*classId=*/ classId(DisplayInstanceMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DisplayInstanceMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DisplayInstanceMemory, + /*pCastInfo=*/ &__nvoc_castinfo_DisplayInstanceMemory, + /*pExportInfo=*/ &__nvoc_export_info_DisplayInstanceMemory +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayInstanceMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_DisplayInstanceMemory(DisplayInstanceMemory *pThis) { + __nvoc_instmemDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_DisplayInstanceMemory_fail_Object; + __nvoc_init_dataField_DisplayInstanceMemory(pThis, pRmhalspecowner); + + status = __nvoc_instmemConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_DisplayInstanceMemory_fail__init; + goto __nvoc_ctor_DisplayInstanceMemory_exit; // Success + +__nvoc_ctor_DisplayInstanceMemory_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_DisplayInstanceMemory_fail_Object: +__nvoc_ctor_DisplayInstanceMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_DisplayInstanceMemory_1(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + + // Hal function -- instmemGetSize + if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) 
& 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__instmemGetSize__ = &instmemGetSize_v03_00; + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__instmemGetSize__ = &instmemGetSize_f2d351; + } + + // Hal function -- instmemGetHashTableBaseAddr + if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__instmemGetHashTableBaseAddr__ = &instmemGetHashTableBaseAddr_v03_00; + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__instmemGetHashTableBaseAddr__ = &instmemGetHashTableBaseAddr_4a4dee; + } + + // Hal function -- instmemIsValid + if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__instmemIsValid__ = &instmemIsValid_v03_00; + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__instmemIsValid__ = &instmemIsValid_491d52; + } + + // Hal function -- instmemGenerateHashTableData + if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__instmemGenerateHashTableData__ = &instmemGenerateHashTableData_v03_00; + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__instmemGenerateHashTableData__ = &instmemGenerateHashTableData_4a4dee; + } + + // Hal function -- instmemHashFunc + if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__instmemHashFunc__ = &instmemHashFunc_v03_00; + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__instmemHashFunc__ = &instmemHashFunc_46f6a7; + } + + // Hal function -- instmemCommitContextDma + if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__instmemCommitContextDma__ = &instmemCommitContextDma_v03_00; + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__instmemCommitContextDma__ = &instmemCommitContextDma_46f6a7; + } + + // Hal function -- instmemUpdateContextDma + if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__instmemUpdateContextDma__ = &instmemUpdateContextDma_v03_00; + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__instmemUpdateContextDma__ = &instmemUpdateContextDma_46f6a7; + } +} + +void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_DisplayInstanceMemory_1(pThis, pRmhalspecowner); +} + +void 
__nvoc_init_Object(Object*); +void __nvoc_init_DisplayInstanceMemory(DisplayInstanceMemory *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DisplayInstanceMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_DisplayInstanceMemory(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DisplayInstanceMemory(DisplayInstanceMemory **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + DisplayInstanceMemory *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DisplayInstanceMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DisplayInstanceMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DisplayInstanceMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DisplayInstanceMemory(pThis, pRmhalspecowner); + status = __nvoc_ctor_DisplayInstanceMemory(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_DisplayInstanceMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DisplayInstanceMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DisplayInstanceMemory(DisplayInstanceMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_DisplayInstanceMemory(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_disp_inst_mem_nvoc.h b/src/nvidia/generated/g_disp_inst_mem_nvoc.h new file mode 100644 index 000000000..88bab8eb4 --- /dev/null +++ b/src/nvidia/generated/g_disp_inst_mem_nvoc.h @@ -0,0 +1,341 @@ +#ifndef _G_DISP_INST_MEM_NVOC_H_ +#define _G_DISP_INST_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_disp_inst_mem_nvoc.h" + +#ifndef DISPLAY_INSTANCE_MEMORY_H +#define DISPLAY_INSTANCE_MEMORY_H + +/* ------------------------ Includes --------------------------------------- */ +#include "nvtypes.h" +#include "nvoc/utility.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu/mem_mgr/mem_desc.h" + +/* ------------------------ Forward Declaration ---------------------------- */ +typedef struct OBJEHEAP OBJEHEAP; +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + + +/* ------------------------ Macros & Defines ------------------------------- */ +#define KERNEL_DISPLAY_GET_INST_MEM(p) ((p)->pInst) +#define DISP_INST_MEM_ALIGN 0x10000 + +/* ------------------------ Types definitions ------------------------------ */ +/*! + * A software hash table entry + */ +typedef struct +{ + struct ContextDma *pContextDma; + struct DispChannel *pDispChannel; +} SW_HASH_TABLE_ENTRY; + +#ifdef NVOC_DISP_INST_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DisplayInstanceMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct DisplayInstanceMemory *__nvoc_pbase_DisplayInstanceMemory; + void (*__instmemGetSize__)(OBJGPU *, struct DisplayInstanceMemory *, NvU32 *, NvU32 *); + NvU32 (*__instmemGetHashTableBaseAddr__)(OBJGPU *, struct DisplayInstanceMemory *); + NvBool (*__instmemIsValid__)(OBJGPU *, struct DisplayInstanceMemory *, NvU32); + NvU32 (*__instmemGenerateHashTableData__)(OBJGPU *, struct DisplayInstanceMemory *, NvU32, NvU32, NvU32); + NV_STATUS (*__instmemHashFunc__)(OBJGPU *, struct DisplayInstanceMemory *, NvHandle, NvHandle, NvU32, NvU32 *); + NV_STATUS (*__instmemCommitContextDma__)(OBJGPU *, struct DisplayInstanceMemory *, struct ContextDma *); + NV_STATUS (*__instmemUpdateContextDma__)(OBJGPU *, struct DisplayInstanceMemory *, struct ContextDma *, NvU64 *, NvU64 *, NvHandle, NvU32); + NV_ADDRESS_SPACE instMemAddrSpace; + NvU32 instMemAttr; + NvU64 instMemBase; + NvU32 instMemSize; + MEMORY_DESCRIPTOR *pAllocedInstMemDesc; + MEMORY_DESCRIPTOR *pInstMemDesc; + void *pInstMem; + NvU32 nHashTableEntries; + NvU32 hashTableBaseAddr; + SW_HASH_TABLE_ENTRY *pHashTable; + OBJEHEAP *pInstHeap; +}; + +#ifndef __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +#define __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +typedef struct DisplayInstanceMemory DisplayInstanceMemory; +#endif /* __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayInstanceMemory +#define __nvoc_class_id_DisplayInstanceMemory 
0x8223e2 +#endif /* __nvoc_class_id_DisplayInstanceMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayInstanceMemory; + +#define __staticCast_DisplayInstanceMemory(pThis) \ + ((pThis)->__nvoc_pbase_DisplayInstanceMemory) + +#ifdef __nvoc_disp_inst_mem_h_disabled +#define __dynamicCast_DisplayInstanceMemory(pThis) ((DisplayInstanceMemory*)NULL) +#else //__nvoc_disp_inst_mem_h_disabled +#define __dynamicCast_DisplayInstanceMemory(pThis) \ + ((DisplayInstanceMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DisplayInstanceMemory))) +#endif //__nvoc_disp_inst_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DisplayInstanceMemory(DisplayInstanceMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DisplayInstanceMemory(DisplayInstanceMemory**, Dynamic*, NvU32); +#define __objCreate_DisplayInstanceMemory(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_DisplayInstanceMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define instmemGetSize(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) instmemGetSize_DISPATCH(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) +#define instmemGetSize_HAL(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) instmemGetSize_DISPATCH(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize) +#define instmemGetHashTableBaseAddr(pGpu, pInstMem) instmemGetHashTableBaseAddr_DISPATCH(pGpu, pInstMem) +#define instmemGetHashTableBaseAddr_HAL(pGpu, pInstMem) instmemGetHashTableBaseAddr_DISPATCH(pGpu, pInstMem) +#define instmemIsValid(pGpu, pInstMem, offset) instmemIsValid_DISPATCH(pGpu, pInstMem, offset) +#define instmemIsValid_HAL(pGpu, pInstMem, offset) instmemIsValid_DISPATCH(pGpu, pInstMem, offset) +#define instmemGenerateHashTableData(pGpu, pInstMem, hClient, offset, dispChannelNum) instmemGenerateHashTableData_DISPATCH(pGpu, pInstMem, hClient, offset, dispChannelNum) +#define instmemGenerateHashTableData_HAL(pGpu, pInstMem, hClient, offset, dispChannelNum) instmemGenerateHashTableData_DISPATCH(pGpu, pInstMem, hClient, offset, dispChannelNum) +#define instmemHashFunc(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) instmemHashFunc_DISPATCH(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) +#define instmemHashFunc_HAL(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) instmemHashFunc_DISPATCH(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result) +#define instmemCommitContextDma(pGpu, pInstMem, pContextDma) instmemCommitContextDma_DISPATCH(pGpu, pInstMem, pContextDma) +#define instmemCommitContextDma_HAL(pGpu, pInstMem, pContextDma) instmemCommitContextDma_DISPATCH(pGpu, pInstMem, pContextDma) +#define instmemUpdateContextDma(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) instmemUpdateContextDma_DISPATCH(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) +#define instmemUpdateContextDma_HAL(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) instmemUpdateContextDma_DISPATCH(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo) +static inline void instmemDecommitContextDma_b3696a(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + return; +} + +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemDecommitContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define 
instmemDecommitContextDma(pGpu, pInstMem, pContextDma) instmemDecommitContextDma_b3696a(pGpu, pInstMem, pContextDma) +#endif //__nvoc_disp_inst_mem_h_disabled + +#define instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma) instmemDecommitContextDma(pGpu, pInstMem, pContextDma) + +void instmemGetSize_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize); + +static inline void instmemGetSize_f2d351(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize) { + NV_ASSERT_PRECOMP(0); +} + +static inline void instmemGetSize_DISPATCH(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *pTotalInstMemSize, NvU32 *pHashTableSize) { + pInstMem->__instmemGetSize__(pGpu, pInstMem, pTotalInstMemSize, pHashTableSize); +} + +NvU32 instmemGetHashTableBaseAddr_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); + +static inline NvU32 instmemGetHashTableBaseAddr_4a4dee(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + return 0; +} + +static inline NvU32 instmemGetHashTableBaseAddr_DISPATCH(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + return pInstMem->__instmemGetHashTableBaseAddr__(pGpu, pInstMem); +} + +NvBool instmemIsValid_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset); + +static inline NvBool instmemIsValid_491d52(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool instmemIsValid_DISPATCH(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset) { + return pInstMem->__instmemIsValid__(pGpu, pInstMem, offset); +} + +NvU32 instmemGenerateHashTableData_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum); + +static inline NvU32 instmemGenerateHashTableData_4a4dee(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum) { + return 0; +} + +static inline NvU32 instmemGenerateHashTableData_DISPATCH(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 hClient, NvU32 offset, NvU32 dispChannelNum) { + return pInstMem->__instmemGenerateHashTableData__(pGpu, pInstMem, hClient, offset, dispChannelNum); +} + +NV_STATUS instmemHashFunc_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result); + +static inline NV_STATUS instmemHashFunc_46f6a7(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS instmemHashFunc_DISPATCH(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvHandle hClient, NvHandle hContextDma, NvU32 dispChannelNum, NvU32 *result) { + return pInstMem->__instmemHashFunc__(pGpu, pInstMem, hClient, hContextDma, dispChannelNum, result); +} + +NV_STATUS instmemCommitContextDma_v03_00(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma); + +static inline NV_STATUS instmemCommitContextDma_46f6a7(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS instmemCommitContextDma_DISPATCH(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma) { + return pInstMem->__instmemCommitContextDma__(pGpu, pInstMem, pContextDma); +} + +NV_STATUS instmemUpdateContextDma_v03_00(OBJGPU 
*pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo); + +static inline NV_STATUS instmemUpdateContextDma_46f6a7(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS instmemUpdateContextDma_DISPATCH(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, NvU64 *pNewAddress, NvU64 *pNewLimit, NvHandle hMemory, NvU32 comprInfo) { + return pInstMem->__instmemUpdateContextDma__(pGpu, pInstMem, pContextDma, pNewAddress, pNewLimit, hMemory, comprInfo); +} + +NV_STATUS instmemConstruct_IMPL(struct DisplayInstanceMemory *arg_pInstMem); +#define __nvoc_instmemConstruct(arg_pInstMem) instmemConstruct_IMPL(arg_pInstMem) +void instmemDestruct_IMPL(struct DisplayInstanceMemory *pInstMem); +#define __nvoc_instmemDestruct(pInstMem) instmemDestruct_IMPL(pInstMem) +NV_STATUS instmemStateInitLocked_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateInitLocked(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateInitLocked(pGpu, pInstMem) instmemStateInitLocked_IMPL(pGpu, pInstMem) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemStateDestroy_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemStateDestroy(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateDestroy(pGpu, pInstMem) instmemStateDestroy_IMPL(pGpu, pInstMem) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemStateLoad_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateLoad(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateLoad(pGpu, pInstMem, flags) instmemStateLoad_IMPL(pGpu, pInstMem, flags) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemStateUnload_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemStateUnload(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemStateUnload(pGpu, pInstMem, flags) instmemStateUnload_IMPL(pGpu, pInstMem, flags) +#endif //__nvoc_disp_inst_mem_h_disabled + +void instmemSetMemory_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NV_ADDRESS_SPACE dispInstMemAddrSpace, NvU32 dispInstMemAttr, NvU64 dispInstMemBase, NvU32 dispInstMemSize); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline void instmemSetMemory(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NV_ADDRESS_SPACE dispInstMemAddrSpace, NvU32 dispInstMemAttr, NvU64 dispInstMemBase, NvU32 dispInstMemSize) { + 
NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemSetMemory(pGpu, pInstMem, dispInstMemAddrSpace, dispInstMemAttr, dispInstMemBase, dispInstMemSize) instmemSetMemory_IMPL(pGpu, pInstMem, dispInstMemAddrSpace, dispInstMemAttr, dispInstMemBase, dispInstMemSize) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemBindContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemBindContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemBindContextDma(pGpu, pInstMem, pContextDma, pDispChannel) instmemBindContextDma_IMPL(pGpu, pInstMem, pContextDma, pDispChannel) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemUnbindContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemUnbindContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, struct ContextDma *pContextDma, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemUnbindContextDma(pGpu, pInstMem, pContextDma, pDispChannel) instmemUnbindContextDma_IMPL(pGpu, pInstMem, pContextDma, pDispChannel) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemReserveContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *offset); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemReserveContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 *offset) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemReserveContextDma(pGpu, pInstMem, offset) instmemReserveContextDma_IMPL(pGpu, pInstMem, offset) +#endif //__nvoc_disp_inst_mem_h_disabled + +NV_STATUS instmemFreeContextDma_IMPL(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset); +#ifdef __nvoc_disp_inst_mem_h_disabled +static inline NV_STATUS instmemFreeContextDma(OBJGPU *pGpu, struct DisplayInstanceMemory *pInstMem, NvU32 offset) { + NV_ASSERT_FAILED_PRECOMP("DisplayInstanceMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_inst_mem_h_disabled +#define instmemFreeContextDma(pGpu, pInstMem, offset) instmemFreeContextDma_IMPL(pGpu, pInstMem, offset) +#endif //__nvoc_disp_inst_mem_h_disabled + +#undef PRIVATE_FIELD + + +#endif // DISPLAY_INSTANCE_MEMORY_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_INST_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_disp_objs_nvoc.c b/src/nvidia/generated/g_disp_objs_nvoc.c new file mode 100644 index 000000000..aedf5c9b2 --- /dev/null +++ b/src/nvidia/generated/g_disp_objs_nvoc.c @@ -0,0 +1,4562 @@ +#define NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_objs_nvoc.h" + +#ifdef DEBUG +char 
__nvoc_class_id_uniqueness_check_0xe9980c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DisplayApi(DisplayApi*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_dtor_DisplayApi(DisplayApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayApi; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DisplayApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DisplayApi_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DisplayApi, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DisplayApi = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_DisplayApi_DisplayApi, + &__nvoc_rtti_DisplayApi_Notifier, + &__nvoc_rtti_DisplayApi_INotifier, + &__nvoc_rtti_DisplayApi_RmResource, + &__nvoc_rtti_DisplayApi_RmResourceCommon, + &__nvoc_rtti_DisplayApi_RsResource, + &__nvoc_rtti_DisplayApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DisplayApi), + /*classId=*/ classId(DisplayApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DisplayApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DisplayApi, + /*pCastInfo=*/ &__nvoc_castinfo_DisplayApi, + /*pExportInfo=*/ &__nvoc_export_info_DisplayApi +}; + +static NV_STATUS __nvoc_thunk_DisplayApi_resControl(struct RsResource 
*pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) - __nvoc_rtti_DisplayApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_rmresControl_Prologue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) - __nvoc_rtti_DisplayApi_RmResource.offset), pCallContext, pRsParams); +} + +static void __nvoc_thunk_DisplayApi_rmresControl_Epilogue(struct RmResource *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) - __nvoc_rtti_DisplayApi_RmResource.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RmResource_dispapiShareCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispapiCheckMemInterUnmap(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DisplayApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_RmResource_dispapiAccessCallback(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispapiGetMemInterMapParams(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DisplayApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispapiGetMemoryMappingDescriptor(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DisplayApi_RmResource.offset), ppMemDesc); +} + +static void __nvoc_thunk_Notifier_dispapiSetNotificationShare(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiControlFilter(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_dispapiGetRefCount(struct DisplayApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispapiUnregisterEvent(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return 
notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiUnmap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NvBool __nvoc_thunk_RsResource_dispapiCanCopy(struct DisplayApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiMapTo(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_dispapiAddAdditionalDependants(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_dispapiPreDestruct(struct DisplayApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiUnmapFrom(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispapiGetNotificationListPtr(struct DisplayApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispapiGetNotificationShare(struct DisplayApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiControlLookup(struct DisplayApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispapiMap(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DisplayApi_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispapiGetOrAllocNotifShare(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DisplayApi_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DisplayApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_DisplayApi(DisplayApi *pThis) { + __nvoc_dispapiDestruct(pThis); + 
__nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail_RmResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail_Notifier; + __nvoc_init_dataField_DisplayApi(pThis, pRmhalspecowner); + + status = __nvoc_dispapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DisplayApi_fail__init; + goto __nvoc_ctor_DisplayApi_exit; // Success + +__nvoc_ctor_DisplayApi_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_DisplayApi_fail_Notifier: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_DisplayApi_fail_RmResource: +__nvoc_ctor_DisplayApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_DisplayApi_1(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__dispapiControl__ = &dispapiControl_IMPL; + + pThis->__dispapiControl_Prologue__ = &dispapiControl_Prologue_IMPL; + + pThis->__dispapiControl_Epilogue__ = &dispapiControl_Epilogue_IMPL; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_DisplayApi_resControl; + + pThis->__nvoc_base_RmResource.__rmresControl_Prologue__ = &__nvoc_thunk_DisplayApi_rmresControl_Prologue; + + pThis->__nvoc_base_RmResource.__rmresControl_Epilogue__ = &__nvoc_thunk_DisplayApi_rmresControl_Epilogue; + + pThis->__dispapiShareCallback__ = &__nvoc_thunk_RmResource_dispapiShareCallback; + + pThis->__dispapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispapiCheckMemInterUnmap; + + pThis->__dispapiAccessCallback__ = &__nvoc_thunk_RmResource_dispapiAccessCallback; + + pThis->__dispapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispapiGetMemInterMapParams; + + pThis->__dispapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispapiGetMemoryMappingDescriptor; + + pThis->__dispapiSetNotificationShare__ = &__nvoc_thunk_Notifier_dispapiSetNotificationShare; + + pThis->__dispapiControlFilter__ = &__nvoc_thunk_RsResource_dispapiControlFilter; + + pThis->__dispapiGetRefCount__ = &__nvoc_thunk_RsResource_dispapiGetRefCount; + + 
pThis->__dispapiUnregisterEvent__ = &__nvoc_thunk_Notifier_dispapiUnregisterEvent; + + pThis->__dispapiUnmap__ = &__nvoc_thunk_RsResource_dispapiUnmap; + + pThis->__dispapiCanCopy__ = &__nvoc_thunk_RsResource_dispapiCanCopy; + + pThis->__dispapiMapTo__ = &__nvoc_thunk_RsResource_dispapiMapTo; + + pThis->__dispapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispapiAddAdditionalDependants; + + pThis->__dispapiPreDestruct__ = &__nvoc_thunk_RsResource_dispapiPreDestruct; + + pThis->__dispapiUnmapFrom__ = &__nvoc_thunk_RsResource_dispapiUnmapFrom; + + pThis->__dispapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispapiGetNotificationListPtr; + + pThis->__dispapiGetNotificationShare__ = &__nvoc_thunk_Notifier_dispapiGetNotificationShare; + + pThis->__dispapiControlLookup__ = &__nvoc_thunk_RsResource_dispapiControlLookup; + + pThis->__dispapiMap__ = &__nvoc_thunk_RsResource_dispapiMap; + + pThis->__dispapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispapiGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_DisplayApi_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_DisplayApi(DisplayApi *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DisplayApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_DisplayApi(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DisplayApi(DisplayApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DisplayApi *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DisplayApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DisplayApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DisplayApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DisplayApi(pThis, pRmhalspecowner); + status = __nvoc_ctor_DisplayApi(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DisplayApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DisplayApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS 
__nvoc_objCreateDynamic_DisplayApi(DisplayApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DisplayApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x999839 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +void __nvoc_init_DispObject(DispObject*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DispObject(DispObject*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_DispObject(DispObject*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispObject(DispObject*, RmHalspecOwner* ); +void __nvoc_dtor_DispObject(DispObject*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispObject; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_DispObject = { + /*pClassDef=*/ &__nvoc_class_def_DispObject, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispObject, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispObject_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispObject, __nvoc_base_DisplayApi), +}; + +static const struct 
NVOC_CASTINFO __nvoc_castinfo_DispObject = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DispObject_DispObject, + &__nvoc_rtti_DispObject_DisplayApi, + &__nvoc_rtti_DispObject_Notifier, + &__nvoc_rtti_DispObject_INotifier, + &__nvoc_rtti_DispObject_RmResource, + &__nvoc_rtti_DispObject_RmResourceCommon, + &__nvoc_rtti_DispObject_RsResource, + &__nvoc_rtti_DispObject_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispObject), + /*classId=*/ classId(DispObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispObject, + /*pCastInfo=*/ &__nvoc_castinfo_DispObject, + /*pExportInfo=*/ &__nvoc_export_info_DispObject +}; + +static NvBool __nvoc_thunk_RmResource_dispobjShareCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispobjControl(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispObject_DisplayApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RmResource_dispobjAccessCallback(struct DispObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispobjGetMemInterMapParams(struct DispObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispObject_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispobjGetMemoryMappingDescriptor(struct DispObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispObject_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispobjCheckMemInterUnmap(struct DispObject *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispObject_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_dispobjSetNotificationShare(struct DispObject *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjControlFilter(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_dispobjGetRefCount(struct DispObject *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_DispObject_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispobjUnregisterEvent(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjUnmap(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispobjControl_Prologue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispObject_DisplayApi.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RsResource_dispobjCanCopy(struct DispObject *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjMapTo(struct DispObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_dispobjAddAdditionalDependants(struct RsClient *pClient, struct DispObject *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_dispobjPreDestruct(struct DispObject *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjUnmapFrom(struct DispObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispobjGetNotificationListPtr(struct DispObject *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset)); +} + +static void __nvoc_thunk_DisplayApi_dispobjControl_Epilogue(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispObject_DisplayApi.offset), pCallContext, pRsParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispobjGetNotificationShare(struct DispObject *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjControlLookup(struct DispObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispobjMap(struct DispObject *pResource, struct 
CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispObject_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispobjGetOrAllocNotifShare(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispObject_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispObject[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetPinsetCount_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700115u, + /*paramSize=*/ sizeof(NV5070_CTRL_GET_PINSET_COUNT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetPinsetCount" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetPinsetPeer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700116u, + /*paramSize=*/ sizeof(NV5070_CTRL_GET_PINSET_PEER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetPinsetPeer" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRmFreeFlags_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700117u, + /*paramSize=*/ sizeof(NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRmFreeFlags" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdIMPSetGetParameter_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700118u, + /*paramSize=*/ sizeof(NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdIMPSetGetParameter" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetMempoolWARForBlitTearing_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700119u, + /*paramSize=*/ sizeof(NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetMempoolWARForBlitTearing" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + 
/*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700202u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgStatus" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgUnderflowProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700203u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgUnderflowProp" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRgUnderflowProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700204u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRgUnderflowProp" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgFliplockProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700205u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgFliplockProp" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetRgFliplockProp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700206u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetRgFliplockProp" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgConnectedLockpin_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700207u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgConnectedLockpin" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x5070020au, + /*paramSize=*/ sizeof(NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + 
/*func=*/ "dispobjCtrlCmdGetRgConnectedLockpinStateless" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetPinsetLockpins_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x5070020bu, + /*paramSize=*/ sizeof(NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetPinsetLockpins" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetRgScanLine_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x5070020cu, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetRgScanLine" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetFrameLockHeaderLockPins_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x5070020du, + /*paramSize=*/ sizeof(NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetFrameLockHeaderLockPins" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetSorSeqCtl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700301u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetSorSeqCtl" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorSeqCtl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700302u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorSeqCtl" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetPiorSeqCtl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700303u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetPiorSeqCtl" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetPiorSeqCtl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700304u, + /*paramSize=*/ 
sizeof(NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetPiorSeqCtl" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSeqProgSpeed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700305u, + /*paramSize=*/ sizeof(NV5070_CTRL_SEQ_PROG_SPEED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSeqProgSpeed" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetSorPwm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700420u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetSorPwm" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorPwm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700421u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorPwm" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetSorOpMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700422u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetSorOpMode" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorOpMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700423u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorOpMode" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdGetPiorOpMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700430u, + /*paramSize=*/ sizeof(NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdGetPiorOpMode" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetPiorOpMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700431u, 
+ /*paramSize=*/ sizeof(NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetPiorOpMode" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSetSorFlushMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700457u, + /*paramSize=*/ sizeof(NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSetSorFlushMode" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdSystemGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700709u, + /*paramSize=*/ sizeof(NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdSystemGetCapsV2" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdEventSetTrigger_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700902u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdEventSetTrigger" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispobjCtrlCmdEventSetMemoryNotifies_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x50700903u, + /*paramSize=*/ sizeof(NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispobjCtrlCmdEventSetMemoryNotifies" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispObject = +{ + /*numEntries=*/ 30, + /*pExportEntries=*/ __nvoc_exported_method_def_DispObject +}; + +void __nvoc_dtor_DisplayApi(DisplayApi*); +void __nvoc_dtor_DispObject(DispObject *pThis) { + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto 
__nvoc_ctor_DispObject_fail_DisplayApi; + __nvoc_init_dataField_DispObject(pThis, pRmhalspecowner); + + status = __nvoc_dispobjConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispObject_fail__init; + goto __nvoc_ctor_DispObject_exit; // Success + +__nvoc_ctor_DispObject_fail__init: + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); +__nvoc_ctor_DispObject_fail_DisplayApi: +__nvoc_ctor_DispObject_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispObject_1(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetPinsetCount__ = &dispobjCtrlCmdGetPinsetCount_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetPinsetPeer__ = &dispobjCtrlCmdGetPinsetPeer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetMempoolWARForBlitTearing__ = &dispobjCtrlCmdSetMempoolWARForBlitTearing_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetPinsetLockpins__ = &dispobjCtrlCmdGetPinsetLockpins_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispobjCtrlCmdGetFrameLockHeaderLockPins__ = &dispobjCtrlCmdGetFrameLockHeaderLockPins_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetPiorSeqCtl__ = &dispobjCtrlCmdGetPiorSeqCtl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetPiorSeqCtl__ = &dispobjCtrlCmdSetPiorSeqCtl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetPiorOpMode__ = &dispobjCtrlCmdGetPiorOpMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetPiorOpMode__ = &dispobjCtrlCmdSetPiorOpMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__dispobjCtrlCmdEventSetMemoryNotifies__ = &dispobjCtrlCmdEventSetMemoryNotifies_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispobjCtrlCmdSetRmFreeFlags__ = &dispobjCtrlCmdSetRmFreeFlags_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdIMPSetGetParameter__ = &dispobjCtrlCmdIMPSetGetParameter_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispobjCtrlCmdGetRgStatus__ = &dispobjCtrlCmdGetRgStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetRgUnderflowProp__ = &dispobjCtrlCmdGetRgUnderflowProp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetRgUnderflowProp__ = &dispobjCtrlCmdSetRgUnderflowProp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetRgFliplockProp__ = &dispobjCtrlCmdGetRgFliplockProp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispobjCtrlCmdSetRgFliplockProp__ = &dispobjCtrlCmdSetRgFliplockProp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetRgConnectedLockpin__ = 
&dispobjCtrlCmdGetRgConnectedLockpin_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispobjCtrlCmdGetRgConnectedLockpinStateless__ = &dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetRgScanLine__ = &dispobjCtrlCmdGetRgScanLine_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetSorSeqCtl__ = &dispobjCtrlCmdGetSorSeqCtl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetSorSeqCtl__ = &dispobjCtrlCmdSetSorSeqCtl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSeqProgSpeed__ = &dispobjCtrlCmdSeqProgSpeed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetSorPwm__ = &dispobjCtrlCmdGetSorPwm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetSorPwm__ = &dispobjCtrlCmdSetSorPwm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdGetSorOpMode__ = &dispobjCtrlCmdGetSorOpMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetSorOpMode__ = &dispobjCtrlCmdSetSorOpMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSetSorFlushMode__ = &dispobjCtrlCmdSetSorFlushMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispobjCtrlCmdSystemGetCapsV2__ = &dispobjCtrlCmdSystemGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__dispobjCtrlCmdEventSetTrigger__ = &dispobjCtrlCmdEventSetTrigger_IMPL; +#endif + + pThis->__dispobjShareCallback__ = &__nvoc_thunk_RmResource_dispobjShareCallback; + + pThis->__dispobjControl__ = &__nvoc_thunk_DisplayApi_dispobjControl; + + pThis->__dispobjAccessCallback__ = &__nvoc_thunk_RmResource_dispobjAccessCallback; + + pThis->__dispobjGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispobjGetMemInterMapParams; + + pThis->__dispobjGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispobjGetMemoryMappingDescriptor; + + pThis->__dispobjCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispobjCheckMemInterUnmap; + + pThis->__dispobjSetNotificationShare__ = &__nvoc_thunk_Notifier_dispobjSetNotificationShare; + + pThis->__dispobjControlFilter__ = &__nvoc_thunk_RsResource_dispobjControlFilter; + + pThis->__dispobjGetRefCount__ = &__nvoc_thunk_RsResource_dispobjGetRefCount; + + pThis->__dispobjUnregisterEvent__ = &__nvoc_thunk_Notifier_dispobjUnregisterEvent; + + pThis->__dispobjUnmap__ = &__nvoc_thunk_RsResource_dispobjUnmap; + + pThis->__dispobjControl_Prologue__ = &__nvoc_thunk_DisplayApi_dispobjControl_Prologue; + + pThis->__dispobjCanCopy__ = &__nvoc_thunk_RsResource_dispobjCanCopy; + + pThis->__dispobjMapTo__ = &__nvoc_thunk_RsResource_dispobjMapTo; + + pThis->__dispobjAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispobjAddAdditionalDependants; + + pThis->__dispobjPreDestruct__ = &__nvoc_thunk_RsResource_dispobjPreDestruct; + + pThis->__dispobjUnmapFrom__ = &__nvoc_thunk_RsResource_dispobjUnmapFrom; + + pThis->__dispobjGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispobjGetNotificationListPtr; + + pThis->__dispobjControl_Epilogue__ = &__nvoc_thunk_DisplayApi_dispobjControl_Epilogue; + + pThis->__dispobjGetNotificationShare__ = &__nvoc_thunk_Notifier_dispobjGetNotificationShare; + + pThis->__dispobjControlLookup__ = 
&__nvoc_thunk_RsResource_dispobjControlLookup; + + pThis->__dispobjMap__ = &__nvoc_thunk_RsResource_dispobjMap; + + pThis->__dispobjGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispobjGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_DispObject_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_init_DispObject(DispObject *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DispObject = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; + __nvoc_init_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + __nvoc_init_funcTable_DispObject(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DispObject(DispObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispObject *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DispObject)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispObject)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispObject); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DispObject(pThis, pRmhalspecowner); + status = __nvoc_ctor_DispObject(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispObject_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispObject_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispObject(DispObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x36aa0b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi; + +extern const 
struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; + +void __nvoc_init_NvDispApi(NvDispApi*, RmHalspecOwner* ); +void __nvoc_init_funcTable_NvDispApi(NvDispApi*); +NV_STATUS __nvoc_ctor_NvDispApi(NvDispApi*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_NvDispApi(NvDispApi*); +void __nvoc_dtor_NvDispApi(NvDispApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NvDispApi; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_NvDispApi = { + /*pClassDef=*/ &__nvoc_class_def_NvDispApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NvDispApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject.__nvoc_base_DisplayApi), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDispApi_DispObject = { + /*pClassDef=*/ &__nvoc_class_def_DispObject, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDispApi, __nvoc_base_DispObject), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_NvDispApi = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_NvDispApi_NvDispApi, + &__nvoc_rtti_NvDispApi_DispObject, + &__nvoc_rtti_NvDispApi_DisplayApi, + 
&__nvoc_rtti_NvDispApi_Notifier, + &__nvoc_rtti_NvDispApi_INotifier, + &__nvoc_rtti_NvDispApi_RmResource, + &__nvoc_rtti_NvDispApi_RmResourceCommon, + &__nvoc_rtti_NvDispApi_RsResource, + &__nvoc_rtti_NvDispApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NvDispApi), + /*classId=*/ classId(NvDispApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NvDispApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NvDispApi, + /*pCastInfo=*/ &__nvoc_castinfo_NvDispApi, + /*pExportInfo=*/ &__nvoc_export_info_NvDispApi +}; + +static NvBool __nvoc_thunk_RmResource_nvdispapiShareCallback(struct NvDispApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_nvdispapiControl(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_NvDispApi_DisplayApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RmResource_nvdispapiAccessCallback(struct NvDispApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdispapiGetMemInterMapParams(struct NvDispApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvDispApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdispapiGetMemoryMappingDescriptor(struct NvDispApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvDispApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdispapiCheckMemInterUnmap(struct NvDispApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvDispApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_nvdispapiSetNotificationShare(struct NvDispApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiControlFilter(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_nvdispapiGetRefCount(struct NvDispApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_nvdispapiUnregisterEvent(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, 
NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiUnmap(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_nvdispapiControl_Prologue(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_NvDispApi_DisplayApi.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RsResource_nvdispapiCanCopy(struct NvDispApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiMapTo(struct NvDispApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_nvdispapiAddAdditionalDependants(struct RsClient *pClient, struct NvDispApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_nvdispapiPreDestruct(struct NvDispApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiUnmapFrom(struct NvDispApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_nvdispapiGetNotificationListPtr(struct NvDispApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset)); +} + +static void __nvoc_thunk_DisplayApi_nvdispapiControl_Epilogue(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_NvDispApi_DisplayApi.offset), pCallContext, pRsParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_nvdispapiGetNotificationShare(struct NvDispApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiControlLookup(struct NvDispApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdispapiMap(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvDispApi_RsResource.offset), 
pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_nvdispapiGetOrAllocNotifShare(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvDispApi_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_NvDispApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdIdleChannel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700101u, + /*paramSize=*/ sizeof(NVC370_CTRL_IDLE_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdIdleChannel" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetAccl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700102u, + /*paramSize=*/ sizeof(NVC370_CTRL_SET_ACCL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetAccl" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetAccl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700103u, + /*paramSize=*/ sizeof(NVC370_CTRL_GET_ACCL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdGetAccl" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetChannelInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700104u, + /*paramSize=*/ sizeof(NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdGetChannelInfo" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdGetLockpinsCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700201u, + /*paramSize=*/ sizeof(NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdGetLockpinsCaps" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700202u, + /*paramSize=*/ sizeof(NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetSwaprdyGpioWar" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3700602u, + /*paramSize=*/ sizeof(NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_NvDispApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_NvDispApi = +{ + /*numEntries=*/ 7, + /*pExportEntries=*/ __nvoc_exported_method_def_NvDispApi +}; + +void __nvoc_dtor_DispObject(DispObject*); +void __nvoc_dtor_NvDispApi(NvDispApi *pThis) { + __nvoc_dtor_DispObject(&pThis->__nvoc_base_DispObject); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NvDispApi(NvDispApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DispObject(DispObject* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_NvDispApi(NvDispApi *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DispObject(&pThis->__nvoc_base_DispObject, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NvDispApi_fail_DispObject; + __nvoc_init_dataField_NvDispApi(pThis); + + status = __nvoc_nvdispapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NvDispApi_fail__init; + goto __nvoc_ctor_NvDispApi_exit; // Success + +__nvoc_ctor_NvDispApi_fail__init: + __nvoc_dtor_DispObject(&pThis->__nvoc_base_DispObject); +__nvoc_ctor_NvDispApi_fail_DispObject: +__nvoc_ctor_NvDispApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_NvDispApi_1(NvDispApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdIdleChannel__ = &nvdispapiCtrlCmdIdleChannel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdSetAccl__ = &nvdispapiCtrlCmdSetAccl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdGetAccl__ = &nvdispapiCtrlCmdGetAccl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__nvdispapiCtrlCmdGetChannelInfo__ = &nvdispapiCtrlCmdGetChannelInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__nvdispapiCtrlCmdSetSwaprdyGpioWar__ = &nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdGetLockpinsCaps__ = &nvdispapiCtrlCmdGetLockpinsCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__ = &nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL; +#endif + + pThis->__nvdispapiShareCallback__ = &__nvoc_thunk_RmResource_nvdispapiShareCallback; + + pThis->__nvdispapiControl__ = &__nvoc_thunk_DisplayApi_nvdispapiControl; + + pThis->__nvdispapiAccessCallback__ = &__nvoc_thunk_RmResource_nvdispapiAccessCallback; + + pThis->__nvdispapiGetMemInterMapParams__ = 
&__nvoc_thunk_RmResource_nvdispapiGetMemInterMapParams; + + pThis->__nvdispapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_nvdispapiGetMemoryMappingDescriptor; + + pThis->__nvdispapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_nvdispapiCheckMemInterUnmap; + + pThis->__nvdispapiSetNotificationShare__ = &__nvoc_thunk_Notifier_nvdispapiSetNotificationShare; + + pThis->__nvdispapiControlFilter__ = &__nvoc_thunk_RsResource_nvdispapiControlFilter; + + pThis->__nvdispapiGetRefCount__ = &__nvoc_thunk_RsResource_nvdispapiGetRefCount; + + pThis->__nvdispapiUnregisterEvent__ = &__nvoc_thunk_Notifier_nvdispapiUnregisterEvent; + + pThis->__nvdispapiUnmap__ = &__nvoc_thunk_RsResource_nvdispapiUnmap; + + pThis->__nvdispapiControl_Prologue__ = &__nvoc_thunk_DisplayApi_nvdispapiControl_Prologue; + + pThis->__nvdispapiCanCopy__ = &__nvoc_thunk_RsResource_nvdispapiCanCopy; + + pThis->__nvdispapiMapTo__ = &__nvoc_thunk_RsResource_nvdispapiMapTo; + + pThis->__nvdispapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_nvdispapiAddAdditionalDependants; + + pThis->__nvdispapiPreDestruct__ = &__nvoc_thunk_RsResource_nvdispapiPreDestruct; + + pThis->__nvdispapiUnmapFrom__ = &__nvoc_thunk_RsResource_nvdispapiUnmapFrom; + + pThis->__nvdispapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_nvdispapiGetNotificationListPtr; + + pThis->__nvdispapiControl_Epilogue__ = &__nvoc_thunk_DisplayApi_nvdispapiControl_Epilogue; + + pThis->__nvdispapiGetNotificationShare__ = &__nvoc_thunk_Notifier_nvdispapiGetNotificationShare; + + pThis->__nvdispapiControlLookup__ = &__nvoc_thunk_RsResource_nvdispapiControlLookup; + + pThis->__nvdispapiMap__ = &__nvoc_thunk_RsResource_nvdispapiMap; + + pThis->__nvdispapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_nvdispapiGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_NvDispApi(NvDispApi *pThis) { + __nvoc_init_funcTable_NvDispApi_1(pThis); +} + +void __nvoc_init_DispObject(DispObject*, RmHalspecOwner* ); +void __nvoc_init_NvDispApi(NvDispApi *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_NvDispApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi; + pThis->__nvoc_pbase_DispObject = &pThis->__nvoc_base_DispObject; + __nvoc_init_DispObject(&pThis->__nvoc_base_DispObject, pRmhalspecowner); + __nvoc_init_funcTable_NvDispApi(pThis); +} + +NV_STATUS __nvoc_objCreate_NvDispApi(NvDispApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + NvDispApi *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(NvDispApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + 
portMemSet(pThis, 0, sizeof(NvDispApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NvDispApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DispObject.__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_NvDispApi(pThis, pRmhalspecowner); + status = __nvoc_ctor_NvDispApi(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_NvDispApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_NvDispApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NvDispApi(NvDispApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_NvDispApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x6aa5e2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +void __nvoc_init_DispSwObj(DispSwObj*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DispSwObj(DispSwObj*); +NV_STATUS __nvoc_ctor_DispSwObj(DispSwObj*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispSwObj(DispSwObj*); +void __nvoc_dtor_DispSwObj(DispSwObj*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSwObj; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_DispSwObj = { + /*pClassDef=*/ &__nvoc_class_def_DispSwObj, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSwObj, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObj_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObj, __nvoc_base_DisplayApi), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispSwObj = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DispSwObj_DispSwObj, + &__nvoc_rtti_DispSwObj_DisplayApi, + &__nvoc_rtti_DispSwObj_Notifier, + &__nvoc_rtti_DispSwObj_INotifier, + &__nvoc_rtti_DispSwObj_RmResource, + &__nvoc_rtti_DispSwObj_RmResourceCommon, + &__nvoc_rtti_DispSwObj_RsResource, + &__nvoc_rtti_DispSwObj_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispSwObj), + /*classId=*/ classId(DispSwObj), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispSwObj", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSwObj, + /*pCastInfo=*/ &__nvoc_castinfo_DispSwObj, + /*pExportInfo=*/ &__nvoc_export_info_DispSwObj +}; + +static NvBool __nvoc_thunk_RmResource_dispswobjShareCallback(struct DispSwObj *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispswobjControl(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispSwObj_DisplayApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RmResource_dispswobjAccessCallback(struct DispSwObj *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispswobjGetMemInterMapParams(struct DispSwObj *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSwObj_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispswobjGetMemoryMappingDescriptor(struct DispSwObj *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + 
__nvoc_rtti_DispSwObj_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispswobjCheckMemInterUnmap(struct DispSwObj *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSwObj_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_dispswobjSetNotificationShare(struct DispSwObj *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjControlFilter(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_dispswobjGetRefCount(struct DispSwObj *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispswobjUnregisterEvent(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjUnmap(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispswobjControl_Prologue(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispSwObj_DisplayApi.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RsResource_dispswobjCanCopy(struct DispSwObj *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjMapTo(struct DispSwObj *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_dispswobjAddAdditionalDependants(struct RsClient *pClient, struct DispSwObj *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_dispswobjPreDestruct(struct DispSwObj *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjUnmapFrom(struct DispSwObj *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispswobjGetNotificationListPtr(struct DispSwObj *pNotifier) { + return 
notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset)); +} + +static void __nvoc_thunk_DisplayApi_dispswobjControl_Epilogue(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispSwObj_DisplayApi.offset), pCallContext, pRsParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispswobjGetNotificationShare(struct DispSwObj *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjControlLookup(struct DispSwObj *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswobjMap(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObj_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispswobjGetOrAllocNotifShare(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObj_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispSwObj[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdIsModePossible_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720101u, + /*paramSize=*/ sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdIsModePossible" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdIsModePossibleOrSettings_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720102u, + /*paramSize=*/ sizeof(NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdIsModePossibleOrSettings" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720103u, + /*paramSize=*/ sizeof(NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdVideoAdaptiveRefreshRate" 
+#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswobjCtrlCmdGetActiveViewportPointIn_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*flags=*/ 0x211u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3720104u, + /*paramSize=*/ sizeof(NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispSwObj.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswobjCtrlCmdGetActiveViewportPointIn" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSwObj = +{ + /*numEntries=*/ 4, + /*pExportEntries=*/ __nvoc_exported_method_def_DispSwObj +}; + +void __nvoc_dtor_DisplayApi(DisplayApi*); +void __nvoc_dtor_DispSwObj(DispSwObj *pThis) { + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispSwObj(DispSwObj *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispSwObj(DispSwObj *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSwObj_fail_DisplayApi; + __nvoc_init_dataField_DispSwObj(pThis); + + status = __nvoc_dispswobjConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSwObj_fail__init; + goto __nvoc_ctor_DispSwObj_exit; // Success + +__nvoc_ctor_DispSwObj_fail__init: + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); +__nvoc_ctor_DispSwObj_fail_DisplayApi: +__nvoc_ctor_DispSwObj_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispSwObj_1(DispSwObj *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispswobjCtrlCmdIsModePossible__ = &dispswobjCtrlCmdIsModePossible_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispswobjCtrlCmdIsModePossibleOrSettings__ = &dispswobjCtrlCmdIsModePossibleOrSettings_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispswobjCtrlCmdVideoAdaptiveRefreshRate__ = &dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + pThis->__dispswobjCtrlCmdGetActiveViewportPointIn__ = &dispswobjCtrlCmdGetActiveViewportPointIn_IMPL; +#endif + + pThis->__dispswobjShareCallback__ = &__nvoc_thunk_RmResource_dispswobjShareCallback; + + pThis->__dispswobjControl__ = &__nvoc_thunk_DisplayApi_dispswobjControl; + + pThis->__dispswobjAccessCallback__ = &__nvoc_thunk_RmResource_dispswobjAccessCallback; + + pThis->__dispswobjGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispswobjGetMemInterMapParams; + + pThis->__dispswobjGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispswobjGetMemoryMappingDescriptor; + + pThis->__dispswobjCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispswobjCheckMemInterUnmap; + + pThis->__dispswobjSetNotificationShare__ = &__nvoc_thunk_Notifier_dispswobjSetNotificationShare; + + pThis->__dispswobjControlFilter__ = &__nvoc_thunk_RsResource_dispswobjControlFilter; + + pThis->__dispswobjGetRefCount__ = &__nvoc_thunk_RsResource_dispswobjGetRefCount; + + 
pThis->__dispswobjUnregisterEvent__ = &__nvoc_thunk_Notifier_dispswobjUnregisterEvent; + + pThis->__dispswobjUnmap__ = &__nvoc_thunk_RsResource_dispswobjUnmap; + + pThis->__dispswobjControl_Prologue__ = &__nvoc_thunk_DisplayApi_dispswobjControl_Prologue; + + pThis->__dispswobjCanCopy__ = &__nvoc_thunk_RsResource_dispswobjCanCopy; + + pThis->__dispswobjMapTo__ = &__nvoc_thunk_RsResource_dispswobjMapTo; + + pThis->__dispswobjAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispswobjAddAdditionalDependants; + + pThis->__dispswobjPreDestruct__ = &__nvoc_thunk_RsResource_dispswobjPreDestruct; + + pThis->__dispswobjUnmapFrom__ = &__nvoc_thunk_RsResource_dispswobjUnmapFrom; + + pThis->__dispswobjGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispswobjGetNotificationListPtr; + + pThis->__dispswobjControl_Epilogue__ = &__nvoc_thunk_DisplayApi_dispswobjControl_Epilogue; + + pThis->__dispswobjGetNotificationShare__ = &__nvoc_thunk_Notifier_dispswobjGetNotificationShare; + + pThis->__dispswobjControlLookup__ = &__nvoc_thunk_RsResource_dispswobjControlLookup; + + pThis->__dispswobjMap__ = &__nvoc_thunk_RsResource_dispswobjMap; + + pThis->__dispswobjGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispswobjGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DispSwObj(DispSwObj *pThis) { + __nvoc_init_funcTable_DispSwObj_1(pThis); +} + +void __nvoc_init_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_init_DispSwObj(DispSwObj *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DispSwObj = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; + __nvoc_init_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + __nvoc_init_funcTable_DispSwObj(pThis); +} + +NV_STATUS __nvoc_objCreate_DispSwObj(DispSwObj **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispSwObj *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DispSwObj)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispSwObj)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispSwObj); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DispSwObj(pThis, pRmhalspecowner); + status = 
__nvoc_ctor_DispSwObj(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispSwObj_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispSwObj_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispSwObj(DispSwObj **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispSwObj(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x41f4f2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +void __nvoc_init_DispCommon(DispCommon*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DispCommon(DispCommon*); +NV_STATUS __nvoc_ctor_DispCommon(DispCommon*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispCommon(DispCommon*); +void __nvoc_dtor_DispCommon(DispCommon*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCommon; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_DispCommon = { + /*pClassDef=*/ &__nvoc_class_def_DispCommon, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispCommon, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ 
NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispCommon_DisplayApi = { + /*pClassDef=*/ &__nvoc_class_def_DisplayApi, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispCommon, __nvoc_base_DisplayApi), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispCommon = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DispCommon_DispCommon, + &__nvoc_rtti_DispCommon_DisplayApi, + &__nvoc_rtti_DispCommon_Notifier, + &__nvoc_rtti_DispCommon_INotifier, + &__nvoc_rtti_DispCommon_RmResource, + &__nvoc_rtti_DispCommon_RmResourceCommon, + &__nvoc_rtti_DispCommon_RsResource, + &__nvoc_rtti_DispCommon_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispCommon), + /*classId=*/ classId(DispCommon), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispCommon", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispCommon, + /*pCastInfo=*/ &__nvoc_castinfo_DispCommon, + /*pExportInfo=*/ &__nvoc_export_info_DispCommon +}; + +static NvBool __nvoc_thunk_RmResource_dispcmnShareCallback(struct DispCommon *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispcmnControl(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return dispapiControl((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispCommon_DisplayApi.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RmResource_dispcmnAccessCallback(struct DispCommon *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcmnGetMemInterMapParams(struct DispCommon *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCommon_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcmnGetMemoryMappingDescriptor(struct DispCommon *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCommon_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispcmnCheckMemInterUnmap(struct DispCommon *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCommon_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_dispcmnSetNotificationShare(struct DispCommon *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnControlFilter(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_dispcmnGetRefCount(struct DispCommon *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispcmnUnregisterEvent(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnUnmap(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_DisplayApi_dispcmnControl_Prologue(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return dispapiControl_Prologue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispCommon_DisplayApi.offset), pCallContext, pRsParams); +} + +static NvBool __nvoc_thunk_RsResource_dispcmnCanCopy(struct DispCommon *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnMapTo(struct DispCommon *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_dispcmnAddAdditionalDependants(struct RsClient *pClient, struct DispCommon *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_dispcmnPreDestruct(struct DispCommon *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnUnmapFrom(struct DispCommon *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispcmnGetNotificationListPtr(struct DispCommon *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset)); +} + +static void __nvoc_thunk_DisplayApi_dispcmnControl_Epilogue(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + dispapiControl_Epilogue((struct DisplayApi *)(((unsigned char *)pDisplayApi) + __nvoc_rtti_DispCommon_DisplayApi.offset), pCallContext, pRsParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispcmnGetNotificationShare(struct DispCommon *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnControlLookup(struct DispCommon *pResource, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispcmnMap(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCommon_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispcmnGetOrAllocNotifShare(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispCommon_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispCommon[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetNumHeads_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730102u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetNumHeads" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetScanline_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730108u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetScanline" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetVblankCounter_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730109u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetVblankCounter" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetVblankEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73010au, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetVblankEnable" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetSuppported_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 
0x730120u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetSuppported" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetConnectState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730122u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetConnectState" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730123u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugConfig" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730125u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHeadRoutingMap" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetActive_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730126u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetActive" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730138u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetCapsV2" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetInternalDisplays_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73015bu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetInternalDisplays" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void 
(*)(void)) dispcmnCtrlCmdSystemGetBootDisplays_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730166u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetBootDisplays" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73017bu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugUnplugState" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdClearELVBlock_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73017du, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdClearELVBlock" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73017eu, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemArmLightweightSupervisor" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730184u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemConfigVrrPstateSwitch" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730190u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemQueryDisplayIdsWithMux" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730196u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemAllocateDisplayBandwidth" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730197u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemGetHotplugEventConfig" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730198u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemSetHotplugEventConfig" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpRecordChannelRegisters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73019bu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpRecordChannelRegisters" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73019cu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSystemCheckSidebandI2cSupport" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetI2cPortid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730211u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetI2cPortid" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetType_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730240u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetType" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
dispcmnCtrlCmdSpecificFakeDevice_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730243u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificFakeDevice" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetEdidV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730245u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetEdidV2" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetEdidV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730246u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetEdidV2" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetConnectorData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730250u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetConnectorData" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730273u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiEnable" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificCtrlHdmi_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730274u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificCtrlHdmi" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730287u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetAllHeadMask" 
+#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetOdPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730288u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetOdPacket" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730289u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetOdPacketCtrl" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetPclkLimit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73028au, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetPclkLimit" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificOrGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73028bu, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificOrGetInfo" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730293u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiSinkCaps" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetMonitorPower_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730295u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetMonitorPower" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73029au, + /*paramSize=*/ 
sizeof(NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a1u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificApplyEdidOverrideV2" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a2u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdmiGpuCaps" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificDisplayChange_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a4u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificDisplayChange" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a6u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificGetHdmiScdcData" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a7u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificIsDirectmodeDisplay" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a8u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation" +#endif + }, + { /* [44] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302a9u, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificSetSharedGenericPacket" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302aau, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificAcquireSharedGenericPacket" +#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x7302abu, + /*paramSize=*/ sizeof(NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdSpecificReleaseSharedGenericPacket" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x730401u, + /*paramSize=*/ sizeof(NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdInternalGetHotplugUnplugState" +#endif + }, + { /* [48] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731140u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetInfo" +#endif + }, + { /* [49] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731142u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetDisplayportDongleInfo" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 
0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731144u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpSetEldAudioCaps" +#endif + }, + { /* [51] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetSpreadSpectrum_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73114cu, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetSpreadSpectrum" +#endif + }, + { /* [52] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73114eu, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpUpdateDynamicDfpCache" +#endif + }, + { /* [53] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpSetAudioEnable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731150u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpSetAudioEnable" +#endif + }, + { /* [54] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpAssignSor_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731152u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpAssignSor" +#endif + }, + { /* [55] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetPadlinkMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731153u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetPadlinkMask" +#endif + }, + { /* [56] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetLcdGpioPinNum_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731154u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetLcdGpioPinNum" +#endif + }, + { /* [57] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ 
(void (*)(void)) dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731156u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpConfigTwoHeadOneOr" +#endif + }, + { /* [58] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpDscCrcControl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731157u, + /*paramSize=*/ sizeof(NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpDscCrcControl" +#endif + }, + { /* [59] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpInitMuxData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731158u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpInitMuxData" +#endif + }, + { /* [60] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpSwitchDispMux_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731160u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpSwitchDispMux" +#endif + }, + { /* [61] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpRunPreDispMuxOperations_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731161u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpRunPreDispMuxOperations" +#endif + }, + { /* [62] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpRunPostDispMuxOperations_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731162u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpRunPostDispMuxOperations" +#endif + }, + { /* [63] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetDispMuxStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731163u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetDispMuxStatus" +#endif + }, + { /* [64] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731166u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpGetDsiModeTiming" +#endif + }, + { /* [65] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpAuxchCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731341u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_AUXCH_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpAuxchCtrl" +#endif + }, + { /* [66] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpAuxchSetSema_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731342u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpAuxchSetSema" +#endif + }, + { /* [67] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731343u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpCtrl" +#endif + }, + { /* [68] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetLaneData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731345u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_LANE_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetLaneData" +#endif + }, + { /* [69] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetLaneData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731346u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_LANE_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetLaneData" +#endif + }, + { /* [70] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTestpattern_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731347u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetTestpattern" +#endif + }, + { /* [71] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731351u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data" +#endif + }, + { /* [72] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731352u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data" +#endif + }, + { /* [73] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpMainLinkCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731356u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpMainLinkCtrl" +#endif + }, + { /* [74] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetAudioMuteStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731358u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetAudioMuteStream" +#endif + }, + { /* [75] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetAudioMuteStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731359u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetAudioMuteStream" +#endif + }, + { /* [76] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpASSRCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73135au, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_ASSR_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpASSRCtrl" +#endif + }, + { /* [77] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73135bu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpTopologyAllocateDisplayId" +#endif + }, + { /* [78] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73135cu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpTopologyFreeDisplayId" +#endif + }, + { /* [79] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetLinkConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731360u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetLinkConfig" +#endif + }, + { /* [80] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetEDPData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731361u, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_EDP_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetEDPData" +#endif + }, + { /* [81] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731362u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigStream" +#endif + }, + { /* [82] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetRateGov_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731363u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetRateGov" +#endif + }, + { /* [83] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetManualDisplayPort_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731365u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetManualDisplayPort" +#endif + }, + 
{ /* [84] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetEcf_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731366u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_ECF_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetEcf" +#endif + }, + { /* [85] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSendACT_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731367u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSendACT" +#endif + }, + { /* [86] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + /*flags=*/ 0x212u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731369u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetCaps" +#endif + }, + { /* [87] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetMSAProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136au, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetMSAProperties" +#endif + }, + { /* [88] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136bu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGenerateFakeInterrupt" +#endif + }, + { /* [89] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigRadScratchReg_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136cu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigRadScratchReg" +#endif + }, + { /* [90] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136eu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigSingleHeadMultiStream" +#endif + }, + { /* [91] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTriggerSelect_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73136fu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetTriggerSelect" +#endif + }, + { /* [92] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetTriggerAll_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731370u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetTriggerAll" +#endif + }, + { /* [93] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetAuxLogData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731373u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetAuxLogData" +#endif + }, + { /* [94] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731377u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigIndexedLinkRates" +#endif + }, + { /* [95] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731378u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpSetStereoMSAProperties" +#endif + }, + { /* [96] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigureFec_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137au, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigureFec" +#endif + }, + { /* [97] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpConfigMacroPad_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137bu, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpConfigMacroPad" +#endif + }, + { /* [98] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpAuxchI2cTransferCtrl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137cu, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpAuxchI2cTransferCtrl" +#endif + }, + { /* [99] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpEnableVrr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137du, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpEnableVrr" +#endif + }, + { /* [100] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetGenericInfoframe_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137eu, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetGenericInfoframe" +#endif + }, + { /* [101] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDpGetMsaAttributes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x73137fu, + /*paramSize=*/ sizeof(NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDpGetMsaAttributes" +#endif + }, + { /* [102] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispcmnCtrlCmdDfpInternalLcdOverdrive_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x731380u, + /*paramSize=*/ sizeof(NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DispCommon.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispcmnCtrlCmdDfpInternalLcdOverdrive" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCommon = +{ + /*numEntries=*/ 103, + /*pExportEntries=*/ __nvoc_exported_method_def_DispCommon +}; + +void __nvoc_dtor_DisplayApi(DisplayApi*); +void __nvoc_dtor_DispCommon(DispCommon *pThis) { + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispCommon(DispCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_DisplayApi(DisplayApi* , RmHalspecOwner* , struct CALL_CONTEXT *, struct 
RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispCommon(DispCommon *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCommon_fail_DisplayApi; + __nvoc_init_dataField_DispCommon(pThis); + + status = __nvoc_dispcmnConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispCommon_fail__init; + goto __nvoc_ctor_DispCommon_exit; // Success + +__nvoc_ctor_DispCommon_fail__init: + __nvoc_dtor_DisplayApi(&pThis->__nvoc_base_DisplayApi); +__nvoc_ctor_DispCommon_fail_DisplayApi: +__nvoc_ctor_DispCommon_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispCommon_1(DispCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetVblankCounter__ = &dispcmnCtrlCmdSystemGetVblankCounter_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetVblankEnable__ = &dispcmnCtrlCmdSystemGetVblankEnable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSystemGetInternalDisplays__ = &dispcmnCtrlCmdSystemGetInternalDisplays_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpEnableVrr__ = &dispcmnCtrlCmdDpEnableVrr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdClearELVBlock__ = &dispcmnCtrlCmdClearELVBlock_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificDisplayChange__ = &dispcmnCtrlCmdSpecificDisplayChange_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpGetSpreadSpectrum__ = &dispcmnCtrlCmdDfpGetSpreadSpectrum_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpGetLcdGpioPinNum__ = &dispcmnCtrlCmdDfpGetLcdGpioPinNum_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetAudioMuteStream__ = &dispcmnCtrlCmdDpGetAudioMuteStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDpAuxchI2cTransferCtrl__ = &dispcmnCtrlCmdDpAuxchI2cTransferCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpASSRCtrl__ = &dispcmnCtrlCmdDpASSRCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetEcf__ = &dispcmnCtrlCmdDpSetEcf_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdDfpSwitchDispMux__ = &dispcmnCtrlCmdDfpSwitchDispMux_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpRunPreDispMuxOperations__ = &dispcmnCtrlCmdDfpRunPreDispMuxOperations_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpRunPostDispMuxOperations__ = &dispcmnCtrlCmdDfpRunPostDispMuxOperations_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpGetDispMuxStatus__ = &dispcmnCtrlCmdDfpGetDispMuxStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpRecordChannelRegisters__ = &dispcmnCtrlCmdDfpRecordChannelRegisters_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdDfpInternalLcdOverdrive__ = &dispcmnCtrlCmdDfpInternalLcdOverdrive_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetCapsV2__ = &dispcmnCtrlCmdSystemGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSystemGetNumHeads__ = &dispcmnCtrlCmdSystemGetNumHeads_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetScanline__ = &dispcmnCtrlCmdSystemGetScanline_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSystemGetSuppported__ = &dispcmnCtrlCmdSystemGetSuppported_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetConnectState__ = &dispcmnCtrlCmdSystemGetConnectState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__dispcmnCtrlCmdSystemGetHotplugUnplugState__ = &dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__dispcmnCtrlCmdInternalGetHotplugUnplugState__ = &dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetHeadRoutingMap__ = &dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetActive__ = &dispcmnCtrlCmdSystemGetActive_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetBootDisplays__ = &dispcmnCtrlCmdSystemGetBootDisplays_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__ = &dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__ = &dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__ = &dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetHotplugConfig__ = &dispcmnCtrlCmdSystemGetHotplugConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemGetHotplugEventConfig__ = &dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSystemSetHotplugEventConfig__ = &dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSystemArmLightweightSupervisor__ = &dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__ = &dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSpecificGetType__ = &dispcmnCtrlCmdSpecificGetType_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetEdidV2__ = &dispcmnCtrlCmdSpecificGetEdidV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__dispcmnCtrlCmdSpecificSetEdidV2__ = &dispcmnCtrlCmdSpecificSetEdidV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + 
pThis->__dispcmnCtrlCmdSpecificFakeDevice__ = &dispcmnCtrlCmdSpecificFakeDevice_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetConnectorData__ = &dispcmnCtrlCmdSpecificGetConnectorData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiEnable__ = &dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificCtrlHdmi__ = &dispcmnCtrlCmdSpecificCtrlHdmi_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetAllHeadMask__ = &dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetOdPacket__ = &dispcmnCtrlCmdSpecificSetOdPacket_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__ = &dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetSharedGenericPacket__ = &dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__ = &dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetOdPacketCtrl__ = &dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdSpecificOrGetInfo__ = &dispcmnCtrlCmdSpecificOrGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificGetPclkLimit__ = &dispcmnCtrlCmdSpecificGetPclkLimit_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__ = &dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetMonitorPower__ = &dispcmnCtrlCmdSpecificSetMonitorPower_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__ = &dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__ = &dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdSpecificGetI2cPortid__ = &dispcmnCtrlCmdSpecificGetI2cPortid_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__ = &dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificGetHdmiScdcData__ = &dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__ = &dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__ = &dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdDfpGetInfo__ = &dispcmnCtrlCmdDfpGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + 
pThis->__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__ = &dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpSetEldAudioCaps__ = &dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpSetAudioEnable__ = &dispcmnCtrlCmdDfpSetAudioEnable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__ = &dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpAssignSor__ = &dispcmnCtrlCmdDfpAssignSor_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpDscCrcControl__ = &dispcmnCtrlCmdDfpDscCrcControl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDfpInitMuxData__ = &dispcmnCtrlCmdDfpInitMuxData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpGetDsiModeTiming__ = &dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__ = &dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDfpGetPadlinkMask__ = &dispcmnCtrlCmdDfpGetPadlinkMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpAuxchCtrl__ = &dispcmnCtrlCmdDpAuxchCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpAuxchSetSema__ = &dispcmnCtrlCmdDpAuxchSetSema_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpCtrl__ = &dispcmnCtrlCmdDpCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetLaneData__ = &dispcmnCtrlCmdDpGetLaneData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetLaneData__ = &dispcmnCtrlCmdDpSetLaneData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetTestpattern__ = &dispcmnCtrlCmdDpSetTestpattern_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpMainLinkCtrl__ = &dispcmnCtrlCmdDpMainLinkCtrl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__dispcmnCtrlCmdDpSetAudioMuteStream__ = &dispcmnCtrlCmdDpSetAudioMuteStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetLinkConfig__ = &dispcmnCtrlCmdDpGetLinkConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetEDPData__ = &dispcmnCtrlCmdDpGetEDPData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpTopologyAllocateDisplayId__ = &dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpTopologyFreeDisplayId__ = &dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigStream__ = &dispcmnCtrlCmdDpConfigStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__ = &dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetRateGov__ = &dispcmnCtrlCmdDpSetRateGov_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSendACT__ = &dispcmnCtrlCmdDpSendACT_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetManualDisplayPort__ = &dispcmnCtrlCmdDpSetManualDisplayPort_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x212u) + pThis->__dispcmnCtrlCmdDpGetCaps__ = &dispcmnCtrlCmdDpGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetMSAProperties__ = &dispcmnCtrlCmdDpSetMSAProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetStereoMSAProperties__ = &dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__dispcmnCtrlCmdDpGenerateFakeInterrupt__ = &dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigRadScratchReg__ = &dispcmnCtrlCmdDpConfigRadScratchReg_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetTriggerSelect__ = &dispcmnCtrlCmdDpSetTriggerSelect_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetTriggerAll__ = &dispcmnCtrlCmdDpSetTriggerAll_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetAuxLogData__ = &dispcmnCtrlCmdDpGetAuxLogData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigIndexedLinkRates__ = &dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigureFec__ = &dispcmnCtrlCmdDpConfigureFec_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetGenericInfoframe__ = &dispcmnCtrlCmdDpGetGenericInfoframe_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetMsaAttributes__ = &dispcmnCtrlCmdDpGetMsaAttributes_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpConfigMacroPad__ = &dispcmnCtrlCmdDpConfigMacroPad_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__ = &dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__ = &dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL; +#endif + + pThis->__dispcmnShareCallback__ = &__nvoc_thunk_RmResource_dispcmnShareCallback; + + pThis->__dispcmnControl__ = &__nvoc_thunk_DisplayApi_dispcmnControl; + + pThis->__dispcmnAccessCallback__ = &__nvoc_thunk_RmResource_dispcmnAccessCallback; + + pThis->__dispcmnGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispcmnGetMemInterMapParams; + + pThis->__dispcmnGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispcmnGetMemoryMappingDescriptor; + + pThis->__dispcmnCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispcmnCheckMemInterUnmap; + + pThis->__dispcmnSetNotificationShare__ = &__nvoc_thunk_Notifier_dispcmnSetNotificationShare; + + pThis->__dispcmnControlFilter__ = &__nvoc_thunk_RsResource_dispcmnControlFilter; + + pThis->__dispcmnGetRefCount__ = 
&__nvoc_thunk_RsResource_dispcmnGetRefCount; + + pThis->__dispcmnUnregisterEvent__ = &__nvoc_thunk_Notifier_dispcmnUnregisterEvent; + + pThis->__dispcmnUnmap__ = &__nvoc_thunk_RsResource_dispcmnUnmap; + + pThis->__dispcmnControl_Prologue__ = &__nvoc_thunk_DisplayApi_dispcmnControl_Prologue; + + pThis->__dispcmnCanCopy__ = &__nvoc_thunk_RsResource_dispcmnCanCopy; + + pThis->__dispcmnMapTo__ = &__nvoc_thunk_RsResource_dispcmnMapTo; + + pThis->__dispcmnAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispcmnAddAdditionalDependants; + + pThis->__dispcmnPreDestruct__ = &__nvoc_thunk_RsResource_dispcmnPreDestruct; + + pThis->__dispcmnUnmapFrom__ = &__nvoc_thunk_RsResource_dispcmnUnmapFrom; + + pThis->__dispcmnGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispcmnGetNotificationListPtr; + + pThis->__dispcmnControl_Epilogue__ = &__nvoc_thunk_DisplayApi_dispcmnControl_Epilogue; + + pThis->__dispcmnGetNotificationShare__ = &__nvoc_thunk_Notifier_dispcmnGetNotificationShare; + + pThis->__dispcmnControlLookup__ = &__nvoc_thunk_RsResource_dispcmnControlLookup; + + pThis->__dispcmnMap__ = &__nvoc_thunk_RsResource_dispcmnMap; + + pThis->__dispcmnGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispcmnGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DispCommon(DispCommon *pThis) { + __nvoc_init_funcTable_DispCommon_1(pThis); +} + +void __nvoc_init_DisplayApi(DisplayApi*, RmHalspecOwner* ); +void __nvoc_init_DispCommon(DispCommon *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DispCommon = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_DisplayApi.__nvoc_base_Notifier; + pThis->__nvoc_pbase_DisplayApi = &pThis->__nvoc_base_DisplayApi; + __nvoc_init_DisplayApi(&pThis->__nvoc_base_DisplayApi, pRmhalspecowner); + __nvoc_init_funcTable_DispCommon(pThis); +} + +NV_STATUS __nvoc_objCreate_DispCommon(DispCommon **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispCommon *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DispCommon)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispCommon)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispCommon); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_DisplayApi.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DispCommon(pThis, pRmhalspecowner); + 
status = __nvoc_ctor_DispCommon(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispCommon_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispCommon_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispCommon(DispCommon **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispCommon(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_disp_objs_nvoc.h b/src/nvidia/generated/g_disp_objs_nvoc.h new file mode 100644 index 000000000..1af355547 --- /dev/null +++ b/src/nvidia/generated/g_disp_objs_nvoc.h @@ -0,0 +1,2340 @@ +#ifndef _G_DISP_OBJS_NVOC_H_ +#define _G_DISP_OBJS_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing the display - both Disp and DispCommon +* entries with their insides (DispChannelList and DispDmaControlList) +* +******************************************************************************/ + +#include "g_disp_objs_nvoc.h" + +#ifndef DISP_OBJS_H +#define DISP_OBJS_H + +#include "rmapi/event.h" +#include "rmapi/resource.h" + +#include "gpu/gpu_halspec.h" + +/* + * On T234, RM is in kernel mode, so when RM is running in kernel mode it + * does not allow usermode clients like MODs to call control calls that are + * marked as KERNEL_PRIVILEGED. + * So defining new macro DISPLAY_PRIVILEGED(i.e PRIVILEGED) for Tegra and mark + * control calls needed by MODs with this so that MODs running as root can call + * these control calls. However keeping same privilege level for DGPUs which + * does not change the current behaviour. 
+ */ +#define DISPLAY_PRIVILEGED KERNEL_PRIVILEGED + +#include "ctrl/ctrl0073.h" +#include "ctrl/ctrl5070/ctrl5070event.h" +#include "ctrl/ctrl5070/ctrl5070or.h" +#include "ctrl/ctrl5070/ctrl5070seq.h" +#include "ctrl/ctrl5070/ctrl5070system.h" +#include "ctrl/ctrlc370/ctrlc370chnc.h" +#include "ctrl/ctrlc370/ctrlc370event.h" +#include "ctrl/ctrlc370/ctrlc370rg.h" +#include "ctrl/ctrlc370/ctrlc370verif.h" +#include "ctrl/ctrlc372/ctrlc372base.h" +#include "ctrl/ctrlc372/ctrlc372chnc.h" + +// **************************************************************************** +// Type definitions +// **************************************************************************** + +struct OBJGPU; +struct Device; +struct Memory; +struct RsResource; +struct RmResource; +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + + +#define DISPAPI_GET_GPU(pDispRes) staticCast(pDispRes, DisplayApi)->pGpuInRmctrl + +#define DISPAPI_GET_GPUGRP(pDispRes) staticCast(pDispRes, DisplayApi)->pGpuGrp + +/*! + * Base class for many of display's RsResource subclasses + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DisplayApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + NV_STATUS (*__dispapiControl__)(struct DisplayApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispapiControl_Prologue__)(struct DisplayApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__dispapiControl_Epilogue__)(struct DisplayApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispapiShareCallback__)(struct DisplayApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispapiCheckMemInterUnmap__)(struct DisplayApi *, NvBool); + NvBool (*__dispapiAccessCallback__)(struct DisplayApi *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispapiGetMemInterMapParams__)(struct DisplayApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispapiGetMemoryMappingDescriptor__)(struct DisplayApi *, struct MEMORY_DESCRIPTOR **); + void (*__dispapiSetNotificationShare__)(struct DisplayApi *, struct NotifShare *); + NV_STATUS (*__dispapiControlFilter__)(struct DisplayApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__dispapiGetRefCount__)(struct DisplayApi *); + NV_STATUS (*__dispapiUnregisterEvent__)(struct DisplayApi *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__dispapiUnmap__)(struct DisplayApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NvBool (*__dispapiCanCopy__)(struct DisplayApi *); + NV_STATUS (*__dispapiMapTo__)(struct DisplayApi *, RS_RES_MAP_TO_PARAMS *); + void (*__dispapiAddAdditionalDependants__)(struct RsClient *, struct DisplayApi *, RsResourceRef *); + void (*__dispapiPreDestruct__)(struct 
DisplayApi *); + NV_STATUS (*__dispapiUnmapFrom__)(struct DisplayApi *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__dispapiGetNotificationListPtr__)(struct DisplayApi *); + struct NotifShare *(*__dispapiGetNotificationShare__)(struct DisplayApi *); + NV_STATUS (*__dispapiControlLookup__)(struct DisplayApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispapiMap__)(struct DisplayApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dispapiGetOrAllocNotifShare__)(struct DisplayApi *, NvHandle, NvHandle, struct NotifShare **); + struct OBJGPU *pGpuInRmctrl; + struct OBJGPUGRP *pGpuGrp; + NvBool bBcResource; + NvU32 *pNotifyActions[8]; + NvU32 numNotifiers; + NvHandle hNotifierMemory; + struct Memory *pNotifierMemory; +}; + +#ifndef __NVOC_CLASS_DisplayApi_TYPEDEF__ +#define __NVOC_CLASS_DisplayApi_TYPEDEF__ +typedef struct DisplayApi DisplayApi; +#endif /* __NVOC_CLASS_DisplayApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayApi +#define __nvoc_class_id_DisplayApi 0xe9980c +#endif /* __nvoc_class_id_DisplayApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DisplayApi; + +#define __staticCast_DisplayApi(pThis) \ + ((pThis)->__nvoc_pbase_DisplayApi) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DisplayApi(pThis) ((DisplayApi*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DisplayApi(pThis) \ + ((DisplayApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DisplayApi))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DisplayApi(DisplayApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DisplayApi(DisplayApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DisplayApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DisplayApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispapiControl(pDisplayApi, pCallContext, pParams) dispapiControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispapiControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispapiControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispapiControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispapiControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispapiGetMemInterMapParams(pRmResource, pParams) dispapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispapiSetNotificationShare(pNotifier, pNotifShare) dispapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispapiControlFilter(pResource, pCallContext, pParams) dispapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispapiGetRefCount(pResource) dispapiGetRefCount_DISPATCH(pResource) +#define 
dispapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispapiUnmap(pResource, pCallContext, pCpuMapping) dispapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispapiCanCopy(pResource) dispapiCanCopy_DISPATCH(pResource) +#define dispapiMapTo(pResource, pParams) dispapiMapTo_DISPATCH(pResource, pParams) +#define dispapiAddAdditionalDependants(pClient, pResource, pReference) dispapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispapiPreDestruct(pResource) dispapiPreDestruct_DISPATCH(pResource) +#define dispapiUnmapFrom(pResource, pParams) dispapiUnmapFrom_DISPATCH(pResource, pParams) +#define dispapiGetNotificationListPtr(pNotifier) dispapiGetNotificationListPtr_DISPATCH(pNotifier) +#define dispapiGetNotificationShare(pNotifier) dispapiGetNotificationShare_DISPATCH(pNotifier) +#define dispapiControlLookup(pResource, pParams, ppEntry) dispapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispapiMap(pResource, pCallContext, pParams, pCpuMapping) dispapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS dispapiSetUnicastAndSynchronize_KERNEL(struct DisplayApi *pDisplayApi, struct OBJGPUGRP *pGpuGroup, struct OBJGPU **ppGpu, NvU32 subDeviceInstance); + +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispapiSetUnicastAndSynchronize(struct DisplayApi *pDisplayApi, struct OBJGPUGRP *pGpuGroup, struct OBJGPU **ppGpu, NvU32 subDeviceInstance) { + NV_ASSERT_FAILED_PRECOMP("DisplayApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispapiSetUnicastAndSynchronize(pDisplayApi, pGpuGroup, ppGpu, subDeviceInstance) dispapiSetUnicastAndSynchronize_KERNEL(pDisplayApi, pGpuGroup, ppGpu, subDeviceInstance) +#endif //__nvoc_disp_objs_h_disabled + +#define dispapiSetUnicastAndSynchronize_HAL(pDisplayApi, pGpuGroup, ppGpu, subDeviceInstance) dispapiSetUnicastAndSynchronize(pDisplayApi, pGpuGroup, ppGpu, subDeviceInstance) + +NV_STATUS dispapiControl_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS dispapiControl_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__dispapiControl__(pDisplayApi, pCallContext, pParams); +} + +NV_STATUS dispapiControl_Prologue_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); + +static inline NV_STATUS dispapiControl_Prologue_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__dispapiControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +void dispapiControl_Epilogue_IMPL(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams); + +static inline void dispapiControl_Epilogue_DISPATCH(struct DisplayApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__dispapiControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); 
+} + +static inline NvBool dispapiShareCallback_DISPATCH(struct DisplayApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__dispapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispapiCheckMemInterUnmap_DISPATCH(struct DisplayApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NvBool dispapiAccessCallback_DISPATCH(struct DisplayApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispapiGetMemInterMapParams_DISPATCH(struct DisplayApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispapiGetMemoryMappingDescriptor_DISPATCH(struct DisplayApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline void dispapiSetNotificationShare_DISPATCH(struct DisplayApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispapiControlFilter_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 dispapiGetRefCount_DISPATCH(struct DisplayApi *pResource) { + return pResource->__dispapiGetRefCount__(pResource); +} + +static inline NV_STATUS dispapiUnregisterEvent_DISPATCH(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispapiUnmap_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__dispapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool dispapiCanCopy_DISPATCH(struct DisplayApi *pResource) { + return pResource->__dispapiCanCopy__(pResource); +} + +static inline NV_STATUS dispapiMapTo_DISPATCH(struct DisplayApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispapiMapTo__(pResource, pParams); +} + +static inline void dispapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DisplayApi *pResource, RsResourceRef *pReference) { + pResource->__dispapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void dispapiPreDestruct_DISPATCH(struct DisplayApi *pResource) { + pResource->__dispapiPreDestruct__(pResource); +} + +static inline NV_STATUS dispapiUnmapFrom_DISPATCH(struct DisplayApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispapiUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *dispapiGetNotificationListPtr_DISPATCH(struct DisplayApi *pNotifier) { + return pNotifier->__dispapiGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *dispapiGetNotificationShare_DISPATCH(struct DisplayApi *pNotifier) { + return pNotifier->__dispapiGetNotificationShare__(pNotifier); 
+} + +static inline NV_STATUS dispapiControlLookup_DISPATCH(struct DisplayApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispapiMap_DISPATCH(struct DisplayApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__dispapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispapiGetOrAllocNotifShare_DISPATCH(struct DisplayApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispapiConstruct_IMPL(struct DisplayApi *arg_pDisplayApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispapiConstruct(arg_pDisplayApi, arg_pCallContext, arg_pParams) dispapiConstruct_IMPL(arg_pDisplayApi, arg_pCallContext, arg_pParams) +void dispapiDestruct_IMPL(struct DisplayApi *pDisplayApi); +#define __nvoc_dispapiDestruct(pDisplayApi) dispapiDestruct_IMPL(pDisplayApi) +NV_STATUS dispapiCtrlCmdEventSetNotification_IMPL(struct DisplayApi *pDisplayApi, NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams); +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispapiCtrlCmdEventSetNotification(struct DisplayApi *pDisplayApi, NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams) { + NV_ASSERT_FAILED_PRECOMP("DisplayApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispapiCtrlCmdEventSetNotification(pDisplayApi, pSetEventParams) dispapiCtrlCmdEventSetNotification_IMPL(pDisplayApi, pSetEventParams) +#endif //__nvoc_disp_objs_h_disabled + +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_DISPLAY. Parent for all other display + * resources (channels, etc). Allocated under a device or subdevice. + * + * Only one instance of this class is allowed per-GPU. 
Multi-instance restrictions + * are enforced by resource_list.h + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispObject { + const struct NVOC_RTTI *__nvoc_rtti; + struct DisplayApi __nvoc_base_DisplayApi; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + struct DispObject *__nvoc_pbase_DispObject; + NV_STATUS (*__dispobjCtrlCmdGetPinsetCount__)(struct DispObject *, NV5070_CTRL_GET_PINSET_COUNT_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetPinsetPeer__)(struct DispObject *, NV5070_CTRL_GET_PINSET_PEER_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetMempoolWARForBlitTearing__)(struct DispObject *, NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetPinsetLockpins__)(struct DispObject *, NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetFrameLockHeaderLockPins__)(struct DispObject *, NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetPiorSeqCtl__)(struct DispObject *, NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetPiorSeqCtl__)(struct DispObject *, NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetPiorOpMode__)(struct DispObject *, NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetPiorOpMode__)(struct DispObject *, NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdEventSetMemoryNotifies__)(struct DispObject *, NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetRmFreeFlags__)(struct DispObject *, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdIMPSetGetParameter__)(struct DispObject *, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgStatus__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgUnderflowProp__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetRgUnderflowProp__)(struct DispObject *, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgFliplockProp__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetRgFliplockProp__)(struct DispObject *, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgConnectedLockpin__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgConnectedLockpinStateless__)(struct DispObject *, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetRgScanLine__)(struct DispObject *, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetSorSeqCtl__)(struct DispObject *, NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetSorSeqCtl__)(struct DispObject *, NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSeqProgSpeed__)(struct DispObject *, NV5070_CTRL_SEQ_PROG_SPEED_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetSorPwm__)(struct DispObject *, NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetSorPwm__)(struct DispObject *, 
NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdGetSorOpMode__)(struct DispObject *, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetSorOpMode__)(struct DispObject *, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSetSorFlushMode__)(struct DispObject *, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdSystemGetCapsV2__)(struct DispObject *, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__dispobjCtrlCmdEventSetTrigger__)(struct DispObject *); + NvBool (*__dispobjShareCallback__)(struct DispObject *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispobjControl__)(struct DispObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispobjAccessCallback__)(struct DispObject *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispobjGetMemInterMapParams__)(struct DispObject *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispobjGetMemoryMappingDescriptor__)(struct DispObject *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispobjCheckMemInterUnmap__)(struct DispObject *, NvBool); + void (*__dispobjSetNotificationShare__)(struct DispObject *, struct NotifShare *); + NV_STATUS (*__dispobjControlFilter__)(struct DispObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__dispobjGetRefCount__)(struct DispObject *); + NV_STATUS (*__dispobjUnregisterEvent__)(struct DispObject *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__dispobjUnmap__)(struct DispObject *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__dispobjControl_Prologue__)(struct DispObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispobjCanCopy__)(struct DispObject *); + NV_STATUS (*__dispobjMapTo__)(struct DispObject *, RS_RES_MAP_TO_PARAMS *); + void (*__dispobjAddAdditionalDependants__)(struct RsClient *, struct DispObject *, RsResourceRef *); + void (*__dispobjPreDestruct__)(struct DispObject *); + NV_STATUS (*__dispobjUnmapFrom__)(struct DispObject *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__dispobjGetNotificationListPtr__)(struct DispObject *); + void (*__dispobjControl_Epilogue__)(struct DispObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__dispobjGetNotificationShare__)(struct DispObject *); + NV_STATUS (*__dispobjControlLookup__)(struct DispObject *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispobjMap__)(struct DispObject *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dispobjGetOrAllocNotifShare__)(struct DispObject *, NvHandle, NvHandle, struct NotifShare **); + NvU32 rmFreeFlags; +}; + +#ifndef __NVOC_CLASS_DispObject_TYPEDEF__ +#define __NVOC_CLASS_DispObject_TYPEDEF__ +typedef struct DispObject DispObject; +#endif /* __NVOC_CLASS_DispObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispObject +#define __nvoc_class_id_DispObject 0x999839 +#endif /* __nvoc_class_id_DispObject */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispObject; + +#define __staticCast_DispObject(pThis) \ + ((pThis)->__nvoc_pbase_DispObject) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispObject(pThis) ((DispObject*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispObject(pThis) \ + ((DispObject*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispObject))) +#endif 
//__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispObject(DispObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispObject(DispObject**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispobjCtrlCmdGetPinsetCount(pDispObject, pParams) dispobjCtrlCmdGetPinsetCount_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetPinsetPeer(pDispObject, pParams) dispobjCtrlCmdGetPinsetPeer_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetMempoolWARForBlitTearing(pDispObject, pParams) dispobjCtrlCmdSetMempoolWARForBlitTearing_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetPinsetLockpins(pDispObject, pParams) dispobjCtrlCmdGetPinsetLockpins_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetFrameLockHeaderLockPins(pDispObject, pParams) dispobjCtrlCmdGetFrameLockHeaderLockPins_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetPiorSeqCtl(pDispObject, pParams) dispobjCtrlCmdGetPiorSeqCtl_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetPiorSeqCtl(pDispObject, pParams) dispobjCtrlCmdSetPiorSeqCtl_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetPiorOpMode(pDispObject, pParams) dispobjCtrlCmdGetPiorOpMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetPiorOpMode(pDispObject, pParams) dispobjCtrlCmdSetPiorOpMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdEventSetMemoryNotifies(pDispObject, pSetMemoryNotifiesParams) dispobjCtrlCmdEventSetMemoryNotifies_DISPATCH(pDispObject, pSetMemoryNotifiesParams) +#define dispobjCtrlCmdSetRmFreeFlags(pDispObject, pParams) dispobjCtrlCmdSetRmFreeFlags_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdIMPSetGetParameter(pDispObject, pImpSetGetParams) dispobjCtrlCmdIMPSetGetParameter_DISPATCH(pDispObject, pImpSetGetParams) +#define dispobjCtrlCmdGetRgStatus(pDispObject, pParams) dispobjCtrlCmdGetRgStatus_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgUnderflowProp(pDispObject, pParams) dispobjCtrlCmdGetRgUnderflowProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetRgUnderflowProp(pDispObject, pParams) dispobjCtrlCmdSetRgUnderflowProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgFliplockProp(pDispObject, pParams) dispobjCtrlCmdGetRgFliplockProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetRgFliplockProp(pDispObject, pParams) dispobjCtrlCmdSetRgFliplockProp_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgConnectedLockpin(pDispObject, pParams) dispobjCtrlCmdGetRgConnectedLockpin_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgConnectedLockpinStateless(pDispObject, pParams) dispobjCtrlCmdGetRgConnectedLockpinStateless_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetRgScanLine(pDispObject, pParams) dispobjCtrlCmdGetRgScanLine_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetSorSeqCtl(pDispObject, pParams) dispobjCtrlCmdGetSorSeqCtl_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorSeqCtl(pDispObject, pParams) dispobjCtrlCmdSetSorSeqCtl_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSeqProgSpeed(pDispObject, pParams) dispobjCtrlCmdSeqProgSpeed_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetSorPwm(pDispObject, pParams) dispobjCtrlCmdGetSorPwm_DISPATCH(pDispObject, pParams) 
+#define dispobjCtrlCmdSetSorPwm(pDispObject, pParams) dispobjCtrlCmdSetSorPwm_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdGetSorOpMode(pDispObject, pParams) dispobjCtrlCmdGetSorOpMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorOpMode(pDispObject, pParams) dispobjCtrlCmdSetSorOpMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSetSorFlushMode(pDispObject, pParams) dispobjCtrlCmdSetSorFlushMode_DISPATCH(pDispObject, pParams) +#define dispobjCtrlCmdSystemGetCapsV2(pDispObject, pCapsParams) dispobjCtrlCmdSystemGetCapsV2_DISPATCH(pDispObject, pCapsParams) +#define dispobjCtrlCmdEventSetTrigger(pDispObject) dispobjCtrlCmdEventSetTrigger_DISPATCH(pDispObject) +#define dispobjShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispobjShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispobjControl(pDisplayApi, pCallContext, pParams) dispobjControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispobjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispobjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispobjGetMemInterMapParams(pRmResource, pParams) dispobjGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispobjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispobjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispobjCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispobjCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispobjSetNotificationShare(pNotifier, pNotifShare) dispobjSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispobjControlFilter(pResource, pCallContext, pParams) dispobjControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispobjGetRefCount(pResource) dispobjGetRefCount_DISPATCH(pResource) +#define dispobjUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispobjUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispobjUnmap(pResource, pCallContext, pCpuMapping) dispobjUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispobjControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispobjControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispobjCanCopy(pResource) dispobjCanCopy_DISPATCH(pResource) +#define dispobjMapTo(pResource, pParams) dispobjMapTo_DISPATCH(pResource, pParams) +#define dispobjAddAdditionalDependants(pClient, pResource, pReference) dispobjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispobjPreDestruct(pResource) dispobjPreDestruct_DISPATCH(pResource) +#define dispobjUnmapFrom(pResource, pParams) dispobjUnmapFrom_DISPATCH(pResource, pParams) +#define dispobjGetNotificationListPtr(pNotifier) dispobjGetNotificationListPtr_DISPATCH(pNotifier) +#define dispobjControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispobjControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispobjGetNotificationShare(pNotifier) dispobjGetNotificationShare_DISPATCH(pNotifier) +#define dispobjControlLookup(pResource, pParams, ppEntry) dispobjControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispobjMap(pResource, pCallContext, pParams, pCpuMapping) dispobjMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispobjGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispobjGetOrAllocNotifShare_DISPATCH(pNotifier, 
hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS dispobjConstructHal_IMPL(struct DispObject *pDispObject, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_disp_objs_h_disabled +static inline NV_STATUS dispobjConstructHal(struct DispObject *pDispObject, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjConstructHal(pDispObject, pCallContext, pParams) dispobjConstructHal_IMPL(pDispObject, pCallContext, pParams) +#endif //__nvoc_disp_objs_h_disabled + +#define dispobjConstructHal_HAL(pDispObject, pCallContext, pParams) dispobjConstructHal(pDispObject, pCallContext, pParams) + +NV_STATUS dispobjCtrlCmdGetPinsetCount_IMPL(struct DispObject *pDispObject, NV5070_CTRL_GET_PINSET_COUNT_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetPinsetCount_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_GET_PINSET_COUNT_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetPinsetCount__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetPinsetPeer_IMPL(struct DispObject *pDispObject, NV5070_CTRL_GET_PINSET_PEER_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetPinsetPeer_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_GET_PINSET_PEER_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetPinsetPeer__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetMempoolWARForBlitTearing_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetMempoolWARForBlitTearing_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SET_MEMPOOL_WAR_FOR_BLIT_TEARING_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetMempoolWARForBlitTearing__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetPinsetLockpins_IMPL(struct DispObject *pDispObject, NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetPinsetLockpins_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_GET_PINSET_LOCKPINS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetPinsetLockpins__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetFrameLockHeaderLockPins_IMPL(struct DispObject *pDispObject, NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetFrameLockHeaderLockPins_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_GET_FRAMELOCK_HEADER_LOCKPINS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetFrameLockHeaderLockPins__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetPiorSeqCtl_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetPiorSeqCtl_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetPiorSeqCtl__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetPiorSeqCtl_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetPiorSeqCtl_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetPiorSeqCtl__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetPiorOpMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS 
*pParams); + +static inline NV_STATUS dispobjCtrlCmdGetPiorOpMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_PIOR_OP_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetPiorOpMode__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetPiorOpMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetPiorOpMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_PIOR_OP_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetPiorOpMode__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdEventSetMemoryNotifies_IMPL(struct DispObject *pDispObject, NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams); + +static inline NV_STATUS dispobjCtrlCmdEventSetMemoryNotifies_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams) { + return pDispObject->__dispobjCtrlCmdEventSetMemoryNotifies__(pDispObject, pSetMemoryNotifiesParams); +} + +NV_STATUS dispobjCtrlCmdSetRmFreeFlags_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetRmFreeFlags_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SET_RMFREE_FLAGS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRmFreeFlags__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdIMPSetGetParameter_IMPL(struct DispObject *pDispObject, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *pImpSetGetParams); + +static inline NV_STATUS dispobjCtrlCmdIMPSetGetParameter_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_IMP_SET_GET_PARAMETER_PARAMS *pImpSetGetParams) { + return pDispObject->__dispobjCtrlCmdIMPSetGetParameter__(pDispObject, pImpSetGetParams); +} + +NV_STATUS dispobjCtrlCmdGetRgStatus_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgStatus_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_STATUS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgStatus__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgUnderflowProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgUnderflowProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_UNDERFLOW_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgUnderflowProp__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetRgUnderflowProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetRgUnderflowProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_UNDERFLOW_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRgUnderflowProp__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgFliplockProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgFliplockProp_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_FLIPLOCK_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgFliplockProp__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetRgFliplockProp_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetRgFliplockProp_DISPATCH(struct DispObject *pDispObject, 
NV5070_CTRL_CMD_SET_RG_FLIPLOCK_PROP_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetRgFliplockProp__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgConnectedLockpin_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgConnectedLockpin_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_CONNECTED_LOCKPIN_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgConnectedLockpin__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgConnectedLockpinStateless_IMPL(struct DispObject *pDispObject, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgConnectedLockpinStateless_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_GET_RG_CONNECTED_LOCKPIN_STATELESS_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgConnectedLockpinStateless__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetRgScanLine_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetRgScanLine_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_RG_SCAN_LINE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetRgScanLine__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetSorSeqCtl_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetSorSeqCtl_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetSorSeqCtl__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetSorSeqCtl_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetSorSeqCtl_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorSeqCtl__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSeqProgSpeed_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SEQ_PROG_SPEED_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSeqProgSpeed_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SEQ_PROG_SPEED_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSeqProgSpeed__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetSorPwm_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetSorPwm_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_PWM_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetSorPwm__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetSorPwm_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetSorPwm_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_PWM_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorPwm__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdGetSorOpMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdGetSorOpMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_GET_SOR_OP_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdGetSorOpMode__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetSorOpMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *pParams); + +static inline NV_STATUS 
dispobjCtrlCmdSetSorOpMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_CMD_SET_SOR_OP_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorOpMode__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSetSorFlushMode_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *pParams); + +static inline NV_STATUS dispobjCtrlCmdSetSorFlushMode_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SET_SOR_FLUSH_MODE_PARAMS *pParams) { + return pDispObject->__dispobjCtrlCmdSetSorFlushMode__(pDispObject, pParams); +} + +NV_STATUS dispobjCtrlCmdSystemGetCapsV2_IMPL(struct DispObject *pDispObject, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams); + +static inline NV_STATUS dispobjCtrlCmdSystemGetCapsV2_DISPATCH(struct DispObject *pDispObject, NV5070_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams) { + return pDispObject->__dispobjCtrlCmdSystemGetCapsV2__(pDispObject, pCapsParams); +} + +NV_STATUS dispobjCtrlCmdEventSetTrigger_IMPL(struct DispObject *pDispObject); + +static inline NV_STATUS dispobjCtrlCmdEventSetTrigger_DISPATCH(struct DispObject *pDispObject) { + return pDispObject->__dispobjCtrlCmdEventSetTrigger__(pDispObject); +} + +static inline NvBool dispobjShareCallback_DISPATCH(struct DispObject *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__dispobjShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispobjControl_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__dispobjControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NvBool dispobjAccessCallback_DISPATCH(struct DispObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispobjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispobjGetMemInterMapParams_DISPATCH(struct DispObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispobjGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispobjGetMemoryMappingDescriptor_DISPATCH(struct DispObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispobjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispobjCheckMemInterUnmap_DISPATCH(struct DispObject *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispobjCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void dispobjSetNotificationShare_DISPATCH(struct DispObject *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispobjSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispobjControlFilter_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispobjControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 dispobjGetRefCount_DISPATCH(struct DispObject *pResource) { + return pResource->__dispobjGetRefCount__(pResource); +} + +static inline NV_STATUS dispobjUnregisterEvent_DISPATCH(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispobjUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); 
+} + +static inline NV_STATUS dispobjUnmap_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__dispobjUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispobjControl_Prologue_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__dispobjControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispobjCanCopy_DISPATCH(struct DispObject *pResource) { + return pResource->__dispobjCanCopy__(pResource); +} + +static inline NV_STATUS dispobjMapTo_DISPATCH(struct DispObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispobjMapTo__(pResource, pParams); +} + +static inline void dispobjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispObject *pResource, RsResourceRef *pReference) { + pResource->__dispobjAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void dispobjPreDestruct_DISPATCH(struct DispObject *pResource) { + pResource->__dispobjPreDestruct__(pResource); +} + +static inline NV_STATUS dispobjUnmapFrom_DISPATCH(struct DispObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispobjUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *dispobjGetNotificationListPtr_DISPATCH(struct DispObject *pNotifier) { + return pNotifier->__dispobjGetNotificationListPtr__(pNotifier); +} + +static inline void dispobjControl_Epilogue_DISPATCH(struct DispObject *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__dispobjControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline struct NotifShare *dispobjGetNotificationShare_DISPATCH(struct DispObject *pNotifier) { + return pNotifier->__dispobjGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispobjControlLookup_DISPATCH(struct DispObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispobjControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispobjMap_DISPATCH(struct DispObject *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__dispobjMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispobjGetOrAllocNotifShare_DISPATCH(struct DispObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispobjGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispobjConstruct_IMPL(struct DispObject *arg_pDispObject, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispobjConstruct(arg_pDispObject, arg_pCallContext, arg_pParams) dispobjConstruct_IMPL(arg_pDispObject, arg_pCallContext, arg_pParams) +NV_STATUS dispobjGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDispObject, struct DispObject **ppDispObject); +#define dispobjGetByHandle(pClient, hDispObject, ppDispObject) dispobjGetByHandle_IMPL(pClient, hDispObject, ppDispObject) +NV_STATUS dispobjGetByDevice_IMPL(struct RsClient *pClient, struct Device *pDevice, struct DispObject **ppDispObject); +#define dispobjGetByDevice(pClient, pDevice, ppDispObject) dispobjGetByDevice_IMPL(pClient, 
pDevice, ppDispObject) +void dispobjClearRmFreeFlags_IMPL(struct DispObject *pDispObject); +#ifdef __nvoc_disp_objs_h_disabled +static inline void dispobjClearRmFreeFlags(struct DispObject *pDispObject) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjClearRmFreeFlags(pDispObject) dispobjClearRmFreeFlags_IMPL(pDispObject) +#endif //__nvoc_disp_objs_h_disabled + +NvBool dispobjGetRmFreeFlags_IMPL(struct DispObject *pDispObject); +#ifdef __nvoc_disp_objs_h_disabled +static inline NvBool dispobjGetRmFreeFlags(struct DispObject *pDispObject) { + NV_ASSERT_FAILED_PRECOMP("DispObject was disabled!"); + return NV_FALSE; +} +#else //__nvoc_disp_objs_h_disabled +#define dispobjGetRmFreeFlags(pDispObject) dispobjGetRmFreeFlags_IMPL(pDispObject) +#endif //__nvoc_disp_objs_h_disabled + +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing NvDisp's XXX_DISPLAY (C370, C570...etc). Parent for + * all other display resources (channels, etc). Allocated under a device or subdevice. + * + * Only one instance of this class is allowed per-GPU. Multi-instance restrictions + * are enforced by resource_list.h + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NvDispApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct DispObject __nvoc_base_DispObject; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + struct DispObject *__nvoc_pbase_DispObject; + struct NvDispApi *__nvoc_pbase_NvDispApi; + NV_STATUS (*__nvdispapiCtrlCmdIdleChannel__)(struct NvDispApi *, NVC370_CTRL_IDLE_CHANNEL_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdSetAccl__)(struct NvDispApi *, NVC370_CTRL_SET_ACCL_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdGetAccl__)(struct NvDispApi *, NVC370_CTRL_GET_ACCL_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdGetChannelInfo__)(struct NvDispApi *, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdSetSwaprdyGpioWar__)(struct NvDispApi *, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdGetLockpinsCaps__)(struct NvDispApi *, NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *); + NV_STATUS (*__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__)(struct NvDispApi *, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *); + NvBool (*__nvdispapiShareCallback__)(struct NvDispApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__nvdispapiControl__)(struct NvDispApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__nvdispapiAccessCallback__)(struct NvDispApi *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__nvdispapiGetMemInterMapParams__)(struct NvDispApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__nvdispapiGetMemoryMappingDescriptor__)(struct NvDispApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__nvdispapiCheckMemInterUnmap__)(struct NvDispApi *, NvBool); + void (*__nvdispapiSetNotificationShare__)(struct NvDispApi *, struct NotifShare *); + NV_STATUS (*__nvdispapiControlFilter__)(struct NvDispApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__nvdispapiGetRefCount__)(struct NvDispApi *); + NV_STATUS 
(*__nvdispapiUnregisterEvent__)(struct NvDispApi *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__nvdispapiUnmap__)(struct NvDispApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__nvdispapiControl_Prologue__)(struct NvDispApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__nvdispapiCanCopy__)(struct NvDispApi *); + NV_STATUS (*__nvdispapiMapTo__)(struct NvDispApi *, RS_RES_MAP_TO_PARAMS *); + void (*__nvdispapiAddAdditionalDependants__)(struct RsClient *, struct NvDispApi *, RsResourceRef *); + void (*__nvdispapiPreDestruct__)(struct NvDispApi *); + NV_STATUS (*__nvdispapiUnmapFrom__)(struct NvDispApi *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__nvdispapiGetNotificationListPtr__)(struct NvDispApi *); + void (*__nvdispapiControl_Epilogue__)(struct NvDispApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__nvdispapiGetNotificationShare__)(struct NvDispApi *); + NV_STATUS (*__nvdispapiControlLookup__)(struct NvDispApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__nvdispapiMap__)(struct NvDispApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__nvdispapiGetOrAllocNotifShare__)(struct NvDispApi *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_NvDispApi_TYPEDEF__ +#define __NVOC_CLASS_NvDispApi_TYPEDEF__ +typedef struct NvDispApi NvDispApi; +#endif /* __NVOC_CLASS_NvDispApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDispApi +#define __nvoc_class_id_NvDispApi 0x36aa0b +#endif /* __nvoc_class_id_NvDispApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDispApi; + +#define __staticCast_NvDispApi(pThis) \ + ((pThis)->__nvoc_pbase_NvDispApi) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_NvDispApi(pThis) ((NvDispApi*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_NvDispApi(pThis) \ + ((NvDispApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NvDispApi))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_NvDispApi(NvDispApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NvDispApi(NvDispApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_NvDispApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_NvDispApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define nvdispapiCtrlCmdIdleChannel(pNvDispApi, pParams) nvdispapiCtrlCmdIdleChannel_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetAccl(pNvDispApi, pParams) nvdispapiCtrlCmdSetAccl_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetAccl(pNvDispApi, pParams) nvdispapiCtrlCmdGetAccl_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetChannelInfo(pNvDispApi, pParams) nvdispapiCtrlCmdGetChannelInfo_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetSwaprdyGpioWar(pNvDispApi, pParams) nvdispapiCtrlCmdSetSwaprdyGpioWar_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdGetLockpinsCaps(pNvDispApi, pParams) nvdispapiCtrlCmdGetLockpinsCaps_DISPATCH(pNvDispApi, pParams) +#define nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides(pNvDispApi, pParams) nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_DISPATCH(pNvDispApi, pParams) +#define nvdispapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) 
nvdispapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define nvdispapiControl(pDisplayApi, pCallContext, pParams) nvdispapiControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define nvdispapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) nvdispapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define nvdispapiGetMemInterMapParams(pRmResource, pParams) nvdispapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define nvdispapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) nvdispapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define nvdispapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) nvdispapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define nvdispapiSetNotificationShare(pNotifier, pNotifShare) nvdispapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define nvdispapiControlFilter(pResource, pCallContext, pParams) nvdispapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define nvdispapiGetRefCount(pResource) nvdispapiGetRefCount_DISPATCH(pResource) +#define nvdispapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) nvdispapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define nvdispapiUnmap(pResource, pCallContext, pCpuMapping) nvdispapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define nvdispapiControl_Prologue(pDisplayApi, pCallContext, pRsParams) nvdispapiControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define nvdispapiCanCopy(pResource) nvdispapiCanCopy_DISPATCH(pResource) +#define nvdispapiMapTo(pResource, pParams) nvdispapiMapTo_DISPATCH(pResource, pParams) +#define nvdispapiAddAdditionalDependants(pClient, pResource, pReference) nvdispapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define nvdispapiPreDestruct(pResource) nvdispapiPreDestruct_DISPATCH(pResource) +#define nvdispapiUnmapFrom(pResource, pParams) nvdispapiUnmapFrom_DISPATCH(pResource, pParams) +#define nvdispapiGetNotificationListPtr(pNotifier) nvdispapiGetNotificationListPtr_DISPATCH(pNotifier) +#define nvdispapiControl_Epilogue(pDisplayApi, pCallContext, pRsParams) nvdispapiControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define nvdispapiGetNotificationShare(pNotifier) nvdispapiGetNotificationShare_DISPATCH(pNotifier) +#define nvdispapiControlLookup(pResource, pParams, ppEntry) nvdispapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define nvdispapiMap(pResource, pCallContext, pParams, pCpuMapping) nvdispapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define nvdispapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) nvdispapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS nvdispapiCtrlCmdIdleChannel_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_IDLE_CHANNEL_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdIdleChannel_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_IDLE_CHANNEL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdIdleChannel__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdSetAccl_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_ACCL_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdSetAccl_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_ACCL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetAccl__(pNvDispApi, 
pParams); +} + +NV_STATUS nvdispapiCtrlCmdGetAccl_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_ACCL_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdGetAccl_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_ACCL_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetAccl__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdGetChannelInfo_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdGetChannelInfo_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_GET_CHANNEL_INFO_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetChannelInfo__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdSetSwaprdyGpioWar_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdSetSwaprdyGpioWar_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_SET_SWAPRDY_GPIO_WAR_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetSwaprdyGpioWar__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdGetLockpinsCaps_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdGetLockpinsCaps_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_GET_LOCKPINS_CAPS_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdGetLockpinsCaps__(pNvDispApi, pParams); +} + +NV_STATUS nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_IMPL(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *pParams); + +static inline NV_STATUS nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides_DISPATCH(struct NvDispApi *pNvDispApi, NVC370_CTRL_CMD_SET_FORCE_MODESWITCH_FLAGS_OVERRIDES_PARAMS *pParams) { + return pNvDispApi->__nvdispapiCtrlCmdSetForceModeswitchFlagsOverrides__(pNvDispApi, pParams); +} + +static inline NvBool nvdispapiShareCallback_DISPATCH(struct NvDispApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nvdispapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS nvdispapiControl_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__nvdispapiControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NvBool nvdispapiAccessCallback_DISPATCH(struct NvDispApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvdispapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS nvdispapiGetMemInterMapParams_DISPATCH(struct NvDispApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvdispapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS nvdispapiGetMemoryMappingDescriptor_DISPATCH(struct NvDispApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvdispapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS nvdispapiCheckMemInterUnmap_DISPATCH(struct NvDispApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__nvdispapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void nvdispapiSetNotificationShare_DISPATCH(struct NvDispApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvdispapiSetNotificationShare__(pNotifier, 
pNotifShare); +} + +static inline NV_STATUS nvdispapiControlFilter_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvdispapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 nvdispapiGetRefCount_DISPATCH(struct NvDispApi *pResource) { + return pResource->__nvdispapiGetRefCount__(pResource); +} + +static inline NV_STATUS nvdispapiUnregisterEvent_DISPATCH(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvdispapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS nvdispapiUnmap_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__nvdispapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS nvdispapiControl_Prologue_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__nvdispapiControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool nvdispapiCanCopy_DISPATCH(struct NvDispApi *pResource) { + return pResource->__nvdispapiCanCopy__(pResource); +} + +static inline NV_STATUS nvdispapiMapTo_DISPATCH(struct NvDispApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvdispapiMapTo__(pResource, pParams); +} + +static inline void nvdispapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct NvDispApi *pResource, RsResourceRef *pReference) { + pResource->__nvdispapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void nvdispapiPreDestruct_DISPATCH(struct NvDispApi *pResource) { + pResource->__nvdispapiPreDestruct__(pResource); +} + +static inline NV_STATUS nvdispapiUnmapFrom_DISPATCH(struct NvDispApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvdispapiUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *nvdispapiGetNotificationListPtr_DISPATCH(struct NvDispApi *pNotifier) { + return pNotifier->__nvdispapiGetNotificationListPtr__(pNotifier); +} + +static inline void nvdispapiControl_Epilogue_DISPATCH(struct NvDispApi *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__nvdispapiControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline struct NotifShare *nvdispapiGetNotificationShare_DISPATCH(struct NvDispApi *pNotifier) { + return pNotifier->__nvdispapiGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS nvdispapiControlLookup_DISPATCH(struct NvDispApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__nvdispapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS nvdispapiMap_DISPATCH(struct NvDispApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__nvdispapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS nvdispapiGetOrAllocNotifShare_DISPATCH(struct NvDispApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvdispapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + 
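+/*
+ * Usage sketch (illustrative only, not generated code): the nvdispapi*
+ * entry points above follow the NVOC dispatch pattern -- the convenience
+ * macro expands to the matching *_DISPATCH static inline, which calls
+ * through the per-object function pointer that the generated __nvoc_init
+ * code normally points at the corresponding *_IMPL routine.  In the
+ * example below, pNvDispApi is assumed to be a valid, already-constructed
+ * object pointer and the parameter setup is an assumption, not something
+ * prescribed by this header:
+ *
+ *     NVC370_CTRL_IDLE_CHANNEL_PARAMS params = { 0 };
+ *     // ... fill in the channel identification fields as required ...
+ *
+ *     // Expands to nvdispapiCtrlCmdIdleChannel_DISPATCH(), which invokes
+ *     // pNvDispApi->__nvdispapiCtrlCmdIdleChannel__().
+ *     NV_STATUS status = nvdispapiCtrlCmdIdleChannel(pNvDispApi, &params);
+ *     if (status != NV_OK)
+ *         return status;
+ */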
+NV_STATUS nvdispapiConstruct_IMPL(struct NvDispApi *arg_pNvdispApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_nvdispapiConstruct(arg_pNvdispApi, arg_pCallContext, arg_pParams) nvdispapiConstruct_IMPL(arg_pNvdispApi, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_DISPLAY_SW + * + * With NvDisplay, we have divided classes into HW & SW classes. HW class provides + * interface for register/methods. SW class provides rmctrls. Clients can use + * multiple SW classes on a chip, but only one HW class. NVC372_DISPLAY_SW is SW + * class of NvDisplay family chips. + * + * Multi-instance restrictions are enforced by resource_list.h + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispSwObj { + const struct NVOC_RTTI *__nvoc_rtti; + struct DisplayApi __nvoc_base_DisplayApi; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + struct DispSwObj *__nvoc_pbase_DispSwObj; + NV_STATUS (*__dispswobjCtrlCmdIsModePossible__)(struct DispSwObj *, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *); + NV_STATUS (*__dispswobjCtrlCmdIsModePossibleOrSettings__)(struct DispSwObj *, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *); + NV_STATUS (*__dispswobjCtrlCmdVideoAdaptiveRefreshRate__)(struct DispSwObj *, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *); + NV_STATUS (*__dispswobjCtrlCmdGetActiveViewportPointIn__)(struct DispSwObj *, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *); + NvBool (*__dispswobjShareCallback__)(struct DispSwObj *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispswobjControl__)(struct DispSwObj *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispswobjAccessCallback__)(struct DispSwObj *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispswobjGetMemInterMapParams__)(struct DispSwObj *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispswobjGetMemoryMappingDescriptor__)(struct DispSwObj *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispswobjCheckMemInterUnmap__)(struct DispSwObj *, NvBool); + void (*__dispswobjSetNotificationShare__)(struct DispSwObj *, struct NotifShare *); + NV_STATUS (*__dispswobjControlFilter__)(struct DispSwObj *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__dispswobjGetRefCount__)(struct DispSwObj *); + NV_STATUS (*__dispswobjUnregisterEvent__)(struct DispSwObj *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__dispswobjUnmap__)(struct DispSwObj *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__dispswobjControl_Prologue__)(struct DispSwObj *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispswobjCanCopy__)(struct DispSwObj *); + NV_STATUS (*__dispswobjMapTo__)(struct DispSwObj *, RS_RES_MAP_TO_PARAMS *); + void (*__dispswobjAddAdditionalDependants__)(struct RsClient *, struct DispSwObj *, RsResourceRef *); + void (*__dispswobjPreDestruct__)(struct DispSwObj *); + NV_STATUS (*__dispswobjUnmapFrom__)(struct DispSwObj *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__dispswobjGetNotificationListPtr__)(struct DispSwObj 
*); + void (*__dispswobjControl_Epilogue__)(struct DispSwObj *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__dispswobjGetNotificationShare__)(struct DispSwObj *); + NV_STATUS (*__dispswobjControlLookup__)(struct DispSwObj *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispswobjMap__)(struct DispSwObj *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dispswobjGetOrAllocNotifShare__)(struct DispSwObj *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_DispSwObj_TYPEDEF__ +#define __NVOC_CLASS_DispSwObj_TYPEDEF__ +typedef struct DispSwObj DispSwObj; +#endif /* __NVOC_CLASS_DispSwObj_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObj +#define __nvoc_class_id_DispSwObj 0x6aa5e2 +#endif /* __nvoc_class_id_DispSwObj */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObj; + +#define __staticCast_DispSwObj(pThis) \ + ((pThis)->__nvoc_pbase_DispSwObj) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispSwObj(pThis) ((DispSwObj*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispSwObj(pThis) \ + ((DispSwObj*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSwObj))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispSwObj(DispSwObj**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispSwObj(DispSwObj**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispSwObj(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispSwObj((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispswobjCtrlCmdIsModePossible(pDispSwObj, pParams) dispswobjCtrlCmdIsModePossible_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdIsModePossibleOrSettings(pDispSwObj, pParams) dispswobjCtrlCmdIsModePossibleOrSettings_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdVideoAdaptiveRefreshRate(pDispSwObj, pParams) dispswobjCtrlCmdVideoAdaptiveRefreshRate_DISPATCH(pDispSwObj, pParams) +#define dispswobjCtrlCmdGetActiveViewportPointIn(pDispSwObj, pParams) dispswobjCtrlCmdGetActiveViewportPointIn_DISPATCH(pDispSwObj, pParams) +#define dispswobjShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispswobjShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispswobjControl(pDisplayApi, pCallContext, pParams) dispswobjControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispswobjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispswobjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispswobjGetMemInterMapParams(pRmResource, pParams) dispswobjGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispswobjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispswobjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispswobjCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispswobjCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispswobjSetNotificationShare(pNotifier, pNotifShare) dispswobjSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispswobjControlFilter(pResource, pCallContext, pParams) dispswobjControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispswobjGetRefCount(pResource) 
dispswobjGetRefCount_DISPATCH(pResource) +#define dispswobjUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispswobjUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispswobjUnmap(pResource, pCallContext, pCpuMapping) dispswobjUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispswobjControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispswobjControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispswobjCanCopy(pResource) dispswobjCanCopy_DISPATCH(pResource) +#define dispswobjMapTo(pResource, pParams) dispswobjMapTo_DISPATCH(pResource, pParams) +#define dispswobjAddAdditionalDependants(pClient, pResource, pReference) dispswobjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispswobjPreDestruct(pResource) dispswobjPreDestruct_DISPATCH(pResource) +#define dispswobjUnmapFrom(pResource, pParams) dispswobjUnmapFrom_DISPATCH(pResource, pParams) +#define dispswobjGetNotificationListPtr(pNotifier) dispswobjGetNotificationListPtr_DISPATCH(pNotifier) +#define dispswobjControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispswobjControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispswobjGetNotificationShare(pNotifier) dispswobjGetNotificationShare_DISPATCH(pNotifier) +#define dispswobjControlLookup(pResource, pParams, ppEntry) dispswobjControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispswobjMap(pResource, pCallContext, pParams, pCpuMapping) dispswobjMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispswobjGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispswobjGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS dispswobjCtrlCmdIsModePossible_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pParams); + +static inline NV_STATUS dispswobjCtrlCmdIsModePossible_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdIsModePossible__(pDispSwObj, pParams); +} + +NV_STATUS dispswobjCtrlCmdIsModePossibleOrSettings_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *pParams); + +static inline NV_STATUS dispswobjCtrlCmdIsModePossibleOrSettings_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_IS_MODE_POSSIBLE_OR_SETTINGS_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdIsModePossibleOrSettings__(pDispSwObj, pParams); +} + +NV_STATUS dispswobjCtrlCmdVideoAdaptiveRefreshRate_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *pParams); + +static inline NV_STATUS dispswobjCtrlCmdVideoAdaptiveRefreshRate_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_VIDEO_ADAPTIVE_REFRESH_RATE_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdVideoAdaptiveRefreshRate__(pDispSwObj, pParams); +} + +NV_STATUS dispswobjCtrlCmdGetActiveViewportPointIn_IMPL(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *pParams); + +static inline NV_STATUS dispswobjCtrlCmdGetActiveViewportPointIn_DISPATCH(struct DispSwObj *pDispSwObj, NVC372_CTRL_CMD_GET_ACTIVE_VIEWPORT_POINT_IN_PARAMS *pParams) { + return pDispSwObj->__dispswobjCtrlCmdGetActiveViewportPointIn__(pDispSwObj, pParams); +} + +static inline NvBool dispswobjShareCallback_DISPATCH(struct DispSwObj *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, 
RS_SHARE_POLICY *pSharePolicy) { + return pResource->__dispswobjShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispswobjControl_DISPATCH(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__dispswobjControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NvBool dispswobjAccessCallback_DISPATCH(struct DispSwObj *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispswobjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispswobjGetMemInterMapParams_DISPATCH(struct DispSwObj *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispswobjGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispswobjGetMemoryMappingDescriptor_DISPATCH(struct DispSwObj *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispswobjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispswobjCheckMemInterUnmap_DISPATCH(struct DispSwObj *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispswobjCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void dispswobjSetNotificationShare_DISPATCH(struct DispSwObj *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispswobjSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispswobjControlFilter_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispswobjControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 dispswobjGetRefCount_DISPATCH(struct DispSwObj *pResource) { + return pResource->__dispswobjGetRefCount__(pResource); +} + +static inline NV_STATUS dispswobjUnregisterEvent_DISPATCH(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispswobjUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispswobjUnmap_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__dispswobjUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispswobjControl_Prologue_DISPATCH(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__dispswobjControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispswobjCanCopy_DISPATCH(struct DispSwObj *pResource) { + return pResource->__dispswobjCanCopy__(pResource); +} + +static inline NV_STATUS dispswobjMapTo_DISPATCH(struct DispSwObj *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispswobjMapTo__(pResource, pParams); +} + +static inline void dispswobjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispSwObj *pResource, RsResourceRef *pReference) { + pResource->__dispswobjAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void dispswobjPreDestruct_DISPATCH(struct DispSwObj *pResource) { + pResource->__dispswobjPreDestruct__(pResource); +} + +static inline NV_STATUS dispswobjUnmapFrom_DISPATCH(struct DispSwObj *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) 
{ + return pResource->__dispswobjUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *dispswobjGetNotificationListPtr_DISPATCH(struct DispSwObj *pNotifier) { + return pNotifier->__dispswobjGetNotificationListPtr__(pNotifier); +} + +static inline void dispswobjControl_Epilogue_DISPATCH(struct DispSwObj *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__dispswobjControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline struct NotifShare *dispswobjGetNotificationShare_DISPATCH(struct DispSwObj *pNotifier) { + return pNotifier->__dispswobjGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispswobjControlLookup_DISPATCH(struct DispSwObj *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispswobjControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispswobjMap_DISPATCH(struct DispSwObj *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__dispswobjMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispswobjGetOrAllocNotifShare_DISPATCH(struct DispSwObj *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispswobjGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispswobjConstruct_IMPL(struct DispSwObj *arg_pDispSwObj, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispswobjConstruct(arg_pDispSwObj, arg_pCallContext, arg_pParams) dispswobjConstruct_IMPL(arg_pDispSwObj, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +/*! + * RM internal class representing XXX_DISPLAY_COMMON (class id: 0x0073) + * + * Only one instance of this class is allowed per-GPU. 
Multi-instance restrictions + * are enforced by resource_list.h + */ +#ifdef NVOC_DISP_OBJS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispCommon { + const struct NVOC_RTTI *__nvoc_rtti; + struct DisplayApi __nvoc_base_DisplayApi; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DisplayApi *__nvoc_pbase_DisplayApi; + struct DispCommon *__nvoc_pbase_DispCommon; + NV_STATUS (*__dispcmnCtrlCmdSystemGetVblankCounter__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetVblankEnable__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetInternalDisplays__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpEnableVrr__)(struct DispCommon *, NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdClearELVBlock__)(struct DispCommon *, NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificDisplayChange__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetSpreadSpectrum__)(struct DispCommon *, NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetLcdGpioPinNum__)(struct DispCommon *, NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetAudioMuteStream__)(struct DispCommon *, NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpAuxchI2cTransferCtrl__)(struct DispCommon *, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpASSRCtrl__)(struct DispCommon *, NV0073_CTRL_DP_ASSR_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetEcf__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_ECF_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpSwitchDispMux__)(struct DispCommon *, NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpRunPreDispMuxOperations__)(struct DispCommon *, NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpRunPostDispMuxOperations__)(struct DispCommon *, NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetDispMuxStatus__)(struct DispCommon *, NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpRecordChannelRegisters__)(struct DispCommon *, NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpInternalLcdOverdrive__)(struct DispCommon *, NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetCapsV2__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetNumHeads__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetScanline__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetSuppported__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetConnectState__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugUnplugState__)(struct DispCommon *, 
NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdInternalGetHotplugUnplugState__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetHeadRoutingMap__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetActive__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetBootDisplays__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__)(struct DispCommon *, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__)(struct DispCommon *, NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__)(struct DispCommon *, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugConfig__)(struct DispCommon *, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemGetHotplugEventConfig__)(struct DispCommon *, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemSetHotplugEventConfig__)(struct DispCommon *, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemArmLightweightSupervisor__)(struct DispCommon *, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__)(struct DispCommon *, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetType__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetEdidV2__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetEdidV2__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificFakeDevice__)(struct DispCommon *, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetConnectorData__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiEnable__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificCtrlHdmi__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetAllHeadMask__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetOdPacket__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetSharedGenericPacket__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetOdPacketCtrl__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificOrGetInfo__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetPclkLimit__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *); 
+ NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetMonitorPower__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetI2cPortid__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificGetHdmiScdcData__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__)(struct DispCommon *, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetInfo__)(struct DispCommon *, NV0073_CTRL_DFP_GET_INFO_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__)(struct DispCommon *, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpSetEldAudioCaps__)(struct DispCommon *, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpSetAudioEnable__)(struct DispCommon *, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__)(struct DispCommon *, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpAssignSor__)(struct DispCommon *, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpDscCrcControl__)(struct DispCommon *, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpInitMuxData__)(struct DispCommon *, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetDsiModeTiming__)(struct DispCommon *, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__)(struct DispCommon *, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDfpGetPadlinkMask__)(struct DispCommon *, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpAuxchCtrl__)(struct DispCommon *, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpAuxchSetSema__)(struct DispCommon *, NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpCtrl__)(struct DispCommon *, NV0073_CTRL_DP_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetLaneData__)(struct DispCommon *, NV0073_CTRL_DP_LANE_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetLaneData__)(struct DispCommon *, NV0073_CTRL_DP_LANE_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetTestpattern__)(struct DispCommon *, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpMainLinkCtrl__)(struct DispCommon *, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetAudioMuteStream__)(struct DispCommon *, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetLinkConfig__)(struct DispCommon *, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetEDPData__)(struct DispCommon *, 
NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpTopologyAllocateDisplayId__)(struct DispCommon *, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpTopologyFreeDisplayId__)(struct DispCommon *, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigStream__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetRateGov__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSendACT__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetManualDisplayPort__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetCaps__)(struct DispCommon *, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetMSAProperties__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetStereoMSAProperties__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGenerateFakeInterrupt__)(struct DispCommon *, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigRadScratchReg__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetTriggerSelect__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetTriggerAll__)(struct DispCommon *, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetAuxLogData__)(struct DispCommon *, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigIndexedLinkRates__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigureFec__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetGenericInfoframe__)(struct DispCommon *, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetMsaAttributes__)(struct DispCommon *, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpConfigMacroPad__)(struct DispCommon *, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__)(struct DispCommon *, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *); + NV_STATUS (*__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__)(struct DispCommon *, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *); + NvBool (*__dispcmnShareCallback__)(struct DispCommon *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispcmnControl__)(struct DispCommon *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispcmnAccessCallback__)(struct DispCommon *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispcmnGetMemInterMapParams__)(struct DispCommon *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispcmnGetMemoryMappingDescriptor__)(struct DispCommon *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispcmnCheckMemInterUnmap__)(struct DispCommon *, NvBool); + void (*__dispcmnSetNotificationShare__)(struct DispCommon *, struct NotifShare *); + 
NV_STATUS (*__dispcmnControlFilter__)(struct DispCommon *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__dispcmnGetRefCount__)(struct DispCommon *); + NV_STATUS (*__dispcmnUnregisterEvent__)(struct DispCommon *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__dispcmnUnmap__)(struct DispCommon *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__dispcmnControl_Prologue__)(struct DispCommon *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispcmnCanCopy__)(struct DispCommon *); + NV_STATUS (*__dispcmnMapTo__)(struct DispCommon *, RS_RES_MAP_TO_PARAMS *); + void (*__dispcmnAddAdditionalDependants__)(struct RsClient *, struct DispCommon *, RsResourceRef *); + void (*__dispcmnPreDestruct__)(struct DispCommon *); + NV_STATUS (*__dispcmnUnmapFrom__)(struct DispCommon *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__dispcmnGetNotificationListPtr__)(struct DispCommon *); + void (*__dispcmnControl_Epilogue__)(struct DispCommon *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__dispcmnGetNotificationShare__)(struct DispCommon *); + NV_STATUS (*__dispcmnControlLookup__)(struct DispCommon *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispcmnMap__)(struct DispCommon *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__dispcmnGetOrAllocNotifShare__)(struct DispCommon *, NvHandle, NvHandle, struct NotifShare **); + NvU32 hotPlugMaskToBeReported; + NvU32 hotUnplugMaskToBeReported; +}; + +#ifndef __NVOC_CLASS_DispCommon_TYPEDEF__ +#define __NVOC_CLASS_DispCommon_TYPEDEF__ +typedef struct DispCommon DispCommon; +#endif /* __NVOC_CLASS_DispCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCommon +#define __nvoc_class_id_DispCommon 0x41f4f2 +#endif /* __nvoc_class_id_DispCommon */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCommon; + +#define __staticCast_DispCommon(pThis) \ + ((pThis)->__nvoc_pbase_DispCommon) + +#ifdef __nvoc_disp_objs_h_disabled +#define __dynamicCast_DispCommon(pThis) ((DispCommon*)NULL) +#else //__nvoc_disp_objs_h_disabled +#define __dynamicCast_DispCommon(pThis) \ + ((DispCommon*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispCommon))) +#endif //__nvoc_disp_objs_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispCommon(DispCommon**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispCommon(DispCommon**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispCommon(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispCommon((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispcmnCtrlCmdSystemGetVblankCounter(pDispCommon, pVBCounterParams) dispcmnCtrlCmdSystemGetVblankCounter_DISPATCH(pDispCommon, pVBCounterParams) +#define dispcmnCtrlCmdSystemGetVblankEnable(pDispCommon, pVBEnableParams) dispcmnCtrlCmdSystemGetVblankEnable_DISPATCH(pDispCommon, pVBEnableParams) +#define dispcmnCtrlCmdSystemGetInternalDisplays(pDispCommon, pInternalDisplaysParams) dispcmnCtrlCmdSystemGetInternalDisplays_DISPATCH(pDispCommon, pInternalDisplaysParams) +#define dispcmnCtrlCmdDpEnableVrr(pDispCommon, pParams) dispcmnCtrlCmdDpEnableVrr_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdClearELVBlock(pDispCommon, pParams) dispcmnCtrlCmdClearELVBlock_DISPATCH(pDispCommon, pParams) +#define 
dispcmnCtrlCmdSpecificDisplayChange(pDispCommon, pParams) dispcmnCtrlCmdSpecificDisplayChange_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetSpreadSpectrum(pDispCommon, pParams) dispcmnCtrlCmdDfpGetSpreadSpectrum_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetLcdGpioPinNum(pDispCommon, pParams) dispcmnCtrlCmdDfpGetLcdGpioPinNum_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetAudioMuteStream(pDispCommon, pParams) dispcmnCtrlCmdDpGetAudioMuteStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpAuxchI2cTransferCtrl(pDispCommon, pParams) dispcmnCtrlCmdDpAuxchI2cTransferCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpASSRCtrl(pDispCommon, pParams) dispcmnCtrlCmdDpASSRCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetEcf(pDispCommon, pCtrlEcfParams) dispcmnCtrlCmdDpSetEcf_DISPATCH(pDispCommon, pCtrlEcfParams) +#define dispcmnCtrlCmdDfpSwitchDispMux(pDispCommon, pParams) dispcmnCtrlCmdDfpSwitchDispMux_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpRunPreDispMuxOperations(pDispCommon, pParams) dispcmnCtrlCmdDfpRunPreDispMuxOperations_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpRunPostDispMuxOperations(pDispCommon, pParams) dispcmnCtrlCmdDfpRunPostDispMuxOperations_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetDispMuxStatus(pDispCommon, pParams) dispcmnCtrlCmdDfpGetDispMuxStatus_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpRecordChannelRegisters(pDispCommon, pParams) dispcmnCtrlCmdDfpRecordChannelRegisters_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpInternalLcdOverdrive(pDispCommon, pParams) dispcmnCtrlCmdDfpInternalLcdOverdrive_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemGetCapsV2(pDispCommon, pCapsParams) dispcmnCtrlCmdSystemGetCapsV2_DISPATCH(pDispCommon, pCapsParams) +#define dispcmnCtrlCmdSystemGetNumHeads(pDispCommon, pNumHeadsParams) dispcmnCtrlCmdSystemGetNumHeads_DISPATCH(pDispCommon, pNumHeadsParams) +#define dispcmnCtrlCmdSystemGetScanline(pDispCommon, pScanlineParams) dispcmnCtrlCmdSystemGetScanline_DISPATCH(pDispCommon, pScanlineParams) +#define dispcmnCtrlCmdSystemGetSuppported(pDispCommon, pSupportedParams) dispcmnCtrlCmdSystemGetSuppported_DISPATCH(pDispCommon, pSupportedParams) +#define dispcmnCtrlCmdSystemGetConnectState(pDispCommon, pConnectParams) dispcmnCtrlCmdSystemGetConnectState_DISPATCH(pDispCommon, pConnectParams) +#define dispcmnCtrlCmdSystemGetHotplugUnplugState(pDispCommon, pHotplugParams) dispcmnCtrlCmdSystemGetHotplugUnplugState_DISPATCH(pDispCommon, pHotplugParams) +#define dispcmnCtrlCmdInternalGetHotplugUnplugState(pDispCommon, pHotplugParams) dispcmnCtrlCmdInternalGetHotplugUnplugState_DISPATCH(pDispCommon, pHotplugParams) +#define dispcmnCtrlCmdSystemGetHeadRoutingMap(pDispCommon, pMapParams) dispcmnCtrlCmdSystemGetHeadRoutingMap_DISPATCH(pDispCommon, pMapParams) +#define dispcmnCtrlCmdSystemGetActive(pDispCommon, pActiveParams) dispcmnCtrlCmdSystemGetActive_DISPATCH(pDispCommon, pActiveParams) +#define dispcmnCtrlCmdSystemGetBootDisplays(pDispCommon, pParams) dispcmnCtrlCmdSystemGetBootDisplays_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemQueryDisplayIdsWithMux(pDispCommon, pParams) dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemCheckSidebandI2cSupport(pDispCommon, pParams) dispcmnCtrlCmdSystemCheckSidebandI2cSupport_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemAllocateDisplayBandwidth(pDispCommon, pParams) 
dispcmnCtrlCmdSystemAllocateDisplayBandwidth_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemGetHotplugConfig(pDispCommon, pHotplugParams) dispcmnCtrlCmdSystemGetHotplugConfig_DISPATCH(pDispCommon, pHotplugParams) +#define dispcmnCtrlCmdSystemGetHotplugEventConfig(pDispCommon, pParams) dispcmnCtrlCmdSystemGetHotplugEventConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemSetHotplugEventConfig(pDispCommon, pParams) dispcmnCtrlCmdSystemSetHotplugEventConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemArmLightweightSupervisor(pDispCommon, pParams) dispcmnCtrlCmdSystemArmLightweightSupervisor_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSystemConfigVrrPstateSwitch(pDispCommon, pParams) dispcmnCtrlCmdSystemConfigVrrPstateSwitch_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetType(pDispCommon, pDisplayTypeParams) dispcmnCtrlCmdSpecificGetType_DISPATCH(pDispCommon, pDisplayTypeParams) +#define dispcmnCtrlCmdSpecificGetEdidV2(pDispCommon, pEdidParams) dispcmnCtrlCmdSpecificGetEdidV2_DISPATCH(pDispCommon, pEdidParams) +#define dispcmnCtrlCmdSpecificSetEdidV2(pDispCommon, pEdidParams) dispcmnCtrlCmdSpecificSetEdidV2_DISPATCH(pDispCommon, pEdidParams) +#define dispcmnCtrlCmdSpecificFakeDevice(pDispCommon, pTestParams) dispcmnCtrlCmdSpecificFakeDevice_DISPATCH(pDispCommon, pTestParams) +#define dispcmnCtrlCmdSpecificGetConnectorData(pDispCommon, pConnectorParams) dispcmnCtrlCmdSpecificGetConnectorData_DISPATCH(pDispCommon, pConnectorParams) +#define dispcmnCtrlCmdSpecificSetHdmiEnable(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiEnable_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificCtrlHdmi(pDispCommon, pParams) dispcmnCtrlCmdSpecificCtrlHdmi_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetAllHeadMask(pDispCommon, pAllHeadMaskParams) dispcmnCtrlCmdSpecificGetAllHeadMask_DISPATCH(pDispCommon, pAllHeadMaskParams) +#define dispcmnCtrlCmdSpecificSetOdPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetOdPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificAcquireSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificReleaseSharedGenericPacket(pDispCommon, pParams) dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetOdPacketCtrl(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetOdPacketCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificOrGetInfo(pDispCommon, pParams) dispcmnCtrlCmdSpecificOrGetInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetPclkLimit(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetPclkLimit_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetHdmiSinkCaps(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiSinkCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetMonitorPower(pDispCommon, setMonitorPowerParams) dispcmnCtrlCmdSpecificSetMonitorPower_DISPATCH(pDispCommon, setMonitorPowerParams) +#define dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificApplyEdidOverrideV2(pDispCommon, pEdidOverrideParams) dispcmnCtrlCmdSpecificApplyEdidOverrideV2_DISPATCH(pDispCommon, 
pEdidOverrideParams) +#define dispcmnCtrlCmdSpecificGetI2cPortid(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetI2cPortid_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetHdmiGpuCaps(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdmiGpuCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificGetHdmiScdcData(pDispCommon, pParams) dispcmnCtrlCmdSpecificGetHdmiScdcData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificIsDirectmodeDisplay(pDispCommon, pParams) dispcmnCtrlCmdSpecificIsDirectmodeDisplay_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation(pDispCommon, pParams) dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetInfo(pDispCommon, pParams) dispcmnCtrlCmdDfpGetInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetDisplayportDongleInfo(pDispCommon, pParams) dispcmnCtrlCmdDfpGetDisplayportDongleInfo_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpSetEldAudioCaps(pDispCommon, pEldAudioCapsParams) dispcmnCtrlCmdDfpSetEldAudioCaps_DISPATCH(pDispCommon, pEldAudioCapsParams) +#define dispcmnCtrlCmdDfpSetAudioEnable(pDispCommon, pParams) dispcmnCtrlCmdDfpSetAudioEnable_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpUpdateDynamicDfpCache(pDispCommon, pParams) dispcmnCtrlCmdDfpUpdateDynamicDfpCache_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpAssignSor(pDispCommon, pParams) dispcmnCtrlCmdDfpAssignSor_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpDscCrcControl(pDispCommon, pParams) dispcmnCtrlCmdDfpDscCrcControl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpInitMuxData(pDispCommon, pParams) dispcmnCtrlCmdDfpInitMuxData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetDsiModeTiming(pDispCommon, pParams) dispcmnCtrlCmdDfpGetDsiModeTiming_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpConfigTwoHeadOneOr(pDispCommon, pParams) dispcmnCtrlCmdDfpConfigTwoHeadOneOr_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDfpGetPadlinkMask(pDispCommon, pParams) dispcmnCtrlCmdDfpGetPadlinkMask_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpAuxchCtrl(pDispCommon, pAuxchCtrlParams) dispcmnCtrlCmdDpAuxchCtrl_DISPATCH(pDispCommon, pAuxchCtrlParams) +#define dispcmnCtrlCmdDpAuxchSetSema(pDispCommon, pSemaParams) dispcmnCtrlCmdDpAuxchSetSema_DISPATCH(pDispCommon, pSemaParams) +#define dispcmnCtrlCmdDpCtrl(pDispCommon, pParams) dispcmnCtrlCmdDpCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetLaneData(pDispCommon, pParams) dispcmnCtrlCmdDpGetLaneData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetLaneData(pDispCommon, pParams) dispcmnCtrlCmdDpSetLaneData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetTestpattern(pDispCommon, pParams) dispcmnCtrlCmdDpSetTestpattern_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpMainLinkCtrl(pDispCommon, pParams) dispcmnCtrlCmdDpMainLinkCtrl_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetAudioMuteStream(pDispCommon, pParams) dispcmnCtrlCmdDpSetAudioMuteStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetLinkConfig(pDispCommon, pParams) dispcmnCtrlCmdDpGetLinkConfig_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetEDPData(pDispCommon, pParams) dispcmnCtrlCmdDpGetEDPData_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpTopologyAllocateDisplayId(pDispCommon, pParams) dispcmnCtrlCmdDpTopologyAllocateDisplayId_DISPATCH(pDispCommon, pParams) +#define 
dispcmnCtrlCmdDpTopologyFreeDisplayId(pDispCommon, pParams) dispcmnCtrlCmdDpTopologyFreeDisplayId_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigStream(pDispCommon, pParams) dispcmnCtrlCmdDpConfigStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigSingleHeadMultiStream(pDispCommon, pParams) dispcmnCtrlCmdDpConfigSingleHeadMultiStream_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetRateGov(pDispCommon, pParams) dispcmnCtrlCmdDpSetRateGov_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSendACT(pDispCommon, pParams) dispcmnCtrlCmdDpSendACT_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetManualDisplayPort(pDispCommon, pParams) dispcmnCtrlCmdDpSetManualDisplayPort_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetCaps(pDispCommon, pParams) dispcmnCtrlCmdDpGetCaps_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetMSAProperties(pDispCommon, pParams) dispcmnCtrlCmdDpSetMSAProperties_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetStereoMSAProperties(pDispCommon, pParams) dispcmnCtrlCmdDpSetStereoMSAProperties_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGenerateFakeInterrupt(pDispCommon, pParams) dispcmnCtrlCmdDpGenerateFakeInterrupt_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigRadScratchReg(pDispCommon, pParams) dispcmnCtrlCmdDpConfigRadScratchReg_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetTriggerSelect(pDispCommon, pTriggerSelectParams) dispcmnCtrlCmdDpSetTriggerSelect_DISPATCH(pDispCommon, pTriggerSelectParams) +#define dispcmnCtrlCmdDpSetTriggerAll(pDispCommon, pTriggerAllParams) dispcmnCtrlCmdDpSetTriggerAll_DISPATCH(pDispCommon, pTriggerAllParams) +#define dispcmnCtrlCmdDpGetAuxLogData(pDispCommon, pDpAuxBufferWrapper) dispcmnCtrlCmdDpGetAuxLogData_DISPATCH(pDispCommon, pDpAuxBufferWrapper) +#define dispcmnCtrlCmdDpConfigIndexedLinkRates(pDispCommon, pParams) dispcmnCtrlCmdDpConfigIndexedLinkRates_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigureFec(pDispCommon, pParams) dispcmnCtrlCmdDpConfigureFec_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetGenericInfoframe(pDispCommon, pParams) dispcmnCtrlCmdDpGetGenericInfoframe_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetMsaAttributes(pDispCommon, pParams) dispcmnCtrlCmdDpGetMsaAttributes_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpConfigMacroPad(pDispCommon, pParams) dispcmnCtrlCmdDpConfigMacroPad_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data(pDispCommon, pParams) dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(pDispCommon, pParams) +#define dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data(pDispCommon, pParams) dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(pDispCommon, pParams) +#define dispcmnShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) dispcmnShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispcmnControl(pDisplayApi, pCallContext, pParams) dispcmnControl_DISPATCH(pDisplayApi, pCallContext, pParams) +#define dispcmnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispcmnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispcmnGetMemInterMapParams(pRmResource, pParams) dispcmnGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispcmnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) 
dispcmnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispcmnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispcmnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define dispcmnSetNotificationShare(pNotifier, pNotifShare) dispcmnSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispcmnControlFilter(pResource, pCallContext, pParams) dispcmnControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispcmnGetRefCount(pResource) dispcmnGetRefCount_DISPATCH(pResource) +#define dispcmnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispcmnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispcmnUnmap(pResource, pCallContext, pCpuMapping) dispcmnUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define dispcmnControl_Prologue(pDisplayApi, pCallContext, pRsParams) dispcmnControl_Prologue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispcmnCanCopy(pResource) dispcmnCanCopy_DISPATCH(pResource) +#define dispcmnMapTo(pResource, pParams) dispcmnMapTo_DISPATCH(pResource, pParams) +#define dispcmnAddAdditionalDependants(pClient, pResource, pReference) dispcmnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispcmnPreDestruct(pResource) dispcmnPreDestruct_DISPATCH(pResource) +#define dispcmnUnmapFrom(pResource, pParams) dispcmnUnmapFrom_DISPATCH(pResource, pParams) +#define dispcmnGetNotificationListPtr(pNotifier) dispcmnGetNotificationListPtr_DISPATCH(pNotifier) +#define dispcmnControl_Epilogue(pDisplayApi, pCallContext, pRsParams) dispcmnControl_Epilogue_DISPATCH(pDisplayApi, pCallContext, pRsParams) +#define dispcmnGetNotificationShare(pNotifier) dispcmnGetNotificationShare_DISPATCH(pNotifier) +#define dispcmnControlLookup(pResource, pParams, ppEntry) dispcmnControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispcmnMap(pResource, pCallContext, pParams, pCpuMapping) dispcmnMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define dispcmnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispcmnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS dispcmnCtrlCmdSystemGetVblankCounter_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS *pVBCounterParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetVblankCounter_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS *pVBCounterParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetVblankCounter__(pDispCommon, pVBCounterParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetVblankEnable_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS *pVBEnableParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetVblankEnable_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_VBLANK_ENABLE_PARAMS *pVBEnableParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetVblankEnable__(pDispCommon, pVBEnableParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetInternalDisplays_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS *pInternalDisplaysParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetInternalDisplays_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_INTERNAL_DISPLAYS_PARAMS *pInternalDisplaysParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetInternalDisplays__(pDispCommon, pInternalDisplaysParams); +} + 
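+/*
+ * Usage sketch (illustrative only, not generated code): the
+ * __dynamicCast_DispCommon / __staticCast_DispCommon helpers defined above
+ * are the intended way to obtain a DispCommon pointer before calling one of
+ * these control dispatchers.  The names pGenericObject and vblankParams, and
+ * the error handling shown, are assumptions made for the example:
+ *
+ *     DispCommon *pDispCommon = __dynamicCast_DispCommon(pGenericObject);
+ *     if (pDispCommon == NULL)
+ *         return NV_ERR_INVALID_OBJECT;  // object is not DispCommon-derived
+ *
+ *     NV0073_CTRL_SYSTEM_GET_VBLANK_COUNTER_PARAMS vblankParams = { 0 };
+ *     // ... select the subdevice/head to query as required ...
+ *
+ *     // Expands to dispcmnCtrlCmdSystemGetVblankCounter_DISPATCH(), which
+ *     // invokes pDispCommon->__dispcmnCtrlCmdSystemGetVblankCounter__().
+ *     return dispcmnCtrlCmdSystemGetVblankCounter(pDispCommon, &vblankParams);
+ */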
+NV_STATUS dispcmnCtrlCmdDpEnableVrr_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpEnableVrr_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_ENABLE_VRR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpEnableVrr__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdClearELVBlock_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdClearELVBlock_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_CLEAR_ELV_BLOCK_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdClearELVBlock__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificDisplayChange_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificDisplayChange_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_DISPLAY_CHANGE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificDisplayChange__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetSpreadSpectrum_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetSpreadSpectrum_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_SPREAD_SPECTRUM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetSpreadSpectrum__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetLcdGpioPinNum_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetLcdGpioPinNum_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_LCD_GPIO_PIN_NUM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetLcdGpioPinNum__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetAudioMuteStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetAudioMuteStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_AUDIO_MUTESTREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetAudioMuteStream__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpAuxchI2cTransferCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpAuxchI2cTransferCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_I2C_TRANSFER_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpAuxchI2cTransferCtrl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpASSRCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_ASSR_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpASSRCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_ASSR_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpASSRCtrl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetEcf_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_ECF_PARAMS *pCtrlEcfParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetEcf_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_ECF_PARAMS *pCtrlEcfParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetEcf__(pDispCommon, pCtrlEcfParams); +} + +NV_STATUS dispcmnCtrlCmdDfpSwitchDispMux_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpSwitchDispMux_DISPATCH(struct DispCommon *pDispCommon, 
NV0073_CTRL_CMD_DFP_SWITCH_DISP_MUX_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpSwitchDispMux__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpRunPreDispMuxOperations_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpRunPreDispMuxOperations_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_RUN_PRE_DISP_MUX_OPERATIONS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpRunPreDispMuxOperations__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpRunPostDispMuxOperations_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpRunPostDispMuxOperations_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_RUN_POST_DISP_MUX_OPERATIONS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpRunPostDispMuxOperations__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetDispMuxStatus_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetDispMuxStatus_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_GET_DISP_MUX_STATUS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetDispMuxStatus__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpRecordChannelRegisters_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpRecordChannelRegisters_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_RECORD_CHANNEL_REGS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpRecordChannelRegisters__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpInternalLcdOverdrive_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpInternalLcdOverdrive_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_AUXCH_OD_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpInternalLcdOverdrive__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetCapsV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetCapsV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CAPS_V2_PARAMS *pCapsParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetCapsV2__(pDispCommon, pCapsParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetNumHeads_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *pNumHeadsParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetNumHeads_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *pNumHeadsParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetNumHeads__(pDispCommon, pNumHeadsParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetScanline_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *pScanlineParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetScanline_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SCANLINE_PARAMS *pScanlineParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetScanline__(pDispCommon, pScanlineParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetSuppported_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *pSupportedParams); + +static inline NV_STATUS 
dispcmnCtrlCmdSystemGetSuppported_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *pSupportedParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetSuppported__(pDispCommon, pSupportedParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetConnectState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *pConnectParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetConnectState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *pConnectParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetConnectState__(pDispCommon, pConnectParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugUnplugState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugUnplugState__(pDispCommon, pHotplugParams); +} + +NV_STATUS dispcmnCtrlCmdInternalGetHotplugUnplugState_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams); + +static inline NV_STATUS dispcmnCtrlCmdInternalGetHotplugUnplugState_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdInternalGetHotplugUnplugState__(pDispCommon, pHotplugParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetHeadRoutingMap_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *pMapParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHeadRoutingMap_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_HEAD_ROUTING_MAP_PARAMS *pMapParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHeadRoutingMap__(pDispCommon, pMapParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetActive_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *pActiveParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetActive_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *pActiveParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetActive__(pDispCommon, pActiveParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetBootDisplays_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetBootDisplays_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_BOOT_DISPLAYS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetBootDisplays__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemQueryDisplayIdsWithMux_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_QUERY_DISPLAY_IDS_WITH_MUX_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemQueryDisplayIdsWithMux__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemCheckSidebandI2cSupport_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemCheckSidebandI2cSupport_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SYSTEM_CHECK_SIDEBAND_I2C_SUPPORT_PARAMS *pParams) { + return 
pDispCommon->__dispcmnCtrlCmdSystemCheckSidebandI2cSupport__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemAllocateDisplayBandwidth_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemAllocateDisplayBandwidth__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetHotplugConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *pHotplugParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_GET_SET_HOTPLUG_CONFIG_PARAMS *pHotplugParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugConfig__(pDispCommon, pHotplugParams); +} + +NV_STATUS dispcmnCtrlCmdSystemGetHotplugEventConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemGetHotplugEventConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemGetHotplugEventConfig__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemSetHotplugEventConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemSetHotplugEventConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_HOTPLUG_EVENT_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemSetHotplugEventConfig__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemArmLightweightSupervisor_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemArmLightweightSupervisor_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_ARM_LIGHTWEIGHT_SUPERVISOR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemArmLightweightSupervisor__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSystemConfigVrrPstateSwitch_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSystemConfigVrrPstateSwitch_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SYSTEM_CONFIG_VRR_PSTATE_SWITCH_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSystemConfigVrrPstateSwitch__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetType_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *pDisplayTypeParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetType_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_TYPE_PARAMS *pDisplayTypeParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetType__(pDispCommon, pDisplayTypeParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetEdidV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetEdidV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *pEdidParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetEdidV2__(pDispCommon, pEdidParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetEdidV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *pEdidParams); + +static inline 
NV_STATUS dispcmnCtrlCmdSpecificSetEdidV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_EDID_V2_PARAMS *pEdidParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetEdidV2__(pDispCommon, pEdidParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificFakeDevice_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *pTestParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificFakeDevice_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_SPECIFIC_FAKE_DEVICE_PARAMS *pTestParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificFakeDevice__(pDispCommon, pTestParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetConnectorData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *pConnectorParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetConnectorData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *pConnectorParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetConnectorData__(pDispCommon, pConnectorParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiEnable_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiEnable_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiEnable__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificCtrlHdmi_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificCtrlHdmi_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_CTRL_HDMI_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificCtrlHdmi__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetAllHeadMask_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *pAllHeadMaskParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetAllHeadMask_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *pAllHeadMaskParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetAllHeadMask__(pDispCommon, pAllHeadMaskParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetOdPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetOdPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetOdPacket__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificAcquireSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_ACQUIRE_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificAcquireSharedGenericPacket__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetSharedGenericPacket__(pDispCommon, pParams); +} + +NV_STATUS 
dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificReleaseSharedGenericPacket_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_RELEASE_SHARED_GENERIC_PACKET_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificReleaseSharedGenericPacket__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetOdPacketCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetOdPacketCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_OD_PACKET_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetOdPacketCtrl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificOrGetInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificOrGetInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificOrGetInfo__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetPclkLimit_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetPclkLimit_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_PCLK_LIMIT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetPclkLimit__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiSinkCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiSinkCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiSinkCaps__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetMonitorPower_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *setMonitorPowerParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetMonitorPower_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_MONITOR_POWER_PARAMS *setMonitorPowerParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetMonitorPower__(pDispCommon, setMonitorPowerParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_SET_HDMI_FRL_LINK_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiFrlLinkConfig__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificApplyEdidOverrideV2_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificApplyEdidOverrideV2_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_APPLY_EDID_OVERRIDE_V2_PARAMS *pEdidOverrideParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificApplyEdidOverrideV2__(pDispCommon, pEdidOverrideParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetI2cPortid_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetI2cPortid_DISPATCH(struct DispCommon 
*pDispCommon, NV0073_CTRL_SPECIFIC_GET_I2C_PORTID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetI2cPortid__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetHdmiGpuCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdmiGpuCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_GPU_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdmiGpuCaps__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificGetHdmiScdcData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificGetHdmiScdcData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_SCDC_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificGetHdmiScdcData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificIsDirectmodeDisplay_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificIsDirectmodeDisplay_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_IS_DIRECTMODE_DISPLAY_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificIsDirectmodeDisplay__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_SPECIFIC_GET_HDMI_FRL_CAPACITY_COMPUTATION_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdSpecificSetHdmiFrlCapacityComputation__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetInfo__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetDisplayportDongleInfo_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetDisplayportDongleInfo_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_DISPLAYPORT_DONGLE_INFO_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetDisplayportDongleInfo__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpSetEldAudioCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *pEldAudioCapsParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpSetEldAudioCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *pEldAudioCapsParams) { + return pDispCommon->__dispcmnCtrlCmdDfpSetEldAudioCaps__(pDispCommon, pEldAudioCapsParams); +} + +NV_STATUS dispcmnCtrlCmdDfpSetAudioEnable_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpSetAudioEnable_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpSetAudioEnable__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpUpdateDynamicDfpCache_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *pParams); + +static inline NV_STATUS 
dispcmnCtrlCmdDfpUpdateDynamicDfpCache_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_UPDATE_DYNAMIC_DFP_CACHE_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpUpdateDynamicDfpCache__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpAssignSor_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpAssignSor_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpAssignSor__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpDscCrcControl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpDscCrcControl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_DSC_CRC_CONTROL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpDscCrcControl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpInitMuxData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpInitMuxData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_INIT_MUX_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpInitMuxData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetDsiModeTiming_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetDsiModeTiming_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DFP_GET_DSI_MODE_TIMING_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetDsiModeTiming__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpConfigTwoHeadOneOr_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpConfigTwoHeadOneOr_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_CONFIG_TWO_HEAD_ONE_OR_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpConfigTwoHeadOneOr__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDfpGetPadlinkMask_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDfpGetPadlinkMask_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DFP_GET_PADLINK_MASK_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDfpGetPadlinkMask__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpAuxchCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *pAuxchCtrlParams); + +static inline NV_STATUS dispcmnCtrlCmdDpAuxchCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *pAuxchCtrlParams) { + return pDispCommon->__dispcmnCtrlCmdDpAuxchCtrl__(pDispCommon, pAuxchCtrlParams); +} + +NV_STATUS dispcmnCtrlCmdDpAuxchSetSema_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS *pSemaParams); + +static inline NV_STATUS dispcmnCtrlCmdDpAuxchSetSema_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_AUXCH_SET_SEMA_PARAMS *pSemaParams) { + return pDispCommon->__dispcmnCtrlCmdDpAuxchSetSema__(pDispCommon, pSemaParams); +} + +NV_STATUS dispcmnCtrlCmdDpCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpCtrl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetLaneData_IMPL(struct DispCommon 
*pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetLaneData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetLaneData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetLaneData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetLaneData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_LANE_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetLaneData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetTestpattern_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetTestpattern_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_TESTPATTERN_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTestpattern__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpMainLinkCtrl_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpMainLinkCtrl_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_MAIN_LINK_CTRL_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpMainLinkCtrl__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetAudioMuteStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetAudioMuteStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetAudioMuteStream__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetLinkConfig_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetLinkConfig_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_LINK_CONFIG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetLinkConfig__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetEDPData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetEDPData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_EDP_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetEDPData__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpTopologyAllocateDisplayId_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpTopologyAllocateDisplayId_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpTopologyAllocateDisplayId__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpTopologyFreeDisplayId_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpTopologyFreeDisplayId_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpTopologyFreeDisplayId__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *pParams) { + return 
pDispCommon->__dispcmnCtrlCmdDpConfigStream__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigSingleHeadMultiStream_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigSingleHeadMultiStream_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_SINGLE_HEAD_MULTI_STREAM_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigSingleHeadMultiStream__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetRateGov_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetRateGov_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_RATE_GOV_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetRateGov__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSendACT_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSendACT_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SEND_ACT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSendACT__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetManualDisplayPort_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetManualDisplayPort_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetManualDisplayPort__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetCaps_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetCaps_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetCaps__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetMSAProperties_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetMSAProperties_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetMSAProperties__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetStereoMSAProperties_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetStereoMSAProperties_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_STEREO_MSA_PROPERTIES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetStereoMSAProperties__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGenerateFakeInterrupt_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGenerateFakeInterrupt__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigRadScratchReg_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigRadScratchReg_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_RAD_SCRATCH_REG_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigRadScratchReg__(pDispCommon, pParams); +} + +NV_STATUS 
dispcmnCtrlCmdDpSetTriggerSelect_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *pTriggerSelectParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetTriggerSelect_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_SELECT_PARAMS *pTriggerSelectParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTriggerSelect__(pDispCommon, pTriggerSelectParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetTriggerAll_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *pTriggerAllParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetTriggerAll_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_SET_TRIGGER_ALL_PARAMS *pTriggerAllParams) { + return pDispCommon->__dispcmnCtrlCmdDpSetTriggerAll__(pDispCommon, pTriggerAllParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetAuxLogData_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pDpAuxBufferWrapper); + +static inline NV_STATUS dispcmnCtrlCmdDpGetAuxLogData_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_GET_AUXLOGGER_BUFFER_DATA_PARAMS *pDpAuxBufferWrapper) { + return pDispCommon->__dispcmnCtrlCmdDpGetAuxLogData__(pDispCommon, pDpAuxBufferWrapper); +} + +NV_STATUS dispcmnCtrlCmdDpConfigIndexedLinkRates_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigIndexedLinkRates_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigIndexedLinkRates__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigureFec_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigureFec_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIGURE_FEC_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigureFec__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetGenericInfoframe_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetGenericInfoframe_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_GENERIC_INFOFRAME_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetGenericInfoframe__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetMsaAttributes_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetMsaAttributes_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_MSA_ATTRIBUTES_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetMsaAttributes__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpConfigMacroPad_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpConfigMacroPad_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_CMD_DP_CONFIG_MACRO_PAD_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpConfigMacroPad__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_SET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams) { + return 
pDispCommon->__dispcmnCtrlCmdDpSetPreemphasisDrivecurrentPostcursor2Data__(pDispCommon, pParams); +} + +NV_STATUS dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_IMPL(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams); + +static inline NV_STATUS dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data_DISPATCH(struct DispCommon *pDispCommon, NV0073_CTRL_DP_GET_PREEMPHASIS_DRIVECURRENT_POSTCURSOR2_DATA_PARAMS *pParams) { + return pDispCommon->__dispcmnCtrlCmdDpGetPreemphasisDrivecurrentPostcursor2Data__(pDispCommon, pParams); +} + +static inline NvBool dispcmnShareCallback_DISPATCH(struct DispCommon *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__dispcmnShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispcmnControl_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDisplayApi->__dispcmnControl__(pDisplayApi, pCallContext, pParams); +} + +static inline NvBool dispcmnAccessCallback_DISPATCH(struct DispCommon *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispcmnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispcmnGetMemInterMapParams_DISPATCH(struct DispCommon *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispcmnGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispcmnGetMemoryMappingDescriptor_DISPATCH(struct DispCommon *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispcmnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispcmnCheckMemInterUnmap_DISPATCH(struct DispCommon *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispcmnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void dispcmnSetNotificationShare_DISPATCH(struct DispCommon *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispcmnSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS dispcmnControlFilter_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispcmnControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 dispcmnGetRefCount_DISPATCH(struct DispCommon *pResource) { + return pResource->__dispcmnGetRefCount__(pResource); +} + +static inline NV_STATUS dispcmnUnregisterEvent_DISPATCH(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispcmnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS dispcmnUnmap_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__dispcmnUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispcmnControl_Prologue_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + return pDisplayApi->__dispcmnControl_Prologue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline NvBool dispcmnCanCopy_DISPATCH(struct DispCommon *pResource) { + 
return pResource->__dispcmnCanCopy__(pResource); +} + +static inline NV_STATUS dispcmnMapTo_DISPATCH(struct DispCommon *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispcmnMapTo__(pResource, pParams); +} + +static inline void dispcmnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispCommon *pResource, RsResourceRef *pReference) { + pResource->__dispcmnAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void dispcmnPreDestruct_DISPATCH(struct DispCommon *pResource) { + pResource->__dispcmnPreDestruct__(pResource); +} + +static inline NV_STATUS dispcmnUnmapFrom_DISPATCH(struct DispCommon *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispcmnUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *dispcmnGetNotificationListPtr_DISPATCH(struct DispCommon *pNotifier) { + return pNotifier->__dispcmnGetNotificationListPtr__(pNotifier); +} + +static inline void dispcmnControl_Epilogue_DISPATCH(struct DispCommon *pDisplayApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams) { + pDisplayApi->__dispcmnControl_Epilogue__(pDisplayApi, pCallContext, pRsParams); +} + +static inline struct NotifShare *dispcmnGetNotificationShare_DISPATCH(struct DispCommon *pNotifier) { + return pNotifier->__dispcmnGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispcmnControlLookup_DISPATCH(struct DispCommon *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispcmnControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispcmnMap_DISPATCH(struct DispCommon *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__dispcmnMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispcmnGetOrAllocNotifShare_DISPATCH(struct DispCommon *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispcmnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispcmnConstruct_IMPL(struct DispCommon *arg_pDispCommon, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispcmnConstruct(arg_pDispCommon, arg_pCallContext, arg_pParams) dispcmnConstruct_IMPL(arg_pDispCommon, arg_pCallContext, arg_pParams) +NV_STATUS dispcmnGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDispCommon, struct DispCommon **ppDispCommon); +#define dispcmnGetByHandle(pClient, hDispCommon, ppDispCommon) dispcmnGetByHandle_IMPL(pClient, hDispCommon, ppDispCommon) +void dispcmnGetByDevice_IMPL(struct RsClient *pClient, NvHandle hDevice, struct DispCommon **ppDispCommon); +#define dispcmnGetByDevice(pClient, hDevice, ppDispCommon) dispcmnGetByDevice_IMPL(pClient, hDevice, ppDispCommon) +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +/** + * @warning This function is deprecated! Please use dispchnGetByHandle. + */ +NV_STATUS CliFindDispChannelInfo(NvHandle, NvHandle, struct DispChannel **ppDispChannel, NvHandle*); + +/** + * @warning This function is deprecated! Please use dispcmnGetByHandle. 
+ */ +NvBool CliGetDispCommonInfo(NvHandle, NvHandle, struct DisplayApi **); + +/** + * @warning This function is deprecated! Please use dispobjGetByHandle. + */ +NvBool CliGetDispInfo(NvHandle, NvHandle, struct DisplayApi **); + +/** + * @warning This function is deprecated! Please use dispobjGetByHandle. + */ +struct DisplayApi *CliGetDispFromDispHandle(NvHandle hClient, NvHandle hDisp); + +#endif // DISP_OBJS_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_OBJS_NVOC_H_ diff --git a/src/nvidia/generated/g_disp_sf_user_nvoc.c b/src/nvidia/generated/g_disp_sf_user_nvoc.c new file mode 100644 index 000000000..c7c3a33c1 --- /dev/null +++ b/src/nvidia/generated/g_disp_sf_user_nvoc.c @@ -0,0 +1,329 @@ +#define NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_disp_sf_user_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xba7439 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_DispSfUser(DispSfUser*); +void __nvoc_init_funcTable_DispSfUser(DispSfUser*); +NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispSfUser(DispSfUser*); +void __nvoc_dtor_DispSfUser(DispSfUser*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSfUser; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_DispSfUser = { + /*pClassDef=*/ &__nvoc_class_def_DispSfUser, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSfUser, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispSfUser = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_DispSfUser_DispSfUser, + 
&__nvoc_rtti_DispSfUser_GpuResource, + &__nvoc_rtti_DispSfUser_RmResource, + &__nvoc_rtti_DispSfUser_RmResourceCommon, + &__nvoc_rtti_DispSfUser_RsResource, + &__nvoc_rtti_DispSfUser_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispSfUser), + /*classId=*/ classId(DispSfUser), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispSfUser", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSfUser, + /*pCastInfo=*/ &__nvoc_castinfo_DispSfUser, + /*pExportInfo=*/ &__nvoc_export_info_DispSfUser +}; + +static NV_STATUS __nvoc_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return dispsfGetRegBaseOffsetAndSize((struct DispSfUser *)(((unsigned char *)pDispSfUser) - __nvoc_rtti_DispSfUser_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_GpuResource_dispsfShareCallback(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfControl(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfUnmap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispsfGetMemInterMapParams(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispsfGetMemoryMappingDescriptor(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfGetMapAddrSpace(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_dispsfGetInternalObjectHandle(struct DispSfUser *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispsfControlFilter(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pCallContext, pParams); +} + +static void 
__nvoc_thunk_RsResource_dispsfAddAdditionalDependants(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_dispsfGetRefCount(struct DispSfUser *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispsfCheckMemInterUnmap(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispsfMapTo(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispsfControl_Prologue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_dispsfCanCopy(struct DispSfUser *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfInternalControlForward(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_dispsfPreDestruct(struct DispSfUser *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispsfUnmapFrom(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispsfControl_Epilogue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispsfControlLookup(struct DispSfUser *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispsfMap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_dispsfAccessCallback(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight 
accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSfUser = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_DispSfUser(DispSfUser *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispSfUser(DispSfUser *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail_GpuResource; + __nvoc_init_dataField_DispSfUser(pThis); + + status = __nvoc_dispsfConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail__init; + goto __nvoc_ctor_DispSfUser_exit; // Success + +__nvoc_ctor_DispSfUser_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DispSfUser_fail_GpuResource: +__nvoc_ctor_DispSfUser_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispSfUser_1(DispSfUser *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dispsfGetRegBaseOffsetAndSize__ = &dispsfGetRegBaseOffsetAndSize_IMPL; + + pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize; + + pThis->__dispsfShareCallback__ = &__nvoc_thunk_GpuResource_dispsfShareCallback; + + pThis->__dispsfControl__ = &__nvoc_thunk_GpuResource_dispsfControl; + + pThis->__dispsfUnmap__ = &__nvoc_thunk_GpuResource_dispsfUnmap; + + pThis->__dispsfGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispsfGetMemInterMapParams; + + pThis->__dispsfGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispsfGetMemoryMappingDescriptor; + + pThis->__dispsfGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispsfGetMapAddrSpace; + + pThis->__dispsfGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispsfGetInternalObjectHandle; + + pThis->__dispsfControlFilter__ = &__nvoc_thunk_RsResource_dispsfControlFilter; + + pThis->__dispsfAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispsfAddAdditionalDependants; + + pThis->__dispsfGetRefCount__ = &__nvoc_thunk_RsResource_dispsfGetRefCount; + + pThis->__dispsfCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispsfCheckMemInterUnmap; + + pThis->__dispsfMapTo__ = &__nvoc_thunk_RsResource_dispsfMapTo; + + pThis->__dispsfControl_Prologue__ = &__nvoc_thunk_RmResource_dispsfControl_Prologue; + + pThis->__dispsfCanCopy__ = &__nvoc_thunk_RsResource_dispsfCanCopy; + + pThis->__dispsfInternalControlForward__ = &__nvoc_thunk_GpuResource_dispsfInternalControlForward; + + pThis->__dispsfPreDestruct__ = &__nvoc_thunk_RsResource_dispsfPreDestruct; + + pThis->__dispsfUnmapFrom__ = &__nvoc_thunk_RsResource_dispsfUnmapFrom; + + pThis->__dispsfControl_Epilogue__ = &__nvoc_thunk_RmResource_dispsfControl_Epilogue; + + pThis->__dispsfControlLookup__ = &__nvoc_thunk_RsResource_dispsfControlLookup; + + pThis->__dispsfMap__ = &__nvoc_thunk_GpuResource_dispsfMap; + + pThis->__dispsfAccessCallback__ = 
&__nvoc_thunk_RmResource_dispsfAccessCallback; +} + +void __nvoc_init_funcTable_DispSfUser(DispSfUser *pThis) { + __nvoc_init_funcTable_DispSfUser_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_DispSfUser(DispSfUser *pThis) { + pThis->__nvoc_pbase_DispSfUser = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_DispSfUser(pThis); +} + +NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispSfUser *pThis; + + pThis = portMemAllocNonPaged(sizeof(DispSfUser)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispSfUser)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispSfUser); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DispSfUser(pThis); + status = __nvoc_ctor_DispSfUser(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispSfUser_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispSfUser_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispSfUser(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_disp_sf_user_nvoc.h b/src/nvidia/generated/g_disp_sf_user_nvoc.h new file mode 100644 index 000000000..0baf74e72 --- /dev/null +++ b/src/nvidia/generated/g_disp_sf_user_nvoc.h @@ -0,0 +1,239 @@ +#ifndef _G_DISP_SF_USER_NVOC_H_ +#define _G_DISP_SF_USER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispSfUser class. +* +******************************************************************************/ + +#include "g_disp_sf_user_nvoc.h" + +#ifndef DISP_SF_USER_H +#define DISP_SF_USER_H + +#include "gpu/gpu_resource.h" + +/*! + * RM internal class representing NVXXXX_DISP_SF_USER + */ +#ifdef NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispSfUser { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct DispSfUser *__nvoc_pbase_DispSfUser; + NV_STATUS (*__dispsfGetRegBaseOffsetAndSize__)(struct DispSfUser *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__dispsfShareCallback__)(struct DispSfUser *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__dispsfControl__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispsfUnmap__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispsfGetMemInterMapParams__)(struct DispSfUser *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispsfGetMemoryMappingDescriptor__)(struct DispSfUser *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__dispsfGetMapAddrSpace__)(struct DispSfUser *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__dispsfGetInternalObjectHandle__)(struct DispSfUser *); + NV_STATUS (*__dispsfControlFilter__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__dispsfAddAdditionalDependants__)(struct RsClient *, struct DispSfUser *, RsResourceRef *); + NvU32 (*__dispsfGetRefCount__)(struct DispSfUser *); + NV_STATUS (*__dispsfCheckMemInterUnmap__)(struct DispSfUser *, NvBool); + NV_STATUS (*__dispsfMapTo__)(struct DispSfUser *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispsfControl_Prologue__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__dispsfCanCopy__)(struct DispSfUser *); + NV_STATUS 
(*__dispsfInternalControlForward__)(struct DispSfUser *, NvU32, void *, NvU32); + void (*__dispsfPreDestruct__)(struct DispSfUser *); + NV_STATUS (*__dispsfUnmapFrom__)(struct DispSfUser *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispsfControl_Epilogue__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispsfControlLookup__)(struct DispSfUser *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__dispsfMap__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__dispsfAccessCallback__)(struct DispSfUser *, struct RsClient *, void *, RsAccessRight); + NvU32 ControlOffset; + NvU32 ControlLength; +}; + +#ifndef __NVOC_CLASS_DispSfUser_TYPEDEF__ +#define __NVOC_CLASS_DispSfUser_TYPEDEF__ +typedef struct DispSfUser DispSfUser; +#endif /* __NVOC_CLASS_DispSfUser_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSfUser +#define __nvoc_class_id_DispSfUser 0xba7439 +#endif /* __nvoc_class_id_DispSfUser */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser; + +#define __staticCast_DispSfUser(pThis) \ + ((pThis)->__nvoc_pbase_DispSfUser) + +#ifdef __nvoc_disp_sf_user_h_disabled +#define __dynamicCast_DispSfUser(pThis) ((DispSfUser*)NULL) +#else //__nvoc_disp_sf_user_h_disabled +#define __dynamicCast_DispSfUser(pThis) \ + ((DispSfUser*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSfUser))) +#endif //__nvoc_disp_sf_user_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispSfUser(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispSfUser((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispsfGetRegBaseOffsetAndSize(pDispSfUser, pGpu, pOffset, pSize) dispsfGetRegBaseOffsetAndSize_DISPATCH(pDispSfUser, pGpu, pOffset, pSize) +#define dispsfShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispsfShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispsfControl(pGpuResource, pCallContext, pParams) dispsfControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispsfUnmap(pGpuResource, pCallContext, pCpuMapping) dispsfUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispsfGetMemInterMapParams(pRmResource, pParams) dispsfGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispsfGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispsfGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispsfGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispsfGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispsfGetInternalObjectHandle(pGpuResource) dispsfGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispsfControlFilter(pResource, pCallContext, pParams) dispsfControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispsfAddAdditionalDependants(pClient, pResource, pReference) dispsfAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispsfGetRefCount(pResource) dispsfGetRefCount_DISPATCH(pResource) +#define dispsfCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispsfCheckMemInterUnmap_DISPATCH(pRmResource, 
bSubdeviceHandleProvided) +#define dispsfMapTo(pResource, pParams) dispsfMapTo_DISPATCH(pResource, pParams) +#define dispsfControl_Prologue(pResource, pCallContext, pParams) dispsfControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispsfCanCopy(pResource) dispsfCanCopy_DISPATCH(pResource) +#define dispsfInternalControlForward(pGpuResource, command, pParams, size) dispsfInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispsfPreDestruct(pResource) dispsfPreDestruct_DISPATCH(pResource) +#define dispsfUnmapFrom(pResource, pParams) dispsfUnmapFrom_DISPATCH(pResource, pParams) +#define dispsfControl_Epilogue(pResource, pCallContext, pParams) dispsfControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispsfControlLookup(pResource, pParams, ppEntry) dispsfControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispsfMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispsfMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispsfAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispsfAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS dispsfGetRegBaseOffsetAndSize_IMPL(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS dispsfGetRegBaseOffsetAndSize_DISPATCH(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pDispSfUser->__dispsfGetRegBaseOffsetAndSize__(pDispSfUser, pGpu, pOffset, pSize); +} + +static inline NvBool dispsfShareCallback_DISPATCH(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispsfShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS dispsfControl_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispsfControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispsfUnmap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispsfUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispsfGetMemInterMapParams_DISPATCH(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispsfGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispsfGetMemoryMappingDescriptor_DISPATCH(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispsfGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS dispsfGetMapAddrSpace_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispsfGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle dispsfGetInternalObjectHandle_DISPATCH(struct DispSfUser *pGpuResource) { + return pGpuResource->__dispsfGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispsfControlFilter_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispsfControlFilter__(pResource, pCallContext, pParams); +} + +static inline void dispsfAddAdditionalDependants_DISPATCH(struct 
RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) { + pResource->__dispsfAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 dispsfGetRefCount_DISPATCH(struct DispSfUser *pResource) { + return pResource->__dispsfGetRefCount__(pResource); +} + +static inline NV_STATUS dispsfCheckMemInterUnmap_DISPATCH(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__dispsfCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS dispsfMapTo_DISPATCH(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispsfMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispsfControl_Prologue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispsfControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool dispsfCanCopy_DISPATCH(struct DispSfUser *pResource) { + return pResource->__dispsfCanCopy__(pResource); +} + +static inline NV_STATUS dispsfInternalControlForward_DISPATCH(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispsfInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void dispsfPreDestruct_DISPATCH(struct DispSfUser *pResource) { + pResource->__dispsfPreDestruct__(pResource); +} + +static inline NV_STATUS dispsfUnmapFrom_DISPATCH(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispsfUnmapFrom__(pResource, pParams); +} + +static inline void dispsfControl_Epilogue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispsfControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispsfControlLookup_DISPATCH(struct DispSfUser *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispsfControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS dispsfMap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispsfMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool dispsfAccessCallback_DISPATCH(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispsfAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS dispsfConstruct_IMPL(struct DispSfUser *arg_pDispSfUser, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispsfConstruct(arg_pDispSfUser, arg_pCallContext, arg_pParams) dispsfConstruct_IMPL(arg_pDispSfUser, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // DISP_SF_USER_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISP_SF_USER_NVOC_H_ diff --git a/src/nvidia/generated/g_dispsw_nvoc.c b/src/nvidia/generated/g_dispsw_nvoc.c new file mode 100644 index 000000000..2bd90294e --- /dev/null +++ b/src/nvidia/generated/g_dispsw_nvoc.c @@ -0,0 +1,452 @@ +#define NVOC_DISPSW_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include 
"utils/nvassert.h" +#include "g_dispsw_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x99ad6d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObject; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_DispSwObject(DispSwObject*, RmHalspecOwner* ); +void __nvoc_init_funcTable_DispSwObject(DispSwObject*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_DispSwObject(DispSwObject*, RmHalspecOwner* , CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DispSwObject(DispSwObject*, RmHalspecOwner* ); +void __nvoc_dtor_DispSwObject(DispSwObject*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSwObject; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_DispSwObject = { + /*pClassDef=*/ &__nvoc_class_def_DispSwObject, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSwObject, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DispSwObject_ChannelDescendant = { + /*pClassDef=*/ 
&__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DispSwObject, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DispSwObject = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_DispSwObject_DispSwObject, + &__nvoc_rtti_DispSwObject_ChannelDescendant, + &__nvoc_rtti_DispSwObject_Notifier, + &__nvoc_rtti_DispSwObject_INotifier, + &__nvoc_rtti_DispSwObject_GpuResource, + &__nvoc_rtti_DispSwObject_RmResource, + &__nvoc_rtti_DispSwObject_RmResourceCommon, + &__nvoc_rtti_DispSwObject_RsResource, + &__nvoc_rtti_DispSwObject_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DispSwObject), + /*classId=*/ classId(DispSwObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DispSwObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSwObject, + /*pCastInfo=*/ &__nvoc_castinfo_DispSwObject, + /*pExportInfo=*/ &__nvoc_export_info_DispSwObject +}; + +static NV_STATUS __nvoc_thunk_DispSwObject_chandesGetSwMethods(struct ChannelDescendant *pDispSw, METHOD **ppMethods, NvU32 *pNumMethods) { + return dispswGetSwMethods((struct DispSwObject *)(((unsigned char *)pDispSw) - __nvoc_rtti_DispSwObject_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_dispswCheckMemInterUnmap(struct DispSwObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_DispSwObject_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_dispswShareCallback(struct DispSwObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSwObject_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_dispswAccessCallback(struct DispSwObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswMapTo(struct DispSwObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispswGetMapAddrSpace(struct DispSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSwObject_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_dispswSetNotificationShare(struct DispSwObject *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObject_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_dispswGetRefCount(struct DispSwObject *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_DispSwObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispswAddAdditionalDependants(struct RsClient *pClient, struct DispSwObject *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispswControl_Prologue(struct DispSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispswGetRegBaseOffsetAndSize(struct DispSwObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSwObject_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispswInternalControlForward(struct DispSwObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSwObject_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswUnmapFrom(struct DispSwObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_dispswControl_Epilogue(struct DispSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswControlLookup(struct DispSwObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_dispswGetInternalObjectHandle(struct DispSwObject *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSwObject_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispswControl(struct DispSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSwObject_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispswUnmap(struct DispSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSwObject_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_dispswGetMemInterMapParams(struct DispSwObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSwObject_RmResource.offset), pParams); +} + +static NV_STATUS 
__nvoc_thunk_RmResource_dispswGetMemoryMappingDescriptor(struct DispSwObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSwObject_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_dispswIsSwMethodStalling(struct DispSwObject *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_DispSwObject_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_dispswControlFilter(struct DispSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispswUnregisterEvent(struct DispSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObject_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_dispswCanCopy(struct DispSwObject *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_dispswPreDestruct(struct DispSwObject *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSwObject_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_dispswGetNotificationListPtr(struct DispSwObject *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObject_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_dispswGetNotificationShare(struct DispSwObject *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObject_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_dispswMap(struct DispSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSwObject_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_dispswGetOrAllocNotifShare(struct DispSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DispSwObject_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DispSwObject[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) dispswCtrlCmdNotifyOnVblank_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90720101u, + /*paramSize=*/ sizeof(NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_DispSwObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "dispswCtrlCmdNotifyOnVblank" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSwObject = +{ + /*numEntries=*/ 1, + /*pExportEntries=*/ __nvoc_exported_method_def_DispSwObject +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_DispSwObject(DispSwObject *pThis) { + __nvoc_dispswDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DispSwObject(DispSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, PARAM_TO_ENGDESC_FUNCTION *); +NV_STATUS __nvoc_ctor_DispSwObject(DispSwObject *pThis, RmHalspecOwner *pRmhalspecowner, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, ((void *)0)); + if (status != NV_OK) goto __nvoc_ctor_DispSwObject_fail_ChannelDescendant; + __nvoc_init_dataField_DispSwObject(pThis, pRmhalspecowner); + + status = __nvoc_dispswConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DispSwObject_fail__init; + goto __nvoc_ctor_DispSwObject_exit; // Success + +__nvoc_ctor_DispSwObject_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_DispSwObject_fail_ChannelDescendant: +__nvoc_ctor_DispSwObject_exit: + + return status; +} + +static void __nvoc_init_funcTable_DispSwObject_1(DispSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal function -- dispswGetSwMethods + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__dispswGetSwMethods__ = &dispswGetSwMethods_46f6a7; + } + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__dispswCtrlCmdNotifyOnVblank__ = &dispswCtrlCmdNotifyOnVblank_IMPL; +#endif + + pThis->__nvoc_base_ChannelDescendant.__chandesGetSwMethods__ = &__nvoc_thunk_DispSwObject_chandesGetSwMethods; + + pThis->__dispswCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_dispswCheckMemInterUnmap; + + pThis->__dispswShareCallback__ = &__nvoc_thunk_GpuResource_dispswShareCallback; + + pThis->__dispswAccessCallback__ = &__nvoc_thunk_RmResource_dispswAccessCallback; + + pThis->__dispswMapTo__ = &__nvoc_thunk_RsResource_dispswMapTo; + + pThis->__dispswGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispswGetMapAddrSpace; + + pThis->__dispswSetNotificationShare__ = &__nvoc_thunk_Notifier_dispswSetNotificationShare; + + 
pThis->__dispswGetRefCount__ = &__nvoc_thunk_RsResource_dispswGetRefCount; + + pThis->__dispswAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispswAddAdditionalDependants; + + pThis->__dispswControl_Prologue__ = &__nvoc_thunk_RmResource_dispswControl_Prologue; + + pThis->__dispswGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_dispswGetRegBaseOffsetAndSize; + + pThis->__dispswInternalControlForward__ = &__nvoc_thunk_GpuResource_dispswInternalControlForward; + + pThis->__dispswUnmapFrom__ = &__nvoc_thunk_RsResource_dispswUnmapFrom; + + pThis->__dispswControl_Epilogue__ = &__nvoc_thunk_RmResource_dispswControl_Epilogue; + + pThis->__dispswControlLookup__ = &__nvoc_thunk_RsResource_dispswControlLookup; + + pThis->__dispswGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispswGetInternalObjectHandle; + + pThis->__dispswControl__ = &__nvoc_thunk_GpuResource_dispswControl; + + pThis->__dispswUnmap__ = &__nvoc_thunk_GpuResource_dispswUnmap; + + pThis->__dispswGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispswGetMemInterMapParams; + + pThis->__dispswGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispswGetMemoryMappingDescriptor; + + pThis->__dispswIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_dispswIsSwMethodStalling; + + pThis->__dispswControlFilter__ = &__nvoc_thunk_RsResource_dispswControlFilter; + + pThis->__dispswUnregisterEvent__ = &__nvoc_thunk_Notifier_dispswUnregisterEvent; + + pThis->__dispswCanCopy__ = &__nvoc_thunk_RsResource_dispswCanCopy; + + pThis->__dispswPreDestruct__ = &__nvoc_thunk_RsResource_dispswPreDestruct; + + pThis->__dispswGetNotificationListPtr__ = &__nvoc_thunk_Notifier_dispswGetNotificationListPtr; + + pThis->__dispswGetNotificationShare__ = &__nvoc_thunk_Notifier_dispswGetNotificationShare; + + pThis->__dispswMap__ = &__nvoc_thunk_GpuResource_dispswMap; + + pThis->__dispswGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_dispswGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_DispSwObject(DispSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_DispSwObject_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_DispSwObject(DispSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_DispSwObject = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_DispSwObject(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_DispSwObject(DispSwObject **ppThis, Dynamic *pParent, NvU32 createFlags, 
CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DispSwObject *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(DispSwObject)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DispSwObject)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispSwObject); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_DispSwObject(pThis, pRmhalspecowner); + status = __nvoc_ctor_DispSwObject(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DispSwObject_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DispSwObject_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DispSwObject(DispSwObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DispSwObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_dispsw_nvoc.h b/src/nvidia/generated/g_dispsw_nvoc.h new file mode 100644 index 000000000..a06e4d4de --- /dev/null +++ b/src/nvidia/generated/g_dispsw_nvoc.h @@ -0,0 +1,358 @@ +#ifndef _G_DISPSW_NVOC_H_ +#define _G_DISPSW_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_dispsw_nvoc.h" + +#ifndef DISPSW_H +#define DISPSW_H + +#include "core/core.h" +#include "kernel/gpu/fifo/channel_descendant.h" +#include "gpu/disp/vblank_callback/vblank.h" +#include "gpu/gpu_halspec.h" + +#include "ctrl/ctrl9072.h" + +/* ------------------------ Types definitions ------------------------------ */ +#define F_SEMAPHORE_ADDR_HI_VALID (NVBIT(0)) +#define F_SEMAPHORE_ADDR_LO_VALID (NVBIT(1)) +#define F_SEMAPHORE_ADDR_VALID (F_SEMAPHORE_ADDR_HI_VALID | F_SEMAPHORE_ADDR_LO_VALID) +#define F_SEMAPHORE_RELEASE (NVBIT(2)) +#define F_NOTIFIER_FILL (NVBIT(3)) +/* ------------------------ Types definitions ------------------------------ */ +typedef struct DispSwObject *PDISP_EVENT_SW_OBJECT; + +#ifndef __NVOC_CLASS_DispSwObject_TYPEDEF__ +#define __NVOC_CLASS_DispSwObject_TYPEDEF__ +typedef struct DispSwObject DispSwObject; +#endif /* __NVOC_CLASS_DispSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObject +#define __nvoc_class_id_DispSwObject 0x99ad6d +#endif /* __nvoc_class_id_DispSwObject */ + + + +/* ------------------------ Macros & Defines ------------------------------- */ +typedef struct DISPCOMMONOBJECT +{ + struct { + /*! + * A semaphore release location is specified by GPU virtual address. + */ + union { + NvU64 GPUVA; + } Addr; + NvU32 ReleaseValue; + NvU32 ReleaseTrigger; + VBLANKCALLBACK ReleaseCallback; + } Semaphore; + + NvU32 SwapReadyMode; + NvU32 Head; + + /*! + * a pointer back to the "child" (encapsulating) arch-specific object + */ + PDISP_EVENT_SW_OBJECT DispObject; +} DISPCOMMONOBJECT, *PDISPCOMMONOBJECT; + +struct DISP_SW_OBJECT_NOTIFY +{ + NvU64 NotifierGPUVA; + NvU32 NotifyAction; + NvU32 NotifyTrigger; + VBLANKCALLBACK Callback; +}; + +NV_STATUS dispswReleaseSemaphoreAndNotifierFill(struct OBJGPU *pGpu, + NvU64 gpuVA, + NvU32 vaSpace, + NvU32 releasevalue, + NvU32 flags, + NvU32 completionStatus, + NvHandle hClient, + NvHandle hEvent); + +/*! 
+ * RM internal class representing GF100_DISP_SW + */ +#ifdef NVOC_DISPSW_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DispSwObject { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct DispSwObject *__nvoc_pbase_DispSwObject; + NV_STATUS (*__dispswGetSwMethods__)(struct DispSwObject *, METHOD **, NvU32 *); + NV_STATUS (*__dispswCtrlCmdNotifyOnVblank__)(struct DispSwObject *, NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS *); + NV_STATUS (*__dispswCheckMemInterUnmap__)(struct DispSwObject *, NvBool); + NvBool (*__dispswShareCallback__)(struct DispSwObject *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__dispswAccessCallback__)(struct DispSwObject *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__dispswMapTo__)(struct DispSwObject *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__dispswGetMapAddrSpace__)(struct DispSwObject *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__dispswSetNotificationShare__)(struct DispSwObject *, struct NotifShare *); + NvU32 (*__dispswGetRefCount__)(struct DispSwObject *); + void (*__dispswAddAdditionalDependants__)(struct RsClient *, struct DispSwObject *, RsResourceRef *); + NV_STATUS (*__dispswControl_Prologue__)(struct DispSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispswGetRegBaseOffsetAndSize__)(struct DispSwObject *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__dispswInternalControlForward__)(struct DispSwObject *, NvU32, void *, NvU32); + NV_STATUS (*__dispswUnmapFrom__)(struct DispSwObject *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__dispswControl_Epilogue__)(struct DispSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispswControlLookup__)(struct DispSwObject *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__dispswGetInternalObjectHandle__)(struct DispSwObject *); + NV_STATUS (*__dispswControl__)(struct DispSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispswUnmap__)(struct DispSwObject *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__dispswGetMemInterMapParams__)(struct DispSwObject *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__dispswGetMemoryMappingDescriptor__)(struct DispSwObject *, struct MEMORY_DESCRIPTOR **); + NvBool (*__dispswIsSwMethodStalling__)(struct DispSwObject *, NvU32); + NV_STATUS (*__dispswControlFilter__)(struct DispSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__dispswUnregisterEvent__)(struct DispSwObject *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__dispswCanCopy__)(struct DispSwObject *); + void (*__dispswPreDestruct__)(struct DispSwObject *); + PEVENTNOTIFICATION *(*__dispswGetNotificationListPtr__)(struct DispSwObject *); + struct NotifShare *(*__dispswGetNotificationShare__)(struct DispSwObject *); + NV_STATUS (*__dispswMap__)(struct DispSwObject *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct 
RsCpuMapping *); + NV_STATUS (*__dispswGetOrAllocNotifShare__)(struct DispSwObject *, NvHandle, NvHandle, struct NotifShare **); + NvU32 Flags; + DISPCOMMONOBJECT DispCommon; + struct DISP_SW_OBJECT_NOTIFY NotifyOnVBlank; + NvU32 PresentInterval; +}; + +#ifndef __NVOC_CLASS_DispSwObject_TYPEDEF__ +#define __NVOC_CLASS_DispSwObject_TYPEDEF__ +typedef struct DispSwObject DispSwObject; +#endif /* __NVOC_CLASS_DispSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObject +#define __nvoc_class_id_DispSwObject 0x99ad6d +#endif /* __nvoc_class_id_DispSwObject */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSwObject; + +#define __staticCast_DispSwObject(pThis) \ + ((pThis)->__nvoc_pbase_DispSwObject) + +#ifdef __nvoc_dispsw_h_disabled +#define __dynamicCast_DispSwObject(pThis) ((DispSwObject*)NULL) +#else //__nvoc_dispsw_h_disabled +#define __dynamicCast_DispSwObject(pThis) \ + ((DispSwObject*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSwObject))) +#endif //__nvoc_dispsw_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DispSwObject(DispSwObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DispSwObject(DispSwObject**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DispSwObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DispSwObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define dispswGetSwMethods(pDispSw, ppMethods, pNumMethods) dispswGetSwMethods_DISPATCH(pDispSw, ppMethods, pNumMethods) +#define dispswGetSwMethods_HAL(pDispSw, ppMethods, pNumMethods) dispswGetSwMethods_DISPATCH(pDispSw, ppMethods, pNumMethods) +#define dispswCtrlCmdNotifyOnVblank(pDispSwObject, pNotifyParams) dispswCtrlCmdNotifyOnVblank_DISPATCH(pDispSwObject, pNotifyParams) +#define dispswCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) dispswCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define dispswShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispswShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define dispswAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispswAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define dispswMapTo(pResource, pParams) dispswMapTo_DISPATCH(pResource, pParams) +#define dispswGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispswGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define dispswSetNotificationShare(pNotifier, pNotifShare) dispswSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define dispswGetRefCount(pResource) dispswGetRefCount_DISPATCH(pResource) +#define dispswAddAdditionalDependants(pClient, pResource, pReference) dispswAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define dispswControl_Prologue(pResource, pCallContext, pParams) dispswControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define dispswGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) dispswGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define dispswInternalControlForward(pGpuResource, command, pParams, size) dispswInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define dispswUnmapFrom(pResource, pParams) dispswUnmapFrom_DISPATCH(pResource, pParams) +#define dispswControl_Epilogue(pResource, pCallContext, 
pParams) dispswControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define dispswControlLookup(pResource, pParams, ppEntry) dispswControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define dispswGetInternalObjectHandle(pGpuResource) dispswGetInternalObjectHandle_DISPATCH(pGpuResource) +#define dispswControl(pGpuResource, pCallContext, pParams) dispswControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define dispswUnmap(pGpuResource, pCallContext, pCpuMapping) dispswUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define dispswGetMemInterMapParams(pRmResource, pParams) dispswGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define dispswGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispswGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define dispswIsSwMethodStalling(pChannelDescendant, hHandle) dispswIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define dispswControlFilter(pResource, pCallContext, pParams) dispswControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define dispswUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispswUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define dispswCanCopy(pResource) dispswCanCopy_DISPATCH(pResource) +#define dispswPreDestruct(pResource) dispswPreDestruct_DISPATCH(pResource) +#define dispswGetNotificationListPtr(pNotifier) dispswGetNotificationListPtr_DISPATCH(pNotifier) +#define dispswGetNotificationShare(pNotifier) dispswGetNotificationShare_DISPATCH(pNotifier) +#define dispswMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispswMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define dispswGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispswGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +static inline NV_STATUS dispswGetSwMethods_46f6a7(struct DispSwObject *pDispSw, METHOD **ppMethods, NvU32 *pNumMethods) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS dispswGetSwMethods_DISPATCH(struct DispSwObject *pDispSw, METHOD **ppMethods, NvU32 *pNumMethods) { + return pDispSw->__dispswGetSwMethods__(pDispSw, ppMethods, pNumMethods); +} + +NV_STATUS dispswCtrlCmdNotifyOnVblank_IMPL(struct DispSwObject *pDispSwObject, NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS *pNotifyParams); + +static inline NV_STATUS dispswCtrlCmdNotifyOnVblank_DISPATCH(struct DispSwObject *pDispSwObject, NV9072_CTRL_CMD_NOTIFY_ON_VBLANK_PARAMS *pNotifyParams) { + return pDispSwObject->__dispswCtrlCmdNotifyOnVblank__(pDispSwObject, pNotifyParams); +} + +static inline NV_STATUS dispswCheckMemInterUnmap_DISPATCH(struct DispSwObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__dispswCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool dispswShareCallback_DISPATCH(struct DispSwObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__dispswShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool dispswAccessCallback_DISPATCH(struct DispSwObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__dispswAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS dispswMapTo_DISPATCH(struct DispSwObject *pResource, 
RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__dispswMapTo__(pResource, pParams); +} + +static inline NV_STATUS dispswGetMapAddrSpace_DISPATCH(struct DispSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__dispswGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void dispswSetNotificationShare_DISPATCH(struct DispSwObject *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__dispswSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 dispswGetRefCount_DISPATCH(struct DispSwObject *pResource) { + return pResource->__dispswGetRefCount__(pResource); +} + +static inline void dispswAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispSwObject *pResource, RsResourceRef *pReference) { + pResource->__dispswAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS dispswControl_Prologue_DISPATCH(struct DispSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispswControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispswGetRegBaseOffsetAndSize_DISPATCH(struct DispSwObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__dispswGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS dispswInternalControlForward_DISPATCH(struct DispSwObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__dispswInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS dispswUnmapFrom_DISPATCH(struct DispSwObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__dispswUnmapFrom__(pResource, pParams); +} + +static inline void dispswControl_Epilogue_DISPATCH(struct DispSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__dispswControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispswControlLookup_DISPATCH(struct DispSwObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__dispswControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle dispswGetInternalObjectHandle_DISPATCH(struct DispSwObject *pGpuResource) { + return pGpuResource->__dispswGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS dispswControl_DISPATCH(struct DispSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__dispswControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS dispswUnmap_DISPATCH(struct DispSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispswUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS dispswGetMemInterMapParams_DISPATCH(struct DispSwObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__dispswGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS dispswGetMemoryMappingDescriptor_DISPATCH(struct DispSwObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__dispswGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool 
dispswIsSwMethodStalling_DISPATCH(struct DispSwObject *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__dispswIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS dispswControlFilter_DISPATCH(struct DispSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__dispswControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS dispswUnregisterEvent_DISPATCH(struct DispSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__dispswUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool dispswCanCopy_DISPATCH(struct DispSwObject *pResource) { + return pResource->__dispswCanCopy__(pResource); +} + +static inline void dispswPreDestruct_DISPATCH(struct DispSwObject *pResource) { + pResource->__dispswPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *dispswGetNotificationListPtr_DISPATCH(struct DispSwObject *pNotifier) { + return pNotifier->__dispswGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *dispswGetNotificationShare_DISPATCH(struct DispSwObject *pNotifier) { + return pNotifier->__dispswGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS dispswMap_DISPATCH(struct DispSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__dispswMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS dispswGetOrAllocNotifShare_DISPATCH(struct DispSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__dispswGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS dispswConstruct_IMPL(struct DispSwObject *arg_pDispSw, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_dispswConstruct(arg_pDispSw, arg_pCallContext, arg_pParams) dispswConstruct_IMPL(arg_pDispSw, arg_pCallContext, arg_pParams) +void dispswDestruct_IMPL(struct DispSwObject *pDispSw); +#define __nvoc_dispswDestruct(pDispSw) dispswDestruct_IMPL(pDispSw) +#undef PRIVATE_FIELD + + +#endif // DISPSW_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_DISPSW_NVOC_H_ diff --git a/src/nvidia/generated/g_eng_desc_nvoc.h b/src/nvidia/generated/g_eng_desc_nvoc.h new file mode 100644 index 000000000..79cbf1fcd --- /dev/null +++ b/src/nvidia/generated/g_eng_desc_nvoc.h @@ -0,0 +1,1505 @@ +#ifndef _G_ENG_DESC_NVOC_H_ +#define _G_ENG_DESC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_eng_desc_nvoc.h" + +#ifndef _ENG_DESC_H_ +#define _ENG_DESC_H_ + +#include "core/core.h" + +// +// Engine descriptors +// +// An ENGDESCRIPTOR carries both an NVOC_CLASS_ID and an instance ID. For example, +// to specify the engine CE1, use MKENGDESC(classId(OBJCE), 1). +// +#define ENGDESC_CLASS 31:8 +#define ENGDESC_INST 7:0 + +#define MKENGDESC(class, inst) ((((NvU32)(class)) << SF_SHIFT(ENGDESC_CLASS)) | \ + ((inst ) << SF_SHIFT(ENGDESC_INST ))) + +#define ENGDESC_FIELD(desc, field) (((desc) >> SF_SHIFT(ENGDESC ## field)) & \ + SF_MASK(ENGDESC ## field)) + +typedef NvU32 ENGDESCRIPTOR, *PENGDESCRIPTOR; + + +// +// Class declarations to get classIds for use with ENGDESCRIPTOR +// +struct OBJINVALID; + +#ifndef __NVOC_CLASS_OBJINVALID_TYPEDEF__ +#define __NVOC_CLASS_OBJINVALID_TYPEDEF__ +typedef struct OBJINVALID OBJINVALID; +#endif /* __NVOC_CLASS_OBJINVALID_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJINVALID +#define __nvoc_class_id_OBJINVALID 0xb33b15 +#endif /* __nvoc_class_id_OBJINVALID */ + + // classId only. 
Not a real class +struct OBJSWENG; + +#ifndef __NVOC_CLASS_OBJSWENG_TYPEDEF__ +#define __NVOC_CLASS_OBJSWENG_TYPEDEF__ +typedef struct OBJSWENG OBJSWENG; +#endif /* __NVOC_CLASS_OBJSWENG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSWENG +#define __nvoc_class_id_OBJSWENG 0x95a6f5 +#endif /* __nvoc_class_id_OBJSWENG */ + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct Falcon; + +#ifndef __NVOC_CLASS_Falcon_TYPEDEF__ +#define __NVOC_CLASS_Falcon_TYPEDEF__ +typedef struct Falcon Falcon; +#endif /* __NVOC_CLASS_Falcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Falcon +#define __nvoc_class_id_Falcon 0xdc5264 +#endif /* __nvoc_class_id_Falcon */ + + +struct OBJMC; + +#ifndef __NVOC_CLASS_OBJMC_TYPEDEF__ +#define __NVOC_CLASS_OBJMC_TYPEDEF__ +typedef struct OBJMC OBJMC; +#endif /* __NVOC_CLASS_OBJMC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJMC +#define __nvoc_class_id_OBJMC 0x9aad0e +#endif /* __nvoc_class_id_OBJMC */ + + +struct KernelMc; + +#ifndef __NVOC_CLASS_KernelMc_TYPEDEF__ +#define __NVOC_CLASS_KernelMc_TYPEDEF__ +typedef struct KernelMc KernelMc; +#endif /* __NVOC_CLASS_KernelMc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMc +#define __nvoc_class_id_KernelMc 0x3827ff +#endif /* __nvoc_class_id_KernelMc */ + + +struct PrivRing; + +#ifndef __NVOC_CLASS_PrivRing_TYPEDEF__ +#define __NVOC_CLASS_PrivRing_TYPEDEF__ +typedef struct PrivRing PrivRing; +#endif /* __NVOC_CLASS_PrivRing_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PrivRing +#define __nvoc_class_id_PrivRing 0x4c57c4 +#endif /* __nvoc_class_id_PrivRing */ + + +struct SwIntr; + +#ifndef __NVOC_CLASS_SwIntr_TYPEDEF__ +#define __NVOC_CLASS_SwIntr_TYPEDEF__ +typedef struct SwIntr SwIntr; +#endif /* __NVOC_CLASS_SwIntr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SwIntr +#define __nvoc_class_id_SwIntr 0x5ca633 +#endif /* __nvoc_class_id_SwIntr */ + + +struct MemorySystem; + +#ifndef __NVOC_CLASS_MemorySystem_TYPEDEF__ +#define __NVOC_CLASS_MemorySystem_TYPEDEF__ +typedef struct MemorySystem MemorySystem; +#endif /* __NVOC_CLASS_MemorySystem_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemorySystem +#define __nvoc_class_id_MemorySystem 0x174e21 +#endif /* __nvoc_class_id_MemorySystem */ + + +struct KernelMemorySystem; + +#ifndef __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ +#define __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ +typedef struct KernelMemorySystem KernelMemorySystem; +#endif /* __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMemorySystem +#define __nvoc_class_id_KernelMemorySystem 0x7faff1 +#endif /* __nvoc_class_id_KernelMemorySystem */ + + +struct MemoryManager; + +#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__ +#define __NVOC_CLASS_MemoryManager_TYPEDEF__ +typedef struct MemoryManager MemoryManager; +#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryManager +#define __nvoc_class_id_MemoryManager 0x22ad47 +#endif /* __nvoc_class_id_MemoryManager */ + + +struct OBJFBFLCN; + +#ifndef __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ +#define __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ +typedef struct OBJFBFLCN OBJFBFLCN; +#endif /* __NVOC_CLASS_OBJFBFLCN_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFBFLCN +#define __nvoc_class_id_OBJFBFLCN 0x8a20bf +#endif /* __nvoc_class_id_OBJFBFLCN */ + + +struct OBJHSHUB; + +#ifndef __NVOC_CLASS_OBJHSHUB_TYPEDEF__ +#define 
__NVOC_CLASS_OBJHSHUB_TYPEDEF__ +typedef struct OBJHSHUB OBJHSHUB; +#endif /* __NVOC_CLASS_OBJHSHUB_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHSHUB +#define __nvoc_class_id_OBJHSHUB 0x43d089 +#endif /* __nvoc_class_id_OBJHSHUB */ + + +struct OBJTMR; + +#ifndef __NVOC_CLASS_OBJTMR_TYPEDEF__ +#define __NVOC_CLASS_OBJTMR_TYPEDEF__ +typedef struct OBJTMR OBJTMR; +#endif /* __NVOC_CLASS_OBJTMR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJTMR +#define __nvoc_class_id_OBJTMR 0x9ddede +#endif /* __nvoc_class_id_OBJTMR */ + + +struct VirtMemAllocator; + +#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +typedef struct VirtMemAllocator VirtMemAllocator; +#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtMemAllocator +#define __nvoc_class_id_VirtMemAllocator 0x899e48 +#endif /* __nvoc_class_id_VirtMemAllocator */ + + +struct Graphics; + +#ifndef __NVOC_CLASS_Graphics_TYPEDEF__ +#define __NVOC_CLASS_Graphics_TYPEDEF__ +typedef struct Graphics Graphics; +#endif /* __NVOC_CLASS_Graphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Graphics +#define __nvoc_class_id_Graphics 0xd334df +#endif /* __nvoc_class_id_Graphics */ + + +struct OBJGR; + +#ifndef __NVOC_CLASS_OBJGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGR_TYPEDEF__ +typedef struct OBJGR OBJGR; +#endif /* __NVOC_CLASS_OBJGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGR +#define __nvoc_class_id_OBJGR 0xb0940a +#endif /* __nvoc_class_id_OBJGR */ + + // classId only. Not a real class. Bug 200664045 +struct GraphicsManager; + +#ifndef __NVOC_CLASS_GraphicsManager_TYPEDEF__ +#define __NVOC_CLASS_GraphicsManager_TYPEDEF__ +typedef struct GraphicsManager GraphicsManager; +#endif /* __NVOC_CLASS_GraphicsManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GraphicsManager +#define __nvoc_class_id_GraphicsManager 0x2f465a +#endif /* __nvoc_class_id_GraphicsManager */ + + +struct KernelGraphicsManager; + +#ifndef __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ +typedef struct KernelGraphicsManager KernelGraphicsManager; +#endif /* __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsManager +#define __nvoc_class_id_KernelGraphicsManager 0xd22179 +#endif /* __nvoc_class_id_KernelGraphicsManager */ + + +struct MIGManager; + +#ifndef __NVOC_CLASS_MIGManager_TYPEDEF__ +#define __NVOC_CLASS_MIGManager_TYPEDEF__ +typedef struct MIGManager MIGManager; +#endif /* __NVOC_CLASS_MIGManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGManager +#define __nvoc_class_id_MIGManager 0xfd75d0 +#endif /* __nvoc_class_id_MIGManager */ + + +struct KernelMIGManager; + +#ifndef __NVOC_CLASS_KernelMIGManager_TYPEDEF__ +#define __NVOC_CLASS_KernelMIGManager_TYPEDEF__ +typedef struct KernelMIGManager KernelMIGManager; +#endif /* __NVOC_CLASS_KernelMIGManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMIGManager +#define __nvoc_class_id_KernelMIGManager 0x01c1bf +#endif /* __nvoc_class_id_KernelMIGManager */ + + +struct SMDebugger; + +#ifndef __NVOC_CLASS_SMDebugger_TYPEDEF__ +#define __NVOC_CLASS_SMDebugger_TYPEDEF__ +typedef struct SMDebugger SMDebugger; +#endif /* __NVOC_CLASS_SMDebugger_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SMDebugger +#define __nvoc_class_id_SMDebugger 0x12018b +#endif /* __nvoc_class_id_SMDebugger */ + + +struct KernelGraphics; + +#ifndef __NVOC_CLASS_KernelGraphics_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphics_TYPEDEF__ +typedef struct KernelGraphics KernelGraphics; +#endif /* 
__NVOC_CLASS_KernelGraphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphics +#define __nvoc_class_id_KernelGraphics 0xea3fa9 +#endif /* __nvoc_class_id_KernelGraphics */ + + +struct KernelFifo; + +#ifndef __NVOC_CLASS_KernelFifo_TYPEDEF__ +#define __NVOC_CLASS_KernelFifo_TYPEDEF__ +typedef struct KernelFifo KernelFifo; +#endif /* __NVOC_CLASS_KernelFifo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFifo +#define __nvoc_class_id_KernelFifo 0xf3e155 +#endif /* __nvoc_class_id_KernelFifo */ + + +struct OBJFIFO; + +#ifndef __NVOC_CLASS_OBJFIFO_TYPEDEF__ +#define __NVOC_CLASS_OBJFIFO_TYPEDEF__ +typedef struct OBJFIFO OBJFIFO; +#endif /* __NVOC_CLASS_OBJFIFO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFIFO +#define __nvoc_class_id_OBJFIFO 0xb02365 +#endif /* __nvoc_class_id_OBJFIFO */ + + +struct OBJOS; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + + +struct OBJBUS; + +#ifndef __NVOC_CLASS_OBJBUS_TYPEDEF__ +#define __NVOC_CLASS_OBJBUS_TYPEDEF__ +typedef struct OBJBUS OBJBUS; +#endif /* __NVOC_CLASS_OBJBUS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBUS +#define __nvoc_class_id_OBJBUS 0xcc4c31 +#endif /* __nvoc_class_id_OBJBUS */ + + +struct KernelBus; + +#ifndef __NVOC_CLASS_KernelBus_TYPEDEF__ +#define __NVOC_CLASS_KernelBus_TYPEDEF__ +typedef struct KernelBus KernelBus; +#endif /* __NVOC_CLASS_KernelBus_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelBus +#define __nvoc_class_id_KernelBus 0xd2ac57 +#endif /* __nvoc_class_id_KernelBus */ + + +struct OBJINFOROM; + +#ifndef __NVOC_CLASS_OBJINFOROM_TYPEDEF__ +#define __NVOC_CLASS_OBJINFOROM_TYPEDEF__ +typedef struct OBJINFOROM OBJINFOROM; +#endif /* __NVOC_CLASS_OBJINFOROM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJINFOROM +#define __nvoc_class_id_OBJINFOROM 0x0e1639 +#endif /* __nvoc_class_id_OBJINFOROM */ + + +struct Perf; + +#ifndef __NVOC_CLASS_Perf_TYPEDEF__ +#define __NVOC_CLASS_Perf_TYPEDEF__ +typedef struct Perf Perf; +#endif /* __NVOC_CLASS_Perf_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Perf +#define __nvoc_class_id_Perf 0xed0b65 +#endif /* __nvoc_class_id_Perf */ + + +struct KernelPerf; + +#ifndef __NVOC_CLASS_KernelPerf_TYPEDEF__ +#define __NVOC_CLASS_KernelPerf_TYPEDEF__ +typedef struct KernelPerf KernelPerf; +#endif /* __NVOC_CLASS_KernelPerf_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelPerf +#define __nvoc_class_id_KernelPerf 0xc53a57 +#endif /* __nvoc_class_id_KernelPerf */ + + +struct OBJBIF; + +#ifndef __NVOC_CLASS_OBJBIF_TYPEDEF__ +#define __NVOC_CLASS_OBJBIF_TYPEDEF__ +typedef struct OBJBIF OBJBIF; +#endif /* __NVOC_CLASS_OBJBIF_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBIF +#define __nvoc_class_id_OBJBIF 0xd1c956 +#endif /* __nvoc_class_id_OBJBIF */ + + +struct KernelBif; + +#ifndef __NVOC_CLASS_KernelBif_TYPEDEF__ +#define __NVOC_CLASS_KernelBif_TYPEDEF__ +typedef struct KernelBif KernelBif; +#endif /* __NVOC_CLASS_KernelBif_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelBif +#define __nvoc_class_id_KernelBif 0xdbe523 +#endif /* __nvoc_class_id_KernelBif */ + + +struct OBJSF; + +#ifndef __NVOC_CLASS_OBJSF_TYPEDEF__ +#define __NVOC_CLASS_OBJSF_TYPEDEF__ +typedef struct OBJSF OBJSF; +#endif /* __NVOC_CLASS_OBJSF_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSF +#define __nvoc_class_id_OBJSF 0x0bd720 +#endif /* __nvoc_class_id_OBJSF */ + + +struct OBJGPIO; + +#ifndef __NVOC_CLASS_OBJGPIO_TYPEDEF__ +#define 
__NVOC_CLASS_OBJGPIO_TYPEDEF__ +typedef struct OBJGPIO OBJGPIO; +#endif /* __NVOC_CLASS_OBJGPIO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPIO +#define __nvoc_class_id_OBJGPIO 0x05c7b5 +#endif /* __nvoc_class_id_OBJGPIO */ + + +struct ClockManager; + +#ifndef __NVOC_CLASS_ClockManager_TYPEDEF__ +#define __NVOC_CLASS_ClockManager_TYPEDEF__ +typedef struct ClockManager ClockManager; +#endif /* __NVOC_CLASS_ClockManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ClockManager +#define __nvoc_class_id_ClockManager 0xbcadd3 +#endif /* __nvoc_class_id_ClockManager */ + + +struct KernelDisplay; + +#ifndef __NVOC_CLASS_KernelDisplay_TYPEDEF__ +#define __NVOC_CLASS_KernelDisplay_TYPEDEF__ +typedef struct KernelDisplay KernelDisplay; +#endif /* __NVOC_CLASS_KernelDisplay_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelDisplay +#define __nvoc_class_id_KernelDisplay 0x55952e +#endif /* __nvoc_class_id_KernelDisplay */ + + +struct OBJDISP; + +#ifndef __NVOC_CLASS_OBJDISP_TYPEDEF__ +#define __NVOC_CLASS_OBJDISP_TYPEDEF__ +typedef struct OBJDISP OBJDISP; +#endif /* __NVOC_CLASS_OBJDISP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISP +#define __nvoc_class_id_OBJDISP 0xd1755e +#endif /* __nvoc_class_id_OBJDISP */ + + +struct OBJDPU; + +#ifndef __NVOC_CLASS_OBJDPU_TYPEDEF__ +#define __NVOC_CLASS_OBJDPU_TYPEDEF__ +typedef struct OBJDPU OBJDPU; +#endif /* __NVOC_CLASS_OBJDPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDPU +#define __nvoc_class_id_OBJDPU 0x23486d +#endif /* __nvoc_class_id_OBJDPU */ + + +struct OBJFAN; + +#ifndef __NVOC_CLASS_OBJFAN_TYPEDEF__ +#define __NVOC_CLASS_OBJFAN_TYPEDEF__ +typedef struct OBJFAN OBJFAN; +#endif /* __NVOC_CLASS_OBJFAN_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFAN +#define __nvoc_class_id_OBJFAN 0xda9ade +#endif /* __nvoc_class_id_OBJFAN */ + + +struct DisplayInstanceMemory; + +#ifndef __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +#define __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ +typedef struct DisplayInstanceMemory DisplayInstanceMemory; +#endif /* __NVOC_CLASS_DisplayInstanceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DisplayInstanceMemory +#define __nvoc_class_id_DisplayInstanceMemory 0x8223e2 +#endif /* __nvoc_class_id_DisplayInstanceMemory */ + + +struct KernelHead; + +#ifndef __NVOC_CLASS_KernelHead_TYPEDEF__ +#define __NVOC_CLASS_KernelHead_TYPEDEF__ +typedef struct KernelHead KernelHead; +#endif /* __NVOC_CLASS_KernelHead_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelHead +#define __nvoc_class_id_KernelHead 0x0145e6 +#endif /* __nvoc_class_id_KernelHead */ + + +struct OBJVOLT; + +#ifndef __NVOC_CLASS_OBJVOLT_TYPEDEF__ +#define __NVOC_CLASS_OBJVOLT_TYPEDEF__ +typedef struct OBJVOLT OBJVOLT; +#endif /* __NVOC_CLASS_OBJVOLT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVOLT +#define __nvoc_class_id_OBJVOLT 0xa68120 +#endif /* __nvoc_class_id_OBJVOLT */ + + +struct Intr; + +#ifndef __NVOC_CLASS_Intr_TYPEDEF__ +#define __NVOC_CLASS_Intr_TYPEDEF__ +typedef struct Intr Intr; +#endif /* __NVOC_CLASS_Intr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Intr +#define __nvoc_class_id_Intr 0xc06e44 +#endif /* __nvoc_class_id_Intr */ + + +struct OBJHDA; + +#ifndef __NVOC_CLASS_OBJHDA_TYPEDEF__ +#define __NVOC_CLASS_OBJHDA_TYPEDEF__ +typedef struct OBJHDA OBJHDA; +#endif /* __NVOC_CLASS_OBJHDA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDA +#define __nvoc_class_id_OBJHDA 0xd3bfb4 +#endif /* __nvoc_class_id_OBJHDA */ + + +struct OBJI2C; + +#ifndef __NVOC_CLASS_OBJI2C_TYPEDEF__ +#define __NVOC_CLASS_OBJI2C_TYPEDEF__ +typedef struct OBJI2C OBJI2C; +#endif /* __NVOC_CLASS_OBJI2C_TYPEDEF__ 
*/ + +#ifndef __nvoc_class_id_OBJI2C +#define __nvoc_class_id_OBJI2C 0x2bc374 +#endif /* __nvoc_class_id_OBJI2C */ + + +struct KernelRc; + +#ifndef __NVOC_CLASS_KernelRc_TYPEDEF__ +#define __NVOC_CLASS_KernelRc_TYPEDEF__ +typedef struct KernelRc KernelRc; +#endif /* __NVOC_CLASS_KernelRc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelRc +#define __nvoc_class_id_KernelRc 0x4888db +#endif /* __nvoc_class_id_KernelRc */ + + +struct OBJRC; + +#ifndef __NVOC_CLASS_OBJRC_TYPEDEF__ +#define __NVOC_CLASS_OBJRC_TYPEDEF__ +typedef struct OBJRC OBJRC; +#endif /* __NVOC_CLASS_OBJRC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJRC +#define __nvoc_class_id_OBJRC 0x42d150 +#endif /* __nvoc_class_id_OBJRC */ + + +struct OBJSOR; + +#ifndef __NVOC_CLASS_OBJSOR_TYPEDEF__ +#define __NVOC_CLASS_OBJSOR_TYPEDEF__ +typedef struct OBJSOR OBJSOR; +#endif /* __NVOC_CLASS_OBJSOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSOR +#define __nvoc_class_id_OBJSOR 0x5ccbfa +#endif /* __nvoc_class_id_OBJSOR */ + + +struct OBJDAC; + +#ifndef __NVOC_CLASS_OBJDAC_TYPEDEF__ +#define __NVOC_CLASS_OBJDAC_TYPEDEF__ +typedef struct OBJDAC OBJDAC; +#endif /* __NVOC_CLASS_OBJDAC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDAC +#define __nvoc_class_id_OBJDAC 0x4b1802 +#endif /* __nvoc_class_id_OBJDAC */ + + +struct OBJPIOR; + +#ifndef __NVOC_CLASS_OBJPIOR_TYPEDEF__ +#define __NVOC_CLASS_OBJPIOR_TYPEDEF__ +typedef struct OBJPIOR OBJPIOR; +#endif /* __NVOC_CLASS_OBJPIOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPIOR +#define __nvoc_class_id_OBJPIOR 0x0128a3 +#endif /* __nvoc_class_id_OBJPIOR */ + + +struct OBJHEAD; + +#ifndef __NVOC_CLASS_OBJHEAD_TYPEDEF__ +#define __NVOC_CLASS_OBJHEAD_TYPEDEF__ +typedef struct OBJHEAD OBJHEAD; +#endif /* __NVOC_CLASS_OBJHEAD_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHEAD +#define __nvoc_class_id_OBJHEAD 0x74dd86 +#endif /* __nvoc_class_id_OBJHEAD */ + + +struct OBJVGA; + +#ifndef __NVOC_CLASS_OBJVGA_TYPEDEF__ +#define __NVOC_CLASS_OBJVGA_TYPEDEF__ +typedef struct OBJVGA OBJVGA; +#endif /* __NVOC_CLASS_OBJVGA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVGA +#define __nvoc_class_id_OBJVGA 0x84e0bc +#endif /* __nvoc_class_id_OBJVGA */ + + +struct OBJSTEREO; + +#ifndef __NVOC_CLASS_OBJSTEREO_TYPEDEF__ +#define __NVOC_CLASS_OBJSTEREO_TYPEDEF__ +typedef struct OBJSTEREO OBJSTEREO; +#endif /* __NVOC_CLASS_OBJSTEREO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSTEREO +#define __nvoc_class_id_OBJSTEREO 0x9fd931 +#endif /* __nvoc_class_id_OBJSTEREO */ + + +struct OBJOR; + +#ifndef __NVOC_CLASS_OBJOR_TYPEDEF__ +#define __NVOC_CLASS_OBJOR_TYPEDEF__ +typedef struct OBJOR OBJOR; +#endif /* __NVOC_CLASS_OBJOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOR +#define __nvoc_class_id_OBJOR 0x215d6b +#endif /* __nvoc_class_id_OBJOR */ + + +struct OBJBSP; + +#ifndef __NVOC_CLASS_OBJBSP_TYPEDEF__ +#define __NVOC_CLASS_OBJBSP_TYPEDEF__ +typedef struct OBJBSP OBJBSP; +#endif /* __NVOC_CLASS_OBJBSP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJBSP +#define __nvoc_class_id_OBJBSP 0x8f99e1 +#endif /* __nvoc_class_id_OBJBSP */ + + +struct OBJCIPHER; + +#ifndef __NVOC_CLASS_OBJCIPHER_TYPEDEF__ +#define __NVOC_CLASS_OBJCIPHER_TYPEDEF__ +typedef struct OBJCIPHER OBJCIPHER; +#endif /* __NVOC_CLASS_OBJCIPHER_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCIPHER +#define __nvoc_class_id_OBJCIPHER 0x8dd911 +#endif /* __nvoc_class_id_OBJCIPHER */ + + +struct OBJFUSE; + +#ifndef __NVOC_CLASS_OBJFUSE_TYPEDEF__ +#define __NVOC_CLASS_OBJFUSE_TYPEDEF__ +typedef struct OBJFUSE OBJFUSE; +#endif /* __NVOC_CLASS_OBJFUSE_TYPEDEF__ */ + +#ifndef 
__nvoc_class_id_OBJFUSE +#define __nvoc_class_id_OBJFUSE 0x95ba71 +#endif /* __nvoc_class_id_OBJFUSE */ + + +struct OBJHDCP; + +#ifndef __NVOC_CLASS_OBJHDCP_TYPEDEF__ +#define __NVOC_CLASS_OBJHDCP_TYPEDEF__ +typedef struct OBJHDCP OBJHDCP; +#endif /* __NVOC_CLASS_OBJHDCP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDCP +#define __nvoc_class_id_OBJHDCP 0x426d44 +#endif /* __nvoc_class_id_OBJHDCP */ + + +struct OBJHDMI; + +#ifndef __NVOC_CLASS_OBJHDMI_TYPEDEF__ +#define __NVOC_CLASS_OBJHDMI_TYPEDEF__ +typedef struct OBJHDMI OBJHDMI; +#endif /* __NVOC_CLASS_OBJHDMI_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDMI +#define __nvoc_class_id_OBJHDMI 0x2213b6 +#endif /* __nvoc_class_id_OBJHDMI */ + + +struct Therm; + +#ifndef __NVOC_CLASS_Therm_TYPEDEF__ +#define __NVOC_CLASS_Therm_TYPEDEF__ +typedef struct Therm Therm; +#endif /* __NVOC_CLASS_Therm_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Therm +#define __nvoc_class_id_Therm 0x6c1e56 +#endif /* __nvoc_class_id_Therm */ + + +struct OBJSEQ; + +#ifndef __NVOC_CLASS_OBJSEQ_TYPEDEF__ +#define __NVOC_CLASS_OBJSEQ_TYPEDEF__ +typedef struct OBJSEQ OBJSEQ; +#endif /* __NVOC_CLASS_OBJSEQ_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSEQ +#define __nvoc_class_id_OBJSEQ 0x45da4a +#endif /* __nvoc_class_id_OBJSEQ */ + + +struct OBJDPAUX; + +#ifndef __NVOC_CLASS_OBJDPAUX_TYPEDEF__ +#define __NVOC_CLASS_OBJDPAUX_TYPEDEF__ +typedef struct OBJDPAUX OBJDPAUX; +#endif /* __NVOC_CLASS_OBJDPAUX_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDPAUX +#define __nvoc_class_id_OBJDPAUX 0xfd2ab9 +#endif /* __nvoc_class_id_OBJDPAUX */ + + +struct Pmu; + +#ifndef __NVOC_CLASS_Pmu_TYPEDEF__ +#define __NVOC_CLASS_Pmu_TYPEDEF__ +typedef struct Pmu Pmu; +#endif /* __NVOC_CLASS_Pmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Pmu +#define __nvoc_class_id_Pmu 0xf3d722 +#endif /* __nvoc_class_id_Pmu */ + + +struct KernelPmu; + +#ifndef __NVOC_CLASS_KernelPmu_TYPEDEF__ +#define __NVOC_CLASS_KernelPmu_TYPEDEF__ +typedef struct KernelPmu KernelPmu; +#endif /* __NVOC_CLASS_KernelPmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelPmu +#define __nvoc_class_id_KernelPmu 0xab9d7d +#endif /* __nvoc_class_id_KernelPmu */ + + +struct Lpwr; + +#ifndef __NVOC_CLASS_Lpwr_TYPEDEF__ +#define __NVOC_CLASS_Lpwr_TYPEDEF__ +typedef struct Lpwr Lpwr; +#endif /* __NVOC_CLASS_Lpwr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Lpwr +#define __nvoc_class_id_Lpwr 0x112230 +#endif /* __nvoc_class_id_Lpwr */ + + +struct OBJISOHUB; + +#ifndef __NVOC_CLASS_OBJISOHUB_TYPEDEF__ +#define __NVOC_CLASS_OBJISOHUB_TYPEDEF__ +typedef struct OBJISOHUB OBJISOHUB; +#endif /* __NVOC_CLASS_OBJISOHUB_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJISOHUB +#define __nvoc_class_id_OBJISOHUB 0x7c5e0d +#endif /* __nvoc_class_id_OBJISOHUB */ + + +struct Pmgr; + +#ifndef __NVOC_CLASS_Pmgr_TYPEDEF__ +#define __NVOC_CLASS_Pmgr_TYPEDEF__ +typedef struct Pmgr Pmgr; +#endif /* __NVOC_CLASS_Pmgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Pmgr +#define __nvoc_class_id_Pmgr 0x894574 +#endif /* __nvoc_class_id_Pmgr */ + + +struct OBJHDACODEC; + +#ifndef __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ +#define __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ +typedef struct OBJHDACODEC OBJHDACODEC; +#endif /* __NVOC_CLASS_OBJHDACODEC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHDACODEC +#define __nvoc_class_id_OBJHDACODEC 0xa576e2 +#endif /* __nvoc_class_id_OBJHDACODEC */ + + +struct Spi; + +#ifndef __NVOC_CLASS_Spi_TYPEDEF__ +#define __NVOC_CLASS_Spi_TYPEDEF__ +typedef struct Spi Spi; +#endif /* __NVOC_CLASS_Spi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Spi +#define __nvoc_class_id_Spi 0x824313 
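+// Pattern note: each engine class in this header is declared the same way --
+// a forward struct declaration, a guarded typedef, and a guarded 24-bit
+// __nvoc_class_id_* constant.  The class id alone is what MKENGDESC() needs,
+// so this header can name every engine without including the engines' own
+// class headers.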
+#endif /* __nvoc_class_id_Spi */ + + +struct OBJUVM; + +#ifndef __NVOC_CLASS_OBJUVM_TYPEDEF__ +#define __NVOC_CLASS_OBJUVM_TYPEDEF__ +typedef struct OBJUVM OBJUVM; +#endif /* __NVOC_CLASS_OBJUVM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJUVM +#define __nvoc_class_id_OBJUVM 0xf9a17d +#endif /* __nvoc_class_id_OBJUVM */ + + +struct OBJSEC2; + +#ifndef __NVOC_CLASS_OBJSEC2_TYPEDEF__ +#define __NVOC_CLASS_OBJSEC2_TYPEDEF__ +typedef struct OBJSEC2 OBJSEC2; +#endif /* __NVOC_CLASS_OBJSEC2_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSEC2 +#define __nvoc_class_id_OBJSEC2 0x28c408 +#endif /* __nvoc_class_id_OBJSEC2 */ + + +struct OBJPMS; + +#ifndef __NVOC_CLASS_OBJPMS_TYPEDEF__ +#define __NVOC_CLASS_OBJPMS_TYPEDEF__ +typedef struct OBJPMS OBJPMS; +#endif /* __NVOC_CLASS_OBJPMS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPMS +#define __nvoc_class_id_OBJPMS 0x9e3810 +#endif /* __nvoc_class_id_OBJPMS */ + + +struct OBJENGSTATE; + +#ifndef __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +#define __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +typedef struct OBJENGSTATE OBJENGSTATE; +#endif /* __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJENGSTATE +#define __nvoc_class_id_OBJENGSTATE 0x7a7ed6 +#endif /* __nvoc_class_id_OBJENGSTATE */ + + +struct OBJLSFM; + +#ifndef __NVOC_CLASS_OBJLSFM_TYPEDEF__ +#define __NVOC_CLASS_OBJLSFM_TYPEDEF__ +typedef struct OBJLSFM OBJLSFM; +#endif /* __NVOC_CLASS_OBJLSFM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJLSFM +#define __nvoc_class_id_OBJLSFM 0x9a25e4 +#endif /* __nvoc_class_id_OBJLSFM */ + + +struct OBJACR; + +#ifndef __NVOC_CLASS_OBJACR_TYPEDEF__ +#define __NVOC_CLASS_OBJACR_TYPEDEF__ +typedef struct OBJACR OBJACR; +#endif /* __NVOC_CLASS_OBJACR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJACR +#define __nvoc_class_id_OBJACR 0xdb32a1 +#endif /* __nvoc_class_id_OBJACR */ + + +struct OBJGPULOG; + +#ifndef __NVOC_CLASS_OBJGPULOG_TYPEDEF__ +#define __NVOC_CLASS_OBJGPULOG_TYPEDEF__ +typedef struct OBJGPULOG OBJGPULOG; +#endif /* __NVOC_CLASS_OBJGPULOG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPULOG +#define __nvoc_class_id_OBJGPULOG 0xdd19be +#endif /* __nvoc_class_id_OBJGPULOG */ + + +struct KernelNvlink; + +#ifndef __NVOC_CLASS_KernelNvlink_TYPEDEF__ +#define __NVOC_CLASS_KernelNvlink_TYPEDEF__ +typedef struct KernelNvlink KernelNvlink; +#endif /* __NVOC_CLASS_KernelNvlink_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelNvlink +#define __nvoc_class_id_KernelNvlink 0xce6818 +#endif /* __nvoc_class_id_KernelNvlink */ + + +struct Nvlink; + +#ifndef __NVOC_CLASS_Nvlink_TYPEDEF__ +#define __NVOC_CLASS_Nvlink_TYPEDEF__ +typedef struct Nvlink Nvlink; +#endif /* __NVOC_CLASS_Nvlink_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Nvlink +#define __nvoc_class_id_Nvlink 0x790a3c +#endif /* __nvoc_class_id_Nvlink */ + + +struct OBJHWPM; + +#ifndef __NVOC_CLASS_OBJHWPM_TYPEDEF__ +#define __NVOC_CLASS_OBJHWPM_TYPEDEF__ +typedef struct OBJHWPM OBJHWPM; +#endif /* __NVOC_CLASS_OBJHWPM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHWPM +#define __nvoc_class_id_OBJHWPM 0x97e43b +#endif /* __nvoc_class_id_OBJHWPM */ + + +struct OBJGPUMON; + +#ifndef __NVOC_CLASS_OBJGPUMON_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUMON_TYPEDEF__ +typedef struct OBJGPUMON OBJGPUMON; +#endif /* __NVOC_CLASS_OBJGPUMON_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUMON +#define __nvoc_class_id_OBJGPUMON 0x2b424b +#endif /* __nvoc_class_id_OBJGPUMON */ + + +struct OBJGRIDDISPLAYLESS; + +#ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ +#define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ +typedef struct OBJGRIDDISPLAYLESS 
OBJGRIDDISPLAYLESS; +#endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGRIDDISPLAYLESS +#define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a +#endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */ + + +struct FECS; + +#ifndef __NVOC_CLASS_FECS_TYPEDEF__ +#define __NVOC_CLASS_FECS_TYPEDEF__ +typedef struct FECS FECS; +#endif /* __NVOC_CLASS_FECS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FECS +#define __nvoc_class_id_FECS 0x5ee8dc +#endif /* __nvoc_class_id_FECS */ + + +struct GPCCS; + +#ifndef __NVOC_CLASS_GPCCS_TYPEDEF__ +#define __NVOC_CLASS_GPCCS_TYPEDEF__ +typedef struct GPCCS GPCCS; +#endif /* __NVOC_CLASS_GPCCS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GPCCS +#define __nvoc_class_id_GPCCS 0x4781e8 +#endif /* __nvoc_class_id_GPCCS */ + + +struct OBJCE; + +#ifndef __NVOC_CLASS_OBJCE_TYPEDEF__ +#define __NVOC_CLASS_OBJCE_TYPEDEF__ +typedef struct OBJCE OBJCE; +#endif /* __NVOC_CLASS_OBJCE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCE +#define __nvoc_class_id_OBJCE 0x793ceb +#endif /* __nvoc_class_id_OBJCE */ + + +struct KernelCE; + +#ifndef __NVOC_CLASS_KernelCE_TYPEDEF__ +#define __NVOC_CLASS_KernelCE_TYPEDEF__ +typedef struct KernelCE KernelCE; +#endif /* __NVOC_CLASS_KernelCE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCE +#define __nvoc_class_id_KernelCE 0x242aca +#endif /* __nvoc_class_id_KernelCE */ + + +struct OBJMSENC; + +#ifndef __NVOC_CLASS_OBJMSENC_TYPEDEF__ +#define __NVOC_CLASS_OBJMSENC_TYPEDEF__ +typedef struct OBJMSENC OBJMSENC; +#endif /* __NVOC_CLASS_OBJMSENC_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJMSENC +#define __nvoc_class_id_OBJMSENC 0xe97b6c +#endif /* __nvoc_class_id_OBJMSENC */ + + +struct OBJNVJPG; + +#ifndef __NVOC_CLASS_OBJNVJPG_TYPEDEF__ +#define __NVOC_CLASS_OBJNVJPG_TYPEDEF__ +typedef struct OBJNVJPG OBJNVJPG; +#endif /* __NVOC_CLASS_OBJNVJPG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJNVJPG +#define __nvoc_class_id_OBJNVJPG 0x2b3a54 +#endif /* __nvoc_class_id_OBJNVJPG */ + + +struct OBJFAS; + +#ifndef __NVOC_CLASS_OBJFAS_TYPEDEF__ +#define __NVOC_CLASS_OBJFAS_TYPEDEF__ +typedef struct OBJFAS OBJFAS; +#endif /* __NVOC_CLASS_OBJFAS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFAS +#define __nvoc_class_id_OBJFAS 0x4ddf48 +#endif /* __nvoc_class_id_OBJFAS */ + + +struct OBJVMMU; + +#ifndef __NVOC_CLASS_OBJVMMU_TYPEDEF__ +#define __NVOC_CLASS_OBJVMMU_TYPEDEF__ +typedef struct OBJVMMU OBJVMMU; +#endif /* __NVOC_CLASS_OBJVMMU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMMU +#define __nvoc_class_id_OBJVMMU 0xdf8918 +#endif /* __nvoc_class_id_OBJVMMU */ + + +struct Gsp; + +#ifndef __NVOC_CLASS_Gsp_TYPEDEF__ +#define __NVOC_CLASS_Gsp_TYPEDEF__ +typedef struct Gsp Gsp; +#endif /* __NVOC_CLASS_Gsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Gsp +#define __nvoc_class_id_Gsp 0xda3de4 +#endif /* __nvoc_class_id_Gsp */ + + +struct OBJFSP; + +#ifndef __NVOC_CLASS_OBJFSP_TYPEDEF__ +#define __NVOC_CLASS_OBJFSP_TYPEDEF__ +typedef struct OBJFSP OBJFSP; +#endif /* __NVOC_CLASS_OBJFSP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFSP +#define __nvoc_class_id_OBJFSP 0xd39158 +#endif /* __nvoc_class_id_OBJFSP */ + + +struct KernelFsp; + +#ifndef __NVOC_CLASS_KernelFsp_TYPEDEF__ +#define __NVOC_CLASS_KernelFsp_TYPEDEF__ +typedef struct KernelFsp KernelFsp; +#endif /* __NVOC_CLASS_KernelFsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFsp +#define __nvoc_class_id_KernelFsp 0x87fb96 +#endif /* __nvoc_class_id_KernelFsp */ + + +struct OBJOFA; + +#ifndef __NVOC_CLASS_OBJOFA_TYPEDEF__ +#define __NVOC_CLASS_OBJOFA_TYPEDEF__ +typedef struct OBJOFA OBJOFA; +#endif /* 
__NVOC_CLASS_OBJOFA_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJOFA +#define __nvoc_class_id_OBJOFA 0xdd7bab +#endif /* __nvoc_class_id_OBJOFA */ + + +struct KernelIoctrl; + +#ifndef __NVOC_CLASS_KernelIoctrl_TYPEDEF__ +#define __NVOC_CLASS_KernelIoctrl_TYPEDEF__ +typedef struct KernelIoctrl KernelIoctrl; +#endif /* __NVOC_CLASS_KernelIoctrl_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelIoctrl +#define __nvoc_class_id_KernelIoctrl 0x880c7d +#endif /* __nvoc_class_id_KernelIoctrl */ + + +struct Ioctrl; + +#ifndef __NVOC_CLASS_Ioctrl_TYPEDEF__ +#define __NVOC_CLASS_Ioctrl_TYPEDEF__ +typedef struct Ioctrl Ioctrl; +#endif /* __NVOC_CLASS_Ioctrl_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Ioctrl +#define __nvoc_class_id_Ioctrl 0x11ce10 +#endif /* __nvoc_class_id_Ioctrl */ + + +struct KernelNvdec; + +#ifndef __NVOC_CLASS_KernelNvdec_TYPEDEF__ +#define __NVOC_CLASS_KernelNvdec_TYPEDEF__ +typedef struct KernelNvdec KernelNvdec; +#endif /* __NVOC_CLASS_KernelNvdec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelNvdec +#define __nvoc_class_id_KernelNvdec 0xaba9df +#endif /* __nvoc_class_id_KernelNvdec */ + + +struct KernelSec2; + +#ifndef __NVOC_CLASS_KernelSec2_TYPEDEF__ +#define __NVOC_CLASS_KernelSec2_TYPEDEF__ +typedef struct KernelSec2 KernelSec2; +#endif /* __NVOC_CLASS_KernelSec2_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSec2 +#define __nvoc_class_id_KernelSec2 0x2f36c9 +#endif /* __nvoc_class_id_KernelSec2 */ + + +struct KernelGsp; + +#ifndef __NVOC_CLASS_KernelGsp_TYPEDEF__ +#define __NVOC_CLASS_KernelGsp_TYPEDEF__ +typedef struct KernelGsp KernelGsp; +#endif /* __NVOC_CLASS_KernelGsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGsp +#define __nvoc_class_id_KernelGsp 0x311d4e +#endif /* __nvoc_class_id_KernelGsp */ + + +struct OBJDCECLIENTRM; + +#ifndef __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +#define __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ +typedef struct OBJDCECLIENTRM OBJDCECLIENTRM; +#endif /* __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCECLIENTRM +#define __nvoc_class_id_OBJDCECLIENTRM 0x61649c +#endif /* __nvoc_class_id_OBJDCECLIENTRM */ + + +struct OBJDISPMACRO; + +#ifndef __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ +#define __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ +typedef struct OBJDISPMACRO OBJDISPMACRO; +#endif /* __NVOC_CLASS_OBJDISPMACRO_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISPMACRO +#define __nvoc_class_id_OBJDISPMACRO 0xa1cad2 +#endif /* __nvoc_class_id_OBJDISPMACRO */ + + +struct OBJNNE; + +#ifndef __NVOC_CLASS_OBJNNE_TYPEDEF__ +#define __NVOC_CLASS_OBJNNE_TYPEDEF__ +typedef struct OBJNNE OBJNNE; +#endif /* __NVOC_CLASS_OBJNNE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJNNE +#define __nvoc_class_id_OBJNNE 0xc7f0f8 +#endif /* __nvoc_class_id_OBJNNE */ + + +struct Smbpbi; + +#ifndef __NVOC_CLASS_Smbpbi_TYPEDEF__ +#define __NVOC_CLASS_Smbpbi_TYPEDEF__ +typedef struct Smbpbi Smbpbi; +#endif /* __NVOC_CLASS_Smbpbi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Smbpbi +#define __nvoc_class_id_Smbpbi 0x884e68 +#endif /* __nvoc_class_id_Smbpbi */ + + +struct OBJDSI; + +#ifndef __NVOC_CLASS_OBJDSI_TYPEDEF__ +#define __NVOC_CLASS_OBJDSI_TYPEDEF__ +typedef struct OBJDSI OBJDSI; +#endif /* __NVOC_CLASS_OBJDSI_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDSI +#define __nvoc_class_id_OBJDSI 0x2e9a64 +#endif /* __nvoc_class_id_OBJDSI */ + + +struct OBJDCB; + +#ifndef __NVOC_CLASS_OBJDCB_TYPEDEF__ +#define __NVOC_CLASS_OBJDCB_TYPEDEF__ +typedef struct OBJDCB OBJDCB; +#endif /* __NVOC_CLASS_OBJDCB_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDCB +#define __nvoc_class_id_OBJDCB 0xf931d4 
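+// Worked example of the descriptor encoding (assuming classId(x) expands to
+// the __nvoc_class_id_x constant declared above, with ENGDESC_CLASS in bits
+// 31:8 and ENGDESC_INST in bits 7:0 as defined earlier in this header):
+//
+//   ENG_CE(1) == MKENGDESC(classId(OBJCE), 1)
+//             == (0x793ceb << 8) | 1
+//             == 0x793ceb01
+//
+//   ENGDESC_FIELD(0x793ceb01, _CLASS) == 0x793ceb   -> IS_CE() is true
+//   ENGDESC_FIELD(0x793ceb01, _INST)  == 1          -> GET_CE_IDX() is 1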
+#endif /* __nvoc_class_id_OBJDCB */ + + +struct KernelGmmu; + +#ifndef __NVOC_CLASS_KernelGmmu_TYPEDEF__ +#define __NVOC_CLASS_KernelGmmu_TYPEDEF__ +typedef struct KernelGmmu KernelGmmu; +#endif /* __NVOC_CLASS_KernelGmmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGmmu +#define __nvoc_class_id_KernelGmmu 0x29362f +#endif /* __nvoc_class_id_KernelGmmu */ + + +struct OBJGMMU; + +#ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__ +#define __NVOC_CLASS_OBJGMMU_TYPEDEF__ +typedef struct OBJGMMU OBJGMMU; +#endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGMMU +#define __nvoc_class_id_OBJGMMU 0xd7a41d +#endif /* __nvoc_class_id_OBJGMMU */ + + +// +// Engine tags to be used by both RM/HAL to reference specific engines. +// +// These values are used in the engine descriptor table +// as well as in the class descriptor table. +// +#define ENG_INVALID MKENGDESC(classId(OBJINVALID), 0) +#define ENG_SW MKENGDESC(classId(OBJSWENG), 0) +#define ENG_GPU MKENGDESC(classId(OBJGPU), 0) +#define ENG_FLCN MKENGDESC(classId(Falcon), 0) +#define ENG_MC MKENGDESC(classId(OBJMC), 0) +#define ENG_KERNEL_MC MKENGDESC(classId(KernelMc), 0) +#define ENG_PRIV_RING MKENGDESC(classId(PrivRing), 0) +#define ENG_SW_INTR MKENGDESC(classId(SwIntr), 0) +#define ENG_MEMORY_SYSTEM MKENGDESC(classId(MemorySystem), 0) +#define ENG_KERNEL_MEMORY_SYSTEM MKENGDESC(classId(KernelMemorySystem), 0) +#define ENG_MEMORY_MANAGER MKENGDESC(classId(MemoryManager), 0) +#define ENG_FBFLCN MKENGDESC(classId(OBJFBFLCN), 0) +#define ENG_TMR MKENGDESC(classId(OBJTMR), 0) +#define ENG_DMA MKENGDESC(classId(VirtMemAllocator), 0) +#define ENG_KERNEL_FIFO MKENGDESC(classId(KernelFifo), 0) +#define ENG_FIFO MKENGDESC(classId(OBJFIFO), 0) +#define ENG_OS MKENGDESC(classId(OBJOS), 0) +#define ENG_BUS MKENGDESC(classId(OBJBUS), 0) +#define ENG_KERNEL_BUS MKENGDESC(classId(KernelBus), 0) +#define ENG_INFOROM MKENGDESC(classId(OBJINFOROM), 0) +#define ENG_PERF MKENGDESC(classId(Perf), 0) +#define ENG_KERNEL_PERF MKENGDESC(classId(KernelPerf), 0) +#define ENG_BIF MKENGDESC(classId(OBJBIF), 0) +#define ENG_KERNEL_BIF MKENGDESC(classId(KernelBif), 0) +#define ENG_SF MKENGDESC(classId(OBJSF), 0) +#define ENG_GPIO MKENGDESC(classId(OBJGPIO), 0) +#define ENG_CLK MKENGDESC(classId(ClockManager), 0) +#define ENG_KERNEL_DISPLAY MKENGDESC(classId(KernelDisplay), 0) +#define ENG_DISP MKENGDESC(classId(OBJDISP), 0) +#define ENG_DPU MKENGDESC(classId(OBJDPU), 0) +#define ENG_FAN MKENGDESC(classId(OBJFAN), 0) +#define ENG_INST MKENGDESC(classId(DisplayInstanceMemory), 0) +#define ENG_KERNEL_HEAD MKENGDESC(classId(KernelHead), 0) +#define ENG_VOLT MKENGDESC(classId(OBJVOLT), 0) +#define ENG_INTR MKENGDESC(classId(Intr), 0) +#define ENG_HDA MKENGDESC(classId(OBJHDA), 0) +#define ENG_I2C MKENGDESC(classId(OBJI2C), 0) +#define ENG_KERNEL_RC MKENGDESC(classId(KernelRc), 0) +#define ENG_RC MKENGDESC(classId(OBJRC), 0) +#define ENG_SOR MKENGDESC(classId(OBJSOR), 0) +#define ENG_DAC MKENGDESC(classId(OBJDAC), 0) +#define ENG_PIOR MKENGDESC(classId(OBJPIOR), 0) +#define ENG_HEAD MKENGDESC(classId(OBJHEAD), 0) +#define ENG_VGA MKENGDESC(classId(OBJVGA), 0) +#define ENG_STEREO MKENGDESC(classId(OBJSTEREO), 0) +#define ENG_OR MKENGDESC(classId(OBJOR), 0) +#define ENG_BSP MKENGDESC(classId(OBJBSP), 0) +#define ENG_CIPHER MKENGDESC(classId(OBJCIPHER), 0) +#define ENG_FUSE MKENGDESC(classId(OBJFUSE), 0) +#define ENG_HDCP MKENGDESC(classId(OBJHDCP), 0) +#define ENG_HDMI MKENGDESC(classId(OBJHDMI), 0) +#define ENG_THERM MKENGDESC(classId(Therm), 0) +#define ENG_SEQ 
MKENGDESC(classId(OBJSEQ), 0) +#define ENG_DPAUX MKENGDESC(classId(OBJDPAUX), 0) +#define ENG_PMU MKENGDESC(classId(Pmu), 0) +#define ENG_KERNEL_PMU MKENGDESC(classId(KernelPmu), 0) +#define ENG_LPWR MKENGDESC(classId(Lpwr), 0) +#define ENG_ISOHUB MKENGDESC(classId(OBJISOHUB), 0) +#define ENG_PMGR MKENGDESC(classId(Pmgr), 0) +#define ENG_HDACODEC MKENGDESC(classId(OBJHDACODEC), 0) +#define ENG_SPI MKENGDESC(classId(Spi), 0) +#define ENG_UVM MKENGDESC(classId(OBJUVM), 0) +#define ENG_SEC2 MKENGDESC(classId(OBJSEC2), 0) +#define ENG_PMS MKENGDESC(classId(OBJPMS), 0) +#define ENG_ENGSTATE MKENGDESC(classId(OBJENGSTATE), 0) +#define ENG_LSFM MKENGDESC(classId(OBJLSFM), 0) +#define ENG_ACR MKENGDESC(classId(OBJACR), 0) +#define ENG_GPULOG MKENGDESC(classId(OBJGPULOG), 0) +#define ENG_NVLINK MKENGDESC(classId(Nvlink), 0) +#define ENG_HWPM MKENGDESC(classId(OBJHWPM), 0) +#define ENG_GPUMON MKENGDESC(classId(OBJGPUMON), 0) +#define ENG_GRIDDISPLAYLESS MKENGDESC(classId(OBJGRIDDISPLAYLESS), 0) +#define ENG_VMMU MKENGDESC(classId(OBJVMMU), 0) +#define ENG_NVJPG MKENGDESC(classId(OBJNVJPG), 0) +#define ENG_GSP MKENGDESC(classId(Gsp), 0) +#define ENG_FSP MKENGDESC(classId(OBJFSP), 0) +#define ENG_KERNEL_FSP MKENGDESC(classId(KernelFsp), 0) +#define ENG_OFA MKENGDESC(classId(OBJOFA), 0) +#define ENG_KERNEL_GSP MKENGDESC(classId(KernelGsp), 0) +#define ENG_KERNEL_NVDEC MKENGDESC(classId(KernelNvdec), 0) +#define ENG_KERNEL_SEC2 MKENGDESC(classId(KernelSec2), 0) +#define ENG_DISPMACRO MKENGDESC(classId(OBJDISPMACRO), 0) +#define ENG_NNE MKENGDESC(classId(OBJNNE), 0) +#define ENG_SMBPBI MKENGDESC(classId(Smbpbi), 0) +#define ENG_DSI MKENGDESC(classId(OBJDSI), 0) +#define ENG_DCECLIENTRM MKENGDESC(classId(OBJDCECLIENTRM), 0) +#define ENG_DCB MKENGDESC(classId(OBJDCB), 0) +#define ENG_KERNEL_NVLINK MKENGDESC(classId(KernelNvlink), 0) +#define ENG_GMMU MKENGDESC(classId(OBJGMMU), 0) +#define ENG_KERNEL_GMMU MKENGDESC(classId(KernelGmmu), 0) + +// Indexed CE engine tag reference +#define ENG_CE(x) MKENGDESC(classId(OBJCE), x) +#define ENG_CE__SIZE_1 10 +#define IS_CE(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJCE)) +#define GET_CE_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed MSENC Engine Tag Reference +#define ENG_MSENC(x) MKENGDESC(classId(OBJMSENC), x) +#define ENG_MSENC__SIZE_1 3 +#define IS_MSENC(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJMSENC)) +#define GET_MSENC_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed BSP/NVDEC Engine Tag Reference +#define ENG_NVDEC(x) MKENGDESC(classId(OBJBSP), x) +#define ENG_NVDEC__SIZE_1 5 +#define IS_NVDEC(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJBSP)) +#define GET_NVDEC_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed GR engine tag reference +#define ENG_GR(x) MKENGDESC(classId(Graphics), x) +#define ENG_GR__SIZE_1 8 +#define IS_GR(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(Graphics)) +#define GET_GR_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed NVJPEG Engine Tag Reference +#define ENG_NVJPEG(x) MKENGDESC(classId(OBJNVJPG), x) +#define ENG_NVJPEG__SIZE_1 1 +#define IS_NVJPEG(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJNVJPG)) +#define GET_NVJPEG_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed FECS engine tag reference +#define ENG_FECS(x) MKENGDESC(classId(FECS), x) +#define ENG_FECS__SIZE_1 8 +#define IS_FECS(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(FECS)) +#define GET_FECS_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed GPCCS engine tag 
reference +#define ENG_GPCCS(x) MKENGDESC(classId(GPCCS), x) +#define ENG_GPCCS__SIZE_1 8 +#define IS_GPCCS(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(GPCCS)) +#define GET_GPCCS_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed IOCTRL engine tag reference +#define ENG_IOCTRL(x) MKENGDESC(classId(Ioctrl), x) +#define ENG_IOCTRL__SIZE_1 3 +#define IS_IOCTRL(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(Ioctrl)) +#define GET_IOCTRL_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed HSHUB engine tag reference +#define ENG_HSHUB(x) MKENGDESC(classId(OBJHSHUB), x) +#define ENG_HSHUB__SIZE_1 5 +#define IS_HSHUB(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(OBJHSHUB)) +#define GET_HSHUB_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +// Indexed IOCTRL engine tag reference +#define ENG_KERNEL_IOCTRL(x) MKENGDESC(classId(KernelIoctrl), x) +#define ENG_KERNEL_IOCTRL__SIZE_1 3 +#define IS_KERNEL_IOCTRL(engDesc) (ENGDESC_FIELD(engDesc, _CLASS) == classId(KernelIoctrl)) +#define GET_KERNEL_IOCTRL_IDX(engDesc) ENGDESC_FIELD(engDesc, _INST) + +#endif // _ENG_DESC_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_ENG_DESC_NVOC_H_ diff --git a/src/nvidia/generated/g_eng_state_nvoc.c b/src/nvidia/generated/g_eng_state_nvoc.c new file mode 100644 index 000000000..6ed668adb --- /dev/null +++ b/src/nvidia/generated/g_eng_state_nvoc.c @@ -0,0 +1,189 @@ +#define NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_eng_state_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7a7ed6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE*); +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJENGSTATE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJENGSTATE_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJENGSTATE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJENGSTATE_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJENGSTATE, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJENGSTATE = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJENGSTATE_OBJENGSTATE, + &__nvoc_rtti_OBJENGSTATE_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJENGSTATE), + /*classId=*/ classId(OBJENGSTATE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJENGSTATE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJENGSTATE, + /*pCastInfo=*/ &__nvoc_castinfo_OBJENGSTATE, + /*pExportInfo=*/ &__nvoc_export_info_OBJENGSTATE +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJENGSTATE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE *pThis) { + __nvoc_engstateDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + 
PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJENGSTATE_fail_Object; + __nvoc_init_dataField_OBJENGSTATE(pThis); + goto __nvoc_ctor_OBJENGSTATE_exit; // Success + +__nvoc_ctor_OBJENGSTATE_fail_Object: +__nvoc_ctor_OBJENGSTATE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJENGSTATE_1(OBJENGSTATE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__engstateConstructEngine__ = &engstateConstructEngine_IMPL; + + pThis->__engstateInitMissing__ = &engstateInitMissing_IMPL; + + pThis->__engstateStatePreInitLocked__ = &engstateStatePreInitLocked_IMPL; + + pThis->__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL; + + pThis->__engstateStateInitLocked__ = &engstateStateInitLocked_IMPL; + + pThis->__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL; + + pThis->__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL; + + pThis->__engstateStateLoad__ = &engstateStateLoad_IMPL; + + pThis->__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL; + + pThis->__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL; + + pThis->__engstateStateUnload__ = &engstateStateUnload_IMPL; + + pThis->__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL; + + pThis->__engstateStateDestroy__ = &engstateStateDestroy_IMPL; + + pThis->__engstateAllocTunableState__ = &engstateAllocTunableState_IMPL; + + pThis->__engstateFreeTunableState__ = &engstateFreeTunableState_IMPL; + + pThis->__engstateGetTunableState__ = &engstateGetTunableState_IMPL; + + pThis->__engstateSetTunableState__ = &engstateSetTunableState_IMPL; + + pThis->__engstateReconcileTunableState__ = &engstateReconcileTunableState_IMPL; + + pThis->__engstateCompareTunableState__ = &engstateCompareTunableState_IMPL; + + pThis->__engstateIsPresent__ = &engstateIsPresent_IMPL; +} + +void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE *pThis) { + __nvoc_init_funcTable_OBJENGSTATE_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJENGSTATE(OBJENGSTATE *pThis) { + pThis->__nvoc_pbase_OBJENGSTATE = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJENGSTATE(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJENGSTATE *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJENGSTATE)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJENGSTATE)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJENGSTATE); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJENGSTATE(pThis); + status = __nvoc_ctor_OBJENGSTATE(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJENGSTATE_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJENGSTATE_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS 
__nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJENGSTATE(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_eng_state_nvoc.h b/src/nvidia/generated/g_eng_state_nvoc.h new file mode 100644 index 000000000..f9853bde6 --- /dev/null +++ b/src/nvidia/generated/g_eng_state_nvoc.h @@ -0,0 +1,385 @@ +#ifndef _G_ENG_STATE_NVOC_H_ +#define _G_ENG_STATE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_eng_state_nvoc.h" + +#ifndef _ENG_STATE_H_ +#define _ENG_STATE_H_ + +/*! + * @file eng_state.h + * @brief Provides definitions for all OBJENGSTATE data structures and interfaces. + */ + +#include "core/core.h" +#include "gpu/eng_desc.h" + +typedef enum ENGSTATE_STATE +{ + ENGSTATE_STATE_UNDEFINED = 0, + ENGSTATE_STATE_CONSTRUCT, + ENGSTATE_STATE_PRE_INIT, + ENGSTATE_STATE_INIT, + ENGSTATE_STATE_PRE_LOAD, + ENGSTATE_STATE_LOAD, + ENGSTATE_STATE_POST_LOAD, + ENGSTATE_STATE_PRE_UNLOAD, + ENGSTATE_STATE_UNLOAD, + ENGSTATE_STATE_POST_UNLOAD, + ENGSTATE_STATE_DESTROY, + ENGSTATE_STATE_COUNT // Keep this last +} ENGSTATE_STATE; + +// Stats data stored for every state transition +typedef struct ENGSTATE_STATS +{ + NvS32 memoryAllocCount; + NvS32 memoryAllocSize; + NvU32 transitionTimeUs; +} ENGSTATE_STATS; + +// Temporary transition data, not stored +typedef struct ENGSTATE_TRANSITION_DATA +{ + NvS64 memoryAllocCount; + NvS64 memoryAllocSize; + NvU64 transitionStartTimeNs; +} ENGSTATE_TRANSITION_DATA; + +typedef struct OBJENGSTATE *POBJENGSTATE; + +#define ENG_GET_FIFO(p) (engstateGetFifo(staticCast((p), OBJENGSTATE))) +#define ENG_GET_ENG_DESC(p) (staticCast((p), OBJENGSTATE)->engDesc) + + +/*! + * Defines the structure used to contain all generic information related to + * the OBJENGSTATE. 
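+ *
+ * The generated layout below embeds the base class (Object), keeps RTTI and
+ * per-base back-pointers, and stores one function pointer per virtual
+ * interface (__engstateStateInitLocked__, __engstateStateLoad__, ...).  The
+ * engstate*_DISPATCH() inline wrappers further down call through these
+ * pointers; __nvoc_init_funcTable_OBJENGSTATE() in g_eng_state_nvoc.c points
+ * them at the engstate*_IMPL() defaults.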
+ */ +#ifdef NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJENGSTATE { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + NV_STATUS (*__engstateConstructEngine__)(POBJGPU, POBJENGSTATE, ENGDESCRIPTOR); + void (*__engstateInitMissing__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStatePreInitLocked__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStatePreInitUnlocked__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStateInitLocked__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStateInitUnlocked__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateStatePreLoad__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStateLoad__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStatePostLoad__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStatePreUnload__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStateUnload__)(POBJGPU, POBJENGSTATE, NvU32); + NV_STATUS (*__engstateStatePostUnload__)(POBJGPU, POBJENGSTATE, NvU32); + void (*__engstateStateDestroy__)(POBJGPU, POBJENGSTATE); + NV_STATUS (*__engstateAllocTunableState__)(POBJGPU, POBJENGSTATE, void **); + void (*__engstateFreeTunableState__)(POBJGPU, POBJENGSTATE, void *); + NV_STATUS (*__engstateGetTunableState__)(POBJGPU, POBJENGSTATE, void *); + NV_STATUS (*__engstateSetTunableState__)(POBJGPU, POBJENGSTATE, void *); + NV_STATUS (*__engstateReconcileTunableState__)(POBJGPU, POBJENGSTATE, void *); + NV_STATUS (*__engstateCompareTunableState__)(POBJGPU, POBJENGSTATE, void *, void *); + NvBool (*__engstateIsPresent__)(POBJGPU, POBJENGSTATE); + NvBool PDB_PROP_ENGSTATE_IS_MISSING; + ENGDESCRIPTOR engDesc; + void *pOriginalTunableState; + struct OBJGPU *pGpu; + ENGSTATE_STATE currentState; + ENGSTATE_STATS stats[11]; + char name[100]; +}; + +#ifndef __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +#define __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ +typedef struct OBJENGSTATE OBJENGSTATE; +#endif /* __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJENGSTATE +#define __nvoc_class_id_OBJENGSTATE 0x7a7ed6 +#endif /* __nvoc_class_id_OBJENGSTATE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +#define __staticCast_OBJENGSTATE(pThis) \ + ((pThis)->__nvoc_pbase_OBJENGSTATE) + +#ifdef __nvoc_eng_state_h_disabled +#define __dynamicCast_OBJENGSTATE(pThis) ((OBJENGSTATE*)NULL) +#else //__nvoc_eng_state_h_disabled +#define __dynamicCast_OBJENGSTATE(pThis) \ + ((OBJENGSTATE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJENGSTATE))) +#endif //__nvoc_eng_state_h_disabled + +#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_CAST +#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32); +#define __objCreate_OBJENGSTATE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJENGSTATE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define engstateConstructEngine(pGpu, pEngstate, arg0) engstateConstructEngine_DISPATCH(pGpu, pEngstate, arg0) +#define engstateInitMissing(pGpu, pEngstate) engstateInitMissing_DISPATCH(pGpu, pEngstate) +#define engstateStatePreInitLocked(pGpu, pEngstate) engstateStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define engstateStatePreInitUnlocked(pGpu, pEngstate) 
engstateStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define engstateStateInitLocked(pGpu, pEngstate) engstateStateInitLocked_DISPATCH(pGpu, pEngstate) +#define engstateStateInitUnlocked(pGpu, pEngstate) engstateStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define engstateStatePreLoad(pGpu, pEngstate, arg0) engstateStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStateLoad(pGpu, pEngstate, arg0) engstateStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStatePostLoad(pGpu, pEngstate, arg0) engstateStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStatePreUnload(pGpu, pEngstate, arg0) engstateStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStateUnload(pGpu, pEngstate, arg0) engstateStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStatePostUnload(pGpu, pEngstate, arg0) engstateStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define engstateStateDestroy(pGpu, pEngstate) engstateStateDestroy_DISPATCH(pGpu, pEngstate) +#define engstateAllocTunableState(pGpu, pEngstate, ppTunableState) engstateAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define engstateFreeTunableState(pGpu, pEngstate, pTunableState) engstateFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define engstateGetTunableState(pGpu, pEngstate, pTunableState) engstateGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define engstateSetTunableState(pGpu, pEngstate, pTunableState) engstateSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define engstateReconcileTunableState(pGpu, pEngstate, pTunableState) engstateReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define engstateCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) engstateCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define engstateIsPresent(pGpu, pEngstate) engstateIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS engstateConstructEngine_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, ENGDESCRIPTOR arg0); + +static inline NV_STATUS engstateConstructEngine_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, ENGDESCRIPTOR arg0) { + return pEngstate->__engstateConstructEngine__(pGpu, pEngstate, arg0); +} + +void engstateInitMissing_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline void engstateInitMissing_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + pEngstate->__engstateInitMissing__(pGpu, pEngstate); +} + +NV_STATUS engstateStatePreInitLocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NV_STATUS engstateStatePreInitLocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateStatePreInitLocked__(pGpu, pEngstate); +} + +NV_STATUS engstateStatePreInitUnlocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NV_STATUS engstateStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateStatePreInitUnlocked__(pGpu, pEngstate); +} + +NV_STATUS engstateStateInitLocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NV_STATUS engstateStateInitLocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateStateInitLocked__(pGpu, pEngstate); +} + +NV_STATUS engstateStateInitUnlocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NV_STATUS engstateStateInitUnlocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateStateInitUnlocked__(pGpu, pEngstate); +} + +NV_STATUS engstateStatePreLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static 
inline NV_STATUS engstateStatePreLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStatePreLoad__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStateLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStateLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStateLoad__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStatePostLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStatePostLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStatePostLoad__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStatePreUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStatePreUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStatePreUnload__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStateUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStateUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStateUnload__(pGpu, pEngstate, arg0); +} + +NV_STATUS engstateStatePostUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0); + +static inline NV_STATUS engstateStatePostUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) { + return pEngstate->__engstateStatePostUnload__(pGpu, pEngstate, arg0); +} + +void engstateStateDestroy_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline void engstateStateDestroy_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + pEngstate->__engstateStateDestroy__(pGpu, pEngstate); +} + +NV_STATUS engstateAllocTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void **ppTunableState); + +static inline NV_STATUS engstateAllocTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void **ppTunableState) { + return pEngstate->__engstateAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +void engstateFreeTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState); + +static inline void engstateFreeTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) { + pEngstate->__engstateFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +NV_STATUS engstateGetTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState); + +static inline NV_STATUS engstateGetTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) { + return pEngstate->__engstateGetTunableState__(pGpu, pEngstate, pTunableState); +} + +NV_STATUS engstateSetTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState); + +static inline NV_STATUS engstateSetTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) { + return pEngstate->__engstateSetTunableState__(pGpu, pEngstate, pTunableState); +} + +NV_STATUS engstateReconcileTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState); + +static inline NV_STATUS engstateReconcileTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) { + return pEngstate->__engstateReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +NV_STATUS engstateCompareTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunables1, void *pTunables2); + +static inline NV_STATUS engstateCompareTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunables1, void 
*pTunables2) { + return pEngstate->__engstateCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +NvBool engstateIsPresent_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); + +static inline NvBool engstateIsPresent_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) { + return pEngstate->__engstateIsPresent__(pGpu, pEngstate); +} + +NV_STATUS engstateConstructBase_IMPL(struct OBJENGSTATE *arg0, struct OBJGPU *arg1, ENGDESCRIPTOR arg2); +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateConstructBase(struct OBJENGSTATE *arg0, struct OBJGPU *arg1, ENGDESCRIPTOR arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateConstructBase(arg0, arg1, arg2) engstateConstructBase_IMPL(arg0, arg1, arg2) +#endif //__nvoc_eng_state_h_disabled + +void engstateLogStateTransitionPre_IMPL(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2); +#ifdef __nvoc_eng_state_h_disabled +static inline void engstateLogStateTransitionPre(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); +} +#else //__nvoc_eng_state_h_disabled +#define engstateLogStateTransitionPre(arg0, arg1, arg2) engstateLogStateTransitionPre_IMPL(arg0, arg1, arg2) +#endif //__nvoc_eng_state_h_disabled + +void engstateLogStateTransitionPost_IMPL(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2); +#ifdef __nvoc_eng_state_h_disabled +static inline void engstateLogStateTransitionPost(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); +} +#else //__nvoc_eng_state_h_disabled +#define engstateLogStateTransitionPost(arg0, arg1, arg2) engstateLogStateTransitionPost_IMPL(arg0, arg1, arg2) +#endif //__nvoc_eng_state_h_disabled + +const char *engstateGetName_IMPL(struct OBJENGSTATE *arg0); +#ifdef __nvoc_eng_state_h_disabled +static inline const char *engstateGetName(struct OBJENGSTATE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NULL; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetName(arg0) engstateGetName_IMPL(arg0) +#endif //__nvoc_eng_state_h_disabled + +void engstateDestruct_IMPL(POBJENGSTATE pEngstate); +#define __nvoc_engstateDestruct(pEngstate) engstateDestruct_IMPL(pEngstate) +NV_STATUS engstateStatePreInit_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateStatePreInit(POBJGPU pGpu, POBJENGSTATE pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateStatePreInit(pGpu, pEngstate) engstateStatePreInit_IMPL(pGpu, pEngstate) +#endif //__nvoc_eng_state_h_disabled + +NV_STATUS engstateStateInit_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate); +#ifdef __nvoc_eng_state_h_disabled +static inline NV_STATUS engstateStateInit(POBJGPU pGpu, POBJENGSTATE pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_eng_state_h_disabled +#define engstateStateInit(pGpu, pEngstate) engstateStateInit_IMPL(pGpu, pEngstate) +#endif //__nvoc_eng_state_h_disabled + +ENGDESCRIPTOR engstateGetDescriptor_IMPL(POBJENGSTATE pEngstate); +#ifdef __nvoc_eng_state_h_disabled +static inline ENGDESCRIPTOR engstateGetDescriptor(POBJENGSTATE pEngstate) { + 
NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + ENGDESCRIPTOR ret; + portMemSet(&ret, 0, sizeof(ENGDESCRIPTOR)); + return ret; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetDescriptor(pEngstate) engstateGetDescriptor_IMPL(pEngstate) +#endif //__nvoc_eng_state_h_disabled + +struct OBJFIFO *engstateGetFifo_IMPL(POBJENGSTATE pEngstate); +#ifdef __nvoc_eng_state_h_disabled +static inline struct OBJFIFO *engstateGetFifo(POBJENGSTATE pEngstate) { + NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!"); + return NULL; +} +#else //__nvoc_eng_state_h_disabled +#define engstateGetFifo(pEngstate) engstateGetFifo_IMPL(pEngstate) +#endif //__nvoc_eng_state_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _ENG_STATE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_ENG_STATE_NVOC_H_ diff --git a/src/nvidia/generated/g_engines_pb.c b/src/nvidia/generated/g_engines_pb.c new file mode 100644 index 000000000..a763ad65c --- /dev/null +++ b/src/nvidia/generated/g_engines_pb.c @@ -0,0 +1,298 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#include "nvtypes.h" +#include "prbrt.h" +#include "g_engines_pb.h" + +// 'Mc' field defaults + +// 'Mc' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_eng_mc[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_ENG_MC_RMDATA, + 0, + PRB_MAYBE_FIELD_NAME("rm_data") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_ENG_MC_PCIBARINFO, + 0, + PRB_MAYBE_FIELD_NAME("pci_bars") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + REGS_REGSANDMEM, + 0, + PRB_MAYBE_FIELD_NAME("regs") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'Gpu' field defaults + +// 'Gpu' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_eng_gpu[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_sli") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_notebook") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_virtual") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_full_power") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_in_fullchip_reset") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 7, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_in_sec_bus_reset") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 8, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_in_gc6_reset") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 9, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_suspended") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 10, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_lost") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 11, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_accessible") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 37, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + REGS_REGSANDMEM, + 0, + PRB_MAYBE_FIELD_NAME("regs") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'Nvd' field defaults + +// 'Nvd' field descriptors +const PRB_FIELD_DESC 
prb_fields_nvdebug_eng_nvd[] = { + { + 1, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + REGS_REGSANDMEM, + 0, + PRB_MAYBE_FIELD_NAME("regs") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'RmData' field defaults + +// 'RmData' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_eng_mc_rmdata[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("pmcBoot0") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'PciBarInfo' field defaults + +// 'PciBarInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_eng_mc_pcibarinfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("offset") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("length") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// Message descriptors +const PRB_MSG_DESC prb_messages_nvdebug_eng[] = { + { + 3, + prb_fields_nvdebug_eng_mc, + PRB_MAYBE_MESSAGE_NAME("NvDebug.Eng.Mc") + }, + { + 12, + prb_fields_nvdebug_eng_gpu, + PRB_MAYBE_MESSAGE_NAME("NvDebug.Eng.Gpu") + }, + { + 1, + prb_fields_nvdebug_eng_nvd, + PRB_MAYBE_MESSAGE_NAME("NvDebug.Eng.Nvd") + }, + { + 1, + prb_fields_nvdebug_eng_mc_rmdata, + PRB_MAYBE_MESSAGE_NAME("NvDebug.Eng.Mc.RmData") + }, + { + 2, + prb_fields_nvdebug_eng_mc_pcibarinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.Eng.Mc.PciBarInfo") + }, +}; + +// Service descriptors +const PRB_SERVICE_DESC prb_services_nvdebug_eng[] = { + { 0 } +}; + diff --git a/src/nvidia/generated/g_engines_pb.h b/src/nvidia/generated/g_engines_pb.h new file mode 100644 index 000000000..7159a5656 --- /dev/null +++ b/src/nvidia/generated/g_engines_pb.h @@ -0,0 +1,97 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#ifndef G_ENGINES_PB_H__ +#define G_ENGINES_PB_H__ + +#include "g_regs_pb.h" + +extern const PRB_MSG_DESC prb_messages_nvdebug_eng[]; + +// Message descriptor pointers +#define NVDEBUG_ENG_MC (&prb_messages_nvdebug_eng[0]) +#define NVDEBUG_ENG_GPU (&prb_messages_nvdebug_eng[1]) +#define NVDEBUG_ENG_NVD (&prb_messages_nvdebug_eng[2]) +#define NVDEBUG_ENG_MC_RMDATA (&prb_messages_nvdebug_eng[3]) +#define NVDEBUG_ENG_MC_PCIBARINFO (&prb_messages_nvdebug_eng[4]) + +// Message maximum lengths +// Does not include repeated fields, strings and byte arrays. 
+#define NVDEBUG_ENG_MC_LEN 66 +#define NVDEBUG_ENG_GPU_LEN 56 +#define NVDEBUG_ENG_NVD_LEN 30 +#define NVDEBUG_ENG_MC_RMDATA_LEN 6 +#define NVDEBUG_ENG_MC_PCIBARINFO_LEN 22 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_eng_mc[]; + +// 'Mc' field descriptor pointers +#define NVDEBUG_ENG_MC_RM_DATA (&prb_fields_nvdebug_eng_mc[0]) +#define NVDEBUG_ENG_MC_PCI_BARS (&prb_fields_nvdebug_eng_mc[1]) +#define NVDEBUG_ENG_MC_REGS (&prb_fields_nvdebug_eng_mc[2]) + +// 'Mc' field lengths +#define NVDEBUG_ENG_MC_RM_DATA_LEN 9 +#define NVDEBUG_ENG_MC_PCI_BARS_LEN 25 +#define NVDEBUG_ENG_MC_REGS_LEN 29 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_eng_gpu[]; + +// 'Gpu' field descriptor pointers +#define NVDEBUG_ENG_GPU_GPU_ID (&prb_fields_nvdebug_eng_gpu[0]) +#define NVDEBUG_ENG_GPU_IS_SLI (&prb_fields_nvdebug_eng_gpu[1]) +#define NVDEBUG_ENG_GPU_IS_NOTEBOOK (&prb_fields_nvdebug_eng_gpu[2]) +#define NVDEBUG_ENG_GPU_IS_VIRTUAL (&prb_fields_nvdebug_eng_gpu[3]) +#define NVDEBUG_ENG_GPU_IS_FULL_POWER (&prb_fields_nvdebug_eng_gpu[4]) +#define NVDEBUG_ENG_GPU_IS_IN_FULLCHIP_RESET (&prb_fields_nvdebug_eng_gpu[5]) +#define NVDEBUG_ENG_GPU_IS_IN_SEC_BUS_RESET (&prb_fields_nvdebug_eng_gpu[6]) +#define NVDEBUG_ENG_GPU_IS_IN_GC6_RESET (&prb_fields_nvdebug_eng_gpu[7]) +#define NVDEBUG_ENG_GPU_IS_SUSPENDED (&prb_fields_nvdebug_eng_gpu[8]) +#define NVDEBUG_ENG_GPU_IS_LOST (&prb_fields_nvdebug_eng_gpu[9]) +#define NVDEBUG_ENG_GPU_IS_ACCESSIBLE (&prb_fields_nvdebug_eng_gpu[10]) +#define NVDEBUG_ENG_GPU_REGS (&prb_fields_nvdebug_eng_gpu[11]) + +// 'Gpu' field lengths +#define NVDEBUG_ENG_GPU_GPU_ID_LEN 5 +#define NVDEBUG_ENG_GPU_IS_SLI_LEN 1 +#define NVDEBUG_ENG_GPU_IS_NOTEBOOK_LEN 1 +#define NVDEBUG_ENG_GPU_IS_VIRTUAL_LEN 1 +#define NVDEBUG_ENG_GPU_IS_FULL_POWER_LEN 1 +#define NVDEBUG_ENG_GPU_IS_IN_FULLCHIP_RESET_LEN 1 +#define NVDEBUG_ENG_GPU_IS_IN_SEC_BUS_RESET_LEN 1 +#define NVDEBUG_ENG_GPU_IS_IN_GC6_RESET_LEN 1 +#define NVDEBUG_ENG_GPU_IS_SUSPENDED_LEN 1 +#define NVDEBUG_ENG_GPU_IS_LOST_LEN 1 +#define NVDEBUG_ENG_GPU_IS_ACCESSIBLE_LEN 1 +#define NVDEBUG_ENG_GPU_REGS_LEN 29 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_eng_nvd[]; + +// 'Nvd' field descriptor pointers +#define NVDEBUG_ENG_NVD_REGS (&prb_fields_nvdebug_eng_nvd[0]) + +// 'Nvd' field lengths +#define NVDEBUG_ENG_NVD_REGS_LEN 29 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_eng_mc_rmdata[]; + +// 'RmData' field descriptor pointers +#define NVDEBUG_ENG_MC_RMDATA_PMCBOOT0 (&prb_fields_nvdebug_eng_mc_rmdata[0]) + +// 'RmData' field lengths +#define NVDEBUG_ENG_MC_RMDATA_PMCBOOT0_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_eng_mc_pcibarinfo[]; + +// 'PciBarInfo' field descriptor pointers +#define NVDEBUG_ENG_MC_PCIBARINFO_OFFSET (&prb_fields_nvdebug_eng_mc_pcibarinfo[0]) +#define NVDEBUG_ENG_MC_PCIBARINFO_LENGTH (&prb_fields_nvdebug_eng_mc_pcibarinfo[1]) + +// 'PciBarInfo' field lengths +#define NVDEBUG_ENG_MC_PCIBARINFO_OFFSET_LEN 10 +#define NVDEBUG_ENG_MC_PCIBARINFO_LENGTH_LEN 10 + +extern const PRB_SERVICE_DESC prb_services_nvdebug_eng[]; + +// Service descriptor pointers + +#endif // G_ENGINES_PB_H__ diff --git a/src/nvidia/generated/g_event_buffer_nvoc.c b/src/nvidia/generated/g_event_buffer_nvoc.c new file mode 100644 index 000000000..7ac02b9bc --- /dev/null +++ b/src/nvidia/generated/g_event_buffer_nvoc.c @@ -0,0 +1,379 @@ +#define NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include 
"utils/nvassert.h" +#include "g_event_buffer_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x63502b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_EventBuffer(EventBuffer*); +void __nvoc_init_funcTable_EventBuffer(EventBuffer*); +NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_EventBuffer(EventBuffer*); +void __nvoc_dtor_EventBuffer(EventBuffer*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_EventBuffer; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_EventBuffer = { + /*pClassDef=*/ &__nvoc_class_def_EventBuffer, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_EventBuffer, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_EventBuffer = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_EventBuffer_EventBuffer, + &__nvoc_rtti_EventBuffer_RmResource, + &__nvoc_rtti_EventBuffer_RmResourceCommon, + &__nvoc_rtti_EventBuffer_RsResource, + &__nvoc_rtti_EventBuffer_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer = +{ + /*classInfo=*/ { + /*size=*/ sizeof(EventBuffer), + /*classId=*/ classId(EventBuffer), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "EventBuffer", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_EventBuffer, + /*pCastInfo=*/ &__nvoc_castinfo_EventBuffer, + /*pExportInfo=*/ &__nvoc_export_info_EventBuffer +}; + +static NvBool __nvoc_thunk_RmResource_eventbufferShareCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventbufferCheckMemInterUnmap(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS 
__nvoc_thunk_RsResource_eventbufferControl(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventbufferGetMemInterMapParams(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventbufferGetMemoryMappingDescriptor(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_eventbufferGetRefCount(struct EventBuffer *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferControlFilter(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_eventbufferAddAdditionalDependants(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferUnmap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventbufferControl_Prologue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_eventbufferCanCopy(struct EventBuffer *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferMapTo(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_eventbufferPreDestruct(struct EventBuffer *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferUnmapFrom(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_eventbufferControl_Epilogue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + 
rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferControlLookup(struct EventBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventbufferMap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_eventbufferAccessCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_EventBuffer[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdEnableEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0101u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdEnableEvent" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdUpdateGet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0102u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdUpdateGet" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdFlush_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0104u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdFlush" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cd0105u, + /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "eventbuffertBufferCtrlCmdPostTelemetryEvent" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO 
__nvoc_export_info_EventBuffer = +{ + /*numEntries=*/ 4, + /*pExportEntries=*/ __nvoc_exported_method_def_EventBuffer +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_EventBuffer(EventBuffer *pThis) { + __nvoc_eventbufferDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_EventBuffer(EventBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail_RmResource; + __nvoc_init_dataField_EventBuffer(pThis); + + status = __nvoc_eventbufferConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail__init; + goto __nvoc_ctor_EventBuffer_exit; // Success + +__nvoc_ctor_EventBuffer_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_EventBuffer_fail_RmResource: +__nvoc_ctor_EventBuffer_exit: + + return status; +} + +static void __nvoc_init_funcTable_EventBuffer_1(EventBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__eventbuffertBufferCtrlCmdEnableEvent__ = &eventbuffertBufferCtrlCmdEnableEvent_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__eventbuffertBufferCtrlCmdUpdateGet__ = &eventbuffertBufferCtrlCmdUpdateGet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__eventbuffertBufferCtrlCmdFlush__ = &eventbuffertBufferCtrlCmdFlush_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__eventbuffertBufferCtrlCmdPostTelemetryEvent__ = &eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL; +#endif + + pThis->__eventbufferShareCallback__ = &__nvoc_thunk_RmResource_eventbufferShareCallback; + + pThis->__eventbufferCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_eventbufferCheckMemInterUnmap; + + pThis->__eventbufferControl__ = &__nvoc_thunk_RsResource_eventbufferControl; + + pThis->__eventbufferGetMemInterMapParams__ = &__nvoc_thunk_RmResource_eventbufferGetMemInterMapParams; + + pThis->__eventbufferGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_eventbufferGetMemoryMappingDescriptor; + + pThis->__eventbufferGetRefCount__ = &__nvoc_thunk_RsResource_eventbufferGetRefCount; + + pThis->__eventbufferControlFilter__ = &__nvoc_thunk_RsResource_eventbufferControlFilter; + + pThis->__eventbufferAddAdditionalDependants__ = &__nvoc_thunk_RsResource_eventbufferAddAdditionalDependants; + + pThis->__eventbufferUnmap__ = &__nvoc_thunk_RsResource_eventbufferUnmap; + + pThis->__eventbufferControl_Prologue__ = &__nvoc_thunk_RmResource_eventbufferControl_Prologue; + + pThis->__eventbufferCanCopy__ = &__nvoc_thunk_RsResource_eventbufferCanCopy; + + pThis->__eventbufferMapTo__ = &__nvoc_thunk_RsResource_eventbufferMapTo; + + pThis->__eventbufferPreDestruct__ = &__nvoc_thunk_RsResource_eventbufferPreDestruct; + + pThis->__eventbufferUnmapFrom__ = &__nvoc_thunk_RsResource_eventbufferUnmapFrom; + + pThis->__eventbufferControl_Epilogue__ = &__nvoc_thunk_RmResource_eventbufferControl_Epilogue; + + pThis->__eventbufferControlLookup__ = 
&__nvoc_thunk_RsResource_eventbufferControlLookup; + + pThis->__eventbufferMap__ = &__nvoc_thunk_RsResource_eventbufferMap; + + pThis->__eventbufferAccessCallback__ = &__nvoc_thunk_RmResource_eventbufferAccessCallback; +} + +void __nvoc_init_funcTable_EventBuffer(EventBuffer *pThis) { + __nvoc_init_funcTable_EventBuffer_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_EventBuffer(EventBuffer *pThis) { + pThis->__nvoc_pbase_EventBuffer = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_EventBuffer(pThis); +} + +NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + EventBuffer *pThis; + + pThis = portMemAllocNonPaged(sizeof(EventBuffer)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(EventBuffer)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_EventBuffer); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_EventBuffer(pThis); + status = __nvoc_ctor_EventBuffer(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_EventBuffer_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_EventBuffer_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_EventBuffer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_event_buffer_nvoc.h b/src/nvidia/generated/g_event_buffer_nvoc.h new file mode 100644 index 000000000..6a7e12c80 --- /dev/null +++ b/src/nvidia/generated/g_event_buffer_nvoc.h @@ -0,0 +1,288 @@ +#ifndef _G_EVENT_BUFFER_NVOC_H_ +#define _G_EVENT_BUFFER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_event_buffer_nvoc.h" + +#ifndef _EVENT_BUFFER_H_ +#define _EVENT_BUFFER_H_ + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "rmapi/event.h" +#include "rmapi/resource.h" +#include "ctrl/ctrl90cd.h" +#include "eventbufferproducer.h" + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +typedef struct +{ + // + // Addr: user RO address + // Priv: return cookie to be passed to unmap + // + NvP64 headerAddr; + NvP64 headerPriv; + NvP64 recordBuffAddr; + NvP64 recordBuffPriv; + NvP64 vardataBuffAddr; + NvP64 vardataBuffPriv; +} EVENT_BUFFER_MAP_INFO; + +// This class shares buffers between kernel and usermode +#ifdef NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct EventBuffer { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct EventBuffer *__nvoc_pbase_EventBuffer; + NV_STATUS (*__eventbuffertBufferCtrlCmdEnableEvent__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *); + NV_STATUS (*__eventbuffertBufferCtrlCmdUpdateGet__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *); + NV_STATUS (*__eventbuffertBufferCtrlCmdFlush__)(struct EventBuffer *); + NV_STATUS (*__eventbuffertBufferCtrlCmdPostTelemetryEvent__)(struct EventBuffer *, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *); + NvBool (*__eventbufferShareCallback__)(struct EventBuffer *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__eventbufferCheckMemInterUnmap__)(struct EventBuffer *, NvBool); + NV_STATUS (*__eventbufferControl__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__eventbufferGetMemInterMapParams__)(struct EventBuffer *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__eventbufferGetMemoryMappingDescriptor__)(struct EventBuffer *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__eventbufferGetRefCount__)(struct 
EventBuffer *); + NV_STATUS (*__eventbufferControlFilter__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__eventbufferAddAdditionalDependants__)(struct RsClient *, struct EventBuffer *, RsResourceRef *); + NV_STATUS (*__eventbufferUnmap__)(struct EventBuffer *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__eventbufferControl_Prologue__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__eventbufferCanCopy__)(struct EventBuffer *); + NV_STATUS (*__eventbufferMapTo__)(struct EventBuffer *, RS_RES_MAP_TO_PARAMS *); + void (*__eventbufferPreDestruct__)(struct EventBuffer *); + NV_STATUS (*__eventbufferUnmapFrom__)(struct EventBuffer *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__eventbufferControl_Epilogue__)(struct EventBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__eventbufferControlLookup__)(struct EventBuffer *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__eventbufferMap__)(struct EventBuffer *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__eventbufferAccessCallback__)(struct EventBuffer *, struct RsClient *, void *, RsAccessRight); + struct MEMORY_DESCRIPTOR *pHeaderDesc; + struct MEMORY_DESCRIPTOR *pRecordBufDesc; + struct MEMORY_DESCRIPTOR *pVardataBufDesc; + NvHandle hSubDevice; + NvU32 subDeviceInst; + EVENT_BUFFER_MAP_INFO kernelMapInfo; + EVENT_BUFFER_MAP_INFO clientMapInfo; + NvHandle hClient; + NvU16 seqNo; + NvBool bNotifyPending; + PEVENTNOTIFICATION pListeners; + EVENT_BUFFER_PRODUCER_INFO producerInfo; + struct Memory *pHeader; + struct Memory *pRecord; + struct Memory *pVardata; + NvHandle hInternalClient; + NvHandle hInternalDevice; + NvHandle hInternalSubdevice; + NvHandle hInternalHeader; + NvHandle hInternalBuffer; +}; + +#ifndef __NVOC_CLASS_EventBuffer_TYPEDEF__ +#define __NVOC_CLASS_EventBuffer_TYPEDEF__ +typedef struct EventBuffer EventBuffer; +#endif /* __NVOC_CLASS_EventBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_EventBuffer +#define __nvoc_class_id_EventBuffer 0x63502b +#endif /* __nvoc_class_id_EventBuffer */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer; + +#define __staticCast_EventBuffer(pThis) \ + ((pThis)->__nvoc_pbase_EventBuffer) + +#ifdef __nvoc_event_buffer_h_disabled +#define __dynamicCast_EventBuffer(pThis) ((EventBuffer*)NULL) +#else //__nvoc_event_buffer_h_disabled +#define __dynamicCast_EventBuffer(pThis) \ + ((EventBuffer*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(EventBuffer))) +#endif //__nvoc_event_buffer_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_EventBuffer(EventBuffer**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_EventBuffer(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_EventBuffer((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define eventbuffertBufferCtrlCmdEnableEvent(pEventBuffer, pEnableParams) eventbuffertBufferCtrlCmdEnableEvent_DISPATCH(pEventBuffer, pEnableParams) +#define eventbuffertBufferCtrlCmdUpdateGet(pEventBuffer, pUpdateParams) eventbuffertBufferCtrlCmdUpdateGet_DISPATCH(pEventBuffer, pUpdateParams) +#define eventbuffertBufferCtrlCmdFlush(pEventBuffer) 
eventbuffertBufferCtrlCmdFlush_DISPATCH(pEventBuffer) +#define eventbuffertBufferCtrlCmdPostTelemetryEvent(pEventBuffer, pPostTelemetryEvent) eventbuffertBufferCtrlCmdPostTelemetryEvent_DISPATCH(pEventBuffer, pPostTelemetryEvent) +#define eventbufferShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventbufferShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define eventbufferCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventbufferCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define eventbufferControl(pResource, pCallContext, pParams) eventbufferControl_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferGetMemInterMapParams(pRmResource, pParams) eventbufferGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define eventbufferGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventbufferGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define eventbufferGetRefCount(pResource) eventbufferGetRefCount_DISPATCH(pResource) +#define eventbufferControlFilter(pResource, pCallContext, pParams) eventbufferControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferAddAdditionalDependants(pClient, pResource, pReference) eventbufferAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define eventbufferUnmap(pResource, pCallContext, pCpuMapping) eventbufferUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define eventbufferControl_Prologue(pResource, pCallContext, pParams) eventbufferControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferCanCopy(pResource) eventbufferCanCopy_DISPATCH(pResource) +#define eventbufferMapTo(pResource, pParams) eventbufferMapTo_DISPATCH(pResource, pParams) +#define eventbufferPreDestruct(pResource) eventbufferPreDestruct_DISPATCH(pResource) +#define eventbufferUnmapFrom(pResource, pParams) eventbufferUnmapFrom_DISPATCH(pResource, pParams) +#define eventbufferControl_Epilogue(pResource, pCallContext, pParams) eventbufferControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define eventbufferControlLookup(pResource, pParams, ppEntry) eventbufferControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define eventbufferMap(pResource, pCallContext, pParams, pCpuMapping) eventbufferMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define eventbufferAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventbufferAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS eventbuffertBufferCtrlCmdEnableEvent_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams); + +static inline NV_STATUS eventbuffertBufferCtrlCmdEnableEvent_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams) { + return pEventBuffer->__eventbuffertBufferCtrlCmdEnableEvent__(pEventBuffer, pEnableParams); +} + +NV_STATUS eventbuffertBufferCtrlCmdUpdateGet_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams); + +static inline NV_STATUS eventbuffertBufferCtrlCmdUpdateGet_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams) { + return pEventBuffer->__eventbuffertBufferCtrlCmdUpdateGet__(pEventBuffer, pUpdateParams); +} + +NV_STATUS eventbuffertBufferCtrlCmdFlush_IMPL(struct EventBuffer *pEventBuffer); + +static inline NV_STATUS eventbuffertBufferCtrlCmdFlush_DISPATCH(struct EventBuffer *pEventBuffer) 
{ + return pEventBuffer->__eventbuffertBufferCtrlCmdFlush__(pEventBuffer); +} + +NV_STATUS eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent); + +static inline NV_STATUS eventbuffertBufferCtrlCmdPostTelemetryEvent_DISPATCH(struct EventBuffer *pEventBuffer, NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent) { + return pEventBuffer->__eventbuffertBufferCtrlCmdPostTelemetryEvent__(pEventBuffer, pPostTelemetryEvent); +} + +static inline NvBool eventbufferShareCallback_DISPATCH(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__eventbufferShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS eventbufferCheckMemInterUnmap_DISPATCH(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__eventbufferCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS eventbufferControl_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventbufferControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventbufferGetMemInterMapParams_DISPATCH(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__eventbufferGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS eventbufferGetMemoryMappingDescriptor_DISPATCH(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__eventbufferGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 eventbufferGetRefCount_DISPATCH(struct EventBuffer *pResource) { + return pResource->__eventbufferGetRefCount__(pResource); +} + +static inline NV_STATUS eventbufferControlFilter_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventbufferControlFilter__(pResource, pCallContext, pParams); +} + +static inline void eventbufferAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) { + pResource->__eventbufferAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS eventbufferUnmap_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__eventbufferUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS eventbufferControl_Prologue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventbufferControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool eventbufferCanCopy_DISPATCH(struct EventBuffer *pResource) { + return pResource->__eventbufferCanCopy__(pResource); +} + +static inline NV_STATUS eventbufferMapTo_DISPATCH(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__eventbufferMapTo__(pResource, pParams); +} + +static inline void eventbufferPreDestruct_DISPATCH(struct EventBuffer *pResource) { + pResource->__eventbufferPreDestruct__(pResource); +} + +static inline NV_STATUS eventbufferUnmapFrom_DISPATCH(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return 
pResource->__eventbufferUnmapFrom__(pResource, pParams); +} + +static inline void eventbufferControl_Epilogue_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__eventbufferControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventbufferControlLookup_DISPATCH(struct EventBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__eventbufferControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS eventbufferMap_DISPATCH(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__eventbufferMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool eventbufferAccessCallback_DISPATCH(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__eventbufferAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS eventbufferConstruct_IMPL(struct EventBuffer *arg_pEventBuffer, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_eventbufferConstruct(arg_pEventBuffer, arg_pCallContext, arg_pParams) eventbufferConstruct_IMPL(arg_pEventBuffer, arg_pCallContext, arg_pParams) +void eventbufferDestruct_IMPL(struct EventBuffer *pEventBuffer); +#define __nvoc_eventbufferDestruct(pEventBuffer) eventbufferDestruct_IMPL(pEventBuffer) +#undef PRIVATE_FIELD + + +NV_STATUS eventBufferAdd(struct EventBuffer *pEventBuffer, void* pEventData, NvU32 recordType, NvBool* bNotify, NvP64 *pHandle); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_EVENT_BUFFER_NVOC_H_ diff --git a/src/nvidia/generated/g_event_nvoc.c b/src/nvidia/generated/g_event_nvoc.c new file mode 100644 index 000000000..8d94c5b24 --- /dev/null +++ b/src/nvidia/generated/g_event_nvoc.c @@ -0,0 +1,684 @@ +#define NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_event_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xd5f150 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_NotifShare(NotifShare*); +void __nvoc_init_funcTable_NotifShare(NotifShare*); +NV_STATUS __nvoc_ctor_NotifShare(NotifShare*); +void __nvoc_init_dataField_NotifShare(NotifShare*); +void __nvoc_dtor_NotifShare(NotifShare*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NotifShare; + +static const struct NVOC_RTTI __nvoc_rtti_NotifShare_NotifShare = { + /*pClassDef=*/ &__nvoc_class_def_NotifShare, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NotifShare, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_NotifShare_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NotifShare, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NotifShare_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NotifShare, __nvoc_base_RsShared), +}; + 
+static const struct NVOC_CASTINFO __nvoc_castinfo_NotifShare = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_NotifShare_NotifShare, + &__nvoc_rtti_NotifShare_RsShared, + &__nvoc_rtti_NotifShare_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NotifShare), + /*classId=*/ classId(NotifShare), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NotifShare", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NotifShare, + /*pCastInfo=*/ &__nvoc_castinfo_NotifShare, + /*pExportInfo=*/ &__nvoc_export_info_NotifShare +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_NotifShare = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_NotifShare(NotifShare *pThis) { + __nvoc_shrnotifDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NotifShare(NotifShare *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_NotifShare(NotifShare *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail_RsShared; + __nvoc_init_dataField_NotifShare(pThis); + + status = __nvoc_shrnotifConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail__init; + goto __nvoc_ctor_NotifShare_exit; // Success + +__nvoc_ctor_NotifShare_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_NotifShare_fail_RsShared: +__nvoc_ctor_NotifShare_exit: + + return status; +} + +static void __nvoc_init_funcTable_NotifShare_1(NotifShare *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_NotifShare(NotifShare *pThis) { + __nvoc_init_funcTable_NotifShare_1(pThis); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_NotifShare(NotifShare *pThis) { + pThis->__nvoc_pbase_NotifShare = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_NotifShare(pThis); +} + +NV_STATUS __nvoc_objCreate_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + NotifShare *pThis; + + pThis = portMemAllocNonPaged(sizeof(NotifShare)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(NotifShare)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NotifShare); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_NotifShare(pThis); + status = __nvoc_ctor_NotifShare(pThis); + if (status != NV_OK) goto __nvoc_objCreate_NotifShare_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_NotifShare_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = 
__nvoc_objCreate_NotifShare(ppThis, pParent, createFlags); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xa4ecfc = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_Event(Event*); +void __nvoc_init_funcTable_Event(Event*); +NV_STATUS __nvoc_ctor_Event(Event*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Event(Event*); +void __nvoc_dtor_Event(Event*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Event; + +static const struct NVOC_RTTI __nvoc_rtti_Event_Event = { + /*pClassDef=*/ &__nvoc_class_def_Event, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Event, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Event_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Event_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Event_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Event_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Event, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Event = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_Event_Event, + &__nvoc_rtti_Event_RmResource, + &__nvoc_rtti_Event_RmResourceCommon, + &__nvoc_rtti_Event_RsResource, + &__nvoc_rtti_Event_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Event = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Event), + /*classId=*/ classId(Event), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Event", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Event, + /*pCastInfo=*/ &__nvoc_castinfo_Event, + /*pExportInfo=*/ &__nvoc_export_info_Event +}; + +static NvBool __nvoc_thunk_RmResource_eventShareCallback(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventCheckMemInterUnmap(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventControl(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_Event_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventGetMemInterMapParams(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventGetMemoryMappingDescriptor(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_eventGetRefCount(struct Event *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventControlFilter(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_eventAddAdditionalDependants(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventUnmap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_eventControl_Prologue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_eventCanCopy(struct Event *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventMapTo(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_eventPreDestruct(struct Event *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventUnmapFrom(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_eventControl_Epilogue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventControlLookup(struct Event *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams, 
ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_eventMap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_eventAccessCallback(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Event = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Event(Event *pThis) { + __nvoc_eventDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Event(Event *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Event(Event *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Event_fail_RmResource; + __nvoc_init_dataField_Event(pThis); + + status = __nvoc_eventConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Event_fail__init; + goto __nvoc_ctor_Event_exit; // Success + +__nvoc_ctor_Event_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_Event_fail_RmResource: +__nvoc_ctor_Event_exit: + + return status; +} + +static void __nvoc_init_funcTable_Event_1(Event *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__eventShareCallback__ = &__nvoc_thunk_RmResource_eventShareCallback; + + pThis->__eventCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_eventCheckMemInterUnmap; + + pThis->__eventControl__ = &__nvoc_thunk_RsResource_eventControl; + + pThis->__eventGetMemInterMapParams__ = &__nvoc_thunk_RmResource_eventGetMemInterMapParams; + + pThis->__eventGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_eventGetMemoryMappingDescriptor; + + pThis->__eventGetRefCount__ = &__nvoc_thunk_RsResource_eventGetRefCount; + + pThis->__eventControlFilter__ = &__nvoc_thunk_RsResource_eventControlFilter; + + pThis->__eventAddAdditionalDependants__ = &__nvoc_thunk_RsResource_eventAddAdditionalDependants; + + pThis->__eventUnmap__ = &__nvoc_thunk_RsResource_eventUnmap; + + pThis->__eventControl_Prologue__ = &__nvoc_thunk_RmResource_eventControl_Prologue; + + pThis->__eventCanCopy__ = &__nvoc_thunk_RsResource_eventCanCopy; + + pThis->__eventMapTo__ = &__nvoc_thunk_RsResource_eventMapTo; + + pThis->__eventPreDestruct__ = &__nvoc_thunk_RsResource_eventPreDestruct; + + pThis->__eventUnmapFrom__ = &__nvoc_thunk_RsResource_eventUnmapFrom; + + pThis->__eventControl_Epilogue__ = &__nvoc_thunk_RmResource_eventControl_Epilogue; + + pThis->__eventControlLookup__ = &__nvoc_thunk_RsResource_eventControlLookup; + + pThis->__eventMap__ = &__nvoc_thunk_RsResource_eventMap; + + pThis->__eventAccessCallback__ = &__nvoc_thunk_RmResource_eventAccessCallback; +} + +void __nvoc_init_funcTable_Event(Event *pThis) { + __nvoc_init_funcTable_Event_1(pThis); +} 
+ +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_Event(Event *pThis) { + pThis->__nvoc_pbase_Event = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_Event(pThis); +} + +NV_STATUS __nvoc_objCreate_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Event *pThis; + + pThis = portMemAllocNonPaged(sizeof(Event)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Event)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Event); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Event(pThis); + status = __nvoc_ctor_Event(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Event_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Event_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Event(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xf8f965 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +void __nvoc_init_INotifier(INotifier*); +void __nvoc_init_funcTable_INotifier(INotifier*); +NV_STATUS __nvoc_ctor_INotifier(INotifier*, struct CALL_CONTEXT * arg_pCallContext); +void __nvoc_init_dataField_INotifier(INotifier*); +void __nvoc_dtor_INotifier(INotifier*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_INotifier; + +static const struct NVOC_RTTI __nvoc_rtti_INotifier_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_INotifier, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_INotifier = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_INotifier_INotifier, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier = +{ + /*classInfo=*/ { + /*size=*/ sizeof(INotifier), + /*classId=*/ classId(INotifier), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "INotifier", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_INotifier, + /*pExportInfo=*/ &__nvoc_export_info_INotifier +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_INotifier = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void 
__nvoc_dtor_INotifier(INotifier *pThis) { + __nvoc_inotifyDestruct(pThis); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_INotifier(INotifier *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_INotifier(INotifier *pThis, struct CALL_CONTEXT * arg_pCallContext) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_INotifier(pThis); + + status = __nvoc_inotifyConstruct(pThis, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_INotifier_fail__init; + goto __nvoc_ctor_INotifier_exit; // Success + +__nvoc_ctor_INotifier_fail__init: +__nvoc_ctor_INotifier_exit: + + return status; +} + +static void __nvoc_init_funcTable_INotifier_1(INotifier *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__inotifyGetNotificationListPtr__ = NULL; + + pThis->__inotifySetNotificationShare__ = NULL; + + pThis->__inotifyGetNotificationShare__ = NULL; + + pThis->__inotifyUnregisterEvent__ = NULL; + + pThis->__inotifyGetOrAllocNotifShare__ = NULL; +} + +void __nvoc_init_funcTable_INotifier(INotifier *pThis) { + __nvoc_init_funcTable_INotifier_1(pThis); +} + +void __nvoc_init_INotifier(INotifier *pThis) { + pThis->__nvoc_pbase_INotifier = pThis; + __nvoc_init_funcTable_INotifier(pThis); +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xa8683b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_funcTable_Notifier(Notifier*); +NV_STATUS __nvoc_ctor_Notifier(Notifier*, struct CALL_CONTEXT * arg_pCallContext); +void __nvoc_init_dataField_Notifier(Notifier*); +void __nvoc_dtor_Notifier(Notifier*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Notifier; + +static const struct NVOC_RTTI __nvoc_rtti_Notifier_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Notifier, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Notifier_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Notifier, __nvoc_base_INotifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Notifier = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_Notifier_Notifier, + &__nvoc_rtti_Notifier_INotifier, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Notifier), + /*classId=*/ classId(Notifier), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Notifier", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_Notifier, + /*pExportInfo=*/ &__nvoc_export_info_Notifier +}; + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset)); +} + +static void __nvoc_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), pNotifShare); +} + +static NV_STATUS 
__nvoc_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Notifier = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_INotifier(INotifier*); +void __nvoc_dtor_Notifier(Notifier *pThis) { + __nvoc_notifyDestruct(pThis); + __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Notifier(Notifier *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_INotifier(INotifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_Notifier(Notifier *pThis, struct CALL_CONTEXT * arg_pCallContext) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_INotifier(&pThis->__nvoc_base_INotifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_Notifier_fail_INotifier; + __nvoc_init_dataField_Notifier(pThis); + + status = __nvoc_notifyConstruct(pThis, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_Notifier_fail__init; + goto __nvoc_ctor_Notifier_exit; // Success + +__nvoc_ctor_Notifier_fail__init: + __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier); +__nvoc_ctor_Notifier_fail_INotifier: +__nvoc_ctor_Notifier_exit: + + return status; +} + +static void __nvoc_init_funcTable_Notifier_1(Notifier *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL; + + pThis->__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL; + + pThis->__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL; + + pThis->__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL; + + pThis->__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL; + + pThis->__nvoc_base_INotifier.__inotifyGetNotificationListPtr__ = &__nvoc_thunk_Notifier_inotifyGetNotificationListPtr; + + pThis->__nvoc_base_INotifier.__inotifyGetNotificationShare__ = &__nvoc_thunk_Notifier_inotifyGetNotificationShare; + + pThis->__nvoc_base_INotifier.__inotifySetNotificationShare__ = &__nvoc_thunk_Notifier_inotifySetNotificationShare; + + pThis->__nvoc_base_INotifier.__inotifyUnregisterEvent__ = &__nvoc_thunk_Notifier_inotifyUnregisterEvent; + + pThis->__nvoc_base_INotifier.__inotifyGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_inotifyGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_Notifier(Notifier *pThis) { + __nvoc_init_funcTable_Notifier_1(pThis); +} + +void __nvoc_init_INotifier(INotifier*); +void __nvoc_init_Notifier(Notifier *pThis) { + pThis->__nvoc_pbase_Notifier = pThis; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_INotifier; + __nvoc_init_INotifier(&pThis->__nvoc_base_INotifier); + __nvoc_init_funcTable_Notifier(pThis); +} + diff --git a/src/nvidia/generated/g_event_nvoc.h b/src/nvidia/generated/g_event_nvoc.h new file mode 100644 index 000000000..ab8d3a0c5 --- /dev/null +++ b/src/nvidia/generated/g_event_nvoc.h @@ -0,0 +1,529 
@@ +#ifndef _G_EVENT_NVOC_H_ +#define _G_EVENT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_event_nvoc.h" + +#ifndef _EVENT_H_ +#define _EVENT_H_ + +#include "class/cl0000.h" // NV0000_NOTIFIERS_MAXCOUNT + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_server.h" +#include "rmapi/resource.h" + +typedef struct _def_system_event_queue SYSTEM_EVENTS_QUEUE; + +struct EVENTNOTIFICATION +{ + NvHandle hEventClient; + NvHandle hEvent; + NvU32 subdeviceInst; + NvU32 NotifyIndex; // NVnnnn_NOTIFIERS_xyz + NvU32 NotifyType; // Event class. NV01_EVENT_OS_EVENT for example. + NvBool bUserOsEventHandle; // Event was allocated from user app. + NvBool bBroadcastEvent; // Wait for all subdevices before sending event. + NvBool bClientRM; // Event was allocated from client RM. + NvBool bSubdeviceSpecificEvent; // SubdeviceSpecificValue is valid. + NvU32 SubdeviceSpecificValue; // NV0005_NOTIFY_INDEX_SUBDEVICE + NvBool bEventDataRequired; // nv_post_event allocates memory for Data. + NvBool bNonStallIntrEvent; + NvU32 NotifyTriggerCount; // Used with bBroadcastEvent. + NvP64 Data; + struct EVENTNOTIFICATION *Next; +}; +typedef struct EVENTNOTIFICATION EVENTNOTIFICATION, *PEVENTNOTIFICATION; + +struct INotifier; + +#ifndef __NVOC_CLASS_INotifier_TYPEDEF__ +#define __NVOC_CLASS_INotifier_TYPEDEF__ +typedef struct INotifier INotifier; +#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_INotifier +#define __nvoc_class_id_INotifier 0xf8f965 +#endif /* __nvoc_class_id_INotifier */ + + + +#define NV_SYSTEM_EVENT_QUEUE_SIZE 16 +struct _def_system_event_queue +{ + NvU32 Head; + NvU32 Tail; + struct event_queue + { + NvU32 event; + NvU32 status; + } EventQueue[NV_SYSTEM_EVENT_QUEUE_SIZE]; +}; + +struct _def_client_system_event_info +{ + SYSTEM_EVENTS_QUEUE systemEventsQueue; + NvU32 notifyActions[NV0000_NOTIFIERS_MAXCOUNT]; +}; + +/** + * This class represents data that is shared between one notifier and any + * events that are registered with the notifier. + * + * Instances of this class are ref-counted and will be kept alive until + * the notifier and all of its events have been freed. 
+ */ +#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NotifShare { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsShared __nvoc_base_RsShared; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + struct NotifShare *__nvoc_pbase_NotifShare; + struct INotifier *pNotifier; + NvHandle hNotifierClient; + NvHandle hNotifierResource; + EVENTNOTIFICATION *pEventList; +}; + +#ifndef __NVOC_CLASS_NotifShare_TYPEDEF__ +#define __NVOC_CLASS_NotifShare_TYPEDEF__ +typedef struct NotifShare NotifShare; +#endif /* __NVOC_CLASS_NotifShare_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NotifShare +#define __nvoc_class_id_NotifShare 0xd5f150 +#endif /* __nvoc_class_id_NotifShare */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare; + +#define __staticCast_NotifShare(pThis) \ + ((pThis)->__nvoc_pbase_NotifShare) + +#ifdef __nvoc_event_h_disabled +#define __dynamicCast_NotifShare(pThis) ((NotifShare*)NULL) +#else //__nvoc_event_h_disabled +#define __dynamicCast_NotifShare(pThis) \ + ((NotifShare*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NotifShare))) +#endif //__nvoc_event_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NotifShare(NotifShare**, Dynamic*, NvU32); +#define __objCreate_NotifShare(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_NotifShare((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS shrnotifConstruct_IMPL(struct NotifShare *arg_pNotifShare); +#define __nvoc_shrnotifConstruct(arg_pNotifShare) shrnotifConstruct_IMPL(arg_pNotifShare) +void shrnotifDestruct_IMPL(struct NotifShare *pNotifShare); +#define __nvoc_shrnotifDestruct(pNotifShare) shrnotifDestruct_IMPL(pNotifShare) +#undef PRIVATE_FIELD + + +/** + * This class represents event notification consumers + */ +#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Event { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Event *__nvoc_pbase_Event; + NvBool (*__eventShareCallback__)(struct Event *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__eventCheckMemInterUnmap__)(struct Event *, NvBool); + NV_STATUS (*__eventControl__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__eventGetMemInterMapParams__)(struct Event *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__eventGetMemoryMappingDescriptor__)(struct Event *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__eventGetRefCount__)(struct Event *); + NV_STATUS (*__eventControlFilter__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__eventAddAdditionalDependants__)(struct RsClient *, struct Event *, RsResourceRef *); + NV_STATUS (*__eventUnmap__)(struct Event *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__eventControl_Prologue__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__eventCanCopy__)(struct Event *); + NV_STATUS (*__eventMapTo__)(struct Event *, RS_RES_MAP_TO_PARAMS *); + void (*__eventPreDestruct__)(struct Event *); + NV_STATUS 
(*__eventUnmapFrom__)(struct Event *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__eventControl_Epilogue__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__eventControlLookup__)(struct Event *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__eventMap__)(struct Event *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__eventAccessCallback__)(struct Event *, struct RsClient *, void *, RsAccessRight); + struct NotifShare *pNotifierShare; + NvHandle hNotifierClient; + NvHandle hNotifierResource; + NvHandle hEvent; +}; + +#ifndef __NVOC_CLASS_Event_TYPEDEF__ +#define __NVOC_CLASS_Event_TYPEDEF__ +typedef struct Event Event; +#endif /* __NVOC_CLASS_Event_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Event +#define __nvoc_class_id_Event 0xa4ecfc +#endif /* __nvoc_class_id_Event */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event; + +#define __staticCast_Event(pThis) \ + ((pThis)->__nvoc_pbase_Event) + +#ifdef __nvoc_event_h_disabled +#define __dynamicCast_Event(pThis) ((Event*)NULL) +#else //__nvoc_event_h_disabled +#define __dynamicCast_Event(pThis) \ + ((Event*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Event))) +#endif //__nvoc_event_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Event(Event**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Event(Event**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Event(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Event((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define eventShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define eventCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define eventControl(pResource, pCallContext, pParams) eventControl_DISPATCH(pResource, pCallContext, pParams) +#define eventGetMemInterMapParams(pRmResource, pParams) eventGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define eventGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define eventGetRefCount(pResource) eventGetRefCount_DISPATCH(pResource) +#define eventControlFilter(pResource, pCallContext, pParams) eventControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define eventAddAdditionalDependants(pClient, pResource, pReference) eventAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define eventUnmap(pResource, pCallContext, pCpuMapping) eventUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define eventControl_Prologue(pResource, pCallContext, pParams) eventControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define eventCanCopy(pResource) eventCanCopy_DISPATCH(pResource) +#define eventMapTo(pResource, pParams) eventMapTo_DISPATCH(pResource, pParams) +#define eventPreDestruct(pResource) eventPreDestruct_DISPATCH(pResource) +#define eventUnmapFrom(pResource, pParams) eventUnmapFrom_DISPATCH(pResource, pParams) +#define eventControl_Epilogue(pResource, pCallContext, pParams) eventControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define eventControlLookup(pResource, pParams, ppEntry) eventControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define 
eventMap(pResource, pCallContext, pParams, pCpuMapping) eventMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define eventAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool eventShareCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__eventShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS eventCheckMemInterUnmap_DISPATCH(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__eventCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS eventControl_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventGetMemInterMapParams_DISPATCH(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__eventGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS eventGetMemoryMappingDescriptor_DISPATCH(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__eventGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 eventGetRefCount_DISPATCH(struct Event *pResource) { + return pResource->__eventGetRefCount__(pResource); +} + +static inline NV_STATUS eventControlFilter_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventControlFilter__(pResource, pCallContext, pParams); +} + +static inline void eventAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) { + pResource->__eventAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS eventUnmap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__eventUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS eventControl_Prologue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__eventControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool eventCanCopy_DISPATCH(struct Event *pResource) { + return pResource->__eventCanCopy__(pResource); +} + +static inline NV_STATUS eventMapTo_DISPATCH(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__eventMapTo__(pResource, pParams); +} + +static inline void eventPreDestruct_DISPATCH(struct Event *pResource) { + pResource->__eventPreDestruct__(pResource); +} + +static inline NV_STATUS eventUnmapFrom_DISPATCH(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__eventUnmapFrom__(pResource, pParams); +} + +static inline void eventControl_Epilogue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__eventControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS eventControlLookup_DISPATCH(struct Event *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return 
pResource->__eventControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS eventMap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__eventMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool eventAccessCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__eventAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS eventConstruct_IMPL(struct Event *arg_pEvent, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_eventConstruct(arg_pEvent, arg_pCallContext, arg_pParams) eventConstruct_IMPL(arg_pEvent, arg_pCallContext, arg_pParams) +void eventDestruct_IMPL(struct Event *pEvent); +#define __nvoc_eventDestruct(pEvent) eventDestruct_IMPL(pEvent) +NV_STATUS eventInit_IMPL(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification); +#ifdef __nvoc_event_h_disabled +static inline NV_STATUS eventInit(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification) { + NV_ASSERT_FAILED_PRECOMP("Event was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_event_h_disabled +#define eventInit(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification) eventInit_IMPL(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification) +#endif //__nvoc_event_h_disabled + +#undef PRIVATE_FIELD + + +/** + * Mix-in interface for resources that send notifications to events + */ +#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct INotifier { + const struct NVOC_RTTI *__nvoc_rtti; + struct INotifier *__nvoc_pbase_INotifier; + PEVENTNOTIFICATION *(*__inotifyGetNotificationListPtr__)(struct INotifier *); + void (*__inotifySetNotificationShare__)(struct INotifier *, struct NotifShare *); + struct NotifShare *(*__inotifyGetNotificationShare__)(struct INotifier *); + NV_STATUS (*__inotifyUnregisterEvent__)(struct INotifier *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__inotifyGetOrAllocNotifShare__)(struct INotifier *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_INotifier_TYPEDEF__ +#define __NVOC_CLASS_INotifier_TYPEDEF__ +typedef struct INotifier INotifier; +#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_INotifier +#define __nvoc_class_id_INotifier 0xf8f965 +#endif /* __nvoc_class_id_INotifier */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +#define __staticCast_INotifier(pThis) \ + ((pThis)->__nvoc_pbase_INotifier) + +#ifdef __nvoc_event_h_disabled +#define __dynamicCast_INotifier(pThis) ((INotifier*)NULL) +#else //__nvoc_event_h_disabled +#define __dynamicCast_INotifier(pThis) \ + ((INotifier*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(INotifier))) +#endif //__nvoc_event_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_INotifier(INotifier**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_INotifier(INotifier**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext); +#define __objCreate_INotifier(ppNewObj, pParent, createFlags, arg_pCallContext) \ + 
__nvoc_objCreate_INotifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext) + +#define inotifyGetNotificationListPtr(pNotifier) inotifyGetNotificationListPtr_DISPATCH(pNotifier) +#define inotifySetNotificationShare(pNotifier, pNotifShare) inotifySetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define inotifyGetNotificationShare(pNotifier) inotifyGetNotificationShare_DISPATCH(pNotifier) +#define inotifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) inotifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define inotifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) inotifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +static inline PEVENTNOTIFICATION *inotifyGetNotificationListPtr_DISPATCH(struct INotifier *pNotifier) { + return pNotifier->__inotifyGetNotificationListPtr__(pNotifier); +} + +static inline void inotifySetNotificationShare_DISPATCH(struct INotifier *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__inotifySetNotificationShare__(pNotifier, pNotifShare); +} + +static inline struct NotifShare *inotifyGetNotificationShare_DISPATCH(struct INotifier *pNotifier) { + return pNotifier->__inotifyGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS inotifyUnregisterEvent_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__inotifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS inotifyGetOrAllocNotifShare_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__inotifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS inotifyConstruct_IMPL(struct INotifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext); +#define __nvoc_inotifyConstruct(arg_pNotifier, arg_pCallContext) inotifyConstruct_IMPL(arg_pNotifier, arg_pCallContext) +void inotifyDestruct_IMPL(struct INotifier *pNotifier); +#define __nvoc_inotifyDestruct(pNotifier) inotifyDestruct_IMPL(pNotifier) +PEVENTNOTIFICATION inotifyGetNotificationList_IMPL(struct INotifier *pNotifier); +#ifdef __nvoc_event_h_disabled +static inline PEVENTNOTIFICATION inotifyGetNotificationList(struct INotifier *pNotifier) { + NV_ASSERT_FAILED_PRECOMP("INotifier was disabled!"); + return NULL; +} +#else //__nvoc_event_h_disabled +#define inotifyGetNotificationList(pNotifier) inotifyGetNotificationList_IMPL(pNotifier) +#endif //__nvoc_event_h_disabled + +#undef PRIVATE_FIELD + + +/** + * Basic implementation for event notification mix-in + */ +#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Notifier { + const struct NVOC_RTTI *__nvoc_rtti; + struct INotifier __nvoc_base_INotifier; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + PEVENTNOTIFICATION *(*__notifyGetNotificationListPtr__)(struct Notifier *); + struct NotifShare *(*__notifyGetNotificationShare__)(struct Notifier *); + void (*__notifySetNotificationShare__)(struct Notifier *, struct NotifShare *); + NV_STATUS (*__notifyUnregisterEvent__)(struct Notifier *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS 
(*__notifyGetOrAllocNotifShare__)(struct Notifier *, NvHandle, NvHandle, struct NotifShare **); + struct NotifShare *pNotifierShare; +}; + +#ifndef __NVOC_CLASS_Notifier_TYPEDEF__ +#define __NVOC_CLASS_Notifier_TYPEDEF__ +typedef struct Notifier Notifier; +#endif /* __NVOC_CLASS_Notifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Notifier +#define __nvoc_class_id_Notifier 0xa8683b +#endif /* __nvoc_class_id_Notifier */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +#define __staticCast_Notifier(pThis) \ + ((pThis)->__nvoc_pbase_Notifier) + +#ifdef __nvoc_event_h_disabled +#define __dynamicCast_Notifier(pThis) ((Notifier*)NULL) +#else //__nvoc_event_h_disabled +#define __dynamicCast_Notifier(pThis) \ + ((Notifier*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Notifier))) +#endif //__nvoc_event_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Notifier(Notifier**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Notifier(Notifier**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext); +#define __objCreate_Notifier(ppNewObj, pParent, createFlags, arg_pCallContext) \ + __nvoc_objCreate_Notifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext) + +#define notifyGetNotificationListPtr(pNotifier) notifyGetNotificationListPtr_DISPATCH(pNotifier) +#define notifyGetNotificationShare(pNotifier) notifyGetNotificationShare_DISPATCH(pNotifier) +#define notifySetNotificationShare(pNotifier, pNotifShare) notifySetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define notifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) notifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define notifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) notifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +PEVENTNOTIFICATION *notifyGetNotificationListPtr_IMPL(struct Notifier *pNotifier); + +static inline PEVENTNOTIFICATION *notifyGetNotificationListPtr_DISPATCH(struct Notifier *pNotifier) { + return pNotifier->__notifyGetNotificationListPtr__(pNotifier); +} + +struct NotifShare *notifyGetNotificationShare_IMPL(struct Notifier *pNotifier); + +static inline struct NotifShare *notifyGetNotificationShare_DISPATCH(struct Notifier *pNotifier) { + return pNotifier->__notifyGetNotificationShare__(pNotifier); +} + +void notifySetNotificationShare_IMPL(struct Notifier *pNotifier, struct NotifShare *pNotifShare); + +static inline void notifySetNotificationShare_DISPATCH(struct Notifier *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__notifySetNotificationShare__(pNotifier, pNotifShare); +} + +NV_STATUS notifyUnregisterEvent_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent); + +static inline NV_STATUS notifyUnregisterEvent_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__notifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +NV_STATUS notifyGetOrAllocNotifShare_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare); + +static inline NV_STATUS notifyGetOrAllocNotifShare_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return 
pNotifier->__notifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS notifyConstruct_IMPL(struct Notifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext); +#define __nvoc_notifyConstruct(arg_pNotifier, arg_pCallContext) notifyConstruct_IMPL(arg_pNotifier, arg_pCallContext) +void notifyDestruct_IMPL(struct Notifier *pNotifier); +#define __nvoc_notifyDestruct(pNotifier) notifyDestruct_IMPL(pNotifier) +#undef PRIVATE_FIELD + + +void CliAddSystemEvent(NvU32, NvU32); +NvBool CliDelObjectEvents(NvHandle hClient, NvHandle hObject); +NvBool CliGetEventInfo(NvHandle hClient, NvHandle hEvent, struct Event **ppEvent); +NV_STATUS CliGetEventNotificationList(NvHandle hClient, NvHandle hObject, + struct INotifier **ppNotifier, + PEVENTNOTIFICATION **pppEventNotification); + +NV_STATUS registerEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle, NvU32, NvU32, NvP64, NvBool); +NV_STATUS unregisterEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle); +NV_STATUS unregisterEventNotificationWithData(PEVENTNOTIFICATION *, NvHandle, NvHandle, NvHandle, NvBool, NvP64); +NV_STATUS bindEventNotificationToSubdevice(PEVENTNOTIFICATION, NvHandle, NvU32); +NV_STATUS engineNonStallIntrNotify(OBJGPU *, NvU32); +NV_STATUS notifyEvents(OBJGPU*, EVENTNOTIFICATION*, NvU32, NvU32, NvU32, NV_STATUS, NvU32); +NV_STATUS engineNonStallIntrNotifyEvent(OBJGPU *, NvU32, NvHandle); + +#endif // _EVENT_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_EVENT_NVOC_H_ diff --git a/src/nvidia/generated/g_fabric_nvoc.c b/src/nvidia/generated/g_fabric_nvoc.c new file mode 100644 index 000000000..45f8bfa81 --- /dev/null +++ b/src/nvidia/generated/g_fabric_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_FABRIC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_fabric_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x0ac791 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Fabric; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_Fabric(Fabric*); +void __nvoc_init_funcTable_Fabric(Fabric*); +NV_STATUS __nvoc_ctor_Fabric(Fabric*); +void __nvoc_init_dataField_Fabric(Fabric*); +void __nvoc_dtor_Fabric(Fabric*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Fabric; + +static const struct NVOC_RTTI __nvoc_rtti_Fabric_Fabric = { + /*pClassDef=*/ &__nvoc_class_def_Fabric, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Fabric, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Fabric_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Fabric, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Fabric = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_Fabric_Fabric, + &__nvoc_rtti_Fabric_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Fabric = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Fabric), + /*classId=*/ classId(Fabric), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Fabric", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Fabric, + /*pCastInfo=*/ &__nvoc_castinfo_Fabric, + /*pExportInfo=*/ &__nvoc_export_info_Fabric +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Fabric = +{ + /*numEntries=*/ 0, + 
/*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_Fabric(Fabric *pThis) { + __nvoc_fabricDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Fabric(Fabric *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_Fabric(Fabric *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_Fabric_fail_Object; + __nvoc_init_dataField_Fabric(pThis); + + status = __nvoc_fabricConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_Fabric_fail__init; + goto __nvoc_ctor_Fabric_exit; // Success + +__nvoc_ctor_Fabric_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_Fabric_fail_Object: +__nvoc_ctor_Fabric_exit: + + return status; +} + +static void __nvoc_init_funcTable_Fabric_1(Fabric *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_Fabric(Fabric *pThis) { + __nvoc_init_funcTable_Fabric_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_Fabric(Fabric *pThis) { + pThis->__nvoc_pbase_Fabric = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_Fabric(pThis); +} + +NV_STATUS __nvoc_objCreate_Fabric(Fabric **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + Fabric *pThis; + + pThis = portMemAllocNonPaged(sizeof(Fabric)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Fabric)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Fabric); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Fabric(pThis); + status = __nvoc_ctor_Fabric(pThis); + if (status != NV_OK) goto __nvoc_objCreate_Fabric_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Fabric_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Fabric(Fabric **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_Fabric(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_fabric_nvoc.h b/src/nvidia/generated/g_fabric_nvoc.h new file mode 100644 index 000000000..cd0059ed6 --- /dev/null +++ b/src/nvidia/generated/g_fabric_nvoc.h @@ -0,0 +1,128 @@ +#ifndef _G_FABRIC_NVOC_H_ +#define _G_FABRIC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing the NVLink fabric + * + *****************************************************************************/ + +#include "g_fabric_nvoc.h" + +#ifndef _FABRIC_H_ +#define _FABRIC_H_ + +#include "core/core.h" +#include "core/system.h" +#include "class/cl000f.h" +#include "ctrl/ctrl000f.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +// +// The Fabric object is used to encapsulate the NVLink fabric +// +#ifdef NVOC_FABRIC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Fabric { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct Fabric *__nvoc_pbase_Fabric; + NvU32 flags; +}; + +#ifndef __NVOC_CLASS_Fabric_TYPEDEF__ +#define __NVOC_CLASS_Fabric_TYPEDEF__ +typedef struct Fabric Fabric; +#endif /* __NVOC_CLASS_Fabric_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Fabric +#define __nvoc_class_id_Fabric 0x0ac791 +#endif /* __nvoc_class_id_Fabric */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Fabric; + +#define __staticCast_Fabric(pThis) \ + ((pThis)->__nvoc_pbase_Fabric) + +#ifdef __nvoc_fabric_h_disabled +#define __dynamicCast_Fabric(pThis) ((Fabric*)NULL) +#else //__nvoc_fabric_h_disabled +#define __dynamicCast_Fabric(pThis) \ + ((Fabric*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Fabric))) +#endif //__nvoc_fabric_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Fabric(Fabric**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Fabric(Fabric**, Dynamic*, NvU32); +#define __objCreate_Fabric(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_Fabric((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS fabricConstruct_IMPL(struct Fabric *arg_pFabric); +#define __nvoc_fabricConstruct(arg_pFabric) fabricConstruct_IMPL(arg_pFabric) +void fabricDestruct_IMPL(struct Fabric *pFabric); +#define __nvoc_fabricDestruct(pFabric) fabricDestruct_IMPL(pFabric) +void fabricSetFmSessionFlags_IMPL(struct Fabric *pFabric, NvU32 flags); +#ifdef __nvoc_fabric_h_disabled +static inline void fabricSetFmSessionFlags(struct Fabric *pFabric, 
NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("Fabric was disabled!"); +} +#else //__nvoc_fabric_h_disabled +#define fabricSetFmSessionFlags(pFabric, flags) fabricSetFmSessionFlags_IMPL(pFabric, flags) +#endif //__nvoc_fabric_h_disabled + +NvU32 fabricGetFmSessionFlags_IMPL(struct Fabric *pFabric); +#ifdef __nvoc_fabric_h_disabled +static inline NvU32 fabricGetFmSessionFlags(struct Fabric *pFabric) { + NV_ASSERT_FAILED_PRECOMP("Fabric was disabled!"); + return 0; +} +#else //__nvoc_fabric_h_disabled +#define fabricGetFmSessionFlags(pFabric) fabricGetFmSessionFlags_IMPL(pFabric) +#endif //__nvoc_fabric_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _FABRIC_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_FABRIC_NVOC_H_ diff --git a/src/nvidia/generated/g_fabric_vaspace_nvoc.c b/src/nvidia/generated/g_fabric_vaspace_nvoc.c new file mode 100644 index 000000000..cfdf1601b --- /dev/null +++ b/src/nvidia/generated/g_fabric_vaspace_nvoc.c @@ -0,0 +1,353 @@ +#define NVOC_FABRIC_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_fabric_vaspace_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x8c8f3d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_FABRIC_VASPACE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +void __nvoc_init_FABRIC_VASPACE(FABRIC_VASPACE*); +void __nvoc_init_funcTable_FABRIC_VASPACE(FABRIC_VASPACE*); +NV_STATUS __nvoc_ctor_FABRIC_VASPACE(FABRIC_VASPACE*); +void __nvoc_init_dataField_FABRIC_VASPACE(FABRIC_VASPACE*); +void __nvoc_dtor_FABRIC_VASPACE(FABRIC_VASPACE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_FABRIC_VASPACE; + +static const struct NVOC_RTTI __nvoc_rtti_FABRIC_VASPACE_FABRIC_VASPACE = { + /*pClassDef=*/ &__nvoc_class_def_FABRIC_VASPACE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_FABRIC_VASPACE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_FABRIC_VASPACE_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FABRIC_VASPACE, __nvoc_base_OBJVASPACE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJVASPACE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FABRIC_VASPACE, __nvoc_base_OBJVASPACE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_FABRIC_VASPACE = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_FABRIC_VASPACE_FABRIC_VASPACE, + &__nvoc_rtti_FABRIC_VASPACE_OBJVASPACE, + &__nvoc_rtti_FABRIC_VASPACE_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_FABRIC_VASPACE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(FABRIC_VASPACE), + /*classId=*/ classId(FABRIC_VASPACE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "FABRIC_VASPACE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_FABRIC_VASPACE, + /*pCastInfo=*/ &__nvoc_castinfo_FABRIC_VASPACE, + /*pExportInfo=*/ &__nvoc_export_info_FABRIC_VASPACE +}; + +static NV_STATUS __nvoc_thunk_FABRIC_VASPACE_vaspaceConstruct_(struct OBJVASPACE *pFabricVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return fabricvaspaceConstruct_((struct FABRIC_VASPACE 
*)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +static NV_STATUS __nvoc_thunk_FABRIC_VASPACE_vaspaceAlloc(struct OBJVASPACE *pFabricVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSize, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return fabricvaspaceAlloc((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), size, align, rangeLo, rangeHi, pageSize, flags, pAddr); +} + +static NV_STATUS __nvoc_thunk_FABRIC_VASPACE_vaspaceFree(struct OBJVASPACE *pFabricVAS, NvU64 vAddr) { + return fabricvaspaceFree((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), vAddr); +} + +static NV_STATUS __nvoc_thunk_FABRIC_VASPACE_vaspaceMap(struct OBJVASPACE *pFabricVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) { + return fabricvaspaceMap((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu, vaLo, vaHi, pTarget, flags); +} + +static void __nvoc_thunk_FABRIC_VASPACE_vaspaceUnmap(struct OBJVASPACE *pFabricVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) { + fabricvaspaceUnmap((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu, vaLo, vaHi); +} + +static NV_STATUS __nvoc_thunk_FABRIC_VASPACE_vaspaceApplyDefaultAlignment(struct OBJVASPACE *pFabricVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return fabricvaspaceApplyDefaultAlignment((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +static NV_STATUS __nvoc_thunk_FABRIC_VASPACE_vaspaceGetVasInfo(struct OBJVASPACE *pFabricVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return fabricvaspaceGetVasInfo((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_FABRIC_VASPACE_vaspacePinRootPageDir(struct OBJVASPACE *pFabricVAS, struct OBJGPU *pGpu) { + return fabricvaspacePinRootPageDir((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu); +} + +static void __nvoc_thunk_FABRIC_VASPACE_vaspaceUnpinRootPageDir(struct OBJVASPACE *pFabricVAS, struct OBJGPU *pGpu) { + fabricvaspaceUnpinRootPageDir((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu); +} + +static void __nvoc_thunk_FABRIC_VASPACE_vaspaceInvalidateTlb(struct OBJVASPACE *pFabricVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) { + fabricvaspaceInvalidateTlb((struct FABRIC_VASPACE *)(((unsigned char *)pFabricVAS) - __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu, type); +} + +static NvBool __nvoc_thunk_OBJVASPACE_fabricvaspaceIsMirrored(struct FABRIC_VASPACE *pVAS) { + return vaspaceIsMirrored((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJVASPACE_fabricvaspaceIsExternallyOwned(struct FABRIC_VASPACE *pVAS) { + return vaspaceIsExternallyOwned((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NvBool 
__nvoc_thunk_OBJVASPACE_fabricvaspaceIsInternalVaRestricted(struct FABRIC_VASPACE *pVAS) { + return vaspaceIsInternalVaRestricted((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NvU32 __nvoc_thunk_OBJVASPACE_fabricvaspaceGetFlags(struct FABRIC_VASPACE *pVAS) { + return vaspaceGetFlags((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJVASPACE_fabricvaspaceIsAtsEnabled(struct FABRIC_VASPACE *pVAS) { + return vaspaceIsAtsEnabled((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NvU32 __nvoc_thunk_OBJVASPACE_fabricvaspaceGetBigPageSize(struct FABRIC_VASPACE *pVAS) { + return vaspaceGetBigPageSize((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_fabricvaspaceGetPteInfo(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) { + return vaspaceGetPteInfo((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu, pParams, pPhysAddr); +} + +static NvU64 __nvoc_thunk_OBJVASPACE_fabricvaspaceGetVaLimit(struct FABRIC_VASPACE *pVAS) { + return vaspaceGetVaLimit((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static PMEMORY_DESCRIPTOR __nvoc_thunk_OBJVASPACE_fabricvaspaceGetPageDirBase(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu) { + return vaspaceGetPageDirBase((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu); +} + +static PMEMORY_DESCRIPTOR __nvoc_thunk_OBJVASPACE_fabricvaspaceGetKernelPageDirBase(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu) { + return vaspaceGetKernelPageDirBase((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu); +} + +static NvU32 __nvoc_thunk_OBJVASPACE_fabricvaspaceGetMapPageSize(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) { + return vaspaceGetMapPageSize((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu, pMemBlock); +} + +static struct OBJEHEAP *__nvoc_thunk_OBJVASPACE_fabricvaspaceGetHeap(struct FABRIC_VASPACE *pVAS) { + return vaspaceGetHeap((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJVASPACE_fabricvaspaceIsFaultCapable(struct FABRIC_VASPACE *pVAS) { + return vaspaceIsFaultCapable((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NvU64 __nvoc_thunk_OBJVASPACE_fabricvaspaceGetVaStart(struct FABRIC_VASPACE *pVAS) { + return vaspaceGetVaStart((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_fabricvaspaceIncAllocRefCnt(struct FABRIC_VASPACE *pVAS, NvU64 vAddr) { + return vaspaceIncAllocRefCnt((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), vAddr); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_fabricvaspaceSetPteInfo(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + return vaspaceSetPteInfo((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu, pParams); +} + +static 
NV_STATUS __nvoc_thunk_OBJVASPACE_fabricvaspaceGetPasid(struct FABRIC_VASPACE *pVAS, NvU32 *pPasid) { + return vaspaceGetPasid((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pPasid); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_fabricvaspaceGetPageTableInfo(struct FABRIC_VASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + return vaspaceGetPageTableInfo((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_fabricvaspaceReserveMempool(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) { + return vaspaceReserveMempool((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_FABRIC_VASPACE_OBJVASPACE.offset), pGpu, hClient, size, pageSizeLockMask, flags); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_FABRIC_VASPACE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJVASPACE(OBJVASPACE*); +void __nvoc_dtor_FABRIC_VASPACE(FABRIC_VASPACE *pThis) { + __nvoc_fabricvaspaceDestruct(pThis); + __nvoc_dtor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_FABRIC_VASPACE(FABRIC_VASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE* ); +NV_STATUS __nvoc_ctor_FABRIC_VASPACE(FABRIC_VASPACE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + if (status != NV_OK) goto __nvoc_ctor_FABRIC_VASPACE_fail_OBJVASPACE; + __nvoc_init_dataField_FABRIC_VASPACE(pThis); + goto __nvoc_ctor_FABRIC_VASPACE_exit; // Success + +__nvoc_ctor_FABRIC_VASPACE_fail_OBJVASPACE: +__nvoc_ctor_FABRIC_VASPACE_exit: + + return status; +} + +static void __nvoc_init_funcTable_FABRIC_VASPACE_1(FABRIC_VASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__fabricvaspaceConstruct___ = &fabricvaspaceConstruct__IMPL; + + pThis->__fabricvaspaceAlloc__ = &fabricvaspaceAlloc_IMPL; + + pThis->__fabricvaspaceFree__ = &fabricvaspaceFree_IMPL; + + pThis->__fabricvaspaceMap__ = &fabricvaspaceMap_IMPL; + + pThis->__fabricvaspaceUnmap__ = &fabricvaspaceUnmap_IMPL; + + pThis->__fabricvaspaceApplyDefaultAlignment__ = &fabricvaspaceApplyDefaultAlignment_IMPL; + + pThis->__fabricvaspaceGetVasInfo__ = &fabricvaspaceGetVasInfo_IMPL; + + pThis->__fabricvaspacePinRootPageDir__ = &fabricvaspacePinRootPageDir_IMPL; + + pThis->__fabricvaspaceUnpinRootPageDir__ = &fabricvaspaceUnpinRootPageDir_IMPL; + + pThis->__fabricvaspaceInvalidateTlb__ = &fabricvaspaceInvalidateTlb_IMPL; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceConstruct___ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceConstruct_; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceAlloc__ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceAlloc; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceFree__ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceFree; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceMap__ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceMap; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceUnmap__ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceUnmap; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceApplyDefaultAlignment__ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceApplyDefaultAlignment; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVasInfo__ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceGetVasInfo; + + pThis->__nvoc_base_OBJVASPACE.__vaspacePinRootPageDir__ = &__nvoc_thunk_FABRIC_VASPACE_vaspacePinRootPageDir; + + 
pThis->__nvoc_base_OBJVASPACE.__vaspaceUnpinRootPageDir__ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceUnpinRootPageDir; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceInvalidateTlb__ = &__nvoc_thunk_FABRIC_VASPACE_vaspaceInvalidateTlb; + + pThis->__fabricvaspaceIsMirrored__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceIsMirrored; + + pThis->__fabricvaspaceIsExternallyOwned__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceIsExternallyOwned; + + pThis->__fabricvaspaceIsInternalVaRestricted__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceIsInternalVaRestricted; + + pThis->__fabricvaspaceGetFlags__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetFlags; + + pThis->__fabricvaspaceIsAtsEnabled__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceIsAtsEnabled; + + pThis->__fabricvaspaceGetBigPageSize__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetBigPageSize; + + pThis->__fabricvaspaceGetPteInfo__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetPteInfo; + + pThis->__fabricvaspaceGetVaLimit__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetVaLimit; + + pThis->__fabricvaspaceGetPageDirBase__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetPageDirBase; + + pThis->__fabricvaspaceGetKernelPageDirBase__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetKernelPageDirBase; + + pThis->__fabricvaspaceGetMapPageSize__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetMapPageSize; + + pThis->__fabricvaspaceGetHeap__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetHeap; + + pThis->__fabricvaspaceIsFaultCapable__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceIsFaultCapable; + + pThis->__fabricvaspaceGetVaStart__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetVaStart; + + pThis->__fabricvaspaceIncAllocRefCnt__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceIncAllocRefCnt; + + pThis->__fabricvaspaceSetPteInfo__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceSetPteInfo; + + pThis->__fabricvaspaceGetPasid__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetPasid; + + pThis->__fabricvaspaceGetPageTableInfo__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceGetPageTableInfo; + + pThis->__fabricvaspaceReserveMempool__ = &__nvoc_thunk_OBJVASPACE_fabricvaspaceReserveMempool; +} + +void __nvoc_init_funcTable_FABRIC_VASPACE(FABRIC_VASPACE *pThis) { + __nvoc_init_funcTable_FABRIC_VASPACE_1(pThis); +} + +void __nvoc_init_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_FABRIC_VASPACE(FABRIC_VASPACE *pThis) { + pThis->__nvoc_pbase_FABRIC_VASPACE = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJVASPACE = &pThis->__nvoc_base_OBJVASPACE; + __nvoc_init_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + __nvoc_init_funcTable_FABRIC_VASPACE(pThis); +} + +NV_STATUS __nvoc_objCreate_FABRIC_VASPACE(FABRIC_VASPACE **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + FABRIC_VASPACE *pThis; + + pThis = portMemAllocNonPaged(sizeof(FABRIC_VASPACE)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(FABRIC_VASPACE)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_FABRIC_VASPACE); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_FABRIC_VASPACE(pThis); + status = __nvoc_ctor_FABRIC_VASPACE(pThis); + if (status != NV_OK) goto __nvoc_objCreate_FABRIC_VASPACE_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_FABRIC_VASPACE_cleanup: + // do not 
call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_FABRIC_VASPACE(FABRIC_VASPACE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_FABRIC_VASPACE(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_fabric_vaspace_nvoc.h b/src/nvidia/generated/g_fabric_vaspace_nvoc.h new file mode 100644 index 000000000..87fd835ee --- /dev/null +++ b/src/nvidia/generated/g_fabric_vaspace_nvoc.h @@ -0,0 +1,385 @@ +#ifndef _G_FABRIC_VASPACE_NVOC_H_ +#define _G_FABRIC_VASPACE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_fabric_vaspace_nvoc.h" + +#ifndef FABRIC_VASPACE_H +#define FABRIC_VASPACE_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: fabric_vaspace.h * +* Defines and structures used for Fabric Virtual Address Space Object. * +\***************************************************************************/ + +#include "mmu/mmu_walk.h" +#include "mmu/gmmu_fmt.h" +#include "core/core.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/gpu_vaspace.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "ctrl/ctrl0080/ctrl0080dma.h" + +#include "containers/list.h" +#include "containers/map.h" +#include "mem_mgr/pool_alloc.h" + +typedef struct +{ + NODE Node; + MEMORY_DESCRIPTOR *pVidMemDesc; + NvU64 offset; +} FABRIC_VA_TO_GPA_MAP_NODE; + +/*! + * RM-registered/managed Fabric virtual address space. 
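+ *
+ * The struct below embeds OBJVASPACE; the __nvoc_thunk_* helpers in the
+ * matching g_fabric_vaspace_nvoc.c translate between the OBJVASPACE and
+ * FABRIC_VASPACE views by applying the recorded base-class offset. The
+ * pFabricVaToGpaMap tree tracks, per fabric virtual address, the backing
+ * video memory descriptor and offset (see FABRIC_VA_TO_GPA_MAP_NODE and
+ * fabricvaspaceVaToGpaMapInsert/Remove).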
+ */ +#ifdef NVOC_FABRIC_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct FABRIC_VASPACE { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJVASPACE __nvoc_base_OBJVASPACE; + struct Object *__nvoc_pbase_Object; + struct OBJVASPACE *__nvoc_pbase_OBJVASPACE; + struct FABRIC_VASPACE *__nvoc_pbase_FABRIC_VASPACE; + NV_STATUS (*__fabricvaspaceConstruct___)(struct FABRIC_VASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32); + NV_STATUS (*__fabricvaspaceAlloc__)(struct FABRIC_VASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *); + NV_STATUS (*__fabricvaspaceFree__)(struct FABRIC_VASPACE *, NvU64); + NV_STATUS (*__fabricvaspaceMap__)(struct FABRIC_VASPACE *, struct OBJGPU *, const NvU64, const NvU64, const MMU_MAP_TARGET *, const VAS_MAP_FLAGS); + void (*__fabricvaspaceUnmap__)(struct FABRIC_VASPACE *, struct OBJGPU *, const NvU64, const NvU64); + NV_STATUS (*__fabricvaspaceApplyDefaultAlignment__)(struct FABRIC_VASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *); + NV_STATUS (*__fabricvaspaceGetVasInfo__)(struct FABRIC_VASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); + NV_STATUS (*__fabricvaspacePinRootPageDir__)(struct FABRIC_VASPACE *, struct OBJGPU *); + void (*__fabricvaspaceUnpinRootPageDir__)(struct FABRIC_VASPACE *, struct OBJGPU *); + void (*__fabricvaspaceInvalidateTlb__)(struct FABRIC_VASPACE *, struct OBJGPU *, VAS_PTE_UPDATE_TYPE); + NvBool (*__fabricvaspaceIsMirrored__)(struct FABRIC_VASPACE *); + NvBool (*__fabricvaspaceIsExternallyOwned__)(struct FABRIC_VASPACE *); + NvBool (*__fabricvaspaceIsInternalVaRestricted__)(struct FABRIC_VASPACE *); + NvU32 (*__fabricvaspaceGetFlags__)(struct FABRIC_VASPACE *); + NvBool (*__fabricvaspaceIsAtsEnabled__)(struct FABRIC_VASPACE *); + NvU32 (*__fabricvaspaceGetBigPageSize__)(struct FABRIC_VASPACE *); + NV_STATUS (*__fabricvaspaceGetPteInfo__)(struct FABRIC_VASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *, RmPhysAddr *); + NvU64 (*__fabricvaspaceGetVaLimit__)(struct FABRIC_VASPACE *); + PMEMORY_DESCRIPTOR (*__fabricvaspaceGetPageDirBase__)(struct FABRIC_VASPACE *, struct OBJGPU *); + PMEMORY_DESCRIPTOR (*__fabricvaspaceGetKernelPageDirBase__)(struct FABRIC_VASPACE *, struct OBJGPU *); + NvU32 (*__fabricvaspaceGetMapPageSize__)(struct FABRIC_VASPACE *, struct OBJGPU *, EMEMBLOCK *); + struct OBJEHEAP *(*__fabricvaspaceGetHeap__)(struct FABRIC_VASPACE *); + NvBool (*__fabricvaspaceIsFaultCapable__)(struct FABRIC_VASPACE *); + NvU64 (*__fabricvaspaceGetVaStart__)(struct FABRIC_VASPACE *); + NV_STATUS (*__fabricvaspaceIncAllocRefCnt__)(struct FABRIC_VASPACE *, NvU64); + NV_STATUS (*__fabricvaspaceSetPteInfo__)(struct FABRIC_VASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *); + NV_STATUS (*__fabricvaspaceGetPasid__)(struct FABRIC_VASPACE *, NvU32 *); + NV_STATUS (*__fabricvaspaceGetPageTableInfo__)(struct FABRIC_VASPACE *, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *); + NV_STATUS (*__fabricvaspaceReserveMempool__)(struct FABRIC_VASPACE *, struct OBJGPU *, NvHandle, NvU64, NvU64, NvU32); + struct OBJVASPACE *pGVAS; + NvU32 flags; + NvHandle hClient; + NvHandle hDevice; + NODE *pFabricVaToGpaMap; + NvU32 gfid; + NvBool bRpcAlloc; +}; + +#ifndef __NVOC_CLASS_FABRIC_VASPACE_TYPEDEF__ +#define __NVOC_CLASS_FABRIC_VASPACE_TYPEDEF__ +typedef struct FABRIC_VASPACE FABRIC_VASPACE; +#endif /* __NVOC_CLASS_FABRIC_VASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FABRIC_VASPACE +#define 
__nvoc_class_id_FABRIC_VASPACE 0x8c8f3d +#endif /* __nvoc_class_id_FABRIC_VASPACE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_FABRIC_VASPACE; + +#define __staticCast_FABRIC_VASPACE(pThis) \ + ((pThis)->__nvoc_pbase_FABRIC_VASPACE) + +#ifdef __nvoc_fabric_vaspace_h_disabled +#define __dynamicCast_FABRIC_VASPACE(pThis) ((FABRIC_VASPACE*)NULL) +#else //__nvoc_fabric_vaspace_h_disabled +#define __dynamicCast_FABRIC_VASPACE(pThis) \ + ((FABRIC_VASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(FABRIC_VASPACE))) +#endif //__nvoc_fabric_vaspace_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_FABRIC_VASPACE(FABRIC_VASPACE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_FABRIC_VASPACE(FABRIC_VASPACE**, Dynamic*, NvU32); +#define __objCreate_FABRIC_VASPACE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_FABRIC_VASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define fabricvaspaceConstruct_(pFabricVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) fabricvaspaceConstruct__DISPATCH(pFabricVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) +#define fabricvaspaceAlloc(pFabricVAS, size, align, rangeLo, rangeHi, pageSize, flags, pAddr) fabricvaspaceAlloc_DISPATCH(pFabricVAS, size, align, rangeLo, rangeHi, pageSize, flags, pAddr) +#define fabricvaspaceFree(pFabricVAS, vAddr) fabricvaspaceFree_DISPATCH(pFabricVAS, vAddr) +#define fabricvaspaceMap(pFabricVAS, pGpu, vaLo, vaHi, pTarget, flags) fabricvaspaceMap_DISPATCH(pFabricVAS, pGpu, vaLo, vaHi, pTarget, flags) +#define fabricvaspaceUnmap(pFabricVAS, pGpu, vaLo, vaHi) fabricvaspaceUnmap_DISPATCH(pFabricVAS, pGpu, vaLo, vaHi) +#define fabricvaspaceApplyDefaultAlignment(pFabricVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) fabricvaspaceApplyDefaultAlignment_DISPATCH(pFabricVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) +#define fabricvaspaceGetVasInfo(pFabricVAS, pParams) fabricvaspaceGetVasInfo_DISPATCH(pFabricVAS, pParams) +#define fabricvaspacePinRootPageDir(pFabricVAS, pGpu) fabricvaspacePinRootPageDir_DISPATCH(pFabricVAS, pGpu) +#define fabricvaspaceUnpinRootPageDir(pFabricVAS, pGpu) fabricvaspaceUnpinRootPageDir_DISPATCH(pFabricVAS, pGpu) +#define fabricvaspaceInvalidateTlb(pFabricVAS, pGpu, type) fabricvaspaceInvalidateTlb_DISPATCH(pFabricVAS, pGpu, type) +#define fabricvaspaceIsMirrored(pVAS) fabricvaspaceIsMirrored_DISPATCH(pVAS) +#define fabricvaspaceIsExternallyOwned(pVAS) fabricvaspaceIsExternallyOwned_DISPATCH(pVAS) +#define fabricvaspaceIsInternalVaRestricted(pVAS) fabricvaspaceIsInternalVaRestricted_DISPATCH(pVAS) +#define fabricvaspaceGetFlags(pVAS) fabricvaspaceGetFlags_DISPATCH(pVAS) +#define fabricvaspaceIsAtsEnabled(pVAS) fabricvaspaceIsAtsEnabled_DISPATCH(pVAS) +#define fabricvaspaceGetBigPageSize(pVAS) fabricvaspaceGetBigPageSize_DISPATCH(pVAS) +#define fabricvaspaceGetPteInfo(pVAS, pGpu, pParams, pPhysAddr) fabricvaspaceGetPteInfo_DISPATCH(pVAS, pGpu, pParams, pPhysAddr) +#define fabricvaspaceGetVaLimit(pVAS) fabricvaspaceGetVaLimit_DISPATCH(pVAS) +#define fabricvaspaceGetPageDirBase(pVAS, pGpu) fabricvaspaceGetPageDirBase_DISPATCH(pVAS, pGpu) +#define fabricvaspaceGetKernelPageDirBase(pVAS, pGpu) fabricvaspaceGetKernelPageDirBase_DISPATCH(pVAS, pGpu) +#define fabricvaspaceGetMapPageSize(pVAS, pGpu, pMemBlock) fabricvaspaceGetMapPageSize_DISPATCH(pVAS, pGpu, pMemBlock) +#define fabricvaspaceGetHeap(pVAS) fabricvaspaceGetHeap_DISPATCH(pVAS) +#define fabricvaspaceIsFaultCapable(pVAS) 
fabricvaspaceIsFaultCapable_DISPATCH(pVAS) +#define fabricvaspaceGetVaStart(pVAS) fabricvaspaceGetVaStart_DISPATCH(pVAS) +#define fabricvaspaceIncAllocRefCnt(pVAS, vAddr) fabricvaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr) +#define fabricvaspaceSetPteInfo(pVAS, pGpu, pParams) fabricvaspaceSetPteInfo_DISPATCH(pVAS, pGpu, pParams) +#define fabricvaspaceGetPasid(pVAS, pPasid) fabricvaspaceGetPasid_DISPATCH(pVAS, pPasid) +#define fabricvaspaceGetPageTableInfo(pVAS, pParams) fabricvaspaceGetPageTableInfo_DISPATCH(pVAS, pParams) +#define fabricvaspaceReserveMempool(pVAS, pGpu, hClient, size, pageSizeLockMask, flags) fabricvaspaceReserveMempool_DISPATCH(pVAS, pGpu, hClient, size, pageSizeLockMask, flags) +NV_STATUS fabricvaspaceConstruct__IMPL(struct FABRIC_VASPACE *pFabricVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags); + +static inline NV_STATUS fabricvaspaceConstruct__DISPATCH(struct FABRIC_VASPACE *pFabricVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return pFabricVAS->__fabricvaspaceConstruct___(pFabricVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +NV_STATUS fabricvaspaceAlloc_IMPL(struct FABRIC_VASPACE *pFabricVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSize, VAS_ALLOC_FLAGS flags, NvU64 *pAddr); + +static inline NV_STATUS fabricvaspaceAlloc_DISPATCH(struct FABRIC_VASPACE *pFabricVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSize, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return pFabricVAS->__fabricvaspaceAlloc__(pFabricVAS, size, align, rangeLo, rangeHi, pageSize, flags, pAddr); +} + +NV_STATUS fabricvaspaceFree_IMPL(struct FABRIC_VASPACE *pFabricVAS, NvU64 vAddr); + +static inline NV_STATUS fabricvaspaceFree_DISPATCH(struct FABRIC_VASPACE *pFabricVAS, NvU64 vAddr) { + return pFabricVAS->__fabricvaspaceFree__(pFabricVAS, vAddr); +} + +NV_STATUS fabricvaspaceMap_IMPL(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags); + +static inline NV_STATUS fabricvaspaceMap_DISPATCH(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) { + return pFabricVAS->__fabricvaspaceMap__(pFabricVAS, pGpu, vaLo, vaHi, pTarget, flags); +} + +void fabricvaspaceUnmap_IMPL(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi); + +static inline void fabricvaspaceUnmap_DISPATCH(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) { + pFabricVAS->__fabricvaspaceUnmap__(pFabricVAS, pGpu, vaLo, vaHi); +} + +NV_STATUS fabricvaspaceApplyDefaultAlignment_IMPL(struct FABRIC_VASPACE *pFabricVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask); + +static inline NV_STATUS fabricvaspaceApplyDefaultAlignment_DISPATCH(struct FABRIC_VASPACE *pFabricVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return pFabricVAS->__fabricvaspaceApplyDefaultAlignment__(pFabricVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +NV_STATUS fabricvaspaceGetVasInfo_IMPL(struct FABRIC_VASPACE *pFabricVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams); + +static inline NV_STATUS fabricvaspaceGetVasInfo_DISPATCH(struct FABRIC_VASPACE 
*pFabricVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pFabricVAS->__fabricvaspaceGetVasInfo__(pFabricVAS, pParams); +} + +NV_STATUS fabricvaspacePinRootPageDir_IMPL(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu); + +static inline NV_STATUS fabricvaspacePinRootPageDir_DISPATCH(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu) { + return pFabricVAS->__fabricvaspacePinRootPageDir__(pFabricVAS, pGpu); +} + +void fabricvaspaceUnpinRootPageDir_IMPL(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu); + +static inline void fabricvaspaceUnpinRootPageDir_DISPATCH(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu) { + pFabricVAS->__fabricvaspaceUnpinRootPageDir__(pFabricVAS, pGpu); +} + +void fabricvaspaceInvalidateTlb_IMPL(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type); + +static inline void fabricvaspaceInvalidateTlb_DISPATCH(struct FABRIC_VASPACE *pFabricVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) { + pFabricVAS->__fabricvaspaceInvalidateTlb__(pFabricVAS, pGpu, type); +} + +static inline NvBool fabricvaspaceIsMirrored_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceIsMirrored__(pVAS); +} + +static inline NvBool fabricvaspaceIsExternallyOwned_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceIsExternallyOwned__(pVAS); +} + +static inline NvBool fabricvaspaceIsInternalVaRestricted_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceIsInternalVaRestricted__(pVAS); +} + +static inline NvU32 fabricvaspaceGetFlags_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceGetFlags__(pVAS); +} + +static inline NvBool fabricvaspaceIsAtsEnabled_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceIsAtsEnabled__(pVAS); +} + +static inline NvU32 fabricvaspaceGetBigPageSize_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceGetBigPageSize__(pVAS); +} + +static inline NV_STATUS fabricvaspaceGetPteInfo_DISPATCH(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) { + return pVAS->__fabricvaspaceGetPteInfo__(pVAS, pGpu, pParams, pPhysAddr); +} + +static inline NvU64 fabricvaspaceGetVaLimit_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceGetVaLimit__(pVAS); +} + +static inline PMEMORY_DESCRIPTOR fabricvaspaceGetPageDirBase_DISPATCH(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__fabricvaspaceGetPageDirBase__(pVAS, pGpu); +} + +static inline PMEMORY_DESCRIPTOR fabricvaspaceGetKernelPageDirBase_DISPATCH(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__fabricvaspaceGetKernelPageDirBase__(pVAS, pGpu); +} + +static inline NvU32 fabricvaspaceGetMapPageSize_DISPATCH(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) { + return pVAS->__fabricvaspaceGetMapPageSize__(pVAS, pGpu, pMemBlock); +} + +static inline struct OBJEHEAP *fabricvaspaceGetHeap_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceGetHeap__(pVAS); +} + +static inline NvBool fabricvaspaceIsFaultCapable_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceIsFaultCapable__(pVAS); +} + +static inline NvU64 fabricvaspaceGetVaStart_DISPATCH(struct FABRIC_VASPACE *pVAS) { + return pVAS->__fabricvaspaceGetVaStart__(pVAS); +} + +static inline NV_STATUS fabricvaspaceIncAllocRefCnt_DISPATCH(struct FABRIC_VASPACE *pVAS, NvU64 vAddr) { + return pVAS->__fabricvaspaceIncAllocRefCnt__(pVAS, 
vAddr); +} + +static inline NV_STATUS fabricvaspaceSetPteInfo_DISPATCH(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + return pVAS->__fabricvaspaceSetPteInfo__(pVAS, pGpu, pParams); +} + +static inline NV_STATUS fabricvaspaceGetPasid_DISPATCH(struct FABRIC_VASPACE *pVAS, NvU32 *pPasid) { + return pVAS->__fabricvaspaceGetPasid__(pVAS, pPasid); +} + +static inline NV_STATUS fabricvaspaceGetPageTableInfo_DISPATCH(struct FABRIC_VASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + return pVAS->__fabricvaspaceGetPageTableInfo__(pVAS, pParams); +} + +static inline NV_STATUS fabricvaspaceReserveMempool_DISPATCH(struct FABRIC_VASPACE *pVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) { + return pVAS->__fabricvaspaceReserveMempool__(pVAS, pGpu, hClient, size, pageSizeLockMask, flags); +} + +void fabricvaspaceDestruct_IMPL(struct FABRIC_VASPACE *pFabricVAS); +#define __nvoc_fabricvaspaceDestruct(pFabricVAS) fabricvaspaceDestruct_IMPL(pFabricVAS) +NV_STATUS fabricvaspaceAllocNonContiguous_IMPL(struct FABRIC_VASPACE *pFabricVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSize, VAS_ALLOC_FLAGS flags, NvU64 **ppAddr, NvU32 *pNumAddr); +#ifdef __nvoc_fabric_vaspace_h_disabled +static inline NV_STATUS fabricvaspaceAllocNonContiguous(struct FABRIC_VASPACE *pFabricVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSize, VAS_ALLOC_FLAGS flags, NvU64 **ppAddr, NvU32 *pNumAddr) { + NV_ASSERT_FAILED_PRECOMP("FABRIC_VASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_fabric_vaspace_h_disabled +#define fabricvaspaceAllocNonContiguous(pFabricVAS, size, align, rangeLo, rangeHi, pageSize, flags, ppAddr, pNumAddr) fabricvaspaceAllocNonContiguous_IMPL(pFabricVAS, size, align, rangeLo, rangeHi, pageSize, flags, ppAddr, pNumAddr) +#endif //__nvoc_fabric_vaspace_h_disabled + +void fabricvaspaceBatchFree_IMPL(struct FABRIC_VASPACE *pFabricVAS, NvU64 *pAddr, NvU32 numAddr, NvU32 stride); +#ifdef __nvoc_fabric_vaspace_h_disabled +static inline void fabricvaspaceBatchFree(struct FABRIC_VASPACE *pFabricVAS, NvU64 *pAddr, NvU32 numAddr, NvU32 stride) { + NV_ASSERT_FAILED_PRECOMP("FABRIC_VASPACE was disabled!"); +} +#else //__nvoc_fabric_vaspace_h_disabled +#define fabricvaspaceBatchFree(pFabricVAS, pAddr, numAddr, stride) fabricvaspaceBatchFree_IMPL(pFabricVAS, pAddr, numAddr, stride) +#endif //__nvoc_fabric_vaspace_h_disabled + +NV_STATUS fabricvaspaceGetFreeHeap_IMPL(struct FABRIC_VASPACE *pFabricVAS, NvU64 *pFreeSize); +#ifdef __nvoc_fabric_vaspace_h_disabled +static inline NV_STATUS fabricvaspaceGetFreeHeap(struct FABRIC_VASPACE *pFabricVAS, NvU64 *pFreeSize) { + NV_ASSERT_FAILED_PRECOMP("FABRIC_VASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_fabric_vaspace_h_disabled +#define fabricvaspaceGetFreeHeap(pFabricVAS, pFreeSize) fabricvaspaceGetFreeHeap_IMPL(pFabricVAS, pFreeSize) +#endif //__nvoc_fabric_vaspace_h_disabled + +NV_STATUS fabricvaspaceGetGpaMemdesc_IMPL(struct FABRIC_VASPACE *pFabricVAS, MEMORY_DESCRIPTOR *pFabricMemdesc, struct OBJGPU *pMappingGpu, MEMORY_DESCRIPTOR **ppAdjustedMemdesc); +#ifdef __nvoc_fabric_vaspace_h_disabled +static inline NV_STATUS fabricvaspaceGetGpaMemdesc(struct FABRIC_VASPACE *pFabricVAS, MEMORY_DESCRIPTOR *pFabricMemdesc, struct OBJGPU *pMappingGpu, MEMORY_DESCRIPTOR **ppAdjustedMemdesc) { + NV_ASSERT_FAILED_PRECOMP("FABRIC_VASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_fabric_vaspace_h_disabled +#define fabricvaspaceGetGpaMemdesc(pFabricVAS, pFabricMemdesc, pMappingGpu, ppAdjustedMemdesc) fabricvaspaceGetGpaMemdesc_IMPL(pFabricVAS, pFabricMemdesc, pMappingGpu, ppAdjustedMemdesc) +#endif //__nvoc_fabric_vaspace_h_disabled + +void fabricvaspacePutGpaMemdesc_IMPL(struct FABRIC_VASPACE *pFabricVAS, MEMORY_DESCRIPTOR *pMemDesc); +#ifdef __nvoc_fabric_vaspace_h_disabled +static inline void fabricvaspacePutGpaMemdesc(struct FABRIC_VASPACE *pFabricVAS, MEMORY_DESCRIPTOR *pMemDesc) { + NV_ASSERT_FAILED_PRECOMP("FABRIC_VASPACE was disabled!"); +} +#else //__nvoc_fabric_vaspace_h_disabled +#define fabricvaspacePutGpaMemdesc(pFabricVAS, pMemDesc) fabricvaspacePutGpaMemdesc_IMPL(pFabricVAS, pMemDesc) +#endif //__nvoc_fabric_vaspace_h_disabled + +NV_STATUS fabricvaspaceVaToGpaMapInsert_IMPL(struct FABRIC_VASPACE *pFabricVAS, NvU64 vAddr, MEMORY_DESCRIPTOR *pVidMemDesc, NvU64 offset); +#ifdef __nvoc_fabric_vaspace_h_disabled +static inline NV_STATUS fabricvaspaceVaToGpaMapInsert(struct FABRIC_VASPACE *pFabricVAS, NvU64 vAddr, MEMORY_DESCRIPTOR *pVidMemDesc, NvU64 offset) { + NV_ASSERT_FAILED_PRECOMP("FABRIC_VASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_fabric_vaspace_h_disabled +#define fabricvaspaceVaToGpaMapInsert(pFabricVAS, vAddr, pVidMemDesc, offset) fabricvaspaceVaToGpaMapInsert_IMPL(pFabricVAS, vAddr, pVidMemDesc, offset) +#endif //__nvoc_fabric_vaspace_h_disabled + +void fabricvaspaceVaToGpaMapRemove_IMPL(struct FABRIC_VASPACE *pFabricVAS, NvU64 vAddr); +#ifdef __nvoc_fabric_vaspace_h_disabled +static inline void fabricvaspaceVaToGpaMapRemove(struct FABRIC_VASPACE *pFabricVAS, NvU64 vAddr) { + NV_ASSERT_FAILED_PRECOMP("FABRIC_VASPACE was disabled!"); +} +#else //__nvoc_fabric_vaspace_h_disabled +#define fabricvaspaceVaToGpaMapRemove(pFabricVAS, vAddr) fabricvaspaceVaToGpaMapRemove_IMPL(pFabricVAS, vAddr) +#endif //__nvoc_fabric_vaspace_h_disabled + +#undef PRIVATE_FIELD + + +#endif // FABRIC_VASPACE_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_FABRIC_VASPACE_NVOC_H_ diff --git a/src/nvidia/generated/g_fbsr_nvoc.c b/src/nvidia/generated/g_fbsr_nvoc.c new file mode 100644 index 000000000..d87d5d4b9 --- /dev/null +++ b/src/nvidia/generated/g_fbsr_nvoc.c @@ -0,0 +1,189 @@ +#define NVOC_FBSR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_fbsr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xa30fe6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJFBSR; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJFBSR(OBJFBSR*, RmHalspecOwner* ); +void __nvoc_init_funcTable_OBJFBSR(OBJFBSR*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJFBSR(OBJFBSR*, RmHalspecOwner* ); +void __nvoc_init_dataField_OBJFBSR(OBJFBSR*, RmHalspecOwner* ); +void __nvoc_dtor_OBJFBSR(OBJFBSR*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJFBSR; + +static const struct NVOC_RTTI __nvoc_rtti_OBJFBSR_OBJFBSR = { + /*pClassDef=*/ &__nvoc_class_def_OBJFBSR, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJFBSR, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJFBSR_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJFBSR, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJFBSR = { + 
/*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJFBSR_OBJFBSR, + &__nvoc_rtti_OBJFBSR_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJFBSR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJFBSR), + /*classId=*/ classId(OBJFBSR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJFBSR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJFBSR, + /*pCastInfo=*/ &__nvoc_castinfo_OBJFBSR, + /*pExportInfo=*/ &__nvoc_export_info_OBJFBSR +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJFBSR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJFBSR(OBJFBSR *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJFBSR(OBJFBSR *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJFBSR(OBJFBSR *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJFBSR_fail_Object; + __nvoc_init_dataField_OBJFBSR(pThis, pRmhalspecowner); + goto __nvoc_ctor_OBJFBSR_exit; // Success + +__nvoc_ctor_OBJFBSR_fail_Object: +__nvoc_ctor_OBJFBSR_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJFBSR_1(OBJFBSR *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + // Hal function -- fbsrBegin + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__fbsrBegin__ = &fbsrBegin_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__fbsrBegin__ = &fbsrBegin_GM107; + } + else if (0) + { + } + + // Hal function -- fbsrEnd + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__fbsrEnd__ = &fbsrEnd_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__fbsrEnd__ = &fbsrEnd_GM107; + } + else if (0) + { + } +} + +void __nvoc_init_funcTable_OBJFBSR(OBJFBSR *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_OBJFBSR_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJFBSR(OBJFBSR *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_OBJFBSR = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJFBSR(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_OBJFBSR(OBJFBSR **ppThis, 
Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJFBSR *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(OBJFBSR)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJFBSR)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJFBSR); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_OBJFBSR(pThis, pRmhalspecowner); + status = __nvoc_ctor_OBJFBSR(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_OBJFBSR_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJFBSR_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJFBSR(OBJFBSR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJFBSR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_fbsr_nvoc.h b/src/nvidia/generated/g_fbsr_nvoc.h new file mode 100644 index 000000000..551f53478 --- /dev/null +++ b/src/nvidia/generated/g_fbsr_nvoc.h @@ -0,0 +1,256 @@ +#ifndef _G_FBSR_NVOC_H_ +#define _G_FBSR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_fbsr_nvoc.h" + +#ifndef FBSR_H +#define FBSR_H + +#include "core/core.h" +#include "gpu/gpu.h" + + + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: OBJFBSR.H * +* Defines and structures used for the FB Save/Restore Engine Object. 
* +\***************************************************************************/ + +typedef enum +{ + FBSR_OP_SIZE_BUF = 0, + FBSR_OP_SAVE, + FBSR_OP_RESTORE, + FBSR_OP_DESTROY, +} FBSR_OP_TYPE; + +typedef struct _def_fbsr_node +{ + struct _def_fbsr_node *pNext; + NvU32 data[1]; +} FBSR_NODE, *PFBSR_NODE; + +typedef struct +{ + void* sectionHandle; + void* pMdl; // MDL + NvP64 sysAddr; + NvU64 maxLength; // MAX VA size allocated + NvU64 avblViewSz; // Chunk of mapped view that's unprocessed i.e., not restored or can be saved. +} BACKINGSTORE_SECTION_INFO; + +typedef struct OBJFBSR *POBJFBSR; + +#ifndef __NVOC_CLASS_OBJFBSR_TYPEDEF__ +#define __NVOC_CLASS_OBJFBSR_TYPEDEF__ +typedef struct OBJFBSR OBJFBSR; +#endif /* __NVOC_CLASS_OBJFBSR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFBSR +#define __nvoc_class_id_OBJFBSR 0xa30fe6 +#endif /* __nvoc_class_id_OBJFBSR */ + + + +#ifdef NVOC_FBSR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJFBSR { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJFBSR *__nvoc_pbase_OBJFBSR; + NV_STATUS (*__fbsrBegin__)(struct OBJGPU *, struct OBJFBSR *, FBSR_OP_TYPE); + NV_STATUS (*__fbsrEnd__)(struct OBJGPU *, struct OBJFBSR *); + NvU32 type; + struct OBJCE *pCe; + FBSR_OP_TYPE op; + MEMORY_DESCRIPTOR *pSysMemDesc; + PFBSR_NODE pSysMemNodeHead; + PFBSR_NODE pSysMemNodeCurrent; + BACKINGSTORE_SECTION_INFO pagedBufferInfo; + NvU32 *pPinnedBuffer; + NvU8 *pDmaBuffer; + void *pMapCookie; + NvU64 length; + NvU64 sysOffset; + NvBool bOperationFailed; + NvBool bValid; + NvBool bInitialized; + NvBool bRawModeWasEnabled; + MEMORY_DESCRIPTOR *pSysReservedMemDesc; +}; + +#ifndef __NVOC_CLASS_OBJFBSR_TYPEDEF__ +#define __NVOC_CLASS_OBJFBSR_TYPEDEF__ +typedef struct OBJFBSR OBJFBSR; +#endif /* __NVOC_CLASS_OBJFBSR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJFBSR +#define __nvoc_class_id_OBJFBSR 0xa30fe6 +#endif /* __nvoc_class_id_OBJFBSR */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJFBSR; + +#define __staticCast_OBJFBSR(pThis) \ + ((pThis)->__nvoc_pbase_OBJFBSR) + +#ifdef __nvoc_fbsr_h_disabled +#define __dynamicCast_OBJFBSR(pThis) ((OBJFBSR*)NULL) +#else //__nvoc_fbsr_h_disabled +#define __dynamicCast_OBJFBSR(pThis) \ + ((OBJFBSR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJFBSR))) +#endif //__nvoc_fbsr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJFBSR(OBJFBSR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJFBSR(OBJFBSR**, Dynamic*, NvU32); +#define __objCreate_OBJFBSR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJFBSR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define fbsrBegin(pGpu, pFbsr, op) fbsrBegin_DISPATCH(pGpu, pFbsr, op) +#define fbsrBegin_HAL(pGpu, pFbsr, op) fbsrBegin_DISPATCH(pGpu, pFbsr, op) +#define fbsrEnd(pGpu, pFbsr) fbsrEnd_DISPATCH(pGpu, pFbsr) +#define fbsrEnd_HAL(pGpu, pFbsr) fbsrEnd_DISPATCH(pGpu, pFbsr) +NV_STATUS fbsrInit_GM107(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr); + +#ifdef __nvoc_fbsr_h_disabled +static inline NV_STATUS fbsrInit(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr) { + NV_ASSERT_FAILED_PRECOMP("OBJFBSR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_fbsr_h_disabled +#define fbsrInit(pGpu, pFbsr) fbsrInit_GM107(pGpu, pFbsr) +#endif //__nvoc_fbsr_h_disabled + +#define fbsrInit_HAL(pGpu, pFbsr) fbsrInit(pGpu, pFbsr) + +void fbsrDestroy_GM107(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr); 
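+/*
+ * Illustrative only: one plausible caller sequence for the fbsrBegin/fbsrEnd
+ * HAL entry points declared in this header, assuming a two-pass flow that
+ * first sizes the backing buffer and then performs the save; the locals
+ * (pGpu, pFbsr, pVidMemDesc) are hypothetical and error handling is omitted.
+ *
+ *   fbsrBegin_HAL(pGpu, pFbsr, FBSR_OP_SIZE_BUF);        // pass 1: size the backing buffer
+ *   fbsrCopyMemoryMemDesc_HAL(pGpu, pFbsr, pVidMemDesc); // account for each FB region
+ *   fbsrEnd_HAL(pGpu, pFbsr);
+ *
+ *   fbsrBegin_HAL(pGpu, pFbsr, FBSR_OP_SAVE);            // pass 2: copy FB contents out
+ *   fbsrCopyMemoryMemDesc_HAL(pGpu, pFbsr, pVidMemDesc);
+ *   fbsrEnd_HAL(pGpu, pFbsr);
+ */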
+ +#ifdef __nvoc_fbsr_h_disabled +static inline void fbsrDestroy(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr) { + NV_ASSERT_FAILED_PRECOMP("OBJFBSR was disabled!"); +} +#else //__nvoc_fbsr_h_disabled +#define fbsrDestroy(pGpu, pFbsr) fbsrDestroy_GM107(pGpu, pFbsr) +#endif //__nvoc_fbsr_h_disabled + +#define fbsrDestroy_HAL(pGpu, pFbsr) fbsrDestroy(pGpu, pFbsr) + +void fbsrCopyMemoryMemDesc_GM107(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr, MEMORY_DESCRIPTOR *pVidMemDesc); + +#ifdef __nvoc_fbsr_h_disabled +static inline void fbsrCopyMemoryMemDesc(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr, MEMORY_DESCRIPTOR *pVidMemDesc) { + NV_ASSERT_FAILED_PRECOMP("OBJFBSR was disabled!"); +} +#else //__nvoc_fbsr_h_disabled +#define fbsrCopyMemoryMemDesc(pGpu, pFbsr, pVidMemDesc) fbsrCopyMemoryMemDesc_GM107(pGpu, pFbsr, pVidMemDesc) +#endif //__nvoc_fbsr_h_disabled + +#define fbsrCopyMemoryMemDesc_HAL(pGpu, pFbsr, pVidMemDesc) fbsrCopyMemoryMemDesc(pGpu, pFbsr, pVidMemDesc) + +NV_STATUS fbsrBegin_GA100(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr, FBSR_OP_TYPE op); + +NV_STATUS fbsrBegin_GM107(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr, FBSR_OP_TYPE op); + +static inline NV_STATUS fbsrBegin_46f6a7(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr, FBSR_OP_TYPE op) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS fbsrBegin_DISPATCH(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr, FBSR_OP_TYPE op) { + return pFbsr->__fbsrBegin__(pGpu, pFbsr, op); +} + +NV_STATUS fbsrEnd_GA100(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr); + +NV_STATUS fbsrEnd_GM107(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr); + +static inline NV_STATUS fbsrEnd_5baef9(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS fbsrEnd_DISPATCH(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr) { + return pFbsr->__fbsrEnd__(pGpu, pFbsr); +} + +NV_STATUS fbsrObjectInit_IMPL(struct OBJFBSR *pFbsr, NvU32 arg0); +#ifdef __nvoc_fbsr_h_disabled +static inline NV_STATUS fbsrObjectInit(struct OBJFBSR *pFbsr, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJFBSR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_fbsr_h_disabled +#define fbsrObjectInit(pFbsr, arg0) fbsrObjectInit_IMPL(pFbsr, arg0) +#endif //__nvoc_fbsr_h_disabled + +NV_STATUS fbsrReserveSysMemoryForPowerMgmt_IMPL(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr, NvU64 arg0); +#ifdef __nvoc_fbsr_h_disabled +static inline NV_STATUS fbsrReserveSysMemoryForPowerMgmt(struct OBJGPU *pGpu, struct OBJFBSR *pFbsr, NvU64 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJFBSR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_fbsr_h_disabled +#define fbsrReserveSysMemoryForPowerMgmt(pGpu, pFbsr, arg0) fbsrReserveSysMemoryForPowerMgmt_IMPL(pGpu, pFbsr, arg0) +#endif //__nvoc_fbsr_h_disabled + +void fbsrFreeReservedSysMemoryForPowerMgmt_IMPL(struct OBJFBSR *pFbsr); +#ifdef __nvoc_fbsr_h_disabled +static inline void fbsrFreeReservedSysMemoryForPowerMgmt(struct OBJFBSR *pFbsr) { + NV_ASSERT_FAILED_PRECOMP("OBJFBSR was disabled!"); +} +#else //__nvoc_fbsr_h_disabled +#define fbsrFreeReservedSysMemoryForPowerMgmt(pFbsr) fbsrFreeReservedSysMemoryForPowerMgmt_IMPL(pFbsr) +#endif //__nvoc_fbsr_h_disabled + +#undef PRIVATE_FIELD + + +// Method used for copying +#define FBSR_TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED 0 // Pre-reserve paged region during boot. Map and pin the region and DMA copy to it from FB and unpin it. +#define FBSR_TYPE_WDDM_SLOW_CPU_PAGED 1 // Pre-reserve paged region during boot. 
Map in chunks of 64K and DMA copy to a scratch space of 64K and cpu copy to the paged region +#define FBSR_TYPE_PAGED_DMA 2 // Copy using DMA approach, allocated from paged pool +#define FBSR_TYPE_PERSISTENT 3 // Copy using DMA approach, memory will be persistent after allocation to avoid from system VM fragmentation. +#define FBSR_TYPE_DMA 4 // Copy using DMA. Fastest. +#define FBSR_TYPE_CPU 5 // CPU. Used when we don't have enough resources for DMA. +#define FBSR_TYPE_FILE 6 // DMA from FB to scratch sysmem buffer of 64K size , which in turn copies to temporary file backed system memory + +#define NUM_FBSR_TYPES (FBSR_TYPE_FILE + 1) + +#endif // FBSR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_FBSR_NVOC_H_ diff --git a/src/nvidia/generated/g_fecs_event_list_nvoc.h b/src/nvidia/generated/g_fecs_event_list_nvoc.h new file mode 100644 index 000000000..05c45d2a1 --- /dev/null +++ b/src/nvidia/generated/g_fecs_event_list_nvoc.h @@ -0,0 +1,252 @@ +#ifndef _G_FECS_EVENT_LIST_NVOC_H_ +#define _G_FECS_EVENT_LIST_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_fecs_event_list_nvoc.h" + +#ifndef _FECS_EVENT_LIST_H_ +#define _FECS_EVENT_LIST_H_ + +/*! 
+ * @file fecs_event_list.h + * @brief Provides definition for FECS callback on EventBuffer, as well as a list holding the subscribers to the event + */ + +#include "core/core.h" +#include "class/cl90cd.h" +#include "class/cl90cdfecs.h" +#include "containers/multimap.h" +#include "resserv/resserv.h" + +#include "ctrl/ctrl2080/ctrl2080gr.h" // NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD + +struct KernelGraphics; + +#ifndef __NVOC_CLASS_KernelGraphics_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphics_TYPEDEF__ +typedef struct KernelGraphics KernelGraphics; +#endif /* __NVOC_CLASS_KernelGraphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphics +#define __nvoc_class_id_KernelGraphics 0xea3fa9 +#endif /* __nvoc_class_id_KernelGraphics */ + + +struct EventBuffer; + +#ifndef __NVOC_CLASS_EventBuffer_TYPEDEF__ +#define __NVOC_CLASS_EventBuffer_TYPEDEF__ +typedef struct EventBuffer EventBuffer; +#endif /* __NVOC_CLASS_EventBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_EventBuffer +#define __nvoc_class_id_EventBuffer 0x63502b +#endif /* __nvoc_class_id_EventBuffer */ + + + +// TODO move to cl90cdfecs.h +#define NV_EVENT_BUFFER_INVALID_MIG_GI 0xFF +#define NV_EVENT_BUFFER_INVALID_MIG_CI 0xFF +#define NV_EVENT_BUFFER_HIDDEN_MIG_GI 0xFE +#define NV_EVENT_BUFFER_HIDDEN_MIG_CI 0xFE +#define NV_EVENT_BUFFER_KERNEL_MIG_GI 0xFD +#define NV_EVENT_BUFFER_KERNEL_MIG_CI 0xFD + +typedef struct +{ + struct EventBuffer *pEventBuffer; + NvHandle hClient; + NvHandle hNotifier; + NvHandle hEventBuffer; + + NvU64 pUserInfo; + + NvBool bAdmin; + NvBool bKernel; + NvU32 eventMask; + + NvU8 version; + NvU32 swizzId; + NvU32 computeInstanceId; + +} NV_EVENT_BUFFER_BIND_POINT_FECS; +MAKE_MULTIMAP(FecsEventBufferBindMultiMap, NV_EVENT_BUFFER_BIND_POINT_FECS); +MAKE_MAP(EventBufferMap, EventBuffer*); + +typedef struct +{ + NvU8 tag; + NvU64 userInfo; + NvU32 context_id; + NvU32 pid; + NvU32 subpid; + NvU8 swizzId; + NvU8 computeInstanceId; + NvU16 dropCount; + NvU64 timestamp; + NvU64 noisyTimestamp; +} FECS_EVENT_NOTIFICATION_DATA; + +typedef struct +{ + NV_EVENT_BUFFER_RECORD_HEADER header; + NV_EVENT_BUFFER_FECS_RECORD_V2 record; +} FECS_EVENT_RECORD_OUTPUT; +ct_assert(NV_OFFSETOF(FECS_EVENT_RECORD_OUTPUT, record) == sizeof(NV_EVENT_BUFFER_RECORD_HEADER)); +ct_assert(sizeof(FECS_EVENT_RECORD_OUTPUT) == sizeof(NV_EVENT_BUFFER_RECORD_HEADER) + sizeof(NV_EVENT_BUFFER_FECS_RECORD_V2)); + + +NV_STATUS fecsAddBindpoint +( + OBJGPU *pGpu, + struct RsClient *pClient, + RsResourceRef *pEventBufferRef, + NvHandle hNotifier, + NvBool bAllUsers, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD levelOfDetail, + NvU32 eventFilter, + NvU8 version, + NvU32 *pReasonCode +); + +// Set cached internal routing handles and GR index for MIG +void fecsSetRoutingInfo +( + OBJGPU *pGpu, + struct KernelGraphics *pKernelGraphics, + NvHandle hClient, + NvHandle hSubdevice, + NvU32 localGrEngineIdx +); + +// Clear cached internal routing handles and GR index for MIG +void fecsClearRoutingInfo(OBJGPU *, struct KernelGraphics *); + +/*! Opaque FECS event buffer private data */ +typedef struct KGRAPHICS_FECS_TRACE_INFO KGRAPHICS_FECS_TRACE_INFO; + +NV_STATUS fecsCtxswLoggingInit +( + OBJGPU *pGpu, + struct KernelGraphics *pKernelGraphics, + KGRAPHICS_FECS_TRACE_INFO **ppFecsTraceInfo +); + +void fecsCtxswLoggingTeardown(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/*! 
set num records to process per intr */ +void fecsSetRecordsPerIntr +( + OBJGPU *pGpu, + struct KernelGraphics *pKernelGraphics, + NvU32 recordsPerIntr +); + +/** + * Returns true if a GR's FECS trace buffer has a record ready for processing + */ +NvBool fecsBufferChanged(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/** + * Scrub the FECS tracing buffer and enable tracing (if a buffer has been mapped). + * + * The ctx logging state in GR will be set to enabled if the operation succeeded. + */ +void fecsBufferReset(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/*! Is the FECS trace buffer mapped? */ +NvBool fecsBufferIsMapped(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/** + * Map the FECS trace buffer + */ +NV_STATUS fecsBufferMap(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/** + * Unmap the FECS trace buffer + */ +void fecsBufferUnmap(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/** + * Disable FECS trace logging, unmap the trace log buffer, and remove any + * registered FECS trace logging callbacks. + * + * The ctx logging state in GR will be set to disabled after this operation. + */ +void fecsBufferTeardown(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/** + * Disable FECS logging at hardware level + */ +void fecsBufferDisableHw(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +void fecsRemoveAllBindpoints(struct EventBuffer *pEventBuffer); +void fecsRemoveBindpoint(OBJGPU *pGpu, NvU64 uid, NV_EVENT_BUFFER_BIND_POINT_FECS* pBind); + +/* The callback function that transfers FECS Buffer entries to an EventBuffer */ +void nvEventBufferFecsCallback(OBJGPU *pGpu, void *pArgs); + +void notifyEventBuffers +( + OBJGPU *pGpu, + FecsEventBufferBindMultiMapSubmap *pSubmap, + FECS_EVENT_NOTIFICATION_DATA const *pRecord +); + +/*! Atomically set intr callback pending, return NV_TRUE if wasn't pending prior */ +NvBool fecsSignalIntrPendingIfNotPending(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/*! Atomically clear intr callback pending, return NV_TRUE if was pending */ +NvBool fecsClearIntrPendingIfPending(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/*! Atomically check is intr callback pending */ +NvBool fecsIsIntrPending(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/*! Opaque VGPU fecs event buffer private data */ +typedef struct VGPU_FECS_TRACE_STAGING_BUFFER VGPU_FECS_TRACE_STAGING_BUFFER; + +/*! Retrieve the current VGPU staging buffer */ +VGPU_FECS_TRACE_STAGING_BUFFER *fecsGetVgpuStagingBuffer(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics); + +/*! 
Store the given VGPU staging buffer */ +void fecsSetVgpuStagingBuffer +( + OBJGPU *pGpu, + struct KernelGraphics *pKernelGraphics, + VGPU_FECS_TRACE_STAGING_BUFFER *pStagingBuffer +); + +#endif // _FECS_EVENT_LIST_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_FECS_EVENT_LIST_NVOC_H_ diff --git a/src/nvidia/generated/g_fla_mem_nvoc.c b/src/nvidia/generated/g_fla_mem_nvoc.c new file mode 100644 index 000000000..2e598d130 --- /dev/null +++ b/src/nvidia/generated/g_fla_mem_nvoc.c @@ -0,0 +1,326 @@ +#define NVOC_FLA_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_fla_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe61ee1 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_FlaMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_FlaMemory(FlaMemory*); +void __nvoc_init_funcTable_FlaMemory(FlaMemory*); +NV_STATUS __nvoc_ctor_FlaMemory(FlaMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_FlaMemory(FlaMemory*); +void __nvoc_dtor_FlaMemory(FlaMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_FlaMemory; + +static const struct NVOC_RTTI __nvoc_rtti_FlaMemory_FlaMemory = { + /*pClassDef=*/ &__nvoc_class_def_FlaMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_FlaMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_FlaMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FlaMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FlaMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FlaMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FlaMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FlaMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FlaMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FlaMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FlaMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FlaMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_FlaMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_FlaMemory_FlaMemory, + &__nvoc_rtti_FlaMemory_Memory, + &__nvoc_rtti_FlaMemory_RmResource, + &__nvoc_rtti_FlaMemory_RmResourceCommon, + &__nvoc_rtti_FlaMemory_RsResource, + &__nvoc_rtti_FlaMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_FlaMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(FlaMemory), + /*classId=*/ 
classId(FlaMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "FlaMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_FlaMemory, + /*pCastInfo=*/ &__nvoc_castinfo_FlaMemory, + /*pExportInfo=*/ &__nvoc_export_info_FlaMemory +}; + +static NvBool __nvoc_thunk_FlaMemory_resCanCopy(struct RsResource *pFlaMemory) { + return flamemCanCopy((struct FlaMemory *)(((unsigned char *)pFlaMemory) - __nvoc_rtti_FlaMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemCheckMemInterUnmap(struct FlaMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemControl(struct FlaMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemUnmap(struct FlaMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemGetMemInterMapParams(struct FlaMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemGetMemoryMappingDescriptor(struct FlaMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemGetMapAddrSpace(struct FlaMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_flamemShareCallback(struct FlaMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_flamemControlFilter(struct FlaMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_flamemAddAdditionalDependants(struct RsClient *pClient, struct FlaMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_flamemGetRefCount(struct FlaMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_flamemMapTo(struct FlaMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct 
RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_flamemControl_Prologue(struct FlaMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemIsReady(struct FlaMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemCheckCopyPermissions(struct FlaMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_flamemPreDestruct(struct FlaMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_flamemUnmapFrom(struct FlaMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_flamemControl_Epilogue(struct FlaMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_flamemControlLookup(struct FlaMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_flamemMap(struct FlaMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_FlaMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_flamemAccessCallback(struct FlaMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_FlaMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_FlaMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_FlaMemory(FlaMemory *pThis) { + __nvoc_flamemDestruct(pThis); + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_FlaMemory(FlaMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_FlaMemory(FlaMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_FlaMemory_fail_Memory; + 
__nvoc_init_dataField_FlaMemory(pThis); + + status = __nvoc_flamemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_FlaMemory_fail__init; + goto __nvoc_ctor_FlaMemory_exit; // Success + +__nvoc_ctor_FlaMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_FlaMemory_fail_Memory: +__nvoc_ctor_FlaMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_FlaMemory_1(FlaMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__flamemCanCopy__ = &flamemCanCopy_IMPL; + + pThis->__flamemCopyConstruct__ = &flamemCopyConstruct_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_FlaMemory_resCanCopy; + + pThis->__flamemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_flamemCheckMemInterUnmap; + + pThis->__flamemControl__ = &__nvoc_thunk_Memory_flamemControl; + + pThis->__flamemUnmap__ = &__nvoc_thunk_Memory_flamemUnmap; + + pThis->__flamemGetMemInterMapParams__ = &__nvoc_thunk_Memory_flamemGetMemInterMapParams; + + pThis->__flamemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_flamemGetMemoryMappingDescriptor; + + pThis->__flamemGetMapAddrSpace__ = &__nvoc_thunk_Memory_flamemGetMapAddrSpace; + + pThis->__flamemShareCallback__ = &__nvoc_thunk_RmResource_flamemShareCallback; + + pThis->__flamemControlFilter__ = &__nvoc_thunk_RsResource_flamemControlFilter; + + pThis->__flamemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_flamemAddAdditionalDependants; + + pThis->__flamemGetRefCount__ = &__nvoc_thunk_RsResource_flamemGetRefCount; + + pThis->__flamemMapTo__ = &__nvoc_thunk_RsResource_flamemMapTo; + + pThis->__flamemControl_Prologue__ = &__nvoc_thunk_RmResource_flamemControl_Prologue; + + pThis->__flamemIsReady__ = &__nvoc_thunk_Memory_flamemIsReady; + + pThis->__flamemCheckCopyPermissions__ = &__nvoc_thunk_Memory_flamemCheckCopyPermissions; + + pThis->__flamemPreDestruct__ = &__nvoc_thunk_RsResource_flamemPreDestruct; + + pThis->__flamemUnmapFrom__ = &__nvoc_thunk_RsResource_flamemUnmapFrom; + + pThis->__flamemControl_Epilogue__ = &__nvoc_thunk_RmResource_flamemControl_Epilogue; + + pThis->__flamemControlLookup__ = &__nvoc_thunk_RsResource_flamemControlLookup; + + pThis->__flamemMap__ = &__nvoc_thunk_Memory_flamemMap; + + pThis->__flamemAccessCallback__ = &__nvoc_thunk_RmResource_flamemAccessCallback; +} + +void __nvoc_init_funcTable_FlaMemory(FlaMemory *pThis) { + __nvoc_init_funcTable_FlaMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_FlaMemory(FlaMemory *pThis) { + pThis->__nvoc_pbase_FlaMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_FlaMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_FlaMemory(FlaMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + FlaMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(FlaMemory)); + if (pThis == NULL) return 
NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(FlaMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_FlaMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_FlaMemory(pThis); + status = __nvoc_ctor_FlaMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_FlaMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_FlaMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_FlaMemory(FlaMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_FlaMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_fla_mem_nvoc.h b/src/nvidia/generated/g_fla_mem_nvoc.h new file mode 100644 index 000000000..00fac5930 --- /dev/null +++ b/src/nvidia/generated/g_fla_mem_nvoc.h @@ -0,0 +1,237 @@ +#ifndef _G_FLA_MEM_NVOC_H_ +#define _G_FLA_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_fla_mem_nvoc.h" + +#ifndef _FLA_MEMORY_H_ +#define _FLA_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! 
+ * This class is used by the FLA clients to allocate FLA memory handle + */ +#ifdef NVOC_FLA_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct FlaMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct FlaMemory *__nvoc_pbase_FlaMemory; + NvBool (*__flamemCanCopy__)(struct FlaMemory *); + NV_STATUS (*__flamemCopyConstruct__)(struct FlaMemory *, CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + NV_STATUS (*__flamemCheckMemInterUnmap__)(struct FlaMemory *, NvBool); + NV_STATUS (*__flamemControl__)(struct FlaMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__flamemUnmap__)(struct FlaMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__flamemGetMemInterMapParams__)(struct FlaMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__flamemGetMemoryMappingDescriptor__)(struct FlaMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__flamemGetMapAddrSpace__)(struct FlaMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__flamemShareCallback__)(struct FlaMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__flamemControlFilter__)(struct FlaMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__flamemAddAdditionalDependants__)(struct RsClient *, struct FlaMemory *, RsResourceRef *); + NvU32 (*__flamemGetRefCount__)(struct FlaMemory *); + NV_STATUS (*__flamemMapTo__)(struct FlaMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__flamemControl_Prologue__)(struct FlaMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__flamemIsReady__)(struct FlaMemory *); + NV_STATUS (*__flamemCheckCopyPermissions__)(struct FlaMemory *, struct OBJGPU *, NvHandle); + void (*__flamemPreDestruct__)(struct FlaMemory *); + NV_STATUS (*__flamemUnmapFrom__)(struct FlaMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__flamemControl_Epilogue__)(struct FlaMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__flamemControlLookup__)(struct FlaMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__flamemMap__)(struct FlaMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__flamemAccessCallback__)(struct FlaMemory *, struct RsClient *, void *, RsAccessRight); + NvU32 peerDeviceInst; + NvU32 peerGpuInst; + NvHandle hDupedExportMemory; +}; + +#ifndef __NVOC_CLASS_FlaMemory_TYPEDEF__ +#define __NVOC_CLASS_FlaMemory_TYPEDEF__ +typedef struct FlaMemory FlaMemory; +#endif /* __NVOC_CLASS_FlaMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FlaMemory +#define __nvoc_class_id_FlaMemory 0xe61ee1 +#endif /* __nvoc_class_id_FlaMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_FlaMemory; + +#define __staticCast_FlaMemory(pThis) \ + ((pThis)->__nvoc_pbase_FlaMemory) + +#ifdef __nvoc_fla_mem_h_disabled +#define __dynamicCast_FlaMemory(pThis) ((FlaMemory*)NULL) +#else //__nvoc_fla_mem_h_disabled +#define __dynamicCast_FlaMemory(pThis) \ + ((FlaMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(FlaMemory))) +#endif //__nvoc_fla_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_FlaMemory(FlaMemory**, Dynamic*, NvU32, 
va_list); + +NV_STATUS __nvoc_objCreate_FlaMemory(FlaMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_FlaMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_FlaMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define flamemCanCopy(pFlaMemory) flamemCanCopy_DISPATCH(pFlaMemory) +#define flamemCopyConstruct(pFlaMemory, pCallContext, pParams) flamemCopyConstruct_DISPATCH(pFlaMemory, pCallContext, pParams) +#define flamemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) flamemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define flamemControl(pMemory, pCallContext, pParams) flamemControl_DISPATCH(pMemory, pCallContext, pParams) +#define flamemUnmap(pMemory, pCallContext, pCpuMapping) flamemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define flamemGetMemInterMapParams(pMemory, pParams) flamemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define flamemGetMemoryMappingDescriptor(pMemory, ppMemDesc) flamemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define flamemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) flamemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define flamemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) flamemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define flamemControlFilter(pResource, pCallContext, pParams) flamemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define flamemAddAdditionalDependants(pClient, pResource, pReference) flamemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define flamemGetRefCount(pResource) flamemGetRefCount_DISPATCH(pResource) +#define flamemMapTo(pResource, pParams) flamemMapTo_DISPATCH(pResource, pParams) +#define flamemControl_Prologue(pResource, pCallContext, pParams) flamemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define flamemIsReady(pMemory) flamemIsReady_DISPATCH(pMemory) +#define flamemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) flamemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define flamemPreDestruct(pResource) flamemPreDestruct_DISPATCH(pResource) +#define flamemUnmapFrom(pResource, pParams) flamemUnmapFrom_DISPATCH(pResource, pParams) +#define flamemControl_Epilogue(pResource, pCallContext, pParams) flamemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define flamemControlLookup(pResource, pParams, ppEntry) flamemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define flamemMap(pMemory, pCallContext, pParams, pCpuMapping) flamemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define flamemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) flamemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool flamemCanCopy_IMPL(struct FlaMemory *pFlaMemory); + +static inline NvBool flamemCanCopy_DISPATCH(struct FlaMemory *pFlaMemory) { + return pFlaMemory->__flamemCanCopy__(pFlaMemory); +} + +NV_STATUS flamemCopyConstruct_IMPL(struct FlaMemory *pFlaMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS flamemCopyConstruct_DISPATCH(struct FlaMemory *pFlaMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + return pFlaMemory->__flamemCopyConstruct__(pFlaMemory, pCallContext, pParams); +} + 
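The macros and _DISPATCH inlines in this header are NVOC's hand-rolled virtual dispatch: a call such as flamemCanCopy(pFlaMemory) expands to flamemCanCopy_DISPATCH(pFlaMemory), which jumps through the per-object function pointer that __nvoc_init_funcTable_FlaMemory_1() installed earlier in this diff (flamemCanCopy_IMPL for FlaMemory itself, or a __nvoc_thunk_* wrapper when the call arrives through a base class such as RsResource). The stand-alone sketch below reproduces that pattern with hypothetical names (Widget, widgetCanCopy, widgetInitFuncTable); it is an orientation aid only and is not part of the generated header or of this commit.

/*
 * Minimal illustration of the NVOC-style dispatch shown above.
 * All names here are hypothetical; only the shape of the pattern
 * (macro -> _DISPATCH inline -> per-object pointer -> _IMPL) mirrors
 * the generated code.
 */
#include <stdio.h>

typedef int NvBool;
#define NV_TRUE  ((NvBool)1)
#define NV_FALSE ((NvBool)0)

/* The per-class "vtable" lives inside each object instance. */
struct Widget {
    NvBool (*__widgetCanCopy__)(struct Widget *);
};

/* Concrete implementation, analogous to flamemCanCopy_IMPL. */
static NvBool widgetCanCopy_IMPL(struct Widget *pWidget)
{
    (void)pWidget;
    return NV_TRUE;
}

/* Dispatch helper, analogous to flamemCanCopy_DISPATCH. */
static inline NvBool widgetCanCopy_DISPATCH(struct Widget *pWidget)
{
    return pWidget->__widgetCanCopy__(pWidget);
}

/* Public entry point, analogous to the flamemCanCopy() macro. */
#define widgetCanCopy(pWidget) widgetCanCopy_DISPATCH(pWidget)

/* Analogous to __nvoc_init_funcTable_FlaMemory_1(): install the pointer. */
static void widgetInitFuncTable(struct Widget *pWidget)
{
    pWidget->__widgetCanCopy__ = &widgetCanCopy_IMPL;
}

int main(void)
{
    struct Widget w;
    widgetInitFuncTable(&w);
    printf("canCopy = %d\n", widgetCanCopy(&w)); /* prints 1 */
    return 0;
}

Overriding a base-class method works the same way in the generated code: the derived init routine overwrites the base object's pointer with a thunk (for example __nvoc_thunk_FlaMemory_resCanCopy) that adjusts the object pointer by the RTTI offset recorded in __nvoc_rtti_FlaMemory_RsResource before calling the derived implementation.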
+static inline NV_STATUS flamemCheckMemInterUnmap_DISPATCH(struct FlaMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__flamemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS flamemControl_DISPATCH(struct FlaMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__flamemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS flamemUnmap_DISPATCH(struct FlaMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__flamemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS flamemGetMemInterMapParams_DISPATCH(struct FlaMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__flamemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS flamemGetMemoryMappingDescriptor_DISPATCH(struct FlaMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__flamemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS flamemGetMapAddrSpace_DISPATCH(struct FlaMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__flamemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool flamemShareCallback_DISPATCH(struct FlaMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__flamemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS flamemControlFilter_DISPATCH(struct FlaMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__flamemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void flamemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct FlaMemory *pResource, RsResourceRef *pReference) { + pResource->__flamemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 flamemGetRefCount_DISPATCH(struct FlaMemory *pResource) { + return pResource->__flamemGetRefCount__(pResource); +} + +static inline NV_STATUS flamemMapTo_DISPATCH(struct FlaMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__flamemMapTo__(pResource, pParams); +} + +static inline NV_STATUS flamemControl_Prologue_DISPATCH(struct FlaMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__flamemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS flamemIsReady_DISPATCH(struct FlaMemory *pMemory) { + return pMemory->__flamemIsReady__(pMemory); +} + +static inline NV_STATUS flamemCheckCopyPermissions_DISPATCH(struct FlaMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__flamemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void flamemPreDestruct_DISPATCH(struct FlaMemory *pResource) { + pResource->__flamemPreDestruct__(pResource); +} + +static inline NV_STATUS flamemUnmapFrom_DISPATCH(struct FlaMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__flamemUnmapFrom__(pResource, pParams); +} + +static inline void flamemControl_Epilogue_DISPATCH(struct FlaMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__flamemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline 
NV_STATUS flamemControlLookup_DISPATCH(struct FlaMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__flamemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS flamemMap_DISPATCH(struct FlaMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__flamemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool flamemAccessCallback_DISPATCH(struct FlaMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__flamemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS flamemConstruct_IMPL(struct FlaMemory *arg_pFlaMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_flamemConstruct(arg_pFlaMemory, arg_pCallContext, arg_pParams) flamemConstruct_IMPL(arg_pFlaMemory, arg_pCallContext, arg_pParams) +void flamemDestruct_IMPL(struct FlaMemory *pFlaMemory); +#define __nvoc_flamemDestruct(pFlaMemory) flamemDestruct_IMPL(pFlaMemory) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_FLA_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_fm_session_api_nvoc.c b/src/nvidia/generated/g_fm_session_api_nvoc.c new file mode 100644 index 000000000..46a91e290 --- /dev/null +++ b/src/nvidia/generated/g_fm_session_api_nvoc.c @@ -0,0 +1,400 @@ +#define NVOC_FM_SESSION_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_fm_session_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xdfbd08 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_FmSessionApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_FmSessionApi(FmSessionApi*); +void __nvoc_init_funcTable_FmSessionApi(FmSessionApi*); +NV_STATUS __nvoc_ctor_FmSessionApi(FmSessionApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_FmSessionApi(FmSessionApi*); +void __nvoc_dtor_FmSessionApi(FmSessionApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_FmSessionApi; + +static const struct NVOC_RTTI __nvoc_rtti_FmSessionApi_FmSessionApi = { + /*pClassDef=*/ &__nvoc_class_def_FmSessionApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_FmSessionApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_FmSessionApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FmSessionApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FmSessionApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FmSessionApi, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI 
__nvoc_rtti_FmSessionApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FmSessionApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FmSessionApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FmSessionApi, __nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FmSessionApi_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FmSessionApi, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_FmSessionApi_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(FmSessionApi, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_FmSessionApi = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_FmSessionApi_FmSessionApi, + &__nvoc_rtti_FmSessionApi_Notifier, + &__nvoc_rtti_FmSessionApi_INotifier, + &__nvoc_rtti_FmSessionApi_RmResource, + &__nvoc_rtti_FmSessionApi_RmResourceCommon, + &__nvoc_rtti_FmSessionApi_RsResource, + &__nvoc_rtti_FmSessionApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_FmSessionApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(FmSessionApi), + /*classId=*/ classId(FmSessionApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "FmSessionApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_FmSessionApi, + /*pCastInfo=*/ &__nvoc_castinfo_FmSessionApi, + /*pExportInfo=*/ &__nvoc_export_info_FmSessionApi +}; + +static NvBool __nvoc_thunk_RmResource_fmsessionapiShareCallback(struct FmSessionApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_fmsessionapiCheckMemInterUnmap(struct FmSessionApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_FmSessionApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_RmResource_fmsessionapiAccessCallback(struct FmSessionApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RmResource_fmsessionapiGetMemInterMapParams(struct FmSessionApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_FmSessionApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_fmsessionapiGetMemoryMappingDescriptor(struct FmSessionApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_FmSessionApi_RmResource.offset), ppMemDesc); +} + +static void __nvoc_thunk_Notifier_fmsessionapiSetNotificationShare(struct FmSessionApi *pNotifier, struct 
NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_FmSessionApi_Notifier.offset), pNotifShare); +} + +static NV_STATUS __nvoc_thunk_RsResource_fmsessionapiControl(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_fmsessionapiControlFilter(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_fmsessionapiGetRefCount(struct FmSessionApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Notifier_fmsessionapiUnregisterEvent(struct FmSessionApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_FmSessionApi_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NV_STATUS __nvoc_thunk_RsResource_fmsessionapiUnmap(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NvBool __nvoc_thunk_RsResource_fmsessionapiCanCopy(struct FmSessionApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_fmsessionapiControl_Prologue(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_fmsessionapiMapTo(struct FmSessionApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_fmsessionapiAddAdditionalDependants(struct RsClient *pClient, struct FmSessionApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset), pReference); +} + +static void __nvoc_thunk_RsResource_fmsessionapiPreDestruct(struct FmSessionApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_fmsessionapiUnmapFrom(struct FmSessionApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset), pParams); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_fmsessionapiGetNotificationListPtr(struct FmSessionApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned 
char *)pNotifier) + __nvoc_rtti_FmSessionApi_Notifier.offset)); +} + +static void __nvoc_thunk_RmResource_fmsessionapiControl_Epilogue(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RmResource.offset), pCallContext, pParams); +} + +static struct NotifShare *__nvoc_thunk_Notifier_fmsessionapiGetNotificationShare(struct FmSessionApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_FmSessionApi_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_fmsessionapiControlLookup(struct FmSessionApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_fmsessionapiMap(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_FmSessionApi_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_fmsessionapiGetOrAllocNotifShare(struct FmSessionApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_FmSessionApi_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_FmSessionApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) fmsessionapiCtrlCmdSetFmState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xf0101u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_FmSessionApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "fmsessionapiCtrlCmdSetFmState" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) fmsessionapiCtrlCmdClearFmState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xf0102u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_FmSessionApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "fmsessionapiCtrlCmdClearFmState" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_FmSessionApi = +{ + /*numEntries=*/ 2, + /*pExportEntries=*/ __nvoc_exported_method_def_FmSessionApi +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_FmSessionApi(FmSessionApi *pThis) { + __nvoc_fmsessionapiDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_FmSessionApi(FmSessionApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct 
RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_FmSessionApi(FmSessionApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_FmSessionApi_fail_RmResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_FmSessionApi_fail_Notifier; + __nvoc_init_dataField_FmSessionApi(pThis); + + status = __nvoc_fmsessionapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_FmSessionApi_fail__init; + goto __nvoc_ctor_FmSessionApi_exit; // Success + +__nvoc_ctor_FmSessionApi_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_FmSessionApi_fail_Notifier: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_FmSessionApi_fail_RmResource: +__nvoc_ctor_FmSessionApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_FmSessionApi_1(FmSessionApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__fmsessionapiCtrlCmdSetFmState__ = &fmsessionapiCtrlCmdSetFmState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__fmsessionapiCtrlCmdClearFmState__ = &fmsessionapiCtrlCmdClearFmState_IMPL; +#endif + + pThis->__fmsessionapiShareCallback__ = &__nvoc_thunk_RmResource_fmsessionapiShareCallback; + + pThis->__fmsessionapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_fmsessionapiCheckMemInterUnmap; + + pThis->__fmsessionapiAccessCallback__ = &__nvoc_thunk_RmResource_fmsessionapiAccessCallback; + + pThis->__fmsessionapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_fmsessionapiGetMemInterMapParams; + + pThis->__fmsessionapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_fmsessionapiGetMemoryMappingDescriptor; + + pThis->__fmsessionapiSetNotificationShare__ = &__nvoc_thunk_Notifier_fmsessionapiSetNotificationShare; + + pThis->__fmsessionapiControl__ = &__nvoc_thunk_RsResource_fmsessionapiControl; + + pThis->__fmsessionapiControlFilter__ = &__nvoc_thunk_RsResource_fmsessionapiControlFilter; + + pThis->__fmsessionapiGetRefCount__ = &__nvoc_thunk_RsResource_fmsessionapiGetRefCount; + + pThis->__fmsessionapiUnregisterEvent__ = &__nvoc_thunk_Notifier_fmsessionapiUnregisterEvent; + + pThis->__fmsessionapiUnmap__ = &__nvoc_thunk_RsResource_fmsessionapiUnmap; + + pThis->__fmsessionapiCanCopy__ = &__nvoc_thunk_RsResource_fmsessionapiCanCopy; + + pThis->__fmsessionapiControl_Prologue__ = &__nvoc_thunk_RmResource_fmsessionapiControl_Prologue; + + pThis->__fmsessionapiMapTo__ = &__nvoc_thunk_RsResource_fmsessionapiMapTo; + + pThis->__fmsessionapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_fmsessionapiAddAdditionalDependants; + + pThis->__fmsessionapiPreDestruct__ = &__nvoc_thunk_RsResource_fmsessionapiPreDestruct; + + pThis->__fmsessionapiUnmapFrom__ = &__nvoc_thunk_RsResource_fmsessionapiUnmapFrom; + + pThis->__fmsessionapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_fmsessionapiGetNotificationListPtr; + + pThis->__fmsessionapiControl_Epilogue__ = &__nvoc_thunk_RmResource_fmsessionapiControl_Epilogue; + + pThis->__fmsessionapiGetNotificationShare__ = &__nvoc_thunk_Notifier_fmsessionapiGetNotificationShare; + + pThis->__fmsessionapiControlLookup__ = 
&__nvoc_thunk_RsResource_fmsessionapiControlLookup; + + pThis->__fmsessionapiMap__ = &__nvoc_thunk_RsResource_fmsessionapiMap; + + pThis->__fmsessionapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_fmsessionapiGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_FmSessionApi(FmSessionApi *pThis) { + __nvoc_init_funcTable_FmSessionApi_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_FmSessionApi(FmSessionApi *pThis) { + pThis->__nvoc_pbase_FmSessionApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_FmSessionApi(pThis); +} + +NV_STATUS __nvoc_objCreate_FmSessionApi(FmSessionApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + FmSessionApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(FmSessionApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(FmSessionApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_FmSessionApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_FmSessionApi(pThis); + status = __nvoc_ctor_FmSessionApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_FmSessionApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_FmSessionApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_FmSessionApi(FmSessionApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_FmSessionApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_fm_session_api_nvoc.h b/src/nvidia/generated/g_fm_session_api_nvoc.h new file mode 100644 index 000000000..10f6ad99f --- /dev/null +++ b/src/nvidia/generated/g_fm_session_api_nvoc.h @@ -0,0 +1,280 @@ +#ifndef _G_FM_SESSION_API_NVOC_H_ +#define _G_FM_SESSION_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing the FM session + * + *****************************************************************************/ + +#include "g_fm_session_api_nvoc.h" + +#ifndef FM_SESSION_API_H +#define FM_SESSION_API_H + +#include "rmapi/resource.h" +#include "rmapi/event.h" + +#include "ctrl/ctrl000f.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +// +// FM session information +// +// A client which owns FmSessionApi is identified as Fabric Manager in RM land. +// +// Key attributes of FmSessionApi class: +// - There can be only one instance of FmSessionApi system-wide. This ensures that +// there is only one fabric manager daemon running in the system with exclusive +// access to FmObject. +// - hClient is parent of FmSessionApi. +// - Only NV01_ROOT_USER should be allowed to allocate FmSessionApi. +// - FmSessionApi can be allocated only by privileged clients. +// - RmApi lock must be held. 
+// +#ifdef NVOC_FM_SESSION_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct FmSessionApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct FmSessionApi *__nvoc_pbase_FmSessionApi; + NV_STATUS (*__fmsessionapiCtrlCmdSetFmState__)(struct FmSessionApi *); + NV_STATUS (*__fmsessionapiCtrlCmdClearFmState__)(struct FmSessionApi *); + NvBool (*__fmsessionapiShareCallback__)(struct FmSessionApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__fmsessionapiCheckMemInterUnmap__)(struct FmSessionApi *, NvBool); + NvBool (*__fmsessionapiAccessCallback__)(struct FmSessionApi *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__fmsessionapiGetMemInterMapParams__)(struct FmSessionApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__fmsessionapiGetMemoryMappingDescriptor__)(struct FmSessionApi *, struct MEMORY_DESCRIPTOR **); + void (*__fmsessionapiSetNotificationShare__)(struct FmSessionApi *, struct NotifShare *); + NV_STATUS (*__fmsessionapiControl__)(struct FmSessionApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__fmsessionapiControlFilter__)(struct FmSessionApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvU32 (*__fmsessionapiGetRefCount__)(struct FmSessionApi *); + NV_STATUS (*__fmsessionapiUnregisterEvent__)(struct FmSessionApi *, NvHandle, NvHandle, NvHandle, NvHandle); + NV_STATUS (*__fmsessionapiUnmap__)(struct FmSessionApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NvBool (*__fmsessionapiCanCopy__)(struct FmSessionApi *); + NV_STATUS (*__fmsessionapiControl_Prologue__)(struct FmSessionApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__fmsessionapiMapTo__)(struct FmSessionApi *, RS_RES_MAP_TO_PARAMS *); + void (*__fmsessionapiAddAdditionalDependants__)(struct RsClient *, struct FmSessionApi *, RsResourceRef *); + void (*__fmsessionapiPreDestruct__)(struct FmSessionApi *); + NV_STATUS (*__fmsessionapiUnmapFrom__)(struct FmSessionApi *, RS_RES_UNMAP_FROM_PARAMS *); + PEVENTNOTIFICATION *(*__fmsessionapiGetNotificationListPtr__)(struct FmSessionApi *); + void (*__fmsessionapiControl_Epilogue__)(struct FmSessionApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + struct NotifShare *(*__fmsessionapiGetNotificationShare__)(struct FmSessionApi *); + NV_STATUS (*__fmsessionapiControlLookup__)(struct FmSessionApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__fmsessionapiMap__)(struct FmSessionApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__fmsessionapiGetOrAllocNotifShare__)(struct FmSessionApi *, NvHandle, NvHandle, struct NotifShare **); + NvU64 dupedCapDescriptor; +}; + +#ifndef __NVOC_CLASS_FmSessionApi_TYPEDEF__ +#define __NVOC_CLASS_FmSessionApi_TYPEDEF__ +typedef struct FmSessionApi FmSessionApi; +#endif /* __NVOC_CLASS_FmSessionApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FmSessionApi +#define __nvoc_class_id_FmSessionApi 0xdfbd08 +#endif /* __nvoc_class_id_FmSessionApi */ + +extern const struct NVOC_CLASS_DEF 
__nvoc_class_def_FmSessionApi; + +#define __staticCast_FmSessionApi(pThis) \ + ((pThis)->__nvoc_pbase_FmSessionApi) + +#ifdef __nvoc_fm_session_api_h_disabled +#define __dynamicCast_FmSessionApi(pThis) ((FmSessionApi*)NULL) +#else //__nvoc_fm_session_api_h_disabled +#define __dynamicCast_FmSessionApi(pThis) \ + ((FmSessionApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(FmSessionApi))) +#endif //__nvoc_fm_session_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_FmSessionApi(FmSessionApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_FmSessionApi(FmSessionApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_FmSessionApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_FmSessionApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define fmsessionapiCtrlCmdSetFmState(pFmSessionApi) fmsessionapiCtrlCmdSetFmState_DISPATCH(pFmSessionApi) +#define fmsessionapiCtrlCmdClearFmState(pFmSessionApi) fmsessionapiCtrlCmdClearFmState_DISPATCH(pFmSessionApi) +#define fmsessionapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) fmsessionapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define fmsessionapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) fmsessionapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define fmsessionapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) fmsessionapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define fmsessionapiGetMemInterMapParams(pRmResource, pParams) fmsessionapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define fmsessionapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) fmsessionapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define fmsessionapiSetNotificationShare(pNotifier, pNotifShare) fmsessionapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define fmsessionapiControl(pResource, pCallContext, pParams) fmsessionapiControl_DISPATCH(pResource, pCallContext, pParams) +#define fmsessionapiControlFilter(pResource, pCallContext, pParams) fmsessionapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define fmsessionapiGetRefCount(pResource) fmsessionapiGetRefCount_DISPATCH(pResource) +#define fmsessionapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) fmsessionapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define fmsessionapiUnmap(pResource, pCallContext, pCpuMapping) fmsessionapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define fmsessionapiCanCopy(pResource) fmsessionapiCanCopy_DISPATCH(pResource) +#define fmsessionapiControl_Prologue(pResource, pCallContext, pParams) fmsessionapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define fmsessionapiMapTo(pResource, pParams) fmsessionapiMapTo_DISPATCH(pResource, pParams) +#define fmsessionapiAddAdditionalDependants(pClient, pResource, pReference) fmsessionapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define fmsessionapiPreDestruct(pResource) fmsessionapiPreDestruct_DISPATCH(pResource) +#define fmsessionapiUnmapFrom(pResource, pParams) fmsessionapiUnmapFrom_DISPATCH(pResource, pParams) +#define fmsessionapiGetNotificationListPtr(pNotifier) fmsessionapiGetNotificationListPtr_DISPATCH(pNotifier) +#define 
fmsessionapiControl_Epilogue(pResource, pCallContext, pParams) fmsessionapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define fmsessionapiGetNotificationShare(pNotifier) fmsessionapiGetNotificationShare_DISPATCH(pNotifier) +#define fmsessionapiControlLookup(pResource, pParams, ppEntry) fmsessionapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define fmsessionapiMap(pResource, pCallContext, pParams, pCpuMapping) fmsessionapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define fmsessionapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) fmsessionapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS fmsessionapiCtrlCmdSetFmState_IMPL(struct FmSessionApi *pFmSessionApi); + +static inline NV_STATUS fmsessionapiCtrlCmdSetFmState_DISPATCH(struct FmSessionApi *pFmSessionApi) { + return pFmSessionApi->__fmsessionapiCtrlCmdSetFmState__(pFmSessionApi); +} + +NV_STATUS fmsessionapiCtrlCmdClearFmState_IMPL(struct FmSessionApi *pFmSessionApi); + +static inline NV_STATUS fmsessionapiCtrlCmdClearFmState_DISPATCH(struct FmSessionApi *pFmSessionApi) { + return pFmSessionApi->__fmsessionapiCtrlCmdClearFmState__(pFmSessionApi); +} + +static inline NvBool fmsessionapiShareCallback_DISPATCH(struct FmSessionApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__fmsessionapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS fmsessionapiCheckMemInterUnmap_DISPATCH(struct FmSessionApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__fmsessionapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NvBool fmsessionapiAccessCallback_DISPATCH(struct FmSessionApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__fmsessionapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS fmsessionapiGetMemInterMapParams_DISPATCH(struct FmSessionApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__fmsessionapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS fmsessionapiGetMemoryMappingDescriptor_DISPATCH(struct FmSessionApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__fmsessionapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline void fmsessionapiSetNotificationShare_DISPATCH(struct FmSessionApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__fmsessionapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NV_STATUS fmsessionapiControl_DISPATCH(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__fmsessionapiControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS fmsessionapiControlFilter_DISPATCH(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__fmsessionapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 fmsessionapiGetRefCount_DISPATCH(struct FmSessionApi *pResource) { + return pResource->__fmsessionapiGetRefCount__(pResource); +} + +static inline NV_STATUS fmsessionapiUnregisterEvent_DISPATCH(struct FmSessionApi *pNotifier, NvHandle hNotifierClient, 
NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__fmsessionapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NV_STATUS fmsessionapiUnmap_DISPATCH(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__fmsessionapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NvBool fmsessionapiCanCopy_DISPATCH(struct FmSessionApi *pResource) { + return pResource->__fmsessionapiCanCopy__(pResource); +} + +static inline NV_STATUS fmsessionapiControl_Prologue_DISPATCH(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__fmsessionapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS fmsessionapiMapTo_DISPATCH(struct FmSessionApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__fmsessionapiMapTo__(pResource, pParams); +} + +static inline void fmsessionapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct FmSessionApi *pResource, RsResourceRef *pReference) { + pResource->__fmsessionapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline void fmsessionapiPreDestruct_DISPATCH(struct FmSessionApi *pResource) { + pResource->__fmsessionapiPreDestruct__(pResource); +} + +static inline NV_STATUS fmsessionapiUnmapFrom_DISPATCH(struct FmSessionApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__fmsessionapiUnmapFrom__(pResource, pParams); +} + +static inline PEVENTNOTIFICATION *fmsessionapiGetNotificationListPtr_DISPATCH(struct FmSessionApi *pNotifier) { + return pNotifier->__fmsessionapiGetNotificationListPtr__(pNotifier); +} + +static inline void fmsessionapiControl_Epilogue_DISPATCH(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__fmsessionapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline struct NotifShare *fmsessionapiGetNotificationShare_DISPATCH(struct FmSessionApi *pNotifier) { + return pNotifier->__fmsessionapiGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS fmsessionapiControlLookup_DISPATCH(struct FmSessionApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__fmsessionapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS fmsessionapiMap_DISPATCH(struct FmSessionApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__fmsessionapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS fmsessionapiGetOrAllocNotifShare_DISPATCH(struct FmSessionApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__fmsessionapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS fmsessionapiConstruct_IMPL(struct FmSessionApi *arg_pFmSessionApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_fmsessionapiConstruct(arg_pFmSessionApi, arg_pCallContext, arg_pParams) fmsessionapiConstruct_IMPL(arg_pFmSessionApi, arg_pCallContext, arg_pParams) +void fmsessionapiDestruct_IMPL(struct FmSessionApi *pFmSessionApi); +#define 
__nvoc_fmsessionapiDestruct(pFmSessionApi) fmsessionapiDestruct_IMPL(pFmSessionApi) +#undef PRIVATE_FIELD + + +#endif // FM_SESSION_API_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_FM_SESSION_API_NVOC_H_ diff --git a/src/nvidia/generated/g_generic_engine_nvoc.c b/src/nvidia/generated/g_generic_engine_nvoc.c new file mode 100644 index 000000000..da30a409d --- /dev/null +++ b/src/nvidia/generated/g_generic_engine_nvoc.c @@ -0,0 +1,381 @@ +#define NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_generic_engine_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4bc329 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_GenericEngineApi(GenericEngineApi*); +void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi*); +NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi*); +void __nvoc_dtor_GenericEngineApi(GenericEngineApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericEngineApi; + +static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_GenericEngineApi = { + /*pClassDef=*/ &__nvoc_class_def_GenericEngineApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GenericEngineApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GenericEngineApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_GenericEngineApi_GenericEngineApi, + &__nvoc_rtti_GenericEngineApi_GpuResource, + &__nvoc_rtti_GenericEngineApi_RmResource, + 
&__nvoc_rtti_GenericEngineApi_RmResourceCommon, + &__nvoc_rtti_GenericEngineApi_RsResource, + &__nvoc_rtti_GenericEngineApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GenericEngineApi), + /*classId=*/ classId(GenericEngineApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GenericEngineApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GenericEngineApi, + /*pCastInfo=*/ &__nvoc_castinfo_GenericEngineApi, + /*pExportInfo=*/ &__nvoc_export_info_GenericEngineApi +}; + +static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresMap(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return genapiMap((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresGetMapAddrSpace(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return genapiGetMapAddrSpace((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresControl(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return genapiControl((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_genapiShareCallback(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_genapiUnmap(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_genapiGetMemInterMapParams(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_genapiGetMemoryMappingDescriptor(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), ppMemDesc); +} + +static NvHandle __nvoc_thunk_GpuResource_genapiGetInternalObjectHandle(struct GenericEngineApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_genapiControlFilter(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct 
RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_genapiAddAdditionalDependants(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_genapiGetRefCount(struct GenericEngineApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_genapiCheckMemInterUnmap(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_genapiMapTo(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_genapiControl_Prologue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_genapiGetRegBaseOffsetAndSize(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_genapiCanCopy(struct GenericEngineApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_genapiInternalControlForward(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_genapiPreDestruct(struct GenericEngineApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_genapiUnmapFrom(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_genapiControl_Epilogue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_genapiControlLookup(struct GenericEngineApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char 
*)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams, ppEntry); +} + +static NvBool __nvoc_thunk_RmResource_genapiAccessCallback(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_GenericEngineApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) genapiCtrlCmdMasterGetErrorIntrOffsetMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90e60101u, + /*paramSize=*/ sizeof(NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GenericEngineApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "genapiCtrlCmdMasterGetErrorIntrOffsetMask" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90e60102u, + /*paramSize=*/ sizeof(NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GenericEngineApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericEngineApi = +{ + /*numEntries=*/ 2, + /*pExportEntries=*/ __nvoc_exported_method_def_GenericEngineApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_GenericEngineApi(GenericEngineApi *pThis) { + __nvoc_genapiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail_GpuResource; + __nvoc_init_dataField_GenericEngineApi(pThis); + + status = __nvoc_genapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail__init; + goto __nvoc_ctor_GenericEngineApi_exit; // Success + +__nvoc_ctor_GenericEngineApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_GenericEngineApi_fail_GpuResource: +__nvoc_ctor_GenericEngineApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_GenericEngineApi_1(GenericEngineApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__genapiMap__ = &genapiMap_IMPL; + + pThis->__genapiGetMapAddrSpace__ = &genapiGetMapAddrSpace_IMPL; + + pThis->__genapiControl__ = 
&genapiControl_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__genapiCtrlCmdMasterGetErrorIntrOffsetMask__ = &genapiCtrlCmdMasterGetErrorIntrOffsetMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask__ = &genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_GenericEngineApi_gpuresMap; + + pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_GenericEngineApi_gpuresGetMapAddrSpace; + + pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_GenericEngineApi_gpuresControl; + + pThis->__genapiShareCallback__ = &__nvoc_thunk_GpuResource_genapiShareCallback; + + pThis->__genapiUnmap__ = &__nvoc_thunk_GpuResource_genapiUnmap; + + pThis->__genapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_genapiGetMemInterMapParams; + + pThis->__genapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_genapiGetMemoryMappingDescriptor; + + pThis->__genapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_genapiGetInternalObjectHandle; + + pThis->__genapiControlFilter__ = &__nvoc_thunk_RsResource_genapiControlFilter; + + pThis->__genapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_genapiAddAdditionalDependants; + + pThis->__genapiGetRefCount__ = &__nvoc_thunk_RsResource_genapiGetRefCount; + + pThis->__genapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_genapiCheckMemInterUnmap; + + pThis->__genapiMapTo__ = &__nvoc_thunk_RsResource_genapiMapTo; + + pThis->__genapiControl_Prologue__ = &__nvoc_thunk_RmResource_genapiControl_Prologue; + + pThis->__genapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_genapiGetRegBaseOffsetAndSize; + + pThis->__genapiCanCopy__ = &__nvoc_thunk_RsResource_genapiCanCopy; + + pThis->__genapiInternalControlForward__ = &__nvoc_thunk_GpuResource_genapiInternalControlForward; + + pThis->__genapiPreDestruct__ = &__nvoc_thunk_RsResource_genapiPreDestruct; + + pThis->__genapiUnmapFrom__ = &__nvoc_thunk_RsResource_genapiUnmapFrom; + + pThis->__genapiControl_Epilogue__ = &__nvoc_thunk_RmResource_genapiControl_Epilogue; + + pThis->__genapiControlLookup__ = &__nvoc_thunk_RsResource_genapiControlLookup; + + pThis->__genapiAccessCallback__ = &__nvoc_thunk_RmResource_genapiAccessCallback; +} + +void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi *pThis) { + __nvoc_init_funcTable_GenericEngineApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_GenericEngineApi(GenericEngineApi *pThis) { + pThis->__nvoc_pbase_GenericEngineApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_GenericEngineApi(pThis); +} + +NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + GenericEngineApi 
*pThis; + + pThis = portMemAllocNonPaged(sizeof(GenericEngineApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GenericEngineApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GenericEngineApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GenericEngineApi(pThis); + status = __nvoc_ctor_GenericEngineApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GenericEngineApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GenericEngineApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GenericEngineApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_generic_engine_nvoc.h b/src/nvidia/generated/g_generic_engine_nvoc.h new file mode 100644 index 000000000..3939cbd9c --- /dev/null +++ b/src/nvidia/generated/g_generic_engine_nvoc.h @@ -0,0 +1,255 @@ +#ifndef _G_GENERIC_ENGINE_NVOC_H_ +#define _G_GENERIC_ENGINE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_generic_engine_nvoc.h" + +#ifndef _GENERICENGINEAPI_H_ +#define _GENERICENGINEAPI_H_ + +#include "gpu/gpu_resource.h" +#include "ctrl/ctrl90e6.h" +#include "rmapi/resource.h" // for macro RMCTRL_EXPORT etc. + +/*! + * RM internal class providing a generic engine API to RM clients (e.g.: + * GF100_SUBDEVICE_GRAPHICS and GF100_SUBDEVICE_FB). 
Classes are primarily used + * for exposing BAR0 mappings and controls. + */ +#ifdef NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GenericEngineApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct GenericEngineApi *__nvoc_pbase_GenericEngineApi; + NV_STATUS (*__genapiMap__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__genapiGetMapAddrSpace__)(struct GenericEngineApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__genapiControl__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__genapiCtrlCmdMasterGetErrorIntrOffsetMask__)(struct GenericEngineApi *, NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS *); + NV_STATUS (*__genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask__)(struct GenericEngineApi *, NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS *); + NvBool (*__genapiShareCallback__)(struct GenericEngineApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__genapiUnmap__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__genapiGetMemInterMapParams__)(struct GenericEngineApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__genapiGetMemoryMappingDescriptor__)(struct GenericEngineApi *, struct MEMORY_DESCRIPTOR **); + NvHandle (*__genapiGetInternalObjectHandle__)(struct GenericEngineApi *); + NV_STATUS (*__genapiControlFilter__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__genapiAddAdditionalDependants__)(struct RsClient *, struct GenericEngineApi *, RsResourceRef *); + NvU32 (*__genapiGetRefCount__)(struct GenericEngineApi *); + NV_STATUS (*__genapiCheckMemInterUnmap__)(struct GenericEngineApi *, NvBool); + NV_STATUS (*__genapiMapTo__)(struct GenericEngineApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__genapiControl_Prologue__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__genapiGetRegBaseOffsetAndSize__)(struct GenericEngineApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__genapiCanCopy__)(struct GenericEngineApi *); + NV_STATUS (*__genapiInternalControlForward__)(struct GenericEngineApi *, NvU32, void *, NvU32); + void (*__genapiPreDestruct__)(struct GenericEngineApi *); + NV_STATUS (*__genapiUnmapFrom__)(struct GenericEngineApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__genapiControl_Epilogue__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__genapiControlLookup__)(struct GenericEngineApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvBool (*__genapiAccessCallback__)(struct GenericEngineApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +#define __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +typedef struct GenericEngineApi GenericEngineApi; +#endif /* __NVOC_CLASS_GenericEngineApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericEngineApi +#define __nvoc_class_id_GenericEngineApi 0x4bc329 +#endif 
/* __nvoc_class_id_GenericEngineApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi; + +#define __staticCast_GenericEngineApi(pThis) \ + ((pThis)->__nvoc_pbase_GenericEngineApi) + +#ifdef __nvoc_generic_engine_h_disabled +#define __dynamicCast_GenericEngineApi(pThis) ((GenericEngineApi*)NULL) +#else //__nvoc_generic_engine_h_disabled +#define __dynamicCast_GenericEngineApi(pThis) \ + ((GenericEngineApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GenericEngineApi))) +#endif //__nvoc_generic_engine_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_GenericEngineApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GenericEngineApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define genapiMap(pGenericEngineApi, pCallContext, pParams, pCpuMapping) genapiMap_DISPATCH(pGenericEngineApi, pCallContext, pParams, pCpuMapping) +#define genapiGetMapAddrSpace(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace) genapiGetMapAddrSpace_DISPATCH(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace) +#define genapiControl(pGenericEngineApi, pCallContext, pParams) genapiControl_DISPATCH(pGenericEngineApi, pCallContext, pParams) +#define genapiCtrlCmdMasterGetErrorIntrOffsetMask(pGenericEngineApi, pParams) genapiCtrlCmdMasterGetErrorIntrOffsetMask_DISPATCH(pGenericEngineApi, pParams) +#define genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask(pGenericEngineApi, pParams) genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask_DISPATCH(pGenericEngineApi, pParams) +#define genapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) genapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define genapiUnmap(pGpuResource, pCallContext, pCpuMapping) genapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define genapiGetMemInterMapParams(pRmResource, pParams) genapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define genapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) genapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define genapiGetInternalObjectHandle(pGpuResource) genapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define genapiControlFilter(pResource, pCallContext, pParams) genapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define genapiAddAdditionalDependants(pClient, pResource, pReference) genapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define genapiGetRefCount(pResource) genapiGetRefCount_DISPATCH(pResource) +#define genapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) genapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define genapiMapTo(pResource, pParams) genapiMapTo_DISPATCH(pResource, pParams) +#define genapiControl_Prologue(pResource, pCallContext, pParams) genapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define genapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) genapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define genapiCanCopy(pResource) genapiCanCopy_DISPATCH(pResource) +#define genapiInternalControlForward(pGpuResource, command, pParams, size) 
genapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define genapiPreDestruct(pResource) genapiPreDestruct_DISPATCH(pResource) +#define genapiUnmapFrom(pResource, pParams) genapiUnmapFrom_DISPATCH(pResource, pParams) +#define genapiControl_Epilogue(pResource, pCallContext, pParams) genapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define genapiControlLookup(pResource, pParams, ppEntry) genapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define genapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) genapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS genapiMap_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS genapiMap_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGenericEngineApi->__genapiMap__(pGenericEngineApi, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS genapiGetMapAddrSpace_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS genapiGetMapAddrSpace_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGenericEngineApi->__genapiGetMapAddrSpace__(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS genapiControl_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS genapiControl_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGenericEngineApi->__genapiControl__(pGenericEngineApi, pCallContext, pParams); +} + +NV_STATUS genapiCtrlCmdMasterGetErrorIntrOffsetMask_IMPL(struct GenericEngineApi *pGenericEngineApi, NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS *pParams); + +static inline NV_STATUS genapiCtrlCmdMasterGetErrorIntrOffsetMask_DISPATCH(struct GenericEngineApi *pGenericEngineApi, NV90E6_CTRL_MASTER_GET_ERROR_INTR_OFFSET_MASK_PARAMS *pParams) { + return pGenericEngineApi->__genapiCtrlCmdMasterGetErrorIntrOffsetMask__(pGenericEngineApi, pParams); +} + +NV_STATUS genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask_IMPL(struct GenericEngineApi *pGenericEngineApi, NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS *pParams); + +static inline NV_STATUS genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask_DISPATCH(struct GenericEngineApi *pGenericEngineApi, NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS *pParams) { + return pGenericEngineApi->__genapiCtrlCmdMasterGetVirtualFunctionErrorContIntrMask__(pGenericEngineApi, pParams); +} + +static inline NvBool genapiShareCallback_DISPATCH(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__genapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS genapiUnmap_DISPATCH(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__genapiUnmap__(pGpuResource, pCallContext, 
pCpuMapping); +} + +static inline NV_STATUS genapiGetMemInterMapParams_DISPATCH(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__genapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS genapiGetMemoryMappingDescriptor_DISPATCH(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__genapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvHandle genapiGetInternalObjectHandle_DISPATCH(struct GenericEngineApi *pGpuResource) { + return pGpuResource->__genapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS genapiControlFilter_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__genapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void genapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) { + pResource->__genapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 genapiGetRefCount_DISPATCH(struct GenericEngineApi *pResource) { + return pResource->__genapiGetRefCount__(pResource); +} + +static inline NV_STATUS genapiCheckMemInterUnmap_DISPATCH(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__genapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS genapiMapTo_DISPATCH(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__genapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS genapiControl_Prologue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__genapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS genapiGetRegBaseOffsetAndSize_DISPATCH(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__genapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool genapiCanCopy_DISPATCH(struct GenericEngineApi *pResource) { + return pResource->__genapiCanCopy__(pResource); +} + +static inline NV_STATUS genapiInternalControlForward_DISPATCH(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__genapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void genapiPreDestruct_DISPATCH(struct GenericEngineApi *pResource) { + pResource->__genapiPreDestruct__(pResource); +} + +static inline NV_STATUS genapiUnmapFrom_DISPATCH(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__genapiUnmapFrom__(pResource, pParams); +} + +static inline void genapiControl_Epilogue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__genapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS genapiControlLookup_DISPATCH(struct GenericEngineApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__genapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvBool genapiAccessCallback_DISPATCH(struct GenericEngineApi *pResource, struct RsClient 
*pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__genapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS genapiConstruct_IMPL(struct GenericEngineApi *arg_pGenericEngineApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_genapiConstruct(arg_pGenericEngineApi, arg_pCallContext, arg_pParams) genapiConstruct_IMPL(arg_pGenericEngineApi, arg_pCallContext, arg_pParams) +void genapiDestruct_IMPL(struct GenericEngineApi *pGenericEngineApi); +#define __nvoc_genapiDestruct(pGenericEngineApi) genapiDestruct_IMPL(pGenericEngineApi) +#undef PRIVATE_FIELD + + +#endif // _GENERICENGINEAPI_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GENERIC_ENGINE_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_acct_nvoc.c b/src/nvidia/generated/g_gpu_acct_nvoc.c new file mode 100644 index 000000000..4e9d9eb1b --- /dev/null +++ b/src/nvidia/generated/g_gpu_acct_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_GPU_ACCT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_acct_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x0f1350 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuAccounting; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_GpuAccounting(GpuAccounting*); +void __nvoc_init_funcTable_GpuAccounting(GpuAccounting*); +NV_STATUS __nvoc_ctor_GpuAccounting(GpuAccounting*); +void __nvoc_init_dataField_GpuAccounting(GpuAccounting*); +void __nvoc_dtor_GpuAccounting(GpuAccounting*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuAccounting; + +static const struct NVOC_RTTI __nvoc_rtti_GpuAccounting_GpuAccounting = { + /*pClassDef=*/ &__nvoc_class_def_GpuAccounting, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuAccounting, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuAccounting_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuAccounting, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GpuAccounting = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_GpuAccounting_GpuAccounting, + &__nvoc_rtti_GpuAccounting_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuAccounting = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuAccounting), + /*classId=*/ classId(GpuAccounting), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuAccounting", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuAccounting, + /*pCastInfo=*/ &__nvoc_castinfo_GpuAccounting, + /*pExportInfo=*/ &__nvoc_export_info_GpuAccounting +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuAccounting = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_GpuAccounting(GpuAccounting *pThis) { + __nvoc_gpuacctDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuAccounting(GpuAccounting *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_GpuAccounting(GpuAccounting *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); 
+ if (status != NV_OK) goto __nvoc_ctor_GpuAccounting_fail_Object; + __nvoc_init_dataField_GpuAccounting(pThis); + + status = __nvoc_gpuacctConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_GpuAccounting_fail__init; + goto __nvoc_ctor_GpuAccounting_exit; // Success + +__nvoc_ctor_GpuAccounting_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_GpuAccounting_fail_Object: +__nvoc_ctor_GpuAccounting_exit: + + return status; +} + +static void __nvoc_init_funcTable_GpuAccounting_1(GpuAccounting *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_GpuAccounting(GpuAccounting *pThis) { + __nvoc_init_funcTable_GpuAccounting_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_GpuAccounting(GpuAccounting *pThis) { + pThis->__nvoc_pbase_GpuAccounting = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_GpuAccounting(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuAccounting(GpuAccounting **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + GpuAccounting *pThis; + + pThis = portMemAllocNonPaged(sizeof(GpuAccounting)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GpuAccounting)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuAccounting); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GpuAccounting(pThis); + status = __nvoc_ctor_GpuAccounting(pThis); + if (status != NV_OK) goto __nvoc_objCreate_GpuAccounting_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GpuAccounting_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuAccounting(GpuAccounting **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_GpuAccounting(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_acct_nvoc.h b/src/nvidia/generated/g_gpu_acct_nvoc.h new file mode 100644 index 000000000..9b307ab2f --- /dev/null +++ b/src/nvidia/generated/g_gpu_acct_nvoc.h @@ -0,0 +1,269 @@ +#ifndef _G_GPU_ACCT_NVOC_H_ +#define _G_GPU_ACCT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_gpu_acct_nvoc.h" + +#ifndef _GPU_ACCT_H_ +#define _GPU_ACCT_H_ + +#include "core/system.h" +#include "containers/map.h" +#include "containers/list.h" +#include "ctrl/ctrl0000/ctrl0000gpuacct.h" +#include "ctrl/ctrl0000/ctrl0000gpu.h" // NV0000_CTRL_GPU_MAX_ATTACHED_GPUS +#include "ctrl/ctrl2080/ctrl2080perf.h" // NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS + +typedef struct TMR_EVENT TMR_EVENT; + +// Sum of NV_MAX_LIVE_ACCT_PROCESS and NV_MAX_DEAD_ACCT_PROCESS is defined as +// NV0000_GPUACCT_PID_MAX_COUNT, which is equal to the max number of processes +// that can be returned by NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS RM +// control call. +// A VM/host can have a max of 4k compute/graphics processes, so the +// NV0000_GPUACCT_PID_MAX_COUNT limit for max no processes returned by +// NV0000_CTRL_CMD_GPUACCT_GET_ACCOUNTING_PIDS RM control is sufficient. +#define NV_MAX_LIVE_ACCT_PROCESS 3872 +#define NV_MAX_DEAD_ACCT_PROCESS 128 + +#define NV_GPUACCT_PROC_TYPE_CPU 0 +#define NV_GPUACCT_PROC_TYPE_GPU 1 + +#define NV_INVALID_VM_INDEX 0xFFFFFFFF +#define NV_INVALID_VM_PID 0xFFFFFFFF + +#define IS_VALID_SUBPID(x) (((x) > 0) && ((x) != 0xFFFFFFFF)) + +typedef struct +{ + NvU32 procId; // Pid of the process. + NvU32 procType; // Type of the process. + NvU32 gpuUtil; // Process's average GR engine utilization. + NvU64 sumUtil; // Running sum of process's GR engine utilization. + NvU32 fbUtil; // Process's average FB bandwidth utilization. + NvU64 sumFbUtil; // Running sum of process's FB bandwidth utilization. + NvU32 startSampleCount; // At process start, this variable is set to + // the current total sample count of GPU. + NvU32 totalSampleCount; // At process stop, this variable is set to + // difference of + // (Current total sample count - Start sample count). + NvU64 maxFbUsage; // Process's high watermark FB allocated (in bytes). + NvU64 startTime; // Time when accounting is started for the process. + NvU64 endTime; // Time when accounting is stopped for the process. + NvU32 refCount; // Count to keep track of accounting requests. + + // Following members are only used on Grid host. + NvU32 isGuestProcess; // Set if the entry corresponds to a guest VM process. + + MapNode mapNode; // Node in the Map. + ListNode listNode; // Node in the list. +} GPUACCT_PROC_ENTRY; + +MAKE_INTRUSIVE_MAP (GPU_ACCT_PROC_MAP, GPUACCT_PROC_ENTRY, mapNode); +MAKE_INTRUSIVE_LIST(GPU_ACCT_PROC_LIST, GPUACCT_PROC_ENTRY, listNode); + +typedef struct +{ + GPU_ACCT_PROC_MAP procMap; + GPU_ACCT_PROC_LIST procList; +} GPU_ACCT_PROC_DATA_STORE; + +typedef struct +{ + TMR_EVENT *pTmrEvent; // Pointer to the timer event created to schedule main callback + NvU64 lastUpdateTimestamp; // Time stamp of last PMU sample set. + NvU32 totalSampleCount; // Total samples of GPU of since accounting started for this GPU. 
+ + // Pre-allocated buffer for making ctrl calls in callbacks + NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS *pSamplesParams; + + GPU_ACCT_PROC_DATA_STORE liveProcAcctInfo; // Pointer to list of live processes + // running on this GPU. + GPU_ACCT_PROC_DATA_STORE deadProcAcctInfo; // Pointer to list of dead processes + // running on this GPU. + +} GPUACCT_GPU_INSTANCE_INFO; + +#ifdef NVOC_GPU_ACCT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GpuAccounting { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct GpuAccounting *__nvoc_pbase_GpuAccounting; + GPUACCT_GPU_INSTANCE_INFO gpuInstanceInfo[32]; +}; + +#ifndef __NVOC_CLASS_GpuAccounting_TYPEDEF__ +#define __NVOC_CLASS_GpuAccounting_TYPEDEF__ +typedef struct GpuAccounting GpuAccounting; +#endif /* __NVOC_CLASS_GpuAccounting_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuAccounting +#define __nvoc_class_id_GpuAccounting 0x0f1350 +#endif /* __nvoc_class_id_GpuAccounting */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuAccounting; + +#define __staticCast_GpuAccounting(pThis) \ + ((pThis)->__nvoc_pbase_GpuAccounting) + +#ifdef __nvoc_gpu_acct_h_disabled +#define __dynamicCast_GpuAccounting(pThis) ((GpuAccounting*)NULL) +#else //__nvoc_gpu_acct_h_disabled +#define __dynamicCast_GpuAccounting(pThis) \ + ((GpuAccounting*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuAccounting))) +#endif //__nvoc_gpu_acct_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GpuAccounting(GpuAccounting**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuAccounting(GpuAccounting**, Dynamic*, NvU32); +#define __objCreate_GpuAccounting(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_GpuAccounting((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS gpuacctConstruct_IMPL(struct GpuAccounting *arg_); +#define __nvoc_gpuacctConstruct(arg_) gpuacctConstruct_IMPL(arg_) +void gpuacctDestruct_IMPL(struct GpuAccounting *arg0); +#define __nvoc_gpuacctDestruct(arg0) gpuacctDestruct_IMPL(arg0) +NV_STATUS gpuacctGetAccountingMode_IMPL(struct GpuAccounting *arg0, NvU32 arg1, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS *arg2); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctGetAccountingMode(struct GpuAccounting *arg0, NvU32 arg1, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS *arg2) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctGetAccountingMode(arg0, arg1, arg2) gpuacctGetAccountingMode_IMPL(arg0, arg1, arg2) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctEnableAccounting_IMPL(struct GpuAccounting *arg0, NvU32 arg1, NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *arg2); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctEnableAccounting(struct GpuAccounting *arg0, NvU32 arg1, NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *arg2) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctEnableAccounting(arg0, arg1, arg2) gpuacctEnableAccounting_IMPL(arg0, arg1, arg2) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctDisableAccounting_IMPL(struct GpuAccounting *arg0, NvU32 arg1, NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *arg2); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS 
gpuacctDisableAccounting(struct GpuAccounting *arg0, NvU32 arg1, NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *arg2) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctDisableAccounting(arg0, arg1, arg2) gpuacctDisableAccounting_IMPL(arg0, arg1, arg2) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctClearAccountingData_IMPL(struct GpuAccounting *arg0, NvU32 arg1, NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS *arg2); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctClearAccountingData(struct GpuAccounting *arg0, NvU32 arg1, NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS *arg2) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctClearAccountingData(arg0, arg1, arg2) gpuacctClearAccountingData_IMPL(arg0, arg1, arg2) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctStartGpuAccounting_IMPL(struct GpuAccounting *arg0, NvU32 arg1, NvU32 arg2, NvU32 arg3); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctStartGpuAccounting(struct GpuAccounting *arg0, NvU32 arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctStartGpuAccounting(arg0, arg1, arg2, arg3) gpuacctStartGpuAccounting_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctStopGpuAccounting_IMPL(struct GpuAccounting *arg0, NvU32 arg1, NvU32 arg2, NvU32 arg3); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctStopGpuAccounting(struct GpuAccounting *arg0, NvU32 arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctStopGpuAccounting(arg0, arg1, arg2, arg3) gpuacctStopGpuAccounting_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctUpdateProcPeakFbUsage_IMPL(struct GpuAccounting *arg0, NvU32 arg1, NvU32 arg2, NvU32 arg3, NvU64 arg4); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctUpdateProcPeakFbUsage(struct GpuAccounting *arg0, NvU32 arg1, NvU32 arg2, NvU32 arg3, NvU64 arg4) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctUpdateProcPeakFbUsage(arg0, arg1, arg2, arg3, arg4) gpuacctUpdateProcPeakFbUsage_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctGetProcAcctInfo_IMPL(struct GpuAccounting *arg0, NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS *arg1); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctGetProcAcctInfo(struct GpuAccounting *arg0, NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS *arg1) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctGetProcAcctInfo(arg0, arg1) gpuacctGetProcAcctInfo_IMPL(arg0, arg1) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctGetAcctPids_IMPL(struct GpuAccounting *arg0, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS *arg1); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctGetAcctPids(struct GpuAccounting *arg0, NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS *arg1) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctGetAcctPids(arg0, arg1) gpuacctGetAcctPids_IMPL(arg0, arg1) +#endif //__nvoc_gpu_acct_h_disabled + +NV_STATUS gpuacctSetProcType_IMPL(struct GpuAccounting *arg0, NvU32 arg1, NvU32 arg2, NvU32 arg3, NvU32 arg4); +#ifdef __nvoc_gpu_acct_h_disabled +static inline NV_STATUS gpuacctSetProcType(struct GpuAccounting *arg0, NvU32 arg1, NvU32 arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("GpuAccounting was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_acct_h_disabled +#define gpuacctSetProcType(arg0, arg1, arg2, arg3, arg4) gpuacctSetProcType_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_gpu_acct_h_disabled + +#undef PRIVATE_FIELD + + +void gpuacctProcessGpuUtil(GPUACCT_GPU_INSTANCE_INFO *, NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE *); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_ACCT_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_boost_mgr_nvoc.c b/src/nvidia/generated/g_gpu_boost_mgr_nvoc.c new file mode 100644 index 000000000..e812acf3e --- /dev/null +++ b/src/nvidia/generated/g_gpu_boost_mgr_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_GPU_BOOST_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_boost_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x9f6bbf = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUBOOSTMGR; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR*); +void __nvoc_init_funcTable_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR*); +NV_STATUS __nvoc_ctor_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR*); +void __nvoc_init_dataField_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR*); +void __nvoc_dtor_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUBOOSTMGR; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUBOOSTMGR_OBJGPUBOOSTMGR = { + /*pClassDef=*/ &__nvoc_class_def_OBJGPUBOOSTMGR, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUBOOSTMGR, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUBOOSTMGR_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPUBOOSTMGR, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUBOOSTMGR = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJGPUBOOSTMGR_OBJGPUBOOSTMGR, + &__nvoc_rtti_OBJGPUBOOSTMGR_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUBOOSTMGR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPUBOOSTMGR), + /*classId=*/ classId(OBJGPUBOOSTMGR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPUBOOSTMGR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUBOOSTMGR, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGPUBOOSTMGR, + /*pExportInfo=*/ &__nvoc_export_info_OBJGPUBOOSTMGR +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUBOOSTMGR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR *pThis) { + __nvoc_gpuboostmgrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR *pThis) { + 
PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUBOOSTMGR_fail_Object; + __nvoc_init_dataField_OBJGPUBOOSTMGR(pThis); + + status = __nvoc_gpuboostmgrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUBOOSTMGR_fail__init; + goto __nvoc_ctor_OBJGPUBOOSTMGR_exit; // Success + +__nvoc_ctor_OBJGPUBOOSTMGR_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJGPUBOOSTMGR_fail_Object: +__nvoc_ctor_OBJGPUBOOSTMGR_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGPUBOOSTMGR_1(OBJGPUBOOSTMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR *pThis) { + __nvoc_init_funcTable_OBJGPUBOOSTMGR_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR *pThis) { + pThis->__nvoc_pbase_OBJGPUBOOSTMGR = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJGPUBOOSTMGR(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJGPUBOOSTMGR *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJGPUBOOSTMGR)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGPUBOOSTMGR)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUBOOSTMGR); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJGPUBOOSTMGR(pThis); + status = __nvoc_ctor_OBJGPUBOOSTMGR(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPUBOOSTMGR_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJGPUBOOSTMGR_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGPUBOOSTMGR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_boost_mgr_nvoc.h b/src/nvidia/generated/g_gpu_boost_mgr_nvoc.h new file mode 100644 index 000000000..954cc03a9 --- /dev/null +++ b/src/nvidia/generated/g_gpu_boost_mgr_nvoc.h @@ -0,0 +1,254 @@ +#ifndef _G_GPU_BOOST_MGR_NVOC_H_ +#define _G_GPU_BOOST_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_gpu_boost_mgr_nvoc.h" + +#ifndef GPU_BOOST_MGR_H +#define GPU_BOOST_MGR_H + +/*! + * @file + * @brief Definition of the Sync Gpu Boost Manager object + */ + +/* --------------------------- Includes --------------------------------------*/ +#include "core/core.h" +#include "core/system.h" +#include "ctrl/ctrl0000/ctrl0000syncgpuboost.h" // NV0000_SYNC_GPU_BOOST_MAX_GROUPS + +#include "containers/btree.h" +#include "nvlimits.h" + +/* ----------------------------- Macros --------------------------------------*/ +/*! + * This macro shall be used to iterate over all the GPUs in a Sync GPU Boost Group + * @param[in] pBoostMgr @ref OBJGPUBOOSTMGR pointer + * @param[in] grpId ID of the SGBG to loop over + * @param[in][out] pGpuItr Pointer to OBJGPU, used by the loop to iterate over. + * + * For every successful iteration, pGpuItr will point to the GPU being looped over. + * After all the iterations are complete, pGpuItr will be NULL. + */ +#define GPUBOOSTMGR_ITR_START(pBoostMgr, grpId, pGpuItr) \ +{ \ + NvU32 itrIdx = 0; \ + NV_ASSERT(NULL != (pBoostMgr)); \ + while (NULL != ((pGpuItr) = gpuboostmgrGpuItr((pBoostMgr), (grpId), &(itrIdx)))) \ + { + +#define GPUBOOSTMGR_ITR_END \ + } \ +} + +/* --------------------------- Datatypes ------------------------------------ */ + +/*! + * Defines a group of GPUs linked together for a synchronized workload. + * The linking is independent of the SLI status of the GPUs. + */ +typedef struct SYNC_GPU_BOOST_GROUP +{ + // Number of elements in @ref gpuIds + NvU32 gpuCount; + + // Number of clients holding a reference to this SGBG + NvU32 refCount; + + // IDs of GPUs to be put in the Sync Boost Group + NvU32 gpuIds[NV_MAX_DEVICES]; + + // If this group represents a bridgeless SLI + NvBool bBridgeless; +} SYNC_GPU_BOOST_GROUP; + +typedef struct OBJGPUBOOSTMGR *POBJGPUBOOSTMGR; + +/*! + * This is the Sync Gpu Boost Manager for RM. It keeps track of the + * Sync Gpu Boost Groups defined for the system and provides various methods related + * to their management. 
+ */ +#ifdef NVOC_GPU_BOOST_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJGPUBOOSTMGR { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJGPUBOOSTMGR *__nvoc_pbase_OBJGPUBOOSTMGR; + NODE *pGpuIdTree; + NvU32 groupCount; + SYNC_GPU_BOOST_GROUP pBoostGroups[16]; +}; + +#ifndef __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ +typedef struct OBJGPUBOOSTMGR OBJGPUBOOSTMGR; +#endif /* __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUBOOSTMGR +#define __nvoc_class_id_OBJGPUBOOSTMGR 0x9f6bbf +#endif /* __nvoc_class_id_OBJGPUBOOSTMGR */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUBOOSTMGR; + +#define __staticCast_OBJGPUBOOSTMGR(pThis) \ + ((pThis)->__nvoc_pbase_OBJGPUBOOSTMGR) + +#ifdef __nvoc_gpu_boost_mgr_h_disabled +#define __dynamicCast_OBJGPUBOOSTMGR(pThis) ((OBJGPUBOOSTMGR*)NULL) +#else //__nvoc_gpu_boost_mgr_h_disabled +#define __dynamicCast_OBJGPUBOOSTMGR(pThis) \ + ((OBJGPUBOOSTMGR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUBOOSTMGR))) +#endif //__nvoc_gpu_boost_mgr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPUBOOSTMGR(OBJGPUBOOSTMGR**, Dynamic*, NvU32); +#define __objCreate_OBJGPUBOOSTMGR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJGPUBOOSTMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS gpuboostmgrConstruct_IMPL(struct OBJGPUBOOSTMGR *arg_pBoostMgr); +#define __nvoc_gpuboostmgrConstruct(arg_pBoostMgr) gpuboostmgrConstruct_IMPL(arg_pBoostMgr) +void gpuboostmgrDestruct_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr); +#define __nvoc_gpuboostmgrDestruct(pBoostMgr) gpuboostmgrDestruct_IMPL(pBoostMgr) +NV_STATUS gpuboostmgrCreateGroup_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NV0000_SYNC_GPU_BOOST_GROUP_CONFIG *pBoostConfig); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline NV_STATUS gpuboostmgrCreateGroup(struct OBJGPUBOOSTMGR *pBoostMgr, NV0000_SYNC_GPU_BOOST_GROUP_CONFIG *pBoostConfig) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrCreateGroup(pBoostMgr, pBoostConfig) gpuboostmgrCreateGroup_IMPL(pBoostMgr, pBoostConfig) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +NV_STATUS gpuboostmgrDestroyGroup_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline NV_STATUS gpuboostmgrDestroyGroup(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrDestroyGroup(pBoostMgr, boostGroupId) gpuboostmgrDestroyGroup_IMPL(pBoostMgr, boostGroupId) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +NV_STATUS gpuboostmgrQueryGroups_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *pParams); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline NV_STATUS gpuboostmgrQueryGroups(struct OBJGPUBOOSTMGR *pBoostMgr, NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrQueryGroups(pBoostMgr, pParams) 
gpuboostmgrQueryGroups_IMPL(pBoostMgr, pParams) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +NV_STATUS gpuboostmgrCheckConfig_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NV0000_SYNC_GPU_BOOST_GROUP_CONFIG *pBoostConfig); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline NV_STATUS gpuboostmgrCheckConfig(struct OBJGPUBOOSTMGR *pBoostMgr, NV0000_SYNC_GPU_BOOST_GROUP_CONFIG *pBoostConfig) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrCheckConfig(pBoostMgr, pBoostConfig) gpuboostmgrCheckConfig_IMPL(pBoostMgr, pBoostConfig) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +NV_STATUS gpuboostmgrValidateGroupId_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline NV_STATUS gpuboostmgrValidateGroupId(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrValidateGroupId(pBoostMgr, boostGroupId) gpuboostmgrValidateGroupId_IMPL(pBoostMgr, boostGroupId) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +NV_STATUS gpuboostmgrIncrementRefCount_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline NV_STATUS gpuboostmgrIncrementRefCount(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrIncrementRefCount(pBoostMgr, boostGroupId) gpuboostmgrIncrementRefCount_IMPL(pBoostMgr, boostGroupId) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +NV_STATUS gpuboostmgrDecrementRefCount_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline NV_STATUS gpuboostmgrDecrementRefCount(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrDecrementRefCount(pBoostMgr, boostGroupId) gpuboostmgrDecrementRefCount_IMPL(pBoostMgr, boostGroupId) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +OBJGPU *gpuboostmgrGpuItr_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 grpId, NvU32 *pIndex); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline OBJGPU *gpuboostmgrGpuItr(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 grpId, NvU32 *pIndex) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NULL; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrGpuItr(pBoostMgr, grpId, pIndex) gpuboostmgrGpuItr_IMPL(pBoostMgr, grpId, pIndex) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +NV_STATUS gpuboostmgrGetBoostGrpIdFromGpu_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, OBJGPU *pGpu, NvU32 *pGrpId); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline NV_STATUS gpuboostmgrGetBoostGrpIdFromGpu(struct OBJGPUBOOSTMGR *pBoostMgr, OBJGPU *pGpu, NvU32 *pGrpId) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrGetBoostGrpIdFromGpu(pBoostMgr, pGpu, pGrpId) gpuboostmgrGetBoostGrpIdFromGpu_IMPL(pBoostMgr, pGpu, pGrpId) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +NvBool gpuboostmgrIsBoostGrpActive_IMPL(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 grpId); +#ifdef __nvoc_gpu_boost_mgr_h_disabled +static inline 
NvBool gpuboostmgrIsBoostGrpActive(struct OBJGPUBOOSTMGR *pBoostMgr, NvU32 grpId) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUBOOSTMGR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_boost_mgr_h_disabled +#define gpuboostmgrIsBoostGrpActive(pBoostMgr, grpId) gpuboostmgrIsBoostGrpActive_IMPL(pBoostMgr, grpId) +#endif //__nvoc_gpu_boost_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif // GPU_BOOST_MGR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_BOOST_MGR_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_class_list.c b/src/nvidia/generated/g_gpu_class_list.c new file mode 100644 index 000000000..945a03ff2 --- /dev/null +++ b/src/nvidia/generated/g_gpu_class_list.c @@ -0,0 +1,709 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_TU102(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halTU102ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC371_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC4B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC4B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC570_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC573_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC57A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC57B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { TURING_A, ENG_GR(0) }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_COMPUTE_A, ENG_GR(0) }, + { TURING_DMA_COPY_A, ENG_CE(0) }, + { TURING_DMA_COPY_A, ENG_CE(1) }, + { TURING_DMA_COPY_A, ENG_CE(2) }, + { TURING_DMA_COPY_A, ENG_CE(3) }, + { TURING_DMA_COPY_A, ENG_CE(4) }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALTU102_NUM_CLASS_DESCS (sizeof(halTU102ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALTU102_NUM_CLASS_DESCS); + + *pNumClasses = HALTU102_NUM_CLASS_DESCS; + return halTU102ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_TU104(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halTU104ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC371_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC4B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC4B0_VIDEO_DECODER, ENG_NVDEC(1) }, + { NVC4B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC570_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC573_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC57A_CURSOR_IMM_CHANNEL_PIO, 
ENG_KERNEL_DISPLAY }, + { NVC57B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { TURING_A, ENG_GR(0) }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_COMPUTE_A, ENG_GR(0) }, + { TURING_DMA_COPY_A, ENG_CE(0) }, + { TURING_DMA_COPY_A, ENG_CE(1) }, + { TURING_DMA_COPY_A, ENG_CE(2) }, + { TURING_DMA_COPY_A, ENG_CE(3) }, + { TURING_DMA_COPY_A, ENG_CE(4) }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALTU104_NUM_CLASS_DESCS (sizeof(halTU104ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALTU104_NUM_CLASS_DESCS); + + *pNumClasses = HALTU104_NUM_CLASS_DESCS; + return halTU104ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_TU106(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halTU106ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC371_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC4B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC4B0_VIDEO_DECODER, ENG_NVDEC(1) }, + { NVC4B0_VIDEO_DECODER, ENG_NVDEC(2) }, + { NVC4B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC570_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC573_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC57A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC57B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { TURING_A, ENG_GR(0) }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_COMPUTE_A, ENG_GR(0) }, + { TURING_DMA_COPY_A, ENG_CE(0) }, + { TURING_DMA_COPY_A, ENG_CE(1) }, + { TURING_DMA_COPY_A, ENG_CE(2) }, + { TURING_DMA_COPY_A, ENG_CE(3) }, + { TURING_DMA_COPY_A, ENG_CE(4) }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALTU106_NUM_CLASS_DESCS (sizeof(halTU106ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALTU106_NUM_CLASS_DESCS); + + *pNumClasses = HALTU106_NUM_CLASS_DESCS; + return halTU106ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_TU116(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halTU116ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, 
+ { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC371_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC4B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC4B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC570_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC573_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC57A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC57B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { TURING_A, ENG_GR(0) }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_COMPUTE_A, ENG_GR(0) }, + { TURING_DMA_COPY_A, ENG_CE(0) }, + { TURING_DMA_COPY_A, ENG_CE(1) }, + { TURING_DMA_COPY_A, ENG_CE(2) }, + { TURING_DMA_COPY_A, ENG_CE(3) }, + { TURING_DMA_COPY_A, ENG_CE(4) }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALTU116_NUM_CLASS_DESCS (sizeof(halTU116ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALTU116_NUM_CLASS_DESCS); + + *pNumClasses = HALTU116_NUM_CLASS_DESCS; + return halTU116ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_TU117(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halTU117ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVB4B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC371_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC4B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC570_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC573_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC57A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC57B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC57E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { TURING_A, ENG_GR(0) }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_COMPUTE_A, ENG_GR(0) }, + { TURING_DMA_COPY_A, ENG_CE(0) }, + { TURING_DMA_COPY_A, ENG_CE(1) }, + { TURING_DMA_COPY_A, ENG_CE(2) }, + { TURING_DMA_COPY_A, ENG_CE(3) }, + { TURING_DMA_COPY_A, ENG_CE(4) }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { 
VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALTU117_NUM_CLASS_DESCS (sizeof(halTU117ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALTU117_NUM_CLASS_DESCS); + + *pNumClasses = HALTU117_NUM_CLASS_DESCS; + return halTU117ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_GA100(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halGA100ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { AMPERE_A, ENG_GR(0) }, + { AMPERE_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { AMPERE_COMPUTE_A, ENG_GR(0) }, + { AMPERE_COMPUTE_A, ENG_GR(1) }, + { AMPERE_COMPUTE_A, ENG_GR(2) }, + { AMPERE_COMPUTE_A, ENG_GR(3) }, + { AMPERE_COMPUTE_A, ENG_GR(4) }, + { AMPERE_COMPUTE_A, ENG_GR(5) }, + { AMPERE_COMPUTE_A, ENG_GR(6) }, + { AMPERE_COMPUTE_A, ENG_GR(7) }, + { AMPERE_DMA_COPY_A, ENG_CE(0) }, + { AMPERE_DMA_COPY_A, ENG_CE(1) }, + { AMPERE_DMA_COPY_A, ENG_CE(2) }, + { AMPERE_DMA_COPY_A, ENG_CE(3) }, + { AMPERE_DMA_COPY_A, ENG_CE(4) }, + { AMPERE_DMA_COPY_A, ENG_CE(5) }, + { AMPERE_DMA_COPY_A, ENG_CE(6) }, + { AMPERE_DMA_COPY_A, ENG_CE(7) }, + { AMPERE_DMA_COPY_A, ENG_CE(8) }, + { AMPERE_DMA_COPY_A, ENG_CE(9) }, + { AMPERE_USERMODE_A, ENG_GPU }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC4D1_VIDEO_NVJPG, ENG_NVJPG }, + { NVC6B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC6B0_VIDEO_DECODER, ENG_NVDEC(1) }, + { NVC6B0_VIDEO_DECODER, ENG_NVDEC(2) }, + { NVC6B0_VIDEO_DECODER, ENG_NVDEC(3) }, + { NVC6B0_VIDEO_DECODER, ENG_NVDEC(4) }, + { NVC6FA_VIDEO_OFA, ENG_OFA }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALGA100_NUM_CLASS_DESCS (sizeof(halGA100ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALGA100_NUM_CLASS_DESCS); + + *pNumClasses = HALGA100_NUM_CLASS_DESCS; + return halGA100ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_GA102(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halGA102ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { AMPERE_B, ENG_GR(0) }, + { AMPERE_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { AMPERE_COMPUTE_B, ENG_GR(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(1) }, + { AMPERE_DMA_COPY_B, ENG_CE(2) }, + { AMPERE_DMA_COPY_B, ENG_CE(3) }, + { AMPERE_DMA_COPY_B, ENG_CE(4) }, + { AMPERE_USERMODE_A, ENG_GPU }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { 
KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC670_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC671_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC673_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(1) }, + { NVC7B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC7FA_VIDEO_OFA, ENG_OFA }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALGA102_NUM_CLASS_DESCS (sizeof(halGA102ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALGA102_NUM_CLASS_DESCS); + + *pNumClasses = HALGA102_NUM_CLASS_DESCS; + return halGA102ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_GA103(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halGA103ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { AMPERE_B, ENG_GR(0) }, + { AMPERE_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { AMPERE_COMPUTE_B, ENG_GR(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(1) }, + { AMPERE_DMA_COPY_B, ENG_CE(2) }, + { AMPERE_DMA_COPY_B, ENG_CE(3) }, + { AMPERE_DMA_COPY_B, ENG_CE(4) }, + { AMPERE_USERMODE_A, ENG_GPU }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC670_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC671_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC673_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(1) }, + { NVC7B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC7FA_VIDEO_OFA, ENG_OFA }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALGA103_NUM_CLASS_DESCS (sizeof(halGA103ClassDescriptorList) / 
sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALGA103_NUM_CLASS_DESCS); + + *pNumClasses = HALGA103_NUM_CLASS_DESCS; + return halGA103ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_GA104(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halGA104ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { AMPERE_B, ENG_GR(0) }, + { AMPERE_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { AMPERE_COMPUTE_B, ENG_GR(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(1) }, + { AMPERE_DMA_COPY_B, ENG_CE(2) }, + { AMPERE_DMA_COPY_B, ENG_CE(3) }, + { AMPERE_DMA_COPY_B, ENG_CE(4) }, + { AMPERE_USERMODE_A, ENG_GPU }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC670_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC671_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC673_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(1) }, + { NVC7B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC7FA_VIDEO_OFA, ENG_OFA }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALGA104_NUM_CLASS_DESCS (sizeof(halGA104ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALGA104_NUM_CLASS_DESCS); + + *pNumClasses = HALGA104_NUM_CLASS_DESCS; + return halGA104ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_GA106(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halGA106ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { AMPERE_B, ENG_GR(0) }, + { AMPERE_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { AMPERE_COMPUTE_B, ENG_GR(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(1) }, + { AMPERE_DMA_COPY_B, ENG_CE(2) }, + { AMPERE_DMA_COPY_B, ENG_CE(3) }, + { AMPERE_DMA_COPY_B, ENG_CE(4) }, + { AMPERE_USERMODE_A, ENG_GPU }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, 
ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC670_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC671_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC673_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(1) }, + { NVC7B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC7FA_VIDEO_OFA, ENG_OFA }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALGA106_NUM_CLASS_DESCS (sizeof(halGA106ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALGA106_NUM_CLASS_DESCS); + + *pNumClasses = HALGA106_NUM_CLASS_DESCS; + return halGA106ClassDescriptorList; +} + + + +const CLASSDESCRIPTOR * +gpuGetClassDescriptorList_GA107(POBJGPU pGpu, NvU32 *pNumClasses) +{ + static const CLASSDESCRIPTOR halGA107ClassDescriptorList[] = { + { ACCESS_COUNTER_NOTIFY_BUFFER, ENG_GR(0) }, + { AMPERE_B, ENG_GR(0) }, + { AMPERE_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { AMPERE_COMPUTE_B, ENG_GR(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(0) }, + { AMPERE_DMA_COPY_B, ENG_CE(1) }, + { AMPERE_DMA_COPY_B, ENG_CE(2) }, + { AMPERE_DMA_COPY_B, ENG_CE(3) }, + { AMPERE_DMA_COPY_B, ENG_CE(4) }, + { AMPERE_USERMODE_A, ENG_GPU }, + { FERMI_CONTEXT_SHARE_A, ENG_KERNEL_FIFO }, + { FERMI_TWOD_A, ENG_GR(0) }, + { FERMI_VASPACE_A, ENG_DMA }, + { G84_PERFBUFFER, ENG_BUS }, + { GF100_DISP_SW, ENG_SW }, + { GF100_HDACODEC, ENG_HDACODEC }, + { GF100_SUBDEVICE_MASTER, ENG_GPU }, + { GF100_TIMED_SEMAPHORE_SW, ENG_SW }, + { GF100_ZBC_CLEAR, ENG_KERNEL_MEMORY_SYSTEM }, + { GP100_UVM_SW, ENG_SW }, + { KEPLER_CHANNEL_GROUP_A, ENG_KERNEL_FIFO }, + { KEPLER_INLINE_TO_MEMORY_B, ENG_GR(0) }, + { MMU_FAULT_BUFFER, ENG_GR(0) }, + { NV0060_SYNC_GPU_BOOST, ENG_GPU }, + { NV01_MEMORY_VIRTUAL, ENG_DMA }, + { NV04_DISPLAY_COMMON, ENG_KERNEL_DISPLAY }, + { NV04_SOFTWARE_TEST, ENG_SW }, + { NV50_DEFERRED_API_CLASS, ENG_SW }, + { NV50_MEMORY_VIRTUAL, ENG_DMA }, + { NV50_P2P, ENG_BUS }, + { NV50_THIRD_PARTY_P2P, ENG_BUS }, + { NVC372_DISPLAY_SW, ENG_KERNEL_DISPLAY }, + { NVC670_DISPLAY, ENG_KERNEL_DISPLAY }, + { NVC671_DISP_SF_USER, ENG_KERNEL_DISPLAY }, + { NVC673_DISP_CAPABILITIES, ENG_KERNEL_DISPLAY }, + { NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY }, + { NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67D_CORE_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC67E_WINDOW_CHANNEL_DMA, ENG_KERNEL_DISPLAY }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(0) }, + { NVC7B0_VIDEO_DECODER, ENG_NVDEC(1) }, + { NVC7B7_VIDEO_ENCODER, ENG_MSENC(0) }, + { NVC7FA_VIDEO_OFA, ENG_OFA }, + { TURING_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { TURING_USERMODE_A, ENG_GPU }, + { VOLTA_CHANNEL_GPFIFO_A, ENG_KERNEL_FIFO }, + { VOLTA_USERMODE_A, ENG_GPU }, + }; + + #define HALGA107_NUM_CLASS_DESCS (sizeof(halGA107ClassDescriptorList) / sizeof(CLASSDESCRIPTOR)) + + ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALGA107_NUM_CLASS_DESCS); + + *pNumClasses = HALGA107_NUM_CLASS_DESCS; + return 
halGA107ClassDescriptorList; +} + + diff --git a/src/nvidia/generated/g_gpu_db_nvoc.c b/src/nvidia/generated/g_gpu_db_nvoc.c new file mode 100644 index 000000000..b1d3150e3 --- /dev/null +++ b/src/nvidia/generated/g_gpu_db_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_db_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xcdd250 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_GpuDb(GpuDb*); +void __nvoc_init_funcTable_GpuDb(GpuDb*); +NV_STATUS __nvoc_ctor_GpuDb(GpuDb*); +void __nvoc_init_dataField_GpuDb(GpuDb*); +void __nvoc_dtor_GpuDb(GpuDb*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuDb; + +static const struct NVOC_RTTI __nvoc_rtti_GpuDb_GpuDb = { + /*pClassDef=*/ &__nvoc_class_def_GpuDb, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuDb, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuDb_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuDb, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GpuDb = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_GpuDb_GpuDb, + &__nvoc_rtti_GpuDb_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuDb), + /*classId=*/ classId(GpuDb), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuDb", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuDb, + /*pCastInfo=*/ &__nvoc_castinfo_GpuDb, + /*pExportInfo=*/ &__nvoc_export_info_GpuDb +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuDb = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_GpuDb(GpuDb *pThis) { + __nvoc_gpudbDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuDb(GpuDb *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_GpuDb(GpuDb *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail_Object; + __nvoc_init_dataField_GpuDb(pThis); + + status = __nvoc_gpudbConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail__init; + goto __nvoc_ctor_GpuDb_exit; // Success + +__nvoc_ctor_GpuDb_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_GpuDb_fail_Object: +__nvoc_ctor_GpuDb_exit: + + return status; +} + +static void __nvoc_init_funcTable_GpuDb_1(GpuDb *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_GpuDb(GpuDb *pThis) { + __nvoc_init_funcTable_GpuDb_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_GpuDb(GpuDb *pThis) { + pThis->__nvoc_pbase_GpuDb = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_GpuDb(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + GpuDb *pThis; + + pThis = portMemAllocNonPaged(sizeof(GpuDb)); + if 
(pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GpuDb)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuDb); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GpuDb(pThis); + status = __nvoc_ctor_GpuDb(pThis); + if (status != NV_OK) goto __nvoc_objCreate_GpuDb_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GpuDb_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_GpuDb(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_db_nvoc.h b/src/nvidia/generated/g_gpu_db_nvoc.h new file mode 100644 index 000000000..3013f662e --- /dev/null +++ b/src/nvidia/generated/g_gpu_db_nvoc.h @@ -0,0 +1,154 @@ +#ifndef _G_GPU_DB_NVOC_H_ +#define _G_GPU_DB_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_db_nvoc.h" + +#ifndef GPU_DB_H +#define GPU_DB_H + +#include "core/core.h" +#include "containers/list.h" +#include "gpu/gpu_uuid.h" + +typedef struct NBADDR NBADDR; + +// **************************************************************************** +// Type Definitions +// **************************************************************************** +// +// The GPU database object is used to encapsulate the GPUINFO +// + +/*! + * @brief Compute policy data for a GPU + * Saved policy information for a GPU that can be retrieved later + */ +typedef struct GPU_COMPUTE_POLICY_INFO +{ + // + // Timeslice config for channels/TSG's on a runlist. The timeslice configs + // are restricted to four levels : default, short, medium and long. + // + NvU32 timeslice; + // Future policies to be added here +} GPU_COMPUTE_POLICY_INFO; + +typedef struct +{ + NvU32 domain; + NvU8 bus; + NvU8 device; + NvU8 function; + NvBool bValid; +} PCI_PORT_INFO; + +#define GPUDB_CLK_PROP_TOP_POLS_COUNT 1 + +/*! 
+ * @brief Clock Propagation Topology Policies control data + */ +typedef struct +{ + NvU8 chosenIdx[GPUDB_CLK_PROP_TOP_POLS_COUNT]; +} GPU_CLK_PROP_TOP_POLS_CONTROL; + +typedef struct +{ + NvU8 uuid[RM_SHA1_GID_SIZE]; + PCI_PORT_INFO pciPortInfo; + PCI_PORT_INFO upstreamPciPortInfo; + GPU_COMPUTE_POLICY_INFO policyInfo; + NvBool bShutdownState; + GPU_CLK_PROP_TOP_POLS_CONTROL clkPropTopPolsControl; +} GPU_INFO_LIST_NODE, *PGPU_INFO_LIST_NODE; + +MAKE_LIST(GpuInfoList, GPU_INFO_LIST_NODE); + +#ifdef NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GpuDb { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct GpuDb *__nvoc_pbase_GpuDb; + GpuInfoList gpuList; + PORT_MUTEX *pLock; +}; + +#ifndef __NVOC_CLASS_GpuDb_TYPEDEF__ +#define __NVOC_CLASS_GpuDb_TYPEDEF__ +typedef struct GpuDb GpuDb; +#endif /* __NVOC_CLASS_GpuDb_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuDb +#define __nvoc_class_id_GpuDb 0xcdd250 +#endif /* __nvoc_class_id_GpuDb */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb; + +#define __staticCast_GpuDb(pThis) \ + ((pThis)->__nvoc_pbase_GpuDb) + +#ifdef __nvoc_gpu_db_h_disabled +#define __dynamicCast_GpuDb(pThis) ((GpuDb*)NULL) +#else //__nvoc_gpu_db_h_disabled +#define __dynamicCast_GpuDb(pThis) \ + ((GpuDb*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuDb))) +#endif //__nvoc_gpu_db_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuDb(GpuDb**, Dynamic*, NvU32); +#define __objCreate_GpuDb(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_GpuDb((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS gpudbConstruct_IMPL(struct GpuDb *arg_pGpuDb); +#define __nvoc_gpudbConstruct(arg_pGpuDb) gpudbConstruct_IMPL(arg_pGpuDb) +void gpudbDestruct_IMPL(struct GpuDb *pGpuDb); +#define __nvoc_gpudbDestruct(pGpuDb) gpudbDestruct_IMPL(pGpuDb) +#undef PRIVATE_FIELD + + +NV_STATUS gpudbRegisterGpu(const NvU8 *pUuid, const NBADDR *pUpstreamPortPciInfo, NvU64 pciInfo); +NV_STATUS gpudbSetGpuComputePolicyConfig(const NvU8 *uuid, NvU32 policyType, GPU_COMPUTE_POLICY_INFO *policyInfo); +NV_STATUS gpudbGetGpuComputePolicyConfigs(const NvU8 *uuid, GPU_COMPUTE_POLICY_INFO *policyInfo); +NV_STATUS gpudbSetClockPoliciesControl(const NvU8 *uuid, GPU_CLK_PROP_TOP_POLS_CONTROL *pControl); +NV_STATUS gpudbGetClockPoliciesControl(const NvU8 *uuid, GPU_CLK_PROP_TOP_POLS_CONTROL *pControl); +NV_STATUS gpudbSetShutdownState(const NvU8 *pUuid); +#endif // GPU_DB_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_DB_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_group_nvoc.c b/src/nvidia/generated/g_gpu_group_nvoc.c new file mode 100644 index 000000000..3e792c4f8 --- /dev/null +++ b/src/nvidia/generated/g_gpu_group_nvoc.c @@ -0,0 +1,148 @@ +#define NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_group_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe40531 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJGPUGRP(OBJGPUGRP*); +void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP*); +NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP*); 
+void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP*); +void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUGRP; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUGRP_OBJGPUGRP = { + /*pClassDef=*/ &__nvoc_class_def_OBJGPUGRP, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUGRP, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUGRP_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPUGRP, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUGRP = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJGPUGRP_OBJGPUGRP, + &__nvoc_rtti_OBJGPUGRP_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPUGRP), + /*classId=*/ classId(OBJGPUGRP), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPUGRP", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUGRP, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGPUGRP, + /*pExportInfo=*/ &__nvoc_export_info_OBJGPUGRP +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUGRP = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUGRP_fail_Object; + __nvoc_init_dataField_OBJGPUGRP(pThis); + goto __nvoc_ctor_OBJGPUGRP_exit; // Success + +__nvoc_ctor_OBJGPUGRP_fail_Object: +__nvoc_ctor_OBJGPUGRP_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGPUGRP_1(OBJGPUGRP *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP *pThis) { + __nvoc_init_funcTable_OBJGPUGRP_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJGPUGRP(OBJGPUGRP *pThis) { + pThis->__nvoc_pbase_OBJGPUGRP = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJGPUGRP(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJGPUGRP *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJGPUGRP)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGPUGRP)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUGRP); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJGPUGRP(pThis); + status = __nvoc_ctor_OBJGPUGRP(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPUGRP_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJGPUGRP_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, 
NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGPUGRP(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_group_nvoc.h b/src/nvidia/generated/g_gpu_group_nvoc.h new file mode 100644 index 000000000..dca3585c8 --- /dev/null +++ b/src/nvidia/generated/g_gpu_group_nvoc.h @@ -0,0 +1,308 @@ +#ifndef _G_GPU_GROUP_NVOC_H_ +#define _G_GPU_GROUP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_group_nvoc.h" + +#ifndef GPU_GROUP_H +#define GPU_GROUP_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for GPUGRP Object. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "nvoc/object.h" +#include "nvlimits.h" + +struct OBJVASPACE; +struct OBJGPU; + +/*! + * @brief Specialization of @ref FOR_EACH_INDEX_IN_MASK for looping + * over each GPU in an instance bitmask and processing the GPU in + * unicast mode. + * + * @note This macro is constructed to handle 'continue' and 'break' + * statements but not 'return.' Do NOT return directly from the loop - + * use status variable and 'break' to safely abort. + * + * @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64) + * @param[in,out] pGpu Local GPU variable to use. + * @param[in] mask GPU instance bitmask. 
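+ * @param[in] pSys System object pointer; accepted by the macro but not referenced in the macro body as defined here.
+ *
+ * Illustrative usage sketch ('pSys', 'pGpu', 'gpuMask', and 'doSomethingUnicast' are
+ * caller-provided placeholders); per the note above, abort with a status variable
+ * and 'break', never 'return':
+ *
+ *     NV_STATUS status = NV_OK;
+ *     FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, gpuMask)
+ *     {
+ *         status = doSomethingUnicast(pGpu);
+ *         if (status != NV_OK)
+ *             break;
+ *     }
+ *     FOR_EACH_GPU_IN_MASK_UC_END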
+ */ +#define FOR_EACH_GPU_IN_MASK_UC(maskWidth, pSys, pGpu, mask) \ +{ \ + NvU32 gpuInstance; \ + NvBool bOrigBcState = NV_FALSE; \ + NvBool bEntryBcState = NV_FALSE; \ + OBJGPU *pEntryGpu = pGpu; \ + pGpu = NULL; \ + if (pEntryGpu != NULL) \ + { \ + bEntryBcState = gpumgrGetBcEnabledStatus(pEntryGpu); \ + } \ + FOR_EACH_INDEX_IN_MASK(maskWidth, gpuInstance, mask) \ + { \ + if (NULL != pGpu) /* continue */ \ + { \ + gpumgrSetBcEnabledStatus(pGpu, bOrigBcState); \ + } \ + pGpu = gpumgrGetGpu(gpuInstance); \ + if (pGpu == NULL) \ + { /* We should never hit this assert */ \ + NV_ASSERT(0); /* But it occurs very rarely */ \ + continue; /* It needs to be debugged */ \ + } \ + bOrigBcState = gpumgrGetBcEnabledStatus(pGpu); \ + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); \ + +#define FOR_EACH_GPU_IN_MASK_UC_END \ + } \ + FOR_EACH_INDEX_IN_MASK_END \ + if (NULL != pGpu) /* break */ \ + { \ + gpumgrSetBcEnabledStatus(pGpu, bOrigBcState); \ + pGpu = NULL; \ + } \ + if (pEntryGpu != NULL) \ + { \ + NV_ASSERT(bEntryBcState == gpumgrGetBcEnabledStatus(pEntryGpu));\ + pGpu = pEntryGpu; \ + } \ +} + +typedef struct _def_vid_link_node +{ + /*! + * GPU instance for this node + */ + NvU32 gpuInstance; + /*! + * DrPort that receives data from Child GPU + */ + NvU32 ParentDrPort; + /*! + * DrPort that sources data to a Parent GPU + */ + NvU32 ChildDrPort; +} SLILINKNODE; + +typedef struct OBJGPUGRP *POBJGPUGRP; + +#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +typedef struct OBJGPUGRP OBJGPUGRP; +#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUGRP +#define __nvoc_class_id_OBJGPUGRP 0xe40531 +#endif /* __nvoc_class_id_OBJGPUGRP */ + + + +#ifdef NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJGPUGRP { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJGPUGRP *__nvoc_pbase_OBJGPUGRP; + NvU32 gpuMask; + NvU32 gpuSliLinkMask; + NvU32 linkingGpuMask; + NvU32 attachedGpuMaskAtLinking; + SLILINKNODE SliLinkOrder[8]; + NvU32 ConnectionCount; + NvU32 flags; + NvU32 displayFlags; + NvBool bcEnabled; + struct OBJGPU *parentGpu; + struct OBJVASPACE *pGlobalVASpace; +}; + +#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +typedef struct OBJGPUGRP OBJGPUGRP; +#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUGRP +#define __nvoc_class_id_OBJGPUGRP 0xe40531 +#endif /* __nvoc_class_id_OBJGPUGRP */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP; + +#define __staticCast_OBJGPUGRP(pThis) \ + ((pThis)->__nvoc_pbase_OBJGPUGRP) + +#ifdef __nvoc_gpu_group_h_disabled +#define __dynamicCast_OBJGPUGRP(pThis) ((OBJGPUGRP*)NULL) +#else //__nvoc_gpu_group_h_disabled +#define __dynamicCast_OBJGPUGRP(pThis) \ + ((OBJGPUGRP*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUGRP))) +#endif //__nvoc_gpu_group_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP**, Dynamic*, NvU32); +#define __objCreate_OBJGPUGRP(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJGPUGRP((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS gpugrpCreate_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpCreate(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) { + 
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpCreate(pGpuGrp, gpuMask) gpugrpCreate_IMPL(pGpuGrp, gpuMask) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpDestroy_IMPL(struct OBJGPUGRP *pGpuGrp); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpDestroy(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpDestroy(pGpuGrp) gpugrpDestroy_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +NvU32 gpugrpGetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp); +#ifdef __nvoc_gpu_group_h_disabled +static inline NvU32 gpugrpGetGpuMask(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return 0; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGpuMask(pGpuGrp) gpugrpGetGpuMask_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetGpuMask_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask); +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetGpuMask(struct OBJGPUGRP *pGpuGrp, NvU32 gpuMask) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetGpuMask(pGpuGrp, gpuMask) gpugrpSetGpuMask_IMPL(pGpuGrp, gpuMask) +#endif //__nvoc_gpu_group_h_disabled + +NvBool gpugrpGetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp); +#ifdef __nvoc_gpu_group_h_disabled +static inline NvBool gpugrpGetBcEnabledState(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetBcEnabledState(pGpuGrp) gpugrpGetBcEnabledState_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetBcEnabledState_IMPL(struct OBJGPUGRP *pGpuGrp, NvBool bcState); +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetBcEnabledState(struct OBJGPUGRP *pGpuGrp, NvBool bcState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetBcEnabledState(pGpuGrp, bcState) gpugrpSetBcEnabledState_IMPL(pGpuGrp, bcState) +#endif //__nvoc_gpu_group_h_disabled + +void gpugrpSetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu); +#ifdef __nvoc_gpu_group_h_disabled +static inline void gpugrpSetParentGpu(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pParentGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpSetParentGpu(pGpuGrp, pParentGpu) gpugrpSetParentGpu_IMPL(pGpuGrp, pParentGpu) +#endif //__nvoc_gpu_group_h_disabled + +struct OBJGPU *gpugrpGetParentGpu_IMPL(struct OBJGPUGRP *pGpuGrp); +#ifdef __nvoc_gpu_group_h_disabled +static inline struct OBJGPU *gpugrpGetParentGpu(struct OBJGPUGRP *pGpuGrp) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NULL; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetParentGpu(pGpuGrp) gpugrpGetParentGpu_IMPL(pGpuGrp) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpCreateGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpCreateGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu, NvU32 vaspaceClass, NvU64 vaStart, NvU64 vaEnd, NvU32 vaspaceFlags, struct OBJVASPACE **ppGlobalVAS) { + 
NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpCreateGlobalVASpace(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS) gpugrpCreateGlobalVASpace_IMPL(pGpuGrp, pGpu, vaspaceClass, vaStart, vaEnd, vaspaceFlags, ppGlobalVAS) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpDestroyGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpDestroyGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpDestroyGlobalVASpace(pGpuGrp, pGpu) gpugrpDestroyGlobalVASpace_IMPL(pGpuGrp, pGpu) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpGetGlobalVASpace_IMPL(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpGetGlobalVASpace(struct OBJGPUGRP *pGpuGrp, struct OBJVASPACE **ppGlobalVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGlobalVASpace(pGpuGrp, ppGlobalVAS) gpugrpGetGlobalVASpace_IMPL(pGpuGrp, ppGlobalVAS) +#endif //__nvoc_gpu_group_h_disabled + +NV_STATUS gpugrpGetGpuFromSubDeviceInstance_IMPL(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu); +#ifdef __nvoc_gpu_group_h_disabled +static inline NV_STATUS gpugrpGetGpuFromSubDeviceInstance(struct OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, struct OBJGPU **ppGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUGRP was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_group_h_disabled +#define gpugrpGetGpuFromSubDeviceInstance(pGpuGrp, subDeviceInst, ppGpu) gpugrpGetGpuFromSubDeviceInstance_IMPL(pGpuGrp, subDeviceInst, ppGpu) +#endif //__nvoc_gpu_group_h_disabled + +#undef PRIVATE_FIELD + + +#endif // GPU_GROUP_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_GROUP_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_halspec_nvoc.c b/src/nvidia/generated/g_gpu_halspec_nvoc.c new file mode 100644 index 000000000..fd4826455 --- /dev/null +++ b/src/nvidia/generated/g_gpu_halspec_nvoc.c @@ -0,0 +1,96 @@ +#define NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_halspec_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x34a6d6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; + +void __nvoc_init_RmHalspecOwner(RmHalspecOwner*, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner*); +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmHalspecOwner; + +static const struct NVOC_RTTI __nvoc_rtti_RmHalspecOwner_RmHalspecOwner = { + /*pClassDef=*/ &__nvoc_class_def_RmHalspecOwner, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmHalspecOwner, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmHalspecOwner = { + /*numRelatives=*/ 1, + /*relatives=*/ { 
+ &__nvoc_rtti_RmHalspecOwner_RmHalspecOwner, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmHalspecOwner), + /*classId=*/ classId(RmHalspecOwner), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmHalspecOwner", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_RmHalspecOwner, + /*pExportInfo=*/ &__nvoc_export_info_RmHalspecOwner +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmHalspecOwner = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_RmHalspecOwner(pThis); + goto __nvoc_ctor_RmHalspecOwner_exit; // Success + +__nvoc_ctor_RmHalspecOwner_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmHalspecOwner_1(RmHalspecOwner *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner *pThis) { + __nvoc_init_funcTable_RmHalspecOwner_1(pThis); +} + +void __nvoc_init_RmHalspecOwner(RmHalspecOwner *pThis, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver) { + pThis->__nvoc_pbase_RmHalspecOwner = pThis; + __nvoc_init_halspec_ChipHal(&pThis->chipHal, ChipHal_arch, ChipHal_impl, ChipHal_hidrev); + __nvoc_init_halspec_RmVariantHal(&pThis->rmVariantHal, RmVariantHal_rmVariant); + __nvoc_init_halspec_DispIpHal(&pThis->dispIpHal, DispIpHal_ipver); + __nvoc_init_funcTable_RmHalspecOwner(pThis); +} + diff --git a/src/nvidia/generated/g_gpu_halspec_nvoc.h b/src/nvidia/generated/g_gpu_halspec_nvoc.h new file mode 100644 index 000000000..8b7d5d35e --- /dev/null +++ b/src/nvidia/generated/g_gpu_halspec_nvoc.h @@ -0,0 +1,91 @@ +#ifndef _G_GPU_HALSPEC_NVOC_H_ +#define _G_GPU_HALSPEC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_gpu_halspec_nvoc.h" + +#ifndef GPU_HALSPEC_H +#define GPU_HALSPEC_H + +#include "g_chips2halspec.h" // NVOC halspec, generated by rmconfig.pl + +#ifdef NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmHalspecOwner { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner; + struct ChipHal chipHal; + struct RmVariantHal rmVariantHal; + struct DispIpHal dispIpHal; +}; + +#ifndef __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ +#define __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ +typedef struct RmHalspecOwner RmHalspecOwner; +#endif /* __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmHalspecOwner +#define __nvoc_class_id_RmHalspecOwner 0x34a6d6 +#endif /* __nvoc_class_id_RmHalspecOwner */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; + +#define __staticCast_RmHalspecOwner(pThis) \ + ((pThis)->__nvoc_pbase_RmHalspecOwner) + +#ifdef __nvoc_gpu_halspec_h_disabled +#define __dynamicCast_RmHalspecOwner(pThis) ((RmHalspecOwner*)NULL) +#else //__nvoc_gpu_halspec_h_disabled +#define __dynamicCast_RmHalspecOwner(pThis) \ + ((RmHalspecOwner*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmHalspecOwner))) +#endif //__nvoc_gpu_halspec_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32, + NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, + RM_RUNTIME_VARIANT RmVariantHal_rmVariant, + NvU32 DispIpHal_ipver); +#define __objCreate_RmHalspecOwner(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver) \ + __nvoc_objCreate_RmHalspecOwner((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver) + +#undef PRIVATE_FIELD + + +#endif // GPU_HALSPEC_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_HALSPEC_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_instance_subscription_nvoc.c b/src/nvidia/generated/g_gpu_instance_subscription_nvoc.c new file mode 100644 index 000000000..1c8358177 --- /dev/null +++ b/src/nvidia/generated/g_gpu_instance_subscription_nvoc.c @@ -0,0 +1,453 @@ +#define NVOC_GPU_INSTANCE_SUBSCRIPTION_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_instance_subscription_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x91fde7 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GPUInstanceSubscription; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_GPUInstanceSubscription(GPUInstanceSubscription*); +void __nvoc_init_funcTable_GPUInstanceSubscription(GPUInstanceSubscription*); +NV_STATUS __nvoc_ctor_GPUInstanceSubscription(GPUInstanceSubscription*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void 
__nvoc_init_dataField_GPUInstanceSubscription(GPUInstanceSubscription*); +void __nvoc_dtor_GPUInstanceSubscription(GPUInstanceSubscription*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GPUInstanceSubscription; + +static const struct NVOC_RTTI __nvoc_rtti_GPUInstanceSubscription_GPUInstanceSubscription = { + /*pClassDef=*/ &__nvoc_class_def_GPUInstanceSubscription, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GPUInstanceSubscription, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GPUInstanceSubscription_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GPUInstanceSubscription, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GPUInstanceSubscription_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GPUInstanceSubscription, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GPUInstanceSubscription_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GPUInstanceSubscription, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GPUInstanceSubscription_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GPUInstanceSubscription, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GPUInstanceSubscription_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GPUInstanceSubscription, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GPUInstanceSubscription = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_GPUInstanceSubscription_GPUInstanceSubscription, + &__nvoc_rtti_GPUInstanceSubscription_GpuResource, + &__nvoc_rtti_GPUInstanceSubscription_RmResource, + &__nvoc_rtti_GPUInstanceSubscription_RmResourceCommon, + &__nvoc_rtti_GPUInstanceSubscription_RsResource, + &__nvoc_rtti_GPUInstanceSubscription_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GPUInstanceSubscription = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GPUInstanceSubscription), + /*classId=*/ classId(GPUInstanceSubscription), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GPUInstanceSubscription", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GPUInstanceSubscription, + /*pCastInfo=*/ &__nvoc_castinfo_GPUInstanceSubscription, + /*pExportInfo=*/ &__nvoc_export_info_GPUInstanceSubscription +}; + +static NvBool __nvoc_thunk_GPUInstanceSubscription_resCanCopy(struct RsResource *arg0) { + return gisubscriptionCanCopy((struct GPUInstanceSubscription *)(((unsigned char *)arg0) - __nvoc_rtti_GPUInstanceSubscription_RsResource.offset)); +} + +static NvBool __nvoc_thunk_GpuResource_gisubscriptionShareCallback(struct GPUInstanceSubscription *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GPUInstanceSubscription_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} 
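+
+// The __nvoc_thunk_* adapters in this file bridge virtual calls between
+// GPUInstanceSubscription and its base classes using the byte offsets recorded
+// in the __nvoc_rtti_GPUInstanceSubscription_* entries (NV_OFFSETOF of each
+// embedded base). A thunk entered through a base-class pointer subtracts its
+// offset to recover the GPUInstanceSubscription (see
+// __nvoc_thunk_GPUInstanceSubscription_resCanCopy), while a thunk entered
+// through the derived object adds the offset to reach the embedded base before
+// calling its implementation.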
+ +static NV_STATUS __nvoc_thunk_GpuResource_gisubscriptionControl(struct GPUInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GPUInstanceSubscription_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_gisubscriptionUnmap(struct GPUInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GPUInstanceSubscription_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_gisubscriptionGetMemInterMapParams(struct GPUInstanceSubscription *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GPUInstanceSubscription_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_gisubscriptionGetMemoryMappingDescriptor(struct GPUInstanceSubscription *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GPUInstanceSubscription_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_gisubscriptionGetMapAddrSpace(struct GPUInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GPUInstanceSubscription_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_gisubscriptionGetInternalObjectHandle(struct GPUInstanceSubscription *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GPUInstanceSubscription_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gisubscriptionControlFilter(struct GPUInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_gisubscriptionAddAdditionalDependants(struct RsClient *pClient, struct GPUInstanceSubscription *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_gisubscriptionGetRefCount(struct GPUInstanceSubscription *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_gisubscriptionCheckMemInterUnmap(struct GPUInstanceSubscription *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GPUInstanceSubscription_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_gisubscriptionMapTo(struct GPUInstanceSubscription *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_GPUInstanceSubscription_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_gisubscriptionControl_Prologue(struct GPUInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_gisubscriptionGetRegBaseOffsetAndSize(struct GPUInstanceSubscription *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GPUInstanceSubscription_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_gisubscriptionInternalControlForward(struct GPUInstanceSubscription *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GPUInstanceSubscription_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_gisubscriptionPreDestruct(struct GPUInstanceSubscription *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gisubscriptionUnmapFrom(struct GPUInstanceSubscription *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_gisubscriptionControl_Epilogue(struct GPUInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_gisubscriptionControlLookup(struct GPUInstanceSubscription *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_gisubscriptionMap(struct GPUInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GPUInstanceSubscription_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_gisubscriptionAccessCallback(struct GPUInstanceSubscription *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GPUInstanceSubscription_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_GPUInstanceSubscription[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
gisubscriptionCtrlCmdExecPartitionsCreate_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc6370101u, + /*paramSize=*/ sizeof(NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GPUInstanceSubscription.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gisubscriptionCtrlCmdExecPartitionsCreate" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) gisubscriptionCtrlCmdExecPartitionsDelete_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc6370102u, + /*paramSize=*/ sizeof(NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GPUInstanceSubscription.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gisubscriptionCtrlCmdExecPartitionsDelete" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) gisubscriptionCtrlCmdExecPartitionsGet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc6370103u, + /*paramSize=*/ sizeof(NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GPUInstanceSubscription.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gisubscriptionCtrlCmdExecPartitionsGet" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) gisubscriptionCtrlCmdExecPartitionsGetActiveIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc6370104u, + /*paramSize=*/ sizeof(NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GPUInstanceSubscription.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gisubscriptionCtrlCmdExecPartitionsGetActiveIds" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) gisubscriptionCtrlCmdExecPartitionsExport_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + /*flags=*/ 0x400u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc6370105u, + /*paramSize=*/ sizeof(NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GPUInstanceSubscription.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gisubscriptionCtrlCmdExecPartitionsExport" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) gisubscriptionCtrlCmdExecPartitionsImport_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + /*flags=*/ 0x400u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc6370106u, + /*paramSize=*/ sizeof(NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GPUInstanceSubscription.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gisubscriptionCtrlCmdExecPartitionsImport" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GPUInstanceSubscription = +{ + /*numEntries=*/ 6, + /*pExportEntries=*/ __nvoc_exported_method_def_GPUInstanceSubscription +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_GPUInstanceSubscription(GPUInstanceSubscription *pThis) { + 
__nvoc_gisubscriptionDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GPUInstanceSubscription(GPUInstanceSubscription *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GPUInstanceSubscription(GPUInstanceSubscription *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GPUInstanceSubscription_fail_GpuResource; + __nvoc_init_dataField_GPUInstanceSubscription(pThis); + + status = __nvoc_gisubscriptionConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GPUInstanceSubscription_fail__init; + goto __nvoc_ctor_GPUInstanceSubscription_exit; // Success + +__nvoc_ctor_GPUInstanceSubscription_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_GPUInstanceSubscription_fail_GpuResource: +__nvoc_ctor_GPUInstanceSubscription_exit: + + return status; +} + +static void __nvoc_init_funcTable_GPUInstanceSubscription_1(GPUInstanceSubscription *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__gisubscriptionCanCopy__ = &gisubscriptionCanCopy_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__gisubscriptionCtrlCmdExecPartitionsCreate__ = &gisubscriptionCtrlCmdExecPartitionsCreate_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__gisubscriptionCtrlCmdExecPartitionsDelete__ = &gisubscriptionCtrlCmdExecPartitionsDelete_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__gisubscriptionCtrlCmdExecPartitionsGet__ = &gisubscriptionCtrlCmdExecPartitionsGet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__gisubscriptionCtrlCmdExecPartitionsGetActiveIds__ = &gisubscriptionCtrlCmdExecPartitionsGetActiveIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + pThis->__gisubscriptionCtrlCmdExecPartitionsExport__ = &gisubscriptionCtrlCmdExecPartitionsExport_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + pThis->__gisubscriptionCtrlCmdExecPartitionsImport__ = &gisubscriptionCtrlCmdExecPartitionsImport_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_GPUInstanceSubscription_resCanCopy; + + pThis->__gisubscriptionShareCallback__ = &__nvoc_thunk_GpuResource_gisubscriptionShareCallback; + + pThis->__gisubscriptionControl__ = &__nvoc_thunk_GpuResource_gisubscriptionControl; + + pThis->__gisubscriptionUnmap__ = &__nvoc_thunk_GpuResource_gisubscriptionUnmap; + + pThis->__gisubscriptionGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gisubscriptionGetMemInterMapParams; + + pThis->__gisubscriptionGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gisubscriptionGetMemoryMappingDescriptor; + + pThis->__gisubscriptionGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_gisubscriptionGetMapAddrSpace; + + pThis->__gisubscriptionGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_gisubscriptionGetInternalObjectHandle; + + pThis->__gisubscriptionControlFilter__ = &__nvoc_thunk_RsResource_gisubscriptionControlFilter; + + pThis->__gisubscriptionAddAdditionalDependants__ = 
&__nvoc_thunk_RsResource_gisubscriptionAddAdditionalDependants; + + pThis->__gisubscriptionGetRefCount__ = &__nvoc_thunk_RsResource_gisubscriptionGetRefCount; + + pThis->__gisubscriptionCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_gisubscriptionCheckMemInterUnmap; + + pThis->__gisubscriptionMapTo__ = &__nvoc_thunk_RsResource_gisubscriptionMapTo; + + pThis->__gisubscriptionControl_Prologue__ = &__nvoc_thunk_RmResource_gisubscriptionControl_Prologue; + + pThis->__gisubscriptionGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_gisubscriptionGetRegBaseOffsetAndSize; + + pThis->__gisubscriptionInternalControlForward__ = &__nvoc_thunk_GpuResource_gisubscriptionInternalControlForward; + + pThis->__gisubscriptionPreDestruct__ = &__nvoc_thunk_RsResource_gisubscriptionPreDestruct; + + pThis->__gisubscriptionUnmapFrom__ = &__nvoc_thunk_RsResource_gisubscriptionUnmapFrom; + + pThis->__gisubscriptionControl_Epilogue__ = &__nvoc_thunk_RmResource_gisubscriptionControl_Epilogue; + + pThis->__gisubscriptionControlLookup__ = &__nvoc_thunk_RsResource_gisubscriptionControlLookup; + + pThis->__gisubscriptionMap__ = &__nvoc_thunk_GpuResource_gisubscriptionMap; + + pThis->__gisubscriptionAccessCallback__ = &__nvoc_thunk_RmResource_gisubscriptionAccessCallback; +} + +void __nvoc_init_funcTable_GPUInstanceSubscription(GPUInstanceSubscription *pThis) { + __nvoc_init_funcTable_GPUInstanceSubscription_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_GPUInstanceSubscription(GPUInstanceSubscription *pThis) { + pThis->__nvoc_pbase_GPUInstanceSubscription = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_GPUInstanceSubscription(pThis); +} + +NV_STATUS __nvoc_objCreate_GPUInstanceSubscription(GPUInstanceSubscription **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + GPUInstanceSubscription *pThis; + + pThis = portMemAllocNonPaged(sizeof(GPUInstanceSubscription)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GPUInstanceSubscription)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GPUInstanceSubscription); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GPUInstanceSubscription(pThis); + status = __nvoc_ctor_GPUInstanceSubscription(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GPUInstanceSubscription_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GPUInstanceSubscription_cleanup: + // do not call destructors here since the constructor already 
called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GPUInstanceSubscription(GPUInstanceSubscription **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GPUInstanceSubscription(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_instance_subscription_nvoc.h b/src/nvidia/generated/g_gpu_instance_subscription_nvoc.h new file mode 100644 index 000000000..c82bbd70e --- /dev/null +++ b/src/nvidia/generated/g_gpu_instance_subscription_nvoc.h @@ -0,0 +1,325 @@ +#ifndef _G_GPU_INSTANCE_SUBSCRIPTION_NVOC_H_ +#define _G_GPU_INSTANCE_SUBSCRIPTION_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing GPU instance subscriptions + * + *****************************************************************************/ + +#include "g_gpu_instance_subscription_nvoc.h" + +#ifndef GPU_INSTANCE_SUBSCRIPTION_H +#define GPU_INSTANCE_SUBSCRIPTION_H + +#include "class/clc637.h" +#include "gpu/gpu_resource.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +#ifdef NVOC_GPU_INSTANCE_SUBSCRIPTION_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GPUInstanceSubscription { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct GPUInstanceSubscription *__nvoc_pbase_GPUInstanceSubscription; + NvBool (*__gisubscriptionCanCopy__)(struct GPUInstanceSubscription *); + NV_STATUS (*__gisubscriptionCtrlCmdExecPartitionsCreate__)(struct GPUInstanceSubscription *, NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS *); + NV_STATUS (*__gisubscriptionCtrlCmdExecPartitionsDelete__)(struct GPUInstanceSubscription *, NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS *); + NV_STATUS (*__gisubscriptionCtrlCmdExecPartitionsGet__)(struct GPUInstanceSubscription *, NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS *); + NV_STATUS (*__gisubscriptionCtrlCmdExecPartitionsGetActiveIds__)(struct GPUInstanceSubscription *, NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS *); + NV_STATUS (*__gisubscriptionCtrlCmdExecPartitionsExport__)(struct GPUInstanceSubscription *, NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *); + NV_STATUS (*__gisubscriptionCtrlCmdExecPartitionsImport__)(struct GPUInstanceSubscription *, NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *); + NvBool (*__gisubscriptionShareCallback__)(struct GPUInstanceSubscription *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__gisubscriptionControl__)(struct GPUInstanceSubscription *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gisubscriptionUnmap__)(struct GPUInstanceSubscription *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__gisubscriptionGetMemInterMapParams__)(struct GPUInstanceSubscription *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__gisubscriptionGetMemoryMappingDescriptor__)(struct GPUInstanceSubscription *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__gisubscriptionGetMapAddrSpace__)(struct GPUInstanceSubscription *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__gisubscriptionGetInternalObjectHandle__)(struct GPUInstanceSubscription *); + NV_STATUS (*__gisubscriptionControlFilter__)(struct GPUInstanceSubscription *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__gisubscriptionAddAdditionalDependants__)(struct RsClient *, struct GPUInstanceSubscription *, RsResourceRef *); + NvU32 (*__gisubscriptionGetRefCount__)(struct GPUInstanceSubscription *); + NV_STATUS (*__gisubscriptionCheckMemInterUnmap__)(struct GPUInstanceSubscription *, NvBool); + NV_STATUS 
(*__gisubscriptionMapTo__)(struct GPUInstanceSubscription *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__gisubscriptionControl_Prologue__)(struct GPUInstanceSubscription *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gisubscriptionGetRegBaseOffsetAndSize__)(struct GPUInstanceSubscription *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__gisubscriptionInternalControlForward__)(struct GPUInstanceSubscription *, NvU32, void *, NvU32); + void (*__gisubscriptionPreDestruct__)(struct GPUInstanceSubscription *); + NV_STATUS (*__gisubscriptionUnmapFrom__)(struct GPUInstanceSubscription *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__gisubscriptionControl_Epilogue__)(struct GPUInstanceSubscription *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gisubscriptionControlLookup__)(struct GPUInstanceSubscription *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__gisubscriptionMap__)(struct GPUInstanceSubscription *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__gisubscriptionAccessCallback__)(struct GPUInstanceSubscription *, struct RsClient *, void *, RsAccessRight); + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance; + NvBool bDeviceProfiling; + NvBool bIsDuped; + NvU64 dupedCapDescriptor; +}; + +#ifndef __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ +#define __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ +typedef struct GPUInstanceSubscription GPUInstanceSubscription; +#endif /* __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GPUInstanceSubscription +#define __nvoc_class_id_GPUInstanceSubscription 0x91fde7 +#endif /* __nvoc_class_id_GPUInstanceSubscription */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GPUInstanceSubscription; + +#define __staticCast_GPUInstanceSubscription(pThis) \ + ((pThis)->__nvoc_pbase_GPUInstanceSubscription) + +#ifdef __nvoc_gpu_instance_subscription_h_disabled +#define __dynamicCast_GPUInstanceSubscription(pThis) ((GPUInstanceSubscription*)NULL) +#else //__nvoc_gpu_instance_subscription_h_disabled +#define __dynamicCast_GPUInstanceSubscription(pThis) \ + ((GPUInstanceSubscription*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GPUInstanceSubscription))) +#endif //__nvoc_gpu_instance_subscription_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GPUInstanceSubscription(GPUInstanceSubscription**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GPUInstanceSubscription(GPUInstanceSubscription**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_GPUInstanceSubscription(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GPUInstanceSubscription((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define gisubscriptionCanCopy(arg0) gisubscriptionCanCopy_DISPATCH(arg0) +#define gisubscriptionCtrlCmdExecPartitionsCreate(arg0, arg1) gisubscriptionCtrlCmdExecPartitionsCreate_DISPATCH(arg0, arg1) +#define gisubscriptionCtrlCmdExecPartitionsDelete(arg0, arg1) gisubscriptionCtrlCmdExecPartitionsDelete_DISPATCH(arg0, arg1) +#define gisubscriptionCtrlCmdExecPartitionsGet(arg0, arg1) gisubscriptionCtrlCmdExecPartitionsGet_DISPATCH(arg0, arg1) +#define gisubscriptionCtrlCmdExecPartitionsGetActiveIds(arg0, arg1) gisubscriptionCtrlCmdExecPartitionsGetActiveIds_DISPATCH(arg0, arg1) +#define 
gisubscriptionCtrlCmdExecPartitionsExport(arg0, arg1) gisubscriptionCtrlCmdExecPartitionsExport_DISPATCH(arg0, arg1) +#define gisubscriptionCtrlCmdExecPartitionsImport(arg0, arg1) gisubscriptionCtrlCmdExecPartitionsImport_DISPATCH(arg0, arg1) +#define gisubscriptionShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) gisubscriptionShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define gisubscriptionControl(pGpuResource, pCallContext, pParams) gisubscriptionControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define gisubscriptionUnmap(pGpuResource, pCallContext, pCpuMapping) gisubscriptionUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define gisubscriptionGetMemInterMapParams(pRmResource, pParams) gisubscriptionGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define gisubscriptionGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gisubscriptionGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define gisubscriptionGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) gisubscriptionGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define gisubscriptionGetInternalObjectHandle(pGpuResource) gisubscriptionGetInternalObjectHandle_DISPATCH(pGpuResource) +#define gisubscriptionControlFilter(pResource, pCallContext, pParams) gisubscriptionControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define gisubscriptionAddAdditionalDependants(pClient, pResource, pReference) gisubscriptionAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define gisubscriptionGetRefCount(pResource) gisubscriptionGetRefCount_DISPATCH(pResource) +#define gisubscriptionCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gisubscriptionCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define gisubscriptionMapTo(pResource, pParams) gisubscriptionMapTo_DISPATCH(pResource, pParams) +#define gisubscriptionControl_Prologue(pResource, pCallContext, pParams) gisubscriptionControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gisubscriptionGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) gisubscriptionGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define gisubscriptionInternalControlForward(pGpuResource, command, pParams, size) gisubscriptionInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define gisubscriptionPreDestruct(pResource) gisubscriptionPreDestruct_DISPATCH(pResource) +#define gisubscriptionUnmapFrom(pResource, pParams) gisubscriptionUnmapFrom_DISPATCH(pResource, pParams) +#define gisubscriptionControl_Epilogue(pResource, pCallContext, pParams) gisubscriptionControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gisubscriptionControlLookup(pResource, pParams, ppEntry) gisubscriptionControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define gisubscriptionMap(pGpuResource, pCallContext, pParams, pCpuMapping) gisubscriptionMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define gisubscriptionAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gisubscriptionAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool gisubscriptionCanCopy_IMPL(struct GPUInstanceSubscription *arg0); + +static inline NvBool gisubscriptionCanCopy_DISPATCH(struct GPUInstanceSubscription *arg0) { + return arg0->__gisubscriptionCanCopy__(arg0); +} + +NV_STATUS gisubscriptionCtrlCmdExecPartitionsCreate_IMPL(struct GPUInstanceSubscription 
*arg0, NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS *arg1); + +static inline NV_STATUS gisubscriptionCtrlCmdExecPartitionsCreate_DISPATCH(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS *arg1) { + return arg0->__gisubscriptionCtrlCmdExecPartitionsCreate__(arg0, arg1); +} + +NV_STATUS gisubscriptionCtrlCmdExecPartitionsDelete_IMPL(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS *arg1); + +static inline NV_STATUS gisubscriptionCtrlCmdExecPartitionsDelete_DISPATCH(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS *arg1) { + return arg0->__gisubscriptionCtrlCmdExecPartitionsDelete__(arg0, arg1); +} + +NV_STATUS gisubscriptionCtrlCmdExecPartitionsGet_IMPL(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS *arg1); + +static inline NV_STATUS gisubscriptionCtrlCmdExecPartitionsGet_DISPATCH(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS *arg1) { + return arg0->__gisubscriptionCtrlCmdExecPartitionsGet__(arg0, arg1); +} + +NV_STATUS gisubscriptionCtrlCmdExecPartitionsGetActiveIds_IMPL(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS *arg1); + +static inline NV_STATUS gisubscriptionCtrlCmdExecPartitionsGetActiveIds_DISPATCH(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS *arg1) { + return arg0->__gisubscriptionCtrlCmdExecPartitionsGetActiveIds__(arg0, arg1); +} + +NV_STATUS gisubscriptionCtrlCmdExecPartitionsExport_IMPL(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *arg1); + +static inline NV_STATUS gisubscriptionCtrlCmdExecPartitionsExport_DISPATCH(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *arg1) { + return arg0->__gisubscriptionCtrlCmdExecPartitionsExport__(arg0, arg1); +} + +NV_STATUS gisubscriptionCtrlCmdExecPartitionsImport_IMPL(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *arg1); + +static inline NV_STATUS gisubscriptionCtrlCmdExecPartitionsImport_DISPATCH(struct GPUInstanceSubscription *arg0, NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *arg1) { + return arg0->__gisubscriptionCtrlCmdExecPartitionsImport__(arg0, arg1); +} + +static inline NvBool gisubscriptionShareCallback_DISPATCH(struct GPUInstanceSubscription *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__gisubscriptionShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS gisubscriptionControl_DISPATCH(struct GPUInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__gisubscriptionControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS gisubscriptionUnmap_DISPATCH(struct GPUInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__gisubscriptionUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS gisubscriptionGetMemInterMapParams_DISPATCH(struct GPUInstanceSubscription *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__gisubscriptionGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS gisubscriptionGetMemoryMappingDescriptor_DISPATCH(struct GPUInstanceSubscription *pRmResource, struct 
MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__gisubscriptionGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS gisubscriptionGetMapAddrSpace_DISPATCH(struct GPUInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__gisubscriptionGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle gisubscriptionGetInternalObjectHandle_DISPATCH(struct GPUInstanceSubscription *pGpuResource) { + return pGpuResource->__gisubscriptionGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS gisubscriptionControlFilter_DISPATCH(struct GPUInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gisubscriptionControlFilter__(pResource, pCallContext, pParams); +} + +static inline void gisubscriptionAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GPUInstanceSubscription *pResource, RsResourceRef *pReference) { + pResource->__gisubscriptionAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 gisubscriptionGetRefCount_DISPATCH(struct GPUInstanceSubscription *pResource) { + return pResource->__gisubscriptionGetRefCount__(pResource); +} + +static inline NV_STATUS gisubscriptionCheckMemInterUnmap_DISPATCH(struct GPUInstanceSubscription *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__gisubscriptionCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS gisubscriptionMapTo_DISPATCH(struct GPUInstanceSubscription *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__gisubscriptionMapTo__(pResource, pParams); +} + +static inline NV_STATUS gisubscriptionControl_Prologue_DISPATCH(struct GPUInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gisubscriptionControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gisubscriptionGetRegBaseOffsetAndSize_DISPATCH(struct GPUInstanceSubscription *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__gisubscriptionGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS gisubscriptionInternalControlForward_DISPATCH(struct GPUInstanceSubscription *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__gisubscriptionInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void gisubscriptionPreDestruct_DISPATCH(struct GPUInstanceSubscription *pResource) { + pResource->__gisubscriptionPreDestruct__(pResource); +} + +static inline NV_STATUS gisubscriptionUnmapFrom_DISPATCH(struct GPUInstanceSubscription *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__gisubscriptionUnmapFrom__(pResource, pParams); +} + +static inline void gisubscriptionControl_Epilogue_DISPATCH(struct GPUInstanceSubscription *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__gisubscriptionControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gisubscriptionControlLookup_DISPATCH(struct GPUInstanceSubscription *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__gisubscriptionControlLookup__(pResource, 
pParams, ppEntry); +} + +static inline NV_STATUS gisubscriptionMap_DISPATCH(struct GPUInstanceSubscription *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__gisubscriptionMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool gisubscriptionAccessCallback_DISPATCH(struct GPUInstanceSubscription *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__gisubscriptionAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvBool gisubscriptionIsDeviceProfiling(struct GPUInstanceSubscription *pGPUInstanceSubscription) { + return pGPUInstanceSubscription->bDeviceProfiling; +} + +NV_STATUS gisubscriptionGetGPUInstanceSubscription_IMPL(struct RsClient *arg0, NvHandle arg1, struct GPUInstanceSubscription **arg2); +#define gisubscriptionGetGPUInstanceSubscription(arg0, arg1, arg2) gisubscriptionGetGPUInstanceSubscription_IMPL(arg0, arg1, arg2) +NvBool gisubscriptionShouldClassBeFreedOnUnsubscribe_IMPL(NvU32 internalClassId); +#define gisubscriptionShouldClassBeFreedOnUnsubscribe(internalClassId) gisubscriptionShouldClassBeFreedOnUnsubscribe_IMPL(internalClassId) +void gisubscriptionCleanupOnUnsubscribe_IMPL(CALL_CONTEXT *arg0); +#define gisubscriptionCleanupOnUnsubscribe(arg0) gisubscriptionCleanupOnUnsubscribe_IMPL(arg0) +NV_STATUS gisubscriptionConstruct_IMPL(struct GPUInstanceSubscription *arg_pGPUInstanceSubscription, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_gisubscriptionConstruct(arg_pGPUInstanceSubscription, arg_pCallContext, arg_pParams) gisubscriptionConstruct_IMPL(arg_pGPUInstanceSubscription, arg_pCallContext, arg_pParams) +NV_STATUS gisubscriptionCopyConstruct_IMPL(struct GPUInstanceSubscription *arg0, CALL_CONTEXT *arg1, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg2); +#ifdef __nvoc_gpu_instance_subscription_h_disabled +static inline NV_STATUS gisubscriptionCopyConstruct(struct GPUInstanceSubscription *arg0, CALL_CONTEXT *arg1, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg2) { + NV_ASSERT_FAILED_PRECOMP("GPUInstanceSubscription was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_instance_subscription_h_disabled +#define gisubscriptionCopyConstruct(arg0, arg1, arg2) gisubscriptionCopyConstruct_IMPL(arg0, arg1, arg2) +#endif //__nvoc_gpu_instance_subscription_h_disabled + +void gisubscriptionDestruct_IMPL(struct GPUInstanceSubscription *arg0); +#define __nvoc_gisubscriptionDestruct(arg0) gisubscriptionDestruct_IMPL(arg0) +NvBool gisubscriptionIsDuped_IMPL(struct GPUInstanceSubscription *arg0); +#ifdef __nvoc_gpu_instance_subscription_h_disabled +static inline NvBool gisubscriptionIsDuped(struct GPUInstanceSubscription *arg0) { + NV_ASSERT_FAILED_PRECOMP("GPUInstanceSubscription was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_instance_subscription_h_disabled +#define gisubscriptionIsDuped(arg0) gisubscriptionIsDuped_IMPL(arg0) +#endif //__nvoc_gpu_instance_subscription_h_disabled + +#undef PRIVATE_FIELD + + +#endif // GPU_INSTANCE_SUBSCRIPTION_H + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_INSTANCE_SUBSCRIPTION_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c b/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c new file mode 100644 index 000000000..2e30ec588 --- /dev/null +++ b/src/nvidia/generated/g_gpu_mgmt_api_nvoc.c @@ -0,0 +1,322 @@ +#define 
NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_mgmt_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x376305 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_GpuManagementApi(GpuManagementApi*); +void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi*); +NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi*); +void __nvoc_dtor_GpuManagementApi(GpuManagementApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuManagementApi; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_GpuManagementApi = { + /*pClassDef=*/ &__nvoc_class_def_GpuManagementApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuManagementApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GpuManagementApi = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_GpuManagementApi_GpuManagementApi, + &__nvoc_rtti_GpuManagementApi_RmResource, + &__nvoc_rtti_GpuManagementApi_RmResourceCommon, + &__nvoc_rtti_GpuManagementApi_RsResource, + &__nvoc_rtti_GpuManagementApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuManagementApi), + /*classId=*/ classId(GpuManagementApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuManagementApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuManagementApi, + /*pCastInfo=*/ &__nvoc_castinfo_GpuManagementApi, + /*pExportInfo=*/ &__nvoc_export_info_GpuManagementApi +}; + +static NvBool __nvoc_thunk_RmResource_gpumgmtapiShareCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_GpuManagementApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiCheckMemInterUnmap(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControl(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiGetMemInterMapParams(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_gpumgmtapiGetRefCount(struct GpuManagementApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControlFilter(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_gpumgmtapiAddAdditionalDependants(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiUnmap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiControl_Prologue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_gpumgmtapiCanCopy(struct GpuManagementApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiMapTo(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_gpumgmtapiPreDestruct(struct GpuManagementApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned 
char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiUnmapFrom(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_gpumgmtapiControl_Epilogue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControlLookup(struct GpuManagementApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiMap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_gpumgmtapiAccessCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_GpuManagementApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) gpumgmtapiCtrlCmdSetShutdownState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x200101u, + /*paramSize=*/ sizeof(NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_GpuManagementApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "gpumgmtapiCtrlCmdSetShutdownState" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuManagementApi = +{ + /*numEntries=*/ 1, + /*pExportEntries=*/ __nvoc_exported_method_def_GpuManagementApi +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_GpuManagementApi(GpuManagementApi *pThis) { + __nvoc_gpumgmtapiDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail_RmResource; + __nvoc_init_dataField_GpuManagementApi(pThis); + + status = 
__nvoc_gpumgmtapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail__init; + goto __nvoc_ctor_GpuManagementApi_exit; // Success + +__nvoc_ctor_GpuManagementApi_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_GpuManagementApi_fail_RmResource: +__nvoc_ctor_GpuManagementApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_GpuManagementApi_1(GpuManagementApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__gpumgmtapiCtrlCmdSetShutdownState__ = &gpumgmtapiCtrlCmdSetShutdownState_IMPL; +#endif + + pThis->__gpumgmtapiShareCallback__ = &__nvoc_thunk_RmResource_gpumgmtapiShareCallback; + + pThis->__gpumgmtapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_gpumgmtapiCheckMemInterUnmap; + + pThis->__gpumgmtapiControl__ = &__nvoc_thunk_RsResource_gpumgmtapiControl; + + pThis->__gpumgmtapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gpumgmtapiGetMemInterMapParams; + + pThis->__gpumgmtapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor; + + pThis->__gpumgmtapiGetRefCount__ = &__nvoc_thunk_RsResource_gpumgmtapiGetRefCount; + + pThis->__gpumgmtapiControlFilter__ = &__nvoc_thunk_RsResource_gpumgmtapiControlFilter; + + pThis->__gpumgmtapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_gpumgmtapiAddAdditionalDependants; + + pThis->__gpumgmtapiUnmap__ = &__nvoc_thunk_RsResource_gpumgmtapiUnmap; + + pThis->__gpumgmtapiControl_Prologue__ = &__nvoc_thunk_RmResource_gpumgmtapiControl_Prologue; + + pThis->__gpumgmtapiCanCopy__ = &__nvoc_thunk_RsResource_gpumgmtapiCanCopy; + + pThis->__gpumgmtapiMapTo__ = &__nvoc_thunk_RsResource_gpumgmtapiMapTo; + + pThis->__gpumgmtapiPreDestruct__ = &__nvoc_thunk_RsResource_gpumgmtapiPreDestruct; + + pThis->__gpumgmtapiUnmapFrom__ = &__nvoc_thunk_RsResource_gpumgmtapiUnmapFrom; + + pThis->__gpumgmtapiControl_Epilogue__ = &__nvoc_thunk_RmResource_gpumgmtapiControl_Epilogue; + + pThis->__gpumgmtapiControlLookup__ = &__nvoc_thunk_RsResource_gpumgmtapiControlLookup; + + pThis->__gpumgmtapiMap__ = &__nvoc_thunk_RsResource_gpumgmtapiMap; + + pThis->__gpumgmtapiAccessCallback__ = &__nvoc_thunk_RmResource_gpumgmtapiAccessCallback; +} + +void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi *pThis) { + __nvoc_init_funcTable_GpuManagementApi_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_GpuManagementApi(GpuManagementApi *pThis) { + pThis->__nvoc_pbase_GpuManagementApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_GpuManagementApi(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + GpuManagementApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(GpuManagementApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GpuManagementApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), 
&__nvoc_class_def_GpuManagementApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GpuManagementApi(pThis); + status = __nvoc_ctor_GpuManagementApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GpuManagementApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GpuManagementApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GpuManagementApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h b/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h new file mode 100644 index 000000000..72e3eba78 --- /dev/null +++ b/src/nvidia/generated/g_gpu_mgmt_api_nvoc.h @@ -0,0 +1,221 @@ +#ifndef _G_GPU_MGMT_API_NVOC_H_ +#define _G_GPU_MGMT_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_gpu_mgmt_api_nvoc.h" + +#ifndef GPU_MGMT_API_H +#define GPU_MGMT_API_H + +#include "rmapi/resource.h" +#include "ctrl/ctrl0020.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +// +// GpuManagementApi class information +// +// This is a global GPU class that will help us to route IOCTLs to probed +// and persistent GPU state +// + +#ifdef NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GpuManagementApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuManagementApi *__nvoc_pbase_GpuManagementApi; + NV_STATUS (*__gpumgmtapiCtrlCmdSetShutdownState__)(struct GpuManagementApi *, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *); + NvBool (*__gpumgmtapiShareCallback__)(struct GpuManagementApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__gpumgmtapiCheckMemInterUnmap__)(struct GpuManagementApi *, NvBool); + NV_STATUS (*__gpumgmtapiControl__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gpumgmtapiGetMemInterMapParams__)(struct GpuManagementApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__gpumgmtapiGetMemoryMappingDescriptor__)(struct GpuManagementApi *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__gpumgmtapiGetRefCount__)(struct GpuManagementApi *); + NV_STATUS (*__gpumgmtapiControlFilter__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__gpumgmtapiAddAdditionalDependants__)(struct RsClient *, struct GpuManagementApi *, RsResourceRef *); + NV_STATUS (*__gpumgmtapiUnmap__)(struct GpuManagementApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__gpumgmtapiControl_Prologue__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__gpumgmtapiCanCopy__)(struct GpuManagementApi *); + NV_STATUS (*__gpumgmtapiMapTo__)(struct GpuManagementApi *, RS_RES_MAP_TO_PARAMS *); + void (*__gpumgmtapiPreDestruct__)(struct GpuManagementApi *); + NV_STATUS (*__gpumgmtapiUnmapFrom__)(struct GpuManagementApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__gpumgmtapiControl_Epilogue__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gpumgmtapiControlLookup__)(struct GpuManagementApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__gpumgmtapiMap__)(struct GpuManagementApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__gpumgmtapiAccessCallback__)(struct GpuManagementApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +#define __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +typedef struct GpuManagementApi GpuManagementApi; +#endif /* __NVOC_CLASS_GpuManagementApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuManagementApi +#define __nvoc_class_id_GpuManagementApi 0x376305 +#endif /* __nvoc_class_id_GpuManagementApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi; + +#define __staticCast_GpuManagementApi(pThis) \ + 
((pThis)->__nvoc_pbase_GpuManagementApi) + +#ifdef __nvoc_gpu_mgmt_api_h_disabled +#define __dynamicCast_GpuManagementApi(pThis) ((GpuManagementApi*)NULL) +#else //__nvoc_gpu_mgmt_api_h_disabled +#define __dynamicCast_GpuManagementApi(pThis) \ + ((GpuManagementApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuManagementApi))) +#endif //__nvoc_gpu_mgmt_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_GpuManagementApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GpuManagementApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define gpumgmtapiCtrlCmdSetShutdownState(pGpuMgmt, pParams) gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(pGpuMgmt, pParams) +#define gpumgmtapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) gpumgmtapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define gpumgmtapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpumgmtapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define gpumgmtapiControl(pResource, pCallContext, pParams) gpumgmtapiControl_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiGetMemInterMapParams(pRmResource, pParams) gpumgmtapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define gpumgmtapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define gpumgmtapiGetRefCount(pResource) gpumgmtapiGetRefCount_DISPATCH(pResource) +#define gpumgmtapiControlFilter(pResource, pCallContext, pParams) gpumgmtapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiAddAdditionalDependants(pClient, pResource, pReference) gpumgmtapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define gpumgmtapiUnmap(pResource, pCallContext, pCpuMapping) gpumgmtapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define gpumgmtapiControl_Prologue(pResource, pCallContext, pParams) gpumgmtapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiCanCopy(pResource) gpumgmtapiCanCopy_DISPATCH(pResource) +#define gpumgmtapiMapTo(pResource, pParams) gpumgmtapiMapTo_DISPATCH(pResource, pParams) +#define gpumgmtapiPreDestruct(pResource) gpumgmtapiPreDestruct_DISPATCH(pResource) +#define gpumgmtapiUnmapFrom(pResource, pParams) gpumgmtapiUnmapFrom_DISPATCH(pResource, pParams) +#define gpumgmtapiControl_Epilogue(pResource, pCallContext, pParams) gpumgmtapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gpumgmtapiControlLookup(pResource, pParams, ppEntry) gpumgmtapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define gpumgmtapiMap(pResource, pCallContext, pParams, pCpuMapping) gpumgmtapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define gpumgmtapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpumgmtapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_IMPL(struct GpuManagementApi *pGpuMgmt, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams); + +static inline NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(struct GpuManagementApi *pGpuMgmt, 
NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams) { + return pGpuMgmt->__gpumgmtapiCtrlCmdSetShutdownState__(pGpuMgmt, pParams); +} + +static inline NvBool gpumgmtapiShareCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__gpumgmtapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS gpumgmtapiCheckMemInterUnmap_DISPATCH(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__gpumgmtapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS gpumgmtapiControl_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpumgmtapiControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gpumgmtapiGetMemInterMapParams_DISPATCH(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__gpumgmtapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__gpumgmtapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 gpumgmtapiGetRefCount_DISPATCH(struct GpuManagementApi *pResource) { + return pResource->__gpumgmtapiGetRefCount__(pResource); +} + +static inline NV_STATUS gpumgmtapiControlFilter_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpumgmtapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void gpumgmtapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) { + pResource->__gpumgmtapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS gpumgmtapiUnmap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__gpumgmtapiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS gpumgmtapiControl_Prologue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpumgmtapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool gpumgmtapiCanCopy_DISPATCH(struct GpuManagementApi *pResource) { + return pResource->__gpumgmtapiCanCopy__(pResource); +} + +static inline NV_STATUS gpumgmtapiMapTo_DISPATCH(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__gpumgmtapiMapTo__(pResource, pParams); +} + +static inline void gpumgmtapiPreDestruct_DISPATCH(struct GpuManagementApi *pResource) { + pResource->__gpumgmtapiPreDestruct__(pResource); +} + +static inline NV_STATUS gpumgmtapiUnmapFrom_DISPATCH(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__gpumgmtapiUnmapFrom__(pResource, pParams); +} + +static inline void gpumgmtapiControl_Epilogue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__gpumgmtapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS 
gpumgmtapiControlLookup_DISPATCH(struct GpuManagementApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__gpumgmtapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS gpumgmtapiMap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__gpumgmtapiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool gpumgmtapiAccessCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__gpumgmtapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS gpumgmtapiConstruct_IMPL(struct GpuManagementApi *arg_pGpuMgmt, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_gpumgmtapiConstruct(arg_pGpuMgmt, arg_pCallContext, arg_pParams) gpumgmtapiConstruct_IMPL(arg_pGpuMgmt, arg_pCallContext, arg_pParams) +void gpumgmtapiDestruct_IMPL(struct GpuManagementApi *pGpuMgmt); +#define __nvoc_gpumgmtapiDestruct(pGpuMgmt) gpumgmtapiDestruct_IMPL(pGpuMgmt) +#undef PRIVATE_FIELD + + +#endif // GPU_MGMT_API_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_MGMT_API_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_mgr_nvoc.c b/src/nvidia/generated/g_gpu_mgr_nvoc.c new file mode 100644 index 000000000..8f939ace5 --- /dev/null +++ b/src/nvidia/generated/g_gpu_mgr_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_GPU_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xcf1b25 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJGPUMGR(OBJGPUMGR*); +void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR*); +NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR*); +void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR*); +void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMGR; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMGR_OBJGPUMGR = { + /*pClassDef=*/ &__nvoc_class_def_OBJGPUMGR, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUMGR, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMGR_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPUMGR, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUMGR = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJGPUMGR_OBJGPUMGR, + &__nvoc_rtti_OBJGPUMGR_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPUMGR), + /*classId=*/ classId(OBJGPUMGR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPUMGR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUMGR, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGPUMGR, + /*pExportInfo=*/ &__nvoc_export_info_OBJGPUMGR +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMGR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void 
__nvoc_dtor_OBJGPUMGR(OBJGPUMGR *pThis) { + __nvoc_gpumgrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail_Object; + __nvoc_init_dataField_OBJGPUMGR(pThis); + + status = __nvoc_gpumgrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail__init; + goto __nvoc_ctor_OBJGPUMGR_exit; // Success + +__nvoc_ctor_OBJGPUMGR_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJGPUMGR_fail_Object: +__nvoc_ctor_OBJGPUMGR_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGPUMGR_1(OBJGPUMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR *pThis) { + __nvoc_init_funcTable_OBJGPUMGR_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJGPUMGR(OBJGPUMGR *pThis) { + pThis->__nvoc_pbase_OBJGPUMGR = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJGPUMGR(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJGPUMGR *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJGPUMGR)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGPUMGR)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUMGR); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJGPUMGR(pThis); + status = __nvoc_ctor_OBJGPUMGR(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPUMGR_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJGPUMGR_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGPUMGR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_mgr_nvoc.h b/src/nvidia/generated/g_gpu_mgr_nvoc.h new file mode 100644 index 000000000..51589d7ea --- /dev/null +++ b/src/nvidia/generated/g_gpu_mgr_nvoc.h @@ -0,0 +1,462 @@ +#ifndef _G_GPU_MGR_NVOC_H_ +#define _G_GPU_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_mgr_nvoc.h" + +#ifndef _GPUMGR_H_ +#define _GPUMGR_H_ + +// +// GPU Manager Defines and Structures +// + +struct OBJGPU; +#include "core/core.h" +#include "core/system.h" +#include "nvlimits.h" +#include "gpu_mgr/gpu_group.h" +#include "gpu/gpu_uuid.h" +#include "gpu/gpu_device_mapping.h" +#include "gpu/gpu_access.h" +#include "ctrl/ctrl0000/ctrl0000gpu.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +#include "ctrl/ctrlc637.h" +#include "nvoc/utility.h" +#include "nv_firmware_types.h" + +#include "gpu/perf/kern_perf_gpuboostsync.h" + +#include "class/cl2080.h" // NV2080_ENGINE_TYPE_* + +#include "utils/nvbitvector.h" +TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR); + +#define GPUMGR_MAX_GPU_INSTANCES 8 +#define GPUMGR_MAX_COMPUTE_INSTANCES 8 + +MAKE_BITVECTOR(ENGTYPE_BIT_VECTOR, NV2080_ENGINE_TYPE_LAST); +typedef ENGTYPE_BIT_VECTOR *PENGTYPE_BIT_VECTOR; + +// +// Terminology: +// GPU -> entity sitting on the bus +// Device -> broadcast semantics; maps to one or more GPUs +// Subdevice -> unicast semantics; maps to a single GPU +// + + +//////////////////////////////////////////////////////////////////////////////// +// DO NOT ADD NEW STUBS HERE // +//////////////////////////////////////////////////////////////////////////////// +#define gpumgrGetGpuLinkCount(deviceInstance) ((NvU32) 0) +#define gpumgrGetSliLinkOutputMaskFromGpu(pGpu) ((NvU32) 0) +#define gpumgrGetVidLinkOutputMaskFromGpu(pGpu) ((NvU32) 0) +#define gpumgrGetSliLinkOrderCount(pGpu) ((NvU32) 0) +#define gpumgrGetSliLinkConnectionCount(pGpu) ((NvU32) 0) +#define gpumgrGetSLIConfig(gpuInstance, onlyWithSliLink) ((NvU32) 0) +#define gpumgrDisableVidLink(pGpu, head, max_dr_port) +#define gpumgrGetGpuVidLinkMaxPixelClock(pGpu, pMaxPclkMhz) (NV_ERR_NOT_SUPPORTED) +#define gpumgrPinsetToPinsetTableIndex(pinset, pPinsetIndex) (NV_ERR_NOT_SUPPORTED) +#define gpumgrGetBcEnabledStatus(g) (NV_FALSE) +#define gpumgrGetBcEnabledStatusEx(g, t) (NV_FALSE) +#define gpumgrSetBcEnabledStatus(g, b) do { NvBool b2 = b; (void)b2; } while (0) +#define gpumgrSLILoopReentrancy(pGpu, l, r, i, pFuncStr) +#define gpumgrSLILoopReentrancyPop(pGpu) ((NvU32)0) +#define gpumgrSLILoopReentrancyPush(pGpu, sliLoopReentrancy) do { NvU32 x = sliLoopReentrancy; (void)x; } while(0) + + +typedef struct +{ + NvU32 gpuId; + NvU64 gpuDomainBusDevice; + NvBool bInitAttempted; + NvBool bDrainState; // no new client connections to this GPU + NvBool 
bRemoveIdle; // remove this GPU once it's idle (detached) + NvBool bExcluded; // this gpu is marked as excluded; do not use + NvBool bUuidValid; // cached uuid is valid + NvBool bSkipHwNvlinkDisable; //skip HW registers configuration for disabled links + NvU32 initDisabledNvlinksMask; + NV_STATUS initStatus; + NvU8 uuid[RM_SHA1_GID_SIZE]; + OS_RM_CAPS *pOsRmCaps; // "Opaque" pointer to os-specific capabilities +} PROBEDGPU; + +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_FLIPS 11:4 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME 12:12 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_INVALID 0x0000000 +#define NV_DEVICE_DISPLAY_FLAGS_AFR_FRAME_TIME_VALID 0x0000001 + +/*! + * Structure for tracking resources allocated for saving primary GPU's VBIOS + * state. This is used for TDR/fullchip reset recovery. The GPU object gets + * destroyed, so the data belongs here. + */ +typedef struct _def_gpumgr_save_vbios_state +{ + RmPhysAddr vgaWorkspaceVidMemBase; //__nvoc_pbase_OBJGPUMGR) + +#ifdef __nvoc_gpu_mgr_h_disabled +#define __dynamicCast_OBJGPUMGR(pThis) ((OBJGPUMGR*)NULL) +#else //__nvoc_gpu_mgr_h_disabled +#define __dynamicCast_OBJGPUMGR(pThis) \ + ((OBJGPUMGR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUMGR))) +#endif //__nvoc_gpu_mgr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR**, Dynamic*, NvU32); +#define __objCreate_OBJGPUMGR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJGPUMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS gpumgrConstruct_IMPL(struct OBJGPUMGR *arg_); +#define __nvoc_gpumgrConstruct(arg_) gpumgrConstruct_IMPL(arg_) +void gpumgrDestruct_IMPL(struct OBJGPUMGR *arg0); +#define __nvoc_gpumgrDestruct(arg0) gpumgrDestruct_IMPL(arg0) +void gpumgrAddSystemNvlinkTopo_IMPL(NvU64 DomainBusDevice); +#define gpumgrAddSystemNvlinkTopo(DomainBusDevice) gpumgrAddSystemNvlinkTopo_IMPL(DomainBusDevice) +NvBool gpumgrGetSystemNvlinkTopo_IMPL(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams); +#define gpumgrGetSystemNvlinkTopo(DomainBusDevice, pTopoParams) gpumgrGetSystemNvlinkTopo_IMPL(DomainBusDevice, pTopoParams) +void gpumgrUpdateSystemNvlinkTopo_IMPL(NvU64 DomainBusDevice, struct NVLINK_TOPOLOGY_PARAMS *pTopoParams); +#define gpumgrUpdateSystemNvlinkTopo(DomainBusDevice, pTopoParams) gpumgrUpdateSystemNvlinkTopo_IMPL(DomainBusDevice, pTopoParams) +NV_STATUS gpumgrSetGpuInitDisabledNvlinks_IMPL(NvU32 gpuId, NvU32 mask, NvBool bSkipHwNvlinkDisable); +#define gpumgrSetGpuInitDisabledNvlinks(gpuId, mask, bSkipHwNvlinkDisable) gpumgrSetGpuInitDisabledNvlinks_IMPL(gpuId, mask, bSkipHwNvlinkDisable) +NV_STATUS gpumgrGetGpuInitDisabledNvlinks_IMPL(NvU32 gpuId, NvU32 *pMask, NvBool *pbSkipHwNvlinkDisable); +#define gpumgrGetGpuInitDisabledNvlinks(gpuId, pMask, pbSkipHwNvlinkDisable) gpumgrGetGpuInitDisabledNvlinks_IMPL(gpuId, pMask, pbSkipHwNvlinkDisable) +NvBool gpumgrCheckIndirectPeer_IMPL(struct OBJGPU *pGpu, struct OBJGPU *pRemoteGpu); +#define gpumgrCheckIndirectPeer(pGpu, pRemoteGpu) gpumgrCheckIndirectPeer_IMPL(pGpu, pRemoteGpu) +void gpumgrAddSystemMIGInstanceTopo_IMPL(NvU64 domainBusDevice); +#define gpumgrAddSystemMIGInstanceTopo(domainBusDevice) gpumgrAddSystemMIGInstanceTopo_IMPL(domainBusDevice) +NvBool gpumgrGetSystemMIGInstanceTopo_IMPL(NvU64 domainBusDevice, struct GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY **ppTopoParams); +#define gpumgrGetSystemMIGInstanceTopo(domainBusDevice, ppTopoParams) 
gpumgrGetSystemMIGInstanceTopo_IMPL(domainBusDevice, ppTopoParams) +void gpumgrUnregisterRmCapsForMIGGI_IMPL(NvU64 gpuDomainBusDevice); +#define gpumgrUnregisterRmCapsForMIGGI(gpuDomainBusDevice) gpumgrUnregisterRmCapsForMIGGI_IMPL(gpuDomainBusDevice) +void gpumgrUpdateBoardId_IMPL(struct OBJGPU *arg0); +#define gpumgrUpdateBoardId(arg0) gpumgrUpdateBoardId_IMPL(arg0) +void gpumgrServiceInterrupts_IMPL(NvU32 arg0, MC_ENGINE_BITVECTOR *arg1, NvBool arg2); +#define gpumgrServiceInterrupts(arg0, arg1, arg2) gpumgrServiceInterrupts_IMPL(arg0, arg1, arg2) +#undef PRIVATE_FIELD + + +typedef struct { + NvBool specified; // Set this flag when using this struct + + DEVICE_MAPPING deviceMapping[SOC_DEV_MAPPING_MAX]; // Register Aperture mapping + NvU32 socChipId0; // Chip ID used for HAL binding + NvU32 iovaspaceId; // SMMU client ID +} SOCGPUATTACHARG; + +// +// Packages up system/bus state for attach process. +// +typedef struct GPUATTACHARG +{ + GPUHWREG *regBaseAddr; + GPUHWREG *fbBaseAddr; + GPUHWREG *instBaseAddr; + RmPhysAddr devPhysAddr; + RmPhysAddr fbPhysAddr; + RmPhysAddr instPhysAddr; + RmPhysAddr ioPhysAddr; + NvU64 nvDomainBusDeviceFunc; + NvU32 regLength; + NvU64 fbLength; + NvU32 instLength; + NvU32 intLine; + void *pOsAttachArg; + NvBool bIsSOC; + NvU32 socDeviceCount; + DEVICE_MAPPING socDeviceMappings[GPU_MAX_DEVICE_MAPPINGS]; + NvU32 socId; + NvU32 socSubId; + NvU32 socChipId0; + NvU32 iovaspaceId; + NvBool bRequestFwClientRm; + + // + // The SOC-specific fields above are legacy fields that were added for + // ARCH MODS iGPU verification. There is a plan to deprecate these fields as + // part of an effort to clean up the existing iGPU code in RM. + // + // Starting with T234D+, the SOCGPUATTACHARG field below will be used to + // pass the required attach info for a single SOC device from the RM OS + // layer to core RM. 
+ // + SOCGPUATTACHARG socDeviceArgs; +} GPUATTACHARG; + +NV_STATUS gpumgrGetGpuAttachInfo(NvU32 *pGpuCnt, NvU32 *pGpuMask); +NV_STATUS gpumgrGetProbedGpuIds(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *); +NV_STATUS gpumgrGetProbedGpuDomainBusDevice(NvU32 gpuId, NvU64 *gpuDomainBusDevice); +NV_STATUS gpumgrGetAttachedGpuIds(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *); +NV_STATUS gpumgrGetGpuIdInfo(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *); +NV_STATUS gpumgrGetGpuIdInfoV2(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *); +void gpumgrSetGpuId(OBJGPU*, NvU32 gpuId); +NV_STATUS gpumgrGetGpuInitStatus(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *); +void gpumgrSetGpuInitStatus(NvU32 gpuId, NV_STATUS status); +OBJGPU* gpumgrGetGpuFromId(NvU32 gpuId); +OBJGPU* gpumgrGetGpuFromUuid(const NvU8 *pGpuUuid, NvU32 flags); +OBJGPU* gpumgrGetGpuFromBusInfo(NvU32 domain, NvU8 bus, NvU8 device); +NvU32 gpumgrGetDefaultPrimaryGpu(NvU32 gpuMask); +NV_STATUS gpumgrAllocGpuInstance(NvU32 *pDeviceInstance); +NV_STATUS gpumgrRegisterGpuId(NvU32 gpuId, NvU64 gpuDomainBusDevice); +NV_STATUS gpumgrUnregisterGpuId(NvU32 gpuId); +NV_STATUS gpumgrExcludeGpuId(NvU32 gpuId); +NV_STATUS gpumgrSetUuid(NvU32 gpuId, NvU8 *uuid); +NV_STATUS gpumgrGetGpuUuidInfo(NvU32 gpuId, NvU8 **ppUuidStr, NvU32 *pUuidStrLen, NvU32 uuidFlags); +NvBool gpumgrIsDeviceRmFirmwareCapable(NvU16 devId, NvU32 pmcBoot42, NvBool *pbEnableByDefault); +NV_STATUS gpumgrAttachGpu(NvU32 deviceInstance, GPUATTACHARG *); +NV_STATUS gpumgrDetachGpu(NvU32 deviceInstance); +OBJGPU* gpumgrGetNextGpu(NvU32 gpuMask, NvU32 *pStartIndex); +NV_STATUS gpumgrStatePreInitGpu(OBJGPU*); +NV_STATUS gpumgrStateInitGpu(OBJGPU*); +NV_STATUS gpumgrStateLoadGpu(OBJGPU*, NvU32); +NV_STATUS gpumgrAllocDeviceInstance(NvU32 *pDeviceInstance); +NV_STATUS gpumgrCreateDevice(NvU32 *pDeviceInstance, NvU32 gpuMask, NvU32 *pGpuIdsOrdinal); +NV_STATUS gpumgrDestroyDevice(NvU32 deviceInstance); +NvU32 gpumgrGetDeviceInstanceMask(void); +NvU32 gpumgrGetDeviceGpuMask(NvU32 deviceInstance); +NV_STATUS gpumgrIsDeviceInstanceValid(NvU32 deviceInstance); +NvU32 gpumgrGetPrimaryForDevice(NvU32 deviceInstance); +NvBool gpumgrIsSubDeviceInstanceValid(NvU32 subDeviceInstance); +NvBool gpumgrIsDeviceEnabled(NvU32 deviceInstance); +NvU32 gpumgrGetGpuMask(OBJGPU *pGpu); +OBJGPU* gpumgrGetGpu(NvU32 deviceInstance); +OBJGPU* gpumgrGetSomeGpu(void); +NvU32 gpumgrGetSubDeviceCount(NvU32 gpuMask); +NvU32 gpumgrGetSubDeviceCountFromGpu(OBJGPU *pGpu); +NvU32 gpumgrGetSubDeviceMaxValuePlus1(OBJGPU *pGpu); +NvU32 gpumgrGetSubDeviceInstanceFromGpu(OBJGPU *pGpu); +OBJGPU* gpumgrGetParentGPU(OBJGPU *pGpu); +void gpumgrSetParentGPU(OBJGPU *pGpu, OBJGPU *pParentGpu); +NvBool gpumgrIsGpuDisplayParent(OBJGPU*); +OBJGPU* gpumgrGetDisplayParent(OBJGPU*); +NV_STATUS gpumgrGetGpuLockAndDrPorts(OBJGPU*, OBJGPU*, NvU32 *, NvU32 *); +NV_STATUS gpumgrGetBootPrimary(OBJGPU **ppGpu); +OBJGPU* gpumgrGetMGpu(void); +RmPhysAddr gpumgrGetGpuPhysFbAddr(OBJGPU*); +OBJGPU* gpumgrGetGpuFromSubDeviceInst(NvU32, NvU32); +NV_STATUS gpumgrAddDeviceInstanceToGpus(NvU32 gpuMask); +NV_STATUS gpumgrRemoveDeviceInstanceFromGpus(NvU32 gpuMask); +NV_STATUS gpumgrConstructGpuGrpObject(struct OBJGPUMGR *pGpuMgr, NvU32 gpuMask, struct OBJGPUGRP **ppGpuGrp); +struct OBJGPUGRP* gpumgrGetGpuGrpFromGpu(OBJGPU *pGpu); +struct OBJGPUGRP* gpumgrGetGpuGrpFromInstance(NvU32 gpugrpInstance); +NV_STATUS gpumgrModifyGpuDrainState(NvU32 gpuId, NvBool bEnable, NvBool bRemove, NvBool bLinkDisable); +NV_STATUS gpumgrQueryGpuDrainState(NvU32 gpuId, NvBool *pBEnable, NvBool *pBRemove); 
+NvBool gpumgrIsGpuPointerValid(OBJGPU *pGpu); +NvU32 gpumgrGetGrpMaskFromGpuInst(NvU32 gpuInst); +void gpumgrAddDeviceMaskToGpuInstTable(NvU32 gpuMask); +void gpumgrClearDeviceMaskFromGpuInstTable(NvU32 gpuMask); +NvBool gpumgrSetGpuAcquire(OBJGPU *pGpu); +void gpumgrSetGpuRelease(void); +NvU8 gpumgrGetGpuBridgeType(void); + +// +// gpumgrIsSubDeviceCountOne +// +static NV_INLINE NvBool +gpumgrIsSubDeviceCountOne(NvU32 gpuMask) +{ + // + // A fast version of gpumgrGetSubDeviceCount(gpumask) == 1. + // Make sure it returns 0 for gpuMask==0, just like gpumgrGetSubDeviceCount(0)!!! + // + return gpuMask != 0 && (gpuMask&(gpuMask-1)) == 0; +} + +// +// gpumgrIsParentGPU +// +static NV_INLINE NvBool +gpumgrIsParentGPU(OBJGPU *pGpu) +{ + return gpumgrGetParentGPU(pGpu) == pGpu; +} + +#endif // _GPUMGR_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_MGR_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_nvoc.c b/src/nvidia/generated/g_gpu_nvoc.c new file mode 100644 index 000000000..09c6f32e6 --- /dev/null +++ b/src/nvidia/generated/g_gpu_nvoc.c @@ -0,0 +1,645 @@ +#define NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7ef3cb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +void __nvoc_init_OBJGPU(OBJGPU*, NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, RM_RUNTIME_VARIANT RmVariantHal_rmVariant, NvU32 DispIpHal_ipver); +void __nvoc_init_funcTable_OBJGPU(OBJGPU*); +NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU*, NvU32 arg_gpuInstance); +void __nvoc_init_dataField_OBJGPU(OBJGPU*); +void __nvoc_dtor_OBJGPU(OBJGPU*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPU; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_OBJGPU = { + /*pClassDef=*/ &__nvoc_class_def_OBJGPU, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPU, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_RmHalspecOwner = { + /*pClassDef=*/ &__nvoc_class_def_RmHalspecOwner, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_RmHalspecOwner), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_OBJTRACEABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_OBJTRACEABLE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPU = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_OBJGPU_OBJGPU, + &__nvoc_rtti_OBJGPU_OBJTRACEABLE, + &__nvoc_rtti_OBJGPU_RmHalspecOwner, + &__nvoc_rtti_OBJGPU_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPU), + /*classId=*/ classId(OBJGPU), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPU", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPU, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGPU, + /*pExportInfo=*/ 
&__nvoc_export_info_OBJGPU +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPU = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJGPU(OBJGPU *pThis) { + __nvoc_gpuDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) { + ChipHal *chipHal = &staticCast(pThis, RmHalspecOwner)->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + pThis->setProperty(pThis, PDB_PROP_GPU_IS_CONNECTED, ((NvBool)(0 == 0))); + + // NVOC Property Hal field -- PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_IS_UEFI + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_IS_UEFI, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_ZERO_FB + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_ZERO_FB, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_MIG_SUPPORTED + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->setProperty(pThis, PDB_PROP_GPU_MIG_SUPPORTED, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_MIG_SUPPORTED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED, ((NvBool)(0 != 0))); + } + + pThis->boardId = ~0; + + pThis->deviceInstance = 32; + + // Hal field -- isVirtual + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->isVirtual = ((NvBool)(0 != 0)); + } + + // Hal field -- isGspClient + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->isGspClient = ((NvBool)(0 == 0)); + } + else if (0) + { + } + + pThis->bIsDebugModeEnabled = ((NvBool)(0 != 0)); + + pThis->numOfMclkLockRequests = 0U; + + pThis->bUseRegisterAccessMap = !(0); + + 
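    //
    // Note: the generated boolean spellings above and below reduce to plain
    // constants -- ((NvBool)(0 == 0)) is NV_TRUE, ((NvBool)(0 != 0)) is
    // NV_FALSE, and !(0) is likewise true -- presumably so the same
    // code-generation template can emit either a fixed value or a
    // HAL-dependent expression.
    //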
pThis->boardInfo = ((void *)0); + + // Hal field -- bUnifiedMemorySpaceEnabled + if (0) + { + } + // default + else + { + pThis->bUnifiedMemorySpaceEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bWarBug200577889SriovHeavyEnabled + pThis->bWarBug200577889SriovHeavyEnabled = ((NvBool)(0 != 0)); + + // Hal field -- bNeed4kPageIsolation + if (0) + { + } + // default + else + { + pThis->bNeed4kPageIsolation = ((NvBool)(0 != 0)); + } + + // Hal field -- bInstLoc47bitPaWar + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bInstLoc47bitPaWar = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bInstLoc47bitPaWar = ((NvBool)(0 != 0)); + } + + // Hal field -- bIsBarPteInSysmemSupported + if (0) + { + } + // default + else + { + pThis->bIsBarPteInSysmemSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bClientRmAllocatedCtxBuffer + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bClientRmAllocatedCtxBuffer = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bClientRmAllocatedCtxBuffer = ((NvBool)(0 != 0)); + } + + // Hal field -- bVidmemPreservationBrokenBug3172217 + if (0) + { + } + // default + else + { + pThis->bVidmemPreservationBrokenBug3172217 = ((NvBool)(0 != 0)); + } + + // Hal field -- bInstanceMemoryAlwaysCached + if (0) + { + } + // default + else + { + pThis->bInstanceMemoryAlwaysCached = ((NvBool)(0 != 0)); + } + + pThis->bIsGeforce = ((NvBool)(0 == 0)); + + // Hal field -- bComputePolicyTimesliceSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bComputePolicyTimesliceSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bComputePolicyTimesliceSupported = ((NvBool)(0 != 0)); + } +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* ); +NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU *pThis, NvU32 arg_gpuInstance) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_Object; + status = __nvoc_ctor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_RmHalspecOwner; + status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_OBJTRACEABLE; + __nvoc_init_dataField_OBJGPU(pThis); + + status = __nvoc_gpuConstruct(pThis, arg_gpuInstance); + if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail__init; + goto __nvoc_ctor_OBJGPU_exit; // Success + +__nvoc_ctor_OBJGPU_fail__init: + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); +__nvoc_ctor_OBJGPU_fail_OBJTRACEABLE: + __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner); +__nvoc_ctor_OBJGPU_fail_RmHalspecOwner: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJGPU_fail_Object: +__nvoc_ctor_OBJGPU_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) { + ChipHal *chipHal = &staticCast(pThis, RmHalspecOwner)->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + 
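    //
    // Note on the generated dispatch pattern used throughout this function:
    // each HAL variant carries a small index (__nvoc_HalVarIdx), and a branch
    // such as
    //
    //     ((chipHal_HalVarIdx >> 5) == 1UL) &&
    //     ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL)
    //
    // appears to test that index against a per-branch bitmask: the value
    // shifted right by 5 selects a 32-bit word of the mask, and the low five
    // bits select a bit within that word, so 0x00000400UL in word 1 matches
    // the GA100 variant (see the generated /* ChipHal: ... */ annotations).
    // The bare "if (0)" / "else if (0)" arms presumably correspond to
    // variants not enabled in this build and are compiled out.
    //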
RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal function -- gpuGetChildrenPresent + if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000320UL) )) /* ChipHal: TU102 | TU116 | TU117 */ + { + pThis->__gpuGetChildrenPresent__ = &gpuGetChildrenPresent_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000040UL) )) /* ChipHal: TU104 */ + { + pThis->__gpuGetChildrenPresent__ = &gpuGetChildrenPresent_TU104; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000080UL) )) /* ChipHal: TU106 */ + { + pThis->__gpuGetChildrenPresent__ = &gpuGetChildrenPresent_TU106; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__gpuGetChildrenPresent__ = &gpuGetChildrenPresent_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__gpuGetChildrenPresent__ = &gpuGetChildrenPresent_GA102; + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- gpuGetClassDescriptorList + if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000120UL) )) /* ChipHal: TU102 | TU116 */ + { + pThis->__gpuGetClassDescriptorList__ = &gpuGetClassDescriptorList_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000040UL) )) /* ChipHal: TU104 */ + { + pThis->__gpuGetClassDescriptorList__ = &gpuGetClassDescriptorList_TU104; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000080UL) )) /* ChipHal: TU106 */ + { + pThis->__gpuGetClassDescriptorList__ = &gpuGetClassDescriptorList_TU106; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000200UL) )) /* ChipHal: TU117 */ + { + pThis->__gpuGetClassDescriptorList__ = &gpuGetClassDescriptorList_TU117; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__gpuGetClassDescriptorList__ = &gpuGetClassDescriptorList_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__gpuGetClassDescriptorList__ = &gpuGetClassDescriptorList_GA102; + } + else if 
(0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- gpuClearFbhubPoisonIntrForBug2924523 + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__gpuClearFbhubPoisonIntrForBug2924523__ = &gpuClearFbhubPoisonIntrForBug2924523_GA100_KERNEL; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__gpuClearFbhubPoisonIntrForBug2924523__ = &gpuClearFbhubPoisonIntrForBug2924523_56cd7a; + } + } + else if (0) + { +#if 0 + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__gpuClearFbhubPoisonIntrForBug2924523__ = &gpuClearFbhubPoisonIntrForBug2924523_GA100_PHYSICAL; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__gpuClearFbhubPoisonIntrForBug2924523__ = &gpuClearFbhubPoisonIntrForBug2924523_56cd7a; + } +#endif + } + + // Hal function -- gpuConstructDeviceInfoTable + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__gpuConstructDeviceInfoTable__ = &gpuConstructDeviceInfoTable_FWCLIENT; + } + // default + else + { + pThis->__gpuConstructDeviceInfoTable__ = &gpuConstructDeviceInfoTable_56cd7a; + } + } + else if (0) + { +#if 0 + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__gpuConstructDeviceInfoTable__ = &gpuConstructDeviceInfoTable_GA100; + } + // default + else + { + pThis->__gpuConstructDeviceInfoTable__ = &gpuConstructDeviceInfoTable_56cd7a; + } +#endif + } + + // Hal function -- gpuGetFlaVasSize + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__gpuGetFlaVasSize__ = &gpuGetFlaVasSize_GA100; + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__gpuGetFlaVasSize__ = &gpuGetFlaVasSize_474d46; + } +} + +void __nvoc_init_funcTable_OBJGPU(OBJGPU *pThis) { + __nvoc_init_funcTable_OBJGPU_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_RmHalspecOwner(RmHalspecOwner*, NvU32, NvU32, NvU32, RM_RUNTIME_VARIANT, NvU32); +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_OBJGPU(OBJGPU *pThis, NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, RM_RUNTIME_VARIANT RmVariantHal_rmVariant, NvU32 DispIpHal_ipver) { + 
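    //
    // Wire up the cached base-class pointers, initialize each base class
    // (RmHalspecOwner receives the chip/variant parameters that drive HAL
    // selection), and then populate this object's HAL function table.
    //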
pThis->__nvoc_pbase_OBJGPU = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + pThis->__nvoc_pbase_RmHalspecOwner = &pThis->__nvoc_base_RmHalspecOwner; + pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver); + __nvoc_init_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + __nvoc_init_funcTable_OBJGPU(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags, NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev, RM_RUNTIME_VARIANT RmVariantHal_rmVariant, NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance) { + NV_STATUS status; + Object *pParentObj; + OBJGPU *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJGPU)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGPU)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPU); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJGPU(pThis, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver); + status = __nvoc_ctor_OBJGPU(pThis, arg_gpuInstance); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPU_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJGPU_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + NvU32 ChipHal_arch = va_arg(args, NvU32); + NvU32 ChipHal_impl = va_arg(args, NvU32); + NvU32 ChipHal_hidrev = va_arg(args, NvU32); + RM_RUNTIME_VARIANT RmVariantHal_rmVariant = va_arg(args, RM_RUNTIME_VARIANT); + NvU32 DispIpHal_ipver = va_arg(args, NvU32); + NvU32 arg_gpuInstance = va_arg(args, NvU32); + + status = __nvoc_objCreate_OBJGPU(ppThis, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_nvoc.h b/src/nvidia/generated/g_gpu_nvoc.h new file mode 100644 index 000000000..d3649f9a7 --- /dev/null +++ b/src/nvidia/generated/g_gpu_nvoc.h @@ -0,0 +1,3534 @@ +#ifndef _G_GPU_NVOC_H_ +#define _G_GPU_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_nvoc.h" + +#ifndef _OBJGPU_H_ +#define _OBJGPU_H_ + +/*! + * @file + * @brief Resource Manager Defines and Structures: Defines and structures used for the GPU Object. + */ + +/*! + * + * Forward declaration of SEQSCRIPT - here because it is used by many clients + * and we don't want objseq.h to have to be included everywhere, so adding this + * here. See NVCR 12827752 + * + */ +typedef struct _SEQSCRIPT SEQSCRIPT, *PSEQSCRIPT; + +typedef struct GPUATTACHARG GPUATTACHARG; + +/* + * WARNING -- Avoid including headers in gpu.h + * A change in gpu.h and headers included by gpu.h triggers recompilation of most RM + * files in an incremental build. We should keep the list of included header as short as + * possible. + * Especially, GPU's child module should not have its object header being included here. + * A child module generally includes the header of its parent. A child module header included + * by the parent module affects all the sibling modules. + * */ +#include "ctrl/ctrl0080/ctrl0080gpu.h" // NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS (form hal) +#include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_CMD_INTERNAL_MAX_BSPS/NVENCS +#include "ctrl/ctrl2080/ctrl2080nvd.h" +#include "class/cl2080.h" +#include "class/cl90cd.h" + +#include "nvlimits.h" +#include "utils/nv_enum.h" + +#include "gpu/gpu_timeout.h" +#include "gpu/gpu_access.h" + +#include "platform/acpi_common.h" +#include "acpigenfuncs.h" +#include "nvacpitypes.h" +#include "platform/sli/sli.h" + +#include "core/core.h" +#include "core/system.h" +#include "core/info_block.h" +#include "core/hal.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/gpu_resource_desc.h" +#include "diagnostics/traceable.h" +#include "gpu/gpu_uuid.h" +#include "prereq_tracker/prereq_tracker.h" +#include "gpu/gpu_halspec.h" + +#include "rmapi/control.h" +#include "rmapi/event.h" +#include "rmapi/rmapi.h" + +#include "kernel/gpu/gr/fecs_event_list.h" +#include "class/cl90cdfecs.h" + +#include "nvdevid.h" +#include "nv_arch.h" + +#include "g_rmconfig_util.h" // prototypes for rmconfig utility functions, eg: rmcfg_IsGK104() + +// TODO - the forward declaration of OS_GPU_INFO should be simplified +typedef struct nv_state_t OS_GPU_INFO; + +struct OBJGMMU; + +#ifndef __NVOC_CLASS_OBJGMMU_TYPEDEF__ +#define __NVOC_CLASS_OBJGMMU_TYPEDEF__ +typedef struct OBJGMMU OBJGMMU; +#endif /* __NVOC_CLASS_OBJGMMU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGMMU +#define __nvoc_class_id_OBJGMMU 0xd7a41d +#endif /* __nvoc_class_id_OBJGMMU */ + + +struct OBJGRIDDISPLAYLESS; + +#ifndef __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ +#define __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ +typedef struct OBJGRIDDISPLAYLESS OBJGRIDDISPLAYLESS; +#endif /* __NVOC_CLASS_OBJGRIDDISPLAYLESS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGRIDDISPLAYLESS +#define __nvoc_class_id_OBJGRIDDISPLAYLESS 0x20fd5a +#endif /* __nvoc_class_id_OBJGRIDDISPLAYLESS */ + + +struct OBJHOSTENG; + +#ifndef __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ +#define __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ +typedef struct OBJHOSTENG 
OBJHOSTENG; +#endif /* __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHOSTENG +#define __nvoc_class_id_OBJHOSTENG 0xb356e7 +#endif /* __nvoc_class_id_OBJHOSTENG */ + + +struct OBJPMUCLIENT; + +#ifndef __NVOC_CLASS_OBJPMUCLIENT_TYPEDEF__ +#define __NVOC_CLASS_OBJPMUCLIENT_TYPEDEF__ +typedef struct OBJPMUCLIENT OBJPMUCLIENT; +#endif /* __NVOC_CLASS_OBJPMUCLIENT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPMUCLIENT +#define __nvoc_class_id_OBJPMUCLIENT 0xea631d +#endif /* __nvoc_class_id_OBJPMUCLIENT */ + + +struct OBJINTRABLE; + +#ifndef __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ +#define __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ +typedef struct OBJINTRABLE OBJINTRABLE; +#endif /* __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJINTRABLE +#define __nvoc_class_id_OBJINTRABLE 0x31ccb7 +#endif /* __nvoc_class_id_OBJINTRABLE */ + + +struct OBJVBIOS; + +#ifndef __NVOC_CLASS_OBJVBIOS_TYPEDEF__ +#define __NVOC_CLASS_OBJVBIOS_TYPEDEF__ +typedef struct OBJVBIOS OBJVBIOS; +#endif /* __NVOC_CLASS_OBJVBIOS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVBIOS +#define __nvoc_class_id_OBJVBIOS 0x5dc772 +#endif /* __nvoc_class_id_OBJVBIOS */ + + +struct NvDebugDump; + +#ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__ +#define __NVOC_CLASS_NvDebugDump_TYPEDEF__ +typedef struct NvDebugDump NvDebugDump; +#endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDebugDump +#define __nvoc_class_id_NvDebugDump 0x7e80a2 +#endif /* __nvoc_class_id_NvDebugDump */ + + +struct GpuMutexMgr; + +#ifndef __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ +#define __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ +typedef struct GpuMutexMgr GpuMutexMgr; +#endif /* __NVOC_CLASS_GpuMutexMgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuMutexMgr +#define __nvoc_class_id_GpuMutexMgr 0x9d93b2 +#endif /* __nvoc_class_id_GpuMutexMgr */ + + +struct KernelFalcon; + +#ifndef __NVOC_CLASS_KernelFalcon_TYPEDEF__ +#define __NVOC_CLASS_KernelFalcon_TYPEDEF__ +typedef struct KernelFalcon KernelFalcon; +#endif /* __NVOC_CLASS_KernelFalcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFalcon +#define __nvoc_class_id_KernelFalcon 0xb6b1af +#endif /* __nvoc_class_id_KernelFalcon */ + + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + +struct GenericKernelFalcon; + +#ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ +#define __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ +typedef struct GenericKernelFalcon GenericKernelFalcon; +#endif /* __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericKernelFalcon +#define __nvoc_class_id_GenericKernelFalcon 0xabcf08 +#endif /* __nvoc_class_id_GenericKernelFalcon */ + + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + +struct 
Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +#ifndef PARTITIONID_INVALID +#define PARTITIONID_INVALID 0xFFFFFFFF +#endif +typedef struct MIG_INSTANCE_REF MIG_INSTANCE_REF; +typedef struct NV2080_CTRL_GPU_REG_OP NV2080_CTRL_GPU_REG_OP; + +typedef enum +{ + BRANDING_TYPE_UNCACHED, + BRANDING_TYPE_NONE, + BRANDING_TYPE_QUADRO_GENERIC, + BRANDING_TYPE_QUADRO_AD, + BRANDING_TYPE_NVS_NVIDIA, // "NVIDIA NVS" + BRANDING_TYPE_VGX, +} BRANDING_TYPE; + +typedef enum +{ + COMPUTE_BRANDING_TYPE_NONE, + COMPUTE_BRANDING_TYPE_TESLA, +} COMPUTE_BRANDING_TYPE; + +#define MAX_DSM_SUPPORTED_FUNCS_RTN_LEN 8 // # bytes to store supported functions + +typedef struct { + // supported function status and cache + NvU32 suppFuncStatus; + NvU8 suppFuncs[MAX_DSM_SUPPORTED_FUNCS_RTN_LEN]; + NvU32 suppFuncsLen; + NvBool bArg3isInteger; + // callback status and cache + NvU32 callbackStatus; + NvU32 callback; +} ACPI_DSM_CACHE; + +typedef struct { + + ACPI_DSM_CACHE dsm[ACPI_DSM_FUNCTION_COUNT]; + ACPI_DSM_FUNCTION dispStatusHotplugFunc; + ACPI_DSM_FUNCTION dispStatusConfigFunc; + ACPI_DSM_FUNCTION perfPostPowerStateFunc; + ACPI_DSM_FUNCTION stereo3dStateActiveFunc; + NvU32 dsmPlatCapsCache[ACPI_DSM_FUNCTION_COUNT]; + NvU32 MDTLFeatureSupport; + + // cache of generic func/subfunction remappings. + ACPI_DSM_FUNCTION dsmCurrentFunc[NV_ACPI_GENERIC_FUNC_COUNT]; + NvU32 dsmCurrentSubFunc[NV_ACPI_GENERIC_FUNC_COUNT]; + NvU32 dsmCurrentFuncSupport; + +} ACPI_DATA; + + +#define OOR_ARCH_DEF(x) \ + NV_ENUM_ENTRY(x, OOR_ARCH_X86_64, 0x00000000) \ + NV_ENUM_ENTRY(x, OOR_ARCH_PPC64LE, 0x00000001) \ + NV_ENUM_ENTRY(x, OOR_ARCH_ARM, 0x00000002) \ + NV_ENUM_ENTRY(x, OOR_ARCH_AARCH64, 0x00000003) \ + NV_ENUM_ENTRY(x, OOR_ARCH_NONE, 0x00000004) + +NV_ENUM_DEF(OOR_ARCH, OOR_ARCH_DEF) + +typedef struct +{ + NvU32 classId; + NvU32 flags; +} GPUCHILDORDER; + +typedef struct +{ + NvU32 classId; + NvU32 instances; +} GPUCHILDPRESENT; + +// GPU Child Order Flags +#define GCO_LIST_INIT NVBIT(0) // entry is used for init ordering (DO NOT USE) +#define GCO_LIST_LOAD NVBIT(1) // entry is used for load and postload ordering (DO NOT USE) +#define GCO_LIST_UNLOAD NVBIT(2) // entry is used for unload and preunload ordering (DO NOT USE) +#define GCO_LIST_DESTROY NVBIT(3) // entry is used for destroy order (DO NOT USE) +#define GCO_LIST_ALL (GCO_LIST_INIT | GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY) + // ^ entry is used for all list types (RECOMMENDED) +#define GCO_ALL (GCO_LIST_ALL) + + +typedef struct +{ + NvU32 childTypeIdx; + NvU32 childInst; + NvU32 gpuChildPtrOffset; +} GPU_CHILD_ITER; + +typedef GPU_CHILD_ITER ENGSTATE_ITER; +typedef GPU_CHILD_ITER PMUCLIENT_ITER; + +// +// Object 'get' macros for GPU relative object retrievals. +// + +#define ENG_GET_GPU(p) objFindAncestorOfType(OBJGPU, (p)) + +// GPU_GET_FIFO_UC is autogenerated, returns per Gpu pFifo. +#define GPU_GET_FIFO(p) GPU_GET_FIFO_UC(p) + +// GPU_GET_KERNEL_FIFO_UC is autogenerated, returns per Gpu pKernelFifo. +#define GPU_GET_KERNEL_FIFO(p) gpuGetKernelFifoShared(p) + +#define GPU_GET_HEAP(p) (RMCFG_MODULE_HEAP ? MEMORY_MANAGER_GET_HEAP(GPU_GET_MEMORY_MANAGER(p)) : NULL) + +#define GPU_GET_HAL(p) (RMCFG_MODULE_HAL ? (p)->pHal : NULL) + +#define GPU_GET_OS(p) (RMCFG_MODULE_OS ? 
(p)->pOS : NULL) // TBD: replace with SYS_GET_OS +#define GPU_QUICK_PATH_GET_OS(p) GPU_GET_OS(p) // TBD: remove + +#define GPU_GET_REGISTER_ACCESS(g) (&(g)->registerAccess) + +// Returns the pRmApi that routes to the physical driver, either via RPC or local calls +#define GPU_GET_PHYSICAL_RMAPI(g) (&(g)->physicalRmApi) + +// +// Defines and helpers for encoding and decoding PCI domain, bus and device. +// +// Ideally these would live in objbus.h (or somewhere else more appropriate) and +// not gpu/gpu.h, but keep them here for now while support for 32-bit domains is +// being added as part of bug 1904645. +// + +// DRF macros for GPUBUSINFO::nvDomainBusDeviceFunc +#define NVGPU_BUSDEVICE_DOMAIN 63:32 +#define NVGPU_BUSDEVICE_BUS 15:8 +#define NVGPU_BUSDEVICE_DEVICE 7:0 + +static NV_INLINE NvU32 gpuDecodeDomain(NvU64 gpuDomainBusDevice) +{ + return (NvU32)DRF_VAL64(GPU, _BUSDEVICE, _DOMAIN, gpuDomainBusDevice); +} + +static NV_INLINE NvU8 gpuDecodeBus(NvU64 gpuDomainBusDevice) +{ + return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _BUS, gpuDomainBusDevice); +} + +static NV_INLINE NvU8 gpuDecodeDevice(NvU64 gpuDomainBusDevice) +{ + return (NvU8)DRF_VAL64(GPU, _BUSDEVICE, _DEVICE, gpuDomainBusDevice); +} + +static NV_INLINE NvU64 gpuEncodeDomainBusDevice(NvU32 domain, NvU8 bus, NvU8 device) +{ + return DRF_NUM64(GPU, _BUSDEVICE, _DOMAIN, domain) | + DRF_NUM64(GPU, _BUSDEVICE, _BUS, bus) | + DRF_NUM64(GPU, _BUSDEVICE, _DEVICE, device); +} + +static NV_INLINE NvU32 gpuEncodeBusDevice(NvU8 bus, NvU8 device) +{ + NvU64 busDevice = gpuEncodeDomainBusDevice(0, bus, device); + + // Bus and device are guaranteed to fit in the lower 32bits + return (NvU32)busDevice; +} + +// +// Generate a 32-bit id from domain, bus and device tuple. +// +NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device); + +// +// Helpers for getting domain, bus and device of a GPU +// +// Ideally these would be inline functions, but NVOC doesn't support that today, +// tracked in bug 1905882 +// +#define gpuGetDBDF(pGpu) ((pGpu)->busInfo.nvDomainBusDeviceFunc) +#define gpuGetDomain(pGpu) gpuDecodeDomain((pGpu)->busInfo.nvDomainBusDeviceFunc) +#define gpuGetBus(pGpu) gpuDecodeBus((pGpu)->busInfo.nvDomainBusDeviceFunc) +#define gpuGetDevice(pGpu) gpuDecodeDevice((pGpu)->busInfo.nvDomainBusDeviceFunc) + +#undef NVGPU_BUSDEVICE_DOMAIN +#undef NVGPU_BUSDEVICE_BUS +#undef NVGPU_BUSDEVICE_DEVICE + +// +// MaskRevision constants. +// +#define GPU_NO_MASK_REVISION 0x00 +#define GPU_MASK_REVISION_A1 0xA1 +#define GPU_MASK_REVISION_A2 0xA2 +#define GPU_MASK_REVISION_A3 0xA3 +#define GPU_MASK_REVISION_A4 0xA4 +#define GPU_MASK_REVISION_A5 0xA5 +#define GPU_MASK_REVISION_A6 0xA6 +#define GPU_MASK_REVISION_B1 0xB1 +#define GPU_MASK_REVISION_B2 0xB2 +#define GPU_MASK_REVISION_C1 0xC1 +#define GPU_MASK_REVISION_D1 0xD1 + +#define GPU_GET_MASKREVISION(pGpu) (((gpuGetChipMajRev(pGpu))<<4)|(gpuGetChipMinRev(pGpu))) + +// +// Revision constants. 
+// +#define GPU_NO_REVISION 0xFF +#define GPU_REVISION_0 0x00 +#define GPU_REVISION_1 0x01 +#define GPU_REVISION_2 0x02 +#define GPU_REVISION_3 0x03 +#define GPU_REVISION_4 0x04 +#define GPU_REVISION_5 0x05 +#define GPU_REVISION_6 0x06 +#define GPU_REVISION_7 0x07 +#define GPU_REVISION_8 0x08 +#define GPU_REVISION_9 0x09 +#define GPU_REVISION_A 0x0A +#define GPU_REVISION_B 0x0B +#define GPU_REVISION_C 0x0C +#define GPU_REVISION_D 0x0D +#define GPU_REVISION_E 0x0E +#define GPU_REVISION_F 0x0F + +// +// One extra nibble should be added to the architecture version read from the +// PMC boot register to represent the architecture number in RM. +// +#define GPU_ARCH_SHIFT 0x4 + +// Registry key for inst mem modification defines +#define INSTMEM_TAG_MASK (0xf0000000) +#define INSTMEM_TAG(a) ((INSTMEM_TAG_MASK & (a)) >> 28) + + +typedef struct +{ + + NvU32 PCIDeviceID; + NvU32 Manufacturer; + NvU32 PCISubDeviceID; + NvU32 PCIRevisionID; + NvU32 Subrevision; + + // + // ImplentationExternal and ArchitectureExternal are only valid if they are + // not both zero. They are used when we want to report a different + // arch/imp to an external client. For example, MCP73 is almost the same + // as MCP67, so we report the MCP67 arch/imp to external clients of MCP73. + // (If an MCP73 client really needs to know that it is running on MCP73 + // instead of MCP67, it should check capability bits.) + // + NvU32 ImplementationExternal; + NvU32 ArchitectureExternal; +} GPUIDINFO; + + +typedef struct +{ + NvU32 impl; + NvU32 arch; + NvU32 majorRev; + NvU32 minorRev; + NvU32 devIDStrap; + NvU32 minorExtRev; +} PMCBOOT0; + +typedef struct +{ + NvU32 impl; + NvU32 arch; + NvU32 majorRev; + NvU32 minorRev; + NvU32 devIDStrap; + NvU32 minorExtRev; +} PMCBOOT42; + +// +// Random collection of bus-related configuration state. +// +typedef struct +{ + RmPhysAddr gpuPhysAddr; + RmPhysAddr gpuPhysFbAddr; + RmPhysAddr gpuPhysInstAddr; + RmPhysAddr gpuPhysIoAddr; + NvU32 iovaspaceId; + NvU32 IntLine; + NvU32 IsrHooked; + NvU64 nvDomainBusDeviceFunc; + OOR_ARCH oorArch; +} GPUBUSINFO; + +typedef struct +{ + PCLASSDESCRIPTOR pClasses; + NvU32 *pSuppressClasses; + NvU32 numClasses; + NvBool bSuppressRead; +} GPUCLASSDB, *PGPUCLASSDB; + +typedef struct +{ + const CLASSDESCRIPTOR *pClassDescriptors; + NvU32 numClassDescriptors; + + PENGDESCRIPTOR pEngineInitDescriptors; + PENGDESCRIPTOR pEngineDestroyDescriptors; + PENGDESCRIPTOR pEngineLoadDescriptors; + PENGDESCRIPTOR pEngineUnloadDescriptors; + NvU32 numEngineDescriptors; +} GPU_ENGINE_ORDER, *PGPU_ENGINE_ORDER; + +// +// PCI Express Support +// +typedef struct NBADDR +{ + NvU32 domain; + NvU8 bus; + NvU8 device; + NvU8 func; + NvU8 valid; + void *handle; +} NBADDR; + +typedef struct +{ + NBADDR addr; + void *vAddr; // virtual address of the port, if it has been mapped . 
Not used starting with Win10 BuildXXXXX + NvU32 PCIECapPtr; // offset of the PCIE capptr in the NB + // Capability register set in enhanced configuration space + // + NvU32 PCIEErrorCapPtr; // offset of the Advanced Error Reporting Capability register set + NvU32 PCIEVCCapPtr; // offset of the Virtual Channel (VC) Capability register set + NvU32 PCIEL1SsCapPtr; // Offset of the L1 Substates Capabilities + NvU16 DeviceID, VendorID; // device and vendor ID for port +} PORTDATA; + +typedef struct // GPU specific data for core logic object, stored in GPU object +{ + PORTDATA upstreamPort; // the upstream port info for the GPU + // If there is a switch this is equal to boardDownstreamPort + // If there is no switch this is equal to rootPort + PORTDATA rootPort; // The root port of the PCI-E root complex + PORTDATA boardUpstreamPort; // If there is no BR03 this is equal to rootPort. + PORTDATA boardDownstreamPort; // If there is no BR03 these data are not set. +} GPUCLDATA; + + +// +// Flags for gpuStateLoad() and gpuStateUnload() routines. Flags *must* be used +// symmetrically across an Unload/Load pair. +// +#define GPU_STATE_FLAGS_PRESERVING NVBIT(0) // GPU state is preserved +#define GPU_STATE_FLAGS_VGA_TRANSITION NVBIT(1) // To be used with GPU_STATE_FLAGS_PRESERVING. +#define GPU_STATE_FLAGS_PM_TRANSITION NVBIT(2) // To be used with GPU_STATE_FLAGS_PRESERVING. +#define GPU_STATE_FLAGS_PM_SUSPEND NVBIT(3) +#define GPU_STATE_FLAGS_PM_HIBERNATE NVBIT(4) +#define GPU_STATE_FLAGS_GC6_TRANSITION NVBIT(5) // To be used with GPU_STATE_FLAGS_PRESERVING. +#define GPU_STATE_DEFAULT 0 // Default flags for destructive state loads + // and unloads + +typedef struct engine_event_node +{ + PEVENTNOTIFICATION pEventNotify; + struct Memory *pMemory; + struct engine_event_node *pNext; +} ENGINE_EVENT_NODE; + +// Linked list of per engine non-stall event nodes +typedef struct +{ + ENGINE_EVENT_NODE *pEventNode; + // lock to protect above list + PORT_SPINLOCK *pSpinlock; +} ENGINE_EVENT_LIST; + +struct OBJHWBC; +typedef struct hwbc_list +{ + struct OBJHWBC *pHWBC; + struct hwbc_list *pNext; +} HWBC_LIST; + +typedef struct SRIOV_P2P_INFO +{ + NvU32 gfid; + NvBool bAllowP2pAccess; + NvU32 accessRefCount; + NvU32 destRefCount; +} SRIOV_P2P_INFO, *PSRIOV_P2P_INFO; + +// +// typedef of private struct used in OBJGPU's data field +// + +typedef struct +{ + NvBool isInitialized; + NvU8 uuid[RM_SHA1_GID_SIZE]; +} _GPU_UUID; + +typedef struct +{ + NvBool bValid; + NvU8 id; +} _GPU_PCIE_PEER_CLIQUE; + +typedef struct +{ + NvU32 platformId; // used to identify soc + NvU32 implementationId; // soc-specific + NvU32 revisionId; // soc-revision + PMCBOOT0 pmcBoot0; + PMCBOOT42 pmcBoot42; + NvU8 subRevision; // sub-revision (NV_FUSE_OPT_SUBREVISION on GPU) +} _GPU_CHIP_INFO; + + +// Engine Database +typedef struct +{ + NvU32 size; + NvU32 *pType; + NvBool bValid; +} _GPU_ENGINE_DB; + +#define MAX_NUM_BARS (8) +// SRIOV state +typedef struct +{ + /*! + * Total number of VFs available in this GPU + */ + NvU32 totalVFs; + + /*! + * First VF Offset + */ + NvU32 firstVFOffset; + + /*! + * Max GFID possible + */ + NvU32 maxGfid; + + /*! + * Physical offset of Virtual BAR0 register. Stores the offset if the GPU is + * a physical function, else 0 + */ + NvU32 virtualRegPhysOffset; + + /*! + * Allocated GFIDs. Will be used to ensure plugins doesn't use same GFID for multiple VFs + */ + NvU8 *pAllocatedGfids; + + /*! + * The sizes of the BAR regions on the VF + */ + NvU64 vfBarSize[MAX_NUM_BARS]; + + /*! 
+ * First PF's BAR addresses + */ + NvU64 firstVFBarAddress[MAX_NUM_BARS]; + + /*! + * If the VF BARs are 64-bit addressable + */ + NvBool b64bitVFBar0; + NvBool b64bitVFBar1; + NvBool b64bitVFBar2; + + /*! + * GFID used for P2P access + */ + PSRIOV_P2P_INFO pP2PInfo; + NvBool bP2PAllocated; + NvU32 maxP2pGfid; +} _GPU_SRIOV_STATE; + +// Max # of instances for GPU children +#define GPU_MAX_CES 10 +#define GPU_MAX_GRS 8 +#define GPU_MAX_FIFOS 1 +#define GPU_MAX_MSENCS NV2080_CTRL_CMD_INTERNAL_MAX_MSENCS +#define GPU_MAX_NVDECS NV2080_CTRL_CMD_INTERNAL_MAX_BSPS +#define GPU_MAX_NVJPGS 8 +#define GPU_MAX_HSHUBS 5 + +// +// Macro defines for OBJGPU fields -- Macro defines inside NVOC class block is +// gone after NVOC preprocessing stage. For macros used outside gpu/gpu.h should +// not be defined inside the class block. +// + +// +// Maximum number of Falcon objects that can be allocated on one GPU. +// This is purely a software limit and can be raised freely as more are added. +// +#define GPU_MAX_FALCON_ENGINES \ + ENG_IOCTRL__SIZE_1 + \ + ENG_GPCCS__SIZE_1 + \ + ENG_FECS__SIZE_1 + \ + ENG_NVJPEG__SIZE_1 + \ + ENG_NVDEC__SIZE_1 + \ + ENG_MSENC__SIZE_1 + \ + 32 + +// for OBJGPU::pRmCtrlDeferredCmd +#define MAX_DEFERRED_CMDS 2 + +// for OBJGPU::computeModeRefCount +#define NV_GPU_MODE_GRAPHICS_MODE 0x00000001 +#define NV_GPU_MODE_COMPUTE_MODE 0x00000002 +#define NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT 0x0000000a +#define NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT 0x0000000b + +// +// Structure to hold information obtained from +// parsing the DEVICE_INFO2 table during init. +// + +typedef struct NV2080_CTRL_INTERNAL_DEVICE_INFO DEVICE_INFO2_TABLE; + +#define NV_GPU_INTERNAL_DEVICE_HANDLE 0xABCD0080 +#define NV_GPU_INTERNAL_SUBDEVICE_HANDLE 0xABCD2080 + +// +// NV GPU simulation mode defines +// Keep in sync with os.h SIM MODE defines until osGetSimulationMode is deprecated. 
+// +#ifndef NV_SIM_MODE_DEFS +#define NV_SIM_MODE_DEFS +#define NV_SIM_MODE_HARDWARE 0U +#define NV_SIM_MODE_RTL 1U +#define NV_SIM_MODE_CMODEL 2U +#define NV_SIM_MODE_MODS_AMODEL 3U +#define NV_SIM_MODE_TEGRA_FPGA 4U +#define NV_SIM_MODE_INVALID (~0x0U) +#endif + +// +// The actual GPU object definition +// +#ifdef NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJGPU { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct RmHalspecOwner __nvoc_base_RmHalspecOwner; + struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; + struct Object *__nvoc_pbase_Object; + struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner; + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; + struct OBJGPU *__nvoc_pbase_OBJGPU; + const GPUCHILDPRESENT *(*__gpuGetChildrenPresent__)(struct OBJGPU *, NvU32 *); + const CLASSDESCRIPTOR *(*__gpuGetClassDescriptorList__)(struct OBJGPU *, NvU32 *); + NV_STATUS (*__gpuClearFbhubPoisonIntrForBug2924523__)(struct OBJGPU *); + NV_STATUS (*__gpuConstructDeviceInfoTable__)(struct OBJGPU *); + NvU64 (*__gpuGetFlaVasSize__)(struct OBJGPU *, NvBool); + NvBool PDB_PROP_GPU_IN_STANDBY; + NvBool PDB_PROP_GPU_IN_HIBERNATE; + NvBool PDB_PROP_GPU_IN_PM_CODEPATH; + NvBool PDB_PROP_GPU_IN_PM_RESUME_CODEPATH; + NvBool PDB_PROP_GPU_STATE_INITIALIZED; + NvBool PDB_PROP_GPU_EMULATION; + NvBool PDB_PROP_GPU_PRIMARY_DEVICE; + NvBool PDB_PROP_GPU_HYBRID_MGPU; + NvBool PDB_PROP_GPU_ALTERNATE_TREE_ENABLED; + NvBool PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS; + NvBool PDB_PROP_GPU_3D_CONTROLLER; + NvBool PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM; + NvBool PDB_PROP_GPU_IS_CONNECTED; + NvBool PDB_PROP_GPU_BROKEN_FB; + NvBool PDB_PROP_GPU_IN_FULLCHIP_RESET; + NvBool PDB_PROP_GPU_IN_SECONDARY_BUS_RESET; + NvBool PDB_PROP_GPU_IN_GC6_RESET; + NvBool PDB_PROP_GPU_IS_GEMINI; + NvBool PDB_PROP_GPU_PERSISTENT_SW_STATE; + NvBool PDB_PROP_GPU_COHERENT_CPU_MAPPING; + NvBool PDB_PROP_GPU_IS_LOST; + NvBool PDB_PROP_GPU_IN_TIMEOUT_RECOVERY; + NvBool PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT; + NvBool PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY; + NvBool PDB_PROP_GPU_ATS_SUPPORTED; + NvBool PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING; + NvBool PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE; + NvBool PDB_PROP_GPU_IS_UEFI; + NvBool PDB_PROP_GPU_ZERO_FB; + NvBool PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE; + NvBool PDB_PROP_GPU_MIG_SUPPORTED; + NvBool PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED; + NvBool PDB_PROP_GPU_SWRL_GRANULAR_LOCKING; + NvBool PDB_PROP_GPU_IN_SLI_LINK_CODEPATH; + NvBool PDB_PROP_GPU_IS_PLX_PRESENT; + NvBool PDB_PROP_GPU_IS_BR03_PRESENT; + NvBool PDB_PROP_GPU_IS_BR04_PRESENT; + NvBool PDB_PROP_GPU_BEHIND_BRIDGE; + NvBool PDB_PROP_GPU_BEHIND_BR03; + NvBool PDB_PROP_GPU_BEHIND_BR04; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED; + NvBool PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY; + NvBool PDB_PROP_GPU_RM_UNLINKED_SLI; + NvBool PDB_PROP_GPU_SLI_LINK_ACTIVE; + NvBool PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST; + NvBool PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH; + NvBool PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL; + NvBool PDB_PROP_GPU_IS_MOBILE; + NvBool PDB_PROP_GPU_RTD3_GC6_ACTIVE; + NvBool PDB_PROP_GPU_FAST_GC6_ACTIVE; + NvBool PDB_PROP_GPU_ACCOUNTING_ON; + NvBool PDB_PROP_GPU_INACCESSIBLE; + NvBool PDB_PROP_GPU_NVLINK_SYSMEM; + NvBool PDB_PROP_GPU_C2C_SYSMEM; + NvBool PDB_PROP_GPU_IN_TCC_MODE; + NvBool 
PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE; + NvBool PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K; + NvBool PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT; + NvBool PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT; + NvBool PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS; + NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU; + NvBool PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA; + NvBool PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA; + NvBool PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED; + NvBool PDB_PROP_GPU_NV_USERMODE_ENABLED; + NvBool PDB_PROP_GPU_IN_FATAL_ERROR; + OS_GPU_INFO *pOsGpuInfo; + OS_RM_CAPS *pOsRmCaps; + NvU32 halImpl; + void *hPci; + ENGINE_EVENT_LIST engineNonstallIntr[52]; + NvBool bIsSOC; + NvU32 gpuInstance; + NvU32 gpuDisabled; + NvU32 gpuId; + NvU32 boardId; + NvU32 deviceInstance; + NvU32 subdeviceInstance; + NvS32 numaNodeId; + _GPU_UUID gpuUuid; + _GPU_PCIE_PEER_CLIQUE pciePeerClique; + NvU32 i2cPortForExtdev; + GPUIDINFO idInfo; + _GPU_CHIP_INFO chipInfo; + GPUBUSINFO busInfo; + GPU_ENGINE_ORDER engineOrder; + GPUCLASSDB classDB; + NvU32 chipId0; + NvU32 chipId1; + NvU32 pmcEnable; + NvU32 pmcRmOwnsIntrMask; + NvBool testIntr; + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *gspSupportedEngines; + NvU32 numCEs; + NvU32 ceFaultMethodBufferSize; + NvBool isVirtual; + NvBool isGspClient; + NvU64 fbLength; + NvU32 instLength; + NvBool instSetViaAttachArg; + NvU32 activeFBIOs; + NvU64 gpuVbiosPostTime; + NvBool bIsCeMapInitialized; + NvBool bIsKCeMapInitialized; + NvU32 uefiScanoutSurfaceSizeInMB; + RmPhysAddr dmaStartAddress; + NvU32 gpuDeviceMapCount; + DEVICE_MAPPING deviceMappings[60]; + PIO_APERTURE pIOApertures[12]; + DEVICE_MAPPING *pDeviceMappingsByDeviceInstance[12]; + void *gpuCfgAddr; + TIMEOUT_DATA timeoutData; + NvU32 computeModeRules; + NvS32 computeModeRefCount; + NvHandle hComputeModeReservation; + NvBool bIsDebugModeEnabled; + NvU32 masterFromSLIConfig; + NvU32 sliStatus; + PENG_INFO_LINK_NODE infoList; + struct OBJOS *pOS; + struct OBJHAL *pHal; + struct KernelBif *pKernelBif; + struct KernelMc *pKernelMc; + struct SwIntr *pSwIntr; + struct KernelMemorySystem *pKernelMemorySystem; + struct MemoryManager *pMemoryManager; + struct KernelDisplay *pKernelDisplay; + struct OBJTMR *pTmr; + struct KernelBus *pKernelBus; + struct KernelGmmu *pKernelGmmu; + struct KernelNvdec *pKernelNvdec; + struct KernelSec2 *pKernelSec2; + struct KernelGsp *pKernelGsp; + struct VirtMemAllocator *pDma; + struct KernelMIGManager *pKernelMIGManager; + struct KernelGraphicsManager *pKernelGraphicsManager; + struct KernelGraphics *pKernelGraphics[8]; + struct KernelPerf *pKernelPerf; + struct KernelRc *pKernelRc; + struct Intr *pIntr; + struct KernelPmu *pKernelPmu; + struct KernelCE *pKCe[10]; + struct KernelFifo *pKernelFifo; + struct OBJUVM *pUvm; + struct NvDebugDump *pNvd; + struct KernelNvlink *pKernelNvlink; + struct OBJGPUMON *pGpuMon; + struct OBJSWENG *pSwEng; + HWBC_LIST *pHWBCList; + GPUCLDATA gpuClData; + _GPU_ENGINE_DB engineDB; + NvU32 engineDBSize; + NvU32 instCacheOverride; + NvS32 numOfMclkLockRequests; + NvU32 netlistNum; + RmCtrlDeferredCmd pRmCtrlDeferredCmd[2]; + ACPI_DATA acpi; + NvU32 activeFifoEventMthdNotifiers; + struct Falcon *constructedFalcons[60]; + NvU32 numConstructedFalcons; + struct GenericKernelFalcon *genericKernelFalcons[60]; + NvU32 numGenericKernelFalcons; + NvU8 *pUserRegisterAccessMap; + NvU8 *pUnrestrictedRegisterAccessMap; + NvU32 userRegisterAccessMapSize; + struct PrereqTracker *pPrereqTracker; + RegisterAccess registerAccess; + NvBool bUseRegisterAccessMap; + NvU32 
*pRegopOffsetScratchBuffer; + NvU32 *pRegopOffsetAddrScratchBuffer; + NvU32 regopScratchBufferMaxOffsets; + _GPU_SRIOV_STATE sriovState; + NvU64 vmmuSegmentSize; + NvHandle hDefaultClientShare; + NvHandle hDefaultClientShareDevice; + NvHandle hDefaultClientShareSubDevice; + NvU32 externalKernelClientCount; + DEVICE_INFO2_TABLE *pDeviceInfoTable; + NvU32 numDeviceInfoEntries; + NvHandle hInternalClient; + NvHandle hInternalDevice; + NvHandle hInternalSubdevice; + struct Subdevice *pCachedSubdevice; + struct RsClient *pCachedRsClient; + RM_API physicalRmApi; + NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo; + NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *boardInfo; + NvBool bBar2MovedByVtd; + NvBool bBar1Is64Bit; + NvBool bSurpriseRemovalSupported; + NvBool bTwoStageRcRecoveryEnabled; + NvBool bReplayableTraceEnabled; + NvBool bInD3Cold; + NvBool bIsSimulation; + NvBool bIsModsAmodel; + NvBool bIsFmodel; + NvBool bIsRtlsim; + NvBool bIsPassthru; + NvBool bIsVirtualWithSriov; + NvBool bStateLoading; + NvBool bStateUnloading; + NvBool bStateLoaded; + NvBool bFullyConstructed; + NvBool bUnifiedMemorySpaceEnabled; + NvBool bSriovEnabled; + NvBool bWarBug200577889SriovHeavyEnabled; + NvBool bCacheOnlyMode; + NvBool bNeed4kPageIsolation; + NvBool bSplitVasManagementServerClientRm; + NvU32 instLocOverrides; + NvU32 instLocOverrides2; + NvU32 instLocOverrides3; + NvU32 instLocOverrides4; + NvBool bInstLoc47bitPaWar; + NvU32 instVprOverrides; + NvU32 optimizeUseCaseOverride; + NvS16 fecsCtxswLogConsumerCount; + NvS16 videoCtxswLogConsumerCount; + EventBufferMap vgpuFecsTraceStagingBindings; + FecsEventBufferBindMultiMap fecsEventBufferBindingsUid; + struct OBJVASPACE *pFabricVAS; + NvBool bPipelinedPteMemEnabled; + NvBool bIsBarPteInSysmemSupported; + NvBool bRegUsesGlobalSurfaceOverrides; + NvBool bClientRmAllocatedCtxBuffer; + NvBool bIterativeMmuWalker; + NvBool bEccPageRetirementWithSliAllowed; + NvBool bVidmemPreservationBrokenBug3172217; + NvBool bInstanceMemoryAlwaysCached; + NvBool bRmProfilingPrivileged; + NvBool bGeforceSmb; + NvBool bIsGeforce; + NvBool bIsQuadro; + NvBool bIsVgx; + NvBool bIsNvidiaNvs; + NvBool bIsTitan; + NvBool bIsTesla; + BRANDING_TYPE brandingCache; + NvBool bComputePolicyTimesliceSupported; + NvBool bGlobalPoisonFuseEnabled; + RmPhysAddr simAccessBufPhysAddr; +}; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU; + +#define __staticCast_OBJGPU(pThis) \ + ((pThis)->__nvoc_pbase_OBJGPU) + +#ifdef __nvoc_gpu_h_disabled +#define __dynamicCast_OBJGPU(pThis) ((OBJGPU*)NULL) +#else //__nvoc_gpu_h_disabled +#define __dynamicCast_OBJGPU(pThis) \ + ((OBJGPU*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPU))) +#endif //__nvoc_gpu_h_disabled + +#define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_CAST +#define PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL_BASE_NAME PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_CAST +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU +#define PDB_PROP_GPU_INACCESSIBLE_BASE_CAST +#define PDB_PROP_GPU_INACCESSIBLE_BASE_NAME PDB_PROP_GPU_INACCESSIBLE +#define PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_CAST +#define 
PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_DO_NOT_CHECK_REG_ACCESS_IN_PM_CODEPATH +#define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_CAST +#define PDB_PROP_GPU_IN_FATAL_ERROR_BASE_NAME PDB_PROP_GPU_IN_FATAL_ERROR +#define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_IN_PM_RESUME_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_RESUME_CODEPATH +#define PDB_PROP_GPU_IN_STANDBY_BASE_CAST +#define PDB_PROP_GPU_IN_STANDBY_BASE_NAME PDB_PROP_GPU_IN_STANDBY +#define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED +#define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_CAST +#define PDB_PROP_GPU_COHERENT_CPU_MAPPING_BASE_NAME PDB_PROP_GPU_COHERENT_CPU_MAPPING +#define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_CAST +#define PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY_BASE_NAME PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY +#define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_SLI_LINK_ACTIVE_BASE_NAME PDB_PROP_GPU_SLI_LINK_ACTIVE +#define PDB_PROP_GPU_IN_TCC_MODE_BASE_CAST +#define PDB_PROP_GPU_IN_TCC_MODE_BASE_NAME PDB_PROP_GPU_IN_TCC_MODE +#define PDB_PROP_GPU_C2C_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_C2C_SYSMEM_BASE_NAME PDB_PROP_GPU_C2C_SYSMEM +#define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_CAST +#define PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING_BASE_NAME PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING +#define PDB_PROP_GPU_IN_GC6_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_GC6_RESET_BASE_NAME PDB_PROP_GPU_IN_GC6_RESET +#define PDB_PROP_GPU_HYBRID_MGPU_BASE_CAST +#define PDB_PROP_GPU_HYBRID_MGPU_BASE_NAME PDB_PROP_GPU_HYBRID_MGPU +#define PDB_PROP_GPU_3D_CONTROLLER_BASE_CAST +#define PDB_PROP_GPU_3D_CONTROLLER_BASE_NAME PDB_PROP_GPU_3D_CONTROLLER +#define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE +#define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED_BASE_NAME PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED +#define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_CAST +#define PDB_PROP_GPU_RM_UNLINKED_SLI_BASE_NAME PDB_PROP_GPU_RM_UNLINKED_SLI +#define PDB_PROP_GPU_IS_UEFI_BASE_CAST +#define PDB_PROP_GPU_IS_UEFI_BASE_NAME PDB_PROP_GPU_IS_UEFI +#define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_SECONDARY_BUS_RESET_BASE_NAME PDB_PROP_GPU_IN_SECONDARY_BUS_RESET +#define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_CAST +#define PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT_BASE_NAME PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT +#define PDB_PROP_GPU_IS_CONNECTED_BASE_CAST +#define PDB_PROP_GPU_IS_CONNECTED_BASE_NAME PDB_PROP_GPU_IS_CONNECTED +#define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_PLX_PRESENT_BASE_NAME PDB_PROP_GPU_IS_PLX_PRESENT +#define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_NVLINK_SYSMEM_BASE_NAME PDB_PROP_GPU_NVLINK_SYSMEM +#define PDB_PROP_GPU_IS_MOBILE_BASE_CAST +#define PDB_PROP_GPU_IS_MOBILE_BASE_NAME PDB_PROP_GPU_IS_MOBILE +#define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_RTD3_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_RTD3_GC6_ACTIVE +#define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_CAST +#define PDB_PROP_GPU_ALTERNATE_TREE_ENABLED_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_ENABLED +#define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_CAST +#define PDB_PROP_GPU_PERSISTENT_SW_STATE_BASE_NAME PDB_PROP_GPU_PERSISTENT_SW_STATE +#define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_CAST 
+#define PDB_PROP_GPU_IN_PM_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_PM_CODEPATH +#define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_CAST +#define PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT_BASE_NAME PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED +#define PDB_PROP_GPU_BEHIND_BR03_BASE_CAST +#define PDB_PROP_GPU_BEHIND_BR03_BASE_NAME PDB_PROP_GPU_BEHIND_BR03 +#define PDB_PROP_GPU_BEHIND_BR04_BASE_CAST +#define PDB_PROP_GPU_BEHIND_BR04_BASE_NAME PDB_PROP_GPU_BEHIND_BR04 +#define PDB_PROP_GPU_MIG_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_MIG_SUPPORTED_BASE_NAME PDB_PROP_GPU_MIG_SUPPORTED +#define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_CAST +#define PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE_BASE_NAME PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE +#define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_CAST +#define PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE_BASE_NAME PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE +#define PDB_PROP_GPU_ACCOUNTING_ON_BASE_CAST +#define PDB_PROP_GPU_ACCOUNTING_ON_BASE_NAME PDB_PROP_GPU_ACCOUNTING_ON +#define PDB_PROP_GPU_IN_HIBERNATE_BASE_CAST +#define PDB_PROP_GPU_IN_HIBERNATE_BASE_NAME PDB_PROP_GPU_IN_HIBERNATE +#define PDB_PROP_GPU_BROKEN_FB_BASE_CAST +#define PDB_PROP_GPU_BROKEN_FB_BASE_NAME PDB_PROP_GPU_BROKEN_FB +#define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_CAST +#define PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT_BASE_NAME PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT +#define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_CAST +#define PDB_PROP_GPU_IN_TIMEOUT_RECOVERY_BASE_NAME PDB_PROP_GPU_IN_TIMEOUT_RECOVERY +#define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_CAST +#define PDB_PROP_GPU_FAST_GC6_ACTIVE_BASE_NAME PDB_PROP_GPU_FAST_GC6_ACTIVE +#define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_CAST +#define PDB_PROP_GPU_IN_FULLCHIP_RESET_BASE_NAME PDB_PROP_GPU_IN_FULLCHIP_RESET +#define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_CAST +#define PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA_BASE_NAME PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_CAST +#define PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA_BASE_NAME PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VSGA +#define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_CAST +#define PDB_PROP_GPU_IN_SLI_LINK_CODEPATH_BASE_NAME PDB_PROP_GPU_IN_SLI_LINK_CODEPATH +#define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_BR03_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR03_PRESENT +#define PDB_PROP_GPU_IS_GEMINI_BASE_CAST +#define PDB_PROP_GPU_IS_GEMINI_BASE_NAME PDB_PROP_GPU_IS_GEMINI +#define PDB_PROP_GPU_STATE_INITIALIZED_BASE_CAST +#define PDB_PROP_GPU_STATE_INITIALIZED_BASE_NAME PDB_PROP_GPU_STATE_INITIALIZED +#define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_CAST +#define PDB_PROP_GPU_NV_USERMODE_ENABLED_BASE_NAME PDB_PROP_GPU_NV_USERMODE_ENABLED +#define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_CAST +#define PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS_BASE_NAME PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS +#define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_CAST +#define PDB_PROP_GPU_IS_BR04_PRESENT_BASE_NAME PDB_PROP_GPU_IS_BR04_PRESENT +#define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_CAST +#define PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM_BASE_NAME PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM +#define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_CAST +#define PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED_BASE_NAME PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED +#define 
PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED +#define PDB_PROP_GPU_ZERO_FB_BASE_CAST +#define PDB_PROP_GPU_ZERO_FB_BASE_NAME PDB_PROP_GPU_ZERO_FB +#define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_CAST +#define PDB_PROP_GPU_SWRL_GRANULAR_LOCKING_BASE_NAME PDB_PROP_GPU_SWRL_GRANULAR_LOCKING +#define PDB_PROP_GPU_ATS_SUPPORTED_BASE_CAST +#define PDB_PROP_GPU_ATS_SUPPORTED_BASE_NAME PDB_PROP_GPU_ATS_SUPPORTED +#define PDB_PROP_GPU_EMULATION_BASE_CAST +#define PDB_PROP_GPU_EMULATION_BASE_NAME PDB_PROP_GPU_EMULATION +#define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_CAST +#define PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS_BASE_NAME PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS +#define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_CAST +#define PDB_PROP_GPU_PRIMARY_DEVICE_BASE_NAME PDB_PROP_GPU_PRIMARY_DEVICE +#define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_CAST +#define PDB_PROP_GPU_BEHIND_BRIDGE_BASE_NAME PDB_PROP_GPU_BEHIND_BRIDGE +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_CAST +#define PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY_BASE_NAME PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY +#define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_CAST +#define PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST_BASE_NAME PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST +#define PDB_PROP_GPU_IS_LOST_BASE_CAST +#define PDB_PROP_GPU_IS_LOST_BASE_NAME PDB_PROP_GPU_IS_LOST +#define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_CAST +#define PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K_BASE_NAME PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K + +NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU**, Dynamic*, NvU32, NvU32, NvU32, NvU32, RM_RUNTIME_VARIANT, NvU32, NvU32 arg_gpuInstance); +#define __objCreate_OBJGPU(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance) \ + __nvoc_objCreate_OBJGPU((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance) + +#define gpuGetChildrenPresent(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries) +#define gpuGetChildrenPresent_HAL(pGpu, pNumEntries) gpuGetChildrenPresent_DISPATCH(pGpu, pNumEntries) +#define gpuGetClassDescriptorList(pGpu, arg0) gpuGetClassDescriptorList_DISPATCH(pGpu, arg0) +#define gpuGetClassDescriptorList_HAL(pGpu, arg0) gpuGetClassDescriptorList_DISPATCH(pGpu, arg0) +#define gpuClearFbhubPoisonIntrForBug2924523(pGpu) gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(pGpu) +#define gpuClearFbhubPoisonIntrForBug2924523_HAL(pGpu) gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(pGpu) +#define gpuConstructDeviceInfoTable(pGpu) gpuConstructDeviceInfoTable_DISPATCH(pGpu) +#define gpuConstructDeviceInfoTable_HAL(pGpu) gpuConstructDeviceInfoTable_DISPATCH(pGpu) +#define gpuGetFlaVasSize(pGpu, bNvswitchVirtualization) gpuGetFlaVasSize_DISPATCH(pGpu, bNvswitchVirtualization) +#define gpuGetFlaVasSize_HAL(pGpu, bNvswitchVirtualization) gpuGetFlaVasSize_DISPATCH(pGpu, bNvswitchVirtualization) +static inline NV_STATUS gpuConstructPhysical_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuConstructPhysical(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_gpu_h_disabled +#define gpuConstructPhysical(pGpu) gpuConstructPhysical_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuConstructPhysical_HAL(pGpu) gpuConstructPhysical(pGpu) + +static inline void gpuDestructPhysical_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestructPhysical(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestructPhysical(pGpu) gpuDestructPhysical_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestructPhysical_HAL(pGpu) gpuDestructPhysical(pGpu) + +NV_STATUS gpuStatePreInit_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStatePreInit(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStatePreInit(pGpu) gpuStatePreInit_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuStatePreInit_HAL(pGpu) gpuStatePreInit(pGpu) + +NV_STATUS gpuStateLoad_IMPL(struct OBJGPU *pGpu, NvU32 arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateLoad(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateLoad(pGpu, arg0) gpuStateLoad_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuStateLoad_HAL(pGpu, arg0) gpuStateLoad(pGpu, arg0) + +NV_STATUS gpuStateDestroy_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateDestroy(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateDestroy(pGpu) gpuStateDestroy_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuStateDestroy_HAL(pGpu) gpuStateDestroy(pGpu) + +static inline NV_STATUS gpuApplyOverrides_46f6a7(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuApplyOverrides(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuApplyOverrides(pGpu, arg0, arg1) gpuApplyOverrides_46f6a7(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuApplyOverrides_HAL(pGpu, arg0, arg1) gpuApplyOverrides(pGpu, arg0, arg1) + +static inline NV_STATUS gpuInitDevinitOverridesFromRegistry_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitDevinitOverridesFromRegistry(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitDevinitOverridesFromRegistry(pGpu) gpuInitDevinitOverridesFromRegistry_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitDevinitOverridesFromRegistry_HAL(pGpu) gpuInitDevinitOverridesFromRegistry(pGpu) + +static inline NV_STATUS gpuApplyDevinitReg032Override_46f6a7(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuApplyDevinitReg032Override(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuApplyDevinitReg032Override(pGpu, arg0, arg1) 
gpuApplyDevinitReg032Override_46f6a7(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuApplyDevinitReg032Override_HAL(pGpu, arg0, arg1) gpuApplyDevinitReg032Override(pGpu, arg0, arg1) + +static inline NV_STATUS gpuCheckPCIIDMismatch_56cd7a(struct OBJGPU *pGpu, struct OBJVBIOS *arg0) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuCheckPCIIDMismatch(struct OBJGPU *pGpu, struct OBJVBIOS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckPCIIDMismatch(pGpu, arg0) gpuCheckPCIIDMismatch_56cd7a(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuCheckPCIIDMismatch_HAL(pGpu, arg0) gpuCheckPCIIDMismatch(pGpu, arg0) + +static inline NvBool gpuCheckGpuIDMismatch_491d52(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckGpuIDMismatch(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckGpuIDMismatch(pGpu, arg0, arg1) gpuCheckGpuIDMismatch_491d52(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuCheckGpuIDMismatch_HAL(pGpu, arg0, arg1) gpuCheckGpuIDMismatch(pGpu, arg0, arg1) + +NV_STATUS gpuGetNameString_KERNEL(struct OBJGPU *pGpu, NvU32 arg0, void *arg1); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetNameString(struct OBJGPU *pGpu, NvU32 arg0, void *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetNameString(pGpu, arg0, arg1) gpuGetNameString_KERNEL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetNameString_HAL(pGpu, arg0, arg1) gpuGetNameString(pGpu, arg0, arg1) + +NV_STATUS gpuGetShortNameString_KERNEL(struct OBJGPU *pGpu, NvU8 *arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetShortNameString(struct OBJGPU *pGpu, NvU8 *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetShortNameString(pGpu, arg0) gpuGetShortNameString_KERNEL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetShortNameString_HAL(pGpu, arg0) gpuGetShortNameString(pGpu, arg0) + +void gpuInitBranding_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuInitBranding(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuInitBranding(pGpu) gpuInitBranding_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitBranding_HAL(pGpu) gpuInitBranding(pGpu) + +BRANDING_TYPE gpuDetectBranding_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline BRANDING_TYPE gpuDetectBranding(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + BRANDING_TYPE ret; + portMemSet(&ret, 0, sizeof(BRANDING_TYPE)); + return ret; +} +#else //__nvoc_gpu_h_disabled +#define gpuDetectBranding(pGpu) gpuDetectBranding_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDetectBranding_HAL(pGpu) gpuDetectBranding(pGpu) + +COMPUTE_BRANDING_TYPE gpuDetectComputeBranding_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline COMPUTE_BRANDING_TYPE gpuDetectComputeBranding(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + COMPUTE_BRANDING_TYPE ret; + 
portMemSet(&ret, 0, sizeof(COMPUTE_BRANDING_TYPE)); + return ret; +} +#else //__nvoc_gpu_h_disabled +#define gpuDetectComputeBranding(pGpu) gpuDetectComputeBranding_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDetectComputeBranding_HAL(pGpu) gpuDetectComputeBranding(pGpu) + +BRANDING_TYPE gpuDetectVgxBranding_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline BRANDING_TYPE gpuDetectVgxBranding(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + BRANDING_TYPE ret; + portMemSet(&ret, 0, sizeof(BRANDING_TYPE)); + return ret; +} +#else //__nvoc_gpu_h_disabled +#define gpuDetectVgxBranding(pGpu) gpuDetectVgxBranding_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDetectVgxBranding_HAL(pGpu) gpuDetectVgxBranding(pGpu) + +static inline void gpuDeterminePersistantIllumSettings_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDeterminePersistantIllumSettings(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDeterminePersistantIllumSettings(pGpu) gpuDeterminePersistantIllumSettings_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDeterminePersistantIllumSettings_HAL(pGpu) gpuDeterminePersistantIllumSettings(pGpu) + +static inline NV_STATUS gpuInitSliIllumination_46f6a7(struct OBJGPU *pGpu) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitSliIllumination(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitSliIllumination(pGpu) gpuInitSliIllumination_46f6a7(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitSliIllumination_HAL(pGpu) gpuInitSliIllumination(pGpu) + +NV_STATUS gpuBuildGenericKernelFalconList_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBuildGenericKernelFalconList(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBuildGenericKernelFalconList(pGpu) gpuBuildGenericKernelFalconList_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuBuildGenericKernelFalconList_HAL(pGpu) gpuBuildGenericKernelFalconList(pGpu) + +void gpuDestroyGenericKernelFalconList_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyGenericKernelFalconList(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyGenericKernelFalconList(pGpu) gpuDestroyGenericKernelFalconList_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestroyGenericKernelFalconList_HAL(pGpu) gpuDestroyGenericKernelFalconList(pGpu) + +struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline struct GenericKernelFalcon *gpuGetGenericKernelFalconForEngine(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGenericKernelFalconForEngine(pGpu, arg0) gpuGetGenericKernelFalconForEngine_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetGenericKernelFalconForEngine_HAL(pGpu, arg0) gpuGetGenericKernelFalconForEngine(pGpu, arg0) + +void gpuRegisterGenericKernelFalconIntrService_IMPL(struct OBJGPU *pGpu, void *pRecords); + 
+#ifdef __nvoc_gpu_h_disabled +static inline void gpuRegisterGenericKernelFalconIntrService(struct OBJGPU *pGpu, void *pRecords) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService_IMPL(pGpu, pRecords) +#endif //__nvoc_gpu_h_disabled + +#define gpuRegisterGenericKernelFalconIntrService_HAL(pGpu, pRecords) gpuRegisterGenericKernelFalconIntrService(pGpu, pRecords) + +static inline void gpuGetHwDefaults_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuGetHwDefaults(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuGetHwDefaults(pGpu) gpuGetHwDefaults_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetHwDefaults_HAL(pGpu) gpuGetHwDefaults(pGpu) + +RmPhysAddr gpuGetDmaEndAddress_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline RmPhysAddr gpuGetDmaEndAddress(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDmaEndAddress(pGpu) gpuGetDmaEndAddress_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetDmaEndAddress_HAL(pGpu) gpuGetDmaEndAddress(pGpu) + +static inline NV_STATUS gpuMarkDeviceForReset_46f6a7(struct OBJGPU *pGpu) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuMarkDeviceForReset(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuMarkDeviceForReset(pGpu) gpuMarkDeviceForReset_46f6a7(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuMarkDeviceForReset_HAL(pGpu) gpuMarkDeviceForReset(pGpu) + +static inline NV_STATUS gpuMarkDeviceForDrainAndReset_46f6a7(struct OBJGPU *pGpu) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuMarkDeviceForDrainAndReset(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuMarkDeviceForDrainAndReset(pGpu) gpuMarkDeviceForDrainAndReset_46f6a7(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuMarkDeviceForDrainAndReset_HAL(pGpu) gpuMarkDeviceForDrainAndReset(pGpu) + +static inline NvU32 gpuGetSliFingerPinsetMask_4a4dee(struct OBJGPU *pGpu) { + return 0; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetSliFingerPinsetMask(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetSliFingerPinsetMask(pGpu) gpuGetSliFingerPinsetMask_4a4dee(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetSliFingerPinsetMask_HAL(pGpu) gpuGetSliFingerPinsetMask(pGpu) + +static inline NV_STATUS gpuPrivSecInitRegistryOverrides_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPrivSecInitRegistryOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuPrivSecInitRegistryOverrides(pGpu) gpuPrivSecInitRegistryOverrides_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuPrivSecInitRegistryOverrides_HAL(pGpu) gpuPrivSecInitRegistryOverrides(pGpu) + +static inline void 
gpuDestroyOverrides_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyOverrides(pGpu) gpuDestroyOverrides_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestroyOverrides_HAL(pGpu) gpuDestroyOverrides(pGpu) + +NV_STATUS gpuWriteBusConfigReg_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 value); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuWriteBusConfigReg(struct OBJGPU *pGpu, NvU32 index, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuWriteBusConfigReg(pGpu, index, value) gpuWriteBusConfigReg_GM107(pGpu, index, value) +#endif //__nvoc_gpu_h_disabled + +#define gpuWriteBusConfigReg_HAL(pGpu, index, value) gpuWriteBusConfigReg(pGpu, index, value) + +NV_STATUS gpuReadBusConfigReg_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 *data); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadBusConfigReg(struct OBJGPU *pGpu, NvU32 index, NvU32 *data) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadBusConfigReg(pGpu, index, data) gpuReadBusConfigReg_GM107(pGpu, index, data) +#endif //__nvoc_gpu_h_disabled + +#define gpuReadBusConfigReg_HAL(pGpu, index, data) gpuReadBusConfigReg(pGpu, index, data) + +NV_STATUS gpuReadBusConfigRegEx_GM107(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadBusConfigRegEx(struct OBJGPU *pGpu, NvU32 index, NvU32 *data, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadBusConfigRegEx(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx_GM107(pGpu, index, data, pThreadState) +#endif //__nvoc_gpu_h_disabled + +#define gpuReadBusConfigRegEx_HAL(pGpu, index, data, pThreadState) gpuReadBusConfigRegEx(pGpu, index, data, pThreadState) + +NV_STATUS gpuReadFunctionConfigReg_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadFunctionConfigReg(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 *data) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadFunctionConfigReg(pGpu, function, reg, data) gpuReadFunctionConfigReg_GM107(pGpu, function, reg, data) +#endif //__nvoc_gpu_h_disabled + +#define gpuReadFunctionConfigReg_HAL(pGpu, function, reg, data) gpuReadFunctionConfigReg(pGpu, function, reg, data) + +NV_STATUS gpuWriteFunctionConfigReg_GM107(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuWriteFunctionConfigReg(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuWriteFunctionConfigReg(pGpu, function, reg, data) gpuWriteFunctionConfigReg_GM107(pGpu, function, reg, data) +#endif //__nvoc_gpu_h_disabled + +#define gpuWriteFunctionConfigReg_HAL(pGpu, function, reg, data) gpuWriteFunctionConfigReg(pGpu, function, reg, data) + +NV_STATUS gpuWriteFunctionConfigRegEx_GM107(struct OBJGPU *pGpu, NvU32 
function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuWriteFunctionConfigRegEx(struct OBJGPU *pGpu, NvU32 function, NvU32 reg, NvU32 data, THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx_GM107(pGpu, function, reg, data, pThreadState) +#endif //__nvoc_gpu_h_disabled + +#define gpuWriteFunctionConfigRegEx_HAL(pGpu, function, reg, data, pThreadState) gpuWriteFunctionConfigRegEx(pGpu, function, reg, data, pThreadState) + +NV_STATUS gpuSetPower_GM107(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetPower(struct OBJGPU *pGpu, NvU32 arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetPower(pGpu, arg1, arg2, arg3) gpuSetPower_GM107(pGpu, arg1, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +#define gpuSetPower_HAL(pGpu, arg1, arg2, arg3) gpuSetPower(pGpu, arg1, arg2, arg3) + +void gpuGetIdInfo_GM107(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuGetIdInfo(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuGetIdInfo(pGpu) gpuGetIdInfo_GM107(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetIdInfo_HAL(pGpu) gpuGetIdInfo(pGpu) + +static inline void gpuUpdateIdInfo_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuUpdateIdInfo(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuUpdateIdInfo(pGpu) gpuUpdateIdInfo_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuUpdateIdInfo_HAL(pGpu) gpuUpdateIdInfo(pGpu) + +static inline NvU32 gpuGetDeviceIDList_4a4dee(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) { + return 0; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetDeviceIDList(struct OBJGPU *pGpu, DEVICE_ID_MAPPING **arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDeviceIDList(pGpu, arg0) gpuGetDeviceIDList_4a4dee(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetDeviceIDList_HAL(pGpu, arg0) gpuGetDeviceIDList(pGpu, arg0) + +NV_STATUS gpuGenGidData_FWCLIENT(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGenGidData(struct OBJGPU *pGpu, NvU8 *pGidData, NvU32 gidSize, NvU32 gidFlags) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGenGidData(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData_FWCLIENT(pGpu, pGidData, gidSize, gidFlags) +#endif //__nvoc_gpu_h_disabled + +#define gpuGenGidData_HAL(pGpu, pGidData, gidSize, gidFlags) gpuGenGidData(pGpu, pGidData, gidSize, gidFlags) + +NvU8 gpuGetChipSubRev_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU8 gpuGetChipSubRev(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetChipSubRev(pGpu) gpuGetChipSubRev_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetChipSubRev_HAL(pGpu) 
gpuGetChipSubRev(pGpu) + +NvU32 gpuGetEmulationRev1_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetEmulationRev1(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetEmulationRev1(pGpu) gpuGetEmulationRev1_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetEmulationRev1_HAL(pGpu) gpuGetEmulationRev1(pGpu) + +static inline NV_STATUS gpuPerformUniversalValidation_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPerformUniversalValidation(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuPerformUniversalValidation(pGpu) gpuPerformUniversalValidation_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuPerformUniversalValidation_HAL(pGpu) gpuPerformUniversalValidation(pGpu) + +NvU32 gpuGetVirtRegPhysOffset_TU102(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetVirtRegPhysOffset(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetVirtRegPhysOffset(pGpu) gpuGetVirtRegPhysOffset_TU102(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetVirtRegPhysOffset_HAL(pGpu) gpuGetVirtRegPhysOffset(pGpu) + +NV_STATUS gpuGetRegBaseOffset_FWCLIENT(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetRegBaseOffset(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetRegBaseOffset(pGpu, arg0, arg1) gpuGetRegBaseOffset_FWCLIENT(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetRegBaseOffset_HAL(pGpu, arg0, arg1) gpuGetRegBaseOffset(pGpu, arg0, arg1) + +void gpuHandleSanityCheckRegReadError_GM107(struct OBJGPU *pGpu, NvU32 addr, NvU32 value); + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuHandleSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 addr, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuHandleSanityCheckRegReadError(pGpu, addr, value) gpuHandleSanityCheckRegReadError_GM107(pGpu, addr, value) +#endif //__nvoc_gpu_h_disabled + +#define gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value) gpuHandleSanityCheckRegReadError(pGpu, addr, value) + +static inline void gpuGetSanityCheckRegReadError_b3696a(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuGetSanityCheckRegReadError(struct OBJGPU *pGpu, NvU32 value, const char **pErrorString) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuGetSanityCheckRegReadError(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError_b3696a(pGpu, value, pErrorString) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetSanityCheckRegReadError_HAL(pGpu, value, pErrorString) gpuGetSanityCheckRegReadError(pGpu, value, pErrorString) + +static inline NV_STATUS gpuSanityCheckVirtRegAccess_56cd7a(struct OBJGPU *pGpu, NvU32 arg0) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckVirtRegAccess(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_gpu_h_disabled +#define gpuSanityCheckVirtRegAccess(pGpu, arg0) gpuSanityCheckVirtRegAccess_56cd7a(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuSanityCheckVirtRegAccess_HAL(pGpu, arg0) gpuSanityCheckVirtRegAccess(pGpu, arg0) + +NV_STATUS gpuInitRegistryOverrides_KERNEL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitRegistryOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitRegistryOverrides(pGpu) gpuInitRegistryOverrides_KERNEL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitRegistryOverrides_HAL(pGpu) gpuInitRegistryOverrides(pGpu) + +NV_STATUS gpuInitInstLocOverrides_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitInstLocOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitInstLocOverrides(pGpu) gpuInitInstLocOverrides_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitInstLocOverrides_HAL(pGpu) gpuInitInstLocOverrides(pGpu) + +const GPUCHILDORDER *gpuGetChildrenOrder_GM200(struct OBJGPU *pGpu, NvU32 *pNumEntries); + +#ifdef __nvoc_gpu_h_disabled +static inline const GPUCHILDORDER *gpuGetChildrenOrder(struct OBJGPU *pGpu, NvU32 *pNumEntries) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetChildrenOrder(pGpu, pNumEntries) gpuGetChildrenOrder_GM200(pGpu, pNumEntries) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetChildrenOrder_HAL(pGpu, pNumEntries) gpuGetChildrenOrder(pGpu, pNumEntries) + +NvU32 gpuGetPhysAddrWidth_TU102(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetPhysAddrWidth(struct OBJGPU *pGpu, NV_ADDRESS_SPACE arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetPhysAddrWidth(pGpu, arg0) gpuGetPhysAddrWidth_TU102(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetPhysAddrWidth_HAL(pGpu, arg0) gpuGetPhysAddrWidth(pGpu, arg0) + +NV_STATUS gpuInitSriov_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitSriov(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitSriov(pGpu) gpuInitSriov_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuInitSriov_HAL(pGpu) gpuInitSriov(pGpu) + +NV_STATUS gpuDeinitSriov_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeinitSriov(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeinitSriov(pGpu) gpuDeinitSriov_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDeinitSriov_HAL(pGpu) gpuDeinitSriov(pGpu) + +static inline NV_STATUS gpuCreateDefaultClientShare_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuCreateDefaultClientShare(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuCreateDefaultClientShare(pGpu) gpuCreateDefaultClientShare_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuCreateDefaultClientShare_HAL(pGpu) 
gpuCreateDefaultClientShare(pGpu) + +static inline void gpuDestroyDefaultClientShare_b3696a(struct OBJGPU *pGpu) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyDefaultClientShare(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyDefaultClientShare(pGpu) gpuDestroyDefaultClientShare_b3696a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuDestroyDefaultClientShare_HAL(pGpu) gpuDestroyDefaultClientShare(pGpu) + +NvU32 gpuGetActiveFBIOs_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetActiveFBIOs(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetActiveFBIOs(pGpu) gpuGetActiveFBIOs_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetActiveFBIOs_HAL(pGpu) gpuGetActiveFBIOs(pGpu) + +static inline NvBool gpuIsDebuggerActive_8031b9(struct OBJGPU *pGpu) { + return pGpu->bIsDebugModeEnabled; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsDebuggerActive(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsDebuggerActive(pGpu) gpuIsDebuggerActive_8031b9(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuIsDebuggerActive_HAL(pGpu) gpuIsDebuggerActive(pGpu) + +NV_STATUS gpuExecGrCtxRegops_GK104(struct OBJGPU *pGpu, struct Graphics *arg0, struct KernelChannel *arg1, NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount, RMTIMEOUT *pTimeout, NvBool bStopCtxsw); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuExecGrCtxRegops(struct OBJGPU *pGpu, struct Graphics *arg0, struct KernelChannel *arg1, NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount, RMTIMEOUT *pTimeout, NvBool bStopCtxsw) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuExecGrCtxRegops(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) gpuExecGrCtxRegops_GK104(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) +#endif //__nvoc_gpu_h_disabled + +#define gpuExecGrCtxRegops_HAL(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) gpuExecGrCtxRegops(pGpu, arg0, arg1, pRegOps, regOpCount, pTimeout, bStopCtxsw) + +NvU32 gpuReadBAR1Size_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuReadBAR1Size(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadBAR1Size(pGpu) gpuReadBAR1Size_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuReadBAR1Size_HAL(pGpu) gpuReadBAR1Size(pGpu) + +NvBool gpuCheckPageRetirementSupport_GSPCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckPageRetirementSupport(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckPageRetirementSupport(pGpu) gpuCheckPageRetirementSupport_GSPCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuCheckPageRetirementSupport_HAL(pGpu) gpuCheckPageRetirementSupport(pGpu) + +NvBool gpuIsInternalSku_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsInternalSku(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsInternalSku(pGpu) 
gpuIsInternalSku_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuIsInternalSku_HAL(pGpu) gpuIsInternalSku(pGpu) + +static inline NvBool gpuCheckIsP2PAllocated_491d52(struct OBJGPU *pGpu) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckIsP2PAllocated(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckIsP2PAllocated(pGpu) gpuCheckIsP2PAllocated_491d52(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuCheckIsP2PAllocated_HAL(pGpu) gpuCheckIsP2PAllocated(pGpu) + +NV_STATUS gpuVerifyExistence_IMPL(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuVerifyExistence(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuVerifyExistence(pGpu) gpuVerifyExistence_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuVerifyExistence_HAL(pGpu) gpuVerifyExistence(pGpu) + +static inline void gpuResetVFRegisters_b3696a(struct OBJGPU *pGpu, NvU32 gfid) { + return; +} + +#ifdef __nvoc_gpu_h_disabled +static inline void gpuResetVFRegisters(struct OBJGPU *pGpu, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuResetVFRegisters(pGpu, gfid) gpuResetVFRegisters_b3696a(pGpu, gfid) +#endif //__nvoc_gpu_h_disabled + +#define gpuResetVFRegisters_HAL(pGpu, gfid) gpuResetVFRegisters(pGpu, gfid) + +NvU32 gpuGetLitterValues_FWCLIENT(struct OBJGPU *pGpu, NvU32 index); + +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetLitterValues(struct OBJGPU *pGpu, NvU32 index) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetLitterValues(pGpu, index) gpuGetLitterValues_FWCLIENT(pGpu, index) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetLitterValues_HAL(pGpu, index) gpuGetLitterValues(pGpu, index) + +static inline NvBool gpuIsAtsSupportedWithSmcMemPartitioning_491d52(struct OBJGPU *pGpu) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsAtsSupportedWithSmcMemPartitioning(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsAtsSupportedWithSmcMemPartitioning(pGpu) gpuIsAtsSupportedWithSmcMemPartitioning_491d52(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuIsAtsSupportedWithSmcMemPartitioning_HAL(pGpu) gpuIsAtsSupportedWithSmcMemPartitioning(pGpu) + +NvBool gpuIsGlobalPoisonFuseEnabled_FWCLIENT(struct OBJGPU *pGpu); + +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsGlobalPoisonFuseEnabled(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsGlobalPoisonFuseEnabled(pGpu) gpuIsGlobalPoisonFuseEnabled_FWCLIENT(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define gpuIsGlobalPoisonFuseEnabled_HAL(pGpu) gpuIsGlobalPoisonFuseEnabled(pGpu) + +static inline NV_STATUS gpuSetCacheOnlyModeOverrides_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetCacheOnlyModeOverrides(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetCacheOnlyModeOverrides(pGpu) gpuSetCacheOnlyModeOverrides_56cd7a(pGpu) +#endif //__nvoc_gpu_h_disabled + +#define 
gpuSetCacheOnlyModeOverrides_HAL(pGpu) gpuSetCacheOnlyModeOverrides(pGpu) + +NV_STATUS gpuGetCeFaultMethodBufferSize_KERNEL(struct OBJGPU *arg0, NvU32 *arg1); + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetCeFaultMethodBufferSize(struct OBJGPU *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetCeFaultMethodBufferSize(arg0, arg1) gpuGetCeFaultMethodBufferSize_KERNEL(arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +#define gpuGetCeFaultMethodBufferSize_HAL(arg0, arg1) gpuGetCeFaultMethodBufferSize(arg0, arg1) + +static inline NV_STATUS gpuSetVFBarSizes_56cd7a(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) { + return NV_OK; +} + +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetVFBarSizes(struct OBJGPU *pGpu, NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetVFBarSizes(pGpu, arg0) gpuSetVFBarSizes_56cd7a(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#define gpuSetVFBarSizes_HAL(pGpu, arg0) gpuSetVFBarSizes(pGpu, arg0) + +const GPUCHILDPRESENT *gpuGetChildrenPresent_TU102(struct OBJGPU *pGpu, NvU32 *pNumEntries); + +const GPUCHILDPRESENT *gpuGetChildrenPresent_TU104(struct OBJGPU *pGpu, NvU32 *pNumEntries); + +const GPUCHILDPRESENT *gpuGetChildrenPresent_TU106(struct OBJGPU *pGpu, NvU32 *pNumEntries); + +const GPUCHILDPRESENT *gpuGetChildrenPresent_GA100(struct OBJGPU *pGpu, NvU32 *pNumEntries); + +const GPUCHILDPRESENT *gpuGetChildrenPresent_GA102(struct OBJGPU *pGpu, NvU32 *pNumEntries); + +static inline const GPUCHILDPRESENT *gpuGetChildrenPresent_DISPATCH(struct OBJGPU *pGpu, NvU32 *pNumEntries) { + return pGpu->__gpuGetChildrenPresent__(pGpu, pNumEntries); +} + +const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU102(struct OBJGPU *pGpu, NvU32 *arg0); + +const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU104(struct OBJGPU *pGpu, NvU32 *arg0); + +const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU106(struct OBJGPU *pGpu, NvU32 *arg0); + +const CLASSDESCRIPTOR *gpuGetClassDescriptorList_TU117(struct OBJGPU *pGpu, NvU32 *arg0); + +const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GA100(struct OBJGPU *pGpu, NvU32 *arg0); + +const CLASSDESCRIPTOR *gpuGetClassDescriptorList_GA102(struct OBJGPU *pGpu, NvU32 *arg0); + +static inline const CLASSDESCRIPTOR *gpuGetClassDescriptorList_DISPATCH(struct OBJGPU *pGpu, NvU32 *arg0) { + return pGpu->__gpuGetClassDescriptorList__(pGpu, arg0); +} + +NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_GA100_KERNEL(struct OBJGPU *pGpu); + +static inline NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_GA100_PHYSICAL(struct OBJGPU *pGpu); + +static inline NV_STATUS gpuClearFbhubPoisonIntrForBug2924523_DISPATCH(struct OBJGPU *pGpu) { + return pGpu->__gpuClearFbhubPoisonIntrForBug2924523__(pGpu); +} + +NV_STATUS gpuConstructDeviceInfoTable_FWCLIENT(struct OBJGPU *pGpu); + +static inline NV_STATUS gpuConstructDeviceInfoTable_56cd7a(struct OBJGPU *pGpu) { + return NV_OK; +} + +NV_STATUS gpuConstructDeviceInfoTable_GA100(struct OBJGPU *pGpu); + +static inline NV_STATUS gpuConstructDeviceInfoTable_DISPATCH(struct OBJGPU *pGpu) { + return pGpu->__gpuConstructDeviceInfoTable__(pGpu); +} + +NvU64 gpuGetFlaVasSize_GA100(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization); + +static inline NvU64 
gpuGetFlaVasSize_474d46(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU64 gpuGetFlaVasSize_DISPATCH(struct OBJGPU *pGpu, NvBool bNvswitchVirtualization) { + return pGpu->__gpuGetFlaVasSize__(pGpu, bNvswitchVirtualization); +} + +static inline PENGDESCRIPTOR gpuGetInitEngineDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.pEngineInitDescriptors; +} + +static inline PENGDESCRIPTOR gpuGetLoadEngineDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.pEngineLoadDescriptors; +} + +static inline PENGDESCRIPTOR gpuGetUnloadEngineDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.pEngineUnloadDescriptors; +} + +static inline PENGDESCRIPTOR gpuGetDestroyEngineDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.pEngineDestroyDescriptors; +} + +static inline NvU32 gpuGetNumEngDescriptors(struct OBJGPU *pGpu) { + return pGpu->engineOrder.numEngineDescriptors; +} + +static inline NvU32 gpuGetMode(struct OBJGPU *pGpu) { + return pGpu->computeModeRefCount > 0 ? 2 : 1; +} + +static inline ACPI_DSM_FUNCTION gpuGetDispStatusHotplugFunc(struct OBJGPU *pGpu) { + return pGpu->acpi.dispStatusHotplugFunc; +} + +static inline ACPI_DSM_FUNCTION gpuGetDispStatusConfigFunc(struct OBJGPU *pGpu) { + return pGpu->acpi.dispStatusConfigFunc; +} + +static inline ACPI_DSM_FUNCTION gpuGetPerfPostPowerStateFunc(struct OBJGPU *pGpu) { + return pGpu->acpi.perfPostPowerStateFunc; +} + +static inline ACPI_DSM_FUNCTION gpuGetStereo3dStateActiveFunc(struct OBJGPU *pGpu) { + return pGpu->acpi.stereo3dStateActiveFunc; +} + +static inline NvU32 gpuGetPmcBoot0(struct OBJGPU *pGpu) { + return pGpu->chipId0; +} + +static inline struct OBJFIFO *gpuGetFifoShared(struct OBJGPU *pGpu) { + return ((void *)0); +} + +static inline ENGSTATE_ITER gpuGetEngstateIter(struct OBJGPU *pGpu) { + GPU_CHILD_ITER it = { 0 }; + return it; +} + +static inline RmPhysAddr gpuGetDmaStartAddress(struct OBJGPU *pGpu) { + return pGpu->dmaStartAddress; +} + +static inline NvBool gpuIsCCFeatureEnabled(struct OBJGPU *pGpu) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool gpuIsApmFeatureEnabled(struct OBJGPU *pGpu) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool gpuIsCCorApmFeatureEnabled(struct OBJGPU *pGpu) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool gpuIsInCCorApmDevMode(struct OBJGPU *pGpu) { + return ((NvBool)(0 != 0)); +} + +static inline NV_STATUS gpuFreeEventHandle(struct OBJGPU *pGpu) { + return NV_OK; +} + +static inline NvU32 gpuGetChipMajRev(struct OBJGPU *pGpu) { + return pGpu->chipInfo.pmcBoot42.majorRev; +} + +static inline NvU32 gpuGetChipMinRev(struct OBJGPU *pGpu) { + return pGpu->chipInfo.pmcBoot42.minorRev; +} + +static inline NvU32 gpuGetChipImpl(struct OBJGPU *pGpu) { + return pGpu->chipInfo.implementationId; +} + +static inline NvU32 gpuGetChipArch(struct OBJGPU *pGpu) { + return pGpu->chipInfo.platformId; +} + +static inline NvU32 gpuGetChipMinExtRev(struct OBJGPU *pGpu) { + return pGpu->chipInfo.pmcBoot42.minorExtRev; +} + +static inline NvU64 gpuGetVmmuSegmentSize(struct OBJGPU *pGpu) { + return pGpu->vmmuSegmentSize; +} + +static inline const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *gpuGetChipInfo(struct OBJGPU *pGpu) { + return pGpu->pChipInfo; +} + +static inline NvBool gpuIsBar2MovedByVtd(struct OBJGPU *pGpu) { + return pGpu->bBar2MovedByVtd; +} + +static inline NvBool gpuIsBar1Size64Bit(struct OBJGPU *pGpu) { + return pGpu->bBar1Is64Bit; +} + +static inline NvBool 
gpuIsSurpriseRemovalSupported(struct OBJGPU *pGpu) { + return pGpu->bSurpriseRemovalSupported; +} + +static inline NvBool gpuIsReplayableTraceEnabled(struct OBJGPU *pGpu) { + return pGpu->bReplayableTraceEnabled; +} + +static inline NvBool gpuIsStateLoading(struct OBJGPU *pGpu) { + return pGpu->bStateLoading; +} + +static inline NvBool gpuIsStateUnloading(struct OBJGPU *pGpu) { + return pGpu->bStateUnloading; +} + +static inline NvBool gpuIsStateLoaded(struct OBJGPU *pGpu) { + return pGpu->bStateLoaded; +} + +static inline NvBool gpuIsFullyConstructed(struct OBJGPU *pGpu) { + return pGpu->bFullyConstructed; +} + +static inline NvBool gpuIsUnifiedMemorySpaceEnabled(struct OBJGPU *pGpu) { + return pGpu->bUnifiedMemorySpaceEnabled; +} + +static inline NvBool gpuIsSriovEnabled(struct OBJGPU *pGpu) { + return pGpu->bSriovEnabled; +} + +static inline NvBool gpuIsCacheOnlyModeEnabled(struct OBJGPU *pGpu) { + return pGpu->bCacheOnlyMode; +} + +static inline NvBool gpuIsSplitVasManagementServerClientRmEnabled(struct OBJGPU *pGpu) { + return pGpu->bSplitVasManagementServerClientRm; +} + +static inline NvBool gpuIsWarBug200577889SriovHeavyEnabled(struct OBJGPU *pGpu) { + return pGpu->bWarBug200577889SriovHeavyEnabled; +} + +static inline NvBool gpuIsPipelinedPteMemEnabled(struct OBJGPU *pGpu) { + return pGpu->bPipelinedPteMemEnabled; +} + +static inline NvBool gpuIsBarPteInSysmemSupported(struct OBJGPU *pGpu) { + return pGpu->bIsBarPteInSysmemSupported; +} + +static inline NvBool gpuIsRegUsesGlobalSurfaceOverridesEnabled(struct OBJGPU *pGpu) { + return pGpu->bRegUsesGlobalSurfaceOverrides; +} + +static inline NvBool gpuIsTwoStageRcRecoveryEnabled(struct OBJGPU *pGpu) { + return pGpu->bTwoStageRcRecoveryEnabled; +} + +static inline NvBool gpuIsInD3Cold(struct OBJGPU *pGpu) { + return pGpu->bInD3Cold; +} + +static inline NvBool gpuIsClientRmAllocatedCtxBufferEnabled(struct OBJGPU *pGpu) { + return pGpu->bClientRmAllocatedCtxBuffer; +} + +static inline NvBool gpuIsIterativeMmuWalkerEnabled(struct OBJGPU *pGpu) { + return pGpu->bIterativeMmuWalker; +} + +static inline NvBool gpuIsEccPageRetirementWithSliAllowed(struct OBJGPU *pGpu) { + return pGpu->bEccPageRetirementWithSliAllowed; +} + +static inline NvBool gpuIsVidmemPreservationBrokenBug3172217(struct OBJGPU *pGpu) { + return pGpu->bVidmemPreservationBrokenBug3172217; +} + +static inline NvBool gpuIsInstanceMemoryAlwaysCached(struct OBJGPU *pGpu) { + return pGpu->bInstanceMemoryAlwaysCached; +} + +static inline NvBool gpuIsRmProfilingPrivileged(struct OBJGPU *pGpu) { + return pGpu->bRmProfilingPrivileged; +} + +static inline NvBool gpuIsGeforceSmb(struct OBJGPU *pGpu) { + return pGpu->bGeforceSmb; +} + +static inline NvBool gpuIsGeforceBranded(struct OBJGPU *pGpu) { + return pGpu->bIsGeforce; +} + +static inline NvBool gpuIsQuadroBranded(struct OBJGPU *pGpu) { + return pGpu->bIsQuadro; +} + +static inline NvBool gpuIsVgxBranded(struct OBJGPU *pGpu) { + return pGpu->bIsVgx; +} + +static inline NvBool gpuIsNvidiaNvsBranded(struct OBJGPU *pGpu) { + return pGpu->bIsNvidiaNvs; +} + +static inline NvBool gpuIsTitanBranded(struct OBJGPU *pGpu) { + return pGpu->bIsTitan; +} + +static inline NvBool gpuIsTeslaBranded(struct OBJGPU *pGpu) { + return pGpu->bIsTesla; +} + +static inline NvBool gpuIsComputePolicyTimesliceSupported(struct OBJGPU *pGpu) { + return pGpu->bComputePolicyTimesliceSupported; +} + +NV_STATUS gpuConstruct_IMPL(struct OBJGPU *arg_pGpu, NvU32 arg_gpuInstance); +#define __nvoc_gpuConstruct(arg_pGpu, arg_gpuInstance) gpuConstruct_IMPL(arg_pGpu, 
arg_gpuInstance) +NV_STATUS gpuBindHalLegacy_IMPL(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBindHalLegacy(struct OBJGPU *pGpu, NvU32 chipId0, NvU32 chipId1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBindHalLegacy(pGpu, chipId0, chipId1) gpuBindHalLegacy_IMPL(pGpu, chipId0, chipId1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuPostConstruct_IMPL(struct OBJGPU *pGpu, GPUATTACHARG *arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuPostConstruct(struct OBJGPU *pGpu, GPUATTACHARG *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuPostConstruct(pGpu, arg0) gpuPostConstruct_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuCreateObject_IMPL(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuCreateObject(struct OBJGPU *pGpu, NVOC_CLASS_ID arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuCreateObject(pGpu, arg0, arg1) gpuCreateObject_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +void gpuDestruct_IMPL(struct OBJGPU *pGpu); +#define __nvoc_gpuDestruct(pGpu) gpuDestruct_IMPL(pGpu) +NV_STATUS gpuStateInit_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateInit(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateInit(pGpu) gpuStateInit_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuStateUnload_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuStateUnload(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuStateUnload(pGpu, arg0) gpuStateUnload_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuInitDispIpHal_IMPL(struct OBJGPU *pGpu, NvU32 ipver); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitDispIpHal(struct OBJGPU *pGpu, NvU32 ipver) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitDispIpHal(pGpu, ipver) gpuInitDispIpHal_IMPL(pGpu, ipver) +#endif //__nvoc_gpu_h_disabled + +void gpuServiceInterruptsAllGpus_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuServiceInterruptsAllGpus(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuServiceInterruptsAllGpus(pGpu) gpuServiceInterruptsAllGpus_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsImplementation_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsImplementation(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsImplementation(pGpu, arg0, arg1, arg2) gpuIsImplementation_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsImplementationOrBetter_IMPL(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2); +#ifdef 
__nvoc_gpu_h_disabled +static inline NvBool gpuIsImplementationOrBetter(struct OBJGPU *pGpu, HAL_IMPLEMENTATION arg0, NvU32 arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsImplementationOrBetter(pGpu, arg0, arg1, arg2) gpuIsImplementationOrBetter_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsGpuFullPower_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsGpuFullPower(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsGpuFullPower(pGpu) gpuIsGpuFullPower_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsGpuFullPowerForPmResume_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsGpuFullPowerForPmResume(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsGpuFullPowerForPmResume(pGpu) gpuIsGpuFullPowerForPmResume_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuBuildClassDB_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuBuildClassDB(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuBuildClassDB(pGpu) gpuBuildClassDB_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDestroyClassDB_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDestroyClassDB(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyClassDB(pGpu) gpuDestroyClassDB_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteEngineFromClassDB_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteEngineFromClassDB(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteEngineFromClassDB(pGpu, arg0) gpuDeleteEngineFromClassDB_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteEngineOnPreInit_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteEngineOnPreInit(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteEngineOnPreInit(pGpu, arg0) gpuDeleteEngineOnPreInit_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddClassToClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddClassToClassDBByEngTag(pGpu, arg0) gpuAddClassToClassDBByEngTag_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddClassToClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddClassToClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddClassToClassDBByClassId(pGpu, arg0) 
gpuAddClassToClassDBByClassId_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddClassToClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddClassToClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddClassToClassDBByEngTagClassId(pGpu, arg0, arg1) gpuAddClassToClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteClassFromClassDBByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteClassFromClassDBByClassId(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteClassFromClassDBByClassId(pGpu, arg0) gpuDeleteClassFromClassDBByClassId_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteClassFromClassDBByEngTag_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteClassFromClassDBByEngTag(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteClassFromClassDBByEngTag(pGpu, arg0) gpuDeleteClassFromClassDBByEngTag_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuDeleteClassFromClassDBByEngTagClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuDeleteClassFromClassDBByEngTagClassId(pGpu, arg0, arg1) gpuDeleteClassFromClassDBByEngTagClassId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsClassSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsClassSupported(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsClassSupported(pGpu, arg0) gpuIsClassSupported_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetClassByClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetClassByClassId(struct OBJGPU *pGpu, NvU32 arg0, PCLASSDESCRIPTOR *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetClassByClassId(pGpu, arg0, arg1) gpuGetClassByClassId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetClassByEngineAndClassId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetClassByEngineAndClassId(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1, PCLASSDESCRIPTOR *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetClassByEngineAndClassId(pGpu, arg0, arg1, arg2) gpuGetClassByEngineAndClassId_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetClassList_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2); 
+#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetClassList(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetClassList(pGpu, arg0, arg1, arg2) gpuGetClassList_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuConstructEngineTable_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuConstructEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuConstructEngineTable(pGpu) gpuConstructEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuDestroyEngineTable_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDestroyEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDestroyEngineTable(pGpu) gpuDestroyEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuUpdateEngineTable_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuUpdateEngineTable(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuUpdateEngineTable(pGpu) gpuUpdateEngineTable_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuCheckEngineTable_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuCheckEngineTable(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckEngineTable(pGpu, arg0) gpuCheckEngineTable_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuXlateEngDescToClientEngineId_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, NvU32 *arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuXlateEngDescToClientEngineId(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuXlateEngDescToClientEngineId(pGpu, arg0, arg1) gpuXlateEngDescToClientEngineId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuXlateClientEngineIdToEngDesc_IMPL(struct OBJGPU *pGpu, NvU32 arg0, ENGDESCRIPTOR *arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuXlateClientEngineIdToEngDesc(struct OBJGPU *pGpu, NvU32 arg0, ENGDESCRIPTOR *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuXlateClientEngineIdToEngDesc(pGpu, arg0, arg1) gpuXlateClientEngineIdToEngDesc_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetFlcnFromClientEngineId_IMPL(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetFlcnFromClientEngineId(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetFlcnFromClientEngineId(pGpu, arg0, arg1) gpuGetFlcnFromClientEngineId_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsEngDescSupported_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsEngDescSupported(struct OBJGPU 
*pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsEngDescSupported(pGpu, arg0) gpuIsEngDescSupported_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuReadBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuReadBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 *pData) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuReadBusConfigCycle(pGpu, index, pData) gpuReadBusConfigCycle_IMPL(pGpu, index, pData) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuWriteBusConfigCycle_IMPL(struct OBJGPU *pGpu, NvU32 index, NvU32 value); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuWriteBusConfigCycle(struct OBJGPU *pGpu, NvU32 index, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuWriteBusConfigCycle(pGpu, index, value) gpuWriteBusConfigCycle_IMPL(pGpu, index, value) +#endif //__nvoc_gpu_h_disabled + +NvU32 gpuGetGpuMask_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetGpuMask(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGpuMask(pGpu) gpuGetGpuMask_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuChangeComputeModeRefCount_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuChangeComputeModeRefCount(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuChangeComputeModeRefCount(pGpu, arg0) gpuChangeComputeModeRefCount_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuEnterShutdown_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuEnterShutdown(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuEnterShutdown(pGpu) gpuEnterShutdown_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheck_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheck(struct OBJGPU *pGpu, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheck(pGpu, arg0, arg1) gpuSanityCheck_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +DEVICE_MAPPING *gpuGetDeviceMapping_IMPL(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline DEVICE_MAPPING *gpuGetDeviceMapping(struct OBJGPU *pGpu, DEVICE_INDEX arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDeviceMapping(pGpu, arg0, arg1) gpuGetDeviceMapping_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID_IMPL(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1); +#ifdef __nvoc_gpu_h_disabled +static inline DEVICE_MAPPING *gpuGetDeviceMappingFromDeviceID(struct OBJGPU *pGpu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetDeviceMappingFromDeviceID(pGpu, arg0, arg1) 
gpuGetDeviceMappingFromDeviceID_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetGidInfo_IMPL(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetGidInfo(struct OBJGPU *pGpu, NvU8 **ppGidString, NvU32 *pGidStrlen, NvU32 gidFlags) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetGidInfo(pGpu, ppGidString, pGidStrlen, gidFlags) gpuGetGidInfo_IMPL(pGpu, ppGidString, pGidStrlen, gidFlags) +#endif //__nvoc_gpu_h_disabled + +void gpuSetThreadBcState_IMPL(struct OBJGPU *pGpu, NvBool arg0); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetThreadBcState(struct OBJGPU *pGpu, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetThreadBcState(pGpu, arg0) gpuSetThreadBcState_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +void gpuSetDisconnectedProperties_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetDisconnectedProperties(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetDisconnectedProperties(pGpu) gpuSetDisconnectedProperties_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuAddConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuAddConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuAddConstructedFalcon(pGpu, arg0) gpuAddConstructedFalcon_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuRemoveConstructedFalcon_IMPL(struct OBJGPU *pGpu, struct Falcon *arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuRemoveConstructedFalcon(struct OBJGPU *pGpu, struct Falcon *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuRemoveConstructedFalcon(pGpu, arg0) gpuRemoveConstructedFalcon_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetConstructedFalcon_IMPL(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetConstructedFalcon(struct OBJGPU *pGpu, NvU32 arg0, struct Falcon **arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetConstructedFalcon(pGpu, arg0, arg1) gpuGetConstructedFalcon_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetSparseTextureComputeMode_IMPL(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetSparseTextureComputeMode(pGpu, arg0, arg1, arg2) gpuGetSparseTextureComputeMode_IMPL(pGpu, arg0, arg1, arg2) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSetSparseTextureComputeMode_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetSparseTextureComputeMode(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} 
+#else //__nvoc_gpu_h_disabled +#define gpuSetSparseTextureComputeMode(pGpu, arg0) gpuSetSparseTextureComputeMode_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +struct OBJENGSTATE *gpuGetEngstate_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); +#ifdef __nvoc_gpu_h_disabled +static inline struct OBJENGSTATE *gpuGetEngstate(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetEngstate(pGpu, arg0) gpuGetEngstate_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +struct OBJENGSTATE *gpuGetEngstateNoShare_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); +#ifdef __nvoc_gpu_h_disabled +static inline struct OBJENGSTATE *gpuGetEngstateNoShare(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetEngstateNoShare(pGpu, arg0) gpuGetEngstateNoShare_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +struct KernelFifo *gpuGetKernelFifoShared_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline struct KernelFifo *gpuGetKernelFifoShared(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetKernelFifoShared(pGpu) gpuGetKernelFifoShared_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuGetNextEngstate_IMPL(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuGetNextEngstate(struct OBJGPU *pGpu, ENGSTATE_ITER *pIt, struct OBJENGSTATE **ppEngState) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetNextEngstate(pGpu, pIt, ppEngState) gpuGetNextEngstate_IMPL(pGpu, pIt, ppEngState) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuGetNextStaticIntrable_IMPL(struct OBJGPU *pGpu, GPU_CHILD_ITER *pIt, struct OBJINTRABLE **ppIntrable); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuGetNextStaticIntrable(struct OBJGPU *pGpu, GPU_CHILD_ITER *pIt, struct OBJINTRABLE **ppIntrable) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetNextStaticIntrable(pGpu, pIt, ppIntrable) gpuGetNextStaticIntrable_IMPL(pGpu, pIt, ppIntrable) +#endif //__nvoc_gpu_h_disabled + +struct OBJHOSTENG *gpuGetHosteng_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0); +#ifdef __nvoc_gpu_h_disabled +static inline struct OBJHOSTENG *gpuGetHosteng(struct OBJGPU *pGpu, ENGDESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NULL; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetHosteng(pGpu, arg0) gpuGetHosteng_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuConstructUserRegisterAccessMap_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuConstructUserRegisterAccessMap(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuConstructUserRegisterAccessMap(pGpu) gpuConstructUserRegisterAccessMap_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuInitRegisterAccessMap_IMPL(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuInitRegisterAccessMap(struct OBJGPU *pGpu, NvU8 *arg0, NvU32 arg1, const NvU8 *arg2, const NvU32 arg3) { + 
NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuInitRegisterAccessMap(pGpu, arg0, arg1, arg2, arg3) gpuInitRegisterAccessMap_IMPL(pGpu, arg0, arg1, arg2, arg3) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSetUserRegisterAccessPermissions_IMPL(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetUserRegisterAccessPermissions(pGpu, offset, size, bAllow) gpuSetUserRegisterAccessPermissions_IMPL(pGpu, offset, size, bAllow) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk_IMPL(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetUserRegisterAccessPermissionsInBulk(struct OBJGPU *pGpu, const NvU32 *regOffsetsAndSizesArr, NvU32 arrSizeBytes, NvBool bAllow) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSetUserRegisterAccessPermissionsInBulk(pGpu, regOffsetsAndSizesArr, arrSizeBytes, bAllow) gpuSetUserRegisterAccessPermissionsInBulk_IMPL(pGpu, regOffsetsAndSizesArr, arrSizeBytes, bAllow) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuGetUserRegisterAccessPermissions_IMPL(struct OBJGPU *pGpu, NvU32 offset); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuGetUserRegisterAccessPermissions(struct OBJGPU *pGpu, NvU32 offset) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetUserRegisterAccessPermissions(pGpu, offset) gpuGetUserRegisterAccessPermissions_IMPL(pGpu, offset) +#endif //__nvoc_gpu_h_disabled + +void gpuDumpCallbackRegister_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuDumpCallbackRegister(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuDumpCallbackRegister(pGpu) gpuDumpCallbackRegister_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheckGfid_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckGfid(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckGfid(pGpu, gfid, bInUse) gpuSanityCheckGfid_IMPL(pGpu, gfid, bInUse) +#endif //__nvoc_gpu_h_disabled + +void gpuSetGfidUsage_IMPL(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuSetGfidUsage(struct OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuSetGfidUsage(pGpu, gfid, bInUse) gpuSetGfidUsage_IMPL(pGpu, gfid, bInUse) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSetExternalKernelClientCount_IMPL(struct OBJGPU *pGpu, NvBool bIncr); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSetExternalKernelClientCount(struct OBJGPU *pGpu, NvBool bIncr) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define 
gpuSetExternalKernelClientCount(pGpu, bIncr) gpuSetExternalKernelClientCount_IMPL(pGpu, bIncr) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuIsInUse_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool gpuIsInUse(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuIsInUse(pGpu) gpuIsInUse_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvU32 gpuGetUserClientCount_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetUserClientCount(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetUserClientCount(pGpu) gpuGetUserClientCount_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NvU32 gpuGetExternalClientCount_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvU32 gpuGetExternalClientCount(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return 0; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetExternalClientCount(pGpu) gpuGetExternalClientCount_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuNotifySubDeviceEvent_IMPL(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuNotifySubDeviceEvent(struct OBJGPU *pGpu, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuNotifySubDeviceEvent(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) gpuNotifySubDeviceEvent_IMPL(pGpu, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuGetProcWithObject_IMPL(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuGetProcWithObject(struct OBJGPU *pGpu, NvU32 elementID, NvU32 internalClassId, NvU32 *pPidArray, NvU32 *pPidArrayCount, MIG_INSTANCE_REF *pRef) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuGetProcWithObject(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) gpuGetProcWithObject_IMPL(pGpu, elementID, internalClassId, pPidArray, pPidArrayCount, pRef) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuFindClientInfoWithPidIterator_IMPL(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuFindClientInfoWithPidIterator(struct OBJGPU *pGpu, NvU32 pid, NvU32 subPid, NvU32 internalClassId, NV2080_CTRL_GPU_PID_INFO_DATA *pData, NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, MIG_INSTANCE_REF *pRef, NvBool bGlobalInfo) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuFindClientInfoWithPidIterator(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) gpuFindClientInfoWithPidIterator_IMPL(pGpu, pid, subPid, internalClassId, pData, pSmcInfo, pRef, bGlobalInfo) +#endif //__nvoc_gpu_h_disabled + +NvBool gpuCheckSysmemAccess_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline NvBool 
gpuCheckSysmemAccess(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_h_disabled +#define gpuCheckSysmemAccess(pGpu) gpuCheckSysmemAccess_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +void gpuInitChipInfo_IMPL(struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_h_disabled +static inline void gpuInitChipInfo(struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); +} +#else //__nvoc_gpu_h_disabled +#define gpuInitChipInfo(pGpu) gpuInitChipInfo_IMPL(pGpu) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheckRegRead_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckRegRead(struct OBJGPU *pGpu, NvU32 addr, NvU32 size, void *pValue) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckRegRead(pGpu, addr, size, pValue) gpuSanityCheckRegRead_IMPL(pGpu, addr, size, pValue) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuSanityCheckRegisterAccess_IMPL(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuSanityCheckRegisterAccess(struct OBJGPU *pGpu, NvU32 addr, NvU32 *pRetVal) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuSanityCheckRegisterAccess(pGpu, addr, pRetVal) gpuSanityCheckRegisterAccess_IMPL(pGpu, addr, pRetVal) +#endif //__nvoc_gpu_h_disabled + +NV_STATUS gpuValidateRegOffset_IMPL(struct OBJGPU *pGpu, NvU32 arg0); +#ifdef __nvoc_gpu_h_disabled +static inline NV_STATUS gpuValidateRegOffset(struct OBJGPU *pGpu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJGPU was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_h_disabled +#define gpuValidateRegOffset(pGpu, arg0) gpuValidateRegOffset_IMPL(pGpu, arg0) +#endif //__nvoc_gpu_h_disabled + +#undef PRIVATE_FIELD + + +// Look up pGpu associated with a pResourceRef +NV_STATUS gpuGetByRef (RsResourceRef *pContextRef, NvBool *pbBroadcast, struct OBJGPU **ppGpu); + +// Look up pGpu associated with a hResource +NV_STATUS gpuGetByHandle(struct RsClient *pClient, NvHandle hResource, NvBool *pbBroadcast, struct OBJGPU **ppGpu); + +// Checks if an SR-IOV GFID is in use +#define GPU_IS_SRIOV_GFID_IN_USE(gfid) ((gpuSanityCheckGfid(pGpu, gfid, NV_TRUE) == NV_ERR_IN_USE) ? NV_TRUE : NV_FALSE) + +#define GPU_GFID_PF (0) +#define IS_GFID_PF(gfid) ((gfid) == GPU_GFID_PF) +#define IS_GFID_VF(gfid) ((gfid) != GPU_GFID_PF) +// Invalid P2P GFID +#define INVALID_P2P_GFID (0xFFFFFFFF) + +// +// Generates GPU child accessor macros (i.e.: GPU_GET_{ENG}) +// +#define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return pGpu->gpuField; } \ + ct_assert(numInstances == 1); + +#define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return index < numInstances ? 
pGpu->gpuField[index] : NULL; } + +#include "gpu/gpu_child_list.h" + +static NV_FORCEINLINE struct Graphics *GPU_GET_GR(struct OBJGPU *pGpu) { return NULL; } + +// Temporary stubs +#if RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS +#define GPU_CHILD_LIST_DISABLED_ONLY +#define GPU_CHILD_SINGLE_INST(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu) { return NULL; } + +#define GPU_CHILD_MULTI_INST(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + static NV_FORCEINLINE className *accessorName(OBJGPU *pGpu, NvU32 index) { return NULL; } + +#include "gpu/gpu_child_list.h" +#endif // RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS + + +// +// Inline functions +// + +// +// This function returns the subdevice mask for a GPU. +// For non-SLI, subdeviceInstance is 0, so this +// function will always return 1. +// + +static NV_INLINE NvU32 +gpuGetSubdeviceMask +( + struct OBJGPU *pGpu +) +{ + return 1 << pGpu->subdeviceInstance; +} + +static NV_INLINE NvU32 +gpuGetInstance +( + struct OBJGPU *pGpu +) +{ + return pGpu->gpuInstance; +} + +static NV_INLINE NvU32 +gpuGetDeviceInstance +( + struct OBJGPU *pGpu +) +{ + return pGpu->deviceInstance; +} + +NV_INLINE +static NvU32 gpuGetNumCEs(struct OBJGPU *pGpu) +{ + return pGpu->numCEs; +} + +// +// Per GPU mode flags macros. In general these macros should not be +// used and all code paths should be the same on all environments. +// However occasionally a tweak is needed to work around a limitation +// or improve speed on non-hardware. IS_RTLSIM is normally handled +// in the IS_SIMULATION case and should almost never be used. +// +// IS_EMULATION actual emulation hardware +// IS_SIMULATION fmodel or RTL simulation +// IS_MODS_AMODEL amodel under mods for trace player +// IS_LIVE_AMODEL amodel under windows for 3D drivers (removed) +// IS_RTLSIM RTL simulation +// IS_SILICON Real hardware +// IS_VIRTUAL RM is running within a guest VM +// IS_GSP_CLIENT RM is a GSP/DCE client with GPU support offloaded to GSP/DCE +// IS_FW_CLIENT RM is a firmware client with GPU support offloaded to a microprocessor +// + +#define IS_EMULATION(pGpu) ((pGpu)->getProperty((pGpu), PDB_PROP_GPU_EMULATION)) +#define IS_SIMULATION(pGpu) (pGpu->bIsSimulation) +#define IS_MODS_AMODEL(pGpu) (pGpu->bIsModsAmodel) +#define IS_FMODEL(pGpu) (pGpu->bIsFmodel) +#define IS_RTLSIM(pGpu) (pGpu->bIsRtlsim) +#define IS_SILICON(pGpu) (!(IS_EMULATION(pGpu) || IS_SIMULATION(pGpu))) +#define IS_PASSTHRU(pGpu) ((pGpu)->bIsPassthru) +#define IS_GSP_CLIENT(pGpu) ((RMCFG_FEATURE_GSP_CLIENT_RM || RMCFG_FEATURE_DCE_CLIENT_RM) && (pGpu)->isGspClient) +#define IS_FW_CLIENT(pGpu) IS_GSP_CLIENT(pGpu) // TODO to be removed +#define IS_VIRTUAL(pGpu) NV_FALSE +#define IS_VIRTUAL_WITH_SRIOV(pGpu) NV_FALSE +#define IS_VIRTUAL_WITH_HEAVY_SRIOV(pGpu) NV_FALSE +#define IS_VIRTUAL_WITH_FULL_SRIOV(pGpu) NV_FALSE +#define IS_VIRTUAL_WITHOUT_SRIOV(pGpu) NV_FALSE +#define IS_SRIOV_HEAVY(pGpu) NV_FALSE +#define IS_SRIOV_HEAVY_GUEST(pGpu) NV_FALSE +#define IS_SRIOV_FULL_GUEST(pGpu) NV_FALSE +#define IS_SRIOV_HEAVY_HOST(pGpu) NV_FALSE +#define IS_SRIOV_FULL_HOST(pGpu) NV_FALSE +#define IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) NV_FALSE + +extern GPU_CHILD_ITER gpuGetPossibleEngDescriptorIter(void); +extern NvBool gpuGetNextPossibleEngDescriptor(GPU_CHILD_ITER *pIt, ENGDESCRIPTOR *pEngDesc); + +NV_STATUS gpuCtrlExecRegOps(struct OBJGPU *, struct Graphics *, NvHandle, NvHandle,
NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool); +NV_STATUS gpuValidateRegOps(struct OBJGPU *, NV2080_CTRL_GPU_REG_OP *, NvU32, NvBool, NvBool); + +// GPU Sanity Check Flags +#define GPU_SANITY_CHECK_FLAGS_BOOT_0 NVBIT(0) +#define GPU_SANITY_CHECK_FLAGS_OFF_BY_N NVBIT(1) +#define GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH NVBIT(2) +#define GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED NVBIT(3) +#define GPU_SANITY_CHECK_FLAGS_FB NVBIT(4) + +#define GPU_SANITY_CHECK_FLAGS_NONE 0x0 +#define GPU_SANITY_CHECK_FLAGS_ALL 0xffffffff + +// +// Macro for checking if GPU is in reset. +// +#define API_GPU_IN_RESET_SANITY_CHECK(pGpu) \ + ((NULL == pGpu) || \ + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) || \ + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) || \ + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) || \ + pGpu->getProperty(pGpu, PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING)) + +// +// Macro for checking if GPU is still connected. +// +#define API_GPU_ATTACHED_SANITY_CHECK(pGpu) \ + ((NULL != pGpu) && \ + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET)) + +// +// Macro for checking if GPU has Full Sanity +// +#define FULL_GPU_SANITY_CHECK(pGpu) \ + ((NULL != pGpu) && \ + gpuIsGpuFullPower(pGpu) && \ + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST) && \ + gpuCheckSysmemAccess(pGpu)) + +// +// Macro for checking if GPU has Full Sanity for PM resume +// +#define FULL_GPU_SANITY_FOR_PM_RESUME(pGpu) \ + ((NULL != pGpu) && \ + gpuIsGpuFullPowerForPmResume(pGpu) && \ + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && \ + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST)) + +// +// Macro for checking if GPU is in the recovery path +// +#define API_GPU_IN_RECOVERY_SANITY_CHECK(pGpu) \ + ((NULL == pGpu) || \ + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY)) + +// +// Identifiers for gpuGetRegBaseOffset HAL interface.
+// +#define NV_REG_BASE_GR (0x00000001) +#define NV_REG_BASE_PM (0x00000002) +#define NV_REG_BASE_TIMER (0x00000003) +#define NV_REG_BASE_DFD (0x00000004) +#define NV_REG_BASE_FLUSH (0x00000005) +#define NV_REG_BASE_LTCG (0x00000006) +#define NV_REG_BASE_TOP (0x00000007) +#define NV_REG_BASE_MASTER (0x0000000A) +#define NV_REG_BASE_USERMODE (0x0000000B) +#define NV_REG_BASE_LAST NV_REG_BASE_USERMODE +ct_assert(NV_REG_BASE_LAST < NV2080_CTRL_INTERNAL_GET_CHIP_INFO_REG_BASE_MAX); + +// Macros for CPU family information +#define NV_CPU_FAMILY 3:0 +#define NV_CPU_EXTENDED_FAMILY 11:4 + +// Macros for CPU model information +#define NV_CPU_MODEL 3:0 +#define NV_CPU_EXTENDED_MODEL 7:4 + +// Macros for AMD CPU information +#define NV_CPU_ID_AMD_FAMILY 0xF +#define NV_CPU_ID_AMD_EXTENDED_FAMILY 0xA +#define NV_CPU_ID_AMD_MODEL 0x0 +#define NV_CPU_ID_AMD_EXTENDED_MODEL 0x4 + +// Macros for Intel CPU information +#define NV_CPU_ID_INTEL_FAMILY 0x6 +#define NV_CPU_ID_INTEL_EXTENDED_FAMILY 0x0 +#define NV_CPU_ID_INTEL_CORE_S_MODEL 0x7 +#define NV_CPU_ID_INTEL_CORE_P_MODEL 0xA +#define NV_CPU_ID_INTEL_EXTENDED_MODEL 0x9 + +#define GPU_READ_PRI_ERROR_MASK 0xFFF00000 +#define GPU_READ_PRI_ERROR_CODE 0xBAD00000 + +// +// Define for invalid register value. GPU could have fallen off the bus or +// the GPU could be in reset. +// +#define GPU_REG_VALUE_INVALID 0xFFFFFFFF + +// +// Hal InfoBlock access interface +// +#define gpuGetInfoBlock(pGpu, pListHead, dataId) getInfoPtr(pListHead, dataId) +#define gpuAddInfoBlock(pGpu, ppListHead, dataId, size) addInfoPtr(ppListHead, dataId, size) +#define gpuDeleteInfoBlock(pGpu, ppListHead, dataId) deleteInfoPtr(ppListHead, dataId); +#define gpuTestInfoBlock(pGpu, pListHead, dataId) testInfoPtr(pListHead, dataId); + +// Static info getters +void *gpuGetStaticInfo(struct OBJGPU *pGpu); +#define GPU_GET_STATIC_INFO(pGpu) gpuGetStaticInfo(pGpu) +void *gpuGetGspStaticInfo(struct OBJGPU *pGpu); +#define GPU_GET_GSP_STATIC_INFO(pGpu) gpuGetGspStaticInfo(pGpu) + + +#define IS_GPU_GC6_STATE_POWERED_ON(obj) NV_TRUE +#define IS_GPU_GC6_STATE_EXITED(obj) NV_FALSE +#define IS_GPU_GC6_STATE_ENTERING(obj) NV_FALSE +#define IS_GPU_GC6_STATE_ENTERED(obj) NV_FALSE +#define IS_GPU_GC6_STATE_EXITING(obj) NV_FALSE + +#endif // _OBJGPU_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_resource_nvoc.c b/src/nvidia/generated/g_gpu_resource_nvoc.c new file mode 100644 index 000000000..00b40f0cf --- /dev/null +++ b/src/nvidia/generated/g_gpu_resource_nvoc.c @@ -0,0 +1,309 @@ +#define NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_resource_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x5d5d9f = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_funcTable_GpuResource(GpuResource*); +NV_STATUS __nvoc_ctor_GpuResource(GpuResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_GpuResource(GpuResource*); 
+void __nvoc_dtor_GpuResource(GpuResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuResource; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GpuResource = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_GpuResource_GpuResource, + &__nvoc_rtti_GpuResource_RmResource, + &__nvoc_rtti_GpuResource_RmResourceCommon, + &__nvoc_rtti_GpuResource_RsResource, + &__nvoc_rtti_GpuResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GpuResource), + /*classId=*/ classId(GpuResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GpuResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuResource, + /*pCastInfo=*/ &__nvoc_castinfo_GpuResource, + /*pExportInfo=*/ &__nvoc_export_info_GpuResource +}; + +static NV_STATUS __nvoc_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pCpuMapping); +} + +static NvBool __nvoc_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool 
bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_gpuresControlLookup(struct GpuResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams, 
ppEntry); +} + +static NvBool __nvoc_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_GpuResource(GpuResource *pThis) { + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GpuResource(GpuResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_GpuResource(GpuResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail_RmResource; + __nvoc_init_dataField_GpuResource(pThis); + + status = __nvoc_gpuresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail__init; + goto __nvoc_ctor_GpuResource_exit; // Success + +__nvoc_ctor_GpuResource_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_GpuResource_fail_RmResource: +__nvoc_ctor_GpuResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_GpuResource_1(GpuResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__gpuresControl__ = &gpuresControl_IMPL; + + pThis->__gpuresMap__ = &gpuresMap_IMPL; + + pThis->__gpuresUnmap__ = &gpuresUnmap_IMPL; + + pThis->__gpuresShareCallback__ = &gpuresShareCallback_IMPL; + + pThis->__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL; + + pThis->__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL; + + pThis->__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL; + + pThis->__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_GpuResource_resControl; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMap__ = &__nvoc_thunk_GpuResource_resMap; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmap__ = &__nvoc_thunk_GpuResource_resUnmap; + + pThis->__nvoc_base_RmResource.__rmresShareCallback__ = &__nvoc_thunk_GpuResource_rmresShareCallback; + + pThis->__gpuresCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_gpuresCheckMemInterUnmap; + + pThis->__gpuresGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gpuresGetMemInterMapParams; + + pThis->__gpuresGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gpuresGetMemoryMappingDescriptor; + + pThis->__gpuresGetRefCount__ = &__nvoc_thunk_RsResource_gpuresGetRefCount; + + pThis->__gpuresControlFilter__ = &__nvoc_thunk_RsResource_gpuresControlFilter; + + pThis->__gpuresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_gpuresAddAdditionalDependants; + + pThis->__gpuresControl_Prologue__ = &__nvoc_thunk_RmResource_gpuresControl_Prologue; + + pThis->__gpuresCanCopy__ = &__nvoc_thunk_RsResource_gpuresCanCopy; + + pThis->__gpuresMapTo__ = &__nvoc_thunk_RsResource_gpuresMapTo; + + pThis->__gpuresPreDestruct__ = 
&__nvoc_thunk_RsResource_gpuresPreDestruct; + + pThis->__gpuresUnmapFrom__ = &__nvoc_thunk_RsResource_gpuresUnmapFrom; + + pThis->__gpuresControl_Epilogue__ = &__nvoc_thunk_RmResource_gpuresControl_Epilogue; + + pThis->__gpuresControlLookup__ = &__nvoc_thunk_RsResource_gpuresControlLookup; + + pThis->__gpuresAccessCallback__ = &__nvoc_thunk_RmResource_gpuresAccessCallback; +} + +void __nvoc_init_funcTable_GpuResource(GpuResource *pThis) { + __nvoc_init_funcTable_GpuResource_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_GpuResource(GpuResource *pThis) { + pThis->__nvoc_pbase_GpuResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_GpuResource(pThis); +} + +NV_STATUS __nvoc_objCreate_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + GpuResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(GpuResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GpuResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_GpuResource(pThis); + status = __nvoc_ctor_GpuResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_GpuResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GpuResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_GpuResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_resource_nvoc.h b/src/nvidia/generated/g_gpu_resource_nvoc.h new file mode 100644 index 000000000..80136153c --- /dev/null +++ b/src/nvidia/generated/g_gpu_resource_nvoc.h @@ -0,0 +1,329 @@ +#ifndef _G_GPU_RESOURCE_NVOC_H_ +#define _G_GPU_RESOURCE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_gpu_resource_nvoc.h" + +#ifndef _GPURESOURCE_H_ +#define _GPURESOURCE_H_ + +#include "core/core.h" +#include "gpu/mem_mgr/mem_desc.h" + +#include "rmapi/resource.h" + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + + +#define GPU_RES_GET_GPU(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pGpu +#define GPU_RES_GET_GPUGRP(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pGpuGrp +#define GPU_RES_GET_DEVICE(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pDevice +#define GPU_RES_GET_SUBDEVICE(pRes) staticCastNoPtrCheck((pRes), GpuResource)->pSubdevice + +#define GPU_RES_SET_THREAD_BC_STATE(pRes) do { \ + gpuSetThreadBcState(staticCastNoPtrCheck((pRes), GpuResource)->pGpu, \ + staticCastNoPtrCheck((pRes), GpuResource)->bBcResource); \ + } while(0) + +/*! 
+ * Abstract base class for common CPU mapping operations + */ +#ifdef NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GpuResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + NV_STATUS (*__gpuresControl__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gpuresMap__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__gpuresUnmap__)(struct GpuResource *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NvBool (*__gpuresShareCallback__)(struct GpuResource *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__gpuresGetRegBaseOffsetAndSize__)(struct GpuResource *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__gpuresGetMapAddrSpace__)(struct GpuResource *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__gpuresInternalControlForward__)(struct GpuResource *, NvU32, void *, NvU32); + NvHandle (*__gpuresGetInternalObjectHandle__)(struct GpuResource *); + NV_STATUS (*__gpuresCheckMemInterUnmap__)(struct GpuResource *, NvBool); + NV_STATUS (*__gpuresGetMemInterMapParams__)(struct GpuResource *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__gpuresGetMemoryMappingDescriptor__)(struct GpuResource *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__gpuresGetRefCount__)(struct GpuResource *); + NV_STATUS (*__gpuresControlFilter__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__gpuresAddAdditionalDependants__)(struct RsClient *, struct GpuResource *, RsResourceRef *); + NV_STATUS (*__gpuresControl_Prologue__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__gpuresCanCopy__)(struct GpuResource *); + NV_STATUS (*__gpuresMapTo__)(struct GpuResource *, RS_RES_MAP_TO_PARAMS *); + void (*__gpuresPreDestruct__)(struct GpuResource *); + NV_STATUS (*__gpuresUnmapFrom__)(struct GpuResource *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__gpuresControl_Epilogue__)(struct GpuResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__gpuresControlLookup__)(struct GpuResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvBool (*__gpuresAccessCallback__)(struct GpuResource *, struct RsClient *, void *, RsAccessRight); + struct OBJGPUGRP *pGpuGrp; + struct OBJGPU *pGpu; + struct Device *pDevice; + struct Subdevice *pSubdevice; + NvBool bBcResource; +}; + +#ifndef __NVOC_CLASS_GpuResource_TYPEDEF__ +#define __NVOC_CLASS_GpuResource_TYPEDEF__ +typedef struct GpuResource GpuResource; +#endif /* __NVOC_CLASS_GpuResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuResource +#define __nvoc_class_id_GpuResource 0x5d5d9f +#endif /* __nvoc_class_id_GpuResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +#define __staticCast_GpuResource(pThis) \ + ((pThis)->__nvoc_pbase_GpuResource) + +#ifdef __nvoc_gpu_resource_h_disabled +#define __dynamicCast_GpuResource(pThis) ((GpuResource*)NULL) +#else //__nvoc_gpu_resource_h_disabled +#define __dynamicCast_GpuResource(pThis) \ + 
((GpuResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuResource))) +#endif //__nvoc_gpu_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GpuResource(GpuResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_GpuResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_GpuResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define gpuresControl(pGpuResource, pCallContext, pParams) gpuresControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define gpuresMap(pGpuResource, pCallContext, pParams, pCpuMapping) gpuresMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define gpuresUnmap(pGpuResource, pCallContext, pCpuMapping) gpuresUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define gpuresShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) gpuresShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) gpuresGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define gpuresGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) gpuresGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define gpuresInternalControlForward(pGpuResource, command, pParams, size) gpuresInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define gpuresGetInternalObjectHandle(pGpuResource) gpuresGetInternalObjectHandle_DISPATCH(pGpuResource) +#define gpuresCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpuresCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define gpuresGetMemInterMapParams(pRmResource, pParams) gpuresGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define gpuresGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpuresGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define gpuresGetRefCount(pResource) gpuresGetRefCount_DISPATCH(pResource) +#define gpuresControlFilter(pResource, pCallContext, pParams) gpuresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define gpuresAddAdditionalDependants(pClient, pResource, pReference) gpuresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define gpuresControl_Prologue(pResource, pCallContext, pParams) gpuresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define gpuresCanCopy(pResource) gpuresCanCopy_DISPATCH(pResource) +#define gpuresMapTo(pResource, pParams) gpuresMapTo_DISPATCH(pResource, pParams) +#define gpuresPreDestruct(pResource) gpuresPreDestruct_DISPATCH(pResource) +#define gpuresUnmapFrom(pResource, pParams) gpuresUnmapFrom_DISPATCH(pResource, pParams) +#define gpuresControl_Epilogue(pResource, pCallContext, pParams) gpuresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define gpuresControlLookup(pResource, pParams, ppEntry) gpuresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define gpuresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpuresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS gpuresControl_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS gpuresControl_DISPATCH(struct GpuResource 
*pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__gpuresControl__(pGpuResource, pCallContext, pParams); +} + +NV_STATUS gpuresMap_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS gpuresMap_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__gpuresMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS gpuresUnmap_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS gpuresUnmap_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__gpuresUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +NvBool gpuresShareCallback_IMPL(struct GpuResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +static inline NvBool gpuresShareCallback_DISPATCH(struct GpuResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__gpuresShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS gpuresGetRegBaseOffsetAndSize_IMPL(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS gpuresGetRegBaseOffsetAndSize_DISPATCH(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__gpuresGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +NV_STATUS gpuresGetMapAddrSpace_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS gpuresGetMapAddrSpace_DISPATCH(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__gpuresGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS gpuresInternalControlForward_IMPL(struct GpuResource *pGpuResource, NvU32 command, void *pParams, NvU32 size); + +static inline NV_STATUS gpuresInternalControlForward_DISPATCH(struct GpuResource *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__gpuresInternalControlForward__(pGpuResource, command, pParams, size); +} + +NvHandle gpuresGetInternalObjectHandle_IMPL(struct GpuResource *pGpuResource); + +static inline NvHandle gpuresGetInternalObjectHandle_DISPATCH(struct GpuResource *pGpuResource) { + return pGpuResource->__gpuresGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS gpuresCheckMemInterUnmap_DISPATCH(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__gpuresCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS gpuresGetMemInterMapParams_DISPATCH(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__gpuresGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS gpuresGetMemoryMappingDescriptor_DISPATCH(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__gpuresGetMemoryMappingDescriptor__(pRmResource, 
ppMemDesc); +} + +static inline NvU32 gpuresGetRefCount_DISPATCH(struct GpuResource *pResource) { + return pResource->__gpuresGetRefCount__(pResource); +} + +static inline NV_STATUS gpuresControlFilter_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpuresControlFilter__(pResource, pCallContext, pParams); +} + +static inline void gpuresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) { + pResource->__gpuresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS gpuresControl_Prologue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__gpuresControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool gpuresCanCopy_DISPATCH(struct GpuResource *pResource) { + return pResource->__gpuresCanCopy__(pResource); +} + +static inline NV_STATUS gpuresMapTo_DISPATCH(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__gpuresMapTo__(pResource, pParams); +} + +static inline void gpuresPreDestruct_DISPATCH(struct GpuResource *pResource) { + pResource->__gpuresPreDestruct__(pResource); +} + +static inline NV_STATUS gpuresUnmapFrom_DISPATCH(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__gpuresUnmapFrom__(pResource, pParams); +} + +static inline void gpuresControl_Epilogue_DISPATCH(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__gpuresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS gpuresControlLookup_DISPATCH(struct GpuResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__gpuresControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvBool gpuresAccessCallback_DISPATCH(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__gpuresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS gpuresConstruct_IMPL(struct GpuResource *arg_pGpuResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_gpuresConstruct(arg_pGpuResource, arg_pCallContext, arg_pParams) gpuresConstruct_IMPL(arg_pGpuResource, arg_pCallContext, arg_pParams) +NV_STATUS gpuresCopyConstruct_IMPL(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_gpu_resource_h_disabled +static inline NV_STATUS gpuresCopyConstruct(struct GpuResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_resource_h_disabled +#define gpuresCopyConstruct(pGpuResource, pCallContext, pParams) gpuresCopyConstruct_IMPL(pGpuResource, pCallContext, pParams) +#endif //__nvoc_gpu_resource_h_disabled + +void gpuresSetGpu_IMPL(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvBool bBcResource); +#ifdef __nvoc_gpu_resource_h_disabled +static inline void gpuresSetGpu(struct GpuResource *pGpuResource, struct OBJGPU *pGpu, NvBool bBcResource) { + 
NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!"); +} +#else //__nvoc_gpu_resource_h_disabled +#define gpuresSetGpu(pGpuResource, pGpu, bBcResource) gpuresSetGpu_IMPL(pGpuResource, pGpu, bBcResource) +#endif //__nvoc_gpu_resource_h_disabled + +void gpuresControlSetup_IMPL(struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, struct GpuResource *pGpuResource); +#ifdef __nvoc_gpu_resource_h_disabled +static inline void gpuresControlSetup(struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, struct GpuResource *pGpuResource) { + NV_ASSERT_FAILED_PRECOMP("GpuResource was disabled!"); +} +#else //__nvoc_gpu_resource_h_disabled +#define gpuresControlSetup(pParams, pGpuResource) gpuresControlSetup_IMPL(pParams, pGpuResource) +#endif //__nvoc_gpu_resource_h_disabled + +NV_STATUS gpuresGetByHandle_IMPL(struct RsClient *pClient, NvHandle hResource, struct GpuResource **ppGpuResource); +#define gpuresGetByHandle(pClient, hResource, ppGpuResource) gpuresGetByHandle_IMPL(pClient, hResource, ppGpuResource) +NV_STATUS gpuresGetByDeviceOrSubdeviceHandle_IMPL(struct RsClient *pClient, NvHandle hResource, struct GpuResource **ppGpuResource); +#define gpuresGetByDeviceOrSubdeviceHandle(pClient, hResource, ppGpuResource) gpuresGetByDeviceOrSubdeviceHandle_IMPL(pClient, hResource, ppGpuResource) +#undef PRIVATE_FIELD + + +#endif // _GPURESOURCE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_RESOURCE_NVOC_H_ diff --git a/src/nvidia/generated/g_gpu_vaspace_nvoc.c b/src/nvidia/generated/g_gpu_vaspace_nvoc.c new file mode 100644 index 000000000..34dee63d3 --- /dev/null +++ b/src/nvidia/generated/g_gpu_vaspace_nvoc.c @@ -0,0 +1,387 @@ +#define NVOC_GPU_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_gpu_vaspace_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xba5875 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGVASPACE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +void __nvoc_init_OBJGVASPACE(OBJGVASPACE*); +void __nvoc_init_funcTable_OBJGVASPACE(OBJGVASPACE*); +NV_STATUS __nvoc_ctor_OBJGVASPACE(OBJGVASPACE*); +void __nvoc_init_dataField_OBJGVASPACE(OBJGVASPACE*); +void __nvoc_dtor_OBJGVASPACE(OBJGVASPACE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGVASPACE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGVASPACE_OBJGVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJGVASPACE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGVASPACE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGVASPACE_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGVASPACE, __nvoc_base_OBJVASPACE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGVASPACE_OBJVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJVASPACE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGVASPACE, __nvoc_base_OBJVASPACE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGVASPACE = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJGVASPACE_OBJGVASPACE, + &__nvoc_rtti_OBJGVASPACE_OBJVASPACE, + &__nvoc_rtti_OBJGVASPACE_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGVASPACE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGVASPACE), + /*classId=*/ classId(OBJGVASPACE), + 
/*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGVASPACE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGVASPACE, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGVASPACE, + /*pExportInfo=*/ &__nvoc_export_info_OBJGVASPACE +}; + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceConstruct_(struct OBJVASPACE *pGVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return gvaspaceConstruct_((struct OBJGVASPACE *)(((unsigned char *)pGVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceReserveMempool(struct OBJVASPACE *pGVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) { + return gvaspaceReserveMempool((struct OBJGVASPACE *)(((unsigned char *)pGVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu, hClient, size, pageSizeLockMask, flags); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceAlloc(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return gvaspaceAlloc((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceFree(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return gvaspaceFree((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), vAddr); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceApplyDefaultAlignment(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return gvaspaceApplyDefaultAlignment((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceIncAllocRefCnt(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return gvaspaceIncAllocRefCnt((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), vAddr); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceMap(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) { + return gvaspaceMap((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu, vaLo, vaHi, pTarget, flags); +} + +static void __nvoc_thunk_OBJGVASPACE_vaspaceUnmap(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) { + gvaspaceUnmap((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu, vaLo, vaHi); +} + +static struct OBJEHEAP *__nvoc_thunk_OBJGVASPACE_vaspaceGetHeap(struct OBJVASPACE *pVAS) { + return gvaspaceGetHeap((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NvU32 __nvoc_thunk_OBJGVASPACE_vaspaceGetMapPageSize(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) { + return gvaspaceGetMapPageSize((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu, pMemBlock); +} + +static NvU32 __nvoc_thunk_OBJGVASPACE_vaspaceGetBigPageSize(struct OBJVASPACE *pVAS) { + return gvaspaceGetBigPageSize((struct OBJGVASPACE 
*)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NvU32 __nvoc_thunk_OBJGVASPACE_vaspaceGetFlags(struct OBJVASPACE *pVAS) { + return gvaspaceGetFlags((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJGVASPACE_vaspaceIsMirrored(struct OBJVASPACE *pVAS) { + return gvaspaceIsMirrored((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJGVASPACE_vaspaceIsFaultCapable(struct OBJVASPACE *pVAS) { + return gvaspaceIsFaultCapable((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJGVASPACE_vaspaceIsExternallyOwned(struct OBJVASPACE *pVAS) { + return gvaspaceIsExternallyOwned((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJGVASPACE_vaspaceIsAtsEnabled(struct OBJVASPACE *pVAS) { + return gvaspaceIsAtsEnabled((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceGetPasid(struct OBJVASPACE *pVAS, NvU32 *pPasid) { + return gvaspaceGetPasid((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pPasid); +} + +static PMEMORY_DESCRIPTOR __nvoc_thunk_OBJGVASPACE_vaspaceGetPageDirBase(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + return gvaspaceGetPageDirBase((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu); +} + +static PMEMORY_DESCRIPTOR __nvoc_thunk_OBJGVASPACE_vaspaceGetKernelPageDirBase(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + return gvaspaceGetKernelPageDirBase((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspacePinRootPageDir(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + return gvaspacePinRootPageDir((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu); +} + +static void __nvoc_thunk_OBJGVASPACE_vaspaceUnpinRootPageDir(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + gvaspaceUnpinRootPageDir((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu); +} + +static void __nvoc_thunk_OBJGVASPACE_vaspaceInvalidateTlb(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) { + gvaspaceInvalidateTlb((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu, type); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceGetVasInfo(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return gvaspaceGetVasInfo((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceGetPageTableInfo(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + return gvaspaceGetPageTableInfo((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceGetPteInfo(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) { + return gvaspaceGetPteInfo((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu, 
pParams, pPhysAddr); +} + +static NV_STATUS __nvoc_thunk_OBJGVASPACE_vaspaceSetPteInfo(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + return gvaspaceSetPteInfo((struct OBJGVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset), pGpu, pParams); +} + +static NvBool __nvoc_thunk_OBJGVASPACE_vaspaceIsInternalVaRestricted(struct OBJVASPACE *pGVAS) { + return gvaspaceIsInternalVaRestricted((struct OBJGVASPACE *)(((unsigned char *)pGVAS) - __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NvU64 __nvoc_thunk_OBJVASPACE_gvaspaceGetVaLimit(struct OBJGVASPACE *pVAS) { + return vaspaceGetVaLimit((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +static NvU64 __nvoc_thunk_OBJVASPACE_gvaspaceGetVaStart(struct OBJGVASPACE *pVAS) { + return vaspaceGetVaStart((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJGVASPACE_OBJVASPACE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGVASPACE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJVASPACE(OBJVASPACE*); +void __nvoc_dtor_OBJGVASPACE(OBJGVASPACE *pThis) { + __nvoc_gvaspaceDestruct(pThis); + __nvoc_dtor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGVASPACE(OBJGVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE* ); +NV_STATUS __nvoc_ctor_OBJGVASPACE(OBJGVASPACE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + if (status != NV_OK) goto __nvoc_ctor_OBJGVASPACE_fail_OBJVASPACE; + __nvoc_init_dataField_OBJGVASPACE(pThis); + goto __nvoc_ctor_OBJGVASPACE_exit; // Success + +__nvoc_ctor_OBJGVASPACE_fail_OBJVASPACE: +__nvoc_ctor_OBJGVASPACE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGVASPACE_1(OBJGVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__gvaspaceConstruct___ = &gvaspaceConstruct__IMPL; + + pThis->__gvaspaceReserveMempool__ = &gvaspaceReserveMempool_IMPL; + + pThis->__gvaspaceAlloc__ = &gvaspaceAlloc_IMPL; + + pThis->__gvaspaceFree__ = &gvaspaceFree_IMPL; + + pThis->__gvaspaceApplyDefaultAlignment__ = &gvaspaceApplyDefaultAlignment_IMPL; + + pThis->__gvaspaceIncAllocRefCnt__ = &gvaspaceIncAllocRefCnt_IMPL; + + pThis->__gvaspaceMap__ = &gvaspaceMap_IMPL; + + pThis->__gvaspaceUnmap__ = &gvaspaceUnmap_IMPL; + + pThis->__gvaspaceGetHeap__ = &gvaspaceGetHeap_IMPL; + + pThis->__gvaspaceGetMapPageSize__ = &gvaspaceGetMapPageSize_IMPL; + + pThis->__gvaspaceGetBigPageSize__ = &gvaspaceGetBigPageSize_IMPL; + + pThis->__gvaspaceGetFlags__ = &gvaspaceGetFlags_IMPL; + + pThis->__gvaspaceIsMirrored__ = &gvaspaceIsMirrored_IMPL; + + pThis->__gvaspaceIsFaultCapable__ = &gvaspaceIsFaultCapable_IMPL; + + pThis->__gvaspaceIsExternallyOwned__ = &gvaspaceIsExternallyOwned_IMPL; + + pThis->__gvaspaceIsAtsEnabled__ = &gvaspaceIsAtsEnabled_IMPL; + + pThis->__gvaspaceGetPasid__ = &gvaspaceGetPasid_IMPL; + + pThis->__gvaspaceGetPageDirBase__ = &gvaspaceGetPageDirBase_IMPL; + + pThis->__gvaspaceGetKernelPageDirBase__ = &gvaspaceGetKernelPageDirBase_IMPL; + + pThis->__gvaspacePinRootPageDir__ = &gvaspacePinRootPageDir_IMPL; + + pThis->__gvaspaceUnpinRootPageDir__ = &gvaspaceUnpinRootPageDir_IMPL; + + pThis->__gvaspaceInvalidateTlb__ = &gvaspaceInvalidateTlb_IMPL; + + pThis->__gvaspaceGetVasInfo__ = &gvaspaceGetVasInfo_IMPL; + + pThis->__gvaspaceGetPageTableInfo__ = 
&gvaspaceGetPageTableInfo_IMPL; + + pThis->__gvaspaceGetPteInfo__ = &gvaspaceGetPteInfo_IMPL; + + pThis->__gvaspaceSetPteInfo__ = &gvaspaceSetPteInfo_IMPL; + + pThis->__gvaspaceIsInternalVaRestricted__ = &gvaspaceIsInternalVaRestricted_IMPL; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceConstruct___ = &__nvoc_thunk_OBJGVASPACE_vaspaceConstruct_; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceReserveMempool__ = &__nvoc_thunk_OBJGVASPACE_vaspaceReserveMempool; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceAlloc__ = &__nvoc_thunk_OBJGVASPACE_vaspaceAlloc; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceFree__ = &__nvoc_thunk_OBJGVASPACE_vaspaceFree; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceApplyDefaultAlignment__ = &__nvoc_thunk_OBJGVASPACE_vaspaceApplyDefaultAlignment; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceIncAllocRefCnt__ = &__nvoc_thunk_OBJGVASPACE_vaspaceIncAllocRefCnt; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceMap__ = &__nvoc_thunk_OBJGVASPACE_vaspaceMap; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceUnmap__ = &__nvoc_thunk_OBJGVASPACE_vaspaceUnmap; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetHeap__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetHeap; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetMapPageSize__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetMapPageSize; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetBigPageSize__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetBigPageSize; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetFlags__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetFlags; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceIsMirrored__ = &__nvoc_thunk_OBJGVASPACE_vaspaceIsMirrored; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceIsFaultCapable__ = &__nvoc_thunk_OBJGVASPACE_vaspaceIsFaultCapable; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceIsExternallyOwned__ = &__nvoc_thunk_OBJGVASPACE_vaspaceIsExternallyOwned; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceIsAtsEnabled__ = &__nvoc_thunk_OBJGVASPACE_vaspaceIsAtsEnabled; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetPasid__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetPasid; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetPageDirBase__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetPageDirBase; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetKernelPageDirBase__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetKernelPageDirBase; + + pThis->__nvoc_base_OBJVASPACE.__vaspacePinRootPageDir__ = &__nvoc_thunk_OBJGVASPACE_vaspacePinRootPageDir; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceUnpinRootPageDir__ = &__nvoc_thunk_OBJGVASPACE_vaspaceUnpinRootPageDir; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceInvalidateTlb__ = &__nvoc_thunk_OBJGVASPACE_vaspaceInvalidateTlb; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVasInfo__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetVasInfo; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetPageTableInfo__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetPageTableInfo; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetPteInfo__ = &__nvoc_thunk_OBJGVASPACE_vaspaceGetPteInfo; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceSetPteInfo__ = &__nvoc_thunk_OBJGVASPACE_vaspaceSetPteInfo; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceIsInternalVaRestricted__ = &__nvoc_thunk_OBJGVASPACE_vaspaceIsInternalVaRestricted; + + pThis->__gvaspaceGetVaLimit__ = &__nvoc_thunk_OBJVASPACE_gvaspaceGetVaLimit; + + pThis->__gvaspaceGetVaStart__ = &__nvoc_thunk_OBJVASPACE_gvaspaceGetVaStart; +} + +void __nvoc_init_funcTable_OBJGVASPACE(OBJGVASPACE *pThis) { + __nvoc_init_funcTable_OBJGVASPACE_1(pThis); +} + +void __nvoc_init_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_OBJGVASPACE(OBJGVASPACE *pThis) { + pThis->__nvoc_pbase_OBJGVASPACE = pThis; + 
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJVASPACE = &pThis->__nvoc_base_OBJVASPACE; + __nvoc_init_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + __nvoc_init_funcTable_OBJGVASPACE(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJGVASPACE(OBJGVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJGVASPACE *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJGVASPACE)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGVASPACE)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGVASPACE); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJGVASPACE(pThis); + status = __nvoc_ctor_OBJGVASPACE(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJGVASPACE_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJGVASPACE_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGVASPACE(OBJGVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGVASPACE(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_gpu_vaspace_nvoc.h b/src/nvidia/generated/g_gpu_vaspace_nvoc.h new file mode 100644 index 000000000..e5b2ff300 --- /dev/null +++ b/src/nvidia/generated/g_gpu_vaspace_nvoc.h @@ -0,0 +1,708 @@ +#ifndef _G_GPU_VASPACE_NVOC_H_ +#define _G_GPU_VASPACE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_gpu_vaspace_nvoc.h" + +#ifndef GPU_VASPACE_H +#define GPU_VASPACE_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: gpu_vaspace.h * +* Defines and structures used for GPU Virtual Address Space Object. 
* +\***************************************************************************/ + +#include "mmu/mmu_walk.h" +#include "mmu/gmmu_fmt.h" + +#include "core/core.h" +#include "mem_mgr/vaspace.h" // base class object header +#include "ctrl/ctrl90f1.h" + +#include "containers/list.h" +#include "containers/map.h" +#include "mem_mgr/pool_alloc.h" + +MAKE_MAP(GVAS_CHANGRP_MAP, NvU32); +typedef GVAS_CHANGRP_MAPIter GVAS_CHANGRP_MAP_ITER; + +typedef struct OBJGVASPACE *POBJGVASPACE; + +#ifndef __NVOC_CLASS_OBJGVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJGVASPACE_TYPEDEF__ +typedef struct OBJGVASPACE OBJGVASPACE; +#endif /* __NVOC_CLASS_OBJGVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGVASPACE +#define __nvoc_class_id_OBJGVASPACE 0xba5875 +#endif /* __nvoc_class_id_OBJGVASPACE */ + + +typedef struct KernelChannelGroup KernelChannelGroup; + +#ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ +typedef struct KernelChannelGroup KernelChannelGroup; +#endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroup +#define __nvoc_class_id_KernelChannelGroup 0xec6de1 +#endif /* __nvoc_class_id_KernelChannelGroup */ + + + +/*! + * Max number of ranges of VA reserved for partial page tables. + */ +#define GVAS_MAX_PARTIAL_PAGE_TABLE_RANGES 5 + +// On Linux, CUDA apps allocate VA in bottom 4GB and also above 8GB +#define SPLIT_VAS_SERVER_RM_MANAGED_VA_START 0x100000000ULL // 4GB +#define SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE 0x20000000ULL // 512MB + +/*! + * Tracks ranges currently mapped (per-GPU). + */ +typedef struct +{ + /*! + * Embedded btree node. + */ + NODE node; + + /*! + * Mask of GPUs with this mapping. + */ + NvU32 gpuMask; +} GVAS_MAPPING; + +/*! + * Virtual Address Space Block - Data tracked per virtual allocation + */ +typedef struct +{ + /*! + * Tree of current mappings. + */ + GVAS_MAPPING *pMapTree; + /*! + * Mask of page sizes locked down at VA reservation. + */ + NvU64 pageSizeLockMask; + /*! + * Flags passed by user on VA alloc (reservation). + */ + VAS_ALLOC_FLAGS flags; + /*! + * Level of management. + */ + VA_MANAGEMENT management; +} GVAS_BLOCK, *PGVAS_BLOCK; + +/*! + * Virtual address range. + */ +struct VA_RANGE_GPU +{ + NvU64 vaLo; + NvU64 vaHi; +}; + +typedef struct VA_RANGE_GPU VA_RANGE_GPU; + +/*! + * List of virtual address range. + */ +MAKE_LIST(ReservedPageTableEntriesList, VA_RANGE_GPU); + +/*! + * Per-VAS per-GPU state. + */ +typedef struct GVAS_GPU_STATE +{ + /*! + * GMMU format for this GPU (structured for heterogenous SLI). + */ + const GMMU_FMT *pFmt; + /*! + * MMU walk library state. + */ + MMU_WALK *pWalk; + /*! + * Mirrored Root Page Dir for UVM mirroring. + */ + MMU_WALK_MEMDESC *pMirroredRoot; + /*! + * RM-internal root page directory for shared VAS management. + */ + MEMORY_DESCRIPTOR *pRootInternal; + /*! + * List head of 4K page cache used for suballocating BPTs + */ + MEMORY_DESCRIPTOR_LIST unpackedMemDescList; + /*! + * Reserved page table entries for the GVA space. + */ + ReservedPageTableEntriesList reservedPageTableEntries; + + /*! + * FLA Dummy page for short term WAR for FLA Security issue documented in + * bug: 3059741. + */ + struct + { + /*! + * Memory handle for Dummy Big page + */ + NvHandle hMemory; + /*! + * PTE Entry for 64K Dummy page size + */ + GMMU_ENTRY_VALUE pte; + }flaDummyPage; +} GVAS_GPU_STATE; + +/*! + * GVAS definition of the MMU walker user context. 
+ */ +struct MMU_WALK_USER_CTX +{ + struct OBJGVASPACE *pGVAS; + OBJGPU *pGpu; + GVAS_GPU_STATE *pGpuState; + const GVAS_BLOCK *pBlock; + const NvU32 *pChID; + NvU32 gfid; +}; + + +/*! + * RM-registered/managed GPU virtual address space. + */ +#ifdef NVOC_GPU_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJGVASPACE { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJVASPACE __nvoc_base_OBJVASPACE; + struct Object *__nvoc_pbase_Object; + struct OBJVASPACE *__nvoc_pbase_OBJVASPACE; + struct OBJGVASPACE *__nvoc_pbase_OBJGVASPACE; + NV_STATUS (*__gvaspaceConstruct___)(struct OBJGVASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32); + NV_STATUS (*__gvaspaceReserveMempool__)(struct OBJGVASPACE *, struct OBJGPU *, NvHandle, NvU64, NvU64, NvU32); + NV_STATUS (*__gvaspaceAlloc__)(struct OBJGVASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *); + NV_STATUS (*__gvaspaceFree__)(struct OBJGVASPACE *, NvU64); + NV_STATUS (*__gvaspaceApplyDefaultAlignment__)(struct OBJGVASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *); + NV_STATUS (*__gvaspaceIncAllocRefCnt__)(struct OBJGVASPACE *, NvU64); + NV_STATUS (*__gvaspaceMap__)(struct OBJGVASPACE *, struct OBJGPU *, const NvU64, const NvU64, const MMU_MAP_TARGET *, const VAS_MAP_FLAGS); + void (*__gvaspaceUnmap__)(struct OBJGVASPACE *, struct OBJGPU *, const NvU64, const NvU64); + struct OBJEHEAP *(*__gvaspaceGetHeap__)(struct OBJGVASPACE *); + NvU32 (*__gvaspaceGetMapPageSize__)(struct OBJGVASPACE *, struct OBJGPU *, EMEMBLOCK *); + NvU32 (*__gvaspaceGetBigPageSize__)(struct OBJGVASPACE *); + NvU32 (*__gvaspaceGetFlags__)(struct OBJGVASPACE *); + NvBool (*__gvaspaceIsMirrored__)(struct OBJGVASPACE *); + NvBool (*__gvaspaceIsFaultCapable__)(struct OBJGVASPACE *); + NvBool (*__gvaspaceIsExternallyOwned__)(struct OBJGVASPACE *); + NvBool (*__gvaspaceIsAtsEnabled__)(struct OBJGVASPACE *); + NV_STATUS (*__gvaspaceGetPasid__)(struct OBJGVASPACE *, NvU32 *); + PMEMORY_DESCRIPTOR (*__gvaspaceGetPageDirBase__)(struct OBJGVASPACE *, struct OBJGPU *); + PMEMORY_DESCRIPTOR (*__gvaspaceGetKernelPageDirBase__)(struct OBJGVASPACE *, struct OBJGPU *); + NV_STATUS (*__gvaspacePinRootPageDir__)(struct OBJGVASPACE *, struct OBJGPU *); + void (*__gvaspaceUnpinRootPageDir__)(struct OBJGVASPACE *, struct OBJGPU *); + void (*__gvaspaceInvalidateTlb__)(struct OBJGVASPACE *, struct OBJGPU *, VAS_PTE_UPDATE_TYPE); + NV_STATUS (*__gvaspaceGetVasInfo__)(struct OBJGVASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); + NV_STATUS (*__gvaspaceGetPageTableInfo__)(struct OBJGVASPACE *, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *); + NV_STATUS (*__gvaspaceGetPteInfo__)(struct OBJGVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *, RmPhysAddr *); + NV_STATUS (*__gvaspaceSetPteInfo__)(struct OBJGVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *); + NvBool (*__gvaspaceIsInternalVaRestricted__)(struct OBJGVASPACE *); + NvU64 (*__gvaspaceGetVaLimit__)(struct OBJGVASPACE *); + NvU64 (*__gvaspaceGetVaStart__)(struct OBJGVASPACE *); + struct OBJEHEAP *pHeap; + NvU32 bigPageSize; + NvU64 maxPageSizeSupported; + NvU64 compPageSize; + NvU64 extManagedAlign; + NvU32 flags; + NvU64 partialPtVaRangeSize; + NvU64 partialPtVaRangeBase[5]; + NvU32 numPartialPtRanges; + NvBool bIsMirrored; + NvBool bIsFaultCapable; + NvBool bIsExternallyOwned; + MEMORY_DESCRIPTOR *pExternalPDB; + NvBool bIsAtsEnabled; + NvU32 processAddrSpaceId; + NvU64 vaLimitMax; + 
GVAS_GPU_STATE *pGpuStates; + GVAS_CHANGRP_MAP chanGrpMap; + NvU64 vaStartInternal; + NvU64 vaLimitInternal; + NvBool bRMInternalRestrictedVaRange; + NvU64 vaStartServerRMOwned; + NvU64 vaLimitServerRMOwned; + RM_POOL_ALLOC_MEM_RESERVE_INFO *pPageTableMemPool; +}; + +#ifndef __NVOC_CLASS_OBJGVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJGVASPACE_TYPEDEF__ +typedef struct OBJGVASPACE OBJGVASPACE; +#endif /* __NVOC_CLASS_OBJGVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGVASPACE +#define __nvoc_class_id_OBJGVASPACE 0xba5875 +#endif /* __nvoc_class_id_OBJGVASPACE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGVASPACE; + +#define __staticCast_OBJGVASPACE(pThis) \ + ((pThis)->__nvoc_pbase_OBJGVASPACE) + +#ifdef __nvoc_gpu_vaspace_h_disabled +#define __dynamicCast_OBJGVASPACE(pThis) ((OBJGVASPACE*)NULL) +#else //__nvoc_gpu_vaspace_h_disabled +#define __dynamicCast_OBJGVASPACE(pThis) \ + ((OBJGVASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGVASPACE))) +#endif //__nvoc_gpu_vaspace_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJGVASPACE(OBJGVASPACE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGVASPACE(OBJGVASPACE**, Dynamic*, NvU32); +#define __objCreate_OBJGVASPACE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJGVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define gvaspaceConstruct_(pGVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) gvaspaceConstruct__DISPATCH(pGVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) +#define gvaspaceReserveMempool(pGVAS, pGpu, hClient, size, pageSizeLockMask, flags) gvaspaceReserveMempool_DISPATCH(pGVAS, pGpu, hClient, size, pageSizeLockMask, flags) +#define gvaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) gvaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) +#define gvaspaceFree(pVAS, vAddr) gvaspaceFree_DISPATCH(pVAS, vAddr) +#define gvaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) gvaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) +#define gvaspaceIncAllocRefCnt(pVAS, vAddr) gvaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr) +#define gvaspaceMap(pVAS, pGpu, vaLo, vaHi, pTarget, flags) gvaspaceMap_DISPATCH(pVAS, pGpu, vaLo, vaHi, pTarget, flags) +#define gvaspaceUnmap(pVAS, pGpu, vaLo, vaHi) gvaspaceUnmap_DISPATCH(pVAS, pGpu, vaLo, vaHi) +#define gvaspaceGetHeap(pVAS) gvaspaceGetHeap_DISPATCH(pVAS) +#define gvaspaceGetMapPageSize(pVAS, pGpu, pMemBlock) gvaspaceGetMapPageSize_DISPATCH(pVAS, pGpu, pMemBlock) +#define gvaspaceGetBigPageSize(pVAS) gvaspaceGetBigPageSize_DISPATCH(pVAS) +#define gvaspaceGetFlags(pVAS) gvaspaceGetFlags_DISPATCH(pVAS) +#define gvaspaceIsMirrored(pVAS) gvaspaceIsMirrored_DISPATCH(pVAS) +#define gvaspaceIsFaultCapable(pVAS) gvaspaceIsFaultCapable_DISPATCH(pVAS) +#define gvaspaceIsExternallyOwned(pVAS) gvaspaceIsExternallyOwned_DISPATCH(pVAS) +#define gvaspaceIsAtsEnabled(pVAS) gvaspaceIsAtsEnabled_DISPATCH(pVAS) +#define gvaspaceGetPasid(pVAS, pPasid) gvaspaceGetPasid_DISPATCH(pVAS, pPasid) +#define gvaspaceGetPageDirBase(pVAS, pGpu) gvaspaceGetPageDirBase_DISPATCH(pVAS, pGpu) +#define gvaspaceGetKernelPageDirBase(pVAS, pGpu) gvaspaceGetKernelPageDirBase_DISPATCH(pVAS, pGpu) +#define gvaspacePinRootPageDir(pVAS, pGpu) gvaspacePinRootPageDir_DISPATCH(pVAS, pGpu) +#define gvaspaceUnpinRootPageDir(pVAS, pGpu) 
gvaspaceUnpinRootPageDir_DISPATCH(pVAS, pGpu) +#define gvaspaceInvalidateTlb(pVAS, pGpu, type) gvaspaceInvalidateTlb_DISPATCH(pVAS, pGpu, type) +#define gvaspaceGetVasInfo(pVAS, pParams) gvaspaceGetVasInfo_DISPATCH(pVAS, pParams) +#define gvaspaceGetPageTableInfo(pVAS, pParams) gvaspaceGetPageTableInfo_DISPATCH(pVAS, pParams) +#define gvaspaceGetPteInfo(pVAS, pGpu, pParams, pPhysAddr) gvaspaceGetPteInfo_DISPATCH(pVAS, pGpu, pParams, pPhysAddr) +#define gvaspaceSetPteInfo(pVAS, pGpu, pParams) gvaspaceSetPteInfo_DISPATCH(pVAS, pGpu, pParams) +#define gvaspaceIsInternalVaRestricted(pGVAS) gvaspaceIsInternalVaRestricted_DISPATCH(pGVAS) +#define gvaspaceGetVaLimit(pVAS) gvaspaceGetVaLimit_DISPATCH(pVAS) +#define gvaspaceGetVaStart(pVAS) gvaspaceGetVaStart_DISPATCH(pVAS) +NV_STATUS gvaspaceConstruct__IMPL(struct OBJGVASPACE *pGVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags); + +static inline NV_STATUS gvaspaceConstruct__DISPATCH(struct OBJGVASPACE *pGVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return pGVAS->__gvaspaceConstruct___(pGVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +NV_STATUS gvaspaceReserveMempool_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags); + +static inline NV_STATUS gvaspaceReserveMempool_DISPATCH(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) { + return pGVAS->__gvaspaceReserveMempool__(pGVAS, pGpu, hClient, size, pageSizeLockMask, flags); +} + +NV_STATUS gvaspaceAlloc_IMPL(struct OBJGVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr); + +static inline NV_STATUS gvaspaceAlloc_DISPATCH(struct OBJGVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return pVAS->__gvaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +NV_STATUS gvaspaceFree_IMPL(struct OBJGVASPACE *pVAS, NvU64 vAddr); + +static inline NV_STATUS gvaspaceFree_DISPATCH(struct OBJGVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__gvaspaceFree__(pVAS, vAddr); +} + +NV_STATUS gvaspaceApplyDefaultAlignment_IMPL(struct OBJGVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask); + +static inline NV_STATUS gvaspaceApplyDefaultAlignment_DISPATCH(struct OBJGVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return pVAS->__gvaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +NV_STATUS gvaspaceIncAllocRefCnt_IMPL(struct OBJGVASPACE *pVAS, NvU64 vAddr); + +static inline NV_STATUS gvaspaceIncAllocRefCnt_DISPATCH(struct OBJGVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__gvaspaceIncAllocRefCnt__(pVAS, vAddr); +} + +NV_STATUS gvaspaceMap_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags); + +static inline NV_STATUS gvaspaceMap_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) { + return pVAS->__gvaspaceMap__(pVAS, pGpu, vaLo, vaHi, pTarget, flags); +} + +void 
gvaspaceUnmap_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi); + +static inline void gvaspaceUnmap_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) { + pVAS->__gvaspaceUnmap__(pVAS, pGpu, vaLo, vaHi); +} + +struct OBJEHEAP *gvaspaceGetHeap_IMPL(struct OBJGVASPACE *pVAS); + +static inline struct OBJEHEAP *gvaspaceGetHeap_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceGetHeap__(pVAS); +} + +NvU32 gvaspaceGetMapPageSize_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock); + +static inline NvU32 gvaspaceGetMapPageSize_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) { + return pVAS->__gvaspaceGetMapPageSize__(pVAS, pGpu, pMemBlock); +} + +NvU32 gvaspaceGetBigPageSize_IMPL(struct OBJGVASPACE *pVAS); + +static inline NvU32 gvaspaceGetBigPageSize_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceGetBigPageSize__(pVAS); +} + +NvU32 gvaspaceGetFlags_IMPL(struct OBJGVASPACE *pVAS); + +static inline NvU32 gvaspaceGetFlags_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceGetFlags__(pVAS); +} + +NvBool gvaspaceIsMirrored_IMPL(struct OBJGVASPACE *pVAS); + +static inline NvBool gvaspaceIsMirrored_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceIsMirrored__(pVAS); +} + +NvBool gvaspaceIsFaultCapable_IMPL(struct OBJGVASPACE *pVAS); + +static inline NvBool gvaspaceIsFaultCapable_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceIsFaultCapable__(pVAS); +} + +NvBool gvaspaceIsExternallyOwned_IMPL(struct OBJGVASPACE *pVAS); + +static inline NvBool gvaspaceIsExternallyOwned_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceIsExternallyOwned__(pVAS); +} + +NvBool gvaspaceIsAtsEnabled_IMPL(struct OBJGVASPACE *pVAS); + +static inline NvBool gvaspaceIsAtsEnabled_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceIsAtsEnabled__(pVAS); +} + +NV_STATUS gvaspaceGetPasid_IMPL(struct OBJGVASPACE *pVAS, NvU32 *pPasid); + +static inline NV_STATUS gvaspaceGetPasid_DISPATCH(struct OBJGVASPACE *pVAS, NvU32 *pPasid) { + return pVAS->__gvaspaceGetPasid__(pVAS, pPasid); +} + +PMEMORY_DESCRIPTOR gvaspaceGetPageDirBase_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu); + +static inline PMEMORY_DESCRIPTOR gvaspaceGetPageDirBase_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__gvaspaceGetPageDirBase__(pVAS, pGpu); +} + +PMEMORY_DESCRIPTOR gvaspaceGetKernelPageDirBase_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu); + +static inline PMEMORY_DESCRIPTOR gvaspaceGetKernelPageDirBase_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__gvaspaceGetKernelPageDirBase__(pVAS, pGpu); +} + +NV_STATUS gvaspacePinRootPageDir_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu); + +static inline NV_STATUS gvaspacePinRootPageDir_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__gvaspacePinRootPageDir__(pVAS, pGpu); +} + +void gvaspaceUnpinRootPageDir_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu); + +static inline void gvaspaceUnpinRootPageDir_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu) { + pVAS->__gvaspaceUnpinRootPageDir__(pVAS, pGpu); +} + +void gvaspaceInvalidateTlb_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type); + +static inline void gvaspaceInvalidateTlb_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) { + pVAS->__gvaspaceInvalidateTlb__(pVAS, pGpu, type); +} + 
+NV_STATUS gvaspaceGetVasInfo_IMPL(struct OBJGVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams); + +static inline NV_STATUS gvaspaceGetVasInfo_DISPATCH(struct OBJGVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pVAS->__gvaspaceGetVasInfo__(pVAS, pParams); +} + +NV_STATUS gvaspaceGetPageTableInfo_IMPL(struct OBJGVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams); + +static inline NV_STATUS gvaspaceGetPageTableInfo_DISPATCH(struct OBJGVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + return pVAS->__gvaspaceGetPageTableInfo__(pVAS, pParams); +} + +NV_STATUS gvaspaceGetPteInfo_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr); + +static inline NV_STATUS gvaspaceGetPteInfo_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) { + return pVAS->__gvaspaceGetPteInfo__(pVAS, pGpu, pParams, pPhysAddr); +} + +NV_STATUS gvaspaceSetPteInfo_IMPL(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams); + +static inline NV_STATUS gvaspaceSetPteInfo_DISPATCH(struct OBJGVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + return pVAS->__gvaspaceSetPteInfo__(pVAS, pGpu, pParams); +} + +NvBool gvaspaceIsInternalVaRestricted_IMPL(struct OBJGVASPACE *pGVAS); + +static inline NvBool gvaspaceIsInternalVaRestricted_DISPATCH(struct OBJGVASPACE *pGVAS) { + return pGVAS->__gvaspaceIsInternalVaRestricted__(pGVAS); +} + +static inline NvU64 gvaspaceGetVaLimit_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceGetVaLimit__(pVAS); +} + +static inline NvU64 gvaspaceGetVaStart_DISPATCH(struct OBJGVASPACE *pVAS) { + return pVAS->__gvaspaceGetVaStart__(pVAS); +} + +static inline NvU32 gvaspaceGetReservedVaspaceBase(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu) { + if (!((0) && (pGpu))) { + return 1048576; + } + return 67108864; +} + +void gvaspaceDestruct_IMPL(struct OBJGVASPACE *pGVAS); +#define __nvoc_gvaspaceDestruct(pGVAS) gvaspaceDestruct_IMPL(pGVAS) +NV_STATUS gvaspaceExternalRootDirCommit_IMPL(struct OBJGVASPACE *pGVAS, NvHandle hClient, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceExternalRootDirCommit(struct OBJGVASPACE *pGVAS, NvHandle hClient, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceExternalRootDirCommit(pGVAS, hClient, pGpu, pParams) gvaspaceExternalRootDirCommit_IMPL(pGVAS, hClient, pGpu, pParams) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceExternalRootDirRevoke_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceExternalRootDirRevoke(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceExternalRootDirRevoke(pGVAS, pGpu, pParams) gvaspaceExternalRootDirRevoke_IMPL(pGVAS, pGpu, pParams) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceResize_IMPL(struct OBJGVASPACE *pGVAS, 
NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *pParams); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceResize(struct OBJGVASPACE *pGVAS, NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceResize(pGVAS, pParams) gvaspaceResize_IMPL(pGVAS, pParams) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceUpdatePde2_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceUpdatePde2(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceUpdatePde2(pGVAS, pGpu, pParams) gvaspaceUpdatePde2_IMPL(pGVAS, pGpu, pParams) +#endif //__nvoc_gpu_vaspace_h_disabled + +const struct GMMU_FMT *gvaspaceGetGmmuFmt_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline const struct GMMU_FMT *gvaspaceGetGmmuFmt(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NULL; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceGetGmmuFmt(pGVAS, pGpu) gvaspaceGetGmmuFmt_IMPL(pGVAS, pGpu) +#endif //__nvoc_gpu_vaspace_h_disabled + +GVAS_GPU_STATE *gvaspaceGetGpuState_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline GVAS_GPU_STATE *gvaspaceGetGpuState(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NULL; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceGetGpuState(pGVAS, pGpu) gvaspaceGetGpuState_IMPL(pGVAS, pGpu) +#endif //__nvoc_gpu_vaspace_h_disabled + +void gvaspaceWalkUserCtxAcquire_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, const GVAS_BLOCK *pVASBlock, struct MMU_WALK_USER_CTX *pUserCtx); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline void gvaspaceWalkUserCtxAcquire(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, const GVAS_BLOCK *pVASBlock, struct MMU_WALK_USER_CTX *pUserCtx) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, pUserCtx) gvaspaceWalkUserCtxAcquire_IMPL(pGVAS, pGpu, pVASBlock, pUserCtx) +#endif //__nvoc_gpu_vaspace_h_disabled + +void gvaspaceWalkUserCtxRelease_IMPL(struct OBJGVASPACE *pGVAS, struct MMU_WALK_USER_CTX *pUserCtx); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline void gvaspaceWalkUserCtxRelease(struct OBJGVASPACE *pGVAS, struct MMU_WALK_USER_CTX *pUserCtx) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceWalkUserCtxRelease(pGVAS, pUserCtx) gvaspaceWalkUserCtxRelease_IMPL(pGVAS, pUserCtx) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceIncChanGrpRefCnt_IMPL(struct OBJGVASPACE *pGVAS, KernelChannelGroup *pKernelChannelGroup); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceIncChanGrpRefCnt(struct OBJGVASPACE *pGVAS, KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceIncChanGrpRefCnt(pGVAS, 
pKernelChannelGroup) gvaspaceIncChanGrpRefCnt_IMPL(pGVAS, pKernelChannelGroup) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceDecChanGrpRefCnt_IMPL(struct OBJGVASPACE *pGVAS, KernelChannelGroup *pKernelChannelGroup); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceDecChanGrpRefCnt(struct OBJGVASPACE *pGVAS, KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceDecChanGrpRefCnt(pGVAS, pKernelChannelGroup) gvaspaceDecChanGrpRefCnt_IMPL(pGVAS, pKernelChannelGroup) +#endif //__nvoc_gpu_vaspace_h_disabled + +NvU32 gvaspaceGetChanGrpRefCnt_IMPL(struct OBJGVASPACE *pGVAS, KernelChannelGroup *pKernelChannelGroup); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NvU32 gvaspaceGetChanGrpRefCnt(struct OBJGVASPACE *pGVAS, KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return 0; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceGetChanGrpRefCnt(pGVAS, pKernelChannelGroup) gvaspaceGetChanGrpRefCnt_IMPL(pGVAS, pKernelChannelGroup) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceCheckChanGrpRefCnt_IMPL(struct OBJGVASPACE *pGVAS, KernelChannelGroup *pKernelChannelGroup); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceCheckChanGrpRefCnt(struct OBJGVASPACE *pGVAS, KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceCheckChanGrpRefCnt(pGVAS, pKernelChannelGroup) gvaspaceCheckChanGrpRefCnt_IMPL(pGVAS, pKernelChannelGroup) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceUnregisterAllChanGrps_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceUnregisterAllChanGrps(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceUnregisterAllChanGrps(pGVAS, pGpu) gvaspaceUnregisterAllChanGrps_IMPL(pGVAS, pGpu) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceReservePageTableEntries_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const NvU64 pageSizeMask); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceReservePageTableEntries(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const NvU64 pageSizeMask) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceReservePageTableEntries(pGVAS, pGpu, vaLo, vaHi, pageSizeMask) gvaspaceReservePageTableEntries_IMPL(pGVAS, pGpu, vaLo, vaHi, pageSizeMask) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceReleasePageTableEntries_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const NvU64 pageSizeMask); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceReleasePageTableEntries(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const NvU64 pageSizeMask) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceReleasePageTableEntries(pGVAS, pGpu, vaLo, vaHi, 
pageSizeMask) gvaspaceReleasePageTableEntries_IMPL(pGVAS, pGpu, vaLo, vaHi, pageSizeMask) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceGetPageLevelInfo_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS *pParams); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceGetPageLevelInfo(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu, NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceGetPageLevelInfo(pGVAS, pGpu, pParams) gvaspaceGetPageLevelInfo_IMPL(pGVAS, pGpu, pParams) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceCopyServerRmReservedPdesToServerRm_IMPL(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceCopyServerRmReservedPdesToServerRm(struct OBJGVASPACE *pGVAS, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceCopyServerRmReservedPdesToServerRm(pGVAS, pGpu) gvaspaceCopyServerRmReservedPdesToServerRm_IMPL(pGVAS, pGpu) +#endif //__nvoc_gpu_vaspace_h_disabled + +NV_STATUS gvaspaceGetFreeHeap_IMPL(struct OBJGVASPACE *pGVAS, NvU64 *pFreeSize); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NV_STATUS gvaspaceGetFreeHeap(struct OBJGVASPACE *pGVAS, NvU64 *pFreeSize) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceGetFreeHeap(pGVAS, pFreeSize) gvaspaceGetFreeHeap_IMPL(pGVAS, pFreeSize) +#endif //__nvoc_gpu_vaspace_h_disabled + +NvBool gvaspaceIsInUse_IMPL(struct OBJGVASPACE *pGVAS); +#ifdef __nvoc_gpu_vaspace_h_disabled +static inline NvBool gvaspaceIsInUse(struct OBJGVASPACE *pGVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJGVASPACE was disabled!"); + return NV_FALSE; +} +#else //__nvoc_gpu_vaspace_h_disabled +#define gvaspaceIsInUse(pGVAS) gvaspaceIsInUse_IMPL(pGVAS) +#endif //__nvoc_gpu_vaspace_h_disabled + +#undef PRIVATE_FIELD + + +#endif // GPU_VASPACE_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_GPU_VASPACE_NVOC_H_ diff --git a/src/nvidia/generated/g_gr_pb.c b/src/nvidia/generated/g_gr_pb.c new file mode 100644 index 000000000..94c15a8aa --- /dev/null +++ b/src/nvidia/generated/g_gr_pb.c @@ -0,0 +1,173 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+ +#include "nvtypes.h" +#include "prbrt.h" +#include "g_gr_pb.h" + +// 'GR_ERR_TYP' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_gr_err_typ[] = { + { + 255, + PRB_MAYBE_ENUM_NAME("GR_ERR_TYP_UNKNOWN") + }, +}; + +const PRB_ENUM_DESC prb_enums_gr_gr_err_typ = { + prb_enum_mappings_gr_err_typ, + 1, + PRB_MAYBE_ENUM_NAME("GR_ERR_TYP") +}; + +// 'GR_EXCPTN_SUBTYP' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_gr_excptn_subtyp[] = { + { + 255, + PRB_MAYBE_ENUM_NAME("GR_EXCPTN_SUBTYP_UNKNOWN") + }, +}; + +const PRB_ENUM_DESC prb_enums_gr_gr_excptn_subtyp = { + prb_enum_mappings_gr_excptn_subtyp, + 1, + PRB_MAYBE_ENUM_NAME("GR_EXCPTN_SUBTYP") +}; + +// 'Exception' field defaults + +// 'Exception' field descriptors +const PRB_FIELD_DESC prb_fields_gr_exception[] = { + { + 1, + { + PRB_REQUIRED, + PRB_MESSAGE, + 0, + }, + GR_EXCEPTION_EXCEPTIONDATA, + 0, + PRB_MAYBE_FIELD_NAME("ed") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + REGS_REGSANDMEM, + 0, + PRB_MAYBE_FIELD_NAME("nv50_regs") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + REGS_REGSANDMEM, + 0, + PRB_MAYBE_FIELD_NAME("gt200_regs") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + REGS_REGSANDMEM, + 0, + PRB_MAYBE_FIELD_NAME("gt212_regs") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + REGS_REGSANDMEM, + 0, + PRB_MAYBE_FIELD_NAME("gf100_regs") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'ExceptionData' field defaults + +// 'ExceptionData' field descriptors +const PRB_FIELD_DESC prb_fields_gr_exception_exceptiondata[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_instance") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("ch_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("error_sequence_number") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("time_stamp") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// Message descriptors +const PRB_MSG_DESC prb_messages_gr[] = { + { + 5, + prb_fields_gr_exception, + PRB_MAYBE_MESSAGE_NAME("Gr.Exception") + }, + { + 4, + prb_fields_gr_exception_exceptiondata, + PRB_MAYBE_MESSAGE_NAME("Gr.Exception.ExceptionData") + }, +}; + +// Service descriptors +const PRB_SERVICE_DESC prb_services_gr[] = { + { 0 } +}; + diff --git a/src/nvidia/generated/g_gr_pb.h b/src/nvidia/generated/g_gr_pb.h new file mode 100644 index 000000000..e4f7e646d --- /dev/null +++ b/src/nvidia/generated/g_gr_pb.h @@ -0,0 +1,63 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#ifndef G_GR_PB_H__ +#define G_GR_PB_H__ + +#include "g_regs_pb.h" + +extern const PRB_ENUM_DESC prb_enums_gr_gr_err_typ; + +// 'GR_ERR_TYP' enumeration values +#define GR_GR_ERR_TYP_UNKNOWN 255 + +extern const PRB_ENUM_DESC prb_enums_gr_gr_excptn_subtyp; + +// 'GR_EXCPTN_SUBTYP' enumeration values +#define GR_GR_EXCPTN_SUBTYP_UNKNOWN 255 + +extern const PRB_MSG_DESC prb_messages_gr[]; + +// Message descriptor pointers +#define GR_EXCEPTION (&prb_messages_gr[0]) +#define GR_EXCEPTION_EXCEPTIONDATA (&prb_messages_gr[1]) + +// Message maximum lengths +// Does not include repeated fields, strings and byte arrays. 
+#define GR_EXCEPTION_LEN 153 +#define GR_EXCEPTION_EXCEPTIONDATA_LEN 29 + +extern const PRB_FIELD_DESC prb_fields_gr_exception[]; + +// 'Exception' field descriptor pointers +#define GR_EXCEPTION_ED (&prb_fields_gr_exception[0]) +#define GR_EXCEPTION_NV50_REGS (&prb_fields_gr_exception[1]) +#define GR_EXCEPTION_GT200_REGS (&prb_fields_gr_exception[2]) +#define GR_EXCEPTION_GT212_REGS (&prb_fields_gr_exception[3]) +#define GR_EXCEPTION_GF100_REGS (&prb_fields_gr_exception[4]) + +// 'Exception' field lengths +#define GR_EXCEPTION_ED_LEN 32 +#define GR_EXCEPTION_NV50_REGS_LEN 29 +#define GR_EXCEPTION_GT200_REGS_LEN 29 +#define GR_EXCEPTION_GT212_REGS_LEN 29 +#define GR_EXCEPTION_GF100_REGS_LEN 29 + +extern const PRB_FIELD_DESC prb_fields_gr_exception_exceptiondata[]; + +// 'ExceptionData' field descriptor pointers +#define GR_EXCEPTION_EXCEPTIONDATA_GPU_INSTANCE (&prb_fields_gr_exception_exceptiondata[0]) +#define GR_EXCEPTION_EXCEPTIONDATA_CH_ID (&prb_fields_gr_exception_exceptiondata[1]) +#define GR_EXCEPTION_EXCEPTIONDATA_ERROR_SEQUENCE_NUMBER (&prb_fields_gr_exception_exceptiondata[2]) +#define GR_EXCEPTION_EXCEPTIONDATA_TIME_STAMP (&prb_fields_gr_exception_exceptiondata[3]) + +// 'ExceptionData' field lengths +#define GR_EXCEPTION_EXCEPTIONDATA_GPU_INSTANCE_LEN 5 +#define GR_EXCEPTION_EXCEPTIONDATA_CH_ID_LEN 5 +#define GR_EXCEPTION_EXCEPTIONDATA_ERROR_SEQUENCE_NUMBER_LEN 5 +#define GR_EXCEPTION_EXCEPTIONDATA_TIME_STAMP_LEN 10 + +extern const PRB_SERVICE_DESC prb_services_gr[]; + +// Service descriptor pointers + +#endif // G_GR_PB_H__ diff --git a/src/nvidia/generated/g_hal.h b/src/nvidia/generated/g_hal.h new file mode 100644 index 000000000..2671c56f5 --- /dev/null +++ b/src/nvidia/generated/g_hal.h @@ -0,0 +1,153 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! 
+// +// HAL support for use in HAL setup +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_hal.h +// + +#ifndef _G_RMCFG_HAL_H_ +#define _G_RMCFG_HAL_H_ + + +typedef struct DISP_HAL_IFACES *PDISP_HAL_IFACES; +typedef struct DPU_HAL_IFACES *PDPU_HAL_IFACES; +typedef struct GPIO_HAL_IFACES *PGPIO_HAL_IFACES; +typedef struct RPC_HAL_IFACES *PRPC_HAL_IFACES; +typedef struct RPCSTRUCTURECOPY_HAL_IFACES *PRPCSTRUCTURECOPY_HAL_IFACES; + + + +// +// per-GPU list of function ptrs to setup iface for each engine +// + +typedef struct { + + void (*rpcHalIfacesSetupFn)(PRPC_HAL_IFACES pRpcHal); + +} HAL_IFACE_SETUP, *PHAL_IFACE_SETUP; + + + +// +// IP_VERSIONS support +// + +typedef struct IGRP_IP_VERSIONS_TABLE_INFO IGRP_IP_VERSIONS_TABLE_INFO; + +// generic form of Head_iGrp_ipVersions_getInfo typedef + +typedef NV_STATUS IGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *); +typedef void IGrp_ipVersions_install(IGRP_IP_VERSIONS_TABLE_INFO *); +typedef NV_STATUS IGrp_ipVersions_wrapup(IGRP_IP_VERSIONS_TABLE_INFO *); + +// a single inclusive version range +typedef struct { + NvU32 v0; + NvU32 v1; +} IGRP_IP_VERSION_RANGE; + + +typedef struct { + const IGRP_IP_VERSION_RANGE *pRanges; + NvU32 numRanges; + IGrp_ipVersions_install *ifacesInstallFn; +} IGRP_IP_VERSIONS_ENTRY; + + +struct IGRP_IP_VERSIONS_TABLE_INFO { + POBJGPU pGpu; + Dynamic *pDynamic; // eg: pBiff + + const IGRP_IP_VERSIONS_ENTRY *pTable; + NvU32 numEntries; + IGrp_ipVersions_wrapup *ifacesWrapupFn; // overrides and asserts +}; + +// HAL_IMPLEMENTATION enum +typedef enum +{ + HAL_IMPL_GF100, + HAL_IMPL_GF100B, + HAL_IMPL_GF104, + HAL_IMPL_GF104B, + HAL_IMPL_GF106, + HAL_IMPL_GF106B, + HAL_IMPL_GF108, + HAL_IMPL_GF110D, + HAL_IMPL_GF110, + HAL_IMPL_GF117, + HAL_IMPL_GF118, + HAL_IMPL_GF119, + HAL_IMPL_GF110F, + HAL_IMPL_GF110F2, + HAL_IMPL_GF110F3, + HAL_IMPL_GK104, + HAL_IMPL_GK106, + HAL_IMPL_GK107, + HAL_IMPL_GK20A, + HAL_IMPL_GK110, + HAL_IMPL_GK110B, + HAL_IMPL_GK110C, + HAL_IMPL_GK208, + HAL_IMPL_GK208S, + HAL_IMPL_GM107, + HAL_IMPL_GM108, + HAL_IMPL_GM200, + HAL_IMPL_GM204, + HAL_IMPL_GM206, + HAL_IMPL_GP100, + HAL_IMPL_GP102, + HAL_IMPL_GP104, + HAL_IMPL_GP106, + HAL_IMPL_GP107, + HAL_IMPL_GP108, + HAL_IMPL_GV100, + HAL_IMPL_GV11B, + HAL_IMPL_TU102, + HAL_IMPL_TU104, + HAL_IMPL_TU106, + HAL_IMPL_TU116, + HAL_IMPL_TU117, + HAL_IMPL_GA100, + HAL_IMPL_GA102, + HAL_IMPL_GA103, + HAL_IMPL_GA104, + HAL_IMPL_GA106, + HAL_IMPL_GA107, + HAL_IMPL_GA10B, + HAL_IMPL_GA102F, + HAL_IMPL_T001_FERMI_NOT_EXIST, + HAL_IMPL_T124, + HAL_IMPL_T132, + HAL_IMPL_T210, + HAL_IMPL_T186, + HAL_IMPL_T194, + HAL_IMPL_T002_TURING_NOT_EXIST, + HAL_IMPL_T234, + HAL_IMPL_T234D, + HAL_IMPL_AMODEL, + + HAL_IMPL_MAXIMUM, // NOTE: this symbol must be at the end of the enum list. + // It is used to allocate arrays and control loop iterations. 
+} HAL_IMPLEMENTATION; + +// +// HAL implementation names for debug & logging use +// +#define HAL_IMPL_NAME_LIST \ + { HAL_IMPL_TU102, "TU102" }, \ + { HAL_IMPL_TU104, "TU104" }, \ + { HAL_IMPL_TU106, "TU106" }, \ + { HAL_IMPL_TU116, "TU116" }, \ + { HAL_IMPL_TU117, "TU117" }, \ + { HAL_IMPL_GA100, "GA100" }, \ + { HAL_IMPL_GA102, "GA102" }, \ + { HAL_IMPL_GA103, "GA103" }, \ + { HAL_IMPL_GA104, "GA104" }, \ + { HAL_IMPL_GA106, "GA106" }, \ + { HAL_IMPL_GA107, "GA107" } + + +#endif // _G_RMCFG_HAL_H_ diff --git a/src/nvidia/generated/g_hal_archimpl.h b/src/nvidia/generated/g_hal_archimpl.h new file mode 100644 index 000000000..5c6d8f38b --- /dev/null +++ b/src/nvidia/generated/g_hal_archimpl.h @@ -0,0 +1,94 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Hal registration entry points. +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_hal_archimpl.h +// +// Chips: TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +// + +#ifndef _G_RMCFG_HAL_ARCHIMPL_H_ +#define _G_RMCFG_HAL_ARCHIMPL_H_ + +#include "g_hal.h" + +// OpenRM for Tegra build uses different include path +// The following lines refer to the same file. +// TODO: merge them +#include "published/nv_ref.h" + +// +// CHIPID array Implementation +// +const struct ChipID +{ + NvU32 arch; + NvU32 impl; + NvU32 hidrev; +} chipID[] = { + { 0x0, 0x0, 0x0 } , // GF100 (disabled) + { 0x0, 0x0, 0x0 } , // GF100B (disabled) + { 0x0, 0x0, 0x0 } , // GF104 (disabled) + { 0x0, 0x0, 0x0 } , // GF104B (disabled) + { 0x0, 0x0, 0x0 } , // GF106 (disabled) + { 0x0, 0x0, 0x0 } , // GF106B (disabled) + { 0x0, 0x0, 0x0 } , // GF108 (disabled) + { 0x0, 0x0, 0x0 } , // GF110D (disabled) + { 0x0, 0x0, 0x0 } , // GF110 (disabled) + { 0x0, 0x0, 0x0 } , // GF117 (disabled) + { 0x0, 0x0, 0x0 } , // GF118 (disabled) + { 0x0, 0x0, 0x0 } , // GF119 (disabled) + { 0x0, 0x0, 0x0 } , // GF110F (disabled) + { 0x0, 0x0, 0x0 } , // GF110F2 (disabled) + { 0x0, 0x0, 0x0 } , // GF110F3 (disabled) + { 0x0, 0x0, 0x0 } , // GK104 (disabled) + { 0x0, 0x0, 0x0 } , // GK106 (disabled) + { 0x0, 0x0, 0x0 } , // GK107 (disabled) + { 0x0, 0x0, 0x0 } , // GK20A (disabled) + { 0x0, 0x0, 0x0 } , // GK110 (disabled) + { 0x0, 0x0, 0x0 } , // GK110B (disabled) + { 0x0, 0x0, 0x0 } , // GK110C (disabled) + { 0x0, 0x0, 0x0 } , // GK208 (disabled) + { 0x0, 0x0, 0x0 } , // GK208S (disabled) + { 0x0, 0x0, 0x0 } , // GM107 (disabled) + { 0x0, 0x0, 0x0 } , // GM108 (disabled) + { 0x0, 0x0, 0x0 } , // GM200 (disabled) + { 0x0, 0x0, 0x0 } , // GM204 (disabled) + { 0x0, 0x0, 0x0 } , // GM206 (disabled) + { 0x0, 0x0, 0x0 } , // GP100 (disabled) + { 0x0, 0x0, 0x0 } , // GP102 (disabled) + { 0x0, 0x0, 0x0 } , // GP104 (disabled) + { 0x0, 0x0, 0x0 } , // GP106 (disabled) + { 0x0, 0x0, 0x0 } , // GP107 (disabled) + { 0x0, 0x0, 0x0 } , // GP108 (disabled) + { 0x0, 0x0, 0x0 } , // GV100 (disabled) + { 0x0, 0x0, 0x0 } , // GV11B (disabled) + { NV_PMC_BOOT_0_ARCHITECTURE_TU100, NV_PMC_BOOT_0_IMPLEMENTATION_2, 0x0 } , // TU102 + { NV_PMC_BOOT_0_ARCHITECTURE_TU100, NV_PMC_BOOT_0_IMPLEMENTATION_4, 0x0 } , // TU104 + { NV_PMC_BOOT_0_ARCHITECTURE_TU100, NV_PMC_BOOT_0_IMPLEMENTATION_6, 0x0 } , // TU106 + { NV_PMC_BOOT_0_ARCHITECTURE_TU110, NV_PMC_BOOT_0_IMPLEMENTATION_8, 0x0 } , // TU116 + { NV_PMC_BOOT_0_ARCHITECTURE_TU110, NV_PMC_BOOT_0_IMPLEMENTATION_7, 0x0 } , // TU117 + { NV_PMC_BOOT_0_ARCHITECTURE_GA100, NV_PMC_BOOT_0_IMPLEMENTATION_0, 0x0 } , // GA100 + { NV_PMC_BOOT_0_ARCHITECTURE_GA100, NV_PMC_BOOT_0_IMPLEMENTATION_2, 0x0 } , // GA102 + { 
NV_PMC_BOOT_0_ARCHITECTURE_GA100, NV_PMC_BOOT_0_IMPLEMENTATION_3, 0x0 } , // GA103 + { NV_PMC_BOOT_0_ARCHITECTURE_GA100, NV_PMC_BOOT_0_IMPLEMENTATION_4, 0x0 } , // GA104 + { NV_PMC_BOOT_0_ARCHITECTURE_GA100, NV_PMC_BOOT_0_IMPLEMENTATION_6, 0x0 } , // GA106 + { NV_PMC_BOOT_0_ARCHITECTURE_GA100, NV_PMC_BOOT_0_IMPLEMENTATION_7, 0x0 } , // GA107 + { 0x0, 0x0, 0x0 } , // GA10B (disabled) + { 0x0, 0x0, 0x0 } , // GA102F (disabled) + { 0x0, 0x0, 0x0 } , // T001_FERMI_NOT_EXIST (disabled) + { 0x0, 0x0, 0x0 } , // T124 (disabled) + { 0x0, 0x0, 0x0 } , // T132 (disabled) + { 0x0, 0x0, 0x0 } , // T210 (disabled) + { 0x0, 0x0, 0x0 } , // T186 (disabled) + { 0x0, 0x0, 0x0 } , // T194 (disabled) + { 0x0, 0x0, 0x0 } , // T002_TURING_NOT_EXIST (disabled) + { 0x0, 0x0, 0x0 } , // T234 (disabled) + { 0x0, 0x0, 0x0 } , // T234D (disabled) + { 0x0, 0x0, 0x0 } , // AMODEL (disabled) + +}; + +#endif // _G_RMCFG_HAL_ARCHIMPL_H_ + diff --git a/src/nvidia/generated/g_hal_mgr_nvoc.c b/src/nvidia/generated/g_hal_mgr_nvoc.c new file mode 100644 index 000000000..b2e449098 --- /dev/null +++ b/src/nvidia/generated/g_hal_mgr_nvoc.c @@ -0,0 +1,154 @@ +#define NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hal_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xbf26de = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJHALMGR(OBJHALMGR*); +void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR*); +NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR*); +void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR*); +void __nvoc_dtor_OBJHALMGR(OBJHALMGR*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHALMGR; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHALMGR_OBJHALMGR = { + /*pClassDef=*/ &__nvoc_class_def_OBJHALMGR, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHALMGR, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHALMGR_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJHALMGR, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHALMGR = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJHALMGR_OBJHALMGR, + &__nvoc_rtti_OBJHALMGR_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJHALMGR), + /*classId=*/ classId(OBJHALMGR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJHALMGR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHALMGR, + /*pCastInfo=*/ &__nvoc_castinfo_OBJHALMGR, + /*pExportInfo=*/ &__nvoc_export_info_OBJHALMGR +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHALMGR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJHALMGR(OBJHALMGR *pThis) { + __nvoc_halmgrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto 
__nvoc_ctor_OBJHALMGR_fail_Object; + __nvoc_init_dataField_OBJHALMGR(pThis); + + status = __nvoc_halmgrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail__init; + goto __nvoc_ctor_OBJHALMGR_exit; // Success + +__nvoc_ctor_OBJHALMGR_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJHALMGR_fail_Object: +__nvoc_ctor_OBJHALMGR_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJHALMGR_1(OBJHALMGR *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR *pThis) { + __nvoc_init_funcTable_OBJHALMGR_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJHALMGR(OBJHALMGR *pThis) { + pThis->__nvoc_pbase_OBJHALMGR = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJHALMGR(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJHALMGR *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJHALMGR)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJHALMGR)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJHALMGR); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJHALMGR(pThis); + status = __nvoc_ctor_OBJHALMGR(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJHALMGR_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJHALMGR_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJHALMGR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_hal_mgr_nvoc.h b/src/nvidia/generated/g_hal_mgr_nvoc.h new file mode 100644 index 000000000..30f2cf43b --- /dev/null +++ b/src/nvidia/generated/g_hal_mgr_nvoc.h @@ -0,0 +1,139 @@ +#ifndef _G_HAL_MGR_NVOC_H_ +#define _G_HAL_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_hal_mgr_nvoc.h" + +#ifndef _HAL_MGR_H_ +#define _HAL_MGR_H_ + +#include "core/core.h" +#include "core/info_block.h" +#include "core/hal.h" + +#define HALMGR_GET_HAL(p, halid) halmgrGetHal((p), halid) + +typedef struct OBJHALMGR *POBJHALMGR; + +#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +typedef struct OBJHALMGR OBJHALMGR; +#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHALMGR +#define __nvoc_class_id_OBJHALMGR 0xbf26de +#endif /* __nvoc_class_id_OBJHALMGR */ + + + +#ifdef NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJHALMGR { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJHALMGR *__nvoc_pbase_OBJHALMGR; + struct OBJHAL *pHalList[60]; +}; + +#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +typedef struct OBJHALMGR OBJHALMGR; +#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHALMGR +#define __nvoc_class_id_OBJHALMGR 0xbf26de +#endif /* __nvoc_class_id_OBJHALMGR */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR; + +#define __staticCast_OBJHALMGR(pThis) \ + ((pThis)->__nvoc_pbase_OBJHALMGR) + +#ifdef __nvoc_hal_mgr_h_disabled +#define __dynamicCast_OBJHALMGR(pThis) ((OBJHALMGR*)NULL) +#else //__nvoc_hal_mgr_h_disabled +#define __dynamicCast_OBJHALMGR(pThis) \ + ((OBJHALMGR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHALMGR))) +#endif //__nvoc_hal_mgr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR**, Dynamic*, NvU32); +#define __objCreate_OBJHALMGR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHALMGR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS halmgrConstruct_IMPL(struct OBJHALMGR *arg_); +#define __nvoc_halmgrConstruct(arg_) halmgrConstruct_IMPL(arg_) +void halmgrDestruct_IMPL(struct OBJHALMGR *arg0); +#define __nvoc_halmgrDestruct(arg0) halmgrDestruct_IMPL(arg0) +NV_STATUS halmgrCreateHal_IMPL(struct OBJHALMGR *arg0, NvU32 arg1); +#ifdef __nvoc_hal_mgr_h_disabled +static inline NV_STATUS halmgrCreateHal(struct OBJHALMGR *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrCreateHal(arg0, arg1) halmgrCreateHal_IMPL(arg0, arg1) +#endif //__nvoc_hal_mgr_h_disabled + +NV_STATUS halmgrGetHalForGpu_IMPL(struct OBJHALMGR *arg0, NvU32 arg1, NvU32 arg2, NvU32 *arg3); +#ifdef __nvoc_hal_mgr_h_disabled +static inline NV_STATUS halmgrGetHalForGpu(struct OBJHALMGR *arg0, NvU32 arg1, NvU32 arg2, NvU32 *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrGetHalForGpu(arg0, arg1, arg2, arg3) halmgrGetHalForGpu_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_hal_mgr_h_disabled + +struct OBJHAL *halmgrGetHal_IMPL(struct OBJHALMGR *arg0, NvU32 arg1); +#ifdef __nvoc_hal_mgr_h_disabled +static inline struct OBJHAL *halmgrGetHal(struct OBJHALMGR *arg0, 
NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJHALMGR was disabled!"); + return NULL; +} +#else //__nvoc_hal_mgr_h_disabled +#define halmgrGetHal(arg0, arg1) halmgrGetHal_IMPL(arg0, arg1) +#endif //__nvoc_hal_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HAL_MGR_NVOC_H_ diff --git a/src/nvidia/generated/g_hal_nvoc.c b/src/nvidia/generated/g_hal_nvoc.c new file mode 100644 index 000000000..2cdd8ef74 --- /dev/null +++ b/src/nvidia/generated/g_hal_nvoc.c @@ -0,0 +1,148 @@ +#define NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hal_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe803b6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJHAL(OBJHAL*); +void __nvoc_init_funcTable_OBJHAL(OBJHAL*); +NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL*); +void __nvoc_init_dataField_OBJHAL(OBJHAL*); +void __nvoc_dtor_OBJHAL(OBJHAL*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHAL; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHAL_OBJHAL = { + /*pClassDef=*/ &__nvoc_class_def_OBJHAL, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHAL, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHAL_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJHAL, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHAL = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJHAL_OBJHAL, + &__nvoc_rtti_OBJHAL_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJHAL), + /*classId=*/ classId(OBJHAL), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJHAL", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHAL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJHAL, + /*pExportInfo=*/ &__nvoc_export_info_OBJHAL +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHAL = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJHAL(OBJHAL *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJHAL(OBJHAL *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJHAL_fail_Object; + __nvoc_init_dataField_OBJHAL(pThis); + goto __nvoc_ctor_OBJHAL_exit; // Success + +__nvoc_ctor_OBJHAL_fail_Object: +__nvoc_ctor_OBJHAL_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJHAL_1(OBJHAL *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJHAL(OBJHAL *pThis) { + __nvoc_init_funcTable_OBJHAL_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJHAL(OBJHAL *pThis) { + pThis->__nvoc_pbase_OBJHAL = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJHAL(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 
createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJHAL *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJHAL)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJHAL)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJHAL); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJHAL(pThis); + status = __nvoc_ctor_OBJHAL(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJHAL_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJHAL_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJHAL(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_hal_nvoc.h b/src/nvidia/generated/g_hal_nvoc.h new file mode 100644 index 000000000..4fa2daa8d --- /dev/null +++ b/src/nvidia/generated/g_hal_nvoc.h @@ -0,0 +1,146 @@ +#ifndef _G_HAL_NVOC_H_ +#define _G_HAL_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_hal_nvoc.h" + +#ifndef _OBJHAL_H_ +#define _OBJHAL_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: hal.h * +* Defines and structures used for the HAL Object. 
* +* * +\***************************************************************************/ + +#include "core/core.h" +#include "core/info_block.h" + +// +// HAL Info Block Id: +// +// 31 7 0 +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | 24 bits | 8 bits | +// .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// Info ID # Impl +// +// Impl: The hal implementation +// Info ID number: unique id for a particular info type +// +#define MKHALINFOID(impl,infoId) (((infoId & 0xffffff) << 8) | (impl & 0xff)) + +typedef struct MODULEDESCRIPTOR MODULEDESCRIPTOR, *PMODULEDESCRIPTOR; + +struct MODULEDESCRIPTOR { + + // (rmconfig) per-obj function ptr to init hal interfaces + const HAL_IFACE_SETUP *pHalSetIfaces; +}; + +typedef struct OBJHAL *POBJHAL; + +#ifndef __NVOC_CLASS_OBJHAL_TYPEDEF__ +#define __NVOC_CLASS_OBJHAL_TYPEDEF__ +typedef struct OBJHAL OBJHAL; +#endif /* __NVOC_CLASS_OBJHAL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHAL +#define __nvoc_class_id_OBJHAL 0xe803b6 +#endif /* __nvoc_class_id_OBJHAL */ + + +#ifdef NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJHAL { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJHAL *__nvoc_pbase_OBJHAL; + struct MODULEDESCRIPTOR moduleDescriptor; +}; + +#ifndef __NVOC_CLASS_OBJHAL_TYPEDEF__ +#define __NVOC_CLASS_OBJHAL_TYPEDEF__ +typedef struct OBJHAL OBJHAL; +#endif /* __NVOC_CLASS_OBJHAL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHAL +#define __nvoc_class_id_OBJHAL 0xe803b6 +#endif /* __nvoc_class_id_OBJHAL */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL; + +#define __staticCast_OBJHAL(pThis) \ + ((pThis)->__nvoc_pbase_OBJHAL) + +#ifdef __nvoc_hal_h_disabled +#define __dynamicCast_OBJHAL(pThis) ((OBJHAL*)NULL) +#else //__nvoc_hal_h_disabled +#define __dynamicCast_OBJHAL(pThis) \ + ((OBJHAL*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHAL))) +#endif //__nvoc_hal_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL**, Dynamic*, NvU32); +#define __objCreate_OBJHAL(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHAL((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +PMODULEDESCRIPTOR objhalGetModuleDescriptor_IMPL(struct OBJHAL *pHal); +#ifdef __nvoc_hal_h_disabled +static inline PMODULEDESCRIPTOR objhalGetModuleDescriptor(struct OBJHAL *pHal) { + NV_ASSERT_FAILED_PRECOMP("OBJHAL was disabled!"); + return NULL; +} +#else //__nvoc_hal_h_disabled +#define objhalGetModuleDescriptor(pHal) objhalGetModuleDescriptor_IMPL(pHal) +#endif //__nvoc_hal_h_disabled + +#undef PRIVATE_FIELD + + +//-------------------------------------------------------------------- +// RM routines. +//-------------------------------------------------------------------- + +NV_STATUS ipVersionsSetupHal(struct OBJGPU *, void *pDynamic, IGrp_ipVersions_getInfo getInfoFn); + +#endif // _OBJHAL_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HAL_NVOC_H_ diff --git a/src/nvidia/generated/g_hal_private.h b/src/nvidia/generated/g_hal_private.h new file mode 100644 index 000000000..2ba33973b --- /dev/null +++ b/src/nvidia/generated/g_hal_private.h @@ -0,0 +1,233 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Private HAL support for halgen. 
+// +// Profile: shipping-gpus-openrm +// Template: templates/gt_hal_private.h +// +// Chips: TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +// + +// +// This file is included in several .c files for chips hal register and engines +// hal function assignment. The macros RMCFG_ENGINE_SETUP and RMCFG_HAL_SETUP_xxx +// are used to provide different content for those .c files. +// + +#ifndef _G_RMCFG_HAL_PRIVATE_H_ +#define _G_RMCFG_HAL_PRIVATE_H_ + +#include "g_hal.h" + +// establish the per-chip RMCFG_HAL_SETUP_chip #defines as needed. +#if defined(RMCFG_ENGINE_SETUP) + +// setup all enabled chip families +#if defined(RMCFG_HAL_SETUP_ALL) +# define RMCFG_HAL_SETUP_TU10X 1 +# define RMCFG_HAL_SETUP_GA10X 1 +#endif // RMCFG_HAL_SETUP_ALL + +// +// setup all enabled chips in each enabled family +// + +#if defined(RMCFG_HAL_SETUP_TU10X) +# define RMCFG_HAL_SETUP_TU102 1 +# define RMCFG_HAL_SETUP_TU104 1 +# define RMCFG_HAL_SETUP_TU106 1 +# define RMCFG_HAL_SETUP_TU116 1 +# define RMCFG_HAL_SETUP_TU117 1 +#endif // TU10X + +#if defined(RMCFG_HAL_SETUP_GA10X) +# define RMCFG_HAL_SETUP_GA100 1 +# define RMCFG_HAL_SETUP_GA102 1 +# define RMCFG_HAL_SETUP_GA103 1 +# define RMCFG_HAL_SETUP_GA104 1 +# define RMCFG_HAL_SETUP_GA106 1 +# define RMCFG_HAL_SETUP_GA107 1 +#endif // GA10X + +#endif // RMCFG_ENGINE_SETUP + +// pull in private headers for each engine +#include "g_os_private.h" +#include "g_rpc_private.h" + + +// +// per-GPU structure with an interface init function for each engine +// + +// registerHalModule function declaration +NV_STATUS registerHalModule(NvU32, const HAL_IFACE_SETUP *); + +#if defined(RMCFG_HAL_SETUP_TU102) + +static const HAL_IFACE_SETUP halIface_TU102 = { + + rpcHalIfacesSetup_TU102, + +}; + +NV_STATUS registerHalModule_TU102(void) +{ + return registerHalModule(HAL_IMPL_TU102, &halIface_TU102); +} + +#endif // TU10X or TU102 + +#if defined(RMCFG_HAL_SETUP_TU104) + +static const HAL_IFACE_SETUP halIface_TU104 = { + + rpcHalIfacesSetup_TU104, + +}; + +NV_STATUS registerHalModule_TU104(void) +{ + return registerHalModule(HAL_IMPL_TU104, &halIface_TU104); +} + +#endif // TU10X or TU104 + +#if defined(RMCFG_HAL_SETUP_TU106) + +static const HAL_IFACE_SETUP halIface_TU106 = { + + rpcHalIfacesSetup_TU106, + +}; + +NV_STATUS registerHalModule_TU106(void) +{ + return registerHalModule(HAL_IMPL_TU106, &halIface_TU106); +} + +#endif // TU10X or TU106 + +#if defined(RMCFG_HAL_SETUP_TU116) + +static const HAL_IFACE_SETUP halIface_TU116 = { + + rpcHalIfacesSetup_TU116, + +}; + +NV_STATUS registerHalModule_TU116(void) +{ + return registerHalModule(HAL_IMPL_TU116, &halIface_TU116); +} + +#endif // TU10X or TU116 + +#if defined(RMCFG_HAL_SETUP_TU117) + +static const HAL_IFACE_SETUP halIface_TU117 = { + + rpcHalIfacesSetup_TU117, + +}; + +NV_STATUS registerHalModule_TU117(void) +{ + return registerHalModule(HAL_IMPL_TU117, &halIface_TU117); +} + +#endif // TU10X or TU117 + +#if defined(RMCFG_HAL_SETUP_GA100) + +static const HAL_IFACE_SETUP halIface_GA100 = { + + rpcHalIfacesSetup_GA100, + +}; + +NV_STATUS registerHalModule_GA100(void) +{ + return registerHalModule(HAL_IMPL_GA100, &halIface_GA100); +} + +#endif // GA10X or GA100 + +#if defined(RMCFG_HAL_SETUP_GA102) + +static const HAL_IFACE_SETUP halIface_GA102 = { + + rpcHalIfacesSetup_GA102, + +}; + +NV_STATUS registerHalModule_GA102(void) +{ + return registerHalModule(HAL_IMPL_GA102, &halIface_GA102); +} + +#endif // GA10X or GA102 + +#if defined(RMCFG_HAL_SETUP_GA103) + +static const HAL_IFACE_SETUP halIface_GA103 = { + + 
rpcHalIfacesSetup_GA103, + +}; + +NV_STATUS registerHalModule_GA103(void) +{ + return registerHalModule(HAL_IMPL_GA103, &halIface_GA103); +} + +#endif // GA10X or GA103 + +#if defined(RMCFG_HAL_SETUP_GA104) + +static const HAL_IFACE_SETUP halIface_GA104 = { + + rpcHalIfacesSetup_GA104, + +}; + +NV_STATUS registerHalModule_GA104(void) +{ + return registerHalModule(HAL_IMPL_GA104, &halIface_GA104); +} + +#endif // GA10X or GA104 + +#if defined(RMCFG_HAL_SETUP_GA106) + +static const HAL_IFACE_SETUP halIface_GA106 = { + + rpcHalIfacesSetup_GA106, + +}; + +NV_STATUS registerHalModule_GA106(void) +{ + return registerHalModule(HAL_IMPL_GA106, &halIface_GA106); +} + +#endif // GA10X or GA106 + +#if defined(RMCFG_HAL_SETUP_GA107) + +static const HAL_IFACE_SETUP halIface_GA107 = { + + rpcHalIfacesSetup_GA107, + +}; + +NV_STATUS registerHalModule_GA107(void) +{ + return registerHalModule(HAL_IMPL_GA107, &halIface_GA107); +} + +#endif // GA10X or GA107 + + + +#endif // _G_RMCFG_HAL_PRIVATE_H_ diff --git a/src/nvidia/generated/g_hal_register.h b/src/nvidia/generated/g_hal_register.h new file mode 100644 index 000000000..b3b4d49c0 --- /dev/null +++ b/src/nvidia/generated/g_hal_register.h @@ -0,0 +1,115 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Hal registration entry points. +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_hal_register.h +// +// Chips: TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +// + +#ifndef _G_RMCFG_HAL_REGISTER_H_ +#define _G_RMCFG_HAL_REGISTER_H_ + +// +// per-family HAL registration entry points +// + + +NV_STATUS registerHalModule_TU102(void); +NV_STATUS registerHalModule_TU104(void); +NV_STATUS registerHalModule_TU106(void); +NV_STATUS registerHalModule_TU116(void); +NV_STATUS registerHalModule_TU117(void); + +static NV_STATUS NV_INLINE REGISTER_TU10X_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = registerHalModule_TU102(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_TU104(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_TU106(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_TU116(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_TU117(); + if (rmStatus != NV_OK) + return rmStatus; + + return NV_OK; +} + +NV_STATUS registerHalModule_GA100(void); +NV_STATUS registerHalModule_GA102(void); +NV_STATUS registerHalModule_GA103(void); +NV_STATUS registerHalModule_GA104(void); +NV_STATUS registerHalModule_GA106(void); +NV_STATUS registerHalModule_GA107(void); + +static NV_STATUS NV_INLINE REGISTER_GA10X_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = registerHalModule_GA100(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_GA102(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_GA103(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_GA104(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_GA106(); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = registerHalModule_GA107(); + if (rmStatus != NV_OK) + return rmStatus; + + return NV_OK; +} + +// +// This routine can be used by platform dependent code to +// enable all HAL modules. 
+// +static NV_STATUS NV_INLINE REGISTER_ALL_HALS(void) +{ + NV_STATUS rmStatus; + + rmStatus = REGISTER_TU10X_HALS(); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + rmStatus = REGISTER_GA10X_HALS(); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + return NV_OK; +} + + + +#endif // _G_RMCFG_HAL_REGISTER_H_ diff --git a/src/nvidia/generated/g_hal_stubs.h b/src/nvidia/generated/g_hal_stubs.h new file mode 100644 index 000000000..6ab5f5cd9 --- /dev/null +++ b/src/nvidia/generated/g_hal_stubs.h @@ -0,0 +1,908 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// HAL stubs, generated by rmconfig. +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_hal_stubs.h +// +// Chips: TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +// + +#ifndef _G_RMCFG_HAL_STUBS_H_ +#define _G_RMCFG_HAL_STUBS_H_ + +// pull in private headers for each engine +#include "g_os_private.h" +#include "g_rpc_private.h" + + +#include "g_hal.h" + +// HACK: a global var unique to the ipVersions _UNASSIGNED routines to ensure this function +// is not aliased by link-time-optimizations with a _STUB fn that might actually +// be assigned to a hal method as that would break the _HAL_VERIFY_INTERFACE +// test. +NV_STATUS iGrp_ipVersions_UNIQUIFIER; + +// the "_UNASSIGNED" function for all IP_VERSIONS dynamic interfaces +NV_STATUS iGrp_ipVersions_UNASSIGNED(void) +{ + NV_ASSERT_PRECOMP(0 && "iGrp_ipVersions_UNASSIGNED"); + return NV_ERR_NOT_SUPPORTED + + iGrp_ipVersions_UNIQUIFIER; // will be 0 +} + +// +// generated _STUB functions +// + +// DISP:hal:IGRP_IP_VERSIONS_GET_INFO - DISP disabled +NV_STATUS disp_iGrp_ipVersions_getInfo_STUB( + IGRP_IP_VERSIONS_TABLE_INFO *pArg1 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// DPU:hal:IGRP_IP_VERSIONS_GET_INFO - DPU disabled +NV_STATUS dpu_iGrp_ipVersions_getInfo_STUB( + IGRP_IP_VERSIONS_TABLE_INFO *pArg1 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:GET_SWAP_READY_FUNC_FOR_PINSET - GPIO disabled +NvU32 gpioGetSwapReadyFuncForPinset_STUB( + NvU32 pinset +) +{ + NV_ASSERT_PRECOMP(0 && "gpioGetSwapReadyFuncForPinset_STUB() GPIO: HAL_INTERFACES: GET_SWAP_READY_FUNC_FOR_PINSET"); + return (NvU32) 0; +} + +// GPIO:hal:GET_FEATURE_STATE_HAL - GPIO disabled +NV_STATUS gpioGetFeatureStateHal_STUB( + POBJGPIO pGpio, + NvU32 function, + NvU32 feature, + NvBool *bState +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:SET_FEATURE_STATE_HAL - GPIO disabled +NV_STATUS gpioSetFeatureStateHal_STUB( + POBJGPIO pGpio, + NvU32 function, + NvU32 feature, + NvBool bState +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:INTERRUPT_PENDING - GPIO disabled +NvBool gpioInterruptPending_STUB( + POBJGPIO pGpio +) +{ + return NV_FALSE; +} + +// GPIO:hal:DISABLE_INTERRUPTS - GPIO disabled +void gpioDisableInterrupts_STUB( + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:CLEAR_INTERRUPTS - GPIO disabled +void gpioClearInterrupts_STUB( + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:READ_INTERRUPT_STATUS - GPIO disabled +void gpioReadInterruptStatus_STUB( + POBJGPIO pGpio, + NvU64 *intrStatus +) +{ +} + +// GPIO:hal:SERVICE_EVENT - GPIO disabled +NV_STATUS gpioServiceEvent_STUB( + POBJGPIO pGpio, + NvU64 *intrStatus +) +{ + return NV_OK; +} + +// GPIO:hal:GET_INTERRUPT_HAL - GPIO disabled +NvBool gpioGetInterruptHal_STUB( + POBJGPIO pGpio, + NvU32 gpioFunc, + NvU32 direction +) +{ + return NV_FALSE; +} + +// GPIO:hal:SET_INTERRUPT_HAL - GPIO disabled +NV_STATUS gpioSetInterruptHal_STUB( + POBJGPIO pGpio, + NvU32 gpioFunc, + NvU32 direction, + NvU32 enable +) +{ + 
return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:GET_INTERRUPT_ENABLE_HAL - GPIO disabled +NvBool gpioGetInterruptEnableHal_STUB( + POBJGPIO pGpio, + NvU32 gpioFunc, + NvU32 direction +) +{ + return NV_FALSE; +} + +// GPIO:hal:INIT_HW - GPIO disabled +NV_STATUS gpioInitHw_STUB( + POBJGPIO pGpio +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:DESTROY_HW - GPIO disabled +NV_STATUS gpioDestroyHw_STUB( + POBJGPIO pGpio +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:UPDATE_AND_PROGRAM_LCD_GPIO_ENTRIES - GPIO disabled +void gpioUpdateAndProgramLcdGpioEntries_STUB( + POBJGPIO pGpio, + NvU32 displayId, + NvBool bWriteHw +) +{ +} + +// GPIO:hal:GET_REGISTER_FOR_FUNCTION_HAL - GPIO disabled +void gpioGetRegisterForFunctionHal_STUB( + POBJGPIO pGpio, + NvU32 func, + NvU32 *reg, + NvU32 *oldValue, + NvU32 *value_1, + NvU32 *value_0 +) +{ +} + +// GPIO:hal:WRITE_HW_ENUM_HAL - GPIO disabled +NV_STATUS gpioWriteHwEnumHal_STUB( + POBJGPIO pGpio, + NvU32 function, + NvU8 outHwEnum +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:GET_EXCEPTION_DATA - GPIO disabled +NV_STATUS gpioGetExceptionData_STUB( + POBJGPIO pGpio +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:GET_REGISTER_AND_MASK_HAL - GPIO disabled +NV_STATUS gpioGetRegisterAndMaskHal_STUB( + POBJGPIO pGpio, + NvU32 Function, + NvU32 State, + NvU32 *Register, + NvU32 *Mask, + NvU32 *Value +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:GET_TRIGGER_REGISTER_AND_MASK_HAL - GPIO disabled +NV_STATUS gpioGetTriggerRegisterAndMaskHal_STUB( + POBJGPIO pGpio, + NvU32 *pRegAddr, + NvU32 *pAndMask, + NvU32 *pOrMask, + NvBool bDone +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:GET_PIN_COUNT - GPIO disabled +NvU32 gpioGetPinCount_STUB( + POBJGPIO pGpio +) +{ + return (NvU32) 0; +} + +// GPIO:hal:DUMP_RC_ERROR_REGS - GPIO disabled +NV_STATUS gpioDumpRCErrorRegs_STUB( + POBJGPU pGpu, + POBJGPIO pGpio, + PRB_ENCODER *pArg3 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:INSERT_FRAME_LOCK_HEADER_LOCK_PIN_ENTRY - GPIO disabled +NV_STATUS gpioInsertFrameLockHeaderLockPinEntry_STUB( + POBJGPIO pGpio, + NvU32 function, + NvBool bIsFrameLockHeaderLockPin +) +{ + return NV_OK; +} + +// GPIO:hal:GET_FRAME_LOCK_HEADER_LOCK_PINS - GPIO disabled +NV_STATUS gpioGetFrameLockHeaderLockPins_STUB( + POBJGPIO pGpio, + NvU32 *pFrameLockPin, + NvU32 *pRasterLockPin, + NvU32 *pFlipLockPin +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:PIN_WRITE_FUNCTION_STATUS - GPIO disabled +NV_STATUS gpioPinWriteFunctionStatus_STUB( + POBJGPIO pGpio, + NvU32 gpioFunc, + NvU32 pin, + NvBool bEnabled +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:ACTIVATE_HW_FUNCTION_HAL - GPIO disabled +NV_STATUS gpioActivateHwFunctionHal_STUB( + POBJGPIO pGpio, + NvU32 gpioFunc, + NvU32 pin +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:DEACTIVATE_HW_FUNCTION_HAL - GPIO disabled +NV_STATUS gpioDeactivateHwFunctionHal_STUB( + POBJGPIO pGpio, + NvU32 gpioFunc, + NvU32 pin +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:INIT_PIN_FEATURE_FLAG - GPIO disabled +NV_STATUS gpioInitPinFeatureFlag_STUB( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU8 outHwEnum, + NvU8 inHwEnum +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:PROGRAM_PIN - GPIO disabled +NV_STATUS gpioProgramPin_STUB( + POBJGPIO pGpio, + NvU32 gpioPinDCB, + NvU32 halIndex, + NvBool bTrigger +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:RM_PMU_SYNC_STATE_INIT_HAL - GPIO disabled +NV_STATUS gpioRmPmuSyncStateInitHal_STUB( + POBJGPIO pGpio +) +{ + return NV_OK; +} + +// 
GPIO:hal:FUNC_TO_SOR_IDX_HAL - GPIO disabled +NvU32 gpioFuncToSorIdxHal_STUB( + POBJGPIO pGpio, + POBJGPU pGpu, + NvU32 gpioFunc +) +{ + return NV_U32_MAX; +} + +// GPIO:hal:OVERRIDE_GPIO_WAR_FOR_BUG_1795624 - GPIO disabled +void gpioOverrideGpioWarForBug1795624_STUB( + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:SET_SWAPRDY_FOR_BUG_200374184 - GPIO disabled +NV_STATUS gpioSetSwaprdyForBug200374184_STUB( + POBJGPU pGpu, + NvU32 swaprdyOutPin, + NvBool bEnable +) +{ + return NV_OK; +} + +// GPIO:hal:OUTPUT_TRIGGER_UPDATE_UC - GPIO disabled +NV_STATUS gpioOutputTriggerUpdateUC_STUB( + POBJGPU pGpu, + POBJGPIO pGpio +) +{ + return NV_OK; +} + +// GPIO:hal:OVERRIDE_GPIO_WAR_FOR_BUG_2701109 - GPIO disabled +void gpioOverrideGpioWarForBug2701109_STUB( + POBJGPU pGpu, + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:INIT_SW - GPIO disabled +NV_STATUS gpioInitSw_STUB( + POBJGPIO pGpio +) +{ + return NV_OK; +} + +// GPIO:hal:DESTROY_SW - GPIO disabled +void gpioDestroySw_STUB( + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:INIT_AND_GET_PIN_NUM - GPIO disabled +NvU32 gpioInitAndGetPinNum_STUB( + POBJGPIO pGpio, + NvU32 arg2, + NvU32 *pArg3 +) +{ + return (NvU32) 0; +} + +// GPIO:hal:OVERRIDE_GPIO_WAR_FOR_BUG_2561134 - GPIO disabled +void gpioOverrideGpioWarForBug2561134_STUB( + POBJGPU pGpu, + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:SET_PROPERTIES_LIST - GPIO disabled +void gpioSetPropertiesList_STUB( + POBJGPU pGpu, + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:READ_INPUT - GPIO disabled +NV_STATUS gpioReadInput_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 halIndex, + NvU32 *pValue +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:PROGRAM_OUTPUT - GPIO disabled +void gpioProgramOutput_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 value, + NvU32 halIndex +) +{ +} + +// GPIO:hal:READ_OUTPUT - GPIO disabled +NvBool gpioReadOutput_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin +) +{ + return NV_FALSE; +} + +// GPIO:hal:PROGRAM_DIRECTION - GPIO disabled +void gpioProgramDirection_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin, + NvBool input, + NvU32 halIndex +) +{ +} + +// GPIO:hal:READ_DIRECTION - GPIO disabled +NvBool gpioReadDirection_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 halIndex +) +{ + return NV_FALSE; +} + +// GPIO:hal:INIT_DEFAULT_ENTRIES - GPIO disabled +void gpioInitDefaultEntries_MISSING( + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:SET_STATE_LIST_HAL - GPIO disabled +void gpioSetStateListHal_MISSING( + POBJGPIO pGpio, + PGPIO_FUNC_LIST_ITEM pList, + NvU32 count +) +{ +} + +// GPIO:hal:IS_FEATURE_AVAILABLE_HAL - GPIO disabled +NvBool gpioIsFeatureAvailableHal_MISSING( + POBJGPIO pGpio, + NvU32 function, + NvU32 feature +) +{ + return NV_FALSE; +} + +// GPIO:hal:GET_PWM_CONTROL_HAL - GPIO disabled +NV_STATUS gpioGetPwmControlHal_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 *pFlags, + NvU32 *pPeriod, + NvU32 *pDutyCycle, + NvU32 halIndex +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:SET_PWM_CONTROL_HAL - GPIO disabled +NV_STATUS gpioSetPwmControlHal_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 *pFlags, + NvU32 *pPeriod, + NvU32 *pDutyCycle, + NvU32 halIndex, + NvBool bSkipPinInit +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:GET_PWM_PARAMETERS_HAL - GPIO disabled +NV_STATUS gpioGetPwmParametersHal_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 *pMaxPeriod, + NvU32 halIndex +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:GET_FUNC_PWM_SENSE - GPIO disabled +NvU32 gpioGetFuncPwmSense_MISSING( + POBJGPIO pGpio, + NvU32 func +) +{ + return (NvU32) 0; +} + +// 
GPIO:hal:WRITE_PIN_HW_ENUM - GPIO disabled +NV_STATUS gpioWritePinHwEnum_MISSING( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU8 outHwEnum +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// GPIO:hal:READ_INPUT - GPIO disabled +NV_STATUS gpioReadInput_FWCLIENT( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 halIndex, + NvU32 *pValue +) +{ + return NV_OK; +} + +// GPIO:hal:PROGRAM_OUTPUT - GPIO disabled +void gpioProgramOutput_FWCLIENT( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 value, + NvU32 halIndex +) +{ +} + +// GPIO:hal:READ_OUTPUT - GPIO disabled +NvBool gpioReadOutput_FWCLIENT( + POBJGPIO pGpio, + NvU32 gpioPin +) +{ + return NV_FALSE; +} + +// GPIO:hal:PROGRAM_DIRECTION - GPIO disabled +void gpioProgramDirection_FWCLIENT( + POBJGPIO pGpio, + NvU32 gpioPin, + NvBool input, + NvU32 halIndex +) +{ +} + +// GPIO:hal:READ_DIRECTION - GPIO disabled +NvBool gpioReadDirection_FWCLIENT( + POBJGPIO pGpio, + NvU32 gpioPin, + NvU32 halIndex +) +{ + return NV_FALSE; +} + +// GPIO:hal:GET_INTERRUPT_HAL - GPIO disabled +NvBool gpioGetInterruptHal_FWCLIENT( + POBJGPIO pGpio, + NvU32 gpioFunc, + NvU32 direction +) +{ + return NV_FALSE; +} + +// GPIO:hal:SET_INTERRUPT_HAL - GPIO disabled +NV_STATUS gpioSetInterruptHal_FWCLIENT( + POBJGPIO pGpio, + NvU32 gpioFunc, + NvU32 direction, + NvU32 enable +) +{ + return NV_OK; +} + +// GPIO:hal:GET_EXCEPTION_DATA - GPIO disabled +NV_STATUS gpioGetExceptionData_FWCLIENT( + POBJGPIO pGpio +) +{ + return NV_OK; +} + +// GPIO:hal:INIT_SW - GPIO disabled +NV_STATUS gpioInitSw_FWCLIENT( + POBJGPIO pGpio +) +{ + return NV_OK; +} + +// GPIO:hal:DESTROY_SW - GPIO disabled +void gpioDestroySw_FWCLIENT( + POBJGPIO pGpio +) +{ +} + +// GPIO:hal:INIT_AND_GET_PIN_NUM - GPIO disabled +NvU32 gpioInitAndGetPinNum_FWCLIENT( + POBJGPIO pGpio, + NvU32 arg2, + NvU32 *pArg3 +) +{ + return (NvU32) 0; +} + +// RPC:hal:VGPU_PF_REG_READ32 - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcVgpuPfRegRead32_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvU64 arg3, + NvU32 *pArg4, + NvU32 arg5 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:DUMP_PROTOBUF_COMPONENT - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcDumpProtobufComponent_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + NVDUMP_COMPONENT component +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:ALLOC_MEMORY - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcAllocMemory_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvHandle arg3, + NvHandle arg4, + NvHandle arg5, + NvU32 arg6, + NvU32 arg7, + MEMORY_DESCRIPTOR *pArg8 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:GPU_EXEC_REG_OPS - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcGpuExecRegOps_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvHandle arg3, + NvHandle arg4, + NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS *pArg5, + NV2080_CTRL_GPU_REG_OP *pArg6 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:RMFS_INIT - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcRmfsInit_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + PMEMORY_DESCRIPTOR arg3 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:UNSET_PAGE_DIRECTORY - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcUnsetPageDirectory_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvHandle arg3, + NvHandle arg4, + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pArg5 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// 
RPC:hal:GET_GSP_STATIC_INFO - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcGetGspStaticInfo_STUB( + POBJGPU pGpu, + POBJRPC pRpc +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:GSP_SET_SYSTEM_INFO - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcGspSetSystemInfo_STUB( + POBJGPU pGpu, + POBJRPC pRpc +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:RMFS_CLEANUP - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcRmfsCleanup_STUB( + POBJGPU pGpu, + POBJRPC pRpc +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:SET_PAGE_DIRECTORY - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcSetPageDirectory_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvHandle arg3, + NvHandle arg4, + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pArg5 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:UNLOADING_GUEST_DRIVER - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcUnloadingGuestDriver_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvBool arg3, + NvBool arg4, + NvU32 arg5 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:SET_REGISTRY - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcSetRegistry_STUB( + POBJGPU pGpu, + POBJRPC pRpc +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:RMFS_CLOSE_QUEUE - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcRmfsCloseQueue_STUB( + POBJGPU pGpu, + POBJRPC pRpc +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:GET_STATIC_INFO - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcGetStaticInfo_STUB( + POBJGPU pGpu, + POBJRPC pRpc +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:IDLE_CHANNELS - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcIdleChannels_STUB( + OBJGPU *pArg1, + POBJRPC pRpc, + NvHandle *phclients, + NvHandle *phdevices, + NvHandle *phchannels, + NvU32 nentries, + NvU32 flags, + NvU32 timeout +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:UPDATE_BAR_PDE - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcUpdateBarPde_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NV_RPC_UPDATE_PDE_BAR_TYPE arg3, + NvU64 arg4, + NvU64 arg5 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:MAP_MEMORY_DMA - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcMapMemoryDma_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvHandle arg3, + NvHandle arg4, + NvHandle arg5, + NvHandle arg6, + NvU64 arg7, + NvU64 arg8, + NvU32 arg9, + NvU64 *pArg10 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:UNMAP_MEMORY_DMA - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcUnmapMemoryDma_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvHandle arg3, + NvHandle arg4, + NvHandle arg5, + NvHandle arg6, + NvU32 arg7, + NvU64 arg8 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPC:hal:RMFS_TEST - TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +NV_STATUS rpcRmfsTest_STUB( + POBJGPU pGpu, + POBJRPC pRpc, + NvU32 arg3, + NvU32 arg4, + NvU32 arg5, + NvU32 arg6 +) +{ + return NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION; +} + +// RPCSTRUCTURECOPY:hal:IGRP_IP_VERSIONS_GET_INFO - RPCSTRUCTURECOPY disabled +NV_STATUS rpcstructurecopy_iGrp_ipVersions_getInfo_STUB( + IGRP_IP_VERSIONS_TABLE_INFO *pArg1 +) +{ + return NV_ERR_NOT_SUPPORTED; +} + + + + +// +// "missing engine" setup sequences, if any +// + +// Install the _MISSING 
overrides for GPIO: HAL_INTERFACES +void gpioHalIfacesSetup_MISSING(GPIO_HAL_IFACES *pGpioHal) +{ + // GPIO disabled by rmconfig; no additional MISSING support needed +} + + + + + + +#endif // _G_RMCFG_HAL_STUBS_H_ diff --git a/src/nvidia/generated/g_hda_codec_api_nvoc.c b/src/nvidia/generated/g_hda_codec_api_nvoc.c new file mode 100644 index 000000000..4d72d26fd --- /dev/null +++ b/src/nvidia/generated/g_hda_codec_api_nvoc.c @@ -0,0 +1,327 @@ +#define NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hda_codec_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xf59a20 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_Hdacodec(Hdacodec*); +void __nvoc_init_funcTable_Hdacodec(Hdacodec*); +NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Hdacodec(Hdacodec*); +void __nvoc_dtor_Hdacodec(Hdacodec*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Hdacodec; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_Hdacodec = { + /*pClassDef=*/ &__nvoc_class_def_Hdacodec, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Hdacodec, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Hdacodec = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_Hdacodec_Hdacodec, + &__nvoc_rtti_Hdacodec_GpuResource, + &__nvoc_rtti_Hdacodec_RmResource, + &__nvoc_rtti_Hdacodec_RmResourceCommon, + &__nvoc_rtti_Hdacodec_RsResource, + &__nvoc_rtti_Hdacodec_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Hdacodec), + /*classId=*/ classId(Hdacodec), + /*providerId=*/ 
&__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Hdacodec", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Hdacodec, + /*pCastInfo=*/ &__nvoc_castinfo_Hdacodec, + /*pExportInfo=*/ &__nvoc_export_info_Hdacodec +}; + +static NvBool __nvoc_thunk_GpuResource_hdacodecShareCallback(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecControl(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecUnmap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_hdacodecGetMemInterMapParams(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_hdacodecGetMemoryMappingDescriptor(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecGetMapAddrSpace(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_hdacodecGetInternalObjectHandle(struct Hdacodec *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_hdacodecControlFilter(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_hdacodecAddAdditionalDependants(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_hdacodecGetRefCount(struct Hdacodec *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_hdacodecCheckMemInterUnmap(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + 
__nvoc_rtti_Hdacodec_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_hdacodecMapTo(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_hdacodecControl_Prologue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_hdacodecCanCopy(struct Hdacodec *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecInternalControlForward(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_hdacodecPreDestruct(struct Hdacodec *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_hdacodecUnmapFrom(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_hdacodecControl_Epilogue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_hdacodecControlLookup(struct Hdacodec *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_hdacodecMap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_hdacodecAccessCallback(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Hdacodec = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Hdacodec(Hdacodec *pThis) { + 
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Hdacodec(Hdacodec *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail_GpuResource; + __nvoc_init_dataField_Hdacodec(pThis); + + status = __nvoc_hdacodecConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail__init; + goto __nvoc_ctor_Hdacodec_exit; // Success + +__nvoc_ctor_Hdacodec_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_Hdacodec_fail_GpuResource: +__nvoc_ctor_Hdacodec_exit: + + return status; +} + +static void __nvoc_init_funcTable_Hdacodec_1(Hdacodec *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__hdacodecShareCallback__ = &__nvoc_thunk_GpuResource_hdacodecShareCallback; + + pThis->__hdacodecControl__ = &__nvoc_thunk_GpuResource_hdacodecControl; + + pThis->__hdacodecUnmap__ = &__nvoc_thunk_GpuResource_hdacodecUnmap; + + pThis->__hdacodecGetMemInterMapParams__ = &__nvoc_thunk_RmResource_hdacodecGetMemInterMapParams; + + pThis->__hdacodecGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_hdacodecGetMemoryMappingDescriptor; + + pThis->__hdacodecGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_hdacodecGetMapAddrSpace; + + pThis->__hdacodecGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_hdacodecGetInternalObjectHandle; + + pThis->__hdacodecControlFilter__ = &__nvoc_thunk_RsResource_hdacodecControlFilter; + + pThis->__hdacodecAddAdditionalDependants__ = &__nvoc_thunk_RsResource_hdacodecAddAdditionalDependants; + + pThis->__hdacodecGetRefCount__ = &__nvoc_thunk_RsResource_hdacodecGetRefCount; + + pThis->__hdacodecCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_hdacodecCheckMemInterUnmap; + + pThis->__hdacodecMapTo__ = &__nvoc_thunk_RsResource_hdacodecMapTo; + + pThis->__hdacodecControl_Prologue__ = &__nvoc_thunk_RmResource_hdacodecControl_Prologue; + + pThis->__hdacodecGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize; + + pThis->__hdacodecCanCopy__ = &__nvoc_thunk_RsResource_hdacodecCanCopy; + + pThis->__hdacodecInternalControlForward__ = &__nvoc_thunk_GpuResource_hdacodecInternalControlForward; + + pThis->__hdacodecPreDestruct__ = &__nvoc_thunk_RsResource_hdacodecPreDestruct; + + pThis->__hdacodecUnmapFrom__ = &__nvoc_thunk_RsResource_hdacodecUnmapFrom; + + pThis->__hdacodecControl_Epilogue__ = &__nvoc_thunk_RmResource_hdacodecControl_Epilogue; + + pThis->__hdacodecControlLookup__ = &__nvoc_thunk_RsResource_hdacodecControlLookup; + + pThis->__hdacodecMap__ = &__nvoc_thunk_GpuResource_hdacodecMap; + + pThis->__hdacodecAccessCallback__ = &__nvoc_thunk_RmResource_hdacodecAccessCallback; +} + +void __nvoc_init_funcTable_Hdacodec(Hdacodec *pThis) { + __nvoc_init_funcTable_Hdacodec_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Hdacodec(Hdacodec *pThis) { + pThis->__nvoc_pbase_Hdacodec = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = 
&pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_Hdacodec(pThis); +} + +NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Hdacodec *pThis; + + pThis = portMemAllocNonPaged(sizeof(Hdacodec)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Hdacodec)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Hdacodec); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Hdacodec(pThis); + status = __nvoc_ctor_Hdacodec(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Hdacodec_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Hdacodec_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Hdacodec(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_hda_codec_api_nvoc.h b/src/nvidia/generated/g_hda_codec_api_nvoc.h new file mode 100644 index 000000000..39aebb188 --- /dev/null +++ b/src/nvidia/generated/g_hda_codec_api_nvoc.h @@ -0,0 +1,229 @@ +#ifndef _G_HDA_CODEC_API_NVOC_H_ +#define _G_HDA_CODEC_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_hda_codec_api_nvoc.h" + +#ifndef HDA_CODEC_API_H +#define HDA_CODEC_API_H + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_resource.h" +#include "ctrl/ctrl90ec.h" +#include "gpu/gpu_resource.h" + +#ifdef NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Hdacodec { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct Hdacodec *__nvoc_pbase_Hdacodec; + NvBool (*__hdacodecShareCallback__)(struct Hdacodec *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__hdacodecControl__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hdacodecUnmap__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__hdacodecGetMemInterMapParams__)(struct Hdacodec *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__hdacodecGetMemoryMappingDescriptor__)(struct Hdacodec *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__hdacodecGetMapAddrSpace__)(struct Hdacodec *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__hdacodecGetInternalObjectHandle__)(struct Hdacodec *); + NV_STATUS (*__hdacodecControlFilter__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__hdacodecAddAdditionalDependants__)(struct RsClient *, struct Hdacodec *, RsResourceRef *); + NvU32 (*__hdacodecGetRefCount__)(struct Hdacodec *); + NV_STATUS (*__hdacodecCheckMemInterUnmap__)(struct Hdacodec *, NvBool); + NV_STATUS (*__hdacodecMapTo__)(struct Hdacodec *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__hdacodecControl_Prologue__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hdacodecGetRegBaseOffsetAndSize__)(struct Hdacodec *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__hdacodecCanCopy__)(struct Hdacodec *); + NV_STATUS (*__hdacodecInternalControlForward__)(struct Hdacodec *, NvU32, void *, NvU32); + void (*__hdacodecPreDestruct__)(struct Hdacodec *); + NV_STATUS (*__hdacodecUnmapFrom__)(struct Hdacodec *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__hdacodecControl_Epilogue__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hdacodecControlLookup__)(struct Hdacodec *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__hdacodecMap__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__hdacodecAccessCallback__)(struct Hdacodec *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_Hdacodec_TYPEDEF__ +#define __NVOC_CLASS_Hdacodec_TYPEDEF__ +typedef struct Hdacodec Hdacodec; +#endif /* __NVOC_CLASS_Hdacodec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Hdacodec +#define __nvoc_class_id_Hdacodec 0xf59a20 +#endif /* __nvoc_class_id_Hdacodec */ + +extern const struct NVOC_CLASS_DEF 
__nvoc_class_def_Hdacodec; + +#define __staticCast_Hdacodec(pThis) \ + ((pThis)->__nvoc_pbase_Hdacodec) + +#ifdef __nvoc_hda_codec_api_h_disabled +#define __dynamicCast_Hdacodec(pThis) ((Hdacodec*)NULL) +#else //__nvoc_hda_codec_api_h_disabled +#define __dynamicCast_Hdacodec(pThis) \ + ((Hdacodec*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Hdacodec))) +#endif //__nvoc_hda_codec_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Hdacodec(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Hdacodec((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define hdacodecShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) hdacodecShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define hdacodecControl(pGpuResource, pCallContext, pParams) hdacodecControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define hdacodecUnmap(pGpuResource, pCallContext, pCpuMapping) hdacodecUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define hdacodecGetMemInterMapParams(pRmResource, pParams) hdacodecGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define hdacodecGetMemoryMappingDescriptor(pRmResource, ppMemDesc) hdacodecGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define hdacodecGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) hdacodecGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define hdacodecGetInternalObjectHandle(pGpuResource) hdacodecGetInternalObjectHandle_DISPATCH(pGpuResource) +#define hdacodecControlFilter(pResource, pCallContext, pParams) hdacodecControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecAddAdditionalDependants(pClient, pResource, pReference) hdacodecAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define hdacodecGetRefCount(pResource) hdacodecGetRefCount_DISPATCH(pResource) +#define hdacodecCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) hdacodecCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define hdacodecMapTo(pResource, pParams) hdacodecMapTo_DISPATCH(pResource, pParams) +#define hdacodecControl_Prologue(pResource, pCallContext, pParams) hdacodecControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) hdacodecGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define hdacodecCanCopy(pResource) hdacodecCanCopy_DISPATCH(pResource) +#define hdacodecInternalControlForward(pGpuResource, command, pParams, size) hdacodecInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define hdacodecPreDestruct(pResource) hdacodecPreDestruct_DISPATCH(pResource) +#define hdacodecUnmapFrom(pResource, pParams) hdacodecUnmapFrom_DISPATCH(pResource, pParams) +#define hdacodecControl_Epilogue(pResource, pCallContext, pParams) hdacodecControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define hdacodecControlLookup(pResource, pParams, ppEntry) hdacodecControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define hdacodecMap(pGpuResource, pCallContext, pParams, pCpuMapping) hdacodecMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define 
hdacodecAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) hdacodecAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool hdacodecShareCallback_DISPATCH(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__hdacodecShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS hdacodecControl_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__hdacodecControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS hdacodecUnmap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__hdacodecUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS hdacodecGetMemInterMapParams_DISPATCH(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__hdacodecGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS hdacodecGetMemoryMappingDescriptor_DISPATCH(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__hdacodecGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS hdacodecGetMapAddrSpace_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__hdacodecGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle hdacodecGetInternalObjectHandle_DISPATCH(struct Hdacodec *pGpuResource) { + return pGpuResource->__hdacodecGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS hdacodecControlFilter_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__hdacodecControlFilter__(pResource, pCallContext, pParams); +} + +static inline void hdacodecAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) { + pResource->__hdacodecAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 hdacodecGetRefCount_DISPATCH(struct Hdacodec *pResource) { + return pResource->__hdacodecGetRefCount__(pResource); +} + +static inline NV_STATUS hdacodecCheckMemInterUnmap_DISPATCH(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__hdacodecCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS hdacodecMapTo_DISPATCH(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__hdacodecMapTo__(pResource, pParams); +} + +static inline NV_STATUS hdacodecControl_Prologue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__hdacodecControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS hdacodecGetRegBaseOffsetAndSize_DISPATCH(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__hdacodecGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool hdacodecCanCopy_DISPATCH(struct Hdacodec *pResource) { + return pResource->__hdacodecCanCopy__(pResource); +} + +static inline NV_STATUS 
hdacodecInternalControlForward_DISPATCH(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__hdacodecInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void hdacodecPreDestruct_DISPATCH(struct Hdacodec *pResource) { + pResource->__hdacodecPreDestruct__(pResource); +} + +static inline NV_STATUS hdacodecUnmapFrom_DISPATCH(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__hdacodecUnmapFrom__(pResource, pParams); +} + +static inline void hdacodecControl_Epilogue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__hdacodecControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS hdacodecControlLookup_DISPATCH(struct Hdacodec *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__hdacodecControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS hdacodecMap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__hdacodecMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool hdacodecAccessCallback_DISPATCH(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__hdacodecAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS hdacodecConstruct_IMPL(struct Hdacodec *arg_pHdacodecApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_hdacodecConstruct(arg_pHdacodecApi, arg_pCallContext, arg_pParams) hdacodecConstruct_IMPL(arg_pHdacodecApi, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HDA_CODEC_API_NVOC_H_ diff --git a/src/nvidia/generated/g_heap_nvoc.c b/src/nvidia/generated/g_heap_nvoc.c new file mode 100644 index 000000000..9583b7e78 --- /dev/null +++ b/src/nvidia/generated/g_heap_nvoc.c @@ -0,0 +1,149 @@ +#define NVOC_HEAP_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_heap_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x556e9a = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Heap; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_Heap(Heap*); +void __nvoc_init_funcTable_Heap(Heap*); +NV_STATUS __nvoc_ctor_Heap(Heap*); +void __nvoc_init_dataField_Heap(Heap*); +void __nvoc_dtor_Heap(Heap*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Heap; + +static const struct NVOC_RTTI __nvoc_rtti_Heap_Heap = { + /*pClassDef=*/ &__nvoc_class_def_Heap, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Heap, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Heap_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Heap, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Heap = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_Heap_Heap, + &__nvoc_rtti_Heap_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Heap = +{ + /*classInfo=*/ { + /*size=*/ 
sizeof(Heap), + /*classId=*/ classId(Heap), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Heap", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Heap, + /*pCastInfo=*/ &__nvoc_castinfo_Heap, + /*pExportInfo=*/ &__nvoc_export_info_Heap +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Heap = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_Heap(Heap *pThis) { + __nvoc_heapDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Heap(Heap *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_Heap(Heap *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_Heap_fail_Object; + __nvoc_init_dataField_Heap(pThis); + goto __nvoc_ctor_Heap_exit; // Success + +__nvoc_ctor_Heap_fail_Object: +__nvoc_ctor_Heap_exit: + + return status; +} + +static void __nvoc_init_funcTable_Heap_1(Heap *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_Heap(Heap *pThis) { + __nvoc_init_funcTable_Heap_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_Heap(Heap *pThis) { + pThis->__nvoc_pbase_Heap = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_Heap(pThis); +} + +NV_STATUS __nvoc_objCreate_Heap(Heap **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + Heap *pThis; + + pThis = portMemAllocNonPaged(sizeof(Heap)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Heap)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Heap); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Heap(pThis); + status = __nvoc_ctor_Heap(pThis); + if (status != NV_OK) goto __nvoc_objCreate_Heap_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Heap_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Heap(Heap **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_Heap(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_heap_nvoc.h b/src/nvidia/generated/g_heap_nvoc.h new file mode 100644 index 000000000..4b7164947 --- /dev/null +++ b/src/nvidia/generated/g_heap_nvoc.h @@ -0,0 +1,666 @@ +#ifndef _G_HEAP_NVOC_H_ +#define _G_HEAP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_heap_nvoc.h" + +#ifndef _OBJHEAP_H_ +#define _OBJHEAP_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for the Heap Object. The heap object * +* is responsible for allocating memory based on usage and memory * +* configuration. * +* * +\***************************************************************************/ + +#include "nvlimits.h" // NV_MAX_SUBDEVICES +#include "gpu/mem_mgr/heap_base.h" +#include "core/core.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" // NV2080_CTRL_FB_OFFLINED_ADDRESS_INFO +#include "resserv/resserv.h" +#include "resserv/rs_resource.h" +#include "containers/eheap_old.h" + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + + +typedef struct +{ + NvU64 alignment; + NvU64 allocLo; + NvU64 allocAl; + NvU64 allocHi; + NvU64 allocSize; + NvBool ignoreBankPlacement; +} OBJHEAP_ALLOC_DATA; + +// New Stuff for WDDM +typedef struct +{ + NvU32 client; + NvU32 owner; + NvU32 type; + NvU32 flags; + NvU32 *pHeight; + NvU32 *pWidth; + NvU32 *pPitch; + NvU64 *pSize; + NvU64 *pAlignment; + NvU32 *pAttr; + NvU32 *pAttr2; + NvU32 *pKind; + NvU32 bankPlacement; + NvBool ignoreBankPlacement; + NvU64 pad; + NvU64 alignAdjust; + NvU32 format; +} HEAP_ALLOC_HINT_PARAMS; + +typedef struct +{ + NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS *pUserParams; + NvU32 pad; + NvU32 hwResId; + void *bindResultFunc; + void *pHandle; + HWRESOURCE_INFO hwResource; +} MEMORY_HW_RESOURCES_ALLOCATION_REQUEST; + +// +// Growth placement and direction modifiers - the grow direction if a bank placement +// fails (bit 7) | grow direction within a bank (bit 6) | bank number (bits 0..5) +// gives the algorithm the information to try placing in the specified bank with the +// specified direction within a bank. If a bank placement fails, use the grow direction +// to search for free space anywhere. Each bank placement group (image, depth, ..) +// gets MEM_NUM_BANKS_TO_TRY such bytes in a NvU32. 
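+// Illustrative example (not from the original comment, derived from the defines
+// below): a placement byte of (MEM_GROW_DOWN | BANK_MEM_GROW_DOWN | 0x02) == 0xC2
+// requests bank 2, growing downward within that bank, and falls back to a
+// downward search of the whole heap if the bank placement fails; the bank number
+// is recovered with (byte & MEM_BANK_MASK).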
+// + +// grow direction within a bank +#define BANK_MEM_GROW_UP 0x00 +#define BANK_MEM_GROW_DOWN 0x40 +#define BANK_MEM_GROW_MASK 0x40 + +// grow direction if a bank placement fails +#define MEM_GROW_UP 0x00 +#define MEM_GROW_DOWN 0x80 +#define MEM_GROW_MASK 0x80 + +// other defines +#define MEM_BANK_MASK 0x3F +#define MEM_NO_BANK_SELECTION 0xFF +#define MEM_NUM_BANKS_TO_TRY 0x1 // check a max of 1 bank +#define MEM_BANK_DATA_SIZE 0x8 // store everything in a byte + +// +// defines the number of NvU32's in the bank placement array, which defines +// what kinds of allocations go where (see heap.c) +// If more types need to be handled separately in terms of bank placement, +// increase this number, add another define, and add some code to heapCreate +// and heapAlloc +// +#define BANK_PLACEMENT_IMAGE 0 +#define BANK_PLACEMENT_DEPTH 1 +#define BANK_PLACEMENT_TEX_OVERLAY_FONT 2 +#define BANK_PLACEMENT_OTHER 3 +#define BANK_PLACEMENT_NUM_GROUPS 0x00000004 + +// +// Currently the HEAP_MEM_BLOCK refCount field is only 8 bits. +// +#define HEAP_MAX_REF_COUNT 0xFFFFFFFF + +// +// any allocations done for internal RM data structures from the heap should be +// marked as one of the following values. +// this is required so the RM can report back the internal scratch memory allocations +// in NVOS32_FUNCTION_INFO_TYPE_ALLOC_BLOCKS::NVOS32_TYPE_RM_SCRATCH +// +#define HEAP_OWNER_RM_SCRATCH_BEGIN 0xDEAF0000 +#define HEAP_OWNER_RM_CHANNEL_INSTMEM (HEAP_OWNER_RM_SCRATCH_BEGIN + 1) +#define HEAP_OWNER_RM_CHANNEL_CTX_BUFFER (HEAP_OWNER_RM_SCRATCH_BEGIN + 2) +#define HEAP_OWNER_RM_VIDEO_UCODE (HEAP_OWNER_RM_SCRATCH_BEGIN + 3) +#define HEAP_OWNER_RM_FB_BUG_147656 (HEAP_OWNER_RM_SCRATCH_BEGIN + 4) +#define HEAP_OWNER_RM_FB_BUG_177053 (HEAP_OWNER_RM_SCRATCH_BEGIN + 5) +#define HEAP_OWNER_RM_DSI_INST_MEM (HEAP_OWNER_RM_SCRATCH_BEGIN + 6) +#define HEAP_OWNER_RM_CTX_SAVE_AREAS (HEAP_OWNER_RM_SCRATCH_BEGIN + 7) +#define HEAP_OWNER_RM_RESERVED_REGION (HEAP_OWNER_RM_SCRATCH_BEGIN + 8) +#define HEAP_OWNER_RM_SCRATCH_END (HEAP_OWNER_RM_SCRATCH_BEGIN + 9) // make this the last + +#define HEAP_OWNER_RM_KERNEL_CLIENT (HEAP_OWNER_RM_SCRATCH_END + 1) +#define HEAP_OWNER_PMA_RESERVED_REGION (HEAP_OWNER_RM_SCRATCH_END + 2) +#define HEAP_OWNER_RM_CLIENT_GENERIC (HEAP_OWNER_RM_SCRATCH_END + 3) + +// +// size of the texture buffer array, when more than 4 clients detected, +// kill one of the clients listed in the client texture buffer +// +#define MAX_TEXTURE_CLIENT_IDS 4 + + +// +// HEAP object is being created for multiple usecases now. Initial heap object created during RM init manages the whole FB +// and there are usecases such as PhysicalMemorySuballocator which uses HEAP to manage its internal allocations. We need to +// differentiate these heaps to allow/block certain features such as scrub/PMA etc. +// +typedef enum +{ + HEAP_TYPE_RM_GLOBAL = 0x0, // HEAP created by RM to manage entire FB + HEAP_TYPE_PHYS_MEM_SUBALLOCATOR = 0x1, // HEAP created by clients to manage Physical Memory Suballocations + HEAP_TYPE_PARTITION_LOCAL = 0x2, // HEAP created by RM to manage memory assigned to a SMC partition +} HEAP_TYPE_INTERNAL; + +/*! 
+ * Structure to hold references to PhysMemSubAlloc resource + */ +typedef struct _def_physmemsuballoc_data +{ + void *pObject; // PMSA object + MEMORY_DESCRIPTOR *pMemDesc; // Parent memdesc from which memory managed by PMSA is allocated +} PHYS_MEM_SUBALLOCATOR_DATA; + +typedef struct MEM_BLOCK MEM_BLOCK; +struct MEM_BLOCK +{ + NvBool allocedMemDesc; + NvU8 reserved0; + NvU8 reserved1; + NvU32 owner; + NvHandle mhandle; + NvU64 begin; + NvU64 align; + NvU64 alignPad; // padding to beginning of surface from aligned start (hack for NV50 perf work) + NvU64 end; + NvU32 textureId; + NvU32 format; + NvU32 pitch; // allocated surface pitch, needed for realloc + NvU32 height; // allocated surface height, needed for realloc + NvU32 width; // allocated surface width, needed for realloc + NvU32 refCount; + NODE node; + MEMORY_DESCRIPTOR *pMemDesc; // Back pointer to the memory descriptor for this allocation + HWRESOURCE_INFO hwResource; + union + { + NvU32 type; + MEM_BLOCK *prevFree; + } u0; + union + { + MEM_BLOCK *nextFree; + } u1; + MEM_BLOCK *prev; + MEM_BLOCK *next; + + // hooks into noncontig block freelist + MEM_BLOCK *nextFreeNoncontig; + MEM_BLOCK *prevFreeNoncontig; + MEM_BLOCK *noncontigAllocListNext; +}; + +typedef struct TEX_INFO +{ + NvU32 clientId; // texture client id + NvU32 refCount; // how many textures have been allocated wrt this client + NvU8 placementFlags; // how texture is grown + NvBool mostRecentAllocatedFlag; // most recently allocated client +} TEX_INFO; + +#define NV_HEAP_PAGE_OFFLINE_TYPE 31:29 +#define NV_HEAP_PAGE_OFFLINE_PAGE_NUMBER 27:0 + +typedef struct +{ + MEMORY_DESCRIPTOR *pMemDesc; // memory descriptor for the blacklisted page + NvU64 physOffset; // physical offset of blacklisted FB address + NvU64 size; // size of the blacklisted page + NvBool bIsValid; // If the blacklisted address is still managed by RM + NvBool bPendingRetirement; // if the dynamically blacklisted pages is pending to be retired. 
+} BLACKLIST_CHUNK; + +typedef struct +{ + BLACKLIST_CHUNK *pBlacklistChunks; + NvU32 count; +} BLACKLIST; + +typedef struct +{ + NvU32 count; + BLACKLIST_ADDRESS* data; +} BLACKLIST_ADDRESSES; + +#define SHUFFLE_STRIDE_MAX 5 + +#ifdef NVOC_HEAP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Heap { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct Heap *__nvoc_pbase_Heap; + NvBool PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT; + NvBool PDB_PROP_HEAP_PAGE_SHUFFLE; + HEAP_TYPE_INTERNAL heapType; + void *pHeapTypeSpecificData; + NvU64 refCount; + NvBool bHasFbRegions; + NvU64 base; + NvU64 total; + NvU64 free; + NvU64 reserved; + struct MEM_BLOCK *pBlockList; + struct MEM_BLOCK *pFreeBlockList; + NODE *pBlockTree; + NvHandle memHandle; + NvU32 numBlocks; + TEX_INFO textureData[4]; + struct MEM_BLOCK *pNoncontigFreeBlockList; + BLACKLIST_ADDRESSES blackListAddresses; + BLACKLIST blackList; + NvU32 dynamicBlacklistSize; + NvU32 staticBlacklistSize; + NvU32 placementStrategy[4]; + NvU32 shuffleStrides[5]; + NvU32 shuffleStrideIndex; + PMA pmaObject; + NvU64 peakInternalUsage; + NvU64 peakExternalUsage; + NvU64 currInternalUsage; + NvU64 currExternalUsage; +}; + +#ifndef __NVOC_CLASS_Heap_TYPEDEF__ +#define __NVOC_CLASS_Heap_TYPEDEF__ +typedef struct Heap Heap; +#endif /* __NVOC_CLASS_Heap_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Heap +#define __nvoc_class_id_Heap 0x556e9a +#endif /* __nvoc_class_id_Heap */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Heap; + +#define __staticCast_Heap(pThis) \ + ((pThis)->__nvoc_pbase_Heap) + +#ifdef __nvoc_heap_h_disabled +#define __dynamicCast_Heap(pThis) ((Heap*)NULL) +#else //__nvoc_heap_h_disabled +#define __dynamicCast_Heap(pThis) \ + ((Heap*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Heap))) +#endif //__nvoc_heap_h_disabled + +#define PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT_BASE_CAST +#define PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT_BASE_NAME PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT +#define PDB_PROP_HEAP_PAGE_SHUFFLE_BASE_CAST +#define PDB_PROP_HEAP_PAGE_SHUFFLE_BASE_NAME PDB_PROP_HEAP_PAGE_SHUFFLE + +NV_STATUS __nvoc_objCreateDynamic_Heap(Heap**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Heap(Heap**, Dynamic*, NvU32); +#define __objCreate_Heap(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_Heap((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS heapInit_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3, HEAP_TYPE_INTERNAL arg4, NvU32 arg5, void *arg6); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapInit(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3, HEAP_TYPE_INTERNAL arg4, NvU32 arg5, void *arg6) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapInit(arg0, arg1, arg2, arg3, arg4, arg5, arg6) heapInit_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapInitInternal_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3, HEAP_TYPE_INTERNAL arg4, void *arg5); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapInitInternal(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3, HEAP_TYPE_INTERNAL arg4, void *arg5) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define 
heapInitInternal(arg0, arg1, arg2, arg3, arg4, arg5) heapInitInternal_IMPL(arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_heap_h_disabled + +void heapDestruct_IMPL(struct Heap *arg0); +#define __nvoc_heapDestruct(arg0) heapDestruct_IMPL(arg0) +NV_STATUS heapAlloc_IMPL(struct OBJGPU *arg0, NvHandle arg1, struct Heap *arg2, MEMORY_ALLOCATION_REQUEST *arg3, NvHandle arg4, OBJHEAP_ALLOC_DATA *arg5, FB_ALLOC_INFO *arg6, HWRESOURCE_INFO **arg7, NvBool *arg8, NvBool arg9, NvBool arg10); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapAlloc(struct OBJGPU *arg0, NvHandle arg1, struct Heap *arg2, MEMORY_ALLOCATION_REQUEST *arg3, NvHandle arg4, OBJHEAP_ALLOC_DATA *arg5, FB_ALLOC_INFO *arg6, HWRESOURCE_INFO **arg7, NvBool *arg8, NvBool arg9, NvBool arg10) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapAlloc(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) heapAlloc_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapFree_IMPL(struct OBJGPU *pGpu, struct Heap *pHeap, NvU32 owner, MEMORY_DESCRIPTOR *pMemDesc); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapFree(struct OBJGPU *pGpu, struct Heap *pHeap, NvU32 owner, MEMORY_DESCRIPTOR *pMemDesc) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapFree(pGpu, pHeap, owner, pMemDesc) heapFree_IMPL(pGpu, pHeap, owner, pMemDesc) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapReference_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU32 arg2, MEMORY_DESCRIPTOR *arg3); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapReference(struct OBJGPU *arg0, struct Heap *arg1, NvU32 arg2, MEMORY_DESCRIPTOR *arg3) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapReference(arg0, arg1, arg2, arg3) heapReference_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapInfo_IMPL(struct Heap *arg0, NvU64 *arg1, NvU64 *arg2, NvU64 *arg3, NvU64 *arg4, NvU64 *arg5); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapInfo(struct Heap *arg0, NvU64 *arg1, NvU64 *arg2, NvU64 *arg3, NvU64 *arg4, NvU64 *arg5) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapInfo(arg0, arg1, arg2, arg3, arg4, arg5) heapInfo_IMPL(arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapInfoTypeAllocBlocks_IMPL(struct Heap *arg0, NvU32 arg1, NvU64 *arg2); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapInfoTypeAllocBlocks(struct Heap *arg0, NvU32 arg1, NvU64 *arg2) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapInfoTypeAllocBlocks(arg0, arg1, arg2) heapInfoTypeAllocBlocks_IMPL(arg0, arg1, arg2) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapGetSize_IMPL(struct Heap *arg0, NvU64 *arg1); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapGetSize(struct Heap *arg0, NvU64 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapGetSize(arg0, arg1) heapGetSize_IMPL(arg0, arg1) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapGetFree_IMPL(struct Heap *arg0, NvU64 *arg1); +#ifdef __nvoc_heap_h_disabled +static inline 
NV_STATUS heapGetFree(struct Heap *arg0, NvU64 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapGetFree(arg0, arg1) heapGetFree_IMPL(arg0, arg1) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapGetUsableSize_IMPL(struct Heap *arg0, NvU64 *arg1); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapGetUsableSize(struct Heap *arg0, NvU64 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapGetUsableSize(arg0, arg1) heapGetUsableSize_IMPL(arg0, arg1) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapGetBase_IMPL(struct Heap *arg0, NvU64 *arg1); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapGetBase(struct Heap *arg0, NvU64 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapGetBase(arg0, arg1) heapGetBase_IMPL(arg0, arg1) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapGetBlock_IMPL(struct Heap *arg0, NvU64 arg1, struct MEM_BLOCK **arg2); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapGetBlock(struct Heap *arg0, NvU64 arg1, struct MEM_BLOCK **arg2) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapGetBlock(arg0, arg1, arg2) heapGetBlock_IMPL(arg0, arg1, arg2) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapGetBlockHandle_IMPL(struct Heap *arg0, NvU32 arg1, NvU32 arg2, NvU64 arg3, NvBool arg4, NvHandle *arg5); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapGetBlockHandle(struct Heap *arg0, NvU32 arg1, NvU32 arg2, NvU64 arg3, NvBool arg4, NvHandle *arg5) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapGetBlockHandle(arg0, arg1, arg2, arg3, arg4, arg5) heapGetBlockHandle_IMPL(arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_heap_h_disabled + +NvU32 heapGetNumBlocks_IMPL(struct Heap *arg0); +#ifdef __nvoc_heap_h_disabled +static inline NvU32 heapGetNumBlocks(struct Heap *arg0) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return 0; +} +#else //__nvoc_heap_h_disabled +#define heapGetNumBlocks(arg0) heapGetNumBlocks_IMPL(arg0) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapGetBlockInfo_IMPL(struct Heap *arg0, NvU32 arg1, NVOS32_HEAP_DUMP_BLOCK *arg2); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapGetBlockInfo(struct Heap *arg0, NvU32 arg1, NVOS32_HEAP_DUMP_BLOCK *arg2) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapGetBlockInfo(arg0, arg1, arg2) heapGetBlockInfo_IMPL(arg0, arg1, arg2) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapAllocHint_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvHandle arg2, NvHandle arg3, HEAP_ALLOC_HINT_PARAMS *arg4); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapAllocHint(struct OBJGPU *arg0, struct Heap *arg1, NvHandle arg2, NvHandle arg3, HEAP_ALLOC_HINT_PARAMS *arg4) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapAllocHint(arg0, arg1, arg2, arg3, arg4) heapAllocHint_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapHwAlloc_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvHandle arg2, NvHandle arg3, NvHandle arg4, MEMORY_HW_RESOURCES_ALLOCATION_REQUEST *arg5, 
NvU32 *arg6, NvU32 *arg7); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapHwAlloc(struct OBJGPU *arg0, struct Heap *arg1, NvHandle arg2, NvHandle arg3, NvHandle arg4, MEMORY_HW_RESOURCES_ALLOCATION_REQUEST *arg5, NvU32 *arg6, NvU32 *arg7) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapHwAlloc(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) heapHwAlloc_IMPL(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +#endif //__nvoc_heap_h_disabled + +void heapHwFree_IMPL(struct OBJGPU *arg0, struct Heap *arg1, struct Memory *arg2, NvU32 arg3); +#ifdef __nvoc_heap_h_disabled +static inline void heapHwFree(struct OBJGPU *arg0, struct Heap *arg1, struct Memory *arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); +} +#else //__nvoc_heap_h_disabled +#define heapHwFree(arg0, arg1, arg2, arg3) heapHwFree_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapFreeBlockCount_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU32 *arg2); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapFreeBlockCount(struct OBJGPU *arg0, struct Heap *arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapFreeBlockCount(arg0, arg1, arg2) heapFreeBlockCount_IMPL(arg0, arg1, arg2) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapFreeBlockInfo_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU32 arg2, void *arg3); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapFreeBlockInfo(struct OBJGPU *arg0, struct Heap *arg1, NvU32 arg2, void *arg3) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapFreeBlockInfo(arg0, arg1, arg2, arg3) heapFreeBlockInfo_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapInitRegistryOverrides_IMPL(struct OBJGPU *arg0, struct Heap *arg1); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapInitRegistryOverrides(struct OBJGPU *arg0, struct Heap *arg1) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapInitRegistryOverrides(arg0, arg1) heapInitRegistryOverrides_IMPL(arg0, arg1) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapBlackListPages_IMPL(struct OBJGPU *arg0, struct Heap *arg1); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapBlackListPages(struct OBJGPU *arg0, struct Heap *arg1) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapBlackListPages(arg0, arg1) heapBlackListPages_IMPL(arg0, arg1) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapFreeBlackListedPages_IMPL(struct OBJGPU *arg0, struct Heap *arg1); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapFreeBlackListedPages(struct OBJGPU *arg0, struct Heap *arg1) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapFreeBlackListedPages(arg0, arg1) heapFreeBlackListedPages_IMPL(arg0, arg1) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapAddPageToBlackList_IMPL(struct OBJGPU *pGpu, struct Heap *pHeap, NvU64 pageNumber, NvU32 type); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapAddPageToBlackList(struct OBJGPU *pGpu, struct Heap *pHeap, NvU64 pageNumber, NvU32 type) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + 
return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapAddPageToBlackList(pGpu, pHeap, pageNumber, type) heapAddPageToBlackList_IMPL(pGpu, pHeap, pageNumber, type) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapStoreBlackList_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 *arg2, NvU64 *arg3, NvU32 arg4); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapStoreBlackList(struct OBJGPU *arg0, struct Heap *arg1, NvU64 *arg2, NvU64 *arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapStoreBlackList(arg0, arg1, arg2, arg3, arg4) heapStoreBlackList_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_heap_h_disabled + +NvBool heapIsPmaManaged_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3); +#ifdef __nvoc_heap_h_disabled +static inline NvBool heapIsPmaManaged(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_FALSE; +} +#else //__nvoc_heap_h_disabled +#define heapIsPmaManaged(arg0, arg1, arg2, arg3) heapIsPmaManaged_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_heap_h_disabled + +NvU32 heapAddRef_IMPL(struct Heap *arg0); +#ifdef __nvoc_heap_h_disabled +static inline NvU32 heapAddRef(struct Heap *arg0) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return 0; +} +#else //__nvoc_heap_h_disabled +#define heapAddRef(arg0) heapAddRef_IMPL(arg0) +#endif //__nvoc_heap_h_disabled + +NvU32 heapRemoveRef_IMPL(struct Heap *arg0); +#ifdef __nvoc_heap_h_disabled +static inline NvU32 heapRemoveRef(struct Heap *arg0) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return 0; +} +#else //__nvoc_heap_h_disabled +#define heapRemoveRef(arg0) heapRemoveRef_IMPL(arg0) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapResize_IMPL(struct Heap *arg0, NvS64 arg1); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapResize(struct Heap *arg0, NvS64 arg1) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapResize(arg0, arg1) heapResize_IMPL(arg0, arg1) +#endif //__nvoc_heap_h_disabled + +void heapFilterBlackListPages_IMPL(struct Heap *arg0, NvU64 arg1, NvU64 arg2); +#ifdef __nvoc_heap_h_disabled +static inline void heapFilterBlackListPages(struct Heap *arg0, NvU64 arg1, NvU64 arg2) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); +} +#else //__nvoc_heap_h_disabled +#define heapFilterBlackListPages(arg0, arg1, arg2) heapFilterBlackListPages_IMPL(arg0, arg1, arg2) +#endif //__nvoc_heap_h_disabled + +NV_STATUS heapStorePendingBlackList_IMPL(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3); +#ifdef __nvoc_heap_h_disabled +static inline NV_STATUS heapStorePendingBlackList(struct OBJGPU *arg0, struct Heap *arg1, NvU64 arg2, NvU64 arg3) { + NV_ASSERT_FAILED_PRECOMP("Heap was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_heap_h_disabled +#define heapStorePendingBlackList(arg0, arg1, arg2, arg3) heapStorePendingBlackList_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_heap_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _OBJHEAP_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HEAP_NVOC_H_ diff --git a/src/nvidia/generated/g_host_eng_nvoc.c b/src/nvidia/generated/g_host_eng_nvoc.c new file mode 100644 index 000000000..871fefe6f --- /dev/null +++ b/src/nvidia/generated/g_host_eng_nvoc.c @@ -0,0 +1,93 @@ +#define NVOC_HOST_ENG_H_PRIVATE_ACCESS_ALLOWED +#include 
"nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_host_eng_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb356e7 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHOSTENG; + +void __nvoc_init_OBJHOSTENG(OBJHOSTENG*); +void __nvoc_init_funcTable_OBJHOSTENG(OBJHOSTENG*); +NV_STATUS __nvoc_ctor_OBJHOSTENG(OBJHOSTENG*); +void __nvoc_init_dataField_OBJHOSTENG(OBJHOSTENG*); +void __nvoc_dtor_OBJHOSTENG(OBJHOSTENG*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHOSTENG; + +static const struct NVOC_RTTI __nvoc_rtti_OBJHOSTENG_OBJHOSTENG = { + /*pClassDef=*/ &__nvoc_class_def_OBJHOSTENG, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHOSTENG, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHOSTENG = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_OBJHOSTENG_OBJHOSTENG, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHOSTENG = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJHOSTENG), + /*classId=*/ classId(OBJHOSTENG), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJHOSTENG", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJHOSTENG, + /*pExportInfo=*/ &__nvoc_export_info_OBJHOSTENG +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHOSTENG = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJHOSTENG(OBJHOSTENG *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJHOSTENG(OBJHOSTENG *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJHOSTENG(OBJHOSTENG *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_OBJHOSTENG(pThis); + goto __nvoc_ctor_OBJHOSTENG_exit; // Success + +__nvoc_ctor_OBJHOSTENG_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJHOSTENG_1(OBJHOSTENG *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__hostengHaltEngine__ = &hostengHaltEngine_IMPL; + + pThis->__hostengHaltAndReset__ = &hostengHaltAndReset_IMPL; + + pThis->__hostengReset__ = &hostengReset_IMPL; +} + +void __nvoc_init_funcTable_OBJHOSTENG(OBJHOSTENG *pThis) { + __nvoc_init_funcTable_OBJHOSTENG_1(pThis); +} + +void __nvoc_init_OBJHOSTENG(OBJHOSTENG *pThis) { + pThis->__nvoc_pbase_OBJHOSTENG = pThis; + __nvoc_init_funcTable_OBJHOSTENG(pThis); +} + diff --git a/src/nvidia/generated/g_host_eng_nvoc.h b/src/nvidia/generated/g_host_eng_nvoc.h new file mode 100644 index 000000000..db1a743c3 --- /dev/null +++ b/src/nvidia/generated/g_host_eng_nvoc.h @@ -0,0 +1,122 @@ +#ifndef _G_HOST_ENG_NVOC_H_ +#define _G_HOST_ENG_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_host_eng_nvoc.h" + +#ifndef HOST_ENG_H +#define HOST_ENG_H + +/*! + * @file host_eng.h + * @brief Provides definitions for all OBJHOSTENG data structures and interfaces. + */ + +#include "core/core.h" +#include "gpu/gpu_timeout.h" + +#include "kernel/gpu/fifo/kernel_channel.h" + +typedef struct OBJHOSTENG *POBJHOSTENG; + +/*! + * Interface class for all Hosteng modules. + */ +#ifdef NVOC_HOST_ENG_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJHOSTENG { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJHOSTENG *__nvoc_pbase_OBJHOSTENG; + NV_STATUS (*__hostengHaltEngine__)(struct OBJGPU *, struct OBJHOSTENG *); + NV_STATUS (*__hostengHaltAndReset__)(struct OBJGPU *, struct OBJHOSTENG *, RMTIMEOUT *); + NV_STATUS (*__hostengReset__)(struct OBJGPU *, struct OBJHOSTENG *, NvBool, struct KernelChannel *, struct KernelChannel **); +}; + +#ifndef __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ +#define __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ +typedef struct OBJHOSTENG OBJHOSTENG; +#endif /* __NVOC_CLASS_OBJHOSTENG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHOSTENG +#define __nvoc_class_id_OBJHOSTENG 0xb356e7 +#endif /* __nvoc_class_id_OBJHOSTENG */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHOSTENG; + +#define __staticCast_OBJHOSTENG(pThis) \ + ((pThis)->__nvoc_pbase_OBJHOSTENG) + +#ifdef __nvoc_host_eng_h_disabled +#define __dynamicCast_OBJHOSTENG(pThis) ((OBJHOSTENG*)NULL) +#else //__nvoc_host_eng_h_disabled +#define __dynamicCast_OBJHOSTENG(pThis) \ + ((OBJHOSTENG*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHOSTENG))) +#endif //__nvoc_host_eng_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJHOSTENG(OBJHOSTENG**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHOSTENG(OBJHOSTENG**, Dynamic*, NvU32); +#define __objCreate_OBJHOSTENG(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHOSTENG((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define hostengHaltEngine(pGpu, pHosteng) hostengHaltEngine_DISPATCH(pGpu, pHosteng) +#define hostengHaltAndReset(pGpu, pHosteng, pRmTimeout) hostengHaltAndReset_DISPATCH(pGpu, pHosteng, pRmTimeout) +#define hostengReset(pGpu, pHosteng, bReload, pKernelChannel, ppCurrentKernelChannel) hostengReset_DISPATCH(pGpu, pHosteng, bReload, pKernelChannel, ppCurrentKernelChannel) +NV_STATUS hostengHaltEngine_IMPL(struct 
OBJGPU *pGpu, struct OBJHOSTENG *pHosteng); + +static inline NV_STATUS hostengHaltEngine_DISPATCH(struct OBJGPU *pGpu, struct OBJHOSTENG *pHosteng) { + return pHosteng->__hostengHaltEngine__(pGpu, pHosteng); +} + +NV_STATUS hostengHaltAndReset_IMPL(struct OBJGPU *pGpu, struct OBJHOSTENG *pHosteng, RMTIMEOUT *pRmTimeout); + +static inline NV_STATUS hostengHaltAndReset_DISPATCH(struct OBJGPU *pGpu, struct OBJHOSTENG *pHosteng, RMTIMEOUT *pRmTimeout) { + return pHosteng->__hostengHaltAndReset__(pGpu, pHosteng, pRmTimeout); +} + +NV_STATUS hostengReset_IMPL(struct OBJGPU *pGpu, struct OBJHOSTENG *pHosteng, NvBool bReload, struct KernelChannel *pKernelChannel, struct KernelChannel **ppCurrentKernelChannel); + +static inline NV_STATUS hostengReset_DISPATCH(struct OBJGPU *pGpu, struct OBJHOSTENG *pHosteng, NvBool bReload, struct KernelChannel *pKernelChannel, struct KernelChannel **ppCurrentKernelChannel) { + return pHosteng->__hostengReset__(pGpu, pHosteng, bReload, pKernelChannel, ppCurrentKernelChannel); +} + +#undef PRIVATE_FIELD + + +#endif // HOST_ENG_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HOST_ENG_NVOC_H_ diff --git a/src/nvidia/generated/g_hw_resources_nvoc.c b/src/nvidia/generated/g_hw_resources_nvoc.c new file mode 100644 index 000000000..cf1c4487f --- /dev/null +++ b/src/nvidia/generated/g_hw_resources_nvoc.c @@ -0,0 +1,324 @@ +#define NVOC_HW_RESOURCES_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_hw_resources_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x9a2a71 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryHwResources; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_MemoryHwResources(MemoryHwResources*); +void __nvoc_init_funcTable_MemoryHwResources(MemoryHwResources*); +NV_STATUS __nvoc_ctor_MemoryHwResources(MemoryHwResources*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_MemoryHwResources(MemoryHwResources*); +void __nvoc_dtor_MemoryHwResources(MemoryHwResources*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryHwResources; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryHwResources_MemoryHwResources = { + /*pClassDef=*/ &__nvoc_class_def_MemoryHwResources, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MemoryHwResources, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryHwResources_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryHwResources, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryHwResources_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryHwResources, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryHwResources_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryHwResources, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryHwResources_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryHwResources, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryHwResources_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryHwResources, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MemoryHwResources = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_MemoryHwResources_MemoryHwResources, + &__nvoc_rtti_MemoryHwResources_Memory, + &__nvoc_rtti_MemoryHwResources_RmResource, + &__nvoc_rtti_MemoryHwResources_RmResourceCommon, + &__nvoc_rtti_MemoryHwResources_RsResource, + &__nvoc_rtti_MemoryHwResources_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryHwResources = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MemoryHwResources), + /*classId=*/ classId(MemoryHwResources), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MemoryHwResources", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MemoryHwResources, + /*pCastInfo=*/ &__nvoc_castinfo_MemoryHwResources, + /*pExportInfo=*/ &__nvoc_export_info_MemoryHwResources +}; + +static NvBool __nvoc_thunk_MemoryHwResources_resCanCopy(struct RsResource *pMemoryHwResources) { + return hwresCanCopy((struct MemoryHwResources *)(((unsigned char *)pMemoryHwResources) - __nvoc_rtti_MemoryHwResources_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_hwresCheckMemInterUnmap(struct MemoryHwResources *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_hwresControl(struct MemoryHwResources *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_hwresUnmap(struct MemoryHwResources *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_hwresGetMemInterMapParams(struct MemoryHwResources *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_hwresGetMemoryMappingDescriptor(struct MemoryHwResources *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_hwresGetMapAddrSpace(struct MemoryHwResources *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + 
+static NvBool __nvoc_thunk_RmResource_hwresShareCallback(struct MemoryHwResources *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_hwresControlFilter(struct MemoryHwResources *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_hwresAddAdditionalDependants(struct RsClient *pClient, struct MemoryHwResources *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_hwresGetRefCount(struct MemoryHwResources *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_hwresMapTo(struct MemoryHwResources *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_hwresControl_Prologue(struct MemoryHwResources *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_hwresIsReady(struct MemoryHwResources *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_hwresCheckCopyPermissions(struct MemoryHwResources *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_hwresPreDestruct(struct MemoryHwResources *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_hwresUnmapFrom(struct MemoryHwResources *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_hwresControl_Epilogue(struct MemoryHwResources *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_hwresControlLookup(struct MemoryHwResources *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RsResource.offset), pParams, ppEntry); +} + 
+static NV_STATUS __nvoc_thunk_Memory_hwresMap(struct MemoryHwResources *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryHwResources_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_hwresAccessCallback(struct MemoryHwResources *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryHwResources_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryHwResources = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_MemoryHwResources(MemoryHwResources *pThis) { + __nvoc_hwresDestruct(pThis); + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MemoryHwResources(MemoryHwResources *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_MemoryHwResources(MemoryHwResources *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MemoryHwResources_fail_Memory; + __nvoc_init_dataField_MemoryHwResources(pThis); + + status = __nvoc_hwresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MemoryHwResources_fail__init; + goto __nvoc_ctor_MemoryHwResources_exit; // Success + +__nvoc_ctor_MemoryHwResources_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_MemoryHwResources_fail_Memory: +__nvoc_ctor_MemoryHwResources_exit: + + return status; +} + +static void __nvoc_init_funcTable_MemoryHwResources_1(MemoryHwResources *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__hwresCanCopy__ = &hwresCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_MemoryHwResources_resCanCopy; + + pThis->__hwresCheckMemInterUnmap__ = &__nvoc_thunk_Memory_hwresCheckMemInterUnmap; + + pThis->__hwresControl__ = &__nvoc_thunk_Memory_hwresControl; + + pThis->__hwresUnmap__ = &__nvoc_thunk_Memory_hwresUnmap; + + pThis->__hwresGetMemInterMapParams__ = &__nvoc_thunk_Memory_hwresGetMemInterMapParams; + + pThis->__hwresGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_hwresGetMemoryMappingDescriptor; + + pThis->__hwresGetMapAddrSpace__ = &__nvoc_thunk_Memory_hwresGetMapAddrSpace; + + pThis->__hwresShareCallback__ = &__nvoc_thunk_RmResource_hwresShareCallback; + + pThis->__hwresControlFilter__ = &__nvoc_thunk_RsResource_hwresControlFilter; + + pThis->__hwresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_hwresAddAdditionalDependants; + + pThis->__hwresGetRefCount__ = &__nvoc_thunk_RsResource_hwresGetRefCount; + + pThis->__hwresMapTo__ = &__nvoc_thunk_RsResource_hwresMapTo; + + pThis->__hwresControl_Prologue__ = &__nvoc_thunk_RmResource_hwresControl_Prologue; + + pThis->__hwresIsReady__ = &__nvoc_thunk_Memory_hwresIsReady; + + pThis->__hwresCheckCopyPermissions__ = &__nvoc_thunk_Memory_hwresCheckCopyPermissions; + + pThis->__hwresPreDestruct__ = &__nvoc_thunk_RsResource_hwresPreDestruct; 
+ + pThis->__hwresUnmapFrom__ = &__nvoc_thunk_RsResource_hwresUnmapFrom; + + pThis->__hwresControl_Epilogue__ = &__nvoc_thunk_RmResource_hwresControl_Epilogue; + + pThis->__hwresControlLookup__ = &__nvoc_thunk_RsResource_hwresControlLookup; + + pThis->__hwresMap__ = &__nvoc_thunk_Memory_hwresMap; + + pThis->__hwresAccessCallback__ = &__nvoc_thunk_RmResource_hwresAccessCallback; +} + +void __nvoc_init_funcTable_MemoryHwResources(MemoryHwResources *pThis) { + __nvoc_init_funcTable_MemoryHwResources_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_MemoryHwResources(MemoryHwResources *pThis) { + pThis->__nvoc_pbase_MemoryHwResources = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_MemoryHwResources(pThis); +} + +NV_STATUS __nvoc_objCreate_MemoryHwResources(MemoryHwResources **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + MemoryHwResources *pThis; + + pThis = portMemAllocNonPaged(sizeof(MemoryHwResources)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MemoryHwResources)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MemoryHwResources); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_MemoryHwResources(pThis); + status = __nvoc_ctor_MemoryHwResources(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_MemoryHwResources_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MemoryHwResources_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MemoryHwResources(MemoryHwResources **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_MemoryHwResources(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_hw_resources_nvoc.h b/src/nvidia/generated/g_hw_resources_nvoc.h new file mode 100644 index 000000000..e5c0ca1da --- /dev/null +++ b/src/nvidia/generated/g_hw_resources_nvoc.h @@ -0,0 +1,226 @@ +#ifndef _G_HW_RESOURCES_NVOC_H_ +#define _G_HW_RESOURCES_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_hw_resources_nvoc.h" + +#ifndef _HW_RESOURCES_H_ +#define _HW_RESOURCES_H_ + +#include "mem_mgr/mem.h" + +/*! + * Allocator for normal virtual, video and system memory + */ +#ifdef NVOC_HW_RESOURCES_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct MemoryHwResources { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct MemoryHwResources *__nvoc_pbase_MemoryHwResources; + NvBool (*__hwresCanCopy__)(struct MemoryHwResources *); + NV_STATUS (*__hwresCheckMemInterUnmap__)(struct MemoryHwResources *, NvBool); + NV_STATUS (*__hwresControl__)(struct MemoryHwResources *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hwresUnmap__)(struct MemoryHwResources *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__hwresGetMemInterMapParams__)(struct MemoryHwResources *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__hwresGetMemoryMappingDescriptor__)(struct MemoryHwResources *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__hwresGetMapAddrSpace__)(struct MemoryHwResources *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__hwresShareCallback__)(struct MemoryHwResources *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__hwresControlFilter__)(struct MemoryHwResources *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__hwresAddAdditionalDependants__)(struct RsClient *, struct MemoryHwResources *, RsResourceRef *); + NvU32 (*__hwresGetRefCount__)(struct MemoryHwResources *); + NV_STATUS (*__hwresMapTo__)(struct MemoryHwResources *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__hwresControl_Prologue__)(struct MemoryHwResources *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hwresIsReady__)(struct MemoryHwResources *); + NV_STATUS (*__hwresCheckCopyPermissions__)(struct MemoryHwResources *, struct OBJGPU *, NvHandle); + void (*__hwresPreDestruct__)(struct MemoryHwResources *); + NV_STATUS (*__hwresUnmapFrom__)(struct MemoryHwResources *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__hwresControl_Epilogue__)(struct MemoryHwResources *, CALL_CONTEXT *, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__hwresControlLookup__)(struct MemoryHwResources *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__hwresMap__)(struct MemoryHwResources *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__hwresAccessCallback__)(struct MemoryHwResources *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_MemoryHwResources_TYPEDEF__ +#define __NVOC_CLASS_MemoryHwResources_TYPEDEF__ +typedef struct MemoryHwResources MemoryHwResources; +#endif /* __NVOC_CLASS_MemoryHwResources_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryHwResources +#define __nvoc_class_id_MemoryHwResources 0x9a2a71 +#endif /* __nvoc_class_id_MemoryHwResources */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryHwResources; + +#define __staticCast_MemoryHwResources(pThis) \ + ((pThis)->__nvoc_pbase_MemoryHwResources) + +#ifdef __nvoc_hw_resources_h_disabled +#define __dynamicCast_MemoryHwResources(pThis) ((MemoryHwResources*)NULL) +#else //__nvoc_hw_resources_h_disabled +#define __dynamicCast_MemoryHwResources(pThis) \ + ((MemoryHwResources*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MemoryHwResources))) +#endif //__nvoc_hw_resources_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_MemoryHwResources(MemoryHwResources**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MemoryHwResources(MemoryHwResources**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_MemoryHwResources(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_MemoryHwResources((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define hwresCanCopy(pMemoryHwResources) hwresCanCopy_DISPATCH(pMemoryHwResources) +#define hwresCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) hwresCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define hwresControl(pMemory, pCallContext, pParams) hwresControl_DISPATCH(pMemory, pCallContext, pParams) +#define hwresUnmap(pMemory, pCallContext, pCpuMapping) hwresUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define hwresGetMemInterMapParams(pMemory, pParams) hwresGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define hwresGetMemoryMappingDescriptor(pMemory, ppMemDesc) hwresGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define hwresGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) hwresGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define hwresShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) hwresShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define hwresControlFilter(pResource, pCallContext, pParams) hwresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define hwresAddAdditionalDependants(pClient, pResource, pReference) hwresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define hwresGetRefCount(pResource) hwresGetRefCount_DISPATCH(pResource) +#define hwresMapTo(pResource, pParams) hwresMapTo_DISPATCH(pResource, pParams) +#define hwresControl_Prologue(pResource, pCallContext, pParams) hwresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define hwresIsReady(pMemory) hwresIsReady_DISPATCH(pMemory) +#define hwresCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) hwresCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define 
hwresPreDestruct(pResource) hwresPreDestruct_DISPATCH(pResource) +#define hwresUnmapFrom(pResource, pParams) hwresUnmapFrom_DISPATCH(pResource, pParams) +#define hwresControl_Epilogue(pResource, pCallContext, pParams) hwresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define hwresControlLookup(pResource, pParams, ppEntry) hwresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define hwresMap(pMemory, pCallContext, pParams, pCpuMapping) hwresMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define hwresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) hwresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool hwresCanCopy_IMPL(struct MemoryHwResources *pMemoryHwResources); + +static inline NvBool hwresCanCopy_DISPATCH(struct MemoryHwResources *pMemoryHwResources) { + return pMemoryHwResources->__hwresCanCopy__(pMemoryHwResources); +} + +static inline NV_STATUS hwresCheckMemInterUnmap_DISPATCH(struct MemoryHwResources *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__hwresCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS hwresControl_DISPATCH(struct MemoryHwResources *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__hwresControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS hwresUnmap_DISPATCH(struct MemoryHwResources *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__hwresUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS hwresGetMemInterMapParams_DISPATCH(struct MemoryHwResources *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__hwresGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS hwresGetMemoryMappingDescriptor_DISPATCH(struct MemoryHwResources *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__hwresGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS hwresGetMapAddrSpace_DISPATCH(struct MemoryHwResources *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__hwresGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool hwresShareCallback_DISPATCH(struct MemoryHwResources *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__hwresShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS hwresControlFilter_DISPATCH(struct MemoryHwResources *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__hwresControlFilter__(pResource, pCallContext, pParams); +} + +static inline void hwresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct MemoryHwResources *pResource, RsResourceRef *pReference) { + pResource->__hwresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 hwresGetRefCount_DISPATCH(struct MemoryHwResources *pResource) { + return pResource->__hwresGetRefCount__(pResource); +} + +static inline NV_STATUS hwresMapTo_DISPATCH(struct MemoryHwResources *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__hwresMapTo__(pResource, pParams); +} + +static inline NV_STATUS hwresControl_Prologue_DISPATCH(struct MemoryHwResources *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
pResource->__hwresControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS hwresIsReady_DISPATCH(struct MemoryHwResources *pMemory) { + return pMemory->__hwresIsReady__(pMemory); +} + +static inline NV_STATUS hwresCheckCopyPermissions_DISPATCH(struct MemoryHwResources *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__hwresCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void hwresPreDestruct_DISPATCH(struct MemoryHwResources *pResource) { + pResource->__hwresPreDestruct__(pResource); +} + +static inline NV_STATUS hwresUnmapFrom_DISPATCH(struct MemoryHwResources *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__hwresUnmapFrom__(pResource, pParams); +} + +static inline void hwresControl_Epilogue_DISPATCH(struct MemoryHwResources *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__hwresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS hwresControlLookup_DISPATCH(struct MemoryHwResources *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__hwresControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS hwresMap_DISPATCH(struct MemoryHwResources *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__hwresMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool hwresAccessCallback_DISPATCH(struct MemoryHwResources *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__hwresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS hwresConstruct_IMPL(struct MemoryHwResources *arg_pMemoryHwResources, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_hwresConstruct(arg_pMemoryHwResources, arg_pCallContext, arg_pParams) hwresConstruct_IMPL(arg_pMemoryHwResources, arg_pCallContext, arg_pParams) +void hwresDestruct_IMPL(struct MemoryHwResources *pMemoryHwResources); +#define __nvoc_hwresDestruct(pMemoryHwResources) hwresDestruct_IMPL(pMemoryHwResources) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HW_RESOURCES_NVOC_H_ diff --git a/src/nvidia/generated/g_hypervisor_nvoc.h b/src/nvidia/generated/g_hypervisor_nvoc.h new file mode 100644 index 000000000..29f00a3ac --- /dev/null +++ b/src/nvidia/generated/g_hypervisor_nvoc.h @@ -0,0 +1,137 @@ +#ifndef _G_HYPERVISOR_NVOC_H_ +#define _G_HYPERVISOR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_hypervisor_nvoc.h" + +#ifndef HYPERVISOR_H +#define HYPERVISOR_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: hypervisor.h * +* Defines and structures used for the hypervisor object. * +\***************************************************************************/ + +#include "core/core.h" +#include "nvoc/utility.h" +#include "nv-hypervisor.h" +#include "mem_mgr/mem.h" + +typedef struct OBJHYPERVISOR *POBJHYPERVISOR; + +#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +typedef struct OBJHYPERVISOR OBJHYPERVISOR; +#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHYPERVISOR +#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba +#endif /* __nvoc_class_id_OBJHYPERVISOR */ + + +typedef struct HOST_VGPU_DEVICE HOST_VGPU_DEVICE; + +#ifdef NVOC_HYPERVISOR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJHYPERVISOR { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJHYPERVISOR *__nvoc_pbase_OBJHYPERVISOR; + NvBool bDetected; + NvBool bIsHVMGuest; + HYPERVISOR_TYPE type; + NvBool bIsHypervHost; + NvBool bIsHypervVgpuSupported; +}; + +#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +typedef struct OBJHYPERVISOR OBJHYPERVISOR; +#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHYPERVISOR +#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba +#endif /* __nvoc_class_id_OBJHYPERVISOR */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHYPERVISOR; + +#define __staticCast_OBJHYPERVISOR(pThis) \ + ((pThis)->__nvoc_pbase_OBJHYPERVISOR) + +#ifdef __nvoc_hypervisor_h_disabled +#define __dynamicCast_OBJHYPERVISOR(pThis) ((OBJHYPERVISOR*)NULL) +#else //__nvoc_hypervisor_h_disabled +#define __dynamicCast_OBJHYPERVISOR(pThis) \ + ((OBJHYPERVISOR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJHYPERVISOR))) +#endif //__nvoc_hypervisor_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJHYPERVISOR(OBJHYPERVISOR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJHYPERVISOR(OBJHYPERVISOR**, Dynamic*, NvU32); +#define __objCreate_OBJHYPERVISOR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJHYPERVISOR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +static inline NvBool hypervisorIsVgxHyper_491d52(void) { + return ((NvBool)(0 != 0)); +} + +#define hypervisorIsVgxHyper() hypervisorIsVgxHyper_491d52() +#define hypervisorIsVgxHyper_HAL() hypervisorIsVgxHyper() + +static inline NvBool hypervisorCheckForAdminAccess(NvHandle hClient, NvU32 rmCtrlId) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool hypervisorCheckForObjectAccess(NvHandle hClient) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool hypervisorCheckForGspOffloadAccess(POBJGPU pGpu, NvU32 rmCtrlId) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool 
hypervisorIsType(HYPERVISOR_TYPE hyperType) { + return ((NvBool)(0 != 0)); +} + +#undef PRIVATE_FIELD + + +#endif // HYPERVISOR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_HYPERVISOR_NVOC_H_ diff --git a/src/nvidia/generated/g_i2c_api_nvoc.c b/src/nvidia/generated/g_i2c_api_nvoc.c new file mode 100644 index 000000000..834684c15 --- /dev/null +++ b/src/nvidia/generated/g_i2c_api_nvoc.c @@ -0,0 +1,432 @@ +#define NVOC_I2C_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_i2c_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xceb8f6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_I2cApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_I2cApi(I2cApi*); +void __nvoc_init_funcTable_I2cApi(I2cApi*); +NV_STATUS __nvoc_ctor_I2cApi(I2cApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_I2cApi(I2cApi*); +void __nvoc_dtor_I2cApi(I2cApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_I2cApi; + +static const struct NVOC_RTTI __nvoc_rtti_I2cApi_I2cApi = { + /*pClassDef=*/ &__nvoc_class_def_I2cApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_I2cApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_I2cApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(I2cApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_I2cApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(I2cApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_I2cApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(I2cApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_I2cApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(I2cApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_I2cApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(I2cApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_I2cApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_I2cApi_I2cApi, + &__nvoc_rtti_I2cApi_GpuResource, + &__nvoc_rtti_I2cApi_RmResource, + &__nvoc_rtti_I2cApi_RmResourceCommon, + &__nvoc_rtti_I2cApi_RsResource, + &__nvoc_rtti_I2cApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_I2cApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(I2cApi), + /*classId=*/ classId(I2cApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "I2cApi", +#endif + }, + /*objCreatefn=*/ 
(NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_I2cApi, + /*pCastInfo=*/ &__nvoc_castinfo_I2cApi, + /*pExportInfo=*/ &__nvoc_export_info_I2cApi +}; + +static NvBool __nvoc_thunk_GpuResource_i2capiShareCallback(struct I2cApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_I2cApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_i2capiControl(struct I2cApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_I2cApi_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_i2capiUnmap(struct I2cApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_I2cApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_i2capiGetMemInterMapParams(struct I2cApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_I2cApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_i2capiGetMemoryMappingDescriptor(struct I2cApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_I2cApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_i2capiGetMapAddrSpace(struct I2cApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_I2cApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_i2capiGetInternalObjectHandle(struct I2cApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_I2cApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_i2capiControlFilter(struct I2cApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_i2capiAddAdditionalDependants(struct RsClient *pClient, struct I2cApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_i2capiGetRefCount(struct I2cApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_i2capiCheckMemInterUnmap(struct I2cApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_I2cApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_i2capiMapTo(struct I2cApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return 
resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_i2capiControl_Prologue(struct I2cApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_i2capiGetRegBaseOffsetAndSize(struct I2cApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_I2cApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_i2capiCanCopy(struct I2cApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_i2capiInternalControlForward(struct I2cApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_I2cApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_i2capiPreDestruct(struct I2cApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_i2capiUnmapFrom(struct I2cApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_i2capiControl_Epilogue(struct I2cApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_i2capiControlLookup(struct I2cApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_i2capiMap(struct I2cApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_I2cApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_i2capiAccessCallback(struct I2cApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_I2cApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_I2cApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) i2capiCtrlCmdI2cGetPortInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 
0x402c0101u, + /*paramSize=*/ sizeof(NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_I2cApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "i2capiCtrlCmdI2cGetPortInfo" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) i2capiCtrlCmdI2cIndexed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x402c0102u, + /*paramSize=*/ sizeof(NV402C_CTRL_I2C_INDEXED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_I2cApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "i2capiCtrlCmdI2cIndexed" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) i2capiCtrlCmdI2cGetPortSpeed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x402c0103u, + /*paramSize=*/ sizeof(NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_I2cApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "i2capiCtrlCmdI2cGetPortSpeed" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) i2capiCtrlCmdI2cTableGetDevInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x402c0104u, + /*paramSize=*/ sizeof(NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_I2cApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "i2capiCtrlCmdI2cTableGetDevInfo" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) i2capiCtrlCmdI2cTransaction_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x402c0105u, + /*paramSize=*/ sizeof(NV402C_CTRL_I2C_TRANSACTION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_I2cApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "i2capiCtrlCmdI2cTransaction" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_I2cApi = +{ + /*numEntries=*/ 5, + /*pExportEntries=*/ __nvoc_exported_method_def_I2cApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_I2cApi(I2cApi *pThis) { + __nvoc_i2capiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_I2cApi(I2cApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_I2cApi(I2cApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_I2cApi_fail_GpuResource; + __nvoc_init_dataField_I2cApi(pThis); + + status = __nvoc_i2capiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_I2cApi_fail__init; + goto __nvoc_ctor_I2cApi_exit; // Success + +__nvoc_ctor_I2cApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_I2cApi_fail_GpuResource: +__nvoc_ctor_I2cApi_exit: + + return status; 
+} + +static void __nvoc_init_funcTable_I2cApi_1(I2cApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__i2capiCtrlCmdI2cGetPortInfo__ = &i2capiCtrlCmdI2cGetPortInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__i2capiCtrlCmdI2cIndexed__ = &i2capiCtrlCmdI2cIndexed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__i2capiCtrlCmdI2cGetPortSpeed__ = &i2capiCtrlCmdI2cGetPortSpeed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__i2capiCtrlCmdI2cTableGetDevInfo__ = &i2capiCtrlCmdI2cTableGetDevInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__i2capiCtrlCmdI2cTransaction__ = &i2capiCtrlCmdI2cTransaction_IMPL; +#endif + + pThis->__i2capiShareCallback__ = &__nvoc_thunk_GpuResource_i2capiShareCallback; + + pThis->__i2capiControl__ = &__nvoc_thunk_GpuResource_i2capiControl; + + pThis->__i2capiUnmap__ = &__nvoc_thunk_GpuResource_i2capiUnmap; + + pThis->__i2capiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_i2capiGetMemInterMapParams; + + pThis->__i2capiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_i2capiGetMemoryMappingDescriptor; + + pThis->__i2capiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_i2capiGetMapAddrSpace; + + pThis->__i2capiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_i2capiGetInternalObjectHandle; + + pThis->__i2capiControlFilter__ = &__nvoc_thunk_RsResource_i2capiControlFilter; + + pThis->__i2capiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_i2capiAddAdditionalDependants; + + pThis->__i2capiGetRefCount__ = &__nvoc_thunk_RsResource_i2capiGetRefCount; + + pThis->__i2capiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_i2capiCheckMemInterUnmap; + + pThis->__i2capiMapTo__ = &__nvoc_thunk_RsResource_i2capiMapTo; + + pThis->__i2capiControl_Prologue__ = &__nvoc_thunk_RmResource_i2capiControl_Prologue; + + pThis->__i2capiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_i2capiGetRegBaseOffsetAndSize; + + pThis->__i2capiCanCopy__ = &__nvoc_thunk_RsResource_i2capiCanCopy; + + pThis->__i2capiInternalControlForward__ = &__nvoc_thunk_GpuResource_i2capiInternalControlForward; + + pThis->__i2capiPreDestruct__ = &__nvoc_thunk_RsResource_i2capiPreDestruct; + + pThis->__i2capiUnmapFrom__ = &__nvoc_thunk_RsResource_i2capiUnmapFrom; + + pThis->__i2capiControl_Epilogue__ = &__nvoc_thunk_RmResource_i2capiControl_Epilogue; + + pThis->__i2capiControlLookup__ = &__nvoc_thunk_RsResource_i2capiControlLookup; + + pThis->__i2capiMap__ = &__nvoc_thunk_GpuResource_i2capiMap; + + pThis->__i2capiAccessCallback__ = &__nvoc_thunk_RmResource_i2capiAccessCallback; +} + +void __nvoc_init_funcTable_I2cApi(I2cApi *pThis) { + __nvoc_init_funcTable_I2cApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_I2cApi(I2cApi *pThis) { + pThis->__nvoc_pbase_I2cApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_I2cApi(pThis); +} + 
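(Editorial note, not part of the diff.) The generated constructor, destructor, and __nvoc_init_funcTable_* routines above implement NVOC's object model in plain C: every exported control command and every method inherited from GpuResource/RmResource/RsResource is a function pointer stored on the object itself, filled in during initialization and invoked through the *_DISPATCH macros declared in the matching header. The following is a minimal, self-contained sketch of that pattern under stated assumptions: all names in it (MiniObj, miniCtrlGetInfo, miniInitFuncTable) are hypothetical stand-ins, not the real RM types, so it illustrates the mechanism only, not the actual I2cApi class.

/*
 * Illustrative sketch only: a simplified mock of the NVOC dispatch pattern,
 * in which init code fills a per-object table of function pointers and the
 * public macro routes calls through it. Hypothetical names throughout.
 */
#include <stdio.h>

typedef int STATUS;
#define OK 0

typedef struct MiniObj MiniObj;
struct MiniObj {
    /* function-pointer slot, analogous to __i2capiCtrlCmdI2cGetPortInfo__ */
    STATUS (*__miniCtrlGetInfo__)(MiniObj *, int *pOutValue);
};

/* _IMPL: the concrete implementation that the generated init code points at */
static STATUS miniCtrlGetInfo_IMPL(MiniObj *pThis, int *pOutValue)
{
    (void)pThis;
    *pOutValue = 42;
    return OK;
}

/* analogous to __nvoc_init_funcTable_I2cApi_1(): populate the table */
static void miniInitFuncTable(MiniObj *pThis)
{
    pThis->__miniCtrlGetInfo__ = &miniCtrlGetInfo_IMPL;
}

/* _DISPATCH: the public entry point calls through the table, as the header macros do */
#define miniCtrlGetInfo(pThis, pOut) ((pThis)->__miniCtrlGetInfo__((pThis), (pOut)))

int main(void)
{
    MiniObj obj;
    int value = 0;

    miniInitFuncTable(&obj);
    if (miniCtrlGetInfo(&obj, &value) == OK)
        printf("value = %d\n", value);
    return 0;
}

The Intr class later in this diff layers HAL selection onto the same mechanism: __nvoc_init_funcTable_Intr_1() tests the ChipHal variant-index bitmask and stores the matching per-chip implementation (for example intrReadRegTopEnSet_TU102 versus intrReadRegTopEnSet_GA102) into each slot before any call is dispatched.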
+NV_STATUS __nvoc_objCreate_I2cApi(I2cApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + I2cApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(I2cApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(I2cApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_I2cApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_I2cApi(pThis); + status = __nvoc_ctor_I2cApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_I2cApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_I2cApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_I2cApi(I2cApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_I2cApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_i2c_api_nvoc.h b/src/nvidia/generated/g_i2c_api_nvoc.h new file mode 100644 index 000000000..ebb1d1e3f --- /dev/null +++ b/src/nvidia/generated/g_i2c_api_nvoc.h @@ -0,0 +1,271 @@ +#ifndef _G_I2C_API_NVOC_H_ +#define _G_I2C_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_i2c_api_nvoc.h" + +#ifndef _I2CAPI_H_ +#define _I2CAPI_H_ + +#include "gpu/gpu_resource.h" + +#include "ctrl/ctrl402c.h" + +/*! 
+ * RM internal class representing NV40_I2C (child of SubDevice) + */ +#ifdef NVOC_I2C_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct I2cApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct I2cApi *__nvoc_pbase_I2cApi; + NV_STATUS (*__i2capiCtrlCmdI2cGetPortInfo__)(struct I2cApi *, NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS *); + NV_STATUS (*__i2capiCtrlCmdI2cIndexed__)(struct I2cApi *, NV402C_CTRL_I2C_INDEXED_PARAMS *); + NV_STATUS (*__i2capiCtrlCmdI2cGetPortSpeed__)(struct I2cApi *, NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS *); + NV_STATUS (*__i2capiCtrlCmdI2cTableGetDevInfo__)(struct I2cApi *, NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS *); + NV_STATUS (*__i2capiCtrlCmdI2cTransaction__)(struct I2cApi *, NV402C_CTRL_I2C_TRANSACTION_PARAMS *); + NvBool (*__i2capiShareCallback__)(struct I2cApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__i2capiControl__)(struct I2cApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__i2capiUnmap__)(struct I2cApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__i2capiGetMemInterMapParams__)(struct I2cApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__i2capiGetMemoryMappingDescriptor__)(struct I2cApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__i2capiGetMapAddrSpace__)(struct I2cApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__i2capiGetInternalObjectHandle__)(struct I2cApi *); + NV_STATUS (*__i2capiControlFilter__)(struct I2cApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__i2capiAddAdditionalDependants__)(struct RsClient *, struct I2cApi *, RsResourceRef *); + NvU32 (*__i2capiGetRefCount__)(struct I2cApi *); + NV_STATUS (*__i2capiCheckMemInterUnmap__)(struct I2cApi *, NvBool); + NV_STATUS (*__i2capiMapTo__)(struct I2cApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__i2capiControl_Prologue__)(struct I2cApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__i2capiGetRegBaseOffsetAndSize__)(struct I2cApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__i2capiCanCopy__)(struct I2cApi *); + NV_STATUS (*__i2capiInternalControlForward__)(struct I2cApi *, NvU32, void *, NvU32); + void (*__i2capiPreDestruct__)(struct I2cApi *); + NV_STATUS (*__i2capiUnmapFrom__)(struct I2cApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__i2capiControl_Epilogue__)(struct I2cApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__i2capiControlLookup__)(struct I2cApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__i2capiMap__)(struct I2cApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__i2capiAccessCallback__)(struct I2cApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_I2cApi_TYPEDEF__ +#define __NVOC_CLASS_I2cApi_TYPEDEF__ +typedef struct I2cApi I2cApi; +#endif /* __NVOC_CLASS_I2cApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_I2cApi +#define __nvoc_class_id_I2cApi 0xceb8f6 +#endif /* __nvoc_class_id_I2cApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_I2cApi; + +#define 
__staticCast_I2cApi(pThis) \ + ((pThis)->__nvoc_pbase_I2cApi) + +#ifdef __nvoc_i2c_api_h_disabled +#define __dynamicCast_I2cApi(pThis) ((I2cApi*)NULL) +#else //__nvoc_i2c_api_h_disabled +#define __dynamicCast_I2cApi(pThis) \ + ((I2cApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(I2cApi))) +#endif //__nvoc_i2c_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_I2cApi(I2cApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_I2cApi(I2cApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_I2cApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_I2cApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define i2capiCtrlCmdI2cGetPortInfo(pI2cApi, pParams) i2capiCtrlCmdI2cGetPortInfo_DISPATCH(pI2cApi, pParams) +#define i2capiCtrlCmdI2cIndexed(pI2cApi, pParams) i2capiCtrlCmdI2cIndexed_DISPATCH(pI2cApi, pParams) +#define i2capiCtrlCmdI2cGetPortSpeed(pI2cApi, pParams) i2capiCtrlCmdI2cGetPortSpeed_DISPATCH(pI2cApi, pParams) +#define i2capiCtrlCmdI2cTableGetDevInfo(pI2cApi, pParams) i2capiCtrlCmdI2cTableGetDevInfo_DISPATCH(pI2cApi, pParams) +#define i2capiCtrlCmdI2cTransaction(pI2cApi, pParams) i2capiCtrlCmdI2cTransaction_DISPATCH(pI2cApi, pParams) +#define i2capiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) i2capiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define i2capiControl(pGpuResource, pCallContext, pParams) i2capiControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define i2capiUnmap(pGpuResource, pCallContext, pCpuMapping) i2capiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define i2capiGetMemInterMapParams(pRmResource, pParams) i2capiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define i2capiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) i2capiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define i2capiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) i2capiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define i2capiGetInternalObjectHandle(pGpuResource) i2capiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define i2capiControlFilter(pResource, pCallContext, pParams) i2capiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define i2capiAddAdditionalDependants(pClient, pResource, pReference) i2capiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define i2capiGetRefCount(pResource) i2capiGetRefCount_DISPATCH(pResource) +#define i2capiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) i2capiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define i2capiMapTo(pResource, pParams) i2capiMapTo_DISPATCH(pResource, pParams) +#define i2capiControl_Prologue(pResource, pCallContext, pParams) i2capiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define i2capiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) i2capiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define i2capiCanCopy(pResource) i2capiCanCopy_DISPATCH(pResource) +#define i2capiInternalControlForward(pGpuResource, command, pParams, size) i2capiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define i2capiPreDestruct(pResource) i2capiPreDestruct_DISPATCH(pResource) +#define i2capiUnmapFrom(pResource, pParams) i2capiUnmapFrom_DISPATCH(pResource, pParams) +#define 
i2capiControl_Epilogue(pResource, pCallContext, pParams) i2capiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define i2capiControlLookup(pResource, pParams, ppEntry) i2capiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define i2capiMap(pGpuResource, pCallContext, pParams, pCpuMapping) i2capiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define i2capiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) i2capiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS i2capiCtrlCmdI2cGetPortInfo_IMPL(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS *pParams); + +static inline NV_STATUS i2capiCtrlCmdI2cGetPortInfo_DISPATCH(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_GET_PORT_INFO_PARAMS *pParams) { + return pI2cApi->__i2capiCtrlCmdI2cGetPortInfo__(pI2cApi, pParams); +} + +NV_STATUS i2capiCtrlCmdI2cIndexed_IMPL(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_INDEXED_PARAMS *pParams); + +static inline NV_STATUS i2capiCtrlCmdI2cIndexed_DISPATCH(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_INDEXED_PARAMS *pParams) { + return pI2cApi->__i2capiCtrlCmdI2cIndexed__(pI2cApi, pParams); +} + +NV_STATUS i2capiCtrlCmdI2cGetPortSpeed_IMPL(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS *pParams); + +static inline NV_STATUS i2capiCtrlCmdI2cGetPortSpeed_DISPATCH(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_GET_PORT_SPEED_PARAMS *pParams) { + return pI2cApi->__i2capiCtrlCmdI2cGetPortSpeed__(pI2cApi, pParams); +} + +NV_STATUS i2capiCtrlCmdI2cTableGetDevInfo_IMPL(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS *pParams); + +static inline NV_STATUS i2capiCtrlCmdI2cTableGetDevInfo_DISPATCH(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_TABLE_GET_DEV_INFO_PARAMS *pParams) { + return pI2cApi->__i2capiCtrlCmdI2cTableGetDevInfo__(pI2cApi, pParams); +} + +NV_STATUS i2capiCtrlCmdI2cTransaction_IMPL(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_TRANSACTION_PARAMS *pParams); + +static inline NV_STATUS i2capiCtrlCmdI2cTransaction_DISPATCH(struct I2cApi *pI2cApi, NV402C_CTRL_I2C_TRANSACTION_PARAMS *pParams) { + return pI2cApi->__i2capiCtrlCmdI2cTransaction__(pI2cApi, pParams); +} + +static inline NvBool i2capiShareCallback_DISPATCH(struct I2cApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__i2capiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS i2capiControl_DISPATCH(struct I2cApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__i2capiControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS i2capiUnmap_DISPATCH(struct I2cApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__i2capiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS i2capiGetMemInterMapParams_DISPATCH(struct I2cApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__i2capiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS i2capiGetMemoryMappingDescriptor_DISPATCH(struct I2cApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__i2capiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS i2capiGetMapAddrSpace_DISPATCH(struct I2cApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + 
return pGpuResource->__i2capiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle i2capiGetInternalObjectHandle_DISPATCH(struct I2cApi *pGpuResource) { + return pGpuResource->__i2capiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS i2capiControlFilter_DISPATCH(struct I2cApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__i2capiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void i2capiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct I2cApi *pResource, RsResourceRef *pReference) { + pResource->__i2capiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 i2capiGetRefCount_DISPATCH(struct I2cApi *pResource) { + return pResource->__i2capiGetRefCount__(pResource); +} + +static inline NV_STATUS i2capiCheckMemInterUnmap_DISPATCH(struct I2cApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__i2capiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS i2capiMapTo_DISPATCH(struct I2cApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__i2capiMapTo__(pResource, pParams); +} + +static inline NV_STATUS i2capiControl_Prologue_DISPATCH(struct I2cApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__i2capiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS i2capiGetRegBaseOffsetAndSize_DISPATCH(struct I2cApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__i2capiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool i2capiCanCopy_DISPATCH(struct I2cApi *pResource) { + return pResource->__i2capiCanCopy__(pResource); +} + +static inline NV_STATUS i2capiInternalControlForward_DISPATCH(struct I2cApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__i2capiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void i2capiPreDestruct_DISPATCH(struct I2cApi *pResource) { + pResource->__i2capiPreDestruct__(pResource); +} + +static inline NV_STATUS i2capiUnmapFrom_DISPATCH(struct I2cApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__i2capiUnmapFrom__(pResource, pParams); +} + +static inline void i2capiControl_Epilogue_DISPATCH(struct I2cApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__i2capiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS i2capiControlLookup_DISPATCH(struct I2cApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__i2capiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS i2capiMap_DISPATCH(struct I2cApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__i2capiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool i2capiAccessCallback_DISPATCH(struct I2cApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__i2capiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS i2capiConstruct_IMPL(struct I2cApi *arg_pI2cApi, struct 
CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_i2capiConstruct(arg_pI2cApi, arg_pCallContext, arg_pParams) i2capiConstruct_IMPL(arg_pI2cApi, arg_pCallContext, arg_pParams) +void i2capiDestruct_IMPL(struct I2cApi *pI2cApi); +#define __nvoc_i2capiDestruct(pI2cApi) i2capiDestruct_IMPL(pI2cApi) +#undef PRIVATE_FIELD + +#endif // _I2CAPI_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_I2C_API_NVOC_H_ diff --git a/src/nvidia/generated/g_intr_nvoc.c b/src/nvidia/generated/g_intr_nvoc.c new file mode 100644 index 000000000..1e71b0622 --- /dev/null +++ b/src/nvidia/generated/g_intr_nvoc.c @@ -0,0 +1,571 @@ +#define NVOC_INTR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_intr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xc06e44 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Intr; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_Intr(Intr*, RmHalspecOwner* ); +void __nvoc_init_funcTable_Intr(Intr*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_Intr(Intr*, RmHalspecOwner* ); +void __nvoc_init_dataField_Intr(Intr*, RmHalspecOwner* ); +void __nvoc_dtor_Intr(Intr*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Intr; + +static const struct NVOC_RTTI __nvoc_rtti_Intr_Intr = { + /*pClassDef=*/ &__nvoc_class_def_Intr, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Intr, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Intr_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Intr, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Intr_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Intr, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Intr = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_Intr_Intr, + &__nvoc_rtti_Intr_OBJENGSTATE, + &__nvoc_rtti_Intr_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Intr = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Intr), + /*classId=*/ classId(Intr), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Intr", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Intr, + /*pCastInfo=*/ &__nvoc_castinfo_Intr, + /*pExportInfo=*/ &__nvoc_export_info_Intr +}; + +static NV_STATUS __nvoc_thunk_Intr_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pIntr, ENGDESCRIPTOR arg0) { + return intrConstructEngine(pGpu, (struct Intr *)(((unsigned char *)pIntr) - __nvoc_rtti_Intr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_Intr_engstateStateInitUnlocked(OBJGPU *pGpu, struct OBJENGSTATE *pIntr) { + return intrStateInitUnlocked(pGpu, (struct Intr *)(((unsigned char *)pIntr) - __nvoc_rtti_Intr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_Intr_engstateStateInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pIntr) { + return intrStateInitLocked(pGpu, (struct Intr *)(((unsigned char *)pIntr) - __nvoc_rtti_Intr_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_Intr_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pIntr) { + intrStateDestroy(pGpu, (struct Intr 
*)(((unsigned char *)pIntr) - __nvoc_rtti_Intr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_Intr_engstateStateLoad(OBJGPU *pGpu, struct OBJENGSTATE *pIntr, NvU32 arg0) { + return intrStateLoad(pGpu, (struct Intr *)(((unsigned char *)pIntr) - __nvoc_rtti_Intr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_Intr_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pIntr, NvU32 arg0) { + return intrStateUnload(pGpu, (struct Intr *)(((unsigned char *)pIntr) - __nvoc_rtti_Intr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrReconcileTunableState(POBJGPU pGpu, struct Intr *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrStatePreLoad(POBJGPU pGpu, struct Intr *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrStatePostUnload(POBJGPU pGpu, struct Intr *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrStatePreUnload(POBJGPU pGpu, struct Intr *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_intrInitMissing(POBJGPU pGpu, struct Intr *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrStatePreInitLocked(POBJGPU pGpu, struct Intr *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrStatePreInitUnlocked(POBJGPU pGpu, struct Intr *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrGetTunableState(POBJGPU pGpu, struct Intr *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrCompareTunableState(POBJGPU pGpu, struct Intr *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_intrFreeTunableState(POBJGPU pGpu, struct Intr *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrStatePostLoad(POBJGPU pGpu, struct Intr *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrAllocTunableState(POBJGPU pGpu, struct Intr *pEngstate, void **ppTunableState) { + return 
engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_intrSetTunableState(POBJGPU pGpu, struct Intr *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_intrIsPresent(POBJGPU pGpu, struct Intr *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_Intr_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Intr = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_Intr(Intr *pThis) { + __nvoc_intrDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Intr(Intr *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_INTR_HOST_DRIVEN_ENGINES_REMOVED_FROM_PMC + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_INTR_HOST_DRIVEN_ENGINES_REMOVED_FROM_PMC, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_INTR_HOST_DRIVEN_ENGINES_REMOVED_FROM_PMC, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_INTR_READ_ONLY_EVEN_NUMBERED_INTR_LEAF_REGS + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->setProperty(pThis, PDB_PROP_INTR_READ_ONLY_EVEN_NUMBERED_INTR_LEAF_REGS, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_INTR_READ_ONLY_EVEN_NUMBERED_INTR_LEAF_REGS, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_INTR_ENUMERATIONS_ON_ENGINE_RESET + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_INTR_ENUMERATIONS_ON_ENGINE_RESET, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_INTR_ENUMERATIONS_ON_ENGINE_RESET, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_INTR_SIMPLIFIED_VBLANK_HANDLING_FOR_CTRL_TREE + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_INTR_SIMPLIFIED_VBLANK_HANDLING_FOR_CTRL_TREE, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, 
PDB_PROP_INTR_SIMPLIFIED_VBLANK_HANDLING_FOR_CTRL_TREE, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_INTR_MASK_SUPPORTED + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_INTR_MASK_SUPPORTED, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_INTR_MASK_SUPPORTED, ((NvBool)(0 != 0))); + } + + // Hal field -- bDefaultNonstallNotify + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bDefaultNonstallNotify = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bDefaultNonstallNotify = ((NvBool)(0 != 0)); + } + + pThis->bTablesPopulated = ((NvBool)(0 != 0)); + + pThis->numPhysicalEntries = 0; + + pThis->numKernelEntries = 0; +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_Intr(Intr *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_Intr_fail_OBJENGSTATE; + __nvoc_init_dataField_Intr(pThis, pRmhalspecowner); + goto __nvoc_ctor_Intr_exit; // Success + +__nvoc_ctor_Intr_fail_OBJENGSTATE: +__nvoc_ctor_Intr_exit: + + return status; +} + +static void __nvoc_init_funcTable_Intr_1(Intr *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__intrConstructEngine__ = &intrConstructEngine_IMPL; + + pThis->__intrStateInitUnlocked__ = &intrStateInitUnlocked_IMPL; + + pThis->__intrStateInitLocked__ = &intrStateInitLocked_IMPL; + + pThis->__intrStateDestroy__ = &intrStateDestroy_IMPL; + + // Hal function -- intrDecodeStallIntrEn + if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__intrDecodeStallIntrEn__ = &intrDecodeStallIntrEn_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrDecodeStallIntrEn__ = &intrDecodeStallIntrEn_4a4dee; + } + + // Hal function -- intrGetNonStallBaseVector + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__intrGetNonStallBaseVector__ = &intrGetNonStallBaseVector_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrGetNonStallBaseVector__ = &intrGetNonStallBaseVector_c067f9; + } + else if (0) + { + } + + // Hal 
function -- intrGetUvmSharedLeafEnDisableMask + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__intrGetUvmSharedLeafEnDisableMask__ = &intrGetUvmSharedLeafEnDisableMask_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrGetUvmSharedLeafEnDisableMask__ = &intrGetUvmSharedLeafEnDisableMask_GA100; + } + else if (0) + { + } + + // Hal function -- intrSetDisplayInterruptEnable + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrSetDisplayInterruptEnable__ = &intrSetDisplayInterruptEnable_TU102; + } + // default + else + { + pThis->__intrSetDisplayInterruptEnable__ = &intrSetDisplayInterruptEnable_b3696a; + } + + // Hal function -- intrReadRegTopEnSet + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__intrReadRegTopEnSet__ = &intrReadRegTopEnSet_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrReadRegTopEnSet__ = &intrReadRegTopEnSet_GA102; + } + else if (0) + { + } + + // Hal function -- intrWriteRegTopEnSet + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__intrWriteRegTopEnSet__ = &intrWriteRegTopEnSet_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrWriteRegTopEnSet__ = &intrWriteRegTopEnSet_GA102; + } + else if (0) + { + } + + // Hal function -- intrWriteRegTopEnClear + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__intrWriteRegTopEnClear__ = &intrWriteRegTopEnClear_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrWriteRegTopEnClear__ = &intrWriteRegTopEnClear_GA102; + } + else if (0) + { + } + + // Hal function -- intrSanityCheckEngineIntrStallVector + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrSanityCheckEngineIntrStallVector__ = &intrSanityCheckEngineIntrStallVector_GA100; + } + else if (0) + { + } + // default + else + { + pThis->__intrSanityCheckEngineIntrStallVector__ = &intrSanityCheckEngineIntrStallVector_b3696a; + } + + // Hal function -- intrSanityCheckEngineIntrNotificationVector + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrSanityCheckEngineIntrNotificationVector__ = &intrSanityCheckEngineIntrNotificationVector_GA100; + } + else if (0) + { + } + // default + else + { + 
pThis->__intrSanityCheckEngineIntrNotificationVector__ = &intrSanityCheckEngineIntrNotificationVector_b3696a; + } + + // Hal function -- intrStateLoad + if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrStateLoad__ = &intrStateLoad_TU102; + } + else if (0) + { + } + + // Hal function -- intrStateUnload + if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrStateUnload__ = &intrStateUnload_TU102; + } + else if (0) + { + } + + // Hal function -- intrSetIntrMask + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__intrSetIntrMask__ = &intrSetIntrMask_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrSetIntrMask__ = &intrSetIntrMask_46f6a7; + } + + // Hal function -- intrSetIntrEnInHw + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__intrSetIntrEnInHw__ = &intrSetIntrEnInHw_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrSetIntrEnInHw__ = &intrSetIntrEnInHw_d44104; + } + + // Hal function -- intrGetIntrEnFromHw + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__intrGetIntrEnFromHw__ = &intrGetIntrEnFromHw_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__intrGetIntrEnFromHw__ = &intrGetIntrEnFromHw_b2b553; + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_Intr_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitUnlocked__ = &__nvoc_thunk_Intr_engstateStateInitUnlocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_Intr_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_Intr_engstateStateDestroy; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_Intr_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_Intr_engstateStateUnload; + + pThis->__intrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_intrReconcileTunableState; + + pThis->__intrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_intrStatePreLoad; + + pThis->__intrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_intrStatePostUnload; + + pThis->__intrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_intrStatePreUnload; + + pThis->__intrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_intrInitMissing; + + pThis->__intrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_intrStatePreInitLocked; + + 
pThis->__intrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_intrStatePreInitUnlocked; + + pThis->__intrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_intrGetTunableState; + + pThis->__intrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_intrCompareTunableState; + + pThis->__intrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_intrFreeTunableState; + + pThis->__intrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_intrStatePostLoad; + + pThis->__intrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_intrAllocTunableState; + + pThis->__intrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_intrSetTunableState; + + pThis->__intrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_intrIsPresent; +} + +void __nvoc_init_funcTable_Intr(Intr *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_Intr_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_Intr(Intr *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_Intr = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_Intr(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_Intr(Intr **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + Intr *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(Intr)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Intr)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Intr); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_Intr(pThis, pRmhalspecowner); + status = __nvoc_ctor_Intr(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_Intr_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Intr_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Intr(Intr **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_Intr(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_intr_nvoc.h b/src/nvidia/generated/g_intr_nvoc.h new file mode 100644 index 000000000..312cdfb59 --- /dev/null +++ b/src/nvidia/generated/g_intr_nvoc.h @@ -0,0 +1,1886 @@ +#ifndef _G_INTR_NVOC_H_ +#define _G_INTR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_intr_nvoc.h" + +#ifndef INTR_H +#define INTR_H + +#include "gpu/gpu.h" +#include "gpu/eng_state.h" +#include "kernel/gpu/intrable/intrable.h" +#include "nvoc/utility.h" +#include "utils/nvbitvector.h" +#include "dev_ctrl_defines.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "kernel/gpu/intr/intr_service.h" + +// +// Interrupt Type +// +// Abstraction of the disabled/software/hardware enumeration in NV_PMC_INTR_EN_0_INTA +// !!! This enumeration must exactly match NV_PMC_INTR_EN_0_INTA !!! +// + +#define INTERRUPT_TYPE_DISABLED 0 +#define INTERRUPT_TYPE_HARDWARE 1 +#define INTERRUPT_TYPE_SOFTWARE 2 +#define INTERRUPT_TYPE_MULTI 3 +#define INTERRUPT_TYPE_MAX INTERRUPT_TYPE_MULTI + +#define INTERRUPT_MASK_DISABLED 0x00000000 +#define INTERRUPT_MASK_HARDWARE 0x7fffffff +#define INTERRUPT_MASK_SOFTWARE 0x80000000 +#define INTERRUPT_MASK_ENABLED 0xffffffff + +/** + * @brief Each entry corresponds to a top level interrupt + */ +typedef struct +{ + /** MC_ENGINE_IDX* value */ + NvU16 mcEngine; + /** Bit in top level PMC interrupt registers */ + NvU32 pmcIntrMask; + /** Interrupt vector in CTRL interrupt tree (Turing+). For non-host driven + * engines, this is their single interrupt vector at top level; while for + * host driven engines, this is their stalling interrupt vector + */ + NvU32 intrVector; + /** Nonstalling interrupt vector in CTRL interrupt tree (Turing+). Only + * valid for host driven engines. NV_INTR_VECTOR_INVALID signifies + * unavailable + */ + NvU32 intrVectorNonStall; + /** Boolean set to NV_TRUE if Nonstalling interrupt is disabled in CTRL + * interrupt tree (Turing+). This may change to Enum in future. We are + * moving towards the direction where GSP-RM is fully aware of MC interrupt + * table and it provisions a subset of entries to CPU RM. + */ + NvBool bDisableNonStall; +} INTR_TABLE_ENTRY; + +#define INTR_TABLE_MAX_INTRS_PER_ENTRY 4 + +// +// The max number of interrupts we can fit in the dynamically populated, +// but statically sized, interrupt table. 
+// +#define INTR_TABLE_MAX_STATIC_PHYSICAL_INTRS 17 +#define INTR_TABLE_MAX_STATIC_KERNEL_INTRS 17 + +#define POPULATE_INTR_TABLE(pTable, numEntries, localMcEngineIdxs, localIntrVectors, localCount, localMax) \ +NV_ASSERT(numEntries + localCount <= localMax); \ +for (i = 0; i < localCount; i++) \ +{ \ + (pTable)[(numEntries)].mcEngine = (localMcEngineIdxs)[i]; \ + (pTable)[(numEntries)].pmcIntrMask = NV_PMC_INTR_INVALID_MASK; \ + (pTable)[(numEntries)].intrVector = (intrVectors)[i]; \ + (pTable)[(numEntries)].intrVectorNonStall = NV_INTR_VECTOR_INVALID; \ + numEntries++; \ +} + +// Default value for intrStuckThreshold +#define INTR_STUCK_THRESHOLD 1000 + +#define INTR_TABLE_INIT_KERNEL (1 << 0) +#define INTR_TABLE_INIT_PHYSICAL (1 << 1) + +/** + * @brief This enum specifies the type of DPC node + * INTERRUPT_BASED_DPC: DPC queued for an interrupt source + * SPECIAL_DPC : DPC queued within processing of another interrupt + * source + * + * Currently only used on Fermi+. + */ +typedef enum +{ + INTERRUPT_BASED_DPC=0, + SPECIAL_DPC +} DPCTYPE; + +/** + * @brief This is a structure for a node on the DPC Queue + * dpctype: Type of DPC for processing + * dpcdata: Data required for dpc processing + * This union will contain dpctype specific data + * pNext : Pointer to the next DPC node + * + * Currently only used on Fermi+. + */ +typedef struct _DPCNODE +{ + DPCTYPE dpctype; + union _dpcdata + { + MC_ENGINE_BITVECTOR pendingEngines; + } dpcdata; + + struct _DPCNODE *pNext; +} DPCNODE; + +/** + * @brief This is a structure for the DPC Queue + * numEntries: Number of entries currently on DPC queue (debugging purpose) + * pFront : Front pointer for the queue + * pRear : Rear pointer for the queue + * + * Currently only used on Fermi+. + */ +typedef struct +{ + NvU32 numEntries; + DPCNODE *pFront; + DPCNODE *pRear; +} DPCQUEUE; + +// Data related to PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING +typedef struct +{ + NvU32 flags; + NvU32 cached; // Pascal+, to implement intr mask in SW. + MC_ENGINE_BITVECTOR engMaskUnblocked; + MC_ENGINE_BITVECTOR engMaskOrig; + MC_ENGINE_BITVECTOR engMaskIntrsSeen; + MC_ENGINE_BITVECTOR engMaskIntrsDisabled; +} INTR_MASK; + +// +// interrupt mask information used for lazy interrupt disable and interrupt +// masking for locking. 
+// +typedef struct +{ + NvU32 intrEnable; + MC_ENGINE_BITVECTOR intrMask; +} INTR_MASK_CTX; + +// +// IntrMask Locking Flag Defines +// +#define INTR_MASK_FLAGS_ISR_SKIP_MASK_UPDATE NVBIT(0) + +#ifdef NVOC_INTR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Intr { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct Intr *__nvoc_pbase_Intr; + NV_STATUS (*__intrConstructEngine__)(OBJGPU *, struct Intr *, ENGDESCRIPTOR); + NV_STATUS (*__intrStateInitUnlocked__)(OBJGPU *, struct Intr *); + NV_STATUS (*__intrStateInitLocked__)(OBJGPU *, struct Intr *); + void (*__intrStateDestroy__)(OBJGPU *, struct Intr *); + NvU32 (*__intrDecodeStallIntrEn__)(OBJGPU *, struct Intr *, NvU32); + NvU32 (*__intrGetNonStallBaseVector__)(OBJGPU *, struct Intr *); + NvU64 (*__intrGetUvmSharedLeafEnDisableMask__)(OBJGPU *, struct Intr *); + void (*__intrSetDisplayInterruptEnable__)(OBJGPU *, struct Intr *, NvBool, struct THREAD_STATE_NODE *); + NvU32 (*__intrReadRegTopEnSet__)(OBJGPU *, struct Intr *, NvU32, struct THREAD_STATE_NODE *); + void (*__intrWriteRegTopEnSet__)(OBJGPU *, struct Intr *, NvU32, NvU32, struct THREAD_STATE_NODE *); + void (*__intrWriteRegTopEnClear__)(OBJGPU *, struct Intr *, NvU32, NvU32, struct THREAD_STATE_NODE *); + void (*__intrSanityCheckEngineIntrStallVector__)(OBJGPU *, struct Intr *, NvU32, NvU16); + void (*__intrSanityCheckEngineIntrNotificationVector__)(OBJGPU *, struct Intr *, NvU32, NvU16); + NV_STATUS (*__intrStateLoad__)(OBJGPU *, struct Intr *, NvU32); + NV_STATUS (*__intrStateUnload__)(OBJGPU *, struct Intr *, NvU32); + NV_STATUS (*__intrSetIntrMask__)(OBJGPU *, struct Intr *, union MC_ENGINE_BITVECTOR *, struct THREAD_STATE_NODE *); + void (*__intrSetIntrEnInHw__)(OBJGPU *, struct Intr *, NvU32, struct THREAD_STATE_NODE *); + NvU32 (*__intrGetIntrEnFromHw__)(OBJGPU *, struct Intr *, struct THREAD_STATE_NODE *); + NV_STATUS (*__intrReconcileTunableState__)(POBJGPU, struct Intr *, void *); + NV_STATUS (*__intrStatePreLoad__)(POBJGPU, struct Intr *, NvU32); + NV_STATUS (*__intrStatePostUnload__)(POBJGPU, struct Intr *, NvU32); + NV_STATUS (*__intrStatePreUnload__)(POBJGPU, struct Intr *, NvU32); + void (*__intrInitMissing__)(POBJGPU, struct Intr *); + NV_STATUS (*__intrStatePreInitLocked__)(POBJGPU, struct Intr *); + NV_STATUS (*__intrStatePreInitUnlocked__)(POBJGPU, struct Intr *); + NV_STATUS (*__intrGetTunableState__)(POBJGPU, struct Intr *, void *); + NV_STATUS (*__intrCompareTunableState__)(POBJGPU, struct Intr *, void *, void *); + void (*__intrFreeTunableState__)(POBJGPU, struct Intr *, void *); + NV_STATUS (*__intrStatePostLoad__)(POBJGPU, struct Intr *, NvU32); + NV_STATUS (*__intrAllocTunableState__)(POBJGPU, struct Intr *, void **); + NV_STATUS (*__intrSetTunableState__)(POBJGPU, struct Intr *, void *); + NvBool (*__intrIsPresent__)(POBJGPU, struct Intr *); + NvBool PDB_PROP_INTR_ENABLE_DETAILED_LOGS; + NvBool PDB_PROP_INTR_HOST_DRIVEN_ENGINES_REMOVED_FROM_PMC; + NvBool PDB_PROP_INTR_READ_ONLY_EVEN_NUMBERED_INTR_LEAF_REGS; + NvBool PDB_PROP_INTR_ENUMERATIONS_ON_ENGINE_RESET; + NvBool PDB_PROP_INTR_SIMPLIFIED_VBLANK_HANDLING_FOR_CTRL_TREE; + NvBool PDB_PROP_INTR_DISABLE_PER_INTR_DPC_QUEUEING; + NvBool PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING; + NvBool PDB_PROP_INTR_MASK_SUPPORTED; + NvU32 nonStallPmcIntrMask; + NvU64 uvmSharedCpuLeafEn; + NvU64 
uvmSharedCpuLeafEnDisableMask; + NvU32 replayableFaultIntrVector; + NvU32 accessCntrIntrVector; + NvU32 displayIntrVector; + NvU32 cpuTopEnMask; + IntrServiceRecord intrServiceTable[155]; + NvBool bDefaultNonstallNotify; + NvU32 intrTableSz; + INTR_TABLE_ENTRY *pIntrTable; + INTR_TABLE_ENTRY pStaticPhysicalTable[17]; + INTR_TABLE_ENTRY pStaticKernelTable[17]; + NvBool bDpcStarted; + union MC_ENGINE_BITVECTOR pmcIntrPending; + DPCQUEUE dpcQueue; + NvU32 intrStuckThreshold; + INTR_MASK intrMask; + union MC_ENGINE_BITVECTOR helperEngineMask; + NvU32 intrEn0; + NvU32 intrCachedEn0; + NvU32 intrCachedEnSet; + NvU32 intrCachedEnClear; + NvU32 intrEn0Orig; + NvBool halIntrEnabled; + NvU32 saveIntrEn0; + NvBool bTablesPopulated; + NvU32 numPhysicalEntries; + NvU32 numKernelEntries; +}; + +#ifndef __NVOC_CLASS_Intr_TYPEDEF__ +#define __NVOC_CLASS_Intr_TYPEDEF__ +typedef struct Intr Intr; +#endif /* __NVOC_CLASS_Intr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Intr +#define __nvoc_class_id_Intr 0xc06e44 +#endif /* __nvoc_class_id_Intr */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Intr; + +#define __staticCast_Intr(pThis) \ + ((pThis)->__nvoc_pbase_Intr) + +#ifdef __nvoc_intr_h_disabled +#define __dynamicCast_Intr(pThis) ((Intr*)NULL) +#else //__nvoc_intr_h_disabled +#define __dynamicCast_Intr(pThis) \ + ((Intr*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Intr))) +#endif //__nvoc_intr_h_disabled + +#define PDB_PROP_INTR_HOST_DRIVEN_ENGINES_REMOVED_FROM_PMC_BASE_CAST +#define PDB_PROP_INTR_HOST_DRIVEN_ENGINES_REMOVED_FROM_PMC_BASE_NAME PDB_PROP_INTR_HOST_DRIVEN_ENGINES_REMOVED_FROM_PMC +#define PDB_PROP_INTR_SIMPLIFIED_VBLANK_HANDLING_FOR_CTRL_TREE_BASE_CAST +#define PDB_PROP_INTR_SIMPLIFIED_VBLANK_HANDLING_FOR_CTRL_TREE_BASE_NAME PDB_PROP_INTR_SIMPLIFIED_VBLANK_HANDLING_FOR_CTRL_TREE +#define PDB_PROP_INTR_MASK_SUPPORTED_BASE_CAST +#define PDB_PROP_INTR_MASK_SUPPORTED_BASE_NAME PDB_PROP_INTR_MASK_SUPPORTED +#define PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING_BASE_CAST +#define PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING_BASE_NAME PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING +#define PDB_PROP_INTR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_INTR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_INTR_ENABLE_DETAILED_LOGS_BASE_CAST +#define PDB_PROP_INTR_ENABLE_DETAILED_LOGS_BASE_NAME PDB_PROP_INTR_ENABLE_DETAILED_LOGS +#define PDB_PROP_INTR_ENUMERATIONS_ON_ENGINE_RESET_BASE_CAST +#define PDB_PROP_INTR_ENUMERATIONS_ON_ENGINE_RESET_BASE_NAME PDB_PROP_INTR_ENUMERATIONS_ON_ENGINE_RESET +#define PDB_PROP_INTR_READ_ONLY_EVEN_NUMBERED_INTR_LEAF_REGS_BASE_CAST +#define PDB_PROP_INTR_READ_ONLY_EVEN_NUMBERED_INTR_LEAF_REGS_BASE_NAME PDB_PROP_INTR_READ_ONLY_EVEN_NUMBERED_INTR_LEAF_REGS +#define PDB_PROP_INTR_DISABLE_PER_INTR_DPC_QUEUEING_BASE_CAST +#define PDB_PROP_INTR_DISABLE_PER_INTR_DPC_QUEUEING_BASE_NAME PDB_PROP_INTR_DISABLE_PER_INTR_DPC_QUEUEING + +NV_STATUS __nvoc_objCreateDynamic_Intr(Intr**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Intr(Intr**, Dynamic*, NvU32); +#define __objCreate_Intr(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_Intr((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define intrConstructEngine(pGpu, pIntr, arg0) intrConstructEngine_DISPATCH(pGpu, pIntr, arg0) +#define intrStateInitUnlocked(pGpu, pIntr) intrStateInitUnlocked_DISPATCH(pGpu, pIntr) +#define intrStateInitLocked(pGpu, pIntr) intrStateInitLocked_DISPATCH(pGpu, pIntr) +#define intrStateDestroy(pGpu, pIntr) intrStateDestroy_DISPATCH(pGpu, pIntr) +#define intrDecodeStallIntrEn(pGpu, pIntr, arg0) intrDecodeStallIntrEn_DISPATCH(pGpu, pIntr, arg0) +#define intrDecodeStallIntrEn_HAL(pGpu, pIntr, arg0) intrDecodeStallIntrEn_DISPATCH(pGpu, pIntr, arg0) +#define intrGetNonStallBaseVector(pGpu, pIntr) intrGetNonStallBaseVector_DISPATCH(pGpu, pIntr) +#define intrGetNonStallBaseVector_HAL(pGpu, pIntr) intrGetNonStallBaseVector_DISPATCH(pGpu, pIntr) +#define intrGetUvmSharedLeafEnDisableMask(pGpu, pIntr) intrGetUvmSharedLeafEnDisableMask_DISPATCH(pGpu, pIntr) +#define intrGetUvmSharedLeafEnDisableMask_HAL(pGpu, pIntr) intrGetUvmSharedLeafEnDisableMask_DISPATCH(pGpu, pIntr) +#define intrSetDisplayInterruptEnable(pGpu, pIntr, bEnable, pThreadState) intrSetDisplayInterruptEnable_DISPATCH(pGpu, pIntr, bEnable, pThreadState) +#define intrSetDisplayInterruptEnable_HAL(pGpu, pIntr, bEnable, pThreadState) intrSetDisplayInterruptEnable_DISPATCH(pGpu, pIntr, bEnable, pThreadState) +#define intrReadRegTopEnSet(pGpu, pIntr, arg0, arg1) intrReadRegTopEnSet_DISPATCH(pGpu, pIntr, arg0, arg1) +#define intrReadRegTopEnSet_HAL(pGpu, pIntr, arg0, arg1) intrReadRegTopEnSet_DISPATCH(pGpu, pIntr, arg0, arg1) +#define intrWriteRegTopEnSet(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegTopEnSet_DISPATCH(pGpu, pIntr, arg0, arg1, arg2) +#define intrWriteRegTopEnSet_HAL(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegTopEnSet_DISPATCH(pGpu, pIntr, arg0, arg1, arg2) +#define intrWriteRegTopEnClear(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegTopEnClear_DISPATCH(pGpu, pIntr, arg0, arg1, arg2) +#define intrWriteRegTopEnClear_HAL(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegTopEnClear_DISPATCH(pGpu, pIntr, arg0, arg1, arg2) +#define intrSanityCheckEngineIntrStallVector(pGpu, pIntr, vector, mcEngine) intrSanityCheckEngineIntrStallVector_DISPATCH(pGpu, pIntr, vector, mcEngine) +#define intrSanityCheckEngineIntrStallVector_HAL(pGpu, pIntr, vector, mcEngine) intrSanityCheckEngineIntrStallVector_DISPATCH(pGpu, pIntr, vector, mcEngine) +#define intrSanityCheckEngineIntrNotificationVector(pGpu, pIntr, vector, mcEngine) intrSanityCheckEngineIntrNotificationVector_DISPATCH(pGpu, pIntr, vector, mcEngine) +#define 
intrSanityCheckEngineIntrNotificationVector_HAL(pGpu, pIntr, vector, mcEngine) intrSanityCheckEngineIntrNotificationVector_DISPATCH(pGpu, pIntr, vector, mcEngine) +#define intrStateLoad(pGpu, pIntr, arg0) intrStateLoad_DISPATCH(pGpu, pIntr, arg0) +#define intrStateLoad_HAL(pGpu, pIntr, arg0) intrStateLoad_DISPATCH(pGpu, pIntr, arg0) +#define intrStateUnload(pGpu, pIntr, arg0) intrStateUnload_DISPATCH(pGpu, pIntr, arg0) +#define intrStateUnload_HAL(pGpu, pIntr, arg0) intrStateUnload_DISPATCH(pGpu, pIntr, arg0) +#define intrSetIntrMask(pGpu, pIntr, arg0, arg1) intrSetIntrMask_DISPATCH(pGpu, pIntr, arg0, arg1) +#define intrSetIntrMask_HAL(pGpu, pIntr, arg0, arg1) intrSetIntrMask_DISPATCH(pGpu, pIntr, arg0, arg1) +#define intrSetIntrEnInHw(pGpu, pIntr, arg0, arg1) intrSetIntrEnInHw_DISPATCH(pGpu, pIntr, arg0, arg1) +#define intrSetIntrEnInHw_HAL(pGpu, pIntr, arg0, arg1) intrSetIntrEnInHw_DISPATCH(pGpu, pIntr, arg0, arg1) +#define intrGetIntrEnFromHw(pGpu, pIntr, arg0) intrGetIntrEnFromHw_DISPATCH(pGpu, pIntr, arg0) +#define intrGetIntrEnFromHw_HAL(pGpu, pIntr, arg0) intrGetIntrEnFromHw_DISPATCH(pGpu, pIntr, arg0) +#define intrReconcileTunableState(pGpu, pEngstate, pTunableState) intrReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define intrStatePreLoad(pGpu, pEngstate, arg0) intrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define intrStatePostUnload(pGpu, pEngstate, arg0) intrStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define intrStatePreUnload(pGpu, pEngstate, arg0) intrStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define intrInitMissing(pGpu, pEngstate) intrInitMissing_DISPATCH(pGpu, pEngstate) +#define intrStatePreInitLocked(pGpu, pEngstate) intrStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define intrStatePreInitUnlocked(pGpu, pEngstate) intrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define intrGetTunableState(pGpu, pEngstate, pTunableState) intrGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define intrCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) intrCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define intrFreeTunableState(pGpu, pEngstate, pTunableState) intrFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define intrStatePostLoad(pGpu, pEngstate, arg0) intrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define intrAllocTunableState(pGpu, pEngstate, ppTunableState) intrAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define intrSetTunableState(pGpu, pEngstate, pTunableState) intrSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define intrIsPresent(pGpu, pEngstate) intrIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS intrCheckFecsEventbufferPending_IMPL(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, NvBool *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrCheckFecsEventbufferPending(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, NvBool *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrCheckFecsEventbufferPending(pGpu, pIntr, arg0, arg1) intrCheckFecsEventbufferPending_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrCheckFecsEventbufferPending_HAL(pGpu, pIntr, arg0, arg1) intrCheckFecsEventbufferPending(pGpu, pIntr, arg0, arg1) + +NV_STATUS intrCheckAndServiceFecsEventbuffer_IMPL(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef 
__nvoc_intr_h_disabled +static inline NV_STATUS intrCheckAndServiceFecsEventbuffer(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrCheckAndServiceFecsEventbuffer(pGpu, pIntr, arg0, arg1) intrCheckAndServiceFecsEventbuffer_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrCheckAndServiceFecsEventbuffer_HAL(pGpu, pIntr, arg0, arg1) intrCheckAndServiceFecsEventbuffer(pGpu, pIntr, arg0, arg1) + +static inline NV_STATUS intrStateDestroyPhysical_56cd7a(OBJGPU *pGpu, struct Intr *pIntr) { + return NV_OK; +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrStateDestroyPhysical(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrStateDestroyPhysical(pGpu, pIntr) intrStateDestroyPhysical_56cd7a(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrStateDestroyPhysical_HAL(pGpu, pIntr) intrStateDestroyPhysical(pGpu, pIntr) + +static inline void intrSetInterruptMaskBug1470153War_b3696a(OBJGPU *pGpu, struct Intr *pIntr) { + return; +} + +#ifdef __nvoc_intr_h_disabled +static inline void intrSetInterruptMaskBug1470153War(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSetInterruptMaskBug1470153War(pGpu, pIntr) intrSetInterruptMaskBug1470153War_b3696a(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrSetInterruptMaskBug1470153War_HAL(pGpu, pIntr) intrSetInterruptMaskBug1470153War(pGpu, pIntr) + +NV_STATUS intrGetPendingNonStall_TU102(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetPendingNonStall(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetPendingNonStall(pGpu, pIntr, arg0, arg1) intrGetPendingNonStall_TU102(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetPendingNonStall_HAL(pGpu, pIntr, arg0, arg1) intrGetPendingNonStall(pGpu, pIntr, arg0, arg1) + +NV_STATUS intrServiceNonStall_TU102(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrServiceNonStall(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrServiceNonStall(pGpu, pIntr, arg0, arg1) intrServiceNonStall_TU102(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrServiceNonStall_HAL(pGpu, pIntr, arg0, arg1) intrServiceNonStall(pGpu, pIntr, arg0, arg1) + +NvU32 intrGetNonStallEnable_TU102(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetNonStallEnable(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetNonStallEnable(pGpu, pIntr, arg0) intrGetNonStallEnable_TU102(pGpu, pIntr, arg0) 
+#endif //__nvoc_intr_h_disabled + +#define intrGetNonStallEnable_HAL(pGpu, pIntr, arg0) intrGetNonStallEnable(pGpu, pIntr, arg0) + +void intrDisableNonStall_TU102(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_intr_h_disabled +static inline void intrDisableNonStall(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrDisableNonStall(pGpu, pIntr, arg0) intrDisableNonStall_TU102(pGpu, pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +#define intrDisableNonStall_HAL(pGpu, pIntr, arg0) intrDisableNonStall(pGpu, pIntr, arg0) + +void intrRestoreNonStall_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline void intrRestoreNonStall(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrRestoreNonStall(pGpu, pIntr, arg0, arg1) intrRestoreNonStall_TU102(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrRestoreNonStall_HAL(pGpu, pIntr, arg0, arg1) intrRestoreNonStall(pGpu, pIntr, arg0, arg1) + +void intrGetStallInterruptMode_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *pIntrmode, NvBool *pPending); + +#ifdef __nvoc_intr_h_disabled +static inline void intrGetStallInterruptMode(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *pIntrmode, NvBool *pPending) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrGetStallInterruptMode(pGpu, pIntr, pIntrmode, pPending) intrGetStallInterruptMode_TU102(pGpu, pIntr, pIntrmode, pPending) +#endif //__nvoc_intr_h_disabled + +#define intrGetStallInterruptMode_HAL(pGpu, pIntr, pIntrmode, pPending) intrGetStallInterruptMode(pGpu, pIntr, pIntrmode, pPending) + +void intrEncodeStallIntrEn_GP100(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrEn, NvU32 *pIntrEnSet, NvU32 *pIntrEnClear); + +#ifdef __nvoc_intr_h_disabled +static inline void intrEncodeStallIntrEn(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrEn, NvU32 *pIntrEnSet, NvU32 *pIntrEnClear) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrEncodeStallIntrEn(pGpu, pIntr, intrEn, pIntrEnSet, pIntrEnClear) intrEncodeStallIntrEn_GP100(pGpu, pIntr, intrEn, pIntrEnSet, pIntrEnClear) +#endif //__nvoc_intr_h_disabled + +#define intrEncodeStallIntrEn_HAL(pGpu, pIntr, intrEn, pIntrEnSet, pIntrEnClear) intrEncodeStallIntrEn(pGpu, pIntr, intrEn, pIntrEnSet, pIntrEnClear) + +NV_STATUS intrCheckAndServiceNonReplayableFault_TU102(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrCheckAndServiceNonReplayableFault(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrCheckAndServiceNonReplayableFault(pGpu, pIntr, arg0) intrCheckAndServiceNonReplayableFault_TU102(pGpu, pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +#define intrCheckAndServiceNonReplayableFault_HAL(pGpu, pIntr, arg0) intrCheckAndServiceNonReplayableFault(pGpu, pIntr, arg0) + +static inline NvU32 intrGetStallBaseVector_4a4dee(OBJGPU *pGpu, struct Intr *pIntr) { + return 0; +} + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetStallBaseVector(OBJGPU *pGpu, struct Intr *pIntr) { + 
NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetStallBaseVector(pGpu, pIntr) intrGetStallBaseVector_4a4dee(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrGetStallBaseVector_HAL(pGpu, pIntr) intrGetStallBaseVector(pGpu, pIntr) + +void intrEnableLeaf_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrVector); + +#ifdef __nvoc_intr_h_disabled +static inline void intrEnableLeaf(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrVector) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrEnableLeaf(pGpu, pIntr, intrVector) intrEnableLeaf_TU102(pGpu, pIntr, intrVector) +#endif //__nvoc_intr_h_disabled + +#define intrEnableLeaf_HAL(pGpu, pIntr, intrVector) intrEnableLeaf(pGpu, pIntr, intrVector) + +void intrDisableLeaf_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrVector); + +#ifdef __nvoc_intr_h_disabled +static inline void intrDisableLeaf(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrVector) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrDisableLeaf(pGpu, pIntr, intrVector) intrDisableLeaf_TU102(pGpu, pIntr, intrVector) +#endif //__nvoc_intr_h_disabled + +#define intrDisableLeaf_HAL(pGpu, pIntr, intrVector) intrDisableLeaf(pGpu, pIntr, intrVector) + +void intrEnableTopNonstall_TU102(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *pThreadState); + +#ifdef __nvoc_intr_h_disabled +static inline void intrEnableTopNonstall(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrEnableTopNonstall(pGpu, pIntr, pThreadState) intrEnableTopNonstall_TU102(pGpu, pIntr, pThreadState) +#endif //__nvoc_intr_h_disabled + +#define intrEnableTopNonstall_HAL(pGpu, pIntr, pThreadState) intrEnableTopNonstall(pGpu, pIntr, pThreadState) + +void intrDisableTopNonstall_TU102(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *pThreadState); + +#ifdef __nvoc_intr_h_disabled +static inline void intrDisableTopNonstall(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrDisableTopNonstall(pGpu, pIntr, pThreadState) intrDisableTopNonstall_TU102(pGpu, pIntr, pThreadState) +#endif //__nvoc_intr_h_disabled + +#define intrDisableTopNonstall_HAL(pGpu, pIntr, pThreadState) intrDisableTopNonstall(pGpu, pIntr, pThreadState) + +void intrSetStall_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrType, struct THREAD_STATE_NODE *pThreadState); + +#ifdef __nvoc_intr_h_disabled +static inline void intrSetStall(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrType, struct THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSetStall(pGpu, pIntr, intrType, pThreadState) intrSetStall_TU102(pGpu, pIntr, intrType, pThreadState) +#endif //__nvoc_intr_h_disabled + +#define intrSetStall_HAL(pGpu, pIntr, intrType, pThreadState) intrSetStall(pGpu, pIntr, intrType, pThreadState) + +void intrClearLeafVector_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState); + +#ifdef __nvoc_intr_h_disabled +static inline void intrClearLeafVector(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define 
intrClearLeafVector(pGpu, pIntr, vector, pThreadState) intrClearLeafVector_TU102(pGpu, pIntr, vector, pThreadState) +#endif //__nvoc_intr_h_disabled + +#define intrClearLeafVector_HAL(pGpu, pIntr, vector, pThreadState) intrClearLeafVector(pGpu, pIntr, vector, pThreadState) + +static inline void intrClearCpuLeafVector_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState) { + return; +} + +#ifdef __nvoc_intr_h_disabled +static inline void intrClearCpuLeafVector(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrClearCpuLeafVector(pGpu, pIntr, vector, pThreadState) intrClearCpuLeafVector_b3696a(pGpu, pIntr, vector, pThreadState) +#endif //__nvoc_intr_h_disabled + +#define intrClearCpuLeafVector_HAL(pGpu, pIntr, vector, pThreadState) intrClearCpuLeafVector(pGpu, pIntr, vector, pThreadState) + +static inline void intrWriteCpuRegLeaf_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + return; +} + +#ifdef __nvoc_intr_h_disabled +static inline void intrWriteCpuRegLeaf(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrWriteCpuRegLeaf(pGpu, pIntr, arg0, arg1, arg2) intrWriteCpuRegLeaf_b3696a(pGpu, pIntr, arg0, arg1, arg2) +#endif //__nvoc_intr_h_disabled + +#define intrWriteCpuRegLeaf_HAL(pGpu, pIntr, arg0, arg1, arg2) intrWriteCpuRegLeaf(pGpu, pIntr, arg0, arg1, arg2) + +NvBool intrIsVectorPending_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState); + +#ifdef __nvoc_intr_h_disabled +static inline NvBool intrIsVectorPending(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, struct THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_FALSE; +} +#else //__nvoc_intr_h_disabled +#define intrIsVectorPending(pGpu, pIntr, vector, pThreadState) intrIsVectorPending_TU102(pGpu, pIntr, vector, pThreadState) +#endif //__nvoc_intr_h_disabled + +#define intrIsVectorPending_HAL(pGpu, pIntr, vector, pThreadState) intrIsVectorPending(pGpu, pIntr, vector, pThreadState) + +NV_STATUS intrSetStallSWIntr_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrSetStallSWIntr(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrSetStallSWIntr(pGpu, pIntr) intrSetStallSWIntr_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrSetStallSWIntr_HAL(pGpu, pIntr) intrSetStallSWIntr(pGpu, pIntr) + +NV_STATUS intrClearStallSWIntr_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrClearStallSWIntr(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrClearStallSWIntr(pGpu, pIntr) intrClearStallSWIntr_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrClearStallSWIntr_HAL(pGpu, pIntr) intrClearStallSWIntr(pGpu, pIntr) + +void intrEnableStallSWIntr_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline void intrEnableStallSWIntr(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); 
+} +#else //__nvoc_intr_h_disabled +#define intrEnableStallSWIntr(pGpu, pIntr) intrEnableStallSWIntr_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrEnableStallSWIntr_HAL(pGpu, pIntr) intrEnableStallSWIntr(pGpu, pIntr) + +void intrDisableStallSWIntr_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline void intrDisableStallSWIntr(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrDisableStallSWIntr(pGpu, pIntr) intrDisableStallSWIntr_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrDisableStallSWIntr_HAL(pGpu, pIntr) intrDisableStallSWIntr(pGpu, pIntr) + +static inline NV_STATUS intrEnableVirtualIntrLeaf_46f6a7(OBJGPU *pGpu, struct Intr *pIntr, NvU32 gfid) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrEnableVirtualIntrLeaf(OBJGPU *pGpu, struct Intr *pIntr, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrEnableVirtualIntrLeaf(pGpu, pIntr, gfid) intrEnableVirtualIntrLeaf_46f6a7(pGpu, pIntr, gfid) +#endif //__nvoc_intr_h_disabled + +#define intrEnableVirtualIntrLeaf_HAL(pGpu, pIntr, gfid) intrEnableVirtualIntrLeaf(pGpu, pIntr, gfid) + +static inline void intrServiceVirtual_b3696a(OBJGPU *pGpu, struct Intr *pIntr) { + return; +} + +#ifdef __nvoc_intr_h_disabled +static inline void intrServiceVirtual(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrServiceVirtual(pGpu, pIntr) intrServiceVirtual_b3696a(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrServiceVirtual_HAL(pGpu, pIntr) intrServiceVirtual(pGpu, pIntr) + +static inline void intrResetIntrRegistersForVF_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 gfid) { + return; +} + +#ifdef __nvoc_intr_h_disabled +static inline void intrResetIntrRegistersForVF(OBJGPU *pGpu, struct Intr *pIntr, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrResetIntrRegistersForVF(pGpu, pIntr, gfid) intrResetIntrRegistersForVF_b3696a(pGpu, pIntr, gfid) +#endif //__nvoc_intr_h_disabled + +#define intrResetIntrRegistersForVF_HAL(pGpu, pIntr, gfid) intrResetIntrRegistersForVF(pGpu, pIntr, gfid) + +static inline NV_STATUS intrSaveIntrRegValue_46f6a7(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 *arg1, NvU32 *arg2) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrSaveIntrRegValue(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 *arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrSaveIntrRegValue(pGpu, pIntr, arg0, arg1, arg2) intrSaveIntrRegValue_46f6a7(pGpu, pIntr, arg0, arg1, arg2) +#endif //__nvoc_intr_h_disabled + +#define intrSaveIntrRegValue_HAL(pGpu, pIntr, arg0, arg1, arg2) intrSaveIntrRegValue(pGpu, pIntr, arg0, arg1, arg2) + +static inline NV_STATUS intrRestoreIntrRegValue_46f6a7(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, NvU32 *arg2) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrRestoreIntrRegValue(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled 
+#define intrRestoreIntrRegValue(pGpu, pIntr, arg0, arg1, arg2) intrRestoreIntrRegValue_46f6a7(pGpu, pIntr, arg0, arg1, arg2) +#endif //__nvoc_intr_h_disabled + +#define intrRestoreIntrRegValue_HAL(pGpu, pIntr, arg0, arg1, arg2) intrRestoreIntrRegValue(pGpu, pIntr, arg0, arg1, arg2) + +static inline NV_STATUS intrTriggerCpuDoorbellForVF_46f6a7(OBJGPU *pGpu, struct Intr *pIntr, NvU32 gfid) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrTriggerCpuDoorbellForVF(OBJGPU *pGpu, struct Intr *pIntr, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrTriggerCpuDoorbellForVF(pGpu, pIntr, gfid) intrTriggerCpuDoorbellForVF_46f6a7(pGpu, pIntr, gfid) +#endif //__nvoc_intr_h_disabled + +#define intrTriggerCpuDoorbellForVF_HAL(pGpu, pIntr, gfid) intrTriggerCpuDoorbellForVF(pGpu, pIntr, gfid) + +static inline NV_STATUS intrTriggerPrivDoorbell_46f6a7(OBJGPU *pGpu, struct Intr *pIntr, NvU32 gfid) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrTriggerPrivDoorbell(OBJGPU *pGpu, struct Intr *pIntr, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrTriggerPrivDoorbell(pGpu, pIntr, gfid) intrTriggerPrivDoorbell_46f6a7(pGpu, pIntr, gfid) +#endif //__nvoc_intr_h_disabled + +#define intrTriggerPrivDoorbell_HAL(pGpu, pIntr, gfid) intrTriggerPrivDoorbell(pGpu, pIntr, gfid) + +void intrRetriggerTopLevel_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline void intrRetriggerTopLevel(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrRetriggerTopLevel(pGpu, pIntr) intrRetriggerTopLevel_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrRetriggerTopLevel_HAL(pGpu, pIntr) intrRetriggerTopLevel(pGpu, pIntr) + +NV_STATUS intrGetLeafStatus_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetLeafStatus(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetLeafStatus(pGpu, pIntr, arg0, arg1) intrGetLeafStatus_TU102(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetLeafStatus_HAL(pGpu, pIntr, arg0, arg1) intrGetLeafStatus(pGpu, pIntr, arg0, arg1) + +NV_STATUS intrGetPendingDisplayIntr_TU102(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *pEngines, struct THREAD_STATE_NODE *pThreadState); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetPendingDisplayIntr(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *pEngines, struct THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetPendingDisplayIntr(pGpu, pIntr, pEngines, pThreadState) intrGetPendingDisplayIntr_TU102(pGpu, pIntr, pEngines, pThreadState) +#endif //__nvoc_intr_h_disabled + +#define intrGetPendingDisplayIntr_HAL(pGpu, pIntr, pEngines, pThreadState) intrGetPendingDisplayIntr(pGpu, pIntr, pEngines, pThreadState) + +void intrDumpState_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline void 
intrDumpState(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrDumpState(pGpu, pIntr) intrDumpState_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrDumpState_HAL(pGpu, pIntr) intrDumpState(pGpu, pIntr) + +NV_STATUS intrCacheIntrFields_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrCacheIntrFields(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrCacheIntrFields(pGpu, pIntr) intrCacheIntrFields_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrCacheIntrFields_HAL(pGpu, pIntr) intrCacheIntrFields(pGpu, pIntr) + +NvU32 intrReadRegLeafEnSet_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrReadRegLeafEnSet(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrReadRegLeafEnSet(pGpu, pIntr, arg0, arg1) intrReadRegLeafEnSet_TU102(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrReadRegLeafEnSet_HAL(pGpu, pIntr, arg0, arg1) intrReadRegLeafEnSet(pGpu, pIntr, arg0, arg1) + +NvU32 intrReadRegLeaf_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrReadRegLeaf(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrReadRegLeaf(pGpu, pIntr, arg0, arg1) intrReadRegLeaf_TU102(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrReadRegLeaf_HAL(pGpu, pIntr, arg0, arg1) intrReadRegLeaf(pGpu, pIntr, arg0, arg1) + +NvU32 intrReadRegTop_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrReadRegTop(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrReadRegTop(pGpu, pIntr, arg0, arg1) intrReadRegTop_TU102(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrReadRegTop_HAL(pGpu, pIntr, arg0, arg1) intrReadRegTop(pGpu, pIntr, arg0, arg1) + +void intrWriteRegLeafEnSet_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +#ifdef __nvoc_intr_h_disabled +static inline void intrWriteRegLeafEnSet(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrWriteRegLeafEnSet(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegLeafEnSet_TU102(pGpu, pIntr, arg0, arg1, arg2) +#endif //__nvoc_intr_h_disabled + +#define intrWriteRegLeafEnSet_HAL(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegLeafEnSet(pGpu, pIntr, arg0, arg1, arg2) + +void intrWriteRegLeafEnClear_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +#ifdef __nvoc_intr_h_disabled +static inline void intrWriteRegLeafEnClear(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + 
NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrWriteRegLeafEnClear(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegLeafEnClear_TU102(pGpu, pIntr, arg0, arg1, arg2) +#endif //__nvoc_intr_h_disabled + +#define intrWriteRegLeafEnClear_HAL(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegLeafEnClear(pGpu, pIntr, arg0, arg1, arg2) + +void intrWriteRegLeaf_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +#ifdef __nvoc_intr_h_disabled +static inline void intrWriteRegLeaf(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrWriteRegLeaf(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegLeaf_TU102(pGpu, pIntr, arg0, arg1, arg2) +#endif //__nvoc_intr_h_disabled + +#define intrWriteRegLeaf_HAL(pGpu, pIntr, arg0, arg1, arg2) intrWriteRegLeaf(pGpu, pIntr, arg0, arg1, arg2) + +NvU32 intrGetStallSubtreeLast_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetStallSubtreeLast(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetStallSubtreeLast(pGpu, pIntr) intrGetStallSubtreeLast_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrGetStallSubtreeLast_HAL(pGpu, pIntr) intrGetStallSubtreeLast(pGpu, pIntr) + +NvU32 intrGetNumLeaves_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetNumLeaves(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetNumLeaves(pGpu, pIntr) intrGetNumLeaves_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrGetNumLeaves_HAL(pGpu, pIntr) intrGetNumLeaves(pGpu, pIntr) + +NvU32 intrGetLeafSize_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetLeafSize(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetLeafSize(pGpu, pIntr) intrGetLeafSize_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrGetLeafSize_HAL(pGpu, pIntr) intrGetLeafSize(pGpu, pIntr) + +NvU32 intrGetIntrTopNonStallMask_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetIntrTopNonStallMask(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetIntrTopNonStallMask(pGpu, pIntr) intrGetIntrTopNonStallMask_TU102(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrGetIntrTopNonStallMask_HAL(pGpu, pIntr) intrGetIntrTopNonStallMask(pGpu, pIntr) + +static inline NvU32 intrUpdateIntrCtrlValue_4a4dee(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrCtrl, NvU32 routing) { + return 0; +} + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrUpdateIntrCtrlValue(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrCtrl, NvU32 routing) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrUpdateIntrCtrlValue(pGpu, pIntr, intrCtrl, routing) intrUpdateIntrCtrlValue_4a4dee(pGpu, pIntr, intrCtrl, routing) +#endif //__nvoc_intr_h_disabled + +#define intrUpdateIntrCtrlValue_HAL(pGpu, pIntr, intrCtrl, routing) intrUpdateIntrCtrlValue(pGpu, pIntr, 
intrCtrl, routing) + +static inline void intrSetRouting_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrCtrl, NvU32 intrCtrlReg, NvU32 routing) { + return; +} + +#ifdef __nvoc_intr_h_disabled +static inline void intrSetRouting(OBJGPU *pGpu, struct Intr *pIntr, NvU32 intrCtrl, NvU32 intrCtrlReg, NvU32 routing) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSetRouting(pGpu, pIntr, intrCtrl, intrCtrlReg, routing) intrSetRouting_b3696a(pGpu, pIntr, intrCtrl, intrCtrlReg, routing) +#endif //__nvoc_intr_h_disabled + +#define intrSetRouting_HAL(pGpu, pIntr, intrCtrl, intrCtrlReg, routing) intrSetRouting(pGpu, pIntr, intrCtrl, intrCtrlReg, routing) + +static inline void intrRouteFBInterruptsToSystemFirmware_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvBool bEnable) { + return; +} + +#ifdef __nvoc_intr_h_disabled +static inline void intrRouteFBInterruptsToSystemFirmware(OBJGPU *pGpu, struct Intr *pIntr, NvBool bEnable) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrRouteFBInterruptsToSystemFirmware(pGpu, pIntr, bEnable) intrRouteFBInterruptsToSystemFirmware_b3696a(pGpu, pIntr, bEnable) +#endif //__nvoc_intr_h_disabled + +#define intrRouteFBInterruptsToSystemFirmware_HAL(pGpu, pIntr, bEnable) intrRouteFBInterruptsToSystemFirmware(pGpu, pIntr, bEnable) + +static inline NV_STATUS intrInitDynamicInterruptTable_5baef9(OBJGPU *pGpu, struct Intr *pIntr, struct OBJFIFO *arg0, INTR_TABLE_ENTRY *arg1, NvU32 arg2, NvU32 initFlags) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrInitDynamicInterruptTable(OBJGPU *pGpu, struct Intr *pIntr, struct OBJFIFO *arg0, INTR_TABLE_ENTRY *arg1, NvU32 arg2, NvU32 initFlags) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrInitDynamicInterruptTable(pGpu, pIntr, arg0, arg1, arg2, initFlags) intrInitDynamicInterruptTable_5baef9(pGpu, pIntr, arg0, arg1, arg2, initFlags) +#endif //__nvoc_intr_h_disabled + +#define intrInitDynamicInterruptTable_HAL(pGpu, pIntr, arg0, arg1, arg2, initFlags) intrInitDynamicInterruptTable(pGpu, pIntr, arg0, arg1, arg2, initFlags) + +static inline NV_STATUS intrInitAnyInterruptTable_5baef9(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY **ppIntrTable, NvU32 *pIntrTableSz, NvU32 initFlags) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrInitAnyInterruptTable(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY **ppIntrTable, NvU32 *pIntrTableSz, NvU32 initFlags) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrInitAnyInterruptTable(pGpu, pIntr, ppIntrTable, pIntrTableSz, initFlags) intrInitAnyInterruptTable_5baef9(pGpu, pIntr, ppIntrTable, pIntrTableSz, initFlags) +#endif //__nvoc_intr_h_disabled + +#define intrInitAnyInterruptTable_HAL(pGpu, pIntr, ppIntrTable, pIntrTableSz, initFlags) intrInitAnyInterruptTable(pGpu, pIntr, ppIntrTable, pIntrTableSz, initFlags) + +NV_STATUS intrInitInterruptTable_KERNEL(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrInitInterruptTable(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrInitInterruptTable(pGpu, pIntr) 
intrInitInterruptTable_KERNEL(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrInitInterruptTable_HAL(pGpu, pIntr) intrInitInterruptTable(pGpu, pIntr) + +NV_STATUS intrGetInterruptTable_IMPL(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY **arg0, NvU32 *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetInterruptTable(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY **arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetInterruptTable(pGpu, pIntr, arg0, arg1) intrGetInterruptTable_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetInterruptTable_HAL(pGpu, pIntr, arg0, arg1) intrGetInterruptTable(pGpu, pIntr, arg0, arg1) + +NV_STATUS intrDestroyInterruptTable_IMPL(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrDestroyInterruptTable(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrDestroyInterruptTable(pGpu, pIntr) intrDestroyInterruptTable_IMPL(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrDestroyInterruptTable_HAL(pGpu, pIntr) intrDestroyInterruptTable(pGpu, pIntr) + +static inline NV_STATUS intrGetStaticVFmcEngines_5baef9(OBJGPU *pGpu, struct Intr *pIntr, NvU16 **ppMcEngines, NvU32 *pCount) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetStaticVFmcEngines(OBJGPU *pGpu, struct Intr *pIntr, NvU16 **ppMcEngines, NvU32 *pCount) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetStaticVFmcEngines(pGpu, pIntr, ppMcEngines, pCount) intrGetStaticVFmcEngines_5baef9(pGpu, pIntr, ppMcEngines, pCount) +#endif //__nvoc_intr_h_disabled + +#define intrGetStaticVFmcEngines_HAL(pGpu, pIntr, ppMcEngines, pCount) intrGetStaticVFmcEngines(pGpu, pIntr, ppMcEngines, pCount) + +static inline NV_STATUS intrGetStaticInterruptTable_5baef9(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY *pTable, NvU32 *pCount, NvU32 maxCount, NvU32 initFlags) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetStaticInterruptTable(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY *pTable, NvU32 *pCount, NvU32 maxCount, NvU32 initFlags) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetStaticInterruptTable(pGpu, pIntr, pTable, pCount, maxCount, initFlags) intrGetStaticInterruptTable_5baef9(pGpu, pIntr, pTable, pCount, maxCount, initFlags) +#endif //__nvoc_intr_h_disabled + +#define intrGetStaticInterruptTable_HAL(pGpu, pIntr, pTable, pCount, maxCount, initFlags) intrGetStaticInterruptTable(pGpu, pIntr, pTable, pCount, maxCount, initFlags) + +static inline NvU32 intrGetGPUHostInterruptTableSize_5baef9(OBJGPU *pGpu, struct Intr *pIntr, NvU32 initFlags) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetGPUHostInterruptTableSize(OBJGPU *pGpu, struct Intr *pIntr, NvU32 initFlags) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetGPUHostInterruptTableSize(pGpu, pIntr, initFlags) intrGetGPUHostInterruptTableSize_5baef9(pGpu, pIntr, initFlags) 
+#endif //__nvoc_intr_h_disabled + +#define intrGetGPUHostInterruptTableSize_HAL(pGpu, pIntr, initFlags) intrGetGPUHostInterruptTableSize(pGpu, pIntr, initFlags) + +static inline NV_STATUS intrInitGPUHostInterruptTable_5baef9(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY *arg0, NvU32 arg1, NvU32 initFlags) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrInitGPUHostInterruptTable(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY *arg0, NvU32 arg1, NvU32 initFlags) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrInitGPUHostInterruptTable(pGpu, pIntr, arg0, arg1, initFlags) intrInitGPUHostInterruptTable_5baef9(pGpu, pIntr, arg0, arg1, initFlags) +#endif //__nvoc_intr_h_disabled + +#define intrInitGPUHostInterruptTable_HAL(pGpu, pIntr, arg0, arg1, initFlags) intrInitGPUHostInterruptTable(pGpu, pIntr, arg0, arg1, initFlags) + +static inline NV_STATUS intrInitEngineSchedInterruptTable_5baef9(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY *arg0, NvU32 arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrInitEngineSchedInterruptTable(OBJGPU *pGpu, struct Intr *pIntr, INTR_TABLE_ENTRY *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrInitEngineSchedInterruptTable(pGpu, pIntr, arg0, arg1) intrInitEngineSchedInterruptTable_5baef9(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrInitEngineSchedInterruptTable_HAL(pGpu, pIntr, arg0, arg1) intrInitEngineSchedInterruptTable(pGpu, pIntr, arg0, arg1) + +void intrServiceStall_IMPL(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline void intrServiceStall(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrServiceStall(pGpu, pIntr) intrServiceStall_IMPL(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrServiceStall_HAL(pGpu, pIntr) intrServiceStall(pGpu, pIntr) + +void intrServiceStallList_IMPL(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, NvBool arg1); + +#ifdef __nvoc_intr_h_disabled +static inline void intrServiceStallList(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrServiceStallList(pGpu, pIntr, arg0, arg1) intrServiceStallList_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrServiceStallList_HAL(pGpu, pIntr, arg0, arg1) intrServiceStallList(pGpu, pIntr, arg0, arg1) + +void intrServiceStallSingle_IMPL(OBJGPU *pGpu, struct Intr *pIntr, NvU16 arg0, NvBool arg1); + +#ifdef __nvoc_intr_h_disabled +static inline void intrServiceStallSingle(OBJGPU *pGpu, struct Intr *pIntr, NvU16 arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrServiceStallSingle(pGpu, pIntr, arg0, arg1) intrServiceStallSingle_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrServiceStallSingle_HAL(pGpu, pIntr, arg0, arg1) intrServiceStallSingle(pGpu, pIntr, arg0, arg1) + +void intrProcessDPCQueue_IMPL(OBJGPU *pGpu, struct Intr *pIntr); + +#ifdef __nvoc_intr_h_disabled +static inline void intrProcessDPCQueue(OBJGPU *pGpu, struct Intr 
*pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrProcessDPCQueue(pGpu, pIntr) intrProcessDPCQueue_IMPL(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrProcessDPCQueue_HAL(pGpu, pIntr) intrProcessDPCQueue(pGpu, pIntr) + +NV_STATUS intrGetIntrMask_GP100(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetIntrMask(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetIntrMask(pGpu, pIntr, arg0, arg1) intrGetIntrMask_GP100(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetIntrMask_HAL(pGpu, pIntr, arg0, arg1) intrGetIntrMask(pGpu, pIntr, arg0, arg1) + +static inline NV_STATUS intrGetEccIntrMaskOffset_5baef9(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetEccIntrMaskOffset(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetEccIntrMaskOffset(pGpu, pIntr, arg0, arg1) intrGetEccIntrMaskOffset_5baef9(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetEccIntrMaskOffset_HAL(pGpu, pIntr, arg0, arg1) intrGetEccIntrMaskOffset(pGpu, pIntr, arg0, arg1) + +static inline NV_STATUS intrGetNvlinkIntrMaskOffset_5baef9(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetNvlinkIntrMaskOffset(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetNvlinkIntrMaskOffset(pGpu, pIntr, arg0, arg1) intrGetNvlinkIntrMaskOffset_5baef9(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetNvlinkIntrMaskOffset_HAL(pGpu, pIntr, arg0, arg1) intrGetNvlinkIntrMaskOffset(pGpu, pIntr, arg0, arg1) + +static inline NV_STATUS intrGetEccVirtualFunctionIntrMask_5baef9(OBJGPU *pGpu, struct Intr *pIntr, NvHandle arg0, NvU32 *arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetEccVirtualFunctionIntrMask(OBJGPU *pGpu, struct Intr *pIntr, NvHandle arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetEccVirtualFunctionIntrMask(pGpu, pIntr, arg0, arg1) intrGetEccVirtualFunctionIntrMask_5baef9(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetEccVirtualFunctionIntrMask_HAL(pGpu, pIntr, arg0, arg1) intrGetEccVirtualFunctionIntrMask(pGpu, pIntr, arg0, arg1) + +static inline NV_STATUS intrGetNvlinkVirtualFunctionIntrMask_5baef9(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetNvlinkVirtualFunctionIntrMask(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetNvlinkVirtualFunctionIntrMask(pGpu, pIntr, arg0, arg1) intrGetNvlinkVirtualFunctionIntrMask_5baef9(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetNvlinkVirtualFunctionIntrMask_HAL(pGpu, pIntr, arg0, arg1) intrGetNvlinkVirtualFunctionIntrMask(pGpu, pIntr, arg0, arg1) + +static inline NvU32 intrGetEccVirtualFunctionIntrSmcMaskAll_5baef9(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetEccVirtualFunctionIntrSmcMaskAll(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetEccVirtualFunctionIntrSmcMaskAll(pGpu, pIntr) intrGetEccVirtualFunctionIntrSmcMaskAll_5baef9(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrGetEccVirtualFunctionIntrSmcMaskAll_HAL(pGpu, pIntr) intrGetEccVirtualFunctionIntrSmcMaskAll(pGpu, pIntr) + +static inline NvBool intrRequiresPossibleErrorNotifier_491d52(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *pEngines) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_intr_h_disabled +static inline NvBool intrRequiresPossibleErrorNotifier(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *pEngines) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_FALSE; +} +#else //__nvoc_intr_h_disabled +#define intrRequiresPossibleErrorNotifier(pGpu, pIntr, pEngines) intrRequiresPossibleErrorNotifier_491d52(pGpu, pIntr, pEngines) +#endif //__nvoc_intr_h_disabled + +#define intrRequiresPossibleErrorNotifier_HAL(pGpu, pIntr, pEngines) intrRequiresPossibleErrorNotifier(pGpu, pIntr, pEngines) + +static inline NvU32 intrReadErrCont_491d52(OBJGPU *pGpu, struct Intr *pIntr) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrReadErrCont(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrReadErrCont(pGpu, pIntr) intrReadErrCont_491d52(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +#define intrReadErrCont_HAL(pGpu, pIntr) intrReadErrCont(pGpu, pIntr) + +NV_STATUS intrGetPendingStall_GP100(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetPendingStall(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetPendingStall(pGpu, pIntr, arg0, arg1) intrGetPendingStall_GP100(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetPendingStall_HAL(pGpu, pIntr, arg0, arg1) intrGetPendingStall(pGpu, pIntr, arg0, arg1) + +NV_STATUS intrGetPendingStallEngines_TU102(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetPendingStallEngines(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetPendingStallEngines(pGpu, pIntr, arg0, arg1) intrGetPendingStallEngines_TU102(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define 
intrGetPendingStallEngines_HAL(pGpu, pIntr, arg0, arg1) intrGetPendingStallEngines(pGpu, pIntr, arg0, arg1) + +NvBool intrIsIntrEnabled_IMPL(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_intr_h_disabled +static inline NvBool intrIsIntrEnabled(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_FALSE; +} +#else //__nvoc_intr_h_disabled +#define intrIsIntrEnabled(pGpu, pIntr, arg0) intrIsIntrEnabled_IMPL(pGpu, pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +#define intrIsIntrEnabled_HAL(pGpu, pIntr, arg0) intrIsIntrEnabled(pGpu, pIntr, arg0) + +static inline void intrSetHubLeafIntr_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, struct THREAD_STATE_NODE *arg3) { + return; +} + +#ifdef __nvoc_intr_h_disabled +static inline void intrSetHubLeafIntr(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, struct THREAD_STATE_NODE *arg3) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSetHubLeafIntr(pGpu, pIntr, arg0, arg1, arg2, arg3) intrSetHubLeafIntr_b3696a(pGpu, pIntr, arg0, arg1, arg2, arg3) +#endif //__nvoc_intr_h_disabled + +#define intrSetHubLeafIntr_HAL(pGpu, pIntr, arg0, arg1, arg2, arg3) intrSetHubLeafIntr(pGpu, pIntr, arg0, arg1, arg2, arg3) + +void intrGetHubLeafIntrPending_STUB(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_intr_h_disabled +static inline void intrGetHubLeafIntrPending(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrGetHubLeafIntrPending(pGpu, pIntr, arg0, arg1) intrGetHubLeafIntrPending_STUB(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#define intrGetHubLeafIntrPending_HAL(pGpu, pIntr, arg0, arg1) intrGetHubLeafIntrPending(pGpu, pIntr, arg0, arg1) + +NV_STATUS intrConstructEngine_IMPL(OBJGPU *pGpu, struct Intr *pIntr, ENGDESCRIPTOR arg0); + +static inline NV_STATUS intrConstructEngine_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, ENGDESCRIPTOR arg0) { + return pIntr->__intrConstructEngine__(pGpu, pIntr, arg0); +} + +NV_STATUS intrStateInitUnlocked_IMPL(OBJGPU *pGpu, struct Intr *pIntr); + +static inline NV_STATUS intrStateInitUnlocked_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr) { + return pIntr->__intrStateInitUnlocked__(pGpu, pIntr); +} + +NV_STATUS intrStateInitLocked_IMPL(OBJGPU *pGpu, struct Intr *pIntr); + +static inline NV_STATUS intrStateInitLocked_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr) { + return pIntr->__intrStateInitLocked__(pGpu, pIntr); +} + +void intrStateDestroy_IMPL(OBJGPU *pGpu, struct Intr *pIntr); + +static inline void intrStateDestroy_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr) { + pIntr->__intrStateDestroy__(pGpu, pIntr); +} + +NvU32 intrDecodeStallIntrEn_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0); + +static inline NvU32 intrDecodeStallIntrEn_4a4dee(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0) { + return 0; +} + +static inline NvU32 intrDecodeStallIntrEn_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0) { + return pIntr->__intrDecodeStallIntrEn__(pGpu, pIntr, arg0); +} + +NvU32 intrGetNonStallBaseVector_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +static inline NvU32 intrGetNonStallBaseVector_c067f9(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static 
inline NvU32 intrGetNonStallBaseVector_4a4dee(OBJGPU *pGpu, struct Intr *pIntr) { + return 0; +} + +static inline NvU32 intrGetNonStallBaseVector_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr) { + return pIntr->__intrGetNonStallBaseVector__(pGpu, pIntr); +} + +NvU64 intrGetUvmSharedLeafEnDisableMask_TU102(OBJGPU *pGpu, struct Intr *pIntr); + +NvU64 intrGetUvmSharedLeafEnDisableMask_GA100(OBJGPU *pGpu, struct Intr *pIntr); + +static inline NvU64 intrGetUvmSharedLeafEnDisableMask_5baef9(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NvU64 intrGetUvmSharedLeafEnDisableMask_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr) { + return pIntr->__intrGetUvmSharedLeafEnDisableMask__(pGpu, pIntr); +} + +void intrSetDisplayInterruptEnable_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvBool bEnable, struct THREAD_STATE_NODE *pThreadState); + +static inline void intrSetDisplayInterruptEnable_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvBool bEnable, struct THREAD_STATE_NODE *pThreadState) { + return; +} + +static inline void intrSetDisplayInterruptEnable_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvBool bEnable, struct THREAD_STATE_NODE *pThreadState) { + pIntr->__intrSetDisplayInterruptEnable__(pGpu, pIntr, bEnable, pThreadState); +} + +NvU32 intrReadRegTopEnSet_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1); + +NvU32 intrReadRegTopEnSet_GA102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1); + +static inline NvU32 intrReadRegTopEnSet_b2b553(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1) { + return 0; +} + +static inline NvU32 intrReadRegTopEnSet_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1) { + return pIntr->__intrReadRegTopEnSet__(pGpu, pIntr, arg0, arg1); +} + +void intrWriteRegTopEnSet_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +void intrWriteRegTopEnSet_GA102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +static inline void intrWriteRegTopEnSet_d44104(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + return; +} + +static inline void intrWriteRegTopEnSet_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + pIntr->__intrWriteRegTopEnSet__(pGpu, pIntr, arg0, arg1, arg2); +} + +void intrWriteRegTopEnClear_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +void intrWriteRegTopEnClear_GA102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +static inline void intrWriteRegTopEnClear_d44104(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + return; +} + +static inline void intrWriteRegTopEnClear_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + pIntr->__intrWriteRegTopEnClear__(pGpu, pIntr, arg0, arg1, arg2); +} + +void intrSanityCheckEngineIntrStallVector_GA100(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, NvU16 mcEngine); + +static inline void intrSanityCheckEngineIntrStallVector_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, NvU16 mcEngine) { + return; +} + +static inline void intrSanityCheckEngineIntrStallVector_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, NvU16 mcEngine) { + 
pIntr->__intrSanityCheckEngineIntrStallVector__(pGpu, pIntr, vector, mcEngine); +} + +void intrSanityCheckEngineIntrNotificationVector_GA100(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, NvU16 mcEngine); + +static inline void intrSanityCheckEngineIntrNotificationVector_b3696a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, NvU16 mcEngine) { + return; +} + +static inline void intrSanityCheckEngineIntrNotificationVector_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 vector, NvU16 mcEngine) { + pIntr->__intrSanityCheckEngineIntrNotificationVector__(pGpu, pIntr, vector, mcEngine); +} + +NV_STATUS intrStateLoad_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0); + +static inline NV_STATUS intrStateLoad_56cd7a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0) { + return NV_OK; +} + +static inline NV_STATUS intrStateLoad_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0) { + return pIntr->__intrStateLoad__(pGpu, pIntr, arg0); +} + +NV_STATUS intrStateUnload_TU102(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0); + +static inline NV_STATUS intrStateUnload_56cd7a(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0) { + return NV_OK; +} + +static inline NV_STATUS intrStateUnload_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0) { + return pIntr->__intrStateUnload__(pGpu, pIntr, arg0); +} + +NV_STATUS intrSetIntrMask_GP100(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); + +static inline NV_STATUS intrSetIntrMask_46f6a7(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS intrSetIntrMask_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + return pIntr->__intrSetIntrMask__(pGpu, pIntr, arg0, arg1); +} + +void intrSetIntrEnInHw_GP100(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1); + +static inline void intrSetIntrEnInHw_d44104(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1) { + return; +} + +static inline void intrSetIntrEnInHw_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, struct THREAD_STATE_NODE *arg1) { + pIntr->__intrSetIntrEnInHw__(pGpu, pIntr, arg0, arg1); +} + +NvU32 intrGetIntrEnFromHw_GP100(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0); + +static inline NvU32 intrGetIntrEnFromHw_b2b553(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0) { + return 0; +} + +static inline NvU32 intrGetIntrEnFromHw_DISPATCH(OBJGPU *pGpu, struct Intr *pIntr, struct THREAD_STATE_NODE *arg0) { + return pIntr->__intrGetIntrEnFromHw__(pGpu, pIntr, arg0); +} + +static inline NV_STATUS intrReconcileTunableState_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, void *pTunableState) { + return pEngstate->__intrReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS intrStatePreLoad_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, NvU32 arg0) { + return pEngstate->__intrStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS intrStatePostUnload_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, NvU32 arg0) { + return pEngstate->__intrStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS intrStatePreUnload_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, NvU32 arg0) { + return pEngstate->__intrStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline void intrInitMissing_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate) { + 
pEngstate->__intrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS intrStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate) { + return pEngstate->__intrStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS intrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate) { + return pEngstate->__intrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS intrGetTunableState_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, void *pTunableState) { + return pEngstate->__intrGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS intrCompareTunableState_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__intrCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void intrFreeTunableState_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, void *pTunableState) { + pEngstate->__intrFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS intrStatePostLoad_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, NvU32 arg0) { + return pEngstate->__intrStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS intrAllocTunableState_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, void **ppTunableState) { + return pEngstate->__intrAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS intrSetTunableState_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate, void *pTunableState) { + return pEngstate->__intrSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool intrIsPresent_DISPATCH(POBJGPU pGpu, struct Intr *pEngstate) { + return pEngstate->__intrIsPresent__(pGpu, pEngstate); +} + +void intrDestruct_IMPL(struct Intr *pIntr); +#define __nvoc_intrDestruct(pIntr) intrDestruct_IMPL(pIntr) +NV_STATUS intrServiceNonStallBottomHalf_IMPL(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrServiceNonStallBottomHalf(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrServiceNonStallBottomHalf(pGpu, pIntr, arg0, arg1) intrServiceNonStallBottomHalf_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +NV_STATUS intrServiceNotificationRecords_IMPL(OBJGPU *pGpu, struct Intr *pIntr, NvU16 mcEngineIdx, struct THREAD_STATE_NODE *arg0); +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrServiceNotificationRecords(OBJGPU *pGpu, struct Intr *pIntr, NvU16 mcEngineIdx, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrServiceNotificationRecords(pGpu, pIntr, mcEngineIdx, arg0) intrServiceNotificationRecords_IMPL(pGpu, pIntr, mcEngineIdx, arg0) +#endif //__nvoc_intr_h_disabled + +void intrServiceStallListAllGpusCond_IMPL(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, NvBool arg1); +#ifdef __nvoc_intr_h_disabled +static inline void intrServiceStallListAllGpusCond(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrServiceStallListAllGpusCond(pGpu, pIntr, arg0, arg1) intrServiceStallListAllGpusCond_IMPL(pGpu, pIntr, arg0, arg1) +#endif 
//__nvoc_intr_h_disabled + +void intrServiceStallListDevice_IMPL(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, NvBool arg1); +#ifdef __nvoc_intr_h_disabled +static inline void intrServiceStallListDevice(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrServiceStallListDevice(pGpu, pIntr, arg0, arg1) intrServiceStallListDevice_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +NvU32 intrServiceInterruptRecords_IMPL(OBJGPU *pGpu, struct Intr *pIntr, NvU16 arg0, NvBool *arg1); +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrServiceInterruptRecords(OBJGPU *pGpu, struct Intr *pIntr, NvU16 arg0, NvBool *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrServiceInterruptRecords(pGpu, pIntr, arg0, arg1) intrServiceInterruptRecords_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +void intrQueueDpc_IMPL(OBJGPU *pGpu, struct Intr *pIntr, DPCQUEUE *arg0, DPCNODE *arg1); +#ifdef __nvoc_intr_h_disabled +static inline void intrQueueDpc(OBJGPU *pGpu, struct Intr *pIntr, DPCQUEUE *arg0, DPCNODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrQueueDpc(pGpu, pIntr, arg0, arg1) intrQueueDpc_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +DPCNODE *intrDequeueDpc_IMPL(OBJGPU *pGpu, struct Intr *pIntr, DPCQUEUE *arg0); +#ifdef __nvoc_intr_h_disabled +static inline DPCNODE *intrDequeueDpc(OBJGPU *pGpu, struct Intr *pIntr, DPCQUEUE *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NULL; +} +#else //__nvoc_intr_h_disabled +#define intrDequeueDpc(pGpu, pIntr, arg0) intrDequeueDpc_IMPL(pGpu, pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +NvBool intrIsDpcQueueEmpty_IMPL(OBJGPU *pGpu, struct Intr *pIntr, DPCQUEUE *arg0); +#ifdef __nvoc_intr_h_disabled +static inline NvBool intrIsDpcQueueEmpty(OBJGPU *pGpu, struct Intr *pIntr, DPCQUEUE *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_FALSE; +} +#else //__nvoc_intr_h_disabled +#define intrIsDpcQueueEmpty(pGpu, pIntr, arg0) intrIsDpcQueueEmpty_IMPL(pGpu, pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +void intrQueueInterruptBasedDpc_IMPL(OBJGPU *pGpu, struct Intr *pIntr, NvU16 arg0); +#ifdef __nvoc_intr_h_disabled +static inline void intrQueueInterruptBasedDpc(OBJGPU *pGpu, struct Intr *pIntr, NvU16 arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrQueueInterruptBasedDpc(pGpu, pIntr, arg0) intrQueueInterruptBasedDpc_IMPL(pGpu, pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +NvU32 intrConvertEngineMaskToPmcIntrMask_IMPL(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0); +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrConvertEngineMaskToPmcIntrMask(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrConvertEngineMaskToPmcIntrMask(pGpu, pIntr, arg0) intrConvertEngineMaskToPmcIntrMask_IMPL(pGpu, pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +void intrConvertPmcIntrMaskToEngineMask_IMPL(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, union MC_ENGINE_BITVECTOR *arg1); +#ifdef __nvoc_intr_h_disabled +static inline void intrConvertPmcIntrMaskToEngineMask(OBJGPU *pGpu, struct Intr *pIntr, NvU32 arg0, 
union MC_ENGINE_BITVECTOR *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrConvertPmcIntrMaskToEngineMask(pGpu, pIntr, arg0, arg1) intrConvertPmcIntrMaskToEngineMask_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +NvU32 intrGetVectorFromEngineId_IMPL(OBJGPU *pGpu, struct Intr *pIntr, NvU16 mcEngineId, NvBool bNonStall); +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetVectorFromEngineId(OBJGPU *pGpu, struct Intr *pIntr, NvU16 mcEngineId, NvBool bNonStall) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetVectorFromEngineId(pGpu, pIntr, mcEngineId, bNonStall) intrGetVectorFromEngineId_IMPL(pGpu, pIntr, mcEngineId, bNonStall) +#endif //__nvoc_intr_h_disabled + +NV_STATUS intrGetSmallestNotificationVector_IMPL(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *arg0); +#ifdef __nvoc_intr_h_disabled +static inline NV_STATUS intrGetSmallestNotificationVector(OBJGPU *pGpu, struct Intr *pIntr, NvU32 *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intr_h_disabled +#define intrGetSmallestNotificationVector(pGpu, pIntr, arg0) intrGetSmallestNotificationVector_IMPL(pGpu, pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +void intrSetIntrMaskUnblocked_IMPL(struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0); +#ifdef __nvoc_intr_h_disabled +static inline void intrSetIntrMaskUnblocked(struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSetIntrMaskUnblocked(pIntr, arg0) intrSetIntrMaskUnblocked_IMPL(pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +void intrGetIntrMaskUnblocked_IMPL(struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0); +#ifdef __nvoc_intr_h_disabled +static inline void intrGetIntrMaskUnblocked(struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrGetIntrMaskUnblocked(pIntr, arg0) intrGetIntrMaskUnblocked_IMPL(pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +void intrSetIntrMaskFlags_IMPL(struct Intr *pIntr, NvU32 arg0); +#ifdef __nvoc_intr_h_disabled +static inline void intrSetIntrMaskFlags(struct Intr *pIntr, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSetIntrMaskFlags(pIntr, arg0) intrSetIntrMaskFlags_IMPL(pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +NvU32 intrGetIntrMaskFlags_IMPL(struct Intr *pIntr); +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetIntrMaskFlags(struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetIntrMaskFlags(pIntr) intrGetIntrMaskFlags_IMPL(pIntr) +#endif //__nvoc_intr_h_disabled + +void intrSetDefaultIntrEn_IMPL(struct Intr *pIntr, NvU32 arg0); +#ifdef __nvoc_intr_h_disabled +static inline void intrSetDefaultIntrEn(struct Intr *pIntr, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSetDefaultIntrEn(pIntr, arg0) intrSetDefaultIntrEn_IMPL(pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +NvU32 intrGetDefaultIntrEn_IMPL(struct Intr *pIntr); +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetDefaultIntrEn(struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define 
intrGetDefaultIntrEn(pIntr) intrGetDefaultIntrEn_IMPL(pIntr) +#endif //__nvoc_intr_h_disabled + +void intrSetIntrEn_IMPL(struct Intr *pIntr, NvU32 arg0); +#ifdef __nvoc_intr_h_disabled +static inline void intrSetIntrEn(struct Intr *pIntr, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSetIntrEn(pIntr, arg0) intrSetIntrEn_IMPL(pIntr, arg0) +#endif //__nvoc_intr_h_disabled + +NvU32 intrGetIntrEn_IMPL(struct Intr *pIntr); +#ifdef __nvoc_intr_h_disabled +static inline NvU32 intrGetIntrEn(struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); + return 0; +} +#else //__nvoc_intr_h_disabled +#define intrGetIntrEn(pIntr) intrGetIntrEn_IMPL(pIntr) +#endif //__nvoc_intr_h_disabled + +void intrSaveIntrEn0FromHw_IMPL(OBJGPU *pGpu, struct Intr *pIntr); +#ifdef __nvoc_intr_h_disabled +static inline void intrSaveIntrEn0FromHw(OBJGPU *pGpu, struct Intr *pIntr) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrSaveIntrEn0FromHw(pGpu, pIntr) intrSaveIntrEn0FromHw_IMPL(pGpu, pIntr) +#endif //__nvoc_intr_h_disabled + +void intrGetGmmuInterrupts_IMPL(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1); +#ifdef __nvoc_intr_h_disabled +static inline void intrGetGmmuInterrupts(OBJGPU *pGpu, struct Intr *pIntr, union MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("Intr was disabled!"); +} +#else //__nvoc_intr_h_disabled +#define intrGetGmmuInterrupts(pGpu, pIntr, arg0, arg1) intrGetGmmuInterrupts_IMPL(pGpu, pIntr, arg0, arg1) +#endif //__nvoc_intr_h_disabled + +#undef PRIVATE_FIELD + + +// This mask is used for interrupts that should be masked off in the PMC tree +#define NV_PMC_INTR_INVALID_MASK (0) + +#define INTR_WRITE_TABLE(status, pTable, maxCount, count, entry) \ + do { \ + if ((count) < (maxCount)) \ + { \ + (pTable)[count] = entry; \ + } \ + else \ + { \ + status = NV_ERR_BUFFER_TOO_SMALL; \ + } \ + count += 1; \ + } while(0) + +#endif // INTR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_INTR_NVOC_H_ diff --git a/src/nvidia/generated/g_intr_service_nvoc.c b/src/nvidia/generated/g_intr_service_nvoc.c new file mode 100644 index 000000000..7636f6f7d --- /dev/null +++ b/src/nvidia/generated/g_intr_service_nvoc.c @@ -0,0 +1,95 @@ +#define NVOC_INTR_SERVICE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_intr_service_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x2271cc = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_IntrService(IntrService*); +void __nvoc_init_funcTable_IntrService(IntrService*); +NV_STATUS __nvoc_ctor_IntrService(IntrService*); +void __nvoc_init_dataField_IntrService(IntrService*); +void __nvoc_dtor_IntrService(IntrService*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_IntrService; + +static const struct NVOC_RTTI __nvoc_rtti_IntrService_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_IntrService, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_IntrService = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_IntrService_IntrService, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService = +{ + /*classInfo=*/ { + 
/*size=*/ sizeof(IntrService), + /*classId=*/ classId(IntrService), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "IntrService", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_IntrService, + /*pExportInfo=*/ &__nvoc_export_info_IntrService +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_IntrService = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_IntrService(IntrService *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_IntrService(IntrService *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_IntrService(IntrService *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_IntrService(pThis); + goto __nvoc_ctor_IntrService_exit; // Success + +__nvoc_ctor_IntrService_exit: + + return status; +} + +static void __nvoc_init_funcTable_IntrService_1(IntrService *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__intrservRegisterIntrService__ = &intrservRegisterIntrService_IMPL; + + pThis->__intrservClearInterrupt__ = &intrservClearInterrupt_IMPL; + + pThis->__intrservServiceInterrupt__ = &intrservServiceInterrupt_IMPL; + + pThis->__intrservServiceNotificationInterrupt__ = &intrservServiceNotificationInterrupt_IMPL; +} + +void __nvoc_init_funcTable_IntrService(IntrService *pThis) { + __nvoc_init_funcTable_IntrService_1(pThis); +} + +void __nvoc_init_IntrService(IntrService *pThis) { + pThis->__nvoc_pbase_IntrService = pThis; + __nvoc_init_funcTable_IntrService(pThis); +} + diff --git a/src/nvidia/generated/g_intr_service_nvoc.h b/src/nvidia/generated/g_intr_service_nvoc.h new file mode 100644 index 000000000..214be5ad0 --- /dev/null +++ b/src/nvidia/generated/g_intr_service_nvoc.h @@ -0,0 +1,157 @@ +#ifndef _G_INTR_SERVICE_NVOC_H_ +#define _G_INTR_SERVICE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_intr_service_nvoc.h" + +#ifndef INTR_SERVICE_H +#define INTR_SERVICE_H + +/*! + * @brief Provides definitions for IntrService class, + * which is an interface for classes which can handle interrupts. 
+ */ + +#include "core/core.h" +#include "core/thread_state.h" +#include "gpu/gpu.h" +#include "kernel/gpu/intr/engine_idx.h" + +struct IntrService; + +#ifndef __NVOC_CLASS_IntrService_TYPEDEF__ +#define __NVOC_CLASS_IntrService_TYPEDEF__ +typedef struct IntrService IntrService; +#endif /* __NVOC_CLASS_IntrService_TYPEDEF__ */ + +#ifndef __nvoc_class_id_IntrService +#define __nvoc_class_id_IntrService 0x2271cc +#endif /* __nvoc_class_id_IntrService */ + + + +typedef struct { + struct IntrService *pInterruptService; + struct IntrService *pNotificationService; + NvBool bFifoWaiveNotify; +} IntrServiceRecord; + +typedef struct { + NvU16 engineIdx; +} IntrServiceClearInterruptArguments; + +typedef struct { + NvU16 engineIdx; +} IntrServiceServiceInterruptArguments; + +typedef struct { + THREAD_STATE_NODE *pThreadState; + NvU16 engineIdx; +} IntrServiceServiceNotificationInterruptArguments; + +#ifdef NVOC_INTR_SERVICE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct IntrService { + const struct NVOC_RTTI *__nvoc_rtti; + struct IntrService *__nvoc_pbase_IntrService; + void (*__intrservRegisterIntrService__)(struct OBJGPU *, struct IntrService *, IntrServiceRecord *); + NvBool (*__intrservClearInterrupt__)(struct OBJGPU *, struct IntrService *, IntrServiceClearInterruptArguments *); + NvU32 (*__intrservServiceInterrupt__)(struct OBJGPU *, struct IntrService *, IntrServiceServiceInterruptArguments *); + NV_STATUS (*__intrservServiceNotificationInterrupt__)(struct OBJGPU *, struct IntrService *, IntrServiceServiceNotificationInterruptArguments *); +}; + +#ifndef __NVOC_CLASS_IntrService_TYPEDEF__ +#define __NVOC_CLASS_IntrService_TYPEDEF__ +typedef struct IntrService IntrService; +#endif /* __NVOC_CLASS_IntrService_TYPEDEF__ */ + +#ifndef __nvoc_class_id_IntrService +#define __nvoc_class_id_IntrService 0x2271cc +#endif /* __nvoc_class_id_IntrService */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +#define __staticCast_IntrService(pThis) \ + ((pThis)->__nvoc_pbase_IntrService) + +#ifdef __nvoc_intr_service_h_disabled +#define __dynamicCast_IntrService(pThis) ((IntrService*)NULL) +#else //__nvoc_intr_service_h_disabled +#define __dynamicCast_IntrService(pThis) \ + ((IntrService*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(IntrService))) +#endif //__nvoc_intr_service_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_IntrService(IntrService**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_IntrService(IntrService**, Dynamic*, NvU32); +#define __objCreate_IntrService(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_IntrService((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define intrservRegisterIntrService(pGpu, pIntrService, pRecords) intrservRegisterIntrService_DISPATCH(pGpu, pIntrService, pRecords) +#define intrservClearInterrupt(pGpu, pIntrService, pParams) intrservClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define intrservServiceInterrupt(pGpu, pIntrService, pParams) intrservServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define intrservServiceNotificationInterrupt(pGpu, pIntrService, pParams) intrservServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams) +void intrservRegisterIntrService_IMPL(struct OBJGPU *pGpu, struct IntrService *pIntrService, IntrServiceRecord pRecords[155]); + +static inline void intrservRegisterIntrService_DISPATCH(struct OBJGPU *pGpu, struct IntrService *pIntrService, IntrServiceRecord 
pRecords[155]) { + pIntrService->__intrservRegisterIntrService__(pGpu, pIntrService, pRecords); +} + +NvBool intrservClearInterrupt_IMPL(struct OBJGPU *pGpu, struct IntrService *pIntrService, IntrServiceClearInterruptArguments *pParams); + +static inline NvBool intrservClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct IntrService *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__intrservClearInterrupt__(pGpu, pIntrService, pParams); +} + +NvU32 intrservServiceInterrupt_IMPL(struct OBJGPU *pGpu, struct IntrService *pIntrService, IntrServiceServiceInterruptArguments *pParams); + +static inline NvU32 intrservServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct IntrService *pIntrService, IntrServiceServiceInterruptArguments *pParams) { + return pIntrService->__intrservServiceInterrupt__(pGpu, pIntrService, pParams); +} + +NV_STATUS intrservServiceNotificationInterrupt_IMPL(struct OBJGPU *pGpu, struct IntrService *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams); + +static inline NV_STATUS intrservServiceNotificationInterrupt_DISPATCH(struct OBJGPU *pGpu, struct IntrService *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return pIntrService->__intrservServiceNotificationInterrupt__(pGpu, pIntrService, pParams); +} + +#undef PRIVATE_FIELD + + +#endif // INTR_SERVICE_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_INTR_SERVICE_NVOC_H_ diff --git a/src/nvidia/generated/g_intrable_nvoc.c b/src/nvidia/generated/g_intrable_nvoc.c new file mode 100644 index 000000000..5e6406d64 --- /dev/null +++ b/src/nvidia/generated/g_intrable_nvoc.c @@ -0,0 +1,117 @@ +#define NVOC_INTRABLE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_intrable_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x31ccb7 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJINTRABLE; + +void __nvoc_init_OBJINTRABLE(OBJINTRABLE*, RmHalspecOwner* ); +void __nvoc_init_funcTable_OBJINTRABLE(OBJINTRABLE*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJINTRABLE(OBJINTRABLE*, RmHalspecOwner* ); +void __nvoc_init_dataField_OBJINTRABLE(OBJINTRABLE*, RmHalspecOwner* ); +void __nvoc_dtor_OBJINTRABLE(OBJINTRABLE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJINTRABLE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJINTRABLE_OBJINTRABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJINTRABLE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJINTRABLE, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJINTRABLE = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_OBJINTRABLE_OBJINTRABLE, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJINTRABLE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJINTRABLE), + /*classId=*/ classId(OBJINTRABLE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJINTRABLE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJINTRABLE, + /*pExportInfo=*/ &__nvoc_export_info_OBJINTRABLE +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJINTRABLE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJINTRABLE(OBJINTRABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJINTRABLE(OBJINTRABLE *pThis, RmHalspecOwner *pRmhalspecowner) { + 
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJINTRABLE(OBJINTRABLE *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_OBJINTRABLE(pThis, pRmhalspecowner); + + status = __nvoc_intrableConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJINTRABLE_fail__init; + goto __nvoc_ctor_OBJINTRABLE_exit; // Success + +__nvoc_ctor_OBJINTRABLE_fail__init: +__nvoc_ctor_OBJINTRABLE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJINTRABLE_1(OBJINTRABLE *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__intrableGetNotificationIntrVector__ = &intrableGetNotificationIntrVector_IMPL; + + pThis->__intrableGetPhysicalIntrVectors__ = &intrableGetPhysicalIntrVectors_IMPL; + + pThis->__intrableGetKernelIntrVectors__ = &intrableGetKernelIntrVectors_IMPL; + + pThis->__intrableSetNotificationIntrVector__ = &intrableSetNotificationIntrVector_IMPL; +} + +void __nvoc_init_funcTable_OBJINTRABLE(OBJINTRABLE *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_OBJINTRABLE_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJINTRABLE(OBJINTRABLE *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_OBJINTRABLE = pThis; + __nvoc_init_funcTable_OBJINTRABLE(pThis, pRmhalspecowner); +} + diff --git a/src/nvidia/generated/g_intrable_nvoc.h b/src/nvidia/generated/g_intrable_nvoc.h new file mode 100644 index 000000000..81687e343 --- /dev/null +++ b/src/nvidia/generated/g_intrable_nvoc.h @@ -0,0 +1,208 @@ +#ifndef _G_INTRABLE_NVOC_H_ +#define _G_INTRABLE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_intrable_nvoc.h" + +#ifndef INTRABLE_H +#define INTRABLE_H + +/*! + * @file intrable.h + * @brief Provides definitions for all OBJINTRABLE data structures and interfaces. + */ + +#include "core/core.h" +#include "gpu/gpu_halspec.h" + +typedef struct OBJINTRABLE *POBJINTRABLE; + +/*! + * Interface class for all Intrable modules. + */ +#ifdef NVOC_INTRABLE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJINTRABLE { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJINTRABLE *__nvoc_pbase_OBJINTRABLE; + NV_STATUS (*__intrableGetNotificationIntrVector__)(OBJGPU *, struct OBJINTRABLE *, NvU32 *); + NV_STATUS (*__intrableGetPhysicalIntrVectors__)(OBJGPU *, struct OBJINTRABLE *, NvU32, NvU32 *, NvU32 *, NvU32 *); + NV_STATUS (*__intrableGetKernelIntrVectors__)(OBJGPU *, struct OBJINTRABLE *, NvU32, NvU32 *, NvU32 *, NvU32 *); + NV_STATUS (*__intrableSetNotificationIntrVector__)(OBJGPU *, struct OBJINTRABLE *, NvU32); + NvU32 partitionAssignedNotificationVector; + NvU32 originalNotificationIntrVector; +}; + +#ifndef __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ +#define __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ +typedef struct OBJINTRABLE OBJINTRABLE; +#endif /* __NVOC_CLASS_OBJINTRABLE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJINTRABLE +#define __nvoc_class_id_OBJINTRABLE 0x31ccb7 +#endif /* __nvoc_class_id_OBJINTRABLE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJINTRABLE; + +#define __staticCast_OBJINTRABLE(pThis) \ + ((pThis)->__nvoc_pbase_OBJINTRABLE) + +#ifdef __nvoc_intrable_h_disabled +#define __dynamicCast_OBJINTRABLE(pThis) ((OBJINTRABLE*)NULL) +#else //__nvoc_intrable_h_disabled +#define __dynamicCast_OBJINTRABLE(pThis) \ + ((OBJINTRABLE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJINTRABLE))) +#endif //__nvoc_intrable_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJINTRABLE(OBJINTRABLE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJINTRABLE(OBJINTRABLE**, Dynamic*, NvU32); +#define __objCreate_OBJINTRABLE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJINTRABLE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define intrableGetNotificationIntrVector(pGpu, pIntrable, pIntrVector) intrableGetNotificationIntrVector_DISPATCH(pGpu, pIntrable, pIntrVector) +#define intrableGetPhysicalIntrVectors(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount) intrableGetPhysicalIntrVectors_DISPATCH(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount) +#define intrableGetKernelIntrVectors(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount) intrableGetKernelIntrVectors_DISPATCH(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount) +#define intrableSetNotificationIntrVector(pGpu, pIntrable, intrVector) intrableSetNotificationIntrVector_DISPATCH(pGpu, pIntrable, intrVector) +static inline NvU32 intrableUpdateIntrCtrlValue_4a4dee(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 intrCtrl, NvU32 routing) { + return 0; +} + +#ifdef 
__nvoc_intrable_h_disabled +static inline NvU32 intrableUpdateIntrCtrlValue(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 intrCtrl, NvU32 routing) { + NV_ASSERT_FAILED_PRECOMP("OBJINTRABLE was disabled!"); + return 0; +} +#else //__nvoc_intrable_h_disabled +#define intrableUpdateIntrCtrlValue(pGpu, pIntrable, intrCtrl, routing) intrableUpdateIntrCtrlValue_4a4dee(pGpu, pIntrable, intrCtrl, routing) +#endif //__nvoc_intrable_h_disabled + +#define intrableUpdateIntrCtrlValue_HAL(pGpu, pIntrable, intrCtrl, routing) intrableUpdateIntrCtrlValue(pGpu, pIntrable, intrCtrl, routing) + +static inline void intrableSetRouting_b3696a(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 intrCtrl, NvU32 intrCtrlReg, NvU32 routing) { + return; +} + +#ifdef __nvoc_intrable_h_disabled +static inline void intrableSetRouting(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 intrCtrl, NvU32 intrCtrlReg, NvU32 routing) { + NV_ASSERT_FAILED_PRECOMP("OBJINTRABLE was disabled!"); +} +#else //__nvoc_intrable_h_disabled +#define intrableSetRouting(pGpu, pIntrable, intrCtrl, intrCtrlReg, routing) intrableSetRouting_b3696a(pGpu, pIntrable, intrCtrl, intrCtrlReg, routing) +#endif //__nvoc_intrable_h_disabled + +#define intrableSetRouting_HAL(pGpu, pIntrable, intrCtrl, intrCtrlReg, routing) intrableSetRouting(pGpu, pIntrable, intrCtrl, intrCtrlReg, routing) + +NV_STATUS intrableGetNotificationIntrVector_IMPL(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 *pIntrVector); + +static inline NV_STATUS intrableGetNotificationIntrVector_DISPATCH(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 *pIntrVector) { + return pIntrable->__intrableGetNotificationIntrVector__(pGpu, pIntrable, pIntrVector); +} + +NV_STATUS intrableGetPhysicalIntrVectors_IMPL(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount); + +static inline NV_STATUS intrableGetPhysicalIntrVectors_DISPATCH(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return pIntrable->__intrableGetPhysicalIntrVectors__(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +NV_STATUS intrableGetKernelIntrVectors_IMPL(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount); + +static inline NV_STATUS intrableGetKernelIntrVectors_DISPATCH(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return pIntrable->__intrableGetKernelIntrVectors__(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +NV_STATUS intrableSetNotificationIntrVector_IMPL(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 intrVector); + +static inline NV_STATUS intrableSetNotificationIntrVector_DISPATCH(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 intrVector) { + return pIntrable->__intrableSetNotificationIntrVector__(pGpu, pIntrable, intrVector); +} + +NV_STATUS intrableConstruct_IMPL(struct OBJINTRABLE *arg_pIntrable); +#define __nvoc_intrableConstruct(arg_pIntrable) intrableConstruct_IMPL(arg_pIntrable) +NV_STATUS intrableCacheAndSetPartitionNotificationIntrVector_IMPL(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 intrVector); +#ifdef __nvoc_intrable_h_disabled +static inline NV_STATUS intrableCacheAndSetPartitionNotificationIntrVector(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable, NvU32 intrVector) { + NV_ASSERT_FAILED_PRECOMP("OBJINTRABLE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intrable_h_disabled 
+#define intrableCacheAndSetPartitionNotificationIntrVector(pGpu, pIntrable, intrVector) intrableCacheAndSetPartitionNotificationIntrVector_IMPL(pGpu, pIntrable, intrVector) +#endif //__nvoc_intrable_h_disabled + +NV_STATUS intrableSetPartitionNotificationIntrVector_IMPL(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable); +#ifdef __nvoc_intrable_h_disabled +static inline NV_STATUS intrableSetPartitionNotificationIntrVector(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable) { + NV_ASSERT_FAILED_PRECOMP("OBJINTRABLE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intrable_h_disabled +#define intrableSetPartitionNotificationIntrVector(pGpu, pIntrable) intrableSetPartitionNotificationIntrVector_IMPL(pGpu, pIntrable) +#endif //__nvoc_intrable_h_disabled + +NV_STATUS intrableGetPartitionNotificationIntrVector_IMPL(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable); +#ifdef __nvoc_intrable_h_disabled +static inline NV_STATUS intrableGetPartitionNotificationIntrVector(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable) { + NV_ASSERT_FAILED_PRECOMP("OBJINTRABLE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intrable_h_disabled +#define intrableGetPartitionNotificationIntrVector(pGpu, pIntrable) intrableGetPartitionNotificationIntrVector_IMPL(pGpu, pIntrable) +#endif //__nvoc_intrable_h_disabled + +NV_STATUS intrableRevertNotificationIntrVector_IMPL(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable); +#ifdef __nvoc_intrable_h_disabled +static inline NV_STATUS intrableRevertNotificationIntrVector(OBJGPU *pGpu, struct OBJINTRABLE *pIntrable) { + NV_ASSERT_FAILED_PRECOMP("OBJINTRABLE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_intrable_h_disabled +#define intrableRevertNotificationIntrVector(pGpu, pIntrable) intrableRevertNotificationIntrVector_IMPL(pGpu, pIntrable) +#endif //__nvoc_intrable_h_disabled + +#undef PRIVATE_FIELD + + +#define INTRABLE_MAX_INTR_PER_ENGINE (1) +#define INTRABLE_MAX_INTR_PER_HOST_ENGINE (1) + +#define INTR_ROUTE_DISABLE 0 +#define INTR_ROUTE_PHYSICAL 1 +#define INTR_ROUTE_KERNEL 2 + +#endif // INTRABLE_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_INTRABLE_NVOC_H_ diff --git a/src/nvidia/generated/g_io_vaspace_nvoc.c b/src/nvidia/generated/g_io_vaspace_nvoc.c new file mode 100644 index 000000000..eff89cc51 --- /dev/null +++ b/src/nvidia/generated/g_io_vaspace_nvoc.c @@ -0,0 +1,349 @@ +#define NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_io_vaspace_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x28ed9c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +void __nvoc_init_OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE*); +NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE*); +void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJIOVASPACE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_OBJIOVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJIOVASPACE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJIOVASPACE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_Object = { + /*pClassDef=*/ 
&__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_OBJVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJVASPACE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJIOVASPACE = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJIOVASPACE_OBJIOVASPACE, + &__nvoc_rtti_OBJIOVASPACE_OBJVASPACE, + &__nvoc_rtti_OBJIOVASPACE_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJIOVASPACE), + /*classId=*/ classId(OBJIOVASPACE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJIOVASPACE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJIOVASPACE, + /*pCastInfo=*/ &__nvoc_castinfo_OBJIOVASPACE, + /*pExportInfo=*/ &__nvoc_export_info_OBJIOVASPACE +}; + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceConstruct_(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return iovaspaceConstruct_((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceAlloc(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return iovaspaceAlloc((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceFree(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return iovaspaceFree((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), vAddr); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return iovaspaceApplyDefaultAlignment((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return iovaspaceIncAllocRefCnt((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), vAddr); +} + +static NvU64 __nvoc_thunk_OBJIOVASPACE_vaspaceGetVaStart(struct OBJVASPACE *pVAS) { + return iovaspaceGetVaStart((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvU64 __nvoc_thunk_OBJIOVASPACE_vaspaceGetVaLimit(struct OBJVASPACE *pVAS) { + return iovaspaceGetVaLimit((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceGetVasInfo(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return iovaspaceGetVasInfo((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pParams); +} + +static NvBool __nvoc_thunk_OBJVASPACE_iovaspaceIsMirrored(struct OBJIOVASPACE *pVAS) { + return 
vaspaceIsMirrored((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJVASPACE_iovaspaceIsExternallyOwned(struct OBJIOVASPACE *pVAS) { + return vaspaceIsExternallyOwned((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJVASPACE_iovaspaceIsInternalVaRestricted(struct OBJIOVASPACE *pVAS) { + return vaspaceIsInternalVaRestricted((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvU32 __nvoc_thunk_OBJVASPACE_iovaspaceGetFlags(struct OBJIOVASPACE *pVAS) { + return vaspaceGetFlags((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJVASPACE_iovaspaceIsAtsEnabled(struct OBJIOVASPACE *pVAS) { + return vaspaceIsAtsEnabled((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvU32 __nvoc_thunk_OBJVASPACE_iovaspaceGetBigPageSize(struct OBJIOVASPACE *pVAS) { + return vaspaceGetBigPageSize((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_iovaspaceGetPteInfo(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) { + return vaspaceGetPteInfo((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu, pParams, pPhysAddr); +} + +static PMEMORY_DESCRIPTOR __nvoc_thunk_OBJVASPACE_iovaspaceGetPageDirBase(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu) { + return vaspaceGetPageDirBase((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu); +} + +static PMEMORY_DESCRIPTOR __nvoc_thunk_OBJVASPACE_iovaspaceGetKernelPageDirBase(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu) { + return vaspaceGetKernelPageDirBase((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu); +} + +static NvU32 __nvoc_thunk_OBJVASPACE_iovaspaceGetMapPageSize(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) { + return vaspaceGetMapPageSize((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu, pMemBlock); +} + +static struct OBJEHEAP *__nvoc_thunk_OBJVASPACE_iovaspaceGetHeap(struct OBJIOVASPACE *pVAS) { + return vaspaceGetHeap((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static NvBool __nvoc_thunk_OBJVASPACE_iovaspaceIsFaultCapable(struct OBJIOVASPACE *pVAS) { + return vaspaceIsFaultCapable((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset)); +} + +static void __nvoc_thunk_OBJVASPACE_iovaspaceUnmap(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) { + vaspaceUnmap((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu, vaLo, vaHi); +} + +static void __nvoc_thunk_OBJVASPACE_iovaspaceInvalidateTlb(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) { + vaspaceInvalidateTlb((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu, type); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_iovaspacePinRootPageDir(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu) { + return vaspacePinRootPageDir((struct OBJVASPACE *)(((unsigned char *)pVAS) + 
__nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu); +} + +static void __nvoc_thunk_OBJVASPACE_iovaspaceUnpinRootPageDir(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu) { + vaspaceUnpinRootPageDir((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_iovaspaceSetPteInfo(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + return vaspaceSetPteInfo((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu, pParams); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_iovaspaceGetPasid(struct OBJIOVASPACE *pVAS, NvU32 *pPasid) { + return vaspaceGetPasid((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pPasid); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_iovaspaceGetPageTableInfo(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + return vaspaceGetPageTableInfo((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_iovaspaceReserveMempool(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) { + return vaspaceReserveMempool((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu, hClient, size, pageSizeLockMask, flags); +} + +static NV_STATUS __nvoc_thunk_OBJVASPACE_iovaspaceMap(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) { + return vaspaceMap((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pGpu, vaLo, vaHi, pTarget, flags); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJIOVASPACE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJVASPACE(OBJVASPACE*); +void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE *pThis) { + __nvoc_iovaspaceDestruct(pThis); + __nvoc_dtor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE* ); +NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + if (status != NV_OK) goto __nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE; + __nvoc_init_dataField_OBJIOVASPACE(pThis); + goto __nvoc_ctor_OBJIOVASPACE_exit; // Success + +__nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE: +__nvoc_ctor_OBJIOVASPACE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJIOVASPACE_1(OBJIOVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__iovaspaceConstruct___ = &iovaspaceConstruct__IMPL; + + pThis->__iovaspaceAlloc__ = &iovaspaceAlloc_IMPL; + + pThis->__iovaspaceFree__ = &iovaspaceFree_IMPL; + + pThis->__iovaspaceApplyDefaultAlignment__ = &iovaspaceApplyDefaultAlignment_IMPL; + + pThis->__iovaspaceIncAllocRefCnt__ = &iovaspaceIncAllocRefCnt_IMPL; + + pThis->__iovaspaceGetVaStart__ = &iovaspaceGetVaStart_IMPL; + + pThis->__iovaspaceGetVaLimit__ = &iovaspaceGetVaLimit_IMPL; + + pThis->__iovaspaceGetVasInfo__ = &iovaspaceGetVasInfo_IMPL; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceConstruct___ = &__nvoc_thunk_OBJIOVASPACE_vaspaceConstruct_; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceAlloc__ = 
&__nvoc_thunk_OBJIOVASPACE_vaspaceAlloc; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceFree__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceFree; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceApplyDefaultAlignment__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceIncAllocRefCnt__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVaStart__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVaStart; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVaLimit__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVaLimit; + + pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVasInfo__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVasInfo; + + pThis->__iovaspaceIsMirrored__ = &__nvoc_thunk_OBJVASPACE_iovaspaceIsMirrored; + + pThis->__iovaspaceIsExternallyOwned__ = &__nvoc_thunk_OBJVASPACE_iovaspaceIsExternallyOwned; + + pThis->__iovaspaceIsInternalVaRestricted__ = &__nvoc_thunk_OBJVASPACE_iovaspaceIsInternalVaRestricted; + + pThis->__iovaspaceGetFlags__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetFlags; + + pThis->__iovaspaceIsAtsEnabled__ = &__nvoc_thunk_OBJVASPACE_iovaspaceIsAtsEnabled; + + pThis->__iovaspaceGetBigPageSize__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetBigPageSize; + + pThis->__iovaspaceGetPteInfo__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetPteInfo; + + pThis->__iovaspaceGetPageDirBase__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetPageDirBase; + + pThis->__iovaspaceGetKernelPageDirBase__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetKernelPageDirBase; + + pThis->__iovaspaceGetMapPageSize__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetMapPageSize; + + pThis->__iovaspaceGetHeap__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetHeap; + + pThis->__iovaspaceIsFaultCapable__ = &__nvoc_thunk_OBJVASPACE_iovaspaceIsFaultCapable; + + pThis->__iovaspaceUnmap__ = &__nvoc_thunk_OBJVASPACE_iovaspaceUnmap; + + pThis->__iovaspaceInvalidateTlb__ = &__nvoc_thunk_OBJVASPACE_iovaspaceInvalidateTlb; + + pThis->__iovaspacePinRootPageDir__ = &__nvoc_thunk_OBJVASPACE_iovaspacePinRootPageDir; + + pThis->__iovaspaceUnpinRootPageDir__ = &__nvoc_thunk_OBJVASPACE_iovaspaceUnpinRootPageDir; + + pThis->__iovaspaceSetPteInfo__ = &__nvoc_thunk_OBJVASPACE_iovaspaceSetPteInfo; + + pThis->__iovaspaceGetPasid__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetPasid; + + pThis->__iovaspaceGetPageTableInfo__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetPageTableInfo; + + pThis->__iovaspaceReserveMempool__ = &__nvoc_thunk_OBJVASPACE_iovaspaceReserveMempool; + + pThis->__iovaspaceMap__ = &__nvoc_thunk_OBJVASPACE_iovaspaceMap; +} + +void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE *pThis) { + __nvoc_init_funcTable_OBJIOVASPACE_1(pThis); +} + +void __nvoc_init_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_OBJIOVASPACE(OBJIOVASPACE *pThis) { + pThis->__nvoc_pbase_OBJIOVASPACE = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJVASPACE = &pThis->__nvoc_base_OBJVASPACE; + __nvoc_init_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE); + __nvoc_init_funcTable_OBJIOVASPACE(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJIOVASPACE *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJIOVASPACE)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJIOVASPACE)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJIOVASPACE); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = 
dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJIOVASPACE(pThis); + status = __nvoc_ctor_OBJIOVASPACE(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJIOVASPACE_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJIOVASPACE_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJIOVASPACE(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_io_vaspace_nvoc.h b/src/nvidia/generated/g_io_vaspace_nvoc.h new file mode 100644 index 000000000..a63b3a9c3 --- /dev/null +++ b/src/nvidia/generated/g_io_vaspace_nvoc.h @@ -0,0 +1,417 @@ +#ifndef _G_IO_VASPACE_NVOC_H_ +#define _G_IO_VASPACE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_io_vaspace_nvoc.h" + +#ifndef _IOVASPACE_H_ +#define _IOVASPACE_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: IOVASPACE.H * +* Defines and structures used for IOMMU Virtual Address Space Object. * +\***************************************************************************/ + +#include "mem_mgr/vaspace.h" // base class object header + +#define NV_IOVA_DOMAIN_NONE (~(NvU32)0) + +typedef struct OBJIOVASPACE *POBJIOVASPACE; + +#ifndef __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +typedef struct OBJIOVASPACE OBJIOVASPACE; +#endif /* __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJIOVASPACE +#define __nvoc_class_id_OBJIOVASPACE 0x28ed9c +#endif /* __nvoc_class_id_OBJIOVASPACE */ + + + +typedef struct IOVAMAPPING IOVAMAPPING; +typedef struct IOVAMAPPING *PIOVAMAPPING; + +// Opaque pointer for the OS layer to use +typedef struct OS_IOVA_MAPPING_DATA *POS_IOVA_MAPPING_DATA; + +struct IOVAMAPPING +{ + NvU32 iovaspaceId; + + // + // Refcount of the mapping. 
+ // + // Each iovaspaceAcquireMapping() call increments the refcount, and each + // iovaspaceReleaseMapping() call decrements it. Additionally, submappings + // increment the refcount of their root mapping on creation and only + // decrement it when they are destroyed. + // + // Mappings are destroyed when their refcount reaches 0. + // + // Notably a mapping can be destroyed regardless of its refcount with + // iovaspaceDestroyMapping(). Destroying a root mapping destroys all of its + // submappings as well. + // + NvU32 refcount; + + PMEMORY_DESCRIPTOR pPhysMemDesc; + + // + // Maintain a hierarchy of IOVA mappings. The "root" mapping will generally + // be tied to the root memory descriptor. That mapping can have submappings + // within the same IOVA space that correspond to submemory descriptors of + // the root memory descriptor. + // + // Also, the root memory descriptor may have multiple IOVA mappings (up to + // one per IOVA space), so those need to be tracked in association directly + // with the root memory descriptor. + // + // The memory descriptor (root or submemory) always points to a single IOVA + // mapping. For root memory descriptors, that mapping is the head of a list + // in which each mapping covers a unique IOVA space. For submemory + // descriptors, there can only be one IOVA mapping, corresponding to the + // IOVA space of the pGpu associated with the submemory descriptor. + // + union + { + struct IOVAMAPPING *pParent; + struct IOVAMAPPING *pChildren; + } link; + + // + // For root mappings, this points to the next root mapping for the same + // parent physical memory descriptor (e.g., a root mapping for a different + // IOVA space). + // + // For submappings, this instead points to the next submapping of the + // parent root mapping, since a submemory descriptor may only have a single + // IOVA mapping (which is a submapping of an IOVA mapping on the root + // memory descriptor). + // + struct IOVAMAPPING *pNext; + + // OS data associated with this mapping. Core RM doesn't touch this. + POS_IOVA_MAPPING_DATA pOsData; + + // + // If the memory is contiguous, this array consists of one element. + // If the memory is discontiguous, this array is actually larger and has + // one entry for each physical page in pPhysMemDesc. As a result, this + // structure must be allocated from the heap. + // + RmPhysAddr iovaArray[1]; + // WARNING: DO NOT place anything behind the IOVA array! +}; + +/*! + * Virtual address space for a system's IOMMU translation. 
+ */ +#ifdef NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJIOVASPACE { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJVASPACE __nvoc_base_OBJVASPACE; + struct Object *__nvoc_pbase_Object; + struct OBJVASPACE *__nvoc_pbase_OBJVASPACE; + struct OBJIOVASPACE *__nvoc_pbase_OBJIOVASPACE; + NV_STATUS (*__iovaspaceConstruct___)(struct OBJIOVASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32); + NV_STATUS (*__iovaspaceAlloc__)(struct OBJIOVASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *); + NV_STATUS (*__iovaspaceFree__)(struct OBJIOVASPACE *, NvU64); + NV_STATUS (*__iovaspaceApplyDefaultAlignment__)(struct OBJIOVASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *); + NV_STATUS (*__iovaspaceIncAllocRefCnt__)(struct OBJIOVASPACE *, NvU64); + NvU64 (*__iovaspaceGetVaStart__)(struct OBJIOVASPACE *); + NvU64 (*__iovaspaceGetVaLimit__)(struct OBJIOVASPACE *); + NV_STATUS (*__iovaspaceGetVasInfo__)(struct OBJIOVASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); + NvBool (*__iovaspaceIsMirrored__)(struct OBJIOVASPACE *); + NvBool (*__iovaspaceIsExternallyOwned__)(struct OBJIOVASPACE *); + NvBool (*__iovaspaceIsInternalVaRestricted__)(struct OBJIOVASPACE *); + NvU32 (*__iovaspaceGetFlags__)(struct OBJIOVASPACE *); + NvBool (*__iovaspaceIsAtsEnabled__)(struct OBJIOVASPACE *); + NvU32 (*__iovaspaceGetBigPageSize__)(struct OBJIOVASPACE *); + NV_STATUS (*__iovaspaceGetPteInfo__)(struct OBJIOVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *, RmPhysAddr *); + PMEMORY_DESCRIPTOR (*__iovaspaceGetPageDirBase__)(struct OBJIOVASPACE *, struct OBJGPU *); + PMEMORY_DESCRIPTOR (*__iovaspaceGetKernelPageDirBase__)(struct OBJIOVASPACE *, struct OBJGPU *); + NvU32 (*__iovaspaceGetMapPageSize__)(struct OBJIOVASPACE *, struct OBJGPU *, EMEMBLOCK *); + struct OBJEHEAP *(*__iovaspaceGetHeap__)(struct OBJIOVASPACE *); + NvBool (*__iovaspaceIsFaultCapable__)(struct OBJIOVASPACE *); + void (*__iovaspaceUnmap__)(struct OBJIOVASPACE *, struct OBJGPU *, const NvU64, const NvU64); + void (*__iovaspaceInvalidateTlb__)(struct OBJIOVASPACE *, struct OBJGPU *, VAS_PTE_UPDATE_TYPE); + NV_STATUS (*__iovaspacePinRootPageDir__)(struct OBJIOVASPACE *, struct OBJGPU *); + void (*__iovaspaceUnpinRootPageDir__)(struct OBJIOVASPACE *, struct OBJGPU *); + NV_STATUS (*__iovaspaceSetPteInfo__)(struct OBJIOVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *); + NV_STATUS (*__iovaspaceGetPasid__)(struct OBJIOVASPACE *, NvU32 *); + NV_STATUS (*__iovaspaceGetPageTableInfo__)(struct OBJIOVASPACE *, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *); + NV_STATUS (*__iovaspaceReserveMempool__)(struct OBJIOVASPACE *, struct OBJGPU *, NvHandle, NvU64, NvU64, NvU32); + NV_STATUS (*__iovaspaceMap__)(struct OBJIOVASPACE *, struct OBJGPU *, const NvU64, const NvU64, const MMU_MAP_TARGET *, const VAS_MAP_FLAGS); + NvU64 mappingCount; +}; + +#ifndef __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ +typedef struct OBJIOVASPACE OBJIOVASPACE; +#endif /* __NVOC_CLASS_OBJIOVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJIOVASPACE +#define __nvoc_class_id_OBJIOVASPACE 0x28ed9c +#endif /* __nvoc_class_id_OBJIOVASPACE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE; + +#define __staticCast_OBJIOVASPACE(pThis) \ + ((pThis)->__nvoc_pbase_OBJIOVASPACE) + +#ifdef __nvoc_io_vaspace_h_disabled +#define __dynamicCast_OBJIOVASPACE(pThis) ((OBJIOVASPACE*)NULL) 
+#else //__nvoc_io_vaspace_h_disabled +#define __dynamicCast_OBJIOVASPACE(pThis) \ + ((OBJIOVASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJIOVASPACE))) +#endif //__nvoc_io_vaspace_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE**, Dynamic*, NvU32); +#define __objCreate_OBJIOVASPACE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJIOVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define iovaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) iovaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) +#define iovaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) iovaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) +#define iovaspaceFree(pVAS, vAddr) iovaspaceFree_DISPATCH(pVAS, vAddr) +#define iovaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) iovaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) +#define iovaspaceIncAllocRefCnt(pVAS, vAddr) iovaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr) +#define iovaspaceGetVaStart(pVAS) iovaspaceGetVaStart_DISPATCH(pVAS) +#define iovaspaceGetVaLimit(pVAS) iovaspaceGetVaLimit_DISPATCH(pVAS) +#define iovaspaceGetVasInfo(pVAS, pParams) iovaspaceGetVasInfo_DISPATCH(pVAS, pParams) +#define iovaspaceIsMirrored(pVAS) iovaspaceIsMirrored_DISPATCH(pVAS) +#define iovaspaceIsExternallyOwned(pVAS) iovaspaceIsExternallyOwned_DISPATCH(pVAS) +#define iovaspaceIsInternalVaRestricted(pVAS) iovaspaceIsInternalVaRestricted_DISPATCH(pVAS) +#define iovaspaceGetFlags(pVAS) iovaspaceGetFlags_DISPATCH(pVAS) +#define iovaspaceIsAtsEnabled(pVAS) iovaspaceIsAtsEnabled_DISPATCH(pVAS) +#define iovaspaceGetBigPageSize(pVAS) iovaspaceGetBigPageSize_DISPATCH(pVAS) +#define iovaspaceGetPteInfo(pVAS, pGpu, pParams, pPhysAddr) iovaspaceGetPteInfo_DISPATCH(pVAS, pGpu, pParams, pPhysAddr) +#define iovaspaceGetPageDirBase(pVAS, pGpu) iovaspaceGetPageDirBase_DISPATCH(pVAS, pGpu) +#define iovaspaceGetKernelPageDirBase(pVAS, pGpu) iovaspaceGetKernelPageDirBase_DISPATCH(pVAS, pGpu) +#define iovaspaceGetMapPageSize(pVAS, pGpu, pMemBlock) iovaspaceGetMapPageSize_DISPATCH(pVAS, pGpu, pMemBlock) +#define iovaspaceGetHeap(pVAS) iovaspaceGetHeap_DISPATCH(pVAS) +#define iovaspaceIsFaultCapable(pVAS) iovaspaceIsFaultCapable_DISPATCH(pVAS) +#define iovaspaceUnmap(pVAS, pGpu, vaLo, vaHi) iovaspaceUnmap_DISPATCH(pVAS, pGpu, vaLo, vaHi) +#define iovaspaceInvalidateTlb(pVAS, pGpu, type) iovaspaceInvalidateTlb_DISPATCH(pVAS, pGpu, type) +#define iovaspacePinRootPageDir(pVAS, pGpu) iovaspacePinRootPageDir_DISPATCH(pVAS, pGpu) +#define iovaspaceUnpinRootPageDir(pVAS, pGpu) iovaspaceUnpinRootPageDir_DISPATCH(pVAS, pGpu) +#define iovaspaceSetPteInfo(pVAS, pGpu, pParams) iovaspaceSetPteInfo_DISPATCH(pVAS, pGpu, pParams) +#define iovaspaceGetPasid(pVAS, pPasid) iovaspaceGetPasid_DISPATCH(pVAS, pPasid) +#define iovaspaceGetPageTableInfo(pVAS, pParams) iovaspaceGetPageTableInfo_DISPATCH(pVAS, pParams) +#define iovaspaceReserveMempool(pVAS, pGpu, hClient, size, pageSizeLockMask, flags) iovaspaceReserveMempool_DISPATCH(pVAS, pGpu, hClient, size, pageSizeLockMask, flags) +#define iovaspaceMap(pVAS, pGpu, vaLo, vaHi, pTarget, flags) iovaspaceMap_DISPATCH(pVAS, pGpu, vaLo, vaHi, pTarget, flags) +NV_STATUS 
iovaspaceConstruct__IMPL(struct OBJIOVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags); + +static inline NV_STATUS iovaspaceConstruct__DISPATCH(struct OBJIOVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return pVAS->__iovaspaceConstruct___(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +NV_STATUS iovaspaceAlloc_IMPL(struct OBJIOVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr); + +static inline NV_STATUS iovaspaceAlloc_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return pVAS->__iovaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +NV_STATUS iovaspaceFree_IMPL(struct OBJIOVASPACE *pVAS, NvU64 vAddr); + +static inline NV_STATUS iovaspaceFree_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__iovaspaceFree__(pVAS, vAddr); +} + +NV_STATUS iovaspaceApplyDefaultAlignment_IMPL(struct OBJIOVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask); + +static inline NV_STATUS iovaspaceApplyDefaultAlignment_DISPATCH(struct OBJIOVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return pVAS->__iovaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +NV_STATUS iovaspaceIncAllocRefCnt_IMPL(struct OBJIOVASPACE *pVAS, NvU64 vAddr); + +static inline NV_STATUS iovaspaceIncAllocRefCnt_DISPATCH(struct OBJIOVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__iovaspaceIncAllocRefCnt__(pVAS, vAddr); +} + +NvU64 iovaspaceGetVaStart_IMPL(struct OBJIOVASPACE *pVAS); + +static inline NvU64 iovaspaceGetVaStart_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceGetVaStart__(pVAS); +} + +NvU64 iovaspaceGetVaLimit_IMPL(struct OBJIOVASPACE *pVAS); + +static inline NvU64 iovaspaceGetVaLimit_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceGetVaLimit__(pVAS); +} + +NV_STATUS iovaspaceGetVasInfo_IMPL(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams); + +static inline NV_STATUS iovaspaceGetVasInfo_DISPATCH(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pVAS->__iovaspaceGetVasInfo__(pVAS, pParams); +} + +static inline NvBool iovaspaceIsMirrored_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceIsMirrored__(pVAS); +} + +static inline NvBool iovaspaceIsExternallyOwned_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceIsExternallyOwned__(pVAS); +} + +static inline NvBool iovaspaceIsInternalVaRestricted_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceIsInternalVaRestricted__(pVAS); +} + +static inline NvU32 iovaspaceGetFlags_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceGetFlags__(pVAS); +} + +static inline NvBool iovaspaceIsAtsEnabled_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceIsAtsEnabled__(pVAS); +} + +static inline NvU32 iovaspaceGetBigPageSize_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceGetBigPageSize__(pVAS); +} + +static inline NV_STATUS iovaspaceGetPteInfo_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, 
NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) { + return pVAS->__iovaspaceGetPteInfo__(pVAS, pGpu, pParams, pPhysAddr); +} + +static inline PMEMORY_DESCRIPTOR iovaspaceGetPageDirBase_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__iovaspaceGetPageDirBase__(pVAS, pGpu); +} + +static inline PMEMORY_DESCRIPTOR iovaspaceGetKernelPageDirBase_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__iovaspaceGetKernelPageDirBase__(pVAS, pGpu); +} + +static inline NvU32 iovaspaceGetMapPageSize_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) { + return pVAS->__iovaspaceGetMapPageSize__(pVAS, pGpu, pMemBlock); +} + +static inline struct OBJEHEAP *iovaspaceGetHeap_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceGetHeap__(pVAS); +} + +static inline NvBool iovaspaceIsFaultCapable_DISPATCH(struct OBJIOVASPACE *pVAS) { + return pVAS->__iovaspaceIsFaultCapable__(pVAS); +} + +static inline void iovaspaceUnmap_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) { + pVAS->__iovaspaceUnmap__(pVAS, pGpu, vaLo, vaHi); +} + +static inline void iovaspaceInvalidateTlb_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) { + pVAS->__iovaspaceInvalidateTlb__(pVAS, pGpu, type); +} + +static inline NV_STATUS iovaspacePinRootPageDir_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__iovaspacePinRootPageDir__(pVAS, pGpu); +} + +static inline void iovaspaceUnpinRootPageDir_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu) { + pVAS->__iovaspaceUnpinRootPageDir__(pVAS, pGpu); +} + +static inline NV_STATUS iovaspaceSetPteInfo_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + return pVAS->__iovaspaceSetPteInfo__(pVAS, pGpu, pParams); +} + +static inline NV_STATUS iovaspaceGetPasid_DISPATCH(struct OBJIOVASPACE *pVAS, NvU32 *pPasid) { + return pVAS->__iovaspaceGetPasid__(pVAS, pPasid); +} + +static inline NV_STATUS iovaspaceGetPageTableInfo_DISPATCH(struct OBJIOVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + return pVAS->__iovaspaceGetPageTableInfo__(pVAS, pParams); +} + +static inline NV_STATUS iovaspaceReserveMempool_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) { + return pVAS->__iovaspaceReserveMempool__(pVAS, pGpu, hClient, size, pageSizeLockMask, flags); +} + +static inline NV_STATUS iovaspaceMap_DISPATCH(struct OBJIOVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) { + return pVAS->__iovaspaceMap__(pVAS, pGpu, vaLo, vaHi, pTarget, flags); +} + +void iovaspaceDestruct_IMPL(struct OBJIOVASPACE *pIOVAS); +#define __nvoc_iovaspaceDestruct(pIOVAS) iovaspaceDestruct_IMPL(pIOVAS) +NV_STATUS iovaspaceAcquireMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PMEMORY_DESCRIPTOR pIovaMapping); +#ifdef __nvoc_io_vaspace_h_disabled +static inline NV_STATUS iovaspaceAcquireMapping(struct OBJIOVASPACE *pIOVAS, PMEMORY_DESCRIPTOR pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceAcquireMapping(pIOVAS, pIovaMapping) iovaspaceAcquireMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +void iovaspaceReleaseMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING 
pIovaMapping); +#ifdef __nvoc_io_vaspace_h_disabled +static inline void iovaspaceReleaseMapping(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceReleaseMapping(pIOVAS, pIovaMapping) iovaspaceReleaseMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +void iovaspaceDestroyMapping_IMPL(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping); +#ifdef __nvoc_io_vaspace_h_disabled +static inline void iovaspaceDestroyMapping(struct OBJIOVASPACE *pIOVAS, PIOVAMAPPING pIovaMapping) { + NV_ASSERT_FAILED_PRECOMP("OBJIOVASPACE was disabled!"); +} +#else //__nvoc_io_vaspace_h_disabled +#define iovaspaceDestroyMapping(pIOVAS, pIovaMapping) iovaspaceDestroyMapping_IMPL(pIOVAS, pIovaMapping) +#endif //__nvoc_io_vaspace_h_disabled + +#undef PRIVATE_FIELD + + +struct OBJIOVASPACE* iovaspaceFromId(NvU32 iovaspaceId); +struct OBJIOVASPACE* iovaspaceFromMapping(PIOVAMAPPING pIovaMapping); + +// +// Helper that looks up the IOVAS from the mapping and then calls +// iovaspaceDestroyMapping(). +// +void iovaMappingDestroy(PIOVAMAPPING pIovaMapping); + +#endif // _IOVASPACE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_IO_VASPACE_NVOC_H_ diff --git a/src/nvidia/generated/g_journal_nvoc.c b/src/nvidia/generated/g_journal_nvoc.c new file mode 100644 index 000000000..4062a862e --- /dev/null +++ b/src/nvidia/generated/g_journal_nvoc.c @@ -0,0 +1,174 @@ +#define NVOC_JOURNAL_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_journal_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x15dec8 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJRCDB; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +void __nvoc_init_OBJRCDB(OBJRCDB*); +void __nvoc_init_funcTable_OBJRCDB(OBJRCDB*); +NV_STATUS __nvoc_ctor_OBJRCDB(OBJRCDB*); +void __nvoc_init_dataField_OBJRCDB(OBJRCDB*); +void __nvoc_dtor_OBJRCDB(OBJRCDB*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJRCDB; + +static const struct NVOC_RTTI __nvoc_rtti_OBJRCDB_OBJRCDB = { + /*pClassDef=*/ &__nvoc_class_def_OBJRCDB, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJRCDB, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJRCDB_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJRCDB, __nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJRCDB_OBJTRACEABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJRCDB, __nvoc_base_OBJTRACEABLE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJRCDB = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJRCDB_OBJRCDB, + &__nvoc_rtti_OBJRCDB_OBJTRACEABLE, + &__nvoc_rtti_OBJRCDB_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJRCDB = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJRCDB), + /*classId=*/ classId(OBJRCDB), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJRCDB", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJRCDB, + /*pCastInfo=*/ &__nvoc_castinfo_OBJRCDB, + /*pExportInfo=*/ 
&__nvoc_export_info_OBJRCDB +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJRCDB = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJRCDB(OBJRCDB *pThis) { + __nvoc_rcdbDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJRCDB(OBJRCDB *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + pThis->setProperty(pThis, PDB_PROP_RCDB_COMPRESS, ((NvBool)(0 == 0))); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* ); +NV_STATUS __nvoc_ctor_OBJRCDB(OBJRCDB *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJRCDB_fail_Object; + status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + if (status != NV_OK) goto __nvoc_ctor_OBJRCDB_fail_OBJTRACEABLE; + __nvoc_init_dataField_OBJRCDB(pThis); + + status = __nvoc_rcdbConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJRCDB_fail__init; + goto __nvoc_ctor_OBJRCDB_exit; // Success + +__nvoc_ctor_OBJRCDB_fail__init: + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); +__nvoc_ctor_OBJRCDB_fail_OBJTRACEABLE: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJRCDB_fail_Object: +__nvoc_ctor_OBJRCDB_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJRCDB_1(OBJRCDB *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJRCDB(OBJRCDB *pThis) { + __nvoc_init_funcTable_OBJRCDB_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_OBJRCDB(OBJRCDB *pThis) { + pThis->__nvoc_pbase_OBJRCDB = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + __nvoc_init_funcTable_OBJRCDB(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJRCDB(OBJRCDB **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJRCDB *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJRCDB)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJRCDB)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJRCDB); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJRCDB(pThis); + status = __nvoc_ctor_OBJRCDB(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJRCDB_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJRCDB_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJRCDB(OBJRCDB **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJRCDB(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_journal_nvoc.h b/src/nvidia/generated/g_journal_nvoc.h new file mode 100644 index 000000000..593e6554c --- /dev/null +++ b/src/nvidia/generated/g_journal_nvoc.h @@ -0,0 
+1,509 @@ +#ifndef _G_JOURNAL_NVOC_H_ +#define _G_JOURNAL_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_journal_nvoc.h" + +#ifndef _JOURNAL_H_ +#define _JOURNAL_H_ + +// +// Journal object defines and Structures +// + +#include "kernel/core/core.h" + +#include "kernel/core/system.h" +#include "kernel/diagnostics/journal_structs.h" +#include "kernel/diagnostics/nv_debug_dump.h" +#include "kernel/diagnostics/traceable.h" + +#include "ctrl/ctrl2080/ctrl2080nvd.h" + +#include "lib/protobuf/prb.h" +#include "nvdump.h" + +// Local definition to set the default MMU Fault Type. +#define NV_PFAULT_FAULT_TYPE_UNKNOWN 0x000000FF; +// Local definition to set the default MMU Error Source/Client ID. +#define NV_PFIFO_INTR_MMU_FAULT_INFO_CLIENT_UNKNOWN 0x000000FF; + +#define NOCAT_COLLECT_PERF 0 +#define NOCAT_PROBE_FB_MEMORY 0 + + + +typedef struct _def_assert_list +{ + NvU32 QualifyingStackSize; // The number of stack entries to check in order + // to consider a record unique + NvU32 Size; // size of list of pointers to records + NvU32 Count; // number of records in the record list + PRmRCCommonAssert_RECORD *ppList; // list of pointers to records. +} JOURNAL_ASSERT_LIST; + +typedef struct _def_event_journal +{ + NvU8* pBuffer; // pointer to buffer + NvU8* pFree; // pointer to first free byte in the buffer + NVCDRecordCollection* pCurrCollection; // current collection being created + NvU32 BufferSize; // size of buffer in bytes + NvU32 BufferRemaining; // remaining bytes in buffer; + NvU32 RecordCount; // total # of records currently in the buffer + JOURNAL_ASSERT_LIST AssertList; // list of asserts within the journal. 
+} EVENT_JOURNAL;
+
+typedef struct _def_sys_error_info
+{
+    volatile NvU32 InUse;              // Atomically set when a thread is accessing the structure
+    NvU32   LogCount;                  // Count of Logged Event Messages
+    NvU32   ErrorCount;                // Count of Logged Errors
+    NvU32   TotalErrorCount;           // Total Number of Errors Encountered
+    void *  pErrorList;                // Error List
+    void *  pNextError;                // Used to walk error list
+} SYS_ERROR_INFO;
+
+typedef struct RING_BUFFER_LOG RING_BUFFER_LOG, *PRING_BUFFER_LOG;
+
+// A node in the linked list of ring buffers
+struct RING_BUFFER_LOG
+{
+    RMCD_RECORD_TYPE entryType;        // Type of the entries stored in this ring buffer.
+    NvU32 maxEntries;                  // Capacity of the ring buffer
+    NvU32 maxBufferSize;
+    NvU32 headIndex;                   // Index of the first item in the ring buffer.
+    NvU32 refCount;                    // How many GPUs are using this Ring Buffer
+    //
+    // NOTE: If you want to look at the most recent item added to the ring buffer,
+    // you need to look at the entry at (headIndex+numEntries-1)%maxEntries
+    //
+    NvU32 numEntries;
+    NvU32 bufferSize;
+    NvU8 *pBuffer;                     // Buffer holding the entries
+    RING_BUFFER_LOG *pNextRingBuffer;
+
+};
+
+//
+// A list of circular buffers. Each circular buffer is capable of
+// storing the last n events of a particular type.
+//
+typedef struct
+{
+    RING_BUFFER_LOG* pFirstEntry;      // Pointer to the first circular buffer in the list
+    NvU32 NumRingBuffers;
+
+} RING_BUFFER_LOG_COLLECTION;
+
+typedef struct
+{
+    RmRCCommonJournal_RECORD common;
+    const PRB_FIELD_DESC *fieldDesc;
+} RM_DATA_COLLECTION_RECORD;
+
+// this is the structure to hold a NOCAT report.
+typedef struct
+{
+    NvU32 id;                                        // record id
+    NV2080_NOCAT_JOURNAL_GPU_STATE nocatGpuState;    // contains the state of the
+                                                     // associated GPU if there is one,
+    NV2080_NOCAT_JOURNAL_ENTRY nocatJournalEntry;    // the NOCAT report data -- IDs, diag data etc.
+} RM_NOCAT_JOURNAL_ENTRY;
+
+#define ASSERT_CALL_STACK_SIZE 10
+#define NOCAT_CACHE_FRESHNESS_PERIOD_MS 10ULL
+typedef struct
+{
+    NvU32 callStack[ASSERT_CALL_STACK_SIZE];         // Call stack when the assert occurred.
+    NvU32 count;
+} RM_NOCAT_ASSERT_DIAG_BUFFER;
+
+typedef struct _nocatQueueDescriptor
+{
+    NvU64 loadAddress;
+    NvU32 nextRecordId;
+    NvU32 nextReportedId;
+    NvU32 nocatLastRecordType;
+    NvBool journalLocked;
+    NvU32 lastRecordId[NV2080_NOCAT_JOURNAL_REC_TYPE_COUNT];
+    RM_NOCAT_ASSERT_DIAG_BUFFER lastAssertData;
+    NvU8 tag[NV2080_NOCAT_JOURNAL_MAX_STR_LEN];
+    NvU64 cacheFreshnessPeriodticks;
+    NV2080_NOCAT_JOURNAL_GPU_STATE nocatGpuState;    // cache contains the state of the
+                                                     // associated GPU if there is one.
+ + NvU32 nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COUNTER_COUNT]; +} nocatQueueDescriptor; + +#ifdef NVOC_JOURNAL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJRCDB { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; + struct Object *__nvoc_pbase_Object; + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; + struct OBJRCDB *__nvoc_pbase_OBJRCDB; + NvBool PDB_PROP_RCDB_COMPRESS; + NvBool PDB_PROP_RCDB_IN_DEFERRED_DUMP_CODEPATH; + SYS_ERROR_INFO ErrorInfo; + EVENT_JOURNAL Journal; + NvU32 BugcheckCount; + RING_BUFFER_LOG_COLLECTION RingBufferColl; + NVD_STATE nvDumpState; + rcErrorCounterEntry rcErrorCounterArray[10]; + NvBool bPrevDriverCodeExecuted; + char *previousDriverVersion; + char *previousDriverBranch; + NvU32 prevDriverChangelist; + NvU32 driverLoadCount; + NvU16 RcErrRptNextIdx; + NvBool RcErrRptRecordsDropped; + struct Falcon *pCrashedFlcn; + nocatQueueDescriptor nocatJournalDescriptor; +}; + +#ifndef __NVOC_CLASS_OBJRCDB_TYPEDEF__ +#define __NVOC_CLASS_OBJRCDB_TYPEDEF__ +typedef struct OBJRCDB OBJRCDB; +#endif /* __NVOC_CLASS_OBJRCDB_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJRCDB +#define __nvoc_class_id_OBJRCDB 0x15dec8 +#endif /* __nvoc_class_id_OBJRCDB */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJRCDB; + +#define __staticCast_OBJRCDB(pThis) \ + ((pThis)->__nvoc_pbase_OBJRCDB) + +#ifdef __nvoc_journal_h_disabled +#define __dynamicCast_OBJRCDB(pThis) ((OBJRCDB*)NULL) +#else //__nvoc_journal_h_disabled +#define __dynamicCast_OBJRCDB(pThis) \ + ((OBJRCDB*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJRCDB))) +#endif //__nvoc_journal_h_disabled + +#define PDB_PROP_RCDB_IN_DEFERRED_DUMP_CODEPATH_BASE_CAST +#define PDB_PROP_RCDB_IN_DEFERRED_DUMP_CODEPATH_BASE_NAME PDB_PROP_RCDB_IN_DEFERRED_DUMP_CODEPATH +#define PDB_PROP_RCDB_COMPRESS_BASE_CAST +#define PDB_PROP_RCDB_COMPRESS_BASE_NAME PDB_PROP_RCDB_COMPRESS + +NV_STATUS __nvoc_objCreateDynamic_OBJRCDB(OBJRCDB**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJRCDB(OBJRCDB**, Dynamic*, NvU32); +#define __objCreate_OBJRCDB(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJRCDB((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS rcdbConstruct_IMPL(struct OBJRCDB *arg_pRcdb); +#define __nvoc_rcdbConstruct(arg_pRcdb) rcdbConstruct_IMPL(arg_pRcdb) +void rcdbDestruct_IMPL(struct OBJRCDB *pRcdb); +#define __nvoc_rcdbDestruct(pRcdb) rcdbDestruct_IMPL(pRcdb) +NV_STATUS rcdbSavePreviousDriverVersion_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbSavePreviousDriverVersion(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbSavePreviousDriverVersion(pGpu, pRcdb) rcdbSavePreviousDriverVersion_IMPL(pGpu, pRcdb) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbClearErrorHistory_IMPL(struct OBJRCDB *pRcdb); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbClearErrorHistory(struct OBJRCDB *pRcdb) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbClearErrorHistory(pRcdb) rcdbClearErrorHistory_IMPL(pRcdb) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbDeleteErrorElement_IMPL(struct OBJRCDB *pRcdb, void *arg0); +#ifdef 
__nvoc_journal_h_disabled +static inline NV_STATUS rcdbDeleteErrorElement(struct OBJRCDB *pRcdb, void *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbDeleteErrorElement(pRcdb, arg0) rcdbDeleteErrorElement_IMPL(pRcdb, arg0) +#endif //__nvoc_journal_h_disabled + +void rcdbDestroyRingBufferCollection_IMPL(struct OBJRCDB *pRcdb); +#ifdef __nvoc_journal_h_disabled +static inline void rcdbDestroyRingBufferCollection(struct OBJRCDB *pRcdb) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); +} +#else //__nvoc_journal_h_disabled +#define rcdbDestroyRingBufferCollection(pRcdb) rcdbDestroyRingBufferCollection_IMPL(pRcdb) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbAllocNextJournalRec_IMPL(struct OBJRCDB *pRcdb, NVCD_RECORD **arg0, NvU8 arg1, NvU8 arg2, NvU16 arg3); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbAllocNextJournalRec(struct OBJRCDB *pRcdb, NVCD_RECORD **arg0, NvU8 arg1, NvU8 arg2, NvU16 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbAllocNextJournalRec(pRcdb, arg0, arg1, arg2, arg3) rcdbAllocNextJournalRec_IMPL(pRcdb, arg0, arg1, arg2, arg3) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbAddBugCheckRec_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, NvU32 bugCheckCode); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbAddBugCheckRec(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, NvU32 bugCheckCode) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbAddBugCheckRec(pGpu, pRcdb, bugCheckCode) rcdbAddBugCheckRec_IMPL(pGpu, pRcdb, bugCheckCode) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbAddPowerStateRec_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, NvU32 powerEvent, NvU32 state, NvU32 fastBootPowerState); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbAddPowerStateRec(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, NvU32 powerEvent, NvU32 state, NvU32 fastBootPowerState) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbAddPowerStateRec(pGpu, pRcdb, powerEvent, state, fastBootPowerState) rcdbAddPowerStateRec_IMPL(pGpu, pRcdb, powerEvent, state, fastBootPowerState) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbDumpInitGpuAccessibleFlag_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbDumpInitGpuAccessibleFlag(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbDumpInitGpuAccessibleFlag(pGpu, pRcdb) rcdbDumpInitGpuAccessibleFlag_IMPL(pGpu, pRcdb) +#endif //__nvoc_journal_h_disabled + +NvU8 *rcdbCreateRingBuffer_IMPL(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type, NvU32 maxEntries); +#ifdef __nvoc_journal_h_disabled +static inline NvU8 *rcdbCreateRingBuffer(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type, NvU32 maxEntries) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NULL; +} +#else //__nvoc_journal_h_disabled +#define rcdbCreateRingBuffer(pRcdb, type, maxEntries) rcdbCreateRingBuffer_IMPL(pRcdb, type, maxEntries) +#endif //__nvoc_journal_h_disabled + +void rcdbDestroyRingBuffer_IMPL(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type); +#ifdef 
__nvoc_journal_h_disabled +static inline void rcdbDestroyRingBuffer(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); +} +#else //__nvoc_journal_h_disabled +#define rcdbDestroyRingBuffer(pRcdb, type) rcdbDestroyRingBuffer_IMPL(pRcdb, type) +#endif //__nvoc_journal_h_disabled + +void rcdbAddRecToRingBuffer_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type, NvU32 recordSize, NvU8 *pRecord); +#ifdef __nvoc_journal_h_disabled +static inline void rcdbAddRecToRingBuffer(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type, NvU32 recordSize, NvU8 *pRecord) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); +} +#else //__nvoc_journal_h_disabled +#define rcdbAddRecToRingBuffer(pGpu, pRcdb, type, recordSize, pRecord) rcdbAddRecToRingBuffer_IMPL(pGpu, pRcdb, type, recordSize, pRecord) +#endif //__nvoc_journal_h_disabled + +NvU32 rcdbGetOcaRecordSize_IMPL(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type); +#ifdef __nvoc_journal_h_disabled +static inline NvU32 rcdbGetOcaRecordSize(struct OBJRCDB *pRcdb, RMCD_RECORD_TYPE type) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return 0; +} +#else //__nvoc_journal_h_disabled +#define rcdbGetOcaRecordSize(pRcdb, type) rcdbGetOcaRecordSize_IMPL(pRcdb, type) +#endif //__nvoc_journal_h_disabled + +NvU32 rcdbDumpJournal_IMPL(struct OBJRCDB *pRcdb, struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, const PRB_FIELD_DESC *pFieldDesc); +#ifdef __nvoc_journal_h_disabled +static inline NvU32 rcdbDumpJournal(struct OBJRCDB *pRcdb, struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, const PRB_FIELD_DESC *pFieldDesc) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return 0; +} +#else //__nvoc_journal_h_disabled +#define rcdbDumpJournal(pRcdb, pGpu, pPrbEnc, pNvDumpState, pFieldDesc) rcdbDumpJournal_IMPL(pRcdb, pGpu, pPrbEnc, pNvDumpState, pFieldDesc) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbDumpComponent_IMPL(struct OBJRCDB *pRcdb, NvU32 component, NVDUMP_BUFFER *pBuffer, NVDUMP_BUFFER_POLICY policy, PrbBufferCallback *pBufferCallback); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbDumpComponent(struct OBJRCDB *pRcdb, NvU32 component, NVDUMP_BUFFER *pBuffer, NVDUMP_BUFFER_POLICY policy, PrbBufferCallback *pBufferCallback) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbDumpComponent(pRcdb, component, pBuffer, policy, pBufferCallback) rcdbDumpComponent_IMPL(pRcdb, component, pBuffer, policy, pBufferCallback) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbDumpSystemInfo_IMPL(struct OBJRCDB *pRcdb, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbDumpSystemInfo(struct OBJRCDB *pRcdb, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbDumpSystemInfo(pRcdb, pPrbEnc, pNvDumpState) rcdbDumpSystemInfo_IMPL(pRcdb, pPrbEnc, pNvDumpState) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbDumpSystemFunc_IMPL(struct OBJRCDB *pRcdb, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbDumpSystemFunc(struct OBJRCDB *pRcdb, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_journal_h_disabled +#define rcdbDumpSystemFunc(pRcdb, pPrbEnc, pNvDumpState) rcdbDumpSystemFunc_IMPL(pRcdb, pPrbEnc, pNvDumpState) +#endif //__nvoc_journal_h_disabled + +NvU32 rcdbDumpErrorCounters_IMPL(struct OBJRCDB *pRcDB, struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc); +#ifdef __nvoc_journal_h_disabled +static inline NvU32 rcdbDumpErrorCounters(struct OBJRCDB *pRcDB, struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return 0; +} +#else //__nvoc_journal_h_disabled +#define rcdbDumpErrorCounters(pRcDB, pGpu, pPrbEnc) rcdbDumpErrorCounters_IMPL(pRcDB, pGpu, pPrbEnc) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbGetRcDiagRecBoundaries_IMPL(struct OBJRCDB *pRcdb, NvU16 *arg0, NvU16 *arg1, NvU32 arg2, NvU32 arg3); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbGetRcDiagRecBoundaries(struct OBJRCDB *pRcdb, NvU16 *arg0, NvU16 *arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbGetRcDiagRecBoundaries(pRcdb, arg0, arg1, arg2, arg3) rcdbGetRcDiagRecBoundaries_IMPL(pRcdb, arg0, arg1, arg2, arg3) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbAddRcDiagRec_IMPL(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RmRcDiag_RECORD *arg0); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbAddRcDiagRec(struct OBJGPU *pGpu, struct OBJRCDB *pRcdb, RmRcDiag_RECORD *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbAddRcDiagRec(pGpu, pRcdb, arg0) rcdbAddRcDiagRec_IMPL(pGpu, pRcdb, arg0) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbGetRcDiagRec_IMPL(struct OBJRCDB *pRcdb, NvU16 arg0, RmRCCommonJournal_RECORD **arg1, NvU32 arg2, NvU32 arg3); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbGetRcDiagRec(struct OBJRCDB *pRcdb, NvU16 arg0, RmRCCommonJournal_RECORD **arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbGetRcDiagRec(pRcdb, arg0, arg1, arg2, arg3) rcdbGetRcDiagRec_IMPL(pRcdb, arg0, arg1, arg2, arg3) +#endif //__nvoc_journal_h_disabled + +NV_STATUS rcdbUpdateRcDiagRecContext_IMPL(struct OBJRCDB *pRcdb, NvU16 arg0, NvU16 arg1, NvU32 arg2, NvU32 arg3); +#ifdef __nvoc_journal_h_disabled +static inline NV_STATUS rcdbUpdateRcDiagRecContext(struct OBJRCDB *pRcdb, NvU16 arg0, NvU16 arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJRCDB was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_journal_h_disabled +#define rcdbUpdateRcDiagRecContext(pRcdb, arg0, arg1, arg2, arg3) rcdbUpdateRcDiagRecContext_IMPL(pRcdb, arg0, arg1, arg2, arg3) +#endif //__nvoc_journal_h_disabled + +void rcdbInitNocatGpuCache_IMPL(struct OBJGPU *pGpu); +#define rcdbInitNocatGpuCache(pGpu) rcdbInitNocatGpuCache_IMPL(pGpu) +void rcdbCleanupNocatGpuCache_IMPL(struct OBJGPU *pGpu); +#define rcdbCleanupNocatGpuCache(pGpu) rcdbCleanupNocatGpuCache_IMPL(pGpu) +#undef PRIVATE_FIELD + + +typedef struct +{ + NvU8 recType; + NvU32 bugcheck; + const char *pSource; + NvU32 subsystem; + NvU64 errorCode NV_ALIGN_BYTES(8); + NvU32 diagBufferLen; + NvU8 *pDiagBuffer; + const char *pFaultingEngine; + NvU32 tdrReason; +} NOCAT_JOURNAL_PARAMS; + +NV_STATUS rcdbAddRmDclMsg(void* msg, NvU16 size, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS rcdbAddRmEngDump(struct OBJGPU *pGpu, NvU32 component); 
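The NOCAT_JOURNAL_PARAMS block defined above is the argument structure consumed by the NOCAT reporting helpers declared just below (rcdbNocatInitEngineErrorEvent(), rcdbNocatInsertNocatError(), and related entry points). As a rough sketch of how those declarations compose, a caller would initialize the parameter block, fill in the identifying fields, and hand it to the insert routine. The function below is purely illustrative: its name, the "EXAMPLE" source tag, and the subsystem/error-code values are invented for the sketch and do not come from the driver.

// Illustrative sketch only -- not part of the driver. Assumes the
// declarations from this header; the source tag and numeric values are
// invented for the example.
static void exampleReportEngineError(struct OBJGPU *pGpu)
{
    NOCAT_JOURNAL_PARAMS params;
    NvU8 diagData[16] = { 0 };

    portMemSet(&params, 0, sizeof(params));

    // Let the helper set up record-type defaults for an engine error.
    if (rcdbNocatInitEngineErrorEvent(&params) != NV_OK)
        return;

    // Identify the reporting component and the error being recorded.
    params.pSource       = "EXAMPLE";         // invented source tag
    params.subsystem     = 0;                 // illustrative subsystem id
    params.errorCode     = 0x1234;            // illustrative error code
    params.pDiagBuffer   = diagData;          // optional opaque diagnostic payload
    params.diagBufferLen = sizeof(diagData);

    // Queue the record into the NOCAT journal for this GPU.
    (void)rcdbNocatInsertNocatError(pGpu, &params);
}

Whatever the exact required fields turn out to be, the intended order of operations is the same: initialize the parameter block, populate it, then insert it.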
+NV_STATUS rcdbAddRmGpuDump(struct OBJGPU *pGpu); +void rcdbSetCommonJournalRecord(struct OBJGPU *pGpu, PRmRCCommonJournal_RECORD pRec); +void rcdbAddCrashedFalcon(struct Falcon *pFlcn); + +NV_STATUS rcdbAddAssertJournalRec(void* pGpu, void** ppRec, NvU8 jGroup, + NvU8 type, NvU16 size, NvU32 level, NvU64 key); +NV_STATUS rcdbAddAssertJournalRecWithLine(void *pVoidGpu, NvU32 lineNum, + void** ppRec, NvU8 jGroup, NvU8 type, NvU16 size, NvU32 level, NvU64 key); + +NvU32 rcdbNocatInsertNocatError(struct OBJGPU *pGpu, + NOCAT_JOURNAL_PARAMS *nocatJournalEntry); + +NvU32 rcdbNocatInsertBugcheck(NvU32 deviceInstance, NvU32 bugCheckCode); + +NV_STATUS rcdbNocatInitEngineErrorEvent(NOCAT_JOURNAL_PARAMS *nocatJournalEntry); +NvU32 rcdbNocatInsertEngineError(struct OBJGPU *pGpu, + const char *pSource, NvU32 subsystem, NvU64 errorCode, + NvU8 *pDiagBuffer, NvU32 diagBufferLen); + +NvU32 rcdbNocatInsertRMCDErrorEvent(struct OBJGPU *pGpu, NvU32 recType, + const char *pSource, NvU32 subsystem, NvU64 errorCode, const char *pFault, + RMCD_ERROR_BLOCK *pRcdError); + +NV_STATUS rcdbNocatInitTDRErrorEvent(NOCAT_JOURNAL_PARAMS *nocatJournalEntry); +NvU32 rcdbNocatInsertTDRError(struct OBJGPU *pGpu, + const char *pSource, NvU32 subsystem, NvU64 errorCode, + NvU32 tdrBucket, + NvU8 *pDiagBuffer, NvU32 diagBufferLen, + NvU32 tdrReason, const char *pFaultingApp); + +NV_STATUS rcdbNocatInitRCErrorEvent(NOCAT_JOURNAL_PARAMS *nocatJournalEntry); +NvU32 rcdbNocatInsertRCError(struct OBJGPU* pGpu, + NvU32 subsystem, NvU64 errorCode, + NvU32 rcDiagRecStartIdx, NvU32 rcDiagRecEndIdx, + void* pAppId); + +NV_STATUS rcdbReportNextNocatJournalEntry(NV2080_NOCAT_JOURNAL_RECORD* pReport); + +NV_STATUS rcdbSetNocatTdrReason(NV2080CtrlNocatJournalDataTdrReason *pReasonData); + +NvU32 rcdbGetNocatOutstandingCount(Journal *pRcdb); + +NV_STATUS rcdbNocatJournalReportTestFill(void); + +#endif // _JOURNAL_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_JOURNAL_NVOC_H_ diff --git a/src/nvidia/generated/g_journal_pb.c b/src/nvidia/generated/g_journal_pb.c new file mode 100644 index 000000000..048e8b319 --- /dev/null +++ b/src/nvidia/generated/g_journal_pb.c @@ -0,0 +1,546 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
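The tables in this generated file describe the Journal protobuf messages to the RM protobuf runtime ("prb"); for instance, the Assert.RecTyp descriptor that follows pairs each numeric record type with its name when name strings are compiled in. The same pairing can be written as a plain switch over the JOURNAL_ASSERT_* values from g_journal_pb.h; the helper below is an invented illustration, not something the driver provides.

// Invented helper for illustration: mirrors the value/name pairs that
// prb_enum_mappings_assert_rectyp[] (below) supplies to the protobuf runtime.
// Uses only the JOURNAL_ASSERT_* constants defined in g_journal_pb.h.
static const char *exampleAssertRecTypName(NvU32 recTyp)
{
    switch (recTyp)
    {
        case JOURNAL_ASSERT_ASSERT:              return "ASSERT";
        case JOURNAL_ASSERT_TIMEOUT:             return "TIMEOUT";
        case JOURNAL_ASSERT_DEBUG_BREAKPOINT:    return "DEBUG_BREAKPOINT";
        case JOURNAL_ASSERT_ASSERT_V3:           return "ASSERT_V3";
        case JOURNAL_ASSERT_TIMEOUT_V3:          return "TIMEOUT_V3";
        case JOURNAL_ASSERT_DEBUG_BREAKPOINT_V3: return "DEBUG_BREAKPOINT_V3";
        default:                                 return "<unknown RecTyp>";
    }
}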
+ +#include "nvtypes.h" +#include "prbrt.h" +#include "g_journal_pb.h" + +// 'Assert.RecTyp' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_assert_rectyp[] = { + { + 120, + PRB_MAYBE_ENUM_NAME("ASSERT") + }, + { + 121, + PRB_MAYBE_ENUM_NAME("TIMEOUT") + }, + { + 124, + PRB_MAYBE_ENUM_NAME("DEBUG_BREAKPOINT") + }, + { + 139, + PRB_MAYBE_ENUM_NAME("ASSERT_V3") + }, + { + 140, + PRB_MAYBE_ENUM_NAME("TIMEOUT_V3") + }, + { + 141, + PRB_MAYBE_ENUM_NAME("DEBUG_BREAKPOINT_V3") + }, +}; + +const PRB_ENUM_DESC prb_enums_journal_assert_rectyp = { + prb_enum_mappings_assert_rectyp, + 6, + PRB_MAYBE_ENUM_NAME("RecTyp") +}; + +// 'BadRead.MemSpace' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_badread_memspace[] = { + { + 1, + PRB_MAYBE_ENUM_NAME("BAR0") + }, + { + 2, + PRB_MAYBE_ENUM_NAME("FB") + }, + { + 3, + PRB_MAYBE_ENUM_NAME("INSTANCE") + }, + { + 4, + PRB_MAYBE_ENUM_NAME("PCI") + }, +}; + +const PRB_ENUM_DESC prb_enums_journal_badread_memspace = { + prb_enum_mappings_badread_memspace, + 4, + PRB_MAYBE_ENUM_NAME("MemSpace") +}; + +// 'BadRead.Reason' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_badread_reason[] = { + { + 1, + PRB_MAYBE_ENUM_NAME("GPU_OFF_BUS") + }, + { + 2, + PRB_MAYBE_ENUM_NAME("LOW_POWER") + }, + { + 3, + PRB_MAYBE_ENUM_NAME("PCI_DEVICE_DISABLED") + }, + { + 4, + PRB_MAYBE_ENUM_NAME("GPU_RESET") + }, + { + 5, + PRB_MAYBE_ENUM_NAME("DWORD_SHIFT") + }, + { + 6, + PRB_MAYBE_ENUM_NAME("UNKNOWN") + }, +}; + +const PRB_ENUM_DESC prb_enums_journal_badread_reason = { + prb_enum_mappings_badread_reason, + 6, + PRB_MAYBE_ENUM_NAME("Reason") +}; + +// 'Common' field defaults +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU32 journal_common_gpu_tag_default = 0;) +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU64 journal_common_cpu_tag_default = 0;) +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU64 journal_common_time_stamp_default = 0;) +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU64 journal_common_state_mask_default = 0;) + +// 'Common' field descriptors +const PRB_FIELD_DESC prb_fields_journal_common[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0 | PRB_HAS_DEFAULT, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_tag") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_common_gpu_tag_default) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT64, + 0 | PRB_HAS_DEFAULT, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("cpu_tag") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_common_cpu_tag_default) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT64, + 0 | PRB_HAS_DEFAULT, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("time_stamp") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_common_time_stamp_default) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT64, + 0 | PRB_HAS_DEFAULT, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("state_mask") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_common_state_mask_default) + }, +}; + +// 'Assert' field defaults +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU64 journal_assert_last_time_stamp_default = 0;) +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU64 journal_assert_last_time_default = 0;) +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU32 journal_assert_gpu_tag_default = 0;) +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU32 journal_assert_count_default = 1;) +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU32 journal_assert_level_default = 1;) + +// 'Assert' field descriptors +const PRB_FIELD_DESC prb_fields_journal_assert[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_journal_assert_rectyp, + 
PRB_MAYBE_FIELD_NAME("type") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 9, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0 | PRB_IS_DEPRECATED, + }, + JOURNAL_COMMON, + 0, + PRB_MAYBE_FIELD_NAME("common_obsolete") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 10, + { + PRB_OPTIONAL, + PRB_UINT64, + 0 | PRB_HAS_DEFAULT, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("last_time_stamp") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_assert_last_time_stamp_default) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT64, + 0 | PRB_IS_DEPRECATED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("first_time") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT64, + 0 | PRB_HAS_DEFAULT | PRB_IS_DEPRECATED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("last_time") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_assert_last_time_default) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("breakpoint_addr_hint") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_UINT32, + 0 | PRB_HAS_DEFAULT | PRB_IS_DEPRECATED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_tag") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_assert_gpu_tag_default) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_UINT32, + 0 | PRB_HAS_DEFAULT, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("count") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_assert_count_default) + }, + { + 7, + { + PRB_OPTIONAL, + PRB_UINT32, + 0 | PRB_HAS_DEFAULT, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("level") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_assert_level_default) + }, + { + 8, + { + PRB_REPEATED, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("call_stack") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 11, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("source_line") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'RvaHeader' field defaults + +// 'RvaHeader' field descriptors +const PRB_FIELD_DESC prb_fields_journal_rvaheader[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("driver_start") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("offset") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("pointer_size") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("unique_id_high") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("unique_id_low") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("age") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'BadRead' field defaults + +// 'BadRead' field descriptors +const PRB_FIELD_DESC prb_fields_journal_badread[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_journal_badread_memspace, + PRB_MAYBE_FIELD_NAME("memory_space") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("offset") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("mask") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("value") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_journal_badread_reason, + 
PRB_MAYBE_FIELD_NAME("reason") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0 | PRB_IS_DEPRECATED, + }, + JOURNAL_COMMON, + 0, + PRB_MAYBE_FIELD_NAME("common_obsolete") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'BugCheck' field defaults +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU32 journal_bugcheck_gpu_tag_default = 0;) + +// 'BugCheck' field descriptors +const PRB_FIELD_DESC prb_fields_journal_bugcheck[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("code") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0 | PRB_HAS_DEFAULT | PRB_IS_DEPRECATED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_tag") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&journal_bugcheck_gpu_tag_default) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT64, + 0 | PRB_IS_DEPRECATED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("time_stamp") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0 | PRB_IS_DEPRECATED, + }, + JOURNAL_COMMON, + 0, + PRB_MAYBE_FIELD_NAME("common_obsolete") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// Message descriptors +const PRB_MSG_DESC prb_messages_journal[] = { + { + 4, + prb_fields_journal_common, + PRB_MAYBE_MESSAGE_NAME("Journal.Common") + }, + { + 11, + prb_fields_journal_assert, + PRB_MAYBE_MESSAGE_NAME("Journal.Assert") + }, + { + 6, + prb_fields_journal_rvaheader, + PRB_MAYBE_MESSAGE_NAME("Journal.RvaHeader") + }, + { + 6, + prb_fields_journal_badread, + PRB_MAYBE_MESSAGE_NAME("Journal.BadRead") + }, + { + 4, + prb_fields_journal_bugcheck, + PRB_MAYBE_MESSAGE_NAME("Journal.BugCheck") + }, +}; + +// Service descriptors +const PRB_SERVICE_DESC prb_services_journal[] = { + { 0 } +}; + diff --git a/src/nvidia/generated/g_journal_pb.h b/src/nvidia/generated/g_journal_pb.h new file mode 100644 index 000000000..e3b8ddbe4 --- /dev/null +++ b/src/nvidia/generated/g_journal_pb.h @@ -0,0 +1,148 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#ifndef G_JOURNAL_PB_H__ +#define G_JOURNAL_PB_H__ + + +extern const PRB_ENUM_DESC prb_enums_journal_assert_rectyp; + +// 'Assert.RecTyp' enumeration values +#define JOURNAL_ASSERT_ASSERT 120 +#define JOURNAL_ASSERT_TIMEOUT 121 +#define JOURNAL_ASSERT_DEBUG_BREAKPOINT 124 +#define JOURNAL_ASSERT_ASSERT_V3 139 +#define JOURNAL_ASSERT_TIMEOUT_V3 140 +#define JOURNAL_ASSERT_DEBUG_BREAKPOINT_V3 141 + +extern const PRB_ENUM_DESC prb_enums_journal_badread_memspace; + +// 'BadRead.MemSpace' enumeration values +#define JOURNAL_BADREAD_BAR0 1 +#define JOURNAL_BADREAD_FB 2 +#define JOURNAL_BADREAD_INSTANCE 3 +#define JOURNAL_BADREAD_PCI 4 + +extern const PRB_ENUM_DESC prb_enums_journal_badread_reason; + +// 'BadRead.Reason' enumeration values +#define JOURNAL_BADREAD_GPU_OFF_BUS 1 +#define JOURNAL_BADREAD_LOW_POWER 2 +#define JOURNAL_BADREAD_PCI_DEVICE_DISABLED 3 +#define JOURNAL_BADREAD_GPU_RESET 4 +#define JOURNAL_BADREAD_DWORD_SHIFT 5 +#define JOURNAL_BADREAD_UNKNOWN 6 + +extern const PRB_MSG_DESC prb_messages_journal[]; + +// Message descriptor pointers +#define JOURNAL_COMMON (&prb_messages_journal[0]) +#define JOURNAL_ASSERT (&prb_messages_journal[1]) +#define JOURNAL_RVAHEADER (&prb_messages_journal[2]) +#define JOURNAL_BADREAD (&prb_messages_journal[3]) +#define JOURNAL_BUGCHECK (&prb_messages_journal[4]) + +// Message maximum lengths +// Does not include repeated fields, strings and byte arrays. 
+#define JOURNAL_COMMON_LEN 39 +#define JOURNAL_ASSERT_LEN 125 +#define JOURNAL_RVAHEADER_LEN 51 +#define JOURNAL_BADREAD_LEN 67 +#define JOURNAL_BUGCHECK_LEN 66 + +extern const PRB_FIELD_DESC prb_fields_journal_common[]; + +// 'Common' field descriptor pointers +#define JOURNAL_COMMON_GPU_TAG (&prb_fields_journal_common[0]) +#define JOURNAL_COMMON_CPU_TAG (&prb_fields_journal_common[1]) +#define JOURNAL_COMMON_TIME_STAMP (&prb_fields_journal_common[2]) +#define JOURNAL_COMMON_STATE_MASK (&prb_fields_journal_common[3]) + +// 'Common' field lengths +#define JOURNAL_COMMON_GPU_TAG_LEN 5 +#define JOURNAL_COMMON_CPU_TAG_LEN 10 +#define JOURNAL_COMMON_TIME_STAMP_LEN 10 +#define JOURNAL_COMMON_STATE_MASK_LEN 10 + +extern const PRB_FIELD_DESC prb_fields_journal_assert[]; + +// 'Assert' field descriptor pointers +#define JOURNAL_ASSERT_TYPE (&prb_fields_journal_assert[0]) +#define JOURNAL_ASSERT_COMMON_OBSOLETE (&prb_fields_journal_assert[1]) +#define JOURNAL_ASSERT_LAST_TIME_STAMP (&prb_fields_journal_assert[2]) +#define JOURNAL_ASSERT_FIRST_TIME (&prb_fields_journal_assert[3]) +#define JOURNAL_ASSERT_LAST_TIME (&prb_fields_journal_assert[4]) +#define JOURNAL_ASSERT_BREAKPOINT_ADDR_HINT (&prb_fields_journal_assert[5]) +#define JOURNAL_ASSERT_GPU_TAG (&prb_fields_journal_assert[6]) +#define JOURNAL_ASSERT_COUNT (&prb_fields_journal_assert[7]) +#define JOURNAL_ASSERT_LEVEL (&prb_fields_journal_assert[8]) +#define JOURNAL_ASSERT_CALL_STACK (&prb_fields_journal_assert[9]) +#define JOURNAL_ASSERT_SOURCE_LINE (&prb_fields_journal_assert[10]) + +// 'Assert' field lengths +#define JOURNAL_ASSERT_TYPE_LEN 2 +#define JOURNAL_ASSERT_COMMON_OBSOLETE_LEN 42 +#define JOURNAL_ASSERT_LAST_TIME_STAMP_LEN 10 +#define JOURNAL_ASSERT_FIRST_TIME_LEN 10 +#define JOURNAL_ASSERT_LAST_TIME_LEN 10 +#define JOURNAL_ASSERT_BREAKPOINT_ADDR_HINT_LEN 10 +#define JOURNAL_ASSERT_GPU_TAG_LEN 5 +#define JOURNAL_ASSERT_COUNT_LEN 5 +#define JOURNAL_ASSERT_LEVEL_LEN 5 +#define JOURNAL_ASSERT_CALL_STACK_LEN 10 +#define JOURNAL_ASSERT_SOURCE_LINE_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_journal_rvaheader[]; + +// 'RvaHeader' field descriptor pointers +#define JOURNAL_RVAHEADER_DRIVER_START (&prb_fields_journal_rvaheader[0]) +#define JOURNAL_RVAHEADER_OFFSET (&prb_fields_journal_rvaheader[1]) +#define JOURNAL_RVAHEADER_POINTER_SIZE (&prb_fields_journal_rvaheader[2]) +#define JOURNAL_RVAHEADER_UNIQUE_ID_HIGH (&prb_fields_journal_rvaheader[3]) +#define JOURNAL_RVAHEADER_UNIQUE_ID_LOW (&prb_fields_journal_rvaheader[4]) +#define JOURNAL_RVAHEADER_AGE (&prb_fields_journal_rvaheader[5]) + +// 'RvaHeader' field lengths +#define JOURNAL_RVAHEADER_DRIVER_START_LEN 10 +#define JOURNAL_RVAHEADER_OFFSET_LEN 5 +#define JOURNAL_RVAHEADER_POINTER_SIZE_LEN 5 +#define JOURNAL_RVAHEADER_UNIQUE_ID_HIGH_LEN 10 +#define JOURNAL_RVAHEADER_UNIQUE_ID_LOW_LEN 10 +#define JOURNAL_RVAHEADER_AGE_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_journal_badread[]; + +// 'BadRead' field descriptor pointers +#define JOURNAL_BADREAD_MEMORY_SPACE (&prb_fields_journal_badread[0]) +#define JOURNAL_BADREAD_OFFSET (&prb_fields_journal_badread[1]) +#define JOURNAL_BADREAD_MASK (&prb_fields_journal_badread[2]) +#define JOURNAL_BADREAD_VALUE (&prb_fields_journal_badread[3]) +#define JOURNAL_BADREAD_REASON (&prb_fields_journal_badread[4]) +#define JOURNAL_BADREAD_COMMON_OBSOLETE (&prb_fields_journal_badread[5]) + +// 'BadRead' field lengths +#define JOURNAL_BADREAD_MEMORY_SPACE_LEN 2 +#define JOURNAL_BADREAD_OFFSET_LEN 5 +#define JOURNAL_BADREAD_MASK_LEN 5 +#define 
JOURNAL_BADREAD_VALUE_LEN 5 +#define JOURNAL_BADREAD_REASON_LEN 2 +#define JOURNAL_BADREAD_COMMON_OBSOLETE_LEN 42 + +extern const PRB_FIELD_DESC prb_fields_journal_bugcheck[]; + +// 'BugCheck' field descriptor pointers +#define JOURNAL_BUGCHECK_CODE (&prb_fields_journal_bugcheck[0]) +#define JOURNAL_BUGCHECK_GPU_TAG (&prb_fields_journal_bugcheck[1]) +#define JOURNAL_BUGCHECK_TIME_STAMP (&prb_fields_journal_bugcheck[2]) +#define JOURNAL_BUGCHECK_COMMON_OBSOLETE (&prb_fields_journal_bugcheck[3]) + +// 'BugCheck' field lengths +#define JOURNAL_BUGCHECK_CODE_LEN 5 +#define JOURNAL_BUGCHECK_GPU_TAG_LEN 5 +#define JOURNAL_BUGCHECK_TIME_STAMP_LEN 10 +#define JOURNAL_BUGCHECK_COMMON_OBSOLETE_LEN 42 + +extern const PRB_SERVICE_DESC prb_services_journal[]; + +// Service descriptor pointers + +#endif // G_JOURNAL_PB_H__ diff --git a/src/nvidia/generated/g_kern_bus_nvoc.c b/src/nvidia/generated/g_kern_bus_nvoc.c new file mode 100644 index 000000000..527deb935 --- /dev/null +++ b/src/nvidia/generated/g_kern_bus_nvoc.c @@ -0,0 +1,613 @@ +#define NVOC_KERN_BUS_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_bus_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xd2ac57 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelBus; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelBus(KernelBus*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelBus(KernelBus*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelBus(KernelBus*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelBus(KernelBus*, RmHalspecOwner* ); +void __nvoc_dtor_KernelBus(KernelBus*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelBus; + +static const struct NVOC_RTTI __nvoc_rtti_KernelBus_KernelBus = { + /*pClassDef=*/ &__nvoc_class_def_KernelBus, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelBus, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelBus_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelBus, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelBus_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelBus, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelBus = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelBus_KernelBus, + &__nvoc_rtti_KernelBus_OBJENGSTATE, + &__nvoc_rtti_KernelBus_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelBus = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelBus), + /*classId=*/ classId(KernelBus), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelBus", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelBus, + /*pCastInfo=*/ &__nvoc_castinfo_KernelBus, + /*pExportInfo=*/ &__nvoc_export_info_KernelBus +}; + +static NV_STATUS __nvoc_thunk_KernelBus_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, ENGDESCRIPTOR arg0) { + return kbusConstructEngine(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS 
__nvoc_thunk_KernelBus_engstateStatePreInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus) { + return kbusStatePreInitLocked(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelBus_engstateStateInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus) { + return kbusStateInitLocked(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelBus_engstateStatePostLoad(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 arg0) { + return kbusStatePostLoad(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_KernelBus_engstateStatePreUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 arg0) { + return kbusStatePreUnload(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_KernelBus_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus, NvU32 flags) { + return kbusStateUnload(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset), flags); +} + +static void __nvoc_thunk_KernelBus_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pKernelBus) { + kbusStateDestroy(pGpu, (struct KernelBus *)(((unsigned char *)pKernelBus) - __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStateLoad(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePreLoad(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePostUnload(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStateInitUnlocked(POBJGPU pGpu, struct KernelBus *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kbusInitMissing(POBJGPU pGpu, struct KernelBus *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusStatePreInitUnlocked(POBJGPU pGpu, struct KernelBus *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusGetTunableState(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char 
*)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusCompareTunableState(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kbusFreeTunableState(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusAllocTunableState(POBJGPU pGpu, struct KernelBus *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbusSetTunableState(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kbusIsPresent(POBJGPU pGpu, struct KernelBus *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBus_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelBus = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelBus(KernelBus *pThis) { + __nvoc_kbusDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelBus(KernelBus *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal field -- bFlaDummyPageEnabled + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bFlaDummyPageEnabled = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bFlaDummyPageEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bP2pMailboxClientAllocatedBug3466714VoltaAndUp + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bP2pMailboxClientAllocatedBug3466714VoltaAndUp = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bP2pMailboxClientAllocatedBug3466714VoltaAndUp = ((NvBool)(0 != 0)); + } + + // Hal field -- bBug2751296LimitBar2PtSize + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + 
pThis->bBug2751296LimitBar2PtSize = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bBug2751296LimitBar2PtSize = ((NvBool)(0 != 0)); + } + + // Hal field -- bAllowReflectedMappingAccess + if (0) + { + } + // default + else + { + pThis->bAllowReflectedMappingAccess = ((NvBool)(0 != 0)); + } + + // Hal field -- bBar2Tunnelled + if (0) + { + } + // default + else + { + pThis->bBar2Tunnelled = ((NvBool)(0 != 0)); + } + + // Hal field -- bBar2InternalOnly + if (0) + { + } + else if (0) + { + } + // default + else + { + pThis->bBar2InternalOnly = ((NvBool)(0 != 0)); + } + + // Hal field -- bReadCpuPointerToFlush + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bReadCpuPointerToFlush = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bReadCpuPointerToFlush = ((NvBool)(0 != 0)); + } + + // NVOC Property Hal field -- PDB_PROP_KBUS_IS_MISSING + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KBUS_IS_MISSING, ((NvBool)(0 != 0))); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelBus(KernelBus *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelBus_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelBus(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelBus_exit; // Success + +__nvoc_ctor_KernelBus_fail_OBJENGSTATE: +__nvoc_ctor_KernelBus_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelBus_1(KernelBus *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kbusConstructEngine__ = &kbusConstructEngine_IMPL; + + // Hal function -- kbusStatePreInitLocked + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusStatePreInitLocked__ = &kbusStatePreInitLocked_GM107; + } + else if (0) + { + } + else if (0) + { + } + + pThis->__kbusStateInitLocked__ = &kbusStateInitLocked_IMPL; + + // Hal function -- kbusStatePostLoad + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusStatePostLoad__ = &kbusStatePostLoad_GM107; + } + else if (0) + { + } + + // Hal function -- kbusStatePreUnload + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | 
GA104 | GA106 | GA107 */ + { + pThis->__kbusStatePreUnload__ = &kbusStatePreUnload_GM107; + } + else if (0) + { + } + + // Hal function -- kbusStateUnload + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusStateUnload__ = &kbusStateUnload_GM107; + } + else if (0) + { + } + } + + // Hal function -- kbusStateDestroy + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusStateDestroy__ = &kbusStateDestroy_GM107; + } + else if (0) + { + } + + // Hal function -- kbusGetUnusedPciePeerId + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusGetUnusedPciePeerId__ = &kbusGetUnusedPciePeerId_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kbusGetUnusedPciePeerId__ = &kbusGetUnusedPciePeerId_TU102; + } + + // Hal function -- kbusGetNvlinkP2PPeerId + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->__kbusGetNvlinkP2PPeerId__ = &kbusGetNvlinkP2PPeerId_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusGetNvlinkP2PPeerId__ = &kbusGetNvlinkP2PPeerId_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__kbusGetNvlinkP2PPeerId__ = &kbusGetNvlinkP2PPeerId_56cd7a; + } + + // Hal function -- kbusRemoveNvlinkPeerMapping + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusRemoveNvlinkPeerMapping__ = &kbusRemoveNvlinkPeerMapping_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__kbusRemoveNvlinkPeerMapping__ = &kbusRemoveNvlinkPeerMapping_56cd7a; + } + + // Hal function -- kbusUnreserveP2PPeerIds + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusUnreserveP2PPeerIds__ = &kbusUnreserveP2PPeerIds_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kbusUnreserveP2PPeerIds__ = &kbusUnreserveP2PPeerIds_46f6a7; + } + + // Hal function -- kbusAllocateFlaVaspace + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusAllocateFlaVaspace__ = &kbusAllocateFlaVaspace_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 
1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusAllocateFlaVaspace__ = &kbusAllocateFlaVaspace_395e98; + } + + // Hal function -- kbusAllocateHostManagedFlaVaspace + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusAllocateHostManagedFlaVaspace__ = &kbusAllocateHostManagedFlaVaspace_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusAllocateHostManagedFlaVaspace__ = &kbusAllocateHostManagedFlaVaspace_395e98; + } + + // Hal function -- kbusInitFla + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusInitFla__ = &kbusInitFla_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusInitFla__ = &kbusInitFla_ac1694; + } + + // Hal function -- kbusGetFlaVaspace + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusGetFlaVaspace__ = &kbusGetFlaVaspace_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusGetFlaVaspace__ = &kbusGetFlaVaspace_395e98; + } + + // Hal function -- kbusDestroyFla + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusDestroyFla__ = &kbusDestroyFla_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusDestroyFla__ = &kbusDestroyFla_d44104; + } + + // Hal function -- kbusDestroyHostManagedFlaVaspace + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusDestroyHostManagedFlaVaspace__ = &kbusDestroyHostManagedFlaVaspace_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusDestroyHostManagedFlaVaspace__ = &kbusDestroyHostManagedFlaVaspace_d44104; + } + + // Hal function -- kbusVerifyFlaRange + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusVerifyFlaRange__ = &kbusVerifyFlaRange_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusVerifyFlaRange__ = &kbusVerifyFlaRange_bf6dfa; + } + + // Hal function -- kbusConstructFlaInstBlk + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusConstructFlaInstBlk__ = 
&kbusConstructFlaInstBlk_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusConstructFlaInstBlk__ = &kbusConstructFlaInstBlk_395e98; + } + + // Hal function -- kbusDestructFlaInstBlk + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusDestructFlaInstBlk__ = &kbusDestructFlaInstBlk_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusDestructFlaInstBlk__ = &kbusDestructFlaInstBlk_d44104; + } + + // Hal function -- kbusValidateFlaBaseAddress + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kbusValidateFlaBaseAddress__ = &kbusValidateFlaBaseAddress_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusValidateFlaBaseAddress__ = &kbusValidateFlaBaseAddress_395e98; + } + + // Hal function -- kbusIsDirectMappingAllowed + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kbusIsDirectMappingAllowed__ = &kbusIsDirectMappingAllowed_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusIsDirectMappingAllowed__ = &kbusIsDirectMappingAllowed_GA100; + } + + // Hal function -- kbusUseDirectSysmemMap + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kbusUseDirectSysmemMap__ = &kbusUseDirectSysmemMap_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbusUseDirectSysmemMap__ = &kbusUseDirectSysmemMap_GA100; + } + else if (0) + { + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelBus_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelBus_engstateStatePreInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelBus_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__ = &__nvoc_thunk_KernelBus_engstateStatePostLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreUnload__ = &__nvoc_thunk_KernelBus_engstateStatePreUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelBus_engstateStateUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelBus_engstateStateDestroy; + + pThis->__kbusReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusReconcileTunableState; + + pThis->__kbusStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kbusStateLoad; + + pThis->__kbusStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kbusStatePreLoad; + + pThis->__kbusStatePostUnload__ = 
&__nvoc_thunk_OBJENGSTATE_kbusStatePostUnload; + + pThis->__kbusStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kbusStateInitUnlocked; + + pThis->__kbusInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kbusInitMissing; + + pThis->__kbusStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kbusStatePreInitUnlocked; + + pThis->__kbusGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusGetTunableState; + + pThis->__kbusCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusCompareTunableState; + + pThis->__kbusFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusFreeTunableState; + + pThis->__kbusAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusAllocTunableState; + + pThis->__kbusSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbusSetTunableState; + + pThis->__kbusIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kbusIsPresent; +} + +void __nvoc_init_funcTable_KernelBus(KernelBus *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelBus_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelBus(KernelBus *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelBus = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelBus(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelBus(KernelBus **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelBus *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelBus)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelBus)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelBus); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelBus(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelBus(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelBus_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelBus_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelBus(KernelBus **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelBus(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kern_bus_nvoc.h b/src/nvidia/generated/g_kern_bus_nvoc.h new file mode 100644 index 000000000..a9126e231 --- /dev/null +++ b/src/nvidia/generated/g_kern_bus_nvoc.h @@ -0,0 +1,2209 @@ +#ifndef _G_KERN_BUS_NVOC_H_ +#define _G_KERN_BUS_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kern_bus_nvoc.h" + +#ifndef KERN_BUS_H +#define KERN_BUS_H + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "containers/list.h" +#include "nvoc/utility.h" +#include "gpu/mmu/kern_gmmu.h" // VMMU_MAX_GFID +#include "mmu/mmu_walk.h" // MMU_WALK +#include "mmu/gmmu_fmt.h" // GMMU_FMT +#include "mem_mgr/vaspace.h" +#include "ctrl/ctrl0000/ctrl0000system.h" // NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS +#include "ctrl/ctrl2080/ctrl2080bus.h" + +#define MAX_PCI_BARS 8 + +// +// Virtual BAR2 mapping info is shared by tesla and fermi code +// +#if defined(NV_UNIX) && defined(NVCPU_X86_64) +// 64-bit Unix can support many more mappings than some other operating systems: +#define BUS_BAR2_MAX_MAPPINGS 200 +#else +#define BUS_BAR2_MAX_MAPPINGS 50 +#endif + +typedef enum +{ + BAR2_MODE_PHYSICAL = 0, + BAR2_MODE_VIRTUAL, +} BAR2_MODE; + +/*! + * @brief Helper macro to return NV_TRUE if the input BAR offset (i) is a 64-bit + * offset. Used by several functions in the bus HAL. 
+ */ +#define IS_BAR_64(i) (((i) & 0x00000006) == 0x00000004) + +// Test buffer size used in the coherent link test +#define BUS_COHERENT_LINK_TEST_BUFFER_SIZE 0x100 + +// FLA flags +#define NV_BUS_INVALID_FLA_ADDR NV_U64_MAX +#define NV_BUS_FLA_VASPACE_ADDR_HI 47 // FLA is a 47b VAspace + + +// +// kbusUpdateRmAperture flags +// +// TLB invalidate +#define UPDATE_RM_APERTURE_FLAGS_INVALIDATE NVBIT(0) +// Indicates we're done with specified mapping +#define UPDATE_RM_APERTURE_FLAGS_DISCARD NVBIT(1) +// Indicates we're done with mapping and marking sparse in PTE +#define UPDATE_RM_APERTURE_FLAGS_SPARSIFY NVBIT(2) +// Indicates we're updating page tables for CPU invisible va range +#define UPDATE_RM_APERTURE_FLAGS_CPU_INVISIBLE_RANGE NVBIT(3) + +// +// kbusMapFbAperture flags +// +#define BUS_MAP_FB_FLAGS_NONE (0) +#define BUS_MAP_FB_FLAGS_MAP_RSVD_BAR1 NVBIT(0) +#define BUS_MAP_FB_FLAGS_DISABLE_ENCRYPTION NVBIT(1) +#define BUS_MAP_FB_FLAGS_MAP_DOWNWARDS NVBIT(2) // bug 624482 puts USERD mapping to the top of bar1 for Fermi +// NOTE: these two are exclusive (but not easy to change to DRF style now) +#define BUS_MAP_FB_FLAGS_READ_ONLY NVBIT(3) +#define BUS_MAP_FB_FLAGS_WRITE_ONLY NVBIT(4) +#define BUS_MAP_FB_FLAGS_MAP_UNICAST NVBIT(5) +#define BUS_MAP_FB_FLAGS_MAP_OFFSET_FIXED NVBIT(6) +#define BUS_MAP_FB_FLAGS_PRE_INIT NVBIT(7) + +#define BUS_MAP_FB_FLAGS_FERMI_INVALID ~(BUS_MAP_FB_FLAGS_MAP_DOWNWARDS | \ + BUS_MAP_FB_FLAGS_DISABLE_ENCRYPTION | \ + BUS_MAP_FB_FLAGS_READ_ONLY | \ + BUS_MAP_FB_FLAGS_WRITE_ONLY | \ + BUS_MAP_FB_FLAGS_MAP_UNICAST | \ + BUS_MAP_FB_FLAGS_MAP_OFFSET_FIXED | \ + BUS_MAP_FB_FLAGS_PRE_INIT) + +#define BUS_MAP_FB_FLAGS_NV5X_INVALID ~(BUS_MAP_FB_FLAGS_MAP_RSVD_BAR1 | BUS_MAP_FB_FLAGS_DISABLE_ENCRYPTION) + +// +// kbusFlush flags +// +#define BUS_FLUSH_VIDEO_MEMORY NVBIT(0) +#define BUS_FLUSH_SYSTEM_MEMORY NVBIT(1) +#define BUS_FLUSH_USE_PCIE_READ NVBIT(2) + +// +// Peer to peer (P2P) defines +// +#define P2P_MAX_NUM_PEERS 8 + +#define BUS_INVALID_PEER 0xffffffff + +#define PCIE_P2P_WRITE_MAILBOX_SIZE ((NvU64)64*1024) // since Fermi+ +#define PCIE_P2P_INVALID_WRITE_MAILBOX_ADDR ~((NvU64)0) + +// +// BARs defines +// +#define BUS_BAR_0 0 +#define BUS_BAR_1 1 +#define BUS_BAR_2 2 +#define BUS_BAR_3 3 +#define BUS_NUM_BARS 4 + +#define BUS_BAR2_APERTURE_MB 32 +#define BUS_BAR2_RM_APERTURE_MB 16 + +// Inst Block +#define GF100_BUS_INSTANCEBLOCK_SIZE 4096 + +// @ref busMigrateBarMapping_GV100 to see how FB region is organized +#define COHERENT_CPU_MAPPING_WPR 0x0 +#define COHERENT_CPU_MAPPING_REGION_1 0x1 +#define COHERENT_CPU_MAPPING_REGION_2 0x2 +#define COHERENT_CPU_MAPPING_TOTAL_REGIONS 0x3 // Should change it when num of regions changed + +typedef struct +{ + NvU64 vAddr; // Bar2 addr returned by eheap + NvU8 *pRtnPtr; // Bar2 addr + lin addr of bar2 base + + MEMORY_DESCRIPTOR *pMemDesc; // memory descriptor for this mapping + MEM_DESC_DESTROY_CALLBACK memDescCallback; + + ListNode node; +} VirtualBar2MapEntry; + +typedef struct +{ + NvU32 refCount; + NvU32 remotePeerId; + NvBool bReserved; + PMEMORY_DESCRIPTOR pRemoteP2PDomMemDesc; + PMEMORY_DESCRIPTOR pRemoteWMBoxMemDesc; +} KBUS_PCIE_PEER; + +MAKE_INTRUSIVE_LIST(VirtualBar2MapList, VirtualBar2MapEntry, node); + +#ifdef NVOC_KERN_BUS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct __nvoc_inner_struc_KernelBus_1__ { + RmPhysAddr physAddr; + NvU64 apertureLength; + struct OBJVASPACE *pVAS; + NvU64 instBlockBase; + MEMORY_DESCRIPTOR 
*pInstBlkMemDesc; +}; + +struct __nvoc_inner_struc_KernelBus_2__ { + NvU32 busBar1PeerRefcount[32]; + NvU32 busBar1HostRefcount[32]; +}; + +struct __nvoc_inner_struc_KernelBus_3__ { + RmPhysAddr physAddr; + NvU64 bar2OffsetInBar0Window; + NvU64 cpuVisibleBase; + NvU64 cpuVisibleLimit; + NvU64 cpuInvisibleBase; + NvU64 cpuInvisibleLimit; + NvU64 rmApertureBase; + NvU64 rmApertureLimit; + NvU64 vaLimit; + NvU64 pdeBase; + MEMORY_DESCRIPTOR *pPDEMemDesc; + NvU64 pteBase; + NvU64 instBlockBase; + MEMORY_DESCRIPTOR *pInstBlkMemDesc; + NvU64 pdeBaseForBootstrap; + MEMORY_DESCRIPTOR *pPDEMemDescForBootstrap; + NvU64 pteBaseForBootstrap; + NvBool bBootstrap; + NvBool bMigrating; + MMU_WALK *pWalk; + MEMORY_DESCRIPTOR *pWalkStagingBuffer; + const struct GMMU_FMT *pFmt; + NvU32 numPageDirs; + NvU32 pageDirSize; + NvU32 numPageTbls; + NvU32 pageTblSize; + NvU32 pageDirInit; + NvU32 pageTblInit; +}; + +struct __nvoc_inner_struc_KernelBus_4__ { + struct OBJEHEAP *pVASpaceHeap; + struct OBJEHEAP *pVASpaceHiddenHeap; + VirtualBar2MapEntry *pMapListMemory; + VirtualBar2MapList freeMapList; + VirtualBar2MapList cachedMapList; + VirtualBar2MapList usedMapList; + MEMORY_DESCRIPTOR *pPageLevelsMemDesc; + NvU8 *pPageLevels; + MEMORY_DESCRIPTOR *pPageLevelsMemDescForBootstrap; + NvU8 *pPageLevelsForBootstrap; + MEMORY_DESCRIPTOR *pPTEMemDesc; + NvU8 *pCpuMapping; + NvU32 vAlignment; + NvU32 flags; + MEMORY_DESCRIPTOR *pPDB; + NvU32 mapCount; + NvU32 cacheHit; + NvU32 evictions; +}; + +struct __nvoc_inner_struc_KernelBus_5__ { + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubDevice; + NvHandle hFlaVASpace; + struct OBJVASPACE *pFlaVAS; + PMEMORY_DESCRIPTOR pInstblkMemDesc; + NvBool bFlaAllocated; + NvBool bFlaBind; + NvBool bFlaRangeRegistered; + NvU64 base; + NvU64 size; +}; + +struct __nvoc_inner_struc_KernelBus_6__ { + NvBool bCoherentCpuMapping; + NvU32 nrMapping; + NvP64 pCpuMapping[3]; + NvU64 size[3]; + NvU64 refcnt[3]; + RmPhysAddr physAddr[3]; +}; + +struct __nvoc_inner_struc_KernelBus_7__ { + NvBool bNvlinkPeerIdsReserved; + NvU32 busNvlinkPeerNumberMask[32]; + NvU32 busNvlinkMappingRefcountPerGpu[32]; + NvU32 busNvlinkMappingRefcountPerPeerId[8]; + NvU32 busNvlinkMappingRefcountPerPeerIdSpa[8]; +}; + +struct __nvoc_inner_struc_KernelBus_8__ { + NvU32 peerNumberMask[32]; + KBUS_PCIE_PEER busPeer[8]; + NvU64 writeMailboxBar1Addr; + NvU64 writeMailboxTotalSize; +}; + +struct __nvoc_inner_struc_KernelBus_9__ { + NvU32 busC2CPeerNumberMask[32]; + NvU32 busC2CMappingRefcountPerPeerId[8]; +}; + + +struct KernelBus { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelBus *__nvoc_pbase_KernelBus; + NV_STATUS (*__kbusConstructEngine__)(OBJGPU *, struct KernelBus *, ENGDESCRIPTOR); + NV_STATUS (*__kbusStatePreInitLocked__)(OBJGPU *, struct KernelBus *); + NV_STATUS (*__kbusStateInitLocked__)(OBJGPU *, struct KernelBus *); + NV_STATUS (*__kbusStatePostLoad__)(OBJGPU *, struct KernelBus *, NvU32); + NV_STATUS (*__kbusStatePreUnload__)(OBJGPU *, struct KernelBus *, NvU32); + NV_STATUS (*__kbusStateUnload__)(OBJGPU *, struct KernelBus *, NvU32); + void (*__kbusStateDestroy__)(OBJGPU *, struct KernelBus *); + NvU32 (*__kbusGetUnusedPciePeerId__)(OBJGPU *, struct KernelBus *); + NV_STATUS (*__kbusGetNvlinkP2PPeerId__)(OBJGPU *, struct KernelBus *, OBJGPU *, struct KernelBus *, NvU32 *); + NV_STATUS (*__kbusRemoveNvlinkPeerMapping__)(OBJGPU *, struct KernelBus *, OBJGPU *, NvU32, 
NvU32); + NV_STATUS (*__kbusUnreserveP2PPeerIds__)(OBJGPU *, struct KernelBus *, NvU32); + NV_STATUS (*__kbusAllocateFlaVaspace__)(OBJGPU *, struct KernelBus *, NvU64, NvU64); + NV_STATUS (*__kbusAllocateHostManagedFlaVaspace__)(OBJGPU *, struct KernelBus *, NvHandle, NvHandle, NvHandle, NvHandle, NvU64, NvU64, NvU32); + NV_STATUS (*__kbusInitFla__)(OBJGPU *, struct KernelBus *, NvU64, NvU64); + NV_STATUS (*__kbusGetFlaVaspace__)(OBJGPU *, struct KernelBus *, struct OBJVASPACE **); + void (*__kbusDestroyFla__)(OBJGPU *, struct KernelBus *); + void (*__kbusDestroyHostManagedFlaVaspace__)(OBJGPU *, struct KernelBus *, NvU32); + NvBool (*__kbusVerifyFlaRange__)(OBJGPU *, struct KernelBus *, NvU64, NvU64); + NV_STATUS (*__kbusConstructFlaInstBlk__)(OBJGPU *, struct KernelBus *, NvU32); + void (*__kbusDestructFlaInstBlk__)(OBJGPU *, struct KernelBus *); + NV_STATUS (*__kbusValidateFlaBaseAddress__)(OBJGPU *, struct KernelBus *, NvU64); + NV_STATUS (*__kbusIsDirectMappingAllowed__)(OBJGPU *, struct KernelBus *, PMEMORY_DESCRIPTOR, NvU32, NvBool *); + NV_STATUS (*__kbusUseDirectSysmemMap__)(OBJGPU *, struct KernelBus *, MEMORY_DESCRIPTOR *, NvBool *); + NV_STATUS (*__kbusReconcileTunableState__)(POBJGPU, struct KernelBus *, void *); + NV_STATUS (*__kbusStateLoad__)(POBJGPU, struct KernelBus *, NvU32); + NV_STATUS (*__kbusStatePreLoad__)(POBJGPU, struct KernelBus *, NvU32); + NV_STATUS (*__kbusStatePostUnload__)(POBJGPU, struct KernelBus *, NvU32); + NV_STATUS (*__kbusStateInitUnlocked__)(POBJGPU, struct KernelBus *); + void (*__kbusInitMissing__)(POBJGPU, struct KernelBus *); + NV_STATUS (*__kbusStatePreInitUnlocked__)(POBJGPU, struct KernelBus *); + NV_STATUS (*__kbusGetTunableState__)(POBJGPU, struct KernelBus *, void *); + NV_STATUS (*__kbusCompareTunableState__)(POBJGPU, struct KernelBus *, void *, void *); + void (*__kbusFreeTunableState__)(POBJGPU, struct KernelBus *, void *); + NV_STATUS (*__kbusAllocTunableState__)(POBJGPU, struct KernelBus *, void **); + NV_STATUS (*__kbusSetTunableState__)(POBJGPU, struct KernelBus *, void *); + NvBool (*__kbusIsPresent__)(POBJGPU, struct KernelBus *); + NvU32 totalPciBars; + RmPhysAddr pciBars[8]; + NvU64 pciBarSizes[8]; + NvBool bPciBarSizesValid; + NvU64 cachedBar0WindowVidOffset; + NvU8 *pWriteCombinedBar0Window; + NvU8 *pUncachedBar0Window; + NvU8 *pDefaultBar0Pointer; + NvU64 physicalBar0WindowSize; + struct __nvoc_inner_struc_KernelBus_1__ bar1[64]; + struct __nvoc_inner_struc_KernelBus_2__ bar1PeerInfo; + struct __nvoc_inner_struc_KernelBus_3__ bar2[64]; + struct __nvoc_inner_struc_KernelBus_4__ virtualBar2[64]; + struct __nvoc_inner_struc_KernelBus_5__ flaInfo; + NvBool bFlaSupported; + NvBool bFlaEnabled; + NvBool bFlaDummyPageEnabled; + struct __nvoc_inner_struc_KernelBus_6__ coherentCpuMapping; + NvU64 coherentLinkTestBufferBase; + struct __nvoc_inner_struc_KernelBus_7__ p2p; + struct __nvoc_inner_struc_KernelBus_8__ p2pPcie; + struct __nvoc_inner_struc_KernelBus_9__ c2cPeerInfo; + NvU32 numPeers; + NvBool p2pMapSpecifyId; + NvU32 p2pMapPeerId; + NvBool bP2pInitialized; + NvBool bP2pMailboxClientAllocated; + NvBool bP2pMailboxClientAllocatedBug3466714VoltaAndUp; + NvBool bBar1P2pCapable; + NvBool bBar1Force64KBMapping; + NvBool bBar1PhysicalModeEnabled; + NvBool bIsBar2Initialized; + NvBool bBar2SysmemAccessEnabled; + NvBool bBar2TestSkipped; + NvBool bUsePhysicalBar2InitPagetable; + NvBool bPreserveBar1ConsoleEnabled; + NvBool bBar1ConsolePreserved; + NvBool bBug2751296LimitBar2PtSize; + NvBool bAllowReflectedMappingAccess; + NvBool 
bBar2Tunnelled; + NvBool bBar2InternalOnly; + NvBool bFbFlushDisabled; + PMEMORY_DESCRIPTOR pFlushMemDesc; + NvU8 *pReadToFlush; + NvBool bReadCpuPointerToFlush; + NvU32 PTEBAR2Aperture; + NvU32 PTEBAR2Attr; + NvU32 PDEBAR2Aperture; + NvU32 PDEBAR2Attr; + NvU32 InstBlkAperture; + NvU32 InstBlkAttr; +}; + +#ifndef __NVOC_CLASS_KernelBus_TYPEDEF__ +#define __NVOC_CLASS_KernelBus_TYPEDEF__ +typedef struct KernelBus KernelBus; +#endif /* __NVOC_CLASS_KernelBus_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelBus +#define __nvoc_class_id_KernelBus 0xd2ac57 +#endif /* __nvoc_class_id_KernelBus */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelBus; + +#define __staticCast_KernelBus(pThis) \ + ((pThis)->__nvoc_pbase_KernelBus) + +#ifdef __nvoc_kern_bus_h_disabled +#define __dynamicCast_KernelBus(pThis) ((KernelBus*)NULL) +#else //__nvoc_kern_bus_h_disabled +#define __dynamicCast_KernelBus(pThis) \ + ((KernelBus*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelBus))) +#endif //__nvoc_kern_bus_h_disabled + +#define PDB_PROP_KBUS_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KBUS_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelBus(KernelBus**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelBus(KernelBus**, Dynamic*, NvU32); +#define __objCreate_KernelBus(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelBus((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kbusConstructEngine(pGpu, pKernelBus, arg0) kbusConstructEngine_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusStatePreInitLocked(pGpu, pKernelBus) kbusStatePreInitLocked_DISPATCH(pGpu, pKernelBus) +#define kbusStatePreInitLocked_HAL(pGpu, pKernelBus) kbusStatePreInitLocked_DISPATCH(pGpu, pKernelBus) +#define kbusStateInitLocked(pGpu, pKernelBus) kbusStateInitLocked_DISPATCH(pGpu, pKernelBus) +#define kbusStatePostLoad(pGpu, pKernelBus, arg0) kbusStatePostLoad_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusStatePostLoad_HAL(pGpu, pKernelBus, arg0) kbusStatePostLoad_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusStatePreUnload(pGpu, pKernelBus, arg0) kbusStatePreUnload_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusStatePreUnload_HAL(pGpu, pKernelBus, arg0) kbusStatePreUnload_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusStateUnload(pGpu, pKernelBus, flags) kbusStateUnload_DISPATCH(pGpu, pKernelBus, flags) +#define kbusStateUnload_HAL(pGpu, pKernelBus, flags) kbusStateUnload_DISPATCH(pGpu, pKernelBus, flags) +#define kbusStateDestroy(pGpu, pKernelBus) kbusStateDestroy_DISPATCH(pGpu, pKernelBus) +#define kbusStateDestroy_HAL(pGpu, pKernelBus) kbusStateDestroy_DISPATCH(pGpu, pKernelBus) +#define kbusGetUnusedPciePeerId(pGpu, pKernelBus) kbusGetUnusedPciePeerId_DISPATCH(pGpu, pKernelBus) +#define kbusGetUnusedPciePeerId_HAL(pGpu, pKernelBus) kbusGetUnusedPciePeerId_DISPATCH(pGpu, pKernelBus) +#define kbusGetNvlinkP2PPeerId(pGpu0, pKernelBus0, pGpu1, pKernelBus1, nvlinkPeer) kbusGetNvlinkP2PPeerId_DISPATCH(pGpu0, pKernelBus0, pGpu1, pKernelBus1, nvlinkPeer) +#define kbusGetNvlinkP2PPeerId_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, nvlinkPeer) kbusGetNvlinkP2PPeerId_DISPATCH(pGpu0, pKernelBus0, pGpu1, pKernelBus1, nvlinkPeer) +#define kbusRemoveNvlinkPeerMapping(pGpu, pKernelBus, pGpu1, arg0, attributes) kbusRemoveNvlinkPeerMapping_DISPATCH(pGpu, pKernelBus, pGpu1, arg0, attributes) +#define kbusRemoveNvlinkPeerMapping_HAL(pGpu, pKernelBus, pGpu1, arg0, attributes) kbusRemoveNvlinkPeerMapping_DISPATCH(pGpu, pKernelBus, 
pGpu1, arg0, attributes) +#define kbusUnreserveP2PPeerIds(pGpu, pKernelBus, peerMask) kbusUnreserveP2PPeerIds_DISPATCH(pGpu, pKernelBus, peerMask) +#define kbusUnreserveP2PPeerIds_HAL(pGpu, pKernelBus, peerMask) kbusUnreserveP2PPeerIds_DISPATCH(pGpu, pKernelBus, peerMask) +#define kbusAllocateFlaVaspace(pGpu, pKernelBus, arg0, arg1) kbusAllocateFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1) +#define kbusAllocateFlaVaspace_HAL(pGpu, pKernelBus, arg0, arg1) kbusAllocateFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1) +#define kbusAllocateHostManagedFlaVaspace(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6) kbusAllocateHostManagedFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6) +#define kbusAllocateHostManagedFlaVaspace_HAL(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6) kbusAllocateHostManagedFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6) +#define kbusInitFla(pGpu, pKernelBus, base, size) kbusInitFla_DISPATCH(pGpu, pKernelBus, base, size) +#define kbusInitFla_HAL(pGpu, pKernelBus, base, size) kbusInitFla_DISPATCH(pGpu, pKernelBus, base, size) +#define kbusGetFlaVaspace(pGpu, pKernelBus, arg0) kbusGetFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusGetFlaVaspace_HAL(pGpu, pKernelBus, arg0) kbusGetFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusDestroyFla(pGpu, pKernelBus) kbusDestroyFla_DISPATCH(pGpu, pKernelBus) +#define kbusDestroyFla_HAL(pGpu, pKernelBus) kbusDestroyFla_DISPATCH(pGpu, pKernelBus) +#define kbusDestroyHostManagedFlaVaspace(pGpu, pKernelBus, arg0) kbusDestroyHostManagedFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusDestroyHostManagedFlaVaspace_HAL(pGpu, pKernelBus, arg0) kbusDestroyHostManagedFlaVaspace_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusVerifyFlaRange(pGpu, pKernelBus, arg0, arg1) kbusVerifyFlaRange_DISPATCH(pGpu, pKernelBus, arg0, arg1) +#define kbusVerifyFlaRange_HAL(pGpu, pKernelBus, arg0, arg1) kbusVerifyFlaRange_DISPATCH(pGpu, pKernelBus, arg0, arg1) +#define kbusConstructFlaInstBlk(pGpu, pKernelBus, arg0) kbusConstructFlaInstBlk_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusConstructFlaInstBlk_HAL(pGpu, pKernelBus, arg0) kbusConstructFlaInstBlk_DISPATCH(pGpu, pKernelBus, arg0) +#define kbusDestructFlaInstBlk(pGpu, pKernelBus) kbusDestructFlaInstBlk_DISPATCH(pGpu, pKernelBus) +#define kbusDestructFlaInstBlk_HAL(pGpu, pKernelBus) kbusDestructFlaInstBlk_DISPATCH(pGpu, pKernelBus) +#define kbusValidateFlaBaseAddress(pGpu, pKernelBus, flaBaseAddr) kbusValidateFlaBaseAddress_DISPATCH(pGpu, pKernelBus, flaBaseAddr) +#define kbusValidateFlaBaseAddress_HAL(pGpu, pKernelBus, flaBaseAddr) kbusValidateFlaBaseAddress_DISPATCH(pGpu, pKernelBus, flaBaseAddr) +#define kbusIsDirectMappingAllowed(pGpu, pKernelBus, arg0, arg1, arg2) kbusIsDirectMappingAllowed_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2) +#define kbusIsDirectMappingAllowed_HAL(pGpu, pKernelBus, arg0, arg1, arg2) kbusIsDirectMappingAllowed_DISPATCH(pGpu, pKernelBus, arg0, arg1, arg2) +#define kbusUseDirectSysmemMap(pGpu, pKernelBus, arg0, arg1) kbusUseDirectSysmemMap_DISPATCH(pGpu, pKernelBus, arg0, arg1) +#define kbusUseDirectSysmemMap_HAL(pGpu, pKernelBus, arg0, arg1) kbusUseDirectSysmemMap_DISPATCH(pGpu, pKernelBus, arg0, arg1) +#define kbusReconcileTunableState(pGpu, pEngstate, pTunableState) kbusReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kbusStateLoad(pGpu, pEngstate, arg0) kbusStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kbusStatePreLoad(pGpu, pEngstate, 
arg0) kbusStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kbusStatePostUnload(pGpu, pEngstate, arg0) kbusStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kbusStateInitUnlocked(pGpu, pEngstate) kbusStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kbusInitMissing(pGpu, pEngstate) kbusInitMissing_DISPATCH(pGpu, pEngstate) +#define kbusStatePreInitUnlocked(pGpu, pEngstate) kbusStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kbusGetTunableState(pGpu, pEngstate, pTunableState) kbusGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kbusCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kbusCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kbusFreeTunableState(pGpu, pEngstate, pTunableState) kbusFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kbusAllocTunableState(pGpu, pEngstate, ppTunableState) kbusAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kbusSetTunableState(pGpu, pEngstate, pTunableState) kbusSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kbusIsPresent(pGpu, pEngstate) kbusIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kbusInitBarsSize_KERNEL(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusInitBarsSize(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusInitBarsSize(pGpu, pKernelBus) kbusInitBarsSize_KERNEL(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusInitBarsSize_HAL(pGpu, pKernelBus) kbusInitBarsSize(pGpu, pKernelBus) + +NV_STATUS kbusConstructHal_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusConstructHal(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusConstructHal(pGpu, pKernelBus) kbusConstructHal_GM107(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusConstructHal_HAL(pGpu, pKernelBus) kbusConstructHal(pGpu, pKernelBus) + +NV_STATUS kbusStateInitLockedKernel_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusStateInitLockedKernel(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusStateInitLockedKernel(pGpu, pKernelBus) kbusStateInitLockedKernel_GM107(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusStateInitLockedKernel_HAL(pGpu, pKernelBus) kbusStateInitLockedKernel(pGpu, pKernelBus) + +static inline NV_STATUS kbusStateInitLockedPhysical_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return NV_OK; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusStateInitLockedPhysical(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusStateInitLockedPhysical(pGpu, pKernelBus) kbusStateInitLockedPhysical_56cd7a(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusStateInitLockedPhysical_HAL(pGpu, pKernelBus) kbusStateInitLockedPhysical(pGpu, pKernelBus) + +NvU8 *kbusMapBar2Aperture_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus, 
MEMORY_DESCRIPTOR *pMemDesc, NvU32 transfer_flags); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU8 *kbusMapBar2Aperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU32 transfer_flags) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NULL; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusMapBar2Aperture(pGpu, pKernelBus, pMemDesc, transfer_flags) kbusMapBar2Aperture_VBAR2(pGpu, pKernelBus, pMemDesc, transfer_flags) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusMapBar2Aperture_HAL(pGpu, pKernelBus, pMemDesc, transfer_flags) kbusMapBar2Aperture(pGpu, pKernelBus, pMemDesc, transfer_flags) + +NvU8 *kbusValidateBar2ApertureMapping_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU8 *p); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU8 *kbusValidateBar2ApertureMapping(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU8 *p) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NULL; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusValidateBar2ApertureMapping(pGpu, pKernelBus, pMemDesc, p) kbusValidateBar2ApertureMapping_VBAR2(pGpu, pKernelBus, pMemDesc, p) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusValidateBar2ApertureMapping_HAL(pGpu, pKernelBus, pMemDesc, p) kbusValidateBar2ApertureMapping(pGpu, pKernelBus, pMemDesc, p) + +void kbusUnmapBar2ApertureWithFlags_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU8 **pCpuPtr, NvU32 flags); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusUnmapBar2ApertureWithFlags(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU8 **pCpuPtr, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusUnmapBar2ApertureWithFlags(pGpu, pKernelBus, pMemDesc, pCpuPtr, flags) kbusUnmapBar2ApertureWithFlags_VBAR2(pGpu, pKernelBus, pMemDesc, pCpuPtr, flags) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusUnmapBar2ApertureWithFlags_HAL(pGpu, pKernelBus, pMemDesc, pCpuPtr, flags) kbusUnmapBar2ApertureWithFlags(pGpu, pKernelBus, pMemDesc, pCpuPtr, flags) + +NV_STATUS kbusUpdateRmAperture_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU64 arg1, NvU64 arg2, NvU32 arg3); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusUpdateRmAperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU64 arg1, NvU64 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusUpdateRmAperture(pGpu, pKernelBus, arg0, arg1, arg2, arg3) kbusUpdateRmAperture_GM107(pGpu, pKernelBus, arg0, arg1, arg2, arg3) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusUpdateRmAperture_HAL(pGpu, pKernelBus, arg0, arg1, arg2, arg3) kbusUpdateRmAperture(pGpu, pKernelBus, arg0, arg1, arg2, arg3) + +NV_STATUS kbusSetupBar2GpuVaSpace_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetupBar2GpuVaSpace(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupBar2GpuVaSpace(pGpu, pKernelBus, gfid) kbusSetupBar2GpuVaSpace_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupBar2GpuVaSpace_HAL(pGpu, pKernelBus, gfid) 
kbusSetupBar2GpuVaSpace(pGpu, pKernelBus, gfid) + +NV_STATUS kbusTeardownBar2GpuVaSpace_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusTeardownBar2GpuVaSpace(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusTeardownBar2GpuVaSpace(pGpu, pKernelBus, gfid) kbusTeardownBar2GpuVaSpace_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusTeardownBar2GpuVaSpace_HAL(pGpu, pKernelBus, gfid) kbusTeardownBar2GpuVaSpace(pGpu, pKernelBus, gfid) + +NvU32 kbusGetSizeOfBar2PageTables_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU32 kbusGetSizeOfBar2PageTables(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetSizeOfBar2PageTables(pGpu, pKernelBus) kbusGetSizeOfBar2PageTables_GM107(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetSizeOfBar2PageTables_HAL(pGpu, pKernelBus) kbusGetSizeOfBar2PageTables(pGpu, pKernelBus) + +NvU32 kbusGetSizeOfBar2PageDirs_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU32 kbusGetSizeOfBar2PageDirs(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetSizeOfBar2PageDirs(pGpu, pKernelBus) kbusGetSizeOfBar2PageDirs_GM107(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetSizeOfBar2PageDirs_HAL(pGpu, pKernelBus) kbusGetSizeOfBar2PageDirs(pGpu, pKernelBus) + +NvU64 kbusGetVaLimitForBar2_KERNEL(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU64 kbusGetVaLimitForBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetVaLimitForBar2(pGpu, pKernelBus) kbusGetVaLimitForBar2_KERNEL(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetVaLimitForBar2_HAL(pGpu, pKernelBus) kbusGetVaLimitForBar2(pGpu, pKernelBus) + +NV_STATUS kbusCommitBar2_KERNEL(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusCommitBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCommitBar2(pGpu, pKernelBus, flags) kbusCommitBar2_KERNEL(pGpu, pKernelBus, flags) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusCommitBar2_HAL(pGpu, pKernelBus, flags) kbusCommitBar2(pGpu, pKernelBus, flags) + +MMU_WALK *kbusGetBar2GmmuWalker_GM107(struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline MMU_WALK *kbusGetBar2GmmuWalker(struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NULL; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetBar2GmmuWalker(pKernelBus) kbusGetBar2GmmuWalker_GM107(pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetBar2GmmuWalker_HAL(pKernelBus) kbusGetBar2GmmuWalker(pKernelBus) + +const struct GMMU_FMT *kbusGetBar2GmmuFmt_GM107(struct KernelBus *pKernelBus); + +#ifdef 
__nvoc_kern_bus_h_disabled +static inline const struct GMMU_FMT *kbusGetBar2GmmuFmt(struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NULL; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetBar2GmmuFmt(pKernelBus) kbusGetBar2GmmuFmt_GM107(pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetBar2GmmuFmt_HAL(pKernelBus) kbusGetBar2GmmuFmt(pKernelBus) + +NV_STATUS kbusPatchBar1Pdb_GSPCLIENT(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusPatchBar1Pdb(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusPatchBar1Pdb(pGpu, pKernelBus) kbusPatchBar1Pdb_GSPCLIENT(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusPatchBar1Pdb_HAL(pGpu, pKernelBus) kbusPatchBar1Pdb(pGpu, pKernelBus) + +NV_STATUS kbusPatchBar2Pdb_GSPCLIENT(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusPatchBar2Pdb(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusPatchBar2Pdb(pGpu, pKernelBus) kbusPatchBar2Pdb_GSPCLIENT(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusPatchBar2Pdb_HAL(pGpu, pKernelBus) kbusPatchBar2Pdb(pGpu, pKernelBus) + +NV_STATUS kbusSetBarsApertureSize_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetBarsApertureSize(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetBarsApertureSize(pGpu, pKernelBus, gfid) kbusSetBarsApertureSize_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetBarsApertureSize_HAL(pGpu, pKernelBus, gfid) kbusSetBarsApertureSize(pGpu, pKernelBus, gfid) + +NV_STATUS kbusConstructVirtualBar2_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusConstructVirtualBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusConstructVirtualBar2(pGpu, pKernelBus, gfid) kbusConstructVirtualBar2_VBAR2(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusConstructVirtualBar2_HAL(pGpu, pKernelBus, gfid) kbusConstructVirtualBar2(pGpu, pKernelBus, gfid) + +void kbusDestructVirtualBar2_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvBool shutdown, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusDestructVirtualBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvBool shutdown, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusDestructVirtualBar2(pGpu, pKernelBus, shutdown, gfid) kbusDestructVirtualBar2_VBAR2(pGpu, pKernelBus, shutdown, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusDestructVirtualBar2_HAL(pGpu, pKernelBus, shutdown, gfid) kbusDestructVirtualBar2(pGpu, pKernelBus, shutdown, gfid) + +void kbusFlushVirtualBar2_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvBool shutdown, NvU32 gfid); + +#ifdef 
__nvoc_kern_bus_h_disabled +static inline void kbusFlushVirtualBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvBool shutdown, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusFlushVirtualBar2(pGpu, pKernelBus, shutdown, gfid) kbusFlushVirtualBar2_VBAR2(pGpu, pKernelBus, shutdown, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusFlushVirtualBar2_HAL(pGpu, pKernelBus, shutdown, gfid) kbusFlushVirtualBar2(pGpu, pKernelBus, shutdown, gfid) + +NV_STATUS kbusInitVirtualBar2_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusInitVirtualBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusInitVirtualBar2(pGpu, pKernelBus) kbusInitVirtualBar2_VBAR2(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusInitVirtualBar2_HAL(pGpu, pKernelBus) kbusInitVirtualBar2(pGpu, pKernelBus) + +NV_STATUS kbusPreInitVirtualBar2_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusPreInitVirtualBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusPreInitVirtualBar2(pGpu, pKernelBus) kbusPreInitVirtualBar2_VBAR2(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusPreInitVirtualBar2_HAL(pGpu, pKernelBus) kbusPreInitVirtualBar2(pGpu, pKernelBus) + +NV_STATUS kbusConstructVirtualBar2CpuVisibleHeap_VBAR2(struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusConstructVirtualBar2CpuVisibleHeap(struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusConstructVirtualBar2CpuVisibleHeap(pKernelBus, gfid) kbusConstructVirtualBar2CpuVisibleHeap_VBAR2(pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusConstructVirtualBar2CpuVisibleHeap_HAL(pKernelBus, gfid) kbusConstructVirtualBar2CpuVisibleHeap(pKernelBus, gfid) + +static inline NV_STATUS kbusConstructVirtualBar2CpuInvisibleHeap_56cd7a(struct KernelBus *pKernelBus, NvU32 gfid) { + return NV_OK; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusConstructVirtualBar2CpuInvisibleHeap(struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusConstructVirtualBar2CpuInvisibleHeap(pKernelBus, gfid) kbusConstructVirtualBar2CpuInvisibleHeap_56cd7a(pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusConstructVirtualBar2CpuInvisibleHeap_HAL(pKernelBus, gfid) kbusConstructVirtualBar2CpuInvisibleHeap(pKernelBus, gfid) + +static inline NV_STATUS kbusMapCpuInvisibleBar2Aperture_46f6a7(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU64 *pVaddr, NvU64 allocSize, NvU32 allocFlags, NvU32 gfid) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusMapCpuInvisibleBar2Aperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU64 *pVaddr, NvU64 allocSize, NvU32 allocFlags, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was 
disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusMapCpuInvisibleBar2Aperture(pGpu, pKernelBus, pMemDesc, pVaddr, allocSize, allocFlags, gfid) kbusMapCpuInvisibleBar2Aperture_46f6a7(pGpu, pKernelBus, pMemDesc, pVaddr, allocSize, allocFlags, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusMapCpuInvisibleBar2Aperture_HAL(pGpu, pKernelBus, pMemDesc, pVaddr, allocSize, allocFlags, gfid) kbusMapCpuInvisibleBar2Aperture(pGpu, pKernelBus, pMemDesc, pVaddr, allocSize, allocFlags, gfid) + +static inline void kbusUnmapCpuInvisibleBar2Aperture_b3696a(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU64 vAddr, NvU32 gfid) { + return; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusUnmapCpuInvisibleBar2Aperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc, NvU64 vAddr, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusUnmapCpuInvisibleBar2Aperture(pGpu, pKernelBus, pMemDesc, vAddr, gfid) kbusUnmapCpuInvisibleBar2Aperture_b3696a(pGpu, pKernelBus, pMemDesc, vAddr, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusUnmapCpuInvisibleBar2Aperture_HAL(pGpu, pKernelBus, pMemDesc, vAddr, gfid) kbusUnmapCpuInvisibleBar2Aperture(pGpu, pKernelBus, pMemDesc, vAddr, gfid) + +NV_STATUS kbusSetupCpuPointerForBusFlush_GV100(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetupCpuPointerForBusFlush(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupCpuPointerForBusFlush(pGpu, pKernelBus) kbusSetupCpuPointerForBusFlush_GV100(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupCpuPointerForBusFlush_HAL(pGpu, pKernelBus) kbusSetupCpuPointerForBusFlush(pGpu, pKernelBus) + +void kbusDestroyCpuPointerForBusFlush_GV100(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusDestroyCpuPointerForBusFlush(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusDestroyCpuPointerForBusFlush(pGpu, pKernelBus) kbusDestroyCpuPointerForBusFlush_GV100(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusDestroyCpuPointerForBusFlush_HAL(pGpu, pKernelBus) kbusDestroyCpuPointerForBusFlush(pGpu, pKernelBus) + +NV_STATUS kbusSetupBar2CpuAperture_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetupBar2CpuAperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupBar2CpuAperture(pGpu, pKernelBus, gfid) kbusSetupBar2CpuAperture_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupBar2CpuAperture_HAL(pGpu, pKernelBus, gfid) kbusSetupBar2CpuAperture(pGpu, pKernelBus, gfid) + +NV_STATUS kbusTeardownBar2CpuAperture_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusTeardownBar2CpuAperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; 
+} +#else //__nvoc_kern_bus_h_disabled +#define kbusTeardownBar2CpuAperture(pGpu, pKernelBus, gfid) kbusTeardownBar2CpuAperture_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, gfid) kbusTeardownBar2CpuAperture(pGpu, pKernelBus, gfid) + +NV_STATUS kbusSetP2PMailboxBar1Area_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 mailboxBar1Addr, NvU32 mailboxTotalSize); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetP2PMailboxBar1Area(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 mailboxBar1Addr, NvU32 mailboxTotalSize) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetP2PMailboxBar1Area(pGpu, pKernelBus, mailboxBar1Addr, mailboxTotalSize) kbusSetP2PMailboxBar1Area_GM200(pGpu, pKernelBus, mailboxBar1Addr, mailboxTotalSize) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetP2PMailboxBar1Area_HAL(pGpu, pKernelBus, mailboxBar1Addr, mailboxTotalSize) kbusSetP2PMailboxBar1Area(pGpu, pKernelBus, mailboxBar1Addr, mailboxTotalSize) + +void kbusUnsetP2PMailboxBar1Area_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusUnsetP2PMailboxBar1Area(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusUnsetP2PMailboxBar1Area(pGpu, pKernelBus) kbusUnsetP2PMailboxBar1Area_GM200(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusUnsetP2PMailboxBar1Area_HAL(pGpu, pKernelBus) kbusUnsetP2PMailboxBar1Area(pGpu, pKernelBus) + +NV_STATUS kbusAllocP2PMailboxBar1_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid, NvU64 vaRangeMax); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusAllocP2PMailboxBar1(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid, NvU64 vaRangeMax) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusAllocP2PMailboxBar1(pGpu, pKernelBus, gfid, vaRangeMax) kbusAllocP2PMailboxBar1_GM200(pGpu, pKernelBus, gfid, vaRangeMax) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusAllocP2PMailboxBar1_HAL(pGpu, pKernelBus, gfid, vaRangeMax) kbusAllocP2PMailboxBar1(pGpu, pKernelBus, gfid, vaRangeMax) + +void kbusGetP2PMailboxAttributes_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 *pMailboxAreaSize, NvU32 *pMailboxAlignmentSize, NvU32 *pMailboxMaxOffset64KB); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusGetP2PMailboxAttributes(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 *pMailboxAreaSize, NvU32 *pMailboxAlignmentSize, NvU32 *pMailboxMaxOffset64KB) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetP2PMailboxAttributes(pGpu, pKernelBus, pMailboxAreaSize, pMailboxAlignmentSize, pMailboxMaxOffset64KB) kbusGetP2PMailboxAttributes_GM200(pGpu, pKernelBus, pMailboxAreaSize, pMailboxAlignmentSize, pMailboxMaxOffset64KB) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetP2PMailboxAttributes_HAL(pGpu, pKernelBus, pMailboxAreaSize, pMailboxAlignmentSize, pMailboxMaxOffset64KB) kbusGetP2PMailboxAttributes(pGpu, pKernelBus, pMailboxAreaSize, pMailboxAlignmentSize, pMailboxMaxOffset64KB) + +RmPhysAddr kbusSetupMailboxAccess_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pGpu1, NvU32 localPeerId, PMEMORY_DESCRIPTOR 
*ppWMBoxMemDesc); + +#ifdef __nvoc_kern_bus_h_disabled +static inline RmPhysAddr kbusSetupMailboxAccess(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pGpu1, NvU32 localPeerId, PMEMORY_DESCRIPTOR *ppWMBoxMemDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupMailboxAccess(pGpu, pKernelBus, pGpu1, localPeerId, ppWMBoxMemDesc) kbusSetupMailboxAccess_GM200(pGpu, pKernelBus, pGpu1, localPeerId, ppWMBoxMemDesc) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupMailboxAccess_HAL(pGpu, pKernelBus, pGpu1, localPeerId, ppWMBoxMemDesc) kbusSetupMailboxAccess(pGpu, pKernelBus, pGpu1, localPeerId, ppWMBoxMemDesc) + +void kbusDestroyPeerAccess_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerNum); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusDestroyPeerAccess(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerNum) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusDestroyPeerAccess(pGpu, pKernelBus, peerNum) kbusDestroyPeerAccess_GM200(pGpu, pKernelBus, peerNum) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusDestroyPeerAccess_HAL(pGpu, pKernelBus, peerNum) kbusDestroyPeerAccess(pGpu, pKernelBus, peerNum) + +NV_STATUS kbusCreateP2PMapping_GP100(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *peer0, NvU32 *peer1, NvU32 attributes); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusCreateP2PMapping(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *peer0, NvU32 *peer1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCreateP2PMapping(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusCreateP2PMapping_GP100(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusCreateP2PMapping_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusCreateP2PMapping(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) + +NV_STATUS kbusRemoveP2PMapping_GP100(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 peer0, NvU32 peer1, NvU32 attributes); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusRemoveP2PMapping(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 peer0, NvU32 peer1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusRemoveP2PMapping(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusRemoveP2PMapping_GP100(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusRemoveP2PMapping_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusRemoveP2PMapping(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) + +NvU32 kbusGetPeerId_GP100(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pPeerGpu); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU32 kbusGetPeerId(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pPeerGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else 
//__nvoc_kern_bus_h_disabled +#define kbusGetPeerId(pGpu, pKernelBus, pPeerGpu) kbusGetPeerId_GP100(pGpu, pKernelBus, pPeerGpu) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetPeerId_HAL(pGpu, pKernelBus, pPeerGpu) kbusGetPeerId(pGpu, pKernelBus, pPeerGpu) + +NvU32 kbusGetPeerIdFromTable_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 locPeerIdx, NvU32 remPeerIdx); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU32 kbusGetPeerIdFromTable(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 locPeerIdx, NvU32 remPeerIdx) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetPeerIdFromTable(pGpu, pKernelBus, locPeerIdx, remPeerIdx) kbusGetPeerIdFromTable_GM107(pGpu, pKernelBus, locPeerIdx, remPeerIdx) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetPeerIdFromTable_HAL(pGpu, pKernelBus, locPeerIdx, remPeerIdx) kbusGetPeerIdFromTable(pGpu, pKernelBus, locPeerIdx, remPeerIdx) + +NvU32 kbusGetUnusedPeerId_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU32 kbusGetUnusedPeerId(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetUnusedPeerId(pGpu, pKernelBus) kbusGetUnusedPeerId_GM107(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetUnusedPeerId_HAL(pGpu, pKernelBus) kbusGetUnusedPeerId(pGpu, pKernelBus) + +NV_STATUS kbusIsPeerIdValid_GP100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerId); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusIsPeerIdValid(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerId) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusIsPeerIdValid(pGpu, pKernelBus, peerId) kbusIsPeerIdValid_GP100(pGpu, pKernelBus, peerId) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusIsPeerIdValid_HAL(pGpu, pKernelBus, peerId) kbusIsPeerIdValid(pGpu, pKernelBus, peerId) + +NV_STATUS kbusReserveP2PPeerIds_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerMask); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusReserveP2PPeerIds(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerMask) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusReserveP2PPeerIds(pGpu, pKernelBus, peerMask) kbusReserveP2PPeerIds_GM200(pGpu, pKernelBus, peerMask) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusReserveP2PPeerIds_HAL(pGpu, pKernelBus, peerMask) kbusReserveP2PPeerIds(pGpu, pKernelBus, peerMask) + +NV_STATUS kbusCreateP2PMappingForMailbox_GM200(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *peer0, NvU32 *peer1, NvU32 attributes); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusCreateP2PMappingForMailbox(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *peer0, NvU32 *peer1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCreateP2PMappingForMailbox(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusCreateP2PMappingForMailbox_GM200(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) +#endif //__nvoc_kern_bus_h_disabled + 
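+//
+// Descriptive note on the pattern repeated throughout this NVOC-style header
+// (an editorial summary of what the surrounding code does, not part of the
+// original generated file): each kbus* entry point is declared with one or
+// more concrete bindings, suffixed by the chip/variant that provides them
+// (e.g. _GM107, _GM200, _GP100, _GV100, _TU102, _GA100, _KERNEL, _VBAR2,
+// _GSPCLIENT) or by a short hash for trivial static inline defaults
+// (e.g. _56cd7a returning NV_OK, _46f6a7 returning NV_ERR_NOT_SUPPORTED).
+// When __nvoc_kern_bus_h_disabled is defined, the unsuffixed name instead
+// resolves to a static inline stub that asserts "KernelBus was disabled!"
+// and returns a failure or neutral value; otherwise it is a macro aliasing
+// the selected binding. The *_HAL macros forward to whichever of the two the
+// #ifdef chose, while the *_DISPATCH inlines later in this header call
+// through the per-object function pointers (pKernelBus->__kbus...__).
+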
+#define kbusCreateP2PMappingForMailbox_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusCreateP2PMappingForMailbox(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) + +NV_STATUS kbusRemoveP2PMappingForMailbox_GM200(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 peer0, NvU32 peer1, NvU32 attributes); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusRemoveP2PMappingForMailbox(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 peer0, NvU32 peer1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusRemoveP2PMappingForMailbox(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusRemoveP2PMappingForMailbox_GM200(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusRemoveP2PMappingForMailbox_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusRemoveP2PMappingForMailbox(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) + +void kbusSetupMailboxes_GM200(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 arg0, NvU32 arg1); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusSetupMailboxes(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupMailboxes(pGpu0, pKernelBus0, pGpu1, pKernelBus1, arg0, arg1) kbusSetupMailboxes_GM200(pGpu0, pKernelBus0, pGpu1, pKernelBus1, arg0, arg1) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupMailboxes_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, arg0, arg1) kbusSetupMailboxes(pGpu0, pKernelBus0, pGpu1, pKernelBus1, arg0, arg1) + +void kbusWriteP2PWmbTag_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 remote2Local, NvU64 p2pWmbTag); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusWriteP2PWmbTag(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 remote2Local, NvU64 p2pWmbTag) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusWriteP2PWmbTag(pGpu, pKernelBus, remote2Local, p2pWmbTag) kbusWriteP2PWmbTag_GM200(pGpu, pKernelBus, remote2Local, p2pWmbTag) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusWriteP2PWmbTag_HAL(pGpu, pKernelBus, remote2Local, p2pWmbTag) kbusWriteP2PWmbTag(pGpu, pKernelBus, remote2Local, p2pWmbTag) + +RmPhysAddr kbusSetupP2PDomainAccess_GM200(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, PMEMORY_DESCRIPTOR *ppP2PDomMemDesc); + +#ifdef __nvoc_kern_bus_h_disabled +static inline RmPhysAddr kbusSetupP2PDomainAccess(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, PMEMORY_DESCRIPTOR *ppP2PDomMemDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupP2PDomainAccess(pGpu0, pKernelBus0, pGpu1, ppP2PDomMemDesc) kbusSetupP2PDomainAccess_GM200(pGpu0, pKernelBus0, pGpu1, ppP2PDomMemDesc) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupP2PDomainAccess_HAL(pGpu0, pKernelBus0, pGpu1, ppP2PDomMemDesc) kbusSetupP2PDomainAccess(pGpu0, pKernelBus0, pGpu1, ppP2PDomMemDesc) + +NvBool 
kbusNeedWarForBug999673_GM200(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pRemoteGpu); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvBool kbusNeedWarForBug999673(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pRemoteGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusNeedWarForBug999673(pGpu, pKernelBus, pRemoteGpu) kbusNeedWarForBug999673_GM200(pGpu, pKernelBus, pRemoteGpu) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusNeedWarForBug999673_HAL(pGpu, pKernelBus, pRemoteGpu) kbusNeedWarForBug999673(pGpu, pKernelBus, pRemoteGpu) + +NV_STATUS kbusCreateP2PMappingForNvlink_GP100(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *peer0, NvU32 *peer1, NvU32 attributes); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusCreateP2PMappingForNvlink(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *peer0, NvU32 *peer1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCreateP2PMappingForNvlink(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusCreateP2PMappingForNvlink_GP100(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusCreateP2PMappingForNvlink_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusCreateP2PMappingForNvlink(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) + +NV_STATUS kbusRemoveP2PMappingForNvlink_GP100(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 peer0, NvU32 peer1, NvU32 attributes); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusRemoveP2PMappingForNvlink(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 peer0, NvU32 peer1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusRemoveP2PMappingForNvlink(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusRemoveP2PMappingForNvlink_GP100(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusRemoveP2PMappingForNvlink_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusRemoveP2PMappingForNvlink(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) + +static inline NV_STATUS kbusCreateP2PMappingForC2C_46f6a7(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *peer0, NvU32 *peer1, NvU32 attributes) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusCreateP2PMappingForC2C(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *peer0, NvU32 *peer1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCreateP2PMappingForC2C(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusCreateP2PMappingForC2C_46f6a7(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusCreateP2PMappingForC2C_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, 
attributes) kbusCreateP2PMappingForC2C(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) + +static inline NV_STATUS kbusRemoveP2PMappingForC2C_46f6a7(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 peer0, NvU32 peer1, NvU32 attributes) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusRemoveP2PMappingForC2C(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 peer0, NvU32 peer1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusRemoveP2PMappingForC2C(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusRemoveP2PMappingForC2C_46f6a7(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusRemoveP2PMappingForC2C_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) kbusRemoveP2PMappingForC2C(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes) + +NvU32 kbusGetNvlinkPeerNumberMask_GP100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerId); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU32 kbusGetNvlinkPeerNumberMask(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerId) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetNvlinkPeerNumberMask(pGpu, pKernelBus, peerId) kbusGetNvlinkPeerNumberMask_GP100(pGpu, pKernelBus, peerId) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetNvlinkPeerNumberMask_HAL(pGpu, pKernelBus, peerId) kbusGetNvlinkPeerNumberMask(pGpu, pKernelBus, peerId) + +void kbusUnlinkP2P_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBu); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusUnlinkP2P(OBJGPU *pGpu, struct KernelBus *pKernelBu) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusUnlinkP2P(pGpu, pKernelBu) kbusUnlinkP2P_GM107(pGpu, pKernelBu) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusUnlinkP2P_HAL(pGpu, pKernelBu) kbusUnlinkP2P(pGpu, pKernelBu) + +static inline NV_STATUS kbusCreateP2PMappingForBar1P2P_46f6a7(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 attributes) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusCreateP2PMappingForBar1P2P(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCreateP2PMappingForBar1P2P(pGpu0, pKernelBus0, pGpu1, pKernelBus1, attributes) kbusCreateP2PMappingForBar1P2P_46f6a7(pGpu0, pKernelBus0, pGpu1, pKernelBus1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusCreateP2PMappingForBar1P2P_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, attributes) kbusCreateP2PMappingForBar1P2P(pGpu0, pKernelBus0, pGpu1, pKernelBus1, attributes) + +static inline NV_STATUS kbusRemoveP2PMappingForBar1P2P_46f6a7(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 attributes) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusRemoveP2PMappingForBar1P2P(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct 
KernelBus *pKernelBus1, NvU32 attributes) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusRemoveP2PMappingForBar1P2P(pGpu0, pKernelBus0, pGpu1, pKernelBus1, attributes) kbusRemoveP2PMappingForBar1P2P_46f6a7(pGpu0, pKernelBus0, pGpu1, pKernelBus1, attributes) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusRemoveP2PMappingForBar1P2P_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, attributes) kbusRemoveP2PMappingForBar1P2P(pGpu0, pKernelBus0, pGpu1, pKernelBus1, attributes) + +static inline NvBool kbusIsPcieBar1P2PMapping_491d52(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvBool kbusIsPcieBar1P2PMapping(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusIsPcieBar1P2PMapping(pGpu0, pKernelBus0, pGpu1, pKernelBus1) kbusIsPcieBar1P2PMapping_491d52(pGpu0, pKernelBus0, pGpu1, pKernelBus1) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusIsPcieBar1P2PMapping_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1) kbusIsPcieBar1P2PMapping(pGpu0, pKernelBus0, pGpu1, pKernelBus1) + +static inline NvBool kbusIsPcieBar1P2PCapable_491d52(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvBool kbusIsPcieBar1P2PCapable(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusIsPcieBar1P2PCapable(pGpu0, pKernelBus0, pGpu1, pKernelBus1) kbusIsPcieBar1P2PCapable_491d52(pGpu0, pKernelBus0, pGpu1, pKernelBus1) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusIsPcieBar1P2PCapable_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1) kbusIsPcieBar1P2PCapable(pGpu0, pKernelBus0, pGpu1, pKernelBus1) + +NV_STATUS kbusSetupUnbindFla_KERNEL(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetupUnbindFla(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupUnbindFla(pGpu, pKernelBus) kbusSetupUnbindFla_KERNEL(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupUnbindFla_HAL(pGpu, pKernelBus) kbusSetupUnbindFla(pGpu, pKernelBus) + +NV_STATUS kbusSetupBindFla_KERNEL(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetupBindFla(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupBindFla(pGpu, pKernelBus, gfid) kbusSetupBindFla_KERNEL(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupBindFla_HAL(pGpu, pKernelBus, gfid) kbusSetupBindFla(pGpu, pKernelBus, gfid) + +NV_STATUS kbusFlushSingle_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusFlushSingle(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags) { + 
NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusFlushSingle(pGpu, pKernelBus, flags) kbusFlushSingle_GM107(pGpu, pKernelBus, flags) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusFlushSingle_HAL(pGpu, pKernelBus, flags) kbusFlushSingle(pGpu, pKernelBus, flags) + +NV_STATUS kbusSendSysmembarSingle_KERNEL(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSendSysmembarSingle(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSendSysmembarSingle(pGpu, pKernelBus) kbusSendSysmembarSingle_KERNEL(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSendSysmembarSingle_HAL(pGpu, pKernelBus) kbusSendSysmembarSingle(pGpu, pKernelBus) + +void kbusInitPciBars_GM107(struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusInitPciBars(struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusInitPciBars(pKernelBus) kbusInitPciBars_GM107(pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusInitPciBars_HAL(pKernelBus) kbusInitPciBars(pKernelBus) + +NV_STATUS kbusInitBarsBaseInfo_GM107(struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusInitBarsBaseInfo(struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusInitBarsBaseInfo(pKernelBus) kbusInitBarsBaseInfo_GM107(pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusInitBarsBaseInfo_HAL(pKernelBus) kbusInitBarsBaseInfo(pKernelBus) + +NV_STATUS kbusMemAccessBar0Window_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 physAddr, void *pData, NvU64 accessSize, NvBool bRead, NV_ADDRESS_SPACE addrSpace); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusMemAccessBar0Window(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 physAddr, void *pData, NvU64 accessSize, NvBool bRead, NV_ADDRESS_SPACE addrSpace) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusMemAccessBar0Window(pGpu, pKernelBus, physAddr, pData, accessSize, bRead, addrSpace) kbusMemAccessBar0Window_GM107(pGpu, pKernelBus, physAddr, pData, accessSize, bRead, addrSpace) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusMemAccessBar0Window_HAL(pGpu, pKernelBus, physAddr, pData, accessSize, bRead, addrSpace) kbusMemAccessBar0Window(pGpu, pKernelBus, physAddr, pData, accessSize, bRead, addrSpace) + +NV_STATUS kbusMemCopyBar0Window_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, RmPhysAddr physAddr, void *pData, NvLength copySize, NvBool bRead); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusMemCopyBar0Window(OBJGPU *pGpu, struct KernelBus *pKernelBus, RmPhysAddr physAddr, void *pData, NvLength copySize, NvBool bRead) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusMemCopyBar0Window(pGpu, pKernelBus, physAddr, pData, copySize, bRead) kbusMemCopyBar0Window_GM107(pGpu, pKernelBus, physAddr, pData, copySize, bRead) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusMemCopyBar0Window_HAL(pGpu, 
pKernelBus, physAddr, pData, copySize, bRead) kbusMemCopyBar0Window(pGpu, pKernelBus, physAddr, pData, copySize, bRead) + +NV_STATUS kbusSetBAR0WindowVidOffset_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 vidOffset); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetBAR0WindowVidOffset(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 vidOffset) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetBAR0WindowVidOffset(pGpu, pKernelBus, vidOffset) kbusSetBAR0WindowVidOffset_GM107(pGpu, pKernelBus, vidOffset) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, vidOffset) kbusSetBAR0WindowVidOffset(pGpu, pKernelBus, vidOffset) + +NvU64 kbusGetBAR0WindowVidOffset_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU64 kbusGetBAR0WindowVidOffset(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetBAR0WindowVidOffset(pGpu, pKernelBus) kbusGetBAR0WindowVidOffset_GM107(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus) kbusGetBAR0WindowVidOffset(pGpu, pKernelBus) + +NvU64 kbusGetBAR0WindowAddress_GM107(struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU64 kbusGetBAR0WindowAddress(struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetBAR0WindowAddress(pKernelBus) kbusGetBAR0WindowAddress_GM107(pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetBAR0WindowAddress_HAL(pKernelBus) kbusGetBAR0WindowAddress(pKernelBus) + +NV_STATUS kbusSetupBar0WindowBeforeBar2Bootstrap_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSetupBar0WindowBeforeBar2Bootstrap(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSetupBar0WindowBeforeBar2Bootstrap(pGpu, pKernelBus, arg0) kbusSetupBar0WindowBeforeBar2Bootstrap_GM107(pGpu, pKernelBus, arg0) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusSetupBar0WindowBeforeBar2Bootstrap_HAL(pGpu, pKernelBus, arg0) kbusSetupBar0WindowBeforeBar2Bootstrap(pGpu, pKernelBus, arg0) + +void kbusRestoreBar0WindowAfterBar2Bootstrap_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusRestoreBar0WindowAfterBar2Bootstrap(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusRestoreBar0WindowAfterBar2Bootstrap(pGpu, pKernelBus, arg0) kbusRestoreBar0WindowAfterBar2Bootstrap_GM107(pGpu, pKernelBus, arg0) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusRestoreBar0WindowAfterBar2Bootstrap_HAL(pGpu, pKernelBus, arg0) kbusRestoreBar0WindowAfterBar2Bootstrap(pGpu, pKernelBus, arg0) + +NV_STATUS kbusInitBar2_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusInitBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); 
+ return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusInitBar2(pGpu, pKernelBus, gfid) kbusInitBar2_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusInitBar2_HAL(pGpu, pKernelBus, gfid) kbusInitBar2(pGpu, pKernelBus, gfid) + +NV_STATUS kbusDestroyBar2_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusDestroyBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusDestroyBar2(pGpu, pKernelBus, gfid) kbusDestroyBar2_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusDestroyBar2_HAL(pGpu, pKernelBus, gfid) kbusDestroyBar2(pGpu, pKernelBus, gfid) + +NV_STATUS kbusVerifyBar2_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR memDescIn, NvU8 *pCpuPtrIn, NvU64 offset, NvU64 size); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusVerifyBar2(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR memDescIn, NvU8 *pCpuPtrIn, NvU64 offset, NvU64 size) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusVerifyBar2(pGpu, pKernelBus, memDescIn, pCpuPtrIn, offset, size) kbusVerifyBar2_GM107(pGpu, pKernelBus, memDescIn, pCpuPtrIn, offset, size) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusVerifyBar2_HAL(pGpu, pKernelBus, memDescIn, pCpuPtrIn, offset, size) kbusVerifyBar2(pGpu, pKernelBus, memDescIn, pCpuPtrIn, offset, size) + +static inline NV_STATUS kbusVerifyCoherentLink_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return NV_OK; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusVerifyCoherentLink(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusVerifyCoherentLink(pGpu, pKernelBus) kbusVerifyCoherentLink_56cd7a(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusVerifyCoherentLink_HAL(pGpu, pKernelBus) kbusVerifyCoherentLink(pGpu, pKernelBus) + +NV_STATUS kbusInitBar1_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusInitBar1(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusInitBar1(pGpu, pKernelBus, gfid) kbusInitBar1_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusInitBar1_HAL(pGpu, pKernelBus, gfid) kbusInitBar1(pGpu, pKernelBus, gfid) + +NV_STATUS kbusDestroyBar1_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusDestroyBar1(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusDestroyBar1(pGpu, pKernelBus, gfid) kbusDestroyBar1_GM107(pGpu, pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusDestroyBar1_HAL(pGpu, pKernelBus, gfid) kbusDestroyBar1(pGpu, pKernelBus, gfid) + +NV_STATUS kbusMapFbAperture_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU64 offset, 
NvU64 *pAperOffset, NvU64 *pLength, NvU32 flags, NvHandle hClient); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusMapFbAperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU64 offset, NvU64 *pAperOffset, NvU64 *pLength, NvU32 flags, NvHandle hClient) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusMapFbAperture(pGpu, pKernelBus, arg0, offset, pAperOffset, pLength, flags, hClient) kbusMapFbAperture_GM107(pGpu, pKernelBus, arg0, offset, pAperOffset, pLength, flags, hClient) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusMapFbAperture_HAL(pGpu, pKernelBus, arg0, offset, pAperOffset, pLength, flags, hClient) kbusMapFbAperture(pGpu, pKernelBus, arg0, offset, pAperOffset, pLength, flags, hClient) + +NV_STATUS kbusUnmapFbAperture_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU64 aperOffset, NvU64 length, NvU32 flags); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusUnmapFbAperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU64 aperOffset, NvU64 length, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusUnmapFbAperture(pGpu, pKernelBus, arg0, aperOffset, length, flags) kbusUnmapFbAperture_GM107(pGpu, pKernelBus, arg0, aperOffset, length, flags) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusUnmapFbAperture_HAL(pGpu, pKernelBus, arg0, aperOffset, length, flags) kbusUnmapFbAperture(pGpu, pKernelBus, arg0, aperOffset, length, flags) + +void kbusReleaseRmAperture_VBAR2(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0); + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusReleaseRmAperture(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusReleaseRmAperture(pGpu, pKernelBus, arg0) kbusReleaseRmAperture_VBAR2(pGpu, pKernelBus, arg0) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusReleaseRmAperture_HAL(pGpu, pKernelBus, arg0) kbusReleaseRmAperture(pGpu, pKernelBus, arg0) + +struct OBJVASPACE *kbusGetBar1VASpace_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +#ifdef __nvoc_kern_bus_h_disabled +static inline struct OBJVASPACE *kbusGetBar1VASpace(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NULL; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetBar1VASpace(pGpu, pKernelBus) kbusGetBar1VASpace_GM107(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusGetBar1VASpace_HAL(pGpu, pKernelBus) kbusGetBar1VASpace(pGpu, pKernelBus) + +static inline NV_STATUS kbusInitInstBlk_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR pInstBlkMemDesc, PMEMORY_DESCRIPTOR pPDB, NvU64 vaLimit, NvU32 bigPageSize, struct OBJVASPACE *pVAS) { + return NV_OK; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusInitInstBlk(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR pInstBlkMemDesc, PMEMORY_DESCRIPTOR pPDB, NvU64 vaLimit, NvU32 bigPageSize, struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusInitInstBlk(pGpu, pKernelBus, pInstBlkMemDesc, pPDB, vaLimit, bigPageSize, pVAS) kbusInitInstBlk_56cd7a(pGpu, 
pKernelBus, pInstBlkMemDesc, pPDB, vaLimit, bigPageSize, pVAS) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusInitInstBlk_HAL(pGpu, pKernelBus, pInstBlkMemDesc, pPDB, vaLimit, bigPageSize, pVAS) kbusInitInstBlk(pGpu, pKernelBus, pInstBlkMemDesc, pPDB, vaLimit, bigPageSize, pVAS) + +static inline NV_STATUS kbusBar1InstBlkVasUpdate_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return NV_OK; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusBar1InstBlkVasUpdate(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusBar1InstBlkVasUpdate(pGpu, pKernelBus) kbusBar1InstBlkVasUpdate_56cd7a(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusBar1InstBlkVasUpdate_HAL(pGpu, pKernelBus) kbusBar1InstBlkVasUpdate(pGpu, pKernelBus) + +NvBool kbusCheckEngine_KERNEL(OBJGPU *pGpu, struct KernelBus *pKernelBus, ENGDESCRIPTOR desc); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvBool kbusCheckEngine(OBJGPU *pGpu, struct KernelBus *pKernelBus, ENGDESCRIPTOR desc) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCheckEngine(pGpu, pKernelBus, desc) kbusCheckEngine_KERNEL(pGpu, pKernelBus, desc) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusCheckEngine_HAL(pGpu, pKernelBus, desc) kbusCheckEngine(pGpu, pKernelBus, desc) + +static inline NV_STATUS kbusFlushPcieForBar0Doorbell_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return NV_OK; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusFlushPcieForBar0Doorbell(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusFlushPcieForBar0Doorbell(pGpu, pKernelBus) kbusFlushPcieForBar0Doorbell_56cd7a(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusFlushPcieForBar0Doorbell_HAL(pGpu, pKernelBus) kbusFlushPcieForBar0Doorbell(pGpu, pKernelBus) + +NV_STATUS kbusFlush_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags); + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusFlush(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusFlush(pGpu, pKernelBus, flags) kbusFlush_GM107(pGpu, pKernelBus, flags) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusFlush_HAL(pGpu, pKernelBus, flags) kbusFlush(pGpu, pKernelBus, flags) + +static inline NV_STATUS kbusCreateCoherentCpuMapping_46f6a7(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvBool bFlush) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusCreateCoherentCpuMapping(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvBool bFlush) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCreateCoherentCpuMapping(pGpu, pKernelBus, bFlush) kbusCreateCoherentCpuMapping_46f6a7(pGpu, pKernelBus, bFlush) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusCreateCoherentCpuMapping_HAL(pGpu, pKernelBus, bFlush) kbusCreateCoherentCpuMapping(pGpu, pKernelBus, bFlush) + +static inline NvU8 *kbusMapCoherentCpuMapping_9e2234(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0) { + return ((void *)0); 
+} + +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU8 *kbusMapCoherentCpuMapping(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NULL; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusMapCoherentCpuMapping(pGpu, pKernelBus, arg0) kbusMapCoherentCpuMapping_9e2234(pGpu, pKernelBus, arg0) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, arg0) kbusMapCoherentCpuMapping(pGpu, pKernelBus, arg0) + +static inline void kbusUnmapCoherentCpuMapping_d44104(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0) { + return; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusUnmapCoherentCpuMapping(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusUnmapCoherentCpuMapping(pGpu, pKernelBus, arg0) kbusUnmapCoherentCpuMapping_d44104(pGpu, pKernelBus, arg0) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, arg0) kbusUnmapCoherentCpuMapping(pGpu, pKernelBus, arg0) + +static inline void kbusTeardownCoherentCpuMappingAcr_b3696a(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusTeardownCoherentCpuMappingAcr(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusTeardownCoherentCpuMappingAcr(pGpu, pKernelBus) kbusTeardownCoherentCpuMappingAcr_b3696a(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusTeardownCoherentCpuMappingAcr_HAL(pGpu, pKernelBus) kbusTeardownCoherentCpuMappingAcr(pGpu, pKernelBus) + +static inline void kbusTeardownCoherentCpuMapping_d44104(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvBool arg0) { + return; +} + +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusTeardownCoherentCpuMapping(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusTeardownCoherentCpuMapping(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping_d44104(pGpu, pKernelBus, arg0) +#endif //__nvoc_kern_bus_h_disabled + +#define kbusTeardownCoherentCpuMapping_HAL(pGpu, pKernelBus, arg0) kbusTeardownCoherentCpuMapping(pGpu, pKernelBus, arg0) + +NV_STATUS kbusConstructEngine_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus, ENGDESCRIPTOR arg0); + +static inline NV_STATUS kbusConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, ENGDESCRIPTOR arg0) { + return pKernelBus->__kbusConstructEngine__(pGpu, pKernelBus, arg0); +} + +NV_STATUS kbusStatePreInitLocked_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +static inline NV_STATUS kbusStatePreInitLocked_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return NV_OK; +} + +static inline NV_STATUS kbusStatePreInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return pKernelBus->__kbusStatePreInitLocked__(pGpu, pKernelBus); +} + +NV_STATUS kbusStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +static inline NV_STATUS kbusStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return pKernelBus->__kbusStateInitLocked__(pGpu, pKernelBus); +} + +NV_STATUS kbusStatePostLoad_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0); + +static inline NV_STATUS 
kbusStatePostLoad_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return NV_OK; +} + +static inline NV_STATUS kbusStatePostLoad_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return pKernelBus->__kbusStatePostLoad__(pGpu, pKernelBus, arg0); +} + +NV_STATUS kbusStatePreUnload_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0); + +static inline NV_STATUS kbusStatePreUnload_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return NV_OK; +} + +static inline NV_STATUS kbusStatePreUnload_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return pKernelBus->__kbusStatePreUnload__(pGpu, pKernelBus, arg0); +} + +static inline NV_STATUS kbusStateUnload_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags) { + return NV_OK; +} + +NV_STATUS kbusStateUnload_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags); + +static inline NV_STATUS kbusStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 flags) { + return pKernelBus->__kbusStateUnload__(pGpu, pKernelBus, flags); +} + +void kbusStateDestroy_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +static inline void kbusStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + pKernelBus->__kbusStateDestroy__(pGpu, pKernelBus); +} + +NvU32 kbusGetUnusedPciePeerId_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +NvU32 kbusGetUnusedPciePeerId_TU102(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +static inline NvU32 kbusGetUnusedPciePeerId_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return pKernelBus->__kbusGetUnusedPciePeerId__(pGpu, pKernelBus); +} + +NV_STATUS kbusGetNvlinkP2PPeerId_GP100(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *nvlinkPeer); + +NV_STATUS kbusGetNvlinkP2PPeerId_GA100(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *nvlinkPeer); + +static inline NV_STATUS kbusGetNvlinkP2PPeerId_56cd7a(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *nvlinkPeer) { + return NV_OK; +} + +static inline NV_STATUS kbusGetNvlinkP2PPeerId_DISPATCH(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, struct KernelBus *pKernelBus1, NvU32 *nvlinkPeer) { + return pKernelBus0->__kbusGetNvlinkP2PPeerId__(pGpu0, pKernelBus0, pGpu1, pKernelBus1, nvlinkPeer); +} + +NV_STATUS kbusRemoveNvlinkPeerMapping_GP100(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pGpu1, NvU32 arg0, NvU32 attributes); + +static inline NV_STATUS kbusRemoveNvlinkPeerMapping_56cd7a(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pGpu1, NvU32 arg0, NvU32 attributes) { + return NV_OK; +} + +static inline NV_STATUS kbusRemoveNvlinkPeerMapping_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, OBJGPU *pGpu1, NvU32 arg0, NvU32 attributes) { + return pKernelBus->__kbusRemoveNvlinkPeerMapping__(pGpu, pKernelBus, pGpu1, arg0, attributes); +} + +NV_STATUS kbusUnreserveP2PPeerIds_GP100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerMask); + +static inline NV_STATUS kbusUnreserveP2PPeerIds_46f6a7(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerMask) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kbusUnreserveP2PPeerIds_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 peerMask) { + return pKernelBus->__kbusUnreserveP2PPeerIds__(pGpu, pKernelBus, peerMask); +} + +NV_STATUS kbusAllocateFlaVaspace_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, 
NvU64 arg0, NvU64 arg1); + +static inline NV_STATUS kbusAllocateFlaVaspace_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kbusAllocateFlaVaspace_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1) { + return pKernelBus->__kbusAllocateFlaVaspace__(pGpu, pKernelBus, arg0, arg1); +} + +NV_STATUS kbusAllocateHostManagedFlaVaspace_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle arg0, NvHandle arg1, NvHandle arg2, NvHandle arg3, NvU64 arg4, NvU64 arg5, NvU32 arg6); + +static inline NV_STATUS kbusAllocateHostManagedFlaVaspace_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle arg0, NvHandle arg1, NvHandle arg2, NvHandle arg3, NvU64 arg4, NvU64 arg5, NvU32 arg6) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kbusAllocateHostManagedFlaVaspace_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle arg0, NvHandle arg1, NvHandle arg2, NvHandle arg3, NvU64 arg4, NvU64 arg5, NvU32 arg6) { + return pKernelBus->__kbusAllocateHostManagedFlaVaspace__(pGpu, pKernelBus, arg0, arg1, arg2, arg3, arg4, arg5, arg6); +} + +NV_STATUS kbusInitFla_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 base, NvU64 size); + +static inline NV_STATUS kbusInitFla_ac1694(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 base, NvU64 size) { + return NV_OK; +} + +static inline NV_STATUS kbusInitFla_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 base, NvU64 size) { + return pKernelBus->__kbusInitFla__(pGpu, pKernelBus, base, size); +} + +NV_STATUS kbusGetFlaVaspace_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, struct OBJVASPACE **arg0); + +static inline NV_STATUS kbusGetFlaVaspace_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, struct OBJVASPACE **arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kbusGetFlaVaspace_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, struct OBJVASPACE **arg0) { + return pKernelBus->__kbusGetFlaVaspace__(pGpu, pKernelBus, arg0); +} + +void kbusDestroyFla_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +static inline void kbusDestroyFla_d44104(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return; +} + +static inline void kbusDestroyFla_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + pKernelBus->__kbusDestroyFla__(pGpu, pKernelBus); +} + +void kbusDestroyHostManagedFlaVaspace_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0); + +static inline void kbusDestroyHostManagedFlaVaspace_d44104(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return; +} + +static inline void kbusDestroyHostManagedFlaVaspace_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + pKernelBus->__kbusDestroyHostManagedFlaVaspace__(pGpu, pKernelBus, arg0); +} + +NvBool kbusVerifyFlaRange_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1); + +static inline NvBool kbusVerifyFlaRange_bf6dfa(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kbusVerifyFlaRange_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 arg0, NvU64 arg1) { + return pKernelBus->__kbusVerifyFlaRange__(pGpu, pKernelBus, arg0, arg1); +} + +NV_STATUS kbusConstructFlaInstBlk_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0); + +static inline NV_STATUS kbusConstructFlaInstBlk_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS 
kbusConstructFlaInstBlk_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU32 arg0) { + return pKernelBus->__kbusConstructFlaInstBlk__(pGpu, pKernelBus, arg0); +} + +void kbusDestructFlaInstBlk_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus); + +static inline void kbusDestructFlaInstBlk_d44104(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + return; +} + +static inline void kbusDestructFlaInstBlk_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + pKernelBus->__kbusDestructFlaInstBlk__(pGpu, pKernelBus); +} + +NV_STATUS kbusValidateFlaBaseAddress_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 flaBaseAddr); + +static inline NV_STATUS kbusValidateFlaBaseAddress_395e98(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 flaBaseAddr) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kbusValidateFlaBaseAddress_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU64 flaBaseAddr) { + return pKernelBus->__kbusValidateFlaBaseAddress__(pGpu, pKernelBus, flaBaseAddr); +} + +NV_STATUS kbusIsDirectMappingAllowed_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU32 arg1, NvBool *arg2); + +NV_STATUS kbusIsDirectMappingAllowed_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU32 arg1, NvBool *arg2); + +static inline NV_STATUS kbusIsDirectMappingAllowed_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, PMEMORY_DESCRIPTOR arg0, NvU32 arg1, NvBool *arg2) { + return pKernelBus->__kbusIsDirectMappingAllowed__(pGpu, pKernelBus, arg0, arg1, arg2); +} + +NV_STATUS kbusUseDirectSysmemMap_GM107(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *arg0, NvBool *arg1); + +NV_STATUS kbusUseDirectSysmemMap_GA100(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *arg0, NvBool *arg1); + +static inline NV_STATUS kbusUseDirectSysmemMap_46f6a7(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *arg0, NvBool *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kbusUseDirectSysmemMap_DISPATCH(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *arg0, NvBool *arg1) { + return pKernelBus->__kbusUseDirectSysmemMap__(pGpu, pKernelBus, arg0, arg1); +} + +static inline NV_STATUS kbusReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunableState) { + return pEngstate->__kbusReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kbusStateLoad_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) { + return pEngstate->__kbusStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kbusStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) { + return pEngstate->__kbusStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kbusStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, NvU32 arg0) { + return pEngstate->__kbusStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kbusStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate) { + return pEngstate->__kbusStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kbusInitMissing_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate) { + pEngstate->__kbusInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kbusStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate) { + return pEngstate->__kbusStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kbusGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, void 
*pTunableState) { + return pEngstate->__kbusGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kbusCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kbusCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kbusFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunableState) { + pEngstate->__kbusFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kbusAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, void **ppTunableState) { + return pEngstate->__kbusAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kbusSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate, void *pTunableState) { + return pEngstate->__kbusSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kbusIsPresent_DISPATCH(POBJGPU pGpu, struct KernelBus *pEngstate) { + return pEngstate->__kbusIsPresent__(pGpu, pEngstate); +} + +static inline NvBool kbusIsBar1Force64KBMappingEnabled(struct KernelBus *pKernelBus) { + return pKernelBus->bBar1Force64KBMapping; +} + +static inline NvBool kbusIsBar1PhysicalModeEnabled(struct KernelBus *pKernelBus) { + return pKernelBus->bBar1PhysicalModeEnabled; +} + +static inline NvBool kbusIsBar2Initialized(struct KernelBus *pKernelBus) { + return pKernelBus->bIsBar2Initialized; +} + +static inline NvBool kbusIsBar2SysmemAccessEnabled(struct KernelBus *pKernelBus) { + return pKernelBus->bBar2SysmemAccessEnabled; +} + +static inline NvBool kbusIsBar2TestSkipped(struct KernelBus *pKernelBus) { + return pKernelBus->bBar2TestSkipped; +} + +static inline NvBool kbusIsPhysicalBar2InitPagetableEnabled(struct KernelBus *pKernelBus) { + return pKernelBus->bUsePhysicalBar2InitPagetable; +} + +static inline NvBool kbusIsFlaSupported(struct KernelBus *pKernelBus) { + return pKernelBus->bFlaSupported; +} + +static inline NvBool kbusIsFlaEnabled(struct KernelBus *pKernelBus) { + return pKernelBus->bFlaEnabled; +} + +static inline NvBool kbusIsFlaDummyPageEnabled(struct KernelBus *pKernelBus) { + return pKernelBus->bFlaDummyPageEnabled; +} + +static inline NvBool kbusIsBug2751296LimitBar2PtSize(struct KernelBus *pKernelBus) { + return pKernelBus->bBug2751296LimitBar2PtSize; +} + +static inline NvBool kbusIsReflectedMappingAccessAllowed(struct KernelBus *pKernelBus) { + return pKernelBus->bAllowReflectedMappingAccess; +} + +static inline NvBool kbusIsPreserveBar1ConsoleEnabled(struct KernelBus *pKernelBus) { + return pKernelBus->bPreserveBar1ConsoleEnabled; +} + +static inline NvBool kbusIsP2pInitialized(struct KernelBus *pKernelBus) { + return pKernelBus->bP2pInitialized; +} + +static inline NvBool kbusIsP2pMailboxClientAllocated(struct KernelBus *pKernelBus) { + return pKernelBus->bP2pMailboxClientAllocated; +} + +static inline NvBool kbusIsFbFlushDisabled(struct KernelBus *pKernelBus) { + return pKernelBus->bFbFlushDisabled; +} + +static inline NvBool kbusIsReadCpuPointerToFlushEnabled(struct KernelBus *pKernelBus) { + return pKernelBus->bReadCpuPointerToFlush; +} + +static inline void kbusSetBar1P2pCapable(struct KernelBus *pKernelBus, NvBool val) { + pKernelBus->bBar1P2pCapable = val; +} + +void kbusDestruct_IMPL(struct KernelBus *pKernelBus); +#define __nvoc_kbusDestruct(pKernelBus) kbusDestruct_IMPL(pKernelBus) +void kbusGetDeviceCaps_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU8 *pHostCaps, NvBool 
bCapsInitialized); +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusGetDeviceCaps(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvU8 *pHostCaps, NvBool bCapsInitialized) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetDeviceCaps(pGpu, pKernelBus, pHostCaps, bCapsInitialized) kbusGetDeviceCaps_IMPL(pGpu, pKernelBus, pHostCaps, bCapsInitialized) +#endif //__nvoc_kern_bus_h_disabled + +void kbusDestroyMailbox_IMPL(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, NvU32 peerIdx); +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusDestroyMailbox(OBJGPU *pGpu0, struct KernelBus *pKernelBus0, OBJGPU *pGpu1, NvU32 peerIdx) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusDestroyMailbox(pGpu0, pKernelBus0, pGpu1, peerIdx) kbusDestroyMailbox_IMPL(pGpu0, pKernelBus0, pGpu1, peerIdx) +#endif //__nvoc_kern_bus_h_disabled + +RmPhysAddr kbusSetupPeerBarAccess_IMPL(OBJGPU *pGpu0, OBJGPU *pGpu1, RmPhysAddr arg0, NvU64 arg1, PMEMORY_DESCRIPTOR *arg2); +#define kbusSetupPeerBarAccess(pGpu0, pGpu1, arg0, arg1, arg2) kbusSetupPeerBarAccess_IMPL(pGpu0, pGpu1, arg0, arg1, arg2) +NV_STATUS kbusSendSysmembar_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus); +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSendSysmembar(OBJGPU *pGpu, struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSendSysmembar(pGpu, pKernelBus) kbusSendSysmembar_IMPL(pGpu, pKernelBus) +#endif //__nvoc_kern_bus_h_disabled + +NV_STATUS kbusSendBusInfo_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus, NV2080_CTRL_BUS_INFO *pBusInfo); +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusSendBusInfo(OBJGPU *pGpu, struct KernelBus *pKernelBus, NV2080_CTRL_BUS_INFO *pBusInfo) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusSendBusInfo(pGpu, pKernelBus, pBusInfo) kbusSendBusInfo_IMPL(pGpu, pKernelBus, pBusInfo) +#endif //__nvoc_kern_bus_h_disabled + +NvU64 kbusGetPciBarSize_IMPL(struct KernelBus *pKernelBus, NvU32 index); +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU64 kbusGetPciBarSize(struct KernelBus *pKernelBus, NvU32 index) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetPciBarSize(pKernelBus, index) kbusGetPciBarSize_IMPL(pKernelBus, index) +#endif //__nvoc_kern_bus_h_disabled + +RmPhysAddr kbusGetPciBarOffset_IMPL(struct KernelBus *pKernelBus, NvU32 index); +#ifdef __nvoc_kern_bus_h_disabled +static inline RmPhysAddr kbusGetPciBarOffset(struct KernelBus *pKernelBus, NvU32 index) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetPciBarOffset(pKernelBus, index) kbusGetPciBarOffset_IMPL(pKernelBus, index) +#endif //__nvoc_kern_bus_h_disabled + +void kbusDetermineBar1Force64KBMapping_IMPL(struct KernelBus *pKernelBus); +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusDetermineBar1Force64KBMapping(struct KernelBus *pKernelBus) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusDetermineBar1Force64KBMapping(pKernelBus) kbusDetermineBar1Force64KBMapping_IMPL(pKernelBus) +#endif 
//__nvoc_kern_bus_h_disabled + +void kbusDetermineBar1ApertureLength_IMPL(struct KernelBus *pKernelBus, NvU32 gfid); +#ifdef __nvoc_kern_bus_h_disabled +static inline void kbusDetermineBar1ApertureLength(struct KernelBus *pKernelBus, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); +} +#else //__nvoc_kern_bus_h_disabled +#define kbusDetermineBar1ApertureLength(pKernelBus, gfid) kbusDetermineBar1ApertureLength_IMPL(pKernelBus, gfid) +#endif //__nvoc_kern_bus_h_disabled + +NV_STATUS kbusMapFbApertureByHandle_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle hClient, NvHandle hMemory, NvU64 offset, NvU64 size, NvU64 *pBar1Va); +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusMapFbApertureByHandle(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle hClient, NvHandle hMemory, NvU64 offset, NvU64 size, NvU64 *pBar1Va) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusMapFbApertureByHandle(pGpu, pKernelBus, hClient, hMemory, offset, size, pBar1Va) kbusMapFbApertureByHandle_IMPL(pGpu, pKernelBus, hClient, hMemory, offset, size, pBar1Va) +#endif //__nvoc_kern_bus_h_disabled + +NV_STATUS kbusUnmapFbApertureByHandle_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle hClient, NvHandle hMemory, NvU64 bar1Va); +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusUnmapFbApertureByHandle(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle hClient, NvHandle hMemory, NvU64 bar1Va) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusUnmapFbApertureByHandle(pGpu, pKernelBus, hClient, hMemory, bar1Va) kbusUnmapFbApertureByHandle_IMPL(pGpu, pKernelBus, hClient, hMemory, bar1Va) +#endif //__nvoc_kern_bus_h_disabled + +NV_STATUS kbusGetBar1VARangeForClient_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle arg0, struct NV_RANGE *arg1); +#ifdef __nvoc_kern_bus_h_disabled +static inline NV_STATUS kbusGetBar1VARangeForClient(OBJGPU *pGpu, struct KernelBus *pKernelBus, NvHandle arg0, struct NV_RANGE *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetBar1VARangeForClient(pGpu, pKernelBus, arg0, arg1) kbusGetBar1VARangeForClient_IMPL(pGpu, pKernelBus, arg0, arg1) +#endif //__nvoc_kern_bus_h_disabled + +NvU32 kbusGetFlushAperture_IMPL(struct KernelBus *pKernelBus, NV_ADDRESS_SPACE addrSpace); +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU32 kbusGetFlushAperture(struct KernelBus *pKernelBus, NV_ADDRESS_SPACE addrSpace) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return 0; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusGetFlushAperture(pKernelBus, addrSpace) kbusGetFlushAperture_IMPL(pKernelBus, addrSpace) +#endif //__nvoc_kern_bus_h_disabled + +NvU8 *kbusCpuOffsetInBar2WindowGet_IMPL(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc); +#ifdef __nvoc_kern_bus_h_disabled +static inline NvU8 *kbusCpuOffsetInBar2WindowGet(OBJGPU *pGpu, struct KernelBus *pKernelBus, MEMORY_DESCRIPTOR *pMemDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelBus was disabled!"); + return NULL; +} +#else //__nvoc_kern_bus_h_disabled +#define kbusCpuOffsetInBar2WindowGet(pGpu, pKernelBus, pMemDesc) kbusCpuOffsetInBar2WindowGet_IMPL(pGpu, pKernelBus, pMemDesc) +#endif //__nvoc_kern_bus_h_disabled + +#undef PRIVATE_FIELD + + +#define KBUS_BAR2_ENABLED(pKernelBus) 
(!(pKernelBus)->bBar2Tunnelled || (pKernelBus)->bBar2InternalOnly) +#define KBUS_BAR2_TUNNELLED(pKernelBus) ((pKernelBus)->bBar2Tunnelled) +#define kbusMapRmAperture_HAL(pGpu, pMemDesc) memdescMapInternal(pGpu, pMemDesc, 0) + +#define kbusUnmapRmApertureWithFlags_HAL(pGpu, pMemDesc, pCpuPtr, flags) (memdescUnmapInternal(pGpu, pMemDesc, flags), ((void) (*(pCpuPtr) = NULL))) + +#define kbusUnmapRmAperture_HAL(pGpu, pMemDesc, pCpuPtr, bFlush) \ + kbusUnmapRmApertureWithFlags_HAL(pGpu, pMemDesc, pCpuPtr, \ + (bFlush) ? TRANSFER_FLAGS_NONE : TRANSFER_FLAGS_DEFER_FLUSH) + +#endif // KERN_BUS_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERN_BUS_NVOC_H_ diff --git a/src/nvidia/generated/g_kern_disp_nvoc.c b/src/nvidia/generated/g_kern_disp_nvoc.c new file mode 100644 index 000000000..f4deaa1dc --- /dev/null +++ b/src/nvidia/generated/g_kern_disp_nvoc.c @@ -0,0 +1,506 @@ +#define NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_disp_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x55952e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_KernelDisplay(KernelDisplay*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelDisplay(KernelDisplay*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelDisplay(KernelDisplay*, RmHalspecOwner* ); +void __nvoc_dtor_KernelDisplay(KernelDisplay*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelDisplay; + +static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_KernelDisplay = { + /*pClassDef=*/ &__nvoc_class_def_KernelDisplay, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelDisplay, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_IntrService), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelDisplay = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_KernelDisplay_KernelDisplay, + &__nvoc_rtti_KernelDisplay_IntrService, + &__nvoc_rtti_KernelDisplay_OBJENGSTATE, + &__nvoc_rtti_KernelDisplay_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelDisplay), + /*classId=*/ classId(KernelDisplay), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelDisplay", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelDisplay, + /*pCastInfo=*/ &__nvoc_castinfo_KernelDisplay, + /*pExportInfo=*/ 
&__nvoc_export_info_KernelDisplay +}; + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, ENGDESCRIPTOR engDesc) { + return kdispConstructEngine(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), engDesc); +} + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStatePreInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + return kdispStatePreInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + return kdispStateInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_KernelDisplay_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) { + kdispStateDestroy(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateLoad(OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) { + return kdispStateLoad(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) { + return kdispStateUnload(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), flags); +} + +static void __nvoc_thunk_KernelDisplay_intrservRegisterIntrService(OBJGPU *pGpu, struct IntrService *pKernelDisplay, IntrServiceRecord pRecords[155]) { + kdispRegisterIntrService(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_IntrService.offset), pRecords); +} + +static NvU32 __nvoc_thunk_KernelDisplay_intrservServiceInterrupt(OBJGPU *pGpu, struct IntrService *pKernelDisplay, IntrServiceServiceInterruptArguments *pParams) { + return kdispServiceInterrupt(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispReconcileTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_IntrService_kdispServiceNotificationInterrupt(OBJGPU *pGpu, struct KernelDisplay *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return intrservServiceNotificationInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelDisplay_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreLoad(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePostUnload(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0); +} + 
+static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreUnload(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStateInitUnlocked(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kdispInitMissing(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreInitUnlocked(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispGetTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispCompareTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kdispFreeTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_IntrService_kdispClearInterrupt(OBJGPU *pGpu, struct KernelDisplay *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelDisplay_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePostLoad(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispAllocTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispSetTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kdispIsPresent(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelDisplay = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void 
__nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_KernelDisplay(KernelDisplay *pThis) { + __nvoc_kdispDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_KDISP_IS_MISSING + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IS_MISSING, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KDISP_IMP_ENABLE + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ENABLE, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ENABLE, ((NvBool)(0 != 0))); + } + + pThis->pStaticInfo = ((void *)0); + + pThis->bWarPurgeSatellitesOnCoreFree = ((NvBool)(0 != 0)); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* ); +NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelDisplay_fail_OBJENGSTATE; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService); + if (status != NV_OK) goto __nvoc_ctor_KernelDisplay_fail_IntrService; + __nvoc_init_dataField_KernelDisplay(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelDisplay_exit; // Success + +__nvoc_ctor_KernelDisplay_fail_IntrService: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_KernelDisplay_fail_OBJENGSTATE: +__nvoc_ctor_KernelDisplay_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelDisplay_1(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal; + const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + 
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(dispIpHal); + PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx); + + pThis->__kdispConstructEngine__ = &kdispConstructEngine_IMPL; + + pThis->__kdispStatePreInitLocked__ = &kdispStatePreInitLocked_IMPL; + + pThis->__kdispStateInitLocked__ = &kdispStateInitLocked_IMPL; + + pThis->__kdispStateDestroy__ = &kdispStateDestroy_IMPL; + + pThis->__kdispStateLoad__ = &kdispStateLoad_IMPL; + + pThis->__kdispStateUnload__ = &kdispStateUnload_IMPL; + + pThis->__kdispRegisterIntrService__ = &kdispRegisterIntrService_IMPL; + + // Hal function -- kdispServiceInterrupt + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__kdispServiceInterrupt__ = &kdispServiceInterrupt_d3ef2b; + } + else if (0) + { + } + + // Hal function -- kdispSelectClass + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__kdispSelectClass__ = &kdispSelectClass_46f6a7; + } + else if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__kdispSelectClass__ = &kdispSelectClass_v03_00_KERNEL; + } + } + else if (0) + { +#if 0 + if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__kdispSelectClass__ = &kdispSelectClass_46f6a7; + } + else if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__kdispSelectClass__ = &kdispSelectClass_v03_00_KERNEL; + } +#endif + } + + // Hal function -- kdispGetChannelNum + if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__kdispGetChannelNum__ = &kdispGetChannelNum_46f6a7; + } + else if (0) + { + } + else if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__kdispGetChannelNum__ = &kdispGetChannelNum_v03_00; + } + + // Hal function -- kdispGetDisplayCapsBaseAndSize + if (((( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ && (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ )) + { + pThis->__kdispGetDisplayCapsBaseAndSize__ = &kdispGetDisplayCapsBaseAndSize_b3696a; + } + else if (((( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ && (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* DispIpHal: DISPv0400 */ ) || + ((( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ && (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 
0x00000800UL) )) /* DispIpHal: DISPv0401 */ )) + { + pThis->__kdispGetDisplayCapsBaseAndSize__ = &kdispGetDisplayCapsBaseAndSize_v03_00; + } + + // Hal function -- kdispGetDisplaySfUserBaseAndSize + if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__kdispGetDisplaySfUserBaseAndSize__ = &kdispGetDisplaySfUserBaseAndSize_b3696a; + } + else if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__kdispGetDisplaySfUserBaseAndSize__ = &kdispGetDisplaySfUserBaseAndSize_v03_00; + } + + // Hal function -- kdispGetDisplayChannelUserBaseAndSize + if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00008000UL) )) /* DispIpHal: DISPv0000 */ + { + pThis->__kdispGetDisplayChannelUserBaseAndSize__ = &kdispGetDisplayChannelUserBaseAndSize_46f6a7; + } + else if (0) + { + } + else if (0) + { + } + else if (( ((dispIpHal_HalVarIdx >> 5) == 0UL) && ((1UL << (dispIpHal_HalVarIdx & 0x1f)) & 0x00000c00UL) )) /* DispIpHal: DISPv0400 | DISPv0401 */ + { + pThis->__kdispGetDisplayChannelUserBaseAndSize__ = &kdispGetDisplayChannelUserBaseAndSize_v03_00; + } + + // Hal function -- kdispGetVgaWorkspaceBase + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kdispGetVgaWorkspaceBase__ = &kdispGetVgaWorkspaceBase_v04_00; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kdispGetVgaWorkspaceBase__ = &kdispGetVgaWorkspaceBase_491d52; + } + // default + else + { + pThis->__kdispGetVgaWorkspaceBase__ = &kdispGetVgaWorkspaceBase_ceaee8; + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelDisplay_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelDisplay_engstateStatePreInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelDisplay_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelDisplay_engstateStateDestroy; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelDisplay_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelDisplay_engstateStateUnload; + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_KernelDisplay_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceInterrupt__ = &__nvoc_thunk_KernelDisplay_intrservServiceInterrupt; + + pThis->__kdispReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispReconcileTunableState; + + pThis->__kdispServiceNotificationInterrupt__ = &__nvoc_thunk_IntrService_kdispServiceNotificationInterrupt; + + pThis->__kdispStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreLoad; + + pThis->__kdispStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePostUnload; + + pThis->__kdispStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreUnload; + + pThis->__kdispStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kdispStateInitUnlocked; + + pThis->__kdispInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kdispInitMissing; + + pThis->__kdispStatePreInitUnlocked__ = 
&__nvoc_thunk_OBJENGSTATE_kdispStatePreInitUnlocked; + + pThis->__kdispGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispGetTunableState; + + pThis->__kdispCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispCompareTunableState; + + pThis->__kdispFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispFreeTunableState; + + pThis->__kdispClearInterrupt__ = &__nvoc_thunk_IntrService_kdispClearInterrupt; + + pThis->__kdispStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePostLoad; + + pThis->__kdispAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispAllocTunableState; + + pThis->__kdispSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispSetTunableState; + + pThis->__kdispIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kdispIsPresent; +} + +void __nvoc_init_funcTable_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelDisplay_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_IntrService(IntrService*); +void __nvoc_init_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelDisplay = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService); + __nvoc_init_funcTable_KernelDisplay(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelDisplay *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelDisplay)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelDisplay)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelDisplay); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelDisplay(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelDisplay(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelDisplay_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelDisplay_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelDisplay(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kern_disp_nvoc.h b/src/nvidia/generated/g_kern_disp_nvoc.h new file mode 100644 index 000000000..fa6d57289 --- /dev/null +++ b/src/nvidia/generated/g_kern_disp_nvoc.h @@ -0,0 +1,671 @@ +#ifndef _G_KERN_DISP_NVOC_H_ +#define _G_KERN_DISP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kern_disp_nvoc.h" + +#ifndef KERN_DISP_H +#define KERN_DISP_H + +/****************************************************************************** +* +* Kernel Display module header +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" +#include "gpu/disp/kern_disp_type.h" +#include "gpu/disp/kern_disp_max.h" +#include "gpu/mem_mgr/context_dma.h" +#include "gpu/disp/vblank_callback/vblank.h" + +#include "kernel/gpu/intr/intr_service.h" + +#include "ctrl/ctrl2080/ctrl2080internal.h" + +typedef NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS KernelDisplayStaticInfo; + +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + +struct RgLineCallback; + +#ifndef __NVOC_CLASS_RgLineCallback_TYPEDEF__ +#define __NVOC_CLASS_RgLineCallback_TYPEDEF__ +typedef struct RgLineCallback RgLineCallback; +#endif /* __NVOC_CLASS_RgLineCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RgLineCallback +#define __nvoc_class_id_RgLineCallback 0xa3ff1c +#endif /* __nvoc_class_id_RgLineCallback */ + + + +#define KDISP_GET_HEAD(pKernelDisplay, headID) (RMCFG_MODULE_KERNEL_HEAD ? kdispGetHead(pKernelDisplay, headID) : NULL) + +/*! + * KernelDisp is a logical abstraction of the GPU Display Engine. The + * Public API of the Display Engine is exposed through this object, and any + * interfaces which do not manage the underlying Display hardware can be + * managed by this object. 
+ */ +#ifdef NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelDisplay { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct IntrService __nvoc_base_IntrService; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct IntrService *__nvoc_pbase_IntrService; + struct KernelDisplay *__nvoc_pbase_KernelDisplay; + NV_STATUS (*__kdispConstructEngine__)(OBJGPU *, struct KernelDisplay *, ENGDESCRIPTOR); + NV_STATUS (*__kdispStatePreInitLocked__)(OBJGPU *, struct KernelDisplay *); + NV_STATUS (*__kdispStateInitLocked__)(OBJGPU *, struct KernelDisplay *); + void (*__kdispStateDestroy__)(OBJGPU *, struct KernelDisplay *); + NV_STATUS (*__kdispStateLoad__)(OBJGPU *, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispStateUnload__)(OBJGPU *, struct KernelDisplay *, NvU32); + void (*__kdispRegisterIntrService__)(OBJGPU *, struct KernelDisplay *, IntrServiceRecord *); + NvU32 (*__kdispServiceInterrupt__)(OBJGPU *, struct KernelDisplay *, IntrServiceServiceInterruptArguments *); + NV_STATUS (*__kdispSelectClass__)(OBJGPU *, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispGetChannelNum__)(struct KernelDisplay *, DISPCHNCLASS, NvU32, NvU32 *); + void (*__kdispGetDisplayCapsBaseAndSize__)(OBJGPU *, struct KernelDisplay *, NvU32 *, NvU32 *); + void (*__kdispGetDisplaySfUserBaseAndSize__)(OBJGPU *, struct KernelDisplay *, NvU32 *, NvU32 *); + NV_STATUS (*__kdispGetDisplayChannelUserBaseAndSize__)(OBJGPU *, struct KernelDisplay *, DISPCHNCLASS, NvU32, NvU32 *, NvU32 *); + NvBool (*__kdispGetVgaWorkspaceBase__)(OBJGPU *, struct KernelDisplay *, NvU64 *); + NV_STATUS (*__kdispReconcileTunableState__)(POBJGPU, struct KernelDisplay *, void *); + NV_STATUS (*__kdispServiceNotificationInterrupt__)(OBJGPU *, struct KernelDisplay *, IntrServiceServiceNotificationInterruptArguments *); + NV_STATUS (*__kdispStatePreLoad__)(POBJGPU, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispStatePostUnload__)(POBJGPU, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispStatePreUnload__)(POBJGPU, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispStateInitUnlocked__)(POBJGPU, struct KernelDisplay *); + void (*__kdispInitMissing__)(POBJGPU, struct KernelDisplay *); + NV_STATUS (*__kdispStatePreInitUnlocked__)(POBJGPU, struct KernelDisplay *); + NV_STATUS (*__kdispGetTunableState__)(POBJGPU, struct KernelDisplay *, void *); + NV_STATUS (*__kdispCompareTunableState__)(POBJGPU, struct KernelDisplay *, void *, void *); + void (*__kdispFreeTunableState__)(POBJGPU, struct KernelDisplay *, void *); + NvBool (*__kdispClearInterrupt__)(OBJGPU *, struct KernelDisplay *, IntrServiceClearInterruptArguments *); + NV_STATUS (*__kdispStatePostLoad__)(POBJGPU, struct KernelDisplay *, NvU32); + NV_STATUS (*__kdispAllocTunableState__)(POBJGPU, struct KernelDisplay *, void **); + NV_STATUS (*__kdispSetTunableState__)(POBJGPU, struct KernelDisplay *, void *); + NvBool (*__kdispIsPresent__)(POBJGPU, struct KernelDisplay *); + NvBool PDB_PROP_KDISP_IMP_ENABLE; + struct DisplayInstanceMemory *pInst; + struct KernelHead *pKernelHead[4]; + const KernelDisplayStaticInfo *pStaticInfo; + NvBool bWarPurgeSatellitesOnCoreFree; + struct RgLineCallback *rgLineCallbackPerHead[4][2]; + NvU32 isrVblankHeads; +}; + +#ifndef __NVOC_CLASS_KernelDisplay_TYPEDEF__ +#define __NVOC_CLASS_KernelDisplay_TYPEDEF__ +typedef struct KernelDisplay KernelDisplay; +#endif /* 
__NVOC_CLASS_KernelDisplay_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelDisplay +#define __nvoc_class_id_KernelDisplay 0x55952e +#endif /* __nvoc_class_id_KernelDisplay */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay; + +#define __staticCast_KernelDisplay(pThis) \ + ((pThis)->__nvoc_pbase_KernelDisplay) + +#ifdef __nvoc_kern_disp_h_disabled +#define __dynamicCast_KernelDisplay(pThis) ((KernelDisplay*)NULL) +#else //__nvoc_kern_disp_h_disabled +#define __dynamicCast_KernelDisplay(pThis) \ + ((KernelDisplay*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelDisplay))) +#endif //__nvoc_kern_disp_h_disabled + +#define PDB_PROP_KDISP_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KDISP_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_KDISP_IMP_ENABLE_BASE_CAST +#define PDB_PROP_KDISP_IMP_ENABLE_BASE_NAME PDB_PROP_KDISP_IMP_ENABLE + +NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay**, Dynamic*, NvU32); +#define __objCreate_KernelDisplay(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelDisplay((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kdispConstructEngine(pGpu, pKernelDisplay, engDesc) kdispConstructEngine_DISPATCH(pGpu, pKernelDisplay, engDesc) +#define kdispStatePreInitLocked(pGpu, pKernelDisplay) kdispStatePreInitLocked_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateInitLocked(pGpu, pKernelDisplay) kdispStateInitLocked_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateDestroy(pGpu, pKernelDisplay) kdispStateDestroy_DISPATCH(pGpu, pKernelDisplay) +#define kdispStateLoad(pGpu, pKernelDisplay, flags) kdispStateLoad_DISPATCH(pGpu, pKernelDisplay, flags) +#define kdispStateUnload(pGpu, pKernelDisplay, flags) kdispStateUnload_DISPATCH(pGpu, pKernelDisplay, flags) +#define kdispRegisterIntrService(pGpu, pKernelDisplay, pRecords) kdispRegisterIntrService_DISPATCH(pGpu, pKernelDisplay, pRecords) +#define kdispServiceInterrupt(pGpu, pKernelDisplay, pParams) kdispServiceInterrupt_DISPATCH(pGpu, pKernelDisplay, pParams) +#define kdispServiceInterrupt_HAL(pGpu, pKernelDisplay, pParams) kdispServiceInterrupt_DISPATCH(pGpu, pKernelDisplay, pParams) +#define kdispSelectClass(pGpu, pKernelDisplay, swClass) kdispSelectClass_DISPATCH(pGpu, pKernelDisplay, swClass) +#define kdispSelectClass_HAL(pGpu, pKernelDisplay, swClass) kdispSelectClass_DISPATCH(pGpu, pKernelDisplay, swClass) +#define kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum_DISPATCH(pKernelDisplay, channelClass, channelInstance, pChannelNum) +#define kdispGetChannelNum_HAL(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum_DISPATCH(pKernelDisplay, channelClass, channelInstance, pChannelNum) +#define kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize_DISPATCH(pGpu, pKernelDisplay, pOffset, pSize) +#define kdispGetDisplayCapsBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize_DISPATCH(pGpu, pKernelDisplay, pOffset, pSize) +#define kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize_DISPATCH(pGpu, pKernelDisplay, pOffset, pSize) +#define kdispGetDisplaySfUserBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize_DISPATCH(pGpu, pKernelDisplay, pOffset, pSize) +#define 
kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize_DISPATCH(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) +#define kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize_DISPATCH(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) +#define kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase_DISPATCH(pGpu, pKernelDisplay, pOffset) +#define kdispGetVgaWorkspaceBase_HAL(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase_DISPATCH(pGpu, pKernelDisplay, pOffset) +#define kdispReconcileTunableState(pGpu, pEngstate, pTunableState) kdispReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kdispServiceNotificationInterrupt(pGpu, pIntrService, pParams) kdispServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define kdispStatePreLoad(pGpu, pEngstate, arg0) kdispStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kdispStatePostUnload(pGpu, pEngstate, arg0) kdispStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kdispStatePreUnload(pGpu, pEngstate, arg0) kdispStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kdispStateInitUnlocked(pGpu, pEngstate) kdispStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kdispInitMissing(pGpu, pEngstate) kdispInitMissing_DISPATCH(pGpu, pEngstate) +#define kdispStatePreInitUnlocked(pGpu, pEngstate) kdispStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kdispGetTunableState(pGpu, pEngstate, pTunableState) kdispGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kdispCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kdispCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kdispFreeTunableState(pGpu, pEngstate, pTunableState) kdispFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kdispClearInterrupt(pGpu, pIntrService, pParams) kdispClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define kdispStatePostLoad(pGpu, pEngstate, arg0) kdispStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kdispAllocTunableState(pGpu, pEngstate, ppTunableState) kdispAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kdispSetTunableState(pGpu, pEngstate, pTunableState) kdispSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kdispIsPresent(pGpu, pEngstate) kdispIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kdispConstructInstMem_IMPL(struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispConstructInstMem(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispConstructInstMem(pKernelDisplay) kdispConstructInstMem_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispConstructInstMem_HAL(pKernelDisplay) kdispConstructInstMem(pKernelDisplay) + +void kdispDestructInstMem_IMPL(struct KernelDisplay *pKernelDisplay); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispDestructInstMem(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDestructInstMem(pKernelDisplay) kdispDestructInstMem_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define 
kdispDestructInstMem_HAL(pKernelDisplay) kdispDestructInstMem(pKernelDisplay) + +static inline NvS32 kdispGetBaseOffset_4a4dee(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return 0; +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvS32 kdispGetBaseOffset(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return 0; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetBaseOffset(pGpu, pKernelDisplay) kdispGetBaseOffset_4a4dee(pGpu, pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispGetBaseOffset_HAL(pGpu, pKernelDisplay) kdispGetBaseOffset(pGpu, pKernelDisplay) + +static inline NV_STATUS kdispImportImpData_56cd7a(struct KernelDisplay *pKernelDisplay) { + return NV_OK; +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispImportImpData(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispImportImpData(pKernelDisplay) kdispImportImpData_56cd7a(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispImportImpData_HAL(pKernelDisplay) kdispImportImpData(pKernelDisplay) + +static inline NV_STATUS kdispArbAndAllocDisplayBandwidth_46f6a7(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispArbAndAllocDisplayBandwidth(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth_46f6a7(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispArbAndAllocDisplayBandwidth_HAL(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) + +NV_STATUS kdispSetPushBufferParamsToPhysical_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispSetPushBufferParamsToPhysical(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) kdispSetPushBufferParamsToPhysical_IMPL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) +#endif 
//__nvoc_kern_disp_h_disabled + +#define kdispSetPushBufferParamsToPhysical_HAL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) + +static inline NV_STATUS kdispAcquireDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) { + return NV_OK; +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispAcquireDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw_56cd7a(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispAcquireDispChannelHw_HAL(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) + +static inline NV_STATUS kdispReleaseDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + return NV_OK; +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispReleaseDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw_56cd7a(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel) + +NV_STATUS kdispMapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispMapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispMapDispChannel(pKernelDisplay, pDispChannel) kdispMapDispChannel_IMPL(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispMapDispChannel(pKernelDisplay, pDispChannel) + +void kdispUnbindUnmapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispUnbindUnmapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define 
kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel_IMPL(pKernelDisplay, pDispChannel) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel) + +NV_STATUS kdispRegisterRgLineCallback_IMPL(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispRegisterRgLineCallback(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback_IMPL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispRegisterRgLineCallback_HAL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) + +void kdispInvokeRgLineCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispInvokeRgLineCallback(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback_KERNEL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispInvokeRgLineCallback_HAL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) + +void kdispServiceVblank_KERNEL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispServiceVblank(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispServiceVblank(pGpu, pKernelDisplay, arg0, arg1, arg2) kdispServiceVblank_KERNEL(pGpu, pKernelDisplay, arg0, arg1, arg2) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispServiceVblank_HAL(pGpu, pKernelDisplay, arg0, arg1, arg2) kdispServiceVblank(pGpu, pKernelDisplay, arg0, arg1, arg2) + +NvU32 kdispReadPendingVblank_KERNEL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_kern_disp_h_disabled +static inline NvU32 kdispReadPendingVblank(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return 0; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispReadPendingVblank(pGpu, pKernelDisplay, arg0) kdispReadPendingVblank_KERNEL(pGpu, pKernelDisplay, arg0) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, arg0) kdispReadPendingVblank(pGpu, pKernelDisplay, arg0) + +static inline void kdispInvokeDisplayModesetCallback_b3696a(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, 
NvU32 minRequiredFloorBandwidthKBPS) { + return; +} + +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispInvokeDisplayModesetCallback(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback_b3696a(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) +#endif //__nvoc_kern_disp_h_disabled + +#define kdispInvokeDisplayModesetCallback_HAL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) + +NV_STATUS kdispConstructEngine_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc); + +static inline NV_STATUS kdispConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc) { + return pKernelDisplay->__kdispConstructEngine__(pGpu, pKernelDisplay, engDesc); +} + +NV_STATUS kdispStatePreInitLocked_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +static inline NV_STATUS kdispStatePreInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return pKernelDisplay->__kdispStatePreInitLocked__(pGpu, pKernelDisplay); +} + +NV_STATUS kdispStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +static inline NV_STATUS kdispStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + return pKernelDisplay->__kdispStateInitLocked__(pGpu, pKernelDisplay); +} + +void kdispStateDestroy_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay); + +static inline void kdispStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) { + pKernelDisplay->__kdispStateDestroy__(pGpu, pKernelDisplay); +} + +NV_STATUS kdispStateLoad_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags); + +static inline NV_STATUS kdispStateLoad_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) { + return pKernelDisplay->__kdispStateLoad__(pGpu, pKernelDisplay, flags); +} + +NV_STATUS kdispStateUnload_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags); + +static inline NV_STATUS kdispStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) { + return pKernelDisplay->__kdispStateUnload__(pGpu, pKernelDisplay, flags); +} + +void kdispRegisterIntrService_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, IntrServiceRecord pRecords[155]); + +static inline void kdispRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, IntrServiceRecord pRecords[155]) { + pKernelDisplay->__kdispRegisterIntrService__(pGpu, pKernelDisplay, pRecords); +} + +static inline NvU32 kdispServiceInterrupt_d3ef2b(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, IntrServiceServiceInterruptArguments *pParams) { + kdispServiceVblank(pGpu, pKernelDisplay, 0, ((2) | (16)), ((void *)0)); + return NV_OK; +} + +static inline NvU32 kdispServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, IntrServiceServiceInterruptArguments *pParams) { + return pKernelDisplay->__kdispServiceInterrupt__(pGpu, pKernelDisplay, pParams); +} + +static inline 
NV_STATUS kdispSelectClass_ef1e3d(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass) { + NV_ASSERT_FAILED_PRECOMP("Cannot call kdispSelectClass on ucode"); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kdispSelectClass_46f6a7(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass) { + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS kdispSelectClass_v03_00_KERNEL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass); + +NV_STATUS kdispSelectClass_v03_00_KERNEL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass); + +static inline NV_STATUS kdispSelectClass_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass) { + return pKernelDisplay->__kdispSelectClass__(pGpu, pKernelDisplay, swClass); +} + +static inline NV_STATUS kdispGetChannelNum_46f6a7(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum) { + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS kdispGetChannelNum_v03_00(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum); + +static inline NV_STATUS kdispGetChannelNum_DISPATCH(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum) { + return pKernelDisplay->__kdispGetChannelNum__(pKernelDisplay, channelClass, channelInstance, pChannelNum); +} + +static inline void kdispGetDisplayCapsBaseAndSize_b3696a(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) { + return; +} + +void kdispGetDisplayCapsBaseAndSize_v03_00(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize); + +static inline void kdispGetDisplayCapsBaseAndSize_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) { + pKernelDisplay->__kdispGetDisplayCapsBaseAndSize__(pGpu, pKernelDisplay, pOffset, pSize); +} + +static inline void kdispGetDisplaySfUserBaseAndSize_b3696a(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) { + return; +} + +void kdispGetDisplaySfUserBaseAndSize_v03_00(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize); + +static inline void kdispGetDisplaySfUserBaseAndSize_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) { + pKernelDisplay->__kdispGetDisplaySfUserBaseAndSize__(pGpu, pKernelDisplay, pOffset, pSize); +} + +static inline NV_STATUS kdispGetDisplayChannelUserBaseAndSize_46f6a7(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize) { + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS kdispGetDisplayChannelUserBaseAndSize_v03_00(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS kdispGetDisplayChannelUserBaseAndSize_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize) { + return pKernelDisplay->__kdispGetDisplayChannelUserBaseAndSize__(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize); +} + +NvBool kdispGetVgaWorkspaceBase_v04_00(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset); + +static inline NvBool kdispGetVgaWorkspaceBase_491d52(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool 
kdispGetVgaWorkspaceBase_ceaee8(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) { + NV_ASSERT_PRECOMP(0); + return ((NvBool)(0 != 0)); +} + +static inline NvBool kdispGetVgaWorkspaceBase_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) { + return pKernelDisplay->__kdispGetVgaWorkspaceBase__(pGpu, pKernelDisplay, pOffset); +} + +static inline NV_STATUS kdispReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return pEngstate->__kdispReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kdispServiceNotificationInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return pIntrService->__kdispServiceNotificationInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS kdispStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return pEngstate->__kdispStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kdispStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return pEngstate->__kdispStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kdispStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return pEngstate->__kdispStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kdispStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return pEngstate->__kdispStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kdispInitMissing_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + pEngstate->__kdispInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kdispStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return pEngstate->__kdispStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kdispGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return pEngstate->__kdispGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kdispCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kdispCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kdispFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + pEngstate->__kdispFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kdispClearInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelDisplay *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__kdispClearInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS kdispStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) { + return pEngstate->__kdispStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kdispAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void **ppTunableState) { + return pEngstate->__kdispAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kdispSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) { + return pEngstate->__kdispSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kdispIsPresent_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) { + return 
pEngstate->__kdispIsPresent__(pGpu, pEngstate); +} + +void kdispDestruct_IMPL(struct KernelDisplay *pKernelDisplay); +#define __nvoc_kdispDestruct(pKernelDisplay) kdispDestruct_IMPL(pKernelDisplay) +NV_STATUS kdispConstructKhead_IMPL(struct KernelDisplay *pKernelDisplay); +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispConstructKhead(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispConstructKhead(pKernelDisplay) kdispConstructKhead_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +void kdispDestructKhead_IMPL(struct KernelDisplay *pKernelDisplay); +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispDestructKhead(struct KernelDisplay *pKernelDisplay) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispDestructKhead(pKernelDisplay) kdispDestructKhead_IMPL(pKernelDisplay) +#endif //__nvoc_kern_disp_h_disabled + +NV_STATUS kdispGetIntChnClsForHwCls_IMPL(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass); +#ifdef __nvoc_kern_disp_h_disabled +static inline NV_STATUS kdispGetIntChnClsForHwCls(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_disp_h_disabled +#define kdispGetIntChnClsForHwCls(pKernelDisplay, hwClass, pDispChnClass) kdispGetIntChnClsForHwCls_IMPL(pKernelDisplay, hwClass, pDispChnClass) +#endif //__nvoc_kern_disp_h_disabled + +void kdispNotifyEvent_IMPL(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16); +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispNotifyEvent(OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispNotifyEvent(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) kdispNotifyEvent_IMPL(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) +#endif //__nvoc_kern_disp_h_disabled + +void kdispSetWarPurgeSatellitesOnCoreFree_IMPL(struct KernelDisplay *pKernelDisplay, NvBool value); +#ifdef __nvoc_kern_disp_h_disabled +static inline void kdispSetWarPurgeSatellitesOnCoreFree(struct KernelDisplay *pKernelDisplay, NvBool value) { + NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!"); +} +#else //__nvoc_kern_disp_h_disabled +#define kdispSetWarPurgeSatellitesOnCoreFree(pKernelDisplay, value) kdispSetWarPurgeSatellitesOnCoreFree_IMPL(pKernelDisplay, value) +#endif //__nvoc_kern_disp_h_disabled + +#undef PRIVATE_FIELD + + +static NV_INLINE struct KernelHead* +kdispGetHead +( + struct KernelDisplay *pKernelDisplay, + NvU32 head +) +{ + if (head >= OBJ_MAX_HEADS) + { + return NULL; + } + + return pKernelDisplay->pKernelHead[head]; +} +#endif // KERN_DISP_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERN_DISP_NVOC_H_ diff --git a/src/nvidia/generated/g_kern_gmmu_nvoc.c b/src/nvidia/generated/g_kern_gmmu_nvoc.c new file mode 100644 index 000000000..5a6751ade --- /dev/null +++ b/src/nvidia/generated/g_kern_gmmu_nvoc.c @@ -0,0 +1,505 @@ +#define NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED 
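/*
 * A minimal sketch of what this #define controls, based on the PRIVATE_FIELD()
 * wrapper that appears in g_kern_gmmu_nvoc.h later in this diff; the inline
 * comments are illustrative only, not part of the generated output:
 *
 *   #ifdef NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED
 *   #define PRIVATE_FIELD(x) x                       // this .c sees the member as-is
 *   #else
 *   #define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)   // other includers get the guarded form
 *   #endif
 *
 * Defining the macro before the #include below therefore appears intended to
 * let the generated implementation access fields that are hidden from outside
 * callers.
 */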
+#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_gmmu_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x29362f = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGmmu; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_KernelGmmu(KernelGmmu*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelGmmu(KernelGmmu*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelGmmu(KernelGmmu*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelGmmu(KernelGmmu*, RmHalspecOwner* ); +void __nvoc_dtor_KernelGmmu(KernelGmmu*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGmmu; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGmmu_KernelGmmu = { + /*pClassDef=*/ &__nvoc_class_def_KernelGmmu, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelGmmu, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGmmu_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGmmu, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGmmu_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGmmu, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGmmu_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGmmu, __nvoc_base_IntrService), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGmmu = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_KernelGmmu_KernelGmmu, + &__nvoc_rtti_KernelGmmu_IntrService, + &__nvoc_rtti_KernelGmmu_OBJENGSTATE, + &__nvoc_rtti_KernelGmmu_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGmmu = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelGmmu), + /*classId=*/ classId(KernelGmmu), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelGmmu", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelGmmu, + /*pCastInfo=*/ &__nvoc_castinfo_KernelGmmu, + /*pExportInfo=*/ &__nvoc_export_info_KernelGmmu +}; + +static NV_STATUS __nvoc_thunk_KernelGmmu_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pKernelGmmu, ENGDESCRIPTOR arg0) { + return kgmmuConstructEngine(pGpu, (struct KernelGmmu *)(((unsigned char *)pKernelGmmu) - __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_KernelGmmu_engstateStateInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pKernelGmmu) { + return kgmmuStateInitLocked(pGpu, (struct KernelGmmu *)(((unsigned char *)pKernelGmmu) - __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelGmmu_engstateStatePostLoad(OBJGPU *pGpu, struct OBJENGSTATE *pKernelGmmu, NvU32 flags) { + return kgmmuStatePostLoad(pGpu, (struct KernelGmmu *)(((unsigned char *)pKernelGmmu) - __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), flags); +} + +static void __nvoc_thunk_KernelGmmu_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pKernelGmmu) { + kgmmuStateDestroy(pGpu, (struct KernelGmmu *)(((unsigned char *)pKernelGmmu) - 
__nvoc_rtti_KernelGmmu_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_KernelGmmu_intrservRegisterIntrService(OBJGPU *pGpu, struct IntrService *pKernelGmmu, IntrServiceRecord arg0[155]) { + kgmmuRegisterIntrService(pGpu, (struct KernelGmmu *)(((unsigned char *)pKernelGmmu) - __nvoc_rtti_KernelGmmu_IntrService.offset), arg0); +} + +static NvU32 __nvoc_thunk_KernelGmmu_intrservServiceInterrupt(OBJGPU *pGpu, struct IntrService *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams) { + return kgmmuServiceInterrupt(pGpu, (struct KernelGmmu *)(((unsigned char *)pKernelGmmu) - __nvoc_rtti_KernelGmmu_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuReconcileTunableState(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuStateLoad(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuStateUnload(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_IntrService_kgmmuServiceNotificationInterrupt(struct OBJGPU *pGpu, struct KernelGmmu *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return intrservServiceNotificationInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGmmu_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuStatePreLoad(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuStatePostUnload(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuStatePreUnload(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuStateInitUnlocked(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kgmmuInitMissing(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuStatePreInitLocked(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuStatePreInitUnlocked(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct 
OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuGetTunableState(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuCompareTunableState(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kgmmuFreeTunableState(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_IntrService_kgmmuClearInterrupt(struct OBJGPU *pGpu, struct KernelGmmu *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGmmu_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuAllocTunableState(POBJGPU pGpu, struct KernelGmmu *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgmmuSetTunableState(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kgmmuIsPresent(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGmmu_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGmmu = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_KernelGmmu(KernelGmmu *pThis) { + __nvoc_kgmmuDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelGmmu(KernelGmmu *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, 
PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED, ((NvBool)(0 != 0))); + } + + // Hal field -- defaultBigPageSize + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->defaultBigPageSize = (64 * 1024); + } + else if (0) + { + } + + // Hal field -- bHugePageSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bHugePageSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bHugePageSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bPageSize512mbSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bPageSize512mbSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bPageSize512mbSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bBug2720120WarEnabled + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->bBug2720120WarEnabled = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bBug2720120WarEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bVaspaceInteropSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bVaspaceInteropSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bVaspaceInteropSupported = ((NvBool)(0 != 0)); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* ); +NV_STATUS __nvoc_ctor_KernelGmmu(KernelGmmu *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelGmmu_fail_OBJENGSTATE; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService); + if (status != NV_OK) goto __nvoc_ctor_KernelGmmu_fail_IntrService; + __nvoc_init_dataField_KernelGmmu(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelGmmu_exit; // Success + +__nvoc_ctor_KernelGmmu_fail_IntrService: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_KernelGmmu_fail_OBJENGSTATE: +__nvoc_ctor_KernelGmmu_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelGmmu_1(KernelGmmu *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kgmmuConstructEngine__ = &kgmmuConstructEngine_IMPL; + + pThis->__kgmmuStateInitLocked__ = 
&kgmmuStateInitLocked_IMPL; + + // Hal function -- kgmmuStatePostLoad + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__kgmmuStatePostLoad__ = &kgmmuStatePostLoad_IMPL; + } + else if (0) + { + } + + pThis->__kgmmuStateDestroy__ = &kgmmuStateDestroy_IMPL; + + pThis->__kgmmuRegisterIntrService__ = &kgmmuRegisterIntrService_IMPL; + + pThis->__kgmmuServiceInterrupt__ = &kgmmuServiceInterrupt_IMPL; + + // Hal function -- kgmmuInstBlkVaLimitGet + if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kgmmuInstBlkVaLimitGet__ = &kgmmuInstBlkVaLimitGet_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgmmuInstBlkVaLimitGet__ = &kgmmuInstBlkVaLimitGet_f03539; + } + + // Hal function -- kgmmuSetTlbInvalidateMembarWarParameters + if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kgmmuSetTlbInvalidateMembarWarParameters__ = &kgmmuSetTlbInvalidateMembarWarParameters_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgmmuSetTlbInvalidateMembarWarParameters__ = &kgmmuSetTlbInvalidateMembarWarParameters_4a4dee; + } + + // Hal function -- kgmmuSetTlbInvalidationScope + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgmmuSetTlbInvalidationScope__ = &kgmmuSetTlbInvalidationScope_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kgmmuSetTlbInvalidationScope__ = &kgmmuSetTlbInvalidationScope_46f6a7; + } + + // Hal function -- kgmmuFmtInitLevels + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kgmmuFmtInitLevels__ = &kgmmuFmtInitLevels_GP10X; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgmmuFmtInitLevels__ = &kgmmuFmtInitLevels_GA10X; + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- kgmmuSetupWarForBug2720120 + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kgmmuSetupWarForBug2720120__ = &kgmmuSetupWarForBug2720120_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgmmuSetupWarForBug2720120__ = &kgmmuSetupWarForBug2720120_56cd7a; + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelGmmu_engstateConstructEngine; + + 
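/*
 * A worked example of the ChipHal checks used above, with a purely hypothetical
 * index value (the real __nvoc_HalVarIdx values are defined elsewhere): if
 * chipHal_HalVarIdx were 37, then
 *
 *   (37 >> 5) == 1UL                    -> selects the second 32-bit mask word
 *   (1UL << (37 & 0x1f)) == 0x20UL      -> this chip's bit within that word
 *   (0x20UL & 0x0000ffe0UL) != 0        -> bit is present in the TU10x/GA10x mask
 *
 * so the matching branch installs that chip family's function pointer. The
 * vtable assignments below use the __nvoc_thunk_* helpers defined earlier in
 * this file, which add or subtract the NVOC_RTTI offsets to convert between
 * the KernelGmmu pointer and its OBJENGSTATE/IntrService base pointers.
 */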
pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelGmmu_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__ = &__nvoc_thunk_KernelGmmu_engstateStatePostLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelGmmu_engstateStateDestroy; + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_KernelGmmu_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceInterrupt__ = &__nvoc_thunk_KernelGmmu_intrservServiceInterrupt; + + pThis->__kgmmuReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgmmuReconcileTunableState; + + pThis->__kgmmuStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kgmmuStateLoad; + + pThis->__kgmmuStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kgmmuStateUnload; + + pThis->__kgmmuServiceNotificationInterrupt__ = &__nvoc_thunk_IntrService_kgmmuServiceNotificationInterrupt; + + pThis->__kgmmuStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kgmmuStatePreLoad; + + pThis->__kgmmuStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kgmmuStatePostUnload; + + pThis->__kgmmuStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kgmmuStatePreUnload; + + pThis->__kgmmuStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgmmuStateInitUnlocked; + + pThis->__kgmmuInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kgmmuInitMissing; + + pThis->__kgmmuStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kgmmuStatePreInitLocked; + + pThis->__kgmmuStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgmmuStatePreInitUnlocked; + + pThis->__kgmmuGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgmmuGetTunableState; + + pThis->__kgmmuCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgmmuCompareTunableState; + + pThis->__kgmmuFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgmmuFreeTunableState; + + pThis->__kgmmuClearInterrupt__ = &__nvoc_thunk_IntrService_kgmmuClearInterrupt; + + pThis->__kgmmuAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgmmuAllocTunableState; + + pThis->__kgmmuSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgmmuSetTunableState; + + pThis->__kgmmuIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kgmmuIsPresent; +} + +void __nvoc_init_funcTable_KernelGmmu(KernelGmmu *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelGmmu_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_IntrService(IntrService*); +void __nvoc_init_KernelGmmu(KernelGmmu *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelGmmu = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService); + __nvoc_init_funcTable_KernelGmmu(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelGmmu *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelGmmu)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelGmmu)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelGmmu); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, 
&pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelGmmu(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelGmmu(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelGmmu_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelGmmu_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelGmmu(KernelGmmu **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelGmmu(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kern_gmmu_nvoc.h b/src/nvidia/generated/g_kern_gmmu_nvoc.h new file mode 100644 index 000000000..54b2137e8 --- /dev/null +++ b/src/nvidia/generated/g_kern_gmmu_nvoc.h @@ -0,0 +1,1530 @@ +#ifndef _G_KERN_GMMU_NVOC_H_ +#define _G_KERN_GMMU_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Kernel GMMU module header +* Defines and structures used on CPU RM for the GMMU object. 
+* +******************************************************************************/ + +#include "g_kern_gmmu_nvoc.h" + +#ifndef KERN_GMMU_H +#define KERN_GMMU_H + +#include "core/core.h" +#include "core/strict.h" +#include "nvtypes.h" +#include "nvoc/prelude.h" +#include "nvoc/object.h" +#include "gpu/mmu/mmu_trace.h" +#include "mmu/gmmu_fmt.h" +#include "class/cl90f1.h" // FERMI_VASPACE_A + +#include "gpu/gpu_timeout.h" +#include "containers/queue.h" +#include "gpu/eng_state.h" +#include "gpu/intr/intr_service.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" // RM_PAGE_SIZE_64K +#include "mmu/mmu_walk.h" + +#include "gpu/gpu_halspec.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS + +#include "class/clc369.h" // MMU_FAULT_BUFFER + +typedef struct COMPR_INFO COMPR_INFO; + +typedef struct GVAS_GPU_STATE GVAS_GPU_STATE; + +/*! + * Family of GMMU formats sharing the same version and PDE/PTE defines + * but with differing big page sizes. + * The term "family" is used here in the mathematical (set theory) sense. + * + * nv4kPte: GV100+ supports NV4K encoding, @ref gmmuStateInitHal_GV100 for more + * + */ +typedef struct +{ + GMMU_FMT_PDE_MULTI pdeMulti; + GMMU_FMT_PDE pde; + GMMU_FMT_PTE pte; + GMMU_ENTRY_VALUE sparsePte; + GMMU_ENTRY_VALUE sparsePde; + GMMU_ENTRY_VALUE sparsePdeMulti; + GMMU_ENTRY_VALUE nv4kPte; + GMMU_ENTRY_VALUE bug2720120WarPde0; + GMMU_ENTRY_VALUE bug2720120WarPde1; + GMMU_FMT *pFmts[GMMU_FMT_MAX_BIG_PAGE_SIZES]; +} GMMU_FMT_FAMILY; + +/*! + * This structure contains information needed for issuing a TLB invalidate. + */ +typedef struct +{ + RmPhysAddr pdbAddress; + NvU32 pdbAperture; + NvU32 gfid; + NvU32 regVal; + RMTIMEOUT timeout; +} TLB_INVALIDATE_PARAMS; + +typedef enum +{ + NON_REPLAYABLE_FAULT_BUFFER = 0, + REPLAYABLE_FAULT_BUFFER, + //this should always be the last entry + NUM_FAULT_BUFFERS +} FAULT_BUFFER_TYPE; + +/*! + * This structure holds information about a page + * of memory backing the fault buffer. + */ +typedef struct +{ + /*! Virtual address of this page */ + NvP64 pAddress; + + /*! Cookie returned by memdescMap() */ + NvP64 pPriv; +} GMMU_FAULT_BUFFER_PAGE; + +/*! + * This structure holds the information about MMU HW Fault buffer which is mapped on BAR2 + * and is utilized by MMU for reporting MMU faults to SW + */ +struct HW_FAULT_BUFFER +{ + NvU64 bar2FaultBufferAddr; + MEMORY_DESCRIPTOR *pFaultBufferMemDesc; + /*! + * cookie that is stored for the CPU mapping + */ + NvP64 hCpuFaultBuffer; + NvP64 kernelVaddr; + + GMMU_FAULT_BUFFER_PAGE *pBufferPages; + + NvU32 cachedGetIndex; + + /*! + * cached fault buffer size + */ + NvU32 faultBufferSize; +}; + +/*! + * This structure holds information about the client shadow fault buffer. + */ +typedef struct +{ + /*! + * Pointer to circular queue structure shared by the RM with a + * privileged client, used as the shadow fault buffer for holding + * non-replayable faults. + * This structure is shared between CPU-RM and GSP-RM in GSP + * enabled driver. + */ + NvP64 pQueue; + + /*! Memory descriptors associated with the queue. */ + MEMORY_DESCRIPTOR *pQueueMemDesc; + + NvP64 pQueueAddress; + + /*! + * Execution context for the queue. Holds environment specific + * data that enable queue usage + */ + QueueContext queueContext; + + /*! Cookie returned by memdescMap() */ + NvP64 pQueuePriv; + + /*! Memory descriptor associated with the buffer. */ + MEMORY_DESCRIPTOR *pBufferMemDesc; + + NvP64 pBufferAddress; + + /*! 
Cookie returned by memdescMap() */ + NvP64 pBufferPriv; + + /*! GSP only split mapping of the buffer. */ + GMMU_FAULT_BUFFER_PAGE *pBufferPages; + + NvU32 numBufferPages; +} GMMU_CLIENT_SHADOW_FAULT_BUFFER; + +/*! + * Top level structure containing all dataStructures used in MMU fault handling. + */ +struct GMMU_FAULT_BUFFER +{ + struct HW_FAULT_BUFFER hwFaultBuffers[NUM_FAULT_BUFFERS]; + + /*! + * Unique client and object handle stored + * In VOLTA this is for MMU_FAULT_BUFFER, in PASCAL for MAXWELL_FAULT_BUFFER_A + */ + NvHandle hFaultBufferClient; + NvHandle hFaultBufferObject; + + /*! + * Pointer to Circular Queue structure used as shadow fault buffer for + * holding fatal fault packets serviced by RM + */ + NvP64 pRmShadowFaultBuffer; + + /*! + * Client shadow fault buffer data and pointer protected by gpu locks. + */ + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + GMMU_CLIENT_SHADOW_FAULT_BUFFER clientShadowFaultBuffer; + + /*! + * SpinLock to protect shadow buffer pointers + */ + PORT_SPINLOCK *pShadowFaultBufLock; + + /*! + * Flag stating fatalfault interrupt pending + */ + NvS32 fatalFaultIntrPending; +}; + +typedef struct GMMU_FAULT_PACKET +{ + // 32 bytes MMU fault packet + NvU8 faultPacket[NVC369_BUF_SIZE]; +} GMMU_FAULT_PACKET; + +// Initialize Circular Queue for MMU Shadow fault buffer +MAKE_QUEUE_CIRCULAR(GMMU_SHADOW_FAULT_BUF, GMMU_FAULT_PACKET); + + +/*! + * Structure that holds different parameters passed by an engine to kgmmuInstBlkInit + * for initializing their instance blocks. + */ +typedef struct +{ + NvBool bIsClientAdmin; + NvBool bIsFaultReplayable; + /* + * Defer the bus flush during the instance block init. + * If this field is set, the kgmmuInstBlkInit() routine won't do flush after the CPU writes. + * The caller of the kgmmuInstBlkInit() function has to explicit flush. + * This is useful if the caller does back to back updates to instance block. + * For e.g. Subcontext array init during channel setup. + */ + NvBool bDeferFlush; + NvU64 uvmKernelPrivRegion; + + // Instance block is being updated for a zombie subcontext. + NvBool bIsZombieSubctx; + NvU8 *pInstBlk; // VA of instance block. 
+} INST_BLK_INIT_PARAMS, *PINST_BLK_INIT_PARAMS; + +#define VMMU_MAX_GFID 64 + +#ifdef NVOC_KERN_GMMU_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelGmmu { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct IntrService __nvoc_base_IntrService; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct IntrService *__nvoc_pbase_IntrService; + struct KernelGmmu *__nvoc_pbase_KernelGmmu; + NV_STATUS (*__kgmmuConstructEngine__)(OBJGPU *, struct KernelGmmu *, ENGDESCRIPTOR); + NV_STATUS (*__kgmmuStateInitLocked__)(OBJGPU *, struct KernelGmmu *); + NV_STATUS (*__kgmmuStatePostLoad__)(OBJGPU *, struct KernelGmmu *, NvU32); + void (*__kgmmuStateDestroy__)(OBJGPU *, struct KernelGmmu *); + void (*__kgmmuRegisterIntrService__)(OBJGPU *, struct KernelGmmu *, IntrServiceRecord *); + NvU32 (*__kgmmuServiceInterrupt__)(OBJGPU *, struct KernelGmmu *, IntrServiceServiceInterruptArguments *); + NV_STATUS (*__kgmmuInstBlkVaLimitGet__)(struct KernelGmmu *, struct OBJVASPACE *, NvU32, INST_BLK_INIT_PARAMS *, NvU32 *, NvU64 *); + NvU32 (*__kgmmuSetTlbInvalidateMembarWarParameters__)(OBJGPU *, struct KernelGmmu *, TLB_INVALIDATE_PARAMS *); + NV_STATUS (*__kgmmuSetTlbInvalidationScope__)(OBJGPU *, struct KernelGmmu *, NvU32, TLB_INVALIDATE_PARAMS *); + void (*__kgmmuFmtInitLevels__)(struct KernelGmmu *, MMU_FMT_LEVEL *, const NvU32, const NvU32, const NvU32); + NV_STATUS (*__kgmmuSetupWarForBug2720120__)(struct KernelGmmu *, GMMU_FMT_FAMILY *); + NV_STATUS (*__kgmmuReconcileTunableState__)(POBJGPU, struct KernelGmmu *, void *); + NV_STATUS (*__kgmmuStateLoad__)(POBJGPU, struct KernelGmmu *, NvU32); + NV_STATUS (*__kgmmuStateUnload__)(POBJGPU, struct KernelGmmu *, NvU32); + NV_STATUS (*__kgmmuServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelGmmu *, IntrServiceServiceNotificationInterruptArguments *); + NV_STATUS (*__kgmmuStatePreLoad__)(POBJGPU, struct KernelGmmu *, NvU32); + NV_STATUS (*__kgmmuStatePostUnload__)(POBJGPU, struct KernelGmmu *, NvU32); + NV_STATUS (*__kgmmuStatePreUnload__)(POBJGPU, struct KernelGmmu *, NvU32); + NV_STATUS (*__kgmmuStateInitUnlocked__)(POBJGPU, struct KernelGmmu *); + void (*__kgmmuInitMissing__)(POBJGPU, struct KernelGmmu *); + NV_STATUS (*__kgmmuStatePreInitLocked__)(POBJGPU, struct KernelGmmu *); + NV_STATUS (*__kgmmuStatePreInitUnlocked__)(POBJGPU, struct KernelGmmu *); + NV_STATUS (*__kgmmuGetTunableState__)(POBJGPU, struct KernelGmmu *, void *); + NV_STATUS (*__kgmmuCompareTunableState__)(POBJGPU, struct KernelGmmu *, void *, void *); + void (*__kgmmuFreeTunableState__)(POBJGPU, struct KernelGmmu *, void *); + NvBool (*__kgmmuClearInterrupt__)(struct OBJGPU *, struct KernelGmmu *, IntrServiceClearInterruptArguments *); + NV_STATUS (*__kgmmuAllocTunableState__)(POBJGPU, struct KernelGmmu *, void **); + NV_STATUS (*__kgmmuSetTunableState__)(POBJGPU, struct KernelGmmu *, void *); + NvBool (*__kgmmuIsPresent__)(POBJGPU, struct KernelGmmu *); + NvBool PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED; + NvBool PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED; + const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo; + GMMU_FMT_FAMILY *pFmtFamilies[2]; + NvU32 defaultBigPageSize; + NvU32 PDEAperture; + NvU32 PDEAttr; + NvU32 PDEBAR1Aperture; + NvU32 PDEBAR1Attr; + NvU32 PTEAperture; + NvU32 PTEAttr; + NvU32 PTEBAR1Aperture; + NvU32 PTEBAR1Attr; + NvU32 overrideBigPageSize; + NvBool bEnablePerVaspaceBigPage; + 
NvBool bIgnoreHubTlbInvalidate; + NvU64 maxVASize; + struct NV_FIELD_ENUM_ENTRY pdeApertures[5]; + struct NV_FIELD_ENUM_ENTRY pteApertures[5]; + MEMORY_DESCRIPTOR *pWarSmallPageTable; + MEMORY_DESCRIPTOR *pWarPageDirectory0; + struct GMMU_FAULT_BUFFER mmuFaultBuffer[64]; + NvU32 uvmSharedIntrRmOwnsMask; + NvU64 sysmemBaseAddress; + NvBool bHugePageSupported; + NvBool bPageSize512mbSupported; + NvBool bBug2720120WarEnabled; + NvBool bVaspaceInteropSupported; +}; + +#ifndef __NVOC_CLASS_KernelGmmu_TYPEDEF__ +#define __NVOC_CLASS_KernelGmmu_TYPEDEF__ +typedef struct KernelGmmu KernelGmmu; +#endif /* __NVOC_CLASS_KernelGmmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGmmu +#define __nvoc_class_id_KernelGmmu 0x29362f +#endif /* __nvoc_class_id_KernelGmmu */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGmmu; + +#define __staticCast_KernelGmmu(pThis) \ + ((pThis)->__nvoc_pbase_KernelGmmu) + +#ifdef __nvoc_kern_gmmu_h_disabled +#define __dynamicCast_KernelGmmu(pThis) ((KernelGmmu*)NULL) +#else //__nvoc_kern_gmmu_h_disabled +#define __dynamicCast_KernelGmmu(pThis) \ + ((KernelGmmu*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGmmu))) +#endif //__nvoc_kern_gmmu_h_disabled + +#define PDB_PROP_KGMMU_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KGMMU_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_CAST +#define PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED_BASE_NAME PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED +#define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_CAST +#define PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED_BASE_NAME PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED + +NV_STATUS __nvoc_objCreateDynamic_KernelGmmu(KernelGmmu**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelGmmu(KernelGmmu**, Dynamic*, NvU32); +#define __objCreate_KernelGmmu(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelGmmu((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kgmmuConstructEngine(pGpu, pKernelGmmu, arg0) kgmmuConstructEngine_DISPATCH(pGpu, pKernelGmmu, arg0) +#define kgmmuStateInitLocked(pGpu, pKernelGmmu) kgmmuStateInitLocked_DISPATCH(pGpu, pKernelGmmu) +#define kgmmuStatePostLoad(pGpu, pKernelGmmu, flags) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, flags) +#define kgmmuStatePostLoad_HAL(pGpu, pKernelGmmu, flags) kgmmuStatePostLoad_DISPATCH(pGpu, pKernelGmmu, flags) +#define kgmmuStateDestroy(pGpu, pKernelGmmu) kgmmuStateDestroy_DISPATCH(pGpu, pKernelGmmu) +#define kgmmuRegisterIntrService(pGpu, pKernelGmmu, arg0) kgmmuRegisterIntrService_DISPATCH(pGpu, pKernelGmmu, arg0) +#define kgmmuServiceInterrupt(pGpu, pKernelGmmu, pParams) kgmmuServiceInterrupt_DISPATCH(pGpu, pKernelGmmu, pParams) +#define kgmmuInstBlkVaLimitGet(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) +#define kgmmuInstBlkVaLimitGet_HAL(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) kgmmuInstBlkVaLimitGet_DISPATCH(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData) +#define kgmmuSetTlbInvalidateMembarWarParameters(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams) +#define kgmmuSetTlbInvalidateMembarWarParameters_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(pGpu, pKernelGmmu, pParams) +#define kgmmuSetTlbInvalidationScope(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, 
pKernelGmmu, flags, pParams) +#define kgmmuSetTlbInvalidationScope_HAL(pGpu, pKernelGmmu, flags, pParams) kgmmuSetTlbInvalidationScope_DISPATCH(pGpu, pKernelGmmu, flags, pParams) +#define kgmmuFmtInitLevels(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift) +#define kgmmuFmtInitLevels_HAL(pKernelGmmu, pLevels, numLevels, version, bigPageShift) kgmmuFmtInitLevels_DISPATCH(pKernelGmmu, pLevels, numLevels, version, bigPageShift) +#define kgmmuSetupWarForBug2720120(pKernelGmmu, pFam) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu, pFam) +#define kgmmuSetupWarForBug2720120_HAL(pKernelGmmu, pFam) kgmmuSetupWarForBug2720120_DISPATCH(pKernelGmmu, pFam) +#define kgmmuReconcileTunableState(pGpu, pEngstate, pTunableState) kgmmuReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgmmuStateLoad(pGpu, pEngstate, arg0) kgmmuStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgmmuStateUnload(pGpu, pEngstate, arg0) kgmmuStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgmmuServiceNotificationInterrupt(pGpu, pIntrService, pParams) kgmmuServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define kgmmuStatePreLoad(pGpu, pEngstate, arg0) kgmmuStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgmmuStatePostUnload(pGpu, pEngstate, arg0) kgmmuStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgmmuStatePreUnload(pGpu, pEngstate, arg0) kgmmuStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgmmuStateInitUnlocked(pGpu, pEngstate) kgmmuStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kgmmuInitMissing(pGpu, pEngstate) kgmmuInitMissing_DISPATCH(pGpu, pEngstate) +#define kgmmuStatePreInitLocked(pGpu, pEngstate) kgmmuStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kgmmuStatePreInitUnlocked(pGpu, pEngstate) kgmmuStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kgmmuGetTunableState(pGpu, pEngstate, pTunableState) kgmmuGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgmmuCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kgmmuCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kgmmuFreeTunableState(pGpu, pEngstate, pTunableState) kgmmuFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgmmuClearInterrupt(pGpu, pIntrService, pParams) kgmmuClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define kgmmuAllocTunableState(pGpu, pEngstate, ppTunableState) kgmmuAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kgmmuSetTunableState(pGpu, pEngstate, pTunableState) kgmmuSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgmmuIsPresent(pGpu, pEngstate) kgmmuIsPresent_DISPATCH(pGpu, pEngstate) +NvU32 kgmmuGetMaxBigPageSize_GM107(struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU32 kgmmuGetMaxBigPageSize(struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetMaxBigPageSize(pKernelGmmu) kgmmuGetMaxBigPageSize_GM107(pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuGetMaxBigPageSize_HAL(pKernelGmmu) kgmmuGetMaxBigPageSize(pKernelGmmu) + +static inline NvU32 kgmmuGetVaspaceClass_f515df(struct KernelGmmu *pKernelGmmu) { + return (37105); +} + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU32 kgmmuGetVaspaceClass(struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 
0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetVaspaceClass(pKernelGmmu) kgmmuGetVaspaceClass_f515df(pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuGetVaspaceClass_HAL(pKernelGmmu) kgmmuGetVaspaceClass(pKernelGmmu) + +NV_STATUS kgmmuInstBlkAtsGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuInstBlkAtsGet(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxid, NvU32 *pOffset, NvU32 *pData) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet_GV100(pKernelGmmu, pVAS, subctxid, pOffset, pData) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuInstBlkAtsGet_HAL(pKernelGmmu, pVAS, subctxid, pOffset, pData) kgmmuInstBlkAtsGet(pKernelGmmu, pVAS, subctxid, pOffset, pData) + +NV_STATUS kgmmuInstBlkPageDirBaseGet_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuInstBlkPageDirBaseGet(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet_GV100(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuInstBlkPageDirBaseGet_HAL(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) kgmmuInstBlkPageDirBaseGet(pGpu, pKernelGmmu, pVAS, pParams, subctxid, pOffsetLo, pDataLo, pOffsetHi, pDataHi) + +NvU32 kgmmuGetPDBAllocSize_GP100(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg0, NvU64 arg1); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU32 kgmmuGetPDBAllocSize(struct KernelGmmu *pKernelGmmu, const MMU_FMT_LEVEL *arg0, NvU64 arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetPDBAllocSize(pKernelGmmu, arg0, arg1) kgmmuGetPDBAllocSize_GP100(pKernelGmmu, arg0, arg1) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuGetPDBAllocSize_HAL(pKernelGmmu, arg0, arg1) kgmmuGetPDBAllocSize(pKernelGmmu, arg0, arg1) + +NvU32 kgmmuGetBigPageSize_GM107(struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU32 kgmmuGetBigPageSize(struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetBigPageSize(pKernelGmmu) kgmmuGetBigPageSize_GM107(pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuGetBigPageSize_HAL(pKernelGmmu) kgmmuGetBigPageSize(pKernelGmmu) + +NV_STATUS kgmmuInitStaticInfo_KERNEL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuInitStaticInfo(OBJGPU *pGpu, struct KernelGmmu 
*pKernelGmmu, NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuInitStaticInfo(pGpu, pKernelGmmu, pStaticInfo) kgmmuInitStaticInfo_KERNEL(pGpu, pKernelGmmu, pStaticInfo) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuInitStaticInfo_HAL(pGpu, pKernelGmmu, pStaticInfo) kgmmuInitStaticInfo(pGpu, pKernelGmmu, pStaticInfo) + +void kgmmuFmtInitCaps_GM20X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFmtInitCaps(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT *pFmt) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInitCaps(pKernelGmmu, pFmt) kgmmuFmtInitCaps_GM20X(pKernelGmmu, pFmt) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtInitCaps_HAL(pKernelGmmu, pFmt) kgmmuFmtInitCaps(pKernelGmmu, pFmt) + +void kgmmuFmtInitPteApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFmtInitPteApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInitPteApertures(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures_GM10X(pKernelGmmu, pEntries) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtInitPteApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPteApertures(pKernelGmmu, pEntries) + +void kgmmuFmtInitPdeApertures_GM10X(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFmtInitPdeApertures(struct KernelGmmu *pKernelGmmu, struct NV_FIELD_ENUM_ENTRY *pEntries) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures_GM10X(pKernelGmmu, pEntries) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtInitPdeApertures_HAL(pKernelGmmu, pEntries) kgmmuFmtInitPdeApertures(pKernelGmmu, pEntries) + +void kgmmuInvalidateTlb_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuInvalidateTlb(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pRootPageDir, NvU32 vaspaceFlags, VAS_PTE_UPDATE_TYPE update_type, NvU32 gfid, NvU32 invalidation_scope) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb_GM107(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuInvalidateTlb_HAL(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) kgmmuInvalidateTlb(pGpu, pKernelGmmu, pRootPageDir, vaspaceFlags, update_type, gfid, invalidation_scope) + +NV_STATUS kgmmuCheckPendingInvalidates_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, RMTIMEOUT *pTimeOut, NvU32 gfid); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuCheckPendingInvalidates(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, 
RMTIMEOUT *pTimeOut, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates_TU102(pGpu, pKernelGmmu, pTimeOut, gfid) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu, pTimeOut, gfid) kgmmuCheckPendingInvalidates(pGpu, pKernelGmmu, pTimeOut, gfid) + +NV_STATUS kgmmuCommitTlbInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuCommitTlbInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate_TU102(pGpu, pKernelGmmu, pParams) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuCommitTlbInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuCommitTlbInvalidate(pGpu, pKernelGmmu, pParams) + +void kgmmuSetPdbToInvalidate_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuSetPdbToInvalidate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate_TU102(pGpu, pKernelGmmu, pParams) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuSetPdbToInvalidate_HAL(pGpu, pKernelGmmu, pParams) kgmmuSetPdbToInvalidate(pGpu, pKernelGmmu, pParams) + +void kgmmuFmtInitPteComptagLine_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFmtInitPteComptagLine(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInitPteComptagLine(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine_TU10X(pKernelGmmu, pPte, version) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtInitPteComptagLine_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPteComptagLine(pKernelGmmu, pPte, version) + +void kgmmuFmtInitPeerPteFld_TU10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFmtInitPeerPteFld(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInitPeerPteFld(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld_TU10X(pKernelGmmu, pPte, version) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtInitPeerPteFld_HAL(pKernelGmmu, pPte, version) kgmmuFmtInitPeerPteFld(pKernelGmmu, pPte, version) + +void kgmmuFmtInitPte_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, const NvBool bUnifiedAperture); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFmtInitPte(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PTE *pPte, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPteApertures, 
const NvBool bUnifiedAperture) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInitPte(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte_GP10X(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtInitPte_HAL(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) kgmmuFmtInitPte(pKernelGmmu, pPte, version, pPteApertures, bUnifiedAperture) + +void kgmmuFmtInitPde_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFmtInitPde(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE *pPde, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInitPde(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde_GP10X(pKernelGmmu, pPde, version, pPdeApertures) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtInitPde_HAL(pKernelGmmu, pPde, version, pPdeApertures) kgmmuFmtInitPde(pKernelGmmu, pPde, version, pPdeApertures) + +NvBool kgmmuFmtIsVersionSupported_GP10X(struct KernelGmmu *pKernelGmmu, NvU32 version); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvBool kgmmuFmtIsVersionSupported(struct KernelGmmu *pKernelGmmu, NvU32 version) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtIsVersionSupported(pKernelGmmu, version) kgmmuFmtIsVersionSupported_GP10X(pKernelGmmu, version) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, version) kgmmuFmtIsVersionSupported(pKernelGmmu, version) + +void kgmmuFmtInitPdeMulti_GP10X(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFmtInitPdeMulti(struct KernelGmmu *pKernelGmmu, struct GMMU_FMT_PDE_MULTI *pPdeMulti, const NvU32 version, const struct NV_FIELD_ENUM_ENTRY *pPdeApertures) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInitPdeMulti(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti_GP10X(pKernelGmmu, pPdeMulti, version, pPdeApertures) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtInitPdeMulti_HAL(pKernelGmmu, pPdeMulti, version, pPdeApertures) kgmmuFmtInitPdeMulti(pKernelGmmu, pPdeMulti, version, pPdeApertures) + +void kgmmuDetermineMaxVASize_GM107(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuDetermineMaxVASize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuDetermineMaxVASize(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize_GM107(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuDetermineMaxVASize_HAL(pGpu, pKernelGmmu) kgmmuDetermineMaxVASize(pGpu, pKernelGmmu) + +NV_STATUS kgmmuFmtFamiliesInit_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFmtFamiliesInit(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtFamiliesInit(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit_TU102(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuFmtFamiliesInit_HAL(pGpu, pKernelGmmu) kgmmuFmtFamiliesInit(pGpu, pKernelGmmu) + +static inline NV_STATUS kgmmuTranslatePtePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) { + return NV_OK; +} + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuTranslatePtePcfFromSw(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuTranslatePtePcfFromSw(pKernelGmmu, arg0, arg1) kgmmuTranslatePtePcfFromSw_56cd7a(pKernelGmmu, arg0, arg1) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuTranslatePtePcfFromSw_HAL(pKernelGmmu, arg0, arg1) kgmmuTranslatePtePcfFromSw(pKernelGmmu, arg0, arg1) + +static inline NV_STATUS kgmmuTranslatePtePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2) { + return NV_OK; +} + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuTranslatePtePcfFromHw(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvBool arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuTranslatePtePcfFromHw(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePtePcfFromHw_56cd7a(pKernelGmmu, arg0, arg1, arg2) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuTranslatePtePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePtePcfFromHw(pKernelGmmu, arg0, arg1, arg2) + +static inline NV_STATUS kgmmuTranslatePdePcfFromSw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) { + return NV_OK; +} + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuTranslatePdePcfFromSw(struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuTranslatePdePcfFromSw(pKernelGmmu, arg0, arg1) kgmmuTranslatePdePcfFromSw_56cd7a(pKernelGmmu, arg0, arg1) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuTranslatePdePcfFromSw_HAL(pKernelGmmu, arg0, arg1) kgmmuTranslatePdePcfFromSw(pKernelGmmu, arg0, arg1) + +static inline NV_STATUS kgmmuTranslatePdePcfFromHw_56cd7a(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2) { + return NV_OK; +} + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuTranslatePdePcfFromHw(struct KernelGmmu *pKernelGmmu, NvU32 arg0, GMMU_APERTURE arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuTranslatePdePcfFromHw(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw_56cd7a(pKernelGmmu, arg0, arg1, arg2) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuTranslatePdePcfFromHw_HAL(pKernelGmmu, arg0, arg1, arg2) kgmmuTranslatePdePcfFromHw(pKernelGmmu, arg0, arg1, arg2) + +NV_STATUS kgmmuGetFaultRegisterMappings_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS 
kgmmuGetFaultRegisterMappings(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvP64 *pFaultBufferGet, NvP64 *pFaultBufferPut, NvP64 *pFaultBufferInfo, NvP64 *faultIntr, NvP64 *faultIntrSet, NvP64 *faultIntrClear, NvU32 *faultMask, NvP64 *pPrefetchCtrl) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetFaultRegisterMappings(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings_TU102(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuGetFaultRegisterMappings_HAL(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) kgmmuGetFaultRegisterMappings(pGpu, pKernelGmmu, index, pFaultBufferGet, pFaultBufferPut, pFaultBufferInfo, faultIntr, faultIntrSet, faultIntrClear, faultMask, pPrefetchCtrl) + +const char *kgmmuGetFaultTypeString_GP100(struct KernelGmmu *pKernelGmmu, NvU32 faultType); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline const char *kgmmuGetFaultTypeString(struct KernelGmmu *pKernelGmmu, NvU32 faultType) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NULL; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetFaultTypeString(pKernelGmmu, faultType) kgmmuGetFaultTypeString_GP100(pKernelGmmu, faultType) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuGetFaultTypeString_HAL(pKernelGmmu, faultType) kgmmuGetFaultTypeString(pKernelGmmu, faultType) + +NV_STATUS kgmmuChangeReplayableFaultOwnership_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuChangeReplayableFaultOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg0) kgmmuChangeReplayableFaultOwnership_GV100(pGpu, pKernelGmmu, arg0) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuChangeReplayableFaultOwnership_HAL(pGpu, pKernelGmmu, arg0) kgmmuChangeReplayableFaultOwnership(pGpu, pKernelGmmu, arg0) + +NV_STATUS kgmmuServiceReplayableFault_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuServiceReplayableFault(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuServiceReplayableFault(pGpu, pKernelGmmu) kgmmuServiceReplayableFault_TU102(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuServiceReplayableFault_HAL(pGpu, pKernelGmmu) kgmmuServiceReplayableFault(pGpu, pKernelGmmu) + +NV_STATUS kgmmuReportFaultBufferOverflow_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuReportFaultBufferOverflow(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu) 
kgmmuReportFaultBufferOverflow_GV100(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuReportFaultBufferOverflow_HAL(pGpu, pKernelGmmu) kgmmuReportFaultBufferOverflow(pGpu, pKernelGmmu) + +NV_STATUS kgmmuReadFaultBufferGetPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuReadFaultBufferGetPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pGetOffset, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg0) kgmmuReadFaultBufferGetPtr_TU102(pGpu, pKernelGmmu, index, pGetOffset, arg0) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, index, pGetOffset, arg0) kgmmuReadFaultBufferGetPtr(pGpu, pKernelGmmu, index, pGetOffset, arg0) + +NV_STATUS kgmmuReadFaultBufferPutPtr_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuReadFaultBufferPutPtr(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 index, NvU32 *pPutOffset) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset) kgmmuReadFaultBufferPutPtr_TU102(pGpu, pKernelGmmu, index, pPutOffset) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, index, pPutOffset) kgmmuReadFaultBufferPutPtr(pGpu, pKernelGmmu, index, pPutOffset) + +NvU32 kgmmuReadMmuFaultBufferSize_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 gfid); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU32 kgmmuReadMmuFaultBufferSize(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, gfid) kgmmuReadMmuFaultBufferSize_TU102(pGpu, pKernelGmmu, arg0, gfid) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuReadMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, arg0, gfid) kgmmuReadMmuFaultBufferSize(pGpu, pKernelGmmu, arg0, gfid) + +NvU32 kgmmuReadMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU32 kgmmuReadMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus_TU102(pGpu, pKernelGmmu, gfid) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, gfid) kgmmuReadMmuFaultStatus(pGpu, pKernelGmmu, gfid) + +void kgmmuWriteMmuFaultStatus_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuWriteMmuFaultStatus(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus_TU102(pGpu, pKernelGmmu, arg0) +#endif 
//__nvoc_kern_gmmu_h_disabled + +#define kgmmuWriteMmuFaultStatus_HAL(pGpu, pKernelGmmu, arg0) kgmmuWriteMmuFaultStatus(pGpu, pKernelGmmu, arg0) + +NvBool kgmmuIsNonReplayableFaultPending_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvBool kgmmuIsNonReplayableFaultPending(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu) kgmmuIsNonReplayableFaultPending_TU102(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu) kgmmuIsNonReplayableFaultPending(pGpu, pKernelGmmu) + +NV_STATUS kgmmuClientShadowFaultBufferAlloc_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuClientShadowFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu) kgmmuClientShadowFaultBufferAlloc_GV100(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuClientShadowFaultBufferAlloc_HAL(pGpu, pKernelGmmu) kgmmuClientShadowFaultBufferAlloc(pGpu, pKernelGmmu) + +NV_STATUS kgmmuClientShadowFaultBufferFree_GV100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuClientShadowFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu) kgmmuClientShadowFaultBufferFree_GV100(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuClientShadowFaultBufferFree_HAL(pGpu, pKernelGmmu) kgmmuClientShadowFaultBufferFree(pGpu, pKernelGmmu) + +void kgmmuEncodeSysmemAddrs_GM107(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuEncodeSysmemAddrs(struct KernelGmmu *pKernelGmmu, NvU64 *pAddresses, NvU64 count) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs_GM107(pKernelGmmu, pAddresses, count) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuEncodeSysmemAddrs_HAL(pKernelGmmu, pAddresses, count) kgmmuEncodeSysmemAddrs(pKernelGmmu, pAddresses, count) + +NvU8 kgmmuGetHwPteApertureFromMemdesc_GM107(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU8 kgmmuGetHwPteApertureFromMemdesc(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc_GM107(pKernelGmmu, pDesc) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuGetHwPteApertureFromMemdesc_HAL(pKernelGmmu, pDesc) kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pDesc) + +NvBool kgmmuTestAccessCounterWriteNak_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvBool kgmmuTestAccessCounterWriteNak(OBJGPU *pGpu, struct 
KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak_TU102(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuTestAccessCounterWriteNak_HAL(pGpu, pKernelGmmu) kgmmuTestAccessCounterWriteNak(pGpu, pKernelGmmu) + +NvU32 kgmmuGetGraphicsEngineId_GV100(struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU32 kgmmuGetGraphicsEngineId(struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetGraphicsEngineId(pKernelGmmu) kgmmuGetGraphicsEngineId_GV100(pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuGetGraphicsEngineId_HAL(pKernelGmmu) kgmmuGetGraphicsEngineId(pKernelGmmu) + +NV_STATUS kgmmuEnableNvlinkComputePeerAddressing_GV100(struct KernelGmmu *pKernelGmmu); + +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuEnableNvlinkComputePeerAddressing(struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing_GV100(pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +#define kgmmuEnableNvlinkComputePeerAddressing_HAL(pKernelGmmu) kgmmuEnableNvlinkComputePeerAddressing(pKernelGmmu) + +NV_STATUS kgmmuConstructEngine_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg0); + +static inline NV_STATUS kgmmuConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, ENGDESCRIPTOR arg0) { + return pKernelGmmu->__kgmmuConstructEngine__(pGpu, pKernelGmmu, arg0); +} + +NV_STATUS kgmmuStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +static inline NV_STATUS kgmmuStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->__kgmmuStateInitLocked__(pGpu, pKernelGmmu); +} + +NV_STATUS kgmmuStatePostLoad_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags); + +static inline NV_STATUS kgmmuStatePostLoad_56cd7a(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags) { + return NV_OK; +} + +static inline NV_STATUS kgmmuStatePostLoad_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags) { + return pKernelGmmu->__kgmmuStatePostLoad__(pGpu, pKernelGmmu, flags); +} + +void kgmmuStateDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); + +static inline void kgmmuStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + pKernelGmmu->__kgmmuStateDestroy__(pGpu, pKernelGmmu); +} + +void kgmmuRegisterIntrService_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg0[155]); + +static inline void kgmmuRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceRecord arg0[155]) { + pKernelGmmu->__kgmmuRegisterIntrService__(pGpu, pKernelGmmu, arg0); +} + +NvU32 kgmmuServiceInterrupt_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams); + +static inline NvU32 kgmmuServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, IntrServiceServiceInterruptArguments *pParams) { + return pKernelGmmu->__kgmmuServiceInterrupt__(pGpu, pKernelGmmu, pParams); +} + +NV_STATUS kgmmuInstBlkVaLimitGet_GV100(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE 
*pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData); + +static inline NV_STATUS kgmmuInstBlkVaLimitGet_f03539(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) { + *pOffset = 0; + return NV_OK; +} + +static inline NV_STATUS kgmmuInstBlkVaLimitGet_DISPATCH(struct KernelGmmu *pKernelGmmu, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData) { + return pKernelGmmu->__kgmmuInstBlkVaLimitGet__(pKernelGmmu, pVAS, subctxId, pParams, pOffset, pData); +} + +NvU32 kgmmuSetTlbInvalidateMembarWarParameters_TU102(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams); + +static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_4a4dee(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) { + return 0; +} + +static inline NvU32 kgmmuSetTlbInvalidateMembarWarParameters_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, TLB_INVALIDATE_PARAMS *pParams) { + return pKernelGmmu->__kgmmuSetTlbInvalidateMembarWarParameters__(pGpu, pKernelGmmu, pParams); +} + +NV_STATUS kgmmuSetTlbInvalidationScope_GA100(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams); + +static inline NV_STATUS kgmmuSetTlbInvalidationScope_46f6a7(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kgmmuSetTlbInvalidationScope_DISPATCH(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 flags, TLB_INVALIDATE_PARAMS *pParams) { + return pKernelGmmu->__kgmmuSetTlbInvalidationScope__(pGpu, pKernelGmmu, flags, pParams); +} + +void kgmmuFmtInitLevels_GP10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift); + +void kgmmuFmtInitLevels_GA10X(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift); + +static inline void kgmmuFmtInitLevels_b3696a(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift) { + return; +} + +static inline void kgmmuFmtInitLevels_DISPATCH(struct KernelGmmu *pKernelGmmu, MMU_FMT_LEVEL *pLevels, const NvU32 numLevels, const NvU32 version, const NvU32 bigPageShift) { + pKernelGmmu->__kgmmuFmtInitLevels__(pKernelGmmu, pLevels, numLevels, version, bigPageShift); +} + +NV_STATUS kgmmuSetupWarForBug2720120_GA100(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam); + +static inline NV_STATUS kgmmuSetupWarForBug2720120_56cd7a(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam) { + return NV_OK; +} + +static inline NV_STATUS kgmmuSetupWarForBug2720120_DISPATCH(struct KernelGmmu *pKernelGmmu, GMMU_FMT_FAMILY *pFam) { + return pKernelGmmu->__kgmmuSetupWarForBug2720120__(pKernelGmmu, pFam); +} + +static inline NV_STATUS kgmmuReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunableState) { + return pEngstate->__kgmmuReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgmmuStateLoad_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return pEngstate->__kgmmuStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgmmuStateUnload_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return pEngstate->__kgmmuStateUnload__(pGpu, pEngstate, arg0); +} + +static 
inline NV_STATUS kgmmuServiceNotificationInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return pIntrService->__kgmmuServiceNotificationInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS kgmmuStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return pEngstate->__kgmmuStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgmmuStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return pEngstate->__kgmmuStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgmmuStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, NvU32 arg0) { + return pEngstate->__kgmmuStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgmmuStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + return pEngstate->__kgmmuStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kgmmuInitMissing_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + pEngstate->__kgmmuInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kgmmuStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + return pEngstate->__kgmmuStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgmmuStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + return pEngstate->__kgmmuStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgmmuGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunableState) { + return pEngstate->__kgmmuGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgmmuCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kgmmuCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kgmmuFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunableState) { + pEngstate->__kgmmuFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kgmmuClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGmmu *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__kgmmuClearInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS kgmmuAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, void **ppTunableState) { + return pEngstate->__kgmmuAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kgmmuSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate, void *pTunableState) { + return pEngstate->__kgmmuSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kgmmuIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGmmu *pEngstate) { + return pEngstate->__kgmmuIsPresent__(pGpu, pEngstate); +} + +static inline NvU32 kgmmuGetPDEAperture(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->PDEAperture; +} + +static inline NvU32 kgmmuGetPTEAperture(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->PTEAperture; +} + +static inline NvU32 kgmmuGetPDEBAR1Aperture(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->PDEBAR1Aperture; +} + +static inline NvU32 kgmmuGetPTEBAR1Aperture(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->PTEBAR1Aperture; +} + +static inline NvU32 kgmmuGetPDEBAR1Attr(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->PDEBAR1Attr; +} + +static inline NvU32 
kgmmuGetPTEBAR1Attr(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->PTEBAR1Attr; +} + +static inline NvU32 kgmmuGetPDEAttr(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->PDEAttr; +} + +static inline NvU32 kgmmuGetPTEAttr(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->PTEAttr; +} + +static inline NvU32 kgmmuGetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->overrideBigPageSize; +} + +static inline void kgmmuSetBigPageSizeOverride(struct KernelGmmu *pKernelGmmu, NvU32 bigPageSize) { + pKernelGmmu->overrideBigPageSize = bigPageSize; +} + +static inline NvBool kgmmuIsPerVaspaceBigPageEn(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->bEnablePerVaspaceBigPage; +} + +static inline NvBool kgmmuIsIgnoreHubTlbInvalidate(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->bIgnoreHubTlbInvalidate; +} + +static inline NvBool kgmmuIsHugePageSupported(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->bHugePageSupported; +} + +static inline NvBool kgmmuIsPageSize512mbSupported(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->bPageSize512mbSupported; +} + +static inline NvBool kgmmuIsBug2720120WarEnabled(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->bBug2720120WarEnabled; +} + +static inline NvBool kgmmuIsVaspaceInteropSupported(struct KernelGmmu *pKernelGmmu) { + return pKernelGmmu->bVaspaceInteropSupported; +} + +void kgmmuDestruct_IMPL(struct KernelGmmu *pKernelGmmu); +#define __nvoc_kgmmuDestruct(pKernelGmmu) kgmmuDestruct_IMPL(pKernelGmmu) +NV_STATUS kgmmuFmtInit_IMPL(struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFmtInit(struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtInit(pKernelGmmu) kgmmuFmtInit_IMPL(pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +GMMU_APERTURE kgmmuGetMemAperture_IMPL(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline GMMU_APERTURE kgmmuGetMemAperture(struct KernelGmmu *pKernelGmmu, MEMORY_DESCRIPTOR *pMemDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + GMMU_APERTURE ret; + portMemSet(&ret, 0, sizeof(GMMU_APERTURE)); + return ret; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetMemAperture(pKernelGmmu, pMemDesc) kgmmuGetMemAperture_IMPL(pKernelGmmu, pMemDesc) +#endif //__nvoc_kern_gmmu_h_disabled + +const GMMU_FMT_FAMILY *kgmmuFmtGetFamily_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline const GMMU_FMT_FAMILY *kgmmuFmtGetFamily(struct KernelGmmu *pKernelGmmu, NvU32 version) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NULL; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtGetFamily(pKernelGmmu, version) kgmmuFmtGetFamily_IMPL(pKernelGmmu, version) +#endif //__nvoc_kern_gmmu_h_disabled + +const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *kgmmuGetStaticInfo(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NULL; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetStaticInfo(pGpu, pKernelGmmu) kgmmuGetStaticInfo_IMPL(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +const struct 
GMMU_FMT *kgmmuFmtGet_IMPL(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline const struct GMMU_FMT *kgmmuFmtGet(struct KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NULL; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtGet(pKernelGmmu, version, bigPageSize) kgmmuFmtGet_IMPL(pKernelGmmu, version, bigPageSize) +#endif //__nvoc_kern_gmmu_h_disabled + +void kgmmuExtractPteInfo_IMPL(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg0, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg1, const struct GMMU_FMT *arg2, const MMU_FMT_LEVEL *arg3); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuExtractPteInfo(struct KernelGmmu *pKernelGmmu, union GMMU_ENTRY_VALUE *arg0, NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *arg1, const struct GMMU_FMT *arg2, const MMU_FMT_LEVEL *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuExtractPteInfo(pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuExtractPteInfo_IMPL(pKernelGmmu, arg0, arg1, arg2, arg3) +#endif //__nvoc_kern_gmmu_h_disabled + +void kgmmuFieldSetKindCompTags_IMPL(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuFieldSetKindCompTags(struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *pFmt, const MMU_FMT_LEVEL *pLevel, const COMPR_INFO *pCompr, NvU64 physAddr, NvU64 surfOffset, NvU32 pteIndex, NvU8 *pEntries) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFieldSetKindCompTags(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries) kgmmuFieldSetKindCompTags_IMPL(pKernelGmmu, pFmt, pLevel, pCompr, physAddr, surfOffset, pteIndex, pEntries) +#endif //__nvoc_kern_gmmu_h_disabled + +NvBool kgmmuFmtIsBigPageSizeSupported_IMPL(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvBool kgmmuFmtIsBigPageSizeSupported(struct KernelGmmu *pKernelGmmu, NvU64 bigPageSize) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtIsBigPageSizeSupported(pKernelGmmu, bigPageSize) kgmmuFmtIsBigPageSizeSupported_IMPL(pKernelGmmu, bigPageSize) +#endif //__nvoc_kern_gmmu_h_disabled + +const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline const struct GMMU_FMT *kgmmuFmtGetLatestSupportedFormat(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NULL; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFmtGetLatestSupportedFormat(pGpu, pKernelGmmu) kgmmuFmtGetLatestSupportedFormat_IMPL(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +NvU32 kgmmuGetMinBigPageSize_IMPL(struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU32 kgmmuGetMinBigPageSize(struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetMinBigPageSize(pKernelGmmu) kgmmuGetMinBigPageSize_IMPL(pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuInstBlkInit_IMPL(struct 
KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuInstBlkInit(struct KernelGmmu *pKernelGmmu, PMEMORY_DESCRIPTOR pInstBlkDesc, struct OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pInstBlkParams) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuInstBlkInit(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams) kgmmuInstBlkInit_IMPL(pKernelGmmu, pInstBlkDesc, pVAS, subctxId, pInstBlkParams) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuFaultBufferReplayableAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFaultBufferReplayableAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvHandle arg0, NvHandle arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFaultBufferReplayableAllocate(pGpu, pKernelGmmu, arg0, arg1) kgmmuFaultBufferReplayableAllocate_IMPL(pGpu, pKernelGmmu, arg0, arg1) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuFaultBufferReplayableDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFaultBufferReplayableDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFaultBufferReplayableDestroy(pGpu, pKernelGmmu) kgmmuFaultBufferReplayableDestroy_IMPL(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuFaultBufferAlloc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFaultBufferAlloc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFaultBufferAlloc(pGpu, pKernelGmmu, arg0, arg1) kgmmuFaultBufferAlloc_IMPL(pGpu, pKernelGmmu, arg0, arg1) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuFaultBufferCreateMemDesc_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU64 arg2, MEMORY_DESCRIPTOR **arg3); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFaultBufferCreateMemDesc(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 arg1, NvU64 arg2, MEMORY_DESCRIPTOR **arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFaultBufferCreateMemDesc(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuFaultBufferCreateMemDesc_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuFaultBufferGetAddressSpace_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1, NvU32 *arg2); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFaultBufferGetAddressSpace(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0, NvU32 *arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define 
kgmmuFaultBufferGetAddressSpace(pGpu, pKernelGmmu, arg0, arg1, arg2) kgmmuFaultBufferGetAddressSpace_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuFaultBufferFree_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFaultBufferFree(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFaultBufferFree(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferFree_IMPL(pGpu, pKernelGmmu, arg0) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuFaultBufferUnregister(pGpu, pKernelGmmu, arg0) kgmmuFaultBufferUnregister_IMPL(pGpu, pKernelGmmu, arg0) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuClientShadowFaultBufferNonreplayableAllocate_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuClientShadowFaultBufferNonreplayableAllocate(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuClientShadowFaultBufferNonreplayableAllocate(pGpu, pKernelGmmu) kgmmuClientShadowFaultBufferNonreplayableAllocate_IMPL(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuClientShadowFaultBufferNonreplayableDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuClientShadowFaultBufferNonreplayableDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuClientShadowFaultBufferNonreplayableDestroy(pGpu, pKernelGmmu) kgmmuClientShadowFaultBufferNonreplayableDestroy_IMPL(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +NV_STATUS kgmmuClientShadowFaultBufferRegister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NV_STATUS kgmmuClientShadowFaultBufferRegister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuClientShadowFaultBufferRegister(pGpu, pKernelGmmu) kgmmuClientShadowFaultBufferRegister_IMPL(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +void kgmmuClientShadowFaultBufferUnregister_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuClientShadowFaultBufferUnregister(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuClientShadowFaultBufferUnregister(pGpu, pKernelGmmu) kgmmuClientShadowFaultBufferUnregister_IMPL(pGpu, pKernelGmmu) +#endif //__nvoc_kern_gmmu_h_disabled + +void kgmmuClientShadowFaultBufferPagesDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, 
NvBool arg0); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuClientShadowFaultBufferPagesDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuClientShadowFaultBufferPagesDestroy(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferPagesDestroy_IMPL(pGpu, pKernelGmmu, arg0) +#endif //__nvoc_kern_gmmu_h_disabled + +void kgmmuClientShadowFaultBufferQueueDestroy_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuClientShadowFaultBufferQueueDestroy(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuClientShadowFaultBufferQueueDestroy(pGpu, pKernelGmmu, arg0) kgmmuClientShadowFaultBufferQueueDestroy_IMPL(pGpu, pKernelGmmu, arg0) +#endif //__nvoc_kern_gmmu_h_disabled + +NvU64 kgmmuGetSizeOfPageTables_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU64 kgmmuGetSizeOfPageTables(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetSizeOfPageTables(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuGetSizeOfPageTables_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) +#endif //__nvoc_kern_gmmu_h_disabled + +NvU64 kgmmuGetSizeOfPageDirs_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU64 kgmmuGetSizeOfPageDirs(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, const struct GMMU_FMT *arg0, NvU64 arg1, NvU64 arg2, NvU64 arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuGetSizeOfPageDirs(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) kgmmuGetSizeOfPageDirs_IMPL(pGpu, pKernelGmmu, arg0, arg1, arg2, arg3) +#endif //__nvoc_kern_gmmu_h_disabled + +GMMU_APERTURE kgmmuGetExternalAllocAperture_IMPL(NvU32 addressSpace); +#define kgmmuGetExternalAllocAperture(addressSpace) kgmmuGetExternalAllocAperture_IMPL(addressSpace) +void kgmmuEncodePhysAddrs_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuEncodePhysAddrs(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 *pAddresses, NvU64 fabricBaseAddress, NvU64 count) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuEncodePhysAddrs(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count) kgmmuEncodePhysAddrs_IMPL(pKernelGmmu, aperture, pAddresses, fabricBaseAddress, count) +#endif //__nvoc_kern_gmmu_h_disabled + +NvU64 kgmmuEncodePhysAddr_IMPL(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline NvU64 kgmmuEncodePhysAddr(struct KernelGmmu *pKernelGmmu, const GMMU_APERTURE aperture, NvU64 physAddr, NvU64 fabricBaseAddress) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); + return 0; +} +#else //__nvoc_kern_gmmu_h_disabled +#define 
kgmmuEncodePhysAddr(pKernelGmmu, aperture, physAddr, fabricBaseAddress) kgmmuEncodePhysAddr_IMPL(pKernelGmmu, aperture, physAddr, fabricBaseAddress) +#endif //__nvoc_kern_gmmu_h_disabled + +void kgmmuAccessCntrChangeIntrOwnership_IMPL(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0); +#ifdef __nvoc_kern_gmmu_h_disabled +static inline void kgmmuAccessCntrChangeIntrOwnership(OBJGPU *pGpu, struct KernelGmmu *pKernelGmmu, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGmmu was disabled!"); +} +#else //__nvoc_kern_gmmu_h_disabled +#define kgmmuAccessCntrChangeIntrOwnership(pGpu, pKernelGmmu, arg0) kgmmuAccessCntrChangeIntrOwnership_IMPL(pGpu, pKernelGmmu, arg0) +#endif //__nvoc_kern_gmmu_h_disabled + +#undef PRIVATE_FIELD + + +// defines for TLB Invalidation scope +#define NV_GMMU_INVAL_SCOPE_ALL_TLBS 0x00000000 +#define NV_GMMU_INVAL_SCOPE_LINK_TLBS 0x00000001 +#define NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS 0x00000002 + +// bit fields for uvmSharedIntrRmOwnsMask +#define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY NVBIT(0) +#define RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_ERROR NVBIT(1) +#define RM_UVM_SHARED_INTR_MASK_MMU_ECC_UNCORRECTED_ERROR_NOTIFY NVBIT(2) +#define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY NVBIT(3) +#define RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_OVERFLOW NVBIT(4) +#define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_NOTIFY NVBIT(5) +#define RM_UVM_SHARED_INTR_MASK_MMU_NONREPLAYABLE_FAULT_OVERFLOW NVBIT(6) +#define RM_UVM_SHARED_INTR_MASK_MMU_OTHER_FAULT_NOTIFY NVBIT(7) +#define RM_UVM_SHARED_INTR_MASK_ALL (NVBIT(8) - 1) + +/*! + * Constants used for UVM mirroring loops. + */ +#define GMMU_USER_PAGE_DIR_INDEX 0 +#define GMMU_KERNEL_PAGE_DIR_INDEX 1 +#define GMMU_MAX_PAGE_DIR_INDEX_COUNT (GMMU_KERNEL_PAGE_DIR_INDEX + 1) + +/*! + * Page table walker callbacks used for map/unmap operations. 
+ */ +extern const MMU_WALK_CALLBACKS g_gmmuWalkCallbacks; +extern const MMU_WALK_CALLBACKS g_bar2WalkCallbacks; +extern const MMU_TRACE_CALLBACKS g_gmmuTraceCallbacks; + +void gmmuMemDescCacheFree(GVAS_GPU_STATE *pGpuState); + +#endif // KERN_GMMU_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERN_GMMU_NVOC_H_ diff --git a/src/nvidia/generated/g_kern_mem_sys_nvoc.c b/src/nvidia/generated/g_kern_mem_sys_nvoc.c new file mode 100644 index 000000000..32e009fb4 --- /dev/null +++ b/src/nvidia/generated/g_kern_mem_sys_nvoc.c @@ -0,0 +1,475 @@ +#define NVOC_KERN_MEM_SYS_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_mem_sys_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7faff1 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMemorySystem; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelMemorySystem(KernelMemorySystem*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelMemorySystem(KernelMemorySystem*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelMemorySystem(KernelMemorySystem*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelMemorySystem(KernelMemorySystem*, RmHalspecOwner* ); +void __nvoc_dtor_KernelMemorySystem(KernelMemorySystem*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelMemorySystem; + +static const struct NVOC_RTTI __nvoc_rtti_KernelMemorySystem_KernelMemorySystem = { + /*pClassDef=*/ &__nvoc_class_def_KernelMemorySystem, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelMemorySystem, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelMemorySystem_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelMemorySystem, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelMemorySystem_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelMemorySystem, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelMemorySystem = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelMemorySystem_KernelMemorySystem, + &__nvoc_rtti_KernelMemorySystem_OBJENGSTATE, + &__nvoc_rtti_KernelMemorySystem_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMemorySystem = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelMemorySystem), + /*classId=*/ classId(KernelMemorySystem), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelMemorySystem", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelMemorySystem, + /*pCastInfo=*/ &__nvoc_castinfo_KernelMemorySystem, + /*pExportInfo=*/ &__nvoc_export_info_KernelMemorySystem +}; + +static NV_STATUS __nvoc_thunk_KernelMemorySystem_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pKernelMemorySystem, ENGDESCRIPTOR arg0) { + return kmemsysConstructEngine(pGpu, (struct KernelMemorySystem *)(((unsigned char *)pKernelMemorySystem) - __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_KernelMemorySystem_engstateStateInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pKernelMemorySystem) { + return 
kmemsysStateInitLocked(pGpu, (struct KernelMemorySystem *)(((unsigned char *)pKernelMemorySystem) - __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelMemorySystem_engstateStatePreLoad(OBJGPU *pGpu, struct OBJENGSTATE *pKernelMemorySystem, NvU32 flags) { + return kmemsysStatePreLoad(pGpu, (struct KernelMemorySystem *)(((unsigned char *)pKernelMemorySystem) - __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), flags); +} + +static void __nvoc_thunk_KernelMemorySystem_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pKernelMemorySystem) { + kmemsysStateDestroy(pGpu, (struct KernelMemorySystem *)(((unsigned char *)pKernelMemorySystem) - __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysReconcileTunableState(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysStateLoad(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysStateUnload(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysStatePostUnload(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysStatePreUnload(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysStateInitUnlocked(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kmemsysInitMissing(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysStatePreInitLocked(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysStatePreInitUnlocked(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysGetTunableState(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + 
__nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysCompareTunableState(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kmemsysFreeTunableState(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysStatePostLoad(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysAllocTunableState(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmemsysSetTunableState(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kmemsysIsPresent(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMemorySystem_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelMemorySystem = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelMemorySystem(KernelMemorySystem *pThis) { + __nvoc_kmemsysDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelMemorySystem(KernelMemorySystem *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal field -- bDisableTiledCachingInvalidatesWithEccBug1521641 + if (0) + { + } + // default + else + { + pThis->bDisableTiledCachingInvalidatesWithEccBug1521641 = ((NvBool)(0 != 0)); + } + + // Hal field -- bGpuCacheEnable + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bGpuCacheEnable = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bGpuCacheEnable = ((NvBool)(0 != 0)); + } + + // Hal field -- bNumaNodesAdded + pThis->bNumaNodesAdded = ((NvBool)(0 != 0)); 
+ + // Hal field -- bL2CleanFbPull + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bL2CleanFbPull = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bL2CleanFbPull = ((NvBool)(0 != 0)); + } + + // Hal field -- bPreserveComptagBackingStoreOnSuspend + pThis->bPreserveComptagBackingStoreOnSuspend = ((NvBool)(0 != 0)); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelMemorySystem(KernelMemorySystem *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelMemorySystem_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelMemorySystem(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelMemorySystem_exit; // Success + +__nvoc_ctor_KernelMemorySystem_fail_OBJENGSTATE: +__nvoc_ctor_KernelMemorySystem_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelMemorySystem_1(KernelMemorySystem *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kmemsysConstructEngine__ = &kmemsysConstructEngine_IMPL; + + pThis->__kmemsysStateInitLocked__ = &kmemsysStateInitLocked_IMPL; + + pThis->__kmemsysStatePreLoad__ = &kmemsysStatePreLoad_IMPL; + + pThis->__kmemsysStateDestroy__ = &kmemsysStateDestroy_IMPL; + + // Hal function -- kmemsysGetFbNumaInfo + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmemsysGetFbNumaInfo__ = &kmemsysGetFbNumaInfo_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kmemsysGetFbNumaInfo__ = &kmemsysGetFbNumaInfo_56cd7a; + } + + // Hal function -- kmemsysReadUsableFbSize + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kmemsysReadUsableFbSize__ = &kmemsysReadUsableFbSize_GP102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmemsysReadUsableFbSize__ = &kmemsysReadUsableFbSize_GA102; + } + // default + else + { + pThis->__kmemsysReadUsableFbSize__ = &kmemsysReadUsableFbSize_5baef9; + } + + // Hal function -- kmemsysInitFlushSysmemBuffer + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + 
pThis->__kmemsysInitFlushSysmemBuffer__ = &kmemsysInitFlushSysmemBuffer_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmemsysInitFlushSysmemBuffer__ = &kmemsysInitFlushSysmemBuffer_GA100; + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + } + + // Hal function -- kmemsysProgramSysmemFlushBuffer + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kmemsysProgramSysmemFlushBuffer__ = &kmemsysProgramSysmemFlushBuffer_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmemsysProgramSysmemFlushBuffer__ = &kmemsysProgramSysmemFlushBuffer_GA100; + } + else if (0) + { + } + else if (0) + { + } + } + + // Hal function -- kmemsysIsPagePLCable + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmemsysIsPagePLCable__ = &kmemsysIsPagePLCable_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmemsysIsPagePLCable__ = &kmemsysIsPagePLCable_GA102; + } + // default + else + { + pThis->__kmemsysIsPagePLCable__ = &kmemsysIsPagePLCable_510167; + } + } + + // Hal function -- kmemsysReadMIGMemoryCfg + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmemsysReadMIGMemoryCfg__ = &kmemsysReadMIGMemoryCfg_GA100; + } + // default + else + { + pThis->__kmemsysReadMIGMemoryCfg__ = &kmemsysReadMIGMemoryCfg_46f6a7; + } + } + + // Hal function -- kmemsysInitMIGMemoryPartitionTable + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmemsysInitMIGMemoryPartitionTable__ = &kmemsysInitMIGMemoryPartitionTable_GA100; + } + // default + else + { + pThis->__kmemsysInitMIGMemoryPartitionTable__ = &kmemsysInitMIGMemoryPartitionTable_56cd7a; + } + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelMemorySystem_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelMemorySystem_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreLoad__ = &__nvoc_thunk_KernelMemorySystem_engstateStatePreLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = 
&__nvoc_thunk_KernelMemorySystem_engstateStateDestroy; + + pThis->__kmemsysReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmemsysReconcileTunableState; + + pThis->__kmemsysStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kmemsysStateLoad; + + pThis->__kmemsysStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kmemsysStateUnload; + + pThis->__kmemsysStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kmemsysStatePostUnload; + + pThis->__kmemsysStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kmemsysStatePreUnload; + + pThis->__kmemsysStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kmemsysStateInitUnlocked; + + pThis->__kmemsysInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kmemsysInitMissing; + + pThis->__kmemsysStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kmemsysStatePreInitLocked; + + pThis->__kmemsysStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kmemsysStatePreInitUnlocked; + + pThis->__kmemsysGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmemsysGetTunableState; + + pThis->__kmemsysCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmemsysCompareTunableState; + + pThis->__kmemsysFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmemsysFreeTunableState; + + pThis->__kmemsysStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kmemsysStatePostLoad; + + pThis->__kmemsysAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmemsysAllocTunableState; + + pThis->__kmemsysSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmemsysSetTunableState; + + pThis->__kmemsysIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kmemsysIsPresent; +} + +void __nvoc_init_funcTable_KernelMemorySystem(KernelMemorySystem *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelMemorySystem_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelMemorySystem(KernelMemorySystem *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelMemorySystem = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelMemorySystem(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelMemorySystem(KernelMemorySystem **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelMemorySystem *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelMemorySystem)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelMemorySystem)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelMemorySystem); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelMemorySystem(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelMemorySystem(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelMemorySystem_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelMemorySystem_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + 
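/*
 * [Editorial aside -- not part of the generated NVOC source] The object-creation routine
 * above follows the pattern used throughout these generated files: allocate and zero the
 * object, install its RTTI, wire up the HAL-selected virtual function table, and only then
 * run the C-level constructor. A minimal, hypothetical usage sketch based solely on the
 * signatures declared in this commit (pParent and pKms are illustrative names, not
 * identifiers from the source):
 *
 *     KernelMemorySystem *pKms = NULL;
 *     NV_STATUS status = __nvoc_objCreate_KernelMemorySystem(&pKms,
 *                                                            staticCast(pParent, Dynamic),
 *                                                            0);
 *     if (status == NV_OK)
 *     {
 *         // Virtual calls now dispatch through the per-chip function table filled in by
 *         // __nvoc_init_funcTable_KernelMemorySystem_1(); for example, the
 *         // kmemsysStateInitLocked() macro resolves to kmemsysStateInitLocked_IMPL via
 *         // the __kmemsysStateInitLocked__ pointer.
 *     }
 */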
+NV_STATUS __nvoc_objCreateDynamic_KernelMemorySystem(KernelMemorySystem **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelMemorySystem(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kern_mem_sys_nvoc.h b/src/nvidia/generated/g_kern_mem_sys_nvoc.h new file mode 100644 index 000000000..816412e97 --- /dev/null +++ b/src/nvidia/generated/g_kern_mem_sys_nvoc.h @@ -0,0 +1,833 @@ +#ifndef _G_KERN_MEM_SYS_NVOC_H_ +#define _G_KERN_MEM_SYS_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kern_mem_sys_nvoc.h" + +#ifndef KERN_MEM_SYS_H +#define KERN_MEM_SYS_H + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/gpu.h" +#include "containers/map.h" +#include "gpu/mem_mgr/heap_base.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +// +// FB Cache (opcode, mem target) defines used by kmemsysCacheOp hal API +// +typedef enum +{ + FB_CACHE_OP_UNDEFINED = 0, + // invalidate cache lines without writeback of dirty lines to memory + FB_CACHE_INVALIDATE = 1, + // writeback dirty lines but leave the lines in valid cache state + FB_CACHE_WRITEBACK, + // writeback dirty lines and then invalidates the cache state + FB_CACHE_EVICT, +} FB_CACHE_OP; + +// target memory types for cache operations +typedef enum +{ + FB_CACHE_MEM_UNDEFINED = 0, + FB_CACHE_SYSTEM_MEMORY = 1, + FB_CACHE_VIDEO_MEMORY, + FB_CACHE_PEER_MEMORY, + FB_CACHE_DIRTY, + FB_CACHE_COMPTAG_MEMORY, + FB_CACHE_DIRTY_ALL, +} FB_CACHE_MEMTYPE; + +typedef enum +{ + FB_CACHE_STATE_ENABLED, + FB_CACHE_STATE_DISABLED, +} FB_CACHE_STATE; + +typedef enum +{ + FB_CACHE_WRITE_MODE_WRITETHROUGH, + FB_CACHE_WRITE_MODE_WRITEBACK, +} FB_CACHE_WRITE_MODE; + +typedef enum +{ + FB_CACHE_BYPASS_MODE_ENABLED, + FB_CACHE_BYPASS_MODE_DISABLED, +} FB_CACHE_BYPASS_MODE; // FERMI (TEST) ONLY + +typedef enum +{ + FB_CACHE_RCM_STATE_FULL, + FB_CACHE_RCM_STATE_TRANSITIONING, + FB_CACHE_RCM_STATE_REDUCED, + FB_CACHE_RCM_STATE_ZERO_CACHE, +} FB_CACHE_RCM_STATE; + +/*! Tracks NUMA information of GPU memory partitions */ +typedef struct +{ + NvBool bInUse; // Is the partition in use? 
+ NvU64 offset; // FB offset of the partition + NvU64 size; // FB size of the partition + NvU32 numaNodeId; // OS NUMA Node Id of the partition. +} MEM_PARTITION_NUMA_INFO; + +typedef struct MIG_MEM_BOUNDARY_CONFIG_TABLE +{ + /*! + * Memory boundary config A (4KB aligned) + */ + NvU64 memBoundaryCfgA; + + /*! + * Memory boundary config B (4KB aligned) + */ + NvU64 memBoundaryCfgB; + + /*! + * Memory boundary config C (64KB aligned) + */ + NvU32 memBoundaryCfgC; +} MIG_MEM_BOUNDARY_CONFIG_TABLE; + +/*! + * @brief Structure carrying memory configuration information for specific GPU instance + * The information will be used to allocate memory when a GPU instance is + * created or queried. The structure will be indexed with swizzIDs + */ +typedef struct MIG_GPU_INSTANCE_MEMORY_CONFIG +{ + /*! + * First VMMU segment from where the GPU instance memory starts + */ + NvU64 startingVmmuSegment; + + /*! + * Size of the GPU instance memory in the form of number of vmmu segments + */ + NvU64 memSizeInVmmuSegment; + + /*! + * GPU Instance memory config initialization state + */ + NvBool bInitialized; +} MIG_GPU_INSTANCE_MEMORY_CONFIG; + +/* @ref NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS */ +typedef NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS MEMORY_SYSTEM_STATIC_CONFIG; + +#define FB_HWRESID_CTAGID_FERMI 15:0 +#define FB_HWRESID_ZCULL_FERMI 30:16 + +#define FB_HWRESID_ZCULL_SHIFT_FERMI(i) (1 << (i)) + +#define FB_HWRESID_CTAGID_VAL_FERMI(n) \ + (((n) >> DRF_SHIFT(FB_HWRESID_CTAGID_FERMI)) & DRF_MASK(FB_HWRESID_CTAGID_FERMI)) + +#define FB_HWRESID_CTAGID_NUM_FERMI(i) \ + (((i) & DRF_MASK(FB_HWRESID_CTAGID_FERMI)) << DRF_SHIFT(FB_HWRESID_CTAGID_FERMI)) + +#define FB_SET_HWRESID_CTAGID_FERMI(h, i) \ + h = ( ((h) & ~(DRF_MASK(FB_HWRESID_CTAGID_FERMI) << DRF_SHIFT(FB_HWRESID_CTAGID_FERMI))) | \ + FB_HWRESID_CTAGID_NUM_FERMI(i) ) + +#define FB_HWRESID_ZCULL_NUM_FERMI(i) \ + (((1<<(i)) & DRF_MASK(FB_HWRESID_ZCULL_FERMI)) << DRF_SHIFT(FB_HWRESID_ZCULL_FERMI)) + +#define FB_HWRESID_ZCULL_VAL_FERMI(n) \ + (((n) >> DRF_SHIFT(FB_HWRESID_ZCULL_FERMI)) & DRF_MASK(FB_HWRESID_ZCULL_FERMI)) + +/*! + * KernelMemorySystem is a logical abstraction of the GPU memory system. This + * type is instantiated in VGPU guest/GSP Client as well as the VGPU + * host/GSP-RM. + * + * When KernelMemorySystem wants to read or write hardware state, it does not + * have access to the registers on the GPU; it can, however, perform operations + * using the following mechanisms: + * + * 1.) access registers are virtualized across VFs, e.g.: registers within + * NV_VIRTUAL_FUNCTION_PRIV_XYZ. + * + * 2.) send an RPC to the VGPU Host/GSP-RM to perform the operation. + * + * Operations such as "get memory system bus width" are appropriate for this + * interface. Anything related to managing the memory page + * tables/allocations should live in MemoryManager.
+ */ + +#ifdef NVOC_KERN_MEM_SYS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelMemorySystem { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelMemorySystem *__nvoc_pbase_KernelMemorySystem; + NV_STATUS (*__kmemsysConstructEngine__)(OBJGPU *, struct KernelMemorySystem *, ENGDESCRIPTOR); + NV_STATUS (*__kmemsysStateInitLocked__)(OBJGPU *, struct KernelMemorySystem *); + NV_STATUS (*__kmemsysStatePreLoad__)(OBJGPU *, struct KernelMemorySystem *, NvU32); + void (*__kmemsysStateDestroy__)(OBJGPU *, struct KernelMemorySystem *); + NV_STATUS (*__kmemsysGetFbNumaInfo__)(OBJGPU *, struct KernelMemorySystem *, NvU64 *, NvS32 *); + NV_STATUS (*__kmemsysReadUsableFbSize__)(OBJGPU *, struct KernelMemorySystem *, NvU64 *); + NV_STATUS (*__kmemsysInitFlushSysmemBuffer__)(OBJGPU *, struct KernelMemorySystem *); + void (*__kmemsysProgramSysmemFlushBuffer__)(OBJGPU *, struct KernelMemorySystem *); + NvBool (*__kmemsysIsPagePLCable__)(OBJGPU *, struct KernelMemorySystem *, NvU64, NvU64); + NV_STATUS (*__kmemsysReadMIGMemoryCfg__)(OBJGPU *, struct KernelMemorySystem *); + NV_STATUS (*__kmemsysInitMIGMemoryPartitionTable__)(OBJGPU *, struct KernelMemorySystem *); + NV_STATUS (*__kmemsysReconcileTunableState__)(POBJGPU, struct KernelMemorySystem *, void *); + NV_STATUS (*__kmemsysStateLoad__)(POBJGPU, struct KernelMemorySystem *, NvU32); + NV_STATUS (*__kmemsysStateUnload__)(POBJGPU, struct KernelMemorySystem *, NvU32); + NV_STATUS (*__kmemsysStatePostUnload__)(POBJGPU, struct KernelMemorySystem *, NvU32); + NV_STATUS (*__kmemsysStatePreUnload__)(POBJGPU, struct KernelMemorySystem *, NvU32); + NV_STATUS (*__kmemsysStateInitUnlocked__)(POBJGPU, struct KernelMemorySystem *); + void (*__kmemsysInitMissing__)(POBJGPU, struct KernelMemorySystem *); + NV_STATUS (*__kmemsysStatePreInitLocked__)(POBJGPU, struct KernelMemorySystem *); + NV_STATUS (*__kmemsysStatePreInitUnlocked__)(POBJGPU, struct KernelMemorySystem *); + NV_STATUS (*__kmemsysGetTunableState__)(POBJGPU, struct KernelMemorySystem *, void *); + NV_STATUS (*__kmemsysCompareTunableState__)(POBJGPU, struct KernelMemorySystem *, void *, void *); + void (*__kmemsysFreeTunableState__)(POBJGPU, struct KernelMemorySystem *, void *); + NV_STATUS (*__kmemsysStatePostLoad__)(POBJGPU, struct KernelMemorySystem *, NvU32); + NV_STATUS (*__kmemsysAllocTunableState__)(POBJGPU, struct KernelMemorySystem *, void **); + NV_STATUS (*__kmemsysSetTunableState__)(POBJGPU, struct KernelMemorySystem *, void *); + NvBool (*__kmemsysIsPresent__)(POBJGPU, struct KernelMemorySystem *); + NvBool bDisableTiledCachingInvalidatesWithEccBug1521641; + NvBool bGpuCacheEnable; + NvBool bNumaNodesAdded; + NvBool bL2CleanFbPull; + NvBool bPreserveComptagBackingStoreOnSuspend; + const MEMORY_SYSTEM_STATIC_CONFIG *pStaticConfig; + MEM_PARTITION_NUMA_INFO *memPartitionNumaInfo; + MIG_MEM_BOUNDARY_CONFIG_TABLE memBoundaryCfgTable; + MIG_GPU_INSTANCE_MEMORY_CONFIG gpuInstanceMemConfig[15]; + NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS migMemoryPartitionTable; + PMEMORY_DESCRIPTOR pSysmemFlushBufferMemDesc; + NvU64 sysmemFlushBuffer; + NvU64 coherentCpuFbBase; + NvU64 coherentCpuFbEnd; + NvU64 numaOnlineBase; + NvU64 numaOnlineSize; +}; + +#ifndef __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ +#define __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ +typedef struct 
KernelMemorySystem KernelMemorySystem; +#endif /* __NVOC_CLASS_KernelMemorySystem_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMemorySystem +#define __nvoc_class_id_KernelMemorySystem 0x7faff1 +#endif /* __nvoc_class_id_KernelMemorySystem */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMemorySystem; + +#define __staticCast_KernelMemorySystem(pThis) \ + ((pThis)->__nvoc_pbase_KernelMemorySystem) + +#ifdef __nvoc_kern_mem_sys_h_disabled +#define __dynamicCast_KernelMemorySystem(pThis) ((KernelMemorySystem*)NULL) +#else //__nvoc_kern_mem_sys_h_disabled +#define __dynamicCast_KernelMemorySystem(pThis) \ + ((KernelMemorySystem*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelMemorySystem))) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define PDB_PROP_KMEMSYS_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KMEMSYS_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelMemorySystem(KernelMemorySystem**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelMemorySystem(KernelMemorySystem**, Dynamic*, NvU32); +#define __objCreate_KernelMemorySystem(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelMemorySystem((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kmemsysConstructEngine(pGpu, pKernelMemorySystem, arg0) kmemsysConstructEngine_DISPATCH(pGpu, pKernelMemorySystem, arg0) +#define kmemsysStateInitLocked(pGpu, pKernelMemorySystem) kmemsysStateInitLocked_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysStatePreLoad(pGpu, pKernelMemorySystem, flags) kmemsysStatePreLoad_DISPATCH(pGpu, pKernelMemorySystem, flags) +#define kmemsysStateDestroy(pGpu, pKernelMemorySystem) kmemsysStateDestroy_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysGetFbNumaInfo(pGpu, pKernelMemorySystem, physAddr, numaNodeId) kmemsysGetFbNumaInfo_DISPATCH(pGpu, pKernelMemorySystem, physAddr, numaNodeId) +#define kmemsysGetFbNumaInfo_HAL(pGpu, pKernelMemorySystem, physAddr, numaNodeId) kmemsysGetFbNumaInfo_DISPATCH(pGpu, pKernelMemorySystem, physAddr, numaNodeId) +#define kmemsysReadUsableFbSize(pGpu, pKernelMemorySystem, pFbSize) kmemsysReadUsableFbSize_DISPATCH(pGpu, pKernelMemorySystem, pFbSize) +#define kmemsysReadUsableFbSize_HAL(pGpu, pKernelMemorySystem, pFbSize) kmemsysReadUsableFbSize_DISPATCH(pGpu, pKernelMemorySystem, pFbSize) +#define kmemsysInitFlushSysmemBuffer(pGpu, pKernelMemorySystem) kmemsysInitFlushSysmemBuffer_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysInitFlushSysmemBuffer_HAL(pGpu, pKernelMemorySystem) kmemsysInitFlushSysmemBuffer_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysProgramSysmemFlushBuffer(pGpu, pKernelMemorySystem) kmemsysProgramSysmemFlushBuffer_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysProgramSysmemFlushBuffer_HAL(pGpu, pKernelMemorySystem) kmemsysProgramSysmemFlushBuffer_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysIsPagePLCable(pGpu, KernelMemorySystem, physAddr, pageSize) kmemsysIsPagePLCable_DISPATCH(pGpu, KernelMemorySystem, physAddr, pageSize) +#define kmemsysIsPagePLCable_HAL(pGpu, KernelMemorySystem, physAddr, pageSize) kmemsysIsPagePLCable_DISPATCH(pGpu, KernelMemorySystem, physAddr, pageSize) +#define kmemsysReadMIGMemoryCfg(pGpu, pKernelMemorySystem) kmemsysReadMIGMemoryCfg_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysReadMIGMemoryCfg_HAL(pGpu, pKernelMemorySystem) kmemsysReadMIGMemoryCfg_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysInitMIGMemoryPartitionTable(pGpu, pKernelMemorySystem) 
kmemsysInitMIGMemoryPartitionTable_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysInitMIGMemoryPartitionTable_HAL(pGpu, pKernelMemorySystem) kmemsysInitMIGMemoryPartitionTable_DISPATCH(pGpu, pKernelMemorySystem) +#define kmemsysReconcileTunableState(pGpu, pEngstate, pTunableState) kmemsysReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmemsysStateLoad(pGpu, pEngstate, arg0) kmemsysStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kmemsysStateUnload(pGpu, pEngstate, arg0) kmemsysStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kmemsysStatePostUnload(pGpu, pEngstate, arg0) kmemsysStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kmemsysStatePreUnload(pGpu, pEngstate, arg0) kmemsysStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kmemsysStateInitUnlocked(pGpu, pEngstate) kmemsysStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kmemsysInitMissing(pGpu, pEngstate) kmemsysInitMissing_DISPATCH(pGpu, pEngstate) +#define kmemsysStatePreInitLocked(pGpu, pEngstate) kmemsysStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kmemsysStatePreInitUnlocked(pGpu, pEngstate) kmemsysStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kmemsysGetTunableState(pGpu, pEngstate, pTunableState) kmemsysGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmemsysCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kmemsysCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kmemsysFreeTunableState(pGpu, pEngstate, pTunableState) kmemsysFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmemsysStatePostLoad(pGpu, pEngstate, arg0) kmemsysStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kmemsysAllocTunableState(pGpu, pEngstate, ppTunableState) kmemsysAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kmemsysSetTunableState(pGpu, pEngstate, pTunableState) kmemsysSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmemsysIsPresent(pGpu, pEngstate) kmemsysIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kmemsysGetUsableFbSize_KERNEL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysGetUsableFbSize(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysGetUsableFbSize(pGpu, pKernelMemorySystem, pFbSize) kmemsysGetUsableFbSize_KERNEL(pGpu, pKernelMemorySystem, pFbSize) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysGetUsableFbSize_HAL(pGpu, pKernelMemorySystem, pFbSize) kmemsysGetUsableFbSize(pGpu, pKernelMemorySystem, pFbSize) + +NV_STATUS kmemsysCacheOp_GM200(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, PMEMORY_DESCRIPTOR arg0, FB_CACHE_MEMTYPE arg1, FB_CACHE_OP operation); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysCacheOp(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, PMEMORY_DESCRIPTOR arg0, FB_CACHE_MEMTYPE arg1, FB_CACHE_OP operation) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysCacheOp(pGpu, pKernelMemorySystem, arg0, arg1, operation) kmemsysCacheOp_GM200(pGpu, pKernelMemorySystem, arg0, arg1, operation) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, arg0, 
arg1, operation) kmemsysCacheOp(pGpu, pKernelMemorySystem, arg0, arg1, operation) + +NV_STATUS kmemsysDoCacheOp_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0, NvU32 arg1, NvU32 arg2, PRMTIMEOUT arg3); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysDoCacheOp(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0, NvU32 arg1, NvU32 arg2, PRMTIMEOUT arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysDoCacheOp(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3) kmemsysDoCacheOp_GM107(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysDoCacheOp_HAL(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3) kmemsysDoCacheOp(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3) + +NvU32 kmemsysReadL2SysmemInvalidateReg_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NvU32 kmemsysReadL2SysmemInvalidateReg(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return 0; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysReadL2SysmemInvalidateReg(pGpu, pKernelMemorySystem) kmemsysReadL2SysmemInvalidateReg_TU102(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysReadL2SysmemInvalidateReg_HAL(pGpu, pKernelMemorySystem) kmemsysReadL2SysmemInvalidateReg(pGpu, pKernelMemorySystem) + +void kmemsysWriteL2SysmemInvalidateReg_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline void kmemsysWriteL2SysmemInvalidateReg(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysWriteL2SysmemInvalidateReg(pGpu, pKernelMemorySystem, arg0) kmemsysWriteL2SysmemInvalidateReg_TU102(pGpu, pKernelMemorySystem, arg0) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysWriteL2SysmemInvalidateReg_HAL(pGpu, pKernelMemorySystem, arg0) kmemsysWriteL2SysmemInvalidateReg(pGpu, pKernelMemorySystem, arg0) + +NvU32 kmemsysReadL2PeermemInvalidateReg_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NvU32 kmemsysReadL2PeermemInvalidateReg(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return 0; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysReadL2PeermemInvalidateReg(pGpu, pKernelMemorySystem) kmemsysReadL2PeermemInvalidateReg_TU102(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysReadL2PeermemInvalidateReg_HAL(pGpu, pKernelMemorySystem) kmemsysReadL2PeermemInvalidateReg(pGpu, pKernelMemorySystem) + +void kmemsysWriteL2PeermemInvalidateReg_TU102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline void kmemsysWriteL2PeermemInvalidateReg(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysWriteL2PeermemInvalidateReg(pGpu, pKernelMemorySystem, arg0) 
kmemsysWriteL2PeermemInvalidateReg_TU102(pGpu, pKernelMemorySystem, arg0) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysWriteL2PeermemInvalidateReg_HAL(pGpu, pKernelMemorySystem, arg0) kmemsysWriteL2PeermemInvalidateReg(pGpu, pKernelMemorySystem, arg0) + +static inline void kmemsysAssertSysmemFlushBufferValid_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return; +} + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline void kmemsysAssertSysmemFlushBufferValid(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysAssertSysmemFlushBufferValid(pGpu, pKernelMemorySystem) kmemsysAssertSysmemFlushBufferValid_b3696a(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysAssertSysmemFlushBufferValid_HAL(pGpu, pKernelMemorySystem) kmemsysAssertSysmemFlushBufferValid(pGpu, pKernelMemorySystem) + +NV_STATUS kmemsysInitStaticConfig_KERNEL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, MEMORY_SYSTEM_STATIC_CONFIG *pConfig); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysInitStaticConfig(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, MEMORY_SYSTEM_STATIC_CONFIG *pConfig) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysInitStaticConfig(pGpu, pKernelMemorySystem, pConfig) kmemsysInitStaticConfig_KERNEL(pGpu, pKernelMemorySystem, pConfig) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysInitStaticConfig_HAL(pGpu, pKernelMemorySystem, pConfig) kmemsysInitStaticConfig(pGpu, pKernelMemorySystem, pConfig) + +static inline NV_STATUS kmemsysPreFillCacheOnlyMemory_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1) { + return NV_OK; +} + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysPreFillCacheOnlyMemory(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysPreFillCacheOnlyMemory(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysPreFillCacheOnlyMemory_56cd7a(pGpu, pKernelMemorySystem, arg0, arg1) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysPreFillCacheOnlyMemory_HAL(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysPreFillCacheOnlyMemory(pGpu, pKernelMemorySystem, arg0, arg1) + +static inline NV_STATUS kmemsysCheckDisplayRemapperRange_14278f(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_INVALID_STATE); +} + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysCheckDisplayRemapperRange(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 arg0, NvU64 arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysCheckDisplayRemapperRange(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysCheckDisplayRemapperRange_14278f(pGpu, pKernelMemorySystem, arg0, arg1) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysCheckDisplayRemapperRange_HAL(pGpu, pKernelMemorySystem, arg0, arg1) kmemsysCheckDisplayRemapperRange(pGpu, pKernelMemorySystem, arg0, arg1) + +static inline void 
kmemsysPostHeapCreate_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return; +} + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline void kmemsysPostHeapCreate(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysPostHeapCreate(pGpu, pKernelMemorySystem) kmemsysPostHeapCreate_b3696a(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysPostHeapCreate_HAL(pGpu, pKernelMemorySystem) kmemsysPostHeapCreate(pGpu, pKernelMemorySystem) + +static inline void kmemsysPreHeapDestruct_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return; +} + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline void kmemsysPreHeapDestruct(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysPreHeapDestruct(pGpu, pKernelMemorySystem) kmemsysPreHeapDestruct_b3696a(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysPreHeapDestruct_HAL(pGpu, pKernelMemorySystem) kmemsysPreHeapDestruct(pGpu, pKernelMemorySystem) + +NV_STATUS kmemsysAllocComprResources_KERNEL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, FB_ALLOC_INFO *arg0, NvU64 arg1, NvU32 arg2, NvU32 *arg3, NvU32 arg4); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysAllocComprResources(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, FB_ALLOC_INFO *arg0, NvU64 arg1, NvU32 arg2, NvU32 *arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysAllocComprResources(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3, arg4) kmemsysAllocComprResources_KERNEL(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysAllocComprResources_HAL(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3, arg4) kmemsysAllocComprResources(pGpu, pKernelMemorySystem, arg0, arg1, arg2, arg3, arg4) + +static inline void kmemsysFreeComprResources_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 hwResId) { + return; +} + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline void kmemsysFreeComprResources(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 hwResId) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysFreeComprResources(pGpu, pKernelMemorySystem, hwResId) kmemsysFreeComprResources_b3696a(pGpu, pKernelMemorySystem, hwResId) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysFreeComprResources_HAL(pGpu, pKernelMemorySystem, hwResId) kmemsysFreeComprResources(pGpu, pKernelMemorySystem, hwResId) + +NV_STATUS kmemsysSwizzIdToVmmuSegmentsRange_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, NvU32 vmmuSegmentSize, NvU32 totalVmmuSegments); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysSwizzIdToVmmuSegmentsRange(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, NvU32 vmmuSegmentSize, NvU32 totalVmmuSegments) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define 
kmemsysSwizzIdToVmmuSegmentsRange(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments) kmemsysSwizzIdToVmmuSegmentsRange_GA100(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysSwizzIdToVmmuSegmentsRange_HAL(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments) kmemsysSwizzIdToVmmuSegmentsRange(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments) + +NV_STATUS kmemsysPopulateMIGGPUInstanceMemConfig_KERNEL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysPopulateMIGGPUInstanceMemConfig(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysPopulateMIGGPUInstanceMemConfig(pGpu, pKernelMemorySystem) kmemsysPopulateMIGGPUInstanceMemConfig_KERNEL(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +#define kmemsysPopulateMIGGPUInstanceMemConfig_HAL(pGpu, pKernelMemorySystem) kmemsysPopulateMIGGPUInstanceMemConfig(pGpu, pKernelMemorySystem) + +NV_STATUS kmemsysConstructEngine_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, ENGDESCRIPTOR arg0); + +static inline NV_STATUS kmemsysConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, ENGDESCRIPTOR arg0) { + return pKernelMemorySystem->__kmemsysConstructEngine__(pGpu, pKernelMemorySystem, arg0); +} + +NV_STATUS kmemsysStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +static inline NV_STATUS kmemsysStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return pKernelMemorySystem->__kmemsysStateInitLocked__(pGpu, pKernelMemorySystem); +} + +NV_STATUS kmemsysStatePreLoad_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags); + +static inline NV_STATUS kmemsysStatePreLoad_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags) { + return pKernelMemorySystem->__kmemsysStatePreLoad__(pGpu, pKernelMemorySystem, flags); +} + +void kmemsysStateDestroy_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +static inline void kmemsysStateDestroy_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + pKernelMemorySystem->__kmemsysStateDestroy__(pGpu, pKernelMemorySystem); +} + +NV_STATUS kmemsysGetFbNumaInfo_GV100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *physAddr, NvS32 *numaNodeId); + +static inline NV_STATUS kmemsysGetFbNumaInfo_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *physAddr, NvS32 *numaNodeId) { + return NV_OK; +} + +static inline NV_STATUS kmemsysGetFbNumaInfo_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *physAddr, NvS32 *numaNodeId) { + return pKernelMemorySystem->__kmemsysGetFbNumaInfo__(pGpu, pKernelMemorySystem, physAddr, numaNodeId); +} + +NV_STATUS kmemsysReadUsableFbSize_GP102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize); + +NV_STATUS kmemsysReadUsableFbSize_GA102(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize); + +static inline NV_STATUS kmemsysReadUsableFbSize_5baef9(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 
NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS kmemsysReadUsableFbSize_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU64 *pFbSize) { + return pKernelMemorySystem->__kmemsysReadUsableFbSize__(pGpu, pKernelMemorySystem, pFbSize); +} + +static inline NV_STATUS kmemsysInitFlushSysmemBuffer_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return NV_OK; +} + +NV_STATUS kmemsysInitFlushSysmemBuffer_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +NV_STATUS kmemsysInitFlushSysmemBuffer_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +static inline NV_STATUS kmemsysInitFlushSysmemBuffer_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return pKernelMemorySystem->__kmemsysInitFlushSysmemBuffer__(pGpu, pKernelMemorySystem); +} + +static inline void kmemsysProgramSysmemFlushBuffer_b3696a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return; +} + +void kmemsysProgramSysmemFlushBuffer_GM107(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +void kmemsysProgramSysmemFlushBuffer_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +static inline void kmemsysProgramSysmemFlushBuffer_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + pKernelMemorySystem->__kmemsysProgramSysmemFlushBuffer__(pGpu, pKernelMemorySystem); +} + +static inline NvBool kmemsysIsPagePLCable_cbe027(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize) { + return ((NvBool)(0 == 0)); +} + +NvBool kmemsysIsPagePLCable_GA100(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize); + +NvBool kmemsysIsPagePLCable_GA102(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize); + +static inline NvBool kmemsysIsPagePLCable_510167(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 == 0))); +} + +static inline NvBool kmemsysIsPagePLCable_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *KernelMemorySystem, NvU64 physAddr, NvU64 pageSize) { + return KernelMemorySystem->__kmemsysIsPagePLCable__(pGpu, KernelMemorySystem, physAddr, pageSize); +} + +static inline NV_STATUS kmemsysReadMIGMemoryCfg_46f6a7(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS kmemsysReadMIGMemoryCfg_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +static inline NV_STATUS kmemsysReadMIGMemoryCfg_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return pKernelMemorySystem->__kmemsysReadMIGMemoryCfg__(pGpu, pKernelMemorySystem); +} + +static inline NV_STATUS kmemsysInitMIGMemoryPartitionTable_56cd7a(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return NV_OK; +} + +NV_STATUS kmemsysInitMIGMemoryPartitionTable_GA100(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); + +static inline NV_STATUS kmemsysInitMIGMemoryPartitionTable_DISPATCH(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + return pKernelMemorySystem->__kmemsysInitMIGMemoryPartitionTable__(pGpu, pKernelMemorySystem); +} + +static inline NV_STATUS kmemsysReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunableState) { + return pEngstate->__kmemsysReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline 
NV_STATUS kmemsysStateLoad_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return pEngstate->__kmemsysStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmemsysStateUnload_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return pEngstate->__kmemsysStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmemsysStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return pEngstate->__kmemsysStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmemsysStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return pEngstate->__kmemsysStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmemsysStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + return pEngstate->__kmemsysStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kmemsysInitMissing_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + pEngstate->__kmemsysInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kmemsysStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + return pEngstate->__kmemsysStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kmemsysStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + return pEngstate->__kmemsysStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kmemsysGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunableState) { + return pEngstate->__kmemsysGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kmemsysCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kmemsysCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kmemsysFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunableState) { + pEngstate->__kmemsysFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kmemsysStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, NvU32 arg0) { + return pEngstate->__kmemsysStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmemsysAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void **ppTunableState) { + return pEngstate->__kmemsysAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kmemsysSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate, void *pTunableState) { + return pEngstate->__kmemsysSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kmemsysIsPresent_DISPATCH(POBJGPU pGpu, struct KernelMemorySystem *pEngstate) { + return pEngstate->__kmemsysIsPresent__(pGpu, pEngstate); +} + +static inline NvBool kmemsysIsL2CleanFbPull(struct KernelMemorySystem *pKernelMemorySystem) { + return pKernelMemorySystem->bL2CleanFbPull; +} + +void kmemsysDestruct_IMPL(struct KernelMemorySystem *pKernelMemorySystem); +#define __nvoc_kmemsysDestruct(pKernelMemorySystem) kmemsysDestruct_IMPL(pKernelMemorySystem) +NV_STATUS kmemsysEnsureSysmemFlushBufferInitialized_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysEnsureSysmemFlushBufferInitialized(OBJGPU *pGpu, struct KernelMemorySystem 
*pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysEnsureSysmemFlushBufferInitialized(pGpu, pKernelMemorySystem) kmemsysEnsureSysmemFlushBufferInitialized_IMPL(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +const MEMORY_SYSTEM_STATIC_CONFIG *kmemsysGetStaticConfig_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline const MEMORY_SYSTEM_STATIC_CONFIG *kmemsysGetStaticConfig(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NULL; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysGetStaticConfig(pGpu, pKernelMemorySystem) kmemsysGetStaticConfig_IMPL(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +NV_STATUS kmemsysSetupCoherentCpuLink_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bFlush); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysSetupCoherentCpuLink(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bFlush) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysSetupCoherentCpuLink(pGpu, pKernelMemorySystem, bFlush) kmemsysSetupCoherentCpuLink_IMPL(pGpu, pKernelMemorySystem, bFlush) +#endif //__nvoc_kern_mem_sys_h_disabled + +void kmemsysTeardownCoherentCpuLink_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bFlush); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline void kmemsysTeardownCoherentCpuLink(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvBool bFlush) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysTeardownCoherentCpuLink(pGpu, pKernelMemorySystem, bFlush) kmemsysTeardownCoherentCpuLink_IMPL(pGpu, pKernelMemorySystem, bFlush) +#endif //__nvoc_kern_mem_sys_h_disabled + +NV_STATUS kmemsysSendL2InvalidateEvict_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysSendL2InvalidateEvict(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysSendL2InvalidateEvict(pGpu, pKernelMemorySystem, flags) kmemsysSendL2InvalidateEvict_IMPL(pGpu, pKernelMemorySystem, flags) +#endif //__nvoc_kern_mem_sys_h_disabled + +NV_STATUS kmemsysSendFlushL2AllRamsAndCaches_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysSendFlushL2AllRamsAndCaches(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysSendFlushL2AllRamsAndCaches(pGpu, pKernelMemorySystem) kmemsysSendFlushL2AllRamsAndCaches_IMPL(pGpu, pKernelMemorySystem) +#endif //__nvoc_kern_mem_sys_h_disabled + +NV_STATUS kmemsysSwizzIdToMIGMemSize_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE totalRange, NvU32 *pPartitionSizeFlag, NvU64 *pSizeInBytes); +#ifdef 
__nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysSwizzIdToMIGMemSize(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE totalRange, NvU32 *pPartitionSizeFlag, NvU64 *pSizeInBytes) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysSwizzIdToMIGMemSize(pGpu, pKernelMemorySystem, swizzId, totalRange, pPartitionSizeFlag, pSizeInBytes) kmemsysSwizzIdToMIGMemSize_IMPL(pGpu, pKernelMemorySystem, swizzId, totalRange, pPartitionSizeFlag, pSizeInBytes) +#endif //__nvoc_kern_mem_sys_h_disabled + +NV_STATUS kmemsysSwizzIdToMIGMemRange_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE totalRange, struct NV_RANGE *pAddrRange); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysSwizzIdToMIGMemRange(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE totalRange, struct NV_RANGE *pAddrRange) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysSwizzIdToMIGMemRange(pGpu, pKernelMemorySystem, swizzId, totalRange, pAddrRange) kmemsysSwizzIdToMIGMemRange_IMPL(pGpu, pKernelMemorySystem, swizzId, totalRange, pAddrRange) +#endif //__nvoc_kern_mem_sys_h_disabled + +NV_STATUS kmemsysGetMIGGPUInstanceMemInfo_IMPL(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE *pAddrRange); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysGetMIGGPUInstanceMemInfo(OBJGPU *pGpu, struct KernelMemorySystem *pKernelMemorySystem, NvU32 swizzId, struct NV_RANGE *pAddrRange) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysGetMIGGPUInstanceMemInfo(pGpu, pKernelMemorySystem, swizzId, pAddrRange) kmemsysGetMIGGPUInstanceMemInfo_IMPL(pGpu, pKernelMemorySystem, swizzId, pAddrRange) +#endif //__nvoc_kern_mem_sys_h_disabled + +NV_STATUS kmemsysGetMIGGPUInstanceMemConfigFromSwizzId_IMPL(OBJGPU *arg0, struct KernelMemorySystem *arg1, NvU32 swizzId, const MIG_GPU_INSTANCE_MEMORY_CONFIG **arg2); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysGetMIGGPUInstanceMemConfigFromSwizzId(OBJGPU *arg0, struct KernelMemorySystem *arg1, NvU32 swizzId, const MIG_GPU_INSTANCE_MEMORY_CONFIG **arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysGetMIGGPUInstanceMemConfigFromSwizzId(arg0, arg1, swizzId, arg2) kmemsysGetMIGGPUInstanceMemConfigFromSwizzId_IMPL(arg0, arg1, swizzId, arg2) +#endif //__nvoc_kern_mem_sys_h_disabled + +NV_STATUS kmemsysInitMIGGPUInstanceMemConfigForSwizzId_IMPL(OBJGPU *arg0, struct KernelMemorySystem *arg1, NvU32 swizzId, NvU64 startingVmmuSegment, NvU64 memSizeInVmmuSegment); +#ifdef __nvoc_kern_mem_sys_h_disabled +static inline NV_STATUS kmemsysInitMIGGPUInstanceMemConfigForSwizzId(OBJGPU *arg0, struct KernelMemorySystem *arg1, NvU32 swizzId, NvU64 startingVmmuSegment, NvU64 memSizeInVmmuSegment) { + NV_ASSERT_FAILED_PRECOMP("KernelMemorySystem was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_mem_sys_h_disabled +#define kmemsysInitMIGGPUInstanceMemConfigForSwizzId(arg0, arg1, swizzId, startingVmmuSegment, memSizeInVmmuSegment) 
kmemsysInitMIGGPUInstanceMemConfigForSwizzId_IMPL(arg0, arg1, swizzId, startingVmmuSegment, memSizeInVmmuSegment) +#endif //__nvoc_kern_mem_sys_h_disabled + +#undef PRIVATE_FIELD + + +#define IS_COHERENT_CPU_ATS_OFFSET(kmemsys, offset, length) \ + (kmemsys && ((offset) >= kmemsys->coherentCpuFbBase) && \ + (((NvU64)offset + length) <= kmemsys->coherentCpuFbEnd)) + +#endif // KERN_MEM_SYS_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERN_MEM_SYS_NVOC_H_ diff --git a/src/nvidia/generated/g_kern_perf_nvoc.c b/src/nvidia/generated/g_kern_perf_nvoc.c new file mode 100644 index 000000000..f4241c426 --- /dev/null +++ b/src/nvidia/generated/g_kern_perf_nvoc.c @@ -0,0 +1,301 @@ +#define NVOC_KERN_PERF_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_perf_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xc53a57 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPerf; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelPerf(KernelPerf*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelPerf(KernelPerf*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelPerf(KernelPerf*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelPerf(KernelPerf*, RmHalspecOwner* ); +void __nvoc_dtor_KernelPerf(KernelPerf*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelPerf; + +static const struct NVOC_RTTI __nvoc_rtti_KernelPerf_KernelPerf = { + /*pClassDef=*/ &__nvoc_class_def_KernelPerf, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelPerf, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelPerf_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelPerf, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelPerf_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelPerf, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelPerf = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelPerf_KernelPerf, + &__nvoc_rtti_KernelPerf_OBJENGSTATE, + &__nvoc_rtti_KernelPerf_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPerf = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelPerf), + /*classId=*/ classId(KernelPerf), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelPerf", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelPerf, + /*pCastInfo=*/ &__nvoc_castinfo_KernelPerf, + /*pExportInfo=*/ &__nvoc_export_info_KernelPerf +}; + +static NV_STATUS __nvoc_thunk_KernelPerf_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pKernelPerf, ENGDESCRIPTOR engDesc) { + return kperfConstructEngine(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), engDesc); +} + +static NV_STATUS __nvoc_thunk_KernelPerf_engstateStateInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pKernelPerf) { + return kperfStateInitLocked(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelPerf_engstateStateLoad(OBJGPU *pGpu, 
struct OBJENGSTATE *pKernelPerf, NvU32 flags) { + return kperfStateLoad(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_KernelPerf_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKernelPerf, NvU32 flags) { + return kperfStateUnload(pGpu, (struct KernelPerf *)(((unsigned char *)pKernelPerf) - __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfReconcileTunableState(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePreLoad(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePostUnload(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_kperfStateDestroy(POBJGPU pGpu, struct KernelPerf *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePreUnload(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStateInitUnlocked(POBJGPU pGpu, struct KernelPerf *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kperfInitMissing(POBJGPU pGpu, struct KernelPerf *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePreInitLocked(POBJGPU pGpu, struct KernelPerf *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePreInitUnlocked(POBJGPU pGpu, struct KernelPerf *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfGetTunableState(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfCompareTunableState(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kperfFreeTunableState(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunableState) { + 
engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfStatePostLoad(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfAllocTunableState(POBJGPU pGpu, struct KernelPerf *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kperfSetTunableState(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kperfIsPresent(POBJGPU pGpu, struct KernelPerf *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPerf_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelPerf = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelPerf(KernelPerf *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelPerf(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelPerf(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelPerf_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelPerf(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelPerf_exit; // Success + +__nvoc_ctor_KernelPerf_fail_OBJENGSTATE: +__nvoc_ctor_KernelPerf_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelPerf_1(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kperfConstructEngine__ = &kperfConstructEngine_IMPL; + + pThis->__kperfStateInitLocked__ = &kperfStateInitLocked_IMPL; + + pThis->__kperfStateLoad__ = &kperfStateLoad_IMPL; + + pThis->__kperfStateUnload__ = &kperfStateUnload_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelPerf_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelPerf_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelPerf_engstateStateLoad; + + 
pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelPerf_engstateStateUnload; + + pThis->__kperfReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kperfReconcileTunableState; + + pThis->__kperfStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePreLoad; + + pThis->__kperfStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePostUnload; + + pThis->__kperfStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kperfStateDestroy; + + pThis->__kperfStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePreUnload; + + pThis->__kperfStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kperfStateInitUnlocked; + + pThis->__kperfInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kperfInitMissing; + + pThis->__kperfStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePreInitLocked; + + pThis->__kperfStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePreInitUnlocked; + + pThis->__kperfGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kperfGetTunableState; + + pThis->__kperfCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kperfCompareTunableState; + + pThis->__kperfFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kperfFreeTunableState; + + pThis->__kperfStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kperfStatePostLoad; + + pThis->__kperfAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kperfAllocTunableState; + + pThis->__kperfSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kperfSetTunableState; + + pThis->__kperfIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kperfIsPresent; +} + +void __nvoc_init_funcTable_KernelPerf(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelPerf_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelPerf(KernelPerf *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelPerf = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelPerf(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelPerf(KernelPerf **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelPerf *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelPerf)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelPerf)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelPerf); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelPerf(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelPerf(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelPerf_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelPerf_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelPerf(KernelPerf **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + 
status = __nvoc_objCreate_KernelPerf(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kern_perf_nvoc.h b/src/nvidia/generated/g_kern_perf_nvoc.h new file mode 100644 index 000000000..5c015d856 --- /dev/null +++ b/src/nvidia/generated/g_kern_perf_nvoc.h @@ -0,0 +1,338 @@ +#ifndef _G_KERN_PERF_NVOC_H_ +#define _G_KERN_PERF_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kern_perf_nvoc.h" + +#ifndef KERNEL_PERF_H +#define KERNEL_PERF_H + +/****************************************************************************** +* +* Kernel Perf module header +* This file contains functions managing performance on CPU RM +* +******************************************************************************/ +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/gpu.h" +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" +#include "gpu/perf/kern_perf_boost.h" +#include "gpu/perf/kern_perf_1hz.h" +#include "gpu/perf/kern_perf_gpuboostsync.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" + +/* ----------------------------- Macros ------------------------------------- */ +/*! + * The rule of reentrancy is that routine can't run unless its flag and all lower + * flags are clear. This is Kernel Perf reentrancy function ID for 1HZ Callback. + */ +#define KERNEL_PERF_REENTRANCY_TIMER_1HZ_CALLBACK NVBIT(0) + +/* -------------------------- Datatypes ------------------------------------- */ +/*! + * KernelPerf is a logical abstraction of the GPU Perf Engine. The + * Public API of the Perf Engine is exposed through this object, and any + * interfaces which do not manage the underlying Perf hardware can be + * managed by this object. 
+ */ +#ifdef NVOC_KERN_PERF_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelPerf { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelPerf *__nvoc_pbase_KernelPerf; + NV_STATUS (*__kperfConstructEngine__)(OBJGPU *, struct KernelPerf *, ENGDESCRIPTOR); + NV_STATUS (*__kperfStateInitLocked__)(OBJGPU *, struct KernelPerf *); + NV_STATUS (*__kperfStateLoad__)(OBJGPU *, struct KernelPerf *, NvU32); + NV_STATUS (*__kperfStateUnload__)(OBJGPU *, struct KernelPerf *, NvU32); + NV_STATUS (*__kperfReconcileTunableState__)(POBJGPU, struct KernelPerf *, void *); + NV_STATUS (*__kperfStatePreLoad__)(POBJGPU, struct KernelPerf *, NvU32); + NV_STATUS (*__kperfStatePostUnload__)(POBJGPU, struct KernelPerf *, NvU32); + void (*__kperfStateDestroy__)(POBJGPU, struct KernelPerf *); + NV_STATUS (*__kperfStatePreUnload__)(POBJGPU, struct KernelPerf *, NvU32); + NV_STATUS (*__kperfStateInitUnlocked__)(POBJGPU, struct KernelPerf *); + void (*__kperfInitMissing__)(POBJGPU, struct KernelPerf *); + NV_STATUS (*__kperfStatePreInitLocked__)(POBJGPU, struct KernelPerf *); + NV_STATUS (*__kperfStatePreInitUnlocked__)(POBJGPU, struct KernelPerf *); + NV_STATUS (*__kperfGetTunableState__)(POBJGPU, struct KernelPerf *, void *); + NV_STATUS (*__kperfCompareTunableState__)(POBJGPU, struct KernelPerf *, void *, void *); + void (*__kperfFreeTunableState__)(POBJGPU, struct KernelPerf *, void *); + NV_STATUS (*__kperfStatePostLoad__)(POBJGPU, struct KernelPerf *, NvU32); + NV_STATUS (*__kperfAllocTunableState__)(POBJGPU, struct KernelPerf *, void **); + NV_STATUS (*__kperfSetTunableState__)(POBJGPU, struct KernelPerf *, void *); + NvBool (*__kperfIsPresent__)(POBJGPU, struct KernelPerf *); + struct KERNEL_PERF_GPU_BOOST_SYNC sliGpuBoostSync; + NvU32 reentrancyMask; + KERNEL_PERF_BOOST_HINTS perfBoostHints; + KERNEL_PERF_1HZ timer1HzCallback; + NvU32 longestDurationIndex; +}; + +#ifndef __NVOC_CLASS_KernelPerf_TYPEDEF__ +#define __NVOC_CLASS_KernelPerf_TYPEDEF__ +typedef struct KernelPerf KernelPerf; +#endif /* __NVOC_CLASS_KernelPerf_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelPerf +#define __nvoc_class_id_KernelPerf 0xc53a57 +#endif /* __nvoc_class_id_KernelPerf */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPerf; + +#define __staticCast_KernelPerf(pThis) \ + ((pThis)->__nvoc_pbase_KernelPerf) + +#ifdef __nvoc_kern_perf_h_disabled +#define __dynamicCast_KernelPerf(pThis) ((KernelPerf*)NULL) +#else //__nvoc_kern_perf_h_disabled +#define __dynamicCast_KernelPerf(pThis) \ + ((KernelPerf*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelPerf))) +#endif //__nvoc_kern_perf_h_disabled + +#define PDB_PROP_KPERF_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KPERF_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelPerf(KernelPerf**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelPerf(KernelPerf**, Dynamic*, NvU32); +#define __objCreate_KernelPerf(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelPerf((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kperfConstructEngine(pGpu, pKernelPerf, engDesc) kperfConstructEngine_DISPATCH(pGpu, pKernelPerf, engDesc) +#define kperfStateInitLocked(pGpu, pKernelPerf) kperfStateInitLocked_DISPATCH(pGpu, pKernelPerf) +#define kperfStateLoad(pGpu, pKernelPerf, flags) kperfStateLoad_DISPATCH(pGpu, pKernelPerf, flags) +#define kperfStateUnload(pGpu, pKernelPerf, flags) kperfStateUnload_DISPATCH(pGpu, pKernelPerf, flags) +#define kperfReconcileTunableState(pGpu, pEngstate, pTunableState) kperfReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kperfStatePreLoad(pGpu, pEngstate, arg0) kperfStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kperfStatePostUnload(pGpu, pEngstate, arg0) kperfStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kperfStateDestroy(pGpu, pEngstate) kperfStateDestroy_DISPATCH(pGpu, pEngstate) +#define kperfStatePreUnload(pGpu, pEngstate, arg0) kperfStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kperfStateInitUnlocked(pGpu, pEngstate) kperfStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kperfInitMissing(pGpu, pEngstate) kperfInitMissing_DISPATCH(pGpu, pEngstate) +#define kperfStatePreInitLocked(pGpu, pEngstate) kperfStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kperfStatePreInitUnlocked(pGpu, pEngstate) kperfStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kperfGetTunableState(pGpu, pEngstate, pTunableState) kperfGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kperfCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kperfCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kperfFreeTunableState(pGpu, pEngstate, pTunableState) kperfFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kperfStatePostLoad(pGpu, pEngstate, arg0) kperfStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kperfAllocTunableState(pGpu, pEngstate, ppTunableState) kperfAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kperfSetTunableState(pGpu, pEngstate, pTunableState) kperfSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kperfIsPresent(pGpu, pEngstate) kperfIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kperfGpuBoostSyncStateInit_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf); + +#ifdef __nvoc_kern_perf_h_disabled +static inline NV_STATUS kperfGpuBoostSyncStateInit(OBJGPU *pGpu, struct KernelPerf *pKernelPerf) { + NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_perf_h_disabled +#define kperfGpuBoostSyncStateInit(pGpu, pKernelPerf) kperfGpuBoostSyncStateInit_IMPL(pGpu, pKernelPerf) +#endif //__nvoc_kern_perf_h_disabled + +#define kperfGpuBoostSyncStateInit_HAL(pGpu, pKernelPerf) kperfGpuBoostSyncStateInit(pGpu, pKernelPerf) + +NV_STATUS kperfBoostSet_3x(struct KernelPerf *pKernelPerf, struct Subdevice *pSubdevice, NV2080_CTRL_PERF_BOOST_PARAMS *pBoostParams); + +#ifdef __nvoc_kern_perf_h_disabled +static inline NV_STATUS kperfBoostSet(struct KernelPerf *pKernelPerf, struct Subdevice *pSubdevice, NV2080_CTRL_PERF_BOOST_PARAMS *pBoostParams) { + NV_ASSERT_FAILED_PRECOMP("KernelPerf was 
disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_perf_h_disabled +#define kperfBoostSet(pKernelPerf, pSubdevice, pBoostParams) kperfBoostSet_3x(pKernelPerf, pSubdevice, pBoostParams) +#endif //__nvoc_kern_perf_h_disabled + +#define kperfBoostSet_HAL(pKernelPerf, pSubdevice, pBoostParams) kperfBoostSet(pKernelPerf, pSubdevice, pBoostParams) + +NV_STATUS kperfConstructEngine_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, ENGDESCRIPTOR engDesc); + +static inline NV_STATUS kperfConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, ENGDESCRIPTOR engDesc) { + return pKernelPerf->__kperfConstructEngine__(pGpu, pKernelPerf, engDesc); +} + +NV_STATUS kperfStateInitLocked_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf); + +static inline NV_STATUS kperfStateInitLocked_DISPATCH(OBJGPU *pGpu, struct KernelPerf *pKernelPerf) { + return pKernelPerf->__kperfStateInitLocked__(pGpu, pKernelPerf); +} + +NV_STATUS kperfStateLoad_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 flags); + +static inline NV_STATUS kperfStateLoad_DISPATCH(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 flags) { + return pKernelPerf->__kperfStateLoad__(pGpu, pKernelPerf, flags); +} + +NV_STATUS kperfStateUnload_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 flags); + +static inline NV_STATUS kperfStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 flags) { + return pKernelPerf->__kperfStateUnload__(pGpu, pKernelPerf, flags); +} + +static inline NV_STATUS kperfReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunableState) { + return pEngstate->__kperfReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kperfStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) { + return pEngstate->__kperfStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kperfStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) { + return pEngstate->__kperfStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void kperfStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) { + pEngstate->__kperfStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS kperfStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) { + return pEngstate->__kperfStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kperfStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) { + return pEngstate->__kperfStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kperfInitMissing_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) { + pEngstate->__kperfInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kperfStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) { + return pEngstate->__kperfStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kperfStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) { + return pEngstate->__kperfStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kperfGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunableState) { + return pEngstate->__kperfGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kperfCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kperfCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline 
void kperfFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunableState) { + pEngstate->__kperfFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kperfStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, NvU32 arg0) { + return pEngstate->__kperfStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kperfAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, void **ppTunableState) { + return pEngstate->__kperfAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kperfSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate, void *pTunableState) { + return pEngstate->__kperfSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kperfIsPresent_DISPATCH(POBJGPU pGpu, struct KernelPerf *pEngstate) { + return pEngstate->__kperfIsPresent__(pGpu, pEngstate); +} + +NV_STATUS kperfBoostStateInit_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf); +#ifdef __nvoc_kern_perf_h_disabled +static inline NV_STATUS kperfBoostStateInit(OBJGPU *pGpu, struct KernelPerf *pKernelPerf) { + NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_perf_h_disabled +#define kperfBoostStateInit(pGpu, pKernelPerf) kperfBoostStateInit_IMPL(pGpu, pKernelPerf) +#endif //__nvoc_kern_perf_h_disabled + +NV_STATUS kperfGpuBoostSyncActivate_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvBool bActivate); +#ifdef __nvoc_kern_perf_h_disabled +static inline NV_STATUS kperfGpuBoostSyncActivate(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvBool bActivate) { + NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_perf_h_disabled +#define kperfGpuBoostSyncActivate(pGpu, pKernelPerf, bActivate) kperfGpuBoostSyncActivate_IMPL(pGpu, pKernelPerf, bActivate) +#endif //__nvoc_kern_perf_h_disabled + +NV_STATUS kperfDoSyncGpuBoostLimits_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS *pParams); +#ifdef __nvoc_kern_perf_h_disabled +static inline NV_STATUS kperfDoSyncGpuBoostLimits(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_perf_h_disabled +#define kperfDoSyncGpuBoostLimits(pGpu, pKernelPerf, pParams) kperfDoSyncGpuBoostLimits_IMPL(pGpu, pKernelPerf, pParams) +#endif //__nvoc_kern_perf_h_disabled + +NV_STATUS kperfReentrancy_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 function, NvBool bSet); +#ifdef __nvoc_kern_perf_h_disabled +static inline NV_STATUS kperfReentrancy(OBJGPU *pGpu, struct KernelPerf *pKernelPerf, NvU32 function, NvBool bSet) { + NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_perf_h_disabled +#define kperfReentrancy(pGpu, pKernelPerf, function, bSet) kperfReentrancy_IMPL(pGpu, pKernelPerf, function, bSet) +#endif //__nvoc_kern_perf_h_disabled + +void kperfTimer1HzCallback_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf); +#ifdef __nvoc_kern_perf_h_disabled +static inline void kperfTimer1HzCallback(OBJGPU *pGpu, struct KernelPerf *pKernelPerf) { + NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!"); +} +#else //__nvoc_kern_perf_h_disabled +#define kperfTimer1HzCallback(pGpu, pKernelPerf) kperfTimer1HzCallback_IMPL(pGpu, pKernelPerf) 
+#endif //__nvoc_kern_perf_h_disabled + +void kperfBoostHintCallback_IMPL(OBJGPU *pGpu, struct KernelPerf *pKernelPerf); +#ifdef __nvoc_kern_perf_h_disabled +static inline void kperfBoostHintCallback(OBJGPU *pGpu, struct KernelPerf *pKernelPerf) { + NV_ASSERT_FAILED_PRECOMP("KernelPerf was disabled!"); +} +#else //__nvoc_kern_perf_h_disabled +#define kperfBoostHintCallback(pGpu, pKernelPerf) kperfBoostHintCallback_IMPL(pGpu, pKernelPerf) +#endif //__nvoc_kern_perf_h_disabled + +#undef PRIVATE_FIELD + + +/* ------------------------ External Definitions --------------------------- */ +/* ------------------------ Function Prototypes ---------------------------- */ +/* ------------------------ Include Derived Types -------------------------- */ + +#endif // KERNEL_PERF_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERN_PERF_NVOC_H_ diff --git a/src/nvidia/generated/g_kern_perfbuffer_nvoc.c b/src/nvidia/generated/g_kern_perfbuffer_nvoc.c new file mode 100644 index 000000000..5656f4166 --- /dev/null +++ b/src/nvidia/generated/g_kern_perfbuffer_nvoc.c @@ -0,0 +1,343 @@ +#define NVOC_KERN_PERFBUFFER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_perfbuffer_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4bc43b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_PerfBuffer; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_PerfBuffer(PerfBuffer*, RmHalspecOwner* ); +void __nvoc_init_funcTable_PerfBuffer(PerfBuffer*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_PerfBuffer(PerfBuffer*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_PerfBuffer(PerfBuffer*, RmHalspecOwner* ); +void __nvoc_dtor_PerfBuffer(PerfBuffer*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_PerfBuffer; + +static const struct NVOC_RTTI __nvoc_rtti_PerfBuffer_PerfBuffer = { + /*pClassDef=*/ &__nvoc_class_def_PerfBuffer, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_PerfBuffer, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_PerfBuffer_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PerfBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_PerfBuffer_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PerfBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_PerfBuffer_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PerfBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_PerfBuffer_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PerfBuffer, 
__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_PerfBuffer_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PerfBuffer, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_PerfBuffer = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_PerfBuffer_PerfBuffer, + &__nvoc_rtti_PerfBuffer_GpuResource, + &__nvoc_rtti_PerfBuffer_RmResource, + &__nvoc_rtti_PerfBuffer_RmResourceCommon, + &__nvoc_rtti_PerfBuffer_RsResource, + &__nvoc_rtti_PerfBuffer_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_PerfBuffer = +{ + /*classInfo=*/ { + /*size=*/ sizeof(PerfBuffer), + /*classId=*/ classId(PerfBuffer), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "PerfBuffer", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_PerfBuffer, + /*pCastInfo=*/ &__nvoc_castinfo_PerfBuffer, + /*pExportInfo=*/ &__nvoc_export_info_PerfBuffer +}; + +static NvBool __nvoc_thunk_GpuResource_perfbufferShareCallback(struct PerfBuffer *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_PerfBuffer_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_perfbufferControl(struct PerfBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_PerfBuffer_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_perfbufferUnmap(struct PerfBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_PerfBuffer_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_perfbufferGetMemInterMapParams(struct PerfBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_PerfBuffer_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_perfbufferGetMemoryMappingDescriptor(struct PerfBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_PerfBuffer_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_perfbufferGetMapAddrSpace(struct PerfBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_PerfBuffer_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_perfbufferGetInternalObjectHandle(struct PerfBuffer *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_PerfBuffer_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_perfbufferControlFilter(struct PerfBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource 
*)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_perfbufferAddAdditionalDependants(struct RsClient *pClient, struct PerfBuffer *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_perfbufferGetRefCount(struct PerfBuffer *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_perfbufferCheckMemInterUnmap(struct PerfBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_PerfBuffer_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_perfbufferMapTo(struct PerfBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_perfbufferControl_Prologue(struct PerfBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_perfbufferGetRegBaseOffsetAndSize(struct PerfBuffer *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_PerfBuffer_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_perfbufferCanCopy(struct PerfBuffer *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_perfbufferInternalControlForward(struct PerfBuffer *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_PerfBuffer_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_perfbufferPreDestruct(struct PerfBuffer *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_perfbufferUnmapFrom(struct PerfBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_perfbufferControl_Epilogue(struct PerfBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_perfbufferControlLookup(struct PerfBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS 
__nvoc_thunk_GpuResource_perfbufferMap(struct PerfBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_PerfBuffer_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_perfbufferAccessCallback(struct PerfBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_PerfBuffer_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_PerfBuffer = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_PerfBuffer(PerfBuffer *pThis) { + __nvoc_perfbufferDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_PerfBuffer(PerfBuffer *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_PerfBuffer(PerfBuffer *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_PerfBuffer_fail_GpuResource; + __nvoc_init_dataField_PerfBuffer(pThis, pRmhalspecowner); + + status = __nvoc_perfbufferConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_PerfBuffer_fail__init; + goto __nvoc_ctor_PerfBuffer_exit; // Success + +__nvoc_ctor_PerfBuffer_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_PerfBuffer_fail_GpuResource: +__nvoc_ctor_PerfBuffer_exit: + + return status; +} + +static void __nvoc_init_funcTable_PerfBuffer_1(PerfBuffer *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__perfbufferShareCallback__ = &__nvoc_thunk_GpuResource_perfbufferShareCallback; + + pThis->__perfbufferControl__ = &__nvoc_thunk_GpuResource_perfbufferControl; + + pThis->__perfbufferUnmap__ = &__nvoc_thunk_GpuResource_perfbufferUnmap; + + pThis->__perfbufferGetMemInterMapParams__ = &__nvoc_thunk_RmResource_perfbufferGetMemInterMapParams; + + pThis->__perfbufferGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_perfbufferGetMemoryMappingDescriptor; + + pThis->__perfbufferGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_perfbufferGetMapAddrSpace; + + pThis->__perfbufferGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_perfbufferGetInternalObjectHandle; + + 
pThis->__perfbufferControlFilter__ = &__nvoc_thunk_RsResource_perfbufferControlFilter; + + pThis->__perfbufferAddAdditionalDependants__ = &__nvoc_thunk_RsResource_perfbufferAddAdditionalDependants; + + pThis->__perfbufferGetRefCount__ = &__nvoc_thunk_RsResource_perfbufferGetRefCount; + + pThis->__perfbufferCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_perfbufferCheckMemInterUnmap; + + pThis->__perfbufferMapTo__ = &__nvoc_thunk_RsResource_perfbufferMapTo; + + pThis->__perfbufferControl_Prologue__ = &__nvoc_thunk_RmResource_perfbufferControl_Prologue; + + pThis->__perfbufferGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_perfbufferGetRegBaseOffsetAndSize; + + pThis->__perfbufferCanCopy__ = &__nvoc_thunk_RsResource_perfbufferCanCopy; + + pThis->__perfbufferInternalControlForward__ = &__nvoc_thunk_GpuResource_perfbufferInternalControlForward; + + pThis->__perfbufferPreDestruct__ = &__nvoc_thunk_RsResource_perfbufferPreDestruct; + + pThis->__perfbufferUnmapFrom__ = &__nvoc_thunk_RsResource_perfbufferUnmapFrom; + + pThis->__perfbufferControl_Epilogue__ = &__nvoc_thunk_RmResource_perfbufferControl_Epilogue; + + pThis->__perfbufferControlLookup__ = &__nvoc_thunk_RsResource_perfbufferControlLookup; + + pThis->__perfbufferMap__ = &__nvoc_thunk_GpuResource_perfbufferMap; + + pThis->__perfbufferAccessCallback__ = &__nvoc_thunk_RmResource_perfbufferAccessCallback; +} + +void __nvoc_init_funcTable_PerfBuffer(PerfBuffer *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_PerfBuffer_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_PerfBuffer(PerfBuffer *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_PerfBuffer = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_PerfBuffer(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_PerfBuffer(PerfBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + PerfBuffer *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(PerfBuffer)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(PerfBuffer)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_PerfBuffer); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_PerfBuffer(pThis, pRmhalspecowner); + status 
= __nvoc_ctor_PerfBuffer(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_PerfBuffer_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_PerfBuffer_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_PerfBuffer(PerfBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_PerfBuffer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kern_perfbuffer_nvoc.h b/src/nvidia/generated/g_kern_perfbuffer_nvoc.h new file mode 100644 index 000000000..2f0dccd41 --- /dev/null +++ b/src/nvidia/generated/g_kern_perfbuffer_nvoc.h @@ -0,0 +1,261 @@ +#ifndef _G_KERN_PERFBUFFER_NVOC_H_ +#define _G_KERN_PERFBUFFER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kern_perfbuffer_nvoc.h" + +#ifndef KERN_PERFBUFFER_H +#define KERN_PERFBUFFER_H + +#include "rmapi/client.h" +#include "gpu/gpu_resource.h" +#include "gpu/gpu_halspec.h" + +/*! 
+ * Definition of PerfBuffer resource class + */ +#ifdef NVOC_KERN_PERFBUFFER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct PerfBuffer { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct PerfBuffer *__nvoc_pbase_PerfBuffer; + NvBool (*__perfbufferShareCallback__)(struct PerfBuffer *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__perfbufferControl__)(struct PerfBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__perfbufferUnmap__)(struct PerfBuffer *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__perfbufferGetMemInterMapParams__)(struct PerfBuffer *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__perfbufferGetMemoryMappingDescriptor__)(struct PerfBuffer *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__perfbufferGetMapAddrSpace__)(struct PerfBuffer *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__perfbufferGetInternalObjectHandle__)(struct PerfBuffer *); + NV_STATUS (*__perfbufferControlFilter__)(struct PerfBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__perfbufferAddAdditionalDependants__)(struct RsClient *, struct PerfBuffer *, RsResourceRef *); + NvU32 (*__perfbufferGetRefCount__)(struct PerfBuffer *); + NV_STATUS (*__perfbufferCheckMemInterUnmap__)(struct PerfBuffer *, NvBool); + NV_STATUS (*__perfbufferMapTo__)(struct PerfBuffer *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__perfbufferControl_Prologue__)(struct PerfBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__perfbufferGetRegBaseOffsetAndSize__)(struct PerfBuffer *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__perfbufferCanCopy__)(struct PerfBuffer *); + NV_STATUS (*__perfbufferInternalControlForward__)(struct PerfBuffer *, NvU32, void *, NvU32); + void (*__perfbufferPreDestruct__)(struct PerfBuffer *); + NV_STATUS (*__perfbufferUnmapFrom__)(struct PerfBuffer *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__perfbufferControl_Epilogue__)(struct PerfBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__perfbufferControlLookup__)(struct PerfBuffer *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__perfbufferMap__)(struct PerfBuffer *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__perfbufferAccessCallback__)(struct PerfBuffer *, struct RsClient *, void *, RsAccessRight); + void *pObject; +}; + +#ifndef __NVOC_CLASS_PerfBuffer_TYPEDEF__ +#define __NVOC_CLASS_PerfBuffer_TYPEDEF__ +typedef struct PerfBuffer PerfBuffer; +#endif /* __NVOC_CLASS_PerfBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PerfBuffer +#define __nvoc_class_id_PerfBuffer 0x4bc43b +#endif /* __nvoc_class_id_PerfBuffer */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_PerfBuffer; + +#define __staticCast_PerfBuffer(pThis) \ + ((pThis)->__nvoc_pbase_PerfBuffer) + +#ifdef __nvoc_kern_perfbuffer_h_disabled +#define __dynamicCast_PerfBuffer(pThis) ((PerfBuffer*)NULL) +#else //__nvoc_kern_perfbuffer_h_disabled +#define __dynamicCast_PerfBuffer(pThis) \ + ((PerfBuffer*)__nvoc_dynamicCast(staticCast((pThis), 
Dynamic), classInfo(PerfBuffer))) +#endif //__nvoc_kern_perfbuffer_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_PerfBuffer(PerfBuffer**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_PerfBuffer(PerfBuffer**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_PerfBuffer(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_PerfBuffer((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define perfbufferShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) perfbufferShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define perfbufferControl(pGpuResource, pCallContext, pParams) perfbufferControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define perfbufferUnmap(pGpuResource, pCallContext, pCpuMapping) perfbufferUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define perfbufferGetMemInterMapParams(pRmResource, pParams) perfbufferGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define perfbufferGetMemoryMappingDescriptor(pRmResource, ppMemDesc) perfbufferGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define perfbufferGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) perfbufferGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define perfbufferGetInternalObjectHandle(pGpuResource) perfbufferGetInternalObjectHandle_DISPATCH(pGpuResource) +#define perfbufferControlFilter(pResource, pCallContext, pParams) perfbufferControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define perfbufferAddAdditionalDependants(pClient, pResource, pReference) perfbufferAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define perfbufferGetRefCount(pResource) perfbufferGetRefCount_DISPATCH(pResource) +#define perfbufferCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) perfbufferCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define perfbufferMapTo(pResource, pParams) perfbufferMapTo_DISPATCH(pResource, pParams) +#define perfbufferControl_Prologue(pResource, pCallContext, pParams) perfbufferControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define perfbufferGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) perfbufferGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define perfbufferCanCopy(pResource) perfbufferCanCopy_DISPATCH(pResource) +#define perfbufferInternalControlForward(pGpuResource, command, pParams, size) perfbufferInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define perfbufferPreDestruct(pResource) perfbufferPreDestruct_DISPATCH(pResource) +#define perfbufferUnmapFrom(pResource, pParams) perfbufferUnmapFrom_DISPATCH(pResource, pParams) +#define perfbufferControl_Epilogue(pResource, pCallContext, pParams) perfbufferControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define perfbufferControlLookup(pResource, pParams, ppEntry) perfbufferControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define perfbufferMap(pGpuResource, pCallContext, pParams, pCpuMapping) perfbufferMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define perfbufferAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) perfbufferAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS perfbufferConstructHal_KERNEL(struct PerfBuffer *pResource, struct CALL_CONTEXT 
*pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_kern_perfbuffer_h_disabled +static inline NV_STATUS perfbufferConstructHal(struct PerfBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("PerfBuffer was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_perfbuffer_h_disabled +#define perfbufferConstructHal(pResource, pCallContext, pParams) perfbufferConstructHal_KERNEL(pResource, pCallContext, pParams) +#endif //__nvoc_kern_perfbuffer_h_disabled + +#define perfbufferConstructHal_HAL(pResource, pCallContext, pParams) perfbufferConstructHal(pResource, pCallContext, pParams) + +static inline void perfbufferDestruct_b3696a(struct PerfBuffer *pResource) { + return; +} + +#define __nvoc_perfbufferDestruct(pResource) perfbufferDestruct_b3696a(pResource) +static inline NvBool perfbufferShareCallback_DISPATCH(struct PerfBuffer *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__perfbufferShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS perfbufferControl_DISPATCH(struct PerfBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__perfbufferControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS perfbufferUnmap_DISPATCH(struct PerfBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__perfbufferUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS perfbufferGetMemInterMapParams_DISPATCH(struct PerfBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__perfbufferGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS perfbufferGetMemoryMappingDescriptor_DISPATCH(struct PerfBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__perfbufferGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS perfbufferGetMapAddrSpace_DISPATCH(struct PerfBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__perfbufferGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle perfbufferGetInternalObjectHandle_DISPATCH(struct PerfBuffer *pGpuResource) { + return pGpuResource->__perfbufferGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS perfbufferControlFilter_DISPATCH(struct PerfBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__perfbufferControlFilter__(pResource, pCallContext, pParams); +} + +static inline void perfbufferAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct PerfBuffer *pResource, RsResourceRef *pReference) { + pResource->__perfbufferAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 perfbufferGetRefCount_DISPATCH(struct PerfBuffer *pResource) { + return pResource->__perfbufferGetRefCount__(pResource); +} + +static inline NV_STATUS perfbufferCheckMemInterUnmap_DISPATCH(struct PerfBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__perfbufferCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS perfbufferMapTo_DISPATCH(struct PerfBuffer 
*pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__perfbufferMapTo__(pResource, pParams); +} + +static inline NV_STATUS perfbufferControl_Prologue_DISPATCH(struct PerfBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__perfbufferControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS perfbufferGetRegBaseOffsetAndSize_DISPATCH(struct PerfBuffer *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__perfbufferGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool perfbufferCanCopy_DISPATCH(struct PerfBuffer *pResource) { + return pResource->__perfbufferCanCopy__(pResource); +} + +static inline NV_STATUS perfbufferInternalControlForward_DISPATCH(struct PerfBuffer *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__perfbufferInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void perfbufferPreDestruct_DISPATCH(struct PerfBuffer *pResource) { + pResource->__perfbufferPreDestruct__(pResource); +} + +static inline NV_STATUS perfbufferUnmapFrom_DISPATCH(struct PerfBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__perfbufferUnmapFrom__(pResource, pParams); +} + +static inline void perfbufferControl_Epilogue_DISPATCH(struct PerfBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__perfbufferControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS perfbufferControlLookup_DISPATCH(struct PerfBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__perfbufferControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS perfbufferMap_DISPATCH(struct PerfBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__perfbufferMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool perfbufferAccessCallback_DISPATCH(struct PerfBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__perfbufferAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS __nvoc_perfbufferConstruct(struct PerfBuffer *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams) { + return perfbufferConstructHal(arg_pResource, arg_pCallContext, arg_pParams); +} + +NV_STATUS perfbufferPrivilegeCheck_IMPL(struct PerfBuffer *pPerfBuffer); +#ifdef __nvoc_kern_perfbuffer_h_disabled +static inline NV_STATUS perfbufferPrivilegeCheck(struct PerfBuffer *pPerfBuffer) { + NV_ASSERT_FAILED_PRECOMP("PerfBuffer was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_perfbuffer_h_disabled +#define perfbufferPrivilegeCheck(pPerfBuffer) perfbufferPrivilegeCheck_IMPL(pPerfBuffer) +#endif //__nvoc_kern_perfbuffer_h_disabled + +#undef PRIVATE_FIELD + + +#endif // KERN_PERFBUFFER_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERN_PERFBUFFER_NVOC_H_ diff --git a/src/nvidia/generated/g_kern_pmu_nvoc.c b/src/nvidia/generated/g_kern_pmu_nvoc.c new file mode 100644 index 000000000..4820c1961 --- /dev/null +++ b/src/nvidia/generated/g_kern_pmu_nvoc.c @@ -0,0 +1,281 @@ +#define 
NVOC_KERN_PMU_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kern_pmu_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xab9d7d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPmu; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelPmu(KernelPmu*); +void __nvoc_init_funcTable_KernelPmu(KernelPmu*); +NV_STATUS __nvoc_ctor_KernelPmu(KernelPmu*); +void __nvoc_init_dataField_KernelPmu(KernelPmu*); +void __nvoc_dtor_KernelPmu(KernelPmu*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelPmu; + +static const struct NVOC_RTTI __nvoc_rtti_KernelPmu_KernelPmu = { + /*pClassDef=*/ &__nvoc_class_def_KernelPmu, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelPmu, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelPmu_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelPmu, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelPmu_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelPmu, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelPmu = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelPmu_KernelPmu, + &__nvoc_rtti_KernelPmu_OBJENGSTATE, + &__nvoc_rtti_KernelPmu_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPmu = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelPmu), + /*classId=*/ classId(KernelPmu), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelPmu", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelPmu, + /*pCastInfo=*/ &__nvoc_castinfo_KernelPmu, + /*pExportInfo=*/ &__nvoc_export_info_KernelPmu +}; + +static NV_STATUS __nvoc_thunk_KernelPmu_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelPmu, ENGDESCRIPTOR engDesc) { + return kpmuConstructEngine(pGpu, (struct KernelPmu *)(((unsigned char *)pKernelPmu) - __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), engDesc); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuReconcileTunableState(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStateLoad(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStateUnload(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStateInitLocked(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePreLoad(POBJGPU pGpu, 
struct KernelPmu *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePostUnload(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_kpmuStateDestroy(POBJGPU pGpu, struct KernelPmu *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePreUnload(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStateInitUnlocked(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kpmuInitMissing(POBJGPU pGpu, struct KernelPmu *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePreInitLocked(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePreInitUnlocked(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuGetTunableState(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuCompareTunableState(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kpmuFreeTunableState(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuStatePostLoad(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuAllocTunableState(POBJGPU pGpu, struct KernelPmu *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kpmuSetTunableState(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunableState) { + return 
engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kpmuIsPresent(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelPmu_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelPmu = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelPmu(KernelPmu *pThis) { + __nvoc_kpmuDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelPmu(KernelPmu *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelPmu(KernelPmu *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelPmu_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelPmu(pThis); + goto __nvoc_ctor_KernelPmu_exit; // Success + +__nvoc_ctor_KernelPmu_fail_OBJENGSTATE: +__nvoc_ctor_KernelPmu_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelPmu_1(KernelPmu *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__kpmuConstructEngine__ = &kpmuConstructEngine_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelPmu_engstateConstructEngine; + + pThis->__kpmuReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kpmuReconcileTunableState; + + pThis->__kpmuStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateLoad; + + pThis->__kpmuStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateUnload; + + pThis->__kpmuStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateInitLocked; + + pThis->__kpmuStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePreLoad; + + pThis->__kpmuStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePostUnload; + + pThis->__kpmuStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateDestroy; + + pThis->__kpmuStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePreUnload; + + pThis->__kpmuStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kpmuStateInitUnlocked; + + pThis->__kpmuInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kpmuInitMissing; + + pThis->__kpmuStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePreInitLocked; + + pThis->__kpmuStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePreInitUnlocked; + + pThis->__kpmuGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kpmuGetTunableState; + + pThis->__kpmuCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kpmuCompareTunableState; + + pThis->__kpmuFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kpmuFreeTunableState; + + pThis->__kpmuStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kpmuStatePostLoad; + + pThis->__kpmuAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kpmuAllocTunableState; + + pThis->__kpmuSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kpmuSetTunableState; + + pThis->__kpmuIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kpmuIsPresent; +} + +void __nvoc_init_funcTable_KernelPmu(KernelPmu *pThis) { + __nvoc_init_funcTable_KernelPmu_1(pThis); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelPmu(KernelPmu *pThis) { + pThis->__nvoc_pbase_KernelPmu = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + 
__nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelPmu(pThis); +} + +NV_STATUS __nvoc_objCreate_KernelPmu(KernelPmu **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelPmu *pThis; + + pThis = portMemAllocNonPaged(sizeof(KernelPmu)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelPmu)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelPmu); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_KernelPmu(pThis); + status = __nvoc_ctor_KernelPmu(pThis); + if (status != NV_OK) goto __nvoc_objCreate_KernelPmu_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelPmu_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelPmu(KernelPmu **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelPmu(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kern_pmu_nvoc.h b/src/nvidia/generated/g_kern_pmu_nvoc.h new file mode 100644 index 000000000..fb802d79d --- /dev/null +++ b/src/nvidia/generated/g_kern_pmu_nvoc.h @@ -0,0 +1,263 @@ +#ifndef _G_KERN_PMU_NVOC_H_ +#define _G_KERN_PMU_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kern_pmu_nvoc.h" + +#ifndef KERNEL_PMU_H +#define KERNEL_PMU_H + +/****************************************************************************** +* +* Kernel Pmu module header +* This file contains functions managing PMU core on CPU RM +* +******************************************************************************/ + +#include "gpu/gpu.h" +#include "gpu/eng_state.h" +#include "logdecode.h" + +#define PMU_LOG_BUFFER_MAX_SIZE 0x1000 + +/*! + * KernelPmu is a logical abstraction of the GPU Pmu Engine. 
The + * Public API of the Pmu Engine is exposed through this object, and any + * interfaces which do not manage the underlying Pmu hardware can be + * managed by this object. + */ +#ifdef NVOC_KERN_PMU_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelPmu { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelPmu *__nvoc_pbase_KernelPmu; + NV_STATUS (*__kpmuConstructEngine__)(struct OBJGPU *, struct KernelPmu *, ENGDESCRIPTOR); + NV_STATUS (*__kpmuReconcileTunableState__)(POBJGPU, struct KernelPmu *, void *); + NV_STATUS (*__kpmuStateLoad__)(POBJGPU, struct KernelPmu *, NvU32); + NV_STATUS (*__kpmuStateUnload__)(POBJGPU, struct KernelPmu *, NvU32); + NV_STATUS (*__kpmuStateInitLocked__)(POBJGPU, struct KernelPmu *); + NV_STATUS (*__kpmuStatePreLoad__)(POBJGPU, struct KernelPmu *, NvU32); + NV_STATUS (*__kpmuStatePostUnload__)(POBJGPU, struct KernelPmu *, NvU32); + void (*__kpmuStateDestroy__)(POBJGPU, struct KernelPmu *); + NV_STATUS (*__kpmuStatePreUnload__)(POBJGPU, struct KernelPmu *, NvU32); + NV_STATUS (*__kpmuStateInitUnlocked__)(POBJGPU, struct KernelPmu *); + void (*__kpmuInitMissing__)(POBJGPU, struct KernelPmu *); + NV_STATUS (*__kpmuStatePreInitLocked__)(POBJGPU, struct KernelPmu *); + NV_STATUS (*__kpmuStatePreInitUnlocked__)(POBJGPU, struct KernelPmu *); + NV_STATUS (*__kpmuGetTunableState__)(POBJGPU, struct KernelPmu *, void *); + NV_STATUS (*__kpmuCompareTunableState__)(POBJGPU, struct KernelPmu *, void *, void *); + void (*__kpmuFreeTunableState__)(POBJGPU, struct KernelPmu *, void *); + NV_STATUS (*__kpmuStatePostLoad__)(POBJGPU, struct KernelPmu *, NvU32); + NV_STATUS (*__kpmuAllocTunableState__)(POBJGPU, struct KernelPmu *, void **); + NV_STATUS (*__kpmuSetTunableState__)(POBJGPU, struct KernelPmu *, void *); + NvBool (*__kpmuIsPresent__)(POBJGPU, struct KernelPmu *); + LIBOS_LOG_DECODE logDecode; + NvU32 printBufSize; + NvU8 *pPrintBuf; + void *pLogElf; +}; + +#ifndef __NVOC_CLASS_KernelPmu_TYPEDEF__ +#define __NVOC_CLASS_KernelPmu_TYPEDEF__ +typedef struct KernelPmu KernelPmu; +#endif /* __NVOC_CLASS_KernelPmu_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelPmu +#define __nvoc_class_id_KernelPmu 0xab9d7d +#endif /* __nvoc_class_id_KernelPmu */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelPmu; + +#define __staticCast_KernelPmu(pThis) \ + ((pThis)->__nvoc_pbase_KernelPmu) + +#ifdef __nvoc_kern_pmu_h_disabled +#define __dynamicCast_KernelPmu(pThis) ((KernelPmu*)NULL) +#else //__nvoc_kern_pmu_h_disabled +#define __dynamicCast_KernelPmu(pThis) \ + ((KernelPmu*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelPmu))) +#endif //__nvoc_kern_pmu_h_disabled + +#define PDB_PROP_KPMU_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KPMU_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelPmu(KernelPmu**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelPmu(KernelPmu**, Dynamic*, NvU32); +#define __objCreate_KernelPmu(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelPmu((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kpmuConstructEngine(pGpu, pKernelPmu, engDesc) kpmuConstructEngine_DISPATCH(pGpu, pKernelPmu, engDesc) +#define kpmuReconcileTunableState(pGpu, pEngstate, pTunableState) kpmuReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kpmuStateLoad(pGpu, pEngstate, arg0) kpmuStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kpmuStateUnload(pGpu, pEngstate, arg0) kpmuStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kpmuStateInitLocked(pGpu, pEngstate) kpmuStateInitLocked_DISPATCH(pGpu, pEngstate) +#define kpmuStatePreLoad(pGpu, pEngstate, arg0) kpmuStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kpmuStatePostUnload(pGpu, pEngstate, arg0) kpmuStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kpmuStateDestroy(pGpu, pEngstate) kpmuStateDestroy_DISPATCH(pGpu, pEngstate) +#define kpmuStatePreUnload(pGpu, pEngstate, arg0) kpmuStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kpmuStateInitUnlocked(pGpu, pEngstate) kpmuStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kpmuInitMissing(pGpu, pEngstate) kpmuInitMissing_DISPATCH(pGpu, pEngstate) +#define kpmuStatePreInitLocked(pGpu, pEngstate) kpmuStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kpmuStatePreInitUnlocked(pGpu, pEngstate) kpmuStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kpmuGetTunableState(pGpu, pEngstate, pTunableState) kpmuGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kpmuCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kpmuCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kpmuFreeTunableState(pGpu, pEngstate, pTunableState) kpmuFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kpmuStatePostLoad(pGpu, pEngstate, arg0) kpmuStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kpmuAllocTunableState(pGpu, pEngstate, ppTunableState) kpmuAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kpmuSetTunableState(pGpu, pEngstate, pTunableState) kpmuSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kpmuIsPresent(pGpu, pEngstate) kpmuIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kpmuConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu, ENGDESCRIPTOR engDesc); + +static inline NV_STATUS kpmuConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu, ENGDESCRIPTOR engDesc) { + return pKernelPmu->__kpmuConstructEngine__(pGpu, pKernelPmu, engDesc); +} + +static inline NV_STATUS kpmuReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunableState) { + return pEngstate->__kpmuReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kpmuStateLoad_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return pEngstate->__kpmuStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kpmuStateUnload_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return pEngstate->__kpmuStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kpmuStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return pEngstate->__kpmuStateInitLocked__(pGpu, pEngstate); +} + 
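[Editor's note, not part of the diff: the generated headers in this commit repeat one idiom many times — every virtual method is a per-object function pointer (e.g. `__kpmuStateInitLocked__`) filled in by the generated `__nvoc_init_funcTable_*` routine, and a `static inline *_DISPATCH` wrapper simply forwards the call through that pointer, so calls on a base class land in the derived implementation. The following is a minimal standalone sketch of that pattern; all names (`Widget`, `widgetPing`, etc.) are invented for illustration and do not exist in the NVIDIA sources.]

```c
/* Minimal sketch of the NVOC-style func-table + _DISPATCH idiom. */
#include <stdio.h>

struct Widget;
typedef int (*WidgetPingFn)(struct Widget *);

struct Widget {
    /* "vtable" slot, analogous to pThis->__kpmuStateInitLocked__ */
    WidgetPingFn __widgetPing__;
};

/* Concrete implementation, analogous to a *_IMPL function. */
static int widgetPing_IMPL(struct Widget *pThis)
{
    (void)pThis;
    printf("ping handled by IMPL\n");
    return 0;
}

/* Dispatch wrapper, analogous to kpmuStateInitLocked_DISPATCH. */
static inline int widgetPing_DISPATCH(struct Widget *pThis)
{
    return pThis->__widgetPing__(pThis);
}

/* Func-table init, analogous to __nvoc_init_funcTable_KernelPmu_1. */
static void widgetInitFuncTable(struct Widget *pThis)
{
    pThis->__widgetPing__ = &widgetPing_IMPL;
}

int main(void)
{
    struct Widget w;
    widgetInitFuncTable(&w);
    return widgetPing_DISPATCH(&w);
}
```

[In the generated code above, the same split appears as `kpmu*_IMPL` / thunk functions installed into the func table and `kpmu*_DISPATCH` inlines that route through it.]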
+static inline NV_STATUS kpmuStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return pEngstate->__kpmuStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kpmuStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return pEngstate->__kpmuStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void kpmuStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) { + pEngstate->__kpmuStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS kpmuStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return pEngstate->__kpmuStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kpmuStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return pEngstate->__kpmuStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kpmuInitMissing_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) { + pEngstate->__kpmuInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kpmuStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return pEngstate->__kpmuStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kpmuStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return pEngstate->__kpmuStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kpmuGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunableState) { + return pEngstate->__kpmuGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kpmuCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kpmuCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kpmuFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunableState) { + pEngstate->__kpmuFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kpmuStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, NvU32 arg0) { + return pEngstate->__kpmuStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kpmuAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, void **ppTunableState) { + return pEngstate->__kpmuAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kpmuSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate, void *pTunableState) { + return pEngstate->__kpmuSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kpmuIsPresent_DISPATCH(POBJGPU pGpu, struct KernelPmu *pEngstate) { + return pEngstate->__kpmuIsPresent__(pGpu, pEngstate); +} + +void kpmuDestruct_IMPL(struct KernelPmu *pKernelPmu); +#define __nvoc_kpmuDestruct(pKernelPmu) kpmuDestruct_IMPL(pKernelPmu) +NV_STATUS kpmuInitLibosLoggingStructures_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu); +#ifdef __nvoc_kern_pmu_h_disabled +static inline NV_STATUS kpmuInitLibosLoggingStructures(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu) { + NV_ASSERT_FAILED_PRECOMP("KernelPmu was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kern_pmu_h_disabled +#define kpmuInitLibosLoggingStructures(pGpu, pKernelPmu) kpmuInitLibosLoggingStructures_IMPL(pGpu, pKernelPmu) +#endif //__nvoc_kern_pmu_h_disabled + +void kpmuFreeLibosLoggingStructures_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu); +#ifdef __nvoc_kern_pmu_h_disabled +static inline void 
kpmuFreeLibosLoggingStructures(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu) { + NV_ASSERT_FAILED_PRECOMP("KernelPmu was disabled!"); +} +#else //__nvoc_kern_pmu_h_disabled +#define kpmuFreeLibosLoggingStructures(pGpu, pKernelPmu) kpmuFreeLibosLoggingStructures_IMPL(pGpu, pKernelPmu) +#endif //__nvoc_kern_pmu_h_disabled + +void kpmuLogBuf_IMPL(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu, NvU8 *pBuf, NvU32 bufSize); +#ifdef __nvoc_kern_pmu_h_disabled +static inline void kpmuLogBuf(struct OBJGPU *pGpu, struct KernelPmu *pKernelPmu, NvU8 *pBuf, NvU32 bufSize) { + NV_ASSERT_FAILED_PRECOMP("KernelPmu was disabled!"); +} +#else //__nvoc_kern_pmu_h_disabled +#define kpmuLogBuf(pGpu, pKernelPmu, pBuf, bufSize) kpmuLogBuf_IMPL(pGpu, pKernelPmu, pBuf, bufSize) +#endif //__nvoc_kern_pmu_h_disabled + +#undef PRIVATE_FIELD + + +#endif // KERNEL_PMU_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERN_PMU_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_bif_nvoc.c b/src/nvidia/generated/g_kernel_bif_nvoc.c new file mode 100644 index 000000000..28927d5b2 --- /dev/null +++ b/src/nvidia/generated/g_kernel_bif_nvoc.c @@ -0,0 +1,410 @@ +#define NVOC_KERNEL_BIF_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_bif_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xdbe523 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelBif; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelBif(KernelBif*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelBif(KernelBif*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelBif(KernelBif*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelBif(KernelBif*, RmHalspecOwner* ); +void __nvoc_dtor_KernelBif(KernelBif*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelBif; + +static const struct NVOC_RTTI __nvoc_rtti_KernelBif_KernelBif = { + /*pClassDef=*/ &__nvoc_class_def_KernelBif, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelBif, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelBif_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelBif, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelBif_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelBif, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelBif = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelBif_KernelBif, + &__nvoc_rtti_KernelBif_OBJENGSTATE, + &__nvoc_rtti_KernelBif_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelBif = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelBif), + /*classId=*/ classId(KernelBif), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelBif", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelBif, + /*pCastInfo=*/ &__nvoc_castinfo_KernelBif, + /*pExportInfo=*/ &__nvoc_export_info_KernelBif +}; + +static NV_STATUS __nvoc_thunk_KernelBif_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelBif, ENGDESCRIPTOR arg0) { + return kbifConstructEngine(pGpu, 
(struct KernelBif *)(((unsigned char *)pKernelBif) - __nvoc_rtti_KernelBif_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_KernelBif_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelBif) { + return kbifStateInitLocked(pGpu, (struct KernelBif *)(((unsigned char *)pKernelBif) - __nvoc_rtti_KernelBif_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelBif_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelBif, NvU32 arg0) { + return kbifStateLoad(pGpu, (struct KernelBif *)(((unsigned char *)pKernelBif) - __nvoc_rtti_KernelBif_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_KernelBif_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelBif, NvU32 arg0) { + return kbifStateUnload(pGpu, (struct KernelBif *)(((unsigned char *)pKernelBif) - __nvoc_rtti_KernelBif_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifReconcileTunableState(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifStatePreLoad(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifStatePostUnload(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_kbifStateDestroy(POBJGPU pGpu, struct KernelBif *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifStatePreUnload(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifStateInitUnlocked(POBJGPU pGpu, struct KernelBif *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kbifInitMissing(POBJGPU pGpu, struct KernelBif *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifStatePreInitLocked(POBJGPU pGpu, struct KernelBif *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifStatePreInitUnlocked(POBJGPU pGpu, struct KernelBif *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifGetTunableState(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS 
__nvoc_thunk_OBJENGSTATE_kbifCompareTunableState(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kbifFreeTunableState(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifStatePostLoad(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifAllocTunableState(POBJGPU pGpu, struct KernelBif *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kbifSetTunableState(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kbifIsPresent(POBJGPU pGpu, struct KernelBif *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelBif_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelBif = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelBif(KernelBif *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelBif(KernelBif *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KBIF_IS_FMODEL_MSI_BROKEN + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KBIF_IS_FMODEL_MSI_BROKEN, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KBIF_IS_FMODEL_MSI_BROKEN, 
((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KBIF_USE_CONFIG_SPACE_TO_REARM_MSI + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KBIF_USE_CONFIG_SPACE_TO_REARM_MSI, ((NvBool)(0 == 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KBIF_P2P_READS_DISABLED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KBIF_P2P_READS_DISABLED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KBIF_P2P_WRITES_DISABLED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KBIF_P2P_WRITES_DISABLED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KBIF_UPSTREAM_LTR_SUPPORT_WAR_BUG_200634944 + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KBIF_UPSTREAM_LTR_SUPPORT_WAR_BUG_200634944, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KBIF_UPSTREAM_LTR_SUPPORT_WAR_BUG_200634944, ((NvBool)(0 != 0))); + } + pThis->setProperty(pThis, PDB_PROP_KBIF_SUPPORT_NONCOHERENT, ((NvBool)(0 == 0))); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelBif(KernelBif *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelBif_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelBif(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelBif_exit; // Success + +__nvoc_ctor_KernelBif_fail_OBJENGSTATE: +__nvoc_ctor_KernelBif_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelBif_1(KernelBif *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kbifConstructEngine__ = &kbifConstructEngine_IMPL; + + pThis->__kbifStateInitLocked__ = &kbifStateInitLocked_IMPL; + + // Hal function -- kbifStateLoad + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__kbifStateLoad__ = &kbifStateLoad_IMPL; + } + + // Hal function -- kbifStateUnload + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__kbifStateUnload__ = &kbifStateUnload_IMPL; + } + + // Hal function -- kbifIsPciIoAccessEnabled + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbifIsPciIoAccessEnabled__ = &kbifIsPciIoAccessEnabled_GM107; + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + 
pThis->__kbifIsPciIoAccessEnabled__ = &kbifIsPciIoAccessEnabled_491d52; + } + + // Hal function -- kbifApplyWARBug3208922 + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kbifApplyWARBug3208922__ = &kbifApplyWARBug3208922_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kbifApplyWARBug3208922__ = &kbifApplyWARBug3208922_b3696a; + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelBif_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelBif_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelBif_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelBif_engstateStateUnload; + + pThis->__kbifReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbifReconcileTunableState; + + pThis->__kbifStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kbifStatePreLoad; + + pThis->__kbifStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kbifStatePostUnload; + + pThis->__kbifStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kbifStateDestroy; + + pThis->__kbifStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kbifStatePreUnload; + + pThis->__kbifStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kbifStateInitUnlocked; + + pThis->__kbifInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kbifInitMissing; + + pThis->__kbifStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kbifStatePreInitLocked; + + pThis->__kbifStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kbifStatePreInitUnlocked; + + pThis->__kbifGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbifGetTunableState; + + pThis->__kbifCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbifCompareTunableState; + + pThis->__kbifFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbifFreeTunableState; + + pThis->__kbifStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kbifStatePostLoad; + + pThis->__kbifAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbifAllocTunableState; + + pThis->__kbifSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kbifSetTunableState; + + pThis->__kbifIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kbifIsPresent; +} + +void __nvoc_init_funcTable_KernelBif(KernelBif *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelBif_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelBif(KernelBif *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelBif = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelBif(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelBif(KernelBif **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelBif *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelBif)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelBif)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelBif); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + 
objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelBif(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelBif(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelBif_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelBif_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelBif(KernelBif **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelBif(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_bif_nvoc.h b/src/nvidia/generated/g_kernel_bif_nvoc.h new file mode 100644 index 000000000..f51345929 --- /dev/null +++ b/src/nvidia/generated/g_kernel_bif_nvoc.h @@ -0,0 +1,686 @@ +#ifndef _G_KERNEL_BIF_NVOC_H_ +#define _G_KERNEL_BIF_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* ------------------------ Includes ---------------------------------------- */ +#include "g_kernel_bif_nvoc.h" + +#ifndef KERNEL_BIF_H +#define KERNEL_BIF_H + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" +#include "gpu/intr/intr_service.h" +#include "gpu/intrable/intrable.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "rmpbicmdif.h" +#include "nvoc/utility.h" +#include "ctrl/ctrl2080/ctrl2080bus.h" + + +/* ------------------------ Types definitions ------------------------------ */ + +// PCIe config space size +#define PCIE_CONFIG_SPACE_SIZE 0x1000 + +// The default value of registry key ForceP2P override, ~0 means no registry key. 
+#define BIF_P2P_NOT_OVERRIDEN ((NvU32)~0) + +// DMA capabilities +#define BIF_DMA_CAPS_SNOOP 15:0 +#define BIF_DMA_CAPS_SNOOP_CTXDMA 0x1 +#define BIF_DMA_CAPS_NOSNOOP 31:16 +#define BIF_DMA_CAPS_NOSNOOP_CTXDMA 0x1 + +#define KBIF_CLEAR_XVE_AER_ALL_MASK (0xFFFFFFFF) + +#define kbifIsSnoopDmaCapable(pGpu, pKernelBif) ((REF_VAL(BIF_DMA_CAPS_SNOOP, \ + kbifGetDmaCaps(pGpu, pKernelBif)))) + +// XVE bus options +typedef enum BUS_OPTIONS +{ + BUS_OPTIONS_DEV_CONTROL_STATUS = 0, + BUS_OPTIONS_LINK_CONTROL_STATUS, + BUS_OPTIONS_LINK_CAPABILITIES + +} BUS_OPTIONS; + +typedef struct HOST_VGPU_DEVICE HOST_VGPU_DEVICE; + +#ifdef NVOC_KERNEL_BIF_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelBif { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelBif *__nvoc_pbase_KernelBif; + NV_STATUS (*__kbifConstructEngine__)(struct OBJGPU *, struct KernelBif *, ENGDESCRIPTOR); + NV_STATUS (*__kbifStateInitLocked__)(struct OBJGPU *, struct KernelBif *); + NV_STATUS (*__kbifStateLoad__)(struct OBJGPU *, struct KernelBif *, NvU32); + NV_STATUS (*__kbifStateUnload__)(struct OBJGPU *, struct KernelBif *, NvU32); + NvBool (*__kbifIsPciIoAccessEnabled__)(struct OBJGPU *, struct KernelBif *); + void (*__kbifApplyWARBug3208922__)(struct OBJGPU *, struct KernelBif *); + NV_STATUS (*__kbifReconcileTunableState__)(POBJGPU, struct KernelBif *, void *); + NV_STATUS (*__kbifStatePreLoad__)(POBJGPU, struct KernelBif *, NvU32); + NV_STATUS (*__kbifStatePostUnload__)(POBJGPU, struct KernelBif *, NvU32); + void (*__kbifStateDestroy__)(POBJGPU, struct KernelBif *); + NV_STATUS (*__kbifStatePreUnload__)(POBJGPU, struct KernelBif *, NvU32); + NV_STATUS (*__kbifStateInitUnlocked__)(POBJGPU, struct KernelBif *); + void (*__kbifInitMissing__)(POBJGPU, struct KernelBif *); + NV_STATUS (*__kbifStatePreInitLocked__)(POBJGPU, struct KernelBif *); + NV_STATUS (*__kbifStatePreInitUnlocked__)(POBJGPU, struct KernelBif *); + NV_STATUS (*__kbifGetTunableState__)(POBJGPU, struct KernelBif *, void *); + NV_STATUS (*__kbifCompareTunableState__)(POBJGPU, struct KernelBif *, void *, void *); + void (*__kbifFreeTunableState__)(POBJGPU, struct KernelBif *, void *); + NV_STATUS (*__kbifStatePostLoad__)(POBJGPU, struct KernelBif *, NvU32); + NV_STATUS (*__kbifAllocTunableState__)(POBJGPU, struct KernelBif *, void **); + NV_STATUS (*__kbifSetTunableState__)(POBJGPU, struct KernelBif *, void *); + NvBool (*__kbifIsPresent__)(POBJGPU, struct KernelBif *); + NvBool PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF; + NvBool PDB_PROP_KBIF_IS_MSI_ENABLED; + NvBool PDB_PROP_KBIF_IS_MSI_CACHED; + NvBool PDB_PROP_KBIF_IS_MSIX_ENABLED; + NvBool PDB_PROP_KBIF_IS_MSIX_CACHED; + NvBool PDB_PROP_KBIF_IS_FMODEL_MSI_BROKEN; + NvBool PDB_PROP_KBIF_USE_CONFIG_SPACE_TO_REARM_MSI; + NvBool PDB_PROP_KBIF_IS_C2C_LINK_UP; + NvBool PDB_PROP_KBIF_P2P_READS_DISABLED; + NvBool PDB_PROP_KBIF_P2P_WRITES_DISABLED; + NvBool PDB_PROP_KBIF_UPSTREAM_LTR_SUPPORT_WAR_BUG_200634944; + NvBool PDB_PROP_KBIF_SUPPORT_NONCOHERENT; + NvBool PDB_PROP_KBIF_PCIE_GEN4_CAPABLE; + NvU32 dmaCaps; + RmPhysAddr dmaWindowStartAddress; + NvU32 p2pOverride; + NvU32 forceP2PType; + NvBool peerMappingOverride; + NvBool EnteredRecoverySinceErrorsLastChecked; +}; + +#ifndef __NVOC_CLASS_KernelBif_TYPEDEF__ +#define __NVOC_CLASS_KernelBif_TYPEDEF__ +typedef struct KernelBif KernelBif; +#endif /* 
__NVOC_CLASS_KernelBif_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelBif +#define __nvoc_class_id_KernelBif 0xdbe523 +#endif /* __nvoc_class_id_KernelBif */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelBif; + +#define __staticCast_KernelBif(pThis) \ + ((pThis)->__nvoc_pbase_KernelBif) + +#ifdef __nvoc_kernel_bif_h_disabled +#define __dynamicCast_KernelBif(pThis) ((KernelBif*)NULL) +#else //__nvoc_kernel_bif_h_disabled +#define __dynamicCast_KernelBif(pThis) \ + ((KernelBif*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelBif))) +#endif //__nvoc_kernel_bif_h_disabled + +#define PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF_BASE_CAST +#define PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF_BASE_NAME PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF +#define PDB_PROP_KBIF_IS_C2C_LINK_UP_BASE_CAST +#define PDB_PROP_KBIF_IS_C2C_LINK_UP_BASE_NAME PDB_PROP_KBIF_IS_C2C_LINK_UP +#define PDB_PROP_KBIF_IS_MSIX_ENABLED_BASE_CAST +#define PDB_PROP_KBIF_IS_MSIX_ENABLED_BASE_NAME PDB_PROP_KBIF_IS_MSIX_ENABLED +#define PDB_PROP_KBIF_P2P_WRITES_DISABLED_BASE_CAST +#define PDB_PROP_KBIF_P2P_WRITES_DISABLED_BASE_NAME PDB_PROP_KBIF_P2P_WRITES_DISABLED +#define PDB_PROP_KBIF_USE_CONFIG_SPACE_TO_REARM_MSI_BASE_CAST +#define PDB_PROP_KBIF_USE_CONFIG_SPACE_TO_REARM_MSI_BASE_NAME PDB_PROP_KBIF_USE_CONFIG_SPACE_TO_REARM_MSI +#define PDB_PROP_KBIF_IS_MSI_ENABLED_BASE_CAST +#define PDB_PROP_KBIF_IS_MSI_ENABLED_BASE_NAME PDB_PROP_KBIF_IS_MSI_ENABLED +#define PDB_PROP_KBIF_UPSTREAM_LTR_SUPPORT_WAR_BUG_200634944_BASE_CAST +#define PDB_PROP_KBIF_UPSTREAM_LTR_SUPPORT_WAR_BUG_200634944_BASE_NAME PDB_PROP_KBIF_UPSTREAM_LTR_SUPPORT_WAR_BUG_200634944 +#define PDB_PROP_KBIF_IS_MSIX_CACHED_BASE_CAST +#define PDB_PROP_KBIF_IS_MSIX_CACHED_BASE_NAME PDB_PROP_KBIF_IS_MSIX_CACHED +#define PDB_PROP_KBIF_PCIE_GEN4_CAPABLE_BASE_CAST +#define PDB_PROP_KBIF_PCIE_GEN4_CAPABLE_BASE_NAME PDB_PROP_KBIF_PCIE_GEN4_CAPABLE +#define PDB_PROP_KBIF_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KBIF_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_KBIF_P2P_READS_DISABLED_BASE_CAST +#define PDB_PROP_KBIF_P2P_READS_DISABLED_BASE_NAME PDB_PROP_KBIF_P2P_READS_DISABLED +#define PDB_PROP_KBIF_IS_FMODEL_MSI_BROKEN_BASE_CAST +#define PDB_PROP_KBIF_IS_FMODEL_MSI_BROKEN_BASE_NAME PDB_PROP_KBIF_IS_FMODEL_MSI_BROKEN +#define PDB_PROP_KBIF_IS_MSI_CACHED_BASE_CAST +#define PDB_PROP_KBIF_IS_MSI_CACHED_BASE_NAME PDB_PROP_KBIF_IS_MSI_CACHED +#define PDB_PROP_KBIF_SUPPORT_NONCOHERENT_BASE_CAST +#define PDB_PROP_KBIF_SUPPORT_NONCOHERENT_BASE_NAME PDB_PROP_KBIF_SUPPORT_NONCOHERENT + +NV_STATUS __nvoc_objCreateDynamic_KernelBif(KernelBif**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelBif(KernelBif**, Dynamic*, NvU32); +#define __objCreate_KernelBif(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelBif((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kbifConstructEngine(pGpu, pKernelBif, arg0) kbifConstructEngine_DISPATCH(pGpu, pKernelBif, arg0) +#define kbifStateInitLocked(pGpu, pKernelBif) kbifStateInitLocked_DISPATCH(pGpu, pKernelBif) +#define kbifStateLoad(pGpu, pKernelBif, arg0) kbifStateLoad_DISPATCH(pGpu, pKernelBif, arg0) +#define kbifStateLoad_HAL(pGpu, pKernelBif, arg0) kbifStateLoad_DISPATCH(pGpu, pKernelBif, arg0) +#define kbifStateUnload(pGpu, pKernelBif, arg0) kbifStateUnload_DISPATCH(pGpu, pKernelBif, arg0) +#define kbifStateUnload_HAL(pGpu, pKernelBif, arg0) kbifStateUnload_DISPATCH(pGpu, pKernelBif, arg0) +#define kbifIsPciIoAccessEnabled(pGpu, pKernelBif) kbifIsPciIoAccessEnabled_DISPATCH(pGpu, pKernelBif) +#define kbifIsPciIoAccessEnabled_HAL(pGpu, pKernelBif) kbifIsPciIoAccessEnabled_DISPATCH(pGpu, pKernelBif) +#define kbifApplyWARBug3208922(pGpu, pKernelBif) kbifApplyWARBug3208922_DISPATCH(pGpu, pKernelBif) +#define kbifApplyWARBug3208922_HAL(pGpu, pKernelBif) kbifApplyWARBug3208922_DISPATCH(pGpu, pKernelBif) +#define kbifReconcileTunableState(pGpu, pEngstate, pTunableState) kbifReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kbifStatePreLoad(pGpu, pEngstate, arg0) kbifStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kbifStatePostUnload(pGpu, pEngstate, arg0) kbifStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kbifStateDestroy(pGpu, pEngstate) kbifStateDestroy_DISPATCH(pGpu, pEngstate) +#define kbifStatePreUnload(pGpu, pEngstate, arg0) kbifStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kbifStateInitUnlocked(pGpu, pEngstate) kbifStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kbifInitMissing(pGpu, pEngstate) kbifInitMissing_DISPATCH(pGpu, pEngstate) +#define kbifStatePreInitLocked(pGpu, pEngstate) kbifStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kbifStatePreInitUnlocked(pGpu, pEngstate) kbifStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kbifGetTunableState(pGpu, pEngstate, pTunableState) kbifGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kbifCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kbifCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kbifFreeTunableState(pGpu, pEngstate, pTunableState) kbifFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kbifStatePostLoad(pGpu, pEngstate, arg0) kbifStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kbifAllocTunableState(pGpu, pEngstate, ppTunableState) kbifAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kbifSetTunableState(pGpu, pEngstate, pTunableState) 
kbifSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kbifIsPresent(pGpu, pEngstate) kbifIsPresent_DISPATCH(pGpu, pEngstate) +static inline NvU32 kbifGetBusIntfType_2f2c74(struct KernelBif *pKernelBif) { + return (3); +} + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NvU32 kbifGetBusIntfType(struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return 0; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifGetBusIntfType(pKernelBif) kbifGetBusIntfType_2f2c74(pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifGetBusIntfType_HAL(pKernelBif) kbifGetBusIntfType(pKernelBif) + +void kbifInitDmaCaps_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifInitDmaCaps(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifInitDmaCaps(pGpu, pKernelBif) kbifInitDmaCaps_IMPL(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifInitDmaCaps_HAL(pGpu, pKernelBif) kbifInitDmaCaps(pGpu, pKernelBif) + +void kbifClearConfigErrors_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvBool arg0, NvU32 arg1); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifClearConfigErrors(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvBool arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifClearConfigErrors(pGpu, pKernelBif, arg0, arg1) kbifClearConfigErrors_IMPL(pGpu, pKernelBif, arg0, arg1) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifClearConfigErrors_HAL(pGpu, pKernelBif, arg0, arg1) kbifClearConfigErrors(pGpu, pKernelBif, arg0, arg1) + +NV_STATUS kbifGetXveStatusBits_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pBits, NvU32 *pStatus); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifGetXveStatusBits(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pBits, NvU32 *pStatus) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifGetXveStatusBits(pGpu, pKernelBif, pBits, pStatus) kbifGetXveStatusBits_GM107(pGpu, pKernelBif, pBits, pStatus) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifGetXveStatusBits_HAL(pGpu, pKernelBif, pBits, pStatus) kbifGetXveStatusBits(pGpu, pKernelBif, pBits, pStatus) + +NV_STATUS kbifClearXveStatus_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pStatus); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifClearXveStatus(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pStatus) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifClearXveStatus(pGpu, pKernelBif, pStatus) kbifClearXveStatus_GM107(pGpu, pKernelBif, pStatus) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifClearXveStatus_HAL(pGpu, pKernelBif, pStatus) kbifClearXveStatus(pGpu, pKernelBif, pStatus) + +NV_STATUS kbifGetXveAerBits_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pBits); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifGetXveAerBits(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pBits) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define 
kbifGetXveAerBits(pGpu, pKernelBif, pBits) kbifGetXveAerBits_GM107(pGpu, pKernelBif, pBits) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifGetXveAerBits_HAL(pGpu, pKernelBif, pBits) kbifGetXveAerBits(pGpu, pKernelBif, pBits) + +NV_STATUS kbifClearXveAer_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 bits); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifClearXveAer(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 bits) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifClearXveAer(pGpu, pKernelBif, bits) kbifClearXveAer_GM107(pGpu, pKernelBif, bits) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifClearXveAer_HAL(pGpu, pKernelBif, bits) kbifClearXveAer(pGpu, pKernelBif, bits) + +void kbifGetPcieConfigAccessTestRegisters_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pciStart, NvU32 *pcieStart); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifGetPcieConfigAccessTestRegisters(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pciStart, NvU32 *pcieStart) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifGetPcieConfigAccessTestRegisters(pGpu, pKernelBif, pciStart, pcieStart) kbifGetPcieConfigAccessTestRegisters_GM107(pGpu, pKernelBif, pciStart, pcieStart) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifGetPcieConfigAccessTestRegisters_HAL(pGpu, pKernelBif, pciStart, pcieStart) kbifGetPcieConfigAccessTestRegisters(pGpu, pKernelBif, pciStart, pcieStart) + +NV_STATUS kbifVerifyPcieConfigAccessTestRegisters_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 nvXveId, NvU32 nvXveVccapHdr); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifVerifyPcieConfigAccessTestRegisters(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 nvXveId, NvU32 nvXveVccapHdr) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifVerifyPcieConfigAccessTestRegisters(pGpu, pKernelBif, nvXveId, nvXveVccapHdr) kbifVerifyPcieConfigAccessTestRegisters_GM107(pGpu, pKernelBif, nvXveId, nvXveVccapHdr) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifVerifyPcieConfigAccessTestRegisters_HAL(pGpu, pKernelBif, nvXveId, nvXveVccapHdr) kbifVerifyPcieConfigAccessTestRegisters(pGpu, pKernelBif, nvXveId, nvXveVccapHdr) + +void kbifRearmMSI_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifRearmMSI(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifRearmMSI(pGpu, pKernelBif) kbifRearmMSI_GM107(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifRearmMSI_HAL(pGpu, pKernelBif) kbifRearmMSI(pGpu, pKernelBif) + +NvBool kbifIsMSIEnabledInHW_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NvBool kbifIsMSIEnabledInHW(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifIsMSIEnabledInHW(pGpu, pKernelBif) kbifIsMSIEnabledInHW_GM107(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifIsMSIEnabledInHW_HAL(pGpu, pKernelBif) kbifIsMSIEnabledInHW(pGpu, pKernelBif) + +NvBool 
kbifIsMSIXEnabledInHW_TU102(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NvBool kbifIsMSIXEnabledInHW(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifIsMSIXEnabledInHW(pGpu, pKernelBif) kbifIsMSIXEnabledInHW_TU102(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifIsMSIXEnabledInHW_HAL(pGpu, pKernelBif) kbifIsMSIXEnabledInHW(pGpu, pKernelBif) + +NvBool kbifIs3dController_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NvBool kbifIs3dController(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifIs3dController(pGpu, pKernelBif) kbifIs3dController_GM107(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifIs3dController_HAL(pGpu, pKernelBif) kbifIs3dController(pGpu, pKernelBif) + +void kbifExecC73War_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifExecC73War(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifExecC73War(pGpu, pKernelBif) kbifExecC73War_GM107(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifExecC73War_HAL(pGpu, pKernelBif) kbifExecC73War(pGpu, pKernelBif) + +static inline void kbifEnableExtendedTagSupport_b3696a(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + return; +} + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifEnableExtendedTagSupport(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifEnableExtendedTagSupport(pGpu, pKernelBif) kbifEnableExtendedTagSupport_b3696a(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifEnableExtendedTagSupport_HAL(pGpu, pKernelBif) kbifEnableExtendedTagSupport(pGpu, pKernelBif) + +void kbifPcieConfigEnableRelaxedOrdering_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifPcieConfigEnableRelaxedOrdering(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifPcieConfigEnableRelaxedOrdering(pGpu, pKernelBif) kbifPcieConfigEnableRelaxedOrdering_GM107(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifPcieConfigEnableRelaxedOrdering_HAL(pGpu, pKernelBif) kbifPcieConfigEnableRelaxedOrdering(pGpu, pKernelBif) + +void kbifPcieConfigDisableRelaxedOrdering_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifPcieConfigDisableRelaxedOrdering(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifPcieConfigDisableRelaxedOrdering(pGpu, pKernelBif) kbifPcieConfigDisableRelaxedOrdering_GM107(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifPcieConfigDisableRelaxedOrdering_HAL(pGpu, pKernelBif) kbifPcieConfigDisableRelaxedOrdering(pGpu, pKernelBif) + +NV_STATUS kbifEnableNoSnoop_GM107(struct OBJGPU 
*pGpu, struct KernelBif *pKernelBif, NvBool bEnable); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifEnableNoSnoop(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvBool bEnable) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifEnableNoSnoop(pGpu, pKernelBif, bEnable) kbifEnableNoSnoop_GM107(pGpu, pKernelBif, bEnable) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifEnableNoSnoop_HAL(pGpu, pKernelBif, bEnable) kbifEnableNoSnoop(pGpu, pKernelBif, bEnable) + +void kbifDisableP2PTransactions_TU102(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifDisableP2PTransactions(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifDisableP2PTransactions(pGpu, pKernelBif) kbifDisableP2PTransactions_TU102(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifDisableP2PTransactions_HAL(pGpu, pKernelBif) kbifDisableP2PTransactions(pGpu, pKernelBif) + +NV_STATUS kbifGetPciConfigSpacePriMirror_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pMirrorBase, NvU32 *pMirrorSize); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifGetPciConfigSpacePriMirror(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 *pMirrorBase, NvU32 *pMirrorSize) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifGetPciConfigSpacePriMirror(pGpu, pKernelBif, pMirrorBase, pMirrorSize) kbifGetPciConfigSpacePriMirror_GM107(pGpu, pKernelBif, pMirrorBase, pMirrorSize) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifGetPciConfigSpacePriMirror_HAL(pGpu, pKernelBif, pMirrorBase, pMirrorSize) kbifGetPciConfigSpacePriMirror(pGpu, pKernelBif, pMirrorBase, pMirrorSize) + +NV_STATUS kbifGetBusOptionsAddr_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, BUS_OPTIONS options, NvU32 *addrReg); + +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifGetBusOptionsAddr(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, BUS_OPTIONS options, NvU32 *addrReg) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifGetBusOptionsAddr(pGpu, pKernelBif, options, addrReg) kbifGetBusOptionsAddr_GM107(pGpu, pKernelBif, options, addrReg) +#endif //__nvoc_kernel_bif_h_disabled + +#define kbifGetBusOptionsAddr_HAL(pGpu, pKernelBif, options, addrReg) kbifGetBusOptionsAddr(pGpu, pKernelBif, options, addrReg) + +NV_STATUS kbifConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, ENGDESCRIPTOR arg0); + +static inline NV_STATUS kbifConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, ENGDESCRIPTOR arg0) { + return pKernelBif->__kbifConstructEngine__(pGpu, pKernelBif, arg0); +} + +NV_STATUS kbifStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +static inline NV_STATUS kbifStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + return pKernelBif->__kbifStateInitLocked__(pGpu, pKernelBif); +} + +static inline NV_STATUS kbifStateLoad_56cd7a(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 arg0) { + return NV_OK; +} + +NV_STATUS kbifStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 arg0); + +static inline NV_STATUS 
kbifStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 arg0) { + return pKernelBif->__kbifStateLoad__(pGpu, pKernelBif, arg0); +} + +static inline NV_STATUS kbifStateUnload_56cd7a(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 arg0) { + return NV_OK; +} + +NV_STATUS kbifStateUnload_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 arg0); + +static inline NV_STATUS kbifStateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NvU32 arg0) { + return pKernelBif->__kbifStateUnload__(pGpu, pKernelBif, arg0); +} + +NvBool kbifIsPciIoAccessEnabled_GM107(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +static inline NvBool kbifIsPciIoAccessEnabled_491d52(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kbifIsPciIoAccessEnabled_DISPATCH(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + return pKernelBif->__kbifIsPciIoAccessEnabled__(pGpu, pKernelBif); +} + +void kbifApplyWARBug3208922_GA100(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); + +static inline void kbifApplyWARBug3208922_b3696a(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + return; +} + +static inline void kbifApplyWARBug3208922_DISPATCH(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + pKernelBif->__kbifApplyWARBug3208922__(pGpu, pKernelBif); +} + +static inline NV_STATUS kbifReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunableState) { + return pEngstate->__kbifReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kbifStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) { + return pEngstate->__kbifStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kbifStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) { + return pEngstate->__kbifStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void kbifStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate) { + pEngstate->__kbifStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS kbifStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) { + return pEngstate->__kbifStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kbifStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate) { + return pEngstate->__kbifStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kbifInitMissing_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate) { + pEngstate->__kbifInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kbifStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate) { + return pEngstate->__kbifStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kbifStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate) { + return pEngstate->__kbifStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kbifGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunableState) { + return pEngstate->__kbifGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kbifCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kbifCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kbifFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunableState) { + pEngstate->__kbifFreeTunableState__(pGpu, pEngstate, 
pTunableState); +} + +static inline NV_STATUS kbifStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, NvU32 arg0) { + return pEngstate->__kbifStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kbifAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, void **ppTunableState) { + return pEngstate->__kbifAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kbifSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate, void *pTunableState) { + return pEngstate->__kbifSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kbifIsPresent_DISPATCH(POBJGPU pGpu, struct KernelBif *pEngstate) { + return pEngstate->__kbifIsPresent__(pGpu, pEngstate); +} + +NV_STATUS kbifStaticInfoInit_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifStaticInfoInit(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifStaticInfoInit(pGpu, pKernelBif) kbifStaticInfoInit_IMPL(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +void kbifInitPcieDeviceControlStatus_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifInitPcieDeviceControlStatus(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifInitPcieDeviceControlStatus(pGpu, pKernelBif) kbifInitPcieDeviceControlStatus_IMPL(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +void kbifCheckAndRearmMSI_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); +#ifdef __nvoc_kernel_bif_h_disabled +static inline void kbifCheckAndRearmMSI(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifCheckAndRearmMSI(pGpu, pKernelBif) kbifCheckAndRearmMSI_IMPL(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +NvBool kbifIsMSIEnabled_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); +#ifdef __nvoc_kernel_bif_h_disabled +static inline NvBool kbifIsMSIEnabled(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifIsMSIEnabled(pGpu, pKernelBif) kbifIsMSIEnabled_IMPL(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +NvBool kbifIsMSIXEnabled_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); +#ifdef __nvoc_kernel_bif_h_disabled +static inline NvBool kbifIsMSIXEnabled(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifIsMSIXEnabled(pGpu, pKernelBif) kbifIsMSIXEnabled_IMPL(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +NvBool kbifIsPciBusFamily_IMPL(struct KernelBif *pKernelBif); +#ifdef __nvoc_kernel_bif_h_disabled +static inline NvBool kbifIsPciBusFamily(struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifIsPciBusFamily(pKernelBif) kbifIsPciBusFamily_IMPL(pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +NV_STATUS kbifControlGetPCIEInfo_IMPL(struct OBJGPU *pGpu, struct 
KernelBif *pKernelBif, NV2080_CTRL_BUS_INFO *pBusInfo); +#ifdef __nvoc_kernel_bif_h_disabled +static inline NV_STATUS kbifControlGetPCIEInfo(struct OBJGPU *pGpu, struct KernelBif *pKernelBif, NV2080_CTRL_BUS_INFO *pBusInfo) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifControlGetPCIEInfo(pGpu, pKernelBif, pBusInfo) kbifControlGetPCIEInfo_IMPL(pGpu, pKernelBif, pBusInfo) +#endif //__nvoc_kernel_bif_h_disabled + +NvU32 kbifGetDmaCaps_IMPL(struct OBJGPU *pGpu, struct KernelBif *pKernelBif); +#ifdef __nvoc_kernel_bif_h_disabled +static inline NvU32 kbifGetDmaCaps(struct OBJGPU *pGpu, struct KernelBif *pKernelBif) { + NV_ASSERT_FAILED_PRECOMP("KernelBif was disabled!"); + return 0; +} +#else //__nvoc_kernel_bif_h_disabled +#define kbifGetDmaCaps(pGpu, pKernelBif) kbifGetDmaCaps_IMPL(pGpu, pKernelBif) +#endif //__nvoc_kernel_bif_h_disabled + +#undef PRIVATE_FIELD + + +#endif // KERNEL_BIF_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_BIF_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_ce_context_nvoc.c b/src/nvidia/generated/g_kernel_ce_context_nvoc.c new file mode 100644 index 000000000..1e873d6ae --- /dev/null +++ b/src/nvidia/generated/g_kernel_ce_context_nvoc.c @@ -0,0 +1,405 @@ +#define NVOC_KERNEL_CE_CONTEXT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_ce_context_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x2d0ee9 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCeContext; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_KernelCeContext(KernelCeContext*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelCeContext(KernelCeContext*); +NV_STATUS __nvoc_ctor_KernelCeContext(KernelCeContext*, RmHalspecOwner* , CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_KernelCeContext(KernelCeContext*); +void __nvoc_dtor_KernelCeContext(KernelCeContext*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCeContext; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_KernelCeContext = { + /*pClassDef=*/ &__nvoc_class_def_KernelCeContext, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelCeContext, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCeContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCeContext, 
__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCeContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCeContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCeContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCeContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCeContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCeContext_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCeContext, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelCeContext = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_KernelCeContext_KernelCeContext, + &__nvoc_rtti_KernelCeContext_ChannelDescendant, + &__nvoc_rtti_KernelCeContext_Notifier, + &__nvoc_rtti_KernelCeContext_INotifier, + &__nvoc_rtti_KernelCeContext_GpuResource, + &__nvoc_rtti_KernelCeContext_RmResource, + &__nvoc_rtti_KernelCeContext_RmResourceCommon, + &__nvoc_rtti_KernelCeContext_RsResource, + &__nvoc_rtti_KernelCeContext_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCeContext = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelCeContext), + /*classId=*/ classId(KernelCeContext), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelCeContext", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelCeContext, + /*pCastInfo=*/ &__nvoc_castinfo_KernelCeContext, + /*pExportInfo=*/ &__nvoc_export_info_KernelCeContext +}; + +static NV_STATUS __nvoc_thunk_ChannelDescendant_kcectxCheckMemInterUnmap(struct KernelCeContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_KernelCeContext_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_kcectxShareCallback(struct KernelCeContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCeContext_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool 
__nvoc_thunk_RmResource_kcectxAccessCallback(struct KernelCeContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_kcectxMapTo(struct KernelCeContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kcectxGetMapAddrSpace(struct KernelCeContext *pGpuResource, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCeContext_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_kcectxSetNotificationShare(struct KernelCeContext *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelCeContext_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_kcectxGetRefCount(struct KernelCeContext *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_kcectxAddAdditionalDependants(struct RsClient *pClient, struct KernelCeContext *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_kcectxControl_Prologue(struct KernelCeContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kcectxGetRegBaseOffsetAndSize(struct KernelCeContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCeContext_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kcectxInternalControlForward(struct KernelCeContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCeContext_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_kcectxUnmapFrom(struct KernelCeContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_kcectxControl_Epilogue(struct KernelCeContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_kcectxControlLookup(struct KernelCeContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct 
NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_kcectxGetSwMethods(struct KernelCeContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return chandesGetSwMethods((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_KernelCeContext_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NvHandle __nvoc_thunk_GpuResource_kcectxGetInternalObjectHandle(struct KernelCeContext *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCeContext_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kcectxControl(struct KernelCeContext *pGpuResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCeContext_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kcectxUnmap(struct KernelCeContext *pGpuResource, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCeContext_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_kcectxGetMemInterMapParams(struct KernelCeContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelCeContext_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_kcectxGetMemoryMappingDescriptor(struct KernelCeContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelCeContext_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_kcectxIsSwMethodStalling(struct KernelCeContext *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_KernelCeContext_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_kcectxControlFilter(struct KernelCeContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_kcectxUnregisterEvent(struct KernelCeContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelCeContext_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_kcectxCanCopy(struct KernelCeContext *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_kcectxPreDestruct(struct KernelCeContext *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCeContext_RsResource.offset)); +} + +static PEVENTNOTIFICATION 
*__nvoc_thunk_Notifier_kcectxGetNotificationListPtr(struct KernelCeContext *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelCeContext_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_kcectxGetNotificationShare(struct KernelCeContext *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelCeContext_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kcectxMap(struct KernelCeContext *pGpuResource, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCeContext_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_kcectxGetOrAllocNotifShare(struct KernelCeContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelCeContext_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCeContext = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_KernelCeContext(KernelCeContext *pThis) { + __nvoc_kcectxDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelCeContext(KernelCeContext *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, ENGDESCRIPTOR (*)(struct OBJGPU *, NvU32, void *)); +NV_STATUS __nvoc_ctor_KernelCeContext(KernelCeContext *pThis, RmHalspecOwner *pRmhalspecowner, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, kceGetEngineDescFromAllocParams); + if (status != NV_OK) goto __nvoc_ctor_KernelCeContext_fail_ChannelDescendant; + __nvoc_init_dataField_KernelCeContext(pThis); + + status = __nvoc_kcectxConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelCeContext_fail__init; + goto __nvoc_ctor_KernelCeContext_exit; // Success + +__nvoc_ctor_KernelCeContext_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_KernelCeContext_fail_ChannelDescendant: +__nvoc_ctor_KernelCeContext_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelCeContext_1(KernelCeContext *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__kcectxCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_kcectxCheckMemInterUnmap; + + pThis->__kcectxShareCallback__ = &__nvoc_thunk_GpuResource_kcectxShareCallback; + + pThis->__kcectxAccessCallback__ = &__nvoc_thunk_RmResource_kcectxAccessCallback; + + pThis->__kcectxMapTo__ = &__nvoc_thunk_RsResource_kcectxMapTo; + + pThis->__kcectxGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_kcectxGetMapAddrSpace; + + pThis->__kcectxSetNotificationShare__ = &__nvoc_thunk_Notifier_kcectxSetNotificationShare; + + pThis->__kcectxGetRefCount__ = &__nvoc_thunk_RsResource_kcectxGetRefCount; + + 
pThis->__kcectxAddAdditionalDependants__ = &__nvoc_thunk_RsResource_kcectxAddAdditionalDependants; + + pThis->__kcectxControl_Prologue__ = &__nvoc_thunk_RmResource_kcectxControl_Prologue; + + pThis->__kcectxGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_kcectxGetRegBaseOffsetAndSize; + + pThis->__kcectxInternalControlForward__ = &__nvoc_thunk_GpuResource_kcectxInternalControlForward; + + pThis->__kcectxUnmapFrom__ = &__nvoc_thunk_RsResource_kcectxUnmapFrom; + + pThis->__kcectxControl_Epilogue__ = &__nvoc_thunk_RmResource_kcectxControl_Epilogue; + + pThis->__kcectxControlLookup__ = &__nvoc_thunk_RsResource_kcectxControlLookup; + + pThis->__kcectxGetSwMethods__ = &__nvoc_thunk_ChannelDescendant_kcectxGetSwMethods; + + pThis->__kcectxGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_kcectxGetInternalObjectHandle; + + pThis->__kcectxControl__ = &__nvoc_thunk_GpuResource_kcectxControl; + + pThis->__kcectxUnmap__ = &__nvoc_thunk_GpuResource_kcectxUnmap; + + pThis->__kcectxGetMemInterMapParams__ = &__nvoc_thunk_RmResource_kcectxGetMemInterMapParams; + + pThis->__kcectxGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_kcectxGetMemoryMappingDescriptor; + + pThis->__kcectxIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_kcectxIsSwMethodStalling; + + pThis->__kcectxControlFilter__ = &__nvoc_thunk_RsResource_kcectxControlFilter; + + pThis->__kcectxUnregisterEvent__ = &__nvoc_thunk_Notifier_kcectxUnregisterEvent; + + pThis->__kcectxCanCopy__ = &__nvoc_thunk_RsResource_kcectxCanCopy; + + pThis->__kcectxPreDestruct__ = &__nvoc_thunk_RsResource_kcectxPreDestruct; + + pThis->__kcectxGetNotificationListPtr__ = &__nvoc_thunk_Notifier_kcectxGetNotificationListPtr; + + pThis->__kcectxGetNotificationShare__ = &__nvoc_thunk_Notifier_kcectxGetNotificationShare; + + pThis->__kcectxMap__ = &__nvoc_thunk_GpuResource_kcectxMap; + + pThis->__kcectxGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_kcectxGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_KernelCeContext(KernelCeContext *pThis) { + __nvoc_init_funcTable_KernelCeContext_1(pThis); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_KernelCeContext(KernelCeContext *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelCeContext = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_KernelCeContext(pThis); +} + +NV_STATUS __nvoc_objCreate_KernelCeContext(KernelCeContext **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct 
RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + KernelCeContext *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelCeContext)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelCeContext)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelCeContext); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelCeContext(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelCeContext(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_KernelCeContext_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelCeContext_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelCeContext(KernelCeContext **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_KernelCeContext(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_ce_context_nvoc.h b/src/nvidia/generated/g_kernel_ce_context_nvoc.h new file mode 100644 index 000000000..aa1a6fcde --- /dev/null +++ b/src/nvidia/generated/g_kernel_ce_context_nvoc.h @@ -0,0 +1,281 @@ +#ifndef _G_KERNEL_CE_CONTEXT_NVOC_H_ +#define _G_KERNEL_CE_CONTEXT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_ce_context_nvoc.h" + +#ifndef KERNEL_CE_CONTEXT_H +#define KERNEL_CE_CONTEXT_H + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" +#include "gpu/gpu.h" +#include "kernel/gpu/fifo/channel_descendant.h" + +ENGDESCRIPTOR kceGetEngineDescFromAllocParams(struct OBJGPU *pGpu, NvU32 externalClassId, void *pAllocParams); + +/*! + * RM internal class representing XXX_DMA_COPY_A + */ +#ifdef NVOC_KERNEL_CE_CONTEXT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelCeContext { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct KernelCeContext *__nvoc_pbase_KernelCeContext; + NV_STATUS (*__kcectxCheckMemInterUnmap__)(struct KernelCeContext *, NvBool); + NvBool (*__kcectxShareCallback__)(struct KernelCeContext *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__kcectxAccessCallback__)(struct KernelCeContext *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__kcectxMapTo__)(struct KernelCeContext *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__kcectxGetMapAddrSpace__)(struct KernelCeContext *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__kcectxSetNotificationShare__)(struct KernelCeContext *, struct NotifShare *); + NvU32 (*__kcectxGetRefCount__)(struct KernelCeContext *); + void (*__kcectxAddAdditionalDependants__)(struct RsClient *, struct KernelCeContext *, RsResourceRef *); + NV_STATUS (*__kcectxControl_Prologue__)(struct KernelCeContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kcectxGetRegBaseOffsetAndSize__)(struct KernelCeContext *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__kcectxInternalControlForward__)(struct KernelCeContext *, NvU32, void *, NvU32); + NV_STATUS (*__kcectxUnmapFrom__)(struct KernelCeContext *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__kcectxControl_Epilogue__)(struct KernelCeContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kcectxControlLookup__)(struct KernelCeContext *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__kcectxGetSwMethods__)(struct KernelCeContext *, METHOD **, NvU32 *); + NvHandle (*__kcectxGetInternalObjectHandle__)(struct KernelCeContext *); + NV_STATUS (*__kcectxControl__)(struct KernelCeContext *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kcectxUnmap__)(struct KernelCeContext *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__kcectxGetMemInterMapParams__)(struct KernelCeContext *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__kcectxGetMemoryMappingDescriptor__)(struct KernelCeContext *, struct MEMORY_DESCRIPTOR **); + NvBool (*__kcectxIsSwMethodStalling__)(struct KernelCeContext *, NvU32); + NV_STATUS 
(*__kcectxControlFilter__)(struct KernelCeContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kcectxUnregisterEvent__)(struct KernelCeContext *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__kcectxCanCopy__)(struct KernelCeContext *); + void (*__kcectxPreDestruct__)(struct KernelCeContext *); + PEVENTNOTIFICATION *(*__kcectxGetNotificationListPtr__)(struct KernelCeContext *); + struct NotifShare *(*__kcectxGetNotificationShare__)(struct KernelCeContext *); + NV_STATUS (*__kcectxMap__)(struct KernelCeContext *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__kcectxGetOrAllocNotifShare__)(struct KernelCeContext *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_KernelCeContext_TYPEDEF__ +#define __NVOC_CLASS_KernelCeContext_TYPEDEF__ +typedef struct KernelCeContext KernelCeContext; +#endif /* __NVOC_CLASS_KernelCeContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCeContext +#define __nvoc_class_id_KernelCeContext 0x2d0ee9 +#endif /* __nvoc_class_id_KernelCeContext */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCeContext; + +#define __staticCast_KernelCeContext(pThis) \ + ((pThis)->__nvoc_pbase_KernelCeContext) + +#ifdef __nvoc_kernel_ce_context_h_disabled +#define __dynamicCast_KernelCeContext(pThis) ((KernelCeContext*)NULL) +#else //__nvoc_kernel_ce_context_h_disabled +#define __dynamicCast_KernelCeContext(pThis) \ + ((KernelCeContext*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCeContext))) +#endif //__nvoc_kernel_ce_context_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelCeContext(KernelCeContext**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelCeContext(KernelCeContext**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_KernelCeContext(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_KernelCeContext((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define kcectxCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) kcectxCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define kcectxShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) kcectxShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define kcectxAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) kcectxAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define kcectxMapTo(pResource, pParams) kcectxMapTo_DISPATCH(pResource, pParams) +#define kcectxGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) kcectxGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define kcectxSetNotificationShare(pNotifier, pNotifShare) kcectxSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define kcectxGetRefCount(pResource) kcectxGetRefCount_DISPATCH(pResource) +#define kcectxAddAdditionalDependants(pClient, pResource, pReference) kcectxAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define kcectxControl_Prologue(pResource, pCallContext, pParams) kcectxControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define kcectxGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) kcectxGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define kcectxInternalControlForward(pGpuResource, command, pParams, size) 
kcectxInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define kcectxUnmapFrom(pResource, pParams) kcectxUnmapFrom_DISPATCH(pResource, pParams) +#define kcectxControl_Epilogue(pResource, pCallContext, pParams) kcectxControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define kcectxControlLookup(pResource, pParams, ppEntry) kcectxControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define kcectxGetSwMethods(pChannelDescendant, ppMethods, pNumMethods) kcectxGetSwMethods_DISPATCH(pChannelDescendant, ppMethods, pNumMethods) +#define kcectxGetInternalObjectHandle(pGpuResource) kcectxGetInternalObjectHandle_DISPATCH(pGpuResource) +#define kcectxControl(pGpuResource, pCallContext, pParams) kcectxControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define kcectxUnmap(pGpuResource, pCallContext, pCpuMapping) kcectxUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define kcectxGetMemInterMapParams(pRmResource, pParams) kcectxGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define kcectxGetMemoryMappingDescriptor(pRmResource, ppMemDesc) kcectxGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define kcectxIsSwMethodStalling(pChannelDescendant, hHandle) kcectxIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define kcectxControlFilter(pResource, pCallContext, pParams) kcectxControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define kcectxUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) kcectxUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define kcectxCanCopy(pResource) kcectxCanCopy_DISPATCH(pResource) +#define kcectxPreDestruct(pResource) kcectxPreDestruct_DISPATCH(pResource) +#define kcectxGetNotificationListPtr(pNotifier) kcectxGetNotificationListPtr_DISPATCH(pNotifier) +#define kcectxGetNotificationShare(pNotifier) kcectxGetNotificationShare_DISPATCH(pNotifier) +#define kcectxMap(pGpuResource, pCallContext, pParams, pCpuMapping) kcectxMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define kcectxGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) kcectxGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +static inline NV_STATUS kcectxCheckMemInterUnmap_DISPATCH(struct KernelCeContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__kcectxCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool kcectxShareCallback_DISPATCH(struct KernelCeContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__kcectxShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool kcectxAccessCallback_DISPATCH(struct KernelCeContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__kcectxAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS kcectxMapTo_DISPATCH(struct KernelCeContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__kcectxMapTo__(pResource, pParams); +} + +static inline NV_STATUS kcectxGetMapAddrSpace_DISPATCH(struct KernelCeContext *pGpuResource, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__kcectxGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + 
+static inline void kcectxSetNotificationShare_DISPATCH(struct KernelCeContext *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__kcectxSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 kcectxGetRefCount_DISPATCH(struct KernelCeContext *pResource) { + return pResource->__kcectxGetRefCount__(pResource); +} + +static inline void kcectxAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct KernelCeContext *pResource, RsResourceRef *pReference) { + pResource->__kcectxAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS kcectxControl_Prologue_DISPATCH(struct KernelCeContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kcectxControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kcectxGetRegBaseOffsetAndSize_DISPATCH(struct KernelCeContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__kcectxGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS kcectxInternalControlForward_DISPATCH(struct KernelCeContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__kcectxInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS kcectxUnmapFrom_DISPATCH(struct KernelCeContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__kcectxUnmapFrom__(pResource, pParams); +} + +static inline void kcectxControl_Epilogue_DISPATCH(struct KernelCeContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__kcectxControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kcectxControlLookup_DISPATCH(struct KernelCeContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__kcectxControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS kcectxGetSwMethods_DISPATCH(struct KernelCeContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return pChannelDescendant->__kcectxGetSwMethods__(pChannelDescendant, ppMethods, pNumMethods); +} + +static inline NvHandle kcectxGetInternalObjectHandle_DISPATCH(struct KernelCeContext *pGpuResource) { + return pGpuResource->__kcectxGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS kcectxControl_DISPATCH(struct KernelCeContext *pGpuResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__kcectxControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS kcectxUnmap_DISPATCH(struct KernelCeContext *pGpuResource, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pGpuResource->__kcectxUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS kcectxGetMemInterMapParams_DISPATCH(struct KernelCeContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__kcectxGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS kcectxGetMemoryMappingDescriptor_DISPATCH(struct KernelCeContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__kcectxGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool kcectxIsSwMethodStalling_DISPATCH(struct KernelCeContext *pChannelDescendant, NvU32 hHandle) { + return 
pChannelDescendant->__kcectxIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS kcectxControlFilter_DISPATCH(struct KernelCeContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kcectxControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kcectxUnregisterEvent_DISPATCH(struct KernelCeContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__kcectxUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool kcectxCanCopy_DISPATCH(struct KernelCeContext *pResource) { + return pResource->__kcectxCanCopy__(pResource); +} + +static inline void kcectxPreDestruct_DISPATCH(struct KernelCeContext *pResource) { + pResource->__kcectxPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *kcectxGetNotificationListPtr_DISPATCH(struct KernelCeContext *pNotifier) { + return pNotifier->__kcectxGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *kcectxGetNotificationShare_DISPATCH(struct KernelCeContext *pNotifier) { + return pNotifier->__kcectxGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS kcectxMap_DISPATCH(struct KernelCeContext *pGpuResource, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pGpuResource->__kcectxMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS kcectxGetOrAllocNotifShare_DISPATCH(struct KernelCeContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__kcectxGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS kcectxConstruct_IMPL(struct KernelCeContext *arg_pKCeContext, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_kcectxConstruct(arg_pKCeContext, arg_pCallContext, arg_pParams) kcectxConstruct_IMPL(arg_pKCeContext, arg_pCallContext, arg_pParams) +void kcectxDestruct_IMPL(struct KernelCeContext *pKCeContext); +#define __nvoc_kcectxDestruct(pKCeContext) kcectxDestruct_IMPL(pKCeContext) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_CE_CONTEXT_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_ce_nvoc.c b/src/nvidia/generated/g_kernel_ce_nvoc.c new file mode 100644 index 000000000..2b4de2af9 --- /dev/null +++ b/src/nvidia/generated/g_kernel_ce_nvoc.c @@ -0,0 +1,578 @@ +#define NVOC_KERNEL_CE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_ce_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x242aca = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_KernelCE(KernelCE*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelCE(KernelCE*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelCE(KernelCE*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelCE(KernelCE*, RmHalspecOwner* ); +void __nvoc_dtor_KernelCE(KernelCE*); +extern const struct 
NVOC_EXPORT_INFO __nvoc_export_info_KernelCE; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCE_KernelCE = { + /*pClassDef=*/ &__nvoc_class_def_KernelCE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelCE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCE_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCE, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCE_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCE, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCE_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCE, __nvoc_base_IntrService), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelCE = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_KernelCE_KernelCE, + &__nvoc_rtti_KernelCE_IntrService, + &__nvoc_rtti_KernelCE_OBJENGSTATE, + &__nvoc_rtti_KernelCE_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelCE), + /*classId=*/ classId(KernelCE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelCE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelCE, + /*pCastInfo=*/ &__nvoc_castinfo_KernelCE, + /*pExportInfo=*/ &__nvoc_export_info_KernelCE +}; + +static NV_STATUS __nvoc_thunk_KernelCE_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pKCe, ENGDESCRIPTOR arg0) { + return kceConstructEngine(pGpu, (struct KernelCE *)(((unsigned char *)pKCe) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0); +} + +static NvBool __nvoc_thunk_KernelCE_engstateIsPresent(OBJGPU *pGpu, struct OBJENGSTATE *pKCe) { + return kceIsPresent(pGpu, (struct KernelCE *)(((unsigned char *)pKCe) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelCE_engstateStateLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) { + return kceStateLoad(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelCE_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pKCe, NvU32 flags) { + return kceStateUnload(pGpu, (struct KernelCE *)(((unsigned char *)pKCe) - __nvoc_rtti_KernelCE_OBJENGSTATE.offset), flags); +} + +static void __nvoc_thunk_KernelCE_intrservRegisterIntrService(OBJGPU *arg0, struct IntrService *arg1, IntrServiceRecord arg2[155]) { + kceRegisterIntrService(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_IntrService.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelCE_intrservServiceNotificationInterrupt(OBJGPU *arg0, struct IntrService *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) { + return kceServiceNotificationInterrupt(arg0, (struct KernelCE *)(((unsigned char *)arg1) - __nvoc_rtti_KernelCE_IntrService.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceReconcileTunableState(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStateInitLocked(POBJGPU pGpu, struct KernelCE *pEngstate) { + 
return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePreLoad(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePostUnload(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_kceStateDestroy(POBJGPU pGpu, struct KernelCE *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePreUnload(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStateInitUnlocked(POBJGPU pGpu, struct KernelCE *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kceInitMissing(POBJGPU pGpu, struct KernelCE *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePreInitLocked(POBJGPU pGpu, struct KernelCE *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePreInitUnlocked(POBJGPU pGpu, struct KernelCE *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceGetTunableState(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceCompareTunableState(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kceFreeTunableState(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_IntrService_kceClearInterrupt(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelCE_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceStatePostLoad(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + 
__nvoc_rtti_KernelCE_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceAllocTunableState(POBJGPU pGpu, struct KernelCE *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kceSetTunableState(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelCE_OBJENGSTATE.offset), pTunableState); +} + +static NvU32 __nvoc_thunk_IntrService_kceServiceInterrupt(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceServiceInterruptArguments *pParams) { + return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelCE_IntrService.offset), pParams); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_KernelCE(KernelCE *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelCE(KernelCE *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* ); +NV_STATUS __nvoc_ctor_KernelCE(KernelCE *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelCE_fail_OBJENGSTATE; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService); + if (status != NV_OK) goto __nvoc_ctor_KernelCE_fail_IntrService; + __nvoc_init_dataField_KernelCE(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelCE_exit; // Success + +__nvoc_ctor_KernelCE_fail_IntrService: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_KernelCE_fail_OBJENGSTATE: +__nvoc_ctor_KernelCE_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelCE_1(KernelCE *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kceConstructEngine__ = &kceConstructEngine_IMPL; + + // Hal function -- kceIsPresent + if (( 
((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceIsPresent__ = &kceIsPresent_IMPL; + } + else if (0) + { + } + + // Hal function -- kceStateLoad + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceStateLoad__ = &kceStateLoad_GP100; + } + else if (0) + { + } + + // Hal function -- kceStateUnload + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceStateUnload__ = &kceStateUnload_GP100; + } + // default + else + { + pThis->__kceStateUnload__ = &kceStateUnload_56cd7a; + } + + pThis->__kceRegisterIntrService__ = &kceRegisterIntrService_IMPL; + + pThis->__kceServiceNotificationInterrupt__ = &kceServiceNotificationInterrupt_IMPL; + + // Hal function -- kceGetNvlinkAutoConfigCeValues + if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceGetNvlinkAutoConfigCeValues__ = &kceGetNvlinkAutoConfigCeValues_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceGetNvlinkAutoConfigCeValues__ = &kceGetNvlinkAutoConfigCeValues_GA100; + } + else if (0) + { + } + + // Hal function -- kceGetNvlinkMaxTopoForTable + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceGetNvlinkMaxTopoForTable__ = &kceGetNvlinkMaxTopoForTable_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceGetNvlinkMaxTopoForTable__ = &kceGetNvlinkMaxTopoForTable_491d52; + } + + // Hal function -- kceIsCurrentMaxTopology + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceIsCurrentMaxTopology__ = &kceIsCurrentMaxTopology_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceIsCurrentMaxTopology__ = &kceIsCurrentMaxTopology_491d52; + } + + // Hal function -- kceGetGrceConfigSize1 + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceGetGrceConfigSize1__ = &kceGetGrceConfigSize1_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceGetGrceConfigSize1__ = &kceGetGrceConfigSize1_GA100; + } + else if (0) + { + } + + // Hal function -- kceGetPce2lceConfigSize1 + if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && 
((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceGetPce2lceConfigSize1__ = &kceGetPce2lceConfigSize1_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kceGetPce2lceConfigSize1__ = &kceGetPce2lceConfigSize1_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceGetPce2lceConfigSize1__ = &kceGetPce2lceConfigSize1_GA102; + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- kceGetMappings + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceGetMappings__ = &kceGetMappings_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceGetMappings__ = &kceGetMappings_46f6a7; + } + + // Hal function -- kceMapPceLceForSysmemLinks + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kceMapPceLceForSysmemLinks__ = &kceMapPceLceForSysmemLinks_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceMapPceLceForSysmemLinks__ = &kceMapPceLceForSysmemLinks_GA102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceMapPceLceForSysmemLinks__ = &kceMapPceLceForSysmemLinks_46f6a7; + } + + // Hal function -- kceMapPceLceForNvlinkPeers + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceMapPceLceForNvlinkPeers__ = &kceMapPceLceForNvlinkPeers_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceMapPceLceForNvlinkPeers__ = &kceMapPceLceForNvlinkPeers_46f6a7; + } + + // Hal function -- kceGetSysmemSupportedLceMask + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kceGetSysmemSupportedLceMask__ = &kceGetSysmemSupportedLceMask_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceGetSysmemSupportedLceMask__ = &kceGetSysmemSupportedLceMask_GA102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceGetSysmemSupportedLceMask__ = &kceGetSysmemSupportedLceMask_4a4dee; + } + + // Hal function -- kceMapAsyncLceDefault + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceMapAsyncLceDefault__ = 
&kceMapAsyncLceDefault_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceMapAsyncLceDefault__ = &kceMapAsyncLceDefault_46f6a7; + } + + // Hal function -- kceGetNvlinkPeerSupportedLceMask + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kceGetNvlinkPeerSupportedLceMask__ = &kceGetNvlinkPeerSupportedLceMask_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceGetNvlinkPeerSupportedLceMask__ = &kceGetNvlinkPeerSupportedLceMask_GA102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceGetNvlinkPeerSupportedLceMask__ = &kceGetNvlinkPeerSupportedLceMask_4a4dee; + } + + // Hal function -- kceGetGrceSupportedLceMask + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kceGetGrceSupportedLceMask__ = &kceGetGrceSupportedLceMask_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceGetGrceSupportedLceMask__ = &kceGetGrceSupportedLceMask_GA102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kceGetGrceSupportedLceMask__ = &kceGetGrceSupportedLceMask_4a4dee; + } + + // Hal function -- kceIsGen4orHigherSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kceIsGen4orHigherSupported__ = &kceIsGen4orHigherSupported_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceIsGen4orHigherSupported__ = &kceIsGen4orHigherSupported_cbe027; + } + + // Hal function -- kceApplyGen4orHigherMapping + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kceApplyGen4orHigherMapping__ = &kceApplyGen4orHigherMapping_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kceApplyGen4orHigherMapping__ = &kceApplyGen4orHigherMapping_b3696a; + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelCE_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateIsPresent__ = &__nvoc_thunk_KernelCE_engstateIsPresent; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelCE_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelCE_engstateStateUnload; + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_KernelCE_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceNotificationInterrupt__ = 
&__nvoc_thunk_KernelCE_intrservServiceNotificationInterrupt; + + pThis->__kceReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kceReconcileTunableState; + + pThis->__kceStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kceStateInitLocked; + + pThis->__kceStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kceStatePreLoad; + + pThis->__kceStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kceStatePostUnload; + + pThis->__kceStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kceStateDestroy; + + pThis->__kceStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kceStatePreUnload; + + pThis->__kceStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kceStateInitUnlocked; + + pThis->__kceInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kceInitMissing; + + pThis->__kceStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kceStatePreInitLocked; + + pThis->__kceStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kceStatePreInitUnlocked; + + pThis->__kceGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kceGetTunableState; + + pThis->__kceCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kceCompareTunableState; + + pThis->__kceFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kceFreeTunableState; + + pThis->__kceClearInterrupt__ = &__nvoc_thunk_IntrService_kceClearInterrupt; + + pThis->__kceStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kceStatePostLoad; + + pThis->__kceAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kceAllocTunableState; + + pThis->__kceSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kceSetTunableState; + + pThis->__kceServiceInterrupt__ = &__nvoc_thunk_IntrService_kceServiceInterrupt; +} + +void __nvoc_init_funcTable_KernelCE(KernelCE *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelCE_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_IntrService(IntrService*); +void __nvoc_init_KernelCE(KernelCE *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelCE = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService); + __nvoc_init_funcTable_KernelCE(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelCE(KernelCE **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelCE *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelCE)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelCE)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelCE); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelCE(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelCE(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelCE_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelCE_cleanup: + // do not call destructors here since the 
constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelCE(KernelCE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelCE(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_ce_nvoc.h b/src/nvidia/generated/g_kernel_ce_nvoc.h new file mode 100644 index 000000000..0e3074e0f --- /dev/null +++ b/src/nvidia/generated/g_kernel_ce_nvoc.h @@ -0,0 +1,632 @@ +#ifndef _G_KERNEL_CE_NVOC_H_ +#define _G_KERNEL_CE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_ce_nvoc.h" + +#ifndef KERNEL_CE_H +#define KERNEL_CE_H + +#include "core/core.h" +#include "core/info_block.h" +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" +#include "gpu/gpu.h" +#include "kernel/gpu/intr/intr_service.h" +#include "gpu/ce/kernel_ce_shared.h" + +// +// Kernel Copy Engine +// This class provides Kernel-RM interface and state tracking for Copy Engine. 
+// + +#ifdef NVOC_KERNEL_CE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NVLINK_CE_AUTO_CONFIG_TABLE; + + +struct KernelCE { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct IntrService __nvoc_base_IntrService; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct IntrService *__nvoc_pbase_IntrService; + struct KernelCE *__nvoc_pbase_KernelCE; + NV_STATUS (*__kceConstructEngine__)(OBJGPU *, struct KernelCE *, ENGDESCRIPTOR); + NvBool (*__kceIsPresent__)(OBJGPU *, struct KernelCE *); + NV_STATUS (*__kceStateLoad__)(OBJGPU *, struct KernelCE *, NvU32); + NV_STATUS (*__kceStateUnload__)(OBJGPU *, struct KernelCE *, NvU32); + void (*__kceRegisterIntrService__)(OBJGPU *, struct KernelCE *, IntrServiceRecord *); + NV_STATUS (*__kceServiceNotificationInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceNotificationInterruptArguments *); + NV_STATUS (*__kceGetNvlinkAutoConfigCeValues__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *); + NvBool (*__kceGetNvlinkMaxTopoForTable__)(OBJGPU *, struct KernelCE *, struct NVLINK_TOPOLOGY_PARAMS *, void *, NvU32, NvU32 *); + NvBool (*__kceIsCurrentMaxTopology__)(OBJGPU *, struct KernelCE *, struct NVLINK_TOPOLOGY_PARAMS *, NvU32 *, NvU32 *); + NvU32 (*__kceGetGrceConfigSize1__)(struct KernelCE *); + NvU32 (*__kceGetPce2lceConfigSize1__)(struct KernelCE *); + NV_STATUS (*__kceGetMappings__)(OBJGPU *, struct KernelCE *, NVLINK_TOPOLOGY_PARAMS *, NvU32 *, NvU32 *, NvU32 *); + NV_STATUS (*__kceMapPceLceForSysmemLinks__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *, NvU32); + NV_STATUS (*__kceMapPceLceForNvlinkPeers__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *); + NvU32 (*__kceGetSysmemSupportedLceMask__)(OBJGPU *, struct KernelCE *); + NV_STATUS (*__kceMapAsyncLceDefault__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32 *, NvU32); + NvU32 (*__kceGetNvlinkPeerSupportedLceMask__)(OBJGPU *, struct KernelCE *, NvU32); + NvU32 (*__kceGetGrceSupportedLceMask__)(OBJGPU *, struct KernelCE *); + NvBool (*__kceIsGen4orHigherSupported__)(OBJGPU *, struct KernelCE *); + void (*__kceApplyGen4orHigherMapping__)(OBJGPU *, struct KernelCE *, NvU32 *, NvU32 *, NvU32, NvU32); + NV_STATUS (*__kceReconcileTunableState__)(POBJGPU, struct KernelCE *, void *); + NV_STATUS (*__kceStateInitLocked__)(POBJGPU, struct KernelCE *); + NV_STATUS (*__kceStatePreLoad__)(POBJGPU, struct KernelCE *, NvU32); + NV_STATUS (*__kceStatePostUnload__)(POBJGPU, struct KernelCE *, NvU32); + void (*__kceStateDestroy__)(POBJGPU, struct KernelCE *); + NV_STATUS (*__kceStatePreUnload__)(POBJGPU, struct KernelCE *, NvU32); + NV_STATUS (*__kceStateInitUnlocked__)(POBJGPU, struct KernelCE *); + void (*__kceInitMissing__)(POBJGPU, struct KernelCE *); + NV_STATUS (*__kceStatePreInitLocked__)(POBJGPU, struct KernelCE *); + NV_STATUS (*__kceStatePreInitUnlocked__)(POBJGPU, struct KernelCE *); + NV_STATUS (*__kceGetTunableState__)(POBJGPU, struct KernelCE *, void *); + NV_STATUS (*__kceCompareTunableState__)(POBJGPU, struct KernelCE *, void *, void *); + void (*__kceFreeTunableState__)(POBJGPU, struct KernelCE *, void *); + NvBool (*__kceClearInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceClearInterruptArguments *); + NV_STATUS (*__kceStatePostLoad__)(POBJGPU, struct KernelCE *, NvU32); + NV_STATUS (*__kceAllocTunableState__)(POBJGPU, struct KernelCE *, void **); + NV_STATUS 
(*__kceSetTunableState__)(POBJGPU, struct KernelCE *, void *); + NvU32 (*__kceServiceInterrupt__)(OBJGPU *, struct KernelCE *, IntrServiceServiceInterruptArguments *); + NvU32 publicID; + NvBool bStubbed; + NvU32 nvlinkPeerMask; + NvBool bIsAutoConfigEnabled; + NvBool bUseGen4Mapping; + struct IO_APERTURE aperture; +}; + +#ifndef __NVOC_CLASS_KernelCE_TYPEDEF__ +#define __NVOC_CLASS_KernelCE_TYPEDEF__ +typedef struct KernelCE KernelCE; +#endif /* __NVOC_CLASS_KernelCE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCE +#define __nvoc_class_id_KernelCE 0x242aca +#endif /* __nvoc_class_id_KernelCE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCE; + +#define __staticCast_KernelCE(pThis) \ + ((pThis)->__nvoc_pbase_KernelCE) + +#ifdef __nvoc_kernel_ce_h_disabled +#define __dynamicCast_KernelCE(pThis) ((KernelCE*)NULL) +#else //__nvoc_kernel_ce_h_disabled +#define __dynamicCast_KernelCE(pThis) \ + ((KernelCE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCE))) +#endif //__nvoc_kernel_ce_h_disabled + +#define PDB_PROP_KCE_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KCE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelCE(KernelCE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelCE(KernelCE**, Dynamic*, NvU32); +#define __objCreate_KernelCE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelCE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kceConstructEngine(pGpu, pKCe, arg0) kceConstructEngine_DISPATCH(pGpu, pKCe, arg0) +#define kceIsPresent(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe) +#define kceIsPresent_HAL(pGpu, pKCe) kceIsPresent_DISPATCH(pGpu, pKCe) +#define kceStateLoad(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2) +#define kceStateLoad_HAL(arg0, arg1, arg2) kceStateLoad_DISPATCH(arg0, arg1, arg2) +#define kceStateUnload(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags) +#define kceStateUnload_HAL(pGpu, pKCe, flags) kceStateUnload_DISPATCH(pGpu, pKCe, flags) +#define kceRegisterIntrService(arg0, arg1, arg2) kceRegisterIntrService_DISPATCH(arg0, arg1, arg2) +#define kceServiceNotificationInterrupt(arg0, arg1, arg2) kceServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2) +#define kceGetNvlinkAutoConfigCeValues(pGpu, pKCe, arg0, arg1, arg2) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg0, arg1, arg2) +#define kceGetNvlinkAutoConfigCeValues_HAL(pGpu, pKCe, arg0, arg1, arg2) kceGetNvlinkAutoConfigCeValues_DISPATCH(pGpu, pKCe, arg0, arg1, arg2) +#define kceGetNvlinkMaxTopoForTable(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetNvlinkMaxTopoForTable_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3) +#define kceGetNvlinkMaxTopoForTable_HAL(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetNvlinkMaxTopoForTable_DISPATCH(pGpu, pKCe, arg0, arg1, arg2, arg3) +#define kceIsCurrentMaxTopology(pGpu, arg0, arg1, arg2, arg3) kceIsCurrentMaxTopology_DISPATCH(pGpu, arg0, arg1, arg2, arg3) +#define kceIsCurrentMaxTopology_HAL(pGpu, arg0, arg1, arg2, arg3) kceIsCurrentMaxTopology_DISPATCH(pGpu, arg0, arg1, arg2, arg3) +#define kceGetGrceConfigSize1(arg0) kceGetGrceConfigSize1_DISPATCH(arg0) +#define kceGetGrceConfigSize1_HAL(arg0) kceGetGrceConfigSize1_DISPATCH(arg0) +#define kceGetPce2lceConfigSize1(arg0) kceGetPce2lceConfigSize1_DISPATCH(arg0) +#define kceGetPce2lceConfigSize1_HAL(arg0) kceGetPce2lceConfigSize1_DISPATCH(arg0) +#define kceGetMappings(pGpu, pCe, arg0, arg1, arg2, arg3) kceGetMappings_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3) 
+#define kceGetMappings_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceGetMappings_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3) +#define kceMapPceLceForSysmemLinks(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapPceLceForSysmemLinks_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3) +#define kceMapPceLceForSysmemLinks_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapPceLceForSysmemLinks_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3) +#define kceMapPceLceForNvlinkPeers(pGpu, pCe, arg0, arg1, arg2) kceMapPceLceForNvlinkPeers_DISPATCH(pGpu, pCe, arg0, arg1, arg2) +#define kceMapPceLceForNvlinkPeers_HAL(pGpu, pCe, arg0, arg1, arg2) kceMapPceLceForNvlinkPeers_DISPATCH(pGpu, pCe, arg0, arg1, arg2) +#define kceGetSysmemSupportedLceMask(pGpu, pCe) kceGetSysmemSupportedLceMask_DISPATCH(pGpu, pCe) +#define kceGetSysmemSupportedLceMask_HAL(pGpu, pCe) kceGetSysmemSupportedLceMask_DISPATCH(pGpu, pCe) +#define kceMapAsyncLceDefault(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapAsyncLceDefault_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3) +#define kceMapAsyncLceDefault_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceMapAsyncLceDefault_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3) +#define kceGetNvlinkPeerSupportedLceMask(pGpu, pCe, arg0) kceGetNvlinkPeerSupportedLceMask_DISPATCH(pGpu, pCe, arg0) +#define kceGetNvlinkPeerSupportedLceMask_HAL(pGpu, pCe, arg0) kceGetNvlinkPeerSupportedLceMask_DISPATCH(pGpu, pCe, arg0) +#define kceGetGrceSupportedLceMask(pGpu, pCe) kceGetGrceSupportedLceMask_DISPATCH(pGpu, pCe) +#define kceGetGrceSupportedLceMask_HAL(pGpu, pCe) kceGetGrceSupportedLceMask_DISPATCH(pGpu, pCe) +#define kceIsGen4orHigherSupported(pGpu, pCe) kceIsGen4orHigherSupported_DISPATCH(pGpu, pCe) +#define kceIsGen4orHigherSupported_HAL(pGpu, pCe) kceIsGen4orHigherSupported_DISPATCH(pGpu, pCe) +#define kceApplyGen4orHigherMapping(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3) +#define kceApplyGen4orHigherMapping_HAL(pGpu, pCe, arg0, arg1, arg2, arg3) kceApplyGen4orHigherMapping_DISPATCH(pGpu, pCe, arg0, arg1, arg2, arg3) +#define kceReconcileTunableState(pGpu, pEngstate, pTunableState) kceReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kceStateInitLocked(pGpu, pEngstate) kceStateInitLocked_DISPATCH(pGpu, pEngstate) +#define kceStatePreLoad(pGpu, pEngstate, arg0) kceStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kceStatePostUnload(pGpu, pEngstate, arg0) kceStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kceStateDestroy(pGpu, pEngstate) kceStateDestroy_DISPATCH(pGpu, pEngstate) +#define kceStatePreUnload(pGpu, pEngstate, arg0) kceStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kceStateInitUnlocked(pGpu, pEngstate) kceStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kceInitMissing(pGpu, pEngstate) kceInitMissing_DISPATCH(pGpu, pEngstate) +#define kceStatePreInitLocked(pGpu, pEngstate) kceStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kceStatePreInitUnlocked(pGpu, pEngstate) kceStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kceGetTunableState(pGpu, pEngstate, pTunableState) kceGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kceCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kceCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kceFreeTunableState(pGpu, pEngstate, pTunableState) kceFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kceClearInterrupt(pGpu, pIntrService, pParams) kceClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define 
kceStatePostLoad(pGpu, pEngstate, arg0) kceStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kceAllocTunableState(pGpu, pEngstate, ppTunableState) kceAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kceSetTunableState(pGpu, pEngstate, pTunableState) kceSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kceServiceInterrupt(pGpu, pIntrService, pParams) kceServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams) +static inline void kceNonstallIntrCheckAndClear_b3696a(OBJGPU *arg0, struct KernelCE *arg1, struct THREAD_STATE_NODE *arg2) { + return; +} + +#ifdef __nvoc_kernel_ce_h_disabled +static inline void kceNonstallIntrCheckAndClear(OBJGPU *arg0, struct KernelCE *arg1, struct THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); +} +#else //__nvoc_kernel_ce_h_disabled +#define kceNonstallIntrCheckAndClear(arg0, arg1, arg2) kceNonstallIntrCheckAndClear_b3696a(arg0, arg1, arg2) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceNonstallIntrCheckAndClear_HAL(arg0, arg1, arg2) kceNonstallIntrCheckAndClear(arg0, arg1, arg2) + +NV_STATUS kceUpdateClassDB_KERNEL(OBJGPU *pGpu, struct KernelCE *pKCe); + +#ifdef __nvoc_kernel_ce_h_disabled +static inline NV_STATUS kceUpdateClassDB(OBJGPU *pGpu, struct KernelCE *pKCe) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceUpdateClassDB(pGpu, pKCe) kceUpdateClassDB_KERNEL(pGpu, pKCe) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceUpdateClassDB_HAL(pGpu, pKCe) kceUpdateClassDB(pGpu, pKCe) + +NvBool kceIsCeSysmemRead_GP100(OBJGPU *pGpu, struct KernelCE *pKCe); + +#ifdef __nvoc_kernel_ce_h_disabled +static inline NvBool kceIsCeSysmemRead(OBJGPU *pGpu, struct KernelCE *pKCe) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceIsCeSysmemRead(pGpu, pKCe) kceIsCeSysmemRead_GP100(pGpu, pKCe) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceIsCeSysmemRead_HAL(pGpu, pKCe) kceIsCeSysmemRead(pGpu, pKCe) + +NvBool kceIsCeSysmemWrite_GP100(OBJGPU *pGpu, struct KernelCE *pKCe); + +#ifdef __nvoc_kernel_ce_h_disabled +static inline NvBool kceIsCeSysmemWrite(OBJGPU *pGpu, struct KernelCE *pKCe) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceIsCeSysmemWrite(pGpu, pKCe) kceIsCeSysmemWrite_GP100(pGpu, pKCe) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceIsCeSysmemWrite_HAL(pGpu, pKCe) kceIsCeSysmemWrite(pGpu, pKCe) + +NvBool kceIsCeNvlinkP2P_GP100(OBJGPU *pGpu, struct KernelCE *pKCe); + +#ifdef __nvoc_kernel_ce_h_disabled +static inline NvBool kceIsCeNvlinkP2P(OBJGPU *pGpu, struct KernelCE *pKCe) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceIsCeNvlinkP2P(pGpu, pKCe) kceIsCeNvlinkP2P_GP100(pGpu, pKCe) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceIsCeNvlinkP2P_HAL(pGpu, pKCe) kceIsCeNvlinkP2P(pGpu, pKCe) + +NV_STATUS kceGetP2PCes_GV100(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask); + +#ifdef __nvoc_kernel_ce_h_disabled +static inline NV_STATUS kceGetP2PCes(struct KernelCE *arg0, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceGetP2PCes(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes_GV100(arg0, pGpu, 
gpuMask, nvlinkP2PCeMask) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceGetP2PCes_HAL(arg0, pGpu, gpuMask, nvlinkP2PCeMask) kceGetP2PCes(arg0, pGpu, gpuMask, nvlinkP2PCeMask) + +void kceGetSysmemRWLCEs_GV100(struct KernelCE *arg0, NvU32 *rd, NvU32 *wr); + +#ifdef __nvoc_kernel_ce_h_disabled +static inline void kceGetSysmemRWLCEs(struct KernelCE *arg0, NvU32 *rd, NvU32 *wr) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); +} +#else //__nvoc_kernel_ce_h_disabled +#define kceGetSysmemRWLCEs(arg0, rd, wr) kceGetSysmemRWLCEs_GV100(arg0, rd, wr) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceGetSysmemRWLCEs_HAL(arg0, rd, wr) kceGetSysmemRWLCEs(arg0, rd, wr) + +void kceClearAssignedNvlinkPeerMasks_GV100(OBJGPU *pGpu, struct KernelCE *pKCe); + +#ifdef __nvoc_kernel_ce_h_disabled +static inline void kceClearAssignedNvlinkPeerMasks(OBJGPU *pGpu, struct KernelCE *pKCe) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); +} +#else //__nvoc_kernel_ce_h_disabled +#define kceClearAssignedNvlinkPeerMasks(pGpu, pKCe) kceClearAssignedNvlinkPeerMasks_GV100(pGpu, pKCe) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceClearAssignedNvlinkPeerMasks_HAL(pGpu, pKCe) kceClearAssignedNvlinkPeerMasks(pGpu, pKCe) + +NvBool kceGetAutoConfigTableEntry_GV100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg1, NvU32 arg2, NvU32 *arg3, NvU32 *arg4); + +#ifdef __nvoc_kernel_ce_h_disabled +static inline NvBool kceGetAutoConfigTableEntry(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, struct NVLINK_CE_AUTO_CONFIG_TABLE *arg1, NvU32 arg2, NvU32 *arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceGetAutoConfigTableEntry(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceGetAutoConfigTableEntry_GV100(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_kernel_ce_h_disabled + +#define kceGetAutoConfigTableEntry_HAL(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) kceGetAutoConfigTableEntry(pGpu, pKCe, arg0, arg1, arg2, arg3, arg4) + +NV_STATUS kceConstructEngine_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, ENGDESCRIPTOR arg0); + +static inline NV_STATUS kceConstructEngine_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, ENGDESCRIPTOR arg0) { + return pKCe->__kceConstructEngine__(pGpu, pKCe, arg0); +} + +NvBool kceIsPresent_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe); + +static inline NvBool kceIsPresent_491d52(OBJGPU *pGpu, struct KernelCE *pKCe) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kceIsPresent_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe) { + return pKCe->__kceIsPresent__(pGpu, pKCe); +} + +NV_STATUS kceStateLoad_GP100(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2); + +static inline NV_STATUS kceStateLoad_46f6a7(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kceStateLoad_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, NvU32 arg2) { + return arg1->__kceStateLoad__(arg0, arg1, arg2); +} + +NV_STATUS kceStateUnload_GP100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags); + +static inline NV_STATUS kceStateUnload_56cd7a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) { + return NV_OK; +} + +static inline NV_STATUS kceStateUnload_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 flags) { + return pKCe->__kceStateUnload__(pGpu, pKCe, flags); +} + +void kceRegisterIntrService_IMPL(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[155]); + 
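The run of declarations above repeats one generated pattern: each HAL entry point has a per-chip implementation (the _GP100/_GV100/_GA100 suffix), a stub under __nvoc_kernel_ce_h_disabled that asserts and reports failure, and a _HAL macro that forwards to whichever variant is compiled in. The short, self-contained sketch below is an editorial illustration of that pattern only; every name in it (exampleGetCount, EXAMPLE_DISABLED, EX_ERR_NOT_SUPPORTED) is hypothetical and not part of this diff.

#include <stdio.h>

#define EX_ERR_NOT_SUPPORTED (-1)              /* illustrative status value only */

int exampleGetCount_GV100(void) { return 4; }  /* per-chip implementation */

#ifdef EXAMPLE_DISABLED
/* compiled-out build: callers still link, but the call reports "unsupported" */
static inline int exampleGetCount(void) { return EX_ERR_NOT_SUPPORTED; }
#else
/* normal build: the generic name resolves to the chip-specific routine */
#define exampleGetCount() exampleGetCount_GV100()
#endif

/* the _HAL spelling is just another alias for the resolved call */
#define exampleGetCount_HAL() exampleGetCount()

int main(void) {
    printf("count = %d\n", exampleGetCount_HAL());  /* prints 4 when enabled */
    return 0;
}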
+static inline void kceRegisterIntrService_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceRecord arg2[155]) { + arg1->__kceRegisterIntrService__(arg0, arg1, arg2); +} + +NV_STATUS kceServiceNotificationInterrupt_IMPL(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceServiceNotificationInterruptArguments *arg2); + +static inline NV_STATUS kceServiceNotificationInterrupt_DISPATCH(OBJGPU *arg0, struct KernelCE *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) { + return arg1->__kceServiceNotificationInterrupt__(arg0, arg1, arg2); +} + +NV_STATUS kceGetNvlinkAutoConfigCeValues_TU102(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2); + +NV_STATUS kceGetNvlinkAutoConfigCeValues_GA100(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2); + +static inline NV_STATUS kceGetNvlinkAutoConfigCeValues_56cd7a(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) { + return NV_OK; +} + +static inline NV_STATUS kceGetNvlinkAutoConfigCeValues_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) { + return pKCe->__kceGetNvlinkAutoConfigCeValues__(pGpu, pKCe, arg0, arg1, arg2); +} + +NvBool kceGetNvlinkMaxTopoForTable_GP100(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3); + +static inline NvBool kceGetNvlinkMaxTopoForTable_491d52(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kceGetNvlinkMaxTopoForTable_DISPATCH(OBJGPU *pGpu, struct KernelCE *pKCe, struct NVLINK_TOPOLOGY_PARAMS *arg0, void *arg1, NvU32 arg2, NvU32 *arg3) { + return pKCe->__kceGetNvlinkMaxTopoForTable__(pGpu, pKCe, arg0, arg1, arg2, arg3); +} + +NvBool kceIsCurrentMaxTopology_GA100(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3); + +static inline NvBool kceIsCurrentMaxTopology_491d52(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kceIsCurrentMaxTopology_DISPATCH(OBJGPU *pGpu, struct KernelCE *arg0, struct NVLINK_TOPOLOGY_PARAMS *arg1, NvU32 *arg2, NvU32 *arg3) { + return arg0->__kceIsCurrentMaxTopology__(pGpu, arg0, arg1, arg2, arg3); +} + +NvU32 kceGetGrceConfigSize1_TU102(struct KernelCE *arg0); + +NvU32 kceGetGrceConfigSize1_GA100(struct KernelCE *arg0); + +static inline NvU32 kceGetGrceConfigSize1_4a4dee(struct KernelCE *arg0) { + return 0; +} + +static inline NvU32 kceGetGrceConfigSize1_DISPATCH(struct KernelCE *arg0) { + return arg0->__kceGetGrceConfigSize1__(arg0); +} + +NvU32 kceGetPce2lceConfigSize1_TU102(struct KernelCE *arg0); + +NvU32 kceGetPce2lceConfigSize1_GA100(struct KernelCE *arg0); + +NvU32 kceGetPce2lceConfigSize1_GA102(struct KernelCE *arg0); + +static inline NvU32 kceGetPce2lceConfigSize1_4a4dee(struct KernelCE *arg0) { + return 0; +} + +static inline NvU32 kceGetPce2lceConfigSize1_DISPATCH(struct KernelCE *arg0) { + return arg0->__kceGetPce2lceConfigSize1__(arg0); +} + +NV_STATUS kceGetMappings_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3); + +static inline NV_STATUS kceGetMappings_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kceGetMappings_DISPATCH(OBJGPU *pGpu, 
struct KernelCE *pCe, NVLINK_TOPOLOGY_PARAMS *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) { + return pCe->__kceGetMappings__(pGpu, pCe, arg0, arg1, arg2, arg3); +} + +NV_STATUS kceMapPceLceForSysmemLinks_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3); + +NV_STATUS kceMapPceLceForSysmemLinks_GA102(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3); + +static inline NV_STATUS kceMapPceLceForSysmemLinks_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kceMapPceLceForSysmemLinks_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) { + return pCe->__kceMapPceLceForSysmemLinks__(pGpu, pCe, arg0, arg1, arg2, arg3); +} + +NV_STATUS kceMapPceLceForNvlinkPeers_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2); + +static inline NV_STATUS kceMapPceLceForNvlinkPeers_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kceMapPceLceForNvlinkPeers_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2) { + return pCe->__kceMapPceLceForNvlinkPeers__(pGpu, pCe, arg0, arg1, arg2); +} + +NvU32 kceGetSysmemSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe); + +NvU32 kceGetSysmemSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe); + +static inline NvU32 kceGetSysmemSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe) { + return 0; +} + +static inline NvU32 kceGetSysmemSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe) { + return pCe->__kceGetSysmemSupportedLceMask__(pGpu, pCe); +} + +NV_STATUS kceMapAsyncLceDefault_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3); + +static inline NV_STATUS kceMapAsyncLceDefault_46f6a7(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kceMapAsyncLceDefault_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 *arg2, NvU32 arg3) { + return pCe->__kceMapAsyncLceDefault__(pGpu, pCe, arg0, arg1, arg2, arg3); +} + +NvU32 kceGetNvlinkPeerSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0); + +NvU32 kceGetNvlinkPeerSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0); + +static inline NvU32 kceGetNvlinkPeerSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0) { + return 0; +} + +static inline NvU32 kceGetNvlinkPeerSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 arg0) { + return pCe->__kceGetNvlinkPeerSupportedLceMask__(pGpu, pCe, arg0); +} + +NvU32 kceGetGrceSupportedLceMask_GA100(OBJGPU *pGpu, struct KernelCE *pCe); + +NvU32 kceGetGrceSupportedLceMask_GA102(OBJGPU *pGpu, struct KernelCE *pCe); + +static inline NvU32 kceGetGrceSupportedLceMask_4a4dee(OBJGPU *pGpu, struct KernelCE *pCe) { + return 0; +} + +static inline NvU32 kceGetGrceSupportedLceMask_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe) { + return pCe->__kceGetGrceSupportedLceMask__(pGpu, pCe); +} + +NvBool kceIsGen4orHigherSupported_GA100(OBJGPU *pGpu, struct KernelCE *pCe); + +static inline NvBool kceIsGen4orHigherSupported_cbe027(OBJGPU *pGpu, struct KernelCE *pCe) { + return ((NvBool)(0 == 0)); +} + +static inline NvBool kceIsGen4orHigherSupported_DISPATCH(OBJGPU *pGpu, 
struct KernelCE *pCe) { + return pCe->__kceIsGen4orHigherSupported__(pGpu, pCe); +} + +void kceApplyGen4orHigherMapping_GA100(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3); + +static inline void kceApplyGen4orHigherMapping_b3696a(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3) { + return; +} + +static inline void kceApplyGen4orHigherMapping_DISPATCH(OBJGPU *pGpu, struct KernelCE *pCe, NvU32 *arg0, NvU32 *arg1, NvU32 arg2, NvU32 arg3) { + pCe->__kceApplyGen4orHigherMapping__(pGpu, pCe, arg0, arg1, arg2, arg3); +} + +static inline NV_STATUS kceReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunableState) { + return pEngstate->__kceReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kceStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) { + return pEngstate->__kceStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kceStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) { + return pEngstate->__kceStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kceStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) { + return pEngstate->__kceStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void kceStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) { + pEngstate->__kceStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS kceStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) { + return pEngstate->__kceStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kceStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) { + return pEngstate->__kceStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kceInitMissing_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) { + pEngstate->__kceInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kceStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) { + return pEngstate->__kceStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kceStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate) { + return pEngstate->__kceStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kceGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunableState) { + return pEngstate->__kceGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kceCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kceCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kceFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunableState) { + pEngstate->__kceFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kceClearInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__kceClearInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS kceStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, NvU32 arg0) { + return pEngstate->__kceStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kceAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, void **ppTunableState) { + return pEngstate->__kceAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS 
kceSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelCE *pEngstate, void *pTunableState) { + return pEngstate->__kceSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvU32 kceServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct KernelCE *pIntrService, IntrServiceServiceInterruptArguments *pParams) { + return pIntrService->__kceServiceInterrupt__(pGpu, pIntrService, pParams); +} + +NV_STATUS kceTopLevelPceLceMappingsUpdate_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe); +#ifdef __nvoc_kernel_ce_h_disabled +static inline NV_STATUS kceTopLevelPceLceMappingsUpdate(OBJGPU *pGpu, struct KernelCE *pKCe) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceTopLevelPceLceMappingsUpdate(pGpu, pKCe) kceTopLevelPceLceMappingsUpdate_IMPL(pGpu, pKCe) +#endif //__nvoc_kernel_ce_h_disabled + +NV_STATUS kceGetFaultMethodBufferSize_IMPL(OBJGPU *pGpu, NvU32 *size); +#define kceGetFaultMethodBufferSize(pGpu, size) kceGetFaultMethodBufferSize_IMPL(pGpu, size) +NV_STATUS kceGetAvailableHubPceMask_IMPL(OBJGPU *pGpu, NVLINK_TOPOLOGY_PARAMS *pTopoParams); +#define kceGetAvailableHubPceMask(pGpu, pTopoParams) kceGetAvailableHubPceMask_IMPL(pGpu, pTopoParams) +NV_STATUS kceGetDeviceCaps_IMPL(OBJGPU *gpu, struct KernelCE *pKCe, NvU32 engineType, NvU8 *ceCaps); +#ifdef __nvoc_kernel_ce_h_disabled +static inline NV_STATUS kceGetDeviceCaps(OBJGPU *gpu, struct KernelCE *pKCe, NvU32 engineType, NvU8 *ceCaps) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceGetDeviceCaps(gpu, pKCe, engineType, ceCaps) kceGetDeviceCaps_IMPL(gpu, pKCe, engineType, ceCaps) +#endif //__nvoc_kernel_ce_h_disabled + +NV_STATUS kceGetCeFromNvlinkConfig_IMPL(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3); +#ifdef __nvoc_kernel_ce_h_disabled +static inline NV_STATUS kceGetCeFromNvlinkConfig(OBJGPU *pGpu, struct KernelCE *pKCe, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelCE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ce_h_disabled +#define kceGetCeFromNvlinkConfig(pGpu, pKCe, arg0, arg1, arg2, arg3) kceGetCeFromNvlinkConfig_IMPL(pGpu, pKCe, arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_ce_h_disabled + +#undef PRIVATE_FIELD + + +#endif // KERNEL_CE_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_CE_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_channel_group_api_nvoc.c b/src/nvidia/generated/g_kernel_channel_group_api_nvoc.c new file mode 100644 index 000000000..6659c48ed --- /dev/null +++ b/src/nvidia/generated/g_kernel_channel_group_api_nvoc.c @@ -0,0 +1,647 @@ +#define NVOC_KERNEL_CHANNEL_GROUP_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_channel_group_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x2b5b80 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroupApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + 
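The kernel_ce header that closes above also relies on a second convention: every virtual is a per-object function-pointer slot (such as __kceGetMappings__), and the *_DISPATCH inline simply calls through that slot after the func table has been initialized. The sketch below is an editorial illustration of that shape under hypothetical names (Widget, widgetGetSize_*); it is not part of the generated file.

#include <stdio.h>

/* a toy object with one "virtual" slot, mirroring the __kce*__ / *_DISPATCH shape */
typedef struct Widget Widget;
struct Widget {
    int (*__widgetGetSize__)(Widget *);
    int size;
};

static int widgetGetSize_IMPL(Widget *w) { return w->size; }

/* generated-style dispatcher: always call through the per-object slot */
static inline int widgetGetSize_DISPATCH(Widget *w) {
    return w->__widgetGetSize__(w);
}

/* func-table init, in the spirit of the __nvoc_init_funcTable_* routines */
static void widgetInitFuncTable(Widget *w) {
    w->__widgetGetSize__ = &widgetGetSize_IMPL;
}

int main(void) {
    Widget w = { 0 };
    w.size = 3;
    widgetInitFuncTable(&w);
    printf("%d\n", widgetGetSize_DISPATCH(&w));  /* prints 3 */
    return 0;
}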
+void __nvoc_init_KernelChannelGroupApi(KernelChannelGroupApi*); +void __nvoc_init_funcTable_KernelChannelGroupApi(KernelChannelGroupApi*); +NV_STATUS __nvoc_ctor_KernelChannelGroupApi(KernelChannelGroupApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_KernelChannelGroupApi(KernelChannelGroupApi*); +void __nvoc_dtor_KernelChannelGroupApi(KernelChannelGroupApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelChannelGroupApi; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroupApi_KernelChannelGroupApi = { + /*pClassDef=*/ &__nvoc_class_def_KernelChannelGroupApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelChannelGroupApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroupApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannelGroupApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroupApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannelGroupApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroupApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannelGroupApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroupApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannelGroupApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroupApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannelGroupApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelChannelGroupApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_KernelChannelGroupApi_KernelChannelGroupApi, + &__nvoc_rtti_KernelChannelGroupApi_GpuResource, + &__nvoc_rtti_KernelChannelGroupApi_RmResource, + &__nvoc_rtti_KernelChannelGroupApi_RmResourceCommon, + &__nvoc_rtti_KernelChannelGroupApi_RsResource, + &__nvoc_rtti_KernelChannelGroupApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroupApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelChannelGroupApi), + /*classId=*/ classId(KernelChannelGroupApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelChannelGroupApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelChannelGroupApi, + /*pCastInfo=*/ &__nvoc_castinfo_KernelChannelGroupApi, + /*pExportInfo=*/ &__nvoc_export_info_KernelChannelGroupApi +}; + +static NvBool __nvoc_thunk_KernelChannelGroupApi_resCanCopy(struct RsResource *pKernelChannelGroupApi) { + return kchangrpapiCanCopy((struct KernelChannelGroupApi *)(((unsigned char *)pKernelChannelGroupApi) - __nvoc_rtti_KernelChannelGroupApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelChannelGroupApi_gpuresControl(struct GpuResource *pKernelChannelGroupApi, struct CALL_CONTEXT 
*pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return kchangrpapiControl((struct KernelChannelGroupApi *)(((unsigned char *)pKernelChannelGroupApi) - __nvoc_rtti_KernelChannelGroupApi_GpuResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_kchangrpapiShareCallback(struct KernelChannelGroupApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannelGroupApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kchangrpapiUnmap(struct KernelChannelGroupApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannelGroupApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_kchangrpapiGetMemInterMapParams(struct KernelChannelGroupApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelChannelGroupApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_kchangrpapiGetMemoryMappingDescriptor(struct KernelChannelGroupApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelChannelGroupApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kchangrpapiGetMapAddrSpace(struct KernelChannelGroupApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannelGroupApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_kchangrpapiGetInternalObjectHandle(struct KernelChannelGroupApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannelGroupApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_kchangrpapiControlFilter(struct KernelChannelGroupApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_kchangrpapiAddAdditionalDependants(struct RsClient *pClient, struct KernelChannelGroupApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_kchangrpapiGetRefCount(struct KernelChannelGroupApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_kchangrpapiCheckMemInterUnmap(struct KernelChannelGroupApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelChannelGroupApi_RmResource.offset), bSubdeviceHandleProvided); +} + 
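Each __nvoc_thunk_* helper above converts between a base-class pointer and the containing KernelChannelGroupApi by adding or subtracting the byte offset recorded in the corresponding RTTI entry. The minimal sketch below illustrates that offset arithmetic in isolation; Base, Derived, and the other identifiers are hypothetical and are not taken from this diff.

#include <stdio.h>
#include <stddef.h>

typedef struct Base { int baseField; } Base;
typedef struct Derived {
    int  derivedField;
    Base base;            /* embedded base, like __nvoc_base_GpuResource above */
} Derived;

/* offset of the embedded base within the derived object (the RTTI "offset") */
#define BASE_OFFSET offsetof(Derived, base)

static int derivedGetField(Derived *d) { return d->derivedField; }

/* thunk: given a Base*, step back by the recorded offset to reach the Derived* */
static int thunk_baseGetField(Base *b) {
    return derivedGetField((Derived *)(((unsigned char *)b) - BASE_OFFSET));
}

int main(void) {
    Derived d = { .derivedField = 42, .base = { .baseField = 7 } };
    printf("%d\n", thunk_baseGetField(&d.base));   /* prints 42 */
    return 0;
}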
+static NV_STATUS __nvoc_thunk_RsResource_kchangrpapiMapTo(struct KernelChannelGroupApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_kchangrpapiControl_Prologue(struct KernelChannelGroupApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kchangrpapiGetRegBaseOffsetAndSize(struct KernelChannelGroupApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannelGroupApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kchangrpapiInternalControlForward(struct KernelChannelGroupApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannelGroupApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_kchangrpapiPreDestruct(struct KernelChannelGroupApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_kchangrpapiUnmapFrom(struct KernelChannelGroupApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_kchangrpapiControl_Epilogue(struct KernelChannelGroupApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_kchangrpapiControlLookup(struct KernelChannelGroupApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kchangrpapiMap(struct KernelChannelGroupApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannelGroupApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_kchangrpapiAccessCallback(struct KernelChannelGroupApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannelGroupApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_KernelChannelGroupApi[] = +{ + { /* [0] 
*/ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlSetTpcPartitionMode_a094e1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900101u, + /*paramSize=*/ sizeof(NV0090_CTRL_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlSetTpcPartitionMode" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlGetTpcPartitionMode_a094e1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900103u, + /*paramSize=*/ sizeof(NV0090_CTRL_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlGetTpcPartitionMode" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlGetMMUDebugMode_a094e1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900105u, + /*paramSize=*/ sizeof(NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlGetMMUDebugMode" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlProgramVidmemPromote_a094e1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900107u, + /*paramSize=*/ sizeof(NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlProgramVidmemPromote" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdGpFifoSchedule_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0101u, + /*paramSize=*/ sizeof(NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdGpFifoSchedule" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdBind_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0102u, + /*paramSize=*/ sizeof(NVA06C_CTRL_BIND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdBind" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdSetTimeslice_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0103u, + /*paramSize=*/ sizeof(NVA06C_CTRL_TIMESLICE_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdSetTimeslice" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdGetTimeslice_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0104u, + /*paramSize=*/ sizeof(NVA06C_CTRL_TIMESLICE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdGetTimeslice" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdPreempt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0105u, + /*paramSize=*/ sizeof(NVA06C_CTRL_PREEMPT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdPreempt" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0106u, + /*paramSize=*/ sizeof(NVA06C_CTRL_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdGetInfo" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdSetInterleaveLevel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u) + /*flags=*/ 0x110u, + /*accessRight=*/0x2u, + /*methodId=*/ 0xa06c0107u, + /*paramSize=*/ sizeof(NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdSetInterleaveLevel" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdProgramVidmemPromote_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0109u, + /*paramSize=*/ sizeof(NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdProgramVidmemPromote" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c010au, + /*paramSize=*/ sizeof(NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdMakeRealtime_IMPL, +#endif 
// NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x2u, + /*methodId=*/ 0xa06c0110u, + /*paramSize=*/ sizeof(NVA06C_CTRL_MAKE_REALTIME_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdMakeRealtime" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdInternalGpFifoSchedule_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + /*flags=*/ 0x2610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0201u, + /*paramSize=*/ sizeof(NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdInternalGpFifoSchedule" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchangrpapiCtrlCmdInternalSetTimeslice_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + /*flags=*/ 0x2610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06c0202u, + /*paramSize=*/ sizeof(NVA06C_CTRL_TIMESLICE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannelGroupApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchangrpapiCtrlCmdInternalSetTimeslice" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelChannelGroupApi = +{ + /*numEntries=*/ 16, + /*pExportEntries=*/ __nvoc_exported_method_def_KernelChannelGroupApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_KernelChannelGroupApi(KernelChannelGroupApi *pThis) { + __nvoc_kchangrpapiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelChannelGroupApi(KernelChannelGroupApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_KernelChannelGroupApi(KernelChannelGroupApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelChannelGroupApi_fail_GpuResource; + __nvoc_init_dataField_KernelChannelGroupApi(pThis); + + status = __nvoc_kchangrpapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelChannelGroupApi_fail__init; + goto __nvoc_ctor_KernelChannelGroupApi_exit; // Success + +__nvoc_ctor_KernelChannelGroupApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_KernelChannelGroupApi_fail_GpuResource: +__nvoc_ctor_KernelChannelGroupApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelChannelGroupApi_1(KernelChannelGroupApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__kchangrpapiCanCopy__ = &kchangrpapiCanCopy_IMPL; + + pThis->__kchangrpapiControl__ = &kchangrpapiControl_IMPL; + + pThis->__kchangrpapiSetLegacyMode__ = &kchangrpapiSetLegacyMode_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlCmdGpFifoSchedule__ = &kchangrpapiCtrlCmdGpFifoSchedule_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlCmdBind__ = 
&kchangrpapiCtrlCmdBind_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlCmdSetTimeslice__ = &kchangrpapiCtrlCmdSetTimeslice_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlCmdGetTimeslice__ = &kchangrpapiCtrlCmdGetTimeslice_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchangrpapiCtrlCmdPreempt__ = &kchangrpapiCtrlCmdPreempt_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlCmdGetInfo__ = &kchangrpapiCtrlCmdGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u) + pThis->__kchangrpapiCtrlCmdSetInterleaveLevel__ = &kchangrpapiCtrlCmdSetInterleaveLevel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchangrpapiCtrlCmdProgramVidmemPromote__ = &kchangrpapiCtrlCmdProgramVidmemPromote_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers__ = &kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__kchangrpapiCtrlCmdMakeRealtime__ = &kchangrpapiCtrlCmdMakeRealtime_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + pThis->__kchangrpapiCtrlCmdInternalGpFifoSchedule__ = &kchangrpapiCtrlCmdInternalGpFifoSchedule_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + pThis->__kchangrpapiCtrlCmdInternalSetTimeslice__ = &kchangrpapiCtrlCmdInternalSetTimeslice_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlGetTpcPartitionMode__ = &kchangrpapiCtrlGetTpcPartitionMode_a094e1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlSetTpcPartitionMode__ = &kchangrpapiCtrlSetTpcPartitionMode_a094e1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlGetMMUDebugMode__ = &kchangrpapiCtrlGetMMUDebugMode_a094e1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchangrpapiCtrlProgramVidmemPromote__ = &kchangrpapiCtrlProgramVidmemPromote_a094e1; +#endif + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_KernelChannelGroupApi_resCanCopy; + + pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_KernelChannelGroupApi_gpuresControl; + + pThis->__kchangrpapiShareCallback__ = &__nvoc_thunk_GpuResource_kchangrpapiShareCallback; + + pThis->__kchangrpapiUnmap__ = &__nvoc_thunk_GpuResource_kchangrpapiUnmap; + + pThis->__kchangrpapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_kchangrpapiGetMemInterMapParams; + + pThis->__kchangrpapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_kchangrpapiGetMemoryMappingDescriptor; + + pThis->__kchangrpapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_kchangrpapiGetMapAddrSpace; + + pThis->__kchangrpapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_kchangrpapiGetInternalObjectHandle; + + pThis->__kchangrpapiControlFilter__ = &__nvoc_thunk_RsResource_kchangrpapiControlFilter; + + pThis->__kchangrpapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_kchangrpapiAddAdditionalDependants; + + pThis->__kchangrpapiGetRefCount__ = &__nvoc_thunk_RsResource_kchangrpapiGetRefCount; + + pThis->__kchangrpapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_kchangrpapiCheckMemInterUnmap; + + pThis->__kchangrpapiMapTo__ = &__nvoc_thunk_RsResource_kchangrpapiMapTo; + + 
pThis->__kchangrpapiControl_Prologue__ = &__nvoc_thunk_RmResource_kchangrpapiControl_Prologue; + + pThis->__kchangrpapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_kchangrpapiGetRegBaseOffsetAndSize; + + pThis->__kchangrpapiInternalControlForward__ = &__nvoc_thunk_GpuResource_kchangrpapiInternalControlForward; + + pThis->__kchangrpapiPreDestruct__ = &__nvoc_thunk_RsResource_kchangrpapiPreDestruct; + + pThis->__kchangrpapiUnmapFrom__ = &__nvoc_thunk_RsResource_kchangrpapiUnmapFrom; + + pThis->__kchangrpapiControl_Epilogue__ = &__nvoc_thunk_RmResource_kchangrpapiControl_Epilogue; + + pThis->__kchangrpapiControlLookup__ = &__nvoc_thunk_RsResource_kchangrpapiControlLookup; + + pThis->__kchangrpapiMap__ = &__nvoc_thunk_GpuResource_kchangrpapiMap; + + pThis->__kchangrpapiAccessCallback__ = &__nvoc_thunk_RmResource_kchangrpapiAccessCallback; +} + +void __nvoc_init_funcTable_KernelChannelGroupApi(KernelChannelGroupApi *pThis) { + __nvoc_init_funcTable_KernelChannelGroupApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_KernelChannelGroupApi(KernelChannelGroupApi *pThis) { + pThis->__nvoc_pbase_KernelChannelGroupApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_KernelChannelGroupApi(pThis); +} + +NV_STATUS __nvoc_objCreate_KernelChannelGroupApi(KernelChannelGroupApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + KernelChannelGroupApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(KernelChannelGroupApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelChannelGroupApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelChannelGroupApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_KernelChannelGroupApi(pThis); + status = __nvoc_ctor_KernelChannelGroupApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_KernelChannelGroupApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelChannelGroupApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelChannelGroupApi(KernelChannelGroupApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = 
__nvoc_objCreate_KernelChannelGroupApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_channel_group_api_nvoc.h b/src/nvidia/generated/g_kernel_channel_group_api_nvoc.h new file mode 100644 index 000000000..c9f69eeb3 --- /dev/null +++ b/src/nvidia/generated/g_kernel_channel_group_api_nvoc.h @@ -0,0 +1,427 @@ +#ifndef _G_KERNEL_CHANNEL_GROUP_API_NVOC_H_ +#define _G_KERNEL_CHANNEL_GROUP_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_channel_group_api_nvoc.h" + +#ifndef KERNEL_CHANNEL_GROUP_API_H +#define KERNEL_CHANNEL_GROUP_API_H 1 + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/gpu_resource.h" +#include "kernel/gpu/gr/kernel_graphics_context.h" + +#include "ctrl/ctrla06c.h" // KEPLER_CHANNEL_GROUP_A +#include "ctrl/ctrl0090.h" // KERNEL_GRAPHICS_CONTEXT + +#include "nvoc/prelude.h" +#include "resserv/resserv.h" + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct KernelChannelGroup; + +#ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ +typedef struct KernelChannelGroup KernelChannelGroup; +#endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroup +#define __nvoc_class_id_KernelChannelGroup 0xec6de1 +#endif /* __nvoc_class_id_KernelChannelGroup */ + + + +#ifdef NVOC_KERNEL_CHANNEL_GROUP_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelChannelGroupApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct KernelChannelGroupApi *__nvoc_pbase_KernelChannelGroupApi; + NvBool (*__kchangrpapiCanCopy__)(struct KernelChannelGroupApi *); + NV_STATUS (*__kchangrpapiControl__)(struct KernelChannelGroupApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kchangrpapiSetLegacyMode__)(struct KernelChannelGroupApi *, struct OBJGPU *, struct KernelFifo *, NvHandle); + NV_STATUS (*__kchangrpapiCtrlCmdGpFifoSchedule__)(struct KernelChannelGroupApi *, NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdBind__)(struct KernelChannelGroupApi *, NVA06C_CTRL_BIND_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdSetTimeslice__)(struct KernelChannelGroupApi *, NVA06C_CTRL_TIMESLICE_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdGetTimeslice__)(struct KernelChannelGroupApi *, NVA06C_CTRL_TIMESLICE_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdPreempt__)(struct KernelChannelGroupApi *, NVA06C_CTRL_PREEMPT_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdGetInfo__)(struct KernelChannelGroupApi *, NVA06C_CTRL_GET_INFO_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdSetInterleaveLevel__)(struct KernelChannelGroupApi *, NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdProgramVidmemPromote__)(struct KernelChannelGroupApi *, NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers__)(struct KernelChannelGroupApi *, NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdMakeRealtime__)(struct KernelChannelGroupApi *, NVA06C_CTRL_MAKE_REALTIME_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdInternalGpFifoSchedule__)(struct KernelChannelGroupApi *, NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlCmdInternalSetTimeslice__)(struct KernelChannelGroupApi *, NVA06C_CTRL_TIMESLICE_PARAMS *); + NV_STATUS 
(*__kchangrpapiCtrlGetTpcPartitionMode__)(struct KernelChannelGroupApi *, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlSetTpcPartitionMode__)(struct KernelChannelGroupApi *, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlGetMMUDebugMode__)(struct KernelChannelGroupApi *, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *); + NV_STATUS (*__kchangrpapiCtrlProgramVidmemPromote__)(struct KernelChannelGroupApi *, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *); + NvBool (*__kchangrpapiShareCallback__)(struct KernelChannelGroupApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__kchangrpapiUnmap__)(struct KernelChannelGroupApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__kchangrpapiGetMemInterMapParams__)(struct KernelChannelGroupApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__kchangrpapiGetMemoryMappingDescriptor__)(struct KernelChannelGroupApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__kchangrpapiGetMapAddrSpace__)(struct KernelChannelGroupApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__kchangrpapiGetInternalObjectHandle__)(struct KernelChannelGroupApi *); + NV_STATUS (*__kchangrpapiControlFilter__)(struct KernelChannelGroupApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__kchangrpapiAddAdditionalDependants__)(struct RsClient *, struct KernelChannelGroupApi *, RsResourceRef *); + NvU32 (*__kchangrpapiGetRefCount__)(struct KernelChannelGroupApi *); + NV_STATUS (*__kchangrpapiCheckMemInterUnmap__)(struct KernelChannelGroupApi *, NvBool); + NV_STATUS (*__kchangrpapiMapTo__)(struct KernelChannelGroupApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__kchangrpapiControl_Prologue__)(struct KernelChannelGroupApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kchangrpapiGetRegBaseOffsetAndSize__)(struct KernelChannelGroupApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__kchangrpapiInternalControlForward__)(struct KernelChannelGroupApi *, NvU32, void *, NvU32); + void (*__kchangrpapiPreDestruct__)(struct KernelChannelGroupApi *); + NV_STATUS (*__kchangrpapiUnmapFrom__)(struct KernelChannelGroupApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__kchangrpapiControl_Epilogue__)(struct KernelChannelGroupApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kchangrpapiControlLookup__)(struct KernelChannelGroupApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__kchangrpapiMap__)(struct KernelChannelGroupApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__kchangrpapiAccessCallback__)(struct KernelChannelGroupApi *, struct RsClient *, void *, RsAccessRight); + struct KernelChannelGroup *pKernelChannelGroup; + NvHandle hErrorContext; + NvHandle hEccErrorContext; + NvHandle hKernelGraphicsContext; + NvHandle hLegacykCtxShareSync; + NvHandle hLegacykCtxShareAsync; +}; + +#ifndef __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +typedef struct KernelChannelGroupApi KernelChannelGroupApi; +#endif /* __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroupApi +#define __nvoc_class_id_KernelChannelGroupApi 0x2b5b80 +#endif /* __nvoc_class_id_KernelChannelGroupApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroupApi; + +#define __staticCast_KernelChannelGroupApi(pThis) \ + 
((pThis)->__nvoc_pbase_KernelChannelGroupApi) + +#ifdef __nvoc_kernel_channel_group_api_h_disabled +#define __dynamicCast_KernelChannelGroupApi(pThis) ((KernelChannelGroupApi*)NULL) +#else //__nvoc_kernel_channel_group_api_h_disabled +#define __dynamicCast_KernelChannelGroupApi(pThis) \ + ((KernelChannelGroupApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelChannelGroupApi))) +#endif //__nvoc_kernel_channel_group_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelChannelGroupApi(KernelChannelGroupApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelChannelGroupApi(KernelChannelGroupApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_KernelChannelGroupApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_KernelChannelGroupApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define kchangrpapiCanCopy(pKernelChannelGroupApi) kchangrpapiCanCopy_DISPATCH(pKernelChannelGroupApi) +#define kchangrpapiControl(pKernelChannelGroupApi, pCallContext, pParams) kchangrpapiControl_DISPATCH(pKernelChannelGroupApi, pCallContext, pParams) +#define kchangrpapiSetLegacyMode(pKernelChannelGroupApi, pGpu, pKernelFifo, hClient) kchangrpapiSetLegacyMode_DISPATCH(pKernelChannelGroupApi, pGpu, pKernelFifo, hClient) +#define kchangrpapiCtrlCmdGpFifoSchedule(pKernelChannelGroupApi, pSchedParams) kchangrpapiCtrlCmdGpFifoSchedule_DISPATCH(pKernelChannelGroupApi, pSchedParams) +#define kchangrpapiCtrlCmdBind(pKernelChannelGroupApi, pParams) kchangrpapiCtrlCmdBind_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlCmdSetTimeslice(pKernelChannelGroupApi, pTsParams) kchangrpapiCtrlCmdSetTimeslice_DISPATCH(pKernelChannelGroupApi, pTsParams) +#define kchangrpapiCtrlCmdGetTimeslice(pKernelChannelGroupApi, pTsParams) kchangrpapiCtrlCmdGetTimeslice_DISPATCH(pKernelChannelGroupApi, pTsParams) +#define kchangrpapiCtrlCmdPreempt(pKernelChannelGroupApi, pPreemptParams) kchangrpapiCtrlCmdPreempt_DISPATCH(pKernelChannelGroupApi, pPreemptParams) +#define kchangrpapiCtrlCmdGetInfo(pKernelChannelGroupApi, pParams) kchangrpapiCtrlCmdGetInfo_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlCmdSetInterleaveLevel(pKernelChannelGroupApi, pParams) kchangrpapiCtrlCmdSetInterleaveLevel_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlCmdProgramVidmemPromote(pKernelChannelGroupApi, pParams) kchangrpapiCtrlCmdProgramVidmemPromote_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers(pKernelChannelGroupApi, pParams) kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlCmdMakeRealtime(pKernelChannelGroupApi, pParams) kchangrpapiCtrlCmdMakeRealtime_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlCmdInternalGpFifoSchedule(pKernelChannelGroupApi, pSchedParams) kchangrpapiCtrlCmdInternalGpFifoSchedule_DISPATCH(pKernelChannelGroupApi, pSchedParams) +#define kchangrpapiCtrlCmdInternalSetTimeslice(pKernelChannelGroupApi, pTsParams) kchangrpapiCtrlCmdInternalSetTimeslice_DISPATCH(pKernelChannelGroupApi, pTsParams) +#define kchangrpapiCtrlGetTpcPartitionMode(pKernelChannelGroupApi, pParams) kchangrpapiCtrlGetTpcPartitionMode_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlSetTpcPartitionMode(pKernelChannelGroupApi, pParams) 
kchangrpapiCtrlSetTpcPartitionMode_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlGetMMUDebugMode(pKernelChannelGroupApi, pParams) kchangrpapiCtrlGetMMUDebugMode_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiCtrlProgramVidmemPromote(pKernelChannelGroupApi, pParams) kchangrpapiCtrlProgramVidmemPromote_DISPATCH(pKernelChannelGroupApi, pParams) +#define kchangrpapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) kchangrpapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define kchangrpapiUnmap(pGpuResource, pCallContext, pCpuMapping) kchangrpapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define kchangrpapiGetMemInterMapParams(pRmResource, pParams) kchangrpapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define kchangrpapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) kchangrpapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define kchangrpapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) kchangrpapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define kchangrpapiGetInternalObjectHandle(pGpuResource) kchangrpapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define kchangrpapiControlFilter(pResource, pCallContext, pParams) kchangrpapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define kchangrpapiAddAdditionalDependants(pClient, pResource, pReference) kchangrpapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define kchangrpapiGetRefCount(pResource) kchangrpapiGetRefCount_DISPATCH(pResource) +#define kchangrpapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) kchangrpapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define kchangrpapiMapTo(pResource, pParams) kchangrpapiMapTo_DISPATCH(pResource, pParams) +#define kchangrpapiControl_Prologue(pResource, pCallContext, pParams) kchangrpapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define kchangrpapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) kchangrpapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define kchangrpapiInternalControlForward(pGpuResource, command, pParams, size) kchangrpapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define kchangrpapiPreDestruct(pResource) kchangrpapiPreDestruct_DISPATCH(pResource) +#define kchangrpapiUnmapFrom(pResource, pParams) kchangrpapiUnmapFrom_DISPATCH(pResource, pParams) +#define kchangrpapiControl_Epilogue(pResource, pCallContext, pParams) kchangrpapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define kchangrpapiControlLookup(pResource, pParams, ppEntry) kchangrpapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define kchangrpapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) kchangrpapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define kchangrpapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) kchangrpapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool kchangrpapiCanCopy_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi); + +static inline NvBool kchangrpapiCanCopy_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi) { + return pKernelChannelGroupApi->__kchangrpapiCanCopy__(pKernelChannelGroupApi); +} + +NV_STATUS kchangrpapiControl_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS kchangrpapiControl_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pKernelChannelGroupApi->__kchangrpapiControl__(pKernelChannelGroupApi, pCallContext, pParams); +} + +NV_STATUS kchangrpapiSetLegacyMode_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle hClient); + +static inline NV_STATUS kchangrpapiSetLegacyMode_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle hClient) { + return pKernelChannelGroupApi->__kchangrpapiSetLegacyMode__(pKernelChannelGroupApi, pGpu, pKernelFifo, hClient); +} + +NV_STATUS kchangrpapiCtrlCmdGpFifoSchedule_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams); + +static inline NV_STATUS kchangrpapiCtrlCmdGpFifoSchedule_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdGpFifoSchedule__(pKernelChannelGroupApi, pSchedParams); +} + +NV_STATUS kchangrpapiCtrlCmdBind_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_BIND_PARAMS *pParams); + +static inline NV_STATUS kchangrpapiCtrlCmdBind_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_BIND_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdBind__(pKernelChannelGroupApi, pParams); +} + +NV_STATUS kchangrpapiCtrlCmdSetTimeslice_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams); + +static inline NV_STATUS kchangrpapiCtrlCmdSetTimeslice_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdSetTimeslice__(pKernelChannelGroupApi, pTsParams); +} + +NV_STATUS kchangrpapiCtrlCmdGetTimeslice_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams); + +static inline NV_STATUS kchangrpapiCtrlCmdGetTimeslice_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdGetTimeslice__(pKernelChannelGroupApi, pTsParams); +} + +NV_STATUS kchangrpapiCtrlCmdPreempt_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_PREEMPT_PARAMS *pPreemptParams); + +static inline NV_STATUS kchangrpapiCtrlCmdPreempt_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_PREEMPT_PARAMS *pPreemptParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdPreempt__(pKernelChannelGroupApi, pPreemptParams); +} + +NV_STATUS kchangrpapiCtrlCmdGetInfo_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS kchangrpapiCtrlCmdGetInfo_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_GET_INFO_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdGetInfo__(pKernelChannelGroupApi, pParams); +} + +NV_STATUS kchangrpapiCtrlCmdSetInterleaveLevel_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams); + +static inline NV_STATUS kchangrpapiCtrlCmdSetInterleaveLevel_DISPATCH(struct KernelChannelGroupApi 
*pKernelChannelGroupApi, NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdSetInterleaveLevel__(pKernelChannelGroupApi, pParams); +} + +NV_STATUS kchangrpapiCtrlCmdProgramVidmemPromote_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams); + +static inline NV_STATUS kchangrpapiCtrlCmdProgramVidmemPromote_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdProgramVidmemPromote__(pKernelChannelGroupApi, pParams); +} + +NV_STATUS kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *pParams); + +static inline NV_STATUS kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers__(pKernelChannelGroupApi, pParams); +} + +NV_STATUS kchangrpapiCtrlCmdMakeRealtime_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_MAKE_REALTIME_PARAMS *pParams); + +static inline NV_STATUS kchangrpapiCtrlCmdMakeRealtime_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_MAKE_REALTIME_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdMakeRealtime__(pKernelChannelGroupApi, pParams); +} + +NV_STATUS kchangrpapiCtrlCmdInternalGpFifoSchedule_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams); + +static inline NV_STATUS kchangrpapiCtrlCmdInternalGpFifoSchedule_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdInternalGpFifoSchedule__(pKernelChannelGroupApi, pSchedParams); +} + +NV_STATUS kchangrpapiCtrlCmdInternalSetTimeslice_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams); + +static inline NV_STATUS kchangrpapiCtrlCmdInternalSetTimeslice_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlCmdInternalSetTimeslice__(pKernelChannelGroupApi, pTsParams); +} + +static inline NV_STATUS kchangrpapiCtrlGetTpcPartitionMode_a094e1(struct KernelChannelGroupApi *pKernelChannelGroupApi, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return kgrctxCtrlHandle(resservGetTlsCallContext(), pKernelChannelGroupApi->hKernelGraphicsContext); +} + +static inline NV_STATUS kchangrpapiCtrlGetTpcPartitionMode_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlGetTpcPartitionMode__(pKernelChannelGroupApi, pParams); +} + +static inline NV_STATUS kchangrpapiCtrlSetTpcPartitionMode_a094e1(struct KernelChannelGroupApi *pKernelChannelGroupApi, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return kgrctxCtrlHandle(resservGetTlsCallContext(), pKernelChannelGroupApi->hKernelGraphicsContext); +} + +static inline NV_STATUS kchangrpapiCtrlSetTpcPartitionMode_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return 
pKernelChannelGroupApi->__kchangrpapiCtrlSetTpcPartitionMode__(pKernelChannelGroupApi, pParams); +} + +static inline NV_STATUS kchangrpapiCtrlGetMMUDebugMode_a094e1(struct KernelChannelGroupApi *pKernelChannelGroupApi, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams) { + return kgrctxCtrlHandle(resservGetTlsCallContext(), pKernelChannelGroupApi->hKernelGraphicsContext); +} + +static inline NV_STATUS kchangrpapiCtrlGetMMUDebugMode_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlGetMMUDebugMode__(pKernelChannelGroupApi, pParams); +} + +static inline NV_STATUS kchangrpapiCtrlProgramVidmemPromote_a094e1(struct KernelChannelGroupApi *pKernelChannelGroupApi, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams) { + return kgrctxCtrlHandle(resservGetTlsCallContext(), pKernelChannelGroupApi->hKernelGraphicsContext); +} + +static inline NV_STATUS kchangrpapiCtrlProgramVidmemPromote_DISPATCH(struct KernelChannelGroupApi *pKernelChannelGroupApi, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams) { + return pKernelChannelGroupApi->__kchangrpapiCtrlProgramVidmemPromote__(pKernelChannelGroupApi, pParams); +} + +static inline NvBool kchangrpapiShareCallback_DISPATCH(struct KernelChannelGroupApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__kchangrpapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS kchangrpapiUnmap_DISPATCH(struct KernelChannelGroupApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__kchangrpapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS kchangrpapiGetMemInterMapParams_DISPATCH(struct KernelChannelGroupApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__kchangrpapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS kchangrpapiGetMemoryMappingDescriptor_DISPATCH(struct KernelChannelGroupApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__kchangrpapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS kchangrpapiGetMapAddrSpace_DISPATCH(struct KernelChannelGroupApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__kchangrpapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle kchangrpapiGetInternalObjectHandle_DISPATCH(struct KernelChannelGroupApi *pGpuResource) { + return pGpuResource->__kchangrpapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS kchangrpapiControlFilter_DISPATCH(struct KernelChannelGroupApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kchangrpapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void kchangrpapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct KernelChannelGroupApi *pResource, RsResourceRef *pReference) { + pResource->__kchangrpapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 kchangrpapiGetRefCount_DISPATCH(struct KernelChannelGroupApi *pResource) { + return pResource->__kchangrpapiGetRefCount__(pResource); +} + +static inline NV_STATUS kchangrpapiCheckMemInterUnmap_DISPATCH(struct KernelChannelGroupApi 
*pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__kchangrpapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS kchangrpapiMapTo_DISPATCH(struct KernelChannelGroupApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__kchangrpapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS kchangrpapiControl_Prologue_DISPATCH(struct KernelChannelGroupApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kchangrpapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kchangrpapiGetRegBaseOffsetAndSize_DISPATCH(struct KernelChannelGroupApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__kchangrpapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS kchangrpapiInternalControlForward_DISPATCH(struct KernelChannelGroupApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__kchangrpapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void kchangrpapiPreDestruct_DISPATCH(struct KernelChannelGroupApi *pResource) { + pResource->__kchangrpapiPreDestruct__(pResource); +} + +static inline NV_STATUS kchangrpapiUnmapFrom_DISPATCH(struct KernelChannelGroupApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__kchangrpapiUnmapFrom__(pResource, pParams); +} + +static inline void kchangrpapiControl_Epilogue_DISPATCH(struct KernelChannelGroupApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__kchangrpapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kchangrpapiControlLookup_DISPATCH(struct KernelChannelGroupApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__kchangrpapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS kchangrpapiMap_DISPATCH(struct KernelChannelGroupApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__kchangrpapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool kchangrpapiAccessCallback_DISPATCH(struct KernelChannelGroupApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__kchangrpapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS kchangrpapiConstruct_IMPL(struct KernelChannelGroupApi *arg_pKernelChannelGroupApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_kchangrpapiConstruct(arg_pKernelChannelGroupApi, arg_pCallContext, arg_pParams) kchangrpapiConstruct_IMPL(arg_pKernelChannelGroupApi, arg_pCallContext, arg_pParams) +NV_STATUS kchangrpapiCopyConstruct_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_kernel_channel_group_api_h_disabled +static inline NV_STATUS kchangrpapiCopyConstruct(struct KernelChannelGroupApi *pKernelChannelGroupApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroupApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
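(Illustrative aside, not part of the generated header.) The *_DISPATCH inlines above follow the usual NVOC virtual-call pattern: each virtual method of KernelChannelGroupApi is stored as a per-object function pointer (the __kchangrpapiXxx__ slots populated during object init), and the _DISPATCH wrapper simply forwards through that slot. A minimal standalone sketch of the same idea, with hypothetical names (ExampleObject, exampleGetCount_*):

#include <stdio.h>

/* Hypothetical object with one per-instance virtual slot. */
typedef struct ExampleObject {
    int count;
    int (*__exampleGetCount__)(struct ExampleObject *);
} ExampleObject;

/* Concrete implementation, analogous to a *_IMPL function. */
static int exampleGetCount_IMPL(ExampleObject *pThis)
{
    return pThis->count;
}

/* Thin forwarding wrapper, analogous to a *_DISPATCH inline. */
static inline int exampleGetCount_DISPATCH(ExampleObject *pThis)
{
    return pThis->__exampleGetCount__(pThis);
}

int main(void)
{
    /* Filling the slot here plays the role of __nvoc_init_funcTable_*. */
    ExampleObject obj = { 3, exampleGetCount_IMPL };
    printf("count = %d\n", exampleGetCount_DISPATCH(&obj)); /* prints: count = 3 */
    return 0;
}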
//__nvoc_kernel_channel_group_api_h_disabled +#define kchangrpapiCopyConstruct(pKernelChannelGroupApi, pCallContext, pParams) kchangrpapiCopyConstruct_IMPL(pKernelChannelGroupApi, pCallContext, pParams) +#endif //__nvoc_kernel_channel_group_api_h_disabled + +void kchangrpapiDestruct_IMPL(struct KernelChannelGroupApi *pKernelChannelGroupApi); +#define __nvoc_kchangrpapiDestruct(pKernelChannelGroupApi) kchangrpapiDestruct_IMPL(pKernelChannelGroupApi) +#undef PRIVATE_FIELD + + + +NV_STATUS CliGetChannelGroup(NvHandle, NvHandle, RsResourceRef**, NvHandle*); + +#endif // KERNEL_CHANNEL_GROUP_API_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_CHANNEL_GROUP_API_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_channel_group_nvoc.c b/src/nvidia/generated/g_kernel_channel_group_nvoc.c new file mode 100644 index 000000000..58dcac12b --- /dev/null +++ b/src/nvidia/generated/g_kernel_channel_group_nvoc.c @@ -0,0 +1,187 @@ +#define NVOC_KERNEL_CHANNEL_GROUP_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_channel_group_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xec6de1 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroup; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_KernelChannelGroup(KernelChannelGroup*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelChannelGroup(KernelChannelGroup*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelChannelGroup(KernelChannelGroup*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelChannelGroup(KernelChannelGroup*, RmHalspecOwner* ); +void __nvoc_dtor_KernelChannelGroup(KernelChannelGroup*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelChannelGroup; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroup_KernelChannelGroup = { + /*pClassDef=*/ &__nvoc_class_def_KernelChannelGroup, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelChannelGroup, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroup_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannelGroup, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannelGroup_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannelGroup, __nvoc_base_RsShared), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelChannelGroup = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelChannelGroup_KernelChannelGroup, + &__nvoc_rtti_KernelChannelGroup_RsShared, + &__nvoc_rtti_KernelChannelGroup_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroup = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelChannelGroup), + /*classId=*/ classId(KernelChannelGroup), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelChannelGroup", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelChannelGroup, + /*pCastInfo=*/ &__nvoc_castinfo_KernelChannelGroup, + /*pExportInfo=*/ &__nvoc_export_info_KernelChannelGroup +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelChannelGroup = +{ + /*numEntries=*/ 0, + 
/*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_KernelChannelGroup(KernelChannelGroup *pThis) { + __nvoc_kchangrpDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelChannelGroup(KernelChannelGroup *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_KernelChannelGroup(KernelChannelGroup *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_KernelChannelGroup_fail_RsShared; + __nvoc_init_dataField_KernelChannelGroup(pThis, pRmhalspecowner); + + status = __nvoc_kchangrpConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_KernelChannelGroup_fail__init; + goto __nvoc_ctor_KernelChannelGroup_exit; // Success + +__nvoc_ctor_KernelChannelGroup_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_KernelChannelGroup_fail_RsShared: +__nvoc_ctor_KernelChannelGroup_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelChannelGroup_1(KernelChannelGroup *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +void __nvoc_init_funcTable_KernelChannelGroup(KernelChannelGroup *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelChannelGroup_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_KernelChannelGroup(KernelChannelGroup *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelChannelGroup = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_KernelChannelGroup(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelChannelGroup(KernelChannelGroup **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelChannelGroup *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelChannelGroup)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelChannelGroup)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelChannelGroup); + + if (pParent != NULL && !(createFlags & 
NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelChannelGroup(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelChannelGroup(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelChannelGroup_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelChannelGroup_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelChannelGroup(KernelChannelGroup **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelChannelGroup(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_channel_group_nvoc.h b/src/nvidia/generated/g_kernel_channel_group_nvoc.h new file mode 100644 index 000000000..ed4857619 --- /dev/null +++ b/src/nvidia/generated/g_kernel_channel_group_nvoc.h @@ -0,0 +1,443 @@ +#ifndef _G_KERNEL_CHANNEL_GROUP_NVOC_H_ +#define _G_KERNEL_CHANNEL_GROUP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_channel_group_nvoc.h" + +#ifndef KERNEL_CHANNEL_GROUP_H +#define KERNEL_CHANNEL_GROUP_H 1 + +#include "kernel/mem_mgr/vaspace.h" + +#include "ctrl/ctrl0080/ctrl0080gr.h" // NV03_DEVICE + +#include "libraries/containers/btree.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "nvoc/prelude.h" +#include "resserv/resserv.h" +#include "gpu/gpu_resource.h" + +#include "kernel/gpu/fifo/kernel_channel.h" + +#include "kernel/gpu/fifo/kernel_ctxshare.h" + +// Forward declaration +struct KernelChannelGroupApi; + +#ifndef __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +typedef struct KernelChannelGroupApi KernelChannelGroupApi; +#endif /* __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroupApi +#define __nvoc_class_id_KernelChannelGroupApi 0x2b5b80 +#endif /* __nvoc_class_id_KernelChannelGroupApi */ + + + +/*! + * Stores the list of all client kctxshareApi objects + * that reference the same shared kctxshare object + */ +MAKE_LIST(KernelChannelGroupApiList, KernelChannelGroupApi *); + +typedef enum +{ + CHANNELGROUP_STATE_ENABLE, + CHANNELGROUP_STATE_SCHED, + CHANNELGROUP_STATE_COUNT // Should be last +} CHANNELGROUP_STATE; + +// +// Describes the engine context memory for a channel +// (Stored in KernelChannelGroup because it's shared by all channels in the group) +// +typedef struct ENGINE_CTX_DESCRIPTOR +{ + MEMORY_DESCRIPTOR *pMemDesc; // Context memory + VA_LIST vaList; // Map to track the gpu va mapping to the context buffer + NvU32 engDesc; // Which engine type +} ENGINE_CTX_DESCRIPTOR; + + +// +// HW method buffer used by supporting engines to save/restore +// faulting methods after corresponding fault is handled. +// +typedef struct _HW_ENG_FAULT_METHOD_BUFFER +{ + NvU64 bar2Addr; + MEMORY_DESCRIPTOR *pMemDesc; +} HW_ENG_FAULT_METHOD_BUFFER; + +// +// dword array size used to track the valid subcontext mask. +// We use 1 bit per subcontext; so need 2 dwords to store the valid bitmask. +// +#define SUBCTX_MASK_ARRAY_SIZE 2 + +/** + * This class represents data that is shared when a TSG is duped. + * + * Instances of this class are ref-counted and will be kept alive until + * all TSG copies have been freed. 
+ */ +#ifdef NVOC_KERNEL_CHANNEL_GROUP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelChannelGroup { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsShared __nvoc_base_RsShared; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + struct KernelChannelGroup *__nvoc_pbase_KernelChannelGroup; + NODE node; + NvU32 grpID; + NvU32 runlistId; + NvU32 chanCount; + NvU32 engineType; + struct OBJVASPACE *pVAS; + NvU32 gfid; + struct OBJEHEAP *pSubctxIdHeap; + CHANNEL_LIST *pChanList; + NvU64 timesliceUs; + ENGINE_CTX_DESCRIPTOR **ppEngCtxDesc; + NvBool bAllocatedByRm; + NvBool bLegacyMode; + HW_ENG_FAULT_METHOD_BUFFER *pMthdBuffers; + NvU32 (*ppSubctxMask)[2]; + NvU32 (*ppZombieSubctxMask)[2]; + NvU32 *pStateMask; + NvU32 *pInterleaveLevel; + NvBool bRunlistAssigned; + struct CTX_BUF_POOL_INFO *pCtxBufPool; + struct CTX_BUF_POOL_INFO *pChannelBufPool; + struct MapNode mapNode; + KernelChannelGroupApiList apiObjList; + NvBool bIsCallingContextVgpuPlugin; +}; + +#ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ +typedef struct KernelChannelGroup KernelChannelGroup; +#endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroup +#define __nvoc_class_id_KernelChannelGroup 0xec6de1 +#endif /* __nvoc_class_id_KernelChannelGroup */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannelGroup; + +#define __staticCast_KernelChannelGroup(pThis) \ + ((pThis)->__nvoc_pbase_KernelChannelGroup) + +#ifdef __nvoc_kernel_channel_group_h_disabled +#define __dynamicCast_KernelChannelGroup(pThis) ((KernelChannelGroup*)NULL) +#else //__nvoc_kernel_channel_group_h_disabled +#define __dynamicCast_KernelChannelGroup(pThis) \ + ((KernelChannelGroup*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelChannelGroup))) +#endif //__nvoc_kernel_channel_group_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelChannelGroup(KernelChannelGroup**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelChannelGroup(KernelChannelGroup**, Dynamic*, NvU32); +#define __objCreate_KernelChannelGroup(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelChannelGroup((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +static inline NV_STATUS kchangrpSetInterleaveLevelSched_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) { + return NV_OK; +} + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpSetInterleaveLevelSched(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpSetInterleaveLevelSched(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevelSched_56cd7a(pGpu, pKernelChannelGroup, value) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpSetInterleaveLevelSched_HAL(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevelSched(pGpu, pKernelChannelGroup, value) + +NvU32 kchangrpGetDefaultRunlist_GM107(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup); + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NvU32 kchangrpGetDefaultRunlist(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + 
return 0; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpGetDefaultRunlist(pGpu, pKernelChannelGroup) kchangrpGetDefaultRunlist_GM107(pGpu, pKernelChannelGroup) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpGetDefaultRunlist_HAL(pGpu, pKernelChannelGroup) kchangrpGetDefaultRunlist(pGpu, pKernelChannelGroup) + +static inline void kchangrpUpdateSubcontextMask_b3696a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) { + return; +} + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline void kchangrpUpdateSubcontextMask(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpUpdateSubcontextMask(pGpu, arg0, arg1, arg2) kchangrpUpdateSubcontextMask_b3696a(pGpu, arg0, arg1, arg2) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpUpdateSubcontextMask_HAL(pGpu, arg0, arg1, arg2) kchangrpUpdateSubcontextMask(pGpu, arg0, arg1, arg2) + +static inline void kchangrpSetSubcontextZombieState_b3696a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) { + return; +} + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline void kchangrpSetSubcontextZombieState(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1, NvBool arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpSetSubcontextZombieState(pGpu, arg0, arg1, arg2) kchangrpSetSubcontextZombieState_b3696a(pGpu, arg0, arg1, arg2) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpSetSubcontextZombieState_HAL(pGpu, arg0, arg1, arg2) kchangrpSetSubcontextZombieState(pGpu, arg0, arg1, arg2) + +static inline NvBool kchangrpGetSubcontextZombieState_ceaee8(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1) { + NV_ASSERT_PRECOMP(0); + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NvBool kchangrpGetSubcontextZombieState(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpGetSubcontextZombieState(pGpu, arg0, arg1) kchangrpGetSubcontextZombieState_ceaee8(pGpu, arg0, arg1) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpGetSubcontextZombieState_HAL(pGpu, arg0, arg1) kchangrpGetSubcontextZombieState(pGpu, arg0, arg1) + +static inline NV_STATUS kchangrpFreeGrSubcontextHdrs_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0) { + return NV_OK; +} + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpFreeGrSubcontextHdrs(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpFreeGrSubcontextHdrs(pGpu, arg0) kchangrpFreeGrSubcontextHdrs_56cd7a(pGpu, arg0) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpFreeGrSubcontextHdrs_HAL(pGpu, arg0) kchangrpFreeGrSubcontextHdrs(pGpu, arg0) + +NV_STATUS kchangrpAllocFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup); + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpAllocFaultMethodBuffers(struct 
OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpAllocFaultMethodBuffers(pGpu, pKernelChannelGroup) kchangrpAllocFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpAllocFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup) kchangrpAllocFaultMethodBuffers(pGpu, pKernelChannelGroup) + +NV_STATUS kchangrpFreeFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup); + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpFreeFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpFreeFaultMethodBuffers(pGpu, pKernelChannelGroup) kchangrpFreeFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpFreeFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup) kchangrpFreeFaultMethodBuffers(pGpu, pKernelChannelGroup) + +NV_STATUS kchangrpMapFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue); + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpMapFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpMapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) kchangrpMapFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup, runqueue) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpMapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, runqueue) kchangrpMapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) + +NV_STATUS kchangrpUnmapFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue); + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpUnmapFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 runqueue) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpUnmapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) kchangrpUnmapFaultMethodBuffers_GV100(pGpu, pKernelChannelGroup, runqueue) +#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpUnmapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, runqueue) kchangrpUnmapFaultMethodBuffers(pGpu, pKernelChannelGroup, runqueue) + +static inline NV_STATUS kchangrpSetRealtime_56cd7a(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvBool bRealtime) { + return NV_OK; +} + +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpSetRealtime(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvBool bRealtime) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpSetRealtime(pGpu, pKernelChannelGroup, bRealtime) kchangrpSetRealtime_56cd7a(pGpu, pKernelChannelGroup, bRealtime) 
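(Illustrative aside, not part of the generated header.) The block above shows the two idioms applied to every kchangrp* entry point: when the class is compiled out (__nvoc_kernel_channel_group_h_disabled), the call collapses to an asserting inline stub that returns NV_ERR_NOT_SUPPORTED; otherwise the public name is a macro bound directly to the selected implementation (_IMPL, _56cd7a, _GM107, _GV100), and the _HAL alias forwards to whichever binding was chosen. A condensed sketch of that guard, assuming the RM's NV_STATUS / NV_ASSERT_FAILED_PRECOMP definitions are in scope; exampleDoWork and EXAMPLE_CLASS_DISABLED are hypothetical names:

NV_STATUS exampleDoWork_IMPL(struct OBJGPU *pGpu);

#ifdef EXAMPLE_CLASS_DISABLED
/* Compiled-out variant: assert and fail gracefully. */
static inline NV_STATUS exampleDoWork(struct OBJGPU *pGpu)
{
    NV_ASSERT_FAILED_PRECOMP("Example class was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else
/* Normal variant: bind the public name straight to the implementation. */
#define exampleDoWork(pGpu) exampleDoWork_IMPL(pGpu)
#endif

/* The _HAL alias adds one more level of indirection over the binding. */
#define exampleDoWork_HAL(pGpu) exampleDoWork(pGpu)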
+#endif //__nvoc_kernel_channel_group_h_disabled + +#define kchangrpSetRealtime_HAL(pGpu, pKernelChannelGroup, bRealtime) kchangrpSetRealtime(pGpu, pKernelChannelGroup, bRealtime) + +NV_STATUS kchangrpConstruct_IMPL(struct KernelChannelGroup *arg_pKernelChannelGroup); +#define __nvoc_kchangrpConstruct(arg_pKernelChannelGroup) kchangrpConstruct_IMPL(arg_pKernelChannelGroup) +void kchangrpDestruct_IMPL(struct KernelChannelGroup *pKernelChannelGroup); +#define __nvoc_kchangrpDestruct(pKernelChannelGroup) kchangrpDestruct_IMPL(pKernelChannelGroup) +void kchangrpSetState_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline void kchangrpSetState(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpSetState(pKernelChannelGroup, subdevice, state) kchangrpSetState_IMPL(pKernelChannelGroup, subdevice, state) +#endif //__nvoc_kernel_channel_group_h_disabled + +void kchangrpClearState_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline void kchangrpClearState(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpClearState(pKernelChannelGroup, subdevice, state) kchangrpClearState_IMPL(pKernelChannelGroup, subdevice, state) +#endif //__nvoc_kernel_channel_group_h_disabled + +NvBool kchangrpIsStateSet_IMPL(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NvBool kchangrpIsStateSet(struct KernelChannelGroup *pKernelChannelGroup, NvU32 subdevice, CHANNELGROUP_STATE state) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpIsStateSet(pKernelChannelGroup, subdevice, state) kchangrpIsStateSet_IMPL(pKernelChannelGroup, subdevice, state) +#endif //__nvoc_kernel_channel_group_h_disabled + +NV_STATUS kchangrpAddChannel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpAddChannel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpAddChannel(pGpu, pKernelChannelGroup, pKernelChannel) kchangrpAddChannel_IMPL(pGpu, pKernelChannelGroup, pKernelChannel) +#endif //__nvoc_kernel_channel_group_h_disabled + +NV_STATUS kchangrpRemoveChannel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpRemoveChannel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define 
kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel) kchangrpRemoveChannel_IMPL(pGpu, pKernelChannelGroup, pKernelChannel) +#endif //__nvoc_kernel_channel_group_h_disabled + +NV_STATUS kchangrpInit_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct OBJVASPACE *pVAS, NvU32 gfid); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpInit(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, struct OBJVASPACE *pVAS, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpInit(pGpu, pKernelChannelGroup, pVAS, gfid) kchangrpInit_IMPL(pGpu, pKernelChannelGroup, pVAS, gfid) +#endif //__nvoc_kernel_channel_group_h_disabled + +NV_STATUS kchangrpDestroy_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpDestroy(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpDestroy(pGpu, pKernelChannelGroup) kchangrpDestroy_IMPL(pGpu, pKernelChannelGroup) +#endif //__nvoc_kernel_channel_group_h_disabled + +NV_STATUS kchangrpAllocEngineContextDescriptor_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpAllocEngineContextDescriptor(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpAllocEngineContextDescriptor(pGpu, pKernelChannelGroup) kchangrpAllocEngineContextDescriptor_IMPL(pGpu, pKernelChannelGroup) +#endif //__nvoc_kernel_channel_group_h_disabled + +NV_STATUS kchangrpGetEngineContextMemDesc_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, MEMORY_DESCRIPTOR **arg1); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpGetEngineContextMemDesc(struct OBJGPU *pGpu, struct KernelChannelGroup *arg0, MEMORY_DESCRIPTOR **arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpGetEngineContextMemDesc(pGpu, arg0, arg1) kchangrpGetEngineContextMemDesc_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_kernel_channel_group_h_disabled + +NV_STATUS kchangrpSetInterleaveLevel_IMPL(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value); +#ifdef __nvoc_kernel_channel_group_h_disabled +static inline NV_STATUS kchangrpSetInterleaveLevel(struct OBJGPU *pGpu, struct KernelChannelGroup *pKernelChannelGroup, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("KernelChannelGroup was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_group_h_disabled +#define kchangrpSetInterleaveLevel(pGpu, pKernelChannelGroup, value) kchangrpSetInterleaveLevel_IMPL(pGpu, pKernelChannelGroup, value) +#endif //__nvoc_kernel_channel_group_h_disabled + +#undef PRIVATE_FIELD + + +MAKE_INTRUSIVE_MAP(KernelChannelGroupMap, KernelChannelGroup, mapNode); + +#endif // KERNEL_CHANNEL_GROUP_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_CHANNEL_GROUP_NVOC_H_ diff --git 
a/src/nvidia/generated/g_kernel_channel_nvoc.c b/src/nvidia/generated/g_kernel_channel_nvoc.c new file mode 100644 index 000000000..30e98129d --- /dev/null +++ b/src/nvidia/generated/g_kernel_channel_nvoc.c @@ -0,0 +1,1319 @@ +#define NVOC_KERNEL_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_channel_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x5d8d70 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannel; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_KernelChannel(KernelChannel*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelChannel(KernelChannel*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelChannel(KernelChannel*, RmHalspecOwner* , CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_KernelChannel(KernelChannel*, RmHalspecOwner* ); +void __nvoc_dtor_KernelChannel(KernelChannel*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelChannel; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannel_KernelChannel = { + /*pClassDef=*/ &__nvoc_class_def_KernelChannel, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelChannel, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannel_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannel_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannel_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannel_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannel, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannel_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannel, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannel_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelChannel, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelChannel_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, 
+ /*offset=*/ NV_OFFSETOF(KernelChannel, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelChannel = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_KernelChannel_KernelChannel, + &__nvoc_rtti_KernelChannel_Notifier, + &__nvoc_rtti_KernelChannel_INotifier, + &__nvoc_rtti_KernelChannel_GpuResource, + &__nvoc_rtti_KernelChannel_RmResource, + &__nvoc_rtti_KernelChannel_RmResourceCommon, + &__nvoc_rtti_KernelChannel_RsResource, + &__nvoc_rtti_KernelChannel_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannel = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelChannel), + /*classId=*/ classId(KernelChannel), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelChannel", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelChannel, + /*pCastInfo=*/ &__nvoc_castinfo_KernelChannel, + /*pExportInfo=*/ &__nvoc_export_info_KernelChannel +}; + +static NV_STATUS __nvoc_thunk_KernelChannel_gpuresMap(struct GpuResource *pKernelChannel, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return kchannelMap((struct KernelChannel *)(((unsigned char *)pKernelChannel) - __nvoc_rtti_KernelChannel_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_KernelChannel_gpuresUnmap(struct GpuResource *pKernelChannel, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return kchannelUnmap((struct KernelChannel *)(((unsigned char *)pKernelChannel) - __nvoc_rtti_KernelChannel_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_KernelChannel_gpuresGetMapAddrSpace(struct GpuResource *pKernelChannel, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return kchannelGetMapAddrSpace((struct KernelChannel *)(((unsigned char *)pKernelChannel) - __nvoc_rtti_KernelChannel_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NV_STATUS __nvoc_thunk_KernelChannel_rmresGetMemInterMapParams(struct RmResource *pKernelChannel, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return kchannelGetMemInterMapParams((struct KernelChannel *)(((unsigned char *)pKernelChannel) - __nvoc_rtti_KernelChannel_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_KernelChannel_rmresCheckMemInterUnmap(struct RmResource *pKernelChannel, NvBool bSubdeviceHandleProvided) { + return kchannelCheckMemInterUnmap((struct KernelChannel *)(((unsigned char *)pKernelChannel) - __nvoc_rtti_KernelChannel_RmResource.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_kchannelShareCallback(struct KernelChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannel_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_kchannelMapTo(struct KernelChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_kchannelGetOrAllocNotifShare(struct KernelChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + 
__nvoc_rtti_KernelChannel_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static void __nvoc_thunk_Notifier_kchannelSetNotificationShare(struct KernelChannel *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelChannel_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_kchannelGetRefCount(struct KernelChannel *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_kchannelAddAdditionalDependants(struct RsClient *pClient, struct KernelChannel *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_kchannelControl_Prologue(struct KernelChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kchannelGetRegBaseOffsetAndSize(struct KernelChannel *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannel_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kchannelInternalControlForward(struct KernelChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannel_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_kchannelUnmapFrom(struct KernelChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_kchannelControl_Epilogue(struct KernelChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_kchannelControlLookup(struct KernelChannel *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_kchannelGetInternalObjectHandle(struct KernelChannel *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannel_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kchannelControl(struct KernelChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelChannel_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_kchannelGetMemoryMappingDescriptor(struct KernelChannel 
*pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelChannel_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_kchannelControlFilter(struct KernelChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_kchannelUnregisterEvent(struct KernelChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelChannel_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_kchannelCanCopy(struct KernelChannel *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_kchannelPreDestruct(struct KernelChannel *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_kchannelGetNotificationListPtr(struct KernelChannel *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelChannel_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_kchannelGetNotificationShare(struct KernelChannel *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelChannel_Notifier.offset)); +} + +static NvBool __nvoc_thunk_RmResource_kchannelAccessCallback(struct KernelChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelChannel_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_KernelChannel[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlSetTpcPartitionMode_a094e1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900101u, + /*paramSize=*/ sizeof(NV0090_CTRL_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlSetTpcPartitionMode" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlGetTpcPartitionMode_a094e1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900103u, + /*paramSize=*/ sizeof(NV0090_CTRL_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlGetTpcPartitionMode" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ 
(void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlGetMMUDebugMode_a094e1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900105u, + /*paramSize=*/ sizeof(NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlGetMMUDebugMode" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlProgramVidmemPromote_a094e1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900107u, + /*paramSize=*/ sizeof(NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlProgramVidmemPromote" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdResetIsolatedChannel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x506f0105u, + /*paramSize=*/ sizeof(NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdResetIsolatedChannel" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetClassEngineid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x906f0101u, + /*paramSize=*/ sizeof(NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetClassEngineid" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdResetChannel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x906f0102u, + /*paramSize=*/ sizeof(NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdResetChannel" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetDeferRCState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x906f0105u, + /*paramSize=*/ sizeof(NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetDeferRCState" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetMmuFaultInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x906f0106u, + /*paramSize=*/ sizeof(NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"kchannelCtrlCmdGetMmuFaultInfo" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdEventSetNotification_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x906f0203u, + /*paramSize=*/ sizeof(NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdEventSetNotification" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetClassEngineidA06F_6a9a13, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06f0101u, + /*paramSize=*/ sizeof(NVA06F_CTRL_GET_CLASS_ENGINEID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetClassEngineidA06F" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdResetChannelA06F_ef73a1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06f0102u, + /*paramSize=*/ sizeof(NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdResetChannelA06F" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGpFifoSchedule_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06f0103u, + /*paramSize=*/ sizeof(NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGpFifoSchedule" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdBind_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06f0104u, + /*paramSize=*/ sizeof(NVA06F_CTRL_BIND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdBind" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetMmuFaultInfoA06F_a7f9ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06f0107u, + /*paramSize=*/ sizeof(NVA06F_CTRL_GET_MMU_FAULT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetMmuFaultInfoA06F" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdSetErrorNotifier_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06f0108u, + /*paramSize=*/ sizeof(NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS), + 
/*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdSetErrorNotifier" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdSetInterleaveLevel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u) + /*flags=*/ 0x110u, + /*accessRight=*/0x2u, + /*methodId=*/ 0xa06f0109u, + /*paramSize=*/ sizeof(NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdSetInterleaveLevel" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdRestartRunlist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x2u, + /*methodId=*/ 0xa06f0111u, + /*paramSize=*/ sizeof(NVA06F_CTRL_RESTART_RUNLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdRestartRunlist" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdStopChannel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa06f0112u, + /*paramSize=*/ sizeof(NVA06F_CTRL_STOP_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdStopChannel" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetClassEngineidA16F_6a9a13, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa16f0101u, + /*paramSize=*/ sizeof(NVA16F_CTRL_GET_CLASS_ENGINEID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetClassEngineidA16F" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdResetChannelA16F_ef73a1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa16f0102u, + /*paramSize=*/ sizeof(NVA16F_CTRL_CMD_RESET_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdResetChannelA16F" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGpFifoScheduleA16F_6546a6, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa16f0103u, + /*paramSize=*/ sizeof(NVA16F_CTRL_GPFIFO_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGpFifoScheduleA16F" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetClassEngineidA26F_6a9a13, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0xa26f0101u, + /*paramSize=*/ sizeof(NVA26F_CTRL_GET_CLASS_ENGINEID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetClassEngineidA26F" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdResetChannelA26F_ef73a1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa26f0102u, + /*paramSize=*/ sizeof(NVA26F_CTRL_CMD_RESET_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdResetChannelA26F" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelFCtrlCmdGpFifoScheduleA26F_6546a6, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xa26f0103u, + /*paramSize=*/ sizeof(NVA26F_CTRL_GPFIFO_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelFCtrlCmdGpFifoScheduleA26F" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetClassEngineidB06F_6a9a13, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f0101u, + /*paramSize=*/ sizeof(NVB06F_CTRL_GET_CLASS_ENGINEID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetClassEngineidB06F" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdResetChannelB06F_ef73a1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f0102u, + /*paramSize=*/ sizeof(NVB06F_CTRL_CMD_RESET_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdResetChannelB06F" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGpFifoScheduleB06F_6546a6, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f0103u, + /*paramSize=*/ sizeof(NVB06F_CTRL_GPFIFO_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGpFifoScheduleB06F" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdBindB06F_2c1c21, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f0104u, + /*paramSize=*/ sizeof(NVB06F_CTRL_BIND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdBindB06F" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
kchannelCtrlCmdGetEngineCtxSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f010bu, + /*paramSize=*/ sizeof(NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetEngineCtxSize" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetEngineCtxData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f010cu, + /*paramSize=*/ sizeof(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetEngineCtxData" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdMigrateEngineCtxData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f010du, + /*paramSize=*/ sizeof(NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdMigrateEngineCtxData" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetEngineCtxState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f010eu, + /*paramSize=*/ sizeof(NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetEngineCtxState" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetChannelHwState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f010fu, + /*paramSize=*/ sizeof(NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetChannelHwState" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdSetChannelHwState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb06f0110u, + /*paramSize=*/ sizeof(NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdSetChannelHwState" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetClassEngineidC06F_6a9a13, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc06f0101u, + /*paramSize=*/ sizeof(NVC06F_CTRL_GET_CLASS_ENGINEID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"kchannelCtrlCmdGetClassEngineidC06F" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdResetChannelC06F_ef73a1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc06f0102u, + /*paramSize=*/ sizeof(NVC06F_CTRL_CMD_RESET_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdResetChannelC06F" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGpFifoScheduleC06F_6546a6, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc06f0103u, + /*paramSize=*/ sizeof(NVC06F_CTRL_GPFIFO_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGpFifoScheduleC06F" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdBindC06F_2c1c21, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc06f0104u, + /*paramSize=*/ sizeof(NVC06F_CTRL_BIND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdBindC06F" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGetClassEngineidC36F_6a9a13, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc36f0101u, + /*paramSize=*/ sizeof(NVC36F_CTRL_GET_CLASS_ENGINEID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGetClassEngineidC36F" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdResetChannelC36F_ef73a1, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc36f0102u, + /*paramSize=*/ sizeof(NVC36F_CTRL_CMD_RESET_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdResetChannelC36F" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGpFifoScheduleC36F_6546a6, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc36f0103u, + /*paramSize=*/ sizeof(NVC36F_CTRL_GPFIFO_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGpFifoScheduleC36F" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdBindC36F_2c1c21, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc36f0104u, + /*paramSize=*/ sizeof(NVC36F_CTRL_BIND_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdBindC36F" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGpfifoGetWorkSubmitToken_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc36f0108u, + /*paramSize=*/ sizeof(NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGpfifoGetWorkSubmitToken" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc36f0109u, + /*paramSize=*/ sizeof(NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc36f010au, + /*paramSize=*/ sizeof(NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelChannel.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelChannel = +{ + /*numEntries=*/ 46, + /*pExportEntries=*/ __nvoc_exported_method_def_KernelChannel +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_KernelChannel(KernelChannel *pThis) { + __nvoc_kchannelDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelChannel(KernelChannel *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_KernelChannel(KernelChannel *pThis, RmHalspecOwner *pRmhalspecowner, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelChannel_fail_GpuResource; + status = 
__nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_KernelChannel_fail_Notifier; + __nvoc_init_dataField_KernelChannel(pThis, pRmhalspecowner); + + status = __nvoc_kchannelConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelChannel_fail__init; + goto __nvoc_ctor_KernelChannel_exit; // Success + +__nvoc_ctor_KernelChannel_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_KernelChannel_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_KernelChannel_fail_GpuResource: +__nvoc_ctor_KernelChannel_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelChannel_1(KernelChannel *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__kchannelMap__ = &kchannelMap_IMPL; + + pThis->__kchannelUnmap__ = &kchannelUnmap_IMPL; + + pThis->__kchannelGetMapAddrSpace__ = &kchannelGetMapAddrSpace_IMPL; + + pThis->__kchannelGetMemInterMapParams__ = &kchannelGetMemInterMapParams_IMPL; + + pThis->__kchannelCheckMemInterUnmap__ = &kchannelCheckMemInterUnmap_IMPL; + + // Hal function -- kchannelIsUserdAddrSizeValid + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kchannelIsUserdAddrSizeValid__ = &kchannelIsUserdAddrSizeValid_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kchannelIsUserdAddrSizeValid__ = &kchannelIsUserdAddrSizeValid_GA100; + } + else if (0) + { + } + else if (0) + { + } + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdResetIsolatedChannel__ = &kchannelCtrlCmdResetIsolatedChannel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGetClassEngineid__ = &kchannelCtrlCmdGetClassEngineid_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdResetChannel__ = &kchannelCtrlCmdResetChannel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdGetDeferRCState__ = &kchannelCtrlCmdGetDeferRCState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdGetMmuFaultInfo__ = &kchannelCtrlCmdGetMmuFaultInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdEventSetNotification__ = &kchannelCtrlCmdEventSetNotification_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGetClassEngineidA06F__ = &kchannelCtrlCmdGetClassEngineidA06F_6a9a13; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdResetChannelA06F__ = &kchannelCtrlCmdResetChannelA06F_ef73a1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + 
pThis->__kchannelCtrlCmdGpFifoSchedule__ = &kchannelCtrlCmdGpFifoSchedule_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdBind__ = &kchannelCtrlCmdBind_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdGetMmuFaultInfoA06F__ = &kchannelCtrlCmdGetMmuFaultInfoA06F_a7f9ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdSetErrorNotifier__ = &kchannelCtrlCmdSetErrorNotifier_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x110u) + pThis->__kchannelCtrlCmdSetInterleaveLevel__ = &kchannelCtrlCmdSetInterleaveLevel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdRestartRunlist__ = &kchannelCtrlCmdRestartRunlist_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGetClassEngineidA16F__ = &kchannelCtrlCmdGetClassEngineidA16F_6a9a13; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdResetChannelA16F__ = &kchannelCtrlCmdResetChannelA16F_ef73a1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGpFifoScheduleA16F__ = &kchannelCtrlCmdGpFifoScheduleA16F_6546a6; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGetClassEngineidA26F__ = &kchannelCtrlCmdGetClassEngineidA26F_6a9a13; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdResetChannelA26F__ = &kchannelCtrlCmdResetChannelA26F_ef73a1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelFCtrlCmdGpFifoScheduleA26F__ = &kchannelFCtrlCmdGpFifoScheduleA26F_6546a6; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGetClassEngineidB06F__ = &kchannelCtrlCmdGetClassEngineidB06F_6a9a13; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdResetChannelB06F__ = &kchannelCtrlCmdResetChannelB06F_ef73a1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGpFifoScheduleB06F__ = &kchannelCtrlCmdGpFifoScheduleB06F_6546a6; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdBindB06F__ = &kchannelCtrlCmdBindB06F_2c1c21; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdGetEngineCtxSize__ = &kchannelCtrlCmdGetEngineCtxSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdGetEngineCtxData__ = &kchannelCtrlCmdGetEngineCtxData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__kchannelCtrlCmdMigrateEngineCtxData__ = &kchannelCtrlCmdMigrateEngineCtxData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdGetEngineCtxState__ = &kchannelCtrlCmdGetEngineCtxState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__kchannelCtrlCmdGetChannelHwState__ = &kchannelCtrlCmdGetChannelHwState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__kchannelCtrlCmdSetChannelHwState__ = &kchannelCtrlCmdSetChannelHwState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGetClassEngineidC06F__ = &kchannelCtrlCmdGetClassEngineidC06F_6a9a13; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdResetChannelC06F__ = &kchannelCtrlCmdResetChannelC06F_ef73a1; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGpFifoScheduleC06F__ = &kchannelCtrlCmdGpFifoScheduleC06F_6546a6; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdBindC06F__ = &kchannelCtrlCmdBindC06F_2c1c21; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGetClassEngineidC36F__ = &kchannelCtrlCmdGetClassEngineidC36F_6a9a13; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdResetChannelC36F__ = &kchannelCtrlCmdResetChannelC36F_ef73a1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGpFifoScheduleC36F__ = &kchannelCtrlCmdGpFifoScheduleC36F_6546a6; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdBindC36F__ = &kchannelCtrlCmdBindC36F_2c1c21; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGpfifoGetWorkSubmitToken__ = &kchannelCtrlCmdGpfifoGetWorkSubmitToken_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer__ = &kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex__ = &kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlCmdStopChannel__ = &kchannelCtrlCmdStopChannel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlGetTpcPartitionMode__ = &kchannelCtrlGetTpcPartitionMode_a094e1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlSetTpcPartitionMode__ = &kchannelCtrlSetTpcPartitionMode_a094e1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlGetMMUDebugMode__ = &kchannelCtrlGetMMUDebugMode_a094e1; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kchannelCtrlProgramVidmemPromote__ = &kchannelCtrlProgramVidmemPromote_a094e1; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_KernelChannel_gpuresMap; + + pThis->__nvoc_base_GpuResource.__gpuresUnmap__ = &__nvoc_thunk_KernelChannel_gpuresUnmap; + + pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_KernelChannel_gpuresGetMapAddrSpace; + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__rmresGetMemInterMapParams__ = &__nvoc_thunk_KernelChannel_rmresGetMemInterMapParams; + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__rmresCheckMemInterUnmap__ = &__nvoc_thunk_KernelChannel_rmresCheckMemInterUnmap; + + pThis->__kchannelShareCallback__ = &__nvoc_thunk_GpuResource_kchannelShareCallback; + + pThis->__kchannelMapTo__ = &__nvoc_thunk_RsResource_kchannelMapTo; + + pThis->__kchannelGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_kchannelGetOrAllocNotifShare; + + pThis->__kchannelSetNotificationShare__ = &__nvoc_thunk_Notifier_kchannelSetNotificationShare; + + pThis->__kchannelGetRefCount__ = &__nvoc_thunk_RsResource_kchannelGetRefCount; + + pThis->__kchannelAddAdditionalDependants__ = &__nvoc_thunk_RsResource_kchannelAddAdditionalDependants; + + pThis->__kchannelControl_Prologue__ = &__nvoc_thunk_RmResource_kchannelControl_Prologue; + + pThis->__kchannelGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_kchannelGetRegBaseOffsetAndSize; + + pThis->__kchannelInternalControlForward__ = 
&__nvoc_thunk_GpuResource_kchannelInternalControlForward; + + pThis->__kchannelUnmapFrom__ = &__nvoc_thunk_RsResource_kchannelUnmapFrom; + + pThis->__kchannelControl_Epilogue__ = &__nvoc_thunk_RmResource_kchannelControl_Epilogue; + + pThis->__kchannelControlLookup__ = &__nvoc_thunk_RsResource_kchannelControlLookup; + + pThis->__kchannelGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_kchannelGetInternalObjectHandle; + + pThis->__kchannelControl__ = &__nvoc_thunk_GpuResource_kchannelControl; + + pThis->__kchannelGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_kchannelGetMemoryMappingDescriptor; + + pThis->__kchannelControlFilter__ = &__nvoc_thunk_RsResource_kchannelControlFilter; + + pThis->__kchannelUnregisterEvent__ = &__nvoc_thunk_Notifier_kchannelUnregisterEvent; + + pThis->__kchannelCanCopy__ = &__nvoc_thunk_RsResource_kchannelCanCopy; + + pThis->__kchannelPreDestruct__ = &__nvoc_thunk_RsResource_kchannelPreDestruct; + + pThis->__kchannelGetNotificationListPtr__ = &__nvoc_thunk_Notifier_kchannelGetNotificationListPtr; + + pThis->__kchannelGetNotificationShare__ = &__nvoc_thunk_Notifier_kchannelGetNotificationShare; + + pThis->__kchannelAccessCallback__ = &__nvoc_thunk_RmResource_kchannelAccessCallback; +} + +void __nvoc_init_funcTable_KernelChannel(KernelChannel *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelChannel_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_KernelChannel(KernelChannel *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelChannel = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_KernelChannel(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelChannel(KernelChannel **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + KernelChannel *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelChannel)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelChannel)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelChannel); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + 
NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelChannel(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelChannel(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_KernelChannel_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelChannel_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelChannel(KernelChannel **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_KernelChannel(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_channel_nvoc.h b/src/nvidia/generated/g_kernel_channel_nvoc.h new file mode 100644 index 000000000..b52d1791b --- /dev/null +++ b/src/nvidia/generated/g_kernel_channel_nvoc.h @@ -0,0 +1,1408 @@ +#ifndef _G_KERNEL_CHANNEL_NVOC_H_ +#define _G_KERNEL_CHANNEL_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_channel_nvoc.h" + +#ifndef KERNEL_CHANNEL_H +#define KERNEL_CHANNEL_H + +#include "core/core.h" +#include "os/os.h" +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "gpu/gpu_resource.h" +#include "kernel/gpu/fifo/kernel_ctxshare.h" +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/gr/kernel_graphics_context.h" +#include "kernel/gpu/intr/intr_service.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +#include "ctrl/ctrl0090.h" +#include "ctrl/ctrl208f/ctrl208ffifo.h" +#include "ctrl/ctrl506f.h" +#include "ctrl/ctrl906f.h" +#include "ctrl/ctrla06f.h" +#include "ctrl/ctrla16f.h" +#include "ctrl/ctrla26f.h" +#include "ctrl/ctrlb06f.h" +#include "ctrl/ctrlc06f.h" +#include "ctrl/ctrlc36f.h" +#include "ctrl/ctrlc56f.h" + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct UserInfo; + +#ifndef __NVOC_CLASS_UserInfo_TYPEDEF__ +#define __NVOC_CLASS_UserInfo_TYPEDEF__ +typedef struct UserInfo UserInfo; +#endif /* __NVOC_CLASS_UserInfo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserInfo +#define __nvoc_class_id_UserInfo 0x21d236 +#endif /* __nvoc_class_id_UserInfo */ + + +/*! + * @brief Type of hErrorContext or hEccErrorContext + * + * This is RPCed to GSP in #NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS.internalFlags + * along with the actual memdesc in + * #NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS.errorNotifierMem and + * #NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS.eccErrorNotifierMem. + */ +typedef enum { + /*! + * Initial state as passed in NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS by + * kernel CPU-RM clients. + */ + ERROR_NOTIFIER_TYPE_UNKNOWN = 0, + /*! @brief Error notifier is explicitly not set. + * + * The corresponding hErrorContext or hEccErrorContext must be + * NV01_NULL_OBJECT. + */ + ERROR_NOTIFIER_TYPE_NONE, + /*! @brief Error notifier is a ContextDma */ + ERROR_NOTIFIER_TYPE_CTXDMA, + /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */ + ERROR_NOTIFIER_TYPE_MEMORY +} ErrorNotifierType; + +// +// Iterates over the ChannelDescendants on a channel +// Uses an RS_ORDERED_ITERATOR and filters it by EngineID / ClassID +// +typedef struct { + RS_ORDERED_ITERATOR rsIter; + NvU32 engineID; + NvU32 classID; +} KernelChannelChildIterator; + +typedef enum +{ + CHANNEL_CLASS_TYPE_DMA, + CHANNEL_CLASS_TYPE_GPFIFO, +} CHANNEL_CLASS_TYPE; + +// +// Channel class info structure. +// +// Filled in by CliGetChannelClassInfo() routine. +// +typedef struct +{ + NvU32 notifiersMaxCount; // max# of notifiers for class + NvU32 eventActionDisable; // event disable action cmd value + NvU32 eventActionSingle; // event single-shot enable action cmd value + NvU32 eventActionRepeat; // event repeat enable action cmd value + NvU32 rcNotifierIndex; // RC notifier index differs depending on the channel class + CHANNEL_CLASS_TYPE classType; +} CLI_CHANNEL_CLASS_INFO; + +void CliGetChannelClassInfo(NvU32, CLI_CHANNEL_CLASS_INFO*); + +/*! + * This structure represents an iterator for all objects + * with given class number or engine tag on a channel or TSG. + * It is created by function @ref kchannelGetChildIterOverGroup. 
+ */ +typedef struct +{ + NvU32 engDesc; + NvU32 classNum; + + // + // During iteration, a copy of the current channel/TSG as well as the + // next object node to start iterating from is tracked. + // + CHANNEL_NODE channelNode; + KernelChannelChildIterator kchannelIter; +} KernelChannelChildIterOverGroup; + +typedef struct _def_instance_block +{ + MEMORY_DESCRIPTOR *pInstanceBlockDesc; + MEMORY_DESCRIPTOR *pRamfcDesc; + /*! + * Used only for Suspend Resume RM internal channel. + * Will be moved to the Host context RL infolist. + */ + MEMORY_DESCRIPTOR *pRLMemDesc; +} FIFO_INSTANCE_BLOCK; + +/* Bitfields in NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS.internalFlags */ +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE 1:0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER 0x0 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN 0x1 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL 0x2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE 3:2 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE 5:4 +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE ERROR_NOTIFIER_TYPE_NONE +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA ERROR_NOTIFIER_TYPE_CTXDMA +#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY ERROR_NOTIFIER_TYPE_MEMORY + +/*! + * Class for the kernel side of a Channel object. 
+ */ +#ifdef NVOC_KERNEL_CHANNEL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelChannel { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct KernelChannel *__nvoc_pbase_KernelChannel; + NV_STATUS (*__kchannelMap__)(struct KernelChannel *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__kchannelUnmap__)(struct KernelChannel *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__kchannelGetMapAddrSpace__)(struct KernelChannel *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__kchannelGetMemInterMapParams__)(struct KernelChannel *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__kchannelCheckMemInterUnmap__)(struct KernelChannel *, NvBool); + NvBool (*__kchannelIsUserdAddrSizeValid__)(struct KernelChannel *, NvU32, NvU32); + NV_STATUS (*__kchannelCtrlCmdResetIsolatedChannel__)(struct KernelChannel *, NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetClassEngineid__)(struct KernelChannel *, NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdResetChannel__)(struct KernelChannel *, NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetDeferRCState__)(struct KernelChannel *, NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetMmuFaultInfo__)(struct KernelChannel *, NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdEventSetNotification__)(struct KernelChannel *, NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetClassEngineidA06F__)(struct KernelChannel *, NVA06F_CTRL_GET_CLASS_ENGINEID_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdResetChannelA06F__)(struct KernelChannel *, NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGpFifoSchedule__)(struct KernelChannel *, NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdBind__)(struct KernelChannel *, NVA06F_CTRL_BIND_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetMmuFaultInfoA06F__)(struct KernelChannel *, NVA06F_CTRL_GET_MMU_FAULT_INFO_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdSetErrorNotifier__)(struct KernelChannel *, NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdSetInterleaveLevel__)(struct KernelChannel *, NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdRestartRunlist__)(struct KernelChannel *, NVA06F_CTRL_RESTART_RUNLIST_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetClassEngineidA16F__)(struct KernelChannel *, NVA16F_CTRL_GET_CLASS_ENGINEID_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdResetChannelA16F__)(struct KernelChannel *, NVA16F_CTRL_CMD_RESET_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGpFifoScheduleA16F__)(struct KernelChannel *, NVA16F_CTRL_GPFIFO_SCHEDULE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetClassEngineidA26F__)(struct KernelChannel *, NVA26F_CTRL_GET_CLASS_ENGINEID_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdResetChannelA26F__)(struct KernelChannel *, NVA26F_CTRL_CMD_RESET_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelFCtrlCmdGpFifoScheduleA26F__)(struct KernelChannel *, 
NVA26F_CTRL_GPFIFO_SCHEDULE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetClassEngineidB06F__)(struct KernelChannel *, NVB06F_CTRL_GET_CLASS_ENGINEID_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdResetChannelB06F__)(struct KernelChannel *, NVB06F_CTRL_CMD_RESET_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGpFifoScheduleB06F__)(struct KernelChannel *, NVB06F_CTRL_GPFIFO_SCHEDULE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdBindB06F__)(struct KernelChannel *, NVB06F_CTRL_BIND_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetEngineCtxSize__)(struct KernelChannel *, NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetEngineCtxData__)(struct KernelChannel *, NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdMigrateEngineCtxData__)(struct KernelChannel *, NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetEngineCtxState__)(struct KernelChannel *, NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetChannelHwState__)(struct KernelChannel *, NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdSetChannelHwState__)(struct KernelChannel *, NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetClassEngineidC06F__)(struct KernelChannel *, NVC06F_CTRL_GET_CLASS_ENGINEID_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdResetChannelC06F__)(struct KernelChannel *, NVC06F_CTRL_CMD_RESET_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGpFifoScheduleC06F__)(struct KernelChannel *, NVC06F_CTRL_GPFIFO_SCHEDULE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdBindC06F__)(struct KernelChannel *, NVC06F_CTRL_BIND_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGetClassEngineidC36F__)(struct KernelChannel *, NVC36F_CTRL_GET_CLASS_ENGINEID_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdResetChannelC36F__)(struct KernelChannel *, NVC36F_CTRL_CMD_RESET_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGpFifoScheduleC36F__)(struct KernelChannel *, NVC36F_CTRL_GPFIFO_SCHEDULE_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdBindC36F__)(struct KernelChannel *, NVC36F_CTRL_BIND_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGpfifoGetWorkSubmitToken__)(struct KernelChannel *, NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer__)(struct KernelChannel *, NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex__)(struct KernelChannel *, NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *); + NV_STATUS (*__kchannelCtrlCmdStopChannel__)(struct KernelChannel *, NVA06F_CTRL_STOP_CHANNEL_PARAMS *); + NV_STATUS (*__kchannelCtrlGetTpcPartitionMode__)(struct KernelChannel *, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__kchannelCtrlSetTpcPartitionMode__)(struct KernelChannel *, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__kchannelCtrlGetMMUDebugMode__)(struct KernelChannel *, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *); + NV_STATUS (*__kchannelCtrlProgramVidmemPromote__)(struct KernelChannel *, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *); + NvBool (*__kchannelShareCallback__)(struct KernelChannel *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__kchannelMapTo__)(struct KernelChannel *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__kchannelGetOrAllocNotifShare__)(struct KernelChannel *, NvHandle, NvHandle, struct NotifShare **); + void (*__kchannelSetNotificationShare__)(struct KernelChannel *, struct NotifShare *); + NvU32 
(*__kchannelGetRefCount__)(struct KernelChannel *); + void (*__kchannelAddAdditionalDependants__)(struct RsClient *, struct KernelChannel *, RsResourceRef *); + NV_STATUS (*__kchannelControl_Prologue__)(struct KernelChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kchannelGetRegBaseOffsetAndSize__)(struct KernelChannel *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__kchannelInternalControlForward__)(struct KernelChannel *, NvU32, void *, NvU32); + NV_STATUS (*__kchannelUnmapFrom__)(struct KernelChannel *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__kchannelControl_Epilogue__)(struct KernelChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kchannelControlLookup__)(struct KernelChannel *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__kchannelGetInternalObjectHandle__)(struct KernelChannel *); + NV_STATUS (*__kchannelControl__)(struct KernelChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kchannelGetMemoryMappingDescriptor__)(struct KernelChannel *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__kchannelControlFilter__)(struct KernelChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kchannelUnregisterEvent__)(struct KernelChannel *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__kchannelCanCopy__)(struct KernelChannel *); + void (*__kchannelPreDestruct__)(struct KernelChannel *); + PEVENTNOTIFICATION *(*__kchannelGetNotificationListPtr__)(struct KernelChannel *); + struct NotifShare *(*__kchannelGetNotificationShare__)(struct KernelChannel *); + NvBool (*__kchannelAccessCallback__)(struct KernelChannel *, struct RsClient *, void *, RsAccessRight); + NvU16 nextObjectClassID; + struct KernelChannel *pNextBindKernelChannel; + NvHandle hErrorContext; + MEMORY_DESCRIPTOR *pErrContextMemDesc; + ErrorNotifierType errorContextType; + NvU64 errorContextOffset; + NvHandle hEccErrorContext; + MEMORY_DESCRIPTOR *pEccErrContextMemDesc; + ErrorNotifierType eccErrorContextType; + NvU64 eccErrorContextOffset; + struct UserInfo *pUserInfo; + NvHandle hVASpace; + struct OBJVASPACE *pVAS; + NvHandle hKernelGraphicsContext; + NvU8 privilegeLevel; + NvU32 runlistId; + NvU32 ChID; + struct KernelChannelGroupApi *pKernelChannelGroupApi; + struct KernelCtxShareApi *pKernelCtxShareApi; + NvU32 refCount; + NvBool bIsContextBound; + FIFO_INSTANCE_BLOCK *pFifoHalData[8]; + MEMORY_DESCRIPTOR *pInstSubDeviceMemDesc[8]; + MEMORY_DESCRIPTOR *pUserdSubDeviceMemDesc[8]; + NvBool bClientAllocatedUserD; + NvU32 swState[8]; + NvU32 ProcessID; + NvU32 SubProcessID; + NvU32 bcStateCurrent; + NvU32 notifyIndex[2]; + NvU32 *pNotifyActions; + NvU64 userdLength; + NvBool bSkipCtxBufferAlloc; + NvU32 subctxId; + NvU32 cid; + struct MIG_INSTANCE_REF partitionRef; + NvU32 runqueue; + NvU32 engineType; +}; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelChannel; + +#define __staticCast_KernelChannel(pThis) \ + ((pThis)->__nvoc_pbase_KernelChannel) + +#ifdef __nvoc_kernel_channel_h_disabled +#define __dynamicCast_KernelChannel(pThis) ((KernelChannel*)NULL) +#else 
//__nvoc_kernel_channel_h_disabled +#define __dynamicCast_KernelChannel(pThis) \ + ((KernelChannel*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelChannel))) +#endif //__nvoc_kernel_channel_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelChannel(KernelChannel**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelChannel(KernelChannel**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_KernelChannel(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_KernelChannel((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define kchannelMap(pKernelChannel, pCallContext, pParams, pCpuMapping) kchannelMap_DISPATCH(pKernelChannel, pCallContext, pParams, pCpuMapping) +#define kchannelUnmap(pKernelChannel, pCallContext, pCpuMapping) kchannelUnmap_DISPATCH(pKernelChannel, pCallContext, pCpuMapping) +#define kchannelGetMapAddrSpace(pKernelChannel, pCallContext, mapFlags, pAddrSpace) kchannelGetMapAddrSpace_DISPATCH(pKernelChannel, pCallContext, mapFlags, pAddrSpace) +#define kchannelGetMemInterMapParams(pKernelChannel, pParams) kchannelGetMemInterMapParams_DISPATCH(pKernelChannel, pParams) +#define kchannelCheckMemInterUnmap(pKernelChannel, bSubdeviceHandleProvided) kchannelCheckMemInterUnmap_DISPATCH(pKernelChannel, bSubdeviceHandleProvided) +#define kchannelIsUserdAddrSizeValid(pKernelChannel, userdAddrLo, userdAddrHi) kchannelIsUserdAddrSizeValid_DISPATCH(pKernelChannel, userdAddrLo, userdAddrHi) +#define kchannelIsUserdAddrSizeValid_HAL(pKernelChannel, userdAddrLo, userdAddrHi) kchannelIsUserdAddrSizeValid_DISPATCH(pKernelChannel, userdAddrLo, userdAddrHi) +#define kchannelCtrlCmdResetIsolatedChannel(pKernelChannel, pResetParams) kchannelCtrlCmdResetIsolatedChannel_DISPATCH(pKernelChannel, pResetParams) +#define kchannelCtrlCmdGetClassEngineid(pKernelChannel, pParams) kchannelCtrlCmdGetClassEngineid_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdResetChannel(pKernelChannel, pResetChannelParams) kchannelCtrlCmdResetChannel_DISPATCH(pKernelChannel, pResetChannelParams) +#define kchannelCtrlCmdGetDeferRCState(pKernelChannel, pStateParams) kchannelCtrlCmdGetDeferRCState_DISPATCH(pKernelChannel, pStateParams) +#define kchannelCtrlCmdGetMmuFaultInfo(pKernelChannel, pFaultInfoParams) kchannelCtrlCmdGetMmuFaultInfo_DISPATCH(pKernelChannel, pFaultInfoParams) +#define kchannelCtrlCmdEventSetNotification(pKernelChannel, pSetEventParams) kchannelCtrlCmdEventSetNotification_DISPATCH(pKernelChannel, pSetEventParams) +#define kchannelCtrlCmdGetClassEngineidA06F(pKernelChannel, pParams) kchannelCtrlCmdGetClassEngineidA06F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdResetChannelA06F(pKernelChannel, pResetChannelParams) kchannelCtrlCmdResetChannelA06F_DISPATCH(pKernelChannel, pResetChannelParams) +#define kchannelCtrlCmdGpFifoSchedule(pKernelChannel, pSchedParams) kchannelCtrlCmdGpFifoSchedule_DISPATCH(pKernelChannel, pSchedParams) +#define kchannelCtrlCmdBind(pKernelChannel, pParams) kchannelCtrlCmdBind_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdGetMmuFaultInfoA06F(pKernelChannel, pFaultInfoParams) kchannelCtrlCmdGetMmuFaultInfoA06F_DISPATCH(pKernelChannel, pFaultInfoParams) +#define kchannelCtrlCmdSetErrorNotifier(pKernelChannel, pSetErrorNotifierParams) kchannelCtrlCmdSetErrorNotifier_DISPATCH(pKernelChannel, pSetErrorNotifierParams) +#define kchannelCtrlCmdSetInterleaveLevel(pKernelChannel, 
pParams) kchannelCtrlCmdSetInterleaveLevel_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdRestartRunlist(pKernelChannel, pParams) kchannelCtrlCmdRestartRunlist_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdGetClassEngineidA16F(pKernelChannel, pParams) kchannelCtrlCmdGetClassEngineidA16F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdResetChannelA16F(pKernelChannel, pResetChannelParams) kchannelCtrlCmdResetChannelA16F_DISPATCH(pKernelChannel, pResetChannelParams) +#define kchannelCtrlCmdGpFifoScheduleA16F(pKernelChannel, pSchedParams) kchannelCtrlCmdGpFifoScheduleA16F_DISPATCH(pKernelChannel, pSchedParams) +#define kchannelCtrlCmdGetClassEngineidA26F(pKernelChannel, pParams) kchannelCtrlCmdGetClassEngineidA26F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdResetChannelA26F(pKernelChannel, pResetChannelParams) kchannelCtrlCmdResetChannelA26F_DISPATCH(pKernelChannel, pResetChannelParams) +#define kchannelFCtrlCmdGpFifoScheduleA26F(pKernelChannel, pSchedParams) kchannelFCtrlCmdGpFifoScheduleA26F_DISPATCH(pKernelChannel, pSchedParams) +#define kchannelCtrlCmdGetClassEngineidB06F(pKernelChannel, pParams) kchannelCtrlCmdGetClassEngineidB06F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdResetChannelB06F(pKernelChannel, pResetChannelParams) kchannelCtrlCmdResetChannelB06F_DISPATCH(pKernelChannel, pResetChannelParams) +#define kchannelCtrlCmdGpFifoScheduleB06F(pKernelChannel, pSchedParams) kchannelCtrlCmdGpFifoScheduleB06F_DISPATCH(pKernelChannel, pSchedParams) +#define kchannelCtrlCmdBindB06F(pKernelChannel, pParams) kchannelCtrlCmdBindB06F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdGetEngineCtxSize(pKernelChannel, pCtxSizeParams) kchannelCtrlCmdGetEngineCtxSize_DISPATCH(pKernelChannel, pCtxSizeParams) +#define kchannelCtrlCmdGetEngineCtxData(pKernelChannel, pCtxBuffParams) kchannelCtrlCmdGetEngineCtxData_DISPATCH(pKernelChannel, pCtxBuffParams) +#define kchannelCtrlCmdMigrateEngineCtxData(pKernelChannel, pCtxBuffParams) kchannelCtrlCmdMigrateEngineCtxData_DISPATCH(pKernelChannel, pCtxBuffParams) +#define kchannelCtrlCmdGetEngineCtxState(pKernelChannel, pCtxStateParams) kchannelCtrlCmdGetEngineCtxState_DISPATCH(pKernelChannel, pCtxStateParams) +#define kchannelCtrlCmdGetChannelHwState(pKernelChannel, pParams) kchannelCtrlCmdGetChannelHwState_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdSetChannelHwState(pKernelChannel, pParams) kchannelCtrlCmdSetChannelHwState_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdGetClassEngineidC06F(pKernelChannel, pParams) kchannelCtrlCmdGetClassEngineidC06F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdResetChannelC06F(pKernelChannel, pResetChannelParams) kchannelCtrlCmdResetChannelC06F_DISPATCH(pKernelChannel, pResetChannelParams) +#define kchannelCtrlCmdGpFifoScheduleC06F(pKernelChannel, pSchedParams) kchannelCtrlCmdGpFifoScheduleC06F_DISPATCH(pKernelChannel, pSchedParams) +#define kchannelCtrlCmdBindC06F(pKernelChannel, pParams) kchannelCtrlCmdBindC06F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdGetClassEngineidC36F(pKernelChannel, pParams) kchannelCtrlCmdGetClassEngineidC36F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdResetChannelC36F(pKernelChannel, pResetChannelParams) kchannelCtrlCmdResetChannelC36F_DISPATCH(pKernelChannel, pResetChannelParams) +#define kchannelCtrlCmdGpFifoScheduleC36F(pKernelChannel, pSchedParams) kchannelCtrlCmdGpFifoScheduleC36F_DISPATCH(pKernelChannel, pSchedParams) +#define 
kchannelCtrlCmdBindC36F(pKernelChannel, pParams) kchannelCtrlCmdBindC36F_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdGpfifoGetWorkSubmitToken(pKernelChannel, pTokenParams) kchannelCtrlCmdGpfifoGetWorkSubmitToken_DISPATCH(pKernelChannel, pTokenParams) +#define kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer(pKernelChannel, pFaultMthdBufferParams) kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer_DISPATCH(pKernelChannel, pFaultMthdBufferParams) +#define kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex(pKernelChannel, pParams) kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlCmdStopChannel(pKernelChannel, pStopChannelParams) kchannelCtrlCmdStopChannel_DISPATCH(pKernelChannel, pStopChannelParams) +#define kchannelCtrlGetTpcPartitionMode(pKernelChannel, pParams) kchannelCtrlGetTpcPartitionMode_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlSetTpcPartitionMode(pKernelChannel, pParams) kchannelCtrlSetTpcPartitionMode_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlGetMMUDebugMode(pKernelChannel, pParams) kchannelCtrlGetMMUDebugMode_DISPATCH(pKernelChannel, pParams) +#define kchannelCtrlProgramVidmemPromote(pKernelChannel, pParams) kchannelCtrlProgramVidmemPromote_DISPATCH(pKernelChannel, pParams) +#define kchannelShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) kchannelShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define kchannelMapTo(pResource, pParams) kchannelMapTo_DISPATCH(pResource, pParams) +#define kchannelGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) kchannelGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define kchannelSetNotificationShare(pNotifier, pNotifShare) kchannelSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define kchannelGetRefCount(pResource) kchannelGetRefCount_DISPATCH(pResource) +#define kchannelAddAdditionalDependants(pClient, pResource, pReference) kchannelAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define kchannelControl_Prologue(pResource, pCallContext, pParams) kchannelControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define kchannelGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) kchannelGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define kchannelInternalControlForward(pGpuResource, command, pParams, size) kchannelInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define kchannelUnmapFrom(pResource, pParams) kchannelUnmapFrom_DISPATCH(pResource, pParams) +#define kchannelControl_Epilogue(pResource, pCallContext, pParams) kchannelControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define kchannelControlLookup(pResource, pParams, ppEntry) kchannelControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define kchannelGetInternalObjectHandle(pGpuResource) kchannelGetInternalObjectHandle_DISPATCH(pGpuResource) +#define kchannelControl(pGpuResource, pCallContext, pParams) kchannelControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define kchannelGetMemoryMappingDescriptor(pRmResource, ppMemDesc) kchannelGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define kchannelControlFilter(pResource, pCallContext, pParams) kchannelControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define kchannelUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) kchannelUnregisterEvent_DISPATCH(pNotifier, 
hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define kchannelCanCopy(pResource) kchannelCanCopy_DISPATCH(pResource) +#define kchannelPreDestruct(pResource) kchannelPreDestruct_DISPATCH(pResource) +#define kchannelGetNotificationListPtr(pNotifier) kchannelGetNotificationListPtr_DISPATCH(pNotifier) +#define kchannelGetNotificationShare(pNotifier) kchannelGetNotificationShare_DISPATCH(pNotifier) +#define kchannelAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) kchannelAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS kchannelNotifyRc_IMPL(struct KernelChannel *pKernelChannel); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelNotifyRc(struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelNotifyRc(pKernelChannel) kchannelNotifyRc_IMPL(pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelNotifyRc_HAL(pKernelChannel) kchannelNotifyRc(pKernelChannel) + +NvBool kchannelIsSchedulable_IMPL(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NvBool kchannelIsSchedulable(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelIsSchedulable(pGpu, pKernelChannel) kchannelIsSchedulable_IMPL(pGpu, pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelIsSchedulable_HAL(pGpu, pKernelChannel) kchannelIsSchedulable(pGpu, pKernelChannel) + +NV_STATUS kchannelAllocMem_GM107(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 Flags, NvU32 verifFlags); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelAllocMem(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 Flags, NvU32 verifFlags) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelAllocMem(pGpu, pKernelChannel, Flags, verifFlags) kchannelAllocMem_GM107(pGpu, pKernelChannel, Flags, verifFlags) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelAllocMem_HAL(pGpu, pKernelChannel, Flags, verifFlags) kchannelAllocMem(pGpu, pKernelChannel, Flags, verifFlags) + +void kchannelDestroyMem_GM107(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline void kchannelDestroyMem(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelDestroyMem(pGpu, pKernelChannel) kchannelDestroyMem_GM107(pGpu, pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelDestroyMem_HAL(pGpu, pKernelChannel) kchannelDestroyMem(pGpu, pKernelChannel) + +NV_STATUS kchannelGetChannelPhysicalState_KERNEL(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelGetChannelPhysicalState(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} 
+#else //__nvoc_kernel_channel_h_disabled +#define kchannelGetChannelPhysicalState(pGpu, pKernelChannel, pChannelStateParams) kchannelGetChannelPhysicalState_KERNEL(pGpu, pKernelChannel, pChannelStateParams) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelGetChannelPhysicalState_HAL(pGpu, pKernelChannel, pChannelStateParams) kchannelGetChannelPhysicalState(pGpu, pKernelChannel, pChannelStateParams) + +static inline NvU32 kchannelEmbedRunlistIDForSMC_13cd8d(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel) { + NV_ASSERT_PRECOMP(0); + return 0; +} + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NvU32 kchannelEmbedRunlistIDForSMC(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return 0; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelEmbedRunlistIDForSMC(pGpu, pKernelChannel) kchannelEmbedRunlistIDForSMC_13cd8d(pGpu, pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelEmbedRunlistIDForSMC_HAL(pGpu, pKernelChannel) kchannelEmbedRunlistIDForSMC(pGpu, pKernelChannel) + +NV_STATUS kchannelAllocHwID_GM107(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvHandle hClient, NvU32 Flags, NvU32 verifFlags2, NvU32 ChID); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelAllocHwID(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvHandle hClient, NvU32 Flags, NvU32 verifFlags2, NvU32 ChID) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelAllocHwID(pGpu, pKernelChannel, hClient, Flags, verifFlags2, ChID) kchannelAllocHwID_GM107(pGpu, pKernelChannel, hClient, Flags, verifFlags2, ChID) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient, Flags, verifFlags2, ChID) kchannelAllocHwID(pGpu, pKernelChannel, hClient, Flags, verifFlags2, ChID) + +NV_STATUS kchannelFreeHwID_GM107(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelFreeHwID(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelFreeHwID(pGpu, pKernelChannel) kchannelFreeHwID_GM107(pGpu, pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelFreeHwID_HAL(pGpu, pKernelChannel) kchannelFreeHwID(pGpu, pKernelChannel) + +NV_STATUS kchannelGetUserdInfo_GM107(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvU64 *userBase, NvU64 *offset, NvU64 *length); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelGetUserdInfo(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvU64 *userBase, NvU64 *offset, NvU64 *length) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelGetUserdInfo(pGpu, arg0, userBase, offset, length) kchannelGetUserdInfo_GM107(pGpu, arg0, userBase, offset, length) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelGetUserdInfo_HAL(pGpu, arg0, userBase, offset, length) kchannelGetUserdInfo(pGpu, arg0, userBase, offset, length) + +NV_STATUS kchannelGetUserdBar1MapOffset_GM107(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvU64 *bar1Offset, NvU32 *bar1MapSize); + +#ifdef __nvoc_kernel_channel_h_disabled 
+static inline NV_STATUS kchannelGetUserdBar1MapOffset(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvU64 *bar1Offset, NvU32 *bar1MapSize) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelGetUserdBar1MapOffset(pGpu, arg0, bar1Offset, bar1MapSize) kchannelGetUserdBar1MapOffset_GM107(pGpu, arg0, bar1Offset, bar1MapSize) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelGetUserdBar1MapOffset_HAL(pGpu, arg0, bar1Offset, bar1MapSize) kchannelGetUserdBar1MapOffset(pGpu, arg0, bar1Offset, bar1MapSize) + +NV_STATUS kchannelCreateUserdMemDescBc_GV100(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvHandle arg0, NvHandle *arg1, NvU64 *arg2); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelCreateUserdMemDescBc(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvHandle arg0, NvHandle *arg1, NvU64 *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelCreateUserdMemDescBc(pGpu, pKernelChannel, arg0, arg1, arg2) kchannelCreateUserdMemDescBc_GV100(pGpu, pKernelChannel, arg0, arg1, arg2) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelCreateUserdMemDescBc_HAL(pGpu, pKernelChannel, arg0, arg1, arg2) kchannelCreateUserdMemDescBc(pGpu, pKernelChannel, arg0, arg1, arg2) + +NV_STATUS kchannelCreateUserdMemDesc_GV100(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvHandle arg1, NvHandle arg2, NvU64 arg3, NvU64 *arg4, NvU32 *arg5); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelCreateUserdMemDesc(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvHandle arg1, NvHandle arg2, NvU64 arg3, NvU64 *arg4, NvU32 *arg5) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelCreateUserdMemDesc(pGpu, arg0, arg1, arg2, arg3, arg4, arg5) kchannelCreateUserdMemDesc_GV100(pGpu, arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelCreateUserdMemDesc_HAL(pGpu, arg0, arg1, arg2, arg3, arg4, arg5) kchannelCreateUserdMemDesc(pGpu, arg0, arg1, arg2, arg3, arg4, arg5) + +NV_STATUS kchannelDestroyUserdMemDesc_GV100(struct OBJGPU *pGpu, struct KernelChannel *arg0); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelDestroyUserdMemDesc(struct OBJGPU *pGpu, struct KernelChannel *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelDestroyUserdMemDesc(pGpu, arg0) kchannelDestroyUserdMemDesc_GV100(pGpu, arg0) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelDestroyUserdMemDesc_HAL(pGpu, arg0) kchannelDestroyUserdMemDesc(pGpu, arg0) + +NV_STATUS kchannelCreateUserMemDesc_GM107(struct OBJGPU *pGpu, struct KernelChannel *arg0); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelCreateUserMemDesc(struct OBJGPU *pGpu, struct KernelChannel *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelCreateUserMemDesc(pGpu, arg0) kchannelCreateUserMemDesc_GM107(pGpu, arg0) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelCreateUserMemDesc_HAL(pGpu, arg0) kchannelCreateUserMemDesc(pGpu, arg0) + +NV_STATUS 
kchannelGetEngine_GM107(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 *engDesc); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelGetEngine(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 *engDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelGetEngine(pGpu, pKernelChannel, engDesc) kchannelGetEngine_GM107(pGpu, pKernelChannel, engDesc) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelGetEngine_HAL(pGpu, pKernelChannel, engDesc) kchannelGetEngine(pGpu, pKernelChannel, engDesc) + +static inline NV_STATUS kchannelFwdToInternalCtrl_56cd7a(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 internalCmd, RmCtrlParams *pRmCtrlParams) { + return NV_OK; +} + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelFwdToInternalCtrl(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 internalCmd, RmCtrlParams *pRmCtrlParams) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelFwdToInternalCtrl(pGpu, pKernelChannel, internalCmd, pRmCtrlParams) kchannelFwdToInternalCtrl_56cd7a(pGpu, pKernelChannel, internalCmd, pRmCtrlParams) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelFwdToInternalCtrl_HAL(pGpu, pKernelChannel, internalCmd, pRmCtrlParams) kchannelFwdToInternalCtrl(pGpu, pKernelChannel, internalCmd, pRmCtrlParams) + +static inline NV_STATUS kchannelAllocChannel_56cd7a(struct KernelChannel *pKernelChannel, NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams) { + return NV_OK; +} + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelAllocChannel(struct KernelChannel *pKernelChannel, NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelAllocChannel(pKernelChannel, pChannelGpfifoParams) kchannelAllocChannel_56cd7a(pKernelChannel, pChannelGpfifoParams) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelAllocChannel_HAL(pKernelChannel, pChannelGpfifoParams) kchannelAllocChannel(pKernelChannel, pChannelGpfifoParams) + +static inline NvBool kchannelIsValid_cbe027(struct KernelChannel *pKernelChannel) { + return ((NvBool)(0 == 0)); +} + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NvBool kchannelIsValid(struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelIsValid(pKernelChannel) kchannelIsValid_cbe027(pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelIsValid_HAL(pKernelChannel) kchannelIsValid(pKernelChannel) + +NV_STATUS kchannelGetClassEngineID_GM107(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvHandle handle, NvU32 *classEngineID, NvU32 *classID, NvU32 *engineID); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelGetClassEngineID(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvHandle handle, NvU32 *classEngineID, NvU32 *classID, NvU32 *engineID) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelGetClassEngineID(pGpu, pKernelChannel, handle, 
classEngineID, classID, engineID) kchannelGetClassEngineID_GM107(pGpu, pKernelChannel, handle, classEngineID, classID, engineID) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelGetClassEngineID_HAL(pGpu, pKernelChannel, handle, classEngineID, classID, engineID) kchannelGetClassEngineID(pGpu, pKernelChannel, handle, classEngineID, classID, engineID) + +NV_STATUS kchannelEnableVirtualContext_GM107(struct KernelChannel *arg0); + +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelEnableVirtualContext(struct KernelChannel *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelEnableVirtualContext(arg0) kchannelEnableVirtualContext_GM107(arg0) +#endif //__nvoc_kernel_channel_h_disabled + +#define kchannelEnableVirtualContext_HAL(arg0) kchannelEnableVirtualContext(arg0) + +NV_STATUS kchannelMap_IMPL(struct KernelChannel *pKernelChannel, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS kchannelMap_DISPATCH(struct KernelChannel *pKernelChannel, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pKernelChannel->__kchannelMap__(pKernelChannel, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS kchannelUnmap_IMPL(struct KernelChannel *pKernelChannel, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS kchannelUnmap_DISPATCH(struct KernelChannel *pKernelChannel, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pKernelChannel->__kchannelUnmap__(pKernelChannel, pCallContext, pCpuMapping); +} + +NV_STATUS kchannelGetMapAddrSpace_IMPL(struct KernelChannel *pKernelChannel, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS kchannelGetMapAddrSpace_DISPATCH(struct KernelChannel *pKernelChannel, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pKernelChannel->__kchannelGetMapAddrSpace__(pKernelChannel, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS kchannelGetMemInterMapParams_IMPL(struct KernelChannel *pKernelChannel, RMRES_MEM_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS kchannelGetMemInterMapParams_DISPATCH(struct KernelChannel *pKernelChannel, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pKernelChannel->__kchannelGetMemInterMapParams__(pKernelChannel, pParams); +} + +NV_STATUS kchannelCheckMemInterUnmap_IMPL(struct KernelChannel *pKernelChannel, NvBool bSubdeviceHandleProvided); + +static inline NV_STATUS kchannelCheckMemInterUnmap_DISPATCH(struct KernelChannel *pKernelChannel, NvBool bSubdeviceHandleProvided) { + return pKernelChannel->__kchannelCheckMemInterUnmap__(pKernelChannel, bSubdeviceHandleProvided); +} + +NvBool kchannelIsUserdAddrSizeValid_GV100(struct KernelChannel *pKernelChannel, NvU32 userdAddrLo, NvU32 userdAddrHi); + +NvBool kchannelIsUserdAddrSizeValid_GA100(struct KernelChannel *pKernelChannel, NvU32 userdAddrLo, NvU32 userdAddrHi); + +static inline NvBool kchannelIsUserdAddrSizeValid_cbe027(struct KernelChannel *pKernelChannel, NvU32 userdAddrLo, NvU32 userdAddrHi) { + return ((NvBool)(0 == 0)); +} + +static inline NvBool kchannelIsUserdAddrSizeValid_DISPATCH(struct KernelChannel *pKernelChannel, NvU32 userdAddrLo, NvU32 userdAddrHi) { + return pKernelChannel->__kchannelIsUserdAddrSizeValid__(pKernelChannel, userdAddrLo, userdAddrHi); +} + +NV_STATUS 
kchannelCtrlCmdResetIsolatedChannel_IMPL(struct KernelChannel *pKernelChannel, NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS *pResetParams); + +static inline NV_STATUS kchannelCtrlCmdResetIsolatedChannel_DISPATCH(struct KernelChannel *pKernelChannel, NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS *pResetParams) { + return pKernelChannel->__kchannelCtrlCmdResetIsolatedChannel__(pKernelChannel, pResetParams); +} + +NV_STATUS kchannelCtrlCmdGetClassEngineid_IMPL(struct KernelChannel *pKernelChannel, NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams); + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineid_DISPATCH(struct KernelChannel *pKernelChannel, NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGetClassEngineid__(pKernelChannel, pParams); +} + +NV_STATUS kchannelCtrlCmdResetChannel_IMPL(struct KernelChannel *pKernelChannel, NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams); + +static inline NV_STATUS kchannelCtrlCmdResetChannel_DISPATCH(struct KernelChannel *pKernelChannel, NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return pKernelChannel->__kchannelCtrlCmdResetChannel__(pKernelChannel, pResetChannelParams); +} + +NV_STATUS kchannelCtrlCmdGetDeferRCState_IMPL(struct KernelChannel *pKernelChannel, NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS *pStateParams); + +static inline NV_STATUS kchannelCtrlCmdGetDeferRCState_DISPATCH(struct KernelChannel *pKernelChannel, NV906F_CTRL_CMD_GET_DEFER_RC_STATE_PARAMS *pStateParams) { + return pKernelChannel->__kchannelCtrlCmdGetDeferRCState__(pKernelChannel, pStateParams); +} + +NV_STATUS kchannelCtrlCmdGetMmuFaultInfo_IMPL(struct KernelChannel *pKernelChannel, NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS *pFaultInfoParams); + +static inline NV_STATUS kchannelCtrlCmdGetMmuFaultInfo_DISPATCH(struct KernelChannel *pKernelChannel, NV906F_CTRL_GET_MMU_FAULT_INFO_PARAMS *pFaultInfoParams) { + return pKernelChannel->__kchannelCtrlCmdGetMmuFaultInfo__(pKernelChannel, pFaultInfoParams); +} + +NV_STATUS kchannelCtrlCmdEventSetNotification_IMPL(struct KernelChannel *pKernelChannel, NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams); + +static inline NV_STATUS kchannelCtrlCmdEventSetNotification_DISPATCH(struct KernelChannel *pKernelChannel, NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams) { + return pKernelChannel->__kchannelCtrlCmdEventSetNotification__(pKernelChannel, pSetEventParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidA06F_6a9a13(struct KernelChannel *pKernelChannel, NVA06F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return kchannelCtrlCmdGetClassEngineid(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidA06F_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGetClassEngineidA06F__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelA06F_ef73a1(struct KernelChannel *pKernelChannel, NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return kchannelCtrlCmdResetChannel(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelA06F_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return pKernelChannel->__kchannelCtrlCmdResetChannelA06F__(pKernelChannel, pResetChannelParams); +} + +NV_STATUS kchannelCtrlCmdGpFifoSchedule_IMPL(struct KernelChannel *pKernelChannel, NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS 
*pSchedParams); + +static inline NV_STATUS kchannelCtrlCmdGpFifoSchedule_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return pKernelChannel->__kchannelCtrlCmdGpFifoSchedule__(pKernelChannel, pSchedParams); +} + +NV_STATUS kchannelCtrlCmdBind_IMPL(struct KernelChannel *pKernelChannel, NVA06F_CTRL_BIND_PARAMS *pParams); + +static inline NV_STATUS kchannelCtrlCmdBind_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_BIND_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdBind__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetMmuFaultInfoA06F_a7f9ac(struct KernelChannel *pKernelChannel, NVA06F_CTRL_GET_MMU_FAULT_INFO_PARAMS *pFaultInfoParams) { + return kchannelCtrlCmdGetMmuFaultInfo(pKernelChannel, pFaultInfoParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetMmuFaultInfoA06F_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_GET_MMU_FAULT_INFO_PARAMS *pFaultInfoParams) { + return pKernelChannel->__kchannelCtrlCmdGetMmuFaultInfoA06F__(pKernelChannel, pFaultInfoParams); +} + +NV_STATUS kchannelCtrlCmdSetErrorNotifier_IMPL(struct KernelChannel *pKernelChannel, NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS *pSetErrorNotifierParams); + +static inline NV_STATUS kchannelCtrlCmdSetErrorNotifier_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS *pSetErrorNotifierParams) { + return pKernelChannel->__kchannelCtrlCmdSetErrorNotifier__(pKernelChannel, pSetErrorNotifierParams); +} + +NV_STATUS kchannelCtrlCmdSetInterleaveLevel_IMPL(struct KernelChannel *pKernelChannel, NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams); + +static inline NV_STATUS kchannelCtrlCmdSetInterleaveLevel_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdSetInterleaveLevel__(pKernelChannel, pParams); +} + +NV_STATUS kchannelCtrlCmdRestartRunlist_IMPL(struct KernelChannel *pKernelChannel, NVA06F_CTRL_RESTART_RUNLIST_PARAMS *pParams); + +static inline NV_STATUS kchannelCtrlCmdRestartRunlist_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_RESTART_RUNLIST_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdRestartRunlist__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidA16F_6a9a13(struct KernelChannel *pKernelChannel, NVA16F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return kchannelCtrlCmdGetClassEngineid(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidA16F_DISPATCH(struct KernelChannel *pKernelChannel, NVA16F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGetClassEngineidA16F__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelA16F_ef73a1(struct KernelChannel *pKernelChannel, NVA16F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return kchannelCtrlCmdResetChannel(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelA16F_DISPATCH(struct KernelChannel *pKernelChannel, NVA16F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return pKernelChannel->__kchannelCtrlCmdResetChannelA16F__(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdGpFifoScheduleA16F_6546a6(struct KernelChannel *pKernelChannel, NVA16F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return kchannelCtrlCmdGpFifoSchedule(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS 
kchannelCtrlCmdGpFifoScheduleA16F_DISPATCH(struct KernelChannel *pKernelChannel, NVA16F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return pKernelChannel->__kchannelCtrlCmdGpFifoScheduleA16F__(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidA26F_6a9a13(struct KernelChannel *pKernelChannel, NVA26F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return kchannelCtrlCmdGetClassEngineid(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidA26F_DISPATCH(struct KernelChannel *pKernelChannel, NVA26F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGetClassEngineidA26F__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelA26F_ef73a1(struct KernelChannel *pKernelChannel, NVA26F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return kchannelCtrlCmdResetChannel(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelA26F_DISPATCH(struct KernelChannel *pKernelChannel, NVA26F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return pKernelChannel->__kchannelCtrlCmdResetChannelA26F__(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelFCtrlCmdGpFifoScheduleA26F_6546a6(struct KernelChannel *pKernelChannel, NVA26F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return kchannelCtrlCmdGpFifoSchedule(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelFCtrlCmdGpFifoScheduleA26F_DISPATCH(struct KernelChannel *pKernelChannel, NVA26F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return pKernelChannel->__kchannelFCtrlCmdGpFifoScheduleA26F__(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidB06F_6a9a13(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return kchannelCtrlCmdGetClassEngineid(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidB06F_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGetClassEngineidB06F__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelB06F_ef73a1(struct KernelChannel *pKernelChannel, NVB06F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return kchannelCtrlCmdResetChannel(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelB06F_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return pKernelChannel->__kchannelCtrlCmdResetChannelB06F__(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdGpFifoScheduleB06F_6546a6(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return kchannelCtrlCmdGpFifoSchedule(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelCtrlCmdGpFifoScheduleB06F_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return pKernelChannel->__kchannelCtrlCmdGpFifoScheduleB06F__(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelCtrlCmdBindB06F_2c1c21(struct KernelChannel *pKernelChannel, NVB06F_CTRL_BIND_PARAMS *pParams) { + return kchannelCtrlCmdBind(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdBindB06F_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_BIND_PARAMS *pParams) { + return 
pKernelChannel->__kchannelCtrlCmdBindB06F__(pKernelChannel, pParams); +} + +NV_STATUS kchannelCtrlCmdGetEngineCtxSize_IMPL(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS *pCtxSizeParams); + +static inline NV_STATUS kchannelCtrlCmdGetEngineCtxSize_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS *pCtxSizeParams) { + return pKernelChannel->__kchannelCtrlCmdGetEngineCtxSize__(pKernelChannel, pCtxSizeParams); +} + +NV_STATUS kchannelCtrlCmdGetEngineCtxData_IMPL(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *pCtxBuffParams); + +static inline NV_STATUS kchannelCtrlCmdGetEngineCtxData_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *pCtxBuffParams) { + return pKernelChannel->__kchannelCtrlCmdGetEngineCtxData__(pKernelChannel, pCtxBuffParams); +} + +NV_STATUS kchannelCtrlCmdMigrateEngineCtxData_IMPL(struct KernelChannel *pKernelChannel, NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS *pCtxBuffParams); + +static inline NV_STATUS kchannelCtrlCmdMigrateEngineCtxData_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS *pCtxBuffParams) { + return pKernelChannel->__kchannelCtrlCmdMigrateEngineCtxData__(pKernelChannel, pCtxBuffParams); +} + +NV_STATUS kchannelCtrlCmdGetEngineCtxState_IMPL(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS *pCtxStateParams); + +static inline NV_STATUS kchannelCtrlCmdGetEngineCtxState_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_ENGINE_CTX_STATE_PARAMS *pCtxStateParams) { + return pKernelChannel->__kchannelCtrlCmdGetEngineCtxState__(pKernelChannel, pCtxStateParams); +} + +NV_STATUS kchannelCtrlCmdGetChannelHwState_IMPL(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS *pParams); + +static inline NV_STATUS kchannelCtrlCmdGetChannelHwState_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_GET_CHANNEL_HW_STATE_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGetChannelHwState__(pKernelChannel, pParams); +} + +NV_STATUS kchannelCtrlCmdSetChannelHwState_IMPL(struct KernelChannel *pKernelChannel, NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS *pParams); + +static inline NV_STATUS kchannelCtrlCmdSetChannelHwState_DISPATCH(struct KernelChannel *pKernelChannel, NVB06F_CTRL_SET_CHANNEL_HW_STATE_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdSetChannelHwState__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidC06F_6a9a13(struct KernelChannel *pKernelChannel, NVC06F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return kchannelCtrlCmdGetClassEngineid(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidC06F_DISPATCH(struct KernelChannel *pKernelChannel, NVC06F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGetClassEngineidC06F__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelC06F_ef73a1(struct KernelChannel *pKernelChannel, NVC06F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return kchannelCtrlCmdResetChannel(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelC06F_DISPATCH(struct KernelChannel *pKernelChannel, NVC06F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return pKernelChannel->__kchannelCtrlCmdResetChannelC06F__(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS 
kchannelCtrlCmdGpFifoScheduleC06F_6546a6(struct KernelChannel *pKernelChannel, NVC06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return kchannelCtrlCmdGpFifoSchedule(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelCtrlCmdGpFifoScheduleC06F_DISPATCH(struct KernelChannel *pKernelChannel, NVC06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return pKernelChannel->__kchannelCtrlCmdGpFifoScheduleC06F__(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelCtrlCmdBindC06F_2c1c21(struct KernelChannel *pKernelChannel, NVC06F_CTRL_BIND_PARAMS *pParams) { + return kchannelCtrlCmdBind(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdBindC06F_DISPATCH(struct KernelChannel *pKernelChannel, NVC06F_CTRL_BIND_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdBindC06F__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidC36F_6a9a13(struct KernelChannel *pKernelChannel, NVC36F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return kchannelCtrlCmdGetClassEngineid(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdGetClassEngineidC36F_DISPATCH(struct KernelChannel *pKernelChannel, NVC36F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGetClassEngineidC36F__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelC36F_ef73a1(struct KernelChannel *pKernelChannel, NVC36F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return kchannelCtrlCmdResetChannel(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdResetChannelC36F_DISPATCH(struct KernelChannel *pKernelChannel, NVC36F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams) { + return pKernelChannel->__kchannelCtrlCmdResetChannelC36F__(pKernelChannel, pResetChannelParams); +} + +static inline NV_STATUS kchannelCtrlCmdGpFifoScheduleC36F_6546a6(struct KernelChannel *pKernelChannel, NVC36F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return kchannelCtrlCmdGpFifoSchedule(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelCtrlCmdGpFifoScheduleC36F_DISPATCH(struct KernelChannel *pKernelChannel, NVC36F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams) { + return pKernelChannel->__kchannelCtrlCmdGpFifoScheduleC36F__(pKernelChannel, pSchedParams); +} + +static inline NV_STATUS kchannelCtrlCmdBindC36F_2c1c21(struct KernelChannel *pKernelChannel, NVC36F_CTRL_BIND_PARAMS *pParams) { + return kchannelCtrlCmdBind(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlCmdBindC36F_DISPATCH(struct KernelChannel *pKernelChannel, NVC36F_CTRL_BIND_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdBindC36F__(pKernelChannel, pParams); +} + +NV_STATUS kchannelCtrlCmdGpfifoGetWorkSubmitToken_IMPL(struct KernelChannel *pKernelChannel, NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *pTokenParams); + +static inline NV_STATUS kchannelCtrlCmdGpfifoGetWorkSubmitToken_DISPATCH(struct KernelChannel *pKernelChannel, NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *pTokenParams) { + return pKernelChannel->__kchannelCtrlCmdGpfifoGetWorkSubmitToken__(pKernelChannel, pTokenParams); +} + +NV_STATUS kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer_IMPL(struct KernelChannel *pKernelChannel, NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS *pFaultMthdBufferParams); + +static inline NV_STATUS kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer_DISPATCH(struct KernelChannel *pKernelChannel, 
NVC36F_CTRL_GPFIFO_UPDATE_FAULT_METHOD_BUFFER_PARAMS *pFaultMthdBufferParams) { + return pKernelChannel->__kchannelCtrlCmdGpfifoUpdateFaultMethodBuffer__(pKernelChannel, pFaultMthdBufferParams); +} + +NV_STATUS kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_IMPL(struct KernelChannel *pKernelChannel, NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *pParams); + +static inline NV_STATUS kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_DISPATCH(struct KernelChannel *pKernelChannel, NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex__(pKernelChannel, pParams); +} + +NV_STATUS kchannelCtrlCmdStopChannel_IMPL(struct KernelChannel *pKernelChannel, NVA06F_CTRL_STOP_CHANNEL_PARAMS *pStopChannelParams); + +static inline NV_STATUS kchannelCtrlCmdStopChannel_DISPATCH(struct KernelChannel *pKernelChannel, NVA06F_CTRL_STOP_CHANNEL_PARAMS *pStopChannelParams) { + return pKernelChannel->__kchannelCtrlCmdStopChannel__(pKernelChannel, pStopChannelParams); +} + +static inline NV_STATUS kchannelCtrlGetTpcPartitionMode_a094e1(struct KernelChannel *pKernelChannel, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return kgrctxCtrlHandle(resservGetTlsCallContext(), pKernelChannel->hKernelGraphicsContext); +} + +static inline NV_STATUS kchannelCtrlGetTpcPartitionMode_DISPATCH(struct KernelChannel *pKernelChannel, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlGetTpcPartitionMode__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlSetTpcPartitionMode_a094e1(struct KernelChannel *pKernelChannel, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return kgrctxCtrlHandle(resservGetTlsCallContext(), pKernelChannel->hKernelGraphicsContext); +} + +static inline NV_STATUS kchannelCtrlSetTpcPartitionMode_DISPATCH(struct KernelChannel *pKernelChannel, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlSetTpcPartitionMode__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlGetMMUDebugMode_a094e1(struct KernelChannel *pKernelChannel, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams) { + return kgrctxCtrlHandle(resservGetTlsCallContext(), pKernelChannel->hKernelGraphicsContext); +} + +static inline NV_STATUS kchannelCtrlGetMMUDebugMode_DISPATCH(struct KernelChannel *pKernelChannel, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlGetMMUDebugMode__(pKernelChannel, pParams); +} + +static inline NV_STATUS kchannelCtrlProgramVidmemPromote_a094e1(struct KernelChannel *pKernelChannel, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams) { + return kgrctxCtrlHandle(resservGetTlsCallContext(), pKernelChannel->hKernelGraphicsContext); +} + +static inline NV_STATUS kchannelCtrlProgramVidmemPromote_DISPATCH(struct KernelChannel *pKernelChannel, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams) { + return pKernelChannel->__kchannelCtrlProgramVidmemPromote__(pKernelChannel, pParams); +} + +static inline NvBool kchannelShareCallback_DISPATCH(struct KernelChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__kchannelShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS kchannelMapTo_DISPATCH(struct KernelChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__kchannelMapTo__(pResource, pParams); +} + +static 
inline NV_STATUS kchannelGetOrAllocNotifShare_DISPATCH(struct KernelChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__kchannelGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline void kchannelSetNotificationShare_DISPATCH(struct KernelChannel *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__kchannelSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 kchannelGetRefCount_DISPATCH(struct KernelChannel *pResource) { + return pResource->__kchannelGetRefCount__(pResource); +} + +static inline void kchannelAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct KernelChannel *pResource, RsResourceRef *pReference) { + pResource->__kchannelAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS kchannelControl_Prologue_DISPATCH(struct KernelChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kchannelControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kchannelGetRegBaseOffsetAndSize_DISPATCH(struct KernelChannel *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__kchannelGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS kchannelInternalControlForward_DISPATCH(struct KernelChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__kchannelInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS kchannelUnmapFrom_DISPATCH(struct KernelChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__kchannelUnmapFrom__(pResource, pParams); +} + +static inline void kchannelControl_Epilogue_DISPATCH(struct KernelChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__kchannelControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kchannelControlLookup_DISPATCH(struct KernelChannel *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__kchannelControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle kchannelGetInternalObjectHandle_DISPATCH(struct KernelChannel *pGpuResource) { + return pGpuResource->__kchannelGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS kchannelControl_DISPATCH(struct KernelChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__kchannelControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS kchannelGetMemoryMappingDescriptor_DISPATCH(struct KernelChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__kchannelGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS kchannelControlFilter_DISPATCH(struct KernelChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kchannelControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kchannelUnregisterEvent_DISPATCH(struct KernelChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__kchannelUnregisterEvent__(pNotifier, 
hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool kchannelCanCopy_DISPATCH(struct KernelChannel *pResource) { + return pResource->__kchannelCanCopy__(pResource); +} + +static inline void kchannelPreDestruct_DISPATCH(struct KernelChannel *pResource) { + pResource->__kchannelPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *kchannelGetNotificationListPtr_DISPATCH(struct KernelChannel *pNotifier) { + return pNotifier->__kchannelGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *kchannelGetNotificationShare_DISPATCH(struct KernelChannel *pNotifier) { + return pNotifier->__kchannelGetNotificationShare__(pNotifier); +} + +static inline NvBool kchannelAccessCallback_DISPATCH(struct KernelChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__kchannelAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NvU32 kchannelGetDebugTag(const struct KernelChannel *pKernelChannel) { + if (pKernelChannel == ((void *)0)) + return 4294967295U; + return pKernelChannel->ChID; +} + +static inline NvBool kchannelIsCtxBufferAllocSkipped(struct KernelChannel *pKernelChannel) { + return pKernelChannel->bSkipCtxBufferAlloc; +} + +static inline NvU32 kchannelGetSubctxId(struct KernelChannel *pKernelChannel) { + return pKernelChannel->subctxId; +} + +static inline NvU32 kchannelGetCid(struct KernelChannel *pKernelChannel) { + return pKernelChannel->cid; +} + +static inline struct MIG_INSTANCE_REF *kchannelGetMIGReference(struct KernelChannel *pKernelChannel) { + return &pKernelChannel->partitionRef; +} + +static inline NvU32 kchannelGetRunqueue(struct KernelChannel *pKernelChannel) { + return pKernelChannel->runqueue; +} + +static inline NvU32 kchannelGetRunlistId(struct KernelChannel *pKernelChannel) { + return pKernelChannel->runlistId; +} + +static inline void kchannelSetRunlistId(struct KernelChannel *pKernelChannel, NvU32 runlistId) { + pKernelChannel->runlistId = runlistId; +} + +static inline NvU32 kchannelGetEngineType(struct KernelChannel *pKernelChannel) { + return pKernelChannel->engineType; +} + +NV_STATUS kchannelConstruct_IMPL(struct KernelChannel *arg_pKernelChannel, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_kchannelConstruct(arg_pKernelChannel, arg_pCallContext, arg_pParams) kchannelConstruct_IMPL(arg_pKernelChannel, arg_pCallContext, arg_pParams) +void kchannelDestruct_IMPL(struct KernelChannel *pResource); +#define __nvoc_kchannelDestruct(pResource) kchannelDestruct_IMPL(pResource) +NV_STATUS kchannelRegisterChild_IMPL(struct KernelChannel *pKernelChannel, ChannelDescendant *pObject); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelRegisterChild(struct KernelChannel *pKernelChannel, ChannelDescendant *pObject) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelRegisterChild(pKernelChannel, pObject) kchannelRegisterChild_IMPL(pKernelChannel, pObject) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelDeregisterChild_IMPL(struct KernelChannel *pKernelChannel, ChannelDescendant *pObject); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelDeregisterChild(struct KernelChannel *pKernelChannel, ChannelDescendant *pObject) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelDeregisterChild(pKernelChannel, pObject) kchannelDeregisterChild_IMPL(pKernelChannel, pObject) +#endif //__nvoc_kernel_channel_h_disabled + +void kchannelNotifyGeneric_IMPL(struct KernelChannel *pKernelChannel, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize); +#ifdef __nvoc_kernel_channel_h_disabled +static inline void kchannelNotifyGeneric(struct KernelChannel *pKernelChannel, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelNotifyGeneric(pKernelChannel, notifyIndex, pNotifyParams, notifyParamsSize) kchannelNotifyGeneric_IMPL(pKernelChannel, notifyIndex, pNotifyParams, notifyParamsSize) +#endif //__nvoc_kernel_channel_h_disabled + +NvBool kchannelCheckIsKernel_IMPL(struct KernelChannel *pKernelChannel); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NvBool kchannelCheckIsKernel(struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelCheckIsKernel(pKernelChannel) kchannelCheckIsKernel_IMPL(pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +NvBool kchannelCheckIsAdmin_IMPL(struct KernelChannel *pKernelChannel); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NvBool kchannelCheckIsAdmin(struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelCheckIsAdmin(pKernelChannel) kchannelCheckIsAdmin_IMPL(pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelBindToRunlist_IMPL(struct KernelChannel *pKernelChannel, NvU32 localEngineType, ENGDESCRIPTOR engineDesc); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelBindToRunlist(struct KernelChannel *pKernelChannel, NvU32 localEngineType, ENGDESCRIPTOR engineDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelBindToRunlist(pKernelChannel, localEngineType, engineDesc) kchannelBindToRunlist_IMPL(pKernelChannel, localEngineType, engineDesc) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelSetEngineContextMemDesc_IMPL(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 engine, MEMORY_DESCRIPTOR *pMemDesc); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelSetEngineContextMemDesc(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 engine, MEMORY_DESCRIPTOR *pMemDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelSetEngineContextMemDesc(pGpu, pKernelChannel, engine, pMemDesc) kchannelSetEngineContextMemDesc_IMPL(pGpu, pKernelChannel, engine, pMemDesc) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelMapEngineCtxBuf_IMPL(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 engine); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelMapEngineCtxBuf(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 engine) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define 
kchannelMapEngineCtxBuf(pGpu, pKernelChannel, engine) kchannelMapEngineCtxBuf_IMPL(pGpu, pKernelChannel, engine) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelUnmapEngineCtxBuf_IMPL(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 engine); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelUnmapEngineCtxBuf(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvU32 engine) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelUnmapEngineCtxBuf(pGpu, pKernelChannel, engine) kchannelUnmapEngineCtxBuf_IMPL(pGpu, pKernelChannel, engine) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelCheckBcStateCurrent_IMPL(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelCheckBcStateCurrent(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelCheckBcStateCurrent(pGpu, pKernelChannel) kchannelCheckBcStateCurrent_IMPL(pGpu, pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelUpdateWorkSubmitTokenNotifIndex_IMPL(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvU32 index); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelUpdateWorkSubmitTokenNotifIndex(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvU32 index) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelUpdateWorkSubmitTokenNotifIndex(pGpu, arg0, index) kchannelUpdateWorkSubmitTokenNotifIndex_IMPL(pGpu, arg0, index) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelNotifyWorkSubmitToken_IMPL(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvU32 token); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelNotifyWorkSubmitToken(struct OBJGPU *pGpu, struct KernelChannel *arg0, NvU32 token) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelNotifyWorkSubmitToken(pGpu, arg0, token) kchannelNotifyWorkSubmitToken_IMPL(pGpu, arg0, token) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelMapUserD_IMPL(struct OBJGPU *pGpu, struct KernelChannel *arg0, RS_PRIV_LEVEL arg1, NvU64 arg2, NvU32 arg3, NvP64 *arg4, NvP64 *arg5); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NV_STATUS kchannelMapUserD(struct OBJGPU *pGpu, struct KernelChannel *arg0, RS_PRIV_LEVEL arg1, NvU64 arg2, NvU32 arg3, NvP64 *arg4, NvP64 *arg5) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelMapUserD(pGpu, arg0, arg1, arg2, arg3, arg4, arg5) kchannelMapUserD_IMPL(pGpu, arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_kernel_channel_h_disabled + +void kchannelUnmapUserD_IMPL(struct OBJGPU *pGpu, struct KernelChannel *arg0, RS_PRIV_LEVEL arg1, NvP64 *arg2, NvP64 *arg3); +#ifdef __nvoc_kernel_channel_h_disabled +static inline void kchannelUnmapUserD(struct OBJGPU *pGpu, struct KernelChannel *arg0, RS_PRIV_LEVEL arg1, NvP64 *arg2, NvP64 *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); +} +#else //__nvoc_kernel_channel_h_disabled +#define 
kchannelUnmapUserD(pGpu, arg0, arg1, arg2, arg3) kchannelUnmapUserD_IMPL(pGpu, arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_channel_h_disabled + +NV_STATUS kchannelGetFromDualHandle_IMPL(NvHandle arg0, NvHandle arg1, struct KernelChannel **arg2); +#define kchannelGetFromDualHandle(arg0, arg1, arg2) kchannelGetFromDualHandle_IMPL(arg0, arg1, arg2) +NV_STATUS kchannelGetFromDualHandleRestricted_IMPL(NvHandle arg0, NvHandle arg1, struct KernelChannel **arg2); +#define kchannelGetFromDualHandleRestricted(arg0, arg1, arg2) kchannelGetFromDualHandleRestricted_IMPL(arg0, arg1, arg2) +NvU32 kchannelGetGfid_IMPL(struct KernelChannel *pKernelChannel); +#ifdef __nvoc_kernel_channel_h_disabled +static inline NvU32 kchannelGetGfid(struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelChannel was disabled!"); + return 0; +} +#else //__nvoc_kernel_channel_h_disabled +#define kchannelGetGfid(pKernelChannel) kchannelGetGfid_IMPL(pKernelChannel) +#endif //__nvoc_kernel_channel_h_disabled + +#undef PRIVATE_FIELD + + +RS_ORDERED_ITERATOR kchannelGetIter( + struct RsClient *pClient, + RsResourceRef *pScopeRef); + +NV_STATUS kchannelGetNextKernelChannel( + struct OBJGPU *pGpu, + CHANNEL_ITERATOR *pIt, + struct KernelChannel **ppKernelChannel); + +NV_STATUS CliGetKernelChannelWithDevice(NvHandle hClient, + NvHandle hParent, + NvHandle hKernelChannel, + struct KernelChannel **ppKernelChannel); + +NV_STATUS CliGetKernelChannel(NvHandle hClient, + NvHandle hKernelChannel, + struct KernelChannel **ppKernelChannel); + +/*! + * @brief Helper to get type and memdesc of a channel notifier (memory/ctxdma) + */ +NV_STATUS kchannelGetNotifierInfo(struct OBJGPU *pGpu, + struct RsClient *pRsClient, + NvHandle hErrorContext, + MEMORY_DESCRIPTOR **ppMemDesc, + ErrorNotifierType *pNotifierType, + NvU64 *pOffset); + +// Utils to iterate over ChannelDescendants on one Channels +void kchannelGetChildIterator(struct KernelChannel *pKernelChannel, + NvU32 classID, + NvU32 engineID, + KernelChannelChildIterator *pIter); +ChannelDescendant *kchannelGetNextChild(KernelChannelChildIterator *pIter); +// Simpler function to call if you just need one result +ChannelDescendant *kchannelGetOneChild(struct KernelChannel *pKernelChannel, + NvU32 classID, + NvU32 engineID); + +// Utils to iterate over ChannelDescendants on all Channels in the same ChannelGroup +void kchannelGetChildIterOverGroup(struct KernelChannel *pKernelChannel, + NvU32 classNum, + NvU32 engDesc, + KernelChannelChildIterOverGroup *pIt); +ChannelDescendant *kchannelGetNextChildOverGroup(KernelChannelChildIterOverGroup *pIt); + +NV_STATUS kchannelFindChildByHandle(struct KernelChannel *pKernelChannel, NvHandle hResource, ChannelDescendant **ppObject); + +// Bitmap for KernelChannel->swState +#define KERNEL_CHANNEL_SW_STATE_CPU_MAP NVBIT(0) //UserD is mapped +#define KERNEL_CHANNEL_SW_STATE_RUNLIST_SET NVBIT(1) // RunlistId is set + +NvBool kchannelIsCpuMapped(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel); +void kchannelSetCpuMapped(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bCpuMapped); +NvBool kchannelIsRunlistSet(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel); +void kchannelSetRunlistSet(struct OBJGPU *pGpu, struct KernelChannel *pKernelChannel, NvBool bRunlistSet); + +#endif // KERNEL_CHANNEL_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_CHANNEL_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_ctxshare_nvoc.c b/src/nvidia/generated/g_kernel_ctxshare_nvoc.c new file mode 100644 
index 000000000..d05cb1164 --- /dev/null +++ b/src/nvidia/generated/g_kernel_ctxshare_nvoc.c @@ -0,0 +1,566 @@ +#define NVOC_KERNEL_CTXSHARE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_ctxshare_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x5ae2fe = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCtxShare; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_KernelCtxShare(KernelCtxShare*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelCtxShare(KernelCtxShare*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelCtxShare(KernelCtxShare*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelCtxShare(KernelCtxShare*, RmHalspecOwner* ); +void __nvoc_dtor_KernelCtxShare(KernelCtxShare*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCtxShare; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShare_KernelCtxShare = { + /*pClassDef=*/ &__nvoc_class_def_KernelCtxShare, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelCtxShare, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShare_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCtxShare, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShare_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCtxShare, __nvoc_base_RsShared), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelCtxShare = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelCtxShare_KernelCtxShare, + &__nvoc_rtti_KernelCtxShare_RsShared, + &__nvoc_rtti_KernelCtxShare_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCtxShare = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelCtxShare), + /*classId=*/ classId(KernelCtxShare), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelCtxShare", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelCtxShare, + /*pCastInfo=*/ &__nvoc_castinfo_KernelCtxShare, + /*pExportInfo=*/ &__nvoc_export_info_KernelCtxShare +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCtxShare = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_KernelCtxShare(KernelCtxShare *pThis) { + __nvoc_kctxshareDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelCtxShare(KernelCtxShare *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_KernelCtxShare(KernelCtxShare *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto 
__nvoc_ctor_KernelCtxShare_fail_RsShared; + __nvoc_init_dataField_KernelCtxShare(pThis, pRmhalspecowner); + + status = __nvoc_kctxshareConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_KernelCtxShare_fail__init; + goto __nvoc_ctor_KernelCtxShare_exit; // Success + +__nvoc_ctor_KernelCtxShare_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_KernelCtxShare_fail_RsShared: +__nvoc_ctor_KernelCtxShare_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelCtxShare_1(KernelCtxShare *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +void __nvoc_init_funcTable_KernelCtxShare(KernelCtxShare *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelCtxShare_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_KernelCtxShare(KernelCtxShare *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelCtxShare = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_KernelCtxShare(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelCtxShare(KernelCtxShare **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelCtxShare *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelCtxShare)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelCtxShare)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelCtxShare); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelCtxShare(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelCtxShare(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelCtxShare_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelCtxShare_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelCtxShare(KernelCtxShare **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelCtxShare(ppThis, pParent, createFlags); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x1f9af1 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCtxShareApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + 
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_KernelCtxShareApi(KernelCtxShareApi*); +void __nvoc_init_funcTable_KernelCtxShareApi(KernelCtxShareApi*); +NV_STATUS __nvoc_ctor_KernelCtxShareApi(KernelCtxShareApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_KernelCtxShareApi(KernelCtxShareApi*); +void __nvoc_dtor_KernelCtxShareApi(KernelCtxShareApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCtxShareApi; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShareApi_KernelCtxShareApi = { + /*pClassDef=*/ &__nvoc_class_def_KernelCtxShareApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelCtxShareApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShareApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCtxShareApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShareApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCtxShareApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShareApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCtxShareApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShareApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCtxShareApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelCtxShareApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelCtxShareApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelCtxShareApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_KernelCtxShareApi_KernelCtxShareApi, + &__nvoc_rtti_KernelCtxShareApi_GpuResource, + &__nvoc_rtti_KernelCtxShareApi_RmResource, + &__nvoc_rtti_KernelCtxShareApi_RmResourceCommon, + &__nvoc_rtti_KernelCtxShareApi_RsResource, + &__nvoc_rtti_KernelCtxShareApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCtxShareApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelCtxShareApi), + /*classId=*/ classId(KernelCtxShareApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelCtxShareApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelCtxShareApi, + /*pCastInfo=*/ &__nvoc_castinfo_KernelCtxShareApi, + /*pExportInfo=*/ &__nvoc_export_info_KernelCtxShareApi +}; + +static NvBool __nvoc_thunk_KernelCtxShareApi_resCanCopy(struct RsResource *pKernelCtxShareApi) { + return kctxshareapiCanCopy((struct KernelCtxShareApi *)(((unsigned char *)pKernelCtxShareApi) - __nvoc_rtti_KernelCtxShareApi_RsResource.offset)); +} + +static NvBool __nvoc_thunk_GpuResource_kctxshareapiShareCallback(struct KernelCtxShareApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct 
GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCtxShareApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kctxshareapiControl(struct KernelCtxShareApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCtxShareApi_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kctxshareapiUnmap(struct KernelCtxShareApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCtxShareApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_kctxshareapiGetMemInterMapParams(struct KernelCtxShareApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelCtxShareApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_kctxshareapiGetMemoryMappingDescriptor(struct KernelCtxShareApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelCtxShareApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kctxshareapiGetMapAddrSpace(struct KernelCtxShareApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCtxShareApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_kctxshareapiGetInternalObjectHandle(struct KernelCtxShareApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCtxShareApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_kctxshareapiControlFilter(struct KernelCtxShareApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_kctxshareapiAddAdditionalDependants(struct RsClient *pClient, struct KernelCtxShareApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_kctxshareapiGetRefCount(struct KernelCtxShareApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_kctxshareapiCheckMemInterUnmap(struct KernelCtxShareApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelCtxShareApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_kctxshareapiMapTo(struct KernelCtxShareApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_KernelCtxShareApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_kctxshareapiControl_Prologue(struct KernelCtxShareApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kctxshareapiGetRegBaseOffsetAndSize(struct KernelCtxShareApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCtxShareApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kctxshareapiInternalControlForward(struct KernelCtxShareApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCtxShareApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_kctxshareapiPreDestruct(struct KernelCtxShareApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_kctxshareapiUnmapFrom(struct KernelCtxShareApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_kctxshareapiControl_Epilogue(struct KernelCtxShareApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_kctxshareapiControlLookup(struct KernelCtxShareApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kctxshareapiMap(struct KernelCtxShareApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelCtxShareApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_kctxshareapiAccessCallback(struct KernelCtxShareApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelCtxShareApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_KernelCtxShareApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kctxshareapiCtrlCmdSetTpcPartitionTable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0x90670102u, + /*paramSize=*/ sizeof(NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelCtxShareApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kctxshareapiCtrlCmdSetTpcPartitionTable" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kctxshareapiCtrlCmdGetCwdWatermark_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90670201u, + /*paramSize=*/ sizeof(NV9067_CTRL_CWD_WATERMARK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelCtxShareApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kctxshareapiCtrlCmdGetCwdWatermark" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kctxshareapiCtrlCmdSetCwdWatermark_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90670202u, + /*paramSize=*/ sizeof(NV9067_CTRL_CWD_WATERMARK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelCtxShareApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kctxshareapiCtrlCmdSetCwdWatermark" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelCtxShareApi = +{ + /*numEntries=*/ 3, + /*pExportEntries=*/ __nvoc_exported_method_def_KernelCtxShareApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_KernelCtxShareApi(KernelCtxShareApi *pThis) { + __nvoc_kctxshareapiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelCtxShareApi(KernelCtxShareApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_KernelCtxShareApi(KernelCtxShareApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelCtxShareApi_fail_GpuResource; + __nvoc_init_dataField_KernelCtxShareApi(pThis); + + status = __nvoc_kctxshareapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelCtxShareApi_fail__init; + goto __nvoc_ctor_KernelCtxShareApi_exit; // Success + +__nvoc_ctor_KernelCtxShareApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_KernelCtxShareApi_fail_GpuResource: +__nvoc_ctor_KernelCtxShareApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelCtxShareApi_1(KernelCtxShareApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__kctxshareapiCanCopy__ = &kctxshareapiCanCopy_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__kctxshareapiCtrlCmdSetTpcPartitionTable__ = &kctxshareapiCtrlCmdSetTpcPartitionTable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__kctxshareapiCtrlCmdGetCwdWatermark__ = &kctxshareapiCtrlCmdGetCwdWatermark_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__kctxshareapiCtrlCmdSetCwdWatermark__ = &kctxshareapiCtrlCmdSetCwdWatermark_IMPL; +#endif + + 
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_KernelCtxShareApi_resCanCopy; + + pThis->__kctxshareapiShareCallback__ = &__nvoc_thunk_GpuResource_kctxshareapiShareCallback; + + pThis->__kctxshareapiControl__ = &__nvoc_thunk_GpuResource_kctxshareapiControl; + + pThis->__kctxshareapiUnmap__ = &__nvoc_thunk_GpuResource_kctxshareapiUnmap; + + pThis->__kctxshareapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_kctxshareapiGetMemInterMapParams; + + pThis->__kctxshareapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_kctxshareapiGetMemoryMappingDescriptor; + + pThis->__kctxshareapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_kctxshareapiGetMapAddrSpace; + + pThis->__kctxshareapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_kctxshareapiGetInternalObjectHandle; + + pThis->__kctxshareapiControlFilter__ = &__nvoc_thunk_RsResource_kctxshareapiControlFilter; + + pThis->__kctxshareapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_kctxshareapiAddAdditionalDependants; + + pThis->__kctxshareapiGetRefCount__ = &__nvoc_thunk_RsResource_kctxshareapiGetRefCount; + + pThis->__kctxshareapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_kctxshareapiCheckMemInterUnmap; + + pThis->__kctxshareapiMapTo__ = &__nvoc_thunk_RsResource_kctxshareapiMapTo; + + pThis->__kctxshareapiControl_Prologue__ = &__nvoc_thunk_RmResource_kctxshareapiControl_Prologue; + + pThis->__kctxshareapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_kctxshareapiGetRegBaseOffsetAndSize; + + pThis->__kctxshareapiInternalControlForward__ = &__nvoc_thunk_GpuResource_kctxshareapiInternalControlForward; + + pThis->__kctxshareapiPreDestruct__ = &__nvoc_thunk_RsResource_kctxshareapiPreDestruct; + + pThis->__kctxshareapiUnmapFrom__ = &__nvoc_thunk_RsResource_kctxshareapiUnmapFrom; + + pThis->__kctxshareapiControl_Epilogue__ = &__nvoc_thunk_RmResource_kctxshareapiControl_Epilogue; + + pThis->__kctxshareapiControlLookup__ = &__nvoc_thunk_RsResource_kctxshareapiControlLookup; + + pThis->__kctxshareapiMap__ = &__nvoc_thunk_GpuResource_kctxshareapiMap; + + pThis->__kctxshareapiAccessCallback__ = &__nvoc_thunk_RmResource_kctxshareapiAccessCallback; +} + +void __nvoc_init_funcTable_KernelCtxShareApi(KernelCtxShareApi *pThis) { + __nvoc_init_funcTable_KernelCtxShareApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_KernelCtxShareApi(KernelCtxShareApi *pThis) { + pThis->__nvoc_pbase_KernelCtxShareApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_KernelCtxShareApi(pThis); +} + +NV_STATUS __nvoc_objCreate_KernelCtxShareApi(KernelCtxShareApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + KernelCtxShareApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(KernelCtxShareApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, 
sizeof(KernelCtxShareApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelCtxShareApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_KernelCtxShareApi(pThis); + status = __nvoc_ctor_KernelCtxShareApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_KernelCtxShareApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelCtxShareApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelCtxShareApi(KernelCtxShareApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_KernelCtxShareApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_ctxshare_nvoc.h b/src/nvidia/generated/g_kernel_ctxshare_nvoc.h new file mode 100644 index 000000000..5d7bddb10 --- /dev/null +++ b/src/nvidia/generated/g_kernel_ctxshare_nvoc.h @@ -0,0 +1,409 @@ +#ifndef _G_KERNEL_CTXSHARE_NVOC_H_ +#define _G_KERNEL_CTXSHARE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_ctxshare_nvoc.h" + +#ifndef CTXSHARE_H +#define CTXSHARE_H + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_resource.h" +#include "gpu/gpu_resource.h" +#include "mem_mgr/vaspace.h" +#include "resserv/rs_server.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" + +#include "ctrl/ctrl9067.h" + + +#include "containers/btree.h" + +// Forward declaration +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct KernelCtxShareApi; + +#ifndef __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ +#define __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ +typedef struct KernelCtxShareApi KernelCtxShareApi; +#endif /* __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCtxShareApi +#define __nvoc_class_id_KernelCtxShareApi 0x1f9af1 +#endif /* __nvoc_class_id_KernelCtxShareApi */ + + + +/** + * This class represents data that is shared when a subcontext is duped. + * + * Instances of this class are ref-counted and will be kept alive until + * all subcontext copies have been freed by the resource server. + */ +#ifdef NVOC_KERNEL_CTXSHARE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelCtxShare { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsShared __nvoc_base_RsShared; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + struct KernelCtxShare *__nvoc_pbase_KernelCtxShare; + struct OBJVASPACE *pVAS; + struct KernelChannelGroup *pKernelChannelGroup; + NvU32 subctxId; + NvU32 flags; +}; + +#ifndef __NVOC_CLASS_KernelCtxShare_TYPEDEF__ +#define __NVOC_CLASS_KernelCtxShare_TYPEDEF__ +typedef struct KernelCtxShare KernelCtxShare; +#endif /* __NVOC_CLASS_KernelCtxShare_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCtxShare +#define __nvoc_class_id_KernelCtxShare 0x5ae2fe +#endif /* __nvoc_class_id_KernelCtxShare */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCtxShare; + +#define __staticCast_KernelCtxShare(pThis) \ + ((pThis)->__nvoc_pbase_KernelCtxShare) + +#ifdef __nvoc_kernel_ctxshare_h_disabled +#define __dynamicCast_KernelCtxShare(pThis) ((KernelCtxShare*)NULL) +#else //__nvoc_kernel_ctxshare_h_disabled +#define __dynamicCast_KernelCtxShare(pThis) \ + ((KernelCtxShare*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCtxShare))) +#endif //__nvoc_kernel_ctxshare_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelCtxShare(KernelCtxShare**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelCtxShare(KernelCtxShare**, Dynamic*, NvU32); +#define __objCreate_KernelCtxShare(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelCtxShare((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +static inline NV_STATUS kctxshareInit_56cd7a(struct KernelCtxShare *pKernelCtxShare, struct KernelCtxShareApi *pKernelCtxShareApi, struct OBJGPU *pGpu, struct OBJVASPACE *pVAS, struct KernelChannelGroupApi *pKernelChannelGroupApi, NvU64 offset, PEMEMBLOCK pBlock) { + return NV_OK; +} + +#ifdef __nvoc_kernel_ctxshare_h_disabled +static inline NV_STATUS kctxshareInit(struct KernelCtxShare *pKernelCtxShare, struct KernelCtxShareApi *pKernelCtxShareApi, struct OBJGPU *pGpu, struct OBJVASPACE *pVAS, struct KernelChannelGroupApi *pKernelChannelGroupApi, 
NvU64 offset, PEMEMBLOCK pBlock) { + NV_ASSERT_FAILED_PRECOMP("KernelCtxShare was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ctxshare_h_disabled +#define kctxshareInit(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS, pKernelChannelGroupApi, offset, pBlock) kctxshareInit_56cd7a(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS, pKernelChannelGroupApi, offset, pBlock) +#endif //__nvoc_kernel_ctxshare_h_disabled + +#define kctxshareInit_HAL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS, pKernelChannelGroupApi, offset, pBlock) kctxshareInit(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS, pKernelChannelGroupApi, offset, pBlock) + +static inline NV_STATUS kctxshareDestroy_56cd7a(struct KernelCtxShare *pKernelCtxShare, struct KernelCtxShareApi *pKernelCtxShareApi, struct OBJGPU *pGpu, struct KernelChannelGroupApi *pKernelChannelGroupApi, NvBool bRelease) { + return NV_OK; +} + +#ifdef __nvoc_kernel_ctxshare_h_disabled +static inline NV_STATUS kctxshareDestroy(struct KernelCtxShare *pKernelCtxShare, struct KernelCtxShareApi *pKernelCtxShareApi, struct OBJGPU *pGpu, struct KernelChannelGroupApi *pKernelChannelGroupApi, NvBool bRelease) { + NV_ASSERT_FAILED_PRECOMP("KernelCtxShare was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ctxshare_h_disabled +#define kctxshareDestroy(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi, bRelease) kctxshareDestroy_56cd7a(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi, bRelease) +#endif //__nvoc_kernel_ctxshare_h_disabled + +#define kctxshareDestroy_HAL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi, bRelease) kctxshareDestroy(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi, bRelease) + +NV_STATUS kctxshareConstruct_IMPL(struct KernelCtxShare *arg_pKernelCtxShare); +#define __nvoc_kctxshareConstruct(arg_pKernelCtxShare) kctxshareConstruct_IMPL(arg_pKernelCtxShare) +NV_STATUS kctxshareInitCommon_IMPL(struct KernelCtxShare *pKernelCtxShare, struct KernelCtxShareApi *pKernelCtxShareApi, struct OBJGPU *pGpu, struct OBJVASPACE *pVAS, NvU32 Flags, NvU32 *pSubctxId, struct KernelChannelGroupApi *pKernelChannelGroupApi); +#ifdef __nvoc_kernel_ctxshare_h_disabled +static inline NV_STATUS kctxshareInitCommon(struct KernelCtxShare *pKernelCtxShare, struct KernelCtxShareApi *pKernelCtxShareApi, struct OBJGPU *pGpu, struct OBJVASPACE *pVAS, NvU32 Flags, NvU32 *pSubctxId, struct KernelChannelGroupApi *pKernelChannelGroupApi) { + NV_ASSERT_FAILED_PRECOMP("KernelCtxShare was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ctxshare_h_disabled +#define kctxshareInitCommon(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS, Flags, pSubctxId, pKernelChannelGroupApi) kctxshareInitCommon_IMPL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS, Flags, pSubctxId, pKernelChannelGroupApi) +#endif //__nvoc_kernel_ctxshare_h_disabled + +NV_STATUS kctxshareDestroyCommon_IMPL(struct KernelCtxShare *pKernelCtxShare, struct KernelCtxShareApi *pKernelCtxShareApi, struct OBJGPU *pGpu, struct KernelChannelGroupApi *pKernelChannelGroupApi); +#ifdef __nvoc_kernel_ctxshare_h_disabled +static inline NV_STATUS kctxshareDestroyCommon(struct KernelCtxShare *pKernelCtxShare, struct KernelCtxShareApi *pKernelCtxShareApi, struct OBJGPU *pGpu, struct KernelChannelGroupApi *pKernelChannelGroupApi) { + NV_ASSERT_FAILED_PRECOMP("KernelCtxShare was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ctxshare_h_disabled +#define 
kctxshareDestroyCommon(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi) kctxshareDestroyCommon_IMPL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi) +#endif //__nvoc_kernel_ctxshare_h_disabled + +void kctxshareDestruct_IMPL(struct KernelCtxShare *pKernelCtxShare); +#define __nvoc_kctxshareDestruct(pKernelCtxShare) kctxshareDestruct_IMPL(pKernelCtxShare) +#undef PRIVATE_FIELD + + +#ifdef NVOC_KERNEL_CTXSHARE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelCtxShareApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct KernelCtxShareApi *__nvoc_pbase_KernelCtxShareApi; + NvBool (*__kctxshareapiCanCopy__)(struct KernelCtxShareApi *); + NV_STATUS (*__kctxshareapiCtrlCmdSetTpcPartitionTable__)(struct KernelCtxShareApi *, NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS *); + NV_STATUS (*__kctxshareapiCtrlCmdGetCwdWatermark__)(struct KernelCtxShareApi *, NV9067_CTRL_CWD_WATERMARK_PARAMS *); + NV_STATUS (*__kctxshareapiCtrlCmdSetCwdWatermark__)(struct KernelCtxShareApi *, NV9067_CTRL_CWD_WATERMARK_PARAMS *); + NvBool (*__kctxshareapiShareCallback__)(struct KernelCtxShareApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__kctxshareapiControl__)(struct KernelCtxShareApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kctxshareapiUnmap__)(struct KernelCtxShareApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__kctxshareapiGetMemInterMapParams__)(struct KernelCtxShareApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__kctxshareapiGetMemoryMappingDescriptor__)(struct KernelCtxShareApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__kctxshareapiGetMapAddrSpace__)(struct KernelCtxShareApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__kctxshareapiGetInternalObjectHandle__)(struct KernelCtxShareApi *); + NV_STATUS (*__kctxshareapiControlFilter__)(struct KernelCtxShareApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__kctxshareapiAddAdditionalDependants__)(struct RsClient *, struct KernelCtxShareApi *, RsResourceRef *); + NvU32 (*__kctxshareapiGetRefCount__)(struct KernelCtxShareApi *); + NV_STATUS (*__kctxshareapiCheckMemInterUnmap__)(struct KernelCtxShareApi *, NvBool); + NV_STATUS (*__kctxshareapiMapTo__)(struct KernelCtxShareApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__kctxshareapiControl_Prologue__)(struct KernelCtxShareApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kctxshareapiGetRegBaseOffsetAndSize__)(struct KernelCtxShareApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__kctxshareapiInternalControlForward__)(struct KernelCtxShareApi *, NvU32, void *, NvU32); + void (*__kctxshareapiPreDestruct__)(struct KernelCtxShareApi *); + NV_STATUS (*__kctxshareapiUnmapFrom__)(struct KernelCtxShareApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__kctxshareapiControl_Epilogue__)(struct KernelCtxShareApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kctxshareapiControlLookup__)(struct KernelCtxShareApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS 
(*__kctxshareapiMap__)(struct KernelCtxShareApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__kctxshareapiAccessCallback__)(struct KernelCtxShareApi *, struct RsClient *, void *, RsAccessRight); + struct KernelCtxShare *pShareData; +}; + +#ifndef __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ +#define __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ +typedef struct KernelCtxShareApi KernelCtxShareApi; +#endif /* __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCtxShareApi +#define __nvoc_class_id_KernelCtxShareApi 0x1f9af1 +#endif /* __nvoc_class_id_KernelCtxShareApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelCtxShareApi; + +#define __staticCast_KernelCtxShareApi(pThis) \ + ((pThis)->__nvoc_pbase_KernelCtxShareApi) + +#ifdef __nvoc_kernel_ctxshare_h_disabled +#define __dynamicCast_KernelCtxShareApi(pThis) ((KernelCtxShareApi*)NULL) +#else //__nvoc_kernel_ctxshare_h_disabled +#define __dynamicCast_KernelCtxShareApi(pThis) \ + ((KernelCtxShareApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelCtxShareApi))) +#endif //__nvoc_kernel_ctxshare_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelCtxShareApi(KernelCtxShareApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelCtxShareApi(KernelCtxShareApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_KernelCtxShareApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_KernelCtxShareApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define kctxshareapiCanCopy(pKernelCtxShareApi) kctxshareapiCanCopy_DISPATCH(pKernelCtxShareApi) +#define kctxshareapiCtrlCmdSetTpcPartitionTable(pKernelCtxShareApi, pParams) kctxshareapiCtrlCmdSetTpcPartitionTable_DISPATCH(pKernelCtxShareApi, pParams) +#define kctxshareapiCtrlCmdGetCwdWatermark(pKernelCtxShareApi, pParams) kctxshareapiCtrlCmdGetCwdWatermark_DISPATCH(pKernelCtxShareApi, pParams) +#define kctxshareapiCtrlCmdSetCwdWatermark(pKernelCtxShareApi, pParams) kctxshareapiCtrlCmdSetCwdWatermark_DISPATCH(pKernelCtxShareApi, pParams) +#define kctxshareapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) kctxshareapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define kctxshareapiControl(pGpuResource, pCallContext, pParams) kctxshareapiControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define kctxshareapiUnmap(pGpuResource, pCallContext, pCpuMapping) kctxshareapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define kctxshareapiGetMemInterMapParams(pRmResource, pParams) kctxshareapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define kctxshareapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) kctxshareapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define kctxshareapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) kctxshareapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define kctxshareapiGetInternalObjectHandle(pGpuResource) kctxshareapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define kctxshareapiControlFilter(pResource, pCallContext, pParams) kctxshareapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define kctxshareapiAddAdditionalDependants(pClient, pResource, pReference) kctxshareapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define 
kctxshareapiGetRefCount(pResource) kctxshareapiGetRefCount_DISPATCH(pResource) +#define kctxshareapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) kctxshareapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define kctxshareapiMapTo(pResource, pParams) kctxshareapiMapTo_DISPATCH(pResource, pParams) +#define kctxshareapiControl_Prologue(pResource, pCallContext, pParams) kctxshareapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define kctxshareapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) kctxshareapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define kctxshareapiInternalControlForward(pGpuResource, command, pParams, size) kctxshareapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define kctxshareapiPreDestruct(pResource) kctxshareapiPreDestruct_DISPATCH(pResource) +#define kctxshareapiUnmapFrom(pResource, pParams) kctxshareapiUnmapFrom_DISPATCH(pResource, pParams) +#define kctxshareapiControl_Epilogue(pResource, pCallContext, pParams) kctxshareapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define kctxshareapiControlLookup(pResource, pParams, ppEntry) kctxshareapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define kctxshareapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) kctxshareapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define kctxshareapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) kctxshareapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool kctxshareapiCanCopy_IMPL(struct KernelCtxShareApi *pKernelCtxShareApi); + +static inline NvBool kctxshareapiCanCopy_DISPATCH(struct KernelCtxShareApi *pKernelCtxShareApi) { + return pKernelCtxShareApi->__kctxshareapiCanCopy__(pKernelCtxShareApi); +} + +NV_STATUS kctxshareapiCtrlCmdSetTpcPartitionTable_IMPL(struct KernelCtxShareApi *pKernelCtxShareApi, NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS *pParams); + +static inline NV_STATUS kctxshareapiCtrlCmdSetTpcPartitionTable_DISPATCH(struct KernelCtxShareApi *pKernelCtxShareApi, NV9067_CTRL_TPC_PARTITION_TABLE_PARAMS *pParams) { + return pKernelCtxShareApi->__kctxshareapiCtrlCmdSetTpcPartitionTable__(pKernelCtxShareApi, pParams); +} + +NV_STATUS kctxshareapiCtrlCmdGetCwdWatermark_IMPL(struct KernelCtxShareApi *pKernelCtxShareApi, NV9067_CTRL_CWD_WATERMARK_PARAMS *pParams); + +static inline NV_STATUS kctxshareapiCtrlCmdGetCwdWatermark_DISPATCH(struct KernelCtxShareApi *pKernelCtxShareApi, NV9067_CTRL_CWD_WATERMARK_PARAMS *pParams) { + return pKernelCtxShareApi->__kctxshareapiCtrlCmdGetCwdWatermark__(pKernelCtxShareApi, pParams); +} + +NV_STATUS kctxshareapiCtrlCmdSetCwdWatermark_IMPL(struct KernelCtxShareApi *pKernelCtxShareApi, NV9067_CTRL_CWD_WATERMARK_PARAMS *pParams); + +static inline NV_STATUS kctxshareapiCtrlCmdSetCwdWatermark_DISPATCH(struct KernelCtxShareApi *pKernelCtxShareApi, NV9067_CTRL_CWD_WATERMARK_PARAMS *pParams) { + return pKernelCtxShareApi->__kctxshareapiCtrlCmdSetCwdWatermark__(pKernelCtxShareApi, pParams); +} + +static inline NvBool kctxshareapiShareCallback_DISPATCH(struct KernelCtxShareApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__kctxshareapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS kctxshareapiControl_DISPATCH(struct KernelCtxShareApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__kctxshareapiControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS kctxshareapiUnmap_DISPATCH(struct KernelCtxShareApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__kctxshareapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS kctxshareapiGetMemInterMapParams_DISPATCH(struct KernelCtxShareApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__kctxshareapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS kctxshareapiGetMemoryMappingDescriptor_DISPATCH(struct KernelCtxShareApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__kctxshareapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS kctxshareapiGetMapAddrSpace_DISPATCH(struct KernelCtxShareApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__kctxshareapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle kctxshareapiGetInternalObjectHandle_DISPATCH(struct KernelCtxShareApi *pGpuResource) { + return pGpuResource->__kctxshareapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS kctxshareapiControlFilter_DISPATCH(struct KernelCtxShareApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kctxshareapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void kctxshareapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct KernelCtxShareApi *pResource, RsResourceRef *pReference) { + pResource->__kctxshareapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 kctxshareapiGetRefCount_DISPATCH(struct KernelCtxShareApi *pResource) { + return pResource->__kctxshareapiGetRefCount__(pResource); +} + +static inline NV_STATUS kctxshareapiCheckMemInterUnmap_DISPATCH(struct KernelCtxShareApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__kctxshareapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS kctxshareapiMapTo_DISPATCH(struct KernelCtxShareApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__kctxshareapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS kctxshareapiControl_Prologue_DISPATCH(struct KernelCtxShareApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kctxshareapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kctxshareapiGetRegBaseOffsetAndSize_DISPATCH(struct KernelCtxShareApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__kctxshareapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS kctxshareapiInternalControlForward_DISPATCH(struct KernelCtxShareApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__kctxshareapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void kctxshareapiPreDestruct_DISPATCH(struct KernelCtxShareApi *pResource) { + pResource->__kctxshareapiPreDestruct__(pResource); +} + +static inline NV_STATUS kctxshareapiUnmapFrom_DISPATCH(struct KernelCtxShareApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + 
return pResource->__kctxshareapiUnmapFrom__(pResource, pParams); +} + +static inline void kctxshareapiControl_Epilogue_DISPATCH(struct KernelCtxShareApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__kctxshareapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kctxshareapiControlLookup_DISPATCH(struct KernelCtxShareApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__kctxshareapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS kctxshareapiMap_DISPATCH(struct KernelCtxShareApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__kctxshareapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool kctxshareapiAccessCallback_DISPATCH(struct KernelCtxShareApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__kctxshareapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS kctxshareapiConstruct_IMPL(struct KernelCtxShareApi *arg_pKernelCtxShareApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_kctxshareapiConstruct(arg_pKernelCtxShareApi, arg_pCallContext, arg_pParams) kctxshareapiConstruct_IMPL(arg_pKernelCtxShareApi, arg_pCallContext, arg_pParams) +NV_STATUS kctxshareapiCopyConstruct_IMPL(struct KernelCtxShareApi *pKernelCtxShareApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_kernel_ctxshare_h_disabled +static inline NV_STATUS kctxshareapiCopyConstruct(struct KernelCtxShareApi *pKernelCtxShareApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("KernelCtxShareApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ctxshare_h_disabled +#define kctxshareapiCopyConstruct(pKernelCtxShareApi, pCallContext, pParams) kctxshareapiCopyConstruct_IMPL(pKernelCtxShareApi, pCallContext, pParams) +#endif //__nvoc_kernel_ctxshare_h_disabled + +void kctxshareapiDestruct_IMPL(struct KernelCtxShareApi *pKernelCtxShareApi); +#define __nvoc_kctxshareapiDestruct(pKernelCtxShareApi) kctxshareapiDestruct_IMPL(pKernelCtxShareApi) +#undef PRIVATE_FIELD + + +#endif // CTXSHARE_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_CTXSHARE_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_falcon_nvoc.c b/src/nvidia/generated/g_kernel_falcon_nvoc.c new file mode 100644 index 000000000..3cb763058 --- /dev/null +++ b/src/nvidia/generated/g_kernel_falcon_nvoc.c @@ -0,0 +1,512 @@ +#define NVOC_KERNEL_FALCON_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_falcon_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb6b1af = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon; + +void __nvoc_init_KernelFalcon(KernelFalcon*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelFalcon(KernelFalcon*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelFalcon(KernelFalcon*, RmHalspecOwner* ); +void 
__nvoc_dtor_KernelFalcon(KernelFalcon*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelFalcon; + +static const struct NVOC_RTTI __nvoc_rtti_KernelFalcon_KernelFalcon = { + /*pClassDef=*/ &__nvoc_class_def_KernelFalcon, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelFalcon, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelFalcon = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_KernelFalcon_KernelFalcon, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelFalcon), + /*classId=*/ classId(KernelFalcon), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelFalcon", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_KernelFalcon, + /*pExportInfo=*/ &__nvoc_export_info_KernelFalcon +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelFalcon = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_KernelFalcon(KernelFalcon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_KernelFalcon(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelFalcon_exit; // Success + +__nvoc_ctor_KernelFalcon_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelFalcon_1(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal function -- kflcnIsRiscvActive + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kflcnIsRiscvActive__ = &kflcnIsRiscvActive_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnIsRiscvActive__ = &kflcnIsRiscvActive_GA10X; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kflcnRiscvProgramBcr + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << 
(rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnRiscvProgramBcr__ = &kflcnRiscvProgramBcr_GA102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kflcnRiscvProgramBcr__ = &kflcnRiscvProgramBcr_f2d351; + } + } + else if (0) + { + } + + // Hal function -- kflcnSwitchToFalcon + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnSwitchToFalcon__ = &kflcnSwitchToFalcon_GA10X; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kflcnSwitchToFalcon__ = &kflcnSwitchToFalcon_b3696a; + } + } + else if (0) + { + } + + pThis->__kflcnResetHw__ = NULL; + + // Hal function -- kflcnPreResetWait + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnPreResetWait__ = &kflcnPreResetWait_GA10X; + } + // default + else + { + pThis->__kflcnPreResetWait__ = &kflcnPreResetWait_56cd7a; + } + } + else if (0) + { + } + + // Hal function -- kflcnWaitForResetToFinish + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnWaitForResetToFinish__ = &kflcnWaitForResetToFinish_GA102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kflcnWaitForResetToFinish__ = &kflcnWaitForResetToFinish_TU102; + } + else if (0) + { + } + } + else if (0) + { + } + + pThis->__kflcnIsEngineInReset__ = NULL; + + // Hal function -- kflcnReadIntrStatus + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kflcnReadIntrStatus__ = &kflcnReadIntrStatus_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnReadIntrStatus__ = &kflcnReadIntrStatus_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kflcnIntrRetrigger + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL 
<< (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnIntrRetrigger__ = &kflcnIntrRetrigger_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kflcnIntrRetrigger__ = &kflcnIntrRetrigger_b3696a; + } + } + else if (0) + { + } + + // Hal function -- kflcnMaskImemAddr + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kflcnMaskImemAddr__ = &kflcnMaskImemAddr_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnMaskImemAddr__ = &kflcnMaskImemAddr_GA100; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kflcnMaskDmemAddr + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kflcnMaskDmemAddr__ = &kflcnMaskDmemAddr_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kflcnMaskDmemAddr__ = &kflcnMaskDmemAddr_GA100; + } + else if (0) + { + } + } + else if (0) + { + } +} + +void __nvoc_init_funcTable_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelFalcon_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_KernelFalcon(KernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelFalcon = pThis; + __nvoc_init_funcTable_KernelFalcon(pThis, pRmhalspecowner); +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xabcf08 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericKernelFalcon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_GenericKernelFalcon(GenericKernelFalcon*, RmHalspecOwner* ); +void __nvoc_init_funcTable_GenericKernelFalcon(GenericKernelFalcon*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_GenericKernelFalcon(GenericKernelFalcon*, RmHalspecOwner* , struct OBJGPU * arg_pGpu, KernelFalconEngineConfig * arg_pFalconConfig); +void __nvoc_init_dataField_GenericKernelFalcon(GenericKernelFalcon*, RmHalspecOwner* ); +void __nvoc_dtor_GenericKernelFalcon(GenericKernelFalcon*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericKernelFalcon; + +static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_GenericKernelFalcon = { + /*pClassDef=*/ &__nvoc_class_def_GenericKernelFalcon, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GenericKernelFalcon, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_KernelFalcon = { + /*pClassDef=*/ &__nvoc_class_def_KernelFalcon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GenericKernelFalcon, 
__nvoc_base_KernelFalcon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GenericKernelFalcon, __nvoc_base_IntrService), +}; + +static const struct NVOC_RTTI __nvoc_rtti_GenericKernelFalcon_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(GenericKernelFalcon, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_GenericKernelFalcon = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_GenericKernelFalcon_GenericKernelFalcon, + &__nvoc_rtti_GenericKernelFalcon_Object, + &__nvoc_rtti_GenericKernelFalcon_IntrService, + &__nvoc_rtti_GenericKernelFalcon_KernelFalcon, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_GenericKernelFalcon = +{ + /*classInfo=*/ { + /*size=*/ sizeof(GenericKernelFalcon), + /*classId=*/ classId(GenericKernelFalcon), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "GenericKernelFalcon", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GenericKernelFalcon, + /*pCastInfo=*/ &__nvoc_castinfo_GenericKernelFalcon, + /*pExportInfo=*/ &__nvoc_export_info_GenericKernelFalcon +}; + +static NV_STATUS __nvoc_thunk_GenericKernelFalcon_kflcnResetHw(struct OBJGPU *pGpu, struct KernelFalcon *pGenKernFlcn) { + return gkflcnResetHw(pGpu, (struct GenericKernelFalcon *)(((unsigned char *)pGenKernFlcn) - __nvoc_rtti_GenericKernelFalcon_KernelFalcon.offset)); +} + +static NvBool __nvoc_thunk_GenericKernelFalcon_kflcnIsEngineInReset(struct OBJGPU *pGpu, struct KernelFalcon *pGenKernFlcn) { + return gkflcnIsEngineInReset(pGpu, (struct GenericKernelFalcon *)(((unsigned char *)pGenKernFlcn) - __nvoc_rtti_GenericKernelFalcon_KernelFalcon.offset)); +} + +static void __nvoc_thunk_GenericKernelFalcon_intrservRegisterIntrService(struct OBJGPU *arg0, struct IntrService *arg1, IntrServiceRecord arg2[155]) { + gkflcnRegisterIntrService(arg0, (struct GenericKernelFalcon *)(((unsigned char *)arg1) - __nvoc_rtti_GenericKernelFalcon_IntrService.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_GenericKernelFalcon_intrservServiceNotificationInterrupt(struct OBJGPU *arg0, struct IntrService *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) { + return gkflcnServiceNotificationInterrupt(arg0, (struct GenericKernelFalcon *)(((unsigned char *)arg1) - __nvoc_rtti_GenericKernelFalcon_IntrService.offset), arg2); +} + +static NvBool __nvoc_thunk_IntrService_gkflcnClearInterrupt(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_GenericKernelFalcon_IntrService.offset), pParams); +} + +static NvU32 __nvoc_thunk_IntrService_gkflcnServiceInterrupt(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceServiceInterruptArguments *pParams) { + return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_GenericKernelFalcon_IntrService.offset), pParams); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericKernelFalcon = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_KernelFalcon(KernelFalcon*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_Object(Object*); +void 
__nvoc_dtor_GenericKernelFalcon(GenericKernelFalcon *pThis) { + __nvoc_dtor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_GenericKernelFalcon(GenericKernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_Object(Object* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_GenericKernelFalcon(GenericKernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner, struct OBJGPU * arg_pGpu, KernelFalconEngineConfig * arg_pFalconConfig) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_GenericKernelFalcon_fail_KernelFalcon; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_GenericKernelFalcon_fail_IntrService; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_GenericKernelFalcon_fail_Object; + __nvoc_init_dataField_GenericKernelFalcon(pThis, pRmhalspecowner); + + status = __nvoc_gkflcnConstruct(pThis, arg_pGpu, arg_pFalconConfig); + if (status != NV_OK) goto __nvoc_ctor_GenericKernelFalcon_fail__init; + goto __nvoc_ctor_GenericKernelFalcon_exit; // Success + +__nvoc_ctor_GenericKernelFalcon_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_GenericKernelFalcon_fail_Object: + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); +__nvoc_ctor_GenericKernelFalcon_fail_IntrService: + __nvoc_dtor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon); +__nvoc_ctor_GenericKernelFalcon_fail_KernelFalcon: +__nvoc_ctor_GenericKernelFalcon_exit: + + return status; +} + +static void __nvoc_init_funcTable_GenericKernelFalcon_1(GenericKernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__gkflcnResetHw__ = &gkflcnResetHw_IMPL; + + pThis->__gkflcnIsEngineInReset__ = &gkflcnIsEngineInReset_IMPL; + + pThis->__gkflcnRegisterIntrService__ = &gkflcnRegisterIntrService_IMPL; + + pThis->__gkflcnServiceNotificationInterrupt__ = &gkflcnServiceNotificationInterrupt_IMPL; + + pThis->__nvoc_base_KernelFalcon.__kflcnResetHw__ = &__nvoc_thunk_GenericKernelFalcon_kflcnResetHw; + + pThis->__nvoc_base_KernelFalcon.__kflcnIsEngineInReset__ = &__nvoc_thunk_GenericKernelFalcon_kflcnIsEngineInReset; + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_GenericKernelFalcon_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceNotificationInterrupt__ = &__nvoc_thunk_GenericKernelFalcon_intrservServiceNotificationInterrupt; + + pThis->__gkflcnClearInterrupt__ = 
&__nvoc_thunk_IntrService_gkflcnClearInterrupt; + + pThis->__gkflcnServiceInterrupt__ = &__nvoc_thunk_IntrService_gkflcnServiceInterrupt; +} + +void __nvoc_init_funcTable_GenericKernelFalcon(GenericKernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_GenericKernelFalcon_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_KernelFalcon(KernelFalcon*, RmHalspecOwner* ); +void __nvoc_init_IntrService(IntrService*, RmHalspecOwner* ); +void __nvoc_init_Object(Object*, RmHalspecOwner* ); +void __nvoc_init_GenericKernelFalcon(GenericKernelFalcon *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_GenericKernelFalcon = pThis; + pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_KernelFalcon(&pThis->__nvoc_base_KernelFalcon, pRmhalspecowner); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + __nvoc_init_Object(&pThis->__nvoc_base_Object, pRmhalspecowner); + __nvoc_init_funcTable_GenericKernelFalcon(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_GenericKernelFalcon(GenericKernelFalcon **ppThis, Dynamic *pParent, NvU32 createFlags, struct OBJGPU * arg_pGpu, KernelFalconEngineConfig * arg_pFalconConfig) { + NV_STATUS status; + Object *pParentObj; + GenericKernelFalcon *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(GenericKernelFalcon)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(GenericKernelFalcon)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GenericKernelFalcon); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_GenericKernelFalcon(pThis, pRmhalspecowner); + status = __nvoc_ctor_GenericKernelFalcon(pThis, pRmhalspecowner, arg_pGpu, arg_pFalconConfig); + if (status != NV_OK) goto __nvoc_objCreate_GenericKernelFalcon_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_GenericKernelFalcon_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_GenericKernelFalcon(GenericKernelFalcon **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct OBJGPU * arg_pGpu = va_arg(args, struct OBJGPU *); + KernelFalconEngineConfig * arg_pFalconConfig = va_arg(args, KernelFalconEngineConfig *); + + status = __nvoc_objCreate_GenericKernelFalcon(ppThis, pParent, createFlags, arg_pGpu, arg_pFalconConfig); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_falcon_nvoc.h b/src/nvidia/generated/g_kernel_falcon_nvoc.h new file mode 100644 index 000000000..85d681e3d --- /dev/null +++ b/src/nvidia/generated/g_kernel_falcon_nvoc.h @@ -0,0 +1,559 @@ +#ifndef _G_KERNEL_FALCON_NVOC_H_ +#define _G_KERNEL_FALCON_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides definitions for all KernelFalcon data structures and + * interfaces. + */ + +#include "g_kernel_falcon_nvoc.h" + +#ifndef KERNEL_FALCON_H +#define KERNEL_FALCON_H + +#include "core/core.h" +#include "gpu/falcon/falcon_common.h" +#include "gpu/intr/intr_service.h" + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + + +typedef struct KernelFalconEngineConfig { + NvU32 registerBase; // i.e. NV_P{GSP,SEC,NVDEC} + NvU32 riscvRegisterBase; // i.e. NV_FALCON2_{GSP,SEC,NVDEC}_BASE + NvU32 fbifBase; // i.e. NV_P{GSP,SEC,NVDEC}_FBIF_BASE + NvBool bBootFromHs; // whether engine has Boot-from-HS (true for HS-capable engines GA10X+) + NvU32 pmcEnableMask; // engine's enable bitmask in PMC (or 0 if engine reset is not in PMC) + NvU32 bIsPmcDeviceEngine; // whether engine's enable bit is in NV_PMC_DEVICE_ENABLE (vs. NV_PMC_ENABLE) + ENGDESCRIPTOR physEngDesc; // The engine descriptor for the falcon (e.g. ENG_SEC2) + NvU32 ctxAttr; // Memory attributes used for context buffers + NvU32 ctxBufferSize; // Context buffer size in bytes + NvU32 addrSpaceList; // index into ADDRLIST array in mem_desc.h +} KernelFalconEngineConfig; + +/*! 
+ * Base class for booting Falcon cores (including RISC-V) + */ +#ifdef NVOC_KERNEL_FALCON_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelFalcon { + const struct NVOC_RTTI *__nvoc_rtti; + struct KernelFalcon *__nvoc_pbase_KernelFalcon; + NvBool (*__kflcnIsRiscvActive__)(struct OBJGPU *, struct KernelFalcon *); + void (*__kflcnRiscvProgramBcr__)(struct OBJGPU *, struct KernelFalcon *, NvBool); + void (*__kflcnSwitchToFalcon__)(struct OBJGPU *, struct KernelFalcon *); + NV_STATUS (*__kflcnResetHw__)(struct OBJGPU *, struct KernelFalcon *); + NV_STATUS (*__kflcnPreResetWait__)(struct OBJGPU *, struct KernelFalcon *); + NV_STATUS (*__kflcnWaitForResetToFinish__)(struct OBJGPU *, struct KernelFalcon *); + NvBool (*__kflcnIsEngineInReset__)(struct OBJGPU *, struct KernelFalcon *); + NvU32 (*__kflcnReadIntrStatus__)(struct OBJGPU *, struct KernelFalcon *); + void (*__kflcnIntrRetrigger__)(struct OBJGPU *, struct KernelFalcon *); + NvU32 (*__kflcnMaskImemAddr__)(struct OBJGPU *, struct KernelFalcon *, NvU32); + NvU32 (*__kflcnMaskDmemAddr__)(struct OBJGPU *, struct KernelFalcon *, NvU32); + NvU32 registerBase; + NvU32 riscvRegisterBase; + NvU32 fbifBase; + NvBool bBootFromHs; + NvU32 pmcEnableMask; + NvU32 bIsPmcDeviceEngine; + ENGDESCRIPTOR physEngDesc; + NvU32 ctxAttr; + NvU32 ctxBufferSize; + NvU32 addrSpaceList; +}; + +#ifndef __NVOC_CLASS_KernelFalcon_TYPEDEF__ +#define __NVOC_CLASS_KernelFalcon_TYPEDEF__ +typedef struct KernelFalcon KernelFalcon; +#endif /* __NVOC_CLASS_KernelFalcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFalcon +#define __nvoc_class_id_KernelFalcon 0xb6b1af +#endif /* __nvoc_class_id_KernelFalcon */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon; + +#define __staticCast_KernelFalcon(pThis) \ + ((pThis)->__nvoc_pbase_KernelFalcon) + +#ifdef __nvoc_kernel_falcon_h_disabled +#define __dynamicCast_KernelFalcon(pThis) ((KernelFalcon*)NULL) +#else //__nvoc_kernel_falcon_h_disabled +#define __dynamicCast_KernelFalcon(pThis) \ + ((KernelFalcon*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelFalcon))) +#endif //__nvoc_kernel_falcon_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelFalcon(KernelFalcon**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelFalcon(KernelFalcon**, Dynamic*, NvU32); +#define __objCreate_KernelFalcon(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelFalcon((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kflcnIsRiscvActive(pGpu, pKernelFlcn) kflcnIsRiscvActive_DISPATCH(pGpu, pKernelFlcn) +#define kflcnIsRiscvActive_HAL(pGpu, pKernelFlcn) kflcnIsRiscvActive_DISPATCH(pGpu, pKernelFlcn) +#define kflcnRiscvProgramBcr(pGpu, pKernelFlcn, bBRFetch) kflcnRiscvProgramBcr_DISPATCH(pGpu, pKernelFlcn, bBRFetch) +#define kflcnRiscvProgramBcr_HAL(pGpu, pKernelFlcn, bBRFetch) kflcnRiscvProgramBcr_DISPATCH(pGpu, pKernelFlcn, bBRFetch) +#define kflcnSwitchToFalcon(pGpu, pKernelFlcn) kflcnSwitchToFalcon_DISPATCH(pGpu, pKernelFlcn) +#define kflcnSwitchToFalcon_HAL(pGpu, pKernelFlcn) kflcnSwitchToFalcon_DISPATCH(pGpu, pKernelFlcn) +#define kflcnResetHw(pGpu, pKernelFlcn) kflcnResetHw_DISPATCH(pGpu, pKernelFlcn) +#define kflcnPreResetWait(pGpu, pKernelFlcn) kflcnPreResetWait_DISPATCH(pGpu, pKernelFlcn) +#define kflcnPreResetWait_HAL(pGpu, pKernelFlcn) kflcnPreResetWait_DISPATCH(pGpu, pKernelFlcn) +#define kflcnWaitForResetToFinish(pGpu, pKernelFlcn) kflcnWaitForResetToFinish_DISPATCH(pGpu, pKernelFlcn) 
+#define kflcnWaitForResetToFinish_HAL(pGpu, pKernelFlcn) kflcnWaitForResetToFinish_DISPATCH(pGpu, pKernelFlcn) +#define kflcnIsEngineInReset(pGpu, pKernelFlcn) kflcnIsEngineInReset_DISPATCH(pGpu, pKernelFlcn) +#define kflcnReadIntrStatus(pGpu, pKerneFlcn) kflcnReadIntrStatus_DISPATCH(pGpu, pKerneFlcn) +#define kflcnReadIntrStatus_HAL(pGpu, pKerneFlcn) kflcnReadIntrStatus_DISPATCH(pGpu, pKerneFlcn) +#define kflcnIntrRetrigger(pGpu, pKernelFlcn) kflcnIntrRetrigger_DISPATCH(pGpu, pKernelFlcn) +#define kflcnIntrRetrigger_HAL(pGpu, pKernelFlcn) kflcnIntrRetrigger_DISPATCH(pGpu, pKernelFlcn) +#define kflcnMaskImemAddr(pGpu, pKernelFlcn, addr) kflcnMaskImemAddr_DISPATCH(pGpu, pKernelFlcn, addr) +#define kflcnMaskImemAddr_HAL(pGpu, pKernelFlcn, addr) kflcnMaskImemAddr_DISPATCH(pGpu, pKernelFlcn, addr) +#define kflcnMaskDmemAddr(pGpu, pKernelFlcn, addr) kflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr) +#define kflcnMaskDmemAddr_HAL(pGpu, pKernelFlcn, addr) kflcnMaskDmemAddr_DISPATCH(pGpu, pKernelFlcn, addr) +NvU32 kflcnRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline NvU32 kflcnRegRead(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); + return 0; +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnRegRead(pGpu, pKernelFlcn, offset) kflcnRegRead_TU102(pGpu, pKernelFlcn, offset) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnRegRead_HAL(pGpu, pKernelFlcn, offset) kflcnRegRead(pGpu, pKernelFlcn, offset) + +void kflcnRegWrite_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void kflcnRegWrite(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnRegWrite(pGpu, pKernelFlcn, offset, data) kflcnRegWrite_TU102(pGpu, pKernelFlcn, offset, data) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnRegWrite_HAL(pGpu, pKernelFlcn, offset, data) kflcnRegWrite(pGpu, pKernelFlcn, offset, data) + +NvU32 kflcnRiscvRegRead_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline NvU32 kflcnRiscvRegRead(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); + return 0; +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnRiscvRegRead(pGpu, pKernelFlcn, offset) kflcnRiscvRegRead_TU102(pGpu, pKernelFlcn, offset) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, offset) kflcnRiscvRegRead(pGpu, pKernelFlcn, offset) + +void kflcnRiscvRegWrite_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void kflcnRiscvRegWrite(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 offset, NvU32 data) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnRiscvRegWrite(pGpu, pKernelFlcn, offset, data) kflcnRiscvRegWrite_TU102(pGpu, pKernelFlcn, offset, data) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnRiscvRegWrite_HAL(pGpu, pKernelFlcn, offset, data) kflcnRiscvRegWrite(pGpu, pKernelFlcn, offset, data) + +NvBool 
kflcnIsRiscvCpuEnabled_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline NvBool kflcnIsRiscvCpuEnabled(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnIsRiscvCpuEnabled(pGpu, pKernelFlcn) kflcnIsRiscvCpuEnabled_TU102(pGpu, pKernelFlcn) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnIsRiscvCpuEnabled_HAL(pGpu, pKernelFlcn) kflcnIsRiscvCpuEnabled(pGpu, pKernelFlcn) + +void kflcnReset_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void kflcnReset(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnReset(pGpu, pKernelFlcn) kflcnReset_TU102(pGpu, pKernelFlcn) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnReset_HAL(pGpu, pKernelFlcn) kflcnReset(pGpu, pKernelFlcn) + +void kflcnSecureReset_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void kflcnSecureReset(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnSecureReset(pGpu, pKernelFlcn) kflcnSecureReset_TU102(pGpu, pKernelFlcn) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnSecureReset_HAL(pGpu, pKernelFlcn) kflcnSecureReset(pGpu, pKernelFlcn) + +void kflcnEnable_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bEnable); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void kflcnEnable(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bEnable) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnEnable(pGpu, pKernelFlcn, bEnable) kflcnEnable_TU102(pGpu, pKernelFlcn, bEnable) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnEnable_HAL(pGpu, pKernelFlcn, bEnable) kflcnEnable(pGpu, pKernelFlcn, bEnable) + +void kflcnStartCpu_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void kflcnStartCpu(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnStartCpu(pGpu, pKernelFlcn) kflcnStartCpu_TU102(pGpu, pKernelFlcn) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnStartCpu_HAL(pGpu, pKernelFlcn) kflcnStartCpu(pGpu, pKernelFlcn) + +void kflcnDisableCtxReq_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void kflcnDisableCtxReq(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnDisableCtxReq(pGpu, pKernelFlcn) kflcnDisableCtxReq_TU102(pGpu, pKernelFlcn) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnDisableCtxReq_HAL(pGpu, pKernelFlcn) kflcnDisableCtxReq(pGpu, pKernelFlcn) + +NV_STATUS kflcnWaitForHalt_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 timeoutUs, NvU32 flags); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline NV_STATUS kflcnWaitForHalt(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, 
NvU32 timeoutUs, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnWaitForHalt(pGpu, pKernelFlcn, timeoutUs, flags) kflcnWaitForHalt_TU102(pGpu, pKernelFlcn, timeoutUs, flags) +#endif //__nvoc_kernel_falcon_h_disabled + +#define kflcnWaitForHalt_HAL(pGpu, pKernelFlcn, timeoutUs, flags) kflcnWaitForHalt(pGpu, pKernelFlcn, timeoutUs, flags) + +NvBool kflcnIsRiscvActive_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +NvBool kflcnIsRiscvActive_GA10X(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +static inline NvBool kflcnIsRiscvActive_108313(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0))); +} + +static inline NvBool kflcnIsRiscvActive_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return pKernelFlcn->__kflcnIsRiscvActive__(pGpu, pKernelFlcn); +} + +void kflcnRiscvProgramBcr_GA102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bBRFetch); + +static inline void kflcnRiscvProgramBcr_f2d351(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bBRFetch) { + NV_ASSERT_PRECOMP(0); +} + +static inline void kflcnRiscvProgramBcr_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvBool bBRFetch) { + pKernelFlcn->__kflcnRiscvProgramBcr__(pGpu, pKernelFlcn, bBRFetch); +} + +void kflcnSwitchToFalcon_GA10X(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +static inline void kflcnSwitchToFalcon_b3696a(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return; +} + +static inline void kflcnSwitchToFalcon_f2d351(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_PRECOMP(0); +} + +static inline void kflcnSwitchToFalcon_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + pKernelFlcn->__kflcnSwitchToFalcon__(pGpu, pKernelFlcn); +} + +static inline NV_STATUS kflcnResetHw_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return pKernelFlcn->__kflcnResetHw__(pGpu, pKernelFlcn); +} + +NV_STATUS kflcnPreResetWait_GA10X(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +static inline NV_STATUS kflcnPreResetWait_56cd7a(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return NV_OK; +} + +static inline NV_STATUS kflcnPreResetWait_5baef9(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS kflcnPreResetWait_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return pKernelFlcn->__kflcnPreResetWait__(pGpu, pKernelFlcn); +} + +NV_STATUS kflcnWaitForResetToFinish_GA102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +NV_STATUS kflcnWaitForResetToFinish_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +static inline NV_STATUS kflcnWaitForResetToFinish_56cd7a(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return NV_OK; +} + +static inline NV_STATUS kflcnWaitForResetToFinish_5baef9(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS kflcnWaitForResetToFinish_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return pKernelFlcn->__kflcnWaitForResetToFinish__(pGpu, pKernelFlcn); +} + +static inline NvBool kflcnIsEngineInReset_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return pKernelFlcn->__kflcnIsEngineInReset__(pGpu, 
pKernelFlcn); +} + +NvU32 kflcnReadIntrStatus_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKerneFlcn); + +NvU32 kflcnReadIntrStatus_GA102(struct OBJGPU *pGpu, struct KernelFalcon *pKerneFlcn); + +static inline NvU32 kflcnReadIntrStatus_474d46(struct OBJGPU *pGpu, struct KernelFalcon *pKerneFlcn) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 kflcnReadIntrStatus_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKerneFlcn) { + return pKerneFlcn->__kflcnReadIntrStatus__(pGpu, pKerneFlcn); +} + +void kflcnIntrRetrigger_GA100(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn); + +static inline void kflcnIntrRetrigger_b3696a(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + return; +} + +static inline void kflcnIntrRetrigger_f2d351(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + NV_ASSERT_PRECOMP(0); +} + +static inline void kflcnIntrRetrigger_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn) { + pKernelFlcn->__kflcnIntrRetrigger__(pGpu, pKernelFlcn); +} + +NvU32 kflcnMaskImemAddr_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr); + +NvU32 kflcnMaskImemAddr_GA100(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr); + +static inline NvU32 kflcnMaskImemAddr_474d46(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 kflcnMaskImemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr) { + return pKernelFlcn->__kflcnMaskImemAddr__(pGpu, pKernelFlcn, addr); +} + +NvU32 kflcnMaskDmemAddr_TU102(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr); + +NvU32 kflcnMaskDmemAddr_GA100(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr); + +static inline NvU32 kflcnMaskDmemAddr_474d46(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 kflcnMaskDmemAddr_DISPATCH(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFlcn, NvU32 addr) { + return pKernelFlcn->__kflcnMaskDmemAddr__(pGpu, pKernelFlcn, addr); +} + +void kflcnConfigureEngine_IMPL(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFalcon, KernelFalconEngineConfig *pFalconConfig); +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void kflcnConfigureEngine(struct OBJGPU *pGpu, struct KernelFalcon *pKernelFalcon, KernelFalconEngineConfig *pFalconConfig) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnConfigureEngine(pGpu, pKernelFalcon, pFalconConfig) kflcnConfigureEngine_IMPL(pGpu, pKernelFalcon, pFalconConfig) +#endif //__nvoc_kernel_falcon_h_disabled + +NV_STATUS kflcnAllocContext_IMPL(struct OBJGPU *arg0, struct KernelFalcon *arg1, struct KernelChannel *arg2, NvU32 arg3); +#ifdef __nvoc_kernel_falcon_h_disabled +static inline NV_STATUS kflcnAllocContext(struct OBJGPU *arg0, struct KernelFalcon *arg1, struct KernelChannel *arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnAllocContext(arg0, arg1, arg2, arg3) kflcnAllocContext_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_falcon_h_disabled + +NV_STATUS kflcnFreeContext_IMPL(struct OBJGPU *arg0, struct KernelFalcon *arg1, struct KernelChannel *arg2, NvU32 arg3); +#ifdef __nvoc_kernel_falcon_h_disabled +static inline NV_STATUS kflcnFreeContext(struct OBJGPU *arg0, struct KernelFalcon *arg1, struct 
KernelChannel *arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelFalcon was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_falcon_h_disabled +#define kflcnFreeContext(arg0, arg1, arg2, arg3) kflcnFreeContext_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_falcon_h_disabled + +struct KernelFalcon *kflcnGetKernelFalconForEngine_IMPL(struct OBJGPU *pGpu, ENGDESCRIPTOR physEngDesc); +#define kflcnGetKernelFalconForEngine(pGpu, physEngDesc) kflcnGetKernelFalconForEngine_IMPL(pGpu, physEngDesc) +#undef PRIVATE_FIELD + + +// Basic implementation of KernelFalcon that can be instantiated. +#ifdef NVOC_KERNEL_FALCON_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GenericKernelFalcon { + const struct NVOC_RTTI *__nvoc_rtti; + struct KernelFalcon __nvoc_base_KernelFalcon; + struct IntrService __nvoc_base_IntrService; + struct Object __nvoc_base_Object; + struct KernelFalcon *__nvoc_pbase_KernelFalcon; + struct IntrService *__nvoc_pbase_IntrService; + struct Object *__nvoc_pbase_Object; + struct GenericKernelFalcon *__nvoc_pbase_GenericKernelFalcon; + NV_STATUS (*__gkflcnResetHw__)(struct OBJGPU *, struct GenericKernelFalcon *); + NvBool (*__gkflcnIsEngineInReset__)(struct OBJGPU *, struct GenericKernelFalcon *); + void (*__gkflcnRegisterIntrService__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceRecord *); + NV_STATUS (*__gkflcnServiceNotificationInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceServiceNotificationInterruptArguments *); + NvBool (*__gkflcnClearInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceClearInterruptArguments *); + NvU32 (*__gkflcnServiceInterrupt__)(struct OBJGPU *, struct GenericKernelFalcon *, IntrServiceServiceInterruptArguments *); +}; + +#ifndef __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ +#define __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ +typedef struct GenericKernelFalcon GenericKernelFalcon; +#endif /* __NVOC_CLASS_GenericKernelFalcon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericKernelFalcon +#define __nvoc_class_id_GenericKernelFalcon 0xabcf08 +#endif /* __nvoc_class_id_GenericKernelFalcon */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericKernelFalcon; + +#define __staticCast_GenericKernelFalcon(pThis) \ + ((pThis)->__nvoc_pbase_GenericKernelFalcon) + +#ifdef __nvoc_kernel_falcon_h_disabled +#define __dynamicCast_GenericKernelFalcon(pThis) ((GenericKernelFalcon*)NULL) +#else //__nvoc_kernel_falcon_h_disabled +#define __dynamicCast_GenericKernelFalcon(pThis) \ + ((GenericKernelFalcon*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GenericKernelFalcon))) +#endif //__nvoc_kernel_falcon_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_GenericKernelFalcon(GenericKernelFalcon**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_GenericKernelFalcon(GenericKernelFalcon**, Dynamic*, NvU32, struct OBJGPU * arg_pGpu, KernelFalconEngineConfig * arg_pFalconConfig); +#define __objCreate_GenericKernelFalcon(ppNewObj, pParent, createFlags, arg_pGpu, arg_pFalconConfig) \ + __nvoc_objCreate_GenericKernelFalcon((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pGpu, arg_pFalconConfig) + +#define gkflcnResetHw(pGpu, pGenKernFlcn) gkflcnResetHw_DISPATCH(pGpu, pGenKernFlcn) +#define gkflcnIsEngineInReset(pGpu, pGenKernFlcn) gkflcnIsEngineInReset_DISPATCH(pGpu, pGenKernFlcn) +#define gkflcnRegisterIntrService(arg0, arg1, arg2) gkflcnRegisterIntrService_DISPATCH(arg0, arg1, arg2) 
+#define gkflcnServiceNotificationInterrupt(arg0, arg1, arg2) gkflcnServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2) +#define gkflcnClearInterrupt(pGpu, pIntrService, pParams) gkflcnClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define gkflcnServiceInterrupt(pGpu, pIntrService, pParams) gkflcnServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams) +void gkflcnNonstallIntrCheckAndClear_TU102(struct OBJGPU *arg0, struct GenericKernelFalcon *pKernelFlcn, struct THREAD_STATE_NODE *arg1); + +#ifdef __nvoc_kernel_falcon_h_disabled +static inline void gkflcnNonstallIntrCheckAndClear(struct OBJGPU *arg0, struct GenericKernelFalcon *pKernelFlcn, struct THREAD_STATE_NODE *arg1) { + NV_ASSERT_FAILED_PRECOMP("GenericKernelFalcon was disabled!"); +} +#else //__nvoc_kernel_falcon_h_disabled +#define gkflcnNonstallIntrCheckAndClear(arg0, pKernelFlcn, arg1) gkflcnNonstallIntrCheckAndClear_TU102(arg0, pKernelFlcn, arg1) +#endif //__nvoc_kernel_falcon_h_disabled + +#define gkflcnNonstallIntrCheckAndClear_HAL(arg0, pKernelFlcn, arg1) gkflcnNonstallIntrCheckAndClear(arg0, pKernelFlcn, arg1) + +NV_STATUS gkflcnResetHw_IMPL(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn); + +static inline NV_STATUS gkflcnResetHw_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn) { + return pGenKernFlcn->__gkflcnResetHw__(pGpu, pGenKernFlcn); +} + +NvBool gkflcnIsEngineInReset_IMPL(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn); + +static inline NvBool gkflcnIsEngineInReset_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pGenKernFlcn) { + return pGenKernFlcn->__gkflcnIsEngineInReset__(pGpu, pGenKernFlcn); +} + +void gkflcnRegisterIntrService_IMPL(struct OBJGPU *arg0, struct GenericKernelFalcon *arg1, IntrServiceRecord arg2[155]); + +static inline void gkflcnRegisterIntrService_DISPATCH(struct OBJGPU *arg0, struct GenericKernelFalcon *arg1, IntrServiceRecord arg2[155]) { + arg1->__gkflcnRegisterIntrService__(arg0, arg1, arg2); +} + +NV_STATUS gkflcnServiceNotificationInterrupt_IMPL(struct OBJGPU *arg0, struct GenericKernelFalcon *arg1, IntrServiceServiceNotificationInterruptArguments *arg2); + +static inline NV_STATUS gkflcnServiceNotificationInterrupt_DISPATCH(struct OBJGPU *arg0, struct GenericKernelFalcon *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) { + return arg1->__gkflcnServiceNotificationInterrupt__(arg0, arg1, arg2); +} + +static inline NvBool gkflcnClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__gkflcnClearInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NvU32 gkflcnServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct GenericKernelFalcon *pIntrService, IntrServiceServiceInterruptArguments *pParams) { + return pIntrService->__gkflcnServiceInterrupt__(pGpu, pIntrService, pParams); +} + +NV_STATUS gkflcnConstruct_IMPL(struct GenericKernelFalcon *arg_pGenKernFlcn, struct OBJGPU *arg_pGpu, KernelFalconEngineConfig *arg_pFalconConfig); +#define __nvoc_gkflcnConstruct(arg_pGenKernFlcn, arg_pGpu, arg_pFalconConfig) gkflcnConstruct_IMPL(arg_pGenKernFlcn, arg_pGpu, arg_pFalconConfig) +#undef PRIVATE_FIELD + + +#endif // KERNEL_FALCON_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_FALCON_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_fifo_nvoc.c b/src/nvidia/generated/g_kernel_fifo_nvoc.c new file mode 100644 index 000000000..06b2344bb --- /dev/null +++ 
b/src/nvidia/generated/g_kernel_fifo_nvoc.c @@ -0,0 +1,532 @@ +#define NVOC_KERNEL_FIFO_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_fifo_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xf3e155 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFifo; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelFifo(KernelFifo*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelFifo(KernelFifo*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelFifo(KernelFifo*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelFifo(KernelFifo*, RmHalspecOwner* ); +void __nvoc_dtor_KernelFifo(KernelFifo*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelFifo; + +static const struct NVOC_RTTI __nvoc_rtti_KernelFifo_KernelFifo = { + /*pClassDef=*/ &__nvoc_class_def_KernelFifo, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelFifo, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelFifo_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelFifo, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelFifo_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelFifo, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelFifo = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelFifo_KernelFifo, + &__nvoc_rtti_KernelFifo_OBJENGSTATE, + &__nvoc_rtti_KernelFifo_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFifo = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelFifo), + /*classId=*/ classId(KernelFifo), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelFifo", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelFifo, + /*pCastInfo=*/ &__nvoc_castinfo_KernelFifo, + /*pExportInfo=*/ &__nvoc_export_info_KernelFifo +}; + +static NV_STATUS __nvoc_thunk_KernelFifo_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelFifo, ENGDESCRIPTOR engDesc) { + return kfifoConstructEngine(pGpu, (struct KernelFifo *)(((unsigned char *)pKernelFifo) - __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), engDesc); +} + +static NV_STATUS __nvoc_thunk_KernelFifo_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelFifo) { + return kfifoStateInitLocked(pGpu, (struct KernelFifo *)(((unsigned char *)pKernelFifo) - __nvoc_rtti_KernelFifo_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_KernelFifo_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelFifo) { + kfifoStateDestroy(pGpu, (struct KernelFifo *)(((unsigned char *)pKernelFifo) - __nvoc_rtti_KernelFifo_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelFifo_engstateStatePostLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelFifo, NvU32 flags) { + return kfifoStatePostLoad(pGpu, (struct KernelFifo *)(((unsigned char *)pKernelFifo) - __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_KernelFifo_engstateStatePreUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelFifo, NvU32 flags) { + return 
kfifoStatePreUnload(pGpu, (struct KernelFifo *)(((unsigned char *)pKernelFifo) - __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoReconcileTunableState(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoStateLoad(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoStateUnload(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoStatePreLoad(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoStatePostUnload(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoStateInitUnlocked(POBJGPU pGpu, struct KernelFifo *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kfifoInitMissing(POBJGPU pGpu, struct KernelFifo *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoStatePreInitLocked(POBJGPU pGpu, struct KernelFifo *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoStatePreInitUnlocked(POBJGPU pGpu, struct KernelFifo *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoGetTunableState(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoCompareTunableState(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kfifoFreeTunableState(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoAllocTunableState(POBJGPU pGpu, struct KernelFifo *pEngstate, void **ppTunableState) { + return 
engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kfifoSetTunableState(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kfifoIsPresent(POBJGPU pGpu, struct KernelFifo *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelFifo_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelFifo = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelFifo(KernelFifo *pThis) { + __nvoc_kfifoDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelFifo(KernelFifo *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal field -- bUseChidHeap + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bUseChidHeap = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bUseChidHeap = ((NvBool)(0 != 0)); + } + + // Hal field -- bUsePerRunlistChram + pThis->bUsePerRunlistChram = ((NvBool)(0 != 0)); + + // Hal field -- bIsPerRunlistChramSupportedInHw + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bIsPerRunlistChramSupportedInHw = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bIsPerRunlistChramSupportedInHw = ((NvBool)(0 != 0)); + } + + // Hal field -- bHostEngineExpansion + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bHostEngineExpansion = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bHostEngineExpansion = ((NvBool)(0 != 0)); + } + + // Hal field -- bHostHasLbOverflow + if (0) + { + } + // default + else + { + pThis->bHostHasLbOverflow = ((NvBool)(0 != 0)); + } + + // Hal field -- bSubcontextSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bSubcontextSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bSubcontextSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bMixedInstmemApertureDefAllowed + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | 
TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bMixedInstmemApertureDefAllowed = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bMixedInstmemApertureDefAllowed = ((NvBool)(0 != 0)); + } + + // Hal field -- bIsZombieSubctxWarEnabled + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bIsZombieSubctxWarEnabled = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bIsZombieSubctxWarEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bIsSchedSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bIsSchedSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bIsSchedSupported = ((NvBool)(0 != 0)); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelFifo(KernelFifo *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelFifo_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelFifo(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelFifo_exit; // Success + +__nvoc_ctor_KernelFifo_fail_OBJENGSTATE: +__nvoc_ctor_KernelFifo_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelFifo_1(KernelFifo *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kfifoConstructEngine__ = &kfifoConstructEngine_IMPL; + + pThis->__kfifoStateInitLocked__ = &kfifoStateInitLocked_IMPL; + + pThis->__kfifoStateDestroy__ = &kfifoStateDestroy_IMPL; + + // Hal function -- kfifoStatePostLoad + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kfifoStatePostLoad__ = &kfifoStatePostLoad_GM107; + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- kfifoStatePreUnload + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kfifoStatePreUnload__ = &kfifoStatePreUnload_GM107; + } + else if (0) + { + } + + // Hal function -- kfifoChannelGroupGetLocalMaxSubcontext + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kfifoChannelGroupGetLocalMaxSubcontext__ = &kfifoChannelGroupGetLocalMaxSubcontext_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ 
+ { + pThis->__kfifoChannelGroupGetLocalMaxSubcontext__ = &kfifoChannelGroupGetLocalMaxSubcontext_GA100; + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- kfifoEngineInfoXlate + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kfifoEngineInfoXlate__ = &kfifoEngineInfoXlate_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kfifoEngineInfoXlate__ = &kfifoEngineInfoXlate_GA100; + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- kfifoGenerateWorkSubmitToken + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kfifoGenerateWorkSubmitToken__ = &kfifoGenerateWorkSubmitToken_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kfifoGenerateWorkSubmitToken__ = &kfifoGenerateWorkSubmitToken_GA100; + } + else if (0) + { + } + + // Hal function -- kfifoUpdateUsermodeDoorbell + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kfifoUpdateUsermodeDoorbell__ = &kfifoUpdateUsermodeDoorbell_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kfifoUpdateUsermodeDoorbell__ = &kfifoUpdateUsermodeDoorbell_GA100; + } + else if (0) + { + } + + // Hal function -- kfifoRunlistGetBaseShift + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kfifoRunlistGetBaseShift__ = &kfifoRunlistGetBaseShift_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kfifoRunlistGetBaseShift__ = &kfifoRunlistGetBaseShift_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kfifoRunlistGetBaseShift__ = &kfifoRunlistGetBaseShift_GA102; + } + else if (0) + { + } + + // Hal function -- kfifoGetMaxCeChannelGroups + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kfifoGetMaxCeChannelGroups__ = &kfifoGetMaxCeChannelGroups_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kfifoGetMaxCeChannelGroups__ = &kfifoGetMaxCeChannelGroups_GA100; + } + else if (0) + { + } + + // Hal function -- kfifoSetupUserD + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kfifoSetupUserD__ = &kfifoSetupUserD_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << 
(chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kfifoSetupUserD__ = &kfifoSetupUserD_GA100; + } + else if (0) + { + } + else if (0) + { + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelFifo_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelFifo_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelFifo_engstateStateDestroy; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__ = &__nvoc_thunk_KernelFifo_engstateStatePostLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreUnload__ = &__nvoc_thunk_KernelFifo_engstateStatePreUnload; + + pThis->__kfifoReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kfifoReconcileTunableState; + + pThis->__kfifoStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kfifoStateLoad; + + pThis->__kfifoStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kfifoStateUnload; + + pThis->__kfifoStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kfifoStatePreLoad; + + pThis->__kfifoStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kfifoStatePostUnload; + + pThis->__kfifoStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kfifoStateInitUnlocked; + + pThis->__kfifoInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kfifoInitMissing; + + pThis->__kfifoStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kfifoStatePreInitLocked; + + pThis->__kfifoStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kfifoStatePreInitUnlocked; + + pThis->__kfifoGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kfifoGetTunableState; + + pThis->__kfifoCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kfifoCompareTunableState; + + pThis->__kfifoFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kfifoFreeTunableState; + + pThis->__kfifoAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kfifoAllocTunableState; + + pThis->__kfifoSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kfifoSetTunableState; + + pThis->__kfifoIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kfifoIsPresent; +} + +void __nvoc_init_funcTable_KernelFifo(KernelFifo *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelFifo_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelFifo(KernelFifo *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelFifo = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelFifo(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelFifo(KernelFifo **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelFifo *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelFifo)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelFifo)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelFifo); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + 
NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelFifo(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelFifo(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelFifo_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelFifo_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelFifo(KernelFifo **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelFifo(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_fifo_nvoc.h b/src/nvidia/generated/g_kernel_fifo_nvoc.h new file mode 100644 index 000000000..b0c853d69 --- /dev/null +++ b/src/nvidia/generated/g_kernel_fifo_nvoc.h @@ -0,0 +1,1923 @@ +#ifndef _G_KERNEL_FIFO_NVOC_H_ +#define _G_KERNEL_FIFO_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_fifo_nvoc.h" + +#ifndef _KERNELFIFO_H_ +#define _KERNELFIFO_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: KernelFifo.h * +* Defines and structures used for the KernelFifo Object. 
* +\***************************************************************************/ + +#include "class/cl2080.h" // NV2080_ENGINE_TYPE_LAST + +#include "kernel/gpu/eng_state.h" +#include "kernel/gpu/gpu_halspec.h" +#include "kernel/gpu/fifo/channel_descendant.h" + +#include "containers/eheap_old.h" +#include "containers/map.h" +#include "utils/nvbitvector.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "nvoc/utility.h" + +#include "ctrl/ctrl2080/ctrl2080gpu.h" // NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS +#include "ctrl/ctrl2080/ctrl2080fifo.h" // NV2080_CTRL_FIFO_MEM_INFO +#include "ctrl/ctrl2080/ctrl2080internal.h" // NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_* +#include "ctrl/ctrl906f.h" + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + +struct KernelChannelGroup; + +#ifndef __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ +typedef struct KernelChannelGroup KernelChannelGroup; +#endif /* __NVOC_CLASS_KernelChannelGroup_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroup +#define __nvoc_class_id_KernelChannelGroup 0xec6de1 +#endif /* __nvoc_class_id_KernelChannelGroup */ + + +struct KernelSchedMgr; + +#ifndef __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ +#define __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ +typedef struct KernelSchedMgr KernelSchedMgr; +#endif /* __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSchedMgr +#define __nvoc_class_id_KernelSchedMgr 0xea0970 +#endif /* __nvoc_class_id_KernelSchedMgr */ + + + +struct HOST_VGPU_DEVICE; + +// Pre-Ampere runlist ID to pass to kfifoGetChidMgr +#define CHIDMGR_RUNLIST_ID_LEGACY 0 + +#define INVALID_CHID 0xFFFFFFFF + +#define INVALID_RUNLIST_ID 0xFFFFFFFFU + +/*! We use 32-bit process ID for now */ +#define KERNEL_PID (0xFFFFFFFFULL) + +/*! cap at 64 for now, can extend when needed */ +#define MAX_NUM_RUNLISTS NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_ID +#define NUM_BUFFERS_PER_RUNLIST (NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS) +MAKE_BITVECTOR(CHID_MGR_VALID_BIT_VECTOR, MAX_NUM_RUNLISTS); + +// +// Matches GET_PUSHBUFFER_CAPABILITIES bit positions +// +#define VID_PB_ALLOWED 0x1 +#define PCI_PB_ALLOWED 0x2 + +/*! + * USERD isolation domain + * + * USERD allocated by different domains should not be put into the same physical page. + * This provides the basic security isolation because a physical page is the unit of + * granularity at which OS can provide isolation between processes. + * + * GUEST_USER: USERD allocated by guest user process + * GUEST_KERNEL: USERD allocated by guest kernel process + * GUEST_INSECURE: USERD allocated by guest/kernel process, + * INSECURE means there is no isolation between guest user and guest kernel + * HOST_USER: USERD allocated by host user process + * HOST_KERNEL: USERD allocated by host kernel process + * + * Please refer to RM_USERD_Isolation wiki for more details + */ +typedef enum _def_fifo_isolation_domain +{ + GUEST_USER = 0x0, + GUEST_KERNEL, + GUEST_INSECURE, + HOST_USER, + HOST_KERNEL +} FIFO_ISOLATION_DOMAIN; + +/*! + * USERD isolation ID + * + * In vGPU environment, sub process means the guest user/kernel process running within a single VM. + * It also refers to any sub process (or sub-sub process) within a parent process. 
+ * + * Please refer to Resource Server for more details about sub process concept + */ +typedef struct _def_fifo_isolation_id +{ + FIFO_ISOLATION_DOMAIN domain; + NvU64 processID; + NvU64 subProcessID; +} FIFO_ISOLATIONID, *PFIFO_ISOLATIONID; + +/*! Used for calls to kfifoChannelGetFifoContextMemDesc */ +typedef enum +{ + FIFO_CTX_RAMFC = 0, + FIFO_CTX_INST_BLOCK = 1, +} FIFO_CTX; + +typedef struct +{ + NvU32 addrLo; + NvU32 addrHi; + NvU32 faultType; + NvU32 clientId; + NvBool bGpc; + NvU32 gpcId; + NvU32 accessType; + NvU32 faultEngineId; + NvU64 faultedShaderProgramVA[NV906F_CTRL_MMU_FAULT_SHADER_TYPES]; +} FIFO_MMU_EXCEPTION_DATA; + +/*! Used for calls to kchannelAllocHwID */ +typedef enum +{ + CHANNEL_HW_ID_ALLOC_MODE_GROW_DOWN, + CHANNEL_HW_ID_ALLOC_MODE_GROW_UP, + CHANNEL_HW_ID_ALLOC_MODE_PROVIDED, +} CHANNEL_HW_ID_ALLOC_MODE; + +typedef struct _fifo_hw_id +{ + /*! + * Bitfield of HW IDs. 1 = reserved, 0 = available. + * A reserved ID may not be allocated but it can't be used for any + * future allocations. + */ + NvU32 *pHwIdInUse; + + /*! + * Number of elements in pHwIdInUse + */ + NvU32 hwIdInUseSz; +} FIFO_HW_ID; + +DECLARE_INTRUSIVE_MAP(KernelChannelGroupMap); + +typedef struct +{ + /*! + * Runlist managed by this CHID_MGR. + */ + NvU32 runlistId; + + /*! + * Heap to manage pFifoData for all channels. + */ + OBJEHEAP *pFifoDataHeap; + + /*! + * Global ChID heap - manages channel IDs and isolation IDs. In non-SRIOV + * systems, allocations/frees in this heap mirror those in pFifoDataHeap. + * When SRIOV is enabled, we reserve/free channel IDs for the guest in + * chunks from this heap when the VM starts/shuts down. ChID allocations + * during channel construction from the guest ChID space are from the + * virtual ChID heap for that guest. + */ + OBJEHEAP *pGlobalChIDHeap; + + /*! + * Until FIFO code for SR-IOV moves to guest RM, this virtual ChID heap + * manages channel IDs allocated to a guest. + */ + OBJEHEAP **ppVirtualChIDHeap; + + /*! + * Number of channels managed by this CHID_MGR + */ + NvU32 numChannels; + + FIFO_HW_ID channelGrpMgr; + + /*! + * Channel group pointers + */ + KernelChannelGroupMap *pChanGrpTree; + +} CHID_MGR; + +/*! Typedef for the @ref channel_iterator structure */ +typedef struct channel_iterator CHANNEL_ITERATOR; +typedef struct channel_iterator *PCHANNEL_ITERATOR; + +/*! + * Generic Linked-list of Channel pointers to be used where ever multiple channels + * are managed. + * TODO: Remove as part of Jira CORERM-2658 + */ +typedef struct _channel_node +{ + struct KernelChannel *pKernelChannel; + struct _channel_node *pNext; +} CHANNEL_NODE, *PCHANNEL_NODE; + +/*! + * This structure represents an iterator for all channels. + * It is created by function @ref kfifoGetChannelIterator. + */ +struct channel_iterator +{ + NvU32 numChannels; + NvU32 numRunlists; + NvU32 physicalChannelID; + NvU32 runlistId; + CHANNEL_NODE channelNode; +}; + +typedef enum +{ + // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc., + ENGINE_INFO_TYPE_ENG_DESC = 0, + // HW engine ID + ENGINE_INFO_TYPE_FIFO_TAG, + // NV2080_ENGINE_TYPE_* + ENGINE_INFO_TYPE_NV2080, + // runlist id (meaning varies by GPU) + ENGINE_INFO_TYPE_RUNLIST, + // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_* + ENGINE_INFO_TYPE_MMU_FAULT_ID, + // ROBUST_CHANNEL_* + ENGINE_INFO_TYPE_RC_MASK, + // Reset Bit Position. 
On Ampere, only valid if not _INVALID + ENGINE_INFO_TYPE_RESET, + // Interrupt Bit Position + ENGINE_INFO_TYPE_INTR, + // log2(MC_ENGINE_*) + ENGINE_INFO_TYPE_MC, + // The DEV_TYPE_ENUM for this engine + ENGINE_INFO_TYPE_DEV_TYPE_ENUM, + // The particular instance of this engine type + ENGINE_INFO_TYPE_INSTANCE_ID, + // The base address for this engine's NV_RUNLIST. Valid only on Ampere+ + ENGINE_INFO_TYPE_RUNLIST_PRI_BASE, + // If this entry is a host-driven engine. Valid only on Ampere+ + ENGINE_INFO_TYPE_IS_ENGINE, + // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+ + ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID, + // The base address for this engine's NV_CHRAM registers. Valid only on Ampere+ + ENGINE_INFO_TYPE_CHRAM_PRI_BASE, + + // Used for iterating the engine info table by the index passed. + ENGINE_INFO_TYPE_INVALID, + + // Input-only parameter for fifoEngineInfoXlate. + ENGINE_INFO_TYPE_PBDMA_ID +} ENGINE_INFO_TYPE; + +// Maximum number of pbdma IDs for a given engine +#define FIFO_ENGINE_MAX_NUM_PBDMA 2 + +// Maximum size (including null terminator) for an engine name +#define FIFO_ENGINE_NAME_MAX_SIZE 16 + +typedef struct _def_fifo_engine_list +{ + NvU32 engineData[ENGINE_INFO_TYPE_INVALID]; + NvU32 pbdmaIds[FIFO_ENGINE_MAX_NUM_PBDMA]; + NvU32 pbdmaFaultIds[FIFO_ENGINE_MAX_NUM_PBDMA]; + NvU32 numPbdmas; + char engineName[FIFO_ENGINE_NAME_MAX_SIZE]; +} FIFO_ENGINE_LIST, *PFIFO_ENGINE_LIST; + +typedef struct _def_engine_info +{ + NvU32 maxNumPbdmas; // max number of PBDMAs + NvU32 basePbdmaFaultId; // base PBDMA Fault ID + NvU32 maxNumRunlists; // max number of runlists + NvU32 numRunlists; // Gets the number of runlists, not the maximum + NvU32 engineInfoListSize; + FIFO_ENGINE_LIST *engineInfoList; +} ENGINE_INFO; + +// Fully qualified instance block address +typedef struct +{ + NvU64 address; // Physical address or IOVA (unshifted) + NvU32 aperture; // INST_BLOCK_APERTURE + NvU32 gfid; // Valid in PF when SR-IOV is enabled +} INST_BLOCK_DESC; + +typedef struct _channel_list +{ + CHANNEL_NODE *pHead; + CHANNEL_NODE *pTail; +} CHANNEL_LIST, *PCHANNEL_LIST; + +typedef struct _def_preallocated_userd_info +{ + NvU32 userdAperture; // default aperture for USERD + NvU32 userdAttr; // default attr for USERD + MEMORY_DESCRIPTOR *userdPhysDesc[NV_MAX_SUBDEVICES]; // base phys addr of contiguous USERD + NvU64 userdBar1MapStartOffset; // base offset of USERD's BAR1 map + NvU32 userdBar1MapSize; // size of USERD's map + NvU8 *userdBar1CpuPtr; // cpu map of USERD + NvU32 userdBar1RefMask; // mask of GPUs referencing userD +} PREALLOCATED_USERD_INFO; + + +// Scheduling enable/disable handlers +typedef NV_STATUS (*PFifoSchedulingHandler)(OBJGPU *pGpu, void *pData); +typedef struct FifoSchedulingHandlerEntry +{ + PFifoSchedulingHandler pCallback; + void *pCallbackParam; + NvBool bHandled; +} FifoSchedulingHandlerEntry; + +MAKE_LIST(FifoSchedulingHandlerEntryList, FifoSchedulingHandlerEntry); + +// +// This define indicates legacy pdb in instance block. +// +#define FIFO_PDB_IDX_BASE (0xFFFFFFFF) + +// +// Aperture defines must match NV_MMU_PTE_APERTURE HW defines +// We do not support instance memory in peer (1). 
+// +#define INST_BLOCK_APERTURE_VIDEO_MEMORY 0x00000000 +#define INST_BLOCK_APERTURE_RESERVED 0x00000001 +#define INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY 0x00000002 +#define INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY 0x00000003 + +// Macro to verify HW/class defines are compatible +#define VERIFY_INST_BLOCK_APERTURE(vid, coh, ncoh) \ + ct_assert((vid) == INST_BLOCK_APERTURE_VIDEO_MEMORY); \ + ct_assert((coh) == INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY); \ + ct_assert((ncoh) == INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY) + +// +// The actual GPU object definition +// +#ifdef NVOC_KERNEL_FIFO_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelFifo { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelFifo *__nvoc_pbase_KernelFifo; + NV_STATUS (*__kfifoConstructEngine__)(struct OBJGPU *, struct KernelFifo *, ENGDESCRIPTOR); + NV_STATUS (*__kfifoStateInitLocked__)(struct OBJGPU *, struct KernelFifo *); + void (*__kfifoStateDestroy__)(struct OBJGPU *, struct KernelFifo *); + NV_STATUS (*__kfifoStatePostLoad__)(struct OBJGPU *, struct KernelFifo *, NvU32); + NV_STATUS (*__kfifoStatePreUnload__)(struct OBJGPU *, struct KernelFifo *, NvU32); + NvU32 (*__kfifoChannelGroupGetLocalMaxSubcontext__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannelGroup *, NvBool); + NV_STATUS (*__kfifoEngineInfoXlate__)(struct OBJGPU *, struct KernelFifo *, ENGINE_INFO_TYPE, NvU32, ENGINE_INFO_TYPE, NvU32 *); + NV_STATUS (*__kfifoGenerateWorkSubmitToken__)(struct OBJGPU *, struct KernelFifo *, struct KernelChannel *, NvU32 *, NvBool); + NV_STATUS (*__kfifoUpdateUsermodeDoorbell__)(struct OBJGPU *, struct KernelFifo *, NvU32, NvU32); + NvU32 (*__kfifoRunlistGetBaseShift__)(struct KernelFifo *); + NvU32 (*__kfifoGetMaxCeChannelGroups__)(struct OBJGPU *, struct KernelFifo *); + void (*__kfifoSetupUserD__)(struct KernelFifo *, NvU8 *); + NV_STATUS (*__kfifoReconcileTunableState__)(POBJGPU, struct KernelFifo *, void *); + NV_STATUS (*__kfifoStateLoad__)(POBJGPU, struct KernelFifo *, NvU32); + NV_STATUS (*__kfifoStateUnload__)(POBJGPU, struct KernelFifo *, NvU32); + NV_STATUS (*__kfifoStatePreLoad__)(POBJGPU, struct KernelFifo *, NvU32); + NV_STATUS (*__kfifoStatePostUnload__)(POBJGPU, struct KernelFifo *, NvU32); + NV_STATUS (*__kfifoStateInitUnlocked__)(POBJGPU, struct KernelFifo *); + void (*__kfifoInitMissing__)(POBJGPU, struct KernelFifo *); + NV_STATUS (*__kfifoStatePreInitLocked__)(POBJGPU, struct KernelFifo *); + NV_STATUS (*__kfifoStatePreInitUnlocked__)(POBJGPU, struct KernelFifo *); + NV_STATUS (*__kfifoGetTunableState__)(POBJGPU, struct KernelFifo *, void *); + NV_STATUS (*__kfifoCompareTunableState__)(POBJGPU, struct KernelFifo *, void *, void *); + void (*__kfifoFreeTunableState__)(POBJGPU, struct KernelFifo *, void *); + NV_STATUS (*__kfifoAllocTunableState__)(POBJGPU, struct KernelFifo *, void **); + NV_STATUS (*__kfifoSetTunableState__)(POBJGPU, struct KernelFifo *, void *); + NvBool (*__kfifoIsPresent__)(POBJGPU, struct KernelFifo *); + struct KernelSchedMgr *pKernelSchedMgr; + CHID_MGR **ppChidMgr; + NvU32 numChidMgrs; + union CHID_MGR_VALID_BIT_VECTOR chidMgrValid; + ENGINE_INFO engineInfo; + PREALLOCATED_USERD_INFO userdInfo; + NvU32 maxSubcontextCount; + FifoSchedulingHandlerEntryList postSchedulingEnableHandlerList; + FifoSchedulingHandlerEntryList 
preSchedulingDisableHandlerList; + NvBool bUseChidHeap; + NvBool bUsePerRunlistChram; + NvBool bIsPerRunlistChramSupportedInHw; + NvBool bHostEngineExpansion; + NvBool bHostHasLbOverflow; + NvBool bSubcontextSupported; + NvBool bMixedInstmemApertureDefAllowed; + NvBool bIsZombieSubctxWarEnabled; + NvBool bIsSchedSupported; + NvBool bWddmInterleavingPolicyEnabled; + NvBool bUserdInSystemMemory; + NvBool bUserdMapDmaSupported; + NvBool bPerRunlistChramOverride; + NvBool bNumChannelsOverride; + NvU32 numChannelsOverride; + NvBool bInstProtectedMem; + NvU32 InstAttr; + const NV_ADDRESS_SPACE *pInstAllocList; + MEMORY_DESCRIPTOR *pDummyPageMemDesc; + CTX_BUF_POOL_INFO *pRunlistBufPool[52]; + MEMORY_DESCRIPTOR ***pppRunlistBufMemDesc; +}; + +#ifndef __NVOC_CLASS_KernelFifo_TYPEDEF__ +#define __NVOC_CLASS_KernelFifo_TYPEDEF__ +typedef struct KernelFifo KernelFifo; +#endif /* __NVOC_CLASS_KernelFifo_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelFifo +#define __nvoc_class_id_KernelFifo 0xf3e155 +#endif /* __nvoc_class_id_KernelFifo */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFifo; + +#define __staticCast_KernelFifo(pThis) \ + ((pThis)->__nvoc_pbase_KernelFifo) + +#ifdef __nvoc_kernel_fifo_h_disabled +#define __dynamicCast_KernelFifo(pThis) ((KernelFifo*)NULL) +#else //__nvoc_kernel_fifo_h_disabled +#define __dynamicCast_KernelFifo(pThis) \ + ((KernelFifo*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelFifo))) +#endif //__nvoc_kernel_fifo_h_disabled + +#define PDB_PROP_KFIFO_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KFIFO_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelFifo(KernelFifo**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelFifo(KernelFifo**, Dynamic*, NvU32); +#define __objCreate_KernelFifo(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelFifo((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kfifoConstructEngine(pGpu, pKernelFifo, engDesc) kfifoConstructEngine_DISPATCH(pGpu, pKernelFifo, engDesc) +#define kfifoStateInitLocked(pGpu, pKernelFifo) kfifoStateInitLocked_DISPATCH(pGpu, pKernelFifo) +#define kfifoStateDestroy(pGpu, pKernelFifo) kfifoStateDestroy_DISPATCH(pGpu, pKernelFifo) +#define kfifoStatePostLoad(pGpu, pKernelFifo, flags) kfifoStatePostLoad_DISPATCH(pGpu, pKernelFifo, flags) +#define kfifoStatePostLoad_HAL(pGpu, pKernelFifo, flags) kfifoStatePostLoad_DISPATCH(pGpu, pKernelFifo, flags) +#define kfifoStatePreUnload(pGpu, pKernelFifo, flags) kfifoStatePreUnload_DISPATCH(pGpu, pKernelFifo, flags) +#define kfifoStatePreUnload_HAL(pGpu, pKernelFifo, flags) kfifoStatePreUnload_DISPATCH(pGpu, pKernelFifo, flags) +#define kfifoChannelGroupGetLocalMaxSubcontext(pGpu, pKernelFifo, arg0, arg1) kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(pGpu, pKernelFifo, arg0, arg1) +#define kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(pGpu, pKernelFifo, arg0, arg1) +#define kfifoEngineInfoXlate(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) kfifoEngineInfoXlate_DISPATCH(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) +#define kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) kfifoEngineInfoXlate_DISPATCH(pGpu, pKernelFifo, inType, inVal, outType, pOutVal) +#define kfifoGenerateWorkSubmitToken(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) kfifoGenerateWorkSubmitToken_DISPATCH(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) +#define 
kfifoGenerateWorkSubmitToken_HAL(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) kfifoGenerateWorkSubmitToken_DISPATCH(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost) +#define kfifoUpdateUsermodeDoorbell(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateUsermodeDoorbell_DISPATCH(arg0, arg1, workSubmitToken, runlisId) +#define kfifoUpdateUsermodeDoorbell_HAL(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateUsermodeDoorbell_DISPATCH(arg0, arg1, workSubmitToken, runlisId) +#define kfifoRunlistGetBaseShift(pKernelFifo) kfifoRunlistGetBaseShift_DISPATCH(pKernelFifo) +#define kfifoRunlistGetBaseShift_HAL(pKernelFifo) kfifoRunlistGetBaseShift_DISPATCH(pKernelFifo) +#define kfifoGetMaxCeChannelGroups(pGpu, pKernelFifo) kfifoGetMaxCeChannelGroups_DISPATCH(pGpu, pKernelFifo) +#define kfifoGetMaxCeChannelGroups_HAL(pGpu, pKernelFifo) kfifoGetMaxCeChannelGroups_DISPATCH(pGpu, pKernelFifo) +#define kfifoSetupUserD(pKernelFifo, pUserD) kfifoSetupUserD_DISPATCH(pKernelFifo, pUserD) +#define kfifoSetupUserD_HAL(pKernelFifo, pUserD) kfifoSetupUserD_DISPATCH(pKernelFifo, pUserD) +#define kfifoReconcileTunableState(pGpu, pEngstate, pTunableState) kfifoReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kfifoStateLoad(pGpu, pEngstate, arg0) kfifoStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kfifoStateUnload(pGpu, pEngstate, arg0) kfifoStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kfifoStatePreLoad(pGpu, pEngstate, arg0) kfifoStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kfifoStatePostUnload(pGpu, pEngstate, arg0) kfifoStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kfifoStateInitUnlocked(pGpu, pEngstate) kfifoStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kfifoInitMissing(pGpu, pEngstate) kfifoInitMissing_DISPATCH(pGpu, pEngstate) +#define kfifoStatePreInitLocked(pGpu, pEngstate) kfifoStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kfifoStatePreInitUnlocked(pGpu, pEngstate) kfifoStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kfifoGetTunableState(pGpu, pEngstate, pTunableState) kfifoGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kfifoCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kfifoCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kfifoFreeTunableState(pGpu, pEngstate, pTunableState) kfifoFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kfifoAllocTunableState(pGpu, pEngstate, ppTunableState) kfifoAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kfifoSetTunableState(pGpu, pEngstate, pTunableState) kfifoSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kfifoIsPresent(pGpu, pEngstate) kfifoIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kfifoConstructHal_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoConstructHal(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoConstructHal(pGpu, pKernelFifo) kfifoConstructHal_GM107(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoConstructHal_HAL(pGpu, pKernelFifo) kfifoConstructHal(pGpu, pKernelFifo) + +static inline NV_STATUS kfifoChannelGroupSetTimesliceSched_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) { + return NV_OK; +} + +#ifdef 
__nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChannelGroupSetTimesliceSched(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChannelGroupSetTimesliceSched(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimesliceSched_56cd7a(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoChannelGroupSetTimesliceSched_HAL(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimesliceSched(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) + +NvU32 kfifoRunlistQueryNumChannels_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoRunlistQueryNumChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRunlistQueryNumChannels(pGpu, pKernelFifo, runlistId) kfifoRunlistQueryNumChannels_KERNEL(pGpu, pKernelFifo, runlistId) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoRunlistQueryNumChannels_HAL(pGpu, pKernelFifo, runlistId) kfifoRunlistQueryNumChannels(pGpu, pKernelFifo, runlistId) + +NV_STATUS kfifoIdleChannelsPerDevice_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, NvU32 numChannels, NvU32 flags, NvU32 timeout); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoIdleChannelsPerDevice(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, NvU32 numChannels, NvU32 flags, NvU32 timeout) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoIdleChannelsPerDevice(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) kfifoIdleChannelsPerDevice_KERNEL(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoIdleChannelsPerDevice_HAL(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) kfifoIdleChannelsPerDevice(pGpu, pKernelFifo, phClients, phDevices, phChannels, numChannels, flags, timeout) + +NvU64 kfifoChannelGroupGetDefaultTimeslice_GV100(struct KernelFifo *pKernelFifo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU64 kfifoChannelGroupGetDefaultTimeslice(struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChannelGroupGetDefaultTimeslice(pKernelFifo) kfifoChannelGroupGetDefaultTimeslice_GV100(pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoChannelGroupGetDefaultTimeslice_HAL(pKernelFifo) kfifoChannelGroupGetDefaultTimeslice(pKernelFifo) + +static inline NvU64 kfifoRunlistGetMinTimeSlice_4a4dee(struct KernelFifo *pKernelFifo) { + return 0; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU64 kfifoRunlistGetMinTimeSlice(struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else 
//__nvoc_kernel_fifo_h_disabled +#define kfifoRunlistGetMinTimeSlice(pKernelFifo) kfifoRunlistGetMinTimeSlice_4a4dee(pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoRunlistGetMinTimeSlice_HAL(pKernelFifo) kfifoRunlistGetMinTimeSlice(pKernelFifo) + +NV_STATUS kfifoGetInstMemInfo_GM107(struct KernelFifo *pKernelFifo, NvU64 *pSize, NvU64 *pAlignment, NvBool *pbInstProtectedMem, NvU32 *pInstAttr, const NV_ADDRESS_SPACE **ppInstAllocList); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetInstMemInfo(struct KernelFifo *pKernelFifo, NvU64 *pSize, NvU64 *pAlignment, NvBool *pbInstProtectedMem, NvU32 *pInstAttr, const NV_ADDRESS_SPACE **ppInstAllocList) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetInstMemInfo(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) kfifoGetInstMemInfo_GM107(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetInstMemInfo_HAL(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) kfifoGetInstMemInfo(pKernelFifo, pSize, pAlignment, pbInstProtectedMem, pInstAttr, ppInstAllocList) + +void kfifoGetInstBlkSizeAlign_GM107(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pShift); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoGetInstBlkSizeAlign(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pShift) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetInstBlkSizeAlign(pKernelFifo, pSize, pShift) kfifoGetInstBlkSizeAlign_GM107(pKernelFifo, pSize, pShift) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetInstBlkSizeAlign_HAL(pKernelFifo, pSize, pShift) kfifoGetInstBlkSizeAlign(pKernelFifo, pSize, pShift) + +NvU32 kfifoGetDefaultRunlist_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineType); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetDefaultRunlist(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineType) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetDefaultRunlist(pGpu, pKernelFifo, engineType) kfifoGetDefaultRunlist_GM107(pGpu, pKernelFifo, engineType) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetDefaultRunlist_HAL(pGpu, pKernelFifo, engineType) kfifoGetDefaultRunlist(pGpu, pKernelFifo, engineType) + +NvBool kfifoValidateSCGTypeAndRunqueue_GP102(struct KernelFifo *pKernelFifo, NvU32 scgType, NvU32 runqueue); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvBool kfifoValidateSCGTypeAndRunqueue(struct KernelFifo *pKernelFifo, NvU32 scgType, NvU32 runqueue) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoValidateSCGTypeAndRunqueue(pKernelFifo, scgType, runqueue) kfifoValidateSCGTypeAndRunqueue_GP102(pKernelFifo, scgType, runqueue) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoValidateSCGTypeAndRunqueue_HAL(pKernelFifo, scgType, runqueue) kfifoValidateSCGTypeAndRunqueue(pKernelFifo, scgType, runqueue) + +NvBool kfifoValidateEngineAndRunqueue_GP102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 runqueue); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvBool kfifoValidateEngineAndRunqueue(struct OBJGPU *pGpu, 
struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 runqueue) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoValidateEngineAndRunqueue(pGpu, pKernelFifo, engDesc, runqueue) kfifoValidateEngineAndRunqueue_GP102(pGpu, pKernelFifo, engDesc, runqueue) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoValidateEngineAndRunqueue_HAL(pGpu, pKernelFifo, engDesc, runqueue) kfifoValidateEngineAndRunqueue(pGpu, pKernelFifo, engDesc, runqueue) + +NvBool kfifoValidateEngineAndSubctxType_GP102(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 subctxType); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvBool kfifoValidateEngineAndSubctxType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvU32 subctxType) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoValidateEngineAndSubctxType(pGpu, pKernelFifo, engDesc, subctxType) kfifoValidateEngineAndSubctxType_GP102(pGpu, pKernelFifo, engDesc, subctxType) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoValidateEngineAndSubctxType_HAL(pGpu, pKernelFifo, engDesc, subctxType) kfifoValidateEngineAndSubctxType(pGpu, pKernelFifo, engDesc, subctxType) + +NV_STATUS kfifoRmctrlGetWorkSubmitToken_GV100(struct KernelFifo *pKernelFifo, NvHandle hClient, NvHandle hChannel, NvU32 *pWorkSubmitToken); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoRmctrlGetWorkSubmitToken(struct KernelFifo *pKernelFifo, NvHandle hClient, NvHandle hChannel, NvU32 *pWorkSubmitToken) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRmctrlGetWorkSubmitToken(pKernelFifo, hClient, hChannel, pWorkSubmitToken) kfifoRmctrlGetWorkSubmitToken_GV100(pKernelFifo, hClient, hChannel, pWorkSubmitToken) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoRmctrlGetWorkSubmitToken_HAL(pKernelFifo, hClient, hChannel, pWorkSubmitToken) kfifoRmctrlGetWorkSubmitToken(pKernelFifo, hClient, hChannel, pWorkSubmitToken) + +NV_STATUS kfifoChannelGetFifoContextMemDesc_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, FIFO_CTX engState, MEMORY_DESCRIPTOR **ppMemdesc); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChannelGetFifoContextMemDesc(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *pKernelChannel, FIFO_CTX engState, MEMORY_DESCRIPTOR **ppMemdesc) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChannelGetFifoContextMemDesc(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) kfifoChannelGetFifoContextMemDesc_GM107(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoChannelGetFifoContextMemDesc_HAL(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) kfifoChannelGetFifoContextMemDesc(pGpu, pKernelFifo, pKernelChannel, engState, ppMemdesc) + +static inline NV_STATUS kfifoCheckChannelAllocAddrSpaces_56cd7a(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace) { + return NV_OK; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoCheckChannelAllocAddrSpaces(struct KernelFifo *pKernelFifo, NV_ADDRESS_SPACE 
userdAddrSpace, NV_ADDRESS_SPACE pushBuffAddrSpace, NV_ADDRESS_SPACE gpFifoAddrSpace) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoCheckChannelAllocAddrSpaces(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) kfifoCheckChannelAllocAddrSpaces_56cd7a(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoCheckChannelAllocAddrSpaces_HAL(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) kfifoCheckChannelAllocAddrSpaces(pKernelFifo, userdAddrSpace, pushBuffAddrSpace, gpFifoAddrSpace) + +NV_STATUS kfifoConvertInstToKernelChannel_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, INST_BLOCK_DESC *arg0, struct KernelChannel **arg1); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoConvertInstToKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, INST_BLOCK_DESC *arg0, struct KernelChannel **arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoConvertInstToKernelChannel(pGpu, pKernelFifo, arg0, arg1) kfifoConvertInstToKernelChannel_GM107(pGpu, pKernelFifo, arg0, arg1) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoConvertInstToKernelChannel_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoConvertInstToKernelChannel(pGpu, pKernelFifo, arg0, arg1) + +static inline NvU64 kfifoGetMmioUsermodeOffset_474d46(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU64 kfifoGetMmioUsermodeOffset(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetMmioUsermodeOffset(pGpu, pKernelFifo, arg0) kfifoGetMmioUsermodeOffset_474d46(pGpu, pKernelFifo, arg0) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetMmioUsermodeOffset_HAL(pGpu, pKernelFifo, arg0) kfifoGetMmioUsermodeOffset(pGpu, pKernelFifo, arg0) + +static inline NvU64 kfifoGetMmioUsermodeSize_474d46(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU64 kfifoGetMmioUsermodeSize(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetMmioUsermodeSize(pGpu, pKernelFifo, arg0) kfifoGetMmioUsermodeSize_474d46(pGpu, pKernelFifo, arg0) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetMmioUsermodeSize_HAL(pGpu, pKernelFifo, arg0) kfifoGetMmioUsermodeSize(pGpu, pKernelFifo, arg0) + +NV_STATUS kfifoGetUsermodeMapInfo_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *arg0, NvU32 *arg1); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetUsermodeMapInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetUsermodeMapInfo(pGpu, pKernelFifo, arg0, arg1) kfifoGetUsermodeMapInfo_GV100(pGpu, pKernelFifo, arg0, arg1) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, arg0, 
arg1) kfifoGetUsermodeMapInfo(pGpu, pKernelFifo, arg0, arg1) + +NvU32 kfifoGetMaxSubcontext_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetMaxSubcontext(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetMaxSubcontext(pGpu, pKernelFifo, arg0) kfifoGetMaxSubcontext_GV100(pGpu, pKernelFifo, arg0) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, arg0) kfifoGetMaxSubcontext(pGpu, pKernelFifo, arg0) + +NvU32 kfifoGetMaxSubcontextFromGr_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernel); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetMaxSubcontextFromGr(struct OBJGPU *pGpu, struct KernelFifo *pKernel) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetMaxSubcontextFromGr(pGpu, pKernel) kfifoGetMaxSubcontextFromGr_KERNEL(pGpu, pKernel) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetMaxSubcontextFromGr_HAL(pGpu, pKernel) kfifoGetMaxSubcontextFromGr(pGpu, pKernel) + +static inline NvU32 kfifoGetNumRunqueues_adde13(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + return 2; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetNumRunqueues(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetNumRunqueues(pGpu, pKernelFifo) kfifoGetNumRunqueues_adde13(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo) kfifoGetNumRunqueues(pGpu, pKernelFifo) + +NvU32 kfifoGetMaxChannelGroupSize_GV100(struct KernelFifo *pKernelFifo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetMaxChannelGroupSize(struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetMaxChannelGroupSize(pKernelFifo) kfifoGetMaxChannelGroupSize_GV100(pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetMaxChannelGroupSize_HAL(pKernelFifo) kfifoGetMaxChannelGroupSize(pKernelFifo) + +static inline void kfifoGetCtxBufferMapFlags_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engine, NvU32 *pFlags) { + return; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoGetCtxBufferMapFlags(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engine, NvU32 *pFlags) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetCtxBufferMapFlags(pGpu, pKernelFifo, engine, pFlags) kfifoGetCtxBufferMapFlags_b3696a(pGpu, pKernelFifo, engine, pFlags) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetCtxBufferMapFlags_HAL(pGpu, pKernelFifo, engine, pFlags) kfifoGetCtxBufferMapFlags(pGpu, pKernelFifo, engine, pFlags) + +static inline NV_STATUS kfifoAddObject_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) { + return NV_OK; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoAddObject(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoAddObject(pGpu, pKernelFifo, pObject) kfifoAddObject_56cd7a(pGpu, pKernelFifo, pObject) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoAddObject_HAL(pGpu, pKernelFifo, pObject) kfifoAddObject(pGpu, pKernelFifo, pObject) + +static inline NV_STATUS kfifoDeleteObject_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) { + return NV_OK; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoDeleteObject(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct ChannelDescendant *pObject) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoDeleteObject(pGpu, pKernelFifo, pObject) kfifoDeleteObject_56cd7a(pGpu, pKernelFifo, pObject) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoDeleteObject_HAL(pGpu, pKernelFifo, pObject) kfifoDeleteObject(pGpu, pKernelFifo, pObject) + +NV_STATUS kfifoConstructEngineList_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoConstructEngineList(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoConstructEngineList(pGpu, pKernelFifo) kfifoConstructEngineList_KERNEL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoConstructEngineList_HAL(pGpu, pKernelFifo) kfifoConstructEngineList(pGpu, pKernelFifo) + +NV_STATUS kfifoGetHostDeviceInfoTable_KERNEL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO *pEngineInfo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetHostDeviceInfoTable(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO *pEngineInfo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetHostDeviceInfoTable(pGpu, pKernelFifo, pEngineInfo) kfifoGetHostDeviceInfoTable_KERNEL(pGpu, pKernelFifo, pEngineInfo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetHostDeviceInfoTable_HAL(pGpu, pKernelFifo, pEngineInfo) kfifoGetHostDeviceInfoTable(pGpu, pKernelFifo, pEngineInfo) + +void kfifoGetSubctxType_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 *arg1); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoGetSubctxType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetSubctxType(pGpu, pKernelFifo, arg0, arg1) kfifoGetSubctxType_GV100(pGpu, pKernelFifo, arg0, arg1) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetSubctxType_HAL(pGpu, pKernelFifo, arg0, arg1) kfifoGetSubctxType(pGpu, pKernelFifo, arg0, arg1) + +static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken_c04480(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGenerateInternalWorkSubmitToken(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_kernel_fifo_h_disabled +#define kfifoGenerateInternalWorkSubmitToken(pGpu, arg0, arg1) kfifoGenerateInternalWorkSubmitToken_c04480(pGpu, arg0, arg1) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGenerateInternalWorkSubmitToken_HAL(pGpu, arg0, arg1) kfifoGenerateInternalWorkSubmitToken(pGpu, arg0, arg1) + +static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode_c04480(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoUpdateInternalDoorbellForUsermode(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoUpdateInternalDoorbellForUsermode(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateInternalDoorbellForUsermode_c04480(arg0, arg1, workSubmitToken, runlisId) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoUpdateInternalDoorbellForUsermode_HAL(arg0, arg1, workSubmitToken, runlisId) kfifoUpdateInternalDoorbellForUsermode(arg0, arg1, workSubmitToken, runlisId) + +static inline NvBool kfifoIsLiteModeEnabled_491d52(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvBool kfifoIsLiteModeEnabled(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoIsLiteModeEnabled(pGpu, pKernelFifo) kfifoIsLiteModeEnabled_491d52(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoIsLiteModeEnabled_HAL(pGpu, pKernelFifo) kfifoIsLiteModeEnabled(pGpu, pKernelFifo) + +NvU32 kfifoGetNumEngines_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetNumEngines(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetNumEngines(pGpu, pKernelFifo) kfifoGetNumEngines_GM107(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetNumEngines_HAL(pGpu, pKernelFifo) kfifoGetNumEngines(pGpu, pKernelFifo) + +const char *kfifoGetEngineName_GM107(struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline const char *kfifoGetEngineName(struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NULL; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetEngineName(pKernelFifo, inType, inVal) kfifoGetEngineName_GM107(pKernelFifo, inType, inVal) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetEngineName_HAL(pKernelFifo, inType, inVal) kfifoGetEngineName(pKernelFifo, inType, inVal) + +NvU32 kfifoGetMaxNumRunlists_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetMaxNumRunlists(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetMaxNumRunlists(pGpu, pKernelFifo) kfifoGetMaxNumRunlists_GM107(pGpu, pKernelFifo) +#endif 
//__nvoc_kernel_fifo_h_disabled + +#define kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo) kfifoGetMaxNumRunlists(pGpu, pKernelFifo) + +NV_STATUS kfifoGetEnginePbdmaIds_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE type, NvU32 val, NvU32 **ppPbdmaIds, NvU32 *pNumPbdmas); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetEnginePbdmaIds(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE type, NvU32 val, NvU32 **ppPbdmaIds, NvU32 *pNumPbdmas) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetEnginePbdmaIds(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) kfifoGetEnginePbdmaIds_GM107(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetEnginePbdmaIds_HAL(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) kfifoGetEnginePbdmaIds(pGpu, pKernelFifo, type, val, ppPbdmaIds, pNumPbdmas) + +NV_STATUS kfifoGetEnginePartnerList_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pParams); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetEnginePartnerList(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetEnginePartnerList(pGpu, pKernelFifo, pParams) kfifoGetEnginePartnerList_GM107(pGpu, pKernelFifo, pParams) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetEnginePartnerList_HAL(pGpu, pKernelFifo, pParams) kfifoGetEnginePartnerList(pGpu, pKernelFifo, pParams) + +static inline NvBool kfifoRunlistIsTsgHeaderSupported_cbe027(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) { + return ((NvBool)(0 == 0)); +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvBool kfifoRunlistIsTsgHeaderSupported(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRunlistIsTsgHeaderSupported(pGpu, pKernelFifo, arg0) kfifoRunlistIsTsgHeaderSupported_cbe027(pGpu, pKernelFifo, arg0) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoRunlistIsTsgHeaderSupported_HAL(pGpu, pKernelFifo, arg0) kfifoRunlistIsTsgHeaderSupported(pGpu, pKernelFifo, arg0) + +NvU32 kfifoRunlistGetEntrySize_GV100(struct KernelFifo *arg0); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoRunlistGetEntrySize(struct KernelFifo *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRunlistGetEntrySize(arg0) kfifoRunlistGetEntrySize_GV100(arg0) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoRunlistGetEntrySize_HAL(arg0) kfifoRunlistGetEntrySize(arg0) + +static inline void kfifoSetupBar1UserdSnoop_b3696a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bEnable, NvU64 offset) { + return; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoSetupBar1UserdSnoop(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bEnable, NvU64 offset) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoSetupBar1UserdSnoop(pGpu, pKernelFifo, bEnable, offset) kfifoSetupBar1UserdSnoop_b3696a(pGpu, 
pKernelFifo, bEnable, offset) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoSetupBar1UserdSnoop_HAL(pGpu, pKernelFifo, bEnable, offset) kfifoSetupBar1UserdSnoop(pGpu, pKernelFifo, bEnable, offset) + +NV_STATUS kfifoPreAllocUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoPreAllocUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoPreAllocUserD(pGpu, pKernelFifo) kfifoPreAllocUserD_GM107(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoPreAllocUserD_HAL(pGpu, pKernelFifo) kfifoPreAllocUserD(pGpu, pKernelFifo) + +void kfifoFreePreAllocUserD_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoFreePreAllocUserD(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoFreePreAllocUserD(pGpu, pKernelFifo) kfifoFreePreAllocUserD_GM107(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoFreePreAllocUserD_HAL(pGpu, pKernelFifo) kfifoFreePreAllocUserD(pGpu, pKernelFifo) + +static inline NvU64 kfifoGetUserdBar1MapStartOffset_4a4dee(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + return 0; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU64 kfifoGetUserdBar1MapStartOffset(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetUserdBar1MapStartOffset(pGpu, pKernelFifo) kfifoGetUserdBar1MapStartOffset_4a4dee(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetUserdBar1MapStartOffset_HAL(pGpu, pKernelFifo) kfifoGetUserdBar1MapStartOffset(pGpu, pKernelFifo) + +NV_STATUS kfifoGetUserdBar1MapInfo_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *bar1Offset, NvU32 *bar1MapSize); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetUserdBar1MapInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU64 *bar1Offset, NvU32 *bar1MapSize) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetUserdBar1MapInfo(pGpu, pKernelFifo, bar1Offset, bar1MapSize) kfifoGetUserdBar1MapInfo_GM107(pGpu, pKernelFifo, bar1Offset, bar1MapSize) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetUserdBar1MapInfo_HAL(pGpu, pKernelFifo, bar1Offset, bar1MapSize) kfifoGetUserdBar1MapInfo(pGpu, pKernelFifo, bar1Offset, bar1MapSize) + +void kfifoGetUserdSizeAlign_GM107(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pAddrShift); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoGetUserdSizeAlign(struct KernelFifo *pKernelFifo, NvU32 *pSize, NvU32 *pAddrShift) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetUserdSizeAlign(pKernelFifo, pSize, pAddrShift) kfifoGetUserdSizeAlign_GM107(pKernelFifo, pSize, pAddrShift) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetUserdSizeAlign_HAL(pKernelFifo, pSize, pAddrShift) kfifoGetUserdSizeAlign(pKernelFifo, pSize, pAddrShift) + +NV_STATUS kfifoGetUserdLocation_GM107(struct KernelFifo *pKernelFifo, NvU32 *pUserdAperture, 
NvU32 *pUserdAttribute); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetUserdLocation(struct KernelFifo *pKernelFifo, NvU32 *pUserdAperture, NvU32 *pUserdAttribute) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetUserdLocation(pKernelFifo, pUserdAperture, pUserdAttribute) kfifoGetUserdLocation_GM107(pKernelFifo, pUserdAperture, pUserdAttribute) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetUserdLocation_HAL(pKernelFifo, pUserdAperture, pUserdAttribute) kfifoGetUserdLocation(pKernelFifo, pUserdAperture, pUserdAttribute) + +NvU32 kfifoCalcTotalSizeOfFaultMethodBuffers_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bCalcForFbRsvd); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoCalcTotalSizeOfFaultMethodBuffers(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bCalcForFbRsvd) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoCalcTotalSizeOfFaultMethodBuffers(pGpu, pKernelFifo, bCalcForFbRsvd) kfifoCalcTotalSizeOfFaultMethodBuffers_GV100(pGpu, pKernelFifo, bCalcForFbRsvd) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoCalcTotalSizeOfFaultMethodBuffers_HAL(pGpu, pKernelFifo, bCalcForFbRsvd) kfifoCalcTotalSizeOfFaultMethodBuffers(pGpu, pKernelFifo, bCalcForFbRsvd) + +NV_STATUS kfifoCheckEngine_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvBool *pPresent); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoCheckEngine(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engDesc, NvBool *pPresent) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoCheckEngine(pGpu, pKernelFifo, engDesc, pPresent) kfifoCheckEngine_GM107(pGpu, pKernelFifo, engDesc, pPresent) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoCheckEngine_HAL(pGpu, pKernelFifo, engDesc, pPresent) kfifoCheckEngine(pGpu, pKernelFifo, engDesc, pPresent) + +NV_STATUS kfifoGetVChIdForSChId_FWCLIENT(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetVChIdForSChId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 chId, NvU32 gfid, NvU32 engineId, NvU32 *pVChid) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetVChIdForSChId(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) kfifoGetVChIdForSChId_FWCLIENT(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoGetVChIdForSChId_HAL(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) kfifoGetVChIdForSChId(pGpu, pKernelFifo, chId, gfid, engineId, pVChid) + +static inline NV_STATUS kfifoProgramChIdTable_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, struct HOST_VGPU_DEVICE *pHostVgpuDevice) { + return NV_OK; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoProgramChIdTable(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, struct HOST_VGPU_DEVICE *pHostVgpuDevice) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} 
+#else //__nvoc_kernel_fifo_h_disabled +#define kfifoProgramChIdTable(pGpu, pKernelFifo, pChidMgr, offset, numChannels, pHostVgpuDevice) kfifoProgramChIdTable_56cd7a(pGpu, pKernelFifo, pChidMgr, offset, numChannels, pHostVgpuDevice) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoProgramChIdTable_HAL(pGpu, pKernelFifo, pChidMgr, offset, numChannels, pHostVgpuDevice) kfifoProgramChIdTable(pGpu, pKernelFifo, pChidMgr, offset, numChannels, pHostVgpuDevice) + +static inline NV_STATUS kfifoRestoreSchedPolicy_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + return NV_OK; +} + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoRestoreSchedPolicy(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRestoreSchedPolicy(pGpu, pKernelFifo) kfifoRestoreSchedPolicy_56cd7a(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoRestoreSchedPolicy_HAL(pGpu, pKernelFifo) kfifoRestoreSchedPolicy(pGpu, pKernelFifo) + +NV_STATUS kfifoRunlistSetId_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 runlistId); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoRunlistSetId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 runlistId) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRunlistSetId(pGpu, pKernelFifo, arg0, runlistId) kfifoRunlistSetId_GM107(pGpu, pKernelFifo, arg0, runlistId) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoRunlistSetId_HAL(pGpu, pKernelFifo, arg0, runlistId) kfifoRunlistSetId(pGpu, pKernelFifo, arg0, runlistId) + +NV_STATUS kfifoRunlistSetIdByEngine_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 engDesc); + +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoRunlistSetIdByEngine(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, NvU32 engDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRunlistSetIdByEngine(pGpu, pKernelFifo, arg0, engDesc) kfifoRunlistSetIdByEngine_GM107(pGpu, pKernelFifo, arg0, engDesc) +#endif //__nvoc_kernel_fifo_h_disabled + +#define kfifoRunlistSetIdByEngine_HAL(pGpu, pKernelFifo, arg0, engDesc) kfifoRunlistSetIdByEngine(pGpu, pKernelFifo, arg0, engDesc) + +NV_STATUS kfifoConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGDESCRIPTOR engDesc); + +static inline NV_STATUS kfifoConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGDESCRIPTOR engDesc) { + return pKernelFifo->__kfifoConstructEngine__(pGpu, pKernelFifo, engDesc); +} + +NV_STATUS kfifoStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +static inline NV_STATUS kfifoStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + return pKernelFifo->__kfifoStateInitLocked__(pGpu, pKernelFifo); +} + +void kfifoStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +static inline void kfifoStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + pKernelFifo->__kfifoStateDestroy__(pGpu, pKernelFifo); +} + +NV_STATUS kfifoStatePostLoad_GM107(struct OBJGPU *pGpu, struct KernelFifo 
*pKernelFifo, NvU32 flags); + +static inline NV_STATUS kfifoStatePostLoad_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) { + return NV_OK; +} + +static inline NV_STATUS kfifoStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) { + return pKernelFifo->__kfifoStatePostLoad__(pGpu, pKernelFifo, flags); +} + +NV_STATUS kfifoStatePreUnload_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags); + +static inline NV_STATUS kfifoStatePreUnload_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) { + return NV_OK; +} + +static inline NV_STATUS kfifoStatePreUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 flags) { + return pKernelFifo->__kfifoStatePreUnload__(pGpu, pKernelFifo, flags); +} + +NvU32 kfifoChannelGroupGetLocalMaxSubcontext_GM107(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1); + +NvU32 kfifoChannelGroupGetLocalMaxSubcontext_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1); + +static inline NvU32 kfifoChannelGroupGetLocalMaxSubcontext_474d46(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 kfifoChannelGroupGetLocalMaxSubcontext_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *arg0, NvBool arg1) { + return pKernelFifo->__kfifoChannelGroupGetLocalMaxSubcontext__(pGpu, pKernelFifo, arg0, arg1); +} + +NV_STATUS kfifoEngineInfoXlate_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal); + +NV_STATUS kfifoEngineInfoXlate_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal); + +static inline NV_STATUS kfifoEngineInfoXlate_46f6a7(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kfifoEngineInfoXlate_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal) { + return pKernelFifo->__kfifoEngineInfoXlate__(pGpu, pKernelFifo, inType, inVal, outType, pOutVal); +} + +NV_STATUS kfifoGenerateWorkSubmitToken_TU102(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost); + +NV_STATUS kfifoGenerateWorkSubmitToken_GA100(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost); + +static inline NV_STATUS kfifoGenerateWorkSubmitToken_56cd7a(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost) { + return NV_OK; +} + +static inline NV_STATUS kfifoGenerateWorkSubmitToken_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *arg0, struct KernelChannel *arg1, NvU32 *pGeneratedToken, NvBool bUsedForHost) { + return arg0->__kfifoGenerateWorkSubmitToken__(pGpu, arg0, arg1, pGeneratedToken, bUsedForHost); +} + +NV_STATUS kfifoUpdateUsermodeDoorbell_TU102(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId); + +NV_STATUS kfifoUpdateUsermodeDoorbell_GA100(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 
runlisId); + +static inline NV_STATUS kfifoUpdateUsermodeDoorbell_46f6a7(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kfifoUpdateUsermodeDoorbell_DISPATCH(struct OBJGPU *arg0, struct KernelFifo *arg1, NvU32 workSubmitToken, NvU32 runlisId) { + return arg1->__kfifoUpdateUsermodeDoorbell__(arg0, arg1, workSubmitToken, runlisId); +} + +NvU32 kfifoRunlistGetBaseShift_GM107(struct KernelFifo *pKernelFifo); + +NvU32 kfifoRunlistGetBaseShift_GA100(struct KernelFifo *pKernelFifo); + +NvU32 kfifoRunlistGetBaseShift_GA102(struct KernelFifo *pKernelFifo); + +static inline NvU32 kfifoRunlistGetBaseShift_474d46(struct KernelFifo *pKernelFifo) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 kfifoRunlistGetBaseShift_DISPATCH(struct KernelFifo *pKernelFifo) { + return pKernelFifo->__kfifoRunlistGetBaseShift__(pKernelFifo); +} + +NvU32 kfifoGetMaxCeChannelGroups_GV100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +NvU32 kfifoGetMaxCeChannelGroups_GA100(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); + +static inline NvU32 kfifoGetMaxCeChannelGroups_474d46(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 kfifoGetMaxCeChannelGroups_DISPATCH(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + return pKernelFifo->__kfifoGetMaxCeChannelGroups__(pGpu, pKernelFifo); +} + +void kfifoSetupUserD_GM107(struct KernelFifo *pKernelFifo, NvU8 *pUserD); + +void kfifoSetupUserD_GA100(struct KernelFifo *pKernelFifo, NvU8 *pUserD); + +static inline void kfifoSetupUserD_f2d351(struct KernelFifo *pKernelFifo, NvU8 *pUserD) { + NV_ASSERT_PRECOMP(0); +} + +static inline void kfifoSetupUserD_DISPATCH(struct KernelFifo *pKernelFifo, NvU8 *pUserD) { + pKernelFifo->__kfifoSetupUserD__(pKernelFifo, pUserD); +} + +static inline NV_STATUS kfifoReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunableState) { + return pEngstate->__kfifoReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kfifoStateLoad_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) { + return pEngstate->__kfifoStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kfifoStateUnload_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) { + return pEngstate->__kfifoStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kfifoStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) { + return pEngstate->__kfifoStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kfifoStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, NvU32 arg0) { + return pEngstate->__kfifoStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kfifoStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) { + return pEngstate->__kfifoStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kfifoInitMissing_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) { + pEngstate->__kfifoInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kfifoStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) { + return pEngstate->__kfifoStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kfifoStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) { + return pEngstate->__kfifoStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS 
kfifoGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunableState) { + return pEngstate->__kfifoGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kfifoCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kfifoCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kfifoFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunableState) { + pEngstate->__kfifoFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kfifoAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, void **ppTunableState) { + return pEngstate->__kfifoAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kfifoSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate, void *pTunableState) { + return pEngstate->__kfifoSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kfifoIsPresent_DISPATCH(POBJGPU pGpu, struct KernelFifo *pEngstate) { + return pEngstate->__kfifoIsPresent__(pGpu, pEngstate); +} + +static inline NV_STATUS kfifoChidMgrReserveSystemChids(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 numChannels, struct HOST_VGPU_DEVICE *pHostVgpuDevice) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kfifoChidMgrFreeSystemChids(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, struct HOST_VGPU_DEVICE *pHostVgpuDevice) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kfifoSetChidOffset(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 offset, NvU32 numChannels, struct HOST_VGPU_DEVICE *pHostVgpuDevice) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline const ENGINE_INFO *kfifoGetEngineInfo(struct KernelFifo *pKernelFifo) { + if (pKernelFifo->engineInfo.engineInfoList == ((void *)0)) + return ((void *)0); + return &pKernelFifo->engineInfo; +} + +static inline const PREALLOCATED_USERD_INFO *kfifoGetPreallocatedUserdInfo(struct KernelFifo *pKernelFifo) { + return &pKernelFifo->userdInfo; +} + +static inline NvBool kfifoIsPerRunlistChramEnabled(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bUsePerRunlistChram; +} + +static inline NvBool kfifoIsPerRunlistChramSupportedInHw(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bIsPerRunlistChramSupportedInHw; +} + +static inline NvBool kfifoIsChidHeapEnabled(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bUseChidHeap; +} + +static inline NvBool kfifoIsHostEngineExpansionSupported(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bHostEngineExpansion; +} + +static inline NvBool kfifoIsSubcontextSupported(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bSubcontextSupported; +} + +static inline NvBool kfifoHostHasLbOverflow(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bHostHasLbOverflow; +} + +static inline NvBool kfifoIsUserdInSystemMemory(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bUserdInSystemMemory; +} + +static inline NvBool kfifoIsUserdMapDmaSupported(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bUserdMapDmaSupported; +} + +static inline NvBool kfifoIsMixedInstmemApertureDefAllowed(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bMixedInstmemApertureDefAllowed; +} + +static inline NvBool kfifoIsZombieSubctxWarEnabled(struct KernelFifo *pKernelFifo) { + return 
pKernelFifo->bIsZombieSubctxWarEnabled; +} + +static inline NvBool kfifoIsWddmInterleavingPolicyEnabled(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bWddmInterleavingPolicyEnabled; +} + +static inline NvBool kfifoIsSchedSupported(struct KernelFifo *pKernelFifo) { + return pKernelFifo->bIsSchedSupported; +} + +static inline struct KernelSchedMgr *kfifoGetKernelSchedMgr(struct KernelFifo *pKernelFifo) { + return (struct KernelSchedMgr *)pKernelFifo->pKernelSchedMgr; +} + +static inline MEMORY_DESCRIPTOR *kfifoGetDummyPageMemDesc(struct KernelFifo *pKernelFifo) { + return pKernelFifo->pDummyPageMemDesc; +} + +void kfifoDestruct_IMPL(struct KernelFifo *pKernelFifo); +#define __nvoc_kfifoDestruct(pKernelFifo) kfifoDestruct_IMPL(pKernelFifo) +NV_STATUS kfifoChidMgrConstruct_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChidMgrConstruct(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrConstruct(pGpu, pKernelFifo) kfifoChidMgrConstruct_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +void kfifoChidMgrDestruct_IMPL(struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoChidMgrDestruct(struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrDestruct(pKernelFifo) kfifoChidMgrDestruct_IMPL(pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChidMgrAllocChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvHandle hClient, CHANNEL_HW_ID_ALLOC_MODE arg0, NvBool bForceInternalIdx, NvU32 internalIdx, NvBool bForceUserdPage, NvU32 userdPageIdx, NvU32 ChID, struct KernelChannel *arg1); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChidMgrAllocChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvHandle hClient, CHANNEL_HW_ID_ALLOC_MODE arg0, NvBool bForceInternalIdx, NvU32 internalIdx, NvBool bForceUserdPage, NvU32 userdPageIdx, NvU32 ChID, struct KernelChannel *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrAllocChid(pGpu, pKernelFifo, pChidMgr, hClient, arg0, bForceInternalIdx, internalIdx, bForceUserdPage, userdPageIdx, ChID, arg1) kfifoChidMgrAllocChid_IMPL(pGpu, pKernelFifo, pChidMgr, hClient, arg0, bForceInternalIdx, internalIdx, bForceUserdPage, userdPageIdx, ChID, arg1) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChidMgrRetainChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChidMgrRetainChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrRetainChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrRetainChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChidMgrReleaseChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS 
kfifoChidMgrReleaseChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrReleaseChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrReleaseChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChidMgrFreeChid_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChidMgrFreeChid(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrFreeChid(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrFreeChid_IMPL(pGpu, pKernelFifo, pChidMgr, ChID) +#endif //__nvoc_kernel_fifo_h_disabled + +NvU32 kfifoChidMgrGetNumChannels_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoChidMgrGetNumChannels(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr) kfifoChidMgrGetNumChannels_IMPL(pGpu, pKernelFifo, pChidMgr) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChidMgrAllocChannelGroupHwID_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 *pGrpId); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChidMgrAllocChannelGroupHwID(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 *pGrpId) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrAllocChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, pGrpId) kfifoChidMgrAllocChannelGroupHwID_IMPL(pGpu, pKernelFifo, pChidMgr, pGrpId) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChidMgrFreeChannelGroupHwID_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpId); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChidMgrFreeChannelGroupHwID(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpId) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrFreeChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, grpId) kfifoChidMgrFreeChannelGroupHwID_IMPL(pGpu, pKernelFifo, pChidMgr, grpId) +#endif //__nvoc_kernel_fifo_h_disabled + +struct KernelChannelGroup *kfifoChidMgrGetKernelChannelGroup_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpID); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline struct KernelChannelGroup *kfifoChidMgrGetKernelChannelGroup(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 grpID) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NULL; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrGetKernelChannelGroup(pGpu, pKernelFifo, pChidMgr, grpID) kfifoChidMgrGetKernelChannelGroup_IMPL(pGpu, pKernelFifo, pChidMgr, grpID) +#endif //__nvoc_kernel_fifo_h_disabled + +struct KernelChannel 
*kfifoChidMgrGetKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline struct KernelChannel *kfifoChidMgrGetKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHID_MGR *pChidMgr, NvU32 ChID) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NULL; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, pChidMgr, ChID) kfifoChidMgrGetKernelChannel_IMPL(pGpu, pKernelFifo, pChidMgr, ChID) +#endif //__nvoc_kernel_fifo_h_disabled + +CHID_MGR *kfifoGetChidMgr_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline CHID_MGR *kfifoGetChidMgr(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NULL; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetChidMgr(pGpu, pKernelFifo, runlistId) kfifoGetChidMgr_IMPL(pGpu, pKernelFifo, runlistId) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoGetChidMgrFromType_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineType, NvU32 value, CHID_MGR **arg0); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetChidMgrFromType(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineType, NvU32 value, CHID_MGR **arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetChidMgrFromType(pGpu, pKernelFifo, engineType, value, arg0) kfifoGetChidMgrFromType_IMPL(pGpu, pKernelFifo, engineType, value, arg0) +#endif //__nvoc_kernel_fifo_h_disabled + +struct KernelChannelGroup *kfifoGetChannelGroup_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 grpID, NvU32 runlistID); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline struct KernelChannelGroup *kfifoGetChannelGroup(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 grpID, NvU32 runlistID) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NULL; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetChannelGroup(pGpu, pKernelFifo, grpID, runlistID) kfifoGetChannelGroup_IMPL(pGpu, pKernelFifo, grpID, runlistID) +#endif //__nvoc_kernel_fifo_h_disabled + +NvU32 kfifoGetChannelGroupsInUse_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetChannelGroupsInUse(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetChannelGroupsInUse(pGpu, pKernelFifo) kfifoGetChannelGroupsInUse_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +NvU32 kfifoGetRunlistChannelGroupsInUse_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetRunlistChannelGroupsInUse(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetRunlistChannelGroupsInUse(pGpu, pKernelFifo, runlistId) kfifoGetRunlistChannelGroupsInUse_IMPL(pGpu, pKernelFifo, runlistId) +#endif //__nvoc_kernel_fifo_h_disabled + +void kfifoGetChannelIterator_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, 
CHANNEL_ITERATOR *pIt); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoGetChannelIterator(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetChannelIterator(pGpu, pKernelFifo, pIt) kfifoGetChannelIterator_IMPL(pGpu, pKernelFifo, pIt) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoGetNextKernelChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetNextKernelChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_ITERATOR *pIt, struct KernelChannel **ppKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetNextKernelChannel(pGpu, pKernelFifo, pIt, ppKernelChannel) kfifoGetNextKernelChannel_IMPL(pGpu, pKernelFifo, pIt, ppKernelChannel) +#endif //__nvoc_kernel_fifo_h_disabled + +void kfifoFillMemInfo_IMPL(struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc, NV2080_CTRL_FIFO_MEM_INFO *pMemory); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoFillMemInfo(struct KernelFifo *pKernelFifo, MEMORY_DESCRIPTOR *pMemDesc, NV2080_CTRL_FIFO_MEM_INFO *pMemory) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoFillMemInfo(pKernelFifo, pMemDesc, pMemory) kfifoFillMemInfo_IMPL(pKernelFifo, pMemDesc, pMemory) +#endif //__nvoc_kernel_fifo_h_disabled + +NvU32 kfifoGetAllocatedChannelMask_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetAllocatedChannelMask(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetAllocatedChannelMask(pGpu, pKernelFifo) kfifoGetAllocatedChannelMask_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChannelListCreate_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST **arg0); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChannelListCreate(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST **arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChannelListCreate(pGpu, pKernelFifo, arg0) kfifoChannelListCreate_IMPL(pGpu, pKernelFifo, arg0) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChannelListDestroy_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST *arg0); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChannelListDestroy(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, CHANNEL_LIST *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChannelListDestroy(pGpu, pKernelFifo, arg0) kfifoChannelListDestroy_IMPL(pGpu, pKernelFifo, arg0) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChannelListAppend_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChannelListAppend(struct OBJGPU *pGpu, struct KernelFifo 
*pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChannelListAppend(pGpu, pKernelFifo, arg0, arg1) kfifoChannelListAppend_IMPL(pGpu, pKernelFifo, arg0, arg1) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChannelListRemove_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChannelListRemove(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannel *arg0, CHANNEL_LIST *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChannelListRemove(pGpu, pKernelFifo, arg0, arg1) kfifoChannelListRemove_IMPL(pGpu, pKernelFifo, arg0, arg1) +#endif //__nvoc_kernel_fifo_h_disabled + +NvBool kfifoEngineListHasChannel_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 *arg0, NvU32 arg1); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvBool kfifoEngineListHasChannel(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoEngineListHasChannel(pGpu, pKernelFifo, arg0, arg1) kfifoEngineListHasChannel_IMPL(pGpu, pKernelFifo, arg0, arg1) +#endif //__nvoc_kernel_fifo_h_disabled + +CTX_BUF_POOL_INFO *kfifoGetRunlistBufPool_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineType); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline CTX_BUF_POOL_INFO *kfifoGetRunlistBufPool(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 engineType) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NULL; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetRunlistBufPool(pGpu, pKernelFifo, engineType) kfifoGetRunlistBufPool_IMPL(pGpu, pKernelFifo, engineType) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoGetRunlistBufInfo_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvBool arg1, NvU32 arg2, NvU64 *arg3, NvU64 *arg4); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetRunlistBufInfo(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 arg0, NvBool arg1, NvU32 arg2, NvU64 *arg3, NvU64 *arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetRunlistBufInfo(pGpu, pKernelFifo, arg0, arg1, arg2, arg3, arg4) kfifoGetRunlistBufInfo_IMPL(pGpu, pKernelFifo, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoAddSchedulingHandler_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoAddSchedulingHandler(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define 
kfifoAddSchedulingHandler(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) kfifoAddSchedulingHandler_IMPL(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) +#endif //__nvoc_kernel_fifo_h_disabled + +void kfifoRemoveSchedulingHandler_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoRemoveSchedulingHandler(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, PFifoSchedulingHandler pPostSchedulingEnableHandler, void *pPostSchedulingEnableHandlerData, PFifoSchedulingHandler pPreSchedulingDisableHandler, void *pPreSchedulingDisableHandlerData) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRemoveSchedulingHandler(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) kfifoRemoveSchedulingHandler_IMPL(pGpu, pKernelFifo, pPostSchedulingEnableHandler, pPostSchedulingEnableHandlerData, pPreSchedulingDisableHandler, pPreSchedulingDisableHandlerData) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoTriggerPostSchedulingEnableCallback_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoTriggerPostSchedulingEnableCallback(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoTriggerPostSchedulingEnableCallback(pGpu, pKernelFifo) kfifoTriggerPostSchedulingEnableCallback_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoTriggerPreSchedulingDisableCallback_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoTriggerPreSchedulingDisableCallback(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoTriggerPreSchedulingDisableCallback(pGpu, pKernelFifo) kfifoTriggerPreSchedulingDisableCallback_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +NvU32 kfifoGetMaxChannelsInSystem_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetMaxChannelsInSystem(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetMaxChannelsInSystem(pGpu, pKernelFifo) kfifoGetMaxChannelsInSystem_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +NvU32 kfifoGetMaxChannelGroupsInSystem_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetMaxChannelGroupsInSystem(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define 
kfifoGetMaxChannelGroupsInSystem(pGpu, pKernelFifo) kfifoGetMaxChannelGroupsInSystem_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +void kfifoGetDeviceCaps_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU8 *pKfifoCaps, NvBool bCapsInitialized); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline void kfifoGetDeviceCaps(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU8 *pKfifoCaps, NvBool bCapsInitialized) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetDeviceCaps(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized) kfifoGetDeviceCaps_IMPL(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized) +#endif //__nvoc_kernel_fifo_h_disabled + +NvU32 kfifoReturnPushbufferCaps_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoReturnPushbufferCaps(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoReturnPushbufferCaps(pGpu, pKernelFifo) kfifoReturnPushbufferCaps_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +void kfifoRunlistGetBufAllocParams_IMPL(struct OBJGPU *pGpu, NV_ADDRESS_SPACE *pAperture, NvU32 *pAttr, NvU64 *pAllocFlags); +#define kfifoRunlistGetBufAllocParams(pGpu, pAperture, pAttr, pAllocFlags) kfifoRunlistGetBufAllocParams_IMPL(pGpu, pAperture, pAttr, pAllocFlags) +NV_STATUS kfifoRunlistAllocBuffers_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bSupportTsg, NV_ADDRESS_SPACE aperture, NvU32 runlistId, NvU32 attr, NvU64 allocFlags, NvU64 maxRunlistEntries, NvBool bHWRL, PMEMORY_DESCRIPTOR *ppMemDesc); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoRunlistAllocBuffers(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvBool bSupportTsg, NV_ADDRESS_SPACE aperture, NvU32 runlistId, NvU32 attr, NvU64 allocFlags, NvU64 maxRunlistEntries, NvBool bHWRL, PMEMORY_DESCRIPTOR *ppMemDesc) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoRunlistAllocBuffers(pGpu, pKernelFifo, bSupportTsg, aperture, runlistId, attr, allocFlags, maxRunlistEntries, bHWRL, ppMemDesc) kfifoRunlistAllocBuffers_IMPL(pGpu, pKernelFifo, bSupportTsg, aperture, runlistId, attr, allocFlags, maxRunlistEntries, bHWRL, ppMemDesc) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoGetEngineListForRunlist_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, NvU32 *pOutEngineIds, NvU32 *pNumEngines); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoGetEngineListForRunlist(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, NvU32 runlistId, NvU32 *pOutEngineIds, NvU32 *pNumEngines) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoGetEngineListForRunlist(pGpu, pKernelFifo, runlistId, pOutEngineIds, pNumEngines) kfifoGetEngineListForRunlist_IMPL(pGpu, pKernelFifo, runlistId, pOutEngineIds, pNumEngines) +#endif //__nvoc_kernel_fifo_h_disabled + +NvU32 kfifoGetChannelClassId_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NvU32 kfifoGetChannelClassId(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return 0; +} +#else 
//__nvoc_kernel_fifo_h_disabled +#define kfifoGetChannelClassId(pGpu, pKernelFifo) kfifoGetChannelClassId_IMPL(pGpu, pKernelFifo) +#endif //__nvoc_kernel_fifo_h_disabled + +NV_STATUS kfifoChannelGroupSetTimeslice_IMPL(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit); +#ifdef __nvoc_kernel_fifo_h_disabled +static inline NV_STATUS kfifoChannelGroupSetTimeslice(struct OBJGPU *pGpu, struct KernelFifo *pKernelFifo, struct KernelChannelGroup *pKernelChannelGroup, NvU64 timesliceUs, NvBool bSkipSubmit) { + NV_ASSERT_FAILED_PRECOMP("KernelFifo was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_fifo_h_disabled +#define kfifoChannelGroupSetTimeslice(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) kfifoChannelGroupSetTimeslice_IMPL(pGpu, pKernelFifo, pKernelChannelGroup, timesliceUs, bSkipSubmit) +#endif //__nvoc_kernel_fifo_h_disabled + +#undef PRIVATE_FIELD + + +NV_STATUS RmIdleChannels(NvHandle hClient, + NvHandle hDevice, + NvHandle hChannel, + NvU32 numChannels, + NvP64 clients, + NvP64 devices, + NvP64 channels, + NvU32 flags, + NvU32 timeout, + NvBool bUserModeArgs); + +#endif // _KERNELFIFO_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_FIFO_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_graphics_context_nvoc.c b/src/nvidia/generated/g_kernel_graphics_context_nvoc.c new file mode 100644 index 000000000..f4e7a62c3 --- /dev/null +++ b/src/nvidia/generated/g_kernel_graphics_context_nvoc.c @@ -0,0 +1,595 @@ +#define NVOC_KERNEL_GRAPHICS_CONTEXT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_graphics_context_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7ead09 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsContext; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_KernelGraphicsContext(KernelGraphicsContext*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelGraphicsContext(KernelGraphicsContext*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelGraphicsContext(KernelGraphicsContext*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_KernelGraphicsContext(KernelGraphicsContext*, RmHalspecOwner* ); +void __nvoc_dtor_KernelGraphicsContext(KernelGraphicsContext*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphicsContext; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsContext_KernelGraphicsContext = { + /*pClassDef=*/ &__nvoc_class_def_KernelGraphicsContext, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelGraphicsContext, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsContext_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsContext, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI 
__nvoc_rtti_KernelGraphicsContext_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsContext, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsContext_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsContext, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsContext_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsContext, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsContext_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsContext, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGraphicsContext = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_KernelGraphicsContext_KernelGraphicsContext, + &__nvoc_rtti_KernelGraphicsContext_GpuResource, + &__nvoc_rtti_KernelGraphicsContext_RmResource, + &__nvoc_rtti_KernelGraphicsContext_RmResourceCommon, + &__nvoc_rtti_KernelGraphicsContext_RsResource, + &__nvoc_rtti_KernelGraphicsContext_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsContext = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelGraphicsContext), + /*classId=*/ classId(KernelGraphicsContext), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelGraphicsContext", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelGraphicsContext, + /*pCastInfo=*/ &__nvoc_castinfo_KernelGraphicsContext, + /*pExportInfo=*/ &__nvoc_export_info_KernelGraphicsContext +}; + +static NvBool __nvoc_thunk_KernelGraphicsContext_resCanCopy(struct RsResource *arg0) { + return kgrctxCanCopy((struct KernelGraphicsContext *)(((unsigned char *)arg0) - __nvoc_rtti_KernelGraphicsContext_RsResource.offset)); +} + +static NvHandle __nvoc_thunk_KernelGraphicsContext_gpuresGetInternalObjectHandle(struct GpuResource *arg0) { + return kgrctxGetInternalObjectHandle((struct KernelGraphicsContext *)(((unsigned char *)arg0) - __nvoc_rtti_KernelGraphicsContext_GpuResource.offset)); +} + +static NvBool __nvoc_thunk_GpuResource_kgrctxShareCallback(struct KernelGraphicsContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsContext_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrctxControl(struct KernelGraphicsContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsContext_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrctxUnmap(struct KernelGraphicsContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + 
__nvoc_rtti_KernelGraphicsContext_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_kgrctxGetMemInterMapParams(struct KernelGraphicsContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelGraphicsContext_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_kgrctxGetMemoryMappingDescriptor(struct KernelGraphicsContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelGraphicsContext_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrctxGetMapAddrSpace(struct KernelGraphicsContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsContext_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NV_STATUS __nvoc_thunk_RsResource_kgrctxControlFilter(struct KernelGraphicsContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_kgrctxAddAdditionalDependants(struct RsClient *pClient, struct KernelGraphicsContext *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_kgrctxGetRefCount(struct KernelGraphicsContext *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_kgrctxCheckMemInterUnmap(struct KernelGraphicsContext *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelGraphicsContext_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_kgrctxMapTo(struct KernelGraphicsContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_kgrctxControl_Prologue(struct KernelGraphicsContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrctxGetRegBaseOffsetAndSize(struct KernelGraphicsContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsContext_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrctxInternalControlForward(struct KernelGraphicsContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char 
*)pGpuResource) + __nvoc_rtti_KernelGraphicsContext_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_kgrctxPreDestruct(struct KernelGraphicsContext *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_kgrctxUnmapFrom(struct KernelGraphicsContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_kgrctxControl_Epilogue(struct KernelGraphicsContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_kgrctxControlLookup(struct KernelGraphicsContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrctxMap(struct KernelGraphicsContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsContext_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_kgrctxAccessCallback(struct KernelGraphicsContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsContext_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_KernelGraphicsContext[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kgrctxCtrlSetTpcPartitionMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900101u, + /*paramSize=*/ sizeof(NV0090_CTRL_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelGraphicsContext.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kgrctxCtrlSetTpcPartitionMode" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kgrctxCtrlGetTpcPartitionMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900103u, + /*paramSize=*/ sizeof(NV0090_CTRL_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelGraphicsContext.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kgrctxCtrlGetTpcPartitionMode" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kgrctxCtrlGetMMUDebugMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + 
/*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900105u, + /*paramSize=*/ sizeof(NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelGraphicsContext.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kgrctxCtrlGetMMUDebugMode" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) kgrctxCtrlProgramVidmemPromote_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x900107u, + /*paramSize=*/ sizeof(NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelGraphicsContext.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "kgrctxCtrlProgramVidmemPromote" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphicsContext = +{ + /*numEntries=*/ 4, + /*pExportEntries=*/ __nvoc_exported_method_def_KernelGraphicsContext +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_KernelGraphicsContext(KernelGraphicsContext *pThis) { + __nvoc_kgrctxDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelGraphicsContext(KernelGraphicsContext *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_KernelGraphicsContext(KernelGraphicsContext *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphicsContext_fail_GpuResource; + __nvoc_init_dataField_KernelGraphicsContext(pThis, pRmhalspecowner); + + status = __nvoc_kgrctxConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphicsContext_fail__init; + goto __nvoc_ctor_KernelGraphicsContext_exit; // Success + +__nvoc_ctor_KernelGraphicsContext_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_KernelGraphicsContext_fail_GpuResource: +__nvoc_ctor_KernelGraphicsContext_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelGraphicsContext_1(KernelGraphicsContext *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + 
PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kgrctxCanCopy__ = &kgrctxCanCopy_0c883b; + + pThis->__kgrctxGetInternalObjectHandle__ = &kgrctxGetInternalObjectHandle_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kgrctxCtrlGetTpcPartitionMode__ = &kgrctxCtrlGetTpcPartitionMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kgrctxCtrlSetTpcPartitionMode__ = &kgrctxCtrlSetTpcPartitionMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kgrctxCtrlGetMMUDebugMode__ = &kgrctxCtrlGetMMUDebugMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__kgrctxCtrlProgramVidmemPromote__ = &kgrctxCtrlProgramVidmemPromote_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_KernelGraphicsContext_resCanCopy; + + pThis->__nvoc_base_GpuResource.__gpuresGetInternalObjectHandle__ = &__nvoc_thunk_KernelGraphicsContext_gpuresGetInternalObjectHandle; + + pThis->__kgrctxShareCallback__ = &__nvoc_thunk_GpuResource_kgrctxShareCallback; + + pThis->__kgrctxControl__ = &__nvoc_thunk_GpuResource_kgrctxControl; + + pThis->__kgrctxUnmap__ = &__nvoc_thunk_GpuResource_kgrctxUnmap; + + pThis->__kgrctxGetMemInterMapParams__ = &__nvoc_thunk_RmResource_kgrctxGetMemInterMapParams; + + pThis->__kgrctxGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_kgrctxGetMemoryMappingDescriptor; + + pThis->__kgrctxGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_kgrctxGetMapAddrSpace; + + pThis->__kgrctxControlFilter__ = &__nvoc_thunk_RsResource_kgrctxControlFilter; + + pThis->__kgrctxAddAdditionalDependants__ = &__nvoc_thunk_RsResource_kgrctxAddAdditionalDependants; + + pThis->__kgrctxGetRefCount__ = &__nvoc_thunk_RsResource_kgrctxGetRefCount; + + pThis->__kgrctxCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_kgrctxCheckMemInterUnmap; + + pThis->__kgrctxMapTo__ = &__nvoc_thunk_RsResource_kgrctxMapTo; + + pThis->__kgrctxControl_Prologue__ = &__nvoc_thunk_RmResource_kgrctxControl_Prologue; + + pThis->__kgrctxGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_kgrctxGetRegBaseOffsetAndSize; + + pThis->__kgrctxInternalControlForward__ = &__nvoc_thunk_GpuResource_kgrctxInternalControlForward; + + pThis->__kgrctxPreDestruct__ = &__nvoc_thunk_RsResource_kgrctxPreDestruct; + + pThis->__kgrctxUnmapFrom__ = &__nvoc_thunk_RsResource_kgrctxUnmapFrom; + + pThis->__kgrctxControl_Epilogue__ = &__nvoc_thunk_RmResource_kgrctxControl_Epilogue; + + pThis->__kgrctxControlLookup__ = &__nvoc_thunk_RsResource_kgrctxControlLookup; + + pThis->__kgrctxMap__ = &__nvoc_thunk_GpuResource_kgrctxMap; + + pThis->__kgrctxAccessCallback__ = &__nvoc_thunk_RmResource_kgrctxAccessCallback; +} + +void __nvoc_init_funcTable_KernelGraphicsContext(KernelGraphicsContext *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelGraphicsContext_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_KernelGraphicsContext(KernelGraphicsContext *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelGraphicsContext = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = 
&pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_KernelGraphicsContext(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelGraphicsContext(KernelGraphicsContext **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + KernelGraphicsContext *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelGraphicsContext)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelGraphicsContext)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelGraphicsContext); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelGraphicsContext(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelGraphicsContext(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_KernelGraphicsContext_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelGraphicsContext_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphicsContext(KernelGraphicsContext **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_KernelGraphicsContext(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xe7abeb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsContextShared; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_KernelGraphicsContextShared(KernelGraphicsContextShared*); +void __nvoc_init_funcTable_KernelGraphicsContextShared(KernelGraphicsContextShared*); +NV_STATUS __nvoc_ctor_KernelGraphicsContextShared(KernelGraphicsContextShared*); +void __nvoc_init_dataField_KernelGraphicsContextShared(KernelGraphicsContextShared*); +void __nvoc_dtor_KernelGraphicsContextShared(KernelGraphicsContextShared*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphicsContextShared; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsContextShared_KernelGraphicsContextShared = { + /*pClassDef=*/ &__nvoc_class_def_KernelGraphicsContextShared, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelGraphicsContextShared, + /*offset=*/ 0, +}; + 
+static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsContextShared_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsContextShared, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsContextShared_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsContextShared, __nvoc_base_RsShared), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGraphicsContextShared = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelGraphicsContextShared_KernelGraphicsContextShared, + &__nvoc_rtti_KernelGraphicsContextShared_RsShared, + &__nvoc_rtti_KernelGraphicsContextShared_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsContextShared = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelGraphicsContextShared), + /*classId=*/ classId(KernelGraphicsContextShared), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelGraphicsContextShared", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelGraphicsContextShared, + /*pCastInfo=*/ &__nvoc_castinfo_KernelGraphicsContextShared, + /*pExportInfo=*/ &__nvoc_export_info_KernelGraphicsContextShared +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphicsContextShared = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_KernelGraphicsContextShared(KernelGraphicsContextShared *pThis) { + __nvoc_shrkgrctxDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelGraphicsContextShared(KernelGraphicsContextShared *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_KernelGraphicsContextShared(KernelGraphicsContextShared *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphicsContextShared_fail_RsShared; + __nvoc_init_dataField_KernelGraphicsContextShared(pThis); + + status = __nvoc_shrkgrctxConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphicsContextShared_fail__init; + goto __nvoc_ctor_KernelGraphicsContextShared_exit; // Success + +__nvoc_ctor_KernelGraphicsContextShared_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_KernelGraphicsContextShared_fail_RsShared: +__nvoc_ctor_KernelGraphicsContextShared_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelGraphicsContextShared_1(KernelGraphicsContextShared *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_KernelGraphicsContextShared(KernelGraphicsContextShared *pThis) { + __nvoc_init_funcTable_KernelGraphicsContextShared_1(pThis); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_KernelGraphicsContextShared(KernelGraphicsContextShared *pThis) { + pThis->__nvoc_pbase_KernelGraphicsContextShared = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_KernelGraphicsContextShared(pThis); +} + +NV_STATUS __nvoc_objCreate_KernelGraphicsContextShared(KernelGraphicsContextShared **ppThis, 
Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelGraphicsContextShared *pThis; + + pThis = portMemAllocNonPaged(sizeof(KernelGraphicsContextShared)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelGraphicsContextShared)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelGraphicsContextShared); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_KernelGraphicsContextShared(pThis); + status = __nvoc_ctor_KernelGraphicsContextShared(pThis); + if (status != NV_OK) goto __nvoc_objCreate_KernelGraphicsContextShared_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelGraphicsContextShared_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphicsContextShared(KernelGraphicsContextShared **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelGraphicsContextShared(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_graphics_context_nvoc.h b/src/nvidia/generated/g_kernel_graphics_context_nvoc.h new file mode 100644 index 000000000..317885468 --- /dev/null +++ b/src/nvidia/generated/g_kernel_graphics_context_nvoc.h @@ -0,0 +1,1024 @@ +#ifndef _G_KERNEL_GRAPHICS_CONTEXT_NVOC_H_ +#define _G_KERNEL_GRAPHICS_CONTEXT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_graphics_context_nvoc.h" + +#ifndef _KERNEL_GRAPHICS_CONTEXT_H_ +#define _KERNEL_GRAPHICS_CONTEXT_H_ + +#include "gpu/gpu_resource.h" +#include "resserv/rs_server.h" +#include "resserv/rs_resource.h" +#include "ctrl/ctrl83de.h" +#include "ctrl/ctrl0090.h" +#include "mmu/gmmu_fmt.h" +#include "gpu/gpu_halspec.h" +#include "utils/nv_enum.h" +#include "mem_mgr/vaddr_list.h" + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + +struct KernelChannelGroupApi; + +#ifndef __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +typedef struct KernelChannelGroupApi KernelChannelGroupApi; +#endif /* __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroupApi +#define __nvoc_class_id_KernelChannelGroupApi 0x2b5b80 +#endif /* __nvoc_class_id_KernelChannelGroupApi */ + + + +typedef struct +{ + const NV_ADDRESS_SPACE *pAllocList; + NvU32 cpuAttr; +} GR_BUFFER_ATTR; + +typedef struct +{ + MEMORY_DESCRIPTOR *memDesc; + VA_LIST vAddrList; +} GR_CTX_PATCHBUFFER; + +typedef struct +{ + MEMORY_DESCRIPTOR *memDesc; + VA_LIST vAddrList; +} GR_CTX_ZCULLBUFFER; + +typedef struct +{ + MEMORY_DESCRIPTOR *memDesc; + VA_LIST vAddrList; +} GR_CTX_PMBUFFER; + +typedef enum +{ + GR_OBJECT_TYPE_3D, + GR_OBJECT_TYPE_2D, + GR_OBJECT_TYPE_MEM, + GR_OBJECT_TYPE_COMPUTE, + GR_OBJECT_TYPE_INVALID, +} GR_OBJECT_TYPE; + +typedef struct +{ + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 virtualAddr; +} GR_CTX_PREEMPTBUFFER; + +/* + * Global buffer types. These are shared between contexts + * each PF/VF context normally. A GraphicsContext may have + * a private allocation for security (VPR) or when + * graphics preemption is enabled. + * + * Not all buffer types are supported on every GPU. 
+ */ +#define GR_GLOBALCTX_BUFFER_DEF(x) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_BUNDLE_CB, 0x00000000) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_PAGEPOOL, 0x00000001) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB, 0x00000002) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_RTV_CB, 0x00000003) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_GFXP_POOL, 0x00000004) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_GFXP_CTRL_BLK, 0x00000005) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_FECS_EVENT, 0x00000006) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP, 0x00000007) \ + NV_ENUM_ENTRY(x, GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP, 0x00000008) \ + NV_ENUM_ENTRY(x, GR_GLOBAL_BUFFER_GLOBAL_PRIV_ACCESS_MAP, 0x00000009) + +NV_ENUM_DEF(GR_GLOBALCTX_BUFFER, GR_GLOBALCTX_BUFFER_DEF) +#define GR_GLOBALCTX_BUFFER_COUNT NV_ENUM_SIZE(GR_GLOBALCTX_BUFFER) + + +#define GR_CTX_BUFFER_DEF(x) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_MAIN, 0x00000000) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_ZCULL, 0x00000001) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_PM, 0x00000002) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_PREEMPT, 0x00000003) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_SPILL, 0x00000004) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_BETA_CB, 0x00000005) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_PAGEPOOL, 0x00000006) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_RTV_CB, 0x00000007) \ + NV_ENUM_ENTRY(x, GR_CTX_BUFFER_PATCH, 0x00000008) + +NV_ENUM_DEF(GR_CTX_BUFFER, GR_CTX_BUFFER_DEF) + +typedef struct +{ + MEMORY_DESCRIPTOR *memDesc[GR_GLOBALCTX_BUFFER_COUNT]; + NvBool bAllocated; + NvBool bFecsBufferAllocated; // FIXME merge this with bAllocated + + // Tracks whether Physical has initialized the memory descriptor for the promoted Kernel buffer + NvBool bInitialized[GR_GLOBALCTX_BUFFER_COUNT]; + + // Check if vGPU Guest is running with FECS Trace feature supported driver + NvBool bFecsTraceUnsupportedInGuest; +} GR_GLOBALCTX_BUFFERS; + +struct KernelGraphicsContextUnicast; +typedef struct KernelGraphicsContextUnicast KernelGraphicsContextUnicast; + +struct KernelGraphics; + +#ifndef __NVOC_CLASS_KernelGraphics_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphics_TYPEDEF__ +typedef struct KernelGraphics KernelGraphics; +#endif /* __NVOC_CLASS_KernelGraphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphics +#define __nvoc_class_id_KernelGraphics 0xea3fa9 +#endif /* __nvoc_class_id_KernelGraphics */ + + +struct KernelGraphicsContextShared; + +#ifndef __NVOC_CLASS_KernelGraphicsContextShared_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsContextShared_TYPEDEF__ +typedef struct KernelGraphicsContextShared KernelGraphicsContextShared; +#endif /* __NVOC_CLASS_KernelGraphicsContextShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsContextShared +#define __nvoc_class_id_KernelGraphicsContextShared 0xe7abeb +#endif /* __nvoc_class_id_KernelGraphicsContextShared */ + + +struct KernelGraphicsObject; + +#ifndef __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +typedef struct KernelGraphicsObject KernelGraphicsObject; +#endif /* __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsObject +#define __nvoc_class_id_KernelGraphicsObject 0x097648 +#endif /* __nvoc_class_id_KernelGraphicsObject */ + + + +/*! + * KernelGraphicsContext is a logical abstraction of GrContext object (Per Channel or + * per ChannelGroup) in Kernel side. The Public API of the GrContext is exposed through + * this object, and any interfaces which do not manage the underlying HW can be managed + * by this object. 
+ */ +#ifdef NVOC_KERNEL_GRAPHICS_CONTEXT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelGraphicsContext { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct KernelGraphicsContext *__nvoc_pbase_KernelGraphicsContext; + NvBool (*__kgrctxCanCopy__)(struct KernelGraphicsContext *); + NvHandle (*__kgrctxGetInternalObjectHandle__)(struct KernelGraphicsContext *); + NV_STATUS (*__kgrctxCtrlGetTpcPartitionMode__)(struct KernelGraphicsContext *, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__kgrctxCtrlSetTpcPartitionMode__)(struct KernelGraphicsContext *, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__kgrctxCtrlGetMMUDebugMode__)(struct KernelGraphicsContext *, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *); + NV_STATUS (*__kgrctxCtrlProgramVidmemPromote__)(struct KernelGraphicsContext *, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *); + NvBool (*__kgrctxShareCallback__)(struct KernelGraphicsContext *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__kgrctxControl__)(struct KernelGraphicsContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kgrctxUnmap__)(struct KernelGraphicsContext *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__kgrctxGetMemInterMapParams__)(struct KernelGraphicsContext *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__kgrctxGetMemoryMappingDescriptor__)(struct KernelGraphicsContext *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__kgrctxGetMapAddrSpace__)(struct KernelGraphicsContext *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__kgrctxControlFilter__)(struct KernelGraphicsContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__kgrctxAddAdditionalDependants__)(struct RsClient *, struct KernelGraphicsContext *, RsResourceRef *); + NvU32 (*__kgrctxGetRefCount__)(struct KernelGraphicsContext *); + NV_STATUS (*__kgrctxCheckMemInterUnmap__)(struct KernelGraphicsContext *, NvBool); + NV_STATUS (*__kgrctxMapTo__)(struct KernelGraphicsContext *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__kgrctxControl_Prologue__)(struct KernelGraphicsContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kgrctxGetRegBaseOffsetAndSize__)(struct KernelGraphicsContext *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__kgrctxInternalControlForward__)(struct KernelGraphicsContext *, NvU32, void *, NvU32); + void (*__kgrctxPreDestruct__)(struct KernelGraphicsContext *); + NV_STATUS (*__kgrctxUnmapFrom__)(struct KernelGraphicsContext *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__kgrctxControl_Epilogue__)(struct KernelGraphicsContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kgrctxControlLookup__)(struct KernelGraphicsContext *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__kgrctxMap__)(struct KernelGraphicsContext *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__kgrctxAccessCallback__)(struct KernelGraphicsContext *, struct RsClient *, void *, RsAccessRight); + struct KernelGraphicsContextShared *pShared; +}; + +#ifndef 
__NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ +typedef struct KernelGraphicsContext KernelGraphicsContext; +#endif /* __NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsContext +#define __nvoc_class_id_KernelGraphicsContext 0x7ead09 +#endif /* __nvoc_class_id_KernelGraphicsContext */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsContext; + +#define __staticCast_KernelGraphicsContext(pThis) \ + ((pThis)->__nvoc_pbase_KernelGraphicsContext) + +#ifdef __nvoc_kernel_graphics_context_h_disabled +#define __dynamicCast_KernelGraphicsContext(pThis) ((KernelGraphicsContext*)NULL) +#else //__nvoc_kernel_graphics_context_h_disabled +#define __dynamicCast_KernelGraphicsContext(pThis) \ + ((KernelGraphicsContext*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGraphicsContext))) +#endif //__nvoc_kernel_graphics_context_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphicsContext(KernelGraphicsContext**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelGraphicsContext(KernelGraphicsContext**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_KernelGraphicsContext(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_KernelGraphicsContext((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define kgrctxCanCopy(arg0) kgrctxCanCopy_DISPATCH(arg0) +#define kgrctxGetInternalObjectHandle(arg0) kgrctxGetInternalObjectHandle_DISPATCH(arg0) +#define kgrctxCtrlGetTpcPartitionMode(pKernelGraphicsContext, pParams) kgrctxCtrlGetTpcPartitionMode_DISPATCH(pKernelGraphicsContext, pParams) +#define kgrctxCtrlSetTpcPartitionMode(pKernelGraphicsContext, pParams) kgrctxCtrlSetTpcPartitionMode_DISPATCH(pKernelGraphicsContext, pParams) +#define kgrctxCtrlGetMMUDebugMode(pKernelGraphicsContext, pParams) kgrctxCtrlGetMMUDebugMode_DISPATCH(pKernelGraphicsContext, pParams) +#define kgrctxCtrlProgramVidmemPromote(pKernelGraphicsContext, pParams) kgrctxCtrlProgramVidmemPromote_DISPATCH(pKernelGraphicsContext, pParams) +#define kgrctxShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) kgrctxShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define kgrctxControl(pGpuResource, pCallContext, pParams) kgrctxControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define kgrctxUnmap(pGpuResource, pCallContext, pCpuMapping) kgrctxUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define kgrctxGetMemInterMapParams(pRmResource, pParams) kgrctxGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define kgrctxGetMemoryMappingDescriptor(pRmResource, ppMemDesc) kgrctxGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define kgrctxGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) kgrctxGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define kgrctxControlFilter(pResource, pCallContext, pParams) kgrctxControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define kgrctxAddAdditionalDependants(pClient, pResource, pReference) kgrctxAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define kgrctxGetRefCount(pResource) kgrctxGetRefCount_DISPATCH(pResource) +#define kgrctxCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) kgrctxCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define 
kgrctxMapTo(pResource, pParams) kgrctxMapTo_DISPATCH(pResource, pParams) +#define kgrctxControl_Prologue(pResource, pCallContext, pParams) kgrctxControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define kgrctxGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) kgrctxGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define kgrctxInternalControlForward(pGpuResource, command, pParams, size) kgrctxInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define kgrctxPreDestruct(pResource) kgrctxPreDestruct_DISPATCH(pResource) +#define kgrctxUnmapFrom(pResource, pParams) kgrctxUnmapFrom_DISPATCH(pResource, pParams) +#define kgrctxControl_Epilogue(pResource, pCallContext, pParams) kgrctxControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define kgrctxControlLookup(pResource, pParams, ppEntry) kgrctxControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define kgrctxMap(pGpuResource, pCallContext, pParams, pCpuMapping) kgrctxMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define kgrctxAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) kgrctxAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool kgrctxShouldManageCtxBuffers_KERNEL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NvU32 gfid); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NvBool kgrctxShouldManageCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxShouldManageCtxBuffers(arg0, arg1, gfid) kgrctxShouldManageCtxBuffers_KERNEL(arg0, arg1, gfid) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxShouldManageCtxBuffers_HAL(arg0, arg1, gfid) kgrctxShouldManageCtxBuffers(arg0, arg1, gfid) + +NV_STATUS kgrctxReleaseSubctxResources_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct OBJVASPACE *arg3, NvU32 veid); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxReleaseSubctxResources(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct OBJVASPACE *arg3, NvU32 veid) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxReleaseSubctxResources(arg0, arg1, arg2, arg3, veid) kgrctxReleaseSubctxResources_IMPL(arg0, arg1, arg2, arg3, veid) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxReleaseSubctxResources_HAL(arg0, arg1, arg2, arg3, veid) kgrctxReleaseSubctxResources(arg0, arg1, arg2, arg3, veid) + +NvBool kgrctxShouldCleanup_KERNEL(struct OBJGPU *pGpu, struct KernelGraphicsContext *pKernelGraphicsContext); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NvBool kgrctxShouldCleanup(struct OBJGPU *pGpu, struct KernelGraphicsContext *pKernelGraphicsContext) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxShouldCleanup(pGpu, pKernelGraphicsContext) kgrctxShouldCleanup_KERNEL(pGpu, pKernelGraphicsContext) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxShouldCleanup_HAL(pGpu, pKernelGraphicsContext) kgrctxShouldCleanup(pGpu, pKernelGraphicsContext) + +NvBool kgrctxShouldPreAllocPmBuffer_PF(struct 
OBJGPU *pGpu, struct KernelGraphicsContext *pKernelGraphicsContext, struct KernelChannel *pKernelChannel); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NvBool kgrctxShouldPreAllocPmBuffer(struct OBJGPU *pGpu, struct KernelGraphicsContext *pKernelGraphicsContext, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxShouldPreAllocPmBuffer(pGpu, pKernelGraphicsContext, pKernelChannel) kgrctxShouldPreAllocPmBuffer_PF(pGpu, pKernelGraphicsContext, pKernelChannel) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxShouldPreAllocPmBuffer_HAL(pGpu, pKernelGraphicsContext, pKernelChannel) kgrctxShouldPreAllocPmBuffer(pGpu, pKernelGraphicsContext, pKernelChannel) + +void kgrctxUnmapBuffers_KERNEL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, KernelGraphicsContextUnicast *arg2, struct KernelChannel *arg3); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxUnmapBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, KernelGraphicsContextUnicast *arg2, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxUnmapBuffers(arg0, arg1, arg2, arg3) kgrctxUnmapBuffers_KERNEL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxUnmapBuffers_HAL(arg0, arg1, arg2, arg3) kgrctxUnmapBuffers(arg0, arg1, arg2, arg3) + +NV_STATUS kgrctxUnmapCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphicsObject *arg2, struct KernelGraphics *arg3, NvBool bDestructor); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxUnmapCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphicsObject *arg2, struct KernelGraphics *arg3, NvBool bDestructor) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxUnmapCtxBuffers(arg0, arg1, arg2, arg3, bDestructor) kgrctxUnmapCtxBuffers_IMPL(arg0, arg1, arg2, arg3, bDestructor) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxUnmapCtxBuffers_HAL(arg0, arg1, arg2, arg3, bDestructor) kgrctxUnmapCtxBuffers(arg0, arg1, arg2, arg3, bDestructor) + +void kgrctxIncObjectCount_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NvU32 classNum); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxIncObjectCount(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NvU32 classNum) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxIncObjectCount(arg0, arg1, classNum) kgrctxIncObjectCount_IMPL(arg0, arg1, classNum) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxIncObjectCount_HAL(arg0, arg1, classNum) kgrctxIncObjectCount(arg0, arg1, classNum) + +void kgrctxDecObjectCount_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NvU32 classNum); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxDecObjectCount(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NvU32 classNum) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define 
kgrctxDecObjectCount(arg0, arg1, classNum) kgrctxDecObjectCount_IMPL(arg0, arg1, classNum) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxDecObjectCount_HAL(arg0, arg1, classNum) kgrctxDecObjectCount(arg0, arg1, classNum) + +GR_GLOBALCTX_BUFFER kgrctxGetRegisterAccessMapId_PF(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelChannel *arg2); + +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline GR_GLOBALCTX_BUFFER kgrctxGetRegisterAccessMapId(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelChannel *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + GR_GLOBALCTX_BUFFER ret; + portMemSet(&ret, 0, sizeof(GR_GLOBALCTX_BUFFER)); + return ret; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxGetRegisterAccessMapId(arg0, arg1, arg2) kgrctxGetRegisterAccessMapId_PF(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#define kgrctxGetRegisterAccessMapId_HAL(arg0, arg1, arg2) kgrctxGetRegisterAccessMapId(arg0, arg1, arg2) + +static inline NvBool kgrctxCanCopy_0c883b(struct KernelGraphicsContext *arg0) { + return ((NvBool)(0 == 0)); +} + +static inline NvBool kgrctxCanCopy_DISPATCH(struct KernelGraphicsContext *arg0) { + return arg0->__kgrctxCanCopy__(arg0); +} + +NvHandle kgrctxGetInternalObjectHandle_IMPL(struct KernelGraphicsContext *arg0); + +static inline NvHandle kgrctxGetInternalObjectHandle_DISPATCH(struct KernelGraphicsContext *arg0) { + return arg0->__kgrctxGetInternalObjectHandle__(arg0); +} + +NV_STATUS kgrctxCtrlGetTpcPartitionMode_IMPL(struct KernelGraphicsContext *pKernelGraphicsContext, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams); + +static inline NV_STATUS kgrctxCtrlGetTpcPartitionMode_DISPATCH(struct KernelGraphicsContext *pKernelGraphicsContext, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return pKernelGraphicsContext->__kgrctxCtrlGetTpcPartitionMode__(pKernelGraphicsContext, pParams); +} + +NV_STATUS kgrctxCtrlSetTpcPartitionMode_IMPL(struct KernelGraphicsContext *pKernelGraphicsContext, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams); + +static inline NV_STATUS kgrctxCtrlSetTpcPartitionMode_DISPATCH(struct KernelGraphicsContext *pKernelGraphicsContext, NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams) { + return pKernelGraphicsContext->__kgrctxCtrlSetTpcPartitionMode__(pKernelGraphicsContext, pParams); +} + +NV_STATUS kgrctxCtrlGetMMUDebugMode_IMPL(struct KernelGraphicsContext *pKernelGraphicsContext, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams); + +static inline NV_STATUS kgrctxCtrlGetMMUDebugMode_DISPATCH(struct KernelGraphicsContext *pKernelGraphicsContext, NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams) { + return pKernelGraphicsContext->__kgrctxCtrlGetMMUDebugMode__(pKernelGraphicsContext, pParams); +} + +NV_STATUS kgrctxCtrlProgramVidmemPromote_IMPL(struct KernelGraphicsContext *pKernelGraphicsContext, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams); + +static inline NV_STATUS kgrctxCtrlProgramVidmemPromote_DISPATCH(struct KernelGraphicsContext *pKernelGraphicsContext, NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams) { + return pKernelGraphicsContext->__kgrctxCtrlProgramVidmemPromote__(pKernelGraphicsContext, pParams); +} + +static inline NvBool kgrctxShareCallback_DISPATCH(struct KernelGraphicsContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__kgrctxShareCallback__(pGpuResource, pInvokingClient, pParentRef, 
pSharePolicy); +} + +static inline NV_STATUS kgrctxControl_DISPATCH(struct KernelGraphicsContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__kgrctxControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS kgrctxUnmap_DISPATCH(struct KernelGraphicsContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__kgrctxUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS kgrctxGetMemInterMapParams_DISPATCH(struct KernelGraphicsContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__kgrctxGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS kgrctxGetMemoryMappingDescriptor_DISPATCH(struct KernelGraphicsContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__kgrctxGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS kgrctxGetMapAddrSpace_DISPATCH(struct KernelGraphicsContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__kgrctxGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS kgrctxControlFilter_DISPATCH(struct KernelGraphicsContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kgrctxControlFilter__(pResource, pCallContext, pParams); +} + +static inline void kgrctxAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct KernelGraphicsContext *pResource, RsResourceRef *pReference) { + pResource->__kgrctxAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 kgrctxGetRefCount_DISPATCH(struct KernelGraphicsContext *pResource) { + return pResource->__kgrctxGetRefCount__(pResource); +} + +static inline NV_STATUS kgrctxCheckMemInterUnmap_DISPATCH(struct KernelGraphicsContext *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__kgrctxCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS kgrctxMapTo_DISPATCH(struct KernelGraphicsContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__kgrctxMapTo__(pResource, pParams); +} + +static inline NV_STATUS kgrctxControl_Prologue_DISPATCH(struct KernelGraphicsContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kgrctxControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kgrctxGetRegBaseOffsetAndSize_DISPATCH(struct KernelGraphicsContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__kgrctxGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS kgrctxInternalControlForward_DISPATCH(struct KernelGraphicsContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__kgrctxInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void kgrctxPreDestruct_DISPATCH(struct KernelGraphicsContext *pResource) { + pResource->__kgrctxPreDestruct__(pResource); +} + +static inline NV_STATUS kgrctxUnmapFrom_DISPATCH(struct KernelGraphicsContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__kgrctxUnmapFrom__(pResource, pParams); +} + +static inline void kgrctxControl_Epilogue_DISPATCH(struct KernelGraphicsContext 
*pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__kgrctxControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kgrctxControlLookup_DISPATCH(struct KernelGraphicsContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__kgrctxControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS kgrctxMap_DISPATCH(struct KernelGraphicsContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__kgrctxMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool kgrctxAccessCallback_DISPATCH(struct KernelGraphicsContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__kgrctxAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS kgrctxFromKernelChannel_IMPL(struct KernelChannel *arg0, struct KernelGraphicsContext **arg1); +#define kgrctxFromKernelChannel(arg0, arg1) kgrctxFromKernelChannel_IMPL(arg0, arg1) +NV_STATUS kgrctxFromKernelChannelGroupApi_IMPL(struct KernelChannelGroupApi *arg0, struct KernelGraphicsContext **arg1); +#define kgrctxFromKernelChannelGroupApi(arg0, arg1) kgrctxFromKernelChannelGroupApi_IMPL(arg0, arg1) +NV_STATUS kgrctxGetGlobalContextBufferExternalId_IMPL(GR_GLOBALCTX_BUFFER arg0, NvU32 *pExternalId); +#define kgrctxGetGlobalContextBufferExternalId(arg0, pExternalId) kgrctxGetGlobalContextBufferExternalId_IMPL(arg0, pExternalId) +NV_STATUS kgrctxGetGlobalContextBufferInternalId_IMPL(NvU32 externalId, GR_GLOBALCTX_BUFFER *arg0); +#define kgrctxGetGlobalContextBufferInternalId(externalId, arg0) kgrctxGetGlobalContextBufferInternalId_IMPL(externalId, arg0) +NV_STATUS kgrctxCtxBufferToFifoEngineId_IMPL(GR_CTX_BUFFER arg0, NvU32 *pFifoEngineId); +#define kgrctxCtxBufferToFifoEngineId(arg0, pFifoEngineId) kgrctxCtxBufferToFifoEngineId_IMPL(arg0, pFifoEngineId) +NV_STATUS kgrctxGlobalCtxBufferToFifoEngineId_IMPL(GR_GLOBALCTX_BUFFER arg0, NvU32 *pFifoEngineId); +#define kgrctxGlobalCtxBufferToFifoEngineId(arg0, pFifoEngineId) kgrctxGlobalCtxBufferToFifoEngineId_IMPL(arg0, pFifoEngineId) +NV_STATUS kgrctxGetGidInfoInPlace_IMPL(struct OBJGPU *pGpu, NvU8 *pUuidBuffer, NvU32 uuidBufferSize, NvU32 flags); +#define kgrctxGetGidInfoInPlace(pGpu, pUuidBuffer, uuidBufferSize, flags) kgrctxGetGidInfoInPlace_IMPL(pGpu, pUuidBuffer, uuidBufferSize, flags) +GMMU_APERTURE kgrctxGetExternalAllocAperture_IMPL(NvU32 addressSpace); +#define kgrctxGetExternalAllocAperture(addressSpace) kgrctxGetExternalAllocAperture_IMPL(addressSpace) +NV_STATUS kgrctxFillCtxBufferInfo_IMPL(struct MEMORY_DESCRIPTOR *arg0, NvU32 externalId, NvBool bBufferGlobal, NV2080_CTRL_GR_CTX_BUFFER_INFO *arg1); +#define kgrctxFillCtxBufferInfo(arg0, externalId, bBufferGlobal, arg1) kgrctxFillCtxBufferInfo_IMPL(arg0, externalId, bBufferGlobal, arg1) +NV_STATUS kgrctxConstruct_IMPL(struct KernelGraphicsContext *arg_pKernelGraphicsContext, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_kgrctxConstruct(arg_pKernelGraphicsContext, arg_pCallContext, arg_pParams) kgrctxConstruct_IMPL(arg_pKernelGraphicsContext, arg_pCallContext, arg_pParams) +NV_STATUS kgrctxCopyConstruct_IMPL(struct KernelGraphicsContext *arg0, struct CALL_CONTEXT *arg1, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg2); 
+#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxCopyConstruct(struct KernelGraphicsContext *arg0, struct CALL_CONTEXT *arg1, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxCopyConstruct(arg0, arg1, arg2) kgrctxCopyConstruct_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxDestruct_IMPL(struct KernelGraphicsContext *arg0); +#define __nvoc_kgrctxDestruct(arg0) kgrctxDestruct_IMPL(arg0) +NV_STATUS kgrctxGetUnicast_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, KernelGraphicsContextUnicast **arg2); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxGetUnicast(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, KernelGraphicsContextUnicast **arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxGetUnicast(arg0, arg1, arg2) kgrctxGetUnicast_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxLookupMmuFault_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NV83DE_MMU_FAULT_INFO *arg2); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxLookupMmuFault(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NV83DE_MMU_FAULT_INFO *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxLookupMmuFault(arg0, arg1, arg2) kgrctxLookupMmuFault_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxClearMmuFault_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxClearMmuFault(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxClearMmuFault(arg0, arg1) kgrctxClearMmuFault_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxRecordMmuFault_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NvU32 mmuFaultInfo); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxRecordMmuFault(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, NvU32 mmuFaultInfo) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxRecordMmuFault(arg0, arg1, mmuFaultInfo) kgrctxRecordMmuFault_IMPL(arg0, arg1, mmuFaultInfo) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NvBool kgrctxIsMainContextAllocated_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NvBool kgrctxIsMainContextAllocated(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxIsMainContextAllocated(arg0, arg1) kgrctxIsMainContextAllocated_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxGetMainContextBuffer_IMPL(struct OBJGPU *arg0, struct 
KernelGraphicsContext *arg1, struct MEMORY_DESCRIPTOR **arg2); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxGetMainContextBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct MEMORY_DESCRIPTOR **arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxGetMainContextBuffer(arg0, arg1, arg2) kgrctxGetMainContextBuffer_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxGetBufferCount_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 *pBufferCount); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxGetBufferCount(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 *pBufferCount) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxGetBufferCount(arg0, arg1, arg2, pBufferCount) kgrctxGetBufferCount_IMPL(arg0, arg1, arg2, pBufferCount) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxGetCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 gfid, NvU32 bufferCount, struct MEMORY_DESCRIPTOR **arg3, NvU32 *pCtxBufferType, NvU32 *pBufferCountOut, NvU32 *pFirstGlobalBuffer); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxGetCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 gfid, NvU32 bufferCount, struct MEMORY_DESCRIPTOR **arg3, NvU32 *pCtxBufferType, NvU32 *pBufferCountOut, NvU32 *pFirstGlobalBuffer) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxGetCtxBuffers(arg0, arg1, arg2, gfid, bufferCount, arg3, pCtxBufferType, pBufferCountOut, pFirstGlobalBuffer) kgrctxGetCtxBuffers_IMPL(arg0, arg1, arg2, gfid, bufferCount, arg3, pCtxBufferType, pBufferCountOut, pFirstGlobalBuffer) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxGetCtxBufferInfo_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 gfid, NvU32 bufferMaxCount, NvU32 *pBufferCount, NV2080_CTRL_GR_CTX_BUFFER_INFO *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxGetCtxBufferInfo(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 gfid, NvU32 bufferMaxCount, NvU32 *pBufferCount, NV2080_CTRL_GR_CTX_BUFFER_INFO *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxGetCtxBufferInfo(arg0, arg1, arg2, gfid, bufferMaxCount, pBufferCount, arg3) kgrctxGetCtxBufferInfo_IMPL(arg0, arg1, arg2, gfid, bufferMaxCount, pBufferCount, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxGetCtxBufferPtes_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 gfid, NvU32 bufferType, NvU32 firstPage, NvU64 *pPhysAddrs, NvU32 addrsSize, NvU32 *pNumPages, NvBool *pbNoMorePages); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxGetCtxBufferPtes(struct OBJGPU *arg0, struct 
KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 gfid, NvU32 bufferType, NvU32 firstPage, NvU64 *pPhysAddrs, NvU32 addrsSize, NvU32 *pNumPages, NvBool *pbNoMorePages) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxGetCtxBufferPtes(arg0, arg1, arg2, gfid, bufferType, firstPage, pPhysAddrs, addrsSize, pNumPages, pbNoMorePages) kgrctxGetCtxBufferPtes_IMPL(arg0, arg1, arg2, gfid, bufferType, firstPage, pPhysAddrs, addrsSize, pNumPages, pbNoMorePages) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxAllocMainCtxBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxAllocMainCtxBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxAllocMainCtxBuffer(arg0, arg1, arg2, arg3) kgrctxAllocMainCtxBuffer_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxAllocPatchBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxAllocPatchBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxAllocPatchBuffer(arg0, arg1, arg2, arg3) kgrctxAllocPatchBuffer_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxAllocPmBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxAllocPmBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxAllocPmBuffer(arg0, arg1, arg2, arg3) kgrctxAllocPmBuffer_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxAllocCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelGraphicsObject *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxAllocCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelGraphicsObject *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxAllocCtxBuffers(arg0, arg1, arg2, arg3) kgrctxAllocCtxBuffers_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxMapGlobalCtxBuffer_IMPL(struct OBJGPU *pGpu, struct KernelGraphicsContext *arg0, struct KernelGraphics *arg1, NvU32 gfid, struct OBJVASPACE *arg2, 
GR_GLOBALCTX_BUFFER arg3, NvBool bIsReadOnly); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxMapGlobalCtxBuffer(struct OBJGPU *pGpu, struct KernelGraphicsContext *arg0, struct KernelGraphics *arg1, NvU32 gfid, struct OBJVASPACE *arg2, GR_GLOBALCTX_BUFFER arg3, NvBool bIsReadOnly) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxMapGlobalCtxBuffer(pGpu, arg0, arg1, gfid, arg2, arg3, bIsReadOnly) kgrctxMapGlobalCtxBuffer_IMPL(pGpu, arg0, arg1, gfid, arg2, arg3, bIsReadOnly) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxMapGlobalCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 gfid, struct KernelChannel *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxMapGlobalCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, NvU32 gfid, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxMapGlobalCtxBuffers(arg0, arg1, arg2, gfid, arg3) kgrctxMapGlobalCtxBuffers_IMPL(arg0, arg1, arg2, gfid, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxMapCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelGraphicsObject *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxMapCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelGraphicsObject *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxMapCtxBuffers(arg0, arg1, arg2, arg3) kgrctxMapCtxBuffers_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxPrepareInitializeCtxBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3, NvU32 externalId, NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *arg4, NvBool *pbAddEntry); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxPrepareInitializeCtxBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3, NvU32 externalId, NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *arg4, NvBool *pbAddEntry) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxPrepareInitializeCtxBuffer(arg0, arg1, arg2, arg3, externalId, arg4, pbAddEntry) kgrctxPrepareInitializeCtxBuffer_IMPL(arg0, arg1, arg2, arg3, externalId, arg4, pbAddEntry) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxPreparePromoteCtxBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelChannel *arg2, NvU32 externalId, NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *arg3, NvBool *pbAddEntry); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxPreparePromoteCtxBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelChannel *arg2, NvU32 externalId, NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *arg3, NvBool *pbAddEntry) { + 
NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxPreparePromoteCtxBuffer(arg0, arg1, arg2, externalId, arg3, pbAddEntry) kgrctxPreparePromoteCtxBuffer_IMPL(arg0, arg1, arg2, externalId, arg3, pbAddEntry) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxMarkCtxBufferInitialized_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3, NvU32 externalId); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxMarkCtxBufferInitialized(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3, NvU32 externalId) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxMarkCtxBufferInitialized(arg0, arg1, arg2, arg3, externalId) kgrctxMarkCtxBufferInitialized_IMPL(arg0, arg1, arg2, arg3, externalId) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS kgrctxSetupDeferredPmBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS kgrctxSetupDeferredPmBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxSetupDeferredPmBuffer(arg0, arg1, arg2, arg3) kgrctxSetupDeferredPmBuffer_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxUnmapGlobalCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct OBJVASPACE *arg3, NvU32 gfid); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxUnmapGlobalCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct OBJVASPACE *arg3, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxUnmapGlobalCtxBuffers(arg0, arg1, arg2, arg3, gfid) kgrctxUnmapGlobalCtxBuffers_IMPL(arg0, arg1, arg2, arg3, gfid) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxUnmapGlobalCtxBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct OBJVASPACE *arg3, GR_GLOBALCTX_BUFFER arg4); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxUnmapGlobalCtxBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct OBJVASPACE *arg3, GR_GLOBALCTX_BUFFER arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxUnmapGlobalCtxBuffer(arg0, arg1, arg2, arg3, arg4) kgrctxUnmapGlobalCtxBuffer_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxUnmapMainCtxBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxUnmapMainCtxBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct 
KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxUnmapMainCtxBuffer(arg0, arg1, arg2, arg3) kgrctxUnmapMainCtxBuffer_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxUnmapCtxPmBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct OBJVASPACE *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxUnmapCtxPmBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct OBJVASPACE *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxUnmapCtxPmBuffer(arg0, arg1, arg2, arg3) kgrctxUnmapCtxPmBuffer_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxUnmapAssociatedCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxUnmapAssociatedCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct KernelGraphics *arg2, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxUnmapAssociatedCtxBuffers(arg0, arg1, arg2, arg3) kgrctxUnmapAssociatedCtxBuffers_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxFreeMainCtxBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxFreeMainCtxBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxFreeMainCtxBuffer(arg0, arg1) kgrctxFreeMainCtxBuffer_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxFreeZcullBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct OBJVASPACE *arg2); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxFreeZcullBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct OBJVASPACE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxFreeZcullBuffer(arg0, arg1, arg2) kgrctxFreeZcullBuffer_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxFreeCtxPreemptionBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct OBJVASPACE *arg2); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxFreeCtxPreemptionBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1, struct OBJVASPACE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxFreeCtxPreemptionBuffers(arg0, arg1, arg2) kgrctxFreeCtxPreemptionBuffers_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxFreePatchBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxFreePatchBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1) { + 
NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxFreePatchBuffer(arg0, arg1) kgrctxFreePatchBuffer_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxFreePmBuffer_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxFreePmBuffer(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxFreePmBuffer(arg0, arg1) kgrctxFreePmBuffer_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxFreeLocalGlobalCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxFreeLocalGlobalCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxFreeLocalGlobalCtxBuffers(arg0, arg1) kgrctxFreeLocalGlobalCtxBuffers_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void kgrctxFreeAssociatedCtxBuffers_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void kgrctxFreeAssociatedCtxBuffers(struct OBJGPU *arg0, struct KernelGraphicsContext *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContext was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define kgrctxFreeAssociatedCtxBuffers(arg0, arg1) kgrctxFreeAssociatedCtxBuffers_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#undef PRIVATE_FIELD + + +/** + * This refcounted class encapsulates the context data that is shared when a + * context is duped. 
+ */ +#ifdef NVOC_KERNEL_GRAPHICS_CONTEXT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelGraphicsContextUnicast { + NvU32 channelObjects; + NvU32 objectCounts[4]; + GR_CTX_PATCHBUFFER ctxPatchBuffer; + GR_CTX_ZCULLBUFFER zcullCtxswBuffer; + GR_CTX_PMBUFFER pmCtxswBuffer; + struct MEMORY_DESCRIPTOR *pMainCtxBuffer; + GR_GLOBALCTX_BUFFERS localCtxBuffer; + VA_LIST globalCtxBufferVaList[10]; + NvBool bKGrMainCtxBufferInitialized; + NvBool bKGrPatchCtxBufferInitialized; + NvBool bKGrPmCtxBufferInitialized; + GR_CTX_PREEMPTBUFFER preemptCtxswBuffer; + GR_CTX_PREEMPTBUFFER spillCtxswBuffer; + GR_CTX_PREEMPTBUFFER betaCBCtxswBuffer; + GR_CTX_PREEMPTBUFFER pagepoolCtxswBuffer; + GR_CTX_PREEMPTBUFFER rtvCbCtxswBuffer; + NvBool bVprChannel; + NvBool bSupportsPerSubctxHeader; + NV83DE_MMU_FAULT_INFO mmuFaultInfo; +}; + + +struct KernelGraphicsContextShared { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsShared __nvoc_base_RsShared; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + struct KernelGraphicsContextShared *__nvoc_pbase_KernelGraphicsContextShared; + struct KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; +}; + +#ifndef __NVOC_CLASS_KernelGraphicsContextShared_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsContextShared_TYPEDEF__ +typedef struct KernelGraphicsContextShared KernelGraphicsContextShared; +#endif /* __NVOC_CLASS_KernelGraphicsContextShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsContextShared +#define __nvoc_class_id_KernelGraphicsContextShared 0xe7abeb +#endif /* __nvoc_class_id_KernelGraphicsContextShared */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsContextShared; + +#define __staticCast_KernelGraphicsContextShared(pThis) \ + ((pThis)->__nvoc_pbase_KernelGraphicsContextShared) + +#ifdef __nvoc_kernel_graphics_context_h_disabled +#define __dynamicCast_KernelGraphicsContextShared(pThis) ((KernelGraphicsContextShared*)NULL) +#else //__nvoc_kernel_graphics_context_h_disabled +#define __dynamicCast_KernelGraphicsContextShared(pThis) \ + ((KernelGraphicsContextShared*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGraphicsContextShared))) +#endif //__nvoc_kernel_graphics_context_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphicsContextShared(KernelGraphicsContextShared**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelGraphicsContextShared(KernelGraphicsContextShared**, Dynamic*, NvU32); +#define __objCreate_KernelGraphicsContextShared(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelGraphicsContextShared((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS shrkgrctxConstruct_IMPL(struct KernelGraphicsContextShared *arg_); +#define __nvoc_shrkgrctxConstruct(arg_) shrkgrctxConstruct_IMPL(arg_) +NV_STATUS shrkgrctxInit_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS shrkgrctxInit(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContextShared was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define shrkgrctxInit(arg0, arg1, arg2) shrkgrctxInit_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +NV_STATUS shrkgrctxConstructUnicast_IMPL(struct 
OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2, struct KernelGraphics *arg3, KernelGraphicsContextUnicast *arg4); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline NV_STATUS shrkgrctxConstructUnicast(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2, struct KernelGraphics *arg3, KernelGraphicsContextUnicast *arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContextShared was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define shrkgrctxConstructUnicast(arg0, arg1, arg2, arg3, arg4) shrkgrctxConstructUnicast_IMPL(arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void shrkgrctxDestruct_IMPL(struct KernelGraphicsContextShared *arg0); +#define __nvoc_shrkgrctxDestruct(arg0) shrkgrctxDestruct_IMPL(arg0) +void shrkgrctxTeardown_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void shrkgrctxTeardown(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContextShared was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define shrkgrctxTeardown(arg0, arg1, arg2) shrkgrctxTeardown_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void shrkgrctxDestructUnicast_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2, KernelGraphicsContextUnicast *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void shrkgrctxDestructUnicast(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2, KernelGraphicsContextUnicast *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContextShared was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define shrkgrctxDestructUnicast(arg0, arg1, arg2, arg3) shrkgrctxDestructUnicast_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +void shrkgrctxDetach_IMPL(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2, struct KernelChannel *arg3); +#ifdef __nvoc_kernel_graphics_context_h_disabled +static inline void shrkgrctxDetach(struct OBJGPU *arg0, struct KernelGraphicsContextShared *arg1, struct KernelGraphicsContext *arg2, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsContextShared was disabled!"); +} +#else //__nvoc_kernel_graphics_context_h_disabled +#define shrkgrctxDetach(arg0, arg1, arg2, arg3) shrkgrctxDetach_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_context_h_disabled + +#undef PRIVATE_FIELD + + +/*! 
Handle NV0090 ctrl call forwarding */ +NV_STATUS kgrctxCtrlHandle +( + CALL_CONTEXT *, + NvHandle hKernelGraphicsContext +); + +#endif // _KERNEL_GRAPHICS_CONTEXT_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_GRAPHICS_CONTEXT_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_graphics_manager_nvoc.c b/src/nvidia/generated/g_kernel_graphics_manager_nvoc.c new file mode 100644 index 000000000..75eca8041 --- /dev/null +++ b/src/nvidia/generated/g_kernel_graphics_manager_nvoc.c @@ -0,0 +1,281 @@ +#define NVOC_KERNEL_GRAPHICS_MANAGER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_graphics_manager_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xd22179 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsManager; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelGraphicsManager(KernelGraphicsManager*); +void __nvoc_init_funcTable_KernelGraphicsManager(KernelGraphicsManager*); +NV_STATUS __nvoc_ctor_KernelGraphicsManager(KernelGraphicsManager*); +void __nvoc_init_dataField_KernelGraphicsManager(KernelGraphicsManager*); +void __nvoc_dtor_KernelGraphicsManager(KernelGraphicsManager*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphicsManager; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsManager_KernelGraphicsManager = { + /*pClassDef=*/ &__nvoc_class_def_KernelGraphicsManager, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelGraphicsManager, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsManager_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsManager, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsManager, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGraphicsManager = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelGraphicsManager_KernelGraphicsManager, + &__nvoc_rtti_KernelGraphicsManager_OBJENGSTATE, + &__nvoc_rtti_KernelGraphicsManager_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsManager = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelGraphicsManager), + /*classId=*/ classId(KernelGraphicsManager), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelGraphicsManager", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelGraphicsManager, + /*pCastInfo=*/ &__nvoc_castinfo_KernelGraphicsManager, + /*pExportInfo=*/ &__nvoc_export_info_KernelGraphicsManager +}; + +static NV_STATUS __nvoc_thunk_KernelGraphicsManager_engstateConstructEngine(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, ENGDESCRIPTOR arg2) { + return kgrmgrConstructEngine(arg0, (struct KernelGraphicsManager *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrReconcileTunableState(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunableState) { + return 
engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStateLoad(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStateUnload(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStateInitLocked(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStatePreLoad(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStatePostUnload(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_kgrmgrStateDestroy(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStatePreUnload(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStateInitUnlocked(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kgrmgrInitMissing(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStatePreInitLocked(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStatePreInitUnlocked(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrGetTunableState(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), pTunableState); 
+} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrCompareTunableState(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kgrmgrFreeTunableState(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrStatePostLoad(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrAllocTunableState(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgrmgrSetTunableState(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kgrmgrIsPresent(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphicsManager_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphicsManager = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelGraphicsManager(KernelGraphicsManager *pThis) { + __nvoc_kgrmgrDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelGraphicsManager(KernelGraphicsManager *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelGraphicsManager(KernelGraphicsManager *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphicsManager_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelGraphicsManager(pThis); + goto __nvoc_ctor_KernelGraphicsManager_exit; // Success + +__nvoc_ctor_KernelGraphicsManager_fail_OBJENGSTATE: +__nvoc_ctor_KernelGraphicsManager_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelGraphicsManager_1(KernelGraphicsManager *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__kgrmgrConstructEngine__ = &kgrmgrConstructEngine_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelGraphicsManager_engstateConstructEngine; + + pThis->__kgrmgrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrReconcileTunableState; + + pThis->__kgrmgrStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStateLoad; + + pThis->__kgrmgrStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStateUnload; + + pThis->__kgrmgrStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStateInitLocked; + + 
pThis->__kgrmgrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStatePreLoad; + + pThis->__kgrmgrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStatePostUnload; + + pThis->__kgrmgrStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStateDestroy; + + pThis->__kgrmgrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStatePreUnload; + + pThis->__kgrmgrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStateInitUnlocked; + + pThis->__kgrmgrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrInitMissing; + + pThis->__kgrmgrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStatePreInitLocked; + + pThis->__kgrmgrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStatePreInitUnlocked; + + pThis->__kgrmgrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrGetTunableState; + + pThis->__kgrmgrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrCompareTunableState; + + pThis->__kgrmgrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrFreeTunableState; + + pThis->__kgrmgrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrStatePostLoad; + + pThis->__kgrmgrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrAllocTunableState; + + pThis->__kgrmgrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrSetTunableState; + + pThis->__kgrmgrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kgrmgrIsPresent; +} + +void __nvoc_init_funcTable_KernelGraphicsManager(KernelGraphicsManager *pThis) { + __nvoc_init_funcTable_KernelGraphicsManager_1(pThis); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelGraphicsManager(KernelGraphicsManager *pThis) { + pThis->__nvoc_pbase_KernelGraphicsManager = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelGraphicsManager(pThis); +} + +NV_STATUS __nvoc_objCreate_KernelGraphicsManager(KernelGraphicsManager **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelGraphicsManager *pThis; + + pThis = portMemAllocNonPaged(sizeof(KernelGraphicsManager)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelGraphicsManager)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelGraphicsManager); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_KernelGraphicsManager(pThis); + status = __nvoc_ctor_KernelGraphicsManager(pThis); + if (status != NV_OK) goto __nvoc_objCreate_KernelGraphicsManager_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelGraphicsManager_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphicsManager(KernelGraphicsManager **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelGraphicsManager(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_graphics_manager_nvoc.h b/src/nvidia/generated/g_kernel_graphics_manager_nvoc.h new file mode 100644 index 000000000..d8ecabb45 --- /dev/null +++ b/src/nvidia/generated/g_kernel_graphics_manager_nvoc.h 
@@ -0,0 +1,409 @@ +#ifndef _G_KERNEL_GRAPHICS_MANAGER_NVOC_H_ +#define _G_KERNEL_GRAPHICS_MANAGER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_graphics_manager_nvoc.h" + +#ifndef KERNEL_GRAPHICS_MANAGER_H +#define KERNEL_GRAPHICS_MANAGER_H + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/gpu.h" +#include "kernel/mem_mgr/ctx_buf_pool.h" +#include "kernel/gpu/gr/kernel_graphics_context.h" + +#define KGRMGR_MAX_GR 8 +#define KGRMGR_MAX_GPC 12 +#define GR_INDEX_INVALID 0xFFFFFFFF +#define KGRMGR_MAX_GPCGRP 4 + +typedef struct KERNEL_MIG_GPU_INSTANCE KERNEL_MIG_GPU_INSTANCE; + +/*! + * KernelGraphicsManager provides means to access KernelGraphics engine with specified index. + * It also houses information at a higher level or that is common between KernelGraphics engines. 
+ */ +#ifdef NVOC_KERNEL_GRAPHICS_MANAGER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct GRMGR_LEGACY_KGRAPHICS_STATIC_INFO { + NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS floorsweepingMasks; + NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS *pPpcMasks; + NV2080_CTRL_INTERNAL_STATIC_GR_INFO *pGrInfo; + NvBool bInitialized; +}; + + +struct KernelGraphicsManager { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelGraphicsManager *__nvoc_pbase_KernelGraphicsManager; + NV_STATUS (*__kgrmgrConstructEngine__)(struct OBJGPU *, struct KernelGraphicsManager *, ENGDESCRIPTOR); + NV_STATUS (*__kgrmgrReconcileTunableState__)(POBJGPU, struct KernelGraphicsManager *, void *); + NV_STATUS (*__kgrmgrStateLoad__)(POBJGPU, struct KernelGraphicsManager *, NvU32); + NV_STATUS (*__kgrmgrStateUnload__)(POBJGPU, struct KernelGraphicsManager *, NvU32); + NV_STATUS (*__kgrmgrStateInitLocked__)(POBJGPU, struct KernelGraphicsManager *); + NV_STATUS (*__kgrmgrStatePreLoad__)(POBJGPU, struct KernelGraphicsManager *, NvU32); + NV_STATUS (*__kgrmgrStatePostUnload__)(POBJGPU, struct KernelGraphicsManager *, NvU32); + void (*__kgrmgrStateDestroy__)(POBJGPU, struct KernelGraphicsManager *); + NV_STATUS (*__kgrmgrStatePreUnload__)(POBJGPU, struct KernelGraphicsManager *, NvU32); + NV_STATUS (*__kgrmgrStateInitUnlocked__)(POBJGPU, struct KernelGraphicsManager *); + void (*__kgrmgrInitMissing__)(POBJGPU, struct KernelGraphicsManager *); + NV_STATUS (*__kgrmgrStatePreInitLocked__)(POBJGPU, struct KernelGraphicsManager *); + NV_STATUS (*__kgrmgrStatePreInitUnlocked__)(POBJGPU, struct KernelGraphicsManager *); + NV_STATUS (*__kgrmgrGetTunableState__)(POBJGPU, struct KernelGraphicsManager *, void *); + NV_STATUS (*__kgrmgrCompareTunableState__)(POBJGPU, struct KernelGraphicsManager *, void *, void *); + void (*__kgrmgrFreeTunableState__)(POBJGPU, struct KernelGraphicsManager *, void *); + NV_STATUS (*__kgrmgrStatePostLoad__)(POBJGPU, struct KernelGraphicsManager *, NvU32); + NV_STATUS (*__kgrmgrAllocTunableState__)(POBJGPU, struct KernelGraphicsManager *, void **); + NV_STATUS (*__kgrmgrSetTunableState__)(POBJGPU, struct KernelGraphicsManager *, void *); + NvBool (*__kgrmgrIsPresent__)(POBJGPU, struct KernelGraphicsManager *); + struct GRMGR_LEGACY_KGRAPHICS_STATIC_INFO legacyKgraphicsStaticInfo; + NvU64 veidInUseMask; + NvU64 grIdxVeidMask[8]; + CTX_BUF_INFO globalCtxBufInfo[10]; +}; + +#ifndef __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ +typedef struct KernelGraphicsManager KernelGraphicsManager; +#endif /* __NVOC_CLASS_KernelGraphicsManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsManager +#define __nvoc_class_id_KernelGraphicsManager 0xd22179 +#endif /* __nvoc_class_id_KernelGraphicsManager */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsManager; + +#define __staticCast_KernelGraphicsManager(pThis) \ + ((pThis)->__nvoc_pbase_KernelGraphicsManager) + +#ifdef __nvoc_kernel_graphics_manager_h_disabled +#define __dynamicCast_KernelGraphicsManager(pThis) ((KernelGraphicsManager*)NULL) +#else //__nvoc_kernel_graphics_manager_h_disabled +#define __dynamicCast_KernelGraphicsManager(pThis) \ + ((KernelGraphicsManager*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGraphicsManager))) +#endif 
//__nvoc_kernel_graphics_manager_h_disabled + +#define PDB_PROP_KGRMGR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KGRMGR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphicsManager(KernelGraphicsManager**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelGraphicsManager(KernelGraphicsManager**, Dynamic*, NvU32); +#define __objCreate_KernelGraphicsManager(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelGraphicsManager((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kgrmgrConstructEngine(arg0, arg1, arg2) kgrmgrConstructEngine_DISPATCH(arg0, arg1, arg2) +#define kgrmgrReconcileTunableState(pGpu, pEngstate, pTunableState) kgrmgrReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgrmgrStateLoad(pGpu, pEngstate, arg0) kgrmgrStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgrmgrStateUnload(pGpu, pEngstate, arg0) kgrmgrStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgrmgrStateInitLocked(pGpu, pEngstate) kgrmgrStateInitLocked_DISPATCH(pGpu, pEngstate) +#define kgrmgrStatePreLoad(pGpu, pEngstate, arg0) kgrmgrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgrmgrStatePostUnload(pGpu, pEngstate, arg0) kgrmgrStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgrmgrStateDestroy(pGpu, pEngstate) kgrmgrStateDestroy_DISPATCH(pGpu, pEngstate) +#define kgrmgrStatePreUnload(pGpu, pEngstate, arg0) kgrmgrStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgrmgrStateInitUnlocked(pGpu, pEngstate) kgrmgrStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kgrmgrInitMissing(pGpu, pEngstate) kgrmgrInitMissing_DISPATCH(pGpu, pEngstate) +#define kgrmgrStatePreInitLocked(pGpu, pEngstate) kgrmgrStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kgrmgrStatePreInitUnlocked(pGpu, pEngstate) kgrmgrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kgrmgrGetTunableState(pGpu, pEngstate, pTunableState) kgrmgrGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgrmgrCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kgrmgrCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kgrmgrFreeTunableState(pGpu, pEngstate, pTunableState) kgrmgrFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgrmgrStatePostLoad(pGpu, pEngstate, arg0) kgrmgrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgrmgrAllocTunableState(pGpu, pEngstate, ppTunableState) kgrmgrAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kgrmgrSetTunableState(pGpu, pEngstate, pTunableState) kgrmgrSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgrmgrIsPresent(pGpu, pEngstate) kgrmgrIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kgrmgrConstructEngine_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, ENGDESCRIPTOR arg2); + +static inline NV_STATUS kgrmgrConstructEngine_DISPATCH(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, ENGDESCRIPTOR arg2) { + return arg1->__kgrmgrConstructEngine__(arg0, arg1, arg2); +} + +static inline NV_STATUS kgrmgrReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunableState) { + return pEngstate->__kgrmgrReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgrmgrStateLoad_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return pEngstate->__kgrmgrStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgrmgrStateUnload_DISPATCH(POBJGPU 
pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return pEngstate->__kgrmgrStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgrmgrStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return pEngstate->__kgrmgrStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgrmgrStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return pEngstate->__kgrmgrStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgrmgrStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return pEngstate->__kgrmgrStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void kgrmgrStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + pEngstate->__kgrmgrStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS kgrmgrStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return pEngstate->__kgrmgrStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgrmgrStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return pEngstate->__kgrmgrStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kgrmgrInitMissing_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + pEngstate->__kgrmgrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kgrmgrStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return pEngstate->__kgrmgrStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgrmgrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return pEngstate->__kgrmgrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgrmgrGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunableState) { + return pEngstate->__kgrmgrGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgrmgrCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kgrmgrCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kgrmgrFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunableState) { + pEngstate->__kgrmgrFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgrmgrStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, NvU32 arg0) { + return pEngstate->__kgrmgrStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgrmgrAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void **ppTunableState) { + return pEngstate->__kgrmgrAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kgrmgrSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate, void *pTunableState) { + return pEngstate->__kgrmgrSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kgrmgrIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGraphicsManager *pEngstate) { + return pEngstate->__kgrmgrIsPresent__(pGpu, pEngstate); +} + +void kgrmgrGetGrObjectType_IMPL(NvU32 classNum, NvU32 *pObjectType); +#define kgrmgrGetGrObjectType(classNum, pObjectType) kgrmgrGetGrObjectType_IMPL(classNum, pObjectType) +NvBool kgrmgrIsCtxBufSupported_IMPL(GR_CTX_BUFFER arg0, NvBool bClassSupported2D); +#define 
kgrmgrIsCtxBufSupported(arg0, bClassSupported2D) kgrmgrIsCtxBufSupported_IMPL(arg0, bClassSupported2D) +NvBool kgrmgrIsGlobalCtxBufSupported_IMPL(GR_GLOBALCTX_BUFFER arg0, NvBool bClassSupported2D); +#define kgrmgrIsGlobalCtxBufSupported(arg0, bClassSupported2D) kgrmgrIsGlobalCtxBufSupported_IMPL(arg0, bClassSupported2D) +void kgrmgrCtrlSetEngineID_IMPL(NvU32 engID, NV2080_CTRL_GR_ROUTE_INFO *arg0); +#define kgrmgrCtrlSetEngineID(engID, arg0) kgrmgrCtrlSetEngineID_IMPL(engID, arg0) +void kgrmgrCtrlSetChannelHandle_IMPL(NvHandle hChannel, NV2080_CTRL_GR_ROUTE_INFO *arg0); +#define kgrmgrCtrlSetChannelHandle(hChannel, arg0) kgrmgrCtrlSetChannelHandle_IMPL(hChannel, arg0) +void kgrmgrDestruct_IMPL(struct KernelGraphicsManager *arg0); +#define __nvoc_kgrmgrDestruct(arg0) kgrmgrDestruct_IMPL(arg0) +void kgrmgrSetLegacyKgraphicsStaticInfo_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, struct KernelGraphics *arg2); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline void kgrmgrSetLegacyKgraphicsStaticInfo(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, struct KernelGraphics *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrSetLegacyKgraphicsStaticInfo(arg0, arg1, arg2) kgrmgrSetLegacyKgraphicsStaticInfo_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NV_STATUS kgrmgrCtrlRouteKGR_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvHandle hClient, const NV2080_CTRL_GR_ROUTE_INFO *pGrRouteInfo, struct KernelGraphics **ppKernelGraphics); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NV_STATUS kgrmgrCtrlRouteKGR(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvHandle hClient, const NV2080_CTRL_GR_ROUTE_INFO *pGrRouteInfo, struct KernelGraphics **ppKernelGraphics) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrCtrlRouteKGR(arg0, arg1, hClient, pGrRouteInfo, ppKernelGraphics) kgrmgrCtrlRouteKGR_IMPL(arg0, arg1, hClient, pGrRouteInfo, ppKernelGraphics) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NvU32 kgrmgrGetLegacyGpcMask_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NvU32 kgrmgrGetLegacyGpcMask(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return 0; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetLegacyGpcMask(arg0, arg1) kgrmgrGetLegacyGpcMask_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NvU32 kgrmgrGetLegacyTpcMask_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 gpcId); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NvU32 kgrmgrGetLegacyTpcMask(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 gpcId) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return 0; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetLegacyTpcMask(arg0, arg1, gpcId) kgrmgrGetLegacyTpcMask_IMPL(arg0, arg1, gpcId) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NV_STATUS kgrmgrGetLegacyPpcMask_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 physGpcId, NvU32 *pPpcMask); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NV_STATUS 
kgrmgrGetLegacyPpcMask(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 physGpcId, NvU32 *pPpcMask) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetLegacyPpcMask(arg0, arg1, physGpcId, pPpcMask) kgrmgrGetLegacyPpcMask_IMPL(arg0, arg1, physGpcId, pPpcMask) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NvU32 kgrmgrGetLegacyZcullMask_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 physGpcId); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NvU32 kgrmgrGetLegacyZcullMask(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 physGpcId) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return 0; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetLegacyZcullMask(arg0, arg1, physGpcId) kgrmgrGetLegacyZcullMask_IMPL(arg0, arg1, physGpcId) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NV_STATUS kgrmgrAllocVeidsForGrIdx_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 grIdx, NvU32 gpcCount, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NV_STATUS kgrmgrAllocVeidsForGrIdx(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 grIdx, NvU32 gpcCount, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrAllocVeidsForGrIdx(arg0, arg1, grIdx, gpcCount, arg2) kgrmgrAllocVeidsForGrIdx_IMPL(arg0, arg1, grIdx, gpcCount, arg2) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +void kgrmgrClearVeidsForGrIdx_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 grIdx); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline void kgrmgrClearVeidsForGrIdx(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 grIdx) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrClearVeidsForGrIdx(arg0, arg1, grIdx) kgrmgrClearVeidsForGrIdx_IMPL(arg0, arg1, grIdx) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NV_STATUS kgrmgrGetMaxVeidsPerGpc_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 *pMaxVeidsPerGpc); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NV_STATUS kgrmgrGetMaxVeidsPerGpc(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 *pMaxVeidsPerGpc) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetMaxVeidsPerGpc(arg0, arg1, pMaxVeidsPerGpc) kgrmgrGetMaxVeidsPerGpc_IMPL(arg0, arg1, pMaxVeidsPerGpc) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NV_STATUS kgrmgrGetVeidBaseForGrIdx_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 grIdx, NvU32 *pVeidStart); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NV_STATUS kgrmgrGetVeidBaseForGrIdx(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 grIdx, NvU32 *pVeidStart) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetVeidBaseForGrIdx(arg0, arg1, grIdx, pVeidStart) kgrmgrGetVeidBaseForGrIdx_IMPL(arg0, arg1, grIdx, pVeidStart) +#endif 
//__nvoc_kernel_graphics_manager_h_disabled + +NV_STATUS kgrmgrGetGrIdxForVeid_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 veid, NvU32 *pGrIdx); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NV_STATUS kgrmgrGetGrIdxForVeid(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 veid, NvU32 *pGrIdx) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetGrIdxForVeid(arg0, arg1, veid, pGrIdx) kgrmgrGetGrIdxForVeid_IMPL(arg0, arg1, veid, pGrIdx) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NV_STATUS kgrmgrDiscoverMaxLocalCtxBufInfo_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, struct KernelGraphics *arg2, NvU32 swizzId); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NV_STATUS kgrmgrDiscoverMaxLocalCtxBufInfo(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, struct KernelGraphics *arg2, NvU32 swizzId) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrDiscoverMaxLocalCtxBufInfo(arg0, arg1, arg2, swizzId) kgrmgrDiscoverMaxLocalCtxBufInfo_IMPL(arg0, arg1, arg2, swizzId) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +const CTX_BUF_INFO *kgrmgrGetGlobalCtxBufInfo_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, GR_GLOBALCTX_BUFFER arg2); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline const CTX_BUF_INFO *kgrmgrGetGlobalCtxBufInfo(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, GR_GLOBALCTX_BUFFER arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NULL; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetGlobalCtxBufInfo(arg0, arg1, arg2) kgrmgrGetGlobalCtxBufInfo_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +void kgrmgrSetGlobalCtxBufInfo_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, GR_GLOBALCTX_BUFFER arg2, NvU64 size, NvU64 align, RM_ATTR_PAGE_SIZE attr, NvBool bContiguous); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline void kgrmgrSetGlobalCtxBufInfo(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, GR_GLOBALCTX_BUFFER arg2, NvU64 size, NvU64 align, RM_ATTR_PAGE_SIZE attr, NvBool bContiguous) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrSetGlobalCtxBufInfo(arg0, arg1, arg2, size, align, attr, bContiguous) kgrmgrSetGlobalCtxBufInfo_IMPL(arg0, arg1, arg2, size, align, attr, bContiguous) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +NV_STATUS kgrmgrDiscoverMaxGlobalCtxBufSizes_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, struct KernelGraphics *arg2, NvBool bMemoryPartitioningNeeded); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NV_STATUS kgrmgrDiscoverMaxGlobalCtxBufSizes(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, struct KernelGraphics *arg2, NvBool bMemoryPartitioningNeeded) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrDiscoverMaxGlobalCtxBufSizes(arg0, arg1, arg2, bMemoryPartitioningNeeded) kgrmgrDiscoverMaxGlobalCtxBufSizes_IMPL(arg0, arg1, arg2, bMemoryPartitioningNeeded) +#endif 
//__nvoc_kernel_graphics_manager_h_disabled + +NvU32 kgrmgrGetLegacyGpcTpcCount_IMPL(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 gpcId); +#ifdef __nvoc_kernel_graphics_manager_h_disabled +static inline NvU32 kgrmgrGetLegacyGpcTpcCount(struct OBJGPU *arg0, struct KernelGraphicsManager *arg1, NvU32 gpcId) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsManager was disabled!"); + return 0; +} +#else //__nvoc_kernel_graphics_manager_h_disabled +#define kgrmgrGetLegacyGpcTpcCount(arg0, arg1, gpcId) kgrmgrGetLegacyGpcTpcCount_IMPL(arg0, arg1, gpcId) +#endif //__nvoc_kernel_graphics_manager_h_disabled + +#undef PRIVATE_FIELD + + +#endif // KERNEL_GRAPHICS_MANAGER_H + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_GRAPHICS_MANAGER_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_graphics_nvoc.c b/src/nvidia/generated/g_kernel_graphics_nvoc.c new file mode 100644 index 000000000..1ac0db926 --- /dev/null +++ b/src/nvidia/generated/g_kernel_graphics_nvoc.c @@ -0,0 +1,447 @@ +#define NVOC_KERNEL_GRAPHICS_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_graphics_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xea3fa9 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphics; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_KernelGraphics(KernelGraphics*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelGraphics(KernelGraphics*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelGraphics(KernelGraphics*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelGraphics(KernelGraphics*, RmHalspecOwner* ); +void __nvoc_dtor_KernelGraphics(KernelGraphics*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphics; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphics_KernelGraphics = { + /*pClassDef=*/ &__nvoc_class_def_KernelGraphics, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelGraphics, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphics_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphics, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphics_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphics, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphics_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphics, __nvoc_base_IntrService), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGraphics = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_KernelGraphics_KernelGraphics, + &__nvoc_rtti_KernelGraphics_IntrService, + &__nvoc_rtti_KernelGraphics_OBJENGSTATE, + &__nvoc_rtti_KernelGraphics_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphics = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelGraphics), + /*classId=*/ classId(KernelGraphics), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelGraphics", +#endif + }, + 
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelGraphics, + /*pCastInfo=*/ &__nvoc_castinfo_KernelGraphics, + /*pExportInfo=*/ &__nvoc_export_info_KernelGraphics +}; + +static NV_STATUS __nvoc_thunk_KernelGraphics_engstateConstructEngine(OBJGPU *arg0, struct OBJENGSTATE *arg1, ENGDESCRIPTOR arg2) { + return kgraphicsConstructEngine(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelGraphics_engstateStateInitLocked(OBJGPU *arg0, struct OBJENGSTATE *arg1) { + return kgraphicsStateInitLocked(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelGraphics_engstateStateLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 flags) { + return kgraphicsStateLoad(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_KernelGraphics_engstateStatePreUnload(OBJGPU *pGpu, struct OBJENGSTATE *arg0, NvU32 flags) { + return kgraphicsStatePreUnload(pGpu, (struct KernelGraphics *)(((unsigned char *)arg0) - __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_KernelGraphics_engstateStateUnload(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 flags) { + return kgraphicsStateUnload(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), flags); +} + +static void __nvoc_thunk_KernelGraphics_engstateStateDestroy(OBJGPU *arg0, struct OBJENGSTATE *arg1) { + kgraphicsStateDestroy(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset)); +} + +static NvBool __nvoc_thunk_KernelGraphics_engstateIsPresent(OBJGPU *arg0, struct OBJENGSTATE *arg1) { + return kgraphicsIsPresent(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelGraphics_engstateStatePostLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 flags) { + return kgraphicsStatePostLoad(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), flags); +} + +static void __nvoc_thunk_KernelGraphics_intrservRegisterIntrService(OBJGPU *arg0, struct IntrService *arg1, IntrServiceRecord arg2[155]) { + kgraphicsRegisterIntrService(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_IntrService.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelGraphics_intrservServiceNotificationInterrupt(OBJGPU *arg0, struct IntrService *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) { + return kgraphicsServiceNotificationInterrupt(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_IntrService.offset), arg2); +} + +static NvBool __nvoc_thunk_KernelGraphics_intrservClearInterrupt(OBJGPU *arg0, struct IntrService *arg1, IntrServiceClearInterruptArguments *arg2) { + return kgraphicsClearInterrupt(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_IntrService.offset), arg2); +} + +static NvU32 __nvoc_thunk_KernelGraphics_intrservServiceInterrupt(OBJGPU *arg0, struct IntrService *arg1, IntrServiceServiceInterruptArguments *arg2) { + return kgraphicsServiceInterrupt(arg0, (struct KernelGraphics *)(((unsigned char *)arg1) - __nvoc_rtti_KernelGraphics_IntrService.offset), arg2); +} + +static 
NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsReconcileTunableState(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsStatePreLoad(POBJGPU pGpu, struct KernelGraphics *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsStatePostUnload(POBJGPU pGpu, struct KernelGraphics *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsStateInitUnlocked(POBJGPU pGpu, struct KernelGraphics *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kgraphicsInitMissing(POBJGPU pGpu, struct KernelGraphics *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsStatePreInitLocked(POBJGPU pGpu, struct KernelGraphics *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsStatePreInitUnlocked(POBJGPU pGpu, struct KernelGraphics *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsGetTunableState(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsCompareTunableState(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kgraphicsFreeTunableState(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsAllocTunableState(POBJGPU pGpu, struct KernelGraphics *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgraphicsSetTunableState(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGraphics_OBJENGSTATE.offset), pTunableState); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphics = +{ + /*numEntries=*/ 0, + 
/*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_KernelGraphics(KernelGraphics *pThis) { + __nvoc_kgraphicsDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelGraphics(KernelGraphics *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal field -- bCtxswLoggingSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bCtxswLoggingSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bCtxswLoggingSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bDeferContextInit + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->bDeferContextInit = ((NvBool)(0 != 0)); + } + else if (0) + { + } + + // Hal field -- bPerSubcontextContextHeaderSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bPerSubcontextContextHeaderSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bPerSubcontextContextHeaderSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bSetContextBuffersGPUPrivileged + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bSetContextBuffersGPUPrivileged = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bSetContextBuffersGPUPrivileged = ((NvBool)(0 != 0)); + } + + // Hal field -- bUcodeSupportsPrivAccessMap + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bUcodeSupportsPrivAccessMap = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bUcodeSupportsPrivAccessMap = ((NvBool)(0 != 0)); + } + + // Hal field -- bRtvCbSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bRtvCbSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bRtvCbSupported = ((NvBool)(0 != 0)); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* ); +NV_STATUS __nvoc_ctor_KernelGraphics(KernelGraphics *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = 
__nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphics_fail_OBJENGSTATE; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphics_fail_IntrService; + __nvoc_init_dataField_KernelGraphics(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelGraphics_exit; // Success + +__nvoc_ctor_KernelGraphics_fail_IntrService: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_KernelGraphics_fail_OBJENGSTATE: +__nvoc_ctor_KernelGraphics_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelGraphics_1(KernelGraphics *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kgraphicsConstructEngine__ = &kgraphicsConstructEngine_IMPL; + + pThis->__kgraphicsStateInitLocked__ = &kgraphicsStateInitLocked_IMPL; + + pThis->__kgraphicsStateLoad__ = &kgraphicsStateLoad_IMPL; + + pThis->__kgraphicsStatePreUnload__ = &kgraphicsStatePreUnload_IMPL; + + pThis->__kgraphicsStateUnload__ = &kgraphicsStateUnload_IMPL; + + pThis->__kgraphicsStateDestroy__ = &kgraphicsStateDestroy_IMPL; + + pThis->__kgraphicsIsPresent__ = &kgraphicsIsPresent_IMPL; + + pThis->__kgraphicsStatePostLoad__ = &kgraphicsStatePostLoad_IMPL; + + pThis->__kgraphicsRegisterIntrService__ = &kgraphicsRegisterIntrService_IMPL; + + pThis->__kgraphicsServiceNotificationInterrupt__ = &kgraphicsServiceNotificationInterrupt_IMPL; + + // Hal function -- kgraphicsClearInterrupt + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgraphicsClearInterrupt__ = &kgraphicsClearInterrupt_GP100; + } + else if (0) + { + } + + // Hal function -- kgraphicsServiceInterrupt + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgraphicsServiceInterrupt__ = &kgraphicsServiceInterrupt_GP100; + } + else if (0) + { + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelGraphics_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelGraphics_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelGraphics_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreUnload__ = &__nvoc_thunk_KernelGraphics_engstateStatePreUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelGraphics_engstateStateUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelGraphics_engstateStateDestroy; + + pThis->__nvoc_base_OBJENGSTATE.__engstateIsPresent__ = &__nvoc_thunk_KernelGraphics_engstateIsPresent; + + 
pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__ = &__nvoc_thunk_KernelGraphics_engstateStatePostLoad; + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_KernelGraphics_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceNotificationInterrupt__ = &__nvoc_thunk_KernelGraphics_intrservServiceNotificationInterrupt; + + pThis->__nvoc_base_IntrService.__intrservClearInterrupt__ = &__nvoc_thunk_KernelGraphics_intrservClearInterrupt; + + pThis->__nvoc_base_IntrService.__intrservServiceInterrupt__ = &__nvoc_thunk_KernelGraphics_intrservServiceInterrupt; + + pThis->__kgraphicsReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsReconcileTunableState; + + pThis->__kgraphicsStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsStatePreLoad; + + pThis->__kgraphicsStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsStatePostUnload; + + pThis->__kgraphicsStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsStateInitUnlocked; + + pThis->__kgraphicsInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsInitMissing; + + pThis->__kgraphicsStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsStatePreInitLocked; + + pThis->__kgraphicsStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsStatePreInitUnlocked; + + pThis->__kgraphicsGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsGetTunableState; + + pThis->__kgraphicsCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsCompareTunableState; + + pThis->__kgraphicsFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsFreeTunableState; + + pThis->__kgraphicsAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsAllocTunableState; + + pThis->__kgraphicsSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgraphicsSetTunableState; +} + +void __nvoc_init_funcTable_KernelGraphics(KernelGraphics *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelGraphics_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_IntrService(IntrService*); +void __nvoc_init_KernelGraphics(KernelGraphics *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelGraphics = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService); + __nvoc_init_funcTable_KernelGraphics(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelGraphics(KernelGraphics **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelGraphics *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelGraphics)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelGraphics)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelGraphics); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, 
NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelGraphics(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelGraphics(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelGraphics_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelGraphics_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphics(KernelGraphics **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelGraphics(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_graphics_nvoc.h b/src/nvidia/generated/g_kernel_graphics_nvoc.h new file mode 100644 index 000000000..8be4b3fa6 --- /dev/null +++ b/src/nvidia/generated/g_kernel_graphics_nvoc.h @@ -0,0 +1,753 @@ +#ifndef _G_KERNEL_GRAPHICS_NVOC_H_ +#define _G_KERNEL_GRAPHICS_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_graphics_nvoc.h" + +#ifndef _KERNEL_GRAPHICS_H_ +#define _KERNEL_GRAPHICS_H_ + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/gpu.h" +#include "kernel/gpu/gr/kernel_graphics_object.h" +#include "kernel/gpu/gr/kernel_graphics_context.h" +#include "kernel/mem_mgr/ctx_buf_pool.h" +#include "kernel/gpu/gr/fecs_event_list.h" +#include "eventbufferproducer.h" +#include "kernel/gpu/intr/intr_service.h" + +#include "ctrl/ctrl2080/ctrl2080internal.h" + + +#define GR_VERTEX_CACHE_SIZE 16 + +struct KGRAPHICS_STATIC_INFO; +typedef struct KGRAPHICS_STATIC_INFO KGRAPHICS_STATIC_INFO; +typedef struct KGRAPHICS_FECS_TRACE_INFO KGRAPHICS_FECS_TRACE_INFO; +typedef struct KGRAPHICS_GLOBAL_CTX_BUFFERS_INFO KGRAPHICS_GLOBAL_CTX_BUFFERS_INFO; + +/*! + * Static info retrieved from Physical RM detailing the configuration of the + * visible portions of this graphics engine. This data is mostly used to service + * control calls which export data from RM. 
+ */ +struct KGRAPHICS_STATIC_INFO +{ + // + // @ref NV0080_CTRL_CMD_GR_GET_INFO + // @ref NV0080_CTRL_CMD_GR_GET_INFO_V2 + // @ref NV2080_CTRL_CMD_GR_GET_INFO + // @ref NV2080_CTRL_CMD_GR_GET_INFO_V2 + // + NV2080_CTRL_INTERNAL_STATIC_GR_INFO *pGrInfo; + + // + // @ref NV0080_CTRL_CMD_GR_GET_CAPS + // @ref NV0080_CTRL_CMD_GR_GET_CAPS_V2 + // @ref NV2080_CTRL_CMD_GR_GET_CAPS + // @ref NV2080_CTRL_CMD_GR_GET_CAPS_V2 + // + NV2080_CTRL_INTERNAL_STATIC_GR_CAPS grCaps; + + // + // @ref NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER + // @ref NV2080_CTRL_CMD_GR_GET_SM_TO_GPC_TPC_MAPPINGS + // + NV2080_CTRL_INTERNAL_STATIC_GR_GLOBAL_SM_ORDER globalSmOrder; + + // + // @ref NV2080_CTRL_CMD_GR_GET_GPC_MASK + // @ref NV2080_CTRL_CMD_GR_GET_TPC_MASK + // @ref NV2080_CTRL_CMD_GR_GET_PHYS_GPC_MASK + // + NV2080_CTRL_INTERNAL_STATIC_GR_FLOORSWEEPING_MASKS floorsweepingMasks; + + // @ref NV2080_CTRL_CMD_GR_GET_PPC_MASK + NV2080_CTRL_INTERNAL_STATIC_GR_PPC_MASKS *pPpcMasks; + + // @ref NV2080_CTRL_CMD_GR_GET_ZCULL_INFO + NV2080_CTRL_INTERNAL_STATIC_GR_ZCULL_INFO *pZcullInfo; + + // @ref NV2080_CTRL_CMD_GR_GET_ROP_INFO + NV2080_CTRL_INTERNAL_STATIC_GR_ROP_INFO *pRopInfo; + + // + // @ref NV2080_CTRL_CMD_GR_GET_ENGINE_CONTEXT_PROPERTIES + // @ref NV2080_CTRL_CMD_GR_GET_ATTRIBUTE_BUFFER_SIZE + // + NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO *pContextBuffersInfo; + + // @ref NV2080_CTRL_CMD_GR_GET_SM_ISSUE_RATE_MODIFIER + NV2080_CTRL_INTERNAL_STATIC_GR_SM_ISSUE_RATE_MODIFIER *pSmIssueRateModifier; + + // + // @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE + // @ref NV2080_CTRL_CMD_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE + // + NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE fecsRecordSize; + + // + // @ref NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES + // @ref NV2080_CTRL_CMD_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES + // + NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES *pFecsTraceDefines; + + // @ref bPerSubcontextContextHeaderSupported + NV2080_CTRL_INTERNAL_STATIC_GR_PDB_PROPERTIES pdbTable; +}; + +struct KGRAPHICS_GLOBAL_CTX_BUFFERS_INFO +{ + NvU32 globalCtxBufferSize; + + GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers; + NvBool bSizeAligned[GR_GLOBALCTX_BUFFER_COUNT]; + GR_BUFFER_ATTR globalCtxAttr[GR_GLOBALCTX_BUFFER_COUNT]; + GR_BUFFER_ATTR localCtxAttr[GR_GLOBALCTX_BUFFER_COUNT]; + GR_BUFFER_ATTR vfGlobalCtxAttr[GR_GLOBALCTX_BUFFER_COUNT]; +}; + +// Opaque forward declarations +typedef struct KGRAPHICS_PRIVATE_DATA KGRAPHICS_PRIVATE_DATA; +typedef struct KGRAPHICS_FECS_TRACE_INFO KGRAPHICS_FECS_TRACE_INFO; + +/*! + * KernelGraphics is a logical abstraction of the GPU Graphics Engine. The + * Public API of the Graphics Engine is exposed through this object, and any + * interfaces which do not manage the underlying Graphics hardware can be + * managed by this object. 
+ */ +#ifdef NVOC_KERNEL_GRAPHICS_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelGraphics { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct IntrService __nvoc_base_IntrService; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct IntrService *__nvoc_pbase_IntrService; + struct KernelGraphics *__nvoc_pbase_KernelGraphics; + NV_STATUS (*__kgraphicsConstructEngine__)(OBJGPU *, struct KernelGraphics *, ENGDESCRIPTOR); + NV_STATUS (*__kgraphicsStateInitLocked__)(OBJGPU *, struct KernelGraphics *); + NV_STATUS (*__kgraphicsStateLoad__)(OBJGPU *, struct KernelGraphics *, NvU32); + NV_STATUS (*__kgraphicsStatePreUnload__)(OBJGPU *, struct KernelGraphics *, NvU32); + NV_STATUS (*__kgraphicsStateUnload__)(OBJGPU *, struct KernelGraphics *, NvU32); + void (*__kgraphicsStateDestroy__)(OBJGPU *, struct KernelGraphics *); + NvBool (*__kgraphicsIsPresent__)(OBJGPU *, struct KernelGraphics *); + NV_STATUS (*__kgraphicsStatePostLoad__)(OBJGPU *, struct KernelGraphics *, NvU32); + void (*__kgraphicsRegisterIntrService__)(OBJGPU *, struct KernelGraphics *, IntrServiceRecord *); + NV_STATUS (*__kgraphicsServiceNotificationInterrupt__)(OBJGPU *, struct KernelGraphics *, IntrServiceServiceNotificationInterruptArguments *); + NvBool (*__kgraphicsClearInterrupt__)(OBJGPU *, struct KernelGraphics *, IntrServiceClearInterruptArguments *); + NvU32 (*__kgraphicsServiceInterrupt__)(OBJGPU *, struct KernelGraphics *, IntrServiceServiceInterruptArguments *); + NV_STATUS (*__kgraphicsReconcileTunableState__)(POBJGPU, struct KernelGraphics *, void *); + NV_STATUS (*__kgraphicsStatePreLoad__)(POBJGPU, struct KernelGraphics *, NvU32); + NV_STATUS (*__kgraphicsStatePostUnload__)(POBJGPU, struct KernelGraphics *, NvU32); + NV_STATUS (*__kgraphicsStateInitUnlocked__)(POBJGPU, struct KernelGraphics *); + void (*__kgraphicsInitMissing__)(POBJGPU, struct KernelGraphics *); + NV_STATUS (*__kgraphicsStatePreInitLocked__)(POBJGPU, struct KernelGraphics *); + NV_STATUS (*__kgraphicsStatePreInitUnlocked__)(POBJGPU, struct KernelGraphics *); + NV_STATUS (*__kgraphicsGetTunableState__)(POBJGPU, struct KernelGraphics *, void *); + NV_STATUS (*__kgraphicsCompareTunableState__)(POBJGPU, struct KernelGraphics *, void *, void *); + void (*__kgraphicsFreeTunableState__)(POBJGPU, struct KernelGraphics *, void *); + NV_STATUS (*__kgraphicsAllocTunableState__)(POBJGPU, struct KernelGraphics *, void **); + NV_STATUS (*__kgraphicsSetTunableState__)(POBJGPU, struct KernelGraphics *, void *); + NvBool bCtxswLoggingSupported; + NvBool bIntrDrivenCtxswLoggingEnabled; + NvBool bBottomHalfCtxswLoggingEnabled; + NvBool bDeferContextInit; + NvBool bPerSubcontextContextHeaderSupported; + NvBool bSetContextBuffersGPUPrivileged; + NvBool bUcodeSupportsPrivAccessMap; + NvBool bRtvCbSupported; + NvU32 instance; + KGRAPHICS_PRIVATE_DATA *pPrivate; + NvBool bCollectingDeferredStaticData; + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo; + struct KGRAPHICS_GLOBAL_CTX_BUFFERS_INFO globalCtxBuffersInfo; + struct CTX_BUF_POOL_INFO *pCtxBufPool; + CTX_BUF_INFO maxCtxBufSize[9]; + GR_BUFFER_ATTR ctxAttr[9]; +}; + +#ifndef __NVOC_CLASS_KernelGraphics_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphics_TYPEDEF__ +typedef struct KernelGraphics KernelGraphics; +#endif /* __NVOC_CLASS_KernelGraphics_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphics +#define __nvoc_class_id_KernelGraphics 0xea3fa9 +#endif /* 
__nvoc_class_id_KernelGraphics */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphics; + +#define __staticCast_KernelGraphics(pThis) \ + ((pThis)->__nvoc_pbase_KernelGraphics) + +#ifdef __nvoc_kernel_graphics_h_disabled +#define __dynamicCast_KernelGraphics(pThis) ((KernelGraphics*)NULL) +#else //__nvoc_kernel_graphics_h_disabled +#define __dynamicCast_KernelGraphics(pThis) \ + ((KernelGraphics*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGraphics))) +#endif //__nvoc_kernel_graphics_h_disabled + +#define PDB_PROP_KGRAPHICS_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KGRAPHICS_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphics(KernelGraphics**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelGraphics(KernelGraphics**, Dynamic*, NvU32); +#define __objCreate_KernelGraphics(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelGraphics((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kgraphicsConstructEngine(arg0, arg1, arg2) kgraphicsConstructEngine_DISPATCH(arg0, arg1, arg2) +#define kgraphicsStateInitLocked(arg0, arg1) kgraphicsStateInitLocked_DISPATCH(arg0, arg1) +#define kgraphicsStateLoad(arg0, arg1, flags) kgraphicsStateLoad_DISPATCH(arg0, arg1, flags) +#define kgraphicsStatePreUnload(pGpu, arg0, flags) kgraphicsStatePreUnload_DISPATCH(pGpu, arg0, flags) +#define kgraphicsStateUnload(arg0, arg1, flags) kgraphicsStateUnload_DISPATCH(arg0, arg1, flags) +#define kgraphicsStateDestroy(arg0, arg1) kgraphicsStateDestroy_DISPATCH(arg0, arg1) +#define kgraphicsIsPresent(arg0, arg1) kgraphicsIsPresent_DISPATCH(arg0, arg1) +#define kgraphicsStatePostLoad(arg0, arg1, flags) kgraphicsStatePostLoad_DISPATCH(arg0, arg1, flags) +#define kgraphicsRegisterIntrService(arg0, arg1, arg2) kgraphicsRegisterIntrService_DISPATCH(arg0, arg1, arg2) +#define kgraphicsServiceNotificationInterrupt(arg0, arg1, arg2) kgraphicsServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2) +#define kgraphicsClearInterrupt(arg0, arg1, arg2) kgraphicsClearInterrupt_DISPATCH(arg0, arg1, arg2) +#define kgraphicsClearInterrupt_HAL(arg0, arg1, arg2) kgraphicsClearInterrupt_DISPATCH(arg0, arg1, arg2) +#define kgraphicsServiceInterrupt(arg0, arg1, arg2) kgraphicsServiceInterrupt_DISPATCH(arg0, arg1, arg2) +#define kgraphicsServiceInterrupt_HAL(arg0, arg1, arg2) kgraphicsServiceInterrupt_DISPATCH(arg0, arg1, arg2) +#define kgraphicsReconcileTunableState(pGpu, pEngstate, pTunableState) kgraphicsReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgraphicsStatePreLoad(pGpu, pEngstate, arg0) kgraphicsStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgraphicsStatePostUnload(pGpu, pEngstate, arg0) kgraphicsStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgraphicsStateInitUnlocked(pGpu, pEngstate) kgraphicsStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kgraphicsInitMissing(pGpu, pEngstate) kgraphicsInitMissing_DISPATCH(pGpu, pEngstate) +#define kgraphicsStatePreInitLocked(pGpu, pEngstate) kgraphicsStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kgraphicsStatePreInitUnlocked(pGpu, pEngstate) kgraphicsStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kgraphicsGetTunableState(pGpu, pEngstate, pTunableState) kgraphicsGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgraphicsCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kgraphicsCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define 
kgraphicsFreeTunableState(pGpu, pEngstate, pTunableState) kgraphicsFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgraphicsAllocTunableState(pGpu, pEngstate, ppTunableState) kgraphicsAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kgraphicsSetTunableState(pGpu, pEngstate, pTunableState) kgraphicsSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +static inline NvBool kgraphicsShouldForceMainCtxContiguity_cbe027(OBJGPU *arg0, struct KernelGraphics *arg1) { + return ((NvBool)(0 == 0)); +} + +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NvBool kgraphicsShouldForceMainCtxContiguity(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsShouldForceMainCtxContiguity(arg0, arg1) kgraphicsShouldForceMainCtxContiguity_cbe027(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +#define kgraphicsShouldForceMainCtxContiguity_HAL(arg0, arg1) kgraphicsShouldForceMainCtxContiguity(arg0, arg1) + +NV_STATUS kgraphicsAllocKgraphicsBuffers_KERNEL(OBJGPU *arg0, struct KernelGraphics *arg1, struct KernelGraphicsContext *arg2, struct KernelChannel *arg3); + +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsAllocKgraphicsBuffers(OBJGPU *arg0, struct KernelGraphics *arg1, struct KernelGraphicsContext *arg2, struct KernelChannel *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsAllocKgraphicsBuffers(arg0, arg1, arg2, arg3) kgraphicsAllocKgraphicsBuffers_KERNEL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_h_disabled + +#define kgraphicsAllocKgraphicsBuffers_HAL(arg0, arg1, arg2, arg3) kgraphicsAllocKgraphicsBuffers(arg0, arg1, arg2, arg3) + +NV_STATUS kgraphicsAllocGrGlobalCtxBuffers_TU102(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 gfid, struct KernelGraphicsContext *arg2); + +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsAllocGrGlobalCtxBuffers(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 gfid, struct KernelGraphicsContext *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsAllocGrGlobalCtxBuffers(arg0, arg1, gfid, arg2) kgraphicsAllocGrGlobalCtxBuffers_TU102(arg0, arg1, gfid, arg2) +#endif //__nvoc_kernel_graphics_h_disabled + +#define kgraphicsAllocGrGlobalCtxBuffers_HAL(arg0, arg1, gfid, arg2) kgraphicsAllocGrGlobalCtxBuffers(arg0, arg1, gfid, arg2) + +NV_STATUS kgraphicsAllocGlobalCtxBuffers_GP100(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 gfid); + +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsAllocGlobalCtxBuffers(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsAllocGlobalCtxBuffers(arg0, arg1, gfid) kgraphicsAllocGlobalCtxBuffers_GP100(arg0, arg1, gfid) +#endif //__nvoc_kernel_graphics_h_disabled + +#define kgraphicsAllocGlobalCtxBuffers_HAL(arg0, arg1, gfid) kgraphicsAllocGlobalCtxBuffers(arg0, arg1, gfid) + +NV_STATUS kgraphicsLoadStaticInfo_KERNEL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 swizzId); + +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsLoadStaticInfo(OBJGPU *arg0, struct 
KernelGraphics *arg1, NvU32 swizzId) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsLoadStaticInfo(arg0, arg1, swizzId) kgraphicsLoadStaticInfo_KERNEL(arg0, arg1, swizzId) +#endif //__nvoc_kernel_graphics_h_disabled + +#define kgraphicsLoadStaticInfo_HAL(arg0, arg1, swizzId) kgraphicsLoadStaticInfo(arg0, arg1, swizzId) + +static inline void kgraphicsNonstallIntrCheckAndClear_b3696a(OBJGPU *arg0, struct KernelGraphics *arg1, struct THREAD_STATE_NODE *arg2) { + return; +} + +#ifdef __nvoc_kernel_graphics_h_disabled +static inline void kgraphicsNonstallIntrCheckAndClear(OBJGPU *arg0, struct KernelGraphics *arg1, struct THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsNonstallIntrCheckAndClear(arg0, arg1, arg2) kgraphicsNonstallIntrCheckAndClear_b3696a(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_h_disabled + +#define kgraphicsNonstallIntrCheckAndClear_HAL(arg0, arg1, arg2) kgraphicsNonstallIntrCheckAndClear(arg0, arg1, arg2) + +void kgraphicsInitFecsRegistryOverrides_GP100(OBJGPU *arg0, struct KernelGraphics *arg1); + +#ifdef __nvoc_kernel_graphics_h_disabled +static inline void kgraphicsInitFecsRegistryOverrides(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsInitFecsRegistryOverrides(arg0, arg1) kgraphicsInitFecsRegistryOverrides_GP100(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +#define kgraphicsInitFecsRegistryOverrides_HAL(arg0, arg1) kgraphicsInitFecsRegistryOverrides(arg0, arg1) + +NvBool kgraphicsIsUnrestrictedAccessMapSupported_PF(OBJGPU *arg0, struct KernelGraphics *arg1); + +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NvBool kgraphicsIsUnrestrictedAccessMapSupported(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsIsUnrestrictedAccessMapSupported(arg0, arg1) kgraphicsIsUnrestrictedAccessMapSupported_PF(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +#define kgraphicsIsUnrestrictedAccessMapSupported_HAL(arg0, arg1) kgraphicsIsUnrestrictedAccessMapSupported(arg0, arg1) + +NV_STATUS kgraphicsConstructEngine_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, ENGDESCRIPTOR arg2); + +static inline NV_STATUS kgraphicsConstructEngine_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, ENGDESCRIPTOR arg2) { + return arg1->__kgraphicsConstructEngine__(arg0, arg1, arg2); +} + +NV_STATUS kgraphicsStateInitLocked_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); + +static inline NV_STATUS kgraphicsStateInitLocked_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1) { + return arg1->__kgraphicsStateInitLocked__(arg0, arg1); +} + +NV_STATUS kgraphicsStateLoad_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 flags); + +static inline NV_STATUS kgraphicsStateLoad_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 flags) { + return arg1->__kgraphicsStateLoad__(arg0, arg1, flags); +} + +NV_STATUS kgraphicsStatePreUnload_IMPL(OBJGPU *pGpu, struct KernelGraphics *arg0, NvU32 flags); + +static inline NV_STATUS kgraphicsStatePreUnload_DISPATCH(OBJGPU *pGpu, struct KernelGraphics *arg0, NvU32 flags) { + return arg0->__kgraphicsStatePreUnload__(pGpu, arg0, flags); +} + +NV_STATUS 
kgraphicsStateUnload_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 flags); + +static inline NV_STATUS kgraphicsStateUnload_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 flags) { + return arg1->__kgraphicsStateUnload__(arg0, arg1, flags); +} + +void kgraphicsStateDestroy_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); + +static inline void kgraphicsStateDestroy_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1) { + arg1->__kgraphicsStateDestroy__(arg0, arg1); +} + +NvBool kgraphicsIsPresent_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); + +static inline NvBool kgraphicsIsPresent_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1) { + return arg1->__kgraphicsIsPresent__(arg0, arg1); +} + +NV_STATUS kgraphicsStatePostLoad_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 flags); + +static inline NV_STATUS kgraphicsStatePostLoad_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 flags) { + return arg1->__kgraphicsStatePostLoad__(arg0, arg1, flags); +} + +void kgraphicsRegisterIntrService_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceRecord arg2[155]); + +static inline void kgraphicsRegisterIntrService_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceRecord arg2[155]) { + arg1->__kgraphicsRegisterIntrService__(arg0, arg1, arg2); +} + +NV_STATUS kgraphicsServiceNotificationInterrupt_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceServiceNotificationInterruptArguments *arg2); + +static inline NV_STATUS kgraphicsServiceNotificationInterrupt_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) { + return arg1->__kgraphicsServiceNotificationInterrupt__(arg0, arg1, arg2); +} + +NvBool kgraphicsClearInterrupt_GP100(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceClearInterruptArguments *arg2); + +static inline NvBool kgraphicsClearInterrupt_5baef9(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceClearInterruptArguments *arg2) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NvBool kgraphicsClearInterrupt_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceClearInterruptArguments *arg2) { + return arg1->__kgraphicsClearInterrupt__(arg0, arg1, arg2); +} + +NvU32 kgraphicsServiceInterrupt_GP100(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceServiceInterruptArguments *arg2); + +static inline NvU32 kgraphicsServiceInterrupt_5baef9(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceServiceInterruptArguments *arg2) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NvU32 kgraphicsServiceInterrupt_DISPATCH(OBJGPU *arg0, struct KernelGraphics *arg1, IntrServiceServiceInterruptArguments *arg2) { + return arg1->__kgraphicsServiceInterrupt__(arg0, arg1, arg2); +} + +static inline NV_STATUS kgraphicsReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunableState) { + return pEngstate->__kgraphicsReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgraphicsStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, NvU32 arg0) { + return pEngstate->__kgraphicsStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgraphicsStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, NvU32 arg0) { + return pEngstate->__kgraphicsStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgraphicsStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate) { + return 
pEngstate->__kgraphicsStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kgraphicsInitMissing_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate) { + pEngstate->__kgraphicsInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kgraphicsStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate) { + return pEngstate->__kgraphicsStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgraphicsStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate) { + return pEngstate->__kgraphicsStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgraphicsGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunableState) { + return pEngstate->__kgraphicsGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgraphicsCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kgraphicsCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kgraphicsFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunableState) { + pEngstate->__kgraphicsFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgraphicsAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, void **ppTunableState) { + return pEngstate->__kgraphicsAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kgraphicsSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelGraphics *pEngstate, void *pTunableState) { + return pEngstate->__kgraphicsSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline KGRAPHICS_FECS_TRACE_INFO *kgraphicsGetFecsTraceInfo(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->pFecsTraceInfo; +} + +static inline NvU32 kgraphicsGetInstance(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->instance; +} + +static inline NvBool kgraphicsIsCtxswLoggingSupported(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->bCtxswLoggingSupported; +} + +static inline void kgraphicsSetCtxswLoggingSupported(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics, NvBool bProp) { + pKernelGraphics->bCtxswLoggingSupported = bProp; +} + +static inline NvBool kgraphicsIsIntrDrivenCtxswLoggingEnabled(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->bIntrDrivenCtxswLoggingEnabled; +} + +static inline void kgraphicsSetIntrDrivenCtxswLoggingEnabled(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics, NvBool bProp) { + pKernelGraphics->bIntrDrivenCtxswLoggingEnabled = bProp; +} + +static inline NvBool kgraphicsIsBottomHalfCtxswLoggingEnabled(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->bBottomHalfCtxswLoggingEnabled; +} + +static inline void kgraphicsSetBottomHalfCtxswLoggingEnabled(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics, NvBool bProp) { + pKernelGraphics->bBottomHalfCtxswLoggingEnabled = bProp; +} + +static inline NvBool kgraphicsShouldDeferContextInit(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->bDeferContextInit; +} + +static inline NvBool kgraphicsIsPerSubcontextContextHeaderSupported(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->bPerSubcontextContextHeaderSupported; +} + +static inline void 
kgraphicsSetPerSubcontextContextHeaderSupported(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics, NvBool bProp) { + pKernelGraphics->bPerSubcontextContextHeaderSupported = bProp; +} + +static inline NvBool kgraphicsShouldSetContextBuffersGPUPrivileged(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->bSetContextBuffersGPUPrivileged; +} + +static inline NvBool kgraphicsDoesUcodeSupportPrivAccessMap(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->bUcodeSupportsPrivAccessMap; +} + +static inline NvBool kgraphicsIsRtvCbSupported(OBJGPU *pGpu, struct KernelGraphics *pKernelGraphics) { + return pKernelGraphics->bRtvCbSupported; +} + +void kgraphicsDestruct_IMPL(struct KernelGraphics *arg0); +#define __nvoc_kgraphicsDestruct(arg0) kgraphicsDestruct_IMPL(arg0) +void kgraphicsInvalidateStaticInfo_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline void kgraphicsInvalidateStaticInfo(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsInvalidateStaticInfo(arg0, arg1) kgraphicsInvalidateStaticInfo_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +const CTX_BUF_INFO *kgraphicsGetCtxBufferInfo_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, GR_CTX_BUFFER arg2); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline const CTX_BUF_INFO *kgraphicsGetCtxBufferInfo(OBJGPU *arg0, struct KernelGraphics *arg1, GR_CTX_BUFFER arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NULL; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetCtxBufferInfo(arg0, arg1, arg2) kgraphicsGetCtxBufferInfo_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_h_disabled + +void kgraphicsSetCtxBufferInfo_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, GR_CTX_BUFFER arg2, NvU64 size, NvU64 align, RM_ATTR_PAGE_SIZE attr, NvBool bContiguous); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline void kgraphicsSetCtxBufferInfo(OBJGPU *arg0, struct KernelGraphics *arg1, GR_CTX_BUFFER arg2, NvU64 size, NvU64 align, RM_ATTR_PAGE_SIZE attr, NvBool bContiguous) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsSetCtxBufferInfo(arg0, arg1, arg2, size, align, attr, bContiguous) kgraphicsSetCtxBufferInfo_IMPL(arg0, arg1, arg2, size, align, attr, bContiguous) +#endif //__nvoc_kernel_graphics_h_disabled + +void kgraphicsClearCtxBufferInfo_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline void kgraphicsClearCtxBufferInfo(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsClearCtxBufferInfo(arg0, arg1) kgraphicsClearCtxBufferInfo_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +NV_STATUS kgraphicsInitCtxBufPool_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, struct Heap *arg2); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsInitCtxBufPool(OBJGPU *arg0, struct KernelGraphics *arg1, struct Heap *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsInitCtxBufPool(arg0, arg1, arg2) kgraphicsInitCtxBufPool_IMPL(arg0, arg1, arg2) +#endif 
//__nvoc_kernel_graphics_h_disabled + +struct CTX_BUF_POOL_INFO *kgraphicsGetCtxBufPool_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline struct CTX_BUF_POOL_INFO *kgraphicsGetCtxBufPool(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NULL; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetCtxBufPool(arg0, arg1) kgraphicsGetCtxBufPool_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +void kgraphicsDestroyCtxBufPool_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline void kgraphicsDestroyCtxBufPool(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsDestroyCtxBufPool(arg0, arg1) kgraphicsDestroyCtxBufPool_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +GR_GLOBALCTX_BUFFERS *kgraphicsGetGlobalCtxBuffers_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 gfid); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline GR_GLOBALCTX_BUFFERS *kgraphicsGetGlobalCtxBuffers(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NULL; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetGlobalCtxBuffers(arg0, arg1, gfid) kgraphicsGetGlobalCtxBuffers_IMPL(arg0, arg1, gfid) +#endif //__nvoc_kernel_graphics_h_disabled + +NvBool kgraphicsIsGlobalCtxBufferSizeAligned_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, GR_GLOBALCTX_BUFFER arg2); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NvBool kgraphicsIsGlobalCtxBufferSizeAligned(OBJGPU *arg0, struct KernelGraphics *arg1, GR_GLOBALCTX_BUFFER arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsIsGlobalCtxBufferSizeAligned(arg0, arg1, arg2) kgraphicsIsGlobalCtxBufferSizeAligned_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_h_disabled + +const GR_BUFFER_ATTR *kgraphicsGetGlobalPrivAccessMapAttr_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline const GR_BUFFER_ATTR *kgraphicsGetGlobalPrivAccessMapAttr(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NULL; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetGlobalPrivAccessMapAttr(arg0, arg1) kgraphicsGetGlobalPrivAccessMapAttr_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +NV_STATUS kgraphicsMapCtxBuffer_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, MEMORY_DESCRIPTOR *arg2, struct OBJVASPACE *arg3, VA_LIST *arg4, NvBool bAlignSize, NvBool bIsReadOnly); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsMapCtxBuffer(OBJGPU *arg0, struct KernelGraphics *arg1, MEMORY_DESCRIPTOR *arg2, struct OBJVASPACE *arg3, VA_LIST *arg4, NvBool bAlignSize, NvBool bIsReadOnly) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsMapCtxBuffer(arg0, arg1, arg2, arg3, arg4, bAlignSize, bIsReadOnly) kgraphicsMapCtxBuffer_IMPL(arg0, arg1, arg2, arg3, arg4, bAlignSize, bIsReadOnly) +#endif //__nvoc_kernel_graphics_h_disabled + +void kgraphicsUnmapCtxBuffer_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, struct 
OBJVASPACE *arg2, VA_LIST *arg3); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline void kgraphicsUnmapCtxBuffer(OBJGPU *arg0, struct KernelGraphics *arg1, struct OBJVASPACE *arg2, VA_LIST *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsUnmapCtxBuffer(arg0, arg1, arg2, arg3) kgraphicsUnmapCtxBuffer_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_graphics_h_disabled + +void kgraphicsFreeGlobalCtxBuffers_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 gfid); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline void kgraphicsFreeGlobalCtxBuffers(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsFreeGlobalCtxBuffers(arg0, arg1, gfid) kgraphicsFreeGlobalCtxBuffers_IMPL(arg0, arg1, gfid) +#endif //__nvoc_kernel_graphics_h_disabled + +NV_STATUS kgraphicsGetMainCtxBufferSize_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvBool bIncludeSubctxHdrs, NvU32 *pSize); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsGetMainCtxBufferSize(OBJGPU *arg0, struct KernelGraphics *arg1, NvBool bIncludeSubctxHdrs, NvU32 *pSize) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetMainCtxBufferSize(arg0, arg1, bIncludeSubctxHdrs, pSize) kgraphicsGetMainCtxBufferSize_IMPL(arg0, arg1, bIncludeSubctxHdrs, pSize) +#endif //__nvoc_kernel_graphics_h_disabled + +NV_STATUS kgraphicsGetClassByType_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 objectType, NvU32 *pClass); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsGetClassByType(OBJGPU *arg0, struct KernelGraphics *arg1, NvU32 objectType, NvU32 *pClass) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetClassByType(arg0, arg1, objectType, pClass) kgraphicsGetClassByType_IMPL(arg0, arg1, objectType, pClass) +#endif //__nvoc_kernel_graphics_h_disabled + +const GR_BUFFER_ATTR *kgraphicsGetContextBufferAttr_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, GR_CTX_BUFFER arg2); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline const GR_BUFFER_ATTR *kgraphicsGetContextBufferAttr(OBJGPU *arg0, struct KernelGraphics *arg1, GR_CTX_BUFFER arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NULL; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetContextBufferAttr(arg0, arg1, arg2) kgraphicsGetContextBufferAttr_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_h_disabled + +NV_STATUS kgraphicsCreateGoldenImageChannel_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsCreateGoldenImageChannel(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsCreateGoldenImageChannel(arg0, arg1) kgraphicsCreateGoldenImageChannel_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +NV_STATUS kgraphicsInitializeDeferredStaticData_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvHandle hClient, NvHandle hSubdevice); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS 
kgraphicsInitializeDeferredStaticData(OBJGPU *arg0, struct KernelGraphics *arg1, NvHandle hClient, NvHandle hSubdevice) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsInitializeDeferredStaticData(arg0, arg1, hClient, hSubdevice) kgraphicsInitializeDeferredStaticData_IMPL(arg0, arg1, hClient, hSubdevice) +#endif //__nvoc_kernel_graphics_h_disabled + +const struct KGRAPHICS_STATIC_INFO *kgraphicsGetStaticInfo_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline const struct KGRAPHICS_STATIC_INFO *kgraphicsGetStaticInfo(OBJGPU *arg0, struct KernelGraphics *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NULL; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetStaticInfo(arg0, arg1) kgraphicsGetStaticInfo_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_h_disabled + +NV_STATUS kgraphicsGetCaps_IMPL(OBJGPU *arg0, struct KernelGraphics *arg1, NvU8 *pGrCaps); +#ifdef __nvoc_kernel_graphics_h_disabled +static inline NV_STATUS kgraphicsGetCaps(OBJGPU *arg0, struct KernelGraphics *arg1, NvU8 *pGrCaps) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphics was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_h_disabled +#define kgraphicsGetCaps(arg0, arg1, pGrCaps) kgraphicsGetCaps_IMPL(arg0, arg1, pGrCaps) +#endif //__nvoc_kernel_graphics_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _KERNEL_GRAPHICS_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_GRAPHICS_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_graphics_object_nvoc.c b/src/nvidia/generated/g_kernel_graphics_object_nvoc.c new file mode 100644 index 000000000..56f6eb4ec --- /dev/null +++ b/src/nvidia/generated/g_kernel_graphics_object_nvoc.c @@ -0,0 +1,425 @@ +#define NVOC_KERNEL_GRAPHICS_OBJECT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_graphics_object_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x097648 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsObject; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_KernelGraphicsObject(KernelGraphicsObject*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelGraphicsObject(KernelGraphicsObject*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelGraphicsObject(KernelGraphicsObject*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_KernelGraphicsObject(KernelGraphicsObject*, RmHalspecOwner* ); +void __nvoc_dtor_KernelGraphicsObject(KernelGraphicsObject*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphicsObject; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_KernelGraphicsObject = { + 
/*pClassDef=*/ &__nvoc_class_def_KernelGraphicsObject, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelGraphicsObject, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGraphicsObject_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGraphicsObject, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGraphicsObject = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_KernelGraphicsObject_KernelGraphicsObject, + &__nvoc_rtti_KernelGraphicsObject_ChannelDescendant, + &__nvoc_rtti_KernelGraphicsObject_Notifier, + &__nvoc_rtti_KernelGraphicsObject_INotifier, + &__nvoc_rtti_KernelGraphicsObject_GpuResource, + &__nvoc_rtti_KernelGraphicsObject_RmResource, + &__nvoc_rtti_KernelGraphicsObject_RmResourceCommon, + &__nvoc_rtti_KernelGraphicsObject_RsResource, + &__nvoc_rtti_KernelGraphicsObject_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelGraphicsObject), + /*classId=*/ classId(KernelGraphicsObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelGraphicsObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelGraphicsObject, + /*pCastInfo=*/ &__nvoc_castinfo_KernelGraphicsObject, 
+ /*pExportInfo=*/ &__nvoc_export_info_KernelGraphicsObject +}; + +static NV_STATUS __nvoc_thunk_KernelGraphicsObject_rmresGetMemInterMapParams(struct RmResource *arg0, RMRES_MEM_INTER_MAP_PARAMS *arg1) { + return kgrobjGetMemInterMapParams((struct KernelGraphicsObject *)(((unsigned char *)arg0) - __nvoc_rtti_KernelGraphicsObject_RmResource.offset), arg1); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_kgrobjCheckMemInterUnmap(struct KernelGraphicsObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_KernelGraphicsObject_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_kgrobjShareCallback(struct KernelGraphicsObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsObject_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_kgrobjAccessCallback(struct KernelGraphicsObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_kgrobjMapTo(struct KernelGraphicsObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrobjGetMapAddrSpace(struct KernelGraphicsObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsObject_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_kgrobjSetNotificationShare(struct KernelGraphicsObject *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelGraphicsObject_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_kgrobjGetRefCount(struct KernelGraphicsObject *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_kgrobjAddAdditionalDependants(struct RsClient *pClient, struct KernelGraphicsObject *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_kgrobjControl_Prologue(struct KernelGraphicsObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrobjGetRegBaseOffsetAndSize(struct KernelGraphicsObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource 
*)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsObject_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrobjInternalControlForward(struct KernelGraphicsObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsObject_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_kgrobjUnmapFrom(struct KernelGraphicsObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_kgrobjControl_Epilogue(struct KernelGraphicsObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_kgrobjControlLookup(struct KernelGraphicsObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_kgrobjGetSwMethods(struct KernelGraphicsObject *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return chandesGetSwMethods((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_KernelGraphicsObject_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NvHandle __nvoc_thunk_GpuResource_kgrobjGetInternalObjectHandle(struct KernelGraphicsObject *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsObject_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrobjControl(struct KernelGraphicsObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsObject_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrobjUnmap(struct KernelGraphicsObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsObject_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_kgrobjGetMemoryMappingDescriptor(struct KernelGraphicsObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelGraphicsObject_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_kgrobjIsSwMethodStalling(struct KernelGraphicsObject *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_KernelGraphicsObject_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_kgrobjControlFilter(struct KernelGraphicsObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL 
*pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_kgrobjUnregisterEvent(struct KernelGraphicsObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelGraphicsObject_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_kgrobjCanCopy(struct KernelGraphicsObject *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_kgrobjPreDestruct(struct KernelGraphicsObject *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelGraphicsObject_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_kgrobjGetNotificationListPtr(struct KernelGraphicsObject *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelGraphicsObject_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_kgrobjGetNotificationShare(struct KernelGraphicsObject *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelGraphicsObject_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_kgrobjMap(struct KernelGraphicsObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelGraphicsObject_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_kgrobjGetOrAllocNotifShare(struct KernelGraphicsObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelGraphicsObject_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGraphicsObject = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_KernelGraphicsObject(KernelGraphicsObject *pThis) { + __nvoc_kgrobjDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelGraphicsObject(KernelGraphicsObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, 
PARAM_TO_ENGDESC_FUNCTION *); +NV_STATUS __nvoc_ctor_KernelGraphicsObject(KernelGraphicsObject *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, ((void *)0)); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphicsObject_fail_ChannelDescendant; + __nvoc_init_dataField_KernelGraphicsObject(pThis, pRmhalspecowner); + + status = __nvoc_kgrobjConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelGraphicsObject_fail__init; + goto __nvoc_ctor_KernelGraphicsObject_exit; // Success + +__nvoc_ctor_KernelGraphicsObject_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_KernelGraphicsObject_fail_ChannelDescendant: +__nvoc_ctor_KernelGraphicsObject_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelGraphicsObject_1(KernelGraphicsObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__kgrobjGetMemInterMapParams__ = &kgrobjGetMemInterMapParams_IMPL; + + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__rmresGetMemInterMapParams__ = &__nvoc_thunk_KernelGraphicsObject_rmresGetMemInterMapParams; + + pThis->__kgrobjCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_kgrobjCheckMemInterUnmap; + + pThis->__kgrobjShareCallback__ = &__nvoc_thunk_GpuResource_kgrobjShareCallback; + + pThis->__kgrobjAccessCallback__ = &__nvoc_thunk_RmResource_kgrobjAccessCallback; + + pThis->__kgrobjMapTo__ = &__nvoc_thunk_RsResource_kgrobjMapTo; + + pThis->__kgrobjGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_kgrobjGetMapAddrSpace; + + pThis->__kgrobjSetNotificationShare__ = &__nvoc_thunk_Notifier_kgrobjSetNotificationShare; + + pThis->__kgrobjGetRefCount__ = &__nvoc_thunk_RsResource_kgrobjGetRefCount; + + pThis->__kgrobjAddAdditionalDependants__ = &__nvoc_thunk_RsResource_kgrobjAddAdditionalDependants; + + pThis->__kgrobjControl_Prologue__ = &__nvoc_thunk_RmResource_kgrobjControl_Prologue; + + pThis->__kgrobjGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_kgrobjGetRegBaseOffsetAndSize; + + pThis->__kgrobjInternalControlForward__ = &__nvoc_thunk_GpuResource_kgrobjInternalControlForward; + + pThis->__kgrobjUnmapFrom__ = &__nvoc_thunk_RsResource_kgrobjUnmapFrom; + + pThis->__kgrobjControl_Epilogue__ = &__nvoc_thunk_RmResource_kgrobjControl_Epilogue; + + pThis->__kgrobjControlLookup__ = &__nvoc_thunk_RsResource_kgrobjControlLookup; + + pThis->__kgrobjGetSwMethods__ = &__nvoc_thunk_ChannelDescendant_kgrobjGetSwMethods; + + pThis->__kgrobjGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_kgrobjGetInternalObjectHandle; + + pThis->__kgrobjControl__ = &__nvoc_thunk_GpuResource_kgrobjControl; + + pThis->__kgrobjUnmap__ = &__nvoc_thunk_GpuResource_kgrobjUnmap; + + 
pThis->__kgrobjGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_kgrobjGetMemoryMappingDescriptor; + + pThis->__kgrobjIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_kgrobjIsSwMethodStalling; + + pThis->__kgrobjControlFilter__ = &__nvoc_thunk_RsResource_kgrobjControlFilter; + + pThis->__kgrobjUnregisterEvent__ = &__nvoc_thunk_Notifier_kgrobjUnregisterEvent; + + pThis->__kgrobjCanCopy__ = &__nvoc_thunk_RsResource_kgrobjCanCopy; + + pThis->__kgrobjPreDestruct__ = &__nvoc_thunk_RsResource_kgrobjPreDestruct; + + pThis->__kgrobjGetNotificationListPtr__ = &__nvoc_thunk_Notifier_kgrobjGetNotificationListPtr; + + pThis->__kgrobjGetNotificationShare__ = &__nvoc_thunk_Notifier_kgrobjGetNotificationShare; + + pThis->__kgrobjMap__ = &__nvoc_thunk_GpuResource_kgrobjMap; + + pThis->__kgrobjGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_kgrobjGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_KernelGraphicsObject(KernelGraphicsObject *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelGraphicsObject_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_KernelGraphicsObject(KernelGraphicsObject *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelGraphicsObject = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_KernelGraphicsObject(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelGraphicsObject(KernelGraphicsObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + KernelGraphicsObject *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelGraphicsObject)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelGraphicsObject)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelGraphicsObject); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = 
objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelGraphicsObject(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelGraphicsObject(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_KernelGraphicsObject_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelGraphicsObject_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphicsObject(KernelGraphicsObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_KernelGraphicsObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_graphics_object_nvoc.h b/src/nvidia/generated/g_kernel_graphics_object_nvoc.h new file mode 100644 index 000000000..f72afc321 --- /dev/null +++ b/src/nvidia/generated/g_kernel_graphics_object_nvoc.h @@ -0,0 +1,360 @@ +#ifndef _G_KERNEL_GRAPHICS_OBJECT_NVOC_H_ +#define _G_KERNEL_GRAPHICS_OBJECT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_graphics_object_nvoc.h" + +#ifndef _KERNEL_GRAPHICS_OBJECT_H_ +#define _KERNEL_GRAPHICS_OBJECT_H_ + +#include "core/core.h" +#include "kernel/gpu/fifo/channel_descendant.h" +#include "ctrl/ctrl83de/ctrl83dedebug.h" +#include "mem_mgr/vaddr_list.h" +#include "utils/nv_enum.h" +#include "kernel/gpu/gr/kernel_graphics_context.h" + +struct KernelSMDebuggerSession; + +#ifndef __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ +#define __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ +typedef struct KernelSMDebuggerSession KernelSMDebuggerSession; +#endif /* __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSMDebuggerSession +#define __nvoc_class_id_KernelSMDebuggerSession 0x4adc81 +#endif /* __nvoc_class_id_KernelSMDebuggerSession */ + + +MAKE_LIST(KernelSMDebuggerSessionList, KernelSMDebuggerSession *); + +/*! + * RM internal class representing 3D and compute graphics classes, e.g.: _A, + * _COMPUTE_A, etc + */ +#ifdef NVOC_KERNEL_GRAPHICS_OBJECT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelGraphicsObject { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct KernelGraphicsObject *__nvoc_pbase_KernelGraphicsObject; + NV_STATUS (*__kgrobjGetMemInterMapParams__)(struct KernelGraphicsObject *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__kgrobjCheckMemInterUnmap__)(struct KernelGraphicsObject *, NvBool); + NvBool (*__kgrobjShareCallback__)(struct KernelGraphicsObject *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__kgrobjAccessCallback__)(struct KernelGraphicsObject *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__kgrobjMapTo__)(struct KernelGraphicsObject *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__kgrobjGetMapAddrSpace__)(struct KernelGraphicsObject *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__kgrobjSetNotificationShare__)(struct KernelGraphicsObject *, struct NotifShare *); + NvU32 (*__kgrobjGetRefCount__)(struct KernelGraphicsObject *); + void (*__kgrobjAddAdditionalDependants__)(struct RsClient *, struct KernelGraphicsObject *, RsResourceRef *); + NV_STATUS (*__kgrobjControl_Prologue__)(struct KernelGraphicsObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kgrobjGetRegBaseOffsetAndSize__)(struct KernelGraphicsObject *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__kgrobjInternalControlForward__)(struct KernelGraphicsObject *, NvU32, void *, NvU32); + NV_STATUS (*__kgrobjUnmapFrom__)(struct KernelGraphicsObject *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__kgrobjControl_Epilogue__)(struct KernelGraphicsObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kgrobjControlLookup__)(struct KernelGraphicsObject *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__kgrobjGetSwMethods__)(struct KernelGraphicsObject *, METHOD **, NvU32 *); + NvHandle (*__kgrobjGetInternalObjectHandle__)(struct KernelGraphicsObject *); + NV_STATUS 
(*__kgrobjControl__)(struct KernelGraphicsObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kgrobjUnmap__)(struct KernelGraphicsObject *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__kgrobjGetMemoryMappingDescriptor__)(struct KernelGraphicsObject *, struct MEMORY_DESCRIPTOR **); + NvBool (*__kgrobjIsSwMethodStalling__)(struct KernelGraphicsObject *, NvU32); + NV_STATUS (*__kgrobjControlFilter__)(struct KernelGraphicsObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__kgrobjUnregisterEvent__)(struct KernelGraphicsObject *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__kgrobjCanCopy__)(struct KernelGraphicsObject *); + void (*__kgrobjPreDestruct__)(struct KernelGraphicsObject *); + PEVENTNOTIFICATION *(*__kgrobjGetNotificationListPtr__)(struct KernelGraphicsObject *); + struct NotifShare *(*__kgrobjGetNotificationShare__)(struct KernelGraphicsObject *); + NV_STATUS (*__kgrobjMap__)(struct KernelGraphicsObject *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__kgrobjGetOrAllocNotifShare__)(struct KernelGraphicsObject *, NvHandle, NvHandle, struct NotifShare **); + MEMORY_DESCRIPTOR *pMmioMemDesc; + KernelSMDebuggerSessionList activeDebuggers; + struct KernelGraphicsContext *pKernelGraphicsContext; +}; + +#ifndef __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +typedef struct KernelGraphicsObject KernelGraphicsObject; +#endif /* __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsObject +#define __nvoc_class_id_KernelGraphicsObject 0x097648 +#endif /* __nvoc_class_id_KernelGraphicsObject */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGraphicsObject; + +#define __staticCast_KernelGraphicsObject(pThis) \ + ((pThis)->__nvoc_pbase_KernelGraphicsObject) + +#ifdef __nvoc_kernel_graphics_object_h_disabled +#define __dynamicCast_KernelGraphicsObject(pThis) ((KernelGraphicsObject*)NULL) +#else //__nvoc_kernel_graphics_object_h_disabled +#define __dynamicCast_KernelGraphicsObject(pThis) \ + ((KernelGraphicsObject*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGraphicsObject))) +#endif //__nvoc_kernel_graphics_object_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelGraphicsObject(KernelGraphicsObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelGraphicsObject(KernelGraphicsObject**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_KernelGraphicsObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_KernelGraphicsObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define kgrobjGetMemInterMapParams(arg0, arg1) kgrobjGetMemInterMapParams_DISPATCH(arg0, arg1) +#define kgrobjCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) kgrobjCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define kgrobjShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) kgrobjShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define kgrobjAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) kgrobjAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define kgrobjMapTo(pResource, pParams) kgrobjMapTo_DISPATCH(pResource, pParams) +#define 
kgrobjGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) kgrobjGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define kgrobjSetNotificationShare(pNotifier, pNotifShare) kgrobjSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define kgrobjGetRefCount(pResource) kgrobjGetRefCount_DISPATCH(pResource) +#define kgrobjAddAdditionalDependants(pClient, pResource, pReference) kgrobjAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define kgrobjControl_Prologue(pResource, pCallContext, pParams) kgrobjControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define kgrobjGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) kgrobjGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define kgrobjInternalControlForward(pGpuResource, command, pParams, size) kgrobjInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define kgrobjUnmapFrom(pResource, pParams) kgrobjUnmapFrom_DISPATCH(pResource, pParams) +#define kgrobjControl_Epilogue(pResource, pCallContext, pParams) kgrobjControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define kgrobjControlLookup(pResource, pParams, ppEntry) kgrobjControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define kgrobjGetSwMethods(pChannelDescendant, ppMethods, pNumMethods) kgrobjGetSwMethods_DISPATCH(pChannelDescendant, ppMethods, pNumMethods) +#define kgrobjGetInternalObjectHandle(pGpuResource) kgrobjGetInternalObjectHandle_DISPATCH(pGpuResource) +#define kgrobjControl(pGpuResource, pCallContext, pParams) kgrobjControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define kgrobjUnmap(pGpuResource, pCallContext, pCpuMapping) kgrobjUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define kgrobjGetMemoryMappingDescriptor(pRmResource, ppMemDesc) kgrobjGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define kgrobjIsSwMethodStalling(pChannelDescendant, hHandle) kgrobjIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define kgrobjControlFilter(pResource, pCallContext, pParams) kgrobjControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define kgrobjUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) kgrobjUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define kgrobjCanCopy(pResource) kgrobjCanCopy_DISPATCH(pResource) +#define kgrobjPreDestruct(pResource) kgrobjPreDestruct_DISPATCH(pResource) +#define kgrobjGetNotificationListPtr(pNotifier) kgrobjGetNotificationListPtr_DISPATCH(pNotifier) +#define kgrobjGetNotificationShare(pNotifier) kgrobjGetNotificationShare_DISPATCH(pNotifier) +#define kgrobjMap(pGpuResource, pCallContext, pParams, pCpuMapping) kgrobjMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define kgrobjGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) kgrobjGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +void kgrobjGetPromoteIds_FWCLIENT(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1, NvU32 maxPromoteIds, NvU32 *pPromoteIds, NvU32 *pNumEntries, NvBool *pbPromote); + +#ifdef __nvoc_kernel_graphics_object_h_disabled +static inline void kgrobjGetPromoteIds(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1, NvU32 maxPromoteIds, NvU32 *pPromoteIds, NvU32 *pNumEntries, NvBool *pbPromote) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsObject was disabled!"); +} +#else //__nvoc_kernel_graphics_object_h_disabled +#define 
kgrobjGetPromoteIds(arg0, arg1, maxPromoteIds, pPromoteIds, pNumEntries, pbPromote) kgrobjGetPromoteIds_FWCLIENT(arg0, arg1, maxPromoteIds, pPromoteIds, pNumEntries, pbPromote) +#endif //__nvoc_kernel_graphics_object_h_disabled + +#define kgrobjGetPromoteIds_HAL(arg0, arg1, maxPromoteIds, pPromoteIds, pNumEntries, pbPromote) kgrobjGetPromoteIds(arg0, arg1, maxPromoteIds, pPromoteIds, pNumEntries, pbPromote) + +NvBool kgrobjShouldCleanup_KERNEL(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1); + +#ifdef __nvoc_kernel_graphics_object_h_disabled +static inline NvBool kgrobjShouldCleanup(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsObject was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_graphics_object_h_disabled +#define kgrobjShouldCleanup(arg0, arg1) kgrobjShouldCleanup_KERNEL(arg0, arg1) +#endif //__nvoc_kernel_graphics_object_h_disabled + +#define kgrobjShouldCleanup_HAL(arg0, arg1) kgrobjShouldCleanup(arg0, arg1) + +NV_STATUS kgrobjSetComputeMmio_IMPL(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1); + +#ifdef __nvoc_kernel_graphics_object_h_disabled +static inline NV_STATUS kgrobjSetComputeMmio(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsObject was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_object_h_disabled +#define kgrobjSetComputeMmio(arg0, arg1) kgrobjSetComputeMmio_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_object_h_disabled + +#define kgrobjSetComputeMmio_HAL(arg0, arg1) kgrobjSetComputeMmio(arg0, arg1) + +void kgrobjFreeComputeMmio_IMPL(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1); + +#ifdef __nvoc_kernel_graphics_object_h_disabled +static inline void kgrobjFreeComputeMmio(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsObject was disabled!"); +} +#else //__nvoc_kernel_graphics_object_h_disabled +#define kgrobjFreeComputeMmio(arg0, arg1) kgrobjFreeComputeMmio_IMPL(arg0, arg1) +#endif //__nvoc_kernel_graphics_object_h_disabled + +#define kgrobjFreeComputeMmio_HAL(arg0, arg1) kgrobjFreeComputeMmio(arg0, arg1) + +NV_STATUS kgrobjGetMemInterMapParams_IMPL(struct KernelGraphicsObject *arg0, RMRES_MEM_INTER_MAP_PARAMS *arg1); + +static inline NV_STATUS kgrobjGetMemInterMapParams_DISPATCH(struct KernelGraphicsObject *arg0, RMRES_MEM_INTER_MAP_PARAMS *arg1) { + return arg0->__kgrobjGetMemInterMapParams__(arg0, arg1); +} + +static inline NV_STATUS kgrobjCheckMemInterUnmap_DISPATCH(struct KernelGraphicsObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__kgrobjCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool kgrobjShareCallback_DISPATCH(struct KernelGraphicsObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__kgrobjShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool kgrobjAccessCallback_DISPATCH(struct KernelGraphicsObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__kgrobjAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS kgrobjMapTo_DISPATCH(struct KernelGraphicsObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__kgrobjMapTo__(pResource, pParams); +} + +static inline NV_STATUS 
kgrobjGetMapAddrSpace_DISPATCH(struct KernelGraphicsObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__kgrobjGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void kgrobjSetNotificationShare_DISPATCH(struct KernelGraphicsObject *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__kgrobjSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 kgrobjGetRefCount_DISPATCH(struct KernelGraphicsObject *pResource) { + return pResource->__kgrobjGetRefCount__(pResource); +} + +static inline void kgrobjAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct KernelGraphicsObject *pResource, RsResourceRef *pReference) { + pResource->__kgrobjAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS kgrobjControl_Prologue_DISPATCH(struct KernelGraphicsObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kgrobjControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kgrobjGetRegBaseOffsetAndSize_DISPATCH(struct KernelGraphicsObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__kgrobjGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS kgrobjInternalControlForward_DISPATCH(struct KernelGraphicsObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__kgrobjInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS kgrobjUnmapFrom_DISPATCH(struct KernelGraphicsObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__kgrobjUnmapFrom__(pResource, pParams); +} + +static inline void kgrobjControl_Epilogue_DISPATCH(struct KernelGraphicsObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__kgrobjControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kgrobjControlLookup_DISPATCH(struct KernelGraphicsObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__kgrobjControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS kgrobjGetSwMethods_DISPATCH(struct KernelGraphicsObject *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return pChannelDescendant->__kgrobjGetSwMethods__(pChannelDescendant, ppMethods, pNumMethods); +} + +static inline NvHandle kgrobjGetInternalObjectHandle_DISPATCH(struct KernelGraphicsObject *pGpuResource) { + return pGpuResource->__kgrobjGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS kgrobjControl_DISPATCH(struct KernelGraphicsObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__kgrobjControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS kgrobjUnmap_DISPATCH(struct KernelGraphicsObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__kgrobjUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS kgrobjGetMemoryMappingDescriptor_DISPATCH(struct KernelGraphicsObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__kgrobjGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool 
kgrobjIsSwMethodStalling_DISPATCH(struct KernelGraphicsObject *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__kgrobjIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS kgrobjControlFilter_DISPATCH(struct KernelGraphicsObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__kgrobjControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS kgrobjUnregisterEvent_DISPATCH(struct KernelGraphicsObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__kgrobjUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool kgrobjCanCopy_DISPATCH(struct KernelGraphicsObject *pResource) { + return pResource->__kgrobjCanCopy__(pResource); +} + +static inline void kgrobjPreDestruct_DISPATCH(struct KernelGraphicsObject *pResource) { + pResource->__kgrobjPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *kgrobjGetNotificationListPtr_DISPATCH(struct KernelGraphicsObject *pNotifier) { + return pNotifier->__kgrobjGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *kgrobjGetNotificationShare_DISPATCH(struct KernelGraphicsObject *pNotifier) { + return pNotifier->__kgrobjGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS kgrobjMap_DISPATCH(struct KernelGraphicsObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__kgrobjMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS kgrobjGetOrAllocNotifShare_DISPATCH(struct KernelGraphicsObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__kgrobjGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS kgrobjConstruct_IMPL(struct KernelGraphicsObject *arg_pKernelGraphicsObject, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_kgrobjConstruct(arg_pKernelGraphicsObject, arg_pCallContext, arg_pParams) kgrobjConstruct_IMPL(arg_pKernelGraphicsObject, arg_pCallContext, arg_pParams) +void kgrobjDestruct_IMPL(struct KernelGraphicsObject *pKernelGraphicsObject); +#define __nvoc_kgrobjDestruct(pKernelGraphicsObject) kgrobjDestruct_IMPL(pKernelGraphicsObject) +NV_STATUS kgrobjPromoteContext_IMPL(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1, struct KernelGraphics *arg2); +#ifdef __nvoc_kernel_graphics_object_h_disabled +static inline NV_STATUS kgrobjPromoteContext(struct OBJGPU *arg0, struct KernelGraphicsObject *arg1, struct KernelGraphics *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelGraphicsObject was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_graphics_object_h_disabled +#define kgrobjPromoteContext(arg0, arg1, arg2) kgrobjPromoteContext_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_graphics_object_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _KERNEL_GRAPHICS_OBJECT_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_GRAPHICS_OBJECT_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_gsp_nvoc.c b/src/nvidia/generated/g_kernel_gsp_nvoc.c new file mode 100644 index 000000000..a2cee0149 --- /dev/null +++ b/src/nvidia/generated/g_kernel_gsp_nvoc.c @@ -0,0 +1,716 @@ +#define 
NVOC_KERNEL_GSP_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_gsp_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x311d4e = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGsp; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon; + +void __nvoc_init_KernelGsp(KernelGsp*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelGsp(KernelGsp*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelGsp(KernelGsp*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelGsp(KernelGsp*, RmHalspecOwner* ); +void __nvoc_dtor_KernelGsp(KernelGsp*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGsp; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_KernelGsp = { + /*pClassDef=*/ &__nvoc_class_def_KernelGsp, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelGsp, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_IntrService), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelGsp_KernelFalcon = { + /*pClassDef=*/ &__nvoc_class_def_KernelFalcon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelGsp, __nvoc_base_KernelFalcon), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelGsp = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_KernelGsp_KernelGsp, + &__nvoc_rtti_KernelGsp_KernelFalcon, + &__nvoc_rtti_KernelGsp_IntrService, + &__nvoc_rtti_KernelGsp_OBJENGSTATE, + &__nvoc_rtti_KernelGsp_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGsp = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelGsp), + /*classId=*/ classId(KernelGsp), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelGsp", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelGsp, + /*pCastInfo=*/ &__nvoc_castinfo_KernelGsp, + /*pExportInfo=*/ &__nvoc_export_info_KernelGsp +}; + +static NV_STATUS __nvoc_thunk_KernelGsp_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelGsp, ENGDESCRIPTOR arg0) { + return kgspConstructEngine(pGpu, (struct KernelGsp *)(((unsigned char *)pKernelGsp) - __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_KernelGsp_intrservRegisterIntrService(struct OBJGPU *pGpu, struct IntrService *pKernelGsp, IntrServiceRecord pRecords[155]) { + kgspRegisterIntrService(pGpu, (struct KernelGsp *)(((unsigned char *)pKernelGsp) - __nvoc_rtti_KernelGsp_IntrService.offset), pRecords); +} + +static NvU32 __nvoc_thunk_KernelGsp_intrservServiceInterrupt(struct OBJGPU *pGpu, struct IntrService 
*pKernelGsp, IntrServiceServiceInterruptArguments *pParams) { + return kgspServiceInterrupt(pGpu, (struct KernelGsp *)(((unsigned char *)pKernelGsp) - __nvoc_rtti_KernelGsp_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_KernelGsp_kflcnResetHw(struct OBJGPU *pGpu, struct KernelFalcon *pKernelGsp) { + return kgspResetHw(pGpu, (struct KernelGsp *)(((unsigned char *)pKernelGsp) - __nvoc_rtti_KernelGsp_KernelFalcon.offset)); +} + +static NvBool __nvoc_thunk_KernelGsp_kflcnIsEngineInReset(struct OBJGPU *pGpu, struct KernelFalcon *pKernelGsp) { + return kgspIsEngineInReset(pGpu, (struct KernelGsp *)(((unsigned char *)pKernelGsp) - __nvoc_rtti_KernelGsp_KernelFalcon.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kgspStateDestroy(POBJGPU pGpu, struct KernelGsp *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kgspFreeTunableState(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspCompareTunableState(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static NvBool __nvoc_thunk_IntrService_kgspClearInterrupt(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGsp_IntrService.offset), pParams); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kgspIsPresent(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspReconcileTunableState(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateLoad(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateUnload(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_IntrService_kgspServiceNotificationInterrupt(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return intrservServiceNotificationInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelGsp_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateInitLocked(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset)); +} + +static NV_STATUS 
__nvoc_thunk_OBJENGSTATE_kgspStatePreLoad(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePostUnload(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePreUnload(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspGetTunableState(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStateInitUnlocked(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kgspInitMissing(POBJGPU pGpu, struct KernelGsp *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePreInitLocked(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePreInitUnlocked(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspStatePostLoad(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspAllocTunableState(POBJGPU pGpu, struct KernelGsp *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kgspSetTunableState(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelGsp_OBJENGSTATE.offset), pTunableState); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelGsp = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_KernelFalcon(KernelFalcon*); +void __nvoc_dtor_KernelGsp(KernelGsp *pThis) { + __nvoc_kgspDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + __nvoc_dtor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelGsp(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) { + 
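+    // This generated initializer only resolves the halspec owner's ChipHal and
+    // RmVariantHal variant indices; KernelGsp has no HAL-dependent data-field
+    // defaults here, so the locals below are simply marked unreferenced.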
ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelGsp(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_KernelGsp_fail_OBJENGSTATE; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_KernelGsp_fail_IntrService; + status = __nvoc_ctor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_KernelGsp_fail_KernelFalcon; + __nvoc_init_dataField_KernelGsp(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelGsp_exit; // Success + +__nvoc_ctor_KernelGsp_fail_KernelFalcon: + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); +__nvoc_ctor_KernelGsp_fail_IntrService: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_KernelGsp_fail_OBJENGSTATE: +__nvoc_ctor_KernelGsp_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelGsp_1(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__kgspConstructEngine__ = &kgspConstructEngine_IMPL; + + pThis->__kgspRegisterIntrService__ = &kgspRegisterIntrService_IMPL; + + pThis->__kgspServiceInterrupt__ = &kgspServiceInterrupt_IMPL; + + // Hal function -- kgspConfigureFalcon + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kgspConfigureFalcon__ = &kgspConfigureFalcon_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspConfigureFalcon__ = &kgspConfigureFalcon_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspIsDebugModeEnabled + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 
1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kgspIsDebugModeEnabled__ = &kgspIsDebugModeEnabled_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspIsDebugModeEnabled__ = &kgspIsDebugModeEnabled_GA100; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspBootstrapRiscvOSEarly + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kgspBootstrapRiscvOSEarly__ = &kgspBootstrapRiscvOSEarly_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspBootstrapRiscvOSEarly__ = &kgspBootstrapRiscvOSEarly_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspGetGspRmBootUcodeStorage + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kgspGetGspRmBootUcodeStorage__ = &kgspGetGspRmBootUcodeStorage_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspGetGspRmBootUcodeStorage__ = &kgspGetGspRmBootUcodeStorage_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspGetBinArchiveGspRmBoot + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kgspGetBinArchiveGspRmBoot__ = &kgspGetBinArchiveGspRmBoot_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kgspGetBinArchiveGspRmBoot__ = &kgspGetBinArchiveGspRmBoot_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspGetBinArchiveGspRmBoot__ = &kgspGetBinArchiveGspRmBoot_GA102; + } + else if (0) + { + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspExecuteSequencerCommand + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kgspExecuteSequencerCommand__ = &kgspExecuteSequencerCommand_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + 
pThis->__kgspExecuteSequencerCommand__ = &kgspExecuteSequencerCommand_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspReadUcodeFuseVersion + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kgspReadUcodeFuseVersion__ = &kgspReadUcodeFuseVersion_b2b553; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspReadUcodeFuseVersion__ = &kgspReadUcodeFuseVersion_GA100; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspResetHw + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspResetHw__ = &kgspResetHw_TU102; + } + else if (0) + { + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspIsEngineInReset + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspIsEngineInReset__ = &kgspIsEngineInReset_TU102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspGetFrtsSize + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspGetFrtsSize__ = &kgspGetFrtsSize_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kgspGetFrtsSize__ = &kgspGetFrtsSize_4a4dee; + } + } + else if (0) + { + } + + // Hal function -- kgspExecuteFwsecFrts + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspExecuteFwsecFrts__ = &kgspExecuteFwsecFrts_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kgspExecuteFwsecFrts__ = &kgspExecuteFwsecFrts_5baef9; + } + } + else if (0) + { + } + + // Hal function -- kgspExecuteHsFalcon + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000007e0UL) )) /* ChipHal: TU102 | 
TU104 | TU106 | TU116 | TU117 | GA100 */ + { + pThis->__kgspExecuteHsFalcon__ = &kgspExecuteHsFalcon_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspExecuteHsFalcon__ = &kgspExecuteHsFalcon_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspGetBinArchiveBooterLoadUcode + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000000e0UL) )) /* ChipHal: TU102 | TU104 | TU106 */ + { + pThis->__kgspGetBinArchiveBooterLoadUcode__ = &kgspGetBinArchiveBooterLoadUcode_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000300UL) )) /* ChipHal: TU116 | TU117 */ + { + pThis->__kgspGetBinArchiveBooterLoadUcode__ = &kgspGetBinArchiveBooterLoadUcode_TU116; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kgspGetBinArchiveBooterLoadUcode__ = &kgspGetBinArchiveBooterLoadUcode_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspGetBinArchiveBooterLoadUcode__ = &kgspGetBinArchiveBooterLoadUcode_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspGetBinArchiveBooterReloadUcode + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000000e0UL) )) /* ChipHal: TU102 | TU104 | TU106 */ + { + pThis->__kgspGetBinArchiveBooterReloadUcode__ = &kgspGetBinArchiveBooterReloadUcode_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000300UL) )) /* ChipHal: TU116 | TU117 */ + { + pThis->__kgspGetBinArchiveBooterReloadUcode__ = &kgspGetBinArchiveBooterReloadUcode_TU116; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kgspGetBinArchiveBooterReloadUcode__ = &kgspGetBinArchiveBooterReloadUcode_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspGetBinArchiveBooterReloadUcode__ = &kgspGetBinArchiveBooterReloadUcode_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspGetBinArchiveBooterUnloadUcode + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000000e0UL) )) /* ChipHal: TU102 | TU104 | TU106 */ + { + pThis->__kgspGetBinArchiveBooterUnloadUcode__ = &kgspGetBinArchiveBooterUnloadUcode_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000300UL) )) /* ChipHal: TU116 | TU117 */ + { + pThis->__kgspGetBinArchiveBooterUnloadUcode__ = &kgspGetBinArchiveBooterUnloadUcode_TU116; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << 
(chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kgspGetBinArchiveBooterUnloadUcode__ = &kgspGetBinArchiveBooterUnloadUcode_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspGetBinArchiveBooterUnloadUcode__ = &kgspGetBinArchiveBooterUnloadUcode_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- kgspGetSignatureSectionName + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kgspGetSignatureSectionName__ = &kgspGetSignatureSectionName_63b8e2; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kgspGetSignatureSectionName__ = &kgspGetSignatureSectionName_e46f5b; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000000e0UL) )) /* ChipHal: TU102 | TU104 | TU106 */ + { + pThis->__kgspGetSignatureSectionName__ = &kgspGetSignatureSectionName_cbc19d; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000300UL) )) /* ChipHal: TU116 | TU117 */ + { + pThis->__kgspGetSignatureSectionName__ = &kgspGetSignatureSectionName_ab7237; + } + // default + else + { + pThis->__kgspGetSignatureSectionName__ = &kgspGetSignatureSectionName_9e2234; + } + } + else if (0) + { + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelGsp_engstateConstructEngine; + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_KernelGsp_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceInterrupt__ = &__nvoc_thunk_KernelGsp_intrservServiceInterrupt; + + pThis->__nvoc_base_KernelFalcon.__kflcnResetHw__ = &__nvoc_thunk_KernelGsp_kflcnResetHw; + + pThis->__nvoc_base_KernelFalcon.__kflcnIsEngineInReset__ = &__nvoc_thunk_KernelGsp_kflcnIsEngineInReset; + + pThis->__kgspStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kgspStateDestroy; + + pThis->__kgspFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgspFreeTunableState; + + pThis->__kgspCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgspCompareTunableState; + + pThis->__kgspClearInterrupt__ = &__nvoc_thunk_IntrService_kgspClearInterrupt; + + pThis->__kgspIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kgspIsPresent; + + pThis->__kgspReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgspReconcileTunableState; + + pThis->__kgspStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStateLoad; + + pThis->__kgspStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStateUnload; + + pThis->__kgspServiceNotificationInterrupt__ = &__nvoc_thunk_IntrService_kgspServiceNotificationInterrupt; + + pThis->__kgspStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStateInitLocked; + + pThis->__kgspStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreLoad; + + pThis->__kgspStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePostUnload; + + pThis->__kgspStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreUnload; + + pThis->__kgspGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgspGetTunableState; + + pThis->__kgspStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStateInitUnlocked; + + 
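+    // The remaining vtable entries are inherited OBJENGSTATE virtuals, each bound
+    // to an offset-adjusting thunk defined earlier in this file.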
pThis->__kgspInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kgspInitMissing; + + pThis->__kgspStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreInitLocked; + + pThis->__kgspStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePreInitUnlocked; + + pThis->__kgspStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kgspStatePostLoad; + + pThis->__kgspAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgspAllocTunableState; + + pThis->__kgspSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kgspSetTunableState; +} + +void __nvoc_init_funcTable_KernelGsp(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelGsp_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*, RmHalspecOwner* ); +void __nvoc_init_IntrService(IntrService*, RmHalspecOwner* ); +void __nvoc_init_KernelFalcon(KernelFalcon*, RmHalspecOwner* ); +void __nvoc_init_KernelGsp(KernelGsp *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelGsp = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + __nvoc_init_KernelFalcon(&pThis->__nvoc_base_KernelFalcon, pRmhalspecowner); + __nvoc_init_funcTable_KernelGsp(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelGsp(KernelGsp **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelGsp *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelGsp)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelGsp)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelGsp); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelGsp(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelGsp(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelGsp_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelGsp_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelGsp(KernelGsp **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelGsp(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_gsp_nvoc.h b/src/nvidia/generated/g_kernel_gsp_nvoc.h new file mode 100644 index 000000000..8d1f09fc0 --- /dev/null +++ b/src/nvidia/generated/g_kernel_gsp_nvoc.h @@ -0,0 +1,1057 @@ +#ifndef _G_KERNEL_GSP_NVOC_H_ +#define _G_KERNEL_GSP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_gsp_nvoc.h" + +#ifndef KERNEL_GSP_H +#define KERNEL_GSP_H + +/*! + * This file provides definitions for all KernelGsp data structures + * and interfaces. KernelGsp is responsible for initiating the boot + * of RM on the GSP core (GSP-RM) and helps facilitate communication + * between Kernel RM and GSP-RM. + */ + +#include "core/core.h" +#include "core/bin_data.h" +#include "gpu/eng_state.h" +#include "gpu/intr/intr_service.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gsp/gsp_static_config.h" +#include "gpu/gsp/gsp_init_args.h" +#include "rmRiscvUcode.h" + +#include "libos_init_args.h" +#include "gsp_fw_wpr_meta.h" +#include "logdecode.h" + +/*! + * Forward declarations + */ +typedef struct SimAccessBuffer SimAccessBuffer; + +/*! + * Structure for VBIOS image for early FRTS. + */ +typedef struct KernelGspVbiosImg +{ + NvU8 *pImage; + NvU32 biosSize; + NvU32 expansionRomOffset; +} KernelGspVbiosImg; + +/*! + * Variant of KernelGspFlcnUcode representing a non-Boot-from-HS ucode that + * loads directly without the generic falcon bootloader. + */ +typedef struct KernelGspFlcnUcodeBootDirect +{ + NvU8 *pImage; + NvU32 size; + + NvU32 imemSize; + NvU32 imemNsSize; + NvU32 imemNsPa; + NvU32 imemSecSize; + NvU32 imemSecPa; + + NvU32 dataOffset; + NvU32 dmemSize; + NvU32 dmemPa; +} KernelGspFlcnUcodeBootDirect; + +/*! + * Variant of KernelGspFlcnUcode representing a non-Boot-from-HS ucode that + * loads via the generic falcon bootloader. + */ +typedef struct KernelGspFlcnUcodeBootWithLoader +{ + MEMORY_DESCRIPTOR *pCodeMemDesc; + MEMORY_DESCRIPTOR *pDataMemDesc; + + NvU32 codeOffset; + NvU32 imemSize; + NvU32 imemNsSize; + NvU32 imemNsPa; + NvU32 imemSecSize; + NvU32 imemSecPa; + NvU32 codeEntry; + + NvU32 dataOffset; + NvU32 dmemSize; + NvU32 dmemPa; + + // Extra fields used for falcon ucodes from VBIOS + NvU32 interfaceOffset; +} KernelGspFlcnUcodeBootWithLoader; + +/*! + * Variant of KernelGspFlcnUcode representing a Boot-from-HS ucode. 
+ */ +typedef struct KernelGspFlcnUcodeBootFromHs +{ + MEMORY_DESCRIPTOR *pUcodeMemDesc; + NvU32 size; + + NvU32 codeOffset; + NvU32 imemSize; + NvU32 imemPa; + NvU32 imemVa; + + NvU32 dataOffset; + NvU32 dmemSize; + NvU32 dmemPa; + NvU32 dmemVa; + + NvU32 hsSigDmemAddr; + NvU32 ucodeId; + NvU32 engineIdMask; + + // Extra fields used for falcon ucodes from VBIOS + NvU32 *pSignatures; + NvU32 signaturesTotalSize; // size of buffer pointed by pSignatures + NvU32 sigSize; // size of one signature + NvU32 sigCount; + + NvU32 vbiosSigVersions; + NvU32 interfaceOffset; +} KernelGspFlcnUcodeBootFromHs; + +/*! + * Type of KernelGspFlcnUcode. Used as tag in tagged union KernelGspFlcnUcode. + * Affects how the ucode is loaded/booted. + */ +typedef enum KernelGspFlcnUcodeBootType +{ + KGSP_FLCN_UCODE_BOOT_DIRECT, + KGSP_FLCN_UCODE_BOOT_WITH_LOADER, + KGSP_FLCN_UCODE_BOOT_FROM_HS +} KernelGspFlcnUcodeBootType; + +/*! + * Tagged union of falcon ucode variants used by early FRTS and GSP-RM boot. + */ +typedef struct KernelGspFlcnUcode +{ + KernelGspFlcnUcodeBootType bootType; + union + { + KernelGspFlcnUcodeBootDirect ucodeBootDirect; + KernelGspFlcnUcodeBootWithLoader ucodeBootWithLoader; + KernelGspFlcnUcodeBootFromHs ucodeBootFromHs; + }; +} KernelGspFlcnUcode; + +/*! + * GSP-RM source when running in Emulated/Simulated RISCV environment is + * extremely slow, so we need a factor (X) to scale timeouts by. + */ +#define GSP_SCALE_TIMEOUT_EMU_SIM 2500 + +/*! + * Size of libos init arguments packet. + */ +#define LIBOS_INIT_ARGUMENTS_SIZE 0x1000 + +/*! + * Structure for passing GSP-RM firmware data + */ +typedef struct GSP_FIRMWARE +{ + const void *pBuf; // buffer holding the firmware (ucode) + NvU32 size; // size of the firmware + const void *pLogElf; // firmware logging section and symbol information to decode logs + NvU32 logElfSize; // size of the gsp log elf binary +} GSP_FIRMWARE; + +/*! + * All signature sections in the elf must start with this prefix which can be + * used to identify them + */ +#define SIGNATURE_SECTION_NAME_PREFIX ".fwsignature_" + +/*! + * Index into libosLogDecode array. + */ +enum +{ + LOGIDX_INIT, + LOGIDX_RM, + LOGIDX_SIZE +}; + +/*! + * LIBOS task logging. + */ +typedef struct +{ + /* Memory for task logging */ + MEMORY_DESCRIPTOR *pTaskLogDescriptor; + NvU64 *pTaskLogBuffer; + NvP64 pTaskLogMappingPriv; + NvU64 id8; +} RM_LIBOS_LOG_MEM; + +/*! 
+ * KernelGsp object definition + */ +#ifdef NVOC_KERNEL_GSP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelGsp { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct IntrService __nvoc_base_IntrService; + struct KernelFalcon __nvoc_base_KernelFalcon; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct IntrService *__nvoc_pbase_IntrService; + struct KernelFalcon *__nvoc_pbase_KernelFalcon; + struct KernelGsp *__nvoc_pbase_KernelGsp; + NV_STATUS (*__kgspConstructEngine__)(struct OBJGPU *, struct KernelGsp *, ENGDESCRIPTOR); + void (*__kgspRegisterIntrService__)(struct OBJGPU *, struct KernelGsp *, IntrServiceRecord *); + NvU32 (*__kgspServiceInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceServiceInterruptArguments *); + void (*__kgspConfigureFalcon__)(struct OBJGPU *, struct KernelGsp *); + NvBool (*__kgspIsDebugModeEnabled__)(struct OBJGPU *, struct KernelGsp *); + NV_STATUS (*__kgspBootstrapRiscvOSEarly__)(struct OBJGPU *, struct KernelGsp *, GSP_FIRMWARE *); + void (*__kgspGetGspRmBootUcodeStorage__)(struct OBJGPU *, struct KernelGsp *, BINDATA_STORAGE **, BINDATA_STORAGE **); + const BINDATA_ARCHIVE *(*__kgspGetBinArchiveGspRmBoot__)(struct KernelGsp *); + NV_STATUS (*__kgspExecuteSequencerCommand__)(struct OBJGPU *, struct KernelGsp *, NvU32, NvU32 *, NvU32); + NvU32 (*__kgspReadUcodeFuseVersion__)(struct OBJGPU *, struct KernelGsp *, NvU32); + NV_STATUS (*__kgspResetHw__)(struct OBJGPU *, struct KernelGsp *); + NvBool (*__kgspIsEngineInReset__)(struct OBJGPU *, struct KernelGsp *); + NvU32 (*__kgspGetFrtsSize__)(struct OBJGPU *, struct KernelGsp *); + NV_STATUS (*__kgspExecuteFwsecFrts__)(struct OBJGPU *, struct KernelGsp *, KernelGspFlcnUcode *, const NvU64); + NV_STATUS (*__kgspExecuteHsFalcon__)(struct OBJGPU *, struct KernelGsp *, KernelGspFlcnUcode *, struct KernelFalcon *, NvU32 *, NvU32 *); + const BINDATA_ARCHIVE *(*__kgspGetBinArchiveBooterLoadUcode__)(struct KernelGsp *); + const BINDATA_ARCHIVE *(*__kgspGetBinArchiveBooterReloadUcode__)(struct KernelGsp *); + const BINDATA_ARCHIVE *(*__kgspGetBinArchiveBooterUnloadUcode__)(struct KernelGsp *); + const char *(*__kgspGetSignatureSectionName__)(struct OBJGPU *, struct KernelGsp *); + void (*__kgspStateDestroy__)(POBJGPU, struct KernelGsp *); + void (*__kgspFreeTunableState__)(POBJGPU, struct KernelGsp *, void *); + NV_STATUS (*__kgspCompareTunableState__)(POBJGPU, struct KernelGsp *, void *, void *); + NvBool (*__kgspClearInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceClearInterruptArguments *); + NvBool (*__kgspIsPresent__)(POBJGPU, struct KernelGsp *); + NV_STATUS (*__kgspReconcileTunableState__)(POBJGPU, struct KernelGsp *, void *); + NV_STATUS (*__kgspStateLoad__)(POBJGPU, struct KernelGsp *, NvU32); + NV_STATUS (*__kgspStateUnload__)(POBJGPU, struct KernelGsp *, NvU32); + NV_STATUS (*__kgspServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelGsp *, IntrServiceServiceNotificationInterruptArguments *); + NV_STATUS (*__kgspStateInitLocked__)(POBJGPU, struct KernelGsp *); + NV_STATUS (*__kgspStatePreLoad__)(POBJGPU, struct KernelGsp *, NvU32); + NV_STATUS (*__kgspStatePostUnload__)(POBJGPU, struct KernelGsp *, NvU32); + NV_STATUS (*__kgspStatePreUnload__)(POBJGPU, struct KernelGsp *, NvU32); + NV_STATUS (*__kgspGetTunableState__)(POBJGPU, struct KernelGsp *, void *); + NV_STATUS (*__kgspStateInitUnlocked__)(POBJGPU, struct 
KernelGsp *); + void (*__kgspInitMissing__)(POBJGPU, struct KernelGsp *); + NV_STATUS (*__kgspStatePreInitLocked__)(POBJGPU, struct KernelGsp *); + NV_STATUS (*__kgspStatePreInitUnlocked__)(POBJGPU, struct KernelGsp *); + NV_STATUS (*__kgspStatePostLoad__)(POBJGPU, struct KernelGsp *, NvU32); + NV_STATUS (*__kgspAllocTunableState__)(POBJGPU, struct KernelGsp *, void **); + NV_STATUS (*__kgspSetTunableState__)(POBJGPU, struct KernelGsp *, void *); + struct OBJRPC *pRpc; + KernelGspFlcnUcode *pFwsecUcode; + KernelGspFlcnUcode *pBooterLoadUcode; + KernelGspFlcnUcode *pBooterReloadUcode; + KernelGspFlcnUcode *pBooterUnloadUcode; + MEMORY_DESCRIPTOR *pWprMetaDescriptor; + GspFwWprMeta *pWprMeta; + NvP64 pWprMetaMappingPriv; + MEMORY_DESCRIPTOR *pLibosInitArgumentsDescriptor; + LibosMemoryRegionInitArgument *pLibosInitArgumentsCached; + NvP64 pLibosInitArgumentsMappingPriv; + MEMORY_DESCRIPTOR *pGspArgumentsDescriptor; + GSP_ARGUMENTS_CACHED *pGspArgumentsCached; + NvP64 pGspArgumentsMappingPriv; + MEMORY_DESCRIPTOR *pGspRmBootUcodeMemdesc; + NvP64 pGspRmBootUcodeMemdescPriv; + NvU32 gspRmBootUcodeSize; + NvU8 *pGspRmBootUcodeImage; + RM_RISCV_UCODE_DESC *pGspRmBootUcodeDesc; + MEMORY_DESCRIPTOR *pGspUCodeRadix3Descriptor; + MEMORY_DESCRIPTOR *pSignatureMemdesc; + LIBOS_LOG_DECODE logDecode; + RM_LIBOS_LOG_MEM rmLibosLogMem[2]; + void *pLogElf; + MEMORY_DESCRIPTOR *pMemDesc_simAccessBuf; + SimAccessBuffer *pSimAccessBuf; + NvP64 pSimAccessBufPriv; + GspStaticConfigInfo gspStaticInfo; +}; + +#ifndef __NVOC_CLASS_KernelGsp_TYPEDEF__ +#define __NVOC_CLASS_KernelGsp_TYPEDEF__ +typedef struct KernelGsp KernelGsp; +#endif /* __NVOC_CLASS_KernelGsp_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGsp +#define __nvoc_class_id_KernelGsp 0x311d4e +#endif /* __nvoc_class_id_KernelGsp */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelGsp; + +#define __staticCast_KernelGsp(pThis) \ + ((pThis)->__nvoc_pbase_KernelGsp) + +#ifdef __nvoc_kernel_gsp_h_disabled +#define __dynamicCast_KernelGsp(pThis) ((KernelGsp*)NULL) +#else //__nvoc_kernel_gsp_h_disabled +#define __dynamicCast_KernelGsp(pThis) \ + ((KernelGsp*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelGsp))) +#endif //__nvoc_kernel_gsp_h_disabled + +#define PDB_PROP_KGSP_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KGSP_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelGsp(KernelGsp**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelGsp(KernelGsp**, Dynamic*, NvU32); +#define __objCreate_KernelGsp(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelGsp((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kgspConstructEngine(pGpu, pKernelGsp, arg0) kgspConstructEngine_DISPATCH(pGpu, pKernelGsp, arg0) +#define kgspRegisterIntrService(pGpu, pKernelGsp, pRecords) kgspRegisterIntrService_DISPATCH(pGpu, pKernelGsp, pRecords) +#define kgspServiceInterrupt(pGpu, pKernelGsp, pParams) kgspServiceInterrupt_DISPATCH(pGpu, pKernelGsp, pParams) +#define kgspConfigureFalcon(pGpu, pKernelGsp) kgspConfigureFalcon_DISPATCH(pGpu, pKernelGsp) +#define kgspConfigureFalcon_HAL(pGpu, pKernelGsp) kgspConfigureFalcon_DISPATCH(pGpu, pKernelGsp) +#define kgspIsDebugModeEnabled(pGpu, pKernelGsp) kgspIsDebugModeEnabled_DISPATCH(pGpu, pKernelGsp) +#define kgspIsDebugModeEnabled_HAL(pGpu, pKernelGsp) kgspIsDebugModeEnabled_DISPATCH(pGpu, pKernelGsp) +#define kgspBootstrapRiscvOSEarly(pGpu, pKernelGsp, pGspFw) kgspBootstrapRiscvOSEarly_DISPATCH(pGpu, pKernelGsp, pGspFw) +#define kgspBootstrapRiscvOSEarly_HAL(pGpu, pKernelGsp, pGspFw) kgspBootstrapRiscvOSEarly_DISPATCH(pGpu, pKernelGsp, pGspFw) +#define kgspGetGspRmBootUcodeStorage(pGpu, pKernelGsp, ppBinStorageImage, ppBinStorageDesc) kgspGetGspRmBootUcodeStorage_DISPATCH(pGpu, pKernelGsp, ppBinStorageImage, ppBinStorageDesc) +#define kgspGetGspRmBootUcodeStorage_HAL(pGpu, pKernelGsp, ppBinStorageImage, ppBinStorageDesc) kgspGetGspRmBootUcodeStorage_DISPATCH(pGpu, pKernelGsp, ppBinStorageImage, ppBinStorageDesc) +#define kgspGetBinArchiveGspRmBoot(pKernelGsp) kgspGetBinArchiveGspRmBoot_DISPATCH(pKernelGsp) +#define kgspGetBinArchiveGspRmBoot_HAL(pKernelGsp) kgspGetBinArchiveGspRmBoot_DISPATCH(pKernelGsp) +#define kgspExecuteSequencerCommand(pGpu, pKernelGsp, opCode, pPayLoad, payloadSize) kgspExecuteSequencerCommand_DISPATCH(pGpu, pKernelGsp, opCode, pPayLoad, payloadSize) +#define kgspExecuteSequencerCommand_HAL(pGpu, pKernelGsp, opCode, pPayLoad, payloadSize) kgspExecuteSequencerCommand_DISPATCH(pGpu, pKernelGsp, opCode, pPayLoad, payloadSize) +#define kgspReadUcodeFuseVersion(pGpu, pKernelGsp, ucodeId) kgspReadUcodeFuseVersion_DISPATCH(pGpu, pKernelGsp, ucodeId) +#define kgspReadUcodeFuseVersion_HAL(pGpu, pKernelGsp, ucodeId) kgspReadUcodeFuseVersion_DISPATCH(pGpu, pKernelGsp, ucodeId) +#define kgspResetHw(pGpu, pKernelGsp) kgspResetHw_DISPATCH(pGpu, pKernelGsp) +#define kgspResetHw_HAL(pGpu, pKernelGsp) kgspResetHw_DISPATCH(pGpu, pKernelGsp) +#define kgspIsEngineInReset(pGpu, pKernelGsp) kgspIsEngineInReset_DISPATCH(pGpu, pKernelGsp) +#define kgspIsEngineInReset_HAL(pGpu, pKernelGsp) kgspIsEngineInReset_DISPATCH(pGpu, pKernelGsp) +#define kgspGetFrtsSize(pGpu, pKernelGsp) kgspGetFrtsSize_DISPATCH(pGpu, pKernelGsp) +#define kgspGetFrtsSize_HAL(pGpu, pKernelGsp) kgspGetFrtsSize_DISPATCH(pGpu, pKernelGsp) +#define kgspExecuteFwsecFrts(pGpu, pKernelGsp, pFwsecUcode, frtsOffset) kgspExecuteFwsecFrts_DISPATCH(pGpu, pKernelGsp, pFwsecUcode, frtsOffset) +#define kgspExecuteFwsecFrts_HAL(pGpu, pKernelGsp, pFwsecUcode, frtsOffset) kgspExecuteFwsecFrts_DISPATCH(pGpu, pKernelGsp, pFwsecUcode, frtsOffset) +#define kgspExecuteHsFalcon(pGpu, pKernelGsp, pFlcnUcode, pKernelFlcn, pMailbox0, pMailbox1) kgspExecuteHsFalcon_DISPATCH(pGpu, pKernelGsp, pFlcnUcode, pKernelFlcn, pMailbox0, pMailbox1) 
+#define kgspExecuteHsFalcon_HAL(pGpu, pKernelGsp, pFlcnUcode, pKernelFlcn, pMailbox0, pMailbox1) kgspExecuteHsFalcon_DISPATCH(pGpu, pKernelGsp, pFlcnUcode, pKernelFlcn, pMailbox0, pMailbox1) +#define kgspGetBinArchiveBooterLoadUcode(pKernelGsp) kgspGetBinArchiveBooterLoadUcode_DISPATCH(pKernelGsp) +#define kgspGetBinArchiveBooterLoadUcode_HAL(pKernelGsp) kgspGetBinArchiveBooterLoadUcode_DISPATCH(pKernelGsp) +#define kgspGetBinArchiveBooterReloadUcode(pKernelGsp) kgspGetBinArchiveBooterReloadUcode_DISPATCH(pKernelGsp) +#define kgspGetBinArchiveBooterReloadUcode_HAL(pKernelGsp) kgspGetBinArchiveBooterReloadUcode_DISPATCH(pKernelGsp) +#define kgspGetBinArchiveBooterUnloadUcode(pKernelGsp) kgspGetBinArchiveBooterUnloadUcode_DISPATCH(pKernelGsp) +#define kgspGetBinArchiveBooterUnloadUcode_HAL(pKernelGsp) kgspGetBinArchiveBooterUnloadUcode_DISPATCH(pKernelGsp) +#define kgspGetSignatureSectionName(pGpu, pKernelGsp) kgspGetSignatureSectionName_DISPATCH(pGpu, pKernelGsp) +#define kgspGetSignatureSectionName_HAL(pGpu, pKernelGsp) kgspGetSignatureSectionName_DISPATCH(pGpu, pKernelGsp) +#define kgspStateDestroy(pGpu, pEngstate) kgspStateDestroy_DISPATCH(pGpu, pEngstate) +#define kgspFreeTunableState(pGpu, pEngstate, pTunableState) kgspFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgspCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kgspCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kgspClearInterrupt(pGpu, pIntrService, pParams) kgspClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define kgspIsPresent(pGpu, pEngstate) kgspIsPresent_DISPATCH(pGpu, pEngstate) +#define kgspReconcileTunableState(pGpu, pEngstate, pTunableState) kgspReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgspStateLoad(pGpu, pEngstate, arg0) kgspStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgspStateUnload(pGpu, pEngstate, arg0) kgspStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgspServiceNotificationInterrupt(pGpu, pIntrService, pParams) kgspServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define kgspStateInitLocked(pGpu, pEngstate) kgspStateInitLocked_DISPATCH(pGpu, pEngstate) +#define kgspStatePreLoad(pGpu, pEngstate, arg0) kgspStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgspStatePostUnload(pGpu, pEngstate, arg0) kgspStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgspStatePreUnload(pGpu, pEngstate, arg0) kgspStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kgspGetTunableState(pGpu, pEngstate, pTunableState) kgspGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kgspStateInitUnlocked(pGpu, pEngstate) kgspStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kgspInitMissing(pGpu, pEngstate) kgspInitMissing_DISPATCH(pGpu, pEngstate) +#define kgspStatePreInitLocked(pGpu, pEngstate) kgspStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kgspStatePreInitUnlocked(pGpu, pEngstate) kgspStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kgspStatePostLoad(pGpu, pEngstate, arg0) kgspStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kgspAllocTunableState(pGpu, pEngstate, ppTunableState) kgspAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kgspSetTunableState(pGpu, pEngstate, pTunableState) kgspSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +NV_STATUS kgspAllocBootArgs_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspAllocBootArgs(struct OBJGPU *pGpu, struct 
KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspAllocBootArgs(pGpu, pKernelGsp) kgspAllocBootArgs_TU102(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspAllocBootArgs_HAL(pGpu, pKernelGsp) kgspAllocBootArgs(pGpu, pKernelGsp) + +void kgspFreeBootArgs_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline void kgspFreeBootArgs(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspFreeBootArgs(pGpu, pKernelGsp) kgspFreeBootArgs_TU102(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspFreeBootArgs_HAL(pGpu, pKernelGsp) kgspFreeBootArgs(pGpu, pKernelGsp) + +void kgspProgramLibosBootArgsAddr_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline void kgspProgramLibosBootArgsAddr(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspProgramLibosBootArgsAddr(pGpu, pKernelGsp) kgspProgramLibosBootArgsAddr_TU102(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspProgramLibosBootArgsAddr_HAL(pGpu, pKernelGsp) kgspProgramLibosBootArgsAddr(pGpu, pKernelGsp) + +NV_STATUS kgspSetCmdQueueHead_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 queueIdx, NvU32 value); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspSetCmdQueueHead(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 queueIdx, NvU32 value) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspSetCmdQueueHead(pGpu, pKernelGsp, queueIdx, value) kgspSetCmdQueueHead_TU102(pGpu, pKernelGsp, queueIdx, value) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspSetCmdQueueHead_HAL(pGpu, pKernelGsp, queueIdx, value) kgspSetCmdQueueHead(pGpu, pKernelGsp, queueIdx, value) + +NV_STATUS kgspCalculateFbLayout_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspCalculateFbLayout(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspCalculateFbLayout(pGpu, pKernelGsp, pGspFw) kgspCalculateFbLayout_TU102(pGpu, pKernelGsp, pGspFw) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspCalculateFbLayout_HAL(pGpu, pKernelGsp, pGspFw) kgspCalculateFbLayout(pGpu, pKernelGsp, pGspFw) + +static inline NvU32 kgspGetNonWprHeapSize_ed6b8b(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return 1048576; +} + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NvU32 kgspGetNonWprHeapSize(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return 0; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspGetNonWprHeapSize(pGpu, pKernelGsp) kgspGetNonWprHeapSize_ed6b8b(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspGetNonWprHeapSize_HAL(pGpu, pKernelGsp) kgspGetNonWprHeapSize(pGpu, pKernelGsp) + +void kgspHealthCheck_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +#ifdef 
__nvoc_kernel_gsp_h_disabled +static inline void kgspHealthCheck(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspHealthCheck(pGpu, pKernelGsp) kgspHealthCheck_TU102(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspHealthCheck_HAL(pGpu, pKernelGsp) kgspHealthCheck(pGpu, pKernelGsp) + +NvU32 kgspService_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NvU32 kgspService(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return 0; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspService(pGpu, pKernelGsp) kgspService_TU102(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspService_HAL(pGpu, pKernelGsp) kgspService(pGpu, pKernelGsp) + +NV_STATUS kgspExtractVbiosFromRom_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspVbiosImg **ppVbiosImg); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspExtractVbiosFromRom(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspVbiosImg **ppVbiosImg) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspExtractVbiosFromRom(pGpu, pKernelGsp, ppVbiosImg) kgspExtractVbiosFromRom_TU102(pGpu, pKernelGsp, ppVbiosImg) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspExtractVbiosFromRom_HAL(pGpu, pKernelGsp, ppVbiosImg) kgspExtractVbiosFromRom(pGpu, pKernelGsp, ppVbiosImg) + +NV_STATUS kgspExecuteBooterLoad_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, const NvU64 gspFwWprMetaOffset); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspExecuteBooterLoad(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, const NvU64 gspFwWprMetaOffset) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspExecuteBooterLoad(pGpu, pKernelGsp, gspFwWprMetaOffset) kgspExecuteBooterLoad_TU102(pGpu, pKernelGsp, gspFwWprMetaOffset) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspExecuteBooterLoad_HAL(pGpu, pKernelGsp, gspFwWprMetaOffset) kgspExecuteBooterLoad(pGpu, pKernelGsp, gspFwWprMetaOffset) + +NV_STATUS kgspExecuteBooterReload_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspExecuteBooterReload(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspExecuteBooterReload(pGpu, pKernelGsp) kgspExecuteBooterReload_TU102(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspExecuteBooterReload_HAL(pGpu, pKernelGsp) kgspExecuteBooterReload(pGpu, pKernelGsp) + +NV_STATUS kgspExecuteBooterUnloadIfNeeded_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspExecuteBooterUnloadIfNeeded(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspExecuteBooterUnloadIfNeeded(pGpu, pKernelGsp) kgspExecuteBooterUnloadIfNeeded_TU102(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspExecuteBooterUnloadIfNeeded_HAL(pGpu, 
pKernelGsp) kgspExecuteBooterUnloadIfNeeded(pGpu, pKernelGsp) + +NV_STATUS kgspWaitForGfwBootOk_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspWaitForGfwBootOk(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspWaitForGfwBootOk(pGpu, pKernelGsp) kgspWaitForGfwBootOk_TU102(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +#define kgspWaitForGfwBootOk_HAL(pGpu, pKernelGsp) kgspWaitForGfwBootOk(pGpu, pKernelGsp) + +NV_STATUS kgspConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, ENGDESCRIPTOR arg0); + +static inline NV_STATUS kgspConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, ENGDESCRIPTOR arg0) { + return pKernelGsp->__kgspConstructEngine__(pGpu, pKernelGsp, arg0); +} + +void kgspRegisterIntrService_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, IntrServiceRecord pRecords[155]); + +static inline void kgspRegisterIntrService_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, IntrServiceRecord pRecords[155]) { + pKernelGsp->__kgspRegisterIntrService__(pGpu, pKernelGsp, pRecords); +} + +NvU32 kgspServiceInterrupt_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, IntrServiceServiceInterruptArguments *pParams); + +static inline NvU32 kgspServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, IntrServiceServiceInterruptArguments *pParams) { + return pKernelGsp->__kgspServiceInterrupt__(pGpu, pKernelGsp, pParams); +} + +void kgspConfigureFalcon_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +void kgspConfigureFalcon_GA102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +static inline void kgspConfigureFalcon_f2d351(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_PRECOMP(0); +} + +static inline void kgspConfigureFalcon_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + pKernelGsp->__kgspConfigureFalcon__(pGpu, pKernelGsp); +} + +NvBool kgspIsDebugModeEnabled_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +NvBool kgspIsDebugModeEnabled_GA100(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +static inline NvBool kgspIsDebugModeEnabled_108313(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0))); +} + +static inline NvBool kgspIsDebugModeEnabled_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspIsDebugModeEnabled__(pGpu, pKernelGsp); +} + +NV_STATUS kgspBootstrapRiscvOSEarly_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw); + +NV_STATUS kgspBootstrapRiscvOSEarly_GA102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw); + +static inline NV_STATUS kgspBootstrapRiscvOSEarly_5baef9(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS kgspBootstrapRiscvOSEarly_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw) { + return pKernelGsp->__kgspBootstrapRiscvOSEarly__(pGpu, pKernelGsp, pGspFw); +} + +void kgspGetGspRmBootUcodeStorage_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, BINDATA_STORAGE **ppBinStorageImage, BINDATA_STORAGE **ppBinStorageDesc); + +void kgspGetGspRmBootUcodeStorage_GA102(struct OBJGPU *pGpu, struct KernelGsp 
*pKernelGsp, BINDATA_STORAGE **ppBinStorageImage, BINDATA_STORAGE **ppBinStorageDesc); + +static inline void kgspGetGspRmBootUcodeStorage_f2d351(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, BINDATA_STORAGE **ppBinStorageImage, BINDATA_STORAGE **ppBinStorageDesc) { + NV_ASSERT_PRECOMP(0); +} + +static inline void kgspGetGspRmBootUcodeStorage_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, BINDATA_STORAGE **ppBinStorageImage, BINDATA_STORAGE **ppBinStorageDesc) { + pKernelGsp->__kgspGetGspRmBootUcodeStorage__(pGpu, pKernelGsp, ppBinStorageImage, ppBinStorageDesc); +} + +const BINDATA_ARCHIVE *kgspGetBinArchiveGspRmBoot_TU102(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveGspRmBoot_GA100(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveGspRmBoot_GA102(struct KernelGsp *pKernelGsp); + +static inline const BINDATA_ARCHIVE *kgspGetBinArchiveGspRmBoot_80f438(struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0)); +} + +static inline const BINDATA_ARCHIVE *kgspGetBinArchiveGspRmBoot_DISPATCH(struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspGetBinArchiveGspRmBoot__(pKernelGsp); +} + +NV_STATUS kgspExecuteSequencerCommand_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 opCode, NvU32 *pPayLoad, NvU32 payloadSize); + +NV_STATUS kgspExecuteSequencerCommand_GA102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 opCode, NvU32 *pPayLoad, NvU32 payloadSize); + +static inline NV_STATUS kgspExecuteSequencerCommand_5baef9(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 opCode, NvU32 *pPayLoad, NvU32 payloadSize) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS kgspExecuteSequencerCommand_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 opCode, NvU32 *pPayLoad, NvU32 payloadSize) { + return pKernelGsp->__kgspExecuteSequencerCommand__(pGpu, pKernelGsp, opCode, pPayLoad, payloadSize); +} + +static inline NvU32 kgspReadUcodeFuseVersion_b2b553(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 ucodeId) { + return 0; +} + +NvU32 kgspReadUcodeFuseVersion_GA100(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 ucodeId); + +static inline NvU32 kgspReadUcodeFuseVersion_474d46(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 ucodeId) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 kgspReadUcodeFuseVersion_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvU32 ucodeId) { + return pKernelGsp->__kgspReadUcodeFuseVersion__(pGpu, pKernelGsp, ucodeId); +} + +NV_STATUS kgspResetHw_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +static inline NV_STATUS kgspResetHw_5baef9(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS kgspResetHw_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspResetHw__(pGpu, pKernelGsp); +} + +NvBool kgspIsEngineInReset_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +static inline NvBool kgspIsEngineInReset_108313(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0))); +} + +static inline NvBool kgspIsEngineInReset_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspIsEngineInReset__(pGpu, pKernelGsp); +} + +NvU32 kgspGetFrtsSize_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); + +static inline NvU32 
kgspGetFrtsSize_4a4dee(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return 0; +} + +static inline NvU32 kgspGetFrtsSize_474d46(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 kgspGetFrtsSize_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspGetFrtsSize__(pGpu, pKernelGsp); +} + +NV_STATUS kgspExecuteFwsecFrts_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode *pFwsecUcode, const NvU64 frtsOffset); + +static inline NV_STATUS kgspExecuteFwsecFrts_5baef9(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode *pFwsecUcode, const NvU64 frtsOffset) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS kgspExecuteFwsecFrts_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode *pFwsecUcode, const NvU64 frtsOffset) { + return pKernelGsp->__kgspExecuteFwsecFrts__(pGpu, pKernelGsp, pFwsecUcode, frtsOffset); +} + +NV_STATUS kgspExecuteHsFalcon_TU102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode *pFlcnUcode, struct KernelFalcon *pKernelFlcn, NvU32 *pMailbox0, NvU32 *pMailbox1); + +NV_STATUS kgspExecuteHsFalcon_GA102(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode *pFlcnUcode, struct KernelFalcon *pKernelFlcn, NvU32 *pMailbox0, NvU32 *pMailbox1); + +static inline NV_STATUS kgspExecuteHsFalcon_5baef9(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode *pFlcnUcode, struct KernelFalcon *pKernelFlcn, NvU32 *pMailbox0, NvU32 *pMailbox1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS kgspExecuteHsFalcon_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode *pFlcnUcode, struct KernelFalcon *pKernelFlcn, NvU32 *pMailbox0, NvU32 *pMailbox1) { + return pKernelGsp->__kgspExecuteHsFalcon__(pGpu, pKernelGsp, pFlcnUcode, pKernelFlcn, pMailbox0, pMailbox1); +} + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_TU102(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_TU116(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_GA100(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_GA102(struct KernelGsp *pKernelGsp); + +static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_80f438(struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0)); +} + +static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterLoadUcode_DISPATCH(struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspGetBinArchiveBooterLoadUcode__(pKernelGsp); +} + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_TU102(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_TU116(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_GA100(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_GA102(struct KernelGsp *pKernelGsp); + +static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_80f438(struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0)); +} + +static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterReloadUcode_DISPATCH(struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspGetBinArchiveBooterReloadUcode__(pKernelGsp); +} + +const BINDATA_ARCHIVE 
*kgspGetBinArchiveBooterUnloadUcode_TU102(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_TU116(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_GA100(struct KernelGsp *pKernelGsp); + +const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_GA102(struct KernelGsp *pKernelGsp); + +static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_80f438(struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0)); +} + +static inline const BINDATA_ARCHIVE *kgspGetBinArchiveBooterUnloadUcode_DISPATCH(struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspGetBinArchiveBooterUnloadUcode__(pKernelGsp); +} + +static inline const char *kgspGetSignatureSectionName_63b8e2(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return ".fwsignature_ga100"; +} + +static inline const char *kgspGetSignatureSectionName_e46f5b(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return ".fwsignature_ga10x"; +} + +static inline const char *kgspGetSignatureSectionName_cbc19d(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return ".fwsignature_tu10x"; +} + +static inline const char *kgspGetSignatureSectionName_ab7237(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return ".fwsignature_tu11x"; +} + +static inline const char *kgspGetSignatureSectionName_9e2234(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return ((void *)0); +} + +static inline const char *kgspGetSignatureSectionName_80f438(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((void *)0)); +} + +static inline const char *kgspGetSignatureSectionName_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + return pKernelGsp->__kgspGetSignatureSectionName__(pGpu, pKernelGsp); +} + +static inline void kgspStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) { + pEngstate->__kgspStateDestroy__(pGpu, pEngstate); +} + +static inline void kgspFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunableState) { + pEngstate->__kgspFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgspCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kgspCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline NvBool kgspClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__kgspClearInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NvBool kgspIsPresent_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return pEngstate->__kgspIsPresent__(pGpu, pEngstate); +} + +static inline NV_STATUS kgspReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunableState) { + return pEngstate->__kgspReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgspStateLoad_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return pEngstate->__kgspStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgspStateUnload_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return pEngstate->__kgspStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgspServiceNotificationInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelGsp *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return 
pIntrService->__kgspServiceNotificationInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS kgspStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return pEngstate->__kgspStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgspStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return pEngstate->__kgspStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgspStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return pEngstate->__kgspStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgspStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return pEngstate->__kgspStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgspGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunableState) { + return pEngstate->__kgspGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kgspStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return pEngstate->__kgspStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kgspInitMissing_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) { + pEngstate->__kgspInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kgspStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return pEngstate->__kgspStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgspStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate) { + return pEngstate->__kgspStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kgspStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, NvU32 arg0) { + return pEngstate->__kgspStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kgspAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, void **ppTunableState) { + return pEngstate->__kgspAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kgspSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelGsp *pEngstate, void *pTunableState) { + return pEngstate->__kgspSetTunableState__(pGpu, pEngstate, pTunableState); +} + +void kgspDestruct_IMPL(struct KernelGsp *pKernelGsp); +#define __nvoc_kgspDestruct(pKernelGsp) kgspDestruct_IMPL(pKernelGsp) +void kgspPopulateGspRmInitArgs_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_SR_INIT_ARGUMENTS *pGspSrInitArgs); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline void kgspPopulateGspRmInitArgs(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_SR_INIT_ARGUMENTS *pGspSrInitArgs) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspPopulateGspRmInitArgs(pGpu, pKernelGsp, pGspSrInitArgs) kgspPopulateGspRmInitArgs_IMPL(pGpu, pKernelGsp, pGspSrInitArgs) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspInitRm_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspInitRm(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspInitRm(pGpu, pKernelGsp, pGspFw) kgspInitRm_IMPL(pGpu, pKernelGsp, pGspFw) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspUnloadRm_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); +#ifdef 
__nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspUnloadRm(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspUnloadRm(pGpu, pKernelGsp) kgspUnloadRm_IMPL(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspPrepareBootBinaryImage_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspPrepareBootBinaryImage(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspPrepareBootBinaryImage(pGpu, pKernelGsp) kgspPrepareBootBinaryImage_IMPL(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +void kgspSetupLibosInitArgs_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline void kgspSetupLibosInitArgs(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspSetupLibosInitArgs(pGpu, pKernelGsp) kgspSetupLibosInitArgs_IMPL(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +void kgspRpcRecvEvents_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline void kgspRpcRecvEvents(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspRpcRecvEvents(pGpu, pKernelGsp) kgspRpcRecvEvents_IMPL(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspWaitForRmInitDone_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspWaitForRmInitDone(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspWaitForRmInitDone(pGpu, pKernelGsp) kgspWaitForRmInitDone_IMPL(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspInitLogging_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspInitLogging(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspInitLogging(pGpu, pKernelGsp, pGspFw) kgspInitLogging_IMPL(pGpu, pKernelGsp, pGspFw) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspStartLogPolling_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspStartLogPolling(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspStartLogPolling(pGpu, pKernelGsp) kgspStartLogPolling_IMPL(pGpu, pKernelGsp) +#endif //__nvoc_kernel_gsp_h_disabled + +void kgspDumpGspLogs_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvBool arg0); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline void kgspDumpGspLogs(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); +} +#else 
//__nvoc_kernel_gsp_h_disabled +#define kgspDumpGspLogs(pGpu, pKernelGsp, arg0) kgspDumpGspLogs_IMPL(pGpu, pKernelGsp, arg0) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspExecuteSequencerBuffer_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, void *pRunCpuSeqParams); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspExecuteSequencerBuffer(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, void *pRunCpuSeqParams) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspExecuteSequencerBuffer(pGpu, pKernelGsp, pRunCpuSeqParams) kgspExecuteSequencerBuffer_IMPL(pGpu, pKernelGsp, pRunCpuSeqParams) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspParseFwsecUcodeFromVbiosImg_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, const KernelGspVbiosImg *const pVbiosImg, KernelGspFlcnUcode **ppFwsecUcode); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspParseFwsecUcodeFromVbiosImg(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, const KernelGspVbiosImg *const pVbiosImg, KernelGspFlcnUcode **ppFwsecUcode) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspParseFwsecUcodeFromVbiosImg(pGpu, pKernelGsp, pVbiosImg, ppFwsecUcode) kgspParseFwsecUcodeFromVbiosImg_IMPL(pGpu, pKernelGsp, pVbiosImg, ppFwsecUcode) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspAllocateBooterLoadUcodeImage_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppBooterLoadUcode); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspAllocateBooterLoadUcodeImage(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppBooterLoadUcode) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspAllocateBooterLoadUcodeImage(pGpu, pKernelGsp, ppBooterLoadUcode) kgspAllocateBooterLoadUcodeImage_IMPL(pGpu, pKernelGsp, ppBooterLoadUcode) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspAllocateBooterReloadUcodeImage_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppBooterReloadUcode); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspAllocateBooterReloadUcodeImage(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppBooterReloadUcode) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspAllocateBooterReloadUcodeImage(pGpu, pKernelGsp, ppBooterReloadUcode) kgspAllocateBooterReloadUcodeImage_IMPL(pGpu, pKernelGsp, ppBooterReloadUcode) +#endif //__nvoc_kernel_gsp_h_disabled + +NV_STATUS kgspAllocateBooterUnloadUcodeImage_IMPL(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppBooterUnloadUcode); +#ifdef __nvoc_kernel_gsp_h_disabled +static inline NV_STATUS kgspAllocateBooterUnloadUcodeImage(struct OBJGPU *pGpu, struct KernelGsp *pKernelGsp, KernelGspFlcnUcode **ppBooterUnloadUcode) { + NV_ASSERT_FAILED_PRECOMP("KernelGsp was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_gsp_h_disabled +#define kgspAllocateBooterUnloadUcodeImage(pGpu, pKernelGsp, ppBooterUnloadUcode) kgspAllocateBooterUnloadUcodeImage_IMPL(pGpu, pKernelGsp, ppBooterUnloadUcode) +#endif //__nvoc_kernel_gsp_h_disabled + +#undef PRIVATE_FIELD + + +NV_STATUS rpcRmApiControl_GSP(RM_API *pRmApi, 
NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParamStructPtr, NvU32 paramsSize); +NV_STATUS rpcRmApiAlloc_GSP(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, void *pAllocParams); +NV_STATUS rpcRmApiDupObject_GSP(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, NvHandle *phObject, + NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags); +NV_STATUS rpcRmApiFree_GSP(RM_API *pRmApi, NvHandle hClient, NvHandle hObject); + +/* Free a KernelGspVbiosImg structure */ +void kgspFreeVbiosImg(KernelGspVbiosImg *pVbiosImg); +/* Free a KernelGspFlcnUcode structure */ +void kgspFreeFlcnUcode(KernelGspFlcnUcode *pFlcnUcode); + +#endif // KERNEL_GSP_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_GSP_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_head_nvoc.c b/src/nvidia/generated/g_kernel_head_nvoc.c new file mode 100644 index 000000000..bfcfc7e58 --- /dev/null +++ b/src/nvidia/generated/g_kernel_head_nvoc.c @@ -0,0 +1,246 @@ +#define NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_head_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x0145e6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_KernelHead(KernelHead*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelHead(KernelHead*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelHead(KernelHead*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelHead(KernelHead*, RmHalspecOwner* ); +void __nvoc_dtor_KernelHead(KernelHead*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelHead; + +static const struct NVOC_RTTI __nvoc_rtti_KernelHead_KernelHead = { + /*pClassDef=*/ &__nvoc_class_def_KernelHead, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelHead, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelHead_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelHead, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelHead = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_KernelHead_KernelHead, + &__nvoc_rtti_KernelHead_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelHead), + /*classId=*/ classId(KernelHead), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelHead", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelHead, + /*pCastInfo=*/ &__nvoc_castinfo_KernelHead, + /*pExportInfo=*/ &__nvoc_export_info_KernelHead +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelHead = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_KernelHead(KernelHead *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned 
long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail_Object; + __nvoc_init_dataField_KernelHead(pThis, pRmhalspecowner); + + status = __nvoc_kheadConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail__init; + goto __nvoc_ctor_KernelHead_exit; // Success + +__nvoc_ctor_KernelHead_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_KernelHead_fail_Object: +__nvoc_ctor_KernelHead_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelHead_1(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + // Hal function -- kheadProcessVblankCallbacks + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kheadProcessVblankCallbacks__ = &kheadProcessVblankCallbacks_IMPL; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kheadProcessVblankCallbacks__ = &kheadProcessVblankCallbacks_e426af; + } + } + else if (0) + { + } + + // Hal function -- kheadResetPendingVblank + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kheadResetPendingVblank__ = &kheadResetPendingVblank_v04_00_KERNEL; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kheadResetPendingVblank__ = &kheadResetPendingVblank_e426af; + } + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- kheadResetPendingVblankForKernel + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kheadResetPendingVblankForKernel__ = 
&kheadResetPendingVblankForKernel_v04_00_KERNEL; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kheadResetPendingVblankForKernel__ = &kheadResetPendingVblankForKernel_e426af; + } + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- kheadReadPendingVblank + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kheadReadPendingVblank__ = &kheadReadPendingVblank_v04_00_KERNEL; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kheadReadPendingVblank__ = &kheadReadPendingVblank_92bfc3; + } + } + else if (0) + { + } +} + +void __nvoc_init_funcTable_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelHead_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelHead = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_KernelHead(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelHead *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelHead)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelHead)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelHead); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelHead(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelHead(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelHead_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelHead_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelHead(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_head_nvoc.h b/src/nvidia/generated/g_kernel_head_nvoc.h new file mode 100644 index 000000000..4ebe31e4f --- /dev/null +++ b/src/nvidia/generated/g_kernel_head_nvoc.h @@ -0,0 +1,351 @@ +#ifndef _G_KERNEL_HEAD_NVOC_H_ +#define _G_KERNEL_HEAD_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/**************************** Kernelhead Routines **************************\ +* * +* Kernel head object function Definitions. * +* * +\***************************************************************************/ + +#include "g_kernel_head_nvoc.h" + +#ifndef KERNEL_HEAD_H +#define KERNEL_HEAD_H + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/disp/vblank_callback/vblank.h" +#include "gpu/gpu_halspec.h" +/* ------------------------ Types definitions ------------------------------ */ +enum +{ + headIntr_none = 0, + headIntr_vblank = NVBIT(0), +}; + +/* ------------------------ Macros & Defines ------------------------------- */ + +#ifdef NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct __nvoc_inner_struc_KernelHead_1__ { + struct { + NvU32 Total; + NvU32 LowLatency; + NvU32 NormLatency; + } Counters; + struct { + VBLANKCALLBACK *pListLL; + VBLANKCALLBACK *pListNL; + } Callback; + NvU32 IntrState; +}; + + +struct KernelHead { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct KernelHead *__nvoc_pbase_KernelHead; + void (*__kheadProcessVblankCallbacks__)(struct OBJGPU *, struct KernelHead *, NvU32); + void (*__kheadResetPendingVblank__)(struct OBJGPU *, struct KernelHead *, THREAD_STATE_NODE *); + void (*__kheadResetPendingVblankForKernel__)(struct OBJGPU *, struct KernelHead *, THREAD_STATE_NODE *); + NvU32 (*__kheadReadPendingVblank__)(struct OBJGPU *, struct KernelHead *, NvU32); + struct __nvoc_inner_struc_KernelHead_1__ Vblank; + NvU32 PublicId; +}; + +#ifndef __NVOC_CLASS_KernelHead_TYPEDEF__ +#define __NVOC_CLASS_KernelHead_TYPEDEF__ +typedef struct KernelHead KernelHead; +#endif /* __NVOC_CLASS_KernelHead_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelHead +#define __nvoc_class_id_KernelHead 0x0145e6 +#endif /* __nvoc_class_id_KernelHead */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead; + +#define __staticCast_KernelHead(pThis) \ + ((pThis)->__nvoc_pbase_KernelHead) + +#ifdef __nvoc_kernel_head_h_disabled +#define __dynamicCast_KernelHead(pThis) ((KernelHead*)NULL) +#else //__nvoc_kernel_head_h_disabled +#define __dynamicCast_KernelHead(pThis) \ + ((KernelHead*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelHead))) +#endif 
//__nvoc_kernel_head_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelHead(KernelHead**, Dynamic*, NvU32); +#define __objCreate_KernelHead(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelHead((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kheadProcessVblankCallbacks(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks_DISPATCH(pGpu, pKernelHead, arg0) +#define kheadProcessVblankCallbacks_HAL(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks_DISPATCH(pGpu, pKernelHead, arg0) +#define kheadResetPendingVblank(pGpu, pKhead, arg0) kheadResetPendingVblank_DISPATCH(pGpu, pKhead, arg0) +#define kheadResetPendingVblank_HAL(pGpu, pKhead, arg0) kheadResetPendingVblank_DISPATCH(pGpu, pKhead, arg0) +#define kheadResetPendingVblankForKernel(pGpu, pKhead, arg0) kheadResetPendingVblankForKernel_DISPATCH(pGpu, pKhead, arg0) +#define kheadResetPendingVblankForKernel_HAL(pGpu, pKhead, arg0) kheadResetPendingVblankForKernel_DISPATCH(pGpu, pKhead, arg0) +#define kheadReadPendingVblank(pGpu, pKernelHead, intr) kheadReadPendingVblank_DISPATCH(pGpu, pKernelHead, intr) +#define kheadReadPendingVblank_HAL(pGpu, pKernelHead, intr) kheadReadPendingVblank_DISPATCH(pGpu, pKernelHead, intr) +NvU32 kheadGetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead); + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankTotalCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankTotalCounter(pKernelHead) kheadGetVblankTotalCounter_IMPL(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankTotalCounter_HAL(pKernelHead) kheadGetVblankTotalCounter(pKernelHead) + +void kheadSetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg0); + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankTotalCounter(struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankTotalCounter(pKernelHead, arg0) kheadSetVblankTotalCounter_IMPL(pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadSetVblankTotalCounter_HAL(pKernelHead, arg0) kheadSetVblankTotalCounter(pKernelHead, arg0) + +NvU32 kheadGetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead); + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankLowLatencyCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankLowLatencyCounter(pKernelHead) kheadGetVblankLowLatencyCounter_IMPL(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankLowLatencyCounter_HAL(pKernelHead) kheadGetVblankLowLatencyCounter(pKernelHead) + +void kheadSetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg0); + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankLowLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankLowLatencyCounter(pKernelHead, arg0) kheadSetVblankLowLatencyCounter_IMPL(pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadSetVblankLowLatencyCounter_HAL(pKernelHead, arg0) 
kheadSetVblankLowLatencyCounter(pKernelHead, arg0) + +static inline NvU32 kheadGetVblankNormLatencyCounter_46f6a7(struct KernelHead *pKernelHead) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadGetVblankNormLatencyCounter(struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetVblankNormLatencyCounter(pKernelHead) kheadGetVblankNormLatencyCounter_46f6a7(pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetVblankNormLatencyCounter_HAL(pKernelHead) kheadGetVblankNormLatencyCounter(pKernelHead) + +static inline void kheadSetVblankNormLatencyCounter_b3696a(struct KernelHead *pKernelHead, NvU32 arg0) { + return; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadSetVblankNormLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadSetVblankNormLatencyCounter(pKernelHead, arg0) kheadSetVblankNormLatencyCounter_b3696a(pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadSetVblankNormLatencyCounter_HAL(pKernelHead, arg0) kheadSetVblankNormLatencyCounter(pKernelHead, arg0) + +static inline NvBool kheadReadVblankIntrEnable_491d52(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvBool kheadReadVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadVblankIntrEnable(pGpu, pKernelHead) kheadReadVblankIntrEnable_491d52(pGpu, pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead) kheadReadVblankIntrEnable(pGpu, pKernelHead) + +static inline NvBool kheadGetDisplayInitialized_491d52(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline NvBool kheadGetDisplayInitialized(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadGetDisplayInitialized(pGpu, pKernelHead) kheadGetDisplayInitialized_491d52(pGpu, pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadGetDisplayInitialized_HAL(pGpu, pKernelHead) kheadGetDisplayInitialized(pGpu, pKernelHead) + +static inline void kheadWriteVblankIntrEnable_b3696a(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) { + return; +} + +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadWriteVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable_b3696a(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#define kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0) + +static inline void kheadProcessVblankCallbacks_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_PRECOMP(0); + return; +} + +static inline void kheadProcessVblankCallbacks_DISPATCH(struct OBJGPU *pGpu, struct 
KernelHead *pKernelHead, NvU32 arg0) { + pKernelHead->__kheadProcessVblankCallbacks__(pGpu, pKernelHead, arg0); +} + +void kheadResetPendingVblank_v04_00_KERNEL(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0); + +static inline void kheadResetPendingVblank_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + NV_ASSERT_PRECOMP(0); + return; +} + +static inline void kheadResetPendingVblank_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + pKhead->__kheadResetPendingVblank__(pGpu, pKhead, arg0); +} + +void kheadResetPendingVblankForKernel_v04_00_KERNEL(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0); + +static inline void kheadResetPendingVblankForKernel_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + NV_ASSERT_PRECOMP(0); + return; +} + +static inline void kheadResetPendingVblankForKernel_b3696a(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + return; +} + +static inline void kheadResetPendingVblankForKernel_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) { + pKhead->__kheadResetPendingVblankForKernel__(pGpu, pKhead, arg0); +} + +NvU32 kheadReadPendingVblank_v04_00_KERNEL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr); + +static inline NvU32 kheadReadPendingVblank_92bfc3(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr) { + NV_ASSERT_PRECOMP(0); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NvU32 kheadReadPendingVblank_DISPATCH(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr) { + return pKernelHead->__kheadReadPendingVblank__(pGpu, pKernelHead, intr); +} + +NV_STATUS kheadConstruct_IMPL(struct KernelHead *arg_pKernelHead); +#define __nvoc_kheadConstruct(arg_pKernelHead) kheadConstruct_IMPL(arg_pKernelHead) +void kheadAddVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0); +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadAddVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadAddVblankCallback(pGpu, pKernelHead, arg0) kheadAddVblankCallback_IMPL(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +void kheadDeleteVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0); +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadDeleteVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadDeleteVblankCallback(pGpu, pKernelHead, arg0) kheadDeleteVblankCallback_IMPL(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +NvU32 kheadCheckVblankCallbacksQueued_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0, NvU32 *arg1); +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadCheckVblankCallbacksQueued(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, arg0, arg1) kheadCheckVblankCallbacksQueued_IMPL(pGpu, pKernelHead, arg0, arg1) +#endif //__nvoc_kernel_head_h_disabled + +NvU32 
kheadReadVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead); +#ifdef __nvoc_kernel_head_h_disabled +static inline NvU32 kheadReadVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); + return 0; +} +#else //__nvoc_kernel_head_h_disabled +#define kheadReadVblankIntrState(pGpu, pKernelHead) kheadReadVblankIntrState_IMPL(pGpu, pKernelHead) +#endif //__nvoc_kernel_head_h_disabled + +void kheadWriteVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0); +#ifdef __nvoc_kernel_head_h_disabled +static inline void kheadWriteVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!"); +} +#else //__nvoc_kernel_head_h_disabled +#define kheadWriteVblankIntrState(pGpu, pKernelHead, arg0) kheadWriteVblankIntrState_IMPL(pGpu, pKernelHead, arg0) +#endif //__nvoc_kernel_head_h_disabled + +#undef PRIVATE_FIELD + + +void kheadProcessVblankCallbacks_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 state); + +#endif // KERNEL_HEAD_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_HEAD_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_ioctrl_nvoc.c b/src/nvidia/generated/g_kernel_ioctrl_nvoc.c new file mode 100644 index 000000000..c44d95525 --- /dev/null +++ b/src/nvidia/generated/g_kernel_ioctrl_nvoc.c @@ -0,0 +1,355 @@ +#define NVOC_KERNEL_IOCTRL_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_ioctrl_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x880c7d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelIoctrl; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelIoctrl(KernelIoctrl*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelIoctrl(KernelIoctrl*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelIoctrl(KernelIoctrl*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelIoctrl(KernelIoctrl*, RmHalspecOwner* ); +void __nvoc_dtor_KernelIoctrl(KernelIoctrl*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelIoctrl; + +static const struct NVOC_RTTI __nvoc_rtti_KernelIoctrl_KernelIoctrl = { + /*pClassDef=*/ &__nvoc_class_def_KernelIoctrl, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelIoctrl, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelIoctrl_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelIoctrl, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelIoctrl_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelIoctrl, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelIoctrl = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelIoctrl_KernelIoctrl, + &__nvoc_rtti_KernelIoctrl_OBJENGSTATE, + &__nvoc_rtti_KernelIoctrl_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelIoctrl = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelIoctrl), + /*classId=*/ classId(KernelIoctrl), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ 
"KernelIoctrl", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelIoctrl, + /*pCastInfo=*/ &__nvoc_castinfo_KernelIoctrl, + /*pExportInfo=*/ &__nvoc_export_info_KernelIoctrl +}; + +static NV_STATUS __nvoc_thunk_KernelIoctrl_engstateConstructEngine(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) { + return kioctrlConstructEngine(arg0, (struct KernelIoctrl *)(((unsigned char *)arg1) - __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlReconcileTunableState(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStateLoad(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStateUnload(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStateInitLocked(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStatePreLoad(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStatePostUnload(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_kioctrlStateDestroy(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStatePreUnload(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStateInitUnlocked(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kioctrlInitMissing(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStatePreInitLocked(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStatePreInitUnlocked(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return 
engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlGetTunableState(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlCompareTunableState(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kioctrlFreeTunableState(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlStatePostLoad(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlAllocTunableState(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kioctrlSetTunableState(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kioctrlIsPresent(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelIoctrl_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelIoctrl = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelIoctrl(KernelIoctrl *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelIoctrl(KernelIoctrl *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_KIOCTRL_IS_MISSING + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->setProperty(pThis, PDB_PROP_KIOCTRL_IS_MISSING, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KIOCTRL_MINION_AVAILABLE + if (( ((chipHal_HalVarIdx >> 
5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KIOCTRL_MINION_AVAILABLE, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KIOCTRL_MINION_AVAILABLE, ((NvBool)(0 != 0))); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelIoctrl(KernelIoctrl *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelIoctrl_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelIoctrl(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelIoctrl_exit; // Success + +__nvoc_ctor_KernelIoctrl_fail_OBJENGSTATE: +__nvoc_ctor_KernelIoctrl_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelIoctrl_1(KernelIoctrl *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__kioctrlConstructEngine__ = &kioctrlConstructEngine_IMPL; + + // Hal function -- kioctrlGetMinionEnableDefault + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kioctrlGetMinionEnableDefault__ = &kioctrlGetMinionEnableDefault_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__kioctrlGetMinionEnableDefault__ = &kioctrlGetMinionEnableDefault_bf6dfa; + } + } + + // Hal function -- kioctrlMinionConstruct + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kioctrlMinionConstruct__ = &kioctrlMinionConstruct_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__kioctrlMinionConstruct__ = &kioctrlMinionConstruct_ac1694; + } + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelIoctrl_engstateConstructEngine; + + pThis->__kioctrlReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kioctrlReconcileTunableState; + + pThis->__kioctrlStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStateLoad; + + pThis->__kioctrlStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStateUnload; + + pThis->__kioctrlStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStateInitLocked; + + pThis->__kioctrlStatePreLoad__ = 
&__nvoc_thunk_OBJENGSTATE_kioctrlStatePreLoad; + + pThis->__kioctrlStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStatePostUnload; + + pThis->__kioctrlStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStateDestroy; + + pThis->__kioctrlStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStatePreUnload; + + pThis->__kioctrlStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStateInitUnlocked; + + pThis->__kioctrlInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kioctrlInitMissing; + + pThis->__kioctrlStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStatePreInitLocked; + + pThis->__kioctrlStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStatePreInitUnlocked; + + pThis->__kioctrlGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kioctrlGetTunableState; + + pThis->__kioctrlCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kioctrlCompareTunableState; + + pThis->__kioctrlFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kioctrlFreeTunableState; + + pThis->__kioctrlStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kioctrlStatePostLoad; + + pThis->__kioctrlAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kioctrlAllocTunableState; + + pThis->__kioctrlSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kioctrlSetTunableState; + + pThis->__kioctrlIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kioctrlIsPresent; +} + +void __nvoc_init_funcTable_KernelIoctrl(KernelIoctrl *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelIoctrl_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelIoctrl(KernelIoctrl *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelIoctrl = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelIoctrl(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelIoctrl(KernelIoctrl **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelIoctrl *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelIoctrl)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelIoctrl)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelIoctrl); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelIoctrl(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelIoctrl(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelIoctrl_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelIoctrl_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelIoctrl(KernelIoctrl **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelIoctrl(ppThis, pParent, createFlags); + + return status; +} + diff --git 
a/src/nvidia/generated/g_kernel_ioctrl_nvoc.h b/src/nvidia/generated/g_kernel_ioctrl_nvoc.h new file mode 100644 index 000000000..d4925015b --- /dev/null +++ b/src/nvidia/generated/g_kernel_ioctrl_nvoc.h @@ -0,0 +1,281 @@ +#ifndef _G_KERNEL_IOCTRL_NVOC_H_ +#define _G_KERNEL_IOCTRL_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_ioctrl_nvoc.h" + +#ifndef _KERNEL_IOCTRL_H_ +#define _KERNEL_IOCTRL_H_ + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "lib/ref_count.h" +#include "gpu/gpu.h" +#include "nvCpuUuid.h" + +#if defined(INCLUDE_NVLINK_LIB) +#include "nvlink.h" +#include "nvlink_export.h" +#endif + +/*! + * KernelIoctrl is a logical abstraction of the GPU Ioctrl Engine. The + * Public API of the Ioctrl Engine is exposed through this object, and + * any interfaces which do not manage the underlying Ioctrl hardware + * can be managed by this object. 
+ */ +#ifdef NVOC_KERNEL_IOCTRL_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelIoctrl { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelIoctrl *__nvoc_pbase_KernelIoctrl; + NV_STATUS (*__kioctrlConstructEngine__)(struct OBJGPU *, struct KernelIoctrl *, NvU32); + NvBool (*__kioctrlGetMinionEnableDefault__)(struct OBJGPU *, struct KernelIoctrl *); + NV_STATUS (*__kioctrlMinionConstruct__)(struct OBJGPU *, struct KernelIoctrl *); + NV_STATUS (*__kioctrlReconcileTunableState__)(POBJGPU, struct KernelIoctrl *, void *); + NV_STATUS (*__kioctrlStateLoad__)(POBJGPU, struct KernelIoctrl *, NvU32); + NV_STATUS (*__kioctrlStateUnload__)(POBJGPU, struct KernelIoctrl *, NvU32); + NV_STATUS (*__kioctrlStateInitLocked__)(POBJGPU, struct KernelIoctrl *); + NV_STATUS (*__kioctrlStatePreLoad__)(POBJGPU, struct KernelIoctrl *, NvU32); + NV_STATUS (*__kioctrlStatePostUnload__)(POBJGPU, struct KernelIoctrl *, NvU32); + void (*__kioctrlStateDestroy__)(POBJGPU, struct KernelIoctrl *); + NV_STATUS (*__kioctrlStatePreUnload__)(POBJGPU, struct KernelIoctrl *, NvU32); + NV_STATUS (*__kioctrlStateInitUnlocked__)(POBJGPU, struct KernelIoctrl *); + void (*__kioctrlInitMissing__)(POBJGPU, struct KernelIoctrl *); + NV_STATUS (*__kioctrlStatePreInitLocked__)(POBJGPU, struct KernelIoctrl *); + NV_STATUS (*__kioctrlStatePreInitUnlocked__)(POBJGPU, struct KernelIoctrl *); + NV_STATUS (*__kioctrlGetTunableState__)(POBJGPU, struct KernelIoctrl *, void *); + NV_STATUS (*__kioctrlCompareTunableState__)(POBJGPU, struct KernelIoctrl *, void *, void *); + void (*__kioctrlFreeTunableState__)(POBJGPU, struct KernelIoctrl *, void *); + NV_STATUS (*__kioctrlStatePostLoad__)(POBJGPU, struct KernelIoctrl *, NvU32); + NV_STATUS (*__kioctrlAllocTunableState__)(POBJGPU, struct KernelIoctrl *, void **); + NV_STATUS (*__kioctrlSetTunableState__)(POBJGPU, struct KernelIoctrl *, void *); + NvBool (*__kioctrlIsPresent__)(POBJGPU, struct KernelIoctrl *); + NvBool PDB_PROP_KIOCTRL_MINION_AVAILABLE; + NvBool PDB_PROP_KIOCTRL_MINION_FORCE_BOOT; + NvBool PDB_PROP_KIOCTRL_MINION_CACHE_SEEDS; + NvU32 PublicId; + NvU32 localDiscoveredLinks; + NvU32 localGlobalLinkOffset; + NvU32 ipVerIoctrl; + NvU32 ipVerMinion; + NvU32 ioctrlDiscoverySize; + NvU8 numDevices; +}; + +#ifndef __NVOC_CLASS_KernelIoctrl_TYPEDEF__ +#define __NVOC_CLASS_KernelIoctrl_TYPEDEF__ +typedef struct KernelIoctrl KernelIoctrl; +#endif /* __NVOC_CLASS_KernelIoctrl_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelIoctrl +#define __nvoc_class_id_KernelIoctrl 0x880c7d +#endif /* __nvoc_class_id_KernelIoctrl */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelIoctrl; + +#define __staticCast_KernelIoctrl(pThis) \ + ((pThis)->__nvoc_pbase_KernelIoctrl) + +#ifdef __nvoc_kernel_ioctrl_h_disabled +#define __dynamicCast_KernelIoctrl(pThis) ((KernelIoctrl*)NULL) +#else //__nvoc_kernel_ioctrl_h_disabled +#define __dynamicCast_KernelIoctrl(pThis) \ + ((KernelIoctrl*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelIoctrl))) +#endif //__nvoc_kernel_ioctrl_h_disabled + +#define PDB_PROP_KIOCTRL_MINION_CACHE_SEEDS_BASE_CAST +#define PDB_PROP_KIOCTRL_MINION_CACHE_SEEDS_BASE_NAME PDB_PROP_KIOCTRL_MINION_CACHE_SEEDS +#define PDB_PROP_KIOCTRL_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KIOCTRL_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_KIOCTRL_MINION_AVAILABLE_BASE_CAST +#define PDB_PROP_KIOCTRL_MINION_AVAILABLE_BASE_NAME PDB_PROP_KIOCTRL_MINION_AVAILABLE +#define PDB_PROP_KIOCTRL_MINION_FORCE_BOOT_BASE_CAST +#define PDB_PROP_KIOCTRL_MINION_FORCE_BOOT_BASE_NAME PDB_PROP_KIOCTRL_MINION_FORCE_BOOT + +NV_STATUS __nvoc_objCreateDynamic_KernelIoctrl(KernelIoctrl**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelIoctrl(KernelIoctrl**, Dynamic*, NvU32); +#define __objCreate_KernelIoctrl(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelIoctrl((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kioctrlConstructEngine(arg0, arg1, arg2) kioctrlConstructEngine_DISPATCH(arg0, arg1, arg2) +#define kioctrlGetMinionEnableDefault(pGpu, pKernelIoctrl) kioctrlGetMinionEnableDefault_DISPATCH(pGpu, pKernelIoctrl) +#define kioctrlGetMinionEnableDefault_HAL(pGpu, pKernelIoctrl) kioctrlGetMinionEnableDefault_DISPATCH(pGpu, pKernelIoctrl) +#define kioctrlMinionConstruct(pGpu, pKernelIoctrl) kioctrlMinionConstruct_DISPATCH(pGpu, pKernelIoctrl) +#define kioctrlMinionConstruct_HAL(pGpu, pKernelIoctrl) kioctrlMinionConstruct_DISPATCH(pGpu, pKernelIoctrl) +#define kioctrlReconcileTunableState(pGpu, pEngstate, pTunableState) kioctrlReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kioctrlStateLoad(pGpu, pEngstate, arg0) kioctrlStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kioctrlStateUnload(pGpu, pEngstate, arg0) kioctrlStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kioctrlStateInitLocked(pGpu, pEngstate) kioctrlStateInitLocked_DISPATCH(pGpu, pEngstate) +#define kioctrlStatePreLoad(pGpu, pEngstate, arg0) kioctrlStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kioctrlStatePostUnload(pGpu, pEngstate, arg0) kioctrlStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kioctrlStateDestroy(pGpu, pEngstate) kioctrlStateDestroy_DISPATCH(pGpu, pEngstate) +#define kioctrlStatePreUnload(pGpu, pEngstate, arg0) kioctrlStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kioctrlStateInitUnlocked(pGpu, pEngstate) kioctrlStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kioctrlInitMissing(pGpu, pEngstate) kioctrlInitMissing_DISPATCH(pGpu, pEngstate) +#define kioctrlStatePreInitLocked(pGpu, pEngstate) kioctrlStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kioctrlStatePreInitUnlocked(pGpu, pEngstate) kioctrlStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kioctrlGetTunableState(pGpu, pEngstate, pTunableState) kioctrlGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kioctrlCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kioctrlCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kioctrlFreeTunableState(pGpu, pEngstate, pTunableState) kioctrlFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kioctrlStatePostLoad(pGpu, pEngstate, arg0) kioctrlStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kioctrlAllocTunableState(pGpu, pEngstate, ppTunableState) kioctrlAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kioctrlSetTunableState(pGpu, pEngstate, pTunableState) kioctrlSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kioctrlIsPresent(pGpu, pEngstate) kioctrlIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kioctrlConstructEngine_IMPL(struct OBJGPU *arg0, struct KernelIoctrl *arg1, NvU32 arg2); + +static inline NV_STATUS kioctrlConstructEngine_DISPATCH(struct OBJGPU *arg0, 
struct KernelIoctrl *arg1, NvU32 arg2) { + return arg1->__kioctrlConstructEngine__(arg0, arg1, arg2); +} + +static inline NvBool kioctrlGetMinionEnableDefault_bf6dfa(struct OBJGPU *pGpu, struct KernelIoctrl *pKernelIoctrl) { + return ((NvBool)(0 != 0)); +} + +NvBool kioctrlGetMinionEnableDefault_GV100(struct OBJGPU *pGpu, struct KernelIoctrl *pKernelIoctrl); + +static inline NvBool kioctrlGetMinionEnableDefault_DISPATCH(struct OBJGPU *pGpu, struct KernelIoctrl *pKernelIoctrl) { + return pKernelIoctrl->__kioctrlGetMinionEnableDefault__(pGpu, pKernelIoctrl); +} + +static inline NV_STATUS kioctrlMinionConstruct_ac1694(struct OBJGPU *pGpu, struct KernelIoctrl *pKernelIoctrl) { + return NV_OK; +} + +NV_STATUS kioctrlMinionConstruct_GV100(struct OBJGPU *pGpu, struct KernelIoctrl *pKernelIoctrl); + +static inline NV_STATUS kioctrlMinionConstruct_DISPATCH(struct OBJGPU *pGpu, struct KernelIoctrl *pKernelIoctrl) { + return pKernelIoctrl->__kioctrlMinionConstruct__(pGpu, pKernelIoctrl); +} + +static inline NV_STATUS kioctrlReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunableState) { + return pEngstate->__kioctrlReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kioctrlStateLoad_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return pEngstate->__kioctrlStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kioctrlStateUnload_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return pEngstate->__kioctrlStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kioctrlStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return pEngstate->__kioctrlStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kioctrlStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return pEngstate->__kioctrlStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kioctrlStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return pEngstate->__kioctrlStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void kioctrlStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + pEngstate->__kioctrlStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS kioctrlStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return pEngstate->__kioctrlStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kioctrlStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return pEngstate->__kioctrlStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kioctrlInitMissing_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + pEngstate->__kioctrlInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kioctrlStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return pEngstate->__kioctrlStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kioctrlStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return pEngstate->__kioctrlStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kioctrlGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunableState) { + return pEngstate->__kioctrlGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kioctrlCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunables1, void *pTunables2) { + return 
pEngstate->__kioctrlCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kioctrlFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunableState) { + pEngstate->__kioctrlFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kioctrlStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, NvU32 arg0) { + return pEngstate->__kioctrlStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kioctrlAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void **ppTunableState) { + return pEngstate->__kioctrlAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kioctrlSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate, void *pTunableState) { + return pEngstate->__kioctrlSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kioctrlIsPresent_DISPATCH(POBJGPU pGpu, struct KernelIoctrl *pEngstate) { + return pEngstate->__kioctrlIsPresent__(pGpu, pEngstate); +} + +void kioctrlDestructEngine_IMPL(struct KernelIoctrl *arg0); +#ifdef __nvoc_kernel_ioctrl_h_disabled +static inline void kioctrlDestructEngine(struct KernelIoctrl *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelIoctrl was disabled!"); +} +#else //__nvoc_kernel_ioctrl_h_disabled +#define kioctrlDestructEngine(arg0) kioctrlDestructEngine_IMPL(arg0) +#endif //__nvoc_kernel_ioctrl_h_disabled + +#undef PRIVATE_FIELD + + +// Link Conversion Macros +#define KIOCTRL_LINK_GLOBAL_TO_LOCAL_MASK(mask) (mask >> pKernelIoctrl->localGlobalLinkOffset) + +#endif // _KERNEL_IOCTRL_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_IOCTRL_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_mc_nvoc.c b/src/nvidia/generated/g_kernel_mc_nvoc.c new file mode 100644 index 000000000..f0796a788 --- /dev/null +++ b/src/nvidia/generated/g_kernel_mc_nvoc.c @@ -0,0 +1,321 @@ +#define NVOC_KERNEL_MC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_mc_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x3827ff = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMc; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelMc(KernelMc*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelMc(KernelMc*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelMc(KernelMc*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelMc(KernelMc*, RmHalspecOwner* ); +void __nvoc_dtor_KernelMc(KernelMc*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelMc; + +static const struct NVOC_RTTI __nvoc_rtti_KernelMc_KernelMc = { + /*pClassDef=*/ &__nvoc_class_def_KernelMc, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelMc, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelMc_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelMc, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelMc_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelMc, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelMc = { + /*numRelatives=*/ 3, 
+ /*relatives=*/ { + &__nvoc_rtti_KernelMc_KernelMc, + &__nvoc_rtti_KernelMc_OBJENGSTATE, + &__nvoc_rtti_KernelMc_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMc = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelMc), + /*classId=*/ classId(KernelMc), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelMc", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelMc, + /*pCastInfo=*/ &__nvoc_castinfo_KernelMc, + /*pExportInfo=*/ &__nvoc_export_info_KernelMc +}; + +static NV_STATUS __nvoc_thunk_KernelMc_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelMc) { + return kmcStateInitLocked(pGpu, (struct KernelMc *)(((unsigned char *)pKernelMc) - __nvoc_rtti_KernelMc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcReconcileTunableState(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStateLoad(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStateUnload(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStatePreLoad(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStatePostUnload(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_kmcStateDestroy(POBJGPU pGpu, struct KernelMc *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStatePreUnload(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStateInitUnlocked(POBJGPU pGpu, struct KernelMc *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kmcInitMissing(POBJGPU pGpu, struct KernelMc *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStatePreInitLocked(POBJGPU pGpu, struct KernelMc *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStatePreInitUnlocked(POBJGPU pGpu, struct KernelMc *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned 
char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcGetTunableState(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcCompareTunableState(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kmcFreeTunableState(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcStatePostLoad(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcAllocTunableState(POBJGPU pGpu, struct KernelMc *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcSetTunableState(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmcConstructEngine(POBJGPU pGpu, struct KernelMc *pEngstate, ENGDESCRIPTOR arg0) { + return engstateConstructEngine(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset), arg0); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kmcIsPresent(POBJGPU pGpu, struct KernelMc *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMc_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelMc = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelMc(KernelMc *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelMc(KernelMc *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelMc(KernelMc *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelMc_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelMc(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelMc_exit; // Success + +__nvoc_ctor_KernelMc_fail_OBJENGSTATE: +__nvoc_ctor_KernelMc_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelMc_1(KernelMc *pThis, 
RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__kmcStateInitLocked__ = &kmcStateInitLocked_IMPL; + + // Hal function -- kmcWritePmcEnableReg + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kmcWritePmcEnableReg__ = &kmcWritePmcEnableReg_GK104; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmcWritePmcEnableReg__ = &kmcWritePmcEnableReg_GA100; + } + else if (0) + { + } + + // Hal function -- kmcReadPmcEnableReg + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__kmcReadPmcEnableReg__ = &kmcReadPmcEnableReg_GK104; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmcReadPmcEnableReg__ = &kmcReadPmcEnableReg_GA100; + } + else if (0) + { + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelMc_engstateStateInitLocked; + + pThis->__kmcReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmcReconcileTunableState; + + pThis->__kmcStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kmcStateLoad; + + pThis->__kmcStateUnload__ = &__nvoc_thunk_OBJENGSTATE_kmcStateUnload; + + pThis->__kmcStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kmcStatePreLoad; + + pThis->__kmcStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kmcStatePostUnload; + + pThis->__kmcStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kmcStateDestroy; + + pThis->__kmcStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kmcStatePreUnload; + + pThis->__kmcStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kmcStateInitUnlocked; + + pThis->__kmcInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kmcInitMissing; + + pThis->__kmcStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kmcStatePreInitLocked; + + pThis->__kmcStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kmcStatePreInitUnlocked; + + pThis->__kmcGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmcGetTunableState; + + pThis->__kmcCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmcCompareTunableState; + + pThis->__kmcFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmcFreeTunableState; + + pThis->__kmcStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kmcStatePostLoad; + + pThis->__kmcAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmcAllocTunableState; + + pThis->__kmcSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmcSetTunableState; + + pThis->__kmcConstructEngine__ = &__nvoc_thunk_OBJENGSTATE_kmcConstructEngine; + + pThis->__kmcIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kmcIsPresent; +} + +void __nvoc_init_funcTable_KernelMc(KernelMc *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelMc_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelMc(KernelMc *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelMc = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + 
pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelMc(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelMc(KernelMc **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelMc *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelMc)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelMc)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelMc); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelMc(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelMc(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelMc_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelMc_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelMc(KernelMc **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelMc(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_mc_nvoc.h b/src/nvidia/generated/g_kernel_mc_nvoc.h new file mode 100644 index 000000000..451d20220 --- /dev/null +++ b/src/nvidia/generated/g_kernel_mc_nvoc.h @@ -0,0 +1,276 @@ +#ifndef _G_KERNEL_MC_NVOC_H_ +#define _G_KERNEL_MC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_mc_nvoc.h" + +#ifndef KERNEL_MC_H +#define KERNEL_MC_H + +/****************************************************************************** +* +* Kernel Master Control module header +* This file contains functions required for MC in Kernel RM +* +******************************************************************************/ + +#include "gpu/eng_state.h" +#include "gpu/gpu_halspec.h" + +#ifdef NVOC_KERNEL_MC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelMc { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelMc *__nvoc_pbase_KernelMc; + NV_STATUS (*__kmcStateInitLocked__)(struct OBJGPU *, struct KernelMc *); + NV_STATUS (*__kmcWritePmcEnableReg__)(struct OBJGPU *, struct KernelMc *, NvU32, NvBool, NvBool); + NvU32 (*__kmcReadPmcEnableReg__)(struct OBJGPU *, struct KernelMc *, NvBool); + NV_STATUS (*__kmcReconcileTunableState__)(POBJGPU, struct KernelMc *, void *); + NV_STATUS (*__kmcStateLoad__)(POBJGPU, struct KernelMc *, NvU32); + NV_STATUS (*__kmcStateUnload__)(POBJGPU, struct KernelMc *, NvU32); + NV_STATUS (*__kmcStatePreLoad__)(POBJGPU, struct KernelMc *, NvU32); + NV_STATUS (*__kmcStatePostUnload__)(POBJGPU, struct KernelMc *, NvU32); + void (*__kmcStateDestroy__)(POBJGPU, struct KernelMc *); + NV_STATUS (*__kmcStatePreUnload__)(POBJGPU, struct KernelMc *, NvU32); + NV_STATUS (*__kmcStateInitUnlocked__)(POBJGPU, struct KernelMc *); + void (*__kmcInitMissing__)(POBJGPU, struct KernelMc *); + NV_STATUS (*__kmcStatePreInitLocked__)(POBJGPU, struct KernelMc *); + NV_STATUS (*__kmcStatePreInitUnlocked__)(POBJGPU, struct KernelMc *); + NV_STATUS (*__kmcGetTunableState__)(POBJGPU, struct KernelMc *, void *); + NV_STATUS (*__kmcCompareTunableState__)(POBJGPU, struct KernelMc *, void *, void *); + void (*__kmcFreeTunableState__)(POBJGPU, struct KernelMc *, void *); + NV_STATUS (*__kmcStatePostLoad__)(POBJGPU, struct KernelMc *, NvU32); + NV_STATUS (*__kmcAllocTunableState__)(POBJGPU, struct KernelMc *, void **); + NV_STATUS (*__kmcSetTunableState__)(POBJGPU, struct KernelMc *, void *); + NV_STATUS (*__kmcConstructEngine__)(POBJGPU, struct KernelMc *, ENGDESCRIPTOR); + NvBool (*__kmcIsPresent__)(POBJGPU, struct KernelMc *); +}; + +#ifndef __NVOC_CLASS_KernelMc_TYPEDEF__ +#define __NVOC_CLASS_KernelMc_TYPEDEF__ +typedef struct KernelMc KernelMc; +#endif /* __NVOC_CLASS_KernelMc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMc +#define __nvoc_class_id_KernelMc 0x3827ff +#endif /* __nvoc_class_id_KernelMc */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMc; + +#define __staticCast_KernelMc(pThis) \ + ((pThis)->__nvoc_pbase_KernelMc) + +#ifdef __nvoc_kernel_mc_h_disabled +#define __dynamicCast_KernelMc(pThis) ((KernelMc*)NULL) +#else //__nvoc_kernel_mc_h_disabled +#define __dynamicCast_KernelMc(pThis) \ + ((KernelMc*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelMc))) +#endif //__nvoc_kernel_mc_h_disabled + +#define PDB_PROP_KMC_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KMC_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelMc(KernelMc**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelMc(KernelMc**, Dynamic*, NvU32); +#define __objCreate_KernelMc(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelMc((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kmcStateInitLocked(pGpu, pKernelMc) kmcStateInitLocked_DISPATCH(pGpu, pKernelMc) +#define kmcWritePmcEnableReg(pGpu, pKernelMc, arg0, arg1, arg2) kmcWritePmcEnableReg_DISPATCH(pGpu, pKernelMc, arg0, arg1, arg2) +#define kmcWritePmcEnableReg_HAL(pGpu, pKernelMc, arg0, arg1, arg2) kmcWritePmcEnableReg_DISPATCH(pGpu, pKernelMc, arg0, arg1, arg2) +#define kmcReadPmcEnableReg(pGpu, pKernelMc, arg0) kmcReadPmcEnableReg_DISPATCH(pGpu, pKernelMc, arg0) +#define kmcReadPmcEnableReg_HAL(pGpu, pKernelMc, arg0) kmcReadPmcEnableReg_DISPATCH(pGpu, pKernelMc, arg0) +#define kmcReconcileTunableState(pGpu, pEngstate, pTunableState) kmcReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmcStateLoad(pGpu, pEngstate, arg0) kmcStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kmcStateUnload(pGpu, pEngstate, arg0) kmcStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kmcStatePreLoad(pGpu, pEngstate, arg0) kmcStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kmcStatePostUnload(pGpu, pEngstate, arg0) kmcStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kmcStateDestroy(pGpu, pEngstate) kmcStateDestroy_DISPATCH(pGpu, pEngstate) +#define kmcStatePreUnload(pGpu, pEngstate, arg0) kmcStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kmcStateInitUnlocked(pGpu, pEngstate) kmcStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kmcInitMissing(pGpu, pEngstate) kmcInitMissing_DISPATCH(pGpu, pEngstate) +#define kmcStatePreInitLocked(pGpu, pEngstate) kmcStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kmcStatePreInitUnlocked(pGpu, pEngstate) kmcStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kmcGetTunableState(pGpu, pEngstate, pTunableState) kmcGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmcCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kmcCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kmcFreeTunableState(pGpu, pEngstate, pTunableState) kmcFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmcStatePostLoad(pGpu, pEngstate, arg0) kmcStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kmcAllocTunableState(pGpu, pEngstate, ppTunableState) kmcAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kmcSetTunableState(pGpu, pEngstate, pTunableState) kmcSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmcConstructEngine(pGpu, pEngstate, arg0) kmcConstructEngine_DISPATCH(pGpu, pEngstate, arg0) +#define kmcIsPresent(pGpu, pEngstate) kmcIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kmcPrepareForXVEReset_GK104(struct OBJGPU *pGpu, struct KernelMc *pKernelMc); + +#ifdef __nvoc_kernel_mc_h_disabled +static inline NV_STATUS kmcPrepareForXVEReset(struct OBJGPU *pGpu, struct KernelMc *pKernelMc) { + NV_ASSERT_FAILED_PRECOMP("KernelMc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mc_h_disabled +#define kmcPrepareForXVEReset(pGpu, pKernelMc) kmcPrepareForXVEReset_GK104(pGpu, pKernelMc) +#endif //__nvoc_kernel_mc_h_disabled + +#define kmcPrepareForXVEReset_HAL(pGpu, pKernelMc) kmcPrepareForXVEReset(pGpu, pKernelMc) + +NV_STATUS kmcGetMcBar0MapInfo_GK104(struct OBJGPU 
*pGpu, struct KernelMc *pKernelMc, NvU64 *arg0, NvU32 *arg1); + +#ifdef __nvoc_kernel_mc_h_disabled +static inline NV_STATUS kmcGetMcBar0MapInfo(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU64 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mc_h_disabled +#define kmcGetMcBar0MapInfo(pGpu, pKernelMc, arg0, arg1) kmcGetMcBar0MapInfo_GK104(pGpu, pKernelMc, arg0, arg1) +#endif //__nvoc_kernel_mc_h_disabled + +#define kmcGetMcBar0MapInfo_HAL(pGpu, pKernelMc, arg0, arg1) kmcGetMcBar0MapInfo(pGpu, pKernelMc, arg0, arg1) + +NV_STATUS kmcStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelMc *pKernelMc); + +static inline NV_STATUS kmcStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelMc *pKernelMc) { + return pKernelMc->__kmcStateInitLocked__(pGpu, pKernelMc); +} + +NV_STATUS kmcWritePmcEnableReg_GK104(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0, NvBool arg1, NvBool arg2); + +NV_STATUS kmcWritePmcEnableReg_GA100(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0, NvBool arg1, NvBool arg2); + +static inline NV_STATUS kmcWritePmcEnableReg_46f6a7(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0, NvBool arg1, NvBool arg2) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kmcWritePmcEnableReg_DISPATCH(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvU32 arg0, NvBool arg1, NvBool arg2) { + return pKernelMc->__kmcWritePmcEnableReg__(pGpu, pKernelMc, arg0, arg1, arg2); +} + +NvU32 kmcReadPmcEnableReg_GK104(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvBool arg0); + +NvU32 kmcReadPmcEnableReg_GA100(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvBool arg0); + +static inline NvU32 kmcReadPmcEnableReg_4a4dee(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvBool arg0) { + return 0; +} + +static inline NvU32 kmcReadPmcEnableReg_DISPATCH(struct OBJGPU *pGpu, struct KernelMc *pKernelMc, NvBool arg0) { + return pKernelMc->__kmcReadPmcEnableReg__(pGpu, pKernelMc, arg0); +} + +static inline NV_STATUS kmcReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunableState) { + return pEngstate->__kmcReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kmcStateLoad_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return pEngstate->__kmcStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmcStateUnload_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return pEngstate->__kmcStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmcStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return pEngstate->__kmcStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmcStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return pEngstate->__kmcStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void kmcStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) { + pEngstate->__kmcStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS kmcStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return pEngstate->__kmcStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmcStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) { + return pEngstate->__kmcStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kmcInitMissing_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) { + 
pEngstate->__kmcInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kmcStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) { + return pEngstate->__kmcStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kmcStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) { + return pEngstate->__kmcStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kmcGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunableState) { + return pEngstate->__kmcGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kmcCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kmcCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kmcFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunableState) { + pEngstate->__kmcFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kmcStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, NvU32 arg0) { + return pEngstate->__kmcStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmcAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, void **ppTunableState) { + return pEngstate->__kmcAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kmcSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, void *pTunableState) { + return pEngstate->__kmcSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kmcConstructEngine_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate, ENGDESCRIPTOR arg0) { + return pEngstate->__kmcConstructEngine__(pGpu, pEngstate, arg0); +} + +static inline NvBool kmcIsPresent_DISPATCH(POBJGPU pGpu, struct KernelMc *pEngstate) { + return pEngstate->__kmcIsPresent__(pGpu, pEngstate); +} + +#undef PRIVATE_FIELD + + +#endif // KERNEL_MC_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_MC_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_mig_manager_nvoc.c b/src/nvidia/generated/g_kernel_mig_manager_nvoc.c new file mode 100644 index 000000000..02fe216cf --- /dev/null +++ b/src/nvidia/generated/g_kernel_mig_manager_nvoc.c @@ -0,0 +1,414 @@ +#define NVOC_KERNEL_MIG_MANAGER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_mig_manager_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x01c1bf = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMIGManager; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelMIGManager(KernelMIGManager*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelMIGManager(KernelMIGManager*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelMIGManager(KernelMIGManager*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelMIGManager(KernelMIGManager*, RmHalspecOwner* ); +void __nvoc_dtor_KernelMIGManager(KernelMIGManager*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelMIGManager; + +static const struct NVOC_RTTI __nvoc_rtti_KernelMIGManager_KernelMIGManager = { + /*pClassDef=*/ &__nvoc_class_def_KernelMIGManager, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelMIGManager, + /*offset=*/ 0, +}; + +static const struct 
NVOC_RTTI __nvoc_rtti_KernelMIGManager_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelMIGManager, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelMIGManager_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelMIGManager, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelMIGManager = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelMIGManager_KernelMIGManager, + &__nvoc_rtti_KernelMIGManager_OBJENGSTATE, + &__nvoc_rtti_KernelMIGManager_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMIGManager = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelMIGManager), + /*classId=*/ classId(KernelMIGManager), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelMIGManager", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelMIGManager, + /*pCastInfo=*/ &__nvoc_castinfo_KernelMIGManager, + /*pExportInfo=*/ &__nvoc_export_info_KernelMIGManager +}; + +static NV_STATUS __nvoc_thunk_KernelMIGManager_engstateConstructEngine(OBJGPU *arg0, struct OBJENGSTATE *arg1, ENGDESCRIPTOR arg2) { + return kmigmgrConstructEngine(arg0, (struct KernelMIGManager *)(((unsigned char *)arg1) - __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelMIGManager_engstateStateInitLocked(OBJGPU *arg0, struct OBJENGSTATE *arg1) { + return kmigmgrStateInitLocked(arg0, (struct KernelMIGManager *)(((unsigned char *)arg1) - __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelMIGManager_engstateStateUnload(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 flags) { + return kmigmgrStateUnload(arg0, (struct KernelMIGManager *)(((unsigned char *)arg1) - __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), flags); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrReconcileTunableState(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrStateLoad(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrStatePreLoad(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrStatePostUnload(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_kmigmgrStateDestroy(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrStatePreUnload(POBJGPU pGpu, struct KernelMIGManager *pEngstate, 
NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrStateInitUnlocked(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_kmigmgrInitMissing(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrStatePreInitLocked(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrStatePreInitUnlocked(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrGetTunableState(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrCompareTunableState(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_kmigmgrFreeTunableState(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrStatePostLoad(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrAllocTunableState(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_kmigmgrSetTunableState(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_kmigmgrIsPresent(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelMIGManager_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelMIGManager = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelMIGManager(KernelMIGManager *pThis) { + 
__nvoc_kmigmgrDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelMIGManager(KernelMIGManager *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelMIGManager(KernelMIGManager *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelMIGManager_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelMIGManager(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelMIGManager_exit; // Success + +__nvoc_ctor_KernelMIGManager_fail_OBJENGSTATE: +__nvoc_ctor_KernelMIGManager_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelMIGManager_1(KernelMIGManager *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__kmigmgrConstructEngine__ = &kmigmgrConstructEngine_IMPL; + + pThis->__kmigmgrStateInitLocked__ = &kmigmgrStateInitLocked_IMPL; + + pThis->__kmigmgrStateUnload__ = &kmigmgrStateUnload_IMPL; + + // Hal function -- kmigmgrCreateGPUInstanceCheck + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmigmgrCreateGPUInstanceCheck__ = &kmigmgrCreateGPUInstanceCheck_GA100; + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmigmgrCreateGPUInstanceCheck__ = &kmigmgrCreateGPUInstanceCheck_46f6a7; + } + + // Hal function -- kmigmgrIsDevinitMIGBitSet + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmigmgrIsDevinitMIGBitSet__ = &kmigmgrIsDevinitMIGBitSet_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmigmgrIsDevinitMIGBitSet__ = &kmigmgrIsDevinitMIGBitSet_491d52; + } + } + + // Hal function -- 
kmigmgrIsGPUInstanceCombinationValid + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmigmgrIsGPUInstanceCombinationValid__ = &kmigmgrIsGPUInstanceCombinationValid_GA100; + } + else if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmigmgrIsGPUInstanceCombinationValid__ = &kmigmgrIsGPUInstanceCombinationValid_491d52; + } + + // Hal function -- kmigmgrIsGPUInstanceFlagValid + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmigmgrIsGPUInstanceFlagValid__ = &kmigmgrIsGPUInstanceFlagValid_GA100; + } + else if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmigmgrIsGPUInstanceFlagValid__ = &kmigmgrIsGPUInstanceFlagValid_491d52; + } + + // Hal function -- kmigmgrIsMemoryPartitioningRequested + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmigmgrIsMemoryPartitioningRequested__ = &kmigmgrIsMemoryPartitioningRequested_GA100; + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmigmgrIsMemoryPartitioningRequested__ = &kmigmgrIsMemoryPartitioningRequested_491d52; + } + + // Hal function -- kmigmgrIsMemoryPartitioningNeeded + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmigmgrIsMemoryPartitioningNeeded__ = &kmigmgrIsMemoryPartitioningNeeded_GA100; + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmigmgrIsMemoryPartitioningNeeded__ = &kmigmgrIsMemoryPartitioningNeeded_491d52; + } + + // Hal function -- kmigmgrMemSizeFlagToSwizzIdRange + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__kmigmgrMemSizeFlagToSwizzIdRange__ = &kmigmgrMemSizeFlagToSwizzIdRange_GA100; + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__kmigmgrMemSizeFlagToSwizzIdRange__ = &kmigmgrMemSizeFlagToSwizzIdRange_d64cd6; + } + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelMIGManager_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelMIGManager_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelMIGManager_engstateStateUnload; + 
+ pThis->__kmigmgrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrReconcileTunableState; + + pThis->__kmigmgrStateLoad__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStateLoad; + + pThis->__kmigmgrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStatePreLoad; + + pThis->__kmigmgrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStatePostUnload; + + pThis->__kmigmgrStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStateDestroy; + + pThis->__kmigmgrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStatePreUnload; + + pThis->__kmigmgrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStateInitUnlocked; + + pThis->__kmigmgrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrInitMissing; + + pThis->__kmigmgrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStatePreInitLocked; + + pThis->__kmigmgrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStatePreInitUnlocked; + + pThis->__kmigmgrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrGetTunableState; + + pThis->__kmigmgrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrCompareTunableState; + + pThis->__kmigmgrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrFreeTunableState; + + pThis->__kmigmgrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrStatePostLoad; + + pThis->__kmigmgrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrAllocTunableState; + + pThis->__kmigmgrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrSetTunableState; + + pThis->__kmigmgrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kmigmgrIsPresent; +} + +void __nvoc_init_funcTable_KernelMIGManager(KernelMIGManager *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelMIGManager_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelMIGManager(KernelMIGManager *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelMIGManager = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelMIGManager(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelMIGManager(KernelMIGManager **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelMIGManager *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelMIGManager)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelMIGManager)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelMIGManager); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelMIGManager(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelMIGManager(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelMIGManager_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelMIGManager_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + 
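For orientation, here is a minimal usage sketch of how this allocation entry point is typically driven; it is an illustration rather than part of the commit. It assumes a caller that already holds a valid OBJGPU *pGpu whose ancestry provides RmHalspecOwner, as the NV_ASSERT_OR_RETURN above requires, and it goes through the __objCreate_KernelMIGManager convenience macro declared in the generated header later in this diff.

    KernelMIGManager *pKernelMIGManager = NULL;
    NV_STATUS         status;

    // Allocates the object, runs RTTI and base-class init, builds the
    // HAL-specialized function table, and invokes the constructor chain.
    // A createFlags value of 0 requests default parenting under pGpu.
    status = __objCreate_KernelMIGManager(&pKernelMIGManager, pGpu, 0);
    if (status != NV_OK)
        return status; // the cleanup label above already freed the allocation

On failure the sketch simply propagates the status, since the generated cleanup path has already released the partially constructed object.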
+NV_STATUS __nvoc_objCreateDynamic_KernelMIGManager(KernelMIGManager **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelMIGManager(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_mig_manager_nvoc.h b/src/nvidia/generated/g_kernel_mig_manager_nvoc.h new file mode 100644 index 000000000..848856803 --- /dev/null +++ b/src/nvidia/generated/g_kernel_mig_manager_nvoc.h @@ -0,0 +1,1507 @@ +#ifndef _G_KERNEL_MIG_MANAGER_NVOC_H_ +#define _G_KERNEL_MIG_MANAGER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_mig_manager_nvoc.h" + +#ifndef KERNEL_MIG_MANAGER_H +#define KERNEL_MIG_MANAGER_H + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "kernel/gpu/gr/kernel_graphics_manager.h" +#include "kernel/gpu_mgr/gpu_mgr.h" +#include "kernel/gpu/mmu/kern_gmmu.h" + +#include "ctrl/ctrlc637.h" + +typedef struct KERNEL_MIG_GPU_INSTANCE KERNEL_MIG_GPU_INSTANCE; + +// Forward declaration of opaque type +typedef struct KERNEL_MIG_MANAGER_PRIVATE_DATA KERNEL_MIG_MANAGER_PRIVATE_DATA; +typedef struct MIG_GPU_INSTANCE MIG_GPU_INSTANCE; + +#define IS_MIG_ENABLED(pGpu) (((pGpu) != NULL) && (GPU_GET_KERNEL_MIG_MANAGER(pGpu) != NULL) && \ + kmigmgrIsMIGEnabled((pGpu), GPU_GET_KERNEL_MIG_MANAGER(pGpu))) +#define IS_MIG_IN_USE(pGpu) (((pGpu) != NULL) && (GPU_GET_KERNEL_MIG_MANAGER(pGpu) != NULL) && \ + kmigmgrIsMIGGpuInstancingEnabled((pGpu), GPU_GET_KERNEL_MIG_MANAGER(pGpu))) + +#define FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pLocal) \ + { \ + NvU32 i; \ + for (i = 0; i < KMIGMGR_MAX_GPU_INSTANCES; ++i) \ + { \ + (pLocal) = kmigmgrGetMIGGpuInstanceSlot((pGpu), (pKernelMIGManager), i);\ + if (((pLocal) == NULL) || !(pLocal)->bValid) \ + continue; + +#define FOR_EACH_VALID_GPU_INSTANCE_END() \ + } \ + } + +#define KMIGMGR_SWIZZID_INVALID 0xFFFFFFFF +#define KMIGMGR_MAX_GPU_SWIZZID 15 +#define KMIGMGR_MAX_GPU_INSTANCES GPUMGR_MAX_GPU_INSTANCES +#define KMIGMGR_MAX_COMPUTE_INSTANCES GPUMGR_MAX_COMPUTE_INSTANCES +#define KMIGMGR_COMPUTE_INSTANCE_ID_INVALID 0xFFFFFFFF + +#define KMIGMGR_INSTANCE_ATTRIBUTION_ID_INVALID \ + ((KMIGMGR_MAX_GPU_SWIZZID * KMIGMGR_MAX_GPU_SWIZZID) + \ + KMIGMGR_MAX_COMPUTE_INSTANCES) + +MAKE_BITVECTOR(GFID_BIT_VECTOR, VMMU_MAX_GFID); + +typedef struct KMIGMGR_INSTANCE_HANDLES +{ + /*! + * Client handle to make calls into this instance + */ + NvHandle hClient; + + /*! + * Device handle to make calls into this instance + */ + NvHandle hDevice; + + /*! + * Subdevice handle to make calls into this instance + */ + NvHandle hSubdevice; + + /*! + * Subscription handle to make calls into this instance + */ + NvHandle hSubscription; +} KMIGMGR_INSTANCE_HANDLES; + +typedef struct MIG_RESOURCE_ALLOCATION +{ + /*! + * Logical GPC-IDs which are associated with this instance + * As current assumption is that GPCs within a instance is always + * physically contiguous, so we can use start and count also saving some + * memory however it will enforce contiguity restriction which may not be + * in future. + */ + NvU32 gpcIds[KGRMGR_MAX_GPC]; + + /*! + * Number of GPCs associated with this instance + */ + NvU32 gpcCount; + + /*! + * VEID start offset for this instance + */ + NvU32 veidOffset; + + /*! + * Number of VEIDs associated with this instance + */ + NvU32 veidCount; + + /*! + * Bitvector of partitionable engines associated with this instance. + */ + ENGTYPE_BIT_VECTOR engines; + + /*! + * Bitvector of local engine IDs associated with this instance. + */ + ENGTYPE_BIT_VECTOR localEngines; +} MIG_RESOURCE_ALLOCATION; + +typedef struct MIG_COMPUTE_INSTANCE +{ + /*! + * Resource allocated for this instance + */ + MIG_RESOURCE_ALLOCATION resourceAllocation; + + /*! + * States that this is a valid compute instance + */ + NvBool bValid; + + /*! + * Flags indicating which engines (if any) are shared across multiple compute + * instances. Bit positions in this flag correspond to + * NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_* + */ + NvU32 sharedEngFlag; + + /*! + * Compute instance ID + */ + NvU32 id; + + /*! 
+ * Shared object to track instance reference count + */ + struct RsShared *pShare; + + /*! + * Opaque pointer to os-specific capabilities + */ + OS_RM_CAPS *pOsRmCaps; + + /*! + * Compute instance UUID + */ + NvUuid uuid; + + /*! + * Handles for RPC's into this instance + */ + KMIGMGR_INSTANCE_HANDLES instanceHandles; +} MIG_COMPUTE_INSTANCE; + +/*! + * @brief Situational params for compute instance creation API + * + * This structure comes with two specializations: + * TYPE_REQUEST + * Parameter refers to request data passed in via EXEC_PARTITIONS_CREATE ctrl + * call. All resources claimed by new compute instance are chosen via allocator, + * and the API may create multiple compute instances. + * TYPE_RESTORE + * Parameter refers to saved compute instance data. Most resources claimed by new + * compute instance are determined by the save data, and others are claimed via + * allocator. + * TYPE_REQUEST_WITH_IDS + * Parameter refers to request data passed in via EXEC_PARTITIONS_CREATE ctrl + * call. All resources claimed by new instance are chosen via allocator. + * RM also tries to allocate instance with compute instance id + * requested by user. This flag is only supported on vGPU enabled RM build + * and will be removed when vgpu plugin implements virtualized compute + * instance ID support. (bug 2938187) + */ +typedef struct KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS +{ + enum + { + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST, + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_RESTORE, + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST_WITH_IDS + } type; + union + { + struct + { + NvU32 count; + NVC637_CTRL_EXEC_PARTITIONS_INFO *pReqComputeInstanceInfo; + } request; + struct + { + struct GPUMGR_SAVE_COMPUTE_INSTANCE *pComputeInstanceSave; + } restore; + } inst; +} KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS; + +typedef struct KERNEL_MIG_GPU_INSTANCE +{ + /*! Structure containing GPU instance profile */ + const NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO *pProfile; + + /*! + * Resource allocated for this instance + */ + MIG_RESOURCE_ALLOCATION resourceAllocation; + + /*! + * Mask of physical engines in this GPU instance which are assigned exclusively + * to some compute instance. Indexed via NV2080_ENGINE_TYPE_* + */ + ENGTYPE_BIT_VECTOR exclusiveEngMask; + + /*! + * Mask of physical engines in this GPU instance which are assigned to at least + * one compute instance, but may be assigned to others. + * Indexed via NV2080_ENGINE_TYPE_* + */ + ENGTYPE_BIT_VECTOR sharedEngMask; + + /*! + * compute instance info. + */ + MIG_COMPUTE_INSTANCE MIGComputeInstance[KMIGMGR_MAX_COMPUTE_INSTANCES]; + + /*! + * Bit Vector of GFID's associated with this instance. + */ + GFID_BIT_VECTOR gfidMap; + + /*! + * GPU instance ID + */ + NvU32 swizzId; + + /*! + * Validated user-provided instance flags - NV2080_CTRL_GPU_PARTITION_FLAG_* + */ + NvU32 partitionFlag; + + /*! + * Memory handle associated with partitioned memory + */ + NvHandle hMemory; + + /*! + * Shared object to track instance reference count + */ + struct RsShared *pShare; + + /*! + * Heap used for managing instance's memory + */ + struct Heap *pMemoryPartitionHeap; + + /*! + * States that this instance is valid + */ + NvBool bValid; + + /*! + * Indicates that the GPU instance scrubber is initialized and should be + * accounted for / ignored in the instance refcount when determining + * whether or not a instance can be destroyed. + */ + NvBool bMemoryPartitionScrubberInitialized; + + /*! + * Physical memory address range for this instance. 
+ */ + NV_RANGE memRange; + + /*! + * Memory pool for client page table allocations + */ + RM_POOL_ALLOC_MEM_RESERVE_INFO *pPageTableMemPool; + + /*! + * Physical MIG GPU Instance info for this instance + */ + MIG_GPU_INSTANCE *pMIGGpuInstance; + + /*! + * Mask of runlistIds for engines that belong to this instance + */ + NvU64 runlistIdMask; + + /*! + * Opaque pointer to os-specific capabilities + */ + OS_RM_CAPS *pOsRmCaps; + + /*! + * Handles for RPC's into this instance + */ + KMIGMGR_INSTANCE_HANDLES instanceHandles; +} KERNEL_MIG_GPU_INSTANCE; + +/*! + * @brief Situational params for GPU instance creation API + * + * This structure comes with two specializations: + * TYPE_REQUEST + * Parameter refers to request data passed in via SET_PARTITIONS ctrl + * call. All resources claimed by new GPU instance are chosen via allocator. + * TYPE_RESTORE + * Parameter refers to saved GPU instance data. Most resources claimed by new + * GPU instance are determined by the save data, and others are claimed via + * allocator. + */ +typedef struct KMIGMGR_CREATE_GPU_INSTANCE_PARAMS +{ + enum + { + KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_REQUEST, + KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_RESTORE + } type; + union + { + struct + { + NvU32 partitionFlag; + NV_RANGE placement; + NvBool bUsePlacement; + } request; + struct + { + struct GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave; + } restore; + } inst; +} KMIGMGR_CREATE_GPU_INSTANCE_PARAMS; + +/*! + * @brief Packed pointer to a GPU instance/compute instance combo + * @note Having NULL pKernelMIGGpuInstance and non-NULL pMIGComputeInstance is never expected + */ +struct MIG_INSTANCE_REF +{ + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance; + MIG_COMPUTE_INSTANCE *pMIGComputeInstance; +}; + +typedef struct KERNEL_MIG_MANAGER_STATIC_INFO +{ + /*! @ref NV2080_CTRL_CMD_INTERNAL_STATIC_MIGMGR_GET_PROFILES */ + NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS *pProfiles; + + /*! Mask of partitionable engines which are present on this GPU. */ + NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS *pPartitionableEngines; + + /*! Per swizzId FB memory page ranges */ + NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS *pSwizzIdFbMemPageRanges; + +} KERNEL_MIG_MANAGER_STATIC_INFO; + +/*! + * KernelMIGManager provides kernel side services for managing MIG instances. + * It also maintains state relating to GPU partitioning and related state. 
+ */ +#ifdef NVOC_KERNEL_MIG_MANAGER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelMIGManager { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelMIGManager *__nvoc_pbase_KernelMIGManager; + NV_STATUS (*__kmigmgrConstructEngine__)(OBJGPU *, struct KernelMIGManager *, ENGDESCRIPTOR); + NV_STATUS (*__kmigmgrStateInitLocked__)(OBJGPU *, struct KernelMIGManager *); + NV_STATUS (*__kmigmgrStateUnload__)(OBJGPU *, struct KernelMIGManager *, NvU32); + NV_STATUS (*__kmigmgrCreateGPUInstanceCheck__)(OBJGPU *, struct KernelMIGManager *, NvBool); + NvBool (*__kmigmgrIsDevinitMIGBitSet__)(OBJGPU *, struct KernelMIGManager *); + NvBool (*__kmigmgrIsGPUInstanceCombinationValid__)(OBJGPU *, struct KernelMIGManager *, NvU32); + NvBool (*__kmigmgrIsGPUInstanceFlagValid__)(OBJGPU *, struct KernelMIGManager *, NvU32); + NvBool (*__kmigmgrIsMemoryPartitioningRequested__)(OBJGPU *, struct KernelMIGManager *, NvU32); + NvBool (*__kmigmgrIsMemoryPartitioningNeeded__)(OBJGPU *, struct KernelMIGManager *, NvU32); + struct NV_RANGE (*__kmigmgrMemSizeFlagToSwizzIdRange__)(OBJGPU *, struct KernelMIGManager *, NvU32); + NV_STATUS (*__kmigmgrReconcileTunableState__)(POBJGPU, struct KernelMIGManager *, void *); + NV_STATUS (*__kmigmgrStateLoad__)(POBJGPU, struct KernelMIGManager *, NvU32); + NV_STATUS (*__kmigmgrStatePreLoad__)(POBJGPU, struct KernelMIGManager *, NvU32); + NV_STATUS (*__kmigmgrStatePostUnload__)(POBJGPU, struct KernelMIGManager *, NvU32); + void (*__kmigmgrStateDestroy__)(POBJGPU, struct KernelMIGManager *); + NV_STATUS (*__kmigmgrStatePreUnload__)(POBJGPU, struct KernelMIGManager *, NvU32); + NV_STATUS (*__kmigmgrStateInitUnlocked__)(POBJGPU, struct KernelMIGManager *); + void (*__kmigmgrInitMissing__)(POBJGPU, struct KernelMIGManager *); + NV_STATUS (*__kmigmgrStatePreInitLocked__)(POBJGPU, struct KernelMIGManager *); + NV_STATUS (*__kmigmgrStatePreInitUnlocked__)(POBJGPU, struct KernelMIGManager *); + NV_STATUS (*__kmigmgrGetTunableState__)(POBJGPU, struct KernelMIGManager *, void *); + NV_STATUS (*__kmigmgrCompareTunableState__)(POBJGPU, struct KernelMIGManager *, void *, void *); + void (*__kmigmgrFreeTunableState__)(POBJGPU, struct KernelMIGManager *, void *); + NV_STATUS (*__kmigmgrStatePostLoad__)(POBJGPU, struct KernelMIGManager *, NvU32); + NV_STATUS (*__kmigmgrAllocTunableState__)(POBJGPU, struct KernelMIGManager *, void **); + NV_STATUS (*__kmigmgrSetTunableState__)(POBJGPU, struct KernelMIGManager *, void *); + NvBool (*__kmigmgrIsPresent__)(POBJGPU, struct KernelMIGManager *); + NvBool bIsA100ReducedConfig; + KERNEL_MIG_MANAGER_PRIVATE_DATA *pPrivate; + KERNEL_MIG_GPU_INSTANCE kernelMIGGpuInstance[8]; + NvBool bMIGEnabled; + NvU64 swizzIdInUseMask; + NvBool bRestoreWatchdog; + NvBool bReenableWatchdog; + union ENGTYPE_BIT_VECTOR partitionableEnginesInUse; + NvBool bDeviceProfilingInUse; +}; + +#ifndef __NVOC_CLASS_KernelMIGManager_TYPEDEF__ +#define __NVOC_CLASS_KernelMIGManager_TYPEDEF__ +typedef struct KernelMIGManager KernelMIGManager; +#endif /* __NVOC_CLASS_KernelMIGManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelMIGManager +#define __nvoc_class_id_KernelMIGManager 0x01c1bf +#endif /* __nvoc_class_id_KernelMIGManager */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelMIGManager; + +#define __staticCast_KernelMIGManager(pThis) \ + 
((pThis)->__nvoc_pbase_KernelMIGManager) + +#ifdef __nvoc_kernel_mig_manager_h_disabled +#define __dynamicCast_KernelMIGManager(pThis) ((KernelMIGManager*)NULL) +#else //__nvoc_kernel_mig_manager_h_disabled +#define __dynamicCast_KernelMIGManager(pThis) \ + ((KernelMIGManager*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelMIGManager))) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define PDB_PROP_KMIGMGR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KMIGMGR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelMIGManager(KernelMIGManager**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelMIGManager(KernelMIGManager**, Dynamic*, NvU32); +#define __objCreate_KernelMIGManager(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelMIGManager((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define kmigmgrConstructEngine(arg0, arg1, arg2) kmigmgrConstructEngine_DISPATCH(arg0, arg1, arg2) +#define kmigmgrStateInitLocked(arg0, arg1) kmigmgrStateInitLocked_DISPATCH(arg0, arg1) +#define kmigmgrStateUnload(arg0, arg1, flags) kmigmgrStateUnload_DISPATCH(arg0, arg1, flags) +#define kmigmgrCreateGPUInstanceCheck(arg0, arg1, bMemoryPartitioningNeeded) kmigmgrCreateGPUInstanceCheck_DISPATCH(arg0, arg1, bMemoryPartitioningNeeded) +#define kmigmgrCreateGPUInstanceCheck_HAL(arg0, arg1, bMemoryPartitioningNeeded) kmigmgrCreateGPUInstanceCheck_DISPATCH(arg0, arg1, bMemoryPartitioningNeeded) +#define kmigmgrIsDevinitMIGBitSet(arg0, arg1) kmigmgrIsDevinitMIGBitSet_DISPATCH(arg0, arg1) +#define kmigmgrIsDevinitMIGBitSet_HAL(arg0, arg1) kmigmgrIsDevinitMIGBitSet_DISPATCH(arg0, arg1) +#define kmigmgrIsGPUInstanceCombinationValid(arg0, arg1, gpuInstanceFlag) kmigmgrIsGPUInstanceCombinationValid_DISPATCH(arg0, arg1, gpuInstanceFlag) +#define kmigmgrIsGPUInstanceCombinationValid_HAL(arg0, arg1, gpuInstanceFlag) kmigmgrIsGPUInstanceCombinationValid_DISPATCH(arg0, arg1, gpuInstanceFlag) +#define kmigmgrIsGPUInstanceFlagValid(arg0, arg1, gpuInstanceFlag) kmigmgrIsGPUInstanceFlagValid_DISPATCH(arg0, arg1, gpuInstanceFlag) +#define kmigmgrIsGPUInstanceFlagValid_HAL(arg0, arg1, gpuInstanceFlag) kmigmgrIsGPUInstanceFlagValid_DISPATCH(arg0, arg1, gpuInstanceFlag) +#define kmigmgrIsMemoryPartitioningRequested(arg0, arg1, partitionFlags) kmigmgrIsMemoryPartitioningRequested_DISPATCH(arg0, arg1, partitionFlags) +#define kmigmgrIsMemoryPartitioningRequested_HAL(arg0, arg1, partitionFlags) kmigmgrIsMemoryPartitioningRequested_DISPATCH(arg0, arg1, partitionFlags) +#define kmigmgrIsMemoryPartitioningNeeded(arg0, arg1, swizzId) kmigmgrIsMemoryPartitioningNeeded_DISPATCH(arg0, arg1, swizzId) +#define kmigmgrIsMemoryPartitioningNeeded_HAL(arg0, arg1, swizzId) kmigmgrIsMemoryPartitioningNeeded_DISPATCH(arg0, arg1, swizzId) +#define kmigmgrMemSizeFlagToSwizzIdRange(arg0, arg1, memSizeFlag) kmigmgrMemSizeFlagToSwizzIdRange_DISPATCH(arg0, arg1, memSizeFlag) +#define kmigmgrMemSizeFlagToSwizzIdRange_HAL(arg0, arg1, memSizeFlag) kmigmgrMemSizeFlagToSwizzIdRange_DISPATCH(arg0, arg1, memSizeFlag) +#define kmigmgrReconcileTunableState(pGpu, pEngstate, pTunableState) kmigmgrReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmigmgrStateLoad(pGpu, pEngstate, arg0) kmigmgrStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kmigmgrStatePreLoad(pGpu, pEngstate, arg0) kmigmgrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kmigmgrStatePostUnload(pGpu, pEngstate, arg0) kmigmgrStatePostUnload_DISPATCH(pGpu, 
pEngstate, arg0) +#define kmigmgrStateDestroy(pGpu, pEngstate) kmigmgrStateDestroy_DISPATCH(pGpu, pEngstate) +#define kmigmgrStatePreUnload(pGpu, pEngstate, arg0) kmigmgrStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define kmigmgrStateInitUnlocked(pGpu, pEngstate) kmigmgrStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kmigmgrInitMissing(pGpu, pEngstate) kmigmgrInitMissing_DISPATCH(pGpu, pEngstate) +#define kmigmgrStatePreInitLocked(pGpu, pEngstate) kmigmgrStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define kmigmgrStatePreInitUnlocked(pGpu, pEngstate) kmigmgrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define kmigmgrGetTunableState(pGpu, pEngstate, pTunableState) kmigmgrGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmigmgrCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kmigmgrCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define kmigmgrFreeTunableState(pGpu, pEngstate, pTunableState) kmigmgrFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmigmgrStatePostLoad(pGpu, pEngstate, arg0) kmigmgrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define kmigmgrAllocTunableState(pGpu, pEngstate, ppTunableState) kmigmgrAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define kmigmgrSetTunableState(pGpu, pEngstate, pTunableState) kmigmgrSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define kmigmgrIsPresent(pGpu, pEngstate) kmigmgrIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS kmigmgrLoadStaticInfo_KERNEL(OBJGPU *arg0, struct KernelMIGManager *arg1); + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrLoadStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrLoadStaticInfo(arg0, arg1) kmigmgrLoadStaticInfo_KERNEL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrLoadStaticInfo_HAL(arg0, arg1) kmigmgrLoadStaticInfo(arg0, arg1) + +static inline NV_STATUS kmigmgrSetStaticInfo_46f6a7(OBJGPU *arg0, struct KernelMIGManager *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSetStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSetStaticInfo(arg0, arg1) kmigmgrSetStaticInfo_46f6a7(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrSetStaticInfo_HAL(arg0, arg1) kmigmgrSetStaticInfo(arg0, arg1) + +static inline void kmigmgrClearStaticInfo_b3696a(OBJGPU *arg0, struct KernelMIGManager *arg1) { + return; +} + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrClearStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrClearStaticInfo(arg0, arg1) kmigmgrClearStaticInfo_b3696a(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrClearStaticInfo_HAL(arg0, arg1) kmigmgrClearStaticInfo(arg0, arg1) + +static inline NV_STATUS kmigmgrSaveToPersistenceFromVgpuStaticInfo_46f6a7(OBJGPU *arg0, struct KernelMIGManager *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS 
kmigmgrSaveToPersistenceFromVgpuStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSaveToPersistenceFromVgpuStaticInfo(arg0, arg1) kmigmgrSaveToPersistenceFromVgpuStaticInfo_46f6a7(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrSaveToPersistenceFromVgpuStaticInfo_HAL(arg0, arg1) kmigmgrSaveToPersistenceFromVgpuStaticInfo(arg0, arg1) + +NV_STATUS kmigmgrDeleteGPUInstanceRunlists_FWCLIENT(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrDeleteGPUInstanceRunlists(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDeleteGPUInstanceRunlists(arg0, arg1, arg2) kmigmgrDeleteGPUInstanceRunlists_FWCLIENT(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrDeleteGPUInstanceRunlists_HAL(arg0, arg1, arg2) kmigmgrDeleteGPUInstanceRunlists(arg0, arg1, arg2) + +NV_STATUS kmigmgrCreateGPUInstanceRunlists_FWCLIENT(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrCreateGPUInstanceRunlists(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrCreateGPUInstanceRunlists(arg0, arg1, arg2) kmigmgrCreateGPUInstanceRunlists_FWCLIENT(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrCreateGPUInstanceRunlists_HAL(arg0, arg1, arg2) kmigmgrCreateGPUInstanceRunlists(arg0, arg1, arg2) + +NV_STATUS kmigmgrRestoreFromPersistence_PF(OBJGPU *arg0, struct KernelMIGManager *arg1); + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrRestoreFromPersistence(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrRestoreFromPersistence(arg0, arg1) kmigmgrRestoreFromPersistence_PF(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrRestoreFromPersistence_HAL(arg0, arg1) kmigmgrRestoreFromPersistence(arg0, arg1) + +void kmigmgrDetectReducedConfig_KERNEL(OBJGPU *arg0, struct KernelMIGManager *arg1); + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrDetectReducedConfig(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDetectReducedConfig(arg0, arg1) kmigmgrDetectReducedConfig_KERNEL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrDetectReducedConfig_HAL(arg0, arg1) kmigmgrDetectReducedConfig(arg0, arg1) + +static inline NV_STATUS kmigmgrGenerateComputeInstanceUuid_5baef9(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU32 globalGrIdx, NvUuid *pUuid) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGenerateComputeInstanceUuid(OBJGPU 
*arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU32 globalGrIdx, NvUuid *pUuid) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGenerateComputeInstanceUuid(arg0, arg1, swizzId, globalGrIdx, pUuid) kmigmgrGenerateComputeInstanceUuid_5baef9(arg0, arg1, swizzId, globalGrIdx, pUuid) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrGenerateComputeInstanceUuid_HAL(arg0, arg1, swizzId, globalGrIdx, pUuid) kmigmgrGenerateComputeInstanceUuid(arg0, arg1, swizzId, globalGrIdx, pUuid) + +NV_STATUS kmigmgrCreateComputeInstances_FWCLIENT(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvBool bQuery, KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS arg3, NvU32 *pCIIds, NvBool bCreateCap); + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrCreateComputeInstances(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvBool bQuery, KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS arg3, NvU32 *pCIIds, NvBool bCreateCap) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrCreateComputeInstances(arg0, arg1, arg2, bQuery, arg3, pCIIds, bCreateCap) kmigmgrCreateComputeInstances_FWCLIENT(arg0, arg1, arg2, bQuery, arg3, pCIIds, bCreateCap) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrCreateComputeInstances_HAL(arg0, arg1, arg2, bQuery, arg3, pCIIds, bCreateCap) kmigmgrCreateComputeInstances(arg0, arg1, arg2, bQuery, arg3, pCIIds, bCreateCap) + +NV_STATUS kmigmgrSetMIGState_FWCLIENT(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded, NvBool bEnable, NvBool bUnload); + +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSetMIGState(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded, NvBool bEnable, NvBool bUnload) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSetMIGState(arg0, arg1, bMemoryPartitioningNeeded, bEnable, bUnload) kmigmgrSetMIGState_FWCLIENT(arg0, arg1, bMemoryPartitioningNeeded, bEnable, bUnload) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#define kmigmgrSetMIGState_HAL(arg0, arg1, bMemoryPartitioningNeeded, bEnable, bUnload) kmigmgrSetMIGState(arg0, arg1, bMemoryPartitioningNeeded, bEnable, bUnload) + +NV_STATUS kmigmgrConstructEngine_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, ENGDESCRIPTOR arg2); + +static inline NV_STATUS kmigmgrConstructEngine_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, ENGDESCRIPTOR arg2) { + return arg1->__kmigmgrConstructEngine__(arg0, arg1, arg2); +} + +NV_STATUS kmigmgrStateInitLocked_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); + +static inline NV_STATUS kmigmgrStateInitLocked_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1) { + return arg1->__kmigmgrStateInitLocked__(arg0, arg1); +} + +NV_STATUS kmigmgrStateUnload_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 flags); + +static inline NV_STATUS kmigmgrStateUnload_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 flags) { + return arg1->__kmigmgrStateUnload__(arg0, arg1, flags); +} + +NV_STATUS kmigmgrCreateGPUInstanceCheck_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded); + +static inline NV_STATUS 
kmigmgrCreateGPUInstanceCheck_46f6a7(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS kmigmgrCreateGPUInstanceCheck_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bMemoryPartitioningNeeded) { + return arg1->__kmigmgrCreateGPUInstanceCheck__(arg0, arg1, bMemoryPartitioningNeeded); +} + +NvBool kmigmgrIsDevinitMIGBitSet_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1); + +static inline NvBool kmigmgrIsDevinitMIGBitSet_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kmigmgrIsDevinitMIGBitSet_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1) { + return arg1->__kmigmgrIsDevinitMIGBitSet__(arg0, arg1); +} + +NvBool kmigmgrIsGPUInstanceCombinationValid_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag); + +static inline NvBool kmigmgrIsGPUInstanceCombinationValid_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kmigmgrIsGPUInstanceCombinationValid_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag) { + return arg1->__kmigmgrIsGPUInstanceCombinationValid__(arg0, arg1, gpuInstanceFlag); +} + +NvBool kmigmgrIsGPUInstanceFlagValid_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag); + +static inline NvBool kmigmgrIsGPUInstanceFlagValid_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kmigmgrIsGPUInstanceFlagValid_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 gpuInstanceFlag) { + return arg1->__kmigmgrIsGPUInstanceFlagValid__(arg0, arg1, gpuInstanceFlag); +} + +NvBool kmigmgrIsMemoryPartitioningRequested_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlags); + +static inline NvBool kmigmgrIsMemoryPartitioningRequested_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlags) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kmigmgrIsMemoryPartitioningRequested_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlags) { + return arg1->__kmigmgrIsMemoryPartitioningRequested__(arg0, arg1, partitionFlags); +} + +NvBool kmigmgrIsMemoryPartitioningNeeded_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId); + +static inline NvBool kmigmgrIsMemoryPartitioningNeeded_491d52(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kmigmgrIsMemoryPartitioningNeeded_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) { + return arg1->__kmigmgrIsMemoryPartitioningNeeded__(arg0, arg1, swizzId); +} + +static inline struct NV_RANGE kmigmgrMemSizeFlagToSwizzIdRange_d64cd6(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 memSizeFlag) { + return NV_RANGE_EMPTY; +} + +struct NV_RANGE kmigmgrMemSizeFlagToSwizzIdRange_GA100(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 memSizeFlag); + +static inline struct NV_RANGE kmigmgrMemSizeFlagToSwizzIdRange_DISPATCH(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 memSizeFlag) { + return arg1->__kmigmgrMemSizeFlagToSwizzIdRange__(arg0, arg1, memSizeFlag); +} + +static inline NV_STATUS kmigmgrReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunableState) { + return pEngstate->__kmigmgrReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline 
NV_STATUS kmigmgrStateLoad_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return pEngstate->__kmigmgrStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmigmgrStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return pEngstate->__kmigmgrStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmigmgrStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return pEngstate->__kmigmgrStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void kmigmgrStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + pEngstate->__kmigmgrStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS kmigmgrStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return pEngstate->__kmigmgrStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmigmgrStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + return pEngstate->__kmigmgrStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void kmigmgrInitMissing_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + pEngstate->__kmigmgrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS kmigmgrStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + return pEngstate->__kmigmgrStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kmigmgrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + return pEngstate->__kmigmgrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS kmigmgrGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunableState) { + return pEngstate->__kmigmgrGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kmigmgrCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__kmigmgrCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void kmigmgrFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunableState) { + pEngstate->__kmigmgrFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS kmigmgrStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, NvU32 arg0) { + return pEngstate->__kmigmgrStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS kmigmgrAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void **ppTunableState) { + return pEngstate->__kmigmgrAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS kmigmgrSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate, void *pTunableState) { + return pEngstate->__kmigmgrSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool kmigmgrIsPresent_DISPATCH(POBJGPU pGpu, struct KernelMIGManager *pEngstate) { + return pEngstate->__kmigmgrIsPresent__(pGpu, pEngstate); +} + +static inline NvBool kmigmgrUseLegacyVgpuPolicy(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool kmigmgrIsMIGNvlinkP2PSupportOverridden(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager) { + return ((NvBool)(0 != 0)); +} + +static inline const union ENGTYPE_BIT_VECTOR *kmigmgrGetPartitionableEnginesInUse(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager) { + return 
&pKernelMIGManager->partitionableEnginesInUse; +} + +static inline NvBool kmigmgrIsA100ReducedConfig(OBJGPU *pGpu, struct KernelMIGManager *pKernelMIGManager) { + return pKernelMIGManager->bIsA100ReducedConfig; +} + +NV_STATUS kmigmgrIncRefCount_IMPL(struct RsShared *arg0); +#define kmigmgrIncRefCount(arg0) kmigmgrIncRefCount_IMPL(arg0) +NV_STATUS kmigmgrDecRefCount_IMPL(struct RsShared *arg0); +#define kmigmgrDecRefCount(arg0) kmigmgrDecRefCount_IMPL(arg0) +struct MIG_INSTANCE_REF kmigmgrMakeGIReference_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0); +#define kmigmgrMakeGIReference(arg0) kmigmgrMakeGIReference_IMPL(arg0) +struct MIG_INSTANCE_REF kmigmgrMakeCIReference_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0, MIG_COMPUTE_INSTANCE *arg1); +#define kmigmgrMakeCIReference(arg0, arg1) kmigmgrMakeCIReference_IMPL(arg0, arg1) +NV_STATUS kmigmgrEngineTypeXlate_IMPL(union ENGTYPE_BIT_VECTOR *pSrc, NvU32 srcEngineType, union ENGTYPE_BIT_VECTOR *pDst, NvU32 *pDstEngineType); +#define kmigmgrEngineTypeXlate(pSrc, srcEngineType, pDst, pDstEngineType) kmigmgrEngineTypeXlate_IMPL(pSrc, srcEngineType, pDst, pDstEngineType) +NvBool kmigmgrIsInstanceAttributionIdValid_IMPL(NvU16 id); +#define kmigmgrIsInstanceAttributionIdValid(id) kmigmgrIsInstanceAttributionIdValid_IMPL(id) +struct MIG_INSTANCE_REF kmigmgrMakeNoMIGReference_IMPL(void); +#define kmigmgrMakeNoMIGReference() kmigmgrMakeNoMIGReference_IMPL() +NvBool kmigmgrIsMIGReferenceValid_IMPL(struct MIG_INSTANCE_REF *arg0); +#define kmigmgrIsMIGReferenceValid(arg0) kmigmgrIsMIGReferenceValid_IMPL(arg0) +NvBool kmigmgrAreMIGReferencesSame_IMPL(struct MIG_INSTANCE_REF *arg0, struct MIG_INSTANCE_REF *arg1); +#define kmigmgrAreMIGReferencesSame(arg0, arg1) kmigmgrAreMIGReferencesSame_IMPL(arg0, arg1) +NvU32 kmigmgrCountEnginesOfType_IMPL(const union ENGTYPE_BIT_VECTOR *arg0, NvU32 arg1); +#define kmigmgrCountEnginesOfType(arg0, arg1) kmigmgrCountEnginesOfType_IMPL(arg0, arg1) +NvU16 kmigmgrGetAttributionIdFromMIGReference_IMPL(struct MIG_INSTANCE_REF arg0); +#define kmigmgrGetAttributionIdFromMIGReference(arg0) kmigmgrGetAttributionIdFromMIGReference_IMPL(arg0) +NV_STATUS kmigmgrAllocateInstanceEngines_IMPL(union ENGTYPE_BIT_VECTOR *pSourceEngines, NvBool bShared, struct NV_RANGE engTypeRange, NvU32 reqEngCount, union ENGTYPE_BIT_VECTOR *pOutEngines, union ENGTYPE_BIT_VECTOR *pExclusiveEngines, union ENGTYPE_BIT_VECTOR *pSharedEngines); +#define kmigmgrAllocateInstanceEngines(pSourceEngines, bShared, engTypeRange, reqEngCount, pOutEngines, pExclusiveEngines, pSharedEngines) kmigmgrAllocateInstanceEngines_IMPL(pSourceEngines, bShared, engTypeRange, reqEngCount, pOutEngines, pExclusiveEngines, pSharedEngines) +void kmigmgrGetLocalEngineMask_IMPL(union ENGTYPE_BIT_VECTOR *pPhysicalEngineMask, union ENGTYPE_BIT_VECTOR *pLocalEngineMask); +#define kmigmgrGetLocalEngineMask(pPhysicalEngineMask, pLocalEngineMask) kmigmgrGetLocalEngineMask_IMPL(pPhysicalEngineMask, pLocalEngineMask) +NV_STATUS kmigmgrAllocGPUInstanceHandles_IMPL(OBJGPU *arg0, NvU32 swizzId, KERNEL_MIG_GPU_INSTANCE *arg1); +#define kmigmgrAllocGPUInstanceHandles(arg0, swizzId, arg1) kmigmgrAllocGPUInstanceHandles_IMPL(arg0, swizzId, arg1) +void kmigmgrFreeGPUInstanceHandles_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0); +#define kmigmgrFreeGPUInstanceHandles(arg0) kmigmgrFreeGPUInstanceHandles_IMPL(arg0) +NvBool kmigmgrIsGPUInstanceReadyToBeDestroyed_IMPL(KERNEL_MIG_GPU_INSTANCE *arg0); +#define kmigmgrIsGPUInstanceReadyToBeDestroyed(arg0) kmigmgrIsGPUInstanceReadyToBeDestroyed_IMPL(arg0) +void kmigmgrDestruct_IMPL(struct 
KernelMIGManager *arg0); +#define __nvoc_kmigmgrDestruct(arg0) kmigmgrDestruct_IMPL(arg0) +void kmigmgrInitRegistryOverrides_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrInitRegistryOverrides(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInitRegistryOverrides(arg0, arg1) kmigmgrInitRegistryOverrides_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +KERNEL_MIG_GPU_INSTANCE *kmigmgrGetMIGGpuInstanceSlot_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 i); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline KERNEL_MIG_GPU_INSTANCE *kmigmgrGetMIGGpuInstanceSlot(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 i) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NULL; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetMIGGpuInstanceSlot(arg0, arg1, i) kmigmgrGetMIGGpuInstanceSlot_IMPL(arg0, arg1, i) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsMIGSupported_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsMIGSupported(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsMIGSupported(arg0, arg1) kmigmgrIsMIGSupported_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsMIGEnabled_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsMIGEnabled(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsMIGEnabled(arg0, arg1) kmigmgrIsMIGEnabled_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsMIGGpuInstancingEnabled_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsMIGGpuInstancingEnabled(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsMIGGpuInstancingEnabled(arg0, arg1) kmigmgrIsMIGGpuInstancingEnabled_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsMIGMemPartitioningEnabled_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsMIGMemPartitioningEnabled(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsMIGMemPartitioningEnabled(arg0, arg1) kmigmgrIsMIGMemPartitioningEnabled_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +const KERNEL_MIG_MANAGER_STATIC_INFO *kmigmgrGetStaticInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline const KERNEL_MIG_MANAGER_STATIC_INFO *kmigmgrGetStaticInfo(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NULL; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define 
kmigmgrGetStaticInfo(arg0, arg1) kmigmgrGetStaticInfo_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrSaveToPersistence_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSaveToPersistence(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSaveToPersistence(arg0, arg1) kmigmgrSaveToPersistence_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrDisableWatchdog_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrDisableWatchdog(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDisableWatchdog(arg0, arg1) kmigmgrDisableWatchdog_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrRestoreWatchdog_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrRestoreWatchdog(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrRestoreWatchdog(arg0, arg1) kmigmgrRestoreWatchdog_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrSetSwizzIdInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSetSwizzIdInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSetSwizzIdInUse(arg0, arg1, swizzId) kmigmgrSetSwizzIdInUse_IMPL(arg0, arg1, swizzId) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrClearSwizzIdInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrClearSwizzIdInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrClearSwizzIdInUse(arg0, arg1, swizzId) kmigmgrClearSwizzIdInUse_IMPL(arg0, arg1, swizzId) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsSwizzIdInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsSwizzIdInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsSwizzIdInUse(arg0, arg1, swizzId) kmigmgrIsSwizzIdInUse_IMPL(arg0, arg1, swizzId) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetInvalidSwizzIdMask_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU64 *pUnsupportedSwizzIdMask); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetInvalidSwizzIdMask(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, 
NvU64 *pUnsupportedSwizzIdMask) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetInvalidSwizzIdMask(arg0, arg1, swizzId, pUnsupportedSwizzIdMask) kmigmgrGetInvalidSwizzIdMask_IMPL(arg0, arg1, swizzId, pUnsupportedSwizzIdMask) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsMIGNvlinkP2PSupported_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsMIGNvlinkP2PSupported(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsMIGNvlinkP2PSupported(arg0, arg1) kmigmgrIsMIGNvlinkP2PSupported_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvU64 kmigmgrGetSwizzIdInUseMask_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvU64 kmigmgrGetSwizzIdInUseMask(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return 0; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetSwizzIdInUseMask(arg0, arg1) kmigmgrGetSwizzIdInUseMask_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrSetEnginesInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, union ENGTYPE_BIT_VECTOR *pEngines); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSetEnginesInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, union ENGTYPE_BIT_VECTOR *pEngines) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSetEnginesInUse(arg0, arg1, pEngines) kmigmgrSetEnginesInUse_IMPL(arg0, arg1, pEngines) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrClearEnginesInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, union ENGTYPE_BIT_VECTOR *pEngines); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrClearEnginesInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, union ENGTYPE_BIT_VECTOR *pEngines) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrClearEnginesInUse(arg0, arg1, pEngines) kmigmgrClearEnginesInUse_IMPL(arg0, arg1, pEngines) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsEngineInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 engineType); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsEngineInUse(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 engineType) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsEngineInUse(arg0, arg1, engineType) kmigmgrIsEngineInUse_IMPL(arg0, arg1, engineType) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsEnginePartitionable_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 engineType); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsEnginePartitionable(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 engineType) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define 
kmigmgrIsEnginePartitionable(arg0, arg1, engineType) kmigmgrIsEnginePartitionable_IMPL(arg0, arg1, engineType) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsEngineInInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 globalEngType, struct MIG_INSTANCE_REF arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsEngineInInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 globalEngType, struct MIG_INSTANCE_REF arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsEngineInInstance(arg0, arg1, globalEngType, arg2) kmigmgrIsEngineInInstance_IMPL(arg0, arg1, globalEngType, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetFreeEngines_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 engineCount, struct NV_RANGE engineRange, union ENGTYPE_BIT_VECTOR *pInstanceEngines); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetFreeEngines(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 engineCount, struct NV_RANGE engineRange, union ENGTYPE_BIT_VECTOR *pInstanceEngines) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetFreeEngines(arg0, arg1, engineCount, engineRange, pInstanceEngines) kmigmgrGetFreeEngines_IMPL(arg0, arg1, engineCount, engineRange, pInstanceEngines) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrCreateGPUInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 *pSwizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2, NvBool bValid, NvBool bCreateCap); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrCreateGPUInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 *pSwizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2, NvBool bValid, NvBool bCreateCap) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrCreateGPUInstance(arg0, arg1, pSwizzId, arg2, bValid, bCreateCap) kmigmgrCreateGPUInstance_IMPL(arg0, arg1, pSwizzId, arg2, bValid, bCreateCap) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrInvalidateGPUInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvBool bUnload); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrInvalidateGPUInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvBool bUnload) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInvalidateGPUInstance(arg0, arg1, swizzId, bUnload) kmigmgrInvalidateGPUInstance_IMPL(arg0, arg1, swizzId, bUnload) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrInitGPUInstanceScrubber_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrInitGPUInstanceScrubber(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInitGPUInstanceScrubber(arg0, arg1, arg2) kmigmgrInitGPUInstanceScrubber_IMPL(arg0, arg1, arg2) +#endif 
//__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrDestroyGPUInstanceScrubber_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrDestroyGPUInstanceScrubber(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDestroyGPUInstanceScrubber(arg0, arg1, arg2) kmigmgrDestroyGPUInstanceScrubber_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrInitGPUInstanceBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrInitGPUInstanceBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInitGPUInstanceBufPools(arg0, arg1, arg2) kmigmgrInitGPUInstanceBufPools_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrInitGPUInstanceGrBufPools_IMPL(OBJGPU *pGpu, struct KernelMIGManager *arg0, KERNEL_MIG_GPU_INSTANCE *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrInitGPUInstanceGrBufPools(OBJGPU *pGpu, struct KernelMIGManager *arg0, KERNEL_MIG_GPU_INSTANCE *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInitGPUInstanceGrBufPools(pGpu, arg0, arg1) kmigmgrInitGPUInstanceGrBufPools_IMPL(pGpu, arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrDestroyGPUInstanceGrBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrDestroyGPUInstanceGrBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDestroyGPUInstanceGrBufPools(arg0, arg1, arg2) kmigmgrDestroyGPUInstanceGrBufPools_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrInitGPUInstancePool_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrInitGPUInstancePool(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInitGPUInstancePool(arg0, arg1, arg2) kmigmgrInitGPUInstancePool_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrDestroyGPUInstancePool_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrDestroyGPUInstancePool(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDestroyGPUInstancePool(arg0, arg1, arg2) kmigmgrDestroyGPUInstancePool_IMPL(arg0, arg1, arg2) +#endif 
//__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrInitGPUInstanceRunlistBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrInitGPUInstanceRunlistBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInitGPUInstanceRunlistBufPools(arg0, arg1, arg2) kmigmgrInitGPUInstanceRunlistBufPools_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrDestroyGPUInstanceRunlistBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrDestroyGPUInstanceRunlistBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDestroyGPUInstanceRunlistBufPools(arg0, arg1, arg2) kmigmgrDestroyGPUInstanceRunlistBufPools_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrPrintSubscribingClients_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrPrintSubscribingClients(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrPrintSubscribingClients(arg0, arg1, swizzId) kmigmgrPrintSubscribingClients_IMPL(arg0, arg1, swizzId) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrInitGPUInstanceInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrInitGPUInstanceInfo(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInitGPUInstanceInfo(arg0, arg1, arg2) kmigmgrInitGPUInstanceInfo_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrTrimInstanceRunlistBufPools_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrTrimInstanceRunlistBufPools(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrTrimInstanceRunlistBufPools(arg0, arg1, arg2) kmigmgrTrimInstanceRunlistBufPools_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrSetDeviceProfilingInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSetDeviceProfilingInUse(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSetDeviceProfilingInUse(arg0, arg1) kmigmgrSetDeviceProfilingInUse_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrClearDeviceProfilingInUse_IMPL(OBJGPU *arg0, struct 
KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrClearDeviceProfilingInUse(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrClearDeviceProfilingInUse(arg0, arg1) kmigmgrClearDeviceProfilingInUse_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsDeviceProfilingInUse_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsDeviceProfilingInUse(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsDeviceProfilingInUse(arg0, arg1) kmigmgrIsDeviceProfilingInUse_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NvBool kmigmgrIsClientUsingDeviceProfiling_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NvBool kmigmgrIsClientUsingDeviceProfiling(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrIsClientUsingDeviceProfiling(arg0, arg1, hClient) kmigmgrIsClientUsingDeviceProfiling_IMPL(arg0, arg1, hClient) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrEnableAllLCEs_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bEnableAllLCEs); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrEnableAllLCEs(OBJGPU *arg0, struct KernelMIGManager *arg1, NvBool bEnableAllLCEs) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrEnableAllLCEs(arg0, arg1, bEnableAllLCEs) kmigmgrEnableAllLCEs_IMPL(arg0, arg1, bEnableAllLCEs) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetInstanceRefFromClient_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, struct MIG_INSTANCE_REF *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetInstanceRefFromClient(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, struct MIG_INSTANCE_REF *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetInstanceRefFromClient(arg0, arg1, hClient, arg2) kmigmgrGetInstanceRefFromClient_IMPL(arg0, arg1, hClient, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetMemoryPartitionHeapFromClient_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, struct Heap **arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetMemoryPartitionHeapFromClient(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, struct Heap **arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetMemoryPartitionHeapFromClient(arg0, arg1, hClient, arg2) kmigmgrGetMemoryPartitionHeapFromClient_IMPL(arg0, arg1, hClient, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetSwizzIdFromClient_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, NvU32 
*pSwizzId); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetSwizzIdFromClient(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, NvU32 *pSwizzId) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetSwizzIdFromClient(arg0, arg1, hClient, pSwizzId) kmigmgrGetSwizzIdFromClient_IMPL(arg0, arg1, hClient, pSwizzId) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrPrintGPUInstanceInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrPrintGPUInstanceInfo(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrPrintGPUInstanceInfo(arg0, arg1, arg2) kmigmgrPrintGPUInstanceInfo_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrSetGPUInstanceInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSetGPUInstanceInfo(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSetGPUInstanceInfo(arg0, arg1, swizzId, arg2) kmigmgrSetGPUInstanceInfo_IMPL(arg0, arg1, swizzId, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetGPUInstanceInfo_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KERNEL_MIG_GPU_INSTANCE **arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetGPUInstanceInfo(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KERNEL_MIG_GPU_INSTANCE **arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetGPUInstanceInfo(arg0, arg1, swizzId, arg2) kmigmgrGetGPUInstanceInfo_IMPL(arg0, arg1, swizzId, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetLocalToGlobalEngineType_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct MIG_INSTANCE_REF arg2, NvU32 localEngType, NvU32 *pGlobalEngType); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetLocalToGlobalEngineType(OBJGPU *arg0, struct KernelMIGManager *arg1, struct MIG_INSTANCE_REF arg2, NvU32 localEngType, NvU32 *pGlobalEngType) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetLocalToGlobalEngineType(arg0, arg1, arg2, localEngType, pGlobalEngType) kmigmgrGetLocalToGlobalEngineType_IMPL(arg0, arg1, arg2, localEngType, pGlobalEngType) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetGlobalToLocalEngineType_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct MIG_INSTANCE_REF arg2, NvU32 globalEngType, NvU32 *pLocalEngType); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetGlobalToLocalEngineType(OBJGPU *arg0, struct KernelMIGManager *arg1, struct MIG_INSTANCE_REF arg2, NvU32 globalEngType, NvU32 *pLocalEngType) { + 
NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetGlobalToLocalEngineType(arg0, arg1, arg2, globalEngType, pLocalEngType) kmigmgrGetGlobalToLocalEngineType_IMPL(arg0, arg1, arg2, globalEngType, pLocalEngType) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrFilterEngineList_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Subdevice *arg2, NvU32 *pEngineTypes, NvU32 *pEngineCount); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrFilterEngineList(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Subdevice *arg2, NvU32 *pEngineTypes, NvU32 *pEngineCount) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrFilterEngineList(arg0, arg1, arg2, pEngineTypes, pEngineCount) kmigmgrFilterEngineList_IMPL(arg0, arg1, arg2, pEngineTypes, pEngineCount) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrFilterEnginePartnerList_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Subdevice *arg2, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *arg3); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrFilterEnginePartnerList(OBJGPU *arg0, struct KernelMIGManager *arg1, struct Subdevice *arg2, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrFilterEnginePartnerList(arg0, arg1, arg2, arg3) kmigmgrFilterEnginePartnerList_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetProfileByPartitionFlag_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlag, const NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO **arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetProfileByPartitionFlag(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 partitionFlag, const NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO **arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetProfileByPartitionFlag(arg0, arg1, partitionFlag, arg2) kmigmgrGetProfileByPartitionFlag_IMPL(arg0, arg1, partitionFlag, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrSaveComputeInstances_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, GPUMGR_SAVE_COMPUTE_INSTANCE *arg3); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSaveComputeInstances(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, GPUMGR_SAVE_COMPUTE_INSTANCE *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSaveComputeInstances(arg0, arg1, arg2, arg3) kmigmgrSaveComputeInstances_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrSetPartitioningMode_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSetPartitioningMode(OBJGPU *arg0, struct KernelMIGManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSetPartitioningMode(arg0, arg1) kmigmgrSetPartitioningMode_IMPL(arg0, arg1) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetMIGReferenceFromEngineType_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 engineType, struct MIG_INSTANCE_REF *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetMIGReferenceFromEngineType(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 engineType, struct MIG_INSTANCE_REF *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetMIGReferenceFromEngineType(arg0, arg1, engineType, arg2) kmigmgrGetMIGReferenceFromEngineType_IMPL(arg0, arg1, engineType, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrGetGPUInstanceScrubberCe_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, NvU32 *ceInst); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrGetGPUInstanceScrubberCe(OBJGPU *arg0, struct KernelMIGManager *arg1, NvHandle hClient, NvU32 *ceInst) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrGetGPUInstanceScrubberCe(arg0, arg1, hClient, ceInst) kmigmgrGetGPUInstanceScrubberCe_IMPL(arg0, arg1, hClient, ceInst) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrDescribeGPUInstances_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *arg2); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrDescribeGPUInstances(OBJGPU *arg0, struct KernelMIGManager *arg1, NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDescribeGPUInstances(arg0, arg1, arg2) kmigmgrDescribeGPUInstances_IMPL(arg0, arg1, arg2) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrSwizzIdToResourceAllocation_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2, KERNEL_MIG_GPU_INSTANCE *arg3, MIG_RESOURCE_ALLOCATION *arg4); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrSwizzIdToResourceAllocation(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, KMIGMGR_CREATE_GPU_INSTANCE_PARAMS arg2, KERNEL_MIG_GPU_INSTANCE *arg3, MIG_RESOURCE_ALLOCATION *arg4) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrSwizzIdToResourceAllocation(arg0, arg1, swizzId, arg2, arg3, arg4) kmigmgrSwizzIdToResourceAllocation_IMPL(arg0, arg1, swizzId, arg2, arg3, arg4) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrAllocComputeInstanceHandles_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrAllocComputeInstanceHandles(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrAllocComputeInstanceHandles(arg0, arg1, arg2, arg3) 
kmigmgrAllocComputeInstanceHandles_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrFreeComputeInstanceHandles_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrFreeComputeInstanceHandles(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrFreeComputeInstanceHandles(arg0, arg1, arg2, arg3) kmigmgrFreeComputeInstanceHandles_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_mig_manager_h_disabled + +void kmigmgrReleaseComputeInstanceEngines_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline void kmigmgrReleaseComputeInstanceEngines(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, MIG_COMPUTE_INSTANCE *arg3) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrReleaseComputeInstanceEngines(arg0, arg1, arg2, arg3) kmigmgrReleaseComputeInstanceEngines_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrDeleteComputeInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 CIId, NvBool bUnload); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrDeleteComputeInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 CIId, NvBool bUnload) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrDeleteComputeInstance(arg0, arg1, arg2, CIId, bUnload) kmigmgrDeleteComputeInstance_IMPL(arg0, arg1, arg2, CIId, bUnload) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrConfigureGPUInstance_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU32 *pGpcCountPerGr, NvU32 updateEngMask); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrConfigureGPUInstance(OBJGPU *arg0, struct KernelMIGManager *arg1, NvU32 swizzId, NvU32 *pGpcCountPerGr, NvU32 updateEngMask) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrConfigureGPUInstance(arg0, arg1, swizzId, pGpcCountPerGr, updateEngMask) kmigmgrConfigureGPUInstance_IMPL(arg0, arg1, swizzId, pGpcCountPerGr, updateEngMask) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrInvalidateGrGpcMapping_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 grIdx); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrInvalidateGrGpcMapping(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 grIdx) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInvalidateGrGpcMapping(arg0, arg1, arg2, grIdx) kmigmgrInvalidateGrGpcMapping_IMPL(arg0, arg1, arg2, grIdx) +#endif //__nvoc_kernel_mig_manager_h_disabled + +NV_STATUS kmigmgrInvalidateGr_IMPL(OBJGPU *arg0, struct KernelMIGManager *arg1, 
KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 grIdx); +#ifdef __nvoc_kernel_mig_manager_h_disabled +static inline NV_STATUS kmigmgrInvalidateGr(OBJGPU *arg0, struct KernelMIGManager *arg1, KERNEL_MIG_GPU_INSTANCE *arg2, NvU32 grIdx) { + NV_ASSERT_FAILED_PRECOMP("KernelMIGManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_mig_manager_h_disabled +#define kmigmgrInvalidateGr(arg0, arg1, arg2, grIdx) kmigmgrInvalidateGr_IMPL(arg0, arg1, arg2, grIdx) +#endif //__nvoc_kernel_mig_manager_h_disabled + +#undef PRIVATE_FIELD + + +#endif // KERNEL_MIG_MANAGER_H + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_MIG_MANAGER_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_nvdec_ctx_nvoc.c b/src/nvidia/generated/g_kernel_nvdec_ctx_nvoc.c new file mode 100644 index 000000000..f0dcc0553 --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvdec_ctx_nvoc.c @@ -0,0 +1,415 @@ +#define NVOC_KERNEL_NVDEC_CTX_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_nvdec_ctx_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x70d2be = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvdecContext; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_NvdecContext(NvdecContext*, RmHalspecOwner* ); +void __nvoc_init_funcTable_NvdecContext(NvdecContext*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_NvdecContext(NvdecContext*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_NvdecContext(NvdecContext*, RmHalspecOwner* ); +void __nvoc_dtor_NvdecContext(NvdecContext*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NvdecContext; + +static const struct NVOC_RTTI __nvoc_rtti_NvdecContext_NvdecContext = { + /*pClassDef=*/ &__nvoc_class_def_NvdecContext, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NvdecContext, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvdecContext_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvdecContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvdecContext_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvdecContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvdecContext_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvdecContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI 
__nvoc_rtti_NvdecContext_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvdecContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvdecContext_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvdecContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvdecContext_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvdecContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvdecContext_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvdecContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvdecContext_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvdecContext, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_NvdecContext = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_NvdecContext_NvdecContext, + &__nvoc_rtti_NvdecContext_ChannelDescendant, + &__nvoc_rtti_NvdecContext_Notifier, + &__nvoc_rtti_NvdecContext_INotifier, + &__nvoc_rtti_NvdecContext_GpuResource, + &__nvoc_rtti_NvdecContext_RmResource, + &__nvoc_rtti_NvdecContext_RmResourceCommon, + &__nvoc_rtti_NvdecContext_RsResource, + &__nvoc_rtti_NvdecContext_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_NvdecContext = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NvdecContext), + /*classId=*/ classId(NvdecContext), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NvdecContext", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NvdecContext, + /*pCastInfo=*/ &__nvoc_castinfo_NvdecContext, + /*pExportInfo=*/ &__nvoc_export_info_NvdecContext +}; + +static NV_STATUS __nvoc_thunk_ChannelDescendant_nvdecctxCheckMemInterUnmap(struct NvdecContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_NvdecContext_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_nvdecctxShareCallback(struct NvdecContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvdecContext_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_nvdecctxAccessCallback(struct NvdecContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdecctxMapTo(struct NvdecContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RsResource.offset), pParams); +} 
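
The `__nvoc_thunk_*` functions above and below all follow one pattern: the derived `NvdecContext` pointer is shifted by the byte offset recorded in the matching `__nvoc_rtti_NvdecContext_*` entry so that it points at the embedded base-class struct, and the call is then delegated to the base implementation. As a rough, self-contained sketch of that offset-cast idea only (hypothetical names, plain `offsetof` instead of the RTTI table, not part of this commit):

```c
/*
 * Illustrative sketch only -- not part of this commit. It mimics, in plain C,
 * the pattern used by the generated thunks: a derived object embeds its base
 * struct at a fixed offset, and a thunk adjusts the pointer by that offset
 * (here via offsetof) before calling the base-class implementation.
 * All names below (ToyBase, ToyDerived, toyBaseGetRefCount, ...) are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct ToyBase {
    unsigned refCount;
} ToyBase;

typedef struct ToyDerived {
    int     derivedData;   /* derived-class state                      */
    ToyBase base;          /* embedded base, like the __nvoc_base_ members */
} ToyDerived;

/* "Base class" implementation, operating on ToyBase only. */
static unsigned toyBaseGetRefCount(ToyBase *pBase)
{
    return pBase->refCount;
}

/* Thunk: shift the derived pointer to the embedded base and delegate,
 * the same idea as the generated __nvoc_thunk_* functions. */
static unsigned toyDerivedGetRefCount(ToyDerived *pDerived)
{
    return toyBaseGetRefCount(
        (ToyBase *)((unsigned char *)pDerived + offsetof(ToyDerived, base)));
}

int main(void)
{
    ToyDerived obj = { .derivedData = 42, .base = { .refCount = 3 } };
    printf("refCount = %u\n", toyDerivedGetRefCount(&obj)); /* prints 3 */
    return 0;
}
```

In the generated code the offset is not computed with `offsetof` at the call site; it is looked up from the per-class RTTI entries (`NV_OFFSETOF(NvdecContext, __nvoc_base_...)`) so the same cast machinery also serves `dynamicCast`.
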
+ +static NV_STATUS __nvoc_thunk_GpuResource_nvdecctxGetMapAddrSpace(struct NvdecContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvdecContext_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_nvdecctxSetNotificationShare(struct NvdecContext *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvdecContext_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_nvdecctxGetRefCount(struct NvdecContext *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_nvdecctxAddAdditionalDependants(struct RsClient *pClient, struct NvdecContext *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdecctxControl_Prologue(struct NvdecContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvdecctxGetRegBaseOffsetAndSize(struct NvdecContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvdecContext_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvdecctxInternalControlForward(struct NvdecContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvdecContext_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdecctxUnmapFrom(struct NvdecContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_nvdecctxControl_Epilogue(struct NvdecContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdecctxControlLookup(struct NvdecContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_nvdecctxGetSwMethods(struct NvdecContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return chandesGetSwMethods((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_NvdecContext_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NvHandle __nvoc_thunk_GpuResource_nvdecctxGetInternalObjectHandle(struct NvdecContext *pGpuResource) { + return 
gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvdecContext_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvdecctxControl(struct NvdecContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvdecContext_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvdecctxUnmap(struct NvdecContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvdecContext_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdecctxGetMemInterMapParams(struct NvdecContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvdecContext_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvdecctxGetMemoryMappingDescriptor(struct NvdecContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvdecContext_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_nvdecctxIsSwMethodStalling(struct NvdecContext *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_NvdecContext_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvdecctxControlFilter(struct NvdecContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_nvdecctxUnregisterEvent(struct NvdecContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvdecContext_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_nvdecctxCanCopy(struct NvdecContext *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_nvdecctxPreDestruct(struct NvdecContext *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvdecContext_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_nvdecctxGetNotificationListPtr(struct NvdecContext *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvdecContext_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_nvdecctxGetNotificationShare(struct NvdecContext *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvdecContext_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvdecctxMap(struct NvdecContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return 
gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvdecContext_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_nvdecctxGetOrAllocNotifShare(struct NvdecContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvdecContext_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_NvdecContext = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_NvdecContext(NvdecContext *pThis) { + __nvoc_nvdecctxDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NvdecContext(NvdecContext *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, ENGDESCRIPTOR (*)(struct OBJGPU *, NvU32, void *)); +NV_STATUS __nvoc_ctor_NvdecContext(NvdecContext *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, nvdecGetEngineDescFromAllocParams); + if (status != NV_OK) goto __nvoc_ctor_NvdecContext_fail_ChannelDescendant; + __nvoc_init_dataField_NvdecContext(pThis, pRmhalspecowner); + + status = __nvoc_nvdecctxConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NvdecContext_fail__init; + goto __nvoc_ctor_NvdecContext_exit; // Success + +__nvoc_ctor_NvdecContext_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_NvdecContext_fail_ChannelDescendant: +__nvoc_ctor_NvdecContext_exit: + + return status; +} + +static void __nvoc_init_funcTable_NvdecContext_1(NvdecContext *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__nvdecctxCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_nvdecctxCheckMemInterUnmap; + + pThis->__nvdecctxShareCallback__ = &__nvoc_thunk_GpuResource_nvdecctxShareCallback; + + pThis->__nvdecctxAccessCallback__ = &__nvoc_thunk_RmResource_nvdecctxAccessCallback; + + pThis->__nvdecctxMapTo__ = &__nvoc_thunk_RsResource_nvdecctxMapTo; + + pThis->__nvdecctxGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_nvdecctxGetMapAddrSpace; + + pThis->__nvdecctxSetNotificationShare__ = &__nvoc_thunk_Notifier_nvdecctxSetNotificationShare; + + pThis->__nvdecctxGetRefCount__ = &__nvoc_thunk_RsResource_nvdecctxGetRefCount; + + 
pThis->__nvdecctxAddAdditionalDependants__ = &__nvoc_thunk_RsResource_nvdecctxAddAdditionalDependants; + + pThis->__nvdecctxControl_Prologue__ = &__nvoc_thunk_RmResource_nvdecctxControl_Prologue; + + pThis->__nvdecctxGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_nvdecctxGetRegBaseOffsetAndSize; + + pThis->__nvdecctxInternalControlForward__ = &__nvoc_thunk_GpuResource_nvdecctxInternalControlForward; + + pThis->__nvdecctxUnmapFrom__ = &__nvoc_thunk_RsResource_nvdecctxUnmapFrom; + + pThis->__nvdecctxControl_Epilogue__ = &__nvoc_thunk_RmResource_nvdecctxControl_Epilogue; + + pThis->__nvdecctxControlLookup__ = &__nvoc_thunk_RsResource_nvdecctxControlLookup; + + pThis->__nvdecctxGetSwMethods__ = &__nvoc_thunk_ChannelDescendant_nvdecctxGetSwMethods; + + pThis->__nvdecctxGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_nvdecctxGetInternalObjectHandle; + + pThis->__nvdecctxControl__ = &__nvoc_thunk_GpuResource_nvdecctxControl; + + pThis->__nvdecctxUnmap__ = &__nvoc_thunk_GpuResource_nvdecctxUnmap; + + pThis->__nvdecctxGetMemInterMapParams__ = &__nvoc_thunk_RmResource_nvdecctxGetMemInterMapParams; + + pThis->__nvdecctxGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_nvdecctxGetMemoryMappingDescriptor; + + pThis->__nvdecctxIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_nvdecctxIsSwMethodStalling; + + pThis->__nvdecctxControlFilter__ = &__nvoc_thunk_RsResource_nvdecctxControlFilter; + + pThis->__nvdecctxUnregisterEvent__ = &__nvoc_thunk_Notifier_nvdecctxUnregisterEvent; + + pThis->__nvdecctxCanCopy__ = &__nvoc_thunk_RsResource_nvdecctxCanCopy; + + pThis->__nvdecctxPreDestruct__ = &__nvoc_thunk_RsResource_nvdecctxPreDestruct; + + pThis->__nvdecctxGetNotificationListPtr__ = &__nvoc_thunk_Notifier_nvdecctxGetNotificationListPtr; + + pThis->__nvdecctxGetNotificationShare__ = &__nvoc_thunk_Notifier_nvdecctxGetNotificationShare; + + pThis->__nvdecctxMap__ = &__nvoc_thunk_GpuResource_nvdecctxMap; + + pThis->__nvdecctxGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_nvdecctxGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_NvdecContext(NvdecContext *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_NvdecContext_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_NvdecContext(NvdecContext *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_NvdecContext = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_NvdecContext(pThis, pRmhalspecowner); +} + +NV_STATUS 
__nvoc_objCreate_NvdecContext(NvdecContext **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + NvdecContext *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(NvdecContext)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(NvdecContext)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NvdecContext); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_NvdecContext(pThis, pRmhalspecowner); + status = __nvoc_ctor_NvdecContext(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_NvdecContext_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_NvdecContext_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NvdecContext(NvdecContext **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_NvdecContext(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_nvdec_ctx_nvoc.h b/src/nvidia/generated/g_kernel_nvdec_ctx_nvoc.h new file mode 100644 index 000000000..3025c7ba4 --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvdec_ctx_nvoc.h @@ -0,0 +1,306 @@ +#ifndef _G_KERNEL_NVDEC_CTX_NVOC_H_ +#define _G_KERNEL_NVDEC_CTX_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_nvdec_ctx_nvoc.h" + +#ifndef KERNEL_NVDEC_CTX_H +#define KERNEL_NVDEC_CTX_H + +#include "kernel/gpu/fifo/channel_descendant.h" + +ENGDESCRIPTOR nvdecGetEngineDescFromAllocParams(OBJGPU *pGpu, NvU32 externalClassId, void *pAllocParams); + +/*! + * RM internal class representing NVXXXX_VIDEO_DECODER + */ +#ifdef NVOC_KERNEL_NVDEC_CTX_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NvdecContext { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct NvdecContext *__nvoc_pbase_NvdecContext; + NV_STATUS (*__nvdecctxCheckMemInterUnmap__)(struct NvdecContext *, NvBool); + NvBool (*__nvdecctxShareCallback__)(struct NvdecContext *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__nvdecctxAccessCallback__)(struct NvdecContext *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__nvdecctxMapTo__)(struct NvdecContext *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__nvdecctxGetMapAddrSpace__)(struct NvdecContext *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__nvdecctxSetNotificationShare__)(struct NvdecContext *, struct NotifShare *); + NvU32 (*__nvdecctxGetRefCount__)(struct NvdecContext *); + void (*__nvdecctxAddAdditionalDependants__)(struct RsClient *, struct NvdecContext *, RsResourceRef *); + NV_STATUS (*__nvdecctxControl_Prologue__)(struct NvdecContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nvdecctxGetRegBaseOffsetAndSize__)(struct NvdecContext *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__nvdecctxInternalControlForward__)(struct NvdecContext *, NvU32, void *, NvU32); + NV_STATUS (*__nvdecctxUnmapFrom__)(struct NvdecContext *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__nvdecctxControl_Epilogue__)(struct NvdecContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nvdecctxControlLookup__)(struct NvdecContext *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__nvdecctxGetSwMethods__)(struct NvdecContext *, METHOD **, NvU32 *); + NvHandle (*__nvdecctxGetInternalObjectHandle__)(struct NvdecContext *); + NV_STATUS (*__nvdecctxControl__)(struct NvdecContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nvdecctxUnmap__)(struct NvdecContext *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__nvdecctxGetMemInterMapParams__)(struct NvdecContext *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__nvdecctxGetMemoryMappingDescriptor__)(struct NvdecContext *, struct MEMORY_DESCRIPTOR **); + NvBool (*__nvdecctxIsSwMethodStalling__)(struct NvdecContext *, NvU32); + NV_STATUS (*__nvdecctxControlFilter__)(struct NvdecContext *, struct CALL_CONTEXT *, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nvdecctxUnregisterEvent__)(struct NvdecContext *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__nvdecctxCanCopy__)(struct NvdecContext *); + void (*__nvdecctxPreDestruct__)(struct NvdecContext *); + PEVENTNOTIFICATION *(*__nvdecctxGetNotificationListPtr__)(struct NvdecContext *); + struct NotifShare *(*__nvdecctxGetNotificationShare__)(struct NvdecContext *); + NV_STATUS (*__nvdecctxMap__)(struct NvdecContext *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__nvdecctxGetOrAllocNotifShare__)(struct NvdecContext *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_NvdecContext_TYPEDEF__ +#define __NVOC_CLASS_NvdecContext_TYPEDEF__ +typedef struct NvdecContext NvdecContext; +#endif /* __NVOC_CLASS_NvdecContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvdecContext +#define __nvoc_class_id_NvdecContext 0x70d2be +#endif /* __nvoc_class_id_NvdecContext */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvdecContext; + +#define __staticCast_NvdecContext(pThis) \ + ((pThis)->__nvoc_pbase_NvdecContext) + +#ifdef __nvoc_kernel_nvdec_ctx_h_disabled +#define __dynamicCast_NvdecContext(pThis) ((NvdecContext*)NULL) +#else //__nvoc_kernel_nvdec_ctx_h_disabled +#define __dynamicCast_NvdecContext(pThis) \ + ((NvdecContext*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NvdecContext))) +#endif //__nvoc_kernel_nvdec_ctx_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_NvdecContext(NvdecContext**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NvdecContext(NvdecContext**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_NvdecContext(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_NvdecContext((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define nvdecctxCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) nvdecctxCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define nvdecctxShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) nvdecctxShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define nvdecctxAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) nvdecctxAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define nvdecctxMapTo(pResource, pParams) nvdecctxMapTo_DISPATCH(pResource, pParams) +#define nvdecctxGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) nvdecctxGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define nvdecctxSetNotificationShare(pNotifier, pNotifShare) nvdecctxSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define nvdecctxGetRefCount(pResource) nvdecctxGetRefCount_DISPATCH(pResource) +#define nvdecctxAddAdditionalDependants(pClient, pResource, pReference) nvdecctxAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define nvdecctxControl_Prologue(pResource, pCallContext, pParams) nvdecctxControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define nvdecctxGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) nvdecctxGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define nvdecctxInternalControlForward(pGpuResource, command, pParams, size) nvdecctxInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define 
nvdecctxUnmapFrom(pResource, pParams) nvdecctxUnmapFrom_DISPATCH(pResource, pParams) +#define nvdecctxControl_Epilogue(pResource, pCallContext, pParams) nvdecctxControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define nvdecctxControlLookup(pResource, pParams, ppEntry) nvdecctxControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define nvdecctxGetSwMethods(pChannelDescendant, ppMethods, pNumMethods) nvdecctxGetSwMethods_DISPATCH(pChannelDescendant, ppMethods, pNumMethods) +#define nvdecctxGetInternalObjectHandle(pGpuResource) nvdecctxGetInternalObjectHandle_DISPATCH(pGpuResource) +#define nvdecctxControl(pGpuResource, pCallContext, pParams) nvdecctxControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define nvdecctxUnmap(pGpuResource, pCallContext, pCpuMapping) nvdecctxUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define nvdecctxGetMemInterMapParams(pRmResource, pParams) nvdecctxGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define nvdecctxGetMemoryMappingDescriptor(pRmResource, ppMemDesc) nvdecctxGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define nvdecctxIsSwMethodStalling(pChannelDescendant, hHandle) nvdecctxIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define nvdecctxControlFilter(pResource, pCallContext, pParams) nvdecctxControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define nvdecctxUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) nvdecctxUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define nvdecctxCanCopy(pResource) nvdecctxCanCopy_DISPATCH(pResource) +#define nvdecctxPreDestruct(pResource) nvdecctxPreDestruct_DISPATCH(pResource) +#define nvdecctxGetNotificationListPtr(pNotifier) nvdecctxGetNotificationListPtr_DISPATCH(pNotifier) +#define nvdecctxGetNotificationShare(pNotifier) nvdecctxGetNotificationShare_DISPATCH(pNotifier) +#define nvdecctxMap(pGpuResource, pCallContext, pParams, pCpuMapping) nvdecctxMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define nvdecctxGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) nvdecctxGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS nvdecctxConstructHal_KERNEL(struct NvdecContext *pNvdecContext, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_kernel_nvdec_ctx_h_disabled +static inline NV_STATUS nvdecctxConstructHal(struct NvdecContext *pNvdecContext, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("NvdecContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvdec_ctx_h_disabled +#define nvdecctxConstructHal(pNvdecContext, pCallContext, pParams) nvdecctxConstructHal_KERNEL(pNvdecContext, pCallContext, pParams) +#endif //__nvoc_kernel_nvdec_ctx_h_disabled + +#define nvdecctxConstructHal_HAL(pNvdecContext, pCallContext, pParams) nvdecctxConstructHal(pNvdecContext, pCallContext, pParams) + +void nvdecctxDestructHal_KERNEL(struct NvdecContext *pNvdecContext); + +#ifdef __nvoc_kernel_nvdec_ctx_h_disabled +static inline void nvdecctxDestructHal(struct NvdecContext *pNvdecContext) { + NV_ASSERT_FAILED_PRECOMP("NvdecContext was disabled!"); +} +#else //__nvoc_kernel_nvdec_ctx_h_disabled +#define nvdecctxDestructHal(pNvdecContext) nvdecctxDestructHal_KERNEL(pNvdecContext) +#endif //__nvoc_kernel_nvdec_ctx_h_disabled + +#define 
nvdecctxDestructHal_HAL(pNvdecContext) nvdecctxDestructHal(pNvdecContext) + +static inline NV_STATUS nvdecctxCheckMemInterUnmap_DISPATCH(struct NvdecContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__nvdecctxCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool nvdecctxShareCallback_DISPATCH(struct NvdecContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvdecctxShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool nvdecctxAccessCallback_DISPATCH(struct NvdecContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvdecctxAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS nvdecctxMapTo_DISPATCH(struct NvdecContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvdecctxMapTo__(pResource, pParams); +} + +static inline NV_STATUS nvdecctxGetMapAddrSpace_DISPATCH(struct NvdecContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvdecctxGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void nvdecctxSetNotificationShare_DISPATCH(struct NvdecContext *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvdecctxSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 nvdecctxGetRefCount_DISPATCH(struct NvdecContext *pResource) { + return pResource->__nvdecctxGetRefCount__(pResource); +} + +static inline void nvdecctxAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct NvdecContext *pResource, RsResourceRef *pReference) { + pResource->__nvdecctxAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS nvdecctxControl_Prologue_DISPATCH(struct NvdecContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvdecctxControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS nvdecctxGetRegBaseOffsetAndSize_DISPATCH(struct NvdecContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__nvdecctxGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS nvdecctxInternalControlForward_DISPATCH(struct NvdecContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvdecctxInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS nvdecctxUnmapFrom_DISPATCH(struct NvdecContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvdecctxUnmapFrom__(pResource, pParams); +} + +static inline void nvdecctxControl_Epilogue_DISPATCH(struct NvdecContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvdecctxControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS nvdecctxControlLookup_DISPATCH(struct NvdecContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__nvdecctxControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS nvdecctxGetSwMethods_DISPATCH(struct NvdecContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) 
{ + return pChannelDescendant->__nvdecctxGetSwMethods__(pChannelDescendant, ppMethods, pNumMethods); +} + +static inline NvHandle nvdecctxGetInternalObjectHandle_DISPATCH(struct NvdecContext *pGpuResource) { + return pGpuResource->__nvdecctxGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS nvdecctxControl_DISPATCH(struct NvdecContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvdecctxControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS nvdecctxUnmap_DISPATCH(struct NvdecContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvdecctxUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS nvdecctxGetMemInterMapParams_DISPATCH(struct NvdecContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvdecctxGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS nvdecctxGetMemoryMappingDescriptor_DISPATCH(struct NvdecContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvdecctxGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool nvdecctxIsSwMethodStalling_DISPATCH(struct NvdecContext *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__nvdecctxIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS nvdecctxControlFilter_DISPATCH(struct NvdecContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvdecctxControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS nvdecctxUnregisterEvent_DISPATCH(struct NvdecContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvdecctxUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool nvdecctxCanCopy_DISPATCH(struct NvdecContext *pResource) { + return pResource->__nvdecctxCanCopy__(pResource); +} + +static inline void nvdecctxPreDestruct_DISPATCH(struct NvdecContext *pResource) { + pResource->__nvdecctxPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *nvdecctxGetNotificationListPtr_DISPATCH(struct NvdecContext *pNotifier) { + return pNotifier->__nvdecctxGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *nvdecctxGetNotificationShare_DISPATCH(struct NvdecContext *pNotifier) { + return pNotifier->__nvdecctxGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS nvdecctxMap_DISPATCH(struct NvdecContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvdecctxMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS nvdecctxGetOrAllocNotifShare_DISPATCH(struct NvdecContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvdecctxGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS __nvoc_nvdecctxConstruct(struct NvdecContext *arg_pNvdecContext, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams) { + return nvdecctxConstructHal(arg_pNvdecContext, arg_pCallContext, arg_pParams); +} + +static inline void __nvoc_nvdecctxDestruct(struct 
NvdecContext *pNvdecContext) { + nvdecctxDestructHal(pNvdecContext); +} + +#undef PRIVATE_FIELD + + +#endif // KERNEL_NVDEC_CTX_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_NVDEC_CTX_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_nvdec_nvoc.c b/src/nvidia/generated/g_kernel_nvdec_nvoc.c new file mode 100644 index 000000000..905d3d4d3 --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvdec_nvoc.c @@ -0,0 +1,470 @@ +#define NVOC_KERNEL_NVDEC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_nvdec_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xaba9df = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelNvdec; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_KernelNvdec(KernelNvdec*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelNvdec(KernelNvdec*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelNvdec(KernelNvdec*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelNvdec(KernelNvdec*, RmHalspecOwner* ); +void __nvoc_dtor_KernelNvdec(KernelNvdec*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelNvdec; + +static const struct NVOC_RTTI __nvoc_rtti_KernelNvdec_KernelNvdec = { + /*pClassDef=*/ &__nvoc_class_def_KernelNvdec, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelNvdec, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelNvdec_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelNvdec, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelNvdec_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelNvdec, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelNvdec_KernelFalcon = { + /*pClassDef=*/ &__nvoc_class_def_KernelFalcon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelNvdec, __nvoc_base_KernelFalcon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelNvdec_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelNvdec, __nvoc_base_IntrService), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelNvdec = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_KernelNvdec_KernelNvdec, + &__nvoc_rtti_KernelNvdec_IntrService, + &__nvoc_rtti_KernelNvdec_KernelFalcon, + &__nvoc_rtti_KernelNvdec_OBJENGSTATE, + &__nvoc_rtti_KernelNvdec_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelNvdec = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelNvdec), + /*classId=*/ classId(KernelNvdec), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelNvdec", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelNvdec, + /*pCastInfo=*/ &__nvoc_castinfo_KernelNvdec, + /*pExportInfo=*/ &__nvoc_export_info_KernelNvdec +}; + +static NV_STATUS __nvoc_thunk_KernelNvdec_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelNvdec, ENGDESCRIPTOR arg0) { + return 
knvdecConstructEngine(pGpu, (struct KernelNvdec *)(((unsigned char *)pKernelNvdec) - __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_KernelNvdec_kflcnResetHw(struct OBJGPU *pGpu, struct KernelFalcon *pKernelNvdec) { + return knvdecResetHw(pGpu, (struct KernelNvdec *)(((unsigned char *)pKernelNvdec) - __nvoc_rtti_KernelNvdec_KernelFalcon.offset)); +} + +static NvBool __nvoc_thunk_KernelNvdec_kflcnIsEngineInReset(struct OBJGPU *pGpu, struct KernelFalcon *pKernelNvdec) { + return knvdecIsEngineInReset(pGpu, (struct KernelNvdec *)(((unsigned char *)pKernelNvdec) - __nvoc_rtti_KernelNvdec_KernelFalcon.offset)); +} + +static void __nvoc_thunk_KernelNvdec_intrservRegisterIntrService(struct OBJGPU *arg0, struct IntrService *arg1, IntrServiceRecord arg2[155]) { + knvdecRegisterIntrService(arg0, (struct KernelNvdec *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvdec_IntrService.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelNvdec_intrservServiceNotificationInterrupt(struct OBJGPU *arg0, struct IntrService *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) { + return knvdecServiceNotificationInterrupt(arg0, (struct KernelNvdec *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvdec_IntrService.offset), arg2); +} + +static void __nvoc_thunk_OBJENGSTATE_knvdecStateDestroy(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_knvdecFreeTunableState(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecCompareTunableState(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static NvBool __nvoc_thunk_IntrService_knvdecClearInterrupt(struct OBJGPU *pGpu, struct KernelNvdec *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelNvdec_IntrService.offset), pParams); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_knvdecIsPresent(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset)); +} + +static NvU32 __nvoc_thunk_IntrService_knvdecServiceInterrupt(struct OBJGPU *pGpu, struct KernelNvdec *pIntrService, IntrServiceServiceInterruptArguments *pParams) { + return intrservServiceInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_KernelNvdec_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecReconcileTunableState(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStateLoad(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + 
__nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStateUnload(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStateInitLocked(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStatePreLoad(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStatePostUnload(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStatePreUnload(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStateInitUnlocked(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_knvdecInitMissing(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStatePreInitLocked(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStatePreInitUnlocked(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecGetTunableState(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecStatePostLoad(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecAllocTunableState(POBJGPU pGpu, struct KernelNvdec *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvdecSetTunableState(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvdec_OBJENGSTATE.offset), 
pTunableState); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelNvdec = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelFalcon(KernelFalcon*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_KernelNvdec(KernelNvdec *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelNvdec(KernelNvdec *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelNvdec(KernelNvdec *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_KernelNvdec_fail_OBJENGSTATE; + status = __nvoc_ctor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_KernelNvdec_fail_KernelFalcon; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_KernelNvdec_fail_IntrService; + __nvoc_init_dataField_KernelNvdec(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelNvdec_exit; // Success + +__nvoc_ctor_KernelNvdec_fail_IntrService: + __nvoc_dtor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon); +__nvoc_ctor_KernelNvdec_fail_KernelFalcon: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_KernelNvdec_fail_OBJENGSTATE: +__nvoc_ctor_KernelNvdec_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelNvdec_1(KernelNvdec *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + // Hal function -- knvdecConstructEngine + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__knvdecConstructEngine__ = &knvdecConstructEngine_IMPL; + } + else if (0) + { + } + + // Hal function -- knvdecConfigureFalcon + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: 
PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__knvdecConfigureFalcon__ = &knvdecConfigureFalcon_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__knvdecConfigureFalcon__ = &knvdecConfigureFalcon_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvdecConfigureFalcon__ = &knvdecConfigureFalcon_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- knvdecResetHw + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvdecResetHw__ = &knvdecResetHw_ac1694; + } + else if (0) + { + } + } + else if (0) + { + } + + pThis->__knvdecIsEngineInReset__ = &knvdecIsEngineInReset_167f46; + + // Hal function -- knvdecIsEngineDisabled + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__knvdecIsEngineDisabled__ = &knvdecIsEngineDisabled_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__knvdecIsEngineDisabled__ = &knvdecIsEngineDisabled_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvdecIsEngineDisabled__ = &knvdecIsEngineDisabled_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- knvdecReadUcodeFuseVersion + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__knvdecReadUcodeFuseVersion__ = &knvdecReadUcodeFuseVersion_b2b553; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvdecReadUcodeFuseVersion__ = &knvdecReadUcodeFuseVersion_GA100; + } + else if (0) + { + } + } + else if (0) + { + } + + pThis->__knvdecRegisterIntrService__ = &knvdecRegisterIntrService_IMPL; + + pThis->__knvdecServiceNotificationInterrupt__ = &knvdecServiceNotificationInterrupt_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelNvdec_engstateConstructEngine; + + pThis->__nvoc_base_KernelFalcon.__kflcnResetHw__ = &__nvoc_thunk_KernelNvdec_kflcnResetHw; + + pThis->__nvoc_base_KernelFalcon.__kflcnIsEngineInReset__ = &__nvoc_thunk_KernelNvdec_kflcnIsEngineInReset; + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = 
&__nvoc_thunk_KernelNvdec_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceNotificationInterrupt__ = &__nvoc_thunk_KernelNvdec_intrservServiceNotificationInterrupt; + + pThis->__knvdecStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_knvdecStateDestroy; + + pThis->__knvdecFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvdecFreeTunableState; + + pThis->__knvdecCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvdecCompareTunableState; + + pThis->__knvdecClearInterrupt__ = &__nvoc_thunk_IntrService_knvdecClearInterrupt; + + pThis->__knvdecIsPresent__ = &__nvoc_thunk_OBJENGSTATE_knvdecIsPresent; + + pThis->__knvdecServiceInterrupt__ = &__nvoc_thunk_IntrService_knvdecServiceInterrupt; + + pThis->__knvdecReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvdecReconcileTunableState; + + pThis->__knvdecStateLoad__ = &__nvoc_thunk_OBJENGSTATE_knvdecStateLoad; + + pThis->__knvdecStateUnload__ = &__nvoc_thunk_OBJENGSTATE_knvdecStateUnload; + + pThis->__knvdecStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_knvdecStateInitLocked; + + pThis->__knvdecStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_knvdecStatePreLoad; + + pThis->__knvdecStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_knvdecStatePostUnload; + + pThis->__knvdecStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_knvdecStatePreUnload; + + pThis->__knvdecStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_knvdecStateInitUnlocked; + + pThis->__knvdecInitMissing__ = &__nvoc_thunk_OBJENGSTATE_knvdecInitMissing; + + pThis->__knvdecStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_knvdecStatePreInitLocked; + + pThis->__knvdecStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_knvdecStatePreInitUnlocked; + + pThis->__knvdecGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvdecGetTunableState; + + pThis->__knvdecStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_knvdecStatePostLoad; + + pThis->__knvdecAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvdecAllocTunableState; + + pThis->__knvdecSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvdecSetTunableState; +} + +void __nvoc_init_funcTable_KernelNvdec(KernelNvdec *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelNvdec_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*, RmHalspecOwner* ); +void __nvoc_init_KernelFalcon(KernelFalcon*, RmHalspecOwner* ); +void __nvoc_init_IntrService(IntrService*, RmHalspecOwner* ); +void __nvoc_init_KernelNvdec(KernelNvdec *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelNvdec = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + __nvoc_init_KernelFalcon(&pThis->__nvoc_base_KernelFalcon, pRmhalspecowner); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + __nvoc_init_funcTable_KernelNvdec(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelNvdec(KernelNvdec **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelNvdec *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelNvdec)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelNvdec)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelNvdec); + + if (pParent 
!= NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelNvdec(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelNvdec(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelNvdec_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelNvdec_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelNvdec(KernelNvdec **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelNvdec(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_nvdec_nvoc.h b/src/nvidia/generated/g_kernel_nvdec_nvoc.h new file mode 100644 index 000000000..e9fff46c4 --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvdec_nvoc.h @@ -0,0 +1,341 @@ +#ifndef _G_KERNEL_NVDEC_NVOC_H_ +#define _G_KERNEL_NVDEC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_nvdec_nvoc.h" + +#ifndef KERNEL_NVDEC_H +#define KERNEL_NVDEC_H + +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" + +#ifdef NVOC_KERNEL_NVDEC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelNvdec { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct KernelFalcon __nvoc_base_KernelFalcon; + struct IntrService __nvoc_base_IntrService; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelFalcon *__nvoc_pbase_KernelFalcon; + struct IntrService *__nvoc_pbase_IntrService; + struct KernelNvdec *__nvoc_pbase_KernelNvdec; + NV_STATUS (*__knvdecConstructEngine__)(struct OBJGPU *, struct KernelNvdec *, ENGDESCRIPTOR); + void (*__knvdecConfigureFalcon__)(struct OBJGPU *, struct KernelNvdec *); + NV_STATUS (*__knvdecResetHw__)(struct OBJGPU *, struct KernelNvdec *); + NvBool (*__knvdecIsEngineInReset__)(struct OBJGPU *, struct KernelNvdec *); + NvBool (*__knvdecIsEngineDisabled__)(struct OBJGPU *, struct KernelNvdec *); + NvU32 (*__knvdecReadUcodeFuseVersion__)(struct OBJGPU *, struct KernelNvdec *, NvU32); + void (*__knvdecRegisterIntrService__)(struct OBJGPU *, struct KernelNvdec *, IntrServiceRecord *); + NV_STATUS (*__knvdecServiceNotificationInterrupt__)(struct OBJGPU *, struct KernelNvdec *, IntrServiceServiceNotificationInterruptArguments *); + void (*__knvdecStateDestroy__)(POBJGPU, struct KernelNvdec *); + void (*__knvdecFreeTunableState__)(POBJGPU, struct KernelNvdec *, void *); + NV_STATUS (*__knvdecCompareTunableState__)(POBJGPU, struct KernelNvdec *, void *, void *); + NvBool (*__knvdecClearInterrupt__)(struct OBJGPU *, struct KernelNvdec *, IntrServiceClearInterruptArguments *); + NvBool (*__knvdecIsPresent__)(POBJGPU, struct KernelNvdec *); + NvU32 (*__knvdecServiceInterrupt__)(struct OBJGPU *, struct KernelNvdec *, IntrServiceServiceInterruptArguments *); + NV_STATUS (*__knvdecReconcileTunableState__)(POBJGPU, struct KernelNvdec *, void *); + NV_STATUS (*__knvdecStateLoad__)(POBJGPU, struct KernelNvdec *, NvU32); + NV_STATUS (*__knvdecStateUnload__)(POBJGPU, struct KernelNvdec *, NvU32); + NV_STATUS (*__knvdecStateInitLocked__)(POBJGPU, struct KernelNvdec *); + NV_STATUS (*__knvdecStatePreLoad__)(POBJGPU, struct KernelNvdec *, NvU32); + NV_STATUS (*__knvdecStatePostUnload__)(POBJGPU, struct KernelNvdec *, NvU32); + NV_STATUS (*__knvdecStatePreUnload__)(POBJGPU, struct KernelNvdec *, NvU32); + NV_STATUS (*__knvdecStateInitUnlocked__)(POBJGPU, struct KernelNvdec *); + void (*__knvdecInitMissing__)(POBJGPU, struct KernelNvdec *); + NV_STATUS (*__knvdecStatePreInitLocked__)(POBJGPU, struct KernelNvdec *); + NV_STATUS (*__knvdecStatePreInitUnlocked__)(POBJGPU, struct KernelNvdec *); + NV_STATUS (*__knvdecGetTunableState__)(POBJGPU, struct KernelNvdec *, void *); + NV_STATUS (*__knvdecStatePostLoad__)(POBJGPU, struct KernelNvdec *, NvU32); + NV_STATUS (*__knvdecAllocTunableState__)(POBJGPU, struct KernelNvdec *, void **); + NV_STATUS (*__knvdecSetTunableState__)(POBJGPU, struct KernelNvdec *, void *); +}; + +#ifndef __NVOC_CLASS_KernelNvdec_TYPEDEF__ +#define __NVOC_CLASS_KernelNvdec_TYPEDEF__ +typedef struct KernelNvdec KernelNvdec; +#endif /* __NVOC_CLASS_KernelNvdec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelNvdec +#define __nvoc_class_id_KernelNvdec 0xaba9df +#endif /* __nvoc_class_id_KernelNvdec */ + +extern const struct 
NVOC_CLASS_DEF __nvoc_class_def_KernelNvdec; + +#define __staticCast_KernelNvdec(pThis) \ + ((pThis)->__nvoc_pbase_KernelNvdec) + +#ifdef __nvoc_kernel_nvdec_h_disabled +#define __dynamicCast_KernelNvdec(pThis) ((KernelNvdec*)NULL) +#else //__nvoc_kernel_nvdec_h_disabled +#define __dynamicCast_KernelNvdec(pThis) \ + ((KernelNvdec*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelNvdec))) +#endif //__nvoc_kernel_nvdec_h_disabled + +#define PDB_PROP_KNVDEC_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KNVDEC_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelNvdec(KernelNvdec**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelNvdec(KernelNvdec**, Dynamic*, NvU32); +#define __objCreate_KernelNvdec(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelNvdec((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define knvdecConstructEngine(pGpu, pKernelNvdec, arg0) knvdecConstructEngine_DISPATCH(pGpu, pKernelNvdec, arg0) +#define knvdecConstructEngine_HAL(pGpu, pKernelNvdec, arg0) knvdecConstructEngine_DISPATCH(pGpu, pKernelNvdec, arg0) +#define knvdecConfigureFalcon(pGpu, pKernelNvdec) knvdecConfigureFalcon_DISPATCH(pGpu, pKernelNvdec) +#define knvdecConfigureFalcon_HAL(pGpu, pKernelNvdec) knvdecConfigureFalcon_DISPATCH(pGpu, pKernelNvdec) +#define knvdecResetHw(pGpu, pKernelNvdec) knvdecResetHw_DISPATCH(pGpu, pKernelNvdec) +#define knvdecResetHw_HAL(pGpu, pKernelNvdec) knvdecResetHw_DISPATCH(pGpu, pKernelNvdec) +#define knvdecIsEngineInReset(pGpu, pKernelNvdec) knvdecIsEngineInReset_DISPATCH(pGpu, pKernelNvdec) +#define knvdecIsEngineDisabled(pGpu, pKernelNvdec) knvdecIsEngineDisabled_DISPATCH(pGpu, pKernelNvdec) +#define knvdecIsEngineDisabled_HAL(pGpu, pKernelNvdec) knvdecIsEngineDisabled_DISPATCH(pGpu, pKernelNvdec) +#define knvdecReadUcodeFuseVersion(pGpu, pKernelNvdec, ucodeId) knvdecReadUcodeFuseVersion_DISPATCH(pGpu, pKernelNvdec, ucodeId) +#define knvdecReadUcodeFuseVersion_HAL(pGpu, pKernelNvdec, ucodeId) knvdecReadUcodeFuseVersion_DISPATCH(pGpu, pKernelNvdec, ucodeId) +#define knvdecRegisterIntrService(arg0, arg1, arg2) knvdecRegisterIntrService_DISPATCH(arg0, arg1, arg2) +#define knvdecServiceNotificationInterrupt(arg0, arg1, arg2) knvdecServiceNotificationInterrupt_DISPATCH(arg0, arg1, arg2) +#define knvdecStateDestroy(pGpu, pEngstate) knvdecStateDestroy_DISPATCH(pGpu, pEngstate) +#define knvdecFreeTunableState(pGpu, pEngstate, pTunableState) knvdecFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define knvdecCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) knvdecCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define knvdecClearInterrupt(pGpu, pIntrService, pParams) knvdecClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define knvdecIsPresent(pGpu, pEngstate) knvdecIsPresent_DISPATCH(pGpu, pEngstate) +#define knvdecServiceInterrupt(pGpu, pIntrService, pParams) knvdecServiceInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define knvdecReconcileTunableState(pGpu, pEngstate, pTunableState) knvdecReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define knvdecStateLoad(pGpu, pEngstate, arg0) knvdecStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define knvdecStateUnload(pGpu, pEngstate, arg0) knvdecStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define knvdecStateInitLocked(pGpu, pEngstate) knvdecStateInitLocked_DISPATCH(pGpu, pEngstate) +#define knvdecStatePreLoad(pGpu, pEngstate, arg0) knvdecStatePreLoad_DISPATCH(pGpu, 
pEngstate, arg0) +#define knvdecStatePostUnload(pGpu, pEngstate, arg0) knvdecStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define knvdecStatePreUnload(pGpu, pEngstate, arg0) knvdecStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define knvdecStateInitUnlocked(pGpu, pEngstate) knvdecStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define knvdecInitMissing(pGpu, pEngstate) knvdecInitMissing_DISPATCH(pGpu, pEngstate) +#define knvdecStatePreInitLocked(pGpu, pEngstate) knvdecStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define knvdecStatePreInitUnlocked(pGpu, pEngstate) knvdecStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define knvdecGetTunableState(pGpu, pEngstate, pTunableState) knvdecGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define knvdecStatePostLoad(pGpu, pEngstate, arg0) knvdecStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define knvdecAllocTunableState(pGpu, pEngstate, ppTunableState) knvdecAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define knvdecSetTunableState(pGpu, pEngstate, pTunableState) knvdecSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +void knvdecNonstallIntrCheckAndClear_TU102(struct OBJGPU *arg0, struct KernelNvdec *arg1, struct THREAD_STATE_NODE *arg2); + +#ifdef __nvoc_kernel_nvdec_h_disabled +static inline void knvdecNonstallIntrCheckAndClear(struct OBJGPU *arg0, struct KernelNvdec *arg1, struct THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelNvdec was disabled!"); +} +#else //__nvoc_kernel_nvdec_h_disabled +#define knvdecNonstallIntrCheckAndClear(arg0, arg1, arg2) knvdecNonstallIntrCheckAndClear_TU102(arg0, arg1, arg2) +#endif //__nvoc_kernel_nvdec_h_disabled + +#define knvdecNonstallIntrCheckAndClear_HAL(arg0, arg1, arg2) knvdecNonstallIntrCheckAndClear(arg0, arg1, arg2) + +NV_STATUS knvdecConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec, ENGDESCRIPTOR arg0); + +static inline NV_STATUS knvdecConstructEngine_395e98(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec, ENGDESCRIPTOR arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS knvdecConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec, ENGDESCRIPTOR arg0) { + return pKernelNvdec->__knvdecConstructEngine__(pGpu, pKernelNvdec, arg0); +} + +void knvdecConfigureFalcon_TU102(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec); + +void knvdecConfigureFalcon_GA100(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec); + +void knvdecConfigureFalcon_GA102(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec); + +static inline void knvdecConfigureFalcon_f2d351(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + NV_ASSERT_PRECOMP(0); +} + +static inline void knvdecConfigureFalcon_DISPATCH(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + pKernelNvdec->__knvdecConfigureFalcon__(pGpu, pKernelNvdec); +} + +static inline NV_STATUS knvdecResetHw_ac1694(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + return NV_OK; +} + +static inline NV_STATUS knvdecResetHw_5baef9(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS knvdecResetHw_DISPATCH(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + return pKernelNvdec->__knvdecResetHw__(pGpu, pKernelNvdec); +} + +static inline NvBool knvdecIsEngineInReset_167f46(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0))); + return ((NvBool)(0 != 0)); +} + +static inline NvBool 
knvdecIsEngineInReset_DISPATCH(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + return pKernelNvdec->__knvdecIsEngineInReset__(pGpu, pKernelNvdec); +} + +NvBool knvdecIsEngineDisabled_TU102(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec); + +NvBool knvdecIsEngineDisabled_GA100(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec); + +NvBool knvdecIsEngineDisabled_GA102(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec); + +static inline NvBool knvdecIsEngineDisabled_108313(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0))); +} + +static inline NvBool knvdecIsEngineDisabled_DISPATCH(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec) { + return pKernelNvdec->__knvdecIsEngineDisabled__(pGpu, pKernelNvdec); +} + +static inline NvU32 knvdecReadUcodeFuseVersion_b2b553(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec, NvU32 ucodeId) { + return 0; +} + +NvU32 knvdecReadUcodeFuseVersion_GA100(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec, NvU32 ucodeId); + +static inline NvU32 knvdecReadUcodeFuseVersion_474d46(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec, NvU32 ucodeId) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 knvdecReadUcodeFuseVersion_DISPATCH(struct OBJGPU *pGpu, struct KernelNvdec *pKernelNvdec, NvU32 ucodeId) { + return pKernelNvdec->__knvdecReadUcodeFuseVersion__(pGpu, pKernelNvdec, ucodeId); +} + +void knvdecRegisterIntrService_IMPL(struct OBJGPU *arg0, struct KernelNvdec *arg1, IntrServiceRecord arg2[155]); + +static inline void knvdecRegisterIntrService_DISPATCH(struct OBJGPU *arg0, struct KernelNvdec *arg1, IntrServiceRecord arg2[155]) { + arg1->__knvdecRegisterIntrService__(arg0, arg1, arg2); +} + +NV_STATUS knvdecServiceNotificationInterrupt_IMPL(struct OBJGPU *arg0, struct KernelNvdec *arg1, IntrServiceServiceNotificationInterruptArguments *arg2); + +static inline NV_STATUS knvdecServiceNotificationInterrupt_DISPATCH(struct OBJGPU *arg0, struct KernelNvdec *arg1, IntrServiceServiceNotificationInterruptArguments *arg2) { + return arg1->__knvdecServiceNotificationInterrupt__(arg0, arg1, arg2); +} + +static inline void knvdecStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + pEngstate->__knvdecStateDestroy__(pGpu, pEngstate); +} + +static inline void knvdecFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunableState) { + pEngstate->__knvdecFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS knvdecCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__knvdecCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline NvBool knvdecClearInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelNvdec *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__knvdecClearInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NvBool knvdecIsPresent_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return pEngstate->__knvdecIsPresent__(pGpu, pEngstate); +} + +static inline NvU32 knvdecServiceInterrupt_DISPATCH(struct OBJGPU *pGpu, struct KernelNvdec *pIntrService, IntrServiceServiceInterruptArguments *pParams) { + return pIntrService->__knvdecServiceInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS knvdecReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunableState) { + return 
pEngstate->__knvdecReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS knvdecStateLoad_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return pEngstate->__knvdecStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS knvdecStateUnload_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return pEngstate->__knvdecStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS knvdecStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return pEngstate->__knvdecStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS knvdecStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return pEngstate->__knvdecStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS knvdecStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return pEngstate->__knvdecStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS knvdecStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return pEngstate->__knvdecStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS knvdecStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return pEngstate->__knvdecStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void knvdecInitMissing_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + pEngstate->__knvdecInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS knvdecStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return pEngstate->__knvdecStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS knvdecStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate) { + return pEngstate->__knvdecStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS knvdecGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunableState) { + return pEngstate->__knvdecGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS knvdecStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, NvU32 arg0) { + return pEngstate->__knvdecStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS knvdecAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, void **ppTunableState) { + return pEngstate->__knvdecAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS knvdecSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvdec *pEngstate, void *pTunableState) { + return pEngstate->__knvdecSetTunableState__(pGpu, pEngstate, pTunableState); +} + +#undef PRIVATE_FIELD + + +#endif // KERNEL_NVDEC_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_NVDEC_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_nvenc_ctx_nvoc.c b/src/nvidia/generated/g_kernel_nvenc_ctx_nvoc.c new file mode 100644 index 000000000..37f36484e --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvenc_ctx_nvoc.c @@ -0,0 +1,415 @@ +#define NVOC_KERNEL_NVENC_CTX_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_nvenc_ctx_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x88c92a = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MsencContext; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct 
NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_MsencContext(MsencContext*, RmHalspecOwner* ); +void __nvoc_init_funcTable_MsencContext(MsencContext*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_MsencContext(MsencContext*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_MsencContext(MsencContext*, RmHalspecOwner* ); +void __nvoc_dtor_MsencContext(MsencContext*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MsencContext; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_MsencContext = { + /*pClassDef=*/ &__nvoc_class_def_MsencContext, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MsencContext, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MsencContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MsencContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MsencContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MsencContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MsencContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MsencContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MsencContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MsencContext_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MsencContext, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MsencContext = { + /*numRelatives=*/ 9, + /*relatives=*/ { + 
&__nvoc_rtti_MsencContext_MsencContext, + &__nvoc_rtti_MsencContext_ChannelDescendant, + &__nvoc_rtti_MsencContext_Notifier, + &__nvoc_rtti_MsencContext_INotifier, + &__nvoc_rtti_MsencContext_GpuResource, + &__nvoc_rtti_MsencContext_RmResource, + &__nvoc_rtti_MsencContext_RmResourceCommon, + &__nvoc_rtti_MsencContext_RsResource, + &__nvoc_rtti_MsencContext_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MsencContext = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MsencContext), + /*classId=*/ classId(MsencContext), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MsencContext", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MsencContext, + /*pCastInfo=*/ &__nvoc_castinfo_MsencContext, + /*pExportInfo=*/ &__nvoc_export_info_MsencContext +}; + +static NV_STATUS __nvoc_thunk_ChannelDescendant_msencctxCheckMemInterUnmap(struct MsencContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_MsencContext_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_msencctxShareCallback(struct MsencContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MsencContext_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_msencctxAccessCallback(struct MsencContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_msencctxMapTo(struct MsencContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_msencctxGetMapAddrSpace(struct MsencContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MsencContext_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_msencctxSetNotificationShare(struct MsencContext *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MsencContext_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_msencctxGetRefCount(struct MsencContext *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_msencctxAddAdditionalDependants(struct RsClient *pClient, struct MsencContext *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_msencctxControl_Prologue(struct MsencContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct 
RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_msencctxGetRegBaseOffsetAndSize(struct MsencContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MsencContext_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_msencctxInternalControlForward(struct MsencContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MsencContext_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_msencctxUnmapFrom(struct MsencContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_msencctxControl_Epilogue(struct MsencContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_msencctxControlLookup(struct MsencContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_msencctxGetSwMethods(struct MsencContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return chandesGetSwMethods((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_MsencContext_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NvHandle __nvoc_thunk_GpuResource_msencctxGetInternalObjectHandle(struct MsencContext *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MsencContext_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_msencctxControl(struct MsencContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MsencContext_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_msencctxUnmap(struct MsencContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MsencContext_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_msencctxGetMemInterMapParams(struct MsencContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MsencContext_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_msencctxGetMemoryMappingDescriptor(struct MsencContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MsencContext_RmResource.offset), 
ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_msencctxIsSwMethodStalling(struct MsencContext *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_MsencContext_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_msencctxControlFilter(struct MsencContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_msencctxUnregisterEvent(struct MsencContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MsencContext_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_msencctxCanCopy(struct MsencContext *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_msencctxPreDestruct(struct MsencContext *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MsencContext_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_msencctxGetNotificationListPtr(struct MsencContext *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MsencContext_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_msencctxGetNotificationShare(struct MsencContext *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MsencContext_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_msencctxMap(struct MsencContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MsencContext_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_msencctxGetOrAllocNotifShare(struct MsencContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MsencContext_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_MsencContext = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_MsencContext(MsencContext *pThis) { + __nvoc_msencctxDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MsencContext(MsencContext *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS 
__nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, ENGDESCRIPTOR (*)(struct OBJGPU *, NvU32, void *)); +NV_STATUS __nvoc_ctor_MsencContext(MsencContext *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, msencGetEngineDescFromAllocParams); + if (status != NV_OK) goto __nvoc_ctor_MsencContext_fail_ChannelDescendant; + __nvoc_init_dataField_MsencContext(pThis, pRmhalspecowner); + + status = __nvoc_msencctxConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MsencContext_fail__init; + goto __nvoc_ctor_MsencContext_exit; // Success + +__nvoc_ctor_MsencContext_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_MsencContext_fail_ChannelDescendant: +__nvoc_ctor_MsencContext_exit: + + return status; +} + +static void __nvoc_init_funcTable_MsencContext_1(MsencContext *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__msencctxCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_msencctxCheckMemInterUnmap; + + pThis->__msencctxShareCallback__ = &__nvoc_thunk_GpuResource_msencctxShareCallback; + + pThis->__msencctxAccessCallback__ = &__nvoc_thunk_RmResource_msencctxAccessCallback; + + pThis->__msencctxMapTo__ = &__nvoc_thunk_RsResource_msencctxMapTo; + + pThis->__msencctxGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_msencctxGetMapAddrSpace; + + pThis->__msencctxSetNotificationShare__ = &__nvoc_thunk_Notifier_msencctxSetNotificationShare; + + pThis->__msencctxGetRefCount__ = &__nvoc_thunk_RsResource_msencctxGetRefCount; + + pThis->__msencctxAddAdditionalDependants__ = &__nvoc_thunk_RsResource_msencctxAddAdditionalDependants; + + pThis->__msencctxControl_Prologue__ = &__nvoc_thunk_RmResource_msencctxControl_Prologue; + + pThis->__msencctxGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_msencctxGetRegBaseOffsetAndSize; + + pThis->__msencctxInternalControlForward__ = &__nvoc_thunk_GpuResource_msencctxInternalControlForward; + + pThis->__msencctxUnmapFrom__ = &__nvoc_thunk_RsResource_msencctxUnmapFrom; + + pThis->__msencctxControl_Epilogue__ = &__nvoc_thunk_RmResource_msencctxControl_Epilogue; + + pThis->__msencctxControlLookup__ = &__nvoc_thunk_RsResource_msencctxControlLookup; + + pThis->__msencctxGetSwMethods__ = &__nvoc_thunk_ChannelDescendant_msencctxGetSwMethods; + + pThis->__msencctxGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_msencctxGetInternalObjectHandle; + + pThis->__msencctxControl__ = &__nvoc_thunk_GpuResource_msencctxControl; + + pThis->__msencctxUnmap__ = &__nvoc_thunk_GpuResource_msencctxUnmap; + + pThis->__msencctxGetMemInterMapParams__ = &__nvoc_thunk_RmResource_msencctxGetMemInterMapParams; + + pThis->__msencctxGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_msencctxGetMemoryMappingDescriptor; + + pThis->__msencctxIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_msencctxIsSwMethodStalling; + + pThis->__msencctxControlFilter__ = 
&__nvoc_thunk_RsResource_msencctxControlFilter; + + pThis->__msencctxUnregisterEvent__ = &__nvoc_thunk_Notifier_msencctxUnregisterEvent; + + pThis->__msencctxCanCopy__ = &__nvoc_thunk_RsResource_msencctxCanCopy; + + pThis->__msencctxPreDestruct__ = &__nvoc_thunk_RsResource_msencctxPreDestruct; + + pThis->__msencctxGetNotificationListPtr__ = &__nvoc_thunk_Notifier_msencctxGetNotificationListPtr; + + pThis->__msencctxGetNotificationShare__ = &__nvoc_thunk_Notifier_msencctxGetNotificationShare; + + pThis->__msencctxMap__ = &__nvoc_thunk_GpuResource_msencctxMap; + + pThis->__msencctxGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_msencctxGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_MsencContext(MsencContext *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_MsencContext_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_MsencContext(MsencContext *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_MsencContext = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_MsencContext(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_MsencContext(MsencContext **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + MsencContext *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(MsencContext)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MsencContext)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MsencContext); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_MsencContext(pThis, pRmhalspecowner); + status = __nvoc_ctor_MsencContext(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto 
__nvoc_objCreate_MsencContext_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MsencContext_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MsencContext(MsencContext **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_MsencContext(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_nvenc_ctx_nvoc.h b/src/nvidia/generated/g_kernel_nvenc_ctx_nvoc.h new file mode 100644 index 000000000..e2e0e70ac --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvenc_ctx_nvoc.h @@ -0,0 +1,306 @@ +#ifndef _G_KERNEL_NVENC_CTX_NVOC_H_ +#define _G_KERNEL_NVENC_CTX_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_nvenc_ctx_nvoc.h" + +#ifndef KERNEL_NVENC_CTX_H +#define KERNEL_NVENC_CTX_H + +#include "kernel/gpu/fifo/channel_descendant.h" + +ENGDESCRIPTOR msencGetEngineDescFromAllocParams(OBJGPU *pGpu, NvU32 externalClassId, void *pAllocParams); + +/*! 
+ * RM internal class representing NVXXXX_VIDEO_ENCODER + */ +#ifdef NVOC_KERNEL_NVENC_CTX_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct MsencContext { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct MsencContext *__nvoc_pbase_MsencContext; + NV_STATUS (*__msencctxCheckMemInterUnmap__)(struct MsencContext *, NvBool); + NvBool (*__msencctxShareCallback__)(struct MsencContext *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__msencctxAccessCallback__)(struct MsencContext *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__msencctxMapTo__)(struct MsencContext *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__msencctxGetMapAddrSpace__)(struct MsencContext *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__msencctxSetNotificationShare__)(struct MsencContext *, struct NotifShare *); + NvU32 (*__msencctxGetRefCount__)(struct MsencContext *); + void (*__msencctxAddAdditionalDependants__)(struct RsClient *, struct MsencContext *, RsResourceRef *); + NV_STATUS (*__msencctxControl_Prologue__)(struct MsencContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__msencctxGetRegBaseOffsetAndSize__)(struct MsencContext *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__msencctxInternalControlForward__)(struct MsencContext *, NvU32, void *, NvU32); + NV_STATUS (*__msencctxUnmapFrom__)(struct MsencContext *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__msencctxControl_Epilogue__)(struct MsencContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__msencctxControlLookup__)(struct MsencContext *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__msencctxGetSwMethods__)(struct MsencContext *, METHOD **, NvU32 *); + NvHandle (*__msencctxGetInternalObjectHandle__)(struct MsencContext *); + NV_STATUS (*__msencctxControl__)(struct MsencContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__msencctxUnmap__)(struct MsencContext *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__msencctxGetMemInterMapParams__)(struct MsencContext *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__msencctxGetMemoryMappingDescriptor__)(struct MsencContext *, struct MEMORY_DESCRIPTOR **); + NvBool (*__msencctxIsSwMethodStalling__)(struct MsencContext *, NvU32); + NV_STATUS (*__msencctxControlFilter__)(struct MsencContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__msencctxUnregisterEvent__)(struct MsencContext *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__msencctxCanCopy__)(struct MsencContext *); + void (*__msencctxPreDestruct__)(struct MsencContext *); + PEVENTNOTIFICATION *(*__msencctxGetNotificationListPtr__)(struct MsencContext *); + struct NotifShare *(*__msencctxGetNotificationShare__)(struct MsencContext *); + NV_STATUS (*__msencctxMap__)(struct MsencContext *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS 
(*__msencctxGetOrAllocNotifShare__)(struct MsencContext *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_MsencContext_TYPEDEF__ +#define __NVOC_CLASS_MsencContext_TYPEDEF__ +typedef struct MsencContext MsencContext; +#endif /* __NVOC_CLASS_MsencContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MsencContext +#define __nvoc_class_id_MsencContext 0x88c92a +#endif /* __nvoc_class_id_MsencContext */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MsencContext; + +#define __staticCast_MsencContext(pThis) \ + ((pThis)->__nvoc_pbase_MsencContext) + +#ifdef __nvoc_kernel_nvenc_ctx_h_disabled +#define __dynamicCast_MsencContext(pThis) ((MsencContext*)NULL) +#else //__nvoc_kernel_nvenc_ctx_h_disabled +#define __dynamicCast_MsencContext(pThis) \ + ((MsencContext*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MsencContext))) +#endif //__nvoc_kernel_nvenc_ctx_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_MsencContext(MsencContext**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MsencContext(MsencContext**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_MsencContext(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_MsencContext((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define msencctxCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) msencctxCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define msencctxShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) msencctxShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define msencctxAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) msencctxAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define msencctxMapTo(pResource, pParams) msencctxMapTo_DISPATCH(pResource, pParams) +#define msencctxGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) msencctxGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define msencctxSetNotificationShare(pNotifier, pNotifShare) msencctxSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define msencctxGetRefCount(pResource) msencctxGetRefCount_DISPATCH(pResource) +#define msencctxAddAdditionalDependants(pClient, pResource, pReference) msencctxAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define msencctxControl_Prologue(pResource, pCallContext, pParams) msencctxControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define msencctxGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) msencctxGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define msencctxInternalControlForward(pGpuResource, command, pParams, size) msencctxInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define msencctxUnmapFrom(pResource, pParams) msencctxUnmapFrom_DISPATCH(pResource, pParams) +#define msencctxControl_Epilogue(pResource, pCallContext, pParams) msencctxControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define msencctxControlLookup(pResource, pParams, ppEntry) msencctxControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define msencctxGetSwMethods(pChannelDescendant, ppMethods, pNumMethods) msencctxGetSwMethods_DISPATCH(pChannelDescendant, ppMethods, pNumMethods) +#define msencctxGetInternalObjectHandle(pGpuResource) 
msencctxGetInternalObjectHandle_DISPATCH(pGpuResource) +#define msencctxControl(pGpuResource, pCallContext, pParams) msencctxControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define msencctxUnmap(pGpuResource, pCallContext, pCpuMapping) msencctxUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define msencctxGetMemInterMapParams(pRmResource, pParams) msencctxGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define msencctxGetMemoryMappingDescriptor(pRmResource, ppMemDesc) msencctxGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define msencctxIsSwMethodStalling(pChannelDescendant, hHandle) msencctxIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define msencctxControlFilter(pResource, pCallContext, pParams) msencctxControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define msencctxUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) msencctxUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define msencctxCanCopy(pResource) msencctxCanCopy_DISPATCH(pResource) +#define msencctxPreDestruct(pResource) msencctxPreDestruct_DISPATCH(pResource) +#define msencctxGetNotificationListPtr(pNotifier) msencctxGetNotificationListPtr_DISPATCH(pNotifier) +#define msencctxGetNotificationShare(pNotifier) msencctxGetNotificationShare_DISPATCH(pNotifier) +#define msencctxMap(pGpuResource, pCallContext, pParams, pCpuMapping) msencctxMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define msencctxGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) msencctxGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS msencctxConstructHal_KERNEL(struct MsencContext *pMsencContext, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_kernel_nvenc_ctx_h_disabled +static inline NV_STATUS msencctxConstructHal(struct MsencContext *pMsencContext, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("MsencContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvenc_ctx_h_disabled +#define msencctxConstructHal(pMsencContext, pCallContext, pParams) msencctxConstructHal_KERNEL(pMsencContext, pCallContext, pParams) +#endif //__nvoc_kernel_nvenc_ctx_h_disabled + +#define msencctxConstructHal_HAL(pMsencContext, pCallContext, pParams) msencctxConstructHal(pMsencContext, pCallContext, pParams) + +void msencctxDestructHal_KERNEL(struct MsencContext *pMsencContext); + +#ifdef __nvoc_kernel_nvenc_ctx_h_disabled +static inline void msencctxDestructHal(struct MsencContext *pMsencContext) { + NV_ASSERT_FAILED_PRECOMP("MsencContext was disabled!"); +} +#else //__nvoc_kernel_nvenc_ctx_h_disabled +#define msencctxDestructHal(pMsencContext) msencctxDestructHal_KERNEL(pMsencContext) +#endif //__nvoc_kernel_nvenc_ctx_h_disabled + +#define msencctxDestructHal_HAL(pMsencContext) msencctxDestructHal(pMsencContext) + +static inline NV_STATUS msencctxCheckMemInterUnmap_DISPATCH(struct MsencContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__msencctxCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool msencctxShareCallback_DISPATCH(struct MsencContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__msencctxShareCallback__(pGpuResource, 
pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool msencctxAccessCallback_DISPATCH(struct MsencContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__msencctxAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS msencctxMapTo_DISPATCH(struct MsencContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__msencctxMapTo__(pResource, pParams); +} + +static inline NV_STATUS msencctxGetMapAddrSpace_DISPATCH(struct MsencContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__msencctxGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void msencctxSetNotificationShare_DISPATCH(struct MsencContext *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__msencctxSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 msencctxGetRefCount_DISPATCH(struct MsencContext *pResource) { + return pResource->__msencctxGetRefCount__(pResource); +} + +static inline void msencctxAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct MsencContext *pResource, RsResourceRef *pReference) { + pResource->__msencctxAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS msencctxControl_Prologue_DISPATCH(struct MsencContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__msencctxControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS msencctxGetRegBaseOffsetAndSize_DISPATCH(struct MsencContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__msencctxGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS msencctxInternalControlForward_DISPATCH(struct MsencContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__msencctxInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS msencctxUnmapFrom_DISPATCH(struct MsencContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__msencctxUnmapFrom__(pResource, pParams); +} + +static inline void msencctxControl_Epilogue_DISPATCH(struct MsencContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__msencctxControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS msencctxControlLookup_DISPATCH(struct MsencContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__msencctxControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS msencctxGetSwMethods_DISPATCH(struct MsencContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return pChannelDescendant->__msencctxGetSwMethods__(pChannelDescendant, ppMethods, pNumMethods); +} + +static inline NvHandle msencctxGetInternalObjectHandle_DISPATCH(struct MsencContext *pGpuResource) { + return pGpuResource->__msencctxGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS msencctxControl_DISPATCH(struct MsencContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__msencctxControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS 
msencctxUnmap_DISPATCH(struct MsencContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__msencctxUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS msencctxGetMemInterMapParams_DISPATCH(struct MsencContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__msencctxGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS msencctxGetMemoryMappingDescriptor_DISPATCH(struct MsencContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__msencctxGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool msencctxIsSwMethodStalling_DISPATCH(struct MsencContext *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__msencctxIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS msencctxControlFilter_DISPATCH(struct MsencContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__msencctxControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS msencctxUnregisterEvent_DISPATCH(struct MsencContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__msencctxUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool msencctxCanCopy_DISPATCH(struct MsencContext *pResource) { + return pResource->__msencctxCanCopy__(pResource); +} + +static inline void msencctxPreDestruct_DISPATCH(struct MsencContext *pResource) { + pResource->__msencctxPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *msencctxGetNotificationListPtr_DISPATCH(struct MsencContext *pNotifier) { + return pNotifier->__msencctxGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *msencctxGetNotificationShare_DISPATCH(struct MsencContext *pNotifier) { + return pNotifier->__msencctxGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS msencctxMap_DISPATCH(struct MsencContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__msencctxMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS msencctxGetOrAllocNotifShare_DISPATCH(struct MsencContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__msencctxGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS __nvoc_msencctxConstruct(struct MsencContext *arg_pMsencContext, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams) { + return msencctxConstructHal(arg_pMsencContext, arg_pCallContext, arg_pParams); +} + +static inline void __nvoc_msencctxDestruct(struct MsencContext *pMsencContext) { + msencctxDestructHal(pMsencContext); +} + +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_NVENC_CTX_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_nvjpg_ctx_nvoc.c b/src/nvidia/generated/g_kernel_nvjpg_ctx_nvoc.c new file mode 100644 index 000000000..b1a2e1607 --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvjpg_ctx_nvoc.c @@ -0,0 +1,415 @@ +#define NVOC_KERNEL_NVJPG_CTX_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" 
+#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_nvjpg_ctx_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x08c1ce = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvjpgContext; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_NvjpgContext(NvjpgContext*, RmHalspecOwner* ); +void __nvoc_init_funcTable_NvjpgContext(NvjpgContext*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_NvjpgContext(NvjpgContext*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_NvjpgContext(NvjpgContext*, RmHalspecOwner* ); +void __nvoc_dtor_NvjpgContext(NvjpgContext*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NvjpgContext; + +static const struct NVOC_RTTI __nvoc_rtti_NvjpgContext_NvjpgContext = { + /*pClassDef=*/ &__nvoc_class_def_NvjpgContext, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NvjpgContext, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvjpgContext_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvjpgContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvjpgContext_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvjpgContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvjpgContext_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvjpgContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvjpgContext_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvjpgContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvjpgContext_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvjpgContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvjpgContext_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvjpgContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvjpgContext_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvjpgContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct 
NVOC_RTTI __nvoc_rtti_NvjpgContext_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvjpgContext, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_NvjpgContext = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_NvjpgContext_NvjpgContext, + &__nvoc_rtti_NvjpgContext_ChannelDescendant, + &__nvoc_rtti_NvjpgContext_Notifier, + &__nvoc_rtti_NvjpgContext_INotifier, + &__nvoc_rtti_NvjpgContext_GpuResource, + &__nvoc_rtti_NvjpgContext_RmResource, + &__nvoc_rtti_NvjpgContext_RmResourceCommon, + &__nvoc_rtti_NvjpgContext_RsResource, + &__nvoc_rtti_NvjpgContext_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_NvjpgContext = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NvjpgContext), + /*classId=*/ classId(NvjpgContext), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NvjpgContext", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NvjpgContext, + /*pCastInfo=*/ &__nvoc_castinfo_NvjpgContext, + /*pExportInfo=*/ &__nvoc_export_info_NvjpgContext +}; + +static NV_STATUS __nvoc_thunk_ChannelDescendant_nvjpgctxCheckMemInterUnmap(struct NvjpgContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_NvjpgContext_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_nvjpgctxShareCallback(struct NvjpgContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvjpgContext_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_nvjpgctxAccessCallback(struct NvjpgContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvjpgctxMapTo(struct NvjpgContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvjpgctxGetMapAddrSpace(struct NvjpgContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvjpgContext_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_nvjpgctxSetNotificationShare(struct NvjpgContext *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvjpgContext_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_nvjpgctxGetRefCount(struct NvjpgContext *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_nvjpgctxAddAdditionalDependants(struct RsClient *pClient, struct NvjpgContext *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct 
RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvjpgctxControl_Prologue(struct NvjpgContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvjpgctxGetRegBaseOffsetAndSize(struct NvjpgContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvjpgContext_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvjpgctxInternalControlForward(struct NvjpgContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvjpgContext_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvjpgctxUnmapFrom(struct NvjpgContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_nvjpgctxControl_Epilogue(struct NvjpgContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvjpgctxControlLookup(struct NvjpgContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_nvjpgctxGetSwMethods(struct NvjpgContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return chandesGetSwMethods((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_NvjpgContext_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NvHandle __nvoc_thunk_GpuResource_nvjpgctxGetInternalObjectHandle(struct NvjpgContext *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvjpgContext_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvjpgctxControl(struct NvjpgContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvjpgContext_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvjpgctxUnmap(struct NvjpgContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvjpgContext_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvjpgctxGetMemInterMapParams(struct NvjpgContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + 
__nvoc_rtti_NvjpgContext_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_nvjpgctxGetMemoryMappingDescriptor(struct NvjpgContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_NvjpgContext_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_nvjpgctxIsSwMethodStalling(struct NvjpgContext *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_NvjpgContext_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_nvjpgctxControlFilter(struct NvjpgContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_nvjpgctxUnregisterEvent(struct NvjpgContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvjpgContext_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_nvjpgctxCanCopy(struct NvjpgContext *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_nvjpgctxPreDestruct(struct NvjpgContext *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NvjpgContext_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_nvjpgctxGetNotificationListPtr(struct NvjpgContext *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvjpgContext_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_nvjpgctxGetNotificationShare(struct NvjpgContext *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvjpgContext_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_nvjpgctxMap(struct NvjpgContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_NvjpgContext_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_nvjpgctxGetOrAllocNotifShare(struct NvjpgContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_NvjpgContext_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_NvjpgContext = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_NvjpgContext(NvjpgContext *pThis) { + __nvoc_nvjpgctxDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NvjpgContext(NvjpgContext *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = 
&pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, ENGDESCRIPTOR (*)(struct OBJGPU *, NvU32, void *)); +NV_STATUS __nvoc_ctor_NvjpgContext(NvjpgContext *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, nvjpgGetEngineDescFromAllocParams); + if (status != NV_OK) goto __nvoc_ctor_NvjpgContext_fail_ChannelDescendant; + __nvoc_init_dataField_NvjpgContext(pThis, pRmhalspecowner); + + status = __nvoc_nvjpgctxConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NvjpgContext_fail__init; + goto __nvoc_ctor_NvjpgContext_exit; // Success + +__nvoc_ctor_NvjpgContext_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_NvjpgContext_fail_ChannelDescendant: +__nvoc_ctor_NvjpgContext_exit: + + return status; +} + +static void __nvoc_init_funcTable_NvjpgContext_1(NvjpgContext *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__nvjpgctxCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_nvjpgctxCheckMemInterUnmap; + + pThis->__nvjpgctxShareCallback__ = &__nvoc_thunk_GpuResource_nvjpgctxShareCallback; + + pThis->__nvjpgctxAccessCallback__ = &__nvoc_thunk_RmResource_nvjpgctxAccessCallback; + + pThis->__nvjpgctxMapTo__ = &__nvoc_thunk_RsResource_nvjpgctxMapTo; + + pThis->__nvjpgctxGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_nvjpgctxGetMapAddrSpace; + + pThis->__nvjpgctxSetNotificationShare__ = &__nvoc_thunk_Notifier_nvjpgctxSetNotificationShare; + + pThis->__nvjpgctxGetRefCount__ = &__nvoc_thunk_RsResource_nvjpgctxGetRefCount; + + pThis->__nvjpgctxAddAdditionalDependants__ = &__nvoc_thunk_RsResource_nvjpgctxAddAdditionalDependants; + + pThis->__nvjpgctxControl_Prologue__ = &__nvoc_thunk_RmResource_nvjpgctxControl_Prologue; + + pThis->__nvjpgctxGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_nvjpgctxGetRegBaseOffsetAndSize; + + pThis->__nvjpgctxInternalControlForward__ = &__nvoc_thunk_GpuResource_nvjpgctxInternalControlForward; + + pThis->__nvjpgctxUnmapFrom__ = &__nvoc_thunk_RsResource_nvjpgctxUnmapFrom; + + pThis->__nvjpgctxControl_Epilogue__ = &__nvoc_thunk_RmResource_nvjpgctxControl_Epilogue; + + pThis->__nvjpgctxControlLookup__ = &__nvoc_thunk_RsResource_nvjpgctxControlLookup; + + pThis->__nvjpgctxGetSwMethods__ = &__nvoc_thunk_ChannelDescendant_nvjpgctxGetSwMethods; + + pThis->__nvjpgctxGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_nvjpgctxGetInternalObjectHandle; + + pThis->__nvjpgctxControl__ = &__nvoc_thunk_GpuResource_nvjpgctxControl; + + pThis->__nvjpgctxUnmap__ = &__nvoc_thunk_GpuResource_nvjpgctxUnmap; + + 
pThis->__nvjpgctxGetMemInterMapParams__ = &__nvoc_thunk_RmResource_nvjpgctxGetMemInterMapParams; + + pThis->__nvjpgctxGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_nvjpgctxGetMemoryMappingDescriptor; + + pThis->__nvjpgctxIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_nvjpgctxIsSwMethodStalling; + + pThis->__nvjpgctxControlFilter__ = &__nvoc_thunk_RsResource_nvjpgctxControlFilter; + + pThis->__nvjpgctxUnregisterEvent__ = &__nvoc_thunk_Notifier_nvjpgctxUnregisterEvent; + + pThis->__nvjpgctxCanCopy__ = &__nvoc_thunk_RsResource_nvjpgctxCanCopy; + + pThis->__nvjpgctxPreDestruct__ = &__nvoc_thunk_RsResource_nvjpgctxPreDestruct; + + pThis->__nvjpgctxGetNotificationListPtr__ = &__nvoc_thunk_Notifier_nvjpgctxGetNotificationListPtr; + + pThis->__nvjpgctxGetNotificationShare__ = &__nvoc_thunk_Notifier_nvjpgctxGetNotificationShare; + + pThis->__nvjpgctxMap__ = &__nvoc_thunk_GpuResource_nvjpgctxMap; + + pThis->__nvjpgctxGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_nvjpgctxGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_NvjpgContext(NvjpgContext *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_NvjpgContext_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_NvjpgContext(NvjpgContext *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_NvjpgContext = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_NvjpgContext(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_NvjpgContext(NvjpgContext **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + NvjpgContext *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(NvjpgContext)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(NvjpgContext)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NvjpgContext); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, 
RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_NvjpgContext(pThis, pRmhalspecowner); + status = __nvoc_ctor_NvjpgContext(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_NvjpgContext_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_NvjpgContext_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NvjpgContext(NvjpgContext **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_NvjpgContext(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_nvjpg_ctx_nvoc.h b/src/nvidia/generated/g_kernel_nvjpg_ctx_nvoc.h new file mode 100644 index 000000000..4560a4cbb --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvjpg_ctx_nvoc.h @@ -0,0 +1,306 @@ +#ifndef _G_KERNEL_NVJPG_CTX_NVOC_H_ +#define _G_KERNEL_NVJPG_CTX_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_nvjpg_ctx_nvoc.h" + +#ifndef KERNEL_NVJPG_CTX_H +#define KERNEL_NVJPG_CTX_H + +#include "kernel/gpu/fifo/channel_descendant.h" + +ENGDESCRIPTOR nvjpgGetEngineDescFromAllocParams(OBJGPU *pGpu, NvU32 externalClassId, void *pAllocParams); + +/*! 
+ * RM internal class representing NVXXXX_VIDEO_NVJPG + */ +#ifdef NVOC_KERNEL_NVJPG_CTX_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NvjpgContext { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct NvjpgContext *__nvoc_pbase_NvjpgContext; + NV_STATUS (*__nvjpgctxCheckMemInterUnmap__)(struct NvjpgContext *, NvBool); + NvBool (*__nvjpgctxShareCallback__)(struct NvjpgContext *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__nvjpgctxAccessCallback__)(struct NvjpgContext *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__nvjpgctxMapTo__)(struct NvjpgContext *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__nvjpgctxGetMapAddrSpace__)(struct NvjpgContext *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__nvjpgctxSetNotificationShare__)(struct NvjpgContext *, struct NotifShare *); + NvU32 (*__nvjpgctxGetRefCount__)(struct NvjpgContext *); + void (*__nvjpgctxAddAdditionalDependants__)(struct RsClient *, struct NvjpgContext *, RsResourceRef *); + NV_STATUS (*__nvjpgctxControl_Prologue__)(struct NvjpgContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nvjpgctxGetRegBaseOffsetAndSize__)(struct NvjpgContext *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__nvjpgctxInternalControlForward__)(struct NvjpgContext *, NvU32, void *, NvU32); + NV_STATUS (*__nvjpgctxUnmapFrom__)(struct NvjpgContext *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__nvjpgctxControl_Epilogue__)(struct NvjpgContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nvjpgctxControlLookup__)(struct NvjpgContext *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__nvjpgctxGetSwMethods__)(struct NvjpgContext *, METHOD **, NvU32 *); + NvHandle (*__nvjpgctxGetInternalObjectHandle__)(struct NvjpgContext *); + NV_STATUS (*__nvjpgctxControl__)(struct NvjpgContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nvjpgctxUnmap__)(struct NvjpgContext *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__nvjpgctxGetMemInterMapParams__)(struct NvjpgContext *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__nvjpgctxGetMemoryMappingDescriptor__)(struct NvjpgContext *, struct MEMORY_DESCRIPTOR **); + NvBool (*__nvjpgctxIsSwMethodStalling__)(struct NvjpgContext *, NvU32); + NV_STATUS (*__nvjpgctxControlFilter__)(struct NvjpgContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nvjpgctxUnregisterEvent__)(struct NvjpgContext *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__nvjpgctxCanCopy__)(struct NvjpgContext *); + void (*__nvjpgctxPreDestruct__)(struct NvjpgContext *); + PEVENTNOTIFICATION *(*__nvjpgctxGetNotificationListPtr__)(struct NvjpgContext *); + struct NotifShare *(*__nvjpgctxGetNotificationShare__)(struct NvjpgContext *); + NV_STATUS (*__nvjpgctxMap__)(struct NvjpgContext *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS 
(*__nvjpgctxGetOrAllocNotifShare__)(struct NvjpgContext *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_NvjpgContext_TYPEDEF__ +#define __NVOC_CLASS_NvjpgContext_TYPEDEF__ +typedef struct NvjpgContext NvjpgContext; +#endif /* __NVOC_CLASS_NvjpgContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvjpgContext +#define __nvoc_class_id_NvjpgContext 0x08c1ce +#endif /* __nvoc_class_id_NvjpgContext */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvjpgContext; + +#define __staticCast_NvjpgContext(pThis) \ + ((pThis)->__nvoc_pbase_NvjpgContext) + +#ifdef __nvoc_kernel_nvjpg_ctx_h_disabled +#define __dynamicCast_NvjpgContext(pThis) ((NvjpgContext*)NULL) +#else //__nvoc_kernel_nvjpg_ctx_h_disabled +#define __dynamicCast_NvjpgContext(pThis) \ + ((NvjpgContext*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NvjpgContext))) +#endif //__nvoc_kernel_nvjpg_ctx_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_NvjpgContext(NvjpgContext**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NvjpgContext(NvjpgContext**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_NvjpgContext(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_NvjpgContext((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define nvjpgctxCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) nvjpgctxCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define nvjpgctxShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) nvjpgctxShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define nvjpgctxAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) nvjpgctxAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define nvjpgctxMapTo(pResource, pParams) nvjpgctxMapTo_DISPATCH(pResource, pParams) +#define nvjpgctxGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) nvjpgctxGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define nvjpgctxSetNotificationShare(pNotifier, pNotifShare) nvjpgctxSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define nvjpgctxGetRefCount(pResource) nvjpgctxGetRefCount_DISPATCH(pResource) +#define nvjpgctxAddAdditionalDependants(pClient, pResource, pReference) nvjpgctxAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define nvjpgctxControl_Prologue(pResource, pCallContext, pParams) nvjpgctxControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define nvjpgctxGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) nvjpgctxGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define nvjpgctxInternalControlForward(pGpuResource, command, pParams, size) nvjpgctxInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define nvjpgctxUnmapFrom(pResource, pParams) nvjpgctxUnmapFrom_DISPATCH(pResource, pParams) +#define nvjpgctxControl_Epilogue(pResource, pCallContext, pParams) nvjpgctxControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define nvjpgctxControlLookup(pResource, pParams, ppEntry) nvjpgctxControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define nvjpgctxGetSwMethods(pChannelDescendant, ppMethods, pNumMethods) nvjpgctxGetSwMethods_DISPATCH(pChannelDescendant, ppMethods, pNumMethods) +#define nvjpgctxGetInternalObjectHandle(pGpuResource) 
nvjpgctxGetInternalObjectHandle_DISPATCH(pGpuResource) +#define nvjpgctxControl(pGpuResource, pCallContext, pParams) nvjpgctxControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define nvjpgctxUnmap(pGpuResource, pCallContext, pCpuMapping) nvjpgctxUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define nvjpgctxGetMemInterMapParams(pRmResource, pParams) nvjpgctxGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define nvjpgctxGetMemoryMappingDescriptor(pRmResource, ppMemDesc) nvjpgctxGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define nvjpgctxIsSwMethodStalling(pChannelDescendant, hHandle) nvjpgctxIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define nvjpgctxControlFilter(pResource, pCallContext, pParams) nvjpgctxControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define nvjpgctxUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) nvjpgctxUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define nvjpgctxCanCopy(pResource) nvjpgctxCanCopy_DISPATCH(pResource) +#define nvjpgctxPreDestruct(pResource) nvjpgctxPreDestruct_DISPATCH(pResource) +#define nvjpgctxGetNotificationListPtr(pNotifier) nvjpgctxGetNotificationListPtr_DISPATCH(pNotifier) +#define nvjpgctxGetNotificationShare(pNotifier) nvjpgctxGetNotificationShare_DISPATCH(pNotifier) +#define nvjpgctxMap(pGpuResource, pCallContext, pParams, pCpuMapping) nvjpgctxMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define nvjpgctxGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) nvjpgctxGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS nvjpgctxConstructHal_KERNEL(struct NvjpgContext *pNvjpgContext, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_kernel_nvjpg_ctx_h_disabled +static inline NV_STATUS nvjpgctxConstructHal(struct NvjpgContext *pNvjpgContext, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("NvjpgContext was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvjpg_ctx_h_disabled +#define nvjpgctxConstructHal(pNvjpgContext, pCallContext, pParams) nvjpgctxConstructHal_KERNEL(pNvjpgContext, pCallContext, pParams) +#endif //__nvoc_kernel_nvjpg_ctx_h_disabled + +#define nvjpgctxConstructHal_HAL(pNvjpgContext, pCallContext, pParams) nvjpgctxConstructHal(pNvjpgContext, pCallContext, pParams) + +void nvjpgctxDestructHal_KERNEL(struct NvjpgContext *pNvjpgContext); + +#ifdef __nvoc_kernel_nvjpg_ctx_h_disabled +static inline void nvjpgctxDestructHal(struct NvjpgContext *pNvjpgContext) { + NV_ASSERT_FAILED_PRECOMP("NvjpgContext was disabled!"); +} +#else //__nvoc_kernel_nvjpg_ctx_h_disabled +#define nvjpgctxDestructHal(pNvjpgContext) nvjpgctxDestructHal_KERNEL(pNvjpgContext) +#endif //__nvoc_kernel_nvjpg_ctx_h_disabled + +#define nvjpgctxDestructHal_HAL(pNvjpgContext) nvjpgctxDestructHal(pNvjpgContext) + +static inline NV_STATUS nvjpgctxCheckMemInterUnmap_DISPATCH(struct NvjpgContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__nvjpgctxCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool nvjpgctxShareCallback_DISPATCH(struct NvjpgContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__nvjpgctxShareCallback__(pGpuResource, 
pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool nvjpgctxAccessCallback_DISPATCH(struct NvjpgContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nvjpgctxAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS nvjpgctxMapTo_DISPATCH(struct NvjpgContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nvjpgctxMapTo__(pResource, pParams); +} + +static inline NV_STATUS nvjpgctxGetMapAddrSpace_DISPATCH(struct NvjpgContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__nvjpgctxGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void nvjpgctxSetNotificationShare_DISPATCH(struct NvjpgContext *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__nvjpgctxSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 nvjpgctxGetRefCount_DISPATCH(struct NvjpgContext *pResource) { + return pResource->__nvjpgctxGetRefCount__(pResource); +} + +static inline void nvjpgctxAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct NvjpgContext *pResource, RsResourceRef *pReference) { + pResource->__nvjpgctxAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS nvjpgctxControl_Prologue_DISPATCH(struct NvjpgContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvjpgctxControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS nvjpgctxGetRegBaseOffsetAndSize_DISPATCH(struct NvjpgContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__nvjpgctxGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS nvjpgctxInternalControlForward_DISPATCH(struct NvjpgContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__nvjpgctxInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS nvjpgctxUnmapFrom_DISPATCH(struct NvjpgContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nvjpgctxUnmapFrom__(pResource, pParams); +} + +static inline void nvjpgctxControl_Epilogue_DISPATCH(struct NvjpgContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nvjpgctxControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS nvjpgctxControlLookup_DISPATCH(struct NvjpgContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__nvjpgctxControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS nvjpgctxGetSwMethods_DISPATCH(struct NvjpgContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return pChannelDescendant->__nvjpgctxGetSwMethods__(pChannelDescendant, ppMethods, pNumMethods); +} + +static inline NvHandle nvjpgctxGetInternalObjectHandle_DISPATCH(struct NvjpgContext *pGpuResource) { + return pGpuResource->__nvjpgctxGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS nvjpgctxControl_DISPATCH(struct NvjpgContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__nvjpgctxControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS 
nvjpgctxUnmap_DISPATCH(struct NvjpgContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvjpgctxUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS nvjpgctxGetMemInterMapParams_DISPATCH(struct NvjpgContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__nvjpgctxGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS nvjpgctxGetMemoryMappingDescriptor_DISPATCH(struct NvjpgContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__nvjpgctxGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool nvjpgctxIsSwMethodStalling_DISPATCH(struct NvjpgContext *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__nvjpgctxIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS nvjpgctxControlFilter_DISPATCH(struct NvjpgContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nvjpgctxControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS nvjpgctxUnregisterEvent_DISPATCH(struct NvjpgContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__nvjpgctxUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool nvjpgctxCanCopy_DISPATCH(struct NvjpgContext *pResource) { + return pResource->__nvjpgctxCanCopy__(pResource); +} + +static inline void nvjpgctxPreDestruct_DISPATCH(struct NvjpgContext *pResource) { + pResource->__nvjpgctxPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *nvjpgctxGetNotificationListPtr_DISPATCH(struct NvjpgContext *pNotifier) { + return pNotifier->__nvjpgctxGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *nvjpgctxGetNotificationShare_DISPATCH(struct NvjpgContext *pNotifier) { + return pNotifier->__nvjpgctxGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS nvjpgctxMap_DISPATCH(struct NvjpgContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__nvjpgctxMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS nvjpgctxGetOrAllocNotifShare_DISPATCH(struct NvjpgContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__nvjpgctxGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS __nvoc_nvjpgctxConstruct(struct NvjpgContext *arg_pNvjpgContext, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams) { + return nvjpgctxConstructHal(arg_pNvjpgContext, arg_pCallContext, arg_pParams); +} + +static inline void __nvoc_nvjpgctxDestruct(struct NvjpgContext *pNvjpgContext) { + nvjpgctxDestructHal(pNvjpgContext); +} + +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_NVJPG_CTX_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_nvlink_nvoc.c b/src/nvidia/generated/g_kernel_nvlink_nvoc.c new file mode 100644 index 000000000..35a8261e6 --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvlink_nvoc.c @@ -0,0 +1,665 @@ +#define NVOC_KERNEL_NVLINK_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include 
"nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_nvlink_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xce6818 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelNvlink; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelNvlink(KernelNvlink*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelNvlink(KernelNvlink*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelNvlink(KernelNvlink*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelNvlink(KernelNvlink*, RmHalspecOwner* ); +void __nvoc_dtor_KernelNvlink(KernelNvlink*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelNvlink; + +static const struct NVOC_RTTI __nvoc_rtti_KernelNvlink_KernelNvlink = { + /*pClassDef=*/ &__nvoc_class_def_KernelNvlink, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelNvlink, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelNvlink_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelNvlink, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelNvlink_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelNvlink, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelNvlink = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelNvlink_KernelNvlink, + &__nvoc_rtti_KernelNvlink_OBJENGSTATE, + &__nvoc_rtti_KernelNvlink_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelNvlink = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelNvlink), + /*classId=*/ classId(KernelNvlink), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelNvlink", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelNvlink, + /*pCastInfo=*/ &__nvoc_castinfo_KernelNvlink, + /*pExportInfo=*/ &__nvoc_export_info_KernelNvlink +}; + +static NV_STATUS __nvoc_thunk_KernelNvlink_engstateConstructEngine(OBJGPU *arg0, struct OBJENGSTATE *arg1, ENGDESCRIPTOR arg2) { + return knvlinkConstructEngine(arg0, (struct KernelNvlink *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelNvlink_engstateStatePreInitLocked(OBJGPU *arg0, struct OBJENGSTATE *arg1) { + return knvlinkStatePreInitLocked(arg0, (struct KernelNvlink *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_KernelNvlink_engstateStateLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) { + return knvlinkStateLoad(arg0, (struct KernelNvlink *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelNvlink_engstateStatePostLoad(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) { + return knvlinkStatePostLoad(arg0, (struct KernelNvlink *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS __nvoc_thunk_KernelNvlink_engstateStateUnload(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) { + return knvlinkStateUnload(arg0, (struct KernelNvlink *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), arg2); +} + +static NV_STATUS 
__nvoc_thunk_KernelNvlink_engstateStatePostUnload(OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) { + return knvlinkStatePostUnload(arg0, (struct KernelNvlink *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), arg2); +} + +static NvBool __nvoc_thunk_KernelNvlink_engstateIsPresent(OBJGPU *arg0, struct OBJENGSTATE *arg1) { + return knvlinkIsPresent(arg0, (struct KernelNvlink *)(((unsigned char *)arg1) - __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkReconcileTunableState(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkStateInitLocked(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkStatePreLoad(POBJGPU pGpu, struct KernelNvlink *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_knvlinkStateDestroy(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkStatePreUnload(POBJGPU pGpu, struct KernelNvlink *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkStateInitUnlocked(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_knvlinkInitMissing(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkStatePreInitUnlocked(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkGetTunableState(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkCompareTunableState(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_knvlinkFreeTunableState(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS 
__nvoc_thunk_OBJENGSTATE_knvlinkAllocTunableState(POBJGPU pGpu, struct KernelNvlink *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_knvlinkSetTunableState(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelNvlink_OBJENGSTATE.offset), pTunableState); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelNvlink = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelNvlink(KernelNvlink *pThis) { + __nvoc_knvlinkDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelNvlink(KernelNvlink *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_IS_MISSING + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_IS_MISSING, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_ENABLED + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_ENABLED, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_ENABLED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_UNSET_NVLINK_PEER_SUPPORTED + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_UNSET_NVLINK_PEER_SUPPORTED, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_UNSET_NVLINK_PEER_SUPPORTED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED, ((NvBool)(0 == 0))); + } + // default + else + { + 
pThis->setProperty(pThis, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_LINKRESET_AFTER_SHUTDOWN + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_LINKRESET_AFTER_SHUTDOWN, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_LINKRESET_AFTER_SHUTDOWN, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_BUG2274645_RESET_FOR_RTD3_FGC6 + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_BUG2274645_RESET_FOR_RTD3_FGC6, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_BUG2274645_RESET_FOR_RTD3_FGC6, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_L2_POWER_STATE_FOR_LONG_IDLE + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_L2_POWER_STATE_FOR_LONG_IDLE, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_L2_POWER_STATE_FOR_LONG_IDLE, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_WAR_BUG_3471679_PEERID_FILTERING + pThis->setProperty(pThis, PDB_PROP_KNVLINK_WAR_BUG_3471679_PEERID_FILTERING, ((NvBool)(0 != 0))); + + // NVOC Property Hal field -- PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED, ((NvBool)(0 != 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED, ((NvBool)(0 == 0))); + } + + pThis->fabricBaseAddr = (+18446744073709551615ULL); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelNvlink(KernelNvlink *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelNvlink_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelNvlink(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelNvlink_exit; // Success + +__nvoc_ctor_KernelNvlink_fail_OBJENGSTATE: +__nvoc_ctor_KernelNvlink_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelNvlink_1(KernelNvlink *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + 
PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__knvlinkConstructEngine__ = &knvlinkConstructEngine_IMPL; + + pThis->__knvlinkStatePreInitLocked__ = &knvlinkStatePreInitLocked_IMPL; + + pThis->__knvlinkStateLoad__ = &knvlinkStateLoad_IMPL; + + pThis->__knvlinkStatePostLoad__ = &knvlinkStatePostLoad_IMPL; + + pThis->__knvlinkStateUnload__ = &knvlinkStateUnload_IMPL; + + pThis->__knvlinkStatePostUnload__ = &knvlinkStatePostUnload_IMPL; + + pThis->__knvlinkIsPresent__ = &knvlinkIsPresent_IMPL; + + // Hal function -- knvlinkValidateFabricBaseAddress + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkValidateFabricBaseAddress__ = &knvlinkValidateFabricBaseAddress_GA100; + } + // default + else + { + pThis->__knvlinkValidateFabricBaseAddress__ = &knvlinkValidateFabricBaseAddress_46f6a7; + } + } + + // Hal function -- knvlinkGetConnectedLinksMask + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkGetConnectedLinksMask__ = &knvlinkGetConnectedLinksMask_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__knvlinkGetConnectedLinksMask__ = &knvlinkGetConnectedLinksMask_15a734; + } + } + + // Hal function -- knvlinkEnableLinksPostTopology + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->__knvlinkEnableLinksPostTopology__ = &knvlinkEnableLinksPostTopology_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ff80UL) )) /* ChipHal: TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkEnableLinksPostTopology__ = &knvlinkEnableLinksPostTopology_56cd7a; + } + } + + // Hal function -- knvlinkOverrideConfig + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->__knvlinkOverrideConfig__ = &knvlinkOverrideConfig_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkOverrideConfig__ = &knvlinkOverrideConfig_GA100; + } + else if (( 
((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__knvlinkOverrideConfig__ = &knvlinkOverrideConfig_56cd7a; + } + } + + // Hal function -- knvlinkFilterBridgeLinks + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkFilterBridgeLinks__ = &knvlinkFilterBridgeLinks_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__knvlinkFilterBridgeLinks__ = &knvlinkFilterBridgeLinks_46f6a7; + } + } + + // Hal function -- knvlinkGetUniquePeerIdMask + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->__knvlinkGetUniquePeerIdMask__ = &knvlinkGetUniquePeerIdMask_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ff80UL) )) /* ChipHal: TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkGetUniquePeerIdMask__ = &knvlinkGetUniquePeerIdMask_15a734; + } + } + + // Hal function -- knvlinkGetUniquePeerId + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->__knvlinkGetUniquePeerId__ = &knvlinkGetUniquePeerId_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ff80UL) )) /* ChipHal: TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkGetUniquePeerId__ = &knvlinkGetUniquePeerId_c732fb; + } + } + + // Hal function -- knvlinkRemoveMapping + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->__knvlinkRemoveMapping__ = &knvlinkRemoveMapping_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkRemoveMapping__ = &knvlinkRemoveMapping_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__knvlinkRemoveMapping__ = &knvlinkRemoveMapping_56cd7a; + } + } + + // Hal function -- knvlinkGetP2POptimalCEs + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 
0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkGetP2POptimalCEs__ = &knvlinkGetP2POptimalCEs_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__knvlinkGetP2POptimalCEs__ = &knvlinkGetP2POptimalCEs_56cd7a; + } + } + + // Hal function -- knvlinkConstructHal + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkConstructHal__ = &knvlinkConstructHal_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__knvlinkConstructHal__ = &knvlinkConstructHal_56cd7a; + } + } + + // Hal function -- knvlinkSetupPeerMapping + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkSetupPeerMapping__ = &knvlinkSetupPeerMapping_GP100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__knvlinkSetupPeerMapping__ = &knvlinkSetupPeerMapping_b3696a; + } + } + + // Hal function -- knvlinkProgramLinkSpeed + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc60UL) )) /* ChipHal: TU102 | TU104 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkProgramLinkSpeed__ = &knvlinkProgramLinkSpeed_GV100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000380UL) )) /* ChipHal: TU106 | TU116 | TU117 */ + { + pThis->__knvlinkProgramLinkSpeed__ = &knvlinkProgramLinkSpeed_56cd7a; + } + } + + // Hal function -- knvlinkPoweredUpForD3 + if (0) + { + } + else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000060UL) )) /* ChipHal: TU102 | TU104 */ + { + pThis->__knvlinkPoweredUpForD3__ = &knvlinkPoweredUpForD3_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ff80UL) )) /* ChipHal: TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__knvlinkPoweredUpForD3__ = &knvlinkPoweredUpForD3_491d52; + } + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelNvlink_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelNvlink_engstateStatePreInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = 
&__nvoc_thunk_KernelNvlink_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__ = &__nvoc_thunk_KernelNvlink_engstateStatePostLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelNvlink_engstateStateUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostUnload__ = &__nvoc_thunk_KernelNvlink_engstateStatePostUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateIsPresent__ = &__nvoc_thunk_KernelNvlink_engstateIsPresent; + + pThis->__knvlinkReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvlinkReconcileTunableState; + + pThis->__knvlinkStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_knvlinkStateInitLocked; + + pThis->__knvlinkStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_knvlinkStatePreLoad; + + pThis->__knvlinkStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_knvlinkStateDestroy; + + pThis->__knvlinkStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_knvlinkStatePreUnload; + + pThis->__knvlinkStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_knvlinkStateInitUnlocked; + + pThis->__knvlinkInitMissing__ = &__nvoc_thunk_OBJENGSTATE_knvlinkInitMissing; + + pThis->__knvlinkStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_knvlinkStatePreInitUnlocked; + + pThis->__knvlinkGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvlinkGetTunableState; + + pThis->__knvlinkCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvlinkCompareTunableState; + + pThis->__knvlinkFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvlinkFreeTunableState; + + pThis->__knvlinkAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvlinkAllocTunableState; + + pThis->__knvlinkSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_knvlinkSetTunableState; +} + +void __nvoc_init_funcTable_KernelNvlink(KernelNvlink *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelNvlink_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelNvlink(KernelNvlink *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelNvlink = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelNvlink(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelNvlink(KernelNvlink **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelNvlink *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelNvlink)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelNvlink)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelNvlink); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelNvlink(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelNvlink(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelNvlink_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelNvlink_cleanup: + // do not call destructors here 
since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelNvlink(KernelNvlink **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelNvlink(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_nvlink_nvoc.h b/src/nvidia/generated/g_kernel_nvlink_nvoc.h new file mode 100644 index 000000000..02bf0afcb --- /dev/null +++ b/src/nvidia/generated/g_kernel_nvlink_nvoc.h @@ -0,0 +1,1434 @@ +#ifndef _G_KERNEL_NVLINK_NVOC_H_ +#define _G_KERNEL_NVLINK_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_nvlink_nvoc.h" + +#ifndef _KERNEL_NVLINK_H_ +#define _KERNEL_NVLINK_H_ + +#include "core/core.h" +#include "core/locks.h" +#include "gpu/eng_state.h" +#include "lib/ref_count.h" +#include "nvCpuUuid.h" +#include "gpu/bus/kern_bus.h" + +#if defined(INCLUDE_NVLINK_LIB) +#include "nvlink.h" +#include "nvlink_export.h" +#endif + +#include "kernel/gpu/nvlink/kernel_ioctrl.h" + +#include "ctrl/ctrl2080/ctrl2080nvlink.h" // rmcontrol params + +#if defined(INCLUDE_NVLINK_LIB) + +typedef struct _def_knvlink_conn_info +{ + NvU32 domain; + NvU16 bus; + NvU16 device; + NvU16 function; + NvU32 pciDeviceId; + NvU8 devUuid[NV_UUID_LEN]; + NvU64 deviceType; + NvU32 linkNumber; + NvU32 ipVerDlPl; + NvBool bConnected; + NvU64 chipSid; +} KNVLINK_CONN_INFO, *PKNVLINK_CONN_INFO; + +#endif + +// Known versions (taken from nvlinkip_discovery.h NV_NVLINKIP_DISCOVERY_COMMON_VERSION) +#define NVLINK_VERSION_10 0x00000001 +#define NVLINK_VERSION_20 0x00000002 +#define NVLINK_VERSION_22 0x00000004 +#define NVLINK_VERSION_30 0x00000005 +#define NVLINK_VERSION_31 0x00000006 + +// Maximum links the GPU NVLink SW can currently support +#define NVLINK_MAX_LINKS_SW 12 + +// Maximum IOCTRLs supported in SW +#define NVLINK_MAX_IOCTRLS_SW 3 + +// NvLink Phase Identifiers +#define NVLINK_PHASE_STATE_LOAD 0xFF000001 +#define NVLINK_PHASE_STATE_POST_LOAD 0xFF000002 + +/******** NVLink associated timeouts and delays ***********/ + +// INITOPTIMIZE timeout = 10s +#define NVLINK_INITOPTIMIZE_POLL_TIMEOUT 10000000 +#define NVLINK_INITOPTIMIZE_POLL_TIMEOUT_EMU 20000000 +#define NVLINK_INITOPTIMIZE_POLL_COUNT_DELAY_MS 1000 + +/**********************************************************/ + +// NvGpu identifier in nvlink core library +#define NVLINK_NVIDIA_DRIVER "NVIDIA GPU DRIVER" + +#define NVLINK_DRIVER_NAME_LENGTH 0x0000040 +#define NVLINK_DEVICE_NAME_LENGTH 0x0000040 +#define NVLINK_LINK_NAME_LENGTH 0x0000040 + +// +// Arch CONNECTION defines, replaces forceconfig. See Bugs 1665737, +// 1665734 and 1734252. +// This per link connection state is passed up from chiplib +// and can be controlled on the command line. +// The max number of connections is speced in __SIZE_1. 
+// +#define NV_NVLINK_ARCH_CONNECTION 31:0 +#define NV_NVLINK_ARCH_CONNECTION__SIZE_1 32 +#define NV_NVLINK_ARCH_CONNECTION_DISABLED 0x00000000 +#define NV_NVLINK_ARCH_CONNECTION_PEER_MASK 7:0 +#define NV_NVLINK_ARCH_CONNECTION_ENABLED 8:8 +#define NV_NVLINK_ARCH_CONNECTION_PHYSICAL_LINK 20:16 +#define NV_NVLINK_ARCH_CONNECTION_RESERVED 29:21 +#define NV_NVLINK_ARCH_CONNECTION_PEERS_COMPUTE_ONLY 30:30 +#define NV_NVLINK_ARCH_CONNECTION_CPU 31:31 + +// Invalid fabric address +#define NVLINK_INVALID_FABRIC_ADDR NV_U64_MAX + +#define NVLINK_MAX_PEERS_SW 8 + +// PCI Device IDs and types used for ForceConfig + +// FORCED_SYSMEM uses the ebridge device from NVLink1 +#define FORCED_SYSMEM_PCI_BUS 0xe +#define FORCED_SYSMEM_DEVICE_ID 0x10ec +#define FORCED_SYSMEM_DEVICE_TYPE NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_EBRIDGE + +// +// FORCED_SWITCH uses the first Switch device (Willow) +// Willow device ID is 0x10F5 or 0x1AC0..0x1ACF +// +#define FORCED_SWITCH_PCI_BUS 0xe +#define FORCED_SWITCH_DEVICE_ID 0x10F5 +#define FORCED_SWITCH_DEVICE_TYPE NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH + +// +// Structure representing per link information +// +typedef struct _def_knvlink_link +{ +#if defined(INCLUDE_NVLINK_LIB) + + // Reference to link object registered with core lib + nvlink_link *core_link; + + // Nvlink connection information + KNVLINK_CONN_INFO remoteEndInfo; + + // OS-specific data associated with the link + void *pOsInfo; + +#endif + + OBJGPU *pGpu; + NvU8 linkId; + NvBool bValid; + + // IOCTRL id that this link is associated with + NvU32 ioctrlId; + + // DLPL IP version for the link + NvU32 ipVerDlPl; + + // PLL sharing information + NvU8 pllMasterLinkId; + NvU8 pllSlaveLinkId; + + // RXDET per-lane status + NvU32 laneRxdetStatusMask; + +} KNVLINK_RM_LINK, *PKNVLINK_RM_LINK; + + +/*! + * KernelNvlink is a logical abstraction of the GPU Nvlink Engine. The + * Public API of the Nvlink Engine is exposed through this object, and + * any interfaces which do not manage the underlying Nvlink hardware + * can be managed by this object. 
+ */ +#ifdef NVOC_KERNEL_NVLINK_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelNvlink { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelNvlink *__nvoc_pbase_KernelNvlink; + NV_STATUS (*__knvlinkConstructEngine__)(OBJGPU *, struct KernelNvlink *, ENGDESCRIPTOR); + NV_STATUS (*__knvlinkStatePreInitLocked__)(OBJGPU *, struct KernelNvlink *); + NV_STATUS (*__knvlinkStateLoad__)(OBJGPU *, struct KernelNvlink *, NvU32); + NV_STATUS (*__knvlinkStatePostLoad__)(OBJGPU *, struct KernelNvlink *, NvU32); + NV_STATUS (*__knvlinkStateUnload__)(OBJGPU *, struct KernelNvlink *, NvU32); + NV_STATUS (*__knvlinkStatePostUnload__)(OBJGPU *, struct KernelNvlink *, NvU32); + NvBool (*__knvlinkIsPresent__)(OBJGPU *, struct KernelNvlink *); + NV_STATUS (*__knvlinkValidateFabricBaseAddress__)(OBJGPU *, struct KernelNvlink *, NvU64); + NvU32 (*__knvlinkGetConnectedLinksMask__)(OBJGPU *, struct KernelNvlink *); + NV_STATUS (*__knvlinkEnableLinksPostTopology__)(OBJGPU *, struct KernelNvlink *, NvU32); + NV_STATUS (*__knvlinkOverrideConfig__)(OBJGPU *, struct KernelNvlink *, NvU32); + NV_STATUS (*__knvlinkFilterBridgeLinks__)(OBJGPU *, struct KernelNvlink *); + NvU32 (*__knvlinkGetUniquePeerIdMask__)(OBJGPU *, struct KernelNvlink *); + NvU32 (*__knvlinkGetUniquePeerId__)(OBJGPU *, struct KernelNvlink *, OBJGPU *); + NV_STATUS (*__knvlinkRemoveMapping__)(OBJGPU *, struct KernelNvlink *, NvBool, NvU32, NvBool); + NV_STATUS (*__knvlinkGetP2POptimalCEs__)(OBJGPU *, struct KernelNvlink *, NvU32, NvU32 *, NvU32 *, NvU32 *, NvU32 *); + NV_STATUS (*__knvlinkConstructHal__)(OBJGPU *, struct KernelNvlink *); + void (*__knvlinkSetupPeerMapping__)(OBJGPU *, struct KernelNvlink *, OBJGPU *, NvU32); + NV_STATUS (*__knvlinkProgramLinkSpeed__)(OBJGPU *, struct KernelNvlink *); + NvBool (*__knvlinkPoweredUpForD3__)(OBJGPU *, struct KernelNvlink *); + NV_STATUS (*__knvlinkReconcileTunableState__)(POBJGPU, struct KernelNvlink *, void *); + NV_STATUS (*__knvlinkStateInitLocked__)(POBJGPU, struct KernelNvlink *); + NV_STATUS (*__knvlinkStatePreLoad__)(POBJGPU, struct KernelNvlink *, NvU32); + void (*__knvlinkStateDestroy__)(POBJGPU, struct KernelNvlink *); + NV_STATUS (*__knvlinkStatePreUnload__)(POBJGPU, struct KernelNvlink *, NvU32); + NV_STATUS (*__knvlinkStateInitUnlocked__)(POBJGPU, struct KernelNvlink *); + void (*__knvlinkInitMissing__)(POBJGPU, struct KernelNvlink *); + NV_STATUS (*__knvlinkStatePreInitUnlocked__)(POBJGPU, struct KernelNvlink *); + NV_STATUS (*__knvlinkGetTunableState__)(POBJGPU, struct KernelNvlink *, void *); + NV_STATUS (*__knvlinkCompareTunableState__)(POBJGPU, struct KernelNvlink *, void *, void *); + void (*__knvlinkFreeTunableState__)(POBJGPU, struct KernelNvlink *, void *); + NV_STATUS (*__knvlinkAllocTunableState__)(POBJGPU, struct KernelNvlink *, void **); + NV_STATUS (*__knvlinkSetTunableState__)(POBJGPU, struct KernelNvlink *, void *); + NvBool PDB_PROP_KNVLINK_ENABLED; + NvBool PDB_PROP_KNVLINK_SINGLE_LANE_POWER_STATE_ENABLED; + NvBool PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED; + NvBool PDB_PROP_KNVLINK_UNSET_NVLINK_PEER_SUPPORTED; + NvBool PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING; + NvBool PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED; + NvBool PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD; + NvBool PDB_PROP_KNVLINK_LINKRESET_AFTER_SHUTDOWN; + NvBool 
PDB_PROP_KNVLINK_BUG2274645_RESET_FOR_RTD3_FGC6; + NvBool PDB_PROP_KNVLINK_L2_POWER_STATE_FOR_LONG_IDLE; + NvBool PDB_PROP_KNVLINK_WAR_BUG_3471679_PEERID_FILTERING; + NvBool PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED; + struct KernelIoctrl *pKernelIoctrl[3]; + NvU32 ioctrlMask; + NvU32 ipVerNvlink; + NvU8 ioctrlNumEntries; + NvU32 ioctrlSize; + NvU32 registryControl; + NvU32 minionControl; + NvU32 verboseMask; + NvU32 *pLinkConnection; + NvBool bChiplibConfig; + NvBool bRegistryLinkOverride; + NvU32 registryLinkMask; + NvBool bOverrideComputePeerMode; + NvU32 discoveredLinks; + NvU32 vbiosDisabledLinkMask; + NvU32 regkeyDisabledLinksMask; + NvU32 initDisabledLinksMask; + NvU32 connectedLinksMask; + NvU32 bridgeSensableLinks; + NvU32 bridgedLinks; + NvU32 enabledLinks; + NvU32 initializedLinks; + KNVLINK_RM_LINK nvlinkLinks[12]; + NvU32 postRxDetLinkMask; + NvU32 disconnectedLinkMask; + NvU32 sysmemLinkMask; + NvU32 peerLinkMasks[32]; + NvU32 forcedSysmemDeviceType; + nvlink_device *pNvlinkDev; + NvU32 deviceLockRefcount; + NvBool bVerifTrainingEnable; + NvBool bL2Entry; + NvBool bSkipLinkTraining; + NvBool bForceAutoconfig; + NvBool bForceEnableCoreLibRtlsims; + NvBool bEnableTrainingAtLoad; + NvBool bEnableSafeModeAtLoad; + NvBool bDisableSingleLaneMode; + NvBool bDisableL2Mode; + NvU32 nvlinkLinkSpeed; + NvU32 errorRecoveries[12]; + NvBool bNvswitchProxy; + NvU64 fabricBaseAddr; +}; + +#ifndef __NVOC_CLASS_KernelNvlink_TYPEDEF__ +#define __NVOC_CLASS_KernelNvlink_TYPEDEF__ +typedef struct KernelNvlink KernelNvlink; +#endif /* __NVOC_CLASS_KernelNvlink_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelNvlink +#define __nvoc_class_id_KernelNvlink 0xce6818 +#endif /* __nvoc_class_id_KernelNvlink */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelNvlink; + +#define __staticCast_KernelNvlink(pThis) \ + ((pThis)->__nvoc_pbase_KernelNvlink) + +#ifdef __nvoc_kernel_nvlink_h_disabled +#define __dynamicCast_KernelNvlink(pThis) ((KernelNvlink*)NULL) +#else //__nvoc_kernel_nvlink_h_disabled +#define __dynamicCast_KernelNvlink(pThis) \ + ((KernelNvlink*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelNvlink))) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED_BASE_CAST +#define PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED_BASE_NAME PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED +#define PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED_BASE_CAST +#define PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED_BASE_NAME PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED +#define PDB_PROP_KNVLINK_ENABLED_BASE_CAST +#define PDB_PROP_KNVLINK_ENABLED_BASE_NAME PDB_PROP_KNVLINK_ENABLED +#define PDB_PROP_KNVLINK_UNSET_NVLINK_PEER_SUPPORTED_BASE_CAST +#define PDB_PROP_KNVLINK_UNSET_NVLINK_PEER_SUPPORTED_BASE_NAME PDB_PROP_KNVLINK_UNSET_NVLINK_PEER_SUPPORTED +#define PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED_BASE_CAST +#define PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED_BASE_NAME PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED +#define PDB_PROP_KNVLINK_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KNVLINK_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_KNVLINK_WAR_BUG_3471679_PEERID_FILTERING_BASE_CAST +#define PDB_PROP_KNVLINK_WAR_BUG_3471679_PEERID_FILTERING_BASE_NAME PDB_PROP_KNVLINK_WAR_BUG_3471679_PEERID_FILTERING +#define PDB_PROP_KNVLINK_SINGLE_LANE_POWER_STATE_ENABLED_BASE_CAST +#define PDB_PROP_KNVLINK_SINGLE_LANE_POWER_STATE_ENABLED_BASE_NAME PDB_PROP_KNVLINK_SINGLE_LANE_POWER_STATE_ENABLED +#define PDB_PROP_KNVLINK_BUG2274645_RESET_FOR_RTD3_FGC6_BASE_CAST +#define PDB_PROP_KNVLINK_BUG2274645_RESET_FOR_RTD3_FGC6_BASE_NAME PDB_PROP_KNVLINK_BUG2274645_RESET_FOR_RTD3_FGC6 +#define PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD_BASE_CAST +#define PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD_BASE_NAME PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD +#define PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING_BASE_CAST +#define PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING_BASE_NAME PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING +#define PDB_PROP_KNVLINK_L2_POWER_STATE_FOR_LONG_IDLE_BASE_CAST +#define PDB_PROP_KNVLINK_L2_POWER_STATE_FOR_LONG_IDLE_BASE_NAME PDB_PROP_KNVLINK_L2_POWER_STATE_FOR_LONG_IDLE +#define PDB_PROP_KNVLINK_LINKRESET_AFTER_SHUTDOWN_BASE_CAST +#define PDB_PROP_KNVLINK_LINKRESET_AFTER_SHUTDOWN_BASE_NAME PDB_PROP_KNVLINK_LINKRESET_AFTER_SHUTDOWN + +NV_STATUS __nvoc_objCreateDynamic_KernelNvlink(KernelNvlink**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelNvlink(KernelNvlink**, Dynamic*, NvU32); +#define __objCreate_KernelNvlink(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelNvlink((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define knvlinkConstructEngine(arg0, arg1, arg2) knvlinkConstructEngine_DISPATCH(arg0, arg1, arg2) +#define knvlinkStatePreInitLocked(arg0, arg1) knvlinkStatePreInitLocked_DISPATCH(arg0, arg1) +#define knvlinkStateLoad(arg0, arg1, arg2) knvlinkStateLoad_DISPATCH(arg0, arg1, arg2) +#define knvlinkStatePostLoad(arg0, arg1, arg2) knvlinkStatePostLoad_DISPATCH(arg0, arg1, arg2) +#define knvlinkStateUnload(arg0, arg1, arg2) knvlinkStateUnload_DISPATCH(arg0, arg1, arg2) +#define knvlinkStatePostUnload(arg0, arg1, arg2) knvlinkStatePostUnload_DISPATCH(arg0, arg1, arg2) +#define knvlinkIsPresent(arg0, arg1) knvlinkIsPresent_DISPATCH(arg0, arg1) +#define knvlinkValidateFabricBaseAddress(pGpu, pKernelNvlink, arg0) knvlinkValidateFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0) +#define knvlinkValidateFabricBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkValidateFabricBaseAddress_DISPATCH(pGpu, pKernelNvlink, arg0) +#define knvlinkGetConnectedLinksMask(pGpu, pKernelNvlink) knvlinkGetConnectedLinksMask_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkGetConnectedLinksMask_HAL(pGpu, pKernelNvlink) knvlinkGetConnectedLinksMask_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkEnableLinksPostTopology(pGpu, pKernelNvlink, arg0) knvlinkEnableLinksPostTopology_DISPATCH(pGpu, pKernelNvlink, arg0) +#define knvlinkEnableLinksPostTopology_HAL(pGpu, pKernelNvlink, arg0) knvlinkEnableLinksPostTopology_DISPATCH(pGpu, pKernelNvlink, arg0) +#define knvlinkOverrideConfig(pGpu, pKernelNvlink, arg0) knvlinkOverrideConfig_DISPATCH(pGpu, pKernelNvlink, arg0) +#define knvlinkOverrideConfig_HAL(pGpu, pKernelNvlink, arg0) knvlinkOverrideConfig_DISPATCH(pGpu, pKernelNvlink, arg0) +#define knvlinkFilterBridgeLinks(pGpu, pKernelNvlink) knvlinkFilterBridgeLinks_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkFilterBridgeLinks_HAL(pGpu, pKernelNvlink) knvlinkFilterBridgeLinks_DISPATCH(pGpu, pKernelNvlink) +#define 
knvlinkGetUniquePeerIdMask(pGpu, pKernelNvlink) knvlinkGetUniquePeerIdMask_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkGetUniquePeerIdMask_HAL(pGpu, pKernelNvlink) knvlinkGetUniquePeerIdMask_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkGetUniquePeerId(pGpu, pKernelNvlink, pRemoteGpu) knvlinkGetUniquePeerId_DISPATCH(pGpu, pKernelNvlink, pRemoteGpu) +#define knvlinkGetUniquePeerId_HAL(pGpu, pKernelNvlink, pRemoteGpu) knvlinkGetUniquePeerId_DISPATCH(pGpu, pKernelNvlink, pRemoteGpu) +#define knvlinkRemoveMapping(pGpu, pKernelNvlink, bAllMapping, peerMask, bL2Entry) knvlinkRemoveMapping_DISPATCH(pGpu, pKernelNvlink, bAllMapping, peerMask, bL2Entry) +#define knvlinkRemoveMapping_HAL(pGpu, pKernelNvlink, bAllMapping, peerMask, bL2Entry) knvlinkRemoveMapping_DISPATCH(pGpu, pKernelNvlink, bAllMapping, peerMask, bL2Entry) +#define knvlinkGetP2POptimalCEs(pGpu, pKernelNvlink, arg0, arg1, arg2, arg3, arg4) knvlinkGetP2POptimalCEs_DISPATCH(pGpu, pKernelNvlink, arg0, arg1, arg2, arg3, arg4) +#define knvlinkGetP2POptimalCEs_HAL(pGpu, pKernelNvlink, arg0, arg1, arg2, arg3, arg4) knvlinkGetP2POptimalCEs_DISPATCH(pGpu, pKernelNvlink, arg0, arg1, arg2, arg3, arg4) +#define knvlinkConstructHal(pGpu, pKernelNvlink) knvlinkConstructHal_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkConstructHal_HAL(pGpu, pKernelNvlink) knvlinkConstructHal_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkSetupPeerMapping(pGpu, pKernelNvlink, pRemoteGpu, peerId) knvlinkSetupPeerMapping_DISPATCH(pGpu, pKernelNvlink, pRemoteGpu, peerId) +#define knvlinkSetupPeerMapping_HAL(pGpu, pKernelNvlink, pRemoteGpu, peerId) knvlinkSetupPeerMapping_DISPATCH(pGpu, pKernelNvlink, pRemoteGpu, peerId) +#define knvlinkProgramLinkSpeed(pGpu, pKernelNvlink) knvlinkProgramLinkSpeed_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkProgramLinkSpeed_HAL(pGpu, pKernelNvlink) knvlinkProgramLinkSpeed_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkPoweredUpForD3(pGpu, pKernelNvlink) knvlinkPoweredUpForD3_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkPoweredUpForD3_HAL(pGpu, pKernelNvlink) knvlinkPoweredUpForD3_DISPATCH(pGpu, pKernelNvlink) +#define knvlinkReconcileTunableState(pGpu, pEngstate, pTunableState) knvlinkReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define knvlinkStateInitLocked(pGpu, pEngstate) knvlinkStateInitLocked_DISPATCH(pGpu, pEngstate) +#define knvlinkStatePreLoad(pGpu, pEngstate, arg0) knvlinkStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define knvlinkStateDestroy(pGpu, pEngstate) knvlinkStateDestroy_DISPATCH(pGpu, pEngstate) +#define knvlinkStatePreUnload(pGpu, pEngstate, arg0) knvlinkStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define knvlinkStateInitUnlocked(pGpu, pEngstate) knvlinkStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define knvlinkInitMissing(pGpu, pEngstate) knvlinkInitMissing_DISPATCH(pGpu, pEngstate) +#define knvlinkStatePreInitUnlocked(pGpu, pEngstate) knvlinkStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define knvlinkGetTunableState(pGpu, pEngstate, pTunableState) knvlinkGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define knvlinkCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) knvlinkCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define knvlinkFreeTunableState(pGpu, pEngstate, pTunableState) knvlinkFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define knvlinkAllocTunableState(pGpu, pEngstate, ppTunableState) knvlinkAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define knvlinkSetTunableState(pGpu, pEngstate, pTunableState) 
knvlinkSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +NvBool knvlinkIsForcedConfig_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvBool knvlinkIsForcedConfig(OBJGPU *arg0, struct KernelNvlink *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkIsForcedConfig(arg0, arg1) knvlinkIsForcedConfig_IMPL(arg0, arg1) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkIsForcedConfig_HAL(arg0, arg1) knvlinkIsForcedConfig(arg0, arg1) + +NV_STATUS knvlinkApplyRegkeyOverrides_IMPL(OBJGPU *pGpu, struct KernelNvlink *arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkApplyRegkeyOverrides(OBJGPU *pGpu, struct KernelNvlink *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkApplyRegkeyOverrides(pGpu, arg0) knvlinkApplyRegkeyOverrides_IMPL(pGpu, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkApplyRegkeyOverrides_HAL(pGpu, arg0) knvlinkApplyRegkeyOverrides(pGpu, arg0) + +NvBool knvlinkIsNvlinkDefaultEnabled_IMPL(OBJGPU *pGpu, struct KernelNvlink *arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvBool knvlinkIsNvlinkDefaultEnabled(OBJGPU *pGpu, struct KernelNvlink *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkIsNvlinkDefaultEnabled(pGpu, arg0) knvlinkIsNvlinkDefaultEnabled_IMPL(pGpu, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkIsNvlinkDefaultEnabled_HAL(pGpu, arg0) knvlinkIsNvlinkDefaultEnabled(pGpu, arg0) + +NvBool knvlinkIsP2pLoopbackSupported_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvBool knvlinkIsP2pLoopbackSupported(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkIsP2pLoopbackSupported(pGpu, pKernelNvlink) knvlinkIsP2pLoopbackSupported_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkIsP2pLoopbackSupported_HAL(pGpu, pKernelNvlink) knvlinkIsP2pLoopbackSupported(pGpu, pKernelNvlink) + +NvBool knvlinkIsP2pLoopbackSupportedPerLink_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvBool knvlinkIsP2pLoopbackSupportedPerLink(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkIsP2pLoopbackSupportedPerLink(pGpu, pKernelNvlink, arg0) knvlinkIsP2pLoopbackSupportedPerLink_IMPL(pGpu, pKernelNvlink, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkIsP2pLoopbackSupportedPerLink_HAL(pGpu, pKernelNvlink, arg0) knvlinkIsP2pLoopbackSupportedPerLink(pGpu, pKernelNvlink, arg0) + +NvBool knvlinkIsNvlinkP2pSupported_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pPeerGpu); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvBool knvlinkIsNvlinkP2pSupported(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pPeerGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define 
knvlinkIsNvlinkP2pSupported(pGpu, pKernelNvlink, pPeerGpu) knvlinkIsNvlinkP2pSupported_IMPL(pGpu, pKernelNvlink, pPeerGpu) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkIsNvlinkP2pSupported_HAL(pGpu, pKernelNvlink, pPeerGpu) knvlinkIsNvlinkP2pSupported(pGpu, pKernelNvlink, pPeerGpu) + +NvBool knvlinkCheckNvswitchP2pConfig_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pPeerGpu); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvBool knvlinkCheckNvswitchP2pConfig(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pPeerGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCheckNvswitchP2pConfig(pGpu, pKernelNvlink, pPeerGpu) knvlinkCheckNvswitchP2pConfig_IMPL(pGpu, pKernelNvlink, pPeerGpu) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCheckNvswitchP2pConfig_HAL(pGpu, pKernelNvlink, pPeerGpu) knvlinkCheckNvswitchP2pConfig(pGpu, pKernelNvlink, pPeerGpu) + +NV_STATUS knvlinkGetP2pConnectionStatus_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pPeerGpu); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkGetP2pConnectionStatus(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pPeerGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkGetP2pConnectionStatus(pGpu, pKernelNvlink, pPeerGpu) knvlinkGetP2pConnectionStatus_IMPL(pGpu, pKernelNvlink, pPeerGpu) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkGetP2pConnectionStatus_HAL(pGpu, pKernelNvlink, pPeerGpu) knvlinkGetP2pConnectionStatus(pGpu, pKernelNvlink, pPeerGpu) + +NV_STATUS knvlinkUpdateCurrentConfig_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkUpdateCurrentConfig(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink) knvlinkUpdateCurrentConfig_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkUpdateCurrentConfig_HAL(pGpu, pKernelNvlink) knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink) + +void knvlinkCoreDriverLoadWar_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline void knvlinkCoreDriverLoadWar(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreDriverLoadWar(pGpu, pKernelNvlink) knvlinkCoreDriverLoadWar_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreDriverLoadWar_HAL(pGpu, pKernelNvlink) knvlinkCoreDriverLoadWar(pGpu, pKernelNvlink) + +void knvlinkCoreDriverUnloadWar_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline void knvlinkCoreDriverUnloadWar(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreDriverUnloadWar(pGpu, pKernelNvlink) knvlinkCoreDriverUnloadWar_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreDriverUnloadWar_HAL(pGpu, pKernelNvlink) knvlinkCoreDriverUnloadWar(pGpu, 
pKernelNvlink) + +NV_STATUS knvlinkCoreIsDriverSupported_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreIsDriverSupported(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreIsDriverSupported(pGpu, pKernelNvlink) knvlinkCoreIsDriverSupported_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreIsDriverSupported_HAL(pGpu, pKernelNvlink) knvlinkCoreIsDriverSupported(pGpu, pKernelNvlink) + +NV_STATUS knvlinkCoreAddDevice_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreAddDevice(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreAddDevice(pGpu, pKernelNvlink) knvlinkCoreAddDevice_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreAddDevice_HAL(pGpu, pKernelNvlink) knvlinkCoreAddDevice(pGpu, pKernelNvlink) + +NV_STATUS knvlinkCoreAddLink_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreAddLink(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreAddLink(pGpu, pKernelNvlink, arg0) knvlinkCoreAddLink_IMPL(pGpu, pKernelNvlink, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreAddLink_HAL(pGpu, pKernelNvlink, arg0) knvlinkCoreAddLink(pGpu, pKernelNvlink, arg0) + +NV_STATUS knvlinkCoreRemoveDevice_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreRemoveDevice(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreRemoveDevice(pGpu, pKernelNvlink) knvlinkCoreRemoveDevice_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreRemoveDevice_HAL(pGpu, pKernelNvlink) knvlinkCoreRemoveDevice(pGpu, pKernelNvlink) + +NV_STATUS knvlinkCoreRemoveLink_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreRemoveLink(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreRemoveLink(pGpu, pKernelNvlink, arg0) knvlinkCoreRemoveLink_IMPL(pGpu, pKernelNvlink, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreRemoveLink_HAL(pGpu, pKernelNvlink, arg0) knvlinkCoreRemoveLink(pGpu, pKernelNvlink, arg0) + +NV_STATUS knvlinkCoreShutdownDeviceLinks_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreShutdownDeviceLinks(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else 
//__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreShutdownDeviceLinks(pGpu, pKernelNvlink) knvlinkCoreShutdownDeviceLinks_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreShutdownDeviceLinks_HAL(pGpu, pKernelNvlink) knvlinkCoreShutdownDeviceLinks(pGpu, pKernelNvlink) + +NV_STATUS knvlinkCoreResetDeviceLinks_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreResetDeviceLinks(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreResetDeviceLinks(pGpu, pKernelNvlink) knvlinkCoreResetDeviceLinks_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreResetDeviceLinks_HAL(pGpu, pKernelNvlink) knvlinkCoreResetDeviceLinks(pGpu, pKernelNvlink) + +NV_STATUS knvlinkCoreUpdateDeviceUUID_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreUpdateDeviceUUID(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreUpdateDeviceUUID(pGpu, pKernelNvlink) knvlinkCoreUpdateDeviceUUID_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreUpdateDeviceUUID_HAL(pGpu, pKernelNvlink) knvlinkCoreUpdateDeviceUUID(pGpu, pKernelNvlink) + +NV_STATUS knvlinkCoreGetRemoteDeviceInfo_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCoreGetRemoteDeviceInfo(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink) knvlinkCoreGetRemoteDeviceInfo_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCoreGetRemoteDeviceInfo_HAL(pGpu, pKernelNvlink) knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink) + +NvBool knvlinkIsGpuConnectedToNvswitch_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvBool knvlinkIsGpuConnectedToNvswitch(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink) knvlinkIsGpuConnectedToNvswitch_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkIsGpuConnectedToNvswitch_HAL(pGpu, pKernelNvlink) knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink) + +NvBool knvlinkIsLinkConnected_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvBool knvlinkIsLinkConnected(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkIsLinkConnected(pGpu, pKernelNvlink, arg0) knvlinkIsLinkConnected_IMPL(pGpu, pKernelNvlink, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkIsLinkConnected_HAL(pGpu, pKernelNvlink, arg0) knvlinkIsLinkConnected(pGpu, pKernelNvlink, 
arg0) + +NV_STATUS knvlinkTrainSysmemLinksToActive_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkTrainSysmemLinksToActive(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkTrainSysmemLinksToActive(pGpu, pKernelNvlink) knvlinkTrainSysmemLinksToActive_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkTrainSysmemLinksToActive_HAL(pGpu, pKernelNvlink) knvlinkTrainSysmemLinksToActive(pGpu, pKernelNvlink) + +NV_STATUS knvlinkTrainP2pLinksToActive_IMPL(OBJGPU *pGpu0, OBJGPU *pGpu1, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkTrainP2pLinksToActive(OBJGPU *pGpu0, OBJGPU *pGpu1, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkTrainP2pLinksToActive(pGpu0, pGpu1, pKernelNvlink) knvlinkTrainP2pLinksToActive_IMPL(pGpu0, pGpu1, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkTrainP2pLinksToActive_HAL(pGpu0, pGpu1, pKernelNvlink) knvlinkTrainP2pLinksToActive(pGpu0, pGpu1, pKernelNvlink) + +NV_STATUS knvlinkCheckTrainingIsComplete_IMPL(OBJGPU *pGpu0, OBJGPU *pGpu1, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCheckTrainingIsComplete(OBJGPU *pGpu0, OBJGPU *pGpu1, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCheckTrainingIsComplete(pGpu0, pGpu1, pKernelNvlink) knvlinkCheckTrainingIsComplete_IMPL(pGpu0, pGpu1, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCheckTrainingIsComplete_HAL(pGpu0, pGpu1, pKernelNvlink) knvlinkCheckTrainingIsComplete(pGpu0, pGpu1, pKernelNvlink) + +NV_STATUS knvlinkTrainFabricLinksToActive_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkTrainFabricLinksToActive(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkTrainFabricLinksToActive(pGpu, pKernelNvlink) knvlinkTrainFabricLinksToActive_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkTrainFabricLinksToActive_HAL(pGpu, pKernelNvlink) knvlinkTrainFabricLinksToActive(pGpu, pKernelNvlink) + +NV_STATUS knvlinkRetrainLink_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 linkId, NvBool bFromOff); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkRetrainLink(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 linkId, NvBool bFromOff) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkRetrainLink(pGpu, pKernelNvlink, linkId, bFromOff) knvlinkRetrainLink_IMPL(pGpu, pKernelNvlink, linkId, bFromOff) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkRetrainLink_HAL(pGpu, pKernelNvlink, linkId, bFromOff) knvlinkRetrainLink(pGpu, pKernelNvlink, linkId, bFromOff) + +NvU32 knvlinkGetEnabledLinkMask_IMPL(OBJGPU 
*pGpu, struct KernelNvlink *arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvU32 knvlinkGetEnabledLinkMask(OBJGPU *pGpu, struct KernelNvlink *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return 0; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkGetEnabledLinkMask(pGpu, arg0) knvlinkGetEnabledLinkMask_IMPL(pGpu, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkGetEnabledLinkMask_HAL(pGpu, arg0) knvlinkGetEnabledLinkMask(pGpu, arg0) + +NvU32 knvlinkGetDiscoveredLinkMask_IMPL(OBJGPU *pGpu, struct KernelNvlink *arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvU32 knvlinkGetDiscoveredLinkMask(OBJGPU *pGpu, struct KernelNvlink *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return 0; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkGetDiscoveredLinkMask(pGpu, arg0) knvlinkGetDiscoveredLinkMask_IMPL(pGpu, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkGetDiscoveredLinkMask_HAL(pGpu, arg0) knvlinkGetDiscoveredLinkMask(pGpu, arg0) + +NV_STATUS knvlinkProcessInitDisabledLinks_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkProcessInitDisabledLinks(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkProcessInitDisabledLinks(pGpu, pKernelNvlink) knvlinkProcessInitDisabledLinks_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkProcessInitDisabledLinks_HAL(pGpu, pKernelNvlink) knvlinkProcessInitDisabledLinks(pGpu, pKernelNvlink) + +NvU32 knvlinkGetNumLinksToSystem_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvU32 knvlinkGetNumLinksToSystem(OBJGPU *arg0, struct KernelNvlink *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return 0; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkGetNumLinksToSystem(arg0, arg1) knvlinkGetNumLinksToSystem_IMPL(arg0, arg1) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkGetNumLinksToSystem_HAL(arg0, arg1) knvlinkGetNumLinksToSystem(arg0, arg1) + +NvU32 knvlinkGetNumLinksToPeer_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pRemoteGpu); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvU32 knvlinkGetNumLinksToPeer(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pRemoteGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return 0; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkGetNumLinksToPeer(pGpu, pKernelNvlink, pRemoteGpu) knvlinkGetNumLinksToPeer_IMPL(pGpu, pKernelNvlink, pRemoteGpu) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkGetNumLinksToPeer_HAL(pGpu, pKernelNvlink, pRemoteGpu) knvlinkGetNumLinksToPeer(pGpu, pKernelNvlink, pRemoteGpu) + +NvU32 knvlinkGetLinkMaskToPeer_IMPL(OBJGPU *pGpu0, struct KernelNvlink *pKernelNvlink0, OBJGPU *pGpu1); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvU32 knvlinkGetLinkMaskToPeer(OBJGPU *pGpu0, struct KernelNvlink *pKernelNvlink0, OBJGPU *pGpu1) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return 0; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkGetLinkMaskToPeer(pGpu0, pKernelNvlink0, pGpu1) knvlinkGetLinkMaskToPeer_IMPL(pGpu0, pKernelNvlink0, pGpu1) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define 
knvlinkGetLinkMaskToPeer_HAL(pGpu0, pKernelNvlink0, pGpu1) knvlinkGetLinkMaskToPeer(pGpu0, pKernelNvlink0, pGpu1) + +NV_STATUS knvlinkSetLinkMaskToPeer_IMPL(OBJGPU *pGpu0, struct KernelNvlink *pKernelNvlink0, OBJGPU *pGpu1, NvU32 peerLinkMask); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkSetLinkMaskToPeer(OBJGPU *pGpu0, struct KernelNvlink *pKernelNvlink0, OBJGPU *pGpu1, NvU32 peerLinkMask) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkSetLinkMaskToPeer(pGpu0, pKernelNvlink0, pGpu1, peerLinkMask) knvlinkSetLinkMaskToPeer_IMPL(pGpu0, pKernelNvlink0, pGpu1, peerLinkMask) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkSetLinkMaskToPeer_HAL(pGpu0, pKernelNvlink0, pGpu1, peerLinkMask) knvlinkSetLinkMaskToPeer(pGpu0, pKernelNvlink0, pGpu1, peerLinkMask) + +NvU32 knvlinkGetPeersNvlinkMaskFromHshub_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvU32 knvlinkGetPeersNvlinkMaskFromHshub(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return 0; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkGetPeersNvlinkMaskFromHshub(pGpu, pKernelNvlink) knvlinkGetPeersNvlinkMaskFromHshub_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkGetPeersNvlinkMaskFromHshub_HAL(pGpu, pKernelNvlink) knvlinkGetPeersNvlinkMaskFromHshub(pGpu, pKernelNvlink) + +NV_STATUS knvlinkPrepareForXVEReset_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkPrepareForXVEReset(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkPrepareForXVEReset(pGpu, pKernelNvlink) knvlinkPrepareForXVEReset_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkPrepareForXVEReset_HAL(pGpu, pKernelNvlink) knvlinkPrepareForXVEReset(pGpu, pKernelNvlink) + +NV_STATUS knvlinkEnterExitSleep_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvBool arg1); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkEnterExitSleep(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkEnterExitSleep(pGpu, pKernelNvlink, arg0, arg1) knvlinkEnterExitSleep_IMPL(pGpu, pKernelNvlink, arg0, arg1) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkEnterExitSleep_HAL(pGpu, pKernelNvlink, arg0, arg1) knvlinkEnterExitSleep(pGpu, pKernelNvlink, arg0, arg1) + +NV_STATUS knvlinkSyncLinkMasksAndVbiosInfo_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkSyncLinkMasksAndVbiosInfo(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkSyncLinkMasksAndVbiosInfo(pGpu, pKernelNvlink) knvlinkSyncLinkMasksAndVbiosInfo_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkSyncLinkMasksAndVbiosInfo_HAL(pGpu, pKernelNvlink) 
knvlinkSyncLinkMasksAndVbiosInfo(pGpu, pKernelNvlink) + +NV_STATUS knvlinkUpdateLinkConnectionStatus_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkUpdateLinkConnectionStatus(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkUpdateLinkConnectionStatus(pGpu, pKernelNvlink, arg0) knvlinkUpdateLinkConnectionStatus_IMPL(pGpu, pKernelNvlink, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkUpdateLinkConnectionStatus_HAL(pGpu, pKernelNvlink, arg0) knvlinkUpdateLinkConnectionStatus(pGpu, pKernelNvlink, arg0) + +NV_STATUS knvlinkPreTrainLinksToActiveAli_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvBool arg1); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkPreTrainLinksToActiveAli(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkPreTrainLinksToActiveAli(pGpu, pKernelNvlink, arg0, arg1) knvlinkPreTrainLinksToActiveAli_IMPL(pGpu, pKernelNvlink, arg0, arg1) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkPreTrainLinksToActiveAli_HAL(pGpu, pKernelNvlink, arg0, arg1) knvlinkPreTrainLinksToActiveAli(pGpu, pKernelNvlink, arg0, arg1) + +NV_STATUS knvlinkTrainLinksToActiveAli_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvBool arg1); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkTrainLinksToActiveAli(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkTrainLinksToActiveAli(pGpu, pKernelNvlink, arg0, arg1) knvlinkTrainLinksToActiveAli_IMPL(pGpu, pKernelNvlink, arg0, arg1) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkTrainLinksToActiveAli_HAL(pGpu, pKernelNvlink, arg0, arg1) knvlinkTrainLinksToActiveAli(pGpu, pKernelNvlink, arg0, arg1) + +NV_STATUS knvlinkUpdatePostRxDetectLinkMask_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkUpdatePostRxDetectLinkMask(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkUpdatePostRxDetectLinkMask(pGpu, pKernelNvlink) knvlinkUpdatePostRxDetectLinkMask_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkUpdatePostRxDetectLinkMask_HAL(pGpu, pKernelNvlink) knvlinkUpdatePostRxDetectLinkMask(pGpu, pKernelNvlink) + +NV_STATUS knvlinkCopyNvlinkDeviceInfo_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCopyNvlinkDeviceInfo(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCopyNvlinkDeviceInfo(pGpu, pKernelNvlink) knvlinkCopyNvlinkDeviceInfo_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define 
knvlinkCopyNvlinkDeviceInfo_HAL(pGpu, pKernelNvlink) knvlinkCopyNvlinkDeviceInfo(pGpu, pKernelNvlink) + +NV_STATUS knvlinkCopyIoctrlDeviceInfo_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkCopyIoctrlDeviceInfo(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkCopyIoctrlDeviceInfo(pGpu, pKernelNvlink) knvlinkCopyIoctrlDeviceInfo_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkCopyIoctrlDeviceInfo_HAL(pGpu, pKernelNvlink) knvlinkCopyIoctrlDeviceInfo(pGpu, pKernelNvlink) + +NV_STATUS knvlinkSetupTopologyForForcedConfig_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkSetupTopologyForForcedConfig(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkSetupTopologyForForcedConfig(pGpu, pKernelNvlink) knvlinkSetupTopologyForForcedConfig_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkSetupTopologyForForcedConfig_HAL(pGpu, pKernelNvlink) knvlinkSetupTopologyForForcedConfig(pGpu, pKernelNvlink) + +NV_STATUS knvlinkSyncLaneShutdownProps_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkSyncLaneShutdownProps(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkSyncLaneShutdownProps(pGpu, pKernelNvlink) knvlinkSyncLaneShutdownProps_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkSyncLaneShutdownProps_HAL(pGpu, pKernelNvlink) knvlinkSyncLaneShutdownProps(pGpu, pKernelNvlink) + +void knvlinkSetPowerFeatures_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline void knvlinkSetPowerFeatures(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkSetPowerFeatures(pGpu, pKernelNvlink) knvlinkSetPowerFeatures_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkSetPowerFeatures_HAL(pGpu, pKernelNvlink) knvlinkSetPowerFeatures(pGpu, pKernelNvlink) + +NV_STATUS knvlinkExecGspRmRpc_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, void *arg1, NvU32 arg2); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkExecGspRmRpc(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, void *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkExecGspRmRpc(pGpu, pKernelNvlink, arg0, arg1, arg2) knvlinkExecGspRmRpc_IMPL(pGpu, pKernelNvlink, arg0, arg1, arg2) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkExecGspRmRpc_HAL(pGpu, pKernelNvlink, arg0, arg1, arg2) knvlinkExecGspRmRpc(pGpu, pKernelNvlink, arg0, arg1, arg2) + +NvBool knvlinkIsNvswitchProxyPresent_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static 
inline NvBool knvlinkIsNvswitchProxyPresent(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink) knvlinkIsNvswitchProxyPresent_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkIsNvswitchProxyPresent_HAL(pGpu, pKernelNvlink) knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink) + +void knvlinkDetectNvswitchProxy_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline void knvlinkDetectNvswitchProxy(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkDetectNvswitchProxy(pGpu, pKernelNvlink) knvlinkDetectNvswitchProxy_IMPL(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkDetectNvswitchProxy_HAL(pGpu, pKernelNvlink) knvlinkDetectNvswitchProxy(pGpu, pKernelNvlink) + +NV_STATUS knvlinkSetUniqueFlaBaseAddress_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkSetUniqueFlaBaseAddress(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkSetUniqueFlaBaseAddress(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFlaBaseAddress_IMPL(pGpu, pKernelNvlink, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkSetUniqueFlaBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFlaBaseAddress(pGpu, pKernelNvlink, arg0) + +static inline NvU64 knvlinkGetUniqueFabricBaseAddress_72249a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return pKernelNvlink->fabricBaseAddr; +} + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NvU64 knvlinkGetUniqueFabricBaseAddress(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return 0; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink) knvlinkGetUniqueFabricBaseAddress_72249a(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkGetUniqueFabricBaseAddress_HAL(pGpu, pKernelNvlink) knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink) + +NV_STATUS knvlinkSetUniqueFabricBaseAddress_IMPL(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkSetUniqueFabricBaseAddress(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkSetUniqueFabricBaseAddress(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFabricBaseAddress_IMPL(pGpu, pKernelNvlink, arg0) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkSetUniqueFabricBaseAddress_HAL(pGpu, pKernelNvlink, arg0) knvlinkSetUniqueFabricBaseAddress(pGpu, pKernelNvlink, arg0) + +NV_STATUS knvlinkStatePostLoadHal_GV100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkStatePostLoadHal(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkStatePostLoadHal(pGpu, pKernelNvlink) knvlinkStatePostLoadHal_GV100(pGpu, pKernelNvlink) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkStatePostLoadHal_HAL(pGpu, pKernelNvlink) knvlinkStatePostLoadHal(pGpu, pKernelNvlink) + +NV_STATUS knvlinkApplyNvswitchDegradedModeSettings_GV100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 *switchLinkMasks); + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkApplyNvswitchDegradedModeSettings(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 *switchLinkMasks) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkApplyNvswitchDegradedModeSettings(pGpu, pKernelNvlink, switchLinkMasks) knvlinkApplyNvswitchDegradedModeSettings_GV100(pGpu, pKernelNvlink, switchLinkMasks) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkApplyNvswitchDegradedModeSettings_HAL(pGpu, pKernelNvlink, switchLinkMasks) knvlinkApplyNvswitchDegradedModeSettings(pGpu, pKernelNvlink, switchLinkMasks) + +static inline NV_STATUS knvlinkDiscoverPostRxDetLinks_46f6a7(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pPeerGpu) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkDiscoverPostRxDetLinks(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pPeerGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkDiscoverPostRxDetLinks(pGpu, pKernelNvlink, pPeerGpu) knvlinkDiscoverPostRxDetLinks_46f6a7(pGpu, pKernelNvlink, pPeerGpu) +#endif //__nvoc_kernel_nvlink_h_disabled + +#define knvlinkDiscoverPostRxDetLinks_HAL(pGpu, pKernelNvlink, pPeerGpu) knvlinkDiscoverPostRxDetLinks(pGpu, pKernelNvlink, pPeerGpu) + +NV_STATUS knvlinkConstructEngine_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1, ENGDESCRIPTOR arg2); + +static inline NV_STATUS knvlinkConstructEngine_DISPATCH(OBJGPU *arg0, struct KernelNvlink *arg1, ENGDESCRIPTOR arg2) { + return arg1->__knvlinkConstructEngine__(arg0, arg1, arg2); +} + +NV_STATUS knvlinkStatePreInitLocked_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1); + +static inline NV_STATUS knvlinkStatePreInitLocked_DISPATCH(OBJGPU *arg0, struct KernelNvlink *arg1) { + return arg1->__knvlinkStatePreInitLocked__(arg0, arg1); +} + +NV_STATUS knvlinkStateLoad_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1, NvU32 arg2); + +static inline NV_STATUS knvlinkStateLoad_DISPATCH(OBJGPU *arg0, struct KernelNvlink *arg1, NvU32 arg2) { + return arg1->__knvlinkStateLoad__(arg0, arg1, arg2); +} + +NV_STATUS knvlinkStatePostLoad_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1, NvU32 arg2); + +static inline NV_STATUS knvlinkStatePostLoad_DISPATCH(OBJGPU *arg0, struct KernelNvlink *arg1, NvU32 arg2) { + return arg1->__knvlinkStatePostLoad__(arg0, arg1, arg2); +} + +NV_STATUS knvlinkStateUnload_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1, NvU32 arg2); + +static inline NV_STATUS knvlinkStateUnload_DISPATCH(OBJGPU *arg0, struct KernelNvlink *arg1, NvU32 arg2) { + return arg1->__knvlinkStateUnload__(arg0, arg1, arg2); +} + +NV_STATUS knvlinkStatePostUnload_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1, NvU32 arg2); + +static inline NV_STATUS knvlinkStatePostUnload_DISPATCH(OBJGPU *arg0, struct KernelNvlink *arg1, NvU32 arg2) { + return arg1->__knvlinkStatePostUnload__(arg0, arg1, arg2); +} + +NvBool 
knvlinkIsPresent_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1); + +static inline NvBool knvlinkIsPresent_DISPATCH(OBJGPU *arg0, struct KernelNvlink *arg1) { + return arg1->__knvlinkIsPresent__(arg0, arg1); +} + +static inline NV_STATUS knvlinkValidateFabricBaseAddress_56cd7a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) { + return NV_OK; +} + +NV_STATUS knvlinkValidateFabricBaseAddress_GA100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0); + +static inline NV_STATUS knvlinkValidateFabricBaseAddress_46f6a7(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS knvlinkValidateFabricBaseAddress_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU64 arg0) { + return pKernelNvlink->__knvlinkValidateFabricBaseAddress__(pGpu, pKernelNvlink, arg0); +} + +static inline NvU32 knvlinkGetConnectedLinksMask_15a734(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return 0U; +} + +NvU32 knvlinkGetConnectedLinksMask_TU102(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +static inline NvU32 knvlinkGetConnectedLinksMask_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return pKernelNvlink->__knvlinkGetConnectedLinksMask__(pGpu, pKernelNvlink); +} + +static inline NV_STATUS knvlinkEnableLinksPostTopology_56cd7a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + return NV_OK; +} + +NV_STATUS knvlinkEnableLinksPostTopology_GV100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0); + +static inline NV_STATUS knvlinkEnableLinksPostTopology_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + return pKernelNvlink->__knvlinkEnableLinksPostTopology__(pGpu, pKernelNvlink, arg0); +} + +static inline NV_STATUS knvlinkOverrideConfig_56cd7a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + return NV_OK; +} + +NV_STATUS knvlinkOverrideConfig_GV100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0); + +NV_STATUS knvlinkOverrideConfig_GA100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0); + +static inline NV_STATUS knvlinkOverrideConfig_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0) { + return pKernelNvlink->__knvlinkOverrideConfig__(pGpu, pKernelNvlink, arg0); +} + +static inline NV_STATUS knvlinkFilterBridgeLinks_56cd7a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return NV_OK; +} + +NV_STATUS knvlinkFilterBridgeLinks_TU102(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +static inline NV_STATUS knvlinkFilterBridgeLinks_46f6a7(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS knvlinkFilterBridgeLinks_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return pKernelNvlink->__knvlinkFilterBridgeLinks__(pGpu, pKernelNvlink); +} + +static inline NvU32 knvlinkGetUniquePeerIdMask_4a4dee(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return 0; +} + +NvU32 knvlinkGetUniquePeerIdMask_GP100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +static inline NvU32 knvlinkGetUniquePeerIdMask_15a734(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return 0U; +} + +static inline NvU32 knvlinkGetUniquePeerIdMask_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return pKernelNvlink->__knvlinkGetUniquePeerIdMask__(pGpu, pKernelNvlink); +} + +static inline NvU32 knvlinkGetUniquePeerId_c732fb(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pRemoteGpu) { + return 4294967295U; 
+} + +NvU32 knvlinkGetUniquePeerId_GP100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pRemoteGpu); + +static inline NvU32 knvlinkGetUniquePeerId_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pRemoteGpu) { + return pKernelNvlink->__knvlinkGetUniquePeerId__(pGpu, pKernelNvlink, pRemoteGpu); +} + +static inline NV_STATUS knvlinkRemoveMapping_56cd7a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvBool bAllMapping, NvU32 peerMask, NvBool bL2Entry) { + return NV_OK; +} + +NV_STATUS knvlinkRemoveMapping_GP100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvBool bAllMapping, NvU32 peerMask, NvBool bL2Entry); + +NV_STATUS knvlinkRemoveMapping_GA100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvBool bAllMapping, NvU32 peerMask, NvBool bL2Entry); + +static inline NV_STATUS knvlinkRemoveMapping_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvBool bAllMapping, NvU32 peerMask, NvBool bL2Entry) { + return pKernelNvlink->__knvlinkRemoveMapping__(pGpu, pKernelNvlink, bAllMapping, peerMask, bL2Entry); +} + +static inline NV_STATUS knvlinkGetP2POptimalCEs_56cd7a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 *arg4) { + return NV_OK; +} + +NV_STATUS knvlinkGetP2POptimalCEs_GP100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 *arg4); + +static inline NV_STATUS knvlinkGetP2POptimalCEs_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, NvU32 *arg3, NvU32 *arg4) { + return pKernelNvlink->__knvlinkGetP2POptimalCEs__(pGpu, pKernelNvlink, arg0, arg1, arg2, arg3, arg4); +} + +static inline NV_STATUS knvlinkConstructHal_56cd7a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return NV_OK; +} + +NV_STATUS knvlinkConstructHal_GV100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +static inline NV_STATUS knvlinkConstructHal_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return pKernelNvlink->__knvlinkConstructHal__(pGpu, pKernelNvlink); +} + +static inline void knvlinkSetupPeerMapping_b3696a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pRemoteGpu, NvU32 peerId) { + return; +} + +void knvlinkSetupPeerMapping_GP100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pRemoteGpu, NvU32 peerId); + +static inline void knvlinkSetupPeerMapping_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, OBJGPU *pRemoteGpu, NvU32 peerId) { + pKernelNvlink->__knvlinkSetupPeerMapping__(pGpu, pKernelNvlink, pRemoteGpu, peerId); +} + +static inline NV_STATUS knvlinkProgramLinkSpeed_56cd7a(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return NV_OK; +} + +NV_STATUS knvlinkProgramLinkSpeed_GV100(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +static inline NV_STATUS knvlinkProgramLinkSpeed_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return pKernelNvlink->__knvlinkProgramLinkSpeed__(pGpu, pKernelNvlink); +} + +static inline NvBool knvlinkPoweredUpForD3_491d52(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return ((NvBool)(0 != 0)); +} + +NvBool knvlinkPoweredUpForD3_TU102(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink); + +static inline NvBool knvlinkPoweredUpForD3_DISPATCH(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink) { + return pKernelNvlink->__knvlinkPoweredUpForD3__(pGpu, pKernelNvlink); +} + +static inline NV_STATUS knvlinkReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) { + 
return pEngstate->__knvlinkReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS knvlinkStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + return pEngstate->__knvlinkStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS knvlinkStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, NvU32 arg0) { + return pEngstate->__knvlinkStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline void knvlinkStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + pEngstate->__knvlinkStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS knvlinkStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, NvU32 arg0) { + return pEngstate->__knvlinkStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS knvlinkStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + return pEngstate->__knvlinkStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void knvlinkInitMissing_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + pEngstate->__knvlinkInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS knvlinkStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate) { + return pEngstate->__knvlinkStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS knvlinkGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) { + return pEngstate->__knvlinkGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS knvlinkCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__knvlinkCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void knvlinkFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) { + pEngstate->__knvlinkFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS knvlinkAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, void **ppTunableState) { + return pEngstate->__knvlinkAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS knvlinkSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelNvlink *pEngstate, void *pTunableState) { + return pEngstate->__knvlinkSetTunableState__(pGpu, pEngstate, pTunableState); +} + +void knvlinkDestruct_IMPL(struct KernelNvlink *arg0); +#define __nvoc_knvlinkDestruct(arg0) knvlinkDestruct_IMPL(arg0) +NV_STATUS knvlinkRemoveMissingIoctrlObjects_IMPL(OBJGPU *arg0, struct KernelNvlink *arg1); +#ifdef __nvoc_kernel_nvlink_h_disabled +static inline NV_STATUS knvlinkRemoveMissingIoctrlObjects(OBJGPU *arg0, struct KernelNvlink *arg1) { + NV_ASSERT_FAILED_PRECOMP("KernelNvlink was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_nvlink_h_disabled +#define knvlinkRemoveMissingIoctrlObjects(arg0, arg1) knvlinkRemoveMissingIoctrlObjects_IMPL(arg0, arg1) +#endif //__nvoc_kernel_nvlink_h_disabled + +#undef PRIVATE_FIELD + + + +// IOCTRL GET defines +#define KNVLINK_GET_IOCTRL(pKernelNvlink, PublicId) (pKernelNvlink->pKernelIoctrl[PublicId]) +#define KNVLINK_LINK_GET_IOCTRL(pKernelNvlink, linkId) \ + (KNVLINK_GET_IOCTRL(pKernelNvlink, pKernelNvlink->nvlinkLinks[linkId].ioctrlId)) + +#define KNVLINK_IS_LINK_CONNECTED_TO_GPU(pKernelNvlink, linkId, pRemoteGpu) \ + ((pKernelNvlink != NULL) && (pRemoteGpu != NULL) && \ + (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected) && \ + 
(pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.domain == gpuGetDomain(pRemoteGpu)) && \ + (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bus == gpuGetBus(pRemoteGpu)) && \ + (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.device == gpuGetDevice(pRemoteGpu)) && \ + (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.function == 0)) + +// +// NVLINK internal functions +// +NV_STATUS knvlinkRetrainLinkFromOff (OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 linkId); +NV_STATUS knvlinkRetrainLinkFromSafe(OBJGPU *pGpu, struct KernelNvlink *pKernelNvlink, NvU32 linkId); + +// +// NVLINK Callback functions from core library +// +#if defined(INCLUDE_NVLINK_LIB) + +// Device callback functions + +NvlStatus knvlinkCoreAddDeviceCallback (nvlink_device *dev); +NvlStatus knvlinkCoreRemoveDeviceCallback(nvlink_device *dev); + +// Link callback functions + +NvlStatus knvlinkCoreAddLinkCallback (nvlink_link *link); +NvlStatus knvlinkCoreRemoveLinkCallback (nvlink_link *link); +NvlStatus knvlinkCoreLockLinkCallback (nvlink_link *link); +void knvlinkCoreUnlockLinkCallback (nvlink_link *link); +NvlStatus knvlinkCoreQueueLinkChangeCallback (nvlink_link_change *link_change); +NvlStatus knvlinkCoreSetDlLinkModeCallback (nvlink_link *link, NvU64 mode, NvU32 flags); +NvlStatus knvlinkCoreGetDlLinkModeCallback (nvlink_link *link, NvU64 *mode); +NvlStatus knvlinkCoreSetTlLinkModeCallback (nvlink_link *link, NvU64 mode, NvU32 flags); +NvlStatus knvlinkCoreGetTlLinkModeCallback (nvlink_link *link, NvU64 *mode); +NvlStatus knvlinkCoreWriteDiscoveryTokenCallback (nvlink_link *link, NvU64 token); +NvlStatus knvlinkCoreReadDiscoveryTokenCallback (nvlink_link *link, NvU64 *token); +NvlStatus knvlinkCoreSetTxSublinkModeCallback (nvlink_link *link, NvU64 mode, NvU32 flags); +NvlStatus knvlinkCoreSetRxSublinkModeCallback (nvlink_link *link, NvU64 mode, NvU32 flags); +NvlStatus knvlinkCoreGetTxSublinkModeCallback (nvlink_link *link, NvU64 *mode, NvU32 *subMode); +NvlStatus knvlinkCoreGetRxSublinkModeCallback (nvlink_link *link, NvU64 *mode, NvU32 *subMode); +NvlStatus knvlinkCoreSetRxSublinkDetectCallback (nvlink_link *link, NvU32 flags); +NvlStatus knvlinkCoreGetRxSublinkDetectCallback (nvlink_link *link); +void knvlinkCoreTrainingCompleteCallback (nvlink_link *link); +void knvlinkCoreGetUphyLoadCallback (nvlink_link *link, NvBool *bUnlocked); + +#endif + +// NVLINK Utility Functions +void knvlinkUtoa(NvU8 *, NvU64, NvU64); + +NV_STATUS knvlinkCtrlCmdBusGetNvlinkCaps(OBJGPU *pGpu, NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS *pParams); + +#endif // _KERNEL_NVLINK_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_NVLINK_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_ofa_ctx_nvoc.c b/src/nvidia/generated/g_kernel_ofa_ctx_nvoc.c new file mode 100644 index 000000000..ebb5f8561 --- /dev/null +++ b/src/nvidia/generated/g_kernel_ofa_ctx_nvoc.c @@ -0,0 +1,415 @@ +#define NVOC_KERNEL_OFA_CTX_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_ofa_ctx_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xf63d99 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OfaContext; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct 
NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_OfaContext(OfaContext*, RmHalspecOwner* ); +void __nvoc_init_funcTable_OfaContext(OfaContext*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OfaContext(OfaContext*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_OfaContext(OfaContext*, RmHalspecOwner* ); +void __nvoc_dtor_OfaContext(OfaContext*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OfaContext; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_OfaContext = { + /*pClassDef=*/ &__nvoc_class_def_OfaContext, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OfaContext, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OfaContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OfaContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OfaContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OfaContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OfaContext, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OfaContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OfaContext, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OfaContext_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OfaContext, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OfaContext = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_OfaContext_OfaContext, + &__nvoc_rtti_OfaContext_ChannelDescendant, + &__nvoc_rtti_OfaContext_Notifier, + &__nvoc_rtti_OfaContext_INotifier, + &__nvoc_rtti_OfaContext_GpuResource, + &__nvoc_rtti_OfaContext_RmResource, 
+ &__nvoc_rtti_OfaContext_RmResourceCommon, + &__nvoc_rtti_OfaContext_RsResource, + &__nvoc_rtti_OfaContext_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OfaContext = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OfaContext), + /*classId=*/ classId(OfaContext), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OfaContext", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OfaContext, + /*pCastInfo=*/ &__nvoc_castinfo_OfaContext, + /*pExportInfo=*/ &__nvoc_export_info_OfaContext +}; + +static NV_STATUS __nvoc_thunk_ChannelDescendant_ofactxCheckMemInterUnmap(struct OfaContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_OfaContext_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_ofactxShareCallback(struct OfaContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_OfaContext_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_ofactxAccessCallback(struct OfaContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_ofactxMapTo(struct OfaContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ofactxGetMapAddrSpace(struct OfaContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_OfaContext_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_ofactxSetNotificationShare(struct OfaContext *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_OfaContext_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_ofactxGetRefCount(struct OfaContext *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_ofactxAddAdditionalDependants(struct RsClient *pClient, struct OfaContext *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_ofactxControl_Prologue(struct OfaContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ofactxGetRegBaseOffsetAndSize(struct OfaContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct 
GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_OfaContext_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ofactxInternalControlForward(struct OfaContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_OfaContext_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_ofactxUnmapFrom(struct OfaContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_ofactxControl_Epilogue(struct OfaContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_ofactxControlLookup(struct OfaContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_ofactxGetSwMethods(struct OfaContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return chandesGetSwMethods((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_OfaContext_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NvHandle __nvoc_thunk_GpuResource_ofactxGetInternalObjectHandle(struct OfaContext *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_OfaContext_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ofactxControl(struct OfaContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_OfaContext_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ofactxUnmap(struct OfaContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_OfaContext_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_ofactxGetMemInterMapParams(struct OfaContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_OfaContext_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_ofactxGetMemoryMappingDescriptor(struct OfaContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_OfaContext_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_ofactxIsSwMethodStalling(struct OfaContext *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_OfaContext_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_ofactxControlFilter(struct OfaContext 
*pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_ofactxUnregisterEvent(struct OfaContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_OfaContext_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_ofactxCanCopy(struct OfaContext *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_ofactxPreDestruct(struct OfaContext *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OfaContext_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_ofactxGetNotificationListPtr(struct OfaContext *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_OfaContext_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_ofactxGetNotificationShare(struct OfaContext *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_OfaContext_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ofactxMap(struct OfaContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_OfaContext_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_ofactxGetOrAllocNotifShare(struct OfaContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_OfaContext_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OfaContext = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_OfaContext(OfaContext *pThis) { + __nvoc_ofactxDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OfaContext(OfaContext *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, PARAM_TO_ENGDESC_FUNCTION *); +NV_STATUS __nvoc_ctor_OfaContext(OfaContext *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, 
arg_pParams, ((void *)0)); + if (status != NV_OK) goto __nvoc_ctor_OfaContext_fail_ChannelDescendant; + __nvoc_init_dataField_OfaContext(pThis, pRmhalspecowner); + + status = __nvoc_ofactxConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_OfaContext_fail__init; + goto __nvoc_ctor_OfaContext_exit; // Success + +__nvoc_ctor_OfaContext_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_OfaContext_fail_ChannelDescendant: +__nvoc_ctor_OfaContext_exit: + + return status; +} + +static void __nvoc_init_funcTable_OfaContext_1(OfaContext *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__ofactxCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_ofactxCheckMemInterUnmap; + + pThis->__ofactxShareCallback__ = &__nvoc_thunk_GpuResource_ofactxShareCallback; + + pThis->__ofactxAccessCallback__ = &__nvoc_thunk_RmResource_ofactxAccessCallback; + + pThis->__ofactxMapTo__ = &__nvoc_thunk_RsResource_ofactxMapTo; + + pThis->__ofactxGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_ofactxGetMapAddrSpace; + + pThis->__ofactxSetNotificationShare__ = &__nvoc_thunk_Notifier_ofactxSetNotificationShare; + + pThis->__ofactxGetRefCount__ = &__nvoc_thunk_RsResource_ofactxGetRefCount; + + pThis->__ofactxAddAdditionalDependants__ = &__nvoc_thunk_RsResource_ofactxAddAdditionalDependants; + + pThis->__ofactxControl_Prologue__ = &__nvoc_thunk_RmResource_ofactxControl_Prologue; + + pThis->__ofactxGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_ofactxGetRegBaseOffsetAndSize; + + pThis->__ofactxInternalControlForward__ = &__nvoc_thunk_GpuResource_ofactxInternalControlForward; + + pThis->__ofactxUnmapFrom__ = &__nvoc_thunk_RsResource_ofactxUnmapFrom; + + pThis->__ofactxControl_Epilogue__ = &__nvoc_thunk_RmResource_ofactxControl_Epilogue; + + pThis->__ofactxControlLookup__ = &__nvoc_thunk_RsResource_ofactxControlLookup; + + pThis->__ofactxGetSwMethods__ = &__nvoc_thunk_ChannelDescendant_ofactxGetSwMethods; + + pThis->__ofactxGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_ofactxGetInternalObjectHandle; + + pThis->__ofactxControl__ = &__nvoc_thunk_GpuResource_ofactxControl; + + pThis->__ofactxUnmap__ = &__nvoc_thunk_GpuResource_ofactxUnmap; + + pThis->__ofactxGetMemInterMapParams__ = &__nvoc_thunk_RmResource_ofactxGetMemInterMapParams; + + pThis->__ofactxGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_ofactxGetMemoryMappingDescriptor; + + pThis->__ofactxIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_ofactxIsSwMethodStalling; + + pThis->__ofactxControlFilter__ = &__nvoc_thunk_RsResource_ofactxControlFilter; + + pThis->__ofactxUnregisterEvent__ = &__nvoc_thunk_Notifier_ofactxUnregisterEvent; + + pThis->__ofactxCanCopy__ = &__nvoc_thunk_RsResource_ofactxCanCopy; + + pThis->__ofactxPreDestruct__ = &__nvoc_thunk_RsResource_ofactxPreDestruct; + + pThis->__ofactxGetNotificationListPtr__ = &__nvoc_thunk_Notifier_ofactxGetNotificationListPtr; + + pThis->__ofactxGetNotificationShare__ = &__nvoc_thunk_Notifier_ofactxGetNotificationShare; + + pThis->__ofactxMap__ = &__nvoc_thunk_GpuResource_ofactxMap; + + pThis->__ofactxGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_ofactxGetOrAllocNotifShare; +} 
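/*
 * Illustrative aside (not part of the generated file): every __nvoc_thunk_*
 * function above follows one pattern -- the OfaContext pointer is rebased to
 * the embedded base-class instance by adding the compile-time offset recorded
 * in the matching NVOC_RTTI entry, and the base-class implementation is then
 * called on that rebased pointer. The func table just initialized simply
 * stores those thunks. A minimal, self-contained sketch of the rebasing
 * pattern, using hypothetical Derived/Base names that do not exist in the
 * driver:
 */
#include <stddef.h>
#include <stdio.h>

typedef struct Base    { int baseData; }                Base;
typedef struct Derived { int derivedData; Base base; }  Derived;   /* base embedded at nonzero offset */

static void baseDoWork(Base *pBase) { printf("base %d\n", pBase->baseData); }

/* Analogous to a generated thunk: rebase Derived* to its embedded Base*. */
static void thunk_derivedDoWork(Derived *pDerived)
{
    baseDoWork((Base *)(((unsigned char *)pDerived) + offsetof(Derived, base)));
}

int main(void)
{
    Derived d = { .derivedData = 1, .base = { .baseData = 2 } };
    thunk_derivedDoWork(&d);   /* prints "base 2" */
    return 0;
}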
+ +void __nvoc_init_funcTable_OfaContext(OfaContext *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_OfaContext_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_OfaContext(OfaContext *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_OfaContext = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_OfaContext(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_OfaContext(OfaContext **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + OfaContext *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(OfaContext)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OfaContext)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OfaContext); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_OfaContext(pThis, pRmhalspecowner); + status = __nvoc_ctor_OfaContext(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_OfaContext_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OfaContext_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OfaContext(OfaContext **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_OfaContext(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git 
a/src/nvidia/generated/g_kernel_ofa_ctx_nvoc.h b/src/nvidia/generated/g_kernel_ofa_ctx_nvoc.h new file mode 100644 index 000000000..63c9f61bf --- /dev/null +++ b/src/nvidia/generated/g_kernel_ofa_ctx_nvoc.h @@ -0,0 +1,304 @@ +#ifndef _G_KERNEL_OFA_CTX_NVOC_H_ +#define _G_KERNEL_OFA_CTX_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_ofa_ctx_nvoc.h" + +#ifndef KERNEL_OFA_CTX_H +#define KERNEL_OFA_CTX_H + +#include "kernel/gpu/fifo/channel_descendant.h" + +/*! + * RM internal class representing NVXXXX_VIDEO_OFA + */ +#ifdef NVOC_KERNEL_OFA_CTX_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OfaContext { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct OfaContext *__nvoc_pbase_OfaContext; + NV_STATUS (*__ofactxCheckMemInterUnmap__)(struct OfaContext *, NvBool); + NvBool (*__ofactxShareCallback__)(struct OfaContext *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__ofactxAccessCallback__)(struct OfaContext *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__ofactxMapTo__)(struct OfaContext *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__ofactxGetMapAddrSpace__)(struct OfaContext *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__ofactxSetNotificationShare__)(struct OfaContext *, struct NotifShare *); + NvU32 (*__ofactxGetRefCount__)(struct OfaContext *); + void (*__ofactxAddAdditionalDependants__)(struct RsClient *, struct OfaContext *, RsResourceRef *); + NV_STATUS (*__ofactxControl_Prologue__)(struct OfaContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ofactxGetRegBaseOffsetAndSize__)(struct OfaContext *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__ofactxInternalControlForward__)(struct OfaContext *, NvU32, void *, 
NvU32); + NV_STATUS (*__ofactxUnmapFrom__)(struct OfaContext *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__ofactxControl_Epilogue__)(struct OfaContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ofactxControlLookup__)(struct OfaContext *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__ofactxGetSwMethods__)(struct OfaContext *, METHOD **, NvU32 *); + NvHandle (*__ofactxGetInternalObjectHandle__)(struct OfaContext *); + NV_STATUS (*__ofactxControl__)(struct OfaContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ofactxUnmap__)(struct OfaContext *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__ofactxGetMemInterMapParams__)(struct OfaContext *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__ofactxGetMemoryMappingDescriptor__)(struct OfaContext *, struct MEMORY_DESCRIPTOR **); + NvBool (*__ofactxIsSwMethodStalling__)(struct OfaContext *, NvU32); + NV_STATUS (*__ofactxControlFilter__)(struct OfaContext *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ofactxUnregisterEvent__)(struct OfaContext *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__ofactxCanCopy__)(struct OfaContext *); + void (*__ofactxPreDestruct__)(struct OfaContext *); + PEVENTNOTIFICATION *(*__ofactxGetNotificationListPtr__)(struct OfaContext *); + struct NotifShare *(*__ofactxGetNotificationShare__)(struct OfaContext *); + NV_STATUS (*__ofactxMap__)(struct OfaContext *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__ofactxGetOrAllocNotifShare__)(struct OfaContext *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_OfaContext_TYPEDEF__ +#define __NVOC_CLASS_OfaContext_TYPEDEF__ +typedef struct OfaContext OfaContext; +#endif /* __NVOC_CLASS_OfaContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OfaContext +#define __nvoc_class_id_OfaContext 0xf63d99 +#endif /* __nvoc_class_id_OfaContext */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OfaContext; + +#define __staticCast_OfaContext(pThis) \ + ((pThis)->__nvoc_pbase_OfaContext) + +#ifdef __nvoc_kernel_ofa_ctx_h_disabled +#define __dynamicCast_OfaContext(pThis) ((OfaContext*)NULL) +#else //__nvoc_kernel_ofa_ctx_h_disabled +#define __dynamicCast_OfaContext(pThis) \ + ((OfaContext*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OfaContext))) +#endif //__nvoc_kernel_ofa_ctx_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OfaContext(OfaContext**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OfaContext(OfaContext**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_OfaContext(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_OfaContext((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define ofactxCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) ofactxCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define ofactxShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) ofactxShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define ofactxAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) ofactxAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define ofactxMapTo(pResource, pParams) ofactxMapTo_DISPATCH(pResource, pParams) 
+#define ofactxGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) ofactxGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define ofactxSetNotificationShare(pNotifier, pNotifShare) ofactxSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define ofactxGetRefCount(pResource) ofactxGetRefCount_DISPATCH(pResource) +#define ofactxAddAdditionalDependants(pClient, pResource, pReference) ofactxAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define ofactxControl_Prologue(pResource, pCallContext, pParams) ofactxControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define ofactxGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) ofactxGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define ofactxInternalControlForward(pGpuResource, command, pParams, size) ofactxInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define ofactxUnmapFrom(pResource, pParams) ofactxUnmapFrom_DISPATCH(pResource, pParams) +#define ofactxControl_Epilogue(pResource, pCallContext, pParams) ofactxControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define ofactxControlLookup(pResource, pParams, ppEntry) ofactxControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define ofactxGetSwMethods(pChannelDescendant, ppMethods, pNumMethods) ofactxGetSwMethods_DISPATCH(pChannelDescendant, ppMethods, pNumMethods) +#define ofactxGetInternalObjectHandle(pGpuResource) ofactxGetInternalObjectHandle_DISPATCH(pGpuResource) +#define ofactxControl(pGpuResource, pCallContext, pParams) ofactxControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define ofactxUnmap(pGpuResource, pCallContext, pCpuMapping) ofactxUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define ofactxGetMemInterMapParams(pRmResource, pParams) ofactxGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define ofactxGetMemoryMappingDescriptor(pRmResource, ppMemDesc) ofactxGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define ofactxIsSwMethodStalling(pChannelDescendant, hHandle) ofactxIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define ofactxControlFilter(pResource, pCallContext, pParams) ofactxControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define ofactxUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) ofactxUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define ofactxCanCopy(pResource) ofactxCanCopy_DISPATCH(pResource) +#define ofactxPreDestruct(pResource) ofactxPreDestruct_DISPATCH(pResource) +#define ofactxGetNotificationListPtr(pNotifier) ofactxGetNotificationListPtr_DISPATCH(pNotifier) +#define ofactxGetNotificationShare(pNotifier) ofactxGetNotificationShare_DISPATCH(pNotifier) +#define ofactxMap(pGpuResource, pCallContext, pParams, pCpuMapping) ofactxMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define ofactxGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) ofactxGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS ofactxConstructHal_KERNEL(struct OfaContext *pOfaContext, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_kernel_ofa_ctx_h_disabled +static inline NV_STATUS ofactxConstructHal(struct OfaContext *pOfaContext, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("OfaContext was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_ofa_ctx_h_disabled +#define ofactxConstructHal(pOfaContext, pCallContext, pParams) ofactxConstructHal_KERNEL(pOfaContext, pCallContext, pParams) +#endif //__nvoc_kernel_ofa_ctx_h_disabled + +#define ofactxConstructHal_HAL(pOfaContext, pCallContext, pParams) ofactxConstructHal(pOfaContext, pCallContext, pParams) + +void ofactxDestructHal_KERNEL(struct OfaContext *pOfaContext); + +#ifdef __nvoc_kernel_ofa_ctx_h_disabled +static inline void ofactxDestructHal(struct OfaContext *pOfaContext) { + NV_ASSERT_FAILED_PRECOMP("OfaContext was disabled!"); +} +#else //__nvoc_kernel_ofa_ctx_h_disabled +#define ofactxDestructHal(pOfaContext) ofactxDestructHal_KERNEL(pOfaContext) +#endif //__nvoc_kernel_ofa_ctx_h_disabled + +#define ofactxDestructHal_HAL(pOfaContext) ofactxDestructHal(pOfaContext) + +static inline NV_STATUS ofactxCheckMemInterUnmap_DISPATCH(struct OfaContext *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__ofactxCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool ofactxShareCallback_DISPATCH(struct OfaContext *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__ofactxShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool ofactxAccessCallback_DISPATCH(struct OfaContext *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__ofactxAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS ofactxMapTo_DISPATCH(struct OfaContext *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__ofactxMapTo__(pResource, pParams); +} + +static inline NV_STATUS ofactxGetMapAddrSpace_DISPATCH(struct OfaContext *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__ofactxGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void ofactxSetNotificationShare_DISPATCH(struct OfaContext *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__ofactxSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 ofactxGetRefCount_DISPATCH(struct OfaContext *pResource) { + return pResource->__ofactxGetRefCount__(pResource); +} + +static inline void ofactxAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct OfaContext *pResource, RsResourceRef *pReference) { + pResource->__ofactxAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS ofactxControl_Prologue_DISPATCH(struct OfaContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__ofactxControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ofactxGetRegBaseOffsetAndSize_DISPATCH(struct OfaContext *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__ofactxGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS ofactxInternalControlForward_DISPATCH(struct OfaContext *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__ofactxInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS ofactxUnmapFrom_DISPATCH(struct OfaContext *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return 
pResource->__ofactxUnmapFrom__(pResource, pParams); +} + +static inline void ofactxControl_Epilogue_DISPATCH(struct OfaContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__ofactxControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ofactxControlLookup_DISPATCH(struct OfaContext *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__ofactxControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS ofactxGetSwMethods_DISPATCH(struct OfaContext *pChannelDescendant, METHOD **ppMethods, NvU32 *pNumMethods) { + return pChannelDescendant->__ofactxGetSwMethods__(pChannelDescendant, ppMethods, pNumMethods); +} + +static inline NvHandle ofactxGetInternalObjectHandle_DISPATCH(struct OfaContext *pGpuResource) { + return pGpuResource->__ofactxGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS ofactxControl_DISPATCH(struct OfaContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__ofactxControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS ofactxUnmap_DISPATCH(struct OfaContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__ofactxUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS ofactxGetMemInterMapParams_DISPATCH(struct OfaContext *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__ofactxGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS ofactxGetMemoryMappingDescriptor_DISPATCH(struct OfaContext *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__ofactxGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool ofactxIsSwMethodStalling_DISPATCH(struct OfaContext *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__ofactxIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS ofactxControlFilter_DISPATCH(struct OfaContext *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__ofactxControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ofactxUnregisterEvent_DISPATCH(struct OfaContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__ofactxUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool ofactxCanCopy_DISPATCH(struct OfaContext *pResource) { + return pResource->__ofactxCanCopy__(pResource); +} + +static inline void ofactxPreDestruct_DISPATCH(struct OfaContext *pResource) { + pResource->__ofactxPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *ofactxGetNotificationListPtr_DISPATCH(struct OfaContext *pNotifier) { + return pNotifier->__ofactxGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *ofactxGetNotificationShare_DISPATCH(struct OfaContext *pNotifier) { + return pNotifier->__ofactxGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS ofactxMap_DISPATCH(struct OfaContext *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__ofactxMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + 
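/*
 * Illustrative aside (not part of the generated header): the ofactx*() macros
 * defined earlier expand to these *_DISPATCH inlines, which indirect through
 * a per-object function pointer that the generated funcTable init fills in --
 * this is how NVOC emulates virtual dispatch in C. A minimal sketch of that
 * mechanism with hypothetical Widget names (not part of the driver):
 */
typedef struct Widget Widget;
struct Widget {
    int  value;
    int (*__widgetGetValue__)(Widget *);           /* "vtable" slot */
};

static inline int widgetGetValue_DISPATCH(Widget *pWidget) {
    return pWidget->__widgetGetValue__(pWidget);    /* indirect call */
}
#define widgetGetValue(pWidget) widgetGetValue_DISPATCH(pWidget)

static int widgetGetValue_IMPL(Widget *pWidget) { return pWidget->value; }

/* funcTable-init analogue: install the implementation into the slot. */
static inline void widgetInitFuncTable(Widget *pWidget) {
    pWidget->__widgetGetValue__ = &widgetGetValue_IMPL;
}

int main(void) {
    Widget w = { .value = 42 };
    widgetInitFuncTable(&w);
    return (widgetGetValue(&w) == 42) ? 0 : 1;      /* dispatches through the slot */
}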
+static inline NV_STATUS ofactxGetOrAllocNotifShare_DISPATCH(struct OfaContext *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__ofactxGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS __nvoc_ofactxConstruct(struct OfaContext *arg_pOfaContext, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams) { + return ofactxConstructHal(arg_pOfaContext, arg_pCallContext, arg_pParams); +} + +static inline void __nvoc_ofactxDestruct(struct OfaContext *pOfaContext) { + ofactxDestructHal(pOfaContext); +} + +#undef PRIVATE_FIELD + + +#endif // KERNEL_OFA_CTX_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_OFA_CTX_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_rc_nvoc.c b/src/nvidia/generated/g_kernel_rc_nvoc.c new file mode 100644 index 000000000..0baff802e --- /dev/null +++ b/src/nvidia/generated/g_kernel_rc_nvoc.c @@ -0,0 +1,303 @@ +#define NVOC_KERNEL_RC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_rc_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4888db = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelRc; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_KernelRc(KernelRc*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelRc(KernelRc*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelRc(KernelRc*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelRc(KernelRc*, RmHalspecOwner* ); +void __nvoc_dtor_KernelRc(KernelRc*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelRc; + +static const struct NVOC_RTTI __nvoc_rtti_KernelRc_KernelRc = { + /*pClassDef=*/ &__nvoc_class_def_KernelRc, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelRc, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelRc_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelRc, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelRc_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelRc, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelRc = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_KernelRc_KernelRc, + &__nvoc_rtti_KernelRc_OBJENGSTATE, + &__nvoc_rtti_KernelRc_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelRc = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelRc), + /*classId=*/ classId(KernelRc), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelRc", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelRc, + /*pCastInfo=*/ &__nvoc_castinfo_KernelRc, + /*pExportInfo=*/ &__nvoc_export_info_KernelRc +}; + +static NV_STATUS __nvoc_thunk_KernelRc_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelRc, ENGDESCRIPTOR engDescriptor) { + return krcConstructEngine(pGpu, (struct KernelRc *)(((unsigned char *)pKernelRc) - __nvoc_rtti_KernelRc_OBJENGSTATE.offset), engDescriptor); +} + +static NV_STATUS 
__nvoc_thunk_OBJENGSTATE_krcReconcileTunableState(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStateLoad(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStateUnload(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStateInitLocked(POBJGPU pGpu, struct KernelRc *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStatePreLoad(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStatePostUnload(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_krcStateDestroy(POBJGPU pGpu, struct KernelRc *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStatePreUnload(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStateInitUnlocked(POBJGPU pGpu, struct KernelRc *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_krcInitMissing(POBJGPU pGpu, struct KernelRc *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStatePreInitLocked(POBJGPU pGpu, struct KernelRc *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStatePreInitUnlocked(POBJGPU pGpu, struct KernelRc *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcGetTunableState(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcCompareTunableState(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char 
*)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_krcFreeTunableState(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcStatePostLoad(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcAllocTunableState(POBJGPU pGpu, struct KernelRc *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_krcSetTunableState(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_krcIsPresent(POBJGPU pGpu, struct KernelRc *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelRc_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelRc = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelRc(KernelRc *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelRc(KernelRc *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_KernelRc(KernelRc *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_KernelRc_fail_OBJENGSTATE; + __nvoc_init_dataField_KernelRc(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelRc_exit; // Success + +__nvoc_ctor_KernelRc_fail_OBJENGSTATE: +__nvoc_ctor_KernelRc_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelRc_1(KernelRc *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + 
pThis->__krcConstructEngine__ = &krcConstructEngine_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelRc_engstateConstructEngine; + + pThis->__krcReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_krcReconcileTunableState; + + pThis->__krcStateLoad__ = &__nvoc_thunk_OBJENGSTATE_krcStateLoad; + + pThis->__krcStateUnload__ = &__nvoc_thunk_OBJENGSTATE_krcStateUnload; + + pThis->__krcStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_krcStateInitLocked; + + pThis->__krcStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_krcStatePreLoad; + + pThis->__krcStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_krcStatePostUnload; + + pThis->__krcStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_krcStateDestroy; + + pThis->__krcStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_krcStatePreUnload; + + pThis->__krcStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_krcStateInitUnlocked; + + pThis->__krcInitMissing__ = &__nvoc_thunk_OBJENGSTATE_krcInitMissing; + + pThis->__krcStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_krcStatePreInitLocked; + + pThis->__krcStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_krcStatePreInitUnlocked; + + pThis->__krcGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_krcGetTunableState; + + pThis->__krcCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_krcCompareTunableState; + + pThis->__krcFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_krcFreeTunableState; + + pThis->__krcStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_krcStatePostLoad; + + pThis->__krcAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_krcAllocTunableState; + + pThis->__krcSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_krcSetTunableState; + + pThis->__krcIsPresent__ = &__nvoc_thunk_OBJENGSTATE_krcIsPresent; +} + +void __nvoc_init_funcTable_KernelRc(KernelRc *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelRc_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_KernelRc(KernelRc *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelRc = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_KernelRc(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelRc(KernelRc **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelRc *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelRc)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelRc)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelRc); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelRc(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelRc(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelRc_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelRc_cleanup: + // do not call destructors here since the constructor already 
called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelRc(KernelRc **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelRc(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_rc_nvoc.h b/src/nvidia/generated/g_kernel_rc_nvoc.h new file mode 100644 index 000000000..a93ca7d15 --- /dev/null +++ b/src/nvidia/generated/g_kernel_rc_nvoc.h @@ -0,0 +1,575 @@ +#ifndef _G_KERNEL_RC_NVOC_H_ +#define _G_KERNEL_RC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_rc_nvoc.h" + +#ifndef KERNEL_RC_H +#define KERNEL_RC_H 1 + +#include "kernel/gpu/eng_desc.h" +#include "kernel/gpu/eng_state.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/gpu.h" +#include "kernel/gpu/mmu/kern_gmmu.h" +#include "kernel/gpu/rc/kernel_rc_watchdog.h" +#include "kernel/gpu/rc/kernel_rc_watchdog_private.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "kernel/rmapi/client_resource.h" + + +typedef enum { + RC_NOTIFIER_SCOPE_CHANNEL = 0, + RC_NOTIFIER_SCOPE_TSG, +} RC_NOTIFIER_SCOPE; + + +/*! 
+ * Kernel interface for RC (Robust Channels) and Watchdog + */ +#ifdef NVOC_KERNEL_RC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelRc { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelRc *__nvoc_pbase_KernelRc; + NV_STATUS (*__krcConstructEngine__)(struct OBJGPU *, struct KernelRc *, ENGDESCRIPTOR); + NV_STATUS (*__krcReconcileTunableState__)(POBJGPU, struct KernelRc *, void *); + NV_STATUS (*__krcStateLoad__)(POBJGPU, struct KernelRc *, NvU32); + NV_STATUS (*__krcStateUnload__)(POBJGPU, struct KernelRc *, NvU32); + NV_STATUS (*__krcStateInitLocked__)(POBJGPU, struct KernelRc *); + NV_STATUS (*__krcStatePreLoad__)(POBJGPU, struct KernelRc *, NvU32); + NV_STATUS (*__krcStatePostUnload__)(POBJGPU, struct KernelRc *, NvU32); + void (*__krcStateDestroy__)(POBJGPU, struct KernelRc *); + NV_STATUS (*__krcStatePreUnload__)(POBJGPU, struct KernelRc *, NvU32); + NV_STATUS (*__krcStateInitUnlocked__)(POBJGPU, struct KernelRc *); + void (*__krcInitMissing__)(POBJGPU, struct KernelRc *); + NV_STATUS (*__krcStatePreInitLocked__)(POBJGPU, struct KernelRc *); + NV_STATUS (*__krcStatePreInitUnlocked__)(POBJGPU, struct KernelRc *); + NV_STATUS (*__krcGetTunableState__)(POBJGPU, struct KernelRc *, void *); + NV_STATUS (*__krcCompareTunableState__)(POBJGPU, struct KernelRc *, void *, void *); + void (*__krcFreeTunableState__)(POBJGPU, struct KernelRc *, void *); + NV_STATUS (*__krcStatePostLoad__)(POBJGPU, struct KernelRc *, NvU32); + NV_STATUS (*__krcAllocTunableState__)(POBJGPU, struct KernelRc *, void **); + NV_STATUS (*__krcSetTunableState__)(POBJGPU, struct KernelRc *, void *); + NvBool (*__krcIsPresent__)(POBJGPU, struct KernelRc *); + NvBool bRobustChannelsEnabled; + NvBool bBreakOnRc; + NvBool bLogEvents; + NvBool bGpuUuidLoggedOnce; + KernelWatchdog watchdog; + KernelWatchdogPersistent watchdogPersistent; + KernelWatchdogChannelInfo watchdogChannelInfo; +}; + +#ifndef __NVOC_CLASS_KernelRc_TYPEDEF__ +#define __NVOC_CLASS_KernelRc_TYPEDEF__ +typedef struct KernelRc KernelRc; +#endif /* __NVOC_CLASS_KernelRc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelRc +#define __nvoc_class_id_KernelRc 0x4888db +#endif /* __nvoc_class_id_KernelRc */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelRc; + +#define __staticCast_KernelRc(pThis) \ + ((pThis)->__nvoc_pbase_KernelRc) + +#ifdef __nvoc_kernel_rc_h_disabled +#define __dynamicCast_KernelRc(pThis) ((KernelRc*)NULL) +#else //__nvoc_kernel_rc_h_disabled +#define __dynamicCast_KernelRc(pThis) \ + ((KernelRc*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelRc))) +#endif //__nvoc_kernel_rc_h_disabled + +#define PDB_PROP_KRC_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_KRC_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelRc(KernelRc**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelRc(KernelRc**, Dynamic*, NvU32); +#define __objCreate_KernelRc(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelRc((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define krcConstructEngine(pGpu, pKernelRc, engDescriptor) krcConstructEngine_DISPATCH(pGpu, pKernelRc, engDescriptor) +#define krcReconcileTunableState(pGpu, pEngstate, pTunableState) krcReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define krcStateLoad(pGpu, pEngstate, arg0) krcStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define krcStateUnload(pGpu, pEngstate, arg0) krcStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define krcStateInitLocked(pGpu, pEngstate) krcStateInitLocked_DISPATCH(pGpu, pEngstate) +#define krcStatePreLoad(pGpu, pEngstate, arg0) krcStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define krcStatePostUnload(pGpu, pEngstate, arg0) krcStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define krcStateDestroy(pGpu, pEngstate) krcStateDestroy_DISPATCH(pGpu, pEngstate) +#define krcStatePreUnload(pGpu, pEngstate, arg0) krcStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define krcStateInitUnlocked(pGpu, pEngstate) krcStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define krcInitMissing(pGpu, pEngstate) krcInitMissing_DISPATCH(pGpu, pEngstate) +#define krcStatePreInitLocked(pGpu, pEngstate) krcStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define krcStatePreInitUnlocked(pGpu, pEngstate) krcStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define krcGetTunableState(pGpu, pEngstate, pTunableState) krcGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define krcCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) krcCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define krcFreeTunableState(pGpu, pEngstate, pTunableState) krcFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define krcStatePostLoad(pGpu, pEngstate, arg0) krcStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define krcAllocTunableState(pGpu, pEngstate, ppTunableState) krcAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define krcSetTunableState(pGpu, pEngstate, pTunableState) krcSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define krcIsPresent(pGpu, pEngstate) krcIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS krcErrorWriteNotifier_CPU(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, NvU32 exceptType, NvU32 localEngineType, NV_STATUS notifierStatus, NvU32 *pFlushFlags); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcErrorWriteNotifier(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, NvU32 exceptType, NvU32 localEngineType, NV_STATUS notifierStatus, NvU32 *pFlushFlags) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcErrorWriteNotifier(pGpu, pKernelRc, pKernelChannel, exceptType, localEngineType, notifierStatus, pFlushFlags) krcErrorWriteNotifier_CPU(pGpu, pKernelRc, pKernelChannel, exceptType, localEngineType, notifierStatus, pFlushFlags) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcErrorWriteNotifier_HAL(pGpu, pKernelRc, pKernelChannel, exceptType, localEngineType, notifierStatus, pFlushFlags) krcErrorWriteNotifier(pGpu, pKernelRc, pKernelChannel, exceptType, 
localEngineType, notifierStatus, pFlushFlags) + +NV_STATUS krcErrorSendEventNotifications_KERNEL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, NvU32 engineId, NvU32 exceptType, RC_NOTIFIER_SCOPE scope, NvU16 partitionAttributionId); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcErrorSendEventNotifications(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, NvU32 engineId, NvU32 exceptType, RC_NOTIFIER_SCOPE scope, NvU16 partitionAttributionId) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcErrorSendEventNotifications(pGpu, pKernelRc, pKernelChannel, engineId, exceptType, scope, partitionAttributionId) krcErrorSendEventNotifications_KERNEL(pGpu, pKernelRc, pKernelChannel, engineId, exceptType, scope, partitionAttributionId) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcErrorSendEventNotifications_HAL(pGpu, pKernelRc, pKernelChannel, engineId, exceptType, scope, partitionAttributionId) krcErrorSendEventNotifications(pGpu, pKernelRc, pKernelChannel, engineId, exceptType, scope, partitionAttributionId) + +NV_STATUS krcErrorSendEventNotificationsCtxDma_FWCLIENT(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, RC_NOTIFIER_SCOPE scope); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcErrorSendEventNotificationsCtxDma(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, RC_NOTIFIER_SCOPE scope) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcErrorSendEventNotificationsCtxDma(pGpu, pKernelRc, pKernelChannel, scope) krcErrorSendEventNotificationsCtxDma_FWCLIENT(pGpu, pKernelRc, pKernelChannel, scope) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcErrorSendEventNotificationsCtxDma_HAL(pGpu, pKernelRc, pKernelChannel, scope) krcErrorSendEventNotificationsCtxDma(pGpu, pKernelRc, pKernelChannel, scope) + +void krcGetMigAttributionForError_KERNEL(struct KernelRc *pKernelRc, NvU32 exceptType, NvU16 *pGpuPartitionId, NvU16 *pComputeInstanceId); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcGetMigAttributionForError(struct KernelRc *pKernelRc, NvU32 exceptType, NvU16 *pGpuPartitionId, NvU16 *pComputeInstanceId) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcGetMigAttributionForError(pKernelRc, exceptType, pGpuPartitionId, pComputeInstanceId) krcGetMigAttributionForError_KERNEL(pKernelRc, exceptType, pGpuPartitionId, pComputeInstanceId) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcGetMigAttributionForError_HAL(pKernelRc, exceptType, pGpuPartitionId, pComputeInstanceId) krcGetMigAttributionForError(pKernelRc, exceptType, pGpuPartitionId, pComputeInstanceId) + +static inline struct KernelChannel *krcGetChannelInError_9e2234(struct KernelRc *pKernelRc) { + return ((void *)0); +} + +#ifdef __nvoc_kernel_rc_h_disabled +static inline struct KernelChannel *krcGetChannelInError(struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NULL; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcGetChannelInError(pKernelRc) krcGetChannelInError_9e2234(pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcGetChannelInError_HAL(pKernelRc) krcGetChannelInError(pKernelRc) + +NV_STATUS 
krcSubdeviceCtrlGetErrorInfoCheckPermissions_KERNEL(struct KernelRc *pKernelRc, struct Subdevice *pSubdevice); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcSubdeviceCtrlGetErrorInfoCheckPermissions(struct KernelRc *pKernelRc, struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcSubdeviceCtrlGetErrorInfoCheckPermissions(pKernelRc, pSubdevice) krcSubdeviceCtrlGetErrorInfoCheckPermissions_KERNEL(pKernelRc, pSubdevice) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcSubdeviceCtrlGetErrorInfoCheckPermissions_HAL(pKernelRc, pSubdevice) krcSubdeviceCtrlGetErrorInfoCheckPermissions(pKernelRc, pSubdevice) + +NV_STATUS krcCheckBusError_KERNEL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcCheckBusError(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcCheckBusError(pGpu, pKernelRc) krcCheckBusError_KERNEL(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcCheckBusError_HAL(pGpu, pKernelRc) krcCheckBusError(pGpu, pKernelRc) + +NV_STATUS krcCliresCtrlNvdGetRcerrRptCheckPermissions_KERNEL(struct KernelRc *pKernelRc, struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS *pReportParams); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcCliresCtrlNvdGetRcerrRptCheckPermissions(struct KernelRc *pKernelRc, struct RmClientResource *pRmCliRes, NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS *pReportParams) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcCliresCtrlNvdGetRcerrRptCheckPermissions(pKernelRc, pRmCliRes, pReportParams) krcCliresCtrlNvdGetRcerrRptCheckPermissions_KERNEL(pKernelRc, pRmCliRes, pReportParams) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcCliresCtrlNvdGetRcerrRptCheckPermissions_HAL(pKernelRc, pRmCliRes, pReportParams) krcCliresCtrlNvdGetRcerrRptCheckPermissions(pKernelRc, pRmCliRes, pReportParams) + +NV_STATUS krcWatchdogInit_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcWatchdogInit(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogInit(pGpu, pKernelRc) krcWatchdogInit_IMPL(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcWatchdogInit_HAL(pGpu, pKernelRc) krcWatchdogInit(pGpu, pKernelRc) + +void krcWatchdogInitPushbuffer_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdogInitPushbuffer(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogInitPushbuffer(pGpu, pKernelRc) krcWatchdogInitPushbuffer_IMPL(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcWatchdogInitPushbuffer_HAL(pGpu, pKernelRc) krcWatchdogInitPushbuffer(pGpu, pKernelRc) + +void krcWatchdog_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdog(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc 
was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdog(pGpu, pKernelRc) krcWatchdog_IMPL(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcWatchdog_HAL(pGpu, pKernelRc) krcWatchdog(pGpu, pKernelRc) + +void krcWatchdogRecovery_KERNEL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc); + +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdogRecovery(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogRecovery(pGpu, pKernelRc) krcWatchdogRecovery_KERNEL(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcWatchdogRecovery_HAL(pGpu, pKernelRc) krcWatchdogRecovery(pGpu, pKernelRc) + +static inline void krcWatchdogCallbackVblankRecovery_b3696a(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + return; +} + +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdogCallbackVblankRecovery(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogCallbackVblankRecovery(pGpu, pKernelRc) krcWatchdogCallbackVblankRecovery_b3696a(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcWatchdogCallbackVblankRecovery_HAL(pGpu, pKernelRc) krcWatchdogCallbackVblankRecovery(pGpu, pKernelRc) + +static inline void krcWatchdogCallbackPerf_b3696a(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + return; +} + +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdogCallbackPerf(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogCallbackPerf(pGpu, pKernelRc) krcWatchdogCallbackPerf_b3696a(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#define krcWatchdogCallbackPerf_HAL(pGpu, pKernelRc) krcWatchdogCallbackPerf(pGpu, pKernelRc) + +NV_STATUS krcConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, ENGDESCRIPTOR engDescriptor); + +static inline NV_STATUS krcConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, ENGDESCRIPTOR engDescriptor) { + return pKernelRc->__krcConstructEngine__(pGpu, pKernelRc, engDescriptor); +} + +static inline NV_STATUS krcReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunableState) { + return pEngstate->__krcReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS krcStateLoad_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return pEngstate->__krcStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS krcStateUnload_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return pEngstate->__krcStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS krcStateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate) { + return pEngstate->__krcStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS krcStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return pEngstate->__krcStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS krcStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return pEngstate->__krcStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void krcStateDestroy_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate) { + pEngstate->__krcStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS 
krcStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return pEngstate->__krcStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS krcStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate) { + return pEngstate->__krcStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void krcInitMissing_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate) { + pEngstate->__krcInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS krcStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate) { + return pEngstate->__krcStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS krcStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate) { + return pEngstate->__krcStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS krcGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunableState) { + return pEngstate->__krcGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS krcCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__krcCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void krcFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunableState) { + pEngstate->__krcFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS krcStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, NvU32 arg0) { + return pEngstate->__krcStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS krcAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, void **ppTunableState) { + return pEngstate->__krcAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS krcSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate, void *pTunableState) { + return pEngstate->__krcSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool krcIsPresent_DISPATCH(POBJGPU pGpu, struct KernelRc *pEngstate) { + return pEngstate->__krcIsPresent__(pGpu, pEngstate); +} + +void krcInitRegistryOverridesDelayed_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc); +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcInitRegistryOverridesDelayed(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcInitRegistryOverridesDelayed(pGpu, pKernelRc) krcInitRegistryOverridesDelayed_IMPL(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +NV_STATUS krcErrorSetNotifier_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, NvU32 exceptType, NvU32 nv2080EngineType, RC_NOTIFIER_SCOPE scope); +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcErrorSetNotifier(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, NvU32 exceptType, NvU32 nv2080EngineType, RC_NOTIFIER_SCOPE scope) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcErrorSetNotifier(pGpu, pKernelRc, pKernelChannel, exceptType, nv2080EngineType, scope) krcErrorSetNotifier_IMPL(pGpu, pKernelRc, pKernelChannel, exceptType, nv2080EngineType, scope) +#endif //__nvoc_kernel_rc_h_disabled + +NV_STATUS krcReadVirtMem_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, NvU64 virtAddr, 
NvP64 bufPtr, NvU32 bufSize); +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcReadVirtMem(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, NvU64 virtAddr, NvP64 bufPtr, NvU32 bufSize) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcReadVirtMem(pGpu, pKernelRc, pKernelChannel, virtAddr, bufPtr, bufSize) krcReadVirtMem_IMPL(pGpu, pKernelRc, pKernelChannel, virtAddr, bufPtr, bufSize) +#endif //__nvoc_kernel_rc_h_disabled + +void krcReportXid_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, NvU32 exceptType, const char *pMsg); +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcReportXid(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, NvU32 exceptType, const char *pMsg) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcReportXid(pGpu, pKernelRc, exceptType, pMsg) krcReportXid_IMPL(pGpu, pKernelRc, exceptType, pMsg) +#endif //__nvoc_kernel_rc_h_disabled + +NvBool krcTestAllowAlloc_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, NvU32 failMask); +#ifdef __nvoc_kernel_rc_h_disabled +static inline NvBool krcTestAllowAlloc(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, NvU32 failMask) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcTestAllowAlloc(pGpu, pKernelRc, failMask) krcTestAllowAlloc_IMPL(pGpu, pKernelRc, failMask) +#endif //__nvoc_kernel_rc_h_disabled + +NvBool krcErrorInvokeCallback_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, NvU32 exceptType, NvU32 exceptLevel, NvU32 engineId, NvU32 rcDiagStart); +#ifdef __nvoc_kernel_rc_h_disabled +static inline NvBool krcErrorInvokeCallback(struct OBJGPU *pGpu, struct KernelRc *pKernelRc, struct KernelChannel *pKernelChannel, FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData, NvU32 exceptType, NvU32 exceptLevel, NvU32 engineId, NvU32 rcDiagStart) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_FALSE; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcErrorInvokeCallback(pGpu, pKernelRc, pKernelChannel, pMmuExceptionData, exceptType, exceptLevel, engineId, rcDiagStart) krcErrorInvokeCallback_IMPL(pGpu, pKernelRc, pKernelChannel, pMmuExceptionData, exceptType, exceptLevel, engineId, rcDiagStart) +#endif //__nvoc_kernel_rc_h_disabled + +NV_STATUS krcSubdeviceCtrlCmdRcGetErrorCount_IMPL(struct KernelRc *pKernelRc, struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS *pParams); +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcSubdeviceCtrlCmdRcGetErrorCount(struct KernelRc *pKernelRc, struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS *pParams) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcSubdeviceCtrlCmdRcGetErrorCount(pKernelRc, pSubdevice, pParams) krcSubdeviceCtrlCmdRcGetErrorCount_IMPL(pKernelRc, pSubdevice, pParams) +#endif //__nvoc_kernel_rc_h_disabled + +NV_STATUS krcSubdeviceCtrlCmdRcGetErrorV2_IMPL(struct KernelRc *pKernelRc, struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_ERROR_V2_PARAMS *pParams); +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcSubdeviceCtrlCmdRcGetErrorV2(struct KernelRc *pKernelRc, struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_ERROR_V2_PARAMS *pParams) { + 
NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcSubdeviceCtrlCmdRcGetErrorV2(pKernelRc, pSubdevice, pParams) krcSubdeviceCtrlCmdRcGetErrorV2_IMPL(pKernelRc, pSubdevice, pParams) +#endif //__nvoc_kernel_rc_h_disabled + +NV_STATUS krcWatchdogChangeState_IMPL(struct KernelRc *pKernelRc, struct Subdevice *pSubdevice, RC_CHANGE_WATCHDOG_STATE_OPERATION_TYPE operation); +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcWatchdogChangeState(struct KernelRc *pKernelRc, struct Subdevice *pSubdevice, RC_CHANGE_WATCHDOG_STATE_OPERATION_TYPE operation) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogChangeState(pKernelRc, pSubdevice, operation) krcWatchdogChangeState_IMPL(pKernelRc, pSubdevice, operation) +#endif //__nvoc_kernel_rc_h_disabled + +void krcWatchdogEnable_IMPL(struct KernelRc *pKernelRc, NvBool bOverRide); +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdogEnable(struct KernelRc *pKernelRc, NvBool bOverRide) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogEnable(pKernelRc, bOverRide) krcWatchdogEnable_IMPL(pKernelRc, bOverRide) +#endif //__nvoc_kernel_rc_h_disabled + +void krcWatchdogDisable_IMPL(struct KernelRc *pKernelRc); +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdogDisable(struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogDisable(pKernelRc) krcWatchdogDisable_IMPL(pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +NV_STATUS krcWatchdogShutdown_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc); +#ifdef __nvoc_kernel_rc_h_disabled +static inline NV_STATUS krcWatchdogShutdown(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogShutdown(pGpu, pKernelRc) krcWatchdogShutdown_IMPL(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +void krcWatchdogGetReservationCounts_IMPL(struct KernelRc *pKernelRc, NvS32 *pEnable, NvS32 *pDisable, NvS32 *pSoftDisable); +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdogGetReservationCounts(struct KernelRc *pKernelRc, NvS32 *pEnable, NvS32 *pDisable, NvS32 *pSoftDisable) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogGetReservationCounts(pKernelRc, pEnable, pDisable, pSoftDisable) krcWatchdogGetReservationCounts_IMPL(pKernelRc, pEnable, pDisable, pSoftDisable) +#endif //__nvoc_kernel_rc_h_disabled + +void krcWatchdogWriteNotifierToGpfifo_IMPL(struct OBJGPU *pGpu, struct KernelRc *pKernelRc); +#ifdef __nvoc_kernel_rc_h_disabled +static inline void krcWatchdogWriteNotifierToGpfifo(struct OBJGPU *pGpu, struct KernelRc *pKernelRc) { + NV_ASSERT_FAILED_PRECOMP("KernelRc was disabled!"); +} +#else //__nvoc_kernel_rc_h_disabled +#define krcWatchdogWriteNotifierToGpfifo(pGpu, pKernelRc) krcWatchdogWriteNotifierToGpfifo_IMPL(pGpu, pKernelRc) +#endif //__nvoc_kernel_rc_h_disabled + +#undef PRIVATE_FIELD + + + +/*! Actual callback funtion called during RC */ +NvU32 krcResetCallback(NvHandle hClient, + NvHandle hDevice, + NvHandle hFifo, + NvHandle hChannel, + void *pContext, + NvBool bClearRc); + + +/*! 
Watchdog timer function */ +void krcWatchdogTimerProc(struct OBJGPU *pGpu, void *); + + +#define krcBreakpoint(pKernelRc) \ + { \ + if ((pKernelRc)->bBreakOnRc) \ + { \ + DBG_BREAKPOINT(); \ + } \ + } + +#endif // ifndef KERNEL_RC_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_RC_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_sched_mgr_nvoc.c b/src/nvidia/generated/g_kernel_sched_mgr_nvoc.c new file mode 100644 index 000000000..d1a923de2 --- /dev/null +++ b/src/nvidia/generated/g_kernel_sched_mgr_nvoc.c @@ -0,0 +1,150 @@ +#define NVOC_KERNEL_SCHED_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_sched_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xea0970 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSchedMgr; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_KernelSchedMgr(KernelSchedMgr*); +void __nvoc_init_funcTable_KernelSchedMgr(KernelSchedMgr*); +NV_STATUS __nvoc_ctor_KernelSchedMgr(KernelSchedMgr*); +void __nvoc_init_dataField_KernelSchedMgr(KernelSchedMgr*); +void __nvoc_dtor_KernelSchedMgr(KernelSchedMgr*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelSchedMgr; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSchedMgr_KernelSchedMgr = { + /*pClassDef=*/ &__nvoc_class_def_KernelSchedMgr, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelSchedMgr, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSchedMgr_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSchedMgr, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelSchedMgr = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_KernelSchedMgr_KernelSchedMgr, + &__nvoc_rtti_KernelSchedMgr_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSchedMgr = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelSchedMgr), + /*classId=*/ classId(KernelSchedMgr), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelSchedMgr", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelSchedMgr, + /*pCastInfo=*/ &__nvoc_castinfo_KernelSchedMgr, + /*pExportInfo=*/ &__nvoc_export_info_KernelSchedMgr +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelSchedMgr = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_KernelSchedMgr(KernelSchedMgr *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelSchedMgr(KernelSchedMgr *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->bIsSchedSwEnabled = ((NvBool)(0 != 0)); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_KernelSchedMgr(KernelSchedMgr *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_KernelSchedMgr_fail_Object; + __nvoc_init_dataField_KernelSchedMgr(pThis); + goto __nvoc_ctor_KernelSchedMgr_exit; // Success + +__nvoc_ctor_KernelSchedMgr_fail_Object: +__nvoc_ctor_KernelSchedMgr_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelSchedMgr_1(KernelSchedMgr *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void 
__nvoc_init_funcTable_KernelSchedMgr(KernelSchedMgr *pThis) { + __nvoc_init_funcTable_KernelSchedMgr_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_KernelSchedMgr(KernelSchedMgr *pThis) { + pThis->__nvoc_pbase_KernelSchedMgr = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_KernelSchedMgr(pThis); +} + +NV_STATUS __nvoc_objCreate_KernelSchedMgr(KernelSchedMgr **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelSchedMgr *pThis; + + pThis = portMemAllocNonPaged(sizeof(KernelSchedMgr)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelSchedMgr)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelSchedMgr); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_KernelSchedMgr(pThis); + status = __nvoc_ctor_KernelSchedMgr(pThis); + if (status != NV_OK) goto __nvoc_objCreate_KernelSchedMgr_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelSchedMgr_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelSchedMgr(KernelSchedMgr **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelSchedMgr(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_sched_mgr_nvoc.h b/src/nvidia/generated/g_kernel_sched_mgr_nvoc.h new file mode 100644 index 000000000..2aef42b2c --- /dev/null +++ b/src/nvidia/generated/g_kernel_sched_mgr_nvoc.h @@ -0,0 +1,138 @@ +#ifndef _G_KERNEL_SCHED_MGR_NVOC_H_ +#define _G_KERNEL_SCHED_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file kernel_sched_mgr.h + * @brief Provides definition for KernelSchedMgr data-structures and interfaces. 
+ */ + +#include "g_kernel_sched_mgr_nvoc.h" + +#ifndef _KERNELSCHEDMGR_H_ +#define _KERNELSCHEDMGR_H_ + +typedef enum __SCHED_POLICY SCHED_POLICY; + +/* -------------------------------- Includes -------------------------------- */ + +#include "core/core.h" +#include "gpu/gpu.h" + +#include "ctrl/ctrl2080/ctrl2080fifo.h" // NV2080_CTRL_FIFO_* + +/* ------------------------------- Datatypes --------------------------------*/ + +enum __SCHED_POLICY +{ + SCHED_POLICY_DEFAULT = 0, + SCHED_POLICY_VGPU_RELATIVE, + SCHED_POLICY_PGPU_SHARE, + SCHED_POLICY_WDDM_COMPATIBILITY, + SCHED_POLICY_GFN_LSTT, + SCHED_POLICY_CHANNEL_INTERLEAVED, + SCHED_POLICY_CHANNEL_INTERLEAVED_WDDM, +}; + +/*! + * Class of scheduling manager for all the runlists. + */ +#ifdef NVOC_KERNEL_SCHED_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelSchedMgr { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct KernelSchedMgr *__nvoc_pbase_KernelSchedMgr; + NvBool bIsSchedSwEnabled; + NvU32 configSchedPolicy; +}; + +#ifndef __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ +#define __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ +typedef struct KernelSchedMgr KernelSchedMgr; +#endif /* __NVOC_CLASS_KernelSchedMgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSchedMgr +#define __nvoc_class_id_KernelSchedMgr 0xea0970 +#endif /* __nvoc_class_id_KernelSchedMgr */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSchedMgr; + +#define __staticCast_KernelSchedMgr(pThis) \ + ((pThis)->__nvoc_pbase_KernelSchedMgr) + +#ifdef __nvoc_kernel_sched_mgr_h_disabled +#define __dynamicCast_KernelSchedMgr(pThis) ((KernelSchedMgr*)NULL) +#else //__nvoc_kernel_sched_mgr_h_disabled +#define __dynamicCast_KernelSchedMgr(pThis) \ + ((KernelSchedMgr*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelSchedMgr))) +#endif //__nvoc_kernel_sched_mgr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelSchedMgr(KernelSchedMgr**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelSchedMgr(KernelSchedMgr**, Dynamic*, NvU32); +#define __objCreate_KernelSchedMgr(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelSchedMgr((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +static inline NvBool kschedmgrIsSchedSwEnabled(struct KernelSchedMgr *pKernelSchedMgr) { + return pKernelSchedMgr->bIsSchedSwEnabled; +} + +static inline NvU32 kschedmgrGetSchedPolicy(struct KernelSchedMgr *pKernelSchedMgr) { + return pKernelSchedMgr->configSchedPolicy; +} + +void kschedmgrConstructPolicy_IMPL(struct KernelSchedMgr *pKernelSchedMgr, struct OBJGPU *pGpu); +#ifdef __nvoc_kernel_sched_mgr_h_disabled +static inline void kschedmgrConstructPolicy(struct KernelSchedMgr *pKernelSchedMgr, struct OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("KernelSchedMgr was disabled!"); +} +#else //__nvoc_kernel_sched_mgr_h_disabled +#define kschedmgrConstructPolicy(pKernelSchedMgr, pGpu) kschedmgrConstructPolicy_IMPL(pKernelSchedMgr, pGpu) +#endif //__nvoc_kernel_sched_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#define GPU_GET_KERNEL_SCHEDMGR(pGpu) \ + (kfifoGetKernelSchedMgr(GPU_GET_KERNEL_FIFO_UC(pGpu))) + +#endif // _KERNELSCHEDMGR_H_ + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_SCHED_MGR_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_sec2_nvoc.c b/src/nvidia/generated/g_kernel_sec2_nvoc.c new file mode 100644 index 000000000..5dae6ee3b --- /dev/null +++ 
b/src/nvidia/generated/g_kernel_sec2_nvoc.c @@ -0,0 +1,414 @@ +#define NVOC_KERNEL_SEC2_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_sec2_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x2f36c9 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSec2; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelFalcon; + +void __nvoc_init_KernelSec2(KernelSec2*, RmHalspecOwner* ); +void __nvoc_init_funcTable_KernelSec2(KernelSec2*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelSec2(KernelSec2*, RmHalspecOwner* ); +void __nvoc_init_dataField_KernelSec2(KernelSec2*, RmHalspecOwner* ); +void __nvoc_dtor_KernelSec2(KernelSec2*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelSec2; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_KernelSec2 = { + /*pClassDef=*/ &__nvoc_class_def_KernelSec2, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelSec2, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSec2_KernelFalcon = { + /*pClassDef=*/ &__nvoc_class_def_KernelFalcon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSec2, __nvoc_base_KernelFalcon), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelSec2 = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_KernelSec2_KernelSec2, + &__nvoc_rtti_KernelSec2_KernelFalcon, + &__nvoc_rtti_KernelSec2_OBJENGSTATE, + &__nvoc_rtti_KernelSec2_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSec2 = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelSec2), + /*classId=*/ classId(KernelSec2), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelSec2", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelSec2, + /*pCastInfo=*/ &__nvoc_castinfo_KernelSec2, + /*pExportInfo=*/ &__nvoc_export_info_KernelSec2 +}; + +static NV_STATUS __nvoc_thunk_KernelSec2_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelSec2, ENGDESCRIPTOR arg0) { + return ksec2ConstructEngine(pGpu, (struct KernelSec2 *)(((unsigned char *)pKernelSec2) - __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_KernelSec2_kflcnResetHw(struct OBJGPU *pGpu, struct KernelFalcon *pKernelSec2) { + return ksec2ResetHw(pGpu, (struct KernelSec2 *)(((unsigned char *)pKernelSec2) - __nvoc_rtti_KernelSec2_KernelFalcon.offset)); +} + +static NvBool __nvoc_thunk_KernelSec2_kflcnIsEngineInReset(struct OBJGPU *pGpu, struct KernelFalcon *pKernelSec2) { + return ksec2IsEngineInReset(pGpu, (struct KernelSec2 *)(((unsigned char *)pKernelSec2) - __nvoc_rtti_KernelSec2_KernelFalcon.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2ReconcileTunableState(POBJGPU pGpu, struct KernelSec2 *pEngstate, 
void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StateLoad(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StateUnload(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StateInitLocked(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePreLoad(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePostUnload(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_ksec2StateDestroy(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePreUnload(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StateInitUnlocked(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_ksec2InitMissing(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePreInitLocked(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePreInitUnlocked(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2GetTunableState(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2CompareTunableState(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + 
__nvoc_rtti_KernelSec2_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_ksec2FreeTunableState(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2StatePostLoad(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2AllocTunableState(POBJGPU pGpu, struct KernelSec2 *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_ksec2SetTunableState(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_ksec2IsPresent(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelSec2_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelSec2 = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_KernelFalcon(KernelFalcon*); +void __nvoc_dtor_KernelSec2(KernelSec2 *pThis) { + __nvoc_ksec2Destruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelSec2(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelFalcon(KernelFalcon* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_KernelSec2(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_KernelSec2_fail_OBJENGSTATE; + status = __nvoc_ctor_KernelFalcon(&pThis->__nvoc_base_KernelFalcon, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_KernelSec2_fail_KernelFalcon; + __nvoc_init_dataField_KernelSec2(pThis, pRmhalspecowner); + goto __nvoc_ctor_KernelSec2_exit; // Success + +__nvoc_ctor_KernelSec2_fail_KernelFalcon: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_KernelSec2_fail_OBJENGSTATE: +__nvoc_ctor_KernelSec2_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelSec2_1(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal 
*rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + // Hal function -- ksec2ConstructEngine + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__ksec2ConstructEngine__ = &ksec2ConstructEngine_IMPL; + } + else if (0) + { + } + + // Hal function -- ksec2ConfigureFalcon + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__ksec2ConfigureFalcon__ = &ksec2ConfigureFalcon_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__ksec2ConfigureFalcon__ = &ksec2ConfigureFalcon_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__ksec2ConfigureFalcon__ = &ksec2ConfigureFalcon_GA102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- ksec2ResetHw + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__ksec2ResetHw__ = &ksec2ResetHw_TU102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- ksec2IsEngineInReset + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__ksec2IsEngineInReset__ = &ksec2IsEngineInReset_TU102; + } + else if (0) + { + } + } + else if (0) + { + } + + // Hal function -- ksec2ReadUcodeFuseVersion + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__ksec2ReadUcodeFuseVersion__ = &ksec2ReadUcodeFuseVersion_b2b553; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__ksec2ReadUcodeFuseVersion__ = &ksec2ReadUcodeFuseVersion_GA100; + } + else if (0) + { + } + } + else if (0) + { + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = 
&__nvoc_thunk_KernelSec2_engstateConstructEngine; + + pThis->__nvoc_base_KernelFalcon.__kflcnResetHw__ = &__nvoc_thunk_KernelSec2_kflcnResetHw; + + pThis->__nvoc_base_KernelFalcon.__kflcnIsEngineInReset__ = &__nvoc_thunk_KernelSec2_kflcnIsEngineInReset; + + pThis->__ksec2ReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_ksec2ReconcileTunableState; + + pThis->__ksec2StateLoad__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateLoad; + + pThis->__ksec2StateUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateUnload; + + pThis->__ksec2StateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateInitLocked; + + pThis->__ksec2StatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreLoad; + + pThis->__ksec2StatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePostUnload; + + pThis->__ksec2StateDestroy__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateDestroy; + + pThis->__ksec2StatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreUnload; + + pThis->__ksec2StateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StateInitUnlocked; + + pThis->__ksec2InitMissing__ = &__nvoc_thunk_OBJENGSTATE_ksec2InitMissing; + + pThis->__ksec2StatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreInitLocked; + + pThis->__ksec2StatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePreInitUnlocked; + + pThis->__ksec2GetTunableState__ = &__nvoc_thunk_OBJENGSTATE_ksec2GetTunableState; + + pThis->__ksec2CompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_ksec2CompareTunableState; + + pThis->__ksec2FreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_ksec2FreeTunableState; + + pThis->__ksec2StatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_ksec2StatePostLoad; + + pThis->__ksec2AllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_ksec2AllocTunableState; + + pThis->__ksec2SetTunableState__ = &__nvoc_thunk_OBJENGSTATE_ksec2SetTunableState; + + pThis->__ksec2IsPresent__ = &__nvoc_thunk_OBJENGSTATE_ksec2IsPresent; +} + +void __nvoc_init_funcTable_KernelSec2(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_KernelSec2_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*, RmHalspecOwner* ); +void __nvoc_init_KernelFalcon(KernelFalcon*, RmHalspecOwner* ); +void __nvoc_init_KernelSec2(KernelSec2 *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_KernelSec2 = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_KernelFalcon = &pThis->__nvoc_base_KernelFalcon; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + __nvoc_init_KernelFalcon(&pThis->__nvoc_base_KernelFalcon, pRmhalspecowner); + __nvoc_init_funcTable_KernelSec2(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_KernelSec2(KernelSec2 **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + KernelSec2 *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(KernelSec2)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelSec2)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelSec2); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner 
= objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_KernelSec2(pThis, pRmhalspecowner); + status = __nvoc_ctor_KernelSec2(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_KernelSec2_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelSec2_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelSec2(KernelSec2 **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_KernelSec2(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_sec2_nvoc.h b/src/nvidia/generated/g_kernel_sec2_nvoc.h new file mode 100644 index 000000000..ccac76c6b --- /dev/null +++ b/src/nvidia/generated/g_kernel_sec2_nvoc.h @@ -0,0 +1,317 @@ +#ifndef _G_KERNEL_SEC2_NVOC_H_ +#define _G_KERNEL_SEC2_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_kernel_sec2_nvoc.h" + +#ifndef KERNEL_SEC2_H +#define KERNEL_SEC2_H + +#include "core/bin_data.h" +#include "core/core.h" +#include "gpu/eng_state.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" + +// forward declaration of RM_FLCN_BL_DESC from rmflcnbl.h +struct _def_rm_flcn_bl_desc; +typedef struct _def_rm_flcn_bl_desc RM_FLCN_BL_DESC; + +#ifdef NVOC_KERNEL_SEC2_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelSec2 { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct KernelFalcon __nvoc_base_KernelFalcon; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct KernelFalcon *__nvoc_pbase_KernelFalcon; + struct KernelSec2 *__nvoc_pbase_KernelSec2; + NV_STATUS (*__ksec2ConstructEngine__)(struct OBJGPU *, struct KernelSec2 *, ENGDESCRIPTOR); + void (*__ksec2ConfigureFalcon__)(struct OBJGPU *, struct KernelSec2 *); + NV_STATUS (*__ksec2ResetHw__)(struct OBJGPU *, struct KernelSec2 *); + NvBool (*__ksec2IsEngineInReset__)(struct OBJGPU *, struct KernelSec2 *); + NvU32 (*__ksec2ReadUcodeFuseVersion__)(struct OBJGPU *, struct KernelSec2 *, NvU32); + NV_STATUS (*__ksec2ReconcileTunableState__)(POBJGPU, struct KernelSec2 *, void *); + NV_STATUS (*__ksec2StateLoad__)(POBJGPU, struct KernelSec2 *, NvU32); + NV_STATUS (*__ksec2StateUnload__)(POBJGPU, struct KernelSec2 *, NvU32); + NV_STATUS (*__ksec2StateInitLocked__)(POBJGPU, struct KernelSec2 *); + NV_STATUS (*__ksec2StatePreLoad__)(POBJGPU, struct KernelSec2 *, NvU32); + NV_STATUS (*__ksec2StatePostUnload__)(POBJGPU, struct KernelSec2 *, NvU32); + void (*__ksec2StateDestroy__)(POBJGPU, struct KernelSec2 *); + NV_STATUS (*__ksec2StatePreUnload__)(POBJGPU, struct KernelSec2 *, NvU32); + NV_STATUS (*__ksec2StateInitUnlocked__)(POBJGPU, struct KernelSec2 *); + void (*__ksec2InitMissing__)(POBJGPU, struct KernelSec2 *); + NV_STATUS (*__ksec2StatePreInitLocked__)(POBJGPU, struct KernelSec2 *); + NV_STATUS (*__ksec2StatePreInitUnlocked__)(POBJGPU, struct KernelSec2 *); + NV_STATUS (*__ksec2GetTunableState__)(POBJGPU, struct KernelSec2 *, void *); + NV_STATUS (*__ksec2CompareTunableState__)(POBJGPU, struct KernelSec2 *, void *, void *); + void (*__ksec2FreeTunableState__)(POBJGPU, struct KernelSec2 *, void *); + NV_STATUS (*__ksec2StatePostLoad__)(POBJGPU, struct KernelSec2 *, NvU32); + NV_STATUS (*__ksec2AllocTunableState__)(POBJGPU, struct KernelSec2 *, void **); + NV_STATUS (*__ksec2SetTunableState__)(POBJGPU, struct KernelSec2 *, void *); + NvBool (*__ksec2IsPresent__)(POBJGPU, struct KernelSec2 *); + const RM_FLCN_BL_DESC *pGenericBlUcodeDesc; + const NvU8 *pGenericBlUcodeImg; +}; + +#ifndef __NVOC_CLASS_KernelSec2_TYPEDEF__ +#define __NVOC_CLASS_KernelSec2_TYPEDEF__ +typedef struct KernelSec2 KernelSec2; +#endif /* __NVOC_CLASS_KernelSec2_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSec2 +#define __nvoc_class_id_KernelSec2 0x2f36c9 +#endif /* __nvoc_class_id_KernelSec2 */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSec2; + +#define __staticCast_KernelSec2(pThis) \ + ((pThis)->__nvoc_pbase_KernelSec2) + +#ifdef __nvoc_kernel_sec2_h_disabled +#define __dynamicCast_KernelSec2(pThis) ((KernelSec2*)NULL) +#else //__nvoc_kernel_sec2_h_disabled +#define __dynamicCast_KernelSec2(pThis) \ + ((KernelSec2*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelSec2))) +#endif //__nvoc_kernel_sec2_h_disabled + +#define 
PDB_PROP_KSEC2_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_KSEC2_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_KernelSec2(KernelSec2**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelSec2(KernelSec2**, Dynamic*, NvU32); +#define __objCreate_KernelSec2(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_KernelSec2((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define ksec2ConstructEngine(pGpu, pKernelSec2, arg0) ksec2ConstructEngine_DISPATCH(pGpu, pKernelSec2, arg0) +#define ksec2ConstructEngine_HAL(pGpu, pKernelSec2, arg0) ksec2ConstructEngine_DISPATCH(pGpu, pKernelSec2, arg0) +#define ksec2ConfigureFalcon(pGpu, pKernelSec2) ksec2ConfigureFalcon_DISPATCH(pGpu, pKernelSec2) +#define ksec2ConfigureFalcon_HAL(pGpu, pKernelSec2) ksec2ConfigureFalcon_DISPATCH(pGpu, pKernelSec2) +#define ksec2ResetHw(pGpu, pKernelSec2) ksec2ResetHw_DISPATCH(pGpu, pKernelSec2) +#define ksec2ResetHw_HAL(pGpu, pKernelSec2) ksec2ResetHw_DISPATCH(pGpu, pKernelSec2) +#define ksec2IsEngineInReset(pGpu, pKernelSec2) ksec2IsEngineInReset_DISPATCH(pGpu, pKernelSec2) +#define ksec2IsEngineInReset_HAL(pGpu, pKernelSec2) ksec2IsEngineInReset_DISPATCH(pGpu, pKernelSec2) +#define ksec2ReadUcodeFuseVersion(pGpu, pKernelSec2, ucodeId) ksec2ReadUcodeFuseVersion_DISPATCH(pGpu, pKernelSec2, ucodeId) +#define ksec2ReadUcodeFuseVersion_HAL(pGpu, pKernelSec2, ucodeId) ksec2ReadUcodeFuseVersion_DISPATCH(pGpu, pKernelSec2, ucodeId) +#define ksec2ReconcileTunableState(pGpu, pEngstate, pTunableState) ksec2ReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define ksec2StateLoad(pGpu, pEngstate, arg0) ksec2StateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define ksec2StateUnload(pGpu, pEngstate, arg0) ksec2StateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define ksec2StateInitLocked(pGpu, pEngstate) ksec2StateInitLocked_DISPATCH(pGpu, pEngstate) +#define ksec2StatePreLoad(pGpu, pEngstate, arg0) ksec2StatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define ksec2StatePostUnload(pGpu, pEngstate, arg0) ksec2StatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define ksec2StateDestroy(pGpu, pEngstate) ksec2StateDestroy_DISPATCH(pGpu, pEngstate) +#define ksec2StatePreUnload(pGpu, pEngstate, arg0) ksec2StatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define ksec2StateInitUnlocked(pGpu, pEngstate) ksec2StateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define ksec2InitMissing(pGpu, pEngstate) ksec2InitMissing_DISPATCH(pGpu, pEngstate) +#define ksec2StatePreInitLocked(pGpu, pEngstate) ksec2StatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define ksec2StatePreInitUnlocked(pGpu, pEngstate) ksec2StatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define ksec2GetTunableState(pGpu, pEngstate, pTunableState) ksec2GetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define ksec2CompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) ksec2CompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define ksec2FreeTunableState(pGpu, pEngstate, pTunableState) ksec2FreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define ksec2StatePostLoad(pGpu, pEngstate, arg0) ksec2StatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define ksec2AllocTunableState(pGpu, pEngstate, ppTunableState) ksec2AllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define ksec2SetTunableState(pGpu, pEngstate, pTunableState) ksec2SetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define ksec2IsPresent(pGpu, pEngstate) ksec2IsPresent_DISPATCH(pGpu, pEngstate) +const 
BINDATA_ARCHIVE *ksec2GetBinArchiveBlUcode_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2); + +#ifdef __nvoc_kernel_sec2_h_disabled +static inline const BINDATA_ARCHIVE *ksec2GetBinArchiveBlUcode(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) { + NV_ASSERT_FAILED_PRECOMP("KernelSec2 was disabled!"); + return NULL; +} +#else //__nvoc_kernel_sec2_h_disabled +#define ksec2GetBinArchiveBlUcode(pGpu, pKernelSec2) ksec2GetBinArchiveBlUcode_TU102(pGpu, pKernelSec2) +#endif //__nvoc_kernel_sec2_h_disabled + +#define ksec2GetBinArchiveBlUcode_HAL(pGpu, pKernelSec2) ksec2GetBinArchiveBlUcode(pGpu, pKernelSec2) + +NV_STATUS ksec2GetGenericBlUcode_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, const RM_FLCN_BL_DESC **ppDesc, const NvU8 **ppImg); + +#ifdef __nvoc_kernel_sec2_h_disabled +static inline NV_STATUS ksec2GetGenericBlUcode(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, const RM_FLCN_BL_DESC **ppDesc, const NvU8 **ppImg) { + NV_ASSERT_FAILED_PRECOMP("KernelSec2 was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_kernel_sec2_h_disabled +#define ksec2GetGenericBlUcode(pGpu, pKernelSec2, ppDesc, ppImg) ksec2GetGenericBlUcode_TU102(pGpu, pKernelSec2, ppDesc, ppImg) +#endif //__nvoc_kernel_sec2_h_disabled + +#define ksec2GetGenericBlUcode_HAL(pGpu, pKernelSec2, ppDesc, ppImg) ksec2GetGenericBlUcode(pGpu, pKernelSec2, ppDesc, ppImg) + +NV_STATUS ksec2ConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg0); + +static inline NV_STATUS ksec2ConstructEngine_395e98(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS ksec2ConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, ENGDESCRIPTOR arg0) { + return pKernelSec2->__ksec2ConstructEngine__(pGpu, pKernelSec2, arg0); +} + +void ksec2ConfigureFalcon_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2); + +void ksec2ConfigureFalcon_GA100(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2); + +void ksec2ConfigureFalcon_GA102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2); + +static inline void ksec2ConfigureFalcon_f2d351(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) { + NV_ASSERT_PRECOMP(0); +} + +static inline void ksec2ConfigureFalcon_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) { + pKernelSec2->__ksec2ConfigureFalcon__(pGpu, pKernelSec2); +} + +NV_STATUS ksec2ResetHw_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2); + +static inline NV_STATUS ksec2ResetHw_5baef9(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS ksec2ResetHw_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) { + return pKernelSec2->__ksec2ResetHw__(pGpu, pKernelSec2); +} + +NvBool ksec2IsEngineInReset_TU102(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2); + +static inline NvBool ksec2IsEngineInReset_108313(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) { + NV_ASSERT_OR_RETURN_PRECOMP(0, ((NvBool)(0 != 0))); +} + +static inline NvBool ksec2IsEngineInReset_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2) { + return pKernelSec2->__ksec2IsEngineInReset__(pGpu, pKernelSec2); +} + +static inline NvU32 ksec2ReadUcodeFuseVersion_b2b553(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 ucodeId) { + return 0; +} + +NvU32 ksec2ReadUcodeFuseVersion_GA100(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 ucodeId); + 
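+// The declarations in this header follow the generated NVOC HAL pattern: each
+// ksec2*() macro above expands to a *_DISPATCH() inline that calls through the
+// per-object function pointer (e.g. __ksec2ReadUcodeFuseVersion__), and that
+// pointer is filled in by the corresponding generated init code with either a
+// chip-specific implementation (_TU102, _GA100, _GA102) or one of the inline
+// stubs with hashed suffixes (_b2b553, _474d46, ...) that return a constant or
+// assert. A minimal usage sketch (illustrative only; pGpu and pKernelSec2 are
+// assumed to come from the usual engine accessors):
+//
+//     NvU32 fuseVer = ksec2ReadUcodeFuseVersion(pGpu, pKernelSec2, ucodeId);
+//
+// expands to ksec2ReadUcodeFuseVersion_DISPATCH(), which invokes whichever
+// implementation was selected for the current chip.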
+static inline NvU32 ksec2ReadUcodeFuseVersion_474d46(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 ucodeId) { + NV_ASSERT_OR_RETURN_PRECOMP(0, 0); +} + +static inline NvU32 ksec2ReadUcodeFuseVersion_DISPATCH(struct OBJGPU *pGpu, struct KernelSec2 *pKernelSec2, NvU32 ucodeId) { + return pKernelSec2->__ksec2ReadUcodeFuseVersion__(pGpu, pKernelSec2, ucodeId); +} + +static inline NV_STATUS ksec2ReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunableState) { + return pEngstate->__ksec2ReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS ksec2StateLoad_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return pEngstate->__ksec2StateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS ksec2StateUnload_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return pEngstate->__ksec2StateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS ksec2StateInitLocked_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return pEngstate->__ksec2StateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS ksec2StatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return pEngstate->__ksec2StatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS ksec2StatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return pEngstate->__ksec2StatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void ksec2StateDestroy_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + pEngstate->__ksec2StateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS ksec2StatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return pEngstate->__ksec2StatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS ksec2StateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return pEngstate->__ksec2StateInitUnlocked__(pGpu, pEngstate); +} + +static inline void ksec2InitMissing_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + pEngstate->__ksec2InitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS ksec2StatePreInitLocked_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return pEngstate->__ksec2StatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS ksec2StatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return pEngstate->__ksec2StatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS ksec2GetTunableState_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunableState) { + return pEngstate->__ksec2GetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS ksec2CompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__ksec2CompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void ksec2FreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunableState) { + pEngstate->__ksec2FreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS ksec2StatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, NvU32 arg0) { + return pEngstate->__ksec2StatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS ksec2AllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, void **ppTunableState) { + return pEngstate->__ksec2AllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS 
ksec2SetTunableState_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate, void *pTunableState) { + return pEngstate->__ksec2SetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool ksec2IsPresent_DISPATCH(POBJGPU pGpu, struct KernelSec2 *pEngstate) { + return pEngstate->__ksec2IsPresent__(pGpu, pEngstate); +} + +void ksec2Destruct_IMPL(struct KernelSec2 *pKernelSec2); +#define __nvoc_ksec2Destruct(pKernelSec2) ksec2Destruct_IMPL(pKernelSec2) +#undef PRIVATE_FIELD + + +#endif // KERNEL_SEC2_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_SEC2_NVOC_H_ diff --git a/src/nvidia/generated/g_kernel_sm_debugger_session_nvoc.c b/src/nvidia/generated/g_kernel_sm_debugger_session_nvoc.c new file mode 100644 index 000000000..baf2911d5 --- /dev/null +++ b/src/nvidia/generated/g_kernel_sm_debugger_session_nvoc.c @@ -0,0 +1,1069 @@ +#define NVOC_KERNEL_SM_DEBUGGER_SESSION_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_kernel_sm_debugger_session_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x48fa7d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmDebuggerSession; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession; + +void __nvoc_init_RmDebuggerSession(RmDebuggerSession*); +void __nvoc_init_funcTable_RmDebuggerSession(RmDebuggerSession*); +NV_STATUS __nvoc_ctor_RmDebuggerSession(RmDebuggerSession*); +void __nvoc_init_dataField_RmDebuggerSession(RmDebuggerSession*); +void __nvoc_dtor_RmDebuggerSession(RmDebuggerSession*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmDebuggerSession; + +static const struct NVOC_RTTI __nvoc_rtti_RmDebuggerSession_RmDebuggerSession = { + /*pClassDef=*/ &__nvoc_class_def_RmDebuggerSession, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmDebuggerSession, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmDebuggerSession_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmDebuggerSession, __nvoc_base_RsSession.__nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmDebuggerSession_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmDebuggerSession, __nvoc_base_RsSession.__nvoc_base_RsShared), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmDebuggerSession_RsSession = { + /*pClassDef=*/ &__nvoc_class_def_RsSession, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmDebuggerSession, __nvoc_base_RsSession), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmDebuggerSession = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_RmDebuggerSession_RmDebuggerSession, + &__nvoc_rtti_RmDebuggerSession_RsSession, + &__nvoc_rtti_RmDebuggerSession_RsShared, + &__nvoc_rtti_RmDebuggerSession_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmDebuggerSession = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmDebuggerSession), + /*classId=*/ classId(RmDebuggerSession), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmDebuggerSession", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmDebuggerSession, + 
/*pCastInfo=*/ &__nvoc_castinfo_RmDebuggerSession, + /*pExportInfo=*/ &__nvoc_export_info_RmDebuggerSession +}; + +static void __nvoc_thunk_RmDebuggerSession_sessionRemoveDependant(struct RsSession *pDbgSession, struct RsResourceRef *pResourceRef) { + dbgSessionRemoveDependant((struct RmDebuggerSession *)(((unsigned char *)pDbgSession) - __nvoc_rtti_RmDebuggerSession_RsSession.offset), pResourceRef); +} + +static void __nvoc_thunk_RmDebuggerSession_sessionRemoveDependency(struct RsSession *pDbgSession, struct RsResourceRef *pResourceRef) { + dbgSessionRemoveDependency((struct RmDebuggerSession *)(((unsigned char *)pDbgSession) - __nvoc_rtti_RmDebuggerSession_RsSession.offset), pResourceRef); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmDebuggerSession = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsSession(RsSession*); +void __nvoc_dtor_RmDebuggerSession(RmDebuggerSession *pThis) { + __nvoc_dtor_RsSession(&pThis->__nvoc_base_RsSession); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmDebuggerSession(RmDebuggerSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsSession(RsSession* ); +NV_STATUS __nvoc_ctor_RmDebuggerSession(RmDebuggerSession *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsSession(&pThis->__nvoc_base_RsSession); + if (status != NV_OK) goto __nvoc_ctor_RmDebuggerSession_fail_RsSession; + __nvoc_init_dataField_RmDebuggerSession(pThis); + goto __nvoc_ctor_RmDebuggerSession_exit; // Success + +__nvoc_ctor_RmDebuggerSession_fail_RsSession: +__nvoc_ctor_RmDebuggerSession_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmDebuggerSession_1(RmDebuggerSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__dbgSessionRemoveDependant__ = &dbgSessionRemoveDependant_IMPL; + + pThis->__dbgSessionRemoveDependency__ = &dbgSessionRemoveDependency_IMPL; + + pThis->__nvoc_base_RsSession.__sessionRemoveDependant__ = &__nvoc_thunk_RmDebuggerSession_sessionRemoveDependant; + + pThis->__nvoc_base_RsSession.__sessionRemoveDependency__ = &__nvoc_thunk_RmDebuggerSession_sessionRemoveDependency; +} + +void __nvoc_init_funcTable_RmDebuggerSession(RmDebuggerSession *pThis) { + __nvoc_init_funcTable_RmDebuggerSession_1(pThis); +} + +void __nvoc_init_RsSession(RsSession*); +void __nvoc_init_RmDebuggerSession(RmDebuggerSession *pThis) { + pThis->__nvoc_pbase_RmDebuggerSession = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsSession.__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsSession.__nvoc_base_RsShared; + pThis->__nvoc_pbase_RsSession = &pThis->__nvoc_base_RsSession; + __nvoc_init_RsSession(&pThis->__nvoc_base_RsSession); + __nvoc_init_funcTable_RmDebuggerSession(pThis); +} + +NV_STATUS __nvoc_objCreate_RmDebuggerSession(RmDebuggerSession **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + RmDebuggerSession *pThis; + + pThis = portMemAllocNonPaged(sizeof(RmDebuggerSession)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RmDebuggerSession)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmDebuggerSession); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsSession.__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + 
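+        // No parent was supplied, or the parent is used only to resolve the
+        // HAL spec, so the embedded base Object is left unparented.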
pThis->__nvoc_base_RsSession.__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RmDebuggerSession(pThis); + status = __nvoc_ctor_RmDebuggerSession(pThis); + if (status != NV_OK) goto __nvoc_objCreate_RmDebuggerSession_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RmDebuggerSession_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmDebuggerSession(RmDebuggerSession **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_RmDebuggerSession(ppThis, pParent, createFlags); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4adc81 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSMDebuggerSession; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_KernelSMDebuggerSession(KernelSMDebuggerSession*); +void __nvoc_init_funcTable_KernelSMDebuggerSession(KernelSMDebuggerSession*); +NV_STATUS __nvoc_ctor_KernelSMDebuggerSession(KernelSMDebuggerSession*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_KernelSMDebuggerSession(KernelSMDebuggerSession*); +void __nvoc_dtor_KernelSMDebuggerSession(KernelSMDebuggerSession*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelSMDebuggerSession; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSMDebuggerSession_KernelSMDebuggerSession = { + /*pClassDef=*/ &__nvoc_class_def_KernelSMDebuggerSession, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelSMDebuggerSession, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSMDebuggerSession_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSMDebuggerSession, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSMDebuggerSession_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSMDebuggerSession, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSMDebuggerSession_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSMDebuggerSession, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSMDebuggerSession_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSMDebuggerSession, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSMDebuggerSession_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSMDebuggerSession, 
__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSMDebuggerSession_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSMDebuggerSession, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_KernelSMDebuggerSession_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(KernelSMDebuggerSession, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_KernelSMDebuggerSession = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_KernelSMDebuggerSession_KernelSMDebuggerSession, + &__nvoc_rtti_KernelSMDebuggerSession_Notifier, + &__nvoc_rtti_KernelSMDebuggerSession_INotifier, + &__nvoc_rtti_KernelSMDebuggerSession_GpuResource, + &__nvoc_rtti_KernelSMDebuggerSession_RmResource, + &__nvoc_rtti_KernelSMDebuggerSession_RmResourceCommon, + &__nvoc_rtti_KernelSMDebuggerSession_RsResource, + &__nvoc_rtti_KernelSMDebuggerSession_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSMDebuggerSession = +{ + /*classInfo=*/ { + /*size=*/ sizeof(KernelSMDebuggerSession), + /*classId=*/ classId(KernelSMDebuggerSession), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "KernelSMDebuggerSession", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelSMDebuggerSession, + /*pCastInfo=*/ &__nvoc_castinfo_KernelSMDebuggerSession, + /*pExportInfo=*/ &__nvoc_export_info_KernelSMDebuggerSession +}; + +static NV_STATUS __nvoc_thunk_KernelSMDebuggerSession_gpuresInternalControlForward(struct GpuResource *arg0, NvU32 command, void *pParams, NvU32 size) { + return ksmdbgssnInternalControlForward((struct KernelSMDebuggerSession *)(((unsigned char *)arg0) - __nvoc_rtti_KernelSMDebuggerSession_GpuResource.offset), command, pParams, size); +} + +static NvHandle __nvoc_thunk_KernelSMDebuggerSession_gpuresGetInternalObjectHandle(struct GpuResource *arg0) { + return ksmdbgssnGetInternalObjectHandle((struct KernelSMDebuggerSession *)(((unsigned char *)arg0) - __nvoc_rtti_KernelSMDebuggerSession_GpuResource.offset)); +} + +static NvBool __nvoc_thunk_GpuResource_ksmdbgssnShareCallback(struct KernelSMDebuggerSession *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelSMDebuggerSession_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_ksmdbgssnMapTo(struct KernelSMDebuggerSession *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_ksmdbgssnGetOrAllocNotifShare(struct KernelSMDebuggerSession *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelSMDebuggerSession_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_ksmdbgssnCheckMemInterUnmap(struct KernelSMDebuggerSession *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char 
*)pRmResource) + __nvoc_rtti_KernelSMDebuggerSession_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ksmdbgssnGetMapAddrSpace(struct KernelSMDebuggerSession *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelSMDebuggerSession_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_ksmdbgssnSetNotificationShare(struct KernelSMDebuggerSession *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelSMDebuggerSession_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_ksmdbgssnGetRefCount(struct KernelSMDebuggerSession *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_ksmdbgssnAddAdditionalDependants(struct RsClient *pClient, struct KernelSMDebuggerSession *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_ksmdbgssnControl_Prologue(struct KernelSMDebuggerSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ksmdbgssnGetRegBaseOffsetAndSize(struct KernelSMDebuggerSession *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelSMDebuggerSession_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_RsResource_ksmdbgssnUnmapFrom(struct KernelSMDebuggerSession *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_ksmdbgssnControl_Epilogue(struct KernelSMDebuggerSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_ksmdbgssnControlLookup(struct KernelSMDebuggerSession *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ksmdbgssnControl(struct KernelSMDebuggerSession *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelSMDebuggerSession_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ksmdbgssnUnmap(struct KernelSMDebuggerSession *pGpuResource, struct 
CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelSMDebuggerSession_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_ksmdbgssnGetMemInterMapParams(struct KernelSMDebuggerSession *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelSMDebuggerSession_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_ksmdbgssnGetMemoryMappingDescriptor(struct KernelSMDebuggerSession *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_KernelSMDebuggerSession_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_ksmdbgssnControlFilter(struct KernelSMDebuggerSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_ksmdbgssnUnregisterEvent(struct KernelSMDebuggerSession *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelSMDebuggerSession_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_ksmdbgssnCanCopy(struct KernelSMDebuggerSession *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_ksmdbgssnPreDestruct(struct KernelSMDebuggerSession *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_ksmdbgssnGetNotificationListPtr(struct KernelSMDebuggerSession *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelSMDebuggerSession_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_ksmdbgssnGetNotificationShare(struct KernelSMDebuggerSession *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_KernelSMDebuggerSession_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_ksmdbgssnMap(struct KernelSMDebuggerSession *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_KernelSMDebuggerSession_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_ksmdbgssnAccessCallback(struct KernelSMDebuggerSession *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_KernelSMDebuggerSession_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const 
struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_KernelSMDebuggerSession[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetModeMMUDebug_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0307u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetModeMMUDebug" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugGetModeMMUDebug_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0308u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugGetModeMMUDebug" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetExceptionMask_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0309u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetExceptionMask" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugReadSingleSmErrorState_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de030bu, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugReadSingleSmErrorState" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugReadAllSmErrorStates_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de030cu, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugReadAllSmErrorStates" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugClearSingleSmErrorState_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de030fu, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugClearSingleSmErrorState" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + 
/*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugClearAllSmErrorStates_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0310u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugClearAllSmErrorStates" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetNextStopTriggerType_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0313u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetNextStopTriggerType" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0314u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugReadMemory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0315u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugReadMemory" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugWriteMemory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0316u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugWriteMemory" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSuspendContext_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0317u, + /*paramSize=*/ sizeof(NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSuspendContext" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugResumeContext_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 
0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0318u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugResumeContext" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdReadSurface_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de031au, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdReadSurface" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdWriteSurface_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de031bu, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdWriteSurface" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdGetMappings_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de031cu, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdGetMappings" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugExecRegOps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*flags=*/ 0x2010u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de031du, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugExecRegOps" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetModeErrbarDebug_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de031fu, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetModeErrbarDebug" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugGetModeErrbarDebug_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0320u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugGetModeErrbarDebug" +#endif + }, + { /* [19] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0321u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0322u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0323u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0324u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus_fcf1ac, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0325u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus" +#endif + }, + { /* [24] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugReadBatchMemory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0326u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugReadBatchMemory" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + 
/*pFunc=*/ (void (*)(void)) ksmdbgssnCtrlCmdDebugWriteBatchMemory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x83de0327u, + /*paramSize=*/ sizeof(NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_KernelSMDebuggerSession.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "ksmdbgssnCtrlCmdDebugWriteBatchMemory" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelSMDebuggerSession = +{ + /*numEntries=*/ 26, + /*pExportEntries=*/ __nvoc_exported_method_def_KernelSMDebuggerSession +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_KernelSMDebuggerSession(KernelSMDebuggerSession *pThis) { + __nvoc_ksmdbgssnDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_KernelSMDebuggerSession(KernelSMDebuggerSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_KernelSMDebuggerSession(KernelSMDebuggerSession *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelSMDebuggerSession_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_KernelSMDebuggerSession_fail_Notifier; + __nvoc_init_dataField_KernelSMDebuggerSession(pThis); + + status = __nvoc_ksmdbgssnConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_KernelSMDebuggerSession_fail__init; + goto __nvoc_ctor_KernelSMDebuggerSession_exit; // Success + +__nvoc_ctor_KernelSMDebuggerSession_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_KernelSMDebuggerSession_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_KernelSMDebuggerSession_fail_GpuResource: +__nvoc_ctor_KernelSMDebuggerSession_exit: + + return status; +} + +static void __nvoc_init_funcTable_KernelSMDebuggerSession_1(KernelSMDebuggerSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__ksmdbgssnInternalControlForward__ = &ksmdbgssnInternalControlForward_IMPL; + + pThis->__ksmdbgssnGetInternalObjectHandle__ = &ksmdbgssnGetInternalObjectHandle_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetModeMMUDebug__ = &ksmdbgssnCtrlCmdDebugSetModeMMUDebug_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugGetModeMMUDebug__ = &ksmdbgssnCtrlCmdDebugGetModeMMUDebug_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetModeErrbarDebug__ = &ksmdbgssnCtrlCmdDebugSetModeErrbarDebug_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugGetModeErrbarDebug__ = &ksmdbgssnCtrlCmdDebugGetModeErrbarDebug_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetExceptionMask__ = &ksmdbgssnCtrlCmdDebugSetExceptionMask_fcf1ac; 
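+    // Each control-handler pointer in this function table is guarded by the
+    // same NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(<flags>) value that gates the
+    // matching entry in __nvoc_exported_method_def_KernelSMDebuggerSession, so
+    // disabling an exported method by flag removes both its export-table entry
+    // and its function-table binding.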
+#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugReadSingleSmErrorState__ = &ksmdbgssnCtrlCmdDebugReadSingleSmErrorState_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__ksmdbgssnCtrlCmdDebugReadAllSmErrorStates__ = &ksmdbgssnCtrlCmdDebugReadAllSmErrorStates_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugClearSingleSmErrorState__ = &ksmdbgssnCtrlCmdDebugClearSingleSmErrorState_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__ksmdbgssnCtrlCmdDebugClearAllSmErrorStates__ = &ksmdbgssnCtrlCmdDebugClearAllSmErrorStates_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSuspendContext__ = &ksmdbgssnCtrlCmdDebugSuspendContext_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugResumeContext__ = &ksmdbgssnCtrlCmdDebugResumeContext_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ksmdbgssnCtrlCmdReadSurface__ = &ksmdbgssnCtrlCmdReadSurface_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ksmdbgssnCtrlCmdWriteSurface__ = &ksmdbgssnCtrlCmdWriteSurface_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ksmdbgssnCtrlCmdGetMappings__ = &ksmdbgssnCtrlCmdGetMappings_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetNextStopTriggerType__ = &ksmdbgssnCtrlCmdDebugSetNextStopTriggerType_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling__ = &ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ksmdbgssnCtrlCmdDebugReadMemory__ = &ksmdbgssnCtrlCmdDebugReadMemory_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ksmdbgssnCtrlCmdDebugWriteMemory__ = &ksmdbgssnCtrlCmdDebugWriteMemory_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + pThis->__ksmdbgssnCtrlCmdDebugExecRegOps__ = &ksmdbgssnCtrlCmdDebugExecRegOps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep__ = &ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger__ = &ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger__ = &ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect__ = &ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus__ = &ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus_fcf1ac; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ksmdbgssnCtrlCmdDebugReadBatchMemory__ = &ksmdbgssnCtrlCmdDebugReadBatchMemory_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__ksmdbgssnCtrlCmdDebugWriteBatchMemory__ = &ksmdbgssnCtrlCmdDebugWriteBatchMemory_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresInternalControlForward__ = 
&__nvoc_thunk_KernelSMDebuggerSession_gpuresInternalControlForward; + + pThis->__nvoc_base_GpuResource.__gpuresGetInternalObjectHandle__ = &__nvoc_thunk_KernelSMDebuggerSession_gpuresGetInternalObjectHandle; + + pThis->__ksmdbgssnShareCallback__ = &__nvoc_thunk_GpuResource_ksmdbgssnShareCallback; + + pThis->__ksmdbgssnMapTo__ = &__nvoc_thunk_RsResource_ksmdbgssnMapTo; + + pThis->__ksmdbgssnGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_ksmdbgssnGetOrAllocNotifShare; + + pThis->__ksmdbgssnCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_ksmdbgssnCheckMemInterUnmap; + + pThis->__ksmdbgssnGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_ksmdbgssnGetMapAddrSpace; + + pThis->__ksmdbgssnSetNotificationShare__ = &__nvoc_thunk_Notifier_ksmdbgssnSetNotificationShare; + + pThis->__ksmdbgssnGetRefCount__ = &__nvoc_thunk_RsResource_ksmdbgssnGetRefCount; + + pThis->__ksmdbgssnAddAdditionalDependants__ = &__nvoc_thunk_RsResource_ksmdbgssnAddAdditionalDependants; + + pThis->__ksmdbgssnControl_Prologue__ = &__nvoc_thunk_RmResource_ksmdbgssnControl_Prologue; + + pThis->__ksmdbgssnGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_ksmdbgssnGetRegBaseOffsetAndSize; + + pThis->__ksmdbgssnUnmapFrom__ = &__nvoc_thunk_RsResource_ksmdbgssnUnmapFrom; + + pThis->__ksmdbgssnControl_Epilogue__ = &__nvoc_thunk_RmResource_ksmdbgssnControl_Epilogue; + + pThis->__ksmdbgssnControlLookup__ = &__nvoc_thunk_RsResource_ksmdbgssnControlLookup; + + pThis->__ksmdbgssnControl__ = &__nvoc_thunk_GpuResource_ksmdbgssnControl; + + pThis->__ksmdbgssnUnmap__ = &__nvoc_thunk_GpuResource_ksmdbgssnUnmap; + + pThis->__ksmdbgssnGetMemInterMapParams__ = &__nvoc_thunk_RmResource_ksmdbgssnGetMemInterMapParams; + + pThis->__ksmdbgssnGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_ksmdbgssnGetMemoryMappingDescriptor; + + pThis->__ksmdbgssnControlFilter__ = &__nvoc_thunk_RsResource_ksmdbgssnControlFilter; + + pThis->__ksmdbgssnUnregisterEvent__ = &__nvoc_thunk_Notifier_ksmdbgssnUnregisterEvent; + + pThis->__ksmdbgssnCanCopy__ = &__nvoc_thunk_RsResource_ksmdbgssnCanCopy; + + pThis->__ksmdbgssnPreDestruct__ = &__nvoc_thunk_RsResource_ksmdbgssnPreDestruct; + + pThis->__ksmdbgssnGetNotificationListPtr__ = &__nvoc_thunk_Notifier_ksmdbgssnGetNotificationListPtr; + + pThis->__ksmdbgssnGetNotificationShare__ = &__nvoc_thunk_Notifier_ksmdbgssnGetNotificationShare; + + pThis->__ksmdbgssnMap__ = &__nvoc_thunk_GpuResource_ksmdbgssnMap; + + pThis->__ksmdbgssnAccessCallback__ = &__nvoc_thunk_RmResource_ksmdbgssnAccessCallback; +} + +void __nvoc_init_funcTable_KernelSMDebuggerSession(KernelSMDebuggerSession *pThis) { + __nvoc_init_funcTable_KernelSMDebuggerSession_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_KernelSMDebuggerSession(KernelSMDebuggerSession *pThis) { + pThis->__nvoc_pbase_KernelSMDebuggerSession = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = 
&pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_KernelSMDebuggerSession(pThis); +} + +NV_STATUS __nvoc_objCreate_KernelSMDebuggerSession(KernelSMDebuggerSession **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + KernelSMDebuggerSession *pThis; + + pThis = portMemAllocNonPaged(sizeof(KernelSMDebuggerSession)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(KernelSMDebuggerSession)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelSMDebuggerSession); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_KernelSMDebuggerSession(pThis); + status = __nvoc_ctor_KernelSMDebuggerSession(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_KernelSMDebuggerSession_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_KernelSMDebuggerSession_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_KernelSMDebuggerSession(KernelSMDebuggerSession **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_KernelSMDebuggerSession(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_kernel_sm_debugger_session_nvoc.h b/src/nvidia/generated/g_kernel_sm_debugger_session_nvoc.h new file mode 100644 index 000000000..20367bad2 --- /dev/null +++ b/src/nvidia/generated/g_kernel_sm_debugger_session_nvoc.h @@ -0,0 +1,652 @@ +#ifndef _G_KERNEL_SM_DEBUGGER_SESSION_NVOC_H_ +#define _G_KERNEL_SM_DEBUGGER_SESSION_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_kernel_sm_debugger_session_nvoc.h" + +#ifndef KERNEL_SM_DEBUGGER_SESSION_H +#define KERNEL_SM_DEBUGGER_SESSION_H + +#include "gpu/gpu_halspec.h" +#include "utils/nv_enum.h" +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" +#include "rmapi/control.h" + +#include "ctrl/ctrl83de.h" + +struct KernelGraphicsObject; + +#ifndef __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +typedef struct KernelGraphicsObject KernelGraphicsObject; +#endif /* __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsObject +#define __nvoc_class_id_KernelGraphicsObject 0x097648 +#endif /* __nvoc_class_id_KernelGraphicsObject */ + + + +/* + * Valid Values for smdebugMode + */ +#define SM_DEBUG_MODE_ENABLE (0x00000001) +#define SM_DEBUG_MODE_DISABLE (0x00000002) + +/* + * Valid Values for stopOnAnyWARPMode + */ +#define STOP_ON_ANYWARP_MODE_ENABLED (0x00000001) +#define STOP_ON_ANYWARP_MODE_DISABLED (0x00000002) + +/* + * Valid Values for stopOnAnySMMode + */ +#define STOP_ON_ANYSM_MODE_ENABLED (0x00000001) +#define STOP_ON_ANYSM_MODE_DISABLED (0x00000002) + +#define SMDBG_EXCEPTION_TYPE_DEF(x) \ + NV_ENUM_ENTRY(x, SMDBG_EXCEPTION_TYPE_FATAL, 0) \ + NV_ENUM_ENTRY(x, SMDBG_EXCEPTION_TYPE_TRAP, 1) \ + NV_ENUM_ENTRY(x, SMDBG_EXCEPTION_TYPE_SINGLE_STEP, 2) \ + NV_ENUM_ENTRY(x, SMDBG_EXCEPTION_TYPE_INT, 3) \ + NV_ENUM_ENTRY(x, SMDBG_EXCEPTION_TYPE_CILP, 4) \ + NV_ENUM_ENTRY(x, SMDBG_EXCEPTION_TYPE_PREEMPTION_STARTED, 5) + +NV_ENUM_DEF(SMDBG_EXCEPTION_TYPE, SMDBG_EXCEPTION_TYPE_DEF); +ct_assert(NV_ENUM_IS_CONTIGUOUS(SMDBG_EXCEPTION_TYPE)); + +ct_assert(NVBIT32(SMDBG_EXCEPTION_TYPE_FATAL) == + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_FATAL); +ct_assert(NVBIT32(SMDBG_EXCEPTION_TYPE_TRAP) == + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_TRAP); +ct_assert(NVBIT32(SMDBG_EXCEPTION_TYPE_SINGLE_STEP) == + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_SINGLE_STEP); +ct_assert(NVBIT32(SMDBG_EXCEPTION_TYPE_INT) == + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_INT); +ct_assert(NVBIT32(SMDBG_EXCEPTION_TYPE_CILP) == + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_CILP); +ct_assert(NVBIT32(SMDBG_EXCEPTION_TYPE_PREEMPTION_STARTED) == + NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PREEMPTION_STARTED); + +// +// Debugger Session object for automatically freeing and +// invalidating the debugger object when underlying objects that it +// relies on are freed before it. 
+// +#ifdef NVOC_KERNEL_SM_DEBUGGER_SESSION_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmDebuggerSession { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsSession __nvoc_base_RsSession; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + struct RsSession *__nvoc_pbase_RsSession; + struct RmDebuggerSession *__nvoc_pbase_RmDebuggerSession; + void (*__dbgSessionRemoveDependant__)(struct RmDebuggerSession *, struct RsResourceRef *); + void (*__dbgSessionRemoveDependency__)(struct RmDebuggerSession *, struct RsResourceRef *); +}; + +#ifndef __NVOC_CLASS_RmDebuggerSession_TYPEDEF__ +#define __NVOC_CLASS_RmDebuggerSession_TYPEDEF__ +typedef struct RmDebuggerSession RmDebuggerSession; +#endif /* __NVOC_CLASS_RmDebuggerSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmDebuggerSession +#define __nvoc_class_id_RmDebuggerSession 0x48fa7d +#endif /* __nvoc_class_id_RmDebuggerSession */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmDebuggerSession; + +#define __staticCast_RmDebuggerSession(pThis) \ + ((pThis)->__nvoc_pbase_RmDebuggerSession) + +#ifdef __nvoc_kernel_sm_debugger_session_h_disabled +#define __dynamicCast_RmDebuggerSession(pThis) ((RmDebuggerSession*)NULL) +#else //__nvoc_kernel_sm_debugger_session_h_disabled +#define __dynamicCast_RmDebuggerSession(pThis) \ + ((RmDebuggerSession*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmDebuggerSession))) +#endif //__nvoc_kernel_sm_debugger_session_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmDebuggerSession(RmDebuggerSession**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmDebuggerSession(RmDebuggerSession**, Dynamic*, NvU32); +#define __objCreate_RmDebuggerSession(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_RmDebuggerSession((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define dbgSessionRemoveDependant(pDbgSession, pResourceRef) dbgSessionRemoveDependant_DISPATCH(pDbgSession, pResourceRef) +#define dbgSessionRemoveDependency(pDbgSession, pResourceRef) dbgSessionRemoveDependency_DISPATCH(pDbgSession, pResourceRef) +void dbgSessionRemoveDependant_IMPL(struct RmDebuggerSession *pDbgSession, struct RsResourceRef *pResourceRef); + +static inline void dbgSessionRemoveDependant_DISPATCH(struct RmDebuggerSession *pDbgSession, struct RsResourceRef *pResourceRef) { + pDbgSession->__dbgSessionRemoveDependant__(pDbgSession, pResourceRef); +} + +void dbgSessionRemoveDependency_IMPL(struct RmDebuggerSession *pDbgSession, struct RsResourceRef *pResourceRef); + +static inline void dbgSessionRemoveDependency_DISPATCH(struct RmDebuggerSession *pDbgSession, struct RsResourceRef *pResourceRef) { + pDbgSession->__dbgSessionRemoveDependency__(pDbgSession, pResourceRef); +} + +#undef PRIVATE_FIELD + + +#ifdef NVOC_KERNEL_SM_DEBUGGER_SESSION_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct KernelSMDebuggerSession { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct KernelSMDebuggerSession 
*__nvoc_pbase_KernelSMDebuggerSession; + NV_STATUS (*__ksmdbgssnInternalControlForward__)(struct KernelSMDebuggerSession *, NvU32, void *, NvU32); + NvHandle (*__ksmdbgssnGetInternalObjectHandle__)(struct KernelSMDebuggerSession *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetModeMMUDebug__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugGetModeMMUDebug__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetModeErrbarDebug__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugGetModeErrbarDebug__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetExceptionMask__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugReadSingleSmErrorState__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugReadAllSmErrorStates__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugClearSingleSmErrorState__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugClearAllSmErrorStates__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSuspendContext__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugResumeContext__)(struct KernelSMDebuggerSession *); + NV_STATUS (*__ksmdbgssnCtrlCmdReadSurface__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *); + NV_STATUS (*__ksmdbgssnCtrlCmdWriteSurface__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *); + NV_STATUS (*__ksmdbgssnCtrlCmdGetMappings__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetNextStopTriggerType__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugReadMemory__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugWriteMemory__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugExecRegOps__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS *); + NV_STATUS 
(*__ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugReadBatchMemory__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS *); + NV_STATUS (*__ksmdbgssnCtrlCmdDebugWriteBatchMemory__)(struct KernelSMDebuggerSession *, NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS *); + NvBool (*__ksmdbgssnShareCallback__)(struct KernelSMDebuggerSession *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__ksmdbgssnMapTo__)(struct KernelSMDebuggerSession *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__ksmdbgssnGetOrAllocNotifShare__)(struct KernelSMDebuggerSession *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__ksmdbgssnCheckMemInterUnmap__)(struct KernelSMDebuggerSession *, NvBool); + NV_STATUS (*__ksmdbgssnGetMapAddrSpace__)(struct KernelSMDebuggerSession *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__ksmdbgssnSetNotificationShare__)(struct KernelSMDebuggerSession *, struct NotifShare *); + NvU32 (*__ksmdbgssnGetRefCount__)(struct KernelSMDebuggerSession *); + void (*__ksmdbgssnAddAdditionalDependants__)(struct RsClient *, struct KernelSMDebuggerSession *, RsResourceRef *); + NV_STATUS (*__ksmdbgssnControl_Prologue__)(struct KernelSMDebuggerSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ksmdbgssnGetRegBaseOffsetAndSize__)(struct KernelSMDebuggerSession *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__ksmdbgssnUnmapFrom__)(struct KernelSMDebuggerSession *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__ksmdbgssnControl_Epilogue__)(struct KernelSMDebuggerSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ksmdbgssnControlLookup__)(struct KernelSMDebuggerSession *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__ksmdbgssnControl__)(struct KernelSMDebuggerSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ksmdbgssnUnmap__)(struct KernelSMDebuggerSession *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__ksmdbgssnGetMemInterMapParams__)(struct KernelSMDebuggerSession *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__ksmdbgssnGetMemoryMappingDescriptor__)(struct KernelSMDebuggerSession *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__ksmdbgssnControlFilter__)(struct KernelSMDebuggerSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__ksmdbgssnUnregisterEvent__)(struct KernelSMDebuggerSession *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__ksmdbgssnCanCopy__)(struct KernelSMDebuggerSession *); + void (*__ksmdbgssnPreDestruct__)(struct KernelSMDebuggerSession *); + PEVENTNOTIFICATION *(*__ksmdbgssnGetNotificationListPtr__)(struct KernelSMDebuggerSession *); + struct NotifShare *(*__ksmdbgssnGetNotificationShare__)(struct KernelSMDebuggerSession *); + NV_STATUS (*__ksmdbgssnMap__)(struct KernelSMDebuggerSession *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__ksmdbgssnAccessCallback__)(struct KernelSMDebuggerSession *, struct RsClient *, void *, RsAccessRight); + struct RmDebuggerSession *pDebugSession; + struct KernelGraphicsObject *pObject; + NvHandle hDebugger; + NvHandle hDebuggerClient; + NvHandle hChannel; + NvHandle hChannelClient; + NvHandle hSubdevice; + NvHandle hInternalClient; + NvHandle hInternalDevice; + NvHandle 
hInternalSubdevice; + NvHandle hInternalSubscription; + NvHandle hInternalMemMapping; +}; + +#ifndef __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ +#define __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ +typedef struct KernelSMDebuggerSession KernelSMDebuggerSession; +#endif /* __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSMDebuggerSession +#define __nvoc_class_id_KernelSMDebuggerSession 0x4adc81 +#endif /* __nvoc_class_id_KernelSMDebuggerSession */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelSMDebuggerSession; + +#define __staticCast_KernelSMDebuggerSession(pThis) \ + ((pThis)->__nvoc_pbase_KernelSMDebuggerSession) + +#ifdef __nvoc_kernel_sm_debugger_session_h_disabled +#define __dynamicCast_KernelSMDebuggerSession(pThis) ((KernelSMDebuggerSession*)NULL) +#else //__nvoc_kernel_sm_debugger_session_h_disabled +#define __dynamicCast_KernelSMDebuggerSession(pThis) \ + ((KernelSMDebuggerSession*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelSMDebuggerSession))) +#endif //__nvoc_kernel_sm_debugger_session_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_KernelSMDebuggerSession(KernelSMDebuggerSession**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_KernelSMDebuggerSession(KernelSMDebuggerSession**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_KernelSMDebuggerSession(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_KernelSMDebuggerSession((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define ksmdbgssnInternalControlForward(arg0, command, pParams, size) ksmdbgssnInternalControlForward_DISPATCH(arg0, command, pParams, size) +#define ksmdbgssnGetInternalObjectHandle(arg0) ksmdbgssnGetInternalObjectHandle_DISPATCH(arg0) +#define ksmdbgssnCtrlCmdDebugSetModeMMUDebug(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetModeMMUDebug_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugGetModeMMUDebug(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugGetModeMMUDebug_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugSetModeErrbarDebug(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetModeErrbarDebug_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugGetModeErrbarDebug(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugGetModeErrbarDebug_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugSetExceptionMask(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetExceptionMask_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugReadSingleSmErrorState(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugReadSingleSmErrorState_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugReadAllSmErrorStates(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugReadAllSmErrorStates_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugClearSingleSmErrorState(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugClearSingleSmErrorState_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugClearAllSmErrorStates(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugClearAllSmErrorStates_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugSuspendContext(pKernelSMDebuggerSession, pParams) 
ksmdbgssnCtrlCmdDebugSuspendContext_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugResumeContext(pKernelSMDebuggerSession) ksmdbgssnCtrlCmdDebugResumeContext_DISPATCH(pKernelSMDebuggerSession) +#define ksmdbgssnCtrlCmdReadSurface(arg0, arg1) ksmdbgssnCtrlCmdReadSurface_DISPATCH(arg0, arg1) +#define ksmdbgssnCtrlCmdWriteSurface(arg0, arg1) ksmdbgssnCtrlCmdWriteSurface_DISPATCH(arg0, arg1) +#define ksmdbgssnCtrlCmdGetMappings(arg0, arg1) ksmdbgssnCtrlCmdGetMappings_DISPATCH(arg0, arg1) +#define ksmdbgssnCtrlCmdDebugSetNextStopTriggerType(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetNextStopTriggerType_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugReadMemory(arg0, arg1) ksmdbgssnCtrlCmdDebugReadMemory_DISPATCH(arg0, arg1) +#define ksmdbgssnCtrlCmdDebugWriteMemory(arg0, arg1) ksmdbgssnCtrlCmdDebugWriteMemory_DISPATCH(arg0, arg1) +#define ksmdbgssnCtrlCmdDebugExecRegOps(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugExecRegOps_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus(pKernelSMDebuggerSession, pParams) ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus_DISPATCH(pKernelSMDebuggerSession, pParams) +#define ksmdbgssnCtrlCmdDebugReadBatchMemory(arg0, arg1) ksmdbgssnCtrlCmdDebugReadBatchMemory_DISPATCH(arg0, arg1) +#define ksmdbgssnCtrlCmdDebugWriteBatchMemory(arg0, arg1) ksmdbgssnCtrlCmdDebugWriteBatchMemory_DISPATCH(arg0, arg1) +#define ksmdbgssnShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) ksmdbgssnShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define ksmdbgssnMapTo(pResource, pParams) ksmdbgssnMapTo_DISPATCH(pResource, pParams) +#define ksmdbgssnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) ksmdbgssnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define ksmdbgssnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) ksmdbgssnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define ksmdbgssnGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) ksmdbgssnGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define ksmdbgssnSetNotificationShare(pNotifier, pNotifShare) ksmdbgssnSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define ksmdbgssnGetRefCount(pResource) ksmdbgssnGetRefCount_DISPATCH(pResource) +#define ksmdbgssnAddAdditionalDependants(pClient, pResource, pReference) ksmdbgssnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define 
ksmdbgssnControl_Prologue(pResource, pCallContext, pParams) ksmdbgssnControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define ksmdbgssnGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) ksmdbgssnGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define ksmdbgssnUnmapFrom(pResource, pParams) ksmdbgssnUnmapFrom_DISPATCH(pResource, pParams) +#define ksmdbgssnControl_Epilogue(pResource, pCallContext, pParams) ksmdbgssnControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define ksmdbgssnControlLookup(pResource, pParams, ppEntry) ksmdbgssnControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define ksmdbgssnControl(pGpuResource, pCallContext, pParams) ksmdbgssnControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define ksmdbgssnUnmap(pGpuResource, pCallContext, pCpuMapping) ksmdbgssnUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define ksmdbgssnGetMemInterMapParams(pRmResource, pParams) ksmdbgssnGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define ksmdbgssnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) ksmdbgssnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define ksmdbgssnControlFilter(pResource, pCallContext, pParams) ksmdbgssnControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define ksmdbgssnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) ksmdbgssnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define ksmdbgssnCanCopy(pResource) ksmdbgssnCanCopy_DISPATCH(pResource) +#define ksmdbgssnPreDestruct(pResource) ksmdbgssnPreDestruct_DISPATCH(pResource) +#define ksmdbgssnGetNotificationListPtr(pNotifier) ksmdbgssnGetNotificationListPtr_DISPATCH(pNotifier) +#define ksmdbgssnGetNotificationShare(pNotifier) ksmdbgssnGetNotificationShare_DISPATCH(pNotifier) +#define ksmdbgssnMap(pGpuResource, pCallContext, pParams, pCpuMapping) ksmdbgssnMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define ksmdbgssnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) ksmdbgssnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS ksmdbgssnInternalControlForward_IMPL(struct KernelSMDebuggerSession *arg0, NvU32 command, void *pParams, NvU32 size); + +static inline NV_STATUS ksmdbgssnInternalControlForward_DISPATCH(struct KernelSMDebuggerSession *arg0, NvU32 command, void *pParams, NvU32 size) { + return arg0->__ksmdbgssnInternalControlForward__(arg0, command, pParams, size); +} + +NvHandle ksmdbgssnGetInternalObjectHandle_IMPL(struct KernelSMDebuggerSession *arg0); + +static inline NvHandle ksmdbgssnGetInternalObjectHandle_DISPATCH(struct KernelSMDebuggerSession *arg0) { + return arg0->__ksmdbgssnGetInternalObjectHandle__(arg0); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetModeMMUDebug_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365315U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetModeMMUDebug_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_MODE_MMU_DEBUG_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetModeMMUDebug__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugGetModeMMUDebug_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, 
NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365316U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugGetModeMMUDebug_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_GET_MODE_MMU_DEBUG_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugGetModeMMUDebug__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetModeErrbarDebug_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365328U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetModeErrbarDebug_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_MODE_ERRBAR_DEBUG_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetModeErrbarDebug__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugGetModeErrbarDebug_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365329U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugGetModeErrbarDebug_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_GET_MODE_ERRBAR_DEBUG_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugGetModeErrbarDebug__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetExceptionMask_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365317U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetExceptionMask_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_EXCEPTION_MASK_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetExceptionMask__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugReadSingleSmErrorState_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365319U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugReadSingleSmErrorState_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_READ_SINGLE_SM_ERROR_STATE_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugReadSingleSmErrorState__(pKernelSMDebuggerSession, pParams); +} + +NV_STATUS ksmdbgssnCtrlCmdDebugReadAllSmErrorStates_IMPL(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS *pParams); + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugReadAllSmErrorStates_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugReadAllSmErrorStates__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugClearSingleSmErrorState_fcf1ac(struct KernelSMDebuggerSession 
*pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365321U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugClearSingleSmErrorState_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_CLEAR_SINGLE_SM_ERROR_STATE_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugClearSingleSmErrorState__(pKernelSMDebuggerSession, pParams); +} + +NV_STATUS ksmdbgssnCtrlCmdDebugClearAllSmErrorStates_IMPL(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS *pParams); + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugClearAllSmErrorStates_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugClearAllSmErrorStates__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSuspendContext_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365325U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSuspendContext_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_CMD_DEBUG_SUSPEND_CONTEXT_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSuspendContext__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugResumeContext_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365326U), ((void *)0), 0); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugResumeContext_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugResumeContext__(pKernelSMDebuggerSession); +} + +NV_STATUS ksmdbgssnCtrlCmdReadSurface_IMPL(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *arg1); + +static inline NV_STATUS ksmdbgssnCtrlCmdReadSurface_DISPATCH(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *arg1) { + return arg0->__ksmdbgssnCtrlCmdReadSurface__(arg0, arg1); +} + +NV_STATUS ksmdbgssnCtrlCmdWriteSurface_IMPL(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *arg1); + +static inline NV_STATUS ksmdbgssnCtrlCmdWriteSurface_DISPATCH(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *arg1) { + return arg0->__ksmdbgssnCtrlCmdWriteSurface__(arg0, arg1); +} + +NV_STATUS ksmdbgssnCtrlCmdGetMappings_IMPL(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS *arg1); + +static inline NV_STATUS ksmdbgssnCtrlCmdGetMappings_DISPATCH(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS *arg1) { + return arg0->__ksmdbgssnCtrlCmdGetMappings__(arg0, arg1); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetNextStopTriggerType_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365323U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS 
ksmdbgssnCtrlCmdDebugSetNextStopTriggerType_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_NEXT_STOP_TRIGGER_TYPE_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetNextStopTriggerType__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365324U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_STEP_INTERRUPT_HANDLING_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetSingleStepInterruptHandling__(pKernelSMDebuggerSession, pParams); +} + +NV_STATUS ksmdbgssnCtrlCmdDebugReadMemory_IMPL(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *arg1); + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugReadMemory_DISPATCH(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *arg1) { + return arg0->__ksmdbgssnCtrlCmdDebugReadMemory__(arg0, arg1); +} + +NV_STATUS ksmdbgssnCtrlCmdDebugWriteMemory_IMPL(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *arg1); + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugWriteMemory_DISPATCH(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *arg1) { + return arg0->__ksmdbgssnCtrlCmdDebugWriteMemory__(arg0, arg1); +} + +NV_STATUS ksmdbgssnCtrlCmdDebugExecRegOps_IMPL(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS *pParams); + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugExecRegOps_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugExecRegOps__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365330U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SINGLE_STEP_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetSingleSmSingleStep__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365331U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_STOP_TRIGGER_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetSingleSmStopTrigger__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, 
NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365332U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_RUN_TRIGGER_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetSingleSmRunTrigger__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365333U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_SET_SINGLE_SM_SKIP_IDLE_WARP_DETECT_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugSetSingleSmSkipIdleWarpDetect__(pKernelSMDebuggerSession, pParams); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus_fcf1ac(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS *pParams) { + return ksmdbgssnInternalControlForward(pKernelSMDebuggerSession, (2212365334U), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus_DISPATCH(struct KernelSMDebuggerSession *pKernelSMDebuggerSession, NV83DE_CTRL_DEBUG_GET_SINGLE_SM_DEBUGGER_STATUS_PARAMS *pParams) { + return pKernelSMDebuggerSession->__ksmdbgssnCtrlCmdDebugGetSingleSmDebuggerStatus__(pKernelSMDebuggerSession, pParams); +} + +NV_STATUS ksmdbgssnCtrlCmdDebugReadBatchMemory_IMPL(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS *arg1); + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugReadBatchMemory_DISPATCH(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS *arg1) { + return arg0->__ksmdbgssnCtrlCmdDebugReadBatchMemory__(arg0, arg1); +} + +NV_STATUS ksmdbgssnCtrlCmdDebugWriteBatchMemory_IMPL(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS *arg1); + +static inline NV_STATUS ksmdbgssnCtrlCmdDebugWriteBatchMemory_DISPATCH(struct KernelSMDebuggerSession *arg0, NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS *arg1) { + return arg0->__ksmdbgssnCtrlCmdDebugWriteBatchMemory__(arg0, arg1); +} + +static inline NvBool ksmdbgssnShareCallback_DISPATCH(struct KernelSMDebuggerSession *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__ksmdbgssnShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS ksmdbgssnMapTo_DISPATCH(struct KernelSMDebuggerSession *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__ksmdbgssnMapTo__(pResource, pParams); +} + +static inline NV_STATUS ksmdbgssnGetOrAllocNotifShare_DISPATCH(struct KernelSMDebuggerSession *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__ksmdbgssnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS ksmdbgssnCheckMemInterUnmap_DISPATCH(struct KernelSMDebuggerSession *pRmResource, NvBool bSubdeviceHandleProvided) { + return 
pRmResource->__ksmdbgssnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS ksmdbgssnGetMapAddrSpace_DISPATCH(struct KernelSMDebuggerSession *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__ksmdbgssnGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void ksmdbgssnSetNotificationShare_DISPATCH(struct KernelSMDebuggerSession *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__ksmdbgssnSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 ksmdbgssnGetRefCount_DISPATCH(struct KernelSMDebuggerSession *pResource) { + return pResource->__ksmdbgssnGetRefCount__(pResource); +} + +static inline void ksmdbgssnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct KernelSMDebuggerSession *pResource, RsResourceRef *pReference) { + pResource->__ksmdbgssnAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS ksmdbgssnControl_Prologue_DISPATCH(struct KernelSMDebuggerSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__ksmdbgssnControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ksmdbgssnGetRegBaseOffsetAndSize_DISPATCH(struct KernelSMDebuggerSession *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__ksmdbgssnGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS ksmdbgssnUnmapFrom_DISPATCH(struct KernelSMDebuggerSession *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__ksmdbgssnUnmapFrom__(pResource, pParams); +} + +static inline void ksmdbgssnControl_Epilogue_DISPATCH(struct KernelSMDebuggerSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__ksmdbgssnControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS ksmdbgssnControlLookup_DISPATCH(struct KernelSMDebuggerSession *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__ksmdbgssnControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS ksmdbgssnControl_DISPATCH(struct KernelSMDebuggerSession *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__ksmdbgssnControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS ksmdbgssnUnmap_DISPATCH(struct KernelSMDebuggerSession *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__ksmdbgssnUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS ksmdbgssnGetMemInterMapParams_DISPATCH(struct KernelSMDebuggerSession *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__ksmdbgssnGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS ksmdbgssnGetMemoryMappingDescriptor_DISPATCH(struct KernelSMDebuggerSession *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__ksmdbgssnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS ksmdbgssnControlFilter_DISPATCH(struct KernelSMDebuggerSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__ksmdbgssnControlFilter__(pResource, 
pCallContext, pParams); +} + +static inline NV_STATUS ksmdbgssnUnregisterEvent_DISPATCH(struct KernelSMDebuggerSession *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__ksmdbgssnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool ksmdbgssnCanCopy_DISPATCH(struct KernelSMDebuggerSession *pResource) { + return pResource->__ksmdbgssnCanCopy__(pResource); +} + +static inline void ksmdbgssnPreDestruct_DISPATCH(struct KernelSMDebuggerSession *pResource) { + pResource->__ksmdbgssnPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *ksmdbgssnGetNotificationListPtr_DISPATCH(struct KernelSMDebuggerSession *pNotifier) { + return pNotifier->__ksmdbgssnGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *ksmdbgssnGetNotificationShare_DISPATCH(struct KernelSMDebuggerSession *pNotifier) { + return pNotifier->__ksmdbgssnGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS ksmdbgssnMap_DISPATCH(struct KernelSMDebuggerSession *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__ksmdbgssnMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool ksmdbgssnAccessCallback_DISPATCH(struct KernelSMDebuggerSession *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__ksmdbgssnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS ksmdbgssnConstruct_IMPL(struct KernelSMDebuggerSession *arg_pKernelSMDebuggerSession, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_ksmdbgssnConstruct(arg_pKernelSMDebuggerSession, arg_pCallContext, arg_pParams) ksmdbgssnConstruct_IMPL(arg_pKernelSMDebuggerSession, arg_pCallContext, arg_pParams) +void ksmdbgssnDestruct_IMPL(struct KernelSMDebuggerSession *arg0); +#define __nvoc_ksmdbgssnDestruct(arg0) ksmdbgssnDestruct_IMPL(arg0) +void ksmdbgssnFreeCallback_IMPL(struct KernelSMDebuggerSession *arg0); +#ifdef __nvoc_kernel_sm_debugger_session_h_disabled +static inline void ksmdbgssnFreeCallback(struct KernelSMDebuggerSession *arg0) { + NV_ASSERT_FAILED_PRECOMP("KernelSMDebuggerSession was disabled!"); +} +#else //__nvoc_kernel_sm_debugger_session_h_disabled +#define ksmdbgssnFreeCallback(arg0) ksmdbgssnFreeCallback_IMPL(arg0) +#endif //__nvoc_kernel_sm_debugger_session_h_disabled + +#undef PRIVATE_FIELD + + +#endif // KERNEL_SM_DEBUGGER_SESSION_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_KERNEL_SM_DEBUGGER_SESSION_NVOC_H_ diff --git a/src/nvidia/generated/g_mem_desc_nvoc.h b/src/nvidia/generated/g_mem_desc_nvoc.h new file mode 100644 index 000000000..558f42925 --- /dev/null +++ b/src/nvidia/generated/g_mem_desc_nvoc.h @@ -0,0 +1,1081 @@ +#ifndef _G_MEM_DESC_NVOC_H_ +#define _G_MEM_DESC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_mem_desc_nvoc.h" + +#ifndef _MEMDESC_H_ +#define _MEMDESC_H_ + +#include "core/prelude.h" +#include "poolalloc.h" + + +struct OBJVASPACE; + +#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +typedef struct OBJVASPACE OBJVASPACE; +#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVASPACE +#define __nvoc_class_id_OBJVASPACE 0x6c347f +#endif /* __nvoc_class_id_OBJVASPACE */ + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct Heap; + +#ifndef __NVOC_CLASS_Heap_TYPEDEF__ +#define __NVOC_CLASS_Heap_TYPEDEF__ +typedef struct Heap Heap; +#endif /* __NVOC_CLASS_Heap_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Heap +#define __nvoc_class_id_Heap 0x556e9a +#endif /* __nvoc_class_id_Heap */ + + +struct MEMORY_DESCRIPTOR; + +typedef struct CTX_BUF_POOL_INFO CTX_BUF_POOL_INFO; +typedef struct COMPR_INFO COMPR_INFO; + +// +// Address space identifiers. +// +typedef NvU32 NV_ADDRESS_SPACE; +#define ADDR_UNKNOWN 0 // Address space is unknown +#define ADDR_SYSMEM 1 // System memory (PCI) +#define ADDR_FBMEM 2 // Frame buffer memory space +#define ADDR_REGMEM 3 // NV register memory space +#define ADDR_VIRTUAL 4 // Virtual address space only +#define ADDR_FABRIC 5 // Fabric address space for the GPA based addressing. +#define ADDR_FABRIC_V2 6 // Fabric address space for the FLA based addressing. Will replace ADDR_FABRIC. + +// +// Address translation identifiers: +// +// Memory descriptors are used to describe physical block(s) of memory. +// That memory can be described at various levels of address translation +// using the address translation (AT) enumerates. The levels of translation +// supported is illustrated below. +// +// The diagram is drawn for system memory with SR-IOV but the translations +// are similar for video memory (replace IOMMU with VMMU). VGPU pre-SR-IOV +// is also different. 
+// +// +-------------------+ +-------------------+ +// | CPU | | GPU Engine | +// +-------------------+ +-------------------+ +// | | +// | | GPU VA +// | V +// | +-------------------+ +// | CPU VA | GMMU | +// | +-------------------+ +// | | +// | | GPU GPA (AT_GPU) +// v v +// +-------------------+ +-------------------+ +// | MMU (1st level)| | | IOMMU (1st level) | +// +-------------------+ +-------------------+ +// | | +// | CPU GPA (AT_CPU) | <---- AT_PA for VGPU guest +// v v +// +-------------------+ +-------------------+ +// | MMU (2nd level) | | IOMMU (2nd level) | +// +-------------------+ +-------------------+ +// | | +// | SPA | SPA <---- AT_PA for bare metal +// v v or VGPU host +// +---------------------------------------------------+ +// | System Memory | +// +---------------------------------------------------+ +// +// +// Descriptions for *physical* address translation levels: +// +// AT_CPU - CPU physical address or guest physical address (GPA) +// AT_GPU - GPU physical address or guest physical address (GPA) +// AT_PA - When running in host RM or bare metal this is the system physical address. When +// running inside a VGPU guest environment, this is the last level of translation +// visible to the OS context that RM is running in. +// +// AT_CPU should typically == AT_PA, but there might be cases such as IBM P9 where vidmem +// might be 0-based on GPU but exposed elsewhere in the CPU address space. +// +// Descriptions for *virtual* address translation levels: +// +// AT_GPU_VA - Memory descriptors can also describe virtual memory allocations. AT_GPU_VA +// represents a GMMU virtual address. +// +#define AT_CPU AT_VARIANT(0) +#define AT_GPU AT_VARIANT(1) +#define AT_PA AT_VARIANT(2) + +#define AT_GPU_VA AT_VARIANT(3) + +// +// TODO - switch to using numeric values for AT_XYZ. Using pointers for +// typesafety after initial split from using class IDs/mmuContext +// +typedef struct ADDRESS_TRANSLATION_ *ADDRESS_TRANSLATION; +#define AT_VARIANT(x) ((struct ADDRESS_TRANSLATION_ *)x) +#define AT_VALUE(x) ((NvU64)(NvUPtr)(x)) + +// +// Overrides address translation in SR-IOV enabled usecases +// +// In SRIOV systems, an access from guest has to go through the following +// translations: +// +// GVA -> GPA -> SPA +// +// Given HOST manages channel/memory management for guest, there are certain +// code paths that expects VA -> GPA translations and some may need GPA -> SPA +// translations. We use address translation to differentiate between these +// cases. +// +// We use AT_PA to force GPA -> SPA translation for vidmem. In case of non-SRIOV systems, +// using IO_VASPACE_A will fall back to FERMI_VASPACE_A or default context. +// +#define FORCE_VMMU_TRANSLATION(pMemDesc, curAddressTranslation) \ + ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ? AT_PA : curAddressTranslation) + +typedef struct _memdescDestroyCallback MEM_DESC_DESTROY_CALLBACK; + +typedef void (MEM_DATA_RELEASE_CALL_BACK)(struct MEMORY_DESCRIPTOR *); + +// +// A memory descriptor is an object that describes and can be used to manipulate +// a block of memory. The memory can be video or system memory; it can be +// contiguous or noncontiguous; it can be tiled, block linear, etc. However, +// regardless of what type of memory it is, clients can use a standard set of +// APIs to manipulate it. 
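+//
+// [Editorial sketch, not part of the original header] A minimal lifecycle
+// example of that standard API set, using only functions declared later in
+// this header; it assumes a valid OBJGPU *pGpu, uses illustrative sizes and
+// flags, and omits NV_STATUS checks for brevity:
+//
+//     MEMORY_DESCRIPTOR *pMemDesc = NULL;
+//     NvP64 pCpuAddr, pPriv;
+//
+//     // Describe 64KB of cached, physically contiguous system memory
+//     memdescCreate(&pMemDesc, pGpu, 0x10000, 0, NV_TRUE,
+//                   ADDR_SYSMEM, NV_MEMORY_CACHED, MEMDESC_FLAGS_NONE);
+//     memdescAlloc(pMemDesc);                        // back it with physical pages
+//     memdescMap(pMemDesc, 0, 0x10000, NV_TRUE,      // kernel CPU mapping
+//                NV_PROTECT_READ_WRITE, &pCpuAddr, &pPriv);
+//     // ... access the memory through pCpuAddr ...
+//     memdescUnmap(pMemDesc, NV_TRUE, 0 /* ProcessId: placeholder */, pCpuAddr, pPriv);
+//     memdescFree(pMemDesc);                         // release the physical pages
+//     memdescDestroy(pMemDesc);                      // drop the reference
+//
+// NV_PROTECT_READ_WRITE is assumed here as the protection flag; see the
+// memdescMap() declaration below for the exact contract.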
+// +DECLARE_INTRUSIVE_LIST(MEMORY_DESCRIPTOR_LIST); + +typedef struct MEMORY_DESCRIPTOR +{ + // The GPU that this memory belongs to + OBJGPU *pGpu; + + // Flags field for optional behavior + NvU64 _flags; + + // Size of mapping used for this allocation. Multiple mappings on Fermi must always use the same page size. + NvU32 _pageSize; + + // Size of the memory allocation in pages + NvU64 PageCount; + + // Alignment of the memory allocation as size in bytes + // XXX: would 32b work here? + NvU64 Alignment; + + // Size of the memory allocation requested in bytes + NvU64 Size; + + // Actual size of memory allocated to satisfy alignment. + // We report the requested size, not the actual size. A number of callers + // depend on this. + NvU64 ActualSize; + + // The information returned from osAllocPages + NvP64 _address; + void *_pMemData; + MEM_DATA_RELEASE_CALL_BACK *_pMemDataReleaseCallback; + + // When memory is allocated by a guest Virtual Machine (VM) + // it is aliased by the host RM. We store a unique guest ID + // for each piece of aliased memory to facilitate host RM mappings + // to these pages (only in case of system memory). + // XXX: would 32b work here? + NvU64 _guestId; + + // To keep track of the offset from parent memdesc + NvU64 subMemOffset; + + // + // The byte offset at which the memory allocation begins within the first + // PTE. To locate the physical address of the byte at offset i in the memory + // allocation, use the following logic: + // i += PteAdjust; + // if (PhysicallyContiguous) + // PhysAddr = PteArray[0] + i; + // else + // PhysAddr = PteArray[i >> RM_PAGE_SHIFT] + (i & RM_PAGE_MASK); + // + NvU32 PteAdjust; + + // Has the memory been allocated yet? + NvBool Allocated; + + // + // Marks that a request to deallocate memory has been called on this memdesc while it had multiple references + // NV_TRUE denotes that memFree will be called when refcount reaches 0. + // + NvBool bDeferredFree; + + // Does this use SUBALLOCATOR? + NvBool bUsingSuballocator; + + // Where does the memory live? Video, system, other + NV_ADDRESS_SPACE _addressSpace; + + // Attributes reflecting GPU caching of this memory. + NvU32 _gpuCacheAttrib; + + // Peer vid mem cacheability + NvU32 _gpuP2PCacheAttrib; + + // One of NV_MEMORY_CACHED, NV_MEMORY_UNCACHED, NV_MEMORY_WRITECOMBINED + NvU32 _cpuCacheAttrib; + + // The page kind of this memory + NvU32 _pteKind; + NvU32 _pteKindCompressed; + + // + // Scale memory allocation by this value + // + NvU32 _subDeviceAllocCount; + + // + // Reference count for the object. + // + NvU32 RefCount; + + // Reference count for duplication of memory object via RmDupObject. + NvU32 DupCount; + + // + // The HwResId is used by the device dependent HAL to keep track of + // resources attached to the memory (e.g.: compression tags, zcull). + // + NvU32 _hwResId; + + // + // Keep track which heap is actually used for this allocation + // + struct Heap *pHeap; + + // + // GFID that this memory allocation belongs to + // + NvU32 gfid; + + // + // Keep track of the PMA_ALLOC_INFO data. 
+ // + struct PMA_ALLOC_INFO *pPmaAllocInfo; + + // Serve as head node in a list of page handles + PoolPageHandleList *pPageHandleList; + + // + // List of callbacks to call when destroying memory descriptor + // + MEM_DESC_DESTROY_CALLBACK *_pMemDestroyCallbackList; + + // pointer to descriptor which was used to subset current descriptor + struct MEMORY_DESCRIPTOR *_pParentDescriptor; + + // Count used for sanity check + NvU32 childDescriptorCnt; + + // Next memory descriptor in subdevice list + struct MEMORY_DESCRIPTOR *_pNext; + + // Pointer to system Memory descriptor which used to back some FB content across S3/S4. + struct MEMORY_DESCRIPTOR *_pStandbyBuffer; + + // Serve as a head node in a list of submemdescs + MEMORY_DESCRIPTOR_LIST *pSubMemDescList; + + // If strung in a intrusive linked list + ListNode node; + + // + // Pointer to IOVA mappings used to back the IOMMU VAs for different IOVA spaces + // Submemory descriptors only have on mapping, but the root descriptor will have + // one per IOVA space that the memory is mapped into. + // + struct IOVAMAPPING *_pIommuMappings; + + // Kernel mapping of the memory + NvP64 _kernelMapping; + NvP64 _kernelMappingPriv; + + // Internal mapping + void *_pInternalMapping; + void *_pInternalMappingPriv; + NvU32 _internalMappingRefCount; + + // Array to hold SPA addresses when memdesc is allocated from GPA. Valid only for SRIOV cases + RmPhysAddr *pPteSpaMappings; + + // + // context buffer pool from which this memdesc is to be allocated. + // This is controlled by PDB_PROP_GPU_MOVE_RM_BUFFERS_TO_PMA which is + // enabled only for SMC today + // + CTX_BUF_POOL_INFO *pCtxBufPool; + + // Max physical address width to be override + NvU32 _overridenAddressWidth; + + // We verified that memdesc is safe to be mapped as large pages + NvBool bForceHugePages; + + // + // If PhysicallyContiguous is NV_TRUE, this array consists of one element. + // If PhysicallyContiguous is NV_FALSE, this array is actually larger and has + // one entry for each physical page in the memory allocation. As a result, + // this structure must be allocated from the heap. + // If the AddressSpace is ADDR_FBMEM, each entry is an FB offset. + // Otherwise, each entry is a physical address on the system bus. + // TBD: for now, the array will be sized at one entry for every 4KB, but + // we probably want to optimize this later to support 64KB pages. + // + RmPhysAddr _pteArray[1]; + //!!! Place nothing behind PteArray!!! +} MEMORY_DESCRIPTOR, *PMEMORY_DESCRIPTOR; + +MAKE_INTRUSIVE_LIST(MEMORY_DESCRIPTOR_LIST, MEMORY_DESCRIPTOR, node); + +// +// Common address space lists +// +extern const NV_ADDRESS_SPACE ADDRLIST_FBMEM_PREFERRED[]; +extern const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_PREFERRED[]; +extern const NV_ADDRESS_SPACE ADDRLIST_FBMEM_ONLY[]; +extern const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_ONLY[]; + +NvU32 memdescAddrSpaceListToU32(const NV_ADDRESS_SPACE *addrlist); +const NV_ADDRESS_SPACE *memdescU32ToAddrSpaceList(NvU32 index); + +NV_STATUS _memdescUpdateSpaArray(PMEMORY_DESCRIPTOR pMemDesc); +// Create a memory descriptor data structure (without allocating any physical +// storage). +NV_STATUS memdescCreate(MEMORY_DESCRIPTOR **ppMemDesc, OBJGPU *pGpu, NvU64 Size, + NvU64 alignment, NvBool PhysicallyContiguous, + NV_ADDRESS_SPACE AddressSpace, NvU32 CpuCacheAttrib, NvU64 Flags); + +#define MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE_FB_BC_ONLY(pGpu, addressSpace) \ + ((gpumgrGetBcEnabledStatus(pGpu) && (pGpu != NULL) && (addressSpace == ADDR_FBMEM)) ? 
MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE : MEMDESC_FLAGS_NONE) + +// Initialize a caller supplied memory descriptor for use with memdescDescribe() +void memdescCreateExisting(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, NvU64 Size, + NV_ADDRESS_SPACE AddressSpace, + NvU32 CpuCacheAttrib, NvU64 Flags); + +// Increment reference count +void memdescAddRef(MEMORY_DESCRIPTOR *pMemDesc); + +// Decrement reference count +void memdescRemoveRef(MEMORY_DESCRIPTOR *pMemDesc); + +// Decrement reference count and reclaim any resources when possible +void memdescDestroy(MEMORY_DESCRIPTOR *pMemDesc); + +// +// The destroy callback is called when the memory descriptor is +// destroyed with memdescDestroy(). +// +// The caller is responsible for managing the memory used +// containing the callback. +// +typedef void (MemDescDestroyCallBack)(OBJGPU *, void *pObject, MEMORY_DESCRIPTOR *); +struct _memdescDestroyCallback +{ + MemDescDestroyCallBack *destroyCallback; + void *pObject; + MEM_DESC_DESTROY_CALLBACK *pNext; +}; +void memdescAddDestroyCallback(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *); +void memdescRemoveDestroyCallback(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *); + +// Allocate physical storage for a memory descriptor and fill in its PteArray +NV_STATUS memdescAlloc(MEMORY_DESCRIPTOR *pMemDesc); + +// Allocate memory from one of the possible locations specified in pList. +NV_STATUS memdescAllocList(MEMORY_DESCRIPTOR *pMemDesc, const NV_ADDRESS_SPACE *pList); + +// Free physical storage for a memory descriptor +void memdescFree(MEMORY_DESCRIPTOR *pMemDesc); + +// Lock the paged virtual memory +NV_STATUS memdescLock(MEMORY_DESCRIPTOR *pMemDesc); + +// Unlock the paged virtual memory +NV_STATUS memdescUnlock(MEMORY_DESCRIPTOR *pMemDesc); + +// Allocate a CPU mapping of an arbitrary subrange of the memory. +// 64-bit clean (mac can have a 32-bit kernel pointer and 64-bit client pointers) +NV_STATUS memdescMap(MEMORY_DESCRIPTOR *pMemDesc, NvU64 Offset, NvU64 Size, + NvBool Kernel, NvU32 Protect, NvP64 *pAddress, NvP64 *pPriv); + +// Free a CPU mapping of an arbitrary subrange of the memory. +void memdescUnmap(MEMORY_DESCRIPTOR *pMemDesc, NvBool Kernel, NvU32 ProcessId, + NvP64 Address, NvP64 Priv); + +// Allocate a CPU mapping of an arbitrary subrange of the memory. +// fails unless Kernel == NV_TRUE +NV_STATUS memdescMapOld(MEMORY_DESCRIPTOR *pMemDesc, NvU64 Offset, NvU64 Size, + NvBool Kernel, NvU32 Protect, void **pAddress, void **pPriv); + +// Free a CPU mapping of an arbitrary subrange of the memory. +void memdescUnmapOld(MEMORY_DESCRIPTOR *pMemDesc, NvBool Kernel, NvU32 ProcessId, + void *Address, void *Priv); + +// Fill in a MEMORY_DESCRIPTOR with a description of a preexisting contiguous +// memory allocation. It should already be initialized with +// memdescCreate*(). +void memdescDescribe(MEMORY_DESCRIPTOR *pMemDesc, + NV_ADDRESS_SPACE AddressSpace, + RmPhysAddr Base, NvU64 Size); + +// Fill in a MEMORY_DESCRIPTOR with the physical page addresses returned by PMA. +// It should already be initialized with memdescCreate*(). +void memdescFillPages(MEMORY_DESCRIPTOR *pMemDesc, NvU32 offset, + NvU64 *pPages, NvU32 pageCount, NvU32 pageSize); + +// Create a MEMORY_DESCRIPTOR for a subset of an existing memory allocation. +// The new MEMORY_DESCRIPTOR must be freed with memdescDestroy. 
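+//
+// [Editorial sketch, not part of the original header] Illustrative call shape,
+// assuming pParentMemDesc was created and allocated earlier and pGpu is valid
+// (all names are placeholders):
+//
+//     MEMORY_DESCRIPTOR *pSubMemDesc = NULL;
+//     NV_STATUS status = memdescCreateSubMem(&pSubMemDesc, pParentMemDesc, pGpu,
+//                                            0x1000 /* Offset */, 0x1000 /* Size */);
+//     // ... use pSubMemDesc like any other descriptor ...
+//     memdescDestroy(pSubMemDesc);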
+NV_STATUS memdescCreateSubMem(MEMORY_DESCRIPTOR **ppMemDescNew, + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, NvU64 Offset, NvU64 Size); + +// Compute the physical address of a byte within a MEMORY_DESCRIPTOR +RmPhysAddr memdescGetPhysAddr(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU64 offset); + +// Compute count physical addresses within a MEMORY_DESCRIPTOR. Starting at the +// given offset and advancing it by stride for each consecutive address. +void memdescGetPhysAddrs(MEMORY_DESCRIPTOR *pMemDesc, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses); + +// Compute count physical addresses within a MEMORY_DESCRIPTOR for a specific +// GPU. Starting at the given offset and advancing it by stride for each +// consecutive address. +void memdescGetPhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses); + +// Obtains one of the PTEs from the MEMORY_DESCRIPTOR. Assumes 4KB pages, +// and works for either contiguous or noncontiguous descriptors. +RmPhysAddr memdescGetPte(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 PteIndex); + +void memdescSetPte(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 PteIndex, RmPhysAddr PhysAddr); + +// Obtains the PteArray from the MEMORY_DESCRIPTOR for the specified GPU. +RmPhysAddr * memdescGetPteArrayForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, ADDRESS_TRANSLATION addressTranslation); + +/*! + * @brief Obtains the PteArray from the MEMORY_DESCRIPTOR. + * + * @param[in] pMemDesc Memory descriptor to use + * @param[in] addressTranslation Address translation identifier + * + * @returns PageArray + */ +static inline RmPhysAddr * +memdescGetPteArray(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + return memdescGetPteArrayForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation); +} + +// Obtains the PteArray size from the MEMORY_DESCRIPTOR based on the mmuContext. +NvU32 memdescGetPteArraySize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation); + +// Return the aperture of the NV_ADDRESS_SPACE as a null terminated string. +// Useful for print statements. +const char* memdescGetApertureString(NV_ADDRESS_SPACE addrSpace); + +// Return true if two MEMORY_DESCRIPTOR are equal +NvBool memdescDescIsEqual(MEMORY_DESCRIPTOR *pMemDescOne, MEMORY_DESCRIPTOR *pMemDescTwo); + +// Retrieve the per-GPU memory descriptor for a subdevice +MEMORY_DESCRIPTOR *memdescGetMemDescFromSubDeviceInst(MEMORY_DESCRIPTOR *pMemDesc, NvU32 subDeviceInst); + +// Retrieve the per-GPU memory descriptor for a GPU +MEMORY_DESCRIPTOR *memdescGetMemDescFromGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu); + +// Retrieve the per-GPU memory descriptor at an index +MEMORY_DESCRIPTOR *memdescGetMemDescFromIndex(MEMORY_DESCRIPTOR *pMemDesc, NvU32 index); + +// Print information on memory descriptor +void memdescPrintMemdesc(MEMORY_DESCRIPTOR *pMemDesc, NvBool bPrintIndividualPages, const char *pPrefixMessage); + +// Get the page offset for an arbitrary power of two page size +NvU64 memdescGetPageOffset(MEMORY_DESCRIPTOR *pMemDesc, NvU32 pageSize); + +// +// Internal APIs for the IOVASPACE to manage IOMMU mappings in a memdesc. +// +// Note that the external APIs are memdescMapIommu(), +// memdescUnmapIommu() and memdescGetIommuMap(). 
+// +NV_STATUS memdescAddIommuMap(PMEMORY_DESCRIPTOR pMemDesc, struct IOVAMAPPING *pIommuMap); +void memdescRemoveIommuMap(PMEMORY_DESCRIPTOR pMemDesc, struct IOVAMAPPING *pIommuMap); + +// +// Map and unmap IOMMU for the specified VA space +// +// Each memdescUnmapIommu() call has to be paired with a previous successful +// memdescMapIommu() call for the same VA space. The calls are refcounted for +// each VA space and only the last Unmap will remove the mappings. +// +// The caller has to guarantee that before the VA space is destroyed, either the +// mapping is explicitly unmapped with memdescUnmapIommu() or the memdesc is +// freed (or destroyed for memdescs that are not memdescFree()d). +// +NV_STATUS memdescMapIommu(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId); +void memdescUnmapIommu(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId); + +// Returns the IOVA mapping created by memdescMapIommu(). +struct IOVAMAPPING *memdescGetIommuMap(PMEMORY_DESCRIPTOR pMemDesc, NvU32 vaspaceId); + +// +// Check subdevice consistency functions +// +void memdescCheckSubDevicePageSizeConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS, + NvU64 pageSize, NvU64 pageOffset); +void memdescCheckSubDeviceMemContiguityConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS, + NvBool bIsMemContiguous); +NV_STATUS memdescCheckSubDeviceKindComprConsistency(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, struct OBJVASPACE *pVAS, + NvU32 kind, COMPR_INFO *pComprInfo); + +// +// Accessor functions +// +void memdescSetHeapOffset(MEMORY_DESCRIPTOR *pMemDesc, RmPhysAddr fbOffset); +void memdescSetCpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 cpuCacheAttrib); +void memdescSetGpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 GpuCacheAttrib); +NvU32 memdescGetGpuP2PCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetGpuP2PCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc, NvU32 GpuCacheAttrib); +NvU32 memdescGetPteKindForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu); +void memdescSetPteKindForGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu, NvU32 pteKind); +NvU32 memdescGetPteKindCompressed(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetPteKindCompressed(MEMORY_DESCRIPTOR *pMemDesc, NvU32 pteKindCmpr); +NvP64 memdescGetKernelMapping(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetKernelMapping(MEMORY_DESCRIPTOR *pMemDesc, NvP64 kernelMapping); +NvP64 memdescGetKernelMappingPriv(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetKernelMappingPriv(MEMORY_DESCRIPTOR *pMemDesc, NvP64 kernelMappingPriv); +MEMORY_DESCRIPTOR *memdescGetStandbyBuffer(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetStandbyBuffer(MEMORY_DESCRIPTOR *pMemDesc, MEMORY_DESCRIPTOR *pStandbyBuffer); +void memdescSetDestroyCallbackList(MEMORY_DESCRIPTOR *pMemDesc, MEM_DESC_DESTROY_CALLBACK *pCb); +NvU64 memdescGetGuestId(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetGuestId(MEMORY_DESCRIPTOR *pMemDesc, NvU64 guestId); +NvBool memdescGetFlag(MEMORY_DESCRIPTOR *pMemDesc, NvU64 flag); +void memdescSetFlag(MEMORY_DESCRIPTOR *pMemDesc, NvU64 flag, NvBool bValue); +NvP64 memdescGetAddress(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetAddress(MEMORY_DESCRIPTOR *pMemDesc, NvP64 pAddress); +void *memdescGetMemData(MEMORY_DESCRIPTOR *pMemDesc); +void memdescSetMemData(MEMORY_DESCRIPTOR *pMemDesc, void *pMemData, MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback); +NvBool memdescGetVolatility(MEMORY_DESCRIPTOR *pMemDesc); +NvBool memdescGetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation); +void 
memdescSetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvBool isContiguous); +NvBool memdescCheckContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation); +NV_ADDRESS_SPACE memdescGetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc); +NvU32 memdescGetPageSize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation); +void memdescSetPageSize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvU32 pageSize); +PMEMORY_DESCRIPTOR memdescGetRootMemDesc(PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pRootOffset); +void memdescSetCustomHeap(PMEMORY_DESCRIPTOR); +NvBool memdescGetCustomHeap(PMEMORY_DESCRIPTOR); + +/*! + * @brief Get PTE kind + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * + * @returns Current PTE kind value. + */ +static inline NvU32 +memdescGetPteKind(PMEMORY_DESCRIPTOR pMemDesc) +{ + return memdescGetPteKindForGpu(pMemDesc, pMemDesc->pGpu); +} + +/*! + * @brief Set PTE kind. + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pteKind New PTE kind + * + * @returns nothing + */ +static inline void +memdescSetPteKind(PMEMORY_DESCRIPTOR pMemDesc, NvU32 pteKind) +{ + memdescSetPteKindForGpu(pMemDesc, pMemDesc->pGpu, pteKind); +} + +/*! + * @brief Get HW resource identifier (HwResId) + * + * TODO: Need to ensure this is checked per subdevice only. + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current HW resource identifier + */ +static inline NvU32 +memdescGetHwResId(PMEMORY_DESCRIPTOR pMemDesc) +{ + return pMemDesc->_hwResId; +} + +/*! + * @brief Set HW resource identifier (HwResId) + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] hwResId New HW resource identifier + * + * @returns nothing + */ +static inline void +memdescSetHwResId(PMEMORY_DESCRIPTOR pMemDesc, NvU32 hwResId) +{ + pMemDesc->_hwResId = hwResId; +} + +/*! + * @brief Get mem destroy callback list pointer + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Pointer to mem destroy callback list + */ +static inline MEM_DESC_DESTROY_CALLBACK * +memdescGetDestroyCallbackList(MEMORY_DESCRIPTOR *pMemDesc) +{ + return pMemDesc->_pMemDestroyCallbackList; +} + +/*! + * @brief Get the byte offset relative to the root memory descriptor. + * + * Root memory descriptor is the Top level memory descriptor with no parent, + * from which this memory descriptor was derived. + * + * @param[in] pMemDesc Return pointer to memory descriptor. + * + * @returns the byte offset relative to Root memory descriptor. + */ +static inline NvU64 +memdescGetRootOffset(PMEMORY_DESCRIPTOR pMemDesc) +{ + NvU64 rootOffset = 0; + (void)memdescGetRootMemDesc(pMemDesc, &rootOffset); + return rootOffset; +} + +/*! + * @brief Get CPU cache attributes + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current CPU cache attributes + */ +static inline NvU32 +memdescGetCpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc) +{ + return pMemDesc->_cpuCacheAttrib; +} + +/*! + * @brief Get GPU cache attributes + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current GPU cache attributes + */ +static inline NvU32 +memdescGetGpuCacheAttrib(MEMORY_DESCRIPTOR *pMemDesc) +{ + return pMemDesc->_gpuCacheAttrib; +} + +/*! + * @brief Return pte adjust + * + * PteAdjust is zero whenever the memory is allocated as allocations are always + * going to be page-size aligned. 
However, we can have memory descriptors + * created on pre-allocated addresses + offset that aren't page aligned. + * PteAdjust is non-zero in such cases. We do not allow memdescDescribe operation + * (i.e. memory descriptors created on pre-allocated address) for subdevice + * memdesc and hence top level memdesc is always used to access pte adjust. + * + * @param[in] pMemDesc Memory descriptor to use + * + * @returns PteAdjust + */ +static inline NvU32 +memdescGetPteAdjust(PMEMORY_DESCRIPTOR pMemDesc) +{ + return pMemDesc->PteAdjust; +} + +/*! + * @brief Get subdevice allocation count. + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current subdevice allocation count value. + */ +static inline NvU32 +memdescGetSubDeviceAllocCount (MEMORY_DESCRIPTOR *pMemDesc) +{ + return pMemDesc->_subDeviceAllocCount; +} + +/*! + * @brief Get memory descriptor of parent + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Memory descriptor of parent + */ +static inline MEMORY_DESCRIPTOR * +memdescGetParentDescriptor(MEMORY_DESCRIPTOR *pMemDesc) +{ + return pMemDesc->_pParentDescriptor; +} + +/*! + * @brief Set the address space of the memory descriptor + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * @param[in] addressSpace Address Space + * + * @returns nothing + */ +static inline void +memdescSetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc, NV_ADDRESS_SPACE addressSpace) +{ + pMemDesc->_addressSpace = addressSpace; +} + +/*! + * @brief Return size + * + * @param[in] pMemDesc Memory descriptor to use + * + * @returns Size + */ +static inline NvU64 +memdescGetSize(PMEMORY_DESCRIPTOR pMemDesc) +{ + return pMemDesc->Size; +} + +/*! + * @brief Checks if subdevice memory descriptors are present + * + * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice memory + * descriptors + * + * @param[in] pMemDesc Memory descriptor to query + * + * @returns NV_TRUE if subdevice memory descriptors exist + */ +static NV_INLINE NvBool +memdescHasSubDeviceMemDescs(MEMORY_DESCRIPTOR *pMemDesc) +{ + return (pMemDesc->_subDeviceAllocCount > 1); +} + +/*! + * @brief Checks if memory descriptor describes memory that is submemory + * + * @param[in] pMemDesc Memory descriptor to query + * + * @returns NV_TRUE if it is a submemory desc, NV_FALSE otherwise. + */ +static NV_INLINE NvBool +memdescIsSubMemoryMemDesc(MEMORY_DESCRIPTOR *pMemDesc) +{ + return pMemDesc->_pParentDescriptor != NULL ? NV_TRUE : NV_FALSE; +} + +NV_STATUS memdescGetNvLinkGpa(OBJGPU *pGpu, NvU64 pageCount, RmPhysAddr *pGpa); + +NV_STATUS memdescSetCtxBufPool(PMEMORY_DESCRIPTOR pMemDesc, CTX_BUF_POOL_INFO* pCtxBufPool); +CTX_BUF_POOL_INFO* memdescGetCtxBufPool(PMEMORY_DESCRIPTOR pMemDesc); + +/*! + * @brief Override the registry INST_LOC two-bit enum to an aperture (list) + cpu attr. + * + * loc parameters uses NV_REG_STR_RM_INST_LOC defines. + * Caller must set initial default values. + */ +void memdescOverrideInstLoc(NvU32 loc, const char *name, NV_ADDRESS_SPACE *pAddrSpace, NvU32 *pCpuMappingAttr); +void memdescOverrideInstLocList(NvU32 loc, const char *name, const NV_ADDRESS_SPACE **ppAllocList, NvU32 *pCpuMappingAttr); + +/*! +* @brief Override the physical system address limit. +* +*/ +void memdescOverridePhysicalAddressWidthWindowsWAR(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 addressWidth); + +/*! 
+* @brief Register memory descriptor referenced by hMemory in CPU-RM to GSP +* +* @param[in] pGpu OBJGPU pointer +* @param[in] hClient client handled +* @param[in] hSubDevice subdevice handle +* @param[in] hMemory memory handle +* +* @returns NV_STATUS +*/ +NV_STATUS memdescRegisterToGSP(OBJGPU *pGpu, NvHandle hClient, NvHandle hParent, NvHandle hMemory); + +/*! +* @brief Deregister memory descriptor referenced by hMemory in CPU-RM from GSP +* +* @param[in] pGpu OBJGPU pointer +* @param[in] hClient client handled +* @param[in] hSubDevice subdevice handle +* @param[in] hMemory memory handle +* +* @returns NV_STATUS +*/ + +NV_STATUS memdescDeregisterFromGSP(OBJGPU *pGpu, NvHandle hClient, NvHandle hParent, NvHandle hMemory); + +// cache maintenance functions +void memdescFlushGpuCaches(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc); +void memdescFlushCpuCaches(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc); + +// Map memory descriptor for RM internal access +void* memdescMapInternal(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags); +void memdescUnmapInternal(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags); + +// +// External flags: +// ALLOC_PER_SUBDEVICE Allocate independent system memory for each GPU +// LOST_ON_SUSPEND PM code will skip this allocation during S/R +// LOCKLESS_SYSMEM_ALLOC System memory should be allocated unprotected by +// the RM lock +// GPU_PRIVILEGED This memory will be marked as privileged in the GPU +// page tables. When set only GPU requestors who are +// "privileged" are allowed to access this memory. +// This can be used for mapping sensitive memory into +// a user's GPU address space (like context buffers). +// Note support for this in our GPUs is limited, so +// only use it if you know the HW accessing the memory +// makes privileged requests. +// +// Internal flags: +// SET_KIND Whether or not the kind was set a different value +// than default. +// PRE_ALLOCATED Caller provided memory descriptor memory +// FIXED_ADDRESS_ALLOCATE Allocate from the heap with a fixed address +// ALLOCATED Has the memory been allocated yet? +// GUEST_ALLOCATED Is the memory allocated by a guest VM? +// We make aliased memory descriptors to guest +// allocated memory and mark it so, so that we know +// how to deal with it in memdescMap() etc. +// KERNEL_MODE Is the memory for a user or kernel context? +// XXX This is lame, and it would be best if we could +// get rid of it. Memory *storage* isn't either user +// or kernel -- only mappings are user or kernel. +// Unfortunately, osAllocPages requires that we +// provide this information. +// PHYSICALLY_CONTIGUOUS Are the underlying physical pages of this memory +// allocation contiguous? +// ENCRYPTED TurboCipher allocations need a bit in the PTE to +// indicate encrypted +// UNICAST Memory descriptor was created via UC path +// PAGED_SYSMEM Allocate the memory from paged system memory. When +// this flag is used, memdescLock() should be called +// to lock the memory in physical pages before we +// access this memory descriptor. +// CPU_ONLY Allocate memory only accessed by CPU. 
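+//
+// Illustrative sketch (editorial, not part of the driver source): the flags
+// form a 64-bit mask passed as the last argument of memdescCreate() and can
+// be queried or toggled per bit afterwards. The particular combination below
+// is an assumption chosen only to show the OR-ing pattern.
+//
+//     NvU64 flags = MEMDESC_FLAGS_KERNEL_MODE |
+//                   MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS |
+//                   MEMDESC_FLAGS_LOST_ON_SUSPEND;
+//
+//     // After creation, individual bits can be set or tested:
+//     //     memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_TRUE);
+//     //     if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_LOST_ON_SUSPEND)) { ... }
+//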
+// +#define MEMDESC_FLAGS_NONE ((NvU64)0x0) +#define MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE NVBIT64(0) +#define MEMDESC_FLAGS_SET_KIND NVBIT64(1) +#define MEMDESC_FLAGS_LOST_ON_SUSPEND NVBIT64(2) +#define MEMDESC_FLAGS_PRE_ALLOCATED NVBIT64(3) +#define MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE NVBIT64(4) +#define MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC NVBIT64(5) +#define MEMDESC_FLAGS_GPU_IN_RESET NVBIT64(6) +#define MEMDESC_ALLOC_FLAGS_PROTECTED NVBIT64(7) +#define MEMDESC_FLAGS_GUEST_ALLOCATED NVBIT64(8) +#define MEMDESC_FLAGS_KERNEL_MODE NVBIT64(9) +#define MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS NVBIT64(10) +#define MEMDESC_FLAGS_ENCRYPTED NVBIT64(11) +#define MEMDESC_FLAGS_PAGED_SYSMEM NVBIT64(12) +#define MEMDESC_FLAGS_GPU_PRIVILEGED NVBIT64(13) +#define MEMDESC_FLAGS_PRESERVE_CONTENT_ON_SUSPEND NVBIT64(14) +#define MEMDESC_FLAGS_DUMMY_TOPLEVEL NVBIT64(15) + +// Don't use the below two flags. For memdesc internal use only. +// These flags will be removed on memory allocation refactoring in RM +#define MEMDESC_FLAGS_PROVIDE_IOMMU_MAP NVBIT64(16) +#define MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE NVBIT64(17) + +#define MEMDESC_FLAGS_CUSTOM_HEAP_ACR NVBIT64(18) + +// Allocate in "fast" or "slow" memory, if there are multiple grades of memory (like mixed density) +#define MEMDESC_FLAGS_HIGH_PRIORITY NVBIT64(19) +#define MEMDESC_FLAGS_LOW_PRIORITY NVBIT64(20) + +// Flag to specify if requested size should be rounded to page size +#define MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE NVBIT64(21) + +#define MEMDESC_FLAGS_CPU_ONLY NVBIT64(22) + +// This flags is used for a special SYSMEM descriptor that points to a memory +// region allocated externally (e.g. malloc, kmalloc etc.) +#define MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM NVBIT64(23) + +// Owned by Physical Memory Allocator (PMA). +#define MEMDESC_FLAGS_ALLOC_PMA_OWNED NVBIT64(24) + +// This flag is added as part of Sub-Allocator feature meant to be used by VGPU clients. +// Once VGPU clients allocate a large block of memory for their use, they carve-out a small +// portion of it to be used for RM internal allocations originating from a given client. Each +// allocation can choose to use this carved-out memory owned by client or be part of global heap. +// This flag has to be used in RM internal allocation only when a particular allocation is tied to +// the life-time of this client and will be freed before client gets destroyed. +#define MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE NVBIT64(25) + +// This flag is used to specify the pages are pinned using other kernel module or API +// Currently, this flag is used for vGPU on KVM where RM calls vfio APIs to pin and unpin pages +// instead of using os_lock_user_pages() and os_unlock_user_pages(). +#define MEMDESC_FLAGS_FOREIGN_PAGE NVBIT64(26) + +// These flags are used for SYSMEM descriptors that point to a physical BAR +// range and do not take the usual memory mapping paths. Currently, these are used for vGPU. +#define MEMDESC_FLAGS_BAR0_REFLECT NVBIT64(27) +#define MEMDESC_FLAGS_BAR1_REFLECT NVBIT64(28) + +// This flag is used to create shared memory required for vGPU operation. +// During RPC and all other shared memory allocations, VF RM will set this flag to instruct mods +// layer to create shared memory between VF process and PF process. +#define MEMDESC_FLAGS_MODS_SHARED_MEM NVBIT64(29) + +// This flag is set in memdescs that describe client (currently MODS) managed VPR allocations. 
+#define MEMDESC_FLAGS_VPR_REGION_CLIENT_MANAGED NVBIT64(30) + +// This flags is used for a special SYSMEM descriptor that points to physical BAR +// range of a third party device. +#define MEMDESC_FLAGS_PEER_IO_MEM NVBIT64(31) + +// If the flag is set, the RM will only allow read-only CPU user-mappings +// to the descriptor. +#define MEMDESC_FLAGS_USER_READ_ONLY NVBIT64(32) + +// If the flag is set, the RM will only allow read-only DMA mappings +// to the descriptor. +#define MEMDESC_FLAGS_DEVICE_READ_ONLY NVBIT64(33) + +// This flag is used to denote the memory descriptor that is part of larger memory descriptor; +// created using NV01_MEMORY_LIST_SYSTEM, NV01_MEMORY_LIST_FBMEM or NV01_MEMORY_LIST_OBJECT. +#define MEMDESC_FLAGS_LIST_MEMORY NVBIT64(34) + +// This flag is used to denote that this memdesc is allocated from +// a context buffer pool. When this flag is set, we expect a pointer +// to this context buffer pool to be cached in memdesc. +#define MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL NVBIT64(36) + +// +// This flag is used to skip privilege checks for the ADDR_REGMEM mapping type. +// This flag is useful for cases like UserModeApi where we want to use this memory type +// in a non-privileged user context +#define MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK NVBIT64(37) + +// This flag denotes the memory descriptor of type Display non iso +#define MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO NVBIT64(38) + +// This flag is used to force mapping of coherent sysmem through +// the GMMU over BAR1. This is useful when we need some form +// of special translation of the SYSMEM_COH aperture by the GMMU. +#define MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1 NVBIT64(39) + +// This flag is used to override system memory limit to be allocated +// within override address width. +#define MEMDESC_FLAGS_OVERRIDE_SYSTEM_ADDRESS_LIMIT NVBIT64(40) + +// +// If this flag is set, Linux RM will ensure that the allocated memory is +// 32-bit addressable. +#define MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE NVBIT64(41) + +// +// If this flag is set, the memory is registered in GSP +// +#define MEMDESC_FLAGS_REGISTERED_TO_GSP NVBIT64(42) + +// +// Indicates that this memdesc is tracking client sysmem allocation as +// against RM internal sysmem allocation +// +#define MEMDESC_FLAGS_SYSMEM_OWNED_BY_CLIENT NVBIT64(44) + +// +// The following is a special use case for sharing memory between +// the GPU and a WSL client. There is no IOMMU-compliant support +// currently for this, so a WAR is required for r515. The intent +// is to remove this by r525. 
+// +#define MEMDESC_FLAGS_WSL_SHARED_MEMORY NVBIT64(46) + +#endif // _MEMDESC_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MEM_DESC_NVOC_H_ diff --git a/src/nvidia/generated/g_mem_fabric_nvoc.c b/src/nvidia/generated/g_mem_fabric_nvoc.c new file mode 100644 index 000000000..e5817964e --- /dev/null +++ b/src/nvidia/generated/g_mem_fabric_nvoc.c @@ -0,0 +1,375 @@ +#define NVOC_MEM_FABRIC_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mem_fabric_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x127499 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryFabric; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_MemoryFabric(MemoryFabric*); +void __nvoc_init_funcTable_MemoryFabric(MemoryFabric*); +NV_STATUS __nvoc_ctor_MemoryFabric(MemoryFabric*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_MemoryFabric(MemoryFabric*); +void __nvoc_dtor_MemoryFabric(MemoryFabric*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryFabric; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryFabric_MemoryFabric = { + /*pClassDef=*/ &__nvoc_class_def_MemoryFabric, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MemoryFabric, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryFabric_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryFabric, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryFabric_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryFabric, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryFabric_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryFabric, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryFabric_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryFabric, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryFabric_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryFabric, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MemoryFabric = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_MemoryFabric_MemoryFabric, + &__nvoc_rtti_MemoryFabric_Memory, + &__nvoc_rtti_MemoryFabric_RmResource, + &__nvoc_rtti_MemoryFabric_RmResourceCommon, + &__nvoc_rtti_MemoryFabric_RsResource, + &__nvoc_rtti_MemoryFabric_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryFabric = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MemoryFabric), + /*classId=*/ classId(MemoryFabric), + 
/*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MemoryFabric", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MemoryFabric, + /*pCastInfo=*/ &__nvoc_castinfo_MemoryFabric, + /*pExportInfo=*/ &__nvoc_export_info_MemoryFabric +}; + +static NvBool __nvoc_thunk_MemoryFabric_resCanCopy(struct RsResource *pMemoryFabric) { + return memoryfabricCanCopy((struct MemoryFabric *)(((unsigned char *)pMemoryFabric) - __nvoc_rtti_MemoryFabric_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_MemoryFabric_memControl(struct Memory *pMemoryFabric, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memoryfabricControl((struct MemoryFabric *)(((unsigned char *)pMemoryFabric) - __nvoc_rtti_MemoryFabric_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_memoryfabricCheckMemInterUnmap(struct MemoryFabric *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryFabric_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_memoryfabricUnmap(struct MemoryFabric *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryFabric_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_memoryfabricGetMemInterMapParams(struct MemoryFabric *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryFabric_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_memoryfabricGetMemoryMappingDescriptor(struct MemoryFabric *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryFabric_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_memoryfabricGetMapAddrSpace(struct MemoryFabric *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryFabric_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_memoryfabricShareCallback(struct MemoryFabric *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_memoryfabricControlFilter(struct MemoryFabric *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_memoryfabricAddAdditionalDependants(struct RsClient *pClient, struct MemoryFabric *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_memoryfabricGetRefCount(struct MemoryFabric *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RsResource.offset)); +} + 
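+// ----------------------------------------------------------------------------
+// Editorial note (not generated code): each thunk above converts between the
+// derived MemoryFabric pointer and one of its embedded base-class objects by
+// adding or subtracting the fixed byte offset recorded in the NVOC RTTI
+// tables. A minimal stand-alone sketch of that pattern, using hypothetical
+// Base/Derived types, looks like this:
+//
+//     #include <stddef.h>   // offsetof
+//
+//     struct Base    { int baseField; };
+//     struct Derived { int derivedField; struct Base base; };
+//
+//     #define BASE_OFFSET offsetof(struct Derived, base)
+//
+//     // "Down-cast" thunk: recover the Derived object from a Base pointer.
+//     static struct Derived *derivedFromBase(struct Base *pBase)
+//     {
+//         return (struct Derived *)(((unsigned char *)pBase) - BASE_OFFSET);
+//     }
+//
+//     // "Up-cast": locate the embedded Base inside a Derived object.
+//     static struct Base *baseFromDerived(struct Derived *pDerived)
+//     {
+//         return (struct Base *)(((unsigned char *)pDerived) + BASE_OFFSET);
+//     }
+// ----------------------------------------------------------------------------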
+static NV_STATUS __nvoc_thunk_RsResource_memoryfabricMapTo(struct MemoryFabric *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_memoryfabricControl_Prologue(struct MemoryFabric *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_memoryfabricIsReady(struct MemoryFabric *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryFabric_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_memoryfabricCheckCopyPermissions(struct MemoryFabric *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryFabric_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_memoryfabricPreDestruct(struct MemoryFabric *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_memoryfabricUnmapFrom(struct MemoryFabric *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_memoryfabricControl_Epilogue(struct MemoryFabric *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_memoryfabricControlLookup(struct MemoryFabric *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_memoryfabricMap(struct MemoryFabric *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_MemoryFabric_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_memoryfabricAccessCallback(struct MemoryFabric *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MemoryFabric_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_MemoryFabric[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memoryfabricCtrlGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xf80101u, + /*paramSize=*/ sizeof(NV00F8_CTRL_GET_INFO_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_MemoryFabric.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memoryfabricCtrlGetInfo" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2840u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memoryfabricCtrlCmdDescribe_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2840u) + /*flags=*/ 0x2840u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xf80102u, + /*paramSize=*/ sizeof(NV00F8_CTRL_DESCRIBE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_MemoryFabric.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memoryfabricCtrlCmdDescribe" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryFabric = +{ + /*numEntries=*/ 2, + /*pExportEntries=*/ __nvoc_exported_method_def_MemoryFabric +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_MemoryFabric(MemoryFabric *pThis) { + __nvoc_memoryfabricDestruct(pThis); + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MemoryFabric(MemoryFabric *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_MemoryFabric(MemoryFabric *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MemoryFabric_fail_Memory; + __nvoc_init_dataField_MemoryFabric(pThis); + + status = __nvoc_memoryfabricConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MemoryFabric_fail__init; + goto __nvoc_ctor_MemoryFabric_exit; // Success + +__nvoc_ctor_MemoryFabric_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_MemoryFabric_fail_Memory: +__nvoc_ctor_MemoryFabric_exit: + + return status; +} + +static void __nvoc_init_funcTable_MemoryFabric_1(MemoryFabric *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__memoryfabricCanCopy__ = &memoryfabricCanCopy_IMPL; + + pThis->__memoryfabricCopyConstruct__ = &memoryfabricCopyConstruct_IMPL; + + pThis->__memoryfabricControl__ = &memoryfabricControl_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__memoryfabricCtrlGetInfo__ = &memoryfabricCtrlGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2840u) + pThis->__memoryfabricCtrlCmdDescribe__ = &memoryfabricCtrlCmdDescribe_IMPL; +#endif + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_MemoryFabric_resCanCopy; + + pThis->__nvoc_base_Memory.__memControl__ = &__nvoc_thunk_MemoryFabric_memControl; + + pThis->__memoryfabricCheckMemInterUnmap__ = &__nvoc_thunk_Memory_memoryfabricCheckMemInterUnmap; + + pThis->__memoryfabricUnmap__ = &__nvoc_thunk_Memory_memoryfabricUnmap; + + pThis->__memoryfabricGetMemInterMapParams__ = &__nvoc_thunk_Memory_memoryfabricGetMemInterMapParams; + + pThis->__memoryfabricGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_memoryfabricGetMemoryMappingDescriptor; + + pThis->__memoryfabricGetMapAddrSpace__ = &__nvoc_thunk_Memory_memoryfabricGetMapAddrSpace; + + pThis->__memoryfabricShareCallback__ = &__nvoc_thunk_RmResource_memoryfabricShareCallback; + + pThis->__memoryfabricControlFilter__ = &__nvoc_thunk_RsResource_memoryfabricControlFilter; + + pThis->__memoryfabricAddAdditionalDependants__ = 
&__nvoc_thunk_RsResource_memoryfabricAddAdditionalDependants; + + pThis->__memoryfabricGetRefCount__ = &__nvoc_thunk_RsResource_memoryfabricGetRefCount; + + pThis->__memoryfabricMapTo__ = &__nvoc_thunk_RsResource_memoryfabricMapTo; + + pThis->__memoryfabricControl_Prologue__ = &__nvoc_thunk_RmResource_memoryfabricControl_Prologue; + + pThis->__memoryfabricIsReady__ = &__nvoc_thunk_Memory_memoryfabricIsReady; + + pThis->__memoryfabricCheckCopyPermissions__ = &__nvoc_thunk_Memory_memoryfabricCheckCopyPermissions; + + pThis->__memoryfabricPreDestruct__ = &__nvoc_thunk_RsResource_memoryfabricPreDestruct; + + pThis->__memoryfabricUnmapFrom__ = &__nvoc_thunk_RsResource_memoryfabricUnmapFrom; + + pThis->__memoryfabricControl_Epilogue__ = &__nvoc_thunk_RmResource_memoryfabricControl_Epilogue; + + pThis->__memoryfabricControlLookup__ = &__nvoc_thunk_RsResource_memoryfabricControlLookup; + + pThis->__memoryfabricMap__ = &__nvoc_thunk_Memory_memoryfabricMap; + + pThis->__memoryfabricAccessCallback__ = &__nvoc_thunk_RmResource_memoryfabricAccessCallback; +} + +void __nvoc_init_funcTable_MemoryFabric(MemoryFabric *pThis) { + __nvoc_init_funcTable_MemoryFabric_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_MemoryFabric(MemoryFabric *pThis) { + pThis->__nvoc_pbase_MemoryFabric = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_MemoryFabric(pThis); +} + +NV_STATUS __nvoc_objCreate_MemoryFabric(MemoryFabric **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + MemoryFabric *pThis; + + pThis = portMemAllocNonPaged(sizeof(MemoryFabric)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MemoryFabric)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MemoryFabric); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_MemoryFabric(pThis); + status = __nvoc_ctor_MemoryFabric(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_MemoryFabric_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MemoryFabric_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MemoryFabric(MemoryFabric **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_MemoryFabric(ppThis, pParent, createFlags, 
arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_mem_fabric_nvoc.h b/src/nvidia/generated/g_mem_fabric_nvoc.h new file mode 100644 index 000000000..a2b5d3c83 --- /dev/null +++ b/src/nvidia/generated/g_mem_fabric_nvoc.h @@ -0,0 +1,276 @@ +#ifndef _G_MEM_FABRIC_NVOC_H_ +#define _G_MEM_FABRIC_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * Description: + * This file contains the functions for managing fabric memory + * + *****************************************************************************/ + +#include "g_mem_fabric_nvoc.h" + +#ifndef _MEMORYFABRIC_H_ +#define _MEMORYFABRIC_H_ + +#include "core/core.h" +#include "mem_mgr/mem.h" +#include "rmapi/resource.h" +#include "gpu/mem_mgr/mem_desc.h" + +#include "ctrl/ctrl00f8.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +#ifdef NVOC_MEM_FABRIC_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct MemoryFabric { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct MemoryFabric *__nvoc_pbase_MemoryFabric; + NvBool (*__memoryfabricCanCopy__)(struct MemoryFabric *); + NV_STATUS (*__memoryfabricCopyConstruct__)(struct MemoryFabric *, CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + NV_STATUS (*__memoryfabricControl__)(struct MemoryFabric *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__memoryfabricCtrlGetInfo__)(struct MemoryFabric *, NV00F8_CTRL_GET_INFO_PARAMS *); + NV_STATUS (*__memoryfabricCtrlCmdDescribe__)(struct MemoryFabric *, NV00F8_CTRL_DESCRIBE_PARAMS *); + NV_STATUS (*__memoryfabricCheckMemInterUnmap__)(struct MemoryFabric *, NvBool); + NV_STATUS (*__memoryfabricUnmap__)(struct MemoryFabric *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS 
(*__memoryfabricGetMemInterMapParams__)(struct MemoryFabric *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__memoryfabricGetMemoryMappingDescriptor__)(struct MemoryFabric *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__memoryfabricGetMapAddrSpace__)(struct MemoryFabric *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__memoryfabricShareCallback__)(struct MemoryFabric *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__memoryfabricControlFilter__)(struct MemoryFabric *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__memoryfabricAddAdditionalDependants__)(struct RsClient *, struct MemoryFabric *, RsResourceRef *); + NvU32 (*__memoryfabricGetRefCount__)(struct MemoryFabric *); + NV_STATUS (*__memoryfabricMapTo__)(struct MemoryFabric *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__memoryfabricControl_Prologue__)(struct MemoryFabric *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__memoryfabricIsReady__)(struct MemoryFabric *); + NV_STATUS (*__memoryfabricCheckCopyPermissions__)(struct MemoryFabric *, struct OBJGPU *, NvHandle); + void (*__memoryfabricPreDestruct__)(struct MemoryFabric *); + NV_STATUS (*__memoryfabricUnmapFrom__)(struct MemoryFabric *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__memoryfabricControl_Epilogue__)(struct MemoryFabric *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__memoryfabricControlLookup__)(struct MemoryFabric *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__memoryfabricMap__)(struct MemoryFabric *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__memoryfabricAccessCallback__)(struct MemoryFabric *, struct RsClient *, void *, RsAccessRight); + NvU32 flags; +}; + +#ifndef __NVOC_CLASS_MemoryFabric_TYPEDEF__ +#define __NVOC_CLASS_MemoryFabric_TYPEDEF__ +typedef struct MemoryFabric MemoryFabric; +#endif /* __NVOC_CLASS_MemoryFabric_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryFabric +#define __nvoc_class_id_MemoryFabric 0x127499 +#endif /* __nvoc_class_id_MemoryFabric */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryFabric; + +#define __staticCast_MemoryFabric(pThis) \ + ((pThis)->__nvoc_pbase_MemoryFabric) + +#ifdef __nvoc_mem_fabric_h_disabled +#define __dynamicCast_MemoryFabric(pThis) ((MemoryFabric*)NULL) +#else //__nvoc_mem_fabric_h_disabled +#define __dynamicCast_MemoryFabric(pThis) \ + ((MemoryFabric*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MemoryFabric))) +#endif //__nvoc_mem_fabric_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_MemoryFabric(MemoryFabric**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MemoryFabric(MemoryFabric**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_MemoryFabric(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_MemoryFabric((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define memoryfabricCanCopy(pMemoryFabric) memoryfabricCanCopy_DISPATCH(pMemoryFabric) +#define memoryfabricCopyConstruct(pMemoryFabric, pCallContext, pParams) memoryfabricCopyConstruct_DISPATCH(pMemoryFabric, pCallContext, pParams) +#define memoryfabricControl(pMemoryFabric, pCallContext, pParams) memoryfabricControl_DISPATCH(pMemoryFabric, pCallContext, pParams) +#define memoryfabricCtrlGetInfo(pMemoryFabric, pParams) memoryfabricCtrlGetInfo_DISPATCH(pMemoryFabric, pParams) 
+#define memoryfabricCtrlCmdDescribe(pMemoryFabric, pParams) memoryfabricCtrlCmdDescribe_DISPATCH(pMemoryFabric, pParams) +#define memoryfabricCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) memoryfabricCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define memoryfabricUnmap(pMemory, pCallContext, pCpuMapping) memoryfabricUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define memoryfabricGetMemInterMapParams(pMemory, pParams) memoryfabricGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define memoryfabricGetMemoryMappingDescriptor(pMemory, ppMemDesc) memoryfabricGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define memoryfabricGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) memoryfabricGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define memoryfabricShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) memoryfabricShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define memoryfabricControlFilter(pResource, pCallContext, pParams) memoryfabricControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define memoryfabricAddAdditionalDependants(pClient, pResource, pReference) memoryfabricAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define memoryfabricGetRefCount(pResource) memoryfabricGetRefCount_DISPATCH(pResource) +#define memoryfabricMapTo(pResource, pParams) memoryfabricMapTo_DISPATCH(pResource, pParams) +#define memoryfabricControl_Prologue(pResource, pCallContext, pParams) memoryfabricControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define memoryfabricIsReady(pMemory) memoryfabricIsReady_DISPATCH(pMemory) +#define memoryfabricCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) memoryfabricCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define memoryfabricPreDestruct(pResource) memoryfabricPreDestruct_DISPATCH(pResource) +#define memoryfabricUnmapFrom(pResource, pParams) memoryfabricUnmapFrom_DISPATCH(pResource, pParams) +#define memoryfabricControl_Epilogue(pResource, pCallContext, pParams) memoryfabricControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define memoryfabricControlLookup(pResource, pParams, ppEntry) memoryfabricControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define memoryfabricMap(pMemory, pCallContext, pParams, pCpuMapping) memoryfabricMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define memoryfabricAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) memoryfabricAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool memoryfabricCanCopy_IMPL(struct MemoryFabric *pMemoryFabric); + +static inline NvBool memoryfabricCanCopy_DISPATCH(struct MemoryFabric *pMemoryFabric) { + return pMemoryFabric->__memoryfabricCanCopy__(pMemoryFabric); +} + +NV_STATUS memoryfabricCopyConstruct_IMPL(struct MemoryFabric *pMemoryFabric, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS memoryfabricCopyConstruct_DISPATCH(struct MemoryFabric *pMemoryFabric, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + return pMemoryFabric->__memoryfabricCopyConstruct__(pMemoryFabric, pCallContext, pParams); +} + +NV_STATUS memoryfabricControl_IMPL(struct MemoryFabric *pMemoryFabric, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS memoryfabricControl_DISPATCH(struct MemoryFabric *pMemoryFabric, CALL_CONTEXT *pCallContext, 
struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemoryFabric->__memoryfabricControl__(pMemoryFabric, pCallContext, pParams); +} + +NV_STATUS memoryfabricCtrlGetInfo_IMPL(struct MemoryFabric *pMemoryFabric, NV00F8_CTRL_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS memoryfabricCtrlGetInfo_DISPATCH(struct MemoryFabric *pMemoryFabric, NV00F8_CTRL_GET_INFO_PARAMS *pParams) { + return pMemoryFabric->__memoryfabricCtrlGetInfo__(pMemoryFabric, pParams); +} + +NV_STATUS memoryfabricCtrlCmdDescribe_IMPL(struct MemoryFabric *pMemoryFabric, NV00F8_CTRL_DESCRIBE_PARAMS *pParams); + +static inline NV_STATUS memoryfabricCtrlCmdDescribe_DISPATCH(struct MemoryFabric *pMemoryFabric, NV00F8_CTRL_DESCRIBE_PARAMS *pParams) { + return pMemoryFabric->__memoryfabricCtrlCmdDescribe__(pMemoryFabric, pParams); +} + +static inline NV_STATUS memoryfabricCheckMemInterUnmap_DISPATCH(struct MemoryFabric *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__memoryfabricCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS memoryfabricUnmap_DISPATCH(struct MemoryFabric *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__memoryfabricUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS memoryfabricGetMemInterMapParams_DISPATCH(struct MemoryFabric *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__memoryfabricGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS memoryfabricGetMemoryMappingDescriptor_DISPATCH(struct MemoryFabric *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__memoryfabricGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS memoryfabricGetMapAddrSpace_DISPATCH(struct MemoryFabric *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__memoryfabricGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool memoryfabricShareCallback_DISPATCH(struct MemoryFabric *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__memoryfabricShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS memoryfabricControlFilter_DISPATCH(struct MemoryFabric *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__memoryfabricControlFilter__(pResource, pCallContext, pParams); +} + +static inline void memoryfabricAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct MemoryFabric *pResource, RsResourceRef *pReference) { + pResource->__memoryfabricAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 memoryfabricGetRefCount_DISPATCH(struct MemoryFabric *pResource) { + return pResource->__memoryfabricGetRefCount__(pResource); +} + +static inline NV_STATUS memoryfabricMapTo_DISPATCH(struct MemoryFabric *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__memoryfabricMapTo__(pResource, pParams); +} + +static inline NV_STATUS memoryfabricControl_Prologue_DISPATCH(struct MemoryFabric *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__memoryfabricControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS memoryfabricIsReady_DISPATCH(struct MemoryFabric *pMemory) { + return pMemory->__memoryfabricIsReady__(pMemory); +} + +static inline NV_STATUS 
memoryfabricCheckCopyPermissions_DISPATCH(struct MemoryFabric *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__memoryfabricCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void memoryfabricPreDestruct_DISPATCH(struct MemoryFabric *pResource) { + pResource->__memoryfabricPreDestruct__(pResource); +} + +static inline NV_STATUS memoryfabricUnmapFrom_DISPATCH(struct MemoryFabric *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__memoryfabricUnmapFrom__(pResource, pParams); +} + +static inline void memoryfabricControl_Epilogue_DISPATCH(struct MemoryFabric *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__memoryfabricControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS memoryfabricControlLookup_DISPATCH(struct MemoryFabric *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__memoryfabricControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS memoryfabricMap_DISPATCH(struct MemoryFabric *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__memoryfabricMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool memoryfabricAccessCallback_DISPATCH(struct MemoryFabric *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__memoryfabricAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS memoryfabricConstruct_IMPL(struct MemoryFabric *arg_pMemoryFabric, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_memoryfabricConstruct(arg_pMemoryFabric, arg_pCallContext, arg_pParams) memoryfabricConstruct_IMPL(arg_pMemoryFabric, arg_pCallContext, arg_pParams) +void memoryfabricDestruct_IMPL(struct MemoryFabric *pMemoryFabric); +#define __nvoc_memoryfabricDestruct(pMemoryFabric) memoryfabricDestruct_IMPL(pMemoryFabric) +NvBool memoryfabricCanExport_IMPL(struct MemoryFabric *pMemoryFabric); +#ifdef __nvoc_mem_fabric_h_disabled +static inline NvBool memoryfabricCanExport(struct MemoryFabric *pMemoryFabric) { + NV_ASSERT_FAILED_PRECOMP("MemoryFabric was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_fabric_h_disabled +#define memoryfabricCanExport(pMemoryFabric) memoryfabricCanExport_IMPL(pMemoryFabric) +#endif //__nvoc_mem_fabric_h_disabled + +#undef PRIVATE_FIELD + + +#endif /* _MEMORYFABRIC_H_ */ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MEM_FABRIC_NVOC_H_ diff --git a/src/nvidia/generated/g_mem_mgr_nvoc.c b/src/nvidia/generated/g_mem_mgr_nvoc.c new file mode 100644 index 000000000..b4d4aa746 --- /dev/null +++ b/src/nvidia/generated/g_mem_mgr_nvoc.c @@ -0,0 +1,602 @@ +#define NVOC_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mem_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x22ad47 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_MemoryManager(MemoryManager*, RmHalspecOwner* ); +void 
__nvoc_init_funcTable_MemoryManager(MemoryManager*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager*, RmHalspecOwner* ); +void __nvoc_init_dataField_MemoryManager(MemoryManager*, RmHalspecOwner* ); +void __nvoc_dtor_MemoryManager(MemoryManager*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryManager; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_MemoryManager = { + /*pClassDef=*/ &__nvoc_class_def_MemoryManager, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MemoryManager, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MemoryManager = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_MemoryManager_MemoryManager, + &__nvoc_rtti_MemoryManager_OBJENGSTATE, + &__nvoc_rtti_MemoryManager_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MemoryManager), + /*classId=*/ classId(MemoryManager), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MemoryManager", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MemoryManager, + /*pCastInfo=*/ &__nvoc_castinfo_MemoryManager, + /*pExportInfo=*/ &__nvoc_export_info_MemoryManager +}; + +static NV_STATUS __nvoc_thunk_MemoryManager_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pMemoryManager, ENGDESCRIPTOR arg0) { + return memmgrConstructEngine(pGpu, (struct MemoryManager *)(((unsigned char *)pMemoryManager) - __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_MemoryManager_engstateStatePreInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pMemoryManager) { + return memmgrStatePreInitLocked(pGpu, (struct MemoryManager *)(((unsigned char *)pMemoryManager) - __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_MemoryManager_engstateStateInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pMemoryManager) { + return memmgrStateInitLocked(pGpu, (struct MemoryManager *)(((unsigned char *)pMemoryManager) - __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_MemoryManager_engstateStateLoad(OBJGPU *pGpu, struct OBJENGSTATE *pMemoryManager, NvU32 arg0) { + return memmgrStateLoad(pGpu, (struct MemoryManager *)(((unsigned char *)pMemoryManager) - __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_MemoryManager_engstateStatePreUnload(OBJGPU *pGpu, struct OBJENGSTATE *pMemoryManager, NvU32 arg0) { + return memmgrStatePreUnload(pGpu, (struct MemoryManager *)(((unsigned char *)pMemoryManager) - __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_MemoryManager_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pMemoryManager, NvU32 arg0) { + return memmgrStateUnload(pGpu, (struct MemoryManager *)(((unsigned char *)pMemoryManager) - __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_MemoryManager_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pMemoryManager) { + 
memmgrStateDestroy(pGpu, (struct MemoryManager *)(((unsigned char *)pMemoryManager) - __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrReconcileTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePostUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateInitUnlocked(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_memmgrInitMissing(POBJGPU pGpu, struct MemoryManager *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrGetTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrCompareTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_memmgrFreeTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePostLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrAllocTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrSetTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState); +} + 
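The thunks above (and the remaining ones below) all follow one pattern: NVOC models inheritance by embedding the OBJENGSTATE base object inside MemoryManager at a fixed offset recorded in the RTTI table, so a call arriving through a base-class pointer is rebased by subtracting that offset before the derived implementation runs, while the reverse thunks add the offset to reach the embedded base. The following minimal, standalone sketch shows the same pointer arithmetic; the Base/Derived types and function names are hypothetical stand-ins, not the real NVOC classes.

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-ins for OBJENGSTATE / MemoryManager -- not the real types. */
struct Base    { int baseData; };
struct Derived { int extra; struct Base base; };  /* base embedded at a nonzero offset */

/* Mirrors the generated thunks: rebase a Base* back to its containing Derived*. */
static struct Derived *derivedFromBase(struct Base *pBase)
{
    return (struct Derived *)((unsigned char *)pBase - offsetof(struct Derived, base));
}

/* Mirrors the reverse direction: from a Derived* to its embedded Base*. */
static struct Base *baseFromDerived(struct Derived *pDerived)
{
    return (struct Base *)((unsigned char *)pDerived + offsetof(struct Derived, base));
}

int main(void)
{
    struct Derived d = { 0 };
    struct Base *pBase = baseFromDerived(&d);
    assert(derivedFromBase(pBase) == &d);  /* round-trip recovers the derived object */
    return 0;
}

In the generated code the role of offsetof() is played by __nvoc_rtti_MemoryManager_OBJENGSTATE.offset, which is initialized with NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE) in the RTTI tables earlier in this file.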
+static NvBool __nvoc_thunk_OBJENGSTATE_memmgrIsPresent(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryManager = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_MemoryManager(MemoryManager *pThis) { + __nvoc_memmgrDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal field -- bFbRegionsSupported + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bFbRegionsSupported = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bFbRegionsSupported = ((NvBool)(0 != 0)); + } + + // Hal field -- bPmaEnabled + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bPmaEnabled = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bPmaEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bClientPageTablesPmaManaged + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bClientPageTablesPmaManaged = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bClientPageTablesPmaManaged = ((NvBool)(0 != 0)); + } + + // Hal field -- bScanoutSysmem + if (0) + { + } + // default + else + { + pThis->bScanoutSysmem = ((NvBool)(0 != 0)); + } + + // Hal field -- bDisallowSplitLowerMemory + if (0) + { + } + // default + else + { + pThis->bDisallowSplitLowerMemory = ((NvBool)(0 != 0)); + } + + // Hal field -- bSmallPageCompression + if (0) + { + } + // default + else + { + pThis->bSmallPageCompression = ((NvBool)(0 != 0)); + } + + // Hal field -- bSysmemCompressionSupportDef + if (0) + { + } + // default + else + { + pThis->bSysmemCompressionSupportDef = ((NvBool)(0 != 0)); + } + + // Hal field -- bBug2301372IncreaseRmReserveMemoryWar + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->bBug2301372IncreaseRmReserveMemoryWar = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bBug2301372IncreaseRmReserveMemoryWar = ((NvBool)(0 != 0)); + } + + pThis->bEnableDynamicPageOfflining = ((NvBool)(0 != 0)); + + // Hal field -- bVgpuPmaSupport + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* 
ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bVgpuPmaSupport = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bVgpuPmaSupport = ((NvBool)(0 != 0)); + } + + pThis->bSupportCCProtectedMemoryAlloc = ((NvBool)(0 != 0)); + + // Hal field -- bAllowNoncontiguousAllocation + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bAllowNoncontiguousAllocation = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bAllowNoncontiguousAllocation = ((NvBool)(0 != 0)); + } + + // Hal field -- bScrubOnFreeEnabled + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->bScrubOnFreeEnabled = ((NvBool)(0 == 0)); + } + // default + else + { + pThis->bScrubOnFreeEnabled = ((NvBool)(0 != 0)); + } + + // Hal field -- bFastScrubberEnabled + if (0) + { + } + // default + else + { + pThis->bFastScrubberEnabled = ((NvBool)(0 != 0)); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_MemoryManager_fail_OBJENGSTATE; + __nvoc_init_dataField_MemoryManager(pThis, pRmhalspecowner); + goto __nvoc_ctor_MemoryManager_exit; // Success + +__nvoc_ctor_MemoryManager_fail_OBJENGSTATE: +__nvoc_ctor_MemoryManager_exit: + + return status; +} + +static void __nvoc_init_funcTable_MemoryManager_1(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__memmgrConstructEngine__ = &memmgrConstructEngine_IMPL; + + pThis->__memmgrStatePreInitLocked__ = &memmgrStatePreInitLocked_IMPL; + + pThis->__memmgrStateInitLocked__ = &memmgrStateInitLocked_IMPL; + + pThis->__memmgrStateLoad__ = &memmgrStateLoad_IMPL; + + pThis->__memmgrStatePreUnload__ = &memmgrStatePreUnload_IMPL; + + pThis->__memmgrStateUnload__ = &memmgrStateUnload_IMPL; + + pThis->__memmgrStateDestroy__ = &memmgrStateDestroy_IMPL; + + // Hal function -- memmgrAllocDetermineAlignment + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrAllocDetermineAlignment__ = &memmgrAllocDetermineAlignment_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrAllocDetermineAlignment__ = &memmgrAllocDetermineAlignment_GA100; + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- memmgrGetMaxContextSize + if (0) + { + } + else if (0) + { + } + else if (0) + { + } + else if 
(0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrGetMaxContextSize__ = &memmgrGetMaxContextSize_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrGetMaxContextSize__ = &memmgrGetMaxContextSize_GA100; + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- memmgrScrubRegistryOverrides + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrScrubRegistryOverrides__ = &memmgrScrubRegistryOverrides_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrScrubRegistryOverrides__ = &memmgrScrubRegistryOverrides_GA100; + } + else if (0) + { + } + + // Hal function -- memmgrGetFlaKind + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrGetFlaKind__ = &memmgrGetFlaKind_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrGetFlaKind__ = &memmgrGetFlaKind_46f6a7; + } + + // Hal function -- memmgrDetermineComptag + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrDetermineComptag__ = &memmgrDetermineComptag_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrDetermineComptag__ = &memmgrDetermineComptag_13cd8d; + } + + // Hal function -- memmgrCheckReservedMemorySize + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrCheckReservedMemorySize__ = &memmgrCheckReservedMemorySize_GK104; + } + // default + else + { + pThis->__memmgrCheckReservedMemorySize__ = &memmgrCheckReservedMemorySize_56cd7a; + } + + // Hal function -- memmgrReadMmuLock + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__memmgrReadMmuLock__ = &memmgrReadMmuLock_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrReadMmuLock__ = &memmgrReadMmuLock_e133c0; + } + + // Hal function -- memmgrBlockMemLockedMemory + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__memmgrBlockMemLockedMemory__ = &memmgrBlockMemLockedMemory_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrBlockMemLockedMemory__ = 
&memmgrBlockMemLockedMemory_56cd7a; + } + + // Hal function -- memmgrInsertUnprotectedRegionAtBottomOfFb + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__memmgrInsertUnprotectedRegionAtBottomOfFb__ = &memmgrInsertUnprotectedRegionAtBottomOfFb_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fbe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrInsertUnprotectedRegionAtBottomOfFb__ = &memmgrInsertUnprotectedRegionAtBottomOfFb_56cd7a; + } + + // Hal function -- memmgrGetDisablePlcKind + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrGetDisablePlcKind__ = &memmgrGetDisablePlcKind_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrGetDisablePlcKind__ = &memmgrGetDisablePlcKind_b3696a; + } + + // Hal function -- memmgrEnableDynamicPageOfflining + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00000400UL) )) /* ChipHal: GA100 */ + { + pThis->__memmgrEnableDynamicPageOfflining__ = &memmgrEnableDynamicPageOfflining_GA100; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000f800UL) )) /* ChipHal: GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrEnableDynamicPageOfflining__ = &memmgrEnableDynamicPageOfflining_GA102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrEnableDynamicPageOfflining__ = &memmgrEnableDynamicPageOfflining_b3696a; + } + + // Hal function -- memmgrGetBlackListPages + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__memmgrGetBlackListPages__ = &memmgrGetBlackListPages_GM107; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__memmgrGetBlackListPages__ = &memmgrGetBlackListPages_GA100; + } + else if (0) + { + } + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_MemoryManager_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_MemoryManager_engstateStatePreInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_MemoryManager_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_MemoryManager_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreUnload__ = &__nvoc_thunk_MemoryManager_engstateStatePreUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_MemoryManager_engstateStateUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_MemoryManager_engstateStateDestroy; + + pThis->__memmgrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrReconcileTunableState; + + pThis->__memmgrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreLoad; + + pThis->__memmgrStatePostUnload__ = 
&__nvoc_thunk_OBJENGSTATE_memmgrStatePostUnload; + + pThis->__memmgrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateInitUnlocked; + + pThis->__memmgrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_memmgrInitMissing; + + pThis->__memmgrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked; + + pThis->__memmgrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrGetTunableState; + + pThis->__memmgrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrCompareTunableState; + + pThis->__memmgrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrFreeTunableState; + + pThis->__memmgrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePostLoad; + + pThis->__memmgrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrAllocTunableState; + + pThis->__memmgrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrSetTunableState; + + pThis->__memmgrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_memmgrIsPresent; +} + +void __nvoc_init_funcTable_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_MemoryManager_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_MemoryManager = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_MemoryManager(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + MemoryManager *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(MemoryManager)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MemoryManager)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MemoryManager); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_MemoryManager(pThis, pRmhalspecowner); + status = __nvoc_ctor_MemoryManager(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_MemoryManager_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MemoryManager_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_MemoryManager(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_mem_mgr_nvoc.h b/src/nvidia/generated/g_mem_mgr_nvoc.h new file mode 100644 index 000000000..262e3b321 --- /dev/null +++ b/src/nvidia/generated/g_mem_mgr_nvoc.h @@ -0,0 +1,2537 @@ +#ifndef _G_MEM_MGR_NVOC_H_ +#define _G_MEM_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 
NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_mem_mgr_nvoc.h" + +#ifndef MEM_MGR_H +#define MEM_MGR_H + +#include "core/core.h" +#include "core/info_block.h" +#include "gpu/eng_state.h" + +#include "gpu/mem_mgr/fbsr.h" +#include "gpu/gpu.h" + +#include "mem_mgr/mem.h" + +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "containers/map.h" +#include "gpu/mem_mgr/heap_base.h" +#include "mem_mgr/vaspace.h" + +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h" + +typedef volatile struct _cl906f_tag1 Nv906fControl; +typedef struct KERNEL_MIG_GPU_INSTANCE KERNEL_MIG_GPU_INSTANCE; + +typedef struct +{ + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 offset; +} TRANSFER_SURFACE; + +// Memory transfer engine types. 
+typedef enum +{ + TRANSFER_TYPE_PROCESSOR = 0, // CPU/GSP/DPU depending on execution context + TRANSFER_TYPE_GSP_DMA, // Dma engine internal to GSP + TRANSFER_TYPE_CE, // Copy Engine HW +} TRANSFER_TYPE; + +#define TRANSFER_FLAGS_NONE 0 +#define TRANSFER_FLAGS_DEFER_FLUSH NVBIT32(0) // Applicable only for write operations +#define TRANSFER_FLAGS_SHADOW_ALLOC NVBIT32(1) // Applicable only for non-PROCESSOR transfers +#define TRANSFER_FLAGS_SHADOW_INIT_MEM NVBIT32(2) // Applicable only for non-PROCESSOR transfers +#define TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING NVBIT32(3) // Require long lived PROCESSOR mapping +#define TRANSFER_FLAGS_DESTROY_MAPPING NVBIT32(4) // Destroy any cached mappings when complete + +typedef struct +{ + NvU32 bar1Size; + NvU32 bar1AvailSize; + NvU32 bankSwizzleAlignment; + NvU32 bar1MaxContigAvailSize; +} GETBAR1INFO, *PGETBAR1INFO; + +// +// RM Default PTE kind +// Bug #2242255, introducing the RM Default kind to allow sharing memory between +// different architectures especially between Turing+ and Pre Turing chips +// +#define RM_DEFAULT_PTE_KIND 0x100 + +typedef enum +{ + FB_IS_KIND_Z, // Kind is a Z buffer + FB_IS_KIND_ZBC, // Zero bandwidth clears + FB_IS_KIND_ZBC_ALLOWS_1, // ZBC with 1 bit of tag + FB_IS_KIND_ZBC_ALLOWS_2, // ZBC with 2 bits of tag + FB_IS_KIND_ZBC_ALLOWS_4, // ZBC with 4 bits of tag + FB_IS_KIND_COMPRESSIBLE, // Any compressible kind + FB_IS_KIND_COMPRESSIBLE_1, // Compressible with 1 comp tag bit + FB_IS_KIND_COMPRESSIBLE_2, // Compressible with 2 comp tag bits + FB_IS_KIND_COMPRESSIBLE_4, // Compressible with 4 comp tag bits + FB_IS_KIND_SUPPORTED, // Kind is supported + FB_IS_KIND_DISALLOW_PLC, // Kind Disallows PLC +} FB_IS_KIND_OP; + +// Surface compression parameters +typedef struct COMPR_INFO +{ + // Surface kind; if not compressed, following parameters are ignored + NvU32 kind; + + // Compression page shift; 0 if kind is uncompressed + NvU32 compPageShift; + + // + // Are comptags are determined per-page by PA? 
+ // If set, following parameters are ignored + // + NvBool bPhysBasedComptags; + + // see GMMU_COMPR_INFO + NvU32 compPageIndexLo; + NvU32 compPageIndexHi; + NvU32 compTagLineMin; + NvU32 compTagLineMultiplier; +} COMPR_INFO; + +// +// Fixed Channel Properties for Memutils Object +// + +typedef NV_STATUS FbScrubCallback(OBJGPU *); + +#define BLOCK_INDEX_FROM_ADDR(addr,size) ((NvU32)((addr) >> size)) +#define BLOCK_ADDR_FROM_INDEX(idx,size) (((NvU64)(idx)) << size) + +#define MEMUTILS_SIZE_PER_BLOCK_INBYTES (0x68) +#define MEMUTILS_TOTAL_SIZE_PER_BLOCK_INBYTES (0x60) //(COPY + PB SEMA) +#define MEMUTILS_TD_BLOCKS_PER_CHUNK 0x40 + +#define BLOCK_INDEX_FROM_ADDR(addr,size) ((NvU32)((addr) >> size)) +#define BLOCK_ADDR_FROM_INDEX(idx,size) (((NvU64)(idx)) << size) + +#define MEMUTILS_NUM_PAYLOAD_SEMAPHORES (2) +#define MEMUTILS_NUM_GPFIFIO_ENTRIES (32) +// PB size should be a multiple of chunk size +#define MEMUTILS_CHANNEL_PB_SIZE (0x10 * MEMUTILS_SIZE_PER_BLOCK_INBYTES * \ + MEMUTILS_TD_BLOCKS_PER_CHUNK) +#define MEMUTILS_CHANNEL_SEMAPHORE_SIZE (4 * MEMUTILS_NUM_PAYLOAD_SEMAPHORES) +#define MEMUTILS_CHANNEL_NOTIFIER_SIZE (sizeof(NvNotification) * 1) + +// offset and line length should be a multiple of 4KB +#define MEMUTIL_SCRUB_OFFSET_ALIGNMENT (4 * 1024) +#define MEMUTIL_SCRUB_LINE_LENGTH_ALIGNMENT (4 * 1024) + +typedef enum { + SCRUBBER_CHANNEL, + FAST_SCRUBBER_CHANNEL, + COPY_CHANNEL, + MAX_CHANNEL_TYPE +} CHANNEL_KIND; + +// This will be moved to a channel object next +typedef struct OBJCHANNEL +{ + NvHandle deviceId; // Device Handle + NvHandle physMemId; // Memory Handle + NvHandle channelId; // Channel Handle + NvHandle subdeviceId; // Subdevice Handle + NvHandle errNotifierIdVirt; + NvHandle errNotifierIdPhys; + NvHandle copyObjectId; + NvHandle eventId; + NvHandle pushBufferId; + NvHandle bitMapSemPhysId; + NvHandle bitMapSemVirtId; + NvHandle hVASpaceId; // VASpace handle, when scrubber in virtual mode + NvHandle hFbAlias; // Used only for virtual channels + NvHandle hFbAliasVA; + // to be moved later + + NvU32 channelSize; + NvU32 channelNumGpFifioEntries; + NvU32 channelPbSize; + NvU32 channelNotifierSize; + NvU32 methodSizePerBlock; + NvU32 semaOffset; + NvU32 finishPayloadOffset; + NvU32 finishPayload; + NvBool isChannelSynchronized; + NvBool isProgressChecked; +// +// RM internal channels are created as privileged channels (physical address access) by default +// For MMU Bug: 2739505, we need to switch to use channels in non-privileged mode. +// + NvBool bUseVasForCeCopy; // set to NV_TRUE, when scrubber operates in virtual address + struct RsClient *pRsClient; + struct OBJVASPACE *pVAS; + NvU32 engineType; + NvU64 startFbOffset; + NvU64 fbSize; + NvU64 fbAliasVA; + NvU64 vaStartOffset; + // to be moved to a separate object later + + NvU32 *pBlockPendingState; + NvU32 *pBlockDoneState; + NvU32 blockCount; + NvHandle hClient; + NvHandle hLiteClient; // Used only for fifo lite channels + NvBool bClientAllocated; + NvU64 pbGpuVA; + NvU64 pbGpuBitMapVA; + NvU64 pbGpuNotifierVA; + NvU8 *pbCpuVA; + NvU8 *pbBitMapVA; + Nv906fControl *pControlGPFifo; + NvU32 classEngineID; + NVOS10_EVENT_KERNEL_CALLBACK_EX callback; + NvU32 state; + NvU32 hTdCopyClass; + NvU32 minBlockSize; + NvU32 maxBlockSize; + NvU32 channelPutOffset; + NvU8 blockShift; + NvU32 lastPayloadPushed; + NvBool isChannelActive; + NvU32 workSubmitToken; + // + // Work submit token read from notifier memory. 
+ // + NvNotification *pTokenFromNotifier; + NvU32 lastSubmittedEntry; + NvHandle lastAllocatedHandle; + CHANNEL_KIND type; + + // Used for Volta+ + NvHandle doorbellRegionHandle; + NvU8 *pDoorbellRegion; + NvU32 *pDoorbellRegisterOffset; + NvBool bUseDoorbellRegister; + NvHandle hUserD; + NvBool bClientUserd; + + + // + // Used only by suspend resume channel. + // This denotes whether the channel manages the BAR2 VASpace. + // Suspend resume happens way before the regular BAR2 init. + // Channel instmem has to be stored in vidmem due to 40 bit restriction in host on Pascal+ chips. + // So the suspend resume channel has to setup BAR2 for accessing vidmem. + // + NvBool bManageBAR2; + OBJGPU *pGpu; + struct KernelCE *pKCe; + + // Used by Partition Scrubber + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance; + NvHandle hPartitionRef; +} OBJCHANNEL, *POBJCHANNEL; + +#define NV_METHOD(SubCh, Method, Num) \ + (DRF_DEF(906F, _DMA_INCR, _OPCODE, _VALUE) | \ + DRF_NUM(906F, _DMA_INCR, _COUNT, Num) | \ + DRF_NUM(906F, _DMA_INCR, _SUBCHANNEL, SubCh) | \ + DRF_NUM(906F, _DMA_INCR, _ADDRESS, (Method) >> 2)) + +#define PUSH_DATA(Data) MEM_WR32(ptr++, (Data)) + +#define PUSH_PAIR(SubCh, Method, Data) \ + do \ + { \ + PUSH_DATA(NV_METHOD(SubCh, (Method), 1)); \ + PUSH_DATA((Data)); \ + } while (0) + +//----------------------------------------------------------------------------- + +typedef struct +{ + NvU32 lastSubmittedBlock; + NvBool isTopDownScrubber; + NvBool isActive; + NvU32 scrubberState; + NvU32 currentFbRegion; + NvU32 startBlock; + NvU32 endBlock; + NvU32 *pPendingBitMap; + NvU32 *pDoneBitMap; + NvU32 blockCount; + struct OBJCE *pCe; + NvBool bCeInUse; + OBJCHANNEL tdHeapState; + OBJCHANNEL allocationScrubberState; +} OBJSCRUB, *POBJSCRUB; + +typedef struct +{ + NvU64 base; // Base/start address of the region + NvU64 limit; // Last/end address of region + NvU64 rsvdSize; // Memory RM may be required to allocate in this region + NvBool bRsvdRegion; // Reserved region -- not publicly usable + NvU32 performance; // Relative performance. Higher is faster + NvBool bSupportCompressed; // Support compressed kinds + NvBool bSupportISO; // Support ISO (display, cursor, video) surfaces + NvBool bProtected; // Represents a protected region of memory. + NvBool bInternalHeap; // PMA:Used for internal RM allocations + NvBool bLostOnSuspend; // Not required to be Saved during S/R. +} FB_REGION_DESCRIPTOR, *PFB_REGION_DESCRIPTOR; + +#define MAX_FB_REGIONS 16 + +// Maximum number of contexts created for WHQL test WDDM Max Contexts +#define WHQL_TEST_MAX_CONTEXTS 100 + +// Object 'get' macros for FB relative object retrievals. +#define MEMORY_MANAGER_GET_HEAP(p) ((p)->pHeap) + +typedef struct _def_fb_mem_node +{ + struct _def_fb_mem_node *pNext; + + NvBool bFreeDescriptor; + PMEMORY_DESCRIPTOR pMemDesc; + +} FB_MEM_NODE, *PFB_MEM_NODE; + +// defines for MemoryManager::fbsrReservedRanges +#define MAX_FBSR_RESERVED_REGIONS 2 // Max. Memory descriptors for RM Instance memory +#define FBSR_RESERVED_INST_MEMORY_BEFORE_BAR2PTE 0 +#define FBSR_RESERVED_INST_MEMORY_AFTER_BAR2PTE 1 + +/*! + * MemoryManager provides the root memory management of GPU video memory. + * External entities might provide suballocators on top of MemoryManager. + * + * MemoryManager can have static information on the memory system (e.g.: list of + * kinds, etc), however MemoryManager does not have direct access to the GPU + * memory system (e.g.: BAR0 registers). It relies on KernelMemorySystem for + * operations on the memory system. 
+ * + * MemoryManager is instantiated in VGPU guest/GSP Client as well as the VGPU + * host/GSP-RM. + */ + +#define MEM_MGR_STUB_ORIN(...) + +#ifdef NVOC_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RM_POOL_ALLOC_MEM_RESERVE_INFO; + +struct __nvoc_inner_struc_MemoryManager_1__ { + NvU64 fbUsableMemSize; + NvU64 fbTotalMemSizeMb; + NvU64 fbAddrSpaceSizeMb; + NvU64 mapRamSizeMb; + NvU64 fbOverrideSizeMb; + NvU64 reservedMemSize; + NvU32 numFBRegions; + FB_REGION_DESCRIPTOR fbRegion[16]; + NvU32 numFBRegionPriority; + NvU64 maxFBPSize; + NvU64 minFBPSize; + NvU32 fbRegionPriority[16]; + NvU64 ReservedConsoleDispMemSize; + PMEMORY_DESCRIPTOR pReservedConsoleMemDesc; + NvU32 lowerRangeMag; + NvU32 lowerRangeScale; + NvU32 middleRangeMag; + NvU32 middleRangeScale; + NvU32 upperRangeMag; + NvU32 upperRangeScale; +}; + +struct MIG_MEMORY_PARTITIONING_INFO { + struct NV_RANGE partitionableMemoryRange; + struct NV_RANGE partitionableBar1Range; + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubdevice; + NvBool bNonMIGTopLevelScrubber; +}; + + +struct MemoryManager { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct MemoryManager *__nvoc_pbase_MemoryManager; + NV_STATUS (*__memmgrConstructEngine__)(OBJGPU *, struct MemoryManager *, ENGDESCRIPTOR); + NV_STATUS (*__memmgrStatePreInitLocked__)(OBJGPU *, struct MemoryManager *); + NV_STATUS (*__memmgrStateInitLocked__)(OBJGPU *, struct MemoryManager *); + NV_STATUS (*__memmgrStateLoad__)(OBJGPU *, struct MemoryManager *, NvU32); + NV_STATUS (*__memmgrStatePreUnload__)(OBJGPU *, struct MemoryManager *, NvU32); + NV_STATUS (*__memmgrStateUnload__)(OBJGPU *, struct MemoryManager *, NvU32); + void (*__memmgrStateDestroy__)(OBJGPU *, struct MemoryManager *); + NV_STATUS (*__memmgrAllocDetermineAlignment__)(OBJGPU *, struct MemoryManager *, NvU64 *, NvU64 *, NvU64, NvU32, NvU32, NvU32, NvU64); + NvU64 (*__memmgrGetMaxContextSize__)(OBJGPU *, struct MemoryManager *); + void (*__memmgrScrubRegistryOverrides__)(OBJGPU *, struct MemoryManager *); + NV_STATUS (*__memmgrGetFlaKind__)(OBJGPU *, struct MemoryManager *, NvU32 *); + NvU32 (*__memmgrDetermineComptag__)(OBJGPU *, struct MemoryManager *, RmPhysAddr); + NV_STATUS (*__memmgrCheckReservedMemorySize__)(OBJGPU *, struct MemoryManager *); + NV_STATUS (*__memmgrReadMmuLock__)(OBJGPU *, struct MemoryManager *, NvBool *, NvU64 *, NvU64 *); + NV_STATUS (*__memmgrBlockMemLockedMemory__)(OBJGPU *, struct MemoryManager *); + NV_STATUS (*__memmgrInsertUnprotectedRegionAtBottomOfFb__)(OBJGPU *, struct MemoryManager *, NvU64 *); + void (*__memmgrGetDisablePlcKind__)(struct MemoryManager *, NvU32 *); + void (*__memmgrEnableDynamicPageOfflining__)(OBJGPU *, struct MemoryManager *); + NV_STATUS (*__memmgrGetBlackListPages__)(OBJGPU *, struct MemoryManager *, BLACKLIST_ADDRESS *, NvU32 *); + NV_STATUS (*__memmgrReconcileTunableState__)(POBJGPU, struct MemoryManager *, void *); + NV_STATUS (*__memmgrStatePreLoad__)(POBJGPU, struct MemoryManager *, NvU32); + NV_STATUS (*__memmgrStatePostUnload__)(POBJGPU, struct MemoryManager *, NvU32); + NV_STATUS (*__memmgrStateInitUnlocked__)(POBJGPU, struct MemoryManager *); + void (*__memmgrInitMissing__)(POBJGPU, struct MemoryManager *); + NV_STATUS (*__memmgrStatePreInitUnlocked__)(POBJGPU, struct MemoryManager *); + NV_STATUS (*__memmgrGetTunableState__)(POBJGPU, struct 
MemoryManager *, void *); + NV_STATUS (*__memmgrCompareTunableState__)(POBJGPU, struct MemoryManager *, void *, void *); + void (*__memmgrFreeTunableState__)(POBJGPU, struct MemoryManager *, void *); + NV_STATUS (*__memmgrStatePostLoad__)(POBJGPU, struct MemoryManager *, NvU32); + NV_STATUS (*__memmgrAllocTunableState__)(POBJGPU, struct MemoryManager *, void **); + NV_STATUS (*__memmgrSetTunableState__)(POBJGPU, struct MemoryManager *, void *); + NvBool (*__memmgrIsPresent__)(POBJGPU, struct MemoryManager *); + NvBool bFbsrWddmModeEnabled; + NvBool bFbRegionsSupported; + NvBool bPmaSupportedOnPlatform; + NvBool bPmaEnabled; + NvBool bPmaInitialized; + NvBool bPmaForcePersistence; + NvBool bPmaAddrTree; + NvBool bClientPageTablesPmaManaged; + NvBool bScanoutSysmem; + NvBool bMixedDensityFbp; + NvBool bPreferSlowRegion; + NvBool bPersistentStandbyBuffer; + NvBool bEnableFbsrPagedDma; + NvBool bDisallowSplitLowerMemory; + NvBool bIgnoreUpperMemory; + NvBool bLddmReservedMemoryCalculated; + NvBool bSmallPageCompression; + NvBool bSysmemCompressionSupportDef; + NvBool bBug1698088IncreaseRmReserveMemoryWar; + NvBool bBug2301372IncreaseRmReserveMemoryWar; + NvBool bEnableFbsrFileMode; + NvBool bEnableDynamicPageOfflining; + NvBool bVgpuPmaSupport; + NvBool bSupportCCProtectedMemoryAlloc; + NvBool bAllowNoncontiguousAllocation; + NvBool bEccInterleavedVidmemScrub; + NvBool bScrubberInitialized; + NvBool bAllowSysmemHugePages; + NvBool bEccScrubOverride; + NvU32 sysmemPageSize; + struct Heap *pHeap; + NvBool bScrubOnFreeEnabled; + NvBool bFastScrubberEnabled; + NvBool bDisableAsyncScrubforMods; + NvBool bUseVasForCeMemoryOps; + NvBool bRmExecutingEccScrub; + NvBool bBug1441072EccScrubWar; + NvU64 heapStartOffset; + NvU64 rsvdMemoryBase; + NvU32 rsvdMemorySize; + OBJSCRUB eccScrubberState; + struct __nvoc_inner_struc_MemoryManager_1__ Ram; + NvU32 PteKindOverride; + NvU64 scratchDwordOffset; + NvU32 zbcSurfaces; + NvU64 overrideInitHeapMin; + NvU64 overrideHeapMax; + NvU64 fbOverrideStartKb; + NvU64 rsvdMemorySizeIncrement; + struct OBJFBSR *pFbsr[7]; + struct OBJFBSR *pActiveFbsr; + NvU32 fbsrStartMode; + NvU32 fixedFbsrModesMask; + MEMORY_DESCRIPTOR *fbsrReservedRanges[2]; + PFB_MEM_NODE pMemHeadNode; + PFB_MEM_NODE pMemTailNode; + struct RM_POOL_ALLOC_MEM_RESERVE_INFO *pPageLevelReserve; + struct MIG_MEMORY_PARTITIONING_INFO MIGMemoryPartitioningInfo; + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubdevice; + NvHandle hThirdPartyP2P; +}; + +#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__ +#define __NVOC_CLASS_MemoryManager_TYPEDEF__ +typedef struct MemoryManager MemoryManager; +#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryManager +#define __nvoc_class_id_MemoryManager 0x22ad47 +#endif /* __nvoc_class_id_MemoryManager */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager; + +#define __staticCast_MemoryManager(pThis) \ + ((pThis)->__nvoc_pbase_MemoryManager) + +#ifdef __nvoc_mem_mgr_h_disabled +#define __dynamicCast_MemoryManager(pThis) ((MemoryManager*)NULL) +#else //__nvoc_mem_mgr_h_disabled +#define __dynamicCast_MemoryManager(pThis) \ + ((MemoryManager*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MemoryManager))) +#endif //__nvoc_mem_mgr_h_disabled + +#define PDB_PROP_MEMMGR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_MEMMGR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_MemoryManager(MemoryManager**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MemoryManager(MemoryManager**, Dynamic*, NvU32); +#define __objCreate_MemoryManager(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_MemoryManager((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define memmgrConstructEngine(pGpu, pMemoryManager, arg0) memmgrConstructEngine_DISPATCH(pGpu, pMemoryManager, arg0) +#define memmgrStatePreInitLocked(pGpu, pMemoryManager) memmgrStatePreInitLocked_DISPATCH(pGpu, pMemoryManager) +#define memmgrStateInitLocked(pGpu, pMemoryManager) memmgrStateInitLocked_DISPATCH(pGpu, pMemoryManager) +#define memmgrStateLoad(pGpu, pMemoryManager, arg0) memmgrStateLoad_DISPATCH(pGpu, pMemoryManager, arg0) +#define memmgrStatePreUnload(pGpu, pMemoryManager, arg0) memmgrStatePreUnload_DISPATCH(pGpu, pMemoryManager, arg0) +#define memmgrStateUnload(pGpu, pMemoryManager, arg0) memmgrStateUnload_DISPATCH(pGpu, pMemoryManager, arg0) +#define memmgrStateDestroy(pGpu, pMemoryManager) memmgrStateDestroy_DISPATCH(pGpu, pMemoryManager) +#define memmgrAllocDetermineAlignment(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) memmgrAllocDetermineAlignment_DISPATCH(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) +#define memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) memmgrAllocDetermineAlignment_DISPATCH(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment) +#define memmgrGetMaxContextSize(pGpu, pMemoryManager) memmgrGetMaxContextSize_DISPATCH(pGpu, pMemoryManager) +#define memmgrGetMaxContextSize_HAL(pGpu, pMemoryManager) memmgrGetMaxContextSize_DISPATCH(pGpu, pMemoryManager) +#define memmgrScrubRegistryOverrides(pGpu, pMemoryManager) memmgrScrubRegistryOverrides_DISPATCH(pGpu, pMemoryManager) +#define memmgrScrubRegistryOverrides_HAL(pGpu, pMemoryManager) memmgrScrubRegistryOverrides_DISPATCH(pGpu, pMemoryManager) +#define memmgrGetFlaKind(pGpu, pMemoryManager, arg0) memmgrGetFlaKind_DISPATCH(pGpu, pMemoryManager, arg0) +#define memmgrGetFlaKind_HAL(pGpu, pMemoryManager, arg0) memmgrGetFlaKind_DISPATCH(pGpu, pMemoryManager, arg0) +#define memmgrDetermineComptag(pGpu, pMemoryManager, arg0) memmgrDetermineComptag_DISPATCH(pGpu, pMemoryManager, arg0) +#define memmgrDetermineComptag_HAL(pGpu, pMemoryManager, arg0) memmgrDetermineComptag_DISPATCH(pGpu, pMemoryManager, arg0) +#define memmgrCheckReservedMemorySize(pGpu, pMemoryManager) memmgrCheckReservedMemorySize_DISPATCH(pGpu, pMemoryManager) +#define memmgrCheckReservedMemorySize_HAL(pGpu, pMemoryManager) memmgrCheckReservedMemorySize_DISPATCH(pGpu, pMemoryManager) +#define memmgrReadMmuLock(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) memmgrReadMmuLock_DISPATCH(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) +#define memmgrReadMmuLock_HAL(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) memmgrReadMmuLock_DISPATCH(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi) +#define memmgrBlockMemLockedMemory(pGpu, pMemoryManager) memmgrBlockMemLockedMemory_DISPATCH(pGpu, pMemoryManager) +#define memmgrBlockMemLockedMemory_HAL(pGpu, pMemoryManager) memmgrBlockMemLockedMemory_DISPATCH(pGpu, pMemoryManager) +#define memmgrInsertUnprotectedRegionAtBottomOfFb(pGpu, pMemoryManager, pSize) 
memmgrInsertUnprotectedRegionAtBottomOfFb_DISPATCH(pGpu, pMemoryManager, pSize) +#define memmgrInsertUnprotectedRegionAtBottomOfFb_HAL(pGpu, pMemoryManager, pSize) memmgrInsertUnprotectedRegionAtBottomOfFb_DISPATCH(pGpu, pMemoryManager, pSize) +#define memmgrGetDisablePlcKind(pMemoryManager, pteKind) memmgrGetDisablePlcKind_DISPATCH(pMemoryManager, pteKind) +#define memmgrGetDisablePlcKind_HAL(pMemoryManager, pteKind) memmgrGetDisablePlcKind_DISPATCH(pMemoryManager, pteKind) +#define memmgrEnableDynamicPageOfflining(pGpu, pMemoryManager) memmgrEnableDynamicPageOfflining_DISPATCH(pGpu, pMemoryManager) +#define memmgrEnableDynamicPageOfflining_HAL(pGpu, pMemoryManager) memmgrEnableDynamicPageOfflining_DISPATCH(pGpu, pMemoryManager) +#define memmgrGetBlackListPages(pGpu, pMemoryManager, pBlAddrs, pCount) memmgrGetBlackListPages_DISPATCH(pGpu, pMemoryManager, pBlAddrs, pCount) +#define memmgrGetBlackListPages_HAL(pGpu, pMemoryManager, pBlAddrs, pCount) memmgrGetBlackListPages_DISPATCH(pGpu, pMemoryManager, pBlAddrs, pCount) +#define memmgrReconcileTunableState(pGpu, pEngstate, pTunableState) memmgrReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define memmgrStatePreLoad(pGpu, pEngstate, arg0) memmgrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define memmgrStatePostUnload(pGpu, pEngstate, arg0) memmgrStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define memmgrStateInitUnlocked(pGpu, pEngstate) memmgrStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define memmgrInitMissing(pGpu, pEngstate) memmgrInitMissing_DISPATCH(pGpu, pEngstate) +#define memmgrStatePreInitUnlocked(pGpu, pEngstate) memmgrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define memmgrGetTunableState(pGpu, pEngstate, pTunableState) memmgrGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define memmgrCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) memmgrCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define memmgrFreeTunableState(pGpu, pEngstate, pTunableState) memmgrFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define memmgrStatePostLoad(pGpu, pEngstate, arg0) memmgrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define memmgrAllocTunableState(pGpu, pEngstate, ppTunableState) memmgrAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define memmgrSetTunableState(pGpu, pEngstate, pTunableState) memmgrSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define memmgrIsPresent(pGpu, pEngstate) memmgrIsPresent_DISPATCH(pGpu, pEngstate) +NvU32 memmgrDeterminePageSize_IMPL(struct MemoryManager *pMemoryManager, NvHandle hClient, NvU64 memSize, NvU32 memFormat, NvU32 pageFormatFlags, NvU32 *pRetAttr, NvU32 *pRetAttr2); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrDeterminePageSize(struct MemoryManager *pMemoryManager, NvHandle hClient, NvU64 memSize, NvU32 memFormat, NvU32 pageFormatFlags, NvU32 *pRetAttr, NvU32 *pRetAttr2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDeterminePageSize(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) memmgrDeterminePageSize_IMPL(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrDeterminePageSize_HAL(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) memmgrDeterminePageSize(pMemoryManager, hClient, memSize, memFormat, pageFormatFlags, pRetAttr, pRetAttr2) + +static 
inline NV_STATUS memmgrReserveConsoleRegion_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_REGION_DESCRIPTOR *arg0) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrReserveConsoleRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_REGION_DESCRIPTOR *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrReserveConsoleRegion(pGpu, pMemoryManager, arg0) memmgrReserveConsoleRegion_56cd7a(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrReserveConsoleRegion_HAL(pGpu, pMemoryManager, arg0) memmgrReserveConsoleRegion(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrAllocateConsoleRegion_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_REGION_DESCRIPTOR *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocateConsoleRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_REGION_DESCRIPTOR *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocateConsoleRegion(pGpu, pMemoryManager, arg0) memmgrAllocateConsoleRegion_IMPL(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocateConsoleRegion_HAL(pGpu, pMemoryManager, arg0) memmgrAllocateConsoleRegion(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrGetKindComprForGpu_KERNEL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *arg0, OBJGPU *pGpu, NvU64 offset, NvU32 *kind, COMPR_INFO *pComprInfo); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetKindComprForGpu(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *arg0, OBJGPU *pGpu, NvU64 offset, NvU32 *kind, COMPR_INFO *pComprInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetKindComprForGpu(pMemoryManager, arg0, pGpu, offset, kind, pComprInfo) memmgrGetKindComprForGpu_KERNEL(pMemoryManager, arg0, pGpu, offset, kind, pComprInfo) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetKindComprForGpu_HAL(pMemoryManager, arg0, pGpu, offset, kind, pComprInfo) memmgrGetKindComprForGpu(pMemoryManager, arg0, pGpu, offset, kind, pComprInfo) + +NV_STATUS memmgrScrubInit_GP100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubInit(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubInit(pGpu, pMemoryManager) memmgrScrubInit_GP100(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubInit_HAL(pGpu, pMemoryManager) memmgrScrubInit(pGpu, pMemoryManager) + +NV_STATUS memmgrScrubHandlePostSchedulingEnable_GP100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubHandlePostSchedulingEnable(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubHandlePostSchedulingEnable(pGpu, pMemoryManager) memmgrScrubHandlePostSchedulingEnable_GP100(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubHandlePostSchedulingEnable_HAL(pGpu, pMemoryManager) 
memmgrScrubHandlePostSchedulingEnable(pGpu, pMemoryManager) + +static inline void memmgrGetScrubState_f2d351(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0, NvU64 *arg1, NvBool *arg2) { + NV_ASSERT_PRECOMP(0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetScrubState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0, NvU64 *arg1, NvBool *arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetScrubState(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrGetScrubState_f2d351(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetScrubState_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrGetScrubState(pGpu, pMemoryManager, arg0, arg1, arg2) + +static inline void memmgrScrubInternalRegions_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubInternalRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubInternalRegions(pGpu, pMemoryManager) memmgrScrubInternalRegions_b3696a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubInternalRegions_HAL(pGpu, pMemoryManager) memmgrScrubInternalRegions(pGpu, pMemoryManager) + +static inline NvBool memmgrEccScrubInProgress_491d52(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrEccScrubInProgress(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrEccScrubInProgress(pGpu, pMemoryManager) memmgrEccScrubInProgress_491d52(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrEccScrubInProgress_HAL(pGpu, pMemoryManager) memmgrEccScrubInProgress(pGpu, pMemoryManager) + +static inline void memmgrAsyncScrubRegion_f2d351(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg0, NvU64 arg1) { + NV_ASSERT_PRECOMP(0); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrAsyncScrubRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg0, NvU64 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAsyncScrubRegion(pGpu, pMemoryManager, arg0, arg1) memmgrAsyncScrubRegion_f2d351(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAsyncScrubRegion_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrAsyncScrubRegion(pGpu, pMemoryManager, arg0, arg1) + +NV_STATUS memmgrScrubHandlePreSchedulingDisable_GP100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubHandlePreSchedulingDisable(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubHandlePreSchedulingDisable(pGpu, pMemoryManager) memmgrScrubHandlePreSchedulingDisable_GP100(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubHandlePreSchedulingDisable_HAL(pGpu, pMemoryManager) memmgrScrubHandlePreSchedulingDisable(pGpu, pMemoryManager) + +void memmgrScrubDestroy_GP100(OBJGPU *pGpu, struct MemoryManager 
*pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubDestroy(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubDestroy(pGpu, pMemoryManager) memmgrScrubDestroy_GP100(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubDestroy_HAL(pGpu, pMemoryManager) memmgrScrubDestroy(pGpu, pMemoryManager) + +static inline void memmgrScrubMemory_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0, NvU64 arg1) { + return; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrScrubMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0, NvU64 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubMemory(pGpu, pMemoryManager, arg0, arg1) memmgrScrubMemory_b3696a(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubMemory_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrScrubMemory(pGpu, pMemoryManager, arg0, arg1) + +NV_STATUS memmgrMemUtilsMemSetBlocking_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemSetBlocking(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemSetBlocking(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrMemUtilsMemSetBlocking_GM107(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemSetBlocking_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrMemUtilsMemSetBlocking(pGpu, pMemoryManager, arg0, arg1, arg2) + +NV_STATUS memmgrMemUtilsMemSet_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2, NvU32 arg3, NvU32 *arg4); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemSet(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2, NvU32 arg3, NvU32 *arg4) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemSet(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4) memmgrMemUtilsMemSet_GM107(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemSet_HAL(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4) memmgrMemUtilsMemSet(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4) + +NV_STATUS memmgrMemUtilsMemSetBatched_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemSetBatched(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NvU64 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemSetBatched(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrMemUtilsMemSetBatched_GM107(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemSetBatched_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) 
memmgrMemUtilsMemSetBatched(pGpu, pMemoryManager, arg0, arg1, arg2) + +NV_STATUS memmgrMemUtilsMemCopyBatched_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NV_ADDRESS_SPACE arg2, NvU32 arg3, RmPhysAddr arg4, NV_ADDRESS_SPACE arg5, NvU32 arg6, NvU64 arg7); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsMemCopyBatched(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, RmPhysAddr arg1, NV_ADDRESS_SPACE arg2, NvU32 arg3, RmPhysAddr arg4, NV_ADDRESS_SPACE arg5, NvU32 arg6, NvU64 arg7) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsMemCopyBatched(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) memmgrMemUtilsMemCopyBatched_GM107(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsMemCopyBatched_HAL(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) memmgrMemUtilsMemCopyBatched(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + +NV_STATUS memmgrMemUtilsAllocateEccScrubber_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsAllocateEccScrubber(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsAllocateEccScrubber(pGpu, pMemoryManager, arg0) memmgrMemUtilsAllocateEccScrubber_GM107(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsAllocateEccScrubber_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsAllocateEccScrubber(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrMemUtilsAllocateEccAllocScrubber_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsAllocateEccAllocScrubber(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsAllocateEccAllocScrubber(pGpu, pMemoryManager, arg0) memmgrMemUtilsAllocateEccAllocScrubber_GM107(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsAllocateEccAllocScrubber_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsAllocateEccAllocScrubber(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrMemUtilsChannelInitialize_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsChannelInitialize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsChannelInitialize(pGpu, pMemoryManager, arg0) memmgrMemUtilsChannelInitialize_GM107(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsChannelInitialize(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrMemUtilsCopyEngineInitialize_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled 
+static inline NV_STATUS memmgrMemUtilsCopyEngineInitialize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsCopyEngineInitialize(pGpu, pMemoryManager, arg0) memmgrMemUtilsCopyEngineInitialize_GM107(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsCopyEngineInitialize_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsCopyEngineInitialize(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrMemUtilsGetCopyEngineClass_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pClass); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsGetCopyEngineClass(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pClass) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsGetCopyEngineClass(pGpu, pMemoryManager, pClass) memmgrMemUtilsGetCopyEngineClass_GM107(pGpu, pMemoryManager, pClass) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsGetCopyEngineClass_HAL(pGpu, pMemoryManager, pClass) memmgrMemUtilsGetCopyEngineClass(pGpu, pMemoryManager, pClass) + +NV_STATUS memmgrMemUtilsCreateMemoryAlias_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemUtilsCreateMemoryAlias(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsCreateMemoryAlias(pGpu, pMemoryManager, arg0) memmgrMemUtilsCreateMemoryAlias_GM107(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrMemUtilsCreateMemoryAlias_HAL(pGpu, pMemoryManager, arg0) memmgrMemUtilsCreateMemoryAlias(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrAllocHal_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocHal(pGpu, pMemoryManager, pFbAllocInfo) memmgrAllocHal_GM107(pGpu, pMemoryManager, pFbAllocInfo) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocHal_HAL(pGpu, pMemoryManager, pFbAllocInfo) memmgrAllocHal(pGpu, pMemoryManager, pFbAllocInfo) + +NV_STATUS memmgrFreeHal_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo, PRMTIMEOUT pTimeout); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFreeHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo, PRMTIMEOUT pTimeout) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeHal(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) memmgrFreeHal_GM107(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrFreeHal_HAL(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) memmgrFreeHal(pGpu, pMemoryManager, pFbAllocInfo, pTimeout) + +static inline NV_STATUS memmgrUpdateSurfaceCompression_5baef9(OBJGPU *pGpu, struct MemoryManager 
*pMemoryManager, Memory *arg0, NvBool arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrUpdateSurfaceCompression(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrUpdateSurfaceCompression(pGpu, pMemoryManager, arg0, arg1) memmgrUpdateSurfaceCompression_5baef9(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrUpdateSurfaceCompression_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrUpdateSurfaceCompression(pGpu, pMemoryManager, arg0, arg1) + +NV_STATUS memmgrGetBankPlacementData_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pBankPlacementLowData); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetBankPlacementData(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *pBankPlacementLowData) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBankPlacementData(pGpu, pMemoryManager, pBankPlacementLowData) memmgrGetBankPlacementData_GM107(pGpu, pMemoryManager, pBankPlacementLowData) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBankPlacementData_HAL(pGpu, pMemoryManager, pBankPlacementLowData) memmgrGetBankPlacementData(pGpu, pMemoryManager, pBankPlacementLowData) + +void memmgrDirtyForPmTest_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool partialDirty); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrDirtyForPmTest(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool partialDirty) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDirtyForPmTest(pGpu, pMemoryManager, partialDirty) memmgrDirtyForPmTest_GM107(pGpu, pMemoryManager, partialDirty) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrDirtyForPmTest_HAL(pGpu, pMemoryManager, partialDirty) memmgrDirtyForPmTest(pGpu, pMemoryManager, partialDirty) + +NvU32 memmgrGetReservedHeapSizeMb_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetReservedHeapSizeMb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetReservedHeapSizeMb(pGpu, pMemoryManager) memmgrGetReservedHeapSizeMb_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetReservedHeapSizeMb_HAL(pGpu, pMemoryManager) memmgrGetReservedHeapSizeMb(pGpu, pMemoryManager) + +static inline NV_STATUS memmgrInitFbRegionsHal_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitFbRegionsHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitFbRegionsHal(pGpu, pMemoryManager) memmgrInitFbRegionsHal_56cd7a(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInitFbRegionsHal_HAL(pGpu, pMemoryManager) memmgrInitFbRegionsHal(pGpu, pMemoryManager) + +void memmgrHandleSizeOverrides_GP100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled 
+static inline void memmgrHandleSizeOverrides(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrHandleSizeOverrides(pGpu, pMemoryManager) memmgrHandleSizeOverrides_GP100(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrHandleSizeOverrides_HAL(pGpu, pMemoryManager) memmgrHandleSizeOverrides(pGpu, pMemoryManager) + +NV_STATUS memmgrFinishHandleSizeOverrides_GP100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFinishHandleSizeOverrides(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFinishHandleSizeOverrides(pGpu, pMemoryManager) memmgrFinishHandleSizeOverrides_GP100(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrFinishHandleSizeOverrides_HAL(pGpu, pMemoryManager) memmgrFinishHandleSizeOverrides(pGpu, pMemoryManager) + +NV_STATUS memmgrGetBAR1InfoForClient_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvHandle arg0, PGETBAR1INFO bar1Info); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetBAR1InfoForClient(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvHandle arg0, PGETBAR1INFO bar1Info) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBAR1InfoForClient(pGpu, pMemoryManager, arg0, bar1Info) memmgrGetBAR1InfoForClient_GM107(pGpu, pMemoryManager, arg0, bar1Info) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBAR1InfoForClient_HAL(pGpu, pMemoryManager, arg0, bar1Info) memmgrGetBAR1InfoForClient(pGpu, pMemoryManager, arg0, bar1Info) + +static inline NvU64 memmgrGetFbTaxSize_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetFbTaxSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetFbTaxSize(pGpu, pMemoryManager) memmgrGetFbTaxSize_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetFbTaxSize_HAL(pGpu, pMemoryManager) memmgrGetFbTaxSize(pGpu, pMemoryManager) + +static inline NvU64 memmgrGetVgpuHostRmReservedFb_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 vgpuTypeId) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetVgpuHostRmReservedFb(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 vgpuTypeId) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetVgpuHostRmReservedFb(pGpu, pMemoryManager, vgpuTypeId) memmgrGetVgpuHostRmReservedFb_4a4dee(pGpu, pMemoryManager, vgpuTypeId) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetVgpuHostRmReservedFb_HAL(pGpu, pMemoryManager, vgpuTypeId) memmgrGetVgpuHostRmReservedFb(pGpu, pMemoryManager, vgpuTypeId) + +NvU64 memmgrGetRsvdSizeForSr_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU64 memmgrGetRsvdSizeForSr(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled 
+#define memmgrGetRsvdSizeForSr(pGpu, pMemoryManager) memmgrGetRsvdSizeForSr_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetRsvdSizeForSr_HAL(pGpu, pMemoryManager) memmgrGetRsvdSizeForSr(pGpu, pMemoryManager) + +static inline NvBool memmgrVerifyDepthSurfaceAttrs_cbe027(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1) { + return ((NvBool)(0 == 0)); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrVerifyDepthSurfaceAttrs(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrVerifyDepthSurfaceAttrs(pMemoryManager, arg0, arg1) memmgrVerifyDepthSurfaceAttrs_cbe027(pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrVerifyDepthSurfaceAttrs_HAL(pMemoryManager, arg0, arg1) memmgrVerifyDepthSurfaceAttrs(pMemoryManager, arg0, arg1) + +static inline NV_STATUS memmgrAllocMemToSaveVgaWorkspace_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **arg0, MEMORY_DESCRIPTOR **arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocMemToSaveVgaWorkspace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR **arg0, MEMORY_DESCRIPTOR **arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocMemToSaveVgaWorkspace(pGpu, pMemoryManager, arg0, arg1) memmgrAllocMemToSaveVgaWorkspace_5baef9(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocMemToSaveVgaWorkspace_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrAllocMemToSaveVgaWorkspace(pGpu, pMemoryManager, arg0, arg1) + +NvBool memmgrComparePhysicalAddresses_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU64 arg1, NvU32 arg2, NvU64 arg3); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrComparePhysicalAddresses(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU64 arg1, NvU32 arg2, NvU64 arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrComparePhysicalAddresses(pGpu, pMemoryManager, arg0, arg1, arg2, arg3) memmgrComparePhysicalAddresses_GM107(pGpu, pMemoryManager, arg0, arg1, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrComparePhysicalAddresses_HAL(pGpu, pMemoryManager, arg0, arg1, arg2, arg3) memmgrComparePhysicalAddresses(pGpu, pMemoryManager, arg0, arg1, arg2, arg3) + +RmPhysAddr memmgrGetInvalidOffset_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline RmPhysAddr memmgrGetInvalidOffset(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + RmPhysAddr ret; + portMemSet(&ret, 0, sizeof(RmPhysAddr)); + return ret; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetInvalidOffset(pGpu, pMemoryManager) memmgrGetInvalidOffset_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetInvalidOffset_HAL(pGpu, pMemoryManager) memmgrGetInvalidOffset(pGpu, pMemoryManager) + +NvU32 memmgrGetAddrSpaceSizeMB_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetAddrSpaceSizeMB(OBJGPU *pGpu, struct 
MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetAddrSpaceSizeMB(pGpu, pMemoryManager) memmgrGetAddrSpaceSizeMB_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetAddrSpaceSizeMB_HAL(pGpu, pMemoryManager) memmgrGetAddrSpaceSizeMB(pGpu, pMemoryManager) + +NvU32 memmgrGetUsableMemSizeMB_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetUsableMemSizeMB(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUsableMemSizeMB(pGpu, pMemoryManager) memmgrGetUsableMemSizeMB_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUsableMemSizeMB_HAL(pGpu, pMemoryManager) memmgrGetUsableMemSizeMB(pGpu, pMemoryManager) + +NV_STATUS memmgrGetSurfacePhysAttr_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *pMemory, NvU64 *pOffset, NvU32 *pMemAperture, NvU32 *pMemKind, NvU32 *pComprOffset, NvU32 *pComprKind, NvU32 *pLineMin, NvU32 *pLineMax, NvU32 *pZCullId, NvU32 *pGpuCacheAttr, NvU32 *pGpuP2PCacheAttr, NvU64 *contigSegmentSize); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetSurfacePhysAttr(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, Memory *pMemory, NvU64 *pOffset, NvU32 *pMemAperture, NvU32 *pMemKind, NvU32 *pComprOffset, NvU32 *pComprKind, NvU32 *pLineMin, NvU32 *pLineMax, NvU32 *pZCullId, NvU32 *pGpuCacheAttr, NvU32 *pGpuP2PCacheAttr, NvU64 *contigSegmentSize) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetSurfacePhysAttr(pGpu, pMemoryManager, pMemory, pOffset, pMemAperture, pMemKind, pComprOffset, pComprKind, pLineMin, pLineMax, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, contigSegmentSize) memmgrGetSurfacePhysAttr_GM107(pGpu, pMemoryManager, pMemory, pOffset, pMemAperture, pMemKind, pComprOffset, pComprKind, pLineMin, pLineMax, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, contigSegmentSize) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetSurfacePhysAttr_HAL(pGpu, pMemoryManager, pMemory, pOffset, pMemAperture, pMemKind, pComprOffset, pComprKind, pLineMin, pLineMax, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, contigSegmentSize) memmgrGetSurfacePhysAttr(pGpu, pMemoryManager, pMemory, pOffset, pMemAperture, pMemKind, pComprOffset, pComprKind, pLineMin, pLineMax, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, contigSegmentSize) + +static inline NvBool memmgrVerifyComprAttrs_cbe027(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1, NvU32 arg2) { + return ((NvBool)(0 == 0)); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrVerifyComprAttrs(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrVerifyComprAttrs(pMemoryManager, arg0, arg1, arg2) memmgrVerifyComprAttrs_cbe027(pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrVerifyComprAttrs_HAL(pMemoryManager, arg0, arg1, arg2) memmgrVerifyComprAttrs(pMemoryManager, arg0, arg1, arg2) + +NvBool memmgrIsKindCompressible_TU102(struct MemoryManager *pMemoryManager, NvU32 arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline 
NvBool memmgrIsKindCompressible(struct MemoryManager *pMemoryManager, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKindCompressible(pMemoryManager, arg0) memmgrIsKindCompressible_TU102(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKindCompressible_HAL(pMemoryManager, arg0) memmgrIsKindCompressible(pMemoryManager, arg0) + +static inline NvBool memmgrIsKindBlocklinear_491d52(struct MemoryManager *pMemoryManager, NvU32 arg0) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsKindBlocklinear(struct MemoryManager *pMemoryManager, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKindBlocklinear(pMemoryManager, arg0) memmgrIsKindBlocklinear_491d52(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKindBlocklinear_HAL(pMemoryManager, arg0) memmgrIsKindBlocklinear(pMemoryManager, arg0) + +NvU32 memmgrGetPteKindBl_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetPteKindBl(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindBl(pGpu, pMemoryManager) memmgrGetPteKindBl_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindBl_HAL(pGpu, pMemoryManager) memmgrGetPteKindBl(pGpu, pMemoryManager) + +NvU32 memmgrGetPteKindPitch_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetPteKindPitch(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindPitch(pGpu, pMemoryManager) memmgrGetPteKindPitch_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindPitch_HAL(pGpu, pMemoryManager) memmgrGetPteKindPitch(pGpu, pMemoryManager) + +NvU32 memmgrChooseKindZ_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindZ(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindZ(pGpu, pMemoryManager, arg0) memmgrChooseKindZ_TU102(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindZ_HAL(pGpu, pMemoryManager, arg0) memmgrChooseKindZ(pGpu, pMemoryManager, arg0) + +NvU32 memmgrChooseKindCompressZ_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressZ(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressZ(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressZ_TU102(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressZ_HAL(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressZ(pGpu, pMemoryManager, arg0) + +NvU32 
memmgrChooseKindCompressC_GP100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressC(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressC(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressC_GP100(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressC_HAL(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressC(pGpu, pMemoryManager, arg0) + +static inline NvU32 memmgrChooseKindCompressCForMS2_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrChooseKindCompressCForMS2(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKindCompressCForMS2(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressCForMS2_4a4dee(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKindCompressCForMS2_HAL(pGpu, pMemoryManager, arg0) memmgrChooseKindCompressCForMS2(pGpu, pMemoryManager, arg0) + +NvU32 memmgrGetUncompressedKind_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 kind, NvBool releaseReacquire); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetUncompressedKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 kind, NvBool releaseReacquire) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUncompressedKind(pGpu, pMemoryManager, kind, releaseReacquire) memmgrGetUncompressedKind_TU102(pGpu, pMemoryManager, kind, releaseReacquire) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUncompressedKind_HAL(pGpu, pMemoryManager, kind, releaseReacquire) memmgrGetUncompressedKind(pGpu, pMemoryManager, kind, releaseReacquire) + +static inline NV_STATUS memmgrGetUncompressedKindForMS2_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetUncompressedKindForMS2(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUncompressedKindForMS2(pGpu, pMemoryManager, arg0, arg1) memmgrGetUncompressedKindForMS2_5baef9(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUncompressedKindForMS2_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrGetUncompressedKindForMS2(pGpu, pMemoryManager, arg0, arg1) + +NV_STATUS memmgrChooseKind_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0, NvU32 arg1, NvU32 *arg2); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrChooseKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *arg0, NvU32 arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChooseKind(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrChooseKind_TU102(pGpu, pMemoryManager, 
arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChooseKind_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrChooseKind(pGpu, pMemoryManager, arg0, arg1, arg2) + +NvBool memmgrIsKind_TU102(struct MemoryManager *pMemoryManager, FB_IS_KIND_OP arg0, NvU32 arg1); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsKind(struct MemoryManager *pMemoryManager, FB_IS_KIND_OP arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsKind(pMemoryManager, arg0, arg1) memmgrIsKind_TU102(pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsKind_HAL(pMemoryManager, arg0, arg1) memmgrIsKind(pMemoryManager, arg0, arg1) + +NvU32 memmgrGetMessageKind_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetMessageKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMessageKind(pGpu, pMemoryManager) memmgrGetMessageKind_TU102(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetMessageKind_HAL(pGpu, pMemoryManager) memmgrGetMessageKind(pGpu, pMemoryManager) + +NvU32 memmgrGetDefaultPteKindForNoHandle_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetDefaultPteKindForNoHandle(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetDefaultPteKindForNoHandle(pGpu, pMemoryManager) memmgrGetDefaultPteKindForNoHandle_TU102(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetDefaultPteKindForNoHandle_HAL(pGpu, pMemoryManager) memmgrGetDefaultPteKindForNoHandle(pGpu, pMemoryManager) + +NvBool memmgrIsSurfaceBlockLinear_TU102(struct MemoryManager *pMemoryManager, Memory *arg0, NvU32 arg1, NvU32 arg2); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsSurfaceBlockLinear(struct MemoryManager *pMemoryManager, Memory *arg0, NvU32 arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsSurfaceBlockLinear(pMemoryManager, arg0, arg1, arg2) memmgrIsSurfaceBlockLinear_TU102(pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrIsSurfaceBlockLinear_HAL(pMemoryManager, arg0, arg1, arg2) memmgrIsSurfaceBlockLinear(pMemoryManager, arg0, arg1, arg2) + +NvU32 memmgrGetHwPteKindFromSwPteKind_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetHwPteKindFromSwPteKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetHwPteKindFromSwPteKind(pGpu, pMemoryManager, pteKind) memmgrGetHwPteKindFromSwPteKind_TU102(pGpu, pMemoryManager, pteKind) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, pteKind) memmgrGetHwPteKindFromSwPteKind(pGpu, pMemoryManager, pteKind) + +NvU32 memmgrGetSwPteKindFromHwPteKind_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind); + +#ifdef __nvoc_mem_mgr_h_disabled 
+static inline NvU32 memmgrGetSwPteKindFromHwPteKind(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 pteKind) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetSwPteKindFromHwPteKind(pGpu, pMemoryManager, pteKind) memmgrGetSwPteKindFromHwPteKind_TU102(pGpu, pMemoryManager, pteKind) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetSwPteKindFromHwPteKind_HAL(pGpu, pMemoryManager, pteKind) memmgrGetSwPteKindFromHwPteKind(pGpu, pMemoryManager, pteKind) + +void memmgrGetPteKindForScrubber_TU102(struct MemoryManager *pMemoryManager, NvU32 *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetPteKindForScrubber(struct MemoryManager *pMemoryManager, NvU32 *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetPteKindForScrubber(pMemoryManager, arg0) memmgrGetPteKindForScrubber_TU102(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetPteKindForScrubber_HAL(pMemoryManager, arg0) memmgrGetPteKindForScrubber(pMemoryManager, arg0) + +NvU32 memmgrGetCtagOffsetFromParams_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetCtagOffsetFromParams(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetCtagOffsetFromParams(pGpu, pMemoryManager, arg0) memmgrGetCtagOffsetFromParams_TU102(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetCtagOffsetFromParams_HAL(pGpu, pMemoryManager, arg0) memmgrGetCtagOffsetFromParams(pGpu, pMemoryManager, arg0) + +void memmgrSetCtagOffsetInParams_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0, NvU32 arg1); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrSetCtagOffsetInParams(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetCtagOffsetInParams(pGpu, pMemoryManager, arg0, arg1) memmgrSetCtagOffsetInParams_TU102(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetCtagOffsetInParams_HAL(pGpu, pMemoryManager, arg0, arg1) memmgrSetCtagOffsetInParams(pGpu, pMemoryManager, arg0, arg1) + +void memmgrChannelPushSemaphoreMethodsBlock_GP100(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU64 arg1, NvU32 arg2, NvU32 **arg3); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrChannelPushSemaphoreMethodsBlock(struct MemoryManager *pMemoryManager, NvU32 arg0, NvU64 arg1, NvU32 arg2, NvU32 **arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChannelPushSemaphoreMethodsBlock(pMemoryManager, arg0, arg1, arg2, arg3) memmgrChannelPushSemaphoreMethodsBlock_GP100(pMemoryManager, arg0, arg1, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChannelPushSemaphoreMethodsBlock_HAL(pMemoryManager, arg0, arg1, arg2, arg3) memmgrChannelPushSemaphoreMethodsBlock(pMemoryManager, arg0, arg1, arg2, arg3) + +void memmgrChannelPushAddressMethodsBlock_GP100(struct MemoryManager *pMemoryManager, NvBool arg0, NvU32 arg1, RmPhysAddr arg2, NvU32 **arg3); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void 
memmgrChannelPushAddressMethodsBlock(struct MemoryManager *pMemoryManager, NvBool arg0, NvU32 arg1, RmPhysAddr arg2, NvU32 **arg3) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrChannelPushAddressMethodsBlock(pMemoryManager, arg0, arg1, arg2, arg3) memmgrChannelPushAddressMethodsBlock_GP100(pMemoryManager, arg0, arg1, arg2, arg3) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrChannelPushAddressMethodsBlock_HAL(pMemoryManager, arg0, arg1, arg2, arg3) memmgrChannelPushAddressMethodsBlock(pMemoryManager, arg0, arg1, arg2, arg3) + +NV_STATUS memmgrScrubMapDoorbellRegion_GV100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrScrubMapDoorbellRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrScrubMapDoorbellRegion(pGpu, pMemoryManager, arg0) memmgrScrubMapDoorbellRegion_GV100(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrScrubMapDoorbellRegion_HAL(pGpu, pMemoryManager, arg0) memmgrScrubMapDoorbellRegion(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrSetAllocParameters_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetAllocParameters(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetAllocParameters(pGpu, pMemoryManager, pFbAllocInfo) memmgrSetAllocParameters_GM107(pGpu, pMemoryManager, pFbAllocInfo) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetAllocParameters_HAL(pGpu, pMemoryManager, pFbAllocInfo) memmgrSetAllocParameters(pGpu, pMemoryManager, pFbAllocInfo) + +void memmgrCalcReservedFbSpaceForUVM_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrCalcReservedFbSpaceForUVM(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCalcReservedFbSpaceForUVM(pGpu, pMemoryManager, arg0) memmgrCalcReservedFbSpaceForUVM_GM107(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrCalcReservedFbSpaceForUVM_HAL(pGpu, pMemoryManager, arg0) memmgrCalcReservedFbSpaceForUVM(pGpu, pMemoryManager, arg0) + +void memmgrCalcReservedFbSpaceHal_FWCLIENT(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0, NvU64 *arg1, NvU64 *arg2); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrCalcReservedFbSpaceHal(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0, NvU64 *arg1, NvU64 *arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCalcReservedFbSpaceHal(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrCalcReservedFbSpaceHal_FWCLIENT(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrCalcReservedFbSpaceHal_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrCalcReservedFbSpaceHal(pGpu, pMemoryManager, arg0, arg1, arg2) + +static inline NvU32 memmgrGetGrHeapReservationSize_4a4dee(OBJGPU *pGpu, struct 
MemoryManager *pMemoryManager) { + return 0; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetGrHeapReservationSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetGrHeapReservationSize(pGpu, pMemoryManager) memmgrGetGrHeapReservationSize_4a4dee(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetGrHeapReservationSize_HAL(pGpu, pMemoryManager) memmgrGetGrHeapReservationSize(pGpu, pMemoryManager) + +NvU32 memmgrGetRunlistEntriesReservedFbSpace_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetRunlistEntriesReservedFbSpace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetRunlistEntriesReservedFbSpace(pGpu, pMemoryManager) memmgrGetRunlistEntriesReservedFbSpace_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetRunlistEntriesReservedFbSpace_HAL(pGpu, pMemoryManager) memmgrGetRunlistEntriesReservedFbSpace(pGpu, pMemoryManager) + +NvU32 memmgrGetUserdReservedFbSpace_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetUserdReservedFbSpace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUserdReservedFbSpace(pGpu, pMemoryManager) memmgrGetUserdReservedFbSpace_GM107(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetUserdReservedFbSpace_HAL(pGpu, pMemoryManager) memmgrGetUserdReservedFbSpace(pGpu, pMemoryManager) + +NV_STATUS memmgrInitReservedMemory_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg0); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitReservedMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitReservedMemory(pGpu, pMemoryManager, arg0) memmgrInitReservedMemory_GM107(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInitReservedMemory_HAL(pGpu, pMemoryManager, arg0) memmgrInitReservedMemory(pGpu, pMemoryManager, arg0) + +NV_STATUS memmgrPreInitReservedMemory_FWCLIENT(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrPreInitReservedMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPreInitReservedMemory(pGpu, pMemoryManager) memmgrPreInitReservedMemory_FWCLIENT(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrPreInitReservedMemory_HAL(pGpu, pMemoryManager) memmgrPreInitReservedMemory(pGpu, pMemoryManager) + +NV_STATUS memmgrInitBaseFbRegions_FWCLIENT(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitBaseFbRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled 
+#define memmgrInitBaseFbRegions(pGpu, pMemoryManager) memmgrInitBaseFbRegions_FWCLIENT(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrInitBaseFbRegions_HAL(pGpu, pMemoryManager) memmgrInitBaseFbRegions(pGpu, pMemoryManager) + +NV_STATUS memmgrSetMemDescPageSize_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMEMORY_DESCRIPTOR arg0, ADDRESS_TRANSLATION arg1, RM_ATTR_PAGE_SIZE arg2); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetMemDescPageSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMEMORY_DESCRIPTOR arg0, ADDRESS_TRANSLATION arg1, RM_ATTR_PAGE_SIZE arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetMemDescPageSize(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrSetMemDescPageSize_GM107(pGpu, pMemoryManager, arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, arg0, arg1, arg2) memmgrSetMemDescPageSize(pGpu, pMemoryManager, arg0, arg1, arg2) + +NV_STATUS memmgrSetPartitionableMem_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetPartitionableMem(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetPartitionableMem(pGpu, pMemoryManager) memmgrSetPartitionableMem_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrSetPartitionableMem_HAL(pGpu, pMemoryManager) memmgrSetPartitionableMem(pGpu, pMemoryManager) + +NV_STATUS memmgrAllocMIGGPUInstanceMemory_PF(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle *phMemory, struct NV_RANGE *pAddrRange, struct Heap **ppMemoryPartitionHeap); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocMIGGPUInstanceMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle *phMemory, struct NV_RANGE *pAddrRange, struct Heap **ppMemoryPartitionHeap) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) memmgrAllocMIGGPUInstanceMemory_PF(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrAllocMIGGPUInstanceMemory_HAL(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) memmgrAllocMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, phMemory, pAddrRange, ppMemoryPartitionHeap) + +NV_STATUS memmgrGetBlackListPagesForHeap_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *pHeap); + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetBlackListPagesForHeap(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *pHeap) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetBlackListPagesForHeap(pGpu, pMemoryManager, pHeap) memmgrGetBlackListPagesForHeap_GM107(pGpu, pMemoryManager, pHeap) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrGetBlackListPagesForHeap_HAL(pGpu, pMemoryManager, pHeap) memmgrGetBlackListPagesForHeap(pGpu, pMemoryManager, pHeap) + +static inline NV_STATUS 
memmgrDiscoverMIGPartitionableMemoryRange_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct NV_RANGE *pMemoryRange) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrDiscoverMIGPartitionableMemoryRange(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct NV_RANGE *pMemoryRange) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDiscoverMIGPartitionableMemoryRange(pGpu, pMemoryManager, pMemoryRange) memmgrDiscoverMIGPartitionableMemoryRange_46f6a7(pGpu, pMemoryManager, pMemoryRange) +#endif //__nvoc_mem_mgr_h_disabled + +#define memmgrDiscoverMIGPartitionableMemoryRange_HAL(pGpu, pMemoryManager, pMemoryRange) memmgrDiscoverMIGPartitionableMemoryRange(pGpu, pMemoryManager, pMemoryRange) + +NV_STATUS memmgrConstructEngine_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, ENGDESCRIPTOR arg0); + +static inline NV_STATUS memmgrConstructEngine_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, ENGDESCRIPTOR arg0) { + return pMemoryManager->__memmgrConstructEngine__(pGpu, pMemoryManager, arg0); +} + +NV_STATUS memmgrStatePreInitLocked_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline NV_STATUS memmgrStatePreInitLocked_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return pMemoryManager->__memmgrStatePreInitLocked__(pGpu, pMemoryManager); +} + +NV_STATUS memmgrStateInitLocked_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline NV_STATUS memmgrStateInitLocked_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return pMemoryManager->__memmgrStateInitLocked__(pGpu, pMemoryManager); +} + +NV_STATUS memmgrStateLoad_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0); + +static inline NV_STATUS memmgrStateLoad_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0) { + return pMemoryManager->__memmgrStateLoad__(pGpu, pMemoryManager, arg0); +} + +NV_STATUS memmgrStatePreUnload_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0); + +static inline NV_STATUS memmgrStatePreUnload_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0) { + return pMemoryManager->__memmgrStatePreUnload__(pGpu, pMemoryManager, arg0); +} + +NV_STATUS memmgrStateUnload_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0); + +static inline NV_STATUS memmgrStateUnload_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0) { + return pMemoryManager->__memmgrStateUnload__(pGpu, pMemoryManager, arg0); +} + +void memmgrStateDestroy_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline void memmgrStateDestroy_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + pMemoryManager->__memmgrStateDestroy__(pGpu, pMemoryManager); +} + +NV_STATUS memmgrAllocDetermineAlignment_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 retAttr, NvU32 retAttr2, NvU64 hwAlignment); + +NV_STATUS memmgrAllocDetermineAlignment_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 retAttr, NvU32 retAttr2, NvU64 hwAlignment); + +static inline NV_STATUS memmgrAllocDetermineAlignment_5baef9(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 
retAttr, NvU32 retAttr2, NvU64 hwAlignment) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS memmgrAllocDetermineAlignment_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 retAttr, NvU32 retAttr2, NvU64 hwAlignment) { + return NV_OK; +} + +static inline NV_STATUS memmgrAllocDetermineAlignment_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pMemSize, NvU64 *pAlign, NvU64 alignPad, NvU32 allocFlags, NvU32 retAttr, NvU32 retAttr2, NvU64 hwAlignment) { + return pMemoryManager->__memmgrAllocDetermineAlignment__(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, allocFlags, retAttr, retAttr2, hwAlignment); +} + +NvU64 memmgrGetMaxContextSize_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +NvU64 memmgrGetMaxContextSize_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline NvU64 memmgrGetMaxContextSize_4a4dee(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return 0; +} + +static inline NvU64 memmgrGetMaxContextSize_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return pMemoryManager->__memmgrGetMaxContextSize__(pGpu, pMemoryManager); +} + +void memmgrScrubRegistryOverrides_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +void memmgrScrubRegistryOverrides_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline void memmgrScrubRegistryOverrides_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + +static inline void memmgrScrubRegistryOverrides_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + pMemoryManager->__memmgrScrubRegistryOverrides__(pGpu, pMemoryManager); +} + +NV_STATUS memmgrGetFlaKind_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *arg0); + +static inline NV_STATUS memmgrGetFlaKind_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *arg0) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS memmgrGetFlaKind_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 *arg0) { + return pMemoryManager->__memmgrGetFlaKind__(pGpu, pMemoryManager, arg0); +} + +NvU32 memmgrDetermineComptag_TU102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0); + +static inline NvU32 memmgrDetermineComptag_13cd8d(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0) { + NV_ASSERT_PRECOMP(0); + return 0; +} + +static inline NvU32 memmgrDetermineComptag_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr arg0) { + return pMemoryManager->__memmgrDetermineComptag__(pGpu, pMemoryManager, arg0); +} + +NV_STATUS memmgrCheckReservedMemorySize_GK104(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline NV_STATUS memmgrCheckReservedMemorySize_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +static inline NV_STATUS memmgrCheckReservedMemorySize_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return pMemoryManager->__memmgrCheckReservedMemorySize__(pGpu, pMemoryManager); +} + +NV_STATUS memmgrReadMmuLock_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool *pbIsValid, NvU64 *pMmuLockLo, NvU64 *pMmuLockHi); + +static inline NV_STATUS memmgrReadMmuLock_e133c0(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool *pbIsValid, NvU64 *pMmuLockLo, NvU64 *pMmuLockHi) { + *pbIsValid = ((NvBool)(0 != 0)); + return NV_OK; +} + +static inline NV_STATUS 
memmgrReadMmuLock_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvBool *pbIsValid, NvU64 *pMmuLockLo, NvU64 *pMmuLockHi) { + return pMemoryManager->__memmgrReadMmuLock__(pGpu, pMemoryManager, pbIsValid, pMmuLockLo, pMmuLockHi); +} + +NV_STATUS memmgrBlockMemLockedMemory_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline NV_STATUS memmgrBlockMemLockedMemory_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return NV_OK; +} + +static inline NV_STATUS memmgrBlockMemLockedMemory_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return pMemoryManager->__memmgrBlockMemLockedMemory__(pGpu, pMemoryManager); +} + +NV_STATUS memmgrInsertUnprotectedRegionAtBottomOfFb_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pSize); + +static inline NV_STATUS memmgrInsertUnprotectedRegionAtBottomOfFb_56cd7a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pSize) { + return NV_OK; +} + +static inline NV_STATUS memmgrInsertUnprotectedRegionAtBottomOfFb_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pSize) { + return pMemoryManager->__memmgrInsertUnprotectedRegionAtBottomOfFb__(pGpu, pMemoryManager, pSize); +} + +void memmgrGetDisablePlcKind_GA100(struct MemoryManager *pMemoryManager, NvU32 *pteKind); + +static inline void memmgrGetDisablePlcKind_b3696a(struct MemoryManager *pMemoryManager, NvU32 *pteKind) { + return; +} + +static inline void memmgrGetDisablePlcKind_DISPATCH(struct MemoryManager *pMemoryManager, NvU32 *pteKind) { + pMemoryManager->__memmgrGetDisablePlcKind__(pMemoryManager, pteKind); +} + +void memmgrEnableDynamicPageOfflining_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +void memmgrEnableDynamicPageOfflining_GA102(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); + +static inline void memmgrEnableDynamicPageOfflining_b3696a(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + return; +} + +static inline void memmgrEnableDynamicPageOfflining_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + pMemoryManager->__memmgrEnableDynamicPageOfflining__(pGpu, pMemoryManager); +} + +NV_STATUS memmgrGetBlackListPages_GM107(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, BLACKLIST_ADDRESS *pBlAddrs, NvU32 *pCount); + +NV_STATUS memmgrGetBlackListPages_GA100(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, BLACKLIST_ADDRESS *pBlAddrs, NvU32 *pCount); + +static inline NV_STATUS memmgrGetBlackListPages_46f6a7(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, BLACKLIST_ADDRESS *pBlAddrs, NvU32 *pCount) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS memmgrGetBlackListPages_DISPATCH(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, BLACKLIST_ADDRESS *pBlAddrs, NvU32 *pCount) { + return pMemoryManager->__memmgrGetBlackListPages__(pGpu, pMemoryManager, pBlAddrs, pCount); +} + +static inline NV_STATUS memmgrReconcileTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return pEngstate->__memmgrReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS memmgrStatePreLoad_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS memmgrStatePostUnload_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS memmgrStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct 
MemoryManager *pEngstate) { + return pEngstate->__memmgrStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void memmgrInitMissing_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + pEngstate->__memmgrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__memmgrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS memmgrGetTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return pEngstate->__memmgrGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS memmgrCompareTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__memmgrCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void memmgrFreeTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + pEngstate->__memmgrFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS memmgrStatePostLoad_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) { + return pEngstate->__memmgrStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS memmgrAllocTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void **ppTunableState) { + return pEngstate->__memmgrAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS memmgrSetTunableState_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) { + return pEngstate->__memmgrSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool memmgrIsPresent_DISPATCH(POBJGPU pGpu, struct MemoryManager *pEngstate) { + return pEngstate->__memmgrIsPresent__(pGpu, pEngstate); +} + +static inline NvBool memmgrIsScrubOnFreeEnabled(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bScrubOnFreeEnabled; +} + +static inline NvBool memmgrIsFastScrubberEnabled(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bFastScrubberEnabled; +} + +static inline NvBool memmgrUseVasForCeMemoryOps(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bUseVasForCeMemoryOps; +} + +static inline NvBool memmgrRmExecutingEccScrub(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bRmExecutingEccScrub; +} + +static inline NvBool memmgrBug1441072EccScrubWar(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bBug1441072EccScrubWar; +} + +static inline NvBool memmgrIsPmaInitialized(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaInitialized; +} + +static inline void memmgrSetPmaInitialized(struct MemoryManager *pMemoryManager, NvBool val) { + pMemoryManager->bPmaInitialized = val; +} + +static inline NvBool memmgrAreFbRegionsSupported(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bFbRegionsSupported; +} + +static inline NvBool memmgrIsPmaSupportedOnPlatform(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaSupportedOnPlatform; +} + +static inline NvBool memmgrIsPmaEnabled(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaEnabled; +} + +static inline NvBool memmgrIsPmaForcePersistence(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaForcePersistence; +} + +static inline void memmgrSetPmaForcePersistence(struct MemoryManager *pMemoryManager, NvBool val) { + pMemoryManager->bPmaForcePersistence = val; +} + 
+static inline NvBool memmgrAreClientPageTablesPmaManaged(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bClientPageTablesPmaManaged; +} + +static inline void memmgrSetClientPageTablesPmaManaged(struct MemoryManager *pMemoryManager, NvBool val) { + pMemoryManager->bClientPageTablesPmaManaged = val; +} + +static inline NvBool memmgrIsPmaAddrTree(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bPmaAddrTree; +} + +static inline NvBool memmgrIsMemoryProtectionEnabledInSw(struct MemoryManager *pMemoryManager) { + return pMemoryManager->bSupportCCProtectedMemoryAlloc; +} + +static inline void memmgrSetMemoryProtectionInSw(struct MemoryManager *pMemoryManager, NvU32 val) { + pMemoryManager->bSupportCCProtectedMemoryAlloc = !!val; +} + +static inline NvU64 memmgrGetRsvdMemoryBase(struct MemoryManager *pMemoryManager) { + return pMemoryManager->rsvdMemoryBase; +} + +static inline NvU32 memmgrGetRsvdMemorySize(struct MemoryManager *pMemoryManager) { + return pMemoryManager->rsvdMemorySize; +} + +void memmgrDestruct_IMPL(struct MemoryManager *pMemoryManager); +#define __nvoc_memmgrDestruct(pMemoryManager) memmgrDestruct_IMPL(pMemoryManager) +NV_STATUS memmgrSavePowerMgmtState_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSavePowerMgmtState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSavePowerMgmtState(pGpu, pMemoryManager) memmgrSavePowerMgmtState_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrRestorePowerMgmtState_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrRestorePowerMgmtState(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrRestorePowerMgmtState(pGpu, pMemoryManager) memmgrRestorePowerMgmtState_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrAllocResources_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo) memmgrAllocResources_IMPL(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrFree_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *arg0, NvHandle arg1, NvHandle arg2, NvHandle arg3, NvU32 arg4, MEMORY_DESCRIPTOR *arg5); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFree(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *arg0, NvHandle arg1, NvHandle arg2, NvHandle arg3, NvU32 arg4, MEMORY_DESCRIPTOR *arg5) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFree(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5) memmgrFree_IMPL(pGpu, pMemoryManager, arg0, 
arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_mem_mgr_h_disabled + +struct Heap *memmgrGetDeviceSuballocator_IMPL(struct MemoryManager *pMemoryManager, NvBool bForceSubheap); +#ifdef __nvoc_mem_mgr_h_disabled +static inline struct Heap *memmgrGetDeviceSuballocator(struct MemoryManager *pMemoryManager, NvBool bForceSubheap) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NULL; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetDeviceSuballocator(pMemoryManager, bForceSubheap) memmgrGetDeviceSuballocator_IMPL(pMemoryManager, bForceSubheap) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemCopy_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, TRANSFER_SURFACE *pSrc, NvU32 size, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemCopy(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, TRANSFER_SURFACE *pSrc, NvU32 size, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemCopy(pMemoryManager, pDst, pSrc, size, flags) memmgrMemCopy_IMPL(pMemoryManager, pDst, pSrc, size, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemSet_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, NvU32 value, NvU32 size, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemSet(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, NvU32 value, NvU32 size, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemSet(pMemoryManager, pDst, value, size, flags) memmgrMemSet_IMPL(pMemoryManager, pDst, value, size, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemWrite_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, void *pBuf, NvU64 size, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemWrite(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pDst, void *pBuf, NvU64 size, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemWrite(pMemoryManager, pDst, pBuf, size, flags) memmgrMemWrite_IMPL(pMemoryManager, pDst, pBuf, size, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemRead_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pSrc, void *pBuf, NvU64 size, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemRead(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pSrc, void *pBuf, NvU64 size, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemRead(pMemoryManager, pSrc, pBuf, size, flags) memmgrMemRead_IMPL(pMemoryManager, pSrc, pBuf, size, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NvU8 *memmgrMemBeginTransfer_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU8 *memmgrMemBeginTransfer(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NULL; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemBeginTransfer(pMemoryManager, pTransferInfo, shadowBufSize, flags) 
memmgrMemBeginTransfer_IMPL(pMemoryManager, pTransferInfo, shadowBufSize, flags) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrMemEndTransfer_IMPL(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrMemEndTransfer(struct MemoryManager *pMemoryManager, TRANSFER_SURFACE *pTransferInfo, NvU64 shadowBufSize, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemEndTransfer(pMemoryManager, pTransferInfo, shadowBufSize, flags) memmgrMemEndTransfer_IMPL(pMemoryManager, pTransferInfo, shadowBufSize, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NvU8 *memmgrMemDescBeginTransfer_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU8 *memmgrMemDescBeginTransfer(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NULL; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemDescBeginTransfer(pMemoryManager, pMemDesc, flags) memmgrMemDescBeginTransfer_IMPL(pMemoryManager, pMemDesc, flags) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrMemDescEndTransfer_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrMemDescEndTransfer(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemDescEndTransfer(pMemoryManager, pMemDesc, flags) memmgrMemDescEndTransfer_IMPL(pMemoryManager, pMemDesc, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrMemDescMemSet_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 value, NvU32 flags); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrMemDescMemSet(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *pMemDesc, NvU32 value, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemDescMemSet(pMemoryManager, pMemDesc, value, flags) memmgrMemDescMemSet_IMPL(pMemoryManager, pMemDesc, value, flags) +#endif //__nvoc_mem_mgr_h_disabled + +NV_ADDRESS_SPACE memmgrAllocGetAddrSpace_IMPL(struct MemoryManager *pMemoryManager, NvU32 flags, NvU32 attr); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_ADDRESS_SPACE memmgrAllocGetAddrSpace(struct MemoryManager *pMemoryManager, NvU32 flags, NvU32 attr) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + NV_ADDRESS_SPACE ret; + portMemSet(&ret, 0, sizeof(NV_ADDRESS_SPACE)); + return ret; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocGetAddrSpace(pMemoryManager, flags, attr) memmgrAllocGetAddrSpace_IMPL(pMemoryManager, flags, attr) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrCreateHeap_IMPL(struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrCreateHeap(struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCreateHeap(pMemoryManager) memmgrCreateHeap_IMPL(pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrGetUsedRamSize_IMPL(OBJGPU *pGpu, struct 
MemoryManager *pMemoryManager, NvU64 *arg0); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetUsedRamSize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetUsedRamSize(pGpu, pMemoryManager, arg0) memmgrGetUsedRamSize_IMPL(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrAllocHwResources_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocHwResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocHwResources(pGpu, pMemoryManager, arg0) memmgrAllocHwResources_IMPL(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrFreeHwResources_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFreeHwResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, FB_ALLOC_INFO *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeHwResources(pGpu, pMemoryManager, arg0) memmgrFreeHwResources_IMPL(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +NvBool memmgrLargePageSupported_IMPL(struct MemoryManager *pMemoryManager, NV_ADDRESS_SPACE arg0); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrLargePageSupported(struct MemoryManager *pMemoryManager, NV_ADDRESS_SPACE arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrLargePageSupported(pMemoryManager, arg0) memmgrLargePageSupported_IMPL(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +NvBool memmgrComprSupported_IMPL(struct MemoryManager *pMemoryManager, NV_ADDRESS_SPACE arg0); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrComprSupported(struct MemoryManager *pMemoryManager, NV_ADDRESS_SPACE arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrComprSupported(pMemoryManager, arg0) memmgrComprSupported_IMPL(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +NvU32 memmgrGetMappableRamSizeMb_IMPL(struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrGetMappableRamSizeMb(struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMappableRamSizeMb(pMemoryManager) memmgrGetMappableRamSizeMb_IMPL(pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +PFB_REGION_DESCRIPTOR memmgrLookupFbRegionByOffset_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr fbOffset, RmPhysAddr fbLimit); +#ifdef __nvoc_mem_mgr_h_disabled +static inline PFB_REGION_DESCRIPTOR memmgrLookupFbRegionByOffset(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, RmPhysAddr fbOffset, RmPhysAddr fbLimit) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NULL; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrLookupFbRegionByOffset(pGpu, pMemoryManager, fbOffset, fbLimit) 
memmgrLookupFbRegionByOffset_IMPL(pGpu, pMemoryManager, fbOffset, fbLimit) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrFillMemdescForPhysAttr_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMEMORY_DESCRIPTOR arg0, ADDRESS_TRANSLATION arg1, NvU64 *arg2, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6, NvU32 *arg7, NvU64 *arg8); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFillMemdescForPhysAttr(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMEMORY_DESCRIPTOR arg0, ADDRESS_TRANSLATION arg1, NvU64 *arg2, NvU32 *arg3, NvU32 *arg4, NvU32 *arg5, NvU32 *arg6, NvU32 *arg7, NvU64 *arg8) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFillMemdescForPhysAttr(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) memmgrFillMemdescForPhysAttr_IMPL(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrSetPlatformPmaSupport_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetPlatformPmaSupport(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetPlatformPmaSupport(pGpu, pMemoryManager) memmgrSetPlatformPmaSupport_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrRegionSetupForPma_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrRegionSetupForPma(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrRegionSetupForPma(pGpu, pMemoryManager) memmgrRegionSetupForPma_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrInitFbRegions_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitFbRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitFbRegions(pGpu, pMemoryManager) memmgrInitFbRegions_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrRegionSetupCommon_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrRegionSetupCommon(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrRegionSetupCommon(pGpu, pMemoryManager) memmgrRegionSetupCommon_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrRegenerateFbRegionPriority_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrRegenerateFbRegionPriority(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrRegenerateFbRegionPriority(pGpu, pMemoryManager) memmgrRegenerateFbRegionPriority_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +NvU32 memmgrInsertFbRegion_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, 
PFB_REGION_DESCRIPTOR arg0); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvU32 memmgrInsertFbRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PFB_REGION_DESCRIPTOR arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return 0; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInsertFbRegion(pGpu, pMemoryManager, arg0) memmgrInsertFbRegion_IMPL(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrDumpFbRegions_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrDumpFbRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrDumpFbRegions(pGpu, pMemoryManager) memmgrDumpFbRegions_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrClearFbRegions_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrClearFbRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrClearFbRegions(pGpu, pMemoryManager) memmgrClearFbRegions_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrReleaseConsoleRegion_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrReleaseConsoleRegion(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrReleaseConsoleRegion(pGpu, pMemoryManager) memmgrReleaseConsoleRegion_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +PMEMORY_DESCRIPTOR memmgrGetReservedConsoleMemDesc_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline PMEMORY_DESCRIPTOR memmgrGetReservedConsoleMemDesc(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NULL; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager) memmgrGetReservedConsoleMemDesc_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrReserveBar2BackingStore_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrReserveBar2BackingStore(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrReserveBar2BackingStore(pGpu, pMemoryManager, arg0) memmgrReserveBar2BackingStore_IMPL(pGpu, pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrCalcReservedFbSpace_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrCalcReservedFbSpace(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrCalcReservedFbSpace(pGpu, pMemoryManager) memmgrCalcReservedFbSpace_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrMemUtilsSetupChannelBufferSizes_IMPL(struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, NvU32 arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void 
memmgrMemUtilsSetupChannelBufferSizes(struct MemoryManager *pMemoryManager, OBJCHANNEL *arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrMemUtilsSetupChannelBufferSizes(pMemoryManager, arg0, arg1) memmgrMemUtilsSetupChannelBufferSizes_IMPL(pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrGetKindComprFromMemDesc_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *arg0, NvU64 offset, NvU32 *kind, COMPR_INFO *pComprInfo); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrGetKindComprFromMemDesc(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *arg0, NvU64 offset, NvU32 *kind, COMPR_INFO *pComprInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetKindComprFromMemDesc(pMemoryManager, arg0, offset, kind, pComprInfo) memmgrGetKindComprFromMemDesc_IMPL(pMemoryManager, arg0, offset, kind, pComprInfo) +#endif //__nvoc_mem_mgr_h_disabled + +NvBool memmgrIsCompressible_IMPL(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *arg0); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NvBool memmgrIsCompressible(struct MemoryManager *pMemoryManager, MEMORY_DESCRIPTOR *arg0) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_FALSE; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrIsCompressible(pMemoryManager, arg0) memmgrIsCompressible_IMPL(pMemoryManager, arg0) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrFillComprInfo_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1, NvU32 arg2, NvU64 arg3, NvU32 arg4, COMPR_INFO *arg5); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFillComprInfo(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 arg0, NvU32 arg1, NvU32 arg2, NvU64 arg3, NvU32 arg4, COMPR_INFO *arg5) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFillComprInfo(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5) memmgrFillComprInfo_IMPL(pGpu, pMemoryManager, arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrComprInfoDisableCompression_IMPL(struct MemoryManager *pMemoryManager, COMPR_INFO *pComprInfo); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrComprInfoDisableCompression(struct MemoryManager *pMemoryManager, COMPR_INFO *pComprInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrComprInfoDisableCompression(pMemoryManager, pComprInfo) memmgrComprInfoDisableCompression_IMPL(pMemoryManager, pComprInfo) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrFillComprInfoUncompressed_IMPL(struct MemoryManager *pMemoryManager, NvU32 kind, COMPR_INFO *pComprInfo); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrFillComprInfoUncompressed(struct MemoryManager *pMemoryManager, NvU32 kind, COMPR_INFO *pComprInfo) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFillComprInfoUncompressed(pMemoryManager, kind, pComprInfo) memmgrFillComprInfoUncompressed_IMPL(pMemoryManager, kind, pComprInfo) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrPmaInitialize_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMA *pPma); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS 
memmgrPmaInitialize(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, PMA *pPma) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPmaInitialize(pGpu, pMemoryManager, pPma) memmgrPmaInitialize_IMPL(pGpu, pMemoryManager, pPma) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrPmaRegisterRegions_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *pHeap, PMA *pPma); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrPmaRegisterRegions(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, struct Heap *pHeap, PMA *pPma) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPmaRegisterRegions(pGpu, pMemoryManager, pHeap, pPma) memmgrPmaRegisterRegions_IMPL(pGpu, pMemoryManager, pHeap, pPma) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrSetMIGPartitionableBAR1Range_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSetMIGPartitionableBAR1Range(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetMIGPartitionableBAR1Range(arg0, arg1) memmgrSetMIGPartitionableBAR1Range_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +struct NV_RANGE memmgrGetMIGPartitionableBAR1Range_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline struct NV_RANGE memmgrGetMIGPartitionableBAR1Range(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + struct NV_RANGE ret; + portMemSet(&ret, 0, sizeof(struct NV_RANGE)); + return ret; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMIGPartitionableBAR1Range(arg0, arg1) memmgrGetMIGPartitionableBAR1Range_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrSetMIGPartitionableMemoryRange_IMPL(OBJGPU *arg0, struct MemoryManager *arg1, struct NV_RANGE arg2); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrSetMIGPartitionableMemoryRange(OBJGPU *arg0, struct MemoryManager *arg1, struct NV_RANGE arg2) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSetMIGPartitionableMemoryRange(arg0, arg1, arg2) memmgrSetMIGPartitionableMemoryRange_IMPL(arg0, arg1, arg2) +#endif //__nvoc_mem_mgr_h_disabled + +struct NV_RANGE memmgrGetMIGPartitionableMemoryRange_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline struct NV_RANGE memmgrGetMIGPartitionableMemoryRange(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + struct NV_RANGE ret; + portMemSet(&ret, 0, sizeof(struct NV_RANGE)); + return ret; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetMIGPartitionableMemoryRange(arg0, arg1) memmgrGetMIGPartitionableMemoryRange_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrFreeMIGGPUInstanceMemory_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle hMemory, struct Heap **ppMemoryPartitionHeap); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrFreeMIGGPUInstanceMemory(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU32 swizzId, NvHandle hMemory, struct Heap **ppMemoryPartitionHeap) { + 
NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, hMemory, ppMemoryPartitionHeap) memmgrFreeMIGGPUInstanceMemory_IMPL(pGpu, pMemoryManager, swizzId, hMemory, ppMemoryPartitionHeap) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrPageLevelPoolsCreate_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrPageLevelPoolsCreate(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPageLevelPoolsCreate(pGpu, pMemoryManager) memmgrPageLevelPoolsCreate_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrPageLevelPoolsDestroy_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrPageLevelPoolsDestroy(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPageLevelPoolsDestroy(pGpu, pMemoryManager) memmgrPageLevelPoolsDestroy_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrPageLevelPoolsGetInfo_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvHandle arg0, struct RM_POOL_ALLOC_MEM_RESERVE_INFO **arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrPageLevelPoolsGetInfo(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvHandle arg0, struct RM_POOL_ALLOC_MEM_RESERVE_INFO **arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrPageLevelPoolsGetInfo(pGpu, pMemoryManager, arg0, arg1) memmgrPageLevelPoolsGetInfo_IMPL(pGpu, pMemoryManager, arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrAllocMIGMemoryAllocationInternalHandles_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrAllocMIGMemoryAllocationInternalHandles(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrAllocMIGMemoryAllocationInternalHandles(pGpu, pMemoryManager) memmgrAllocMIGMemoryAllocationInternalHandles_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrFreeMIGMemoryAllocationInternalHandles_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrFreeMIGMemoryAllocationInternalHandles(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrFreeMIGMemoryAllocationInternalHandles(pGpu, pMemoryManager) memmgrFreeMIGMemoryAllocationInternalHandles_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrGetFreeMemoryForAllMIGGPUInstances_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pBytes); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetFreeMemoryForAllMIGGPUInstances(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, NvU64 *pBytes) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define 
memmgrGetFreeMemoryForAllMIGGPUInstances(pGpu, pMemoryManager, pBytes) memmgrGetFreeMemoryForAllMIGGPUInstances_IMPL(pGpu, pMemoryManager, pBytes) +#endif //__nvoc_mem_mgr_h_disabled + +void memmgrGetTopLevelScrubberStatus_IMPL(OBJGPU *arg0, struct MemoryManager *arg1, NvBool *pbTopLevelScrubberEnabled, NvBool *pbTopLevelScrubberConstructed); +#ifdef __nvoc_mem_mgr_h_disabled +static inline void memmgrGetTopLevelScrubberStatus(OBJGPU *arg0, struct MemoryManager *arg1, NvBool *pbTopLevelScrubberEnabled, NvBool *pbTopLevelScrubberConstructed) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrGetTopLevelScrubberStatus(arg0, arg1, pbTopLevelScrubberEnabled, pbTopLevelScrubberConstructed) memmgrGetTopLevelScrubberStatus_IMPL(arg0, arg1, pbTopLevelScrubberEnabled, pbTopLevelScrubberConstructed) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrSaveAndDestroyTopLevelScrubber_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrSaveAndDestroyTopLevelScrubber(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrSaveAndDestroyTopLevelScrubber(arg0, arg1) memmgrSaveAndDestroyTopLevelScrubber_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrInitSavedTopLevelScrubber_IMPL(OBJGPU *arg0, struct MemoryManager *arg1); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrInitSavedTopLevelScrubber(OBJGPU *arg0, struct MemoryManager *arg1) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrInitSavedTopLevelScrubber(arg0, arg1) memmgrInitSavedTopLevelScrubber_IMPL(arg0, arg1) +#endif //__nvoc_mem_mgr_h_disabled + +NV_STATUS memmgrReserveMemoryForFsp_IMPL(OBJGPU *pGpu, struct MemoryManager *pMemoryManager); +#ifdef __nvoc_mem_mgr_h_disabled +static inline NV_STATUS memmgrReserveMemoryForFsp(OBJGPU *pGpu, struct MemoryManager *pMemoryManager) { + NV_ASSERT_FAILED_PRECOMP("MemoryManager was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_mgr_h_disabled +#define memmgrReserveMemoryForFsp(pGpu, pMemoryManager) memmgrReserveMemoryForFsp_IMPL(pGpu, pMemoryManager) +#endif //__nvoc_mem_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif // MEM_MGR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MEM_MGR_NVOC_H_ diff --git a/src/nvidia/generated/g_mem_nvoc.c b/src/nvidia/generated/g_mem_nvoc.c new file mode 100644 index 000000000..70b77f205 --- /dev/null +++ b/src/nvidia/generated/g_mem_nvoc.c @@ -0,0 +1,454 @@ +#define NVOC_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4789f2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_funcTable_Memory(Memory*); +NV_STATUS __nvoc_ctor_Memory(Memory*, CALL_CONTEXT * arg_pCallContext, 
struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Memory(Memory*); +void __nvoc_dtor_Memory(Memory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Memory; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Memory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Memory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Memory = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_Memory_Memory, + &__nvoc_rtti_Memory_RmResource, + &__nvoc_rtti_Memory_RmResourceCommon, + &__nvoc_rtti_Memory_RsResource, + &__nvoc_rtti_Memory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Memory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Memory), + /*classId=*/ classId(Memory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Memory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Memory, + /*pCastInfo=*/ &__nvoc_castinfo_Memory, + /*pExportInfo=*/ &__nvoc_export_info_Memory +}; + +static NV_STATUS __nvoc_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, 
MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_RmResource_memShareCallback(struct Memory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvU32 __nvoc_thunk_RsResource_memGetRefCount(struct Memory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_memCanCopy(struct Memory *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_memPreDestruct(struct Memory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_memControlLookup(struct Memory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams, ppEntry); +} + +static NvBool __nvoc_thunk_RmResource_memAccessCallback(struct Memory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) 
+#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Memory[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memCtrlCmdGetSurfacePartitionStrideLvm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x410105u, + /*paramSize=*/ sizeof(NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memCtrlCmdGetSurfacePartitionStrideLvm" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memCtrlCmdGetSurfaceInfoLvm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x410110u, + /*paramSize=*/ sizeof(NV0041_CTRL_GET_SURFACE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memCtrlCmdGetSurfaceInfoLvm" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memCtrlCmdGetSurfaceCompressionCoverageLvm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x410112u, + /*paramSize=*/ sizeof(NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memCtrlCmdGetSurfaceCompressionCoverageLvm" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memCtrlCmdSurfaceFlushGpuCache_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x410116u, + /*paramSize=*/ sizeof(NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memCtrlCmdSurfaceFlushGpuCache" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memCtrlCmdGetMemPageSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x410118u, + /*paramSize=*/ sizeof(NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memCtrlCmdGetMemPageSize" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memCtrlCmdSetTag_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x410120u, + /*paramSize=*/ sizeof(NV0041_CTRL_CMD_SET_TAG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memCtrlCmdSetTag" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) memCtrlCmdGetTag_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x410121u, + /*paramSize=*/ 
sizeof(NV0041_CTRL_CMD_GET_TAG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Memory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "memCtrlCmdGetTag" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Memory = +{ + /*numEntries=*/ 7, + /*pExportEntries=*/ __nvoc_exported_method_def_Memory +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_Memory(Memory *pThis) { + __nvoc_memDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Memory(Memory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Memory(Memory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Memory_fail_RmResource; + __nvoc_init_dataField_Memory(pThis); + + status = __nvoc_memConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Memory_fail__init; + goto __nvoc_ctor_Memory_exit; // Success + +__nvoc_ctor_Memory_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_Memory_fail_RmResource: +__nvoc_ctor_Memory_exit: + + return status; +} + +static void __nvoc_init_funcTable_Memory_1(Memory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL; + + pThis->__memControl__ = &memControl_IMPL; + + pThis->__memMap__ = &memMap_IMPL; + + pThis->__memUnmap__ = &memUnmap_IMPL; + + pThis->__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL; + + pThis->__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694; + + pThis->__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL; + + pThis->__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694; + + pThis->__memIsReady__ = &memIsReady_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__memCtrlCmdGetSurfaceCompressionCoverageLvm__ = &memCtrlCmdGetSurfaceCompressionCoverageLvm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__memCtrlCmdGetSurfacePartitionStrideLvm__ = &memCtrlCmdGetSurfacePartitionStrideLvm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__memCtrlCmdGetSurfaceInfoLvm__ = &memCtrlCmdGetSurfaceInfoLvm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__memCtrlCmdSurfaceFlushGpuCache__ = &memCtrlCmdSurfaceFlushGpuCache_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__memCtrlCmdGetMemPageSize__ = &memCtrlCmdGetMemPageSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__memCtrlCmdSetTag__ = &memCtrlCmdSetTag_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__memCtrlCmdGetTag__ = &memCtrlCmdGetTag_IMPL; +#endif + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_Memory_resControl; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMap__ = &__nvoc_thunk_Memory_resMap; + + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmap__ = &__nvoc_thunk_Memory_resUnmap; + + pThis->__nvoc_base_RmResource.__rmresGetMemInterMapParams__ = &__nvoc_thunk_Memory_rmresGetMemInterMapParams; + + pThis->__nvoc_base_RmResource.__rmresCheckMemInterUnmap__ = 
&__nvoc_thunk_Memory_rmresCheckMemInterUnmap; + + pThis->__nvoc_base_RmResource.__rmresGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_rmresGetMemoryMappingDescriptor; + + pThis->__memShareCallback__ = &__nvoc_thunk_RmResource_memShareCallback; + + pThis->__memGetRefCount__ = &__nvoc_thunk_RsResource_memGetRefCount; + + pThis->__memControlFilter__ = &__nvoc_thunk_RsResource_memControlFilter; + + pThis->__memAddAdditionalDependants__ = &__nvoc_thunk_RsResource_memAddAdditionalDependants; + + pThis->__memControl_Prologue__ = &__nvoc_thunk_RmResource_memControl_Prologue; + + pThis->__memCanCopy__ = &__nvoc_thunk_RsResource_memCanCopy; + + pThis->__memMapTo__ = &__nvoc_thunk_RsResource_memMapTo; + + pThis->__memPreDestruct__ = &__nvoc_thunk_RsResource_memPreDestruct; + + pThis->__memUnmapFrom__ = &__nvoc_thunk_RsResource_memUnmapFrom; + + pThis->__memControl_Epilogue__ = &__nvoc_thunk_RmResource_memControl_Epilogue; + + pThis->__memControlLookup__ = &__nvoc_thunk_RsResource_memControlLookup; + + pThis->__memAccessCallback__ = &__nvoc_thunk_RmResource_memAccessCallback; +} + +void __nvoc_init_funcTable_Memory(Memory *pThis) { + __nvoc_init_funcTable_Memory_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_Memory(Memory *pThis) { + pThis->__nvoc_pbase_Memory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_Memory(pThis); +} + +NV_STATUS __nvoc_objCreate_Memory(Memory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Memory *pThis; + + pThis = portMemAllocNonPaged(sizeof(Memory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Memory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Memory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_Memory(pThis); + status = __nvoc_ctor_Memory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Memory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Memory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Memory(Memory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Memory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_mem_nvoc.h b/src/nvidia/generated/g_mem_nvoc.h new file mode 100644 index 000000000..9e59b16f9 --- /dev/null +++ b/src/nvidia/generated/g_mem_nvoc.h @@ -0,0 +1,473 
@@ +#ifndef _G_MEM_NVOC_H_ +#define _G_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_mem_nvoc.h" + +#ifndef _MEMORY_API_H_ +#define _MEMORY_API_H_ + +#include "core/core.h" +#include "resserv/rs_resource.h" +#include "rmapi/rmapi.h" +#include "rmapi/resource.h" + +#include "containers/btree.h" + +#include "ctrl/ctrl0041.h" + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + +struct Heap; + +#ifndef __NVOC_CLASS_Heap_TYPEDEF__ +#define __NVOC_CLASS_Heap_TYPEDEF__ +typedef struct Heap Heap; +#endif /* __NVOC_CLASS_Heap_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Heap +#define __nvoc_class_id_Heap 0x556e9a +#endif /* __nvoc_class_id_Heap */ + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR; +typedef struct PmuMapping PmuMapping; +typedef struct HWRESOURCE_INFO HWRESOURCE_INFO; + +// +// vGPU non-stall interrupt info +// +typedef struct _def_client_vgpu_ns_intr +{ + NvU32 nsSemValue; // Non stall interrupt semaphore value + NvU32 nsSemOffset; // Non stall interrupt semaphore offset. Currently it is always 0. 
+ NvBool isSemaMemValidationEnabled; // Enable change in Non stall interrupt sema value check + // while generating event + NvU64 guestDomainId; // guest ID that we need to use to inject interrupt + NvU64 guestMSIAddr; // MSI address allocated by guest OS + NvU32 guestMSIData; // MSI data value set by guest OS + void *pVgpuVfioRef; // Reference to vgpu device in nvidia-vgpu-vfio module + void *pVmBusHostChannel; // VmBus Host channel to communicated the event with the Guest + void *pEventDpc; // DPC event to pass the interrupt +} VGPU_NS_INTR; + +typedef struct +{ + struct Memory *pNext; + struct Memory *pPrev; +} memCircularListItem; + +/*! + * RM internal class representing NV01_MEMORY_XXX + * + * @note Memory cannot be a GpuResource because NoDeviceMemory + * subclass is not allocated under a device. + */ +#ifdef NVOC_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Memory { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + NV_STATUS (*__memGetMapAddrSpace__)(struct Memory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__memControl__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__memMap__)(struct Memory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__memUnmap__)(struct Memory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__memGetMemInterMapParams__)(struct Memory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__memCheckMemInterUnmap__)(struct Memory *, NvBool); + NV_STATUS (*__memGetMemoryMappingDescriptor__)(struct Memory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__memCheckCopyPermissions__)(struct Memory *, struct OBJGPU *, NvHandle); + NV_STATUS (*__memIsReady__)(struct Memory *); + NV_STATUS (*__memCtrlCmdGetSurfaceCompressionCoverageLvm__)(struct Memory *, NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS *); + NV_STATUS (*__memCtrlCmdGetSurfacePartitionStrideLvm__)(struct Memory *, NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS *); + NV_STATUS (*__memCtrlCmdGetSurfaceInfoLvm__)(struct Memory *, NV0041_CTRL_GET_SURFACE_INFO_PARAMS *); + NV_STATUS (*__memCtrlCmdSurfaceFlushGpuCache__)(struct Memory *, NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS *); + NV_STATUS (*__memCtrlCmdGetMemPageSize__)(struct Memory *, NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS *); + NV_STATUS (*__memCtrlCmdSetTag__)(struct Memory *, NV0041_CTRL_CMD_SET_TAG_PARAMS *); + NV_STATUS (*__memCtrlCmdGetTag__)(struct Memory *, NV0041_CTRL_CMD_GET_TAG_PARAMS *); + NvBool (*__memShareCallback__)(struct Memory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvU32 (*__memGetRefCount__)(struct Memory *); + NV_STATUS (*__memControlFilter__)(struct Memory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__memAddAdditionalDependants__)(struct RsClient *, struct Memory *, RsResourceRef *); + NV_STATUS (*__memControl_Prologue__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__memCanCopy__)(struct Memory *); + NV_STATUS (*__memMapTo__)(struct Memory *, RS_RES_MAP_TO_PARAMS *); + void (*__memPreDestruct__)(struct Memory *); + NV_STATUS (*__memUnmapFrom__)(struct Memory *, RS_RES_UNMAP_FROM_PARAMS *); + void 
(*__memControl_Epilogue__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__memControlLookup__)(struct Memory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvBool (*__memAccessCallback__)(struct Memory *, struct RsClient *, void *, RsAccessRight); + NvBool bConstructed; + struct Device *pDevice; + struct Subdevice *pSubDevice; + struct OBJGPU *pGpu; + NvBool bBcResource; + NvU32 categoryClassId; + NvU64 Length; + NvU32 HeapOwner; + NvU32 RefCount; + struct Heap *pHeap; + MEMORY_DESCRIPTOR *pMemDesc; + NvBool isMemDescOwner; + memCircularListItem dupListItem; + NvP64 KernelVAddr; + NvP64 KernelMapPriv; + PmuMapping *pPmuMappingList; + NODE Node; + NvU32 Attr; + NvU32 Attr2; + NvU32 Pitch; + NvU32 Type; + NvU32 Flags; + NvU32 tag; + NvU64 osDeviceHandle; + HWRESOURCE_INFO *pHwResource; + NvBool bRpcAlloc; + VGPU_NS_INTR vgpuNsIntr; +}; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +#define __staticCast_Memory(pThis) \ + ((pThis)->__nvoc_pbase_Memory) + +#ifdef __nvoc_mem_h_disabled +#define __dynamicCast_Memory(pThis) ((Memory*)NULL) +#else //__nvoc_mem_h_disabled +#define __dynamicCast_Memory(pThis) \ + ((Memory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Memory))) +#endif //__nvoc_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Memory(Memory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Memory(Memory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Memory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Memory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define memGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) memGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define memControl(pMemory, pCallContext, pParams) memControl_DISPATCH(pMemory, pCallContext, pParams) +#define memMap(pMemory, pCallContext, pParams, pCpuMapping) memMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define memUnmap(pMemory, pCallContext, pCpuMapping) memUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define memGetMemInterMapParams(pMemory, pParams) memGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define memCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) memCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define memGetMemoryMappingDescriptor(pMemory, ppMemDesc) memGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define memCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) memCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define memIsReady(pMemory) memIsReady_DISPATCH(pMemory) +#define memCtrlCmdGetSurfaceCompressionCoverageLvm(pMemory, pParams) memCtrlCmdGetSurfaceCompressionCoverageLvm_DISPATCH(pMemory, pParams) +#define memCtrlCmdGetSurfacePartitionStrideLvm(pMemory, pParams) memCtrlCmdGetSurfacePartitionStrideLvm_DISPATCH(pMemory, pParams) +#define memCtrlCmdGetSurfaceInfoLvm(pMemory, pSurfaceInfoParams) memCtrlCmdGetSurfaceInfoLvm_DISPATCH(pMemory, pSurfaceInfoParams) +#define memCtrlCmdSurfaceFlushGpuCache(pMemory, pCacheFlushParams) 
memCtrlCmdSurfaceFlushGpuCache_DISPATCH(pMemory, pCacheFlushParams) +#define memCtrlCmdGetMemPageSize(pMemory, pPageSizeParams) memCtrlCmdGetMemPageSize_DISPATCH(pMemory, pPageSizeParams) +#define memCtrlCmdSetTag(pMemory, pParams) memCtrlCmdSetTag_DISPATCH(pMemory, pParams) +#define memCtrlCmdGetTag(pMemory, pParams) memCtrlCmdGetTag_DISPATCH(pMemory, pParams) +#define memShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) memShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define memGetRefCount(pResource) memGetRefCount_DISPATCH(pResource) +#define memControlFilter(pResource, pCallContext, pParams) memControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define memAddAdditionalDependants(pClient, pResource, pReference) memAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define memControl_Prologue(pResource, pCallContext, pParams) memControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define memCanCopy(pResource) memCanCopy_DISPATCH(pResource) +#define memMapTo(pResource, pParams) memMapTo_DISPATCH(pResource, pParams) +#define memPreDestruct(pResource) memPreDestruct_DISPATCH(pResource) +#define memUnmapFrom(pResource, pParams) memUnmapFrom_DISPATCH(pResource, pParams) +#define memControl_Epilogue(pResource, pCallContext, pParams) memControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define memControlLookup(pResource, pParams, ppEntry) memControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define memAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) memAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS memGetMapAddrSpace_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS memGetMapAddrSpace_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__memGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS memControl_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS memControl_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__memControl__(pMemory, pCallContext, pParams); +} + +NV_STATUS memMap_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS memMap_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__memMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS memUnmap_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS memUnmap_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__memUnmap__(pMemory, pCallContext, pCpuMapping); +} + +NV_STATUS memGetMemInterMapParams_IMPL(struct Memory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS memGetMemInterMapParams_DISPATCH(struct Memory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__memGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS memCheckMemInterUnmap_ac1694(struct Memory *pMemory, NvBool bSubdeviceHandleProvided) { + return NV_OK; +} + +static inline NV_STATUS 
memCheckMemInterUnmap_DISPATCH(struct Memory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__memCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +NV_STATUS memGetMemoryMappingDescriptor_IMPL(struct Memory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc); + +static inline NV_STATUS memGetMemoryMappingDescriptor_DISPATCH(struct Memory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__memGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS memCheckCopyPermissions_ac1694(struct Memory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return NV_OK; +} + +static inline NV_STATUS memCheckCopyPermissions_DISPATCH(struct Memory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__memCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +NV_STATUS memIsReady_IMPL(struct Memory *pMemory); + +static inline NV_STATUS memIsReady_DISPATCH(struct Memory *pMemory) { + return pMemory->__memIsReady__(pMemory); +} + +NV_STATUS memCtrlCmdGetSurfaceCompressionCoverageLvm_IMPL(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS *pParams); + +static inline NV_STATUS memCtrlCmdGetSurfaceCompressionCoverageLvm_DISPATCH(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS *pParams) { + return pMemory->__memCtrlCmdGetSurfaceCompressionCoverageLvm__(pMemory, pParams); +} + +NV_STATUS memCtrlCmdGetSurfacePartitionStrideLvm_IMPL(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS *pParams); + +static inline NV_STATUS memCtrlCmdGetSurfacePartitionStrideLvm_DISPATCH(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS *pParams) { + return pMemory->__memCtrlCmdGetSurfacePartitionStrideLvm__(pMemory, pParams); +} + +NV_STATUS memCtrlCmdGetSurfaceInfoLvm_IMPL(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_INFO_PARAMS *pSurfaceInfoParams); + +static inline NV_STATUS memCtrlCmdGetSurfaceInfoLvm_DISPATCH(struct Memory *pMemory, NV0041_CTRL_GET_SURFACE_INFO_PARAMS *pSurfaceInfoParams) { + return pMemory->__memCtrlCmdGetSurfaceInfoLvm__(pMemory, pSurfaceInfoParams); +} + +NV_STATUS memCtrlCmdSurfaceFlushGpuCache_IMPL(struct Memory *pMemory, NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS *pCacheFlushParams); + +static inline NV_STATUS memCtrlCmdSurfaceFlushGpuCache_DISPATCH(struct Memory *pMemory, NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS *pCacheFlushParams) { + return pMemory->__memCtrlCmdSurfaceFlushGpuCache__(pMemory, pCacheFlushParams); +} + +NV_STATUS memCtrlCmdGetMemPageSize_IMPL(struct Memory *pMemory, NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS *pPageSizeParams); + +static inline NV_STATUS memCtrlCmdGetMemPageSize_DISPATCH(struct Memory *pMemory, NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS *pPageSizeParams) { + return pMemory->__memCtrlCmdGetMemPageSize__(pMemory, pPageSizeParams); +} + +NV_STATUS memCtrlCmdSetTag_IMPL(struct Memory *pMemory, NV0041_CTRL_CMD_SET_TAG_PARAMS *pParams); + +static inline NV_STATUS memCtrlCmdSetTag_DISPATCH(struct Memory *pMemory, NV0041_CTRL_CMD_SET_TAG_PARAMS *pParams) { + return pMemory->__memCtrlCmdSetTag__(pMemory, pParams); +} + +NV_STATUS memCtrlCmdGetTag_IMPL(struct Memory *pMemory, NV0041_CTRL_CMD_GET_TAG_PARAMS *pParams); + +static inline NV_STATUS memCtrlCmdGetTag_DISPATCH(struct Memory *pMemory, NV0041_CTRL_CMD_GET_TAG_PARAMS *pParams) { + return pMemory->__memCtrlCmdGetTag__(pMemory, pParams); +} + +static inline NvBool memShareCallback_DISPATCH(struct Memory *pResource, struct RsClient 
*pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__memShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvU32 memGetRefCount_DISPATCH(struct Memory *pResource) { + return pResource->__memGetRefCount__(pResource); +} + +static inline NV_STATUS memControlFilter_DISPATCH(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__memControlFilter__(pResource, pCallContext, pParams); +} + +static inline void memAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference) { + pResource->__memAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS memControl_Prologue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__memControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool memCanCopy_DISPATCH(struct Memory *pResource) { + return pResource->__memCanCopy__(pResource); +} + +static inline NV_STATUS memMapTo_DISPATCH(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__memMapTo__(pResource, pParams); +} + +static inline void memPreDestruct_DISPATCH(struct Memory *pResource) { + pResource->__memPreDestruct__(pResource); +} + +static inline NV_STATUS memUnmapFrom_DISPATCH(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__memUnmapFrom__(pResource, pParams); +} + +static inline void memControl_Epilogue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__memControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS memControlLookup_DISPATCH(struct Memory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__memControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvBool memAccessCallback_DISPATCH(struct Memory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__memAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS memConstruct_IMPL(struct Memory *arg_pMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_memConstruct(arg_pMemory, arg_pCallContext, arg_pParams) memConstruct_IMPL(arg_pMemory, arg_pCallContext, arg_pParams) +NV_STATUS memCopyConstruct_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_mem_h_disabled +static inline NV_STATUS memCopyConstruct(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memCopyConstruct(pMemory, pCallContext, pParams) memCopyConstruct_IMPL(pMemory, pCallContext, pParams) +#endif //__nvoc_mem_h_disabled + +void memDestruct_IMPL(struct Memory *pMemory); +#define __nvoc_memDestruct(pMemory) memDestruct_IMPL(pMemory) +NV_STATUS memConstructCommon_IMPL(struct Memory *pMemory, NvU32 categoryClassId, NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc, NvU32 heapOwner, struct Heap *pHeap, NvU32 attr, NvU32 attr2, NvU32 Pitch, NvU32 type, NvU32 tag, HWRESOURCE_INFO *pHwResource); +#ifdef 
__nvoc_mem_h_disabled +static inline NV_STATUS memConstructCommon(struct Memory *pMemory, NvU32 categoryClassId, NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc, NvU32 heapOwner, struct Heap *pHeap, NvU32 attr, NvU32 attr2, NvU32 Pitch, NvU32 type, NvU32 tag, HWRESOURCE_INFO *pHwResource) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memConstructCommon(pMemory, categoryClassId, flags, pMemDesc, heapOwner, pHeap, attr, attr2, Pitch, type, tag, pHwResource) memConstructCommon_IMPL(pMemory, categoryClassId, flags, pMemDesc, heapOwner, pHeap, attr, attr2, Pitch, type, tag, pHwResource) +#endif //__nvoc_mem_h_disabled + +void memDestructCommon_IMPL(struct Memory *pMemory); +#ifdef __nvoc_mem_h_disabled +static inline void memDestructCommon(struct Memory *pMemory) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); +} +#else //__nvoc_mem_h_disabled +#define memDestructCommon(pMemory) memDestructCommon_IMPL(pMemory) +#endif //__nvoc_mem_h_disabled + +NV_STATUS memCreateMemDesc_IMPL(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NV_ADDRESS_SPACE addrSpace, NvU64 FBOffset, NvU64 length, NvU32 attr, NvU32 attr2); +#define memCreateMemDesc(pGpu, ppMemDesc, addrSpace, FBOffset, length, attr, attr2) memCreateMemDesc_IMPL(pGpu, ppMemDesc, addrSpace, FBOffset, length, attr, attr2) +NV_STATUS memCreateKernelMapping_IMPL(struct Memory *pMemory, NvU32 Protect, NvBool bClear); +#ifdef __nvoc_mem_h_disabled +static inline NV_STATUS memCreateKernelMapping(struct Memory *pMemory, NvU32 Protect, NvBool bClear) { + NV_ASSERT_FAILED_PRECOMP("Memory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_mem_h_disabled +#define memCreateKernelMapping(pMemory, Protect, bClear) memCreateKernelMapping_IMPL(pMemory, Protect, bClear) +#endif //__nvoc_mem_h_disabled + +NV_STATUS memGetByHandle_IMPL(struct RsClient *pClient, NvHandle hMemory, struct Memory **ppMemory); +#define memGetByHandle(pClient, hMemory, ppMemory) memGetByHandle_IMPL(pClient, hMemory, ppMemory) +NV_STATUS memGetByHandleAndDevice_IMPL(struct RsClient *pClient, NvHandle hMemory, NvHandle hDevice, struct Memory **ppMemory); +#define memGetByHandleAndDevice(pClient, hMemory, hDevice, ppMemory) memGetByHandleAndDevice_IMPL(pClient, hMemory, hDevice, ppMemory) +NV_STATUS memGetByHandleAndGroupedGpu_IMPL(struct RsClient *pClient, NvHandle hMemory, struct OBJGPU *pGpu, struct Memory **ppMemory); +#define memGetByHandleAndGroupedGpu(pClient, hMemory, pGpu, ppMemory) memGetByHandleAndGroupedGpu_IMPL(pClient, hMemory, pGpu, ppMemory) +#undef PRIVATE_FIELD + + +#endif + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_mig_config_session_nvoc.c b/src/nvidia/generated/g_mig_config_session_nvoc.c new file mode 100644 index 000000000..e2555681e --- /dev/null +++ b/src/nvidia/generated/g_mig_config_session_nvoc.c @@ -0,0 +1,294 @@ +#define NVOC_MIG_CONFIG_SESSION_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mig_config_session_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x36a941 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MIGConfigSession; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF 
__nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_MIGConfigSession(MIGConfigSession*); +void __nvoc_init_funcTable_MIGConfigSession(MIGConfigSession*); +NV_STATUS __nvoc_ctor_MIGConfigSession(MIGConfigSession*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_MIGConfigSession(MIGConfigSession*); +void __nvoc_dtor_MIGConfigSession(MIGConfigSession*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MIGConfigSession; + +static const struct NVOC_RTTI __nvoc_rtti_MIGConfigSession_MIGConfigSession = { + /*pClassDef=*/ &__nvoc_class_def_MIGConfigSession, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MIGConfigSession, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MIGConfigSession_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MIGConfigSession, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MIGConfigSession_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MIGConfigSession, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MIGConfigSession_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MIGConfigSession, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MIGConfigSession_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MIGConfigSession, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MIGConfigSession = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_MIGConfigSession_MIGConfigSession, + &__nvoc_rtti_MIGConfigSession_RmResource, + &__nvoc_rtti_MIGConfigSession_RmResourceCommon, + &__nvoc_rtti_MIGConfigSession_RsResource, + &__nvoc_rtti_MIGConfigSession_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MIGConfigSession = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MIGConfigSession), + /*classId=*/ classId(MIGConfigSession), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MIGConfigSession", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MIGConfigSession, + /*pCastInfo=*/ &__nvoc_castinfo_MIGConfigSession, + /*pExportInfo=*/ &__nvoc_export_info_MIGConfigSession +}; + +static NvBool __nvoc_thunk_RmResource_migconfigsessionShareCallback(struct MIGConfigSession *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_migconfigsessionCheckMemInterUnmap(struct MIGConfigSession *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MIGConfigSession_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_migconfigsessionControl(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL 
*pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_migconfigsessionGetMemInterMapParams(struct MIGConfigSession *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MIGConfigSession_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_migconfigsessionGetMemoryMappingDescriptor(struct MIGConfigSession *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MIGConfigSession_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_migconfigsessionGetRefCount(struct MIGConfigSession *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_migconfigsessionControlFilter(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_migconfigsessionAddAdditionalDependants(struct RsClient *pClient, struct MIGConfigSession *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_migconfigsessionUnmap(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_migconfigsessionControl_Prologue(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_migconfigsessionCanCopy(struct MIGConfigSession *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_migconfigsessionMapTo(struct MIGConfigSession *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_migconfigsessionPreDestruct(struct MIGConfigSession *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_migconfigsessionUnmapFrom(struct MIGConfigSession *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_migconfigsessionControl_Epilogue(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_migconfigsessionControlLookup(struct MIGConfigSession *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_migconfigsessionMap(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_migconfigsessionAccessCallback(struct MIGConfigSession *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGConfigSession_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_MIGConfigSession = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_MIGConfigSession(MIGConfigSession *pThis) { + __nvoc_migconfigsessionDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MIGConfigSession(MIGConfigSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_MIGConfigSession(MIGConfigSession *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MIGConfigSession_fail_RmResource; + __nvoc_init_dataField_MIGConfigSession(pThis); + + status = __nvoc_migconfigsessionConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MIGConfigSession_fail__init; + goto __nvoc_ctor_MIGConfigSession_exit; // Success + +__nvoc_ctor_MIGConfigSession_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_MIGConfigSession_fail_RmResource: +__nvoc_ctor_MIGConfigSession_exit: + + return status; +} + +static void __nvoc_init_funcTable_MIGConfigSession_1(MIGConfigSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__migconfigsessionShareCallback__ = &__nvoc_thunk_RmResource_migconfigsessionShareCallback; + + pThis->__migconfigsessionCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_migconfigsessionCheckMemInterUnmap; + + pThis->__migconfigsessionControl__ = &__nvoc_thunk_RsResource_migconfigsessionControl; + + pThis->__migconfigsessionGetMemInterMapParams__ = &__nvoc_thunk_RmResource_migconfigsessionGetMemInterMapParams; + + pThis->__migconfigsessionGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_migconfigsessionGetMemoryMappingDescriptor; + + pThis->__migconfigsessionGetRefCount__ = &__nvoc_thunk_RsResource_migconfigsessionGetRefCount; + + pThis->__migconfigsessionControlFilter__ = 
&__nvoc_thunk_RsResource_migconfigsessionControlFilter; + + pThis->__migconfigsessionAddAdditionalDependants__ = &__nvoc_thunk_RsResource_migconfigsessionAddAdditionalDependants; + + pThis->__migconfigsessionUnmap__ = &__nvoc_thunk_RsResource_migconfigsessionUnmap; + + pThis->__migconfigsessionControl_Prologue__ = &__nvoc_thunk_RmResource_migconfigsessionControl_Prologue; + + pThis->__migconfigsessionCanCopy__ = &__nvoc_thunk_RsResource_migconfigsessionCanCopy; + + pThis->__migconfigsessionMapTo__ = &__nvoc_thunk_RsResource_migconfigsessionMapTo; + + pThis->__migconfigsessionPreDestruct__ = &__nvoc_thunk_RsResource_migconfigsessionPreDestruct; + + pThis->__migconfigsessionUnmapFrom__ = &__nvoc_thunk_RsResource_migconfigsessionUnmapFrom; + + pThis->__migconfigsessionControl_Epilogue__ = &__nvoc_thunk_RmResource_migconfigsessionControl_Epilogue; + + pThis->__migconfigsessionControlLookup__ = &__nvoc_thunk_RsResource_migconfigsessionControlLookup; + + pThis->__migconfigsessionMap__ = &__nvoc_thunk_RsResource_migconfigsessionMap; + + pThis->__migconfigsessionAccessCallback__ = &__nvoc_thunk_RmResource_migconfigsessionAccessCallback; +} + +void __nvoc_init_funcTable_MIGConfigSession(MIGConfigSession *pThis) { + __nvoc_init_funcTable_MIGConfigSession_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_MIGConfigSession(MIGConfigSession *pThis) { + pThis->__nvoc_pbase_MIGConfigSession = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_MIGConfigSession(pThis); +} + +NV_STATUS __nvoc_objCreate_MIGConfigSession(MIGConfigSession **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + MIGConfigSession *pThis; + + pThis = portMemAllocNonPaged(sizeof(MIGConfigSession)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MIGConfigSession)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MIGConfigSession); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_MIGConfigSession(pThis); + status = __nvoc_ctor_MIGConfigSession(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_MIGConfigSession_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MIGConfigSession_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MIGConfigSession(MIGConfigSession **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = 
__nvoc_objCreate_MIGConfigSession(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_mig_config_session_nvoc.h b/src/nvidia/generated/g_mig_config_session_nvoc.h new file mode 100644 index 000000000..5ce5f3adb --- /dev/null +++ b/src/nvidia/generated/g_mig_config_session_nvoc.h @@ -0,0 +1,219 @@ +#ifndef _G_MIG_CONFIG_SESSION_NVOC_H_ +#define _G_MIG_CONFIG_SESSION_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing MIG configuration + * + * Key attributes of MIGConfigSession class: + * - hClient is parent of MIGConfigSession. + * - MIGConfigSession can be allocated by privileged client. + * - RmApi lock must be held. 
+ *****************************************************************************/ + +#include "g_mig_config_session_nvoc.h" + +#ifndef MIG_CONFIG_SESSION_H +#define MIG_CONFIG_SESSION_H + +#include "rmapi/resource.h" +#include "class/clc639.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +#ifdef NVOC_MIG_CONFIG_SESSION_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct MIGConfigSession { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct MIGConfigSession *__nvoc_pbase_MIGConfigSession; + NvBool (*__migconfigsessionShareCallback__)(struct MIGConfigSession *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__migconfigsessionCheckMemInterUnmap__)(struct MIGConfigSession *, NvBool); + NV_STATUS (*__migconfigsessionControl__)(struct MIGConfigSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__migconfigsessionGetMemInterMapParams__)(struct MIGConfigSession *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__migconfigsessionGetMemoryMappingDescriptor__)(struct MIGConfigSession *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__migconfigsessionGetRefCount__)(struct MIGConfigSession *); + NV_STATUS (*__migconfigsessionControlFilter__)(struct MIGConfigSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__migconfigsessionAddAdditionalDependants__)(struct RsClient *, struct MIGConfigSession *, RsResourceRef *); + NV_STATUS (*__migconfigsessionUnmap__)(struct MIGConfigSession *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__migconfigsessionControl_Prologue__)(struct MIGConfigSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__migconfigsessionCanCopy__)(struct MIGConfigSession *); + NV_STATUS (*__migconfigsessionMapTo__)(struct MIGConfigSession *, RS_RES_MAP_TO_PARAMS *); + void (*__migconfigsessionPreDestruct__)(struct MIGConfigSession *); + NV_STATUS (*__migconfigsessionUnmapFrom__)(struct MIGConfigSession *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__migconfigsessionControl_Epilogue__)(struct MIGConfigSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__migconfigsessionControlLookup__)(struct MIGConfigSession *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__migconfigsessionMap__)(struct MIGConfigSession *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__migconfigsessionAccessCallback__)(struct MIGConfigSession *, struct RsClient *, void *, RsAccessRight); + NvU64 dupedCapDescriptor; +}; + +#ifndef __NVOC_CLASS_MIGConfigSession_TYPEDEF__ +#define __NVOC_CLASS_MIGConfigSession_TYPEDEF__ +typedef struct MIGConfigSession MIGConfigSession; +#endif /* __NVOC_CLASS_MIGConfigSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGConfigSession +#define __nvoc_class_id_MIGConfigSession 0x36a941 +#endif /* __nvoc_class_id_MIGConfigSession */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MIGConfigSession; + +#define __staticCast_MIGConfigSession(pThis) \ + ((pThis)->__nvoc_pbase_MIGConfigSession) + +#ifdef 
__nvoc_mig_config_session_h_disabled +#define __dynamicCast_MIGConfigSession(pThis) ((MIGConfigSession*)NULL) +#else //__nvoc_mig_config_session_h_disabled +#define __dynamicCast_MIGConfigSession(pThis) \ + ((MIGConfigSession*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MIGConfigSession))) +#endif //__nvoc_mig_config_session_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_MIGConfigSession(MIGConfigSession**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MIGConfigSession(MIGConfigSession**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_MIGConfigSession(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_MIGConfigSession((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define migconfigsessionShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) migconfigsessionShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define migconfigsessionCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) migconfigsessionCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define migconfigsessionControl(pResource, pCallContext, pParams) migconfigsessionControl_DISPATCH(pResource, pCallContext, pParams) +#define migconfigsessionGetMemInterMapParams(pRmResource, pParams) migconfigsessionGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define migconfigsessionGetMemoryMappingDescriptor(pRmResource, ppMemDesc) migconfigsessionGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define migconfigsessionGetRefCount(pResource) migconfigsessionGetRefCount_DISPATCH(pResource) +#define migconfigsessionControlFilter(pResource, pCallContext, pParams) migconfigsessionControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define migconfigsessionAddAdditionalDependants(pClient, pResource, pReference) migconfigsessionAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define migconfigsessionUnmap(pResource, pCallContext, pCpuMapping) migconfigsessionUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define migconfigsessionControl_Prologue(pResource, pCallContext, pParams) migconfigsessionControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define migconfigsessionCanCopy(pResource) migconfigsessionCanCopy_DISPATCH(pResource) +#define migconfigsessionMapTo(pResource, pParams) migconfigsessionMapTo_DISPATCH(pResource, pParams) +#define migconfigsessionPreDestruct(pResource) migconfigsessionPreDestruct_DISPATCH(pResource) +#define migconfigsessionUnmapFrom(pResource, pParams) migconfigsessionUnmapFrom_DISPATCH(pResource, pParams) +#define migconfigsessionControl_Epilogue(pResource, pCallContext, pParams) migconfigsessionControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define migconfigsessionControlLookup(pResource, pParams, ppEntry) migconfigsessionControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define migconfigsessionMap(pResource, pCallContext, pParams, pCpuMapping) migconfigsessionMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define migconfigsessionAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) migconfigsessionAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool migconfigsessionShareCallback_DISPATCH(struct MIGConfigSession *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { 
+ return pResource->__migconfigsessionShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS migconfigsessionCheckMemInterUnmap_DISPATCH(struct MIGConfigSession *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__migconfigsessionCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS migconfigsessionControl_DISPATCH(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__migconfigsessionControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS migconfigsessionGetMemInterMapParams_DISPATCH(struct MIGConfigSession *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__migconfigsessionGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS migconfigsessionGetMemoryMappingDescriptor_DISPATCH(struct MIGConfigSession *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__migconfigsessionGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 migconfigsessionGetRefCount_DISPATCH(struct MIGConfigSession *pResource) { + return pResource->__migconfigsessionGetRefCount__(pResource); +} + +static inline NV_STATUS migconfigsessionControlFilter_DISPATCH(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__migconfigsessionControlFilter__(pResource, pCallContext, pParams); +} + +static inline void migconfigsessionAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct MIGConfigSession *pResource, RsResourceRef *pReference) { + pResource->__migconfigsessionAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS migconfigsessionUnmap_DISPATCH(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__migconfigsessionUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS migconfigsessionControl_Prologue_DISPATCH(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__migconfigsessionControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool migconfigsessionCanCopy_DISPATCH(struct MIGConfigSession *pResource) { + return pResource->__migconfigsessionCanCopy__(pResource); +} + +static inline NV_STATUS migconfigsessionMapTo_DISPATCH(struct MIGConfigSession *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__migconfigsessionMapTo__(pResource, pParams); +} + +static inline void migconfigsessionPreDestruct_DISPATCH(struct MIGConfigSession *pResource) { + pResource->__migconfigsessionPreDestruct__(pResource); +} + +static inline NV_STATUS migconfigsessionUnmapFrom_DISPATCH(struct MIGConfigSession *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__migconfigsessionUnmapFrom__(pResource, pParams); +} + +static inline void migconfigsessionControl_Epilogue_DISPATCH(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__migconfigsessionControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS migconfigsessionControlLookup_DISPATCH(struct MIGConfigSession *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return 
pResource->__migconfigsessionControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS migconfigsessionMap_DISPATCH(struct MIGConfigSession *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__migconfigsessionMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool migconfigsessionAccessCallback_DISPATCH(struct MIGConfigSession *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__migconfigsessionAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS migconfigsessionConstruct_IMPL(struct MIGConfigSession *arg_pMIGConfigSession, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_migconfigsessionConstruct(arg_pMIGConfigSession, arg_pCallContext, arg_pParams) migconfigsessionConstruct_IMPL(arg_pMIGConfigSession, arg_pCallContext, arg_pParams) +void migconfigsessionDestruct_IMPL(struct MIGConfigSession *pMIGConfigSession); +#define __nvoc_migconfigsessionDestruct(pMIGConfigSession) migconfigsessionDestruct_IMPL(pMIGConfigSession) +#undef PRIVATE_FIELD + + +#endif // MIG_CONFIG_SESSION_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MIG_CONFIG_SESSION_NVOC_H_ diff --git a/src/nvidia/generated/g_mig_monitor_session_nvoc.c b/src/nvidia/generated/g_mig_monitor_session_nvoc.c new file mode 100644 index 000000000..234ef2eeb --- /dev/null +++ b/src/nvidia/generated/g_mig_monitor_session_nvoc.c @@ -0,0 +1,294 @@ +#define NVOC_MIG_MONITOR_SESSION_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mig_monitor_session_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x29e15c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MIGMonitorSession; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_MIGMonitorSession(MIGMonitorSession*); +void __nvoc_init_funcTable_MIGMonitorSession(MIGMonitorSession*); +NV_STATUS __nvoc_ctor_MIGMonitorSession(MIGMonitorSession*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_MIGMonitorSession(MIGMonitorSession*); +void __nvoc_dtor_MIGMonitorSession(MIGMonitorSession*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MIGMonitorSession; + +static const struct NVOC_RTTI __nvoc_rtti_MIGMonitorSession_MIGMonitorSession = { + /*pClassDef=*/ &__nvoc_class_def_MIGMonitorSession, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MIGMonitorSession, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MIGMonitorSession_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MIGMonitorSession, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MIGMonitorSession_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MIGMonitorSession, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct 
NVOC_RTTI __nvoc_rtti_MIGMonitorSession_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MIGMonitorSession, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MIGMonitorSession_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MIGMonitorSession, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MIGMonitorSession = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_MIGMonitorSession_MIGMonitorSession, + &__nvoc_rtti_MIGMonitorSession_RmResource, + &__nvoc_rtti_MIGMonitorSession_RmResourceCommon, + &__nvoc_rtti_MIGMonitorSession_RsResource, + &__nvoc_rtti_MIGMonitorSession_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MIGMonitorSession = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MIGMonitorSession), + /*classId=*/ classId(MIGMonitorSession), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MIGMonitorSession", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MIGMonitorSession, + /*pCastInfo=*/ &__nvoc_castinfo_MIGMonitorSession, + /*pExportInfo=*/ &__nvoc_export_info_MIGMonitorSession +}; + +static NvBool __nvoc_thunk_RmResource_migmonitorsessionShareCallback(struct MIGMonitorSession *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_migmonitorsessionCheckMemInterUnmap(struct MIGMonitorSession *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MIGMonitorSession_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_migmonitorsessionControl(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_migmonitorsessionGetMemInterMapParams(struct MIGMonitorSession *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MIGMonitorSession_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_migmonitorsessionGetMemoryMappingDescriptor(struct MIGMonitorSession *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MIGMonitorSession_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_migmonitorsessionGetRefCount(struct MIGMonitorSession *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_migmonitorsessionControlFilter(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_MIGMonitorSession_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_migmonitorsessionAddAdditionalDependants(struct RsClient *pClient, struct MIGMonitorSession *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_migmonitorsessionUnmap(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_migmonitorsessionControl_Prologue(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_migmonitorsessionCanCopy(struct MIGMonitorSession *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_migmonitorsessionMapTo(struct MIGMonitorSession *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_migmonitorsessionPreDestruct(struct MIGMonitorSession *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_migmonitorsessionUnmapFrom(struct MIGMonitorSession *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_migmonitorsessionControl_Epilogue(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_migmonitorsessionControlLookup(struct MIGMonitorSession *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_migmonitorsessionMap(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_migmonitorsessionAccessCallback(struct MIGMonitorSession *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MIGMonitorSession_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const 
struct NVOC_EXPORT_INFO __nvoc_export_info_MIGMonitorSession = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_MIGMonitorSession(MIGMonitorSession *pThis) { + __nvoc_migmonitorsessionDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MIGMonitorSession(MIGMonitorSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_MIGMonitorSession(MIGMonitorSession *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MIGMonitorSession_fail_RmResource; + __nvoc_init_dataField_MIGMonitorSession(pThis); + + status = __nvoc_migmonitorsessionConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MIGMonitorSession_fail__init; + goto __nvoc_ctor_MIGMonitorSession_exit; // Success + +__nvoc_ctor_MIGMonitorSession_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_MIGMonitorSession_fail_RmResource: +__nvoc_ctor_MIGMonitorSession_exit: + + return status; +} + +static void __nvoc_init_funcTable_MIGMonitorSession_1(MIGMonitorSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__migmonitorsessionShareCallback__ = &__nvoc_thunk_RmResource_migmonitorsessionShareCallback; + + pThis->__migmonitorsessionCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_migmonitorsessionCheckMemInterUnmap; + + pThis->__migmonitorsessionControl__ = &__nvoc_thunk_RsResource_migmonitorsessionControl; + + pThis->__migmonitorsessionGetMemInterMapParams__ = &__nvoc_thunk_RmResource_migmonitorsessionGetMemInterMapParams; + + pThis->__migmonitorsessionGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_migmonitorsessionGetMemoryMappingDescriptor; + + pThis->__migmonitorsessionGetRefCount__ = &__nvoc_thunk_RsResource_migmonitorsessionGetRefCount; + + pThis->__migmonitorsessionControlFilter__ = &__nvoc_thunk_RsResource_migmonitorsessionControlFilter; + + pThis->__migmonitorsessionAddAdditionalDependants__ = &__nvoc_thunk_RsResource_migmonitorsessionAddAdditionalDependants; + + pThis->__migmonitorsessionUnmap__ = &__nvoc_thunk_RsResource_migmonitorsessionUnmap; + + pThis->__migmonitorsessionControl_Prologue__ = &__nvoc_thunk_RmResource_migmonitorsessionControl_Prologue; + + pThis->__migmonitorsessionCanCopy__ = &__nvoc_thunk_RsResource_migmonitorsessionCanCopy; + + pThis->__migmonitorsessionMapTo__ = &__nvoc_thunk_RsResource_migmonitorsessionMapTo; + + pThis->__migmonitorsessionPreDestruct__ = &__nvoc_thunk_RsResource_migmonitorsessionPreDestruct; + + pThis->__migmonitorsessionUnmapFrom__ = &__nvoc_thunk_RsResource_migmonitorsessionUnmapFrom; + + pThis->__migmonitorsessionControl_Epilogue__ = &__nvoc_thunk_RmResource_migmonitorsessionControl_Epilogue; + + pThis->__migmonitorsessionControlLookup__ = &__nvoc_thunk_RsResource_migmonitorsessionControlLookup; + + pThis->__migmonitorsessionMap__ = &__nvoc_thunk_RsResource_migmonitorsessionMap; + + pThis->__migmonitorsessionAccessCallback__ = &__nvoc_thunk_RmResource_migmonitorsessionAccessCallback; +} + +void __nvoc_init_funcTable_MIGMonitorSession(MIGMonitorSession *pThis) { + 
__nvoc_init_funcTable_MIGMonitorSession_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_MIGMonitorSession(MIGMonitorSession *pThis) { + pThis->__nvoc_pbase_MIGMonitorSession = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_MIGMonitorSession(pThis); +} + +NV_STATUS __nvoc_objCreate_MIGMonitorSession(MIGMonitorSession **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + MIGMonitorSession *pThis; + + pThis = portMemAllocNonPaged(sizeof(MIGMonitorSession)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MIGMonitorSession)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MIGMonitorSession); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_MIGMonitorSession(pThis); + status = __nvoc_ctor_MIGMonitorSession(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_MIGMonitorSession_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MIGMonitorSession_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MIGMonitorSession(MIGMonitorSession **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_MIGMonitorSession(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_mig_monitor_session_nvoc.h b/src/nvidia/generated/g_mig_monitor_session_nvoc.h new file mode 100644 index 000000000..2670536ec --- /dev/null +++ b/src/nvidia/generated/g_mig_monitor_session_nvoc.h @@ -0,0 +1,220 @@ +#ifndef _G_MIG_MONITOR_SESSION_NVOC_H_ +#define _G_MIG_MONITOR_SESSION_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * Description: + * This file contains functions to grant MIG monitor capability + * + * Key attributes of MIGMonitorSession class: + * - hClient is parent of MIGMonitorSession. + * - As the MIG monitor capability is world accessible by default, + * MIGMonitorSession allocation requires a privileged client only + * if the platform doesn't implement this capability + * - RmApi lock must be held. + *****************************************************************************/ + +#include "g_mig_monitor_session_nvoc.h" + +#ifndef MIG_MONITOR_SESSION_H +#define MIG_MONITOR_SESSION_H + +#include "rmapi/resource.h" + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +#ifdef NVOC_MIG_MONITOR_SESSION_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct MIGMonitorSession { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct MIGMonitorSession *__nvoc_pbase_MIGMonitorSession; + NvBool (*__migmonitorsessionShareCallback__)(struct MIGMonitorSession *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__migmonitorsessionCheckMemInterUnmap__)(struct MIGMonitorSession *, NvBool); + NV_STATUS (*__migmonitorsessionControl__)(struct MIGMonitorSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__migmonitorsessionGetMemInterMapParams__)(struct MIGMonitorSession *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__migmonitorsessionGetMemoryMappingDescriptor__)(struct MIGMonitorSession *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__migmonitorsessionGetRefCount__)(struct MIGMonitorSession *); + NV_STATUS (*__migmonitorsessionControlFilter__)(struct MIGMonitorSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__migmonitorsessionAddAdditionalDependants__)(struct RsClient *, struct MIGMonitorSession *, RsResourceRef *); + NV_STATUS (*__migmonitorsessionUnmap__)(struct MIGMonitorSession *, struct 
CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__migmonitorsessionControl_Prologue__)(struct MIGMonitorSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__migmonitorsessionCanCopy__)(struct MIGMonitorSession *); + NV_STATUS (*__migmonitorsessionMapTo__)(struct MIGMonitorSession *, RS_RES_MAP_TO_PARAMS *); + void (*__migmonitorsessionPreDestruct__)(struct MIGMonitorSession *); + NV_STATUS (*__migmonitorsessionUnmapFrom__)(struct MIGMonitorSession *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__migmonitorsessionControl_Epilogue__)(struct MIGMonitorSession *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__migmonitorsessionControlLookup__)(struct MIGMonitorSession *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__migmonitorsessionMap__)(struct MIGMonitorSession *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__migmonitorsessionAccessCallback__)(struct MIGMonitorSession *, struct RsClient *, void *, RsAccessRight); + NvU64 dupedCapDescriptor; +}; + +#ifndef __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ +#define __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ +typedef struct MIGMonitorSession MIGMonitorSession; +#endif /* __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGMonitorSession +#define __nvoc_class_id_MIGMonitorSession 0x29e15c +#endif /* __nvoc_class_id_MIGMonitorSession */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MIGMonitorSession; + +#define __staticCast_MIGMonitorSession(pThis) \ + ((pThis)->__nvoc_pbase_MIGMonitorSession) + +#ifdef __nvoc_mig_monitor_session_h_disabled +#define __dynamicCast_MIGMonitorSession(pThis) ((MIGMonitorSession*)NULL) +#else //__nvoc_mig_monitor_session_h_disabled +#define __dynamicCast_MIGMonitorSession(pThis) \ + ((MIGMonitorSession*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MIGMonitorSession))) +#endif //__nvoc_mig_monitor_session_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_MIGMonitorSession(MIGMonitorSession**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MIGMonitorSession(MIGMonitorSession**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_MIGMonitorSession(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_MIGMonitorSession((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define migmonitorsessionShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) migmonitorsessionShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define migmonitorsessionCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) migmonitorsessionCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define migmonitorsessionControl(pResource, pCallContext, pParams) migmonitorsessionControl_DISPATCH(pResource, pCallContext, pParams) +#define migmonitorsessionGetMemInterMapParams(pRmResource, pParams) migmonitorsessionGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define migmonitorsessionGetMemoryMappingDescriptor(pRmResource, ppMemDesc) migmonitorsessionGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define migmonitorsessionGetRefCount(pResource) migmonitorsessionGetRefCount_DISPATCH(pResource) +#define migmonitorsessionControlFilter(pResource, pCallContext, pParams) migmonitorsessionControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define 
migmonitorsessionAddAdditionalDependants(pClient, pResource, pReference) migmonitorsessionAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define migmonitorsessionUnmap(pResource, pCallContext, pCpuMapping) migmonitorsessionUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define migmonitorsessionControl_Prologue(pResource, pCallContext, pParams) migmonitorsessionControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define migmonitorsessionCanCopy(pResource) migmonitorsessionCanCopy_DISPATCH(pResource) +#define migmonitorsessionMapTo(pResource, pParams) migmonitorsessionMapTo_DISPATCH(pResource, pParams) +#define migmonitorsessionPreDestruct(pResource) migmonitorsessionPreDestruct_DISPATCH(pResource) +#define migmonitorsessionUnmapFrom(pResource, pParams) migmonitorsessionUnmapFrom_DISPATCH(pResource, pParams) +#define migmonitorsessionControl_Epilogue(pResource, pCallContext, pParams) migmonitorsessionControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define migmonitorsessionControlLookup(pResource, pParams, ppEntry) migmonitorsessionControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define migmonitorsessionMap(pResource, pCallContext, pParams, pCpuMapping) migmonitorsessionMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define migmonitorsessionAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) migmonitorsessionAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool migmonitorsessionShareCallback_DISPATCH(struct MIGMonitorSession *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__migmonitorsessionShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS migmonitorsessionCheckMemInterUnmap_DISPATCH(struct MIGMonitorSession *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__migmonitorsessionCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS migmonitorsessionControl_DISPATCH(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__migmonitorsessionControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS migmonitorsessionGetMemInterMapParams_DISPATCH(struct MIGMonitorSession *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__migmonitorsessionGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS migmonitorsessionGetMemoryMappingDescriptor_DISPATCH(struct MIGMonitorSession *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__migmonitorsessionGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 migmonitorsessionGetRefCount_DISPATCH(struct MIGMonitorSession *pResource) { + return pResource->__migmonitorsessionGetRefCount__(pResource); +} + +static inline NV_STATUS migmonitorsessionControlFilter_DISPATCH(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__migmonitorsessionControlFilter__(pResource, pCallContext, pParams); +} + +static inline void migmonitorsessionAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct MIGMonitorSession *pResource, RsResourceRef *pReference) { + pResource->__migmonitorsessionAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline 
NV_STATUS migmonitorsessionUnmap_DISPATCH(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__migmonitorsessionUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS migmonitorsessionControl_Prologue_DISPATCH(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__migmonitorsessionControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool migmonitorsessionCanCopy_DISPATCH(struct MIGMonitorSession *pResource) { + return pResource->__migmonitorsessionCanCopy__(pResource); +} + +static inline NV_STATUS migmonitorsessionMapTo_DISPATCH(struct MIGMonitorSession *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__migmonitorsessionMapTo__(pResource, pParams); +} + +static inline void migmonitorsessionPreDestruct_DISPATCH(struct MIGMonitorSession *pResource) { + pResource->__migmonitorsessionPreDestruct__(pResource); +} + +static inline NV_STATUS migmonitorsessionUnmapFrom_DISPATCH(struct MIGMonitorSession *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__migmonitorsessionUnmapFrom__(pResource, pParams); +} + +static inline void migmonitorsessionControl_Epilogue_DISPATCH(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__migmonitorsessionControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS migmonitorsessionControlLookup_DISPATCH(struct MIGMonitorSession *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__migmonitorsessionControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS migmonitorsessionMap_DISPATCH(struct MIGMonitorSession *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__migmonitorsessionMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool migmonitorsessionAccessCallback_DISPATCH(struct MIGMonitorSession *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__migmonitorsessionAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS migmonitorsessionConstruct_IMPL(struct MIGMonitorSession *arg_pMIGMonitorSession, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_migmonitorsessionConstruct(arg_pMIGMonitorSession, arg_pCallContext, arg_pParams) migmonitorsessionConstruct_IMPL(arg_pMIGMonitorSession, arg_pCallContext, arg_pParams) +void migmonitorsessionDestruct_IMPL(struct MIGMonitorSession *pMIGMonitorSession); +#define __nvoc_migmonitorsessionDestruct(pMIGMonitorSession) migmonitorsessionDestruct_IMPL(pMIGMonitorSession) +#undef PRIVATE_FIELD + + +#endif // MIG_MONITOR_SESSION_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MIG_MONITOR_SESSION_NVOC_H_ diff --git a/src/nvidia/generated/g_mmu_fault_buffer_nvoc.c b/src/nvidia/generated/g_mmu_fault_buffer_nvoc.c new file mode 100644 index 000000000..a0c8417d1 --- /dev/null +++ b/src/nvidia/generated/g_mmu_fault_buffer_nvoc.c @@ -0,0 +1,478 @@ +#define NVOC_MMU_FAULT_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include 
"nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mmu_fault_buffer_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7e1829 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MmuFaultBuffer; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_MmuFaultBuffer(MmuFaultBuffer*); +void __nvoc_init_funcTable_MmuFaultBuffer(MmuFaultBuffer*); +NV_STATUS __nvoc_ctor_MmuFaultBuffer(MmuFaultBuffer*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_MmuFaultBuffer(MmuFaultBuffer*); +void __nvoc_dtor_MmuFaultBuffer(MmuFaultBuffer*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MmuFaultBuffer; + +static const struct NVOC_RTTI __nvoc_rtti_MmuFaultBuffer_MmuFaultBuffer = { + /*pClassDef=*/ &__nvoc_class_def_MmuFaultBuffer, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MmuFaultBuffer, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MmuFaultBuffer_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MmuFaultBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MmuFaultBuffer_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MmuFaultBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MmuFaultBuffer_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MmuFaultBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MmuFaultBuffer_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MmuFaultBuffer, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MmuFaultBuffer_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MmuFaultBuffer, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MmuFaultBuffer_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MmuFaultBuffer, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MmuFaultBuffer_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MmuFaultBuffer, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MmuFaultBuffer = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_MmuFaultBuffer_MmuFaultBuffer, + &__nvoc_rtti_MmuFaultBuffer_Notifier, + &__nvoc_rtti_MmuFaultBuffer_INotifier, + &__nvoc_rtti_MmuFaultBuffer_GpuResource, + &__nvoc_rtti_MmuFaultBuffer_RmResource, + 
&__nvoc_rtti_MmuFaultBuffer_RmResourceCommon, + &__nvoc_rtti_MmuFaultBuffer_RsResource, + &__nvoc_rtti_MmuFaultBuffer_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MmuFaultBuffer = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MmuFaultBuffer), + /*classId=*/ classId(MmuFaultBuffer), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MmuFaultBuffer", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MmuFaultBuffer, + /*pCastInfo=*/ &__nvoc_castinfo_MmuFaultBuffer, + /*pExportInfo=*/ &__nvoc_export_info_MmuFaultBuffer +}; + +static NV_STATUS __nvoc_thunk_MmuFaultBuffer_gpuresMap(struct GpuResource *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return faultbufMap((struct MmuFaultBuffer *)(((unsigned char *)pMmuFaultBuffer) - __nvoc_rtti_MmuFaultBuffer_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_MmuFaultBuffer_gpuresUnmap(struct GpuResource *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return faultbufUnmap((struct MmuFaultBuffer *)(((unsigned char *)pMmuFaultBuffer) - __nvoc_rtti_MmuFaultBuffer_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_MmuFaultBuffer_gpuresGetMapAddrSpace(struct GpuResource *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return faultbufGetMapAddrSpace((struct MmuFaultBuffer *)(((unsigned char *)pMmuFaultBuffer) - __nvoc_rtti_MmuFaultBuffer_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_GpuResource_faultbufShareCallback(struct MmuFaultBuffer *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MmuFaultBuffer_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_faultbufMapTo(struct MmuFaultBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_faultbufGetOrAllocNotifShare(struct MmuFaultBuffer *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MmuFaultBuffer_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_faultbufCheckMemInterUnmap(struct MmuFaultBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MmuFaultBuffer_RmResource.offset), bSubdeviceHandleProvided); +} + +static void __nvoc_thunk_Notifier_faultbufSetNotificationShare(struct MmuFaultBuffer *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MmuFaultBuffer_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_faultbufGetRefCount(struct MmuFaultBuffer *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RsResource.offset)); +} + +static void 
__nvoc_thunk_RsResource_faultbufAddAdditionalDependants(struct RsClient *pClient, struct MmuFaultBuffer *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_faultbufControl_Prologue(struct MmuFaultBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_faultbufGetRegBaseOffsetAndSize(struct MmuFaultBuffer *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MmuFaultBuffer_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_faultbufInternalControlForward(struct MmuFaultBuffer *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MmuFaultBuffer_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_faultbufUnmapFrom(struct MmuFaultBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_faultbufControl_Epilogue(struct MmuFaultBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_faultbufControlLookup(struct MmuFaultBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_faultbufGetInternalObjectHandle(struct MmuFaultBuffer *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MmuFaultBuffer_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_faultbufControl(struct MmuFaultBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_MmuFaultBuffer_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_faultbufGetMemInterMapParams(struct MmuFaultBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MmuFaultBuffer_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_faultbufGetMemoryMappingDescriptor(struct MmuFaultBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MmuFaultBuffer_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_faultbufControlFilter(struct 
MmuFaultBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_faultbufUnregisterEvent(struct MmuFaultBuffer *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MmuFaultBuffer_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_faultbufCanCopy(struct MmuFaultBuffer *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_faultbufPreDestruct(struct MmuFaultBuffer *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_faultbufGetNotificationListPtr(struct MmuFaultBuffer *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MmuFaultBuffer_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_faultbufGetNotificationShare(struct MmuFaultBuffer *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_MmuFaultBuffer_Notifier.offset)); +} + +static NvBool __nvoc_thunk_RmResource_faultbufAccessCallback(struct MmuFaultBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MmuFaultBuffer_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_MmuFaultBuffer[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) faultbufCtrlCmdFaultbufferGetSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0690105u, + /*paramSize=*/ sizeof(NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_MmuFaultBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "faultbufCtrlCmdFaultbufferGetSize" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) faultbufCtrlCmdFaultbufferGetRegisterMappings_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0690106u, + /*paramSize=*/ sizeof(NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_MmuFaultBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "faultbufCtrlCmdFaultbufferGetRegisterMappings" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + 
/*methodId=*/ 0xc3690101u, + /*paramSize=*/ sizeof(NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_MmuFaultBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xc3690102u, + /*paramSize=*/ sizeof(NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_MmuFaultBuffer.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_MmuFaultBuffer = +{ + /*numEntries=*/ 4, + /*pExportEntries=*/ __nvoc_exported_method_def_MmuFaultBuffer +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_MmuFaultBuffer(MmuFaultBuffer *pThis) { + __nvoc_faultbufDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MmuFaultBuffer(MmuFaultBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_MmuFaultBuffer(MmuFaultBuffer *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MmuFaultBuffer_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_MmuFaultBuffer_fail_Notifier; + __nvoc_init_dataField_MmuFaultBuffer(pThis); + + status = __nvoc_faultbufConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MmuFaultBuffer_fail__init; + goto __nvoc_ctor_MmuFaultBuffer_exit; // Success + +__nvoc_ctor_MmuFaultBuffer_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_MmuFaultBuffer_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_MmuFaultBuffer_fail_GpuResource: +__nvoc_ctor_MmuFaultBuffer_exit: + + return status; +} + +static void __nvoc_init_funcTable_MmuFaultBuffer_1(MmuFaultBuffer *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__faultbufMap__ = &faultbufMap_IMPL; + + pThis->__faultbufUnmap__ = &faultbufUnmap_IMPL; + + pThis->__faultbufGetMapAddrSpace__ = &faultbufGetMapAddrSpace_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__faultbufCtrlCmdFaultbufferGetSize__ = &faultbufCtrlCmdFaultbufferGetSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__faultbufCtrlCmdFaultbufferGetRegisterMappings__ = &faultbufCtrlCmdFaultbufferGetRegisterMappings_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf__ = &faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf__ = &faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_MmuFaultBuffer_gpuresMap; + + pThis->__nvoc_base_GpuResource.__gpuresUnmap__ = &__nvoc_thunk_MmuFaultBuffer_gpuresUnmap; + + pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_MmuFaultBuffer_gpuresGetMapAddrSpace; + + pThis->__faultbufShareCallback__ = &__nvoc_thunk_GpuResource_faultbufShareCallback; + + pThis->__faultbufMapTo__ = &__nvoc_thunk_RsResource_faultbufMapTo; + + pThis->__faultbufGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_faultbufGetOrAllocNotifShare; + + pThis->__faultbufCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_faultbufCheckMemInterUnmap; + + pThis->__faultbufSetNotificationShare__ = &__nvoc_thunk_Notifier_faultbufSetNotificationShare; + + pThis->__faultbufGetRefCount__ = &__nvoc_thunk_RsResource_faultbufGetRefCount; + + pThis->__faultbufAddAdditionalDependants__ = &__nvoc_thunk_RsResource_faultbufAddAdditionalDependants; + + pThis->__faultbufControl_Prologue__ = &__nvoc_thunk_RmResource_faultbufControl_Prologue; + + pThis->__faultbufGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_faultbufGetRegBaseOffsetAndSize; + + pThis->__faultbufInternalControlForward__ = &__nvoc_thunk_GpuResource_faultbufInternalControlForward; + + pThis->__faultbufUnmapFrom__ = &__nvoc_thunk_RsResource_faultbufUnmapFrom; + + pThis->__faultbufControl_Epilogue__ = &__nvoc_thunk_RmResource_faultbufControl_Epilogue; + + pThis->__faultbufControlLookup__ = &__nvoc_thunk_RsResource_faultbufControlLookup; + + pThis->__faultbufGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_faultbufGetInternalObjectHandle; + + pThis->__faultbufControl__ = &__nvoc_thunk_GpuResource_faultbufControl; + + pThis->__faultbufGetMemInterMapParams__ = &__nvoc_thunk_RmResource_faultbufGetMemInterMapParams; + + pThis->__faultbufGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_faultbufGetMemoryMappingDescriptor; + + pThis->__faultbufControlFilter__ = &__nvoc_thunk_RsResource_faultbufControlFilter; + + pThis->__faultbufUnregisterEvent__ = &__nvoc_thunk_Notifier_faultbufUnregisterEvent; + + pThis->__faultbufCanCopy__ = &__nvoc_thunk_RsResource_faultbufCanCopy; + + pThis->__faultbufPreDestruct__ = &__nvoc_thunk_RsResource_faultbufPreDestruct; + + pThis->__faultbufGetNotificationListPtr__ = &__nvoc_thunk_Notifier_faultbufGetNotificationListPtr; + + pThis->__faultbufGetNotificationShare__ = &__nvoc_thunk_Notifier_faultbufGetNotificationShare; + + pThis->__faultbufAccessCallback__ = &__nvoc_thunk_RmResource_faultbufAccessCallback; +} + +void __nvoc_init_funcTable_MmuFaultBuffer(MmuFaultBuffer *pThis) { + __nvoc_init_funcTable_MmuFaultBuffer_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_MmuFaultBuffer(MmuFaultBuffer *pThis) { + pThis->__nvoc_pbase_MmuFaultBuffer = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = 
&pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_MmuFaultBuffer(pThis); +} + +NV_STATUS __nvoc_objCreate_MmuFaultBuffer(MmuFaultBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + MmuFaultBuffer *pThis; + + pThis = portMemAllocNonPaged(sizeof(MmuFaultBuffer)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MmuFaultBuffer)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MmuFaultBuffer); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_MmuFaultBuffer(pThis); + status = __nvoc_ctor_MmuFaultBuffer(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_MmuFaultBuffer_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MmuFaultBuffer_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MmuFaultBuffer(MmuFaultBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_MmuFaultBuffer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_mmu_fault_buffer_nvoc.h b/src/nvidia/generated/g_mmu_fault_buffer_nvoc.h new file mode 100644 index 000000000..0cb36e69d --- /dev/null +++ b/src/nvidia/generated/g_mmu_fault_buffer_nvoc.h @@ -0,0 +1,305 @@ +#ifndef _G_MMU_FAULT_BUFFER_NVOC_H_ +#define _G_MMU_FAULT_BUFFER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_mmu_fault_buffer_nvoc.h" + +#ifndef MMU_FAULT_BUFFER_H +#define MMU_FAULT_BUFFER_H + +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" +#include "ctrl/ctrlb069.h" +#include "ctrl/ctrlc369.h" +#include "rmapi/control.h" // for macro RMCTRL_EXPORT etc. + +/*! + * RM internal class representing XXX_FAULT_BUFFER + */ +#ifdef NVOC_MMU_FAULT_BUFFER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct MmuFaultBuffer { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct MmuFaultBuffer *__nvoc_pbase_MmuFaultBuffer; + NV_STATUS (*__faultbufMap__)(struct MmuFaultBuffer *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__faultbufUnmap__)(struct MmuFaultBuffer *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__faultbufGetMapAddrSpace__)(struct MmuFaultBuffer *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__faultbufCtrlCmdFaultbufferGetSize__)(struct MmuFaultBuffer *, NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS *); + NV_STATUS (*__faultbufCtrlCmdFaultbufferGetRegisterMappings__)(struct MmuFaultBuffer *, NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS *); + NV_STATUS (*__faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf__)(struct MmuFaultBuffer *, NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS *); + NV_STATUS (*__faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf__)(struct MmuFaultBuffer *, NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS *); + NvBool (*__faultbufShareCallback__)(struct MmuFaultBuffer *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__faultbufMapTo__)(struct MmuFaultBuffer *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__faultbufGetOrAllocNotifShare__)(struct MmuFaultBuffer *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__faultbufCheckMemInterUnmap__)(struct MmuFaultBuffer *, NvBool); + void (*__faultbufSetNotificationShare__)(struct MmuFaultBuffer *, struct NotifShare *); + NvU32 (*__faultbufGetRefCount__)(struct MmuFaultBuffer *); + void (*__faultbufAddAdditionalDependants__)(struct RsClient *, struct MmuFaultBuffer *, RsResourceRef *); + NV_STATUS (*__faultbufControl_Prologue__)(struct MmuFaultBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__faultbufGetRegBaseOffsetAndSize__)(struct MmuFaultBuffer *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__faultbufInternalControlForward__)(struct MmuFaultBuffer *, NvU32, void *, NvU32); + NV_STATUS (*__faultbufUnmapFrom__)(struct MmuFaultBuffer *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__faultbufControl_Epilogue__)(struct MmuFaultBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__faultbufControlLookup__)(struct MmuFaultBuffer *, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__faultbufGetInternalObjectHandle__)(struct MmuFaultBuffer *); + NV_STATUS (*__faultbufControl__)(struct MmuFaultBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__faultbufGetMemInterMapParams__)(struct MmuFaultBuffer *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__faultbufGetMemoryMappingDescriptor__)(struct MmuFaultBuffer *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__faultbufControlFilter__)(struct MmuFaultBuffer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__faultbufUnregisterEvent__)(struct MmuFaultBuffer *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__faultbufCanCopy__)(struct MmuFaultBuffer *); + void (*__faultbufPreDestruct__)(struct MmuFaultBuffer *); + PEVENTNOTIFICATION *(*__faultbufGetNotificationListPtr__)(struct MmuFaultBuffer *); + struct NotifShare *(*__faultbufGetNotificationShare__)(struct MmuFaultBuffer *); + NvBool (*__faultbufAccessCallback__)(struct MmuFaultBuffer *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ +#define __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ +typedef struct MmuFaultBuffer MmuFaultBuffer; +#endif /* __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MmuFaultBuffer +#define __nvoc_class_id_MmuFaultBuffer 0x7e1829 +#endif /* __nvoc_class_id_MmuFaultBuffer */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MmuFaultBuffer; + +#define __staticCast_MmuFaultBuffer(pThis) \ + ((pThis)->__nvoc_pbase_MmuFaultBuffer) + +#ifdef __nvoc_mmu_fault_buffer_h_disabled +#define __dynamicCast_MmuFaultBuffer(pThis) ((MmuFaultBuffer*)NULL) +#else //__nvoc_mmu_fault_buffer_h_disabled +#define __dynamicCast_MmuFaultBuffer(pThis) \ + ((MmuFaultBuffer*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MmuFaultBuffer))) +#endif //__nvoc_mmu_fault_buffer_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_MmuFaultBuffer(MmuFaultBuffer**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MmuFaultBuffer(MmuFaultBuffer**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_MmuFaultBuffer(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_MmuFaultBuffer((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define faultbufMap(pMmuFaultBuffer, pCallContext, pParams, pCpuMapping) faultbufMap_DISPATCH(pMmuFaultBuffer, pCallContext, pParams, pCpuMapping) +#define faultbufUnmap(pMmuFaultBuffer, pCallContext, pCpuMapping) faultbufUnmap_DISPATCH(pMmuFaultBuffer, pCallContext, pCpuMapping) +#define faultbufGetMapAddrSpace(pMmuFaultBuffer, pCallContext, mapFlags, pAddrSpace) faultbufGetMapAddrSpace_DISPATCH(pMmuFaultBuffer, pCallContext, mapFlags, pAddrSpace) +#define faultbufCtrlCmdFaultbufferGetSize(pMmuFaultBuffer, pGetParams) faultbufCtrlCmdFaultbufferGetSize_DISPATCH(pMmuFaultBuffer, pGetParams) +#define faultbufCtrlCmdFaultbufferGetRegisterMappings(pMmuFaultBuffer, pParams) faultbufCtrlCmdFaultbufferGetRegisterMappings_DISPATCH(pMmuFaultBuffer, pParams) +#define faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf(pMmuFaultBuffer, pParams) faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf_DISPATCH(pMmuFaultBuffer, pParams) +#define faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf(pMmuFaultBuffer, pParams) 
faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf_DISPATCH(pMmuFaultBuffer, pParams) +#define faultbufShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) faultbufShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define faultbufMapTo(pResource, pParams) faultbufMapTo_DISPATCH(pResource, pParams) +#define faultbufGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) faultbufGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define faultbufCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) faultbufCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define faultbufSetNotificationShare(pNotifier, pNotifShare) faultbufSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define faultbufGetRefCount(pResource) faultbufGetRefCount_DISPATCH(pResource) +#define faultbufAddAdditionalDependants(pClient, pResource, pReference) faultbufAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define faultbufControl_Prologue(pResource, pCallContext, pParams) faultbufControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define faultbufGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) faultbufGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define faultbufInternalControlForward(pGpuResource, command, pParams, size) faultbufInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define faultbufUnmapFrom(pResource, pParams) faultbufUnmapFrom_DISPATCH(pResource, pParams) +#define faultbufControl_Epilogue(pResource, pCallContext, pParams) faultbufControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define faultbufControlLookup(pResource, pParams, ppEntry) faultbufControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define faultbufGetInternalObjectHandle(pGpuResource) faultbufGetInternalObjectHandle_DISPATCH(pGpuResource) +#define faultbufControl(pGpuResource, pCallContext, pParams) faultbufControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define faultbufGetMemInterMapParams(pRmResource, pParams) faultbufGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define faultbufGetMemoryMappingDescriptor(pRmResource, ppMemDesc) faultbufGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define faultbufControlFilter(pResource, pCallContext, pParams) faultbufControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define faultbufUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) faultbufUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define faultbufCanCopy(pResource) faultbufCanCopy_DISPATCH(pResource) +#define faultbufPreDestruct(pResource) faultbufPreDestruct_DISPATCH(pResource) +#define faultbufGetNotificationListPtr(pNotifier) faultbufGetNotificationListPtr_DISPATCH(pNotifier) +#define faultbufGetNotificationShare(pNotifier) faultbufGetNotificationShare_DISPATCH(pNotifier) +#define faultbufAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) faultbufAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS faultbufMap_IMPL(struct MmuFaultBuffer *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS faultbufMap_DISPATCH(struct MmuFaultBuffer *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping 
*pCpuMapping) { + return pMmuFaultBuffer->__faultbufMap__(pMmuFaultBuffer, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS faultbufUnmap_IMPL(struct MmuFaultBuffer *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping); + +static inline NV_STATUS faultbufUnmap_DISPATCH(struct MmuFaultBuffer *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pMmuFaultBuffer->__faultbufUnmap__(pMmuFaultBuffer, pCallContext, pCpuMapping); +} + +NV_STATUS faultbufGetMapAddrSpace_IMPL(struct MmuFaultBuffer *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS faultbufGetMapAddrSpace_DISPATCH(struct MmuFaultBuffer *pMmuFaultBuffer, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMmuFaultBuffer->__faultbufGetMapAddrSpace__(pMmuFaultBuffer, pCallContext, mapFlags, pAddrSpace); +} + +NV_STATUS faultbufCtrlCmdFaultbufferGetSize_IMPL(struct MmuFaultBuffer *pMmuFaultBuffer, NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS *pGetParams); + +static inline NV_STATUS faultbufCtrlCmdFaultbufferGetSize_DISPATCH(struct MmuFaultBuffer *pMmuFaultBuffer, NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS *pGetParams) { + return pMmuFaultBuffer->__faultbufCtrlCmdFaultbufferGetSize__(pMmuFaultBuffer, pGetParams); +} + +NV_STATUS faultbufCtrlCmdFaultbufferGetRegisterMappings_IMPL(struct MmuFaultBuffer *pMmuFaultBuffer, NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS *pParams); + +static inline NV_STATUS faultbufCtrlCmdFaultbufferGetRegisterMappings_DISPATCH(struct MmuFaultBuffer *pMmuFaultBuffer, NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS *pParams) { + return pMmuFaultBuffer->__faultbufCtrlCmdFaultbufferGetRegisterMappings__(pMmuFaultBuffer, pParams); +} + +NV_STATUS faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf_IMPL(struct MmuFaultBuffer *pMmuFaultBuffer, NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS *pParams); + +static inline NV_STATUS faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf_DISPATCH(struct MmuFaultBuffer *pMmuFaultBuffer, NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS *pParams) { + return pMmuFaultBuffer->__faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf__(pMmuFaultBuffer, pParams); +} + +NV_STATUS faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf_IMPL(struct MmuFaultBuffer *pMmuFaultBuffer, NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS *pParams); + +static inline NV_STATUS faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf_DISPATCH(struct MmuFaultBuffer *pMmuFaultBuffer, NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS *pParams) { + return pMmuFaultBuffer->__faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf__(pMmuFaultBuffer, pParams); +} + +static inline NvBool faultbufShareCallback_DISPATCH(struct MmuFaultBuffer *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__faultbufShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS faultbufMapTo_DISPATCH(struct MmuFaultBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__faultbufMapTo__(pResource, pParams); +} + +static inline NV_STATUS faultbufGetOrAllocNotifShare_DISPATCH(struct MmuFaultBuffer *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return 
pNotifier->__faultbufGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS faultbufCheckMemInterUnmap_DISPATCH(struct MmuFaultBuffer *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__faultbufCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline void faultbufSetNotificationShare_DISPATCH(struct MmuFaultBuffer *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__faultbufSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 faultbufGetRefCount_DISPATCH(struct MmuFaultBuffer *pResource) { + return pResource->__faultbufGetRefCount__(pResource); +} + +static inline void faultbufAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct MmuFaultBuffer *pResource, RsResourceRef *pReference) { + pResource->__faultbufAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS faultbufControl_Prologue_DISPATCH(struct MmuFaultBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__faultbufControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS faultbufGetRegBaseOffsetAndSize_DISPATCH(struct MmuFaultBuffer *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__faultbufGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS faultbufInternalControlForward_DISPATCH(struct MmuFaultBuffer *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__faultbufInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS faultbufUnmapFrom_DISPATCH(struct MmuFaultBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__faultbufUnmapFrom__(pResource, pParams); +} + +static inline void faultbufControl_Epilogue_DISPATCH(struct MmuFaultBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__faultbufControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS faultbufControlLookup_DISPATCH(struct MmuFaultBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__faultbufControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle faultbufGetInternalObjectHandle_DISPATCH(struct MmuFaultBuffer *pGpuResource) { + return pGpuResource->__faultbufGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS faultbufControl_DISPATCH(struct MmuFaultBuffer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__faultbufControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS faultbufGetMemInterMapParams_DISPATCH(struct MmuFaultBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__faultbufGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS faultbufGetMemoryMappingDescriptor_DISPATCH(struct MmuFaultBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__faultbufGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS faultbufControlFilter_DISPATCH(struct MmuFaultBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__faultbufControlFilter__(pResource, 
pCallContext, pParams); +} + +static inline NV_STATUS faultbufUnregisterEvent_DISPATCH(struct MmuFaultBuffer *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__faultbufUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool faultbufCanCopy_DISPATCH(struct MmuFaultBuffer *pResource) { + return pResource->__faultbufCanCopy__(pResource); +} + +static inline void faultbufPreDestruct_DISPATCH(struct MmuFaultBuffer *pResource) { + pResource->__faultbufPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *faultbufGetNotificationListPtr_DISPATCH(struct MmuFaultBuffer *pNotifier) { + return pNotifier->__faultbufGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *faultbufGetNotificationShare_DISPATCH(struct MmuFaultBuffer *pNotifier) { + return pNotifier->__faultbufGetNotificationShare__(pNotifier); +} + +static inline NvBool faultbufAccessCallback_DISPATCH(struct MmuFaultBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__faultbufAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS faultbufConstruct_IMPL(struct MmuFaultBuffer *arg_pMmuFaultBuffer, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_faultbufConstruct(arg_pMmuFaultBuffer, arg_pCallContext, arg_pParams) faultbufConstruct_IMPL(arg_pMmuFaultBuffer, arg_pCallContext, arg_pParams) +void faultbufDestruct_IMPL(struct MmuFaultBuffer *pMmuFaultBuffer); +#define __nvoc_faultbufDestruct(pMmuFaultBuffer) faultbufDestruct_IMPL(pMmuFaultBuffer) +#undef PRIVATE_FIELD + + +#endif // MMU_FAULT_BUFFER_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MMU_FAULT_BUFFER_NVOC_H_ diff --git a/src/nvidia/generated/g_mps_api_nvoc.c b/src/nvidia/generated/g_mps_api_nvoc.c new file mode 100644 index 000000000..8a0ae1fe4 --- /dev/null +++ b/src/nvidia/generated/g_mps_api_nvoc.c @@ -0,0 +1,294 @@ +#define NVOC_MPS_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_mps_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x22ce42 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MpsApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_MpsApi(MpsApi*); +void __nvoc_init_funcTable_MpsApi(MpsApi*); +NV_STATUS __nvoc_ctor_MpsApi(MpsApi*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_MpsApi(MpsApi*); +void __nvoc_dtor_MpsApi(MpsApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MpsApi; + +static const struct NVOC_RTTI __nvoc_rtti_MpsApi_MpsApi = { + /*pClassDef=*/ &__nvoc_class_def_MpsApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MpsApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_MpsApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MpsApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const 
struct NVOC_RTTI __nvoc_rtti_MpsApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MpsApi, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MpsApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MpsApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_MpsApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(MpsApi, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_MpsApi = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_MpsApi_MpsApi, + &__nvoc_rtti_MpsApi_RmResource, + &__nvoc_rtti_MpsApi_RmResourceCommon, + &__nvoc_rtti_MpsApi_RsResource, + &__nvoc_rtti_MpsApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_MpsApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(MpsApi), + /*classId=*/ classId(MpsApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "MpsApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MpsApi, + /*pCastInfo=*/ &__nvoc_castinfo_MpsApi, + /*pExportInfo=*/ &__nvoc_export_info_MpsApi +}; + +static NvBool __nvoc_thunk_RmResource_mpsApiShareCallback(struct MpsApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_mpsApiCheckMemInterUnmap(struct MpsApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MpsApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_mpsApiControl(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_mpsApiGetMemInterMapParams(struct MpsApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MpsApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_mpsApiGetMemoryMappingDescriptor(struct MpsApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_MpsApi_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_mpsApiGetRefCount(struct MpsApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_mpsApiControlFilter(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_mpsApiAddAdditionalDependants(struct RsClient *pClient, struct MpsApi 
*pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_mpsApiUnmap(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_mpsApiControl_Prologue(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_mpsApiCanCopy(struct MpsApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_mpsApiMapTo(struct MpsApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_mpsApiPreDestruct(struct MpsApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_mpsApiUnmapFrom(struct MpsApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_mpsApiControl_Epilogue(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_mpsApiControlLookup(struct MpsApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_mpsApiMap(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_mpsApiAccessCallback(struct MpsApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_MpsApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_MpsApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_MpsApi(MpsApi *pThis) { + __nvoc_mpsApiDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_MpsApi(MpsApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS 
__nvoc_ctor_MpsApi(MpsApi *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MpsApi_fail_RmResource; + __nvoc_init_dataField_MpsApi(pThis); + + status = __nvoc_mpsApiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_MpsApi_fail__init; + goto __nvoc_ctor_MpsApi_exit; // Success + +__nvoc_ctor_MpsApi_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_MpsApi_fail_RmResource: +__nvoc_ctor_MpsApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_MpsApi_1(MpsApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__mpsApiShareCallback__ = &__nvoc_thunk_RmResource_mpsApiShareCallback; + + pThis->__mpsApiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_mpsApiCheckMemInterUnmap; + + pThis->__mpsApiControl__ = &__nvoc_thunk_RsResource_mpsApiControl; + + pThis->__mpsApiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_mpsApiGetMemInterMapParams; + + pThis->__mpsApiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_mpsApiGetMemoryMappingDescriptor; + + pThis->__mpsApiGetRefCount__ = &__nvoc_thunk_RsResource_mpsApiGetRefCount; + + pThis->__mpsApiControlFilter__ = &__nvoc_thunk_RsResource_mpsApiControlFilter; + + pThis->__mpsApiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_mpsApiAddAdditionalDependants; + + pThis->__mpsApiUnmap__ = &__nvoc_thunk_RsResource_mpsApiUnmap; + + pThis->__mpsApiControl_Prologue__ = &__nvoc_thunk_RmResource_mpsApiControl_Prologue; + + pThis->__mpsApiCanCopy__ = &__nvoc_thunk_RsResource_mpsApiCanCopy; + + pThis->__mpsApiMapTo__ = &__nvoc_thunk_RsResource_mpsApiMapTo; + + pThis->__mpsApiPreDestruct__ = &__nvoc_thunk_RsResource_mpsApiPreDestruct; + + pThis->__mpsApiUnmapFrom__ = &__nvoc_thunk_RsResource_mpsApiUnmapFrom; + + pThis->__mpsApiControl_Epilogue__ = &__nvoc_thunk_RmResource_mpsApiControl_Epilogue; + + pThis->__mpsApiControlLookup__ = &__nvoc_thunk_RsResource_mpsApiControlLookup; + + pThis->__mpsApiMap__ = &__nvoc_thunk_RsResource_mpsApiMap; + + pThis->__mpsApiAccessCallback__ = &__nvoc_thunk_RmResource_mpsApiAccessCallback; +} + +void __nvoc_init_funcTable_MpsApi(MpsApi *pThis) { + __nvoc_init_funcTable_MpsApi_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_MpsApi(MpsApi *pThis) { + pThis->__nvoc_pbase_MpsApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_MpsApi(pThis); +} + +NV_STATUS __nvoc_objCreate_MpsApi(MpsApi **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + MpsApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(MpsApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(MpsApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MpsApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, 
Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_MpsApi(pThis); + status = __nvoc_ctor_MpsApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_MpsApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_MpsApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_MpsApi(MpsApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_MpsApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_mps_api_nvoc.h b/src/nvidia/generated/g_mps_api_nvoc.h new file mode 100644 index 000000000..f8415ffaa --- /dev/null +++ b/src/nvidia/generated/g_mps_api_nvoc.h @@ -0,0 +1,223 @@ +#ifndef _G_MPS_API_NVOC_H_ +#define _G_MPS_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/****************************************************************************** + * + * File: mpsApi.h + * + * Description: + * This file contains the functions managing the MpsApi object + * + *****************************************************************************/ + +#include "g_mps_api_nvoc.h" + +#ifndef _MPSAPI_H_ +#define _MPSAPI_H_ + +#include "gpu/gpu.h" +#include "rmapi/resource.h" + + +// **************************************************************************** +// Type Definitions +// **************************************************************************** + +// +// MpsApi information +// +// A client which has allocated MpsApi object is identified as MPS process. 
+// +#ifdef NVOC_MPS_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct MpsApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct MpsApi *__nvoc_pbase_MpsApi; + NvBool (*__mpsApiShareCallback__)(struct MpsApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__mpsApiCheckMemInterUnmap__)(struct MpsApi *, NvBool); + NV_STATUS (*__mpsApiControl__)(struct MpsApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__mpsApiGetMemInterMapParams__)(struct MpsApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__mpsApiGetMemoryMappingDescriptor__)(struct MpsApi *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__mpsApiGetRefCount__)(struct MpsApi *); + NV_STATUS (*__mpsApiControlFilter__)(struct MpsApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__mpsApiAddAdditionalDependants__)(struct RsClient *, struct MpsApi *, RsResourceRef *); + NV_STATUS (*__mpsApiUnmap__)(struct MpsApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__mpsApiControl_Prologue__)(struct MpsApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__mpsApiCanCopy__)(struct MpsApi *); + NV_STATUS (*__mpsApiMapTo__)(struct MpsApi *, RS_RES_MAP_TO_PARAMS *); + void (*__mpsApiPreDestruct__)(struct MpsApi *); + NV_STATUS (*__mpsApiUnmapFrom__)(struct MpsApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__mpsApiControl_Epilogue__)(struct MpsApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__mpsApiControlLookup__)(struct MpsApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__mpsApiMap__)(struct MpsApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__mpsApiAccessCallback__)(struct MpsApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_MpsApi_TYPEDEF__ +#define __NVOC_CLASS_MpsApi_TYPEDEF__ +typedef struct MpsApi MpsApi; +#endif /* __NVOC_CLASS_MpsApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MpsApi +#define __nvoc_class_id_MpsApi 0x22ce42 +#endif /* __nvoc_class_id_MpsApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_MpsApi; + +#define __staticCast_MpsApi(pThis) \ + ((pThis)->__nvoc_pbase_MpsApi) + +#ifdef __nvoc_mps_api_h_disabled +#define __dynamicCast_MpsApi(pThis) ((MpsApi*)NULL) +#else //__nvoc_mps_api_h_disabled +#define __dynamicCast_MpsApi(pThis) \ + ((MpsApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(MpsApi))) +#endif //__nvoc_mps_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_MpsApi(MpsApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_MpsApi(MpsApi**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_MpsApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_MpsApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define mpsApiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) mpsApiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define mpsApiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) 
mpsApiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define mpsApiControl(pResource, pCallContext, pParams) mpsApiControl_DISPATCH(pResource, pCallContext, pParams) +#define mpsApiGetMemInterMapParams(pRmResource, pParams) mpsApiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define mpsApiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) mpsApiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define mpsApiGetRefCount(pResource) mpsApiGetRefCount_DISPATCH(pResource) +#define mpsApiControlFilter(pResource, pCallContext, pParams) mpsApiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define mpsApiAddAdditionalDependants(pClient, pResource, pReference) mpsApiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define mpsApiUnmap(pResource, pCallContext, pCpuMapping) mpsApiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define mpsApiControl_Prologue(pResource, pCallContext, pParams) mpsApiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define mpsApiCanCopy(pResource) mpsApiCanCopy_DISPATCH(pResource) +#define mpsApiMapTo(pResource, pParams) mpsApiMapTo_DISPATCH(pResource, pParams) +#define mpsApiPreDestruct(pResource) mpsApiPreDestruct_DISPATCH(pResource) +#define mpsApiUnmapFrom(pResource, pParams) mpsApiUnmapFrom_DISPATCH(pResource, pParams) +#define mpsApiControl_Epilogue(pResource, pCallContext, pParams) mpsApiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define mpsApiControlLookup(pResource, pParams, ppEntry) mpsApiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define mpsApiMap(pResource, pCallContext, pParams, pCpuMapping) mpsApiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define mpsApiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) mpsApiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool mpsApiShareCallback_DISPATCH(struct MpsApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__mpsApiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS mpsApiCheckMemInterUnmap_DISPATCH(struct MpsApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__mpsApiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS mpsApiControl_DISPATCH(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__mpsApiControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS mpsApiGetMemInterMapParams_DISPATCH(struct MpsApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__mpsApiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS mpsApiGetMemoryMappingDescriptor_DISPATCH(struct MpsApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__mpsApiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 mpsApiGetRefCount_DISPATCH(struct MpsApi *pResource) { + return pResource->__mpsApiGetRefCount__(pResource); +} + +static inline NV_STATUS mpsApiControlFilter_DISPATCH(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__mpsApiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void mpsApiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct MpsApi 
*pResource, RsResourceRef *pReference) { + pResource->__mpsApiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS mpsApiUnmap_DISPATCH(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__mpsApiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS mpsApiControl_Prologue_DISPATCH(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__mpsApiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool mpsApiCanCopy_DISPATCH(struct MpsApi *pResource) { + return pResource->__mpsApiCanCopy__(pResource); +} + +static inline NV_STATUS mpsApiMapTo_DISPATCH(struct MpsApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__mpsApiMapTo__(pResource, pParams); +} + +static inline void mpsApiPreDestruct_DISPATCH(struct MpsApi *pResource) { + pResource->__mpsApiPreDestruct__(pResource); +} + +static inline NV_STATUS mpsApiUnmapFrom_DISPATCH(struct MpsApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__mpsApiUnmapFrom__(pResource, pParams); +} + +static inline void mpsApiControl_Epilogue_DISPATCH(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__mpsApiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS mpsApiControlLookup_DISPATCH(struct MpsApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__mpsApiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS mpsApiMap_DISPATCH(struct MpsApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__mpsApiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool mpsApiAccessCallback_DISPATCH(struct MpsApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__mpsApiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS mpsApiConstruct_IMPL(struct MpsApi *arg_pMpsApi, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_mpsApiConstruct(arg_pMpsApi, arg_pCallContext, arg_pParams) mpsApiConstruct_IMPL(arg_pMpsApi, arg_pCallContext, arg_pParams) +void mpsApiDestruct_IMPL(struct MpsApi *pMpsApi); +#define __nvoc_mpsApiDestruct(pMpsApi) mpsApiDestruct_IMPL(pMpsApi) +#undef PRIVATE_FIELD + + +#endif // _MPSAPI_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_MPS_API_NVOC_H_ diff --git a/src/nvidia/generated/g_no_device_mem_nvoc.c b/src/nvidia/generated/g_no_device_mem_nvoc.c new file mode 100644 index 000000000..245dfdfd3 --- /dev/null +++ b/src/nvidia/generated/g_no_device_mem_nvoc.c @@ -0,0 +1,324 @@ +#define NVOC_NO_DEVICE_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_no_device_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x6c0832 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NoDeviceMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF 
__nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_NoDeviceMemory(NoDeviceMemory*); +void __nvoc_init_funcTable_NoDeviceMemory(NoDeviceMemory*); +NV_STATUS __nvoc_ctor_NoDeviceMemory(NoDeviceMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_NoDeviceMemory(NoDeviceMemory*); +void __nvoc_dtor_NoDeviceMemory(NoDeviceMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NoDeviceMemory; + +static const struct NVOC_RTTI __nvoc_rtti_NoDeviceMemory_NoDeviceMemory = { + /*pClassDef=*/ &__nvoc_class_def_NoDeviceMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NoDeviceMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_NoDeviceMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NoDeviceMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NoDeviceMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NoDeviceMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NoDeviceMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NoDeviceMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NoDeviceMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NoDeviceMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NoDeviceMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NoDeviceMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_NoDeviceMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_NoDeviceMemory_NoDeviceMemory, + &__nvoc_rtti_NoDeviceMemory_Memory, + &__nvoc_rtti_NoDeviceMemory_RmResource, + &__nvoc_rtti_NoDeviceMemory_RmResourceCommon, + &__nvoc_rtti_NoDeviceMemory_RsResource, + &__nvoc_rtti_NoDeviceMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_NoDeviceMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NoDeviceMemory), + /*classId=*/ classId(NoDeviceMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NoDeviceMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NoDeviceMemory, + /*pCastInfo=*/ &__nvoc_castinfo_NoDeviceMemory, + /*pExportInfo=*/ &__nvoc_export_info_NoDeviceMemory +}; + +static NV_STATUS __nvoc_thunk_NoDeviceMemory_memGetMapAddrSpace(struct Memory *pNoDeviceMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return nodevicememGetMapAddrSpace((struct NoDeviceMemory *)(((unsigned char *)pNoDeviceMemory) - __nvoc_rtti_NoDeviceMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NV_STATUS __nvoc_thunk_Memory_nodevicememCheckMemInterUnmap(struct NoDeviceMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + 
__nvoc_rtti_NoDeviceMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_nodevicememControl(struct NoDeviceMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_NoDeviceMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_nodevicememUnmap(struct NoDeviceMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_NoDeviceMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_nodevicememGetMemInterMapParams(struct NoDeviceMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_NoDeviceMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_nodevicememGetMemoryMappingDescriptor(struct NoDeviceMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_NoDeviceMemory_Memory.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_RmResource_nodevicememShareCallback(struct NoDeviceMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_nodevicememControlFilter(struct NoDeviceMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_nodevicememAddAdditionalDependants(struct RsClient *pClient, struct NoDeviceMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_nodevicememGetRefCount(struct NoDeviceMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_nodevicememMapTo(struct NoDeviceMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_nodevicememControl_Prologue(struct NoDeviceMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_nodevicememCanCopy(struct NoDeviceMemory *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_nodevicememIsReady(struct NoDeviceMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_NoDeviceMemory_Memory.offset)); +} + +static NV_STATUS 
__nvoc_thunk_Memory_nodevicememCheckCopyPermissions(struct NoDeviceMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_NoDeviceMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_nodevicememPreDestruct(struct NoDeviceMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_nodevicememUnmapFrom(struct NoDeviceMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_nodevicememControl_Epilogue(struct NoDeviceMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_nodevicememControlLookup(struct NoDeviceMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_nodevicememMap(struct NoDeviceMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_NoDeviceMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_nodevicememAccessCallback(struct NoDeviceMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_NoDeviceMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_NoDeviceMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_NoDeviceMemory(NoDeviceMemory *pThis) { + __nvoc_nodevicememDestruct(pThis); + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NoDeviceMemory(NoDeviceMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_NoDeviceMemory(NoDeviceMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NoDeviceMemory_fail_Memory; + __nvoc_init_dataField_NoDeviceMemory(pThis); + + status = __nvoc_nodevicememConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_NoDeviceMemory_fail__init; + goto __nvoc_ctor_NoDeviceMemory_exit; // Success + +__nvoc_ctor_NoDeviceMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_NoDeviceMemory_fail_Memory: +__nvoc_ctor_NoDeviceMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_NoDeviceMemory_1(NoDeviceMemory *pThis) { + 
PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__nodevicememGetMapAddrSpace__ = &nodevicememGetMapAddrSpace_IMPL; + + pThis->__nvoc_base_Memory.__memGetMapAddrSpace__ = &__nvoc_thunk_NoDeviceMemory_memGetMapAddrSpace; + + pThis->__nodevicememCheckMemInterUnmap__ = &__nvoc_thunk_Memory_nodevicememCheckMemInterUnmap; + + pThis->__nodevicememControl__ = &__nvoc_thunk_Memory_nodevicememControl; + + pThis->__nodevicememUnmap__ = &__nvoc_thunk_Memory_nodevicememUnmap; + + pThis->__nodevicememGetMemInterMapParams__ = &__nvoc_thunk_Memory_nodevicememGetMemInterMapParams; + + pThis->__nodevicememGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_nodevicememGetMemoryMappingDescriptor; + + pThis->__nodevicememShareCallback__ = &__nvoc_thunk_RmResource_nodevicememShareCallback; + + pThis->__nodevicememControlFilter__ = &__nvoc_thunk_RsResource_nodevicememControlFilter; + + pThis->__nodevicememAddAdditionalDependants__ = &__nvoc_thunk_RsResource_nodevicememAddAdditionalDependants; + + pThis->__nodevicememGetRefCount__ = &__nvoc_thunk_RsResource_nodevicememGetRefCount; + + pThis->__nodevicememMapTo__ = &__nvoc_thunk_RsResource_nodevicememMapTo; + + pThis->__nodevicememControl_Prologue__ = &__nvoc_thunk_RmResource_nodevicememControl_Prologue; + + pThis->__nodevicememCanCopy__ = &__nvoc_thunk_RsResource_nodevicememCanCopy; + + pThis->__nodevicememIsReady__ = &__nvoc_thunk_Memory_nodevicememIsReady; + + pThis->__nodevicememCheckCopyPermissions__ = &__nvoc_thunk_Memory_nodevicememCheckCopyPermissions; + + pThis->__nodevicememPreDestruct__ = &__nvoc_thunk_RsResource_nodevicememPreDestruct; + + pThis->__nodevicememUnmapFrom__ = &__nvoc_thunk_RsResource_nodevicememUnmapFrom; + + pThis->__nodevicememControl_Epilogue__ = &__nvoc_thunk_RmResource_nodevicememControl_Epilogue; + + pThis->__nodevicememControlLookup__ = &__nvoc_thunk_RsResource_nodevicememControlLookup; + + pThis->__nodevicememMap__ = &__nvoc_thunk_Memory_nodevicememMap; + + pThis->__nodevicememAccessCallback__ = &__nvoc_thunk_RmResource_nodevicememAccessCallback; +} + +void __nvoc_init_funcTable_NoDeviceMemory(NoDeviceMemory *pThis) { + __nvoc_init_funcTable_NoDeviceMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_NoDeviceMemory(NoDeviceMemory *pThis) { + pThis->__nvoc_pbase_NoDeviceMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_NoDeviceMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_NoDeviceMemory(NoDeviceMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + NoDeviceMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(NoDeviceMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(NoDeviceMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NoDeviceMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + 
objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_NoDeviceMemory(pThis); + status = __nvoc_ctor_NoDeviceMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_NoDeviceMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_NoDeviceMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NoDeviceMemory(NoDeviceMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_NoDeviceMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_no_device_mem_nvoc.h b/src/nvidia/generated/g_no_device_mem_nvoc.h new file mode 100644 index 000000000..83904f607 --- /dev/null +++ b/src/nvidia/generated/g_no_device_mem_nvoc.h @@ -0,0 +1,228 @@ +#ifndef _G_NO_DEVICE_MEM_NVOC_H_ +#define _G_NO_DEVICE_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_no_device_mem_nvoc.h" + +#ifndef _NO_DEVICE_MEMORY_H_ +#define _NO_DEVICE_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/** + * This class represents contiguous system memory that is associated with a + * client instead of a device. This object can be used for memory allocations + * that should survive device teardown. 
+ */ +#ifdef NVOC_NO_DEVICE_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NoDeviceMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct NoDeviceMemory *__nvoc_pbase_NoDeviceMemory; + NV_STATUS (*__nodevicememGetMapAddrSpace__)(struct NoDeviceMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NV_STATUS (*__nodevicememCheckMemInterUnmap__)(struct NoDeviceMemory *, NvBool); + NV_STATUS (*__nodevicememControl__)(struct NoDeviceMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nodevicememUnmap__)(struct NoDeviceMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__nodevicememGetMemInterMapParams__)(struct NoDeviceMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__nodevicememGetMemoryMappingDescriptor__)(struct NoDeviceMemory *, MEMORY_DESCRIPTOR **); + NvBool (*__nodevicememShareCallback__)(struct NoDeviceMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__nodevicememControlFilter__)(struct NoDeviceMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__nodevicememAddAdditionalDependants__)(struct RsClient *, struct NoDeviceMemory *, RsResourceRef *); + NvU32 (*__nodevicememGetRefCount__)(struct NoDeviceMemory *); + NV_STATUS (*__nodevicememMapTo__)(struct NoDeviceMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__nodevicememControl_Prologue__)(struct NoDeviceMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__nodevicememCanCopy__)(struct NoDeviceMemory *); + NV_STATUS (*__nodevicememIsReady__)(struct NoDeviceMemory *); + NV_STATUS (*__nodevicememCheckCopyPermissions__)(struct NoDeviceMemory *, struct OBJGPU *, NvHandle); + void (*__nodevicememPreDestruct__)(struct NoDeviceMemory *); + NV_STATUS (*__nodevicememUnmapFrom__)(struct NoDeviceMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__nodevicememControl_Epilogue__)(struct NoDeviceMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__nodevicememControlLookup__)(struct NoDeviceMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__nodevicememMap__)(struct NoDeviceMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__nodevicememAccessCallback__)(struct NoDeviceMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ +#define __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ +typedef struct NoDeviceMemory NoDeviceMemory; +#endif /* __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NoDeviceMemory +#define __nvoc_class_id_NoDeviceMemory 0x6c0832 +#endif /* __nvoc_class_id_NoDeviceMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NoDeviceMemory; + +#define __staticCast_NoDeviceMemory(pThis) \ + ((pThis)->__nvoc_pbase_NoDeviceMemory) + +#ifdef __nvoc_no_device_mem_h_disabled +#define __dynamicCast_NoDeviceMemory(pThis) ((NoDeviceMemory*)NULL) +#else //__nvoc_no_device_mem_h_disabled +#define __dynamicCast_NoDeviceMemory(pThis) \ + ((NoDeviceMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NoDeviceMemory))) +#endif //__nvoc_no_device_mem_h_disabled + + +NV_STATUS 
__nvoc_objCreateDynamic_NoDeviceMemory(NoDeviceMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NoDeviceMemory(NoDeviceMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_NoDeviceMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_NoDeviceMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define nodevicememGetMapAddrSpace(pNoDeviceMemory, pCallContext, mapFlags, pAddrSpace) nodevicememGetMapAddrSpace_DISPATCH(pNoDeviceMemory, pCallContext, mapFlags, pAddrSpace) +#define nodevicememCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) nodevicememCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define nodevicememControl(pMemory, pCallContext, pParams) nodevicememControl_DISPATCH(pMemory, pCallContext, pParams) +#define nodevicememUnmap(pMemory, pCallContext, pCpuMapping) nodevicememUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define nodevicememGetMemInterMapParams(pMemory, pParams) nodevicememGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define nodevicememGetMemoryMappingDescriptor(pMemory, ppMemDesc) nodevicememGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define nodevicememShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) nodevicememShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define nodevicememControlFilter(pResource, pCallContext, pParams) nodevicememControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define nodevicememAddAdditionalDependants(pClient, pResource, pReference) nodevicememAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define nodevicememGetRefCount(pResource) nodevicememGetRefCount_DISPATCH(pResource) +#define nodevicememMapTo(pResource, pParams) nodevicememMapTo_DISPATCH(pResource, pParams) +#define nodevicememControl_Prologue(pResource, pCallContext, pParams) nodevicememControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define nodevicememCanCopy(pResource) nodevicememCanCopy_DISPATCH(pResource) +#define nodevicememIsReady(pMemory) nodevicememIsReady_DISPATCH(pMemory) +#define nodevicememCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) nodevicememCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define nodevicememPreDestruct(pResource) nodevicememPreDestruct_DISPATCH(pResource) +#define nodevicememUnmapFrom(pResource, pParams) nodevicememUnmapFrom_DISPATCH(pResource, pParams) +#define nodevicememControl_Epilogue(pResource, pCallContext, pParams) nodevicememControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define nodevicememControlLookup(pResource, pParams, ppEntry) nodevicememControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define nodevicememMap(pMemory, pCallContext, pParams, pCpuMapping) nodevicememMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define nodevicememAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) nodevicememAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS nodevicememGetMapAddrSpace_IMPL(struct NoDeviceMemory *pNoDeviceMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace); + +static inline NV_STATUS nodevicememGetMapAddrSpace_DISPATCH(struct NoDeviceMemory *pNoDeviceMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return 
pNoDeviceMemory->__nodevicememGetMapAddrSpace__(pNoDeviceMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NV_STATUS nodevicememCheckMemInterUnmap_DISPATCH(struct NoDeviceMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__nodevicememCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS nodevicememControl_DISPATCH(struct NoDeviceMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__nodevicememControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS nodevicememUnmap_DISPATCH(struct NoDeviceMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__nodevicememUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS nodevicememGetMemInterMapParams_DISPATCH(struct NoDeviceMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__nodevicememGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS nodevicememGetMemoryMappingDescriptor_DISPATCH(struct NoDeviceMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__nodevicememGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NvBool nodevicememShareCallback_DISPATCH(struct NoDeviceMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__nodevicememShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS nodevicememControlFilter_DISPATCH(struct NoDeviceMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nodevicememControlFilter__(pResource, pCallContext, pParams); +} + +static inline void nodevicememAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct NoDeviceMemory *pResource, RsResourceRef *pReference) { + pResource->__nodevicememAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 nodevicememGetRefCount_DISPATCH(struct NoDeviceMemory *pResource) { + return pResource->__nodevicememGetRefCount__(pResource); +} + +static inline NV_STATUS nodevicememMapTo_DISPATCH(struct NoDeviceMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__nodevicememMapTo__(pResource, pParams); +} + +static inline NV_STATUS nodevicememControl_Prologue_DISPATCH(struct NoDeviceMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__nodevicememControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool nodevicememCanCopy_DISPATCH(struct NoDeviceMemory *pResource) { + return pResource->__nodevicememCanCopy__(pResource); +} + +static inline NV_STATUS nodevicememIsReady_DISPATCH(struct NoDeviceMemory *pMemory) { + return pMemory->__nodevicememIsReady__(pMemory); +} + +static inline NV_STATUS nodevicememCheckCopyPermissions_DISPATCH(struct NoDeviceMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__nodevicememCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void nodevicememPreDestruct_DISPATCH(struct NoDeviceMemory *pResource) { + pResource->__nodevicememPreDestruct__(pResource); +} + +static inline NV_STATUS nodevicememUnmapFrom_DISPATCH(struct NoDeviceMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__nodevicememUnmapFrom__(pResource, pParams); +} + +static inline void 
nodevicememControl_Epilogue_DISPATCH(struct NoDeviceMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__nodevicememControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS nodevicememControlLookup_DISPATCH(struct NoDeviceMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__nodevicememControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS nodevicememMap_DISPATCH(struct NoDeviceMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__nodevicememMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool nodevicememAccessCallback_DISPATCH(struct NoDeviceMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__nodevicememAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS nodevicememConstruct_IMPL(struct NoDeviceMemory *arg_pNoDeviceMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_nodevicememConstruct(arg_pNoDeviceMemory, arg_pCallContext, arg_pParams) nodevicememConstruct_IMPL(arg_pNoDeviceMemory, arg_pCallContext, arg_pParams) +void nodevicememDestruct_IMPL(struct NoDeviceMemory *pNoDeviceMemory); +#define __nvoc_nodevicememDestruct(pNoDeviceMemory) nodevicememDestruct_IMPL(pNoDeviceMemory) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_NO_DEVICE_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_nv_debug_dump_nvoc.c b/src/nvidia/generated/g_nv_debug_dump_nvoc.c new file mode 100644 index 000000000..d5c38aac8 --- /dev/null +++ b/src/nvidia/generated/g_nv_debug_dump_nvoc.c @@ -0,0 +1,283 @@ +#define NVOC_NV_DEBUG_DUMP_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_nv_debug_dump_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7e80a2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDebugDump; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_NvDebugDump(NvDebugDump*); +void __nvoc_init_funcTable_NvDebugDump(NvDebugDump*); +NV_STATUS __nvoc_ctor_NvDebugDump(NvDebugDump*); +void __nvoc_init_dataField_NvDebugDump(NvDebugDump*); +void __nvoc_dtor_NvDebugDump(NvDebugDump*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NvDebugDump; + +static const struct NVOC_RTTI __nvoc_rtti_NvDebugDump_NvDebugDump = { + /*pClassDef=*/ &__nvoc_class_def_NvDebugDump, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NvDebugDump, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDebugDump_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDebugDump, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_NvDebugDump_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(NvDebugDump, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_NvDebugDump = { + /*numRelatives=*/ 3, + /*relatives=*/ { + 
&__nvoc_rtti_NvDebugDump_NvDebugDump, + &__nvoc_rtti_NvDebugDump_OBJENGSTATE, + &__nvoc_rtti_NvDebugDump_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_NvDebugDump = +{ + /*classInfo=*/ { + /*size=*/ sizeof(NvDebugDump), + /*classId=*/ classId(NvDebugDump), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "NvDebugDump", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NvDebugDump, + /*pCastInfo=*/ &__nvoc_castinfo_NvDebugDump, + /*pExportInfo=*/ &__nvoc_export_info_NvDebugDump +}; + +static NV_STATUS __nvoc_thunk_NvDebugDump_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pNvd, ENGDESCRIPTOR arg0) { + return nvdConstructEngine(pGpu, (struct NvDebugDump *)(((unsigned char *)pNvd) - __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_NvDebugDump_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pNvd) { + return nvdStateInitLocked(pGpu, (struct NvDebugDump *)(((unsigned char *)pNvd) - __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdReconcileTunableState(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStateLoad(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStateUnload(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStatePreLoad(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStatePostUnload(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_nvdStateDestroy(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStatePreUnload(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStateInitUnlocked(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_nvdInitMissing(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStatePreInitLocked(POBJGPU pGpu, struct NvDebugDump 
*pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStatePreInitUnlocked(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdGetTunableState(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdCompareTunableState(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_nvdFreeTunableState(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdStatePostLoad(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdAllocTunableState(POBJGPU pGpu, struct NvDebugDump *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_nvdSetTunableState(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_nvdIsPresent(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_NvDebugDump_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_NvDebugDump = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_NvDebugDump(NvDebugDump *pThis) { + __nvoc_nvdDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_NvDebugDump(NvDebugDump *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_NvDebugDump(NvDebugDump *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_NvDebugDump_fail_OBJENGSTATE; + __nvoc_init_dataField_NvDebugDump(pThis); + goto __nvoc_ctor_NvDebugDump_exit; // Success + +__nvoc_ctor_NvDebugDump_fail_OBJENGSTATE: +__nvoc_ctor_NvDebugDump_exit: + + return status; +} + +static void __nvoc_init_funcTable_NvDebugDump_1(NvDebugDump *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__nvdConstructEngine__ = &nvdConstructEngine_IMPL; + + pThis->__nvdStateInitLocked__ = &nvdStateInitLocked_IMPL; + 
+ pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_NvDebugDump_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_NvDebugDump_engstateStateInitLocked; + + pThis->__nvdReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_nvdReconcileTunableState; + + pThis->__nvdStateLoad__ = &__nvoc_thunk_OBJENGSTATE_nvdStateLoad; + + pThis->__nvdStateUnload__ = &__nvoc_thunk_OBJENGSTATE_nvdStateUnload; + + pThis->__nvdStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_nvdStatePreLoad; + + pThis->__nvdStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_nvdStatePostUnload; + + pThis->__nvdStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_nvdStateDestroy; + + pThis->__nvdStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_nvdStatePreUnload; + + pThis->__nvdStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_nvdStateInitUnlocked; + + pThis->__nvdInitMissing__ = &__nvoc_thunk_OBJENGSTATE_nvdInitMissing; + + pThis->__nvdStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_nvdStatePreInitLocked; + + pThis->__nvdStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_nvdStatePreInitUnlocked; + + pThis->__nvdGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_nvdGetTunableState; + + pThis->__nvdCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_nvdCompareTunableState; + + pThis->__nvdFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_nvdFreeTunableState; + + pThis->__nvdStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_nvdStatePostLoad; + + pThis->__nvdAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_nvdAllocTunableState; + + pThis->__nvdSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_nvdSetTunableState; + + pThis->__nvdIsPresent__ = &__nvoc_thunk_OBJENGSTATE_nvdIsPresent; +} + +void __nvoc_init_funcTable_NvDebugDump(NvDebugDump *pThis) { + __nvoc_init_funcTable_NvDebugDump_1(pThis); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_NvDebugDump(NvDebugDump *pThis) { + pThis->__nvoc_pbase_NvDebugDump = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_NvDebugDump(pThis); +} + +NV_STATUS __nvoc_objCreate_NvDebugDump(NvDebugDump **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + NvDebugDump *pThis; + + pThis = portMemAllocNonPaged(sizeof(NvDebugDump)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(NvDebugDump)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NvDebugDump); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_NvDebugDump(pThis); + status = __nvoc_ctor_NvDebugDump(pThis); + if (status != NV_OK) goto __nvoc_objCreate_NvDebugDump_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_NvDebugDump_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_NvDebugDump(NvDebugDump **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_NvDebugDump(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_nv_debug_dump_nvoc.h 
b/src/nvidia/generated/g_nv_debug_dump_nvoc.h new file mode 100644 index 000000000..44b011691 --- /dev/null +++ b/src/nvidia/generated/g_nv_debug_dump_nvoc.h @@ -0,0 +1,424 @@ +#ifndef _G_NV_DEBUG_DUMP_NVOC_H_ +#define _G_NV_DEBUG_DUMP_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_nv_debug_dump_nvoc.h" + +#ifndef _NV_DEBUG_DUMP_H_ +#define _NV_DEBUG_DUMP_H_ + +#include "gpu/eng_state.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "core/info_block.h" + +#include "nvdump.h" + +#include "lib/protobuf/prb.h" +#include "rmapi/control.h" +#include "gpu/gpu.h" + +// Os Independent Error Types +typedef enum +{ + NVD_SKIP_ZERO, + NVD_GPU_HUNG, + NVD_FAILURE_TO_RECOVER, + NVD_MACHINE_CHECK, + NVD_POWERUP_FAILURE, + NVD_CPU_EXCEPTION, + NVD_EXTERNALLY_GENERATED, + NVD_GPU_GENERATED, +} NVD_ERROR_TYPE; + +#define NV_NVD_ERROR_CODE_MAJOR 31:16 +#define NV_NVD_ERROR_CODE_MINOR 15:0 + +#define NVD_ERROR_CODE(Major, Minor) \ + (DRF_NUM(_NVD, _ERROR_CODE, _MAJOR, Major) | \ + DRF_NUM(_NVD, _ERROR_CODE, _MINOR, Minor)) + + +#define NVD_ENGINE_FLAGS_PRIORITY 1:0 +#define NVD_ENGINE_FLAGS_PRIORITY_LOW 0x00000000 +#define NVD_ENGINE_FLAGS_PRIORITY_MED 0x00000001 +#define NVD_ENGINE_FLAGS_PRIORITY_HIGH 0x00000002 +#define NVD_ENGINE_FLAGS_PRIORITY_CRITICAL 0x00000003 + +/* + * NVD_ENGINE_FLAGS_SOURCE + * + * CPU - Always run on CPU, even if running as GSP-RM client. + * GSP - Run on GSP for GSP-RM client, otherwise run on CPU. + * BOTH - Engine dump is split between GSP-RM and CPU. Run both. 
+ */ +#define NVD_ENGINE_FLAGS_SOURCE 3:2 +#define NVD_ENGINE_FLAGS_SOURCE_CPU 0x00000001 +#define NVD_ENGINE_FLAGS_SOURCE_GSP 0x00000002 +#define NVD_ENGINE_FLAGS_SOURCE_BOTH 0x00000003 + + +#define NV_NVD_ENGINE_STEP_MAJOR 31:16 +#define NV_NVD_ENGINE_STEP_MINOR 15:0 + +#define NVD_ENGINE_STEP(Major, Minor) \ + (DRF_NUM(_NVD, _ENGINE_STEP, _MAJOR, Major) | \ + DRF_NUM(_NVD, _ENGINE_STEP, _MINOR, Minor)) + +typedef enum +{ + NVD_FIRST_ENGINE = 0, + NVD_LAST_ENGINE = 0xFF, +} NVD_WHICH_ENGINE; + +typedef struct _def_nvd_debug_buffer { + NvU32 tag; + MEMORY_DESCRIPTOR *pMemDesc; + struct _def_nvd_debug_buffer *pNext; +} NVD_DEBUG_BUFFER; + +// Enumeration of Dump Types (Journal Entry, OCA dump, or API requested dump) +typedef enum +{ + NVD_DUMP_TYPE_JOURNAL, // Very small records only. Total for + // whole Journal is 4K (including overhead), + // actual amount of raw data stored is less. + NVD_DUMP_TYPE_OCA, // Assume 8K - 512 K total + NVD_DUMP_TYPE_API, // Mini Dump >512K +} NVD_DUMP_TYPE; + +// Enumeration of Sizes returned by nvDumpGetDumpBufferSizeEnum +typedef enum +{ + NVD_DUMP_SIZE_JOURNAL_WRITE, // Very small records only. + NVD_DUMP_SIZE_SMALL, // Assume 8K - 512 K total + NVD_DUMP_SIZE_MEDIUM, // Mini Dump >512K + NVD_DUMP_SIZE_LARGE // Megs of space +} NVD_DUMP_SIZE; + +// +// NV Dump State +// +// State passed into all dump routines. +// +typedef struct _def_nvd_state NVD_STATE; + +struct _def_nvd_state +{ + NvBool bDumpInProcess; // Currently creating dump. + NvBool bRMLock; // Acquired the RM lock. + NvBool bGpuAccessible; // OK to read priv registers on GPU. + NvU32 bugCheckCode; // Raw OS bugcheck code. + NvU32 internalCode; // OS Independent error code. + NvU32 initialbufferSize; // Size of buffer passed in. + NVD_DUMP_TYPE nvDumpType; // Type of DUMP. +}; + + +NVD_DUMP_SIZE nvDumpGetDumpBufferSizeEnum( NVD_STATE *pNvDumpState ); + +typedef NV_STATUS NvdDumpEngineFunc(struct OBJGPU *pGpu, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, void *pvData); + +typedef struct _def_nvd_engine_callback { + NvdDumpEngineFunc *pDumpEngineFunc; // Callback function. + NvU32 engDesc; // Indicates which engine this is. + NvU32 flags; // See NVD_ENGINE_FLAGS above. + void *pvData; // Opaque pointer to data passed to callback function. 
+ struct _def_nvd_engine_callback *pNext; // Next Engine +} NVD_ENGINE_CALLBACK; + +#ifdef NVOC_NV_DEBUG_DUMP_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct NvDebugDump { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct NvDebugDump *__nvoc_pbase_NvDebugDump; + NV_STATUS (*__nvdConstructEngine__)(struct OBJGPU *, struct NvDebugDump *, ENGDESCRIPTOR); + NV_STATUS (*__nvdStateInitLocked__)(struct OBJGPU *, struct NvDebugDump *); + NV_STATUS (*__nvdReconcileTunableState__)(POBJGPU, struct NvDebugDump *, void *); + NV_STATUS (*__nvdStateLoad__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdStateUnload__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdStatePreLoad__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdStatePostUnload__)(POBJGPU, struct NvDebugDump *, NvU32); + void (*__nvdStateDestroy__)(POBJGPU, struct NvDebugDump *); + NV_STATUS (*__nvdStatePreUnload__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdStateInitUnlocked__)(POBJGPU, struct NvDebugDump *); + void (*__nvdInitMissing__)(POBJGPU, struct NvDebugDump *); + NV_STATUS (*__nvdStatePreInitLocked__)(POBJGPU, struct NvDebugDump *); + NV_STATUS (*__nvdStatePreInitUnlocked__)(POBJGPU, struct NvDebugDump *); + NV_STATUS (*__nvdGetTunableState__)(POBJGPU, struct NvDebugDump *, void *); + NV_STATUS (*__nvdCompareTunableState__)(POBJGPU, struct NvDebugDump *, void *, void *); + void (*__nvdFreeTunableState__)(POBJGPU, struct NvDebugDump *, void *); + NV_STATUS (*__nvdStatePostLoad__)(POBJGPU, struct NvDebugDump *, NvU32); + NV_STATUS (*__nvdAllocTunableState__)(POBJGPU, struct NvDebugDump *, void **); + NV_STATUS (*__nvdSetTunableState__)(POBJGPU, struct NvDebugDump *, void *); + NvBool (*__nvdIsPresent__)(POBJGPU, struct NvDebugDump *); + NVD_DEBUG_BUFFER *pHeadDebugBuffer; + NVD_ENGINE_CALLBACK *pCallbacks; +}; + +#ifndef __NVOC_CLASS_NvDebugDump_TYPEDEF__ +#define __NVOC_CLASS_NvDebugDump_TYPEDEF__ +typedef struct NvDebugDump NvDebugDump; +#endif /* __NVOC_CLASS_NvDebugDump_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDebugDump +#define __nvoc_class_id_NvDebugDump 0x7e80a2 +#endif /* __nvoc_class_id_NvDebugDump */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_NvDebugDump; + +#define __staticCast_NvDebugDump(pThis) \ + ((pThis)->__nvoc_pbase_NvDebugDump) + +#ifdef __nvoc_nv_debug_dump_h_disabled +#define __dynamicCast_NvDebugDump(pThis) ((NvDebugDump*)NULL) +#else //__nvoc_nv_debug_dump_h_disabled +#define __dynamicCast_NvDebugDump(pThis) \ + ((NvDebugDump*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NvDebugDump))) +#endif //__nvoc_nv_debug_dump_h_disabled + +#define PDB_PROP_NVD_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. 
+#define PDB_PROP_NVD_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_NvDebugDump(NvDebugDump**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_NvDebugDump(NvDebugDump**, Dynamic*, NvU32); +#define __objCreate_NvDebugDump(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_NvDebugDump((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define nvdConstructEngine(pGpu, pNvd, arg0) nvdConstructEngine_DISPATCH(pGpu, pNvd, arg0) +#define nvdStateInitLocked(pGpu, pNvd) nvdStateInitLocked_DISPATCH(pGpu, pNvd) +#define nvdReconcileTunableState(pGpu, pEngstate, pTunableState) nvdReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define nvdStateLoad(pGpu, pEngstate, arg0) nvdStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStateUnload(pGpu, pEngstate, arg0) nvdStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStatePreLoad(pGpu, pEngstate, arg0) nvdStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStatePostUnload(pGpu, pEngstate, arg0) nvdStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStateDestroy(pGpu, pEngstate) nvdStateDestroy_DISPATCH(pGpu, pEngstate) +#define nvdStatePreUnload(pGpu, pEngstate, arg0) nvdStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define nvdStateInitUnlocked(pGpu, pEngstate) nvdStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define nvdInitMissing(pGpu, pEngstate) nvdInitMissing_DISPATCH(pGpu, pEngstate) +#define nvdStatePreInitLocked(pGpu, pEngstate) nvdStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define nvdStatePreInitUnlocked(pGpu, pEngstate) nvdStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define nvdGetTunableState(pGpu, pEngstate, pTunableState) nvdGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define nvdCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) nvdCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define nvdFreeTunableState(pGpu, pEngstate, pTunableState) nvdFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define nvdStatePostLoad(pGpu, pEngstate, arg0) nvdStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define nvdAllocTunableState(pGpu, pEngstate, ppTunableState) nvdAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define nvdSetTunableState(pGpu, pEngstate, pTunableState) nvdSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define nvdIsPresent(pGpu, pEngstate) nvdIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS nvdConstructEngine_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, ENGDESCRIPTOR arg0); + +static inline NV_STATUS nvdConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, ENGDESCRIPTOR arg0) { + return pNvd->__nvdConstructEngine__(pGpu, pNvd, arg0); +} + +NV_STATUS nvdStateInitLocked_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd); + +static inline NV_STATUS nvdStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct NvDebugDump *pNvd) { + return pNvd->__nvdStateInitLocked__(pGpu, pNvd); +} + +static inline NV_STATUS nvdReconcileTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return pEngstate->__nvdReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS nvdStateLoad_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS nvdStateUnload_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStateUnload__(pGpu, pEngstate, arg0); +} + +static 
inline NV_STATUS nvdStatePreLoad_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS nvdStatePostUnload_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void nvdStateDestroy_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + pEngstate->__nvdStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS nvdStatePreUnload_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS nvdStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return pEngstate->__nvdStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void nvdInitMissing_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + pEngstate->__nvdInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS nvdStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return pEngstate->__nvdStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS nvdStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return pEngstate->__nvdStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS nvdGetTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return pEngstate->__nvdGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS nvdCompareTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__nvdCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void nvdFreeTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + pEngstate->__nvdFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS nvdStatePostLoad_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, NvU32 arg0) { + return pEngstate->__nvdStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS nvdAllocTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void **ppTunableState) { + return pEngstate->__nvdAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS nvdSetTunableState_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate, void *pTunableState) { + return pEngstate->__nvdSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool nvdIsPresent_DISPATCH(POBJGPU pGpu, struct NvDebugDump *pEngstate) { + return pEngstate->__nvdIsPresent__(pGpu, pEngstate); +} + +void nvdDestruct_IMPL(struct NvDebugDump *pNvd); +#define __nvoc_nvdDestruct(pNvd) nvdDestruct_IMPL(pNvd) +NV_STATUS nvdDumpComponent_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 component, NVDUMP_BUFFER *pBuffer, NVDUMP_BUFFER_POLICY policy, PrbBufferCallback *pBufferCallback); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdDumpComponent(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 component, NVDUMP_BUFFER *pBuffer, NVDUMP_BUFFER_POLICY policy, PrbBufferCallback *pBufferCallback) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdDumpComponent(pGpu, pNvd, component, pBuffer, policy, pBufferCallback) nvdDumpComponent_IMPL(pGpu, pNvd, component, pBuffer, policy, pBufferCallback) +#endif 
//__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdAllocDebugBuffer_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 arg0, NvU32 *arg1, MEMORY_DESCRIPTOR **arg2); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdAllocDebugBuffer(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 arg0, NvU32 *arg1, MEMORY_DESCRIPTOR **arg2) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdAllocDebugBuffer(pGpu, pNvd, arg0, arg1, arg2) nvdAllocDebugBuffer_IMPL(pGpu, pNvd, arg0, arg1, arg2) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdFreeDebugBuffer_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, MEMORY_DESCRIPTOR *arg0); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdFreeDebugBuffer(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, MEMORY_DESCRIPTOR *arg0) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdFreeDebugBuffer(pGpu, pNvd, arg0) nvdFreeDebugBuffer_IMPL(pGpu, pNvd, arg0) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdDumpDebugBuffers_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *arg0); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdDumpDebugBuffers(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *arg0) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdDumpDebugBuffers(pGpu, pNvd, arg0) nvdDumpDebugBuffers_IMPL(pGpu, pNvd, arg0) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdEngineSignUp_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvdDumpEngineFunc *arg0, NvU32 engDesc, NvU32 flags, void *arg1); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdEngineSignUp(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvdDumpEngineFunc *arg0, NvU32 engDesc, NvU32 flags, void *arg1) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdEngineSignUp(pGpu, pNvd, arg0, engDesc, flags, arg1) nvdEngineSignUp_IMPL(pGpu, pNvd, arg0, engDesc, flags, arg1) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdEngineRelease_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdEngineRelease(struct OBJGPU *pGpu, struct NvDebugDump *pNvd) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdEngineRelease(pGpu, pNvd) nvdEngineRelease_IMPL(pGpu, pNvd) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdDoEngineDump_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, NvU32 arg0); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdDoEngineDump(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdDoEngineDump(pGpu, pNvd, pPrbEnc, pNvDumpState, arg0) nvdDoEngineDump_IMPL(pGpu, pNvd, pPrbEnc, pNvDumpState, arg0) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdDumpAllEngines_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE 
*pNvDumpState); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdDumpAllEngines(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdDumpAllEngines(pGpu, pNvd, pPrbEnc, pNvDumpState) nvdDumpAllEngines_IMPL(pGpu, pNvd, pPrbEnc, pNvDumpState) +#endif //__nvoc_nv_debug_dump_h_disabled + +NV_STATUS nvdFindEngine_IMPL(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 engDesc, NVD_ENGINE_CALLBACK **ppEngineCallback); +#ifdef __nvoc_nv_debug_dump_h_disabled +static inline NV_STATUS nvdFindEngine(struct OBJGPU *pGpu, struct NvDebugDump *pNvd, NvU32 engDesc, NVD_ENGINE_CALLBACK **ppEngineCallback) { + NV_ASSERT_FAILED_PRECOMP("NvDebugDump was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_nv_debug_dump_h_disabled +#define nvdFindEngine(pGpu, pNvd, engDesc, ppEngineCallback) nvdFindEngine_IMPL(pGpu, pNvd, engDesc, ppEngineCallback) +#endif //__nvoc_nv_debug_dump_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _NV_DEBUG_DUMP_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_NV_DEBUG_DUMP_NVOC_H_ diff --git a/src/nvidia/generated/g_nv_name_released.h b/src/nvidia/generated/g_nv_name_released.h new file mode 100644 index 000000000..2b9bb3079 --- /dev/null +++ b/src/nvidia/generated/g_nv_name_released.h @@ -0,0 +1,1493 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef G_NV_NAME_RELEASED_H +#define G_NV_NAME_RELEASED_H + +typedef struct _CHIPS_RELEASED { + unsigned short devID; + unsigned short subSystemID; + unsigned short subSystemVendorID; + const char *name; +} CHIPS_RELEASED; + +static const CHIPS_RELEASED sChipsReleased[] = { + { 0x1340, 0x0000, 0x0000, "NVIDIA GeForce 830M" }, + { 0x1340, 0x2b2b, 0x103c, "NVIDIA GeForce 830A" }, + { 0x1341, 0x0000, 0x0000, "NVIDIA GeForce 840M" }, + { 0x1341, 0x3697, 0x17aa, "NVIDIA GeForce 840A" }, + { 0x1341, 0x3699, 0x17aa, "NVIDIA GeForce 840A" }, + { 0x1341, 0x369c, 0x17aa, "NVIDIA GeForce 840A" }, + { 0x1341, 0x36af, 0x17aa, "NVIDIA GeForce 840A" }, + { 0x1344, 0x0000, 0x0000, "NVIDIA GeForce 845M" }, + { 0x1346, 0x0000, 0x0000, "NVIDIA GeForce 930M" }, + { 0x1346, 0x30ba, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1346, 0x362c, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1346, 0x362f, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1346, 0x3636, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1347, 0x0000, 0x0000, "NVIDIA GeForce 940M" }, + { 0x1347, 0x36b9, 0x17aa, "NVIDIA GeForce 940A" }, + { 0x1347, 0x36ba, 0x17aa, "NVIDIA GeForce 940A" }, + { 0x1348, 0x0000, 0x0000, "NVIDIA GeForce 945M" }, + { 0x1348, 0x2b5c, 0x103c, "NVIDIA GeForce 945A" }, + { 0x1349, 0x0000, 0x0000, "NVIDIA GeForce 930M" }, + { 0x1349, 0x3124, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1349, 0x364b, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1349, 0x36c3, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1349, 0x36d1, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x1349, 0x36d8, 0x17aa, "NVIDIA GeForce 930A" }, + { 0x134B, 0x0000, 0x0000, "NVIDIA GeForce 940MX" }, + { 0x134B, 0x0008, 0x1414, "NVIDIA GeForce GPU" }, + { 0x134D, 0x0000, 0x0000, "NVIDIA GeForce 940MX" }, + { 0x134E, 0x0000, 0x0000, "NVIDIA GeForce 930MX" }, + { 0x134F, 0x0000, 0x0000, "NVIDIA GeForce 920MX" }, + { 0x137A, 0x0000, 0x0000, "NVIDIA N15M-Q3" }, + { 0x137A, 0x2225, 0x17aa, "Quadro K620M" }, + { 0x137A, 0x2232, 0x17aa, "Quadro M500M" }, + { 0x137A, 0x505a, 0x17aa, "Quadro M500M" }, + { 0x137B, 0x0000, 0x0000, "Quadro M520" }, + { 0x1380, 0x0000, 0x0000, "NVIDIA GeForce GTX 750 Ti" }, + { 0x1381, 0x0000, 0x0000, "NVIDIA GeForce GTX 750" }, + { 0x1382, 0x0000, 0x0000, "NVIDIA GeForce GTX 745" }, + { 0x1390, 0x0000, 0x0000, "NVIDIA GeForce 845M" }, + { 0x1391, 0x0000, 0x0000, "NVIDIA GeForce GTX 850M" }, + { 0x1391, 0x3697, 0x17aa, "NVIDIA GeForce GTX 850A" }, + { 0x1392, 0x0000, 0x0000, "NVIDIA GeForce GTX 860M" }, + { 0x1392, 0x066a, 0x1028, "NVIDIA GeForce GPU" }, + { 0x1392, 0x861e, 0x1043, "NVIDIA GeForce GTX 750 Ti" }, + { 0x1392, 0x86d9, 0x1043, "NVIDIA GeForce GTX 750 Ti" }, + { 0x1393, 0x0000, 0x0000, "NVIDIA GeForce 840M" }, + { 0x1398, 0x0000, 0x0000, "NVIDIA GeForce 845M" }, + { 0x1399, 0x0000, 0x0000, "NVIDIA GeForce 945M" }, + { 0x139A, 0x0000, 0x0000, "NVIDIA GeForce GTX 950M" }, + { 0x139A, 0x362c, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x362f, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x363f, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x3640, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x3647, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139A, 0x36b9, 0x17aa, "NVIDIA GeForce GTX 950A" }, + { 0x139B, 0x0000, 0x0000, "NVIDIA GeForce GTX 960M" }, + { 0x139B, 0x107a, 0x1025, "NVIDIA GeForce GTX 750 Ti" }, + { 0x139B, 0x06a3, 0x1028, "NVIDIA GeForce GTX 860M" }, + { 0x139B, 0x2b4c, 0x103c, "NVIDIA GeForce GTX 960A" }, + { 0x139B, 0x3649, 0x17aa, "NVIDIA GeForce GTX 750Ti" }, + { 0x139B, 0x36bf, 0x17aa, "NVIDIA GeForce GTX 960A" }, + { 0x139B, 0xc248, 0x19da, 
"NVIDIA GeForce GTX 750 Ti" }, + { 0x139B, 0x8a75, 0x1afa, "NVIDIA GeForce GTX 750Ti" }, + { 0x139C, 0x0000, 0x0000, "NVIDIA GeForce 940M" }, + { 0x139D, 0x0000, 0x0000, "NVIDIA GeForce GTX 750 Ti" }, + { 0x13B0, 0x0000, 0x0000, "Quadro M2000M" }, + { 0x13B1, 0x0000, 0x0000, "Quadro M1000M" }, + { 0x13B2, 0x0000, 0x0000, "Quadro M600M" }, + { 0x13B3, 0x0000, 0x0000, "Quadro K2200M" }, + { 0x13B4, 0x0000, 0x0000, "Quadro M620" }, + { 0x13B6, 0x0000, 0x0000, "Quadro M1200" }, + { 0x13B9, 0x0000, 0x0000, "NVS 810" }, + { 0x13BA, 0x0000, 0x0000, "Quadro K2200" }, + { 0x13BB, 0x0000, 0x0000, "Quadro K620" }, + { 0x13BC, 0x0000, 0x0000, "Quadro K1200" }, + { 0x13BC, 0x1140, 0x15c3, "EIZO Quadro MED-XN50LP" }, + { 0x13C0, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" }, + { 0x13C2, 0x0000, 0x0000, "NVIDIA GeForce GTX 970" }, + { 0x13D7, 0x0000, 0x0000, "NVIDIA GeForce GTX 980M" }, + { 0x13D8, 0x0000, 0x0000, "NVIDIA GeForce GTX 970M" }, + { 0x13D8, 0x1198, 0x1462, "NVIDIA GeForce GTX 960" }, + { 0x13D8, 0x1199, 0x1462, "NVIDIA GeForce GTX 960" }, + { 0x13D8, 0xb282, 0x19da, "NVIDIA GeForce GTX 960" }, + { 0x13D8, 0xb284, 0x19da, "NVIDIA GeForce GTX 960" }, + { 0x13D8, 0xb286, 0x19da, "NVIDIA GeForce GTX 960" }, + { 0x13D9, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" }, + { 0x13DA, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" }, + { 0x13F0, 0x0000, 0x0000, "Quadro M5000" }, + { 0x13F1, 0x0000, 0x0000, "Quadro M4000" }, + { 0x13F1, 0x1153, 0x15c3, "EIZO Quadro MED-XN90" }, + { 0x13F2, 0x0000, 0x0000, "Tesla M60" }, + { 0x13F3, 0x0000, 0x0000, "Tesla M6" }, + { 0x13F8, 0x0000, 0x0000, "Quadro M5000M" }, + { 0x13F8, 0x11dd, 0x10de, "Quadro M5000 SE" }, + { 0x13F9, 0x0000, 0x0000, "Quadro M4000M" }, + { 0x13FA, 0x0000, 0x0000, "Quadro M3000M" }, + { 0x13FA, 0x11c9, 0x10de, "Quadro M3000 SE" }, + { 0x13FB, 0x0000, 0x0000, "Quadro M5500" }, + { 0x1401, 0x0000, 0x0000, "NVIDIA GeForce GTX 960" }, + { 0x1402, 0x0000, 0x0000, "NVIDIA GeForce GTX 950" }, + { 0x1406, 0x0000, 0x0000, "NVIDIA GeForce GTX 960" }, + { 0x1407, 0x0000, 0x0000, "NVIDIA GeForce GTX 750" }, + { 0x1427, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" }, + { 0x1427, 0xd003, 0x1458, "NVIDIA GeForce GTX 950" }, + { 0x1430, 0x0000, 0x0000, "Quadro M2000" }, + { 0x1430, 0x1190, 0x15c3, "EIZO Quadro MED-XN70" }, + { 0x1431, 0x0000, 0x0000, "Tesla M4" }, + { 0x1436, 0x0000, 0x0000, "Quadro M2200" }, + { 0x15F0, 0x0000, 0x0000, "Quadro GP100" }, + { 0x15F7, 0x0000, 0x0000, "Tesla P100-PCIE-12GB" }, + { 0x15F8, 0x0000, 0x0000, "Tesla P100-PCIE-16GB" }, + { 0x15F9, 0x0000, 0x0000, "Tesla P100-SXM2-16GB" }, + { 0x1617, 0x0000, 0x0000, "NVIDIA GeForce GTX 980M" }, + { 0x1618, 0x0000, 0x0000, "NVIDIA GeForce GTX 970M" }, + { 0x1619, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" }, + { 0x161A, 0x0000, 0x0000, "NVIDIA GeForce GTX 980" }, + { 0x1667, 0x0000, 0x0000, "NVIDIA GeForce GTX 965M" }, + { 0x174D, 0x0000, 0x0000, "NVIDIA GeForce MX130" }, + { 0x174E, 0x0000, 0x0000, "NVIDIA GeForce MX110" }, + { 0x179C, 0x0000, 0x0000, "NVIDIA GeForce 940MX" }, + { 0x17C2, 0x0000, 0x0000, "NVIDIA GeForce GTX TITAN X" }, + { 0x17C8, 0x0000, 0x0000, "NVIDIA GeForce GTX 980 Ti" }, + { 0x17F0, 0x0000, 0x0000, "Quadro M6000" }, + { 0x17F1, 0x0000, 0x0000, "Quadro M6000 24GB" }, + { 0x17FD, 0x0000, 0x0000, "Tesla M40" }, + { 0x17FD, 0x1173, 0x10de, "Tesla M40 24GB" }, + { 0x1B00, 0x0000, 0x0000, "NVIDIA TITAN X (Pascal)" }, + { 0x1B02, 0x0000, 0x0000, "NVIDIA TITAN Xp" }, + { 0x1B02, 0x123e, 0x10de, "NVIDIA TITAN Xp COLLECTORS EDITION" }, + { 0x1B02, 0x123f, 0x10de, "NVIDIA TITAN Xp 
COLLECTORS EDITION" }, + { 0x1B06, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080 Ti" }, + { 0x1B30, 0x0000, 0x0000, "Quadro P6000" }, + { 0x1B38, 0x0000, 0x0000, "Tesla P40" }, + { 0x1B80, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" }, + { 0x1B81, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" }, + { 0x1B82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070 Ti" }, + { 0x1B83, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" }, + { 0x1B84, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 3GB" }, + { 0x1B87, 0x0000, 0x0000, "NVIDIA P104-100" }, + { 0x1BA0, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" }, + { 0x1BA0, 0x0887, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BA1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" }, + { 0x1BA1, 0x08a1, 0x1028, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x08a2, 0x1028, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1cce, 0x1043, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1651, 0x1458, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1653, 0x1458, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x11e8, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x11e9, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1225, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1226, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1227, 0x1462, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x9501, 0x1558, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x95e1, 0x1558, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x2000, 0x1a58, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA1, 0x1032, 0x1d05, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BA2, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" }, + { 0x1BB0, 0x0000, 0x0000, "Quadro P5000" }, + { 0x1BB1, 0x0000, 0x0000, "Quadro P4000" }, + { 0x1BB1, 0x11a3, 0x15c3, "EIZO Quadro MED-XN91" }, + { 0x1BB4, 0x0000, 0x0000, "Tesla P6" }, + { 0x1BB5, 0x0000, 0x0000, "Quadro P5200" }, + { 0x1BB5, 0x2268, 0x17aa, "Quadro P5200 with Max-Q Design" }, + { 0x1BB5, 0x2269, 0x17aa, "Quadro P5200 with Max-Q Design" }, + { 0x1BB6, 0x0000, 0x0000, "Quadro P5000" }, + { 0x1BB7, 0x0000, 0x0000, "Quadro P4000" }, + { 0x1BB7, 0x11e9, 0x1462, "Quadro P4000 with Max-Q Design" }, + { 0x1BB7, 0x9501, 0x1558, "Quadro P4000 with Max-Q Design" }, + { 0x1BB8, 0x0000, 0x0000, "Quadro P3000" }, + { 0x1BB9, 0x0000, 0x0000, "Quadro P4200" }, + { 0x1BB9, 0x95e1, 0x1558, "Quadro P4200 with Max-Q Design" }, + { 0x1BB9, 0x2268, 0x17aa, "Quadro P4200 with Max-Q Design" }, + { 0x1BB9, 0x2269, 0x17aa, "Quadro P4200 with Max-Q Design" }, + { 0x1BBB, 0x0000, 0x0000, "Quadro P3200" }, + { 0x1BBB, 0x225f, 0x17aa, "Quadro P3200 with Max-Q Design" }, + { 0x1BBB, 0x2262, 0x17aa, "Quadro P3200 with Max-Q Design" }, + { 0x1BC7, 0x0000, 0x0000, "NVIDIA P104-101" }, + { 0x1BE0, 0x0000, 0x0000, "NVIDIA GeForce GTX 1080" }, + { 0x1BE0, 0x1221, 0x1025, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x123e, 0x1025, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x07c0, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x0876, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x088b, 0x1028, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x1031, 0x1043, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x1bf0, 0x1043, "NVIDIA GeForce GTX 1080 with Max-Q Design" }, + { 0x1BE0, 0x355b, 0x1458, "NVIDIA GeForce GTX 1080 with Max-Q 
Design" }, + { 0x1BE1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1070" }, + { 0x1BE1, 0x84db, 0x103c, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BE1, 0x16f0, 0x1043, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1BE1, 0x2009, 0x3842, "NVIDIA GeForce GTX 1070 with Max-Q Design" }, + { 0x1C02, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 3GB" }, + { 0x1C03, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" }, + { 0x1C04, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 5GB" }, + { 0x1C06, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060 6GB" }, + { 0x1C07, 0x0000, 0x0000, "NVIDIA P106-100" }, + { 0x1C09, 0x0000, 0x0000, "NVIDIA P106-090" }, + { 0x1C20, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" }, + { 0x1C20, 0x0802, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0803, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0825, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0827, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0885, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x0886, 0x1028, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x8467, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x8478, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x8581, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x1244, 0x1462, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x95e5, 0x1558, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x39b9, 0x17aa, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x2000, 0x1a58, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x2001, 0x1a58, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C20, 0x1059, 0x1d05, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C21, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C22, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C23, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" }, + { 0x1C30, 0x0000, 0x0000, "Quadro P2000" }, + { 0x1C30, 0x11b3, 0x15c3, "EIZO Quadro MED-XN71" }, + { 0x1C31, 0x0000, 0x0000, "Quadro P2200" }, + { 0x1C31, 0x131b, 0x15c3, "EIZO Quadro MED-XN72" }, + { 0x1C60, 0x0000, 0x0000, "NVIDIA GeForce GTX 1060" }, + { 0x1C60, 0x8390, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C60, 0x8467, 0x103c, "NVIDIA GeForce GTX 1060 with Max-Q Design" }, + { 0x1C61, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C62, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C81, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C83, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C8C, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C8C, 0x087c, 0x1028, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x8519, 0x103c, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x123c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x2266, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x2267, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8C, 0x39ff, 0x17aa, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8D, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C8D, 0x84e9, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x84eb, 0x103c, "NVIDIA GeForce GTX 
1050 with Max-Q Design" }, + { 0x1C8D, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x114f, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1341, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1351, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1481, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x14a1, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x18c1, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1b5e, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1217, 0x152d, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8D, 0x1707, 0x1d72, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C8F, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050 Ti" }, + { 0x1C8F, 0x123c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8F, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8F, 0x126d, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8F, 0x1284, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C8F, 0x1297, 0x1462, "NVIDIA GeForce GTX 1050 Ti with Max-Q Design" }, + { 0x1C90, 0x0000, 0x0000, "NVIDIA GeForce MX150" }, + { 0x1C90, 0x09c1, 0x1028, "NVIDIA GeForce MX250" }, + { 0x1C91, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C91, 0x856a, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C91, 0x86e3, 0x103c, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C91, 0x1232, 0x152d, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C92, 0x0000, 0x0000, "NVIDIA GeForce GTX 1050" }, + { 0x1C92, 0x149f, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C92, 0x1b31, 0x1043, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C92, 0x1245, 0x1462, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C92, 0x126c, 0x1462, "NVIDIA GeForce GTX 1050 with Max-Q Design" }, + { 0x1C94, 0x0000, 0x0000, "NVIDIA GeForce MX350" }, + { 0x1C96, 0x0000, 0x0000, "NVIDIA GeForce MX350" }, + { 0x1CB1, 0x0000, 0x0000, "Quadro P1000" }, + { 0x1CB1, 0x11bc, 0x15c3, "EIZO Quadro MED-XN51LP" }, + { 0x1CB2, 0x0000, 0x0000, "Quadro P600" }, + { 0x1CB3, 0x0000, 0x0000, "Quadro P400" }, + { 0x1CB3, 0x11be, 0x15c3, "EIZO Quadro MED-XN31LP" }, + { 0x1CB6, 0x0000, 0x0000, "Quadro P620" }, + { 0x1CBA, 0x0000, 0x0000, "Quadro P2000" }, + { 0x1CBA, 0x2266, 0x17aa, "Quadro P2000 with Max-Q Design" }, + { 0x1CBA, 0x2267, 0x17aa, "Quadro P2000 with Max-Q Design" }, + { 0x1CBB, 0x0000, 0x0000, "Quadro P1000" }, + { 0x1CBC, 0x0000, 0x0000, "Quadro P600" }, + { 0x1CBD, 0x0000, 0x0000, "Quadro P620" }, + { 0x1CFA, 0x0000, 0x0000, "Quadro P2000" }, + { 0x1CFB, 0x0000, 0x0000, "Quadro P1000" }, + { 0x1CFB, 0x2600, 0x102b, "Matrox D-Series D1480" }, + { 0x1CFB, 0x2700, 0x102b, "Matrox D-Series D1450" }, + { 0x1D01, 0x0000, 0x0000, "NVIDIA GeForce GT 1030" }, + { 0x1D02, 0x0000, 0x0000, "NVIDIA GeForce GT 1010" }, + { 0x1D10, 0x0000, 0x0000, "NVIDIA GeForce MX150" }, + { 0x1D11, 0x0000, 0x0000, "NVIDIA GeForce MX230" }, + { 0x1D12, 0x0000, 0x0000, "NVIDIA GeForce MX150" }, + { 0x1D13, 0x0000, 0x0000, "NVIDIA GeForce MX250" }, + { 0x1D16, 0x0000, 0x0000, "NVIDIA GeForce MX330" }, + { 0x1D33, 0x0000, 0x0000, "Quadro P500" }, + { 0x1D34, 0x0000, 0x0000, "Quadro P520" }, + { 0x1D52, 0x0000, 0x0000, "NVIDIA GeForce MX250" }, + { 0x1D81, 0x0000, 0x0000, "NVIDIA TITAN V" }, + { 0x1DB1, 
0x0000, 0x0000, "Tesla V100-SXM2-16GB" }, + { 0x1DB1, 0x1307, 0x10de, "Tesla V100-SXM2-16GB-LS" }, + { 0x1DB3, 0x0000, 0x0000, "Tesla V100-FHHL-16GB" }, + { 0x1DB4, 0x0000, 0x0000, "Tesla V100-PCIE-16GB" }, + { 0x1DB4, 0x1306, 0x10de, "Tesla V100-PCIE-16GB-LS" }, + { 0x1DB5, 0x0000, 0x0000, "Tesla V100-SXM2-32GB" }, + { 0x1DB5, 0x1308, 0x10de, "Tesla V100-SXM2-32GB-LS" }, + { 0x1DB6, 0x0000, 0x0000, "Tesla V100-PCIE-32GB" }, + { 0x1DB7, 0x0000, 0x0000, "Tesla V100-DGXS-32GB" }, + { 0x1DB8, 0x0000, 0x0000, "Tesla V100-SXM3-32GB" }, + { 0x1DB8, 0x131d, 0x10de, "Tesla V100-SXM3-32GB-H" }, + { 0x1DBA, 0x0000, 0x0000, "Quadro GV100" }, + { 0x1DBA, 0x12eb, 0x10de, "NVIDIA TITAN V JHH Special Edition" }, + { 0x1DF0, 0x0000, 0x0000, "Tesla PG500-216" }, + { 0x1DF2, 0x0000, 0x0000, "Tesla PG503-216" }, + { 0x1DF6, 0x0000, 0x0000, "Tesla V100S-PCIE-32GB" }, + { 0x1E02, 0x0000, 0x0000, "NVIDIA TITAN RTX" }, + { 0x1E04, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Ti" }, + { 0x1E07, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Ti" }, + { 0x1E09, 0x0000, 0x0000, "NVIDIA CMP 50HX" }, + { 0x1E30, 0x0000, 0x0000, "Quadro RTX 6000" }, + { 0x1E30, 0x129e, 0x1028, "Quadro RTX 8000" }, + { 0x1E30, 0x129e, 0x103c, "Quadro RTX 8000" }, + { 0x1E30, 0x129e, 0x10de, "Quadro RTX 8000" }, + { 0x1E36, 0x0000, 0x0000, "Quadro RTX 6000" }, + { 0x1E78, 0x13d8, 0x10de, "Quadro RTX 8000" }, + { 0x1E78, 0x13d9, 0x10de, "Quadro RTX 6000" }, + { 0x1E81, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 SUPER" }, + { 0x1E82, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" }, + { 0x1E84, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" }, + { 0x1E87, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" }, + { 0x1E89, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1E90, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" }, + { 0x1E90, 0x1375, 0x1025, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08a1, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08a2, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ea, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08eb, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ec, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x093b, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x093c, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x8572, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x8573, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x8602, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x8606, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x86c6, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x131f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x137f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x141f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1751, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1660, 0x1458, "NVIDIA GeForce RTX 2080 
with Max-Q Design" }, + { 0x1E90, 0x1661, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1662, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x75a6, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x75a7, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x86a6, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x86a7, 0x1458, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1274, 0x1462, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1277, 0x1462, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1220, 0x152d, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x95e1, 0x1558, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x97e1, 0x1558, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x2002, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x2005, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x2007, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x3000, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x3001, 0x1a58, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E90, 0x1069, 0x1d05, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1E91, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 Super" }, + { 0x1E91, 0x8607, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x8736, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x8738, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x8772, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x878b, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x1e61, 0x1043, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x1511, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x75b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x75b4, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x76b2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x76b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x78a2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x78a3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x86b2, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x86b3, 0x1458, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x12ae, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x22c3, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x22c5, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x2009, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x200a, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E91, 0x3012, 0x8086, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1E93, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Super" }, + { 0x1E93, 0x1401, 0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x149c, 
0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x09d2, 0x1028, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x8607, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x8736, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x8738, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x8772, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x75b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x75b2, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x76b0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x76b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x78a0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x78a1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x86b0, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x86b1, 0x1458, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x12ae, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x12b4, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x50d3, 0x1558, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x70d1, 0x1558, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x22c3, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x22c5, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x2009, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x200a, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1E93, 0x1089, 0x1d05, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1EB0, 0x0000, 0x0000, "Quadro RTX 5000" }, + { 0x1EB1, 0x0000, 0x0000, "Quadro RTX 4000" }, + { 0x1EB1, 0x12a0, 0x15c3, "EIZO Quadro MED-XN92" }, + { 0x1EB5, 0x0000, 0x0000, "Quadro RTX 5000" }, + { 0x1EB5, 0x1375, 0x1025, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x1401, 0x1025, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x149c, 0x1025, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x09c3, 0x1028, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8736, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8738, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8772, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8780, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8782, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8783, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x8785, 0x103c, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x1dd1, 0x1043, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x1274, 0x1462, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x12b0, 0x1462, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x12c6, 0x1462, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x22b8, 
0x17aa, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x22ba, 0x17aa, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x2005, 0x1a58, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x2007, 0x1a58, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x2008, 0x1a58, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB5, 0x200a, 0x1a58, "Quadro RTX 5000 with Max-Q Design" }, + { 0x1EB6, 0x0000, 0x0000, "Quadro RTX 4000" }, + { 0x1EB6, 0x09c3, 0x1028, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8736, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8738, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8772, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8780, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8782, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8783, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x8785, 0x103c, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x1274, 0x1462, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x1277, 0x1462, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x12b0, 0x1462, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x12c6, 0x1462, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x22b8, 0x17aa, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EB6, 0x22ba, 0x17aa, "Quadro RTX 4000 with Max-Q Design" }, + { 0x1EC2, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" }, + { 0x1EC7, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 SUPER" }, + { 0x1ED0, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080" }, + { 0x1ED0, 0x132d, 0x1025, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x8572, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x8573, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x8600, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x8605, 0x103c, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x138f, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x15c1, 0x1043, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x3fee, 0x17aa, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED0, 0x3ffe, 0x17aa, "NVIDIA GeForce RTX 2080 with Max-Q Design" }, + { 0x1ED1, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070 Super" }, + { 0x1ED1, 0x1432, 0x1025, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x8746, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x165f, 0x1043, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0xc192, 0x144d, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED1, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2070 Super with Max-Q Design" }, + { 0x1ED3, 0x0000, 0x0000, "NVIDIA GeForce RTX 2080 Super" }, + { 0x1ED3, 0x1432, 0x1025, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x09d1, 0x1028, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x8746, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x878a, 0x103c, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 
0x1ED3, 0x1d61, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x1e51, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x1f01, 0x1043, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1ED3, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2080 Super with Max-Q Design" }, + { 0x1EF5, 0x0000, 0x0000, "Quadro RTX 5000" }, + { 0x1F02, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F03, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F06, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" }, + { 0x1F07, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F08, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F0A, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F0B, 0x0000, 0x0000, "NVIDIA CMP 40HX" }, + { 0x1F10, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F10, 0x132d, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1342, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08a1, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08a2, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ea, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08eb, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ec, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x093b, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x093c, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x8572, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x8573, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x8602, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x8606, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x132f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x136f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1881, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1e6e, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1658, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1663, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1664, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x75a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x75a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x86a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x86a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1274, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1277, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x95e1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x97e1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2002, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2005, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2007, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x3000, 
0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x3001, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x105e, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x1070, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2087, 0x1d05, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F10, 0x2087, 0x8086, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F11, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F12, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F12, 0x098f, 0x1028, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x8741, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x8744, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x878e, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x880e, 0x103c, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x1e11, 0x1043, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x1f11, 0x1043, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x12d9, 0x1462, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x3801, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x3802, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F12, 0x3803, 0x17aa, "NVIDIA GeForce RTX 2060 with Max-Q Design" }, + { 0x1F14, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F14, 0x1401, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x1432, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x1442, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x1446, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x147d, 0x1025, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x09e2, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x09f3, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8607, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x86c6, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x86c7, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8736, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8738, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8746, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x8772, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x878b, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x87a6, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x87a7, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x174f, 0x1043, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x1512, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x75b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x75b6, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x76b4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x76b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x78a4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x78a5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x86b4, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x86b5, 0x1458, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x12ae, 0x1462, 
"NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x12b0, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x12c6, 0x1462, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x50d3, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x70d1, 0x1558, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x200c, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x2011, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F14, 0x3002, 0x1a58, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F15, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F36, 0x0000, 0x0000, "Quadro RTX 3000" }, + { 0x1F36, 0x0990, 0x1028, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x8736, 0x103c, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x8738, 0x103c, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x8772, 0x103c, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x13cf, 0x1043, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F36, 0x0032, 0x1414, "Quadro RTX 3000 with Max-Q Design" }, + { 0x1F42, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" }, + { 0x1F47, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060 SUPER" }, + { 0x1F50, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F50, 0x08ed, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x08ee, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x08ef, 0x1028, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8572, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8573, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8574, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8600, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x8605, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x3fee, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F50, 0x3ffe, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F51, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F54, 0x0000, 0x0000, "NVIDIA GeForce RTX 2070" }, + { 0x1F54, 0x878a, 0x103c, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F54, 0x3fce, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F54, 0x3fcf, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F54, 0x3fd0, 0x17aa, "NVIDIA GeForce RTX 2070 with Max-Q Design" }, + { 0x1F55, 0x0000, 0x0000, "NVIDIA GeForce RTX 2060" }, + { 0x1F76, 0x0000, 0x0000, "Quadro RTX 3000" }, + { 0x1F76, 0x2800, 0x102b, "Matrox D-Series D2450" }, + { 0x1F76, 0x2900, 0x102b, "Matrox D-Series D2480" }, + { 0x1F82, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F91, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F91, 0x863e, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x86e7, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x86e8, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x12cf, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x156f, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x0032, 0x1414, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0xc822, 0x144d, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x127e, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x1281, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x1284, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x1285, 0x1462, "NVIDIA GeForce GTX 
1650 with Max-Q Design" }, + { 0x1F91, 0x129c, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x229f, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x3802, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x3806, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x3f1a, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F91, 0x1001, 0x1a58, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F95, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 Ti" }, + { 0x1F95, 0x1479, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x147a, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x147b, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x147c, 0x1025, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x86e7, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x86e8, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x8815, 0x103c, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1dff, 0x1043, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1e1f, 0x1043, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0xc838, 0x144d, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x12bd, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x12c5, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x12d2, 0x1462, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x22c0, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x22c1, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x3837, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x3f95, 0x17aa, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1003, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1006, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x1007, 0x1a58, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F95, 0x3e30, 0x1e83, "NVIDIA GeForce GTX 1650 Ti with Max-Q Design" }, + { 0x1F96, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F96, 0x1297, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F97, 0x0000, 0x0000, "NVIDIA GeForce MX450" }, + { 0x1F98, 0x0000, 0x0000, "NVIDIA GeForce MX450" }, + { 0x1F99, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F99, 0x1479, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x147a, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x147b, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x147c, 0x1025, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x8815, 0x103c, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x13b2, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x1402, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x1902, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x12bd, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x12c5, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x12d2, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x22da, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x3f93, 0x17aa, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F99, 0x3e30, 0x1e83, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9C, 0x0000, 0x0000, 
"NVIDIA GeForce MX450" }, + { 0x1F9D, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1F9D, 0x128d, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x130d, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x149c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x185c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x189c, 0x1043, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x12f4, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x1302, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x131b, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x1326, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x132a, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9D, 0x132e, 0x1462, "NVIDIA GeForce GTX 1650 with Max-Q Design" }, + { 0x1F9F, 0x0000, 0x0000, "NVIDIA GeForce MX550" }, + { 0x1FA0, 0x0000, 0x0000, "NVIDIA GeForce MX550" }, + { 0x1FB0, 0x12db, 0x1028, "NVIDIA T1000" }, + { 0x1FB0, 0x12db, 0x103c, "NVIDIA T1000" }, + { 0x1FB0, 0x8a80, 0x103c, "NVIDIA T1000" }, + { 0x1FB0, 0x12db, 0x10de, "NVIDIA T1000" }, + { 0x1FB0, 0x1485, 0x10de, "NVIDIA DGX Display" }, + { 0x1FB0, 0x12db, 0x17aa, "NVIDIA T1000" }, + { 0x1FB1, 0x1488, 0x1028, "NVIDIA T600" }, + { 0x1FB1, 0x1488, 0x103c, "NVIDIA T600" }, + { 0x1FB1, 0x8a80, 0x103c, "NVIDIA T600" }, + { 0x1FB1, 0x1488, 0x10de, "NVIDIA T600" }, + { 0x1FB1, 0x1488, 0x17aa, "NVIDIA T600" }, + { 0x1FB2, 0x1489, 0x1028, "NVIDIA T400" }, + { 0x1FB2, 0x1489, 0x103c, "NVIDIA T400" }, + { 0x1FB2, 0x8a80, 0x103c, "NVIDIA T400" }, + { 0x1FB2, 0x1489, 0x10de, "NVIDIA T400" }, + { 0x1FB2, 0x1489, 0x17aa, "NVIDIA T400" }, + { 0x1FB6, 0x0000, 0x0000, "NVIDIA T600 Laptop GPU" }, + { 0x1FB7, 0x0000, 0x0000, "NVIDIA T550 Laptop GPU" }, + { 0x1FB8, 0x0000, 0x0000, "Quadro T2000" }, + { 0x1FB8, 0x097e, 0x1028, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8736, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8738, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8772, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8780, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8782, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8783, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x8785, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x87f0, 0x103c, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x1281, 0x1462, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x12bd, 0x1462, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x22c0, 0x17aa, "Quadro T2000 with Max-Q Design" }, + { 0x1FB8, 0x22c1, 0x17aa, "Quadro T2000 with Max-Q Design" }, + { 0x1FB9, 0x0000, 0x0000, "Quadro T1000" }, + { 0x1FB9, 0x1479, 0x1025, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x147a, 0x1025, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x147b, 0x1025, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x147c, 0x1025, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8736, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8738, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8772, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8780, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8782, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8783, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x8785, 0x103c, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x87f0, 0x103c, "Quadro T1000 with Max-Q Design" }, + 
{ 0x1FB9, 0x12bd, 0x1462, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x22c0, 0x17aa, "Quadro T1000 with Max-Q Design" }, + { 0x1FB9, 0x22c1, 0x17aa, "Quadro T1000 with Max-Q Design" }, + { 0x1FBA, 0x0000, 0x0000, "NVIDIA T600 Laptop GPU" }, + { 0x1FBB, 0x0000, 0x0000, "NVIDIA T500" }, + { 0x1FBC, 0x0000, 0x0000, "NVIDIA T1200 Laptop GPU" }, + { 0x1FDD, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x1FF0, 0x1612, 0x1028, "NVIDIA T1000 8GB" }, + { 0x1FF0, 0x1612, 0x103c, "NVIDIA T1000 8GB" }, + { 0x1FF0, 0x8a80, 0x103c, "NVIDIA T1000 8GB" }, + { 0x1FF0, 0x1612, 0x10de, "NVIDIA T1000 8GB" }, + { 0x1FF0, 0x1612, 0x17aa, "NVIDIA T1000 8GB" }, + { 0x1FF2, 0x1613, 0x1028, "NVIDIA T400 4GB" }, + { 0x1FF2, 0x1613, 0x103c, "NVIDIA T400 4GB" }, + { 0x1FF2, 0x8a80, 0x103c, "NVIDIA T400 4GB" }, + { 0x1FF2, 0x1613, 0x10de, "NVIDIA T400 4GB" }, + { 0x1FF2, 0x1613, 0x17aa, "NVIDIA T400 4GB" }, + { 0x1FF9, 0x0000, 0x0000, "Quadro T1000" }, + { 0x20B0, 0x0000, 0x0000, "NVIDIA A100-SXM4-40GB" }, + { 0x20B0, 0x1450, 0x10de, "NVIDIA A100-PG509-200" }, + { 0x20B2, 0x1463, 0x10de, "NVIDIA A100-SXM4-80GB" }, + { 0x20B2, 0x147f, 0x10de, "NVIDIA A100-SXM4-80GB" }, + { 0x20B3, 0x14a7, 0x10de, "NVIDIA PG506-242" }, + { 0x20B3, 0x14a8, 0x10de, "NVIDIA PG506-243" }, + { 0x20B5, 0x1533, 0x10de, "NVIDIA A100 80GB PCIe" }, + { 0x20B6, 0x1492, 0x10de, "NVIDIA PG506-232" }, + { 0x20B7, 0x1532, 0x10de, "NVIDIA A30" }, + { 0x20F1, 0x145f, 0x10de, "NVIDIA A100-PCIE-40GB" }, + { 0x2182, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" }, + { 0x2184, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660" }, + { 0x2187, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650 SUPER" }, + { 0x2188, 0x0000, 0x0000, "NVIDIA GeForce GTX 1650" }, + { 0x2189, 0x0000, 0x0000, "NVIDIA CMP 30HX" }, + { 0x2191, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" }, + { 0x2191, 0x0949, 0x1028, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x85fb, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x85fe, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x86d6, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x8741, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x8744, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x878d, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x87af, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x87b3, 0x103c, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x171f, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x17ef, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x18d1, 0x1043, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x0032, 0x1414, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x128a, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x128b, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x12c6, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x12cb, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x12cc, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x12d9, 0x1462, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x380c, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x381d, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2191, 0x381e, 0x17aa, "NVIDIA GeForce GTX 1660 Ti with Max-Q Design" }, + { 0x2192, 0x0000, 
0x0000, "NVIDIA GeForce GTX 1650 Ti" }, + { 0x21C4, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 SUPER" }, + { 0x21D1, 0x0000, 0x0000, "NVIDIA GeForce GTX 1660 Ti" }, + { 0x2203, 0x0000, 0x0000, "NVIDIA GeForce RTX 3090 Ti" }, + { 0x2204, 0x0000, 0x0000, "NVIDIA GeForce RTX 3090" }, + { 0x2206, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" }, + { 0x2208, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti" }, + { 0x220A, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" }, + { 0x220D, 0x0000, 0x0000, "NVIDIA CMP 90HX" }, + { 0x2216, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080" }, + { 0x2230, 0x1459, 0x1028, "NVIDIA RTX A6000" }, + { 0x2230, 0x1459, 0x103c, "NVIDIA RTX A6000" }, + { 0x2230, 0x1459, 0x10de, "NVIDIA RTX A6000" }, + { 0x2230, 0x1459, 0x17aa, "NVIDIA RTX A6000" }, + { 0x2231, 0x147e, 0x1028, "NVIDIA RTX A5000" }, + { 0x2231, 0x147e, 0x103c, "NVIDIA RTX A5000" }, + { 0x2231, 0x147e, 0x10de, "NVIDIA RTX A5000" }, + { 0x2231, 0x147e, 0x17aa, "NVIDIA RTX A5000" }, + { 0x2232, 0x163c, 0x1028, "NVIDIA RTX A4500" }, + { 0x2232, 0x163c, 0x103c, "NVIDIA RTX A4500" }, + { 0x2232, 0x163c, 0x10de, "NVIDIA RTX A4500" }, + { 0x2232, 0x163c, 0x17aa, "NVIDIA RTX A4500" }, + { 0x2233, 0x165a, 0x1028, "NVIDIA RTX A5500" }, + { 0x2233, 0x165a, 0x103c, "NVIDIA RTX A5500" }, + { 0x2233, 0x165a, 0x10de, "NVIDIA RTX A5500" }, + { 0x2233, 0x165a, 0x17aa, "NVIDIA RTX A5500" }, + { 0x2235, 0x145a, 0x10de, "NVIDIA A40" }, + { 0x2236, 0x1482, 0x10de, "NVIDIA A10" }, + { 0x2237, 0x152f, 0x10de, "NVIDIA A10G" }, + { 0x2238, 0x1677, 0x10de, "NVIDIA A10M" }, + { 0x2414, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" }, + { 0x2420, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" }, + { 0x2438, 0x0000, 0x0000, "NVIDIA RTX A5500 Laptop GPU" }, + { 0x2460, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Ti Laptop GPU" }, + { 0x2482, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti" }, + { 0x2484, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070" }, + { 0x2486, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" }, + { 0x2487, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" }, + { 0x2488, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070" }, + { 0x2489, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Ti" }, + { 0x248A, 0x0000, 0x0000, "NVIDIA CMP 70HX" }, + { 0x249C, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Laptop GPU" }, + { 0x249C, 0x1194, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x249D, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Laptop GPU" }, + { 0x24A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti Laptop GPU" }, + { 0x24A0, 0x1192, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x24B0, 0x14ad, 0x1028, "NVIDIA RTX A4000" }, + { 0x24B0, 0x14ad, 0x103c, "NVIDIA RTX A4000" }, + { 0x24B0, 0x14ad, 0x10de, "NVIDIA RTX A4000" }, + { 0x24B0, 0x14ad, 0x17aa, "NVIDIA RTX A4000" }, + { 0x24B1, 0x1658, 0x10de, "NVIDIA RTX A4000H" }, + { 0x24B6, 0x0000, 0x0000, "NVIDIA RTX A5000 Laptop GPU" }, + { 0x24B7, 0x0000, 0x0000, "NVIDIA RTX A4000 Laptop GPU" }, + { 0x24B8, 0x0000, 0x0000, "NVIDIA RTX A3000 Laptop GPU" }, + { 0x24B9, 0x0000, 0x0000, "NVIDIA RTX A3000 12GB Laptop GPU" }, + { 0x24BA, 0x0000, 0x0000, "NVIDIA RTX A4500 Laptop GPU" }, + { 0x24BB, 0x0000, 0x0000, "NVIDIA RTX A3000 12GB Laptop GPU" }, + { 0x24DC, 0x0000, 0x0000, "NVIDIA GeForce RTX 3080 Laptop GPU" }, + { 0x24DD, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Laptop GPU" }, + { 0x24E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3070 Ti Laptop GPU" }, + { 0x24FA, 0x0000, 0x0000, "NVIDIA RTX A4500 Embedded GPU" }, + { 0x2503, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" }, + { 0x2504, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060" }, 
+ { 0x2507, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050" }, + { 0x2508, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 OEM" }, + { 0x2520, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x2523, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x2531, 0x151d, 0x1028, "NVIDIA RTX A2000" }, + { 0x2531, 0x151d, 0x103c, "NVIDIA RTX A2000" }, + { 0x2531, 0x151d, 0x10de, "NVIDIA RTX A2000" }, + { 0x2531, 0x151d, 0x17aa, "NVIDIA RTX A2000" }, + { 0x2560, 0x0000, 0x0000, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x2563, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x2571, 0x1611, 0x1028, "NVIDIA RTX A2000 12GB" }, + { 0x2571, 0x1611, 0x103c, "NVIDIA RTX A2000 12GB" }, + { 0x2571, 0x1611, 0x10de, "NVIDIA RTX A2000 12GB" }, + { 0x2571, 0x1611, 0x17aa, "NVIDIA RTX A2000 12GB" }, + { 0x25A0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x25A0, 0x8928, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" }, + { 0x25A0, 0x89f9, 0x103c, "NVIDIA GeForce RTX 3050Ti Laptop GPU" }, + { 0x25A0, 0x1196, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x25A2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" }, + { 0x25A2, 0x0baf, 0x1028, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x25A2, 0x1195, 0x1d05, "NVIDIA GeForce RTX 3060 Laptop GPU" }, + { 0x25A5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" }, + { 0x25A6, 0x0000, 0x0000, "NVIDIA GeForce MX570" }, + { 0x25A7, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" }, + { 0x25A9, 0x0000, 0x0000, "NVIDIA GeForce RTX 2050" }, + { 0x25AA, 0x0000, 0x0000, "NVIDIA GeForce MX570 A" }, + { 0x25B6, 0x14a9, 0x10de, "NVIDIA A16" }, + { 0x25B6, 0x157e, 0x10de, "NVIDIA A2" }, + { 0x25B8, 0x0000, 0x0000, "NVIDIA RTX A2000 Laptop GPU" }, + { 0x25B9, 0x0000, 0x0000, "NVIDIA RTX A1000 Laptop GPU" }, + { 0x25BA, 0x0000, 0x0000, "NVIDIA RTX A2000 8GB Laptop GPU" }, + { 0x25BB, 0x0000, 0x0000, "NVIDIA RTX A500 Laptop GPU" }, + { 0x25E0, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Ti Laptop GPU" }, + { 0x25E2, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" }, + { 0x25E5, 0x0000, 0x0000, "NVIDIA GeForce RTX 3050 Laptop GPU" }, + { 0x25F9, 0x0000, 0x0000, "NVIDIA RTX A1000 Embedded GPU" }, + { 0x25FA, 0x0000, 0x0000, "NVIDIA RTX A2000 Embedded GPU" }, + { 0x13BD, 0x11cc, 0x10DE, "GRID M10-0B" }, + { 0x13BD, 0x11cd, 0x10DE, "GRID M10-1B" }, + { 0x13BD, 0x11ce, 0x10DE, "GRID M10-0Q" }, + { 0x13BD, 0x11cf, 0x10DE, "GRID M10-1Q" }, + { 0x13BD, 0x11d0, 0x10DE, "GRID M10-2Q" }, + { 0x13BD, 0x11d1, 0x10DE, "GRID M10-4Q" }, + { 0x13BD, 0x11d2, 0x10DE, "GRID M10-8Q" }, + { 0x13BD, 0x1286, 0x10DE, "GRID M10-2B" }, + { 0x13BD, 0x12ee, 0x10DE, "GRID M10-2B4" }, + { 0x13BD, 0x1339, 0x10DE, "GRID M10-1B4" }, + { 0x13F2, 0x114c, 0x10DE, "GRID M60-0Q" }, + { 0x13F2, 0x114d, 0x10DE, "GRID M60-1Q" }, + { 0x13F2, 0x114e, 0x10DE, "GRID M60-2Q" }, + { 0x13F2, 0x114f, 0x10DE, "GRID M60-4Q" }, + { 0x13F2, 0x1150, 0x10DE, "GRID M60-8Q" }, + { 0x13F2, 0x1176, 0x10DE, "GRID M60-0B" }, + { 0x13F2, 0x1177, 0x10DE, "GRID M60-1B" }, + { 0x13F2, 0x117d, 0x10DE, "GRID M60-2B" }, + { 0x13F2, 0x12ec, 0x10DE, "GRID M60-2B4" }, + { 0x13F2, 0x1337, 0x10DE, "GRID M60-1B4" }, + { 0x13F3, 0x117c, 0x10DE, "GRID M6-2B" }, + { 0x13F3, 0x117e, 0x10DE, "GRID M6-0B" }, + { 0x13F3, 0x117f, 0x10DE, "GRID M6-1B" }, + { 0x13F3, 0x1180, 0x10DE, "GRID M6-0Q" }, + { 0x13F3, 0x1181, 0x10DE, "GRID M6-1Q" }, + { 0x13F3, 0x1182, 0x10DE, "GRID M6-2Q" }, + { 0x13F3, 0x1183, 0x10DE, "GRID M6-4Q" }, + { 0x13F3, 0x1184, 0x10DE, "GRID M6-8Q" }, + { 0x13F3, 0x12ed, 0x10DE, "GRID 
M6-2B4" }, + { 0x13F3, 0x1338, 0x10DE, "GRID M6-1B4" }, + { 0x15F7, 0x1265, 0x10DE, "GRID P100C-1B" }, + { 0x15F7, 0x1266, 0x10DE, "GRID P100C-1Q" }, + { 0x15F7, 0x1267, 0x10DE, "GRID P100C-2Q" }, + { 0x15F7, 0x1268, 0x10DE, "GRID P100C-4Q" }, + { 0x15F7, 0x1269, 0x10DE, "GRID P100C-6Q" }, + { 0x15F7, 0x126a, 0x10DE, "GRID P100C-12Q" }, + { 0x15F7, 0x128d, 0x10DE, "GRID P100C-2B" }, + { 0x15F7, 0x12f4, 0x10DE, "GRID P100C-2B4" }, + { 0x15F7, 0x133f, 0x10DE, "GRID P100C-1B4" }, + { 0x15F7, 0x137d, 0x10DE, "GRID P100C-12C" }, + { 0x15F7, 0x138c, 0x10DE, "GRID P100C-4C" }, + { 0x15F7, 0x138d, 0x10DE, "GRID P100C-6C" }, + { 0x15F8, 0x1221, 0x10DE, "GRID P100-1B" }, + { 0x15F8, 0x1222, 0x10DE, "GRID P100-1Q" }, + { 0x15F8, 0x1223, 0x10DE, "GRID P100-2Q" }, + { 0x15F8, 0x1224, 0x10DE, "GRID P100-4Q" }, + { 0x15F8, 0x1225, 0x10DE, "GRID P100-8Q" }, + { 0x15F8, 0x1226, 0x10DE, "GRID P100-16Q" }, + { 0x15F8, 0x128c, 0x10DE, "GRID P100-2B" }, + { 0x15F8, 0x12f2, 0x10DE, "GRID P100-2B4" }, + { 0x15F8, 0x133d, 0x10DE, "GRID P100-1B4" }, + { 0x15F8, 0x137c, 0x10DE, "GRID P100-16C" }, + { 0x15F8, 0x138a, 0x10DE, "GRID P100-4C" }, + { 0x15F8, 0x138b, 0x10DE, "GRID P100-8C" }, + { 0x15F9, 0x122c, 0x10DE, "GRID P100X-1B" }, + { 0x15F9, 0x122d, 0x10DE, "GRID P100X-1Q" }, + { 0x15F9, 0x122e, 0x10DE, "GRID P100X-2Q" }, + { 0x15F9, 0x122f, 0x10DE, "GRID P100X-4Q" }, + { 0x15F9, 0x1230, 0x10DE, "GRID P100X-8Q" }, + { 0x15F9, 0x1231, 0x10DE, "GRID P100X-16Q" }, + { 0x15F9, 0x128b, 0x10DE, "GRID P100X-2B" }, + { 0x15F9, 0x12f3, 0x10DE, "GRID P100X-2B4" }, + { 0x15F9, 0x133e, 0x10DE, "GRID P100X-1B4" }, + { 0x15F9, 0x137b, 0x10DE, "GRID P100X-16C" }, + { 0x15F9, 0x1388, 0x10DE, "GRID P100X-4C" }, + { 0x15F9, 0x1389, 0x10DE, "GRID P100X-8C" }, + { 0x1B38, 0x11e7, 0x10DE, "GRID P40-1B" }, + { 0x1B38, 0x11e8, 0x10DE, "GRID P40-1Q" }, + { 0x1B38, 0x11e9, 0x10DE, "GRID P40-2Q" }, + { 0x1B38, 0x11ea, 0x10DE, "GRID P40-3Q" }, + { 0x1B38, 0x11eb, 0x10DE, "GRID P40-4Q" }, + { 0x1B38, 0x11ec, 0x10DE, "GRID P40-6Q" }, + { 0x1B38, 0x11ed, 0x10DE, "GRID P40-8Q" }, + { 0x1B38, 0x11ee, 0x10DE, "GRID P40-12Q" }, + { 0x1B38, 0x11ef, 0x10DE, "GRID P40-24Q" }, + { 0x1B38, 0x1287, 0x10DE, "GRID P40-2B" }, + { 0x1B38, 0x12b1, 0x10DE, "GeForce GTX P40-24" }, + { 0x1B38, 0x12b2, 0x10DE, "GeForce GTX P40-12" }, + { 0x1B38, 0x12b3, 0x10DE, "GeForce GTX P40-6" }, + { 0x1B38, 0x12ef, 0x10DE, "GRID P40-2B4" }, + { 0x1B38, 0x133a, 0x10DE, "GRID P40-1B4" }, + { 0x1B38, 0x137e, 0x10DE, "GRID P40-24C" }, + { 0x1B38, 0x1381, 0x10DE, "GRID P40-4C" }, + { 0x1B38, 0x1382, 0x10DE, "GRID P40-6C" }, + { 0x1B38, 0x1383, 0x10DE, "GRID P40-8C" }, + { 0x1B38, 0x1384, 0x10DE, "GRID P40-12C" }, + { 0x1B38, 0x13b0, 0x10DE, "GRID GTX P40-6" }, + { 0x1B38, 0x13b1, 0x10DE, "GRID GTX P40-12" }, + { 0x1B38, 0x13b2, 0x10DE, "GRID GTX P40-24" }, + { 0x1B38, 0x13d0, 0x10DE, "GRID GTX P40-8" }, + { 0x1BB3, 0x1203, 0x10DE, "GRID P4-1B" }, + { 0x1BB3, 0x1204, 0x10DE, "GRID P4-1Q" }, + { 0x1BB3, 0x1205, 0x10DE, "GRID P4-2Q" }, + { 0x1BB3, 0x1206, 0x10DE, "GRID P4-4Q" }, + { 0x1BB3, 0x1207, 0x10DE, "GRID P4-8Q" }, + { 0x1BB3, 0x1288, 0x10DE, "GRID P4-2B" }, + { 0x1BB3, 0x12f1, 0x10DE, "GRID P4-2B4" }, + { 0x1BB3, 0x133c, 0x10DE, "GRID P4-1B4" }, + { 0x1BB3, 0x136d, 0x10DE, "GRID GTX P4-2" }, + { 0x1BB3, 0x136e, 0x10DE, "GRID GTX P4-4" }, + { 0x1BB3, 0x136f, 0x10DE, "GRID GTX P4-8" }, + { 0x1BB3, 0x1380, 0x10DE, "GRID P4-8C" }, + { 0x1BB3, 0x1385, 0x10DE, "GRID P4-4C" }, + { 0x1BB4, 0x11f8, 0x10DE, "GRID P6-1B" }, + { 0x1BB4, 0x11f9, 0x10DE, "GRID P6-1Q" }, + { 0x1BB4, 
0x11fa, 0x10DE, "GRID P6-2Q" }, + { 0x1BB4, 0x11fb, 0x10DE, "GRID P6-4Q" }, + { 0x1BB4, 0x11fc, 0x10DE, "GRID P6-8Q" }, + { 0x1BB4, 0x11fd, 0x10DE, "GRID P6-16Q" }, + { 0x1BB4, 0x1289, 0x10DE, "GRID P6-2B" }, + { 0x1BB4, 0x12f0, 0x10DE, "GRID P6-2B4" }, + { 0x1BB4, 0x133b, 0x10DE, "GRID P6-1B4" }, + { 0x1BB4, 0x137f, 0x10DE, "GRID P6-16C" }, + { 0x1BB4, 0x1386, 0x10DE, "GRID P6-4C" }, + { 0x1BB4, 0x1387, 0x10DE, "GRID P6-8C" }, + { 0x1DB1, 0x1259, 0x10DE, "GRID V100X-1B" }, + { 0x1DB1, 0x125a, 0x10DE, "GRID V100X-1Q" }, + { 0x1DB1, 0x125b, 0x10DE, "GRID V100X-2Q" }, + { 0x1DB1, 0x125c, 0x10DE, "GRID V100X-4Q" }, + { 0x1DB1, 0x125d, 0x10DE, "GRID V100X-8Q" }, + { 0x1DB1, 0x125e, 0x10DE, "GRID V100X-16Q" }, + { 0x1DB1, 0x128e, 0x10DE, "GRID V100X-2B" }, + { 0x1DB1, 0x12f6, 0x10DE, "GRID V100X-2B4" }, + { 0x1DB1, 0x1341, 0x10DE, "GRID V100X-1B4" }, + { 0x1DB1, 0x1378, 0x10DE, "GRID V100X-16C" }, + { 0x1DB1, 0x138e, 0x10DE, "GRID V100X-4C" }, + { 0x1DB1, 0x138f, 0x10DE, "GRID V100X-8C" }, + { 0x1DB3, 0x1290, 0x10DE, "GRID V100L-1B" }, + { 0x1DB3, 0x1291, 0x10DE, "GRID V100L-2B" }, + { 0x1DB3, 0x1292, 0x10DE, "GRID V100L-1Q" }, + { 0x1DB3, 0x1293, 0x10DE, "GRID V100L-2Q" }, + { 0x1DB3, 0x1294, 0x10DE, "GRID V100L-4Q" }, + { 0x1DB3, 0x1295, 0x10DE, "GRID V100L-8Q" }, + { 0x1DB3, 0x1296, 0x10DE, "GRID V100L-16Q" }, + { 0x1DB3, 0x12f9, 0x10DE, "GRID V100L-2B4" }, + { 0x1DB3, 0x1344, 0x10DE, "GRID V100L-1B4" }, + { 0x1DB3, 0x137a, 0x10DE, "GRID V100L-16C" }, + { 0x1DB3, 0x1398, 0x10DE, "GRID V100L-4C" }, + { 0x1DB3, 0x1399, 0x10DE, "GRID V100L-8C" }, + { 0x1DB4, 0x124e, 0x10DE, "GRID V100-1B" }, + { 0x1DB4, 0x124f, 0x10DE, "GRID V100-1Q" }, + { 0x1DB4, 0x1250, 0x10DE, "GRID V100-2Q" }, + { 0x1DB4, 0x1251, 0x10DE, "GRID V100-4Q" }, + { 0x1DB4, 0x1252, 0x10DE, "GRID V100-8Q" }, + { 0x1DB4, 0x1253, 0x10DE, "GRID V100-16Q" }, + { 0x1DB4, 0x128f, 0x10DE, "GRID V100-2B" }, + { 0x1DB4, 0x12f5, 0x10DE, "GRID V100-2B4" }, + { 0x1DB4, 0x1340, 0x10DE, "GRID V100-1B4" }, + { 0x1DB4, 0x1379, 0x10DE, "GRID V100-16C" }, + { 0x1DB4, 0x1393, 0x10DE, "GRID V100-4C" }, + { 0x1DB4, 0x1394, 0x10DE, "GRID V100-8C" }, + { 0x1DB5, 0x12cb, 0x10DE, "GRID V100DX-1B" }, + { 0x1DB5, 0x12cc, 0x10DE, "GRID V100DX-2B" }, + { 0x1DB5, 0x12cd, 0x10DE, "GRID V100DX-1Q" }, + { 0x1DB5, 0x12ce, 0x10DE, "GRID V100DX-2Q" }, + { 0x1DB5, 0x12cf, 0x10DE, "GRID V100DX-4Q" }, + { 0x1DB5, 0x12d0, 0x10DE, "GRID V100DX-8Q" }, + { 0x1DB5, 0x12d1, 0x10DE, "GRID V100DX-16Q" }, + { 0x1DB5, 0x12d2, 0x10DE, "GRID V100DX-32Q" }, + { 0x1DB5, 0x12f8, 0x10DE, "GRID V100DX-2B4" }, + { 0x1DB5, 0x1343, 0x10DE, "GRID V100DX-1B4" }, + { 0x1DB5, 0x1376, 0x10DE, "GRID V100DX-32C" }, + { 0x1DB5, 0x1390, 0x10DE, "GRID V100DX-4C" }, + { 0x1DB5, 0x1391, 0x10DE, "GRID V100DX-8C" }, + { 0x1DB5, 0x1392, 0x10DE, "GRID V100DX-16C" }, + { 0x1DB6, 0x12bd, 0x10DE, "GRID V100D-1B" }, + { 0x1DB6, 0x12be, 0x10DE, "GRID V100D-2B" }, + { 0x1DB6, 0x12bf, 0x10DE, "GRID V100D-1Q" }, + { 0x1DB6, 0x12c0, 0x10DE, "GRID V100D-2Q" }, + { 0x1DB6, 0x12c1, 0x10DE, "GRID V100D-4Q" }, + { 0x1DB6, 0x12c2, 0x10DE, "GRID V100D-8Q" }, + { 0x1DB6, 0x12c3, 0x10DE, "GRID V100D-16Q" }, + { 0x1DB6, 0x12c4, 0x10DE, "GRID V100D-32Q" }, + { 0x1DB6, 0x12f7, 0x10DE, "GRID V100D-2B4" }, + { 0x1DB6, 0x1342, 0x10DE, "GRID V100D-1B4" }, + { 0x1DB6, 0x1377, 0x10DE, "GRID V100D-32C" }, + { 0x1DB6, 0x1395, 0x10DE, "GRID V100D-4C" }, + { 0x1DB6, 0x1396, 0x10DE, "GRID V100D-8C" }, + { 0x1DB6, 0x1397, 0x10DE, "GRID V100D-16C" }, + { 0x1DB6, 0x13cd, 0x10DE, "GRID GTX V100D-8" }, + { 0x1DB6, 0x13ce, 0x10DE, "GRID GTX 
V100D-16" }, + { 0x1DB6, 0x13cf, 0x10DE, "GRID GTX V100D-32" }, + { 0x1DF6, 0x13e1, 0x10DE, "GRID V100S-1B" }, + { 0x1DF6, 0x13e3, 0x10DE, "GRID V100S-2B" }, + { 0x1DF6, 0x13e5, 0x10DE, "GRID V100S-1Q" }, + { 0x1DF6, 0x13e6, 0x10DE, "GRID V100S-2Q" }, + { 0x1DF6, 0x13e7, 0x10DE, "GRID V100S-4Q" }, + { 0x1DF6, 0x13e8, 0x10DE, "GRID V100S-8Q" }, + { 0x1DF6, 0x13e9, 0x10DE, "GRID V100S-16Q" }, + { 0x1DF6, 0x13ea, 0x10DE, "GRID V100S-32Q" }, + { 0x1DF6, 0x13f1, 0x10DE, "GRID V100S-4C" }, + { 0x1DF6, 0x13f2, 0x10DE, "GRID V100S-8C" }, + { 0x1DF6, 0x13f3, 0x10DE, "GRID V100S-16C" }, + { 0x1DF6, 0x13f4, 0x10DE, "GRID V100S-32C" }, + { 0x1E30, 0x1325, 0x10DE, "GRID RTX6000-1Q" }, + { 0x1E30, 0x1326, 0x10DE, "GRID RTX6000-2Q" }, + { 0x1E30, 0x1327, 0x10DE, "GRID RTX6000-3Q" }, + { 0x1E30, 0x1328, 0x10DE, "GRID RTX6000-4Q" }, + { 0x1E30, 0x1329, 0x10DE, "GRID RTX6000-6Q" }, + { 0x1E30, 0x132a, 0x10DE, "GRID RTX6000-8Q" }, + { 0x1E30, 0x132b, 0x10DE, "GRID RTX6000-12Q" }, + { 0x1E30, 0x132c, 0x10DE, "GRID RTX6000-24Q" }, + { 0x1E30, 0x132d, 0x10DE, "GRID RTX8000-1Q" }, + { 0x1E30, 0x132e, 0x10DE, "GRID RTX8000-2Q" }, + { 0x1E30, 0x132f, 0x10DE, "GRID RTX8000-3Q" }, + { 0x1E30, 0x1330, 0x10DE, "GRID RTX8000-4Q" }, + { 0x1E30, 0x1331, 0x10DE, "GRID RTX8000-6Q" }, + { 0x1E30, 0x1332, 0x10DE, "GRID RTX8000-8Q" }, + { 0x1E30, 0x1333, 0x10DE, "GRID RTX8000-12Q" }, + { 0x1E30, 0x1334, 0x10DE, "GRID RTX8000-16Q" }, + { 0x1E30, 0x1335, 0x10DE, "GRID RTX8000-24Q" }, + { 0x1E30, 0x1336, 0x10DE, "GRID RTX8000-48Q" }, + { 0x1E30, 0x13b9, 0x10DE, "GRID RTX6000-6" }, + { 0x1E30, 0x13ba, 0x10DE, "GRID RTX6000-12" }, + { 0x1E30, 0x13bb, 0x10DE, "GRID RTX6000-24" }, + { 0x1E30, 0x13bc, 0x10DE, "GRID RTX8000-12" }, + { 0x1E30, 0x13bd, 0x10DE, "GRID RTX8000-24" }, + { 0x1E30, 0x13be, 0x10DE, "GRID RTX8000-48" }, + { 0x1E30, 0x13bf, 0x10DE, "GRID RTX6000-4C" }, + { 0x1E30, 0x13c0, 0x10DE, "GRID RTX6000-6C" }, + { 0x1E30, 0x13c1, 0x10DE, "GRID RTX6000-8C" }, + { 0x1E30, 0x13c2, 0x10DE, "GRID RTX6000-12C" }, + { 0x1E30, 0x13c3, 0x10DE, "GRID RTX6000-24C" }, + { 0x1E30, 0x13c4, 0x10DE, "GRID RTX8000-4C" }, + { 0x1E30, 0x13c5, 0x10DE, "GRID RTX8000-6C" }, + { 0x1E30, 0x13c6, 0x10DE, "GRID RTX8000-8C" }, + { 0x1E30, 0x13c7, 0x10DE, "GRID RTX8000-12C" }, + { 0x1E30, 0x13c8, 0x10DE, "GRID RTX8000-16C" }, + { 0x1E30, 0x13c9, 0x10DE, "GRID RTX8000-24C" }, + { 0x1E30, 0x13ca, 0x10DE, "GRID RTX8000-48C" }, + { 0x1E30, 0x13cb, 0x10DE, "GRID RTX6000-8" }, + { 0x1E30, 0x13cc, 0x10DE, "GRID RTX8000-16" }, + { 0x1E30, 0x1437, 0x10DE, "GRID RTX6000-1B" }, + { 0x1E30, 0x1438, 0x10DE, "GRID RTX6000-2B" }, + { 0x1E30, 0x1441, 0x10DE, "GRID RTX8000-1B" }, + { 0x1E30, 0x1442, 0x10DE, "GRID RTX8000-2B" }, + { 0x1E37, 0x1347, 0x10DE, "GeForce RTX T10x-8" }, + { 0x1E37, 0x1348, 0x10DE, "GeForce RTX T10x-4" }, + { 0x1E37, 0x1349, 0x10DE, "GeForce RTX T10x-2" }, + { 0x1E37, 0x136a, 0x10DE, "GRID RTX T10-4" }, + { 0x1E37, 0x136b, 0x10DE, "GRID RTX T10-8" }, + { 0x1E37, 0x136c, 0x10DE, "GRID RTX T10-16" }, + { 0x1E37, 0x13a4, 0x10DE, "GeForce RTX T10-4" }, + { 0x1E37, 0x13a5, 0x10DE, "GeForce RTX T10-8" }, + { 0x1E37, 0x13a6, 0x10DE, "GeForce RTX T10-16" }, + { 0x1E37, 0x13a7, 0x10DE, "GRID RTX T10x-2" }, + { 0x1E37, 0x13a8, 0x10DE, "GRID RTX T10x-4" }, + { 0x1E37, 0x13a9, 0x10DE, "GRID RTX T10x-8" }, + { 0x1E37, 0x148a, 0x10DE, "GRID RTX T10-2" }, + { 0x1E37, 0x148b, 0x10DE, "GRID RTX T10-1" }, + { 0x1E37, 0x148c, 0x10DE, "GRID RTX T10-0" }, + { 0x1E78, 0x13f7, 0x10DE, "GRID RTX6000P-1B" }, + { 0x1E78, 0x13f8, 0x10DE, "GRID RTX6000P-2B" }, + { 
0x1E78, 0x13f9, 0x10DE, "GRID RTX6000P-1Q" }, + { 0x1E78, 0x13fa, 0x10DE, "GRID RTX6000P-2Q" }, + { 0x1E78, 0x13fb, 0x10DE, "GRID RTX6000P-3Q" }, + { 0x1E78, 0x13fc, 0x10DE, "GRID RTX6000P-4Q" }, + { 0x1E78, 0x13fd, 0x10DE, "GRID RTX6000P-6Q" }, + { 0x1E78, 0x13fe, 0x10DE, "GRID RTX6000P-8Q" }, + { 0x1E78, 0x13ff, 0x10DE, "GRID RTX6000P-12Q" }, + { 0x1E78, 0x1400, 0x10DE, "GRID RTX6000P-24Q" }, + { 0x1E78, 0x1409, 0x10DE, "GRID RTX6000P-6" }, + { 0x1E78, 0x140a, 0x10DE, "GRID RTX6000P-8" }, + { 0x1E78, 0x140b, 0x10DE, "GRID RTX6000P-12" }, + { 0x1E78, 0x140c, 0x10DE, "GRID RTX6000P-24" }, + { 0x1E78, 0x140d, 0x10DE, "GRID RTX6000P-4C" }, + { 0x1E78, 0x140e, 0x10DE, "GRID RTX6000P-6C" }, + { 0x1E78, 0x140f, 0x10DE, "GRID RTX6000P-8C" }, + { 0x1E78, 0x1410, 0x10DE, "GRID RTX6000P-12C" }, + { 0x1E78, 0x1411, 0x10DE, "GRID RTX6000P-24C" }, + { 0x1E78, 0x1412, 0x10DE, "GRID RTX8000P-1B" }, + { 0x1E78, 0x1413, 0x10DE, "GRID RTX8000P-2B" }, + { 0x1E78, 0x1414, 0x10DE, "GRID RTX8000P-1Q" }, + { 0x1E78, 0x1415, 0x10DE, "GRID RTX8000P-2Q" }, + { 0x1E78, 0x1416, 0x10DE, "GRID RTX8000P-3Q" }, + { 0x1E78, 0x1417, 0x10DE, "GRID RTX8000P-4Q" }, + { 0x1E78, 0x1418, 0x10DE, "GRID RTX8000P-6Q" }, + { 0x1E78, 0x1419, 0x10DE, "GRID RTX8000P-8Q" }, + { 0x1E78, 0x141a, 0x10DE, "GRID RTX8000P-12Q" }, + { 0x1E78, 0x141b, 0x10DE, "GRID RTX8000P-16Q" }, + { 0x1E78, 0x141c, 0x10DE, "GRID RTX8000P-24Q" }, + { 0x1E78, 0x141d, 0x10DE, "GRID RTX8000P-48Q" }, + { 0x1E78, 0x1427, 0x10DE, "GRID RTX8000P-12" }, + { 0x1E78, 0x1428, 0x10DE, "GRID RTX8000P-16" }, + { 0x1E78, 0x1429, 0x10DE, "GRID RTX8000P-24" }, + { 0x1E78, 0x142a, 0x10DE, "GRID RTX8000P-48" }, + { 0x1E78, 0x142b, 0x10DE, "GRID RTX8000P-4C" }, + { 0x1E78, 0x142c, 0x10DE, "GRID RTX8000P-6C" }, + { 0x1E78, 0x142d, 0x10DE, "GRID RTX8000P-8C" }, + { 0x1E78, 0x142e, 0x10DE, "GRID RTX8000P-12C" }, + { 0x1E78, 0x142f, 0x10DE, "GRID RTX8000P-16C" }, + { 0x1E78, 0x1430, 0x10DE, "GRID RTX8000P-24C" }, + { 0x1E78, 0x1431, 0x10DE, "GRID RTX8000P-48C" }, + { 0x1EB8, 0x1309, 0x10DE, "GRID T4-1B" }, + { 0x1EB8, 0x130a, 0x10DE, "GRID T4-2B" }, + { 0x1EB8, 0x130b, 0x10DE, "GRID T4-2B4" }, + { 0x1EB8, 0x130c, 0x10DE, "GRID T4-1Q" }, + { 0x1EB8, 0x130d, 0x10DE, "GRID T4-2Q" }, + { 0x1EB8, 0x130e, 0x10DE, "GRID T4-4Q" }, + { 0x1EB8, 0x130f, 0x10DE, "GRID T4-8Q" }, + { 0x1EB8, 0x1310, 0x10DE, "GRID T4-16Q" }, + { 0x1EB8, 0x1345, 0x10DE, "GRID T4-1B4" }, + { 0x1EB8, 0x1367, 0x10DE, "GRID RTX T4-4" }, + { 0x1EB8, 0x1368, 0x10DE, "GRID RTX T4-8" }, + { 0x1EB8, 0x1369, 0x10DE, "GRID RTX T4-16" }, + { 0x1EB8, 0x1375, 0x10DE, "GRID T4-16C" }, + { 0x1EB8, 0x139a, 0x10DE, "GRID T4-4C" }, + { 0x1EB8, 0x139b, 0x10DE, "GRID T4-8C" }, + { 0x1EB8, 0x148d, 0x10DE, "GRID RTX T4-2" }, + { 0x1EB8, 0x148e, 0x10DE, "GRID RTX T4-1" }, + { 0x1EB8, 0x148f, 0x10DE, "GRID RTX T4-0" }, + { 0x20B0, 0x146f, 0x10DE, "GRID A100X-1-5C" }, + { 0x20B0, 0x1470, 0x10DE, "GRID A100X-2-10C" }, + { 0x20B0, 0x1471, 0x10DE, "GRID A100X-3-20C" }, + { 0x20B0, 0x1472, 0x10DE, "GRID A100X-4-20C" }, + { 0x20B0, 0x1473, 0x10DE, "GRID A100X-7-40C" }, + { 0x20B0, 0x1474, 0x10DE, "GRID A100X-4C" }, + { 0x20B0, 0x1475, 0x10DE, "GRID A100X-5C" }, + { 0x20B0, 0x1476, 0x10DE, "GRID A100X-8C" }, + { 0x20B0, 0x1477, 0x10DE, "GRID A100X-10C" }, + { 0x20B0, 0x1478, 0x10DE, "GRID A100X-20C" }, + { 0x20B0, 0x1479, 0x10DE, "GRID A100X-40C" }, + { 0x20B0, 0x160c, 0x10DE, "GRID A100X-1-5CME" }, + { 0x20B2, 0x1523, 0x10DE, "GRID A100DX-1-10C" }, + { 0x20B2, 0x1524, 0x10DE, "GRID A100DX-2-20C" }, + { 0x20B2, 0x1525, 0x10DE, "GRID 
A100DX-3-40C" }, + { 0x20B2, 0x1526, 0x10DE, "GRID A100DX-4-40C" }, + { 0x20B2, 0x1527, 0x10DE, "GRID A100DX-7-80C" }, + { 0x20B2, 0x1528, 0x10DE, "GRID A100DX-4C" }, + { 0x20B2, 0x1529, 0x10DE, "GRID A100DX-8C" }, + { 0x20B2, 0x152a, 0x10DE, "GRID A100DX-10C" }, + { 0x20B2, 0x152b, 0x10DE, "GRID A100DX-16C" }, + { 0x20B2, 0x152c, 0x10DE, "GRID A100DX-20C" }, + { 0x20B2, 0x152d, 0x10DE, "GRID A100DX-40C" }, + { 0x20B2, 0x152e, 0x10DE, "GRID A100DX-80C" }, + { 0x20B2, 0x160d, 0x10DE, "GRID A100DX-1-10CME" }, + { 0x20B5, 0x1591, 0x10DE, "GRID A100D-1-10C" }, + { 0x20B5, 0x1592, 0x10DE, "GRID A100D-2-20C" }, + { 0x20B5, 0x1593, 0x10DE, "GRID A100D-3-40C" }, + { 0x20B5, 0x1594, 0x10DE, "GRID A100D-4-40C" }, + { 0x20B5, 0x1595, 0x10DE, "GRID A100D-7-80C" }, + { 0x20B5, 0x1596, 0x10DE, "GRID A100D-4C" }, + { 0x20B5, 0x1597, 0x10DE, "GRID A100D-8C" }, + { 0x20B5, 0x1598, 0x10DE, "GRID A100D-10C" }, + { 0x20B5, 0x1599, 0x10DE, "GRID A100D-16C" }, + { 0x20B5, 0x159a, 0x10DE, "GRID A100D-20C" }, + { 0x20B5, 0x159b, 0x10DE, "GRID A100D-40C" }, + { 0x20B5, 0x159c, 0x10DE, "GRID A100D-80C" }, + { 0x20B5, 0x160f, 0x10DE, "GRID A100D-1-10CME" }, + { 0x20B7, 0x1589, 0x10DE, "NVIDIA A30-1-6C" }, + { 0x20B7, 0x158a, 0x10DE, "NVIDIA A30-2-12C" }, + { 0x20B7, 0x158b, 0x10DE, "NVIDIA A30-4-24C" }, + { 0x20B7, 0x158c, 0x10DE, "NVIDIA A30-4C" }, + { 0x20B7, 0x158d, 0x10DE, "NVIDIA A30-6C" }, + { 0x20B7, 0x158e, 0x10DE, "NVIDIA A30-8C" }, + { 0x20B7, 0x158f, 0x10DE, "NVIDIA A30-12C" }, + { 0x20B7, 0x1590, 0x10DE, "NVIDIA A30-24C" }, + { 0x20B7, 0x1610, 0x10DE, "NVIDIA A30-1-6CME" }, + { 0x20BF, 0x4450, 0x10DE, "GRID A100B-4C" }, + { 0x20BF, 0x4451, 0x10DE, "GRID A100B-5C" }, + { 0x20BF, 0x4452, 0x10DE, "GRID A100B-8C" }, + { 0x20BF, 0x4453, 0x10DE, "GRID A100B-10C" }, + { 0x20BF, 0x4454, 0x10DE, "GRID A100B-20C" }, + { 0x20BF, 0x4455, 0x10DE, "GRID A100B-40C" }, + { 0x20BF, 0x5560, 0x10DE, "GRID A100B-1-5C" }, + { 0x20BF, 0x5561, 0x10DE, "GRID A100B-2-10C" }, + { 0x20BF, 0x5562, 0x10DE, "GRID A100B-3-20C" }, + { 0x20BF, 0x5563, 0x10DE, "GRID A100B-4-20C" }, + { 0x20BF, 0x5564, 0x10DE, "GRID A100B-7-40C" }, + { 0x20F1, 0x1493, 0x10DE, "GRID A100-1-5C" }, + { 0x20F1, 0x1494, 0x10DE, "GRID A100-2-10C" }, + { 0x20F1, 0x1495, 0x10DE, "GRID A100-3-20C" }, + { 0x20F1, 0x1496, 0x10DE, "GRID A100-4-20C" }, + { 0x20F1, 0x1497, 0x10DE, "GRID A100-7-40C" }, + { 0x20F1, 0x1498, 0x10DE, "GRID A100-4C" }, + { 0x20F1, 0x1499, 0x10DE, "GRID A100-5C" }, + { 0x20F1, 0x149a, 0x10DE, "GRID A100-8C" }, + { 0x20F1, 0x149b, 0x10DE, "GRID A100-10C" }, + { 0x20F1, 0x149c, 0x10DE, "GRID A100-20C" }, + { 0x20F1, 0x149d, 0x10DE, "GRID A100-40C" }, + { 0x20F1, 0x160e, 0x10DE, "GRID A100-1-5CME" }, + { 0x2230, 0x14fa, 0x10DE, "NVIDIA RTXA6000-1B" }, + { 0x2230, 0x14fb, 0x10DE, "NVIDIA RTXA6000-2B" }, + { 0x2230, 0x14fc, 0x10DE, "NVIDIA RTXA6000-1Q" }, + { 0x2230, 0x14fd, 0x10DE, "NVIDIA RTXA6000-2Q" }, + { 0x2230, 0x14fe, 0x10DE, "NVIDIA RTXA6000-3Q" }, + { 0x2230, 0x14ff, 0x10DE, "NVIDIA RTXA6000-4Q" }, + { 0x2230, 0x1500, 0x10DE, "NVIDIA RTXA6000-6Q" }, + { 0x2230, 0x1501, 0x10DE, "NVIDIA RTXA6000-8Q" }, + { 0x2230, 0x1502, 0x10DE, "NVIDIA RTXA6000-12Q" }, + { 0x2230, 0x1503, 0x10DE, "NVIDIA RTXA6000-16Q" }, + { 0x2230, 0x1504, 0x10DE, "NVIDIA RTXA6000-24Q" }, + { 0x2230, 0x1505, 0x10DE, "NVIDIA RTXA6000-48Q" }, + { 0x2230, 0x1510, 0x10DE, "NVIDIA RTXA6000-12" }, + { 0x2230, 0x1511, 0x10DE, "NVIDIA RTXA6000-16" }, + { 0x2230, 0x1512, 0x10DE, "NVIDIA RTXA6000-24" }, + { 0x2230, 0x1513, 0x10DE, "NVIDIA RTXA6000-48" }, + { 0x2230, 0x1514, 
0x10DE, "NVIDIA RTXA6000-4C" }, + { 0x2230, 0x1515, 0x10DE, "NVIDIA RTXA6000-6C" }, + { 0x2230, 0x1516, 0x10DE, "NVIDIA RTXA6000-8C" }, + { 0x2230, 0x1517, 0x10DE, "NVIDIA RTXA6000-12C" }, + { 0x2230, 0x1518, 0x10DE, "NVIDIA RTXA6000-16C" }, + { 0x2230, 0x1519, 0x10DE, "NVIDIA RTXA6000-24C" }, + { 0x2230, 0x151a, 0x10DE, "NVIDIA RTXA6000-48C" }, + { 0x2231, 0x1562, 0x10DE, "NVIDIA RTXA5000-1B" }, + { 0x2231, 0x1563, 0x10DE, "NVIDIA RTXA5000-2B" }, + { 0x2231, 0x1564, 0x10DE, "NVIDIA RTXA5000-1Q" }, + { 0x2231, 0x1565, 0x10DE, "NVIDIA RTXA5000-2Q" }, + { 0x2231, 0x1566, 0x10DE, "NVIDIA RTXA5000-3Q" }, + { 0x2231, 0x1567, 0x10DE, "NVIDIA RTXA5000-4Q" }, + { 0x2231, 0x1568, 0x10DE, "NVIDIA RTXA5000-6Q" }, + { 0x2231, 0x1569, 0x10DE, "NVIDIA RTXA5000-8Q" }, + { 0x2231, 0x156a, 0x10DE, "NVIDIA RTXA5000-12Q" }, + { 0x2231, 0x156b, 0x10DE, "NVIDIA RTXA5000-24Q" }, + { 0x2231, 0x1574, 0x10DE, "NVIDIA RTXA5000-6" }, + { 0x2231, 0x1575, 0x10DE, "NVIDIA RTXA5000-8" }, + { 0x2231, 0x1576, 0x10DE, "NVIDIA RTXA5000-12" }, + { 0x2231, 0x1577, 0x10DE, "NVIDIA RTXA5000-24" }, + { 0x2231, 0x1578, 0x10DE, "NVIDIA RTXA5000-4C" }, + { 0x2231, 0x1579, 0x10DE, "NVIDIA RTXA5000-6C" }, + { 0x2231, 0x157a, 0x10DE, "NVIDIA RTXA5000-8C" }, + { 0x2231, 0x157b, 0x10DE, "NVIDIA RTXA5000-12C" }, + { 0x2231, 0x157c, 0x10DE, "NVIDIA RTXA5000-24C" }, + { 0x2233, 0x165c, 0x10DE, "NVIDIA RTXA5500-1B" }, + { 0x2233, 0x165d, 0x10DE, "NVIDIA RTXA5500-2B" }, + { 0x2233, 0x165e, 0x10DE, "NVIDIA RTXA5500-1Q" }, + { 0x2233, 0x165f, 0x10DE, "NVIDIA RTXA5500-2Q" }, + { 0x2233, 0x1660, 0x10DE, "NVIDIA RTXA5500-3Q" }, + { 0x2233, 0x1661, 0x10DE, "NVIDIA RTXA5500-4Q" }, + { 0x2233, 0x1662, 0x10DE, "NVIDIA RTXA5500-6Q" }, + { 0x2233, 0x1663, 0x10DE, "NVIDIA RTXA5500-8Q" }, + { 0x2233, 0x1664, 0x10DE, "NVIDIA RTXA5500-12Q" }, + { 0x2233, 0x1665, 0x10DE, "NVIDIA RTXA5500-24Q" }, + { 0x2233, 0x166e, 0x10DE, "NVIDIA RTXA5500-6" }, + { 0x2233, 0x166f, 0x10DE, "NVIDIA RTXA5500-8" }, + { 0x2233, 0x1670, 0x10DE, "NVIDIA RTXA5500-12" }, + { 0x2233, 0x1671, 0x10DE, "NVIDIA RTXA5500-24" }, + { 0x2233, 0x1672, 0x10DE, "NVIDIA RTXA5500-4C" }, + { 0x2233, 0x1673, 0x10DE, "NVIDIA RTXA5500-6C" }, + { 0x2233, 0x1674, 0x10DE, "NVIDIA RTXA5500-8C" }, + { 0x2233, 0x1675, 0x10DE, "NVIDIA RTXA5500-12C" }, + { 0x2233, 0x1676, 0x10DE, "NVIDIA RTXA5500-24C" }, + { 0x2235, 0x14d5, 0x10DE, "NVIDIA A40-1B" }, + { 0x2235, 0x14d6, 0x10DE, "NVIDIA A40-2B" }, + { 0x2235, 0x14d7, 0x10DE, "NVIDIA A40-1Q" }, + { 0x2235, 0x14d8, 0x10DE, "NVIDIA A40-2Q" }, + { 0x2235, 0x14d9, 0x10DE, "NVIDIA A40-3Q" }, + { 0x2235, 0x14da, 0x10DE, "NVIDIA A40-4Q" }, + { 0x2235, 0x14db, 0x10DE, "NVIDIA A40-6Q" }, + { 0x2235, 0x14dc, 0x10DE, "NVIDIA A40-8Q" }, + { 0x2235, 0x14dd, 0x10DE, "NVIDIA A40-12Q" }, + { 0x2235, 0x14de, 0x10DE, "NVIDIA A40-16Q" }, + { 0x2235, 0x14df, 0x10DE, "NVIDIA A40-24Q" }, + { 0x2235, 0x14e0, 0x10DE, "NVIDIA A40-48Q" }, + { 0x2235, 0x14eb, 0x10DE, "NVIDIA A40-12" }, + { 0x2235, 0x14ec, 0x10DE, "NVIDIA A40-16" }, + { 0x2235, 0x14ed, 0x10DE, "NVIDIA A40-24" }, + { 0x2235, 0x14ee, 0x10DE, "NVIDIA A40-48" }, + { 0x2235, 0x14f3, 0x10DE, "NVIDIA A40-4C" }, + { 0x2235, 0x14f4, 0x10DE, "NVIDIA A40-6C" }, + { 0x2235, 0x14f5, 0x10DE, "NVIDIA A40-8C" }, + { 0x2235, 0x14f6, 0x10DE, "NVIDIA A40-12C" }, + { 0x2235, 0x14f7, 0x10DE, "NVIDIA A40-16C" }, + { 0x2235, 0x14f8, 0x10DE, "NVIDIA A40-24C" }, + { 0x2235, 0x14f9, 0x10DE, "NVIDIA A40-48C" }, + { 0x2235, 0x1684, 0x10DE, "NVIDIA A40-2" }, + { 0x2235, 0x1685, 0x10DE, "NVIDIA A40-3" }, + { 0x2235, 0x1686, 0x10DE, "NVIDIA A40-4" }, 
+ { 0x2235, 0x1687, 0x10DE, "NVIDIA A40-6" }, + { 0x2235, 0x1688, 0x10DE, "NVIDIA A40-8" }, + { 0x2236, 0x14b6, 0x10DE, "NVIDIA A10-1B" }, + { 0x2236, 0x14b7, 0x10DE, "NVIDIA A10-2B" }, + { 0x2236, 0x14b8, 0x10DE, "NVIDIA A10-1Q" }, + { 0x2236, 0x14b9, 0x10DE, "NVIDIA A10-2Q" }, + { 0x2236, 0x14ba, 0x10DE, "NVIDIA A10-3Q" }, + { 0x2236, 0x14bb, 0x10DE, "NVIDIA A10-4Q" }, + { 0x2236, 0x14bc, 0x10DE, "NVIDIA A10-6Q" }, + { 0x2236, 0x14bd, 0x10DE, "NVIDIA A10-8Q" }, + { 0x2236, 0x14be, 0x10DE, "NVIDIA A10-12Q" }, + { 0x2236, 0x14bf, 0x10DE, "NVIDIA A10-24Q" }, + { 0x2236, 0x14c8, 0x10DE, "NVIDIA A10-6" }, + { 0x2236, 0x14c9, 0x10DE, "NVIDIA A10-8" }, + { 0x2236, 0x14ca, 0x10DE, "NVIDIA A10-12" }, + { 0x2236, 0x14cb, 0x10DE, "NVIDIA A10-24" }, + { 0x2236, 0x14d0, 0x10DE, "NVIDIA A10-4C" }, + { 0x2236, 0x14d1, 0x10DE, "NVIDIA A10-6C" }, + { 0x2236, 0x14d2, 0x10DE, "NVIDIA A10-8C" }, + { 0x2236, 0x14d3, 0x10DE, "NVIDIA A10-12C" }, + { 0x2236, 0x14d4, 0x10DE, "NVIDIA A10-24C" }, + { 0x2236, 0x167e, 0x10DE, "NVIDIA A10-2" }, + { 0x2236, 0x167f, 0x10DE, "NVIDIA A10-3" }, + { 0x2236, 0x1680, 0x10DE, "NVIDIA A10-4" }, + { 0x2237, 0x155b, 0x10DE, "NVIDIA A10G-2" }, + { 0x2237, 0x155c, 0x10DE, "NVIDIA A10G-3" }, + { 0x2237, 0x155d, 0x10DE, "NVIDIA A10G-4" }, + { 0x2237, 0x155e, 0x10DE, "NVIDIA A10G-6" }, + { 0x2237, 0x155f, 0x10DE, "NVIDIA A10G-8" }, + { 0x2237, 0x1560, 0x10DE, "NVIDIA A10G-12" }, + { 0x2237, 0x1561, 0x10DE, "NVIDIA A10G-24" }, + { 0x2237, 0x162a, 0x10DE, "NVIDIA A10G-1B" }, + { 0x2237, 0x162b, 0x10DE, "NVIDIA A10G-2B" }, + { 0x2237, 0x162c, 0x10DE, "NVIDIA A10G-1Q" }, + { 0x2237, 0x162d, 0x10DE, "NVIDIA A10G-2Q" }, + { 0x2237, 0x162e, 0x10DE, "NVIDIA A10G-3Q" }, + { 0x2237, 0x162f, 0x10DE, "NVIDIA A10G-4Q" }, + { 0x2237, 0x1630, 0x10DE, "NVIDIA A10G-6Q" }, + { 0x2237, 0x1631, 0x10DE, "NVIDIA A10G-8Q" }, + { 0x2237, 0x1632, 0x10DE, "NVIDIA A10G-12Q" }, + { 0x2237, 0x1633, 0x10DE, "NVIDIA A10G-24Q" }, + { 0x2238, 0x16a3, 0x10DE, "NVIDIA A10M-1B" }, + { 0x2238, 0x16a4, 0x10DE, "NVIDIA A10M-2B" }, + { 0x2238, 0x16a5, 0x10DE, "NVIDIA A10M-1Q" }, + { 0x2238, 0x16a6, 0x10DE, "NVIDIA A10M-2Q" }, + { 0x2238, 0x16a7, 0x10DE, "NVIDIA A10M-4Q" }, + { 0x2238, 0x16a8, 0x10DE, "NVIDIA A10M-5Q" }, + { 0x2238, 0x16a9, 0x10DE, "NVIDIA A10M-10Q" }, + { 0x2238, 0x16aa, 0x10DE, "NVIDIA A10M-20Q" }, + { 0x2238, 0x16b1, 0x10DE, "NVIDIA A10M-2" }, + { 0x2238, 0x16b2, 0x10DE, "NVIDIA A10M-4" }, + { 0x2238, 0x16b3, 0x10DE, "NVIDIA A10M-5" }, + { 0x2238, 0x16b4, 0x10DE, "NVIDIA A10M-10" }, + { 0x2238, 0x16b5, 0x10DE, "NVIDIA A10M-20" }, + { 0x2238, 0x16b6, 0x10DE, "NVIDIA A10M-4C" }, + { 0x2238, 0x16b7, 0x10DE, "NVIDIA A10M-5C" }, + { 0x2238, 0x16b8, 0x10DE, "NVIDIA A10M-10C" }, + { 0x2238, 0x16b9, 0x10DE, "NVIDIA A10M-20C" }, + { 0x25B6, 0x159d, 0x10DE, "NVIDIA A16-1B" }, + { 0x25B6, 0x159e, 0x10DE, "NVIDIA A16-2B" }, + { 0x25B6, 0x159f, 0x10DE, "NVIDIA A16-1Q" }, + { 0x25B6, 0x1600, 0x10DE, "NVIDIA A16-2Q" }, + { 0x25B6, 0x1601, 0x10DE, "NVIDIA A16-4Q" }, + { 0x25B6, 0x1602, 0x10DE, "NVIDIA A16-8Q" }, + { 0x25B6, 0x1603, 0x10DE, "NVIDIA A16-16Q" }, + { 0x25B6, 0x1609, 0x10DE, "NVIDIA A16-4C" }, + { 0x25B6, 0x160a, 0x10DE, "NVIDIA A16-8C" }, + { 0x25B6, 0x160b, 0x10DE, "NVIDIA A16-16C" }, + { 0x25B6, 0x1646, 0x10DE, "NVIDIA A2-1B" }, + { 0x25B6, 0x1647, 0x10DE, "NVIDIA A2-2B" }, + { 0x25B6, 0x1648, 0x10DE, "NVIDIA A2-1Q" }, + { 0x25B6, 0x1649, 0x10DE, "NVIDIA A2-2Q" }, + { 0x25B6, 0x164a, 0x10DE, "NVIDIA A2-4Q" }, + { 0x25B6, 0x164b, 0x10DE, "NVIDIA A2-8Q" }, + { 0x25B6, 0x164c, 0x10DE, "NVIDIA A2-16Q" }, + { 
0x25B6, 0x1652, 0x10DE, "NVIDIA A2-4" }, + { 0x25B6, 0x1653, 0x10DE, "NVIDIA A2-8" }, + { 0x25B6, 0x1654, 0x10DE, "NVIDIA A2-16" }, + { 0x25B6, 0x1655, 0x10DE, "NVIDIA A2-4C" }, + { 0x25B6, 0x1656, 0x10DE, "NVIDIA A2-8C" }, + { 0x25B6, 0x1657, 0x10DE, "NVIDIA A2-16C" }, +}; + +#endif // G_NV_NAME_RELEASED_H diff --git a/src/nvidia/generated/g_nvdebug_pb.c b/src/nvidia/generated/g_nvdebug_pb.c new file mode 100644 index 000000000..6f6949a3a --- /dev/null +++ b/src/nvidia/generated/g_nvdebug_pb.c @@ -0,0 +1,909 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#include "nvtypes.h" +#include "prbrt.h" +#include "g_nvdebug_pb.h" + +// 'OsType' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_ostype[] = { + { + 1, + PRB_MAYBE_ENUM_NAME("OS_NT") + }, + { + 2, + PRB_MAYBE_ENUM_NAME("OS_UNIX") + }, + { + 3, + PRB_MAYBE_ENUM_NAME("OS_MAC") + }, + { + 4, + PRB_MAYBE_ENUM_NAME("OS_MODS") + }, + { + 5, + PRB_MAYBE_ENUM_NAME("OS_QNX") + }, + { + 6, + PRB_MAYBE_ENUM_NAME("OS_UNITTEST") + }, + { + 7, + PRB_MAYBE_ENUM_NAME("OS_UNK") + }, + { + 8, + PRB_MAYBE_ENUM_NAME("OS_LIBOS") + }, +}; + +const PRB_ENUM_DESC prb_enums_nvdebug_ostype = { + prb_enum_mappings_ostype, + 8, + PRB_MAYBE_ENUM_NAME("OsType") +}; + +// 'SystemInfo' field defaults + +// 'SystemInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT64, + 0 | PRB_IS_DEPRECATED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("timestamp") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO, + 0, + PRB_MAYBE_FIELD_NAME("northbridge_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_CPUINFO, + 0, + PRB_MAYBE_FIELD_NAME("cpu_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_GPUINFO, + 0, + PRB_MAYBE_FIELD_NAME("gpu_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_OSINFO, + 0, + PRB_MAYBE_FIELD_NAME("os_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_DRIVERINFO, + 0, + PRB_MAYBE_FIELD_NAME("driver_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 7, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_CONFIG, + 0, + PRB_MAYBE_FIELD_NAME("gpu_config") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 8, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_ERRORSTATE, + 0, + PRB_MAYBE_FIELD_NAME("error_state") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 9, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_SOCINFO, + 0, + PRB_MAYBE_FIELD_NAME("soc_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 10, + { + PRB_OPTIONAL, + PRB_UINT32, + 0 | PRB_IS_DEPRECATED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("time_since_boot") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 11, + { + PRB_OPTIONAL, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO_TIMEINFO, + 0, + PRB_MAYBE_FIELD_NAME("time_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 12, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("bugcheck_count") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'GpuInfo' field defaults + +// 'GpuInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_gpuinfo[] = { + { + 1, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_ENG_GPU, + 0, + PRB_MAYBE_FIELD_NAME("eng_gpu") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, 
+ { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_ENG_MC, + 0, + PRB_MAYBE_FIELD_NAME("eng_mc") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 18, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_ENG_NVD, + 0, + PRB_MAYBE_FIELD_NAME("eng_nvd") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'NvDump' field defaults + +// 'NvDump' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_nvdump[] = { + { + 1, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO, + 0, + PRB_MAYBE_FIELD_NAME("system_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + DCL_DCLMSG, + 0, + PRB_MAYBE_FIELD_NAME("dcl_msg") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_GPUINFO, + 0, + PRB_MAYBE_FIELD_NAME("gpu_info") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_REPEATED, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("exception_address") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 101, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + NVDEBUG_SYSTEMINFO, + 0, + PRB_MAYBE_FIELD_NAME("system_info_gsprm") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'NorthBridgeInfo' field defaults + +// 'NorthBridgeInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_northbridgeinfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("ssid") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'SocInfo' field defaults + +// 'SocInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_socinfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("vendorId") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("chipId") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'CpuInfo' field defaults + +// 'CpuInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_cpuinfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("cpu_type") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("num_cpu_cores") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("num_logical_cpus") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("cpu_caps") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'GpuInfo' field defaults + +// 'GpuInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_gpuinfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("num_gpus") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_REPEATED, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_REPEATED, + PRB_STRING, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_uuid") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_REPEATED, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("device_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_REPEATED, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("pmcBoot0") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_REPEATED, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("junction_temp") + 
PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 7, + { + PRB_REPEATED, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("subdev_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'OsInfo' field defaults + +// 'OsInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_osinfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_nvdebug_ostype, + PRB_MAYBE_FIELD_NAME("family") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("osMajorVersion") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("osMinorVersion") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("osBldNum") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("winProductType") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_STRING, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("buildVersion") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 7, + { + PRB_OPTIONAL, + PRB_STRING, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("buildDatePlus") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'DriverInfo' field defaults + +// 'DriverInfo' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_driverinfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("is_release") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_STRING, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("version") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_STRING, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("branch") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("changelist") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_STRING, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("previous_version") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_STRING, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("previous_branch") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 7, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("previous_changelist") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 8, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("load_count") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'Config' field defaults + +// 'Config' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_config[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("master_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_REPEATED, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'ErrorState' field defaults + +// 'ErrorState' field descriptors +const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_errorstate[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("bugcheck_code") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_BOOL, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("got_rm_lock") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("dump_buffer_size") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'TimeInfo' field defaults + +// 'TimeInfo' field descriptors +const PRB_FIELD_DESC 
prb_fields_nvdebug_systeminfo_timeinfo[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("timestamp_freq") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("timestamp_dump") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("system_time_dump") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT32, + 0 | PRB_IS_DEPRECATED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("time_since_boot_us") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("time_since_boot_sec") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// Message descriptors +const PRB_MSG_DESC prb_messages_nvdebug[] = { + { + 12, + prb_fields_nvdebug_systeminfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo") + }, + { + 3, + prb_fields_nvdebug_gpuinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.GpuInfo") + }, + { + 5, + prb_fields_nvdebug_nvdump, + PRB_MAYBE_MESSAGE_NAME("NvDebug.NvDump") + }, + { + 2, + prb_fields_nvdebug_systeminfo_northbridgeinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.NorthBridgeInfo") + }, + { + 2, + prb_fields_nvdebug_systeminfo_socinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.SocInfo") + }, + { + 4, + prb_fields_nvdebug_systeminfo_cpuinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.CpuInfo") + }, + { + 7, + prb_fields_nvdebug_systeminfo_gpuinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.GpuInfo") + }, + { + 7, + prb_fields_nvdebug_systeminfo_osinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.OsInfo") + }, + { + 8, + prb_fields_nvdebug_systeminfo_driverinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.DriverInfo") + }, + { + 2, + prb_fields_nvdebug_systeminfo_config, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.Config") + }, + { + 3, + prb_fields_nvdebug_systeminfo_errorstate, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.ErrorState") + }, + { + 5, + prb_fields_nvdebug_systeminfo_timeinfo, + PRB_MAYBE_MESSAGE_NAME("NvDebug.SystemInfo.TimeInfo") + }, +}; + +// Service descriptors +const PRB_SERVICE_DESC prb_services_nvdebug[] = { + { 0 } +}; + diff --git a/src/nvidia/generated/g_nvdebug_pb.h b/src/nvidia/generated/g_nvdebug_pb.h new file mode 100644 index 000000000..16c11f1c1 --- /dev/null +++ b/src/nvidia/generated/g_nvdebug_pb.h @@ -0,0 +1,250 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+ +#ifndef G_NVDEBUG_PB_H__ +#define G_NVDEBUG_PB_H__ + +#include "g_all_dcl_pb.h" +#include "g_engines_pb.h" +#include "g_regs_pb.h" +#include "g_journal_pb.h" + +extern const PRB_ENUM_DESC prb_enums_nvdebug_ostype; + +// 'OsType' enumeration values +#define NVDEBUG_OS_NT 1 +#define NVDEBUG_OS_UNIX 2 +#define NVDEBUG_OS_MAC 3 +#define NVDEBUG_OS_MODS 4 +#define NVDEBUG_OS_QNX 5 +#define NVDEBUG_OS_UNITTEST 6 +#define NVDEBUG_OS_UNK 7 +#define NVDEBUG_OS_LIBOS 8 + +extern const PRB_MSG_DESC prb_messages_nvdebug[]; + +// Message descriptor pointers +#define NVDEBUG_SYSTEMINFO (&prb_messages_nvdebug[0]) +#define NVDEBUG_GPUINFO (&prb_messages_nvdebug[1]) +#define NVDEBUG_NVDUMP (&prb_messages_nvdebug[2]) +#define NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO (&prb_messages_nvdebug[3]) +#define NVDEBUG_SYSTEMINFO_SOCINFO (&prb_messages_nvdebug[4]) +#define NVDEBUG_SYSTEMINFO_CPUINFO (&prb_messages_nvdebug[5]) +#define NVDEBUG_SYSTEMINFO_GPUINFO (&prb_messages_nvdebug[6]) +#define NVDEBUG_SYSTEMINFO_OSINFO (&prb_messages_nvdebug[7]) +#define NVDEBUG_SYSTEMINFO_DRIVERINFO (&prb_messages_nvdebug[8]) +#define NVDEBUG_SYSTEMINFO_CONFIG (&prb_messages_nvdebug[9]) +#define NVDEBUG_SYSTEMINFO_ERRORSTATE (&prb_messages_nvdebug[10]) +#define NVDEBUG_SYSTEMINFO_TIMEINFO (&prb_messages_nvdebug[11]) + +// Message maximum lengths +// Does not include repeated fields, strings and byte arrays. +#define NVDEBUG_SYSTEMINFO_LEN 275 +#define NVDEBUG_GPUINFO_LEN 164 +#define NVDEBUG_NVDUMP_LEN 1308 +#define NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO_LEN 12 +#define NVDEBUG_SYSTEMINFO_SOCINFO_LEN 12 +#define NVDEBUG_SYSTEMINFO_CPUINFO_LEN 24 +#define NVDEBUG_SYSTEMINFO_GPUINFO_LEN 38 +#define NVDEBUG_SYSTEMINFO_OSINFO_LEN 31 +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_LEN 28 +#define NVDEBUG_SYSTEMINFO_CONFIG_LEN 12 +#define NVDEBUG_SYSTEMINFO_ERRORSTATE_LEN 14 +#define NVDEBUG_SYSTEMINFO_TIMEINFO_LEN 45 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo[]; + +// 'SystemInfo' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_TIMESTAMP (&prb_fields_nvdebug_systeminfo[0]) +#define NVDEBUG_SYSTEMINFO_NORTHBRIDGE_INFO (&prb_fields_nvdebug_systeminfo[1]) +#define NVDEBUG_SYSTEMINFO_CPU_INFO (&prb_fields_nvdebug_systeminfo[2]) +#define NVDEBUG_SYSTEMINFO_GPU_INFO (&prb_fields_nvdebug_systeminfo[3]) +#define NVDEBUG_SYSTEMINFO_OS_INFO (&prb_fields_nvdebug_systeminfo[4]) +#define NVDEBUG_SYSTEMINFO_DRIVER_INFO (&prb_fields_nvdebug_systeminfo[5]) +#define NVDEBUG_SYSTEMINFO_GPU_CONFIG (&prb_fields_nvdebug_systeminfo[6]) +#define NVDEBUG_SYSTEMINFO_ERROR_STATE (&prb_fields_nvdebug_systeminfo[7]) +#define NVDEBUG_SYSTEMINFO_SOC_INFO (&prb_fields_nvdebug_systeminfo[8]) +#define NVDEBUG_SYSTEMINFO_TIME_SINCE_BOOT (&prb_fields_nvdebug_systeminfo[9]) +#define NVDEBUG_SYSTEMINFO_TIME_INFO (&prb_fields_nvdebug_systeminfo[10]) +#define NVDEBUG_SYSTEMINFO_BUGCHECK_COUNT (&prb_fields_nvdebug_systeminfo[11]) + +// 'SystemInfo' field lengths +#define NVDEBUG_SYSTEMINFO_TIMESTAMP_LEN 10 +#define NVDEBUG_SYSTEMINFO_NORTHBRIDGE_INFO_LEN 15 +#define NVDEBUG_SYSTEMINFO_CPU_INFO_LEN 27 +#define NVDEBUG_SYSTEMINFO_GPU_INFO_LEN 41 +#define NVDEBUG_SYSTEMINFO_OS_INFO_LEN 34 +#define NVDEBUG_SYSTEMINFO_DRIVER_INFO_LEN 31 +#define NVDEBUG_SYSTEMINFO_GPU_CONFIG_LEN 15 +#define NVDEBUG_SYSTEMINFO_ERROR_STATE_LEN 17 +#define NVDEBUG_SYSTEMINFO_SOC_INFO_LEN 15 +#define NVDEBUG_SYSTEMINFO_TIME_SINCE_BOOT_LEN 5 +#define NVDEBUG_SYSTEMINFO_TIME_INFO_LEN 48 +#define NVDEBUG_SYSTEMINFO_BUGCHECK_COUNT_LEN 5 + +extern const PRB_FIELD_DESC 
prb_fields_nvdebug_gpuinfo[]; + +// 'GpuInfo' field descriptor pointers +#define NVDEBUG_GPUINFO_ENG_GPU (&prb_fields_nvdebug_gpuinfo[0]) +#define NVDEBUG_GPUINFO_ENG_MC (&prb_fields_nvdebug_gpuinfo[1]) +#define NVDEBUG_GPUINFO_ENG_NVD (&prb_fields_nvdebug_gpuinfo[2]) + +// 'GpuInfo' field lengths +#define NVDEBUG_GPUINFO_ENG_GPU_LEN 59 +#define NVDEBUG_GPUINFO_ENG_MC_LEN 69 +#define NVDEBUG_GPUINFO_ENG_NVD_LEN 33 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_nvdump[]; + +// 'NvDump' field descriptor pointers +#define NVDEBUG_NVDUMP_SYSTEM_INFO (&prb_fields_nvdebug_nvdump[0]) +#define NVDEBUG_NVDUMP_DCL_MSG (&prb_fields_nvdebug_nvdump[1]) +#define NVDEBUG_NVDUMP_GPU_INFO (&prb_fields_nvdebug_nvdump[2]) +#define NVDEBUG_NVDUMP_EXCEPTION_ADDRESS (&prb_fields_nvdebug_nvdump[3]) +#define NVDEBUG_NVDUMP_SYSTEM_INFO_GSPRM (&prb_fields_nvdebug_nvdump[4]) + +// 'NvDump' field lengths +#define NVDEBUG_NVDUMP_SYSTEM_INFO_LEN 278 +#define NVDEBUG_NVDUMP_DCL_MSG_LEN 570 +#define NVDEBUG_NVDUMP_GPU_INFO_LEN 167 +#define NVDEBUG_NVDUMP_EXCEPTION_ADDRESS_LEN 10 +#define NVDEBUG_NVDUMP_SYSTEM_INFO_GSPRM_LEN 278 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_northbridgeinfo[]; + +// 'NorthBridgeInfo' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO_ID (&prb_fields_nvdebug_systeminfo_northbridgeinfo[0]) +#define NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO_SSID (&prb_fields_nvdebug_systeminfo_northbridgeinfo[1]) + +// 'NorthBridgeInfo' field lengths +#define NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO_ID_LEN 5 +#define NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO_SSID_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_socinfo[]; + +// 'SocInfo' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_SOCINFO_VENDORID (&prb_fields_nvdebug_systeminfo_socinfo[0]) +#define NVDEBUG_SYSTEMINFO_SOCINFO_CHIPID (&prb_fields_nvdebug_systeminfo_socinfo[1]) + +// 'SocInfo' field lengths +#define NVDEBUG_SYSTEMINFO_SOCINFO_VENDORID_LEN 5 +#define NVDEBUG_SYSTEMINFO_SOCINFO_CHIPID_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_cpuinfo[]; + +// 'CpuInfo' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_CPUINFO_CPU_TYPE (&prb_fields_nvdebug_systeminfo_cpuinfo[0]) +#define NVDEBUG_SYSTEMINFO_CPUINFO_NUM_CPU_CORES (&prb_fields_nvdebug_systeminfo_cpuinfo[1]) +#define NVDEBUG_SYSTEMINFO_CPUINFO_NUM_LOGICAL_CPUS (&prb_fields_nvdebug_systeminfo_cpuinfo[2]) +#define NVDEBUG_SYSTEMINFO_CPUINFO_CPU_CAPS (&prb_fields_nvdebug_systeminfo_cpuinfo[3]) + +// 'CpuInfo' field lengths +#define NVDEBUG_SYSTEMINFO_CPUINFO_CPU_TYPE_LEN 5 +#define NVDEBUG_SYSTEMINFO_CPUINFO_NUM_CPU_CORES_LEN 5 +#define NVDEBUG_SYSTEMINFO_CPUINFO_NUM_LOGICAL_CPUS_LEN 5 +#define NVDEBUG_SYSTEMINFO_CPUINFO_CPU_CAPS_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_gpuinfo[]; + +// 'GpuInfo' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_GPUINFO_NUM_GPUS (&prb_fields_nvdebug_systeminfo_gpuinfo[0]) +#define NVDEBUG_SYSTEMINFO_GPUINFO_GPU_ID (&prb_fields_nvdebug_systeminfo_gpuinfo[1]) +#define NVDEBUG_SYSTEMINFO_GPUINFO_GPU_UUID (&prb_fields_nvdebug_systeminfo_gpuinfo[2]) +#define NVDEBUG_SYSTEMINFO_GPUINFO_DEVICE_ID (&prb_fields_nvdebug_systeminfo_gpuinfo[3]) +#define NVDEBUG_SYSTEMINFO_GPUINFO_PMCBOOT0 (&prb_fields_nvdebug_systeminfo_gpuinfo[4]) +#define NVDEBUG_SYSTEMINFO_GPUINFO_JUNCTION_TEMP (&prb_fields_nvdebug_systeminfo_gpuinfo[5]) +#define NVDEBUG_SYSTEMINFO_GPUINFO_SUBDEV_ID (&prb_fields_nvdebug_systeminfo_gpuinfo[6]) + +// 'GpuInfo' field lengths +#define 
NVDEBUG_SYSTEMINFO_GPUINFO_NUM_GPUS_LEN 5 +#define NVDEBUG_SYSTEMINFO_GPUINFO_GPU_ID_LEN 5 +#define NVDEBUG_SYSTEMINFO_GPUINFO_GPU_UUID_LEN 1 +#define NVDEBUG_SYSTEMINFO_GPUINFO_DEVICE_ID_LEN 5 +#define NVDEBUG_SYSTEMINFO_GPUINFO_PMCBOOT0_LEN 5 +#define NVDEBUG_SYSTEMINFO_GPUINFO_JUNCTION_TEMP_LEN 5 +#define NVDEBUG_SYSTEMINFO_GPUINFO_SUBDEV_ID_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_osinfo[]; + +// 'OsInfo' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_OSINFO_FAMILY (&prb_fields_nvdebug_systeminfo_osinfo[0]) +#define NVDEBUG_SYSTEMINFO_OSINFO_OSMAJORVERSION (&prb_fields_nvdebug_systeminfo_osinfo[1]) +#define NVDEBUG_SYSTEMINFO_OSINFO_OSMINORVERSION (&prb_fields_nvdebug_systeminfo_osinfo[2]) +#define NVDEBUG_SYSTEMINFO_OSINFO_OSBLDNUM (&prb_fields_nvdebug_systeminfo_osinfo[3]) +#define NVDEBUG_SYSTEMINFO_OSINFO_WINPRODUCTTYPE (&prb_fields_nvdebug_systeminfo_osinfo[4]) +#define NVDEBUG_SYSTEMINFO_OSINFO_BUILDVERSION (&prb_fields_nvdebug_systeminfo_osinfo[5]) +#define NVDEBUG_SYSTEMINFO_OSINFO_BUILDDATEPLUS (&prb_fields_nvdebug_systeminfo_osinfo[6]) + +// 'OsInfo' field lengths +#define NVDEBUG_SYSTEMINFO_OSINFO_FAMILY_LEN 2 +#define NVDEBUG_SYSTEMINFO_OSINFO_OSMAJORVERSION_LEN 5 +#define NVDEBUG_SYSTEMINFO_OSINFO_OSMINORVERSION_LEN 5 +#define NVDEBUG_SYSTEMINFO_OSINFO_OSBLDNUM_LEN 5 +#define NVDEBUG_SYSTEMINFO_OSINFO_WINPRODUCTTYPE_LEN 5 +#define NVDEBUG_SYSTEMINFO_OSINFO_BUILDVERSION_LEN 1 +#define NVDEBUG_SYSTEMINFO_OSINFO_BUILDDATEPLUS_LEN 1 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_driverinfo[]; + +// 'DriverInfo' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_IS_RELEASE (&prb_fields_nvdebug_systeminfo_driverinfo[0]) +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_VERSION (&prb_fields_nvdebug_systeminfo_driverinfo[1]) +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_BRANCH (&prb_fields_nvdebug_systeminfo_driverinfo[2]) +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_CHANGELIST (&prb_fields_nvdebug_systeminfo_driverinfo[3]) +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_VERSION (&prb_fields_nvdebug_systeminfo_driverinfo[4]) +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_BRANCH (&prb_fields_nvdebug_systeminfo_driverinfo[5]) +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_CHANGELIST (&prb_fields_nvdebug_systeminfo_driverinfo[6]) +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_LOAD_COUNT (&prb_fields_nvdebug_systeminfo_driverinfo[7]) + +// 'DriverInfo' field lengths +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_IS_RELEASE_LEN 1 +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_VERSION_LEN 1 +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_BRANCH_LEN 1 +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_CHANGELIST_LEN 5 +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_VERSION_LEN 1 +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_BRANCH_LEN 1 +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_CHANGELIST_LEN 5 +#define NVDEBUG_SYSTEMINFO_DRIVERINFO_LOAD_COUNT_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_config[]; + +// 'Config' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_CONFIG_MASTER_ID (&prb_fields_nvdebug_systeminfo_config[0]) +#define NVDEBUG_SYSTEMINFO_CONFIG_GPU_ID (&prb_fields_nvdebug_systeminfo_config[1]) + +// 'Config' field lengths +#define NVDEBUG_SYSTEMINFO_CONFIG_MASTER_ID_LEN 5 +#define NVDEBUG_SYSTEMINFO_CONFIG_GPU_ID_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_errorstate[]; + +// 'ErrorState' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_ERRORSTATE_BUGCHECK_CODE (&prb_fields_nvdebug_systeminfo_errorstate[0]) +#define 
NVDEBUG_SYSTEMINFO_ERRORSTATE_GOT_RM_LOCK (&prb_fields_nvdebug_systeminfo_errorstate[1]) +#define NVDEBUG_SYSTEMINFO_ERRORSTATE_DUMP_BUFFER_SIZE (&prb_fields_nvdebug_systeminfo_errorstate[2]) + +// 'ErrorState' field lengths +#define NVDEBUG_SYSTEMINFO_ERRORSTATE_BUGCHECK_CODE_LEN 5 +#define NVDEBUG_SYSTEMINFO_ERRORSTATE_GOT_RM_LOCK_LEN 1 +#define NVDEBUG_SYSTEMINFO_ERRORSTATE_DUMP_BUFFER_SIZE_LEN 5 + +extern const PRB_FIELD_DESC prb_fields_nvdebug_systeminfo_timeinfo[]; + +// 'TimeInfo' field descriptor pointers +#define NVDEBUG_SYSTEMINFO_TIMEINFO_TIMESTAMP_FREQ (&prb_fields_nvdebug_systeminfo_timeinfo[0]) +#define NVDEBUG_SYSTEMINFO_TIMEINFO_TIMESTAMP_DUMP (&prb_fields_nvdebug_systeminfo_timeinfo[1]) +#define NVDEBUG_SYSTEMINFO_TIMEINFO_SYSTEM_TIME_DUMP (&prb_fields_nvdebug_systeminfo_timeinfo[2]) +#define NVDEBUG_SYSTEMINFO_TIMEINFO_TIME_SINCE_BOOT_US (&prb_fields_nvdebug_systeminfo_timeinfo[3]) +#define NVDEBUG_SYSTEMINFO_TIMEINFO_TIME_SINCE_BOOT_SEC (&prb_fields_nvdebug_systeminfo_timeinfo[4]) + +// 'TimeInfo' field lengths +#define NVDEBUG_SYSTEMINFO_TIMEINFO_TIMESTAMP_FREQ_LEN 10 +#define NVDEBUG_SYSTEMINFO_TIMEINFO_TIMESTAMP_DUMP_LEN 10 +#define NVDEBUG_SYSTEMINFO_TIMEINFO_SYSTEM_TIME_DUMP_LEN 10 +#define NVDEBUG_SYSTEMINFO_TIMEINFO_TIME_SINCE_BOOT_US_LEN 5 +#define NVDEBUG_SYSTEMINFO_TIMEINFO_TIME_SINCE_BOOT_SEC_LEN 5 + +extern const PRB_SERVICE_DESC prb_services_nvdebug[]; + +// Service descriptor pointers + +#endif // G_NVDEBUG_PB_H__ diff --git a/src/nvidia/generated/g_nvh_state.h b/src/nvidia/generated/g_nvh_state.h new file mode 100644 index 000000000..7c35d4c92 --- /dev/null +++ b/src/nvidia/generated/g_nvh_state.h @@ -0,0 +1,28 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// NVOC Header State : This file is used for different code path for disabled NVH +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_nvh_state.h +// + +#ifndef _G_NVH_STATE_H_ +#define _G_NVH_STATE_H_ + +// +// __nvoc_nvh_state_guard +// This macro define is used to check whether this header is included before +// NVOC headers. 
The usage: +// #ifndef __nvoc_nvh_state_guard +// #error "NVH state guard header is not included prior to this NVOC header" +// #endif +// +#define __nvoc_nvh_state_guard + +// +// List of disabled NVOC headers +// + + + +#endif // _G_NVH_STATE_H_ diff --git a/src/nvidia/generated/g_object_nvoc.c b/src/nvidia/generated/g_object_nvoc.c new file mode 100644 index 000000000..7e87023db --- /dev/null +++ b/src/nvidia/generated/g_object_nvoc.c @@ -0,0 +1,130 @@ +#define NVOC_OBJECT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_object_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x497031 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_Object(Object*); +void __nvoc_init_funcTable_Object(Object*); +NV_STATUS __nvoc_ctor_Object(Object*); +void __nvoc_init_dataField_Object(Object*); +void __nvoc_dtor_Object(Object*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Object; + +static const struct NVOC_RTTI __nvoc_rtti_Object_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Object, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Object = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_Object_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Object = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Object), + /*classId=*/ classId(Object), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Object", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Object, + /*pCastInfo=*/ &__nvoc_castinfo_Object, + /*pExportInfo=*/ &__nvoc_export_info_Object +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Object = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Object(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_Object(pThis); + goto __nvoc_ctor_Object_exit; // Success + +__nvoc_ctor_Object_exit: + + return status; +} + +static void __nvoc_init_funcTable_Object_1(Object *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_Object(Object *pThis) { + __nvoc_init_funcTable_Object_1(pThis); +} + +void __nvoc_init_Object(Object *pThis) { + pThis->__nvoc_pbase_Object = pThis; + __nvoc_init_funcTable_Object(pThis); +} + +NV_STATUS __nvoc_objCreate_Object(Object **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + Object *pThis; + + pThis = portMemAllocNonPaged(sizeof(Object)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Object)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Object); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, pThis); + } + else + { + pThis->pParent = NULL; + } + + __nvoc_init_Object(pThis); + status = __nvoc_ctor_Object(pThis); + if (status != NV_OK) goto __nvoc_objCreate_Object_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Object_cleanup: + // do not call destructors here since the constructor already called them + 
portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Object(Object **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_Object(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_object_nvoc.h b/src/nvidia/generated/g_object_nvoc.h new file mode 100644 index 000000000..d9df8bb3e --- /dev/null +++ b/src/nvidia/generated/g_object_nvoc.h @@ -0,0 +1,187 @@ +#ifndef _G_OBJECT_NVOC_H_ +#define _G_OBJECT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#include "g_object_nvoc.h" + +#ifndef _NVOC_OBJECT_H_ +#define _NVOC_OBJECT_H_ + +#include "nvtypes.h" +#include "nvstatus.h" + +#include "nvoc/prelude.h" + +struct Object; + +#ifndef __NVOC_CLASS_Object_TYPEDEF__ +#define __NVOC_CLASS_Object_TYPEDEF__ +typedef struct Object Object; +#endif /* __NVOC_CLASS_Object_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Object +#define __nvoc_class_id_Object 0x497031 +#endif /* __nvoc_class_id_Object */ + + +struct NVOC_CLASS_INFO; + +/*! + * Tracks the head of an object's child list, and the next object in its + * parent's child list. + */ +struct NVOC_CHILD_TREE +{ + struct Object *pChild; + struct Object *pSibling; +}; + +//! The base class of all instantiable NVOC objects. 
+#ifdef NVOC_OBJECT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Object { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object *__nvoc_pbase_Object; + struct Object *pParent; + struct NVOC_CHILD_TREE childTree; + NvU32 ipVersion; +}; + +#ifndef __NVOC_CLASS_Object_TYPEDEF__ +#define __NVOC_CLASS_Object_TYPEDEF__ +typedef struct Object Object; +#endif /* __NVOC_CLASS_Object_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Object +#define __nvoc_class_id_Object 0x497031 +#endif /* __nvoc_class_id_Object */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +#define __staticCast_Object(pThis) \ + ((pThis)->__nvoc_pbase_Object) + +#ifdef __nvoc_object_h_disabled +#define __dynamicCast_Object(pThis) ((Object*)NULL) +#else //__nvoc_object_h_disabled +#define __dynamicCast_Object(pThis) \ + ((Object*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Object))) +#endif //__nvoc_object_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Object(Object**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Object(Object**, Dynamic*, NvU32); +#define __objCreate_Object(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_Object((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +void objAddChild_IMPL(struct Object *pObj, struct Object *pChild); +#ifdef __nvoc_object_h_disabled +static inline void objAddChild(struct Object *pObj, struct Object *pChild) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); +} +#else //__nvoc_object_h_disabled +#define objAddChild(pObj, pChild) objAddChild_IMPL(pObj, pChild) +#endif //__nvoc_object_h_disabled + +void objRemoveChild_IMPL(struct Object *pObj, struct Object *pChild); +#ifdef __nvoc_object_h_disabled +static inline void objRemoveChild(struct Object *pObj, struct Object *pChild) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); +} +#else //__nvoc_object_h_disabled +#define objRemoveChild(pObj, pChild) objRemoveChild_IMPL(pObj, pChild) +#endif //__nvoc_object_h_disabled + +struct Object *objGetChild_IMPL(struct Object *pObj); +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetChild(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetChild(pObj) objGetChild_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +struct Object *objGetSibling_IMPL(struct Object *pObj); +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetSibling(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetSibling(pObj) objGetSibling_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +struct Object *objGetDirectParent_IMPL(struct Object *pObj); +#ifdef __nvoc_object_h_disabled +static inline struct Object *objGetDirectParent(struct Object *pObj) { + NV_ASSERT_FAILED_PRECOMP("Object was disabled!"); + return NULL; +} +#else //__nvoc_object_h_disabled +#define objGetDirectParent(pObj) objGetDirectParent_IMPL(pObj) +#endif //__nvoc_object_h_disabled + +#undef PRIVATE_FIELD + + +// +// IP versioning definitions are temporary until NVOC halspec support is +// finished. +// +// IP_VERSION format as defined by the hardware engines. +// A _MAJOR value of 0 means the object has no version number. 
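(Editorial aside, not part of the diff: the IP_VERSION packing described in the comment above, and spelled out by the NV_ODB_IP_VER_* field ranges defined just below, is an ordinary 8-bit-per-field layout and can be decoded with plain shifts and masks. The helper names in this sketch are hypothetical and exist only for illustration; the driver itself uses the DRF-based IPVersion/IsIPVersionValid macros that follow in the header.)

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers mirroring the 31:24 / 23:16 / 15:8 / 7:0 layout
 * documented above (MAJOR.MINOR.ECO.DEV). */
static unsigned ipVerMajor(uint32_t v) { return (v >> 24) & 0xFFu; }
static unsigned ipVerMinor(uint32_t v) { return (v >> 16) & 0xFFu; }
static unsigned ipVerEco  (uint32_t v) { return (v >>  8) & 0xFFu; }
static unsigned ipVerDev  (uint32_t v) { return (v >>  0) & 0xFFu; }

int main(void)
{
    uint32_t ver   = 0x03010200u;            /* example value only */
    int      valid = ipVerMajor(ver) != 0;   /* a MAJOR of 0 means "no version number" */

    printf("IP version %u.%u.%u.%u (valid=%d)\n",
           ipVerMajor(ver), ipVerMinor(ver), ipVerEco(ver), ipVerDev(ver), valid);
    return 0;
}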
+// + +#define NV_ODB_IP_VER_DEV 7:0 /* R-IVF */ +#define NV_ODB_IP_VER_ECO 15:8 /* R-IVF */ +#define NV_ODB_IP_VER_MINOR 23:16 /* R-IVF */ +#define NV_ODB_IP_VER_MAJOR 31:24 /* R-IVF */ + +#define IPVersion(pObj) staticCast((pObj), Object)->ipVersion +#define IsIPVersionValid(pObj) (DRF_VAL(_ODB, _IP_VER, _MAJOR, IPVersion(pObj)) != 0) +#define IsIPVersionOrLater(pObj, v0) (IPVersion(pObj) >= (v0)) +// v0 .. v1 inclusive +#define IsIPVersionInRange(pObj, v0, v1) ((IPVersion(pObj) >= (v0)) && (IPVersion(pObj) <= (v1))) + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_OBJECT_NVOC_H_ diff --git a/src/nvidia/generated/g_objgpumon_nvoc.c b/src/nvidia/generated/g_objgpumon_nvoc.c new file mode 100644 index 000000000..66ca1b088 --- /dev/null +++ b/src/nvidia/generated/g_objgpumon_nvoc.c @@ -0,0 +1,293 @@ +#define NVOC_OBJGPUMON_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_objgpumon_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x2b424b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMON; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_OBJGPUMON(OBJGPUMON*, RmHalspecOwner* ); +void __nvoc_init_funcTable_OBJGPUMON(OBJGPUMON*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJGPUMON(OBJGPUMON*, RmHalspecOwner* ); +void __nvoc_init_dataField_OBJGPUMON(OBJGPUMON*, RmHalspecOwner* ); +void __nvoc_dtor_OBJGPUMON(OBJGPUMON*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMON; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMON_OBJGPUMON = { + /*pClassDef=*/ &__nvoc_class_def_OBJGPUMON, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUMON, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMON_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPUMON, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMON_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJGPUMON, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUMON = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJGPUMON_OBJGPUMON, + &__nvoc_rtti_OBJGPUMON_OBJENGSTATE, + &__nvoc_rtti_OBJGPUMON_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMON = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJGPUMON), + /*classId=*/ classId(OBJGPUMON), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJGPUMON", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUMON, + /*pCastInfo=*/ &__nvoc_castinfo_OBJGPUMON, + /*pExportInfo=*/ &__nvoc_export_info_OBJGPUMON +}; + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonReconcileTunableState(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStateLoad(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + 
__nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStateUnload(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStateInitLocked(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStatePreLoad(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStatePostUnload(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_gpumonStateDestroy(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStatePreUnload(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStateInitUnlocked(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_gpumonInitMissing(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStatePreInitLocked(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonStatePreInitUnlocked(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonGetTunableState(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonCompareTunableState(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_gpumonFreeTunableState(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS 
__nvoc_thunk_OBJENGSTATE_gpumonStatePostLoad(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonAllocTunableState(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonSetTunableState(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_gpumonConstructEngine(POBJGPU pGpu, struct OBJGPUMON *pEngstate, ENGDESCRIPTOR arg0) { + return engstateConstructEngine(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset), arg0); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_gpumonIsPresent(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJGPUMON_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMON = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJGPUMON(OBJGPUMON *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJGPUMON(OBJGPUMON *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_OBJGPUMON(OBJGPUMON *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_OBJGPUMON_fail_OBJENGSTATE; + __nvoc_init_dataField_OBJGPUMON(pThis, pRmhalspecowner); + goto __nvoc_ctor_OBJGPUMON_exit; // Success + +__nvoc_ctor_OBJGPUMON_fail_OBJENGSTATE: +__nvoc_ctor_OBJGPUMON_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJGPUMON_1(OBJGPUMON *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__gpumonReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_gpumonReconcileTunableState; + + pThis->__gpumonStateLoad__ = &__nvoc_thunk_OBJENGSTATE_gpumonStateLoad; + + pThis->__gpumonStateUnload__ = &__nvoc_thunk_OBJENGSTATE_gpumonStateUnload; + + pThis->__gpumonStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_gpumonStateInitLocked; + + pThis->__gpumonStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_gpumonStatePreLoad; + + pThis->__gpumonStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_gpumonStatePostUnload; + + pThis->__gpumonStateDestroy__ = 
&__nvoc_thunk_OBJENGSTATE_gpumonStateDestroy; + + pThis->__gpumonStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_gpumonStatePreUnload; + + pThis->__gpumonStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_gpumonStateInitUnlocked; + + pThis->__gpumonInitMissing__ = &__nvoc_thunk_OBJENGSTATE_gpumonInitMissing; + + pThis->__gpumonStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_gpumonStatePreInitLocked; + + pThis->__gpumonStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_gpumonStatePreInitUnlocked; + + pThis->__gpumonGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_gpumonGetTunableState; + + pThis->__gpumonCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_gpumonCompareTunableState; + + pThis->__gpumonFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_gpumonFreeTunableState; + + pThis->__gpumonStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_gpumonStatePostLoad; + + pThis->__gpumonAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_gpumonAllocTunableState; + + pThis->__gpumonSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_gpumonSetTunableState; + + pThis->__gpumonConstructEngine__ = &__nvoc_thunk_OBJENGSTATE_gpumonConstructEngine; + + pThis->__gpumonIsPresent__ = &__nvoc_thunk_OBJENGSTATE_gpumonIsPresent; +} + +void __nvoc_init_funcTable_OBJGPUMON(OBJGPUMON *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_OBJGPUMON_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_OBJGPUMON(OBJGPUMON *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_OBJGPUMON = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_OBJGPUMON(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_OBJGPUMON(OBJGPUMON **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJGPUMON *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(OBJGPUMON)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJGPUMON)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUMON); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_OBJGPUMON(pThis, pRmhalspecowner); + status = __nvoc_ctor_OBJGPUMON(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_OBJGPUMON_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJGPUMON_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUMON(OBJGPUMON **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJGPUMON(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_objgpumon_nvoc.h b/src/nvidia/generated/g_objgpumon_nvoc.h new file mode 100644 index 000000000..aa06b8164 --- /dev/null +++ b/src/nvidia/generated/g_objgpumon_nvoc.h @@ -0,0 
+1,243 @@ +#ifndef _G_OBJGPUMON_NVOC_H_ +#define _G_OBJGPUMON_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_objgpumon_nvoc.h" + +#ifndef _OBJGPUMON_H_ +#define _OBJGPUMON_H_ + +/****************** Resource Manager Defines and Structures *****************\ +* * +* Defines and structures used for the Gpumon Object. * +* * +\****************************************************************************/ + +#include "gpu/gpu.h" +#include "gpu/eng_state.h" + +#include "ctrl/ctrl2080/ctrl2080perf.h" + +typedef struct OBJGPUMON *POBJGPUMON; + +#ifdef NVOC_OBJGPUMON_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJGPUMON { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct OBJGPUMON *__nvoc_pbase_OBJGPUMON; + NV_STATUS (*__gpumonReconcileTunableState__)(POBJGPU, struct OBJGPUMON *, void *); + NV_STATUS (*__gpumonStateLoad__)(POBJGPU, struct OBJGPUMON *, NvU32); + NV_STATUS (*__gpumonStateUnload__)(POBJGPU, struct OBJGPUMON *, NvU32); + NV_STATUS (*__gpumonStateInitLocked__)(POBJGPU, struct OBJGPUMON *); + NV_STATUS (*__gpumonStatePreLoad__)(POBJGPU, struct OBJGPUMON *, NvU32); + NV_STATUS (*__gpumonStatePostUnload__)(POBJGPU, struct OBJGPUMON *, NvU32); + void (*__gpumonStateDestroy__)(POBJGPU, struct OBJGPUMON *); + NV_STATUS (*__gpumonStatePreUnload__)(POBJGPU, struct OBJGPUMON *, NvU32); + NV_STATUS (*__gpumonStateInitUnlocked__)(POBJGPU, struct OBJGPUMON *); + void (*__gpumonInitMissing__)(POBJGPU, struct OBJGPUMON *); + NV_STATUS (*__gpumonStatePreInitLocked__)(POBJGPU, struct OBJGPUMON *); + NV_STATUS (*__gpumonStatePreInitUnlocked__)(POBJGPU, struct OBJGPUMON *); + NV_STATUS (*__gpumonGetTunableState__)(POBJGPU, struct OBJGPUMON *, void *); + NV_STATUS (*__gpumonCompareTunableState__)(POBJGPU, struct OBJGPUMON *, void *, void *); + void (*__gpumonFreeTunableState__)(POBJGPU, struct OBJGPUMON *, void *); + NV_STATUS (*__gpumonStatePostLoad__)(POBJGPU, struct OBJGPUMON *, NvU32); + NV_STATUS (*__gpumonAllocTunableState__)(POBJGPU, struct OBJGPUMON *, void **); + NV_STATUS (*__gpumonSetTunableState__)(POBJGPU, struct OBJGPUMON 
*, void *); + NV_STATUS (*__gpumonConstructEngine__)(POBJGPU, struct OBJGPUMON *, ENGDESCRIPTOR); + NvBool (*__gpumonIsPresent__)(POBJGPU, struct OBJGPUMON *); +}; + +#ifndef __NVOC_CLASS_OBJGPUMON_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUMON_TYPEDEF__ +typedef struct OBJGPUMON OBJGPUMON; +#endif /* __NVOC_CLASS_OBJGPUMON_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUMON +#define __nvoc_class_id_OBJGPUMON 0x2b424b +#endif /* __nvoc_class_id_OBJGPUMON */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMON; + +#define __staticCast_OBJGPUMON(pThis) \ + ((pThis)->__nvoc_pbase_OBJGPUMON) + +#ifdef __nvoc_objgpumon_h_disabled +#define __dynamicCast_OBJGPUMON(pThis) ((OBJGPUMON*)NULL) +#else //__nvoc_objgpumon_h_disabled +#define __dynamicCast_OBJGPUMON(pThis) \ + ((OBJGPUMON*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJGPUMON))) +#endif //__nvoc_objgpumon_h_disabled + +#define PDB_PROP_GPUMON_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_GPUMON_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJGPUMON(OBJGPUMON**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJGPUMON(OBJGPUMON**, Dynamic*, NvU32); +#define __objCreate_OBJGPUMON(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJGPUMON((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define gpumonReconcileTunableState(pGpu, pEngstate, pTunableState) gpumonReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define gpumonStateLoad(pGpu, pEngstate, arg0) gpumonStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define gpumonStateUnload(pGpu, pEngstate, arg0) gpumonStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define gpumonStateInitLocked(pGpu, pEngstate) gpumonStateInitLocked_DISPATCH(pGpu, pEngstate) +#define gpumonStatePreLoad(pGpu, pEngstate, arg0) gpumonStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define gpumonStatePostUnload(pGpu, pEngstate, arg0) gpumonStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define gpumonStateDestroy(pGpu, pEngstate) gpumonStateDestroy_DISPATCH(pGpu, pEngstate) +#define gpumonStatePreUnload(pGpu, pEngstate, arg0) gpumonStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define gpumonStateInitUnlocked(pGpu, pEngstate) gpumonStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define gpumonInitMissing(pGpu, pEngstate) gpumonInitMissing_DISPATCH(pGpu, pEngstate) +#define gpumonStatePreInitLocked(pGpu, pEngstate) gpumonStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define gpumonStatePreInitUnlocked(pGpu, pEngstate) gpumonStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define gpumonGetTunableState(pGpu, pEngstate, pTunableState) gpumonGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define gpumonCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) gpumonCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define gpumonFreeTunableState(pGpu, pEngstate, pTunableState) gpumonFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define gpumonStatePostLoad(pGpu, pEngstate, arg0) gpumonStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define gpumonAllocTunableState(pGpu, pEngstate, ppTunableState) gpumonAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define gpumonSetTunableState(pGpu, pEngstate, pTunableState) gpumonSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define gpumonConstructEngine(pGpu, pEngstate, arg0) gpumonConstructEngine_DISPATCH(pGpu, pEngstate, arg0) +#define gpumonIsPresent(pGpu, pEngstate) gpumonIsPresent_DISPATCH(pGpu, pEngstate) 
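(Editorial aside, not part of the generated header: each gpumon*_DISPATCH helper above simply forwards a call through the per-object function pointer that __nvoc_init_funcTable_OBJGPUMON installed earlier in this diff; this is how NVOC models virtual dispatch in plain C. Below is a minimal, self-contained sketch of that pattern. Every name in it is a hypothetical stand-in chosen for illustration, not a driver API.)

#include <stdio.h>

/* Simplified analogue of the generated "vtable in struct" layout. */
typedef struct Counter Counter;
struct Counter {
    int value;
    int (*__counterStep__)(Counter *pThis, int delta);   /* "virtual" slot */
};

/* _DISPATCH helper: forwards through the installed function pointer,
 * mirroring gpumonStateLoad_DISPATCH() and friends above. */
static inline int counterStep_DISPATCH(Counter *pThis, int delta)
{
    return pThis->__counterStep__(pThis, delta);
}
#define counterStep(pThis, delta) counterStep_DISPATCH((pThis), (delta))

/* Concrete implementation bound at init time, playing the role of the
 * thunks the generator wires up in __nvoc_init_funcTable_*. */
static int counterStep_IMPL(Counter *pThis, int delta)
{
    pThis->value += delta;
    return pThis->value;
}

int main(void)
{
    Counter c = { 0, counterStep_IMPL };
    printf("%d\n", counterStep(&c, 5));   /* prints 5 */
    return 0;
}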
+void gpumonGetContextProcessInfo_GM107(struct OBJGPU *pGpu, struct OBJGPUMON *pGpumon, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, const char **arg3); + +#ifdef __nvoc_objgpumon_h_disabled +static inline void gpumonGetContextProcessInfo(struct OBJGPU *pGpu, struct OBJGPUMON *pGpumon, NvU32 arg0, NvU32 *arg1, NvU32 *arg2, const char **arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUMON was disabled!"); +} +#else //__nvoc_objgpumon_h_disabled +#define gpumonGetContextProcessInfo(pGpu, pGpumon, arg0, arg1, arg2, arg3) gpumonGetContextProcessInfo_GM107(pGpu, pGpumon, arg0, arg1, arg2, arg3) +#endif //__nvoc_objgpumon_h_disabled + +#define gpumonGetContextProcessInfo_HAL(pGpu, pGpumon, arg0, arg1, arg2, arg3) gpumonGetContextProcessInfo(pGpu, pGpumon, arg0, arg1, arg2, arg3) + +static inline NV_STATUS gpumonReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunableState) { + return pEngstate->__gpumonReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS gpumonStateLoad_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return pEngstate->__gpumonStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS gpumonStateUnload_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return pEngstate->__gpumonStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS gpumonStateInitLocked_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return pEngstate->__gpumonStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS gpumonStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return pEngstate->__gpumonStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS gpumonStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return pEngstate->__gpumonStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void gpumonStateDestroy_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + pEngstate->__gpumonStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS gpumonStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return pEngstate->__gpumonStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS gpumonStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return pEngstate->__gpumonStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void gpumonInitMissing_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + pEngstate->__gpumonInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS gpumonStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return pEngstate->__gpumonStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS gpumonStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return pEngstate->__gpumonStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS gpumonGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunableState) { + return pEngstate->__gpumonGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS gpumonCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__gpumonCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void gpumonFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunableState) { + pEngstate->__gpumonFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static 
inline NV_STATUS gpumonStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, NvU32 arg0) { + return pEngstate->__gpumonStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS gpumonAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void **ppTunableState) { + return pEngstate->__gpumonAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS gpumonSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, void *pTunableState) { + return pEngstate->__gpumonSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS gpumonConstructEngine_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate, ENGDESCRIPTOR arg0) { + return pEngstate->__gpumonConstructEngine__(pGpu, pEngstate, arg0); +} + +static inline NvBool gpumonIsPresent_DISPATCH(POBJGPU pGpu, struct OBJGPUMON *pEngstate) { + return pEngstate->__gpumonIsPresent__(pGpu, pEngstate); +} + +NV_STATUS gpumonGetPerfmonUtilSamples_IMPL(struct OBJGPU *pGpu, struct OBJGPUMON *pGpumon, NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE *arg0, NvU32 arg1, NvU32 *arg2); +#ifdef __nvoc_objgpumon_h_disabled +static inline NV_STATUS gpumonGetPerfmonUtilSamples(struct OBJGPU *pGpu, struct OBJGPUMON *pGpumon, NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE *arg0, NvU32 arg1, NvU32 *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJGPUMON was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objgpumon_h_disabled +#define gpumonGetPerfmonUtilSamples(pGpu, pGpumon, arg0, arg1, arg2) gpumonGetPerfmonUtilSamples_IMPL(pGpu, pGpumon, arg0, arg1, arg2) +#endif //__nvoc_objgpumon_h_disabled + +#undef PRIVATE_FIELD + + +#endif //_OBJGPUMON_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_OBJGPUMON_NVOC_H_ diff --git a/src/nvidia/generated/g_objsweng_nvoc.c b/src/nvidia/generated/g_objsweng_nvoc.c new file mode 100644 index 000000000..19b65d935 --- /dev/null +++ b/src/nvidia/generated/g_objsweng_nvoc.c @@ -0,0 +1,280 @@ +#define NVOC_OBJSWENG_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_objsweng_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x95a6f5 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSWENG; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_OBJSWENG(OBJSWENG*); +void __nvoc_init_funcTable_OBJSWENG(OBJSWENG*); +NV_STATUS __nvoc_ctor_OBJSWENG(OBJSWENG*); +void __nvoc_init_dataField_OBJSWENG(OBJSWENG*); +void __nvoc_dtor_OBJSWENG(OBJSWENG*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJSWENG; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSWENG_OBJSWENG = { + /*pClassDef=*/ &__nvoc_class_def_OBJSWENG, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJSWENG, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSWENG_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJSWENG, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSWENG_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJSWENG, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJSWENG = { + /*numRelatives=*/ 3, + /*relatives=*/ { + 
&__nvoc_rtti_OBJSWENG_OBJSWENG, + &__nvoc_rtti_OBJSWENG_OBJENGSTATE, + &__nvoc_rtti_OBJSWENG_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSWENG = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJSWENG), + /*classId=*/ classId(OBJSWENG), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJSWENG", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJSWENG, + /*pCastInfo=*/ &__nvoc_castinfo_OBJSWENG, + /*pExportInfo=*/ &__nvoc_export_info_OBJSWENG +}; + +static NV_STATUS __nvoc_thunk_OBJSWENG_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pSweng, ENGDESCRIPTOR arg0) { + return swengConstructEngine(pGpu, (struct OBJSWENG *)(((unsigned char *)pSweng) - __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengReconcileTunableState(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStateLoad(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStateUnload(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStateInitLocked(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStatePreLoad(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStatePostUnload(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_swengStateDestroy(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStatePreUnload(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStateInitUnlocked(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_swengInitMissing(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStatePreInitLocked(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE 
*)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStatePreInitUnlocked(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengGetTunableState(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengCompareTunableState(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_swengFreeTunableState(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengStatePostLoad(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengAllocTunableState(POBJGPU pGpu, struct OBJSWENG *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swengSetTunableState(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_swengIsPresent(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJSWENG_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJSWENG = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJSWENG(OBJSWENG *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJSWENG(OBJSWENG *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_OBJSWENG(OBJSWENG *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_OBJSWENG_fail_OBJENGSTATE; + __nvoc_init_dataField_OBJSWENG(pThis); + goto __nvoc_ctor_OBJSWENG_exit; // Success + +__nvoc_ctor_OBJSWENG_fail_OBJENGSTATE: +__nvoc_ctor_OBJSWENG_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJSWENG_1(OBJSWENG *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__swengConstructEngine__ = &swengConstructEngine_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_OBJSWENG_engstateConstructEngine; + + pThis->__swengReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_swengReconcileTunableState; + + pThis->__swengStateLoad__ = 
&__nvoc_thunk_OBJENGSTATE_swengStateLoad; + + pThis->__swengStateUnload__ = &__nvoc_thunk_OBJENGSTATE_swengStateUnload; + + pThis->__swengStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_swengStateInitLocked; + + pThis->__swengStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_swengStatePreLoad; + + pThis->__swengStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_swengStatePostUnload; + + pThis->__swengStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_swengStateDestroy; + + pThis->__swengStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_swengStatePreUnload; + + pThis->__swengStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_swengStateInitUnlocked; + + pThis->__swengInitMissing__ = &__nvoc_thunk_OBJENGSTATE_swengInitMissing; + + pThis->__swengStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_swengStatePreInitLocked; + + pThis->__swengStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_swengStatePreInitUnlocked; + + pThis->__swengGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_swengGetTunableState; + + pThis->__swengCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_swengCompareTunableState; + + pThis->__swengFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_swengFreeTunableState; + + pThis->__swengStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_swengStatePostLoad; + + pThis->__swengAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_swengAllocTunableState; + + pThis->__swengSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_swengSetTunableState; + + pThis->__swengIsPresent__ = &__nvoc_thunk_OBJENGSTATE_swengIsPresent; +} + +void __nvoc_init_funcTable_OBJSWENG(OBJSWENG *pThis) { + __nvoc_init_funcTable_OBJSWENG_1(pThis); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_OBJSWENG(OBJSWENG *pThis) { + pThis->__nvoc_pbase_OBJSWENG = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_OBJSWENG(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJSWENG(OBJSWENG **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJSWENG *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJSWENG)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJSWENG)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJSWENG); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJSWENG(pThis); + status = __nvoc_ctor_OBJSWENG(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJSWENG_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJSWENG_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJSWENG(OBJSWENG **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJSWENG(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_objsweng_nvoc.h b/src/nvidia/generated/g_objsweng_nvoc.h new file mode 100644 index 000000000..6ba8fc3be --- /dev/null +++ b/src/nvidia/generated/g_objsweng_nvoc.h @@ -0,0 +1,223 @@ +#ifndef _G_OBJSWENG_NVOC_H_ +#define _G_OBJSWENG_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus 
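/*
 * The thunks in g_objsweng_nvoc.c above recover one view of an object from
 * another purely by pointer arithmetic: a pointer to the embedded base minus
 * the recorded offset yields the derived object, and adding the same offset
 * goes the other way. A minimal, self-contained sketch of that idea follows;
 * Base, Derived and derivedFromBase are hypothetical illustration-only names,
 * not NVOC types.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

typedef struct { int baseField; } Base;

typedef struct {
    int  derivedField;
    Base base;          /* embedded base object, like __nvoc_base_OBJENGSTATE */
} Derived;

/* Same arithmetic as the "- offset" casts in the thunks above. */
static Derived *derivedFromBase(Base *pBase)
{
    return (Derived *)((unsigned char *)pBase - offsetof(Derived, base));
}

int main(void)
{
    Derived d = { 7, { 42 } };
    Base *pBase = &d.base;                                  /* base-class view */
    printf("%d\n", derivedFromBase(pBase)->derivedField);   /* prints 7 */
    return 0;
}
#endif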
+extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** Modular includes *****************************\ +* * +* OBJSWENG implements the 'Software Engine' from the GPU host * +* perspective. That is the software component that emulates a GPU host * +* engine by implementing SW methods in the driver. * +* * +****************************************************************************/ + +#include "g_objsweng_nvoc.h" + +#ifndef _OBJSWENG_H_ +#define _OBJSWENG_H_ + +#include "core/core.h" +#include "gpu/eng_state.h" + +typedef struct OBJSWENG *POBJSWENG; + +#ifdef NVOC_OBJSWENG_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJSWENG { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct OBJSWENG *__nvoc_pbase_OBJSWENG; + NV_STATUS (*__swengConstructEngine__)(struct OBJGPU *, struct OBJSWENG *, ENGDESCRIPTOR); + NV_STATUS (*__swengReconcileTunableState__)(POBJGPU, struct OBJSWENG *, void *); + NV_STATUS (*__swengStateLoad__)(POBJGPU, struct OBJSWENG *, NvU32); + NV_STATUS (*__swengStateUnload__)(POBJGPU, struct OBJSWENG *, NvU32); + NV_STATUS (*__swengStateInitLocked__)(POBJGPU, struct OBJSWENG *); + NV_STATUS (*__swengStatePreLoad__)(POBJGPU, struct OBJSWENG *, NvU32); + NV_STATUS (*__swengStatePostUnload__)(POBJGPU, struct OBJSWENG *, NvU32); + void (*__swengStateDestroy__)(POBJGPU, struct OBJSWENG *); + NV_STATUS (*__swengStatePreUnload__)(POBJGPU, struct OBJSWENG *, NvU32); + NV_STATUS (*__swengStateInitUnlocked__)(POBJGPU, struct OBJSWENG *); + void (*__swengInitMissing__)(POBJGPU, struct OBJSWENG *); + NV_STATUS (*__swengStatePreInitLocked__)(POBJGPU, struct OBJSWENG *); + NV_STATUS (*__swengStatePreInitUnlocked__)(POBJGPU, struct OBJSWENG *); + NV_STATUS (*__swengGetTunableState__)(POBJGPU, struct OBJSWENG *, void *); + NV_STATUS (*__swengCompareTunableState__)(POBJGPU, struct OBJSWENG *, void *, void *); + void (*__swengFreeTunableState__)(POBJGPU, struct OBJSWENG *, void *); + NV_STATUS (*__swengStatePostLoad__)(POBJGPU, struct OBJSWENG *, NvU32); + NV_STATUS (*__swengAllocTunableState__)(POBJGPU, struct OBJSWENG *, void **); + NV_STATUS 
(*__swengSetTunableState__)(POBJGPU, struct OBJSWENG *, void *); + NvBool (*__swengIsPresent__)(POBJGPU, struct OBJSWENG *); +}; + +#ifndef __NVOC_CLASS_OBJSWENG_TYPEDEF__ +#define __NVOC_CLASS_OBJSWENG_TYPEDEF__ +typedef struct OBJSWENG OBJSWENG; +#endif /* __NVOC_CLASS_OBJSWENG_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSWENG +#define __nvoc_class_id_OBJSWENG 0x95a6f5 +#endif /* __nvoc_class_id_OBJSWENG */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSWENG; + +#define __staticCast_OBJSWENG(pThis) \ + ((pThis)->__nvoc_pbase_OBJSWENG) + +#ifdef __nvoc_objsweng_h_disabled +#define __dynamicCast_OBJSWENG(pThis) ((OBJSWENG*)NULL) +#else //__nvoc_objsweng_h_disabled +#define __dynamicCast_OBJSWENG(pThis) \ + ((OBJSWENG*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJSWENG))) +#endif //__nvoc_objsweng_h_disabled + +#define PDB_PROP_SWENG_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_SWENG_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJSWENG(OBJSWENG**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJSWENG(OBJSWENG**, Dynamic*, NvU32); +#define __objCreate_OBJSWENG(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJSWENG((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define swengConstructEngine(pGpu, pSweng, arg0) swengConstructEngine_DISPATCH(pGpu, pSweng, arg0) +#define swengReconcileTunableState(pGpu, pEngstate, pTunableState) swengReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define swengStateLoad(pGpu, pEngstate, arg0) swengStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define swengStateUnload(pGpu, pEngstate, arg0) swengStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define swengStateInitLocked(pGpu, pEngstate) swengStateInitLocked_DISPATCH(pGpu, pEngstate) +#define swengStatePreLoad(pGpu, pEngstate, arg0) swengStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define swengStatePostUnload(pGpu, pEngstate, arg0) swengStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define swengStateDestroy(pGpu, pEngstate) swengStateDestroy_DISPATCH(pGpu, pEngstate) +#define swengStatePreUnload(pGpu, pEngstate, arg0) swengStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define swengStateInitUnlocked(pGpu, pEngstate) swengStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define swengInitMissing(pGpu, pEngstate) swengInitMissing_DISPATCH(pGpu, pEngstate) +#define swengStatePreInitLocked(pGpu, pEngstate) swengStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define swengStatePreInitUnlocked(pGpu, pEngstate) swengStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define swengGetTunableState(pGpu, pEngstate, pTunableState) swengGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define swengCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) swengCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define swengFreeTunableState(pGpu, pEngstate, pTunableState) swengFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define swengStatePostLoad(pGpu, pEngstate, arg0) swengStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define swengAllocTunableState(pGpu, pEngstate, ppTunableState) swengAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define swengSetTunableState(pGpu, pEngstate, pTunableState) swengSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define swengIsPresent(pGpu, pEngstate) swengIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS swengConstructEngine_IMPL(struct OBJGPU *pGpu, struct OBJSWENG *pSweng, ENGDESCRIPTOR arg0); + 
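/*
 * Each sweng* name above is a macro onto a *_DISPATCH inline that simply
 * calls through the corresponding function pointer, which
 * __nvoc_init_funcTable_OBJSWENG_1() points either at a local _IMPL or at an
 * inherited OBJENGSTATE thunk. A caller would typically obtain an instance
 * with __objCreate_OBJSWENG(&pSweng, pParent, 0) (flags value illustrative)
 * before invoking any of these. The stand-alone sketch below models the call
 * chain with hypothetical names (Widget, widgetPing), not NVOC types.
 */
#if 0
#include <stdio.h>

typedef struct Widget {
    void (*__widgetPing__)(struct Widget *);    /* one "vtable" slot */
} Widget;

static void widgetPing_IMPL(Widget *pWidget) { printf("ping\n"); }

static inline void widgetPing_DISPATCH(Widget *pWidget)
{
    pWidget->__widgetPing__(pWidget);           /* virtual call through the slot */
}
#define widgetPing(pWidget) widgetPing_DISPATCH(pWidget)

/* Analogue of __nvoc_init_funcTable_*: install the implementation. */
static void initFuncTable(Widget *pWidget)
{
    pWidget->__widgetPing__ = &widgetPing_IMPL;
}

int main(void)
{
    Widget w;
    initFuncTable(&w);
    widgetPing(&w);                             /* resolves to widgetPing_IMPL at run time */
    return 0;
}
#endif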
+static inline NV_STATUS swengConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct OBJSWENG *pSweng, ENGDESCRIPTOR arg0) { + return pSweng->__swengConstructEngine__(pGpu, pSweng, arg0); +} + +static inline NV_STATUS swengReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunableState) { + return pEngstate->__swengReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS swengStateLoad_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return pEngstate->__swengStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swengStateUnload_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return pEngstate->__swengStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swengStateInitLocked_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return pEngstate->__swengStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS swengStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return pEngstate->__swengStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swengStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return pEngstate->__swengStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void swengStateDestroy_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + pEngstate->__swengStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS swengStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return pEngstate->__swengStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swengStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return pEngstate->__swengStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void swengInitMissing_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + pEngstate->__swengInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS swengStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return pEngstate->__swengStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS swengStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return pEngstate->__swengStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS swengGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunableState) { + return pEngstate->__swengGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS swengCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__swengCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void swengFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunableState) { + pEngstate->__swengFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS swengStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, NvU32 arg0) { + return pEngstate->__swengStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swengAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, void **ppTunableState) { + return pEngstate->__swengAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS swengSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate, void *pTunableState) { + return pEngstate->__swengSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool 
swengIsPresent_DISPATCH(POBJGPU pGpu, struct OBJSWENG *pEngstate) { + return pEngstate->__swengIsPresent__(pGpu, pEngstate); +} + +#undef PRIVATE_FIELD + + +#endif // _OBJSWENG_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_OBJSWENG_NVOC_H_ diff --git a/src/nvidia/generated/g_objtmr_nvoc.c b/src/nvidia/generated/g_objtmr_nvoc.c new file mode 100644 index 000000000..3d7776b68 --- /dev/null +++ b/src/nvidia/generated/g_objtmr_nvoc.c @@ -0,0 +1,514 @@ +#define NVOC_OBJTMR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_objtmr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x9ddede = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTMR; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJINTRABLE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_OBJTMR(OBJTMR*, RmHalspecOwner* ); +void __nvoc_init_funcTable_OBJTMR(OBJTMR*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJTMR(OBJTMR*, RmHalspecOwner* ); +void __nvoc_init_dataField_OBJTMR(OBJTMR*, RmHalspecOwner* ); +void __nvoc_dtor_OBJTMR(OBJTMR*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTMR; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_OBJTMR = { + /*pClassDef=*/ &__nvoc_class_def_OBJTMR, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJTMR, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_OBJINTRABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJINTRABLE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJTMR, __nvoc_base_OBJINTRABLE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJTMR, __nvoc_base_IntrService), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJTMR = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_OBJTMR_OBJTMR, + &__nvoc_rtti_OBJTMR_IntrService, + &__nvoc_rtti_OBJTMR_OBJINTRABLE, + &__nvoc_rtti_OBJTMR_OBJENGSTATE, + &__nvoc_rtti_OBJTMR_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTMR = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJTMR), + /*classId=*/ classId(OBJTMR), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJTMR", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJTMR, + /*pCastInfo=*/ &__nvoc_castinfo_OBJTMR, + /*pExportInfo=*/ &__nvoc_export_info_OBJTMR +}; + +static void __nvoc_thunk_OBJTMR_intrservRegisterIntrService(OBJGPU *pGpu, struct IntrService *pTmr, IntrServiceRecord pRecords[155]) { + tmrRegisterIntrService(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_IntrService.offset), pRecords); +} + +static NvBool __nvoc_thunk_OBJTMR_intrservClearInterrupt(OBJGPU 
*pGpu, struct IntrService *pTmr, IntrServiceClearInterruptArguments *pParams) { + return tmrClearInterrupt(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_IntrService.offset), pParams); +} + +static NvU32 __nvoc_thunk_OBJTMR_intrservServiceInterrupt(OBJGPU *pGpu, struct IntrService *pTmr, IntrServiceServiceInterruptArguments *pParams) { + return tmrServiceInterrupt(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJTMR_engstateConstructEngine(OBJGPU *pGpu, struct OBJENGSTATE *pTmr, ENGDESCRIPTOR arg0) { + return tmrConstructEngine(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateInitLocked(OBJGPU *pGpu, struct OBJENGSTATE *pTmr) { + return tmrStateInitLocked(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateInitUnlocked(OBJGPU *pGpu, struct OBJENGSTATE *pTmr) { + return tmrStateInitUnlocked(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateLoad(OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg0) { + return tmrStateLoad(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateUnload(OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg0) { + return tmrStateUnload(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJTMR_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pTmr) { + tmrStateDestroy(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJTMR_intrableGetPhysicalIntrVectors(OBJGPU *pGpu, struct OBJINTRABLE *pTmr, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return tmrGetPhysicalIntrVectors(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJINTRABLE.offset), maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +static void __nvoc_thunk_OBJENGSTATE_tmrFreeTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrCompareTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static NV_STATUS __nvoc_thunk_OBJINTRABLE_tmrGetNotificationIntrVector(struct OBJGPU *pGpu, struct OBJTMR *pIntrable, NvU32 *pIntrVector) { + return intrableGetNotificationIntrVector(pGpu, (struct OBJINTRABLE *)(((unsigned char *)pIntrable) + __nvoc_rtti_OBJTMR_OBJINTRABLE.offset), pIntrVector); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_tmrIsPresent(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrReconcileTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE 
*)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJINTRABLE_tmrGetKernelIntrVectors(struct OBJGPU *pGpu, struct OBJTMR *pIntrable, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return intrableGetKernelIntrVectors(pGpu, (struct OBJINTRABLE *)(((unsigned char *)pIntrable) + __nvoc_rtti_OBJTMR_OBJINTRABLE.offset), maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +static NV_STATUS __nvoc_thunk_OBJINTRABLE_tmrSetNotificationIntrVector(struct OBJGPU *pGpu, struct OBJTMR *pIntrable, NvU32 intrVector) { + return intrableSetNotificationIntrVector(pGpu, (struct OBJINTRABLE *)(((unsigned char *)pIntrable) + __nvoc_rtti_OBJTMR_OBJINTRABLE.offset), intrVector); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreLoad(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePostUnload(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreUnload(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrGetTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState); +} + +static void __nvoc_thunk_OBJENGSTATE_tmrInitMissing(POBJGPU pGpu, struct OBJTMR *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreInitLocked(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreInitUnlocked(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_IntrService_tmrServiceNotificationInterrupt(OBJGPU *pGpu, struct OBJTMR *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return intrservServiceNotificationInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_OBJTMR_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePostLoad(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrAllocTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrSetTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { 
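    /*
     * Direction convention in these thunks: calls made through the OBJTMR
     * name space onto an inherited OBJENGSTATE/OBJINTRABLE/IntrService
     * implementation add the embedded base's offset (as in the return below),
     * while thunks installed into a base vtable that forward to OBJTMR
     * overrides (e.g. __nvoc_thunk_OBJTMR_intrservServiceInterrupt above)
     * subtract that same offset to recover the OBJTMR object.
     */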
+ return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTMR = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJINTRABLE(OBJINTRABLE*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_OBJTMR(OBJTMR *pThis) { + __nvoc_tmrDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_OBJINTRABLE(&pThis->__nvoc_base_OBJINTRABLE); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS, ((NvBool)(0 != 0))); + } + pThis->setProperty(pThis, PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS, (0)); + pThis->setProperty(pThis, PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS, (0)); + + // NVOC Property Hal field -- PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL, ((NvBool)(0 != 0))); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJINTRABLE(OBJINTRABLE* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_OBJTMR_fail_OBJENGSTATE; + status = __nvoc_ctor_OBJINTRABLE(&pThis->__nvoc_base_OBJINTRABLE, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_OBJTMR_fail_OBJINTRABLE; + status = 
__nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_OBJTMR_fail_IntrService; + __nvoc_init_dataField_OBJTMR(pThis, pRmhalspecowner); + goto __nvoc_ctor_OBJTMR_exit; // Success + +__nvoc_ctor_OBJTMR_fail_IntrService: + __nvoc_dtor_OBJINTRABLE(&pThis->__nvoc_base_OBJINTRABLE); +__nvoc_ctor_OBJTMR_fail_OBJINTRABLE: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_OBJTMR_fail_OBJENGSTATE: +__nvoc_ctor_OBJTMR_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJTMR_1(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__tmrRegisterIntrService__ = &tmrRegisterIntrService_IMPL; + + pThis->__tmrClearInterrupt__ = &tmrClearInterrupt_IMPL; + + // Hal function -- tmrServiceInterrupt + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__tmrServiceInterrupt__ = &tmrServiceInterrupt_56cd7a; + } + else if (0) + { +#if 0 + if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__tmrServiceInterrupt__ = &tmrServiceInterrupt_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__tmrServiceInterrupt__ = &tmrServiceInterrupt_GA100; + } + else if (0) + { + } +#endif + } + + pThis->__tmrConstructEngine__ = &tmrConstructEngine_IMPL; + + pThis->__tmrStateInitLocked__ = &tmrStateInitLocked_IMPL; + + pThis->__tmrStateInitUnlocked__ = &tmrStateInitUnlocked_IMPL; + + pThis->__tmrStateLoad__ = &tmrStateLoad_IMPL; + + pThis->__tmrStateUnload__ = &tmrStateUnload_IMPL; + + pThis->__tmrStateDestroy__ = &tmrStateDestroy_IMPL; + + // Hal function -- tmrGetGpuPtimerOffset + if (0) + { + } + else if (0) + { + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x000003e0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 */ + { + pThis->__tmrGetGpuPtimerOffset__ = &tmrGetGpuPtimerOffset_TU102; + } + else if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000fc00UL) )) /* ChipHal: GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__tmrGetGpuPtimerOffset__ = &tmrGetGpuPtimerOffset_GA100; + } + else if (0) + { + } + else if (0) + { + } + + // Hal function -- tmrGetPhysicalIntrVectors + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__tmrGetPhysicalIntrVectors__ = &tmrGetPhysicalIntrVectors_46f6a7; + } + else if (0) + { +#if 0 + if (0) + { + } + // default + else + { + pThis->__tmrGetPhysicalIntrVectors__ = &tmrGetPhysicalIntrVectors_46f6a7; + } +#endif + } + + 
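    /*
     * The HAL predicates above treat chipHal_HalVarIdx as a bit index into a
     * mask table: (idx >> 5) selects a 32-bit word and (1UL << (idx & 0x1f))
     * the bit within it. Worked example (index value hypothetical): for
     * idx == 37, idx >> 5 == 1 and 1UL << (37 & 0x1f) == 0x20, which is set
     * in 0x0000ffe0 (bits 5..15, the eleven TU10x/GA10x chips listed) and in
     * 0x000003e0 (bits 5..9, the five TU10x chips) but not in 0x0000fc00
     * (bits 10..15, the GA10x chips), so such an index would take the
     * TU102-family branch wherever one exists.
     */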
pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_OBJTMR_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservClearInterrupt__ = &__nvoc_thunk_OBJTMR_intrservClearInterrupt; + + pThis->__nvoc_base_IntrService.__intrservServiceInterrupt__ = &__nvoc_thunk_OBJTMR_intrservServiceInterrupt; + + pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_OBJTMR_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_OBJTMR_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitUnlocked__ = &__nvoc_thunk_OBJTMR_engstateStateInitUnlocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_OBJTMR_engstateStateLoad; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_OBJTMR_engstateStateUnload; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_OBJTMR_engstateStateDestroy; + + pThis->__nvoc_base_OBJINTRABLE.__intrableGetPhysicalIntrVectors__ = &__nvoc_thunk_OBJTMR_intrableGetPhysicalIntrVectors; + + pThis->__tmrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrFreeTunableState; + + pThis->__tmrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrCompareTunableState; + + pThis->__tmrGetNotificationIntrVector__ = &__nvoc_thunk_OBJINTRABLE_tmrGetNotificationIntrVector; + + pThis->__tmrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_tmrIsPresent; + + pThis->__tmrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrReconcileTunableState; + + pThis->__tmrGetKernelIntrVectors__ = &__nvoc_thunk_OBJINTRABLE_tmrGetKernelIntrVectors; + + pThis->__tmrSetNotificationIntrVector__ = &__nvoc_thunk_OBJINTRABLE_tmrSetNotificationIntrVector; + + pThis->__tmrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreLoad; + + pThis->__tmrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePostUnload; + + pThis->__tmrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreUnload; + + pThis->__tmrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrGetTunableState; + + pThis->__tmrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_tmrInitMissing; + + pThis->__tmrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreInitLocked; + + pThis->__tmrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreInitUnlocked; + + pThis->__tmrServiceNotificationInterrupt__ = &__nvoc_thunk_IntrService_tmrServiceNotificationInterrupt; + + pThis->__tmrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePostLoad; + + pThis->__tmrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrAllocTunableState; + + pThis->__tmrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrSetTunableState; +} + +void __nvoc_init_funcTable_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_OBJTMR_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*, RmHalspecOwner* ); +void __nvoc_init_OBJINTRABLE(OBJINTRABLE*, RmHalspecOwner* ); +void __nvoc_init_IntrService(IntrService*, RmHalspecOwner* ); +void __nvoc_init_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_OBJTMR = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_OBJINTRABLE = &pThis->__nvoc_base_OBJINTRABLE; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + __nvoc_init_OBJINTRABLE(&pThis->__nvoc_base_OBJINTRABLE, 
pRmhalspecowner); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + __nvoc_init_funcTable_OBJTMR(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_OBJTMR(OBJTMR **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJTMR *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(OBJTMR)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJTMR)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJTMR); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_OBJTMR(pThis, pRmhalspecowner); + status = __nvoc_ctor_OBJTMR(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_OBJTMR_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJTMR_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJTMR(OBJTMR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJTMR(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_objtmr_nvoc.h b/src/nvidia/generated/g_objtmr_nvoc.h new file mode 100644 index 000000000..5493ee175 --- /dev/null +++ b/src/nvidia/generated/g_objtmr_nvoc.h @@ -0,0 +1,1154 @@ +#ifndef _G_OBJTMR_NVOC_H_ +#define _G_OBJTMR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_objtmr_nvoc.h" + +#ifndef _OBJTMR_H_ +#define _OBJTMR_H_ + +/*! + * @file + * @brief Defines and structures used for the Tmr Engine Object. 
+ */ + +/* ------------------------ Includes --------------------------------------- */ +#include "core/core.h" +#include "core/info_block.h" +#include "gpu/eng_state.h" +#include "gpu/gpu.h" +#include "tmr.h" +#include "lib/ref_count.h" +#include "os/os.h" +#include "nvoc/utility.h" +#include "kernel/gpu/intrable/intrable.h" +#include "kernel/gpu/intr/intr_service.h" + +/* ------------------------ Macros ----------------------------------------- */ +// +// Extent of the timer callback array +// +#define TMR_NUM_CALLBACKS_RM 96 +#define TMR_NUM_CALLBACKS_OS 36 + +// Callback scheduled without any explicit flags set. +#define TMR_FLAGS_NONE 0x00000000 +// Automatically reschedule the callback, so that it repeats. +// Otherwise, callback is scheduled for one-shot execution. +#define TMR_FLAG_RECUR NVBIT(0) +// Indicate that the implementation of the callback function will/can release +// a GPU semaphore. This allows fifoIdleChannels to query this information, +// and hence not bail out early if channels are blocked on semaphores that +// will in fact be released. + // !!NOTE: This is OBSOLETE, it should be moved directly to FIFO, where it's needed +#define TMR_FLAG_RELEASE_SEMAPHORE NVBIT(1) +#define TMR_FLAG_OS_TIMER_QUEUED NVBIT(2) + +#define TMR_GET_GPU(p) ENG_GET_GPU(p) + +/* ------------------------ Function Redefinitions ------------------------- */ +#define tmrEventScheduleRelSec(pTmr, pEvent, RelTimeSec) tmrEventScheduleRel(pTmr, pEvent, (NvU64)(RelTimeSec) * 1000000000 ) + +#define tmrGetInfoBlock(pTmr, pListHead, dataId) getInfoPtr(pListHead, dataId) +#define tmrAddInfoBlock(pTmr, ppListHead, dataId, size) addInfoPtr(ppListHead, dataId, size) +#define tmrDeleteInfoBlock(pTmr, ppListHead, dataId) deleteInfoPtr(ppListHead, dataId) +#define tmrTestInfoBlock(pTmr, pListHead, dataId) testInfoPtr(pListHead, dataId) + +/* ------------------------ Datatypes -------------------------------------- */ +TYPEDEF_BITVECTOR(MC_ENGINE_BITVECTOR); + +// +// Forward references for timer related structures +// +typedef struct DAYMSECTIME *PDAYMSECTIME; +typedef struct DAYMSECTIME DAYMSECTIME; + +// +// System time structure +// +struct DAYMSECTIME +{ + NvU32 days; + NvU32 msecs; + NvU32 valid; +}; + +/*! + * Callback wrapper memory type, used with interfacing all scheduling functions + * Reveals only partial representation of the event information. + * User Use only, internal code will not change them. 
+ */ +struct TMR_EVENT +{ + TIMEPROC pTimeProc; //__nvoc_pbase_OBJTMR) + +#ifdef __nvoc_objtmr_h_disabled +#define __dynamicCast_OBJTMR(pThis) ((OBJTMR*)NULL) +#else //__nvoc_objtmr_h_disabled +#define __dynamicCast_OBJTMR(pThis) \ + ((OBJTMR*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJTMR))) +#endif //__nvoc_objtmr_h_disabled + +#define PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS +#define PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS +#define PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS +#define PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL_BASE_CAST +#define PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL_BASE_NAME PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL +#define PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS_BASE_CAST +#define PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS_BASE_NAME PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS +#define PDB_PROP_TMR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_TMR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE_BASE_CAST +#define PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE_BASE_NAME PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE + +NV_STATUS __nvoc_objCreateDynamic_OBJTMR(OBJTMR**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJTMR(OBJTMR**, Dynamic*, NvU32); +#define __objCreate_OBJTMR(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJTMR((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define tmrRegisterIntrService(pGpu, pTmr, pRecords) tmrRegisterIntrService_DISPATCH(pGpu, pTmr, pRecords) +#define tmrClearInterrupt(pGpu, pTmr, pParams) tmrClearInterrupt_DISPATCH(pGpu, pTmr, pParams) +#define tmrServiceInterrupt(pGpu, pTmr, pParams) tmrServiceInterrupt_DISPATCH(pGpu, pTmr, pParams) +#define tmrServiceInterrupt_HAL(pGpu, pTmr, pParams) tmrServiceInterrupt_DISPATCH(pGpu, pTmr, pParams) +#define tmrConstructEngine(pGpu, pTmr, arg0) tmrConstructEngine_DISPATCH(pGpu, pTmr, arg0) +#define tmrStateInitLocked(pGpu, pTmr) tmrStateInitLocked_DISPATCH(pGpu, pTmr) +#define tmrStateInitUnlocked(pGpu, pTmr) tmrStateInitUnlocked_DISPATCH(pGpu, pTmr) +#define tmrStateLoad(pGpu, pTmr, arg0) tmrStateLoad_DISPATCH(pGpu, pTmr, arg0) +#define tmrStateUnload(pGpu, pTmr, arg0) tmrStateUnload_DISPATCH(pGpu, pTmr, arg0) +#define tmrStateDestroy(pGpu, pTmr) tmrStateDestroy_DISPATCH(pGpu, pTmr) +#define tmrGetGpuPtimerOffset(pGpu, pTmr, arg0, arg1) tmrGetGpuPtimerOffset_DISPATCH(pGpu, pTmr, arg0, arg1) +#define tmrGetGpuPtimerOffset_HAL(pGpu, pTmr, arg0, arg1) tmrGetGpuPtimerOffset_DISPATCH(pGpu, pTmr, arg0, arg1) +#define tmrGetPhysicalIntrVectors(pGpu, pTmr, maxIntrs, pIntrs, pMcEngineIdxs, pCount) tmrGetPhysicalIntrVectors_DISPATCH(pGpu, pTmr, maxIntrs, pIntrs, pMcEngineIdxs, pCount) +#define tmrGetPhysicalIntrVectors_HAL(pGpu, pTmr, maxIntrs, pIntrs, pMcEngineIdxs, pCount) tmrGetPhysicalIntrVectors_DISPATCH(pGpu, pTmr, maxIntrs, pIntrs, pMcEngineIdxs, pCount) +#define tmrFreeTunableState(pGpu, pEngstate, pTunableState) tmrFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define tmrCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) tmrCompareTunableState_DISPATCH(pGpu, pEngstate, 
pTunables1, pTunables2) +#define tmrGetNotificationIntrVector(pGpu, pIntrable, pIntrVector) tmrGetNotificationIntrVector_DISPATCH(pGpu, pIntrable, pIntrVector) +#define tmrIsPresent(pGpu, pEngstate) tmrIsPresent_DISPATCH(pGpu, pEngstate) +#define tmrReconcileTunableState(pGpu, pEngstate, pTunableState) tmrReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define tmrGetKernelIntrVectors(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount) tmrGetKernelIntrVectors_DISPATCH(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount) +#define tmrSetNotificationIntrVector(pGpu, pIntrable, intrVector) tmrSetNotificationIntrVector_DISPATCH(pGpu, pIntrable, intrVector) +#define tmrStatePreLoad(pGpu, pEngstate, arg0) tmrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define tmrStatePostUnload(pGpu, pEngstate, arg0) tmrStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define tmrStatePreUnload(pGpu, pEngstate, arg0) tmrStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define tmrGetTunableState(pGpu, pEngstate, pTunableState) tmrGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define tmrInitMissing(pGpu, pEngstate) tmrInitMissing_DISPATCH(pGpu, pEngstate) +#define tmrStatePreInitLocked(pGpu, pEngstate) tmrStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define tmrStatePreInitUnlocked(pGpu, pEngstate) tmrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define tmrServiceNotificationInterrupt(pGpu, pIntrService, pParams) tmrServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define tmrStatePostLoad(pGpu, pEngstate, arg0) tmrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define tmrAllocTunableState(pGpu, pEngstate, ppTunableState) tmrAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define tmrSetTunableState(pGpu, pEngstate, pTunableState) tmrSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +NV_STATUS tmrGetCurrentTime_IMPL(struct OBJTMR *pTmr, NvU64 *pTime); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentTime(struct OBJTMR *pTmr, NvU64 *pTime) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCurrentTime(pTmr, pTime) tmrGetCurrentTime_IMPL(pTmr, pTime) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCurrentTime_HAL(pTmr, pTime) tmrGetCurrentTime(pTmr, pTime) + +NV_STATUS tmrGetCurrentTimeEx_IMPL(struct OBJTMR *pTmr, NvU64 *pTime, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentTimeEx(struct OBJTMR *pTmr, NvU64 *pTime, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCurrentTimeEx(pTmr, pTime, arg0) tmrGetCurrentTimeEx_IMPL(pTmr, pTime, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCurrentTimeEx_HAL(pTmr, pTime, arg0) tmrGetCurrentTimeEx(pTmr, pTime, arg0) + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrDelay(struct OBJTMR *pTmr, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrDelay(pTmr, arg0) tmrDelay_OSTIMER(pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrDelay_HAL(pTmr, arg0) tmrDelay(pTmr, arg0) + +NV_STATUS tmrSetCurrentTime_GV100(OBJGPU *pGpu, struct OBJTMR *pTmr); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCurrentTime(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR 
was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCurrentTime(pGpu, pTmr) tmrSetCurrentTime_GV100(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCurrentTime_HAL(pGpu, pTmr) tmrSetCurrentTime(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarmIntrDisable_56cd7a(OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrDisable(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrDisable(pGpu, pTmr) tmrSetAlarmIntrDisable_56cd7a(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrDisable_HAL(pGpu, pTmr) tmrSetAlarmIntrDisable(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarmIntrEnable_56cd7a(OBJGPU *pGpu, struct OBJTMR *pTmr) { + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrEnable(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrEnable(pGpu, pTmr) tmrSetAlarmIntrEnable_56cd7a(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrEnable_HAL(pGpu, pTmr) tmrSetAlarmIntrEnable(pGpu, pTmr) + +static inline NV_STATUS tmrSetAlarmIntrReset_56cd7a(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarmIntrReset(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarmIntrReset(pGpu, pTmr, arg0) tmrSetAlarmIntrReset_56cd7a(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarmIntrReset_HAL(pGpu, pTmr, arg0) tmrSetAlarmIntrReset(pGpu, pTmr, arg0) + +static inline NV_STATUS tmrGetIntrStatus_cb5ce8(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *pStatus, struct THREAD_STATE_NODE *arg0) { + *pStatus = 0; + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetIntrStatus(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *pStatus, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetIntrStatus(pGpu, pTmr, pStatus, arg0) tmrGetIntrStatus_cb5ce8(pGpu, pTmr, pStatus, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetIntrStatus_HAL(pGpu, pTmr, pStatus, arg0) tmrGetIntrStatus(pGpu, pTmr, pStatus, arg0) + +static inline NvU32 tmrGetTimeLo_cf0499(OBJGPU *pGpu, struct OBJTMR *pTmr) { + return ((NvU32)(((NvU64)(osGetTimestamp())) & 4294967295U)); +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrGetTimeLo(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimeLo(pGpu, pTmr) tmrGetTimeLo_cf0499(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimeLo_HAL(pGpu, pTmr) tmrGetTimeLo(pGpu, pTmr) + +static inline NvU64 tmrGetTime_fa6bbe(OBJGPU *pGpu, struct OBJTMR *pTmr) { + return osGetTimestamp(); +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU64 tmrGetTime(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define 
tmrGetTime(pGpu, pTmr) tmrGetTime_fa6bbe(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTime_HAL(pGpu, pTmr) tmrGetTime(pGpu, pTmr) + +NvU64 tmrGetTimeEx_GM107(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU64 tmrGetTimeEx(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimeEx(pGpu, pTmr, arg0) tmrGetTimeEx_GM107(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimeEx_HAL(pGpu, pTmr, arg0) tmrGetTimeEx(pGpu, pTmr, arg0) + +NvU32 tmrReadTimeLoReg_TU102(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrReadTimeLoReg(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrReadTimeLoReg(pGpu, pTmr, arg0) tmrReadTimeLoReg_TU102(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrReadTimeLoReg_HAL(pGpu, pTmr, arg0) tmrReadTimeLoReg(pGpu, pTmr, arg0) + +NvU32 tmrReadTimeHiReg_TU102(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrReadTimeHiReg(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrReadTimeHiReg(pGpu, pTmr, arg0) tmrReadTimeHiReg_TU102(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrReadTimeHiReg_HAL(pGpu, pTmr, arg0) tmrReadTimeHiReg(pGpu, pTmr, arg0) + +static inline NV_STATUS tmrSetAlarm_56cd7a(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 alarm, struct THREAD_STATE_NODE *pThreadState) { + return NV_OK; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetAlarm(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 alarm, struct THREAD_STATE_NODE *pThreadState) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetAlarm(pGpu, pTmr, alarm, pThreadState) tmrSetAlarm_56cd7a(pGpu, pTmr, alarm, pThreadState) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetAlarm_HAL(pGpu, pTmr, alarm, pThreadState) tmrSetAlarm(pGpu, pTmr, alarm, pThreadState) + +static inline NvBool tmrGetAlarmPending_491d52(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrGetAlarmPending(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetAlarmPending(pGpu, pTmr, arg0) tmrGetAlarmPending_491d52(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetAlarmPending_HAL(pGpu, pTmr, arg0) tmrGetAlarmPending(pGpu, pTmr, arg0) + +NV_STATUS tmrSetCountdownIntrDisable_GM200(OBJGPU *pGpu, struct OBJTMR *pTmr); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrDisable(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrDisable(pGpu, pTmr) tmrSetCountdownIntrDisable_GM200(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define 
tmrSetCountdownIntrDisable_HAL(pGpu, pTmr) tmrSetCountdownIntrDisable(pGpu, pTmr) + +NV_STATUS tmrSetCountdownIntrEnable_TU102(OBJGPU *pGpu, struct OBJTMR *pTmr); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrEnable(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrEnable(pGpu, pTmr) tmrSetCountdownIntrEnable_TU102(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdownIntrEnable_HAL(pGpu, pTmr) tmrSetCountdownIntrEnable(pGpu, pTmr) + +NV_STATUS tmrSetCountdownIntrReset_TU102(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdownIntrReset(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownIntrReset(pGpu, pTmr, arg0) tmrSetCountdownIntrReset_TU102(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdownIntrReset_HAL(pGpu, pTmr, arg0) tmrSetCountdownIntrReset(pGpu, pTmr, arg0) + +static inline NvBool tmrGetCountdownPending_491d52(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + return ((NvBool)(0 != 0)); +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrGetCountdownPending(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCountdownPending(pGpu, pTmr, arg0) tmrGetCountdownPending_491d52(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetCountdownPending_HAL(pGpu, pTmr, arg0) tmrGetCountdownPending(pGpu, pTmr, arg0) + +NV_STATUS tmrSetCountdown_TU102(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrSetCountdown(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0, NvU32 arg1, struct THREAD_STATE_NODE *arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdown(pGpu, pTmr, arg0, arg1, arg2) tmrSetCountdown_TU102(pGpu, pTmr, arg0, arg1, arg2) +#endif //__nvoc_objtmr_h_disabled + +#define tmrSetCountdown_HAL(pGpu, pTmr, arg0, arg1, arg2) tmrSetCountdown(pGpu, pTmr, arg0, arg1, arg2) + +NV_STATUS tmrGetTimerBar0MapInfo_PTIMER(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg0, NvU32 *arg1); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetTimerBar0MapInfo(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg0, NvU32 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetTimerBar0MapInfo(pGpu, pTmr, arg0, arg1) tmrGetTimerBar0MapInfo_PTIMER(pGpu, pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, arg0, arg1) tmrGetTimerBar0MapInfo(pGpu, pTmr, arg0, arg1) + +NV_STATUS tmrGrTickFreqChange_GM107(OBJGPU *pGpu, struct OBJTMR *pTmr, NvBool arg0); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGrTickFreqChange(OBJGPU *pGpu, struct OBJTMR *pTmr, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGrTickFreqChange(pGpu, 
pTmr, arg0) tmrGrTickFreqChange_GM107(pGpu, pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGrTickFreqChange_HAL(pGpu, pTmr, arg0) tmrGrTickFreqChange(pGpu, pTmr, arg0) + +static inline NvU32 tmrGetUtilsClkScaleFactor_4a4dee(OBJGPU *pGpu, struct OBJTMR *pTmr) { + return 0; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NvU32 tmrGetUtilsClkScaleFactor(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return 0; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetUtilsClkScaleFactor(pGpu, pTmr) tmrGetUtilsClkScaleFactor_4a4dee(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetUtilsClkScaleFactor_HAL(pGpu, pTmr) tmrGetUtilsClkScaleFactor(pGpu, pTmr) + +NV_STATUS tmrGetGpuAndCpuTimestampPair_GM107(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg0, NvU64 *arg1); + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetGpuAndCpuTimestampPair(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *arg0, NvU64 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetGpuAndCpuTimestampPair(pGpu, pTmr, arg0, arg1) tmrGetGpuAndCpuTimestampPair_GM107(pGpu, pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +#define tmrGetGpuAndCpuTimestampPair_HAL(pGpu, pTmr, arg0, arg1) tmrGetGpuAndCpuTimestampPair(pGpu, pTmr, arg0, arg1) + +static inline void tmrResetTimerRegistersForVF_b3696a(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 gfid) { + return; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrResetTimerRegistersForVF(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 gfid) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrResetTimerRegistersForVF(pGpu, pTmr, gfid) tmrResetTimerRegistersForVF_b3696a(pGpu, pTmr, gfid) +#endif //__nvoc_objtmr_h_disabled + +#define tmrResetTimerRegistersForVF_HAL(pGpu, pTmr, gfid) tmrResetTimerRegistersForVF(pGpu, pTmr, gfid) + +static inline NV_STATUS tmrEventCreateOSTimer_46f6a7(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCreateOSTimer(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCreateOSTimer(pTmr, pEvent) tmrEventCreateOSTimer_46f6a7(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventCreateOSTimer_HAL(pTmr, pEvent) tmrEventCreateOSTimer(pTmr, pEvent) + +static inline NV_STATUS tmrEventScheduleAbsOSTimer_46f6a7(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeAbs) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleAbsOSTimer(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeAbs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleAbsOSTimer(pTmr, pEvent, timeAbs) tmrEventScheduleAbsOSTimer_46f6a7(pTmr, pEvent, timeAbs) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventScheduleAbsOSTimer_HAL(pTmr, pEvent, timeAbs) tmrEventScheduleAbsOSTimer(pTmr, pEvent, timeAbs) + +static inline NV_STATUS tmrEventServiceOSTimerCallback_46f6a7(OBJGPU *pGpu, struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventServiceOSTimerCallback(OBJGPU *pGpu, struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + 
NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventServiceOSTimerCallback(pGpu, pTmr, pEvent) tmrEventServiceOSTimerCallback_46f6a7(pGpu, pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventServiceOSTimerCallback_HAL(pGpu, pTmr, pEvent) tmrEventServiceOSTimerCallback(pGpu, pTmr, pEvent) + +static inline NV_STATUS tmrEventCancelOSTimer_46f6a7(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCancelOSTimer(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCancelOSTimer(pTmr, pEvent) tmrEventCancelOSTimer_46f6a7(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventCancelOSTimer_HAL(pTmr, pEvent) tmrEventCancelOSTimer(pTmr, pEvent) + +static inline NV_STATUS tmrEventDestroyOSTimer_46f6a7(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventDestroyOSTimer(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventDestroyOSTimer(pTmr, pEvent) tmrEventDestroyOSTimer_46f6a7(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +#define tmrEventDestroyOSTimer_HAL(pTmr, pEvent) tmrEventDestroyOSTimer(pTmr, pEvent) + +void tmrRegisterIntrService_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceRecord pRecords[155]); + +static inline void tmrRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceRecord pRecords[155]) { + pTmr->__tmrRegisterIntrService__(pGpu, pTmr, pRecords); +} + +NvBool tmrClearInterrupt_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceClearInterruptArguments *pParams); + +static inline NvBool tmrClearInterrupt_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceClearInterruptArguments *pParams) { + return pTmr->__tmrClearInterrupt__(pGpu, pTmr, pParams); +} + +static inline NvU32 tmrServiceInterrupt_56cd7a(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceServiceInterruptArguments *pParams) { + return NV_OK; +} + +NvU32 tmrServiceInterrupt_TU102(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceServiceInterruptArguments *pParams); + +NvU32 tmrServiceInterrupt_GA100(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceServiceInterruptArguments *pParams); + +static inline NvU32 tmrServiceInterrupt_46f6a7(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceServiceInterruptArguments *pParams) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NvU32 tmrServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, IntrServiceServiceInterruptArguments *pParams) { + return pTmr->__tmrServiceInterrupt__(pGpu, pTmr, pParams); +} + +NV_STATUS tmrConstructEngine_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr, ENGDESCRIPTOR arg0); + +static inline NV_STATUS tmrConstructEngine_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, ENGDESCRIPTOR arg0) { + return pTmr->__tmrConstructEngine__(pGpu, pTmr, arg0); +} + +NV_STATUS tmrStateInitLocked_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr); + +static inline NV_STATUS tmrStateInitLocked_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr) { + return pTmr->__tmrStateInitLocked__(pGpu, pTmr); +} + +NV_STATUS tmrStateInitUnlocked_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr); + +static inline NV_STATUS tmrStateInitUnlocked_DISPATCH(OBJGPU *pGpu, 
struct OBJTMR *pTmr) { + return pTmr->__tmrStateInitUnlocked__(pGpu, pTmr); +} + +NV_STATUS tmrStateLoad_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0); + +static inline NV_STATUS tmrStateLoad_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0) { + return pTmr->__tmrStateLoad__(pGpu, pTmr, arg0); +} + +NV_STATUS tmrStateUnload_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0); + +static inline NV_STATUS tmrStateUnload_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 arg0) { + return pTmr->__tmrStateUnload__(pGpu, pTmr, arg0); +} + +void tmrStateDestroy_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr); + +static inline void tmrStateDestroy_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr) { + pTmr->__tmrStateDestroy__(pGpu, pTmr); +} + +NV_STATUS tmrGetGpuPtimerOffset_TU102(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg0, NvU32 *arg1); + +NV_STATUS tmrGetGpuPtimerOffset_GA100(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg0, NvU32 *arg1); + +static inline NV_STATUS tmrGetGpuPtimerOffset_46f6a7(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg0, NvU32 *arg1) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS tmrGetGpuPtimerOffset_5baef9(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg0, NvU32 *arg1) { + NV_ASSERT_OR_RETURN_PRECOMP(0, NV_ERR_NOT_SUPPORTED); +} + +static inline NV_STATUS tmrGetGpuPtimerOffset_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 *arg0, NvU32 *arg1) { + return pTmr->__tmrGetGpuPtimerOffset__(pGpu, pTmr, arg0, arg1); +} + +static inline NV_STATUS tmrGetPhysicalIntrVectors_46f6a7(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS tmrGetPhysicalIntrVectors_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return pTmr->__tmrGetPhysicalIntrVectors__(pGpu, pTmr, maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +static inline void tmrFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + pEngstate->__tmrFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS tmrCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__tmrCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline NV_STATUS tmrGetNotificationIntrVector_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pIntrable, NvU32 *pIntrVector) { + return pIntrable->__tmrGetNotificationIntrVector__(pGpu, pIntrable, pIntrVector); +} + +static inline NvBool tmrIsPresent_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__tmrIsPresent__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return pEngstate->__tmrReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS tmrGetKernelIntrVectors_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pIntrable, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return pIntrable->__tmrGetKernelIntrVectors__(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +static inline NV_STATUS tmrSetNotificationIntrVector_DISPATCH(struct OBJGPU *pGpu, struct OBJTMR *pIntrable, NvU32 intrVector) { + return pIntrable->__tmrSetNotificationIntrVector__(pGpu, pIntrable, intrVector); +} + +static inline NV_STATUS tmrStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return 
pEngstate->__tmrStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS tmrStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return pEngstate->__tmrStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS tmrStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return pEngstate->__tmrStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS tmrGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return pEngstate->__tmrGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline void tmrInitMissing_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + pEngstate->__tmrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__tmrStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate) { + return pEngstate->__tmrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS tmrServiceNotificationInterrupt_DISPATCH(OBJGPU *pGpu, struct OBJTMR *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return pIntrService->__tmrServiceNotificationInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS tmrStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) { + return pEngstate->__tmrStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS tmrAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void **ppTunableState) { + return pEngstate->__tmrAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS tmrSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) { + return pEngstate->__tmrSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool tmrServiceSwrlCallbacksPmcTree(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + return ((NvBool)(0 != 0)); +} + +static inline NvBool tmrClearSwrlCallbacksSemaphore(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + return ((NvBool)(0 != 0)); +} + +static inline void tmrServiceSwrlCallbacks(OBJGPU *pGpu, struct OBJTMR *pTmr, struct THREAD_STATE_NODE *arg0) { + return; +} + +static inline NvBool tmrServiceSwrlWrapper(OBJGPU *pGpu, struct OBJTMR *pTmr, MC_ENGINE_BITVECTOR *arg0, struct THREAD_STATE_NODE *arg1) { + return ((NvBool)(0 != 0)); +} + +void tmrDestruct_IMPL(struct OBJTMR *pTmr); +#define __nvoc_tmrDestruct(pTmr) tmrDestruct_IMPL(pTmr) +NV_STATUS tmrEventCreate_IMPL(struct OBJTMR *pTmr, PTMR_EVENT *ppEvent, TIMEPROC callbackFn, void *pUserData, NvU32 flags); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventCreate(struct OBJTMR *pTmr, PTMR_EVENT *ppEvent, TIMEPROC callbackFn, void *pUserData, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCreate(pTmr, ppEvent, callbackFn, pUserData, flags) tmrEventCreate_IMPL(pTmr, ppEvent, callbackFn, pUserData, flags) +#endif //__nvoc_objtmr_h_disabled + +void tmrEventCancel_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrEventCancel(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventCancel(pTmr, pEvent) 
tmrEventCancel_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +void tmrEventDestroy_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrEventDestroy(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventDestroy(pTmr, pEvent) tmrEventDestroy_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +void tmrInitCallbacks_IMPL(struct OBJTMR *pTmr); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrInitCallbacks(struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrInitCallbacks(pTmr) tmrInitCallbacks_IMPL(pTmr) +#endif //__nvoc_objtmr_h_disabled + +void tmrSetCountdownCallback_IMPL(struct OBJTMR *pTmr, TIMEPROC_COUNTDOWN arg0); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrSetCountdownCallback(struct OBJTMR *pTmr, TIMEPROC_COUNTDOWN arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrSetCountdownCallback(pTmr, arg0) tmrSetCountdownCallback_IMPL(pTmr, arg0) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrCancelCallback_IMPL(struct OBJTMR *pTmr, void *pObject); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrCancelCallback(struct OBJTMR *pTmr, void *pObject) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCancelCallback(pTmr, pObject) tmrCancelCallback_IMPL(pTmr, pObject) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrGetCurrentDiffTime_IMPL(struct OBJTMR *pTmr, NvU64 arg0, NvU64 *arg1); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrGetCurrentDiffTime(struct OBJTMR *pTmr, NvU64 arg0, NvU64 *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCurrentDiffTime(pTmr, arg0, arg1) tmrGetCurrentDiffTime_IMPL(pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +void tmrGetSystemTime_IMPL(struct OBJTMR *pTmr, PDAYMSECTIME pTime); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrGetSystemTime(struct OBJTMR *pTmr, PDAYMSECTIME pTime) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetSystemTime(pTmr, pTime) tmrGetSystemTime_IMPL(pTmr, pTime) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCheckCallbacksReleaseSem_IMPL(struct OBJTMR *pTmr, NvU32 chId); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCheckCallbacksReleaseSem(struct OBJTMR *pTmr, NvU32 chId) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCheckCallbacksReleaseSem(pTmr, chId) tmrCheckCallbacksReleaseSem_IMPL(pTmr, chId) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrDiffExceedsTime_IMPL(struct OBJTMR *pTmr, PDAYMSECTIME pFutureTime, PDAYMSECTIME pPastTime, NvU32 time); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrDiffExceedsTime(struct OBJTMR *pTmr, PDAYMSECTIME pFutureTime, PDAYMSECTIME pPastTime, NvU32 time) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrDiffExceedsTime(pTmr, pFutureTime, pPastTime, time) tmrDiffExceedsTime_IMPL(pTmr, pFutureTime, pPastTime, time) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrEventScheduleAbs_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeAbs); +#ifdef 
__nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleAbs(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeAbs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleAbs(pTmr, pEvent, timeAbs) tmrEventScheduleAbs_IMPL(pTmr, pEvent, timeAbs) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackAbs_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU64 arg2, NvU32 arg3, NvU32 arg4); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackAbs(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU64 arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackAbs(pTmr, arg0, arg1, arg2, arg3, arg4) tmrScheduleCallbackAbs_IMPL(pTmr, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrEventScheduleRel_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeRel); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrEventScheduleRel(struct OBJTMR *pTmr, PTMR_EVENT pEvent, NvU64 timeRel) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventScheduleRel(pTmr, pEvent, timeRel) tmrEventScheduleRel_IMPL(pTmr, pEvent, timeRel) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackRel_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU64 arg2, NvU32 arg3, NvU32 arg4); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackRel(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU64 arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackRel(pTmr, arg0, arg1, arg2, arg3, arg4) tmrScheduleCallbackRel_IMPL(pTmr, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrScheduleCallbackRelSec_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU32 arg2, NvU32 arg3, NvU32 arg4); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrScheduleCallbackRelSec(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1, NvU32 arg2, NvU32 arg3, NvU32 arg4) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrScheduleCallbackRelSec(pTmr, arg0, arg1, arg2, arg3, arg4) tmrScheduleCallbackRelSec_IMPL(pTmr, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrEventOnList_IMPL(struct OBJTMR *pTmr, PTMR_EVENT pEvent); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrEventOnList(struct OBJTMR *pTmr, PTMR_EVENT pEvent) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrEventOnList(pTmr, pEvent) tmrEventOnList_IMPL(pTmr, pEvent) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCallbackOnList_IMPL(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCallbackOnList(struct OBJTMR *pTmr, TIMEPROC_OBSOLETE arg0, void *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCallbackOnList(pTmr, arg0, arg1) tmrCallbackOnList_IMPL(pTmr, arg0, arg1) +#endif //__nvoc_objtmr_h_disabled + +void 
tmrRmCallbackIntrEnable_IMPL(struct OBJTMR *pTmr, OBJGPU *pGpu); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrRmCallbackIntrEnable(struct OBJTMR *pTmr, OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrRmCallbackIntrEnable(pTmr, pGpu) tmrRmCallbackIntrEnable_IMPL(pTmr, pGpu) +#endif //__nvoc_objtmr_h_disabled + +void tmrRmCallbackIntrDisable_IMPL(struct OBJTMR *pTmr, OBJGPU *pGpu); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrRmCallbackIntrDisable(struct OBJTMR *pTmr, OBJGPU *pGpu) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrRmCallbackIntrDisable(pTmr, pGpu) tmrRmCallbackIntrDisable_IMPL(pTmr, pGpu) +#endif //__nvoc_objtmr_h_disabled + +NV_STATUS tmrTimeUntilNextCallback_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *pTimeUntilCallbackNs); +#ifdef __nvoc_objtmr_h_disabled +static inline NV_STATUS tmrTimeUntilNextCallback(OBJGPU *pGpu, struct OBJTMR *pTmr, NvU64 *pTimeUntilCallbackNs) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_objtmr_h_disabled +#define tmrTimeUntilNextCallback(pGpu, pTmr, pTimeUntilCallbackNs) tmrTimeUntilNextCallback_IMPL(pGpu, pTmr, pTimeUntilCallbackNs) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrCallExpiredCallbacks_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrCallExpiredCallbacks(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrCallExpiredCallbacks(pGpu, pTmr) tmrCallExpiredCallbacks_IMPL(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +void tmrResetCallbackInterrupt_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr); +#ifdef __nvoc_objtmr_h_disabled +static inline void tmrResetCallbackInterrupt(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); +} +#else //__nvoc_objtmr_h_disabled +#define tmrResetCallbackInterrupt(pGpu, pTmr) tmrResetCallbackInterrupt_IMPL(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +NvBool tmrGetCallbackInterruptPending_IMPL(OBJGPU *pGpu, struct OBJTMR *pTmr); +#ifdef __nvoc_objtmr_h_disabled +static inline NvBool tmrGetCallbackInterruptPending(OBJGPU *pGpu, struct OBJTMR *pTmr) { + NV_ASSERT_FAILED_PRECOMP("OBJTMR was disabled!"); + return NV_FALSE; +} +#else //__nvoc_objtmr_h_disabled +#define tmrGetCallbackInterruptPending(pGpu, pTmr) tmrGetCallbackInterruptPending_IMPL(pGpu, pTmr) +#endif //__nvoc_objtmr_h_disabled + +#undef PRIVATE_FIELD + + +NV_STATUS tmrCtrlCmdEventCreate(OBJGPU *pGpu, TMR_EVENT_SET_PARAMS *pParams); +NV_STATUS tmrCtrlCmdEventSchedule(OBJGPU *pGpu, TMR_EVENT_SCHEDULE_PARAMS *pParams); +NV_STATUS tmrCtrlCmdEventCancel(OBJGPU *pGpu, TMR_EVENT_GENERAL_PARAMS *pParams); +NV_STATUS tmrCtrlCmdEventDestroy(OBJGPU *pGpu, TMR_EVENT_GENERAL_PARAMS *pParams); + +NV_STATUS tmrDelay_OSTIMER(struct OBJTMR *pTmr, NvU32 nsec); + +#endif // _OBJTMR_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_OBJTMR_NVOC_H_ diff --git a/src/nvidia/generated/g_odb.h b/src/nvidia/generated/g_odb.h new file mode 100644 index 000000000..28aa96fce --- /dev/null +++ b/src/nvidia/generated/g_odb.h @@ -0,0 +1,133 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! 
+// +// Profile: shipping-gpus-openrm +// Template: templates/gt_odb.h +// + +#ifndef _G_ODB_H_ +#define _G_ODB_H_ + +#define OBJECT_BASE_DEFINITION(ENG) __##ENG##_OBJECT_BASE_DEFINITION + +#ifndef __NVOC_CLASS_OBJGPIO_TYPEDEF__ +#define __NVOC_CLASS_OBJGPIO_TYPEDEF__ +typedef struct OBJGPIO OBJGPIO; +#endif /* __NVOC_CLASS_OBJGPIO_TYPEDEF__ */ +typedef struct OBJGPIO *POBJGPIO; + +#ifndef __NVOC_CLASS_OBJOS_TYPEDEF__ +#define __NVOC_CLASS_OBJOS_TYPEDEF__ +typedef struct OBJOS OBJOS; +#endif /* __NVOC_CLASS_OBJOS_TYPEDEF__ */ +typedef struct OBJOS *POBJOS; + +#ifndef __NVOC_CLASS_OBJRPC_TYPEDEF__ +#define __NVOC_CLASS_OBJRPC_TYPEDEF__ +typedef struct OBJRPC OBJRPC; +#endif /* __NVOC_CLASS_OBJRPC_TYPEDEF__ */ +typedef struct OBJRPC *POBJRPC; + +#ifndef __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__ +#define __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__ +typedef struct OBJRPCSTRUCTURECOPY OBJRPCSTRUCTURECOPY; +#endif /* __NVOC_CLASS_OBJRPCSTRUCTURECOPY_TYPEDEF__ */ +typedef struct OBJRPCSTRUCTURECOPY *POBJRPCSTRUCTURECOPY; + + + +#if NV_PRINTF_STRINGS_ALLOWED +#define odbGetClassName(p) (objGetClassInfo((p))->name) +#endif + +// TODO : temporary hack, to delete +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ +typedef struct OBJGPU *POBJGPU; + +#ifndef __NVOC_CLASS_OBJDISP_TYPEDEF__ +#define __NVOC_CLASS_OBJDISP_TYPEDEF__ +typedef struct OBJDISP OBJDISP; +#endif /* __NVOC_CLASS_OBJDISP_TYPEDEF__ */ +typedef struct OBJDISP *POBJDISP; + +// +// #define staticCast(pObj, TYPE) ((pObj)? __staticCast_##TYPE((pObj)) : NULL) +// +#define __staticCast_OBJGPIO(pObj) ((pObj)->__iom_pbase_OBJGPIO) +#define __staticCast_OBJOS(pObj) ((pObj)->__iom_pbase_OBJOS) +#define __staticCast_OBJRPC(pObj) ((pObj)->__iom_pbase_OBJRPC) +#define __staticCast_OBJRPCSTRUCTURECOPY(pObj) ((pObj)->__iom_pbase_OBJRPCSTRUCTURECOPY) + + +// +// #define dynamicCast(pObj, TYPE) (__dynamicCast_##TYPE((pObj))) +// +#define __dynamicCast_OBJGPIO(pObj) NULL +#define __dynamicCast_OBJOS(pObj) ((POBJOS)__nvoc_dynamicCast(staticCast((pObj), Dynamic), classInfo(OBJOS))) +#define __dynamicCast_OBJRPC(pObj) ((POBJRPC)__nvoc_dynamicCast(staticCast((pObj), Dynamic), classInfo(OBJRPC))) +#define __dynamicCast_OBJRPCSTRUCTURECOPY(pObj) NULL + + + +#define PDB_PROP_GPIO_DCB_ENTRIES_PARSED_BASE_CAST +#define PDB_PROP_GPIO_DCB_ENTRIES_PARSED_BASE_NAME pdb.PDB_PROP_GPIO_DCB_ENTRIES_PARSED + +#define PDB_PROP_GPIO_ENTRY_ORIGIN_DCB_GAT_BASE_CAST +#define PDB_PROP_GPIO_ENTRY_ORIGIN_DCB_GAT_BASE_NAME pdb.PDB_PROP_GPIO_ENTRY_ORIGIN_DCB_GAT + +#define PDB_PROP_GPIO_EPC_HWSLOW_FC7E081B_BASE_CAST +#define PDB_PROP_GPIO_EPC_HWSLOW_FC7E081B_BASE_NAME pdb.PDB_PROP_GPIO_EPC_HWSLOW_FC7E081B + +#define PDB_PROP_GPIO_FORCE_FAST_LVDS_MUX_SWITCH_BASE_CAST +#define PDB_PROP_GPIO_FORCE_FAST_LVDS_MUX_SWITCH_BASE_NAME pdb.PDB_PROP_GPIO_FORCE_FAST_LVDS_MUX_SWITCH + +#define PDB_PROP_GPIO_IS_MISSING_BASE_CAST __nvoc_pbase_OBJENGSTATE-> +#define PDB_PROP_GPIO_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +#define PDB_PROP_GPIO_RM_PMU_GPIO_SYNC_ENABLED_DEF_BASE_CAST +#define PDB_PROP_GPIO_RM_PMU_GPIO_SYNC_ENABLED_DEF_BASE_NAME pdb.PDB_PROP_GPIO_RM_PMU_GPIO_SYNC_ENABLED_DEF + +#define PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE_BASE_CAST +#define PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE_BASE_NAME pdb.PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE + +#define PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS_BASE_CAST +#define 
PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS_BASE_NAME pdb.PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS + +#define PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI_BASE_CAST +#define PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI_BASE_NAME pdb.PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI + +#define PDB_PROP_OS_LIMIT_GPU_RESET_BASE_CAST +#define PDB_PROP_OS_LIMIT_GPU_RESET_BASE_NAME pdb.PDB_PROP_OS_LIMIT_GPU_RESET + +#define PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS_BASE_CAST +#define PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS_BASE_NAME pdb.PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS + +#define PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT_BASE_CAST +#define PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT_BASE_NAME pdb.PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT + +#define PDB_PROP_OS_PAT_UNSUPPORTED_BASE_CAST +#define PDB_PROP_OS_PAT_UNSUPPORTED_BASE_NAME pdb.PDB_PROP_OS_PAT_UNSUPPORTED + +#define PDB_PROP_OS_SLI_ALLOWED_BASE_CAST +#define PDB_PROP_OS_SLI_ALLOWED_BASE_NAME pdb.PDB_PROP_OS_SLI_ALLOWED + +#define PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER_BASE_CAST +#define PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER_BASE_NAME pdb.PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER + +#define PDB_PROP_OS_SUPPORTS_TDR_BASE_CAST +#define PDB_PROP_OS_SUPPORTS_TDR_BASE_NAME pdb.PDB_PROP_OS_SUPPORTS_TDR + +#define PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED_BASE_CAST +#define PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED_BASE_NAME pdb.PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED + +#define PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED_BASE_CAST +#define PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED_BASE_NAME pdb.PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED + +#define PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM_BASE_CAST +#define PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM_BASE_NAME pdb.PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM + + + +#endif // _G_ODB_H_ diff --git a/src/nvidia/generated/g_os_desc_mem_nvoc.c b/src/nvidia/generated/g_os_desc_mem_nvoc.c new file mode 100644 index 000000000..c7e904759 --- /dev/null +++ b/src/nvidia/generated/g_os_desc_mem_nvoc.c @@ -0,0 +1,323 @@ +#define NVOC_OS_DESC_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_os_desc_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb3dacd = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_OsDescMemory(OsDescMemory*); +void __nvoc_init_funcTable_OsDescMemory(OsDescMemory*); +NV_STATUS __nvoc_ctor_OsDescMemory(OsDescMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_OsDescMemory(OsDescMemory*); +void __nvoc_dtor_OsDescMemory(OsDescMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OsDescMemory; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_OsDescMemory = { + /*pClassDef=*/ &__nvoc_class_def_OsDescMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OsDescMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ 
NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OsDescMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_OsDescMemory_OsDescMemory, + &__nvoc_rtti_OsDescMemory_Memory, + &__nvoc_rtti_OsDescMemory_RmResource, + &__nvoc_rtti_OsDescMemory_RmResourceCommon, + &__nvoc_rtti_OsDescMemory_RsResource, + &__nvoc_rtti_OsDescMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OsDescMemory), + /*classId=*/ classId(OsDescMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OsDescMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OsDescMemory, + /*pCastInfo=*/ &__nvoc_castinfo_OsDescMemory, + /*pExportInfo=*/ &__nvoc_export_info_OsDescMemory +}; + +static NvBool __nvoc_thunk_OsDescMemory_resCanCopy(struct RsResource *pOsDescMemory) { + return osdescCanCopy((struct OsDescMemory *)(((unsigned char *)pOsDescMemory) - __nvoc_rtti_OsDescMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescCheckMemInterUnmap(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescControl(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescUnmap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescGetMemInterMapParams(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescGetMemoryMappingDescriptor(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + 
__nvoc_rtti_OsDescMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescGetMapAddrSpace(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_osdescShareCallback(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_osdescControlFilter(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_osdescAddAdditionalDependants(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_osdescGetRefCount(struct OsDescMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_osdescMapTo(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_osdescControl_Prologue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescIsReady(struct OsDescMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescCheckCopyPermissions(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_osdescPreDestruct(struct OsDescMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_osdescUnmapFrom(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_osdescControl_Epilogue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_osdescControlLookup(struct 
OsDescMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_osdescMap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_osdescAccessCallback(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OsDescMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_OsDescMemory(OsDescMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OsDescMemory(OsDescMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_OsDescMemory(OsDescMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_OsDescMemory_fail_Memory; + __nvoc_init_dataField_OsDescMemory(pThis); + + status = __nvoc_osdescConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_OsDescMemory_fail__init; + goto __nvoc_ctor_OsDescMemory_exit; // Success + +__nvoc_ctor_OsDescMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_OsDescMemory_fail_Memory: +__nvoc_ctor_OsDescMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_OsDescMemory_1(OsDescMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__osdescCanCopy__ = &osdescCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_OsDescMemory_resCanCopy; + + pThis->__osdescCheckMemInterUnmap__ = &__nvoc_thunk_Memory_osdescCheckMemInterUnmap; + + pThis->__osdescControl__ = &__nvoc_thunk_Memory_osdescControl; + + pThis->__osdescUnmap__ = &__nvoc_thunk_Memory_osdescUnmap; + + pThis->__osdescGetMemInterMapParams__ = &__nvoc_thunk_Memory_osdescGetMemInterMapParams; + + pThis->__osdescGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_osdescGetMemoryMappingDescriptor; + + pThis->__osdescGetMapAddrSpace__ = &__nvoc_thunk_Memory_osdescGetMapAddrSpace; + + pThis->__osdescShareCallback__ = &__nvoc_thunk_RmResource_osdescShareCallback; + + pThis->__osdescControlFilter__ = &__nvoc_thunk_RsResource_osdescControlFilter; + + pThis->__osdescAddAdditionalDependants__ = &__nvoc_thunk_RsResource_osdescAddAdditionalDependants; + + pThis->__osdescGetRefCount__ = &__nvoc_thunk_RsResource_osdescGetRefCount; + + pThis->__osdescMapTo__ = &__nvoc_thunk_RsResource_osdescMapTo; + + pThis->__osdescControl_Prologue__ = &__nvoc_thunk_RmResource_osdescControl_Prologue; + + pThis->__osdescIsReady__ = &__nvoc_thunk_Memory_osdescIsReady; + 
+ pThis->__osdescCheckCopyPermissions__ = &__nvoc_thunk_Memory_osdescCheckCopyPermissions; + + pThis->__osdescPreDestruct__ = &__nvoc_thunk_RsResource_osdescPreDestruct; + + pThis->__osdescUnmapFrom__ = &__nvoc_thunk_RsResource_osdescUnmapFrom; + + pThis->__osdescControl_Epilogue__ = &__nvoc_thunk_RmResource_osdescControl_Epilogue; + + pThis->__osdescControlLookup__ = &__nvoc_thunk_RsResource_osdescControlLookup; + + pThis->__osdescMap__ = &__nvoc_thunk_Memory_osdescMap; + + pThis->__osdescAccessCallback__ = &__nvoc_thunk_RmResource_osdescAccessCallback; +} + +void __nvoc_init_funcTable_OsDescMemory(OsDescMemory *pThis) { + __nvoc_init_funcTable_OsDescMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_OsDescMemory(OsDescMemory *pThis) { + pThis->__nvoc_pbase_OsDescMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_OsDescMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_OsDescMemory(OsDescMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + OsDescMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(OsDescMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OsDescMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OsDescMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OsDescMemory(pThis); + status = __nvoc_ctor_OsDescMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_OsDescMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OsDescMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OsDescMemory(OsDescMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_OsDescMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_os_desc_mem_nvoc.h b/src/nvidia/generated/g_os_desc_mem_nvoc.h new file mode 100644 index 000000000..863bdba1f --- /dev/null +++ b/src/nvidia/generated/g_os_desc_mem_nvoc.h @@ -0,0 +1,224 @@ +#ifndef _G_OS_DESC_MEM_NVOC_H_ +#define _G_OS_DESC_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_os_desc_mem_nvoc.h" + +#ifndef _OS_DESC_MEMORY_H_ +#define _OS_DESC_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! + * Bind memory allocated through os descriptor + */ +#ifdef NVOC_OS_DESC_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OsDescMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct OsDescMemory *__nvoc_pbase_OsDescMemory; + NvBool (*__osdescCanCopy__)(struct OsDescMemory *); + NV_STATUS (*__osdescCheckMemInterUnmap__)(struct OsDescMemory *, NvBool); + NV_STATUS (*__osdescControl__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__osdescUnmap__)(struct OsDescMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__osdescGetMemInterMapParams__)(struct OsDescMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__osdescGetMemoryMappingDescriptor__)(struct OsDescMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__osdescGetMapAddrSpace__)(struct OsDescMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__osdescShareCallback__)(struct OsDescMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__osdescControlFilter__)(struct OsDescMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__osdescAddAdditionalDependants__)(struct RsClient *, struct OsDescMemory *, RsResourceRef *); + NvU32 (*__osdescGetRefCount__)(struct OsDescMemory *); + NV_STATUS (*__osdescMapTo__)(struct OsDescMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__osdescControl_Prologue__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__osdescIsReady__)(struct OsDescMemory *); + NV_STATUS (*__osdescCheckCopyPermissions__)(struct OsDescMemory *, struct OBJGPU *, NvHandle); + void (*__osdescPreDestruct__)(struct OsDescMemory *); + NV_STATUS (*__osdescUnmapFrom__)(struct OsDescMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__osdescControl_Epilogue__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS 
(*__osdescControlLookup__)(struct OsDescMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__osdescMap__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__osdescAccessCallback__)(struct OsDescMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_OsDescMemory_TYPEDEF__ +#define __NVOC_CLASS_OsDescMemory_TYPEDEF__ +typedef struct OsDescMemory OsDescMemory; +#endif /* __NVOC_CLASS_OsDescMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OsDescMemory +#define __nvoc_class_id_OsDescMemory 0xb3dacd +#endif /* __nvoc_class_id_OsDescMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory; + +#define __staticCast_OsDescMemory(pThis) \ + ((pThis)->__nvoc_pbase_OsDescMemory) + +#ifdef __nvoc_os_desc_mem_h_disabled +#define __dynamicCast_OsDescMemory(pThis) ((OsDescMemory*)NULL) +#else //__nvoc_os_desc_mem_h_disabled +#define __dynamicCast_OsDescMemory(pThis) \ + ((OsDescMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OsDescMemory))) +#endif //__nvoc_os_desc_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OsDescMemory(OsDescMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OsDescMemory(OsDescMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_OsDescMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_OsDescMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define osdescCanCopy(pOsDescMemory) osdescCanCopy_DISPATCH(pOsDescMemory) +#define osdescCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) osdescCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define osdescControl(pMemory, pCallContext, pParams) osdescControl_DISPATCH(pMemory, pCallContext, pParams) +#define osdescUnmap(pMemory, pCallContext, pCpuMapping) osdescUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define osdescGetMemInterMapParams(pMemory, pParams) osdescGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define osdescGetMemoryMappingDescriptor(pMemory, ppMemDesc) osdescGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define osdescGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) osdescGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define osdescShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) osdescShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define osdescControlFilter(pResource, pCallContext, pParams) osdescControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define osdescAddAdditionalDependants(pClient, pResource, pReference) osdescAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define osdescGetRefCount(pResource) osdescGetRefCount_DISPATCH(pResource) +#define osdescMapTo(pResource, pParams) osdescMapTo_DISPATCH(pResource, pParams) +#define osdescControl_Prologue(pResource, pCallContext, pParams) osdescControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define osdescIsReady(pMemory) osdescIsReady_DISPATCH(pMemory) +#define osdescCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) osdescCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define osdescPreDestruct(pResource) osdescPreDestruct_DISPATCH(pResource) +#define osdescUnmapFrom(pResource, pParams) osdescUnmapFrom_DISPATCH(pResource, pParams) +#define 
osdescControl_Epilogue(pResource, pCallContext, pParams) osdescControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define osdescControlLookup(pResource, pParams, ppEntry) osdescControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define osdescMap(pMemory, pCallContext, pParams, pCpuMapping) osdescMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define osdescAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) osdescAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool osdescCanCopy_IMPL(struct OsDescMemory *pOsDescMemory); + +static inline NvBool osdescCanCopy_DISPATCH(struct OsDescMemory *pOsDescMemory) { + return pOsDescMemory->__osdescCanCopy__(pOsDescMemory); +} + +static inline NV_STATUS osdescCheckMemInterUnmap_DISPATCH(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__osdescCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS osdescControl_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__osdescControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS osdescUnmap_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__osdescUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS osdescGetMemInterMapParams_DISPATCH(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__osdescGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS osdescGetMemoryMappingDescriptor_DISPATCH(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__osdescGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS osdescGetMapAddrSpace_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__osdescGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool osdescShareCallback_DISPATCH(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__osdescShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS osdescControlFilter_DISPATCH(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__osdescControlFilter__(pResource, pCallContext, pParams); +} + +static inline void osdescAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference) { + pResource->__osdescAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 osdescGetRefCount_DISPATCH(struct OsDescMemory *pResource) { + return pResource->__osdescGetRefCount__(pResource); +} + +static inline NV_STATUS osdescMapTo_DISPATCH(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__osdescMapTo__(pResource, pParams); +} + +static inline NV_STATUS osdescControl_Prologue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__osdescControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS osdescIsReady_DISPATCH(struct OsDescMemory *pMemory) { + return pMemory->__osdescIsReady__(pMemory); +} + +static 
inline NV_STATUS osdescCheckCopyPermissions_DISPATCH(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__osdescCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void osdescPreDestruct_DISPATCH(struct OsDescMemory *pResource) { + pResource->__osdescPreDestruct__(pResource); +} + +static inline NV_STATUS osdescUnmapFrom_DISPATCH(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__osdescUnmapFrom__(pResource, pParams); +} + +static inline void osdescControl_Epilogue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__osdescControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS osdescControlLookup_DISPATCH(struct OsDescMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__osdescControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS osdescMap_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__osdescMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool osdescAccessCallback_DISPATCH(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__osdescAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS osdescConstruct_IMPL(struct OsDescMemory *arg_pOsDescMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_osdescConstruct(arg_pOsDescMemory, arg_pCallContext, arg_pParams) osdescConstruct_IMPL(arg_pOsDescMemory, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_OS_DESC_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_os_hal.h b/src/nvidia/generated/g_os_hal.h new file mode 100644 index 000000000..ab875c8eb --- /dev/null +++ b/src/nvidia/generated/g_os_hal.h @@ -0,0 +1,88 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Provides access to OS Hal interfaces. +// +// Profile: shipping-gpus-openrm +// Haldef: os.def +// Template: templates/gt_eng_hal.h +// + +#ifndef _G_OSHAL_H_ +#define _G_OSHAL_H_ + +// +// Typedefs for OS public object interfaces. +// + + + +// +// "struct" to list OS's public interfaces, eg: pOs->osInit(pGpu, pOs) +// + + + +// +// macro defines to directly access OS's OBJ interfaces, +// eg: #define osReadFoo(_pGpu, _pOs) _pOs->obj._osReadFoo(_pGpu, _pOs) +// + + + +// +// macro defines to access OS's function pointers, +// eg: #define osReadFoo_FNPTR(_pOs) _pOs->obj.__osReadFoo__ +// or #define osReadFoo_FNPTR(_pOs) _pOs->__osReadFoo__ +// + + + +// +// Typedefs for OS HAL interfaces. +// + + + +// +// struct to access OS's hal interfaces, eg: pOs->hal.osReadFoo(pGpu, pOs) +// + + + +// +// macro defines to directly access OS's hal interfaces, +// eg: #define osReadFoo_HAL(_pGpu, _pOs) _pOs->hal.osReadFoo(_pGpu, _pOs) +// + + + +// +// Inline stub function definitions. 
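The OsDescMemory header above shows the standard NVOC class layout: every virtual method is stored as a per-object function pointer (for example __osdescCanCopy__), and a thin inline *_DISPATCH wrapper simply forwards through that pointer to whatever *_IMPL routine the generated initialization code installed. A minimal, self-contained sketch of the same mechanism follows; ExampleObject and exampleCanCopy are invented names used only to illustrate the pattern and are not driver code.

/* Sketch of the NVOC-style per-object dispatch used by the generated headers. */
#include <stdbool.h>
#include <stdio.h>

typedef struct ExampleObject ExampleObject;

struct ExampleObject
{
    /* per-object "vtable" slot; the generated __nvoc_init_funcTable_* code fills this in */
    bool (*__exampleCanCopy__)(ExampleObject *);
};

/* hand-written implementation (the *_IMPL function in generated code) */
static bool exampleCanCopy_IMPL(ExampleObject *pThis)
{
    (void)pThis;
    return true;
}

/* inline dispatcher (the *_DISPATCH function): forwards through the pointer */
static inline bool exampleCanCopy_DISPATCH(ExampleObject *pThis)
{
    return pThis->__exampleCanCopy__(pThis);
}

int main(void)
{
    ExampleObject obj = { .__exampleCanCopy__ = exampleCanCopy_IMPL };
    printf("canCopy = %d\n", exampleCanCopy_DISPATCH(&obj));
    return 0;
}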
+// + + + +// +// OS PDB properties +// + +typedef struct PDB_PROP_OS { + + NvBool PDB_PROP_OS_PAT_UNSUPPORTED; + NvBool PDB_PROP_OS_SLI_ALLOWED; + NvBool PDB_PROP_OS_SYSTEM_EVENTS_SUPPORTED; + NvBool PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT; + NvBool PDB_PROP_OS_WAIT_FOR_ACPI_SUBSYSTEM; + NvBool PDB_PROP_OS_UNCACHED_MEMORY_MAPPINGS_NOT_SUPPORTED; + NvBool PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE; + NvBool PDB_PROP_OS_LIMIT_GPU_RESET; + NvBool PDB_PROP_OS_SUPPORTS_TDR; + NvBool PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI; + NvBool PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER; // Set if this OS supports the display remapper (otherwise force DNISO to vidmem if display can't access all of sysmem). + NvBool PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS; // The OS does not allow the driver to map the PCIE config space. + NvBool PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS; // Accessing paged segment might cause problem at some code path. Set the flag up on the code path to make it fail osPagedSegmentAccessCheck() + +} PDB_PROP_OS; + + +#endif // _G_OSHAL_H_ diff --git a/src/nvidia/generated/g_os_iom.c b/src/nvidia/generated/g_os_iom.c new file mode 100644 index 000000000..4022d37f9 --- /dev/null +++ b/src/nvidia/generated/g_os_iom.c @@ -0,0 +1,199 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_eng_iom.c +// +#include "nvstatus.h" +#include "nvport/inline/util_valist.h" +#include "nvport/nvport.h" +#include "core/core.h" +#include "nvoc/rtti.h" +#include "os/os.h" +#include "gpu/gpu.h" + +#include "os/os.h" + +#include "g_os_private.h" + +// +// OS's object-level _STUB, _MISSING, _VGPUSTUB interface routines +// (if any) +// + + + + + + + + +// +// Initialize OS's object-level interfaces +// +void +osSetPropertiesSpecial +( + POBJOS pOs +) +{ + // SET_IF support for OS's PDB properties + if (!RMCFG_FEATURE_PLATFORM_UNIX) { + pOs->setProperty(pOs, PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER, NV_TRUE); + } + +} + + + + + +// +// OS's run-time type information +// + +extern const struct NVOC_CLASS_DEF __iom_class_def_OBJOS; +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __iom_dtor_OBJOS(POBJOS); +NV_STATUS __iom_objCreate_OBJOS(POBJOS *ppThis, Dynamic *pParent, NvU32 createFlags); + +const struct NVOC_RTTI __iom_rtti_OBJOS_OBJOS = +{ + &__iom_class_def_OBJOS, + (NVOC_DYNAMIC_DTOR)&__iom_dtor_OBJOS, + 0, +}; + +const struct NVOC_RTTI __iom_rtti_OBJOS_Object = +{ + &__nvoc_class_def_Object, + (NVOC_DYNAMIC_DTOR)&__nvoc_destructFromBase, + NV_OFFSETOF(OBJOS, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __iom_castinfo_OBJOS = +{ + 2, + { + &__iom_rtti_OBJOS_OBJOS, + &__iom_rtti_OBJOS_Object + } +}; + +extern const NVOC_RTTI_PROVIDER __iom_rtti_provider; +const struct NVOC_CLASS_DEF __iom_class_def_OBJOS = +{ + { + sizeof(OBJOS), + classId(OBJOS), + &__iom_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + "OBJOS", +#endif + }, + (NVOC_DYNAMIC_OBJ_CREATE)&__iom_objCreate_OBJOS, + &__iom_castinfo_OBJOS, +}; + + + + +// +// OS's object infrastructure boilerplate +// + +// initializers, constructors, and destructors for OS's base classes +void __nvoc_init_Object(Object*); +NV_STATUS __nvoc_ctor_Object(Object*); +void __nvoc_dtor_Object(Object*); +NV_STATUS __nvoc_vctor_Object(Dynamic*, va_list); + + +NV_STATUS __iom_ctor_OBJOS(POBJOS pOs) +{ + NV_STATUS status; + RMCFG_MODULE_ENABLED_OR_ASSERT_AND_BAIL(OS); + status = __nvoc_ctor_Object(&pOs->__nvoc_base_Object); + if (status != NV_OK) goto 
__iom_ctor_OBJOS_fail_Object; + return NV_OK; +__iom_ctor_OBJOS_fail_Object: + return status; +} + +NV_STATUS __iom_vctor_OBJOS(Dynamic *pDynamic, va_list args) +{ + NV_STATUS status; + POBJOS pThis = dynamicCast(pDynamic, OBJOS); + if (pThis != NULL) + { + status = __iom_ctor_OBJOS(pThis); + } + else + { + status = NV_ERR_INVALID_OBJECT; + } + return status; +} + +void __iom_dtor_OBJOS(POBJOS pOs) +{ + __nvoc_dtor_Object(&pOs->__nvoc_base_Object); +} + +// OS's object initializer function to set up vtables and RTTI +void __iom_init_OBJOS(POBJOS pOs) +{ + pOs->__nvoc_pbase_Object = &pOs->__nvoc_base_Object; + __nvoc_init_Object(&pOs->__nvoc_base_Object); + osSetPropertiesSpecial(pOs); +} + + +// +// OS's object creation routine +// +NV_STATUS __iom_objCreate_OBJOS(POBJOS *ppThis, Dynamic *pParent, NvU32 flags) +{ + NV_STATUS status; + Object *pParentObj; + POBJOS pThis = NULL; + + // var flags is used by NVOC but not enabled in IOM + PORT_UNREFERENCED_VARIABLE(flags); + + pThis = portMemAllocNonPaged(sizeof(OBJOS)); + if (!pThis) + { + status = NV_ERR_NO_MEMORY; + goto __iom_objCreate_OBJOS_cleanup; + } + + portMemSet(pThis, 0, sizeof(OBJOS)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__iom_class_def_OBJOS); + + pParentObj = dynamicCast(pParent, Object); + if (pParent) + { + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + + __iom_init_OBJOS(pThis); + status = __iom_ctor_OBJOS(pThis); + if (status != NV_OK) goto __iom_objCreate_OBJOS_cleanup; + + *ppThis = pThis; + + return NV_OK; + +__iom_objCreate_OBJOS_cleanup: + if (pThis) + { + // do not call destructors here since __iom_ctor_OBJOS already called them + portMemSet(pThis, 0, sizeof(OBJOS)); + portMemFree(pThis); + pThis = NULL; + } + return status; +} + diff --git a/src/nvidia/generated/g_os_nvoc.h b/src/nvidia/generated/g_os_nvoc.h new file mode 100644 index 000000000..16f314d7d --- /dev/null +++ b/src/nvidia/generated/g_os_nvoc.h @@ -0,0 +1,1405 @@ +#ifndef _G_OS_NVOC_H_ +#define _G_OS_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_os_nvoc.h" + + +#ifndef _OS_H_ +#define _OS_H_ + +/*! 
+ * @file os.h + * @brief Interface for Operating System module + */ + +/* ------------------------ Core & Library Includes ------------------------- */ +#include "core/core.h" +#include "containers/btree.h" + +/* ------------------------ SDK & Interface Includes ------------------------ */ +#include "nvsecurityinfo.h" +#include "nvacpitypes.h" +#include "nvimpshared.h" // TODO - should move from sdk to resman/interface +#include "nvi2c.h" // TODO - should move from sdk to resman/interface + +/* ------------------------ OS Includes ------------------------------------- */ +#include "os/nv_memory_type.h" +#include "os/capability.h" + +/* ------------------------ Forward Declarations ---------------------------- */ + +// +// The OS module should NOT depend on RM modules. The only exception is +// core/core.h. +// +// DO NOT ADD INCLUDES TO RM MODULE HEADERS FROM THIS FILE. OS module should be +// a leaf module. Dependencies on RM headers in this files results in circular +// dependencies as most modules depend on the OS module. +// +// Ideally, all types used by the OS module's interface are from the SDK, +// resman/interface or self-contained within the OS module header. For now, +// since the OS module depends on a few RM internal types we forward declare to +// avoid the need to pull in headers from across RM. +// +typedef struct SYS_STATIC_CONFIG SYS_STATIC_CONFIG; +typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR; +typedef struct IOVAMAPPING *PIOVAMAPPING; +typedef struct OBJGPUMGR OBJGPUMGR; +typedef struct EVENTNOTIFICATION EVENTNOTIFICATION, *PEVENTNOTIFICATION; +typedef struct DEVICE_MAPPING DEVICE_MAPPING; +typedef void *PUID_TOKEN; +typedef struct OBJTMR OBJTMR; +typedef struct OBJCL OBJCL; +typedef struct _GUID *LPGUID; + +// +// Forward declare OS_GPU_INFO type +// +// TODO - We shouldn't need a special definition per-OS. OS implementations +// should use a consistent type +// +typedef struct nv_state_t OS_GPU_INFO; + +/* ------------------------ OS Interface ------------------------------------ */ + +typedef struct os_wait_queue OS_WAIT_QUEUE; + +// +// Defines and Typedefs used by the OS +// +typedef NvU64 OS_THREAD_HANDLE; + +// +// Forward references for OS1HZTIMERENTRY symbols +// +typedef struct OS1HZTIMERENTRY *POS1HZTIMERENTRY; +typedef struct OS1HZTIMERENTRY OS1HZTIMERENTRY; + +// +// Simple 1 second callback facility. Schedules the given routine to be called with the supplied data +// in approximately 1 second. Might be called from an elevated IRQL. +// Unlike the tmr facilities (tmrScheduleCallbackXXX), this does not rely on the hardware. 
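The comment above describes RM's lightweight 1 Hz callback facility. The OS1HZPROC typedef and the NV_OS_1HZ_ONESHOT/NV_OS_1HZ_REPEAT flags follow immediately, and the scheduling entry points (osSchedule1SecondCallback, osRemove1SecondRepeatingCallback) are declared further down in this header. A hedged usage sketch, assuming only those declarations; the callback body and pMyState are hypothetical:

/* Illustrative only: a repeating ~1 Hz callback built on the declarations in this header. */
static void myPeriodicCheck(OBJGPU *pGpu, void *pData)
{
    /* May run at elevated IRQL, so keep the work here short and non-blocking. */
    (void)pGpu;
    (void)pData;
}

static NV_STATUS scheduleMyPeriodicCheck(OBJGPU *pGpu, void *pMyState)
{
    /* NV_OS_1HZ_REPEAT keeps the callback armed until it is explicitly removed. */
    return osSchedule1SecondCallback(pGpu, myPeriodicCheck, pMyState, NV_OS_1HZ_REPEAT);
}

static void cancelMyPeriodicCheck(OBJGPU *pGpu, void *pMyState)
{
    osRemove1SecondRepeatingCallback(pGpu, myPeriodicCheck, pMyState);
}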
+// +typedef void (*OS1HZPROC)(OBJGPU *, void *); + +#define NV_OS_1HZ_ONESHOT 0x00000000 +#define NV_OS_1HZ_REPEAT 0x00000001 + +struct OS1HZTIMERENTRY +{ + OS1HZPROC callback; + void* data; + NvU32 flags; + POS1HZTIMERENTRY next; +}; + +typedef struct RM_PAGEABLE_SECTION { + void *osHandle; // handle returned from OS API + void *pDataSection; // pointer to a date inside the target data/bss/const segment +} RM_PAGEABLE_SECTION; + + +// OSSetVideoSource defines +#define NV_OS_VIDEO_SOURCE_MCE 0x0 +#define NV_OS_VIDEO_SOURCE_WINDVR 0x1 +#define NV_OS_VIDEO_SOURCE_WMP9 0x2 +#define NV_OS_VIDEO_SOURCE_VMR9 0x3 +#define NV_OS_VIDEO_SOURCE_WINDVD 0x4 + +// OSPollHotkeyState return values +#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE 0:0 +#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_DISPLAY_CHANGE_FOUND 0x00000001 +#define NV_OS_HOTKEY_STATE_SCALE_EVENT 1:1 +#define NV_OS_HOTKEY_STATE_SCALE_EVENT_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_SCALE_EVENT_FOUND 0x00000001 +#define NV_OS_HOTKEY_STATE_LID_EVENT 2:2 +#define NV_OS_HOTKEY_STATE_LID_EVENT_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_LID_EVENT_FOUND 0x00000001 +#define NV_OS_HOTKEY_STATE_POWER_EVENT 3:3 +#define NV_OS_HOTKEY_STATE_POWER_EVENT_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_POWER_EVENT_FOUND 0x00000001 +#define NV_OS_HOTKEY_STATE_DOCK_EVENT 4:4 +#define NV_OS_HOTKEY_STATE_DOCK_EVENT_NOT_FOUND 0x00000000 +#define NV_OS_HOTKEY_STATE_DOCK_EVENT_FOUND 0x00000001 + +#define MAX_BRIGHTNESS_BCL_ELEMENTS 103 + +// ACPI _DOD Bit defines +// These bits are defined in the Hybrid SAS +#define NV_ACPI_DOD_DISPLAY_OWNER 20:18 +#define NV_ACPI_DOD_DISPLAY_OWNER_ALL 0x00000000 +#define NV_ACPI_DOD_DISPLAY_OWNER_MGPU 0x00000001 +#define NV_ACPI_DOD_DISPLAY_OWNER_DGPU1 0x00000002 + +#define NV_OS_ALLOCFLAGS_LOCKPAGES NVBIT(0) +#define NV_OS_ALLOCFLAGS_PAGEDPOOL NVBIT(1) +#define NV_OS_ALLOCFLAGS_NONPAGEDPOOL 0 + +// ACPI 3.0a definitions for requested data length +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_128B 0x00000001 +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_256B 0x00000002 +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_384B 0x00000003 +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_512B 0x00000004 +#define NV_ACPI_DDC_REQUESTED_DATA_LENGTH_DEFAULT 0x00000001 + +typedef enum _OS_PEX_RECOVERY_STATUS +{ + OS_PEX_RECOVERY_GPU_RESET_PENDING = 0, + OS_PEX_RECOVERY_GPU_RESTORED, + OS_PEX_RECOVERY_GPU_REMOVED +} OS_PEX_RECOVERY_STATUS; + +// osBugCheck bugcode defines +#define OS_BUG_CHECK_BUGCODE_UNKNOWN (0) +#define OS_BUG_CHECK_BUGCODE_INTERNAL_TEST (1) +#define OS_BUG_CHECK_BUGCODE_BUS (2) +#define OS_BUG_CHECK_BUGCODE_ECC_DBE (3) +#define OS_BUG_CHECK_BUGCODE_NVLINK_TL_ERR (4) +#define OS_BUG_CHECK_BUGCODE_PAGED_SEGMENT (5) +#define OS_BUG_CHECK_BUGCODE_BDOD_ON_ASSERT (6) +#define OS_BUG_CHECK_BUGCODE_LAST OS_BUG_CHECK_BUGCODE_BDOD_ON_ASSERT + +#define OS_BUG_CHECK_BUGCODE_STR \ + { \ + "Unknown Error", \ + "Nv Internal Testing", \ + "Bus Error", \ + "Double Bit Error", \ + "NVLink TL Error", \ + "Invalid Bindata Access" \ + "BSOD on Assert or Breakpoint" \ + } + +// Flags needed by OSAllocPagesNode +#define OS_ALLOC_PAGES_NODE_NONE 0x0 +#define OS_ALLOC_PAGES_NODE_SKIP_RECLAIM 0x1 + +// +// Structures for osPackageRegistry and osUnpackageRegistry +// +typedef struct PACKED_REGISTRY_ENTRY +{ + NvU32 nameOffset; + NvU8 type; + NvU32 data; + NvU32 length; +} PACKED_REGISTRY_ENTRY; + +typedef struct PACKED_REGISTRY_TABLE +{ + NvU32 size; + NvU32 numEntries; + PACKED_REGISTRY_ENTRY entries[0]; +} 
PACKED_REGISTRY_TABLE; + +// +// Values for PACKED_REGISTRY_ENTRY::type +// +#define REGISTRY_TABLE_ENTRY_TYPE_UNKNOWN 0 +#define REGISTRY_TABLE_ENTRY_TYPE_DWORD 1 +#define REGISTRY_TABLE_ENTRY_TYPE_BINARY 2 +#define REGISTRY_TABLE_ENTRY_TYPE_STRING 3 + +/* + * OS_DRIVER_BLOCK + * + * driverStart + * CPU VA of where the driver is loaded + * unique_id + * Debug GUID of the Driver. Used to match with Pdb + * age + * Additional GUID information + * offset + * Offset from VA to start of text + */ +typedef struct { + NvP64 driverStart NV_ALIGN_BYTES(8); + NvU8 unique_id[16]; + NvU32 age; + NvU32 offset; +} OS_DRIVER_BLOCK; + +// Basic OS interface functions +typedef NvU32 OSSetEvent(OBJGPU *, NvP64); +typedef NV_STATUS OSEventNotification(OBJGPU *, PEVENTNOTIFICATION, NvU32, void *, NvU32); +typedef NV_STATUS OSEventNotificationWithInfo(OBJGPU *, PEVENTNOTIFICATION, NvU32, NvU32, NvU16, void *, NvU32); +typedef NV_STATUS OSObjectEventNotification(NvHandle, NvHandle, NvU32, PEVENTNOTIFICATION, NvU32, void *, NvU32); +typedef NV_STATUS NV_FORCERESULTCHECK OSAllocPages(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSAllocPagesInternal(MEMORY_DESCRIPTOR *); +typedef void OSFreePages(MEMORY_DESCRIPTOR *); +typedef void OSFreePagesInternal(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSLockMem(MEMORY_DESCRIPTOR *); +typedef NV_STATUS OSUnlockMem(MEMORY_DESCRIPTOR *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapGPU(OBJGPU *, RS_PRIV_LEVEL, NvU64, NvU64, NvU32, NvP64 *, NvP64 *); +typedef void OSUnmapGPU(OS_GPU_INFO *, RS_PRIV_LEVEL, NvP64, NvU64, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSDeviceClassToDeviceName(NvU32, NvU8 *); +typedef NV_STATUS NV_FORCERESULTCHECK OSNotifyEvent(OBJGPU *, PEVENTNOTIFICATION, NvU32, NvU32, NV_STATUS); +typedef NV_STATUS OSReadRegistryString(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSWriteRegistryBinary(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSWriteRegistryVolatile(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSReadRegistryVolatile(OBJGPU *, const char *, NvU8 *, NvU32); +typedef NV_STATUS OSReadRegistryVolatileSize(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryBinary(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSWriteRegistryDword(OBJGPU *, const char *, NvU32); +typedef NV_STATUS OSReadRegistryDword(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryDwordBase(OBJGPU *, const char *, NvU32 *); +typedef NV_STATUS OSReadRegistryStringBase(OBJGPU *, const char *, NvU8 *, NvU32 *); +typedef NV_STATUS OSPackageRegistry(OBJGPU *, PACKED_REGISTRY_TABLE *, NvU32 *); +typedef NV_STATUS OSUnpackageRegistry(PACKED_REGISTRY_TABLE *); +typedef NvBool OSQueueDpc(OBJGPU *); +typedef void OSFlushCpuWriteCombineBuffer(void); +typedef NV_STATUS OSNumaMemblockSize(NvU64 *); +typedef NvBool OSNumaOnliningEnabled(OS_GPU_INFO *); +typedef NV_STATUS OSAllocPagesNode(NvS32, NvLength, NvU32, NvU64 *); +typedef NV_STATUS OSAllocAcquirePage(NvU64); +typedef NV_STATUS OSAllocReleasePage(NvU64); +typedef NvU32 OSGetPageRefcount(NvU64); +typedef NvU32 OSCountTailPages(NvU64); +typedef NvU32 OSGetPageSize(void); + + +// We use osAcquireRmSema to catch "unported" sema code to new lock model +typedef NV_STATUS NV_FORCERESULTCHECK OSAcquireRmSema(void *); +typedef NvBool NV_FORCERESULTCHECK OSIsRmSemaOwner(void *); + +#define DPC_RELEASE_ALL_GPU_LOCKS (1) +#define DPC_RELEASE_SINGLE_GPU_LOCK (2) + +typedef NV_STATUS OSGpuLocksQueueRelease(OBJGPU *pGpu, NvU32 
dpcGpuLockRelease); +typedef NvU32 OSApiLockAcquireConfigureFlags(NvU32 flags); +typedef NV_STATUS NV_FORCERESULTCHECK OSCondAcquireRmSema(void *); +typedef NvU32 OSReleaseRmSema(void *, OBJGPU *); + +typedef NvU32 OSGetCpuCount(void); +typedef NvU32 OSGetMaximumCoreCount(void); +typedef NvU32 OSGetCurrentProcessorNumber(void); +typedef NV_STATUS OSDelay(NvU32); +typedef NV_STATUS OSDelayUs(NvU32); +typedef NV_STATUS OSDelayNs(NvU32); +typedef void OSSpinLoop(void); +typedef NvU32 OSGetCurrentProcess(void); +typedef void OSGetCurrentProcessName(char *, NvU32); +typedef NvU32 OSGetCurrentPasid(void); +typedef NV_STATUS OSGetCurrentThread(OS_THREAD_HANDLE *); +typedef NV_STATUS OSAttachToProcess(void **, NvU32); +typedef void OSDetachFromProcess(void*); +typedef NV_STATUS OSVirtualToPhysicalAddr(MEMORY_DESCRIPTOR *, NvP64, RmPhysAddr *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryUser(OS_GPU_INFO *, RmPhysAddr, NvU64, NvU32, NvP64 *, NvP64 *, NvU32); +typedef void OSUnmapPciMemoryUser(OS_GPU_INFO *, NvP64, NvU64, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryKernelOld(OBJGPU *, RmPhysAddr, NvU64, NvU32, void **, NvU32); +typedef void OSUnmapPciMemoryKernelOld(OBJGPU *, void *); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapPciMemoryKernel64(OBJGPU *, RmPhysAddr, NvU64, NvU32, NvP64 *, NvU32); +typedef void OSUnmapPciMemoryKernel64(OBJGPU *, NvP64); +typedef NV_STATUS NV_FORCERESULTCHECK OSMapSystemMemory(MEMORY_DESCRIPTOR *, NvU64, NvU64, NvBool, NvU32, NvP64*, NvP64*); +typedef void OSUnmapSystemMemory(MEMORY_DESCRIPTOR *, NvBool, NvU32, NvP64, NvP64); +typedef NvBool OSLockShouldToggleInterrupts(OBJGPU *); +typedef NV_STATUS OSGetPerformanceCounter(NvU64 *); +typedef NvBool OSDbgBreakpointEnabled(void); +typedef NV_STATUS OSAttachGpu(OBJGPU *, void *); +typedef NV_STATUS OSDpcAttachGpu(OBJGPU *, void *); +typedef void OSDpcDetachGpu(OBJGPU *); +typedef NV_STATUS OSHandleGpuLost(OBJGPU *); +typedef void OSHandleGpuSurpriseRemoval(OBJGPU *); +typedef void OSInitScalabilityOptions(OBJGPU *, void *); +typedef void OSHandleDeferredRecovery(OBJGPU *); +typedef NvBool OSIsSwPreInitOnly(OS_GPU_INFO *); + +#define NVRM_MAX_FILE_NAME_LENGTH (128) +#define NVRM_FILE_ACCESS_READ NVBIT(0) +#define NVRM_FILE_ACCESS_WRITE NVBIT(1) + +typedef void OSGetTimeoutParams(OBJGPU *, NvU32 *, NvU32 *, NvU32 *); +typedef NvBool OSIsRaisedIRQL(void); +typedef NvBool OSIsISR(void); +typedef NV_STATUS OSGetDriverBlock(OS_GPU_INFO *, OS_DRIVER_BLOCK *); +typedef NvBool OSIsEqualGUID(void *, void *); + +#define OS_QUEUE_WORKITEM_FLAGS_NONE 0x00000000 +#define OS_QUEUE_WORKITEM_FLAGS_DONT_FREE_PARAMS NVBIT(0) +#define OS_QUEUE_WORKITEM_FLAGS_FALLBACK_TO_DPC NVBIT(1) +// +// Lock flags: +// Only one of the LOCK_GPU flags should be provided. If multiple are, +// the priority ordering should be GPUS > GROUP_DEVICE > GROUP_SUBDEVICE +// +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA NVBIT(8) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW NVBIT(9) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO NVBIT(10) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW NVBIT(11) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO NVBIT(12) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW NVBIT(13) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RO NVBIT(14) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW NVBIT(15) +#define OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RO NVBIT(16) +// +// Perform a GPU full power sanity after getting GPU locks. 
+// One of the above LOCK_GPU flags must be provided when using this flag. +// +#define OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY NVBIT(17) +#define OS_QUEUE_WORKITEM_FLAGS_FOR_PM_RESUME NVBIT(18) +typedef void OSWorkItemFunction(NvU32 gpuInstance, void *); +typedef void OSSystemWorkItemFunction(void *); +typedef NV_STATUS OSQueueWorkItem(OBJGPU *, OSWorkItemFunction, void *); +typedef NV_STATUS OSQueueWorkItemWithFlags(OBJGPU *, OSWorkItemFunction, void *, NvU32); +typedef NV_STATUS OSQueueSystemWorkItem(OSSystemWorkItemFunction, void *); + +// MXM ACPI calls +typedef NV_STATUS OSCallACPI_MXMX(OBJGPU *, NvU32, NvU8 *); +typedef NV_STATUS OSCallACPI_DDC(OBJGPU *, NvU32, NvU8*,NvU32*, NvBool); +typedef NV_STATUS OSCallACPI_BCL(OBJGPU *, NvU32, NvU32 *, NvU16 *); + +// Display MUX ACPI calls +typedef NV_STATUS OSCallACPI_MXDS(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_MXDM(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_MXID(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_LRST(OBJGPU *, NvU32, NvU32 *); + +// Hybrid GPU ACPI calls +typedef NV_STATUS OSCallACPI_NVHG_GPUON(OBJGPU *, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_GPUOFF(OBJGPU *, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_GPUSTA(OBJGPU *, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_MXDS(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_MXMX(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_DOS(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_ROM(OBJGPU *, NvU32 *, NvU32 *); +typedef NV_STATUS OSCallACPI_NVHG_DCS(OBJGPU *, NvU32, NvU32 *); +typedef NV_STATUS OSCallACPI_DOD(OBJGPU *, NvU32 *, NvU32 *); + +// Tegra ACPI calls +typedef NV_STATUS OSCallACPI_SUB(OBJGPU *, NvU8 *, NvU32 *); +typedef NV_STATUS OSCallACPI_ON(OBJGPU *, NvU32); +typedef NV_STATUS OSCallACPI_OFF(OBJGPU *, NvU32); + +// Notebook Power Balancing ACPI calls +typedef NV_STATUS OSCallACPI_NBPS(OBJGPU *, NvU8 *, NvU32 *); +typedef NV_STATUS OSCallACPI_NBSL(OBJGPU *, NvU32); + +// Optimus WMI ACPI calls +typedef NV_STATUS OSCallACPI_OPTM_GPUON(OBJGPU *); + +// Generic ACPI _DSM call +typedef NV_STATUS OSCallACPI_DSM(OBJGPU *pGpu, ACPI_DSM_FUNCTION acpiDSMFunction, + NvU32 NVHGDSMSubfunction, NvU32 *pInOut, NvU16 *size); + +// UEFI variable calls +typedef NV_STATUS OSGetUefiVariable(OBJGPU *, char *, LPGUID, NvU8 *, NvU32 *, NvU32 *); + +// The following functions are also implemented in WinNT +typedef void OSQADbgRegistryInit(OBJOS *); +typedef NV_STATUS OSGetVersionDump(void *); +// End of WinNT + +// OS functions typically only implemented for MacOS core +// These next functions also appear on UNIX +typedef NvU32 OSnv_rdcr4(OBJOS *); +typedef NvU64 OSnv_rdxcr0(OBJOS *); +typedef int OSnv_cpuid(OBJOS *, int, int, NvU32 *, NvU32 *, NvU32 *, NvU32 *); +// end of functions shared between MacOSX and UNIX + +// These next functions also appear on UNIX +typedef NvU32 OSnv_rdmsr(OBJOS *, NvU32, NvU32 *, NvU32 *); +typedef NvU32 OSnv_wrmsr(OBJOS *, NvU32, NvU32, NvU32); +// end functions shared by MacOS and UNIX + +typedef NvU32 OSRobustChannelsDefaultState(OBJOS *); + +// NOTE: The following functions are also implemented in MODS +typedef NV_STATUS OSSimEscapeWrite(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 Value); +typedef NV_STATUS OSSimEscapeWriteBuffer(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, void* pBuffer); +typedef NV_STATUS OSSimEscapeRead(OBJGPU *, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value); +typedef NV_STATUS OSSimEscapeReadBuffer(OBJGPU *, const char 
*path, NvU32 Index, NvU32 Size, void* pBuffer); +typedef NvU32 OSGetSimulationMode(void); +typedef void OSLogString(const char*, ...); +typedef void OSFlushLog(void); + +// End of MODS functions + +//Vista Specific Functions + +typedef NV_STATUS OSSetupVBlank(OBJGPU *pGpu, void * pProc, + void * pParm1, void * pParm2, NvU32 Head, void * pParm3); + +// Heap reserve tracking functions +typedef void OSInternalReserveAllocCallback(NvU64 offset, NvU64 size, NvU32 gpuId); +typedef void OSInternalReserveFreeCallback(NvU64 offset, NvU32 gpuId); + + +// +// SPB_GPS (Vista) specific defines +// +typedef struct +{ + NvU64 cpuFPCounter1; // CPU Fixed Performance Counter 1 + NvU64 cpuFPCounter2; // CPU Fixed Performance Counter 2 + NvU64 cpuC0Counter; // C0 Counter + NvU64 cpuCoreTSC; // per core Time Stamp Counter value + NvU8 cpuCoreC0Value; // average C0 residency per core + NvU8 cpuCoreAperf; // CPU Aperf value per core + +}OS_CPU_CORE_PERF_COUNTERS, *POS_CPU_CORE_PERF_COUNTERS; + +typedef NV_STATUS OsGetSystemCpuLogicalCoreCounts(NvU32 *pCpuCoreCount); +typedef NV_STATUS OsGetSystemCpuC0AndAPerfCounters(NvU32 coreIndex, POS_CPU_CORE_PERF_COUNTERS pCpuPerfData); +typedef void OsEnableCpuPerformanceCounters(OBJOS *pOS); +typedef NV_STATUS OsCpuDpcObjInit(void **ppCpuDpcObj, OBJGPU *pGpu, NvU32 coreCount); +typedef void OsCpuDpcObjQueue(void **ppCpuDpcObj, NvU32 coreCount, POS_CPU_CORE_PERF_COUNTERS pCpuPerfData); +typedef void OsCpuDpcObjFree(void **ppCpuDpcObj); +typedef NV_STATUS OsSystemGetBatteryDrain(NvS32 *pChargeRate); + +// OSDRIVERERROR structure +typedef struct +{ + enum { + OS_DRIVER_ERROR_CODE_NONE = 0, + OS_DRIVER_ERROR_CODE_HP_GT216_VBIOS_BUG_587560, + OS_DRIVER_ERROR_CODE_COUNT, // Must always be last + } code; + + union + { + void *osDriverErrorContextNone; + + } context; + +} OSDRIVERERROR, * POSDRIVERERROR; + +typedef NV_STATUS OSPexRecoveryCallback(OS_GPU_INFO *, OS_PEX_RECOVERY_STATUS); + +// +// Function pointer typedef for use as callback prototype when filtering +// address ranges in os memory access routines +// +typedef NV_STATUS (OSMemFilterCb)(void *pPriv, NvU64 addr, void *pData, NvU64 size, NvBool bRead); + +// Structure typedef for storing the callback pointer and priv data +typedef struct +{ + NODE node; + OSMemFilterCb *pFilterCb; + void *pPriv; +} OSMEMFILTERDATA, *POSMEMFILTERDATA; + +// +// OS Functions typically only implemented for MODS +// Note: See comments above for other functions that +// are also implemented on MODS as well as other +// OS's. 
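The OSMemFilterCb and OSMEMFILTERDATA definitions just above are the hook RM's memory access routines use to intercept reads and writes that land in a registered address range (the osMemAddFilter/osMemRemoveFilter/osMemGetFilter entry points are declared near the end of this header). A hedged sketch of a filter callback is below; treating the two NvU64 parameters of osMemAddFilter as the start and end of the filtered range is an assumption drawn from the typedef, not a documented contract.

/*
 * Illustrative only: a filter that satisfies reads with zeros and drops writes.
 * The registration calls are shown as comments because the exact meaning of the
 * NvU64 parameters is assumed here rather than documented.
 */
static NV_STATUS myMemFilter(void *pPriv, NvU64 addr, void *pData, NvU64 size, NvBool bRead)
{
    (void)pPriv;
    (void)addr;

    if (bRead)
        portMemSet(pData, 0, size);   /* reads from the filtered range return zeros */

    /* writes are silently discarded in this sketch */
    return NV_OK;
}

/* Registration/teardown, assuming (rangeStart, rangeEnd) semantics:
 *     osMemAddFilter(rangeStart, rangeEnd, myMemFilter, NULL);
 *     ...
 *     osMemRemoveFilter(rangeStart);
 */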
+// + +typedef NvBool OSRmInitRm(OBJOS *); +typedef NV_STATUS OSGetPanelStrapAndIndex(OBJOS *, OBJGPU *, NvU32 *, NvU32 *); +typedef NV_STATUS OSNotifySbiosDisplayChangeEnd(OBJGPU *, NvU32); +typedef NvU32 OSGetDfpScalerFromSbios(OBJGPU *); +typedef NvU32 OSPollHotkeyState(OBJGPU *); + +typedef NV_STATUS OSInitGpuMgr(OBJGPUMGR *); +typedef void OSSyncWithRmDestroy(void); +typedef void OSSyncWithGpuDestroy(NvBool); + +typedef void OSModifyGpuSwStatePersistence(OS_GPU_INFO *, NvBool); + +typedef NV_STATUS OSMemAddFilter(NvU64, NvU64, OSMemFilterCb*, void *); +typedef NV_STATUS OSMemRemoveFilter(NvU64); +typedef POSMEMFILTERDATA OSMemGetFilter(NvUPtr); + +typedef NV_STATUS OSGetCarveoutInfo(NvU64*, NvU64*); +typedef NV_STATUS OSGetVPRInfo(NvU64*, NvU64*); +typedef NV_STATUS OSAllocInVPR(MEMORY_DESCRIPTOR*); +typedef NV_STATUS OSGetGenCarveout(NvU64*, NvU64 *, NvU32, NvU64); + +typedef NvU32 OSPepReadReg(OBJGPU *, NvU32); +typedef void OSPepWriteReg(OBJGPU *, NvU32, NvU32); + +typedef NV_STATUS OSI2CClosePorts(OS_GPU_INFO *, NvU32); +typedef NV_STATUS OSWriteI2CBufferDirect(OBJGPU *, NvU32, NvU8, void *, NvU32, void *, NvU32); +typedef NV_STATUS OSReadI2CBufferDirect(OBJGPU *, NvU32, NvU8, void *, NvU32, void *, NvU32); +typedef NV_STATUS OSI2CTransfer(OBJGPU *, NvU32, NvU8, nv_i2c_msg_t *, NvU32); +typedef NV_STATUS OSSetGpuRailVoltage(OBJGPU *, NvU32, NvU32*); +typedef NV_STATUS OSGetGpuRailVoltage(OBJGPU *, NvU32*); +typedef NV_STATUS OSGetGpuRailVoltageInfo(OBJGPU *, NvU32 *, NvU32 *, NvU32 *); +typedef NV_STATUS OSTegraSocGetImpImportData(TEGRA_IMP_IMPORT_DATA *); +typedef NV_STATUS OSTegraAllocateDisplayBandwidth(OS_GPU_INFO *, NvU32, NvU32); + +typedef NV_STATUS OSMemdrvQueryInterface(OS_GPU_INFO *); +typedef void OSMemdrvReleaseInterface(void); +typedef NV_STATUS OSMemdrvGetAsid(NvU32, NvU32 *); +typedef NV_STATUS OSMemdrvGetStreamId(NvU32, NvU32 *); + +typedef NV_STATUS OSGC6PowerControl(OBJGPU *, NvU32, NvU32 *); + +typedef RmPhysAddr OSPageArrayGetPhysAddr(OS_GPU_INFO *pOsGpuInfo, void* pPageData, NvU32 pageIndex); +typedef NV_STATUS OSGetChipInfo(OBJGPU *, NvU32*, NvU32*, NvU32*, NvU32*); +typedef NV_STATUS OSGetCurrentIrqPrivData(OS_GPU_INFO *, NvU32*); + +typedef enum +{ + RC_CALLBACK_IGNORE, + RC_CALLBACK_ISOLATE, + RC_CALLBACK_ISOLATE_NO_RESET, +} RC_CALLBACK_STATUS; +typedef RC_CALLBACK_STATUS OSRCCallback(OBJGPU *, NvHandle, NvHandle, NvHandle, NvHandle, NvU32, NvU32, NvU32 *, void *); +typedef NvBool OSCheckCallback(OBJGPU *); +typedef NV_STATUS OSReadPFPciConfigInVF(NvU32, NvU32*); + +#include "g_os_odb.h" // (rmconfig) os public interface + +// Actual definition of the OBJOS structure +struct OBJOS +{ + // + // Every object has a common object database structure at its start. + // WARNING: Never add anything above this structure definition. 
+ // + OBJECT_BASE_DEFINITION(OS); + + // Function pointers to Misc OS routines + OSDbgBreakpointEnabled *osDbgBreakpointEnabled; + + // Function pointers to routines normally implemented by WinNT only + OSQADbgRegistryInit *osQADbgRegistryInit; + OSQueueWorkItem *osQueueWorkItem; + OSQueueWorkItemWithFlags *osQueueWorkItemWithFlags; + OSQueueSystemWorkItem *osQueueSystemWorkItem; + + // used to initialize OBJSTEREO + void* (*osGetStereoDongleInterface)(void); + + // Function pointers to routines normally implemented by MacOS core + OSnv_rdcr4 *osNv_rdcr4; + OSnv_rdxcr0 *osNv_rdxcr0; + OSnv_cpuid *osNv_cpuid; + + // Function pointers to routines only implemented by MacOSX + OSnv_rdmsr *osNv_rdmsr; + OSnv_wrmsr *osNv_wrmsr; + OSRobustChannelsDefaultState *osRobustChannelsDefaultState; + + OSSimEscapeWrite *osSimEscapeWrite; + OSSimEscapeWriteBuffer *osSimEscapeWriteBuffer; + OSSimEscapeRead *osSimEscapeRead; + OSSimEscapeReadBuffer *osSimEscapeReadBuffer; + + // Function pointer to API functions. Add other OS's before these + OSRmInitRm *osRmInitRm; + + OSGetSimulationMode *osGetSimulationMode; + + // Function pointers for MXM ACPI Calls + OSCallACPI_MXMX *osCallACPI_MXMX; + OSCallACPI_DDC *osCallACPI_DDC; + OSCallACPI_BCL *osCallACPI_BCL; + + // Function pointers for display MUX ACPI calls + OSCallACPI_MXDS *osCallACPI_MXDS; + OSCallACPI_MXDM *osCallACPI_MXDM; + OSCallACPI_MXID *osCallACPI_MXID; + OSCallACPI_LRST *osCallACPI_LRST; + + // Function pointers for Hybrid GPU WMI calls + OSCallACPI_NVHG_GPUON *osCallACPI_NVHG_GPUON; + OSCallACPI_NVHG_GPUOFF *osCallACPI_NVHG_GPUOFF; + OSCallACPI_NVHG_GPUSTA *osCallACPI_NVHG_GPUSTA; + OSCallACPI_NVHG_MXDS *osCallACPI_NVHG_MXDS; + OSCallACPI_NVHG_MXMX *osCallACPI_NVHG_MXMX; + OSCallACPI_NVHG_DOS *osCallACPI_NVHG_DOS; + OSCallACPI_NVHG_ROM *osCallACPI_NVHG_ROM; + OSCallACPI_NVHG_DCS *osCallACPI_NVHG_DCS; + OSCallACPI_DOD *osCallACPI_DOD; + + // Function pointers for Tegra systems + OSCallACPI_SUB *osCallACPI_SUB; + OSCallACPI_ON *osCallACPI_ON; + OSCallACPI_OFF *osCallACPI_OFF; + + // function pointers to all DSM calls + OSCallACPI_DSM *osCallACPI_DSM; + + // function pointers to UEFI runtime variable calls + OSGetUefiVariable *osGetUefiVariable; + + OSCheckCallback *osCheckCallback; + OSRCCallback *osRCCallback; + + // Function pointers Notebook Power Balancing WMI calls + OSCallACPI_NBPS *osCallACPI_NBPS; + OSCallACPI_NBSL *osCallACPI_NBSL; + + // Function pointers for Optimus GPU WMI calls + OSCallACPI_OPTM_GPUON *osCallACPI_OPTM_GPUON; + + OSSetupVBlank *osSetupVBlank; + + OSPexRecoveryCallback *osPexRecoveryCallback; + + // Function pointers for heap reserve tracking callbacks + OSInternalReserveAllocCallback *osInternalReserveAllocCallback; + OSInternalReserveFreeCallback *osInternalReserveFreeCallback; + + // In 4K pages + NvU32 SystemMemorySize; + + // OS-specific page array interface. + OSPageArrayGetPhysAddr *osPageArrayGetPhysAddr; + + // mask holding dynamic power supported GPUs + NvU32 dynamicPowerSupportGpuMask; + + // Flag for SIM_BUILD in MODS + NvBool bIsSimMods; +}; + +NV_STATUS addProbe(OBJGPU *, NvU32); + + +typedef NV_STATUS OSFlushCpuCache(void); +typedef void OSAddRecordForCrashLog(void *, NvU32); +typedef void OSDeleteRecordForCrashLog(void *); + +OSFlushCpuCache osFlushCpuCache; +OSAddRecordForCrashLog osAddRecordForCrashLog; +OSDeleteRecordForCrashLog osDeleteRecordForCrashLog; + + +// +// This file should only contain the most common OS functions that provide +// direct call. Ex. 
osDelay, osIsAdministrator +// +NV_STATUS osTegraSocPmPowergate(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocPmUnpowergate(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocDeviceReset(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocGetImpImportData(TEGRA_IMP_IMPORT_DATA *pTegraImpImportData); +NV_STATUS osTegraAllocateDisplayBandwidth(OS_GPU_INFO *pOsGpuInfo, + NvU32 averageBandwidthKBPS, + NvU32 floorBandwidthKBPS); + +NvBool osIsAdministrator(void); +NvBool osAllowPriorityOverride(void); +NV_STATUS osGetCurrentTime(NvU32 *pSec,NvU32 *puSec); +NV_STATUS osGetCurrentTick(NvU64 *pTimeInNs); +NvU64 osGetTickResolution(void); +NvU64 osGetTimestamp(void); +NvU64 osGetTimestampFreq(void); + +NV_STATUS osDeferredIsr(OBJGPU *pGpu); + +void osEnableInterrupts(OBJGPU *pGpu); + +void osDisableInterrupts(OBJGPU *pGpu, + NvBool bIsr); + +void osBugCheck(NvU32 bugCode); +void osAssertFailed(void); + +// OS PCI R/W functions +void *osPciInitHandle(NvU32 domain, NvU8 bus, NvU8 slot, NvU8 function, + NvU16 *pVendor, NvU16 *pDevice); +NvU32 osPciReadDword(void *pHandle, NvU32 offset); +NvU16 osPciReadWord(void *pHandle, NvU32 offset); +NvU8 osPciReadByte(void *pHandle, NvU32 offset); +void osPciWriteDword(void *pHandle, NvU32 offset, NvU32 value); +void osPciWriteWord(void *pHandle, NvU32 offset, NvU16 value); +void osPciWriteByte(void *pHandle, NvU32 offset, NvU8 value); + +// OS RM capabilities calls + +void osRmCapInitDescriptor(NvU64 *pCapDescriptor); +NV_STATUS osRmCapAcquire(OS_RM_CAPS *pOsRmCaps, NvU32 rmCap, + NvU64 capDescriptor, + NvU64 *dupedCapDescriptor); +void osRmCapRelease(NvU64 dupedCapDescriptor); +NV_STATUS osRmCapRegisterGpu(OS_GPU_INFO *pOsGpuInfo, OS_RM_CAPS **ppOsRmCaps); +void osRmCapUnregister(OS_RM_CAPS **ppOsRmCaps); +NV_STATUS osRmCapRegisterSmcPartition(OS_RM_CAPS *pGpuOsRmCaps, + OS_RM_CAPS **ppPartitionOsRmCaps, + NvU32 partitionId); +NV_STATUS osRmCapRegisterSmcExecutionPartition( + OS_RM_CAPS *pPartitionOsRmCaps, + OS_RM_CAPS **ppExecPartitionOsRmCaps, + NvU32 execPartitionId); +NV_STATUS osRmCapRegisterSys(OS_RM_CAPS **ppOsRmCaps); + +NV_STATUS osGetRandomBytes(NvU8 *pBytes, NvU16 numBytes); + +NV_STATUS osAllocWaitQueue(OS_WAIT_QUEUE **ppWq); +void osFreeWaitQueue(OS_WAIT_QUEUE *pWq); +void osWaitUninterruptible(OS_WAIT_QUEUE *pWq); +void osWaitInterruptible(OS_WAIT_QUEUE *pWq); +void osWakeUp(OS_WAIT_QUEUE *pWq); + +NvU32 osGetDynamicPowerSupportMask(void); + +void osUnrefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osRefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo); + +NV_STATUS osIovaMap(PIOVAMAPPING pIovaMapping); +void osIovaUnmap(PIOVAMAPPING pIovaMapping); +NV_STATUS osGetAtsTargetAddressRange(OBJGPU *pGpu, + NvU32 *pAddr, + NvU32 *pAddrWidth, + NvU32 *pMask, + NvU32 *pMaskWidth, + NvU32 *pGranularity, + NvBool bIsPeer, + NvU32 peerIndex); +NV_STATUS osGetFbNumaInfo(OBJGPU *pGpu, + NvU64 *pAddrPhys, + NvS32 *pNodeId); +NV_STATUS osGetForcedNVLinkConnection(OBJGPU *pGpu, + NvU32 maxLinks, + NvU32 *pLinkConnection); +NV_STATUS osGetForcedC2CConnection(OBJGPU *pGpu, + NvU32 maxLinks, + NvU32 *pLinkConnection); +void osSetNVLinkSysmemLinkState(OBJGPU *pGpu,NvBool enabled); +NV_STATUS osGetPlatformNvlinkLinerate(OBJGPU *pGpu,NvU32 *lineRate); +const struct nvlink_link_handlers* osGetNvlinkLinkCallbacks(void); + +void osRemoveGpu(NvU32 domain, NvU8 bus, NvU8 device); +NvBool osRemoveGpuSupported(void); + +void initVGXSpecificRegistry(OBJGPU *); + +NV_STATUS osVgpuVfioWake(void *waitQueue); +NV_STATUS osVgpuInjectInterrupt(void *pArg1); +NV_STATUS 
osVgpuRegisterMdev(OS_GPU_INFO *pArg1); +NV_STATUS osIsVgpuVfioPresent(void); +NV_STATUS osVgpuAllocVmbusEventDpc(void **ppArg1); +void osVgpuScheduleVmbusEventDpc(void *pArg1, void *pArg2); +NV_STATUS osLockPageableDataSection(RM_PAGEABLE_SECTION *pSection); +NV_STATUS osUnlockPageableDataSection(RM_PAGEABLE_SECTION *pSection); + +void osFlushGpuCoherentCpuCacheRange(OS_GPU_INFO *pOsGpuInfo, + NvU64 cpuVirtual, + NvU64 size); +NvBool osUidTokensEqual(PUID_TOKEN arg1, PUID_TOKEN arg2); + +NV_STATUS osValidateClientTokens(PSECURITY_TOKEN arg1, + PSECURITY_TOKEN arg2); +PUID_TOKEN osGetCurrentUidToken(void); +PSECURITY_TOKEN osGetSecurityToken(void); + +NV_STATUS osIsKernelBuffer(void *pArg1, NvU32 arg2); + +NV_STATUS osMapViewToSection(OS_GPU_INFO *pArg1, + void *pSectionHandle, + void **ppAddress, + NvU64 actualSize, + NvU64 sectionOffset, + NvBool bIommuEnabled); +NV_STATUS osUnmapViewFromSection(OS_GPU_INFO *pArg1, + void *pAddress, + NvBool bIommuEnabled); + +NV_STATUS osOpenTemporaryFile(void **ppFile); +void osCloseFile(void *pFile); +NV_STATUS osWriteToFile(void *pFile, NvU8 *buffer, + NvU64 size, NvU64 offset); +NV_STATUS osReadFromFile(void *pFile, NvU8 *buffer, + NvU64 size, NvU64 offset); + +NV_STATUS osSrPinSysmem(OS_GPU_INFO *pArg1, + NvU64 commitSize, + void *pMdl); +NV_STATUS osSrUnpinSysmem(OS_GPU_INFO *pArg1); + +void osPagedSegmentAccessCheck(void); + +NV_STATUS osCreateMemFromOsDescriptorInternal(OBJGPU *pGpu, void *pAddress, + NvU32 flags, NvU64 size, + MEMORY_DESCRIPTOR **ppMemDesc, + NvBool bCachedKernel, + RS_PRIV_LEVEL privilegeLevel); + +NV_STATUS osReserveCpuAddressSpaceUpperBound(void **ppSectionHandle, + NvU64 maxSectionSize); +void osReleaseCpuAddressSpaceUpperBound(void *pSectionHandle); + +// OS Tegra IPC functions +NV_STATUS osTegraDceRegisterIpcClient(NvU32 interfaceType, void *usrCtx, + NvU32 *clientId); +NV_STATUS osTegraDceClientIpcSendRecv(NvU32 clientId, void *msg, + NvU32 msgLength); +NV_STATUS osTegraDceUnregisterIpcClient(NvU32 clientId); + +// +// Define OS-layer specific type instead of #include "clk_domains.h" for +// CLKWHICH, avoids upwards dependency from OS interface on higher level +// RM modules +// +typedef NvU32 OS_CLKWHICH; + +NV_STATUS osTegraSocEnableClk(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM); +NV_STATUS osTegraSocDisableClk(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM); +NV_STATUS osTegraSocGetCurrFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pCurrFreqKHz); +NV_STATUS osTegraSocGetMaxFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pMaxFreqKHz); +NV_STATUS osTegraSocGetMinFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 *pMinFreqKHz); +NV_STATUS osTegraSocSetFreqKHz(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRM, NvU32 reqFreqKHz); +NV_STATUS osTegraSocSetParent(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRMsource, OS_CLKWHICH whichClkRMparent); +NV_STATUS osTegraSocGetParent(OS_GPU_INFO *pOsGpuInfo, OS_CLKWHICH whichClkRMsource, OS_CLKWHICH *pWhichClkRMparent); + +NV_STATUS osTegraSocDeviceReset(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocPmPowergate(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osTegraSocPmUnpowergate(OS_GPU_INFO *pOsGpuInfo); +NV_STATUS osGetSyncpointAperture(OS_GPU_INFO *pOsGpuInfo, + NvU32 syncpointId, + NvU64 *physAddr, + NvU64 *limit, + NvU32 *offset); +NV_STATUS osTegraI2CGetBusState(OS_GPU_INFO *pOsGpuInfo, NvU32 port, NvS32 *scl, NvS32 *sda); + +NV_STATUS osGetVersion(NvU32 *pMajorVer, + NvU32 *pMinorVer, + NvU32 *pBuildNum, + NvU16 *pServicePackMaj, + 
NvU16 *pProductType); + +NvBool osGrService(OS_GPU_INFO *pOsGpuInfo, NvU32 grIdx, NvU32 intr, NvU32 nstatus, NvU32 addr, NvU32 dataLo); + +NvBool osDispService(NvU32 Intr0, NvU32 Intr1); + +NV_STATUS osReferenceObjectCount(void *pEvent); + +NV_STATUS osDereferenceObjectCount(void *pEvent); + +// +// Perform OS-specific error logging. +// Like libc's vsnprintf(), osErrorLogV() invalidates its va_list argument. The va_list argument +// may not be reused after osErrorLogV() returns. If the va_list is needed after the +// osErrorLogV() call, create a copy of the va_list using va_copy(). +// The caller controls the lifetime of the va_list argument, and should free it using va_end. +// +void osErrorLogV(OBJGPU *pGpu, NvU32 num, const char * pFormat, va_list arglist); +void osErrorLog(OBJGPU *pGpu, NvU32 num, const char* pFormat, ...); + +NV_STATUS osNvifInitialize(OBJGPU *pGpu); + +NV_STATUS osNvifMethod(OBJGPU *pGpu, NvU32 func, + NvU32 subFunc, void *pInParam, + NvU16 inParamSize, NvU32 *pOutStatus, + void *pOutData, NvU16 *pOutDataSize); + +NV_STATUS osCreateMemFromOsDescriptor(OBJGPU *pGpu, NvP64 pDescriptor, + NvHandle hClient, NvU32 flags, + NvU64 *pLimit, + MEMORY_DESCRIPTOR **ppMemDesc, + NvU32 descriptorType, + RS_PRIV_LEVEL privilegeLevel); + +void* osMapKernelSpace(RmPhysAddr Start, + NvU64 Size, + NvU32 Mode, + NvU32 Protect); + +void osUnmapKernelSpace(void *addr, NvU64 size); + + +void *osMapIOSpace(RmPhysAddr start, + NvU64 size_bytes, + void ** priv, + NvU32 user, + NvU32 mode, + NvU32 Protect); + +void osUnmapIOSpace(void *pAddress, + NvU64 Size, + void *pData, + NvU32 User); + +NvBool osTestPcieExtendedConfigAccess(void *handle, NvU32 offset); + +NvU32 osGetCpuFrequency(void); + +void osIoWriteByte(NvU32 Address, NvU8 Value); + +NvU8 osIoReadByte(NvU32 Address); + +void osIoWriteWord(NvU32 Address, NvU16 Value); + +NvU16 osIoReadWord(NvU32 Address); + +void osIoWriteDword(NvU32 port, NvU32 data); + +NvU32 osIoReadDword(NvU32 port); + +// OS functions to get memory pages + +NV_STATUS osGetNumMemoryPages (MEMORY_DESCRIPTOR *pMemDesc, NvU32 *pNumPages); +NV_STATUS osGetMemoryPages (MEMORY_DESCRIPTOR *pMemDesc, void *pPages, NvU32 *pNumPages); + +NV_STATUS osGetAcpiTable(NvU32 tableSignature, + void **ppTable, + NvU32 tableSize, + NvU32 *retSize); + +NV_STATUS osInitGetAcpiTable(void); + +NV_STATUS osGetIbmnpuGenregInfo(OS_GPU_INFO *pArg1, + NvU64 *pArg2, + NvU64 *pArg3); + +NV_STATUS osGetIbmnpuRelaxedOrderingMode(OS_GPU_INFO *pArg1, + NvBool *pArg2); + +void osWaitForIbmnpuRsync(OS_GPU_INFO *pArg1); + +NV_STATUS osGetAcpiRsdpFromUefi(NvU32 *pRsdpAddr); + +NV_STATUS osCreateNanoTimer(OS_GPU_INFO *pArg1, + void *tmrEvent, + void **tmrUserData); + +NV_STATUS osStartNanoTimer(OS_GPU_INFO *pArg1, + void *pTimer, + NvU64 timeNs); + +NV_STATUS osCancelNanoTimer(OS_GPU_INFO *pArg1, + void *pArg2); + +NV_STATUS osDestroyNanoTimer(OS_GPU_INFO *pArg1, + void *pArg2); + +NV_STATUS osSchedule(void); + +NV_STATUS osDmaMapPages(OS_GPU_INFO *pArg1, + MEMORY_DESCRIPTOR *pMemDesc); + +NV_STATUS osDmaUnmapPages(OS_GPU_INFO *pArg1, + MEMORY_DESCRIPTOR *pMemDesc); + +void osDmaSetAddressSize(OS_GPU_INFO *pArg1, + NvU32 bits); + +void osClientGcoffDisallowRefcount(OS_GPU_INFO *pArg1, + NvBool arg2); + +NV_STATUS osTegraSocGpioGetPinState(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3); + +void osTegraSocGpioSetPinState(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3); + +NV_STATUS osTegraSocGpioSetPinDirection(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3); + +NV_STATUS 
osTegraSocGpioGetPinDirection(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3); + +NV_STATUS osTegraSocGpioGetPinNumber(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 *pArg3); + +NV_STATUS osTegraSocGpioGetPinInterruptStatus(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3, + NvBool *pArg4); + +NV_STATUS osTegraSocGpioSetPinInterrupt(OS_GPU_INFO *pArg1, + NvU32 arg2, + NvU32 arg3); + +NV_STATUS osTegraSocResetMipiCal(OS_GPU_INFO *pArg1); + +NV_STATUS osGetTegraNumDpAuxInstances(OS_GPU_INFO *pArg1, + NvU32 *pArg2); + +NvU32 osTegraSocFuseRegRead(NvU32 addr); + +NV_STATUS osGetCurrentIrqPrivData(OS_GPU_INFO *pArg1, + NvU32 *pArg2); + +NV_STATUS osGetTegraBrightnessLevel(OS_GPU_INFO *pArg1, + NvU32 *pArg2); + +NV_STATUS osSetTegraBrightnessLevel(OS_GPU_INFO *pArg1, + NvU32 arg2); + +NvBool osIsVga(OS_GPU_INFO *pArg1, + NvBool bIsGpuPrimaryDevice); + +void osInitOSHwInfo(OBJGPU *pGpu); + +void osDestroyOSHwInfo(OBJGPU *pGpu); + +NV_STATUS osUserHandleToKernelPtr(NvU32 hClient, + NvP64 Handle, + NvP64 *pHandle); + +NV_STATUS osGetSmbiosTable(void **pBaseVAddr, NvU64 *pLength, + NvU64 *pNumSubTypes, NvU32 *pVersion); + +void osPutSmbiosTable(void *pBaseVAddr, NvU64 length); + +NvBool osIsNvswitchPresent(void); + +void osQueueMMUFaultHandler(OBJGPU *); + +NvBool osIsGpuAccessible(OBJGPU *pGpu); + +void osGpuWriteReg008(OBJGPU *pGpu, + NvU32 thisAddress, + NvV8 thisValue); + +void osDevWriteReg008(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV8 thisValue); + +NvU8 osGpuReadReg008(OBJGPU *pGpu, + NvU32 thisAddress); + +NvU8 osDevReadReg008(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress); + +void osGpuWriteReg016(OBJGPU *pGpu, + NvU32 thisAddress, + NvV16 thisValue); + +void osDevWriteReg016(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV16 thisValue); + +NvU16 osGpuReadReg016(OBJGPU *pGpu, + NvU32 thisAddress); + +NvU16 osDevReadReg016(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress); + +void osGpuWriteReg032(OBJGPU *pGpu, + NvU32 thisAddress, + NvV32 thisValue); + +void osDevWriteReg032(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress, + NvV32 thisValue); + +NvU32 osGpuReadReg032(OBJGPU *pGpu, + NvU32 thisAddress); + +NvU32 osDevReadReg032(OBJGPU *pGpu, + DEVICE_MAPPING *pMapping, + NvU32 thisAddress); + +NV_STATUS osIsr(OBJGPU *pGpu); + +NV_STATUS osSanityTestIsr(OBJGPU *pGpu); + +NV_STATUS osInitMapping(OBJGPU *pGpu); + +NV_STATUS osVerifySystemEnvironment(OBJGPU *pGpu); + +NV_STATUS osSanityTestIsr(OBJGPU *pGpu); + +NvBool osDmabufIsSupported(void); + +static NV_INLINE NV_STATUS isrWrapper(NvBool testIntr, OBJGPU *pGpu) +{ + // + // If pGpu->testIntr is not true then use original osIsr function. + // On VMware Esxi 6.0, both rm isr and dpc handlers are called from Esxi 6.0 + // dpc handler. Because of this when multiple GPU are present in the system, + // we may get a call to rm_isr routine for a hw interrupt corresponding to a + // previously initialized GPU. In that case we need to call original osIsr + // function. 
+ // + + NV_STATUS status = NV_OK; + + if (testIntr) + { + status = osSanityTestIsr(pGpu); + } + else + { + status = osIsr(pGpu); + } + + return status; +} + +#define OS_PCIE_CAP_MASK_REQ_ATOMICS_32 NVBIT(0) +#define OS_PCIE_CAP_MASK_REQ_ATOMICS_64 NVBIT(1) +#define OS_PCIE_CAP_MASK_REQ_ATOMICS_128 NVBIT(2) + +// Os 1Hz timer callback functions +NV_STATUS osInit1HzCallbacks(OBJTMR *pTmr); +NV_STATUS osDestroy1HzCallbacks(OBJTMR *pTmr); +NV_STATUS osSchedule1SecondCallback(OBJGPU *pGpu, OS1HZPROC callback, void *pData, NvU32 flags); +void osRemove1SecondRepeatingCallback(OBJGPU *pGpu, OS1HZPROC callback, void *pData); +NvBool osRun1HzCallbacksNow(OBJGPU *pGpu); +NV_STATUS osDoFunctionLevelReset(OBJGPU *pGpu); + +void vgpuDevWriteReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV32 thisValue, + NvBool *vgpuHandled +); + +NvU32 vgpuDevReadReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvBool *vgpuHandled +); + +void osInitSystemStaticConfig(SYS_STATIC_CONFIG *); + +NvU32 osGetReleaseAssertBehavior(void); +void osDbgBugCheckOnAssert(void); + +NvBool osBugCheckOnTimeoutEnabled(void); + +// +// TODO: to clean-up the rest of the list +// +OSAttachGpu osAttachGpu; +OSDpcAttachGpu osDpcAttachGpu; +OSDpcDetachGpu osDpcDetachGpu; +OSHandleGpuLost osHandleGpuLost; +OSHandleGpuSurpriseRemoval osHandleGpuSurpriseRemoval; +OSInitScalabilityOptions osInitScalabilityOptions; +OSQueueDpc osQueueDpc; +OSRmInitRm osRmInitRm; +OSSetEvent osSetEvent; +OSEventNotification osEventNotification; +OSEventNotificationWithInfo osEventNotificationWithInfo; +OSObjectEventNotification osObjectEventNotification; +OSNotifyEvent osNotifyEvent; +OSFlushCpuWriteCombineBuffer osFlushCpuWriteCombineBuffer; +OSDeviceClassToDeviceName osDeviceClassToDeviceName; +OSDelay osDelay; +OSSpinLoop osSpinLoop; +OSDelayUs osDelayUs; +OSDelayNs osDelayNs; +OSGetCpuCount osGetCpuCount; +OSGetMaximumCoreCount osGetMaximumCoreCount; +OSGetCurrentProcessorNumber osGetCurrentProcessorNumber; +OSGetVersionDump osGetVersionDump; + +OSMemAddFilter osMemAddFilter; +OSMemRemoveFilter osMemRemoveFilter; +OSMemGetFilter osMemGetFilter; + +OSAllocPagesInternal osAllocPagesInternal; +OSFreePagesInternal osFreePagesInternal; + +OSGetPageSize osGetPageSize; +OSNumaMemblockSize osNumaMemblockSize; +OSNumaOnliningEnabled osNumaOnliningEnabled; +OSAllocPagesNode osAllocPagesNode; +OSAllocAcquirePage osAllocAcquirePage; +OSAllocReleasePage osAllocReleasePage; +OSGetPageRefcount osGetPageRefcount; +OSCountTailPages osCountTailPages; +OSVirtualToPhysicalAddr osKernVirtualToPhysicalAddr; +OSLockMem osLockMem; +OSUnlockMem osUnlockMem; +OSMapSystemMemory osMapSystemMemory; +OSUnmapSystemMemory osUnmapSystemMemory; +OSWriteRegistryDword osWriteRegistryDword; +OSReadRegistryDword osReadRegistryDword; +OSReadRegistryString osReadRegistryString; +OSWriteRegistryBinary osWriteRegistryBinary; +OSWriteRegistryVolatile osWriteRegistryVolatile; +OSReadRegistryVolatile osReadRegistryVolatile; +OSReadRegistryVolatileSize osReadRegistryVolatileSize; +OSReadRegistryBinary osReadRegistryBinary; +OSReadRegistryDwordBase osReadRegistryDwordBase; +OSReadRegistryStringBase osReadRegistryStringBase; +OSPackageRegistry osPackageRegistry; +OSUnpackageRegistry osUnpackageRegistry; +NV_STATUS osDestroyRegistry(void); +OSMapPciMemoryUser osMapPciMemoryUser; +OSUnmapPciMemoryUser osUnmapPciMemoryUser; +OSMapPciMemoryKernelOld osMapPciMemoryKernelOld; +OSMapPciMemoryKernel64 osMapPciMemoryKernel64; +OSUnmapPciMemoryKernelOld osUnmapPciMemoryKernelOld; +OSUnmapPciMemoryKernel64 
osUnmapPciMemoryKernel64; +OSMapGPU osMapGPU; +OSUnmapGPU osUnmapGPU; +OSLockShouldToggleInterrupts osLockShouldToggleInterrupts; + +OSGetPerformanceCounter osGetPerformanceCounter; + +OSI2CClosePorts osI2CClosePorts; +OSWriteI2CBufferDirect osWriteI2CBufferDirect; +OSReadI2CBufferDirect osReadI2CBufferDirect; +OSI2CTransfer osI2CTransfer; +OSSetGpuRailVoltage osSetGpuRailVoltage; +OSGetGpuRailVoltage osGetGpuRailVoltage; +OSGetChipInfo osGetChipInfo; +OSGetGpuRailVoltageInfo osGetGpuRailVoltageInfo; + +OSGetCurrentProcess osGetCurrentProcess; +OSGetCurrentProcessName osGetCurrentProcessName; +OSGetCurrentThread osGetCurrentThread; +OSAttachToProcess osAttachToProcess; +OSDetachFromProcess osDetachFromProcess; +OSPollHotkeyState osPollHotkeyState; + +OSIsRaisedIRQL osIsRaisedIRQL; +OSIsISR osIsISR; +OSGetDriverBlock osGetDriverBlock; + +OSInitGpuMgr osInitGpuMgr; + +OSSyncWithRmDestroy osSyncWithRmDestroy; +OSSyncWithGpuDestroy osSyncWithGpuDestroy; + +OSModifyGpuSwStatePersistence osModifyGpuSwStatePersistence; + +OSPexRecoveryCallback osPexRecoveryCallback; +OSHandleDeferredRecovery osHandleDeferredRecovery; +OSIsSwPreInitOnly osIsSwPreInitOnly; +OSGetCarveoutInfo osGetCarveoutInfo; +OSGetVPRInfo osGetVPRInfo; +OSAllocInVPR osAllocInVPR; +OSGetGenCarveout osGetGenCarveout; +OsGetSystemCpuLogicalCoreCounts osGetSystemCpuLogicalCoreCounts; +OsGetSystemCpuC0AndAPerfCounters osGetSystemCpuC0AndAPerfCounters; +OsEnableCpuPerformanceCounters osEnableCpuPerformanceCounters; +OsCpuDpcObjInit osCpuDpcObjInit; +OsCpuDpcObjQueue osCpuDpcObjQueue; +OsCpuDpcObjFree osCpuDpcObjFree; +OsSystemGetBatteryDrain osSystemGetBatteryDrain; +OSGC6PowerControl osGC6PowerControl; +OSReadPFPciConfigInVF osReadPFPciConfigInVF; + +// +// When the new basic lock model is enabled then the following legacy RM +// system semaphore routines are stubbed. +// +#define osAllocRmSema(s) (NV_OK) +#define osFreeRmSema(s) +#define osIsAcquiredRmSema(s) (NV_TRUE) +#define osIsRmSemaOwner(s) (NV_TRUE) +#define osCondReleaseRmSema(s) (NV_TRUE) +#define osAcquireRmSemaForced(s) osAcquireRmSema(s) +#define osGpuLockSetOwner(s,t) (NV_OK) + +// +// This version of osAcquireRmSema asserts that the GPUs lock is held when the +// basic lock model is enabled. This should help catch newly introduced +// dependencies on the legacy RM system semaphore that do not have +// corresponding corresponding basic lock model support. +// +OSAcquireRmSema osAcquireRmSema; +OSAcquireRmSema osAcquireRmSemaForced; + +OSApiLockAcquireConfigureFlags osApiLockAcquireConfigureFlags; +OSGpuLocksQueueRelease osGpuLocksQueueRelease; +OSCondAcquireRmSema osCondAcquireRmSema; +OSReleaseRmSema osReleaseRmSema; + +OSFlushLog osFlushLog; + +#define MODS_ARCH_ERROR_PRINTF(format, ...) +#define MODS_ARCH_INFO_PRINTF(format, ...) +#define MODS_ARCH_REPORT(event, format, ...) + + +#define osAllocPages(a) osAllocPagesInternal(a) +#define osFreePages(a) osFreePagesInternal(a) + +extern NV_STATUS constructObjOS(OBJOS *); +extern void osInitObjOS(OBJOS *); + +extern OSGetTimeoutParams osGetTimeoutParams; +extern OSGetSimulationMode osGetSimulationMode; + +// +// NV OS simulation mode defines +// Keep in sync with gpu.h SIM MODE defines until osGetSimulationMode is deprecated. 
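osGetSimulationMode() above reports which platform RM is running on using the NV_SIM_MODE_* values defined in the block that follows. A hedged usage sketch; the timeout-scaling policy is invented purely for illustration and is not the driver's actual behavior:

/* Illustrative only: scale a timeout when running on a simulator instead of silicon. */
static NvU32 myScaleTimeoutUs(NvU32 baseTimeoutUs)
{
    NvU32 simMode = osGetSimulationMode();

    if (simMode == NV_SIM_MODE_HARDWARE)
        return baseTimeoutUs;            /* real silicon: keep the normal timeout */

    /* RTL, C-model and AModel runs are orders of magnitude slower than hardware */
    return baseTimeoutUs * 1000;
}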
+// +#ifndef NV_SIM_MODE_DEFS +#define NV_SIM_MODE_DEFS +#define NV_SIM_MODE_HARDWARE 0U +#define NV_SIM_MODE_RTL 1U +#define NV_SIM_MODE_CMODEL 2U +#define NV_SIM_MODE_MODS_AMODEL 3U +#define NV_SIM_MODE_TEGRA_FPGA 4U +#define NV_SIM_MODE_INVALID (~0x0U) +#endif + +// +// NV Heap control defines +// +#define NV_HEAP_CONTROL_INTERNAL 0 +#define NV_HEAP_CONTROL_EXTERNAL 1 + +// osDelayUs flags +#define OSDELAYUS_FLAGS_USE_TMR_DELAY NVBIT(0) + +// osEventNotification notifyIndex all value +#define OS_EVENT_NOTIFICATION_INDEX_ALL (0xffffffff) + +// tells osEventNotification to only issue notifies/events on this subdev +#define OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV (0x10000000) + +// Notify callback action +#define NV_OS_WRITE_THEN_AWAKEN 0x00000001 + +// +// Include per-OS definitions +// +// #ifdef out for nvoctrans, this hides includes of system headers which +// break the tool. +// +// TODO - we should delete the per-OS os_custom.h files exposed to +// OS-agnostic code. Cross-OS code shouldn't pull in per-OS headers or +// per-OS definitions. +// +#include "os_custom.h" + +#define NV_SEMA_RELEASE_SUCCEED 0 // lock released, no waiting thread to notify +#define NV_SEMA_RELEASE_FAILED 1 // failed to release lock +#define NV_SEMA_RELEASE_NOTIFIED 2 // lock released, notify waiting thread +#define NV_SEMA_RELEASE_DPC_QUEUED 3 // lock released, queue DPC to notify waiting thread +#define NV_SEMA_RELEASE_DPC_FAILED 4 // lock released, but failed to queue a DPC to notify waiting thread + + #define ADD_PROBE(pGpu, probeId) + +#define IS_SIM_MODS(pOS) (pOS->bIsSimMods) + +#endif // _OS_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_OS_NVOC_H_ diff --git a/src/nvidia/generated/g_os_odb.h b/src/nvidia/generated/g_os_odb.h new file mode 100644 index 000000000..1bdfa915d --- /dev/null +++ b/src/nvidia/generated/g_os_odb.h @@ -0,0 +1,36 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Provides OS object boilerplate and RTTI. +// +// Profile: shipping-gpus-openrm +// Haldef: os.def +// Template: templates/gt_eng_odb.h +// + +#ifndef _G_OS_ODB_H_ +#define _G_OS_ODB_H_ + +#include "core/core.h" +#include "g_os_hal.h" + + +#define __OS_OBJECT_BASE_DEFINITION \ + const struct NVOC_RTTI *__nvoc_rtti; \ + Object __nvoc_base_Object; \ + Object *__nvoc_pbase_Object; \ + PDB_PROP_OS pdb + + +extern const struct NVOC_CLASS_DEF __iom_class_def_OBJOS; +#define __nvoc_class_def_OBJOS __iom_class_def_OBJOS + +#ifndef __nvoc_class_id_OBJOS +#define __nvoc_class_id_OBJOS 0xaa1d70 +#endif /* __nvoc_class_id_OBJOS */ + +#define __objCreate_OBJOS(ppNewObj, pParent, flags) \ + __iom_objCreate_OBJOS((ppNewObj), staticCast((pParent), Dynamic), flags) +NV_STATUS __iom_objCreate_OBJOS(POBJOS *ppNewObj, Dynamic *pParent, NvU32 flags); + + +#endif // _G_OS_ODB_H_ diff --git a/src/nvidia/generated/g_os_private.h b/src/nvidia/generated/g_os_private.h new file mode 100644 index 000000000..47a3154b6 --- /dev/null +++ b/src/nvidia/generated/g_os_private.h @@ -0,0 +1,55 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Private HAL support for OS. 
+// +// Profile: shipping-gpus-openrm +// Haldef: os.def +// Template: templates/gt_eng_private.h +// + +#ifndef _G_OS_PRIVATE_H_ +#define _G_OS_PRIVATE_H_ + +#include "g_os_hal.h" + + + + + + + +// +// OS's object-level *non-static* interface functions (static ones are below) +// + + + +#if defined(RMCFG_ENGINE_SETUP) // for use by hal init only + + + + + + + + + + + +// +// Setup OS's hal interface function pointers +// + + + + + +#endif // RMCFG_ENGINE_SETUP + + + +// Were any _MOCK interfaces generated into g_os_private.h ? +#define OS_MOCK_FUNCTIONS_GENERATED 0 + + +#endif // _G_OS_PRIVATE_H_ diff --git a/src/nvidia/generated/g_p2p_api_nvoc.c b/src/nvidia/generated/g_p2p_api_nvoc.c new file mode 100644 index 000000000..7fea1ea77 --- /dev/null +++ b/src/nvidia/generated/g_p2p_api_nvoc.c @@ -0,0 +1,294 @@ +#define NVOC_P2P_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_p2p_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x3982b7 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_P2PApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_P2PApi(P2PApi*); +void __nvoc_init_funcTable_P2PApi(P2PApi*); +NV_STATUS __nvoc_ctor_P2PApi(P2PApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_P2PApi(P2PApi*); +void __nvoc_dtor_P2PApi(P2PApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_P2PApi; + +static const struct NVOC_RTTI __nvoc_rtti_P2PApi_P2PApi = { + /*pClassDef=*/ &__nvoc_class_def_P2PApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_P2PApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_P2PApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(P2PApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_P2PApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(P2PApi, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_P2PApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(P2PApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_P2PApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(P2PApi, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_P2PApi = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_P2PApi_P2PApi, + &__nvoc_rtti_P2PApi_RmResource, + &__nvoc_rtti_P2PApi_RmResourceCommon, + &__nvoc_rtti_P2PApi_RsResource, + &__nvoc_rtti_P2PApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_P2PApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(P2PApi), + /*classId=*/ classId(P2PApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "P2PApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) 
&__nvoc_objCreateDynamic_P2PApi, + /*pCastInfo=*/ &__nvoc_castinfo_P2PApi, + /*pExportInfo=*/ &__nvoc_export_info_P2PApi +}; + +static NvBool __nvoc_thunk_RmResource_p2papiShareCallback(struct P2PApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_p2papiCheckMemInterUnmap(struct P2PApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_P2PApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_p2papiControl(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_p2papiGetMemInterMapParams(struct P2PApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_P2PApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_p2papiGetMemoryMappingDescriptor(struct P2PApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_P2PApi_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_p2papiGetRefCount(struct P2PApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_p2papiControlFilter(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_p2papiAddAdditionalDependants(struct RsClient *pClient, struct P2PApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_p2papiUnmap(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_p2papiControl_Prologue(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_p2papiCanCopy(struct P2PApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_p2papiMapTo(struct P2PApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset), pParams); +} + +static void 
__nvoc_thunk_RsResource_p2papiPreDestruct(struct P2PApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_p2papiUnmapFrom(struct P2PApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_p2papiControl_Epilogue(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_p2papiControlLookup(struct P2PApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_p2papiMap(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_p2papiAccessCallback(struct P2PApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_P2PApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_P2PApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_P2PApi(P2PApi *pThis) { + __nvoc_p2papiDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_P2PApi(P2PApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_P2PApi(P2PApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_P2PApi_fail_RmResource; + __nvoc_init_dataField_P2PApi(pThis); + + status = __nvoc_p2papiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_P2PApi_fail__init; + goto __nvoc_ctor_P2PApi_exit; // Success + +__nvoc_ctor_P2PApi_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_P2PApi_fail_RmResource: +__nvoc_ctor_P2PApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_P2PApi_1(P2PApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__p2papiShareCallback__ = &__nvoc_thunk_RmResource_p2papiShareCallback; + + pThis->__p2papiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_p2papiCheckMemInterUnmap; + + pThis->__p2papiControl__ = &__nvoc_thunk_RsResource_p2papiControl; + + pThis->__p2papiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_p2papiGetMemInterMapParams; + + pThis->__p2papiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_p2papiGetMemoryMappingDescriptor; + + 
pThis->__p2papiGetRefCount__ = &__nvoc_thunk_RsResource_p2papiGetRefCount; + + pThis->__p2papiControlFilter__ = &__nvoc_thunk_RsResource_p2papiControlFilter; + + pThis->__p2papiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_p2papiAddAdditionalDependants; + + pThis->__p2papiUnmap__ = &__nvoc_thunk_RsResource_p2papiUnmap; + + pThis->__p2papiControl_Prologue__ = &__nvoc_thunk_RmResource_p2papiControl_Prologue; + + pThis->__p2papiCanCopy__ = &__nvoc_thunk_RsResource_p2papiCanCopy; + + pThis->__p2papiMapTo__ = &__nvoc_thunk_RsResource_p2papiMapTo; + + pThis->__p2papiPreDestruct__ = &__nvoc_thunk_RsResource_p2papiPreDestruct; + + pThis->__p2papiUnmapFrom__ = &__nvoc_thunk_RsResource_p2papiUnmapFrom; + + pThis->__p2papiControl_Epilogue__ = &__nvoc_thunk_RmResource_p2papiControl_Epilogue; + + pThis->__p2papiControlLookup__ = &__nvoc_thunk_RsResource_p2papiControlLookup; + + pThis->__p2papiMap__ = &__nvoc_thunk_RsResource_p2papiMap; + + pThis->__p2papiAccessCallback__ = &__nvoc_thunk_RmResource_p2papiAccessCallback; +} + +void __nvoc_init_funcTable_P2PApi(P2PApi *pThis) { + __nvoc_init_funcTable_P2PApi_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_P2PApi(P2PApi *pThis) { + pThis->__nvoc_pbase_P2PApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_P2PApi(pThis); +} + +NV_STATUS __nvoc_objCreate_P2PApi(P2PApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + P2PApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(P2PApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(P2PApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_P2PApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_P2PApi(pThis); + status = __nvoc_ctor_P2PApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_P2PApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_P2PApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_P2PApi(P2PApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_P2PApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_p2p_api_nvoc.h b/src/nvidia/generated/g_p2p_api_nvoc.h new file mode 100644 index 000000000..8c140ef74 --- /dev/null +++ b/src/nvidia/generated/g_p2p_api_nvoc.h @@ -0,0 +1,278 @@ +#ifndef 
_G_P2P_API_NVOC_H_ +#define _G_P2P_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_p2p_api_nvoc.h" + +#ifndef _P2P_API_H_ +#define _P2P_API_H_ + +#include "core/core.h" +#include "gpu/mem_mgr/heap.h" +#include "rmapi/client.h" +#include "rmapi/mapping_list.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "platform/p2p/p2p_caps.h" + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + + +struct _def_client_p2p_dma_mapping_info +{ + PCLI_DMA_MAPPING_INFO pPeer1Info; + PCLI_DMA_MAPPING_INFO pPeer2Info; +}; + +typedef struct _def_client_p2p_dma_mapping_info CLI_P2P_DMA_MAPPING_INFO, *PCLI_P2P_DMA_MAPPING_INFO; + +MAKE_MAP(CLI_P2P_DMA_MAPPING_INFO_MAP, CLI_P2P_DMA_MAPPING_INFO); + + +// +// Definitions for P2PApi.attributes. 
+// Need to keep NV_P2PAPI_ATTRIBUTES_CONNECTION_TYPE sync with P2P_CONNECTIVITY in p2p_caps.h +// +#define NV_P2PAPI_ATTRIBUTES_CONNECTION_TYPE 3:0 +#define NV_P2PAPI_ATTRIBUTES_CONNECTION_TYPE_UNKNOWN 0x0 +#define NV_P2PAPI_ATTRIBUTES_CONNECTION_TYPE_PCIE 0x1 +#define NV_P2PAPI_ATTRIBUTES_CONNECTION_TYPE_PCIE_BAR1 0x2 +#define NV_P2PAPI_ATTRIBUTES_CONNECTION_TYPE_NVLINK 0x3 +#define NV_P2PAPI_ATTRIBUTES_CONNECTION_TYPE_NVLINK_INDIRECT 0x4 +#define NV_P2PAPI_ATTRIBUTES_CONNECTION_TYPE_C2C 0x5 +#define NV_P2PAPI_ATTRIBUTES_LINK_TYPE 4:4 +#define NV_P2PAPI_ATTRIBUTES_LINK_TYPE_GPA 0x0 +#define NV_P2PAPI_ATTRIBUTES_LINK_TYPE_SPA 0x1 + +#ifdef NVOC_P2P_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct P2PApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct P2PApi *__nvoc_pbase_P2PApi; + NvBool (*__p2papiShareCallback__)(struct P2PApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__p2papiCheckMemInterUnmap__)(struct P2PApi *, NvBool); + NV_STATUS (*__p2papiControl__)(struct P2PApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__p2papiGetMemInterMapParams__)(struct P2PApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__p2papiGetMemoryMappingDescriptor__)(struct P2PApi *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__p2papiGetRefCount__)(struct P2PApi *); + NV_STATUS (*__p2papiControlFilter__)(struct P2PApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__p2papiAddAdditionalDependants__)(struct RsClient *, struct P2PApi *, RsResourceRef *); + NV_STATUS (*__p2papiUnmap__)(struct P2PApi *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__p2papiControl_Prologue__)(struct P2PApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__p2papiCanCopy__)(struct P2PApi *); + NV_STATUS (*__p2papiMapTo__)(struct P2PApi *, RS_RES_MAP_TO_PARAMS *); + void (*__p2papiPreDestruct__)(struct P2PApi *); + NV_STATUS (*__p2papiUnmapFrom__)(struct P2PApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__p2papiControl_Epilogue__)(struct P2PApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__p2papiControlLookup__)(struct P2PApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__p2papiMap__)(struct P2PApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__p2papiAccessCallback__)(struct P2PApi *, struct RsClient *, void *, RsAccessRight); + NODE Node; + struct Subdevice *peer1; + struct Subdevice *peer2; + NvU32 peerId1; + NvU32 peerId2; + NvU32 attributes; + CLI_P2P_DMA_MAPPING_INFO_MAP dmaMappingMap; +}; + +#ifndef __NVOC_CLASS_P2PApi_TYPEDEF__ +#define __NVOC_CLASS_P2PApi_TYPEDEF__ +typedef struct P2PApi P2PApi; +#endif /* __NVOC_CLASS_P2PApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_P2PApi +#define __nvoc_class_id_P2PApi 0x3982b7 +#endif /* __nvoc_class_id_P2PApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_P2PApi; + +#define __staticCast_P2PApi(pThis) \ + ((pThis)->__nvoc_pbase_P2PApi) + +#ifdef __nvoc_p2p_api_h_disabled +#define __dynamicCast_P2PApi(pThis) ((P2PApi*)NULL) +#else //__nvoc_p2p_api_h_disabled +#define __dynamicCast_P2PApi(pThis) \ + 
((P2PApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(P2PApi))) +#endif //__nvoc_p2p_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_P2PApi(P2PApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_P2PApi(P2PApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_P2PApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_P2PApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define p2papiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) p2papiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define p2papiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) p2papiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define p2papiControl(pResource, pCallContext, pParams) p2papiControl_DISPATCH(pResource, pCallContext, pParams) +#define p2papiGetMemInterMapParams(pRmResource, pParams) p2papiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define p2papiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) p2papiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define p2papiGetRefCount(pResource) p2papiGetRefCount_DISPATCH(pResource) +#define p2papiControlFilter(pResource, pCallContext, pParams) p2papiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define p2papiAddAdditionalDependants(pClient, pResource, pReference) p2papiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define p2papiUnmap(pResource, pCallContext, pCpuMapping) p2papiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define p2papiControl_Prologue(pResource, pCallContext, pParams) p2papiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define p2papiCanCopy(pResource) p2papiCanCopy_DISPATCH(pResource) +#define p2papiMapTo(pResource, pParams) p2papiMapTo_DISPATCH(pResource, pParams) +#define p2papiPreDestruct(pResource) p2papiPreDestruct_DISPATCH(pResource) +#define p2papiUnmapFrom(pResource, pParams) p2papiUnmapFrom_DISPATCH(pResource, pParams) +#define p2papiControl_Epilogue(pResource, pCallContext, pParams) p2papiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define p2papiControlLookup(pResource, pParams, ppEntry) p2papiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define p2papiMap(pResource, pCallContext, pParams, pCpuMapping) p2papiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define p2papiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) p2papiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool p2papiShareCallback_DISPATCH(struct P2PApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__p2papiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS p2papiCheckMemInterUnmap_DISPATCH(struct P2PApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__p2papiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS p2papiControl_DISPATCH(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__p2papiControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS p2papiGetMemInterMapParams_DISPATCH(struct P2PApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS 
*pParams) { + return pRmResource->__p2papiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS p2papiGetMemoryMappingDescriptor_DISPATCH(struct P2PApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__p2papiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 p2papiGetRefCount_DISPATCH(struct P2PApi *pResource) { + return pResource->__p2papiGetRefCount__(pResource); +} + +static inline NV_STATUS p2papiControlFilter_DISPATCH(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__p2papiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void p2papiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct P2PApi *pResource, RsResourceRef *pReference) { + pResource->__p2papiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS p2papiUnmap_DISPATCH(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__p2papiUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS p2papiControl_Prologue_DISPATCH(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__p2papiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool p2papiCanCopy_DISPATCH(struct P2PApi *pResource) { + return pResource->__p2papiCanCopy__(pResource); +} + +static inline NV_STATUS p2papiMapTo_DISPATCH(struct P2PApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__p2papiMapTo__(pResource, pParams); +} + +static inline void p2papiPreDestruct_DISPATCH(struct P2PApi *pResource) { + pResource->__p2papiPreDestruct__(pResource); +} + +static inline NV_STATUS p2papiUnmapFrom_DISPATCH(struct P2PApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__p2papiUnmapFrom__(pResource, pParams); +} + +static inline void p2papiControl_Epilogue_DISPATCH(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__p2papiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS p2papiControlLookup_DISPATCH(struct P2PApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__p2papiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS p2papiMap_DISPATCH(struct P2PApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__p2papiMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool p2papiAccessCallback_DISPATCH(struct P2PApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__p2papiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS p2papiConstruct_IMPL(struct P2PApi *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_p2papiConstruct(arg_pResource, arg_pCallContext, arg_pParams) p2papiConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void p2papiDestruct_IMPL(struct P2PApi *pResource); +#define __nvoc_p2papiDestruct(pResource) p2papiDestruct_IMPL(pResource) +#undef PRIVATE_FIELD + + +typedef struct P2PApi *PCLI_P2P_INFO; // RS-TODO: Delete +MAKE_LIST(PCLI_P2P_INFO_LIST, 
PCLI_P2P_INFO); + +// Add a dma mapping info to the p2p object that maps the peer GPUs +NV_STATUS CliAddP2PDmaMappingInfo (NvHandle, NvHandle, NvU32, NvHandle, NvU32, PCLI_DMA_MAPPING_INFO); + +// Free the CliP2PList of a given client +NV_STATUS CliFreeP2PList (NvHandle); + +// Free the p2p infos of a subdevice +NV_STATUS CliFreeSubDeviceP2PList (struct Subdevice *, CALL_CONTEXT *); + +// Unmap the dma mappings associated with a p2p object and free the p2p pDmaMappingList +NV_STATUS CliFreeP2PDmaMappingList (NvHandle, PCLI_P2P_INFO); + +// Delete the dma mapping info from the p2p object when the memory is no longer peer mapped +NV_STATUS CliDelP2PDmaMappingInfo (NvHandle, PCLI_DMA_MAPPING_INFO); + +// Update the dma mapping info tracked in the p2p object's dma mapping list +NV_STATUS CliUpdateP2PDmaMappingInList (NvHandle, PCLI_DMA_MAPPING_INFO, NvU64); + +// Remove the P2P mapping corresponding to the P2P info passed in +NV_STATUS CliInvalidateP2PInfo (NvHandle, PCLI_P2P_INFO); + +#endif // _P2P_API_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_P2P_API_NVOC_H_ diff --git a/src/nvidia/generated/g_phys_mem_nvoc.c b/src/nvidia/generated/g_phys_mem_nvoc.c new file mode 100644 index 000000000..90bc9c364 --- /dev/null +++ b/src/nvidia/generated/g_phys_mem_nvoc.c @@ -0,0 +1,323 @@ +#define NVOC_PHYS_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_phys_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x5fccf2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_PhysicalMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_PhysicalMemory(PhysicalMemory*); +void __nvoc_init_funcTable_PhysicalMemory(PhysicalMemory*); +NV_STATUS __nvoc_ctor_PhysicalMemory(PhysicalMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_PhysicalMemory(PhysicalMemory*); +void __nvoc_dtor_PhysicalMemory(PhysicalMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_PhysicalMemory; + +static const struct NVOC_RTTI __nvoc_rtti_PhysicalMemory_PhysicalMemory = { + /*pClassDef=*/ &__nvoc_class_def_PhysicalMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_PhysicalMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_PhysicalMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PhysicalMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_PhysicalMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PhysicalMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_PhysicalMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PhysicalMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), 
+}; + +static const struct NVOC_RTTI __nvoc_rtti_PhysicalMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PhysicalMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_PhysicalMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PhysicalMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_PhysicalMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_PhysicalMemory_PhysicalMemory, + &__nvoc_rtti_PhysicalMemory_Memory, + &__nvoc_rtti_PhysicalMemory_RmResource, + &__nvoc_rtti_PhysicalMemory_RmResourceCommon, + &__nvoc_rtti_PhysicalMemory_RsResource, + &__nvoc_rtti_PhysicalMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_PhysicalMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(PhysicalMemory), + /*classId=*/ classId(PhysicalMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "PhysicalMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_PhysicalMemory, + /*pCastInfo=*/ &__nvoc_castinfo_PhysicalMemory, + /*pExportInfo=*/ &__nvoc_export_info_PhysicalMemory +}; + +static NvBool __nvoc_thunk_PhysicalMemory_resCanCopy(struct RsResource *pPhysicalMemory) { + return physmemCanCopy((struct PhysicalMemory *)(((unsigned char *)pPhysicalMemory) - __nvoc_rtti_PhysicalMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemCheckMemInterUnmap(struct PhysicalMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemControl(struct PhysicalMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemUnmap(struct PhysicalMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemGetMemInterMapParams(struct PhysicalMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemGetMemoryMappingDescriptor(struct PhysicalMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemGetMapAddrSpace(struct PhysicalMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_physmemShareCallback(struct PhysicalMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource 
*)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_physmemControlFilter(struct PhysicalMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_physmemAddAdditionalDependants(struct RsClient *pClient, struct PhysicalMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_physmemGetRefCount(struct PhysicalMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_physmemMapTo(struct PhysicalMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_physmemControl_Prologue(struct PhysicalMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemIsReady(struct PhysicalMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemCheckCopyPermissions(struct PhysicalMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_physmemPreDestruct(struct PhysicalMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_physmemUnmapFrom(struct PhysicalMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_physmemControl_Epilogue(struct PhysicalMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_physmemControlLookup(struct PhysicalMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_physmemMap(struct PhysicalMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_PhysicalMemory_Memory.offset), pCallContext, 
pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_physmemAccessCallback(struct PhysicalMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_PhysicalMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_PhysicalMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_PhysicalMemory(PhysicalMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_PhysicalMemory(PhysicalMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_PhysicalMemory(PhysicalMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_PhysicalMemory_fail_Memory; + __nvoc_init_dataField_PhysicalMemory(pThis); + + status = __nvoc_physmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_PhysicalMemory_fail__init; + goto __nvoc_ctor_PhysicalMemory_exit; // Success + +__nvoc_ctor_PhysicalMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_PhysicalMemory_fail_Memory: +__nvoc_ctor_PhysicalMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_PhysicalMemory_1(PhysicalMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__physmemCanCopy__ = &physmemCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_PhysicalMemory_resCanCopy; + + pThis->__physmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_physmemCheckMemInterUnmap; + + pThis->__physmemControl__ = &__nvoc_thunk_Memory_physmemControl; + + pThis->__physmemUnmap__ = &__nvoc_thunk_Memory_physmemUnmap; + + pThis->__physmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_physmemGetMemInterMapParams; + + pThis->__physmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_physmemGetMemoryMappingDescriptor; + + pThis->__physmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_physmemGetMapAddrSpace; + + pThis->__physmemShareCallback__ = &__nvoc_thunk_RmResource_physmemShareCallback; + + pThis->__physmemControlFilter__ = &__nvoc_thunk_RsResource_physmemControlFilter; + + pThis->__physmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_physmemAddAdditionalDependants; + + pThis->__physmemGetRefCount__ = &__nvoc_thunk_RsResource_physmemGetRefCount; + + pThis->__physmemMapTo__ = &__nvoc_thunk_RsResource_physmemMapTo; + + pThis->__physmemControl_Prologue__ = &__nvoc_thunk_RmResource_physmemControl_Prologue; + + pThis->__physmemIsReady__ = &__nvoc_thunk_Memory_physmemIsReady; + + pThis->__physmemCheckCopyPermissions__ = &__nvoc_thunk_Memory_physmemCheckCopyPermissions; + + pThis->__physmemPreDestruct__ = &__nvoc_thunk_RsResource_physmemPreDestruct; + + pThis->__physmemUnmapFrom__ = &__nvoc_thunk_RsResource_physmemUnmapFrom; + + pThis->__physmemControl_Epilogue__ = &__nvoc_thunk_RmResource_physmemControl_Epilogue; + + pThis->__physmemControlLookup__ = &__nvoc_thunk_RsResource_physmemControlLookup; + + pThis->__physmemMap__ = &__nvoc_thunk_Memory_physmemMap; + + 
pThis->__physmemAccessCallback__ = &__nvoc_thunk_RmResource_physmemAccessCallback; +} + +void __nvoc_init_funcTable_PhysicalMemory(PhysicalMemory *pThis) { + __nvoc_init_funcTable_PhysicalMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_PhysicalMemory(PhysicalMemory *pThis) { + pThis->__nvoc_pbase_PhysicalMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_PhysicalMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_PhysicalMemory(PhysicalMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + PhysicalMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(PhysicalMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(PhysicalMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_PhysicalMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_PhysicalMemory(pThis); + status = __nvoc_ctor_PhysicalMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_PhysicalMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_PhysicalMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_PhysicalMemory(PhysicalMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_PhysicalMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_phys_mem_nvoc.h b/src/nvidia/generated/g_phys_mem_nvoc.h new file mode 100644 index 000000000..9cc28a363 --- /dev/null +++ b/src/nvidia/generated/g_phys_mem_nvoc.h @@ -0,0 +1,226 @@ +#ifndef _G_PHYS_MEM_NVOC_H_ +#define _G_PHYS_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_phys_mem_nvoc.h" + +#ifndef _PHYSICAL_MEMORY_H_ +#define _PHYSICAL_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! + * Allocator for NV01_MEMORY_LOCAL_PHYSICAL + * + * Linear view for all video memory (similar to /dev/mem). + */ +#ifdef NVOC_PHYS_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct PhysicalMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct PhysicalMemory *__nvoc_pbase_PhysicalMemory; + NvBool (*__physmemCanCopy__)(struct PhysicalMemory *); + NV_STATUS (*__physmemCheckMemInterUnmap__)(struct PhysicalMemory *, NvBool); + NV_STATUS (*__physmemControl__)(struct PhysicalMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__physmemUnmap__)(struct PhysicalMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__physmemGetMemInterMapParams__)(struct PhysicalMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__physmemGetMemoryMappingDescriptor__)(struct PhysicalMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__physmemGetMapAddrSpace__)(struct PhysicalMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__physmemShareCallback__)(struct PhysicalMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__physmemControlFilter__)(struct PhysicalMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__physmemAddAdditionalDependants__)(struct RsClient *, struct PhysicalMemory *, RsResourceRef *); + NvU32 (*__physmemGetRefCount__)(struct PhysicalMemory *); + NV_STATUS (*__physmemMapTo__)(struct PhysicalMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__physmemControl_Prologue__)(struct PhysicalMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__physmemIsReady__)(struct PhysicalMemory *); + NV_STATUS (*__physmemCheckCopyPermissions__)(struct PhysicalMemory *, struct OBJGPU *, NvHandle); + void (*__physmemPreDestruct__)(struct PhysicalMemory *); + NV_STATUS (*__physmemUnmapFrom__)(struct PhysicalMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__physmemControl_Epilogue__)(struct PhysicalMemory *, 
CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__physmemControlLookup__)(struct PhysicalMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__physmemMap__)(struct PhysicalMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__physmemAccessCallback__)(struct PhysicalMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_PhysicalMemory_TYPEDEF__ +#define __NVOC_CLASS_PhysicalMemory_TYPEDEF__ +typedef struct PhysicalMemory PhysicalMemory; +#endif /* __NVOC_CLASS_PhysicalMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PhysicalMemory +#define __nvoc_class_id_PhysicalMemory 0x5fccf2 +#endif /* __nvoc_class_id_PhysicalMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_PhysicalMemory; + +#define __staticCast_PhysicalMemory(pThis) \ + ((pThis)->__nvoc_pbase_PhysicalMemory) + +#ifdef __nvoc_phys_mem_h_disabled +#define __dynamicCast_PhysicalMemory(pThis) ((PhysicalMemory*)NULL) +#else //__nvoc_phys_mem_h_disabled +#define __dynamicCast_PhysicalMemory(pThis) \ + ((PhysicalMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(PhysicalMemory))) +#endif //__nvoc_phys_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_PhysicalMemory(PhysicalMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_PhysicalMemory(PhysicalMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_PhysicalMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_PhysicalMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define physmemCanCopy(pPhysicalMemory) physmemCanCopy_DISPATCH(pPhysicalMemory) +#define physmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) physmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define physmemControl(pMemory, pCallContext, pParams) physmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define physmemUnmap(pMemory, pCallContext, pCpuMapping) physmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define physmemGetMemInterMapParams(pMemory, pParams) physmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define physmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) physmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define physmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) physmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define physmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) physmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define physmemControlFilter(pResource, pCallContext, pParams) physmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define physmemAddAdditionalDependants(pClient, pResource, pReference) physmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define physmemGetRefCount(pResource) physmemGetRefCount_DISPATCH(pResource) +#define physmemMapTo(pResource, pParams) physmemMapTo_DISPATCH(pResource, pParams) +#define physmemControl_Prologue(pResource, pCallContext, pParams) physmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define physmemIsReady(pMemory) physmemIsReady_DISPATCH(pMemory) +#define physmemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) physmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define 
physmemPreDestruct(pResource) physmemPreDestruct_DISPATCH(pResource) +#define physmemUnmapFrom(pResource, pParams) physmemUnmapFrom_DISPATCH(pResource, pParams) +#define physmemControl_Epilogue(pResource, pCallContext, pParams) physmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define physmemControlLookup(pResource, pParams, ppEntry) physmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define physmemMap(pMemory, pCallContext, pParams, pCpuMapping) physmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define physmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) physmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool physmemCanCopy_IMPL(struct PhysicalMemory *pPhysicalMemory); + +static inline NvBool physmemCanCopy_DISPATCH(struct PhysicalMemory *pPhysicalMemory) { + return pPhysicalMemory->__physmemCanCopy__(pPhysicalMemory); +} + +static inline NV_STATUS physmemCheckMemInterUnmap_DISPATCH(struct PhysicalMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__physmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS physmemControl_DISPATCH(struct PhysicalMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__physmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS physmemUnmap_DISPATCH(struct PhysicalMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__physmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS physmemGetMemInterMapParams_DISPATCH(struct PhysicalMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__physmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS physmemGetMemoryMappingDescriptor_DISPATCH(struct PhysicalMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__physmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS physmemGetMapAddrSpace_DISPATCH(struct PhysicalMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__physmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool physmemShareCallback_DISPATCH(struct PhysicalMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__physmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS physmemControlFilter_DISPATCH(struct PhysicalMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__physmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void physmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct PhysicalMemory *pResource, RsResourceRef *pReference) { + pResource->__physmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 physmemGetRefCount_DISPATCH(struct PhysicalMemory *pResource) { + return pResource->__physmemGetRefCount__(pResource); +} + +static inline NV_STATUS physmemMapTo_DISPATCH(struct PhysicalMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__physmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS physmemControl_Prologue_DISPATCH(struct PhysicalMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL 
*pParams) { + return pResource->__physmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS physmemIsReady_DISPATCH(struct PhysicalMemory *pMemory) { + return pMemory->__physmemIsReady__(pMemory); +} + +static inline NV_STATUS physmemCheckCopyPermissions_DISPATCH(struct PhysicalMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__physmemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void physmemPreDestruct_DISPATCH(struct PhysicalMemory *pResource) { + pResource->__physmemPreDestruct__(pResource); +} + +static inline NV_STATUS physmemUnmapFrom_DISPATCH(struct PhysicalMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__physmemUnmapFrom__(pResource, pParams); +} + +static inline void physmemControl_Epilogue_DISPATCH(struct PhysicalMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__physmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS physmemControlLookup_DISPATCH(struct PhysicalMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__physmemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS physmemMap_DISPATCH(struct PhysicalMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__physmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool physmemAccessCallback_DISPATCH(struct PhysicalMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__physmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS physmemConstruct_IMPL(struct PhysicalMemory *arg_pPhysicalMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_physmemConstruct(arg_pPhysicalMemory, arg_pCallContext, arg_pParams) physmemConstruct_IMPL(arg_pPhysicalMemory, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_PHYS_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_platform_nvoc.c b/src/nvidia/generated/g_platform_nvoc.c new file mode 100644 index 000000000..92ce3539c --- /dev/null +++ b/src/nvidia/generated/g_platform_nvoc.c @@ -0,0 +1,155 @@ +#define NVOC_PLATFORM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_platform_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb543ae = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJPFM; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJPFM(OBJPFM*); +void __nvoc_init_funcTable_OBJPFM(OBJPFM*); +NV_STATUS __nvoc_ctor_OBJPFM(OBJPFM*); +void __nvoc_init_dataField_OBJPFM(OBJPFM*); +void __nvoc_dtor_OBJPFM(OBJPFM*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJPFM; + +static const struct NVOC_RTTI __nvoc_rtti_OBJPFM_OBJPFM = { + /*pClassDef=*/ &__nvoc_class_def_OBJPFM, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJPFM, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJPFM_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ 
NV_OFFSETOF(OBJPFM, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJPFM = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJPFM_OBJPFM, + &__nvoc_rtti_OBJPFM_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJPFM = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJPFM), + /*classId=*/ classId(OBJPFM), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJPFM", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJPFM, + /*pCastInfo=*/ &__nvoc_castinfo_OBJPFM, + /*pExportInfo=*/ &__nvoc_export_info_OBJPFM +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJPFM = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJPFM(OBJPFM *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJPFM(OBJPFM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + pThis->setProperty(pThis, PDB_PROP_PFM_SUPPORTS_ACPI, (0)); + pThis->setProperty(pThis, PDB_PROP_PFM_POSSIBLE_HIGHRES_BOOT, (0)); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJPFM(OBJPFM *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJPFM_fail_Object; + __nvoc_init_dataField_OBJPFM(pThis); + + status = __nvoc_pfmConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJPFM_fail__init; + goto __nvoc_ctor_OBJPFM_exit; // Success + +__nvoc_ctor_OBJPFM_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJPFM_fail_Object: +__nvoc_ctor_OBJPFM_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJPFM_1(OBJPFM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJPFM(OBJPFM *pThis) { + __nvoc_init_funcTable_OBJPFM_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJPFM(OBJPFM *pThis) { + pThis->__nvoc_pbase_OBJPFM = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJPFM(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJPFM(OBJPFM **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJPFM *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJPFM)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJPFM)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJPFM); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJPFM(pThis); + status = __nvoc_ctor_OBJPFM(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJPFM_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJPFM_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJPFM(OBJPFM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJPFM(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_platform_nvoc.h b/src/nvidia/generated/g_platform_nvoc.h new file mode 100644 index 000000000..ee9d4a9ac --- /dev/null +++ 
b/src/nvidia/generated/g_platform_nvoc.h @@ -0,0 +1,229 @@ +#ifndef _G_PLATFORM_NVOC_H_ +#define _G_PLATFORM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_platform_nvoc.h" + +#ifndef _PLATFORM_H_ +#define _PLATFORM_H_ + +typedef struct OBJPFM *POBJPFM; + +#ifndef __NVOC_CLASS_OBJPFM_TYPEDEF__ +#define __NVOC_CLASS_OBJPFM_TYPEDEF__ +typedef struct OBJPFM OBJPFM; +#endif /* __NVOC_CLASS_OBJPFM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPFM +#define __nvoc_class_id_OBJPFM 0xb543ae +#endif /* __nvoc_class_id_OBJPFM */ + + + +#include "nvoc/runtime.h" +#include "nvlimits.h" // NV_MAX_DEVICES +#include "ctrl/ctrl0073/ctrl0073specific.h" // NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES + +#include "core/core.h" + +#include "nvCpuUuid.h" + +/*! + * Data structure representing single BlobData entry. + */ +typedef struct +{ + NvU16 identifier; + NvU32 argument; +} PFM_BLOB_DATA_ENTRY, *PPFM_BLOB_DATA_ENTRY; + +typedef struct +{ + NvU32 entryCount; + PFM_BLOB_DATA_ENTRY *pEntry; +} PFM_BLOB_DATA; + +typedef struct +{ + NvU32 acpiId; + NvU32 displayId; + NvU32 dodIndex; +} ACPI_ID_MAPPING; + +/**************** Resource Manager Defines and Structures ******************\ +* Defines and structures used for the Platform object. 
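+*
+* ACPI_ID_MAPPING (above) ties an ACPI display ID to the corresponding RM
+* display ID and, presumably, its index in the ACPI _DOD list; OBJPFM below
+* keeps a per-GPU table of these entries (the [32][16] dimensions appear to
+* correspond to NV_MAX_DEVICES and NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES from
+* the includes above). PFM_BLOB_DATA is a counted array of
+* (identifier, argument) pairs.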
* +\***************************************************************************/ + +#ifdef NVOC_PLATFORM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJPFM { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJPFM *__nvoc_pbase_OBJPFM; + NvBool PDB_PROP_PFM_SUPPORTS_ACPI; + NvBool PDB_PROP_PFM_IS_MOBILE; + NvBool PDB_PROP_PFM_IS_TOSHIBA_MOBILE; + NvBool PDB_PROP_PFM_NO_HOSTBRIDGE_DETECT; + NvBool PDB_PROP_PFM_ENABLE_PERF_WITHOUT_MXM; + NvBool PDB_PROP_PFM_SKIP_DP_IRQ_HANDLE; + NvBool PDB_PROP_PFM_MODS_USE_TWO_STAGE_RC_RECOVER; + NvBool PDB_PROP_PFM_POSSIBLE_HIGHRES_BOOT; + NvBool PDB_PROP_PFM_APPLE_EDP_SUPPORTED; + NvBool PDB_PROP_PFM_BLOB_DATA_INIT_ATTEMPTED; + NvBool PDB_PROP_PFM_BLOB_DATA_INIT_SUCCEEDED; + PFM_BLOB_DATA blobData; + ACPI_ID_MAPPING acpiIdMapping[32][16]; +}; + +#ifndef __NVOC_CLASS_OBJPFM_TYPEDEF__ +#define __NVOC_CLASS_OBJPFM_TYPEDEF__ +typedef struct OBJPFM OBJPFM; +#endif /* __NVOC_CLASS_OBJPFM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPFM +#define __nvoc_class_id_OBJPFM 0xb543ae +#endif /* __nvoc_class_id_OBJPFM */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJPFM; + +#define __staticCast_OBJPFM(pThis) \ + ((pThis)->__nvoc_pbase_OBJPFM) + +#ifdef __nvoc_platform_h_disabled +#define __dynamicCast_OBJPFM(pThis) ((OBJPFM*)NULL) +#else //__nvoc_platform_h_disabled +#define __dynamicCast_OBJPFM(pThis) \ + ((OBJPFM*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJPFM))) +#endif //__nvoc_platform_h_disabled + +#define PDB_PROP_PFM_SKIP_DP_IRQ_HANDLE_BASE_CAST +#define PDB_PROP_PFM_SKIP_DP_IRQ_HANDLE_BASE_NAME PDB_PROP_PFM_SKIP_DP_IRQ_HANDLE +#define PDB_PROP_PFM_APPLE_EDP_SUPPORTED_BASE_CAST +#define PDB_PROP_PFM_APPLE_EDP_SUPPORTED_BASE_NAME PDB_PROP_PFM_APPLE_EDP_SUPPORTED +#define PDB_PROP_PFM_IS_MOBILE_BASE_CAST +#define PDB_PROP_PFM_IS_MOBILE_BASE_NAME PDB_PROP_PFM_IS_MOBILE +#define PDB_PROP_PFM_MODS_USE_TWO_STAGE_RC_RECOVER_BASE_CAST +#define PDB_PROP_PFM_MODS_USE_TWO_STAGE_RC_RECOVER_BASE_NAME PDB_PROP_PFM_MODS_USE_TWO_STAGE_RC_RECOVER +#define PDB_PROP_PFM_BLOB_DATA_INIT_ATTEMPTED_BASE_CAST +#define PDB_PROP_PFM_BLOB_DATA_INIT_ATTEMPTED_BASE_NAME PDB_PROP_PFM_BLOB_DATA_INIT_ATTEMPTED +#define PDB_PROP_PFM_ENABLE_PERF_WITHOUT_MXM_BASE_CAST +#define PDB_PROP_PFM_ENABLE_PERF_WITHOUT_MXM_BASE_NAME PDB_PROP_PFM_ENABLE_PERF_WITHOUT_MXM +#define PDB_PROP_PFM_BLOB_DATA_INIT_SUCCEEDED_BASE_CAST +#define PDB_PROP_PFM_BLOB_DATA_INIT_SUCCEEDED_BASE_NAME PDB_PROP_PFM_BLOB_DATA_INIT_SUCCEEDED +#define PDB_PROP_PFM_SUPPORTS_ACPI_BASE_CAST +#define PDB_PROP_PFM_SUPPORTS_ACPI_BASE_NAME PDB_PROP_PFM_SUPPORTS_ACPI +#define PDB_PROP_PFM_IS_TOSHIBA_MOBILE_BASE_CAST +#define PDB_PROP_PFM_IS_TOSHIBA_MOBILE_BASE_NAME PDB_PROP_PFM_IS_TOSHIBA_MOBILE +#define PDB_PROP_PFM_POSSIBLE_HIGHRES_BOOT_BASE_CAST +#define PDB_PROP_PFM_POSSIBLE_HIGHRES_BOOT_BASE_NAME PDB_PROP_PFM_POSSIBLE_HIGHRES_BOOT +#define PDB_PROP_PFM_NO_HOSTBRIDGE_DETECT_BASE_CAST +#define PDB_PROP_PFM_NO_HOSTBRIDGE_DETECT_BASE_NAME PDB_PROP_PFM_NO_HOSTBRIDGE_DETECT + +NV_STATUS __nvoc_objCreateDynamic_OBJPFM(OBJPFM**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJPFM(OBJPFM**, Dynamic*, NvU32); +#define __objCreate_OBJPFM(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJPFM((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS pfmConstruct_IMPL(struct OBJPFM *arg_pPfm); +#define __nvoc_pfmConstruct(arg_pPfm) 
pfmConstruct_IMPL(arg_pPfm) +void pfmBlobDataDestroy_IMPL(struct OBJPFM *pPfm); +#define pfmBlobDataDestroy(pPfm) pfmBlobDataDestroy_IMPL(pPfm) +void pfmUpdateAcpiIdMapping_IMPL(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2, NvU32 arg3, NvU32 arg4, NvU32 arg5); +#ifdef __nvoc_platform_h_disabled +static inline void pfmUpdateAcpiIdMapping(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2, NvU32 arg3, NvU32 arg4, NvU32 arg5) { + NV_ASSERT_FAILED_PRECOMP("OBJPFM was disabled!"); +} +#else //__nvoc_platform_h_disabled +#define pfmUpdateAcpiIdMapping(arg0, arg1, arg2, arg3, arg4, arg5) pfmUpdateAcpiIdMapping_IMPL(arg0, arg1, arg2, arg3, arg4, arg5) +#endif //__nvoc_platform_h_disabled + +NvU32 pfmFindAcpiId_IMPL(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2); +#ifdef __nvoc_platform_h_disabled +static inline NvU32 pfmFindAcpiId(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJPFM was disabled!"); + return 0; +} +#else //__nvoc_platform_h_disabled +#define pfmFindAcpiId(arg0, arg1, arg2) pfmFindAcpiId_IMPL(arg0, arg1, arg2) +#endif //__nvoc_platform_h_disabled + +NvU32 pfmFindDodIndex_IMPL(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2); +#ifdef __nvoc_platform_h_disabled +static inline NvU32 pfmFindDodIndex(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJPFM was disabled!"); + return 0; +} +#else //__nvoc_platform_h_disabled +#define pfmFindDodIndex(arg0, arg1, arg2) pfmFindDodIndex_IMPL(arg0, arg1, arg2) +#endif //__nvoc_platform_h_disabled + +NvU32 pfmFindDevMaskFromDodIndex_IMPL(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2); +#ifdef __nvoc_platform_h_disabled +static inline NvU32 pfmFindDevMaskFromDodIndex(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJPFM was disabled!"); + return 0; +} +#else //__nvoc_platform_h_disabled +#define pfmFindDevMaskFromDodIndex(arg0, arg1, arg2) pfmFindDevMaskFromDodIndex_IMPL(arg0, arg1, arg2) +#endif //__nvoc_platform_h_disabled + +NvU32 pfmFindDevMaskFromAcpiId_IMPL(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2); +#ifdef __nvoc_platform_h_disabled +static inline NvU32 pfmFindDevMaskFromAcpiId(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJPFM was disabled!"); + return 0; +} +#else //__nvoc_platform_h_disabled +#define pfmFindDevMaskFromAcpiId(arg0, arg1, arg2) pfmFindDevMaskFromAcpiId_IMPL(arg0, arg1, arg2) +#endif //__nvoc_platform_h_disabled + +void pfmUpdateDeviceAcpiId_IMPL(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2, NvU32 arg3); +#ifdef __nvoc_platform_h_disabled +static inline void pfmUpdateDeviceAcpiId(struct OBJPFM *arg0, OBJGPU *arg1, NvU32 arg2, NvU32 arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJPFM was disabled!"); +} +#else //__nvoc_platform_h_disabled +#define pfmUpdateDeviceAcpiId(arg0, arg1, arg2, arg3) pfmUpdateDeviceAcpiId_IMPL(arg0, arg1, arg2, arg3) +#endif //__nvoc_platform_h_disabled + +#undef PRIVATE_FIELD + + +#endif // _PLATFORM_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_PLATFORM_NVOC_H_ diff --git a/src/nvidia/generated/g_prereq_tracker_nvoc.c b/src/nvidia/generated/g_prereq_tracker_nvoc.c new file mode 100644 index 000000000..ee126ac7f --- /dev/null +++ b/src/nvidia/generated/g_prereq_tracker_nvoc.c @@ -0,0 +1,155 @@ +#define NVOC_PREREQ_TRACKER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_prereq_tracker_nvoc.h" + +#ifdef DEBUG 
+char __nvoc_class_id_uniqueness_check_0x0e171b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_PrereqTracker(PrereqTracker*); +void __nvoc_init_funcTable_PrereqTracker(PrereqTracker*); +NV_STATUS __nvoc_ctor_PrereqTracker(PrereqTracker*, struct OBJGPU * arg_pParent); +void __nvoc_init_dataField_PrereqTracker(PrereqTracker*); +void __nvoc_dtor_PrereqTracker(PrereqTracker*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_PrereqTracker; + +static const struct NVOC_RTTI __nvoc_rtti_PrereqTracker_PrereqTracker = { + /*pClassDef=*/ &__nvoc_class_def_PrereqTracker, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_PrereqTracker, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_PrereqTracker_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(PrereqTracker, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_PrereqTracker = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_PrereqTracker_PrereqTracker, + &__nvoc_rtti_PrereqTracker_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker = +{ + /*classInfo=*/ { + /*size=*/ sizeof(PrereqTracker), + /*classId=*/ classId(PrereqTracker), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "PrereqTracker", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_PrereqTracker, + /*pCastInfo=*/ &__nvoc_castinfo_PrereqTracker, + /*pExportInfo=*/ &__nvoc_export_info_PrereqTracker +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_PrereqTracker = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_PrereqTracker(PrereqTracker *pThis) { + __nvoc_prereqDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_PrereqTracker(PrereqTracker *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_PrereqTracker(PrereqTracker *pThis, struct OBJGPU * arg_pParent) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_PrereqTracker_fail_Object; + __nvoc_init_dataField_PrereqTracker(pThis); + + status = __nvoc_prereqConstruct(pThis, arg_pParent); + if (status != NV_OK) goto __nvoc_ctor_PrereqTracker_fail__init; + goto __nvoc_ctor_PrereqTracker_exit; // Success + +__nvoc_ctor_PrereqTracker_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_PrereqTracker_fail_Object: +__nvoc_ctor_PrereqTracker_exit: + + return status; +} + +static void __nvoc_init_funcTable_PrereqTracker_1(PrereqTracker *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_PrereqTracker(PrereqTracker *pThis) { + __nvoc_init_funcTable_PrereqTracker_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_PrereqTracker(PrereqTracker *pThis) { + pThis->__nvoc_pbase_PrereqTracker = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_PrereqTracker(pThis); +} + +NV_STATUS __nvoc_objCreate_PrereqTracker(PrereqTracker **ppThis, Dynamic *pParent, NvU32 createFlags, struct OBJGPU * arg_pParent) { + NV_STATUS status; + Object *pParentObj; + PrereqTracker *pThis; + + pThis = 
portMemAllocNonPaged(sizeof(PrereqTracker)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(PrereqTracker)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_PrereqTracker); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_PrereqTracker(pThis); + status = __nvoc_ctor_PrereqTracker(pThis, arg_pParent); + if (status != NV_OK) goto __nvoc_objCreate_PrereqTracker_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_PrereqTracker_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_PrereqTracker(PrereqTracker **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct OBJGPU * arg_pParent = va_arg(args, struct OBJGPU *); + + status = __nvoc_objCreate_PrereqTracker(ppThis, pParent, createFlags, arg_pParent); + + return status; +} + diff --git a/src/nvidia/generated/g_prereq_tracker_nvoc.h b/src/nvidia/generated/g_prereq_tracker_nvoc.h new file mode 100644 index 000000000..b29327e2a --- /dev/null +++ b/src/nvidia/generated/g_prereq_tracker_nvoc.h @@ -0,0 +1,254 @@ +#ifndef _G_PREREQ_TRACKER_NVOC_H_ +#define _G_PREREQ_TRACKER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file prereq_tracker.h + * @brief Holds interfaces and data structures required by the prerequisite + * tracking feature/code. + * + * Code depending on multiple other features should use prereqComposeEntry() to create + * a prerequisite tracking structure with a provided bitVector of all necessary + * dependencies, which will arm the prereq to start watching those dependencies. + * Once those dependencies are fulfilled they should issue prereqSatisfy() (one-by-one) + * This common code should broadcast those to all prerequisite tracking structures + * and once all respective dependencies are satisfied, will issue the + * registered callback. 
+ * Similarly, dependencies should issue prereqRetract() before they change
+ * their state, and the common code will broadcast that to all tracking structures
+ * and issue callbacks again with bSatisfied=false, if all dependencies
+ * for that prereq were previously satisfied.
+ *
+ * @note The feature is designed to prevent creating new prerequisites once
+ * dependencies start issuing Satisfy()/Retract() notifications.
+ * Therefore, compose all prerequisite entries (via prereqComposeEntry()) during
+ * stateInit() and allow code to issue Satisfy()/Retract() only in
+ * stateLoad() or later.
+ */
+
+#include "g_prereq_tracker_nvoc.h"
+
+#ifndef __PREREQUISITE_TRACKER_H__
+#define __PREREQUISITE_TRACKER_H__
+
+/* ------------------------ Includes ---------------------------------------- */
+#include "containers/list.h"
+#include "utils/nvbitvector.h"
+
+#include "nvoc/object.h"
+
+/* ------------------------ Macros ------------------------------------------ */
+
+#define PREREQ_ID_VECTOR_SIZE 64
+
+/*!
+ * Checks whether all dependencies of the given prerequisite tracking
+ * structure have been satisfied.
+ *
+ * @param[in] _pPrereq PREREQ_ENTRY pointer
+ *
+ * @return boolean indicating whether the prerequisite has been satisfied.
+ */
+#define PREREQ_IS_SATISFIED(_pPrereq) \
+    ((_pPrereq)->countRequested == (_pPrereq)->countSatisfied)
+
+/* ------------------------ Datatypes --------------------------------------- */
+
+struct OBJGPU;
+
+#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
+#define __NVOC_CLASS_OBJGPU_TYPEDEF__
+typedef struct OBJGPU OBJGPU;
+#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_OBJGPU
+#define __nvoc_class_id_OBJGPU 0x7ef3cb
+#endif /* __nvoc_class_id_OBJGPU */
+
+
+
+/*!
+ * @brief Callback prototype.
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] bSatisfied
+ *     Indicates if dependencies were just satisfied or about to be retracted.
+ *
+ * @return NV_OK if the callback executed successfully
+ * @return status failure-specific error code
+ */
+typedef NV_STATUS GpuPrereqCallback(struct OBJGPU *pGpu, NvBool bSatisfied);
+
+typedef NvU16 PREREQ_ID;
+
+/*!
+ * Bitvector for storing the prereq IDs required by another prereq struct.
+ * Limited to the size defined above, set to the largest required by users.
+ */
+MAKE_BITVECTOR(PREREQ_ID_BIT_VECTOR, PREREQ_ID_VECTOR_SIZE);
+
+/*!
+ * An individual prerequisite tracking entry structure.
+ */
+typedef struct
+{
+    /*!
+     * Mask of the dependencies (prerequisites that have to be satisfied before
+     * the callback can be issued).
+     */
+    PREREQ_ID_BIT_VECTOR requested;
+
+    /*!
+     * Counter of all dependencies (prerequisites) tracked by this structure.
+     */
+    NvS32 countRequested;
+    /*!
+     * Counter of currently satisfied dependencies (prerequisites) tracked by
+     * this structure. Once equal to @ref countRequested, the callback can be issued.
+     */
+    NvS32 countSatisfied;
+
+    /*!
+     * Boolean indicating that the given PREREQ_ENTRY is armed and ready to fire @ref
+     * callback whenever all PREREQ_IDs specified in @ref requested are satisfied.
+     *
+     * This bit is set during @ref prereqComposeEntry_IMPL(), which will also do an
+     * initial satisfaction check of all @ref requested PREREQ_IDs
+     * and fire the @ref callback if necessary.
+     */
+    NvBool bArmed;
+
+    /*!
+     * @copydoc GpuPrereqCallback
+     */
+    GpuPrereqCallback *callback;
+} PREREQ_ENTRY;
+MAKE_LIST(PrereqList, PREREQ_ENTRY);
+
+/*!
+ * Holds common prerequisite tracking information.
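+ *
+ * A hypothetical usage sketch (illustrative only; MY_PREREQ_ID_A/B,
+ * _myFeatureCallback, _myFeatureEnable/Disable and the owning pTracker are
+ * made-up names, and the bitVectorClrAll()/bitVectorSet() helpers are assumed
+ * from utils/nvbitvector.h):
+ *
+ *     static NV_STATUS _myFeatureCallback(OBJGPU *pGpu, NvBool bSatisfied)
+ *     {
+ *         // Called once all requested IDs are satisfied, and again with
+ *         // bSatisfied == NV_FALSE when one of them is retracted.
+ *         return bSatisfied ? _myFeatureEnable(pGpu) : _myFeatureDisable(pGpu);
+ *     }
+ *
+ *     // stateInit(): compose an entry armed on two dependencies.
+ *     PREREQ_ID_BIT_VECTOR depends;
+ *     PREREQ_ENTRY        *pEntry;
+ *     bitVectorClrAll(&depends);
+ *     bitVectorSet(&depends, MY_PREREQ_ID_A);
+ *     bitVectorSet(&depends, MY_PREREQ_ID_B);
+ *     NV_ASSERT_OK(prereqComposeEntry(pTracker, _myFeatureCallback, &depends, &pEntry));
+ *
+ *     // stateLoad() or later: dependencies report in one by one; the callback
+ *     // fires when the last requested ID becomes satisfied.
+ *     NV_ASSERT_OK(prereqSatisfy(pTracker, MY_PREREQ_ID_A));
+ *     NV_ASSERT_OK(prereqSatisfy(pTracker, MY_PREREQ_ID_B));
+ *
+ *     // Before a dependency changes state it retracts itself, which re-issues
+ *     // the callback with bSatisfied == NV_FALSE.
+ *     NV_ASSERT_OK(prereqRetract(pTracker, MY_PREREQ_ID_A));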
+ */ +#ifdef NVOC_PREREQ_TRACKER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct PrereqTracker { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct PrereqTracker *__nvoc_pbase_PrereqTracker; + union PREREQ_ID_BIT_VECTOR satisfied; + NvBool bInitialized; + PrereqList prereqList; + struct OBJGPU *pParent; +}; + +#ifndef __NVOC_CLASS_PrereqTracker_TYPEDEF__ +#define __NVOC_CLASS_PrereqTracker_TYPEDEF__ +typedef struct PrereqTracker PrereqTracker; +#endif /* __NVOC_CLASS_PrereqTracker_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PrereqTracker +#define __nvoc_class_id_PrereqTracker 0x0e171b +#endif /* __nvoc_class_id_PrereqTracker */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_PrereqTracker; + +#define __staticCast_PrereqTracker(pThis) \ + ((pThis)->__nvoc_pbase_PrereqTracker) + +#ifdef __nvoc_prereq_tracker_h_disabled +#define __dynamicCast_PrereqTracker(pThis) ((PrereqTracker*)NULL) +#else //__nvoc_prereq_tracker_h_disabled +#define __dynamicCast_PrereqTracker(pThis) \ + ((PrereqTracker*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(PrereqTracker))) +#endif //__nvoc_prereq_tracker_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_PrereqTracker(PrereqTracker**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_PrereqTracker(PrereqTracker**, Dynamic*, NvU32, struct OBJGPU * arg_pParent); +#define __objCreate_PrereqTracker(ppNewObj, pParent, createFlags, arg_pParent) \ + __nvoc_objCreate_PrereqTracker((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pParent) + +NV_STATUS prereqConstruct_IMPL(struct PrereqTracker *arg_pTracker, struct OBJGPU *arg_pParent); +#define __nvoc_prereqConstruct(arg_pTracker, arg_pParent) prereqConstruct_IMPL(arg_pTracker, arg_pParent) +void prereqDestruct_IMPL(struct PrereqTracker *pTracker); +#define __nvoc_prereqDestruct(pTracker) prereqDestruct_IMPL(pTracker) +NV_STATUS prereqSatisfy_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId); +#ifdef __nvoc_prereq_tracker_h_disabled +static inline NV_STATUS prereqSatisfy(struct PrereqTracker *pTracker, PREREQ_ID prereqId) { + NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_prereq_tracker_h_disabled +#define prereqSatisfy(pTracker, prereqId) prereqSatisfy_IMPL(pTracker, prereqId) +#endif //__nvoc_prereq_tracker_h_disabled + +NV_STATUS prereqRetract_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId); +#ifdef __nvoc_prereq_tracker_h_disabled +static inline NV_STATUS prereqRetract(struct PrereqTracker *pTracker, PREREQ_ID prereqId) { + NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_prereq_tracker_h_disabled +#define prereqRetract(pTracker, prereqId) prereqRetract_IMPL(pTracker, prereqId) +#endif //__nvoc_prereq_tracker_h_disabled + +NvBool prereqIdIsSatisfied_IMPL(struct PrereqTracker *pTracker, PREREQ_ID prereqId); +#ifdef __nvoc_prereq_tracker_h_disabled +static inline NvBool prereqIdIsSatisfied(struct PrereqTracker *pTracker, PREREQ_ID prereqId) { + NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!"); + return NV_FALSE; +} +#else //__nvoc_prereq_tracker_h_disabled +#define prereqIdIsSatisfied(pTracker, prereqId) prereqIdIsSatisfied_IMPL(pTracker, prereqId) +#endif //__nvoc_prereq_tracker_h_disabled + +NV_STATUS prereqComposeEntry_IMPL(struct PrereqTracker *pTracker, GpuPrereqCallback *callback, union 
PREREQ_ID_BIT_VECTOR *pDepends, PREREQ_ENTRY **ppPrereq); +#ifdef __nvoc_prereq_tracker_h_disabled +static inline NV_STATUS prereqComposeEntry(struct PrereqTracker *pTracker, GpuPrereqCallback *callback, union PREREQ_ID_BIT_VECTOR *pDepends, PREREQ_ENTRY **ppPrereq) { + NV_ASSERT_FAILED_PRECOMP("PrereqTracker was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_prereq_tracker_h_disabled +#define prereqComposeEntry(pTracker, callback, pDepends, ppPrereq) prereqComposeEntry_IMPL(pTracker, callback, pDepends, ppPrereq) +#endif //__nvoc_prereq_tracker_h_disabled + +#undef PRIVATE_FIELD + + +#endif // __PREREQUISITE_TRACKER_H__ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_PREREQ_TRACKER_NVOC_H_ diff --git a/src/nvidia/generated/g_profiler_v1_nvoc.c b/src/nvidia/generated/g_profiler_v1_nvoc.c new file mode 100644 index 000000000..7526f3114 --- /dev/null +++ b/src/nvidia/generated/g_profiler_v1_nvoc.c @@ -0,0 +1,449 @@ +#define NVOC_PROFILER_V1_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_profiler_v1_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x65b4c7 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Profiler; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_Profiler(Profiler*, RmHalspecOwner* ); +void __nvoc_init_funcTable_Profiler(Profiler*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_Profiler(Profiler*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Profiler(Profiler*, RmHalspecOwner* ); +void __nvoc_dtor_Profiler(Profiler*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Profiler; + +static const struct NVOC_RTTI __nvoc_rtti_Profiler_Profiler = { + /*pClassDef=*/ &__nvoc_class_def_Profiler, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Profiler, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Profiler_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Profiler, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Profiler_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Profiler, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Profiler_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Profiler, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Profiler_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Profiler, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Profiler_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Profiler, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Profiler = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_Profiler_Profiler, + &__nvoc_rtti_Profiler_GpuResource, + &__nvoc_rtti_Profiler_RmResource, + &__nvoc_rtti_Profiler_RmResourceCommon, + &__nvoc_rtti_Profiler_RsResource, + &__nvoc_rtti_Profiler_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Profiler = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Profiler), + /*classId=*/ classId(Profiler), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "Profiler", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Profiler, + /*pCastInfo=*/ &__nvoc_castinfo_Profiler, + /*pExportInfo=*/ &__nvoc_export_info_Profiler +}; + +static NV_STATUS __nvoc_thunk_Profiler_gpuresControl(struct GpuResource *pProfiler, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return profilerControl((struct Profiler *)(((unsigned char *)pProfiler) - __nvoc_rtti_Profiler_GpuResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_profilerShareCallback(struct Profiler *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Profiler_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerUnmap(struct Profiler *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Profiler_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerGetMemInterMapParams(struct Profiler *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Profiler_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerGetMemoryMappingDescriptor(struct Profiler *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Profiler_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerGetMapAddrSpace(struct Profiler *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Profiler_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_profilerGetInternalObjectHandle(struct Profiler *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Profiler_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerControlFilter(struct Profiler *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_profilerAddAdditionalDependants(struct RsClient *pClient, struct Profiler *pResource, RsResourceRef *pReference) { + 
resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_profilerGetRefCount(struct Profiler *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerCheckMemInterUnmap(struct Profiler *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Profiler_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerMapTo(struct Profiler *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerControl_Prologue(struct Profiler *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerGetRegBaseOffsetAndSize(struct Profiler *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Profiler_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_profilerCanCopy(struct Profiler *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerInternalControlForward(struct Profiler *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Profiler_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_profilerPreDestruct(struct Profiler *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerUnmapFrom(struct Profiler *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_profilerControl_Epilogue(struct Profiler *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerControlLookup(struct Profiler *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerMap(struct Profiler *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Profiler_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + 
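+/*
+ * Note on the generated thunks above: each __nvoc_thunk_* wrapper converts
+ * between the derived Profiler pointer and one of its embedded base-class
+ * sub-objects by applying the byte offset recorded in the corresponding
+ * __nvoc_rtti_Profiler_* entry, and then forwards the call to that class's
+ * implementation. This is how the generated NVOC code emulates virtual
+ * dispatch across the GpuResource / RmResource / RsResource hierarchy in
+ * plain C.
+ */
+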
+static NvBool __nvoc_thunk_RmResource_profilerAccessCallback(struct Profiler *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Profiler_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Profiler[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerCtrlCmdProfilerReserveHwpm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cc0101u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Profiler.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerCtrlCmdProfilerReserveHwpm" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerCtrlCmdProfilerReleaseHwpm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cc0102u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Profiler.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerCtrlCmdProfilerReleaseHwpm" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerCtrlCmdProfilerGetHwpmReservationInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cc0103u, + /*paramSize=*/ sizeof(NV90CC_CTRL_HWPM_GET_RESERVATION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Profiler.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerCtrlCmdProfilerGetHwpmReservationInfo" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerCtrlCmdProfilerRequestCgControls_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cc0301u, + /*paramSize=*/ sizeof(NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Profiler.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerCtrlCmdProfilerRequestCgControls" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerCtrlCmdProfilerReleaseCgControls_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90cc0302u, + /*paramSize=*/ sizeof(NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Profiler.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerCtrlCmdProfilerReleaseCgControls" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Profiler = +{ + /*numEntries=*/ 5, + /*pExportEntries=*/ __nvoc_exported_method_def_Profiler +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Profiler(Profiler *pThis) { + __nvoc_profilerDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void 
__nvoc_init_dataField_Profiler(Profiler *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Profiler(Profiler *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Profiler_fail_GpuResource; + __nvoc_init_dataField_Profiler(pThis, pRmhalspecowner); + + status = __nvoc_profilerConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Profiler_fail__init; + goto __nvoc_ctor_Profiler_exit; // Success + +__nvoc_ctor_Profiler_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_Profiler_fail_GpuResource: +__nvoc_ctor_Profiler_exit: + + return status; +} + +static void __nvoc_init_funcTable_Profiler_1(Profiler *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__profilerControl__ = &profilerControl_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerCtrlCmdProfilerReserveHwpm__ = &profilerCtrlCmdProfilerReserveHwpm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerCtrlCmdProfilerReleaseHwpm__ = &profilerCtrlCmdProfilerReleaseHwpm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerCtrlCmdProfilerGetHwpmReservationInfo__ = &profilerCtrlCmdProfilerGetHwpmReservationInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerCtrlCmdProfilerRequestCgControls__ = &profilerCtrlCmdProfilerRequestCgControls_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerCtrlCmdProfilerReleaseCgControls__ = &profilerCtrlCmdProfilerReleaseCgControls_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_Profiler_gpuresControl; + + pThis->__profilerShareCallback__ = &__nvoc_thunk_GpuResource_profilerShareCallback; + + pThis->__profilerUnmap__ = &__nvoc_thunk_GpuResource_profilerUnmap; + + pThis->__profilerGetMemInterMapParams__ = &__nvoc_thunk_RmResource_profilerGetMemInterMapParams; + + pThis->__profilerGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_profilerGetMemoryMappingDescriptor; + + pThis->__profilerGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_profilerGetMapAddrSpace; + + pThis->__profilerGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_profilerGetInternalObjectHandle; + + pThis->__profilerControlFilter__ = &__nvoc_thunk_RsResource_profilerControlFilter; + + pThis->__profilerAddAdditionalDependants__ = &__nvoc_thunk_RsResource_profilerAddAdditionalDependants; + + pThis->__profilerGetRefCount__ = 
&__nvoc_thunk_RsResource_profilerGetRefCount; + + pThis->__profilerCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_profilerCheckMemInterUnmap; + + pThis->__profilerMapTo__ = &__nvoc_thunk_RsResource_profilerMapTo; + + pThis->__profilerControl_Prologue__ = &__nvoc_thunk_RmResource_profilerControl_Prologue; + + pThis->__profilerGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_profilerGetRegBaseOffsetAndSize; + + pThis->__profilerCanCopy__ = &__nvoc_thunk_RsResource_profilerCanCopy; + + pThis->__profilerInternalControlForward__ = &__nvoc_thunk_GpuResource_profilerInternalControlForward; + + pThis->__profilerPreDestruct__ = &__nvoc_thunk_RsResource_profilerPreDestruct; + + pThis->__profilerUnmapFrom__ = &__nvoc_thunk_RsResource_profilerUnmapFrom; + + pThis->__profilerControl_Epilogue__ = &__nvoc_thunk_RmResource_profilerControl_Epilogue; + + pThis->__profilerControlLookup__ = &__nvoc_thunk_RsResource_profilerControlLookup; + + pThis->__profilerMap__ = &__nvoc_thunk_GpuResource_profilerMap; + + pThis->__profilerAccessCallback__ = &__nvoc_thunk_RmResource_profilerAccessCallback; +} + +void __nvoc_init_funcTable_Profiler(Profiler *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_Profiler_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Profiler(Profiler *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_Profiler = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_Profiler(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_Profiler(Profiler **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Profiler *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(Profiler)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Profiler)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Profiler); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_Profiler(pThis, pRmhalspecowner); + status = __nvoc_ctor_Profiler(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Profiler_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Profiler_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); 
+ return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Profiler(Profiler **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Profiler(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_profiler_v1_nvoc.h b/src/nvidia/generated/g_profiler_v1_nvoc.h new file mode 100644 index 000000000..da5a4040b --- /dev/null +++ b/src/nvidia/generated/g_profiler_v1_nvoc.h @@ -0,0 +1,321 @@ +#ifndef _G_PROFILER_V1_NVOC_H_ +#define _G_PROFILER_V1_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_profiler_v1_nvoc.h" + +#ifndef PROFILER_V1_H +#define PROFILER_V1_H + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_resource.h" +#include "ctrl/ctrl90cc.h" +#include "gpu/gpu_resource.h" +#include "gpu/gpu_halspec.h" +#include "nvoc/utility.h" + +#ifdef NVOC_PROFILER_V1_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Profiler { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct Profiler *__nvoc_pbase_Profiler; + NV_STATUS (*__profilerControl__)(struct Profiler *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerCtrlCmdProfilerReserveHwpm__)(struct Profiler *); + NV_STATUS (*__profilerCtrlCmdProfilerReleaseHwpm__)(struct Profiler *); + NV_STATUS (*__profilerCtrlCmdProfilerGetHwpmReservationInfo__)(struct Profiler *, NV90CC_CTRL_HWPM_GET_RESERVATION_INFO_PARAMS *); + NV_STATUS (*__profilerCtrlCmdProfilerRequestCgControls__)(struct Profiler *, NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS *); + NV_STATUS (*__profilerCtrlCmdProfilerReleaseCgControls__)(struct Profiler *, NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS *); + NvBool (*__profilerShareCallback__)(struct Profiler *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__profilerUnmap__)(struct Profiler *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__profilerGetMemInterMapParams__)(struct Profiler *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__profilerGetMemoryMappingDescriptor__)(struct Profiler *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__profilerGetMapAddrSpace__)(struct Profiler *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__profilerGetInternalObjectHandle__)(struct Profiler *); + NV_STATUS (*__profilerControlFilter__)(struct Profiler *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__profilerAddAdditionalDependants__)(struct RsClient *, struct Profiler *, RsResourceRef *); + NvU32 (*__profilerGetRefCount__)(struct Profiler *); + NV_STATUS (*__profilerCheckMemInterUnmap__)(struct Profiler *, NvBool); + NV_STATUS (*__profilerMapTo__)(struct Profiler *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__profilerControl_Prologue__)(struct Profiler *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerGetRegBaseOffsetAndSize__)(struct Profiler *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__profilerCanCopy__)(struct Profiler *); + NV_STATUS (*__profilerInternalControlForward__)(struct Profiler *, NvU32, void *, NvU32); + void (*__profilerPreDestruct__)(struct Profiler *); + NV_STATUS (*__profilerUnmapFrom__)(struct Profiler *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__profilerControl_Epilogue__)(struct Profiler *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerControlLookup__)(struct Profiler *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__profilerMap__)(struct Profiler *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__profilerAccessCallback__)(struct Profiler *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef 
__NVOC_CLASS_Profiler_TYPEDEF__ +#define __NVOC_CLASS_Profiler_TYPEDEF__ +typedef struct Profiler Profiler; +#endif /* __NVOC_CLASS_Profiler_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Profiler +#define __nvoc_class_id_Profiler 0x65b4c7 +#endif /* __nvoc_class_id_Profiler */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Profiler; + +#define __staticCast_Profiler(pThis) \ + ((pThis)->__nvoc_pbase_Profiler) + +#ifdef __nvoc_profiler_v1_h_disabled +#define __dynamicCast_Profiler(pThis) ((Profiler*)NULL) +#else //__nvoc_profiler_v1_h_disabled +#define __dynamicCast_Profiler(pThis) \ + ((Profiler*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Profiler))) +#endif //__nvoc_profiler_v1_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Profiler(Profiler**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Profiler(Profiler**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Profiler(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Profiler((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define profilerControl(pProfiler, pCallContext, pParams) profilerControl_DISPATCH(pProfiler, pCallContext, pParams) +#define profilerCtrlCmdProfilerReserveHwpm(pProfiler) profilerCtrlCmdProfilerReserveHwpm_DISPATCH(pProfiler) +#define profilerCtrlCmdProfilerReleaseHwpm(pProfiler) profilerCtrlCmdProfilerReleaseHwpm_DISPATCH(pProfiler) +#define profilerCtrlCmdProfilerGetHwpmReservationInfo(pProfiler, pParams) profilerCtrlCmdProfilerGetHwpmReservationInfo_DISPATCH(pProfiler, pParams) +#define profilerCtrlCmdProfilerRequestCgControls(pProfiler, pParams) profilerCtrlCmdProfilerRequestCgControls_DISPATCH(pProfiler, pParams) +#define profilerCtrlCmdProfilerReleaseCgControls(pProfiler, pParams) profilerCtrlCmdProfilerReleaseCgControls_DISPATCH(pProfiler, pParams) +#define profilerShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) profilerShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define profilerUnmap(pGpuResource, pCallContext, pCpuMapping) profilerUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define profilerGetMemInterMapParams(pRmResource, pParams) profilerGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define profilerGetMemoryMappingDescriptor(pRmResource, ppMemDesc) profilerGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define profilerGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) profilerGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define profilerGetInternalObjectHandle(pGpuResource) profilerGetInternalObjectHandle_DISPATCH(pGpuResource) +#define profilerControlFilter(pResource, pCallContext, pParams) profilerControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define profilerAddAdditionalDependants(pClient, pResource, pReference) profilerAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define profilerGetRefCount(pResource) profilerGetRefCount_DISPATCH(pResource) +#define profilerCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) profilerCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define profilerMapTo(pResource, pParams) profilerMapTo_DISPATCH(pResource, pParams) +#define profilerControl_Prologue(pResource, pCallContext, pParams) profilerControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define 
profilerGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) profilerGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define profilerCanCopy(pResource) profilerCanCopy_DISPATCH(pResource) +#define profilerInternalControlForward(pGpuResource, command, pParams, size) profilerInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define profilerPreDestruct(pResource) profilerPreDestruct_DISPATCH(pResource) +#define profilerUnmapFrom(pResource, pParams) profilerUnmapFrom_DISPATCH(pResource, pParams) +#define profilerControl_Epilogue(pResource, pCallContext, pParams) profilerControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define profilerControlLookup(pResource, pParams, ppEntry) profilerControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define profilerMap(pGpuResource, pCallContext, pParams, pCpuMapping) profilerMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define profilerAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) profilerAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool profilerIsProfilingPermitted_IMPL(struct Profiler *pProfiler); + +#ifdef __nvoc_profiler_v1_h_disabled +static inline NvBool profilerIsProfilingPermitted(struct Profiler *pProfiler) { + NV_ASSERT_FAILED_PRECOMP("Profiler was disabled!"); + return NV_FALSE; +} +#else //__nvoc_profiler_v1_h_disabled +#define profilerIsProfilingPermitted(pProfiler) profilerIsProfilingPermitted_IMPL(pProfiler) +#endif //__nvoc_profiler_v1_h_disabled + +#define profilerIsProfilingPermitted_HAL(pProfiler) profilerIsProfilingPermitted(pProfiler) + +static inline NV_STATUS profilerConstructState_ac1694(struct Profiler *pProfiler, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + return NV_OK; +} + +#ifdef __nvoc_profiler_v1_h_disabled +static inline NV_STATUS profilerConstructState(struct Profiler *pProfiler, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("Profiler was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_profiler_v1_h_disabled +#define profilerConstructState(pProfiler, pCallContext, pParams) profilerConstructState_ac1694(pProfiler, pCallContext, pParams) +#endif //__nvoc_profiler_v1_h_disabled + +#define profilerConstructState_HAL(pProfiler, pCallContext, pParams) profilerConstructState(pProfiler, pCallContext, pParams) + +static inline void profilerDestruct_d44104(struct Profiler *pProfiler) { + return; +} + +#define __nvoc_profilerDestruct(pProfiler) profilerDestruct_d44104(pProfiler) +static inline NV_STATUS profilerControlHwpmSupported_ac1694(struct Profiler *pProfiler, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return NV_OK; +} + +#ifdef __nvoc_profiler_v1_h_disabled +static inline NV_STATUS profilerControlHwpmSupported(struct Profiler *pProfiler, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("Profiler was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_profiler_v1_h_disabled +#define profilerControlHwpmSupported(pProfiler, pParams) profilerControlHwpmSupported_ac1694(pProfiler, pParams) +#endif //__nvoc_profiler_v1_h_disabled + +#define profilerControlHwpmSupported_HAL(pProfiler, pParams) profilerControlHwpmSupported(pProfiler, pParams) + +NV_STATUS profilerControl_IMPL(struct Profiler *pProfiler, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS 
profilerControl_DISPATCH(struct Profiler *pProfiler, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pProfiler->__profilerControl__(pProfiler, pCallContext, pParams); +} + +NV_STATUS profilerCtrlCmdProfilerReserveHwpm_IMPL(struct Profiler *pProfiler); + +static inline NV_STATUS profilerCtrlCmdProfilerReserveHwpm_DISPATCH(struct Profiler *pProfiler) { + return pProfiler->__profilerCtrlCmdProfilerReserveHwpm__(pProfiler); +} + +NV_STATUS profilerCtrlCmdProfilerReleaseHwpm_IMPL(struct Profiler *pProfiler); + +static inline NV_STATUS profilerCtrlCmdProfilerReleaseHwpm_DISPATCH(struct Profiler *pProfiler) { + return pProfiler->__profilerCtrlCmdProfilerReleaseHwpm__(pProfiler); +} + +NV_STATUS profilerCtrlCmdProfilerGetHwpmReservationInfo_IMPL(struct Profiler *pProfiler, NV90CC_CTRL_HWPM_GET_RESERVATION_INFO_PARAMS *pParams); + +static inline NV_STATUS profilerCtrlCmdProfilerGetHwpmReservationInfo_DISPATCH(struct Profiler *pProfiler, NV90CC_CTRL_HWPM_GET_RESERVATION_INFO_PARAMS *pParams) { + return pProfiler->__profilerCtrlCmdProfilerGetHwpmReservationInfo__(pProfiler, pParams); +} + +NV_STATUS profilerCtrlCmdProfilerRequestCgControls_IMPL(struct Profiler *pProfiler, NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS *pParams); + +static inline NV_STATUS profilerCtrlCmdProfilerRequestCgControls_DISPATCH(struct Profiler *pProfiler, NV90CC_CTRL_POWER_REQUEST_FEATURES_PARAMS *pParams) { + return pProfiler->__profilerCtrlCmdProfilerRequestCgControls__(pProfiler, pParams); +} + +NV_STATUS profilerCtrlCmdProfilerReleaseCgControls_IMPL(struct Profiler *pProfiler, NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS *pParams); + +static inline NV_STATUS profilerCtrlCmdProfilerReleaseCgControls_DISPATCH(struct Profiler *pProfiler, NV90CC_CTRL_POWER_RELEASE_FEATURES_PARAMS *pParams) { + return pProfiler->__profilerCtrlCmdProfilerReleaseCgControls__(pProfiler, pParams); +} + +static inline NvBool profilerShareCallback_DISPATCH(struct Profiler *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__profilerShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS profilerUnmap_DISPATCH(struct Profiler *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__profilerUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS profilerGetMemInterMapParams_DISPATCH(struct Profiler *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__profilerGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS profilerGetMemoryMappingDescriptor_DISPATCH(struct Profiler *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__profilerGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS profilerGetMapAddrSpace_DISPATCH(struct Profiler *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__profilerGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle profilerGetInternalObjectHandle_DISPATCH(struct Profiler *pGpuResource) { + return pGpuResource->__profilerGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS profilerControlFilter_DISPATCH(struct Profiler *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
pResource->__profilerControlFilter__(pResource, pCallContext, pParams); +} + +static inline void profilerAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Profiler *pResource, RsResourceRef *pReference) { + pResource->__profilerAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 profilerGetRefCount_DISPATCH(struct Profiler *pResource) { + return pResource->__profilerGetRefCount__(pResource); +} + +static inline NV_STATUS profilerCheckMemInterUnmap_DISPATCH(struct Profiler *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__profilerCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS profilerMapTo_DISPATCH(struct Profiler *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__profilerMapTo__(pResource, pParams); +} + +static inline NV_STATUS profilerControl_Prologue_DISPATCH(struct Profiler *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__profilerControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS profilerGetRegBaseOffsetAndSize_DISPATCH(struct Profiler *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__profilerGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool profilerCanCopy_DISPATCH(struct Profiler *pResource) { + return pResource->__profilerCanCopy__(pResource); +} + +static inline NV_STATUS profilerInternalControlForward_DISPATCH(struct Profiler *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__profilerInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void profilerPreDestruct_DISPATCH(struct Profiler *pResource) { + pResource->__profilerPreDestruct__(pResource); +} + +static inline NV_STATUS profilerUnmapFrom_DISPATCH(struct Profiler *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__profilerUnmapFrom__(pResource, pParams); +} + +static inline void profilerControl_Epilogue_DISPATCH(struct Profiler *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__profilerControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS profilerControlLookup_DISPATCH(struct Profiler *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__profilerControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS profilerMap_DISPATCH(struct Profiler *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__profilerMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool profilerAccessCallback_DISPATCH(struct Profiler *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__profilerAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS profilerConstruct_IMPL(struct Profiler *arg_pProfiler, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_profilerConstruct(arg_pProfiler, arg_pCallContext, arg_pParams) profilerConstruct_IMPL(arg_pProfiler, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // PROFILER_V1_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_PROFILER_V1_NVOC_H_ 
diff --git a/src/nvidia/generated/g_profiler_v2_nvoc.c b/src/nvidia/generated/g_profiler_v2_nvoc.c new file mode 100644 index 000000000..23135b7ff --- /dev/null +++ b/src/nvidia/generated/g_profiler_v2_nvoc.c @@ -0,0 +1,1019 @@ +#define NVOC_PROFILER_V2_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_profiler_v2_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4976fc = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ProfilerBase; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_ProfilerBase(ProfilerBase*, RmHalspecOwner* ); +void __nvoc_init_funcTable_ProfilerBase(ProfilerBase*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_ProfilerBase(ProfilerBase*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_ProfilerBase(ProfilerBase*, RmHalspecOwner* ); +void __nvoc_dtor_ProfilerBase(ProfilerBase*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ProfilerBase; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerBase_ProfilerBase = { + /*pClassDef=*/ &__nvoc_class_def_ProfilerBase, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ProfilerBase, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerBase_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerBase, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerBase_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerBase, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerBase_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerBase, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerBase_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerBase, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerBase_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerBase, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_ProfilerBase = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_ProfilerBase_ProfilerBase, + &__nvoc_rtti_ProfilerBase_GpuResource, + &__nvoc_rtti_ProfilerBase_RmResource, + &__nvoc_rtti_ProfilerBase_RmResourceCommon, + &__nvoc_rtti_ProfilerBase_RsResource, + &__nvoc_rtti_ProfilerBase_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_ProfilerBase = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ProfilerBase), + /*classId=*/ classId(ProfilerBase), + /*providerId=*/ 
&__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ProfilerBase", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ProfilerBase, + /*pCastInfo=*/ &__nvoc_castinfo_ProfilerBase, + /*pExportInfo=*/ &__nvoc_export_info_ProfilerBase +}; + +static NvBool __nvoc_thunk_GpuResource_profilerBaseShareCallback(struct ProfilerBase *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerBase_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerBaseControl(struct ProfilerBase *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerBase_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerBaseUnmap(struct ProfilerBase *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerBase_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerBaseGetMemInterMapParams(struct ProfilerBase *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ProfilerBase_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerBaseGetMemoryMappingDescriptor(struct ProfilerBase *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ProfilerBase_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerBaseGetMapAddrSpace(struct ProfilerBase *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerBase_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_profilerBaseGetInternalObjectHandle(struct ProfilerBase *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerBase_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerBaseControlFilter(struct ProfilerBase *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_profilerBaseAddAdditionalDependants(struct RsClient *pClient, struct ProfilerBase *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_profilerBaseGetRefCount(struct ProfilerBase *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerBaseCheckMemInterUnmap(struct ProfilerBase *pRmResource, 
NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ProfilerBase_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerBaseMapTo(struct ProfilerBase *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerBaseControl_Prologue(struct ProfilerBase *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerBaseGetRegBaseOffsetAndSize(struct ProfilerBase *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerBase_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_profilerBaseCanCopy(struct ProfilerBase *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerBaseInternalControlForward(struct ProfilerBase *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerBase_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_profilerBasePreDestruct(struct ProfilerBase *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerBaseUnmapFrom(struct ProfilerBase *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_profilerBaseControl_Epilogue(struct ProfilerBase *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerBaseControlLookup(struct ProfilerBase *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerBaseMap(struct ProfilerBase *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerBase_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_profilerBaseAccessCallback(struct ProfilerBase *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerBase_RmResource.offset), pInvokingClient, pAllocParams, 
accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ProfilerBase[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReserveHwpmLegacy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0101u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdReserveHwpmLegacy" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReleaseHwpmLegacy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0102u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdReleaseHwpmLegacy" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReservePmAreaSmpc_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0103u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdReservePmAreaSmpc" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReleasePmAreaSmpc_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0104u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdReleasePmAreaSmpc" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdAllocPmaStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0105u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdAllocPmaStream" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdFreePmaStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0106u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdFreePmaStream" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdBindPmResources_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0107u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdBindPmResources" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdUnbindPmResources_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0108u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdUnbindPmResources" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdPmaStreamUpdateGetPut_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0109u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdPmaStreamUpdateGetPut" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdExecRegops_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + /*flags=*/ 0x230u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc010au, + /*paramSize=*/ sizeof(NVB0CC_CTRL_EXEC_REG_OPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdExecRegops" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReservePmAreaPcSampler_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc010bu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdReservePmAreaPcSampler" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdReleasePmAreaPcSampler_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc010cu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdReleasePmAreaPcSampler" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdGetTotalHsCredits_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc010du, + /*paramSize=*/ sizeof(NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdGetTotalHsCredits" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
profilerBaseCtrlCmdSetHsCredits_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc010eu, + /*paramSize=*/ sizeof(NVB0CC_CTRL_SET_HS_CREDITS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdSetHsCredits" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdGetHsCredits_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc010fu, + /*paramSize=*/ sizeof(NVB0CC_CTRL_GET_HS_CREDITS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdGetHsCredits" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalAllocPmaStream_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0200u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdInternalAllocPmaStream" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) profilerBaseCtrlCmdInternalPermissionsInit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0xb0cc0203u, + /*paramSize=*/ sizeof(NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ProfilerBase.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "profilerBaseCtrlCmdInternalPermissionsInit" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_ProfilerBase = +{ + /*numEntries=*/ 17, + /*pExportEntries=*/ __nvoc_exported_method_def_ProfilerBase +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_ProfilerBase(ProfilerBase *pThis) { + __nvoc_profilerBaseDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ProfilerBase(ProfilerBase *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_ProfilerBase(ProfilerBase *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ProfilerBase_fail_GpuResource; + __nvoc_init_dataField_ProfilerBase(pThis, pRmhalspecowner); + + status = __nvoc_profilerBaseConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ProfilerBase_fail__init; + goto 
__nvoc_ctor_ProfilerBase_exit; // Success + +__nvoc_ctor_ProfilerBase_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_ProfilerBase_fail_GpuResource: +__nvoc_ctor_ProfilerBase_exit: + + return status; +} + +static void __nvoc_init_funcTable_ProfilerBase_1(ProfilerBase *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdReserveHwpmLegacy__ = &profilerBaseCtrlCmdReserveHwpmLegacy_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdReleaseHwpmLegacy__ = &profilerBaseCtrlCmdReleaseHwpmLegacy_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdReservePmAreaSmpc__ = &profilerBaseCtrlCmdReservePmAreaSmpc_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdReleasePmAreaSmpc__ = &profilerBaseCtrlCmdReleasePmAreaSmpc_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__profilerBaseCtrlCmdAllocPmaStream__ = &profilerBaseCtrlCmdAllocPmaStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdFreePmaStream__ = &profilerBaseCtrlCmdFreePmaStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdBindPmResources__ = &profilerBaseCtrlCmdBindPmResources_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdUnbindPmResources__ = &profilerBaseCtrlCmdUnbindPmResources_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdPmaStreamUpdateGetPut__ = &profilerBaseCtrlCmdPmaStreamUpdateGetPut_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + pThis->__profilerBaseCtrlCmdExecRegops__ = &profilerBaseCtrlCmdExecRegops_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__profilerBaseCtrlCmdInternalAllocPmaStream__ = &profilerBaseCtrlCmdInternalAllocPmaStream_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__profilerBaseCtrlCmdInternalPermissionsInit__ = &profilerBaseCtrlCmdInternalPermissionsInit_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdReservePmAreaPcSampler__ = &profilerBaseCtrlCmdReservePmAreaPcSampler_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdReleasePmAreaPcSampler__ = &profilerBaseCtrlCmdReleasePmAreaPcSampler_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdGetTotalHsCredits__ = &profilerBaseCtrlCmdGetTotalHsCredits_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdGetHsCredits__ = &profilerBaseCtrlCmdGetHsCredits_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__profilerBaseCtrlCmdSetHsCredits__ = &profilerBaseCtrlCmdSetHsCredits_IMPL; +#endif + + pThis->__profilerBaseShareCallback__ = &__nvoc_thunk_GpuResource_profilerBaseShareCallback; + + pThis->__profilerBaseControl__ = &__nvoc_thunk_GpuResource_profilerBaseControl; + + 
pThis->__profilerBaseUnmap__ = &__nvoc_thunk_GpuResource_profilerBaseUnmap; + + pThis->__profilerBaseGetMemInterMapParams__ = &__nvoc_thunk_RmResource_profilerBaseGetMemInterMapParams; + + pThis->__profilerBaseGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_profilerBaseGetMemoryMappingDescriptor; + + pThis->__profilerBaseGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_profilerBaseGetMapAddrSpace; + + pThis->__profilerBaseGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_profilerBaseGetInternalObjectHandle; + + pThis->__profilerBaseControlFilter__ = &__nvoc_thunk_RsResource_profilerBaseControlFilter; + + pThis->__profilerBaseAddAdditionalDependants__ = &__nvoc_thunk_RsResource_profilerBaseAddAdditionalDependants; + + pThis->__profilerBaseGetRefCount__ = &__nvoc_thunk_RsResource_profilerBaseGetRefCount; + + pThis->__profilerBaseCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_profilerBaseCheckMemInterUnmap; + + pThis->__profilerBaseMapTo__ = &__nvoc_thunk_RsResource_profilerBaseMapTo; + + pThis->__profilerBaseControl_Prologue__ = &__nvoc_thunk_RmResource_profilerBaseControl_Prologue; + + pThis->__profilerBaseGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_profilerBaseGetRegBaseOffsetAndSize; + + pThis->__profilerBaseCanCopy__ = &__nvoc_thunk_RsResource_profilerBaseCanCopy; + + pThis->__profilerBaseInternalControlForward__ = &__nvoc_thunk_GpuResource_profilerBaseInternalControlForward; + + pThis->__profilerBasePreDestruct__ = &__nvoc_thunk_RsResource_profilerBasePreDestruct; + + pThis->__profilerBaseUnmapFrom__ = &__nvoc_thunk_RsResource_profilerBaseUnmapFrom; + + pThis->__profilerBaseControl_Epilogue__ = &__nvoc_thunk_RmResource_profilerBaseControl_Epilogue; + + pThis->__profilerBaseControlLookup__ = &__nvoc_thunk_RsResource_profilerBaseControlLookup; + + pThis->__profilerBaseMap__ = &__nvoc_thunk_GpuResource_profilerBaseMap; + + pThis->__profilerBaseAccessCallback__ = &__nvoc_thunk_RmResource_profilerBaseAccessCallback; +} + +void __nvoc_init_funcTable_ProfilerBase(ProfilerBase *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_ProfilerBase_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_ProfilerBase(ProfilerBase *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_ProfilerBase = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_ProfilerBase(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_ProfilerBase(ProfilerBase **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + ProfilerBase *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(ProfilerBase)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(ProfilerBase)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ProfilerBase); + + if (pParent != NULL && !(createFlags & 
NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_ProfilerBase(pThis, pRmhalspecowner); + status = __nvoc_ctor_ProfilerBase(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_ProfilerBase_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_ProfilerBase_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ProfilerBase(ProfilerBase **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_ProfilerBase(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x54d077 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ProfilerDev; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ProfilerBase; + +void __nvoc_init_ProfilerDev(ProfilerDev*, RmHalspecOwner* ); +void __nvoc_init_funcTable_ProfilerDev(ProfilerDev*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_ProfilerDev(ProfilerDev*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_ProfilerDev(ProfilerDev*, RmHalspecOwner* ); +void __nvoc_dtor_ProfilerDev(ProfilerDev*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ProfilerDev; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerDev_ProfilerDev = { + /*pClassDef=*/ &__nvoc_class_def_ProfilerDev, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ProfilerDev, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerDev_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerDev, __nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerDev_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerDev, __nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerDev_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerDev, 
__nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerDev_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerDev, __nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerDev_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerDev, __nvoc_base_ProfilerBase.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ProfilerDev_ProfilerBase = { + /*pClassDef=*/ &__nvoc_class_def_ProfilerBase, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ProfilerDev, __nvoc_base_ProfilerBase), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_ProfilerDev = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_ProfilerDev_ProfilerDev, + &__nvoc_rtti_ProfilerDev_ProfilerBase, + &__nvoc_rtti_ProfilerDev_GpuResource, + &__nvoc_rtti_ProfilerDev_RmResource, + &__nvoc_rtti_ProfilerDev_RmResourceCommon, + &__nvoc_rtti_ProfilerDev_RsResource, + &__nvoc_rtti_ProfilerDev_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_ProfilerDev = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ProfilerDev), + /*classId=*/ classId(ProfilerDev), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ProfilerDev", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ProfilerDev, + /*pCastInfo=*/ &__nvoc_castinfo_ProfilerDev, + /*pExportInfo=*/ &__nvoc_export_info_ProfilerDev +}; + +static NvBool __nvoc_thunk_GpuResource_profilerDevShareCallback(struct ProfilerDev *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerDev_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerDevControl(struct ProfilerDev *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerDev_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerDevUnmap(struct ProfilerDev *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerDev_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerDevGetMemInterMapParams(struct ProfilerDev *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ProfilerDev_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerDevGetMemoryMappingDescriptor(struct ProfilerDev *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ProfilerDev_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerDevGetMapAddrSpace(struct ProfilerDev *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return 
gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerDev_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_profilerDevGetInternalObjectHandle(struct ProfilerDev *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerDev_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerDevControlFilter(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_profilerDevAddAdditionalDependants(struct RsClient *pClient, struct ProfilerDev *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_profilerDevGetRefCount(struct ProfilerDev *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerDevCheckMemInterUnmap(struct ProfilerDev *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ProfilerDev_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerDevMapTo(struct ProfilerDev *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_profilerDevControl_Prologue(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerDevGetRegBaseOffsetAndSize(struct ProfilerDev *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerDev_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_profilerDevCanCopy(struct ProfilerDev *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerDevInternalControlForward(struct ProfilerDev *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerDev_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_profilerDevPreDestruct(struct ProfilerDev *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerDevUnmapFrom(struct ProfilerDev *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RsResource.offset), pParams); +} + +static 
void __nvoc_thunk_RmResource_profilerDevControl_Epilogue(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_profilerDevControlLookup(struct ProfilerDev *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_profilerDevMap(struct ProfilerDev *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ProfilerDev_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_profilerDevAccessCallback(struct ProfilerDev *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ProfilerDev_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_ProfilerDev = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ProfilerBase(ProfilerBase*); +void __nvoc_dtor_ProfilerDev(ProfilerDev *pThis) { + __nvoc_profilerDevDestruct(pThis); + __nvoc_dtor_ProfilerBase(&pThis->__nvoc_base_ProfilerBase); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ProfilerDev(ProfilerDev *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_ProfilerBase(ProfilerBase* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_ProfilerDev(ProfilerDev *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ProfilerBase(&pThis->__nvoc_base_ProfilerBase, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ProfilerDev_fail_ProfilerBase; + __nvoc_init_dataField_ProfilerDev(pThis, pRmhalspecowner); + + status = __nvoc_profilerDevConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ProfilerDev_fail__init; + goto __nvoc_ctor_ProfilerDev_exit; // Success + +__nvoc_ctor_ProfilerDev_fail__init: + __nvoc_dtor_ProfilerBase(&pThis->__nvoc_base_ProfilerBase); +__nvoc_ctor_ProfilerDev_fail_ProfilerBase: +__nvoc_ctor_ProfilerDev_exit: + + return status; +} + +static void __nvoc_init_funcTable_ProfilerDev_1(ProfilerDev *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); 
+ PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__profilerDevShareCallback__ = &__nvoc_thunk_GpuResource_profilerDevShareCallback; + + pThis->__profilerDevControl__ = &__nvoc_thunk_GpuResource_profilerDevControl; + + pThis->__profilerDevUnmap__ = &__nvoc_thunk_GpuResource_profilerDevUnmap; + + pThis->__profilerDevGetMemInterMapParams__ = &__nvoc_thunk_RmResource_profilerDevGetMemInterMapParams; + + pThis->__profilerDevGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_profilerDevGetMemoryMappingDescriptor; + + pThis->__profilerDevGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_profilerDevGetMapAddrSpace; + + pThis->__profilerDevGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_profilerDevGetInternalObjectHandle; + + pThis->__profilerDevControlFilter__ = &__nvoc_thunk_RsResource_profilerDevControlFilter; + + pThis->__profilerDevAddAdditionalDependants__ = &__nvoc_thunk_RsResource_profilerDevAddAdditionalDependants; + + pThis->__profilerDevGetRefCount__ = &__nvoc_thunk_RsResource_profilerDevGetRefCount; + + pThis->__profilerDevCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_profilerDevCheckMemInterUnmap; + + pThis->__profilerDevMapTo__ = &__nvoc_thunk_RsResource_profilerDevMapTo; + + pThis->__profilerDevControl_Prologue__ = &__nvoc_thunk_RmResource_profilerDevControl_Prologue; + + pThis->__profilerDevGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_profilerDevGetRegBaseOffsetAndSize; + + pThis->__profilerDevCanCopy__ = &__nvoc_thunk_RsResource_profilerDevCanCopy; + + pThis->__profilerDevInternalControlForward__ = &__nvoc_thunk_GpuResource_profilerDevInternalControlForward; + + pThis->__profilerDevPreDestruct__ = &__nvoc_thunk_RsResource_profilerDevPreDestruct; + + pThis->__profilerDevUnmapFrom__ = &__nvoc_thunk_RsResource_profilerDevUnmapFrom; + + pThis->__profilerDevControl_Epilogue__ = &__nvoc_thunk_RmResource_profilerDevControl_Epilogue; + + pThis->__profilerDevControlLookup__ = &__nvoc_thunk_RsResource_profilerDevControlLookup; + + pThis->__profilerDevMap__ = &__nvoc_thunk_GpuResource_profilerDevMap; + + pThis->__profilerDevAccessCallback__ = &__nvoc_thunk_RmResource_profilerDevAccessCallback; +} + +void __nvoc_init_funcTable_ProfilerDev(ProfilerDev *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_ProfilerDev_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_ProfilerBase(ProfilerBase*, RmHalspecOwner* ); +void __nvoc_init_ProfilerDev(ProfilerDev *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_ProfilerDev = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ProfilerBase.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_ProfilerBase = &pThis->__nvoc_base_ProfilerBase; + __nvoc_init_ProfilerBase(&pThis->__nvoc_base_ProfilerBase, pRmhalspecowner); + __nvoc_init_funcTable_ProfilerDev(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_ProfilerDev(ProfilerDev **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct 
RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + ProfilerDev *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(ProfilerDev)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(ProfilerDev)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ProfilerDev); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ProfilerBase.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_ProfilerDev(pThis, pRmhalspecowner); + status = __nvoc_ctor_ProfilerDev(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_ProfilerDev_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_ProfilerDev_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ProfilerDev(ProfilerDev **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_ProfilerDev(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_profiler_v2_nvoc.h b/src/nvidia/generated/g_profiler_v2_nvoc.h new file mode 100644 index 000000000..4a25d239b --- /dev/null +++ b/src/nvidia/generated/g_profiler_v2_nvoc.h @@ -0,0 +1,670 @@ +#ifndef _G_PROFILER_V2_NVOC_H_ +#define _G_PROFILER_V2_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_profiler_v2_nvoc.h" + +#ifndef PROFILER_V2_H +#define PROFILER_V2_H + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_resource.h" +#include "gpu/gpu_resource.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrlb0cc.h" +#include "gpu/gpu_halspec.h" +#include "nvoc/utility.h" + +#include "class/clb2cc.h" // MAXWELL_PROFILER_DEVICE + +typedef struct +{ + NvBool bMemoryProfilingPermitted; + NvBool bAdminProfilingPermitted; + NvBool bDevProfilingPermitted; +} PROFILER_CLIENT_PERMISSIONS; + +#ifdef NVOC_PROFILER_V2_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct ProfilerBase { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct ProfilerBase *__nvoc_pbase_ProfilerBase; + NV_STATUS (*__profilerBaseCtrlCmdReserveHwpmLegacy__)(struct ProfilerBase *, NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdReleaseHwpmLegacy__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCtrlCmdReservePmAreaSmpc__)(struct ProfilerBase *, NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdReleasePmAreaSmpc__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCtrlCmdAllocPmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdFreePmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdBindPmResources__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCtrlCmdUnbindPmResources__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCtrlCmdPmaStreamUpdateGetPut__)(struct ProfilerBase *, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdExecRegops__)(struct ProfilerBase *, NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdInternalAllocPmaStream__)(struct ProfilerBase *, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdInternalPermissionsInit__)(struct ProfilerBase *, NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdReservePmAreaPcSampler__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCtrlCmdReleasePmAreaPcSampler__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCtrlCmdGetTotalHsCredits__)(struct ProfilerBase *, NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdGetHsCredits__)(struct ProfilerBase *, NVB0CC_CTRL_GET_HS_CREDITS_PARAMS *); + NV_STATUS (*__profilerBaseCtrlCmdSetHsCredits__)(struct ProfilerBase *, NVB0CC_CTRL_SET_HS_CREDITS_PARAMS *); + NvBool (*__profilerBaseShareCallback__)(struct ProfilerBase *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__profilerBaseControl__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerBaseUnmap__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__profilerBaseGetMemInterMapParams__)(struct ProfilerBase *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__profilerBaseGetMemoryMappingDescriptor__)(struct ProfilerBase *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__profilerBaseGetMapAddrSpace__)(struct ProfilerBase *, struct 
CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__profilerBaseGetInternalObjectHandle__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseControlFilter__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__profilerBaseAddAdditionalDependants__)(struct RsClient *, struct ProfilerBase *, RsResourceRef *); + NvU32 (*__profilerBaseGetRefCount__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseCheckMemInterUnmap__)(struct ProfilerBase *, NvBool); + NV_STATUS (*__profilerBaseMapTo__)(struct ProfilerBase *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__profilerBaseControl_Prologue__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerBaseGetRegBaseOffsetAndSize__)(struct ProfilerBase *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__profilerBaseCanCopy__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseInternalControlForward__)(struct ProfilerBase *, NvU32, void *, NvU32); + void (*__profilerBasePreDestruct__)(struct ProfilerBase *); + NV_STATUS (*__profilerBaseUnmapFrom__)(struct ProfilerBase *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__profilerBaseControl_Epilogue__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerBaseControlLookup__)(struct ProfilerBase *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__profilerBaseMap__)(struct ProfilerBase *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__profilerBaseAccessCallback__)(struct ProfilerBase *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_ProfilerBase_TYPEDEF__ +#define __NVOC_CLASS_ProfilerBase_TYPEDEF__ +typedef struct ProfilerBase ProfilerBase; +#endif /* __NVOC_CLASS_ProfilerBase_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ProfilerBase +#define __nvoc_class_id_ProfilerBase 0x4976fc +#endif /* __nvoc_class_id_ProfilerBase */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ProfilerBase; + +#define __staticCast_ProfilerBase(pThis) \ + ((pThis)->__nvoc_pbase_ProfilerBase) + +#ifdef __nvoc_profiler_v2_h_disabled +#define __dynamicCast_ProfilerBase(pThis) ((ProfilerBase*)NULL) +#else //__nvoc_profiler_v2_h_disabled +#define __dynamicCast_ProfilerBase(pThis) \ + ((ProfilerBase*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ProfilerBase))) +#endif //__nvoc_profiler_v2_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_ProfilerBase(ProfilerBase**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ProfilerBase(ProfilerBase**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_ProfilerBase(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_ProfilerBase((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define profilerBaseCtrlCmdReserveHwpmLegacy(pProfiler, pParams) profilerBaseCtrlCmdReserveHwpmLegacy_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdReleaseHwpmLegacy(pProfiler) profilerBaseCtrlCmdReleaseHwpmLegacy_DISPATCH(pProfiler) +#define profilerBaseCtrlCmdReservePmAreaSmpc(pProfiler, pParams) profilerBaseCtrlCmdReservePmAreaSmpc_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdReleasePmAreaSmpc(pProfiler) profilerBaseCtrlCmdReleasePmAreaSmpc_DISPATCH(pProfiler) +#define profilerBaseCtrlCmdAllocPmaStream(pProfiler, pParams) 
profilerBaseCtrlCmdAllocPmaStream_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdFreePmaStream(pProfiler, pParams) profilerBaseCtrlCmdFreePmaStream_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdBindPmResources(pProfiler) profilerBaseCtrlCmdBindPmResources_DISPATCH(pProfiler) +#define profilerBaseCtrlCmdUnbindPmResources(pProfiler) profilerBaseCtrlCmdUnbindPmResources_DISPATCH(pProfiler) +#define profilerBaseCtrlCmdPmaStreamUpdateGetPut(pProfiler, pParams) profilerBaseCtrlCmdPmaStreamUpdateGetPut_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdExecRegops(pProfiler, pParams) profilerBaseCtrlCmdExecRegops_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdInternalAllocPmaStream(pProfiler, pParams) profilerBaseCtrlCmdInternalAllocPmaStream_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdInternalPermissionsInit(pProfiler, pParams) profilerBaseCtrlCmdInternalPermissionsInit_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdReservePmAreaPcSampler(pProfiler) profilerBaseCtrlCmdReservePmAreaPcSampler_DISPATCH(pProfiler) +#define profilerBaseCtrlCmdReleasePmAreaPcSampler(pProfiler) profilerBaseCtrlCmdReleasePmAreaPcSampler_DISPATCH(pProfiler) +#define profilerBaseCtrlCmdGetTotalHsCredits(pProfiler, pParams) profilerBaseCtrlCmdGetTotalHsCredits_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdGetHsCredits(pProfiler, pParams) profilerBaseCtrlCmdGetHsCredits_DISPATCH(pProfiler, pParams) +#define profilerBaseCtrlCmdSetHsCredits(pProfiler, pParams) profilerBaseCtrlCmdSetHsCredits_DISPATCH(pProfiler, pParams) +#define profilerBaseShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) profilerBaseShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define profilerBaseControl(pGpuResource, pCallContext, pParams) profilerBaseControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define profilerBaseUnmap(pGpuResource, pCallContext, pCpuMapping) profilerBaseUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define profilerBaseGetMemInterMapParams(pRmResource, pParams) profilerBaseGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define profilerBaseGetMemoryMappingDescriptor(pRmResource, ppMemDesc) profilerBaseGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define profilerBaseGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) profilerBaseGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define profilerBaseGetInternalObjectHandle(pGpuResource) profilerBaseGetInternalObjectHandle_DISPATCH(pGpuResource) +#define profilerBaseControlFilter(pResource, pCallContext, pParams) profilerBaseControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define profilerBaseAddAdditionalDependants(pClient, pResource, pReference) profilerBaseAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define profilerBaseGetRefCount(pResource) profilerBaseGetRefCount_DISPATCH(pResource) +#define profilerBaseCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) profilerBaseCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define profilerBaseMapTo(pResource, pParams) profilerBaseMapTo_DISPATCH(pResource, pParams) +#define profilerBaseControl_Prologue(pResource, pCallContext, pParams) profilerBaseControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define profilerBaseGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) profilerBaseGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define 
profilerBaseCanCopy(pResource) profilerBaseCanCopy_DISPATCH(pResource) +#define profilerBaseInternalControlForward(pGpuResource, command, pParams, size) profilerBaseInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define profilerBasePreDestruct(pResource) profilerBasePreDestruct_DISPATCH(pResource) +#define profilerBaseUnmapFrom(pResource, pParams) profilerBaseUnmapFrom_DISPATCH(pResource, pParams) +#define profilerBaseControl_Epilogue(pResource, pCallContext, pParams) profilerBaseControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define profilerBaseControlLookup(pResource, pParams, ppEntry) profilerBaseControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define profilerBaseMap(pGpuResource, pCallContext, pParams, pCpuMapping) profilerBaseMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define profilerBaseAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) profilerBaseAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NV_STATUS profilerBaseConstructState_56cd7a(struct ProfilerBase *pProf, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + return NV_OK; +} + +#ifdef __nvoc_profiler_v2_h_disabled +static inline NV_STATUS profilerBaseConstructState(struct ProfilerBase *pProf, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("ProfilerBase was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_profiler_v2_h_disabled +#define profilerBaseConstructState(pProf, pCallContext, pParams) profilerBaseConstructState_56cd7a(pProf, pCallContext, pParams) +#endif //__nvoc_profiler_v2_h_disabled + +#define profilerBaseConstructState_HAL(pProf, pCallContext, pParams) profilerBaseConstructState(pProf, pCallContext, pParams) + +static inline void profilerBaseDestructState_b3696a(struct ProfilerBase *pProf) { + return; +} + +#ifdef __nvoc_profiler_v2_h_disabled +static inline void profilerBaseDestructState(struct ProfilerBase *pProf) { + NV_ASSERT_FAILED_PRECOMP("ProfilerBase was disabled!"); +} +#else //__nvoc_profiler_v2_h_disabled +#define profilerBaseDestructState(pProf) profilerBaseDestructState_b3696a(pProf) +#endif //__nvoc_profiler_v2_h_disabled + +#define profilerBaseDestructState_HAL(pProf) profilerBaseDestructState(pProf) + +NV_STATUS profilerBaseCtrlCmdReserveHwpmLegacy_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdReserveHwpmLegacy_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_RESERVE_HWPM_LEGACY_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdReserveHwpmLegacy__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdReleaseHwpmLegacy_IMPL(struct ProfilerBase *pProfiler); + +static inline NV_STATUS profilerBaseCtrlCmdReleaseHwpmLegacy_DISPATCH(struct ProfilerBase *pProfiler) { + return pProfiler->__profilerBaseCtrlCmdReleaseHwpmLegacy__(pProfiler); +} + +NV_STATUS profilerBaseCtrlCmdReservePmAreaSmpc_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdReservePmAreaSmpc_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_RESERVE_PM_AREA_SMPC_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdReservePmAreaSmpc__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdReleasePmAreaSmpc_IMPL(struct ProfilerBase *pProfiler); + +static inline NV_STATUS 
profilerBaseCtrlCmdReleasePmAreaSmpc_DISPATCH(struct ProfilerBase *pProfiler) { + return pProfiler->__profilerBaseCtrlCmdReleasePmAreaSmpc__(pProfiler); +} + +NV_STATUS profilerBaseCtrlCmdAllocPmaStream_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdAllocPmaStream_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdAllocPmaStream__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdFreePmaStream_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdFreePmaStream_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_FREE_PMA_STREAM_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdFreePmaStream__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdBindPmResources_IMPL(struct ProfilerBase *pProfiler); + +static inline NV_STATUS profilerBaseCtrlCmdBindPmResources_DISPATCH(struct ProfilerBase *pProfiler) { + return pProfiler->__profilerBaseCtrlCmdBindPmResources__(pProfiler); +} + +NV_STATUS profilerBaseCtrlCmdUnbindPmResources_IMPL(struct ProfilerBase *pProfiler); + +static inline NV_STATUS profilerBaseCtrlCmdUnbindPmResources_DISPATCH(struct ProfilerBase *pProfiler) { + return pProfiler->__profilerBaseCtrlCmdUnbindPmResources__(pProfiler); +} + +NV_STATUS profilerBaseCtrlCmdPmaStreamUpdateGetPut_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdPmaStreamUpdateGetPut_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_PMA_STREAM_UPDATE_GET_PUT_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdPmaStreamUpdateGetPut__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdExecRegops_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdExecRegops_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_EXEC_REG_OPS_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdExecRegops__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdInternalAllocPmaStream_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdInternalAllocPmaStream_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdInternalAllocPmaStream__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdInternalPermissionsInit_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdInternalPermissionsInit_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdInternalPermissionsInit__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdReservePmAreaPcSampler_IMPL(struct ProfilerBase *pProfiler); + +static inline NV_STATUS profilerBaseCtrlCmdReservePmAreaPcSampler_DISPATCH(struct ProfilerBase *pProfiler) { + return pProfiler->__profilerBaseCtrlCmdReservePmAreaPcSampler__(pProfiler); +} + +NV_STATUS profilerBaseCtrlCmdReleasePmAreaPcSampler_IMPL(struct ProfilerBase *pProfiler); + +static inline NV_STATUS profilerBaseCtrlCmdReleasePmAreaPcSampler_DISPATCH(struct ProfilerBase *pProfiler) { + return pProfiler->__profilerBaseCtrlCmdReleasePmAreaPcSampler__(pProfiler); +} + +NV_STATUS 
profilerBaseCtrlCmdGetTotalHsCredits_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdGetTotalHsCredits_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_GET_TOTAL_HS_CREDITS_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdGetTotalHsCredits__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdGetHsCredits_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_GET_HS_CREDITS_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdGetHsCredits_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_GET_HS_CREDITS_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdGetHsCredits__(pProfiler, pParams); +} + +NV_STATUS profilerBaseCtrlCmdSetHsCredits_IMPL(struct ProfilerBase *pProfiler, NVB0CC_CTRL_SET_HS_CREDITS_PARAMS *pParams); + +static inline NV_STATUS profilerBaseCtrlCmdSetHsCredits_DISPATCH(struct ProfilerBase *pProfiler, NVB0CC_CTRL_SET_HS_CREDITS_PARAMS *pParams) { + return pProfiler->__profilerBaseCtrlCmdSetHsCredits__(pProfiler, pParams); +} + +static inline NvBool profilerBaseShareCallback_DISPATCH(struct ProfilerBase *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__profilerBaseShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS profilerBaseControl_DISPATCH(struct ProfilerBase *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__profilerBaseControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS profilerBaseUnmap_DISPATCH(struct ProfilerBase *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__profilerBaseUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS profilerBaseGetMemInterMapParams_DISPATCH(struct ProfilerBase *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__profilerBaseGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS profilerBaseGetMemoryMappingDescriptor_DISPATCH(struct ProfilerBase *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__profilerBaseGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS profilerBaseGetMapAddrSpace_DISPATCH(struct ProfilerBase *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__profilerBaseGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle profilerBaseGetInternalObjectHandle_DISPATCH(struct ProfilerBase *pGpuResource) { + return pGpuResource->__profilerBaseGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS profilerBaseControlFilter_DISPATCH(struct ProfilerBase *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__profilerBaseControlFilter__(pResource, pCallContext, pParams); +} + +static inline void profilerBaseAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ProfilerBase *pResource, RsResourceRef *pReference) { + pResource->__profilerBaseAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 profilerBaseGetRefCount_DISPATCH(struct ProfilerBase *pResource) { + return pResource->__profilerBaseGetRefCount__(pResource); +} + +static inline NV_STATUS 
profilerBaseCheckMemInterUnmap_DISPATCH(struct ProfilerBase *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__profilerBaseCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS profilerBaseMapTo_DISPATCH(struct ProfilerBase *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__profilerBaseMapTo__(pResource, pParams); +} + +static inline NV_STATUS profilerBaseControl_Prologue_DISPATCH(struct ProfilerBase *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__profilerBaseControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS profilerBaseGetRegBaseOffsetAndSize_DISPATCH(struct ProfilerBase *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__profilerBaseGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool profilerBaseCanCopy_DISPATCH(struct ProfilerBase *pResource) { + return pResource->__profilerBaseCanCopy__(pResource); +} + +static inline NV_STATUS profilerBaseInternalControlForward_DISPATCH(struct ProfilerBase *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__profilerBaseInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void profilerBasePreDestruct_DISPATCH(struct ProfilerBase *pResource) { + pResource->__profilerBasePreDestruct__(pResource); +} + +static inline NV_STATUS profilerBaseUnmapFrom_DISPATCH(struct ProfilerBase *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__profilerBaseUnmapFrom__(pResource, pParams); +} + +static inline void profilerBaseControl_Epilogue_DISPATCH(struct ProfilerBase *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__profilerBaseControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS profilerBaseControlLookup_DISPATCH(struct ProfilerBase *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__profilerBaseControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS profilerBaseMap_DISPATCH(struct ProfilerBase *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__profilerBaseMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool profilerBaseAccessCallback_DISPATCH(struct ProfilerBase *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__profilerBaseAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS profilerBaseConstruct_IMPL(struct ProfilerBase *arg_pProf, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_profilerBaseConstruct(arg_pProf, arg_pCallContext, arg_pParams) profilerBaseConstruct_IMPL(arg_pProf, arg_pCallContext, arg_pParams) +void profilerBaseDestruct_IMPL(struct ProfilerBase *pProf); +#define __nvoc_profilerBaseDestruct(pProf) profilerBaseDestruct_IMPL(pProf) +#undef PRIVATE_FIELD + + +#ifdef NVOC_PROFILER_V2_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct ProfilerDev { + const struct NVOC_RTTI *__nvoc_rtti; + struct ProfilerBase __nvoc_base_ProfilerBase; + struct Object *__nvoc_pbase_Object; + 
struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct ProfilerBase *__nvoc_pbase_ProfilerBase; + struct ProfilerDev *__nvoc_pbase_ProfilerDev; + NvBool (*__profilerDevShareCallback__)(struct ProfilerDev *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__profilerDevControl__)(struct ProfilerDev *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerDevUnmap__)(struct ProfilerDev *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__profilerDevGetMemInterMapParams__)(struct ProfilerDev *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__profilerDevGetMemoryMappingDescriptor__)(struct ProfilerDev *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__profilerDevGetMapAddrSpace__)(struct ProfilerDev *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__profilerDevGetInternalObjectHandle__)(struct ProfilerDev *); + NV_STATUS (*__profilerDevControlFilter__)(struct ProfilerDev *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__profilerDevAddAdditionalDependants__)(struct RsClient *, struct ProfilerDev *, RsResourceRef *); + NvU32 (*__profilerDevGetRefCount__)(struct ProfilerDev *); + NV_STATUS (*__profilerDevCheckMemInterUnmap__)(struct ProfilerDev *, NvBool); + NV_STATUS (*__profilerDevMapTo__)(struct ProfilerDev *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__profilerDevControl_Prologue__)(struct ProfilerDev *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerDevGetRegBaseOffsetAndSize__)(struct ProfilerDev *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__profilerDevCanCopy__)(struct ProfilerDev *); + NV_STATUS (*__profilerDevInternalControlForward__)(struct ProfilerDev *, NvU32, void *, NvU32); + void (*__profilerDevPreDestruct__)(struct ProfilerDev *); + NV_STATUS (*__profilerDevUnmapFrom__)(struct ProfilerDev *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__profilerDevControl_Epilogue__)(struct ProfilerDev *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__profilerDevControlLookup__)(struct ProfilerDev *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__profilerDevMap__)(struct ProfilerDev *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__profilerDevAccessCallback__)(struct ProfilerDev *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_ProfilerDev_TYPEDEF__ +#define __NVOC_CLASS_ProfilerDev_TYPEDEF__ +typedef struct ProfilerDev ProfilerDev; +#endif /* __NVOC_CLASS_ProfilerDev_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ProfilerDev +#define __nvoc_class_id_ProfilerDev 0x54d077 +#endif /* __nvoc_class_id_ProfilerDev */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ProfilerDev; + +#define __staticCast_ProfilerDev(pThis) \ + ((pThis)->__nvoc_pbase_ProfilerDev) + +#ifdef __nvoc_profiler_v2_h_disabled +#define __dynamicCast_ProfilerDev(pThis) ((ProfilerDev*)NULL) +#else //__nvoc_profiler_v2_h_disabled +#define __dynamicCast_ProfilerDev(pThis) \ + ((ProfilerDev*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ProfilerDev))) +#endif //__nvoc_profiler_v2_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_ProfilerDev(ProfilerDev**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ProfilerDev(ProfilerDev**, Dynamic*, NvU32, struct 
CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_ProfilerDev(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_ProfilerDev((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define profilerDevShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) profilerDevShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define profilerDevControl(pGpuResource, pCallContext, pParams) profilerDevControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define profilerDevUnmap(pGpuResource, pCallContext, pCpuMapping) profilerDevUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define profilerDevGetMemInterMapParams(pRmResource, pParams) profilerDevGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define profilerDevGetMemoryMappingDescriptor(pRmResource, ppMemDesc) profilerDevGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define profilerDevGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) profilerDevGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define profilerDevGetInternalObjectHandle(pGpuResource) profilerDevGetInternalObjectHandle_DISPATCH(pGpuResource) +#define profilerDevControlFilter(pResource, pCallContext, pParams) profilerDevControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define profilerDevAddAdditionalDependants(pClient, pResource, pReference) profilerDevAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define profilerDevGetRefCount(pResource) profilerDevGetRefCount_DISPATCH(pResource) +#define profilerDevCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) profilerDevCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define profilerDevMapTo(pResource, pParams) profilerDevMapTo_DISPATCH(pResource, pParams) +#define profilerDevControl_Prologue(pResource, pCallContext, pParams) profilerDevControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define profilerDevGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) profilerDevGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define profilerDevCanCopy(pResource) profilerDevCanCopy_DISPATCH(pResource) +#define profilerDevInternalControlForward(pGpuResource, command, pParams, size) profilerDevInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define profilerDevPreDestruct(pResource) profilerDevPreDestruct_DISPATCH(pResource) +#define profilerDevUnmapFrom(pResource, pParams) profilerDevUnmapFrom_DISPATCH(pResource, pParams) +#define profilerDevControl_Epilogue(pResource, pCallContext, pParams) profilerDevControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define profilerDevControlLookup(pResource, pParams, ppEntry) profilerDevControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define profilerDevMap(pGpuResource, pCallContext, pParams, pCpuMapping) profilerDevMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define profilerDevAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) profilerDevAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS profilerDevConstructState_IMPL(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams, PROFILER_CLIENT_PERMISSIONS clientPermissions); + +#ifdef __nvoc_profiler_v2_h_disabled +static inline NV_STATUS profilerDevConstructState(struct 
ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams, PROFILER_CLIENT_PERMISSIONS clientPermissions) { + NV_ASSERT_FAILED_PRECOMP("ProfilerDev was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_profiler_v2_h_disabled +#define profilerDevConstructState(pResource, pCallContext, pParams, clientPermissions) profilerDevConstructState_IMPL(pResource, pCallContext, pParams, clientPermissions) +#endif //__nvoc_profiler_v2_h_disabled + +#define profilerDevConstructState_HAL(pResource, pCallContext, pParams, clientPermissions) profilerDevConstructState(pResource, pCallContext, pParams, clientPermissions) + +NV_STATUS profilerDevConstructStatePrologue_FWCLIENT(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_profiler_v2_h_disabled +static inline NV_STATUS profilerDevConstructStatePrologue(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("ProfilerDev was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_profiler_v2_h_disabled +#define profilerDevConstructStatePrologue(pResource, pCallContext, pParams) profilerDevConstructStatePrologue_FWCLIENT(pResource, pCallContext, pParams) +#endif //__nvoc_profiler_v2_h_disabled + +#define profilerDevConstructStatePrologue_HAL(pResource, pCallContext, pParams) profilerDevConstructStatePrologue(pResource, pCallContext, pParams) + +NV_STATUS profilerDevConstructStateInterlude_IMPL(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams, PROFILER_CLIENT_PERMISSIONS clientPermissions); + +#ifdef __nvoc_profiler_v2_h_disabled +static inline NV_STATUS profilerDevConstructStateInterlude(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams, PROFILER_CLIENT_PERMISSIONS clientPermissions) { + NV_ASSERT_FAILED_PRECOMP("ProfilerDev was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_profiler_v2_h_disabled +#define profilerDevConstructStateInterlude(pResource, pCallContext, pParams, clientPermissions) profilerDevConstructStateInterlude_IMPL(pResource, pCallContext, pParams, clientPermissions) +#endif //__nvoc_profiler_v2_h_disabled + +#define profilerDevConstructStateInterlude_HAL(pResource, pCallContext, pParams, clientPermissions) profilerDevConstructStateInterlude(pResource, pCallContext, pParams, clientPermissions) + +static inline NV_STATUS profilerDevConstructStateEpilogue_56cd7a(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + return NV_OK; +} + +#ifdef __nvoc_profiler_v2_h_disabled +static inline NV_STATUS profilerDevConstructStateEpilogue(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("ProfilerDev was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_profiler_v2_h_disabled +#define profilerDevConstructStateEpilogue(pResource, pCallContext, pParams) profilerDevConstructStateEpilogue_56cd7a(pResource, pCallContext, pParams) +#endif //__nvoc_profiler_v2_h_disabled + +#define profilerDevConstructStateEpilogue_HAL(pResource, pCallContext, pParams) profilerDevConstructStateEpilogue(pResource, pCallContext, pParams) + +NvBool profilerDevQueryCapabilities_IMPL(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_ALLOC_PARAMS_INTERNAL *pParams, PROFILER_CLIENT_PERMISSIONS *pClientPermissions); + +#ifdef __nvoc_profiler_v2_h_disabled +static inline NvBool profilerDevQueryCapabilities(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams, PROFILER_CLIENT_PERMISSIONS *pClientPermissions) { + NV_ASSERT_FAILED_PRECOMP("ProfilerDev was disabled!"); + return NV_FALSE; +} +#else //__nvoc_profiler_v2_h_disabled +#define profilerDevQueryCapabilities(pResource, pCallContext, pParams, pClientPermissions) profilerDevQueryCapabilities_IMPL(pResource, pCallContext, pParams, pClientPermissions) +#endif //__nvoc_profiler_v2_h_disabled + +#define profilerDevQueryCapabilities_HAL(pResource, pCallContext, pParams, pClientPermissions) profilerDevQueryCapabilities(pResource, pCallContext, pParams, pClientPermissions) + +void profilerDevDestructState_FWCLIENT(struct ProfilerDev *pResource); + +#ifdef __nvoc_profiler_v2_h_disabled +static inline void profilerDevDestructState(struct ProfilerDev *pResource) { + NV_ASSERT_FAILED_PRECOMP("ProfilerDev was disabled!"); +} +#else //__nvoc_profiler_v2_h_disabled +#define profilerDevDestructState(pResource) profilerDevDestructState_FWCLIENT(pResource) +#endif //__nvoc_profiler_v2_h_disabled + +#define profilerDevDestructState_HAL(pResource) profilerDevDestructState(pResource) + +static inline NvBool profilerDevShareCallback_DISPATCH(struct ProfilerDev *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__profilerDevShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS profilerDevControl_DISPATCH(struct ProfilerDev *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__profilerDevControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS profilerDevUnmap_DISPATCH(struct ProfilerDev *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__profilerDevUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS profilerDevGetMemInterMapParams_DISPATCH(struct ProfilerDev *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__profilerDevGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS profilerDevGetMemoryMappingDescriptor_DISPATCH(struct ProfilerDev *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__profilerDevGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS profilerDevGetMapAddrSpace_DISPATCH(struct ProfilerDev *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__profilerDevGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle profilerDevGetInternalObjectHandle_DISPATCH(struct ProfilerDev *pGpuResource) { + return pGpuResource->__profilerDevGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS profilerDevControlFilter_DISPATCH(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__profilerDevControlFilter__(pResource, pCallContext, pParams); +} + +static inline void profilerDevAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ProfilerDev *pResource, RsResourceRef *pReference) { + 
pResource->__profilerDevAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 profilerDevGetRefCount_DISPATCH(struct ProfilerDev *pResource) { + return pResource->__profilerDevGetRefCount__(pResource); +} + +static inline NV_STATUS profilerDevCheckMemInterUnmap_DISPATCH(struct ProfilerDev *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__profilerDevCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS profilerDevMapTo_DISPATCH(struct ProfilerDev *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__profilerDevMapTo__(pResource, pParams); +} + +static inline NV_STATUS profilerDevControl_Prologue_DISPATCH(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__profilerDevControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS profilerDevGetRegBaseOffsetAndSize_DISPATCH(struct ProfilerDev *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__profilerDevGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool profilerDevCanCopy_DISPATCH(struct ProfilerDev *pResource) { + return pResource->__profilerDevCanCopy__(pResource); +} + +static inline NV_STATUS profilerDevInternalControlForward_DISPATCH(struct ProfilerDev *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__profilerDevInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void profilerDevPreDestruct_DISPATCH(struct ProfilerDev *pResource) { + pResource->__profilerDevPreDestruct__(pResource); +} + +static inline NV_STATUS profilerDevUnmapFrom_DISPATCH(struct ProfilerDev *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__profilerDevUnmapFrom__(pResource, pParams); +} + +static inline void profilerDevControl_Epilogue_DISPATCH(struct ProfilerDev *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__profilerDevControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS profilerDevControlLookup_DISPATCH(struct ProfilerDev *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__profilerDevControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS profilerDevMap_DISPATCH(struct ProfilerDev *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__profilerDevMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool profilerDevAccessCallback_DISPATCH(struct ProfilerDev *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__profilerDevAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS profilerDevConstruct_IMPL(struct ProfilerDev *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_profilerDevConstruct(arg_pResource, arg_pCallContext, arg_pParams) profilerDevConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void profilerDevDestruct_IMPL(struct ProfilerDev *pResource); +#define __nvoc_profilerDevDestruct(pResource) profilerDevDestruct_IMPL(pResource) +#undef PRIVATE_FIELD + +#endif // PROFILER_V2_H + +#ifdef __cplusplus +} // 
extern "C" +#endif +#endif // _G_PROFILER_V2_NVOC_H_ diff --git a/src/nvidia/generated/g_rc_pb.c b/src/nvidia/generated/g_rc_pb.c new file mode 100644 index 000000000..fc68e02a5 --- /dev/null +++ b/src/nvidia/generated/g_rc_pb.c @@ -0,0 +1,440 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#include "nvtypes.h" +#include "prbrt.h" +#include "g_rc_pb.h" + +// 'RobustChannelError' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_robustchannelerror[] = { + { + 1024, + PRB_MAYBE_ENUM_NAME("ROBUST_CHANNEL_OTHER") + }, +}; + +const PRB_ENUM_DESC prb_enums_rc_robustchannelerror = { + prb_enum_mappings_robustchannelerror, + 1, + PRB_MAYBE_ENUM_NAME("RobustChannelError") +}; + +// 'MMUFaultType' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_mmufaulttype[] = { + { + 255, + PRB_MAYBE_ENUM_NAME("NV_PFAULT_FAULT_TYPE_UNKNOWN") + }, +}; + +const PRB_ENUM_DESC prb_enums_rc_mmufaulttype = { + prb_enum_mappings_mmufaulttype, + 1, + PRB_MAYBE_ENUM_NAME("MMUFaultType") +}; + +// 'MMUErrSrc' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_mmuerrsrc[] = { + { + 255, + PRB_MAYBE_ENUM_NAME("NV_PFIFO_INTR_MMU_FAULT_INFO_CLIENT_UNKNOWN") + }, +}; + +const PRB_ENUM_DESC prb_enums_rc_mmuerrsrc = { + prb_enum_mappings_mmuerrsrc, + 1, + PRB_MAYBE_ENUM_NAME("MMUErrSrc") +}; + +// 'MMUFltAccessType' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_mmufltaccesstype[] = { + { + 0, + PRB_MAYBE_ENUM_NAME("NV_PFAULT_ACCESS_TYPE_VIRT_READ") + }, + { + 1, + PRB_MAYBE_ENUM_NAME("NV_PFAULT_ACCESS_TYPE_VIRT_WRITE") + }, +}; + +const PRB_ENUM_DESC prb_enums_rc_mmufltaccesstype = { + prb_enum_mappings_mmufltaccesstype, + 2, + PRB_MAYBE_ENUM_NAME("MMUFltAccessType") +}; + +// 'PBDMAErrType' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_pbdmaerrtype[] = { + { + 64, + PRB_MAYBE_ENUM_NAME("PBDMA_ERR_UNKNOWN") + }, +}; + +const PRB_ENUM_DESC prb_enums_rc_pbdmaerrtype = { + prb_enum_mappings_pbdmaerrtype, + 1, + PRB_MAYBE_ENUM_NAME("PBDMAErrType") +}; + +// 'RcDiagRecordType' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_rcdiagrecordtype[] = { + { + 1, + PRB_MAYBE_ENUM_NAME("GRSTATUS") + }, + { + 2, + PRB_MAYBE_ENUM_NAME("GPCSTATUS") + }, +}; + +const PRB_ENUM_DESC prb_enums_rc_rcdiagrecordtype = { + prb_enum_mappings_rcdiagrecordtype, + 2, + PRB_MAYBE_ENUM_NAME("RcDiagRecordType") +}; + +// 'GenericData' field defaults + +// 'GenericData' field descriptors +const PRB_FIELD_DESC prb_fields_rc_genericdata[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("nv_agpconf_cmd") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("nb_agpconf_cmd") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("error_context") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("channel_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_rc_robustchannelerror, + PRB_MAYBE_FIELD_NAME("error_type") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("pushbuffer_space") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 7, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("time") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 8, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, 
+ }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("gpu_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 9, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("error_number") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 10, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("system_time") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'RcCounter' field defaults + +// 'RcCounter' field descriptors +const PRB_FIELD_DESC prb_fields_rc_rccounter[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_rc_robustchannelerror, + PRB_MAYBE_FIELD_NAME("rcErrorType") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("count") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_rc_mmufaulttype, + PRB_MAYBE_FIELD_NAME("rcFaultType") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 4, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_rc_mmuerrsrc, + PRB_MAYBE_FIELD_NAME("rcErrorSrc") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 5, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("rcLastCHID") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 6, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("rcLastTime") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 7, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_gr_gr_err_typ, + PRB_MAYBE_FIELD_NAME("rcGRErrType") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 8, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_gr_gr_excptn_subtyp, + PRB_MAYBE_FIELD_NAME("rcGRExcptnType") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 9, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("rcPbdmaID") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 10, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_rc_pbdmaerrtype, + PRB_MAYBE_FIELD_NAME("rcPbdmaErr") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 11, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("mmuFaultVA") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 12, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_rc_mmufltaccesstype, + PRB_MAYBE_FIELD_NAME("mmuFaultAccessType") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// 'RcDiagRecord' field defaults + +// 'RcDiagRecord' field descriptors +const PRB_FIELD_DESC prb_fields_rc_rcdiagrecord[] = { + { + 1, + { + PRB_OPTIONAL, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_rc_rcdiagrecordtype, + PRB_MAYBE_FIELD_NAME("record_type") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT32, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("record_id") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_REPEATED, + PRB_MESSAGE, + 0, + }, + REGS_REGSANDMEM, + 0, + PRB_MAYBE_FIELD_NAME("regs") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// Message descriptors +const PRB_MSG_DESC prb_messages_rc[] = { + { + 10, + prb_fields_rc_genericdata, + PRB_MAYBE_MESSAGE_NAME("Rc.GenericData") + }, + { + 12, + prb_fields_rc_rccounter, + PRB_MAYBE_MESSAGE_NAME("Rc.RcCounter") + }, + { + 3, + prb_fields_rc_rcdiagrecord, + PRB_MAYBE_MESSAGE_NAME("Rc.RcDiagRecord") + }, +}; + +// Service descriptors +const PRB_SERVICE_DESC prb_services_rc[] = { + { 0 } +}; + diff --git a/src/nvidia/generated/g_rc_pb.h b/src/nvidia/generated/g_rc_pb.h new file mode 100644 index 000000000..ff24cc8aa --- /dev/null +++ b/src/nvidia/generated/g_rc_pb.h @@ -0,0 +1,126 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+ +#ifndef G_RC_PB_H__ +#define G_RC_PB_H__ + +#include "g_regs_pb.h" +#include "g_gr_pb.h" + +extern const PRB_ENUM_DESC prb_enums_rc_robustchannelerror; + +// 'RobustChannelError' enumeration values +#define RC_ROBUST_CHANNEL_OTHER 1024 + +extern const PRB_ENUM_DESC prb_enums_rc_mmufaulttype; + +// 'MMUFaultType' enumeration values +#define RC_NV_PFAULT_FAULT_TYPE_UNKNOWN 255 + +extern const PRB_ENUM_DESC prb_enums_rc_mmuerrsrc; + +// 'MMUErrSrc' enumeration values +#define RC_NV_PFIFO_INTR_MMU_FAULT_INFO_CLIENT_UNKNOWN 255 + +extern const PRB_ENUM_DESC prb_enums_rc_mmufltaccesstype; + +// 'MMUFltAccessType' enumeration values +#define RC_NV_PFAULT_ACCESS_TYPE_VIRT_READ 0 +#define RC_NV_PFAULT_ACCESS_TYPE_VIRT_WRITE 1 + +extern const PRB_ENUM_DESC prb_enums_rc_pbdmaerrtype; + +// 'PBDMAErrType' enumeration values +#define RC_PBDMA_ERR_UNKNOWN 64 + +extern const PRB_ENUM_DESC prb_enums_rc_rcdiagrecordtype; + +// 'RcDiagRecordType' enumeration values +#define RC_GRSTATUS 1 +#define RC_GPCSTATUS 2 + +extern const PRB_MSG_DESC prb_messages_rc[]; + +// Message descriptor pointers +#define RC_GENERICDATA (&prb_messages_rc[0]) +#define RC_RCCOUNTER (&prb_messages_rc[1]) +#define RC_RCDIAGRECORD (&prb_messages_rc[2]) + +// Message maximum lengths +// Does not include repeated fields, strings and byte arrays. +#define RC_GENERICDATA_LEN 67 +#define RC_RCCOUNTER_LEN 61 +#define RC_RCDIAGRECORD_LEN 39 + +extern const PRB_FIELD_DESC prb_fields_rc_genericdata[]; + +// 'GenericData' field descriptor pointers +#define RC_GENERICDATA_NV_AGPCONF_CMD (&prb_fields_rc_genericdata[0]) +#define RC_GENERICDATA_NB_AGPCONF_CMD (&prb_fields_rc_genericdata[1]) +#define RC_GENERICDATA_ERROR_CONTEXT (&prb_fields_rc_genericdata[2]) +#define RC_GENERICDATA_CHANNEL_ID (&prb_fields_rc_genericdata[3]) +#define RC_GENERICDATA_ERROR_TYPE (&prb_fields_rc_genericdata[4]) +#define RC_GENERICDATA_PUSHBUFFER_SPACE (&prb_fields_rc_genericdata[5]) +#define RC_GENERICDATA_TIME (&prb_fields_rc_genericdata[6]) +#define RC_GENERICDATA_GPU_ID (&prb_fields_rc_genericdata[7]) +#define RC_GENERICDATA_ERROR_NUMBER (&prb_fields_rc_genericdata[8]) +#define RC_GENERICDATA_SYSTEM_TIME (&prb_fields_rc_genericdata[9]) + +// 'GenericData' field lengths +#define RC_GENERICDATA_NV_AGPCONF_CMD_LEN 5 +#define RC_GENERICDATA_NB_AGPCONF_CMD_LEN 5 +#define RC_GENERICDATA_ERROR_CONTEXT_LEN 5 +#define RC_GENERICDATA_CHANNEL_ID_LEN 5 +#define RC_GENERICDATA_ERROR_TYPE_LEN 2 +#define RC_GENERICDATA_PUSHBUFFER_SPACE_LEN 5 +#define RC_GENERICDATA_TIME_LEN 10 +#define RC_GENERICDATA_GPU_ID_LEN 5 +#define RC_GENERICDATA_ERROR_NUMBER_LEN 5 +#define RC_GENERICDATA_SYSTEM_TIME_LEN 10 + +extern const PRB_FIELD_DESC prb_fields_rc_rccounter[]; + +// 'RcCounter' field descriptor pointers +#define RC_RCCOUNTER_RCERRORTYPE (&prb_fields_rc_rccounter[0]) +#define RC_RCCOUNTER_COUNT (&prb_fields_rc_rccounter[1]) +#define RC_RCCOUNTER_RCFAULTTYPE (&prb_fields_rc_rccounter[2]) +#define RC_RCCOUNTER_RCERRORSRC (&prb_fields_rc_rccounter[3]) +#define RC_RCCOUNTER_RCLASTCHID (&prb_fields_rc_rccounter[4]) +#define RC_RCCOUNTER_RCLASTTIME (&prb_fields_rc_rccounter[5]) +#define RC_RCCOUNTER_RCGRERRTYPE (&prb_fields_rc_rccounter[6]) +#define RC_RCCOUNTER_RCGREXCPTNTYPE (&prb_fields_rc_rccounter[7]) +#define RC_RCCOUNTER_RCPBDMAID (&prb_fields_rc_rccounter[8]) +#define RC_RCCOUNTER_RCPBDMAERR (&prb_fields_rc_rccounter[9]) +#define RC_RCCOUNTER_MMUFAULTVA (&prb_fields_rc_rccounter[10]) +#define RC_RCCOUNTER_MMUFAULTACCESSTYPE (&prb_fields_rc_rccounter[11]) + +// 'RcCounter' field 
lengths +#define RC_RCCOUNTER_RCERRORTYPE_LEN 2 +#define RC_RCCOUNTER_COUNT_LEN 5 +#define RC_RCCOUNTER_RCFAULTTYPE_LEN 2 +#define RC_RCCOUNTER_RCERRORSRC_LEN 2 +#define RC_RCCOUNTER_RCLASTCHID_LEN 5 +#define RC_RCCOUNTER_RCLASTTIME_LEN 10 +#define RC_RCCOUNTER_RCGRERRTYPE_LEN 2 +#define RC_RCCOUNTER_RCGREXCPTNTYPE_LEN 2 +#define RC_RCCOUNTER_RCPBDMAID_LEN 5 +#define RC_RCCOUNTER_RCPBDMAERR_LEN 2 +#define RC_RCCOUNTER_MMUFAULTVA_LEN 10 +#define RC_RCCOUNTER_MMUFAULTACCESSTYPE_LEN 2 + +extern const PRB_FIELD_DESC prb_fields_rc_rcdiagrecord[]; + +// 'RcDiagRecord' field descriptor pointers +#define RC_RCDIAGRECORD_RECORD_TYPE (&prb_fields_rc_rcdiagrecord[0]) +#define RC_RCDIAGRECORD_RECORD_ID (&prb_fields_rc_rcdiagrecord[1]) +#define RC_RCDIAGRECORD_REGS (&prb_fields_rc_rcdiagrecord[2]) + +// 'RcDiagRecord' field lengths +#define RC_RCDIAGRECORD_RECORD_TYPE_LEN 2 +#define RC_RCDIAGRECORD_RECORD_ID_LEN 5 +#define RC_RCDIAGRECORD_REGS_LEN 29 + +extern const PRB_SERVICE_DESC prb_services_rc[]; + +// Service descriptor pointers + +#endif // G_RC_PB_H__ diff --git a/src/nvidia/generated/g_ref_count_nvoc.c b/src/nvidia/generated/g_ref_count_nvoc.c new file mode 100644 index 000000000..f38517b34 --- /dev/null +++ b/src/nvidia/generated/g_ref_count_nvoc.c @@ -0,0 +1,158 @@ +#define NVOC_REF_COUNT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_ref_count_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xf89281 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJREFCNT; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJREFCNT(OBJREFCNT*); +void __nvoc_init_funcTable_OBJREFCNT(OBJREFCNT*); +NV_STATUS __nvoc_ctor_OBJREFCNT(OBJREFCNT*, Dynamic * arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback * arg_pStateChangeCallback, RefcntResetCallback * arg_pResetCallback); +void __nvoc_init_dataField_OBJREFCNT(OBJREFCNT*); +void __nvoc_dtor_OBJREFCNT(OBJREFCNT*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJREFCNT; + +static const struct NVOC_RTTI __nvoc_rtti_OBJREFCNT_OBJREFCNT = { + /*pClassDef=*/ &__nvoc_class_def_OBJREFCNT, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJREFCNT, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJREFCNT_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJREFCNT, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJREFCNT = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJREFCNT_OBJREFCNT, + &__nvoc_rtti_OBJREFCNT_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJREFCNT = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJREFCNT), + /*classId=*/ classId(OBJREFCNT), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJREFCNT", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJREFCNT, + /*pCastInfo=*/ &__nvoc_castinfo_OBJREFCNT, + /*pExportInfo=*/ &__nvoc_export_info_OBJREFCNT +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJREFCNT = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJREFCNT(OBJREFCNT *pThis) { + __nvoc_refcntDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void 
__nvoc_init_dataField_OBJREFCNT(OBJREFCNT *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJREFCNT(OBJREFCNT *pThis, Dynamic * arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback * arg_pStateChangeCallback, RefcntResetCallback * arg_pResetCallback) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJREFCNT_fail_Object; + __nvoc_init_dataField_OBJREFCNT(pThis); + + status = __nvoc_refcntConstruct(pThis, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback); + if (status != NV_OK) goto __nvoc_ctor_OBJREFCNT_fail__init; + goto __nvoc_ctor_OBJREFCNT_exit; // Success + +__nvoc_ctor_OBJREFCNT_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJREFCNT_fail_Object: +__nvoc_ctor_OBJREFCNT_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJREFCNT_1(OBJREFCNT *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJREFCNT(OBJREFCNT *pThis) { + __nvoc_init_funcTable_OBJREFCNT_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJREFCNT(OBJREFCNT *pThis) { + pThis->__nvoc_pbase_OBJREFCNT = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJREFCNT(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJREFCNT(OBJREFCNT **ppThis, Dynamic *pParent, NvU32 createFlags, Dynamic * arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback * arg_pStateChangeCallback, RefcntResetCallback * arg_pResetCallback) { + NV_STATUS status; + Object *pParentObj; + OBJREFCNT *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJREFCNT)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJREFCNT)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJREFCNT); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJREFCNT(pThis); + status = __nvoc_ctor_OBJREFCNT(pThis, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback); + if (status != NV_OK) goto __nvoc_objCreate_OBJREFCNT_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJREFCNT_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJREFCNT(OBJREFCNT **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + Dynamic * arg_pParent = va_arg(args, Dynamic *); + NvU32 arg_tag = va_arg(args, NvU32); + RefcntStateChangeCallback * arg_pStateChangeCallback = va_arg(args, RefcntStateChangeCallback *); + RefcntResetCallback * arg_pResetCallback = va_arg(args, RefcntResetCallback *); + + status = __nvoc_objCreate_OBJREFCNT(ppThis, pParent, createFlags, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback); + + return status; +} + diff --git a/src/nvidia/generated/g_ref_count_nvoc.h b/src/nvidia/generated/g_ref_count_nvoc.h new file mode 100644 index 000000000..4d39c80e0 --- /dev/null +++ b/src/nvidia/generated/g_ref_count_nvoc.h @@ -0,0 +1,183 @@ +#ifndef _G_REF_COUNT_NVOC_H_ +#define _G_REF_COUNT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 
2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_ref_count_nvoc.h" + +#ifndef REF_COUNT_H +#define REF_COUNT_H + +/****************** Resource Manager Defines and Structures *****************\ +* * +* Defines and structures used for the Reference-Counting Object. * +* * +\****************************************************************************/ + +#include "containers/map.h" +#include "nvoc/object.h" + +#define NV_REQUESTER_INIT NV_U64_MIN +#define NV_REQUESTER_RM NV_U64_MAX +#define NV_REQUESTER_CLIENT_OBJECT(c,o) (((NvU64)(c) << 32) | o) + +typedef enum +{ + REFCNT_STATE_DEFAULT = 0, + REFCNT_STATE_ENABLED, + REFCNT_STATE_DISABLED, + REFCNT_STATE_ERROR, +} REFCNT_STATE; + +typedef struct +{ + NvU32 numReferences; +} REFCNT_REQUESTER_ENTRY, *PREFCNT_REQUESTER_ENTRY; + +MAKE_MAP(REFCNT_REQUESTER_ENTRY_MAP, REFCNT_REQUESTER_ENTRY); + +typedef struct OBJREFCNT *POBJREFCNT; + +#ifndef __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +#define __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +typedef struct OBJREFCNT OBJREFCNT; +#endif /* __NVOC_CLASS_OBJREFCNT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJREFCNT +#define __nvoc_class_id_OBJREFCNT 0xf89281 +#endif /* __nvoc_class_id_OBJREFCNT */ + + + +// +// XXX-IOM: +// These callback types are good candidates to be replaced with IOM +// functionality, where small derived classes can be created on a 'callback' +// base interface, should that become more practical (currently, adding any +// kind of class still requires a non-trivial amount of boilerplate to wire +// up). 
+// +typedef NV_STATUS RefcntStateChangeCallback(POBJREFCNT, Dynamic *, + REFCNT_STATE, REFCNT_STATE); + +typedef void RefcntResetCallback(POBJREFCNT, Dynamic *, NvU64); + +#ifdef NVOC_REF_COUNT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJREFCNT { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJREFCNT *__nvoc_pbase_OBJREFCNT; + NvBool PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS; + Dynamic *pParent; + NvU32 tag; + REFCNT_REQUESTER_ENTRY_MAP requesterTree; + REFCNT_STATE state; + NvU32 count; + RefcntStateChangeCallback *refcntStateChangeCallback; + RefcntResetCallback *refcntResetCallback; +}; + +#ifndef __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +#define __NVOC_CLASS_OBJREFCNT_TYPEDEF__ +typedef struct OBJREFCNT OBJREFCNT; +#endif /* __NVOC_CLASS_OBJREFCNT_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJREFCNT +#define __nvoc_class_id_OBJREFCNT 0xf89281 +#endif /* __nvoc_class_id_OBJREFCNT */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJREFCNT; + +#define __staticCast_OBJREFCNT(pThis) \ + ((pThis)->__nvoc_pbase_OBJREFCNT) + +#ifdef __nvoc_ref_count_h_disabled +#define __dynamicCast_OBJREFCNT(pThis) ((OBJREFCNT*)NULL) +#else //__nvoc_ref_count_h_disabled +#define __dynamicCast_OBJREFCNT(pThis) \ + ((OBJREFCNT*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJREFCNT))) +#endif //__nvoc_ref_count_h_disabled + +#define PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS_BASE_CAST +#define PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS_BASE_NAME PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS + +NV_STATUS __nvoc_objCreateDynamic_OBJREFCNT(OBJREFCNT**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJREFCNT(OBJREFCNT**, Dynamic*, NvU32, Dynamic * arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback * arg_pStateChangeCallback, RefcntResetCallback * arg_pResetCallback); +#define __objCreate_OBJREFCNT(ppNewObj, pParent, createFlags, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) \ + __nvoc_objCreate_OBJREFCNT((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) + +NV_STATUS refcntConstruct_IMPL(POBJREFCNT arg_pRefcnt, Dynamic *arg_pParent, NvU32 arg_tag, RefcntStateChangeCallback *arg_pStateChangeCallback, RefcntResetCallback *arg_pResetCallback); +#define __nvoc_refcntConstruct(arg_pRefcnt, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) refcntConstruct_IMPL(arg_pRefcnt, arg_pParent, arg_tag, arg_pStateChangeCallback, arg_pResetCallback) +void refcntDestruct_IMPL(POBJREFCNT pRefcnt); +#define __nvoc_refcntDestruct(pRefcnt) refcntDestruct_IMPL(pRefcnt) +NV_STATUS refcntRequestReference_IMPL(POBJREFCNT pRefcnt, NvU64 arg0, NvU32 arg1, NvBool arg2); +#ifdef __nvoc_ref_count_h_disabled +static inline NV_STATUS refcntRequestReference(POBJREFCNT pRefcnt, NvU64 arg0, NvU32 arg1, NvBool arg2) { + NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_ref_count_h_disabled +#define refcntRequestReference(pRefcnt, arg0, arg1, arg2) refcntRequestReference_IMPL(pRefcnt, arg0, arg1, arg2) +#endif //__nvoc_ref_count_h_disabled + +NV_STATUS refcntReleaseReferences_IMPL(POBJREFCNT pRefcnt, NvU64 arg0, NvBool arg1); +#ifdef __nvoc_ref_count_h_disabled +static inline NV_STATUS refcntReleaseReferences(POBJREFCNT pRefcnt, NvU64 arg0, NvBool arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!"); + 
return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_ref_count_h_disabled +#define refcntReleaseReferences(pRefcnt, arg0, arg1) refcntReleaseReferences_IMPL(pRefcnt, arg0, arg1) +#endif //__nvoc_ref_count_h_disabled + +NV_STATUS refcntReset_IMPL(POBJREFCNT pRefcnt, NvBool arg0); +#ifdef __nvoc_ref_count_h_disabled +static inline NV_STATUS refcntReset(POBJREFCNT pRefcnt, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJREFCNT was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_ref_count_h_disabled +#define refcntReset(pRefcnt, arg0) refcntReset_IMPL(pRefcnt, arg0) +#endif //__nvoc_ref_count_h_disabled + +#undef PRIVATE_FIELD + + +#endif // REF_COUNT_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_REF_COUNT_NVOC_H_ diff --git a/src/nvidia/generated/g_reg_mem_nvoc.c b/src/nvidia/generated/g_reg_mem_nvoc.c new file mode 100644 index 000000000..c54ea8f80 --- /dev/null +++ b/src/nvidia/generated/g_reg_mem_nvoc.c @@ -0,0 +1,323 @@ +#define NVOC_REG_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_reg_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x40d457 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RegisterMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_RegisterMemory(RegisterMemory*); +void __nvoc_init_funcTable_RegisterMemory(RegisterMemory*); +NV_STATUS __nvoc_ctor_RegisterMemory(RegisterMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RegisterMemory(RegisterMemory*); +void __nvoc_dtor_RegisterMemory(RegisterMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RegisterMemory; + +static const struct NVOC_RTTI __nvoc_rtti_RegisterMemory_RegisterMemory = { + /*pClassDef=*/ &__nvoc_class_def_RegisterMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RegisterMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RegisterMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RegisterMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RegisterMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RegisterMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RegisterMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RegisterMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RegisterMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RegisterMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RegisterMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + 
/*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RegisterMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RegisterMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_RegisterMemory_RegisterMemory, + &__nvoc_rtti_RegisterMemory_Memory, + &__nvoc_rtti_RegisterMemory_RmResource, + &__nvoc_rtti_RegisterMemory_RmResourceCommon, + &__nvoc_rtti_RegisterMemory_RsResource, + &__nvoc_rtti_RegisterMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RegisterMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RegisterMemory), + /*classId=*/ classId(RegisterMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RegisterMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RegisterMemory, + /*pCastInfo=*/ &__nvoc_castinfo_RegisterMemory, + /*pExportInfo=*/ &__nvoc_export_info_RegisterMemory +}; + +static NvBool __nvoc_thunk_RegisterMemory_resCanCopy(struct RsResource *pRegisterMemory) { + return regmemCanCopy((struct RegisterMemory *)(((unsigned char *)pRegisterMemory) - __nvoc_rtti_RegisterMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemCheckMemInterUnmap(struct RegisterMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemControl(struct RegisterMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemUnmap(struct RegisterMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemGetMemInterMapParams(struct RegisterMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemGetMemoryMappingDescriptor(struct RegisterMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemGetMapAddrSpace(struct RegisterMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_regmemShareCallback(struct RegisterMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_regmemControlFilter(struct RegisterMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char 
*)pResource) + __nvoc_rtti_RegisterMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_regmemAddAdditionalDependants(struct RsClient *pClient, struct RegisterMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_regmemGetRefCount(struct RegisterMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_regmemMapTo(struct RegisterMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_regmemControl_Prologue(struct RegisterMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemIsReady(struct RegisterMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemCheckCopyPermissions(struct RegisterMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_regmemPreDestruct(struct RegisterMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_regmemUnmapFrom(struct RegisterMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_regmemControl_Epilogue(struct RegisterMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_regmemControlLookup(struct RegisterMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_regmemMap(struct RegisterMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_RegisterMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_regmemAccessCallback(struct RegisterMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_RegisterMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + 
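/*
 * The generated thunks above all follow one pattern: NVOC emulates virtual
 * dispatch in C by shifting the object pointer by the RTTI offset of the
 * embedded base struct before calling the base-class implementation. A
 * minimal, self-contained sketch of that idea follows; the names Base,
 * Derived, and baseDoWork are illustrative only and are not part of the
 * generated sources.
 */

#include <stddef.h>

typedef struct Base    { int value; } Base;
typedef struct Derived { int tag; Base base; } Derived;  /* base embedded at a nonzero offset */

static int baseDoWork(Base *pBase) { return pBase->value; }

/* Thunk: recover the embedded Base from a Derived pointer using its offset,
 * mirroring the "(unsigned char *)pMemory + __nvoc_rtti_..._Memory.offset"
 * casts in the thunks above. */
static int derivedDoWork(Derived *pDerived)
{
    return baseDoWork((Base *)((unsigned char *)pDerived + offsetof(Derived, base)));
}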
+const struct NVOC_EXPORT_INFO __nvoc_export_info_RegisterMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_RegisterMemory(RegisterMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RegisterMemory(RegisterMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RegisterMemory(RegisterMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RegisterMemory_fail_Memory; + __nvoc_init_dataField_RegisterMemory(pThis); + + status = __nvoc_regmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RegisterMemory_fail__init; + goto __nvoc_ctor_RegisterMemory_exit; // Success + +__nvoc_ctor_RegisterMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_RegisterMemory_fail_Memory: +__nvoc_ctor_RegisterMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_RegisterMemory_1(RegisterMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__regmemCanCopy__ = &regmemCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_RegisterMemory_resCanCopy; + + pThis->__regmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_regmemCheckMemInterUnmap; + + pThis->__regmemControl__ = &__nvoc_thunk_Memory_regmemControl; + + pThis->__regmemUnmap__ = &__nvoc_thunk_Memory_regmemUnmap; + + pThis->__regmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_regmemGetMemInterMapParams; + + pThis->__regmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_regmemGetMemoryMappingDescriptor; + + pThis->__regmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_regmemGetMapAddrSpace; + + pThis->__regmemShareCallback__ = &__nvoc_thunk_RmResource_regmemShareCallback; + + pThis->__regmemControlFilter__ = &__nvoc_thunk_RsResource_regmemControlFilter; + + pThis->__regmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_regmemAddAdditionalDependants; + + pThis->__regmemGetRefCount__ = &__nvoc_thunk_RsResource_regmemGetRefCount; + + pThis->__regmemMapTo__ = &__nvoc_thunk_RsResource_regmemMapTo; + + pThis->__regmemControl_Prologue__ = &__nvoc_thunk_RmResource_regmemControl_Prologue; + + pThis->__regmemIsReady__ = &__nvoc_thunk_Memory_regmemIsReady; + + pThis->__regmemCheckCopyPermissions__ = &__nvoc_thunk_Memory_regmemCheckCopyPermissions; + + pThis->__regmemPreDestruct__ = &__nvoc_thunk_RsResource_regmemPreDestruct; + + pThis->__regmemUnmapFrom__ = &__nvoc_thunk_RsResource_regmemUnmapFrom; + + pThis->__regmemControl_Epilogue__ = &__nvoc_thunk_RmResource_regmemControl_Epilogue; + + pThis->__regmemControlLookup__ = &__nvoc_thunk_RsResource_regmemControlLookup; + + pThis->__regmemMap__ = &__nvoc_thunk_Memory_regmemMap; + + pThis->__regmemAccessCallback__ = &__nvoc_thunk_RmResource_regmemAccessCallback; +} + +void __nvoc_init_funcTable_RegisterMemory(RegisterMemory *pThis) { + __nvoc_init_funcTable_RegisterMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_RegisterMemory(RegisterMemory *pThis) { + pThis->__nvoc_pbase_RegisterMemory = pThis; + pThis->__nvoc_pbase_Object = 
&pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_RegisterMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_RegisterMemory(RegisterMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RegisterMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(RegisterMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RegisterMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RegisterMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RegisterMemory(pThis); + status = __nvoc_ctor_RegisterMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RegisterMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RegisterMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RegisterMemory(RegisterMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RegisterMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_reg_mem_nvoc.h b/src/nvidia/generated/g_reg_mem_nvoc.h new file mode 100644 index 000000000..062965ac7 --- /dev/null +++ b/src/nvidia/generated/g_reg_mem_nvoc.h @@ -0,0 +1,226 @@ +#ifndef _G_REG_MEM_NVOC_H_ +#define _G_REG_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_reg_mem_nvoc.h" + +#ifndef _REGISTER_MEMORY_H_ +#define _REGISTER_MEMORY_H_ + +#include "mem_mgr/mem.h" + +/*! + * Memory allocation representing GPU register space + * + * For security and maintenance reasons we want to phase this class out. + */ +#ifdef NVOC_REG_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RegisterMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct RegisterMemory *__nvoc_pbase_RegisterMemory; + NvBool (*__regmemCanCopy__)(struct RegisterMemory *); + NV_STATUS (*__regmemCheckMemInterUnmap__)(struct RegisterMemory *, NvBool); + NV_STATUS (*__regmemControl__)(struct RegisterMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__regmemUnmap__)(struct RegisterMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__regmemGetMemInterMapParams__)(struct RegisterMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__regmemGetMemoryMappingDescriptor__)(struct RegisterMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__regmemGetMapAddrSpace__)(struct RegisterMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__regmemShareCallback__)(struct RegisterMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__regmemControlFilter__)(struct RegisterMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__regmemAddAdditionalDependants__)(struct RsClient *, struct RegisterMemory *, RsResourceRef *); + NvU32 (*__regmemGetRefCount__)(struct RegisterMemory *); + NV_STATUS (*__regmemMapTo__)(struct RegisterMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__regmemControl_Prologue__)(struct RegisterMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__regmemIsReady__)(struct RegisterMemory *); + NV_STATUS (*__regmemCheckCopyPermissions__)(struct RegisterMemory *, struct OBJGPU *, NvHandle); + void (*__regmemPreDestruct__)(struct RegisterMemory *); + NV_STATUS (*__regmemUnmapFrom__)(struct RegisterMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__regmemControl_Epilogue__)(struct RegisterMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__regmemControlLookup__)(struct RegisterMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__regmemMap__)(struct RegisterMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__regmemAccessCallback__)(struct RegisterMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_RegisterMemory_TYPEDEF__ +#define __NVOC_CLASS_RegisterMemory_TYPEDEF__ +typedef struct RegisterMemory RegisterMemory; +#endif /* __NVOC_CLASS_RegisterMemory_TYPEDEF__ */ + 
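/*
 * How a virtual call resolves for this class, summarized from the generated
 * code above and the dispatch macros below (pMem is an illustrative variable,
 * not part of this header):
 *
 *   regmemCanCopy(pMem)                               // convenience macro (below)
 *     -> regmemCanCopy_DISPATCH(pMem)                 // inline, reads pMem->__regmemCanCopy__
 *     -> regmemCanCopy_IMPL(pMem)                     // installed by __nvoc_init_funcTable_RegisterMemory_1
 *
 * Methods inherited from a base class route through a thunk instead, e.g.
 *
 *   regmemControl(pMem, pCallContext, pParams)
 *     -> regmemControl_DISPATCH(...)                  // reads pMem->__regmemControl__
 *     -> __nvoc_thunk_Memory_regmemControl(...)       // shifts pMem to its embedded Memory base
 *     -> memControl(...)                              // Memory implementation
 */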
+#ifndef __nvoc_class_id_RegisterMemory +#define __nvoc_class_id_RegisterMemory 0x40d457 +#endif /* __nvoc_class_id_RegisterMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RegisterMemory; + +#define __staticCast_RegisterMemory(pThis) \ + ((pThis)->__nvoc_pbase_RegisterMemory) + +#ifdef __nvoc_reg_mem_h_disabled +#define __dynamicCast_RegisterMemory(pThis) ((RegisterMemory*)NULL) +#else //__nvoc_reg_mem_h_disabled +#define __dynamicCast_RegisterMemory(pThis) \ + ((RegisterMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RegisterMemory))) +#endif //__nvoc_reg_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RegisterMemory(RegisterMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RegisterMemory(RegisterMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RegisterMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RegisterMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define regmemCanCopy(pRegisterMemory) regmemCanCopy_DISPATCH(pRegisterMemory) +#define regmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) regmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define regmemControl(pMemory, pCallContext, pParams) regmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define regmemUnmap(pMemory, pCallContext, pCpuMapping) regmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define regmemGetMemInterMapParams(pMemory, pParams) regmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define regmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) regmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define regmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) regmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define regmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) regmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define regmemControlFilter(pResource, pCallContext, pParams) regmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define regmemAddAdditionalDependants(pClient, pResource, pReference) regmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define regmemGetRefCount(pResource) regmemGetRefCount_DISPATCH(pResource) +#define regmemMapTo(pResource, pParams) regmemMapTo_DISPATCH(pResource, pParams) +#define regmemControl_Prologue(pResource, pCallContext, pParams) regmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define regmemIsReady(pMemory) regmemIsReady_DISPATCH(pMemory) +#define regmemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) regmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define regmemPreDestruct(pResource) regmemPreDestruct_DISPATCH(pResource) +#define regmemUnmapFrom(pResource, pParams) regmemUnmapFrom_DISPATCH(pResource, pParams) +#define regmemControl_Epilogue(pResource, pCallContext, pParams) regmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define regmemControlLookup(pResource, pParams, ppEntry) regmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define regmemMap(pMemory, pCallContext, pParams, pCpuMapping) regmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define regmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) regmemAccessCallback_DISPATCH(pResource, pInvokingClient, 
pAllocParams, accessRight) +NvBool regmemCanCopy_IMPL(struct RegisterMemory *pRegisterMemory); + +static inline NvBool regmemCanCopy_DISPATCH(struct RegisterMemory *pRegisterMemory) { + return pRegisterMemory->__regmemCanCopy__(pRegisterMemory); +} + +static inline NV_STATUS regmemCheckMemInterUnmap_DISPATCH(struct RegisterMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__regmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS regmemControl_DISPATCH(struct RegisterMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__regmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS regmemUnmap_DISPATCH(struct RegisterMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__regmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS regmemGetMemInterMapParams_DISPATCH(struct RegisterMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__regmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS regmemGetMemoryMappingDescriptor_DISPATCH(struct RegisterMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__regmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS regmemGetMapAddrSpace_DISPATCH(struct RegisterMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__regmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool regmemShareCallback_DISPATCH(struct RegisterMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__regmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS regmemControlFilter_DISPATCH(struct RegisterMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__regmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void regmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RegisterMemory *pResource, RsResourceRef *pReference) { + pResource->__regmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 regmemGetRefCount_DISPATCH(struct RegisterMemory *pResource) { + return pResource->__regmemGetRefCount__(pResource); +} + +static inline NV_STATUS regmemMapTo_DISPATCH(struct RegisterMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__regmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS regmemControl_Prologue_DISPATCH(struct RegisterMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__regmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS regmemIsReady_DISPATCH(struct RegisterMemory *pMemory) { + return pMemory->__regmemIsReady__(pMemory); +} + +static inline NV_STATUS regmemCheckCopyPermissions_DISPATCH(struct RegisterMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__regmemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void regmemPreDestruct_DISPATCH(struct RegisterMemory *pResource) { + pResource->__regmemPreDestruct__(pResource); +} + +static inline NV_STATUS regmemUnmapFrom_DISPATCH(struct RegisterMemory *pResource, RS_RES_UNMAP_FROM_PARAMS 
*pParams) { + return pResource->__regmemUnmapFrom__(pResource, pParams); +} + +static inline void regmemControl_Epilogue_DISPATCH(struct RegisterMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__regmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS regmemControlLookup_DISPATCH(struct RegisterMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__regmemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS regmemMap_DISPATCH(struct RegisterMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__regmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool regmemAccessCallback_DISPATCH(struct RegisterMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__regmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS regmemConstruct_IMPL(struct RegisterMemory *arg_pRegisterMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_regmemConstruct(arg_pRegisterMemory, arg_pCallContext, arg_pParams) regmemConstruct_IMPL(arg_pRegisterMemory, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_REG_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_regs_pb.c b/src/nvidia/generated/g_regs_pb.c new file mode 100644 index 000000000..14cf560fb --- /dev/null +++ b/src/nvidia/generated/g_regs_pb.c @@ -0,0 +1,117 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+ +#include "nvtypes.h" +#include "prbrt.h" +#include "g_regs_pb.h" + +// 'RegsAndMem.MemType' enum descriptor +static const PRB_ENUM_MAPPING prb_enum_mappings_regsandmem_memtype[] = { + { + 1, + PRB_MAYBE_ENUM_NAME("GPU_REGS") + }, + { + 2, + PRB_MAYBE_ENUM_NAME("SYS_MEM") + }, + { + 3, + PRB_MAYBE_ENUM_NAME("PDE") + }, + { + 4, + PRB_MAYBE_ENUM_NAME("PTE") + }, + { + 5, + PRB_MAYBE_ENUM_NAME("CPU_REGS") + }, + { + 6, + PRB_MAYBE_ENUM_NAME("PCI_CONFIG_REGS") + }, + { + 7, + PRB_MAYBE_ENUM_NAME("PCI_SPACE") + }, + { + 8, + PRB_MAYBE_ENUM_NAME("INSTANCE") + }, +}; + +const PRB_ENUM_DESC prb_enums_regs_regsandmem_memtype = { + prb_enum_mappings_regsandmem_memtype, + 8, + PRB_MAYBE_ENUM_NAME("MemType") +}; + +// 'RegsAndMem' field defaults +PRB_MAYBE_FIELD_DEFAULT_DEF(static const NvU32 regs_regsandmem_stride_default = 4;) + +// 'RegsAndMem' field descriptors +const PRB_FIELD_DESC prb_fields_regs_regsandmem[] = { + { + 1, + { + PRB_REQUIRED, + PRB_ENUM, + 0, + }, + 0, + &prb_enums_regs_regsandmem_memtype, + PRB_MAYBE_FIELD_NAME("type") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 2, + { + PRB_OPTIONAL, + PRB_UINT64, + 0, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("offset") + PRB_MAYBE_FIELD_DEFAULT(0) + }, + { + 3, + { + PRB_OPTIONAL, + PRB_UINT32, + 0 | PRB_HAS_DEFAULT, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("stride") + PRB_MAYBE_FIELD_DEFAULT((const PRB_VALUE *)&regs_regsandmem_stride_default) + }, + { + 4, + { + PRB_REPEATED, + PRB_UINT32, + 0 | PRB_IS_PACKED, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("val") + PRB_MAYBE_FIELD_DEFAULT(0) + }, +}; + +// Message descriptors +const PRB_MSG_DESC prb_messages_regs[] = { + { + 4, + prb_fields_regs_regsandmem, + PRB_MAYBE_MESSAGE_NAME("Regs.RegsAndMem") + }, +}; + +// Service descriptors +const PRB_SERVICE_DESC prb_services_regs[] = { + { 0 } +}; + diff --git a/src/nvidia/generated/g_regs_pb.h b/src/nvidia/generated/g_regs_pb.h new file mode 100644 index 000000000..3b9cb54ec --- /dev/null +++ b/src/nvidia/generated/g_regs_pb.h @@ -0,0 +1,46 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! + +#ifndef G_REGS_PB_H__ +#define G_REGS_PB_H__ + + +extern const PRB_ENUM_DESC prb_enums_regs_regsandmem_memtype; + +// 'RegsAndMem.MemType' enumeration values +#define REGS_REGSANDMEM_GPU_REGS 1 +#define REGS_REGSANDMEM_SYS_MEM 2 +#define REGS_REGSANDMEM_PDE 3 +#define REGS_REGSANDMEM_PTE 4 +#define REGS_REGSANDMEM_CPU_REGS 5 +#define REGS_REGSANDMEM_PCI_CONFIG_REGS 6 +#define REGS_REGSANDMEM_PCI_SPACE 7 +#define REGS_REGSANDMEM_INSTANCE 8 + +extern const PRB_MSG_DESC prb_messages_regs[]; + +// Message descriptor pointers +#define REGS_REGSANDMEM (&prb_messages_regs[0]) + +// Message maximum lengths +// Does not include repeated fields, strings and byte arrays. 
+#define REGS_REGSANDMEM_LEN 26 + +extern const PRB_FIELD_DESC prb_fields_regs_regsandmem[]; + +// 'RegsAndMem' field descriptor pointers +#define REGS_REGSANDMEM_TYPE (&prb_fields_regs_regsandmem[0]) +#define REGS_REGSANDMEM_OFFSET (&prb_fields_regs_regsandmem[1]) +#define REGS_REGSANDMEM_STRIDE (&prb_fields_regs_regsandmem[2]) +#define REGS_REGSANDMEM_VAL (&prb_fields_regs_regsandmem[3]) + +// 'RegsAndMem' field lengths +#define REGS_REGSANDMEM_TYPE_LEN 2 +#define REGS_REGSANDMEM_OFFSET_LEN 10 +#define REGS_REGSANDMEM_STRIDE_LEN 5 +#define REGS_REGSANDMEM_VAL_LEN 5 + +extern const PRB_SERVICE_DESC prb_services_regs[]; + +// Service descriptor pointers + +#endif // G_REGS_PB_H__ diff --git a/src/nvidia/generated/g_resource_fwd_decls_nvoc.h b/src/nvidia/generated/g_resource_fwd_decls_nvoc.h new file mode 100644 index 000000000..1c6bf0a90 --- /dev/null +++ b/src/nvidia/generated/g_resource_fwd_decls_nvoc.h @@ -0,0 +1,1252 @@ +#ifndef _G_RESOURCE_FWD_DECLS_NVOC_H_ +#define _G_RESOURCE_FWD_DECLS_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_resource_fwd_decls_nvoc.h" + +// +// This header is a temporary WAR for CORERM-3115 +// When that RFE is implemented, we'll be able to generate these forward decls +// from resource_list.h directly +// +#ifndef RESOURCE_FWD_DECLS_H +#define RESOURCE_FWD_DECLS_H + +// Base classes +struct ChannelDescendant; + +#ifndef __NVOC_CLASS_ChannelDescendant_TYPEDEF__ +#define __NVOC_CLASS_ChannelDescendant_TYPEDEF__ +typedef struct ChannelDescendant ChannelDescendant; +#endif /* __NVOC_CLASS_ChannelDescendant_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ChannelDescendant +#define __nvoc_class_id_ChannelDescendant 0x43d7c4 +#endif /* __nvoc_class_id_ChannelDescendant */ + + +struct DispChannel; + +#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__ +#define __NVOC_CLASS_DispChannel_TYPEDEF__ +typedef struct DispChannel DispChannel; +#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannel +#define __nvoc_class_id_DispChannel 0xbd2ff3 +#endif /* __nvoc_class_id_DispChannel */ + + +struct GpuResource; + +#ifndef __NVOC_CLASS_GpuResource_TYPEDEF__ +#define __NVOC_CLASS_GpuResource_TYPEDEF__ +typedef struct GpuResource GpuResource; +#endif /* __NVOC_CLASS_GpuResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuResource +#define __nvoc_class_id_GpuResource 0x5d5d9f +#endif /* __nvoc_class_id_GpuResource */ + + +struct INotifier; + +#ifndef __NVOC_CLASS_INotifier_TYPEDEF__ +#define __NVOC_CLASS_INotifier_TYPEDEF__ +typedef struct INotifier INotifier; +#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_INotifier +#define __nvoc_class_id_INotifier 0xf8f965 +#endif /* __nvoc_class_id_INotifier */ + + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + +struct Notifier; + +#ifndef __NVOC_CLASS_Notifier_TYPEDEF__ +#define __NVOC_CLASS_Notifier_TYPEDEF__ +typedef struct Notifier Notifier; +#endif /* __NVOC_CLASS_Notifier_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Notifier +#define __nvoc_class_id_Notifier 0xa8683b +#endif /* __nvoc_class_id_Notifier */ + + +struct NotifShare; + +#ifndef __NVOC_CLASS_NotifShare_TYPEDEF__ +#define __NVOC_CLASS_NotifShare_TYPEDEF__ +typedef struct NotifShare NotifShare; +#endif /* __NVOC_CLASS_NotifShare_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NotifShare +#define __nvoc_class_id_NotifShare 0xd5f150 +#endif /* __nvoc_class_id_NotifShare */ + + +struct Resource; + +#ifndef __NVOC_CLASS_Resource_TYPEDEF__ +#define __NVOC_CLASS_Resource_TYPEDEF__ +typedef struct Resource Resource; +#endif /* __NVOC_CLASS_Resource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Resource +#define __nvoc_class_id_Resource 0xbe8545 +#endif /* __nvoc_class_id_Resource */ + + +struct RmResource; + +#ifndef __NVOC_CLASS_RmResource_TYPEDEF__ +#define __NVOC_CLASS_RmResource_TYPEDEF__ +typedef struct RmResource RmResource; +#endif /* __NVOC_CLASS_RmResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResource +#define __nvoc_class_id_RmResource 0x03610d +#endif /* __nvoc_class_id_RmResource */ + + +struct RmResourceCommon; + +#ifndef __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +#define __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +typedef struct RmResourceCommon RmResourceCommon; +#endif /* __NVOC_CLASS_RmResourceCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResourceCommon +#define __nvoc_class_id_RmResourceCommon 0x8ef259 +#endif /* 
__nvoc_class_id_RmResourceCommon */ + + +struct RsResource; + +#ifndef __NVOC_CLASS_RsResource_TYPEDEF__ +#define __NVOC_CLASS_RsResource_TYPEDEF__ +typedef struct RsResource RsResource; +#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsResource +#define __nvoc_class_id_RsResource 0xd551cb +#endif /* __nvoc_class_id_RsResource */ + + +struct RsShared; + +#ifndef __NVOC_CLASS_RsShared_TYPEDEF__ +#define __NVOC_CLASS_RsShared_TYPEDEF__ +typedef struct RsShared RsShared; +#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsShared +#define __nvoc_class_id_RsShared 0x830542 +#endif /* __nvoc_class_id_RsShared */ + + + +// Allocatable resources +struct AccessCounterBuffer; + +#ifndef __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ +#define __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ +typedef struct AccessCounterBuffer AccessCounterBuffer; +#endif /* __NVOC_CLASS_AccessCounterBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_AccessCounterBuffer +#define __nvoc_class_id_AccessCounterBuffer 0x1f0074 +#endif /* __nvoc_class_id_AccessCounterBuffer */ + + +struct KernelCeContext; + +#ifndef __NVOC_CLASS_KernelCeContext_TYPEDEF__ +#define __NVOC_CLASS_KernelCeContext_TYPEDEF__ +typedef struct KernelCeContext KernelCeContext; +#endif /* __NVOC_CLASS_KernelCeContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCeContext +#define __nvoc_class_id_KernelCeContext 0x2d0ee9 +#endif /* __nvoc_class_id_KernelCeContext */ + + +struct Channel; + +#ifndef __NVOC_CLASS_Channel_TYPEDEF__ +#define __NVOC_CLASS_Channel_TYPEDEF__ +typedef struct Channel Channel; +#endif /* __NVOC_CLASS_Channel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Channel +#define __nvoc_class_id_Channel 0x781dc9 +#endif /* __nvoc_class_id_Channel */ + + +struct ConsoleMemory; + +#ifndef __NVOC_CLASS_ConsoleMemory_TYPEDEF__ +#define __NVOC_CLASS_ConsoleMemory_TYPEDEF__ +typedef struct ConsoleMemory ConsoleMemory; +#endif /* __NVOC_CLASS_ConsoleMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ConsoleMemory +#define __nvoc_class_id_ConsoleMemory 0xaac69e +#endif /* __nvoc_class_id_ConsoleMemory */ + + +struct ContextDma; + +#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__ +#define __NVOC_CLASS_ContextDma_TYPEDEF__ +typedef struct ContextDma ContextDma; +#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ContextDma +#define __nvoc_class_id_ContextDma 0x88441b +#endif /* __nvoc_class_id_ContextDma */ + + +struct DebugBufferApi; + +#ifndef __NVOC_CLASS_DebugBufferApi_TYPEDEF__ +#define __NVOC_CLASS_DebugBufferApi_TYPEDEF__ +typedef struct DebugBufferApi DebugBufferApi; +#endif /* __NVOC_CLASS_DebugBufferApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DebugBufferApi +#define __nvoc_class_id_DebugBufferApi 0x5e7a1b +#endif /* __nvoc_class_id_DebugBufferApi */ + + +struct DeferredApiObject; + +#ifndef __NVOC_CLASS_DeferredApiObject_TYPEDEF__ +#define __NVOC_CLASS_DeferredApiObject_TYPEDEF__ +typedef struct DeferredApiObject DeferredApiObject; +#endif /* __NVOC_CLASS_DeferredApiObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DeferredApiObject +#define __nvoc_class_id_DeferredApiObject 0x8ea933 +#endif /* __nvoc_class_id_DeferredApiObject */ + + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + +struct DiagApi; + +#ifndef __NVOC_CLASS_DiagApi_TYPEDEF__ +#define 
__NVOC_CLASS_DiagApi_TYPEDEF__ +typedef struct DiagApi DiagApi; +#endif /* __NVOC_CLASS_DiagApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DiagApi +#define __nvoc_class_id_DiagApi 0xaa3066 +#endif /* __nvoc_class_id_DiagApi */ + + +struct DispCapabilities; + +#ifndef __NVOC_CLASS_DispCapabilities_TYPEDEF__ +#define __NVOC_CLASS_DispCapabilities_TYPEDEF__ +typedef struct DispCapabilities DispCapabilities; +#endif /* __NVOC_CLASS_DispCapabilities_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCapabilities +#define __nvoc_class_id_DispCapabilities 0x99db3e +#endif /* __nvoc_class_id_DispCapabilities */ + + +struct DispChannelDma; + +#ifndef __NVOC_CLASS_DispChannelDma_TYPEDEF__ +#define __NVOC_CLASS_DispChannelDma_TYPEDEF__ +typedef struct DispChannelDma DispChannelDma; +#endif /* __NVOC_CLASS_DispChannelDma_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelDma +#define __nvoc_class_id_DispChannelDma 0xfe3d2e +#endif /* __nvoc_class_id_DispChannelDma */ + + +struct DispChannelPio; + +#ifndef __NVOC_CLASS_DispChannelPio_TYPEDEF__ +#define __NVOC_CLASS_DispChannelPio_TYPEDEF__ +typedef struct DispChannelPio DispChannelPio; +#endif /* __NVOC_CLASS_DispChannelPio_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispChannelPio +#define __nvoc_class_id_DispChannelPio 0x10dec3 +#endif /* __nvoc_class_id_DispChannelPio */ + + +struct DispCommon; + +#ifndef __NVOC_CLASS_DispCommon_TYPEDEF__ +#define __NVOC_CLASS_DispCommon_TYPEDEF__ +typedef struct DispCommon DispCommon; +#endif /* __NVOC_CLASS_DispCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCommon +#define __nvoc_class_id_DispCommon 0x41f4f2 +#endif /* __nvoc_class_id_DispCommon */ + + +struct DispSfUser; + +#ifndef __NVOC_CLASS_DispSfUser_TYPEDEF__ +#define __NVOC_CLASS_DispSfUser_TYPEDEF__ +typedef struct DispSfUser DispSfUser; +#endif /* __NVOC_CLASS_DispSfUser_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSfUser +#define __nvoc_class_id_DispSfUser 0xba7439 +#endif /* __nvoc_class_id_DispSfUser */ + + +struct DispSwObj; + +#ifndef __NVOC_CLASS_DispSwObj_TYPEDEF__ +#define __NVOC_CLASS_DispSwObj_TYPEDEF__ +typedef struct DispSwObj DispSwObj; +#endif /* __NVOC_CLASS_DispSwObj_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObj +#define __nvoc_class_id_DispSwObj 0x6aa5e2 +#endif /* __nvoc_class_id_DispSwObj */ + + +struct DispSwObject; + +#ifndef __NVOC_CLASS_DispSwObject_TYPEDEF__ +#define __NVOC_CLASS_DispSwObject_TYPEDEF__ +typedef struct DispSwObject DispSwObject; +#endif /* __NVOC_CLASS_DispSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispSwObject +#define __nvoc_class_id_DispSwObject 0x99ad6d +#endif /* __nvoc_class_id_DispSwObject */ + + +struct Event; + +#ifndef __NVOC_CLASS_Event_TYPEDEF__ +#define __NVOC_CLASS_Event_TYPEDEF__ +typedef struct Event Event; +#endif /* __NVOC_CLASS_Event_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Event +#define __nvoc_class_id_Event 0xa4ecfc +#endif /* __nvoc_class_id_Event */ + + +struct EventBuffer; + +#ifndef __NVOC_CLASS_EventBuffer_TYPEDEF__ +#define __NVOC_CLASS_EventBuffer_TYPEDEF__ +typedef struct EventBuffer EventBuffer; +#endif /* __NVOC_CLASS_EventBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_EventBuffer +#define __nvoc_class_id_EventBuffer 0x63502b +#endif /* __nvoc_class_id_EventBuffer */ + + +struct FbSegment; + +#ifndef __NVOC_CLASS_FbSegment_TYPEDEF__ +#define __NVOC_CLASS_FbSegment_TYPEDEF__ +typedef struct FbSegment FbSegment; +#endif /* __NVOC_CLASS_FbSegment_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FbSegment +#define __nvoc_class_id_FbSegment 0x2d55be +#endif /* __nvoc_class_id_FbSegment */ + + 
+struct FlaMemory; + +#ifndef __NVOC_CLASS_FlaMemory_TYPEDEF__ +#define __NVOC_CLASS_FlaMemory_TYPEDEF__ +typedef struct FlaMemory FlaMemory; +#endif /* __NVOC_CLASS_FlaMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FlaMemory +#define __nvoc_class_id_FlaMemory 0xe61ee1 +#endif /* __nvoc_class_id_FlaMemory */ + + +struct FmSessionApi; + +#ifndef __NVOC_CLASS_FmSessionApi_TYPEDEF__ +#define __NVOC_CLASS_FmSessionApi_TYPEDEF__ +typedef struct FmSessionApi FmSessionApi; +#endif /* __NVOC_CLASS_FmSessionApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_FmSessionApi +#define __nvoc_class_id_FmSessionApi 0xdfbd08 +#endif /* __nvoc_class_id_FmSessionApi */ + + +struct GenericEngineApi; + +#ifndef __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +#define __NVOC_CLASS_GenericEngineApi_TYPEDEF__ +typedef struct GenericEngineApi GenericEngineApi; +#endif /* __NVOC_CLASS_GenericEngineApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GenericEngineApi +#define __nvoc_class_id_GenericEngineApi 0x4bc329 +#endif /* __nvoc_class_id_GenericEngineApi */ + + +struct GpuManagementApi; + +#ifndef __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +#define __NVOC_CLASS_GpuManagementApi_TYPEDEF__ +typedef struct GpuManagementApi GpuManagementApi; +#endif /* __NVOC_CLASS_GpuManagementApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuManagementApi +#define __nvoc_class_id_GpuManagementApi 0x376305 +#endif /* __nvoc_class_id_GpuManagementApi */ + + +struct GraphicsContext; + +#ifndef __NVOC_CLASS_GraphicsContext_TYPEDEF__ +#define __NVOC_CLASS_GraphicsContext_TYPEDEF__ +typedef struct GraphicsContext GraphicsContext; +#endif /* __NVOC_CLASS_GraphicsContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GraphicsContext +#define __nvoc_class_id_GraphicsContext 0x954c97 +#endif /* __nvoc_class_id_GraphicsContext */ + + +struct GraphicsObject; + +#ifndef __NVOC_CLASS_GraphicsObject_TYPEDEF__ +#define __NVOC_CLASS_GraphicsObject_TYPEDEF__ +typedef struct GraphicsObject GraphicsObject; +#endif /* __NVOC_CLASS_GraphicsObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GraphicsObject +#define __nvoc_class_id_GraphicsObject 0x8cddfd +#endif /* __nvoc_class_id_GraphicsObject */ + + +struct Griddisplayless; + +#ifndef __NVOC_CLASS_Griddisplayless_TYPEDEF__ +#define __NVOC_CLASS_Griddisplayless_TYPEDEF__ +typedef struct Griddisplayless Griddisplayless; +#endif /* __NVOC_CLASS_Griddisplayless_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Griddisplayless +#define __nvoc_class_id_Griddisplayless 0x3d03b2 +#endif /* __nvoc_class_id_Griddisplayless */ + + +struct Hdacodec; + +#ifndef __NVOC_CLASS_Hdacodec_TYPEDEF__ +#define __NVOC_CLASS_Hdacodec_TYPEDEF__ +typedef struct Hdacodec Hdacodec; +#endif /* __NVOC_CLASS_Hdacodec_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Hdacodec +#define __nvoc_class_id_Hdacodec 0xf59a20 +#endif /* __nvoc_class_id_Hdacodec */ + + +struct HostVgpuDeviceApi; + +#ifndef __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__ +#define __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__ +typedef struct HostVgpuDeviceApi HostVgpuDeviceApi; +#endif /* __NVOC_CLASS_HostVgpuDeviceApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_HostVgpuDeviceApi +#define __nvoc_class_id_HostVgpuDeviceApi 0x4c4173 +#endif /* __nvoc_class_id_HostVgpuDeviceApi */ + + +struct HostVgpuDeviceApi_KERNEL; + +#ifndef __NVOC_CLASS_HostVgpuDeviceApi_KERNEL_TYPEDEF__ +#define __NVOC_CLASS_HostVgpuDeviceApi_KERNEL_TYPEDEF__ +typedef struct HostVgpuDeviceApi_KERNEL HostVgpuDeviceApi_KERNEL; +#endif /* __NVOC_CLASS_HostVgpuDeviceApi_KERNEL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_HostVgpuDeviceApi_KERNEL +#define 
__nvoc_class_id_HostVgpuDeviceApi_KERNEL 0xeb7e48 +#endif /* __nvoc_class_id_HostVgpuDeviceApi_KERNEL */ + + +struct I2cApi; + +#ifndef __NVOC_CLASS_I2cApi_TYPEDEF__ +#define __NVOC_CLASS_I2cApi_TYPEDEF__ +typedef struct I2cApi I2cApi; +#endif /* __NVOC_CLASS_I2cApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_I2cApi +#define __nvoc_class_id_I2cApi 0xceb8f6 +#endif /* __nvoc_class_id_I2cApi */ + + +struct KernelChannel; + +#ifndef __NVOC_CLASS_KernelChannel_TYPEDEF__ +#define __NVOC_CLASS_KernelChannel_TYPEDEF__ +typedef struct KernelChannel KernelChannel; +#endif /* __NVOC_CLASS_KernelChannel_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannel +#define __nvoc_class_id_KernelChannel 0x5d8d70 +#endif /* __nvoc_class_id_KernelChannel */ + + +struct KernelChannelGroupApi; + +#ifndef __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +#define __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ +typedef struct KernelChannelGroupApi KernelChannelGroupApi; +#endif /* __NVOC_CLASS_KernelChannelGroupApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelChannelGroupApi +#define __nvoc_class_id_KernelChannelGroupApi 0x2b5b80 +#endif /* __nvoc_class_id_KernelChannelGroupApi */ + + +struct KernelCtxShareApi; + +#ifndef __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ +#define __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ +typedef struct KernelCtxShareApi KernelCtxShareApi; +#endif /* __NVOC_CLASS_KernelCtxShareApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelCtxShareApi +#define __nvoc_class_id_KernelCtxShareApi 0x1f9af1 +#endif /* __nvoc_class_id_KernelCtxShareApi */ + + +struct KernelGraphicsContext; + +#ifndef __NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ +typedef struct KernelGraphicsContext KernelGraphicsContext; +#endif /* __NVOC_CLASS_KernelGraphicsContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsContext +#define __nvoc_class_id_KernelGraphicsContext 0x7ead09 +#endif /* __nvoc_class_id_KernelGraphicsContext */ + + +struct KernelGraphicsObject; + +#ifndef __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +#define __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ +typedef struct KernelGraphicsObject KernelGraphicsObject; +#endif /* __NVOC_CLASS_KernelGraphicsObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelGraphicsObject +#define __nvoc_class_id_KernelGraphicsObject 0x097648 +#endif /* __nvoc_class_id_KernelGraphicsObject */ + + +struct KernelSMDebuggerSession; + +#ifndef __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ +#define __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ +typedef struct KernelSMDebuggerSession KernelSMDebuggerSession; +#endif /* __NVOC_CLASS_KernelSMDebuggerSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelSMDebuggerSession +#define __nvoc_class_id_KernelSMDebuggerSession 0x4adc81 +#endif /* __nvoc_class_id_KernelSMDebuggerSession */ + + +struct MemoryFabric; + +#ifndef __NVOC_CLASS_MemoryFabric_TYPEDEF__ +#define __NVOC_CLASS_MemoryFabric_TYPEDEF__ +typedef struct MemoryFabric MemoryFabric; +#endif /* __NVOC_CLASS_MemoryFabric_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryFabric +#define __nvoc_class_id_MemoryFabric 0x127499 +#endif /* __nvoc_class_id_MemoryFabric */ + + +struct MemoryHwResources; + +#ifndef __NVOC_CLASS_MemoryHwResources_TYPEDEF__ +#define __NVOC_CLASS_MemoryHwResources_TYPEDEF__ +typedef struct MemoryHwResources MemoryHwResources; +#endif /* __NVOC_CLASS_MemoryHwResources_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryHwResources +#define __nvoc_class_id_MemoryHwResources 0x9a2a71 +#endif /* __nvoc_class_id_MemoryHwResources */ + 
+ +struct MemoryList; + +#ifndef __NVOC_CLASS_MemoryList_TYPEDEF__ +#define __NVOC_CLASS_MemoryList_TYPEDEF__ +typedef struct MemoryList MemoryList; +#endif /* __NVOC_CLASS_MemoryList_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryList +#define __nvoc_class_id_MemoryList 0x298f78 +#endif /* __nvoc_class_id_MemoryList */ + + +struct MmuFaultBuffer; + +#ifndef __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ +#define __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ +typedef struct MmuFaultBuffer MmuFaultBuffer; +#endif /* __NVOC_CLASS_MmuFaultBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MmuFaultBuffer +#define __nvoc_class_id_MmuFaultBuffer 0x7e1829 +#endif /* __nvoc_class_id_MmuFaultBuffer */ + + +struct MpsApi; + +#ifndef __NVOC_CLASS_MpsApi_TYPEDEF__ +#define __NVOC_CLASS_MpsApi_TYPEDEF__ +typedef struct MpsApi MpsApi; +#endif /* __NVOC_CLASS_MpsApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MpsApi +#define __nvoc_class_id_MpsApi 0x22ce42 +#endif /* __nvoc_class_id_MpsApi */ + + +struct MsencContext; + +#ifndef __NVOC_CLASS_MsencContext_TYPEDEF__ +#define __NVOC_CLASS_MsencContext_TYPEDEF__ +typedef struct MsencContext MsencContext; +#endif /* __NVOC_CLASS_MsencContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MsencContext +#define __nvoc_class_id_MsencContext 0x88c92a +#endif /* __nvoc_class_id_MsencContext */ + + +struct NoDeviceMemory; + +#ifndef __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ +#define __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ +typedef struct NoDeviceMemory NoDeviceMemory; +#endif /* __NVOC_CLASS_NoDeviceMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NoDeviceMemory +#define __nvoc_class_id_NoDeviceMemory 0x6c0832 +#endif /* __nvoc_class_id_NoDeviceMemory */ + + +struct NpuResource; + +#ifndef __NVOC_CLASS_NpuResource_TYPEDEF__ +#define __NVOC_CLASS_NpuResource_TYPEDEF__ +typedef struct NpuResource NpuResource; +#endif /* __NVOC_CLASS_NpuResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NpuResource +#define __nvoc_class_id_NpuResource 0x4d1af2 +#endif /* __nvoc_class_id_NpuResource */ + + +struct NvdecContext; + +#ifndef __NVOC_CLASS_NvdecContext_TYPEDEF__ +#define __NVOC_CLASS_NvdecContext_TYPEDEF__ +typedef struct NvdecContext NvdecContext; +#endif /* __NVOC_CLASS_NvdecContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvdecContext +#define __nvoc_class_id_NvdecContext 0x70d2be +#endif /* __nvoc_class_id_NvdecContext */ + + +struct NvDispApi; + +#ifndef __NVOC_CLASS_NvDispApi_TYPEDEF__ +#define __NVOC_CLASS_NvDispApi_TYPEDEF__ +typedef struct NvDispApi NvDispApi; +#endif /* __NVOC_CLASS_NvDispApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvDispApi +#define __nvoc_class_id_NvDispApi 0x36aa0b +#endif /* __nvoc_class_id_NvDispApi */ + + +struct NvjpgContext; + +#ifndef __NVOC_CLASS_NvjpgContext_TYPEDEF__ +#define __NVOC_CLASS_NvjpgContext_TYPEDEF__ +typedef struct NvjpgContext NvjpgContext; +#endif /* __NVOC_CLASS_NvjpgContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_NvjpgContext +#define __nvoc_class_id_NvjpgContext 0x08c1ce +#endif /* __nvoc_class_id_NvjpgContext */ + + +struct OfaContext; + +#ifndef __NVOC_CLASS_OfaContext_TYPEDEF__ +#define __NVOC_CLASS_OfaContext_TYPEDEF__ +typedef struct OfaContext OfaContext; +#endif /* __NVOC_CLASS_OfaContext_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OfaContext +#define __nvoc_class_id_OfaContext 0xf63d99 +#endif /* __nvoc_class_id_OfaContext */ + + +struct OsDescMemory; + +#ifndef __NVOC_CLASS_OsDescMemory_TYPEDEF__ +#define __NVOC_CLASS_OsDescMemory_TYPEDEF__ +typedef struct OsDescMemory OsDescMemory; +#endif /* __NVOC_CLASS_OsDescMemory_TYPEDEF__ */ + +#ifndef 
__nvoc_class_id_OsDescMemory +#define __nvoc_class_id_OsDescMemory 0xb3dacd +#endif /* __nvoc_class_id_OsDescMemory */ + + +struct UserLocalDescMemory; + +#ifndef __NVOC_CLASS_UserLocalDescMemory_TYPEDEF__ +#define __NVOC_CLASS_UserLocalDescMemory_TYPEDEF__ +typedef struct UserLocalDescMemory UserLocalDescMemory; +#endif /* __NVOC_CLASS_UserLocalDescMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserLocalDescMemory +#define __nvoc_class_id_UserLocalDescMemory 0x799456 +#endif /* __nvoc_class_id_UserLocalDescMemory */ + + +struct P2PApi; + +#ifndef __NVOC_CLASS_P2PApi_TYPEDEF__ +#define __NVOC_CLASS_P2PApi_TYPEDEF__ +typedef struct P2PApi P2PApi; +#endif /* __NVOC_CLASS_P2PApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_P2PApi +#define __nvoc_class_id_P2PApi 0x3982b7 +#endif /* __nvoc_class_id_P2PApi */ + + +struct PerfBuffer; + +#ifndef __NVOC_CLASS_PerfBuffer_TYPEDEF__ +#define __NVOC_CLASS_PerfBuffer_TYPEDEF__ +typedef struct PerfBuffer PerfBuffer; +#endif /* __NVOC_CLASS_PerfBuffer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PerfBuffer +#define __nvoc_class_id_PerfBuffer 0x4bc43b +#endif /* __nvoc_class_id_PerfBuffer */ + + +struct PhysicalMemory; + +#ifndef __NVOC_CLASS_PhysicalMemory_TYPEDEF__ +#define __NVOC_CLASS_PhysicalMemory_TYPEDEF__ +typedef struct PhysicalMemory PhysicalMemory; +#endif /* __NVOC_CLASS_PhysicalMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PhysicalMemory +#define __nvoc_class_id_PhysicalMemory 0x5fccf2 +#endif /* __nvoc_class_id_PhysicalMemory */ + + +struct PhysMemSubAlloc; + +#ifndef __NVOC_CLASS_PhysMemSubAlloc_TYPEDEF__ +#define __NVOC_CLASS_PhysMemSubAlloc_TYPEDEF__ +typedef struct PhysMemSubAlloc PhysMemSubAlloc; +#endif /* __NVOC_CLASS_PhysMemSubAlloc_TYPEDEF__ */ + +#ifndef __nvoc_class_id_PhysMemSubAlloc +#define __nvoc_class_id_PhysMemSubAlloc 0x2351fc +#endif /* __nvoc_class_id_PhysMemSubAlloc */ + + +struct Profiler; + +#ifndef __NVOC_CLASS_Profiler_TYPEDEF__ +#define __NVOC_CLASS_Profiler_TYPEDEF__ +typedef struct Profiler Profiler; +#endif /* __NVOC_CLASS_Profiler_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Profiler +#define __nvoc_class_id_Profiler 0x65b4c7 +#endif /* __nvoc_class_id_Profiler */ + + +struct ProfilerCtx; + +#ifndef __NVOC_CLASS_ProfilerCtx_TYPEDEF__ +#define __NVOC_CLASS_ProfilerCtx_TYPEDEF__ +typedef struct ProfilerCtx ProfilerCtx; +#endif /* __NVOC_CLASS_ProfilerCtx_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ProfilerCtx +#define __nvoc_class_id_ProfilerCtx 0xe99229 +#endif /* __nvoc_class_id_ProfilerCtx */ + + +struct ProfilerDev; + +#ifndef __NVOC_CLASS_ProfilerDev_TYPEDEF__ +#define __NVOC_CLASS_ProfilerDev_TYPEDEF__ +typedef struct ProfilerDev ProfilerDev; +#endif /* __NVOC_CLASS_ProfilerDev_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ProfilerDev +#define __nvoc_class_id_ProfilerDev 0x54d077 +#endif /* __nvoc_class_id_ProfilerDev */ + + +struct RegisterMemory; + +#ifndef __NVOC_CLASS_RegisterMemory_TYPEDEF__ +#define __NVOC_CLASS_RegisterMemory_TYPEDEF__ +typedef struct RegisterMemory RegisterMemory; +#endif /* __NVOC_CLASS_RegisterMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RegisterMemory +#define __nvoc_class_id_RegisterMemory 0x40d457 +#endif /* __nvoc_class_id_RegisterMemory */ + + +struct RemapperObject; + +#ifndef __NVOC_CLASS_RemapperObject_TYPEDEF__ +#define __NVOC_CLASS_RemapperObject_TYPEDEF__ +typedef struct RemapperObject RemapperObject; +#endif /* __NVOC_CLASS_RemapperObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RemapperObject +#define __nvoc_class_id_RemapperObject 0xfc96cb +#endif /* __nvoc_class_id_RemapperObject */ + + 
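Each class above and below follows the same three-part NVOC idiom: an opaque struct forward declaration, a redefinition-guarded typedef, and a redefinition-guarded numeric class id. This lets other headers name the type and perform checked downcasts without pulling in the class's full definition. As a minimal illustrative sketch only (the pointer variable and surrounding code are assumptions, not part of this diff), a consumer would combine one of these forward-declared types with the dynamicCast helper used later in this change:

/* Illustrative sketch: pDynamic is a hypothetical pointer to some NVOC-managed object. */
struct DispSwObject *pSwObject = dynamicCast(pDynamic, DispSwObject);
if (pSwObject != NULL)
{
    /* The RTTI tables matched class id 0x99ad6d, so the downcast pointer is safe to use. */
}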
+struct RgLineCallback; + +#ifndef __NVOC_CLASS_RgLineCallback_TYPEDEF__ +#define __NVOC_CLASS_RgLineCallback_TYPEDEF__ +typedef struct RgLineCallback RgLineCallback; +#endif /* __NVOC_CLASS_RgLineCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RgLineCallback +#define __nvoc_class_id_RgLineCallback 0xa3ff1c +#endif /* __nvoc_class_id_RgLineCallback */ + + +struct RmClientResource; + +#ifndef __NVOC_CLASS_RmClientResource_TYPEDEF__ +#define __NVOC_CLASS_RmClientResource_TYPEDEF__ +typedef struct RmClientResource RmClientResource; +#endif /* __NVOC_CLASS_RmClientResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmClientResource +#define __nvoc_class_id_RmClientResource 0x37a701 +#endif /* __nvoc_class_id_RmClientResource */ + + +struct MIGConfigSession; + +#ifndef __NVOC_CLASS_MIGConfigSession_TYPEDEF__ +#define __NVOC_CLASS_MIGConfigSession_TYPEDEF__ +typedef struct MIGConfigSession MIGConfigSession; +#endif /* __NVOC_CLASS_MIGConfigSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGConfigSession +#define __nvoc_class_id_MIGConfigSession 0x36a941 +#endif /* __nvoc_class_id_MIGConfigSession */ + + +struct ComputeInstanceSubscription; + +#ifndef __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ +#define __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ +typedef struct ComputeInstanceSubscription ComputeInstanceSubscription; +#endif /* __NVOC_CLASS_ComputeInstanceSubscription_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ComputeInstanceSubscription +#define __nvoc_class_id_ComputeInstanceSubscription 0xd1f238 +#endif /* __nvoc_class_id_ComputeInstanceSubscription */ + + +struct MIGMonitorSession; + +#ifndef __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ +#define __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ +typedef struct MIGMonitorSession MIGMonitorSession; +#endif /* __NVOC_CLASS_MIGMonitorSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MIGMonitorSession +#define __nvoc_class_id_MIGMonitorSession 0x29e15c +#endif /* __nvoc_class_id_MIGMonitorSession */ + + +struct GPUInstanceSubscription; + +#ifndef __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ +#define __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ +typedef struct GPUInstanceSubscription GPUInstanceSubscription; +#endif /* __NVOC_CLASS_GPUInstanceSubscription_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GPUInstanceSubscription +#define __nvoc_class_id_GPUInstanceSubscription 0x91fde7 +#endif /* __nvoc_class_id_GPUInstanceSubscription */ + + +struct SMDebuggerSession; + +#ifndef __NVOC_CLASS_SMDebuggerSession_TYPEDEF__ +#define __NVOC_CLASS_SMDebuggerSession_TYPEDEF__ +typedef struct SMDebuggerSession SMDebuggerSession; +#endif /* __NVOC_CLASS_SMDebuggerSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SMDebuggerSession +#define __nvoc_class_id_SMDebuggerSession 0x9afab7 +#endif /* __nvoc_class_id_SMDebuggerSession */ + + +struct SoftwareMethodTest; + +#ifndef __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ +#define __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ +typedef struct SoftwareMethodTest SoftwareMethodTest; +#endif /* __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SoftwareMethodTest +#define __nvoc_class_id_SoftwareMethodTest 0xdea092 +#endif /* __nvoc_class_id_SoftwareMethodTest */ + + +struct Subdevice; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + + +struct BinaryApi; + +#ifndef 
__NVOC_CLASS_BinaryApi_TYPEDEF__ +#define __NVOC_CLASS_BinaryApi_TYPEDEF__ +typedef struct BinaryApi BinaryApi; +#endif /* __NVOC_CLASS_BinaryApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_BinaryApi +#define __nvoc_class_id_BinaryApi 0xb7a47c +#endif /* __nvoc_class_id_BinaryApi */ + + +struct BinaryApiPrivileged; + +#ifndef __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ +#define __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ +typedef struct BinaryApiPrivileged BinaryApiPrivileged; +#endif /* __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ */ + +#ifndef __nvoc_class_id_BinaryApiPrivileged +#define __nvoc_class_id_BinaryApiPrivileged 0x1c0579 +#endif /* __nvoc_class_id_BinaryApiPrivileged */ + + +struct SyncGpuBoost; + +#ifndef __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ +#define __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ +typedef struct SyncGpuBoost SyncGpuBoost; +#endif /* __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SyncGpuBoost +#define __nvoc_class_id_SyncGpuBoost 0xc7e30b +#endif /* __nvoc_class_id_SyncGpuBoost */ + + +struct SyncpointMemory; + +#ifndef __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +#define __NVOC_CLASS_SyncpointMemory_TYPEDEF__ +typedef struct SyncpointMemory SyncpointMemory; +#endif /* __NVOC_CLASS_SyncpointMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SyncpointMemory +#define __nvoc_class_id_SyncpointMemory 0x529def +#endif /* __nvoc_class_id_SyncpointMemory */ + + +struct SystemMemory; + +#ifndef __NVOC_CLASS_SystemMemory_TYPEDEF__ +#define __NVOC_CLASS_SystemMemory_TYPEDEF__ +typedef struct SystemMemory SystemMemory; +#endif /* __NVOC_CLASS_SystemMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SystemMemory +#define __nvoc_class_id_SystemMemory 0x007a98 +#endif /* __nvoc_class_id_SystemMemory */ + + +struct ThirdPartyP2P; + +#ifndef __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ +#define __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ +typedef struct ThirdPartyP2P ThirdPartyP2P; +#endif /* __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ThirdPartyP2P +#define __nvoc_class_id_ThirdPartyP2P 0x34d08b +#endif /* __nvoc_class_id_ThirdPartyP2P */ + + +struct TimedSemaSwObject; + +#ifndef __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ +#define __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ +typedef struct TimedSemaSwObject TimedSemaSwObject; +#endif /* __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_TimedSemaSwObject +#define __nvoc_class_id_TimedSemaSwObject 0x335775 +#endif /* __nvoc_class_id_TimedSemaSwObject */ + + +struct TimerApi; + +#ifndef __NVOC_CLASS_TimerApi_TYPEDEF__ +#define __NVOC_CLASS_TimerApi_TYPEDEF__ +typedef struct TimerApi TimerApi; +#endif /* __NVOC_CLASS_TimerApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_TimerApi +#define __nvoc_class_id_TimerApi 0xb13ac4 +#endif /* __nvoc_class_id_TimerApi */ + + +struct UserModeApi; + +#ifndef __NVOC_CLASS_UserModeApi_TYPEDEF__ +#define __NVOC_CLASS_UserModeApi_TYPEDEF__ +typedef struct UserModeApi UserModeApi; +#endif /* __NVOC_CLASS_UserModeApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserModeApi +#define __nvoc_class_id_UserModeApi 0x6f57ec +#endif /* __nvoc_class_id_UserModeApi */ + + +struct UvmChannelRetainer; + +#ifndef __NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ +#define __NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ +typedef struct UvmChannelRetainer UvmChannelRetainer; +#endif /* __NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UvmChannelRetainer +#define __nvoc_class_id_UvmChannelRetainer 0xa3f03a +#endif /* __nvoc_class_id_UvmChannelRetainer */ + + +struct UvmSwObject; + +#ifndef 
__NVOC_CLASS_UvmSwObject_TYPEDEF__ +#define __NVOC_CLASS_UvmSwObject_TYPEDEF__ +typedef struct UvmSwObject UvmSwObject; +#endif /* __NVOC_CLASS_UvmSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UvmSwObject +#define __nvoc_class_id_UvmSwObject 0xc35503 +#endif /* __nvoc_class_id_UvmSwObject */ + + +struct VaSpaceApi; + +#ifndef __NVOC_CLASS_VaSpaceApi_TYPEDEF__ +#define __NVOC_CLASS_VaSpaceApi_TYPEDEF__ +typedef struct VaSpaceApi VaSpaceApi; +#endif /* __NVOC_CLASS_VaSpaceApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VaSpaceApi +#define __nvoc_class_id_VaSpaceApi 0xcd048b +#endif /* __nvoc_class_id_VaSpaceApi */ + + +struct VblankCallback; + +#ifndef __NVOC_CLASS_VblankCallback_TYPEDEF__ +#define __NVOC_CLASS_VblankCallback_TYPEDEF__ +typedef struct VblankCallback VblankCallback; +#endif /* __NVOC_CLASS_VblankCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VblankCallback +#define __nvoc_class_id_VblankCallback 0x4c1997 +#endif /* __nvoc_class_id_VblankCallback */ + + +struct VgpuApi; + +#ifndef __NVOC_CLASS_VgpuApi_TYPEDEF__ +#define __NVOC_CLASS_VgpuApi_TYPEDEF__ +typedef struct VgpuApi VgpuApi; +#endif /* __NVOC_CLASS_VgpuApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VgpuApi +#define __nvoc_class_id_VgpuApi 0x7774f5 +#endif /* __nvoc_class_id_VgpuApi */ + + +struct VgpuConfigApi; + +#ifndef __NVOC_CLASS_VgpuConfigApi_TYPEDEF__ +#define __NVOC_CLASS_VgpuConfigApi_TYPEDEF__ +typedef struct VgpuConfigApi VgpuConfigApi; +#endif /* __NVOC_CLASS_VgpuConfigApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VgpuConfigApi +#define __nvoc_class_id_VgpuConfigApi 0x4d560a +#endif /* __nvoc_class_id_VgpuConfigApi */ + + +struct VideoMemory; + +#ifndef __NVOC_CLASS_VideoMemory_TYPEDEF__ +#define __NVOC_CLASS_VideoMemory_TYPEDEF__ +typedef struct VideoMemory VideoMemory; +#endif /* __NVOC_CLASS_VideoMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VideoMemory +#define __nvoc_class_id_VideoMemory 0xed948f +#endif /* __nvoc_class_id_VideoMemory */ + + +struct VirtualMemory; + +#ifndef __NVOC_CLASS_VirtualMemory_TYPEDEF__ +#define __NVOC_CLASS_VirtualMemory_TYPEDEF__ +typedef struct VirtualMemory VirtualMemory; +#endif /* __NVOC_CLASS_VirtualMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtualMemory +#define __nvoc_class_id_VirtualMemory 0x2aea5c +#endif /* __nvoc_class_id_VirtualMemory */ + + +struct VirtualMemoryRange; + +#ifndef __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__ +#define __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__ +typedef struct VirtualMemoryRange VirtualMemoryRange; +#endif /* __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtualMemoryRange +#define __nvoc_class_id_VirtualMemoryRange 0x7032c6 +#endif /* __nvoc_class_id_VirtualMemoryRange */ + + +struct VmmuApi; + +#ifndef __NVOC_CLASS_VmmuApi_TYPEDEF__ +#define __NVOC_CLASS_VmmuApi_TYPEDEF__ +typedef struct VmmuApi VmmuApi; +#endif /* __NVOC_CLASS_VmmuApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VmmuApi +#define __nvoc_class_id_VmmuApi 0x40d73a +#endif /* __nvoc_class_id_VmmuApi */ + + +struct ZbcApi; + +#ifndef __NVOC_CLASS_ZbcApi_TYPEDEF__ +#define __NVOC_CLASS_ZbcApi_TYPEDEF__ +typedef struct ZbcApi ZbcApi; +#endif /* __NVOC_CLASS_ZbcApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ZbcApi +#define __nvoc_class_id_ZbcApi 0x397ee3 +#endif /* __nvoc_class_id_ZbcApi */ + + + +#endif // RESOURCE_FWD_DECLS_H + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RESOURCE_FWD_DECLS_NVOC_H_ diff --git a/src/nvidia/generated/g_resource_nvoc.c b/src/nvidia/generated/g_resource_nvoc.c new file mode 100644 index 000000000..eee007e5a 
--- /dev/null +++ b/src/nvidia/generated/g_resource_nvoc.c @@ -0,0 +1,370 @@ +#define NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_resource_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x8ef259 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +void __nvoc_init_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_funcTable_RmResourceCommon(RmResourceCommon*); +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_dataField_RmResourceCommon(RmResourceCommon*); +void __nvoc_dtor_RmResourceCommon(RmResourceCommon*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmResourceCommon; + +static const struct NVOC_RTTI __nvoc_rtti_RmResourceCommon_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmResourceCommon, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmResourceCommon = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_RmResourceCommon_RmResourceCommon, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmResourceCommon), + /*classId=*/ classId(RmResourceCommon), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmResourceCommon", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_RmResourceCommon, + /*pExportInfo=*/ &__nvoc_export_info_RmResourceCommon +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmResourceCommon = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResourceCommon(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmResourceCommon(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_RmResourceCommon(pThis); + + status = __nvoc_rmrescmnConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RmResourceCommon_fail__init; + goto __nvoc_ctor_RmResourceCommon_exit; // Success + +__nvoc_ctor_RmResourceCommon_fail__init: +__nvoc_ctor_RmResourceCommon_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmResourceCommon_1(RmResourceCommon *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_RmResourceCommon(RmResourceCommon *pThis) { + __nvoc_init_funcTable_RmResourceCommon_1(pThis); +} + +void __nvoc_init_RmResourceCommon(RmResourceCommon *pThis) { + pThis->__nvoc_pbase_RmResourceCommon = pThis; + __nvoc_init_funcTable_RmResourceCommon(pThis); +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x03610d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_funcTable_RmResource(RmResource*); +NV_STATUS __nvoc_ctor_RmResource(RmResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RmResource(RmResource*); +void __nvoc_dtor_RmResource(RmResource*); 
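The __nvoc_thunk_* functions generated below bridge a base-class function-table entry to a derived-class implementation (and vice versa) by shifting the object pointer by the byte offset at which the base is embedded, the same offset recorded in each NVOC_RTTI entry. A minimal self-contained sketch of that pointer arithmetic, using hypothetical Base/Derived types and the standard offsetof in place of NV_OFFSETOF:

#include <stddef.h>   /* offsetof, standing in for NV_OFFSETOF used by the generated code */

/* Illustrative only: Derived embeds Base by value, as RmResource embeds RsResource. */
typedef struct Base    { int baseField; } Base;
typedef struct Derived { int other; Base base; } Derived;

/* Base-to-derived adjustment, mirroring the "- offset" casts in the thunks below. */
static Derived *derivedFromBase(Base *pBase)
{
    return (Derived *)(((unsigned char *)pBase) - offsetof(Derived, base));
}

/* Derived-to-base adjustment, mirroring the "+ offset" casts. */
static Base *baseFromDerived(Derived *pDerived)
{
    return (Base *)(((unsigned char *)pDerived) + offsetof(Derived, base));
}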
+extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmResource; + +static const struct NVOC_RTTI __nvoc_rtti_RmResource_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmResource, __nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmResource, __nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RmResource_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RmResource, __nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RmResource = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_RmResource_RmResource, + &__nvoc_rtti_RmResource_RmResourceCommon, + &__nvoc_rtti_RmResource_RsResource, + &__nvoc_rtti_RmResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RmResource), + /*classId=*/ classId(RmResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RmResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmResource, + /*pCastInfo=*/ &__nvoc_castinfo_RmResource, + /*pExportInfo=*/ &__nvoc_export_info_RmResource +}; + +static NvBool __nvoc_thunk_RmResource_resAccessCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NvBool __nvoc_thunk_RmResource_resShareCallback(struct RsResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_resControl_Prologue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RmResource_resControl_Epilogue(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) - __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresControl(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresUnmap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned 
char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresMapTo(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pParams); +} + +static NvU32 __nvoc_thunk_RsResource_rmresGetRefCount(struct RmResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresControlFilter(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_rmresAddAdditionalDependants(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pReference); +} + +static NvBool __nvoc_thunk_RsResource_rmresCanCopy(struct RmResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_rmresPreDestruct(struct RmResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresUnmapFrom(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresControlLookup(struct RmResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_rmresMap(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RmResource_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RmResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsResource(RsResource*); +void __nvoc_dtor_RmResourceCommon(RmResourceCommon*); +void __nvoc_dtor_RmResource(RmResource *pThis) { + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RmResource(RmResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsResource(RsResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RmResourceCommon(RmResourceCommon* ); +NV_STATUS __nvoc_ctor_RmResource(RmResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsResource(&pThis->__nvoc_base_RsResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto 
__nvoc_ctor_RmResource_fail_RsResource; + status = __nvoc_ctor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + if (status != NV_OK) goto __nvoc_ctor_RmResource_fail_RmResourceCommon; + __nvoc_init_dataField_RmResource(pThis); + + status = __nvoc_rmresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RmResource_fail__init; + goto __nvoc_ctor_RmResource_exit; // Success + +__nvoc_ctor_RmResource_fail__init: + __nvoc_dtor_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); +__nvoc_ctor_RmResource_fail_RmResourceCommon: + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); +__nvoc_ctor_RmResource_fail_RsResource: +__nvoc_ctor_RmResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_RmResource_1(RmResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__rmresAccessCallback__ = &rmresAccessCallback_IMPL; + + pThis->__rmresShareCallback__ = &rmresShareCallback_IMPL; + + pThis->__rmresGetMemInterMapParams__ = &rmresGetMemInterMapParams_IMPL; + + pThis->__rmresCheckMemInterUnmap__ = &rmresCheckMemInterUnmap_IMPL; + + pThis->__rmresGetMemoryMappingDescriptor__ = &rmresGetMemoryMappingDescriptor_IMPL; + + pThis->__rmresControl_Prologue__ = &rmresControl_Prologue_IMPL; + + pThis->__rmresControl_Epilogue__ = &rmresControl_Epilogue_IMPL; + + pThis->__nvoc_base_RsResource.__resAccessCallback__ = &__nvoc_thunk_RmResource_resAccessCallback; + + pThis->__nvoc_base_RsResource.__resShareCallback__ = &__nvoc_thunk_RmResource_resShareCallback; + + pThis->__nvoc_base_RsResource.__resControl_Prologue__ = &__nvoc_thunk_RmResource_resControl_Prologue; + + pThis->__nvoc_base_RsResource.__resControl_Epilogue__ = &__nvoc_thunk_RmResource_resControl_Epilogue; + + pThis->__rmresControl__ = &__nvoc_thunk_RsResource_rmresControl; + + pThis->__rmresUnmap__ = &__nvoc_thunk_RsResource_rmresUnmap; + + pThis->__rmresMapTo__ = &__nvoc_thunk_RsResource_rmresMapTo; + + pThis->__rmresGetRefCount__ = &__nvoc_thunk_RsResource_rmresGetRefCount; + + pThis->__rmresControlFilter__ = &__nvoc_thunk_RsResource_rmresControlFilter; + + pThis->__rmresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_rmresAddAdditionalDependants; + + pThis->__rmresCanCopy__ = &__nvoc_thunk_RsResource_rmresCanCopy; + + pThis->__rmresPreDestruct__ = &__nvoc_thunk_RsResource_rmresPreDestruct; + + pThis->__rmresUnmapFrom__ = &__nvoc_thunk_RsResource_rmresUnmapFrom; + + pThis->__rmresControlLookup__ = &__nvoc_thunk_RsResource_rmresControlLookup; + + pThis->__rmresMap__ = &__nvoc_thunk_RsResource_rmresMap; +} + +void __nvoc_init_funcTable_RmResource(RmResource *pThis) { + __nvoc_init_funcTable_RmResource_1(pThis); +} + +void __nvoc_init_RsResource(RsResource*); +void __nvoc_init_RmResourceCommon(RmResourceCommon*); +void __nvoc_init_RmResource(RmResource *pThis) { + pThis->__nvoc_pbase_RmResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResourceCommon; + __nvoc_init_RsResource(&pThis->__nvoc_base_RsResource); + __nvoc_init_RmResourceCommon(&pThis->__nvoc_base_RmResourceCommon); + __nvoc_init_funcTable_RmResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RmResource(RmResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RmResource *pThis; + + pThis = 
portMemAllocNonPaged(sizeof(RmResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RmResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RmResource(pThis); + status = __nvoc_ctor_RmResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RmResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RmResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RmResource(RmResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RmResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_resource_nvoc.h b/src/nvidia/generated/g_resource_nvoc.h new file mode 100644 index 000000000..4760fc578 --- /dev/null +++ b/src/nvidia/generated/g_resource_nvoc.h @@ -0,0 +1,355 @@ +#ifndef _G_RESOURCE_NVOC_H_ +#define _G_RESOURCE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_resource_nvoc.h" + +#ifndef _RESOURCE_H_ +#define _RESOURCE_H_ + +#include "core/core.h" +#include "resserv/rs_resource.h" +#include "rmapi/control.h" + +/* Forward declarations */ +struct MEMORY_DESCRIPTOR; +struct OBJVASPACE; + +struct RMRES_MEM_INTER_MAP_PARAMS +{ + /// [in] + OBJGPU *pGpu; + RsResourceRef *pMemoryRef; + NvBool bSubdeviceHandleProvided; + + /// [out] + OBJGPU *pSrcGpu; + struct MEMORY_DESCRIPTOR *pSrcMemDesc; + NvHandle hMemoryDevice; + NvBool bDmaMapNeeded; + // This flag will be set when this is FLA mapping + NvBool bFlaMapping; +}; + +struct RS_RES_MAP_TO_PARAMS +{ + OBJGPU *pGpu; ///< [in] + OBJGPU *pSrcGpu; ///< [in] + struct MEMORY_DESCRIPTOR *pSrcMemDesc; ///< [in] + struct MEMORY_DESCRIPTOR **ppMemDesc; ///< [out] + RsResourceRef *pMemoryRef; ///< [in] + NvHandle hBroadcastDevice; ///< [in] + NvHandle hMemoryDevice; ///< [in] + NvU32 gpuMask; ///< [in] + NvU64 offset; ///< [in] + NvU64 length; ///< [in] + NvU32 flags; ///< [in] + NvU64 *pDmaOffset; ///< [inout] + NvBool bSubdeviceHandleProvided; ///< [in] + NvBool bDmaMapNeeded; ///< [in] + NvBool bFlaMapping; ///< [in] +}; + +struct RS_RES_UNMAP_FROM_PARAMS +{ + OBJGPU *pGpu; ///< [in] + NvHandle hMemory; ///< [in] + NvHandle hBroadcastDevice; ///< [in] + NvU32 gpuMask; ///< [in] + NvU32 flags; ///< [in] + NvU64 dmaOffset; ///< [in] + struct MEMORY_DESCRIPTOR *pMemDesc; ///< [in] + NvBool bSubdeviceHandleProvided; ///< [in] +}; + +struct RS_INTER_MAP_PRIVATE +{ + OBJGPU *pGpu; + OBJGPU *pSrcGpu; + struct MEMORY_DESCRIPTOR *pSrcMemDesc; + NvHandle hBroadcastDevice; + NvHandle hMemoryDevice; + NvU32 gpuMask; + NvBool bSubdeviceHandleProvided; + NvBool bDmaMapNeeded; + NvBool bFlaMapping; +}; + +struct RS_INTER_UNMAP_PRIVATE +{ + OBJGPU *pGpu; + NvHandle hBroadcastDevice; + NvU32 gpuMask; + NvBool bSubdeviceHandleProvided; + NvBool bcState; + NvBool bAllocated; ///< This struct has been allocated and must be freed +}; + +struct RS_CPU_MAPPING_PRIVATE +{ + NvU64 gpuAddress; + NvU64 gpuMapLength; + OBJGPU *pGpu; + NvP64 pPriv; + NvU32 protect; + NvBool bKernel; +}; + +typedef struct RMRES_MEM_INTER_MAP_PARAMS RMRES_MEM_INTER_MAP_PARAMS; + +/*! 
+ * All RsResource subclasses in RM must inherit from this class + */ +#ifdef NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmResourceCommon { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; +}; + +#ifndef __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +#define __NVOC_CLASS_RmResourceCommon_TYPEDEF__ +typedef struct RmResourceCommon RmResourceCommon; +#endif /* __NVOC_CLASS_RmResourceCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResourceCommon +#define __nvoc_class_id_RmResourceCommon 0x8ef259 +#endif /* __nvoc_class_id_RmResourceCommon */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +#define __staticCast_RmResourceCommon(pThis) \ + ((pThis)->__nvoc_pbase_RmResourceCommon) + +#ifdef __nvoc_resource_h_disabled +#define __dynamicCast_RmResourceCommon(pThis) ((RmResourceCommon*)NULL) +#else //__nvoc_resource_h_disabled +#define __dynamicCast_RmResourceCommon(pThis) \ + ((RmResourceCommon*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmResourceCommon))) +#endif //__nvoc_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmResourceCommon(RmResourceCommon**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmResourceCommon(RmResourceCommon**, Dynamic*, NvU32); +#define __objCreate_RmResourceCommon(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_RmResourceCommon((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS rmrescmnConstruct_IMPL(struct RmResourceCommon *arg_pResourceCommmon); +#define __nvoc_rmrescmnConstruct(arg_pResourceCommmon) rmrescmnConstruct_IMPL(arg_pResourceCommmon) +#undef PRIVATE_FIELD + + +/*! + * Utility base class for all RsResource subclasses in by RM. 
Doesn't have to be + * used but if it isn't used RmResourceCommon must be inherited manually + */ +#ifdef NVOC_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RmResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsResource __nvoc_base_RsResource; + struct RmResourceCommon __nvoc_base_RmResourceCommon; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + NvBool (*__rmresAccessCallback__)(struct RmResource *, struct RsClient *, void *, RsAccessRight); + NvBool (*__rmresShareCallback__)(struct RmResource *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__rmresGetMemInterMapParams__)(struct RmResource *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__rmresCheckMemInterUnmap__)(struct RmResource *, NvBool); + NV_STATUS (*__rmresGetMemoryMappingDescriptor__)(struct RmResource *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__rmresControl_Prologue__)(struct RmResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__rmresControl_Epilogue__)(struct RmResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__rmresControl__)(struct RmResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__rmresUnmap__)(struct RmResource *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__rmresMapTo__)(struct RmResource *, RS_RES_MAP_TO_PARAMS *); + NvU32 (*__rmresGetRefCount__)(struct RmResource *); + NV_STATUS (*__rmresControlFilter__)(struct RmResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__rmresAddAdditionalDependants__)(struct RsClient *, struct RmResource *, RsResourceRef *); + NvBool (*__rmresCanCopy__)(struct RmResource *); + void (*__rmresPreDestruct__)(struct RmResource *); + NV_STATUS (*__rmresUnmapFrom__)(struct RmResource *, RS_RES_UNMAP_FROM_PARAMS *); + NV_STATUS (*__rmresControlLookup__)(struct RmResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__rmresMap__)(struct RmResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvU32 rpcGpuInstance; + NvBool bRpcFree; +}; + +#ifndef __NVOC_CLASS_RmResource_TYPEDEF__ +#define __NVOC_CLASS_RmResource_TYPEDEF__ +typedef struct RmResource RmResource; +#endif /* __NVOC_CLASS_RmResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RmResource +#define __nvoc_class_id_RmResource 0x03610d +#endif /* __nvoc_class_id_RmResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +#define __staticCast_RmResource(pThis) \ + ((pThis)->__nvoc_pbase_RmResource) + +#ifdef __nvoc_resource_h_disabled +#define __dynamicCast_RmResource(pThis) ((RmResource*)NULL) +#else //__nvoc_resource_h_disabled +#define __dynamicCast_RmResource(pThis) \ + ((RmResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmResource))) +#endif //__nvoc_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RmResource(RmResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RmResource(RmResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RmResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RmResource((ppNewObj), staticCast((pParent), Dynamic), 
(createFlags), arg_pCallContext, arg_pParams) + +#define rmresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) rmresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define rmresShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) rmresShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define rmresGetMemInterMapParams(pRmResource, pParams) rmresGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define rmresCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) rmresCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define rmresGetMemoryMappingDescriptor(pRmResource, ppMemDesc) rmresGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define rmresControl_Prologue(pResource, pCallContext, pParams) rmresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define rmresControl_Epilogue(pResource, pCallContext, pParams) rmresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define rmresControl(pResource, pCallContext, pParams) rmresControl_DISPATCH(pResource, pCallContext, pParams) +#define rmresUnmap(pResource, pCallContext, pCpuMapping) rmresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define rmresMapTo(pResource, pParams) rmresMapTo_DISPATCH(pResource, pParams) +#define rmresGetRefCount(pResource) rmresGetRefCount_DISPATCH(pResource) +#define rmresControlFilter(pResource, pCallContext, pParams) rmresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define rmresAddAdditionalDependants(pClient, pResource, pReference) rmresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define rmresCanCopy(pResource) rmresCanCopy_DISPATCH(pResource) +#define rmresPreDestruct(pResource) rmresPreDestruct_DISPATCH(pResource) +#define rmresUnmapFrom(pResource, pParams) rmresUnmapFrom_DISPATCH(pResource, pParams) +#define rmresControlLookup(pResource, pParams, ppEntry) rmresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define rmresMap(pResource, pCallContext, pParams, pCpuMapping) rmresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +NvBool rmresAccessCallback_IMPL(struct RmResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + +static inline NvBool rmresAccessCallback_DISPATCH(struct RmResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__rmresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NvBool rmresShareCallback_IMPL(struct RmResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +static inline NvBool rmresShareCallback_DISPATCH(struct RmResource *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__rmresShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS rmresGetMemInterMapParams_IMPL(struct RmResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS rmresGetMemInterMapParams_DISPATCH(struct RmResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__rmresGetMemInterMapParams__(pRmResource, pParams); +} + +NV_STATUS rmresCheckMemInterUnmap_IMPL(struct RmResource *pRmResource, NvBool bSubdeviceHandleProvided); + +static inline NV_STATUS rmresCheckMemInterUnmap_DISPATCH(struct RmResource *pRmResource, NvBool 
bSubdeviceHandleProvided) { + return pRmResource->__rmresCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +NV_STATUS rmresGetMemoryMappingDescriptor_IMPL(struct RmResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc); + +static inline NV_STATUS rmresGetMemoryMappingDescriptor_DISPATCH(struct RmResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__rmresGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +NV_STATUS rmresControl_Prologue_IMPL(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS rmresControl_Prologue_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__rmresControl_Prologue__(pResource, pCallContext, pParams); +} + +void rmresControl_Epilogue_IMPL(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline void rmresControl_Epilogue_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__rmresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rmresControl_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__rmresControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rmresUnmap_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__rmresUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS rmresMapTo_DISPATCH(struct RmResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__rmresMapTo__(pResource, pParams); +} + +static inline NvU32 rmresGetRefCount_DISPATCH(struct RmResource *pResource) { + return pResource->__rmresGetRefCount__(pResource); +} + +static inline NV_STATUS rmresControlFilter_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__rmresControlFilter__(pResource, pCallContext, pParams); +} + +static inline void rmresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RmResource *pResource, RsResourceRef *pReference) { + pResource->__rmresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvBool rmresCanCopy_DISPATCH(struct RmResource *pResource) { + return pResource->__rmresCanCopy__(pResource); +} + +static inline void rmresPreDestruct_DISPATCH(struct RmResource *pResource) { + pResource->__rmresPreDestruct__(pResource); +} + +static inline NV_STATUS rmresUnmapFrom_DISPATCH(struct RmResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__rmresUnmapFrom__(pResource, pParams); +} + +static inline NV_STATUS rmresControlLookup_DISPATCH(struct RmResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__rmresControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS rmresMap_DISPATCH(struct RmResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__rmresMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS rmresConstruct_IMPL(struct RmResource *arg_pResource, struct CALL_CONTEXT 
*arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_rmresConstruct(arg_pResource, arg_pCallContext, arg_pParams) rmresConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // _RESOURCE_H_ + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RESOURCE_NVOC_H_ diff --git a/src/nvidia/generated/g_resserv_nvoc.h b/src/nvidia/generated/g_resserv_nvoc.h new file mode 100644 index 000000000..426eff0f3 --- /dev/null +++ b/src/nvidia/generated/g_resserv_nvoc.h @@ -0,0 +1,418 @@ +#ifndef _G_RESSERV_NVOC_H_ +#define _G_RESSERV_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_resserv_nvoc.h" + +#ifndef _RESSERV_H_ +#define _RESSERV_H_ + +#include "nvoc/object.h" + +#include "containers/list.h" +#include "containers/map.h" +#include "containers/multimap.h" + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvos.h" +#include "nvsecurityinfo.h" +#include "rs_access.h" + +#if LOCK_VAL_ENABLED +#include "lockval/lockval.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if (RS_STANDALONE) +#include + +#ifndef NV_PRINTF +extern int g_debugLevel; +#define NV_PRINTF(level, format, ...) 
if (g_debugLevel) { printf(format, ##__VA_ARGS__); } +#endif +#include "utils/nvprintf.h" +#endif + +// +// Forward declarations +// +typedef struct RsServer RsServer; +typedef struct RsDomain RsDomain; +typedef struct CLIENT_ENTRY CLIENT_ENTRY; +typedef struct RsResourceDep RsResourceDep; +typedef struct RsResourceRef RsResourceRef; +typedef struct RsInterMapping RsInterMapping; +typedef struct RsCpuMapping RsCpuMapping; + +// RS-TODO INTERNAL and EXTERNAL params should be different structures +typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS_INTERNAL; +typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS_INTERNAL; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS; +typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS_INTERNAL; +typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS; +typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS_INTERNAL; +typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS_INTERNAL; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS; +typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL; +typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS_INTERNAL; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_LEGACY_CONTROL_PARAMS; +typedef struct RS_LEGACY_ALLOC_PARAMS RS_LEGACY_ALLOC_PARAMS; +typedef struct RS_LEGACY_FREE_PARAMS RS_LEGACY_FREE_PARAMS; + +typedef struct RS_CPU_MAP_PARAMS RS_CPU_MAP_PARAMS; +typedef struct RS_CPU_UNMAP_PARAMS RS_CPU_UNMAP_PARAMS; +typedef struct RS_INTER_MAP_PARAMS RS_INTER_MAP_PARAMS; +typedef struct RS_INTER_UNMAP_PARAMS RS_INTER_UNMAP_PARAMS; + +// Forward declarations for structs defined by user +typedef struct RS_RES_MAP_TO_PARAMS RS_RES_MAP_TO_PARAMS; +typedef struct RS_RES_UNMAP_FROM_PARAMS RS_RES_UNMAP_FROM_PARAMS; +typedef struct RS_INTER_MAP_PRIVATE RS_INTER_MAP_PRIVATE; +typedef struct RS_INTER_UNMAP_PRIVATE RS_INTER_UNMAP_PRIVATE; +typedef struct RS_CPU_MAPPING_PRIVATE RS_CPU_MAPPING_PRIVATE; + +typedef struct RS_CPU_MAPPING_BACK_REF RS_CPU_MAPPING_BACK_REF; +typedef struct RS_INTER_MAPPING_BACK_REF RS_INTER_MAPPING_BACK_REF; +typedef struct RS_FREE_STACK RS_FREE_STACK; +typedef struct CALL_CONTEXT CALL_CONTEXT; +typedef struct ACCESS_CONTROL ACCESS_CONTROL; +typedef struct RS_ITERATOR RS_ITERATOR; +typedef struct RS_ORDERED_ITERATOR RS_ORDERED_ITERATOR; +typedef struct RS_SHARE_ITERATOR RS_SHARE_ITERATOR; +typedef struct API_STATE API_STATE; +typedef struct RS_LOCK_INFO RS_LOCK_INFO; +typedef struct RS_CONTROL_COOKIE RS_CONTROL_COOKIE; +typedef NV_STATUS RsCtrlFunc(struct RS_RES_CONTROL_PARAMS_INTERNAL*); + +struct RsClient; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + + +struct RsResource; + +#ifndef __NVOC_CLASS_RsResource_TYPEDEF__ +#define __NVOC_CLASS_RsResource_TYPEDEF__ +typedef struct RsResource RsResource; +#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsResource +#define __nvoc_class_id_RsResource 0xd551cb +#endif /* __nvoc_class_id_RsResource */ + + +struct 
RsShared; + +#ifndef __NVOC_CLASS_RsShared_TYPEDEF__ +#define __NVOC_CLASS_RsShared_TYPEDEF__ +typedef struct RsShared RsShared; +#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsShared +#define __nvoc_class_id_RsShared 0x830542 +#endif /* __nvoc_class_id_RsShared */ + + + +MAKE_LIST(RsResourceRefList, RsResourceRef*); +MAKE_LIST(RsResourceList, RsResource*); +MAKE_LIST(RsHandleList, NvHandle); +MAKE_LIST(RsClientList, CLIENT_ENTRY*); +MAKE_LIST(RsShareList, RS_SHARE_POLICY); +MAKE_MULTIMAP(RsIndex, RsResourceRef*); + +typedef NV_STATUS (*CtrlImpl_t)(struct RsClient*, struct RsResource*, void*); + +typedef void *PUID_TOKEN; + +// +// Defines +// + +/// Domain handles must start at this base value +#define RS_DOMAIN_HANDLE_BASE 0xD0D00000 + +/// Client handles must start at this base value +#define RS_CLIENT_HANDLE_BASE 0xC1D00000 + +/// +/// Internal Client handles must start at this base value +/// at either of these two bases +/// +#define RS_CLIENT_INTERNAL_HANDLE_BASE 0xC1E00000 + +#define RS_CLIENT_INTERNAL_HANDLE_BASE_EX 0xC1F00000 + +// +// Print a warning if any client's resource count exceeds this +// threshold. Unless this was intentional, this is likely a client bug. +// +#define RS_CLIENT_RESOURCE_WARNING_THRESHOLD 100000 + + +/// 0xFFFF max client handles. +#define RS_CLIENT_HANDLE_BUCKET_COUNT 0x400 // 1024 +#define RS_CLIENT_HANDLE_BUCKET_MASK 0x3FF + + +/// The default maximum number of domains a resource server can allocate +#define RS_MAX_DOMAINS_DEFAULT 4096 + +/// The maximum length of a line of ancestry for resource references +#define RS_MAX_RESOURCE_DEPTH 6 + +/// RS_LOCK_FLAGS +#define RS_LOCK_FLAGS_NO_TOP_LOCK NVBIT(0) +#define RS_LOCK_FLAGS_NO_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK NVBIT(5) +#define RS_LOCK_FLAGS_FREE_SESSION_LOCK NVBIT(6) + +/// RS_LOCK_STATE +#define RS_LOCK_STATE_TOP_LOCK_ACQUIRED NVBIT(0) +#define RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED NVBIT(1) +#define RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED NVBIT(2) +#define RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED NVBIT(3) +#define RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK NVBIT(6) +#define RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED NVBIT(7) +#define RS_LOCK_STATE_SESSION_LOCK_ACQUIRED NVBIT(8) + +/// RS_LOCK_RELEASE +#define RS_LOCK_RELEASE_TOP_LOCK NVBIT(0) +#define RS_LOCK_RELEASE_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_RELEASE_SESSION_LOCK NVBIT(5) + +/// API enumerations used for locking knobs +typedef enum +{ + RS_LOCK_CLIENT =0, + RS_LOCK_TOP =1, + RS_LOCK_RESOURCE =2, + RS_LOCK_CUSTOM_3 =3, +} RS_LOCK_ENUM; + +typedef enum +{ + RS_API_ALLOC_CLIENT = 0, + RS_API_ALLOC_RESOURCE = 1, + RS_API_FREE_RESOURCE = 2, + RS_API_MAP = 3, + RS_API_UNMAP = 4, + RS_API_INTER_MAP = 5, + RS_API_INTER_UNMAP = 6, + RS_API_COPY = 7, + RS_API_SHARE = 8, + RS_API_CTRL = 9, + RS_API_MAX, +} RS_API_ENUM; + +NV_STATUS indexAdd(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); +NV_STATUS indexRemove(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); + +// +// Externs +// +/** + * NVOC wrapper for constructing resources of a given type + * + * @param[in] pAllocator Allocator for the resource object + * @param[in] pCallContext Caller context passed to resource constructor + * @param[inout] 
pParams Resource allocation parameters + * @param[out] ppResource New resource object + */ +extern NV_STATUS resservResourceFactory(PORT_MEM_ALLOCATOR *pAllocator, CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, struct RsResource **ppResource); + +/** + * NVOC wrapper for constructing an application-specific client. + */ +extern NV_STATUS resservClientFactory(PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams, struct RsClient **ppRsClient); + +/** + * Validate the UID/PID security token of the current user against a client's security token. + * + * This will be obsolete after phase 1. + * + * @param[in] pClientToken + * @param[in] pCurrentToken + * + * @returns NV_OK if the current user's security token matches the client's security token + */ +extern NV_STATUS osValidateClientTokens(PSECURITY_TOKEN pClientToken, PSECURITY_TOKEN pCurrentToken); + +/** + * Get the security token of the current user for the UID/PID security model. + * + * This will be obsolete after phase 1. + */ +extern PSECURITY_TOKEN osGetSecurityToken(void); + +/** + * TLS entry id for call contexts. All servers will use the same id. + */ +#define TLS_ENTRY_ID_RESSERV_CALL_CONTEXT TLS_ENTRY_ID_RESSERV_1 + +// +// Structs +// +struct RS_FREE_STACK +{ + RS_FREE_STACK *pPrev; + RsResourceRef *pResourceRef; +}; + +struct CALL_CONTEXT +{ + RsServer *pServer; ///< The resource server instance that owns the client + struct RsClient *pClient; ///< Client that was the target of the call + RsResourceRef *pResourceRef; ///< Reference that was the target of the call + RsResourceRef *pContextRef; ///< Reference that may be used to provide more context [optional] + RS_LOCK_INFO *pLockInfo; ///< Saved locking context information for the call + API_SECURITY_INFO secInfo; + RS_RES_CONTROL_PARAMS_INTERNAL *pControlParams; ///< parameters of the call [optional] +}; + +typedef enum { + RS_ITERATE_CHILDREN, ///< Iterate over a RsResourceRef's children + RS_ITERATE_DESCENDANTS, ///< Iterate over a RsResourceRef's children, grandchildren, etc. (unspecified order) + RS_ITERATE_CACHED, ///< Iterate over a RsResourceRef's cache + RS_ITERATE_DEPENDANTS, ///< Iterate over a RsResourceRef's dependants +} RS_ITER_TYPE; + +typedef enum +{ + LOCK_ACCESS_READ, + LOCK_ACCESS_WRITE, +} LOCK_ACCESS_TYPE; + + + +/** + * Access control information. This information will be filled out by the user + * of the Resource Server when allocating a client or resource. 
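+ *
+ * Illustrative sketch of how a caller might fill this in before allocating a
+ * client (the RS_PRIV_LEVEL enumerator named here is assumed, not defined in
+ * this header; osGetSecurityToken() is the UID/PID-model helper declared above):
+ *
+ *   ACCESS_CONTROL accessControl;
+ *   accessControl.privilegeLevel = RS_PRIV_LEVEL_USER;   // assumed enumerator
+ *   accessControl.pSecurityToken = osGetSecurityToken(); // token for the current user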
+ */ +struct ACCESS_CONTROL +{ + /** + * The privilege level of this access control + */ + RS_PRIV_LEVEL privilegeLevel; + + /** + * Opaque pointer for storing a security token + */ + PSECURITY_TOKEN pSecurityToken; +}; + +// +// Utility wrappers for locking validator +// +#if LOCK_VAL_ENABLED +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) \ + do { NV_ASSERT_OK(lockvalLockInit((lock), (lockClass), (inst))); } while(0) + +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do \ +{ \ + NV_ASSERT_OK(lockvalPreAcquire((validator))); \ + portSyncRwLockAcquireRead((lock)); \ + lockvalPostAcquire((validator), LOCK_VAL_RLOCK); \ +} while(0) + +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do \ +{ \ + NV_ASSERT_OK(lockvalPreAcquire((validator))); \ + portSyncRwLockAcquireWrite((lock)); \ + lockvalPostAcquire((validator), LOCK_VAL_WLOCK); \ +} while(0) + +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseRead((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseWrite((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#else +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do { portSyncRwLockAcquireRead((lock)); } while(0) +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do { portSyncRwLockAcquireWrite((lock)); } while(0) +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseRead((lock)); } while(0) +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseWrite((lock)); } while(0) +#endif + +#define RS_RWLOCK_RELEASE_READ(lock, validator) RS_RWLOCK_RELEASE_READ_EXT(lock, validator, NV_FALSE) +#define RS_RWLOCK_RELEASE_WRITE(lock, validator) RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, NV_FALSE) + + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RESSERV_NVOC_H_ diff --git a/src/nvidia/generated/g_rg_line_callback_nvoc.c b/src/nvidia/generated/g_rg_line_callback_nvoc.c new file mode 100644 index 000000000..728e35be1 --- /dev/null +++ b/src/nvidia/generated/g_rg_line_callback_nvoc.c @@ -0,0 +1,328 @@ +#define NVOC_RG_LINE_CALLBACK_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rg_line_callback_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xa3ff1c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RgLineCallback; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct 
NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_RgLineCallback(RgLineCallback*); +void __nvoc_init_funcTable_RgLineCallback(RgLineCallback*); +NV_STATUS __nvoc_ctor_RgLineCallback(RgLineCallback*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RgLineCallback(RgLineCallback*); +void __nvoc_dtor_RgLineCallback(RgLineCallback*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RgLineCallback; + +static const struct NVOC_RTTI __nvoc_rtti_RgLineCallback_RgLineCallback = { + /*pClassDef=*/ &__nvoc_class_def_RgLineCallback, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RgLineCallback, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RgLineCallback_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RgLineCallback, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RgLineCallback_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RgLineCallback, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RgLineCallback_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RgLineCallback, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RgLineCallback_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RgLineCallback, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RgLineCallback_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RgLineCallback, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RgLineCallback = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_RgLineCallback_RgLineCallback, + &__nvoc_rtti_RgLineCallback_GpuResource, + &__nvoc_rtti_RgLineCallback_RmResource, + &__nvoc_rtti_RgLineCallback_RmResourceCommon, + &__nvoc_rtti_RgLineCallback_RsResource, + &__nvoc_rtti_RgLineCallback_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RgLineCallback = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RgLineCallback), + /*classId=*/ classId(RgLineCallback), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RgLineCallback", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RgLineCallback, + /*pCastInfo=*/ &__nvoc_castinfo_RgLineCallback, + /*pExportInfo=*/ &__nvoc_export_info_RgLineCallback +}; + +static NvBool __nvoc_thunk_GpuResource_rglcbShareCallback(struct RgLineCallback *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_RgLineCallback_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_rglcbControl(struct RgLineCallback *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return 
gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_RgLineCallback_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_rglcbUnmap(struct RgLineCallback *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_RgLineCallback_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_rglcbGetMemInterMapParams(struct RgLineCallback *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_RgLineCallback_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_rglcbGetMemoryMappingDescriptor(struct RgLineCallback *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_RgLineCallback_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_rglcbGetMapAddrSpace(struct RgLineCallback *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_RgLineCallback_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_rglcbGetInternalObjectHandle(struct RgLineCallback *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_RgLineCallback_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_rglcbControlFilter(struct RgLineCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_rglcbAddAdditionalDependants(struct RsClient *pClient, struct RgLineCallback *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_rglcbGetRefCount(struct RgLineCallback *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_rglcbCheckMemInterUnmap(struct RgLineCallback *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_RgLineCallback_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_rglcbMapTo(struct RgLineCallback *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_rglcbControl_Prologue(struct RgLineCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_rglcbGetRegBaseOffsetAndSize(struct 
RgLineCallback *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_RgLineCallback_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_rglcbCanCopy(struct RgLineCallback *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_rglcbInternalControlForward(struct RgLineCallback *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_RgLineCallback_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_rglcbPreDestruct(struct RgLineCallback *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_rglcbUnmapFrom(struct RgLineCallback *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_rglcbControl_Epilogue(struct RgLineCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_rglcbControlLookup(struct RgLineCallback *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_rglcbMap(struct RgLineCallback *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_RgLineCallback_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_rglcbAccessCallback(struct RgLineCallback *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_RgLineCallback_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RgLineCallback = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_RgLineCallback(RgLineCallback *pThis) { + __nvoc_rglcbDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RgLineCallback(RgLineCallback *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RgLineCallback(RgLineCallback *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto 
__nvoc_ctor_RgLineCallback_fail_GpuResource; + __nvoc_init_dataField_RgLineCallback(pThis); + + status = __nvoc_rglcbConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RgLineCallback_fail__init; + goto __nvoc_ctor_RgLineCallback_exit; // Success + +__nvoc_ctor_RgLineCallback_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_RgLineCallback_fail_GpuResource: +__nvoc_ctor_RgLineCallback_exit: + + return status; +} + +static void __nvoc_init_funcTable_RgLineCallback_1(RgLineCallback *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__rglcbShareCallback__ = &__nvoc_thunk_GpuResource_rglcbShareCallback; + + pThis->__rglcbControl__ = &__nvoc_thunk_GpuResource_rglcbControl; + + pThis->__rglcbUnmap__ = &__nvoc_thunk_GpuResource_rglcbUnmap; + + pThis->__rglcbGetMemInterMapParams__ = &__nvoc_thunk_RmResource_rglcbGetMemInterMapParams; + + pThis->__rglcbGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_rglcbGetMemoryMappingDescriptor; + + pThis->__rglcbGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_rglcbGetMapAddrSpace; + + pThis->__rglcbGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_rglcbGetInternalObjectHandle; + + pThis->__rglcbControlFilter__ = &__nvoc_thunk_RsResource_rglcbControlFilter; + + pThis->__rglcbAddAdditionalDependants__ = &__nvoc_thunk_RsResource_rglcbAddAdditionalDependants; + + pThis->__rglcbGetRefCount__ = &__nvoc_thunk_RsResource_rglcbGetRefCount; + + pThis->__rglcbCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_rglcbCheckMemInterUnmap; + + pThis->__rglcbMapTo__ = &__nvoc_thunk_RsResource_rglcbMapTo; + + pThis->__rglcbControl_Prologue__ = &__nvoc_thunk_RmResource_rglcbControl_Prologue; + + pThis->__rglcbGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_rglcbGetRegBaseOffsetAndSize; + + pThis->__rglcbCanCopy__ = &__nvoc_thunk_RsResource_rglcbCanCopy; + + pThis->__rglcbInternalControlForward__ = &__nvoc_thunk_GpuResource_rglcbInternalControlForward; + + pThis->__rglcbPreDestruct__ = &__nvoc_thunk_RsResource_rglcbPreDestruct; + + pThis->__rglcbUnmapFrom__ = &__nvoc_thunk_RsResource_rglcbUnmapFrom; + + pThis->__rglcbControl_Epilogue__ = &__nvoc_thunk_RmResource_rglcbControl_Epilogue; + + pThis->__rglcbControlLookup__ = &__nvoc_thunk_RsResource_rglcbControlLookup; + + pThis->__rglcbMap__ = &__nvoc_thunk_GpuResource_rglcbMap; + + pThis->__rglcbAccessCallback__ = &__nvoc_thunk_RmResource_rglcbAccessCallback; +} + +void __nvoc_init_funcTable_RgLineCallback(RgLineCallback *pThis) { + __nvoc_init_funcTable_RgLineCallback_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_RgLineCallback(RgLineCallback *pThis) { + pThis->__nvoc_pbase_RgLineCallback = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_RgLineCallback(pThis); +} + +NV_STATUS __nvoc_objCreate_RgLineCallback(RgLineCallback **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * 
arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RgLineCallback *pThis; + + pThis = portMemAllocNonPaged(sizeof(RgLineCallback)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RgLineCallback)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RgLineCallback); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RgLineCallback(pThis); + status = __nvoc_ctor_RgLineCallback(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RgLineCallback_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RgLineCallback_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RgLineCallback(RgLineCallback **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RgLineCallback(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_rg_line_callback_nvoc.h b/src/nvidia/generated/g_rg_line_callback_nvoc.h new file mode 100644 index 000000000..be0745202 --- /dev/null +++ b/src/nvidia/generated/g_rg_line_callback_nvoc.h @@ -0,0 +1,260 @@ +#ifndef _G_RG_LINE_CALLBACK_NVOC_H_ +#define _G_RG_LINE_CALLBACK_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_rg_line_callback_nvoc.h" + +#ifndef RG_LINE_CALLBACK_H +#define RG_LINE_CALLBACK_H + +#include "class/cl0092.h" +#include "gpu/gpu_resource.h" + +struct DispCommon; + +#ifndef __NVOC_CLASS_DispCommon_TYPEDEF__ +#define __NVOC_CLASS_DispCommon_TYPEDEF__ +typedef struct DispCommon DispCommon; +#endif /* __NVOC_CLASS_DispCommon_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DispCommon +#define __nvoc_class_id_DispCommon 0x41f4f2 +#endif /* __nvoc_class_id_DispCommon */ + + + +/*! + * RM internal class representing NV0092_RG_LINE_CALLBACK + */ +#ifdef NVOC_RG_LINE_CALLBACK_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RgLineCallback { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct RgLineCallback *__nvoc_pbase_RgLineCallback; + NvBool (*__rglcbShareCallback__)(struct RgLineCallback *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__rglcbControl__)(struct RgLineCallback *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__rglcbUnmap__)(struct RgLineCallback *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__rglcbGetMemInterMapParams__)(struct RgLineCallback *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__rglcbGetMemoryMappingDescriptor__)(struct RgLineCallback *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__rglcbGetMapAddrSpace__)(struct RgLineCallback *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__rglcbGetInternalObjectHandle__)(struct RgLineCallback *); + NV_STATUS (*__rglcbControlFilter__)(struct RgLineCallback *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__rglcbAddAdditionalDependants__)(struct RsClient *, struct RgLineCallback *, RsResourceRef *); + NvU32 (*__rglcbGetRefCount__)(struct RgLineCallback *); + NV_STATUS (*__rglcbCheckMemInterUnmap__)(struct RgLineCallback *, NvBool); + NV_STATUS (*__rglcbMapTo__)(struct RgLineCallback *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__rglcbControl_Prologue__)(struct RgLineCallback *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__rglcbGetRegBaseOffsetAndSize__)(struct RgLineCallback *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__rglcbCanCopy__)(struct RgLineCallback *); + NV_STATUS (*__rglcbInternalControlForward__)(struct RgLineCallback *, NvU32, void *, NvU32); + void (*__rglcbPreDestruct__)(struct RgLineCallback *); + NV_STATUS (*__rglcbUnmapFrom__)(struct RgLineCallback *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__rglcbControl_Epilogue__)(struct RgLineCallback *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__rglcbControlLookup__)(struct RgLineCallback *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__rglcbMap__)(struct RgLineCallback *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__rglcbAccessCallback__)(struct RgLineCallback *, struct RsClient *, void *, RsAccessRight); + NvU32 subDeviceInstance; + NvU32 head; + NvU32 rgLineNum; + NV0092_REGISTER_RG_LINE_CALLBACK_FN pCallbkFn; + void *pCallbkParams; + NvU32 rgIntrLine; + struct DispCommon *pDispCommon; +}; + +#ifndef 
__NVOC_CLASS_RgLineCallback_TYPEDEF__ +#define __NVOC_CLASS_RgLineCallback_TYPEDEF__ +typedef struct RgLineCallback RgLineCallback; +#endif /* __NVOC_CLASS_RgLineCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RgLineCallback +#define __nvoc_class_id_RgLineCallback 0xa3ff1c +#endif /* __nvoc_class_id_RgLineCallback */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RgLineCallback; + +#define __staticCast_RgLineCallback(pThis) \ + ((pThis)->__nvoc_pbase_RgLineCallback) + +#ifdef __nvoc_rg_line_callback_h_disabled +#define __dynamicCast_RgLineCallback(pThis) ((RgLineCallback*)NULL) +#else //__nvoc_rg_line_callback_h_disabled +#define __dynamicCast_RgLineCallback(pThis) \ + ((RgLineCallback*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RgLineCallback))) +#endif //__nvoc_rg_line_callback_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RgLineCallback(RgLineCallback**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RgLineCallback(RgLineCallback**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RgLineCallback(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RgLineCallback((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define rglcbShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) rglcbShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define rglcbControl(pGpuResource, pCallContext, pParams) rglcbControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define rglcbUnmap(pGpuResource, pCallContext, pCpuMapping) rglcbUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define rglcbGetMemInterMapParams(pRmResource, pParams) rglcbGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define rglcbGetMemoryMappingDescriptor(pRmResource, ppMemDesc) rglcbGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define rglcbGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) rglcbGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define rglcbGetInternalObjectHandle(pGpuResource) rglcbGetInternalObjectHandle_DISPATCH(pGpuResource) +#define rglcbControlFilter(pResource, pCallContext, pParams) rglcbControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define rglcbAddAdditionalDependants(pClient, pResource, pReference) rglcbAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define rglcbGetRefCount(pResource) rglcbGetRefCount_DISPATCH(pResource) +#define rglcbCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) rglcbCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define rglcbMapTo(pResource, pParams) rglcbMapTo_DISPATCH(pResource, pParams) +#define rglcbControl_Prologue(pResource, pCallContext, pParams) rglcbControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define rglcbGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) rglcbGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define rglcbCanCopy(pResource) rglcbCanCopy_DISPATCH(pResource) +#define rglcbInternalControlForward(pGpuResource, command, pParams, size) rglcbInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define rglcbPreDestruct(pResource) rglcbPreDestruct_DISPATCH(pResource) +#define rglcbUnmapFrom(pResource, pParams) rglcbUnmapFrom_DISPATCH(pResource, pParams) +#define rglcbControl_Epilogue(pResource, pCallContext, pParams) 
rglcbControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define rglcbControlLookup(pResource, pParams, ppEntry) rglcbControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define rglcbMap(pGpuResource, pCallContext, pParams, pCpuMapping) rglcbMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define rglcbAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) rglcbAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool rglcbShareCallback_DISPATCH(struct RgLineCallback *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__rglcbShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS rglcbControl_DISPATCH(struct RgLineCallback *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__rglcbControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS rglcbUnmap_DISPATCH(struct RgLineCallback *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__rglcbUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS rglcbGetMemInterMapParams_DISPATCH(struct RgLineCallback *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__rglcbGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS rglcbGetMemoryMappingDescriptor_DISPATCH(struct RgLineCallback *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__rglcbGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS rglcbGetMapAddrSpace_DISPATCH(struct RgLineCallback *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__rglcbGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle rglcbGetInternalObjectHandle_DISPATCH(struct RgLineCallback *pGpuResource) { + return pGpuResource->__rglcbGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS rglcbControlFilter_DISPATCH(struct RgLineCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__rglcbControlFilter__(pResource, pCallContext, pParams); +} + +static inline void rglcbAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RgLineCallback *pResource, RsResourceRef *pReference) { + pResource->__rglcbAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 rglcbGetRefCount_DISPATCH(struct RgLineCallback *pResource) { + return pResource->__rglcbGetRefCount__(pResource); +} + +static inline NV_STATUS rglcbCheckMemInterUnmap_DISPATCH(struct RgLineCallback *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__rglcbCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS rglcbMapTo_DISPATCH(struct RgLineCallback *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__rglcbMapTo__(pResource, pParams); +} + +static inline NV_STATUS rglcbControl_Prologue_DISPATCH(struct RgLineCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__rglcbControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rglcbGetRegBaseOffsetAndSize_DISPATCH(struct RgLineCallback 
*pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__rglcbGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool rglcbCanCopy_DISPATCH(struct RgLineCallback *pResource) { + return pResource->__rglcbCanCopy__(pResource); +} + +static inline NV_STATUS rglcbInternalControlForward_DISPATCH(struct RgLineCallback *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__rglcbInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void rglcbPreDestruct_DISPATCH(struct RgLineCallback *pResource) { + pResource->__rglcbPreDestruct__(pResource); +} + +static inline NV_STATUS rglcbUnmapFrom_DISPATCH(struct RgLineCallback *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__rglcbUnmapFrom__(pResource, pParams); +} + +static inline void rglcbControl_Epilogue_DISPATCH(struct RgLineCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__rglcbControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS rglcbControlLookup_DISPATCH(struct RgLineCallback *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__rglcbControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS rglcbMap_DISPATCH(struct RgLineCallback *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__rglcbMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool rglcbAccessCallback_DISPATCH(struct RgLineCallback *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__rglcbAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS rglcbConstruct_IMPL(struct RgLineCallback *arg_pRgLineCallback, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_rglcbConstruct(arg_pRgLineCallback, arg_pCallContext, arg_pParams) rglcbConstruct_IMPL(arg_pRgLineCallback, arg_pCallContext, arg_pParams) +void rglcbDestruct_IMPL(struct RgLineCallback *pRgLineCallback); +#define __nvoc_rglcbDestruct(pRgLineCallback) rglcbDestruct_IMPL(pRgLineCallback) +void rglcbInvoke_IMPL(struct RgLineCallback *pRgLineCallback, NvBool bIsIrqlIsr); +#ifdef __nvoc_rg_line_callback_h_disabled +static inline void rglcbInvoke(struct RgLineCallback *pRgLineCallback, NvBool bIsIrqlIsr) { + NV_ASSERT_FAILED_PRECOMP("RgLineCallback was disabled!"); +} +#else //__nvoc_rg_line_callback_h_disabled +#define rglcbInvoke(pRgLineCallback, bIsIrqlIsr) rglcbInvoke_IMPL(pRgLineCallback, bIsIrqlIsr) +#endif //__nvoc_rg_line_callback_h_disabled + +#undef PRIVATE_FIELD + + +#endif // RG_LINE_CALLBACK_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RG_LINE_CALLBACK_NVOC_H_ diff --git a/src/nvidia/generated/g_rmconfig_private.h b/src/nvidia/generated/g_rmconfig_private.h new file mode 100644 index 000000000..0abaf0a37 --- /dev/null +++ b/src/nvidia/generated/g_rmconfig_private.h @@ -0,0 +1,695 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// private rmconfig generated #defines such as IsG84(), +// RMCFG_FEATURE_ENABLED_STATUS(), etc. +// +// Only for use within resman. 
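+//
+// Note: for chips outside this profile the identity macros below collapse to
+// compile-time constants (e.g. IsGF100(pGpu) expands to ((0) && (pGpu))), so
+// guarded code is typically eliminated as dead code, while in-profile chips
+// route to the rmcfg_Is*() runtime helpers. Illustrative call site with
+// hypothetical helper names (not from this file):
+//
+//   if (IsTU102(pGpu))               // runtime check via rmcfg_IsTU102()
+//       tu102ApplyWorkaround(pGpu);  // hypothetical
+//   if (IsGF100(pGpu))               // constant 0, branch compiled out
+//       gf100ApplyWorkaround(pGpu);  // hypothetical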
+// +// Profile: shipping-gpus-openrm +// Template: templates/gt_rmconfig_private.h +// +// Chips: TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +// + +#ifndef _G_RMCFG_PRIVATE_H_ +#define _G_RMCFG_PRIVATE_H_ + +// +// CHIP identity macros such as IsGK104() +// + +// GF10X +#define IsGF100(pGpu) ((0) && (pGpu)) +#define IsGF100orBetter(pGpu) ((1) && (pGpu)) + +#define IsGF100B(pGpu) ((0) && (pGpu)) +#define IsGF100BorBetter(pGpu) ((1) && (pGpu)) + +#define IsGF104(pGpu) ((0) && (pGpu)) +#define IsGF104orBetter(pGpu) ((1) && (pGpu)) + +#define IsGF104B(pGpu) ((0) && (pGpu)) +#define IsGF104BorBetter(pGpu) ((1) && (pGpu)) + +#define IsGF106(pGpu) ((0) && (pGpu)) +#define IsGF106orBetter(pGpu) ((1) && (pGpu)) + +#define IsGF106B(pGpu) ((0) && (pGpu)) +#define IsGF106BorBetter(pGpu) ((1) && (pGpu)) + +#define IsGF108(pGpu) ((0) && (pGpu)) +#define IsGF108orBetter(pGpu) ((1) && (pGpu)) + +// Any GF10X chip? +#define IsGF10X(pGpu) (0 && (pGpu)) +#define IsGF10XorBetter(pGpu) (1 || (pGpu)) + + +// GF11X +#define IsGF110D(pGpu) ((0) && (pGpu)) +#define IsGF110DorBetter(pGpu) ((1) && (pGpu)) + +#define IsGF110(pGpu) ((0) && (pGpu)) +#define IsGF110orBetter(pGpu) ((1) && (pGpu)) + +#define IsGF117(pGpu) ((0) && (pGpu)) +#define IsGF117orBetter(pGpu) ((1) && (pGpu)) +#define IsGF117MaskRevA01(pGpu) ((0) && (pGpu)) + +#define IsGF118(pGpu) ((0) && (pGpu)) +#define IsGF118orBetter(pGpu) ((1) && (pGpu)) + +#define IsGF119(pGpu) ((0) && (pGpu)) +#define IsGF119orBetter(pGpu) ((1) && (pGpu)) +#define IsGF119MaskRevA01(pGpu) ((0) && (pGpu)) + +// Any GF11X chip? +#define IsGF11X(pGpu) (0 && (pGpu)) +#define IsGF11XorBetter(pGpu) (1 || (pGpu)) + + +// GF10XF +#define IsGF110F(pGpu) ((0) && (pGpu)) +#define IsGF110ForBetter(pGpu) ((1) && (pGpu)) + +#define IsGF110F2(pGpu) ((0) && (pGpu)) +#define IsGF110F2orBetter(pGpu) ((1) && (pGpu)) + +#define IsGF110F3(pGpu) ((0) && (pGpu)) +#define IsGF110F3orBetter(pGpu) ((1) && (pGpu)) + +// Any GF10XF chip? +#define IsGF10XF(pGpu) (0 && (pGpu)) +#define IsGF10XForBetter(pGpu) (1 || (pGpu)) + + +// GK10X +#define IsGK104(pGpu) ((0) && (pGpu)) +#define IsGK104orBetter(pGpu) ((1) && (pGpu)) +#define IsGK104MaskRevA01(pGpu) ((0) && (pGpu)) + +#define IsGK106(pGpu) ((0) && (pGpu)) +#define IsGK106orBetter(pGpu) ((1) && (pGpu)) + +#define IsGK107(pGpu) ((0) && (pGpu)) +#define IsGK107orBetter(pGpu) ((1) && (pGpu)) +#define IsGK107MaskRevA01(pGpu) ((0) && (pGpu)) + +#define IsGK20A(pGpu) ((0) && (pGpu)) +#define IsGK20AorBetter(pGpu) ((1) && (pGpu)) + +// Any GK10X chip? +#define IsGK10X(pGpu) (0 && (pGpu)) +#define IsGK10XorBetter(pGpu) (1 || (pGpu)) + + +// GK11X +#define IsGK110(pGpu) ((0) && (pGpu)) +#define IsGK110orBetter(pGpu) ((1) && (pGpu)) + +#define IsGK110B(pGpu) ((0) && (pGpu)) +#define IsGK110BorBetter(pGpu) ((1) && (pGpu)) + +#define IsGK110C(pGpu) ((0) && (pGpu)) +#define IsGK110CorBetter(pGpu) ((1) && (pGpu)) + +// Any GK11X chip? +#define IsGK11X(pGpu) (0 && (pGpu)) +#define IsGK11XorBetter(pGpu) (1 || (pGpu)) + + +// GK20X +#define IsGK208(pGpu) ((0) && (pGpu)) +#define IsGK208orBetter(pGpu) ((1) && (pGpu)) + +#define IsGK208S(pGpu) ((0) && (pGpu)) +#define IsGK208SorBetter(pGpu) ((1) && (pGpu)) + +// Any GK20X chip? 
+#define IsGK20X(pGpu) (0 && (pGpu)) +#define IsGK20XorBetter(pGpu) (1 || (pGpu)) + + +// GM10X +#define IsGM107(pGpu) ((0) && (pGpu)) +#define IsGM107orBetter(pGpu) ((1) && (pGpu)) +#define IsGM107MaskRevA01(pGpu) ((0) && (pGpu)) + +#define IsGM108(pGpu) ((0) && (pGpu)) +#define IsGM108orBetter(pGpu) ((1) && (pGpu)) +#define IsGM108MaskRevA01(pGpu) ((0) && (pGpu)) + +// Any GM10X chip? +#define IsGM10X(pGpu) (0 && (pGpu)) +#define IsGM10XorBetter(pGpu) (1 || (pGpu)) + + +// GM20X +#define IsGM200(pGpu) ((0) && (pGpu)) +#define IsGM200orBetter(pGpu) ((1) && (pGpu)) + +#define IsGM204(pGpu) ((0) && (pGpu)) +#define IsGM204orBetter(pGpu) ((1) && (pGpu)) + +#define IsGM206(pGpu) ((0) && (pGpu)) +#define IsGM206orBetter(pGpu) ((1) && (pGpu)) + +// Any GM20X chip? +#define IsGM20X(pGpu) (0 && (pGpu)) +#define IsGM20XorBetter(pGpu) (1 || (pGpu)) + + +// GP10X +#define IsGP100(pGpu) ((0) && (pGpu)) +#define IsGP100orBetter(pGpu) ((1) && (pGpu)) + +#define IsGP102(pGpu) ((0) && (pGpu)) +#define IsGP102orBetter(pGpu) ((1) && (pGpu)) + +#define IsGP104(pGpu) ((0) && (pGpu)) +#define IsGP104orBetter(pGpu) ((1) && (pGpu)) + +#define IsGP106(pGpu) ((0) && (pGpu)) +#define IsGP106orBetter(pGpu) ((1) && (pGpu)) + +#define IsGP107(pGpu) ((0) && (pGpu)) +#define IsGP107orBetter(pGpu) ((1) && (pGpu)) + +#define IsGP108(pGpu) ((0) && (pGpu)) +#define IsGP108orBetter(pGpu) ((1) && (pGpu)) + +// Any GP10X chip? +#define IsGP10X(pGpu) (0 && (pGpu)) +#define IsGP10XorBetter(pGpu) (1 || (pGpu)) + + +// GV10X +#define IsGV100(pGpu) ((0) && (pGpu)) +#define IsGV100orBetter(pGpu) ((1) && (pGpu)) + +// Any GV10X chip? +#define IsGV10X(pGpu) (0 && (pGpu)) +#define IsGV10XorBetter(pGpu) (1 || (pGpu)) + + +// GV11X +#define IsGV11B(pGpu) ((0) && (pGpu)) +#define IsGV11BorBetter(pGpu) ((1) && (pGpu)) + +// Any GV11X chip? +#define IsGV11X(pGpu) (0 && (pGpu)) +#define IsGV11XorBetter(pGpu) (1 || (pGpu)) + + +// TU10X +#define IsTU102(pGpu) rmcfg_IsTU102(pGpu) +#define IsTU102orBetter(pGpu) ((1) && (pGpu)) + +#define IsTU104(pGpu) rmcfg_IsTU104(pGpu) +#define IsTU104orBetter(pGpu) rmcfg_IsTU104orBetter(pGpu) + +#define IsTU106(pGpu) rmcfg_IsTU106(pGpu) +#define IsTU106orBetter(pGpu) rmcfg_IsTU106orBetter(pGpu) + +#define IsTU116(pGpu) rmcfg_IsTU116(pGpu) +#define IsTU116orBetter(pGpu) rmcfg_IsTU116orBetter(pGpu) + +#define IsTU117(pGpu) rmcfg_IsTU117(pGpu) +#define IsTU117orBetter(pGpu) rmcfg_IsTU117orBetter(pGpu) + +// Any TU10X chip? +#define IsTU10X(pGpu) rmcfg_IsTU10X(pGpu) +#define IsTU10XorBetter(pGpu) (1 || (pGpu)) + + +// GA10X +#define IsGA100(pGpu) rmcfg_IsGA100(pGpu) +#define IsGA100orBetter(pGpu) rmcfg_IsGA100orBetter(pGpu) + +#define IsGA102(pGpu) rmcfg_IsGA102(pGpu) +#define IsGA102orBetter(pGpu) rmcfg_IsGA102orBetter(pGpu) + +#define IsGA103(pGpu) rmcfg_IsGA103(pGpu) +#define IsGA103orBetter(pGpu) rmcfg_IsGA103orBetter(pGpu) + +#define IsGA104(pGpu) rmcfg_IsGA104(pGpu) +#define IsGA104orBetter(pGpu) rmcfg_IsGA104orBetter(pGpu) + +#define IsGA106(pGpu) rmcfg_IsGA106(pGpu) +#define IsGA106orBetter(pGpu) rmcfg_IsGA106orBetter(pGpu) + +#define IsGA107(pGpu) rmcfg_IsGA107(pGpu) +#define IsGA107orBetter(pGpu) rmcfg_IsGA107orBetter(pGpu) + +#define IsGA10B(pGpu) ((0) && (pGpu)) +#define IsGA10BorBetter(pGpu) ((0) && (pGpu)) + +// Any GA10X chip? +#define IsGA10X(pGpu) rmcfg_IsGA10X(pGpu) +#define IsGA10XorBetter(pGpu) rmcfg_IsGA10XorBetter(pGpu) + + +// GA10XF +#define IsGA102F(pGpu) ((0) && (pGpu)) +#define IsGA102ForBetter(pGpu) ((0) && (pGpu)) + +// Any GA10XF chip? 
+#define IsGA10XF(pGpu) (0 && (pGpu)) +#define IsGA10XForBetter(pGpu) (0 && (pGpu)) + + +// T12X +#define IsT001_FERMI_NOT_EXIST(pGpu) ((0) && (pGpu)) +#define IsT001_FERMI_NOT_EXISTorBetter(pGpu) ((0) && (pGpu)) + +#define IsT124(pGpu) ((0) && (pGpu)) +#define IsT124orBetter(pGpu) ((0) && (pGpu)) + +// Any T12X chip? +#define IsT12X(pGpu) (0 && (pGpu)) +#define IsT12XorBetter(pGpu) (0 && (pGpu)) + + +// T13X +#define IsT132(pGpu) ((0) && (pGpu)) +#define IsT132orBetter(pGpu) ((0) && (pGpu)) + +// Any T13X chip? +#define IsT13X(pGpu) (0 && (pGpu)) +#define IsT13XorBetter(pGpu) (0 && (pGpu)) + + +// T21X +#define IsT210(pGpu) ((0) && (pGpu)) +#define IsT210orBetter(pGpu) ((0) && (pGpu)) + +// Any T21X chip? +#define IsT21X(pGpu) (0 && (pGpu)) +#define IsT21XorBetter(pGpu) (0 && (pGpu)) + + +// T18X +#define IsT186(pGpu) ((0) && (pGpu)) +#define IsT186orBetter(pGpu) ((0) && (pGpu)) + +// Any T18X chip? +#define IsT18X(pGpu) (0 && (pGpu)) +#define IsT18XorBetter(pGpu) (0 && (pGpu)) + + +// T19X +#define IsT194(pGpu) ((0) && (pGpu)) +#define IsT194orBetter(pGpu) ((0) && (pGpu)) + +#define IsT002_TURING_NOT_EXIST(pGpu) ((0) && (pGpu)) +#define IsT002_TURING_NOT_EXISTorBetter(pGpu) ((0) && (pGpu)) + +// Any T19X chip? +#define IsT19X(pGpu) (0 && (pGpu)) +#define IsT19XorBetter(pGpu) (0 && (pGpu)) + + +// T23XG +#define IsT234(pGpu) ((0) && (pGpu)) +#define IsT234orBetter(pGpu) ((0) && (pGpu)) + +// Any T23XG chip? +#define IsT23XG(pGpu) (0 && (pGpu)) +#define IsT23XGorBetter(pGpu) (0 && (pGpu)) + + +// T23XD +#define IsT234D(pGpu) ((0) && (pGpu)) +#define IsT234DorBetter(pGpu) ((0) && (pGpu)) + +// Any T23XD chip? +#define IsT23XD(pGpu) (0 && (pGpu)) +#define IsT23XDorBetter(pGpu) (0 && (pGpu)) + + +// SIMS +#define IsAMODEL(pGpu) ((0) && (pGpu)) +#define IsAMODELorBetter(pGpu) ((0) && (pGpu)) + +// Any SIMS chip? +#define IsSIMS(pGpu) (0 && (pGpu)) +#define IsSIMSorBetter(pGpu) (0 && (pGpu)) + + +// Any CLASSIC_GPUS chip? +#define IsCLASSIC_GPUS(pGpu) (1 || (pGpu)) +#define IsCLASSIC_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any dFERMI chip? +#define IsdFERMI(pGpu) (0 && (pGpu)) +#define IsdFERMIorBetter(pGpu) (1 || (pGpu)) + + +// Any FERMI chip? +#define IsFERMI(pGpu) (IsFERMI_CLASSIC_GPUS(pGpu) || IsFERMI_TEGRA_BIG_GPUS(pGpu)) +#define IsFERMIorBetter(pGpu) (IsFERMI_CLASSIC_GPUSorBetter(pGpu) || IsFERMI_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any FERMI_CLASSIC_GPUS chip? +#define IsFERMI_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsFERMI_CLASSIC_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any DISPLAYLESS chip? +#define IsDISPLAYLESS(pGpu) rmcfg_IsDISPLAYLESS(pGpu) + + +// Any dKEPLER chip? +#define IsdKEPLER(pGpu) (0 && (pGpu)) +#define IsdKEPLERorBetter(pGpu) (1 || (pGpu)) + + +// Any KEPLER chip? +#define IsKEPLER(pGpu) (IsKEPLER_CLASSIC_GPUS(pGpu) || IsKEPLER_TEGRA_BIG_GPUS(pGpu)) +#define IsKEPLERorBetter(pGpu) (IsKEPLER_CLASSIC_GPUSorBetter(pGpu) || IsKEPLER_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any KEPLER_CLASSIC_GPUS chip? +#define IsKEPLER_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsKEPLER_CLASSIC_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any dMAXWELL chip? +#define IsdMAXWELL(pGpu) (0 && (pGpu)) +#define IsdMAXWELLorBetter(pGpu) (1 || (pGpu)) + + +// Any MAXWELL chip? +#define IsMAXWELL(pGpu) (IsMAXWELL_CLASSIC_GPUS(pGpu) || IsMAXWELL_TEGRA_BIG_GPUS(pGpu)) +#define IsMAXWELLorBetter(pGpu) (IsMAXWELL_CLASSIC_GPUSorBetter(pGpu) || IsMAXWELL_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any MAXWELL_CLASSIC_GPUS chip? 
+#define IsMAXWELL_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsMAXWELL_CLASSIC_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any dPASCAL chip? +#define IsdPASCAL(pGpu) (0 && (pGpu)) +#define IsdPASCALorBetter(pGpu) (1 || (pGpu)) + + +// Any PASCAL chip? +#define IsPASCAL(pGpu) (IsPASCAL_CLASSIC_GPUS(pGpu) || IsPASCAL_TEGRA_BIG_GPUS(pGpu)) +#define IsPASCALorBetter(pGpu) (IsPASCAL_CLASSIC_GPUSorBetter(pGpu) || IsPASCAL_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any PASCAL_CLASSIC_GPUS chip? +#define IsPASCAL_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsPASCAL_CLASSIC_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any dVOLTA chip? +#define IsdVOLTA(pGpu) (0 && (pGpu)) +#define IsdVOLTAorBetter(pGpu) (1 || (pGpu)) + + +// Any VOLTA chip? +#define IsVOLTA(pGpu) (IsVOLTA_CLASSIC_GPUS(pGpu) || IsVOLTA_TEGRA_BIG_GPUS(pGpu)) +#define IsVOLTAorBetter(pGpu) (IsVOLTA_CLASSIC_GPUSorBetter(pGpu) || IsVOLTA_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any VOLTA_CLASSIC_GPUS chip? +#define IsVOLTA_CLASSIC_GPUS(pGpu) (0 && (pGpu)) +#define IsVOLTA_CLASSIC_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any dTURING chip? +#define IsdTURING(pGpu) rmcfg_IsdTURING(pGpu) +#define IsdTURINGorBetter(pGpu) (1 || (pGpu)) + + +// Any TURING chip? +#define IsTURING(pGpu) (IsTURING_CLASSIC_GPUS(pGpu) || IsTURING_TEGRA_BIG_GPUS(pGpu)) +#define IsTURINGorBetter(pGpu) (IsTURING_CLASSIC_GPUSorBetter(pGpu) || IsTURING_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any TURING_CLASSIC_GPUS chip? +#define IsTURING_CLASSIC_GPUS(pGpu) rmcfg_IsTURING_CLASSIC_GPUS(pGpu) +#define IsTURING_CLASSIC_GPUSorBetter(pGpu) (1 || (pGpu)) + + +// Any dAMPERE chip? +#define IsdAMPERE(pGpu) rmcfg_IsdAMPERE(pGpu) +#define IsdAMPEREorBetter(pGpu) rmcfg_IsdAMPEREorBetter(pGpu) + + +// Any AMPERE chip? +#define IsAMPERE(pGpu) (IsAMPERE_CLASSIC_GPUS(pGpu) || IsAMPERE_TEGRA_BIG_GPUS(pGpu)) +#define IsAMPEREorBetter(pGpu) (IsAMPERE_CLASSIC_GPUSorBetter(pGpu) || IsAMPERE_TEGRA_BIG_GPUSorBetter(pGpu)) + + +// Any AMPERE_CLASSIC_GPUS chip? +#define IsAMPERE_CLASSIC_GPUS(pGpu) rmcfg_IsAMPERE_CLASSIC_GPUS(pGpu) +#define IsAMPERE_CLASSIC_GPUSorBetter(pGpu) rmcfg_IsAMPERE_CLASSIC_GPUSorBetter(pGpu) + + +// Any TEGRA_DGPU_AMPERE chip? +#define IsTEGRA_DGPU_AMPERE(pGpu) (0 && (pGpu)) + + +// Any TEGRA_DGPU chip? +#define IsTEGRA_DGPU(pGpu) (0 && (pGpu)) + + +// Any DFPGA chip? +#define IsDFPGA(pGpu) (0 && (pGpu)) + + +// Any TEGRA_BIG_GPUS chip? +#define IsTEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any FERMI_TEGRA_BIG_GPUS chip? +#define IsFERMI_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsFERMI_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA chip? +#define IsTEGRA(pGpu) (IsTEGRA_TEGRA_BIG_GPUS(pGpu) || IsTEGRA_TEGRA_NVDISP_GPUS(pGpu)) +#define IsTEGRAorBetter(pGpu) (IsTEGRA_TEGRA_BIG_GPUSorBetter(pGpu) || IsTEGRA_TEGRA_NVDISP_GPUSorBetter(pGpu)) + + +// Any TEGRA_TEGRA_BIG_GPUS chip? +#define IsTEGRA_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTEGRA_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tKEPLER chip? +#define IstKEPLER(pGpu) (0 && (pGpu)) +#define IstKEPLERorBetter(pGpu) (0 && (pGpu)) + + +// Any KEPLER_TEGRA_BIG_GPUS chip? +#define IsKEPLER_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsKEPLER_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tMAXWELL chip? +#define IstMAXWELL(pGpu) (0 && (pGpu)) +#define IstMAXWELLorBetter(pGpu) (0 && (pGpu)) + + +// Any MAXWELL_TEGRA_BIG_GPUS chip? 
+#define IsMAXWELL_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsMAXWELL_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tPASCAL chip? +#define IstPASCAL(pGpu) (0 && (pGpu)) +#define IstPASCALorBetter(pGpu) (0 && (pGpu)) + + +// Any PASCAL_TEGRA_BIG_GPUS chip? +#define IsPASCAL_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsPASCAL_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tVOLTA chip? +#define IstVOLTA(pGpu) (0 && (pGpu)) +#define IstVOLTAorBetter(pGpu) (0 && (pGpu)) + + +// Any VOLTA_TEGRA_BIG_GPUS chip? +#define IsVOLTA_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsVOLTA_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TURING_TEGRA_BIG_GPUS chip? +#define IsTURING_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsTURING_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any T23X chip? +#define IsT23X(pGpu) (IsT23X_TEGRA_BIG_GPUS(pGpu) || IsT23X_TEGRA_NVDISP_GPUS(pGpu)) +#define IsT23XorBetter(pGpu) (IsT23X_TEGRA_BIG_GPUSorBetter(pGpu) || IsT23X_TEGRA_NVDISP_GPUSorBetter(pGpu)) + + +// Any T23X_TEGRA_BIG_GPUS chip? +#define IsT23X_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsT23X_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any tAMPERE chip? +#define IstAMPERE(pGpu) (0 && (pGpu)) +#define IstAMPEREorBetter(pGpu) (0 && (pGpu)) + + +// Any AMPERE_TEGRA_BIG_GPUS chip? +#define IsAMPERE_TEGRA_BIG_GPUS(pGpu) (0 && (pGpu)) +#define IsAMPERE_TEGRA_BIG_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA_NVDISP_GPUS chip? +#define IsTEGRA_NVDISP_GPUS(pGpu) (0 && (pGpu)) +#define IsTEGRA_NVDISP_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any T23X_TEGRA_NVDISP_GPUS chip? +#define IsT23X_TEGRA_NVDISP_GPUS(pGpu) (0 && (pGpu)) +#define IsT23X_TEGRA_NVDISP_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any TEGRA_TEGRA_NVDISP_GPUS chip? +#define IsTEGRA_TEGRA_NVDISP_GPUS(pGpu) (0 && (pGpu)) +#define IsTEGRA_TEGRA_NVDISP_GPUSorBetter(pGpu) (0 && (pGpu)) + + +// Any SIMULATION_GPUS chip? +#define IsSIMULATION_GPUS(pGpu) (0 && (pGpu)) +#define IsSIMULATION_GPUSorBetter(pGpu) (0 && (pGpu)) + + + + + +// +// Enable/disable printing of entity names (class, engine, etc.) +// +#define RMCFG_ENTITY_NAME(entity) "" + +// +// Macros to help with enabling or disabling code based on whether +// a feature (or chip or engine or ...) is enabled or not. +// Also have RMCFG_CHIP_), RMCFG_FEATURE_ENABLED(, etc +// from rmconfig.h. +// +// NOTE: these definitions are "flat" (ie they don't use some more general +// RMCFG_ENABLED(CHIP,X) form because the pre-processor would re-evaluate +// the expansion of the item (chip, feature, class, api). For classes, +// at least, this is a problem since we would end up with class number +// instead of its name... + +// hack: MSVC is not C99 compliant + +// CHIP's +#define RMCFG_CHIP_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_CHIP_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CHIP" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + #define RMCFG_CHIP_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_CHIP_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CHIP" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_CHIP_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + +// FEATURE's +#define RMCFG_FEATURE_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_FEATURE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "FEATURE" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_FEATURE_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! 
RMCFG_FEATURE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "FEATURE" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_FEATURE_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + +#define RMCFG_FEATURE_PLATFORM_P (RMCFG_FEATURE_PLATFORM_##P) + +// MODULE's +#define RMCFG_MODULE_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_MODULE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "MODULE" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_MODULE_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_MODULE_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "MODULE" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_MODULE_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + + +// CLASS's +#define RMCFG_CLASS_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_CLASS_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CLASS" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_CLASS_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_CLASS_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "CLASS" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_CLASS_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + +// API's +#define RMCFG_API_ENABLED_OR_BAIL(W) \ + do { \ + if ( ! RMCFG_API_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "API" RMCFG_ENTITY_NAME(#W) " not enabled, bailing\n"); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) +#define RMCFG_API_ENABLED_OR_ASSERT_AND_BAIL(W) \ + do { \ + if ( ! RMCFG_API_##W) \ + { \ + NV_PRINTF(LEVEL_ERROR, "API" RMCFG_ENTITY_NAME(#W) " not enabled, assert and bail\n"); \ + NV_ASSERT_PRECOMP(RMCFG_API_##W); \ + return NV_ERR_NOT_SUPPORTED; \ + } \ + } while(0) + + + +// ARCH test +#define RMCFG_IS_ARCH(arch) RMCFG_FEATURE_ARCH_##arch + +#endif // _G_RMCFG_PRIVATE_H_ diff --git a/src/nvidia/generated/g_rmconfig_util.c b/src/nvidia/generated/g_rmconfig_util.c new file mode 100644 index 000000000..d9d1b9b78 --- /dev/null +++ b/src/nvidia/generated/g_rmconfig_util.c @@ -0,0 +1,188 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// rmconfig runtime support that will be part of "core" resman. +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_rmconfig_util.c +// +// Chips: TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +// + +#include "gpu/gpu.h" + +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" + +// NVOC RTTI provider for IOM objects +const NVOC_RTTI_PROVIDER __iom_rtti_provider = { 0 }; + +// +// helper functions for IsCHIP() et.al. 
+// These help to reduce code size for runtime IsCHIP() and IsCHIPALIAS() invocations +// + +NvBool rmcfg_IsTU102(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_TU102, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU104(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_TU104, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU104orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_TU104, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU106(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_TU106, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU106orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_TU106, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU116(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_TU116, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU116orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_TU116, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU117(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_TU117, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU117orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_TU117, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsTU10X(POBJGPU pGpu) +{ + return IsTU102(pGpu) || IsTU104(pGpu) || IsTU106(pGpu) || IsTU116(pGpu) || IsTU117(pGpu); +} + +NvBool rmcfg_IsGA100(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_GA100, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA100orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_GA100, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA102(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_GA102, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA102orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_GA102, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA103(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_GA103, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA103orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_GA103, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA104(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_GA104, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA104orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_GA104, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA106(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_GA106, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA106orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_GA106, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA107(POBJGPU pGpu) +{ + return gpuIsImplementation(pGpu, HAL_IMPL_GA107, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA107orBetter(POBJGPU pGpu) +{ + return gpuIsImplementationOrBetter(pGpu, HAL_IMPL_GA107, GPU_NO_MASK_REVISION, GPU_NO_REVISION); +} + +NvBool rmcfg_IsGA10X(POBJGPU pGpu) +{ + return IsGA100(pGpu) || IsGA102(pGpu) || IsGA103(pGpu) || IsGA104(pGpu) || IsGA106(pGpu) || IsGA107(pGpu); +} + +NvBool rmcfg_IsGA10XorBetter(POBJGPU pGpu) +{ + return IsGA100orBetter(pGpu); +} + +NvBool rmcfg_IsDISPLAYLESS(POBJGPU pGpu) +{ + return IsGA100(pGpu); +} + +NvBool 
rmcfg_IsdTURING(POBJGPU pGpu) +{ + return IsTU102(pGpu) || IsTU104(pGpu) || IsTU106(pGpu) || IsTU116(pGpu) || IsTU117(pGpu); +} + +NvBool rmcfg_IsTURING_CLASSIC_GPUS(POBJGPU pGpu) +{ + return IsTU102(pGpu) || IsTU104(pGpu) || IsTU106(pGpu) || IsTU116(pGpu) || IsTU117(pGpu); +} + +NvBool rmcfg_IsdAMPERE(POBJGPU pGpu) +{ + return IsGA100(pGpu) || IsGA102(pGpu) || IsGA103(pGpu) || IsGA104(pGpu) || IsGA106(pGpu) || IsGA107(pGpu); +} + +NvBool rmcfg_IsdAMPEREorBetter(POBJGPU pGpu) +{ + return IsGA100orBetter(pGpu); +} + +NvBool rmcfg_IsAMPERE_CLASSIC_GPUS(POBJGPU pGpu) +{ + return IsGA100(pGpu) || IsGA102(pGpu) || IsGA103(pGpu) || IsGA104(pGpu) || IsGA106(pGpu) || IsGA107(pGpu); +} + +NvBool rmcfg_IsAMPERE_CLASSIC_GPUSorBetter(POBJGPU pGpu) +{ + return IsGA100orBetter(pGpu); +} + + + +// NVOC class ID uniqueness checks +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x0x05c7b5 = 1; /* OBJGPIO */ +char __nvoc_class_id_uniqueness_check_0x0xaa1d70 = 1; /* OBJOS */ +char __nvoc_class_id_uniqueness_check_0x0x1ab16a = 1; /* OBJRPC */ +char __nvoc_class_id_uniqueness_check_0x0xd4dff8 = 1; /* OBJRPCSTRUCTURECOPY */ + +#endif diff --git a/src/nvidia/generated/g_rmconfig_util.h b/src/nvidia/generated/g_rmconfig_util.h new file mode 100644 index 000000000..9122e89f1 --- /dev/null +++ b/src/nvidia/generated/g_rmconfig_util.h @@ -0,0 +1,54 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Prototypes for rmconfig utility functions such as _IsGK104(), etc. +// +// Only for use within resman. +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_rmconfig_util.h +// +// Chips: TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +// + +#ifndef _G_RMCFG_UTIL_H_ +#define _G_RMCFG_UTIL_H_ + +// +// Any needed prototypes for helper functions for IsCHIP(), eg rmcfg_IsGK104() +// These cannot be put in rmconfig_private.h as they need the OBJ typedefs. 
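Editor's note: a minimal usage sketch, not part of the generated header. It assumes the usual resman status types (NV_STATUS/NV_OK) and an RMCFG_CHIP_AMPERE define supplied elsewhere by rmconfig, and shows how the compile-time bail macro from g_rmconfig_private.h is meant to be combined with the runtime IsCHIP()/IsCHIPorBetter() macros that resolve to the rmcfg_Is*() helpers declared below.

    // Hypothetical caller, for illustration only.
    static NV_STATUS exampleAmpereOnlyPath(POBJGPU pGpu)
    {
        RMCFG_CHIP_ENABLED_OR_BAIL(AMPERE);   // compile-time gate; returns NV_ERR_NOT_SUPPORTED
                                              // when AMPERE support is compiled out

        if (!IsdAMPEREorBetter(pGpu))         // runtime check via rmcfg_IsdAMPEREorBetter()
            return NV_ERR_NOT_SUPPORTED;

        return NV_OK;
    }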
+// + +NvBool rmcfg_IsTU102(POBJGPU pGpu); +NvBool rmcfg_IsTU104(POBJGPU pGpu); +NvBool rmcfg_IsTU104orBetter(POBJGPU pGpu); +NvBool rmcfg_IsTU106(POBJGPU pGpu); +NvBool rmcfg_IsTU106orBetter(POBJGPU pGpu); +NvBool rmcfg_IsTU116(POBJGPU pGpu); +NvBool rmcfg_IsTU116orBetter(POBJGPU pGpu); +NvBool rmcfg_IsTU117(POBJGPU pGpu); +NvBool rmcfg_IsTU117orBetter(POBJGPU pGpu); +NvBool rmcfg_IsTU10X(POBJGPU pGpu); +NvBool rmcfg_IsGA100(POBJGPU pGpu); +NvBool rmcfg_IsGA100orBetter(POBJGPU pGpu); +NvBool rmcfg_IsGA102(POBJGPU pGpu); +NvBool rmcfg_IsGA102orBetter(POBJGPU pGpu); +NvBool rmcfg_IsGA103(POBJGPU pGpu); +NvBool rmcfg_IsGA103orBetter(POBJGPU pGpu); +NvBool rmcfg_IsGA104(POBJGPU pGpu); +NvBool rmcfg_IsGA104orBetter(POBJGPU pGpu); +NvBool rmcfg_IsGA106(POBJGPU pGpu); +NvBool rmcfg_IsGA106orBetter(POBJGPU pGpu); +NvBool rmcfg_IsGA107(POBJGPU pGpu); +NvBool rmcfg_IsGA107orBetter(POBJGPU pGpu); +NvBool rmcfg_IsGA10X(POBJGPU pGpu); +NvBool rmcfg_IsGA10XorBetter(POBJGPU pGpu); +NvBool rmcfg_IsDISPLAYLESS(POBJGPU pGpu); +NvBool rmcfg_IsdTURING(POBJGPU pGpu); +NvBool rmcfg_IsTURING_CLASSIC_GPUS(POBJGPU pGpu); +NvBool rmcfg_IsdAMPERE(POBJGPU pGpu); +NvBool rmcfg_IsdAMPEREorBetter(POBJGPU pGpu); +NvBool rmcfg_IsAMPERE_CLASSIC_GPUS(POBJGPU pGpu); +NvBool rmcfg_IsAMPERE_CLASSIC_GPUSorBetter(POBJGPU pGpu); + + +#endif // _G_RMCFG_UTIL_H_ diff --git a/src/nvidia/generated/g_rpc-message-header.h b/src/nvidia/generated/g_rpc-message-header.h new file mode 100644 index 000000000..4117e6520 --- /dev/null +++ b/src/nvidia/generated/g_rpc-message-header.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * WARNING: This is an autogenerated file. DO NOT EDIT. 
+ * This file is generated using below files: + * template file: kernel/inc/vgpu/gt_rpc-message.h + * definition file: kernel/inc/vgpu/rpc-message-header.def + */ + + +#ifdef RPC_MESSAGE_STRUCTURES +typedef union rpc_message_rpc_union_field_v03_00 +{ + NvU32 spare; + NvU32 cpuRmGfid; +} rpc_message_rpc_union_field_v03_00; + +typedef rpc_message_rpc_union_field_v03_00 rpc_message_rpc_union_field_v; + +typedef struct rpc_message_header_v03_00 +{ + NvU32 header_version; + NvU32 signature; + NvU32 length; + NvU32 function; + NvU32 rpc_result; + NvU32 rpc_result_private; + NvU32 sequence; + rpc_message_rpc_union_field_v u; + rpc_generic_union rpc_message_data[]; +} rpc_message_header_v03_00; + +typedef rpc_message_header_v03_00 rpc_message_header_v; + + +#endif + +#ifdef RPC_MESSAGE_GENERIC_UNION +// This is a generic union, that will be used for the communication between the vmioplugin & guest RM. +typedef union rpc_message_generic_union { + rpc_message_rpc_union_field_v03_00 rpc_union_field_v03_00; + rpc_message_rpc_union_field_v rpc_union_field_v; + rpc_message_header_v03_00 header_v03_00; + rpc_message_header_v header_v; +} rpc_message_generic_union; + +#endif diff --git a/src/nvidia/generated/g_rpc-structures.h b/src/nvidia/generated/g_rpc-structures.h new file mode 100644 index 000000000..f1941dd0a --- /dev/null +++ b/src/nvidia/generated/g_rpc-structures.h @@ -0,0 +1,2101 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * WARNING: This is an autogenerated file. DO NOT EDIT. + * This file is generated using below files: + * template file: kernel/inc/vgpu/gt_rpc-structures.h + * definition file: kernel/inc/vgpu/rpc-structures.def + */ + + +#ifdef RPC_STRUCTURES +// These structures will be used for the communication between the vmioplugin & guest RM. 
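Editor's note: a minimal sketch, not part of the generated header, of how a sender might lay one of the payload structures below over the rpc_message_data flexible array of rpc_message_header_v03_00 from g_rpc-message-header.h above. The function ID and version/signature constants live outside these headers, so zeros are used as placeholders, and the helper name is hypothetical.

    static void exampleBuildSetGuestSystemInfo(rpc_message_header_v03_00 *hdr)
    {
        // Payload overlays the flexible rpc_message_data[] member via rpc_generic_union.
        rpc_set_guest_system_info_v03_00 *payload =
            &hdr->rpc_message_data[0].set_guest_system_info_v03_00;

        // Real header_version/signature/function values come from constants
        // defined elsewhere; zeros here are purely illustrative.
        hdr->header_version     = 0;
        hdr->signature          = 0;
        hdr->function           = 0;
        hdr->rpc_result         = 0;
        hdr->rpc_result_private = 0;
        hdr->length             = sizeof(*hdr) + sizeof(*payload);

        payload->vgxVersionMajorNum = 1;   // arbitrary example values
        payload->vgxVersionMinorNum = 0;
    }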
+#define SDK_STRUCTURES +#include "g_sdk-structures.h" +#undef SDK_STRUCTURES +typedef struct rpc_set_guest_system_info_v03_00 +{ + NvU32 vgxVersionMajorNum; + NvU32 vgxVersionMinorNum; + NvU32 guestDriverVersionBufferLength; + NvU32 guestVersionBufferLength; + NvU32 guestTitleBufferLength; + NvU32 guestClNum; + char guestDriverVersion[0x100]; + char guestVersion[0x100]; + char guestTitle[0x100]; +} rpc_set_guest_system_info_v03_00; + +typedef rpc_set_guest_system_info_v03_00 rpc_set_guest_system_info_v; + +typedef struct rpc_alloc_memory_v13_01 +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU32 hClass; + NvU32 flags; + NvU32 pteAdjust; + NvU32 format; + NvU64 length NV_ALIGN_BYTES(8); + NvU32 pageCount; + struct pte_desc pteDesc; +} rpc_alloc_memory_v13_01; + +typedef rpc_alloc_memory_v13_01 rpc_alloc_memory_v; + +typedef struct rpc_free_v03_00 +{ + NVOS00_PARAMETERS_v03_00 params; +} rpc_free_v03_00; + +typedef rpc_free_v03_00 rpc_free_v; + +typedef struct rpc_map_memory_dma_v03_00 +{ + NVOS46_PARAMETERS_v03_00 params; +} rpc_map_memory_dma_v03_00; + +typedef rpc_map_memory_dma_v03_00 rpc_map_memory_dma_v; + +typedef struct rpc_unmap_memory_dma_v03_00 +{ + NVOS47_PARAMETERS_v03_00 params; +} rpc_unmap_memory_dma_v03_00; + +typedef rpc_unmap_memory_dma_v03_00 rpc_unmap_memory_dma_v; + +typedef struct rpc_dup_object_v03_00 +{ + NVOS55_PARAMETERS_v03_00 params; +} rpc_dup_object_v03_00; + +typedef rpc_dup_object_v03_00 rpc_dup_object_v; + +typedef struct rpc_idle_channels_v03_00 +{ + NvU32 flags; + NvU32 timeout; + NvU32 nchannels; + idle_channel_list_v03_00 channel_list[]; +} rpc_idle_channels_v03_00; + +typedef rpc_idle_channels_v03_00 rpc_idle_channels_v; + +typedef struct rpc_unloading_guest_driver_v1F_07 +{ + NvBool bSuspend; + NvBool bGc6Entering; + NvU32 newLevel; +} rpc_unloading_guest_driver_v1F_07; + +typedef rpc_unloading_guest_driver_v1F_07 rpc_unloading_guest_driver_v; + +typedef struct rpc_gpu_exec_reg_ops_v12_01 +{ + NvHandle hClient; + NvHandle hObject; + gpu_exec_reg_ops_v12_01 params; +} rpc_gpu_exec_reg_ops_v12_01; + +typedef rpc_gpu_exec_reg_ops_v12_01 rpc_gpu_exec_reg_ops_v; + +typedef struct rpc_set_page_directory_v03_00 +{ + NvHandle hClient; + NvHandle hDevice; + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v03_00 params; +} rpc_set_page_directory_v03_00; + +typedef struct rpc_set_page_directory_v1E_05 +{ + NvHandle hClient; + NvHandle hDevice; + NvU32 pasid; + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05 params; +} rpc_set_page_directory_v1E_05; + +typedef rpc_set_page_directory_v1E_05 rpc_set_page_directory_v; + +typedef struct rpc_unset_page_directory_v03_00 +{ + NvHandle hClient; + NvHandle hDevice; + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v03_00 params; +} rpc_unset_page_directory_v03_00; + +typedef struct rpc_unset_page_directory_v1E_05 +{ + NvHandle hClient; + NvHandle hDevice; + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05 params; +} rpc_unset_page_directory_v1E_05; + +typedef rpc_unset_page_directory_v1E_05 rpc_unset_page_directory_v; + +typedef struct rpc_get_gsp_static_info_v14_00 +{ + NvU32 data; +} rpc_get_gsp_static_info_v14_00; + +typedef rpc_get_gsp_static_info_v14_00 rpc_get_gsp_static_info_v; + +typedef struct rpc_update_bar_pde_v15_00 +{ + UpdateBarPde_v15_00 info; +} rpc_update_bar_pde_v15_00; + +typedef rpc_update_bar_pde_v15_00 rpc_update_bar_pde_v; + +typedef struct rpc_vgpu_pf_reg_read32_v15_00 +{ + NvU64 address NV_ALIGN_BYTES(8); + NvU32 value; + NvU32 grEngId; +} rpc_vgpu_pf_reg_read32_v15_00; + +typedef 
rpc_vgpu_pf_reg_read32_v15_00 rpc_vgpu_pf_reg_read32_v; + +typedef struct rpc_rmfs_init_v15_00 +{ + NvU64 statusQueuePhysAddr NV_ALIGN_BYTES(8); +} rpc_rmfs_init_v15_00; + +typedef rpc_rmfs_init_v15_00 rpc_rmfs_init_v; + +typedef struct rpc_rmfs_test_v15_00 +{ + NvU32 numReps; + NvU32 flags; + NvU32 testData1; + NvU32 testData2; +} rpc_rmfs_test_v15_00; + +typedef rpc_rmfs_test_v15_00 rpc_rmfs_test_v; + +typedef struct rpc_gsp_set_system_info_v17_00 +{ + NvU32 data; +} rpc_gsp_set_system_info_v17_00; + +typedef rpc_gsp_set_system_info_v17_00 rpc_gsp_set_system_info_v; + +typedef struct rpc_gsp_rm_alloc_v03_00 +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvU32 hClass; + NvU32 status; + NvU32 paramsSize; + NvU8 params[]; +} rpc_gsp_rm_alloc_v03_00; + +typedef rpc_gsp_rm_alloc_v03_00 rpc_gsp_rm_alloc_v; + +typedef struct rpc_gsp_rm_control_v03_00 +{ + NvHandle hClient; + NvHandle hObject; + NvU32 cmd; + NvU32 status; + NvU32 paramsSize; + NvBool serialized; + NvU8 reserved[3]; + NvU8 params[]; +} rpc_gsp_rm_control_v03_00; + +typedef rpc_gsp_rm_control_v03_00 rpc_gsp_rm_control_v; + +typedef struct rpc_dump_protobuf_component_v18_12 +{ + NvU16 component; + NvU8 nvDumpType; + NvBool countOnly; + NvU32 bugCheckCode; + NvU32 internalCode; + NvU32 bufferSize; + NvU8 blob[]; +} rpc_dump_protobuf_component_v18_12; + +typedef rpc_dump_protobuf_component_v18_12 rpc_dump_protobuf_component_v; + +typedef struct rpc_run_cpu_sequencer_v17_00 +{ + NvU32 bufferSizeDWord; + NvU32 cmdIndex; + NvU32 regSaveArea[8]; + NvU32 commandBuffer[]; +} rpc_run_cpu_sequencer_v17_00; + +typedef rpc_run_cpu_sequencer_v17_00 rpc_run_cpu_sequencer_v; + +typedef struct rpc_post_event_v17_00 +{ + NvHandle hClient; + NvHandle hEvent; + NvU32 notifyIndex; + NvU32 data; + NvU32 status; + NvU32 eventDataSize; + NvBool bNotifyList; + NvU8 eventData[]; +} rpc_post_event_v17_00; + +typedef rpc_post_event_v17_00 rpc_post_event_v; + +typedef struct rpc_rc_triggered_v17_02 +{ + NvU32 nv2080EngineType; + NvU32 chid; + NvU32 exceptType; + NvU32 scope; + NvU16 partitionAttributionId; +} rpc_rc_triggered_v17_02; + +typedef rpc_rc_triggered_v17_02 rpc_rc_triggered_v; + +typedef struct rpc_os_error_log_v17_00 +{ + NvU32 exceptType; + char errString[0x100]; +} rpc_os_error_log_v17_00; + +typedef rpc_os_error_log_v17_00 rpc_os_error_log_v; + +typedef struct rpc_rg_line_intr_v17_00 +{ + NvU32 head; + NvU32 rgIntr; +} rpc_rg_line_intr_v17_00; + +typedef rpc_rg_line_intr_v17_00 rpc_rg_line_intr_v; + +typedef struct rpc_display_modeset_v01_00 +{ + NvBool bModesetStart; + NvU32 minRequiredIsoBandwidthKBPS; + NvU32 minRequiredFloorBandwidthKBPS; +} rpc_display_modeset_v01_00; + +typedef rpc_display_modeset_v01_00 rpc_display_modeset_v; + +typedef struct rpc_gpuacct_perfmon_util_samples_v17_00 +{ + NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v17_00 params; +} rpc_gpuacct_perfmon_util_samples_v17_00; + +typedef rpc_gpuacct_perfmon_util_samples_v17_00 rpc_gpuacct_perfmon_util_samples_v; + +typedef struct rpc_vgpu_gsp_plugin_triggered_v17_00 +{ + NvU32 gfid; + NvU32 notifyIndex; +} rpc_vgpu_gsp_plugin_triggered_v17_00; + +typedef rpc_vgpu_gsp_plugin_triggered_v17_00 rpc_vgpu_gsp_plugin_triggered_v; + +typedef struct rpc_vgpu_config_event_v17_00 +{ + NvU32 notifyIndex; +} rpc_vgpu_config_event_v17_00; + +typedef rpc_vgpu_config_event_v17_00 rpc_vgpu_config_event_v; + +typedef struct rpc_dce_rm_init_v01_00 +{ + NvBool bInit; +} rpc_dce_rm_init_v01_00; + +typedef rpc_dce_rm_init_v01_00 rpc_dce_rm_init_v; + +typedef struct 
rpc_sim_read_v1E_01 +{ + char path[0x100]; + NvU32 index; + NvU32 count; +} rpc_sim_read_v1E_01; + +typedef rpc_sim_read_v1E_01 rpc_sim_read_v; + +typedef struct rpc_sim_write_v1E_01 +{ + char path[0x100]; + NvU32 index; + NvU32 count; + NvU32 data; +} rpc_sim_write_v1E_01; + +typedef rpc_sim_write_v1E_01 rpc_sim_write_v; + +typedef struct rpc_ucode_libos_print_v1E_08 +{ + NvU32 ucodeEngDesc; + NvU32 libosPrintBufSize; + NvU8 libosPrintBuf[]; +} rpc_ucode_libos_print_v1E_08; + +typedef rpc_ucode_libos_print_v1E_08 rpc_ucode_libos_print_v; + +typedef struct rpc_init_done_v17_00 +{ + NvU32 not_used; +} rpc_init_done_v17_00; + +typedef rpc_init_done_v17_00 rpc_init_done_v; + +typedef struct rpc_semaphore_schedule_callback_v17_00 +{ + NvU64 GPUVA NV_ALIGN_BYTES(8); + NvU32 hVASpace; + NvU32 ReleaseValue; + NvU32 Flags; + NvU32 completionStatus; + NvHandle hClient; + NvHandle hEvent; +} rpc_semaphore_schedule_callback_v17_00; + +typedef rpc_semaphore_schedule_callback_v17_00 rpc_semaphore_schedule_callback_v; + +typedef struct rpc_perf_gpu_boost_sync_limits_callback_v17_00 +{ + NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00 params; +} rpc_perf_gpu_boost_sync_limits_callback_v17_00; + +typedef rpc_perf_gpu_boost_sync_limits_callback_v17_00 rpc_perf_gpu_boost_sync_limits_callback_v; + +typedef struct rpc_perf_bridgeless_info_update_v17_00 +{ + NvU64 bBridgeless NV_ALIGN_BYTES(8); +} rpc_perf_bridgeless_info_update_v17_00; + +typedef rpc_perf_bridgeless_info_update_v17_00 rpc_perf_bridgeless_info_update_v; + + +#endif + +#ifdef RPC_DEBUG_PRINT_STRUCTURES +// These are printable definitions of above structures. These will be used for RPC logging in the vmioplugin. +#define SDK_DEBUG_PRINT_STRUCTURES +#include "g_sdk-structures.h" +#undef SDK_DEBUG_PRINT_STRUCTURES + +#ifndef SKIP_PRINT_rpc_nop_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_nop_v03_00[] = { + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_nop_v03_00 = { + .name = "rpc_nop", + .fdesc = vmiopd_fdesc_t_rpc_nop_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_set_guest_system_info_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_set_guest_system_info_v03_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, vgxVersionMajorNum), + .name = "vgxVersionMajorNum" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, vgxVersionMinorNum), + .name = "vgxVersionMinorNum" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, guestDriverVersionBufferLength), + .name = "guestDriverVersionBufferLength" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, guestVersionBufferLength), + .name = "guestVersionBufferLength" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, guestTitleBufferLength), + .name = "guestTitleBufferLength" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, guestClNum), + .name = "guestClNum" + }, + { + .vtype = vtype_char_array, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, guestDriverVersion), + .array_length = 0x100, + .name = "guestDriverVersion" + }, + { + .vtype = vtype_char_array, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, guestVersion), + .array_length = 0x100, + .name = "guestVersion" + }, + { + .vtype = vtype_char_array, + .offset = NV_OFFSETOF(rpc_set_guest_system_info_v03_00, guestTitle), + .array_length = 0x100, + .name = 
"guestTitle" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_set_guest_system_info_v03_00 = { + .name = "rpc_set_guest_system_info", + .header_length = NV_SIZEOF32(rpc_set_guest_system_info_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_set_guest_system_info_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_alloc_memory_v13_01 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_alloc_memory_v13_01[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, hDevice), + .name = "hDevice" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, hMemory), + .name = "hMemory" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, hClass), + .name = "hClass" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, flags), + .name = "flags" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, pteAdjust), + .name = "pteAdjust" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, format), + .name = "format" + }, + { + .vtype = vtype_NvU64, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, length), + .name = "length" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, pageCount), + .name = "pageCount" + }, + { + .vtype = vtype_struct_pte_desc, + .offset = NV_OFFSETOF(rpc_alloc_memory_v13_01, pteDesc), + .name = "pteDesc" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_alloc_memory_v13_01 = { + .name = "rpc_alloc_memory", + .header_length = NV_SIZEOF32(rpc_alloc_memory_v13_01), + .fdesc = vmiopd_fdesc_t_rpc_alloc_memory_v13_01 +}; +#endif + +#ifndef SKIP_PRINT_rpc_free_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_free_v03_00[] = { + { + .vtype = vtype_NVOS00_PARAMETERS_v03_00, + .offset = NV_OFFSETOF(rpc_free_v03_00, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_free_v03_00 = { + .name = "rpc_free", + .header_length = NV_SIZEOF32(rpc_free_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_free_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_map_memory_dma_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_map_memory_dma_v03_00[] = { + { + .vtype = vtype_NVOS46_PARAMETERS_v03_00, + .offset = NV_OFFSETOF(rpc_map_memory_dma_v03_00, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_map_memory_dma_v03_00 = { + .name = "rpc_map_memory_dma", + .header_length = NV_SIZEOF32(rpc_map_memory_dma_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_map_memory_dma_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_unmap_memory_dma_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_unmap_memory_dma_v03_00[] = { + { + .vtype = vtype_NVOS47_PARAMETERS_v03_00, + .offset = NV_OFFSETOF(rpc_unmap_memory_dma_v03_00, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_unmap_memory_dma_v03_00 = { + .name = "rpc_unmap_memory_dma", + .header_length = NV_SIZEOF32(rpc_unmap_memory_dma_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_unmap_memory_dma_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_dup_object_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_dup_object_v03_00[] = { + { + .vtype = vtype_NVOS55_PARAMETERS_v03_00, + .offset = NV_OFFSETOF(rpc_dup_object_v03_00, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t 
vmiopd_mdesc_t_rpc_dup_object_v03_00 = { + .name = "rpc_dup_object", + .header_length = NV_SIZEOF32(rpc_dup_object_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_dup_object_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_idle_channels_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_idle_channels_v03_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_idle_channels_v03_00, flags), + .name = "flags" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_idle_channels_v03_00, timeout), + .name = "timeout" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_idle_channels_v03_00, nchannels), + .name = "nchannels" + }, + { + .vtype = vtype_idle_channel_list_v03_00_array, + .offset = NV_OFFSETOF(rpc_idle_channels_v03_00, channel_list), + .array_length = 0, + .array_length_fn = get_array_length_rpc_idle_channels_v03_00_channel_list, + .name = "channel_list" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_idle_channels_v03_00 = { + .name = "rpc_idle_channels", + .header_length = NV_SIZEOF32(rpc_idle_channels_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_idle_channels_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_unloading_guest_driver_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_unloading_guest_driver_v03_00[] = { + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_unloading_guest_driver_v03_00 = { + .name = "rpc_unloading_guest_driver", + .fdesc = vmiopd_fdesc_t_rpc_unloading_guest_driver_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_unloading_guest_driver_v1F_07 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_unloading_guest_driver_v1F_07[] = { + { + .vtype = vtype_NvBool, + .offset = NV_OFFSETOF(rpc_unloading_guest_driver_v1F_07, bSuspend), + .name = "bSuspend" + }, + { + .vtype = vtype_NvBool, + .offset = NV_OFFSETOF(rpc_unloading_guest_driver_v1F_07, bGc6Entering), + .name = "bGc6Entering" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_unloading_guest_driver_v1F_07, newLevel), + .name = "newLevel" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_unloading_guest_driver_v1F_07 = { + .name = "rpc_unloading_guest_driver", + .header_length = NV_SIZEOF32(rpc_unloading_guest_driver_v1F_07), + .fdesc = vmiopd_fdesc_t_rpc_unloading_guest_driver_v1F_07 +}; +#endif + +#ifndef SKIP_PRINT_rpc_gpu_exec_reg_ops_v12_01 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_gpu_exec_reg_ops_v12_01[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_gpu_exec_reg_ops_v12_01, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_gpu_exec_reg_ops_v12_01, hObject), + .name = "hObject" + }, + { + .vtype = vtype_gpu_exec_reg_ops_v12_01, + .offset = NV_OFFSETOF(rpc_gpu_exec_reg_ops_v12_01, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_gpu_exec_reg_ops_v12_01 = { + .name = "rpc_gpu_exec_reg_ops", + .header_length = NV_SIZEOF32(rpc_gpu_exec_reg_ops_v12_01), + .fdesc = vmiopd_fdesc_t_rpc_gpu_exec_reg_ops_v12_01 +}; +#endif + +#ifndef SKIP_PRINT_rpc_set_page_directory_v1E_05 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_set_page_directory_v1E_05[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_set_page_directory_v1E_05, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_set_page_directory_v1E_05, hDevice), + .name = "hDevice" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_set_page_directory_v1E_05, pasid), + .name = "pasid" + }, + { + .vtype = 
vtype_NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05, + .offset = NV_OFFSETOF(rpc_set_page_directory_v1E_05, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_set_page_directory_v1E_05 = { + .name = "rpc_set_page_directory", + .header_length = NV_SIZEOF32(rpc_set_page_directory_v1E_05), + .fdesc = vmiopd_fdesc_t_rpc_set_page_directory_v1E_05 +}; +#endif + +#ifndef SKIP_PRINT_rpc_set_page_directory_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_set_page_directory_v03_00[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_set_page_directory_v03_00, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_set_page_directory_v03_00, hDevice), + .name = "hDevice" + }, + { + .vtype = vtype_NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v03_00, + .offset = NV_OFFSETOF(rpc_set_page_directory_v03_00, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_set_page_directory_v03_00 = { + .name = "rpc_set_page_directory", + .header_length = NV_SIZEOF32(rpc_set_page_directory_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_set_page_directory_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_unset_page_directory_v1E_05 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_unset_page_directory_v1E_05[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_unset_page_directory_v1E_05, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_unset_page_directory_v1E_05, hDevice), + .name = "hDevice" + }, + { + .vtype = vtype_NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05, + .offset = NV_OFFSETOF(rpc_unset_page_directory_v1E_05, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_unset_page_directory_v1E_05 = { + .name = "rpc_unset_page_directory", + .header_length = NV_SIZEOF32(rpc_unset_page_directory_v1E_05), + .fdesc = vmiopd_fdesc_t_rpc_unset_page_directory_v1E_05 +}; +#endif + +#ifndef SKIP_PRINT_rpc_unset_page_directory_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_unset_page_directory_v03_00[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_unset_page_directory_v03_00, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_unset_page_directory_v03_00, hDevice), + .name = "hDevice" + }, + { + .vtype = vtype_NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v03_00, + .offset = NV_OFFSETOF(rpc_unset_page_directory_v03_00, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_unset_page_directory_v03_00 = { + .name = "rpc_unset_page_directory", + .header_length = NV_SIZEOF32(rpc_unset_page_directory_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_unset_page_directory_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_get_gsp_static_info_v14_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_get_gsp_static_info_v14_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_get_gsp_static_info_v14_00, data), + .name = "data" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_get_gsp_static_info_v14_00 = { + .name = "rpc_get_gsp_static_info", + .header_length = NV_SIZEOF32(rpc_get_gsp_static_info_v14_00), + .fdesc = vmiopd_fdesc_t_rpc_get_gsp_static_info_v14_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_update_bar_pde_v15_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_update_bar_pde_v15_00[] = { + { + .vtype = vtype_UpdateBarPde_v15_00, + .offset = 
NV_OFFSETOF(rpc_update_bar_pde_v15_00, info), + .name = "info" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_update_bar_pde_v15_00 = { + .name = "rpc_update_bar_pde", + .header_length = NV_SIZEOF32(rpc_update_bar_pde_v15_00), + .fdesc = vmiopd_fdesc_t_rpc_update_bar_pde_v15_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_vgpu_pf_reg_read32_v15_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_vgpu_pf_reg_read32_v15_00[] = { + { + .vtype = vtype_NvU64, + .offset = NV_OFFSETOF(rpc_vgpu_pf_reg_read32_v15_00, address), + .name = "address" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_vgpu_pf_reg_read32_v15_00, value), + .name = "value" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_vgpu_pf_reg_read32_v15_00, grEngId), + .name = "grEngId" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_vgpu_pf_reg_read32_v15_00 = { + .name = "rpc_vgpu_pf_reg_read32", + .header_length = NV_SIZEOF32(rpc_vgpu_pf_reg_read32_v15_00), + .fdesc = vmiopd_fdesc_t_rpc_vgpu_pf_reg_read32_v15_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_rmfs_init_v15_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rmfs_init_v15_00[] = { + { + .vtype = vtype_NvU64, + .offset = NV_OFFSETOF(rpc_rmfs_init_v15_00, statusQueuePhysAddr), + .name = "statusQueuePhysAddr" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rmfs_init_v15_00 = { + .name = "rpc_rmfs_init", + .header_length = NV_SIZEOF32(rpc_rmfs_init_v15_00), + .fdesc = vmiopd_fdesc_t_rpc_rmfs_init_v15_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_rmfs_close_queue_v15_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rmfs_close_queue_v15_00[] = { + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rmfs_close_queue_v15_00 = { + .name = "rpc_rmfs_close_queue", + .fdesc = vmiopd_fdesc_t_rpc_rmfs_close_queue_v15_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_rmfs_cleanup_v15_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rmfs_cleanup_v15_00[] = { + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rmfs_cleanup_v15_00 = { + .name = "rpc_rmfs_cleanup", + .fdesc = vmiopd_fdesc_t_rpc_rmfs_cleanup_v15_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_rmfs_test_v15_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rmfs_test_v15_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rmfs_test_v15_00, numReps), + .name = "numReps" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rmfs_test_v15_00, flags), + .name = "flags" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rmfs_test_v15_00, testData1), + .name = "testData1" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rmfs_test_v15_00, testData2), + .name = "testData2" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rmfs_test_v15_00 = { + .name = "rpc_rmfs_test", + .header_length = NV_SIZEOF32(rpc_rmfs_test_v15_00), + .fdesc = vmiopd_fdesc_t_rpc_rmfs_test_v15_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_gsp_set_system_info_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_gsp_set_system_info_v17_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_gsp_set_system_info_v17_00, data), + .name = "data" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_gsp_set_system_info_v17_00 = { + .name = "rpc_gsp_set_system_info", + .header_length = NV_SIZEOF32(rpc_gsp_set_system_info_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_gsp_set_system_info_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_set_registry_v17_00 +static vmiopd_fdesc_t 
vmiopd_fdesc_t_rpc_set_registry_v17_00[] = { + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_set_registry_v17_00 = { + .name = "rpc_set_registry", + .fdesc = vmiopd_fdesc_t_rpc_set_registry_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_gsp_rm_alloc_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_gsp_rm_alloc_v03_00[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_gsp_rm_alloc_v03_00, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_gsp_rm_alloc_v03_00, hParent), + .name = "hParent" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_gsp_rm_alloc_v03_00, hObject), + .name = "hObject" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_gsp_rm_alloc_v03_00, hClass), + .name = "hClass" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_gsp_rm_alloc_v03_00, status), + .name = "status" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_gsp_rm_alloc_v03_00, paramsSize), + .name = "paramsSize" + }, + { + .vtype = vtype_NvU8_array, + .offset = NV_OFFSETOF(rpc_gsp_rm_alloc_v03_00, params), + .array_length = 0, + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_gsp_rm_alloc_v03_00 = { + .name = "rpc_gsp_rm_alloc", + .header_length = NV_SIZEOF32(rpc_gsp_rm_alloc_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_gsp_rm_alloc_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_gsp_rm_control_v03_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_gsp_rm_control_v03_00[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_gsp_rm_control_v03_00, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_gsp_rm_control_v03_00, hObject), + .name = "hObject" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_gsp_rm_control_v03_00, cmd), + .name = "cmd" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_gsp_rm_control_v03_00, status), + .name = "status" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_gsp_rm_control_v03_00, paramsSize), + .name = "paramsSize" + }, + { + .vtype = vtype_NvBool, + .offset = NV_OFFSETOF(rpc_gsp_rm_control_v03_00, serialized), + .name = "serialized" + }, + { + .vtype = vtype_NvU8_array, + .offset = NV_OFFSETOF(rpc_gsp_rm_control_v03_00, reserved), + .array_length = 3, + .name = "reserved" + }, + { + .vtype = vtype_NvU8_array, + .offset = NV_OFFSETOF(rpc_gsp_rm_control_v03_00, params), + .array_length = 0, + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_gsp_rm_control_v03_00 = { + .name = "rpc_gsp_rm_control", + .header_length = NV_SIZEOF32(rpc_gsp_rm_control_v03_00), + .fdesc = vmiopd_fdesc_t_rpc_gsp_rm_control_v03_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_dump_protobuf_component_v18_12 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_dump_protobuf_component_v18_12[] = { + { + .vtype = vtype_NvU16, + .offset = NV_OFFSETOF(rpc_dump_protobuf_component_v18_12, component), + .name = "component" + }, + { + .vtype = vtype_NvU8, + .offset = NV_OFFSETOF(rpc_dump_protobuf_component_v18_12, nvDumpType), + .name = "nvDumpType" + }, + { + .vtype = vtype_NvBool, + .offset = NV_OFFSETOF(rpc_dump_protobuf_component_v18_12, countOnly), + .name = "countOnly" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_dump_protobuf_component_v18_12, bugCheckCode), + .name = "bugCheckCode" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_dump_protobuf_component_v18_12, internalCode), + .name = "internalCode" + }, + { + 
.vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_dump_protobuf_component_v18_12, bufferSize), + .name = "bufferSize" + }, + { + .vtype = vtype_NvU8_array, + .offset = NV_OFFSETOF(rpc_dump_protobuf_component_v18_12, blob), + .array_length = 0, + .name = "blob" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_dump_protobuf_component_v18_12 = { + .name = "rpc_dump_protobuf_component", + .header_length = NV_SIZEOF32(rpc_dump_protobuf_component_v18_12), + .fdesc = vmiopd_fdesc_t_rpc_dump_protobuf_component_v18_12 +}; +#endif + +#ifndef SKIP_PRINT_rpc_run_cpu_sequencer_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_run_cpu_sequencer_v17_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_run_cpu_sequencer_v17_00, bufferSizeDWord), + .name = "bufferSizeDWord" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_run_cpu_sequencer_v17_00, cmdIndex), + .name = "cmdIndex" + }, + { + .vtype = vtype_NvU32_array, + .offset = NV_OFFSETOF(rpc_run_cpu_sequencer_v17_00, regSaveArea), + .array_length = 8, + .name = "regSaveArea" + }, + { + .vtype = vtype_NvU32_array, + .offset = NV_OFFSETOF(rpc_run_cpu_sequencer_v17_00, commandBuffer), + .array_length = 0, + .name = "commandBuffer" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_run_cpu_sequencer_v17_00 = { + .name = "rpc_run_cpu_sequencer", + .header_length = NV_SIZEOF32(rpc_run_cpu_sequencer_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_run_cpu_sequencer_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_post_event_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_post_event_v17_00[] = { + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_post_event_v17_00, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_post_event_v17_00, hEvent), + .name = "hEvent" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_post_event_v17_00, notifyIndex), + .name = "notifyIndex" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_post_event_v17_00, data), + .name = "data" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_post_event_v17_00, status), + .name = "status" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_post_event_v17_00, eventDataSize), + .name = "eventDataSize" + }, + { + .vtype = vtype_NvBool, + .offset = NV_OFFSETOF(rpc_post_event_v17_00, bNotifyList), + .name = "bNotifyList" + }, + { + .vtype = vtype_NvU8_array, + .offset = NV_OFFSETOF(rpc_post_event_v17_00, eventData), + .array_length = 0, + .name = "eventData" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_post_event_v17_00 = { + .name = "rpc_post_event", + .header_length = NV_SIZEOF32(rpc_post_event_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_post_event_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_rc_triggered_v17_02 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rc_triggered_v17_02[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, nv2080EngineType), + .name = "nv2080EngineType" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, chid), + .name = "chid" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, exceptType), + .name = "exceptType" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, scope), + .name = "scope" + }, + { + .vtype = vtype_NvU16, + .offset = NV_OFFSETOF(rpc_rc_triggered_v17_02, partitionAttributionId), + .name = "partitionAttributionId" + }, + { + .vtype = vt_end + } +}; + +static 
vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rc_triggered_v17_02 = { + .name = "rpc_rc_triggered", + .header_length = NV_SIZEOF32(rpc_rc_triggered_v17_02), + .fdesc = vmiopd_fdesc_t_rpc_rc_triggered_v17_02 +}; +#endif + +#ifndef SKIP_PRINT_rpc_os_error_log_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_os_error_log_v17_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_os_error_log_v17_00, exceptType), + .name = "exceptType" + }, + { + .vtype = vtype_char_array, + .offset = NV_OFFSETOF(rpc_os_error_log_v17_00, errString), + .array_length = 0x100, + .name = "errString" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_os_error_log_v17_00 = { + .name = "rpc_os_error_log", + .header_length = NV_SIZEOF32(rpc_os_error_log_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_os_error_log_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_rg_line_intr_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_rg_line_intr_v17_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rg_line_intr_v17_00, head), + .name = "head" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_rg_line_intr_v17_00, rgIntr), + .name = "rgIntr" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_rg_line_intr_v17_00 = { + .name = "rpc_rg_line_intr", + .header_length = NV_SIZEOF32(rpc_rg_line_intr_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_rg_line_intr_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_display_modeset_v01_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_display_modeset_v01_00[] = { + { + .vtype = vtype_NvBool, + .offset = NV_OFFSETOF(rpc_display_modeset_v01_00, bModesetStart), + .name = "bModesetStart" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_display_modeset_v01_00, minRequiredIsoBandwidthKBPS), + .name = "minRequiredIsoBandwidthKBPS" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_display_modeset_v01_00, minRequiredFloorBandwidthKBPS), + .name = "minRequiredFloorBandwidthKBPS" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_display_modeset_v01_00 = { + .name = "rpc_display_modeset", + .header_length = NV_SIZEOF32(rpc_display_modeset_v01_00), + .fdesc = vmiopd_fdesc_t_rpc_display_modeset_v01_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_gpuacct_perfmon_util_samples_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_gpuacct_perfmon_util_samples_v17_00[] = { + { + .vtype = vtype_NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v17_00, + .offset = NV_OFFSETOF(rpc_gpuacct_perfmon_util_samples_v17_00, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_gpuacct_perfmon_util_samples_v17_00 = { + .name = "rpc_gpuacct_perfmon_util_samples", + .header_length = NV_SIZEOF32(rpc_gpuacct_perfmon_util_samples_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_gpuacct_perfmon_util_samples_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_vgpu_gsp_plugin_triggered_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_vgpu_gsp_plugin_triggered_v17_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_vgpu_gsp_plugin_triggered_v17_00, gfid), + .name = "gfid" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_vgpu_gsp_plugin_triggered_v17_00, notifyIndex), + .name = "notifyIndex" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_vgpu_gsp_plugin_triggered_v17_00 = { + .name = "rpc_vgpu_gsp_plugin_triggered", + .header_length = NV_SIZEOF32(rpc_vgpu_gsp_plugin_triggered_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_vgpu_gsp_plugin_triggered_v17_00 
+}; +#endif + +#ifndef SKIP_PRINT_rpc_vgpu_config_event_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_vgpu_config_event_v17_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_vgpu_config_event_v17_00, notifyIndex), + .name = "notifyIndex" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_vgpu_config_event_v17_00 = { + .name = "rpc_vgpu_config_event", + .header_length = NV_SIZEOF32(rpc_vgpu_config_event_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_vgpu_config_event_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_dce_rm_init_v01_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_dce_rm_init_v01_00[] = { + { + .vtype = vtype_NvBool, + .offset = NV_OFFSETOF(rpc_dce_rm_init_v01_00, bInit), + .name = "bInit" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_dce_rm_init_v01_00 = { + .name = "rpc_dce_rm_init", + .header_length = NV_SIZEOF32(rpc_dce_rm_init_v01_00), + .fdesc = vmiopd_fdesc_t_rpc_dce_rm_init_v01_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_sim_read_v1E_01 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_sim_read_v1E_01[] = { + { + .vtype = vtype_char_array, + .offset = NV_OFFSETOF(rpc_sim_read_v1E_01, path), + .array_length = 0x100, + .name = "path" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_sim_read_v1E_01, index), + .name = "index" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_sim_read_v1E_01, count), + .name = "count" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_sim_read_v1E_01 = { + .name = "rpc_sim_read", + .header_length = NV_SIZEOF32(rpc_sim_read_v1E_01), + .fdesc = vmiopd_fdesc_t_rpc_sim_read_v1E_01 +}; +#endif + +#ifndef SKIP_PRINT_rpc_sim_write_v1E_01 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_sim_write_v1E_01[] = { + { + .vtype = vtype_char_array, + .offset = NV_OFFSETOF(rpc_sim_write_v1E_01, path), + .array_length = 0x100, + .name = "path" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_sim_write_v1E_01, index), + .name = "index" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_sim_write_v1E_01, count), + .name = "count" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_sim_write_v1E_01, data), + .name = "data" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_sim_write_v1E_01 = { + .name = "rpc_sim_write", + .header_length = NV_SIZEOF32(rpc_sim_write_v1E_01), + .fdesc = vmiopd_fdesc_t_rpc_sim_write_v1E_01 +}; +#endif + +#ifndef SKIP_PRINT_rpc_ucode_libos_print_v1E_08 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_ucode_libos_print_v1E_08[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_ucode_libos_print_v1E_08, ucodeEngDesc), + .name = "ucodeEngDesc" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_ucode_libos_print_v1E_08, libosPrintBufSize), + .name = "libosPrintBufSize" + }, + { + .vtype = vtype_NvU8_array, + .offset = NV_OFFSETOF(rpc_ucode_libos_print_v1E_08, libosPrintBuf), + .array_length = 0, + .name = "libosPrintBuf" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_ucode_libos_print_v1E_08 = { + .name = "rpc_ucode_libos_print", + .header_length = NV_SIZEOF32(rpc_ucode_libos_print_v1E_08), + .fdesc = vmiopd_fdesc_t_rpc_ucode_libos_print_v1E_08 +}; +#endif + +#ifndef SKIP_PRINT_rpc_init_done_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_init_done_v17_00[] = { + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_init_done_v17_00, not_used), + .name = "not_used" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t 
vmiopd_mdesc_t_rpc_init_done_v17_00 = { + .name = "rpc_init_done", + .header_length = NV_SIZEOF32(rpc_init_done_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_init_done_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_semaphore_schedule_callback_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_semaphore_schedule_callback_v17_00[] = { + { + .vtype = vtype_NvU64, + .offset = NV_OFFSETOF(rpc_semaphore_schedule_callback_v17_00, GPUVA), + .name = "GPUVA" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_semaphore_schedule_callback_v17_00, hVASpace), + .name = "hVASpace" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_semaphore_schedule_callback_v17_00, ReleaseValue), + .name = "ReleaseValue" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_semaphore_schedule_callback_v17_00, Flags), + .name = "Flags" + }, + { + .vtype = vtype_NvU32, + .offset = NV_OFFSETOF(rpc_semaphore_schedule_callback_v17_00, completionStatus), + .name = "completionStatus" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_semaphore_schedule_callback_v17_00, hClient), + .name = "hClient" + }, + { + .vtype = vtype_NvHandle, + .offset = NV_OFFSETOF(rpc_semaphore_schedule_callback_v17_00, hEvent), + .name = "hEvent" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_semaphore_schedule_callback_v17_00 = { + .name = "rpc_semaphore_schedule_callback", + .header_length = NV_SIZEOF32(rpc_semaphore_schedule_callback_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_semaphore_schedule_callback_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_perf_gpu_boost_sync_limits_callback_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_perf_gpu_boost_sync_limits_callback_v17_00[] = { + { + .vtype = vtype_NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00, + .offset = NV_OFFSETOF(rpc_perf_gpu_boost_sync_limits_callback_v17_00, params), + .name = "params" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_perf_gpu_boost_sync_limits_callback_v17_00 = { + .name = "rpc_perf_gpu_boost_sync_limits_callback", + .header_length = NV_SIZEOF32(rpc_perf_gpu_boost_sync_limits_callback_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_perf_gpu_boost_sync_limits_callback_v17_00 +}; +#endif + +#ifndef SKIP_PRINT_rpc_perf_bridgeless_info_update_v17_00 +static vmiopd_fdesc_t vmiopd_fdesc_t_rpc_perf_bridgeless_info_update_v17_00[] = { + { + .vtype = vtype_NvU64, + .offset = NV_OFFSETOF(rpc_perf_bridgeless_info_update_v17_00, bBridgeless), + .name = "bBridgeless" + }, + { + .vtype = vt_end + } +}; + +static vmiopd_mdesc_t vmiopd_mdesc_t_rpc_perf_bridgeless_info_update_v17_00 = { + .name = "rpc_perf_bridgeless_info_update", + .header_length = NV_SIZEOF32(rpc_perf_bridgeless_info_update_v17_00), + .fdesc = vmiopd_fdesc_t_rpc_perf_bridgeless_info_update_v17_00 +}; +#endif + +#endif + +#ifdef RPC_DEBUG_PRINT_FUNCTIONS +// These are definitions for versioned functions. These will be used for RPC logging in the vmioplugin. 
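Editor's note: a minimal sketch, not part of the generated header, of how a plugin-side logger might consume one of the rpcdebug*() descriptors returned below. It assumes the vmiopd_mdesc_t/vmiopd_fdesc_t layout implied by the initializers above (.name, .fdesc, .vtype, .offset) and an NV_PRINTF-style logging call; the real vmioplugin logging path is defined elsewhere.

    // Hypothetical logger: walk the field descriptor table until vt_end.
    static void exampleLogRpcFields(vmiopd_mdesc_t *mdesc, const NvU8 *body)
    {
        vmiopd_fdesc_t *f;

        for (f = mdesc->fdesc; f->vtype != vt_end; f++)
        {
            if (f->vtype == vtype_NvU32)
                NV_PRINTF(LEVEL_INFO, "%s.%s = 0x%08x\n",
                          mdesc->name, f->name,
                          *(const NvU32 *)(body + f->offset));
            else
                NV_PRINTF(LEVEL_INFO, "%s.%s (offset %u)\n",
                          mdesc->name, f->name, (unsigned)f->offset);
        }
    }

A caller would pair a descriptor with the matching union member, e.g. exampleLogRpcFields(rpcdebugRcTriggered_v17_02(), (const NvU8 *)&msg->rc_triggered_v17_02).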
+#define SDK_DEBUG_PRINT_FUNCTIONS +#include "g_sdk-structures.h" +#undef SDK_DEBUG_PRINT_FUNCTIONS +#ifndef SKIP_PRINT_rpc_nop_v03_00 +vmiopd_mdesc_t *rpcdebugNop_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_nop_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_set_guest_system_info_v03_00 +vmiopd_mdesc_t *rpcdebugSetGuestSystemInfo_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_set_guest_system_info_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_alloc_memory_v13_01 +vmiopd_mdesc_t *rpcdebugAllocMemory_v13_01(void) +{ + return &vmiopd_mdesc_t_rpc_alloc_memory_v13_01; +} +#endif + +#ifndef SKIP_PRINT_rpc_free_v03_00 +vmiopd_mdesc_t *rpcdebugFree_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_free_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_map_memory_dma_v03_00 +vmiopd_mdesc_t *rpcdebugMapMemoryDma_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_map_memory_dma_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_unmap_memory_dma_v03_00 +vmiopd_mdesc_t *rpcdebugUnmapMemoryDma_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_unmap_memory_dma_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_dup_object_v03_00 +vmiopd_mdesc_t *rpcdebugDupObject_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_dup_object_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_idle_channels_v03_00 +vmiopd_mdesc_t *rpcdebugIdleChannels_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_idle_channels_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_unloading_guest_driver_v03_00 +vmiopd_mdesc_t *rpcdebugUnloadingGuestDriver_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_unloading_guest_driver_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_unloading_guest_driver_v1F_07 +vmiopd_mdesc_t *rpcdebugUnloadingGuestDriver_v1F_07(void) +{ + return &vmiopd_mdesc_t_rpc_unloading_guest_driver_v1F_07; +} +#endif + +#ifndef SKIP_PRINT_rpc_gpu_exec_reg_ops_v12_01 +vmiopd_mdesc_t *rpcdebugGpuExecRegOps_v12_01(void) +{ + return &vmiopd_mdesc_t_rpc_gpu_exec_reg_ops_v12_01; +} +#endif + +#ifndef SKIP_PRINT_rpc_set_page_directory_v1E_05 +vmiopd_mdesc_t *rpcdebugSetPageDirectory_v1E_05(void) +{ + return &vmiopd_mdesc_t_rpc_set_page_directory_v1E_05; +} +#endif + +#ifndef SKIP_PRINT_rpc_set_page_directory_v03_00 +vmiopd_mdesc_t *rpcdebugSetPageDirectory_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_set_page_directory_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_unset_page_directory_v1E_05 +vmiopd_mdesc_t *rpcdebugUnsetPageDirectory_v1E_05(void) +{ + return &vmiopd_mdesc_t_rpc_unset_page_directory_v1E_05; +} +#endif + +#ifndef SKIP_PRINT_rpc_unset_page_directory_v03_00 +vmiopd_mdesc_t *rpcdebugUnsetPageDirectory_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_unset_page_directory_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_get_gsp_static_info_v14_00 +vmiopd_mdesc_t *rpcdebugGetGspStaticInfo_v14_00(void) +{ + return &vmiopd_mdesc_t_rpc_get_gsp_static_info_v14_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_update_bar_pde_v15_00 +vmiopd_mdesc_t *rpcdebugUpdateBarPde_v15_00(void) +{ + return &vmiopd_mdesc_t_rpc_update_bar_pde_v15_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_vgpu_pf_reg_read32_v15_00 +vmiopd_mdesc_t *rpcdebugVgpuPfRegRead32_v15_00(void) +{ + return &vmiopd_mdesc_t_rpc_vgpu_pf_reg_read32_v15_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_rmfs_init_v15_00 +vmiopd_mdesc_t *rpcdebugRmfsInit_v15_00(void) +{ + return &vmiopd_mdesc_t_rpc_rmfs_init_v15_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_rmfs_close_queue_v15_00 +vmiopd_mdesc_t *rpcdebugRmfsCloseQueue_v15_00(void) +{ + return &vmiopd_mdesc_t_rpc_rmfs_close_queue_v15_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_rmfs_cleanup_v15_00 +vmiopd_mdesc_t 
*rpcdebugRmfsCleanup_v15_00(void) +{ + return &vmiopd_mdesc_t_rpc_rmfs_cleanup_v15_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_rmfs_test_v15_00 +vmiopd_mdesc_t *rpcdebugRmfsTest_v15_00(void) +{ + return &vmiopd_mdesc_t_rpc_rmfs_test_v15_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_gsp_set_system_info_v17_00 +vmiopd_mdesc_t *rpcdebugGspSetSystemInfo_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_gsp_set_system_info_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_set_registry_v17_00 +vmiopd_mdesc_t *rpcdebugSetRegistry_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_set_registry_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_gsp_rm_alloc_v03_00 +vmiopd_mdesc_t *rpcdebugGspRmAlloc_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_gsp_rm_alloc_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_gsp_rm_control_v03_00 +vmiopd_mdesc_t *rpcdebugGspRmControl_v03_00(void) +{ + return &vmiopd_mdesc_t_rpc_gsp_rm_control_v03_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_dump_protobuf_component_v18_12 +vmiopd_mdesc_t *rpcdebugDumpProtobufComponent_v18_12(void) +{ + return &vmiopd_mdesc_t_rpc_dump_protobuf_component_v18_12; +} +#endif + +#ifndef SKIP_PRINT_rpc_run_cpu_sequencer_v17_00 +vmiopd_mdesc_t *rpcdebugRunCpuSequencer_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_run_cpu_sequencer_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_post_event_v17_00 +vmiopd_mdesc_t *rpcdebugPostEvent_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_post_event_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_rc_triggered_v17_02 +vmiopd_mdesc_t *rpcdebugRcTriggered_v17_02(void) +{ + return &vmiopd_mdesc_t_rpc_rc_triggered_v17_02; +} +#endif + +#ifndef SKIP_PRINT_rpc_os_error_log_v17_00 +vmiopd_mdesc_t *rpcdebugOsErrorLog_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_os_error_log_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_rg_line_intr_v17_00 +vmiopd_mdesc_t *rpcdebugRgLineIntr_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_rg_line_intr_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_display_modeset_v01_00 +vmiopd_mdesc_t *rpcdebugDisplayModeset_v01_00(void) +{ + return &vmiopd_mdesc_t_rpc_display_modeset_v01_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_gpuacct_perfmon_util_samples_v17_00 +vmiopd_mdesc_t *rpcdebugGpuacctPerfmonUtilSamples_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_gpuacct_perfmon_util_samples_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_vgpu_gsp_plugin_triggered_v17_00 +vmiopd_mdesc_t *rpcdebugVgpuGspPluginTriggered_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_vgpu_gsp_plugin_triggered_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_vgpu_config_event_v17_00 +vmiopd_mdesc_t *rpcdebugVgpuConfigEvent_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_vgpu_config_event_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_dce_rm_init_v01_00 +vmiopd_mdesc_t *rpcdebugDceRmInit_v01_00(void) +{ + return &vmiopd_mdesc_t_rpc_dce_rm_init_v01_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_sim_read_v1E_01 +vmiopd_mdesc_t *rpcdebugSimRead_v1E_01(void) +{ + return &vmiopd_mdesc_t_rpc_sim_read_v1E_01; +} +#endif + +#ifndef SKIP_PRINT_rpc_sim_write_v1E_01 +vmiopd_mdesc_t *rpcdebugSimWrite_v1E_01(void) +{ + return &vmiopd_mdesc_t_rpc_sim_write_v1E_01; +} +#endif + +#ifndef SKIP_PRINT_rpc_ucode_libos_print_v1E_08 +vmiopd_mdesc_t *rpcdebugUcodeLibosPrint_v1E_08(void) +{ + return &vmiopd_mdesc_t_rpc_ucode_libos_print_v1E_08; +} +#endif + +#ifndef SKIP_PRINT_rpc_init_done_v17_00 +vmiopd_mdesc_t *rpcdebugInitDone_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_init_done_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_semaphore_schedule_callback_v17_00 +vmiopd_mdesc_t 
*rpcdebugSemaphoreScheduleCallback_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_semaphore_schedule_callback_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_perf_gpu_boost_sync_limits_callback_v17_00 +vmiopd_mdesc_t *rpcdebugPerfGpuBoostSyncLimitsCallback_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_perf_gpu_boost_sync_limits_callback_v17_00; +} +#endif + +#ifndef SKIP_PRINT_rpc_perf_bridgeless_info_update_v17_00 +vmiopd_mdesc_t *rpcdebugPerfBridgelessInfoUpdate_v17_00(void) +{ + return &vmiopd_mdesc_t_rpc_perf_bridgeless_info_update_v17_00; +} +#endif + + +#endif + +#ifdef RPC_GENERIC_UNION +// This is a generic union, that will be used for the communication between the vmioplugin & guest RM. +typedef union rpc_generic_union { + rpc_set_guest_system_info_v03_00 set_guest_system_info_v03_00; + rpc_set_guest_system_info_v set_guest_system_info_v; + rpc_alloc_memory_v13_01 alloc_memory_v13_01; + rpc_alloc_memory_v alloc_memory_v; + rpc_free_v03_00 free_v03_00; + rpc_free_v free_v; + rpc_map_memory_dma_v03_00 map_memory_dma_v03_00; + rpc_map_memory_dma_v map_memory_dma_v; + rpc_unmap_memory_dma_v03_00 unmap_memory_dma_v03_00; + rpc_unmap_memory_dma_v unmap_memory_dma_v; + rpc_dup_object_v03_00 dup_object_v03_00; + rpc_dup_object_v dup_object_v; + rpc_idle_channels_v03_00 idle_channels_v03_00; + rpc_idle_channels_v idle_channels_v; + rpc_unloading_guest_driver_v1F_07 unloading_guest_driver_v1F_07; + rpc_unloading_guest_driver_v unloading_guest_driver_v; + rpc_gpu_exec_reg_ops_v12_01 gpu_exec_reg_ops_v12_01; + rpc_gpu_exec_reg_ops_v gpu_exec_reg_ops_v; + rpc_set_page_directory_v1E_05 set_page_directory_v1E_05; + rpc_set_page_directory_v03_00 set_page_directory_v03_00; + rpc_set_page_directory_v set_page_directory_v; + rpc_unset_page_directory_v1E_05 unset_page_directory_v1E_05; + rpc_unset_page_directory_v03_00 unset_page_directory_v03_00; + rpc_unset_page_directory_v unset_page_directory_v; + rpc_get_gsp_static_info_v14_00 get_gsp_static_info_v14_00; + rpc_get_gsp_static_info_v get_gsp_static_info_v; + rpc_update_bar_pde_v15_00 update_bar_pde_v15_00; + rpc_update_bar_pde_v update_bar_pde_v; + rpc_vgpu_pf_reg_read32_v15_00 vgpu_pf_reg_read32_v15_00; + rpc_vgpu_pf_reg_read32_v vgpu_pf_reg_read32_v; + rpc_rmfs_init_v15_00 rmfs_init_v15_00; + rpc_rmfs_init_v rmfs_init_v; + rpc_rmfs_test_v15_00 rmfs_test_v15_00; + rpc_rmfs_test_v rmfs_test_v; + rpc_gsp_set_system_info_v17_00 gsp_set_system_info_v17_00; + rpc_gsp_set_system_info_v gsp_set_system_info_v; + rpc_gsp_rm_alloc_v03_00 gsp_rm_alloc_v03_00; + rpc_gsp_rm_alloc_v gsp_rm_alloc_v; + rpc_gsp_rm_control_v03_00 gsp_rm_control_v03_00; + rpc_gsp_rm_control_v gsp_rm_control_v; + rpc_dump_protobuf_component_v18_12 dump_protobuf_component_v18_12; + rpc_dump_protobuf_component_v dump_protobuf_component_v; + rpc_run_cpu_sequencer_v17_00 run_cpu_sequencer_v17_00; + rpc_run_cpu_sequencer_v run_cpu_sequencer_v; + rpc_post_event_v17_00 post_event_v17_00; + rpc_post_event_v post_event_v; + rpc_rc_triggered_v17_02 rc_triggered_v17_02; + rpc_rc_triggered_v rc_triggered_v; + rpc_os_error_log_v17_00 os_error_log_v17_00; + rpc_os_error_log_v os_error_log_v; + rpc_rg_line_intr_v17_00 rg_line_intr_v17_00; + rpc_rg_line_intr_v rg_line_intr_v; + rpc_display_modeset_v01_00 display_modeset_v01_00; + rpc_display_modeset_v display_modeset_v; + rpc_gpuacct_perfmon_util_samples_v17_00 gpuacct_perfmon_util_samples_v17_00; + rpc_gpuacct_perfmon_util_samples_v gpuacct_perfmon_util_samples_v; + rpc_vgpu_gsp_plugin_triggered_v17_00 vgpu_gsp_plugin_triggered_v17_00; + 
rpc_vgpu_gsp_plugin_triggered_v vgpu_gsp_plugin_triggered_v; + rpc_vgpu_config_event_v17_00 vgpu_config_event_v17_00; + rpc_vgpu_config_event_v vgpu_config_event_v; + rpc_dce_rm_init_v01_00 dce_rm_init_v01_00; + rpc_dce_rm_init_v dce_rm_init_v; + rpc_sim_read_v1E_01 sim_read_v1E_01; + rpc_sim_read_v sim_read_v; + rpc_sim_write_v1E_01 sim_write_v1E_01; + rpc_sim_write_v sim_write_v; + rpc_ucode_libos_print_v1E_08 ucode_libos_print_v1E_08; + rpc_ucode_libos_print_v ucode_libos_print_v; + rpc_init_done_v17_00 init_done_v17_00; + rpc_init_done_v init_done_v; + rpc_semaphore_schedule_callback_v17_00 semaphore_schedule_callback_v17_00; + rpc_semaphore_schedule_callback_v semaphore_schedule_callback_v; + rpc_perf_gpu_boost_sync_limits_callback_v17_00 perf_gpu_boost_sync_limits_callback_v17_00; + rpc_perf_gpu_boost_sync_limits_callback_v perf_gpu_boost_sync_limits_callback_v; + rpc_perf_bridgeless_info_update_v17_00 perf_bridgeless_info_update_v17_00; + rpc_perf_bridgeless_info_update_v perf_bridgeless_info_update_v; +} rpc_generic_union; + +#endif + +#ifdef RPC_UNION_MEMBER_NAME_FUNCTIONS_CMD +#define SDK_UNION_MEMBER_NAME_FUNCTIONS_CMD +#include "g_sdk-structures.h" +#undef SDK_UNION_MEMBER_NAME_FUNCTIONS_CMD + +#endif + + +#ifdef RPC_ARRAY_LENGTH_FUNCTIONS +#define SDK_ARRAY_LENGTH_FUNCTIONS +#include "g_sdk-structures.h" +#undef SDK_ARRAY_LENGTH_FUNCTIONS + +// Array length functions for IDLE_CHANNELS: +static NV_STATUS get_array_length_rpc_idle_channels_v03_00_channel_list(void *msg, NvS32 bytes_remaining, uint32_t* length) +{ + rpc_idle_channels_v03_00 *param = msg; + + if ((NvS32)(NV_OFFSETOF(rpc_idle_channels_v03_00, nchannels) + sizeof(param->nchannels)) > bytes_remaining) + return NV_ERR_BUFFER_TOO_SMALL; + + *length = param->nchannels; + return NV_OK; +} + +#endif + +#ifdef AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION +#define NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MAJOR 0x18 +#define NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MINOR 0x00 +#endif diff --git a/src/nvidia/generated/g_rpc_hal.h b/src/nvidia/generated/g_rpc_hal.h new file mode 100644 index 000000000..377fa079f --- /dev/null +++ b/src/nvidia/generated/g_rpc_hal.h @@ -0,0 +1,195 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Provides access to RPC Hal interfaces. +// +// Profile: shipping-gpus-openrm +// Haldef: rpc.def +// Template: templates/gt_eng_hal.h +// + +#ifndef _G_RPCHAL_H_ +#define _G_RPCHAL_H_ + +// +// Typedefs for RPC public object interfaces. 
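//
// Illustrative note (a sketch, not part of the generated header): these
// typedefs describe the object-level dispatch pattern.  Callers are expected
// to go through the access macros defined further below rather than reaching
// into the function-pointer table directly, e.g. (hypothetical call site,
// assuming a valid pGpu/pRpc pair):
//
//     NV_STATUS status = rpcSendMessage(pGpu, pRpc);
//     // expands to (pRpc)->obj.__rpcSendMessage__(pGpu, pRpc), where
//     // __rpcSendMessage__ is filled in with rpcSendMessage_IMPL by
//     // rpcObjIfacesSetup() in g_rpc_iom.c below.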
+// + +typedef NV_STATUS RpcConstruct(POBJGPU, POBJRPC); +typedef void RpcDestroy(POBJGPU, POBJRPC); +typedef NV_STATUS RpcSendMessage(POBJGPU, POBJRPC); +typedef NV_STATUS RpcRecvPoll(POBJGPU, POBJRPC, NvU32); + + +// +// "struct" to list RPC's public interfaces, eg: pRpc->rpcInit(pGpu, pRpc) +// + +typedef struct RPC_OBJ_IFACES { + RpcConstruct *__rpcConstruct__ ; /* Construct the RPC object */ + RpcDestroy *__rpcDestroy__ ; /* Destroy the RPC object */ + RpcSendMessage *__rpcSendMessage__ ; /* Send an RPC message */ + RpcRecvPoll *__rpcRecvPoll__ ; /* Receive an RPC message */ +} RPC_OBJ_IFACES; + + + +// +// macro defines to directly access RPC's OBJ interfaces, +// eg: #define rpcReadFoo(_pGpu, _pRpc) _pRpc->obj._rpcReadFoo(_pGpu, _pRpc) +// + +#define rpcConstruct(_pGpu, _pRpc) \ + (_pRpc)->obj.__rpcConstruct__(_pGpu, _pRpc) +#define rpcDestroy(_pGpu, _pRpc) \ + (_pRpc)->obj.__rpcDestroy__(_pGpu, _pRpc) +#define rpcSendMessage(_pGpu, _pRpc) \ + (_pRpc)->obj.__rpcSendMessage__(_pGpu, _pRpc) +#define rpcRecvPoll(_pGpu, _pRpc, _arg0) \ + (_pRpc)->obj.__rpcRecvPoll__(_pGpu, _pRpc, _arg0) + + +// +// macro defines to access RPC's function pointers, +// eg: #define rpcReadFoo_FNPTR(_pRpc) _pRpc->obj.__rpcReadFoo__ +// or #define rpcReadFoo_FNPTR(_pRpc) _pRpc->__rpcReadFoo__ +// + +#define rpcSendMessage_FNPTR(_pRpc) \ + (_pRpc)->obj.__rpcSendMessage__ +#define rpcRecvPoll_FNPTR(_pRpc) \ + (_pRpc)->obj.__rpcRecvPoll__ + + +// +// Typedefs for RPC HAL interfaces. +// + +typedef NV_STATUS RpcVgpuPfRegRead32(POBJGPU, POBJRPC, NvU64, NvU32*, NvU32); +typedef NV_STATUS RpcDumpProtobufComponent(POBJGPU, POBJRPC, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, NVDUMP_COMPONENT component); +typedef NV_STATUS RpcAllocMemory(POBJGPU, POBJRPC, NvHandle, NvHandle, NvHandle, + NvU32, NvU32, MEMORY_DESCRIPTOR*); +typedef NV_STATUS RpcGpuExecRegOps(POBJGPU, POBJRPC, NvHandle, NvHandle, + NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS*, NV2080_CTRL_GPU_REG_OP*); +typedef NV_STATUS RpcRmfsInit(POBJGPU, POBJRPC, PMEMORY_DESCRIPTOR); +typedef NV_STATUS RpcUnsetPageDirectory(POBJGPU, POBJRPC, NvHandle, NvHandle, + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS*); +typedef NV_STATUS RpcGetGspStaticInfo(POBJGPU, POBJRPC); +typedef NV_STATUS RpcGspSetSystemInfo(POBJGPU, POBJRPC); +typedef NV_STATUS RpcRmfsCleanup(POBJGPU, POBJRPC); +typedef NV_STATUS RpcSetPageDirectory(POBJGPU, POBJRPC, NvHandle, NvHandle, + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS*); +typedef NV_STATUS RpcUnloadingGuestDriver(POBJGPU, POBJRPC, NvBool, NvBool, NvU32); +typedef NV_STATUS RpcSetRegistry(POBJGPU, POBJRPC); +typedef NV_STATUS RpcRmfsCloseQueue(POBJGPU, POBJRPC); +typedef NV_STATUS RpcGetStaticInfo(POBJGPU, POBJRPC); +typedef NV_STATUS RpcIdleChannels(OBJGPU *, OBJRPC *, NvHandle *phclients, + NvHandle *phdevices, NvHandle *phchannels, + NvU32 nentries, NvU32 flags, NvU32 timeout); +typedef NV_STATUS RpcUpdateBarPde(POBJGPU, POBJRPC, NV_RPC_UPDATE_PDE_BAR_TYPE, NvU64, NvU64); +typedef NV_STATUS RpcMapMemoryDma(POBJGPU, POBJRPC, NvHandle, NvHandle, NvHandle, + NvHandle, NvU64, NvU64, NvU32, NvU64*); +typedef NV_STATUS RpcUnmapMemoryDma(POBJGPU, POBJRPC, NvHandle, NvHandle, NvHandle, NvHandle, NvU32, NvU64); +typedef NV_STATUS RpcRmfsTest(POBJGPU, POBJRPC, NvU32, NvU32, NvU32, NvU32); +typedef NV_STATUS Rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *); + + +// +// struct to access RPC's hal interfaces, eg: pRpc->hal.rpcReadFoo(pGpu, pRpc) +// + +typedef struct RPC_HAL_IFACES { + RpcVgpuPfRegRead32 *rpcVgpuPfRegRead32; /* Read reg value 
from plugin */ + RpcDumpProtobufComponent *rpcDumpProtobufComponent; /* Dump a GSP component into the protobuf. */ + RpcAllocMemory *rpcAllocMemory; /* ALLOC_MEMORY */ + RpcGpuExecRegOps *rpcGpuExecRegOps; /* GPU_EXEC_REG_OPS */ + RpcRmfsInit *rpcRmfsInit; /* Resman File Streaming Init */ + RpcUnsetPageDirectory *rpcUnsetPageDirectory; /* UNSET_PAGE_DIRECTORY */ + RpcGetGspStaticInfo *rpcGetGspStaticInfo; /* Get static info from GSP RM. */ + RpcGspSetSystemInfo *rpcGspSetSystemInfo; /* Tells GSP-RM about the overall system environment */ + RpcRmfsCleanup *rpcRmfsCleanup; /* Resman File Cleanup */ + RpcSetPageDirectory *rpcSetPageDirectory; /* SET_PAGE_DIRECTORY */ + RpcUnloadingGuestDriver *rpcUnloadingGuestDriver; /* UNLOADING_GUEST_DRIVER */ + RpcSetRegistry *rpcSetRegistry; /* GSP Init Set registry values */ + RpcRmfsCloseQueue *rpcRmfsCloseQueue; /* Resman File Streaming Close Queue */ + RpcGetStaticInfo *rpcGetStaticInfo; /* GET_STATIC_INFO */ + RpcIdleChannels *rpcIdleChannels; /* IDLE_CHANNELS */ + RpcUpdateBarPde *rpcUpdateBarPde; /* Update the value of BAR1/BAR2 PDE */ + RpcMapMemoryDma *rpcMapMemoryDma; /* MAP_MEMORY_DMA */ + RpcUnmapMemoryDma *rpcUnmapMemoryDma; /* UNMAP_MEMORY_DMA */ + RpcRmfsTest *rpcRmfsTest; /* Resman File Streaming Test */ + Rpc_iGrp_ipVersions_getInfo *rpc_iGrp_ipVersions_getInfo; /* Return lookup table of hal interface ptrs based on IP_VERSION */ +} RPC_HAL_IFACES; + + +// +// macro defines to directly access RPC's hal interfaces, +// eg: #define rpcReadFoo_HAL(_pGpu, _pRpc) _pRpc->hal.rpcReadFoo(_pGpu, _pRpc) +// + +#define rpcVgpuPfRegRead32_HAL(_pGpu, _pRpc, _arg0, _pArg1, _arg2) \ + (_pRpc)->_hal.rpcVgpuPfRegRead32(_pGpu, _pRpc, _arg0, _pArg1, _arg2) +#define rpcDumpProtobufComponent_HAL(_pGpu, _pRpc, _pPrbEnc, _pNvDumpState, _component) \ + (_pRpc)->_hal.rpcDumpProtobufComponent(_pGpu, _pRpc, _pPrbEnc, _pNvDumpState, _component) +#define rpcAllocMemory_HAL(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _pArg5) \ + (_pRpc)->_hal.rpcAllocMemory(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _pArg5) +#define rpcGpuExecRegOps_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2, _pArg3) \ + (_pRpc)->_hal.rpcGpuExecRegOps(_pGpu, _pRpc, _arg0, _arg1, _pArg2, _pArg3) +#define rpcRmfsInit_HAL(_pGpu, _pRpc, _arg0) \ + (_pRpc)->_hal.rpcRmfsInit(_pGpu, _pRpc, _arg0) +#define rpcUnsetPageDirectory_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \ + (_pRpc)->_hal.rpcUnsetPageDirectory(_pGpu, _pRpc, _arg0, _arg1, _pArg2) +#define rpcGetGspStaticInfo_HAL(_pGpu, _pRpc) \ + (_pRpc)->_hal.rpcGetGspStaticInfo(_pGpu, _pRpc) +#define rpcGspSetSystemInfo_HAL(_pGpu, _pRpc) \ + (_pRpc)->_hal.rpcGspSetSystemInfo(_pGpu, _pRpc) +#define rpcRmfsCleanup_HAL(_pGpu, _pRpc) \ + (_pRpc)->_hal.rpcRmfsCleanup(_pGpu, _pRpc) +#define rpcSetPageDirectory_HAL(_pGpu, _pRpc, _arg0, _arg1, _pArg2) \ + (_pRpc)->_hal.rpcSetPageDirectory(_pGpu, _pRpc, _arg0, _arg1, _pArg2) +#define rpcUnloadingGuestDriver_HAL(_pGpu, _pRpc, _arg0, _arg1, _arg2) \ + (_pRpc)->_hal.rpcUnloadingGuestDriver(_pGpu, _pRpc, _arg0, _arg1, _arg2) +#define rpcSetRegistry_HAL(_pGpu, _pRpc) \ + (_pRpc)->_hal.rpcSetRegistry(_pGpu, _pRpc) +#define rpcRmfsCloseQueue_HAL(_pGpu, _pRpc) \ + (_pRpc)->_hal.rpcRmfsCloseQueue(_pGpu, _pRpc) +#define rpcGetStaticInfo_HAL(_pGpu, _pRpc) \ + (_pRpc)->_hal.rpcGetStaticInfo(_pGpu, _pRpc) +#define rpcIdleChannels_HAL(_pArg0, _pRpc, _pPhclients, _pPhdevices, _pPhchannels, _nentries, _flags, _timeout) \ + (_pRpc)->_hal.rpcIdleChannels(_pArg0, _pRpc, _pPhclients, _pPhdevices, _pPhchannels, _nentries, _flags, 
_timeout) +#define rpcUpdateBarPde_HAL(_pGpu, _pRpc, _arg0, _arg1, _arg2) \ + (_pRpc)->_hal.rpcUpdateBarPde(_pGpu, _pRpc, _arg0, _arg1, _arg2) +#define rpcMapMemoryDma_HAL(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _arg5, _arg6, _pArg7) \ + (_pRpc)->_hal.rpcMapMemoryDma(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _arg5, _arg6, _pArg7) +#define rpcUnmapMemoryDma_HAL(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _arg5) \ + (_pRpc)->_hal.rpcUnmapMemoryDma(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3, _arg4, _arg5) +#define rpcRmfsTest_HAL(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3) \ + (_pRpc)->_hal.rpcRmfsTest(_pGpu, _pRpc, _arg0, _arg1, _arg2, _arg3) +#define rpc_iGrp_ipVersions_getInfo_HAL(_pRpc, _pArg0) \ + (_pRpc)->_hal.rpc_iGrp_ipVersions_getInfo(_pArg0) + +// +// hal function pointer defines requested by the :GEN_FNPTR_DEFINE flag +// + +#define rpc_iGrp_ipVersions_getInfo_HAL_FNPTR(_pObj) (_pObj)->_hal.rpc_iGrp_ipVersions_getInfo + +// Are there any optimized hal interfaces? +#define RPC_DIRECT_HAL_CALLS 0 + +// Are there any non-optimized hal interfaces? +#define RPC_INDIRECT_HAL_CALLS 1 + + +// +// Inline stub function definitions. +// + + + +// +// RPC PDB properties +// + + + +#endif // _G_RPCHAL_H_ diff --git a/src/nvidia/generated/g_rpc_iom.c b/src/nvidia/generated/g_rpc_iom.c new file mode 100644 index 000000000..45dfd15c4 --- /dev/null +++ b/src/nvidia/generated/g_rpc_iom.c @@ -0,0 +1,140 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Profile: shipping-gpus-openrm +// Template: templates/gt_eng_iom.c +// +#include "nvstatus.h" +#include "nvport/inline/util_valist.h" +#include "nvport/nvport.h" +#include "core/core.h" +#include "nvoc/rtti.h" +#include "os/os.h" +#include "gpu/gpu.h" + +#include "objrpc.h" + +#include "g_rpc_private.h" + +// +// RPC's object-level _STUB, _MISSING, _VGPUSTUB interface routines +// (if any) +// + + + + + + + + +// +// Initialize RPC's object-level interfaces +// +void rpcObjIfacesSetup(OBJRPC *pRpc) +{ + RPC_OBJ_IFACES *pRpcMethods = &pRpc->obj; + pRpcMethods->__rpcConstruct__ = rpcConstruct_IMPL; + pRpcMethods->__rpcDestroy__ = rpcDestroy_IMPL; + pRpcMethods->__rpcSendMessage__ = rpcSendMessage_IMPL; + pRpcMethods->__rpcRecvPoll__ = rpcRecvPoll_IMPL; + // pRpcMethods->__rpcSendMessageUvm__ = rpcSendMessageUvm_IMPL; # ONLY_IF => VIRTUALIZATION + // pRpcMethods->__rpcRecvPollUvm__ = rpcRecvPollUvm_IMPL; # ONLY_IF => VIRTUALIZATION +} + +void +rpcSetPropertiesSpecial +( + POBJRPC pRpc +) +{ + +} + + + + + +// +// RPC's run-time type information +// + +extern const struct NVOC_CLASS_DEF __iom_class_def_OBJRPC; + +void __iom_dtor_OBJRPC(POBJRPC); + +const struct NVOC_RTTI __iom_rtti_OBJRPC_OBJRPC = +{ + &__iom_class_def_OBJRPC, + (NVOC_DYNAMIC_DTOR)&__iom_dtor_OBJRPC, + 0, +}; + +static const struct NVOC_CASTINFO __iom_castinfo_OBJRPC = +{ + 1, + { + &__iom_rtti_OBJRPC_OBJRPC + } +}; + +extern const NVOC_RTTI_PROVIDER __iom_rtti_provider; +const struct NVOC_CLASS_DEF __iom_class_def_OBJRPC = +{ + { + sizeof(OBJRPC), + classId(OBJRPC), + &__iom_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + "OBJRPC", +#endif + }, + NULL, + &__iom_castinfo_OBJRPC, +}; + + + + +// +// RPC's object infrastructure boilerplate +// + +// initializers, constructors, and destructors for RPC's base classes + +NV_STATUS __iom_ctor_OBJRPC(POBJRPC pRpc) +{ + RMCFG_MODULE_ENABLED_OR_ASSERT_AND_BAIL(RPC); + return NV_OK; +} + +NV_STATUS __iom_vctor_OBJRPC(Dynamic *pDynamic, va_list args) +{ + NV_STATUS status; + POBJRPC pThis = 
dynamicCast(pDynamic, OBJRPC); + if (pThis != NULL) + { + status = __iom_ctor_OBJRPC(pThis); + } + else + { + status = NV_ERR_INVALID_OBJECT; + } + return status; +} + +void __iom_dtor_OBJRPC(POBJRPC pRpc) +{ +} + +// RPC's object initializer function to set up vtables and RTTI +void __iom_init_OBJRPC(POBJRPC pRpc) +{ + rpcObjIfacesSetup(pRpc); + rpcSetPropertiesSpecial(pRpc); +} + + +// +// RPC's object creation routine +// + diff --git a/src/nvidia/generated/g_rpc_odb.h b/src/nvidia/generated/g_rpc_odb.h new file mode 100644 index 000000000..c8961bb8d --- /dev/null +++ b/src/nvidia/generated/g_rpc_odb.h @@ -0,0 +1,31 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Provides RPC object boilerplate and RTTI. +// +// Profile: shipping-gpus-openrm +// Haldef: rpc.def +// Template: templates/gt_eng_odb.h +// + +#ifndef _G_RPC_ODB_H_ +#define _G_RPC_ODB_H_ + +#include "core/core.h" +#include "g_rpc_hal.h" + + +#define __RPC_OBJECT_BASE_DEFINITION \ + const struct NVOC_RTTI *__nvoc_rtti; \ + RPC_OBJ_IFACES obj; \ + RPC_HAL_IFACES _hal + + +extern const struct NVOC_CLASS_DEF __iom_class_def_OBJRPC; +#define __nvoc_class_def_OBJRPC __iom_class_def_OBJRPC + +#ifndef __nvoc_class_id_OBJRPC +#define __nvoc_class_id_OBJRPC 0x1ab16a +#endif /* __nvoc_class_id_OBJRPC */ + + +#endif // _G_RPC_ODB_H_ diff --git a/src/nvidia/generated/g_rpc_private.h b/src/nvidia/generated/g_rpc_private.h new file mode 100644 index 000000000..663c9877d --- /dev/null +++ b/src/nvidia/generated/g_rpc_private.h @@ -0,0 +1,2886 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// Private HAL support for RPC. +// +// Profile: shipping-gpus-openrm +// Haldef: rpc.def +// Template: templates/gt_eng_private.h +// + +#ifndef _G_RPC_PRIVATE_H_ +#define _G_RPC_PRIVATE_H_ + +#include "g_rpc_hal.h" + + + // RPC:VGPU_PF_REG_READ32 +RpcVgpuPfRegRead32 rpcVgpuPfRegRead32_v15_00; +RpcVgpuPfRegRead32 rpcVgpuPfRegRead32_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:DUMP_PROTOBUF_COMPONENT +RpcDumpProtobufComponent rpcDumpProtobufComponent_v18_12; +RpcDumpProtobufComponent rpcDumpProtobufComponent_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:ALLOC_MEMORY +RpcAllocMemory rpcAllocMemory_v13_01; +RpcAllocMemory rpcAllocMemory_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:GPU_EXEC_REG_OPS +RpcGpuExecRegOps rpcGpuExecRegOps_v12_01; +RpcGpuExecRegOps rpcGpuExecRegOps_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:RMFS_INIT +RpcRmfsInit rpcRmfsInit_v15_00; +RpcRmfsInit rpcRmfsInit_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:UNSET_PAGE_DIRECTORY +RpcUnsetPageDirectory rpcUnsetPageDirectory_v03_00; +RpcUnsetPageDirectory rpcUnsetPageDirectory_v1E_05; +RpcUnsetPageDirectory rpcUnsetPageDirectory_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:GET_GSP_STATIC_INFO +RpcGetGspStaticInfo rpcGetGspStaticInfo_v14_00; +RpcGetGspStaticInfo rpcGetGspStaticInfo_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:GSP_SET_SYSTEM_INFO +RpcGspSetSystemInfo rpcGspSetSystemInfo_v17_00; +RpcGspSetSystemInfo rpcGspSetSystemInfo_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:RMFS_CLEANUP +RpcRmfsCleanup rpcRmfsCleanup_v15_00; +RpcRmfsCleanup rpcRmfsCleanup_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:SET_PAGE_DIRECTORY +RpcSetPageDirectory rpcSetPageDirectory_v03_00; +RpcSetPageDirectory rpcSetPageDirectory_v1E_05; 
+RpcSetPageDirectory rpcSetPageDirectory_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:UNLOADING_GUEST_DRIVER +RpcUnloadingGuestDriver rpcUnloadingGuestDriver_v03_00; +RpcUnloadingGuestDriver rpcUnloadingGuestDriver_v1F_07; +RpcUnloadingGuestDriver rpcUnloadingGuestDriver_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:SET_REGISTRY +RpcSetRegistry rpcSetRegistry_v17_00; +RpcSetRegistry rpcSetRegistry_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:RMFS_CLOSE_QUEUE +RpcRmfsCloseQueue rpcRmfsCloseQueue_v15_00; +RpcRmfsCloseQueue rpcRmfsCloseQueue_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:GET_STATIC_INFO +RpcGetStaticInfo rpcGetStaticInfo_v17_05; +RpcGetStaticInfo rpcGetStaticInfo_v18_03; +RpcGetStaticInfo rpcGetStaticInfo_v18_04; +RpcGetStaticInfo rpcGetStaticInfo_v18_0E; +RpcGetStaticInfo rpcGetStaticInfo_v18_10; +RpcGetStaticInfo rpcGetStaticInfo_v18_11; +RpcGetStaticInfo rpcGetStaticInfo_v18_13; +RpcGetStaticInfo rpcGetStaticInfo_v18_16; +RpcGetStaticInfo rpcGetStaticInfo_v19_00; +RpcGetStaticInfo rpcGetStaticInfo_v1A_00; +RpcGetStaticInfo rpcGetStaticInfo_v1A_05; +RpcGetStaticInfo rpcGetStaticInfo_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:IDLE_CHANNELS +RpcIdleChannels rpcIdleChannels_v03_00; +RpcIdleChannels rpcIdleChannels_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:UPDATE_BAR_PDE +RpcUpdateBarPde rpcUpdateBarPde_v15_00; +RpcUpdateBarPde rpcUpdateBarPde_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:MAP_MEMORY_DMA +RpcMapMemoryDma rpcMapMemoryDma_v03_00; +RpcMapMemoryDma rpcMapMemoryDma_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:UNMAP_MEMORY_DMA +RpcUnmapMemoryDma rpcUnmapMemoryDma_v03_00; +RpcUnmapMemoryDma rpcUnmapMemoryDma_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + // RPC:RMFS_TEST +RpcRmfsTest rpcRmfsTest_v15_00; +RpcRmfsTest rpcRmfsTest_STUB; // TU10X, GA100, GA102, GA103, GA104, GA106, GA107 + + + + + + +// +// RPC's object-level *non-static* interface functions (static ones are below) +// +RpcConstruct rpcConstruct_IMPL; +RpcDestroy rpcDestroy_IMPL; +RpcSendMessage rpcSendMessage_IMPL; +RpcRecvPoll rpcRecvPoll_IMPL; + + + +#if defined(RMCFG_ENGINE_SETUP) // for use by hal init only + + + + + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v03_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v04_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v05_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v06_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + 
OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v07_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v07_07(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v08_01(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v09_08(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v09_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v09_0C(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v12_01(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v13_01(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v14_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v14_01(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // 
avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v14_02(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v15_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v15_02(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v17_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v17_04(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v17_05(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_01(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_02(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_03(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled 
chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_04(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_05(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_06(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_07(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_08(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_09(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_0A(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_0C(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_0D(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void 
rpc_iGrp_ipVersions_Install_v18_0E(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_0F(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_10(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_11(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_12(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_13(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_14(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_15(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v18_16(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v19_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v19_01(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU 
pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_01(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_02(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_03(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_04(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_05(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_06(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_07(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_08(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_09(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = 
&pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_0A(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_0C(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_0D(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_0E(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_0F(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_10(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_12(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_13(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_14(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // 
+} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_15(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_16(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_17(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_18(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_1A(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_1B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_1C(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_1D(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_1E(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_1F(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void 
rpc_iGrp_ipVersions_Install_v1A_20(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_21(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_22(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_23(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1A_24(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1B_02(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1B_04(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_02(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_04(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_05(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_07(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU 
pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_08(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_09(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_0A(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1C_0C(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1D_02(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1D_05(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1D_06(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_01(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_04(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = 
&pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_05(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_06(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_07(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_08(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_0A(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_0D(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1E_0E(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_00(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_03(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // 
+} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_04(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_05(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_06(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_07(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_08(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_0A(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_0B(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_0C(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_0D(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void rpc_iGrp_ipVersions_Install_v1F_0E(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + +// No enabled chips use this variant provider +static void 
rpc_iGrp_ipVersions_Install_v1F_0F(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ +#if 0 + + POBJGPU pGpu = pInfo->pGpu; + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pGpu += 0; + pRpcHal += 0; + + +#endif // +} + + + + +// the "_UNASSIGNED" function for all IP_VERSIONS dynamic interfaces +NV_STATUS iGrp_ipVersions_UNASSIGNED(void); + + +static NV_STATUS rpc_iGrp_ipVersions_Wrapup(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ + OBJRPC *pRpc = (OBJRPC *) pInfo->pDynamic; + RPC_HAL_IFACES *pRpcHal = &pRpc->_hal; + + // avoid possible unused warnings + pRpcHal += 0; + + // fixup per-interface overrides? + if (IsIPVersionInRange(pRpc, 0x15000000, 0xFFFFFFFF)) + pRpcHal->rpcVgpuPfRegRead32 = rpcVgpuPfRegRead32_v15_00; + if (IsIPVersionInRange(pRpc, 0x18120000, 0xFFFFFFFF)) + pRpcHal->rpcDumpProtobufComponent = rpcDumpProtobufComponent_v18_12; + if (IsIPVersionInRange(pRpc, 0x13010000, 0xFFFFFFFF)) + pRpcHal->rpcAllocMemory = rpcAllocMemory_v13_01; + if (IsIPVersionInRange(pRpc, 0x12010000, 0xFFFFFFFF)) + pRpcHal->rpcGpuExecRegOps = rpcGpuExecRegOps_v12_01; + if (IsIPVersionInRange(pRpc, 0x15000000, 0xFFFFFFFF)) + pRpcHal->rpcRmfsInit = rpcRmfsInit_v15_00; + if (IsIPVersionInRange(pRpc, 0x03000000, 0x1E04FFFF)) + pRpcHal->rpcUnsetPageDirectory = rpcUnsetPageDirectory_v03_00; + if (IsIPVersionInRange(pRpc, 0x1E050000, 0xFFFFFFFF)) + pRpcHal->rpcUnsetPageDirectory = rpcUnsetPageDirectory_v1E_05; + if (IsIPVersionInRange(pRpc, 0x14000000, 0xFFFFFFFF)) + pRpcHal->rpcGetGspStaticInfo = rpcGetGspStaticInfo_v14_00; + if (IsIPVersionInRange(pRpc, 0x17000000, 0xFFFFFFFF)) + pRpcHal->rpcGspSetSystemInfo = rpcGspSetSystemInfo_v17_00; + if (IsIPVersionInRange(pRpc, 0x15000000, 0xFFFFFFFF)) + pRpcHal->rpcRmfsCleanup = rpcRmfsCleanup_v15_00; + if (IsIPVersionInRange(pRpc, 0x03000000, 0x1E04FFFF)) + pRpcHal->rpcSetPageDirectory = rpcSetPageDirectory_v03_00; + if (IsIPVersionInRange(pRpc, 0x1E050000, 0xFFFFFFFF)) + pRpcHal->rpcSetPageDirectory = rpcSetPageDirectory_v1E_05; + if (IsIPVersionInRange(pRpc, 0x03000000, 0x1F06FFFF)) + pRpcHal->rpcUnloadingGuestDriver = rpcUnloadingGuestDriver_v03_00; + if (IsIPVersionInRange(pRpc, 0x1F070000, 0xFFFFFFFF)) + pRpcHal->rpcUnloadingGuestDriver = rpcUnloadingGuestDriver_v1F_07; + if (IsIPVersionInRange(pRpc, 0x17000000, 0xFFFFFFFF)) + pRpcHal->rpcSetRegistry = rpcSetRegistry_v17_00; + if (IsIPVersionInRange(pRpc, 0x15000000, 0xFFFFFFFF)) + pRpcHal->rpcRmfsCloseQueue = rpcRmfsCloseQueue_v15_00; + if (IsIPVersionInRange(pRpc, 0x17050000, 0x1802FFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v17_05; + if (IsIPVersionInRange(pRpc, 0x18030000, 0x1803FFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v18_03; + if (IsIPVersionInRange(pRpc, 0x18040000, 0x180DFFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v18_04; + if (IsIPVersionInRange(pRpc, 0x180E0000, 0x180FFFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v18_0E; + if (IsIPVersionInRange(pRpc, 0x18100000, 0x1810FFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v18_10; + if (IsIPVersionInRange(pRpc, 0x18110000, 0x1812FFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v18_11; + if (IsIPVersionInRange(pRpc, 0x18130000, 0x1815FFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v18_13; + if (IsIPVersionInRange(pRpc, 0x18160000, 0x18FFFFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v18_16; + if (IsIPVersionInRange(pRpc, 0x19000000, 0x19FFFFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v19_00; + if (IsIPVersionInRange(pRpc, 
0x1A000000, 0x1A04FFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v1A_00; + if (IsIPVersionInRange(pRpc, 0x1A050000, 0xFFFFFFFF)) + pRpcHal->rpcGetStaticInfo = rpcGetStaticInfo_v1A_05; + if (IsIPVersionInRange(pRpc, 0x03000000, 0xFFFFFFFF)) + pRpcHal->rpcIdleChannels = rpcIdleChannels_v03_00; + if (IsIPVersionInRange(pRpc, 0x15000000, 0xFFFFFFFF)) + pRpcHal->rpcUpdateBarPde = rpcUpdateBarPde_v15_00; + if (IsIPVersionInRange(pRpc, 0x03000000, 0xFFFFFFFF)) + pRpcHal->rpcMapMemoryDma = rpcMapMemoryDma_v03_00; + if (IsIPVersionInRange(pRpc, 0x03000000, 0xFFFFFFFF)) + pRpcHal->rpcUnmapMemoryDma = rpcUnmapMemoryDma_v03_00; + if (IsIPVersionInRange(pRpc, 0x15000000, 0xFFFFFFFF)) + pRpcHal->rpcRmfsTest = rpcRmfsTest_v15_00; + + // Verify each 'dynamically set' interface was actually set + +#define _RPC_HAL_VERIFY_INTERFACE(_pHalFn) \ + NV_ASSERT_OR_RETURN_PRECOMP(_pHalFn != (void *) iGrp_ipVersions_UNASSIGNED, NV_ERR_NOT_SUPPORTED) + + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcVgpuPfRegRead32); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcDumpProtobufComponent); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcAllocMemory); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcGpuExecRegOps); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcRmfsInit); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcUnsetPageDirectory); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcGetGspStaticInfo); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcGspSetSystemInfo); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcRmfsCleanup); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcSetPageDirectory); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcUnloadingGuestDriver); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcSetRegistry); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcRmfsCloseQueue); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcGetStaticInfo); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcIdleChannels); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcUpdateBarPde); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcMapMemoryDma); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcUnmapMemoryDma); + _RPC_HAL_VERIFY_INTERFACE(pRpcHal->rpcRmfsTest); + +#undef _RPC_HAL_VERIFY_INTERFACE + + return NV_OK; +} + + +static NV_STATUS rpc_iGrp_ipVersions_getInfo(IGRP_IP_VERSIONS_TABLE_INFO *pInfo) +{ + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v03_00[] = { + { 0x03000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v04_00[] = { + { 0x04000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v05_00[] = { + { 0x05000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v06_00[] = { + { 0x06000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v07_00[] = { + { 0x07000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v07_07[] = { + { 0x07070000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v08_01[] = { + { 0x08010000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v09_08[] = { + { 0x09080000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v09_0B[] = { + { 0x090B0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v09_0C[] = { + { 0x090C0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v12_01[] = { + { 0x12010000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v13_01[] = { + { 0x13010000, 
0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v14_00[] = { + { 0x14000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v14_01[] = { + { 0x14010000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v14_02[] = { + { 0x14020000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v15_00[] = { + { 0x15000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v15_02[] = { + { 0x15020000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v17_00[] = { + { 0x17000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v17_04[] = { + { 0x17040000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v17_05[] = { + { 0x17050000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_00[] = { + { 0x18000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_01[] = { + { 0x18010000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_02[] = { + { 0x18020000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_03[] = { + { 0x18030000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_04[] = { + { 0x18040000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_05[] = { + { 0x18050000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_06[] = { + { 0x18060000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_07[] = { + { 0x18070000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_08[] = { + { 0x18080000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_09[] = { + { 0x18090000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_0A[] = { + { 0x180A0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_0B[] = { + { 0x180B0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_0C[] = { + { 0x180C0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_0D[] = { + { 0x180D0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_0E[] = { + { 0x180E0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_0F[] = { + { 0x180F0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_10[] = { + { 0x18100000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_11[] = { + { 0x18110000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_12[] = { + { 0x18120000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_13[] = { + { 0x18130000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_14[] = { + { 0x18140000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_15[] = { + { 0x18150000, 0xFFFFFFFF, }, // + }; + static const 
IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v18_16[] = { + { 0x18160000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v19_00[] = { + { 0x19000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v19_01[] = { + { 0x19010000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_00[] = { + { 0x1A000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_01[] = { + { 0x1A010000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_02[] = { + { 0x1A020000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_03[] = { + { 0x1A030000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_04[] = { + { 0x1A040000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_05[] = { + { 0x1A050000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_06[] = { + { 0x1A060000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_07[] = { + { 0x1A070000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_08[] = { + { 0x1A080000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_09[] = { + { 0x1A090000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_0A[] = { + { 0x1A0A0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_0B[] = { + { 0x1A0B0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_0C[] = { + { 0x1A0C0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_0D[] = { + { 0x1A0D0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_0E[] = { + { 0x1A0E0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_0F[] = { + { 0x1A0F0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_10[] = { + { 0x1A100000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_12[] = { + { 0x1A120000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_13[] = { + { 0x1A130000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_14[] = { + { 0x1A140000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_15[] = { + { 0x1A150000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_16[] = { + { 0x1A160000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_17[] = { + { 0x1A170000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_18[] = { + { 0x1A180000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_1A[] = { + { 0x1A1A0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_1B[] = { + { 0x1A1B0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_1C[] = { + { 0x1A1C0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE 
RPC_IGRP_IP_VERSIONS_RANGES_v1A_1D[] = { + { 0x1A1D0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_1E[] = { + { 0x1A1E0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_1F[] = { + { 0x1A1F0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_20[] = { + { 0x1A200000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_21[] = { + { 0x1A210000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_22[] = { + { 0x1A220000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_23[] = { + { 0x1A230000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1A_24[] = { + { 0x1A240000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1B_02[] = { + { 0x1B020000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1B_04[] = { + { 0x1B040000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_02[] = { + { 0x1C020000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_04[] = { + { 0x1C040000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_05[] = { + { 0x1C050000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_07[] = { + { 0x1C070000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_08[] = { + { 0x1C080000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_09[] = { + { 0x1C090000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_0A[] = { + { 0x1C0A0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_0B[] = { + { 0x1C0B0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1C_0C[] = { + { 0x1C0C0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1D_02[] = { + { 0x1D020000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1D_05[] = { + { 0x1D050000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1D_06[] = { + { 0x1D060000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_01[] = { + { 0x1E010000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_04[] = { + { 0x1E040000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_05[] = { + { 0x1E050000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_06[] = { + { 0x1E060000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_07[] = { + { 0x1E070000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_08[] = { + { 0x1E080000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_0A[] = { + { 0x1E0A0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_0B[] = { + { 0x1E0B0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_0D[] = { + { 
0x1E0D0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1E_0E[] = { + { 0x1E0E0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_00[] = { + { 0x1F000000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_03[] = { + { 0x1F030000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_04[] = { + { 0x1F040000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_05[] = { + { 0x1F050000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_06[] = { + { 0x1F060000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_07[] = { + { 0x1F070000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_08[] = { + { 0x1F080000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_0A[] = { + { 0x1F0A0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_0B[] = { + { 0x1F0B0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_0C[] = { + { 0x1F0C0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_0D[] = { + { 0x1F0D0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_0E[] = { + { 0x1F0E0000, 0xFFFFFFFF, }, // + }; + static const IGRP_IP_VERSION_RANGE RPC_IGRP_IP_VERSIONS_RANGES_v1F_0F[] = { + { 0x1F0F0000, 0xFFFFFFFF, }, // + }; + +#define _RPC_HAL_IGRP_ENTRY_INIT(v) \ + { RPC_IGRP_IP_VERSIONS_RANGES_##v, NV_ARRAY_ELEMENTS(RPC_IGRP_IP_VERSIONS_RANGES_##v), rpc_iGrp_ipVersions_Install_##v, } + + static const IGRP_IP_VERSIONS_ENTRY rpc_iGrp_ipVersions_table[] = { + _RPC_HAL_IGRP_ENTRY_INIT(v03_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v04_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v05_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v06_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v07_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v07_07), // + _RPC_HAL_IGRP_ENTRY_INIT(v08_01), // + _RPC_HAL_IGRP_ENTRY_INIT(v09_08), // + _RPC_HAL_IGRP_ENTRY_INIT(v09_0B), // + _RPC_HAL_IGRP_ENTRY_INIT(v09_0C), // + _RPC_HAL_IGRP_ENTRY_INIT(v12_01), // + _RPC_HAL_IGRP_ENTRY_INIT(v13_01), // + _RPC_HAL_IGRP_ENTRY_INIT(v14_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v14_01), // + _RPC_HAL_IGRP_ENTRY_INIT(v14_02), // + _RPC_HAL_IGRP_ENTRY_INIT(v15_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v15_02), // + _RPC_HAL_IGRP_ENTRY_INIT(v17_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v17_04), // + _RPC_HAL_IGRP_ENTRY_INIT(v17_05), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_01), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_02), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_03), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_04), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_05), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_06), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_07), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_08), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_09), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_0A), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_0B), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_0C), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_0D), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_0E), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_0F), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_10), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_11), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_12), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_13), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_14), // + _RPC_HAL_IGRP_ENTRY_INIT(v18_15), // + 
_RPC_HAL_IGRP_ENTRY_INIT(v18_16), // + _RPC_HAL_IGRP_ENTRY_INIT(v19_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v19_01), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_01), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_02), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_03), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_04), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_05), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_06), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_07), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_08), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_09), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_0A), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_0B), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_0C), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_0D), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_0E), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_0F), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_10), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_12), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_13), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_14), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_15), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_16), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_17), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_18), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_1A), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_1B), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_1C), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_1D), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_1E), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_1F), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_20), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_21), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_22), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_23), // + _RPC_HAL_IGRP_ENTRY_INIT(v1A_24), // + _RPC_HAL_IGRP_ENTRY_INIT(v1B_02), // + _RPC_HAL_IGRP_ENTRY_INIT(v1B_04), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_02), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_04), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_05), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_07), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_08), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_09), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_0A), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_0B), // + _RPC_HAL_IGRP_ENTRY_INIT(v1C_0C), // + _RPC_HAL_IGRP_ENTRY_INIT(v1D_02), // + _RPC_HAL_IGRP_ENTRY_INIT(v1D_05), // + _RPC_HAL_IGRP_ENTRY_INIT(v1D_06), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_01), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_04), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_05), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_06), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_07), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_08), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_0A), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_0B), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_0D), // + _RPC_HAL_IGRP_ENTRY_INIT(v1E_0E), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_00), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_03), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_04), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_05), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_06), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_07), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_08), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_0A), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_0B), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_0C), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_0D), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_0E), // + _RPC_HAL_IGRP_ENTRY_INIT(v1F_0F), // + }; + +#undef _RPC_HAL_IGRP_ENTRY_INIT + + pInfo->pTable = rpc_iGrp_ipVersions_table; + pInfo->numEntries = NV_ARRAY_ELEMENTS(rpc_iGrp_ipVersions_table); + pInfo->ifacesWrapupFn = rpc_iGrp_ipVersions_Wrapup; + + return NV_OK; +} + + +// +// Setup RPC's hal interface function pointers +// + +#if defined(RMCFG_HAL_SETUP_TU102) + +static void rpcHalIfacesSetup_TU102(RPC_HAL_IFACES *pRpcHal) +{ + + // TU102's RPC hal interface function pointer block + static const RPC_HAL_IFACES rpcHalIfacesInitStruct_TU102 = + { + rpcVgpuPfRegRead32_STUB, // rpcVgpuPfRegRead32 + rpcDumpProtobufComponent_STUB, // rpcDumpProtobufComponent + 
rpcAllocMemory_STUB, // rpcAllocMemory + rpcGpuExecRegOps_STUB, // rpcGpuExecRegOps + rpcRmfsInit_STUB, // rpcRmfsInit + rpcUnsetPageDirectory_STUB, // rpcUnsetPageDirectory + rpcGetGspStaticInfo_STUB, // rpcGetGspStaticInfo + rpcGspSetSystemInfo_STUB, // rpcGspSetSystemInfo + rpcRmfsCleanup_STUB, // rpcRmfsCleanup + rpcSetPageDirectory_STUB, // rpcSetPageDirectory + rpcUnloadingGuestDriver_STUB, // rpcUnloadingGuestDriver + rpcSetRegistry_STUB, // rpcSetRegistry + rpcRmfsCloseQueue_STUB, // rpcRmfsCloseQueue + rpcGetStaticInfo_STUB, // rpcGetStaticInfo + rpcIdleChannels_STUB, // rpcIdleChannels + rpcUpdateBarPde_STUB, // rpcUpdateBarPde + rpcMapMemoryDma_STUB, // rpcMapMemoryDma + rpcUnmapMemoryDma_STUB, // rpcUnmapMemoryDma + rpcRmfsTest_STUB, // rpcRmfsTest + rpc_iGrp_ipVersions_getInfo, // rpc_iGrp_ipVersions_getInfo + + }; // rpcHalIfacesInitStruct_TU102 + + // init TU102's RPC function ptrs using the init struct above + *pRpcHal = rpcHalIfacesInitStruct_TU102; +} + +#endif // TU10X or TU102 + +#if defined(RMCFG_HAL_SETUP_TU104) + +static void rpcHalIfacesSetup_TU104(RPC_HAL_IFACES *pRpcHal) +{ + rpcHalIfacesSetup_TU102(pRpcHal); // TU104 interfaces identical to TU102 +} + +#endif // TU10X or TU104 + +#if defined(RMCFG_HAL_SETUP_TU106) + +static void rpcHalIfacesSetup_TU106(RPC_HAL_IFACES *pRpcHal) +{ + rpcHalIfacesSetup_TU102(pRpcHal); // TU106 interfaces identical to TU102 +} + +#endif // TU10X or TU106 + +#if defined(RMCFG_HAL_SETUP_TU116) + +static void rpcHalIfacesSetup_TU116(RPC_HAL_IFACES *pRpcHal) +{ + rpcHalIfacesSetup_TU102(pRpcHal); // TU116 interfaces identical to TU102 +} + +#endif // TU10X or TU116 + +#if defined(RMCFG_HAL_SETUP_TU117) + +static void rpcHalIfacesSetup_TU117(RPC_HAL_IFACES *pRpcHal) +{ + rpcHalIfacesSetup_TU102(pRpcHal); // TU117 interfaces identical to TU102 +} + +#endif // TU10X or TU117 + +#if defined(RMCFG_HAL_SETUP_GA100) + +static void rpcHalIfacesSetup_GA100(RPC_HAL_IFACES *pRpcHal) +{ + + // GA100's RPC hal interface function pointer block + static const RPC_HAL_IFACES rpcHalIfacesInitStruct_GA100 = + { + rpcVgpuPfRegRead32_STUB, // rpcVgpuPfRegRead32 + rpcDumpProtobufComponent_STUB, // rpcDumpProtobufComponent + rpcAllocMemory_STUB, // rpcAllocMemory + rpcGpuExecRegOps_STUB, // rpcGpuExecRegOps + rpcRmfsInit_STUB, // rpcRmfsInit + rpcUnsetPageDirectory_STUB, // rpcUnsetPageDirectory + rpcGetGspStaticInfo_STUB, // rpcGetGspStaticInfo + rpcGspSetSystemInfo_STUB, // rpcGspSetSystemInfo + rpcRmfsCleanup_STUB, // rpcRmfsCleanup + rpcSetPageDirectory_STUB, // rpcSetPageDirectory + rpcUnloadingGuestDriver_STUB, // rpcUnloadingGuestDriver + rpcSetRegistry_STUB, // rpcSetRegistry + rpcRmfsCloseQueue_STUB, // rpcRmfsCloseQueue + rpcGetStaticInfo_STUB, // rpcGetStaticInfo + rpcIdleChannels_STUB, // rpcIdleChannels + rpcUpdateBarPde_STUB, // rpcUpdateBarPde + rpcMapMemoryDma_STUB, // rpcMapMemoryDma + rpcUnmapMemoryDma_STUB, // rpcUnmapMemoryDma + rpcRmfsTest_STUB, // rpcRmfsTest + rpc_iGrp_ipVersions_getInfo, // rpc_iGrp_ipVersions_getInfo + + }; // rpcHalIfacesInitStruct_GA100 + + // init GA100's RPC function ptrs using the init struct above + *pRpcHal = rpcHalIfacesInitStruct_GA100; +} + +#endif // GA10X or GA100 + +#if defined(RMCFG_HAL_SETUP_GA102) + +static void rpcHalIfacesSetup_GA102(RPC_HAL_IFACES *pRpcHal) +{ + rpcHalIfacesSetup_GA100(pRpcHal); // GA102 interfaces almost identical to GA100 + +} + +#endif // GA10X or GA102 + +#if defined(RMCFG_HAL_SETUP_GA103) + +static void rpcHalIfacesSetup_GA103(RPC_HAL_IFACES *pRpcHal) +{ + 
rpcHalIfacesSetup_GA102(pRpcHal); // GA103 interfaces identical to GA102 +} + +#endif // GA10X or GA103 + +#if defined(RMCFG_HAL_SETUP_GA104) + +static void rpcHalIfacesSetup_GA104(RPC_HAL_IFACES *pRpcHal) +{ + rpcHalIfacesSetup_GA102(pRpcHal); // GA104 interfaces identical to GA102 +} + +#endif // GA10X or GA104 + +#if defined(RMCFG_HAL_SETUP_GA106) + +static void rpcHalIfacesSetup_GA106(RPC_HAL_IFACES *pRpcHal) +{ + rpcHalIfacesSetup_GA102(pRpcHal); // GA106 interfaces identical to GA102 +} + +#endif // GA10X or GA106 + +#if defined(RMCFG_HAL_SETUP_GA107) + +static void rpcHalIfacesSetup_GA107(RPC_HAL_IFACES *pRpcHal) +{ + rpcHalIfacesSetup_GA102(pRpcHal); // GA107 interfaces identical to GA102 +} + +#endif // GA10X or GA107 + + + + + +#endif // RMCFG_ENGINE_SETUP + + + +// Were any _MOCK interfaces generated into g_rpc_private.h ? +#define RPC_MOCK_FUNCTIONS_GENERATED 0 + + +#endif // _G_RPC_PRIVATE_H_ diff --git a/src/nvidia/generated/g_rs_client_nvoc.c b/src/nvidia/generated/g_rs_client_nvoc.c new file mode 100644 index 000000000..7dffc9329 --- /dev/null +++ b/src/nvidia/generated/g_rs_client_nvoc.c @@ -0,0 +1,421 @@ +#define NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_client_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x8f87e5 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_RsClient(RsClient*); +void __nvoc_init_funcTable_RsClient(RsClient*); +NV_STATUS __nvoc_ctor_RsClient(RsClient*, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RsClient(RsClient*); +void __nvoc_dtor_RsClient(RsClient*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsClient; + +static const struct NVOC_RTTI __nvoc_rtti_RsClient_RsClient = { + /*pClassDef=*/ &__nvoc_class_def_RsClient, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsClient, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsClient_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsClient, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsClient = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_RsClient_RsClient, + &__nvoc_rtti_RsClient_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsClient), + /*classId=*/ classId(RsClient), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsClient", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsClient, + /*pCastInfo=*/ &__nvoc_castinfo_RsClient, + /*pExportInfo=*/ &__nvoc_export_info_RsClient +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsClient = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsClient(RsClient *pThis) { + __nvoc_clientDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsClient(RsClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsClient(RsClient *pThis, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct 
RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_RsClient_fail_Object; + __nvoc_init_dataField_RsClient(pThis); + + status = __nvoc_clientConstruct(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsClient_fail__init; + goto __nvoc_ctor_RsClient_exit; // Success + +__nvoc_ctor_RsClient_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsClient_fail_Object: +__nvoc_ctor_RsClient_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsClient_1(RsClient *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__clientValidate__ = &clientValidate_IMPL; + + pThis->__clientFreeResource__ = &clientFreeResource_IMPL; + + pThis->__clientDestructResourceRef__ = &clientDestructResourceRef_IMPL; + + pThis->__clientUnmapMemory__ = &clientUnmapMemory_IMPL; + + pThis->__clientInterMap__ = &clientInterMap_IMPL; + + pThis->__clientInterUnmap__ = &clientInterUnmap_IMPL; + + pThis->__clientValidateNewResourceHandle__ = &clientValidateNewResourceHandle_IMPL; + + pThis->__clientPostProcessPendingFreeList__ = &clientPostProcessPendingFreeList_IMPL; + + pThis->__clientShareResource__ = &clientShareResource_IMPL; +} + +void __nvoc_init_funcTable_RsClient(RsClient *pThis) { + __nvoc_init_funcTable_RsClient_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_RsClient(RsClient *pThis) { + pThis->__nvoc_pbase_RsClient = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_RsClient(pThis); +} + +NV_STATUS __nvoc_objCreate_RsClient(RsClient **ppThis, Dynamic *pParent, NvU32 createFlags, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RsClient *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsClient)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsClient)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsClient); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsClient(pThis); + status = __nvoc_ctor_RsClient(pThis, arg_pAllocator, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsClient_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsClient_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsClient(RsClient **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct PORT_MEM_ALLOCATOR * arg_pAllocator = va_arg(args, struct PORT_MEM_ALLOCATOR *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsClient(ppThis, pParent, createFlags, arg_pAllocator, arg_pParams); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x083442 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +void 
__nvoc_init_RsClientResource(RsClientResource*); +void __nvoc_init_funcTable_RsClientResource(RsClientResource*); +NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RsClientResource(RsClientResource*); +void __nvoc_dtor_RsClientResource(RsClientResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsClientResource; + +static const struct NVOC_RTTI __nvoc_rtti_RsClientResource_RsClientResource = { + /*pClassDef=*/ &__nvoc_class_def_RsClientResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsClientResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsClientResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsClientResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsClientResource, __nvoc_base_RsResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsClientResource = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_RsClientResource_RsClientResource, + &__nvoc_rtti_RsClientResource_RsResource, + &__nvoc_rtti_RsClientResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsClientResource), + /*classId=*/ classId(RsClientResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsClientResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsClientResource, + /*pCastInfo=*/ &__nvoc_castinfo_RsClientResource, + /*pExportInfo=*/ &__nvoc_export_info_RsClientResource +}; + +static NvBool __nvoc_thunk_RsResource_clientresShareCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return resShareCallback((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresControl(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresUnmap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresMapTo(struct RsClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pParams); +} + +static NvU32 __nvoc_thunk_RsResource_clientresGetRefCount(struct RsClientResource *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresControlFilter(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_clientresAddAdditionalDependants(struct RsClient *pClient, struct RsClientResource *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pReference); +} + +static NvBool __nvoc_thunk_RsResource_clientresCanCopy(struct RsClientResource *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresControl_Prologue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl_Prologue((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_clientresPreDestruct(struct RsClientResource *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresUnmapFrom(struct RsClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_clientresControl_Epilogue(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + resControl_Epilogue((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresControlLookup(struct RsClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_clientresMap(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RsResource_clientresAccessCallback(struct RsClientResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return resAccessCallback((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_RsClientResource_RsResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsClientResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsResource(RsResource*); +void __nvoc_dtor_RsClientResource(RsClientResource *pThis) { + __nvoc_clientresDestruct(pThis); + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsClientResource(RsClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsResource(RsResource* , struct CALL_CONTEXT *, struct 
RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_RsClientResource(RsClientResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsResource(&pThis->__nvoc_base_RsResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsClientResource_fail_RsResource; + __nvoc_init_dataField_RsClientResource(pThis); + + status = __nvoc_clientresConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsClientResource_fail__init; + goto __nvoc_ctor_RsClientResource_exit; // Success + +__nvoc_ctor_RsClientResource_fail__init: + __nvoc_dtor_RsResource(&pThis->__nvoc_base_RsResource); +__nvoc_ctor_RsClientResource_fail_RsResource: +__nvoc_ctor_RsClientResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsClientResource_1(RsClientResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__clientresShareCallback__ = &__nvoc_thunk_RsResource_clientresShareCallback; + + pThis->__clientresControl__ = &__nvoc_thunk_RsResource_clientresControl; + + pThis->__clientresUnmap__ = &__nvoc_thunk_RsResource_clientresUnmap; + + pThis->__clientresMapTo__ = &__nvoc_thunk_RsResource_clientresMapTo; + + pThis->__clientresGetRefCount__ = &__nvoc_thunk_RsResource_clientresGetRefCount; + + pThis->__clientresControlFilter__ = &__nvoc_thunk_RsResource_clientresControlFilter; + + pThis->__clientresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_clientresAddAdditionalDependants; + + pThis->__clientresCanCopy__ = &__nvoc_thunk_RsResource_clientresCanCopy; + + pThis->__clientresControl_Prologue__ = &__nvoc_thunk_RsResource_clientresControl_Prologue; + + pThis->__clientresPreDestruct__ = &__nvoc_thunk_RsResource_clientresPreDestruct; + + pThis->__clientresUnmapFrom__ = &__nvoc_thunk_RsResource_clientresUnmapFrom; + + pThis->__clientresControl_Epilogue__ = &__nvoc_thunk_RsResource_clientresControl_Epilogue; + + pThis->__clientresControlLookup__ = &__nvoc_thunk_RsResource_clientresControlLookup; + + pThis->__clientresMap__ = &__nvoc_thunk_RsResource_clientresMap; + + pThis->__clientresAccessCallback__ = &__nvoc_thunk_RsResource_clientresAccessCallback; +} + +void __nvoc_init_funcTable_RsClientResource(RsClientResource *pThis) { + __nvoc_init_funcTable_RsClientResource_1(pThis); +} + +void __nvoc_init_RsResource(RsResource*); +void __nvoc_init_RsClientResource(RsClientResource *pThis) { + pThis->__nvoc_pbase_RsClientResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RsResource; + __nvoc_init_RsResource(&pThis->__nvoc_base_RsResource); + __nvoc_init_funcTable_RsClientResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RsClientResource(RsClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RsClientResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsClientResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsClientResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsClientResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + 
pThis->__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsClientResource(pThis); + status = __nvoc_ctor_RsClientResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsClientResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsClientResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsClientResource(RsClientResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsClientResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_rs_client_nvoc.h b/src/nvidia/generated/g_rs_client_nvoc.h new file mode 100644 index 000000000..c5cd0925d --- /dev/null +++ b/src/nvidia/generated/g_rs_client_nvoc.h @@ -0,0 +1,601 @@ +#ifndef _G_RS_CLIENT_NVOC_H_ +#define _G_RS_CLIENT_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_rs_client_nvoc.h" + +#ifndef _RS_CLIENT_H_ +#define _RS_CLIENT_H_ + + +#include "resserv/resserv.h" +#include "nvport/nvport.h" +#include "resserv/rs_resource.h" +#include "containers/list.h" +#include "utils/nvrange.h" + +#define RS_UNIQUE_HANDLE_BASE (0xcaf00000) +#define RS_UNIQUE_HANDLE_RANGE (0x00080000) + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsClient + * @addtogroup RsClient + * @{*/ + +typedef enum { + CLIENT_TYPE_USER, + CLIENT_TYPE_KERNEL +} CLIENT_TYPE; + +typedef struct AccessBackRef +{ + NvHandle hClient; + NvHandle hResource; +} AccessBackRef; + +MAKE_LIST(AccessBackRefList, AccessBackRef); + +/** + * Information about a client + */ +#ifdef NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsClient { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct RsClient *__nvoc_pbase_RsClient; + NV_STATUS (*__clientValidate__)(struct RsClient *, const API_SECURITY_INFO *); + NV_STATUS (*__clientFreeResource__)(struct RsClient *, RsServer *, struct RS_RES_FREE_PARAMS_INTERNAL *); + NV_STATUS (*__clientDestructResourceRef__)(struct RsClient *, RsServer *, struct RsResourceRef *); + NV_STATUS (*__clientUnmapMemory__)(struct RsClient *, struct RsResourceRef *, struct RS_LOCK_INFO *, struct RsCpuMapping **, API_SECURITY_INFO *); + NV_STATUS (*__clientInterMap__)(struct RsClient *, struct RsResourceRef *, struct RsResourceRef *, struct RS_INTER_MAP_PARAMS *); + void (*__clientInterUnmap__)(struct RsClient *, struct RsResourceRef *, struct RS_INTER_UNMAP_PARAMS *); + NV_STATUS (*__clientValidateNewResourceHandle__)(struct RsClient *, NvHandle, NvBool); + NV_STATUS (*__clientPostProcessPendingFreeList__)(struct RsClient *, struct RsResourceRef **); + NV_STATUS (*__clientShareResource__)(struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *, struct CALL_CONTEXT *); + NvHandle hClient; + CLIENT_TYPE type; + NvBool bActive; + NvBool bResourceWarning; + RsRefMap resourceMap; + AccessBackRefList accessBackRefList; + NvHandle handleRangeStart; + NvHandle handleRangeSize; + struct NV_RANGE handleRestrictRange; + NvHandle handleGenIdx; + RsRefFreeList pendingFreeList; + struct RS_FREE_STACK *pFreeStack; +}; + +#ifndef __NVOC_CLASS_RsClient_TYPEDEF__ +#define __NVOC_CLASS_RsClient_TYPEDEF__ +typedef struct RsClient RsClient; +#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClient +#define __nvoc_class_id_RsClient 0x8f87e5 +#endif /* __nvoc_class_id_RsClient */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient; + +#define __staticCast_RsClient(pThis) \ + ((pThis)->__nvoc_pbase_RsClient) + +#ifdef __nvoc_rs_client_h_disabled +#define __dynamicCast_RsClient(pThis) ((RsClient*)NULL) +#else //__nvoc_rs_client_h_disabled +#define __dynamicCast_RsClient(pThis) \ + ((RsClient*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsClient))) +#endif //__nvoc_rs_client_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsClient(RsClient**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsClient(RsClient**, Dynamic*, NvU32, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RsClient(ppNewObj, pParent, createFlags, arg_pAllocator, arg_pParams) \ + __nvoc_objCreate_RsClient((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pAllocator, arg_pParams) + +#define 
clientValidate(pClient, pSecInfo) clientValidate_DISPATCH(pClient, pSecInfo) +#define clientFreeResource(pClient, pServer, pParams) clientFreeResource_DISPATCH(pClient, pServer, pParams) +#define clientDestructResourceRef(pClient, pServer, pResourceRef) clientDestructResourceRef_DISPATCH(pClient, pServer, pResourceRef) +#define clientUnmapMemory(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) clientUnmapMemory_DISPATCH(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo) +#define clientInterMap(pClient, pMapperRef, pMappableRef, pParams) clientInterMap_DISPATCH(pClient, pMapperRef, pMappableRef, pParams) +#define clientInterUnmap(pClient, pMapperRef, pParams) clientInterUnmap_DISPATCH(pClient, pMapperRef, pParams) +#define clientValidateNewResourceHandle(pClient, hResource, bRestrict) clientValidateNewResourceHandle_DISPATCH(pClient, hResource, bRestrict) +#define clientPostProcessPendingFreeList(pClient, ppFirstLowPriRef) clientPostProcessPendingFreeList_DISPATCH(pClient, ppFirstLowPriRef) +#define clientShareResource(pClient, pResourceRef, pSharePolicy, pCallContext) clientShareResource_DISPATCH(pClient, pResourceRef, pSharePolicy, pCallContext) +NV_STATUS clientValidate_IMPL(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo); + +static inline NV_STATUS clientValidate_DISPATCH(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo) { + return pClient->__clientValidate__(pClient, pSecInfo); +} + +NV_STATUS clientFreeResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS clientFreeResource_DISPATCH(struct RsClient *pClient, RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + return pClient->__clientFreeResource__(pClient, pServer, pParams); +} + +NV_STATUS clientDestructResourceRef_IMPL(struct RsClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef); + +static inline NV_STATUS clientDestructResourceRef_DISPATCH(struct RsClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) { + return pClient->__clientDestructResourceRef__(pClient, pServer, pResourceRef); +} + +NV_STATUS clientUnmapMemory_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo); + +static inline NV_STATUS clientUnmapMemory_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) { + return pClient->__clientUnmapMemory__(pClient, pResourceRef, pLockInfo, ppCpuMapping, pSecInfo); +} + +NV_STATUS clientInterMap_IMPL(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams); + +static inline NV_STATUS clientInterMap_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) { + return pClient->__clientInterMap__(pClient, pMapperRef, pMappableRef, pParams); +} + +void clientInterUnmap_IMPL(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams); + +static inline void clientInterUnmap_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) { + pClient->__clientInterUnmap__(pClient, pMapperRef, pParams); +} + +NV_STATUS clientValidateNewResourceHandle_IMPL(struct RsClient *pClient, NvHandle hResource, NvBool 
bRestrict); + +static inline NV_STATUS clientValidateNewResourceHandle_DISPATCH(struct RsClient *pClient, NvHandle hResource, NvBool bRestrict) { + return pClient->__clientValidateNewResourceHandle__(pClient, hResource, bRestrict); +} + +NV_STATUS clientPostProcessPendingFreeList_IMPL(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef); + +static inline NV_STATUS clientPostProcessPendingFreeList_DISPATCH(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef) { + return pClient->__clientPostProcessPendingFreeList__(pClient, ppFirstLowPriRef); +} + +NV_STATUS clientShareResource_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); + +static inline NV_STATUS clientShareResource_DISPATCH(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + return pClient->__clientShareResource__(pClient, pResourceRef, pSharePolicy, pCallContext); +} + +NV_STATUS clientConstruct_IMPL(struct RsClient *arg_pClient, struct PORT_MEM_ALLOCATOR *arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_clientConstruct(arg_pClient, arg_pAllocator, arg_pParams) clientConstruct_IMPL(arg_pClient, arg_pAllocator, arg_pParams) +void clientDestruct_IMPL(struct RsClient *pClient); +#define __nvoc_clientDestruct(pClient) clientDestruct_IMPL(pClient) +NV_STATUS clientGetResourceByRef_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RsResource **ppResource); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceByRef(struct RsClient *pClient, struct RsResourceRef *pResourceRef, struct RsResource **ppResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceByRef(pClient, pResourceRef, ppResource) clientGetResourceByRef_IMPL(pClient, pResourceRef, ppResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResource_IMPL(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResource **ppResource); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResource(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResource **ppResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResource(pClient, hResource, internalClassId, ppResource) clientGetResource_IMPL(pClient, hResource, internalClassId, ppResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRef_IMPL(struct RsClient *pClient, NvHandle hResource, struct RsResourceRef **ppResourceRef); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRef(struct RsClient *pClient, NvHandle hResource, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRef(pClient, hResource, ppResourceRef) clientGetResourceRef_IMPL(pClient, hResource, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRefWithAccess_IMPL(struct RsClient *pClient, NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, struct RsResourceRef **ppResourceRef); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRefWithAccess(struct RsClient *pClient, 
NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRefWithAccess(pClient, hResource, pRightsRequired, ppResourceRef) clientGetResourceRefWithAccess_IMPL(pClient, hResource, pRightsRequired, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGetResourceRefByType_IMPL(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResourceRef **ppResourceRef); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGetResourceRefByType(struct RsClient *pClient, NvHandle hResource, NvU32 internalClassId, struct RsResourceRef **ppResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGetResourceRefByType(pClient, hResource, internalClassId, ppResourceRef) clientGetResourceRefByType_IMPL(pClient, hResource, internalClassId, ppResourceRef) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAllocResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAllocResource(struct RsClient *pClient, RsServer *pServer, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAllocResource(pClient, pServer, pParams) clientAllocResource_IMPL(pClient, pServer, pParams) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientCopyResource_IMPL(struct RsClient *pClient, RsServer *pServer, struct RS_RES_DUP_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientCopyResource(struct RsClient *pClient, RsServer *pServer, struct RS_RES_DUP_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientCopyResource(pClient, pServer, pParams) clientCopyResource_IMPL(pClient, pServer, pParams) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientGenResourceHandle_IMPL(struct RsClient *pClient, NvHandle *pHandle); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientGenResourceHandle(struct RsClient *pClient, NvHandle *pHandle) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientGenResourceHandle(pClient, pHandle) clientGenResourceHandle_IMPL(pClient, pHandle) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAssignResourceHandle_IMPL(struct RsClient *pClient, NvHandle *phResource); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAssignResourceHandle(struct RsClient *pClient, NvHandle *phResource) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAssignResourceHandle(pClient, phResource) clientAssignResourceHandle_IMPL(pClient, phResource) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientUpdatePendingFreeList_IMPL(struct RsClient *pClient, struct RsResourceRef *pTarget, struct RsResourceRef *pReference, NvBool bMove); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientUpdatePendingFreeList(struct RsClient *pClient, struct RsResourceRef *pTarget, 
struct RsResourceRef *pReference, NvBool bMove) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientUpdatePendingFreeList(pClient, pTarget, pReference, bMove) clientUpdatePendingFreeList_IMPL(pClient, pTarget, pReference, bMove) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientAddAccessBackRef_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientAddAccessBackRef(struct RsClient *pClient, struct RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientAddAccessBackRef(pClient, pResourceRef) clientAddAccessBackRef_IMPL(pClient, pResourceRef) +#endif //__nvoc_rs_client_h_disabled + +void clientFreeAccessBackRefs_IMPL(struct RsClient *pClient, RsServer *pServer); +#ifdef __nvoc_rs_client_h_disabled +static inline void clientFreeAccessBackRefs(struct RsClient *pClient, RsServer *pServer) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); +} +#else //__nvoc_rs_client_h_disabled +#define clientFreeAccessBackRefs(pClient, pServer) clientFreeAccessBackRefs_IMPL(pClient, pServer) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientSetHandleGenerator_IMPL(struct RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientSetHandleGenerator(struct RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientSetHandleGenerator(pClient, handleRangeStart, handleRangeSize) clientSetHandleGenerator_IMPL(pClient, handleRangeStart, handleRangeSize) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientCanShareResource_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientCanShareResource(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext) clientCanShareResource_IMPL(pClient, pResourceRef, pSharePolicy, pCallContext) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientShareResourceTargetClient_IMPL(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext); +#ifdef __nvoc_rs_client_h_disabled +static inline NV_STATUS clientShareResourceTargetClient(struct RsClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientShareResourceTargetClient(pClient, pResourceRef, pSharePolicy, pCallContext) clientShareResourceTargetClient_IMPL(pClient, pResourceRef, pSharePolicy, pCallContext) +#endif //__nvoc_rs_client_h_disabled + +NV_STATUS clientSetRestrictedRange_IMPL(struct RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize); +#ifdef __nvoc_rs_client_h_disabled +static inline 
NV_STATUS clientSetRestrictedRange(struct RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize) { + NV_ASSERT_FAILED_PRECOMP("RsClient was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_client_h_disabled +#define clientSetRestrictedRange(pClient, handleRangeStart, handleRangeSize) clientSetRestrictedRange_IMPL(pClient, handleRangeStart, handleRangeSize) +#endif //__nvoc_rs_client_h_disabled + +#undef PRIVATE_FIELD + + +/** + * Get an iterator to the elements in the client's resource map + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] type RS_ITERATE_CHILDREN, RS_ITERATE_DESCENDANTS, RS_ITERATE_CACHED, RS_ITERATE_DEPENDANTS + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + * + * @note If type=RS_ITERATE_CHILDREN, pScopeRef will restrict iteration to children of the scope ref + * @note If type=RS_ITERATE_DESCENDANTS, pScopeRef will restrict iteration to descendants of the scope ref + * @note If type=RS_ITERATE_CACHED, pScopeRef will restrict iteration to references cached by the scope ref + */ +RS_ITERATOR clientRefIter(struct RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, RS_ITER_TYPE type, NvBool bExactMatch); + +/** + * Get the next iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefIterNext(struct RsClient *pClient, RS_ITERATOR *pIt); + +/** + * Get an iterator to the elements in the client's resource map. + * + * This iterator will visit all descendants in pre-order according to the parent-child + * resource hierarchy. + * + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + */ +RS_ORDERED_ITERATOR clientRefOrderedIter(struct RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, NvBool bExactMatch); + +/** + * Get the next ordered iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefOrderedIterNext(struct RsClient *pClient, RS_ORDERED_ITERATOR *pIt); + + +/** + * RsResource interface to a RsClient + * + * This allows clients to be interfaced with as-if they were resources (e.g., + * to perform a control call on a client). + * + * An RsClientResource is automatically allocated under a client as a top-level + * object when that client is allocated and cannot be explicitly freed. Only + * one RsClientResource is permitted per-client. + * + * Any resource allocated under a client will be a descendant of the client + * proxy resource. 
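+ *
+ * Illustrative sketch (not part of the original header; pCallContext, pParams
+ * and status are assumed to exist in the caller): because the client is
+ * represented by this proxy resource, a control call that targets the client
+ * handle itself resolves to the proxy's RsResourceRef and can then be
+ * dispatched through clientresControl() like any other resource:
+ *
+ *   RsResourceRef *pClientRef;
+ *   if (clientGetResourceRef(pClient, pClient->hClient, &pClientRef) == NV_OK)
+ *   {
+ *       RsClientResource *pClientRes =
+ *           dynamicCast(pClientRef->pResource, RsClientResource);
+ *       if (pClientRes != NULL)
+ *           status = clientresControl(pClientRes, pCallContext, pParams);
+ *   }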
+ * + */ +#ifdef NVOC_RS_CLIENT_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsClientResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsResource __nvoc_base_RsResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RsClientResource *__nvoc_pbase_RsClientResource; + NvBool (*__clientresShareCallback__)(struct RsClientResource *, struct RsClient *, RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__clientresControl__)(struct RsClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__clientresUnmap__)(struct RsClientResource *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__clientresMapTo__)(struct RsClientResource *, RS_RES_MAP_TO_PARAMS *); + NvU32 (*__clientresGetRefCount__)(struct RsClientResource *); + NV_STATUS (*__clientresControlFilter__)(struct RsClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__clientresAddAdditionalDependants__)(struct RsClient *, struct RsClientResource *, RsResourceRef *); + NvBool (*__clientresCanCopy__)(struct RsClientResource *); + NV_STATUS (*__clientresControl_Prologue__)(struct RsClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__clientresPreDestruct__)(struct RsClientResource *); + NV_STATUS (*__clientresUnmapFrom__)(struct RsClientResource *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__clientresControl_Epilogue__)(struct RsClientResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__clientresControlLookup__)(struct RsClientResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__clientresMap__)(struct RsClientResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__clientresAccessCallback__)(struct RsClientResource *, struct RsClient *, void *, RsAccessRight); + struct RsClient *pClient; +}; + +#ifndef __NVOC_CLASS_RsClientResource_TYPEDEF__ +#define __NVOC_CLASS_RsClientResource_TYPEDEF__ +typedef struct RsClientResource RsClientResource; +#endif /* __NVOC_CLASS_RsClientResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsClientResource +#define __nvoc_class_id_RsClientResource 0x083442 +#endif /* __nvoc_class_id_RsClientResource */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClientResource; + +#define __staticCast_RsClientResource(pThis) \ + ((pThis)->__nvoc_pbase_RsClientResource) + +#ifdef __nvoc_rs_client_h_disabled +#define __dynamicCast_RsClientResource(pThis) ((RsClientResource*)NULL) +#else //__nvoc_rs_client_h_disabled +#define __dynamicCast_RsClientResource(pThis) \ + ((RsClientResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsClientResource))) +#endif //__nvoc_rs_client_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsClientResource(RsClientResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsClientResource(RsClientResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RsClientResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RsClientResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define clientresShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) clientresShareCallback_DISPATCH(pResource, pInvokingClient, 
pParentRef, pSharePolicy) +#define clientresControl(pResource, pCallContext, pParams) clientresControl_DISPATCH(pResource, pCallContext, pParams) +#define clientresUnmap(pResource, pCallContext, pCpuMapping) clientresUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define clientresMapTo(pResource, pParams) clientresMapTo_DISPATCH(pResource, pParams) +#define clientresGetRefCount(pResource) clientresGetRefCount_DISPATCH(pResource) +#define clientresControlFilter(pResource, pCallContext, pParams) clientresControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define clientresAddAdditionalDependants(pClient, pResource, pReference) clientresAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define clientresCanCopy(pResource) clientresCanCopy_DISPATCH(pResource) +#define clientresControl_Prologue(pResource, pCallContext, pParams) clientresControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define clientresPreDestruct(pResource) clientresPreDestruct_DISPATCH(pResource) +#define clientresUnmapFrom(pResource, pParams) clientresUnmapFrom_DISPATCH(pResource, pParams) +#define clientresControl_Epilogue(pResource, pCallContext, pParams) clientresControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define clientresControlLookup(pResource, pParams, ppEntry) clientresControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define clientresMap(pResource, pCallContext, pParams, pCpuMapping) clientresMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define clientresAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) clientresAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool clientresShareCallback_DISPATCH(struct RsClientResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__clientresShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS clientresControl_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__clientresControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS clientresUnmap_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__clientresUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS clientresMapTo_DISPATCH(struct RsClientResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__clientresMapTo__(pResource, pParams); +} + +static inline NvU32 clientresGetRefCount_DISPATCH(struct RsClientResource *pResource) { + return pResource->__clientresGetRefCount__(pResource); +} + +static inline NV_STATUS clientresControlFilter_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__clientresControlFilter__(pResource, pCallContext, pParams); +} + +static inline void clientresAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RsClientResource *pResource, RsResourceRef *pReference) { + pResource->__clientresAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvBool clientresCanCopy_DISPATCH(struct RsClientResource *pResource) { + return pResource->__clientresCanCopy__(pResource); +} + +static inline NV_STATUS clientresControl_Prologue_DISPATCH(struct RsClientResource *pResource, struct 
CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__clientresControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline void clientresPreDestruct_DISPATCH(struct RsClientResource *pResource) { + pResource->__clientresPreDestruct__(pResource); +} + +static inline NV_STATUS clientresUnmapFrom_DISPATCH(struct RsClientResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__clientresUnmapFrom__(pResource, pParams); +} + +static inline void clientresControl_Epilogue_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__clientresControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS clientresControlLookup_DISPATCH(struct RsClientResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__clientresControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS clientresMap_DISPATCH(struct RsClientResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__clientresMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool clientresAccessCallback_DISPATCH(struct RsClientResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__clientresAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS clientresConstruct_IMPL(struct RsClientResource *arg_pClientRes, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_clientresConstruct(arg_pClientRes, arg_pCallContext, arg_pParams) clientresConstruct_IMPL(arg_pClientRes, arg_pCallContext, arg_pParams) +void clientresDestruct_IMPL(struct RsClientResource *pClientRes); +#define __nvoc_clientresDestruct(pClientRes) clientresDestruct_IMPL(pClientRes) +#undef PRIVATE_FIELD + + +/** + * Client destruction parameters + */ +struct RS_CLIENT_FREE_PARAMS_INTERNAL +{ + NvHandle hDomain; ///< [in] The parent domain + NvHandle hClient; ///< [in] The client handle + NvBool bHiPriOnly; ///< [in] Only free high priority resources + NvU32 state; ///< [in] User-defined state + + RS_RES_FREE_PARAMS_INTERNAL *pResFreeParams; ///< [in] Necessary for locking state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * Return an iterator to a resource reference multi-map + * @param[in] pIndex The multi-map to iterate + * @param[in] index Return only the references belonging to this index + */ +RsIndexIter indexRefIter(RsIndex *pIndex, NvU32 index); + +/** + * Return an iterator to all resource references in a multi-map + * @param[in] pIndex The multi-map to iterate + */ +RsIndexIter indexRefIterAll(RsIndex *pIndex); + +/** + * Get the next iterator in a resource reference multi-map + * @param[in] pIt Iterator + */ +NvBool indexRefIterNext(RsIndexIter *pIt); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RS_CLIENT_NVOC_H_ diff --git a/src/nvidia/generated/g_rs_resource_nvoc.c b/src/nvidia/generated/g_rs_resource_nvoc.c new file mode 100644 index 000000000..f57685e2c --- /dev/null +++ b/src/nvidia/generated/g_rs_resource_nvoc.c @@ -0,0 +1,186 @@ +#define NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include 
"nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_resource_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xd551cb = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_RsResource(RsResource*); +void __nvoc_init_funcTable_RsResource(RsResource*); +NV_STATUS __nvoc_ctor_RsResource(RsResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_RsResource(RsResource*); +void __nvoc_dtor_RsResource(RsResource*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsResource; + +static const struct NVOC_RTTI __nvoc_rtti_RsResource_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsResource, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsResource_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsResource, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsResource = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_RsResource_RsResource, + &__nvoc_rtti_RsResource_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsResource), + /*classId=*/ classId(RsResource), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsResource", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsResource, + /*pCastInfo=*/ &__nvoc_castinfo_RsResource, + /*pExportInfo=*/ &__nvoc_export_info_RsResource +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsResource = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsResource(RsResource *pThis) { + __nvoc_resDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsResource(RsResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsResource(RsResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_RsResource_fail_Object; + __nvoc_init_dataField_RsResource(pThis); + + status = __nvoc_resConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_RsResource_fail__init; + goto __nvoc_ctor_RsResource_exit; // Success + +__nvoc_ctor_RsResource_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsResource_fail_Object: +__nvoc_ctor_RsResource_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsResource_1(RsResource *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__resCanCopy__ = &resCanCopy_IMPL; + + pThis->__resPreDestruct__ = &resPreDestruct_IMPL; + + pThis->__resControlLookup__ = &resControlLookup_IMPL; + + pThis->__resControl__ = &resControl_IMPL; + + pThis->__resControlFilter__ = &resControlFilter_IMPL; + + pThis->__resControl_Prologue__ = &resControl_Prologue_IMPL; + + pThis->__resControl_Epilogue__ = &resControl_Epilogue_IMPL; + + pThis->__resMap__ = &resMap_IMPL; + + pThis->__resUnmap__ = &resUnmap_IMPL; + + 
pThis->__resMapTo__ = &resMapTo_IMPL; + + pThis->__resUnmapFrom__ = &resUnmapFrom_IMPL; + + pThis->__resGetRefCount__ = &resGetRefCount_IMPL; + + pThis->__resAccessCallback__ = &resAccessCallback_IMPL; + + pThis->__resShareCallback__ = &resShareCallback_IMPL; + + pThis->__resAddAdditionalDependants__ = &resAddAdditionalDependants_IMPL; +} + +void __nvoc_init_funcTable_RsResource(RsResource *pThis) { + __nvoc_init_funcTable_RsResource_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_RsResource(RsResource *pThis) { + pThis->__nvoc_pbase_RsResource = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_RsResource(pThis); +} + +NV_STATUS __nvoc_objCreate_RsResource(RsResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + RsResource *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsResource)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsResource)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsResource); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsResource(pThis); + status = __nvoc_ctor_RsResource(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_RsResource_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsResource_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsResource(RsResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_RsResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_rs_resource_nvoc.h b/src/nvidia/generated/g_rs_resource_nvoc.h new file mode 100644 index 000000000..be788fadb --- /dev/null +++ b/src/nvidia/generated/g_rs_resource_nvoc.h @@ -0,0 +1,860 @@ +#ifndef _G_RS_RESOURCE_NVOC_H_ +#define _G_RS_RESOURCE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_rs_resource_nvoc.h" + +#ifndef _RS_RESOURCE_H_ +#define _RS_RESOURCE_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/object.h" +#include "resserv/rs_access_map.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct RsSession; + +#ifndef __NVOC_CLASS_RsSession_TYPEDEF__ +#define __NVOC_CLASS_RsSession_TYPEDEF__ +typedef struct RsSession RsSession; +#endif /* __NVOC_CLASS_RsSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsSession +#define __nvoc_class_id_RsSession 0x830d90 +#endif /* __nvoc_class_id_RsSession */ + + + +/** + * @defgroup RsResource + * @addtogroup RsResource + * @{*/ + +#define ALLOC_STATE_INTERNAL_CLIENT_HANDLE NVBIT(5) + +/* + * Locking operations for lock-metering + */ +#define RS_LOCK_TRACE_INVALID 1 +#define RS_LOCK_TRACE_ACQUIRE 1 +#define RS_LOCK_TRACE_RELEASE 2 +#define RS_LOCK_TRACE_ALLOC 3 +#define RS_LOCK_TRACE_FREE 4 +#define RS_LOCK_TRACE_CTRL 5 +#define RS_LOCK_TRACE_MAP 6 +#define RS_LOCK_TRACE_UNMAP 7 + +/** + * Context information for top-level, resource-level, and client-level locking + * operations + */ +struct RS_LOCK_INFO +{ + struct RsClient *pClient; ///< Pointer to client that was locked (if any) + struct RsClient *pSecondClient; ///< Pointer to second client, for dual-client locking + RsResourceRef *pContextRef; ///< User-defined reference + struct RsSession *pSession; ///< Session object to be locked, if any + NvU32 flags; ///< RS_LOCK_FLAGS_* + NvU32 state; ///< RS_LOCK_STATE_* + NvU32 gpuMask; + NvU8 traceOp; ///< RS_LOCK_TRACE_* operation for lock-metering + NvU32 traceClassId; ///< Class of initial resource that was locked for lock metering +}; + +struct RS_RES_ALLOC_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hParent; ///< [in] The handle of the resource's parent. This may be a client or another resource. + NvHandle hResource; ///< [inout] Server will assign a handle if this is 0, or else try the value provided + NvU32 externalClassId; ///< [in] External class ID of resource + NvHandle hDomain; ///< UNUSED + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + struct RsClient *pClient; ///< [out] Cached client + RsResourceRef *pResourceRef; ///< [out] Cached resource reference + NvU32 allocFlags; ///< [in] Allocation flags + NvU32 allocState; ///< [inout] Allocation state + API_SECURITY_INFO *pSecInfo; + + void *pAllocParams; ///< [in] Copied-in allocation parameters + + // ... Dupe alloc + struct RsClient *pSrcClient; ///< The client that is sharing the resource + RsResourceRef *pSrcRef; ///< Reference to the resource that will be shared + + RS_ACCESS_MASK *pRightsRequested; ///< [in] Access rights requested on the new resource + // Buffer for storing contents of user mask. Do not use directly, use pRightsRequested instead. 
+ RS_ACCESS_MASK rightsRequestedCopy; + + RS_ACCESS_MASK *pRightsRequired; ///< [in] Access rights required to alloc this object type +}; + +struct RS_RES_DUP_PARAMS_INTERNAL +{ + NvHandle hClientSrc; ///< [in] The handle of the source resource's client + NvHandle hResourceSrc; ///< [in] The handle of the source resource. + NvHandle hClientDst; ///< [in] The handle of the destination resource's client (may be different from source client) + NvHandle hParentDst; ///< [in] The handle of the destination resource's parent. + NvHandle hResourceDst; ///< [inout] The handle of the destination resource. Generated if 0. + void *pShareParams; ///< [in] Copied-in sharing parameters + NvU32 flags; ///< [in] Flags to denote special cases ( Bug: 2859347 to track removal) + // Internal use only + struct RsClient *pSrcClient; + RsResourceRef *pSrcRef; + API_SECURITY_INFO *pSecInfo; ///< [in] Security info + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state +}; + +struct RS_RES_SHARE_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the owner's client + NvHandle hResource; ///< [in] The handle of the resource. + RS_SHARE_POLICY *pSharePolicy; ///< [in] The policy to share with + + // Internal use only + API_SECURITY_INFO *pSecInfo; ///< [in] Security info + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state +}; + +#define RS_IS_COPY_CTOR(pParams) ((pParams)->pSrcRef != NULL) + +struct RS_RES_FREE_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hResource; ///< [in] The handle of the resource + NvBool bInvalidateOnly; ///< [in] Free the resource, but don't release its handle + NvHandle hDomain; ///< UNUSED + + // Internal use only + NvBool bHiPriOnly; ///< [in] Only free if this is a high priority resources + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + NvU32 freeFlags; ///< [in] Flags for the free operation + NvU32 freeState; ///< [inout] Free state + RsResourceRef *pResourceRef; ///< [inout] Cached RsResourceRef + NV_STATUS status; ///< [out] Status of free operation + API_SECURITY_INFO *pSecInfo; ///< [in] Security info +}; + +struct NVOC_EXPORTED_METHOD_DEF; +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct OBJGPUGRP; + +#ifndef __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ +typedef struct OBJGPUGRP OBJGPUGRP; +#endif /* __NVOC_CLASS_OBJGPUGRP_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUGRP +#define __nvoc_class_id_OBJGPUGRP 0xe40531 +#endif /* __nvoc_class_id_OBJGPUGRP */ + + + +// +// RS_RES_CONTROL_PARAMS +// +// This structure encapsulates data sent to the cmd-specific rmctrl +// handlers. Along with the arguments supplied by the requesting +// client (hClient, hObject, cmd, pParams, paramSize). 
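+//
+// Illustrative sketch (hypothetical caller values, not part of the original
+// header): a control request arriving with (hClient, hObject, cmd, pParams,
+// paramsSize) would populate the client-visible fields below before being
+// dispatched to the target resource's resControl() virtual, roughly:
+//
+//   RS_RES_CONTROL_PARAMS_INTERNAL params = {0};
+//   params.hClient    = hClient;    // client-specified NV01_ROOT handle
+//   params.hObject    = hObject;    // handle of the target object
+//   params.cmd        = cmd;        // control command identifier
+//   params.pParams    = pParams;    // client params (copied into kernel space)
+//   params.paramsSize = paramsSize; // size of pParams in bytes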
+// +struct RS_RES_CONTROL_PARAMS_INTERNAL +{ + NvHandle hClient; // client-specified NV01_ROOT object handle + NvHandle hObject; // client-specified object handle + NvU32 cmd; // client-specified command # + NvU32 flags; // flags related to control call execution + void *pParams; // client-specified params (in kernel space) + NvU32 paramsSize; // client-specified size of pParams in bytes + + NvHandle hParent; // handle of hObject parent + struct OBJGPU *pGpu; // ptr to OBJGPU struct if applicable + struct OBJGPUGRP *pGpuGrp; // ptr to OBJGPUGRP struct if applicable + RsResourceRef *pResourceRef; // ptr to RsResourceRef if object is managed by + // Resource Server + API_SECURITY_INFO secInfo; // information on privilege level and pointer location (user/kernel) + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + RS_CONTROL_COOKIE *pCookie; + NvBool bInternal; // True if control call was not issued from an external client + NvBool bDeferredApi; // Indicates ctrl is being dispatched via deferred API + + struct RS_RES_CONTROL_PARAMS_INTERNAL *pLegacyParams; // RS-TODO removeme +}; + +struct RS_RES_DTOR_PARAMS +{ + CALL_CONTEXT *pFreeContext; + RS_RES_FREE_PARAMS_INTERNAL *pFreeParams; +}; + +/** + * Base class for all resources. Mostly a pure virtual interface which + * should be overridden to implement resource specific behavior. + */ +#ifdef NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsResource { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + NvBool (*__resCanCopy__)(struct RsResource *); + void (*__resPreDestruct__)(struct RsResource *); + NV_STATUS (*__resControlLookup__)(struct RsResource *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__resControl__)(struct RsResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__resControlFilter__)(struct RsResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__resControl_Prologue__)(struct RsResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__resControl_Epilogue__)(struct RsResource *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__resMap__)(struct RsResource *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NV_STATUS (*__resUnmap__)(struct RsResource *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__resMapTo__)(struct RsResource *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__resUnmapFrom__)(struct RsResource *, RS_RES_UNMAP_FROM_PARAMS *); + NvU32 (*__resGetRefCount__)(struct RsResource *); + NvBool (*__resAccessCallback__)(struct RsResource *, struct RsClient *, void *, RsAccessRight); + NvBool (*__resShareCallback__)(struct RsResource *, struct RsClient *, RsResourceRef *, RS_SHARE_POLICY *); + void (*__resAddAdditionalDependants__)(struct RsClient *, struct RsResource *, RsResourceRef *); + RsResourceRef *pResourceRef; + struct RS_RES_DTOR_PARAMS dtorParams; + NvBool bConstructed; +}; + +#ifndef __NVOC_CLASS_RsResource_TYPEDEF__ +#define __NVOC_CLASS_RsResource_TYPEDEF__ +typedef struct RsResource RsResource; +#endif /* __NVOC_CLASS_RsResource_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsResource +#define __nvoc_class_id_RsResource 0xd551cb +#endif /* __nvoc_class_id_RsResource */ + +extern const struct 
NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +#define __staticCast_RsResource(pThis) \ + ((pThis)->__nvoc_pbase_RsResource) + +#ifdef __nvoc_rs_resource_h_disabled +#define __dynamicCast_RsResource(pThis) ((RsResource*)NULL) +#else //__nvoc_rs_resource_h_disabled +#define __dynamicCast_RsResource(pThis) \ + ((RsResource*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsResource))) +#endif //__nvoc_rs_resource_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsResource(RsResource**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsResource(RsResource**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_RsResource(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_RsResource((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define resCanCopy(pResource) resCanCopy_DISPATCH(pResource) +#define resPreDestruct(pResource) resPreDestruct_DISPATCH(pResource) +#define resControlLookup(pResource, pParams, ppEntry) resControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define resControl(pResource, pCallContext, pParams) resControl_DISPATCH(pResource, pCallContext, pParams) +#define resControlFilter(pResource, pCallContext, pParams) resControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define resControl_Prologue(pResource, pCallContext, pParams) resControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define resControl_Epilogue(pResource, pCallContext, pParams) resControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define resMap(pResource, pCallContext, pParams, pCpuMapping) resMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define resUnmap(pResource, pCallContext, pCpuMapping) resUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define resMapTo(pResource, pParams) resMapTo_DISPATCH(pResource, pParams) +#define resUnmapFrom(pResource, pParams) resUnmapFrom_DISPATCH(pResource, pParams) +#define resGetRefCount(pResource) resGetRefCount_DISPATCH(pResource) +#define resAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) resAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define resShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) resShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define resAddAdditionalDependants(pClient, pResource, pReference) resAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +NvBool resCanCopy_IMPL(struct RsResource *pResource); + +static inline NvBool resCanCopy_DISPATCH(struct RsResource *pResource) { + return pResource->__resCanCopy__(pResource); +} + +void resPreDestruct_IMPL(struct RsResource *pResource); + +static inline void resPreDestruct_DISPATCH(struct RsResource *pResource) { + pResource->__resPreDestruct__(pResource); +} + +NV_STATUS resControlLookup_IMPL(struct RsResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry); + +static inline NV_STATUS resControlLookup_DISPATCH(struct RsResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__resControlLookup__(pResource, pParams, ppEntry); +} + +NV_STATUS resControl_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS resControl_DISPATCH(struct RsResource 
*pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__resControl__(pResource, pCallContext, pParams); +} + +NV_STATUS resControlFilter_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS resControlFilter_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__resControlFilter__(pResource, pCallContext, pParams); +} + +NV_STATUS resControl_Prologue_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS resControl_Prologue_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__resControl_Prologue__(pResource, pCallContext, pParams); +} + +void resControl_Epilogue_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline void resControl_Epilogue_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__resControl_Epilogue__(pResource, pCallContext, pParams); +} + +NV_STATUS resMap_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS resMap_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__resMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +NV_STATUS resUnmap_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + +static inline NV_STATUS resUnmap_DISPATCH(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__resUnmap__(pResource, pCallContext, pCpuMapping); +} + +NV_STATUS resMapTo_IMPL(struct RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); + +static inline NV_STATUS resMapTo_DISPATCH(struct RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__resMapTo__(pResource, pParams); +} + +NV_STATUS resUnmapFrom_IMPL(struct RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); + +static inline NV_STATUS resUnmapFrom_DISPATCH(struct RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__resUnmapFrom__(pResource, pParams); +} + +NvU32 resGetRefCount_IMPL(struct RsResource *pResource); + +static inline NvU32 resGetRefCount_DISPATCH(struct RsResource *pResource) { + return pResource->__resGetRefCount__(pResource); +} + +NvBool resAccessCallback_IMPL(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + +static inline NvBool resAccessCallback_DISPATCH(struct RsResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__resAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NvBool resShareCallback_IMPL(struct RsResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + +static inline NvBool resShareCallback_DISPATCH(struct RsResource *pResource, struct RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return 
pResource->__resShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +void resAddAdditionalDependants_IMPL(struct RsClient *pClient, struct RsResource *pResource, RsResourceRef *pReference); + +static inline void resAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct RsResource *pResource, RsResourceRef *pReference) { + pResource->__resAddAdditionalDependants__(pClient, pResource, pReference); +} + +NV_STATUS resConstruct_IMPL(struct RsResource *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_resConstruct(arg_pResource, arg_pCallContext, arg_pParams) resConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void resDestruct_IMPL(struct RsResource *pResource); +#define __nvoc_resDestruct(pResource) resDestruct_IMPL(pResource) +NV_STATUS resSetFreeParams_IMPL(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_FREE_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_rs_resource_h_disabled +static inline NV_STATUS resSetFreeParams(struct RsResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("RsResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_resource_h_disabled +#define resSetFreeParams(pResource, pCallContext, pParams) resSetFreeParams_IMPL(pResource, pCallContext, pParams) +#endif //__nvoc_rs_resource_h_disabled + +NV_STATUS resGetFreeParams_IMPL(struct RsResource *pResource, struct CALL_CONTEXT **ppCallContext, struct RS_RES_FREE_PARAMS_INTERNAL **ppParams); +#ifdef __nvoc_rs_resource_h_disabled +static inline NV_STATUS resGetFreeParams(struct RsResource *pResource, struct CALL_CONTEXT **ppCallContext, struct RS_RES_FREE_PARAMS_INTERNAL **ppParams) { + NV_ASSERT_FAILED_PRECOMP("RsResource was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_resource_h_disabled +#define resGetFreeParams(pResource, ppCallContext, ppParams) resGetFreeParams_IMPL(pResource, ppCallContext, ppParams) +#endif //__nvoc_rs_resource_h_disabled + +#undef PRIVATE_FIELD + + +/* @} */ + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + + +/** + * @defgroup RsCpuMapping + * @addtogroup RsCpuMapping + * @{*/ +struct RsCpuMapping +{ + NvU64 offset; + NvU64 length; + NvU32 flags; + NvP64 pLinearAddress; + RsResourceRef *pContextRef; ///< Context resource that may be needed for the mapping + void *pContext; ///< Additional context data for the mapping + NvU32 processId; + + RS_CPU_MAPPING_PRIVATE *pPrivate; ///< Opaque struct allocated and freed by resserv on behalf of the user +}; +MAKE_LIST(RsCpuMappingList, RsCpuMapping); + +/** + * CPU mapping parameters + */ +struct RS_CPU_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; ///< [in] Offset into the resource + NvU64 length; ///< [in] Size of the region to map + NvP64 *ppCpuVirtAddr; + NvU32 flags; ///< [in] Resource-specific flags + + // Passed from RM into CpuMapping + NvU32 protect; ///< [in] Protection flags + NvBool bKernel; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO 
*pSecInfo; ///< [in] Security Info +}; + +/** + * CPU unmapping params for resource server tests + */ +struct RS_CPU_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; ///< [in] Address of mapped memory + NvU32 flags; ///< [in] Resource-specific flags + NvU32 processId; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + // RM-only + void *pProcessHandle; + + NvBool (*fnFilter)(RsCpuMapping*); ///< [in] Mapping-filter function + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU mapping back-reference + */ +struct RS_CPU_MAPPING_BACK_REF +{ + RsCpuMapping *pCpuMapping; ///< Mapping linked to this backref + RsResourceRef *pBackRef; ///< Resource reference with mapping +}; +MAKE_LIST(RsCpuMappingBackRefList, RS_CPU_MAPPING_BACK_REF); +/* @} */ + +/** + * @defgroup RsInterMapping + * @addtogroup RsInterMapping + * @{*/ +struct RS_INTER_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU64 offset; + NvU64 length; + NvU32 flags; + NvU64 dmaOffset; ///< [inout] RS-TODO rename this + void *pMemDesc; ///< [out] + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_MAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +struct RS_INTER_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU32 flags; + NvU64 dmaOffset; ///< [in] RS-TODO rename this + void *pMemDesc; ///< MEMORY_DESCRIPTOR * + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_UNMAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +/** + * Inter-mapping information + * Used to keep track of inter-mappings and unmap them on free + */ +struct RsInterMapping +{ + // RsResourceRef *pMapperRef ///< (Implied) the resource that created and owns this mapping (this resource) + RsResourceRef *pMappableRef; ///< The resource being mapped by the mapper (e.g. hMemory) + RsResourceRef *pContextRef; ///< A resource used to provide additional context for the mapping (e.g. hDevice) + NvU32 flags; ///< Flags passed when mapping, same flags also passed when unmapping + NvU64 dmaOffset; + void *pMemDesc; +}; +MAKE_LIST(RsInterMappingList, RsInterMapping); + +/** + * Inter-mapping back-reference + */ +struct RS_INTER_MAPPING_BACK_REF +{ + RsResourceRef *pMapperRef; ///< Resource reference with mapping + RsInterMapping *pMapping; ///< Pointer to the inter-mapping linked to this backref +}; +MAKE_LIST(RsInterMappingBackRefList, RS_INTER_MAPPING_BACK_REF); +/* @} */ + +typedef struct RS_RESOURCE_DESC RS_RESOURCE_DESC; +RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32); +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *); + +/** + * A reference to a resource that has been allocated in RM. 
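+ *
+ * The reference, rather than the RsResource itself, carries the per-client
+ * bookkeeping: parent/child links, CPU and inter-resource mapping lists, and
+ * dependency maps. Given a resource pointer, the RES_GET_* helpers defined
+ * further below recover this reference and its owner, e.g. (hypothetical
+ * resource pointer pResource):
+ *
+ *   RsResourceRef   *pRef   = RES_GET_REF(pResource);
+ *   struct RsClient *pOwner = RES_GET_CLIENT(pResource);
+ *   NvHandle         hRes   = RES_GET_HANDLE(pResource);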
+ */ +struct RsResourceRef +{ + struct RsClient *pClient; ///< Pointer to the client that owns the ref + struct RsResource *pResource; ///< Pointer to the actual resource + NvHandle hResource; ///< Resource handle + struct RsResourceRef *pParentRef; ///< Parent resource reference + RsIndex childRefMap; ///< Child reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + + /** + * Cached reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * The resource reference cache is a one-way association between this resource reference and + * any other resource reference. Resource server does not populate the cache so it is up to the + * resource implementation to manage it. clientRefIter can be used to iterate this cache. + */ + RsIndex cachedRefMap; + + /** + * Dependants reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * A map of all resources that strongly depend on this resource. + */ + RsIndex depRefMap; + + /** + * Dependants back-reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * AKA dependencies map + * + * A map of all resources that this resource strongly depends on. + */ + RsIndex depBackRefMap; + + /** + * Policy under which this resource can be shared with other clients + */ + RsShareList sharePolicyList; + NvBool bSharePolicyListModified; + + /** + * A mask of the access rights that the owner client has on this object. + */ + RS_ACCESS_MASK accessMask; + + const RS_RESOURCE_DESC *pResourceDesc; ///< Cached pointer to the resource descriptor + NvU32 internalClassId; ///< Internal resource class id + NvU32 externalClassId; ///< External resource class id + NvU32 depth; ///< The depth of this reference in the resource graph + NvBool bInvalidated; ///< Reference has been freed but not removed yet + + RsCpuMappingList cpuMappings; ///< List of CPU mappings to the resource from this resource reference + RsCpuMappingBackRefList backRefs; ///< List of references that have this reference as a mapping context + + RsInterMappingList interMappings; ///< List of inter-resource mappings created by this resource + RsInterMappingBackRefList interBackRefs; ///< List of inter-resource mappings this resource has been mapped into + + struct RsSession *pSession; ///< If set, this ref depends on a shared session + struct RsSession *pDependantSession; ///< If set, this ref is depended on by a shared session + + ListNode freeNode; ///< Links to the client's pendingFreeList +}; +MAKE_MAP(RsRefMap, RsResourceRef); +MAKE_INTRUSIVE_LIST(RsRefFreeList, RsResourceRef, freeNode); + + +// Iterator data structure to save state while walking through a list +struct RS_ITERATOR +{ + union + { + RsRefMapIter mapIt; ///< Map iterator for all resource references under a client + RsIndexIter idxIt; ///< Index iterator for child references of a resource reference + }; + + struct RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over + NvU8 type; ///< RS_ITERATE_* + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId +}; + +// Iterator data structure to save state while walking through a resource tree in pre-order +struct RS_ORDERED_ITERATOR +{ + NvS8 depth; ///< Depth of index stack; special value of -1 implies that the scope reference should be iterated over as well + RsIndexIter 
idxIt[RS_MAX_RESOURCE_DEPTH+1]; ///< Stack of index iterators for child references of a resource reference + + struct RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over +}; + +/** + * Macro for looking up a reference from a resource + */ +#define RES_GET_REF(pResource) (staticCast((pResource), RsResource)->pResourceRef) + +/** + * Macro for looking up a resource handle from a resource + */ +#define RES_GET_HANDLE(pResource) (RES_GET_REF(pResource)->hResource) + +/** + * Macro for looking up a resource's external class from a resource + */ +#define RES_GET_EXT_CLASS_ID(pResource) (RES_GET_REF(pResource)->externalClassId) + +/** + * Macro for looking up a resource's parent handle from a resource + */ +#define RES_GET_PARENT_HANDLE(pResource) (RES_GET_REF(pResource)->pParentRef->hResource) + +/** + * Macro for looking up a client from a resource + */ +#define RES_GET_CLIENT(pResource) (RES_GET_REF(pResource)->pClient) + +/** + * Macro for looking up a client handle from a resource + */ +#define RES_GET_CLIENT_HANDLE(pResource) (RES_GET_REF(pResource)->pClient->hClient) + +/** + * Find a CPU mapping owned by a resource reference + * + * @param[in] pResourceRef + * @param[in] pAddress The CPU virtual address of the mapping to search for + * @param[out] ppMapping The returned mapping + */ +NV_STATUS refFindCpuMapping(RsResourceRef *pResourceRef, NvP64 pAddress, RsCpuMapping **ppMapping); + +/** + * Find a CPU mapping owned by a resource reference + * + * @param[in] pResourceRef + * @param[in] pAddress The CPU virtual address of the mapping to search for + * @param[in] fnFilter A user-provided filtering function that determines which mappings to ignore. + * If fnFilter is provided, then we will only return mappings for which fnFilter(mapping) returns NV_TRUE + * All mappings will be searched over if fnFilter is NULL. + * @param[out] ppMapping The returned mapping
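+ *
+ * Illustrative sketch (not generated code): a filter that only accepts
+ * mappings owned by one process. g_targetProcessId is a hypothetical value
+ * captured by the caller beforehand; pResourceRef and pAddress are assumed
+ * to already exist.
+ * @code
+ *   static NvU32 g_targetProcessId;
+ *
+ *   static NvBool _mappingMatchesTargetProcess(RsCpuMapping *pMapping)
+ *   {
+ *       return pMapping->processId == g_targetProcessId;
+ *   }
+ *
+ *   RsCpuMapping *pCpuMapping = NULL;
+ *   NV_STATUS status = refFindCpuMappingWithFilter(pResourceRef, pAddress,
+ *                                                  _mappingMatchesTargetProcess,
+ *                                                  &pCpuMapping);
+ * @endcode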
+ */ +NV_STATUS refFindCpuMappingWithFilter(RsResourceRef *pResourceRef, NvP64 pAddress, NvBool (*fnFilter)(RsCpuMapping*), RsCpuMapping **ppMapping); + +/** + * Find the first child object of given type + * + * @param[in] pParentRef + * @param[in] internalClassId + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + * @param[out] pResourceRef The returned RsResourceRef (Optional) + */ +NV_STATUS refFindChildOfType(RsResourceRef *pParentRef, NvU32 internalClassId, NvBool bExactMatch, RsResourceRef **ppResourceRef); + +/** + * Traverse up the reference parent-child hierarchy to find an ancestor reference of a given type + * + * @param[in] pDescendantRef + * @param[in] internalClassId + * @param[out] ppAncestorRef The returned RsResourceRef (Optional) + */ +NV_STATUS refFindAncestorOfType(RsResourceRef *pDescendantRef, NvU32 internalClassId, RsResourceRef **ppAncestorRef); + +/** + * Traverse up the reference parent-child hierarchy to find if a ref is a descendant of a given ancestor ref + * + * @param[in] pDescendantRef The node to start searching from (not included in the search) + * @param[in] pAncestorRef The node to search for in the parent-child hierarchy + */ +NvBool refHasAncestor(RsResourceRef *pDescendantRef, RsResourceRef *pAncestorRef); + +/** + * Add a new mapping to a reference's mapping list + * @param[in] pResourceRef The reference to add a mapping to + * @param[in] pMapParams The parameters used to initialize the mapping + * @param[in] pContextRef A reference to a resource that provides a context for the mapping + * @param[out] ppMapping Pointer to the allocated mapping [optional] + */ +NV_STATUS refAddMapping(RsResourceRef *pResourceRef, RS_CPU_MAP_PARAMS *pMapParams, + RsResourceRef *pContextRef, RsCpuMapping **ppMapping); + +/** + * Remove an existing mapping from a reference's mapping list and remove back-references to the mapping. + * @param[in] pResourceRef The reference to add a mapping to + * @param[in] pMapping Pointer to the allocated mapping + */ +void refRemoveMapping(RsResourceRef *pResourceRef, RsCpuMapping *pMapping); + +/** + * Allocate the user-controlled private pointer within the RsCpuMapping struct. + * Resserv will call this function to alloc the private struct when the mapping is created + * @param[in] pMapParams The parameters which were used to create the mapping + * @param[inout] pMapping Pointer to the mapping whose private struct should be allocated + */ +NV_STATUS refAllocCpuMappingPrivate(RS_CPU_MAP_PARAMS *pMapParams, RsCpuMapping *pMapping); + +/** + * Free the user-controlled private pointer within the RsCpuMapping struct. + * Resserv will call this function to free the private struct when the mapping is removed + * @param[inout] pMapping Pointer to the mapping whose private struct should be freed + */ +void refFreeCpuMappingPrivate(RsCpuMapping *pMapping); + +/** + * Add a dependency between this resource reference and a dependent reference. + * If this reference is freed, the dependent will be invalidated and torn down. + * + * @note Dependencies are implicit between a parent resource reference and child resource reference + * @note No circular dependency checking is performed + */ +NV_STATUS refAddDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Remove the dependency between this resource reference and a dependent resource reference. 
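+ *
+ * Illustrative sketch (not generated code): pairing refAddDependant with
+ * refRemoveDependant. pRefA and pRefB are hypothetical references; after the
+ * add, freeing pRefA would also invalidate and tear down pRefB.
+ * @code
+ *   NV_STATUS status = refAddDependant(pRefA, pRefB);
+ *   if (status == NV_OK)
+ *   {
+ *       // ... later, if the dependency is no longer needed:
+ *       status = refRemoveDependant(pRefA, pRefB);
+ *   }
+ * @endcode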
+ */ +NV_STATUS refRemoveDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Find, Add, or Remove an inter-mapping between two resources to the Mapper's list of inter-mappings + * Inter-mappings are stored in the Mapper, and are matched by both the MappableRef and offset. + * + * @param[in] pMapperRef The reference which owns the inter-mapping + * @param[in] pMappableRef The reference which was mapped from to create the inter-mapping + * If NULL, will be ignored while matching inter-mappings + * @param[in] dmaOffset The offset value assigned while mapping, used to identify mappings + * @param[in] pContextRef A reference used during mapping and locking for additional context, used to identify mappings + * @param[inout] ppMapping Writes the resulting inter-mapping, if successfully created (Add) or found (Find) + * @param[in] pMapping The inter-mapping to remove (Remove) + */ +NV_STATUS refFindInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, NvU64 dmaOffset, RsInterMapping **ppMapping); +NV_STATUS refAddInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, RsInterMapping **ppMapping); +void refRemoveInterMapping(RsResourceRef *pMapperRef, RsInterMapping *pMapping); + +/** + * Store a resource reference in another reference's cache. + * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to store in the cache + */ +NV_STATUS refCacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Remove a resource reference from another reference's cache + * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to de-index + */ +NV_STATUS refUncacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Determine whether a reference is queued for removal + * @param[in] pResourceRef + * @param[in] pClient + */ +NvBool refPendingFree(RsResourceRef *pResourceRef, struct RsClient *pClient); + + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RS_RESOURCE_NVOC_H_ diff --git a/src/nvidia/generated/g_rs_server_nvoc.c b/src/nvidia/generated/g_rs_server_nvoc.c new file mode 100644 index 000000000..2af2871c4 --- /dev/null +++ b/src/nvidia/generated/g_rs_server_nvoc.c @@ -0,0 +1,313 @@ +#define NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_rs_server_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x830542 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_funcTable_RsShared(RsShared*); +NV_STATUS __nvoc_ctor_RsShared(RsShared*); +void __nvoc_init_dataField_RsShared(RsShared*); +void __nvoc_dtor_RsShared(RsShared*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsShared; + +static const struct NVOC_RTTI __nvoc_rtti_RsShared_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsShared, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsShared_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsShared, 
__nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsShared = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_RsShared_RsShared, + &__nvoc_rtti_RsShared_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsShared), + /*classId=*/ classId(RsShared), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsShared", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsShared, + /*pCastInfo=*/ &__nvoc_castinfo_RsShared, + /*pExportInfo=*/ &__nvoc_export_info_RsShared +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsShared = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_RsShared(RsShared *pThis) { + __nvoc_shrDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsShared(RsShared *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_RsShared(RsShared *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_RsShared_fail_Object; + __nvoc_init_dataField_RsShared(pThis); + + status = __nvoc_shrConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RsShared_fail__init; + goto __nvoc_ctor_RsShared_exit; // Success + +__nvoc_ctor_RsShared_fail__init: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_RsShared_fail_Object: +__nvoc_ctor_RsShared_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsShared_1(RsShared *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_RsShared(RsShared *pThis) { + __nvoc_init_funcTable_RsShared_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_RsShared(RsShared *pThis) { + pThis->__nvoc_pbase_RsShared = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_RsShared(pThis); +} + +NV_STATUS __nvoc_objCreate_RsShared(RsShared **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + RsShared *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsShared)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsShared)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsShared); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsShared(pThis); + status = __nvoc_ctor_RsShared(pThis); + if (status != NV_OK) goto __nvoc_objCreate_RsShared_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsShared_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsShared(RsShared **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_RsShared(ppThis, pParent, createFlags); + + return status; +} + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x830d90 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const 
struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +void __nvoc_init_RsSession(RsSession*); +void __nvoc_init_funcTable_RsSession(RsSession*); +NV_STATUS __nvoc_ctor_RsSession(RsSession*); +void __nvoc_init_dataField_RsSession(RsSession*); +void __nvoc_dtor_RsSession(RsSession*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RsSession; + +static const struct NVOC_RTTI __nvoc_rtti_RsSession_RsSession = { + /*pClassDef=*/ &__nvoc_class_def_RsSession, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RsSession, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsSession_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsSession, __nvoc_base_RsShared.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_RsSession_RsShared = { + /*pClassDef=*/ &__nvoc_class_def_RsShared, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(RsSession, __nvoc_base_RsShared), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_RsSession = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_RsSession_RsSession, + &__nvoc_rtti_RsSession_RsShared, + &__nvoc_rtti_RsSession_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession = +{ + /*classInfo=*/ { + /*size=*/ sizeof(RsSession), + /*classId=*/ classId(RsSession), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "RsSession", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RsSession, + /*pCastInfo=*/ &__nvoc_castinfo_RsSession, + /*pExportInfo=*/ &__nvoc_export_info_RsSession +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_RsSession = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RsShared(RsShared*); +void __nvoc_dtor_RsSession(RsSession *pThis) { + __nvoc_sessionDestruct(pThis); + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_RsSession(RsSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RsShared(RsShared* ); +NV_STATUS __nvoc_ctor_RsSession(RsSession *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared); + if (status != NV_OK) goto __nvoc_ctor_RsSession_fail_RsShared; + __nvoc_init_dataField_RsSession(pThis); + + status = __nvoc_sessionConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_RsSession_fail__init; + goto __nvoc_ctor_RsSession_exit; // Success + +__nvoc_ctor_RsSession_fail__init: + __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared); +__nvoc_ctor_RsSession_fail_RsShared: +__nvoc_ctor_RsSession_exit: + + return status; +} + +static void __nvoc_init_funcTable_RsSession_1(RsSession *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__sessionRemoveDependant__ = &sessionRemoveDependant_IMPL; + + pThis->__sessionRemoveDependency__ = &sessionRemoveDependency_IMPL; +} + +void __nvoc_init_funcTable_RsSession(RsSession *pThis) { + __nvoc_init_funcTable_RsSession_1(pThis); +} + +void __nvoc_init_RsShared(RsShared*); +void __nvoc_init_RsSession(RsSession *pThis) { + pThis->__nvoc_pbase_RsSession = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object; + pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared; + __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared); + __nvoc_init_funcTable_RsSession(pThis); +} + +NV_STATUS __nvoc_objCreate_RsSession(RsSession **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS 
status; + Object *pParentObj; + RsSession *pThis; + + pThis = portMemAllocNonPaged(sizeof(RsSession)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(RsSession)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RsSession); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_RsSession(pThis); + status = __nvoc_ctor_RsSession(pThis); + if (status != NV_OK) goto __nvoc_objCreate_RsSession_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_RsSession_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_RsSession(RsSession **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_RsSession(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_rs_server_nvoc.h b/src/nvidia/generated/g_rs_server_nvoc.h new file mode 100644 index 000000000..47e49cd1f --- /dev/null +++ b/src/nvidia/generated/g_rs_server_nvoc.h @@ -0,0 +1,1062 @@ +#ifndef _G_RS_SERVER_NVOC_H_ +#define _G_RS_SERVER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_rs_server_nvoc.h" + +#ifndef _RS_SERVER_H_ +#define _RS_SERVER_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsServer + * @addtogroup RsServer + * @{*/ + +/** + * Book-keeping for individual client locks + */ +struct CLIENT_ENTRY +{ + PORT_RWLOCK *pLock; + struct RsClient *pClient; + NvHandle hClient; + NvU64 lockOwnerTid; ///< Thread id of the lock owner + +#if LOCK_VAL_ENABLED + LOCK_VAL_LOCK lockVal; +#endif +}; + +/** + * Base-class for objects that are shared among multiple + * RsResources (including RsResources from other clients) + */ +#ifdef NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsShared { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + NvS32 refCount; + struct MapNode node; +}; + +#ifndef __NVOC_CLASS_RsShared_TYPEDEF__ +#define __NVOC_CLASS_RsShared_TYPEDEF__ +typedef struct RsShared RsShared; +#endif /* __NVOC_CLASS_RsShared_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsShared +#define __nvoc_class_id_RsShared 0x830542 +#endif /* __nvoc_class_id_RsShared */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared; + +#define __staticCast_RsShared(pThis) \ + ((pThis)->__nvoc_pbase_RsShared) + +#ifdef __nvoc_rs_server_h_disabled +#define __dynamicCast_RsShared(pThis) ((RsShared*)NULL) +#else //__nvoc_rs_server_h_disabled +#define __dynamicCast_RsShared(pThis) \ + ((RsShared*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsShared))) +#endif //__nvoc_rs_server_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsShared(RsShared**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsShared(RsShared**, Dynamic*, NvU32); +#define __objCreate_RsShared(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_RsShared((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS shrConstruct_IMPL(struct RsShared *arg_pShared); +#define __nvoc_shrConstruct(arg_pShared) shrConstruct_IMPL(arg_pShared) +void shrDestruct_IMPL(struct RsShared *pShared); +#define __nvoc_shrDestruct(pShared) shrDestruct_IMPL(pShared) +#undef PRIVATE_FIELD + +MAKE_INTRUSIVE_MAP(RsSharedMap, RsShared, node); + +/** + * Utility class for objects that can reference + * multiple client handle spaces. Free's and control calls + * that occur on objects which reference an RsSession will + * need to acquire pLock first. 
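+ *
+ * Illustrative sketch (not generated code): allocating a session through the
+ * shared-object interface and attaching a dependant reference. pServer and
+ * pResourceRef are hypothetical, already-valid pointers, and classInfo() is
+ * assumed to yield the NVOC_CLASS_INFO expected by serverAllocShare().
+ * @code
+ *   RsShared  *pShared  = NULL;
+ *   RsSession *pSession = NULL;
+ *
+ *   NV_STATUS status = serverAllocShare(pServer, classInfo(RsSession), &pShared);
+ *   if (status == NV_OK)
+ *   {
+ *       pSession = dynamicCast(pShared, RsSession);
+ *       // Frees and control calls on pResourceRef will now also take pSession->pLock
+ *       status = sessionAddDependant(pSession, pResourceRef);
+ *   }
+ * @endcode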
+ */ +#ifdef NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct RsSession { + const struct NVOC_RTTI *__nvoc_rtti; + struct RsShared __nvoc_base_RsShared; + struct Object *__nvoc_pbase_Object; + struct RsShared *__nvoc_pbase_RsShared; + struct RsSession *__nvoc_pbase_RsSession; + void (*__sessionRemoveDependant__)(struct RsSession *, RsResourceRef *); + void (*__sessionRemoveDependency__)(struct RsSession *, RsResourceRef *); + PORT_RWLOCK *pLock; + NvBool bValid; + RsResourceRefList dependencies; + RsResourceRefList dependants; +}; + +#ifndef __NVOC_CLASS_RsSession_TYPEDEF__ +#define __NVOC_CLASS_RsSession_TYPEDEF__ +typedef struct RsSession RsSession; +#endif /* __NVOC_CLASS_RsSession_TYPEDEF__ */ + +#ifndef __nvoc_class_id_RsSession +#define __nvoc_class_id_RsSession 0x830d90 +#endif /* __nvoc_class_id_RsSession */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsSession; + +#define __staticCast_RsSession(pThis) \ + ((pThis)->__nvoc_pbase_RsSession) + +#ifdef __nvoc_rs_server_h_disabled +#define __dynamicCast_RsSession(pThis) ((RsSession*)NULL) +#else //__nvoc_rs_server_h_disabled +#define __dynamicCast_RsSession(pThis) \ + ((RsSession*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RsSession))) +#endif //__nvoc_rs_server_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_RsSession(RsSession**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_RsSession(RsSession**, Dynamic*, NvU32); +#define __objCreate_RsSession(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_RsSession((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define sessionRemoveDependant(pSession, pResourceRef) sessionRemoveDependant_DISPATCH(pSession, pResourceRef) +#define sessionRemoveDependency(pSession, pResourceRef) sessionRemoveDependency_DISPATCH(pSession, pResourceRef) +void sessionRemoveDependant_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); + +static inline void sessionRemoveDependant_DISPATCH(struct RsSession *pSession, RsResourceRef *pResourceRef) { + pSession->__sessionRemoveDependant__(pSession, pResourceRef); +} + +void sessionRemoveDependency_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); + +static inline void sessionRemoveDependency_DISPATCH(struct RsSession *pSession, RsResourceRef *pResourceRef) { + pSession->__sessionRemoveDependency__(pSession, pResourceRef); +} + +NV_STATUS sessionConstruct_IMPL(struct RsSession *arg_pSession); +#define __nvoc_sessionConstruct(arg_pSession) sessionConstruct_IMPL(arg_pSession) +void sessionDestruct_IMPL(struct RsSession *pSession); +#define __nvoc_sessionDestruct(pSession) sessionDestruct_IMPL(pSession) +NV_STATUS sessionAddDependant_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_server_h_disabled +static inline NV_STATUS sessionAddDependant(struct RsSession *pSession, RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionAddDependant(pSession, pResourceRef) sessionAddDependant_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +NV_STATUS sessionAddDependency_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_server_h_disabled +static inline NV_STATUS sessionAddDependency(struct RsSession *pSession, RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return 
NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionAddDependency(pSession, pResourceRef) sessionAddDependency_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +NV_STATUS sessionCheckLocksForAdd_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_server_h_disabled +static inline NV_STATUS sessionCheckLocksForAdd(struct RsSession *pSession, RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_rs_server_h_disabled +#define sessionCheckLocksForAdd(pSession, pResourceRef) sessionCheckLocksForAdd_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +void sessionCheckLocksForRemove_IMPL(struct RsSession *pSession, RsResourceRef *pResourceRef); +#ifdef __nvoc_rs_server_h_disabled +static inline void sessionCheckLocksForRemove(struct RsSession *pSession, RsResourceRef *pResourceRef) { + NV_ASSERT_FAILED_PRECOMP("RsSession was disabled!"); +} +#else //__nvoc_rs_server_h_disabled +#define sessionCheckLocksForRemove(pSession, pResourceRef) sessionCheckLocksForRemove_IMPL(pSession, pResourceRef) +#endif //__nvoc_rs_server_h_disabled + +#undef PRIVATE_FIELD + + +// Iterator data structure to save state while walking through a map +struct RS_SHARE_ITERATOR +{ + RsSharedMapIter mapIt; + NvU32 internalClassId; + struct RsShared *pShared; ///< Share that is being iterated over +}; + +/** + * Top-level structure that RMAPI and RM interface with + * + * This class is all that needs to be allocated to use the resource server + * library. + * + * The RsServer interface should be kept as narrow as possible. Map and + * MapTo are added because <1> the unmap variants operate in addresses and not + * handles and <2> having explicit knowledge of map operations in the server is + * helpful when dealing with multiple levels of address spaces (e.g., guest + * user-mode, guest kernel-mode, host kernel-mode). + */ +struct RsServer +{ + /** + * Privilege level determines what objects a server is allowed to allocate, and + * also determines whether additional handle validation needs to be performed. + */ + RS_PRIV_LEVEL privilegeLevel; + + RsClientList *pClientSortedList; ///< Bucket if linked List of clients (and their locks) owned by this server + NvU32 clientCurrentHandleIndex; + + NvBool bConstructed; ///< Determines whether the server is ready to be used + PORT_MEM_ALLOCATOR *pAllocator; ///< Allocator to use for all objects allocated by the server + + PORT_RWLOCK *pClientListLock; ///< Lock that needs to be taken when accessing the client list + + PORT_SPINLOCK *pShareMapLock; ///< Lock that needs to be taken when accessing the shared resource map + RsSharedMap shareMap; ///< Map of shared resources + +#if (RS_STANDALONE) + NvU64 topLockOwnerTid; ///< Thread id of top-lock owner + PORT_RWLOCK *pTopLock; ///< Top-level resource server lock + PORT_RWLOCK *pResLock; ///< Resource-level resource server lock +#if LOCK_VAL_ENABLED + LOCK_VAL_LOCK topLockVal; + LOCK_VAL_LOCK resLockVal; +#endif +#endif + + /// Print out a list of all resources that will be freed when a free request is made + NvBool bDebugFreeList; + + /// If true, control call param copies will be performed outside the top/api lock + NvBool bUnlockedParamCopy; + + /** + * Setting this flag to false disables any attempts to + * automatically acquire access rights or to control access to resources by + * checking for access rights. 
+ */ + NvBool bRsAccessEnabled; + + /** + * Mask of interfaces (RS_API_*) that will use a read-only top lock by default + */ + NvU32 roTopLockApiMask; + + /// Share policies which clients default to when no other policies are used + RsShareList defaultInheritedSharePolicyList; + /// Share policies to apply to all shares, regardless of other policies + RsShareList globalInternalSharePolicyList; + + NvU32 internalHandleBase; + + NvU32 activeClientCount; + NvU64 activeResourceCount; +}; + +/** + * Construct a server instance. This must be performed before any other server + * operation. + * + * @param[in] pServer This server instance + * @param[in] privilegeLevel Privilege level for this resource server instance + * @param[in] maxDomains Maximum number of domains to support, or 0 for the default + */ +NV_STATUS serverConstruct(RsServer *pServer, RS_PRIV_LEVEL privilegeLevel, NvU32 maxDomains); + +/** + * Destroy a server instance. Destructing a server does not guarantee that child domains + * and clients will be appropriately freed. serverFreeDomain should be explicitly called + * on all allocated domains to ensure all clients and resources get cleaned up. + * + * @param[in] pServer This server instance + */ +NV_STATUS serverDestruct(RsServer *pServer); + +/** + * Allocate a domain handle. Domain handles are used to track clients created by a domain. + * + * @param[in] pServer This server instance + * @param[in] hParentDomain + * @param[in] pAccessControl + * @param[out] phDomain + * + */ +NV_STATUS serverAllocDomain(RsServer *pServer, NvU32 hParentDomain, ACCESS_CONTROL *pAccessControl, NvHandle *phDomain); + +/** + * Verify that the calling user is allowed to perform the access. This check only + * applies to calls from RING_USER or RING_KERNEL. No check is performed in + * RING_HOST. + * + * @param[in] pServer This server instance + * @param[in] hDomain + * @param[in] hClient + * + */ +NV_STATUS serverValidate(RsServer *pServer, NvU32 hDomain, NvHandle hClient); + +/** + * Verify that the domain has sufficient permission to allocate the given class. + * @param[in] pServer + * @param[in] hDomain + * @param[in] externalClassId External resource class id + */ +NV_STATUS serverValidateAlloc(RsServer *pServer, NvU32 hDomain, NvU32 externalClassId); + +/** + * Free a domain handle. All clients of this domain will be freed. + * + * @param[in] pServer This server instance + * @param[in] hDomain The handle of the domain to free + */ +NV_STATUS serverFreeDomain(RsServer *pServer, NvHandle hDomain); + +/** + * Allocate a client handle. A client handle is required to allocate resources. + * + * @param[in] pServer This server instance + * @param[inout] pParams Client allocation parameters + */ +NV_STATUS serverAllocClient(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +/** + * Free a client handle. All resources references owned by the client will be + * freed. + * + * It is invalid to attempt to free a client from a user other than the one + * that allocated it. + * + * @param[in] pServer This server instance + * @param[in] pParams Client free params + */ +NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams); + +/** + * Free a list of client handles. All resources references owned by the client will be + * freed. All priority resources will be freed first across all listed clients. + * + * It is invalid to attempt to free a client from a user other than the one + * that allocated it. 
+ * + * @param[in] pServer This server instance + * @param[in] phClientList The list of client handles to free + * @param[in] numClients The number of clients in the list + * @param[in] freeState User-defined free state + * @param[in] pSecInfo Security Info + * + */ +NV_STATUS serverFreeClientList(RsServer *pServer, NvHandle *phClientList, NvU32 numClients, NvU32 freeState, API_SECURITY_INFO *pSecInfo); + +/** + * Allocate a resource. + * + * It is invalid to attempt to allocate a client from a user other than the one + * that allocated it. + * + * @param[in] pServer This server instance + * @param[inout] pParams The allocation parameters + */ +NV_STATUS serverAllocResource(RsServer *pServer, RS_RES_ALLOC_PARAMS *params); + +/** + * Allocate a ref-counted resource share. + * + * @param[in] pServer + * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared) + * @param[out] ppShare Allocated share + */ +NV_STATUS serverAllocShare(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, struct RsShared **ppShare); + +/** + * Allocate a ref-counted resource share with Halspec parent. + * + * @param[in] pServer + * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared) + * @param[out] ppShare Allocated share + * @param[in] pHalspecParent Parent object whose Halspec can be used for the shared class object + */ +NV_STATUS serverAllocShareWithHalspecParent(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, struct RsShared **ppShare, struct Object *pHalspecParent); + +/** + * Get the ref-count of a resource share. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NvS32 serverGetShareRefCount(RsServer *pServer, struct RsShared *pShare); + +/** + * Increment the ref-count of a resource share. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NV_STATUS serverRefShare(RsServer *pServer, struct RsShared *pShare); + +/** + * Decrement the ref-count of a resource share. If the ref-count + * has reached zero, the resource share will be freed. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NV_STATUS serverFreeShare(RsServer *pServer, struct RsShared *pShare); + +/** + * Get an iterator to the elements in the server's shared object map + * @param[in] pServer + * @param[in] internalClassId If non-zero, only RsShared that are (or can be + * derived from) the specified class will be returned + */ +RS_SHARE_ITERATOR serverShareIter(RsServer *pServer, NvU32 internalClassId); + +/** + * Get an iterator to the elements in the server's shared object map + */ +NvBool serverShareIterNext(RS_SHARE_ITERATOR*); + + +/** + * Allocate a resource. Assumes top-level lock has been taken. + * + * It is invalid to attempt to allocate a client from a user other than the one + * that allocated it. User-implemented. + * + * @param[in] pServer This server instance + * @param[inout] pParams The allocation parameters + */ +extern NV_STATUS serverAllocResourceUnderLock(RsServer *pServer, RS_RES_ALLOC_PARAMS *pAllocParams); + +/** + * Call Free RPC for given resource. Assumes top-level lock has been taken. + * + * @param[in] pServer This server instance + * @param[inout] pFreeParams The Free parameters + */ +extern NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams); + +/** + * Copy-in parameters supplied by caller, and initialize API state. User-implemented. 
+ * @param[in] pServer + * @param[in] pAllocParams Resource allocation parameters + * @param[out] ppApiState User-defined API_STATE; should be allocated by this function + */ +extern NV_STATUS serverAllocApiCopyIn(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, API_STATE **ppApiState); + +/** + * Copy-out parameters supplied by caller, and release API state. User-implemented. + * @param[in] pServer + * @param[in] status Status of allocation request + * @param[in] pApiState API_STATE for the allocation + */ +extern NV_STATUS serverAllocApiCopyOut(RsServer *pServer, NV_STATUS status, API_STATE *pApiState); + +/** + * Obtain a second client handle to lock if required for the allocation. + * @param[in] pParams Resource allocation parameters + * @param[in] phClient Client to lock, if any + */ +extern NV_STATUS serverLookupSecondClient(RS_RES_ALLOC_PARAMS_INTERNAL *pParams, NvHandle *phClient); + +/** + * Acquires a top-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + */ +extern NV_STATUS serverTopLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a top-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverTopLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a session lock. + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pResourceRef Resource reference to take session locks on + * @param[inout] pLockInfo Lock state + */ +extern NV_STATUS serverSessionLock_Prologue(LOCK_ACCESS_TYPE access, RsResourceRef *pResourceRef, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a session lock. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverSessionLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + */ +extern NV_STATUS serverResLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverResLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * WAR for additional tasks that must be performed after resource-level locks are released. User-implemented. 
+ * @param[inout] status Allocation status + * @param[in] bClientAlloc Caller is attempting to allocate a client + * @param[inout] pParams Allocation parameters + */ +extern NV_STATUS serverAllocEpilogue_WAR(RsServer *pServer, NV_STATUS status, NvBool bClientAlloc, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams); + +/** + * Free a resource reference and all of its descendants. This will decrease the + * resource's reference count. The resource itself will only be freed if there + * are no more references to it. + * + * It is invalid to attempt to free a resource from a user other than the one that allocated it. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Same as serverFreeResourceTree except the top-level lock is assumed to have been taken. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags in the dup parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Dup parameters + */ +extern NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Updates the lock flags in the free parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +extern NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags for automatic inter-unmap during free + * + * @param[in] pServer This server instance + * @param[inout] pParams Unmap params, contained pLockInfo will be modified + */ +extern NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams); + +/** + * Initialize parameters for a recursive call to serverFreeResourceTree. User-implemented. + * @param[in] hClient + * @param[in] hResource + * @param[inout] pParams + */ +extern NV_STATUS serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams); + +/** + * Common operations performed after top locks and client locks are taken, but before + * the control call is executed. This includes validating the control call cookie, + * looking up locking flags, parameter copy-in, and taking resource locks. + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[in] pAccess Lock access type + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +NV_STATUS serverControl_Prologue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE *pAccess, NvU32 *pReleaseFlags); + +/** + * Common operations performed after the control call is executed. This + * includes releasing locks and parameter copy-out. 
+ * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[in] pAccess Lock access type + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + * @param[in] status Control call status + */ +NV_STATUS serverControl_Epilogue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE access, NvU32 *pReleaseFlags, NV_STATUS status); + +/** + * Initialize a NVOC export control call cookie + * + * @param[in] pExportedEntry + * @param[inout] pCookie + */ +extern void serverControl_InitCookie(const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, RS_CONTROL_COOKIE *pCookie); + +/** + * Validate a NVOC export control call cookie + * + * @param[in] pParams + * @param[inout] pCookie + */ +extern NV_STATUS serverControl_ValidateCookie(RS_RES_CONTROL_PARAMS_INTERNAL *pParams, RS_CONTROL_COOKIE *pCookie); + +/** + * Copy-in control call parameters + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[inout] pCookie Control call cookie + */ +extern NV_STATUS serverControlApiCopyIn(RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie); + +/** + * Copy-out control call parameters + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[inout] pCookie Control call cookie + */ +extern NV_STATUS serverControlApiCopyOut(RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus); + +/** + * Determine whether an API supports a read-only lock for a given lock + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] api RS_API* + */ +NvBool serverSupportsReadOnlyLock(RsServer *pServer, RS_LOCK_ENUM lock, RS_API_ENUM api); + +/** + * Determine whether the current thread has taken the RW API lock + * @param[in] pServer ResServ instance + */ +extern NvBool serverRwApiLockIsOwner(RsServer *pServer); + +/** + * Lookup locking flags for a resource alloc + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverAllocResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess); +/** + * + * Lookup level locking flags for a resource free + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverFreeResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for a resource copy + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverCopyResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for a resource access share + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Share parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverShareResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking 
flags for a control call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Control call parameters + * @param[in] pCookie Control call cookie + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverControlLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for a map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for an unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for an inter-resource map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for an inter-resource unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Fill the server's share policy lists with any default or global policies needed + */ +extern NV_STATUS serverInitGlobalSharePolicies(RsServer *pServer); + +/** + * Issue a control command to a resource + * + * @param[in] pServer This server instance + * @param[in] pParams Control parameters + */ +NV_STATUS serverControl(RsServer *pServer, RS_RES_CONTROL_PARAMS *pParams); + +/** + * Copy a resource owned by one client into another client. + * + * The clients must be in the same client handle space. The underlying + * resource is not duplicated, but it is refcounted so the resource will + * not be freed until the reference count hits zero. + * + * Copying a resource will fail if the user making the call does not own + * the source client. + * + * @param[in] pServer This server instance + * @param[inout] pParams Resource sharing parameters + */ +NV_STATUS serverCopyResource(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Share certain access rights to a resource with other clients using the provided share policy + * + * The policy entry passed in will be added to the object's share policy list. + * If the bRevoke is true, the policy will be removed instead. + * + * Sharing will fail if the user making the call does not own the source client. + * + * @param[in] pServer This server instance + * @param[in] pParams Resource sharing parameters + */ +NV_STATUS serverShareResourceAccess(RsServer *pServer, RS_RES_SHARE_PARAMS *pParams); + +/** + * Creates a CPU mapping of the resource in the virtual address space of the process. 
+ * + * Not all resources support mapping. + * + * @param[in] pServer This server instance + * @param[in] hClient Client handle of the resource to map + * @param[in] hResource Handle of the resource to map + * @param[inout] pParams CPU mapping parameters + */ +NV_STATUS serverMap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_MAP_PARAMS *pParams); + +/** + * Release a CPU virtual address unmapping + * + * @param[in] pServer This server instance + * @param[in] hClient Client handle of the resource to map + * @param[in] hResource Handle of the resource to map + * @param[in] pParams CPU unmapping parameters + */ +NV_STATUS serverUnmap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_UNMAP_PARAMS *pParams); + +/** + * Pre-map operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pParams CPU mapping parameters + */ +NV_STATUS serverMap_Prologue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams); + +/** + * Post-map operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pParams CPU mapping parameters + */ +void serverMap_Epilogue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams); + +/** + * Pre-unmap operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pParams CPU mapping parameters + */ +NV_STATUS serverUnmap_Prologue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams); + +/** + * Post-unmap operations. Called with top/client locks acquired + * but not resource locks. + * + * @param[in] pServer + * @param[inout] pParams CPU mapping parameters + */ +void serverUnmap_Epilogue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams); + +/** + * Creates an inter-mapping between two resources + * + * Not all resources support mapping. + * + * @param[in] pServer This server instance + * @param[inout] pParams mapping parameters + */ +NV_STATUS serverInterMap(RsServer *pServer, RS_INTER_MAP_PARAMS *pParams); + +/** + * Release an inter-mapping between two resources + * + * @param[in] pServer This server instance + * @param[in] pParams unmapping parameters + */ +NV_STATUS serverInterUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams); + +/** + * Pre-inter-map operations. Called with top/client locks acquired. + * This function acquires resource locks. + * + * @param[in] pServer + * @param[in] pMapperRef The resource that can be used to create the mapping + * @param[in] pMappableRef The resource that can be mapped + * @param[inout] pMapParams mapping parameters + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +NV_STATUS serverInterMap_Prologue(RsServer *pServer, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags); + +/** + * Post-inter-map operations. Called with top, client, and resource locks acquired. + * This function releases resource locks. + * + * @param[in] pServer + * @param[inout] pMapParams mapping parameters + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +void serverInterMap_Epilogue(RsServer *pServer, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags); + +/** + * Pre-inter-unmap operations. Called with top, client, and resource locks acquired. 
+ * + * @param[in] pServer + * @param[inout] pParams mapping parameters + */ +NV_STATUS serverInterUnmap_Prologue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Post-inter-unmap operations. Called with top, client, and resource locks acquired. + * + * @param[in] pServer + * @param[inout] pParams mapping parameters + */ +void serverInterUnmap_Epilogue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Acquire a client pointer from a client handle. The caller is responsible for + * ensuring that lock ordering is not violated (otherwise there can be + * deadlock): clients must be locked in increasing order of client index (not + * handle). + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[out] ppClient Pointer to the RsClient + */ +NV_STATUS serverAcquireClient(RsServer *pServer, NvHandle hClient, LOCK_ACCESS_TYPE lockAccess, struct RsClient **ppClient); + +/** + * Release a client pointer + * + * @param[in] pServer This server instance + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pClient Pointer to the RsClient + */ +NV_STATUS serverReleaseClient(RsServer *pServer, LOCK_ACCESS_TYPE lockAccess, struct RsClient *pClient); + +/** + * Get a client pointer from a client handle without taking any locks. + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[out] ppClient Pointer to the RsClient + */ +NV_STATUS serverGetClientUnderLock(RsServer *pServer, NvHandle hClient, struct RsClient **ppClient); + +/** + * Get the count of clients allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU32 serverGetClientCount(RsServer *pServer); + +/** + * Get the count of resources allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU64 serverGetResourceCount(RsServer *pServer); + +/** + * Swap a TLS call context entry and increment the TLS entry refcount. + * A new TLS entry for call context will be allocated if necessary. + * + * @note This should be paired with a corresponding resservRestoreTlsCallContext call + */ +NV_STATUS resservSwapTlsCallContext(CALL_CONTEXT **ppOldCallContext, CALL_CONTEXT *pNewCallContext); + +/** + * Get the current TLS call context. This will not increment a refcount on the TLS entry. + */ +CALL_CONTEXT *resservGetTlsCallContext(void); + +/** + * Set a TLS call context entry and decrement the TLS entry refcount. + * @note This should be paired with a corresponding resservSwapTlsCallContext call + */ +NV_STATUS resservRestoreTlsCallContext(CALL_CONTEXT *pOldCallContext); + +/** + * Find a resource reference of a given type from the TLS call context + * @param[in] internalClassId Only return a reference if it matches this type + * @param[in] bSearchAncestors Search parents of the call context resource ref + */ +RsResourceRef *resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors); + +#ifdef __cplusplus +} +#endif + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_RS_SERVER_NVOC_H_ diff --git a/src/nvidia/generated/g_sdk-structures.h b/src/nvidia/generated/g_sdk-structures.h new file mode 100644 index 000000000..53601c8ff --- /dev/null +++ b/src/nvidia/generated/g_sdk-structures.h @@ -0,0 +1,349 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * WARNING: This is an autogenerated file. DO NOT EDIT. + * This file is generated using below files: + * template file: kernel/inc/vgpu/gt_sdk-structures.h + * definition file: kernel/inc/vgpu/sdk-structures.def + */ + + +#ifdef SDK_STRUCTURES +// These are copy of sdk structures, that will be used for the communication between the vmioplugin & guest RM. +#include "vgpu/sdk-structures.h" +typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v03_00 +{ + NvU64 physAddress NV_ALIGN_BYTES(8); + NvU32 numEntries; + NvU32 flags; + NvHandle hVASpace; + NvU32 chId; + NvU32 subDeviceId; +} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v03_00; + +typedef struct NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05 +{ + NvU64 physAddress NV_ALIGN_BYTES(8); + NvU32 numEntries; + NvU32 flags; + NvHandle hVASpace; + NvU32 chId; + NvU32 subDeviceId; + NvU32 pasid; +} NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05; + +typedef NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05 NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v; + +typedef struct NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS_v03_00 +{ + NvU64 physAddr NV_ALIGN_BYTES(8); + NvU32 numEntries; + NvU32 aperture; +} NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS_v03_00; + +typedef NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS_v03_00 NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS_v; + +typedef struct NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_v03_00 +{ + NvU32 pdeIndex; + NvU32 flags; + NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS_v03_00 ptParams[NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE]; + NvHandle hVASpace; + NvP64 pPdeBuffer NV_ALIGN_BYTES(8); + NvU32 subDeviceId; +} NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_v03_00; + +typedef NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_v03_00 NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS_v; + +typedef struct NVOS00_PARAMETERS_v03_00 +{ + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +} NVOS00_PARAMETERS_v03_00; + +typedef NVOS00_PARAMETERS_v03_00 NVOS00_PARAMETERS_v; + +typedef struct NVOS46_PARAMETERS_v03_00 +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hDma; + NvHandle hMemory; + NvU64 offset NV_ALIGN_BYTES(8); + NvU64 length NV_ALIGN_BYTES(8); + NvV32 flags; + NvU64 dmaOffset NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS46_PARAMETERS_v03_00; + +typedef NVOS46_PARAMETERS_v03_00 NVOS46_PARAMETERS_v; + +typedef struct NVOS47_PARAMETERS_v03_00 +{ + NvHandle hClient; + NvHandle hDevice; + 
NvHandle hDma; + NvHandle hMemory; + NvV32 flags; + NvU64 dmaOffset NV_ALIGN_BYTES(8); + NvV32 status; +} NVOS47_PARAMETERS_v03_00; + +typedef NVOS47_PARAMETERS_v03_00 NVOS47_PARAMETERS_v; + +typedef struct NVOS55_PARAMETERS_v03_00 +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvHandle hClientSrc; + NvHandle hObjectSrc; + NvU32 flags; + NvU32 status; +} NVOS55_PARAMETERS_v03_00; + +typedef NVOS55_PARAMETERS_v03_00 NVOS55_PARAMETERS_v; + +typedef struct NV2080_CTRL_GR_ROUTE_INFO_v12_01 +{ + NvU32 flags; + NvU64 route NV_ALIGN_BYTES(8); +} NV2080_CTRL_GR_ROUTE_INFO_v12_01; + +typedef NV2080_CTRL_GR_ROUTE_INFO_v12_01 NV2080_CTRL_GR_ROUTE_INFO_v; + +typedef struct NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v03_00 +{ + NvHandle hClientTarget; + NvHandle hChannelTarget; + NvU32 reserved00[3]; + NvU32 regOpCount; + NvP64 regOps NV_ALIGN_BYTES(8); +} NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v03_00; + +typedef struct NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v12_01 +{ + NvHandle hClientTarget; + NvHandle hChannelTarget; + NvU32 reserved00[3]; + NvU32 regOpCount; + NV2080_CTRL_GR_ROUTE_INFO_v12_01 grRouteInfo; + NvP64 regOps NV_ALIGN_BYTES(8); +} NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v12_01; + +typedef NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v12_01 NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v; + +typedef struct NV2080_CTRL_GPU_REG_OP_v03_00 +{ + NvU8 regOp; + NvU8 regType; + NvU8 regStatus; + NvU8 regQuad; + NvU32 regGroupMask; + NvU32 regSubGroupMask; + NvU32 regOffset; + NvU32 regValueHi; + NvU32 regValueLo; + NvU32 regAndNMaskHi; + NvU32 regAndNMaskLo; +} NV2080_CTRL_GPU_REG_OP_v03_00; + +typedef NV2080_CTRL_GPU_REG_OP_v03_00 NV2080_CTRL_GPU_REG_OP_v; + +typedef struct NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v06_01 +{ + NvU32 util; + NvU32 procId; +} NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v06_01; + +typedef struct NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 +{ + NvU32 util; + NvU32 procId; + NvU32 subProcessID; +} NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00; + +typedef NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v; + +typedef struct NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v06_01 +{ + NvU64 timeStamp NV_ALIGN_BYTES(8); + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v06_01 fb; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v06_01 gr; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v06_01 nvenc; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v06_01 nvdec; +} NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v06_01; + +typedef struct NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v17_00 +{ + NvU64 timeStamp NV_ALIGN_BYTES(8); + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 fb; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 gr; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 nvenc; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 nvdec; +} NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v17_00; + +typedef struct NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E +{ + NvU64 timeStamp NV_ALIGN_BYTES(8); + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 fb; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 gr; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 nvenc; + NV2080_CTRL_PERF_GPUMON_ENGINE_UTIL_SAMPLE_v17_00 nvdec; +} NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E; + +typedef NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v; + +typedef struct NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v17_00 +{ + NvU8 type; + NvU32 bufSize; + NvU32 count; + NvU32 tracker; + 
NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v17_00 samples[NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL]; +} NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v17_00; + +typedef struct NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v1F_0E +{ + NvU8 type; + NvU32 bufSize; + NvU32 count; + NvU32 tracker; + NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE_v1F_0E samples[NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL]; +} NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v1F_0E; + +typedef NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v1F_0E NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v; + +typedef struct NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00 +{ + NvU32 flags; + NvBool bBridgeless; + NvU32 currLimits[NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM]; +} NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00; + +typedef NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00 NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v; + +typedef struct UpdateBarPde_v15_00 +{ + NV_RPC_UPDATE_PDE_BAR_TYPE barType; + NvU64 entryValue NV_ALIGN_BYTES(8); + NvU64 entryLevelShift NV_ALIGN_BYTES(8); +} UpdateBarPde_v15_00; + +typedef UpdateBarPde_v15_00 UpdateBarPde_v; + +typedef struct gpu_exec_reg_ops_v03_00 +{ + NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v03_00 reg_op_params; + NV2080_CTRL_GPU_REG_OP_v03_00 operations[]; +} gpu_exec_reg_ops_v03_00; + +typedef struct gpu_exec_reg_ops_v12_01 +{ + NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS_v12_01 reg_op_params; + NV2080_CTRL_GPU_REG_OP_v03_00 operations[]; +} gpu_exec_reg_ops_v12_01; + +typedef gpu_exec_reg_ops_v12_01 gpu_exec_reg_ops_v; + +typedef struct idle_channel_list_v03_00 +{ + NvU32 phClient; + NvU32 phDevice; + NvU32 phChannel; +} idle_channel_list_v03_00; + +typedef idle_channel_list_v03_00 idle_channel_list_v; + +typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v03_00 +{ + NvHandle hVASpace; + NvU32 subDeviceId; +} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v03_00; + +typedef struct NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05 +{ + NvHandle hVASpace; + NvU32 subDeviceId; +} NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05; + +typedef NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05 NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v; + +typedef struct NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F +{ + NvU32 regOpCount; + NVB0CC_REGOPS_MODE mode; + NvBool bPassed; + NvBool bDirect; + NV2080_CTRL_GPU_REG_OP_v03_00 regOps[NVB0CC_REGOPS_MAX_COUNT]; +} NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F; + +typedef NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v1A_0F NVB0CC_CTRL_EXEC_REG_OPS_PARAMS_v; + +typedef struct ATOMIC_OP_v1F_08 +{ + NvBool bSupported; + NvU32 attributes; +} ATOMIC_OP_v1F_08; + +typedef ATOMIC_OP_v1F_08 ATOMIC_OP_v; + + +#endif + +#ifdef SDK_UNION_MEMBER_NAME_FUNCTIONS + +#endif + +#ifdef SDK_UNION_MEMBER_NAME_FUNCTIONS_CMD + +#endif + +#ifdef SDK_ARRAY_LENGTH_FUNCTIONS + +// Array length functions for gpu_exec_reg_ops: +static NV_STATUS get_array_length_gpu_exec_reg_ops_v03_00_operations(void *msg, NvS32 bytes_remaining, uint32_t* length) +{ + gpu_exec_reg_ops_v03_00 *param = msg; + + if ((NvS32)(NV_OFFSETOF(gpu_exec_reg_ops_v03_00, reg_op_params.regOpCount) + sizeof(param->reg_op_params.regOpCount)) > bytes_remaining) + return NV_ERR_BUFFER_TOO_SMALL; + + *length = param->reg_op_params.regOpCount; + return NV_OK; +} +static NV_STATUS get_array_length_gpu_exec_reg_ops_v12_01_operations(void *msg, NvS32 bytes_remaining, uint32_t* length) +{ + 
gpu_exec_reg_ops_v12_01 *param = msg; + + if ((NvS32)(NV_OFFSETOF(gpu_exec_reg_ops_v12_01, reg_op_params.regOpCount) + sizeof(param->reg_op_params.regOpCount)) > bytes_remaining) + return NV_ERR_BUFFER_TOO_SMALL; + + *length = param->reg_op_params.regOpCount; + return NV_OK; +} + +#endif + diff --git a/src/nvidia/generated/g_standard_mem_nvoc.c b/src/nvidia/generated/g_standard_mem_nvoc.c new file mode 100644 index 000000000..9a64a73df --- /dev/null +++ b/src/nvidia/generated/g_standard_mem_nvoc.c @@ -0,0 +1,323 @@ +#define NVOC_STANDARD_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_standard_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x897bf7 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_StandardMemory(StandardMemory*); +void __nvoc_init_funcTable_StandardMemory(StandardMemory*); +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_StandardMemory(StandardMemory*); +void __nvoc_dtor_StandardMemory(StandardMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_StandardMemory; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_StandardMemory = { + /*pClassDef=*/ &__nvoc_class_def_StandardMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_StandardMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_StandardMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(StandardMemory, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_StandardMemory = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_StandardMemory_StandardMemory, + &__nvoc_rtti_StandardMemory_Memory, + &__nvoc_rtti_StandardMemory_RmResource, + &__nvoc_rtti_StandardMemory_RmResourceCommon, + 
&__nvoc_rtti_StandardMemory_RsResource, + &__nvoc_rtti_StandardMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(StandardMemory), + /*classId=*/ classId(StandardMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "StandardMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_StandardMemory, + /*pCastInfo=*/ &__nvoc_castinfo_StandardMemory, + /*pExportInfo=*/ &__nvoc_export_info_StandardMemory +}; + +static NvBool __nvoc_thunk_StandardMemory_resCanCopy(struct RsResource *pStandardMemory) { + return stdmemCanCopy((struct StandardMemory *)(((unsigned char *)pStandardMemory) - __nvoc_rtti_StandardMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemCheckMemInterUnmap(struct StandardMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemControl(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemUnmap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemGetMemInterMapParams(struct StandardMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemGetMemoryMappingDescriptor(struct StandardMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemGetMapAddrSpace(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_stdmemShareCallback(struct StandardMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_stdmemControlFilter(struct StandardMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_stdmemAddAdditionalDependants(struct RsClient *pClient, struct StandardMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pReference); 
+} + +static NvU32 __nvoc_thunk_RsResource_stdmemGetRefCount(struct StandardMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_stdmemMapTo(struct StandardMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_stdmemControl_Prologue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemIsReady(struct StandardMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemCheckCopyPermissions(struct StandardMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_stdmemPreDestruct(struct StandardMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_stdmemUnmapFrom(struct StandardMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_stdmemControl_Epilogue(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_stdmemControlLookup(struct StandardMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_stdmemMap(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_StandardMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_stdmemAccessCallback(struct StandardMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_StandardMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_StandardMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_StandardMemory(StandardMemory *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_StandardMemory(StandardMemory *pThis) { + 
PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_StandardMemory_fail_Memory; + __nvoc_init_dataField_StandardMemory(pThis); + + status = __nvoc_stdmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_StandardMemory_fail__init; + goto __nvoc_ctor_StandardMemory_exit; // Success + +__nvoc_ctor_StandardMemory_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_StandardMemory_fail_Memory: +__nvoc_ctor_StandardMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_StandardMemory_1(StandardMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__stdmemCanCopy__ = &stdmemCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_StandardMemory_resCanCopy; + + pThis->__stdmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_stdmemCheckMemInterUnmap; + + pThis->__stdmemControl__ = &__nvoc_thunk_Memory_stdmemControl; + + pThis->__stdmemUnmap__ = &__nvoc_thunk_Memory_stdmemUnmap; + + pThis->__stdmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_stdmemGetMemInterMapParams; + + pThis->__stdmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_stdmemGetMemoryMappingDescriptor; + + pThis->__stdmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_stdmemGetMapAddrSpace; + + pThis->__stdmemShareCallback__ = &__nvoc_thunk_RmResource_stdmemShareCallback; + + pThis->__stdmemControlFilter__ = &__nvoc_thunk_RsResource_stdmemControlFilter; + + pThis->__stdmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_stdmemAddAdditionalDependants; + + pThis->__stdmemGetRefCount__ = &__nvoc_thunk_RsResource_stdmemGetRefCount; + + pThis->__stdmemMapTo__ = &__nvoc_thunk_RsResource_stdmemMapTo; + + pThis->__stdmemControl_Prologue__ = &__nvoc_thunk_RmResource_stdmemControl_Prologue; + + pThis->__stdmemIsReady__ = &__nvoc_thunk_Memory_stdmemIsReady; + + pThis->__stdmemCheckCopyPermissions__ = &__nvoc_thunk_Memory_stdmemCheckCopyPermissions; + + pThis->__stdmemPreDestruct__ = &__nvoc_thunk_RsResource_stdmemPreDestruct; + + pThis->__stdmemUnmapFrom__ = &__nvoc_thunk_RsResource_stdmemUnmapFrom; + + pThis->__stdmemControl_Epilogue__ = &__nvoc_thunk_RmResource_stdmemControl_Epilogue; + + pThis->__stdmemControlLookup__ = &__nvoc_thunk_RsResource_stdmemControlLookup; + + pThis->__stdmemMap__ = &__nvoc_thunk_Memory_stdmemMap; + + pThis->__stdmemAccessCallback__ = &__nvoc_thunk_RmResource_stdmemAccessCallback; +} + +void __nvoc_init_funcTable_StandardMemory(StandardMemory *pThis) { + __nvoc_init_funcTable_StandardMemory_1(pThis); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_StandardMemory(StandardMemory *pThis) { + pThis->__nvoc_pbase_StandardMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + 
pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_StandardMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_StandardMemory(StandardMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + StandardMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(StandardMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(StandardMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_StandardMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_StandardMemory(pThis); + status = __nvoc_ctor_StandardMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_StandardMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_StandardMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_StandardMemory(StandardMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_StandardMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_standard_mem_nvoc.h b/src/nvidia/generated/g_standard_mem_nvoc.h new file mode 100644 index 000000000..bb1573e05 --- /dev/null +++ b/src/nvidia/generated/g_standard_mem_nvoc.h @@ -0,0 +1,261 @@ +#ifndef _G_STANDARD_MEM_NVOC_H_ +#define _G_STANDARD_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_standard_mem_nvoc.h" + +#ifndef _STANDARD_MEMORY_H_ +#define _STANDARD_MEMORY_H_ + +#include "mem_mgr/mem.h" + +#include "ctrl/ctrl003e.h" + +typedef struct MEMORY_ALLOCATION_REQUEST MEMORY_ALLOCATION_REQUEST; + +struct MemoryManager; + +#ifndef __NVOC_CLASS_MemoryManager_TYPEDEF__ +#define __NVOC_CLASS_MemoryManager_TYPEDEF__ +typedef struct MemoryManager MemoryManager; +#endif /* __NVOC_CLASS_MemoryManager_TYPEDEF__ */ + +#ifndef __nvoc_class_id_MemoryManager +#define __nvoc_class_id_MemoryManager 0x22ad47 +#endif /* __nvoc_class_id_MemoryManager */ + + +/*! + * Allocator for normal virtual, video and system memory + */ +#ifdef NVOC_STANDARD_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct StandardMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct StandardMemory *__nvoc_pbase_StandardMemory; + NvBool (*__stdmemCanCopy__)(struct StandardMemory *); + NV_STATUS (*__stdmemCheckMemInterUnmap__)(struct StandardMemory *, NvBool); + NV_STATUS (*__stdmemControl__)(struct StandardMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__stdmemUnmap__)(struct StandardMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__stdmemGetMemInterMapParams__)(struct StandardMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__stdmemGetMemoryMappingDescriptor__)(struct StandardMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__stdmemGetMapAddrSpace__)(struct StandardMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__stdmemShareCallback__)(struct StandardMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__stdmemControlFilter__)(struct StandardMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__stdmemAddAdditionalDependants__)(struct RsClient *, struct StandardMemory *, RsResourceRef *); + NvU32 (*__stdmemGetRefCount__)(struct StandardMemory *); + NV_STATUS (*__stdmemMapTo__)(struct StandardMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__stdmemControl_Prologue__)(struct StandardMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__stdmemIsReady__)(struct StandardMemory *); + NV_STATUS (*__stdmemCheckCopyPermissions__)(struct StandardMemory *, struct OBJGPU *, NvHandle); + void (*__stdmemPreDestruct__)(struct StandardMemory *); + NV_STATUS (*__stdmemUnmapFrom__)(struct StandardMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__stdmemControl_Epilogue__)(struct StandardMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__stdmemControlLookup__)(struct StandardMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__stdmemMap__)(struct StandardMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__stdmemAccessCallback__)(struct StandardMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_StandardMemory_TYPEDEF__ +#define __NVOC_CLASS_StandardMemory_TYPEDEF__ +typedef struct StandardMemory StandardMemory; +#endif /* __NVOC_CLASS_StandardMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_StandardMemory +#define __nvoc_class_id_StandardMemory 0x897bf7 +#endif /* 
__nvoc_class_id_StandardMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +#define __staticCast_StandardMemory(pThis) \ + ((pThis)->__nvoc_pbase_StandardMemory) + +#ifdef __nvoc_standard_mem_h_disabled +#define __dynamicCast_StandardMemory(pThis) ((StandardMemory*)NULL) +#else //__nvoc_standard_mem_h_disabled +#define __dynamicCast_StandardMemory(pThis) \ + ((StandardMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(StandardMemory))) +#endif //__nvoc_standard_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_StandardMemory(StandardMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_StandardMemory(StandardMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_StandardMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_StandardMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define stdmemCanCopy(pStandardMemory) stdmemCanCopy_DISPATCH(pStandardMemory) +#define stdmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) stdmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define stdmemControl(pMemory, pCallContext, pParams) stdmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define stdmemUnmap(pMemory, pCallContext, pCpuMapping) stdmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define stdmemGetMemInterMapParams(pMemory, pParams) stdmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define stdmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) stdmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define stdmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) stdmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define stdmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) stdmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define stdmemControlFilter(pResource, pCallContext, pParams) stdmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define stdmemAddAdditionalDependants(pClient, pResource, pReference) stdmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define stdmemGetRefCount(pResource) stdmemGetRefCount_DISPATCH(pResource) +#define stdmemMapTo(pResource, pParams) stdmemMapTo_DISPATCH(pResource, pParams) +#define stdmemControl_Prologue(pResource, pCallContext, pParams) stdmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define stdmemIsReady(pMemory) stdmemIsReady_DISPATCH(pMemory) +#define stdmemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) stdmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define stdmemPreDestruct(pResource) stdmemPreDestruct_DISPATCH(pResource) +#define stdmemUnmapFrom(pResource, pParams) stdmemUnmapFrom_DISPATCH(pResource, pParams) +#define stdmemControl_Epilogue(pResource, pCallContext, pParams) stdmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define stdmemControlLookup(pResource, pParams, ppEntry) stdmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define stdmemMap(pMemory, pCallContext, pParams, pCpuMapping) stdmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define stdmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) stdmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvU32 stdmemGetSysmemPageSize_IMPL(struct OBJGPU *pGpu, struct 
StandardMemory *pMemory); + +#ifdef __nvoc_standard_mem_h_disabled +static inline NvU32 stdmemGetSysmemPageSize(struct OBJGPU *pGpu, struct StandardMemory *pMemory) { + NV_ASSERT_FAILED_PRECOMP("StandardMemory was disabled!"); + return 0; +} +#else //__nvoc_standard_mem_h_disabled +#define stdmemGetSysmemPageSize(pGpu, pMemory) stdmemGetSysmemPageSize_IMPL(pGpu, pMemory) +#endif //__nvoc_standard_mem_h_disabled + +#define stdmemGetSysmemPageSize_HAL(pGpu, pMemory) stdmemGetSysmemPageSize(pGpu, pMemory) + +NvBool stdmemCanCopy_IMPL(struct StandardMemory *pStandardMemory); + +static inline NvBool stdmemCanCopy_DISPATCH(struct StandardMemory *pStandardMemory) { + return pStandardMemory->__stdmemCanCopy__(pStandardMemory); +} + +static inline NV_STATUS stdmemCheckMemInterUnmap_DISPATCH(struct StandardMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__stdmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS stdmemControl_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__stdmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS stdmemUnmap_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__stdmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS stdmemGetMemInterMapParams_DISPATCH(struct StandardMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__stdmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS stdmemGetMemoryMappingDescriptor_DISPATCH(struct StandardMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__stdmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS stdmemGetMapAddrSpace_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__stdmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool stdmemShareCallback_DISPATCH(struct StandardMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__stdmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS stdmemControlFilter_DISPATCH(struct StandardMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__stdmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void stdmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct StandardMemory *pResource, RsResourceRef *pReference) { + pResource->__stdmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 stdmemGetRefCount_DISPATCH(struct StandardMemory *pResource) { + return pResource->__stdmemGetRefCount__(pResource); +} + +static inline NV_STATUS stdmemMapTo_DISPATCH(struct StandardMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__stdmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS stdmemControl_Prologue_DISPATCH(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__stdmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS stdmemIsReady_DISPATCH(struct StandardMemory *pMemory) { + return pMemory->__stdmemIsReady__(pMemory); +} + 
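The _DISPATCH inlines above, together with the stdmem* macros that expand to them, are how NVOC emulates C++ virtual dispatch in C: every call goes through a per-object function pointer that __nvoc_init_funcTable_StandardMemory_1() (in g_standard_mem_nvoc.c) fills in, either with the class's own _IMPL or with a thunk that shifts the pointer to an embedded base class. A minimal caller sketch follows; exampleQueryStandardMemory is an assumed name used only for illustration and is not part of the generated sources.

static NV_STATUS exampleQueryStandardMemory(struct StandardMemory *pStdMem)
{
    // stdmemCanCopy() expands to stdmemCanCopy_DISPATCH(), which calls the
    // pointer installed by __nvoc_init_funcTable_StandardMemory_1()
    // (stdmemCanCopy_IMPL for this class).
    if (!stdmemCanCopy(pStdMem))
        return NV_ERR_NOT_SUPPORTED;

    // Inherited behavior is reached the same way, but through a thunk:
    // stdmemIsReady() ends up in memIsReady() on the embedded Memory base
    // after the pointer adjustment done by __nvoc_thunk_Memory_stdmemIsReady().
    return stdmemIsReady(pStdMem);
}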
+static inline NV_STATUS stdmemCheckCopyPermissions_DISPATCH(struct StandardMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__stdmemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void stdmemPreDestruct_DISPATCH(struct StandardMemory *pResource) { + pResource->__stdmemPreDestruct__(pResource); +} + +static inline NV_STATUS stdmemUnmapFrom_DISPATCH(struct StandardMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__stdmemUnmapFrom__(pResource, pParams); +} + +static inline void stdmemControl_Epilogue_DISPATCH(struct StandardMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__stdmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS stdmemControlLookup_DISPATCH(struct StandardMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__stdmemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS stdmemMap_DISPATCH(struct StandardMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__stdmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool stdmemAccessCallback_DISPATCH(struct StandardMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__stdmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS stdmemConstruct_IMPL(struct StandardMemory *arg_pStandardMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_stdmemConstruct(arg_pStandardMemory, arg_pCallContext, arg_pParams) stdmemConstruct_IMPL(arg_pStandardMemory, arg_pCallContext, arg_pParams) +NV_STATUS stdmemValidateParams_IMPL(struct OBJGPU *pGpu, NvHandle hClient, NV_MEMORY_ALLOCATION_PARAMS *pAllocData); +#define stdmemValidateParams(pGpu, hClient, pAllocData) stdmemValidateParams_IMPL(pGpu, hClient, pAllocData) +void stdmemDumpInputAllocParams_IMPL(NV_MEMORY_ALLOCATION_PARAMS *pAllocData, CALL_CONTEXT *pCallContext); +#define stdmemDumpInputAllocParams(pAllocData, pCallContext) stdmemDumpInputAllocParams_IMPL(pAllocData, pCallContext) +void stdmemDumpOutputAllocParams_IMPL(NV_MEMORY_ALLOCATION_PARAMS *pAllocData); +#define stdmemDumpOutputAllocParams(pAllocData) stdmemDumpOutputAllocParams_IMPL(pAllocData) +NvU32 stdmemQueryPageSize_IMPL(struct MemoryManager *pMemoryManager, NvHandle hClient, NV_MEMORY_ALLOCATION_PARAMS *pAllocData); +#define stdmemQueryPageSize(pMemoryManager, hClient, pAllocData) stdmemQueryPageSize_IMPL(pMemoryManager, hClient, pAllocData) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_STANDARD_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_subdevice_diag_nvoc.c b/src/nvidia/generated/g_subdevice_diag_nvoc.c new file mode 100644 index 000000000..bdad0db81 --- /dev/null +++ b/src/nvidia/generated/g_subdevice_diag_nvoc.c @@ -0,0 +1,646 @@ +#define NVOC_SUBDEVICE_DIAG_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_subdevice_diag_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xaa3066 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DiagApi; + 
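The NVOC_RTTI and NVOC_CASTINFO tables defined just below record the byte offset of every base class embedded in DiagApi; __nvoc_dynamicCast() consults them at run time, which is what the dynamicCast()/staticCast() macros used throughout these generated files rely on. A minimal usage sketch, assuming g_subdevice_diag_nvoc.h has been included; exampleLookupDiagApi is an assumed name used only for illustration.

static struct DiagApi *exampleLookupDiagApi(struct GpuResource *pGpuRes)
{
    // Returns NULL unless the object really is a DiagApi; on success this is
    // the same pointer adjustment the thunks below perform by hand, i.e.
    // subtracting __nvoc_rtti_DiagApi_GpuResource.offset from pGpuRes.
    return dynamicCast(pGpuRes, DiagApi);
}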
+extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_DiagApi(DiagApi*); +void __nvoc_init_funcTable_DiagApi(DiagApi*); +NV_STATUS __nvoc_ctor_DiagApi(DiagApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_DiagApi(DiagApi*); +void __nvoc_dtor_DiagApi(DiagApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DiagApi; + +static const struct NVOC_RTTI __nvoc_rtti_DiagApi_DiagApi = { + /*pClassDef=*/ &__nvoc_class_def_DiagApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DiagApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_DiagApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DiagApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DiagApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DiagApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DiagApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DiagApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DiagApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DiagApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DiagApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DiagApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DiagApi_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DiagApi, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_DiagApi_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(DiagApi, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_DiagApi = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_DiagApi_DiagApi, + &__nvoc_rtti_DiagApi_Notifier, + &__nvoc_rtti_DiagApi_INotifier, + &__nvoc_rtti_DiagApi_GpuResource, + &__nvoc_rtti_DiagApi_RmResource, + &__nvoc_rtti_DiagApi_RmResourceCommon, + &__nvoc_rtti_DiagApi_RsResource, + &__nvoc_rtti_DiagApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_DiagApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(DiagApi), + /*classId=*/ classId(DiagApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "DiagApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DiagApi, + /*pCastInfo=*/ &__nvoc_castinfo_DiagApi, + /*pExportInfo=*/ 
&__nvoc_export_info_DiagApi +}; + +static NV_STATUS __nvoc_thunk_DiagApi_gpuresControl(struct GpuResource *pDiagApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return diagapiControl((struct DiagApi *)(((unsigned char *)pDiagApi) - __nvoc_rtti_DiagApi_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_DiagApi_resControlFilter(struct RsResource *pDiagApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return diagapiControlFilter((struct DiagApi *)(((unsigned char *)pDiagApi) - __nvoc_rtti_DiagApi_RsResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_diagapiShareCallback(struct DiagApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DiagApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_diagapiMapTo(struct DiagApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_diagapiGetOrAllocNotifShare(struct DiagApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DiagApi_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_diagapiCheckMemInterUnmap(struct DiagApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DiagApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_diagapiGetMapAddrSpace(struct DiagApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DiagApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_diagapiSetNotificationShare(struct DiagApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DiagApi_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_diagapiGetRefCount(struct DiagApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_diagapiAddAdditionalDependants(struct RsClient *pClient, struct DiagApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_diagapiControl_Prologue(struct DiagApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_diagapiGetRegBaseOffsetAndSize(struct DiagApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return 
gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DiagApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_diagapiInternalControlForward(struct DiagApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DiagApi_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_diagapiUnmapFrom(struct DiagApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_diagapiControl_Epilogue(struct DiagApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_diagapiControlLookup(struct DiagApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_diagapiGetInternalObjectHandle(struct DiagApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DiagApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_diagapiUnmap(struct DiagApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DiagApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_diagapiGetMemInterMapParams(struct DiagApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DiagApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_diagapiGetMemoryMappingDescriptor(struct DiagApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DiagApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Notifier_diagapiUnregisterEvent(struct DiagApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DiagApi_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_diagapiCanCopy(struct DiagApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_diagapiPreDestruct(struct DiagApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_diagapiGetNotificationListPtr(struct DiagApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DiagApi_Notifier.offset)); +} + +static struct NotifShare 
*__nvoc_thunk_Notifier_diagapiGetNotificationShare(struct DiagApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_DiagApi_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_diagapiMap(struct DiagApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DiagApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_diagapiAccessCallback(struct DiagApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DiagApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_DiagApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdFifoCheckEngineContext_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0401u, + /*paramSize=*/ sizeof(NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdFifoCheckEngineContext" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdFifoEnableVirtualContext_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0402u, + /*paramSize=*/ sizeof(NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdFifoEnableVirtualContext" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdFifoGetChannelState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0403u, + /*paramSize=*/ sizeof(NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdFifoGetChannelState" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdFbCtrlGpuCache_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0506u, + /*paramSize=*/ sizeof(NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdFbCtrlGpuCache" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdFbEccSetKillPtr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f050eu, + /*paramSize=*/ 
sizeof(NV208F_CTRL_FB_ECC_SET_KILL_PTR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdFbEccSetKillPtr" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdBifPBIWriteCommand_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0701u, + /*paramSize=*/ sizeof(NV208F_CTRL_BIF_PBI_WRITE_COMMAND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdBifPBIWriteCommand" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdBifConfigRegRead_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0702u, + /*paramSize=*/ sizeof(NV208F_CTRL_BIF_CONFIG_REG_READ_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdBifConfigRegRead" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdBifConfigRegWrite_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0703u, + /*paramSize=*/ sizeof(NV208F_CTRL_BIF_CONFIG_REG_WRITE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdBifConfigRegWrite" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdBifInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0704u, + /*paramSize=*/ sizeof(NV208F_CTRL_BIF_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdBifInfo" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdMmuGetNumHshubmmus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f0b03u, + /*paramSize=*/ sizeof(NV208F_CTRL_MMU_GET_NUM_HSHUBMMUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdMmuGetNumHshubmmus" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdGpuGetRamSvopValues_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f1101u, + /*paramSize=*/ sizeof(NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdGpuGetRamSvopValues" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdGpuSetRamSvopValues_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0x208f1102u, + /*paramSize=*/ sizeof(NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdGpuSetRamSvopValues" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) diagapiCtrlCmdGpuVerifyInforom_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x208f1105u, + /*paramSize=*/ sizeof(NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_DiagApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "diagapiCtrlCmdGpuVerifyInforom" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_DiagApi = +{ + /*numEntries=*/ 13, + /*pExportEntries=*/ __nvoc_exported_method_def_DiagApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_DiagApi(DiagApi *pThis) { + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_DiagApi(DiagApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_DiagApi(DiagApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DiagApi_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_DiagApi_fail_Notifier; + __nvoc_init_dataField_DiagApi(pThis); + + status = __nvoc_diagapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_DiagApi_fail__init; + goto __nvoc_ctor_DiagApi_exit; // Success + +__nvoc_ctor_DiagApi_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_DiagApi_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_DiagApi_fail_GpuResource: +__nvoc_ctor_DiagApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_DiagApi_1(DiagApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__diagapiControl__ = &diagapiControl_IMPL; + + pThis->__diagapiControlFilter__ = &diagapiControlFilter_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__diagapiCtrlCmdFifoCheckEngineContext__ = &diagapiCtrlCmdFifoCheckEngineContext_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__diagapiCtrlCmdFifoEnableVirtualContext__ = &diagapiCtrlCmdFifoEnableVirtualContext_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__diagapiCtrlCmdFifoGetChannelState__ = &diagapiCtrlCmdFifoGetChannelState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdFbCtrlGpuCache__ = &diagapiCtrlCmdFbCtrlGpuCache_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdFbEccSetKillPtr__ = &diagapiCtrlCmdFbEccSetKillPtr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdGpuGetRamSvopValues__ = 
&diagapiCtrlCmdGpuGetRamSvopValues_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdGpuSetRamSvopValues__ = &diagapiCtrlCmdGpuSetRamSvopValues_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdGpuVerifyInforom__ = &diagapiCtrlCmdGpuVerifyInforom_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdBifPBIWriteCommand__ = &diagapiCtrlCmdBifPBIWriteCommand_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdBifConfigRegRead__ = &diagapiCtrlCmdBifConfigRegRead_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdBifConfigRegWrite__ = &diagapiCtrlCmdBifConfigRegWrite_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdBifInfo__ = &diagapiCtrlCmdBifInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__diagapiCtrlCmdMmuGetNumHshubmmus__ = &diagapiCtrlCmdMmuGetNumHshubmmus_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_DiagApi_gpuresControl; + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resControlFilter__ = &__nvoc_thunk_DiagApi_resControlFilter; + + pThis->__diagapiShareCallback__ = &__nvoc_thunk_GpuResource_diagapiShareCallback; + + pThis->__diagapiMapTo__ = &__nvoc_thunk_RsResource_diagapiMapTo; + + pThis->__diagapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_diagapiGetOrAllocNotifShare; + + pThis->__diagapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_diagapiCheckMemInterUnmap; + + pThis->__diagapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_diagapiGetMapAddrSpace; + + pThis->__diagapiSetNotificationShare__ = &__nvoc_thunk_Notifier_diagapiSetNotificationShare; + + pThis->__diagapiGetRefCount__ = &__nvoc_thunk_RsResource_diagapiGetRefCount; + + pThis->__diagapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_diagapiAddAdditionalDependants; + + pThis->__diagapiControl_Prologue__ = &__nvoc_thunk_RmResource_diagapiControl_Prologue; + + pThis->__diagapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_diagapiGetRegBaseOffsetAndSize; + + pThis->__diagapiInternalControlForward__ = &__nvoc_thunk_GpuResource_diagapiInternalControlForward; + + pThis->__diagapiUnmapFrom__ = &__nvoc_thunk_RsResource_diagapiUnmapFrom; + + pThis->__diagapiControl_Epilogue__ = &__nvoc_thunk_RmResource_diagapiControl_Epilogue; + + pThis->__diagapiControlLookup__ = &__nvoc_thunk_RsResource_diagapiControlLookup; + + pThis->__diagapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_diagapiGetInternalObjectHandle; + + pThis->__diagapiUnmap__ = &__nvoc_thunk_GpuResource_diagapiUnmap; + + pThis->__diagapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_diagapiGetMemInterMapParams; + + pThis->__diagapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_diagapiGetMemoryMappingDescriptor; + + pThis->__diagapiUnregisterEvent__ = &__nvoc_thunk_Notifier_diagapiUnregisterEvent; + + pThis->__diagapiCanCopy__ = &__nvoc_thunk_RsResource_diagapiCanCopy; + + pThis->__diagapiPreDestruct__ = &__nvoc_thunk_RsResource_diagapiPreDestruct; + + pThis->__diagapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_diagapiGetNotificationListPtr; + + pThis->__diagapiGetNotificationShare__ = &__nvoc_thunk_Notifier_diagapiGetNotificationShare; + + pThis->__diagapiMap__ = &__nvoc_thunk_GpuResource_diagapiMap; + + pThis->__diagapiAccessCallback__ = 
&__nvoc_thunk_RmResource_diagapiAccessCallback; +} + +void __nvoc_init_funcTable_DiagApi(DiagApi *pThis) { + __nvoc_init_funcTable_DiagApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_DiagApi(DiagApi *pThis) { + pThis->__nvoc_pbase_DiagApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_DiagApi(pThis); +} + +NV_STATUS __nvoc_objCreate_DiagApi(DiagApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + DiagApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(DiagApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(DiagApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DiagApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_DiagApi(pThis); + status = __nvoc_ctor_DiagApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_DiagApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_DiagApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_DiagApi(DiagApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_DiagApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_subdevice_diag_nvoc.h b/src/nvidia/generated/g_subdevice_diag_nvoc.h new file mode 100644 index 000000000..22f708ed5 --- /dev/null +++ b/src/nvidia/generated/g_subdevice_diag_nvoc.h @@ -0,0 +1,375 @@ +#ifndef _G_SUBDEVICE_DIAG_NVOC_H_ +#define _G_SUBDEVICE_DIAG_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_subdevice_diag_nvoc.h" + +#ifndef _DIAGAPI_H_ +#define _DIAGAPI_H_ + +#include "class/cl208f.h" // NV208F_NOTIFIERS_MAXCOUNT +#include "ctrl/ctrl208f.h" // rmcontrol params + +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" +#include "rmapi/control.h" + +/*! + * RM internal class representing NV20_SUBDEVICE_DIAG + */ +#ifdef NVOC_SUBDEVICE_DIAG_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct DiagApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct DiagApi *__nvoc_pbase_DiagApi; + NV_STATUS (*__diagapiControl__)(struct DiagApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__diagapiControlFilter__)(struct DiagApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__diagapiCtrlCmdFifoCheckEngineContext__)(struct DiagApi *, NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdFifoEnableVirtualContext__)(struct DiagApi *, NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdFifoGetChannelState__)(struct DiagApi *, NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdFbCtrlGpuCache__)(struct DiagApi *, NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdFbEccSetKillPtr__)(struct DiagApi *, NV208F_CTRL_FB_ECC_SET_KILL_PTR_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdGpuGetRamSvopValues__)(struct DiagApi *, NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdGpuSetRamSvopValues__)(struct DiagApi *, NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdGpuVerifyInforom__)(struct DiagApi *, NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdBifPBIWriteCommand__)(struct DiagApi *, NV208F_CTRL_BIF_PBI_WRITE_COMMAND_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdBifConfigRegRead__)(struct DiagApi *, NV208F_CTRL_BIF_CONFIG_REG_READ_PARAMS *); + NV_STATUS 
(*__diagapiCtrlCmdBifConfigRegWrite__)(struct DiagApi *, NV208F_CTRL_BIF_CONFIG_REG_WRITE_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdBifInfo__)(struct DiagApi *, NV208F_CTRL_BIF_INFO_PARAMS *); + NV_STATUS (*__diagapiCtrlCmdMmuGetNumHshubmmus__)(struct DiagApi *, NV208F_CTRL_MMU_GET_NUM_HSHUBMMUS_PARAMS *); + NvBool (*__diagapiShareCallback__)(struct DiagApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__diagapiMapTo__)(struct DiagApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__diagapiGetOrAllocNotifShare__)(struct DiagApi *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__diagapiCheckMemInterUnmap__)(struct DiagApi *, NvBool); + NV_STATUS (*__diagapiGetMapAddrSpace__)(struct DiagApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__diagapiSetNotificationShare__)(struct DiagApi *, struct NotifShare *); + NvU32 (*__diagapiGetRefCount__)(struct DiagApi *); + void (*__diagapiAddAdditionalDependants__)(struct RsClient *, struct DiagApi *, RsResourceRef *); + NV_STATUS (*__diagapiControl_Prologue__)(struct DiagApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__diagapiGetRegBaseOffsetAndSize__)(struct DiagApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__diagapiInternalControlForward__)(struct DiagApi *, NvU32, void *, NvU32); + NV_STATUS (*__diagapiUnmapFrom__)(struct DiagApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__diagapiControl_Epilogue__)(struct DiagApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__diagapiControlLookup__)(struct DiagApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__diagapiGetInternalObjectHandle__)(struct DiagApi *); + NV_STATUS (*__diagapiUnmap__)(struct DiagApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__diagapiGetMemInterMapParams__)(struct DiagApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__diagapiGetMemoryMappingDescriptor__)(struct DiagApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__diagapiUnregisterEvent__)(struct DiagApi *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__diagapiCanCopy__)(struct DiagApi *); + void (*__diagapiPreDestruct__)(struct DiagApi *); + PEVENTNOTIFICATION *(*__diagapiGetNotificationListPtr__)(struct DiagApi *); + struct NotifShare *(*__diagapiGetNotificationShare__)(struct DiagApi *); + NV_STATUS (*__diagapiMap__)(struct DiagApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__diagapiAccessCallback__)(struct DiagApi *, struct RsClient *, void *, RsAccessRight); + NvU32 notifyActions[1]; +}; + +#ifndef __NVOC_CLASS_DiagApi_TYPEDEF__ +#define __NVOC_CLASS_DiagApi_TYPEDEF__ +typedef struct DiagApi DiagApi; +#endif /* __NVOC_CLASS_DiagApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_DiagApi +#define __nvoc_class_id_DiagApi 0xaa3066 +#endif /* __nvoc_class_id_DiagApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_DiagApi; + +#define __staticCast_DiagApi(pThis) \ + ((pThis)->__nvoc_pbase_DiagApi) + +#ifdef __nvoc_subdevice_diag_h_disabled +#define __dynamicCast_DiagApi(pThis) ((DiagApi*)NULL) +#else //__nvoc_subdevice_diag_h_disabled +#define __dynamicCast_DiagApi(pThis) \ + ((DiagApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DiagApi))) +#endif //__nvoc_subdevice_diag_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_DiagApi(DiagApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_DiagApi(DiagApi**, Dynamic*, NvU32, struct CALL_CONTEXT * 
arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_DiagApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_DiagApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define diagapiControl(pDiagApi, pCallContext, pParams) diagapiControl_DISPATCH(pDiagApi, pCallContext, pParams) +#define diagapiControlFilter(pDiagApi, pCallContext, pParams) diagapiControlFilter_DISPATCH(pDiagApi, pCallContext, pParams) +#define diagapiCtrlCmdFifoCheckEngineContext(pDiagApi, pCheckEngineContextParams) diagapiCtrlCmdFifoCheckEngineContext_DISPATCH(pDiagApi, pCheckEngineContextParams) +#define diagapiCtrlCmdFifoEnableVirtualContext(pDiagApi, pEnableVCParams) diagapiCtrlCmdFifoEnableVirtualContext_DISPATCH(pDiagApi, pEnableVCParams) +#define diagapiCtrlCmdFifoGetChannelState(pDiagApi, pChannelStateParams) diagapiCtrlCmdFifoGetChannelState_DISPATCH(pDiagApi, pChannelStateParams) +#define diagapiCtrlCmdFbCtrlGpuCache(pDiagApi, pGpuCacheParams) diagapiCtrlCmdFbCtrlGpuCache_DISPATCH(pDiagApi, pGpuCacheParams) +#define diagapiCtrlCmdFbEccSetKillPtr(pDiagApi, pParams) diagapiCtrlCmdFbEccSetKillPtr_DISPATCH(pDiagApi, pParams) +#define diagapiCtrlCmdGpuGetRamSvopValues(pDiagApi, pGetRamSvopParams) diagapiCtrlCmdGpuGetRamSvopValues_DISPATCH(pDiagApi, pGetRamSvopParams) +#define diagapiCtrlCmdGpuSetRamSvopValues(pDiagApi, pSetRamSvopParams) diagapiCtrlCmdGpuSetRamSvopValues_DISPATCH(pDiagApi, pSetRamSvopParams) +#define diagapiCtrlCmdGpuVerifyInforom(pDiagApi, pParams) diagapiCtrlCmdGpuVerifyInforom_DISPATCH(pDiagApi, pParams) +#define diagapiCtrlCmdBifPBIWriteCommand(pDiagApi, pWritePbiParams) diagapiCtrlCmdBifPBIWriteCommand_DISPATCH(pDiagApi, pWritePbiParams) +#define diagapiCtrlCmdBifConfigRegRead(pDiagApi, pReadConfigReg) diagapiCtrlCmdBifConfigRegRead_DISPATCH(pDiagApi, pReadConfigReg) +#define diagapiCtrlCmdBifConfigRegWrite(pDiagApi, pWriteConfigReg) diagapiCtrlCmdBifConfigRegWrite_DISPATCH(pDiagApi, pWriteConfigReg) +#define diagapiCtrlCmdBifInfo(pDiagApi, pInfo) diagapiCtrlCmdBifInfo_DISPATCH(pDiagApi, pInfo) +#define diagapiCtrlCmdMmuGetNumHshubmmus(pDiagApi, pParams) diagapiCtrlCmdMmuGetNumHshubmmus_DISPATCH(pDiagApi, pParams) +#define diagapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) diagapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define diagapiMapTo(pResource, pParams) diagapiMapTo_DISPATCH(pResource, pParams) +#define diagapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) diagapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define diagapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) diagapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define diagapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) diagapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define diagapiSetNotificationShare(pNotifier, pNotifShare) diagapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define diagapiGetRefCount(pResource) diagapiGetRefCount_DISPATCH(pResource) +#define diagapiAddAdditionalDependants(pClient, pResource, pReference) diagapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define diagapiControl_Prologue(pResource, pCallContext, pParams) diagapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define 
diagapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) diagapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define diagapiInternalControlForward(pGpuResource, command, pParams, size) diagapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define diagapiUnmapFrom(pResource, pParams) diagapiUnmapFrom_DISPATCH(pResource, pParams) +#define diagapiControl_Epilogue(pResource, pCallContext, pParams) diagapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define diagapiControlLookup(pResource, pParams, ppEntry) diagapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define diagapiGetInternalObjectHandle(pGpuResource) diagapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define diagapiUnmap(pGpuResource, pCallContext, pCpuMapping) diagapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define diagapiGetMemInterMapParams(pRmResource, pParams) diagapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define diagapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) diagapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define diagapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) diagapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define diagapiCanCopy(pResource) diagapiCanCopy_DISPATCH(pResource) +#define diagapiPreDestruct(pResource) diagapiPreDestruct_DISPATCH(pResource) +#define diagapiGetNotificationListPtr(pNotifier) diagapiGetNotificationListPtr_DISPATCH(pNotifier) +#define diagapiGetNotificationShare(pNotifier) diagapiGetNotificationShare_DISPATCH(pNotifier) +#define diagapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) diagapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define diagapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) diagapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS diagapiControl_IMPL(struct DiagApi *pDiagApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS diagapiControl_DISPATCH(struct DiagApi *pDiagApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDiagApi->__diagapiControl__(pDiagApi, pCallContext, pParams); +} + +NV_STATUS diagapiControlFilter_IMPL(struct DiagApi *pDiagApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS diagapiControlFilter_DISPATCH(struct DiagApi *pDiagApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pDiagApi->__diagapiControlFilter__(pDiagApi, pCallContext, pParams); +} + +NV_STATUS diagapiCtrlCmdFifoCheckEngineContext_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS *pCheckEngineContextParams); + +static inline NV_STATUS diagapiCtrlCmdFifoCheckEngineContext_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_FIFO_CHECK_ENGINE_CONTEXT_PARAMS *pCheckEngineContextParams) { + return pDiagApi->__diagapiCtrlCmdFifoCheckEngineContext__(pDiagApi, pCheckEngineContextParams); +} + +NV_STATUS diagapiCtrlCmdFifoEnableVirtualContext_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS *pEnableVCParams); + +static inline NV_STATUS diagapiCtrlCmdFifoEnableVirtualContext_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS *pEnableVCParams) { + return 
pDiagApi->__diagapiCtrlCmdFifoEnableVirtualContext__(pDiagApi, pEnableVCParams); +} + +NV_STATUS diagapiCtrlCmdFifoGetChannelState_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams); + +static inline NV_STATUS diagapiCtrlCmdFifoGetChannelState_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams) { + return pDiagApi->__diagapiCtrlCmdFifoGetChannelState__(pDiagApi, pChannelStateParams); +} + +NV_STATUS diagapiCtrlCmdFbCtrlGpuCache_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS *pGpuCacheParams); + +static inline NV_STATUS diagapiCtrlCmdFbCtrlGpuCache_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_FB_CTRL_GPU_CACHE_PARAMS *pGpuCacheParams) { + return pDiagApi->__diagapiCtrlCmdFbCtrlGpuCache__(pDiagApi, pGpuCacheParams); +} + +NV_STATUS diagapiCtrlCmdFbEccSetKillPtr_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_FB_ECC_SET_KILL_PTR_PARAMS *pParams); + +static inline NV_STATUS diagapiCtrlCmdFbEccSetKillPtr_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_FB_ECC_SET_KILL_PTR_PARAMS *pParams) { + return pDiagApi->__diagapiCtrlCmdFbEccSetKillPtr__(pDiagApi, pParams); +} + +NV_STATUS diagapiCtrlCmdGpuGetRamSvopValues_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS *pGetRamSvopParams); + +static inline NV_STATUS diagapiCtrlCmdGpuGetRamSvopValues_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS *pGetRamSvopParams) { + return pDiagApi->__diagapiCtrlCmdGpuGetRamSvopValues__(pDiagApi, pGetRamSvopParams); +} + +NV_STATUS diagapiCtrlCmdGpuSetRamSvopValues_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS *pSetRamSvopParams); + +static inline NV_STATUS diagapiCtrlCmdGpuSetRamSvopValues_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_GPU_RAM_SVOP_VALUES_PARAMS *pSetRamSvopParams) { + return pDiagApi->__diagapiCtrlCmdGpuSetRamSvopValues__(pDiagApi, pSetRamSvopParams); +} + +NV_STATUS diagapiCtrlCmdGpuVerifyInforom_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS *pParams); + +static inline NV_STATUS diagapiCtrlCmdGpuVerifyInforom_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_GPU_VERIFY_INFOROM_PARAMS *pParams) { + return pDiagApi->__diagapiCtrlCmdGpuVerifyInforom__(pDiagApi, pParams); +} + +NV_STATUS diagapiCtrlCmdBifPBIWriteCommand_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_BIF_PBI_WRITE_COMMAND_PARAMS *pWritePbiParams); + +static inline NV_STATUS diagapiCtrlCmdBifPBIWriteCommand_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_BIF_PBI_WRITE_COMMAND_PARAMS *pWritePbiParams) { + return pDiagApi->__diagapiCtrlCmdBifPBIWriteCommand__(pDiagApi, pWritePbiParams); +} + +NV_STATUS diagapiCtrlCmdBifConfigRegRead_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_BIF_CONFIG_REG_READ_PARAMS *pReadConfigReg); + +static inline NV_STATUS diagapiCtrlCmdBifConfigRegRead_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_BIF_CONFIG_REG_READ_PARAMS *pReadConfigReg) { + return pDiagApi->__diagapiCtrlCmdBifConfigRegRead__(pDiagApi, pReadConfigReg); +} + +NV_STATUS diagapiCtrlCmdBifConfigRegWrite_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_BIF_CONFIG_REG_WRITE_PARAMS *pWriteConfigReg); + +static inline NV_STATUS diagapiCtrlCmdBifConfigRegWrite_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_BIF_CONFIG_REG_WRITE_PARAMS *pWriteConfigReg) { + return pDiagApi->__diagapiCtrlCmdBifConfigRegWrite__(pDiagApi, pWriteConfigReg); +} + +NV_STATUS diagapiCtrlCmdBifInfo_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_BIF_INFO_PARAMS *pInfo); + +static inline NV_STATUS 
diagapiCtrlCmdBifInfo_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_BIF_INFO_PARAMS *pInfo) { + return pDiagApi->__diagapiCtrlCmdBifInfo__(pDiagApi, pInfo); +} + +NV_STATUS diagapiCtrlCmdMmuGetNumHshubmmus_IMPL(struct DiagApi *pDiagApi, NV208F_CTRL_MMU_GET_NUM_HSHUBMMUS_PARAMS *pParams); + +static inline NV_STATUS diagapiCtrlCmdMmuGetNumHshubmmus_DISPATCH(struct DiagApi *pDiagApi, NV208F_CTRL_MMU_GET_NUM_HSHUBMMUS_PARAMS *pParams) { + return pDiagApi->__diagapiCtrlCmdMmuGetNumHshubmmus__(pDiagApi, pParams); +} + +static inline NvBool diagapiShareCallback_DISPATCH(struct DiagApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__diagapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS diagapiMapTo_DISPATCH(struct DiagApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__diagapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS diagapiGetOrAllocNotifShare_DISPATCH(struct DiagApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__diagapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS diagapiCheckMemInterUnmap_DISPATCH(struct DiagApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__diagapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS diagapiGetMapAddrSpace_DISPATCH(struct DiagApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__diagapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void diagapiSetNotificationShare_DISPATCH(struct DiagApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__diagapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 diagapiGetRefCount_DISPATCH(struct DiagApi *pResource) { + return pResource->__diagapiGetRefCount__(pResource); +} + +static inline void diagapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DiagApi *pResource, RsResourceRef *pReference) { + pResource->__diagapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS diagapiControl_Prologue_DISPATCH(struct DiagApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__diagapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS diagapiGetRegBaseOffsetAndSize_DISPATCH(struct DiagApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__diagapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS diagapiInternalControlForward_DISPATCH(struct DiagApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__diagapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS diagapiUnmapFrom_DISPATCH(struct DiagApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__diagapiUnmapFrom__(pResource, pParams); +} + +static inline void diagapiControl_Epilogue_DISPATCH(struct DiagApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__diagapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS 
diagapiControlLookup_DISPATCH(struct DiagApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__diagapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle diagapiGetInternalObjectHandle_DISPATCH(struct DiagApi *pGpuResource) { + return pGpuResource->__diagapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS diagapiUnmap_DISPATCH(struct DiagApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__diagapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS diagapiGetMemInterMapParams_DISPATCH(struct DiagApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__diagapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS diagapiGetMemoryMappingDescriptor_DISPATCH(struct DiagApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__diagapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS diagapiUnregisterEvent_DISPATCH(struct DiagApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__diagapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool diagapiCanCopy_DISPATCH(struct DiagApi *pResource) { + return pResource->__diagapiCanCopy__(pResource); +} + +static inline void diagapiPreDestruct_DISPATCH(struct DiagApi *pResource) { + pResource->__diagapiPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *diagapiGetNotificationListPtr_DISPATCH(struct DiagApi *pNotifier) { + return pNotifier->__diagapiGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *diagapiGetNotificationShare_DISPATCH(struct DiagApi *pNotifier) { + return pNotifier->__diagapiGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS diagapiMap_DISPATCH(struct DiagApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__diagapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool diagapiAccessCallback_DISPATCH(struct DiagApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__diagapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS diagapiConstruct_IMPL(struct DiagApi *arg_pDiagApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_diagapiConstruct(arg_pDiagApi, arg_pCallContext, arg_pParams) diagapiConstruct_IMPL(arg_pDiagApi, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // _DIAGAPI_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SUBDEVICE_DIAG_NVOC_H_ diff --git a/src/nvidia/generated/g_subdevice_nvoc.c b/src/nvidia/generated/g_subdevice_nvoc.c new file mode 100644 index 000000000..05437f490 --- /dev/null +++ b/src/nvidia/generated/g_subdevice_nvoc.c @@ -0,0 +1,7913 @@ +#define NVOC_SUBDEVICE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_subdevice_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4b01b3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF 
__nvoc_class_def_Subdevice; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_Subdevice(Subdevice*, RmHalspecOwner* ); +void __nvoc_init_funcTable_Subdevice(Subdevice*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_Subdevice(Subdevice*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_Subdevice(Subdevice*, RmHalspecOwner* ); +void __nvoc_dtor_Subdevice(Subdevice*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_Subdevice = { + /*pClassDef=*/ &__nvoc_class_def_Subdevice, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Subdevice, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_Subdevice_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(Subdevice, __nvoc_base_Notifier), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_Subdevice = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_Subdevice_Subdevice, + &__nvoc_rtti_Subdevice_Notifier, + &__nvoc_rtti_Subdevice_INotifier, + &__nvoc_rtti_Subdevice_GpuResource, + &__nvoc_rtti_Subdevice_RmResource, + &__nvoc_rtti_Subdevice_RmResourceCommon, + &__nvoc_rtti_Subdevice_RsResource, + &__nvoc_rtti_Subdevice_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_Subdevice = +{ + /*classInfo=*/ { + /*size=*/ sizeof(Subdevice), + /*classId=*/ classId(Subdevice), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ 
"Subdevice", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Subdevice, + /*pCastInfo=*/ &__nvoc_castinfo_Subdevice, + /*pExportInfo=*/ &__nvoc_export_info_Subdevice +}; + +static void __nvoc_thunk_Subdevice_resPreDestruct(struct RsResource *pResource) { + subdevicePreDestruct((struct Subdevice *)(((unsigned char *)pResource) - __nvoc_rtti_Subdevice_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Subdevice_gpuresInternalControlForward(struct GpuResource *pSubdevice, NvU32 command, void *pParams, NvU32 size) { + return subdeviceInternalControlForward((struct Subdevice *)(((unsigned char *)pSubdevice) - __nvoc_rtti_Subdevice_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_Subdevice_resControlFilter(struct RsResource *pSubdevice, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return subdeviceControlFilter((struct Subdevice *)(((unsigned char *)pSubdevice) - __nvoc_rtti_Subdevice_RsResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_GpuResource_subdeviceShareCallback(struct Subdevice *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_subdeviceMapTo(struct Subdevice *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_subdeviceGetOrAllocNotifShare(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_subdeviceCheckMemInterUnmap(struct Subdevice *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Subdevice_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceGetMapAddrSpace(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_subdeviceSetNotificationShare(struct Subdevice *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_subdeviceGetRefCount(struct Subdevice *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_subdeviceAddAdditionalDependants(struct RsClient *pClient, struct Subdevice *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset), pReference); +} + +static NV_STATUS 
__nvoc_thunk_RmResource_subdeviceControl_Prologue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceGetRegBaseOffsetAndSize(struct Subdevice *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_RsResource_subdeviceUnmapFrom(struct Subdevice *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_subdeviceControl_Epilogue(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_subdeviceControlLookup(struct Subdevice *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_subdeviceGetInternalObjectHandle(struct Subdevice *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceControl(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceUnmap(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_subdeviceGetMemInterMapParams(struct Subdevice *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Subdevice_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_subdeviceGetMemoryMappingDescriptor(struct Subdevice *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Subdevice_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Notifier_subdeviceUnregisterEvent(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_subdeviceCanCopy(struct Subdevice *pResource) { + return resCanCopy((struct 
RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_subdeviceGetNotificationListPtr(struct Subdevice *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_subdeviceGetNotificationShare(struct Subdevice *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_Subdevice_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_subdeviceMap(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Subdevice_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_subdeviceAccessCallback(struct Subdevice *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Subdevice_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Subdevice[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800102u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetInfoV2" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNameString_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa10u) + /*flags=*/ 0xa10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800110u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetNameString" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4a10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetShortNameString_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4a10u) + /*flags=*/ 0x4a10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800111u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetShortNameString" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetPower_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800112u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_POWER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"subdeviceCtrlCmdGpuSetPower" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetSdm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800118u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SDM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetSdm" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetSimulationInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800119u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetSimulationInfo" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetSdm_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + /*flags=*/ 0x5u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800120u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_SDM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetSdm" +#endif + }, + { /* [7] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuExecRegOps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800122u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuExecRegOps" +#endif + }, + { /* [8] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngines_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800123u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngines" +#endif + }, + { /* [9] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngineClasslist_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800124u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngineClasslist" +#endif + }, + { /* [10] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngineFaultInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800125u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngineFaultInfo" +#endif + }, + { /* [11] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800128u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_QUERY_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryMode" +#endif + }, + { /* [12] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuPromoteCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuPromoteCtx" +#endif + }, + { /* [13] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuEvictCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_EVICT_CTX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuEvictCtx" +#endif + }, + { /* [14] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuInitializeCtx_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuInitializeCtx" +#endif + }, + { /* [15] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryEccStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*flags=*/ 0xa50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080012fu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryEccStatus" +#endif + }, + { /* [16] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetComputeModeRules_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u) + /*flags=*/ 0x844u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800130u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetComputeModeRules" +#endif + }, + { /* [17] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, 
+ /*methodId=*/ 0x20800131u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryComputeModeRules" +#endif + }, + { /* [18] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryEccConfiguration_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800133u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryEccConfiguration" +#endif + }, + { /* [19] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetEccConfiguration_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800134u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetEccConfiguration" +#endif + }, + { /* [20] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuResetEccErrorStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800136u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuResetEccErrorStatus" +#endif + }, + { /* [21] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetFermiGpcInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800137u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetFermiGpcInfo" +#endif + }, + { /* [22] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetFermiTpcInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800138u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetFermiTpcInfo" +#endif + }, + { /* [23] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetFermiZcullInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800139u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetFermiZcullInfo" +#endif + }, + { /* [24] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*flags=*/ 0x4210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080013fu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetOEMBoardInfo" +#endif + }, + { /* [25] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*flags=*/ 0x812u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800142u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetId" +#endif + }, + { /* [26] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuAcquireComputeModeReservation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800145u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuAcquireComputeModeReservation" +#endif + }, + { /* [27] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuReleaseComputeModeReservation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800146u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuReleaseComputeModeReservation" +#endif + }, + { /* [28] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800147u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEnginePartnerList" +#endif + }, + { /* [29] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetGidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*flags=*/ 0xa50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014au, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_GID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetGidInfo" +#endif + }, + { /* [30] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetInforomObjectVersion_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetInforomObjectVersion" +#endif + }, + { /* [31] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetOptimusInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetOptimusInfo" +#endif + }, + { /* [32] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetIpVersion_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080014du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetIpVersion" +#endif + }, + { /* [33] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryIllumSupport_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800153u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryIllumSupport" +#endif + }, + { /* [34] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetIllum_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800154u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GPU_ILLUM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetIllum" +#endif + }, + { /* [35] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetIllum_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800155u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GPU_ILLUM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetIllum" +#endif + }, + { /* [36] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetInforomImageVersion_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*flags=*/ 0x4210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800156u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetInforomImageVersion" +#endif + }, + { /* [37] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryInforomEccSupport_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 
0x20800157u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryInforomEccSupport" +#endif + }, + { /* [38] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080015au, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo" +#endif + }, + { /* [39] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080015bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu" +#endif + }, + { /* [40] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryScrubberStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080015fu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryScrubberStatus" +#endif + }, + { /* [41] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetVprCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800160u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetVprCaps" +#endif + }, + { /* [42] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuHandleGpuSR_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800167u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuHandleGpuSR" +#endif + }, + { /* [43] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPesInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800168u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PES_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPesInfo" +#endif + }, + { /* [44] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetOEMInfo_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + /*flags=*/ 0x4210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800169u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetOEMInfo" +#endif + }, + { /* [45] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetVprInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080016bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetVprInfo" +#endif + }, + { /* [46] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEncoderCapacity_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080016cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEncoderCapacity" +#endif + }, + { /* [47] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNvencSwSessionStats_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080016du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetNvencSwSessionStats" +#endif + }, + { /* [48] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNvencSwSessionInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080016eu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetNvencSwSessionInfo" +#endif + }, + { /* [49] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetFabricAddr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080016fu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetFabricAddr" +#endif + }, + { /* [50] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEnginesV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + /*flags=*/ 0x811u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800170u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEnginesV2" +#endif + }, + { /* [51] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuInterruptFunction_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800171u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuInterruptFunction" +#endif + }, + { /* [52] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800173u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuQueryFunctionStatus" +#endif + }, + { /* [53] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetPartitions_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800174u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetPartitions" +#endif + }, + { /* [54] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPartitions_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800175u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPartitions" +#endif + }, + { /* [55] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuReportNonReplayableFault_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800177u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuReportNonReplayableFault" +#endif + }, + { /* [56] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngineRunlistPriBase_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800179u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngineRunlistPriBase" +#endif + }, + { /* [57] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetHwEngineId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080017au, + /*paramSize=*/ 
sizeof(NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetHwEngineId" +#endif + }, + { /* [58] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080017bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetNvfbcSwSessionStats" +#endif + }, + { /* [59] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080017cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo" +#endif + }, + { /* [60] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPartitionCapacity_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800181u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPartitionCapacity" +#endif + }, + { /* [61] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetCachedInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + /*flags=*/ 0x813u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800182u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetCachedInfo" +#endif + }, + { /* [62] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetPartitioningMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800183u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetPartitioningMode" +#endif + }, + { /* [63] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuDescribePartitions_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800185u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuDescribePartitions" +#endif + }, + { /* [64] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ 
(void (*)(void)) subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*flags=*/ 0x50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800188u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetMaxSupportedPageSize" +#endif + }, + { /* [65] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetActivePartitionIds_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080018bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetActivePartitionIds" +#endif + }, + { /* [66] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPids_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080018du, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PIDS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPids" +#endif + }, + { /* [67] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetPidInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080018eu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_PID_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetPidInfo" +#endif + }, + { /* [68] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuSetComputePolicyConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800194u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuSetComputePolicyConfig" +#endif + }, + { /* [69] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetComputePolicyConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800195u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetComputePolicyConfig" +#endif + }, + { /* [70] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdValidateMemMapRequest_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800198u, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdValidateMemMapRequest" +#endif + }, + { /* [71] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x12u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x12u) + /*flags=*/ 0x12u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080019bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGpuGetEngineLoadTimes" +#endif + }, + { /* [72] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetNotification_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800301u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetNotification" +#endif + }, + { /* [73] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetTrigger_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800302u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetTrigger" +#endif + }, + { /* [74] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetMemoryNotifies_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800303u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetMemoryNotifies" +#endif + }, + { /* [75] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800304u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetSemaphoreMemory" +#endif + }, + { /* [76] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetSemaMemValidation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800306u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetSemaMemValidation" +#endif + }, + { /* [77] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEventSetTriggerFifo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + 
/*methodId=*/ 0x20800308u, + /*paramSize=*/ sizeof(NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEventSetTriggerFifo" +#endif + }, + { /* [78] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerSchedule_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800401u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerSchedule" +#endif + }, + { /* [79] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerCancel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800402u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerCancel" +#endif + }, + { /* [80] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetTime_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800403u, + /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_TIME_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerGetTime" +#endif + }, + { /* [81] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetRegisterOffset_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800404u, + /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerGetRegisterOffset" +#endif + }, + { /* [82] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800406u, + /*paramSize=*/ sizeof(NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo" +#endif + }, + { /* [83] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdTimerSetGrTickFreq_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*flags=*/ 0x2010u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800407u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdTimerSetGrTickFreq" +#endif + }, + { /* [84] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
subdeviceCtrlCmdI2cReadBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800601u, + /*paramSize=*/ sizeof(NV2080_CTRL_I2C_READ_BUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdI2cReadBuffer" +#endif + }, + { /* [85] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdI2cWriteBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800602u, + /*paramSize=*/ sizeof(NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdI2cWriteBuffer" +#endif + }, + { /* [86] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdI2cReadReg_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + /*flags=*/ 0x230u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800603u, + /*paramSize=*/ sizeof(NV2080_CTRL_I2C_RW_REG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdI2cReadReg" +#endif + }, + { /* [87] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdI2cWriteReg_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + /*flags=*/ 0x230u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800604u, + /*paramSize=*/ sizeof(NV2080_CTRL_I2C_RW_REG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdI2cWriteReg" +#endif + }, + { /* [88] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBiosGetSKUInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800808u, + /*paramSize=*/ sizeof(NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBiosGetSKUInfo" +#endif + }, + { /* [89] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBiosGetPostTime_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800809u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBiosGetPostTime" +#endif + }, + { /* [90] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBiosGetUefiSupport_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080080bu, + /*paramSize=*/ sizeof(NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBiosGetUefiSupport" +#endif + }, + { /* [91] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ 
(void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBiosGetInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800810u, + /*paramSize=*/ sizeof(NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBiosGetInfoV2" +#endif + }, + { /* [92] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetStaticInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a01u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayGetStaticInfo" +#endif + }, + { /* [93] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMemSysGetStaticConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a1cu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMemSysGetStaticConfig" +#endif + }, + { /* [94] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a1du, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer" +#endif + }, + { /* [95] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a1eu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer" +#endif + }, + { /* [96] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a1fu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetCaps" +#endif + }, + { /* [97] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a21u, + /*paramSize=*/ 0, + /*pClassInfo=*/ 
&(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer" +#endif + }, + { /* [98] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a22u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder" +#endif + }, + { /* [99] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMsencGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*flags=*/ 0x4600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a25u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMsencGetCaps" +#endif + }, + { /* [100] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a26u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks" +#endif + }, + { /* [101] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetCtxBufferPtes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a28u, + /*paramSize=*/ sizeof(NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferPtes" +#endif + }, + { /* [102] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a29u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize" +#endif + }, + { /* [103] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a2au, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetInfo" +#endif + }, + { /* [104] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetZcullInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a2cu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetZcullInfo" +#endif + }, + { /* [105] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetRopInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a2eu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetRopInfo" +#endif + }, + { /* [106] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetPpcMasks_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a30u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetPpcMasks" +#endif + }, + { /* [107] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a32u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo" +#endif + }, + { /* [108] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a34u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier" +#endif + }, + { /* [109] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetChipInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*flags=*/ 0x4600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a36u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetChipInfo" +#endif + }, + { /* [110] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a3du, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize" +#endif + }, + { /* [111] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a3fu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines" +#endif + }, + { /* [112] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*flags=*/ 0x4600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a40u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetDeviceInfoTable" +#endif + }, + { /* [113] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*flags=*/ 0x4600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a41u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetUserRegisterAccessMap" +#endif + }, + { /* [114] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetConstructedFalconInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a42u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetConstructedFalconInfo" +#endif + }, + { /* [115] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrInternalStaticGetPdbProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a48u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrInternalStaticGetPdbProperties" +#endif + }, + { /* [116] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayWriteInstMem_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a49u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayWriteInstMem" +#endif + }, + { /* [117] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a4au, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalRecoverAllComputeContexts" +#endif + }, + { /* [118] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetIpVersion_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a4bu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayGetIpVersion" +#endif + }, + { /* [119] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetSmcMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a4cu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetSmcMode" +#endif + }, + { /* [120] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a4du, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplaySetupRgLineIntr" +#endif + }, + { /* [121] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMemSysSetPartitionableMem_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a51u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMemSysSetPartitionableMem" +#endif + }, + { /* [122] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a53u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers" +#endif + }, + { /* [123] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetImportedImpData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a54u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplaySetImportedImpData" +#endif + }, + { /* [124] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalBusBindLocalGfidForP2p_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a55u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalBusBindLocalGfidForP2p" +#endif + }, + { /* [125] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a56u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p" +#endif + }, + { /* [126] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a58u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplaySetChannelPushbuffer" +#endif + }, + { /* [127] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGmmuGetStaticInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a59u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGmmuGetStaticInfo" +#endif + }, + { /* [128] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetHeapReservationSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a5bu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetHeapReservationSize" +#endif + }, + { /* [129] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void 
(*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdIntrGetKernelTable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a5cu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdIntrGetKernelTable" +#endif + }, + { /* [130] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDisplayGetDisplayMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a5du, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDisplayGetDisplayMask" +#endif + }, + { /* [131] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalFifoGetNumChannels_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + /*flags=*/ 0x2610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a61u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalFifoGetNumChannels" +#endif + }, + { /* [132] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a63u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles" +#endif + }, + { /* [133] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a65u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines" +#endif + }, + { /* [134] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + /*flags=*/ 0x2600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a66u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges" +#endif + }, + { /* [135] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
subdeviceCtrlCmdKMemSysGetMIGMemoryConfig_133e5e, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a67u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKMemSysGetMIGMemoryConfig" +#endif + }, + { /* [136] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbSetZbcReferenced_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a69u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbSetZbcReferenced" +#endif + }, + { /* [137] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalRcWatchdogTimeout_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a6au, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalRcWatchdogTimeout" +#endif + }, + { /* [138] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a6bu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable" +#endif + }, + { /* [139] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMemSysL2InvalidateEvict_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a6cu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMemSysL2InvalidateEvict" +#endif + }, + { /* [140] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a6du, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches" +#endif + }, + { /* [141] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMemSysDisableNvlinkPeers_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a6eu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"subdeviceCtrlCmdMemSysDisableNvlinkPeers" +#endif + }, + { /* [142] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMemSysProgramRawCompressionMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a6fu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMemSysProgramRawCompressionMode" +#endif + }, + { /* [143] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalBusFlushWithSysmembar_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a70u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalBusFlushWithSysmembar" +#endif + }, + { /* [144] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a71u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal" +#endif + }, + { /* [145] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a72u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote" +#endif + }, + { /* [146] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalBusDestroyP2pMailbox_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a73u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalBusDestroyP2pMailbox" +#endif + }, + { /* [147] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalBusCreateC2cPeerMapping_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a74u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalBusCreateC2cPeerMapping" +#endif + }, + { /* [148] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ 
(void (*)(void)) subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a75u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping" +#endif + }, + { /* [149] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfCudaLimitDisable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a7au, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfCudaLimitDisable" +#endif + }, + { /* [150] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfOptpCliClear_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a7cu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfOptpCliClear" +#endif + }, + { /* [151] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a7eu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl" +#endif + }, + { /* [152] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a7fu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits" +#endif + }, + { /* [153] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a80u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo" +#endif + }, + { /* [154] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdHshubPeerConnConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a88u, + /*paramSize=*/ 
sizeof(NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdHshubPeerConnConfig" +#endif + }, + { /* [155] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdHshubFirstLinkPeerId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a89u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdHshubFirstLinkPeerId" +#endif + }, + { /* [156] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdHshubGetHshubIdForLinks_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a8au, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdHshubGetHshubIdForLinks" +#endif + }, + { /* [157] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdHshubGetNumUnits_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a8bu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdHshubGetNumUnits" +#endif + }, + { /* [158] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdHshubNextHshubId_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a8cu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdHshubNextHshubId" +#endif + }, + { /* [159] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a98u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck" +#endif + }, + { /* [160] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a99u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet" +#endif + }, + { /* [161] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfBoostSet_2x_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a9au, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfBoostSet_2x" +#endif + }, + { /* [162] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a9bu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer" +#endif + }, + { /* [163] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a9cu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer" +#endif + }, + { /* [164] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a9du, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer" +#endif + }, + { /* [165] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800a9eu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer" +#endif + }, + { /* [166] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfBoostSet_3x_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800aa0u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfBoostSet_3x" +#endif + }, + { /* [167] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
subdeviceCtrlCmdInternalPerfBoostClear_3x_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + /*flags=*/ 0x610u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800aa1u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfBoostClear_3x" +#endif + }, + { /* [168] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + /*flags=*/ 0x400u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800aa7u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance" +#endif + }, + { /* [169] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + /*flags=*/ 0x400u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800aa9u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance" +#endif + }, + { /* [170] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBifGetStaticInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800aacu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBifGetStaticInfo" +#endif + }, + { /* [171] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800aadu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr" +#endif + }, + { /* [172] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800aaeu, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr" +#endif + }, + { /* [173] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBifGetAspmL1Flags_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ab0u, + /*paramSize=*/ 
sizeof(NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBifGetAspmL1Flags" +#endif + }, + { /* [174] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ab1u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount" +#endif + }, + { /* [175] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20800ab8u, + /*paramSize=*/ sizeof(NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdInternalGetPcieP2pCaps" +#endif + }, + { /* [176] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdSetGpfifo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801102u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_SET_GPFIFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdSetGpfifo" +#endif + }, + { /* [177] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoBindEngines_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801103u, + /*paramSize=*/ sizeof(NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoBindEngines" +#endif + }, + { /* [178] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdSetOperationalProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801104u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdSetOperationalProperties" +#endif + }, + { /* [179] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGetPhysicalChannelCount_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801108u, + /*paramSize=*/ sizeof(NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGetPhysicalChannelCount" +#endif + }, + { /* [180] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801109u, + /*paramSize=*/ sizeof(NV2080_CTRL_FIFO_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoGetInfo" +#endif + }, + { /* [181] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoDisableChannels_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080110bu, + /*paramSize=*/ sizeof(NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoDisableChannels" +#endif + }, + { /* [182] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoGetChannelMemInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080110cu, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoGetChannelMemInfo" +#endif + }, + { /* [183] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoGetUserdLocation_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080110du, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoGetUserdLocation" +#endif + }, + { /* [184] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoGetDeviceInfoTable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801112u, + /*paramSize=*/ sizeof(NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoGetDeviceInfoTable" +#endif + }, + { /* [185] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoClearFaultedBit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801113u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoClearFaultedBit" +#endif + }, + { /* [186] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2310u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoRunlistSetSchedPolicy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2310u) + /*flags=*/ 0x2310u, + /*accessRight=*/0x2u, + /*methodId=*/ 0x20801115u, + /*paramSize=*/ 
sizeof(NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoRunlistSetSchedPolicy" +#endif + }, + { /* [187] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoUpdateChannelInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801116u, + /*paramSize=*/ sizeof(NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoUpdateChannelInfo" +#endif + }, + { /* [188] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFifoDisableUsermodeChannels_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801117u, + /*paramSize=*/ sizeof(NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFifoDisableUsermodeChannels" +#endif + }, + { /* [189] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*flags=*/ 0x50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801201u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetInfo" +#endif + }, + { /* [190] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrCtxswZcullMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801205u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrCtxswZcullMode" +#endif + }, + { /* [191] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetZcullInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801206u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetZcullInfo" +#endif + }, + { /* [192] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrCtxswPmMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + /*flags=*/ 0x2010u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801207u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrCtxswPmMode" +#endif + }, + { /* [193] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
subdeviceCtrlCmdKGrCtxswZcullBind_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801208u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrCtxswZcullBind" +#endif + }, + { /* [194] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrCtxswPmBind_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801209u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrCtxswPmBind" +#endif + }, + { /* [195] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrSetGpcTileMap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080120au, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrSetGpcTileMap" +#endif + }, + { /* [196] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrCtxswSmpcMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080120eu, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrCtxswSmpcMode" +#endif + }, + { /* [197] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetSmToGpcTpcMappings_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080120fu, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetSmToGpcTpcMappings" +#endif + }, + { /* [198] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrSetCtxswPreemptionMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801210u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrSetCtxswPreemptionMode" +#endif + }, + { /* [199] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrCtxswPreemptionBind_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801211u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + 
/*func=*/ "subdeviceCtrlCmdKGrCtxswPreemptionBind" +#endif + }, + { /* [200] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrPcSamplingMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801212u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrPcSamplingMode" +#endif + }, + { /* [201] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetROPInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801213u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_ROP_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetROPInfo" +#endif + }, + { /* [202] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetCtxswStats_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801215u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetCtxswStats" +#endif + }, + { /* [203] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetCtxBufferSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*flags=*/ 0x50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801218u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferSize" +#endif + }, + { /* [204] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetCtxBufferInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801219u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetCtxBufferInfo" +#endif + }, + { /* [205] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetGlobalSmOrder_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080121bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetGlobalSmOrder" +#endif + }, + { /* [206] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetCurrentResidentChannel_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080121cu, + 
/*paramSize=*/ sizeof(NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetCurrentResidentChannel" +#endif + }, + { /* [207] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetVatAlarmData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080121du, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetVatAlarmData" +#endif + }, + { /* [208] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetAttributeBufferSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080121eu, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetAttributeBufferSize" +#endif + }, + { /* [209] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGfxPoolQuerySize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080121fu, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGfxPoolQuerySize" +#endif + }, + { /* [210] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGfxPoolInitialize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801220u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGfxPoolInitialize" +#endif + }, + { /* [211] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGfxPoolAddSlots_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801221u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGfxPoolAddSlots" +#endif + }, + { /* [212] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGfxPoolRemoveSlots_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + /*flags=*/ 0x2200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801222u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGfxPoolRemoveSlots" +#endif + }, + { /* [213] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + 
/*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*flags=*/ 0x812u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801227u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetCapsV2" +#endif + }, + { /* [214] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*flags=*/ 0x50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801228u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetInfoV2" +#endif + }, + { /* [215] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetGpcMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*flags=*/ 0x50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080122au, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_GPC_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetGpcMask" +#endif + }, + { /* [216] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetTpcMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080122bu, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_TPC_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetTpcMask" +#endif + }, + { /* [217] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrSetTpcPartitionMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080122cu, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrSetTpcPartitionMode" +#endif + }, + { /* [218] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetEngineContextProperties_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080122du, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetEngineContextProperties" +#endif + }, + { /* [219] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetSmIssueRateModifier_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801230u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if 
NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetSmIssueRateModifier" +#endif + }, + { /* [220] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrFecsBindEvtbufForUid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801231u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrFecsBindEvtbufForUid" +#endif + }, + { /* [221] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetPhysGpcMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + /*flags=*/ 0x44u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801232u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetPhysGpcMask" +#endif + }, + { /* [222] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetPpcMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801233u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_PPC_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetPpcMask" +#endif + }, + { /* [223] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetNumTpcsForGpc_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801234u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetNumTpcsForGpc" +#endif + }, + { /* [224] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetCtxswModes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801235u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetCtxswModes" +#endif + }, + { /* [225] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetGpcTileMap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801236u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetGpcTileMap" +#endif + }, + { /* [226] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrGetZcullMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*flags=*/ 0x50u, + /*accessRight=*/0x0u, + 
/*methodId=*/ 0x20801237u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrGetZcullMask" +#endif + }, + { /* [227] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8010u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8010u) + /*flags=*/ 0x8010u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801238u, + /*paramSize=*/ sizeof(NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2" +#endif + }, + { /* [228] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801301u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetInfo" +#endif + }, + { /* [229] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801303u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetInfoV2" +#endif + }, + { /* [230] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetCarveoutAddressInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080130bu, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetCarveoutAddressInfo" +#endif + }, + { /* [231] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetCalibrationLockFailed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080130cu, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetCalibrationLockFailed" +#endif + }, + { /* [232] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbFlushGpuCache_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080130eu, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbFlushGpuCache" +#endif + }, + { /* [233] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + 
/*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbSetGpuCacheAllocPolicy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080130fu, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbSetGpuCacheAllocPolicy" +#endif + }, + { /* [234] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetBar1Offset_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801310u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetBar1Offset" +#endif + }, + { /* [235] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetGpuCacheAllocPolicy_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801312u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetGpuCacheAllocPolicy" +#endif + }, + { /* [236] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbIsKind_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801313u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_IS_KIND_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbIsKind" +#endif + }, + { /* [237] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetGpuCacheInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801315u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetGpuCacheInfo" +#endif + }, + { /* [238] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801318u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2" +#endif + }, + { /* [239] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801319u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED 
+ /*func=*/ "subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2" +#endif + }, + { /* [240] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetFBRegionInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801320u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetFBRegionInfo" +#endif + }, + { /* [241] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetOfflinedPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801322u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetOfflinedPages" +#endif + }, + { /* [242] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetLTCInfoForFBP_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*flags=*/ 0xa50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801328u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetLTCInfoForFBP" +#endif + }, + { /* [243] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbCBCOp_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801337u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_FB_CBC_OP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbCBCOp" +#endif + }, + { /* [244] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetCtagsForCbcEviction_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801338u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetCtagsForCbcEviction" +#endif + }, + { /* [245] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbSetupVprRegion_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080133bu, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbSetupVprRegion" +#endif + }, + { /* [246] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetCliManagedOfflinedPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 
0x2080133cu, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetCliManagedOfflinedPages" +#endif + }, + { /* [247] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetCompBitCopyConstructInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080133du, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetCompBitCopyConstructInfo" +#endif + }, + { /* [248] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbSetRrd_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080133eu, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_SET_RRD_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbSetRrd" +#endif + }, + { /* [249] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbSetReadLimit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080133fu, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbSetReadLimit" +#endif + }, + { /* [250] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbSetWriteLimit_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801340u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbSetWriteLimit" +#endif + }, + { /* [251] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbPatchPbrForMining_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801341u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbPatchPbrForMining" +#endif + }, + { /* [252] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetMemAlignment_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + /*flags=*/ 0x50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801342u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetMemAlignment" +#endif + }, + { /* [253] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void 
(*)(void)) subdeviceCtrlCmdFbGetRemappedRows_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801344u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetRemappedRows" +#endif + }, + { /* [254] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetFsInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801346u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_FS_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetFsInfo" +#endif + }, + { /* [255] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetRowRemapperHistogram_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801347u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetRowRemapperHistogram" +#endif + }, + { /* [256] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetDynamicOfflinedPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801348u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetDynamicOfflinedPages" +#endif + }, + { /* [257] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbUpdateNumaStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801350u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbUpdateNumaStatus" +#endif + }, + { /* [258] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFbGetNumaInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801351u, + /*paramSize=*/ sizeof(NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFbGetNumaInfo" +#endif + }, + { /* [259] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMcGetArchInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + /*flags=*/ 0x812u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801701u, + /*paramSize=*/ sizeof(NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"subdeviceCtrlCmdMcGetArchInfo" +#endif + }, + { /* [260] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMcServiceInterrupts_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801702u, + /*paramSize=*/ sizeof(NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMcServiceInterrupts" +#endif + }, + { /* [261] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMcGetManufacturer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801703u, + /*paramSize=*/ sizeof(NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMcGetManufacturer" +#endif + }, + { /* [262] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMcQueryHostclkSlowdownStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801708u, + /*paramSize=*/ sizeof(NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMcQueryHostclkSlowdownStatus" +#endif + }, + { /* [263] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMcSetHostclkSlowdownStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801709u, + /*paramSize=*/ sizeof(NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMcSetHostclkSlowdownStatus" +#endif + }, + { /* [264] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdMcChangeReplayableFaultOwnership_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080170cu, + /*paramSize=*/ sizeof(NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdMcChangeReplayableFaultOwnership" +#endif + }, + { /* [265] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPciInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801801u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetPciInfo" +#endif + }, + { /* [266] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + 
/*accessRight=*/0x0u, + /*methodId=*/ 0x20801802u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetInfo" +#endif + }, + { /* [267] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPciBarInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801803u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetPciBarInfo" +#endif + }, + { /* [268] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusSetPcieLinkWidth_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801804u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusSetPcieLinkWidth" +#endif + }, + { /* [269] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusSetPcieSpeed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801805u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusSetPcieSpeed" +#endif + }, + { /* [270] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801806u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed" +#endif + }, + { /* [271] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801807u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed" +#endif + }, + { /* [272] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusHWBCGetUpstreamBAR0_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080180eu, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusHWBCGetUpstreamBAR0" +#endif + }, + { /* [273] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusServiceGpuMultifunctionState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801812u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusServiceGpuMultifunctionState" +#endif + }, + { /* [274] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPexCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801813u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetPexCounters" +#endif + }, + { /* [275] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusClearPexCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801814u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusClearPexCounters" +#endif + }, + { /* [276] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusFreezePexCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801815u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusFreezePexCounters" +#endif + }, + { /* [277] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPexLaneCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801816u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetPexLaneCounters" +#endif + }, + { /* [278] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPcieLtrLatency_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801817u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetPcieLtrLatency" +#endif + }, + { /* [279] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusSetPcieLtrLatency_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801818u, + 
/*paramSize=*/ sizeof(NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusSetPcieLtrLatency" +#endif + }, + { /* [280] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetPexUtilCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801819u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetPexUtilCounters" +#endif + }, + { /* [281] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusClearPexUtilCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801820u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusClearPexUtilCounters" +#endif + }, + { /* [282] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetBFD_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801821u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_BFD_PARAMSARR), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetBFD" +#endif + }, + { /* [283] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetAspmDisableFlags_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801822u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetAspmDisableFlags" +#endif + }, + { /* [284] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetInfoV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801823u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_INFO_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetInfoV2" +#endif + }, + { /* [285] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusControlPublicAspmBits_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801824u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusControlPublicAspmBits" +#endif + }, + { /* [286] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void 
(*)(void)) subdeviceCtrlCmdBusGetNvlinkPeerIdMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801825u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetNvlinkPeerIdMask" +#endif + }, + { /* [287] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusSetEomParameters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801826u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusSetEomParameters" +#endif + }, + { /* [288] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetUphyDlnCfgSpace_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801827u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetUphyDlnCfgSpace" +#endif + }, + { /* [289] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetEomStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20801828u, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetEomStatus" +#endif + }, + { /* [290] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusSysmemAccess_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10200u) + /*flags=*/ 0x10200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080182cu, + /*paramSize=*/ sizeof(NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusSysmemAccess" +#endif + }, + { /* [291] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdKPerfBoost_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080200au, + /*paramSize=*/ sizeof(NV2080_CTRL_PERF_BOOST_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdKPerfBoost" +#endif + }, + { /* [292] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdPerfRatedTdpGetControl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080206eu, + /*paramSize=*/ sizeof(NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ 
"subdeviceCtrlCmdPerfRatedTdpGetControl" +#endif + }, + { /* [293] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdPerfRatedTdpSetControl_a2e9a2, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080206fu, + /*paramSize=*/ sizeof(NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdPerfRatedTdpSetControl" +#endif + }, + { /* [294] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdPerfReservePerfmonHw_3f0664, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802093u, + /*paramSize=*/ sizeof(NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdPerfReservePerfmonHw" +#endif + }, + { /* [295] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802096u, + /*paramSize=*/ sizeof(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2" +#endif + }, + { /* [296] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcReadVirtualMem_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802204u, + /*paramSize=*/ sizeof(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcReadVirtualMem" +#endif + }, + { /* [297] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcGetErrorCount_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802205u, + /*paramSize=*/ sizeof(NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcGetErrorCount" +#endif + }, + { /* [298] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcSetCleanErrorHistory_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802207u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcSetCleanErrorHistory" +#endif + }, + { /* [299] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcGetWatchdogInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802209u, + 
/*paramSize=*/ sizeof(NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcGetWatchdogInfo" +#endif + }, + { /* [300] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcDisableWatchdog_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080220au, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcDisableWatchdog" +#endif + }, + { /* [301] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcEnableWatchdog_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080220bu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcEnableWatchdog" +#endif + }, + { /* [302] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcReleaseWatchdogRequests_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080220cu, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcReleaseWatchdogRequests" +#endif + }, + { /* [303] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdSetRcRecovery_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080220du, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_RC_RECOVERY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdSetRcRecovery" +#endif + }, + { /* [304] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGetRcRecovery_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080220eu, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_RC_RECOVERY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGetRcRecovery" +#endif + }, + { /* [305] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcSoftDisableWatchdog_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802210u, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcSoftDisableWatchdog" +#endif + }, + { /* [306] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdSetRcInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802211u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_RC_INFO_PARAMS), + 
/*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdSetRcInfo" +#endif + }, + { /* [307] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGetRcInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802212u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_RC_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGetRcInfo" +#endif + }, + { /* [308] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdRcGetErrorV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802213u, + /*paramSize=*/ sizeof(NV2080_CTRL_RC_GET_ERROR_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdRcGetErrorV2" +#endif + }, + { /* [309] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvdGetDumpSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802401u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvdGetDumpSize" +#endif + }, + { /* [310] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvdGetDump_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802402u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVD_GET_DUMP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvdGetDump" +#endif + }, + { /* [311] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvdGetNocatJournalRpt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802409u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvdGetNocatJournalRpt" +#endif + }, + { /* [312] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvdSetNocatJournalData_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080240bu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvdSetNocatJournalData" +#endif + }, + { /* [313] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDmaInvalidateTLB_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802502u, + 
/*paramSize=*/ sizeof(NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDmaInvalidateTLB" +#endif + }, + { /* [314] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdDmaGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802503u, + /*paramSize=*/ sizeof(NV2080_CTRL_DMA_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdDmaGetInfo" +#endif + }, + { /* [315] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeGetCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a01u, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_GET_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeGetCaps" +#endif + }, + { /* [316] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeGetCePceMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*flags=*/ 0x211u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a02u, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeGetCePceMask" +#endif + }, + { /* [317] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeGetCapsV2_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a03u, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeGetCapsV2" +#endif + }, + { /* [318] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeUpdatePceLceMappings_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a05u, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeUpdatePceLceMappings" +#endif + }, + { /* [319] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeUpdateClassDB_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + /*flags=*/ 0x600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a06u, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeUpdateClassDB" +#endif + }, + { /* [320] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeGetPhysicalCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe40u) + 
/*flags=*/ 0xe40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a07u, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_GET_CAPS_V2_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeGetPhysicalCaps" +#endif + }, + { /* [321] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeGetFaultMethodBufferSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4200u) + /*flags=*/ 0x4200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a08u, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeGetFaultMethodBufferSize" +#endif + }, + { /* [322] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeGetHubPceMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + /*flags=*/ 0x4600u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a09u, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeGetHubPceMask" +#endif + }, + { /* [323] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeGetAllCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a0au, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeGetAllCaps" +#endif + }, + { /* [324] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe40u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdCeGetAllPhysicalCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe40u) + /*flags=*/ 0xe40u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20802a0bu, + /*paramSize=*/ sizeof(NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdCeGetAllPhysicalCaps" +#endif + }, + { /* [325] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetNvlinkCaps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + /*flags=*/ 0x850u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803001u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetNvlinkCaps" +#endif + }, + { /* [326] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdBusGetNvlinkStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803002u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdBusGetNvlinkStatus" +#endif + }, + { /* [327] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + 
/*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGetNvlinkCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803004u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGetNvlinkCounters" +#endif + }, + { /* [328] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdClearNvlinkCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803005u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdClearNvlinkCounters" +#endif + }, + { /* [329] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803009u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts" +#endif + }, + { /* [330] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkSetupEom_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080300cu, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkSetupEom" +#endif + }, + { /* [331] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetPowerState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080300eu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetPowerState" +#endif + }, + { /* [332] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkReadTpCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803015u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkReadTpCounters" +#endif + }, + { /* [333] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkEnableNvlinkPeer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803017u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS), + /*pClassInfo=*/ 
&(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkEnableNvlinkPeer" +#endif + }, + { /* [334] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetLpCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803018u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetLpCounters" +#endif + }, + { /* [335] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkCoreCallback_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803019u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkCoreCallback" +#endif + }, + { /* [336] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080301bu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid" +#endif + }, + { /* [337] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkUpdateHshubMux_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080301cu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkUpdateHshubMux" +#endif + }, + { /* [338] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080301du, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer" +#endif + }, + { /* [339] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080301eu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer" +#endif + }, + { /* [340] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) 
subdeviceCtrlCmdNvlinkRemoveNvlinkMapping_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080301fu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkRemoveNvlinkMapping" +#endif + }, + { /* [341] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkSaveRestoreHshubState_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803020u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkSaveRestoreHshubState" +#endif + }, + { /* [342] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkProgramBufferready_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803021u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkProgramBufferready" +#endif + }, + { /* [343] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkUpdateCurrentConfig_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803022u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkUpdateCurrentConfig" +#endif + }, + { /* [344] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkSetLoopbackMode_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803023u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkSetLoopbackMode" +#endif + }, + { /* [345] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkUpdatePeerLinkMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803024u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkUpdatePeerLinkMask" +#endif + }, + { /* [346] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkUpdateLinkConnection_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803025u, + /*paramSize=*/ 
sizeof(NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkUpdateLinkConnection" +#endif + }, + { /* [347] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkEnableLinksPostTopology_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803026u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkEnableLinksPostTopology" +#endif + }, + { /* [348] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetRefreshCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803028u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetRefreshCounters" +#endif + }, + { /* [349] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkClearRefreshCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803029u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkClearRefreshCounters" +#endif + }, + { /* [350] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080302au, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet" +#endif + }, + { /* [351] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080302cu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo" +#endif + }, + { /* [352] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080302du, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo" +#endif + }, + { 
/* [353] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkProgramLinkSpeed_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080302eu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkProgramLinkSpeed" +#endif + }, + { /* [354] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkAreLinksTrained_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080302fu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkAreLinksTrained" +#endif + }, + { /* [355] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkResetLinks_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803030u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkResetLinks" +#endif + }, + { /* [356] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkDisableDlInterrupts_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803031u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkDisableDlInterrupts" +#endif + }, + { /* [357] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetLinkAndClockInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803032u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetLinkAndClockInfo" +#endif + }, + { /* [358] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkSetupNvlinkSysmem_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803033u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkSetupNvlinkSysmem" +#endif + }, + { /* [359] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkProcessForcedConfigs_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + 
/*methodId=*/ 0x20803034u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkProcessForcedConfigs" +#endif + }, + { /* [360] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkSyncLaneShutdownProps_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803035u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkSyncLaneShutdownProps" +#endif + }, + { /* [361] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803036u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts" +#endif + }, + { /* [362] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803037u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask" +#endif + }, + { /* [363] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803038u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr" +#endif + }, + { /* [364] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + /*flags=*/ 0x201u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803039u, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo" +#endif + }, + { /* [365] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkEnableLinks_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080303au, + /*paramSize=*/ 0, + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkEnableLinks" 
+#endif + }, + { /* [366] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdNvlinkProcessInitDisabledLinks_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080303bu, + /*paramSize=*/ sizeof(NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdNvlinkProcessInitDisabledLinks" +#endif + }, + { /* [367] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnGetDmemUsage_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803101u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnGetDmemUsage" +#endif + }, + { /* [368] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnInstrumentationMap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803112u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnInstrumentationMap" +#endif + }, + { /* [369] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnInstrumentationUnmap_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803113u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnInstrumentationUnmap" +#endif + }, + { /* [370] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnInstrumentationGetInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803114u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_INSTRUMENTATION_GET_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnInstrumentationGetInfo" +#endif + }, + { /* [371] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnInstrumentationGetControl_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803115u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnInstrumentationGetControl" +#endif + }, + { /* [372] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnInstrumentationSetControl_IMPL, +#endif // 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803116u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnInstrumentationSetControl" +#endif + }, + { /* [373] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnInstrumentationRecalibrate_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803117u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_INSTRUMENTATION_RECALIBRATE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnInstrumentationRecalibrate" +#endif + }, + { /* [374] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnGetEngineArch_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803118u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnGetEngineArch" +#endif + }, + { /* [375] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnUstreamerQueueInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803120u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnUstreamerQueueInfo" +#endif + }, + { /* [376] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnUstreamerControlGet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803122u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlGet" +#endif + }, + { /* [377] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnUstreamerControlSet_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + /*flags=*/ 0x204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803123u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnUstreamerControlSet" +#endif + }, + { /* [378] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnGetCtxBufferInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + /*flags=*/ 0x200u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803124u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + 
/*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferInfo" +#endif + }, + { /* [379] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlcnGetCtxBufferSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803125u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlcnGetCtxBufferSize" +#endif + }, + { /* [380] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdEccGetClientExposedCounters_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803400u, + /*paramSize=*/ sizeof(NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdEccGetClientExposedCounters" +#endif + }, + { /* [381] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlaRange_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + /*flags=*/ 0x810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803501u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLA_RANGE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlaRange" +#endif + }, + { /* [382] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlaSetupInstanceMemBlock_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + /*flags=*/ 0x2204u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803502u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlaSetupInstanceMemBlock" +#endif + }, + { /* [383] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlaGetRange_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803503u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLA_GET_RANGE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlaGetRange" +#endif + }, + { /* [384] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdFlaGetFabricMemStats_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u) + /*flags=*/ 0x1810u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803504u, + /*paramSize=*/ sizeof(NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdFlaGetFabricMemStats" +#endif + }, + { /* [385] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGspGetFeatures_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + /*flags=*/ 0x211u, + /*accessRight=*/0x0u, + /*methodId=*/ 
0x20803601u, + /*paramSize=*/ sizeof(NV2080_CTRL_GSP_GET_FEATURES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGspGetFeatures" +#endif + }, + { /* [386] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGrmgrGetGrFsInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803801u, + /*paramSize=*/ sizeof(NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGrmgrGetGrFsInfo" +#endif + }, + { /* [387] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + /*flags=*/ 0x3u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d01u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixGc6BlockerRefCnt" +#endif + }, + { /* [388] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + /*flags=*/ 0x11u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d02u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixAllowDisallowGcoff" +#endif + }, + { /* [389] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + /*flags=*/ 0x1u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d03u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixAudioDynamicPower" +#endif + }, + { /* [390] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixVidmemPersistenceStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + /*flags=*/ 0x13u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d07u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixVidmemPersistenceStatus" +#endif + }, + { /* [391] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdOsUnixUpdateTgpStatus_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + /*flags=*/ 0x7u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20803d08u, + /*paramSize=*/ sizeof(NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdOsUnixUpdateTgpStatus" +#endif + }, + { /* [392] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*pFunc=*/ 
(void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdGetAvailableHshubMask_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + /*flags=*/ 0xa50u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x20804101u, + /*paramSize=*/ sizeof(NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdGetAvailableHshubMask" +#endif + }, + { /* [393] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x2080a083u, + /*paramSize=*/ sizeof(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM), + /*pClassInfo=*/ &(__nvoc_class_def_Subdevice.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_Subdevice = +{ + /*numEntries=*/ 394, + /*pExportEntries=*/ __nvoc_exported_method_def_Subdevice +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_Subdevice(Subdevice *pThis) { + __nvoc_subdeviceDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail_GpuResource; + status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail_Notifier; + __nvoc_init_dataField_Subdevice(pThis, pRmhalspecowner); + + status = __nvoc_subdeviceConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_Subdevice_fail__init; + goto __nvoc_ctor_Subdevice_exit; // Success + +__nvoc_ctor_Subdevice_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_Subdevice_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_Subdevice_fail_GpuResource: +__nvoc_ctor_Subdevice_exit: + + return status; +} + +static void __nvoc_init_funcTable_Subdevice_1(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + 
PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__subdevicePreDestruct__ = &subdevicePreDestruct_IMPL; + + pThis->__subdeviceInternalControlForward__ = &subdeviceInternalControlForward_IMPL; + + pThis->__subdeviceControlFilter__ = &subdeviceControlFilter_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBiosGetInfoV2__ = &subdeviceCtrlCmdBiosGetInfoV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBiosGetSKUInfo__ = &subdeviceCtrlCmdBiosGetSKUInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBiosGetPostTime__ = &subdeviceCtrlCmdBiosGetPostTime_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBiosGetUefiSupport__ = &subdeviceCtrlCmdBiosGetUefiSupport_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + pThis->__subdeviceCtrlCmdMcGetArchInfo__ = &subdeviceCtrlCmdMcGetArchInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdMcGetManufacturer__ = &subdeviceCtrlCmdMcGetManufacturer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdMcQueryHostclkSlowdownStatus__ = &subdeviceCtrlCmdMcQueryHostclkSlowdownStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdMcSetHostclkSlowdownStatus__ = &subdeviceCtrlCmdMcSetHostclkSlowdownStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdMcChangeReplayableFaultOwnership__ = &subdeviceCtrlCmdMcChangeReplayableFaultOwnership_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdMcServiceInterrupts__ = &subdeviceCtrlCmdMcServiceInterrupts_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdDmaInvalidateTLB__ = &subdeviceCtrlCmdDmaInvalidateTLB_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdDmaGetInfo__ = &subdeviceCtrlCmdDmaGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdBusGetPciInfo__ = &subdeviceCtrlCmdBusGetPciInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdBusGetInfo__ = &subdeviceCtrlCmdBusGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdBusGetInfoV2__ = &subdeviceCtrlCmdBusGetInfoV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdBusGetPciBarInfo__ = &subdeviceCtrlCmdBusGetPciBarInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdBusSetPcieSpeed__ = &subdeviceCtrlCmdBusSetPcieSpeed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdBusSetPcieLinkWidth__ = &subdeviceCtrlCmdBusSetPcieLinkWidth_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed__ = &subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed__ = &subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdBusHWBCGetUpstreamBAR0__ = &subdeviceCtrlCmdBusHWBCGetUpstreamBAR0_IMPL; +#endif + 
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusServiceGpuMultifunctionState__ = &subdeviceCtrlCmdBusServiceGpuMultifunctionState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusGetPexCounters__ = &subdeviceCtrlCmdBusGetPexCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdBusGetBFD__ = &subdeviceCtrlCmdBusGetBFD_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdBusGetAspmDisableFlags__ = &subdeviceCtrlCmdBusGetAspmDisableFlags_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusControlPublicAspmBits__ = &subdeviceCtrlCmdBusControlPublicAspmBits_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusClearPexCounters__ = &subdeviceCtrlCmdBusClearPexCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusGetPexUtilCounters__ = &subdeviceCtrlCmdBusGetPexUtilCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusClearPexUtilCounters__ = &subdeviceCtrlCmdBusClearPexUtilCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusFreezePexCounters__ = &subdeviceCtrlCmdBusFreezePexCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusGetPexLaneCounters__ = &subdeviceCtrlCmdBusGetPexLaneCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusGetPcieLtrLatency__ = &subdeviceCtrlCmdBusGetPcieLtrLatency_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusSetPcieLtrLatency__ = &subdeviceCtrlCmdBusSetPcieLtrLatency_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdBusGetNvlinkPeerIdMask__ = &subdeviceCtrlCmdBusGetNvlinkPeerIdMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusSetEomParameters__ = &subdeviceCtrlCmdBusSetEomParameters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusGetUphyDlnCfgSpace__ = &subdeviceCtrlCmdBusGetUphyDlnCfgSpace_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdBusGetEomStatus__ = &subdeviceCtrlCmdBusGetEomStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10200u) + pThis->__subdeviceCtrlCmdBusSysmemAccess__ = &subdeviceCtrlCmdBusSysmemAccess_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdBusGetNvlinkCaps__ = &subdeviceCtrlCmdBusGetNvlinkCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__subdeviceCtrlCmdBusGetNvlinkStatus__ = &subdeviceCtrlCmdBusGetNvlinkStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGetNvlinkCounters__ = &subdeviceCtrlCmdGetNvlinkCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdClearNvlinkCounters__ = &subdeviceCtrlCmdClearNvlinkCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts__ = &subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + 
pThis->__subdeviceCtrlCmdNvlinkSetupEom__ = &subdeviceCtrlCmdNvlinkSetupEom_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdNvlinkGetPowerState__ = &subdeviceCtrlCmdNvlinkGetPowerState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdNvlinkReadTpCounters__ = &subdeviceCtrlCmdNvlinkReadTpCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdNvlinkGetLpCounters__ = &subdeviceCtrlCmdNvlinkGetLpCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkEnableNvlinkPeer__ = &subdeviceCtrlCmdNvlinkEnableNvlinkPeer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkCoreCallback__ = &subdeviceCtrlCmdNvlinkCoreCallback_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkSetLoopbackMode__ = &subdeviceCtrlCmdNvlinkSetLoopbackMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid__ = &subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkUpdateHshubMux__ = &subdeviceCtrlCmdNvlinkUpdateHshubMux_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer__ = &subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer__ = &subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkRemoveNvlinkMapping__ = &subdeviceCtrlCmdNvlinkRemoveNvlinkMapping_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkSaveRestoreHshubState__ = &subdeviceCtrlCmdNvlinkSaveRestoreHshubState_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkProgramBufferready__ = &subdeviceCtrlCmdNvlinkProgramBufferready_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkUpdateCurrentConfig__ = &subdeviceCtrlCmdNvlinkUpdateCurrentConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkUpdatePeerLinkMask__ = &subdeviceCtrlCmdNvlinkUpdatePeerLinkMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkUpdateLinkConnection__ = &subdeviceCtrlCmdNvlinkUpdateLinkConnection_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkEnableLinksPostTopology__ = &subdeviceCtrlCmdNvlinkEnableLinksPostTopology_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkGetRefreshCounters__ = &subdeviceCtrlCmdNvlinkGetRefreshCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkClearRefreshCounters__ = &subdeviceCtrlCmdNvlinkClearRefreshCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet__ = &subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo__ = &subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo_IMPL; 
+#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo__ = &subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkProgramLinkSpeed__ = &subdeviceCtrlCmdNvlinkProgramLinkSpeed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkAreLinksTrained__ = &subdeviceCtrlCmdNvlinkAreLinksTrained_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkResetLinks__ = &subdeviceCtrlCmdNvlinkResetLinks_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkDisableDlInterrupts__ = &subdeviceCtrlCmdNvlinkDisableDlInterrupts_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkGetLinkAndClockInfo__ = &subdeviceCtrlCmdNvlinkGetLinkAndClockInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkSetupNvlinkSysmem__ = &subdeviceCtrlCmdNvlinkSetupNvlinkSysmem_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkProcessForcedConfigs__ = &subdeviceCtrlCmdNvlinkProcessForcedConfigs_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkSyncLaneShutdownProps__ = &subdeviceCtrlCmdNvlinkSyncLaneShutdownProps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts__ = &subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask__ = &subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr__ = &subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x201u) + pThis->__subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo__ = &subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkEnableLinks__ = &subdeviceCtrlCmdNvlinkEnableLinks_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdNvlinkProcessInitDisabledLinks__ = &subdeviceCtrlCmdNvlinkProcessInitDisabledLinks_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdI2cReadBuffer__ = &subdeviceCtrlCmdI2cReadBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdI2cWriteBuffer__ = &subdeviceCtrlCmdI2cWriteBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + pThis->__subdeviceCtrlCmdI2cReadReg__ = &subdeviceCtrlCmdI2cReadReg_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x230u) + pThis->__subdeviceCtrlCmdI2cWriteReg__ = &subdeviceCtrlCmdI2cWriteReg_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples__ = &subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2__ = &subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + 
pThis->__subdeviceCtrlCmdPerfRatedTdpGetControl__ = &subdeviceCtrlCmdPerfRatedTdpGetControl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdPerfRatedTdpSetControl__ = &subdeviceCtrlCmdPerfRatedTdpSetControl_a2e9a2; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdPerfReservePerfmonHw__ = &subdeviceCtrlCmdPerfReservePerfmonHw_3f0664; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__subdeviceCtrlCmdKPerfBoost__ = &subdeviceCtrlCmdKPerfBoost_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbGetFBRegionInfo__ = &subdeviceCtrlCmdFbGetFBRegionInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbGetBar1Offset__ = &subdeviceCtrlCmdFbGetBar1Offset_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdFbIsKind__ = &subdeviceCtrlCmdFbIsKind_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + pThis->__subdeviceCtrlCmdFbGetMemAlignment__ = &subdeviceCtrlCmdFbGetMemAlignment_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdFbGetHeapReservationSize__ = &subdeviceCtrlCmdFbGetHeapReservationSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdFbGetInfo__ = &subdeviceCtrlCmdFbGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdFbGetInfoV2__ = &subdeviceCtrlCmdFbGetInfoV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbGetCarveoutAddressInfo__ = &subdeviceCtrlCmdFbGetCarveoutAddressInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFbGetCalibrationLockFailed__ = &subdeviceCtrlCmdFbGetCalibrationLockFailed_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbFlushGpuCache__ = &subdeviceCtrlCmdFbFlushGpuCache_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbSetGpuCacheAllocPolicy__ = &subdeviceCtrlCmdFbSetGpuCacheAllocPolicy_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbGetGpuCacheAllocPolicy__ = &subdeviceCtrlCmdFbGetGpuCacheAllocPolicy_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2__ = &subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2__ = &subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFbGetGpuCacheInfo__ = &subdeviceCtrlCmdFbGetGpuCacheInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdFbGetCliManagedOfflinedPages__ = &subdeviceCtrlCmdFbGetCliManagedOfflinedPages_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFbGetOfflinedPages__ = &subdeviceCtrlCmdFbGetOfflinedPages_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdFbSetupVprRegion__ = &subdeviceCtrlCmdFbSetupVprRegion_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + pThis->__subdeviceCtrlCmdFbGetLTCInfoForFBP__ = &subdeviceCtrlCmdFbGetLTCInfoForFBP_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFbGetCompBitCopyConstructInfo__ = &subdeviceCtrlCmdFbGetCompBitCopyConstructInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFbPatchPbrForMining__ = &subdeviceCtrlCmdFbPatchPbrForMining_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFbGetRemappedRows__ = &subdeviceCtrlCmdFbGetRemappedRows_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFbGetFsInfo__ = &subdeviceCtrlCmdFbGetFsInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFbGetRowRemapperHistogram__ = &subdeviceCtrlCmdFbGetRowRemapperHistogram_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdFbGetDynamicOfflinedPages__ = &subdeviceCtrlCmdFbGetDynamicOfflinedPages_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__subdeviceCtrlCmdFbUpdateNumaStatus__ = &subdeviceCtrlCmdFbUpdateNumaStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__subdeviceCtrlCmdFbGetNumaInfo__ = &subdeviceCtrlCmdFbGetNumaInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdMemSysGetStaticConfig__ = &subdeviceCtrlCmdMemSysGetStaticConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdMemSysSetPartitionableMem__ = &subdeviceCtrlCmdMemSysSetPartitionableMem_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdKMemSysGetMIGMemoryConfig__ = &subdeviceCtrlCmdKMemSysGetMIGMemoryConfig_133e5e; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFbSetZbcReferenced__ = &subdeviceCtrlCmdFbSetZbcReferenced_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdMemSysL2InvalidateEvict__ = &subdeviceCtrlCmdMemSysL2InvalidateEvict_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches__ = &subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdMemSysDisableNvlinkPeers__ = &subdeviceCtrlCmdMemSysDisableNvlinkPeers_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdMemSysProgramRawCompressionMode__ = &subdeviceCtrlCmdMemSysProgramRawCompressionMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable__ = &subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFbGetCtagsForCbcEviction__ = &subdeviceCtrlCmdFbGetCtagsForCbcEviction_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFbCBCOp__ = &subdeviceCtrlCmdFbCBCOp_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFbSetRrd__ = &subdeviceCtrlCmdFbSetRrd_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFbSetReadLimit__ = &subdeviceCtrlCmdFbSetReadLimit_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFbSetWriteLimit__ = &subdeviceCtrlCmdFbSetWriteLimit_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + 
pThis->__subdeviceCtrlCmdSetGpfifo__ = &subdeviceCtrlCmdSetGpfifo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__subdeviceCtrlCmdSetOperationalProperties__ = &subdeviceCtrlCmdSetOperationalProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdFifoBindEngines__ = &subdeviceCtrlCmdFifoBindEngines_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGetPhysicalChannelCount__ = &subdeviceCtrlCmdGetPhysicalChannelCount_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFifoGetInfo__ = &subdeviceCtrlCmdFifoGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFifoDisableChannels__ = &subdeviceCtrlCmdFifoDisableChannels_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__subdeviceCtrlCmdFifoDisableUsermodeChannels__ = &subdeviceCtrlCmdFifoDisableUsermodeChannels_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdFifoGetChannelMemInfo__ = &subdeviceCtrlCmdFifoGetChannelMemInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdFifoGetUserdLocation__ = &subdeviceCtrlCmdFifoGetUserdLocation_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__subdeviceCtrlCmdFifoGetDeviceInfoTable__ = &subdeviceCtrlCmdFifoGetDeviceInfoTable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__subdeviceCtrlCmdFifoClearFaultedBit__ = &subdeviceCtrlCmdFifoClearFaultedBit_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2310u) + pThis->__subdeviceCtrlCmdFifoRunlistSetSchedPolicy__ = &subdeviceCtrlCmdFifoRunlistSetSchedPolicy_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdFifoUpdateChannelInfo__ = &subdeviceCtrlCmdFifoUpdateChannelInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers__ = &subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2610u) + pThis->__subdeviceCtrlCmdInternalFifoGetNumChannels__ = &subdeviceCtrlCmdInternalFifoGetNumChannels_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + pThis->__subdeviceCtrlCmdKGrGetInfo__ = &subdeviceCtrlCmdKGrGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + pThis->__subdeviceCtrlCmdKGrGetInfoV2__ = &subdeviceCtrlCmdKGrGetInfoV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + pThis->__subdeviceCtrlCmdKGrGetCapsV2__ = &subdeviceCtrlCmdKGrGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrGetCtxswModes__ = &subdeviceCtrlCmdKGrGetCtxswModes_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrCtxswZcullMode__ = &subdeviceCtrlCmdKGrCtxswZcullMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrCtxswZcullBind__ = &subdeviceCtrlCmdKGrCtxswZcullBind_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdKGrGetZcullInfo__ = &subdeviceCtrlCmdKGrGetZcullInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + pThis->__subdeviceCtrlCmdKGrCtxswPmMode__ = &subdeviceCtrlCmdKGrCtxswPmMode_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrCtxswPmBind__ = &subdeviceCtrlCmdKGrCtxswPmBind_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrSetGpcTileMap__ = &subdeviceCtrlCmdKGrSetGpcTileMap_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrCtxswSmpcMode__ = &subdeviceCtrlCmdKGrCtxswSmpcMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrPcSamplingMode__ = &subdeviceCtrlCmdKGrPcSamplingMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrGetSmToGpcTpcMappings__ = &subdeviceCtrlCmdKGrGetSmToGpcTpcMappings_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdKGrGetGlobalSmOrder__ = &subdeviceCtrlCmdKGrGetGlobalSmOrder_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrSetCtxswPreemptionMode__ = &subdeviceCtrlCmdKGrSetCtxswPreemptionMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrCtxswPreemptionBind__ = &subdeviceCtrlCmdKGrCtxswPreemptionBind_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrGetROPInfo__ = &subdeviceCtrlCmdKGrGetROPInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrGetCtxswStats__ = &subdeviceCtrlCmdKGrGetCtxswStats_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + pThis->__subdeviceCtrlCmdKGrGetCtxBufferSize__ = &subdeviceCtrlCmdKGrGetCtxBufferSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__subdeviceCtrlCmdKGrGetCtxBufferInfo__ = &subdeviceCtrlCmdKGrGetCtxBufferInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__subdeviceCtrlCmdKGrGetCtxBufferPtes__ = &subdeviceCtrlCmdKGrGetCtxBufferPtes_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrGetCurrentResidentChannel__ = &subdeviceCtrlCmdKGrGetCurrentResidentChannel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrGetVatAlarmData__ = &subdeviceCtrlCmdKGrGetVatAlarmData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrGetAttributeBufferSize__ = &subdeviceCtrlCmdKGrGetAttributeBufferSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__subdeviceCtrlCmdKGrGfxPoolQuerySize__ = &subdeviceCtrlCmdKGrGfxPoolQuerySize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__subdeviceCtrlCmdKGrGfxPoolInitialize__ = &subdeviceCtrlCmdKGrGfxPoolInitialize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__subdeviceCtrlCmdKGrGfxPoolAddSlots__ = &subdeviceCtrlCmdKGrGfxPoolAddSlots_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__subdeviceCtrlCmdKGrGfxPoolRemoveSlots__ = &subdeviceCtrlCmdKGrGfxPoolRemoveSlots_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrGetPpcMask__ = &subdeviceCtrlCmdKGrGetPpcMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrSetTpcPartitionMode__ = &subdeviceCtrlCmdKGrSetTpcPartitionMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrGetSmIssueRateModifier__ = 
&subdeviceCtrlCmdKGrGetSmIssueRateModifier_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrFecsBindEvtbufForUid__ = &subdeviceCtrlCmdKGrFecsBindEvtbufForUid_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x8010u) + pThis->__subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2__ = &subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x44u) + pThis->__subdeviceCtrlCmdKGrGetPhysGpcMask__ = &subdeviceCtrlCmdKGrGetPhysGpcMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + pThis->__subdeviceCtrlCmdKGrGetGpcMask__ = &subdeviceCtrlCmdKGrGetGpcMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrGetTpcMask__ = &subdeviceCtrlCmdKGrGetTpcMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrGetEngineContextProperties__ = &subdeviceCtrlCmdKGrGetEngineContextProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdKGrGetNumTpcsForGpc__ = &subdeviceCtrlCmdKGrGetNumTpcsForGpc_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdKGrGetGpcTileMap__ = &subdeviceCtrlCmdKGrGetGpcTileMap_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + pThis->__subdeviceCtrlCmdKGrGetZcullMask__ = &subdeviceCtrlCmdKGrGetZcullMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetInfo__ = &subdeviceCtrlCmdKGrInternalStaticGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetCaps__ = &subdeviceCtrlCmdKGrInternalStaticGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder__ = &subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks__ = &subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetPpcMasks__ = &subdeviceCtrlCmdKGrInternalStaticGetPpcMasks_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetZcullInfo__ = &subdeviceCtrlCmdKGrInternalStaticGetZcullInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetRopInfo__ = &subdeviceCtrlCmdKGrInternalStaticGetRopInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo__ = &subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier__ = &subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize__ = &subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines__ = &subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + 
pThis->__subdeviceCtrlCmdKGrInternalStaticGetPdbProperties__ = &subdeviceCtrlCmdKGrInternalStaticGetPdbProperties_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + pThis->__subdeviceCtrlCmdGpuGetCachedInfo__ = &subdeviceCtrlCmdGpuGetCachedInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdGpuGetInfoV2__ = &subdeviceCtrlCmdGpuGetInfoV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuGetIpVersion__ = &subdeviceCtrlCmdGpuGetIpVersion_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo__ = &subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu__ = &subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuSetOptimusInfo__ = &subdeviceCtrlCmdGpuSetOptimusInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa10u) + pThis->__subdeviceCtrlCmdGpuGetNameString__ = &subdeviceCtrlCmdGpuGetNameString_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4a10u) + pThis->__subdeviceCtrlCmdGpuGetShortNameString__ = &subdeviceCtrlCmdGpuGetShortNameString_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetEncoderCapacity__ = &subdeviceCtrlCmdGpuGetEncoderCapacity_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetNvencSwSessionStats__ = &subdeviceCtrlCmdGpuGetNvencSwSessionStats_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__ = &subdeviceCtrlCmdGpuGetNvencSwSessionInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetNvfbcSwSessionStats__ = &subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo__ = &subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuSetFabricAddr__ = &subdeviceCtrlCmdGpuSetFabricAddr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuSetPower__ = &subdeviceCtrlCmdGpuSetPower_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdGpuGetSdm__ = &subdeviceCtrlCmdGpuGetSdm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u) + pThis->__subdeviceCtrlCmdGpuSetSdm__ = &subdeviceCtrlCmdGpuSetSdm_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u) + pThis->__subdeviceCtrlCmdGpuGetSimulationInfo__ = &subdeviceCtrlCmdGpuGetSimulationInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__subdeviceCtrlCmdGpuGetEngines__ = &subdeviceCtrlCmdGpuGetEngines_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__subdeviceCtrlCmdGpuGetEnginesV2__ = &subdeviceCtrlCmdGpuGetEnginesV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u) + pThis->__subdeviceCtrlCmdGpuGetEngineClasslist__ = &subdeviceCtrlCmdGpuGetEngineClasslist_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdGpuGetEnginePartnerList__ = &subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdGpuGetFermiGpcInfo__ = &subdeviceCtrlCmdGpuGetFermiGpcInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdGpuGetFermiTpcInfo__ = &subdeviceCtrlCmdGpuGetFermiTpcInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetFermiZcullInfo__ = &subdeviceCtrlCmdGpuGetFermiZcullInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetPesInfo__ = &subdeviceCtrlCmdGpuGetPesInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuExecRegOps__ = &subdeviceCtrlCmdGpuExecRegOps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuQueryMode__ = &subdeviceCtrlCmdGpuQueryMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + pThis->__subdeviceCtrlCmdGpuGetInforomImageVersion__ = &subdeviceCtrlCmdGpuGetInforomImageVersion_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuGetInforomObjectVersion__ = &subdeviceCtrlCmdGpuGetInforomObjectVersion_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuQueryInforomEccSupport__ = &subdeviceCtrlCmdGpuQueryInforomEccSupport_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + pThis->__subdeviceCtrlCmdGpuQueryEccStatus__ = &subdeviceCtrlCmdGpuQueryEccStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + pThis->__subdeviceCtrlCmdGpuGetOEMBoardInfo__ = &subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4210u) + pThis->__subdeviceCtrlCmdGpuGetOEMInfo__ = &subdeviceCtrlCmdGpuGetOEMInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__subdeviceCtrlCmdGpuHandleGpuSR__ = &subdeviceCtrlCmdGpuHandleGpuSR_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x844u) + pThis->__subdeviceCtrlCmdGpuSetComputeModeRules__ = &subdeviceCtrlCmdGpuSetComputeModeRules_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdGpuQueryComputeModeRules__ = &subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdGpuAcquireComputeModeReservation__ = &subdeviceCtrlCmdGpuAcquireComputeModeReservation_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdGpuReleaseComputeModeReservation__ = &subdeviceCtrlCmdGpuReleaseComputeModeReservation_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__subdeviceCtrlCmdGpuInitializeCtx__ = &subdeviceCtrlCmdGpuInitializeCtx_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__subdeviceCtrlCmdGpuPromoteCtx__ = &subdeviceCtrlCmdGpuPromoteCtx_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2200u) + pThis->__subdeviceCtrlCmdGpuEvictCtx__ = &subdeviceCtrlCmdGpuEvictCtx_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x812u) + pThis->__subdeviceCtrlCmdGpuGetId__ = &subdeviceCtrlCmdGpuGetId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + pThis->__subdeviceCtrlCmdGpuGetGidInfo__ = &subdeviceCtrlCmdGpuGetGidInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuQueryIllumSupport__ = &subdeviceCtrlCmdGpuQueryIllumSupport_IMPL; +#endif + 
+#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuGetIllum__ = &subdeviceCtrlCmdGpuGetIllum_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuSetIllum__ = &subdeviceCtrlCmdGpuSetIllum_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuQueryScrubberStatus__ = &subdeviceCtrlCmdGpuQueryScrubberStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuGetVprCaps__ = &subdeviceCtrlCmdGpuGetVprCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuGetVprInfo__ = &subdeviceCtrlCmdGpuGetVprInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetPids__ = &subdeviceCtrlCmdGpuGetPids_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetPidInfo__ = &subdeviceCtrlCmdGpuGetPidInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdGpuInterruptFunction__ = &subdeviceCtrlCmdGpuInterruptFunction_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuQueryFunctionStatus__ = &subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdGpuReportNonReplayableFault__ = &subdeviceCtrlCmdGpuReportNonReplayableFault_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdGpuGetEngineFaultInfo__ = &subdeviceCtrlCmdGpuGetEngineFaultInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdGpuGetEngineRunlistPriBase__ = &subdeviceCtrlCmdGpuGetEngineRunlistPriBase_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetHwEngineId__ = &subdeviceCtrlCmdGpuGetHwEngineId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x50u) + pThis->__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__ = &subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdGpuSetComputePolicyConfig__ = &subdeviceCtrlCmdGpuSetComputePolicyConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetComputePolicyConfig__ = &subdeviceCtrlCmdGpuGetComputePolicyConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__subdeviceCtrlCmdValidateMemMapRequest__ = &subdeviceCtrlCmdValidateMemMapRequest_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x12u) + pThis->__subdeviceCtrlCmdGpuGetEngineLoadTimes__ = &subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL; +#endif +} + +static void __nvoc_init_funcTable_Subdevice_2(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdEventSetTrigger__ = &subdeviceCtrlCmdEventSetTrigger_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdEventSetTriggerFifo__ = &subdeviceCtrlCmdEventSetTriggerFifo_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdEventSetNotification__ = &subdeviceCtrlCmdEventSetNotification_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdEventSetMemoryNotifies__ = &subdeviceCtrlCmdEventSetMemoryNotifies_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdEventSetSemaphoreMemory__ = &subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdEventSetSemaMemValidation__ = &subdeviceCtrlCmdEventSetSemaMemValidation_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdTimerCancel__ = &subdeviceCtrlCmdTimerCancel_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdTimerSchedule__ = &subdeviceCtrlCmdTimerSchedule_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdTimerGetTime__ = &subdeviceCtrlCmdTimerGetTime_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdTimerGetRegisterOffset__ = &subdeviceCtrlCmdTimerGetRegisterOffset_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__ = &subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2010u) + pThis->__subdeviceCtrlCmdTimerSetGrTickFreq__ = &subdeviceCtrlCmdTimerSetGrTickFreq_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__subdeviceCtrlCmdRcReadVirtualMem__ = &subdeviceCtrlCmdRcReadVirtualMem_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdRcGetErrorCount__ = &subdeviceCtrlCmdRcGetErrorCount_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdRcGetErrorV2__ = &subdeviceCtrlCmdRcGetErrorV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdRcSetCleanErrorHistory__ = &subdeviceCtrlCmdRcSetCleanErrorHistory_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdRcGetWatchdogInfo__ = &subdeviceCtrlCmdRcGetWatchdogInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdRcDisableWatchdog__ = &subdeviceCtrlCmdRcDisableWatchdog_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdRcSoftDisableWatchdog__ = &subdeviceCtrlCmdRcSoftDisableWatchdog_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdRcEnableWatchdog__ = &subdeviceCtrlCmdRcEnableWatchdog_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdRcReleaseWatchdogRequests__ = &subdeviceCtrlCmdRcReleaseWatchdogRequests_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalRcWatchdogTimeout__ = &subdeviceCtrlCmdInternalRcWatchdogTimeout_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdSetRcRecovery__ = &subdeviceCtrlCmdSetRcRecovery_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdGetRcRecovery__ = &subdeviceCtrlCmdGetRcRecovery_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdGetRcInfo__ = &subdeviceCtrlCmdGetRcInfo_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdSetRcInfo__ = &subdeviceCtrlCmdSetRcInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdNvdGetDumpSize__ = &subdeviceCtrlCmdNvdGetDumpSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdNvdGetDump__ = &subdeviceCtrlCmdNvdGetDump_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__subdeviceCtrlCmdNvdGetNocatJournalRpt__ = &subdeviceCtrlCmdNvdGetNocatJournalRpt_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__subdeviceCtrlCmdNvdSetNocatJournalData__ = &subdeviceCtrlCmdNvdSetNocatJournalData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdCeGetCaps__ = &subdeviceCtrlCmdCeGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdCeGetCapsV2__ = &subdeviceCtrlCmdCeGetCapsV2_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x850u) + pThis->__subdeviceCtrlCmdCeGetAllCaps__ = &subdeviceCtrlCmdCeGetAllCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + pThis->__subdeviceCtrlCmdCeGetCePceMask__ = &subdeviceCtrlCmdCeGetCePceMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdCeUpdatePceLceMappings__ = &subdeviceCtrlCmdCeUpdatePceLceMappings_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnGetDmemUsage__ = &subdeviceCtrlCmdFlcnGetDmemUsage_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnInstrumentationMap__ = &subdeviceCtrlCmdFlcnInstrumentationMap_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnInstrumentationUnmap__ = &subdeviceCtrlCmdFlcnInstrumentationUnmap_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnInstrumentationGetInfo__ = &subdeviceCtrlCmdFlcnInstrumentationGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnInstrumentationGetControl__ = &subdeviceCtrlCmdFlcnInstrumentationGetControl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFlcnInstrumentationSetControl__ = &subdeviceCtrlCmdFlcnInstrumentationSetControl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnInstrumentationRecalibrate__ = &subdeviceCtrlCmdFlcnInstrumentationRecalibrate_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnGetEngineArch__ = &subdeviceCtrlCmdFlcnGetEngineArch_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnUstreamerQueueInfo__ = &subdeviceCtrlCmdFlcnUstreamerQueueInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnUstreamerControlGet__ = &subdeviceCtrlCmdFlcnUstreamerControlGet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdFlcnUstreamerControlSet__ = &subdeviceCtrlCmdFlcnUstreamerControlSet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x200u) + pThis->__subdeviceCtrlCmdFlcnGetCtxBufferInfo__ = &subdeviceCtrlCmdFlcnGetCtxBufferInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdFlcnGetCtxBufferSize__ = 
&subdeviceCtrlCmdFlcnGetCtxBufferSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdEccGetClientExposedCounters__ = &subdeviceCtrlCmdEccGetClientExposedCounters_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuQueryEccConfiguration__ = &subdeviceCtrlCmdGpuQueryEccConfiguration_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdGpuSetEccConfiguration__ = &subdeviceCtrlCmdGpuSetEccConfiguration_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u) + pThis->__subdeviceCtrlCmdGpuResetEccErrorStatus__ = &subdeviceCtrlCmdGpuResetEccErrorStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u) + pThis->__subdeviceCtrlCmdFlaRange__ = &subdeviceCtrlCmdFlaRange_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2204u) + pThis->__subdeviceCtrlCmdFlaSetupInstanceMemBlock__ = &subdeviceCtrlCmdFlaSetupInstanceMemBlock_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdFlaGetRange__ = &subdeviceCtrlCmdFlaGetRange_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1810u) + pThis->__subdeviceCtrlCmdFlaGetFabricMemStats__ = &subdeviceCtrlCmdFlaGetFabricMemStats_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u) + pThis->__subdeviceCtrlCmdGspGetFeatures__ = &subdeviceCtrlCmdGspGetFeatures_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetActivePartitionIds__ = &subdeviceCtrlCmdGpuGetActivePartitionIds_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__subdeviceCtrlCmdGpuGetPartitionCapacity__ = &subdeviceCtrlCmdGpuGetPartitionCapacity_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuDescribePartitions__ = &subdeviceCtrlCmdGpuDescribePartitions_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__subdeviceCtrlCmdGpuSetPartitioningMode__ = &subdeviceCtrlCmdGpuSetPartitioningMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__subdeviceCtrlCmdGrmgrGetGrFsInfo__ = &subdeviceCtrlCmdGrmgrGetGrFsInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuSetPartitions__ = &subdeviceCtrlCmdGpuSetPartitions_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__subdeviceCtrlCmdGpuGetPartitions__ = &subdeviceCtrlCmdGpuGetPartitions_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles__ = &subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines__ = &subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2600u) + pThis->__subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges__ = &subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + pThis->__subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance__ = &subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x400u) + pThis->__subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance__ = &subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x3u) + pThis->__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__ = &subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u) + pThis->__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__ = &subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u) + pThis->__subdeviceCtrlCmdOsUnixAudioDynamicPower__ = &subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u) + pThis->__subdeviceCtrlCmdOsUnixVidmemPersistenceStatus__ = &subdeviceCtrlCmdOsUnixVidmemPersistenceStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u) + pThis->__subdeviceCtrlCmdOsUnixUpdateTgpStatus__ = &subdeviceCtrlCmdOsUnixUpdateTgpStatus_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdDisplayGetIpVersion__ = &subdeviceCtrlCmdDisplayGetIpVersion_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdDisplayGetStaticInfo__ = &subdeviceCtrlCmdDisplayGetStaticInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdDisplaySetChannelPushbuffer__ = &subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdDisplayWriteInstMem__ = &subdeviceCtrlCmdDisplayWriteInstMem_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdDisplaySetupRgLineIntr__ = &subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdDisplaySetImportedImpData__ = &subdeviceCtrlCmdDisplaySetImportedImpData_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdDisplayGetDisplayMask__ = &subdeviceCtrlCmdDisplayGetDisplayMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + pThis->__subdeviceCtrlCmdMsencGetCaps__ = &subdeviceCtrlCmdMsencGetCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer__ = &subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer__ = &subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer__ = &subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize__ = &subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + pThis->__subdeviceCtrlCmdInternalGetChipInfo__ = &subdeviceCtrlCmdInternalGetChipInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + pThis->__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__ = &subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + pThis->__subdeviceCtrlCmdInternalGetDeviceInfoTable__ = &subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalGetConstructedFalconInfo__ = &subdeviceCtrlCmdInternalGetConstructedFalconInfo_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalRecoverAllComputeContexts__ = &subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalGetSmcMode__ = &subdeviceCtrlCmdInternalGetSmcMode_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalBusBindLocalGfidForP2p__ = &subdeviceCtrlCmdInternalBusBindLocalGfidForP2p_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p__ = &subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalBusFlushWithSysmembar__ = &subdeviceCtrlCmdInternalBusFlushWithSysmembar_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal__ = &subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote__ = &subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalBusDestroyP2pMailbox__ = &subdeviceCtrlCmdInternalBusDestroyP2pMailbox_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalBusCreateC2cPeerMapping__ = &subdeviceCtrlCmdInternalBusCreateC2cPeerMapping_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping__ = &subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdGmmuGetStaticInfo__ = &subdeviceCtrlCmdGmmuGetStaticInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer__ = &subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer__ = &subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer__ = &subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer__ = &subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe40u) + pThis->__subdeviceCtrlCmdCeGetPhysicalCaps__ = &subdeviceCtrlCmdCeGetPhysicalCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xe40u) + pThis->__subdeviceCtrlCmdCeGetAllPhysicalCaps__ = &subdeviceCtrlCmdCeGetAllPhysicalCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdCeUpdateClassDB__ = &subdeviceCtrlCmdCeUpdateClassDB_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4200u) + pThis->__subdeviceCtrlCmdCeGetFaultMethodBufferSize__ = &subdeviceCtrlCmdCeGetFaultMethodBufferSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4600u) + pThis->__subdeviceCtrlCmdCeGetHubPceMask__ = &subdeviceCtrlCmdCeGetHubPceMask_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + 
pThis->__subdeviceCtrlCmdIntrGetKernelTable__ = &subdeviceCtrlCmdIntrGetKernelTable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__subdeviceCtrlCmdInternalPerfCudaLimitDisable__ = &subdeviceCtrlCmdInternalPerfCudaLimitDisable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalPerfOptpCliClear__ = &subdeviceCtrlCmdInternalPerfOptpCliClear_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__subdeviceCtrlCmdInternalPerfBoostSet_2x__ = &subdeviceCtrlCmdInternalPerfBoostSet_2x_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__subdeviceCtrlCmdInternalPerfBoostSet_3x__ = &subdeviceCtrlCmdInternalPerfBoostSet_3x_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__subdeviceCtrlCmdInternalPerfBoostClear_3x__ = &subdeviceCtrlCmdInternalPerfBoostClear_3x_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl__ = &subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo__ = &subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits__ = &subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck__ = &subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x610u) + pThis->__subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet__ = &subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount__ = &subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdBifGetStaticInfo__ = &subdeviceCtrlCmdBifGetStaticInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdBifGetAspmL1Flags__ = &subdeviceCtrlCmdBifGetAspmL1Flags_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdHshubPeerConnConfig__ = &subdeviceCtrlCmdHshubPeerConnConfig_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdHshubFirstLinkPeerId__ = &subdeviceCtrlCmdHshubFirstLinkPeerId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdHshubGetHshubIdForLinks__ = &subdeviceCtrlCmdHshubGetHshubIdForLinks_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdHshubGetNumUnits__ = &subdeviceCtrlCmdHshubGetNumUnits_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdHshubNextHshubId__ = &subdeviceCtrlCmdHshubNextHshubId_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr__ = &subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr__ = &subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr_IMPL; +#endif + +#if 
!NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x600u) + pThis->__subdeviceCtrlCmdInternalGetPcieP2pCaps__ = &subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0xa50u) + pThis->__subdeviceCtrlCmdGetAvailableHshubMask__ = &subdeviceCtrlCmdGetAvailableHshubMask_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resPreDestruct__ = &__nvoc_thunk_Subdevice_resPreDestruct; + + pThis->__nvoc_base_GpuResource.__gpuresInternalControlForward__ = &__nvoc_thunk_Subdevice_gpuresInternalControlForward; + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resControlFilter__ = &__nvoc_thunk_Subdevice_resControlFilter; + + pThis->__subdeviceShareCallback__ = &__nvoc_thunk_GpuResource_subdeviceShareCallback; + + pThis->__subdeviceMapTo__ = &__nvoc_thunk_RsResource_subdeviceMapTo; + + pThis->__subdeviceGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_subdeviceGetOrAllocNotifShare; + + pThis->__subdeviceCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_subdeviceCheckMemInterUnmap; + + pThis->__subdeviceGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_subdeviceGetMapAddrSpace; + + pThis->__subdeviceSetNotificationShare__ = &__nvoc_thunk_Notifier_subdeviceSetNotificationShare; + + pThis->__subdeviceGetRefCount__ = &__nvoc_thunk_RsResource_subdeviceGetRefCount; + + pThis->__subdeviceAddAdditionalDependants__ = &__nvoc_thunk_RsResource_subdeviceAddAdditionalDependants; + + pThis->__subdeviceControl_Prologue__ = &__nvoc_thunk_RmResource_subdeviceControl_Prologue; + + pThis->__subdeviceGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_subdeviceGetRegBaseOffsetAndSize; + + pThis->__subdeviceUnmapFrom__ = &__nvoc_thunk_RsResource_subdeviceUnmapFrom; + + pThis->__subdeviceControl_Epilogue__ = &__nvoc_thunk_RmResource_subdeviceControl_Epilogue; + + pThis->__subdeviceControlLookup__ = &__nvoc_thunk_RsResource_subdeviceControlLookup; + + pThis->__subdeviceGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_subdeviceGetInternalObjectHandle; + + pThis->__subdeviceControl__ = &__nvoc_thunk_GpuResource_subdeviceControl; + + pThis->__subdeviceUnmap__ = &__nvoc_thunk_GpuResource_subdeviceUnmap; + + pThis->__subdeviceGetMemInterMapParams__ = &__nvoc_thunk_RmResource_subdeviceGetMemInterMapParams; + + pThis->__subdeviceGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_subdeviceGetMemoryMappingDescriptor; + + pThis->__subdeviceUnregisterEvent__ = &__nvoc_thunk_Notifier_subdeviceUnregisterEvent; + + pThis->__subdeviceCanCopy__ = &__nvoc_thunk_RsResource_subdeviceCanCopy; + + pThis->__subdeviceGetNotificationListPtr__ = &__nvoc_thunk_Notifier_subdeviceGetNotificationListPtr; + + pThis->__subdeviceGetNotificationShare__ = &__nvoc_thunk_Notifier_subdeviceGetNotificationShare; + + pThis->__subdeviceMap__ = &__nvoc_thunk_GpuResource_subdeviceMap; + + pThis->__subdeviceAccessCallback__ = &__nvoc_thunk_RmResource_subdeviceAccessCallback; +} + +void __nvoc_init_funcTable_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_Subdevice_1(pThis, pRmhalspecowner); + __nvoc_init_funcTable_Subdevice_2(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void __nvoc_init_Subdevice(Subdevice *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_Subdevice = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource 
= &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_Subdevice(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_Subdevice(Subdevice **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + Subdevice *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(Subdevice)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(Subdevice)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Subdevice); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_Subdevice(pThis, pRmhalspecowner); + status = __nvoc_ctor_Subdevice(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_Subdevice_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_Subdevice_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_Subdevice(Subdevice **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_Subdevice(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_subdevice_nvoc.h b/src/nvidia/generated/g_subdevice_nvoc.h new file mode 100644 index 000000000..53d978a0c --- /dev/null +++ b/src/nvidia/generated/g_subdevice_nvoc.h @@ -0,0 +1,3680 @@ +#ifndef _G_SUBDEVICE_NVOC_H_ +#define _G_SUBDEVICE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_subdevice_nvoc.h" + +#ifndef _SUBDEVICE_H_ +#define _SUBDEVICE_H_ + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "resserv/rs_resource.h" +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" +#include "containers/btree.h" +#include "nvoc/utility.h" +#include "gpu/gpu_halspec.h" + +#include "class/cl2080.h" +#include "ctrl/ctrl0000/ctrl0000system.h" +#include "ctrl/ctrl2080.h" // rmcontrol parameters + +#ifndef NV2080_NOTIFIERS_CE_IDX +// TODO these need to be moved to cl2080.h +#define NV2080_NOTIFIERS_CE_IDX(i) ((i) - NV2080_NOTIFIERS_CE0) +#define NV2080_NOTIFIERS_NVENC_IDX(i) ((i) - NV2080_NOTIFIERS_NVENC0) +#define NV2080_NOTIFIERS_NVDEC_IDX(i) ((i) - NV2080_NOTIFIERS_NVDEC0) +#define NV2080_NOTIFIERS_GR_IDX(i) ((i) - NV2080_NOTIFIERS_GR0) +#endif + +#define NV2080_ENGINE_RANGE_GR() rangeMake(NV2080_ENGINE_TYPE_GR(0), NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE - 1)) +#define NV2080_ENGINE_RANGE_COPY() rangeMake(NV2080_ENGINE_TYPE_COPY(0), NV2080_ENGINE_TYPE_COPY(NV2080_ENGINE_TYPE_COPY_SIZE - 1)) +#define NV2080_ENGINE_RANGE_NVDEC() rangeMake(NV2080_ENGINE_TYPE_NVDEC(0), NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE - 1)) +#define NV2080_ENGINE_RANGE_NVENC() rangeMake(NV2080_ENGINE_TYPE_NVENC(0), NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE - 1)) +#define NV2080_ENGINE_RANGE_NVJPEG() rangeMake(NV2080_ENGINE_TYPE_NVJPEG(0), NV2080_ENGINE_TYPE_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_SIZE - 1)) + +struct Device; + +#ifndef __NVOC_CLASS_Device_TYPEDEF__ +#define __NVOC_CLASS_Device_TYPEDEF__ +typedef struct Device Device; +#endif /* __NVOC_CLASS_Device_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Device +#define __nvoc_class_id_Device 0xe0ac20 +#endif /* __nvoc_class_id_Device */ + + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + +struct Memory; + +#ifndef __NVOC_CLASS_Memory_TYPEDEF__ +#define __NVOC_CLASS_Memory_TYPEDEF__ +typedef struct Memory Memory; +#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Memory +#define __nvoc_class_id_Memory 0x4789f2 +#endif /* __nvoc_class_id_Memory */ + + +struct P2PApi; + +#ifndef __NVOC_CLASS_P2PApi_TYPEDEF__ +#define 
__NVOC_CLASS_P2PApi_TYPEDEF__ +typedef struct P2PApi P2PApi; +#endif /* __NVOC_CLASS_P2PApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_P2PApi +#define __nvoc_class_id_P2PApi 0x3982b7 +#endif /* __nvoc_class_id_P2PApi */ + + + +/** + * A subdevice represents a single GPU within a device. Subdevice provide + * unicast semantics; that is, operations involving a subdevice are applied to + * the associated GPU only. + */ +#ifdef NVOC_SUBDEVICE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct Subdevice { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct Subdevice *__nvoc_pbase_Subdevice; + void (*__subdevicePreDestruct__)(struct Subdevice *); + NV_STATUS (*__subdeviceInternalControlForward__)(struct Subdevice *, NvU32, void *, NvU32); + NV_STATUS (*__subdeviceControlFilter__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__subdeviceCtrlCmdBiosGetInfoV2__)(struct Subdevice *, NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBiosGetSKUInfo__)(struct Subdevice *, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBiosGetPostTime__)(struct Subdevice *, NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBiosGetUefiSupport__)(struct Subdevice *, NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMcGetArchInfo__)(struct Subdevice *, NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMcGetManufacturer__)(struct Subdevice *, NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMcQueryHostclkSlowdownStatus__)(struct Subdevice *, NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMcSetHostclkSlowdownStatus__)(struct Subdevice *, NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMcChangeReplayableFaultOwnership__)(struct Subdevice *, NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMcServiceInterrupts__)(struct Subdevice *, NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDmaInvalidateTLB__)(struct Subdevice *, NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDmaGetInfo__)(struct Subdevice *, NV2080_CTRL_DMA_GET_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetPciInfo__)(struct Subdevice *, NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetInfo__)(struct Subdevice *, NV2080_CTRL_BUS_GET_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetInfoV2__)(struct Subdevice *, NV2080_CTRL_BUS_GET_INFO_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetPciBarInfo__)(struct Subdevice *, NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusSetPcieSpeed__)(struct Subdevice *, NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusSetPcieLinkWidth__)(struct Subdevice *, NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed__)(struct Subdevice *, 
NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed__)(struct Subdevice *, NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusHWBCGetUpstreamBAR0__)(struct Subdevice *, NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusServiceGpuMultifunctionState__)(struct Subdevice *, NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetPexCounters__)(struct Subdevice *, NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetBFD__)(struct Subdevice *, NV2080_CTRL_BUS_GET_BFD_PARAMSARR *); + NV_STATUS (*__subdeviceCtrlCmdBusGetAspmDisableFlags__)(struct Subdevice *, NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusControlPublicAspmBits__)(struct Subdevice *, NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusClearPexCounters__)(struct Subdevice *, NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetPexUtilCounters__)(struct Subdevice *, NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusClearPexUtilCounters__)(struct Subdevice *, NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusFreezePexCounters__)(struct Subdevice *, NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetPexLaneCounters__)(struct Subdevice *, NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetPcieLtrLatency__)(struct Subdevice *, NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusSetPcieLtrLatency__)(struct Subdevice *, NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetNvlinkPeerIdMask__)(struct Subdevice *, NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusSetEomParameters__)(struct Subdevice *, NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetUphyDlnCfgSpace__)(struct Subdevice *, NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetEomStatus__)(struct Subdevice *, NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusSysmemAccess__)(struct Subdevice *, NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetNvlinkCaps__)(struct Subdevice *, NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBusGetNvlinkStatus__)(struct Subdevice *, NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGetNvlinkCounters__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdClearNvlinkCounters__)(struct Subdevice *, NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkSetupEom__)(struct Subdevice *, NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetPowerState__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkReadTpCounters__)(struct Subdevice *, NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetLpCounters__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS *); + 
NV_STATUS (*__subdeviceCtrlCmdNvlinkEnableNvlinkPeer__)(struct Subdevice *, NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkCoreCallback__)(struct Subdevice *, NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkSetLoopbackMode__)(struct Subdevice *, NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid__)(struct Subdevice *, NV2080_CTRL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkUpdateHshubMux__)(struct Subdevice *, NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer__)(struct Subdevice *, NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer__)(struct Subdevice *, NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkRemoveNvlinkMapping__)(struct Subdevice *, NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkSaveRestoreHshubState__)(struct Subdevice *, NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkProgramBufferready__)(struct Subdevice *, NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkUpdateCurrentConfig__)(struct Subdevice *, NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkUpdatePeerLinkMask__)(struct Subdevice *, NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkUpdateLinkConnection__)(struct Subdevice *, NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkEnableLinksPostTopology__)(struct Subdevice *, NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetRefreshCounters__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkClearRefreshCounters__)(struct Subdevice *, NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkProgramLinkSpeed__)(struct Subdevice *, NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkAreLinksTrained__)(struct Subdevice *, NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkResetLinks__)(struct Subdevice *, NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkDisableDlInterrupts__)(struct Subdevice *, NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetLinkAndClockInfo__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkSetupNvlinkSysmem__)(struct Subdevice *, NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkProcessForcedConfigs__)(struct Subdevice *, NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkSyncLaneShutdownProps__)(struct Subdevice *, NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS *); + 
NV_STATUS (*__subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts__)(struct Subdevice *, NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask__)(struct Subdevice *, NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr__)(struct Subdevice *, NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo__)(struct Subdevice *, NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkEnableLinks__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdNvlinkProcessInitDisabledLinks__)(struct Subdevice *, NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdI2cReadBuffer__)(struct Subdevice *, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdI2cWriteBuffer__)(struct Subdevice *, NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdI2cReadReg__)(struct Subdevice *, NV2080_CTRL_I2C_RW_REG_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdI2cWriteReg__)(struct Subdevice *, NV2080_CTRL_I2C_RW_REG_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples__)(struct Subdevice *, NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM *); + NV_STATUS (*__subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2__)(struct Subdevice *, NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdPerfRatedTdpGetControl__)(struct Subdevice *, NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdPerfRatedTdpSetControl__)(struct Subdevice *, NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdPerfReservePerfmonHw__)(struct Subdevice *, NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKPerfBoost__)(struct Subdevice *, NV2080_CTRL_PERF_BOOST_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetFBRegionInfo__)(struct Subdevice *, NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetBar1Offset__)(struct Subdevice *, NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbIsKind__)(struct Subdevice *, NV2080_CTRL_FB_IS_KIND_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetMemAlignment__)(struct Subdevice *, NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetHeapReservationSize__)(struct Subdevice *, NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetInfo__)(struct Subdevice *, NV2080_CTRL_FB_GET_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetInfoV2__)(struct Subdevice *, NV2080_CTRL_FB_GET_INFO_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetCarveoutAddressInfo__)(struct Subdevice *, NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO *); + NV_STATUS (*__subdeviceCtrlCmdFbGetCalibrationLockFailed__)(struct Subdevice *, NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbFlushGpuCache__)(struct Subdevice *, NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbSetGpuCacheAllocPolicy__)(struct Subdevice *, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetGpuCacheAllocPolicy__)(struct Subdevice *, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2__)(struct Subdevice *, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS *); + 
NV_STATUS (*__subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2__)(struct Subdevice *, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetGpuCacheInfo__)(struct Subdevice *, NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetCliManagedOfflinedPages__)(struct Subdevice *, NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetOfflinedPages__)(struct Subdevice *, NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbSetupVprRegion__)(struct Subdevice *, NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetLTCInfoForFBP__)(struct Subdevice *, NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetCompBitCopyConstructInfo__)(struct Subdevice *, NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbPatchPbrForMining__)(struct Subdevice *, NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetRemappedRows__)(struct Subdevice *, NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetFsInfo__)(struct Subdevice *, NV2080_CTRL_FB_GET_FS_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetRowRemapperHistogram__)(struct Subdevice *, NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetDynamicOfflinedPages__)(struct Subdevice *, NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbUpdateNumaStatus__)(struct Subdevice *, NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetNumaInfo__)(struct Subdevice *, NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMemSysGetStaticConfig__)(struct Subdevice *, NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMemSysSetPartitionableMem__)(struct Subdevice *, NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKMemSysGetMIGMemoryConfig__)(struct Subdevice *, NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbSetZbcReferenced__)(struct Subdevice *, NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMemSysL2InvalidateEvict__)(struct Subdevice *, NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdMemSysDisableNvlinkPeers__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdMemSysProgramRawCompressionMode__)(struct Subdevice *, NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable__)(struct Subdevice *, NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbGetCtagsForCbcEviction__)(struct Subdevice *, NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbCBCOp__)(struct Subdevice *, NV2080_CTRL_CMD_FB_CBC_OP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbSetRrd__)(struct Subdevice *, NV2080_CTRL_FB_SET_RRD_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbSetReadLimit__)(struct Subdevice *, NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFbSetWriteLimit__)(struct Subdevice *, NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdSetGpfifo__)(struct Subdevice *, 
NV2080_CTRL_CMD_SET_GPFIFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdSetOperationalProperties__)(struct Subdevice *, NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoBindEngines__)(struct Subdevice *, NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGetPhysicalChannelCount__)(struct Subdevice *, NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoGetInfo__)(struct Subdevice *, NV2080_CTRL_FIFO_GET_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoDisableChannels__)(struct Subdevice *, NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoDisableUsermodeChannels__)(struct Subdevice *, NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoGetChannelMemInfo__)(struct Subdevice *, NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoGetUserdLocation__)(struct Subdevice *, NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoGetDeviceInfoTable__)(struct Subdevice *, NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoClearFaultedBit__)(struct Subdevice *, NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoRunlistSetSchedPolicy__)(struct Subdevice *, NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFifoUpdateChannelInfo__)(struct Subdevice *, NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers__)(struct Subdevice *, NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalFifoGetNumChannels__)(struct Subdevice *, NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetInfo__)(struct Subdevice *, NV2080_CTRL_GR_GET_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetInfoV2__)(struct Subdevice *, NV2080_CTRL_GR_GET_INFO_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetCapsV2__)(struct Subdevice *, NV2080_CTRL_GR_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetCtxswModes__)(struct Subdevice *, NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrCtxswZcullMode__)(struct Subdevice *, NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrCtxswZcullBind__)(struct Subdevice *, NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetZcullInfo__)(struct Subdevice *, NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrCtxswPmMode__)(struct Subdevice *, NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrCtxswPmBind__)(struct Subdevice *, NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrSetGpcTileMap__)(struct Subdevice *, NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrCtxswSmpcMode__)(struct Subdevice *, NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrPcSamplingMode__)(struct Subdevice *, NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetSmToGpcTpcMappings__)(struct Subdevice *, NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetGlobalSmOrder__)(struct Subdevice *, NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrSetCtxswPreemptionMode__)(struct Subdevice *, NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS 
*); + NV_STATUS (*__subdeviceCtrlCmdKGrCtxswPreemptionBind__)(struct Subdevice *, NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetROPInfo__)(struct Subdevice *, NV2080_CTRL_GR_GET_ROP_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetCtxswStats__)(struct Subdevice *, NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetCtxBufferSize__)(struct Subdevice *, NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetCtxBufferInfo__)(struct Subdevice *, NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetCtxBufferPtes__)(struct Subdevice *, NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetCurrentResidentChannel__)(struct Subdevice *, NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetVatAlarmData__)(struct Subdevice *, NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetAttributeBufferSize__)(struct Subdevice *, NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGfxPoolQuerySize__)(struct Subdevice *, NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGfxPoolInitialize__)(struct Subdevice *, NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGfxPoolAddSlots__)(struct Subdevice *, NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGfxPoolRemoveSlots__)(struct Subdevice *, NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetPpcMask__)(struct Subdevice *, NV2080_CTRL_GR_GET_PPC_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrSetTpcPartitionMode__)(struct Subdevice *, NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetSmIssueRateModifier__)(struct Subdevice *, NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrFecsBindEvtbufForUid__)(struct Subdevice *, NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2__)(struct Subdevice *, NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetPhysGpcMask__)(struct Subdevice *, NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetGpcMask__)(struct Subdevice *, NV2080_CTRL_GR_GET_GPC_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetTpcMask__)(struct Subdevice *, NV2080_CTRL_GR_GET_TPC_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetEngineContextProperties__)(struct Subdevice *, NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetNumTpcsForGpc__)(struct Subdevice *, NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetGpcTileMap__)(struct Subdevice *, NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrGetZcullMask__)(struct Subdevice *, NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetCaps__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks__)(struct 
Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetPpcMasks__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetZcullInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetRopInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdKGrInternalStaticGetPdbProperties__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetCachedInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetInfoV2__)(struct Subdevice *, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetIpVersion__)(struct Subdevice *, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetOptimusInfo__)(struct Subdevice *, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetNameString__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetShortNameString__)(struct Subdevice *, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetEncoderCapacity__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetNvencSwSessionStats__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetNvfbcSwSessionStats__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetFabricAddr__)(struct Subdevice *, NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetPower__)(struct Subdevice *, NV2080_CTRL_GPU_SET_POWER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetSdm__)(struct Subdevice *, NV2080_CTRL_GPU_GET_SDM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetSdm__)(struct Subdevice *, NV2080_CTRL_GPU_SET_SDM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetSimulationInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetEngines__)(struct Subdevice *, 
NV2080_CTRL_GPU_GET_ENGINES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetEnginesV2__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetEngineClasslist__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetEnginePartnerList__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetFermiGpcInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetFermiTpcInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetFermiZcullInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetPesInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_PES_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuExecRegOps__)(struct Subdevice *, NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuQueryMode__)(struct Subdevice *, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetInforomImageVersion__)(struct Subdevice *, NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetInforomObjectVersion__)(struct Subdevice *, NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuQueryInforomEccSupport__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdGpuQueryEccStatus__)(struct Subdevice *, NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetOEMBoardInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetOEMInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuHandleGpuSR__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetComputeModeRules__)(struct Subdevice *, NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuQueryComputeModeRules__)(struct Subdevice *, NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuAcquireComputeModeReservation__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdGpuReleaseComputeModeReservation__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdGpuInitializeCtx__)(struct Subdevice *, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuPromoteCtx__)(struct Subdevice *, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuEvictCtx__)(struct Subdevice *, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetId__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ID_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetGidInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuQueryIllumSupport__)(struct Subdevice *, NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetIllum__)(struct Subdevice *, NV2080_CTRL_CMD_GPU_ILLUM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetIllum__)(struct Subdevice *, NV2080_CTRL_CMD_GPU_ILLUM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuQueryScrubberStatus__)(struct Subdevice *, NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetVprCaps__)(struct Subdevice *, NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetVprInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS *); + NV_STATUS 
(*__subdeviceCtrlCmdGpuGetPids__)(struct Subdevice *, NV2080_CTRL_GPU_GET_PIDS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetPidInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuInterruptFunction__)(struct Subdevice *, NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuQueryFunctionStatus__)(struct Subdevice *, NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuReportNonReplayableFault__)(struct Subdevice *, NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetEngineFaultInfo__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetEngineRunlistPriBase__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetHwEngineId__)(struct Subdevice *, NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__)(struct Subdevice *, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetComputePolicyConfig__)(struct Subdevice *, NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetComputePolicyConfig__)(struct Subdevice *, NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdValidateMemMapRequest__)(struct Subdevice *, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetEngineLoadTimes__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdEventSetTrigger__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdEventSetTriggerFifo__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdEventSetNotification__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdEventSetMemoryNotifies__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdEventSetSemaphoreMemory__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdEventSetSemaMemValidation__)(struct Subdevice *, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdTimerCancel__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdTimerSchedule__)(struct Subdevice *, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdTimerGetTime__)(struct Subdevice *, NV2080_CTRL_TIMER_GET_TIME_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdTimerGetRegisterOffset__)(struct Subdevice *, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__)(struct Subdevice *, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdTimerSetGrTickFreq__)(struct Subdevice *, NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdRcReadVirtualMem__)(struct Subdevice *, NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdRcGetErrorCount__)(struct Subdevice *, NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdRcGetErrorV2__)(struct Subdevice *, NV2080_CTRL_RC_GET_ERROR_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdRcSetCleanErrorHistory__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdRcGetWatchdogInfo__)(struct Subdevice *, NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS *); + 
NV_STATUS (*__subdeviceCtrlCmdRcDisableWatchdog__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdRcSoftDisableWatchdog__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdRcEnableWatchdog__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdRcReleaseWatchdogRequests__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalRcWatchdogTimeout__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdSetRcRecovery__)(struct Subdevice *, NV2080_CTRL_CMD_RC_RECOVERY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGetRcRecovery__)(struct Subdevice *, NV2080_CTRL_CMD_RC_RECOVERY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGetRcInfo__)(struct Subdevice *, NV2080_CTRL_CMD_RC_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdSetRcInfo__)(struct Subdevice *, NV2080_CTRL_CMD_RC_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvdGetDumpSize__)(struct Subdevice *, NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvdGetDump__)(struct Subdevice *, NV2080_CTRL_NVD_GET_DUMP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvdGetNocatJournalRpt__)(struct Subdevice *, NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdNvdSetNocatJournalData__)(struct Subdevice *, NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeGetCaps__)(struct Subdevice *, NV2080_CTRL_CE_GET_CAPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeGetCapsV2__)(struct Subdevice *, NV2080_CTRL_CE_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeGetAllCaps__)(struct Subdevice *, NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeGetCePceMask__)(struct Subdevice *, NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeUpdatePceLceMappings__)(struct Subdevice *, NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnGetDmemUsage__)(struct Subdevice *, NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnInstrumentationMap__)(struct Subdevice *, NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnInstrumentationUnmap__)(struct Subdevice *, NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnInstrumentationGetInfo__)(struct Subdevice *, NV2080_CTRL_FLCN_INSTRUMENTATION_GET_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnInstrumentationGetControl__)(struct Subdevice *, NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnInstrumentationSetControl__)(struct Subdevice *, NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnInstrumentationRecalibrate__)(struct Subdevice *, NV2080_CTRL_FLCN_INSTRUMENTATION_RECALIBRATE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnGetEngineArch__)(struct Subdevice *, NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnUstreamerQueueInfo__)(struct Subdevice *, NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnUstreamerControlGet__)(struct Subdevice *, NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnUstreamerControlSet__)(struct Subdevice *, NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnGetCtxBufferInfo__)(struct Subdevice *, NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlcnGetCtxBufferSize__)(struct Subdevice *, NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdEccGetClientExposedCounters__)(struct 
Subdevice *, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuQueryEccConfiguration__)(struct Subdevice *, NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetEccConfiguration__)(struct Subdevice *, NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuResetEccErrorStatus__)(struct Subdevice *, NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlaRange__)(struct Subdevice *, NV2080_CTRL_FLA_RANGE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlaSetupInstanceMemBlock__)(struct Subdevice *, NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlaGetRange__)(struct Subdevice *, NV2080_CTRL_FLA_GET_RANGE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdFlaGetFabricMemStats__)(struct Subdevice *, NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGspGetFeatures__)(struct Subdevice *, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetActivePartitionIds__)(struct Subdevice *, NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetPartitionCapacity__)(struct Subdevice *, NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuDescribePartitions__)(struct Subdevice *, NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetPartitioningMode__)(struct Subdevice *, NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGrmgrGetGrFsInfo__)(struct Subdevice *, NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuSetPartitions__)(struct Subdevice *, NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGpuGetPartitions__)(struct Subdevice *, NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges__)(struct Subdevice *, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance__)(struct Subdevice *, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance__)(struct Subdevice *, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdOsUnixAudioDynamicPower__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdOsUnixVidmemPersistenceStatus__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdOsUnixUpdateTgpStatus__)(struct Subdevice *, NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDisplayGetIpVersion__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDisplayGetStaticInfo__)(struct Subdevice *, 
NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDisplaySetChannelPushbuffer__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDisplayWriteInstMem__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDisplaySetupRgLineIntr__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDisplaySetImportedImpData__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdDisplayGetDisplayMask__)(struct Subdevice *, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdMsencGetCaps__)(struct Subdevice *, NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer__)(struct Subdevice *, NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize__)(struct Subdevice *, NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalGetChipInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalGetDeviceInfoTable__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalGetConstructedFalconInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalRecoverAllComputeContexts__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalGetSmcMode__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalBusBindLocalGfidForP2p__)(struct Subdevice *, NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p__)(struct Subdevice *, NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalBusFlushWithSysmembar__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote__)(struct Subdevice *, NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalBusDestroyP2pMailbox__)(struct Subdevice *, NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalBusCreateC2cPeerMapping__)(struct Subdevice *, NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping__)(struct Subdevice *, NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGmmuGetStaticInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS *); + NV_STATUS 
(*__subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdCeGetPhysicalCaps__)(struct Subdevice *, NV2080_CTRL_CE_GET_CAPS_V2_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeGetAllPhysicalCaps__)(struct Subdevice *, NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeUpdateClassDB__)(struct Subdevice *, NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeGetFaultMethodBufferSize__)(struct Subdevice *, NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdCeGetHubPceMask__)(struct Subdevice *, NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdIntrGetKernelTable__)(struct Subdevice *, NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfCudaLimitDisable__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfOptpCliClear__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfBoostSet_2x__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfBoostSet_3x__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfBoostClear_3x__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount__)(struct Subdevice *, NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBifGetStaticInfo__)(struct Subdevice *, NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdBifGetAspmL1Flags__)(struct Subdevice *, NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdHshubPeerConnConfig__)(struct Subdevice *, NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdHshubFirstLinkPeerId__)(struct Subdevice *, NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdHshubGetHshubIdForLinks__)(struct Subdevice *, NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdHshubGetNumUnits__)(struct Subdevice *, NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdHshubNextHshubId__)(struct Subdevice *, NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS *); + NV_STATUS 
(*__subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr__)(struct Subdevice *); + NV_STATUS (*__subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr__)(struct Subdevice *, NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdInternalGetPcieP2pCaps__)(struct Subdevice *, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *); + NV_STATUS (*__subdeviceCtrlCmdGetAvailableHshubMask__)(struct Subdevice *, NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS *); + NvBool (*__subdeviceShareCallback__)(struct Subdevice *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__subdeviceMapTo__)(struct Subdevice *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__subdeviceGetOrAllocNotifShare__)(struct Subdevice *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__subdeviceCheckMemInterUnmap__)(struct Subdevice *, NvBool); + NV_STATUS (*__subdeviceGetMapAddrSpace__)(struct Subdevice *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__subdeviceSetNotificationShare__)(struct Subdevice *, struct NotifShare *); + NvU32 (*__subdeviceGetRefCount__)(struct Subdevice *); + void (*__subdeviceAddAdditionalDependants__)(struct RsClient *, struct Subdevice *, RsResourceRef *); + NV_STATUS (*__subdeviceControl_Prologue__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__subdeviceGetRegBaseOffsetAndSize__)(struct Subdevice *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__subdeviceUnmapFrom__)(struct Subdevice *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__subdeviceControl_Epilogue__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__subdeviceControlLookup__)(struct Subdevice *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__subdeviceGetInternalObjectHandle__)(struct Subdevice *); + NV_STATUS (*__subdeviceControl__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__subdeviceUnmap__)(struct Subdevice *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__subdeviceGetMemInterMapParams__)(struct Subdevice *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__subdeviceGetMemoryMappingDescriptor__)(struct Subdevice *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__subdeviceUnregisterEvent__)(struct Subdevice *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__subdeviceCanCopy__)(struct Subdevice *); + PEVENTNOTIFICATION *(*__subdeviceGetNotificationListPtr__)(struct Subdevice *); + struct NotifShare *(*__subdeviceGetNotificationShare__)(struct Subdevice *); + NV_STATUS (*__subdeviceMap__)(struct Subdevice *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__subdeviceAccessCallback__)(struct Subdevice *, struct RsClient *, void *, RsAccessRight); + NvU32 deviceInst; + NvU32 subDeviceInst; + struct Device *pDevice; + NvBool bMaxGrTickFreqRequested; + PNODE pP2PMappingList; + NvU64 P2PfbMappedBytes; + NvU32 notifyActions[165]; + NvHandle hNotifierMemory; + struct Memory *pNotifierMemory; + NvHandle hSemMemory; + NvU32 videoStream4KCount; + NvU32 videoStreamHDCount; + NvU32 videoStreamSDCount; + NvU32 videoStreamLinearCount; + NvU32 ofaCount; + NvBool bGpuDebugModeEnabled; + NvBool bRcWatchdogEnableRequested; + NvBool bRcWatchdogDisableRequested; + NvBool bRcWatchdogSoftDisableRequested; + NvBool bReservePerfMon; + NvU32 perfBoostIndex; + NvU32 perfBoostRefCount; + NvBool perfBoostEntryExists; + 
NvBool bLockedClockModeRequested; + NvU32 bNvlinkErrorInjectionModeRequested; + NvBool bSchedPolicySet; + NvBool bGcoffDisallowed; + NvBool bUpdateTGP; +}; + +#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__ +#define __NVOC_CLASS_Subdevice_TYPEDEF__ +typedef struct Subdevice Subdevice; +#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Subdevice +#define __nvoc_class_id_Subdevice 0x4b01b3 +#endif /* __nvoc_class_id_Subdevice */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Subdevice; + +#define __staticCast_Subdevice(pThis) \ + ((pThis)->__nvoc_pbase_Subdevice) + +#ifdef __nvoc_subdevice_h_disabled +#define __dynamicCast_Subdevice(pThis) ((Subdevice*)NULL) +#else //__nvoc_subdevice_h_disabled +#define __dynamicCast_Subdevice(pThis) \ + ((Subdevice*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Subdevice))) +#endif //__nvoc_subdevice_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_Subdevice(Subdevice**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_Subdevice(Subdevice**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_Subdevice(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_Subdevice((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define subdevicePreDestruct(pResource) subdevicePreDestruct_DISPATCH(pResource) +#define subdeviceInternalControlForward(pSubdevice, command, pParams, size) subdeviceInternalControlForward_DISPATCH(pSubdevice, command, pParams, size) +#define subdeviceControlFilter(pSubdevice, pCallContext, pParams) subdeviceControlFilter_DISPATCH(pSubdevice, pCallContext, pParams) +#define subdeviceCtrlCmdBiosGetInfoV2(pSubdevice, pBiosInfoParams) subdeviceCtrlCmdBiosGetInfoV2_DISPATCH(pSubdevice, pBiosInfoParams) +#define subdeviceCtrlCmdBiosGetSKUInfo(pSubdevice, pBiosGetSKUInfoParams) subdeviceCtrlCmdBiosGetSKUInfo_DISPATCH(pSubdevice, pBiosGetSKUInfoParams) +#define subdeviceCtrlCmdBiosGetPostTime(pSubdevice, pBiosPostTime) subdeviceCtrlCmdBiosGetPostTime_DISPATCH(pSubdevice, pBiosPostTime) +#define subdeviceCtrlCmdBiosGetUefiSupport(pSubdevice, pUEFIParams) subdeviceCtrlCmdBiosGetUefiSupport_DISPATCH(pSubdevice, pUEFIParams) +#define subdeviceCtrlCmdMcGetArchInfo(pSubdevice, pArchInfoParams) subdeviceCtrlCmdMcGetArchInfo_DISPATCH(pSubdevice, pArchInfoParams) +#define subdeviceCtrlCmdMcGetManufacturer(pSubdevice, pManufacturerParams) subdeviceCtrlCmdMcGetManufacturer_DISPATCH(pSubdevice, pManufacturerParams) +#define subdeviceCtrlCmdMcQueryHostclkSlowdownStatus(pSubdevice, pGetStatusParams) subdeviceCtrlCmdMcQueryHostclkSlowdownStatus_DISPATCH(pSubdevice, pGetStatusParams) +#define subdeviceCtrlCmdMcSetHostclkSlowdownStatus(pSubdevice, pParams) subdeviceCtrlCmdMcSetHostclkSlowdownStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdMcChangeReplayableFaultOwnership(pSubdevice, pReplayableFaultOwnrshpParams) subdeviceCtrlCmdMcChangeReplayableFaultOwnership_DISPATCH(pSubdevice, pReplayableFaultOwnrshpParams) +#define subdeviceCtrlCmdMcServiceInterrupts(pSubdevice, pServiceInterruptParams) subdeviceCtrlCmdMcServiceInterrupts_DISPATCH(pSubdevice, pServiceInterruptParams) +#define subdeviceCtrlCmdDmaInvalidateTLB(pSubdevice, pParams) subdeviceCtrlCmdDmaInvalidateTLB_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDmaGetInfo(pSubdevice, pDmaInfoParams) subdeviceCtrlCmdDmaGetInfo_DISPATCH(pSubdevice, pDmaInfoParams) +#define 
subdeviceCtrlCmdBusGetPciInfo(pSubdevice, pPciInfoParams) subdeviceCtrlCmdBusGetPciInfo_DISPATCH(pSubdevice, pPciInfoParams) +#define subdeviceCtrlCmdBusGetInfo(pSubdevice, pBusInfoParams) subdeviceCtrlCmdBusGetInfo_DISPATCH(pSubdevice, pBusInfoParams) +#define subdeviceCtrlCmdBusGetInfoV2(pSubdevice, pBusInfoParams) subdeviceCtrlCmdBusGetInfoV2_DISPATCH(pSubdevice, pBusInfoParams) +#define subdeviceCtrlCmdBusGetPciBarInfo(pSubdevice, pBarInfoParams) subdeviceCtrlCmdBusGetPciBarInfo_DISPATCH(pSubdevice, pBarInfoParams) +#define subdeviceCtrlCmdBusSetPcieSpeed(pSubdevice, pBusInfoParams) subdeviceCtrlCmdBusSetPcieSpeed_DISPATCH(pSubdevice, pBusInfoParams) +#define subdeviceCtrlCmdBusSetPcieLinkWidth(pSubdevice, pLinkWidthParams) subdeviceCtrlCmdBusSetPcieLinkWidth_DISPATCH(pSubdevice, pLinkWidthParams) +#define subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed(pSubdevice, pBusInfoParams) subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed_DISPATCH(pSubdevice, pBusInfoParams) +#define subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed(pSubdevice, pBusInfoParams) subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed_DISPATCH(pSubdevice, pBusInfoParams) +#define subdeviceCtrlCmdBusHWBCGetUpstreamBAR0(pSubdevice, pBusInfoParams) subdeviceCtrlCmdBusHWBCGetUpstreamBAR0_DISPATCH(pSubdevice, pBusInfoParams) +#define subdeviceCtrlCmdBusServiceGpuMultifunctionState(pSubdevice, pParams) subdeviceCtrlCmdBusServiceGpuMultifunctionState_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetPexCounters(pSubdevice, pParams) subdeviceCtrlCmdBusGetPexCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetBFD(pSubdevice, pBusGetBFDParams) subdeviceCtrlCmdBusGetBFD_DISPATCH(pSubdevice, pBusGetBFDParams) +#define subdeviceCtrlCmdBusGetAspmDisableFlags(pSubdevice, pParams) subdeviceCtrlCmdBusGetAspmDisableFlags_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusControlPublicAspmBits(pSubdevice, pParams) subdeviceCtrlCmdBusControlPublicAspmBits_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusClearPexCounters(pSubdevice, pParams) subdeviceCtrlCmdBusClearPexCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetPexUtilCounters(pSubdevice, pParams) subdeviceCtrlCmdBusGetPexUtilCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusClearPexUtilCounters(pSubdevice, pParams) subdeviceCtrlCmdBusClearPexUtilCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusFreezePexCounters(pSubdevice, pParams) subdeviceCtrlCmdBusFreezePexCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetPexLaneCounters(pSubdevice, pParams) subdeviceCtrlCmdBusGetPexLaneCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetPcieLtrLatency(pSubdevice, pParams) subdeviceCtrlCmdBusGetPcieLtrLatency_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusSetPcieLtrLatency(pSubdevice, pParams) subdeviceCtrlCmdBusSetPcieLtrLatency_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetNvlinkPeerIdMask(pSubdevice, pParams) subdeviceCtrlCmdBusGetNvlinkPeerIdMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusSetEomParameters(pSubdevice, pParams) subdeviceCtrlCmdBusSetEomParameters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetUphyDlnCfgSpace(pSubdevice, pParams) subdeviceCtrlCmdBusGetUphyDlnCfgSpace_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetEomStatus(pSubdevice, pParams) subdeviceCtrlCmdBusGetEomStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusSysmemAccess(pSubdevice, pParams) 
subdeviceCtrlCmdBusSysmemAccess_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetNvlinkCaps(pSubdevice, pParams) subdeviceCtrlCmdBusGetNvlinkCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBusGetNvlinkStatus(pSubdevice, pParams) subdeviceCtrlCmdBusGetNvlinkStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGetNvlinkCounters(pSubdevice, pParams) subdeviceCtrlCmdGetNvlinkCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdClearNvlinkCounters(pSubdevice, pParams) subdeviceCtrlCmdClearNvlinkCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkSetupEom(pSubdevice, pParams) subdeviceCtrlCmdNvlinkSetupEom_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkGetPowerState(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetPowerState_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkReadTpCounters(pSubdevice, pParams) subdeviceCtrlCmdNvlinkReadTpCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkGetLpCounters(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetLpCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkEnableNvlinkPeer(pSubdevice, pParams) subdeviceCtrlCmdNvlinkEnableNvlinkPeer_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkCoreCallback(pSubdevice, pParams) subdeviceCtrlCmdNvlinkCoreCallback_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkSetLoopbackMode(pSubdevice, pParams) subdeviceCtrlCmdNvlinkSetLoopbackMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid(pSubdevice, pParams) subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkUpdateHshubMux(pSubdevice, pParams) subdeviceCtrlCmdNvlinkUpdateHshubMux_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer(pSubdevice, pParams) subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer(pSubdevice, pParams) subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkRemoveNvlinkMapping(pSubdevice, pParams) subdeviceCtrlCmdNvlinkRemoveNvlinkMapping_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkSaveRestoreHshubState(pSubdevice, pParams) subdeviceCtrlCmdNvlinkSaveRestoreHshubState_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkProgramBufferready(pSubdevice, pParams) subdeviceCtrlCmdNvlinkProgramBufferready_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkUpdateCurrentConfig(pSubdevice, pParams) subdeviceCtrlCmdNvlinkUpdateCurrentConfig_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkUpdatePeerLinkMask(pSubdevice, pParams) subdeviceCtrlCmdNvlinkUpdatePeerLinkMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkUpdateLinkConnection(pSubdevice, pParams) subdeviceCtrlCmdNvlinkUpdateLinkConnection_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkEnableLinksPostTopology(pSubdevice, pParams) subdeviceCtrlCmdNvlinkEnableLinksPostTopology_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkGetRefreshCounters(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetRefreshCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkClearRefreshCounters(pSubdevice, pParams) subdeviceCtrlCmdNvlinkClearRefreshCounters_DISPATCH(pSubdevice, pParams) +#define 
subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkProgramLinkSpeed(pSubdevice, pParams) subdeviceCtrlCmdNvlinkProgramLinkSpeed_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkAreLinksTrained(pSubdevice, pParams) subdeviceCtrlCmdNvlinkAreLinksTrained_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkResetLinks(pSubdevice, pParams) subdeviceCtrlCmdNvlinkResetLinks_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkDisableDlInterrupts(pSubdevice, pParams) subdeviceCtrlCmdNvlinkDisableDlInterrupts_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkGetLinkAndClockInfo(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetLinkAndClockInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkSetupNvlinkSysmem(pSubdevice, pParams) subdeviceCtrlCmdNvlinkSetupNvlinkSysmem_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkProcessForcedConfigs(pSubdevice, pParams) subdeviceCtrlCmdNvlinkProcessForcedConfigs_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkSyncLaneShutdownProps(pSubdevice, pParams) subdeviceCtrlCmdNvlinkSyncLaneShutdownProps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts(pSubdevice, pParams) subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask(pSubdevice, pParams) subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr(pSubdevice, pParams) subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo(pSubdevice, pParams) subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvlinkEnableLinks(pSubdevice) subdeviceCtrlCmdNvlinkEnableLinks_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdNvlinkProcessInitDisabledLinks(pSubdevice, pParams) subdeviceCtrlCmdNvlinkProcessInitDisabledLinks_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdI2cReadBuffer(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cReadBuffer_DISPATCH(pSubdevice, pI2cParams) +#define subdeviceCtrlCmdI2cWriteBuffer(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cWriteBuffer_DISPATCH(pSubdevice, pI2cParams) +#define subdeviceCtrlCmdI2cReadReg(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cReadReg_DISPATCH(pSubdevice, pI2cParams) +#define subdeviceCtrlCmdI2cWriteReg(pSubdevice, pI2cParams) subdeviceCtrlCmdI2cWriteReg_DISPATCH(pSubdevice, pI2cParams) +#define subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples(pSubdevice, pParams) subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2(pSubdevice, pParams) subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdPerfRatedTdpGetControl(pSubdevice, pControlParams) subdeviceCtrlCmdPerfRatedTdpGetControl_DISPATCH(pSubdevice, pControlParams) +#define subdeviceCtrlCmdPerfRatedTdpSetControl(pSubdevice, pControlParams) subdeviceCtrlCmdPerfRatedTdpSetControl_DISPATCH(pSubdevice, pControlParams) +#define 
subdeviceCtrlCmdPerfReservePerfmonHw(pSubdevice, pPerfmonParams) subdeviceCtrlCmdPerfReservePerfmonHw_DISPATCH(pSubdevice, pPerfmonParams) +#define subdeviceCtrlCmdKPerfBoost(pSubdevice, pBoostParams) subdeviceCtrlCmdKPerfBoost_DISPATCH(pSubdevice, pBoostParams) +#define subdeviceCtrlCmdFbGetFBRegionInfo(pSubdevice, pGFBRIParams) subdeviceCtrlCmdFbGetFBRegionInfo_DISPATCH(pSubdevice, pGFBRIParams) +#define subdeviceCtrlCmdFbGetBar1Offset(pSubdevice, pFbMemParams) subdeviceCtrlCmdFbGetBar1Offset_DISPATCH(pSubdevice, pFbMemParams) +#define subdeviceCtrlCmdFbIsKind(pSubdevice, pIsKindParams) subdeviceCtrlCmdFbIsKind_DISPATCH(pSubdevice, pIsKindParams) +#define subdeviceCtrlCmdFbGetMemAlignment(pSubdevice, pParams) subdeviceCtrlCmdFbGetMemAlignment_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetHeapReservationSize(pSubdevice, pParams) subdeviceCtrlCmdFbGetHeapReservationSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetInfo(pSubdevice, pFbInfoParams) subdeviceCtrlCmdFbGetInfo_DISPATCH(pSubdevice, pFbInfoParams) +#define subdeviceCtrlCmdFbGetInfoV2(pSubdevice, pFbInfoParams) subdeviceCtrlCmdFbGetInfoV2_DISPATCH(pSubdevice, pFbInfoParams) +#define subdeviceCtrlCmdFbGetCarveoutAddressInfo(pSubdevice, pParams) subdeviceCtrlCmdFbGetCarveoutAddressInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetCalibrationLockFailed(pSubdevice, pGCLFParams) subdeviceCtrlCmdFbGetCalibrationLockFailed_DISPATCH(pSubdevice, pGCLFParams) +#define subdeviceCtrlCmdFbFlushGpuCache(pSubdevice, pCacheFlushParams) subdeviceCtrlCmdFbFlushGpuCache_DISPATCH(pSubdevice, pCacheFlushParams) +#define subdeviceCtrlCmdFbSetGpuCacheAllocPolicy(pSubdevice, pParams) subdeviceCtrlCmdFbSetGpuCacheAllocPolicy_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetGpuCacheAllocPolicy(pSubdevice, pGpuCacheAllocPolicyParams) subdeviceCtrlCmdFbGetGpuCacheAllocPolicy_DISPATCH(pSubdevice, pGpuCacheAllocPolicyParams) +#define subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2(pSubdevice, pParams) subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2(pSubdevice, pParams) subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetGpuCacheInfo(pSubdevice, pGpuCacheParams) subdeviceCtrlCmdFbGetGpuCacheInfo_DISPATCH(pSubdevice, pGpuCacheParams) +#define subdeviceCtrlCmdFbGetCliManagedOfflinedPages(pSubdevice, pOsOfflinedParams) subdeviceCtrlCmdFbGetCliManagedOfflinedPages_DISPATCH(pSubdevice, pOsOfflinedParams) +#define subdeviceCtrlCmdFbGetOfflinedPages(pSubdevice, pBlackListParams) subdeviceCtrlCmdFbGetOfflinedPages_DISPATCH(pSubdevice, pBlackListParams) +#define subdeviceCtrlCmdFbSetupVprRegion(pSubdevice, pCliReqParams) subdeviceCtrlCmdFbSetupVprRegion_DISPATCH(pSubdevice, pCliReqParams) +#define subdeviceCtrlCmdFbGetLTCInfoForFBP(pSubdevice, pParams) subdeviceCtrlCmdFbGetLTCInfoForFBP_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetCompBitCopyConstructInfo(pSubdevice, pParams) subdeviceCtrlCmdFbGetCompBitCopyConstructInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbPatchPbrForMining(pSubdevice, pParams) subdeviceCtrlCmdFbPatchPbrForMining_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetRemappedRows(pSubdevice, pRemappedRowsParams) subdeviceCtrlCmdFbGetRemappedRows_DISPATCH(pSubdevice, pRemappedRowsParams) +#define subdeviceCtrlCmdFbGetFsInfo(pSubdevice, pInfoParams) subdeviceCtrlCmdFbGetFsInfo_DISPATCH(pSubdevice, pInfoParams) +#define 
subdeviceCtrlCmdFbGetRowRemapperHistogram(pSubdevice, pParams) subdeviceCtrlCmdFbGetRowRemapperHistogram_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetDynamicOfflinedPages(pSubdevice, pParams) subdeviceCtrlCmdFbGetDynamicOfflinedPages_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbUpdateNumaStatus(pSubdevice, pParams) subdeviceCtrlCmdFbUpdateNumaStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetNumaInfo(pSubdevice, pParams) subdeviceCtrlCmdFbGetNumaInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdMemSysGetStaticConfig(pSubdevice, pParams) subdeviceCtrlCmdMemSysGetStaticConfig_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdMemSysSetPartitionableMem(pSubdevice, pParams) subdeviceCtrlCmdMemSysSetPartitionableMem_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKMemSysGetMIGMemoryConfig(pSubdevice, pParams) subdeviceCtrlCmdKMemSysGetMIGMemoryConfig_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbSetZbcReferenced(pSubdevice, pParams) subdeviceCtrlCmdFbSetZbcReferenced_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdMemSysL2InvalidateEvict(pSubdevice, pParams) subdeviceCtrlCmdMemSysL2InvalidateEvict_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches(pSubdevice) subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdMemSysDisableNvlinkPeers(pSubdevice) subdeviceCtrlCmdMemSysDisableNvlinkPeers_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdMemSysProgramRawCompressionMode(pSubdevice, pParams) subdeviceCtrlCmdMemSysProgramRawCompressionMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable(pSubdevice, pParams) subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbGetCtagsForCbcEviction(pSubdevice, pParams) subdeviceCtrlCmdFbGetCtagsForCbcEviction_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbCBCOp(pSubdevice, pParams) subdeviceCtrlCmdFbCBCOp_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbSetRrd(pSubdevice, pParams) subdeviceCtrlCmdFbSetRrd_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbSetReadLimit(pSubdevice, pParams) subdeviceCtrlCmdFbSetReadLimit_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFbSetWriteLimit(pSubdevice, pParams) subdeviceCtrlCmdFbSetWriteLimit_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdSetGpfifo(pSubdevice, pSetGpFifoParams) subdeviceCtrlCmdSetGpfifo_DISPATCH(pSubdevice, pSetGpFifoParams) +#define subdeviceCtrlCmdSetOperationalProperties(pSubdevice, pSetOperationalProperties) subdeviceCtrlCmdSetOperationalProperties_DISPATCH(pSubdevice, pSetOperationalProperties) +#define subdeviceCtrlCmdFifoBindEngines(pSubdevice, pBindParams) subdeviceCtrlCmdFifoBindEngines_DISPATCH(pSubdevice, pBindParams) +#define subdeviceCtrlCmdGetPhysicalChannelCount(pSubdevice, pParams) subdeviceCtrlCmdGetPhysicalChannelCount_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFifoGetInfo(pSubdevice, pFifoInfoParams) subdeviceCtrlCmdFifoGetInfo_DISPATCH(pSubdevice, pFifoInfoParams) +#define subdeviceCtrlCmdFifoDisableChannels(pSubdevice, pDisableChannelParams) subdeviceCtrlCmdFifoDisableChannels_DISPATCH(pSubdevice, pDisableChannelParams) +#define subdeviceCtrlCmdFifoDisableUsermodeChannels(pSubdevice, pParams) subdeviceCtrlCmdFifoDisableUsermodeChannels_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFifoGetChannelMemInfo(pSubdevice, pChannelMemParams) 
subdeviceCtrlCmdFifoGetChannelMemInfo_DISPATCH(pSubdevice, pChannelMemParams) +#define subdeviceCtrlCmdFifoGetUserdLocation(pSubdevice, pUserdLocationParams) subdeviceCtrlCmdFifoGetUserdLocation_DISPATCH(pSubdevice, pUserdLocationParams) +#define subdeviceCtrlCmdFifoGetDeviceInfoTable(pSubdevice, pParams) subdeviceCtrlCmdFifoGetDeviceInfoTable_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFifoClearFaultedBit(pSubdevice, pParams) subdeviceCtrlCmdFifoClearFaultedBit_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFifoRunlistSetSchedPolicy(pSubdevice, pSchedPolicyParams) subdeviceCtrlCmdFifoRunlistSetSchedPolicy_DISPATCH(pSubdevice, pSchedPolicyParams) +#define subdeviceCtrlCmdFifoUpdateChannelInfo(pSubdevice, pChannelInfo) subdeviceCtrlCmdFifoUpdateChannelInfo_DISPATCH(pSubdevice, pChannelInfo) +#define subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers(pSubdevice, pParams) subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalFifoGetNumChannels(pSubdevice, pNumChannelsParams) subdeviceCtrlCmdInternalFifoGetNumChannels_DISPATCH(pSubdevice, pNumChannelsParams) +#define subdeviceCtrlCmdKGrGetInfo(pSubdevice, pParams) subdeviceCtrlCmdKGrGetInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetInfoV2(pSubdevice, pParams) subdeviceCtrlCmdKGrGetInfoV2_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetCapsV2(pSubdevice, pGrCapsParams) subdeviceCtrlCmdKGrGetCapsV2_DISPATCH(pSubdevice, pGrCapsParams) +#define subdeviceCtrlCmdKGrGetCtxswModes(pSubdevice, pParams) subdeviceCtrlCmdKGrGetCtxswModes_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrCtxswZcullMode(pSubdevice, pParams) subdeviceCtrlCmdKGrCtxswZcullMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrCtxswZcullBind(pSubdevice, pParams) subdeviceCtrlCmdKGrCtxswZcullBind_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetZcullInfo(pSubdevice, pZcullInfoParams) subdeviceCtrlCmdKGrGetZcullInfo_DISPATCH(pSubdevice, pZcullInfoParams) +#define subdeviceCtrlCmdKGrCtxswPmMode(pSubdevice, pParams) subdeviceCtrlCmdKGrCtxswPmMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrCtxswPmBind(pSubdevice, pParams) subdeviceCtrlCmdKGrCtxswPmBind_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrSetGpcTileMap(pSubdevice, pParams) subdeviceCtrlCmdKGrSetGpcTileMap_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrCtxswSmpcMode(pSubdevice, pParams) subdeviceCtrlCmdKGrCtxswSmpcMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrPcSamplingMode(pSubdevice, pParams) subdeviceCtrlCmdKGrPcSamplingMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetSmToGpcTpcMappings(pSubdevice, pParams) subdeviceCtrlCmdKGrGetSmToGpcTpcMappings_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetGlobalSmOrder(pSubdevice, pParams) subdeviceCtrlCmdKGrGetGlobalSmOrder_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrSetCtxswPreemptionMode(pSubdevice, pParams) subdeviceCtrlCmdKGrSetCtxswPreemptionMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrCtxswPreemptionBind(pSubdevice, pParams) subdeviceCtrlCmdKGrCtxswPreemptionBind_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetROPInfo(pSubdevice, pParams) subdeviceCtrlCmdKGrGetROPInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetCtxswStats(pSubdevice, pParams) subdeviceCtrlCmdKGrGetCtxswStats_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetCtxBufferSize(pSubdevice, pParams) 
subdeviceCtrlCmdKGrGetCtxBufferSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetCtxBufferInfo(pSubdevice, pParams) subdeviceCtrlCmdKGrGetCtxBufferInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetCtxBufferPtes(pSubdevice, pParams) subdeviceCtrlCmdKGrGetCtxBufferPtes_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetCurrentResidentChannel(pSubdevice, pParams) subdeviceCtrlCmdKGrGetCurrentResidentChannel_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetVatAlarmData(pSubdevice, pParams) subdeviceCtrlCmdKGrGetVatAlarmData_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetAttributeBufferSize(pSubdevice, pParams) subdeviceCtrlCmdKGrGetAttributeBufferSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGfxPoolQuerySize(pSubdevice, pParams) subdeviceCtrlCmdKGrGfxPoolQuerySize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGfxPoolInitialize(pSubdevice, pParams) subdeviceCtrlCmdKGrGfxPoolInitialize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGfxPoolAddSlots(pSubdevice, pParams) subdeviceCtrlCmdKGrGfxPoolAddSlots_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGfxPoolRemoveSlots(pSubdevice, pParams) subdeviceCtrlCmdKGrGfxPoolRemoveSlots_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetPpcMask(pSubdevice, pParams) subdeviceCtrlCmdKGrGetPpcMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrSetTpcPartitionMode(pSubdevice, pParams) subdeviceCtrlCmdKGrSetTpcPartitionMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetSmIssueRateModifier(pSubdevice, pParams) subdeviceCtrlCmdKGrGetSmIssueRateModifier_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrFecsBindEvtbufForUid(pSubdevice, pParams) subdeviceCtrlCmdKGrFecsBindEvtbufForUid_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2(pSubdevice, pParams) subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetPhysGpcMask(pSubdevice, pParams) subdeviceCtrlCmdKGrGetPhysGpcMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetGpcMask(pSubdevice, pParams) subdeviceCtrlCmdKGrGetGpcMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetTpcMask(pSubdevice, pParams) subdeviceCtrlCmdKGrGetTpcMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetEngineContextProperties(pSubdevice, pParams) subdeviceCtrlCmdKGrGetEngineContextProperties_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetNumTpcsForGpc(pSubdevice, pParams) subdeviceCtrlCmdKGrGetNumTpcsForGpc_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetGpcTileMap(pSubdevice, pParams) subdeviceCtrlCmdKGrGetGpcTileMap_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrGetZcullMask(pSubdevice, pParams) subdeviceCtrlCmdKGrGetZcullMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetInfo(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetCaps(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetPpcMasks(pSubdevice, 
pParams) subdeviceCtrlCmdKGrInternalStaticGetPpcMasks_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetZcullInfo(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetZcullInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetRopInfo(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetRopInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdKGrInternalStaticGetPdbProperties(pSubdevice, pParams) subdeviceCtrlCmdKGrInternalStaticGetPdbProperties_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetCachedInfo(pSubdevice, pGpuInfoParams) subdeviceCtrlCmdGpuGetCachedInfo_DISPATCH(pSubdevice, pGpuInfoParams) +#define subdeviceCtrlCmdGpuGetInfoV2(pSubdevice, pGpuInfoParams) subdeviceCtrlCmdGpuGetInfoV2_DISPATCH(pSubdevice, pGpuInfoParams) +#define subdeviceCtrlCmdGpuGetIpVersion(pSubdevice, pGpuIpVersionParams) subdeviceCtrlCmdGpuGetIpVersion_DISPATCH(pSubdevice, pGpuIpVersionParams) +#define subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo(pSubdevice, pBridgeInfoParams) subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo_DISPATCH(pSubdevice, pBridgeInfoParams) +#define subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu(pSubdevice, pBridgeInfoParams) subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu_DISPATCH(pSubdevice, pBridgeInfoParams) +#define subdeviceCtrlCmdGpuSetOptimusInfo(pSubdevice, pGpuOptimusInfoParams) subdeviceCtrlCmdGpuSetOptimusInfo_DISPATCH(pSubdevice, pGpuOptimusInfoParams) +#define subdeviceCtrlCmdGpuGetNameString(pSubdevice, pNameStringParams) subdeviceCtrlCmdGpuGetNameString_DISPATCH(pSubdevice, pNameStringParams) +#define subdeviceCtrlCmdGpuGetShortNameString(pSubdevice, pShortNameStringParams) subdeviceCtrlCmdGpuGetShortNameString_DISPATCH(pSubdevice, pShortNameStringParams) +#define subdeviceCtrlCmdGpuGetEncoderCapacity(pSubdevice, pEncoderCapacityParams) subdeviceCtrlCmdGpuGetEncoderCapacity_DISPATCH(pSubdevice, pEncoderCapacityParams) +#define subdeviceCtrlCmdGpuGetNvencSwSessionStats(pSubdevice, pParams) subdeviceCtrlCmdGpuGetNvencSwSessionStats_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetNvencSwSessionInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetNvencSwSessionInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetNvfbcSwSessionStats(pSubdevice, params) subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_DISPATCH(pSubdevice, params) +#define subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo(pSubdevice, params) subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo_DISPATCH(pSubdevice, params) +#define subdeviceCtrlCmdGpuSetFabricAddr(pSubdevice, pParams) subdeviceCtrlCmdGpuSetFabricAddr_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuSetPower(pSubdevice, pSetPowerParams) subdeviceCtrlCmdGpuSetPower_DISPATCH(pSubdevice, pSetPowerParams) +#define subdeviceCtrlCmdGpuGetSdm(pSubdevice, pSdmParams) subdeviceCtrlCmdGpuGetSdm_DISPATCH(pSubdevice, 
pSdmParams) +#define subdeviceCtrlCmdGpuSetSdm(pSubdevice, pSdmParams) subdeviceCtrlCmdGpuSetSdm_DISPATCH(pSubdevice, pSdmParams) +#define subdeviceCtrlCmdGpuGetSimulationInfo(pSubdevice, pGpuSimulationInfoParams) subdeviceCtrlCmdGpuGetSimulationInfo_DISPATCH(pSubdevice, pGpuSimulationInfoParams) +#define subdeviceCtrlCmdGpuGetEngines(pSubdevice, pParams) subdeviceCtrlCmdGpuGetEngines_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetEnginesV2(pSubdevice, pEngineParams) subdeviceCtrlCmdGpuGetEnginesV2_DISPATCH(pSubdevice, pEngineParams) +#define subdeviceCtrlCmdGpuGetEngineClasslist(pSubdevice, pClassParams) subdeviceCtrlCmdGpuGetEngineClasslist_DISPATCH(pSubdevice, pClassParams) +#define subdeviceCtrlCmdGpuGetEnginePartnerList(pSubdevice, pPartnerListParams) subdeviceCtrlCmdGpuGetEnginePartnerList_DISPATCH(pSubdevice, pPartnerListParams) +#define subdeviceCtrlCmdGpuGetFermiGpcInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetFermiGpcInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetFermiTpcInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetFermiTpcInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetFermiZcullInfo(pSubdevice, pGpuFermiZcullInfoParams) subdeviceCtrlCmdGpuGetFermiZcullInfo_DISPATCH(pSubdevice, pGpuFermiZcullInfoParams) +#define subdeviceCtrlCmdGpuGetPesInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetPesInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuExecRegOps(pSubdevice, pRegParams) subdeviceCtrlCmdGpuExecRegOps_DISPATCH(pSubdevice, pRegParams) +#define subdeviceCtrlCmdGpuQueryMode(pSubdevice, pQueryMode) subdeviceCtrlCmdGpuQueryMode_DISPATCH(pSubdevice, pQueryMode) +#define subdeviceCtrlCmdGpuGetInforomImageVersion(pSubdevice, pVersionInfo) subdeviceCtrlCmdGpuGetInforomImageVersion_DISPATCH(pSubdevice, pVersionInfo) +#define subdeviceCtrlCmdGpuGetInforomObjectVersion(pSubdevice, pVersionInfo) subdeviceCtrlCmdGpuGetInforomObjectVersion_DISPATCH(pSubdevice, pVersionInfo) +#define subdeviceCtrlCmdGpuQueryInforomEccSupport(pSubdevice) subdeviceCtrlCmdGpuQueryInforomEccSupport_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuQueryEccStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuQueryEccStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetOEMBoardInfo(pSubdevice, pBoardInfo) subdeviceCtrlCmdGpuGetOEMBoardInfo_DISPATCH(pSubdevice, pBoardInfo) +#define subdeviceCtrlCmdGpuGetOEMInfo(pSubdevice, pOemInfo) subdeviceCtrlCmdGpuGetOEMInfo_DISPATCH(pSubdevice, pOemInfo) +#define subdeviceCtrlCmdGpuHandleGpuSR(pSubdevice) subdeviceCtrlCmdGpuHandleGpuSR_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuSetComputeModeRules(pSubdevice, pSetRulesParams) subdeviceCtrlCmdGpuSetComputeModeRules_DISPATCH(pSubdevice, pSetRulesParams) +#define subdeviceCtrlCmdGpuQueryComputeModeRules(pSubdevice, pQueryRulesParams) subdeviceCtrlCmdGpuQueryComputeModeRules_DISPATCH(pSubdevice, pQueryRulesParams) +#define subdeviceCtrlCmdGpuAcquireComputeModeReservation(pSubdevice) subdeviceCtrlCmdGpuAcquireComputeModeReservation_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuReleaseComputeModeReservation(pSubdevice) subdeviceCtrlCmdGpuReleaseComputeModeReservation_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdGpuInitializeCtx(pSubdevice, pInitializeCtxParams) subdeviceCtrlCmdGpuInitializeCtx_DISPATCH(pSubdevice, pInitializeCtxParams) +#define subdeviceCtrlCmdGpuPromoteCtx(pSubdevice, pPromoteCtxParams) subdeviceCtrlCmdGpuPromoteCtx_DISPATCH(pSubdevice, pPromoteCtxParams) +#define subdeviceCtrlCmdGpuEvictCtx(pSubdevice, pEvictCtxParams) 
subdeviceCtrlCmdGpuEvictCtx_DISPATCH(pSubdevice, pEvictCtxParams) +#define subdeviceCtrlCmdGpuGetId(pSubdevice, pIdParams) subdeviceCtrlCmdGpuGetId_DISPATCH(pSubdevice, pIdParams) +#define subdeviceCtrlCmdGpuGetGidInfo(pSubdevice, pGidInfoParams) subdeviceCtrlCmdGpuGetGidInfo_DISPATCH(pSubdevice, pGidInfoParams) +#define subdeviceCtrlCmdGpuQueryIllumSupport(pSubdevice, pConfigParams) subdeviceCtrlCmdGpuQueryIllumSupport_DISPATCH(pSubdevice, pConfigParams) +#define subdeviceCtrlCmdGpuGetIllum(pSubdevice, pConfigParams) subdeviceCtrlCmdGpuGetIllum_DISPATCH(pSubdevice, pConfigParams) +#define subdeviceCtrlCmdGpuSetIllum(pSubdevice, pConfigParams) subdeviceCtrlCmdGpuSetIllum_DISPATCH(pSubdevice, pConfigParams) +#define subdeviceCtrlCmdGpuQueryScrubberStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuQueryScrubberStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetVprCaps(pSubdevice, pParams) subdeviceCtrlCmdGpuGetVprCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetVprInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetVprInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetPids(pSubdevice, pGetPidsParams) subdeviceCtrlCmdGpuGetPids_DISPATCH(pSubdevice, pGetPidsParams) +#define subdeviceCtrlCmdGpuGetPidInfo(pSubdevice, pGetPidInfoParams) subdeviceCtrlCmdGpuGetPidInfo_DISPATCH(pSubdevice, pGetPidInfoParams) +#define subdeviceCtrlCmdGpuInterruptFunction(pSubdevice, pParams) subdeviceCtrlCmdGpuInterruptFunction_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuQueryFunctionStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuQueryFunctionStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuReportNonReplayableFault(pSubdevice, pParams) subdeviceCtrlCmdGpuReportNonReplayableFault_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetEngineFaultInfo(pSubdevice, pParams) subdeviceCtrlCmdGpuGetEngineFaultInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetEngineRunlistPriBase(pSubdevice, pParams) subdeviceCtrlCmdGpuGetEngineRunlistPriBase_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetHwEngineId(pSubdevice, pParams) subdeviceCtrlCmdGpuGetHwEngineId_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetMaxSupportedPageSize(pSubdevice, pParams) subdeviceCtrlCmdGpuGetMaxSupportedPageSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuSetComputePolicyConfig(pSubdevice, pParams) subdeviceCtrlCmdGpuSetComputePolicyConfig_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetComputePolicyConfig(pSubdevice, pParams) subdeviceCtrlCmdGpuGetComputePolicyConfig_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdValidateMemMapRequest(pSubdevice, pParams) subdeviceCtrlCmdValidateMemMapRequest_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetEngineLoadTimes(pSubdevice, pParams) subdeviceCtrlCmdGpuGetEngineLoadTimes_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdEventSetTrigger(pSubdevice) subdeviceCtrlCmdEventSetTrigger_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdEventSetTriggerFifo(pSubdevice, pTriggerFifoParams) subdeviceCtrlCmdEventSetTriggerFifo_DISPATCH(pSubdevice, pTriggerFifoParams) +#define subdeviceCtrlCmdEventSetNotification(pSubdevice, pSetEventParams) subdeviceCtrlCmdEventSetNotification_DISPATCH(pSubdevice, pSetEventParams) +#define subdeviceCtrlCmdEventSetMemoryNotifies(pSubdevice, pSetMemoryNotifiesParams) subdeviceCtrlCmdEventSetMemoryNotifies_DISPATCH(pSubdevice, pSetMemoryNotifiesParams) +#define subdeviceCtrlCmdEventSetSemaphoreMemory(pSubdevice, 
pSetSemMemoryParams) subdeviceCtrlCmdEventSetSemaphoreMemory_DISPATCH(pSubdevice, pSetSemMemoryParams) +#define subdeviceCtrlCmdEventSetSemaMemValidation(pSubdevice, pSetSemaMemValidationParams) subdeviceCtrlCmdEventSetSemaMemValidation_DISPATCH(pSubdevice, pSetSemaMemValidationParams) +#define subdeviceCtrlCmdTimerCancel(pSubdevice) subdeviceCtrlCmdTimerCancel_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdTimerSchedule(pSubdevice, pParams) subdeviceCtrlCmdTimerSchedule_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdTimerGetTime(pSubdevice, pParams) subdeviceCtrlCmdTimerGetTime_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdTimerGetRegisterOffset(pSubdevice, pTimerRegOffsetParams) subdeviceCtrlCmdTimerGetRegisterOffset_DISPATCH(pSubdevice, pTimerRegOffsetParams) +#define subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo(pSubdevice, pParams) subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdTimerSetGrTickFreq(pSubdevice, pParams) subdeviceCtrlCmdTimerSetGrTickFreq_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdRcReadVirtualMem(pSubdevice, pReadVirtMemParam) subdeviceCtrlCmdRcReadVirtualMem_DISPATCH(pSubdevice, pReadVirtMemParam) +#define subdeviceCtrlCmdRcGetErrorCount(pSubdevice, pErrorCount) subdeviceCtrlCmdRcGetErrorCount_DISPATCH(pSubdevice, pErrorCount) +#define subdeviceCtrlCmdRcGetErrorV2(pSubdevice, pErrorParams) subdeviceCtrlCmdRcGetErrorV2_DISPATCH(pSubdevice, pErrorParams) +#define subdeviceCtrlCmdRcSetCleanErrorHistory(pSubdevice) subdeviceCtrlCmdRcSetCleanErrorHistory_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdRcGetWatchdogInfo(pSubdevice, pWatchdogInfoParams) subdeviceCtrlCmdRcGetWatchdogInfo_DISPATCH(pSubdevice, pWatchdogInfoParams) +#define subdeviceCtrlCmdRcDisableWatchdog(pSubdevice) subdeviceCtrlCmdRcDisableWatchdog_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdRcSoftDisableWatchdog(pSubdevice) subdeviceCtrlCmdRcSoftDisableWatchdog_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdRcEnableWatchdog(pSubdevice) subdeviceCtrlCmdRcEnableWatchdog_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdRcReleaseWatchdogRequests(pSubdevice) subdeviceCtrlCmdRcReleaseWatchdogRequests_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalRcWatchdogTimeout(pSubdevice) subdeviceCtrlCmdInternalRcWatchdogTimeout_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdSetRcRecovery(pSubdevice, pRcRecovery) subdeviceCtrlCmdSetRcRecovery_DISPATCH(pSubdevice, pRcRecovery) +#define subdeviceCtrlCmdGetRcRecovery(pSubdevice, pRcRecovery) subdeviceCtrlCmdGetRcRecovery_DISPATCH(pSubdevice, pRcRecovery) +#define subdeviceCtrlCmdGetRcInfo(pSubdevice, pParams) subdeviceCtrlCmdGetRcInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdSetRcInfo(pSubdevice, pParams) subdeviceCtrlCmdSetRcInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdNvdGetDumpSize(pSubdevice, pDumpSizeParams) subdeviceCtrlCmdNvdGetDumpSize_DISPATCH(pSubdevice, pDumpSizeParams) +#define subdeviceCtrlCmdNvdGetDump(pSubdevice, pDumpParams) subdeviceCtrlCmdNvdGetDump_DISPATCH(pSubdevice, pDumpParams) +#define subdeviceCtrlCmdNvdGetNocatJournalRpt(pSubdevice, pReportParams) subdeviceCtrlCmdNvdGetNocatJournalRpt_DISPATCH(pSubdevice, pReportParams) +#define subdeviceCtrlCmdNvdSetNocatJournalData(pSubdevice, pReportParams) subdeviceCtrlCmdNvdSetNocatJournalData_DISPATCH(pSubdevice, pReportParams) +#define subdeviceCtrlCmdCeGetCaps(pSubdevice, pCeCapsParams) subdeviceCtrlCmdCeGetCaps_DISPATCH(pSubdevice, pCeCapsParams) +#define 
subdeviceCtrlCmdCeGetCapsV2(pSubdevice, pCeCapsParams) subdeviceCtrlCmdCeGetCapsV2_DISPATCH(pSubdevice, pCeCapsParams) +#define subdeviceCtrlCmdCeGetAllCaps(pSubdevice, pCeCapsParams) subdeviceCtrlCmdCeGetAllCaps_DISPATCH(pSubdevice, pCeCapsParams) +#define subdeviceCtrlCmdCeGetCePceMask(pSubdevice, pCePceMaskParams) subdeviceCtrlCmdCeGetCePceMask_DISPATCH(pSubdevice, pCePceMaskParams) +#define subdeviceCtrlCmdCeUpdatePceLceMappings(pSubdevice, pCeUpdatePceLceMappingsParams) subdeviceCtrlCmdCeUpdatePceLceMappings_DISPATCH(pSubdevice, pCeUpdatePceLceMappingsParams) +#define subdeviceCtrlCmdFlcnGetDmemUsage(pSubdevice, pFlcnDmemUsageParams) subdeviceCtrlCmdFlcnGetDmemUsage_DISPATCH(pSubdevice, pFlcnDmemUsageParams) +#define subdeviceCtrlCmdFlcnInstrumentationMap(pSubdevice, pParams) subdeviceCtrlCmdFlcnInstrumentationMap_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnInstrumentationUnmap(pSubdevice, pParams) subdeviceCtrlCmdFlcnInstrumentationUnmap_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnInstrumentationGetInfo(pSubdevice, pParams) subdeviceCtrlCmdFlcnInstrumentationGetInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnInstrumentationGetControl(pSubdevice, pParams) subdeviceCtrlCmdFlcnInstrumentationGetControl_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnInstrumentationSetControl(pSubdevice, pParams) subdeviceCtrlCmdFlcnInstrumentationSetControl_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnInstrumentationRecalibrate(pSubdevice, pParams) subdeviceCtrlCmdFlcnInstrumentationRecalibrate_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnGetEngineArch(pSubdevice, pParams) subdeviceCtrlCmdFlcnGetEngineArch_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnUstreamerQueueInfo(pSubdevice, pParams) subdeviceCtrlCmdFlcnUstreamerQueueInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnUstreamerControlGet(pSubdevice, pParams) subdeviceCtrlCmdFlcnUstreamerControlGet_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnUstreamerControlSet(pSubdevice, pParams) subdeviceCtrlCmdFlcnUstreamerControlSet_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnGetCtxBufferInfo(pSubdevice, pParams) subdeviceCtrlCmdFlcnGetCtxBufferInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlcnGetCtxBufferSize(pSubdevice, pParams) subdeviceCtrlCmdFlcnGetCtxBufferSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdEccGetClientExposedCounters(pSubdevice, pParams) subdeviceCtrlCmdEccGetClientExposedCounters_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuQueryEccConfiguration(pSubdevice, pConfig) subdeviceCtrlCmdGpuQueryEccConfiguration_DISPATCH(pSubdevice, pConfig) +#define subdeviceCtrlCmdGpuSetEccConfiguration(pSubdevice, pConfig) subdeviceCtrlCmdGpuSetEccConfiguration_DISPATCH(pSubdevice, pConfig) +#define subdeviceCtrlCmdGpuResetEccErrorStatus(pSubdevice, pParams) subdeviceCtrlCmdGpuResetEccErrorStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlaRange(pSubdevice, pParams) subdeviceCtrlCmdFlaRange_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlaSetupInstanceMemBlock(pSubdevice, pParams) subdeviceCtrlCmdFlaSetupInstanceMemBlock_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlaGetRange(pSubdevice, pParams) subdeviceCtrlCmdFlaGetRange_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdFlaGetFabricMemStats(pSubdevice, pParams) subdeviceCtrlCmdFlaGetFabricMemStats_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGspGetFeatures(pSubdevice, 
pGspFeaturesParams) subdeviceCtrlCmdGspGetFeatures_DISPATCH(pSubdevice, pGspFeaturesParams) +#define subdeviceCtrlCmdGpuGetActivePartitionIds(pSubdevice, pParams) subdeviceCtrlCmdGpuGetActivePartitionIds_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetPartitionCapacity(pSubdevice, pParams) subdeviceCtrlCmdGpuGetPartitionCapacity_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuDescribePartitions(pSubdevice, pParams) subdeviceCtrlCmdGpuDescribePartitions_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuSetPartitioningMode(pSubdevice, pParams) subdeviceCtrlCmdGpuSetPartitioningMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGrmgrGetGrFsInfo(pSubdevice, pParams) subdeviceCtrlCmdGrmgrGetGrFsInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuSetPartitions(pSubdevice, pParams) subdeviceCtrlCmdGpuSetPartitions_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGpuGetPartitions(pSubdevice, pParams) subdeviceCtrlCmdGpuGetPartitions_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles(pSubdevice, pParams) subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines(pSubdevice, pParams) subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges(pSubdevice, pParams) subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance(pSubdevice, pParams) subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance(pSubdevice, pParams) subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdOsUnixGc6BlockerRefCnt(pSubdevice, pParams) subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdOsUnixAllowDisallowGcoff(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAllowDisallowGcoff_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdOsUnixAudioDynamicPower(pSubdevice, pParams) subdeviceCtrlCmdOsUnixAudioDynamicPower_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdOsUnixVidmemPersistenceStatus(pSubdevice, pParams) subdeviceCtrlCmdOsUnixVidmemPersistenceStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdOsUnixUpdateTgpStatus(pSubdevice, pParams) subdeviceCtrlCmdOsUnixUpdateTgpStatus_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayGetIpVersion(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetIpVersion_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayGetStaticInfo(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetStaticInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplaySetChannelPushbuffer(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetChannelPushbuffer_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayWriteInstMem(pSubdevice, pParams) subdeviceCtrlCmdDisplayWriteInstMem_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplaySetupRgLineIntr(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetupRgLineIntr_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplaySetImportedImpData(pSubdevice, pParams) subdeviceCtrlCmdDisplaySetImportedImpData_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdDisplayGetDisplayMask(pSubdevice, pParams) subdeviceCtrlCmdDisplayGetDisplayMask_DISPATCH(pSubdevice, pParams) +#define 
subdeviceCtrlCmdMsencGetCaps(pSubdevice, pParams) subdeviceCtrlCmdMsencGetCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer(pSubdevice, pParams) subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer(pSubdevice) subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer(pSubdevice) subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize(pSubdevice, pParams) subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGetChipInfo(pSubdevice, pParams) subdeviceCtrlCmdInternalGetChipInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGetUserRegisterAccessMap(pSubdevice, pParams) subdeviceCtrlCmdInternalGetUserRegisterAccessMap_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGetDeviceInfoTable(pSubdevice, pParams) subdeviceCtrlCmdInternalGetDeviceInfoTable_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGetConstructedFalconInfo(pSubdevice, pParams) subdeviceCtrlCmdInternalGetConstructedFalconInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalRecoverAllComputeContexts(pSubdevice) subdeviceCtrlCmdInternalRecoverAllComputeContexts_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalGetSmcMode(pSubdevice, pParams) subdeviceCtrlCmdInternalGetSmcMode_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalBusBindLocalGfidForP2p(pSubdevice, pParams) subdeviceCtrlCmdInternalBusBindLocalGfidForP2p_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p(pSubdevice, pParams) subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalBusFlushWithSysmembar(pSubdevice) subdeviceCtrlCmdInternalBusFlushWithSysmembar_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal(pSubdevice, pParams) subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote(pSubdevice, pParams) subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalBusDestroyP2pMailbox(pSubdevice, pParams) subdeviceCtrlCmdInternalBusDestroyP2pMailbox_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalBusCreateC2cPeerMapping(pSubdevice, pParams) subdeviceCtrlCmdInternalBusCreateC2cPeerMapping_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping(pSubdevice, pParams) subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGmmuGetStaticInfo(pSubdevice, pParams) subdeviceCtrlCmdGmmuGetStaticInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer(pSubdevice, pParams) subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer(pSubdevice) subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer(pSubdevice, pParams) subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer(pSubdevice) 
subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdCeGetPhysicalCaps(pSubdevice, pCeCapsParams) subdeviceCtrlCmdCeGetPhysicalCaps_DISPATCH(pSubdevice, pCeCapsParams) +#define subdeviceCtrlCmdCeGetAllPhysicalCaps(pSubdevice, pCeCapsParams) subdeviceCtrlCmdCeGetAllPhysicalCaps_DISPATCH(pSubdevice, pCeCapsParams) +#define subdeviceCtrlCmdCeUpdateClassDB(pSubdevice, params) subdeviceCtrlCmdCeUpdateClassDB_DISPATCH(pSubdevice, params) +#define subdeviceCtrlCmdCeGetFaultMethodBufferSize(pSubdevice, params) subdeviceCtrlCmdCeGetFaultMethodBufferSize_DISPATCH(pSubdevice, params) +#define subdeviceCtrlCmdCeGetHubPceMask(pSubdevice, pParams) subdeviceCtrlCmdCeGetHubPceMask_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdIntrGetKernelTable(pSubdevice, pParams) subdeviceCtrlCmdIntrGetKernelTable_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfCudaLimitDisable(pSubdevice) subdeviceCtrlCmdInternalPerfCudaLimitDisable_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalPerfOptpCliClear(pSubdevice) subdeviceCtrlCmdInternalPerfOptpCliClear_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalPerfBoostSet_2x(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfBoostSet_2x_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfBoostSet_3x(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfBoostSet_3x_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfBoostClear_3x(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfBoostClear_3x_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount(pSubdevice, pParams) subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBifGetStaticInfo(pSubdevice, pParams) subdeviceCtrlCmdBifGetStaticInfo_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdBifGetAspmL1Flags(pSubdevice, pParams) subdeviceCtrlCmdBifGetAspmL1Flags_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdHshubPeerConnConfig(pSubdevice, pParams) subdeviceCtrlCmdHshubPeerConnConfig_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdHshubFirstLinkPeerId(pSubdevice, pParams) subdeviceCtrlCmdHshubFirstLinkPeerId_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdHshubGetHshubIdForLinks(pSubdevice, pParams) subdeviceCtrlCmdHshubGetHshubIdForLinks_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdHshubGetNumUnits(pSubdevice, pParams) subdeviceCtrlCmdHshubGetNumUnits_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdHshubNextHshubId(pSubdevice, pParams) subdeviceCtrlCmdHshubNextHshubId_DISPATCH(pSubdevice, pParams) +#define 
subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr(pSubdevice) subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr_DISPATCH(pSubdevice) +#define subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr(pSubdevice, pParams) subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdInternalGetPcieP2pCaps(pSubdevice, pParams) subdeviceCtrlCmdInternalGetPcieP2pCaps_DISPATCH(pSubdevice, pParams) +#define subdeviceCtrlCmdGetAvailableHshubMask(pSubdevice, pParams) subdeviceCtrlCmdGetAvailableHshubMask_DISPATCH(pSubdevice, pParams) +#define subdeviceShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) subdeviceShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define subdeviceMapTo(pResource, pParams) subdeviceMapTo_DISPATCH(pResource, pParams) +#define subdeviceGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) subdeviceGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define subdeviceCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) subdeviceCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define subdeviceGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) subdeviceGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define subdeviceSetNotificationShare(pNotifier, pNotifShare) subdeviceSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define subdeviceGetRefCount(pResource) subdeviceGetRefCount_DISPATCH(pResource) +#define subdeviceAddAdditionalDependants(pClient, pResource, pReference) subdeviceAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define subdeviceControl_Prologue(pResource, pCallContext, pParams) subdeviceControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define subdeviceGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) subdeviceGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define subdeviceUnmapFrom(pResource, pParams) subdeviceUnmapFrom_DISPATCH(pResource, pParams) +#define subdeviceControl_Epilogue(pResource, pCallContext, pParams) subdeviceControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define subdeviceControlLookup(pResource, pParams, ppEntry) subdeviceControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define subdeviceGetInternalObjectHandle(pGpuResource) subdeviceGetInternalObjectHandle_DISPATCH(pGpuResource) +#define subdeviceControl(pGpuResource, pCallContext, pParams) subdeviceControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define subdeviceUnmap(pGpuResource, pCallContext, pCpuMapping) subdeviceUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define subdeviceGetMemInterMapParams(pRmResource, pParams) subdeviceGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define subdeviceGetMemoryMappingDescriptor(pRmResource, ppMemDesc) subdeviceGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define subdeviceUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) subdeviceUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define subdeviceCanCopy(pResource) subdeviceCanCopy_DISPATCH(pResource) +#define subdeviceGetNotificationListPtr(pNotifier) subdeviceGetNotificationListPtr_DISPATCH(pNotifier) +#define subdeviceGetNotificationShare(pNotifier) subdeviceGetNotificationShare_DISPATCH(pNotifier) +#define subdeviceMap(pGpuResource, pCallContext, pParams, 
pCpuMapping) subdeviceMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define subdeviceAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) subdeviceAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS subdeviceCtrlCmdPerfReservePerfmonHw_KERNEL(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS *pPerfmonParams); + +#ifdef __nvoc_subdevice_h_disabled +static inline NV_STATUS subdeviceCtrlCmdPerfReservePerfmonHw_internal(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS *pPerfmonParams) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceCtrlCmdPerfReservePerfmonHw_internal(pSubdevice, pPerfmonParams) subdeviceCtrlCmdPerfReservePerfmonHw_KERNEL(pSubdevice, pPerfmonParams) +#endif //__nvoc_subdevice_h_disabled + +#define subdeviceCtrlCmdPerfReservePerfmonHw_internal_HAL(pSubdevice, pPerfmonParams) subdeviceCtrlCmdPerfReservePerfmonHw_internal(pSubdevice, pPerfmonParams) + +NV_STATUS subdeviceCtrlCmdPerfRatedTdpSetControl_KERNEL(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pControlParams); + +#ifdef __nvoc_subdevice_h_disabled +static inline NV_STATUS subdeviceCtrlCmdPerfRatedTdpSetControl_internal(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pControlParams) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceCtrlCmdPerfRatedTdpSetControl_internal(pSubdevice, pControlParams) subdeviceCtrlCmdPerfRatedTdpSetControl_KERNEL(pSubdevice, pControlParams) +#endif //__nvoc_subdevice_h_disabled + +#define subdeviceCtrlCmdPerfRatedTdpSetControl_internal_HAL(pSubdevice, pControlParams) subdeviceCtrlCmdPerfRatedTdpSetControl_internal(pSubdevice, pControlParams) + +void subdevicePreDestruct_IMPL(struct Subdevice *pResource); + +static inline void subdevicePreDestruct_DISPATCH(struct Subdevice *pResource) { + pResource->__subdevicePreDestruct__(pResource); +} + +NV_STATUS subdeviceInternalControlForward_IMPL(struct Subdevice *pSubdevice, NvU32 command, void *pParams, NvU32 size); + +static inline NV_STATUS subdeviceInternalControlForward_DISPATCH(struct Subdevice *pSubdevice, NvU32 command, void *pParams, NvU32 size) { + return pSubdevice->__subdeviceInternalControlForward__(pSubdevice, command, pParams, size); +} + +NV_STATUS subdeviceControlFilter_IMPL(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +static inline NV_STATUS subdeviceControlFilter_DISPATCH(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pSubdevice->__subdeviceControlFilter__(pSubdevice, pCallContext, pParams); +} + +NV_STATUS subdeviceCtrlCmdBiosGetInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS *pBiosInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBiosGetInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS *pBiosInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBiosGetInfoV2__(pSubdevice, pBiosInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBiosGetSKUInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pBiosGetSKUInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBiosGetSKUInfo_DISPATCH(struct Subdevice *pSubdevice, 
NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *pBiosGetSKUInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBiosGetSKUInfo__(pSubdevice, pBiosGetSKUInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBiosGetPostTime_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS *pBiosPostTime); + +static inline NV_STATUS subdeviceCtrlCmdBiosGetPostTime_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS *pBiosPostTime) { + return pSubdevice->__subdeviceCtrlCmdBiosGetPostTime__(pSubdevice, pBiosPostTime); +} + +NV_STATUS subdeviceCtrlCmdBiosGetUefiSupport_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS *pUEFIParams); + +static inline NV_STATUS subdeviceCtrlCmdBiosGetUefiSupport_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS *pUEFIParams) { + return pSubdevice->__subdeviceCtrlCmdBiosGetUefiSupport__(pSubdevice, pUEFIParams); +} + +NV_STATUS subdeviceCtrlCmdMcGetArchInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS *pArchInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdMcGetArchInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS *pArchInfoParams) { + return pSubdevice->__subdeviceCtrlCmdMcGetArchInfo__(pSubdevice, pArchInfoParams); +} + +NV_STATUS subdeviceCtrlCmdMcGetManufacturer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS *pManufacturerParams); + +static inline NV_STATUS subdeviceCtrlCmdMcGetManufacturer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS *pManufacturerParams) { + return pSubdevice->__subdeviceCtrlCmdMcGetManufacturer__(pSubdevice, pManufacturerParams); +} + +NV_STATUS subdeviceCtrlCmdMcQueryHostclkSlowdownStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS *pGetStatusParams); + +static inline NV_STATUS subdeviceCtrlCmdMcQueryHostclkSlowdownStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS *pGetStatusParams) { + return pSubdevice->__subdeviceCtrlCmdMcQueryHostclkSlowdownStatus__(pSubdevice, pGetStatusParams); +} + +NV_STATUS subdeviceCtrlCmdMcSetHostclkSlowdownStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdMcSetHostclkSlowdownStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdMcSetHostclkSlowdownStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdMcChangeReplayableFaultOwnership_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS *pReplayableFaultOwnrshpParams); + +static inline NV_STATUS subdeviceCtrlCmdMcChangeReplayableFaultOwnership_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS *pReplayableFaultOwnrshpParams) { + return pSubdevice->__subdeviceCtrlCmdMcChangeReplayableFaultOwnership__(pSubdevice, pReplayableFaultOwnrshpParams); +} + +NV_STATUS subdeviceCtrlCmdMcServiceInterrupts_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS *pServiceInterruptParams); + +static inline NV_STATUS subdeviceCtrlCmdMcServiceInterrupts_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS *pServiceInterruptParams) { + return pSubdevice->__subdeviceCtrlCmdMcServiceInterrupts__(pSubdevice, pServiceInterruptParams); +} + +NV_STATUS 
subdeviceCtrlCmdDmaInvalidateTLB_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDmaInvalidateTLB_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDmaInvalidateTLB__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDmaGetInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_DMA_GET_INFO_PARAMS *pDmaInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdDmaGetInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_DMA_GET_INFO_PARAMS *pDmaInfoParams) { + return pSubdevice->__subdeviceCtrlCmdDmaGetInfo__(pSubdevice, pDmaInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetPciInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS *pPciInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetPciInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS *pPciInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetPciInfo__(pSubdevice, pPciInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_INFO_PARAMS *pBusInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_INFO_PARAMS *pBusInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetInfo__(pSubdevice, pBusInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_INFO_V2_PARAMS *pBusInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_INFO_V2_PARAMS *pBusInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetInfoV2__(pSubdevice, pBusInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetPciBarInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS *pBarInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetPciBarInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS *pBarInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetPciBarInfo__(pSubdevice, pBarInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusSetPcieSpeed_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS *pBusInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBusSetPcieSpeed_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SET_PCIE_SPEED_PARAMS *pBusInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBusSetPcieSpeed__(pSubdevice, pBusInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusSetPcieLinkWidth_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS *pLinkWidthParams); + +static inline NV_STATUS subdeviceCtrlCmdBusSetPcieLinkWidth_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SET_PCIE_LINK_WIDTH_PARAMS *pLinkWidthParams) { + return pSubdevice->__subdeviceCtrlCmdBusSetPcieLinkWidth__(pSubdevice, pLinkWidthParams); +} + +NV_STATUS subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS *pBusInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS *pBusInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed__(pSubdevice, pBusInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS 
*pBusInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS *pBusInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed__(pSubdevice, pBusInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusHWBCGetUpstreamBAR0_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS *pBusInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdBusHWBCGetUpstreamBAR0_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS *pBusInfoParams) { + return pSubdevice->__subdeviceCtrlCmdBusHWBCGetUpstreamBAR0__(pSubdevice, pBusInfoParams); +} + +NV_STATUS subdeviceCtrlCmdBusServiceGpuMultifunctionState_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusServiceGpuMultifunctionState_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SERVICE_GPU_MULTIFUNC_STATE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusServiceGpuMultifunctionState__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetPexCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetPexCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_PEX_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetPexCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetBFD_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_BFD_PARAMSARR *pBusGetBFDParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetBFD_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_BFD_PARAMSARR *pBusGetBFDParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetBFD__(pSubdevice, pBusGetBFDParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetAspmDisableFlags_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetAspmDisableFlags_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetAspmDisableFlags__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusControlPublicAspmBits_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusControlPublicAspmBits_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_CONTROL_PUBLIC_ASPM_BITS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusControlPublicAspmBits__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusClearPexCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusClearPexCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_CLEAR_PEX_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusClearPexCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetPexUtilCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetPexUtilCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_PEX_UTIL_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetPexUtilCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusClearPexUtilCounters_IMPL(struct Subdevice *pSubdevice, 
NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusClearPexUtilCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_CLEAR_PEX_UTIL_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusClearPexUtilCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusFreezePexCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusFreezePexCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_FREEZE_PEX_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusFreezePexCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetPexLaneCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetPexLaneCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PEX_LANE_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetPexLaneCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetPcieLtrLatency_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetPcieLtrLatency_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_PCIE_LTR_LATENCY_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetPcieLtrLatency__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusSetPcieLtrLatency_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusSetPcieLtrLatency_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_SET_PCIE_LTR_LATENCY_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusSetPcieLtrLatency__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetNvlinkPeerIdMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetNvlinkPeerIdMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetNvlinkPeerIdMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusSetEomParameters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusSetEomParameters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_SET_EOM_PARAMETERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusSetEomParameters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetUphyDlnCfgSpace_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetUphyDlnCfgSpace_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_BUS_GET_UPHY_DLN_CFG_SPACE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetUphyDlnCfgSpace__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetEomStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetEomStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_GET_EOM_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetEomStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusSysmemAccess_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS 
*pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusSysmemAccess_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_BUS_SYSMEM_ACCESS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusSysmemAccess__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetNvlinkCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetNvlinkCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetNvlinkCaps__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBusGetNvlinkStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBusGetNvlinkStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBusGetNvlinkStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGetNvlinkCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGetNvlinkCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGetNvlinkCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdClearNvlinkCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdClearNvlinkCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_CLEAR_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdClearNvlinkCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_LINK_FATAL_ERROR_COUNTS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetLinkFatalErrorCounts__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkSetupEom_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkSetupEom_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_NVLINK_SETUP_EOM_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkSetupEom__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkGetPowerState_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetPowerState_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_POWER_STATE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetPowerState__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkReadTpCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkReadTpCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_READ_TP_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkReadTpCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkGetLpCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetLpCounters_DISPATCH(struct Subdevice *pSubdevice, 
NV2080_CTRL_NVLINK_GET_LP_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetLpCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkEnableNvlinkPeer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkEnableNvlinkPeer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkEnableNvlinkPeer__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkCoreCallback_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkCoreCallback_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkCoreCallback__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkSetLoopbackMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkSetLoopbackMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SET_LOOPBACK_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkSetLoopbackMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_REMOTE_LOCAL_SID_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkUpdateRemoteLocalSid__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkUpdateHshubMux_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkUpdateHshubMux_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkUpdateHshubMux__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkPreSetupNvlinkPeer__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkPostSetupNvlinkPeer__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkRemoveNvlinkMapping_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkRemoveNvlinkMapping_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkRemoveNvlinkMapping__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkSaveRestoreHshubState_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS *pParams); + +static inline NV_STATUS 
subdeviceCtrlCmdNvlinkSaveRestoreHshubState_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkSaveRestoreHshubState__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkProgramBufferready_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkProgramBufferready_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkProgramBufferready__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkUpdateCurrentConfig_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkUpdateCurrentConfig_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkUpdateCurrentConfig__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkUpdatePeerLinkMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkUpdatePeerLinkMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkUpdatePeerLinkMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkUpdateLinkConnection_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkUpdateLinkConnection_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkUpdateLinkConnection__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkEnableLinksPostTopology_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkEnableLinksPostTopology_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkEnableLinksPostTopology__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkGetRefreshCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetRefreshCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_REFRESH_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetRefreshCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkClearRefreshCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkClearRefreshCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_CLEAR_REFRESH_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkClearRefreshCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetLinkMaskPostRxDet__(pSubdevice, pParams); +} + +NV_STATUS 
subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetNvlinkDeviceInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetIoctrlDeviceInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkProgramLinkSpeed_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkProgramLinkSpeed_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkProgramLinkSpeed__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkAreLinksTrained_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkAreLinksTrained_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkAreLinksTrained__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkResetLinks_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkResetLinks_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkResetLinks__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkDisableDlInterrupts_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkDisableDlInterrupts_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkDisableDlInterrupts__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkGetLinkAndClockInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetLinkAndClockInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetLinkAndClockInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkSetupNvlinkSysmem_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkSetupNvlinkSysmem_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkSetupNvlinkSysmem__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkProcessForcedConfigs_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkProcessForcedConfigs_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS *pParams) { + return 
pSubdevice->__subdeviceCtrlCmdNvlinkProcessForcedConfigs__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkSyncLaneShutdownProps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkSyncLaneShutdownProps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkSyncLaneShutdownProps__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkEnableSysmemNvlinkAts__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkHshubGetSysmemNvlinkMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkGetSetNvswitchFlaAddr__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkSyncLinkMasksAndVbiosInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvlinkEnableLinks_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkEnableLinks_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdNvlinkEnableLinks__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdNvlinkProcessInitDisabledLinks_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdNvlinkProcessInitDisabledLinks_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdNvlinkProcessInitDisabledLinks__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdI2cReadBuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *pI2cParams); + +static inline NV_STATUS subdeviceCtrlCmdI2cReadBuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_READ_BUFFER_PARAMS *pI2cParams) { + return pSubdevice->__subdeviceCtrlCmdI2cReadBuffer__(pSubdevice, pI2cParams); +} + +NV_STATUS subdeviceCtrlCmdI2cWriteBuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS *pI2cParams); + +static inline NV_STATUS subdeviceCtrlCmdI2cWriteBuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_WRITE_BUFFER_PARAMS 
*pI2cParams) { + return pSubdevice->__subdeviceCtrlCmdI2cWriteBuffer__(pSubdevice, pI2cParams); +} + +NV_STATUS subdeviceCtrlCmdI2cReadReg_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_RW_REG_PARAMS *pI2cParams); + +static inline NV_STATUS subdeviceCtrlCmdI2cReadReg_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_RW_REG_PARAMS *pI2cParams) { + return pSubdevice->__subdeviceCtrlCmdI2cReadReg__(pSubdevice, pI2cParams); +} + +NV_STATUS subdeviceCtrlCmdI2cWriteReg_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_RW_REG_PARAMS *pI2cParams); + +static inline NV_STATUS subdeviceCtrlCmdI2cWriteReg_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_I2C_RW_REG_PARAMS *pI2cParams) { + return pSubdevice->__subdeviceCtrlCmdI2cWriteReg__(pSubdevice, pI2cParams); +} + +NV_STATUS subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM *pParams); + +static inline NV_STATUS subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM *pParams) { + return pSubdevice->__subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamples__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdPerfGetGpumonPerfmonUtilSamplesV2__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdPerfRatedTdpGetControl_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pControlParams); + +static inline NV_STATUS subdeviceCtrlCmdPerfRatedTdpGetControl_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pControlParams) { + return pSubdevice->__subdeviceCtrlCmdPerfRatedTdpGetControl__(pSubdevice, pControlParams); +} + +static inline NV_STATUS subdeviceCtrlCmdPerfRatedTdpSetControl_a2e9a2(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pControlParams) { + return subdeviceCtrlCmdPerfRatedTdpSetControl_internal(pSubdevice, pControlParams); +} + +static inline NV_STATUS subdeviceCtrlCmdPerfRatedTdpSetControl_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pControlParams) { + return pSubdevice->__subdeviceCtrlCmdPerfRatedTdpSetControl__(pSubdevice, pControlParams); +} + +static inline NV_STATUS subdeviceCtrlCmdPerfReservePerfmonHw_3f0664(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS *pPerfmonParams) { + return subdeviceCtrlCmdPerfReservePerfmonHw_internal(pSubdevice, pPerfmonParams); +} + +static inline NV_STATUS subdeviceCtrlCmdPerfReservePerfmonHw_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS *pPerfmonParams) { + return pSubdevice->__subdeviceCtrlCmdPerfReservePerfmonHw__(pSubdevice, pPerfmonParams); +} + +NV_STATUS subdeviceCtrlCmdKPerfBoost_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_BOOST_PARAMS *pBoostParams); + +static inline NV_STATUS subdeviceCtrlCmdKPerfBoost_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_PERF_BOOST_PARAMS *pBoostParams) { + return pSubdevice->__subdeviceCtrlCmdKPerfBoost__(pSubdevice, pBoostParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetFBRegionInfo_IMPL(struct Subdevice *pSubdevice, 
NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *pGFBRIParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetFBRegionInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *pGFBRIParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetFBRegionInfo__(pSubdevice, pGFBRIParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetBar1Offset_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS *pFbMemParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetBar1Offset_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS *pFbMemParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetBar1Offset__(pSubdevice, pFbMemParams); +} + +NV_STATUS subdeviceCtrlCmdFbIsKind_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_IS_KIND_PARAMS *pIsKindParams); + +static inline NV_STATUS subdeviceCtrlCmdFbIsKind_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_IS_KIND_PARAMS *pIsKindParams) { + return pSubdevice->__subdeviceCtrlCmdFbIsKind__(pSubdevice, pIsKindParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetMemAlignment_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetMemAlignment_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetMemAlignment__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetHeapReservationSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetHeapReservationSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetHeapReservationSize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_INFO_PARAMS *pFbInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_INFO_PARAMS *pFbInfoParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetInfo__(pSubdevice, pFbInfoParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pFbInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pFbInfoParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetInfoV2__(pSubdevice, pFbInfoParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetCarveoutAddressInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetCarveoutAddressInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetCarveoutAddressInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetCalibrationLockFailed_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS *pGCLFParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetCalibrationLockFailed_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_CALIBRATION_LOCK_FAILED_PARAMS *pGCLFParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetCalibrationLockFailed__(pSubdevice, pGCLFParams); +} + +NV_STATUS subdeviceCtrlCmdFbFlushGpuCache_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS *pCacheFlushParams); + +static inline NV_STATUS 
subdeviceCtrlCmdFbFlushGpuCache_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS *pCacheFlushParams) { + return pSubdevice->__subdeviceCtrlCmdFbFlushGpuCache__(pSubdevice, pCacheFlushParams); +} + +NV_STATUS subdeviceCtrlCmdFbSetGpuCacheAllocPolicy_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbSetGpuCacheAllocPolicy_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbSetGpuCacheAllocPolicy__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetGpuCacheAllocPolicy_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS *pGpuCacheAllocPolicyParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetGpuCacheAllocPolicy_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS *pGpuCacheAllocPolicyParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetGpuCacheAllocPolicy__(pSubdevice, pGpuCacheAllocPolicyParams); +} + +NV_STATUS subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetGpuCacheInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS *pGpuCacheParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetGpuCacheInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_GPU_CACHE_INFO_PARAMS *pGpuCacheParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetGpuCacheInfo__(pSubdevice, pGpuCacheParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetCliManagedOfflinedPages_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS *pOsOfflinedParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetCliManagedOfflinedPages_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS *pOsOfflinedParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetCliManagedOfflinedPages__(pSubdevice, pOsOfflinedParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetOfflinedPages_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS *pBlackListParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetOfflinedPages_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS *pBlackListParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetOfflinedPages__(pSubdevice, pBlackListParams); +} + +NV_STATUS subdeviceCtrlCmdFbSetupVprRegion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS *pCliReqParams); + +static inline NV_STATUS subdeviceCtrlCmdFbSetupVprRegion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FB_SETUP_VPR_REGION_PARAMS *pCliReqParams) { + return pSubdevice->__subdeviceCtrlCmdFbSetupVprRegion__(pSubdevice, pCliReqParams); +} + 
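Every control command declared above follows the same generated shape: a concrete _IMPL function paired with a thin _DISPATCH wrapper that forwards through a per-object function pointer named __<command>__ on struct Subdevice. The short standalone sketch below illustrates that wiring with hypothetical names (ExampleObject, exampleCtrlFoo_*, NV_STATUS_EX); it shows the pattern only and is not code taken from this header.

    #include <stdio.h>

    typedef int NV_STATUS_EX;                 /* stand-in for NV_STATUS */
    #define NV_OK_EX 0

    /* The object carries one function pointer per control command; the
     * double-underscore spelling only mirrors the generated header.    */
    struct ExampleObject {
        NV_STATUS_EX (*__exampleCtrlFoo__)(struct ExampleObject *pObj, int *pParams);
    };

    /* Concrete implementation: the "_IMPL" half of the pair. */
    static NV_STATUS_EX exampleCtrlFoo_IMPL(struct ExampleObject *pObj, int *pParams)
    {
        (void)pObj;
        *pParams += 1;
        return NV_OK_EX;
    }

    /* Forwarding wrapper: the "_DISPATCH" half of the pair. */
    static inline NV_STATUS_EX exampleCtrlFoo_DISPATCH(struct ExampleObject *pObj, int *pParams)
    {
        return pObj->__exampleCtrlFoo__(pObj, pParams);
    }

    int main(void)
    {
        /* Construction wires the pointer to the chosen implementation;
         * callers always go through the _DISPATCH wrapper afterwards.  */
        struct ExampleObject obj = { .__exampleCtrlFoo__ = exampleCtrlFoo_IMPL };
        int value = 41;

        if (exampleCtrlFoo_DISPATCH(&obj, &value) == NV_OK_EX)
            printf("value = %d\n", value);    /* prints "value = 42" */
        return 0;
    }

In the generated header the corresponding pointer lives on struct Subdevice and is assigned by initialization code elsewhere in the tree, so the _DISPATCH wrapper resolves to whichever implementation or stub was installed for the running configuration.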
+NV_STATUS subdeviceCtrlCmdFbGetLTCInfoForFBP_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetLTCInfoForFBP_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_LTC_INFO_FOR_FBP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetLTCInfoForFBP__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetCompBitCopyConstructInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetCompBitCopyConstructInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FB_GET_COMPBITCOPY_CONSTRUCT_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetCompBitCopyConstructInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbPatchPbrForMining_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbPatchPbrForMining_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_PATCH_PBR_FOR_MINING_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbPatchPbrForMining__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetRemappedRows_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS *pRemappedRowsParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetRemappedRows_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_REMAPPED_ROWS_PARAMS *pRemappedRowsParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetRemappedRows__(pSubdevice, pRemappedRowsParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetFsInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_FS_INFO_PARAMS *pInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetFsInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_FS_INFO_PARAMS *pInfoParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetFsInfo__(pSubdevice, pInfoParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetRowRemapperHistogram_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetRowRemapperHistogram_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_ROW_REMAPPER_HISTOGRAM_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetRowRemapperHistogram__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetDynamicOfflinedPages_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetDynamicOfflinedPages_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetDynamicOfflinedPages__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbUpdateNumaStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbUpdateNumaStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbUpdateNumaStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetNumaInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetNumaInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetNumaInfo__(pSubdevice, pParams); +} + +NV_STATUS 
subdeviceCtrlCmdMemSysGetStaticConfig_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdMemSysGetStaticConfig_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_GET_STATIC_CONFIG_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdMemSysGetStaticConfig__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdMemSysSetPartitionableMem_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdMemSysSetPartitionableMem_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdMemSysSetPartitionableMem__(pSubdevice, pParams); +} + +static inline NV_STATUS subdeviceCtrlCmdKMemSysGetMIGMemoryConfig_133e5e(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS *pParams) { + return subdeviceInternalControlForward(pSubdevice, (545262184), pParams, sizeof (*pParams)); +} + +static inline NV_STATUS subdeviceCtrlCmdKMemSysGetMIGMemoryConfig_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKMemSysGetMIGMemoryConfig__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbSetZbcReferenced_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbSetZbcReferenced_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbSetZbcReferenced__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdMemSysL2InvalidateEvict_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdMemSysL2InvalidateEvict_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdMemSysL2InvalidateEvict__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdMemSysFlushL2AllRamsAndCaches__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdMemSysDisableNvlinkPeers_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdMemSysDisableNvlinkPeers_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdMemSysDisableNvlinkPeers__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdMemSysProgramRawCompressionMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdMemSysProgramRawCompressionMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdMemSysProgramRawCompressionMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable_DISPATCH(struct Subdevice *pSubdevice, 
NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdMemSysGetMIGMemoryPartitionTable__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbGetCtagsForCbcEviction_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbGetCtagsForCbcEviction_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_GET_CTAGS_FOR_CBC_EVICTION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbGetCtagsForCbcEviction__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbCBCOp_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FB_CBC_OP_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbCBCOp_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FB_CBC_OP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbCBCOp__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbSetRrd_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_SET_RRD_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbSetRrd_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_SET_RRD_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbSetRrd__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbSetReadLimit_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbSetReadLimit_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_SET_READ_LIMIT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbSetReadLimit__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFbSetWriteLimit_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFbSetWriteLimit_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FB_SET_WRITE_LIMIT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFbSetWriteLimit__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdSetGpfifo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_SET_GPFIFO_PARAMS *pSetGpFifoParams); + +static inline NV_STATUS subdeviceCtrlCmdSetGpfifo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_SET_GPFIFO_PARAMS *pSetGpFifoParams) { + return pSubdevice->__subdeviceCtrlCmdSetGpfifo__(pSubdevice, pSetGpFifoParams); +} + +NV_STATUS subdeviceCtrlCmdSetOperationalProperties_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS *pSetOperationalProperties); + +static inline NV_STATUS subdeviceCtrlCmdSetOperationalProperties_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_SET_OPERATIONAL_PROPERTIES_PARAMS *pSetOperationalProperties) { + return pSubdevice->__subdeviceCtrlCmdSetOperationalProperties__(pSubdevice, pSetOperationalProperties); +} + +NV_STATUS subdeviceCtrlCmdFifoBindEngines_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS *pBindParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoBindEngines_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_BIND_ENGINES_PARAMS *pBindParams) { + return pSubdevice->__subdeviceCtrlCmdFifoBindEngines__(pSubdevice, pBindParams); +} + +NV_STATUS subdeviceCtrlCmdGetPhysicalChannelCount_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGetPhysicalChannelCount_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGetPhysicalChannelCount__(pSubdevice, pParams); +} + 
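A note on the stubs above that carry hashed suffixes (_a2e9a2, _3f0664, _133e5e): unlike the plain _DISPATCH wrappers, they do not call through a per-object pointer but forward either to an _internal routine or to subdeviceInternalControlForward() with a hard-coded command value. The bare decimal literal 545262184 in the _133e5e stub appears to be that command's identifier written in base 10; it equals 0x20800A68, which matches the 0x2080xxxx numbering used by NV2080 controls. A trivial standalone check of the conversion, for reference only:

    #include <stdio.h>

    int main(void)
    {
        /* The decimal literal used above and its hex form are the same value. */
        _Static_assert(545262184 == 0x20800A68, "decimal/hex forms differ");
        printf("0x%08X\n", (unsigned)545262184);   /* prints 0x20800A68 */
        return 0;
    }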
+NV_STATUS subdeviceCtrlCmdFifoGetInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_GET_INFO_PARAMS *pFifoInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoGetInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_GET_INFO_PARAMS *pFifoInfoParams) { + return pSubdevice->__subdeviceCtrlCmdFifoGetInfo__(pSubdevice, pFifoInfoParams); +} + +NV_STATUS subdeviceCtrlCmdFifoDisableChannels_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *pDisableChannelParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoDisableChannels_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *pDisableChannelParams) { + return pSubdevice->__subdeviceCtrlCmdFifoDisableChannels__(pSubdevice, pDisableChannelParams); +} + +NV_STATUS subdeviceCtrlCmdFifoDisableUsermodeChannels_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoDisableUsermodeChannels_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_DISABLE_USERMODE_CHANNELS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFifoDisableUsermodeChannels__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFifoGetChannelMemInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS *pChannelMemParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoGetChannelMemInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS *pChannelMemParams) { + return pSubdevice->__subdeviceCtrlCmdFifoGetChannelMemInfo__(pSubdevice, pChannelMemParams); +} + +NV_STATUS subdeviceCtrlCmdFifoGetUserdLocation_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS *pUserdLocationParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoGetUserdLocation_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS *pUserdLocationParams) { + return pSubdevice->__subdeviceCtrlCmdFifoGetUserdLocation__(pSubdevice, pUserdLocationParams); +} + +NV_STATUS subdeviceCtrlCmdFifoGetDeviceInfoTable_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoGetDeviceInfoTable_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFifoGetDeviceInfoTable__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFifoClearFaultedBit_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoClearFaultedBit_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_FIFO_CLEAR_FAULTED_BIT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFifoClearFaultedBit__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFifoRunlistSetSchedPolicy_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS *pSchedPolicyParams); + +static inline NV_STATUS subdeviceCtrlCmdFifoRunlistSetSchedPolicy_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_RUNLIST_SET_SCHED_POLICY_PARAMS *pSchedPolicyParams) { + return pSubdevice->__subdeviceCtrlCmdFifoRunlistSetSchedPolicy__(pSubdevice, pSchedPolicyParams); +} + +NV_STATUS subdeviceCtrlCmdFifoUpdateChannelInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS *pChannelInfo); + +static inline NV_STATUS subdeviceCtrlCmdFifoUpdateChannelInfo_DISPATCH(struct Subdevice *pSubdevice, 
NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS *pChannelInfo) { + return pSubdevice->__subdeviceCtrlCmdFifoUpdateChannelInfo__(pSubdevice, pChannelInfo); +} + +NV_STATUS subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalFifoPromoteRunlistBuffers__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalFifoGetNumChannels_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS *pNumChannelsParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalFifoGetNumChannels_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS *pNumChannelsParams) { + return pSubdevice->__subdeviceCtrlCmdInternalFifoGetNumChannels__(pSubdevice, pNumChannelsParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_INFO_V2_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_INFO_V2_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetInfoV2__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetCapsV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CAPS_V2_PARAMS *pGrCapsParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetCapsV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CAPS_V2_PARAMS *pGrCapsParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetCapsV2__(pSubdevice, pGrCapsParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetCtxswModes_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetCtxswModes_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CTXSW_MODES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetCtxswModes__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrCtxswZcullMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrCtxswZcullMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_ZCULL_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrCtxswZcullMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrCtxswZcullBind_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrCtxswZcullBind_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrCtxswZcullBind__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetZcullInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS *pZcullInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetZcullInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS *pZcullInfoParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetZcullInfo__(pSubdevice, 
pZcullInfoParams); +} + +NV_STATUS subdeviceCtrlCmdKGrCtxswPmMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrCtxswPmMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrCtxswPmMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrCtxswPmBind_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrCtxswPmBind_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrCtxswPmBind__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrSetGpcTileMap_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrSetGpcTileMap_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_SET_GPC_TILE_MAP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrSetGpcTileMap__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrCtxswSmpcMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrCtxswSmpcMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_SMPC_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrCtxswSmpcMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrPcSamplingMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrPcSamplingMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_PC_SAMPLING_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrPcSamplingMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetSmToGpcTpcMappings_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetSmToGpcTpcMappings_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetSmToGpcTpcMappings__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetGlobalSmOrder_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetGlobalSmOrder_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetGlobalSmOrder__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrSetCtxswPreemptionMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrSetCtxswPreemptionMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_SET_CTXSW_PREEMPTION_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrSetCtxswPreemptionMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrCtxswPreemptionBind_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrCtxswPreemptionBind_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrCtxswPreemptionBind__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetROPInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ROP_INFO_PARAMS *pParams); + +static inline 
NV_STATUS subdeviceCtrlCmdKGrGetROPInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ROP_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetROPInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetCtxswStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetCtxswStats_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CTXSW_STATS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetCtxswStats__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetCtxBufferSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetCtxBufferSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetCtxBufferSize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetCtxBufferInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetCtxBufferInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetCtxBufferInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetCtxBufferPtes_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetCtxBufferPtes_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetCtxBufferPtes__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetCurrentResidentChannel_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetCurrentResidentChannel_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GR_GET_CURRENT_RESIDENT_CHANNEL_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetCurrentResidentChannel__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetVatAlarmData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetVatAlarmData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_VAT_ALARM_DATA_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetVatAlarmData__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetAttributeBufferSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetAttributeBufferSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetAttributeBufferSize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGfxPoolQuerySize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGfxPoolQuerySize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GFX_POOL_QUERY_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGfxPoolQuerySize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGfxPoolInitialize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGfxPoolInitialize_DISPATCH(struct Subdevice *pSubdevice, 
NV2080_CTRL_GR_GFX_POOL_INITIALIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGfxPoolInitialize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGfxPoolAddSlots_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGfxPoolAddSlots_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GFX_POOL_ADD_SLOTS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGfxPoolAddSlots__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGfxPoolRemoveSlots_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGfxPoolRemoveSlots_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GFX_POOL_REMOVE_SLOTS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGfxPoolRemoveSlots__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetPpcMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_PPC_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetPpcMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_PPC_MASK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetPpcMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrSetTpcPartitionMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrSetTpcPartitionMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_SET_TPC_PARTITION_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrSetTpcPartitionMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetSmIssueRateModifier_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetSmIssueRateModifier_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetSmIssueRateModifier__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrFecsBindEvtbufForUid_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrFecsBindEvtbufForUid_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrFecsBindEvtbufForUid__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetPhysGpcMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetPhysGpcMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetPhysGpcMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetGpcMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_GPC_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetGpcMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_GPC_MASK_PARAMS *pParams) { + return 
pSubdevice->__subdeviceCtrlCmdKGrGetGpcMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetTpcMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_TPC_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetTpcMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_TPC_MASK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetTpcMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetEngineContextProperties_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetEngineContextProperties_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetEngineContextProperties__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetNumTpcsForGpc_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetNumTpcsForGpc_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetNumTpcsForGpc__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetGpcTileMap_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetGpcTileMap_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_GPC_TILE_MAP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetGpcTileMap__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrGetZcullMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrGetZcullMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrGetZcullMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetCaps__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetGlobalSmOrder__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks_DISPATCH(struct Subdevice *pSubdevice, 
NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetFloorsweepingMasks__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetPpcMasks_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetPpcMasks_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetPpcMasks__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetZcullInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetZcullInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetZcullInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetRopInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetRopInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetRopInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetContextBuffersInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetSmIssueRateModifier__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetFecsRecordSize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetFecsTraceDefines__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdKGrInternalStaticGetPdbProperties_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS *pParams); + +static inline NV_STATUS 
subdeviceCtrlCmdKGrInternalStaticGetPdbProperties_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdKGrInternalStaticGetPdbProperties__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetCachedInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetCachedInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetCachedInfo__(pSubdevice, pGpuInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetInfoV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetInfoV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetInfoV2__(pSubdevice, pGpuInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetIpVersion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *pGpuIpVersionParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetIpVersion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_IP_VERSION_PARAMS *pGpuIpVersionParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetIpVersion__(pSubdevice, pGpuIpVersionParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS *pBridgeInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS *pBridgeInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo__(pSubdevice, pBridgeInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS *pBridgeInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS *pBridgeInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu__(pSubdevice, pBridgeInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuSetOptimusInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetOptimusInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetOptimusInfo__(pSubdevice, pGpuOptimusInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetNameString_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pNameStringParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetNameString_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS *pNameStringParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetNameString__(pSubdevice, pNameStringParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetShortNameString_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *pShortNameStringParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetShortNameString_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS *pShortNameStringParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetShortNameString__(pSubdevice, 
pShortNameStringParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEncoderCapacity_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS *pEncoderCapacityParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEncoderCapacity_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS *pEncoderCapacityParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEncoderCapacity__(pSubdevice, pEncoderCapacityParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionStats_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetNvencSwSessionStats__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetNvencSwSessionInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetNvencSwSessionInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *params); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *params) { + return pSubdevice->__subdeviceCtrlCmdGpuGetNvfbcSwSessionStats__(pSubdevice, params); +} + +NV_STATUS subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS *params); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS *params) { + return pSubdevice->__subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo__(pSubdevice, params); +} + +NV_STATUS subdeviceCtrlCmdGpuSetFabricAddr_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetFabricAddr_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetFabricAddr__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuSetPower_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_POWER_PARAMS *pSetPowerParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetPower_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_POWER_PARAMS *pSetPowerParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetPower__(pSubdevice, pSetPowerParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetSdm_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetSdm_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetSdm__(pSubdevice, pSdmParams); +} + +NV_STATUS subdeviceCtrlCmdGpuSetSdm_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_SDM_PARAMS *pSdmParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetSdm_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_SDM_PARAMS *pSdmParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetSdm__(pSubdevice, pSdmParams); +} + +NV_STATUS 
subdeviceCtrlCmdGpuGetSimulationInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetSimulationInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetSimulationInfo__(pSubdevice, pGpuSimulationInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEngines_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngines_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngines__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEnginesV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEnginesV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEnginesV2__(pSubdevice, pEngineParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEngineClasslist_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngineClasslist_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngineClasslist__(pSubdevice, pClassParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEnginePartnerList_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEnginePartnerList__(pSubdevice, pPartnerListParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetFermiGpcInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetFermiGpcInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetFermiGpcInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetFermiTpcInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetFermiTpcInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetFermiTpcInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetFermiZcullInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS *pGpuFermiZcullInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetFermiZcullInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS *pGpuFermiZcullInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetFermiZcullInfo__(pSubdevice, pGpuFermiZcullInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetPesInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PES_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPesInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PES_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPesInfo__(pSubdevice, pParams); +} + +NV_STATUS 
subdeviceCtrlCmdGpuExecRegOps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS *pRegParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuExecRegOps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS *pRegParams) { + return pSubdevice->__subdeviceCtrlCmdGpuExecRegOps__(pSubdevice, pRegParams); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryMode__(pSubdevice, pQueryMode); +} + +NV_STATUS subdeviceCtrlCmdGpuGetInforomImageVersion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS *pVersionInfo); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetInforomImageVersion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFOROM_IMAGE_VERSION_PARAMS *pVersionInfo) { + return pSubdevice->__subdeviceCtrlCmdGpuGetInforomImageVersion__(pSubdevice, pVersionInfo); +} + +NV_STATUS subdeviceCtrlCmdGpuGetInforomObjectVersion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS *pVersionInfo); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetInforomObjectVersion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFOROM_OBJECT_VERSION_PARAMS *pVersionInfo) { + return pSubdevice->__subdeviceCtrlCmdGpuGetInforomObjectVersion__(pSubdevice, pVersionInfo); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryInforomEccSupport_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryInforomEccSupport_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryInforomEccSupport__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryEccStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryEccStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryEccStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetOEMBoardInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *pBoardInfo); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetOEMBoardInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_BOARD_INFO_PARAMS *pBoardInfo) { + return pSubdevice->__subdeviceCtrlCmdGpuGetOEMBoardInfo__(pSubdevice, pBoardInfo); +} + +NV_STATUS subdeviceCtrlCmdGpuGetOEMInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *pOemInfo); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetOEMInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_OEM_INFO_PARAMS *pOemInfo) { + return pSubdevice->__subdeviceCtrlCmdGpuGetOEMInfo__(pSubdevice, pOemInfo); +} + +NV_STATUS subdeviceCtrlCmdGpuHandleGpuSR_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdGpuHandleGpuSR_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuHandleGpuSR__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdGpuSetComputeModeRules_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS *pSetRulesParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetComputeModeRules_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS *pSetRulesParams) { + return 
pSubdevice->__subdeviceCtrlCmdGpuSetComputeModeRules__(pSubdevice, pSetRulesParams); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS *pQueryRulesParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryComputeModeRules_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS *pQueryRulesParams) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryComputeModeRules__(pSubdevice, pQueryRulesParams); +} + +NV_STATUS subdeviceCtrlCmdGpuAcquireComputeModeReservation_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdGpuAcquireComputeModeReservation_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuAcquireComputeModeReservation__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdGpuReleaseComputeModeReservation_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdGpuReleaseComputeModeReservation_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdGpuReleaseComputeModeReservation__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdGpuInitializeCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *pInitializeCtxParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuInitializeCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS *pInitializeCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuInitializeCtx__(pSubdevice, pInitializeCtxParams); +} + +NV_STATUS subdeviceCtrlCmdGpuPromoteCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pPromoteCtxParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuPromoteCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pPromoteCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuPromoteCtx__(pSubdevice, pPromoteCtxParams); +} + +NV_STATUS subdeviceCtrlCmdGpuEvictCtx_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *pEvictCtxParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuEvictCtx_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_EVICT_CTX_PARAMS *pEvictCtxParams) { + return pSubdevice->__subdeviceCtrlCmdGpuEvictCtx__(pSubdevice, pEvictCtxParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetId_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetId_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetId__(pSubdevice, pIdParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetGidInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetGidInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_GID_INFO_PARAMS *pGidInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetGidInfo__(pSubdevice, pGidInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryIllumSupport_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS *pConfigParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryIllumSupport_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_QUERY_ILLUM_SUPPORT_PARAMS *pConfigParams) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryIllumSupport__(pSubdevice, pConfigParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetIllum_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_ILLUM_PARAMS *pConfigParams); + +static inline NV_STATUS 
subdeviceCtrlCmdGpuGetIllum_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_ILLUM_PARAMS *pConfigParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetIllum__(pSubdevice, pConfigParams); +} + +NV_STATUS subdeviceCtrlCmdGpuSetIllum_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_ILLUM_PARAMS *pConfigParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetIllum_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_ILLUM_PARAMS *pConfigParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetIllum__(pSubdevice, pConfigParams); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryScrubberStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryScrubberStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_SCRUBBER_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryScrubberStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetVprCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetVprCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_VPR_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetVprCaps__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetVprInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetVprInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_VPR_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetVprInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetPids_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPids_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPids__(pSubdevice, pGetPidsParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetPidInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPidInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPidInfo__(pSubdevice, pGetPidInfoParams); +} + +NV_STATUS subdeviceCtrlCmdGpuInterruptFunction_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuInterruptFunction_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuInterruptFunction__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryFunctionStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryFunctionStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_GPU_QUERY_FUNCTION_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryFunctionStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuReportNonReplayableFault_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuReportNonReplayableFault_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS *pParams) { + return 
pSubdevice->__subdeviceCtrlCmdGpuReportNonReplayableFault__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEngineFaultInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngineFaultInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngineFaultInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEngineRunlistPriBase_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngineRunlistPriBase_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngineRunlistPriBase__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetHwEngineId_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetHwEngineId_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetHwEngineId__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetMaxSupportedPageSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetMaxSupportedPageSize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuSetComputePolicyConfig_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetComputePolicyConfig_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetComputePolicyConfig__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetComputePolicyConfig_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetComputePolicyConfig_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetComputePolicyConfig__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdValidateMemMapRequest__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetEngineLoadTimes_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetEngineLoadTimes__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetTrigger_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdEventSetTrigger_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdEventSetTrigger__(pSubdevice); +} + +NV_STATUS 
subdeviceCtrlCmdEventSetTriggerFifo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetTriggerFifo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetTriggerFifo__(pSubdevice, pTriggerFifoParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetNotification_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetNotification_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetNotification__(pSubdevice, pSetEventParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetMemoryNotifies_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetMemoryNotifies_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetMemoryNotifies__(pSubdevice, pSetMemoryNotifiesParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetSemaphoreMemory_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetSemaphoreMemory__(pSubdevice, pSetSemMemoryParams); +} + +NV_STATUS subdeviceCtrlCmdEventSetSemaMemValidation_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams); + +static inline NV_STATUS subdeviceCtrlCmdEventSetSemaMemValidation_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams) { + return pSubdevice->__subdeviceCtrlCmdEventSetSemaMemValidation__(pSubdevice, pSetSemaMemValidationParams); +} + +NV_STATUS subdeviceCtrlCmdTimerCancel_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdTimerCancel_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdTimerCancel__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdTimerSchedule_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerSchedule_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerSchedule__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdTimerGetTime_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerGetTime_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetTime__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdTimerGetRegisterOffset_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerGetRegisterOffset_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetRegisterOffset__(pSubdevice, pTimerRegOffsetParams); +} + +NV_STATUS 
subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdTimerSetGrTickFreq_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdTimerSetGrTickFreq_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdTimerSetGrTickFreq__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdRcReadVirtualMem_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *pReadVirtMemParam); + +static inline NV_STATUS subdeviceCtrlCmdRcReadVirtualMem_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *pReadVirtMemParam) { + return pSubdevice->__subdeviceCtrlCmdRcReadVirtualMem__(pSubdevice, pReadVirtMemParam); +} + +NV_STATUS subdeviceCtrlCmdRcGetErrorCount_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS *pErrorCount); + +static inline NV_STATUS subdeviceCtrlCmdRcGetErrorCount_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS *pErrorCount) { + return pSubdevice->__subdeviceCtrlCmdRcGetErrorCount__(pSubdevice, pErrorCount); +} + +NV_STATUS subdeviceCtrlCmdRcGetErrorV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_ERROR_V2_PARAMS *pErrorParams); + +static inline NV_STATUS subdeviceCtrlCmdRcGetErrorV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_ERROR_V2_PARAMS *pErrorParams) { + return pSubdevice->__subdeviceCtrlCmdRcGetErrorV2__(pSubdevice, pErrorParams); +} + +NV_STATUS subdeviceCtrlCmdRcSetCleanErrorHistory_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdRcSetCleanErrorHistory_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdRcSetCleanErrorHistory__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdRcGetWatchdogInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS *pWatchdogInfoParams); + +static inline NV_STATUS subdeviceCtrlCmdRcGetWatchdogInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS *pWatchdogInfoParams) { + return pSubdevice->__subdeviceCtrlCmdRcGetWatchdogInfo__(pSubdevice, pWatchdogInfoParams); +} + +NV_STATUS subdeviceCtrlCmdRcDisableWatchdog_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdRcDisableWatchdog_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdRcDisableWatchdog__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdRcSoftDisableWatchdog_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdRcSoftDisableWatchdog_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdRcSoftDisableWatchdog__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdRcEnableWatchdog_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdRcEnableWatchdog_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdRcEnableWatchdog__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdRcReleaseWatchdogRequests_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS 
subdeviceCtrlCmdRcReleaseWatchdogRequests_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdRcReleaseWatchdogRequests__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalRcWatchdogTimeout_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalRcWatchdogTimeout_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalRcWatchdogTimeout__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdSetRcRecovery_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_RC_RECOVERY_PARAMS *pRcRecovery); + +static inline NV_STATUS subdeviceCtrlCmdSetRcRecovery_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_RC_RECOVERY_PARAMS *pRcRecovery) { + return pSubdevice->__subdeviceCtrlCmdSetRcRecovery__(pSubdevice, pRcRecovery); +} + +NV_STATUS subdeviceCtrlCmdGetRcRecovery_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_RC_RECOVERY_PARAMS *pRcRecovery); + +static inline NV_STATUS subdeviceCtrlCmdGetRcRecovery_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_RC_RECOVERY_PARAMS *pRcRecovery) { + return pSubdevice->__subdeviceCtrlCmdGetRcRecovery__(pSubdevice, pRcRecovery); +} + +NV_STATUS subdeviceCtrlCmdGetRcInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_RC_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGetRcInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_RC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGetRcInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdSetRcInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_RC_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdSetRcInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_RC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdSetRcInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdNvdGetDumpSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS *pDumpSizeParams); + +static inline NV_STATUS subdeviceCtrlCmdNvdGetDumpSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS *pDumpSizeParams) { + return pSubdevice->__subdeviceCtrlCmdNvdGetDumpSize__(pSubdevice, pDumpSizeParams); +} + +NV_STATUS subdeviceCtrlCmdNvdGetDump_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVD_GET_DUMP_PARAMS *pDumpParams); + +static inline NV_STATUS subdeviceCtrlCmdNvdGetDump_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVD_GET_DUMP_PARAMS *pDumpParams) { + return pSubdevice->__subdeviceCtrlCmdNvdGetDump__(pSubdevice, pDumpParams); +} + +NV_STATUS subdeviceCtrlCmdNvdGetNocatJournalRpt_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS *pReportParams); + +static inline NV_STATUS subdeviceCtrlCmdNvdGetNocatJournalRpt_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS *pReportParams) { + return pSubdevice->__subdeviceCtrlCmdNvdGetNocatJournalRpt__(pSubdevice, pReportParams); +} + +NV_STATUS subdeviceCtrlCmdNvdSetNocatJournalData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS *pReportParams); + +static inline NV_STATUS subdeviceCtrlCmdNvdSetNocatJournalData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS *pReportParams) { + return pSubdevice->__subdeviceCtrlCmdNvdSetNocatJournalData__(pSubdevice, pReportParams); +} + +NV_STATUS subdeviceCtrlCmdCeGetCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_CAPS_PARAMS *pCeCapsParams); + +static inline NV_STATUS subdeviceCtrlCmdCeGetCaps_DISPATCH(struct 
Subdevice *pSubdevice, NV2080_CTRL_CE_GET_CAPS_PARAMS *pCeCapsParams) { + return pSubdevice->__subdeviceCtrlCmdCeGetCaps__(pSubdevice, pCeCapsParams); +} + +NV_STATUS subdeviceCtrlCmdCeGetCapsV2_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_CAPS_V2_PARAMS *pCeCapsParams); + +static inline NV_STATUS subdeviceCtrlCmdCeGetCapsV2_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_CAPS_V2_PARAMS *pCeCapsParams) { + return pSubdevice->__subdeviceCtrlCmdCeGetCapsV2__(pSubdevice, pCeCapsParams); +} + +NV_STATUS subdeviceCtrlCmdCeGetAllCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS *pCeCapsParams); + +static inline NV_STATUS subdeviceCtrlCmdCeGetAllCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS *pCeCapsParams) { + return pSubdevice->__subdeviceCtrlCmdCeGetAllCaps__(pSubdevice, pCeCapsParams); +} + +NV_STATUS subdeviceCtrlCmdCeGetCePceMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS *pCePceMaskParams); + +static inline NV_STATUS subdeviceCtrlCmdCeGetCePceMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS *pCePceMaskParams) { + return pSubdevice->__subdeviceCtrlCmdCeGetCePceMask__(pSubdevice, pCePceMaskParams); +} + +NV_STATUS subdeviceCtrlCmdCeUpdatePceLceMappings_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS *pCeUpdatePceLceMappingsParams); + +static inline NV_STATUS subdeviceCtrlCmdCeUpdatePceLceMappings_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS *pCeUpdatePceLceMappingsParams) { + return pSubdevice->__subdeviceCtrlCmdCeUpdatePceLceMappings__(pSubdevice, pCeUpdatePceLceMappingsParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnGetDmemUsage_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS *pFlcnDmemUsageParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnGetDmemUsage_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_GET_DMEM_USAGE_PARAMS *pFlcnDmemUsageParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnGetDmemUsage__(pSubdevice, pFlcnDmemUsageParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnInstrumentationMap_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnInstrumentationMap_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnInstrumentationMap__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnInstrumentationUnmap_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnInstrumentationUnmap_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_MAP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnInstrumentationUnmap__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnInstrumentationGetInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnInstrumentationGetInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_GET_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnInstrumentationGetInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnInstrumentationGetControl_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS *pParams); + +static inline NV_STATUS 
subdeviceCtrlCmdFlcnInstrumentationGetControl_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnInstrumentationGetControl__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnInstrumentationSetControl_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnInstrumentationSetControl_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_CONTROL_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnInstrumentationSetControl__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnInstrumentationRecalibrate_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_RECALIBRATE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnInstrumentationRecalibrate_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_INSTRUMENTATION_RECALIBRATE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnInstrumentationRecalibrate__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnGetEngineArch_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnGetEngineArch_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_GET_ENGINE_ARCH_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnGetEngineArch__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnUstreamerQueueInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnUstreamerQueueInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_USTREAMER_QUEUE_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnUstreamerQueueInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnUstreamerControlGet_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnUstreamerControlGet_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnUstreamerControlGet__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnUstreamerControlSet_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnUstreamerControlSet_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_USTREAMER_CONTROL_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnUstreamerControlSet__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnGetCtxBufferInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnGetCtxBufferInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnGetCtxBufferInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlcnGetCtxBufferSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlcnGetCtxBufferSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLCN_GET_CTX_BUFFER_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlcnGetCtxBufferSize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdEccGetClientExposedCounters_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS 
*pParams); + +static inline NV_STATUS subdeviceCtrlCmdEccGetClientExposedCounters_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdEccGetClientExposedCounters__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuQueryEccConfiguration_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS *pConfig); + +static inline NV_STATUS subdeviceCtrlCmdGpuQueryEccConfiguration_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_QUERY_ECC_CONFIGURATION_PARAMS *pConfig) { + return pSubdevice->__subdeviceCtrlCmdGpuQueryEccConfiguration__(pSubdevice, pConfig); +} + +NV_STATUS subdeviceCtrlCmdGpuSetEccConfiguration_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS *pConfig); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetEccConfiguration_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_ECC_CONFIGURATION_PARAMS *pConfig) { + return pSubdevice->__subdeviceCtrlCmdGpuSetEccConfiguration__(pSubdevice, pConfig); +} + +NV_STATUS subdeviceCtrlCmdGpuResetEccErrorStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuResetEccErrorStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_RESET_ECC_ERROR_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuResetEccErrorStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlaRange_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLA_RANGE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlaRange_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLA_RANGE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlaRange__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlaSetupInstanceMemBlock_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlaSetupInstanceMemBlock_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlaSetupInstanceMemBlock__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlaGetRange_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLA_GET_RANGE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlaGetRange_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLA_GET_RANGE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlaGetRange__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdFlaGetFabricMemStats_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdFlaGetFabricMemStats_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdFlaGetFabricMemStats__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGspGetFeatures_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *pGspFeaturesParams); + +static inline NV_STATUS subdeviceCtrlCmdGspGetFeatures_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GSP_GET_FEATURES_PARAMS *pGspFeaturesParams) { + return pSubdevice->__subdeviceCtrlCmdGspGetFeatures__(pSubdevice, pGspFeaturesParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetActivePartitionIds_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetActivePartitionIds_DISPATCH(struct Subdevice 
*pSubdevice, NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetActivePartitionIds__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetPartitionCapacity_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPartitionCapacity_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPartitionCapacity__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuDescribePartitions_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuDescribePartitions_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuDescribePartitions__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuSetPartitioningMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetPartitioningMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetPartitioningMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGrmgrGetGrFsInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGrmgrGetGrFsInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GRMGR_GET_GR_FS_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGrmgrGetGrFsInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuSetPartitions_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuSetPartitions_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuSetPartitions__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGpuGetPartitions_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGpuGetPartitions_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGpuGetPartitions__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PROFILES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalStaticKMIGmgrGetProfiles__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_PARTITIONABLE_ENGINES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalStaticKMIGmgrGetPartitionableEngines__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges_IMPL(struct Subdevice *pSubdevice, 
NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_STATIC_MIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalStaticKMIGmgrGetSwizzIdFbMemPageRanges__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdOsUnixGc6BlockerRefCnt_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_GC6_BLOCKER_REFCNT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixGc6BlockerRefCnt__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdOsUnixAllowDisallowGcoff_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdOsUnixAllowDisallowGcoff_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_ALLOW_DISALLOW_GCOFF_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixAllowDisallowGcoff__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdOsUnixAudioDynamicPower_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdOsUnixAudioDynamicPower_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_AUDIO_DYNAMIC_POWER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixAudioDynamicPower__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdOsUnixVidmemPersistenceStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdOsUnixVidmemPersistenceStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_VIDMEM_PERSISTENCE_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixVidmemPersistenceStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdOsUnixUpdateTgpStatus_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdOsUnixUpdateTgpStatus_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_OS_UNIX_UPDATE_TGP_STATUS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdOsUnixUpdateTgpStatus__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplayGetIpVersion_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *pParams); + +static inline NV_STATUS 
subdeviceCtrlCmdDisplayGetIpVersion_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetIpVersion__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplayGetStaticInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplayGetStaticInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetStaticInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplaySetChannelPushbuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetChannelPushbuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetChannelPushbuffer__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplayWriteInstMem_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplayWriteInstMem_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayWriteInstMem__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplaySetupRgLineIntr_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetupRgLineIntr_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetupRgLineIntr__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplaySetImportedImpData_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplaySetImportedImpData_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplaySetImportedImpData__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdDisplayGetDisplayMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdDisplayGetDisplayMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdDisplayGetDisplayMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdMsencGetCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdMsencGetCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdMsencGetCaps__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer__(pSubdevice, pParams); +} + +NV_STATUS 
subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetChipInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetChipInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetChipInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetUserRegisterAccessMap_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetUserRegisterAccessMap_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetUserRegisterAccessMap__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetDeviceInfoTable_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetDeviceInfoTable_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetDeviceInfoTable__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetConstructedFalconInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetConstructedFalconInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetConstructedFalconInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalRecoverAllComputeContexts_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalRecoverAllComputeContexts_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalRecoverAllComputeContexts__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalGetSmcMode_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetSmcMode_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetSmcMode__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalBusBindLocalGfidForP2p_IMPL(struct Subdevice 
*pSubdevice, NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalBusBindLocalGfidForP2p_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalBusBindLocalGfidForP2p__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalBusBindRemoteGfidForP2p__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalBusFlushWithSysmembar_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalBusFlushWithSysmembar_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalBusFlushWithSysmembar__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalBusSetupP2pMailboxLocal__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalBusSetupP2pMailboxRemote__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalBusDestroyP2pMailbox_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalBusDestroyP2pMailbox_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalBusDestroyP2pMailbox__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalBusCreateC2cPeerMapping_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalBusCreateC2cPeerMapping_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_CREATE_C2C_PEER_MAPPING_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalBusCreateC2cPeerMapping__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BUS_REMOVE_C2C_PEER_MAPPING_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalBusRemoveC2cPeerMapping__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGmmuGetStaticInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGmmuGetStaticInfo_DISPATCH(struct Subdevice 
*pSubdevice, NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGmmuGetStaticInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGmmuRegisterFaultBuffer__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalGmmuUnregisterFaultBuffer__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGmmuRegisterClientShadowFaultBuffer__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalGmmuUnregisterClientShadowFaultBuffer__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdCeGetPhysicalCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_CAPS_V2_PARAMS *pCeCapsParams); + +static inline NV_STATUS subdeviceCtrlCmdCeGetPhysicalCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_CAPS_V2_PARAMS *pCeCapsParams) { + return pSubdevice->__subdeviceCtrlCmdCeGetPhysicalCaps__(pSubdevice, pCeCapsParams); +} + +NV_STATUS subdeviceCtrlCmdCeGetAllPhysicalCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS *pCeCapsParams); + +static inline NV_STATUS subdeviceCtrlCmdCeGetAllPhysicalCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS *pCeCapsParams) { + return pSubdevice->__subdeviceCtrlCmdCeGetAllPhysicalCaps__(pSubdevice, pCeCapsParams); +} + +NV_STATUS subdeviceCtrlCmdCeUpdateClassDB_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS *params); + +static inline NV_STATUS subdeviceCtrlCmdCeUpdateClassDB_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS *params) { + return pSubdevice->__subdeviceCtrlCmdCeUpdateClassDB__(pSubdevice, params); +} + +NV_STATUS subdeviceCtrlCmdCeGetFaultMethodBufferSize_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *params); + +static inline NV_STATUS subdeviceCtrlCmdCeGetFaultMethodBufferSize_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *params) { + return pSubdevice->__subdeviceCtrlCmdCeGetFaultMethodBufferSize__(pSubdevice, params); +} + +NV_STATUS subdeviceCtrlCmdCeGetHubPceMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdCeGetHubPceMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS 
*pParams) { + return pSubdevice->__subdeviceCtrlCmdCeGetHubPceMask__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdIntrGetKernelTable_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdIntrGetKernelTable_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdIntrGetKernelTable__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfCudaLimitDisable_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfCudaLimitDisable_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfCudaLimitDisable__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfOptpCliClear_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfOptpCliClear_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfOptpCliClear__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfBoostSet_2x_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfBoostSet_2x_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfBoostSet_2x__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfBoostSet_3x_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfBoostSet_3x_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_3X *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfBoostSet_3x__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfBoostClear_3x_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfBoostClear_3x_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_BOOST_CLEAR_PARAMS_3X *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfBoostClear_3x__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfGpuBoostSyncSetControl__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfGpuBoostSyncGetInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS *pParams) { + return 
pSubdevice->__subdeviceCtrlCmdInternalPerfSyncGpuBoostSetLimits__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfPerfmonClientReservationCheck__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfPerfmonClientReservationSet__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_PERF_CF_CONTROLLERS_SET_MAX_VGPU_VM_COUNT_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalPerfCfControllerSetMaxVGpuVMCount__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBifGetStaticInfo_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBifGetStaticInfo_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBifGetStaticInfo__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdBifGetAspmL1Flags_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdBifGetAspmL1Flags_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdBifGetAspmL1Flags__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdHshubPeerConnConfig_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdHshubPeerConnConfig_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdHshubPeerConnConfig__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdHshubFirstLinkPeerId_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdHshubFirstLinkPeerId_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_FIRST_LINK_PEER_ID_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdHshubFirstLinkPeerId__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdHshubGetHshubIdForLinks_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdHshubGetHshubIdForLinks_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdHshubGetHshubIdForLinks__(pSubdevice, 
pParams); +} + +NV_STATUS subdeviceCtrlCmdHshubGetNumUnits_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdHshubGetNumUnits_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdHshubGetNumUnits__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdHshubNextHshubId_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdHshubNextHshubId_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdHshubNextHshubId__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr_IMPL(struct Subdevice *pSubdevice); + +static inline NV_STATUS subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr_DISPATCH(struct Subdevice *pSubdevice) { + return pSubdevice->__subdeviceCtrlCmdInternalNvlinkEnableComputePeerAddr__(pSubdevice); +} + +NV_STATUS subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalNvlinkGetSetNvswitchFabricAddr__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdInternalGetPcieP2pCaps_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdInternalGetPcieP2pCaps_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdInternalGetPcieP2pCaps__(pSubdevice, pParams); +} + +NV_STATUS subdeviceCtrlCmdGetAvailableHshubMask_IMPL(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS *pParams); + +static inline NV_STATUS subdeviceCtrlCmdGetAvailableHshubMask_DISPATCH(struct Subdevice *pSubdevice, NV2080_CTRL_CMD_HSHUB_GET_AVAILABLE_MASK_PARAMS *pParams) { + return pSubdevice->__subdeviceCtrlCmdGetAvailableHshubMask__(pSubdevice, pParams); +} + +static inline NvBool subdeviceShareCallback_DISPATCH(struct Subdevice *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__subdeviceShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS subdeviceMapTo_DISPATCH(struct Subdevice *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__subdeviceMapTo__(pResource, pParams); +} + +static inline NV_STATUS subdeviceGetOrAllocNotifShare_DISPATCH(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__subdeviceGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS subdeviceCheckMemInterUnmap_DISPATCH(struct Subdevice *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__subdeviceCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS subdeviceGetMapAddrSpace_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + 
return pGpuResource->__subdeviceGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void subdeviceSetNotificationShare_DISPATCH(struct Subdevice *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__subdeviceSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 subdeviceGetRefCount_DISPATCH(struct Subdevice *pResource) { + return pResource->__subdeviceGetRefCount__(pResource); +} + +static inline void subdeviceAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Subdevice *pResource, RsResourceRef *pReference) { + pResource->__subdeviceAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS subdeviceControl_Prologue_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__subdeviceControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS subdeviceGetRegBaseOffsetAndSize_DISPATCH(struct Subdevice *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__subdeviceGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS subdeviceUnmapFrom_DISPATCH(struct Subdevice *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__subdeviceUnmapFrom__(pResource, pParams); +} + +static inline void subdeviceControl_Epilogue_DISPATCH(struct Subdevice *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__subdeviceControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS subdeviceControlLookup_DISPATCH(struct Subdevice *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__subdeviceControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle subdeviceGetInternalObjectHandle_DISPATCH(struct Subdevice *pGpuResource) { + return pGpuResource->__subdeviceGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS subdeviceControl_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__subdeviceControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS subdeviceUnmap_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__subdeviceUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS subdeviceGetMemInterMapParams_DISPATCH(struct Subdevice *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__subdeviceGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS subdeviceGetMemoryMappingDescriptor_DISPATCH(struct Subdevice *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__subdeviceGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS subdeviceUnregisterEvent_DISPATCH(struct Subdevice *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__subdeviceUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool subdeviceCanCopy_DISPATCH(struct Subdevice *pResource) { + return pResource->__subdeviceCanCopy__(pResource); +} + +static inline PEVENTNOTIFICATION 
*subdeviceGetNotificationListPtr_DISPATCH(struct Subdevice *pNotifier) { + return pNotifier->__subdeviceGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *subdeviceGetNotificationShare_DISPATCH(struct Subdevice *pNotifier) { + return pNotifier->__subdeviceGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS subdeviceMap_DISPATCH(struct Subdevice *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__subdeviceMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool subdeviceAccessCallback_DISPATCH(struct Subdevice *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__subdeviceAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS subdeviceSetPerfmonReservation(struct Subdevice *pSubdevice, NvBool bReservation, NvBool bClientHandlesGrGating, NvBool bRmHandlesIdleSlow) { + return NV_OK; +} + +static inline NV_STATUS subdeviceResetTGP(struct Subdevice *pSubdevice) { + return NV_OK; +} + +static inline NV_STATUS subdeviceReleaseVideoStreams(struct Subdevice *pSubdevice) { + return NV_OK; +} + +static inline void subdeviceRestoreLockedClock(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +static inline void subdeviceReleaseNvlinkErrorInjectionMode(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + return; +} + +NV_STATUS subdeviceConstruct_IMPL(struct Subdevice *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_subdeviceConstruct(arg_pResource, arg_pCallContext, arg_pParams) subdeviceConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void subdeviceDestruct_IMPL(struct Subdevice *pResource); +#define __nvoc_subdeviceDestruct(pResource) subdeviceDestruct_IMPL(pResource) +NV_STATUS subdeviceAddP2PApi_IMPL(struct Subdevice *pSubdevice, struct P2PApi *pP2PApi); +#ifdef __nvoc_subdevice_h_disabled +static inline NV_STATUS subdeviceAddP2PApi(struct Subdevice *pSubdevice, struct P2PApi *pP2PApi) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceAddP2PApi(pSubdevice, pP2PApi) subdeviceAddP2PApi_IMPL(pSubdevice, pP2PApi) +#endif //__nvoc_subdevice_h_disabled + +NV_STATUS subdeviceDelP2PApi_IMPL(struct Subdevice *pSubdevice, struct P2PApi *pP2PApi); +#ifdef __nvoc_subdevice_h_disabled +static inline NV_STATUS subdeviceDelP2PApi(struct Subdevice *pSubdevice, struct P2PApi *pP2PApi) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceDelP2PApi(pSubdevice, pP2PApi) subdeviceDelP2PApi_IMPL(pSubdevice, pP2PApi) +#endif //__nvoc_subdevice_h_disabled + +void subdeviceRestoreGrTickFreq_IMPL(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext); +#ifdef __nvoc_subdevice_h_disabled +static inline void subdeviceRestoreGrTickFreq(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceRestoreGrTickFreq(pSubdevice, pCallContext) subdeviceRestoreGrTickFreq_IMPL(pSubdevice, pCallContext) +#endif //__nvoc_subdevice_h_disabled + +void subdeviceRestoreWatchdog_IMPL(struct Subdevice *pSubdevice); +#ifdef 
__nvoc_subdevice_h_disabled +static inline void subdeviceRestoreWatchdog(struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceRestoreWatchdog(pSubdevice) subdeviceRestoreWatchdog_IMPL(pSubdevice) +#endif //__nvoc_subdevice_h_disabled + +void subdeviceUnsetGpuDebugMode_IMPL(struct Subdevice *pSubdevice); +#ifdef __nvoc_subdevice_h_disabled +static inline void subdeviceUnsetGpuDebugMode(struct Subdevice *pSubdevice) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceUnsetGpuDebugMode(pSubdevice) subdeviceUnsetGpuDebugMode_IMPL(pSubdevice) +#endif //__nvoc_subdevice_h_disabled + +void subdeviceReleaseComputeModeReservation_IMPL(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext); +#ifdef __nvoc_subdevice_h_disabled +static inline void subdeviceReleaseComputeModeReservation(struct Subdevice *pSubdevice, struct CALL_CONTEXT *pCallContext) { + NV_ASSERT_FAILED_PRECOMP("Subdevice was disabled!"); +} +#else //__nvoc_subdevice_h_disabled +#define subdeviceReleaseComputeModeReservation(pSubdevice, pCallContext) subdeviceReleaseComputeModeReservation_IMPL(pSubdevice, pCallContext) +#endif //__nvoc_subdevice_h_disabled + +NV_STATUS subdeviceGetByHandle_IMPL(struct RsClient *pClient, NvHandle hSubdevice, struct Subdevice **ppSubdevice); +#define subdeviceGetByHandle(pClient, hSubdevice, ppSubdevice) subdeviceGetByHandle_IMPL(pClient, hSubdevice, ppSubdevice) +NV_STATUS subdeviceGetByGpu_IMPL(struct RsClient *pClient, struct OBJGPU *pGpu, struct Subdevice **ppSubdevice); +#define subdeviceGetByGpu(pClient, pGpu, ppSubdevice) subdeviceGetByGpu_IMPL(pClient, pGpu, ppSubdevice) +NV_STATUS subdeviceGetByInstance_IMPL(struct RsClient *pClient, NvHandle hDevice, NvU32 subDeviceInst, struct Subdevice **ppSubdevice); +#define subdeviceGetByInstance(pClient, hDevice, subDeviceInst, ppSubdevice) subdeviceGetByInstance_IMPL(pClient, hDevice, subDeviceInst, ppSubdevice) +#undef PRIVATE_FIELD + + +// **************************************************************************** +// Deprecated Definitions +// **************************************************************************** + +/** + * WARNING: This function is deprecated! Please use subdeviceGetByGpu and + * GPU_RES_SET_THREAD_BC_STATE (if needed to set thread UC state for SLI) + */ +struct Subdevice *CliGetSubDeviceInfoFromGpu(NvHandle, struct OBJGPU*); + +/** + * WARNING: This function is deprecated! Please use subdeviceGetByGpu and + * RES_GET_HANDLE + */ +NV_STATUS CliGetSubDeviceHandleFromGpu(NvHandle, struct OBJGPU*, NvHandle *); + +/** + * WARNING: This function is deprecated and use is *strongly* discouraged + * (especially for new code!) + * + * From the function name (CliSetSubDeviceContext) it appears as a simple + * accessor but violates expectations by modifying the SLI BC threadstate (calls + * to GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully + * managed by the caller. + * + * Instead of using this routine, please use subdeviceGetByHandle then call + * GPU_RES_GET_GPU, RES_GET_HANDLE, GPU_RES_SET_THREAD_BC_STATE as needed. + * + * Note that GPU_RES_GET_GPU supports returning a pGpu for both pDevice, + * pSubdevice, the base pResource type, and any resource that inherits from + * GpuResource. 
That is, instead of using CliSetGpuContext or + * CliSetSubDeviceContext, please use following pattern to look up the pGpu: + * + * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource) + * + * To set the threadstate, please use: + * + * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource); + */ +NV_STATUS CliSetSubDeviceContext(NvHandle hClient, NvHandle hSubdevice, NvHandle *phDevice, + struct OBJGPU **ppGpu); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SUBDEVICE_NVOC_H_ diff --git a/src/nvidia/generated/g_sw_test_nvoc.c b/src/nvidia/generated/g_sw_test_nvoc.c new file mode 100644 index 000000000..771bb74dd --- /dev/null +++ b/src/nvidia/generated/g_sw_test_nvoc.c @@ -0,0 +1,407 @@ +#define NVOC_SW_TEST_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_sw_test_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xdea092 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SoftwareMethodTest; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_SoftwareMethodTest(SoftwareMethodTest*, RmHalspecOwner* ); +void __nvoc_init_funcTable_SoftwareMethodTest(SoftwareMethodTest*); +NV_STATUS __nvoc_ctor_SoftwareMethodTest(SoftwareMethodTest*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_SoftwareMethodTest(SoftwareMethodTest*); +void __nvoc_dtor_SoftwareMethodTest(SoftwareMethodTest*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_SoftwareMethodTest; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_SoftwareMethodTest = { + /*pClassDef=*/ &__nvoc_class_def_SoftwareMethodTest, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SoftwareMethodTest, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SoftwareMethodTest, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SoftwareMethodTest, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SoftwareMethodTest, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + 
/*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SoftwareMethodTest, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SoftwareMethodTest, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SoftwareMethodTest, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SoftwareMethodTest, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SoftwareMethodTest_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SoftwareMethodTest, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_SoftwareMethodTest = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_SoftwareMethodTest_SoftwareMethodTest, + &__nvoc_rtti_SoftwareMethodTest_ChannelDescendant, + &__nvoc_rtti_SoftwareMethodTest_Notifier, + &__nvoc_rtti_SoftwareMethodTest_INotifier, + &__nvoc_rtti_SoftwareMethodTest_GpuResource, + &__nvoc_rtti_SoftwareMethodTest_RmResource, + &__nvoc_rtti_SoftwareMethodTest_RmResourceCommon, + &__nvoc_rtti_SoftwareMethodTest_RsResource, + &__nvoc_rtti_SoftwareMethodTest_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_SoftwareMethodTest = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SoftwareMethodTest), + /*classId=*/ classId(SoftwareMethodTest), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SoftwareMethodTest", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SoftwareMethodTest, + /*pCastInfo=*/ &__nvoc_castinfo_SoftwareMethodTest, + /*pExportInfo=*/ &__nvoc_export_info_SoftwareMethodTest +}; + +static NV_STATUS __nvoc_thunk_SoftwareMethodTest_chandesGetSwMethods(struct ChannelDescendant *pSwTest, METHOD **ppMethods, NvU32 *pNumMethods) { + return swtestGetSwMethods((struct SoftwareMethodTest *)(((unsigned char *)pSwTest) - __nvoc_rtti_SoftwareMethodTest_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_swtestCheckMemInterUnmap(struct SoftwareMethodTest *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_SoftwareMethodTest_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_swtestShareCallback(struct SoftwareMethodTest *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_SoftwareMethodTest_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_swtestAccessCallback(struct SoftwareMethodTest *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + 
return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_swtestMapTo(struct SoftwareMethodTest *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_swtestGetMapAddrSpace(struct SoftwareMethodTest *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_SoftwareMethodTest_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_swtestSetNotificationShare(struct SoftwareMethodTest *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_SoftwareMethodTest_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_swtestGetRefCount(struct SoftwareMethodTest *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_swtestAddAdditionalDependants(struct RsClient *pClient, struct SoftwareMethodTest *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_swtestControl_Prologue(struct SoftwareMethodTest *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_swtestGetRegBaseOffsetAndSize(struct SoftwareMethodTest *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_SoftwareMethodTest_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_swtestInternalControlForward(struct SoftwareMethodTest *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_SoftwareMethodTest_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_swtestUnmapFrom(struct SoftwareMethodTest *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_swtestControl_Epilogue(struct SoftwareMethodTest *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_swtestControlLookup(struct SoftwareMethodTest *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char 
*)pResource) + __nvoc_rtti_SoftwareMethodTest_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_swtestGetInternalObjectHandle(struct SoftwareMethodTest *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_SoftwareMethodTest_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_swtestControl(struct SoftwareMethodTest *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_SoftwareMethodTest_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_swtestUnmap(struct SoftwareMethodTest *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_SoftwareMethodTest_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_swtestGetMemInterMapParams(struct SoftwareMethodTest *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_SoftwareMethodTest_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_swtestGetMemoryMappingDescriptor(struct SoftwareMethodTest *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_SoftwareMethodTest_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_swtestIsSwMethodStalling(struct SoftwareMethodTest *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_SoftwareMethodTest_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_swtestControlFilter(struct SoftwareMethodTest *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_swtestUnregisterEvent(struct SoftwareMethodTest *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_SoftwareMethodTest_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_swtestCanCopy(struct SoftwareMethodTest *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_swtestPreDestruct(struct SoftwareMethodTest *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SoftwareMethodTest_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_swtestGetNotificationListPtr(struct SoftwareMethodTest *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_SoftwareMethodTest_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_swtestGetNotificationShare(struct SoftwareMethodTest *pNotifier) { + return 
notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_SoftwareMethodTest_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_swtestMap(struct SoftwareMethodTest *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_SoftwareMethodTest_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_swtestGetOrAllocNotifShare(struct SoftwareMethodTest *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_SoftwareMethodTest_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_SoftwareMethodTest = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_SoftwareMethodTest(SoftwareMethodTest *pThis) { + __nvoc_swtestDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_SoftwareMethodTest(SoftwareMethodTest *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, PARAM_TO_ENGDESC_FUNCTION *); +NV_STATUS __nvoc_ctor_SoftwareMethodTest(SoftwareMethodTest *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, ((void *)0)); + if (status != NV_OK) goto __nvoc_ctor_SoftwareMethodTest_fail_ChannelDescendant; + __nvoc_init_dataField_SoftwareMethodTest(pThis); + + status = __nvoc_swtestConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SoftwareMethodTest_fail__init; + goto __nvoc_ctor_SoftwareMethodTest_exit; // Success + +__nvoc_ctor_SoftwareMethodTest_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_SoftwareMethodTest_fail_ChannelDescendant: +__nvoc_ctor_SoftwareMethodTest_exit: + + return status; +} + +static void __nvoc_init_funcTable_SoftwareMethodTest_1(SoftwareMethodTest *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__swtestGetSwMethods__ = &swtestGetSwMethods_IMPL; + + pThis->__nvoc_base_ChannelDescendant.__chandesGetSwMethods__ = &__nvoc_thunk_SoftwareMethodTest_chandesGetSwMethods; + + pThis->__swtestCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_swtestCheckMemInterUnmap; + + pThis->__swtestShareCallback__ = &__nvoc_thunk_GpuResource_swtestShareCallback; + + pThis->__swtestAccessCallback__ = &__nvoc_thunk_RmResource_swtestAccessCallback; + + pThis->__swtestMapTo__ = &__nvoc_thunk_RsResource_swtestMapTo; + + pThis->__swtestGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_swtestGetMapAddrSpace; + + pThis->__swtestSetNotificationShare__ = &__nvoc_thunk_Notifier_swtestSetNotificationShare; + + pThis->__swtestGetRefCount__ = &__nvoc_thunk_RsResource_swtestGetRefCount; + + pThis->__swtestAddAdditionalDependants__ = &__nvoc_thunk_RsResource_swtestAddAdditionalDependants; + + pThis->__swtestControl_Prologue__ 
= &__nvoc_thunk_RmResource_swtestControl_Prologue; + + pThis->__swtestGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_swtestGetRegBaseOffsetAndSize; + + pThis->__swtestInternalControlForward__ = &__nvoc_thunk_GpuResource_swtestInternalControlForward; + + pThis->__swtestUnmapFrom__ = &__nvoc_thunk_RsResource_swtestUnmapFrom; + + pThis->__swtestControl_Epilogue__ = &__nvoc_thunk_RmResource_swtestControl_Epilogue; + + pThis->__swtestControlLookup__ = &__nvoc_thunk_RsResource_swtestControlLookup; + + pThis->__swtestGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_swtestGetInternalObjectHandle; + + pThis->__swtestControl__ = &__nvoc_thunk_GpuResource_swtestControl; + + pThis->__swtestUnmap__ = &__nvoc_thunk_GpuResource_swtestUnmap; + + pThis->__swtestGetMemInterMapParams__ = &__nvoc_thunk_RmResource_swtestGetMemInterMapParams; + + pThis->__swtestGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_swtestGetMemoryMappingDescriptor; + + pThis->__swtestIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_swtestIsSwMethodStalling; + + pThis->__swtestControlFilter__ = &__nvoc_thunk_RsResource_swtestControlFilter; + + pThis->__swtestUnregisterEvent__ = &__nvoc_thunk_Notifier_swtestUnregisterEvent; + + pThis->__swtestCanCopy__ = &__nvoc_thunk_RsResource_swtestCanCopy; + + pThis->__swtestPreDestruct__ = &__nvoc_thunk_RsResource_swtestPreDestruct; + + pThis->__swtestGetNotificationListPtr__ = &__nvoc_thunk_Notifier_swtestGetNotificationListPtr; + + pThis->__swtestGetNotificationShare__ = &__nvoc_thunk_Notifier_swtestGetNotificationShare; + + pThis->__swtestMap__ = &__nvoc_thunk_GpuResource_swtestMap; + + pThis->__swtestGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_swtestGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_SoftwareMethodTest(SoftwareMethodTest *pThis) { + __nvoc_init_funcTable_SoftwareMethodTest_1(pThis); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_SoftwareMethodTest(SoftwareMethodTest *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_SoftwareMethodTest = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_SoftwareMethodTest(pThis); +} + +NV_STATUS __nvoc_objCreate_SoftwareMethodTest(SoftwareMethodTest **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + SoftwareMethodTest *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = 
portMemAllocNonPaged(sizeof(SoftwareMethodTest)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(SoftwareMethodTest)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_SoftwareMethodTest); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_SoftwareMethodTest(pThis, pRmhalspecowner); + status = __nvoc_ctor_SoftwareMethodTest(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_SoftwareMethodTest_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_SoftwareMethodTest_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_SoftwareMethodTest(SoftwareMethodTest **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_SoftwareMethodTest(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_sw_test_nvoc.h b/src/nvidia/generated/g_sw_test_nvoc.h new file mode 100644 index 000000000..db82b9cb0 --- /dev/null +++ b/src/nvidia/generated/g_sw_test_nvoc.h @@ -0,0 +1,278 @@ +#ifndef _G_SW_TEST_NVOC_H_ +#define _G_SW_TEST_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_sw_test_nvoc.h" + +#ifndef _SW_TEST_H_ +#define _SW_TEST_H_ + +#include "core/core.h" +#include "kernel/gpu/fifo/channel_descendant.h" + +/*! + * RM internal class representing NV04_SOFTWARE_TEST + */ +#ifdef NVOC_SW_TEST_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct SoftwareMethodTest { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct SoftwareMethodTest *__nvoc_pbase_SoftwareMethodTest; + NV_STATUS (*__swtestGetSwMethods__)(struct SoftwareMethodTest *, METHOD **, NvU32 *); + NV_STATUS (*__swtestCheckMemInterUnmap__)(struct SoftwareMethodTest *, NvBool); + NvBool (*__swtestShareCallback__)(struct SoftwareMethodTest *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__swtestAccessCallback__)(struct SoftwareMethodTest *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__swtestMapTo__)(struct SoftwareMethodTest *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__swtestGetMapAddrSpace__)(struct SoftwareMethodTest *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__swtestSetNotificationShare__)(struct SoftwareMethodTest *, struct NotifShare *); + NvU32 (*__swtestGetRefCount__)(struct SoftwareMethodTest *); + void (*__swtestAddAdditionalDependants__)(struct RsClient *, struct SoftwareMethodTest *, RsResourceRef *); + NV_STATUS (*__swtestControl_Prologue__)(struct SoftwareMethodTest *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__swtestGetRegBaseOffsetAndSize__)(struct SoftwareMethodTest *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__swtestInternalControlForward__)(struct SoftwareMethodTest *, NvU32, void *, NvU32); + NV_STATUS (*__swtestUnmapFrom__)(struct SoftwareMethodTest *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__swtestControl_Epilogue__)(struct SoftwareMethodTest *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__swtestControlLookup__)(struct SoftwareMethodTest *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__swtestGetInternalObjectHandle__)(struct SoftwareMethodTest *); + NV_STATUS (*__swtestControl__)(struct SoftwareMethodTest *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__swtestUnmap__)(struct SoftwareMethodTest *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__swtestGetMemInterMapParams__)(struct SoftwareMethodTest *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__swtestGetMemoryMappingDescriptor__)(struct SoftwareMethodTest *, struct MEMORY_DESCRIPTOR **); + NvBool (*__swtestIsSwMethodStalling__)(struct SoftwareMethodTest *, NvU32); + NV_STATUS (*__swtestControlFilter__)(struct SoftwareMethodTest *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__swtestUnregisterEvent__)(struct SoftwareMethodTest *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__swtestCanCopy__)(struct SoftwareMethodTest *); + void (*__swtestPreDestruct__)(struct SoftwareMethodTest *); + PEVENTNOTIFICATION 
*(*__swtestGetNotificationListPtr__)(struct SoftwareMethodTest *); + struct NotifShare *(*__swtestGetNotificationShare__)(struct SoftwareMethodTest *); + NV_STATUS (*__swtestMap__)(struct SoftwareMethodTest *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__swtestGetOrAllocNotifShare__)(struct SoftwareMethodTest *, NvHandle, NvHandle, struct NotifShare **); +}; + +#ifndef __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ +#define __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ +typedef struct SoftwareMethodTest SoftwareMethodTest; +#endif /* __NVOC_CLASS_SoftwareMethodTest_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SoftwareMethodTest +#define __nvoc_class_id_SoftwareMethodTest 0xdea092 +#endif /* __nvoc_class_id_SoftwareMethodTest */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SoftwareMethodTest; + +#define __staticCast_SoftwareMethodTest(pThis) \ + ((pThis)->__nvoc_pbase_SoftwareMethodTest) + +#ifdef __nvoc_sw_test_h_disabled +#define __dynamicCast_SoftwareMethodTest(pThis) ((SoftwareMethodTest*)NULL) +#else //__nvoc_sw_test_h_disabled +#define __dynamicCast_SoftwareMethodTest(pThis) \ + ((SoftwareMethodTest*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SoftwareMethodTest))) +#endif //__nvoc_sw_test_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_SoftwareMethodTest(SoftwareMethodTest**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SoftwareMethodTest(SoftwareMethodTest**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_SoftwareMethodTest(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_SoftwareMethodTest((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define swtestGetSwMethods(pSwTest, ppMethods, pNumMethods) swtestGetSwMethods_DISPATCH(pSwTest, ppMethods, pNumMethods) +#define swtestCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) swtestCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define swtestShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) swtestShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define swtestAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) swtestAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define swtestMapTo(pResource, pParams) swtestMapTo_DISPATCH(pResource, pParams) +#define swtestGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) swtestGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define swtestSetNotificationShare(pNotifier, pNotifShare) swtestSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define swtestGetRefCount(pResource) swtestGetRefCount_DISPATCH(pResource) +#define swtestAddAdditionalDependants(pClient, pResource, pReference) swtestAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define swtestControl_Prologue(pResource, pCallContext, pParams) swtestControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define swtestGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) swtestGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define swtestInternalControlForward(pGpuResource, command, pParams, size) swtestInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define swtestUnmapFrom(pResource, pParams) swtestUnmapFrom_DISPATCH(pResource, pParams) +#define 
swtestControl_Epilogue(pResource, pCallContext, pParams) swtestControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define swtestControlLookup(pResource, pParams, ppEntry) swtestControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define swtestGetInternalObjectHandle(pGpuResource) swtestGetInternalObjectHandle_DISPATCH(pGpuResource) +#define swtestControl(pGpuResource, pCallContext, pParams) swtestControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define swtestUnmap(pGpuResource, pCallContext, pCpuMapping) swtestUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define swtestGetMemInterMapParams(pRmResource, pParams) swtestGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define swtestGetMemoryMappingDescriptor(pRmResource, ppMemDesc) swtestGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define swtestIsSwMethodStalling(pChannelDescendant, hHandle) swtestIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define swtestControlFilter(pResource, pCallContext, pParams) swtestControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define swtestUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) swtestUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define swtestCanCopy(pResource) swtestCanCopy_DISPATCH(pResource) +#define swtestPreDestruct(pResource) swtestPreDestruct_DISPATCH(pResource) +#define swtestGetNotificationListPtr(pNotifier) swtestGetNotificationListPtr_DISPATCH(pNotifier) +#define swtestGetNotificationShare(pNotifier) swtestGetNotificationShare_DISPATCH(pNotifier) +#define swtestMap(pGpuResource, pCallContext, pParams, pCpuMapping) swtestMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define swtestGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) swtestGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS swtestGetSwMethods_IMPL(struct SoftwareMethodTest *pSwTest, METHOD **ppMethods, NvU32 *pNumMethods); + +static inline NV_STATUS swtestGetSwMethods_DISPATCH(struct SoftwareMethodTest *pSwTest, METHOD **ppMethods, NvU32 *pNumMethods) { + return pSwTest->__swtestGetSwMethods__(pSwTest, ppMethods, pNumMethods); +} + +static inline NV_STATUS swtestCheckMemInterUnmap_DISPATCH(struct SoftwareMethodTest *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__swtestCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool swtestShareCallback_DISPATCH(struct SoftwareMethodTest *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__swtestShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool swtestAccessCallback_DISPATCH(struct SoftwareMethodTest *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__swtestAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS swtestMapTo_DISPATCH(struct SoftwareMethodTest *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__swtestMapTo__(pResource, pParams); +} + +static inline NV_STATUS swtestGetMapAddrSpace_DISPATCH(struct SoftwareMethodTest *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__swtestGetMapAddrSpace__(pGpuResource, pCallContext, 
mapFlags, pAddrSpace); +} + +static inline void swtestSetNotificationShare_DISPATCH(struct SoftwareMethodTest *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__swtestSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 swtestGetRefCount_DISPATCH(struct SoftwareMethodTest *pResource) { + return pResource->__swtestGetRefCount__(pResource); +} + +static inline void swtestAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct SoftwareMethodTest *pResource, RsResourceRef *pReference) { + pResource->__swtestAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS swtestControl_Prologue_DISPATCH(struct SoftwareMethodTest *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__swtestControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS swtestGetRegBaseOffsetAndSize_DISPATCH(struct SoftwareMethodTest *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__swtestGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS swtestInternalControlForward_DISPATCH(struct SoftwareMethodTest *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__swtestInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS swtestUnmapFrom_DISPATCH(struct SoftwareMethodTest *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__swtestUnmapFrom__(pResource, pParams); +} + +static inline void swtestControl_Epilogue_DISPATCH(struct SoftwareMethodTest *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__swtestControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS swtestControlLookup_DISPATCH(struct SoftwareMethodTest *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__swtestControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle swtestGetInternalObjectHandle_DISPATCH(struct SoftwareMethodTest *pGpuResource) { + return pGpuResource->__swtestGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS swtestControl_DISPATCH(struct SoftwareMethodTest *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__swtestControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS swtestUnmap_DISPATCH(struct SoftwareMethodTest *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__swtestUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS swtestGetMemInterMapParams_DISPATCH(struct SoftwareMethodTest *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__swtestGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS swtestGetMemoryMappingDescriptor_DISPATCH(struct SoftwareMethodTest *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__swtestGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool swtestIsSwMethodStalling_DISPATCH(struct SoftwareMethodTest *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__swtestIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS swtestControlFilter_DISPATCH(struct SoftwareMethodTest *pResource, 
struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__swtestControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS swtestUnregisterEvent_DISPATCH(struct SoftwareMethodTest *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__swtestUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool swtestCanCopy_DISPATCH(struct SoftwareMethodTest *pResource) { + return pResource->__swtestCanCopy__(pResource); +} + +static inline void swtestPreDestruct_DISPATCH(struct SoftwareMethodTest *pResource) { + pResource->__swtestPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *swtestGetNotificationListPtr_DISPATCH(struct SoftwareMethodTest *pNotifier) { + return pNotifier->__swtestGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *swtestGetNotificationShare_DISPATCH(struct SoftwareMethodTest *pNotifier) { + return pNotifier->__swtestGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS swtestMap_DISPATCH(struct SoftwareMethodTest *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__swtestMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS swtestGetOrAllocNotifShare_DISPATCH(struct SoftwareMethodTest *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__swtestGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS swtestConstruct_IMPL(struct SoftwareMethodTest *arg_pSwTest, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_swtestConstruct(arg_pSwTest, arg_pCallContext, arg_pParams) swtestConstruct_IMPL(arg_pSwTest, arg_pCallContext, arg_pParams) +void swtestDestruct_IMPL(struct SoftwareMethodTest *pSwTest); +#define __nvoc_swtestDestruct(pSwTest) swtestDestruct_IMPL(pSwTest) +#undef PRIVATE_FIELD + + +#endif // _SW_TEST_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SW_TEST_NVOC_H_ diff --git a/src/nvidia/generated/g_swintr_nvoc.c b/src/nvidia/generated/g_swintr_nvoc.c new file mode 100644 index 000000000..b1b7e5705 --- /dev/null +++ b/src/nvidia/generated/g_swintr_nvoc.c @@ -0,0 +1,401 @@ +#define NVOC_SWINTR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_swintr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x5ca633 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SwIntr; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJINTRABLE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_SwIntr(SwIntr*, RmHalspecOwner* ); +void __nvoc_init_funcTable_SwIntr(SwIntr*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_SwIntr(SwIntr*, RmHalspecOwner* ); +void __nvoc_init_dataField_SwIntr(SwIntr*, RmHalspecOwner* ); +void __nvoc_dtor_SwIntr(SwIntr*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_SwIntr; + +static const struct NVOC_RTTI __nvoc_rtti_SwIntr_SwIntr = { + /*pClassDef=*/ 
&__nvoc_class_def_SwIntr, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SwIntr, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_SwIntr_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SwIntr, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SwIntr_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SwIntr, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SwIntr_OBJINTRABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJINTRABLE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SwIntr, __nvoc_base_OBJINTRABLE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SwIntr_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SwIntr, __nvoc_base_IntrService), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_SwIntr = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_SwIntr_SwIntr, + &__nvoc_rtti_SwIntr_IntrService, + &__nvoc_rtti_SwIntr_OBJINTRABLE, + &__nvoc_rtti_SwIntr_OBJENGSTATE, + &__nvoc_rtti_SwIntr_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_SwIntr = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SwIntr), + /*classId=*/ classId(SwIntr), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SwIntr", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SwIntr, + /*pCastInfo=*/ &__nvoc_castinfo_SwIntr, + /*pExportInfo=*/ &__nvoc_export_info_SwIntr +}; + +static void __nvoc_thunk_SwIntr_intrservRegisterIntrService(OBJGPU *pGpu, struct IntrService *pSwIntr, IntrServiceRecord pRecords[155]) { + swintrRegisterIntrService(pGpu, (struct SwIntr *)(((unsigned char *)pSwIntr) - __nvoc_rtti_SwIntr_IntrService.offset), pRecords); +} + +static NvU32 __nvoc_thunk_SwIntr_intrservServiceInterrupt(OBJGPU *pGpu, struct IntrService *pSwIntr, IntrServiceServiceInterruptArguments *pParams) { + return swintrServiceInterrupt(pGpu, (struct SwIntr *)(((unsigned char *)pSwIntr) - __nvoc_rtti_SwIntr_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_SwIntr_intrableGetKernelIntrVectors(OBJGPU *pGpu, struct OBJINTRABLE *pSwIntr, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return swintrGetKernelIntrVectors(pGpu, (struct SwIntr *)(((unsigned char *)pSwIntr) - __nvoc_rtti_SwIntr_OBJINTRABLE.offset), maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +static void __nvoc_thunk_OBJENGSTATE_swintrStateDestroy(POBJGPU pGpu, struct SwIntr *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_swintrFreeTunableState(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrCompareTunableState(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static NvBool __nvoc_thunk_IntrService_swintrClearInterrupt(OBJGPU *pGpu, struct SwIntr *pIntrService, IntrServiceClearInterruptArguments 
*pParams) { + return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_SwIntr_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJINTRABLE_swintrGetNotificationIntrVector(OBJGPU *pGpu, struct SwIntr *pIntrable, NvU32 *pIntrVector) { + return intrableGetNotificationIntrVector(pGpu, (struct OBJINTRABLE *)(((unsigned char *)pIntrable) + __nvoc_rtti_SwIntr_OBJINTRABLE.offset), pIntrVector); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_swintrIsPresent(POBJGPU pGpu, struct SwIntr *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrReconcileTunableState(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStateLoad(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJINTRABLE_swintrGetPhysicalIntrVectors(OBJGPU *pGpu, struct SwIntr *pIntrable, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return intrableGetPhysicalIntrVectors(pGpu, (struct OBJINTRABLE *)(((unsigned char *)pIntrable) + __nvoc_rtti_SwIntr_OBJINTRABLE.offset), maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStateUnload(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJINTRABLE_swintrSetNotificationIntrVector(OBJGPU *pGpu, struct SwIntr *pIntrable, NvU32 intrVector) { + return intrableSetNotificationIntrVector(pGpu, (struct OBJINTRABLE *)(((unsigned char *)pIntrable) + __nvoc_rtti_SwIntr_OBJINTRABLE.offset), intrVector); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStateInitLocked(POBJGPU pGpu, struct SwIntr *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStatePreLoad(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStatePostUnload(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStatePreUnload(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrGetTunableState(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStateInitUnlocked(POBJGPU pGpu, 
struct SwIntr *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_swintrInitMissing(POBJGPU pGpu, struct SwIntr *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStatePreInitLocked(POBJGPU pGpu, struct SwIntr *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStatePreInitUnlocked(POBJGPU pGpu, struct SwIntr *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_IntrService_swintrServiceNotificationInterrupt(OBJGPU *pGpu, struct SwIntr *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return intrservServiceNotificationInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_SwIntr_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrStatePostLoad(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrAllocTunableState(POBJGPU pGpu, struct SwIntr *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrSetTunableState(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_swintrConstructEngine(POBJGPU pGpu, struct SwIntr *pEngstate, ENGDESCRIPTOR arg0) { + return engstateConstructEngine(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_SwIntr_OBJENGSTATE.offset), arg0); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_SwIntr = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_OBJINTRABLE(OBJINTRABLE*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_SwIntr(SwIntr *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_OBJINTRABLE(&pThis->__nvoc_base_OBJINTRABLE); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_SwIntr(SwIntr *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS 
__nvoc_ctor_OBJENGSTATE(OBJENGSTATE* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJINTRABLE(OBJINTRABLE* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* , RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_SwIntr(SwIntr *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_SwIntr_fail_OBJENGSTATE; + status = __nvoc_ctor_OBJINTRABLE(&pThis->__nvoc_base_OBJINTRABLE, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_SwIntr_fail_OBJINTRABLE; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_ctor_SwIntr_fail_IntrService; + __nvoc_init_dataField_SwIntr(pThis, pRmhalspecowner); + goto __nvoc_ctor_SwIntr_exit; // Success + +__nvoc_ctor_SwIntr_fail_IntrService: + __nvoc_dtor_OBJINTRABLE(&pThis->__nvoc_base_OBJINTRABLE); +__nvoc_ctor_SwIntr_fail_OBJINTRABLE: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_SwIntr_fail_OBJENGSTATE: +__nvoc_ctor_SwIntr_exit: + + return status; +} + +static void __nvoc_init_funcTable_SwIntr_1(SwIntr *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__swintrRegisterIntrService__ = &swintrRegisterIntrService_IMPL; + + pThis->__swintrServiceInterrupt__ = &swintrServiceInterrupt_IMPL; + + // Hal function -- swintrGetKernelIntrVectors + if (0) + { + } + // default + else + { + pThis->__swintrGetKernelIntrVectors__ = &swintrGetKernelIntrVectors_46f6a7; + } + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_SwIntr_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceInterrupt__ = &__nvoc_thunk_SwIntr_intrservServiceInterrupt; + + pThis->__nvoc_base_OBJINTRABLE.__intrableGetKernelIntrVectors__ = &__nvoc_thunk_SwIntr_intrableGetKernelIntrVectors; + + pThis->__swintrStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_swintrStateDestroy; + + pThis->__swintrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_swintrFreeTunableState; + + pThis->__swintrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_swintrCompareTunableState; + + pThis->__swintrClearInterrupt__ = &__nvoc_thunk_IntrService_swintrClearInterrupt; + + pThis->__swintrGetNotificationIntrVector__ = &__nvoc_thunk_OBJINTRABLE_swintrGetNotificationIntrVector; + + pThis->__swintrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_swintrIsPresent; + + pThis->__swintrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_swintrReconcileTunableState; + + pThis->__swintrStateLoad__ = &__nvoc_thunk_OBJENGSTATE_swintrStateLoad; + + pThis->__swintrGetPhysicalIntrVectors__ = &__nvoc_thunk_OBJINTRABLE_swintrGetPhysicalIntrVectors; + + pThis->__swintrStateUnload__ = &__nvoc_thunk_OBJENGSTATE_swintrStateUnload; + + pThis->__swintrSetNotificationIntrVector__ = &__nvoc_thunk_OBJINTRABLE_swintrSetNotificationIntrVector; + + pThis->__swintrStateInitLocked__ = 
&__nvoc_thunk_OBJENGSTATE_swintrStateInitLocked; + + pThis->__swintrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_swintrStatePreLoad; + + pThis->__swintrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_swintrStatePostUnload; + + pThis->__swintrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_swintrStatePreUnload; + + pThis->__swintrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_swintrGetTunableState; + + pThis->__swintrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_swintrStateInitUnlocked; + + pThis->__swintrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_swintrInitMissing; + + pThis->__swintrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_swintrStatePreInitLocked; + + pThis->__swintrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_swintrStatePreInitUnlocked; + + pThis->__swintrServiceNotificationInterrupt__ = &__nvoc_thunk_IntrService_swintrServiceNotificationInterrupt; + + pThis->__swintrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_swintrStatePostLoad; + + pThis->__swintrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_swintrAllocTunableState; + + pThis->__swintrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_swintrSetTunableState; + + pThis->__swintrConstructEngine__ = &__nvoc_thunk_OBJENGSTATE_swintrConstructEngine; +} + +void __nvoc_init_funcTable_SwIntr(SwIntr *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_SwIntr_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*, RmHalspecOwner* ); +void __nvoc_init_OBJINTRABLE(OBJINTRABLE*, RmHalspecOwner* ); +void __nvoc_init_IntrService(IntrService*, RmHalspecOwner* ); +void __nvoc_init_SwIntr(SwIntr *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_SwIntr = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_OBJINTRABLE = &pThis->__nvoc_base_OBJINTRABLE; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE, pRmhalspecowner); + __nvoc_init_OBJINTRABLE(&pThis->__nvoc_base_OBJINTRABLE, pRmhalspecowner); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService, pRmhalspecowner); + __nvoc_init_funcTable_SwIntr(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_SwIntr(SwIntr **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + SwIntr *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(SwIntr)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(SwIntr)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_SwIntr); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_SwIntr(pThis, pRmhalspecowner); + status = __nvoc_ctor_SwIntr(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_SwIntr_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_SwIntr_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS 
__nvoc_objCreateDynamic_SwIntr(SwIntr **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_SwIntr(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_swintr_nvoc.h b/src/nvidia/generated/g_swintr_nvoc.h new file mode 100644 index 000000000..f3b9129b4 --- /dev/null +++ b/src/nvidia/generated/g_swintr_nvoc.h @@ -0,0 +1,280 @@ +#ifndef _G_SWINTR_NVOC_H_ +#define _G_SWINTR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_swintr_nvoc.h" + +#ifndef SWINTR_H +#define SWINTR_H + +/*! + * @file swintr.h + * @brief This class exists to give an OBJENGSTATE responsible for the doorbell interrupts. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/eng_state.h" +#include "kernel/gpu/intr/intr_service.h" +#include "kernel/gpu/intrable/intrable.h" + +#ifdef NVOC_SWINTR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct SwIntr { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct OBJINTRABLE __nvoc_base_OBJINTRABLE; + struct IntrService __nvoc_base_IntrService; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct OBJINTRABLE *__nvoc_pbase_OBJINTRABLE; + struct IntrService *__nvoc_pbase_IntrService; + struct SwIntr *__nvoc_pbase_SwIntr; + void (*__swintrRegisterIntrService__)(OBJGPU *, struct SwIntr *, IntrServiceRecord *); + NvU32 (*__swintrServiceInterrupt__)(OBJGPU *, struct SwIntr *, IntrServiceServiceInterruptArguments *); + NV_STATUS (*__swintrGetKernelIntrVectors__)(OBJGPU *, struct SwIntr *, NvU32, NvU32 *, NvU32 *, NvU32 *); + void (*__swintrStateDestroy__)(POBJGPU, struct SwIntr *); + void (*__swintrFreeTunableState__)(POBJGPU, struct SwIntr *, void *); + NV_STATUS (*__swintrCompareTunableState__)(POBJGPU, struct SwIntr *, void *, void *); + NvBool (*__swintrClearInterrupt__)(OBJGPU *, struct SwIntr *, IntrServiceClearInterruptArguments *); + NV_STATUS (*__swintrGetNotificationIntrVector__)(OBJGPU *, struct SwIntr *, NvU32 *); + NvBool (*__swintrIsPresent__)(POBJGPU, struct SwIntr *); + NV_STATUS (*__swintrReconcileTunableState__)(POBJGPU, struct SwIntr *, void *); + NV_STATUS (*__swintrStateLoad__)(POBJGPU, struct SwIntr *, NvU32); + NV_STATUS (*__swintrGetPhysicalIntrVectors__)(OBJGPU *, struct SwIntr *, NvU32, NvU32 *, NvU32 *, NvU32 *); + NV_STATUS (*__swintrStateUnload__)(POBJGPU, struct SwIntr *, NvU32); + NV_STATUS (*__swintrSetNotificationIntrVector__)(OBJGPU *, struct SwIntr *, NvU32); + NV_STATUS (*__swintrStateInitLocked__)(POBJGPU, struct SwIntr *); + NV_STATUS (*__swintrStatePreLoad__)(POBJGPU, struct SwIntr *, NvU32); + NV_STATUS (*__swintrStatePostUnload__)(POBJGPU, struct SwIntr *, NvU32); + NV_STATUS (*__swintrStatePreUnload__)(POBJGPU, struct SwIntr *, NvU32); + NV_STATUS (*__swintrGetTunableState__)(POBJGPU, struct SwIntr *, void *); + NV_STATUS (*__swintrStateInitUnlocked__)(POBJGPU, struct SwIntr *); + void (*__swintrInitMissing__)(POBJGPU, struct SwIntr *); + NV_STATUS (*__swintrStatePreInitLocked__)(POBJGPU, struct SwIntr *); + NV_STATUS (*__swintrStatePreInitUnlocked__)(POBJGPU, struct SwIntr *); + NV_STATUS (*__swintrServiceNotificationInterrupt__)(OBJGPU *, struct SwIntr *, IntrServiceServiceNotificationInterruptArguments *); + NV_STATUS (*__swintrStatePostLoad__)(POBJGPU, struct SwIntr *, NvU32); + NV_STATUS (*__swintrAllocTunableState__)(POBJGPU, struct SwIntr *, void **); + NV_STATUS (*__swintrSetTunableState__)(POBJGPU, struct SwIntr *, void *); + NV_STATUS (*__swintrConstructEngine__)(POBJGPU, struct SwIntr *, ENGDESCRIPTOR); +}; + +#ifndef __NVOC_CLASS_SwIntr_TYPEDEF__ +#define __NVOC_CLASS_SwIntr_TYPEDEF__ +typedef struct SwIntr SwIntr; +#endif /* __NVOC_CLASS_SwIntr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SwIntr +#define __nvoc_class_id_SwIntr 0x5ca633 +#endif /* __nvoc_class_id_SwIntr */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SwIntr; + +#define __staticCast_SwIntr(pThis) \ + ((pThis)->__nvoc_pbase_SwIntr) + +#ifdef __nvoc_swintr_h_disabled +#define __dynamicCast_SwIntr(pThis) ((SwIntr*)NULL) +#else //__nvoc_swintr_h_disabled +#define 
__dynamicCast_SwIntr(pThis) \ + ((SwIntr*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SwIntr))) +#endif //__nvoc_swintr_h_disabled + +#define PDB_PROP_SWINTR_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_SWINTR_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_SwIntr(SwIntr**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SwIntr(SwIntr**, Dynamic*, NvU32); +#define __objCreate_SwIntr(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_SwIntr((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define swintrRegisterIntrService(pGpu, pSwIntr, pRecords) swintrRegisterIntrService_DISPATCH(pGpu, pSwIntr, pRecords) +#define swintrServiceInterrupt(pGpu, pSwIntr, pParams) swintrServiceInterrupt_DISPATCH(pGpu, pSwIntr, pParams) +#define swintrGetKernelIntrVectors(pGpu, pSwIntr, maxIntrs, pIntrs, pMcEngineIdxs, pCount) swintrGetKernelIntrVectors_DISPATCH(pGpu, pSwIntr, maxIntrs, pIntrs, pMcEngineIdxs, pCount) +#define swintrGetKernelIntrVectors_HAL(pGpu, pSwIntr, maxIntrs, pIntrs, pMcEngineIdxs, pCount) swintrGetKernelIntrVectors_DISPATCH(pGpu, pSwIntr, maxIntrs, pIntrs, pMcEngineIdxs, pCount) +#define swintrStateDestroy(pGpu, pEngstate) swintrStateDestroy_DISPATCH(pGpu, pEngstate) +#define swintrFreeTunableState(pGpu, pEngstate, pTunableState) swintrFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define swintrCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) swintrCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define swintrClearInterrupt(pGpu, pIntrService, pParams) swintrClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define swintrGetNotificationIntrVector(pGpu, pIntrable, pIntrVector) swintrGetNotificationIntrVector_DISPATCH(pGpu, pIntrable, pIntrVector) +#define swintrIsPresent(pGpu, pEngstate) swintrIsPresent_DISPATCH(pGpu, pEngstate) +#define swintrReconcileTunableState(pGpu, pEngstate, pTunableState) swintrReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define swintrStateLoad(pGpu, pEngstate, arg0) swintrStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define swintrGetPhysicalIntrVectors(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount) swintrGetPhysicalIntrVectors_DISPATCH(pGpu, pIntrable, maxIntrs, pIntrs, pMcEngineIdxs, pCount) +#define swintrStateUnload(pGpu, pEngstate, arg0) swintrStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define swintrSetNotificationIntrVector(pGpu, pIntrable, intrVector) swintrSetNotificationIntrVector_DISPATCH(pGpu, pIntrable, intrVector) +#define swintrStateInitLocked(pGpu, pEngstate) swintrStateInitLocked_DISPATCH(pGpu, pEngstate) +#define swintrStatePreLoad(pGpu, pEngstate, arg0) swintrStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define swintrStatePostUnload(pGpu, pEngstate, arg0) swintrStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define swintrStatePreUnload(pGpu, pEngstate, arg0) swintrStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define swintrGetTunableState(pGpu, pEngstate, pTunableState) swintrGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define swintrStateInitUnlocked(pGpu, pEngstate) swintrStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define swintrInitMissing(pGpu, pEngstate) swintrInitMissing_DISPATCH(pGpu, pEngstate) +#define swintrStatePreInitLocked(pGpu, pEngstate) swintrStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define swintrStatePreInitUnlocked(pGpu, pEngstate) swintrStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define 
swintrServiceNotificationInterrupt(pGpu, pIntrService, pParams) swintrServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define swintrStatePostLoad(pGpu, pEngstate, arg0) swintrStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define swintrAllocTunableState(pGpu, pEngstate, ppTunableState) swintrAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define swintrSetTunableState(pGpu, pEngstate, pTunableState) swintrSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define swintrConstructEngine(pGpu, pEngstate, arg0) swintrConstructEngine_DISPATCH(pGpu, pEngstate, arg0) +void swintrRegisterIntrService_IMPL(OBJGPU *pGpu, struct SwIntr *pSwIntr, IntrServiceRecord pRecords[155]); + +static inline void swintrRegisterIntrService_DISPATCH(OBJGPU *pGpu, struct SwIntr *pSwIntr, IntrServiceRecord pRecords[155]) { + pSwIntr->__swintrRegisterIntrService__(pGpu, pSwIntr, pRecords); +} + +NvU32 swintrServiceInterrupt_IMPL(OBJGPU *pGpu, struct SwIntr *pSwIntr, IntrServiceServiceInterruptArguments *pParams); + +static inline NvU32 swintrServiceInterrupt_DISPATCH(OBJGPU *pGpu, struct SwIntr *pSwIntr, IntrServiceServiceInterruptArguments *pParams) { + return pSwIntr->__swintrServiceInterrupt__(pGpu, pSwIntr, pParams); +} + +static inline NV_STATUS swintrGetKernelIntrVectors_46f6a7(OBJGPU *pGpu, struct SwIntr *pSwIntr, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS swintrGetKernelIntrVectors_DISPATCH(OBJGPU *pGpu, struct SwIntr *pSwIntr, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return pSwIntr->__swintrGetKernelIntrVectors__(pGpu, pSwIntr, maxIntrs, pIntrs, pMcEngineIdxs, pCount); +} + +static inline void swintrStateDestroy_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate) { + pEngstate->__swintrStateDestroy__(pGpu, pEngstate); +} + +static inline void swintrFreeTunableState_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunableState) { + pEngstate->__swintrFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS swintrCompareTunableState_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__swintrCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline NvBool swintrClearInterrupt_DISPATCH(OBJGPU *pGpu, struct SwIntr *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__swintrClearInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS swintrGetNotificationIntrVector_DISPATCH(OBJGPU *pGpu, struct SwIntr *pIntrable, NvU32 *pIntrVector) { + return pIntrable->__swintrGetNotificationIntrVector__(pGpu, pIntrable, pIntrVector); +} + +static inline NvBool swintrIsPresent_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate) { + return pEngstate->__swintrIsPresent__(pGpu, pEngstate); +} + +static inline NV_STATUS swintrReconcileTunableState_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunableState) { + return pEngstate->__swintrReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS swintrStateLoad_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return pEngstate->__swintrStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swintrGetPhysicalIntrVectors_DISPATCH(OBJGPU *pGpu, struct SwIntr *pIntrable, NvU32 maxIntrs, NvU32 *pIntrs, NvU32 *pMcEngineIdxs, NvU32 *pCount) { + return pIntrable->__swintrGetPhysicalIntrVectors__(pGpu, pIntrable, maxIntrs, 
pIntrs, pMcEngineIdxs, pCount); +} + +static inline NV_STATUS swintrStateUnload_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return pEngstate->__swintrStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swintrSetNotificationIntrVector_DISPATCH(OBJGPU *pGpu, struct SwIntr *pIntrable, NvU32 intrVector) { + return pIntrable->__swintrSetNotificationIntrVector__(pGpu, pIntrable, intrVector); +} + +static inline NV_STATUS swintrStateInitLocked_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate) { + return pEngstate->__swintrStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS swintrStatePreLoad_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return pEngstate->__swintrStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swintrStatePostUnload_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return pEngstate->__swintrStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swintrStatePreUnload_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return pEngstate->__swintrStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swintrGetTunableState_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunableState) { + return pEngstate->__swintrGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS swintrStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate) { + return pEngstate->__swintrStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void swintrInitMissing_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate) { + pEngstate->__swintrInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS swintrStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate) { + return pEngstate->__swintrStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS swintrStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate) { + return pEngstate->__swintrStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS swintrServiceNotificationInterrupt_DISPATCH(OBJGPU *pGpu, struct SwIntr *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return pIntrService->__swintrServiceNotificationInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS swintrStatePostLoad_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, NvU32 arg0) { + return pEngstate->__swintrStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS swintrAllocTunableState_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, void **ppTunableState) { + return pEngstate->__swintrAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS swintrSetTunableState_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, void *pTunableState) { + return pEngstate->__swintrSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS swintrConstructEngine_DISPATCH(POBJGPU pGpu, struct SwIntr *pEngstate, ENGDESCRIPTOR arg0) { + return pEngstate->__swintrConstructEngine__(pGpu, pEngstate, arg0); +} + +#undef PRIVATE_FIELD + + +#endif // SWINTR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SWINTR_NVOC_H_ diff --git a/src/nvidia/generated/g_syncgpuboost_nvoc.c b/src/nvidia/generated/g_syncgpuboost_nvoc.c new file mode 100644 index 000000000..1f30c80d7 --- /dev/null +++ b/src/nvidia/generated/g_syncgpuboost_nvoc.c @@ -0,0 +1,294 @@ +#define NVOC_SYNCGPUBOOST_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" 
+#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_syncgpuboost_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xc7e30b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SyncGpuBoost; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +void __nvoc_init_SyncGpuBoost(SyncGpuBoost*); +void __nvoc_init_funcTable_SyncGpuBoost(SyncGpuBoost*); +NV_STATUS __nvoc_ctor_SyncGpuBoost(SyncGpuBoost*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_SyncGpuBoost(SyncGpuBoost*); +void __nvoc_dtor_SyncGpuBoost(SyncGpuBoost*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_SyncGpuBoost; + +static const struct NVOC_RTTI __nvoc_rtti_SyncGpuBoost_SyncGpuBoost = { + /*pClassDef=*/ &__nvoc_class_def_SyncGpuBoost, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SyncGpuBoost, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncGpuBoost_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncGpuBoost, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncGpuBoost_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncGpuBoost, __nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncGpuBoost_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncGpuBoost, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SyncGpuBoost_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SyncGpuBoost, __nvoc_base_RmResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_SyncGpuBoost = { + /*numRelatives=*/ 5, + /*relatives=*/ { + &__nvoc_rtti_SyncGpuBoost_SyncGpuBoost, + &__nvoc_rtti_SyncGpuBoost_RmResource, + &__nvoc_rtti_SyncGpuBoost_RmResourceCommon, + &__nvoc_rtti_SyncGpuBoost_RsResource, + &__nvoc_rtti_SyncGpuBoost_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_SyncGpuBoost = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SyncGpuBoost), + /*classId=*/ classId(SyncGpuBoost), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SyncGpuBoost", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SyncGpuBoost, + /*pCastInfo=*/ &__nvoc_castinfo_SyncGpuBoost, + /*pExportInfo=*/ &__nvoc_export_info_SyncGpuBoost +}; + +static NvBool __nvoc_thunk_RmResource_syncgpuboostShareCallback(struct SyncGpuBoost *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RmResource_syncgpuboostCheckMemInterUnmap(struct SyncGpuBoost *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char 
*)pRmResource) + __nvoc_rtti_SyncGpuBoost_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncgpuboostControl(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_syncgpuboostGetMemInterMapParams(struct SyncGpuBoost *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_SyncGpuBoost_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_syncgpuboostGetMemoryMappingDescriptor(struct SyncGpuBoost *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_SyncGpuBoost_RmResource.offset), ppMemDesc); +} + +static NvU32 __nvoc_thunk_RsResource_syncgpuboostGetRefCount(struct SyncGpuBoost *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncgpuboostControlFilter(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_syncgpuboostAddAdditionalDependants(struct RsClient *pClient, struct SyncGpuBoost *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncgpuboostUnmap(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_syncgpuboostControl_Prologue(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_RsResource_syncgpuboostCanCopy(struct SyncGpuBoost *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncgpuboostMapTo(struct SyncGpuBoost *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RsResource_syncgpuboostPreDestruct(struct SyncGpuBoost *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncgpuboostUnmapFrom(struct SyncGpuBoost *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset), pParams); +} + +static void 
__nvoc_thunk_RmResource_syncgpuboostControl_Epilogue(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncgpuboostControlLookup(struct SyncGpuBoost *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_RsResource_syncgpuboostMap(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RsResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_syncgpuboostAccessCallback(struct SyncGpuBoost *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SyncGpuBoost_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_SyncGpuBoost = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_RmResource(RmResource*); +void __nvoc_dtor_SyncGpuBoost(SyncGpuBoost *pThis) { + __nvoc_syncgpuboostDestruct(pThis); + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_SyncGpuBoost(SyncGpuBoost *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_SyncGpuBoost(SyncGpuBoost *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SyncGpuBoost_fail_RmResource; + __nvoc_init_dataField_SyncGpuBoost(pThis); + + status = __nvoc_syncgpuboostConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SyncGpuBoost_fail__init; + goto __nvoc_ctor_SyncGpuBoost_exit; // Success + +__nvoc_ctor_SyncGpuBoost_fail__init: + __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource); +__nvoc_ctor_SyncGpuBoost_fail_RmResource: +__nvoc_ctor_SyncGpuBoost_exit: + + return status; +} + +static void __nvoc_init_funcTable_SyncGpuBoost_1(SyncGpuBoost *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__syncgpuboostShareCallback__ = &__nvoc_thunk_RmResource_syncgpuboostShareCallback; + + pThis->__syncgpuboostCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_syncgpuboostCheckMemInterUnmap; + + pThis->__syncgpuboostControl__ = &__nvoc_thunk_RsResource_syncgpuboostControl; + + pThis->__syncgpuboostGetMemInterMapParams__ = &__nvoc_thunk_RmResource_syncgpuboostGetMemInterMapParams; + + pThis->__syncgpuboostGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_syncgpuboostGetMemoryMappingDescriptor; + + pThis->__syncgpuboostGetRefCount__ = &__nvoc_thunk_RsResource_syncgpuboostGetRefCount; + + pThis->__syncgpuboostControlFilter__ = &__nvoc_thunk_RsResource_syncgpuboostControlFilter; + + 
pThis->__syncgpuboostAddAdditionalDependants__ = &__nvoc_thunk_RsResource_syncgpuboostAddAdditionalDependants; + + pThis->__syncgpuboostUnmap__ = &__nvoc_thunk_RsResource_syncgpuboostUnmap; + + pThis->__syncgpuboostControl_Prologue__ = &__nvoc_thunk_RmResource_syncgpuboostControl_Prologue; + + pThis->__syncgpuboostCanCopy__ = &__nvoc_thunk_RsResource_syncgpuboostCanCopy; + + pThis->__syncgpuboostMapTo__ = &__nvoc_thunk_RsResource_syncgpuboostMapTo; + + pThis->__syncgpuboostPreDestruct__ = &__nvoc_thunk_RsResource_syncgpuboostPreDestruct; + + pThis->__syncgpuboostUnmapFrom__ = &__nvoc_thunk_RsResource_syncgpuboostUnmapFrom; + + pThis->__syncgpuboostControl_Epilogue__ = &__nvoc_thunk_RmResource_syncgpuboostControl_Epilogue; + + pThis->__syncgpuboostControlLookup__ = &__nvoc_thunk_RsResource_syncgpuboostControlLookup; + + pThis->__syncgpuboostMap__ = &__nvoc_thunk_RsResource_syncgpuboostMap; + + pThis->__syncgpuboostAccessCallback__ = &__nvoc_thunk_RmResource_syncgpuboostAccessCallback; +} + +void __nvoc_init_funcTable_SyncGpuBoost(SyncGpuBoost *pThis) { + __nvoc_init_funcTable_SyncGpuBoost_1(pThis); +} + +void __nvoc_init_RmResource(RmResource*); +void __nvoc_init_SyncGpuBoost(SyncGpuBoost *pThis) { + pThis->__nvoc_pbase_SyncGpuBoost = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource; + __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource); + __nvoc_init_funcTable_SyncGpuBoost(pThis); +} + +NV_STATUS __nvoc_objCreate_SyncGpuBoost(SyncGpuBoost **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + SyncGpuBoost *pThis; + + pThis = portMemAllocNonPaged(sizeof(SyncGpuBoost)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(SyncGpuBoost)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_SyncGpuBoost); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_SyncGpuBoost(pThis); + status = __nvoc_ctor_SyncGpuBoost(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_SyncGpuBoost_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_SyncGpuBoost_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_SyncGpuBoost(SyncGpuBoost **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_SyncGpuBoost(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_syncgpuboost_nvoc.h b/src/nvidia/generated/g_syncgpuboost_nvoc.h new file mode 100644 index 
000000000..1fb151dbc --- /dev/null +++ b/src/nvidia/generated/g_syncgpuboost_nvoc.h @@ -0,0 +1,206 @@ +#ifndef _G_SYNCGPUBOOST_NVOC_H_ +#define _G_SYNCGPUBOOST_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_syncgpuboost_nvoc.h" + +#ifndef _SYNCGPUBOOST_H_ +#define _SYNCGPUBOOST_H_ + +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "rmapi/resource.h" +#include "class/cl0060.h" + +#ifdef NVOC_SYNCGPUBOOST_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct SyncGpuBoost { + const struct NVOC_RTTI *__nvoc_rtti; + struct RmResource __nvoc_base_RmResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct SyncGpuBoost *__nvoc_pbase_SyncGpuBoost; + NvBool (*__syncgpuboostShareCallback__)(struct SyncGpuBoost *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__syncgpuboostCheckMemInterUnmap__)(struct SyncGpuBoost *, NvBool); + NV_STATUS (*__syncgpuboostControl__)(struct SyncGpuBoost *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__syncgpuboostGetMemInterMapParams__)(struct SyncGpuBoost *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__syncgpuboostGetMemoryMappingDescriptor__)(struct SyncGpuBoost *, struct MEMORY_DESCRIPTOR **); + NvU32 (*__syncgpuboostGetRefCount__)(struct SyncGpuBoost *); + NV_STATUS (*__syncgpuboostControlFilter__)(struct SyncGpuBoost *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__syncgpuboostAddAdditionalDependants__)(struct RsClient *, struct SyncGpuBoost *, RsResourceRef *); + NV_STATUS (*__syncgpuboostUnmap__)(struct SyncGpuBoost *, struct CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__syncgpuboostControl_Prologue__)(struct SyncGpuBoost *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__syncgpuboostCanCopy__)(struct SyncGpuBoost *); + NV_STATUS (*__syncgpuboostMapTo__)(struct SyncGpuBoost *, RS_RES_MAP_TO_PARAMS *); + void (*__syncgpuboostPreDestruct__)(struct SyncGpuBoost *); + NV_STATUS (*__syncgpuboostUnmapFrom__)(struct SyncGpuBoost *, 
RS_RES_UNMAP_FROM_PARAMS *); + void (*__syncgpuboostControl_Epilogue__)(struct SyncGpuBoost *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__syncgpuboostControlLookup__)(struct SyncGpuBoost *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__syncgpuboostMap__)(struct SyncGpuBoost *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__syncgpuboostAccessCallback__)(struct SyncGpuBoost *, struct RsClient *, void *, RsAccessRight); + NvU32 gpuBoostGroupId; +}; + +#ifndef __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ +#define __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ +typedef struct SyncGpuBoost SyncGpuBoost; +#endif /* __NVOC_CLASS_SyncGpuBoost_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SyncGpuBoost +#define __nvoc_class_id_SyncGpuBoost 0xc7e30b +#endif /* __nvoc_class_id_SyncGpuBoost */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SyncGpuBoost; + +#define __staticCast_SyncGpuBoost(pThis) \ + ((pThis)->__nvoc_pbase_SyncGpuBoost) + +#ifdef __nvoc_syncgpuboost_h_disabled +#define __dynamicCast_SyncGpuBoost(pThis) ((SyncGpuBoost*)NULL) +#else //__nvoc_syncgpuboost_h_disabled +#define __dynamicCast_SyncGpuBoost(pThis) \ + ((SyncGpuBoost*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SyncGpuBoost))) +#endif //__nvoc_syncgpuboost_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_SyncGpuBoost(SyncGpuBoost**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SyncGpuBoost(SyncGpuBoost**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_SyncGpuBoost(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_SyncGpuBoost((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define syncgpuboostShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) syncgpuboostShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define syncgpuboostCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) syncgpuboostCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define syncgpuboostControl(pResource, pCallContext, pParams) syncgpuboostControl_DISPATCH(pResource, pCallContext, pParams) +#define syncgpuboostGetMemInterMapParams(pRmResource, pParams) syncgpuboostGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define syncgpuboostGetMemoryMappingDescriptor(pRmResource, ppMemDesc) syncgpuboostGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define syncgpuboostGetRefCount(pResource) syncgpuboostGetRefCount_DISPATCH(pResource) +#define syncgpuboostControlFilter(pResource, pCallContext, pParams) syncgpuboostControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define syncgpuboostAddAdditionalDependants(pClient, pResource, pReference) syncgpuboostAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define syncgpuboostUnmap(pResource, pCallContext, pCpuMapping) syncgpuboostUnmap_DISPATCH(pResource, pCallContext, pCpuMapping) +#define syncgpuboostControl_Prologue(pResource, pCallContext, pParams) syncgpuboostControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define syncgpuboostCanCopy(pResource) syncgpuboostCanCopy_DISPATCH(pResource) +#define syncgpuboostMapTo(pResource, pParams) syncgpuboostMapTo_DISPATCH(pResource, pParams) +#define syncgpuboostPreDestruct(pResource) syncgpuboostPreDestruct_DISPATCH(pResource) +#define syncgpuboostUnmapFrom(pResource, pParams) 
syncgpuboostUnmapFrom_DISPATCH(pResource, pParams) +#define syncgpuboostControl_Epilogue(pResource, pCallContext, pParams) syncgpuboostControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define syncgpuboostControlLookup(pResource, pParams, ppEntry) syncgpuboostControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define syncgpuboostMap(pResource, pCallContext, pParams, pCpuMapping) syncgpuboostMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping) +#define syncgpuboostAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) syncgpuboostAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NvBool syncgpuboostShareCallback_DISPATCH(struct SyncGpuBoost *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__syncgpuboostShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS syncgpuboostCheckMemInterUnmap_DISPATCH(struct SyncGpuBoost *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__syncgpuboostCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS syncgpuboostControl_DISPATCH(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__syncgpuboostControl__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS syncgpuboostGetMemInterMapParams_DISPATCH(struct SyncGpuBoost *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__syncgpuboostGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS syncgpuboostGetMemoryMappingDescriptor_DISPATCH(struct SyncGpuBoost *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__syncgpuboostGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvU32 syncgpuboostGetRefCount_DISPATCH(struct SyncGpuBoost *pResource) { + return pResource->__syncgpuboostGetRefCount__(pResource); +} + +static inline NV_STATUS syncgpuboostControlFilter_DISPATCH(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__syncgpuboostControlFilter__(pResource, pCallContext, pParams); +} + +static inline void syncgpuboostAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct SyncGpuBoost *pResource, RsResourceRef *pReference) { + pResource->__syncgpuboostAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS syncgpuboostUnmap_DISPATCH(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pResource->__syncgpuboostUnmap__(pResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS syncgpuboostControl_Prologue_DISPATCH(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__syncgpuboostControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool syncgpuboostCanCopy_DISPATCH(struct SyncGpuBoost *pResource) { + return pResource->__syncgpuboostCanCopy__(pResource); +} + +static inline NV_STATUS syncgpuboostMapTo_DISPATCH(struct SyncGpuBoost *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__syncgpuboostMapTo__(pResource, pParams); +} + +static inline void syncgpuboostPreDestruct_DISPATCH(struct SyncGpuBoost *pResource) { + pResource->__syncgpuboostPreDestruct__(pResource); +} + 
+static inline NV_STATUS syncgpuboostUnmapFrom_DISPATCH(struct SyncGpuBoost *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__syncgpuboostUnmapFrom__(pResource, pParams); +} + +static inline void syncgpuboostControl_Epilogue_DISPATCH(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__syncgpuboostControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS syncgpuboostControlLookup_DISPATCH(struct SyncGpuBoost *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__syncgpuboostControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS syncgpuboostMap_DISPATCH(struct SyncGpuBoost *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pResource->__syncgpuboostMap__(pResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool syncgpuboostAccessCallback_DISPATCH(struct SyncGpuBoost *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__syncgpuboostAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS syncgpuboostConstruct_IMPL(struct SyncGpuBoost *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_syncgpuboostConstruct(arg_pResource, arg_pCallContext, arg_pParams) syncgpuboostConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void syncgpuboostDestruct_IMPL(struct SyncGpuBoost *pResource); +#define __nvoc_syncgpuboostDestruct(pResource) syncgpuboostDestruct_IMPL(pResource) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SYNCGPUBOOST_NVOC_H_ diff --git a/src/nvidia/generated/g_system_mem_nvoc.c b/src/nvidia/generated/g_system_mem_nvoc.c new file mode 100644 index 000000000..ddfa4c0c4 --- /dev/null +++ b/src/nvidia/generated/g_system_mem_nvoc.c @@ -0,0 +1,378 @@ +#define NVOC_SYSTEM_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_system_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x007a98 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +void __nvoc_init_SystemMemory(SystemMemory*); +void __nvoc_init_funcTable_SystemMemory(SystemMemory*); +NV_STATUS __nvoc_ctor_SystemMemory(SystemMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_SystemMemory(SystemMemory*); +void __nvoc_dtor_SystemMemory(SystemMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_SystemMemory; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_SystemMemory = { + /*pClassDef=*/ &__nvoc_class_def_SystemMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_SystemMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI 
__nvoc_rtti_SystemMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory), +}; + +static const struct NVOC_RTTI __nvoc_rtti_SystemMemory_StandardMemory = { + /*pClassDef=*/ &__nvoc_class_def_StandardMemory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(SystemMemory, __nvoc_base_StandardMemory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_SystemMemory = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_SystemMemory_SystemMemory, + &__nvoc_rtti_SystemMemory_StandardMemory, + &__nvoc_rtti_SystemMemory_Memory, + &__nvoc_rtti_SystemMemory_RmResource, + &__nvoc_rtti_SystemMemory_RmResourceCommon, + &__nvoc_rtti_SystemMemory_RsResource, + &__nvoc_rtti_SystemMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(SystemMemory), + /*classId=*/ classId(SystemMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "SystemMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_SystemMemory, + /*pCastInfo=*/ &__nvoc_castinfo_SystemMemory, + /*pExportInfo=*/ &__nvoc_export_info_SystemMemory +}; + +static NV_STATUS __nvoc_thunk_Memory_sysmemCheckMemInterUnmap(struct SystemMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemControl(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemUnmap(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemGetMemInterMapParams(struct SystemMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned 
char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemGetMemoryMappingDescriptor(struct SystemMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemGetMapAddrSpace(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_sysmemShareCallback(struct SystemMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_sysmemControlFilter(struct SystemMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_sysmemAddAdditionalDependants(struct RsClient *pClient, struct SystemMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_sysmemGetRefCount(struct SystemMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_sysmemMapTo(struct SystemMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_sysmemControl_Prologue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_StandardMemory_sysmemCanCopy(struct SystemMemory *pStandardMemory) { + return stdmemCanCopy((struct StandardMemory *)(((unsigned char *)pStandardMemory) + __nvoc_rtti_SystemMemory_StandardMemory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemIsReady(struct SystemMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemCheckCopyPermissions(struct SystemMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_sysmemPreDestruct(struct SystemMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_sysmemUnmapFrom(struct SystemMemory *pResource, RS_RES_UNMAP_FROM_PARAMS 
*pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_sysmemControl_Epilogue(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_sysmemControlLookup(struct SystemMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_sysmemMap(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_SystemMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_sysmemAccessCallback(struct SystemMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_SystemMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_SystemMemory[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3e0102u, + /*paramSize=*/ sizeof(NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_SystemMemory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "sysmemCtrlCmdGetSurfaceNumPhysPages" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) sysmemCtrlCmdGetSurfacePhysPages_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x3e0103u, + /*paramSize=*/ sizeof(NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_SystemMemory.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "sysmemCtrlCmdGetSurfacePhysPages" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_SystemMemory = +{ + /*numEntries=*/ 2, + /*pExportEntries=*/ __nvoc_exported_method_def_SystemMemory +}; + +void __nvoc_dtor_StandardMemory(StandardMemory*); +void __nvoc_dtor_SystemMemory(SystemMemory *pThis) { + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_SystemMemory(SystemMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_SystemMemory(SystemMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = 
__nvoc_ctor_StandardMemory(&pThis->__nvoc_base_StandardMemory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SystemMemory_fail_StandardMemory; + __nvoc_init_dataField_SystemMemory(pThis); + + status = __nvoc_sysmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_SystemMemory_fail__init; + goto __nvoc_ctor_SystemMemory_exit; // Success + +__nvoc_ctor_SystemMemory_fail__init: + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); +__nvoc_ctor_SystemMemory_fail_StandardMemory: +__nvoc_ctor_SystemMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_SystemMemory_1(SystemMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__sysmemCtrlCmdGetSurfaceNumPhysPages__ = &sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__sysmemCtrlCmdGetSurfacePhysPages__ = &sysmemCtrlCmdGetSurfacePhysPages_IMPL; +#endif + + pThis->__sysmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_sysmemCheckMemInterUnmap; + + pThis->__sysmemControl__ = &__nvoc_thunk_Memory_sysmemControl; + + pThis->__sysmemUnmap__ = &__nvoc_thunk_Memory_sysmemUnmap; + + pThis->__sysmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_sysmemGetMemInterMapParams; + + pThis->__sysmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_sysmemGetMemoryMappingDescriptor; + + pThis->__sysmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_sysmemGetMapAddrSpace; + + pThis->__sysmemShareCallback__ = &__nvoc_thunk_RmResource_sysmemShareCallback; + + pThis->__sysmemControlFilter__ = &__nvoc_thunk_RsResource_sysmemControlFilter; + + pThis->__sysmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_sysmemAddAdditionalDependants; + + pThis->__sysmemGetRefCount__ = &__nvoc_thunk_RsResource_sysmemGetRefCount; + + pThis->__sysmemMapTo__ = &__nvoc_thunk_RsResource_sysmemMapTo; + + pThis->__sysmemControl_Prologue__ = &__nvoc_thunk_RmResource_sysmemControl_Prologue; + + pThis->__sysmemCanCopy__ = &__nvoc_thunk_StandardMemory_sysmemCanCopy; + + pThis->__sysmemIsReady__ = &__nvoc_thunk_Memory_sysmemIsReady; + + pThis->__sysmemCheckCopyPermissions__ = &__nvoc_thunk_Memory_sysmemCheckCopyPermissions; + + pThis->__sysmemPreDestruct__ = &__nvoc_thunk_RsResource_sysmemPreDestruct; + + pThis->__sysmemUnmapFrom__ = &__nvoc_thunk_RsResource_sysmemUnmapFrom; + + pThis->__sysmemControl_Epilogue__ = &__nvoc_thunk_RmResource_sysmemControl_Epilogue; + + pThis->__sysmemControlLookup__ = &__nvoc_thunk_RsResource_sysmemControlLookup; + + pThis->__sysmemMap__ = &__nvoc_thunk_Memory_sysmemMap; + + pThis->__sysmemAccessCallback__ = &__nvoc_thunk_RmResource_sysmemAccessCallback; +} + +void __nvoc_init_funcTable_SystemMemory(SystemMemory *pThis) { + __nvoc_init_funcTable_SystemMemory_1(pThis); +} + +void __nvoc_init_StandardMemory(StandardMemory*); +void __nvoc_init_SystemMemory(SystemMemory *pThis) { + pThis->__nvoc_pbase_SystemMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource; + 
pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory; + pThis->__nvoc_pbase_StandardMemory = &pThis->__nvoc_base_StandardMemory; + __nvoc_init_StandardMemory(&pThis->__nvoc_base_StandardMemory); + __nvoc_init_funcTable_SystemMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_SystemMemory(SystemMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + SystemMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(SystemMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(SystemMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_SystemMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_SystemMemory(pThis); + status = __nvoc_ctor_SystemMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_SystemMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_SystemMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_SystemMemory(SystemMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_SystemMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_system_mem_nvoc.h b/src/nvidia/generated/g_system_mem_nvoc.h new file mode 100644 index 000000000..e099d44bb --- /dev/null +++ b/src/nvidia/generated/g_system_mem_nvoc.h @@ -0,0 +1,254 @@ +#ifndef _G_SYSTEM_MEM_NVOC_H_ +#define _G_SYSTEM_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_system_mem_nvoc.h" + +#ifndef _SYSTEM_MEMORY_H_ +#define _SYSTEM_MEMORY_H_ + +#include "mem_mgr/standard_mem.h" +#include "gpu/mem_mgr/heap_base.h" + +#ifdef NVOC_SYSTEM_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct SystemMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct StandardMemory __nvoc_base_StandardMemory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct StandardMemory *__nvoc_pbase_StandardMemory; + struct SystemMemory *__nvoc_pbase_SystemMemory; + NV_STATUS (*__sysmemCtrlCmdGetSurfaceNumPhysPages__)(struct SystemMemory *, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *); + NV_STATUS (*__sysmemCtrlCmdGetSurfacePhysPages__)(struct SystemMemory *, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *); + NV_STATUS (*__sysmemCheckMemInterUnmap__)(struct SystemMemory *, NvBool); + NV_STATUS (*__sysmemControl__)(struct SystemMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__sysmemUnmap__)(struct SystemMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__sysmemGetMemInterMapParams__)(struct SystemMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__sysmemGetMemoryMappingDescriptor__)(struct SystemMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__sysmemGetMapAddrSpace__)(struct SystemMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__sysmemShareCallback__)(struct SystemMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__sysmemControlFilter__)(struct SystemMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__sysmemAddAdditionalDependants__)(struct RsClient *, struct SystemMemory *, RsResourceRef *); + NvU32 (*__sysmemGetRefCount__)(struct SystemMemory *); + NV_STATUS (*__sysmemMapTo__)(struct SystemMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__sysmemControl_Prologue__)(struct SystemMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__sysmemCanCopy__)(struct SystemMemory *); + NV_STATUS (*__sysmemIsReady__)(struct SystemMemory *); + NV_STATUS (*__sysmemCheckCopyPermissions__)(struct SystemMemory *, struct OBJGPU *, NvHandle); + void (*__sysmemPreDestruct__)(struct SystemMemory *); + NV_STATUS (*__sysmemUnmapFrom__)(struct SystemMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__sysmemControl_Epilogue__)(struct SystemMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__sysmemControlLookup__)(struct SystemMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__sysmemMap__)(struct SystemMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__sysmemAccessCallback__)(struct SystemMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_SystemMemory_TYPEDEF__ +#define __NVOC_CLASS_SystemMemory_TYPEDEF__ +typedef struct SystemMemory SystemMemory; +#endif /* __NVOC_CLASS_SystemMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_SystemMemory +#define 
__nvoc_class_id_SystemMemory 0x007a98 +#endif /* __nvoc_class_id_SystemMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_SystemMemory; + +#define __staticCast_SystemMemory(pThis) \ + ((pThis)->__nvoc_pbase_SystemMemory) + +#ifdef __nvoc_system_mem_h_disabled +#define __dynamicCast_SystemMemory(pThis) ((SystemMemory*)NULL) +#else //__nvoc_system_mem_h_disabled +#define __dynamicCast_SystemMemory(pThis) \ + ((SystemMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(SystemMemory))) +#endif //__nvoc_system_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_SystemMemory(SystemMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_SystemMemory(SystemMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_SystemMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_SystemMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define sysmemCtrlCmdGetSurfaceNumPhysPages(pStandardMemory, pParams) sysmemCtrlCmdGetSurfaceNumPhysPages_DISPATCH(pStandardMemory, pParams) +#define sysmemCtrlCmdGetSurfacePhysPages(pStandardMemory, pParams) sysmemCtrlCmdGetSurfacePhysPages_DISPATCH(pStandardMemory, pParams) +#define sysmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) sysmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define sysmemControl(pMemory, pCallContext, pParams) sysmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define sysmemUnmap(pMemory, pCallContext, pCpuMapping) sysmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define sysmemGetMemInterMapParams(pMemory, pParams) sysmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define sysmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) sysmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define sysmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) sysmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define sysmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) sysmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define sysmemControlFilter(pResource, pCallContext, pParams) sysmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define sysmemAddAdditionalDependants(pClient, pResource, pReference) sysmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define sysmemGetRefCount(pResource) sysmemGetRefCount_DISPATCH(pResource) +#define sysmemMapTo(pResource, pParams) sysmemMapTo_DISPATCH(pResource, pParams) +#define sysmemControl_Prologue(pResource, pCallContext, pParams) sysmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define sysmemCanCopy(pStandardMemory) sysmemCanCopy_DISPATCH(pStandardMemory) +#define sysmemIsReady(pMemory) sysmemIsReady_DISPATCH(pMemory) +#define sysmemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) sysmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define sysmemPreDestruct(pResource) sysmemPreDestruct_DISPATCH(pResource) +#define sysmemUnmapFrom(pResource, pParams) sysmemUnmapFrom_DISPATCH(pResource, pParams) +#define sysmemControl_Epilogue(pResource, pCallContext, pParams) sysmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define sysmemControlLookup(pResource, pParams, ppEntry) sysmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define sysmemMap(pMemory, pCallContext, pParams, pCpuMapping) 
sysmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define sysmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) sysmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS sysmemInitAllocRequest_HMM(struct OBJGPU *pGpu, struct SystemMemory *pSystemMemory, MEMORY_ALLOCATION_REQUEST *pAllocRequest); + +#ifdef __nvoc_system_mem_h_disabled +static inline NV_STATUS sysmemInitAllocRequest(struct OBJGPU *pGpu, struct SystemMemory *pSystemMemory, MEMORY_ALLOCATION_REQUEST *pAllocRequest) { + NV_ASSERT_FAILED_PRECOMP("SystemMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_system_mem_h_disabled +#define sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest) sysmemInitAllocRequest_HMM(pGpu, pSystemMemory, pAllocRequest) +#endif //__nvoc_system_mem_h_disabled + +#define sysmemInitAllocRequest_HAL(pGpu, pSystemMemory, pAllocRequest) sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest) + +NV_STATUS sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams); + +static inline NV_STATUS sysmemCtrlCmdGetSurfaceNumPhysPages_DISPATCH(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams) { + return pStandardMemory->__sysmemCtrlCmdGetSurfaceNumPhysPages__(pStandardMemory, pParams); +} + +NV_STATUS sysmemCtrlCmdGetSurfacePhysPages_IMPL(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams); + +static inline NV_STATUS sysmemCtrlCmdGetSurfacePhysPages_DISPATCH(struct SystemMemory *pStandardMemory, NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams) { + return pStandardMemory->__sysmemCtrlCmdGetSurfacePhysPages__(pStandardMemory, pParams); +} + +static inline NV_STATUS sysmemCheckMemInterUnmap_DISPATCH(struct SystemMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__sysmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS sysmemControl_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__sysmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS sysmemUnmap_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__sysmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS sysmemGetMemInterMapParams_DISPATCH(struct SystemMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__sysmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS sysmemGetMemoryMappingDescriptor_DISPATCH(struct SystemMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__sysmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS sysmemGetMapAddrSpace_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__sysmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool sysmemShareCallback_DISPATCH(struct SystemMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__sysmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS sysmemControlFilter_DISPATCH(struct SystemMemory *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__sysmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void sysmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct SystemMemory *pResource, RsResourceRef *pReference) { + pResource->__sysmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 sysmemGetRefCount_DISPATCH(struct SystemMemory *pResource) { + return pResource->__sysmemGetRefCount__(pResource); +} + +static inline NV_STATUS sysmemMapTo_DISPATCH(struct SystemMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__sysmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS sysmemControl_Prologue_DISPATCH(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__sysmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool sysmemCanCopy_DISPATCH(struct SystemMemory *pStandardMemory) { + return pStandardMemory->__sysmemCanCopy__(pStandardMemory); +} + +static inline NV_STATUS sysmemIsReady_DISPATCH(struct SystemMemory *pMemory) { + return pMemory->__sysmemIsReady__(pMemory); +} + +static inline NV_STATUS sysmemCheckCopyPermissions_DISPATCH(struct SystemMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__sysmemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void sysmemPreDestruct_DISPATCH(struct SystemMemory *pResource) { + pResource->__sysmemPreDestruct__(pResource); +} + +static inline NV_STATUS sysmemUnmapFrom_DISPATCH(struct SystemMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__sysmemUnmapFrom__(pResource, pParams); +} + +static inline void sysmemControl_Epilogue_DISPATCH(struct SystemMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__sysmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS sysmemControlLookup_DISPATCH(struct SystemMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__sysmemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS sysmemMap_DISPATCH(struct SystemMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__sysmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool sysmemAccessCallback_DISPATCH(struct SystemMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__sysmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS sysmemConstruct_IMPL(struct SystemMemory *arg_pStandardMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_sysmemConstruct(arg_pStandardMemory, arg_pCallContext, arg_pParams) sysmemConstruct_IMPL(arg_pStandardMemory, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +NV_STATUS sysmemAllocResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo, + struct SystemMemory *pSystemMemory); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SYSTEM_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_system_nvoc.c b/src/nvidia/generated/g_system_nvoc.c new file mode 100644 index 000000000..6b8117c52 
--- /dev/null +++ b/src/nvidia/generated/g_system_nvoc.c @@ -0,0 +1,182 @@ +#define NVOC_SYSTEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_system_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x40e2c8 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +void __nvoc_init_OBJSYS(OBJSYS*); +void __nvoc_init_funcTable_OBJSYS(OBJSYS*); +NV_STATUS __nvoc_ctor_OBJSYS(OBJSYS*); +void __nvoc_init_dataField_OBJSYS(OBJSYS*); +void __nvoc_dtor_OBJSYS(OBJSYS*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJSYS; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSYS_OBJSYS = { + /*pClassDef=*/ &__nvoc_class_def_OBJSYS, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJSYS, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSYS_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJSYS, __nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJSYS_OBJTRACEABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJSYS, __nvoc_base_OBJTRACEABLE), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJSYS = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_OBJSYS_OBJSYS, + &__nvoc_rtti_OBJSYS_OBJTRACEABLE, + &__nvoc_rtti_OBJSYS_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJSYS), + /*classId=*/ classId(OBJSYS), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJSYS", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJSYS, + /*pCastInfo=*/ &__nvoc_castinfo_OBJSYS, + /*pExportInfo=*/ &__nvoc_export_info_OBJSYS +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJSYS = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJSYS(OBJSYS *pThis) { + __nvoc_sysDestruct(pThis); + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJSYS(OBJSYS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, ((0) || (1))); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, ((1) && !0)); + pThis->setProperty(pThis, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED, ((0) || (0))); + pThis->setProperty(pThis, PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_PRIORITY_BOOST, (0)); + pThis->setProperty(pThis, PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US, 16 * 1000); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* ); +NV_STATUS __nvoc_ctor_OBJSYS(OBJSYS *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJSYS_fail_Object; + status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + if (status != NV_OK) goto 
__nvoc_ctor_OBJSYS_fail_OBJTRACEABLE; + __nvoc_init_dataField_OBJSYS(pThis); + + status = __nvoc_sysConstruct(pThis); + if (status != NV_OK) goto __nvoc_ctor_OBJSYS_fail__init; + goto __nvoc_ctor_OBJSYS_exit; // Success + +__nvoc_ctor_OBJSYS_fail__init: + __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); +__nvoc_ctor_OBJSYS_fail_OBJTRACEABLE: + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); +__nvoc_ctor_OBJSYS_fail_Object: +__nvoc_ctor_OBJSYS_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJSYS_1(OBJSYS *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__sysCaptureState__ = &sysCaptureState_IMPL; +} + +void __nvoc_init_funcTable_OBJSYS(OBJSYS *pThis) { + __nvoc_init_funcTable_OBJSYS_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_OBJSYS(OBJSYS *pThis) { + pThis->__nvoc_pbase_OBJSYS = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE); + __nvoc_init_funcTable_OBJSYS(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJSYS(OBJSYS **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJSYS *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJSYS)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJSYS)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJSYS); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJSYS(pThis); + status = __nvoc_ctor_OBJSYS(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJSYS_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJSYS_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJSYS(OBJSYS **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJSYS(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_system_nvoc.h b/src/nvidia/generated/g_system_nvoc.h new file mode 100644 index 000000000..01bdecb06 --- /dev/null +++ b/src/nvidia/generated/g_system_nvoc.h @@ -0,0 +1,597 @@ +#ifndef _G_SYSTEM_NVOC_H_ +#define _G_SYSTEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_system_nvoc.h" + +#ifndef SYSTEM_H +#define SYSTEM_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for the System Object. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "nvlimits.h" // NV_MAX_DEVICES +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "diagnostics/traceable.h" +#include "nvCpuUuid.h" +#include "os/capability.h" +#include "containers/btree.h" + +#define SYS_GET_INSTANCE() (g_pSys) +#define SYS_GET_GPUMGR(p) ((p)->pGpuMgr) +#define SYS_GET_GSYNCMGR(p) ((p)->pGsyncMgr) +#define SYS_GET_VGPUMGR(p) ((p)->pVgpuMgr) +#define SYS_GET_KERNEL_VGPUMGR(p) (RMCFG_FEATURE_KERNEL_RM ? (p)->pKernelVgpuMgr : NULL) +#define SYS_GET_OS(p) sysGetOs((p)) +#define SYS_GET_PFM(p) ((p)->pPfm) +#define SYS_GET_CL(p) ((p)->pCl) +#define SYS_GET_SWINSTR(p) ((p)->pSwInstr) +#define SYS_GET_GPUACCT(p) ((p)->pGpuAcct) +#define SYS_GET_GPS(p) ((p)->pGps) +#define SYS_GET_RCDB(p) ((p)->pRcDB) +#define SYS_GET_VMM(p) (RMCFG_MODULE_VMM ? (p)->pVmm : NULL) +#define SYS_GET_HYPERVISOR(p) ((p)->pHypervisor) +#define SYS_GET_VRRMGR(p) ((p)->pVrrMgr) +#define SYS_GET_GPUBOOSTMGR(p) ((p)->pGpuBoostMgr) +#define SYS_GET_DISPMGR(p) ((p)->pDispMgr) +#define SYS_GET_FABRIC(p) ((p)->pFabric) +#define SYS_GET_GPUDB(p) ((p)->pGpuDb) +#define SYS_GET_HALMGR(p) ((p)->pHalMgr) + +#define IsMobile(p) 0 + +#define RM_ASSERT_BEHAVIOR_BUGCHECK_RELEASE (0x41) +#define RM_ASSERT_BEHAVIOR_BUGCHECK_DEVELOP (0x42) + +// +// OS defines (Windows flavor can be added later on) +// Unix flavor need to be sync with defines in file "nv.h" +// +#define OS_TYPE_LINUX 0x1 +#define OS_TYPE_FREEBSD 0x2 +#define OS_TYPE_SUNOS 0x3 +#define OS_TYPE_VMWARE 0x4 + +// Child class forward declarations. 
+struct OBJPFM; + +#ifndef __NVOC_CLASS_OBJPFM_TYPEDEF__ +#define __NVOC_CLASS_OBJPFM_TYPEDEF__ +typedef struct OBJPFM OBJPFM; +#endif /* __NVOC_CLASS_OBJPFM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJPFM +#define __nvoc_class_id_OBJPFM 0xb543ae +#endif /* __nvoc_class_id_OBJPFM */ + + +struct OBJVMM; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + + +struct OBJHYPERVISOR; + +#ifndef __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +#define __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ +typedef struct OBJHYPERVISOR OBJHYPERVISOR; +#endif /* __NVOC_CLASS_OBJHYPERVISOR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHYPERVISOR +#define __nvoc_class_id_OBJHYPERVISOR 0x33c1ba +#endif /* __nvoc_class_id_OBJHYPERVISOR */ + + +struct OBJGPUMGR; + +#ifndef __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ +typedef struct OBJGPUMGR OBJGPUMGR; +#endif /* __NVOC_CLASS_OBJGPUMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUMGR +#define __nvoc_class_id_OBJGPUMGR 0xcf1b25 +#endif /* __nvoc_class_id_OBJGPUMGR */ + + +struct OBJDISPMGR; + +#ifndef __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ +typedef struct OBJDISPMGR OBJDISPMGR; +#endif /* __NVOC_CLASS_OBJDISPMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJDISPMGR +#define __nvoc_class_id_OBJDISPMGR 0x69ad03 +#endif /* __nvoc_class_id_OBJDISPMGR */ + + +struct OBJGPS; + +#ifndef __NVOC_CLASS_OBJGPS_TYPEDEF__ +#define __NVOC_CLASS_OBJGPS_TYPEDEF__ +typedef struct OBJGPS OBJGPS; +#endif /* __NVOC_CLASS_OBJGPS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPS +#define __nvoc_class_id_OBJGPS 0x7ee07d +#endif /* __nvoc_class_id_OBJGPS */ + + +struct GpuAccounting; + +#ifndef __NVOC_CLASS_GpuAccounting_TYPEDEF__ +#define __NVOC_CLASS_GpuAccounting_TYPEDEF__ +typedef struct GpuAccounting GpuAccounting; +#endif /* __NVOC_CLASS_GpuAccounting_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuAccounting +#define __nvoc_class_id_GpuAccounting 0x0f1350 +#endif /* __nvoc_class_id_GpuAccounting */ + + +struct OBJHALMGR; + +#ifndef __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJHALMGR_TYPEDEF__ +typedef struct OBJHALMGR OBJHALMGR; +#endif /* __NVOC_CLASS_OBJHALMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJHALMGR +#define __nvoc_class_id_OBJHALMGR 0xbf26de +#endif /* __nvoc_class_id_OBJHALMGR */ + + +struct Fabric; + +#ifndef __NVOC_CLASS_Fabric_TYPEDEF__ +#define __NVOC_CLASS_Fabric_TYPEDEF__ +typedef struct Fabric Fabric; +#endif /* __NVOC_CLASS_Fabric_TYPEDEF__ */ + +#ifndef __nvoc_class_id_Fabric +#define __nvoc_class_id_Fabric 0x0ac791 +#endif /* __nvoc_class_id_Fabric */ + + +struct GpuDb; + +#ifndef __NVOC_CLASS_GpuDb_TYPEDEF__ +#define __NVOC_CLASS_GpuDb_TYPEDEF__ +typedef struct GpuDb GpuDb; +#endif /* __NVOC_CLASS_GpuDb_TYPEDEF__ */ + +#ifndef __nvoc_class_id_GpuDb +#define __nvoc_class_id_GpuDb 0xcdd250 +#endif /* __nvoc_class_id_GpuDb */ + + +struct OBJSWINSTR; + +#ifndef __NVOC_CLASS_OBJSWINSTR_TYPEDEF__ +#define __NVOC_CLASS_OBJSWINSTR_TYPEDEF__ +typedef struct OBJSWINSTR OBJSWINSTR; +#endif /* __NVOC_CLASS_OBJSWINSTR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSWINSTR +#define __nvoc_class_id_OBJSWINSTR 0xd586f3 +#endif /* __nvoc_class_id_OBJSWINSTR */ + + +struct OBJCL; + +#ifndef __NVOC_CLASS_OBJCL_TYPEDEF__ +#define __NVOC_CLASS_OBJCL_TYPEDEF__ +typedef struct OBJCL OBJCL; +#endif /* 
__NVOC_CLASS_OBJCL_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJCL +#define __nvoc_class_id_OBJCL 0x547dbb +#endif /* __nvoc_class_id_OBJCL */ + + +struct KernelVgpuMgr; + +#ifndef __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ +#define __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ +typedef struct KernelVgpuMgr KernelVgpuMgr; +#endif /* __NVOC_CLASS_KernelVgpuMgr_TYPEDEF__ */ + +#ifndef __nvoc_class_id_KernelVgpuMgr +#define __nvoc_class_id_KernelVgpuMgr 0xa793dd +#endif /* __nvoc_class_id_KernelVgpuMgr */ + + +struct OBJVRRMGR; + +#ifndef __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ +typedef struct OBJVRRMGR OBJVRRMGR; +#endif /* __NVOC_CLASS_OBJVRRMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVRRMGR +#define __nvoc_class_id_OBJVRRMGR 0x442804 +#endif /* __nvoc_class_id_OBJVRRMGR */ + + +struct OBJGPUBOOSTMGR; + +#ifndef __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ +typedef struct OBJGPUBOOSTMGR OBJGPUBOOSTMGR; +#endif /* __NVOC_CLASS_OBJGPUBOOSTMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPUBOOSTMGR +#define __nvoc_class_id_OBJGPUBOOSTMGR 0x9f6bbf +#endif /* __nvoc_class_id_OBJGPUBOOSTMGR */ + + +struct OBJGSYNCMGR; + +#ifndef __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ +typedef struct OBJGSYNCMGR OBJGSYNCMGR; +#endif /* __NVOC_CLASS_OBJGSYNCMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGSYNCMGR +#define __nvoc_class_id_OBJGSYNCMGR 0xd07fd0 +#endif /* __nvoc_class_id_OBJGSYNCMGR */ + + +struct OBJVGPUMGR; + +#ifndef __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ +#define __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ +typedef struct OBJVGPUMGR OBJVGPUMGR; +#endif /* __NVOC_CLASS_OBJVGPUMGR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVGPUMGR +#define __nvoc_class_id_OBJVGPUMGR 0x0e9beb +#endif /* __nvoc_class_id_OBJVGPUMGR */ + + + +typedef struct OBJRCDB Journal; + +/*! + * This structure contains static system configuration data. This structure + * will become a typesafe structure that can be exchanged with code + * running on GSP. + */ +typedef struct SYS_STATIC_CONFIG +{ + /*! Indicates if the GPU is in a notebook or not. */ + NvBool bIsNotebook; + + /*! Initial SLI configuration flags */ + NvU32 initialSliFlags; + + /*! Indicates the type of OS flavor */ + NvU32 osType; + + /*! AMD SEV (AMD's Secure Encrypted Virtualization) Status */ + NvU32 osSevStatus; + + /*! 
Indicates AMD SEV is enabled or not */ + NvBool bOsSevEnabled; +} SYS_STATIC_CONFIG; + +typedef struct +{ + NvBool bInitialized; // Set to true once we id the CPU + NvU32 type; // NV0000_CTRL_SYSTEM_CPU_TYPE value + NvU32 caps; // NV0000_CTRL_SYSTEM_CPU_CAP value + NvU32 brandId; // CPU Brand ID + NvU32 clock; + NvU32 l1DataCacheSize; // L1 data (or unified) cache size (KB) + NvU32 l2DataCacheSize; // L2 data (or unified) cache size (KB) + NvU32 dataCacheLineSize; // Bytes per line in the L1 data cache + NvU32 hostPageSize; // Native host os page size (4k/64k/etc) + NvU32 numPhysicalCpus; // Number of physical cpus + NvU32 numLogicalCpus; // Total number of logical cpus + NvU32 maxLogicalCpus; // Max Number of Cores on the System + char name[52]; // Embedded processor name; only filled + // filled in if CPU has embedded name + NvU32 family; // Vendor defined Family/extended Family + NvU32 model; // Vendor defined Model/extended Model + NvU32 coresOnDie; // # of cores on the die (0 if unknown) + NvU32 platformID; // Chip package type + NvU8 stepping; // Silicon stepping + NvBool bSEVCapable; // Is capable of SEV (Secure Encrypted Virtualization) + NvU32 maxEncryptedGuests; // Max # of encrypted guests supported +} SYS_CPU_INFO; + +typedef struct +{ + NvU32 strapUser; + NvU32 genRegsVse2VidsysEn; + NvU32 genRegsMiscIoAdr; +} SYS_VGA_POST_STATE; + + +#ifdef NVOC_SYSTEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJSYS { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct OBJTRACEABLE __nvoc_base_OBJTRACEABLE; + struct Object *__nvoc_pbase_Object; + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; + struct OBJSYS *__nvoc_pbase_OBJSYS; + NV_STATUS (*__sysCaptureState__)(struct OBJSYS *); + NvBool PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT; + NvBool PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT; + NvBool PDB_PROP_SYS_POWER_BATTERY; + NvBool PDB_PROP_SYS_NVIF_INIT_DONE; + NvBool PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED; + NvBool PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED; + NvBool PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS; + NvBool PDB_PROP_SYS_ENABLE_STREAM_MEMOPS; + NvBool PDB_PROP_SYS_IS_UEFI; + NvBool PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED; + NvBool PDB_PROP_SYS_IS_GSYNC_ENABLED; + NvBool PDB_PROP_SYS_NVSWITCH_IS_PRESENT; + NvBool PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED; + NvBool PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED; + NvBool PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED; + NvBool PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING; + NvBool PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE; + NvBool PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT; + NvBool PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS; + NvBool PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED; + NvBool PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED; + NvBool PDB_PROP_SYS_IS_EFI_INIT; + NvBool PDB_PROP_SYS_IN_OCA_DATA_COLLECTION; + NvBool PDB_PROP_SYS_DEBUGGER_DISABLED; + NvBool PDB_PROP_SYS_PRIORITY_BOOST; + NvU32 PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US; + NvBool PDB_PROP_SYS_BSOD_ON_ASSERT; + NvBool PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT; + NvU32 apiLockMask; + NvU32 apiLockModuleMask; + NvU32 gpuLockModuleMask; + NvU32 pwrTransitionTimeoutOverride; + SYS_STATIC_CONFIG staticConfig; + NvU32 debugFlags; + NvU32 backtraceStackDepth; + SYS_CPU_INFO cpuInfo; + SYS_VGA_POST_STATE vgaPostState; + NvBool gpuHotPlugPollingActive[32]; + NvU32 gridSwPkg; + void *pSema; + NvU32 binMask; + PNODE pMemFilterList; + NvU64 rmInstanceId; + NvU32 currentCid; + OS_RM_CAPS *pOsRmCaps; 
+ struct OBJGPUMGR *pGpuMgr; + struct OBJGSYNCMGR *pGsyncMgr; + struct OBJVGPUMGR *pVgpuMgr; + struct KernelVgpuMgr *pKernelVgpuMgr; + OBJOS *pOS; + struct OBJCL *pCl; + struct OBJPFM *pPfm; + struct OBJSWINSTR *pSwInstr; + struct GpuAccounting *pGpuAcct; + struct OBJGPS *pGps; + Journal *pRcDB; + struct OBJVMM *pVmm; + struct OBJHYPERVISOR *pHypervisor; + struct OBJVRRMGR *pVrrMgr; + struct OBJGPUBOOSTMGR *pGpuBoostMgr; + struct OBJDISPMGR *pDispMgr; + struct OBJHALMGR *pHalMgr; + struct Fabric *pFabric; + struct GpuDb *pGpuDb; +}; + +#ifndef __NVOC_CLASS_OBJSYS_TYPEDEF__ +#define __NVOC_CLASS_OBJSYS_TYPEDEF__ +typedef struct OBJSYS OBJSYS; +#endif /* __NVOC_CLASS_OBJSYS_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJSYS +#define __nvoc_class_id_OBJSYS 0x40e2c8 +#endif /* __nvoc_class_id_OBJSYS */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJSYS; + +#define __staticCast_OBJSYS(pThis) \ + ((pThis)->__nvoc_pbase_OBJSYS) + +#ifdef __nvoc_system_h_disabled +#define __dynamicCast_OBJSYS(pThis) ((OBJSYS*)NULL) +#else //__nvoc_system_h_disabled +#define __dynamicCast_OBJSYS(pThis) \ + ((OBJSYS*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJSYS))) +#endif //__nvoc_system_h_disabled + +#define PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED_BASE_CAST +#define PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED_BASE_NAME PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED +#define PDB_PROP_SYS_IS_EFI_INIT_BASE_CAST +#define PDB_PROP_SYS_IS_EFI_INIT_BASE_NAME PDB_PROP_SYS_IS_EFI_INIT +#define PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS_BASE_CAST +#define PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS_BASE_NAME PDB_PROP_SYS_INITIALIZE_SYSTEM_MEMORY_ALLOCATIONS +#define PDB_PROP_SYS_POWER_BATTERY_BASE_CAST +#define PDB_PROP_SYS_POWER_BATTERY_BASE_NAME PDB_PROP_SYS_POWER_BATTERY +#define PDB_PROP_SYS_NVIF_INIT_DONE_BASE_CAST +#define PDB_PROP_SYS_NVIF_INIT_DONE_BASE_NAME PDB_PROP_SYS_NVIF_INIT_DONE +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT_BASE_NAME PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT +#define PDB_PROP_SYS_BSOD_ON_ASSERT_BASE_CAST +#define PDB_PROP_SYS_BSOD_ON_ASSERT_BASE_NAME PDB_PROP_SYS_BSOD_ON_ASSERT +#define PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS_BASE_NAME PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS +#define PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED_BASE_CAST +#define PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED_BASE_NAME PDB_PROP_SYS_PRIMARY_VBIOS_STATE_SAVED +#define PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT_BASE_CAST +#define PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT_BASE_NAME PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT +#define PDB_PROP_SYS_ENABLE_STREAM_MEMOPS_BASE_CAST +#define PDB_PROP_SYS_ENABLE_STREAM_MEMOPS_BASE_NAME PDB_PROP_SYS_ENABLE_STREAM_MEMOPS +#define PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT_BASE_CAST +#define PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT_BASE_NAME PDB_PROP_SYS_SBIOS_NVIF_POWERMIZER_LIMIT +#define PDB_PROP_SYS_IS_UEFI_BASE_CAST +#define PDB_PROP_SYS_IS_UEFI_BASE_NAME PDB_PROP_SYS_IS_UEFI +#define PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED_BASE_CAST +#define PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED_BASE_NAME PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED +#define PDB_PROP_SYS_IS_GSYNC_ENABLED_BASE_CAST +#define PDB_PROP_SYS_IS_GSYNC_ENABLED_BASE_NAME PDB_PROP_SYS_IS_GSYNC_ENABLED +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED_BASE_CAST +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED_BASE_NAME PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED +#define 
PDB_PROP_SYS_PRIORITY_BOOST_BASE_CAST +#define PDB_PROP_SYS_PRIORITY_BOOST_BASE_NAME PDB_PROP_SYS_PRIORITY_BOOST +#define PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US_BASE_CAST +#define PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US_BASE_NAME PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US +#define PDB_PROP_SYS_IN_OCA_DATA_COLLECTION_BASE_CAST +#define PDB_PROP_SYS_IN_OCA_DATA_COLLECTION_BASE_NAME PDB_PROP_SYS_IN_OCA_DATA_COLLECTION +#define PDB_PROP_SYS_NVSWITCH_IS_PRESENT_BASE_CAST +#define PDB_PROP_SYS_NVSWITCH_IS_PRESENT_BASE_NAME PDB_PROP_SYS_NVSWITCH_IS_PRESENT +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED_BASE_CAST +#define PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED_BASE_NAME PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED +#define PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED_BASE_CAST +#define PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED_BASE_NAME PDB_PROP_SYS_WIN_PRIMARY_DEVICE_MARKED +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_BASE_CAST +#define PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_BASE_NAME PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE +#define PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED_BASE_CAST +#define PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED_BASE_NAME PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED +#define PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED_BASE_CAST +#define PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED_BASE_NAME PDB_PROP_SYS_IS_AGGRESSIVE_GC6_ENABLED +#define PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING_BASE_CAST +#define PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING_BASE_NAME PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING +#define PDB_PROP_SYS_DEBUGGER_DISABLED_BASE_CAST +#define PDB_PROP_SYS_DEBUGGER_DISABLED_BASE_NAME PDB_PROP_SYS_DEBUGGER_DISABLED +#define PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT_BASE_CAST +#define PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT_BASE_NAME PDB_PROP_SYS_MXM_THERMAL_CONTROL_PRESENT + +NV_STATUS __nvoc_objCreateDynamic_OBJSYS(OBJSYS**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJSYS(OBJSYS**, Dynamic*, NvU32); +#define __objCreate_OBJSYS(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJSYS((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define sysCaptureState(arg0) sysCaptureState_DISPATCH(arg0) +NV_STATUS sysCaptureState_IMPL(struct OBJSYS *arg0); + +static inline NV_STATUS sysCaptureState_DISPATCH(struct OBJSYS *arg0) { + return arg0->__sysCaptureState__(arg0); +} + +static inline NvU32 sysGetPwrTransitionTimeout(struct OBJSYS *pSys) { + return pSys->pwrTransitionTimeoutOverride; +} + +static inline const SYS_STATIC_CONFIG *sysGetStaticConfig(struct OBJSYS *pSys) { + return &pSys->staticConfig; +} + +NV_STATUS sysConstruct_IMPL(struct OBJSYS *arg_); +#define __nvoc_sysConstruct(arg_) sysConstruct_IMPL(arg_) +void sysDestruct_IMPL(struct OBJSYS *arg0); +#define __nvoc_sysDestruct(arg0) sysDestruct_IMPL(arg0) +void sysInitRegistryOverrides_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline void sysInitRegistryOverrides(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); +} +#else //__nvoc_system_h_disabled +#define sysInitRegistryOverrides(arg0) sysInitRegistryOverrides_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +void sysApplyLockingPolicy_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline void sysApplyLockingPolicy(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); +} +#else //__nvoc_system_h_disabled +#define sysApplyLockingPolicy(arg0) sysApplyLockingPolicy_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +OBJOS *sysGetOs_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled 
+static inline OBJOS *sysGetOs(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); + return NULL; +} +#else //__nvoc_system_h_disabled +#define sysGetOs(arg0) sysGetOs_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +void sysEnableExternalFabricMgmt_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline void sysEnableExternalFabricMgmt(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); +} +#else //__nvoc_system_h_disabled +#define sysEnableExternalFabricMgmt(arg0) sysEnableExternalFabricMgmt_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +void sysForceInitFabricManagerState_IMPL(struct OBJSYS *arg0); +#ifdef __nvoc_system_h_disabled +static inline void sysForceInitFabricManagerState(struct OBJSYS *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); +} +#else //__nvoc_system_h_disabled +#define sysForceInitFabricManagerState(arg0) sysForceInitFabricManagerState_IMPL(arg0) +#endif //__nvoc_system_h_disabled + +NV_STATUS sysSyncExternalFabricMgmtWAR_IMPL(struct OBJSYS *arg0, OBJGPU *arg1); +#ifdef __nvoc_system_h_disabled +static inline NV_STATUS sysSyncExternalFabricMgmtWAR(struct OBJSYS *arg0, OBJGPU *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJSYS was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_system_h_disabled +#define sysSyncExternalFabricMgmtWAR(arg0, arg1) sysSyncExternalFabricMgmtWAR_IMPL(arg0, arg1) +#endif //__nvoc_system_h_disabled + +#undef PRIVATE_FIELD + + +extern struct OBJSYS *g_pSys; + +#endif // SYSTEM_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_SYSTEM_NVOC_H_ diff --git a/src/nvidia/generated/g_third_party_p2p_nvoc.c b/src/nvidia/generated/g_third_party_p2p_nvoc.c new file mode 100644 index 000000000..054b65889 --- /dev/null +++ b/src/nvidia/generated/g_third_party_p2p_nvoc.c @@ -0,0 +1,432 @@ +#define NVOC_THIRD_PARTY_P2P_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_third_party_p2p_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x34d08b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ThirdPartyP2P; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_ThirdPartyP2P(ThirdPartyP2P*); +void __nvoc_init_funcTable_ThirdPartyP2P(ThirdPartyP2P*); +NV_STATUS __nvoc_ctor_ThirdPartyP2P(ThirdPartyP2P*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_ThirdPartyP2P(ThirdPartyP2P*); +void __nvoc_dtor_ThirdPartyP2P(ThirdPartyP2P*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ThirdPartyP2P; + +static const struct NVOC_RTTI __nvoc_rtti_ThirdPartyP2P_ThirdPartyP2P = { + /*pClassDef=*/ &__nvoc_class_def_ThirdPartyP2P, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ThirdPartyP2P, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_ThirdPartyP2P_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ThirdPartyP2P, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct 
NVOC_RTTI __nvoc_rtti_ThirdPartyP2P_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ThirdPartyP2P, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ThirdPartyP2P_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ThirdPartyP2P, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ThirdPartyP2P_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ThirdPartyP2P, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ThirdPartyP2P_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ThirdPartyP2P, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_ThirdPartyP2P = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_ThirdPartyP2P_ThirdPartyP2P, + &__nvoc_rtti_ThirdPartyP2P_GpuResource, + &__nvoc_rtti_ThirdPartyP2P_RmResource, + &__nvoc_rtti_ThirdPartyP2P_RmResourceCommon, + &__nvoc_rtti_ThirdPartyP2P_RsResource, + &__nvoc_rtti_ThirdPartyP2P_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_ThirdPartyP2P = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ThirdPartyP2P), + /*classId=*/ classId(ThirdPartyP2P), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ThirdPartyP2P", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ThirdPartyP2P, + /*pCastInfo=*/ &__nvoc_castinfo_ThirdPartyP2P, + /*pExportInfo=*/ &__nvoc_export_info_ThirdPartyP2P +}; + +static NvBool __nvoc_thunk_GpuResource_thirdpartyp2pShareCallback(struct ThirdPartyP2P *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ThirdPartyP2P_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_thirdpartyp2pControl(struct ThirdPartyP2P *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ThirdPartyP2P_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_thirdpartyp2pUnmap(struct ThirdPartyP2P *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ThirdPartyP2P_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_thirdpartyp2pGetMemInterMapParams(struct ThirdPartyP2P *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ThirdPartyP2P_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_thirdpartyp2pGetMemoryMappingDescriptor(struct ThirdPartyP2P *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ThirdPartyP2P_RmResource.offset), ppMemDesc); +} + +static 
NV_STATUS __nvoc_thunk_GpuResource_thirdpartyp2pGetMapAddrSpace(struct ThirdPartyP2P *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ThirdPartyP2P_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_thirdpartyp2pGetInternalObjectHandle(struct ThirdPartyP2P *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ThirdPartyP2P_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_thirdpartyp2pControlFilter(struct ThirdPartyP2P *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_thirdpartyp2pAddAdditionalDependants(struct RsClient *pClient, struct ThirdPartyP2P *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_thirdpartyp2pGetRefCount(struct ThirdPartyP2P *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_thirdpartyp2pCheckMemInterUnmap(struct ThirdPartyP2P *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ThirdPartyP2P_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_thirdpartyp2pMapTo(struct ThirdPartyP2P *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_thirdpartyp2pControl_Prologue(struct ThirdPartyP2P *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_thirdpartyp2pGetRegBaseOffsetAndSize(struct ThirdPartyP2P *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ThirdPartyP2P_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_thirdpartyp2pCanCopy(struct ThirdPartyP2P *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_thirdpartyp2pInternalControlForward(struct ThirdPartyP2P *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ThirdPartyP2P_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_thirdpartyp2pPreDestruct(struct ThirdPartyP2P *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RsResource.offset)); +} + +static 
NV_STATUS __nvoc_thunk_RsResource_thirdpartyp2pUnmapFrom(struct ThirdPartyP2P *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_thirdpartyp2pControl_Epilogue(struct ThirdPartyP2P *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_thirdpartyp2pControlLookup(struct ThirdPartyP2P *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_thirdpartyp2pMap(struct ThirdPartyP2P *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ThirdPartyP2P_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_thirdpartyp2pAccessCallback(struct ThirdPartyP2P *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ThirdPartyP2P_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ThirdPartyP2P[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) thirdpartyp2pCtrlCmdRegisterVaSpace_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x503c0102u, + /*paramSize=*/ sizeof(NV503C_CTRL_REGISTER_VA_SPACE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ThirdPartyP2P.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "thirdpartyp2pCtrlCmdRegisterVaSpace" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) thirdpartyp2pCtrlCmdUnregisterVaSpace_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x503c0103u, + /*paramSize=*/ sizeof(NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ThirdPartyP2P.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "thirdpartyp2pCtrlCmdUnregisterVaSpace" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) thirdpartyp2pCtrlCmdRegisterVidmem_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x503c0104u, + /*paramSize=*/ sizeof(NV503C_CTRL_REGISTER_VIDMEM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ThirdPartyP2P.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "thirdpartyp2pCtrlCmdRegisterVidmem" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ 
(void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) thirdpartyp2pCtrlCmdUnregisterVidmem_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x503c0105u, + /*paramSize=*/ sizeof(NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ThirdPartyP2P.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "thirdpartyp2pCtrlCmdUnregisterVidmem" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) thirdpartyp2pCtrlCmdRegisterPid_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x503c0106u, + /*paramSize=*/ sizeof(NV503C_CTRL_REGISTER_PID_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ThirdPartyP2P.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "thirdpartyp2pCtrlCmdRegisterPid" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_ThirdPartyP2P = +{ + /*numEntries=*/ 5, + /*pExportEntries=*/ __nvoc_exported_method_def_ThirdPartyP2P +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_ThirdPartyP2P(ThirdPartyP2P *pThis) { + __nvoc_thirdpartyp2pDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ThirdPartyP2P(ThirdPartyP2P *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_ThirdPartyP2P(ThirdPartyP2P *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ThirdPartyP2P_fail_GpuResource; + __nvoc_init_dataField_ThirdPartyP2P(pThis); + + status = __nvoc_thirdpartyp2pConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ThirdPartyP2P_fail__init; + goto __nvoc_ctor_ThirdPartyP2P_exit; // Success + +__nvoc_ctor_ThirdPartyP2P_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_ThirdPartyP2P_fail_GpuResource: +__nvoc_ctor_ThirdPartyP2P_exit: + + return status; +} + +static void __nvoc_init_funcTable_ThirdPartyP2P_1(ThirdPartyP2P *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__thirdpartyp2pCtrlCmdRegisterVaSpace__ = &thirdpartyp2pCtrlCmdRegisterVaSpace_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__thirdpartyp2pCtrlCmdUnregisterVaSpace__ = &thirdpartyp2pCtrlCmdUnregisterVaSpace_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__thirdpartyp2pCtrlCmdRegisterVidmem__ = &thirdpartyp2pCtrlCmdRegisterVidmem_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__thirdpartyp2pCtrlCmdUnregisterVidmem__ = &thirdpartyp2pCtrlCmdUnregisterVidmem_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__thirdpartyp2pCtrlCmdRegisterPid__ = &thirdpartyp2pCtrlCmdRegisterPid_IMPL; +#endif + + pThis->__thirdpartyp2pShareCallback__ = &__nvoc_thunk_GpuResource_thirdpartyp2pShareCallback; + + pThis->__thirdpartyp2pControl__ = &__nvoc_thunk_GpuResource_thirdpartyp2pControl; + + pThis->__thirdpartyp2pUnmap__ = &__nvoc_thunk_GpuResource_thirdpartyp2pUnmap; + + 
pThis->__thirdpartyp2pGetMemInterMapParams__ = &__nvoc_thunk_RmResource_thirdpartyp2pGetMemInterMapParams; + + pThis->__thirdpartyp2pGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_thirdpartyp2pGetMemoryMappingDescriptor; + + pThis->__thirdpartyp2pGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_thirdpartyp2pGetMapAddrSpace; + + pThis->__thirdpartyp2pGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_thirdpartyp2pGetInternalObjectHandle; + + pThis->__thirdpartyp2pControlFilter__ = &__nvoc_thunk_RsResource_thirdpartyp2pControlFilter; + + pThis->__thirdpartyp2pAddAdditionalDependants__ = &__nvoc_thunk_RsResource_thirdpartyp2pAddAdditionalDependants; + + pThis->__thirdpartyp2pGetRefCount__ = &__nvoc_thunk_RsResource_thirdpartyp2pGetRefCount; + + pThis->__thirdpartyp2pCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_thirdpartyp2pCheckMemInterUnmap; + + pThis->__thirdpartyp2pMapTo__ = &__nvoc_thunk_RsResource_thirdpartyp2pMapTo; + + pThis->__thirdpartyp2pControl_Prologue__ = &__nvoc_thunk_RmResource_thirdpartyp2pControl_Prologue; + + pThis->__thirdpartyp2pGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_thirdpartyp2pGetRegBaseOffsetAndSize; + + pThis->__thirdpartyp2pCanCopy__ = &__nvoc_thunk_RsResource_thirdpartyp2pCanCopy; + + pThis->__thirdpartyp2pInternalControlForward__ = &__nvoc_thunk_GpuResource_thirdpartyp2pInternalControlForward; + + pThis->__thirdpartyp2pPreDestruct__ = &__nvoc_thunk_RsResource_thirdpartyp2pPreDestruct; + + pThis->__thirdpartyp2pUnmapFrom__ = &__nvoc_thunk_RsResource_thirdpartyp2pUnmapFrom; + + pThis->__thirdpartyp2pControl_Epilogue__ = &__nvoc_thunk_RmResource_thirdpartyp2pControl_Epilogue; + + pThis->__thirdpartyp2pControlLookup__ = &__nvoc_thunk_RsResource_thirdpartyp2pControlLookup; + + pThis->__thirdpartyp2pMap__ = &__nvoc_thunk_GpuResource_thirdpartyp2pMap; + + pThis->__thirdpartyp2pAccessCallback__ = &__nvoc_thunk_RmResource_thirdpartyp2pAccessCallback; +} + +void __nvoc_init_funcTable_ThirdPartyP2P(ThirdPartyP2P *pThis) { + __nvoc_init_funcTable_ThirdPartyP2P_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_ThirdPartyP2P(ThirdPartyP2P *pThis) { + pThis->__nvoc_pbase_ThirdPartyP2P = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_ThirdPartyP2P(pThis); +} + +NV_STATUS __nvoc_objCreate_ThirdPartyP2P(ThirdPartyP2P **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + ThirdPartyP2P *pThis; + + pThis = portMemAllocNonPaged(sizeof(ThirdPartyP2P)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(ThirdPartyP2P)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ThirdPartyP2P); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, 
&pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_ThirdPartyP2P(pThis); + status = __nvoc_ctor_ThirdPartyP2P(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_ThirdPartyP2P_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_ThirdPartyP2P_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ThirdPartyP2P(ThirdPartyP2P **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_ThirdPartyP2P(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_third_party_p2p_nvoc.h b/src/nvidia/generated/g_third_party_p2p_nvoc.h new file mode 100644 index 000000000..1d958f21e --- /dev/null +++ b/src/nvidia/generated/g_third_party_p2p_nvoc.h @@ -0,0 +1,541 @@ +#ifndef _G_THIRD_PARTY_P2P_NVOC_H_ +#define _G_THIRD_PARTY_P2P_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_third_party_p2p_nvoc.h" + +#ifndef _THIRD_PARTY_P2P_H_ +#define _THIRD_PARTY_P2P_H_ + +#include "rmapi/client.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/gpu_resource.h" + +#include + +// +// The third-party P2P token is a 64bit integer. +// +// The value may be passed by the RM client enabling the third-party P2P access. +// Otherwise the P2P token format is: +// +// fn [ *GPU ID* ] [ *UNUSED* ] [ *PEER INDEX* ] +// bit 63 32 31 8 7 0 +// +// The third-party P2P token is a globally unique identifier for one +// of an attached GPU's several P2P slots. It is passed, as an +// opaque handle, to third-party driver stacks to allow the setup +// of a P2P mapping between a given GPU and a third-party device with +// NVIDIA P2P capabilities. 
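As a rough illustration of the token layout documented above (and not part of this commit), the sketch below shows how a value built with the CLI_ENCODEP2PTOKEN macro defined just below could be decoded back into its GPU ID and peer index. The helper names are hypothetical; the only assumptions are the bit positions stated in this comment (GPU ID in bits 63:32, peer index in bits 7:0, bits 31:8 unused).

#include "nvtypes.h"   /* NvU64/NvU32 definitions used throughout this header */

/* Hypothetical decode helpers mirroring the documented layout and the
 * CLI_ENCODEP2PTOKEN() encoding: ((NvU64)gpuId << 32) | peerIndex. */
static inline NvU32 exampleP2PTokenGpuId(NvU64 p2pToken)
{
    return (NvU32)(p2pToken >> 32);      /* bits 63:32 hold the GPU ID */
}

static inline NvU32 exampleP2PTokenPeerIndex(NvU64 p2pToken)
{
    return (NvU32)(p2pToken & 0xFF);     /* bits 7:0 hold the peer index */
}

/* Round trip: CLI_ENCODEP2PTOKEN(0x1234, 3) yields a token for which
 * exampleP2PTokenGpuId() returns 0x1234 and exampleP2PTokenPeerIndex()
 * returns 3, since the two fields occupy disjoint bit ranges. */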
+// + +#define CLI_ENCODEP2PTOKEN(gpuId, peerIndex) (((NvU64)gpuId << 32) | peerIndex) + +#define CLI_THIRD_PARTY_P2P_FLAGS_INITIALIZED NVBIT(0) + +#define CLI_THIRD_PARTY_P2P_MAX_CLIENT 256 + +// +// CUDA tools has requested that the third-party P2P code reserve at least +// 32MB of BAR1 address space for RM clients. Pad this reservation by +// 4MB to account for miscellaneous RM mappings. +// +#define CLI_THIRD_PARTY_P2P_BAR1_RESERVE (36 << 20) + +// **************************************************************************** +// Type definitions +// **************************************************************************** + +// +// third-party p2p support types +// +struct _def_client_third_party_p2p_vaspace_info +{ + NvHandle hClient; + NvHandle hThirdPartyP2P; + NvHandle hVASpace; + NvU32 vaSpaceToken; +}; +typedef struct _def_client_third_party_p2p_vaspace_info CLI_THIRD_PARTY_P2P_VASPACE_INFO, *PCLI_THIRD_PARTY_P2P_VASPACE_INFO; + +MAKE_MAP(CLI_THIRD_PARTY_P2P_VASPACE_INFO_MAP, CLI_THIRD_PARTY_P2P_VASPACE_INFO); + +typedef void (THIRD_PARTY_P2P_VIDMEM_FREE_CALLBACK)(void *); + +struct _def_client_third_party_p2p_mapping_info +{ + NODE Node; + THIRD_PARTY_P2P_VIDMEM_FREE_CALLBACK *pFreeCallback; + void *pData; + + // Address and length describe a subrange of the parent vidmem info address range + NvU64 address; + NvU64 length; + + struct _def_client_third_party_p2p_mapping_extent_info *pStart; +}; +typedef struct _def_client_third_party_p2p_mapping_info CLI_THIRD_PARTY_P2P_MAPPING_INFO, *PCLI_THIRD_PARTY_P2P_MAPPING_INFO; + +struct _def_client_third_party_p2p_mapping_extent_info +{ + // Address and length describe a subrange of the parent vidmem info address range + NvU64 address; + NvU64 length; + + NvU64 fbApertureOffset; + PMEMORY_DESCRIPTOR pMemDesc; + NvU32 refCount; + ListNode listNode; // Node in the list. +}; +typedef struct _def_client_third_party_p2p_mapping_extent_info CLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO, *PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO; + +MAKE_INTRUSIVE_LIST(CLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO_LIST, CLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO, listNode); + +struct _def_client_third_party_p2p_vidmem_info +{ + NvHandle hClient; + NvHandle hThirdPartyP2P; + NvHandle hMemory; + + // + // A node in the PCLI_THIRD_PARTY_P2P_INFO::pAddressRangeTree btree tracking + // the address range registered for this vidmem allocation. + // + // Notably the address ranges come from the user and are not enforced to + // be mapped in any GPU VA space. It's up to the user to pick them and they + // are used purely for exposing the registered vidmem allocations through + // the third-party P2P APIs at the specified ranges. See documentation of + // NV503C_CTRL_CMD_REGISTER_VIDMEM for more details. + // + // Commonly clients will map the allocation at the same address as it's + // registered with for third-party P2P and the third-party P2P APIs still + // call the address parameter a virtual address, but the implementation + // refers to them just as addresses to make it clear they are not enforced + // to be actually mapped. + // + // Notably in the past the addresses have been indeed looked up in the GPU + // VA spaces directly, but that became challenging with the introduction of + // externally managed VA spaces and now it's up to the clients to register + // them explicitly. + // + NODE addressRangeNode; + MapNode mapNode; + // Offset at which the address range starts in the vidmem allocation. 
+ NvU64 offset; + + // VidmemInfo ID used for persistent mappings + NvU64 id; + + PMEMORY_DESCRIPTOR pMemDesc; + PNODE pMappingInfoList; + CLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO_LIST mappingExtentList; +}; +typedef struct _def_client_third_party_p2p_vidmem_info CLI_THIRD_PARTY_P2P_VIDMEM_INFO, *PCLI_THIRD_PARTY_P2P_VIDMEM_INFO; + +MAKE_INTRUSIVE_MAP(CLI_THIRD_PARTY_P2P_VIDMEM_INFO_MAP, CLI_THIRD_PARTY_P2P_VIDMEM_INFO, mapNode); + +typedef void (THIRD_PARTY_P2P_DESTROY_CALLBACK)(void *); + +enum _def_client_third_party_p2p_type +{ + CLI_THIRD_PARTY_P2P_TYPE_PROPRIETARY, + CLI_THIRD_PARTY_P2P_TYPE_BAR1, + CLI_THIRD_PARTY_P2P_TYPE_NVLINK +}; +typedef enum _def_client_third_party_p2p_type CLI_THIRD_PARTY_P2P_TYPE; + +struct _def_client_third_party_p2p_pid_client_mapping_info +{ + NvU32 pid; + NvHandle hClient; +}; +typedef struct _def_client_third_party_p2p_pid_client_mapping_info CLI_THIRD_PARTY_P2P_PID_CLIENT_INFO, *PCLI_THIRD_PARTY_P2P_PID_CLIENT_INFO; + + +#ifdef NVOC_THIRD_PARTY_P2P_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct ThirdPartyP2P { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct ThirdPartyP2P *__nvoc_pbase_ThirdPartyP2P; + NV_STATUS (*__thirdpartyp2pCtrlCmdRegisterVaSpace__)(struct ThirdPartyP2P *, NV503C_CTRL_REGISTER_VA_SPACE_PARAMS *); + NV_STATUS (*__thirdpartyp2pCtrlCmdUnregisterVaSpace__)(struct ThirdPartyP2P *, NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS *); + NV_STATUS (*__thirdpartyp2pCtrlCmdRegisterVidmem__)(struct ThirdPartyP2P *, NV503C_CTRL_REGISTER_VIDMEM_PARAMS *); + NV_STATUS (*__thirdpartyp2pCtrlCmdUnregisterVidmem__)(struct ThirdPartyP2P *, NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS *); + NV_STATUS (*__thirdpartyp2pCtrlCmdRegisterPid__)(struct ThirdPartyP2P *, NV503C_CTRL_REGISTER_PID_PARAMS *); + NvBool (*__thirdpartyp2pShareCallback__)(struct ThirdPartyP2P *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__thirdpartyp2pControl__)(struct ThirdPartyP2P *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__thirdpartyp2pUnmap__)(struct ThirdPartyP2P *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__thirdpartyp2pGetMemInterMapParams__)(struct ThirdPartyP2P *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__thirdpartyp2pGetMemoryMappingDescriptor__)(struct ThirdPartyP2P *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__thirdpartyp2pGetMapAddrSpace__)(struct ThirdPartyP2P *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__thirdpartyp2pGetInternalObjectHandle__)(struct ThirdPartyP2P *); + NV_STATUS (*__thirdpartyp2pControlFilter__)(struct ThirdPartyP2P *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__thirdpartyp2pAddAdditionalDependants__)(struct RsClient *, struct ThirdPartyP2P *, RsResourceRef *); + NvU32 (*__thirdpartyp2pGetRefCount__)(struct ThirdPartyP2P *); + NV_STATUS (*__thirdpartyp2pCheckMemInterUnmap__)(struct ThirdPartyP2P *, NvBool); + NV_STATUS (*__thirdpartyp2pMapTo__)(struct ThirdPartyP2P *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__thirdpartyp2pControl_Prologue__)(struct ThirdPartyP2P *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS 
(*__thirdpartyp2pGetRegBaseOffsetAndSize__)(struct ThirdPartyP2P *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__thirdpartyp2pCanCopy__)(struct ThirdPartyP2P *); + NV_STATUS (*__thirdpartyp2pInternalControlForward__)(struct ThirdPartyP2P *, NvU32, void *, NvU32); + void (*__thirdpartyp2pPreDestruct__)(struct ThirdPartyP2P *); + NV_STATUS (*__thirdpartyp2pUnmapFrom__)(struct ThirdPartyP2P *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__thirdpartyp2pControl_Epilogue__)(struct ThirdPartyP2P *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__thirdpartyp2pControlLookup__)(struct ThirdPartyP2P *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__thirdpartyp2pMap__)(struct ThirdPartyP2P *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__thirdpartyp2pAccessCallback__)(struct ThirdPartyP2P *, struct RsClient *, void *, RsAccessRight); + NODE Node; + NvHandle hClient; + NvHandle hThirdPartyP2P; + CLI_THIRD_PARTY_P2P_TYPE type; + struct Subdevice *pSubdevice; + NvU32 peerIndex; + NvU32 flags; + NvU64 p2pToken; + CLI_THIRD_PARTY_P2P_PID_CLIENT_INFO pidClientList[256]; + CLI_THIRD_PARTY_P2P_VASPACE_INFO_MAP vaSpaceInfoMap; + THIRD_PARTY_P2P_DESTROY_CALLBACK *pDestroyCallback; + void *pData; + CLI_THIRD_PARTY_P2P_VIDMEM_INFO_MAP vidmemInfoMap; + PNODE pAddressRangeTree; +}; + +#ifndef __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ +#define __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ +typedef struct ThirdPartyP2P ThirdPartyP2P; +#endif /* __NVOC_CLASS_ThirdPartyP2P_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ThirdPartyP2P +#define __nvoc_class_id_ThirdPartyP2P 0x34d08b +#endif /* __nvoc_class_id_ThirdPartyP2P */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ThirdPartyP2P; + +#define __staticCast_ThirdPartyP2P(pThis) \ + ((pThis)->__nvoc_pbase_ThirdPartyP2P) + +#ifdef __nvoc_third_party_p2p_h_disabled +#define __dynamicCast_ThirdPartyP2P(pThis) ((ThirdPartyP2P*)NULL) +#else //__nvoc_third_party_p2p_h_disabled +#define __dynamicCast_ThirdPartyP2P(pThis) \ + ((ThirdPartyP2P*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ThirdPartyP2P))) +#endif //__nvoc_third_party_p2p_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_ThirdPartyP2P(ThirdPartyP2P**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ThirdPartyP2P(ThirdPartyP2P**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_ThirdPartyP2P(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_ThirdPartyP2P((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define thirdpartyp2pCtrlCmdRegisterVaSpace(pThirdPartyP2P, pRegisterVaSpaceParams) thirdpartyp2pCtrlCmdRegisterVaSpace_DISPATCH(pThirdPartyP2P, pRegisterVaSpaceParams) +#define thirdpartyp2pCtrlCmdUnregisterVaSpace(pThirdPartyP2P, pUnregisterVaSpaceParams) thirdpartyp2pCtrlCmdUnregisterVaSpace_DISPATCH(pThirdPartyP2P, pUnregisterVaSpaceParams) +#define thirdpartyp2pCtrlCmdRegisterVidmem(pThirdPartyP2P, pRegisterVidmemParams) thirdpartyp2pCtrlCmdRegisterVidmem_DISPATCH(pThirdPartyP2P, pRegisterVidmemParams) +#define thirdpartyp2pCtrlCmdUnregisterVidmem(pThirdPartyP2P, pUnregisterVidmemParams) thirdpartyp2pCtrlCmdUnregisterVidmem_DISPATCH(pThirdPartyP2P, pUnregisterVidmemParams) +#define thirdpartyp2pCtrlCmdRegisterPid(pThirdPartyP2P, pParams) thirdpartyp2pCtrlCmdRegisterPid_DISPATCH(pThirdPartyP2P, pParams) +#define 
thirdpartyp2pShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) thirdpartyp2pShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define thirdpartyp2pControl(pGpuResource, pCallContext, pParams) thirdpartyp2pControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define thirdpartyp2pUnmap(pGpuResource, pCallContext, pCpuMapping) thirdpartyp2pUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define thirdpartyp2pGetMemInterMapParams(pRmResource, pParams) thirdpartyp2pGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define thirdpartyp2pGetMemoryMappingDescriptor(pRmResource, ppMemDesc) thirdpartyp2pGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define thirdpartyp2pGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) thirdpartyp2pGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define thirdpartyp2pGetInternalObjectHandle(pGpuResource) thirdpartyp2pGetInternalObjectHandle_DISPATCH(pGpuResource) +#define thirdpartyp2pControlFilter(pResource, pCallContext, pParams) thirdpartyp2pControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define thirdpartyp2pAddAdditionalDependants(pClient, pResource, pReference) thirdpartyp2pAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define thirdpartyp2pGetRefCount(pResource) thirdpartyp2pGetRefCount_DISPATCH(pResource) +#define thirdpartyp2pCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) thirdpartyp2pCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define thirdpartyp2pMapTo(pResource, pParams) thirdpartyp2pMapTo_DISPATCH(pResource, pParams) +#define thirdpartyp2pControl_Prologue(pResource, pCallContext, pParams) thirdpartyp2pControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define thirdpartyp2pGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) thirdpartyp2pGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define thirdpartyp2pCanCopy(pResource) thirdpartyp2pCanCopy_DISPATCH(pResource) +#define thirdpartyp2pInternalControlForward(pGpuResource, command, pParams, size) thirdpartyp2pInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define thirdpartyp2pPreDestruct(pResource) thirdpartyp2pPreDestruct_DISPATCH(pResource) +#define thirdpartyp2pUnmapFrom(pResource, pParams) thirdpartyp2pUnmapFrom_DISPATCH(pResource, pParams) +#define thirdpartyp2pControl_Epilogue(pResource, pCallContext, pParams) thirdpartyp2pControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define thirdpartyp2pControlLookup(pResource, pParams, ppEntry) thirdpartyp2pControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define thirdpartyp2pMap(pGpuResource, pCallContext, pParams, pCpuMapping) thirdpartyp2pMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define thirdpartyp2pAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) thirdpartyp2pAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS thirdpartyp2pCtrlCmdRegisterVaSpace_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_REGISTER_VA_SPACE_PARAMS *pRegisterVaSpaceParams); + +static inline NV_STATUS thirdpartyp2pCtrlCmdRegisterVaSpace_DISPATCH(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_REGISTER_VA_SPACE_PARAMS *pRegisterVaSpaceParams) { + return pThirdPartyP2P->__thirdpartyp2pCtrlCmdRegisterVaSpace__(pThirdPartyP2P, pRegisterVaSpaceParams); +} + +NV_STATUS thirdpartyp2pCtrlCmdUnregisterVaSpace_IMPL(struct ThirdPartyP2P 
*pThirdPartyP2P, NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS *pUnregisterVaSpaceParams); + +static inline NV_STATUS thirdpartyp2pCtrlCmdUnregisterVaSpace_DISPATCH(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS *pUnregisterVaSpaceParams) { + return pThirdPartyP2P->__thirdpartyp2pCtrlCmdUnregisterVaSpace__(pThirdPartyP2P, pUnregisterVaSpaceParams); +} + +NV_STATUS thirdpartyp2pCtrlCmdRegisterVidmem_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_REGISTER_VIDMEM_PARAMS *pRegisterVidmemParams); + +static inline NV_STATUS thirdpartyp2pCtrlCmdRegisterVidmem_DISPATCH(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_REGISTER_VIDMEM_PARAMS *pRegisterVidmemParams) { + return pThirdPartyP2P->__thirdpartyp2pCtrlCmdRegisterVidmem__(pThirdPartyP2P, pRegisterVidmemParams); +} + +NV_STATUS thirdpartyp2pCtrlCmdUnregisterVidmem_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS *pUnregisterVidmemParams); + +static inline NV_STATUS thirdpartyp2pCtrlCmdUnregisterVidmem_DISPATCH(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS *pUnregisterVidmemParams) { + return pThirdPartyP2P->__thirdpartyp2pCtrlCmdUnregisterVidmem__(pThirdPartyP2P, pUnregisterVidmemParams); +} + +NV_STATUS thirdpartyp2pCtrlCmdRegisterPid_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_REGISTER_PID_PARAMS *pParams); + +static inline NV_STATUS thirdpartyp2pCtrlCmdRegisterPid_DISPATCH(struct ThirdPartyP2P *pThirdPartyP2P, NV503C_CTRL_REGISTER_PID_PARAMS *pParams) { + return pThirdPartyP2P->__thirdpartyp2pCtrlCmdRegisterPid__(pThirdPartyP2P, pParams); +} + +static inline NvBool thirdpartyp2pShareCallback_DISPATCH(struct ThirdPartyP2P *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__thirdpartyp2pShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS thirdpartyp2pControl_DISPATCH(struct ThirdPartyP2P *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__thirdpartyp2pControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS thirdpartyp2pUnmap_DISPATCH(struct ThirdPartyP2P *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__thirdpartyp2pUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS thirdpartyp2pGetMemInterMapParams_DISPATCH(struct ThirdPartyP2P *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__thirdpartyp2pGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS thirdpartyp2pGetMemoryMappingDescriptor_DISPATCH(struct ThirdPartyP2P *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__thirdpartyp2pGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS thirdpartyp2pGetMapAddrSpace_DISPATCH(struct ThirdPartyP2P *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__thirdpartyp2pGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle thirdpartyp2pGetInternalObjectHandle_DISPATCH(struct ThirdPartyP2P *pGpuResource) { + return pGpuResource->__thirdpartyp2pGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS thirdpartyp2pControlFilter_DISPATCH(struct ThirdPartyP2P *pResource, struct CALL_CONTEXT *pCallContext, struct 
RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__thirdpartyp2pControlFilter__(pResource, pCallContext, pParams); +} + +static inline void thirdpartyp2pAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ThirdPartyP2P *pResource, RsResourceRef *pReference) { + pResource->__thirdpartyp2pAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 thirdpartyp2pGetRefCount_DISPATCH(struct ThirdPartyP2P *pResource) { + return pResource->__thirdpartyp2pGetRefCount__(pResource); +} + +static inline NV_STATUS thirdpartyp2pCheckMemInterUnmap_DISPATCH(struct ThirdPartyP2P *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__thirdpartyp2pCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS thirdpartyp2pMapTo_DISPATCH(struct ThirdPartyP2P *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__thirdpartyp2pMapTo__(pResource, pParams); +} + +static inline NV_STATUS thirdpartyp2pControl_Prologue_DISPATCH(struct ThirdPartyP2P *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__thirdpartyp2pControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS thirdpartyp2pGetRegBaseOffsetAndSize_DISPATCH(struct ThirdPartyP2P *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__thirdpartyp2pGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool thirdpartyp2pCanCopy_DISPATCH(struct ThirdPartyP2P *pResource) { + return pResource->__thirdpartyp2pCanCopy__(pResource); +} + +static inline NV_STATUS thirdpartyp2pInternalControlForward_DISPATCH(struct ThirdPartyP2P *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__thirdpartyp2pInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void thirdpartyp2pPreDestruct_DISPATCH(struct ThirdPartyP2P *pResource) { + pResource->__thirdpartyp2pPreDestruct__(pResource); +} + +static inline NV_STATUS thirdpartyp2pUnmapFrom_DISPATCH(struct ThirdPartyP2P *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__thirdpartyp2pUnmapFrom__(pResource, pParams); +} + +static inline void thirdpartyp2pControl_Epilogue_DISPATCH(struct ThirdPartyP2P *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__thirdpartyp2pControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS thirdpartyp2pControlLookup_DISPATCH(struct ThirdPartyP2P *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__thirdpartyp2pControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS thirdpartyp2pMap_DISPATCH(struct ThirdPartyP2P *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__thirdpartyp2pMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool thirdpartyp2pAccessCallback_DISPATCH(struct ThirdPartyP2P *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__thirdpartyp2pAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS thirdpartyp2pConstruct_IMPL(struct ThirdPartyP2P *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); 
+#define __nvoc_thirdpartyp2pConstruct(arg_pResource, arg_pCallContext, arg_pParams) thirdpartyp2pConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +void thirdpartyp2pDestruct_IMPL(struct ThirdPartyP2P *pResource); +#define __nvoc_thirdpartyp2pDestruct(pResource) thirdpartyp2pDestruct_IMPL(pResource) +NvBool thirdpartyp2pIsValidClientPid_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, NvU32 pid, NvHandle hClient); +#ifdef __nvoc_third_party_p2p_h_disabled +static inline NvBool thirdpartyp2pIsValidClientPid(struct ThirdPartyP2P *pThirdPartyP2P, NvU32 pid, NvHandle hClient) { + NV_ASSERT_FAILED_PRECOMP("ThirdPartyP2P was disabled!"); + return NV_FALSE; +} +#else //__nvoc_third_party_p2p_h_disabled +#define thirdpartyp2pIsValidClientPid(pThirdPartyP2P, pid, hClient) thirdpartyp2pIsValidClientPid_IMPL(pThirdPartyP2P, pid, hClient) +#endif //__nvoc_third_party_p2p_h_disabled + +NV_STATUS thirdpartyp2pDelMappingInfoByKey_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, void *pKey, NvBool bIsRsyncNeeded); +#ifdef __nvoc_third_party_p2p_h_disabled +static inline NV_STATUS thirdpartyp2pDelMappingInfoByKey(struct ThirdPartyP2P *pThirdPartyP2P, void *pKey, NvBool bIsRsyncNeeded) { + NV_ASSERT_FAILED_PRECOMP("ThirdPartyP2P was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_third_party_p2p_h_disabled +#define thirdpartyp2pDelMappingInfoByKey(pThirdPartyP2P, pKey, bIsRsyncNeeded) thirdpartyp2pDelMappingInfoByKey_IMPL(pThirdPartyP2P, pKey, bIsRsyncNeeded) +#endif //__nvoc_third_party_p2p_h_disabled + +NV_STATUS thirdpartyp2pDelPersistentMappingInfoByKey_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, void *pKey, NvBool bIsRsyncNeeded); +#ifdef __nvoc_third_party_p2p_h_disabled +static inline NV_STATUS thirdpartyp2pDelPersistentMappingInfoByKey(struct ThirdPartyP2P *pThirdPartyP2P, void *pKey, NvBool bIsRsyncNeeded) { + NV_ASSERT_FAILED_PRECOMP("ThirdPartyP2P was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_third_party_p2p_h_disabled +#define thirdpartyp2pDelPersistentMappingInfoByKey(pThirdPartyP2P, pKey, bIsRsyncNeeded) thirdpartyp2pDelPersistentMappingInfoByKey_IMPL(pThirdPartyP2P, pKey, bIsRsyncNeeded) +#endif //__nvoc_third_party_p2p_h_disabled + +NV_STATUS thirdpartyp2pGetVASpaceInfoFromToken_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, NvU32 vaSpaceToken, PCLI_THIRD_PARTY_P2P_VASPACE_INFO *ppVASpaceInfo); +#ifdef __nvoc_third_party_p2p_h_disabled +static inline NV_STATUS thirdpartyp2pGetVASpaceInfoFromToken(struct ThirdPartyP2P *pThirdPartyP2P, NvU32 vaSpaceToken, PCLI_THIRD_PARTY_P2P_VASPACE_INFO *ppVASpaceInfo) { + NV_ASSERT_FAILED_PRECOMP("ThirdPartyP2P was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_third_party_p2p_h_disabled +#define thirdpartyp2pGetVASpaceInfoFromToken(pThirdPartyP2P, vaSpaceToken, ppVASpaceInfo) thirdpartyp2pGetVASpaceInfoFromToken_IMPL(pThirdPartyP2P, vaSpaceToken, ppVASpaceInfo) +#endif //__nvoc_third_party_p2p_h_disabled + +NV_STATUS thirdpartyp2pGetNextVASpaceInfo_IMPL(struct ThirdPartyP2P *pThirdPartyP2P, PCLI_THIRD_PARTY_P2P_VASPACE_INFO *arg0); +#ifdef __nvoc_third_party_p2p_h_disabled +static inline NV_STATUS thirdpartyp2pGetNextVASpaceInfo(struct ThirdPartyP2P *pThirdPartyP2P, PCLI_THIRD_PARTY_P2P_VASPACE_INFO *arg0) { + NV_ASSERT_FAILED_PRECOMP("ThirdPartyP2P was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_third_party_p2p_h_disabled +#define thirdpartyp2pGetNextVASpaceInfo(pThirdPartyP2P, arg0) thirdpartyp2pGetNextVASpaceInfo_IMPL(pThirdPartyP2P, arg0) +#endif //__nvoc_third_party_p2p_h_disabled + 
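To make the calling convention of this generated header concrete, here is an illustrative caller (not part of the commit) that looks up a registered VA space by token. The function name and token value are hypothetical; the sketch relies only on the thirdpartyp2pGetVASpaceInfoFromToken() macro declared above, which expands to the _IMPL function in normal builds and to the NV_ASSERT_FAILED_PRECOMP stub when __nvoc_third_party_p2p_h_disabled is defined.

/* Illustrative caller, assuming a valid ThirdPartyP2P object and VA space token. */
static NV_STATUS exampleLookupVaSpace(struct ThirdPartyP2P *pThirdPartyP2P, NvU32 vaSpaceToken)
{
    PCLI_THIRD_PARTY_P2P_VASPACE_INFO pVASpaceInfo = NULL;
    NV_STATUS status;

    /* Resolves to thirdpartyp2pGetVASpaceInfoFromToken_IMPL() unless the class
     * is compiled out, in which case it becomes an assert-and-fail stub. */
    status = thirdpartyp2pGetVASpaceInfoFromToken(pThirdPartyP2P, vaSpaceToken, &pVASpaceInfo);
    if (status != NV_OK)
        return status;

    /* pVASpaceInfo now describes the VA space registered under vaSpaceToken. */
    return NV_OK;
}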
+#undef PRIVATE_FIELD + + +typedef struct ThirdPartyP2P *PCLI_THIRD_PARTY_P2P_INFO; // RS-TODO: Remove + +// **************************************************************************** +// Functions prototypes +// **************************************************************************** + +// Create and add/register a third-party P2P object +NV_STATUS CliAddThirdPartyP2P (NvHandle, NvHandle, struct Subdevice *, NvU32, NvU64); + +// Delete the specified third-party P2P object +NV_STATUS CliDelThirdPartyP2P (NvHandle, NvHandle); + +// Get third-party P2P info given client/object handles +NV_STATUS CliGetThirdPartyP2PInfo (NvHandle, NvHandle, PCLI_THIRD_PARTY_P2P_INFO *); + +// Get registered third-party P2P info from pid. Also match the provided client handle if provided. +NV_STATUS CliNextThirdPartyP2PInfoWithPid (struct OBJGPU *, NvU32, NvHandle, struct RmClient**, PCLI_THIRD_PARTY_P2P_INFO *); + +// Get third-party P2P info given a P2P token +NV_STATUS CliGetThirdPartyP2PInfoFromToken (NvU64, PCLI_THIRD_PARTY_P2P_INFO *); + +// Register an address space with a third-party P2P object +NV_STATUS CliAddThirdPartyP2PVASpace (NvHandle, NvHandle, NvHandle, NvU32 *); + +// Unregister an address space from a third-party P2P object +NV_STATUS CliDelThirdPartyP2PVASpace (struct ThirdPartyP2P*, NvHandle); + +// Register video memory with a third-party P2P object +NV_STATUS CliAddThirdPartyP2PVidmemInfo (NvHandle, NvHandle, NvHandle, NvU64, NvU64, NvU64, struct Memory *); + +// Unregister video memory from a third-party P2P object +NV_STATUS CliDelThirdPartyP2PVidmemInfo (struct ThirdPartyP2P*, NvHandle); + +// Unregister video memory from a third-party P2P object only if the VidmemInfo is not used +void CliDelThirdPartyP2PVidmemInfoPersistent (struct ThirdPartyP2P*, CLI_THIRD_PARTY_P2P_VIDMEM_INFO*); + +// Find registered video memory given an address +NV_STATUS CliGetThirdPartyP2PVidmemInfoFromAddress (NvHandle, NvHandle, NvU64, NvU64, NvU64 *, PCLI_THIRD_PARTY_P2P_VIDMEM_INFO *); + +// Find registered video memory given a VidmemInfo ID +NV_STATUS CliGetThirdPartyP2PVidmemInfoFromId(NvHandle, NvHandle, NvU64, CLI_THIRD_PARTY_P2P_VIDMEM_INFO **); + +// Find platformData given a P2PInfo object +NV_STATUS CliGetThirdPartyP2PPlatformData (PCLI_THIRD_PARTY_P2P_INFO, void *); + +// Associate a P2P mapping with registered video memory +NV_STATUS CliAddThirdPartyP2PMappingInfo (NvHandle, NvHandle, NvHandle, void *, THIRD_PARTY_P2P_VIDMEM_FREE_CALLBACK *, void *, PCLI_THIRD_PARTY_P2P_MAPPING_INFO *); + +// Find a P2P mapping given its platform specific data +NV_STATUS CliGetThirdPartyP2PMappingInfoFromKey (NvHandle, NvHandle, NvHandle, void *, PCLI_THIRD_PARTY_P2P_MAPPING_INFO *); + +// Register pid & client with a third-party P2P Info object +NV_STATUS CliAddThirdPartyP2PClientPid (NvHandle, NvHandle, NvU32, NvU32); + +// Unregister pid & client from a third-party P2P Info object +NV_STATUS CliDelThirdPartyP2PClientPid (struct RmClient*, NvHandle, NvU32, NvU32); + +// Remove association of a client from any existing third-Party P2P Info object +NV_STATUS CliUnregisterFromThirdPartyP2P (struct RmClient*); + +// Register a free callback +NV_STATUS CliRegisterThirdPartyP2PMappingCallback (NvHandle, NvHandle, NvHandle, void *, THIRD_PARTY_P2P_VIDMEM_FREE_CALLBACK *, void *); + +// Unregister memory from a third-party P2P Info object +void CliUnregisterMemoryFromThirdPartyP2P(struct Memory *pMemory); + +#endif // _THIRD_PARTY_P2P_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // 
_G_THIRD_PARTY_P2P_NVOC_H_ diff --git a/src/nvidia/generated/g_timed_sema_nvoc.c b/src/nvidia/generated/g_timed_sema_nvoc.c new file mode 100644 index 000000000..c88fed3bd --- /dev/null +++ b/src/nvidia/generated/g_timed_sema_nvoc.c @@ -0,0 +1,473 @@ +#define NVOC_TIMED_SEMA_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_timed_sema_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x335775 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_TimedSemaSwObject; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_TimedSemaSwObject(TimedSemaSwObject*, RmHalspecOwner* ); +void __nvoc_init_funcTable_TimedSemaSwObject(TimedSemaSwObject*); +NV_STATUS __nvoc_ctor_TimedSemaSwObject(TimedSemaSwObject*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_TimedSemaSwObject(TimedSemaSwObject*); +void __nvoc_dtor_TimedSemaSwObject(TimedSemaSwObject*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_TimedSemaSwObject; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_TimedSemaSwObject = { + /*pClassDef=*/ &__nvoc_class_def_TimedSemaSwObject, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_TimedSemaSwObject, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimedSemaSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimedSemaSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimedSemaSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimedSemaSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimedSemaSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_INotifier = { + 
/*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimedSemaSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimedSemaSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimedSemaSwObject_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimedSemaSwObject, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_TimedSemaSwObject = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_TimedSemaSwObject_TimedSemaSwObject, + &__nvoc_rtti_TimedSemaSwObject_ChannelDescendant, + &__nvoc_rtti_TimedSemaSwObject_Notifier, + &__nvoc_rtti_TimedSemaSwObject_INotifier, + &__nvoc_rtti_TimedSemaSwObject_GpuResource, + &__nvoc_rtti_TimedSemaSwObject_RmResource, + &__nvoc_rtti_TimedSemaSwObject_RmResourceCommon, + &__nvoc_rtti_TimedSemaSwObject_RsResource, + &__nvoc_rtti_TimedSemaSwObject_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_TimedSemaSwObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(TimedSemaSwObject), + /*classId=*/ classId(TimedSemaSwObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "TimedSemaSwObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_TimedSemaSwObject, + /*pCastInfo=*/ &__nvoc_castinfo_TimedSemaSwObject, + /*pExportInfo=*/ &__nvoc_export_info_TimedSemaSwObject +}; + +static NV_STATUS __nvoc_thunk_TimedSemaSwObject_chandesGetSwMethods(struct ChannelDescendant *pTimedSemSw, METHOD **ppMethods, NvU32 *pNumMethods) { + return tsemaGetSwMethods((struct TimedSemaSwObject *)(((unsigned char *)pTimedSemSw) - __nvoc_rtti_TimedSemaSwObject_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_tsemaCheckMemInterUnmap(struct TimedSemaSwObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_TimedSemaSwObject_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_tsemaShareCallback(struct TimedSemaSwObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimedSemaSwObject_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_tsemaAccessCallback(struct TimedSemaSwObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_tsemaMapTo(struct TimedSemaSwObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tsemaGetMapAddrSpace(struct TimedSemaSwObject *pGpuResource, struct 
CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimedSemaSwObject_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_tsemaSetNotificationShare(struct TimedSemaSwObject *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimedSemaSwObject_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_tsemaGetRefCount(struct TimedSemaSwObject *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_tsemaAddAdditionalDependants(struct RsClient *pClient, struct TimedSemaSwObject *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_tsemaControl_Prologue(struct TimedSemaSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tsemaGetRegBaseOffsetAndSize(struct TimedSemaSwObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimedSemaSwObject_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tsemaInternalControlForward(struct TimedSemaSwObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimedSemaSwObject_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_tsemaUnmapFrom(struct TimedSemaSwObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_tsemaControl_Epilogue(struct TimedSemaSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_tsemaControlLookup(struct TimedSemaSwObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_tsemaGetInternalObjectHandle(struct TimedSemaSwObject *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimedSemaSwObject_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tsemaControl(struct TimedSemaSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource 
*)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimedSemaSwObject_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tsemaUnmap(struct TimedSemaSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimedSemaSwObject_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_tsemaGetMemInterMapParams(struct TimedSemaSwObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_TimedSemaSwObject_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_tsemaGetMemoryMappingDescriptor(struct TimedSemaSwObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_TimedSemaSwObject_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_tsemaIsSwMethodStalling(struct TimedSemaSwObject *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_TimedSemaSwObject_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_tsemaControlFilter(struct TimedSemaSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_tsemaUnregisterEvent(struct TimedSemaSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimedSemaSwObject_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_tsemaCanCopy(struct TimedSemaSwObject *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_tsemaPreDestruct(struct TimedSemaSwObject *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimedSemaSwObject_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_tsemaGetNotificationListPtr(struct TimedSemaSwObject *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimedSemaSwObject_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_tsemaGetNotificationShare(struct TimedSemaSwObject *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimedSemaSwObject_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tsemaMap(struct TimedSemaSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimedSemaSwObject_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_tsemaGetOrAllocNotifShare(struct TimedSemaSwObject *pNotifier, NvHandle hNotifierClient, 
NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimedSemaSwObject_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_TimedSemaSwObject[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) tsemaCtrlCmdFlush_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90740101u, + /*paramSize=*/ sizeof(NV9074_CTRL_CMD_FLUSH_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_TimedSemaSwObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "tsemaCtrlCmdFlush" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) tsemaCtrlCmdGetTime_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90740102u, + /*paramSize=*/ sizeof(NV9074_CTRL_CMD_GET_TIME_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_TimedSemaSwObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "tsemaCtrlCmdGetTime" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) tsemaCtrlCmdRelease_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90740103u, + /*paramSize=*/ sizeof(NV9074_CTRL_CMD_RELEASE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_TimedSemaSwObject.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "tsemaCtrlCmdRelease" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_TimedSemaSwObject = +{ + /*numEntries=*/ 3, + /*pExportEntries=*/ __nvoc_exported_method_def_TimedSemaSwObject +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_TimedSemaSwObject(TimedSemaSwObject *pThis) { + __nvoc_tsemaDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_TimedSemaSwObject(TimedSemaSwObject *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, PARAM_TO_ENGDESC_FUNCTION *); +NV_STATUS __nvoc_ctor_TimedSemaSwObject(TimedSemaSwObject *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, ((void *)0)); + if (status != NV_OK) goto __nvoc_ctor_TimedSemaSwObject_fail_ChannelDescendant; + __nvoc_init_dataField_TimedSemaSwObject(pThis); + + status = __nvoc_tsemaConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_TimedSemaSwObject_fail__init; + goto __nvoc_ctor_TimedSemaSwObject_exit; // Success + +__nvoc_ctor_TimedSemaSwObject_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_TimedSemaSwObject_fail_ChannelDescendant: 
+__nvoc_ctor_TimedSemaSwObject_exit: + + return status; +} + +static void __nvoc_init_funcTable_TimedSemaSwObject_1(TimedSemaSwObject *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__tsemaGetSwMethods__ = &tsemaGetSwMethods_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__tsemaCtrlCmdFlush__ = &tsemaCtrlCmdFlush_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__tsemaCtrlCmdGetTime__ = &tsemaCtrlCmdGetTime_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__tsemaCtrlCmdRelease__ = &tsemaCtrlCmdRelease_IMPL; +#endif + + pThis->__nvoc_base_ChannelDescendant.__chandesGetSwMethods__ = &__nvoc_thunk_TimedSemaSwObject_chandesGetSwMethods; + + pThis->__tsemaCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_tsemaCheckMemInterUnmap; + + pThis->__tsemaShareCallback__ = &__nvoc_thunk_GpuResource_tsemaShareCallback; + + pThis->__tsemaAccessCallback__ = &__nvoc_thunk_RmResource_tsemaAccessCallback; + + pThis->__tsemaMapTo__ = &__nvoc_thunk_RsResource_tsemaMapTo; + + pThis->__tsemaGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_tsemaGetMapAddrSpace; + + pThis->__tsemaSetNotificationShare__ = &__nvoc_thunk_Notifier_tsemaSetNotificationShare; + + pThis->__tsemaGetRefCount__ = &__nvoc_thunk_RsResource_tsemaGetRefCount; + + pThis->__tsemaAddAdditionalDependants__ = &__nvoc_thunk_RsResource_tsemaAddAdditionalDependants; + + pThis->__tsemaControl_Prologue__ = &__nvoc_thunk_RmResource_tsemaControl_Prologue; + + pThis->__tsemaGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_tsemaGetRegBaseOffsetAndSize; + + pThis->__tsemaInternalControlForward__ = &__nvoc_thunk_GpuResource_tsemaInternalControlForward; + + pThis->__tsemaUnmapFrom__ = &__nvoc_thunk_RsResource_tsemaUnmapFrom; + + pThis->__tsemaControl_Epilogue__ = &__nvoc_thunk_RmResource_tsemaControl_Epilogue; + + pThis->__tsemaControlLookup__ = &__nvoc_thunk_RsResource_tsemaControlLookup; + + pThis->__tsemaGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_tsemaGetInternalObjectHandle; + + pThis->__tsemaControl__ = &__nvoc_thunk_GpuResource_tsemaControl; + + pThis->__tsemaUnmap__ = &__nvoc_thunk_GpuResource_tsemaUnmap; + + pThis->__tsemaGetMemInterMapParams__ = &__nvoc_thunk_RmResource_tsemaGetMemInterMapParams; + + pThis->__tsemaGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_tsemaGetMemoryMappingDescriptor; + + pThis->__tsemaIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_tsemaIsSwMethodStalling; + + pThis->__tsemaControlFilter__ = &__nvoc_thunk_RsResource_tsemaControlFilter; + + pThis->__tsemaUnregisterEvent__ = &__nvoc_thunk_Notifier_tsemaUnregisterEvent; + + pThis->__tsemaCanCopy__ = &__nvoc_thunk_RsResource_tsemaCanCopy; + + pThis->__tsemaPreDestruct__ = &__nvoc_thunk_RsResource_tsemaPreDestruct; + + pThis->__tsemaGetNotificationListPtr__ = &__nvoc_thunk_Notifier_tsemaGetNotificationListPtr; + + pThis->__tsemaGetNotificationShare__ = &__nvoc_thunk_Notifier_tsemaGetNotificationShare; + + pThis->__tsemaMap__ = &__nvoc_thunk_GpuResource_tsemaMap; + + pThis->__tsemaGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_tsemaGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_TimedSemaSwObject(TimedSemaSwObject *pThis) { + __nvoc_init_funcTable_TimedSemaSwObject_1(pThis); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_TimedSemaSwObject(TimedSemaSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_TimedSemaSwObject = pThis; + pThis->__nvoc_pbase_Object = 
&pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_TimedSemaSwObject(pThis); +} + +NV_STATUS __nvoc_objCreate_TimedSemaSwObject(TimedSemaSwObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + TimedSemaSwObject *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(TimedSemaSwObject)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(TimedSemaSwObject)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_TimedSemaSwObject); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_TimedSemaSwObject(pThis, pRmhalspecowner); + status = __nvoc_ctor_TimedSemaSwObject(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_TimedSemaSwObject_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_TimedSemaSwObject_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_TimedSemaSwObject(TimedSemaSwObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_TimedSemaSwObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_timed_sema_nvoc.h b/src/nvidia/generated/g_timed_sema_nvoc.h new file mode 100644 index 000000000..61361c2dc --- /dev/null +++ b/src/nvidia/generated/g_timed_sema_nvoc.h @@ -0,0 +1,330 @@ +#ifndef _G_TIMED_SEMA_NVOC_H_ +#define _G_TIMED_SEMA_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus 
+extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_timed_sema_nvoc.h" + +#ifndef _TIMED_SEMA_H_ +#define _TIMED_SEMA_H_ + +#include "core/core.h" +#include "kernel/gpu/fifo/channel_descendant.h" +#include "ctrl/ctrl9074.h" + +typedef struct +{ + NvU64 NotifierGPUVA; + NvU64 SemaphoreGPUVA; + NvU64 WaitTimestamp; + NvU32 ReleaseValue; + NvU32 NotifyAction; +} GF100_TIMED_SEM_ENTRY, *PGF100_TIMED_SEM_ENTRY; + +MAKE_LIST(GF100_TIMED_SEM_ENTRY_LIST, GF100_TIMED_SEM_ENTRY); + +/*! + * RM internal class representing GF100_TIMED_SEMAPHORE_SW + */ +#ifdef NVOC_TIMED_SEMA_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct TimedSemaSwObject { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct TimedSemaSwObject *__nvoc_pbase_TimedSemaSwObject; + NV_STATUS (*__tsemaGetSwMethods__)(struct TimedSemaSwObject *, METHOD **, NvU32 *); + NV_STATUS (*__tsemaCtrlCmdFlush__)(struct TimedSemaSwObject *, NV9074_CTRL_CMD_FLUSH_PARAMS *); + NV_STATUS (*__tsemaCtrlCmdGetTime__)(struct TimedSemaSwObject *, NV9074_CTRL_CMD_GET_TIME_PARAMS *); + NV_STATUS (*__tsemaCtrlCmdRelease__)(struct TimedSemaSwObject *, NV9074_CTRL_CMD_RELEASE_PARAMS *); + NV_STATUS (*__tsemaCheckMemInterUnmap__)(struct TimedSemaSwObject *, NvBool); + NvBool (*__tsemaShareCallback__)(struct TimedSemaSwObject *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__tsemaAccessCallback__)(struct TimedSemaSwObject *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__tsemaMapTo__)(struct TimedSemaSwObject *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__tsemaGetMapAddrSpace__)(struct TimedSemaSwObject *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__tsemaSetNotificationShare__)(struct TimedSemaSwObject *, struct NotifShare *); + NvU32 (*__tsemaGetRefCount__)(struct TimedSemaSwObject *); + void 
(*__tsemaAddAdditionalDependants__)(struct RsClient *, struct TimedSemaSwObject *, RsResourceRef *); + NV_STATUS (*__tsemaControl_Prologue__)(struct TimedSemaSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tsemaGetRegBaseOffsetAndSize__)(struct TimedSemaSwObject *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__tsemaInternalControlForward__)(struct TimedSemaSwObject *, NvU32, void *, NvU32); + NV_STATUS (*__tsemaUnmapFrom__)(struct TimedSemaSwObject *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__tsemaControl_Epilogue__)(struct TimedSemaSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tsemaControlLookup__)(struct TimedSemaSwObject *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__tsemaGetInternalObjectHandle__)(struct TimedSemaSwObject *); + NV_STATUS (*__tsemaControl__)(struct TimedSemaSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tsemaUnmap__)(struct TimedSemaSwObject *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__tsemaGetMemInterMapParams__)(struct TimedSemaSwObject *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__tsemaGetMemoryMappingDescriptor__)(struct TimedSemaSwObject *, struct MEMORY_DESCRIPTOR **); + NvBool (*__tsemaIsSwMethodStalling__)(struct TimedSemaSwObject *, NvU32); + NV_STATUS (*__tsemaControlFilter__)(struct TimedSemaSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tsemaUnregisterEvent__)(struct TimedSemaSwObject *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__tsemaCanCopy__)(struct TimedSemaSwObject *); + void (*__tsemaPreDestruct__)(struct TimedSemaSwObject *); + PEVENTNOTIFICATION *(*__tsemaGetNotificationListPtr__)(struct TimedSemaSwObject *); + struct NotifShare *(*__tsemaGetNotificationShare__)(struct TimedSemaSwObject *); + NV_STATUS (*__tsemaMap__)(struct TimedSemaSwObject *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__tsemaGetOrAllocNotifShare__)(struct TimedSemaSwObject *, NvHandle, NvHandle, struct NotifShare **); + NvU32 NotifierHi; + NvU32 NotifierLo; + NvU32 SemaphoreHi; + NvU32 SemaphoreLo; + NvU32 WaitTimestampHi; + NvU32 WaitTimestampLo; + NvU32 ReleaseValue; + NvU32 Flags; + NvU64 NotifierGPUVA; + NvU64 SemaphoreGPUVA; + NvU64 WaitTimestamp; + NvU64 FlushLimitTimestamp; + GF100_TIMED_SEM_ENTRY_LIST entryList; +}; + +#ifndef __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ +#define __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ +typedef struct TimedSemaSwObject TimedSemaSwObject; +#endif /* __NVOC_CLASS_TimedSemaSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_TimedSemaSwObject +#define __nvoc_class_id_TimedSemaSwObject 0x335775 +#endif /* __nvoc_class_id_TimedSemaSwObject */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_TimedSemaSwObject; + +#define __staticCast_TimedSemaSwObject(pThis) \ + ((pThis)->__nvoc_pbase_TimedSemaSwObject) + +#ifdef __nvoc_timed_sema_h_disabled +#define __dynamicCast_TimedSemaSwObject(pThis) ((TimedSemaSwObject*)NULL) +#else //__nvoc_timed_sema_h_disabled +#define __dynamicCast_TimedSemaSwObject(pThis) \ + ((TimedSemaSwObject*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(TimedSemaSwObject))) +#endif //__nvoc_timed_sema_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_TimedSemaSwObject(TimedSemaSwObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_TimedSemaSwObject(TimedSemaSwObject**, Dynamic*, NvU32, 
struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_TimedSemaSwObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_TimedSemaSwObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define tsemaGetSwMethods(pTimedSemSw, ppMethods, pNumMethods) tsemaGetSwMethods_DISPATCH(pTimedSemSw, ppMethods, pNumMethods) +#define tsemaCtrlCmdFlush(pTimedSemaSwObject, pFlushParams) tsemaCtrlCmdFlush_DISPATCH(pTimedSemaSwObject, pFlushParams) +#define tsemaCtrlCmdGetTime(pTimedSemaSwObject, pGetTimeParams) tsemaCtrlCmdGetTime_DISPATCH(pTimedSemaSwObject, pGetTimeParams) +#define tsemaCtrlCmdRelease(pTimedSemaSwObject, pReleaseParams) tsemaCtrlCmdRelease_DISPATCH(pTimedSemaSwObject, pReleaseParams) +#define tsemaCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) tsemaCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define tsemaShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) tsemaShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define tsemaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) tsemaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define tsemaMapTo(pResource, pParams) tsemaMapTo_DISPATCH(pResource, pParams) +#define tsemaGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) tsemaGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define tsemaSetNotificationShare(pNotifier, pNotifShare) tsemaSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define tsemaGetRefCount(pResource) tsemaGetRefCount_DISPATCH(pResource) +#define tsemaAddAdditionalDependants(pClient, pResource, pReference) tsemaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define tsemaControl_Prologue(pResource, pCallContext, pParams) tsemaControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define tsemaGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) tsemaGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define tsemaInternalControlForward(pGpuResource, command, pParams, size) tsemaInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define tsemaUnmapFrom(pResource, pParams) tsemaUnmapFrom_DISPATCH(pResource, pParams) +#define tsemaControl_Epilogue(pResource, pCallContext, pParams) tsemaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define tsemaControlLookup(pResource, pParams, ppEntry) tsemaControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define tsemaGetInternalObjectHandle(pGpuResource) tsemaGetInternalObjectHandle_DISPATCH(pGpuResource) +#define tsemaControl(pGpuResource, pCallContext, pParams) tsemaControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define tsemaUnmap(pGpuResource, pCallContext, pCpuMapping) tsemaUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define tsemaGetMemInterMapParams(pRmResource, pParams) tsemaGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define tsemaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) tsemaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define tsemaIsSwMethodStalling(pChannelDescendant, hHandle) tsemaIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define tsemaControlFilter(pResource, pCallContext, pParams) tsemaControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define tsemaUnregisterEvent(pNotifier, 
hNotifierClient, hNotifierResource, hEventClient, hEvent) tsemaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define tsemaCanCopy(pResource) tsemaCanCopy_DISPATCH(pResource) +#define tsemaPreDestruct(pResource) tsemaPreDestruct_DISPATCH(pResource) +#define tsemaGetNotificationListPtr(pNotifier) tsemaGetNotificationListPtr_DISPATCH(pNotifier) +#define tsemaGetNotificationShare(pNotifier) tsemaGetNotificationShare_DISPATCH(pNotifier) +#define tsemaMap(pGpuResource, pCallContext, pParams, pCpuMapping) tsemaMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define tsemaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) tsemaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +NV_STATUS tsemaGetSwMethods_IMPL(struct TimedSemaSwObject *pTimedSemSw, METHOD **ppMethods, NvU32 *pNumMethods); + +static inline NV_STATUS tsemaGetSwMethods_DISPATCH(struct TimedSemaSwObject *pTimedSemSw, METHOD **ppMethods, NvU32 *pNumMethods) { + return pTimedSemSw->__tsemaGetSwMethods__(pTimedSemSw, ppMethods, pNumMethods); +} + +NV_STATUS tsemaCtrlCmdFlush_IMPL(struct TimedSemaSwObject *pTimedSemaSwObject, NV9074_CTRL_CMD_FLUSH_PARAMS *pFlushParams); + +static inline NV_STATUS tsemaCtrlCmdFlush_DISPATCH(struct TimedSemaSwObject *pTimedSemaSwObject, NV9074_CTRL_CMD_FLUSH_PARAMS *pFlushParams) { + return pTimedSemaSwObject->__tsemaCtrlCmdFlush__(pTimedSemaSwObject, pFlushParams); +} + +NV_STATUS tsemaCtrlCmdGetTime_IMPL(struct TimedSemaSwObject *pTimedSemaSwObject, NV9074_CTRL_CMD_GET_TIME_PARAMS *pGetTimeParams); + +static inline NV_STATUS tsemaCtrlCmdGetTime_DISPATCH(struct TimedSemaSwObject *pTimedSemaSwObject, NV9074_CTRL_CMD_GET_TIME_PARAMS *pGetTimeParams) { + return pTimedSemaSwObject->__tsemaCtrlCmdGetTime__(pTimedSemaSwObject, pGetTimeParams); +} + +NV_STATUS tsemaCtrlCmdRelease_IMPL(struct TimedSemaSwObject *pTimedSemaSwObject, NV9074_CTRL_CMD_RELEASE_PARAMS *pReleaseParams); + +static inline NV_STATUS tsemaCtrlCmdRelease_DISPATCH(struct TimedSemaSwObject *pTimedSemaSwObject, NV9074_CTRL_CMD_RELEASE_PARAMS *pReleaseParams) { + return pTimedSemaSwObject->__tsemaCtrlCmdRelease__(pTimedSemaSwObject, pReleaseParams); +} + +static inline NV_STATUS tsemaCheckMemInterUnmap_DISPATCH(struct TimedSemaSwObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__tsemaCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool tsemaShareCallback_DISPATCH(struct TimedSemaSwObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__tsemaShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool tsemaAccessCallback_DISPATCH(struct TimedSemaSwObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__tsemaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS tsemaMapTo_DISPATCH(struct TimedSemaSwObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__tsemaMapTo__(pResource, pParams); +} + +static inline NV_STATUS tsemaGetMapAddrSpace_DISPATCH(struct TimedSemaSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__tsemaGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + 
+static inline void tsemaSetNotificationShare_DISPATCH(struct TimedSemaSwObject *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__tsemaSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 tsemaGetRefCount_DISPATCH(struct TimedSemaSwObject *pResource) { + return pResource->__tsemaGetRefCount__(pResource); +} + +static inline void tsemaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct TimedSemaSwObject *pResource, RsResourceRef *pReference) { + pResource->__tsemaAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS tsemaControl_Prologue_DISPATCH(struct TimedSemaSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__tsemaControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tsemaGetRegBaseOffsetAndSize_DISPATCH(struct TimedSemaSwObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__tsemaGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS tsemaInternalControlForward_DISPATCH(struct TimedSemaSwObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__tsemaInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS tsemaUnmapFrom_DISPATCH(struct TimedSemaSwObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__tsemaUnmapFrom__(pResource, pParams); +} + +static inline void tsemaControl_Epilogue_DISPATCH(struct TimedSemaSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__tsemaControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tsemaControlLookup_DISPATCH(struct TimedSemaSwObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__tsemaControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle tsemaGetInternalObjectHandle_DISPATCH(struct TimedSemaSwObject *pGpuResource) { + return pGpuResource->__tsemaGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS tsemaControl_DISPATCH(struct TimedSemaSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__tsemaControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS tsemaUnmap_DISPATCH(struct TimedSemaSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__tsemaUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS tsemaGetMemInterMapParams_DISPATCH(struct TimedSemaSwObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__tsemaGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS tsemaGetMemoryMappingDescriptor_DISPATCH(struct TimedSemaSwObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__tsemaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool tsemaIsSwMethodStalling_DISPATCH(struct TimedSemaSwObject *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__tsemaIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS tsemaControlFilter_DISPATCH(struct TimedSemaSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL 
*pParams) { + return pResource->__tsemaControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tsemaUnregisterEvent_DISPATCH(struct TimedSemaSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__tsemaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool tsemaCanCopy_DISPATCH(struct TimedSemaSwObject *pResource) { + return pResource->__tsemaCanCopy__(pResource); +} + +static inline void tsemaPreDestruct_DISPATCH(struct TimedSemaSwObject *pResource) { + pResource->__tsemaPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *tsemaGetNotificationListPtr_DISPATCH(struct TimedSemaSwObject *pNotifier) { + return pNotifier->__tsemaGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *tsemaGetNotificationShare_DISPATCH(struct TimedSemaSwObject *pNotifier) { + return pNotifier->__tsemaGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS tsemaMap_DISPATCH(struct TimedSemaSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__tsemaMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS tsemaGetOrAllocNotifShare_DISPATCH(struct TimedSemaSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__tsemaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS tsemaConstruct_IMPL(struct TimedSemaSwObject *arg_pTimedSemSw, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_tsemaConstruct(arg_pTimedSemSw, arg_pCallContext, arg_pParams) tsemaConstruct_IMPL(arg_pTimedSemSw, arg_pCallContext, arg_pParams) +void tsemaDestruct_IMPL(struct TimedSemaSwObject *pTimedSemSw); +#define __nvoc_tsemaDestruct(pTimedSemSw) tsemaDestruct_IMPL(pTimedSemSw) +#undef PRIVATE_FIELD + + +// RS-TODO: Delete. Keeping old typedef for transition. 
+typedef struct TimedSemaSwObject *PGF100_TIMED_SEM_SW_OBJECT; + +#endif // _TIMED_SEMA_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_TIMED_SEMA_NVOC_H_ diff --git a/src/nvidia/generated/g_tmr_nvoc.c b/src/nvidia/generated/g_tmr_nvoc.c new file mode 100644 index 000000000..e6f048f2f --- /dev/null +++ b/src/nvidia/generated/g_tmr_nvoc.c @@ -0,0 +1,417 @@ +#define NVOC_TMR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_tmr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xb13ac4 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +void __nvoc_init_TimerApi(TimerApi*); +void __nvoc_init_funcTable_TimerApi(TimerApi*); +NV_STATUS __nvoc_ctor_TimerApi(TimerApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_TimerApi(TimerApi*); +void __nvoc_dtor_TimerApi(TimerApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_TimerApi; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_TimerApi = { + /*pClassDef=*/ &__nvoc_class_def_TimerApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_TimerApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_TimerApi_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(TimerApi, __nvoc_base_Notifier), +}; + +static const 
struct NVOC_CASTINFO __nvoc_castinfo_TimerApi = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_TimerApi_TimerApi, + &__nvoc_rtti_TimerApi_Notifier, + &__nvoc_rtti_TimerApi_INotifier, + &__nvoc_rtti_TimerApi_GpuResource, + &__nvoc_rtti_TimerApi_RmResource, + &__nvoc_rtti_TimerApi_RmResourceCommon, + &__nvoc_rtti_TimerApi_RsResource, + &__nvoc_rtti_TimerApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(TimerApi), + /*classId=*/ classId(TimerApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "TimerApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_TimerApi, + /*pCastInfo=*/ &__nvoc_castinfo_TimerApi, + /*pExportInfo=*/ &__nvoc_export_info_TimerApi +}; + +static NV_STATUS __nvoc_thunk_TimerApi_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return tmrapiGetRegBaseOffsetAndSize((struct TimerApi *)(((unsigned char *)pTimerApi) - __nvoc_rtti_TimerApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_GpuResource_tmrapiShareCallback(struct TimerApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_tmrapiMapTo(struct TimerApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_tmrapiGetOrAllocNotifShare(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +static NV_STATUS __nvoc_thunk_RmResource_tmrapiCheckMemInterUnmap(struct TimerApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_TimerApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiGetMapAddrSpace(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_tmrapiSetNotificationShare(struct TimerApi *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_tmrapiGetRefCount(struct TimerApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_tmrapiAddAdditionalDependants(struct RsClient *pClient, struct TimerApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pReference); +} + +static NV_STATUS 
__nvoc_thunk_RmResource_tmrapiControl_Prologue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiInternalControlForward(struct TimerApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_tmrapiUnmapFrom(struct TimerApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_tmrapiControl_Epilogue(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_tmrapiControlLookup(struct TimerApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_tmrapiGetInternalObjectHandle(struct TimerApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiControl(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiUnmap(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_tmrapiGetMemInterMapParams(struct TimerApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_TimerApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_tmrapiGetMemoryMappingDescriptor(struct TimerApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_TimerApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_tmrapiControlFilter(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_tmrapiUnregisterEvent(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct 
Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_tmrapiCanCopy(struct TimerApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_tmrapiPreDestruct(struct TimerApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_tmrapiGetNotificationListPtr(struct TimerApi *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_tmrapiGetNotificationShare(struct TimerApi *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_TimerApi_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_tmrapiMap(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_TimerApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_tmrapiAccessCallback(struct TimerApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_TimerApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_TimerApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) tmrapiCtrlCmdTmrSetAlarmNotify_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + /*flags=*/ 0x10u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x40110u, + /*paramSize=*/ sizeof(NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_TimerApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "tmrapiCtrlCmdTmrSetAlarmNotify" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_TimerApi = +{ + /*numEntries=*/ 1, + /*pExportEntries=*/ __nvoc_exported_method_def_TimerApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_Notifier(Notifier*); +void __nvoc_dtor_TimerApi(TimerApi *pThis) { + __nvoc_tmrapiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_TimerApi(TimerApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *); +NV_STATUS __nvoc_ctor_TimerApi(TimerApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail_GpuResource; + status = 
__nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext); + if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail_Notifier; + __nvoc_init_dataField_TimerApi(pThis); + + status = __nvoc_tmrapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_TimerApi_fail__init; + goto __nvoc_ctor_TimerApi_exit; // Success + +__nvoc_ctor_TimerApi_fail__init: + __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier); +__nvoc_ctor_TimerApi_fail_Notifier: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_TimerApi_fail_GpuResource: +__nvoc_ctor_TimerApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_TimerApi_1(TimerApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__tmrapiGetRegBaseOffsetAndSize__ = &tmrapiGetRegBaseOffsetAndSize_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u) + pThis->__tmrapiCtrlCmdTmrSetAlarmNotify__ = &tmrapiCtrlCmdTmrSetAlarmNotify_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_TimerApi_gpuresGetRegBaseOffsetAndSize; + + pThis->__tmrapiShareCallback__ = &__nvoc_thunk_GpuResource_tmrapiShareCallback; + + pThis->__tmrapiMapTo__ = &__nvoc_thunk_RsResource_tmrapiMapTo; + + pThis->__tmrapiGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_tmrapiGetOrAllocNotifShare; + + pThis->__tmrapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_tmrapiCheckMemInterUnmap; + + pThis->__tmrapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_tmrapiGetMapAddrSpace; + + pThis->__tmrapiSetNotificationShare__ = &__nvoc_thunk_Notifier_tmrapiSetNotificationShare; + + pThis->__tmrapiGetRefCount__ = &__nvoc_thunk_RsResource_tmrapiGetRefCount; + + pThis->__tmrapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_tmrapiAddAdditionalDependants; + + pThis->__tmrapiControl_Prologue__ = &__nvoc_thunk_RmResource_tmrapiControl_Prologue; + + pThis->__tmrapiInternalControlForward__ = &__nvoc_thunk_GpuResource_tmrapiInternalControlForward; + + pThis->__tmrapiUnmapFrom__ = &__nvoc_thunk_RsResource_tmrapiUnmapFrom; + + pThis->__tmrapiControl_Epilogue__ = &__nvoc_thunk_RmResource_tmrapiControl_Epilogue; + + pThis->__tmrapiControlLookup__ = &__nvoc_thunk_RsResource_tmrapiControlLookup; + + pThis->__tmrapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_tmrapiGetInternalObjectHandle; + + pThis->__tmrapiControl__ = &__nvoc_thunk_GpuResource_tmrapiControl; + + pThis->__tmrapiUnmap__ = &__nvoc_thunk_GpuResource_tmrapiUnmap; + + pThis->__tmrapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_tmrapiGetMemInterMapParams; + + pThis->__tmrapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_tmrapiGetMemoryMappingDescriptor; + + pThis->__tmrapiControlFilter__ = &__nvoc_thunk_RsResource_tmrapiControlFilter; + + pThis->__tmrapiUnregisterEvent__ = &__nvoc_thunk_Notifier_tmrapiUnregisterEvent; + + pThis->__tmrapiCanCopy__ = &__nvoc_thunk_RsResource_tmrapiCanCopy; + + pThis->__tmrapiPreDestruct__ = &__nvoc_thunk_RsResource_tmrapiPreDestruct; + + pThis->__tmrapiGetNotificationListPtr__ = &__nvoc_thunk_Notifier_tmrapiGetNotificationListPtr; + + pThis->__tmrapiGetNotificationShare__ = &__nvoc_thunk_Notifier_tmrapiGetNotificationShare; + + pThis->__tmrapiMap__ = &__nvoc_thunk_GpuResource_tmrapiMap; + + pThis->__tmrapiAccessCallback__ = &__nvoc_thunk_RmResource_tmrapiAccessCallback; +} + +void __nvoc_init_funcTable_TimerApi(TimerApi *pThis) { + __nvoc_init_funcTable_TimerApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_Notifier(Notifier*); +void 
__nvoc_init_TimerApi(TimerApi *pThis) { + pThis->__nvoc_pbase_TimerApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier); + __nvoc_init_funcTable_TimerApi(pThis); +} + +NV_STATUS __nvoc_objCreate_TimerApi(TimerApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + TimerApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(TimerApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(TimerApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_TimerApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_TimerApi(pThis); + status = __nvoc_ctor_TimerApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_TimerApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_TimerApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_TimerApi(TimerApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_TimerApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_tmr_nvoc.h b/src/nvidia/generated/g_tmr_nvoc.h new file mode 100644 index 000000000..21be57f28 --- /dev/null +++ b/src/nvidia/generated/g_tmr_nvoc.h @@ -0,0 +1,332 @@ +#ifndef _G_TMR_NVOC_H_ +#define _G_TMR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_tmr_nvoc.h" + +#ifndef _TMR_H_ +#define _TMR_H_ + +/****************************** Timer Module *******************************\ +* * +* Module: TMR.H * +* Timer functions. * +* * +****************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu_resource.h" +#include "rmapi/event.h" + +#include "ctrl/ctrl0004.h" + +typedef struct OBJTMR *POBJTMR; + +#ifndef __NVOC_CLASS_OBJTMR_TYPEDEF__ +#define __NVOC_CLASS_OBJTMR_TYPEDEF__ +typedef struct OBJTMR OBJTMR; +#endif /* __NVOC_CLASS_OBJTMR_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJTMR +#define __nvoc_class_id_OBJTMR 0x9ddede +#endif /* __nvoc_class_id_OBJTMR */ + + + +//--------------------------------------------------------------------------- +// +// Time objects. +// +//--------------------------------------------------------------------------- + +#define TIMER_STATE_IDLE 0 +#define TIMER_STATE_BUSY 1 + +// Opaque callback memory type for interfacing the scheduling API +typedef struct TMR_EVENT *PTMR_EVENT; +typedef struct TMR_EVENT TMR_EVENT; + +typedef NV_STATUS (*TIMEPROC)(OBJGPU *, struct OBJTMR *, PTMR_EVENT); +typedef NV_STATUS (*TIMEPROC_OBSOLETE)(OBJGPU *, struct OBJTMR *, void *); +typedef NV_STATUS (*TIMEPROC_COUNTDOWN)(OBJGPU *, THREAD_STATE_NODE *); + +/*! 
+ * RM internal class representing NV01_TIMER (child of SubDevice) + */ +#ifdef NVOC_TMR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct TimerApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Notifier __nvoc_base_Notifier; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct TimerApi *__nvoc_pbase_TimerApi; + NV_STATUS (*__tmrapiGetRegBaseOffsetAndSize__)(struct TimerApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__tmrapiCtrlCmdTmrSetAlarmNotify__)(struct TimerApi *, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *); + NvBool (*__tmrapiShareCallback__)(struct TimerApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__tmrapiMapTo__)(struct TimerApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__tmrapiGetOrAllocNotifShare__)(struct TimerApi *, NvHandle, NvHandle, struct NotifShare **); + NV_STATUS (*__tmrapiCheckMemInterUnmap__)(struct TimerApi *, NvBool); + NV_STATUS (*__tmrapiGetMapAddrSpace__)(struct TimerApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__tmrapiSetNotificationShare__)(struct TimerApi *, struct NotifShare *); + NvU32 (*__tmrapiGetRefCount__)(struct TimerApi *); + void (*__tmrapiAddAdditionalDependants__)(struct RsClient *, struct TimerApi *, RsResourceRef *); + NV_STATUS (*__tmrapiControl_Prologue__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tmrapiInternalControlForward__)(struct TimerApi *, NvU32, void *, NvU32); + NV_STATUS (*__tmrapiUnmapFrom__)(struct TimerApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__tmrapiControl_Epilogue__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tmrapiControlLookup__)(struct TimerApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__tmrapiGetInternalObjectHandle__)(struct TimerApi *); + NV_STATUS (*__tmrapiControl__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tmrapiUnmap__)(struct TimerApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__tmrapiGetMemInterMapParams__)(struct TimerApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__tmrapiGetMemoryMappingDescriptor__)(struct TimerApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__tmrapiControlFilter__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__tmrapiUnregisterEvent__)(struct TimerApi *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__tmrapiCanCopy__)(struct TimerApi *); + void (*__tmrapiPreDestruct__)(struct TimerApi *); + PEVENTNOTIFICATION *(*__tmrapiGetNotificationListPtr__)(struct TimerApi *); + struct NotifShare *(*__tmrapiGetNotificationShare__)(struct TimerApi *); + NV_STATUS (*__tmrapiMap__)(struct TimerApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__tmrapiAccessCallback__)(struct TimerApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_TimerApi_TYPEDEF__ +#define __NVOC_CLASS_TimerApi_TYPEDEF__ +typedef struct TimerApi TimerApi; +#endif /* 
__NVOC_CLASS_TimerApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_TimerApi +#define __nvoc_class_id_TimerApi 0xb13ac4 +#endif /* __nvoc_class_id_TimerApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_TimerApi; + +#define __staticCast_TimerApi(pThis) \ + ((pThis)->__nvoc_pbase_TimerApi) + +#ifdef __nvoc_tmr_h_disabled +#define __dynamicCast_TimerApi(pThis) ((TimerApi*)NULL) +#else //__nvoc_tmr_h_disabled +#define __dynamicCast_TimerApi(pThis) \ + ((TimerApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(TimerApi))) +#endif //__nvoc_tmr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_TimerApi(TimerApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_TimerApi(TimerApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_TimerApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_TimerApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define tmrapiGetRegBaseOffsetAndSize(pTimerApi, pGpu, pOffset, pSize) tmrapiGetRegBaseOffsetAndSize_DISPATCH(pTimerApi, pGpu, pOffset, pSize) +#define tmrapiCtrlCmdTmrSetAlarmNotify(pTimerApi, pParams) tmrapiCtrlCmdTmrSetAlarmNotify_DISPATCH(pTimerApi, pParams) +#define tmrapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) tmrapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define tmrapiMapTo(pResource, pParams) tmrapiMapTo_DISPATCH(pResource, pParams) +#define tmrapiGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) tmrapiGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +#define tmrapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) tmrapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define tmrapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) tmrapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define tmrapiSetNotificationShare(pNotifier, pNotifShare) tmrapiSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define tmrapiGetRefCount(pResource) tmrapiGetRefCount_DISPATCH(pResource) +#define tmrapiAddAdditionalDependants(pClient, pResource, pReference) tmrapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define tmrapiControl_Prologue(pResource, pCallContext, pParams) tmrapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiInternalControlForward(pGpuResource, command, pParams, size) tmrapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define tmrapiUnmapFrom(pResource, pParams) tmrapiUnmapFrom_DISPATCH(pResource, pParams) +#define tmrapiControl_Epilogue(pResource, pCallContext, pParams) tmrapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiControlLookup(pResource, pParams, ppEntry) tmrapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define tmrapiGetInternalObjectHandle(pGpuResource) tmrapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define tmrapiControl(pGpuResource, pCallContext, pParams) tmrapiControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define tmrapiUnmap(pGpuResource, pCallContext, pCpuMapping) tmrapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define tmrapiGetMemInterMapParams(pRmResource, pParams) tmrapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define tmrapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) 
tmrapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define tmrapiControlFilter(pResource, pCallContext, pParams) tmrapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define tmrapiUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) tmrapiUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define tmrapiCanCopy(pResource) tmrapiCanCopy_DISPATCH(pResource) +#define tmrapiPreDestruct(pResource) tmrapiPreDestruct_DISPATCH(pResource) +#define tmrapiGetNotificationListPtr(pNotifier) tmrapiGetNotificationListPtr_DISPATCH(pNotifier) +#define tmrapiGetNotificationShare(pNotifier) tmrapiGetNotificationShare_DISPATCH(pNotifier) +#define tmrapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) tmrapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define tmrapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) tmrapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS tmrapiGetRegBaseOffsetAndSize_IMPL(struct TimerApi *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize); + +static inline NV_STATUS tmrapiGetRegBaseOffsetAndSize_DISPATCH(struct TimerApi *pTimerApi, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pTimerApi->__tmrapiGetRegBaseOffsetAndSize__(pTimerApi, pGpu, pOffset, pSize); +} + +NV_STATUS tmrapiCtrlCmdTmrSetAlarmNotify_IMPL(struct TimerApi *pTimerApi, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams); + +static inline NV_STATUS tmrapiCtrlCmdTmrSetAlarmNotify_DISPATCH(struct TimerApi *pTimerApi, NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams) { + return pTimerApi->__tmrapiCtrlCmdTmrSetAlarmNotify__(pTimerApi, pParams); +} + +static inline NvBool tmrapiShareCallback_DISPATCH(struct TimerApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__tmrapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS tmrapiMapTo_DISPATCH(struct TimerApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__tmrapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS tmrapiGetOrAllocNotifShare_DISPATCH(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__tmrapiGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +static inline NV_STATUS tmrapiCheckMemInterUnmap_DISPATCH(struct TimerApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__tmrapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS tmrapiGetMapAddrSpace_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__tmrapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void tmrapiSetNotificationShare_DISPATCH(struct TimerApi *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__tmrapiSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 tmrapiGetRefCount_DISPATCH(struct TimerApi *pResource) { + return pResource->__tmrapiGetRefCount__(pResource); +} + +static inline void tmrapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct TimerApi *pResource, RsResourceRef *pReference) { + pResource->__tmrapiAddAdditionalDependants__(pClient, pResource, 
pReference); +} + +static inline NV_STATUS tmrapiControl_Prologue_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__tmrapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiInternalControlForward_DISPATCH(struct TimerApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__tmrapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS tmrapiUnmapFrom_DISPATCH(struct TimerApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__tmrapiUnmapFrom__(pResource, pParams); +} + +static inline void tmrapiControl_Epilogue_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__tmrapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiControlLookup_DISPATCH(struct TimerApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__tmrapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle tmrapiGetInternalObjectHandle_DISPATCH(struct TimerApi *pGpuResource) { + return pGpuResource->__tmrapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS tmrapiControl_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__tmrapiControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiUnmap_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__tmrapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS tmrapiGetMemInterMapParams_DISPATCH(struct TimerApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__tmrapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS tmrapiGetMemoryMappingDescriptor_DISPATCH(struct TimerApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__tmrapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS tmrapiControlFilter_DISPATCH(struct TimerApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__tmrapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS tmrapiUnregisterEvent_DISPATCH(struct TimerApi *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__tmrapiUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool tmrapiCanCopy_DISPATCH(struct TimerApi *pResource) { + return pResource->__tmrapiCanCopy__(pResource); +} + +static inline void tmrapiPreDestruct_DISPATCH(struct TimerApi *pResource) { + pResource->__tmrapiPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *tmrapiGetNotificationListPtr_DISPATCH(struct TimerApi *pNotifier) { + return pNotifier->__tmrapiGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *tmrapiGetNotificationShare_DISPATCH(struct TimerApi *pNotifier) { + return pNotifier->__tmrapiGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS tmrapiMap_DISPATCH(struct TimerApi *pGpuResource, struct CALL_CONTEXT *pCallContext, 
struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__tmrapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool tmrapiAccessCallback_DISPATCH(struct TimerApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__tmrapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS tmrapiConstruct_IMPL(struct TimerApi *arg_pTimerApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_tmrapiConstruct(arg_pTimerApi, arg_pCallContext, arg_pParams) tmrapiConstruct_IMPL(arg_pTimerApi, arg_pCallContext, arg_pParams) +void tmrapiDestruct_IMPL(struct TimerApi *pTimerApi); +#define __nvoc_tmrapiDestruct(pTimerApi) tmrapiDestruct_IMPL(pTimerApi) +void tmrapiDeregisterEvents_IMPL(struct TimerApi *pTimerApi); +#ifdef __nvoc_tmr_h_disabled +static inline void tmrapiDeregisterEvents(struct TimerApi *pTimerApi) { + NV_ASSERT_FAILED_PRECOMP("TimerApi was disabled!"); +} +#else //__nvoc_tmr_h_disabled +#define tmrapiDeregisterEvents(pTimerApi) tmrapiDeregisterEvents_IMPL(pTimerApi) +#endif //__nvoc_tmr_h_disabled + +#undef PRIVATE_FIELD + + + +//--------------------------------------------------------------------------- +// +// Function prototypes. +// +//--------------------------------------------------------------------------- + + +#endif // _TMR_H_ + + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_TMR_NVOC_H_ diff --git a/src/nvidia/generated/g_traceable_nvoc.c b/src/nvidia/generated/g_traceable_nvoc.c new file mode 100644 index 000000000..f00b9ffb1 --- /dev/null +++ b/src/nvidia/generated/g_traceable_nvoc.c @@ -0,0 +1,87 @@ +#define NVOC_TRACEABLE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_traceable_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x6305d2 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_funcTable_OBJTRACEABLE(OBJTRACEABLE*); +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_init_dataField_OBJTRACEABLE(OBJTRACEABLE*); +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTRACEABLE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJTRACEABLE_OBJTRACEABLE = { + /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJTRACEABLE, + /*offset=*/ 0, +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJTRACEABLE = { + /*numRelatives=*/ 1, + /*relatives=*/ { + &__nvoc_rtti_OBJTRACEABLE_OBJTRACEABLE, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJTRACEABLE), + /*classId=*/ classId(OBJTRACEABLE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJTRACEABLE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJTRACEABLE, + /*pExportInfo=*/ &__nvoc_export_info_OBJTRACEABLE +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTRACEABLE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void 
__nvoc_init_dataField_OBJTRACEABLE(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE *pThis) { + NV_STATUS status = NV_OK; + __nvoc_init_dataField_OBJTRACEABLE(pThis); + goto __nvoc_ctor_OBJTRACEABLE_exit; // Success + +__nvoc_ctor_OBJTRACEABLE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJTRACEABLE_1(OBJTRACEABLE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJTRACEABLE(OBJTRACEABLE *pThis) { + __nvoc_init_funcTable_OBJTRACEABLE_1(pThis); +} + +void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE *pThis) { + pThis->__nvoc_pbase_OBJTRACEABLE = pThis; + __nvoc_init_funcTable_OBJTRACEABLE(pThis); +} + diff --git a/src/nvidia/generated/g_traceable_nvoc.h b/src/nvidia/generated/g_traceable_nvoc.h new file mode 100644 index 000000000..0e59e39e1 --- /dev/null +++ b/src/nvidia/generated/g_traceable_nvoc.h @@ -0,0 +1,87 @@ +#ifndef _G_TRACEABLE_NVOC_H_ +#define _G_TRACEABLE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "g_traceable_nvoc.h" + +#ifndef __ANCI_TRACEABLE_H__ +#define __ANCI_TRACEABLE_H__ + +#include "core/core.h" + +#ifdef NVOC_TRACEABLE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJTRACEABLE { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJTRACEABLE *__nvoc_pbase_OBJTRACEABLE; +}; + +#ifndef __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ +#define __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ +typedef struct OBJTRACEABLE OBJTRACEABLE; +#endif /* __NVOC_CLASS_OBJTRACEABLE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJTRACEABLE +#define __nvoc_class_id_OBJTRACEABLE 0x6305d2 +#endif /* __nvoc_class_id_OBJTRACEABLE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE; + +#define __staticCast_OBJTRACEABLE(pThis) \ + ((pThis)->__nvoc_pbase_OBJTRACEABLE) + +#ifdef __nvoc_traceable_h_disabled +#define __dynamicCast_OBJTRACEABLE(pThis) ((OBJTRACEABLE*)NULL) +#else //__nvoc_traceable_h_disabled +#define __dynamicCast_OBJTRACEABLE(pThis) \ + ((OBJTRACEABLE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJTRACEABLE))) +#endif //__nvoc_traceable_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJTRACEABLE(OBJTRACEABLE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJTRACEABLE(OBJTRACEABLE**, Dynamic*, NvU32); +#define __objCreate_OBJTRACEABLE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJTRACEABLE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#undef PRIVATE_FIELD + + +void objTraverseCaptureState_IMPL(struct Object *pObj); +#define objTraverseCaptureState(p) objTraverseCaptureState_IMPL(staticCast((p), Object)) + +#endif // __ANCI_TRACEABLE_H__ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_TRACEABLE_NVOC_H_ diff --git a/src/nvidia/generated/g_usermode_api_nvoc.c b/src/nvidia/generated/g_usermode_api_nvoc.c new file mode 100644 index 000000000..1692c5871 --- /dev/null +++ b/src/nvidia/generated/g_usermode_api_nvoc.c @@ -0,0 +1,338 @@ +#define NVOC_USERMODE_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_usermode_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x6f57ec = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserModeApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +void __nvoc_init_UserModeApi(UserModeApi*, RmHalspecOwner* ); +void __nvoc_init_funcTable_UserModeApi(UserModeApi*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_UserModeApi(UserModeApi*, RmHalspecOwner* , CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_UserModeApi(UserModeApi*, RmHalspecOwner* ); +void __nvoc_dtor_UserModeApi(UserModeApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_UserModeApi; + +static const struct NVOC_RTTI __nvoc_rtti_UserModeApi_UserModeApi = { + /*pClassDef=*/ &__nvoc_class_def_UserModeApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_UserModeApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserModeApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ 
&__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserModeApi, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserModeApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserModeApi, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserModeApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserModeApi, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserModeApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserModeApi, __nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UserModeApi_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UserModeApi, __nvoc_base_Memory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_UserModeApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_UserModeApi_UserModeApi, + &__nvoc_rtti_UserModeApi_Memory, + &__nvoc_rtti_UserModeApi_RmResource, + &__nvoc_rtti_UserModeApi_RmResourceCommon, + &__nvoc_rtti_UserModeApi_RsResource, + &__nvoc_rtti_UserModeApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_UserModeApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(UserModeApi), + /*classId=*/ classId(UserModeApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "UserModeApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_UserModeApi, + /*pCastInfo=*/ &__nvoc_castinfo_UserModeApi, + /*pExportInfo=*/ &__nvoc_export_info_UserModeApi +}; + +static NvBool __nvoc_thunk_UserModeApi_resCanCopy(struct RsResource *pUserModeApi) { + return usrmodeCanCopy((struct UserModeApi *)(((unsigned char *)pUserModeApi) - __nvoc_rtti_UserModeApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeCheckMemInterUnmap(struct UserModeApi *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_UserModeApi_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeControl(struct UserModeApi *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_UserModeApi_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeUnmap(struct UserModeApi *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_UserModeApi_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeGetMemInterMapParams(struct UserModeApi *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_UserModeApi_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeGetMemoryMappingDescriptor(struct UserModeApi *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + 
__nvoc_rtti_UserModeApi_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeGetMapAddrSpace(struct UserModeApi *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_UserModeApi_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_usrmodeShareCallback(struct UserModeApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_usrmodeControlFilter(struct UserModeApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_usrmodeAddAdditionalDependants(struct RsClient *pClient, struct UserModeApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_usrmodeGetRefCount(struct UserModeApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_usrmodeMapTo(struct UserModeApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_usrmodeControl_Prologue(struct UserModeApi *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeIsReady(struct UserModeApi *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_UserModeApi_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeCheckCopyPermissions(struct UserModeApi *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_UserModeApi_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_usrmodePreDestruct(struct UserModeApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_usrmodeUnmapFrom(struct UserModeApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_usrmodeControl_Epilogue(struct UserModeApi *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_usrmodeControlLookup(struct UserModeApi 
*pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_usrmodeMap(struct UserModeApi *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_UserModeApi_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_usrmodeAccessCallback(struct UserModeApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UserModeApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_UserModeApi = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Memory(Memory*); +void __nvoc_dtor_UserModeApi(UserModeApi *pThis) { + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_UserModeApi(UserModeApi *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_UserModeApi(UserModeApi *pThis, RmHalspecOwner *pRmhalspecowner, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_UserModeApi_fail_Memory; + __nvoc_init_dataField_UserModeApi(pThis, pRmhalspecowner); + + status = __nvoc_usrmodeConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_UserModeApi_fail__init; + goto __nvoc_ctor_UserModeApi_exit; // Success + +__nvoc_ctor_UserModeApi_fail__init: + __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory); +__nvoc_ctor_UserModeApi_fail_Memory: +__nvoc_ctor_UserModeApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_UserModeApi_1(UserModeApi *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__usrmodeCanCopy__ = &usrmodeCanCopy_IMPL; + + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_UserModeApi_resCanCopy; + + pThis->__usrmodeCheckMemInterUnmap__ = &__nvoc_thunk_Memory_usrmodeCheckMemInterUnmap; + + pThis->__usrmodeControl__ = &__nvoc_thunk_Memory_usrmodeControl; + + pThis->__usrmodeUnmap__ = &__nvoc_thunk_Memory_usrmodeUnmap; + + pThis->__usrmodeGetMemInterMapParams__ = &__nvoc_thunk_Memory_usrmodeGetMemInterMapParams; + + pThis->__usrmodeGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_usrmodeGetMemoryMappingDescriptor; + + 
pThis->__usrmodeGetMapAddrSpace__ = &__nvoc_thunk_Memory_usrmodeGetMapAddrSpace; + + pThis->__usrmodeShareCallback__ = &__nvoc_thunk_RmResource_usrmodeShareCallback; + + pThis->__usrmodeControlFilter__ = &__nvoc_thunk_RsResource_usrmodeControlFilter; + + pThis->__usrmodeAddAdditionalDependants__ = &__nvoc_thunk_RsResource_usrmodeAddAdditionalDependants; + + pThis->__usrmodeGetRefCount__ = &__nvoc_thunk_RsResource_usrmodeGetRefCount; + + pThis->__usrmodeMapTo__ = &__nvoc_thunk_RsResource_usrmodeMapTo; + + pThis->__usrmodeControl_Prologue__ = &__nvoc_thunk_RmResource_usrmodeControl_Prologue; + + pThis->__usrmodeIsReady__ = &__nvoc_thunk_Memory_usrmodeIsReady; + + pThis->__usrmodeCheckCopyPermissions__ = &__nvoc_thunk_Memory_usrmodeCheckCopyPermissions; + + pThis->__usrmodePreDestruct__ = &__nvoc_thunk_RsResource_usrmodePreDestruct; + + pThis->__usrmodeUnmapFrom__ = &__nvoc_thunk_RsResource_usrmodeUnmapFrom; + + pThis->__usrmodeControl_Epilogue__ = &__nvoc_thunk_RmResource_usrmodeControl_Epilogue; + + pThis->__usrmodeControlLookup__ = &__nvoc_thunk_RsResource_usrmodeControlLookup; + + pThis->__usrmodeMap__ = &__nvoc_thunk_Memory_usrmodeMap; + + pThis->__usrmodeAccessCallback__ = &__nvoc_thunk_RmResource_usrmodeAccessCallback; +} + +void __nvoc_init_funcTable_UserModeApi(UserModeApi *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_UserModeApi_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_Memory(Memory*); +void __nvoc_init_UserModeApi(UserModeApi *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_UserModeApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory; + __nvoc_init_Memory(&pThis->__nvoc_base_Memory); + __nvoc_init_funcTable_UserModeApi(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_UserModeApi(UserModeApi **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + UserModeApi *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(UserModeApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(UserModeApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_UserModeApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_UserModeApi(pThis, pRmhalspecowner); + status = __nvoc_ctor_UserModeApi(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_UserModeApi_cleanup; + + *ppThis = pThis; + return NV_OK; + 
+__nvoc_objCreate_UserModeApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_UserModeApi(UserModeApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_UserModeApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_usermode_api_nvoc.h b/src/nvidia/generated/g_usermode_api_nvoc.h new file mode 100644 index 000000000..cda3262e4 --- /dev/null +++ b/src/nvidia/generated/g_usermode_api_nvoc.h @@ -0,0 +1,237 @@ +#ifndef _G_USERMODE_API_NVOC_H_ +#define _G_USERMODE_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_usermode_api_nvoc.h" + +#ifndef _USERMODE_API_H_ +#define _USERMODE_API_H_ + +#include "core/core.h" +#include "mem_mgr/mem.h" +#include "gpu/gpu.h" +#include "nvoc/utility.h" + +#ifdef NVOC_USERMODE_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct UserModeApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct Memory __nvoc_base_Memory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct UserModeApi *__nvoc_pbase_UserModeApi; + NvBool (*__usrmodeCanCopy__)(struct UserModeApi *); + NV_STATUS (*__usrmodeCheckMemInterUnmap__)(struct UserModeApi *, NvBool); + NV_STATUS (*__usrmodeControl__)(struct UserModeApi *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__usrmodeUnmap__)(struct UserModeApi *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__usrmodeGetMemInterMapParams__)(struct UserModeApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__usrmodeGetMemoryMappingDescriptor__)(struct UserModeApi *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__usrmodeGetMapAddrSpace__)(struct UserModeApi *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__usrmodeShareCallback__)(struct UserModeApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__usrmodeControlFilter__)(struct UserModeApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__usrmodeAddAdditionalDependants__)(struct RsClient *, struct UserModeApi *, RsResourceRef *); + NvU32 (*__usrmodeGetRefCount__)(struct UserModeApi *); + NV_STATUS (*__usrmodeMapTo__)(struct UserModeApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__usrmodeControl_Prologue__)(struct UserModeApi *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__usrmodeIsReady__)(struct UserModeApi *); + NV_STATUS (*__usrmodeCheckCopyPermissions__)(struct UserModeApi *, struct OBJGPU *, NvHandle); + void (*__usrmodePreDestruct__)(struct UserModeApi *); + NV_STATUS (*__usrmodeUnmapFrom__)(struct UserModeApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__usrmodeControl_Epilogue__)(struct UserModeApi *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__usrmodeControlLookup__)(struct UserModeApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__usrmodeMap__)(struct UserModeApi *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__usrmodeAccessCallback__)(struct UserModeApi *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_UserModeApi_TYPEDEF__ +#define __NVOC_CLASS_UserModeApi_TYPEDEF__ +typedef struct UserModeApi UserModeApi; +#endif /* __NVOC_CLASS_UserModeApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UserModeApi +#define __nvoc_class_id_UserModeApi 0x6f57ec +#endif /* __nvoc_class_id_UserModeApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserModeApi; + +#define __staticCast_UserModeApi(pThis) \ + ((pThis)->__nvoc_pbase_UserModeApi) + +#ifdef __nvoc_usermode_api_h_disabled +#define __dynamicCast_UserModeApi(pThis) ((UserModeApi*)NULL) +#else //__nvoc_usermode_api_h_disabled +#define __dynamicCast_UserModeApi(pThis) \ + ((UserModeApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(UserModeApi))) +#endif //__nvoc_usermode_api_h_disabled + + +NV_STATUS 
__nvoc_objCreateDynamic_UserModeApi(UserModeApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_UserModeApi(UserModeApi**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_UserModeApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_UserModeApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define usrmodeCanCopy(pUserModeApi) usrmodeCanCopy_DISPATCH(pUserModeApi) +#define usrmodeCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) usrmodeCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define usrmodeControl(pMemory, pCallContext, pParams) usrmodeControl_DISPATCH(pMemory, pCallContext, pParams) +#define usrmodeUnmap(pMemory, pCallContext, pCpuMapping) usrmodeUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define usrmodeGetMemInterMapParams(pMemory, pParams) usrmodeGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define usrmodeGetMemoryMappingDescriptor(pMemory, ppMemDesc) usrmodeGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define usrmodeGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) usrmodeGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define usrmodeShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) usrmodeShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define usrmodeControlFilter(pResource, pCallContext, pParams) usrmodeControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define usrmodeAddAdditionalDependants(pClient, pResource, pReference) usrmodeAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define usrmodeGetRefCount(pResource) usrmodeGetRefCount_DISPATCH(pResource) +#define usrmodeMapTo(pResource, pParams) usrmodeMapTo_DISPATCH(pResource, pParams) +#define usrmodeControl_Prologue(pResource, pCallContext, pParams) usrmodeControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define usrmodeIsReady(pMemory) usrmodeIsReady_DISPATCH(pMemory) +#define usrmodeCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) usrmodeCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define usrmodePreDestruct(pResource) usrmodePreDestruct_DISPATCH(pResource) +#define usrmodeUnmapFrom(pResource, pParams) usrmodeUnmapFrom_DISPATCH(pResource, pParams) +#define usrmodeControl_Epilogue(pResource, pCallContext, pParams) usrmodeControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define usrmodeControlLookup(pResource, pParams, ppEntry) usrmodeControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define usrmodeMap(pMemory, pCallContext, pParams, pCpuMapping) usrmodeMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define usrmodeAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) usrmodeAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS usrmodeConstructHal_GV100(struct UserModeApi *pUserModeApi, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +#ifdef __nvoc_usermode_api_h_disabled +static inline NV_STATUS usrmodeConstructHal(struct UserModeApi *pUserModeApi, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("UserModeApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_usermode_api_h_disabled +#define usrmodeConstructHal(pUserModeApi, pCallContext, pParams) usrmodeConstructHal_GV100(pUserModeApi, 
pCallContext, pParams) +#endif //__nvoc_usermode_api_h_disabled + +#define usrmodeConstructHal_HAL(pUserModeApi, pCallContext, pParams) usrmodeConstructHal(pUserModeApi, pCallContext, pParams) + +NvBool usrmodeCanCopy_IMPL(struct UserModeApi *pUserModeApi); + +static inline NvBool usrmodeCanCopy_DISPATCH(struct UserModeApi *pUserModeApi) { + return pUserModeApi->__usrmodeCanCopy__(pUserModeApi); +} + +static inline NV_STATUS usrmodeCheckMemInterUnmap_DISPATCH(struct UserModeApi *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__usrmodeCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS usrmodeControl_DISPATCH(struct UserModeApi *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__usrmodeControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS usrmodeUnmap_DISPATCH(struct UserModeApi *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__usrmodeUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS usrmodeGetMemInterMapParams_DISPATCH(struct UserModeApi *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__usrmodeGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS usrmodeGetMemoryMappingDescriptor_DISPATCH(struct UserModeApi *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__usrmodeGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS usrmodeGetMapAddrSpace_DISPATCH(struct UserModeApi *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__usrmodeGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool usrmodeShareCallback_DISPATCH(struct UserModeApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__usrmodeShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS usrmodeControlFilter_DISPATCH(struct UserModeApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__usrmodeControlFilter__(pResource, pCallContext, pParams); +} + +static inline void usrmodeAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct UserModeApi *pResource, RsResourceRef *pReference) { + pResource->__usrmodeAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 usrmodeGetRefCount_DISPATCH(struct UserModeApi *pResource) { + return pResource->__usrmodeGetRefCount__(pResource); +} + +static inline NV_STATUS usrmodeMapTo_DISPATCH(struct UserModeApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__usrmodeMapTo__(pResource, pParams); +} + +static inline NV_STATUS usrmodeControl_Prologue_DISPATCH(struct UserModeApi *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__usrmodeControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS usrmodeIsReady_DISPATCH(struct UserModeApi *pMemory) { + return pMemory->__usrmodeIsReady__(pMemory); +} + +static inline NV_STATUS usrmodeCheckCopyPermissions_DISPATCH(struct UserModeApi *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__usrmodeCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void usrmodePreDestruct_DISPATCH(struct UserModeApi *pResource) { + 
pResource->__usrmodePreDestruct__(pResource); +} + +static inline NV_STATUS usrmodeUnmapFrom_DISPATCH(struct UserModeApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__usrmodeUnmapFrom__(pResource, pParams); +} + +static inline void usrmodeControl_Epilogue_DISPATCH(struct UserModeApi *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__usrmodeControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS usrmodeControlLookup_DISPATCH(struct UserModeApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__usrmodeControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS usrmodeMap_DISPATCH(struct UserModeApi *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__usrmodeMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool usrmodeAccessCallback_DISPATCH(struct UserModeApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__usrmodeAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS usrmodeConstruct_IMPL(struct UserModeApi *arg_pUserModeApi, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_usrmodeConstruct(arg_pUserModeApi, arg_pCallContext, arg_pParams) usrmodeConstruct_IMPL(arg_pUserModeApi, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif // _USERMODE_API_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_USERMODE_API_NVOC_H_ diff --git a/src/nvidia/generated/g_uvm_channel_retainer_nvoc.c b/src/nvidia/generated/g_uvm_channel_retainer_nvoc.c new file mode 100644 index 000000000..61c27da8c --- /dev/null +++ b/src/nvidia/generated/g_uvm_channel_retainer_nvoc.c @@ -0,0 +1,343 @@ +#define NVOC_UVM_CHANNEL_RETAINER_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_uvm_channel_retainer_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xa3f03a = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UvmChannelRetainer; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_UvmChannelRetainer(UvmChannelRetainer*, RmHalspecOwner* ); +void __nvoc_init_funcTable_UvmChannelRetainer(UvmChannelRetainer*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_UvmChannelRetainer(UvmChannelRetainer*, RmHalspecOwner* , CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_UvmChannelRetainer(UvmChannelRetainer*, RmHalspecOwner* ); +void __nvoc_dtor_UvmChannelRetainer(UvmChannelRetainer*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_UvmChannelRetainer; + +static const struct NVOC_RTTI __nvoc_rtti_UvmChannelRetainer_UvmChannelRetainer = { + /*pClassDef=*/ &__nvoc_class_def_UvmChannelRetainer, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_UvmChannelRetainer, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI 
__nvoc_rtti_UvmChannelRetainer_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmChannelRetainer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmChannelRetainer_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmChannelRetainer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmChannelRetainer_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmChannelRetainer, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmChannelRetainer_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmChannelRetainer, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmChannelRetainer_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmChannelRetainer, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_UvmChannelRetainer = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_UvmChannelRetainer_UvmChannelRetainer, + &__nvoc_rtti_UvmChannelRetainer_GpuResource, + &__nvoc_rtti_UvmChannelRetainer_RmResource, + &__nvoc_rtti_UvmChannelRetainer_RmResourceCommon, + &__nvoc_rtti_UvmChannelRetainer_RsResource, + &__nvoc_rtti_UvmChannelRetainer_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_UvmChannelRetainer = +{ + /*classInfo=*/ { + /*size=*/ sizeof(UvmChannelRetainer), + /*classId=*/ classId(UvmChannelRetainer), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "UvmChannelRetainer", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_UvmChannelRetainer, + /*pCastInfo=*/ &__nvoc_castinfo_UvmChannelRetainer, + /*pExportInfo=*/ &__nvoc_export_info_UvmChannelRetainer +}; + +static NvBool __nvoc_thunk_GpuResource_uvmchanrtnrShareCallback(struct UvmChannelRetainer *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmChannelRetainer_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmchanrtnrControl(struct UvmChannelRetainer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmChannelRetainer_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmchanrtnrUnmap(struct UvmChannelRetainer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmChannelRetainer_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_uvmchanrtnrGetMemInterMapParams(struct UvmChannelRetainer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct 
RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_UvmChannelRetainer_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_uvmchanrtnrGetMemoryMappingDescriptor(struct UvmChannelRetainer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_UvmChannelRetainer_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmchanrtnrGetMapAddrSpace(struct UvmChannelRetainer *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmChannelRetainer_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_uvmchanrtnrGetInternalObjectHandle(struct UvmChannelRetainer *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmChannelRetainer_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_uvmchanrtnrControlFilter(struct UvmChannelRetainer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_uvmchanrtnrAddAdditionalDependants(struct RsClient *pClient, struct UvmChannelRetainer *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_uvmchanrtnrGetRefCount(struct UvmChannelRetainer *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_uvmchanrtnrCheckMemInterUnmap(struct UvmChannelRetainer *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_UvmChannelRetainer_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_uvmchanrtnrMapTo(struct UvmChannelRetainer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_uvmchanrtnrControl_Prologue(struct UvmChannelRetainer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmchanrtnrGetRegBaseOffsetAndSize(struct UvmChannelRetainer *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmChannelRetainer_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_uvmchanrtnrCanCopy(struct UvmChannelRetainer *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RsResource.offset)); +} + +static NV_STATUS 
__nvoc_thunk_GpuResource_uvmchanrtnrInternalControlForward(struct UvmChannelRetainer *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmChannelRetainer_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_uvmchanrtnrPreDestruct(struct UvmChannelRetainer *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_uvmchanrtnrUnmapFrom(struct UvmChannelRetainer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_uvmchanrtnrControl_Epilogue(struct UvmChannelRetainer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_uvmchanrtnrControlLookup(struct UvmChannelRetainer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmchanrtnrMap(struct UvmChannelRetainer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmChannelRetainer_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_uvmchanrtnrAccessCallback(struct UvmChannelRetainer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmChannelRetainer_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_UvmChannelRetainer = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_UvmChannelRetainer(UvmChannelRetainer *pThis) { + __nvoc_uvmchanrtnrDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_UvmChannelRetainer(UvmChannelRetainer *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_UvmChannelRetainer(UvmChannelRetainer *pThis, RmHalspecOwner *pRmhalspecowner, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto 
__nvoc_ctor_UvmChannelRetainer_fail_GpuResource; + __nvoc_init_dataField_UvmChannelRetainer(pThis, pRmhalspecowner); + + status = __nvoc_uvmchanrtnrConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_UvmChannelRetainer_fail__init; + goto __nvoc_ctor_UvmChannelRetainer_exit; // Success + +__nvoc_ctor_UvmChannelRetainer_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_UvmChannelRetainer_fail_GpuResource: +__nvoc_ctor_UvmChannelRetainer_exit: + + return status; +} + +static void __nvoc_init_funcTable_UvmChannelRetainer_1(UvmChannelRetainer *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__uvmchanrtnrShareCallback__ = &__nvoc_thunk_GpuResource_uvmchanrtnrShareCallback; + + pThis->__uvmchanrtnrControl__ = &__nvoc_thunk_GpuResource_uvmchanrtnrControl; + + pThis->__uvmchanrtnrUnmap__ = &__nvoc_thunk_GpuResource_uvmchanrtnrUnmap; + + pThis->__uvmchanrtnrGetMemInterMapParams__ = &__nvoc_thunk_RmResource_uvmchanrtnrGetMemInterMapParams; + + pThis->__uvmchanrtnrGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_uvmchanrtnrGetMemoryMappingDescriptor; + + pThis->__uvmchanrtnrGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_uvmchanrtnrGetMapAddrSpace; + + pThis->__uvmchanrtnrGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_uvmchanrtnrGetInternalObjectHandle; + + pThis->__uvmchanrtnrControlFilter__ = &__nvoc_thunk_RsResource_uvmchanrtnrControlFilter; + + pThis->__uvmchanrtnrAddAdditionalDependants__ = &__nvoc_thunk_RsResource_uvmchanrtnrAddAdditionalDependants; + + pThis->__uvmchanrtnrGetRefCount__ = &__nvoc_thunk_RsResource_uvmchanrtnrGetRefCount; + + pThis->__uvmchanrtnrCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_uvmchanrtnrCheckMemInterUnmap; + + pThis->__uvmchanrtnrMapTo__ = &__nvoc_thunk_RsResource_uvmchanrtnrMapTo; + + pThis->__uvmchanrtnrControl_Prologue__ = &__nvoc_thunk_RmResource_uvmchanrtnrControl_Prologue; + + pThis->__uvmchanrtnrGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_uvmchanrtnrGetRegBaseOffsetAndSize; + + pThis->__uvmchanrtnrCanCopy__ = &__nvoc_thunk_RsResource_uvmchanrtnrCanCopy; + + pThis->__uvmchanrtnrInternalControlForward__ = &__nvoc_thunk_GpuResource_uvmchanrtnrInternalControlForward; + + pThis->__uvmchanrtnrPreDestruct__ = &__nvoc_thunk_RsResource_uvmchanrtnrPreDestruct; + + pThis->__uvmchanrtnrUnmapFrom__ = &__nvoc_thunk_RsResource_uvmchanrtnrUnmapFrom; + + pThis->__uvmchanrtnrControl_Epilogue__ = &__nvoc_thunk_RmResource_uvmchanrtnrControl_Epilogue; + + pThis->__uvmchanrtnrControlLookup__ = &__nvoc_thunk_RsResource_uvmchanrtnrControlLookup; + + pThis->__uvmchanrtnrMap__ = &__nvoc_thunk_GpuResource_uvmchanrtnrMap; + + pThis->__uvmchanrtnrAccessCallback__ = &__nvoc_thunk_RmResource_uvmchanrtnrAccessCallback; +} + +void __nvoc_init_funcTable_UvmChannelRetainer(UvmChannelRetainer *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_UvmChannelRetainer_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_UvmChannelRetainer(UvmChannelRetainer *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_UvmChannelRetainer = pThis; + pThis->__nvoc_pbase_Object = 
&pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_UvmChannelRetainer(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_UvmChannelRetainer(UvmChannelRetainer **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + UvmChannelRetainer *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(UvmChannelRetainer)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(UvmChannelRetainer)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_UvmChannelRetainer); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_UvmChannelRetainer(pThis, pRmhalspecowner); + status = __nvoc_ctor_UvmChannelRetainer(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_UvmChannelRetainer_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_UvmChannelRetainer_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_UvmChannelRetainer(UvmChannelRetainer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_UvmChannelRetainer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_uvm_channel_retainer_nvoc.h b/src/nvidia/generated/g_uvm_channel_retainer_nvoc.h new file mode 100644 index 000000000..9bbda2e91 --- /dev/null +++ b/src/nvidia/generated/g_uvm_channel_retainer_nvoc.h @@ -0,0 +1,261 @@ +#ifndef _G_UVM_CHANNEL_RETAINER_NVOC_H_ +#define _G_UVM_CHANNEL_RETAINER_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_uvm_channel_retainer_nvoc.h" + +#ifndef UVM_CHANNEL_RETAINER_H +#define UVM_CHANNEL_RETAINER_H + +#include "os/os.h" +#include "resserv/resserv.h" +#include "nvoc/prelude.h" +#include "gpu/gpu_resource.h" +#include "nvos.h" +#include "kernel/gpu/fifo/kernel_channel.h" + +struct OBJGPU; + +#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__ +#define __NVOC_CLASS_OBJGPU_TYPEDEF__ +typedef struct OBJGPU OBJGPU; +#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJGPU +#define __nvoc_class_id_OBJGPU 0x7ef3cb +#endif /* __nvoc_class_id_OBJGPU */ + + + +#ifdef NVOC_UVM_CHANNEL_RETAINER_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct UvmChannelRetainer { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct UvmChannelRetainer *__nvoc_pbase_UvmChannelRetainer; + NvBool (*__uvmchanrtnrShareCallback__)(struct UvmChannelRetainer *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__uvmchanrtnrControl__)(struct UvmChannelRetainer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__uvmchanrtnrUnmap__)(struct UvmChannelRetainer *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__uvmchanrtnrGetMemInterMapParams__)(struct UvmChannelRetainer *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__uvmchanrtnrGetMemoryMappingDescriptor__)(struct UvmChannelRetainer *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__uvmchanrtnrGetMapAddrSpace__)(struct UvmChannelRetainer *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__uvmchanrtnrGetInternalObjectHandle__)(struct UvmChannelRetainer *); + NV_STATUS (*__uvmchanrtnrControlFilter__)(struct UvmChannelRetainer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__uvmchanrtnrAddAdditionalDependants__)(struct RsClient *, struct UvmChannelRetainer *, RsResourceRef *); + NvU32 (*__uvmchanrtnrGetRefCount__)(struct UvmChannelRetainer *); + NV_STATUS (*__uvmchanrtnrCheckMemInterUnmap__)(struct UvmChannelRetainer *, NvBool); + NV_STATUS (*__uvmchanrtnrMapTo__)(struct UvmChannelRetainer *, 
RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__uvmchanrtnrControl_Prologue__)(struct UvmChannelRetainer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__uvmchanrtnrGetRegBaseOffsetAndSize__)(struct UvmChannelRetainer *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__uvmchanrtnrCanCopy__)(struct UvmChannelRetainer *); + NV_STATUS (*__uvmchanrtnrInternalControlForward__)(struct UvmChannelRetainer *, NvU32, void *, NvU32); + void (*__uvmchanrtnrPreDestruct__)(struct UvmChannelRetainer *); + NV_STATUS (*__uvmchanrtnrUnmapFrom__)(struct UvmChannelRetainer *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__uvmchanrtnrControl_Epilogue__)(struct UvmChannelRetainer *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__uvmchanrtnrControlLookup__)(struct UvmChannelRetainer *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__uvmchanrtnrMap__)(struct UvmChannelRetainer *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__uvmchanrtnrAccessCallback__)(struct UvmChannelRetainer *, struct RsClient *, void *, RsAccessRight); + MEMORY_DESCRIPTOR *pInstMemDesc; + NvU32 chId; + NvU32 runlistId; +}; + +#ifndef __NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ +#define __NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ +typedef struct UvmChannelRetainer UvmChannelRetainer; +#endif /* __NVOC_CLASS_UvmChannelRetainer_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UvmChannelRetainer +#define __nvoc_class_id_UvmChannelRetainer 0xa3f03a +#endif /* __nvoc_class_id_UvmChannelRetainer */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UvmChannelRetainer; + +#define __staticCast_UvmChannelRetainer(pThis) \ + ((pThis)->__nvoc_pbase_UvmChannelRetainer) + +#ifdef __nvoc_uvm_channel_retainer_h_disabled +#define __dynamicCast_UvmChannelRetainer(pThis) ((UvmChannelRetainer*)NULL) +#else //__nvoc_uvm_channel_retainer_h_disabled +#define __dynamicCast_UvmChannelRetainer(pThis) \ + ((UvmChannelRetainer*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(UvmChannelRetainer))) +#endif //__nvoc_uvm_channel_retainer_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_UvmChannelRetainer(UvmChannelRetainer**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_UvmChannelRetainer(UvmChannelRetainer**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_UvmChannelRetainer(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_UvmChannelRetainer((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define uvmchanrtnrShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) uvmchanrtnrShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define uvmchanrtnrControl(pGpuResource, pCallContext, pParams) uvmchanrtnrControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define uvmchanrtnrUnmap(pGpuResource, pCallContext, pCpuMapping) uvmchanrtnrUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define uvmchanrtnrGetMemInterMapParams(pRmResource, pParams) uvmchanrtnrGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define uvmchanrtnrGetMemoryMappingDescriptor(pRmResource, ppMemDesc) uvmchanrtnrGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define uvmchanrtnrGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) uvmchanrtnrGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) 
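As a rough sketch of how these generated dispatch wrappers are consumed by code that includes g_uvm_channel_retainer_nvoc.h (the caller name and the zero mapFlags value below are hypothetical, not part of this commit), a virtual call resolves through the per-object function-pointer table that __nvoc_init_funcTable_UvmChannelRetainer_1() fills in:

#include "g_uvm_channel_retainer_nvoc.h"  /* header added by this hunk */

static NV_STATUS exampleQueryRetainerAddrSpace(struct UvmChannelRetainer *pRetainer,
                                               struct CALL_CONTEXT *pCallContext,
                                               NV_ADDRESS_SPACE *pAddrSpace)
{
    // Expands to uvmchanrtnrGetMapAddrSpace_DISPATCH(), which reads the
    // __uvmchanrtnrGetMapAddrSpace__ slot; the generated func table points
    // that slot at __nvoc_thunk_GpuResource_uvmchanrtnrGetMapAddrSpace, so
    // the call reaches the GpuResource base implementation
    // (gpuresGetMapAddrSpace) unless a subclass overrides the slot.
    return uvmchanrtnrGetMapAddrSpace(pRetainer, pCallContext, 0, pAddrSpace);
}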
+#define uvmchanrtnrGetInternalObjectHandle(pGpuResource) uvmchanrtnrGetInternalObjectHandle_DISPATCH(pGpuResource) +#define uvmchanrtnrControlFilter(pResource, pCallContext, pParams) uvmchanrtnrControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define uvmchanrtnrAddAdditionalDependants(pClient, pResource, pReference) uvmchanrtnrAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define uvmchanrtnrGetRefCount(pResource) uvmchanrtnrGetRefCount_DISPATCH(pResource) +#define uvmchanrtnrCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) uvmchanrtnrCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define uvmchanrtnrMapTo(pResource, pParams) uvmchanrtnrMapTo_DISPATCH(pResource, pParams) +#define uvmchanrtnrControl_Prologue(pResource, pCallContext, pParams) uvmchanrtnrControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define uvmchanrtnrGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) uvmchanrtnrGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define uvmchanrtnrCanCopy(pResource) uvmchanrtnrCanCopy_DISPATCH(pResource) +#define uvmchanrtnrInternalControlForward(pGpuResource, command, pParams, size) uvmchanrtnrInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define uvmchanrtnrPreDestruct(pResource) uvmchanrtnrPreDestruct_DISPATCH(pResource) +#define uvmchanrtnrUnmapFrom(pResource, pParams) uvmchanrtnrUnmapFrom_DISPATCH(pResource, pParams) +#define uvmchanrtnrControl_Epilogue(pResource, pCallContext, pParams) uvmchanrtnrControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define uvmchanrtnrControlLookup(pResource, pParams, ppEntry) uvmchanrtnrControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define uvmchanrtnrMap(pGpuResource, pCallContext, pParams, pCpuMapping) uvmchanrtnrMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define uvmchanrtnrAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) uvmchanrtnrAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool uvmchanrtnrIsAllocationAllowed_IMPL(struct UvmChannelRetainer *pUvmChannelRetainer, CALL_CONTEXT *pCallContext, struct KernelChannel *pKernelChannel); + +#ifdef __nvoc_uvm_channel_retainer_h_disabled +static inline NvBool uvmchanrtnrIsAllocationAllowed(struct UvmChannelRetainer *pUvmChannelRetainer, CALL_CONTEXT *pCallContext, struct KernelChannel *pKernelChannel) { + NV_ASSERT_FAILED_PRECOMP("UvmChannelRetainer was disabled!"); + return NV_FALSE; +} +#else //__nvoc_uvm_channel_retainer_h_disabled +#define uvmchanrtnrIsAllocationAllowed(pUvmChannelRetainer, pCallContext, pKernelChannel) uvmchanrtnrIsAllocationAllowed_IMPL(pUvmChannelRetainer, pCallContext, pKernelChannel) +#endif //__nvoc_uvm_channel_retainer_h_disabled + +#define uvmchanrtnrIsAllocationAllowed_HAL(pUvmChannelRetainer, pCallContext, pKernelChannel) uvmchanrtnrIsAllocationAllowed(pUvmChannelRetainer, pCallContext, pKernelChannel) + +static inline NvBool uvmchanrtnrShareCallback_DISPATCH(struct UvmChannelRetainer *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__uvmchanrtnrShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS uvmchanrtnrControl_DISPATCH(struct UvmChannelRetainer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__uvmchanrtnrControl__(pGpuResource, pCallContext, 
pParams); +} + +static inline NV_STATUS uvmchanrtnrUnmap_DISPATCH(struct UvmChannelRetainer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__uvmchanrtnrUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS uvmchanrtnrGetMemInterMapParams_DISPATCH(struct UvmChannelRetainer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__uvmchanrtnrGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS uvmchanrtnrGetMemoryMappingDescriptor_DISPATCH(struct UvmChannelRetainer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__uvmchanrtnrGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS uvmchanrtnrGetMapAddrSpace_DISPATCH(struct UvmChannelRetainer *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__uvmchanrtnrGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle uvmchanrtnrGetInternalObjectHandle_DISPATCH(struct UvmChannelRetainer *pGpuResource) { + return pGpuResource->__uvmchanrtnrGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS uvmchanrtnrControlFilter_DISPATCH(struct UvmChannelRetainer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__uvmchanrtnrControlFilter__(pResource, pCallContext, pParams); +} + +static inline void uvmchanrtnrAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct UvmChannelRetainer *pResource, RsResourceRef *pReference) { + pResource->__uvmchanrtnrAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 uvmchanrtnrGetRefCount_DISPATCH(struct UvmChannelRetainer *pResource) { + return pResource->__uvmchanrtnrGetRefCount__(pResource); +} + +static inline NV_STATUS uvmchanrtnrCheckMemInterUnmap_DISPATCH(struct UvmChannelRetainer *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__uvmchanrtnrCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS uvmchanrtnrMapTo_DISPATCH(struct UvmChannelRetainer *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__uvmchanrtnrMapTo__(pResource, pParams); +} + +static inline NV_STATUS uvmchanrtnrControl_Prologue_DISPATCH(struct UvmChannelRetainer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__uvmchanrtnrControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS uvmchanrtnrGetRegBaseOffsetAndSize_DISPATCH(struct UvmChannelRetainer *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__uvmchanrtnrGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool uvmchanrtnrCanCopy_DISPATCH(struct UvmChannelRetainer *pResource) { + return pResource->__uvmchanrtnrCanCopy__(pResource); +} + +static inline NV_STATUS uvmchanrtnrInternalControlForward_DISPATCH(struct UvmChannelRetainer *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__uvmchanrtnrInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void uvmchanrtnrPreDestruct_DISPATCH(struct UvmChannelRetainer *pResource) { + pResource->__uvmchanrtnrPreDestruct__(pResource); +} + +static inline NV_STATUS uvmchanrtnrUnmapFrom_DISPATCH(struct UvmChannelRetainer *pResource, 
RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__uvmchanrtnrUnmapFrom__(pResource, pParams); +} + +static inline void uvmchanrtnrControl_Epilogue_DISPATCH(struct UvmChannelRetainer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__uvmchanrtnrControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS uvmchanrtnrControlLookup_DISPATCH(struct UvmChannelRetainer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__uvmchanrtnrControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS uvmchanrtnrMap_DISPATCH(struct UvmChannelRetainer *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__uvmchanrtnrMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool uvmchanrtnrAccessCallback_DISPATCH(struct UvmChannelRetainer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__uvmchanrtnrAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS uvmchanrtnrConstruct_IMPL(struct UvmChannelRetainer *arg_pUvmChannelRetainer, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_uvmchanrtnrConstruct(arg_pUvmChannelRetainer, arg_pCallContext, arg_pParams) uvmchanrtnrConstruct_IMPL(arg_pUvmChannelRetainer, arg_pCallContext, arg_pParams) +void uvmchanrtnrDestruct_IMPL(struct UvmChannelRetainer *pUvmChannelRetainer); +#define __nvoc_uvmchanrtnrDestruct(pUvmChannelRetainer) uvmchanrtnrDestruct_IMPL(pUvmChannelRetainer) +#undef PRIVATE_FIELD + + +#endif // UVM_CHANNEL_RETAINER_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_UVM_CHANNEL_RETAINER_NVOC_H_ diff --git a/src/nvidia/generated/g_uvm_nvoc.c b/src/nvidia/generated/g_uvm_nvoc.c new file mode 100644 index 000000000..200130a20 --- /dev/null +++ b/src/nvidia/generated/g_uvm_nvoc.c @@ -0,0 +1,352 @@ +#define NVOC_UVM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_uvm_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xf9a17d = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJUVM; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_IntrService; + +void __nvoc_init_OBJUVM(OBJUVM*, RmHalspecOwner* ); +void __nvoc_init_funcTable_OBJUVM(OBJUVM*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_OBJUVM(OBJUVM*, RmHalspecOwner* ); +void __nvoc_init_dataField_OBJUVM(OBJUVM*, RmHalspecOwner* ); +void __nvoc_dtor_OBJUVM(OBJUVM*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJUVM; + +static const struct NVOC_RTTI __nvoc_rtti_OBJUVM_OBJUVM = { + /*pClassDef=*/ &__nvoc_class_def_OBJUVM, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJUVM, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJUVM_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJUVM, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJUVM_OBJENGSTATE = { + /*pClassDef=*/ 
&__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJUVM, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJUVM_IntrService = { + /*pClassDef=*/ &__nvoc_class_def_IntrService, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJUVM, __nvoc_base_IntrService), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJUVM = { + /*numRelatives=*/ 4, + /*relatives=*/ { + &__nvoc_rtti_OBJUVM_OBJUVM, + &__nvoc_rtti_OBJUVM_IntrService, + &__nvoc_rtti_OBJUVM_OBJENGSTATE, + &__nvoc_rtti_OBJUVM_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJUVM = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJUVM), + /*classId=*/ classId(OBJUVM), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJUVM", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJUVM, + /*pCastInfo=*/ &__nvoc_castinfo_OBJUVM, + /*pExportInfo=*/ &__nvoc_export_info_OBJUVM +}; + +static void __nvoc_thunk_OBJUVM_engstateStateDestroy(OBJGPU *pGpu, struct OBJENGSTATE *pUvm) { + uvmStateDestroy(pGpu, (struct OBJUVM *)(((unsigned char *)pUvm) - __nvoc_rtti_OBJUVM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJUVM_engstateStateInitUnlocked(OBJGPU *pGpu, struct OBJENGSTATE *pUvm) { + return uvmStateInitUnlocked(pGpu, (struct OBJUVM *)(((unsigned char *)pUvm) - __nvoc_rtti_OBJUVM_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJUVM_intrservRegisterIntrService(OBJGPU *arg0, struct IntrService *pUvm, IntrServiceRecord arg1[155]) { + uvmRegisterIntrService(arg0, (struct OBJUVM *)(((unsigned char *)pUvm) - __nvoc_rtti_OBJUVM_IntrService.offset), arg1); +} + +static NvU32 __nvoc_thunk_OBJUVM_intrservServiceInterrupt(OBJGPU *arg0, struct IntrService *pUvm, IntrServiceServiceInterruptArguments *arg1) { + return uvmServiceInterrupt(arg0, (struct OBJUVM *)(((unsigned char *)pUvm) - __nvoc_rtti_OBJUVM_IntrService.offset), arg1); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmReconcileTunableState(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStateLoad(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStateUnload(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_IntrService_uvmServiceNotificationInterrupt(OBJGPU *pGpu, struct OBJUVM *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return intrservServiceNotificationInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_OBJUVM_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStateInitLocked(POBJGPU pGpu, struct OBJUVM *pEngstate) { + return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStatePreLoad(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned 
char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStatePostUnload(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStatePreUnload(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_uvmInitMissing(POBJGPU pGpu, struct OBJUVM *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStatePreInitLocked(POBJGPU pGpu, struct OBJUVM *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStatePreInitUnlocked(POBJGPU pGpu, struct OBJUVM *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmGetTunableState(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmCompareTunableState(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_uvmFreeTunableState(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_IntrService_uvmClearInterrupt(OBJGPU *pGpu, struct OBJUVM *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return intrservClearInterrupt(pGpu, (struct IntrService *)(((unsigned char *)pIntrService) + __nvoc_rtti_OBJUVM_IntrService.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmStatePostLoad(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmAllocTunableState(POBJGPU pGpu, struct OBJUVM *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmSetTunableState(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_uvmConstructEngine(POBJGPU pGpu, struct OBJUVM *pEngstate, ENGDESCRIPTOR arg0) { + return engstateConstructEngine(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset), 
arg0); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_uvmIsPresent(POBJGPU pGpu, struct OBJUVM *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJUVM_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJUVM = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_IntrService(IntrService*); +void __nvoc_dtor_OBJUVM(OBJUVM *pThis) { + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_dtor_IntrService(&pThis->__nvoc_base_IntrService); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJUVM(OBJUVM *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_IntrService(IntrService* ); +NV_STATUS __nvoc_ctor_OBJUVM(OBJUVM *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_OBJUVM_fail_OBJENGSTATE; + status = __nvoc_ctor_IntrService(&pThis->__nvoc_base_IntrService); + if (status != NV_OK) goto __nvoc_ctor_OBJUVM_fail_IntrService; + __nvoc_init_dataField_OBJUVM(pThis, pRmhalspecowner); + goto __nvoc_ctor_OBJUVM_exit; // Success + +__nvoc_ctor_OBJUVM_fail_IntrService: + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); +__nvoc_ctor_OBJUVM_fail_OBJENGSTATE: +__nvoc_ctor_OBJUVM_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJUVM_1(OBJUVM *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + + pThis->__uvmStateDestroy__ = &uvmStateDestroy_IMPL; + + pThis->__uvmStateInitUnlocked__ = &uvmStateInitUnlocked_IMPL; + + pThis->__uvmRegisterIntrService__ = &uvmRegisterIntrService_IMPL; + + pThis->__uvmServiceInterrupt__ = &uvmServiceInterrupt_IMPL; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_OBJUVM_engstateStateDestroy; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitUnlocked__ = &__nvoc_thunk_OBJUVM_engstateStateInitUnlocked; + + pThis->__nvoc_base_IntrService.__intrservRegisterIntrService__ = &__nvoc_thunk_OBJUVM_intrservRegisterIntrService; + + pThis->__nvoc_base_IntrService.__intrservServiceInterrupt__ = &__nvoc_thunk_OBJUVM_intrservServiceInterrupt; + + pThis->__uvmReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_uvmReconcileTunableState; + + pThis->__uvmStateLoad__ = 
&__nvoc_thunk_OBJENGSTATE_uvmStateLoad; + + pThis->__uvmStateUnload__ = &__nvoc_thunk_OBJENGSTATE_uvmStateUnload; + + pThis->__uvmServiceNotificationInterrupt__ = &__nvoc_thunk_IntrService_uvmServiceNotificationInterrupt; + + pThis->__uvmStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_uvmStateInitLocked; + + pThis->__uvmStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_uvmStatePreLoad; + + pThis->__uvmStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_uvmStatePostUnload; + + pThis->__uvmStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_uvmStatePreUnload; + + pThis->__uvmInitMissing__ = &__nvoc_thunk_OBJENGSTATE_uvmInitMissing; + + pThis->__uvmStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_uvmStatePreInitLocked; + + pThis->__uvmStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_uvmStatePreInitUnlocked; + + pThis->__uvmGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_uvmGetTunableState; + + pThis->__uvmCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_uvmCompareTunableState; + + pThis->__uvmFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_uvmFreeTunableState; + + pThis->__uvmClearInterrupt__ = &__nvoc_thunk_IntrService_uvmClearInterrupt; + + pThis->__uvmStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_uvmStatePostLoad; + + pThis->__uvmAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_uvmAllocTunableState; + + pThis->__uvmSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_uvmSetTunableState; + + pThis->__uvmConstructEngine__ = &__nvoc_thunk_OBJENGSTATE_uvmConstructEngine; + + pThis->__uvmIsPresent__ = &__nvoc_thunk_OBJENGSTATE_uvmIsPresent; +} + +void __nvoc_init_funcTable_OBJUVM(OBJUVM *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_OBJUVM_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_IntrService(IntrService*); +void __nvoc_init_OBJUVM(OBJUVM *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_OBJUVM = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + pThis->__nvoc_pbase_IntrService = &pThis->__nvoc_base_IntrService; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_IntrService(&pThis->__nvoc_base_IntrService); + __nvoc_init_funcTable_OBJUVM(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_OBJUVM(OBJUVM **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJUVM *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(OBJUVM)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJUVM)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJUVM); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_OBJUVM(pThis, pRmhalspecowner); + status = __nvoc_ctor_OBJUVM(pThis, pRmhalspecowner); + if (status != NV_OK) goto __nvoc_objCreate_OBJUVM_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJUVM_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; 
+} + +NV_STATUS __nvoc_objCreateDynamic_OBJUVM(OBJUVM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJUVM(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_uvm_nvoc.h b/src/nvidia/generated/g_uvm_nvoc.h new file mode 100644 index 000000000..de8699e9c --- /dev/null +++ b/src/nvidia/generated/g_uvm_nvoc.h @@ -0,0 +1,653 @@ +#ifndef _G_UVM_NVOC_H_ +#define _G_UVM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_uvm_nvoc.h" + +#ifndef UVM_H +#define UVM_H + +/*! + * @file + * @brief Provides definitions for all OBJUVM data structures and interfaces. + */ + +#include "core/core.h" +#include "rmapi/control.h" +#include "rmapi/rmapi_utils.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/gpu.h" +#include "nvoc/utility.h" +#include "kernel/gpu/intr/intr_service.h" + +#include "gpu/eng_state.h" + +typedef enum +{ + MIMC, + MOMC +} ACCESS_CNTR_TYPE; + +/*! + * Defines the structure used to contain all generic information related to + * the OBJUVM. + * Contains the Unified Virtual Memory (UVM) feature related data. + */ + + +/* + * This structure is used to store all the necessary information concerning the access counter buffer. + * It is contained within the UVM object. 
+*/ +struct ACCESS_CNTR_BUFFER +{ + NvU64 bar2UvmAccessCntrBufferAddr; //This is the bar2 VA that is used by the gpu in + // order to access the buffer + NvP64 hAccessCntrBufferCpuMapping; //This is a handle to the CPU mapping + MEMORY_DESCRIPTOR *pUvmAccessCntrAllocMemDesc; // Memory descriptor of the access counter buffer allocation + MEMORY_DESCRIPTOR *pUvmAccessCntrMemDesc; // Memory descriptor of the reconstructed access counter buffer + NvHandle hAccessCntrBufferObject; // This is a unique object handle + NvHandle hAccessCntrBufferClient; // This is a unique client handle + NvU32 accessCntrBufferSize; //This represents the size of the buffer (the maximum size that + // can be used before the buffer gets full) +}; + +typedef enum +{ + intr_notify, + intr_error, + intr_all +} ACCESS_CNTR_INTR_TYPE; + +typedef struct OBJUVM *POBJUVM; + +#ifdef NVOC_UVM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJUVM { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct IntrService __nvoc_base_IntrService; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct IntrService *__nvoc_pbase_IntrService; + struct OBJUVM *__nvoc_pbase_OBJUVM; + void (*__uvmStateDestroy__)(OBJGPU *, struct OBJUVM *); + NV_STATUS (*__uvmStateInitUnlocked__)(OBJGPU *, struct OBJUVM *); + void (*__uvmRegisterIntrService__)(OBJGPU *, struct OBJUVM *, IntrServiceRecord *); + NvU32 (*__uvmServiceInterrupt__)(OBJGPU *, struct OBJUVM *, IntrServiceServiceInterruptArguments *); + NV_STATUS (*__uvmReconcileTunableState__)(POBJGPU, struct OBJUVM *, void *); + NV_STATUS (*__uvmStateLoad__)(POBJGPU, struct OBJUVM *, NvU32); + NV_STATUS (*__uvmStateUnload__)(POBJGPU, struct OBJUVM *, NvU32); + NV_STATUS (*__uvmServiceNotificationInterrupt__)(OBJGPU *, struct OBJUVM *, IntrServiceServiceNotificationInterruptArguments *); + NV_STATUS (*__uvmStateInitLocked__)(POBJGPU, struct OBJUVM *); + NV_STATUS (*__uvmStatePreLoad__)(POBJGPU, struct OBJUVM *, NvU32); + NV_STATUS (*__uvmStatePostUnload__)(POBJGPU, struct OBJUVM *, NvU32); + NV_STATUS (*__uvmStatePreUnload__)(POBJGPU, struct OBJUVM *, NvU32); + void (*__uvmInitMissing__)(POBJGPU, struct OBJUVM *); + NV_STATUS (*__uvmStatePreInitLocked__)(POBJGPU, struct OBJUVM *); + NV_STATUS (*__uvmStatePreInitUnlocked__)(POBJGPU, struct OBJUVM *); + NV_STATUS (*__uvmGetTunableState__)(POBJGPU, struct OBJUVM *, void *); + NV_STATUS (*__uvmCompareTunableState__)(POBJGPU, struct OBJUVM *, void *, void *); + void (*__uvmFreeTunableState__)(POBJGPU, struct OBJUVM *, void *); + NvBool (*__uvmClearInterrupt__)(OBJGPU *, struct OBJUVM *, IntrServiceClearInterruptArguments *); + NV_STATUS (*__uvmStatePostLoad__)(POBJGPU, struct OBJUVM *, NvU32); + NV_STATUS (*__uvmAllocTunableState__)(POBJGPU, struct OBJUVM *, void **); + NV_STATUS (*__uvmSetTunableState__)(POBJGPU, struct OBJUVM *, void *); + NV_STATUS (*__uvmConstructEngine__)(POBJGPU, struct OBJUVM *, ENGDESCRIPTOR); + NvBool (*__uvmIsPresent__)(POBJGPU, struct OBJUVM *); + struct ACCESS_CNTR_BUFFER accessCntrBuffer; + NvHandle hClient; + NvHandle hSubdevice; + RM_API *pRmApi; +}; + +#ifndef __NVOC_CLASS_OBJUVM_TYPEDEF__ +#define __NVOC_CLASS_OBJUVM_TYPEDEF__ +typedef struct OBJUVM OBJUVM; +#endif /* __NVOC_CLASS_OBJUVM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJUVM +#define __nvoc_class_id_OBJUVM 0xf9a17d +#endif /* __nvoc_class_id_OBJUVM */ + +extern const struct NVOC_CLASS_DEF 
__nvoc_class_def_OBJUVM; + +#define __staticCast_OBJUVM(pThis) \ + ((pThis)->__nvoc_pbase_OBJUVM) + +#ifdef __nvoc_uvm_h_disabled +#define __dynamicCast_OBJUVM(pThis) ((OBJUVM*)NULL) +#else //__nvoc_uvm_h_disabled +#define __dynamicCast_OBJUVM(pThis) \ + ((OBJUVM*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJUVM))) +#endif //__nvoc_uvm_h_disabled + +#define PDB_PROP_UVM_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_UVM_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING + +NV_STATUS __nvoc_objCreateDynamic_OBJUVM(OBJUVM**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJUVM(OBJUVM**, Dynamic*, NvU32); +#define __objCreate_OBJUVM(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJUVM((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define uvmStateDestroy(pGpu, pUvm) uvmStateDestroy_DISPATCH(pGpu, pUvm) +#define uvmStateInitUnlocked(pGpu, pUvm) uvmStateInitUnlocked_DISPATCH(pGpu, pUvm) +#define uvmRegisterIntrService(arg0, pUvm, arg1) uvmRegisterIntrService_DISPATCH(arg0, pUvm, arg1) +#define uvmServiceInterrupt(arg0, pUvm, arg1) uvmServiceInterrupt_DISPATCH(arg0, pUvm, arg1) +#define uvmReconcileTunableState(pGpu, pEngstate, pTunableState) uvmReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define uvmStateLoad(pGpu, pEngstate, arg0) uvmStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define uvmStateUnload(pGpu, pEngstate, arg0) uvmStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define uvmServiceNotificationInterrupt(pGpu, pIntrService, pParams) uvmServiceNotificationInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define uvmStateInitLocked(pGpu, pEngstate) uvmStateInitLocked_DISPATCH(pGpu, pEngstate) +#define uvmStatePreLoad(pGpu, pEngstate, arg0) uvmStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define uvmStatePostUnload(pGpu, pEngstate, arg0) uvmStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define uvmStatePreUnload(pGpu, pEngstate, arg0) uvmStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) +#define uvmInitMissing(pGpu, pEngstate) uvmInitMissing_DISPATCH(pGpu, pEngstate) +#define uvmStatePreInitLocked(pGpu, pEngstate) uvmStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define uvmStatePreInitUnlocked(pGpu, pEngstate) uvmStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define uvmGetTunableState(pGpu, pEngstate, pTunableState) uvmGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define uvmCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) uvmCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define uvmFreeTunableState(pGpu, pEngstate, pTunableState) uvmFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define uvmClearInterrupt(pGpu, pIntrService, pParams) uvmClearInterrupt_DISPATCH(pGpu, pIntrService, pParams) +#define uvmStatePostLoad(pGpu, pEngstate, arg0) uvmStatePostLoad_DISPATCH(pGpu, pEngstate, arg0) +#define uvmAllocTunableState(pGpu, pEngstate, ppTunableState) uvmAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define uvmSetTunableState(pGpu, pEngstate, pTunableState) uvmSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define uvmConstructEngine(pGpu, pEngstate, arg0) uvmConstructEngine_DISPATCH(pGpu, pEngstate, arg0) +#define uvmIsPresent(pGpu, pEngstate) uvmIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS uvmInitializeAccessCntrBuffer_IMPL(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmInitializeAccessCntrBuffer(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was 
disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmInitializeAccessCntrBuffer(pGpu, pUvm) uvmInitializeAccessCntrBuffer_IMPL(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmInitializeAccessCntrBuffer_HAL(pGpu, pUvm) uvmInitializeAccessCntrBuffer(pGpu, pUvm) + +NV_STATUS uvmTerminateAccessCntrBuffer_IMPL(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmTerminateAccessCntrBuffer(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmTerminateAccessCntrBuffer(pGpu, pUvm) uvmTerminateAccessCntrBuffer_IMPL(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmTerminateAccessCntrBuffer_HAL(pGpu, pUvm) uvmTerminateAccessCntrBuffer(pGpu, pUvm) + +NV_STATUS uvmInitAccessCntrBuffer_GV100(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmInitAccessCntrBuffer(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmInitAccessCntrBuffer(pGpu, pUvm) uvmInitAccessCntrBuffer_GV100(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmInitAccessCntrBuffer_HAL(pGpu, pUvm) uvmInitAccessCntrBuffer(pGpu, pUvm) + +NV_STATUS uvmDestroyAccessCntrBuffer_GV100(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmDestroyAccessCntrBuffer(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmDestroyAccessCntrBuffer(pGpu, pUvm) uvmDestroyAccessCntrBuffer_GV100(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmDestroyAccessCntrBuffer_HAL(pGpu, pUvm) uvmDestroyAccessCntrBuffer(pGpu, pUvm) + +static inline NV_STATUS uvmAccessCntrBufferUnregister_ac1694(OBJGPU *arg0, struct OBJUVM *arg1) { + return NV_OK; +} + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmAccessCntrBufferUnregister(OBJGPU *arg0, struct OBJUVM *arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmAccessCntrBufferUnregister(arg0, arg1) uvmAccessCntrBufferUnregister_ac1694(arg0, arg1) +#endif //__nvoc_uvm_h_disabled + +#define uvmAccessCntrBufferUnregister_HAL(arg0, arg1) uvmAccessCntrBufferUnregister(arg0, arg1) + +static inline NV_STATUS uvmAccessCntrBufferRegister_ac1694(OBJGPU *arg0, struct OBJUVM *arg1, NvU32 arg2, RmPhysAddr *arg3) { + return NV_OK; +} + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmAccessCntrBufferRegister(OBJGPU *arg0, struct OBJUVM *arg1, NvU32 arg2, RmPhysAddr *arg3) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmAccessCntrBufferRegister(arg0, arg1, arg2, arg3) uvmAccessCntrBufferRegister_ac1694(arg0, arg1, arg2, arg3) +#endif //__nvoc_uvm_h_disabled + +#define uvmAccessCntrBufferRegister_HAL(arg0, arg1, arg2, arg3) uvmAccessCntrBufferRegister(arg0, arg1, arg2, arg3) + +NV_STATUS uvmUnloadAccessCntrBuffer_GV100(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmUnloadAccessCntrBuffer(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmUnloadAccessCntrBuffer(pGpu, 
pUvm) uvmUnloadAccessCntrBuffer_GV100(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmUnloadAccessCntrBuffer_HAL(pGpu, pUvm) uvmUnloadAccessCntrBuffer(pGpu, pUvm) + +NV_STATUS uvmSetupAccessCntrBuffer_GV100(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmSetupAccessCntrBuffer(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmSetupAccessCntrBuffer(pGpu, pUvm) uvmSetupAccessCntrBuffer_GV100(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmSetupAccessCntrBuffer_HAL(pGpu, pUvm) uvmSetupAccessCntrBuffer(pGpu, pUvm) + +NV_STATUS uvmReadAccessCntrBufferPutPtr_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 *arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmReadAccessCntrBufferPutPtr(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmReadAccessCntrBufferPutPtr(pGpu, pUvm, arg0) uvmReadAccessCntrBufferPutPtr_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmReadAccessCntrBufferPutPtr_HAL(pGpu, pUvm, arg0) uvmReadAccessCntrBufferPutPtr(pGpu, pUvm, arg0) + +NV_STATUS uvmReadAccessCntrBufferGetPtr_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 *arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmReadAccessCntrBufferGetPtr(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmReadAccessCntrBufferGetPtr(pGpu, pUvm, arg0) uvmReadAccessCntrBufferGetPtr_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmReadAccessCntrBufferGetPtr_HAL(pGpu, pUvm, arg0) uvmReadAccessCntrBufferGetPtr(pGpu, pUvm, arg0) + +NV_STATUS uvmReadAccessCntrBufferFullPtr_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvBool *arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmReadAccessCntrBufferFullPtr(OBJGPU *pGpu, struct OBJUVM *pUvm, NvBool *arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmReadAccessCntrBufferFullPtr(pGpu, pUvm, arg0) uvmReadAccessCntrBufferFullPtr_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmReadAccessCntrBufferFullPtr_HAL(pGpu, pUvm, arg0) uvmReadAccessCntrBufferFullPtr(pGpu, pUvm, arg0) + +NV_STATUS uvmResetAccessCntrBuffer_GV100(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmResetAccessCntrBuffer(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmResetAccessCntrBuffer(pGpu, pUvm, arg0) uvmResetAccessCntrBuffer_GV100(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmResetAccessCntrBuffer_HAL(pGpu, pUvm, arg0) uvmResetAccessCntrBuffer(pGpu, pUvm, arg0) + +NV_STATUS uvmAccessCntrSetGranularity_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, ACCESS_CNTR_TYPE arg0, NvU32 arg1); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmAccessCntrSetGranularity(OBJGPU *pGpu, struct OBJUVM *pUvm, ACCESS_CNTR_TYPE arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmAccessCntrSetGranularity(pGpu, pUvm, arg0, arg1) 
uvmAccessCntrSetGranularity_TU102(pGpu, pUvm, arg0, arg1) +#endif //__nvoc_uvm_h_disabled + +#define uvmAccessCntrSetGranularity_HAL(pGpu, pUvm, arg0, arg1) uvmAccessCntrSetGranularity(pGpu, pUvm, arg0, arg1) + +NV_STATUS uvmAccessCntrSetThreshold_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmAccessCntrSetThreshold(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmAccessCntrSetThreshold(pGpu, pUvm, arg0) uvmAccessCntrSetThreshold_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmAccessCntrSetThreshold_HAL(pGpu, pUvm, arg0) uvmAccessCntrSetThreshold(pGpu, pUvm, arg0) + +NV_STATUS uvmAccessCntrSetCounterLimit_GV100(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0, NvU32 arg1); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmAccessCntrSetCounterLimit(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0, NvU32 arg1) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmAccessCntrSetCounterLimit(pGpu, pUvm, arg0, arg1) uvmAccessCntrSetCounterLimit_GV100(pGpu, pUvm, arg0, arg1) +#endif //__nvoc_uvm_h_disabled + +#define uvmAccessCntrSetCounterLimit_HAL(pGpu, pUvm, arg0, arg1) uvmAccessCntrSetCounterLimit(pGpu, pUvm, arg0, arg1) + +NV_STATUS uvmWriteAccessCntrBufferGetPtr_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmWriteAccessCntrBufferGetPtr(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmWriteAccessCntrBufferGetPtr(pGpu, pUvm, arg0) uvmWriteAccessCntrBufferGetPtr_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmWriteAccessCntrBufferGetPtr_HAL(pGpu, pUvm, arg0) uvmWriteAccessCntrBufferGetPtr(pGpu, pUvm, arg0) + +NV_STATUS uvmEnableAccessCntr_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvBool arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmEnableAccessCntr(OBJGPU *pGpu, struct OBJUVM *pUvm, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmEnableAccessCntr(pGpu, pUvm, arg0) uvmEnableAccessCntr_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmEnableAccessCntr_HAL(pGpu, pUvm, arg0) uvmEnableAccessCntr(pGpu, pUvm, arg0) + +NV_STATUS uvmDisableAccessCntr_GV100(OBJGPU *pGpu, struct OBJUVM *pUvm, NvBool arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmDisableAccessCntr(OBJGPU *pGpu, struct OBJUVM *pUvm, NvBool arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmDisableAccessCntr(pGpu, pUvm, arg0) uvmDisableAccessCntr_GV100(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmDisableAccessCntr_HAL(pGpu, pUvm, arg0) uvmDisableAccessCntr(pGpu, pUvm, arg0) + +NV_STATUS uvmEnableAccessCntrIntr_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmEnableAccessCntrIntr(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmEnableAccessCntrIntr(pGpu, pUvm, arg0) 
uvmEnableAccessCntrIntr_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmEnableAccessCntrIntr_HAL(pGpu, pUvm, arg0) uvmEnableAccessCntrIntr(pGpu, pUvm, arg0) + +NV_STATUS uvmDisableAccessCntrIntr_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmDisableAccessCntrIntr(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmDisableAccessCntrIntr(pGpu, pUvm) uvmDisableAccessCntrIntr_TU102(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmDisableAccessCntrIntr_HAL(pGpu, pUvm) uvmDisableAccessCntrIntr(pGpu, pUvm) + +NV_STATUS uvmGetAccessCntrRegisterMappings_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvP64 *arg0, NvP64 *arg1, NvP64 *arg2, NvP64 *arg3, NvP64 *arg4, NvP64 *arg5, NvU32 *arg6); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmGetAccessCntrRegisterMappings(OBJGPU *pGpu, struct OBJUVM *pUvm, NvP64 *arg0, NvP64 *arg1, NvP64 *arg2, NvP64 *arg3, NvP64 *arg4, NvP64 *arg5, NvU32 *arg6) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmGetAccessCntrRegisterMappings(pGpu, pUvm, arg0, arg1, arg2, arg3, arg4, arg5, arg6) uvmGetAccessCntrRegisterMappings_TU102(pGpu, pUvm, arg0, arg1, arg2, arg3, arg4, arg5, arg6) +#endif //__nvoc_uvm_h_disabled + +#define uvmGetAccessCntrRegisterMappings_HAL(pGpu, pUvm, arg0, arg1, arg2, arg3, arg4, arg5, arg6) uvmGetAccessCntrRegisterMappings(pGpu, pUvm, arg0, arg1, arg2, arg3, arg4, arg5, arg6) + +NV_STATUS uvmAccessCntrService_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NV_STATUS uvmAccessCntrService(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_uvm_h_disabled +#define uvmAccessCntrService(pGpu, pUvm) uvmAccessCntrService_TU102(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmAccessCntrService_HAL(pGpu, pUvm) uvmAccessCntrService(pGpu, pUvm) + +NvU32 uvmGetAccessCounterBufferSize_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NvU32 uvmGetAccessCounterBufferSize(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return 0; +} +#else //__nvoc_uvm_h_disabled +#define uvmGetAccessCounterBufferSize(pGpu, pUvm) uvmGetAccessCounterBufferSize_TU102(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmGetAccessCounterBufferSize_HAL(pGpu, pUvm) uvmGetAccessCounterBufferSize(pGpu, pUvm) + +void uvmWriteAccessCntrBufferHiReg_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline void uvmWriteAccessCntrBufferHiReg(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); +} +#else //__nvoc_uvm_h_disabled +#define uvmWriteAccessCntrBufferHiReg(pGpu, pUvm, arg0) uvmWriteAccessCntrBufferHiReg_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmWriteAccessCntrBufferHiReg_HAL(pGpu, pUvm, arg0) uvmWriteAccessCntrBufferHiReg(pGpu, pUvm, arg0) + +void uvmWriteAccessCntrBufferLoReg_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0); + +#ifdef __nvoc_uvm_h_disabled +static inline void uvmWriteAccessCntrBufferLoReg(OBJGPU *pGpu, struct OBJUVM *pUvm, NvU32 arg0) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); +} +#else //__nvoc_uvm_h_disabled +#define 
uvmWriteAccessCntrBufferLoReg(pGpu, pUvm, arg0) uvmWriteAccessCntrBufferLoReg_TU102(pGpu, pUvm, arg0) +#endif //__nvoc_uvm_h_disabled + +#define uvmWriteAccessCntrBufferLoReg_HAL(pGpu, pUvm, arg0) uvmWriteAccessCntrBufferLoReg(pGpu, pUvm, arg0) + +NvU32 uvmReadAccessCntrBufferLoReg_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NvU32 uvmReadAccessCntrBufferLoReg(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return 0; +} +#else //__nvoc_uvm_h_disabled +#define uvmReadAccessCntrBufferLoReg(pGpu, pUvm) uvmReadAccessCntrBufferLoReg_TU102(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmReadAccessCntrBufferLoReg_HAL(pGpu, pUvm) uvmReadAccessCntrBufferLoReg(pGpu, pUvm) + +NvU32 uvmReadAccessCntrBufferInfoReg_TU102(OBJGPU *pGpu, struct OBJUVM *pUvm); + +#ifdef __nvoc_uvm_h_disabled +static inline NvU32 uvmReadAccessCntrBufferInfoReg(OBJGPU *pGpu, struct OBJUVM *pUvm) { + NV_ASSERT_FAILED_PRECOMP("OBJUVM was disabled!"); + return 0; +} +#else //__nvoc_uvm_h_disabled +#define uvmReadAccessCntrBufferInfoReg(pGpu, pUvm) uvmReadAccessCntrBufferInfoReg_TU102(pGpu, pUvm) +#endif //__nvoc_uvm_h_disabled + +#define uvmReadAccessCntrBufferInfoReg_HAL(pGpu, pUvm) uvmReadAccessCntrBufferInfoReg(pGpu, pUvm) + +void uvmStateDestroy_IMPL(OBJGPU *pGpu, struct OBJUVM *pUvm); + +static inline void uvmStateDestroy_DISPATCH(OBJGPU *pGpu, struct OBJUVM *pUvm) { + pUvm->__uvmStateDestroy__(pGpu, pUvm); +} + +NV_STATUS uvmStateInitUnlocked_IMPL(OBJGPU *pGpu, struct OBJUVM *pUvm); + +static inline NV_STATUS uvmStateInitUnlocked_DISPATCH(OBJGPU *pGpu, struct OBJUVM *pUvm) { + return pUvm->__uvmStateInitUnlocked__(pGpu, pUvm); +} + +void uvmRegisterIntrService_IMPL(OBJGPU *arg0, struct OBJUVM *pUvm, IntrServiceRecord arg1[155]); + +static inline void uvmRegisterIntrService_DISPATCH(OBJGPU *arg0, struct OBJUVM *pUvm, IntrServiceRecord arg1[155]) { + pUvm->__uvmRegisterIntrService__(arg0, pUvm, arg1); +} + +NvU32 uvmServiceInterrupt_IMPL(OBJGPU *arg0, struct OBJUVM *pUvm, IntrServiceServiceInterruptArguments *arg1); + +static inline NvU32 uvmServiceInterrupt_DISPATCH(OBJGPU *arg0, struct OBJUVM *pUvm, IntrServiceServiceInterruptArguments *arg1) { + return pUvm->__uvmServiceInterrupt__(arg0, pUvm, arg1); +} + +static inline NV_STATUS uvmReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunableState) { + return pEngstate->__uvmReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS uvmStateLoad_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return pEngstate->__uvmStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS uvmStateUnload_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return pEngstate->__uvmStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS uvmServiceNotificationInterrupt_DISPATCH(OBJGPU *pGpu, struct OBJUVM *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) { + return pIntrService->__uvmServiceNotificationInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS uvmStateInitLocked_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate) { + return pEngstate->__uvmStateInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS uvmStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return pEngstate->__uvmStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS uvmStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 
arg0) { + return pEngstate->__uvmStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS uvmStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return pEngstate->__uvmStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline void uvmInitMissing_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate) { + pEngstate->__uvmInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS uvmStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate) { + return pEngstate->__uvmStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS uvmStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate) { + return pEngstate->__uvmStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS uvmGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunableState) { + return pEngstate->__uvmGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS uvmCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__uvmCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void uvmFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunableState) { + pEngstate->__uvmFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool uvmClearInterrupt_DISPATCH(OBJGPU *pGpu, struct OBJUVM *pIntrService, IntrServiceClearInterruptArguments *pParams) { + return pIntrService->__uvmClearInterrupt__(pGpu, pIntrService, pParams); +} + +static inline NV_STATUS uvmStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, NvU32 arg0) { + return pEngstate->__uvmStatePostLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS uvmAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, void **ppTunableState) { + return pEngstate->__uvmAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS uvmSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, void *pTunableState) { + return pEngstate->__uvmSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS uvmConstructEngine_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate, ENGDESCRIPTOR arg0) { + return pEngstate->__uvmConstructEngine__(pGpu, pEngstate, arg0); +} + +static inline NvBool uvmIsPresent_DISPATCH(POBJGPU pGpu, struct OBJUVM *pEngstate) { + return pEngstate->__uvmIsPresent__(pGpu, pEngstate); +} + +#undef PRIVATE_FIELD + + +#endif // UVM_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_UVM_NVOC_H_ diff --git a/src/nvidia/generated/g_uvm_sw_nvoc.c b/src/nvidia/generated/g_uvm_sw_nvoc.c new file mode 100644 index 000000000..708821691 --- /dev/null +++ b/src/nvidia/generated/g_uvm_sw_nvoc.c @@ -0,0 +1,424 @@ +#define NVOC_UVM_SW_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_uvm_sw_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xc35503 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UvmSwObject; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +extern const struct 
NVOC_CLASS_DEF __nvoc_class_def_INotifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ChannelDescendant; + +void __nvoc_init_UvmSwObject(UvmSwObject*, RmHalspecOwner* ); +void __nvoc_init_funcTable_UvmSwObject(UvmSwObject*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_UvmSwObject(UvmSwObject*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_UvmSwObject(UvmSwObject*, RmHalspecOwner* ); +void __nvoc_dtor_UvmSwObject(UvmSwObject*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_UvmSwObject; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_UvmSwObject = { + /*pClassDef=*/ &__nvoc_class_def_UvmSwObject, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_UvmSwObject, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_GpuResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_INotifier = { + /*pClassDef=*/ &__nvoc_class_def_INotifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_Notifier = { + /*pClassDef=*/ &__nvoc_class_def_Notifier, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmSwObject, __nvoc_base_ChannelDescendant.__nvoc_base_Notifier), +}; + +static const struct NVOC_RTTI __nvoc_rtti_UvmSwObject_ChannelDescendant = { + /*pClassDef=*/ &__nvoc_class_def_ChannelDescendant, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(UvmSwObject, __nvoc_base_ChannelDescendant), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_UvmSwObject = { + /*numRelatives=*/ 9, + /*relatives=*/ { + &__nvoc_rtti_UvmSwObject_UvmSwObject, + &__nvoc_rtti_UvmSwObject_ChannelDescendant, + &__nvoc_rtti_UvmSwObject_Notifier, + &__nvoc_rtti_UvmSwObject_INotifier, + &__nvoc_rtti_UvmSwObject_GpuResource, + &__nvoc_rtti_UvmSwObject_RmResource, + &__nvoc_rtti_UvmSwObject_RmResourceCommon, + &__nvoc_rtti_UvmSwObject_RsResource, + 
&__nvoc_rtti_UvmSwObject_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_UvmSwObject = +{ + /*classInfo=*/ { + /*size=*/ sizeof(UvmSwObject), + /*classId=*/ classId(UvmSwObject), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "UvmSwObject", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_UvmSwObject, + /*pCastInfo=*/ &__nvoc_castinfo_UvmSwObject, + /*pExportInfo=*/ &__nvoc_export_info_UvmSwObject +}; + +static NV_STATUS __nvoc_thunk_UvmSwObject_chandesGetSwMethods(struct ChannelDescendant *pUvmSw, METHOD **ppMethods, NvU32 *pNumMethods) { + return uvmswGetSwMethods((struct UvmSwObject *)(((unsigned char *)pUvmSw) - __nvoc_rtti_UvmSwObject_ChannelDescendant.offset), ppMethods, pNumMethods); +} + +static NV_STATUS __nvoc_thunk_ChannelDescendant_uvmswCheckMemInterUnmap(struct UvmSwObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return chandesCheckMemInterUnmap((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_UvmSwObject_ChannelDescendant.offset), bSubdeviceHandleProvided); +} + +static NvBool __nvoc_thunk_GpuResource_uvmswShareCallback(struct UvmSwObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmSwObject_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NvBool __nvoc_thunk_RmResource_uvmswAccessCallback(struct UvmSwObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +static NV_STATUS __nvoc_thunk_RsResource_uvmswMapTo(struct UvmSwObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmswGetMapAddrSpace(struct UvmSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmSwObject_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static void __nvoc_thunk_Notifier_uvmswSetNotificationShare(struct UvmSwObject *pNotifier, struct NotifShare *pNotifShare) { + notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_UvmSwObject_Notifier.offset), pNotifShare); +} + +static NvU32 __nvoc_thunk_RsResource_uvmswGetRefCount(struct UvmSwObject *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_uvmswAddAdditionalDependants(struct RsClient *pClient, struct UvmSwObject *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RsResource.offset), pReference); +} + +static NV_STATUS __nvoc_thunk_RmResource_uvmswControl_Prologue(struct UvmSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RmResource.offset), 
pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmswGetRegBaseOffsetAndSize(struct UvmSwObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmSwObject_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmswInternalControlForward(struct UvmSwObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmSwObject_GpuResource.offset), command, pParams, size); +} + +static NV_STATUS __nvoc_thunk_RsResource_uvmswUnmapFrom(struct UvmSwObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_uvmswControl_Epilogue(struct UvmSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_uvmswControlLookup(struct UvmSwObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RsResource.offset), pParams, ppEntry); +} + +static NvHandle __nvoc_thunk_GpuResource_uvmswGetInternalObjectHandle(struct UvmSwObject *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmSwObject_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmswControl(struct UvmSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmSwObject_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmswUnmap(struct UvmSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmSwObject_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_uvmswGetMemInterMapParams(struct UvmSwObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_UvmSwObject_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_uvmswGetMemoryMappingDescriptor(struct UvmSwObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_UvmSwObject_RmResource.offset), ppMemDesc); +} + +static NvBool __nvoc_thunk_ChannelDescendant_uvmswIsSwMethodStalling(struct UvmSwObject *pChannelDescendant, NvU32 hHandle) { + return chandesIsSwMethodStalling((struct ChannelDescendant *)(((unsigned char *)pChannelDescendant) + __nvoc_rtti_UvmSwObject_ChannelDescendant.offset), hHandle); +} + +static NV_STATUS __nvoc_thunk_RsResource_uvmswControlFilter(struct UvmSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL 
*pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RsResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Notifier_uvmswUnregisterEvent(struct UvmSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_UvmSwObject_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static NvBool __nvoc_thunk_RsResource_uvmswCanCopy(struct UvmSwObject *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_uvmswPreDestruct(struct UvmSwObject *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_UvmSwObject_RsResource.offset)); +} + +static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_uvmswGetNotificationListPtr(struct UvmSwObject *pNotifier) { + return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_UvmSwObject_Notifier.offset)); +} + +static struct NotifShare *__nvoc_thunk_Notifier_uvmswGetNotificationShare(struct UvmSwObject *pNotifier) { + return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_UvmSwObject_Notifier.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_uvmswMap(struct UvmSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_UvmSwObject_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Notifier_uvmswGetOrAllocNotifShare(struct UvmSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_UvmSwObject_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_UvmSwObject = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_ChannelDescendant(ChannelDescendant*); +void __nvoc_dtor_UvmSwObject(UvmSwObject *pThis) { + __nvoc_uvmswDestruct(pThis); + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_UvmSwObject(UvmSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_ChannelDescendant(ChannelDescendant* , RmHalspecOwner* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *, PARAM_TO_ENGDESC_FUNCTION *); +NV_STATUS __nvoc_ctor_UvmSwObject(UvmSwObject *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner, arg_pCallContext, arg_pParams, ((void *)0)); + if (status != NV_OK) goto 
__nvoc_ctor_UvmSwObject_fail_ChannelDescendant; + __nvoc_init_dataField_UvmSwObject(pThis, pRmhalspecowner); + + status = __nvoc_uvmswConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_UvmSwObject_fail__init; + goto __nvoc_ctor_UvmSwObject_exit; // Success + +__nvoc_ctor_UvmSwObject_fail__init: + __nvoc_dtor_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant); +__nvoc_ctor_UvmSwObject_fail_ChannelDescendant: +__nvoc_ctor_UvmSwObject_exit: + + return status; +} + +static void __nvoc_init_funcTable_UvmSwObject_1(UvmSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // Hal function -- uvmswGetSwMethods + if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */ + { + pThis->__uvmswGetSwMethods__ = &uvmswGetSwMethods_56cd7a; + } + else if (0) + { + } + + pThis->__nvoc_base_ChannelDescendant.__chandesGetSwMethods__ = &__nvoc_thunk_UvmSwObject_chandesGetSwMethods; + + pThis->__uvmswCheckMemInterUnmap__ = &__nvoc_thunk_ChannelDescendant_uvmswCheckMemInterUnmap; + + pThis->__uvmswShareCallback__ = &__nvoc_thunk_GpuResource_uvmswShareCallback; + + pThis->__uvmswAccessCallback__ = &__nvoc_thunk_RmResource_uvmswAccessCallback; + + pThis->__uvmswMapTo__ = &__nvoc_thunk_RsResource_uvmswMapTo; + + pThis->__uvmswGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_uvmswGetMapAddrSpace; + + pThis->__uvmswSetNotificationShare__ = &__nvoc_thunk_Notifier_uvmswSetNotificationShare; + + pThis->__uvmswGetRefCount__ = &__nvoc_thunk_RsResource_uvmswGetRefCount; + + pThis->__uvmswAddAdditionalDependants__ = &__nvoc_thunk_RsResource_uvmswAddAdditionalDependants; + + pThis->__uvmswControl_Prologue__ = &__nvoc_thunk_RmResource_uvmswControl_Prologue; + + pThis->__uvmswGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_uvmswGetRegBaseOffsetAndSize; + + pThis->__uvmswInternalControlForward__ = &__nvoc_thunk_GpuResource_uvmswInternalControlForward; + + pThis->__uvmswUnmapFrom__ = &__nvoc_thunk_RsResource_uvmswUnmapFrom; + + pThis->__uvmswControl_Epilogue__ = &__nvoc_thunk_RmResource_uvmswControl_Epilogue; + + pThis->__uvmswControlLookup__ = &__nvoc_thunk_RsResource_uvmswControlLookup; + + pThis->__uvmswGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_uvmswGetInternalObjectHandle; + + pThis->__uvmswControl__ = &__nvoc_thunk_GpuResource_uvmswControl; + + pThis->__uvmswUnmap__ = &__nvoc_thunk_GpuResource_uvmswUnmap; + + pThis->__uvmswGetMemInterMapParams__ = &__nvoc_thunk_RmResource_uvmswGetMemInterMapParams; + + pThis->__uvmswGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_uvmswGetMemoryMappingDescriptor; + + pThis->__uvmswIsSwMethodStalling__ = &__nvoc_thunk_ChannelDescendant_uvmswIsSwMethodStalling; + + pThis->__uvmswControlFilter__ = &__nvoc_thunk_RsResource_uvmswControlFilter; + + pThis->__uvmswUnregisterEvent__ = &__nvoc_thunk_Notifier_uvmswUnregisterEvent; + + pThis->__uvmswCanCopy__ = &__nvoc_thunk_RsResource_uvmswCanCopy; + + pThis->__uvmswPreDestruct__ = &__nvoc_thunk_RsResource_uvmswPreDestruct; + + pThis->__uvmswGetNotificationListPtr__ = &__nvoc_thunk_Notifier_uvmswGetNotificationListPtr; + + pThis->__uvmswGetNotificationShare__ = 
&__nvoc_thunk_Notifier_uvmswGetNotificationShare; + + pThis->__uvmswMap__ = &__nvoc_thunk_GpuResource_uvmswMap; + + pThis->__uvmswGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_uvmswGetOrAllocNotifShare; +} + +void __nvoc_init_funcTable_UvmSwObject(UvmSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_UvmSwObject_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_ChannelDescendant(ChannelDescendant*, RmHalspecOwner* ); +void __nvoc_init_UvmSwObject(UvmSwObject *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_UvmSwObject = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource; + pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier.__nvoc_base_INotifier; + pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_Notifier; + pThis->__nvoc_pbase_ChannelDescendant = &pThis->__nvoc_base_ChannelDescendant; + __nvoc_init_ChannelDescendant(&pThis->__nvoc_base_ChannelDescendant, pRmhalspecowner); + __nvoc_init_funcTable_UvmSwObject(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_UvmSwObject(UvmSwObject **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + UvmSwObject *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(UvmSwObject)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(UvmSwObject)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_UvmSwObject); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_ChannelDescendant.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_UvmSwObject(pThis, pRmhalspecowner); + status = __nvoc_ctor_UvmSwObject(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_UvmSwObject_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_UvmSwObject_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_UvmSwObject(UvmSwObject **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct 
RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_UvmSwObject(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_uvm_sw_nvoc.h b/src/nvidia/generated/g_uvm_sw_nvoc.h new file mode 100644 index 000000000..60d6d1d88 --- /dev/null +++ b/src/nvidia/generated/g_uvm_sw_nvoc.h @@ -0,0 +1,295 @@ +#ifndef _G_UVM_SW_NVOC_H_ +#define _G_UVM_SW_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_uvm_sw_nvoc.h" + +#ifndef UVM_SW_H +#define UVM_SW_H + +#include "core/core.h" +#include "kernel/gpu/fifo/channel_descendant.h" + +/*! 
+ * RM internal class representing GP100_UVM_SW + */ +#ifdef NVOC_UVM_SW_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct UvmSwObject { + const struct NVOC_RTTI *__nvoc_rtti; + struct ChannelDescendant __nvoc_base_ChannelDescendant; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct INotifier *__nvoc_pbase_INotifier; + struct Notifier *__nvoc_pbase_Notifier; + struct ChannelDescendant *__nvoc_pbase_ChannelDescendant; + struct UvmSwObject *__nvoc_pbase_UvmSwObject; + NV_STATUS (*__uvmswGetSwMethods__)(struct UvmSwObject *, METHOD **, NvU32 *); + NV_STATUS (*__uvmswCheckMemInterUnmap__)(struct UvmSwObject *, NvBool); + NvBool (*__uvmswShareCallback__)(struct UvmSwObject *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NvBool (*__uvmswAccessCallback__)(struct UvmSwObject *, struct RsClient *, void *, RsAccessRight); + NV_STATUS (*__uvmswMapTo__)(struct UvmSwObject *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__uvmswGetMapAddrSpace__)(struct UvmSwObject *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + void (*__uvmswSetNotificationShare__)(struct UvmSwObject *, struct NotifShare *); + NvU32 (*__uvmswGetRefCount__)(struct UvmSwObject *); + void (*__uvmswAddAdditionalDependants__)(struct RsClient *, struct UvmSwObject *, RsResourceRef *); + NV_STATUS (*__uvmswControl_Prologue__)(struct UvmSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__uvmswGetRegBaseOffsetAndSize__)(struct UvmSwObject *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__uvmswInternalControlForward__)(struct UvmSwObject *, NvU32, void *, NvU32); + NV_STATUS (*__uvmswUnmapFrom__)(struct UvmSwObject *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__uvmswControl_Epilogue__)(struct UvmSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__uvmswControlLookup__)(struct UvmSwObject *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NvHandle (*__uvmswGetInternalObjectHandle__)(struct UvmSwObject *); + NV_STATUS (*__uvmswControl__)(struct UvmSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__uvmswUnmap__)(struct UvmSwObject *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__uvmswGetMemInterMapParams__)(struct UvmSwObject *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__uvmswGetMemoryMappingDescriptor__)(struct UvmSwObject *, struct MEMORY_DESCRIPTOR **); + NvBool (*__uvmswIsSwMethodStalling__)(struct UvmSwObject *, NvU32); + NV_STATUS (*__uvmswControlFilter__)(struct UvmSwObject *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__uvmswUnregisterEvent__)(struct UvmSwObject *, NvHandle, NvHandle, NvHandle, NvHandle); + NvBool (*__uvmswCanCopy__)(struct UvmSwObject *); + void (*__uvmswPreDestruct__)(struct UvmSwObject *); + PEVENTNOTIFICATION *(*__uvmswGetNotificationListPtr__)(struct UvmSwObject *); + struct NotifShare *(*__uvmswGetNotificationShare__)(struct UvmSwObject *); + NV_STATUS (*__uvmswMap__)(struct UvmSwObject *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NV_STATUS (*__uvmswGetOrAllocNotifShare__)(struct UvmSwObject *, NvHandle, NvHandle, struct NotifShare **); + NvU32 methodA; + NvU32 methodB; + NvBool 
bCancelMethodASet; + NvBool bCancelMethodBSet; + NvBool bClearMethodASet; +}; + +#ifndef __NVOC_CLASS_UvmSwObject_TYPEDEF__ +#define __NVOC_CLASS_UvmSwObject_TYPEDEF__ +typedef struct UvmSwObject UvmSwObject; +#endif /* __NVOC_CLASS_UvmSwObject_TYPEDEF__ */ + +#ifndef __nvoc_class_id_UvmSwObject +#define __nvoc_class_id_UvmSwObject 0xc35503 +#endif /* __nvoc_class_id_UvmSwObject */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_UvmSwObject; + +#define __staticCast_UvmSwObject(pThis) \ + ((pThis)->__nvoc_pbase_UvmSwObject) + +#ifdef __nvoc_uvm_sw_h_disabled +#define __dynamicCast_UvmSwObject(pThis) ((UvmSwObject*)NULL) +#else //__nvoc_uvm_sw_h_disabled +#define __dynamicCast_UvmSwObject(pThis) \ + ((UvmSwObject*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(UvmSwObject))) +#endif //__nvoc_uvm_sw_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_UvmSwObject(UvmSwObject**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_UvmSwObject(UvmSwObject**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_UvmSwObject(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_UvmSwObject((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define uvmswGetSwMethods(pUvmSw, ppMethods, pNumMethods) uvmswGetSwMethods_DISPATCH(pUvmSw, ppMethods, pNumMethods) +#define uvmswGetSwMethods_HAL(pUvmSw, ppMethods, pNumMethods) uvmswGetSwMethods_DISPATCH(pUvmSw, ppMethods, pNumMethods) +#define uvmswCheckMemInterUnmap(pChannelDescendant, bSubdeviceHandleProvided) uvmswCheckMemInterUnmap_DISPATCH(pChannelDescendant, bSubdeviceHandleProvided) +#define uvmswShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) uvmswShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define uvmswAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) uvmswAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +#define uvmswMapTo(pResource, pParams) uvmswMapTo_DISPATCH(pResource, pParams) +#define uvmswGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) uvmswGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define uvmswSetNotificationShare(pNotifier, pNotifShare) uvmswSetNotificationShare_DISPATCH(pNotifier, pNotifShare) +#define uvmswGetRefCount(pResource) uvmswGetRefCount_DISPATCH(pResource) +#define uvmswAddAdditionalDependants(pClient, pResource, pReference) uvmswAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define uvmswControl_Prologue(pResource, pCallContext, pParams) uvmswControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define uvmswGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) uvmswGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define uvmswInternalControlForward(pGpuResource, command, pParams, size) uvmswInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define uvmswUnmapFrom(pResource, pParams) uvmswUnmapFrom_DISPATCH(pResource, pParams) +#define uvmswControl_Epilogue(pResource, pCallContext, pParams) uvmswControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define uvmswControlLookup(pResource, pParams, ppEntry) uvmswControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define uvmswGetInternalObjectHandle(pGpuResource) uvmswGetInternalObjectHandle_DISPATCH(pGpuResource) +#define uvmswControl(pGpuResource, pCallContext, pParams) 
uvmswControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define uvmswUnmap(pGpuResource, pCallContext, pCpuMapping) uvmswUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define uvmswGetMemInterMapParams(pRmResource, pParams) uvmswGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define uvmswGetMemoryMappingDescriptor(pRmResource, ppMemDesc) uvmswGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define uvmswIsSwMethodStalling(pChannelDescendant, hHandle) uvmswIsSwMethodStalling_DISPATCH(pChannelDescendant, hHandle) +#define uvmswControlFilter(pResource, pCallContext, pParams) uvmswControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define uvmswUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) uvmswUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) +#define uvmswCanCopy(pResource) uvmswCanCopy_DISPATCH(pResource) +#define uvmswPreDestruct(pResource) uvmswPreDestruct_DISPATCH(pResource) +#define uvmswGetNotificationListPtr(pNotifier) uvmswGetNotificationListPtr_DISPATCH(pNotifier) +#define uvmswGetNotificationShare(pNotifier) uvmswGetNotificationShare_DISPATCH(pNotifier) +#define uvmswMap(pGpuResource, pCallContext, pParams, pCpuMapping) uvmswMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define uvmswGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) uvmswGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) +static inline NV_STATUS uvmswGetSwMethods_56cd7a(struct UvmSwObject *pUvmSw, METHOD **ppMethods, NvU32 *pNumMethods) { + return NV_OK; +} + +static inline NV_STATUS uvmswGetSwMethods_DISPATCH(struct UvmSwObject *pUvmSw, METHOD **ppMethods, NvU32 *pNumMethods) { + return pUvmSw->__uvmswGetSwMethods__(pUvmSw, ppMethods, pNumMethods); +} + +static inline NV_STATUS uvmswCheckMemInterUnmap_DISPATCH(struct UvmSwObject *pChannelDescendant, NvBool bSubdeviceHandleProvided) { + return pChannelDescendant->__uvmswCheckMemInterUnmap__(pChannelDescendant, bSubdeviceHandleProvided); +} + +static inline NvBool uvmswShareCallback_DISPATCH(struct UvmSwObject *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__uvmswShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NvBool uvmswAccessCallback_DISPATCH(struct UvmSwObject *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__uvmswAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS uvmswMapTo_DISPATCH(struct UvmSwObject *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__uvmswMapTo__(pResource, pParams); +} + +static inline NV_STATUS uvmswGetMapAddrSpace_DISPATCH(struct UvmSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__uvmswGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline void uvmswSetNotificationShare_DISPATCH(struct UvmSwObject *pNotifier, struct NotifShare *pNotifShare) { + pNotifier->__uvmswSetNotificationShare__(pNotifier, pNotifShare); +} + +static inline NvU32 uvmswGetRefCount_DISPATCH(struct UvmSwObject *pResource) { + return pResource->__uvmswGetRefCount__(pResource); +} + +static inline void uvmswAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct UvmSwObject 
*pResource, RsResourceRef *pReference) { + pResource->__uvmswAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NV_STATUS uvmswControl_Prologue_DISPATCH(struct UvmSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__uvmswControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS uvmswGetRegBaseOffsetAndSize_DISPATCH(struct UvmSwObject *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__uvmswGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS uvmswInternalControlForward_DISPATCH(struct UvmSwObject *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__uvmswInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline NV_STATUS uvmswUnmapFrom_DISPATCH(struct UvmSwObject *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__uvmswUnmapFrom__(pResource, pParams); +} + +static inline void uvmswControl_Epilogue_DISPATCH(struct UvmSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__uvmswControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS uvmswControlLookup_DISPATCH(struct UvmSwObject *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__uvmswControlLookup__(pResource, pParams, ppEntry); +} + +static inline NvHandle uvmswGetInternalObjectHandle_DISPATCH(struct UvmSwObject *pGpuResource) { + return pGpuResource->__uvmswGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS uvmswControl_DISPATCH(struct UvmSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__uvmswControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS uvmswUnmap_DISPATCH(struct UvmSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__uvmswUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS uvmswGetMemInterMapParams_DISPATCH(struct UvmSwObject *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__uvmswGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS uvmswGetMemoryMappingDescriptor_DISPATCH(struct UvmSwObject *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__uvmswGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NvBool uvmswIsSwMethodStalling_DISPATCH(struct UvmSwObject *pChannelDescendant, NvU32 hHandle) { + return pChannelDescendant->__uvmswIsSwMethodStalling__(pChannelDescendant, hHandle); +} + +static inline NV_STATUS uvmswControlFilter_DISPATCH(struct UvmSwObject *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__uvmswControlFilter__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS uvmswUnregisterEvent_DISPATCH(struct UvmSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) { + return pNotifier->__uvmswUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent); +} + +static inline NvBool uvmswCanCopy_DISPATCH(struct UvmSwObject *pResource) { + return pResource->__uvmswCanCopy__(pResource); +} + +static 
inline void uvmswPreDestruct_DISPATCH(struct UvmSwObject *pResource) { + pResource->__uvmswPreDestruct__(pResource); +} + +static inline PEVENTNOTIFICATION *uvmswGetNotificationListPtr_DISPATCH(struct UvmSwObject *pNotifier) { + return pNotifier->__uvmswGetNotificationListPtr__(pNotifier); +} + +static inline struct NotifShare *uvmswGetNotificationShare_DISPATCH(struct UvmSwObject *pNotifier) { + return pNotifier->__uvmswGetNotificationShare__(pNotifier); +} + +static inline NV_STATUS uvmswMap_DISPATCH(struct UvmSwObject *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__uvmswMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NV_STATUS uvmswGetOrAllocNotifShare_DISPATCH(struct UvmSwObject *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) { + return pNotifier->__uvmswGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare); +} + +NV_STATUS uvmswConstruct_IMPL(struct UvmSwObject *arg_pUvmSw, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_uvmswConstruct(arg_pUvmSw, arg_pCallContext, arg_pParams) uvmswConstruct_IMPL(arg_pUvmSw, arg_pCallContext, arg_pParams) +void uvmswDestruct_IMPL(struct UvmSwObject *pUvmSw); +#define __nvoc_uvmswDestruct(pUvmSw) uvmswDestruct_IMPL(pUvmSw) +void uvmswInitSwMethodState_IMPL(struct UvmSwObject *pUvmSw); +#ifdef __nvoc_uvm_sw_h_disabled +static inline void uvmswInitSwMethodState(struct UvmSwObject *pUvmSw) { + NV_ASSERT_FAILED_PRECOMP("UvmSwObject was disabled!"); +} +#else //__nvoc_uvm_sw_h_disabled +#define uvmswInitSwMethodState(pUvmSw) uvmswInitSwMethodState_IMPL(pUvmSw) +#endif //__nvoc_uvm_sw_h_disabled + +#undef PRIVATE_FIELD + + +#endif // UVM_SW_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_UVM_SW_NVOC_H_ diff --git a/src/nvidia/generated/g_vaspace_api_nvoc.c b/src/nvidia/generated/g_vaspace_api_nvoc.c new file mode 100644 index 000000000..f3da323ea --- /dev/null +++ b/src/nvidia/generated/g_vaspace_api_nvoc.c @@ -0,0 +1,434 @@ +#define NVOC_VASPACE_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_vaspace_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xcd048b = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VaSpaceApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_VaSpaceApi(VaSpaceApi*); +void __nvoc_init_funcTable_VaSpaceApi(VaSpaceApi*); +NV_STATUS __nvoc_ctor_VaSpaceApi(VaSpaceApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_VaSpaceApi(VaSpaceApi*); +void __nvoc_dtor_VaSpaceApi(VaSpaceApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_VaSpaceApi; + +static const struct NVOC_RTTI __nvoc_rtti_VaSpaceApi_VaSpaceApi = { + /*pClassDef=*/ &__nvoc_class_def_VaSpaceApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_VaSpaceApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI 
__nvoc_rtti_VaSpaceApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VaSpaceApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VaSpaceApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VaSpaceApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VaSpaceApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VaSpaceApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VaSpaceApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VaSpaceApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VaSpaceApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VaSpaceApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_VaSpaceApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_VaSpaceApi_VaSpaceApi, + &__nvoc_rtti_VaSpaceApi_GpuResource, + &__nvoc_rtti_VaSpaceApi_RmResource, + &__nvoc_rtti_VaSpaceApi_RmResourceCommon, + &__nvoc_rtti_VaSpaceApi_RsResource, + &__nvoc_rtti_VaSpaceApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_VaSpaceApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(VaSpaceApi), + /*classId=*/ classId(VaSpaceApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "VaSpaceApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_VaSpaceApi, + /*pCastInfo=*/ &__nvoc_castinfo_VaSpaceApi, + /*pExportInfo=*/ &__nvoc_export_info_VaSpaceApi +}; + +static NvBool __nvoc_thunk_VaSpaceApi_resCanCopy(struct RsResource *pResource) { + return vaspaceapiCanCopy((struct VaSpaceApi *)(((unsigned char *)pResource) - __nvoc_rtti_VaSpaceApi_RsResource.offset)); +} + +static NvBool __nvoc_thunk_GpuResource_vaspaceapiShareCallback(struct VaSpaceApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VaSpaceApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vaspaceapiControl(struct VaSpaceApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VaSpaceApi_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vaspaceapiUnmap(struct VaSpaceApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VaSpaceApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_vaspaceapiGetMemInterMapParams(struct VaSpaceApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + 
__nvoc_rtti_VaSpaceApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_vaspaceapiGetMemoryMappingDescriptor(struct VaSpaceApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_VaSpaceApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vaspaceapiGetMapAddrSpace(struct VaSpaceApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VaSpaceApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_vaspaceapiGetInternalObjectHandle(struct VaSpaceApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VaSpaceApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_vaspaceapiControlFilter(struct VaSpaceApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_vaspaceapiAddAdditionalDependants(struct RsClient *pClient, struct VaSpaceApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_vaspaceapiGetRefCount(struct VaSpaceApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_vaspaceapiCheckMemInterUnmap(struct VaSpaceApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_VaSpaceApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_vaspaceapiMapTo(struct VaSpaceApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_vaspaceapiControl_Prologue(struct VaSpaceApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vaspaceapiGetRegBaseOffsetAndSize(struct VaSpaceApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VaSpaceApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vaspaceapiInternalControlForward(struct VaSpaceApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VaSpaceApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_vaspaceapiPreDestruct(struct VaSpaceApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + 
__nvoc_rtti_VaSpaceApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_vaspaceapiUnmapFrom(struct VaSpaceApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_vaspaceapiControl_Epilogue(struct VaSpaceApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_vaspaceapiControlLookup(struct VaSpaceApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vaspaceapiMap(struct VaSpaceApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VaSpaceApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_vaspaceapiAccessCallback(struct VaSpaceApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VaSpaceApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VaSpaceApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) vaspaceapiCtrlCmdVaspaceGetGmmuFormat_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90f10101u, + /*paramSize=*/ sizeof(NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_VaSpaceApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "vaspaceapiCtrlCmdVaspaceGetGmmuFormat" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) vaspaceapiCtrlCmdVaspaceGetPageLevelInfo_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90f10102u, + /*paramSize=*/ sizeof(NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_VaSpaceApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "vaspaceapiCtrlCmdVaspaceGetPageLevelInfo" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) vaspaceapiCtrlCmdVaspaceReserveEntries_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90f10103u, + /*paramSize=*/ sizeof(NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_VaSpaceApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "vaspaceapiCtrlCmdVaspaceReserveEntries" +#endif + }, + { /* [3] */ +#if 
NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) vaspaceapiCtrlCmdVaspaceReleaseEntries_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90f10104u, + /*paramSize=*/ sizeof(NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_VaSpaceApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "vaspaceapiCtrlCmdVaspaceReleaseEntries" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + /*flags=*/ 0x4u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90f10106u, + /*paramSize=*/ sizeof(NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_VaSpaceApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_VaSpaceApi = +{ + /*numEntries=*/ 5, + /*pExportEntries=*/ __nvoc_exported_method_def_VaSpaceApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_VaSpaceApi(VaSpaceApi *pThis) { + __nvoc_vaspaceapiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_VaSpaceApi(VaSpaceApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_VaSpaceApi(VaSpaceApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VaSpaceApi_fail_GpuResource; + __nvoc_init_dataField_VaSpaceApi(pThis); + + status = __nvoc_vaspaceapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VaSpaceApi_fail__init; + goto __nvoc_ctor_VaSpaceApi_exit; // Success + +__nvoc_ctor_VaSpaceApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_VaSpaceApi_fail_GpuResource: +__nvoc_ctor_VaSpaceApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_VaSpaceApi_1(VaSpaceApi *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__vaspaceapiCanCopy__ = &vaspaceapiCanCopy_IMPL; + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__vaspaceapiCtrlCmdVaspaceGetGmmuFormat__ = &vaspaceapiCtrlCmdVaspaceGetGmmuFormat_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__vaspaceapiCtrlCmdVaspaceGetPageLevelInfo__ = &vaspaceapiCtrlCmdVaspaceGetPageLevelInfo_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__vaspaceapiCtrlCmdVaspaceReserveEntries__ = &vaspaceapiCtrlCmdVaspaceReserveEntries_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__vaspaceapiCtrlCmdVaspaceReleaseEntries__ = &vaspaceapiCtrlCmdVaspaceReleaseEntries_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x4u) + pThis->__vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes__ = &vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes_IMPL; +#endif + + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = 
&__nvoc_thunk_VaSpaceApi_resCanCopy; + + pThis->__vaspaceapiShareCallback__ = &__nvoc_thunk_GpuResource_vaspaceapiShareCallback; + + pThis->__vaspaceapiControl__ = &__nvoc_thunk_GpuResource_vaspaceapiControl; + + pThis->__vaspaceapiUnmap__ = &__nvoc_thunk_GpuResource_vaspaceapiUnmap; + + pThis->__vaspaceapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_vaspaceapiGetMemInterMapParams; + + pThis->__vaspaceapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_vaspaceapiGetMemoryMappingDescriptor; + + pThis->__vaspaceapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_vaspaceapiGetMapAddrSpace; + + pThis->__vaspaceapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_vaspaceapiGetInternalObjectHandle; + + pThis->__vaspaceapiControlFilter__ = &__nvoc_thunk_RsResource_vaspaceapiControlFilter; + + pThis->__vaspaceapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_vaspaceapiAddAdditionalDependants; + + pThis->__vaspaceapiGetRefCount__ = &__nvoc_thunk_RsResource_vaspaceapiGetRefCount; + + pThis->__vaspaceapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_vaspaceapiCheckMemInterUnmap; + + pThis->__vaspaceapiMapTo__ = &__nvoc_thunk_RsResource_vaspaceapiMapTo; + + pThis->__vaspaceapiControl_Prologue__ = &__nvoc_thunk_RmResource_vaspaceapiControl_Prologue; + + pThis->__vaspaceapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_vaspaceapiGetRegBaseOffsetAndSize; + + pThis->__vaspaceapiInternalControlForward__ = &__nvoc_thunk_GpuResource_vaspaceapiInternalControlForward; + + pThis->__vaspaceapiPreDestruct__ = &__nvoc_thunk_RsResource_vaspaceapiPreDestruct; + + pThis->__vaspaceapiUnmapFrom__ = &__nvoc_thunk_RsResource_vaspaceapiUnmapFrom; + + pThis->__vaspaceapiControl_Epilogue__ = &__nvoc_thunk_RmResource_vaspaceapiControl_Epilogue; + + pThis->__vaspaceapiControlLookup__ = &__nvoc_thunk_RsResource_vaspaceapiControlLookup; + + pThis->__vaspaceapiMap__ = &__nvoc_thunk_GpuResource_vaspaceapiMap; + + pThis->__vaspaceapiAccessCallback__ = &__nvoc_thunk_RmResource_vaspaceapiAccessCallback; +} + +void __nvoc_init_funcTable_VaSpaceApi(VaSpaceApi *pThis) { + __nvoc_init_funcTable_VaSpaceApi_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_VaSpaceApi(VaSpaceApi *pThis) { + pThis->__nvoc_pbase_VaSpaceApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_VaSpaceApi(pThis); +} + +NV_STATUS __nvoc_objCreate_VaSpaceApi(VaSpaceApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + VaSpaceApi *pThis; + + pThis = portMemAllocNonPaged(sizeof(VaSpaceApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(VaSpaceApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_VaSpaceApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, 
&pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_VaSpaceApi(pThis); + status = __nvoc_ctor_VaSpaceApi(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_VaSpaceApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_VaSpaceApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_VaSpaceApi(VaSpaceApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_VaSpaceApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_vaspace_api_nvoc.h b/src/nvidia/generated/g_vaspace_api_nvoc.h new file mode 100644 index 000000000..43877f7b5 --- /dev/null +++ b/src/nvidia/generated/g_vaspace_api_nvoc.h @@ -0,0 +1,298 @@ +#ifndef _G_VASPACE_API_NVOC_H_ +#define _G_VASPACE_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_vaspace_api_nvoc.h" + +#ifndef VASPACE_API_H +#define VASPACE_API_H + +#include "core/core.h" +#include "mem_mgr/vaspace.h" +#include "rmapi/client.h" +#include "gpu/gpu_resource.h" +#include "ctrl/ctrl90f1.h" +#include "rmapi/control.h" // for macro RMCTRL_EXPORT etc. + +#define RM_INVALID_VASPACE_HANDLE 0xFFFFFFFF + +/*! 
+ * Actions to manage the Server RM page levels (only used by Split VAS mechanism) + */ +typedef enum +{ + VASPACEAPI_MANAGE_PAGE_LEVELS_RESERVE, + VASPACEAPI_MANAGE_PAGE_LEVELS_RELEASE, + VASPACEAPI_MANAGE_PAGE_LEVELS_TRIM, +} VASPACEAPI_MANAGE_PAGE_LEVELS_ACTION; + +#ifdef NVOC_VASPACE_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct VaSpaceApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct VaSpaceApi *__nvoc_pbase_VaSpaceApi; + NvBool (*__vaspaceapiCanCopy__)(struct VaSpaceApi *); + NV_STATUS (*__vaspaceapiCtrlCmdVaspaceGetGmmuFormat__)(struct VaSpaceApi *, NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS *); + NV_STATUS (*__vaspaceapiCtrlCmdVaspaceGetPageLevelInfo__)(struct VaSpaceApi *, NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS *); + NV_STATUS (*__vaspaceapiCtrlCmdVaspaceReserveEntries__)(struct VaSpaceApi *, NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS *); + NV_STATUS (*__vaspaceapiCtrlCmdVaspaceReleaseEntries__)(struct VaSpaceApi *, NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS *); + NV_STATUS (*__vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes__)(struct VaSpaceApi *, NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *); + NvBool (*__vaspaceapiShareCallback__)(struct VaSpaceApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__vaspaceapiControl__)(struct VaSpaceApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__vaspaceapiUnmap__)(struct VaSpaceApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__vaspaceapiGetMemInterMapParams__)(struct VaSpaceApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__vaspaceapiGetMemoryMappingDescriptor__)(struct VaSpaceApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__vaspaceapiGetMapAddrSpace__)(struct VaSpaceApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__vaspaceapiGetInternalObjectHandle__)(struct VaSpaceApi *); + NV_STATUS (*__vaspaceapiControlFilter__)(struct VaSpaceApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__vaspaceapiAddAdditionalDependants__)(struct RsClient *, struct VaSpaceApi *, RsResourceRef *); + NvU32 (*__vaspaceapiGetRefCount__)(struct VaSpaceApi *); + NV_STATUS (*__vaspaceapiCheckMemInterUnmap__)(struct VaSpaceApi *, NvBool); + NV_STATUS (*__vaspaceapiMapTo__)(struct VaSpaceApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__vaspaceapiControl_Prologue__)(struct VaSpaceApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__vaspaceapiGetRegBaseOffsetAndSize__)(struct VaSpaceApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NV_STATUS (*__vaspaceapiInternalControlForward__)(struct VaSpaceApi *, NvU32, void *, NvU32); + void (*__vaspaceapiPreDestruct__)(struct VaSpaceApi *); + NV_STATUS (*__vaspaceapiUnmapFrom__)(struct VaSpaceApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__vaspaceapiControl_Epilogue__)(struct VaSpaceApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__vaspaceapiControlLookup__)(struct VaSpaceApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__vaspaceapiMap__)(struct VaSpaceApi *, struct CALL_CONTEXT *, struct 
RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__vaspaceapiAccessCallback__)(struct VaSpaceApi *, struct RsClient *, void *, RsAccessRight); + struct OBJVASPACE *pVASpace; + NvBool bDuped; +}; + +#ifndef __NVOC_CLASS_VaSpaceApi_TYPEDEF__ +#define __NVOC_CLASS_VaSpaceApi_TYPEDEF__ +typedef struct VaSpaceApi VaSpaceApi; +#endif /* __NVOC_CLASS_VaSpaceApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VaSpaceApi +#define __nvoc_class_id_VaSpaceApi 0xcd048b +#endif /* __nvoc_class_id_VaSpaceApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VaSpaceApi; + +#define __staticCast_VaSpaceApi(pThis) \ + ((pThis)->__nvoc_pbase_VaSpaceApi) + +#ifdef __nvoc_vaspace_api_h_disabled +#define __dynamicCast_VaSpaceApi(pThis) ((VaSpaceApi*)NULL) +#else //__nvoc_vaspace_api_h_disabled +#define __dynamicCast_VaSpaceApi(pThis) \ + ((VaSpaceApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(VaSpaceApi))) +#endif //__nvoc_vaspace_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_VaSpaceApi(VaSpaceApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_VaSpaceApi(VaSpaceApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_VaSpaceApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_VaSpaceApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define vaspaceapiCanCopy(pResource) vaspaceapiCanCopy_DISPATCH(pResource) +#define vaspaceapiCtrlCmdVaspaceGetGmmuFormat(pVaspaceApi, pGmmuFormatParams) vaspaceapiCtrlCmdVaspaceGetGmmuFormat_DISPATCH(pVaspaceApi, pGmmuFormatParams) +#define vaspaceapiCtrlCmdVaspaceGetPageLevelInfo(pVaspaceApi, pPageLevelInfoParams) vaspaceapiCtrlCmdVaspaceGetPageLevelInfo_DISPATCH(pVaspaceApi, pPageLevelInfoParams) +#define vaspaceapiCtrlCmdVaspaceReserveEntries(pVaspaceApi, pReserveEntriesParams) vaspaceapiCtrlCmdVaspaceReserveEntries_DISPATCH(pVaspaceApi, pReserveEntriesParams) +#define vaspaceapiCtrlCmdVaspaceReleaseEntries(pVaspaceApi, pReleaseEntriesParams) vaspaceapiCtrlCmdVaspaceReleaseEntries_DISPATCH(pVaspaceApi, pReleaseEntriesParams) +#define vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes(pVaspaceApi, pCopyServerReservedPdesParams) vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes_DISPATCH(pVaspaceApi, pCopyServerReservedPdesParams) +#define vaspaceapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) vaspaceapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define vaspaceapiControl(pGpuResource, pCallContext, pParams) vaspaceapiControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define vaspaceapiUnmap(pGpuResource, pCallContext, pCpuMapping) vaspaceapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define vaspaceapiGetMemInterMapParams(pRmResource, pParams) vaspaceapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define vaspaceapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) vaspaceapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define vaspaceapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) vaspaceapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define vaspaceapiGetInternalObjectHandle(pGpuResource) vaspaceapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define vaspaceapiControlFilter(pResource, pCallContext, pParams) vaspaceapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define vaspaceapiAddAdditionalDependants(pClient, 
pResource, pReference) vaspaceapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define vaspaceapiGetRefCount(pResource) vaspaceapiGetRefCount_DISPATCH(pResource) +#define vaspaceapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) vaspaceapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define vaspaceapiMapTo(pResource, pParams) vaspaceapiMapTo_DISPATCH(pResource, pParams) +#define vaspaceapiControl_Prologue(pResource, pCallContext, pParams) vaspaceapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define vaspaceapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) vaspaceapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define vaspaceapiInternalControlForward(pGpuResource, command, pParams, size) vaspaceapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define vaspaceapiPreDestruct(pResource) vaspaceapiPreDestruct_DISPATCH(pResource) +#define vaspaceapiUnmapFrom(pResource, pParams) vaspaceapiUnmapFrom_DISPATCH(pResource, pParams) +#define vaspaceapiControl_Epilogue(pResource, pCallContext, pParams) vaspaceapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define vaspaceapiControlLookup(pResource, pParams, ppEntry) vaspaceapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define vaspaceapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) vaspaceapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define vaspaceapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) vaspaceapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NvBool vaspaceapiCanCopy_IMPL(struct VaSpaceApi *pResource); + +static inline NvBool vaspaceapiCanCopy_DISPATCH(struct VaSpaceApi *pResource) { + return pResource->__vaspaceapiCanCopy__(pResource); +} + +NV_STATUS vaspaceapiCtrlCmdVaspaceGetGmmuFormat_IMPL(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS *pGmmuFormatParams); + +static inline NV_STATUS vaspaceapiCtrlCmdVaspaceGetGmmuFormat_DISPATCH(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS *pGmmuFormatParams) { + return pVaspaceApi->__vaspaceapiCtrlCmdVaspaceGetGmmuFormat__(pVaspaceApi, pGmmuFormatParams); +} + +NV_STATUS vaspaceapiCtrlCmdVaspaceGetPageLevelInfo_IMPL(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS *pPageLevelInfoParams); + +static inline NV_STATUS vaspaceapiCtrlCmdVaspaceGetPageLevelInfo_DISPATCH(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS *pPageLevelInfoParams) { + return pVaspaceApi->__vaspaceapiCtrlCmdVaspaceGetPageLevelInfo__(pVaspaceApi, pPageLevelInfoParams); +} + +NV_STATUS vaspaceapiCtrlCmdVaspaceReserveEntries_IMPL(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS *pReserveEntriesParams); + +static inline NV_STATUS vaspaceapiCtrlCmdVaspaceReserveEntries_DISPATCH(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS *pReserveEntriesParams) { + return pVaspaceApi->__vaspaceapiCtrlCmdVaspaceReserveEntries__(pVaspaceApi, pReserveEntriesParams); +} + +NV_STATUS vaspaceapiCtrlCmdVaspaceReleaseEntries_IMPL(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS *pReleaseEntriesParams); + +static inline NV_STATUS vaspaceapiCtrlCmdVaspaceReleaseEntries_DISPATCH(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS *pReleaseEntriesParams) { + return 
pVaspaceApi->__vaspaceapiCtrlCmdVaspaceReleaseEntries__(pVaspaceApi, pReleaseEntriesParams); +} + +NV_STATUS vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes_IMPL(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *pCopyServerReservedPdesParams); + +static inline NV_STATUS vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes_DISPATCH(struct VaSpaceApi *pVaspaceApi, NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *pCopyServerReservedPdesParams) { + return pVaspaceApi->__vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes__(pVaspaceApi, pCopyServerReservedPdesParams); +} + +static inline NvBool vaspaceapiShareCallback_DISPATCH(struct VaSpaceApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__vaspaceapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS vaspaceapiControl_DISPATCH(struct VaSpaceApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__vaspaceapiControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS vaspaceapiUnmap_DISPATCH(struct VaSpaceApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__vaspaceapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS vaspaceapiGetMemInterMapParams_DISPATCH(struct VaSpaceApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__vaspaceapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS vaspaceapiGetMemoryMappingDescriptor_DISPATCH(struct VaSpaceApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__vaspaceapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS vaspaceapiGetMapAddrSpace_DISPATCH(struct VaSpaceApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__vaspaceapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle vaspaceapiGetInternalObjectHandle_DISPATCH(struct VaSpaceApi *pGpuResource) { + return pGpuResource->__vaspaceapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS vaspaceapiControlFilter_DISPATCH(struct VaSpaceApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__vaspaceapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void vaspaceapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct VaSpaceApi *pResource, RsResourceRef *pReference) { + pResource->__vaspaceapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 vaspaceapiGetRefCount_DISPATCH(struct VaSpaceApi *pResource) { + return pResource->__vaspaceapiGetRefCount__(pResource); +} + +static inline NV_STATUS vaspaceapiCheckMemInterUnmap_DISPATCH(struct VaSpaceApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__vaspaceapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS vaspaceapiMapTo_DISPATCH(struct VaSpaceApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__vaspaceapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS vaspaceapiControl_Prologue_DISPATCH(struct VaSpaceApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL 
*pParams) { + return pResource->__vaspaceapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS vaspaceapiGetRegBaseOffsetAndSize_DISPATCH(struct VaSpaceApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__vaspaceapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NV_STATUS vaspaceapiInternalControlForward_DISPATCH(struct VaSpaceApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__vaspaceapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void vaspaceapiPreDestruct_DISPATCH(struct VaSpaceApi *pResource) { + pResource->__vaspaceapiPreDestruct__(pResource); +} + +static inline NV_STATUS vaspaceapiUnmapFrom_DISPATCH(struct VaSpaceApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__vaspaceapiUnmapFrom__(pResource, pParams); +} + +static inline void vaspaceapiControl_Epilogue_DISPATCH(struct VaSpaceApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__vaspaceapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS vaspaceapiControlLookup_DISPATCH(struct VaSpaceApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__vaspaceapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS vaspaceapiMap_DISPATCH(struct VaSpaceApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__vaspaceapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool vaspaceapiAccessCallback_DISPATCH(struct VaSpaceApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__vaspaceapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS vaspaceapiConstruct_IMPL(struct VaSpaceApi *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_vaspaceapiConstruct(arg_pResource, arg_pCallContext, arg_pParams) vaspaceapiConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams) +NV_STATUS vaspaceapiCopyConstruct_IMPL(struct VaSpaceApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams); +#ifdef __nvoc_vaspace_api_h_disabled +static inline NV_STATUS vaspaceapiCopyConstruct(struct VaSpaceApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("VaSpaceApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_vaspace_api_h_disabled +#define vaspaceapiCopyConstruct(pResource, pCallContext, pParams) vaspaceapiCopyConstruct_IMPL(pResource, pCallContext, pParams) +#endif //__nvoc_vaspace_api_h_disabled + +void vaspaceapiDestruct_IMPL(struct VaSpaceApi *pResource); +#define __nvoc_vaspaceapiDestruct(pResource) vaspaceapiDestruct_IMPL(pResource) +#undef PRIVATE_FIELD + + +#endif // VASPACE_API_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VASPACE_API_NVOC_H_ diff --git a/src/nvidia/generated/g_vaspace_nvoc.c b/src/nvidia/generated/g_vaspace_nvoc.c new file mode 100644 index 000000000..2c4bed253 --- /dev/null +++ b/src/nvidia/generated/g_vaspace_nvoc.c @@ -0,0 +1,163 @@ +#define NVOC_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" 
+#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_vaspace_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x6c347f = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_funcTable_OBJVASPACE(OBJVASPACE*); +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE*); +void __nvoc_init_dataField_OBJVASPACE(OBJVASPACE*); +void __nvoc_dtor_OBJVASPACE(OBJVASPACE*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJVASPACE; + +static const struct NVOC_RTTI __nvoc_rtti_OBJVASPACE_OBJVASPACE = { + /*pClassDef=*/ &__nvoc_class_def_OBJVASPACE, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJVASPACE, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJVASPACE_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(OBJVASPACE, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJVASPACE = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJVASPACE_OBJVASPACE, + &__nvoc_rtti_OBJVASPACE_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJVASPACE), + /*classId=*/ classId(OBJVASPACE), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJVASPACE", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) NULL, + /*pCastInfo=*/ &__nvoc_castinfo_OBJVASPACE, + /*pExportInfo=*/ &__nvoc_export_info_OBJVASPACE +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJVASPACE = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJVASPACE(OBJVASPACE *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJVASPACE(OBJVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJVASPACE_fail_Object; + __nvoc_init_dataField_OBJVASPACE(pThis); + goto __nvoc_ctor_OBJVASPACE_exit; // Success + +__nvoc_ctor_OBJVASPACE_fail_Object: +__nvoc_ctor_OBJVASPACE_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJVASPACE_1(OBJVASPACE *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__vaspaceConstruct___ = NULL; + + pThis->__vaspaceAlloc__ = NULL; + + pThis->__vaspaceFree__ = NULL; + + pThis->__vaspaceApplyDefaultAlignment__ = NULL; + + pThis->__vaspaceIncAllocRefCnt__ = &vaspaceIncAllocRefCnt_b7902c; + + pThis->__vaspaceGetVaStart__ = &vaspaceGetVaStart_IMPL; + + pThis->__vaspaceGetVaLimit__ = &vaspaceGetVaLimit_IMPL; + + pThis->__vaspaceGetVasInfo__ = NULL; + + pThis->__vaspaceGetFlags__ = &vaspaceGetFlags_edd98b; + + pThis->__vaspaceIsInternalVaRestricted__ = &vaspaceIsInternalVaRestricted_IMPL; + + pThis->__vaspaceMap__ = &vaspaceMap_b7902c; + + pThis->__vaspaceUnmap__ = &vaspaceUnmap_8b86a5; + + pThis->__vaspaceReserveMempool__ = &vaspaceReserveMempool_ac1694; + + pThis->__vaspaceGetHeap__ = &vaspaceGetHeap_128d6d; + + pThis->__vaspaceGetMapPageSize__ = &vaspaceGetMapPageSize_07238a; + + pThis->__vaspaceGetBigPageSize__ = &vaspaceGetBigPageSize_07238a; + + 
pThis->__vaspaceIsMirrored__ = &vaspaceIsMirrored_814c13; + + pThis->__vaspaceIsFaultCapable__ = &vaspaceIsFaultCapable_814c13; + + pThis->__vaspaceIsExternallyOwned__ = &vaspaceIsExternallyOwned_814c13; + + pThis->__vaspaceIsAtsEnabled__ = &vaspaceIsAtsEnabled_814c13; + + pThis->__vaspaceGetPasid__ = &vaspaceGetPasid_b7902c; + + pThis->__vaspaceGetPageDirBase__ = &vaspaceGetPageDirBase_128d6d; + + pThis->__vaspaceGetKernelPageDirBase__ = &vaspaceGetKernelPageDirBase_128d6d; + + pThis->__vaspacePinRootPageDir__ = &vaspacePinRootPageDir_b7902c; + + pThis->__vaspaceUnpinRootPageDir__ = &vaspaceUnpinRootPageDir_8b86a5; + + pThis->__vaspaceInvalidateTlb__ = &vaspaceInvalidateTlb_IMPL; + + pThis->__vaspaceGetPageTableInfo__ = &vaspaceGetPageTableInfo_b7902c; + + pThis->__vaspaceGetPteInfo__ = &vaspaceGetPteInfo_b7902c; + + pThis->__vaspaceSetPteInfo__ = &vaspaceSetPteInfo_b7902c; +} + +void __nvoc_init_funcTable_OBJVASPACE(OBJVASPACE *pThis) { + __nvoc_init_funcTable_OBJVASPACE_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJVASPACE(OBJVASPACE *pThis) { + pThis->__nvoc_pbase_OBJVASPACE = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJVASPACE(pThis); +} + diff --git a/src/nvidia/generated/g_vaspace_nvoc.h b/src/nvidia/generated/g_vaspace_nvoc.h new file mode 100644 index 000000000..09f356ef1 --- /dev/null +++ b/src/nvidia/generated/g_vaspace_nvoc.h @@ -0,0 +1,602 @@ +#ifndef _G_VASPACE_NVOC_H_ +#define _G_VASPACE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "g_vaspace_nvoc.h" + +#ifndef _VASPACE_H_ +#define _VASPACE_H_ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: VASPACE.H * +* Defines and structures used for Virtual Address Space Object. 
* +\***************************************************************************/ + +#include "ctrl/ctrl0080/ctrl0080dma.h" + +#include "core/core.h" +#include "resserv/rs_client.h" +#include "containers/eheap_old.h" +#include "gpu/mem_mgr/heap_base.h" +#include "gpu/mem_mgr/mem_desc.h" + + +typedef struct OBJVASPACE *POBJVASPACE; +typedef struct VASPACE VASPACE, *PVASPACE; +struct VirtMemAllocator; + +#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +typedef struct VirtMemAllocator VirtMemAllocator; +#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtMemAllocator +#define __nvoc_class_id_VirtMemAllocator 0x899e48 +#endif /* __nvoc_class_id_VirtMemAllocator */ + + +typedef struct MMU_MAP_TARGET MMU_MAP_TARGET; +typedef struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS; + +typedef struct +{ + NvBool bReverse : 1; + NvBool bPreferSysmemPageTables : 1; + NvBool bExternallyManaged : 1; + NvBool bLazy : 1; + NvBool bSparse : 1; + NvBool bPrivileged : 1; + NvBool bClientAllocation : 1; + NvBool bFixedAddressRange : 1; + NvBool bFixedAddressAllocate : 1; + NvBool bForceContig : 1; + NvBool bForceNonContig : 1; + + // + // Using this flag may have security implications. So. use it only when + // you are sure about its usage. + // + NvBool bSkipTlbInvalidateOnFree : 1; +} VAS_ALLOC_FLAGS; + +#define VAS_EHEAP_OWNER_NVRM NvU32_BUILD('n','v','r','m') +#define VAS_EHEAP_OWNER_RSVD NvU32_BUILD('r','s','v','d') + +typedef struct +{ + NvBool bRemap : 1; // +// This flag will create a privileged PDB as part of this vaspace +// This new PDB will mirror all of the allocations made in the +// original PDB. The first PDE is considered privileged for this +// address space. +// SHARED_MANAGEMENT Enables mode where only a portion of the VAS is managed +// and the page directory may be allocated/set externally. +// ALLOW_ZERO_ADDRESS Explicitly allows the base VAS address to start at 0. +// Normally 0 is reserved to distinguish NULL pointers. +// +// BIG_PAGE_SIZE Field that specifies the big page size to be used. +// DEFAULT is used till GM10X. GM20X and later, uses +// custom value for big page size. +// SIZE_DEFAULT Lets RM pick the default value +// SIZE_64K Uses 64K as big page size for this VA space +// SIZE_128K Uses 128K as big page size for this VA space +// +// MMU_FMT_VA_BITS Selects the MMU format of the VA space by the number +// of VA bits supported. +// DEFAULT RM picks the default for the underlying MMU HW. +// 40 Fermi+ 40-bit (2-level) format. +// 49 Pascal+ 49-bit (5-level) format. +// +// ENABLE_VMM +// Temp flag to enable new VMM code path on select +// VA spaces (e.g. client but not BAR1/PMU VAS). +// +// ZERO_OLD_STRUCT Deprecated. +// +// ENABLE_FAULTING This address space is participating in UVM. +// RM will enable page faulting for all channels that will be +// associated with this address space. +// +// IS_UVM_MANAGED This flag will replace the SET_MIRRORED flag. It is used to +// denote that this VASpace is participating in UVM. +// +// ENABLE_ATS This address space has ATS enabled. +// +// +// ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR This flag when set will allow page table allocations +// to be routed to suballocator of the current process +// requesting mapping. If no suballocator, allocations +// will fallback to global heap. +// +// VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB This flag must be used by the VASs which use +// the NVLink MMU. 
+// +#define VASPACE_FLAGS_NONE 0 +#define VASPACE_FLAGS_BAR NVBIT(0) +#define VASPACE_FLAGS_SCRATCH_INVAL NVBIT(1) +#define VASPACE_FLAGS_ENABLE_ATS NVBIT(2) +#define VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS NVBIT(3) +#define VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE NVBIT(4) +#define VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS NVBIT(5) +// unused NVBIT(6) +#define VASPACE_FLAGS_BAR_BAR1 NVBIT(7) +#define VASPACE_FLAGS_BAR_BAR2 NVBIT(8) +#define VASPACE_FLAGS_BAR_IFB NVBIT(9) +#define VASPACE_FLAGS_PERFMON NVBIT(10) +#define VASPACE_FLAGS_PMU NVBIT(11) +#define VASPACE_FLAGS_DEFAULT_SIZE NVBIT(12) +#define VASPACE_FLAGS_DEFAULT_PARAMS NVBIT(13) +#define VASPACE_FLAGS_PTETABLE_PMA_MANAGED NVBIT(14) +#define VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB NVBIT(15) +#define VASPACE_FLAGS_DISABLE_SPLIT_VAS NVBIT(16) +#define VASPACE_FLAGS_SET_MIRRORED NVBIT(17) +#define VASPACE_FLAGS_SHARED_MANAGEMENT NVBIT(18) +#define VASPACE_FLAGS_ALLOW_ZERO_ADDRESS NVBIT(19) +#define VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL NVBIT(20) +#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE 22:21 +#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_DEFAULT 0x00000000 +#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_64K 0x00000001 +#define NV_VASPACE_FLAGS_BIG_PAGE_SIZE_128K 0x00000002 +#define VASPACE_FLAGS_HDA NVBIT(23) +#define VASPACE_FLAGS_FLA NVBIT(24) // Soon to be deprecated and removed. + // Used by legacy FLA implementation. +#define VASPACE_FLAGS_HWPM NVBIT(25) +#define VASPACE_FLAGS_ENABLE_VMM NVBIT(26) +#define VASPACE_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE NVBIT(27) +#define VASPACE_FLAGS_REVERSE NVBIT(28) +#define VASPACE_FLAGS_ENABLE_FAULTING NVBIT(29) +#define VASPACE_FLAGS_IS_EXTERNALLY_OWNED NVBIT(30) +#define VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR NVBIT(31) + +/*! + * Flags for page table memory pools. + * + * VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY + * Only allocate levels from the top to the specified level only. + * Anything below the specified level is not allocated. + */ +#define VASPACE_RESERVE_FLAGS_NONE (0) +#define VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY NVBIT32(0) + +/*! + * Level of RM-management for a given VA range. + * + * FULL + * RM manages everything (e.g. PDEs, PTEs). + * PDES_ONLY + * RM only manages PDEs (through non-buffer version of UpdatePde2). + * Buffer versions of FillPteMem and UpdatePde2 are still allowed. + * NONE + * RM does not manage anything. + * Buffer versions of FillPteMem and UpdatePde2 are still allowed. + */ +typedef enum +{ + VA_MANAGEMENT_FULL = 0, + VA_MANAGEMENT_PDES_ONLY, + VA_MANAGEMENT_NONE, +} VA_MANAGEMENT; + +/*! + * Abstract base class of an RM-managed virtual address space. 
+ */ +#ifdef NVOC_VASPACE_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJVASPACE { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJVASPACE *__nvoc_pbase_OBJVASPACE; + NV_STATUS (*__vaspaceConstruct___)(struct OBJVASPACE *, NvU32, NvU32, NvU64, NvU64, NvU64, NvU64, NvU32); + NV_STATUS (*__vaspaceAlloc__)(struct OBJVASPACE *, NvU64, NvU64, NvU64, NvU64, NvU64, VAS_ALLOC_FLAGS, NvU64 *); + NV_STATUS (*__vaspaceFree__)(struct OBJVASPACE *, NvU64); + NV_STATUS (*__vaspaceApplyDefaultAlignment__)(struct OBJVASPACE *, const FB_ALLOC_INFO *, NvU64 *, NvU64 *, NvU64 *); + NV_STATUS (*__vaspaceIncAllocRefCnt__)(struct OBJVASPACE *, NvU64); + NvU64 (*__vaspaceGetVaStart__)(struct OBJVASPACE *); + NvU64 (*__vaspaceGetVaLimit__)(struct OBJVASPACE *); + NV_STATUS (*__vaspaceGetVasInfo__)(struct OBJVASPACE *, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *); + NvU32 (*__vaspaceGetFlags__)(struct OBJVASPACE *); + NvBool (*__vaspaceIsInternalVaRestricted__)(struct OBJVASPACE *); + NV_STATUS (*__vaspaceMap__)(struct OBJVASPACE *, struct OBJGPU *, const NvU64, const NvU64, const MMU_MAP_TARGET *, const VAS_MAP_FLAGS); + void (*__vaspaceUnmap__)(struct OBJVASPACE *, struct OBJGPU *, const NvU64, const NvU64); + NV_STATUS (*__vaspaceReserveMempool__)(struct OBJVASPACE *, struct OBJGPU *, NvHandle, NvU64, NvU64, NvU32); + struct OBJEHEAP *(*__vaspaceGetHeap__)(struct OBJVASPACE *); + NvU32 (*__vaspaceGetMapPageSize__)(struct OBJVASPACE *, struct OBJGPU *, EMEMBLOCK *); + NvU32 (*__vaspaceGetBigPageSize__)(struct OBJVASPACE *); + NvBool (*__vaspaceIsMirrored__)(struct OBJVASPACE *); + NvBool (*__vaspaceIsFaultCapable__)(struct OBJVASPACE *); + NvBool (*__vaspaceIsExternallyOwned__)(struct OBJVASPACE *); + NvBool (*__vaspaceIsAtsEnabled__)(struct OBJVASPACE *); + NV_STATUS (*__vaspaceGetPasid__)(struct OBJVASPACE *, NvU32 *); + PMEMORY_DESCRIPTOR (*__vaspaceGetPageDirBase__)(struct OBJVASPACE *, struct OBJGPU *); + PMEMORY_DESCRIPTOR (*__vaspaceGetKernelPageDirBase__)(struct OBJVASPACE *, struct OBJGPU *); + NV_STATUS (*__vaspacePinRootPageDir__)(struct OBJVASPACE *, struct OBJGPU *); + void (*__vaspaceUnpinRootPageDir__)(struct OBJVASPACE *, struct OBJGPU *); + void (*__vaspaceInvalidateTlb__)(struct OBJVASPACE *, struct OBJGPU *, VAS_PTE_UPDATE_TYPE); + NV_STATUS (*__vaspaceGetPageTableInfo__)(struct OBJVASPACE *, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *); + NV_STATUS (*__vaspaceGetPteInfo__)(struct OBJVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *, RmPhysAddr *); + NV_STATUS (*__vaspaceSetPteInfo__)(struct OBJVASPACE *, struct OBJGPU *, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *); + NvU32 gpuMask; + ADDRESS_TRANSLATION addressTranslation; + NvU32 refCnt; + NvU32 vaspaceId; + NvU64 vasStart; + NvU64 vasLimit; +}; + +#ifndef __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +#define __NVOC_CLASS_OBJVASPACE_TYPEDEF__ +typedef struct OBJVASPACE OBJVASPACE; +#endif /* __NVOC_CLASS_OBJVASPACE_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVASPACE +#define __nvoc_class_id_OBJVASPACE 0x6c347f +#endif /* __nvoc_class_id_OBJVASPACE */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE; + +#define __staticCast_OBJVASPACE(pThis) \ + ((pThis)->__nvoc_pbase_OBJVASPACE) + +#ifdef __nvoc_vaspace_h_disabled +#define __dynamicCast_OBJVASPACE(pThis) ((OBJVASPACE*)NULL) +#else //__nvoc_vaspace_h_disabled +#define __dynamicCast_OBJVASPACE(pThis) \ + 
((OBJVASPACE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJVASPACE))) +#endif //__nvoc_vaspace_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJVASPACE(OBJVASPACE**, Dynamic*, NvU32); +#define __objCreate_OBJVASPACE(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJVASPACE((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define vaspaceConstruct_(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) vaspaceConstruct__DISPATCH(pVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags) +#define vaspaceAlloc(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) vaspaceAlloc_DISPATCH(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr) +#define vaspaceFree(pVAS, vAddr) vaspaceFree_DISPATCH(pVAS, vAddr) +#define vaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) vaspaceApplyDefaultAlignment_DISPATCH(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask) +#define vaspaceIncAllocRefCnt(pVAS, vAddr) vaspaceIncAllocRefCnt_DISPATCH(pVAS, vAddr) +#define vaspaceGetVaStart(pVAS) vaspaceGetVaStart_DISPATCH(pVAS) +#define vaspaceGetVaLimit(pVAS) vaspaceGetVaLimit_DISPATCH(pVAS) +#define vaspaceGetVasInfo(pVAS, pParams) vaspaceGetVasInfo_DISPATCH(pVAS, pParams) +#define vaspaceGetFlags(pVAS) vaspaceGetFlags_DISPATCH(pVAS) +#define vaspaceIsInternalVaRestricted(pVAS) vaspaceIsInternalVaRestricted_DISPATCH(pVAS) +#define vaspaceMap(pVAS, pGpu, vaLo, vaHi, pTarget, flags) vaspaceMap_DISPATCH(pVAS, pGpu, vaLo, vaHi, pTarget, flags) +#define vaspaceUnmap(pVAS, pGpu, vaLo, vaHi) vaspaceUnmap_DISPATCH(pVAS, pGpu, vaLo, vaHi) +#define vaspaceReserveMempool(pVAS, pGpu, hClient, size, pageSizeLockMask, flags) vaspaceReserveMempool_DISPATCH(pVAS, pGpu, hClient, size, pageSizeLockMask, flags) +#define vaspaceGetHeap(pVAS) vaspaceGetHeap_DISPATCH(pVAS) +#define vaspaceGetMapPageSize(pVAS, pGpu, pMemBlock) vaspaceGetMapPageSize_DISPATCH(pVAS, pGpu, pMemBlock) +#define vaspaceGetBigPageSize(pVAS) vaspaceGetBigPageSize_DISPATCH(pVAS) +#define vaspaceIsMirrored(pVAS) vaspaceIsMirrored_DISPATCH(pVAS) +#define vaspaceIsFaultCapable(pVAS) vaspaceIsFaultCapable_DISPATCH(pVAS) +#define vaspaceIsExternallyOwned(pVAS) vaspaceIsExternallyOwned_DISPATCH(pVAS) +#define vaspaceIsAtsEnabled(pVAS) vaspaceIsAtsEnabled_DISPATCH(pVAS) +#define vaspaceGetPasid(pVAS, pPasid) vaspaceGetPasid_DISPATCH(pVAS, pPasid) +#define vaspaceGetPageDirBase(pVAS, pGpu) vaspaceGetPageDirBase_DISPATCH(pVAS, pGpu) +#define vaspaceGetKernelPageDirBase(pVAS, pGpu) vaspaceGetKernelPageDirBase_DISPATCH(pVAS, pGpu) +#define vaspacePinRootPageDir(pVAS, pGpu) vaspacePinRootPageDir_DISPATCH(pVAS, pGpu) +#define vaspaceUnpinRootPageDir(pVAS, pGpu) vaspaceUnpinRootPageDir_DISPATCH(pVAS, pGpu) +#define vaspaceInvalidateTlb(pVAS, pGpu, type) vaspaceInvalidateTlb_DISPATCH(pVAS, pGpu, type) +#define vaspaceGetPageTableInfo(pVAS, pParams) vaspaceGetPageTableInfo_DISPATCH(pVAS, pParams) +#define vaspaceGetPteInfo(pVAS, pGpu, pParams, pPhysAddr) vaspaceGetPteInfo_DISPATCH(pVAS, pGpu, pParams, pPhysAddr) +#define vaspaceSetPteInfo(pVAS, pGpu, pParams) vaspaceSetPteInfo_DISPATCH(pVAS, pGpu, pParams) +static inline NV_STATUS vaspaceConstruct__DISPATCH(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) { + return pVAS->__vaspaceConstruct___(pVAS, 
classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); +} + +static inline NV_STATUS vaspaceAlloc_DISPATCH(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) { + return pVAS->__vaspaceAlloc__(pVAS, size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr); +} + +static inline NV_STATUS vaspaceFree_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__vaspaceFree__(pVAS, vAddr); +} + +static inline NV_STATUS vaspaceApplyDefaultAlignment_DISPATCH(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) { + return pVAS->__vaspaceApplyDefaultAlignment__(pVAS, pAllocInfo, pAlign, pSize, pPageSizeLockMask); +} + +static inline NV_STATUS vaspaceIncAllocRefCnt_b7902c(struct OBJVASPACE *pVAS, NvU64 vAddr) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vaspaceIncAllocRefCnt_DISPATCH(struct OBJVASPACE *pVAS, NvU64 vAddr) { + return pVAS->__vaspaceIncAllocRefCnt__(pVAS, vAddr); +} + +NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS); + +static inline NvU64 vaspaceGetVaStart_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceGetVaStart__(pVAS); +} + +NvU64 vaspaceGetVaLimit_IMPL(struct OBJVASPACE *pVAS); + +static inline NvU64 vaspaceGetVaLimit_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceGetVaLimit__(pVAS); +} + +static inline NV_STATUS vaspaceGetVasInfo_DISPATCH(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) { + return pVAS->__vaspaceGetVasInfo__(pVAS, pParams); +} + +static inline NvU32 vaspaceGetFlags_edd98b(struct OBJVASPACE *pVAS) { + return 0U; +} + +static inline NvU32 vaspaceGetFlags_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceGetFlags__(pVAS); +} + +NvBool vaspaceIsInternalVaRestricted_IMPL(struct OBJVASPACE *pVAS); + +static inline NvBool vaspaceIsInternalVaRestricted_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceIsInternalVaRestricted__(pVAS); +} + +static inline NV_STATUS vaspaceMap_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vaspaceMap_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi, const MMU_MAP_TARGET *pTarget, const VAS_MAP_FLAGS flags) { + return pVAS->__vaspaceMap__(pVAS, pGpu, vaLo, vaHi, pTarget, flags); +} + +static inline void vaspaceUnmap_8b86a5(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); +} + +static inline void vaspaceUnmap_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, const NvU64 vaLo, const NvU64 vaHi) { + pVAS->__vaspaceUnmap__(pVAS, pGpu, vaLo, vaHi); +} + +static inline NV_STATUS vaspaceReserveMempool_ac1694(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) { + return NV_OK; +} + +static inline NV_STATUS vaspaceReserveMempool_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NvHandle hClient, NvU64 size, NvU64 pageSizeLockMask, NvU32 flags) { + return pVAS->__vaspaceReserveMempool__(pVAS, pGpu, hClient, size, pageSizeLockMask, flags); +} + +static inline struct OBJEHEAP *vaspaceGetHeap_128d6d(struct OBJVASPACE *pVAS) { + 
NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return ((void *)0); +} + +static inline struct OBJEHEAP *vaspaceGetHeap_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceGetHeap__(pVAS); +} + +static inline NvU32 vaspaceGetMapPageSize_07238a(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return 0U; +} + +static inline NvU32 vaspaceGetMapPageSize_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, EMEMBLOCK *pMemBlock) { + return pVAS->__vaspaceGetMapPageSize__(pVAS, pGpu, pMemBlock); +} + +static inline NvU32 vaspaceGetBigPageSize_07238a(struct OBJVASPACE *pVAS) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return 0U; +} + +static inline NvU32 vaspaceGetBigPageSize_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceGetBigPageSize__(pVAS); +} + +static inline NvBool vaspaceIsMirrored_814c13(struct OBJVASPACE *pVAS) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return ((NvBool)(0 != 0)); +} + +static inline NvBool vaspaceIsMirrored_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceIsMirrored__(pVAS); +} + +static inline NvBool vaspaceIsFaultCapable_814c13(struct OBJVASPACE *pVAS) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return ((NvBool)(0 != 0)); +} + +static inline NvBool vaspaceIsFaultCapable_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceIsFaultCapable__(pVAS); +} + +static inline NvBool vaspaceIsExternallyOwned_814c13(struct OBJVASPACE *pVAS) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return ((NvBool)(0 != 0)); +} + +static inline NvBool vaspaceIsExternallyOwned_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceIsExternallyOwned__(pVAS); +} + +static inline NvBool vaspaceIsAtsEnabled_814c13(struct OBJVASPACE *pVAS) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return ((NvBool)(0 != 0)); +} + +static inline NvBool vaspaceIsAtsEnabled_DISPATCH(struct OBJVASPACE *pVAS) { + return pVAS->__vaspaceIsAtsEnabled__(pVAS); +} + +static inline NV_STATUS vaspaceGetPasid_b7902c(struct OBJVASPACE *pVAS, NvU32 *pPasid) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vaspaceGetPasid_DISPATCH(struct OBJVASPACE *pVAS, NvU32 *pPasid) { + return pVAS->__vaspaceGetPasid__(pVAS, pPasid); +} + +static inline PMEMORY_DESCRIPTOR vaspaceGetPageDirBase_128d6d(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return ((void *)0); +} + +static inline PMEMORY_DESCRIPTOR vaspaceGetPageDirBase_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__vaspaceGetPageDirBase__(pVAS, pGpu); +} + +static inline PMEMORY_DESCRIPTOR vaspaceGetKernelPageDirBase_128d6d(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return ((void *)0); +} + +static inline PMEMORY_DESCRIPTOR vaspaceGetKernelPageDirBase_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__vaspaceGetKernelPageDirBase__(pVAS, pGpu); +} + +static inline NV_STATUS vaspacePinRootPageDir_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vaspacePinRootPageDir_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + return pVAS->__vaspacePinRootPageDir__(pVAS, pGpu); +} + +static inline void vaspaceUnpinRootPageDir_8b86a5(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); +} + +static inline void vaspaceUnpinRootPageDir_DISPATCH(struct 
OBJVASPACE *pVAS, struct OBJGPU *pGpu) { + pVAS->__vaspaceUnpinRootPageDir__(pVAS, pGpu); +} + +void vaspaceInvalidateTlb_IMPL(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type); + +static inline void vaspaceInvalidateTlb_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, VAS_PTE_UPDATE_TYPE type) { + pVAS->__vaspaceInvalidateTlb__(pVAS, pGpu, type); +} + +static inline NV_STATUS vaspaceGetPageTableInfo_b7902c(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vaspaceGetPageTableInfo_DISPATCH(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams) { + return pVAS->__vaspaceGetPageTableInfo__(pVAS, pParams); +} + +static inline NV_STATUS vaspaceGetPteInfo_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vaspaceGetPteInfo_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams, RmPhysAddr *pPhysAddr) { + return pVAS->__vaspaceGetPteInfo__(pVAS, pGpu, pParams, pPhysAddr); +} + +static inline NV_STATUS vaspaceSetPteInfo_b7902c(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + NV_ASSERT_PRECOMP(((NvBool)(0 != 0))); + return NV_ERR_NOT_SUPPORTED; +} + +static inline NV_STATUS vaspaceSetPteInfo_DISPATCH(struct OBJVASPACE *pVAS, struct OBJGPU *pGpu, NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams) { + return pVAS->__vaspaceSetPteInfo__(pVAS, pGpu, pParams); +} + +void vaspaceIncRefCnt_IMPL(struct OBJVASPACE *pVAS); +#ifdef __nvoc_vaspace_h_disabled +static inline void vaspaceIncRefCnt(struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!"); +} +#else //__nvoc_vaspace_h_disabled +#define vaspaceIncRefCnt(pVAS) vaspaceIncRefCnt_IMPL(pVAS) +#endif //__nvoc_vaspace_h_disabled + +void vaspaceDecRefCnt_IMPL(struct OBJVASPACE *pVAS); +#ifdef __nvoc_vaspace_h_disabled +static inline void vaspaceDecRefCnt(struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!"); +} +#else //__nvoc_vaspace_h_disabled +#define vaspaceDecRefCnt(pVAS) vaspaceDecRefCnt_IMPL(pVAS) +#endif //__nvoc_vaspace_h_disabled + +NV_STATUS vaspaceGetByHandleOrDeviceDefault_IMPL(struct RsClient *pClient, NvHandle hDeviceOrSubDevice, NvHandle hVASpace, struct OBJVASPACE **ppVAS); +#define vaspaceGetByHandleOrDeviceDefault(pClient, hDeviceOrSubDevice, hVASpace, ppVAS) vaspaceGetByHandleOrDeviceDefault_IMPL(pClient, hDeviceOrSubDevice, hVASpace, ppVAS) +NV_STATUS vaspaceFillAllocParams_IMPL(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pSize, NvU64 *pAlign, NvU64 *pRangeLo, NvU64 *pRangeHi, NvU64 *pPageSizeLockMask, VAS_ALLOC_FLAGS *pFlags); +#ifdef __nvoc_vaspace_h_disabled +static inline NV_STATUS vaspaceFillAllocParams(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pSize, NvU64 *pAlign, NvU64 *pRangeLo, NvU64 *pRangeHi, NvU64 *pPageSizeLockMask, VAS_ALLOC_FLAGS *pFlags) { + NV_ASSERT_FAILED_PRECOMP("OBJVASPACE was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_vaspace_h_disabled +#define vaspaceFillAllocParams(pVAS, pAllocInfo, pSize, pAlign, pRangeLo, pRangeHi, pPageSizeLockMask, pFlags) vaspaceFillAllocParams_IMPL(pVAS, pAllocInfo, pSize, pAlign, pRangeLo, pRangeHi, pPageSizeLockMask, pFlags) +#endif 
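/*
 * Illustrative sketch, not part of this change: how the vaspaceXxx()
 * dispatch macros defined above are typically consumed.  Each OBJVASPACE
 * method is a per-instance function pointer, and the *_DISPATCH inline
 * wrappers forward through that pointer, so callers can work against the
 * abstract class without knowing the concrete VA space implementation; the
 * generated default stubs such as vaspaceMap_b7902c simply assert and
 * return NV_ERR_NOT_SUPPORTED.  The helper name below is hypothetical, and
 * it assumes the usual RM convention that the VA limit is inclusive.
 */
static inline NvU64 exampleVaspaceSizeBytes(struct OBJVASPACE *pVAS)
{
    // Both calls resolve through pVAS->__vaspaceGetVaStart__ /
    // pVAS->__vaspaceGetVaLimit__ at runtime.
    NvU64 vaStart = vaspaceGetVaStart(pVAS);
    NvU64 vaLimit = vaspaceGetVaLimit(pVAS);

    return (vaLimit - vaStart) + 1;
}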
//__nvoc_vaspace_h_disabled + +#undef PRIVATE_FIELD + + +// Ideally all non-static base class method declaration should be in the _private.h file +NvU64 vaspaceGetVaStart_IMPL(struct OBJVASPACE *pVAS); + +// For getting the address translation after the MMU (i.e.: after VA->PA translation) +#define VAS_ADDRESS_TRANSLATION(pVASpace) ((pVASpace)->addressTranslation) + +#endif // _VASPACE_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VASPACE_NVOC_H_ diff --git a/src/nvidia/generated/g_vblank_callback_nvoc.c b/src/nvidia/generated/g_vblank_callback_nvoc.c new file mode 100644 index 000000000..47214af47 --- /dev/null +++ b/src/nvidia/generated/g_vblank_callback_nvoc.c @@ -0,0 +1,356 @@ +#define NVOC_VBLANK_CALLBACK_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_vblank_callback_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x4c1997 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VblankCallback; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_VblankCallback(VblankCallback*); +void __nvoc_init_funcTable_VblankCallback(VblankCallback*); +NV_STATUS __nvoc_ctor_VblankCallback(VblankCallback*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_VblankCallback(VblankCallback*); +void __nvoc_dtor_VblankCallback(VblankCallback*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_VblankCallback; + +static const struct NVOC_RTTI __nvoc_rtti_VblankCallback_VblankCallback = { + /*pClassDef=*/ &__nvoc_class_def_VblankCallback, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_VblankCallback, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_VblankCallback_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VblankCallback, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VblankCallback_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VblankCallback, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VblankCallback_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VblankCallback, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VblankCallback_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VblankCallback, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VblankCallback_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VblankCallback, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_VblankCallback = { + /*numRelatives=*/ 6, 
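/*
 * Descriptive note, not part of this change: each entry in the relatives
 * list below pairs a base-class NVOC_CLASS_DEF with the byte offset of that
 * base inside VblankCallback.  The generated thunks and __nvoc_dynamicCast
 * rely on these offsets to convert between the derived and base views of the
 * same allocation, conceptually:
 *
 *     pRsResource = (struct RsResource *)((unsigned char *)pThis +
 *                       __nvoc_rtti_VblankCallback_RsResource.offset);
 *
 * where pThis is a hypothetical VblankCallback pointer.
 */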
+ /*relatives=*/ { + &__nvoc_rtti_VblankCallback_VblankCallback, + &__nvoc_rtti_VblankCallback_GpuResource, + &__nvoc_rtti_VblankCallback_RmResource, + &__nvoc_rtti_VblankCallback_RmResourceCommon, + &__nvoc_rtti_VblankCallback_RsResource, + &__nvoc_rtti_VblankCallback_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_VblankCallback = +{ + /*classInfo=*/ { + /*size=*/ sizeof(VblankCallback), + /*classId=*/ classId(VblankCallback), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "VblankCallback", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_VblankCallback, + /*pCastInfo=*/ &__nvoc_castinfo_VblankCallback, + /*pExportInfo=*/ &__nvoc_export_info_VblankCallback +}; + +static NvBool __nvoc_thunk_GpuResource_vblcbShareCallback(struct VblankCallback *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VblankCallback_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vblcbControl(struct VblankCallback *pGpuResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VblankCallback_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vblcbUnmap(struct VblankCallback *pGpuResource, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VblankCallback_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_vblcbGetMemInterMapParams(struct VblankCallback *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_VblankCallback_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_vblcbGetMemoryMappingDescriptor(struct VblankCallback *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_VblankCallback_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vblcbGetMapAddrSpace(struct VblankCallback *pGpuResource, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VblankCallback_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_vblcbGetInternalObjectHandle(struct VblankCallback *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VblankCallback_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_vblcbControlFilter(struct VblankCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_vblcbAddAdditionalDependants(struct RsClient *pClient, struct VblankCallback *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource 
*)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_vblcbGetRefCount(struct VblankCallback *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_vblcbCheckMemInterUnmap(struct VblankCallback *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_VblankCallback_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_vblcbMapTo(struct VblankCallback *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_vblcbControl_Prologue(struct VblankCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vblcbGetRegBaseOffsetAndSize(struct VblankCallback *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VblankCallback_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_vblcbCanCopy(struct VblankCallback *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vblcbInternalControlForward(struct VblankCallback *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VblankCallback_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_vblcbPreDestruct(struct VblankCallback *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_vblcbUnmapFrom(struct VblankCallback *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_vblcbControl_Epilogue(struct VblankCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_vblcbControlLookup(struct VblankCallback *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_vblcbMap(struct VblankCallback *pGpuResource, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_VblankCallback_GpuResource.offset), 
pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_vblcbAccessCallback(struct VblankCallback *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VblankCallback_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_VblankCallback[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) vblcbCtrlSetVBlankNotification_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + /*flags=*/ 0x0u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90100101u, + /*paramSize=*/ sizeof(NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_VblankCallback.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "vblcbCtrlSetVBlankNotification" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_VblankCallback = +{ + /*numEntries=*/ 1, + /*pExportEntries=*/ __nvoc_exported_method_def_VblankCallback +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_VblankCallback(VblankCallback *pThis) { + __nvoc_vblcbDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_VblankCallback(VblankCallback *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_VblankCallback(VblankCallback *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VblankCallback_fail_GpuResource; + __nvoc_init_dataField_VblankCallback(pThis); + + status = __nvoc_vblcbConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VblankCallback_fail__init; + goto __nvoc_ctor_VblankCallback_exit; // Success + +__nvoc_ctor_VblankCallback_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_VblankCallback_fail_GpuResource: +__nvoc_ctor_VblankCallback_exit: + + return status; +} + +static void __nvoc_init_funcTable_VblankCallback_1(VblankCallback *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u) + pThis->__vblcbCtrlSetVBlankNotification__ = &vblcbCtrlSetVBlankNotification_IMPL; +#endif + + pThis->__vblcbShareCallback__ = &__nvoc_thunk_GpuResource_vblcbShareCallback; + + pThis->__vblcbControl__ = &__nvoc_thunk_GpuResource_vblcbControl; + + pThis->__vblcbUnmap__ = &__nvoc_thunk_GpuResource_vblcbUnmap; + + pThis->__vblcbGetMemInterMapParams__ = &__nvoc_thunk_RmResource_vblcbGetMemInterMapParams; + + pThis->__vblcbGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_vblcbGetMemoryMappingDescriptor; + + pThis->__vblcbGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_vblcbGetMapAddrSpace; + + pThis->__vblcbGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_vblcbGetInternalObjectHandle; + + pThis->__vblcbControlFilter__ = &__nvoc_thunk_RsResource_vblcbControlFilter; + + pThis->__vblcbAddAdditionalDependants__ = 
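/*
 * Illustrative sketch, not part of this change: the export table above binds
 * control command 0x90100101 to vblcbCtrlSetVBlankNotification_IMPL along
 * with its flags, access right, and parameter size.  A lookup by command id
 * over such a table is conceptually the loop below; the real path goes
 * through vblcbControlLookup/resControlLookup, and the variable names here
 * are hypothetical.
 *
 *     for (i = 0; i < pExportInfo->numEntries; i++)
 *         if (pExportInfo->pExportEntries[i].methodId == cmd)
 *             return &pExportInfo->pExportEntries[i];
 */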
&__nvoc_thunk_RsResource_vblcbAddAdditionalDependants; + + pThis->__vblcbGetRefCount__ = &__nvoc_thunk_RsResource_vblcbGetRefCount; + + pThis->__vblcbCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_vblcbCheckMemInterUnmap; + + pThis->__vblcbMapTo__ = &__nvoc_thunk_RsResource_vblcbMapTo; + + pThis->__vblcbControl_Prologue__ = &__nvoc_thunk_RmResource_vblcbControl_Prologue; + + pThis->__vblcbGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_vblcbGetRegBaseOffsetAndSize; + + pThis->__vblcbCanCopy__ = &__nvoc_thunk_RsResource_vblcbCanCopy; + + pThis->__vblcbInternalControlForward__ = &__nvoc_thunk_GpuResource_vblcbInternalControlForward; + + pThis->__vblcbPreDestruct__ = &__nvoc_thunk_RsResource_vblcbPreDestruct; + + pThis->__vblcbUnmapFrom__ = &__nvoc_thunk_RsResource_vblcbUnmapFrom; + + pThis->__vblcbControl_Epilogue__ = &__nvoc_thunk_RmResource_vblcbControl_Epilogue; + + pThis->__vblcbControlLookup__ = &__nvoc_thunk_RsResource_vblcbControlLookup; + + pThis->__vblcbMap__ = &__nvoc_thunk_GpuResource_vblcbMap; + + pThis->__vblcbAccessCallback__ = &__nvoc_thunk_RmResource_vblcbAccessCallback; +} + +void __nvoc_init_funcTable_VblankCallback(VblankCallback *pThis) { + __nvoc_init_funcTable_VblankCallback_1(pThis); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_VblankCallback(VblankCallback *pThis) { + pThis->__nvoc_pbase_VblankCallback = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_VblankCallback(pThis); +} + +NV_STATUS __nvoc_objCreate_VblankCallback(VblankCallback **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + VblankCallback *pThis; + + pThis = portMemAllocNonPaged(sizeof(VblankCallback)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(VblankCallback)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_VblankCallback); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_VblankCallback(pThis); + status = __nvoc_ctor_VblankCallback(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_VblankCallback_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_VblankCallback_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_VblankCallback(VblankCallback **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * 
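/*
 * Descriptive note, not part of this change: __nvoc_objCreate_VblankCallback
 * above follows the creation sequence used throughout these generated files:
 * allocate with portMemAllocNonPaged, zero with portMemSet, install RTTI via
 * __nvoc_initRtti, attach to the parent with objAddChild (unless the
 * NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY create flag is set), build the
 * function tables in __nvoc_init_VblankCallback, run
 * __nvoc_ctor_VblankCallback, and free the allocation if construction fails.
 * __nvoc_objCreateDynamic_VblankCallback simply unpacks the va_list
 * arguments and forwards to it.
 */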
arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_VblankCallback(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_vblank_callback_nvoc.h b/src/nvidia/generated/g_vblank_callback_nvoc.h new file mode 100644 index 000000000..d03e6fad6 --- /dev/null +++ b/src/nvidia/generated/g_vblank_callback_nvoc.h @@ -0,0 +1,252 @@ +#ifndef _G_VBLANK_CALLBACK_NVOC_H_ +#define _G_VBLANK_CALLBACK_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* VblankCallback Module Header +* This file contains functions managing vblank callback. +* +******************************************************************************/ + +#include "g_vblank_callback_nvoc.h" + +#ifndef VBLANK_CALLBACK_H +#define VBLANK_CALLBACK_H + +#include "gpu/gpu.h" +#include "gpu/gpu_resource.h" +#include "gpu/disp/vblank_callback/vblank.h" +#include "ctrl/ctrl9010.h" +/*! 
+ * RM internal class representing NV9010_VBLANK_CALLBACK + */ +#ifdef NVOC_VBLANK_CALLBACK_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct VblankCallback { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct VblankCallback *__nvoc_pbase_VblankCallback; + NV_STATUS (*__vblcbCtrlSetVBlankNotification__)(struct VblankCallback *, NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS *); + NvBool (*__vblcbShareCallback__)(struct VblankCallback *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__vblcbControl__)(struct VblankCallback *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__vblcbUnmap__)(struct VblankCallback *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__vblcbGetMemInterMapParams__)(struct VblankCallback *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__vblcbGetMemoryMappingDescriptor__)(struct VblankCallback *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__vblcbGetMapAddrSpace__)(struct VblankCallback *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__vblcbGetInternalObjectHandle__)(struct VblankCallback *); + NV_STATUS (*__vblcbControlFilter__)(struct VblankCallback *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__vblcbAddAdditionalDependants__)(struct RsClient *, struct VblankCallback *, RsResourceRef *); + NvU32 (*__vblcbGetRefCount__)(struct VblankCallback *); + NV_STATUS (*__vblcbCheckMemInterUnmap__)(struct VblankCallback *, NvBool); + NV_STATUS (*__vblcbMapTo__)(struct VblankCallback *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__vblcbControl_Prologue__)(struct VblankCallback *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__vblcbGetRegBaseOffsetAndSize__)(struct VblankCallback *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__vblcbCanCopy__)(struct VblankCallback *); + NV_STATUS (*__vblcbInternalControlForward__)(struct VblankCallback *, NvU32, void *, NvU32); + void (*__vblcbPreDestruct__)(struct VblankCallback *); + NV_STATUS (*__vblcbUnmapFrom__)(struct VblankCallback *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__vblcbControl_Epilogue__)(struct VblankCallback *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__vblcbControlLookup__)(struct VblankCallback *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__vblcbMap__)(struct VblankCallback *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__vblcbAccessCallback__)(struct VblankCallback *, struct RsClient *, void *, RsAccessRight); + VBLANKCALLBACK CallBack; + OSVBLANKCALLBACKPROC pProc; + void *pParm1; + void *pParm2; + NvU32 LogicalHead; +}; + +#ifndef __NVOC_CLASS_VblankCallback_TYPEDEF__ +#define __NVOC_CLASS_VblankCallback_TYPEDEF__ +typedef struct VblankCallback VblankCallback; +#endif /* __NVOC_CLASS_VblankCallback_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VblankCallback +#define __nvoc_class_id_VblankCallback 0x4c1997 +#endif /* __nvoc_class_id_VblankCallback */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VblankCallback; + +#define __staticCast_VblankCallback(pThis) \ + ((pThis)->__nvoc_pbase_VblankCallback) + +#ifdef 
__nvoc_vblank_callback_h_disabled +#define __dynamicCast_VblankCallback(pThis) ((VblankCallback*)NULL) +#else //__nvoc_vblank_callback_h_disabled +#define __dynamicCast_VblankCallback(pThis) \ + ((VblankCallback*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(VblankCallback))) +#endif //__nvoc_vblank_callback_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_VblankCallback(VblankCallback**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_VblankCallback(VblankCallback**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_VblankCallback(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_VblankCallback((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define vblcbCtrlSetVBlankNotification(pVblankCallback, pParams) vblcbCtrlSetVBlankNotification_DISPATCH(pVblankCallback, pParams) +#define vblcbShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) vblcbShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define vblcbControl(pGpuResource, pCallContext, pParams) vblcbControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define vblcbUnmap(pGpuResource, pCallContext, pCpuMapping) vblcbUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define vblcbGetMemInterMapParams(pRmResource, pParams) vblcbGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define vblcbGetMemoryMappingDescriptor(pRmResource, ppMemDesc) vblcbGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define vblcbGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) vblcbGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define vblcbGetInternalObjectHandle(pGpuResource) vblcbGetInternalObjectHandle_DISPATCH(pGpuResource) +#define vblcbControlFilter(pResource, pCallContext, pParams) vblcbControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define vblcbAddAdditionalDependants(pClient, pResource, pReference) vblcbAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define vblcbGetRefCount(pResource) vblcbGetRefCount_DISPATCH(pResource) +#define vblcbCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) vblcbCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define vblcbMapTo(pResource, pParams) vblcbMapTo_DISPATCH(pResource, pParams) +#define vblcbControl_Prologue(pResource, pCallContext, pParams) vblcbControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define vblcbGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) vblcbGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define vblcbCanCopy(pResource) vblcbCanCopy_DISPATCH(pResource) +#define vblcbInternalControlForward(pGpuResource, command, pParams, size) vblcbInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define vblcbPreDestruct(pResource) vblcbPreDestruct_DISPATCH(pResource) +#define vblcbUnmapFrom(pResource, pParams) vblcbUnmapFrom_DISPATCH(pResource, pParams) +#define vblcbControl_Epilogue(pResource, pCallContext, pParams) vblcbControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define vblcbControlLookup(pResource, pParams, ppEntry) vblcbControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define vblcbMap(pGpuResource, pCallContext, pParams, pCpuMapping) vblcbMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define vblcbAccessCallback(pResource, pInvokingClient, pAllocParams, 
accessRight) vblcbAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS vblcbCtrlSetVBlankNotification_IMPL(struct VblankCallback *pVblankCallback, NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS *pParams); + +static inline NV_STATUS vblcbCtrlSetVBlankNotification_DISPATCH(struct VblankCallback *pVblankCallback, NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS *pParams) { + return pVblankCallback->__vblcbCtrlSetVBlankNotification__(pVblankCallback, pParams); +} + +static inline NvBool vblcbShareCallback_DISPATCH(struct VblankCallback *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__vblcbShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS vblcbControl_DISPATCH(struct VblankCallback *pGpuResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__vblcbControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS vblcbUnmap_DISPATCH(struct VblankCallback *pGpuResource, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pGpuResource->__vblcbUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS vblcbGetMemInterMapParams_DISPATCH(struct VblankCallback *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__vblcbGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS vblcbGetMemoryMappingDescriptor_DISPATCH(struct VblankCallback *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__vblcbGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS vblcbGetMapAddrSpace_DISPATCH(struct VblankCallback *pGpuResource, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__vblcbGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle vblcbGetInternalObjectHandle_DISPATCH(struct VblankCallback *pGpuResource) { + return pGpuResource->__vblcbGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS vblcbControlFilter_DISPATCH(struct VblankCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__vblcbControlFilter__(pResource, pCallContext, pParams); +} + +static inline void vblcbAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct VblankCallback *pResource, RsResourceRef *pReference) { + pResource->__vblcbAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 vblcbGetRefCount_DISPATCH(struct VblankCallback *pResource) { + return pResource->__vblcbGetRefCount__(pResource); +} + +static inline NV_STATUS vblcbCheckMemInterUnmap_DISPATCH(struct VblankCallback *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__vblcbCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS vblcbMapTo_DISPATCH(struct VblankCallback *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__vblcbMapTo__(pResource, pParams); +} + +static inline NV_STATUS vblcbControl_Prologue_DISPATCH(struct VblankCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__vblcbControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS vblcbGetRegBaseOffsetAndSize_DISPATCH(struct VblankCallback *pGpuResource, struct 
OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__vblcbGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool vblcbCanCopy_DISPATCH(struct VblankCallback *pResource) { + return pResource->__vblcbCanCopy__(pResource); +} + +static inline NV_STATUS vblcbInternalControlForward_DISPATCH(struct VblankCallback *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__vblcbInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void vblcbPreDestruct_DISPATCH(struct VblankCallback *pResource) { + pResource->__vblcbPreDestruct__(pResource); +} + +static inline NV_STATUS vblcbUnmapFrom_DISPATCH(struct VblankCallback *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__vblcbUnmapFrom__(pResource, pParams); +} + +static inline void vblcbControl_Epilogue_DISPATCH(struct VblankCallback *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__vblcbControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS vblcbControlLookup_DISPATCH(struct VblankCallback *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__vblcbControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS vblcbMap_DISPATCH(struct VblankCallback *pGpuResource, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pGpuResource->__vblcbMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool vblcbAccessCallback_DISPATCH(struct VblankCallback *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__vblcbAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS vblcbConstruct_IMPL(struct VblankCallback *arg_pVblankCallback, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_vblcbConstruct(arg_pVblankCallback, arg_pCallContext, arg_pParams) vblcbConstruct_IMPL(arg_pVblankCallback, arg_pCallContext, arg_pParams) +void vblcbDestruct_IMPL(struct VblankCallback *pVblankCallback); +#define __nvoc_vblcbDestruct(pVblankCallback) vblcbDestruct_IMPL(pVblankCallback) +#undef PRIVATE_FIELD + + +#endif // VBLANK_CALLBACK_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VBLANK_CALLBACK_NVOC_H_ diff --git a/src/nvidia/generated/g_video_mem_nvoc.c b/src/nvidia/generated/g_video_mem_nvoc.c new file mode 100644 index 000000000..3104aff19 --- /dev/null +++ b/src/nvidia/generated/g_video_mem_nvoc.c @@ -0,0 +1,334 @@ +#define NVOC_VIDEO_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_video_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xed948f = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VideoMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +void 
__nvoc_init_VideoMemory(VideoMemory*); +void __nvoc_init_funcTable_VideoMemory(VideoMemory*); +NV_STATUS __nvoc_ctor_VideoMemory(VideoMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_VideoMemory(VideoMemory*); +void __nvoc_dtor_VideoMemory(VideoMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_VideoMemory; + +static const struct NVOC_RTTI __nvoc_rtti_VideoMemory_VideoMemory = { + /*pClassDef=*/ &__nvoc_class_def_VideoMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_VideoMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_VideoMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VideoMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VideoMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VideoMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VideoMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VideoMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VideoMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VideoMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VideoMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VideoMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VideoMemory_StandardMemory = { + /*pClassDef=*/ &__nvoc_class_def_StandardMemory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VideoMemory, __nvoc_base_StandardMemory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_VideoMemory = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_VideoMemory_VideoMemory, + &__nvoc_rtti_VideoMemory_StandardMemory, + &__nvoc_rtti_VideoMemory_Memory, + &__nvoc_rtti_VideoMemory_RmResource, + &__nvoc_rtti_VideoMemory_RmResourceCommon, + &__nvoc_rtti_VideoMemory_RsResource, + &__nvoc_rtti_VideoMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_VideoMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(VideoMemory), + /*classId=*/ classId(VideoMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "VideoMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_VideoMemory, + /*pCastInfo=*/ &__nvoc_castinfo_VideoMemory, + /*pExportInfo=*/ &__nvoc_export_info_VideoMemory +}; + +static NV_STATUS __nvoc_thunk_VideoMemory_memCheckCopyPermissions(struct Memory *pVideoMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return vidmemCheckCopyPermissions((struct VideoMemory *)(((unsigned char *)pVideoMemory) - __nvoc_rtti_VideoMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static NV_STATUS __nvoc_thunk_Memory_vidmemCheckMemInterUnmap(struct VideoMemory *pMemory, NvBool bSubdeviceHandleProvided) { + 
return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VideoMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_vidmemControl(struct VideoMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VideoMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_vidmemUnmap(struct VideoMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VideoMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_vidmemGetMemInterMapParams(struct VideoMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VideoMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_vidmemGetMemoryMappingDescriptor(struct VideoMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VideoMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_vidmemGetMapAddrSpace(struct VideoMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VideoMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_vidmemShareCallback(struct VideoMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_vidmemControlFilter(struct VideoMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_vidmemAddAdditionalDependants(struct RsClient *pClient, struct VideoMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_vidmemGetRefCount(struct VideoMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_vidmemMapTo(struct VideoMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_vidmemControl_Prologue(struct VideoMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_StandardMemory_vidmemCanCopy(struct VideoMemory *pStandardMemory) { + return stdmemCanCopy((struct StandardMemory *)(((unsigned char 
*)pStandardMemory) + __nvoc_rtti_VideoMemory_StandardMemory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_vidmemIsReady(struct VideoMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VideoMemory_Memory.offset)); +} + +static void __nvoc_thunk_RsResource_vidmemPreDestruct(struct VideoMemory *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_vidmemUnmapFrom(struct VideoMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_vidmemControl_Epilogue(struct VideoMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_vidmemControlLookup(struct VideoMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_vidmemMap(struct VideoMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VideoMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_vidmemAccessCallback(struct VideoMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VideoMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_VideoMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_StandardMemory(StandardMemory*); +void __nvoc_dtor_VideoMemory(VideoMemory *pThis) { + __nvoc_vidmemDestruct(pThis); + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_VideoMemory(VideoMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_VideoMemory(VideoMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_StandardMemory(&pThis->__nvoc_base_StandardMemory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VideoMemory_fail_StandardMemory; + __nvoc_init_dataField_VideoMemory(pThis); + + status = __nvoc_vidmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VideoMemory_fail__init; + goto __nvoc_ctor_VideoMemory_exit; // Success + +__nvoc_ctor_VideoMemory_fail__init: + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); +__nvoc_ctor_VideoMemory_fail_StandardMemory: +__nvoc_ctor_VideoMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_VideoMemory_1(VideoMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + 
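/*
 * Descriptive note, not part of this change: VideoMemory overrides a single
 * inherited virtual, memCheckCopyPermissions.  Its own slot is pointed at
 * vidmemCheckCopyPermissions_IMPL, and the base Memory vtable entry is
 * redirected to __nvoc_thunk_VideoMemory_memCheckCopyPermissions, which
 * subtracts __nvoc_rtti_VideoMemory_Memory.offset to recover the VideoMemory
 * pointer from the Memory base pointer; the remaining thunks go the other
 * way, adding the offset to reach the StandardMemory/Memory/RmResource/
 * RsResource implementations.
 */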
pThis->__vidmemCheckCopyPermissions__ = &vidmemCheckCopyPermissions_IMPL; + + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__memCheckCopyPermissions__ = &__nvoc_thunk_VideoMemory_memCheckCopyPermissions; + + pThis->__vidmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_vidmemCheckMemInterUnmap; + + pThis->__vidmemControl__ = &__nvoc_thunk_Memory_vidmemControl; + + pThis->__vidmemUnmap__ = &__nvoc_thunk_Memory_vidmemUnmap; + + pThis->__vidmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_vidmemGetMemInterMapParams; + + pThis->__vidmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_vidmemGetMemoryMappingDescriptor; + + pThis->__vidmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_vidmemGetMapAddrSpace; + + pThis->__vidmemShareCallback__ = &__nvoc_thunk_RmResource_vidmemShareCallback; + + pThis->__vidmemControlFilter__ = &__nvoc_thunk_RsResource_vidmemControlFilter; + + pThis->__vidmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_vidmemAddAdditionalDependants; + + pThis->__vidmemGetRefCount__ = &__nvoc_thunk_RsResource_vidmemGetRefCount; + + pThis->__vidmemMapTo__ = &__nvoc_thunk_RsResource_vidmemMapTo; + + pThis->__vidmemControl_Prologue__ = &__nvoc_thunk_RmResource_vidmemControl_Prologue; + + pThis->__vidmemCanCopy__ = &__nvoc_thunk_StandardMemory_vidmemCanCopy; + + pThis->__vidmemIsReady__ = &__nvoc_thunk_Memory_vidmemIsReady; + + pThis->__vidmemPreDestruct__ = &__nvoc_thunk_RsResource_vidmemPreDestruct; + + pThis->__vidmemUnmapFrom__ = &__nvoc_thunk_RsResource_vidmemUnmapFrom; + + pThis->__vidmemControl_Epilogue__ = &__nvoc_thunk_RmResource_vidmemControl_Epilogue; + + pThis->__vidmemControlLookup__ = &__nvoc_thunk_RsResource_vidmemControlLookup; + + pThis->__vidmemMap__ = &__nvoc_thunk_Memory_vidmemMap; + + pThis->__vidmemAccessCallback__ = &__nvoc_thunk_RmResource_vidmemAccessCallback; +} + +void __nvoc_init_funcTable_VideoMemory(VideoMemory *pThis) { + __nvoc_init_funcTable_VideoMemory_1(pThis); +} + +void __nvoc_init_StandardMemory(StandardMemory*); +void __nvoc_init_VideoMemory(VideoMemory *pThis) { + pThis->__nvoc_pbase_VideoMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory; + pThis->__nvoc_pbase_StandardMemory = &pThis->__nvoc_base_StandardMemory; + __nvoc_init_StandardMemory(&pThis->__nvoc_base_StandardMemory); + __nvoc_init_funcTable_VideoMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_VideoMemory(VideoMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + VideoMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(VideoMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(VideoMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_VideoMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, 
&pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_VideoMemory(pThis); + status = __nvoc_ctor_VideoMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_VideoMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_VideoMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_VideoMemory(VideoMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_VideoMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_video_mem_nvoc.h b/src/nvidia/generated/g_video_mem_nvoc.h new file mode 100644 index 000000000..d07c59529 --- /dev/null +++ b/src/nvidia/generated/g_video_mem_nvoc.h @@ -0,0 +1,235 @@ +#ifndef _G_VIDEO_MEM_NVOC_H_ +#define _G_VIDEO_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + + /* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_video_mem_nvoc.h" + +#ifndef _VIDEO_MEMORY_H_ +#define _VIDEO_MEMORY_H_ + +#include "mem_mgr/standard_mem.h" +#include "gpu/mem_mgr/heap_base.h" + +typedef struct PMA_ALLOC_INFO PMA_ALLOC_INFO; + +#ifdef NVOC_VIDEO_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct VideoMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct StandardMemory __nvoc_base_StandardMemory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct StandardMemory *__nvoc_pbase_StandardMemory; + struct VideoMemory *__nvoc_pbase_VideoMemory; + NV_STATUS (*__vidmemCheckCopyPermissions__)(struct VideoMemory *, struct OBJGPU *, NvHandle); + NV_STATUS (*__vidmemCheckMemInterUnmap__)(struct VideoMemory *, NvBool); + NV_STATUS (*__vidmemControl__)(struct VideoMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__vidmemUnmap__)(struct VideoMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__vidmemGetMemInterMapParams__)(struct VideoMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__vidmemGetMemoryMappingDescriptor__)(struct VideoMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__vidmemGetMapAddrSpace__)(struct VideoMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__vidmemShareCallback__)(struct VideoMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__vidmemControlFilter__)(struct VideoMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__vidmemAddAdditionalDependants__)(struct RsClient *, struct VideoMemory *, RsResourceRef *); + NvU32 (*__vidmemGetRefCount__)(struct VideoMemory *); + NV_STATUS (*__vidmemMapTo__)(struct VideoMemory *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__vidmemControl_Prologue__)(struct VideoMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__vidmemCanCopy__)(struct VideoMemory *); + NV_STATUS (*__vidmemIsReady__)(struct VideoMemory *); + void (*__vidmemPreDestruct__)(struct VideoMemory *); + NV_STATUS (*__vidmemUnmapFrom__)(struct VideoMemory *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__vidmemControl_Epilogue__)(struct VideoMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__vidmemControlLookup__)(struct VideoMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__vidmemMap__)(struct VideoMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__vidmemAccessCallback__)(struct VideoMemory *, struct RsClient *, void *, RsAccessRight); +}; + +#ifndef __NVOC_CLASS_VideoMemory_TYPEDEF__ +#define __NVOC_CLASS_VideoMemory_TYPEDEF__ +typedef struct VideoMemory VideoMemory; +#endif /* __NVOC_CLASS_VideoMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VideoMemory +#define __nvoc_class_id_VideoMemory 0xed948f +#endif /* __nvoc_class_id_VideoMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VideoMemory; + +#define __staticCast_VideoMemory(pThis) \ + ((pThis)->__nvoc_pbase_VideoMemory) + +#ifdef __nvoc_video_mem_h_disabled +#define __dynamicCast_VideoMemory(pThis) ((VideoMemory*)NULL) +#else //__nvoc_video_mem_h_disabled +#define __dynamicCast_VideoMemory(pThis) \ + ((VideoMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(VideoMemory))) +#endif 
//__nvoc_video_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_VideoMemory(VideoMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_VideoMemory(VideoMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_VideoMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_VideoMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define vidmemCheckCopyPermissions(pVideoMemory, pDstGpu, hDstClientNvBool) vidmemCheckCopyPermissions_DISPATCH(pVideoMemory, pDstGpu, hDstClientNvBool) +#define vidmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) vidmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define vidmemControl(pMemory, pCallContext, pParams) vidmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define vidmemUnmap(pMemory, pCallContext, pCpuMapping) vidmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define vidmemGetMemInterMapParams(pMemory, pParams) vidmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define vidmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) vidmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define vidmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) vidmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define vidmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) vidmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define vidmemControlFilter(pResource, pCallContext, pParams) vidmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define vidmemAddAdditionalDependants(pClient, pResource, pReference) vidmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define vidmemGetRefCount(pResource) vidmemGetRefCount_DISPATCH(pResource) +#define vidmemMapTo(pResource, pParams) vidmemMapTo_DISPATCH(pResource, pParams) +#define vidmemControl_Prologue(pResource, pCallContext, pParams) vidmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define vidmemCanCopy(pStandardMemory) vidmemCanCopy_DISPATCH(pStandardMemory) +#define vidmemIsReady(pMemory) vidmemIsReady_DISPATCH(pMemory) +#define vidmemPreDestruct(pResource) vidmemPreDestruct_DISPATCH(pResource) +#define vidmemUnmapFrom(pResource, pParams) vidmemUnmapFrom_DISPATCH(pResource, pParams) +#define vidmemControl_Epilogue(pResource, pCallContext, pParams) vidmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define vidmemControlLookup(pResource, pParams, ppEntry) vidmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define vidmemMap(pMemory, pCallContext, pParams, pCpuMapping) vidmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define vidmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) vidmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS vidmemCheckCopyPermissions_IMPL(struct VideoMemory *pVideoMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool); + +static inline NV_STATUS vidmemCheckCopyPermissions_DISPATCH(struct VideoMemory *pVideoMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pVideoMemory->__vidmemCheckCopyPermissions__(pVideoMemory, pDstGpu, hDstClientNvBool); +} + +static inline NV_STATUS vidmemCheckMemInterUnmap_DISPATCH(struct VideoMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__vidmemCheckMemInterUnmap__(pMemory, 
bSubdeviceHandleProvided); +} + +static inline NV_STATUS vidmemControl_DISPATCH(struct VideoMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__vidmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS vidmemUnmap_DISPATCH(struct VideoMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__vidmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS vidmemGetMemInterMapParams_DISPATCH(struct VideoMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__vidmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS vidmemGetMemoryMappingDescriptor_DISPATCH(struct VideoMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__vidmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS vidmemGetMapAddrSpace_DISPATCH(struct VideoMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__vidmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool vidmemShareCallback_DISPATCH(struct VideoMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__vidmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS vidmemControlFilter_DISPATCH(struct VideoMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__vidmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void vidmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct VideoMemory *pResource, RsResourceRef *pReference) { + pResource->__vidmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 vidmemGetRefCount_DISPATCH(struct VideoMemory *pResource) { + return pResource->__vidmemGetRefCount__(pResource); +} + +static inline NV_STATUS vidmemMapTo_DISPATCH(struct VideoMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__vidmemMapTo__(pResource, pParams); +} + +static inline NV_STATUS vidmemControl_Prologue_DISPATCH(struct VideoMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__vidmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool vidmemCanCopy_DISPATCH(struct VideoMemory *pStandardMemory) { + return pStandardMemory->__vidmemCanCopy__(pStandardMemory); +} + +static inline NV_STATUS vidmemIsReady_DISPATCH(struct VideoMemory *pMemory) { + return pMemory->__vidmemIsReady__(pMemory); +} + +static inline void vidmemPreDestruct_DISPATCH(struct VideoMemory *pResource) { + pResource->__vidmemPreDestruct__(pResource); +} + +static inline NV_STATUS vidmemUnmapFrom_DISPATCH(struct VideoMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__vidmemUnmapFrom__(pResource, pParams); +} + +static inline void vidmemControl_Epilogue_DISPATCH(struct VideoMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__vidmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS vidmemControlLookup_DISPATCH(struct VideoMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__vidmemControlLookup__(pResource, 
pParams, ppEntry); +} + +static inline NV_STATUS vidmemMap_DISPATCH(struct VideoMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__vidmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool vidmemAccessCallback_DISPATCH(struct VideoMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__vidmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS vidmemConstruct_IMPL(struct VideoMemory *arg_pVideoMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_vidmemConstruct(arg_pVideoMemory, arg_pCallContext, arg_pParams) vidmemConstruct_IMPL(arg_pVideoMemory, arg_pCallContext, arg_pParams) +void vidmemDestruct_IMPL(struct VideoMemory *pVideoMemory); +#define __nvoc_vidmemDestruct(pVideoMemory) vidmemDestruct_IMPL(pVideoMemory) +struct Heap *vidmemGetHeap_IMPL(struct OBJGPU *pGpu, NvHandle hClient, NvBool bSubheap); +#define vidmemGetHeap(pGpu, hClient, bSubheap) vidmemGetHeap_IMPL(pGpu, hClient, bSubheap) +#undef PRIVATE_FIELD + + +void vidmemPmaFree(OBJGPU *, struct Heap *, PMA_ALLOC_INFO *, NvU32 flags); + +NV_STATUS vidmemAllocResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo, struct Heap *pHeap); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VIDEO_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_virt_mem_allocator_nvoc.c b/src/nvidia/generated/g_virt_mem_allocator_nvoc.c new file mode 100644 index 000000000..f54c9890f --- /dev/null +++ b/src/nvidia/generated/g_virt_mem_allocator_nvoc.c @@ -0,0 +1,367 @@ +#define NVOC_VIRT_MEM_ALLOCATOR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_virt_mem_allocator_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x899e48 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VirtMemAllocator; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE; + +void __nvoc_init_VirtMemAllocator(VirtMemAllocator*, RmHalspecOwner* ); +void __nvoc_init_funcTable_VirtMemAllocator(VirtMemAllocator*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_VirtMemAllocator(VirtMemAllocator*, RmHalspecOwner* ); +void __nvoc_init_dataField_VirtMemAllocator(VirtMemAllocator*, RmHalspecOwner* ); +void __nvoc_dtor_VirtMemAllocator(VirtMemAllocator*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_VirtMemAllocator; + +static const struct NVOC_RTTI __nvoc_rtti_VirtMemAllocator_VirtMemAllocator = { + /*pClassDef=*/ &__nvoc_class_def_VirtMemAllocator, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_VirtMemAllocator, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtMemAllocator_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtMemAllocator, __nvoc_base_OBJENGSTATE.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtMemAllocator_OBJENGSTATE = { + /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtMemAllocator, __nvoc_base_OBJENGSTATE), +}; + +static const struct NVOC_CASTINFO 
__nvoc_castinfo_VirtMemAllocator = { + /*numRelatives=*/ 3, + /*relatives=*/ { + &__nvoc_rtti_VirtMemAllocator_VirtMemAllocator, + &__nvoc_rtti_VirtMemAllocator_OBJENGSTATE, + &__nvoc_rtti_VirtMemAllocator_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_VirtMemAllocator = +{ + /*classInfo=*/ { + /*size=*/ sizeof(VirtMemAllocator), + /*classId=*/ classId(VirtMemAllocator), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "VirtMemAllocator", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_VirtMemAllocator, + /*pCastInfo=*/ &__nvoc_castinfo_VirtMemAllocator, + /*pExportInfo=*/ &__nvoc_export_info_VirtMemAllocator +}; + +static NV_STATUS __nvoc_thunk_VirtMemAllocator_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pDma, ENGDESCRIPTOR arg0) { + return dmaConstructEngine(pGpu, (struct VirtMemAllocator *)(((unsigned char *)pDma) - __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_VirtMemAllocator_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pDma) { + return dmaStateInitLocked(pGpu, (struct VirtMemAllocator *)(((unsigned char *)pDma) - __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_VirtMemAllocator_engstateStatePostLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pDma, NvU32 arg0) { + return dmaStatePostLoad(pGpu, (struct VirtMemAllocator *)(((unsigned char *)pDma) - __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaReconcileTunableState(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunableState) { + return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaStateLoad(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaStateUnload(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaStatePreLoad(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaStatePostUnload(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), arg0); +} + +static void __nvoc_thunk_OBJENGSTATE_dmaStateDestroy(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaStatePreUnload(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), arg0); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaStateInitUnlocked(POBJGPU 
pGpu, struct VirtMemAllocator *pEngstate) { + return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset)); +} + +static void __nvoc_thunk_OBJENGSTATE_dmaInitMissing(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaStatePreInitLocked(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaStatePreInitUnlocked(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset)); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaGetTunableState(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunableState) { + return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaCompareTunableState(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunables1, void *pTunables2) { + return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), pTunables1, pTunables2); +} + +static void __nvoc_thunk_OBJENGSTATE_dmaFreeTunableState(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunableState) { + engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), pTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaAllocTunableState(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void **ppTunableState) { + return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), ppTunableState); +} + +static NV_STATUS __nvoc_thunk_OBJENGSTATE_dmaSetTunableState(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunableState) { + return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset), pTunableState); +} + +static NvBool __nvoc_thunk_OBJENGSTATE_dmaIsPresent(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_VirtMemAllocator_OBJENGSTATE.offset)); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_VirtMemAllocator = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_dtor_VirtMemAllocator(VirtMemAllocator *pThis) { + __nvoc_dmaDestruct(pThis); + __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_VirtMemAllocator(VirtMemAllocator *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + 
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + // NVOC Property Hal field -- PDB_PROP_DMA_ENFORCE_32BIT_POINTER + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_DMA_ENFORCE_32BIT_POINTER, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_DMA_ENFORCE_32BIT_POINTER, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->setProperty(pThis, PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL, ((NvBool)(0 == 0))); + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE, ((NvBool)(0 != 0))); + } + + // NVOC Property Hal field -- PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED + if (0) + { + } + // default + else + { + pThis->setProperty(pThis, PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED, ((NvBool)(0 == 0))); + } +} + +NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* ); +NV_STATUS __nvoc_ctor_VirtMemAllocator(VirtMemAllocator *pThis, RmHalspecOwner *pRmhalspecowner) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + if (status != NV_OK) goto __nvoc_ctor_VirtMemAllocator_fail_OBJENGSTATE; + __nvoc_init_dataField_VirtMemAllocator(pThis, pRmhalspecowner); + goto __nvoc_ctor_VirtMemAllocator_exit; // Success + +__nvoc_ctor_VirtMemAllocator_fail_OBJENGSTATE: +__nvoc_ctor_VirtMemAllocator_exit: + + return status; +} + +static void __nvoc_init_funcTable_VirtMemAllocator_1(VirtMemAllocator *pThis, RmHalspecOwner *pRmhalspecowner) { + ChipHal *chipHal = &pRmhalspecowner->chipHal; + const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx; + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(chipHal); + PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + + pThis->__dmaConstructEngine__ = &dmaConstructEngine_IMPL; + + pThis->__dmaStateInitLocked__ = &dmaStateInitLocked_IMPL; + + // Hal function -- dmaStatePostLoad + if (( ((chipHal_HalVarIdx >> 5) == 1UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x0000ffe0UL) )) /* ChipHal: TU102 | TU104 | TU106 | TU116 | TU117 | GA100 | GA102 | GA103 | GA104 | GA106 | GA107 */ + { + pThis->__dmaStatePostLoad__ = &dmaStatePostLoad_GM107; + } + else if (0) + { + } + + 
pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_VirtMemAllocator_engstateConstructEngine; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_VirtMemAllocator_engstateStateInitLocked; + + pThis->__nvoc_base_OBJENGSTATE.__engstateStatePostLoad__ = &__nvoc_thunk_VirtMemAllocator_engstateStatePostLoad; + + pThis->__dmaReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_dmaReconcileTunableState; + + pThis->__dmaStateLoad__ = &__nvoc_thunk_OBJENGSTATE_dmaStateLoad; + + pThis->__dmaStateUnload__ = &__nvoc_thunk_OBJENGSTATE_dmaStateUnload; + + pThis->__dmaStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_dmaStatePreLoad; + + pThis->__dmaStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_dmaStatePostUnload; + + pThis->__dmaStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_dmaStateDestroy; + + pThis->__dmaStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_dmaStatePreUnload; + + pThis->__dmaStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_dmaStateInitUnlocked; + + pThis->__dmaInitMissing__ = &__nvoc_thunk_OBJENGSTATE_dmaInitMissing; + + pThis->__dmaStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_dmaStatePreInitLocked; + + pThis->__dmaStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_dmaStatePreInitUnlocked; + + pThis->__dmaGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_dmaGetTunableState; + + pThis->__dmaCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_dmaCompareTunableState; + + pThis->__dmaFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_dmaFreeTunableState; + + pThis->__dmaAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_dmaAllocTunableState; + + pThis->__dmaSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_dmaSetTunableState; + + pThis->__dmaIsPresent__ = &__nvoc_thunk_OBJENGSTATE_dmaIsPresent; +} + +void __nvoc_init_funcTable_VirtMemAllocator(VirtMemAllocator *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_VirtMemAllocator_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_OBJENGSTATE(OBJENGSTATE*); +void __nvoc_init_VirtMemAllocator(VirtMemAllocator *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_VirtMemAllocator = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object; + pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE; + __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE); + __nvoc_init_funcTable_VirtMemAllocator(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_VirtMemAllocator(VirtMemAllocator **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + VirtMemAllocator *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(VirtMemAllocator)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(VirtMemAllocator)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_VirtMemAllocator); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_VirtMemAllocator(pThis, pRmhalspecowner); + status = __nvoc_ctor_VirtMemAllocator(pThis, pRmhalspecowner); + if (status != NV_OK) goto 
__nvoc_objCreate_VirtMemAllocator_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_VirtMemAllocator_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_VirtMemAllocator(VirtMemAllocator **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_VirtMemAllocator(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_virt_mem_allocator_nvoc.h b/src/nvidia/generated/g_virt_mem_allocator_nvoc.h new file mode 100644 index 000000000..8770f1185 --- /dev/null +++ b/src/nvidia/generated/g_virt_mem_allocator_nvoc.h @@ -0,0 +1,630 @@ +#ifndef _G_VIRT_MEM_ALLOCATOR_NVOC_H_ +#define _G_VIRT_MEM_ALLOCATOR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_virt_mem_allocator_nvoc.h" + +#ifndef VIRT_MEM_ALLOCATOR_H +#define VIRT_MEM_ALLOCATOR_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for the VirtMemAllocator Object. * +* * +\***************************************************************************/ + +#include "kernel/core/core.h" +#include "kernel/core/info_block.h" +#include "kernel/gpu/disp/disp_objs.h" +#include "kernel/gpu/eng_state.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/gpu.h" +#include "kernel/gpu/mem_mgr/virt_mem_allocator_common.h" +#include "kernel/mem_mgr/vaspace.h" +#include "kernel/mem_mgr/virtual_mem.h" +#include "kernel/rmapi/control.h" +#include "kernel/rmapi/mapping_list.h" + +typedef struct DMA_PAGE_ARRAY DMA_PAGE_ARRAY; + +// +// DMA mapping calls can invalidate synchronously which always leaves the TLB in a +// consistent state with the PTEs. For performance reasons we sometimes defer +// the TLB invalidation when we have multiple mappings to perform before the +// mappings will be used. Please use deferred invalidates with care. 
+// +enum +{ + DMA_TLB_INVALIDATE = 0, + DMA_DEFER_TLB_INVALIDATE = 1 +}; + +// +// aperture capabilities +// +#define DMA_GPU_GART_CAPS_SNOOP 0x00000001 +#define DMA_GPU_GART_CAPS_NOSNOOP 0x00000002 + +// The parameters for dmaAllocBar1P2PMapping +typedef struct _def_dma_bar1p2p_mapping_params +{ + struct OBJVASPACE *pVas; // Virtual address space for request + struct OBJGPU *pPeerGpu; // The target GPU which owns the PeerMemDesc + MEMORY_DESCRIPTOR *pPeerMemDesc; // The memdesc of the target GPU vidmem + MEMORY_DESCRIPTOR *pMemDescOut; // The new memdesc of the mapped BAR1 region on the target GPU + NvU32 flags; // The flags used for the peer mapping + NvU32 flagsOut; // The new flags for the new pPeerMemDesc + NvU64 offset; // The offset requested by the client. + NvU64 offsetOut; // The offset adjusted by the new BAR1 surface mapping + NvU64 length; // The length requested by the client + CLI_DMA_MAPPING_INFO *pDmaMappingInfo; // The Dma mapping info structure. +} DMA_BAR1P2P_MAPPING_PRARAMS; + + +#ifdef NVOC_VIRT_MEM_ALLOCATOR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct VirtMemAllocator { + const struct NVOC_RTTI *__nvoc_rtti; + struct OBJENGSTATE __nvoc_base_OBJENGSTATE; + struct Object *__nvoc_pbase_Object; + struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE; + struct VirtMemAllocator *__nvoc_pbase_VirtMemAllocator; + NV_STATUS (*__dmaConstructEngine__)(struct OBJGPU *, struct VirtMemAllocator *, ENGDESCRIPTOR); + NV_STATUS (*__dmaStateInitLocked__)(struct OBJGPU *, struct VirtMemAllocator *); + NV_STATUS (*__dmaStatePostLoad__)(struct OBJGPU *, struct VirtMemAllocator *, NvU32); + NV_STATUS (*__dmaReconcileTunableState__)(POBJGPU, struct VirtMemAllocator *, void *); + NV_STATUS (*__dmaStateLoad__)(POBJGPU, struct VirtMemAllocator *, NvU32); + NV_STATUS (*__dmaStateUnload__)(POBJGPU, struct VirtMemAllocator *, NvU32); + NV_STATUS (*__dmaStatePreLoad__)(POBJGPU, struct VirtMemAllocator *, NvU32); + NV_STATUS (*__dmaStatePostUnload__)(POBJGPU, struct VirtMemAllocator *, NvU32); + void (*__dmaStateDestroy__)(POBJGPU, struct VirtMemAllocator *); + NV_STATUS (*__dmaStatePreUnload__)(POBJGPU, struct VirtMemAllocator *, NvU32); + NV_STATUS (*__dmaStateInitUnlocked__)(POBJGPU, struct VirtMemAllocator *); + void (*__dmaInitMissing__)(POBJGPU, struct VirtMemAllocator *); + NV_STATUS (*__dmaStatePreInitLocked__)(POBJGPU, struct VirtMemAllocator *); + NV_STATUS (*__dmaStatePreInitUnlocked__)(POBJGPU, struct VirtMemAllocator *); + NV_STATUS (*__dmaGetTunableState__)(POBJGPU, struct VirtMemAllocator *, void *); + NV_STATUS (*__dmaCompareTunableState__)(POBJGPU, struct VirtMemAllocator *, void *, void *); + void (*__dmaFreeTunableState__)(POBJGPU, struct VirtMemAllocator *, void *); + NV_STATUS (*__dmaAllocTunableState__)(POBJGPU, struct VirtMemAllocator *, void **); + NV_STATUS (*__dmaSetTunableState__)(POBJGPU, struct VirtMemAllocator *, void *); + NvBool (*__dmaIsPresent__)(POBJGPU, struct VirtMemAllocator *); + NvBool PDB_PROP_DMA_MMU_INVALIDATE_DISABLE; + NvBool PDB_PROP_DMA_ENFORCE_32BIT_POINTER; + NvBool PDB_PROP_DMA_MEMORY_MAP_OVERRIDE; + NvBool PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED; + NvBool PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL; + NvBool PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE; + NvBool PDB_PROP_DMA_RESTRICT_VA_RANGE; + NvBool PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED; + NvU32 gpuGartCaps; + NvU32 increaseRsvdPages; + struct ENG_INFO_LINK_NODE *infoList; +}; + +#ifndef __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ 
+#define __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ +typedef struct VirtMemAllocator VirtMemAllocator; +#endif /* __NVOC_CLASS_VirtMemAllocator_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtMemAllocator +#define __nvoc_class_id_VirtMemAllocator 0x899e48 +#endif /* __nvoc_class_id_VirtMemAllocator */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VirtMemAllocator; + +#define __staticCast_VirtMemAllocator(pThis) \ + ((pThis)->__nvoc_pbase_VirtMemAllocator) + +#ifdef __nvoc_virt_mem_allocator_h_disabled +#define __dynamicCast_VirtMemAllocator(pThis) ((VirtMemAllocator*)NULL) +#else //__nvoc_virt_mem_allocator_h_disabled +#define __dynamicCast_VirtMemAllocator(pThis) \ + ((VirtMemAllocator*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(VirtMemAllocator))) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define PDB_PROP_DMA_MMU_INVALIDATE_DISABLE_BASE_CAST +#define PDB_PROP_DMA_MMU_INVALIDATE_DISABLE_BASE_NAME PDB_PROP_DMA_MMU_INVALIDATE_DISABLE +#define PDB_PROP_DMA_RESTRICT_VA_RANGE_BASE_CAST +#define PDB_PROP_DMA_RESTRICT_VA_RANGE_BASE_NAME PDB_PROP_DMA_RESTRICT_VA_RANGE +#define PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_BASE_CAST +#define PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_BASE_NAME PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL +#define PDB_PROP_DMA_ENFORCE_32BIT_POINTER_BASE_CAST +#define PDB_PROP_DMA_ENFORCE_32BIT_POINTER_BASE_NAME PDB_PROP_DMA_ENFORCE_32BIT_POINTER +#define PDB_PROP_DMA_MEMORY_MAP_OVERRIDE_BASE_CAST +#define PDB_PROP_DMA_MEMORY_MAP_OVERRIDE_BASE_NAME PDB_PROP_DMA_MEMORY_MAP_OVERRIDE +#define PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED_BASE_CAST +#define PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED_BASE_NAME PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED +#define PDB_PROP_DMA_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE. +#define PDB_PROP_DMA_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING +#define PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE_BASE_CAST +#define PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE_BASE_NAME PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE +#define PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED_BASE_CAST +#define PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED_BASE_NAME PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED + +NV_STATUS __nvoc_objCreateDynamic_VirtMemAllocator(VirtMemAllocator**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_VirtMemAllocator(VirtMemAllocator**, Dynamic*, NvU32); +#define __objCreate_VirtMemAllocator(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_VirtMemAllocator((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +#define dmaConstructEngine(pGpu, pDma, arg0) dmaConstructEngine_DISPATCH(pGpu, pDma, arg0) +#define dmaStateInitLocked(pGpu, pDma) dmaStateInitLocked_DISPATCH(pGpu, pDma) +#define dmaStatePostLoad(pGpu, pDma, arg0) dmaStatePostLoad_DISPATCH(pGpu, pDma, arg0) +#define dmaStatePostLoad_HAL(pGpu, pDma, arg0) dmaStatePostLoad_DISPATCH(pGpu, pDma, arg0) +#define dmaReconcileTunableState(pGpu, pEngstate, pTunableState) dmaReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dmaStateLoad(pGpu, pEngstate, arg0) dmaStateLoad_DISPATCH(pGpu, pEngstate, arg0) +#define dmaStateUnload(pGpu, pEngstate, arg0) dmaStateUnload_DISPATCH(pGpu, pEngstate, arg0) +#define dmaStatePreLoad(pGpu, pEngstate, arg0) dmaStatePreLoad_DISPATCH(pGpu, pEngstate, arg0) +#define dmaStatePostUnload(pGpu, pEngstate, arg0) dmaStatePostUnload_DISPATCH(pGpu, pEngstate, arg0) +#define dmaStateDestroy(pGpu, pEngstate) dmaStateDestroy_DISPATCH(pGpu, pEngstate) +#define dmaStatePreUnload(pGpu, pEngstate, arg0) dmaStatePreUnload_DISPATCH(pGpu, pEngstate, arg0) 
+#define dmaStateInitUnlocked(pGpu, pEngstate) dmaStateInitUnlocked_DISPATCH(pGpu, pEngstate) +#define dmaInitMissing(pGpu, pEngstate) dmaInitMissing_DISPATCH(pGpu, pEngstate) +#define dmaStatePreInitLocked(pGpu, pEngstate) dmaStatePreInitLocked_DISPATCH(pGpu, pEngstate) +#define dmaStatePreInitUnlocked(pGpu, pEngstate) dmaStatePreInitUnlocked_DISPATCH(pGpu, pEngstate) +#define dmaGetTunableState(pGpu, pEngstate, pTunableState) dmaGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dmaCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) dmaCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2) +#define dmaFreeTunableState(pGpu, pEngstate, pTunableState) dmaFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dmaAllocTunableState(pGpu, pEngstate, ppTunableState) dmaAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState) +#define dmaSetTunableState(pGpu, pEngstate, pTunableState) dmaSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState) +#define dmaIsPresent(pGpu, pEngstate) dmaIsPresent_DISPATCH(pGpu, pEngstate) +NV_STATUS dmaInit_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaInit(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaInit(pGpu, pDma) dmaInit_GM107(pGpu, pDma) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaInit_HAL(pGpu, pDma) dmaInit(pGpu, pDma) + +NV_STATUS dmaConstructHal_VGPUSTUB(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaConstructHal(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaConstructHal(pGpu, pDma) dmaConstructHal_VGPUSTUB(pGpu, pDma) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaConstructHal_HAL(pGpu, pDma) dmaConstructHal(pGpu, pDma) + +void dmaDestruct_GM107(struct VirtMemAllocator *pDma); + +#define __nvoc_dmaDestruct(pDma) dmaDestruct_GM107(pDma) +NV_STATUS dmaInitGart_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaInitGart(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaInitGart(pGpu, pDma) dmaInitGart_GM107(pGpu, pDma) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaInitGart_HAL(pGpu, pDma) dmaInitGart(pGpu, pDma) + +NV_STATUS dmaAllocMapping_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, MEMORY_DESCRIPTOR *arg1, NvU64 *arg2, NvU32 arg3, CLI_DMA_ALLOC_MAP_INFO *arg4, NvU32 arg5); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaAllocMapping(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, MEMORY_DESCRIPTOR *arg1, NvU64 *arg2, NvU32 arg3, CLI_DMA_ALLOC_MAP_INFO *arg4, NvU32 arg5) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaAllocMapping(pGpu, pDma, arg0, arg1, arg2, arg3, arg4, arg5) dmaAllocMapping_GM107(pGpu, pDma, arg0, arg1, arg2, arg3, 
arg4, arg5) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaAllocMapping_HAL(pGpu, pDma, arg0, arg1, arg2, arg3, arg4, arg5) dmaAllocMapping(pGpu, pDma, arg0, arg1, arg2, arg3, arg4, arg5) + +NV_STATUS dmaFreeMapping_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, NvU64 arg1, MEMORY_DESCRIPTOR *arg2, NvU32 arg3, CLI_DMA_ALLOC_MAP_INFO *arg4); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaFreeMapping(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, NvU64 arg1, MEMORY_DESCRIPTOR *arg2, NvU32 arg3, CLI_DMA_ALLOC_MAP_INFO *arg4) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaFreeMapping(pGpu, pDma, arg0, arg1, arg2, arg3, arg4) dmaFreeMapping_GM107(pGpu, pDma, arg0, arg1, arg2, arg3, arg4) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaFreeMapping_HAL(pGpu, pDma, arg0, arg1, arg2, arg3, arg4) dmaFreeMapping(pGpu, pDma, arg0, arg1, arg2, arg3, arg4) + +static inline NV_STATUS dmaAllocBar1P2PMapping_46f6a7(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, DMA_BAR1P2P_MAPPING_PRARAMS *params) { + return NV_ERR_NOT_SUPPORTED; +} + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaAllocBar1P2PMapping(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, DMA_BAR1P2P_MAPPING_PRARAMS *params) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaAllocBar1P2PMapping(pGpu, pDma, params) dmaAllocBar1P2PMapping_46f6a7(pGpu, pDma, params) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaAllocBar1P2PMapping_HAL(pGpu, pDma, params) dmaAllocBar1P2PMapping(pGpu, pDma, params) + +static inline void dmaFreeBar1P2PMapping_b3696a(struct VirtMemAllocator *pDma, CLI_DMA_MAPPING_INFO *arg0) { + return; +} + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline void dmaFreeBar1P2PMapping(struct VirtMemAllocator *pDma, CLI_DMA_MAPPING_INFO *arg0) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaFreeBar1P2PMapping(pDma, arg0) dmaFreeBar1P2PMapping_b3696a(pDma, arg0) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaFreeBar1P2PMapping_HAL(pDma, arg0) dmaFreeBar1P2PMapping(pDma, arg0) + +NV_STATUS dmaUpdateVASpace_GF100(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, MEMORY_DESCRIPTOR *pMemDesc, NvU8 *tgtPteMem, NvU64 vAddr, NvU64 vAddrLimit, NvU32 flags, DMA_PAGE_ARRAY *pPageArray, NvU32 overmapPteMod, COMPR_INFO *pComprInfo, NvU64 surfaceOffset, NvU32 valid, NvU32 aperture, NvU32 peer, NvU64 fabricAddr, NvU32 deferInvalidate, NvBool bSparse); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaUpdateVASpace(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, MEMORY_DESCRIPTOR *pMemDesc, NvU8 *tgtPteMem, NvU64 vAddr, NvU64 vAddrLimit, NvU32 flags, DMA_PAGE_ARRAY *pPageArray, NvU32 overmapPteMod, COMPR_INFO *pComprInfo, NvU64 surfaceOffset, NvU32 valid, NvU32 aperture, NvU32 peer, NvU64 fabricAddr, NvU32 deferInvalidate, NvBool bSparse) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaUpdateVASpace(pGpu, pDma, pVAS, pMemDesc, tgtPteMem, vAddr, vAddrLimit, flags, pPageArray, overmapPteMod, 
pComprInfo, surfaceOffset, valid, aperture, peer, fabricAddr, deferInvalidate, bSparse) dmaUpdateVASpace_GF100(pGpu, pDma, pVAS, pMemDesc, tgtPteMem, vAddr, vAddrLimit, flags, pPageArray, overmapPteMod, pComprInfo, surfaceOffset, valid, aperture, peer, fabricAddr, deferInvalidate, bSparse) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaUpdateVASpace_HAL(pGpu, pDma, pVAS, pMemDesc, tgtPteMem, vAddr, vAddrLimit, flags, pPageArray, overmapPteMod, pComprInfo, surfaceOffset, valid, aperture, peer, fabricAddr, deferInvalidate, bSparse) dmaUpdateVASpace(pGpu, pDma, pVAS, pMemDesc, tgtPteMem, vAddr, vAddrLimit, flags, pPageArray, overmapPteMod, pComprInfo, surfaceOffset, valid, aperture, peer, fabricAddr, deferInvalidate, bSparse) + +NV_STATUS dmaXlateVAtoPAforChannel_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct KernelChannel *pKernelChannel, NvU64 vAddr, NvU64 *pAddr, NvU32 *memType); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaXlateVAtoPAforChannel(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct KernelChannel *pKernelChannel, NvU64 vAddr, NvU64 *pAddr, NvU32 *memType) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaXlateVAtoPAforChannel(pGpu, pDma, pKernelChannel, vAddr, pAddr, memType) dmaXlateVAtoPAforChannel_GM107(pGpu, pDma, pKernelChannel, vAddr, pAddr, memType) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaXlateVAtoPAforChannel_HAL(pGpu, pDma, pKernelChannel, vAddr, pAddr, memType) dmaXlateVAtoPAforChannel(pGpu, pDma, pKernelChannel, vAddr, pAddr, memType) + +NvU32 dmaGetPTESize_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NvU32 dmaGetPTESize(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return 0; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaGetPTESize(pGpu, pDma) dmaGetPTESize_GM107(pGpu, pDma) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaGetPTESize_HAL(pGpu, pDma) dmaGetPTESize(pGpu, pDma) + +NV_STATUS dmaMapBuffer_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pVaddr, NvU32 allocFlags, NvU32 mapFlags); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaMapBuffer(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, PMEMORY_DESCRIPTOR pMemDesc, NvU64 *pVaddr, NvU32 allocFlags, NvU32 mapFlags) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaMapBuffer(pGpu, pDma, pVAS, pMemDesc, pVaddr, allocFlags, mapFlags) dmaMapBuffer_GM107(pGpu, pDma, pVAS, pMemDesc, pVaddr, allocFlags, mapFlags) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaMapBuffer_HAL(pGpu, pDma, pVAS, pMemDesc, pVaddr, allocFlags, mapFlags) dmaMapBuffer(pGpu, pDma, pVAS, pMemDesc, pVaddr, allocFlags, mapFlags) + +void dmaUnmapBuffer_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, NvU64 vaddr); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline void dmaUnmapBuffer(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *pVAS, NvU64 vaddr) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define 
dmaUnmapBuffer(pGpu, pDma, pVAS, vaddr) dmaUnmapBuffer_GM107(pGpu, pDma, pVAS, vaddr) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaUnmapBuffer_HAL(pGpu, pDma, pVAS, vaddr) dmaUnmapBuffer(pGpu, pDma, pVAS, vaddr) + +NvU64 dmaGetPfnFromPte_GP100(struct VirtMemAllocator *pDma, NvBool bSysMem, NvU64 pPteMem); + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NvU64 dmaGetPfnFromPte(struct VirtMemAllocator *pDma, NvBool bSysMem, NvU64 pPteMem) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return 0; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaGetPfnFromPte(pDma, bSysMem, pPteMem) dmaGetPfnFromPte_GP100(pDma, bSysMem, pPteMem) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaGetPfnFromPte_HAL(pDma, bSysMem, pPteMem) dmaGetPfnFromPte(pDma, bSysMem, pPteMem) + +static inline struct OBJVASPACE *dmaGetPrivateVAS_fa6e19(struct VirtMemAllocator *pDma) { + return ((void *)0); +} + +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline struct OBJVASPACE *dmaGetPrivateVAS(struct VirtMemAllocator *pDma) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NULL; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaGetPrivateVAS(pDma) dmaGetPrivateVAS_fa6e19(pDma) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#define dmaGetPrivateVAS_HAL(pDma) dmaGetPrivateVAS(pDma) + +NV_STATUS dmaConstructEngine_IMPL(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, ENGDESCRIPTOR arg0); + +static inline NV_STATUS dmaConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, ENGDESCRIPTOR arg0) { + return pDma->__dmaConstructEngine__(pGpu, pDma, arg0); +} + +NV_STATUS dmaStateInitLocked_IMPL(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma); + +static inline NV_STATUS dmaStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma) { + return pDma->__dmaStateInitLocked__(pGpu, pDma); +} + +NV_STATUS dmaStatePostLoad_GM107(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, NvU32 arg0); + +static inline NV_STATUS dmaStatePostLoad_56cd7a(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, NvU32 arg0) { + return NV_OK; +} + +static inline NV_STATUS dmaStatePostLoad_DISPATCH(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, NvU32 arg0) { + return pDma->__dmaStatePostLoad__(pGpu, pDma, arg0); +} + +static inline NV_STATUS dmaReconcileTunableState_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunableState) { + return pEngstate->__dmaReconcileTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS dmaStateLoad_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return pEngstate->__dmaStateLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dmaStateUnload_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return pEngstate->__dmaStateUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dmaStatePreLoad_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return pEngstate->__dmaStatePreLoad__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dmaStatePostUnload_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, NvU32 arg0) { + return pEngstate->__dmaStatePostUnload__(pGpu, pEngstate, arg0); +} + +static inline void dmaStateDestroy_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + pEngstate->__dmaStateDestroy__(pGpu, pEngstate); +} + +static inline NV_STATUS dmaStatePreUnload_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, 
NvU32 arg0) { + return pEngstate->__dmaStatePreUnload__(pGpu, pEngstate, arg0); +} + +static inline NV_STATUS dmaStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + return pEngstate->__dmaStateInitUnlocked__(pGpu, pEngstate); +} + +static inline void dmaInitMissing_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + pEngstate->__dmaInitMissing__(pGpu, pEngstate); +} + +static inline NV_STATUS dmaStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + return pEngstate->__dmaStatePreInitLocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dmaStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + return pEngstate->__dmaStatePreInitUnlocked__(pGpu, pEngstate); +} + +static inline NV_STATUS dmaGetTunableState_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunableState) { + return pEngstate->__dmaGetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS dmaCompareTunableState_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunables1, void *pTunables2) { + return pEngstate->__dmaCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2); +} + +static inline void dmaFreeTunableState_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunableState) { + pEngstate->__dmaFreeTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NV_STATUS dmaAllocTunableState_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void **ppTunableState) { + return pEngstate->__dmaAllocTunableState__(pGpu, pEngstate, ppTunableState); +} + +static inline NV_STATUS dmaSetTunableState_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate, void *pTunableState) { + return pEngstate->__dmaSetTunableState__(pGpu, pEngstate, pTunableState); +} + +static inline NvBool dmaIsPresent_DISPATCH(POBJGPU pGpu, struct VirtMemAllocator *pEngstate) { + return pEngstate->__dmaIsPresent__(pGpu, pEngstate); +} + +NV_STATUS dmaAllocMap_IMPL(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, VirtualMemory *arg1, Memory *arg2, CLI_DMA_MAPPING_INFO *arg3); +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaAllocMap(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, VirtualMemory *arg1, Memory *arg2, CLI_DMA_MAPPING_INFO *arg3) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaAllocMap(pGpu, pDma, arg0, arg1, arg2, arg3) dmaAllocMap_IMPL(pGpu, pDma, arg0, arg1, arg2, arg3) +#endif //__nvoc_virt_mem_allocator_h_disabled + +NV_STATUS dmaFreeMap_IMPL(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, VirtualMemory *arg1, CLI_DMA_MAPPING_INFO *arg2, NvU32 flags); +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NV_STATUS dmaFreeMap(struct OBJGPU *pGpu, struct VirtMemAllocator *pDma, struct OBJVASPACE *arg0, VirtualMemory *arg1, CLI_DMA_MAPPING_INFO *arg2, NvU32 flags) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaFreeMap(pGpu, pDma, arg0, arg1, arg2, flags) dmaFreeMap_IMPL(pGpu, pDma, arg0, arg1, arg2, flags) +#endif //__nvoc_virt_mem_allocator_h_disabled + +NvBool dmaUseCompTagLineUpperHalf_IMPL(struct VirtMemAllocator *pDma, NvU32 pteIndex, NvU32 pageSize); +#ifdef __nvoc_virt_mem_allocator_h_disabled +static inline NvBool 
dmaUseCompTagLineUpperHalf(struct VirtMemAllocator *pDma, NvU32 pteIndex, NvU32 pageSize) { + NV_ASSERT_FAILED_PRECOMP("VirtMemAllocator was disabled!"); + return NV_FALSE; +} +#else //__nvoc_virt_mem_allocator_h_disabled +#define dmaUseCompTagLineUpperHalf(pDma, pteIndex, pageSize) dmaUseCompTagLineUpperHalf_IMPL(pDma, pteIndex, pageSize) +#endif //__nvoc_virt_mem_allocator_h_disabled + +#undef PRIVATE_FIELD + + +// +// Virtual Memory Manager +// + +// +// The VA space is the root of an address space. +// +// Currently we support 2 page sizes +// +#define VAS_PAGESIZE_IDX_4K 0 +#define VAS_PAGESIZE_IDX_BIG 1 +#define VAS_PAGESIZE_IDX_HUGE 2 +#define VAS_PAGESIZE_IDX_512M 3 +#define VAS_PAGESIZE_IDX_MAX 4 + +// VMM-TODO Used by old VAS Object +#define VAS_NUM_PAGESIZE_TYPES VAS_PAGESIZE_IDX_BIG+1 +#define VAS_PAGESIZE_IDX(PS) ((PS) != 4096) + +// Convert a page size mask to a string for debug prints. +#define VAS_PAGESIZE_MASK_STR(mask) \ + (!ONEBITSET(mask) ? "BOTH" : \ + ((mask == RM_PAGE_SIZE) ? "4KB" : "BIG")) + +// Value to pass to dmaAllocVASpace_HAL for both (default) page size. +#define VAS_ALLOC_PAGESIZE_BOTH (0x0) + +typedef enum +{ + VASPACE_BIG_PAGE_SIZE_64K_IDX = 0, + VASPACE_BIG_PAGE_SIZE_128K_IDX = 1, + VASPACE_NUM_BIG_PAGE_TYPES = 2 +}VASPACE_BIG_PAGE_SIZE_IDX; + +/*! + * Abstracts an array of physical page addresses. + */ +struct DMA_PAGE_ARRAY +{ + void *pData; //!< Array of PTE addresses or opaque OS-specific data. + RmPhysAddr orMask; //!< Mask to be bitwise-ORed onto each page address. + NvU32 startIndex; //!< Base index into the pData array. + NvU32 count; //!< Number of pages represented by this array. + NvBool bOsFormat; //!< Indicates if pData is an opaque OS-specific data. + NvBool bDuplicate; //!< Indicates to duplicate the address of the first page. + OS_GPU_INFO *pOsGpuInfo; //!< OS-specific GPU info needed for IOMMU on Windows. +}; + +// page array operations +void dmaPageArrayInit(DMA_PAGE_ARRAY *pPageArray, void *pPageData, NvU32 pageCount); +void dmaPageArrayInitFromMemDesc(DMA_PAGE_ARRAY *pPageArray, + MEMORY_DESCRIPTOR *pMemDesc, + ADDRESS_TRANSLATION addressTranslation); +RmPhysAddr dmaPageArrayGetPhysAddr(DMA_PAGE_ARRAY *pPageArray, NvU32 pageIndex); + +/*! + * Indicates that if the VA range being initialized is sparse, + * the sparse bit should be set for the range. 
+ */ +#define DMA_INIT_VAS_FLAGS_ENABLE_SPARSE NVBIT(0) + +// +// hal.dmaUpdateVASpace() flags +// +#define DMA_UPDATE_VASPACE_FLAGS_NONE 0 +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_PADDR NVBIT(0) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_COMPR NVBIT(1) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_ACCESS NVBIT(2) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_VALID NVBIT(3) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_PRIV NVBIT(4) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_KIND NVBIT(5) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_APERTURE NVBIT(6) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_PEER NVBIT(7) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_ENCRYPTED NVBIT(8) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_TLB_LOCK NVBIT(9) +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_CACHE NVBIT(10) // VOLATILE of fermi +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_SHADER_ACCESS NVBIT(11) // Kepler shader access +#define DMA_UPDATE_VASPACE_FLAGS_UPDATE_ALL MASK_BITS(11) + +#define DMA_UPDATE_VASPACE_FLAGS_SKIP_4K_PTE_CHECK NVBIT(12) +#define DMA_UPDATE_VASPACE_FLAGS_INDIRECT_PEER NVBIT(22) +#define DMA_UPDATE_VASPACE_FLAGS_ALLOW_REMAP NVBIT(23) +#define DMA_UPDATE_VASPACE_FLAGS_UNALIGNED_COMP NVBIT(24) +#define DMA_UPDATE_VASPACE_FLAGS_FILL_PTE_MEM NVBIT(25) +#define DMA_UPDATE_VASPACE_FLAGS_DISABLE_ENCRYPTION NVBIT(26) +#define DMA_UPDATE_VASPACE_FLAGS_READ_ONLY NVBIT(27) +#define DMA_UPDATE_VASPACE_FLAGS_PRIV NVBIT(28) +#define DMA_UPDATE_VASPACE_FLAGS_TLB_LOCK NVBIT(29) +#define DMA_UPDATE_VASPACE_FLAGS_SHADER_WRITE_ONLY NVBIT(30) // Kepler shader access +#define DMA_UPDATE_VASPACE_FLAGS_SHADER_READ_ONLY NVBIT(31) // Kepler shader access + +// +// hal.dmaAllocVASpace() flags +// +#define DMA_ALLOC_VASPACE_NONE 0 +#define DMA_VA_LIMIT_49B NVBIT(0) +#define DMA_VA_LIMIT_57B NVBIT(1) +#define DMA_ALLOC_VASPACE_SIZE_ALIGNED NVBIT(9) + +// +// Internal device allocation flags +// +#define NV_DEVICE_INTERNAL_ALLOCATION_FLAGS_NONE 0 +#define NV_DEVICE_INTERNAL_ALLOCATION_FLAGS_ENABLE_PRIVILEGED_VASPACE NVBIT(0) + +// +// UVM privileged region +// +#define UVM_KERNEL_PRIVILEGED_REGION_START (0xFFF8000000ULL) +#define UVM_KERNEL_PRIVILEGED_REGION_LENGTH (0x0008000000ULL) + +#endif // VIRT_MEM_ALLOCATOR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VIRT_MEM_ALLOCATOR_NVOC_H_ diff --git a/src/nvidia/generated/g_virt_mem_mgr_nvoc.c b/src/nvidia/generated/g_virt_mem_mgr_nvoc.c new file mode 100644 index 000000000..4cb2434d4 --- /dev/null +++ b/src/nvidia/generated/g_virt_mem_mgr_nvoc.c @@ -0,0 +1,148 @@ +#define NVOC_VIRT_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_virt_mem_mgr_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0xa030ab = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +void __nvoc_init_OBJVMM(OBJVMM*); +void __nvoc_init_funcTable_OBJVMM(OBJVMM*); +NV_STATUS __nvoc_ctor_OBJVMM(OBJVMM*); +void __nvoc_init_dataField_OBJVMM(OBJVMM*); +void __nvoc_dtor_OBJVMM(OBJVMM*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJVMM; + +static const struct NVOC_RTTI __nvoc_rtti_OBJVMM_OBJVMM = { + /*pClassDef=*/ &__nvoc_class_def_OBJVMM, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJVMM, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_OBJVMM_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, 
+ /*offset=*/ NV_OFFSETOF(OBJVMM, __nvoc_base_Object), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_OBJVMM = { + /*numRelatives=*/ 2, + /*relatives=*/ { + &__nvoc_rtti_OBJVMM_OBJVMM, + &__nvoc_rtti_OBJVMM_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM = +{ + /*classInfo=*/ { + /*size=*/ sizeof(OBJVMM), + /*classId=*/ classId(OBJVMM), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "OBJVMM", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJVMM, + /*pCastInfo=*/ &__nvoc_castinfo_OBJVMM, + /*pExportInfo=*/ &__nvoc_export_info_OBJVMM +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJVMM = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_Object(Object*); +void __nvoc_dtor_OBJVMM(OBJVMM *pThis) { + __nvoc_dtor_Object(&pThis->__nvoc_base_Object); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_OBJVMM(OBJVMM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_Object(Object* ); +NV_STATUS __nvoc_ctor_OBJVMM(OBJVMM *pThis) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object); + if (status != NV_OK) goto __nvoc_ctor_OBJVMM_fail_Object; + __nvoc_init_dataField_OBJVMM(pThis); + goto __nvoc_ctor_OBJVMM_exit; // Success + +__nvoc_ctor_OBJVMM_fail_Object: +__nvoc_ctor_OBJVMM_exit: + + return status; +} + +static void __nvoc_init_funcTable_OBJVMM_1(OBJVMM *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_funcTable_OBJVMM(OBJVMM *pThis) { + __nvoc_init_funcTable_OBJVMM_1(pThis); +} + +void __nvoc_init_Object(Object*); +void __nvoc_init_OBJVMM(OBJVMM *pThis) { + pThis->__nvoc_pbase_OBJVMM = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object; + __nvoc_init_Object(&pThis->__nvoc_base_Object); + __nvoc_init_funcTable_OBJVMM(pThis); +} + +NV_STATUS __nvoc_objCreate_OBJVMM(OBJVMM **ppThis, Dynamic *pParent, NvU32 createFlags) { + NV_STATUS status; + Object *pParentObj; + OBJVMM *pThis; + + pThis = portMemAllocNonPaged(sizeof(OBJVMM)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(OBJVMM)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJVMM); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_OBJVMM(pThis); + status = __nvoc_ctor_OBJVMM(pThis); + if (status != NV_OK) goto __nvoc_objCreate_OBJVMM_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_OBJVMM_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_OBJVMM(OBJVMM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + + status = __nvoc_objCreate_OBJVMM(ppThis, pParent, createFlags); + + return status; +} + diff --git a/src/nvidia/generated/g_virt_mem_mgr_nvoc.h b/src/nvidia/generated/g_virt_mem_mgr_nvoc.h new file mode 100644 index 000000000..291303299 --- /dev/null +++ b/src/nvidia/generated/g_virt_mem_mgr_nvoc.h @@ -0,0 +1,133 @@ +#ifndef _G_VIRT_MEM_MGR_NVOC_H_ +#define _G_VIRT_MEM_MGR_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_virt_mem_mgr_nvoc.h" + +#ifndef VIRT_MEM_MGR_H +#define VIRT_MEM_MGR_H + +/**************** Resource Manager Defines and Structures ******************\ +* Defines and structures used for Virtual Memory Management Object. * +\***************************************************************************/ + +#include "mem_mgr/vaspace.h" + +typedef struct OBJVMM *POBJVMM; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + + + +#ifdef NVOC_VIRT_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct OBJVMM { + const struct NVOC_RTTI *__nvoc_rtti; + struct Object __nvoc_base_Object; + struct Object *__nvoc_pbase_Object; + struct OBJVMM *__nvoc_pbase_OBJVMM; +}; + +#ifndef __NVOC_CLASS_OBJVMM_TYPEDEF__ +#define __NVOC_CLASS_OBJVMM_TYPEDEF__ +typedef struct OBJVMM OBJVMM; +#endif /* __NVOC_CLASS_OBJVMM_TYPEDEF__ */ + +#ifndef __nvoc_class_id_OBJVMM +#define __nvoc_class_id_OBJVMM 0xa030ab +#endif /* __nvoc_class_id_OBJVMM */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVMM; + +#define __staticCast_OBJVMM(pThis) \ + ((pThis)->__nvoc_pbase_OBJVMM) + +#ifdef __nvoc_virt_mem_mgr_h_disabled +#define __dynamicCast_OBJVMM(pThis) ((OBJVMM*)NULL) +#else //__nvoc_virt_mem_mgr_h_disabled +#define __dynamicCast_OBJVMM(pThis) \ + ((OBJVMM*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJVMM))) +#endif //__nvoc_virt_mem_mgr_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_OBJVMM(OBJVMM**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_OBJVMM(OBJVMM**, Dynamic*, NvU32); +#define __objCreate_OBJVMM(ppNewObj, pParent, createFlags) \ + __nvoc_objCreate_OBJVMM((ppNewObj), staticCast((pParent), Dynamic), (createFlags)) + +NV_STATUS vmmCreateVaspace_IMPL(struct OBJVMM *pVmm, NvU32 _class, NvU32 vaspaceId, NvU32 gpuMask, NvU64 vaStart, NvU64 vaLimit, NvU64 vaInternalStart, NvU64 vaInternalEnd, struct OBJVASPACE *pPteSpaceMap, NvU32 flags, struct OBJVASPACE **ppVAS); +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline NV_STATUS vmmCreateVaspace(struct OBJVMM *pVmm, NvU32 _class, NvU32 vaspaceId, NvU32 gpuMask, NvU64 vaStart, NvU64 vaLimit, NvU64 vaInternalStart, NvU64 
vaInternalEnd, struct OBJVASPACE *pPteSpaceMap, NvU32 flags, struct OBJVASPACE **ppVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmCreateVaspace(pVmm, _class, vaspaceId, gpuMask, vaStart, vaLimit, vaInternalStart, vaInternalEnd, pPteSpaceMap, flags, ppVAS) vmmCreateVaspace_IMPL(pVmm, _class, vaspaceId, gpuMask, vaStart, vaLimit, vaInternalStart, vaInternalEnd, pPteSpaceMap, flags, ppVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +void vmmDestroyVaspace_IMPL(struct OBJVMM *pVmm, struct OBJVASPACE *pVAS); +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline void vmmDestroyVaspace(struct OBJVMM *pVmm, struct OBJVASPACE *pVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmDestroyVaspace(pVmm, pVAS) vmmDestroyVaspace_IMPL(pVmm, pVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +NV_STATUS vmmGetVaspaceFromId_IMPL(struct OBJVMM *pVmm, NvU32 vaspaceId, NvU32 classId, struct OBJVASPACE **ppVAS); +#ifdef __nvoc_virt_mem_mgr_h_disabled +static inline NV_STATUS vmmGetVaspaceFromId(struct OBJVMM *pVmm, NvU32 vaspaceId, NvU32 classId, struct OBJVASPACE **ppVAS) { + NV_ASSERT_FAILED_PRECOMP("OBJVMM was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virt_mem_mgr_h_disabled +#define vmmGetVaspaceFromId(pVmm, vaspaceId, classId, ppVAS) vmmGetVaspaceFromId_IMPL(pVmm, vaspaceId, classId, ppVAS) +#endif //__nvoc_virt_mem_mgr_h_disabled + +#undef PRIVATE_FIELD + + +#endif // VIRT_MEM_MGR_H + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VIRT_MEM_MGR_NVOC_H_ diff --git a/src/nvidia/generated/g_virt_mem_range_nvoc.c b/src/nvidia/generated/g_virt_mem_range_nvoc.c new file mode 100644 index 000000000..a9734adba --- /dev/null +++ b/src/nvidia/generated/g_virt_mem_range_nvoc.c @@ -0,0 +1,341 @@ +#define NVOC_VIRT_MEM_RANGE_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_virt_mem_range_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x7032c6 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VirtualMemoryRange; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VirtualMemory; + +void __nvoc_init_VirtualMemoryRange(VirtualMemoryRange*); +void __nvoc_init_funcTable_VirtualMemoryRange(VirtualMemoryRange*); +NV_STATUS __nvoc_ctor_VirtualMemoryRange(VirtualMemoryRange*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_VirtualMemoryRange(VirtualMemoryRange*); +void __nvoc_dtor_VirtualMemoryRange(VirtualMemoryRange*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_VirtualMemoryRange; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemoryRange_VirtualMemoryRange = { + /*pClassDef=*/ &__nvoc_class_def_VirtualMemoryRange, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_VirtualMemoryRange, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemoryRange_Object 
= { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemoryRange, __nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemoryRange_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemoryRange, __nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemoryRange_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemoryRange, __nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemoryRange_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemoryRange, __nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemoryRange_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemoryRange, __nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemoryRange_StandardMemory = { + /*pClassDef=*/ &__nvoc_class_def_StandardMemory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemoryRange, __nvoc_base_VirtualMemory.__nvoc_base_StandardMemory), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemoryRange_VirtualMemory = { + /*pClassDef=*/ &__nvoc_class_def_VirtualMemory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemoryRange, __nvoc_base_VirtualMemory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_VirtualMemoryRange = { + /*numRelatives=*/ 8, + /*relatives=*/ { + &__nvoc_rtti_VirtualMemoryRange_VirtualMemoryRange, + &__nvoc_rtti_VirtualMemoryRange_VirtualMemory, + &__nvoc_rtti_VirtualMemoryRange_StandardMemory, + &__nvoc_rtti_VirtualMemoryRange_Memory, + &__nvoc_rtti_VirtualMemoryRange_RmResource, + &__nvoc_rtti_VirtualMemoryRange_RmResourceCommon, + &__nvoc_rtti_VirtualMemoryRange_RsResource, + &__nvoc_rtti_VirtualMemoryRange_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_VirtualMemoryRange = +{ + /*classInfo=*/ { + /*size=*/ sizeof(VirtualMemoryRange), + /*classId=*/ classId(VirtualMemoryRange), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "VirtualMemoryRange", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_VirtualMemoryRange, + /*pCastInfo=*/ &__nvoc_castinfo_VirtualMemoryRange, + /*pExportInfo=*/ &__nvoc_export_info_VirtualMemoryRange +}; + +static NV_STATUS __nvoc_thunk_Memory_vmrangeCheckMemInterUnmap(struct VirtualMemoryRange *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_VirtualMemory_vmrangeMapTo(struct VirtualMemoryRange *pVirtualMemory, struct RS_RES_MAP_TO_PARAMS *pParams) { + return virtmemMapTo((struct 
VirtualMemory *)(((unsigned char *)pVirtualMemory) + __nvoc_rtti_VirtualMemoryRange_VirtualMemory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_vmrangeControl(struct VirtualMemoryRange *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_vmrangeGetMemInterMapParams(struct VirtualMemoryRange *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_vmrangeUnmap(struct VirtualMemoryRange *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_vmrangeGetMapAddrSpace(struct VirtualMemoryRange *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_vmrangeShareCallback(struct VirtualMemoryRange *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_Memory_vmrangeGetMemoryMappingDescriptor(struct VirtualMemoryRange *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_RsResource_vmrangeControlFilter(struct VirtualMemoryRange *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RsResource.offset), pCallContext, pParams); +} + +static NvU32 __nvoc_thunk_RsResource_vmrangeGetRefCount(struct VirtualMemoryRange *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RsResource.offset)); +} + +static void __nvoc_thunk_RsResource_vmrangeAddAdditionalDependants(struct RsClient *pClient, struct VirtualMemoryRange *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RsResource.offset), pReference); +} + +static NvBool __nvoc_thunk_StandardMemory_vmrangeCanCopy(struct VirtualMemoryRange *pStandardMemory) { + return stdmemCanCopy((struct StandardMemory *)(((unsigned char *)pStandardMemory) + __nvoc_rtti_VirtualMemoryRange_StandardMemory.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_vmrangeControl_Prologue(struct VirtualMemoryRange *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS 
__nvoc_thunk_Memory_vmrangeIsReady(struct VirtualMemoryRange *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_VirtualMemory_vmrangeUnmapFrom(struct VirtualMemoryRange *pVirtualMemory, struct RS_RES_UNMAP_FROM_PARAMS *pParams) { + return virtmemUnmapFrom((struct VirtualMemory *)(((unsigned char *)pVirtualMemory) + __nvoc_rtti_VirtualMemoryRange_VirtualMemory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_vmrangeCheckCopyPermissions(struct VirtualMemoryRange *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_vmrangePreDestruct(struct VirtualMemoryRange *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RsResource.offset)); +} + +static void __nvoc_thunk_RmResource_vmrangeControl_Epilogue(struct VirtualMemoryRange *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_vmrangeControlLookup(struct VirtualMemoryRange *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_vmrangeMap(struct VirtualMemoryRange *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemoryRange_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_vmrangeAccessCallback(struct VirtualMemoryRange *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemoryRange_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_VirtualMemoryRange = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_VirtualMemory(VirtualMemory*); +void __nvoc_dtor_VirtualMemoryRange(VirtualMemoryRange *pThis) { + __nvoc_dtor_VirtualMemory(&pThis->__nvoc_base_VirtualMemory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_VirtualMemoryRange(VirtualMemoryRange *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_VirtualMemory(VirtualMemory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_VirtualMemoryRange(VirtualMemoryRange *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_VirtualMemory(&pThis->__nvoc_base_VirtualMemory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VirtualMemoryRange_fail_VirtualMemory; + __nvoc_init_dataField_VirtualMemoryRange(pThis); + + status = __nvoc_vmrangeConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto 
__nvoc_ctor_VirtualMemoryRange_fail__init; + goto __nvoc_ctor_VirtualMemoryRange_exit; // Success + +__nvoc_ctor_VirtualMemoryRange_fail__init: + __nvoc_dtor_VirtualMemory(&pThis->__nvoc_base_VirtualMemory); +__nvoc_ctor_VirtualMemoryRange_fail_VirtualMemory: +__nvoc_ctor_VirtualMemoryRange_exit: + + return status; +} + +static void __nvoc_init_funcTable_VirtualMemoryRange_1(VirtualMemoryRange *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__vmrangeCheckMemInterUnmap__ = &__nvoc_thunk_Memory_vmrangeCheckMemInterUnmap; + + pThis->__vmrangeMapTo__ = &__nvoc_thunk_VirtualMemory_vmrangeMapTo; + + pThis->__vmrangeControl__ = &__nvoc_thunk_Memory_vmrangeControl; + + pThis->__vmrangeGetMemInterMapParams__ = &__nvoc_thunk_Memory_vmrangeGetMemInterMapParams; + + pThis->__vmrangeUnmap__ = &__nvoc_thunk_Memory_vmrangeUnmap; + + pThis->__vmrangeGetMapAddrSpace__ = &__nvoc_thunk_Memory_vmrangeGetMapAddrSpace; + + pThis->__vmrangeShareCallback__ = &__nvoc_thunk_RmResource_vmrangeShareCallback; + + pThis->__vmrangeGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_vmrangeGetMemoryMappingDescriptor; + + pThis->__vmrangeControlFilter__ = &__nvoc_thunk_RsResource_vmrangeControlFilter; + + pThis->__vmrangeGetRefCount__ = &__nvoc_thunk_RsResource_vmrangeGetRefCount; + + pThis->__vmrangeAddAdditionalDependants__ = &__nvoc_thunk_RsResource_vmrangeAddAdditionalDependants; + + pThis->__vmrangeCanCopy__ = &__nvoc_thunk_StandardMemory_vmrangeCanCopy; + + pThis->__vmrangeControl_Prologue__ = &__nvoc_thunk_RmResource_vmrangeControl_Prologue; + + pThis->__vmrangeIsReady__ = &__nvoc_thunk_Memory_vmrangeIsReady; + + pThis->__vmrangeUnmapFrom__ = &__nvoc_thunk_VirtualMemory_vmrangeUnmapFrom; + + pThis->__vmrangeCheckCopyPermissions__ = &__nvoc_thunk_Memory_vmrangeCheckCopyPermissions; + + pThis->__vmrangePreDestruct__ = &__nvoc_thunk_RsResource_vmrangePreDestruct; + + pThis->__vmrangeControl_Epilogue__ = &__nvoc_thunk_RmResource_vmrangeControl_Epilogue; + + pThis->__vmrangeControlLookup__ = &__nvoc_thunk_RsResource_vmrangeControlLookup; + + pThis->__vmrangeMap__ = &__nvoc_thunk_Memory_vmrangeMap; + + pThis->__vmrangeAccessCallback__ = &__nvoc_thunk_RmResource_vmrangeAccessCallback; +} + +void __nvoc_init_funcTable_VirtualMemoryRange(VirtualMemoryRange *pThis) { + __nvoc_init_funcTable_VirtualMemoryRange_1(pThis); +} + +void __nvoc_init_VirtualMemory(VirtualMemory*); +void __nvoc_init_VirtualMemoryRange(VirtualMemoryRange *pThis) { + pThis->__nvoc_pbase_VirtualMemoryRange = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory; + pThis->__nvoc_pbase_StandardMemory = &pThis->__nvoc_base_VirtualMemory.__nvoc_base_StandardMemory; + pThis->__nvoc_pbase_VirtualMemory = &pThis->__nvoc_base_VirtualMemory; + __nvoc_init_VirtualMemory(&pThis->__nvoc_base_VirtualMemory); + 
__nvoc_init_funcTable_VirtualMemoryRange(pThis); +} + +NV_STATUS __nvoc_objCreate_VirtualMemoryRange(VirtualMemoryRange **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + VirtualMemoryRange *pThis; + + pThis = portMemAllocNonPaged(sizeof(VirtualMemoryRange)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(VirtualMemoryRange)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_VirtualMemoryRange); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_VirtualMemory.__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_VirtualMemoryRange(pThis); + status = __nvoc_ctor_VirtualMemoryRange(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_VirtualMemoryRange_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_VirtualMemoryRange_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_VirtualMemoryRange(VirtualMemoryRange **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_VirtualMemoryRange(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_virt_mem_range_nvoc.h b/src/nvidia/generated/g_virt_mem_range_nvoc.h new file mode 100644 index 000000000..840c21636 --- /dev/null +++ b/src/nvidia/generated/g_virt_mem_range_nvoc.h @@ -0,0 +1,227 @@ +#ifndef _G_VIRT_MEM_RANGE_NVOC_H_ +#define _G_VIRT_MEM_RANGE_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "g_virt_mem_range_nvoc.h"
+
+#ifndef _VIRT_MEM_RANGE_H_
+#define _VIRT_MEM_RANGE_H_
+
+#include "mem_mgr/virtual_mem.h"
+
+/*!
+ * Allocator for NV01_MEMORY_VIRTUAL class.
+ *
+ * Describes a range of typeless virtual memory. Used as
+ * a target space for RmMapMemoryDma.
+ */
+#ifdef NVOC_VIRT_MEM_RANGE_H_PRIVATE_ACCESS_ALLOWED
+#define PRIVATE_FIELD(x) x
+#else
+#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
+#endif
+struct VirtualMemoryRange {
+    const struct NVOC_RTTI *__nvoc_rtti;
+    struct VirtualMemory __nvoc_base_VirtualMemory;
+    struct Object *__nvoc_pbase_Object;
+    struct RsResource *__nvoc_pbase_RsResource;
+    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
+    struct RmResource *__nvoc_pbase_RmResource;
+    struct Memory *__nvoc_pbase_Memory;
+    struct StandardMemory *__nvoc_pbase_StandardMemory;
+    struct VirtualMemory *__nvoc_pbase_VirtualMemory;
+    struct VirtualMemoryRange *__nvoc_pbase_VirtualMemoryRange;
+    NV_STATUS (*__vmrangeCheckMemInterUnmap__)(struct VirtualMemoryRange *, NvBool);
+    NV_STATUS (*__vmrangeMapTo__)(struct VirtualMemoryRange *, struct RS_RES_MAP_TO_PARAMS *);
+    NV_STATUS (*__vmrangeControl__)(struct VirtualMemoryRange *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NV_STATUS (*__vmrangeGetMemInterMapParams__)(struct VirtualMemoryRange *, RMRES_MEM_INTER_MAP_PARAMS *);
+    NV_STATUS (*__vmrangeUnmap__)(struct VirtualMemoryRange *, CALL_CONTEXT *, RsCpuMapping *);
+    NV_STATUS (*__vmrangeGetMapAddrSpace__)(struct VirtualMemoryRange *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
+    NvBool (*__vmrangeShareCallback__)(struct VirtualMemoryRange *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
+    NV_STATUS (*__vmrangeGetMemoryMappingDescriptor__)(struct VirtualMemoryRange *, MEMORY_DESCRIPTOR **);
+    NV_STATUS (*__vmrangeControlFilter__)(struct VirtualMemoryRange *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NvU32 (*__vmrangeGetRefCount__)(struct VirtualMemoryRange *);
+    void (*__vmrangeAddAdditionalDependants__)(struct RsClient *, struct VirtualMemoryRange *, RsResourceRef *);
+    NvBool (*__vmrangeCanCopy__)(struct VirtualMemoryRange *);
+    NV_STATUS (*__vmrangeControl_Prologue__)(struct VirtualMemoryRange *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NV_STATUS (*__vmrangeIsReady__)(struct VirtualMemoryRange *);
+    NV_STATUS (*__vmrangeUnmapFrom__)(struct VirtualMemoryRange *, struct RS_RES_UNMAP_FROM_PARAMS *);
+    NV_STATUS (*__vmrangeCheckCopyPermissions__)(struct VirtualMemoryRange *, struct OBJGPU *, NvHandle);
+    void (*__vmrangePreDestruct__)(struct VirtualMemoryRange *);
+    void (*__vmrangeControl_Epilogue__)(struct VirtualMemoryRange *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
+    NV_STATUS (*__vmrangeControlLookup__)(struct VirtualMemoryRange *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
+    NV_STATUS (*__vmrangeMap__)(struct VirtualMemoryRange *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *);
+    NvBool (*__vmrangeAccessCallback__)(struct VirtualMemoryRange *, struct RsClient *, void *, RsAccessRight);
+};
+
+#ifndef __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__
+#define __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__
+typedef struct VirtualMemoryRange VirtualMemoryRange;
+#endif /* __NVOC_CLASS_VirtualMemoryRange_TYPEDEF__ */
+
+#ifndef __nvoc_class_id_VirtualMemoryRange
+#define __nvoc_class_id_VirtualMemoryRange 0x7032c6
+#endif /* __nvoc_class_id_VirtualMemoryRange */
+
+extern const struct NVOC_CLASS_DEF 
__nvoc_class_def_VirtualMemoryRange; + +#define __staticCast_VirtualMemoryRange(pThis) \ + ((pThis)->__nvoc_pbase_VirtualMemoryRange) + +#ifdef __nvoc_virt_mem_range_h_disabled +#define __dynamicCast_VirtualMemoryRange(pThis) ((VirtualMemoryRange*)NULL) +#else //__nvoc_virt_mem_range_h_disabled +#define __dynamicCast_VirtualMemoryRange(pThis) \ + ((VirtualMemoryRange*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(VirtualMemoryRange))) +#endif //__nvoc_virt_mem_range_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_VirtualMemoryRange(VirtualMemoryRange**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_VirtualMemoryRange(VirtualMemoryRange**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_VirtualMemoryRange(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_VirtualMemoryRange((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define vmrangeCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) vmrangeCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define vmrangeMapTo(pVirtualMemory, pParams) vmrangeMapTo_DISPATCH(pVirtualMemory, pParams) +#define vmrangeControl(pMemory, pCallContext, pParams) vmrangeControl_DISPATCH(pMemory, pCallContext, pParams) +#define vmrangeGetMemInterMapParams(pMemory, pParams) vmrangeGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define vmrangeUnmap(pMemory, pCallContext, pCpuMapping) vmrangeUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define vmrangeGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) vmrangeGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define vmrangeShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) vmrangeShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define vmrangeGetMemoryMappingDescriptor(pMemory, ppMemDesc) vmrangeGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define vmrangeControlFilter(pResource, pCallContext, pParams) vmrangeControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define vmrangeGetRefCount(pResource) vmrangeGetRefCount_DISPATCH(pResource) +#define vmrangeAddAdditionalDependants(pClient, pResource, pReference) vmrangeAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define vmrangeCanCopy(pStandardMemory) vmrangeCanCopy_DISPATCH(pStandardMemory) +#define vmrangeControl_Prologue(pResource, pCallContext, pParams) vmrangeControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define vmrangeIsReady(pMemory) vmrangeIsReady_DISPATCH(pMemory) +#define vmrangeUnmapFrom(pVirtualMemory, pParams) vmrangeUnmapFrom_DISPATCH(pVirtualMemory, pParams) +#define vmrangeCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) vmrangeCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define vmrangePreDestruct(pResource) vmrangePreDestruct_DISPATCH(pResource) +#define vmrangeControl_Epilogue(pResource, pCallContext, pParams) vmrangeControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define vmrangeControlLookup(pResource, pParams, ppEntry) vmrangeControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define vmrangeMap(pMemory, pCallContext, pParams, pCpuMapping) vmrangeMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define vmrangeAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) vmrangeAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static 
inline NV_STATUS vmrangeCheckMemInterUnmap_DISPATCH(struct VirtualMemoryRange *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__vmrangeCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS vmrangeMapTo_DISPATCH(struct VirtualMemoryRange *pVirtualMemory, struct RS_RES_MAP_TO_PARAMS *pParams) { + return pVirtualMemory->__vmrangeMapTo__(pVirtualMemory, pParams); +} + +static inline NV_STATUS vmrangeControl_DISPATCH(struct VirtualMemoryRange *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__vmrangeControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS vmrangeGetMemInterMapParams_DISPATCH(struct VirtualMemoryRange *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__vmrangeGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS vmrangeUnmap_DISPATCH(struct VirtualMemoryRange *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__vmrangeUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS vmrangeGetMapAddrSpace_DISPATCH(struct VirtualMemoryRange *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__vmrangeGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool vmrangeShareCallback_DISPATCH(struct VirtualMemoryRange *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__vmrangeShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS vmrangeGetMemoryMappingDescriptor_DISPATCH(struct VirtualMemoryRange *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__vmrangeGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS vmrangeControlFilter_DISPATCH(struct VirtualMemoryRange *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__vmrangeControlFilter__(pResource, pCallContext, pParams); +} + +static inline NvU32 vmrangeGetRefCount_DISPATCH(struct VirtualMemoryRange *pResource) { + return pResource->__vmrangeGetRefCount__(pResource); +} + +static inline void vmrangeAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct VirtualMemoryRange *pResource, RsResourceRef *pReference) { + pResource->__vmrangeAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvBool vmrangeCanCopy_DISPATCH(struct VirtualMemoryRange *pStandardMemory) { + return pStandardMemory->__vmrangeCanCopy__(pStandardMemory); +} + +static inline NV_STATUS vmrangeControl_Prologue_DISPATCH(struct VirtualMemoryRange *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__vmrangeControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS vmrangeIsReady_DISPATCH(struct VirtualMemoryRange *pMemory) { + return pMemory->__vmrangeIsReady__(pMemory); +} + +static inline NV_STATUS vmrangeUnmapFrom_DISPATCH(struct VirtualMemoryRange *pVirtualMemory, struct RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pVirtualMemory->__vmrangeUnmapFrom__(pVirtualMemory, pParams); +} + +static inline NV_STATUS vmrangeCheckCopyPermissions_DISPATCH(struct VirtualMemoryRange *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__vmrangeCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void 
vmrangePreDestruct_DISPATCH(struct VirtualMemoryRange *pResource) { + pResource->__vmrangePreDestruct__(pResource); +} + +static inline void vmrangeControl_Epilogue_DISPATCH(struct VirtualMemoryRange *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__vmrangeControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS vmrangeControlLookup_DISPATCH(struct VirtualMemoryRange *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__vmrangeControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS vmrangeMap_DISPATCH(struct VirtualMemoryRange *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__vmrangeMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool vmrangeAccessCallback_DISPATCH(struct VirtualMemoryRange *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__vmrangeAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS vmrangeConstruct_IMPL(struct VirtualMemoryRange *arg_pVmRange, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_vmrangeConstruct(arg_pVmRange, arg_pCallContext, arg_pParams) vmrangeConstruct_IMPL(arg_pVmRange, arg_pCallContext, arg_pParams) +#undef PRIVATE_FIELD + + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VIRT_MEM_RANGE_NVOC_H_ diff --git a/src/nvidia/generated/g_virtual_mem_nvoc.c b/src/nvidia/generated/g_virtual_mem_nvoc.c new file mode 100644 index 000000000..e648f8c8f --- /dev/null +++ b/src/nvidia/generated/g_virtual_mem_nvoc.c @@ -0,0 +1,336 @@ +#define NVOC_VIRTUAL_MEM_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_virtual_mem_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x2aea5c = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VirtualMemory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_StandardMemory; + +void __nvoc_init_VirtualMemory(VirtualMemory*); +void __nvoc_init_funcTable_VirtualMemory(VirtualMemory*); +NV_STATUS __nvoc_ctor_VirtualMemory(VirtualMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_VirtualMemory(VirtualMemory*); +void __nvoc_dtor_VirtualMemory(VirtualMemory*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_VirtualMemory; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemory_VirtualMemory = { + /*pClassDef=*/ &__nvoc_class_def_VirtualMemory, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_VirtualMemory, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemory_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemory, 
__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemory_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemory_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemory_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemory_Memory = { + /*pClassDef=*/ &__nvoc_class_def_Memory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemory, __nvoc_base_StandardMemory.__nvoc_base_Memory), +}; + +static const struct NVOC_RTTI __nvoc_rtti_VirtualMemory_StandardMemory = { + /*pClassDef=*/ &__nvoc_class_def_StandardMemory, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(VirtualMemory, __nvoc_base_StandardMemory), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_VirtualMemory = { + /*numRelatives=*/ 7, + /*relatives=*/ { + &__nvoc_rtti_VirtualMemory_VirtualMemory, + &__nvoc_rtti_VirtualMemory_StandardMemory, + &__nvoc_rtti_VirtualMemory_Memory, + &__nvoc_rtti_VirtualMemory_RmResource, + &__nvoc_rtti_VirtualMemory_RmResourceCommon, + &__nvoc_rtti_VirtualMemory_RsResource, + &__nvoc_rtti_VirtualMemory_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_VirtualMemory = +{ + /*classInfo=*/ { + /*size=*/ sizeof(VirtualMemory), + /*classId=*/ classId(VirtualMemory), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "VirtualMemory", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_VirtualMemory, + /*pCastInfo=*/ &__nvoc_castinfo_VirtualMemory, + /*pExportInfo=*/ &__nvoc_export_info_VirtualMemory +}; + +static NV_STATUS __nvoc_thunk_VirtualMemory_resMapTo(struct RsResource *pVirtualMemory, struct RS_RES_MAP_TO_PARAMS *pParams) { + return virtmemMapTo((struct VirtualMemory *)(((unsigned char *)pVirtualMemory) - __nvoc_rtti_VirtualMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_VirtualMemory_resUnmapFrom(struct RsResource *pVirtualMemory, struct RS_RES_UNMAP_FROM_PARAMS *pParams) { + return virtmemUnmapFrom((struct VirtualMemory *)(((unsigned char *)pVirtualMemory) - __nvoc_rtti_VirtualMemory_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_virtmemCheckMemInterUnmap(struct VirtualMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_Memory_virtmemControl(struct VirtualMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset), pCallContext, pParams); +} + +static NV_STATUS 
__nvoc_thunk_Memory_virtmemUnmap(struct VirtualMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_Memory_virtmemGetMemInterMapParams(struct VirtualMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_Memory_virtmemGetMemoryMappingDescriptor(struct VirtualMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_Memory_virtmemGetMapAddrSpace(struct VirtualMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvBool __nvoc_thunk_RmResource_virtmemShareCallback(struct VirtualMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_RsResource_virtmemControlFilter(struct VirtualMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_virtmemAddAdditionalDependants(struct RsClient *pClient, struct VirtualMemory *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_virtmemGetRefCount(struct VirtualMemory *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_virtmemControl_Prologue(struct VirtualMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RmResource.offset), pCallContext, pParams); +} + +static NvBool __nvoc_thunk_StandardMemory_virtmemCanCopy(struct VirtualMemory *pStandardMemory) { + return stdmemCanCopy((struct StandardMemory *)(((unsigned char *)pStandardMemory) + __nvoc_rtti_VirtualMemory_StandardMemory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_virtmemIsReady(struct VirtualMemory *pMemory) { + return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset)); +} + +static NV_STATUS __nvoc_thunk_Memory_virtmemCheckCopyPermissions(struct VirtualMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset), pDstGpu, hDstClientNvBool); +} + +static void __nvoc_thunk_RsResource_virtmemPreDestruct(struct VirtualMemory *pResource) { + 
resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RsResource.offset)); +} + +static void __nvoc_thunk_RmResource_virtmemControl_Epilogue(struct VirtualMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_virtmemControlLookup(struct VirtualMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_Memory_virtmemMap(struct VirtualMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_VirtualMemory_Memory.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_virtmemAccessCallback(struct VirtualMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_VirtualMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +const struct NVOC_EXPORT_INFO __nvoc_export_info_VirtualMemory = +{ + /*numEntries=*/ 0, + /*pExportEntries=*/ 0 +}; + +void __nvoc_dtor_StandardMemory(StandardMemory*); +void __nvoc_dtor_VirtualMemory(VirtualMemory *pThis) { + __nvoc_virtmemDestruct(pThis); + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_VirtualMemory(VirtualMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); +} + +NV_STATUS __nvoc_ctor_StandardMemory(StandardMemory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_VirtualMemory(VirtualMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_StandardMemory(&pThis->__nvoc_base_StandardMemory, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VirtualMemory_fail_StandardMemory; + __nvoc_init_dataField_VirtualMemory(pThis); + + status = __nvoc_virtmemConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_VirtualMemory_fail__init; + goto __nvoc_ctor_VirtualMemory_exit; // Success + +__nvoc_ctor_VirtualMemory_fail__init: + __nvoc_dtor_StandardMemory(&pThis->__nvoc_base_StandardMemory); +__nvoc_ctor_VirtualMemory_fail_StandardMemory: +__nvoc_ctor_VirtualMemory_exit: + + return status; +} + +static void __nvoc_init_funcTable_VirtualMemory_1(VirtualMemory *pThis) { + PORT_UNREFERENCED_VARIABLE(pThis); + + pThis->__virtmemMapTo__ = &virtmemMapTo_IMPL; + + pThis->__virtmemUnmapFrom__ = &virtmemUnmapFrom_IMPL; + + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resMapTo__ = &__nvoc_thunk_VirtualMemory_resMapTo; + + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmapFrom__ = &__nvoc_thunk_VirtualMemory_resUnmapFrom; + + pThis->__virtmemCheckMemInterUnmap__ = &__nvoc_thunk_Memory_virtmemCheckMemInterUnmap; + + pThis->__virtmemControl__ = &__nvoc_thunk_Memory_virtmemControl; 
+ + pThis->__virtmemUnmap__ = &__nvoc_thunk_Memory_virtmemUnmap; + + pThis->__virtmemGetMemInterMapParams__ = &__nvoc_thunk_Memory_virtmemGetMemInterMapParams; + + pThis->__virtmemGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_virtmemGetMemoryMappingDescriptor; + + pThis->__virtmemGetMapAddrSpace__ = &__nvoc_thunk_Memory_virtmemGetMapAddrSpace; + + pThis->__virtmemShareCallback__ = &__nvoc_thunk_RmResource_virtmemShareCallback; + + pThis->__virtmemControlFilter__ = &__nvoc_thunk_RsResource_virtmemControlFilter; + + pThis->__virtmemAddAdditionalDependants__ = &__nvoc_thunk_RsResource_virtmemAddAdditionalDependants; + + pThis->__virtmemGetRefCount__ = &__nvoc_thunk_RsResource_virtmemGetRefCount; + + pThis->__virtmemControl_Prologue__ = &__nvoc_thunk_RmResource_virtmemControl_Prologue; + + pThis->__virtmemCanCopy__ = &__nvoc_thunk_StandardMemory_virtmemCanCopy; + + pThis->__virtmemIsReady__ = &__nvoc_thunk_Memory_virtmemIsReady; + + pThis->__virtmemCheckCopyPermissions__ = &__nvoc_thunk_Memory_virtmemCheckCopyPermissions; + + pThis->__virtmemPreDestruct__ = &__nvoc_thunk_RsResource_virtmemPreDestruct; + + pThis->__virtmemControl_Epilogue__ = &__nvoc_thunk_RmResource_virtmemControl_Epilogue; + + pThis->__virtmemControlLookup__ = &__nvoc_thunk_RsResource_virtmemControlLookup; + + pThis->__virtmemMap__ = &__nvoc_thunk_Memory_virtmemMap; + + pThis->__virtmemAccessCallback__ = &__nvoc_thunk_RmResource_virtmemAccessCallback; +} + +void __nvoc_init_funcTable_VirtualMemory(VirtualMemory *pThis) { + __nvoc_init_funcTable_VirtualMemory_1(pThis); +} + +void __nvoc_init_StandardMemory(StandardMemory*); +void __nvoc_init_VirtualMemory(VirtualMemory *pThis) { + pThis->__nvoc_pbase_VirtualMemory = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource; + pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory; + pThis->__nvoc_pbase_StandardMemory = &pThis->__nvoc_base_StandardMemory; + __nvoc_init_StandardMemory(&pThis->__nvoc_base_StandardMemory); + __nvoc_init_funcTable_VirtualMemory(pThis); +} + +NV_STATUS __nvoc_objCreate_VirtualMemory(VirtualMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + VirtualMemory *pThis; + + pThis = portMemAllocNonPaged(sizeof(VirtualMemory)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(VirtualMemory)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_VirtualMemory); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_StandardMemory.__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + __nvoc_init_VirtualMemory(pThis); + status = 
__nvoc_ctor_VirtualMemory(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_VirtualMemory_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_VirtualMemory_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_VirtualMemory(VirtualMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_VirtualMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_virtual_mem_nvoc.h b/src/nvidia/generated/g_virtual_mem_nvoc.h new file mode 100644 index 000000000..4f71a4759 --- /dev/null +++ b/src/nvidia/generated/g_virtual_mem_nvoc.h @@ -0,0 +1,269 @@ +#ifndef _G_VIRTUAL_MEM_NVOC_H_ +#define _G_VIRTUAL_MEM_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_virtual_mem_nvoc.h" + +#ifndef _VIRTUAL_MEMORY_H_ +#define _VIRTUAL_MEMORY_H_ + +#include "mem_mgr/standard_mem.h" +#include "gpu/mem_mgr/heap_base.h" + +/*! + * Allocator for NV50_MEMORY_VIRTUAL class. + * + * Describes a range of typed virtual memory. Used as + * a target space for RmMapMemoryDma. + * + * The NV01_MEMORY_VIRTUAL subclass class is typeless. 
+ */ +#ifdef NVOC_VIRTUAL_MEM_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct VirtualMemory { + const struct NVOC_RTTI *__nvoc_rtti; + struct StandardMemory __nvoc_base_StandardMemory; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct Memory *__nvoc_pbase_Memory; + struct StandardMemory *__nvoc_pbase_StandardMemory; + struct VirtualMemory *__nvoc_pbase_VirtualMemory; + NV_STATUS (*__virtmemMapTo__)(struct VirtualMemory *, struct RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__virtmemUnmapFrom__)(struct VirtualMemory *, struct RS_RES_UNMAP_FROM_PARAMS *); + NV_STATUS (*__virtmemCheckMemInterUnmap__)(struct VirtualMemory *, NvBool); + NV_STATUS (*__virtmemControl__)(struct VirtualMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__virtmemUnmap__)(struct VirtualMemory *, CALL_CONTEXT *, RsCpuMapping *); + NV_STATUS (*__virtmemGetMemInterMapParams__)(struct VirtualMemory *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__virtmemGetMemoryMappingDescriptor__)(struct VirtualMemory *, MEMORY_DESCRIPTOR **); + NV_STATUS (*__virtmemGetMapAddrSpace__)(struct VirtualMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvBool (*__virtmemShareCallback__)(struct VirtualMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__virtmemControlFilter__)(struct VirtualMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__virtmemAddAdditionalDependants__)(struct RsClient *, struct VirtualMemory *, RsResourceRef *); + NvU32 (*__virtmemGetRefCount__)(struct VirtualMemory *); + NV_STATUS (*__virtmemControl_Prologue__)(struct VirtualMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NvBool (*__virtmemCanCopy__)(struct VirtualMemory *); + NV_STATUS (*__virtmemIsReady__)(struct VirtualMemory *); + NV_STATUS (*__virtmemCheckCopyPermissions__)(struct VirtualMemory *, struct OBJGPU *, NvHandle); + void (*__virtmemPreDestruct__)(struct VirtualMemory *); + void (*__virtmemControl_Epilogue__)(struct VirtualMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__virtmemControlLookup__)(struct VirtualMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__virtmemMap__)(struct VirtualMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *); + NvBool (*__virtmemAccessCallback__)(struct VirtualMemory *, struct RsClient *, void *, RsAccessRight); + NvHandle hVASpace; + NvBool bAllowUnicastMapping; + NvBool bReserveVaOnAlloc; + NvBool bFlaVAS; + NvBool bRpcAlloc; + NODE *pDmaMappingList; + NvBool bOptimizePageTableMempoolUsage; +}; + +#ifndef __NVOC_CLASS_VirtualMemory_TYPEDEF__ +#define __NVOC_CLASS_VirtualMemory_TYPEDEF__ +typedef struct VirtualMemory VirtualMemory; +#endif /* __NVOC_CLASS_VirtualMemory_TYPEDEF__ */ + +#ifndef __nvoc_class_id_VirtualMemory +#define __nvoc_class_id_VirtualMemory 0x2aea5c +#endif /* __nvoc_class_id_VirtualMemory */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_VirtualMemory; + +#define __staticCast_VirtualMemory(pThis) \ + ((pThis)->__nvoc_pbase_VirtualMemory) + +#ifdef __nvoc_virtual_mem_h_disabled +#define __dynamicCast_VirtualMemory(pThis) ((VirtualMemory*)NULL) +#else //__nvoc_virtual_mem_h_disabled +#define __dynamicCast_VirtualMemory(pThis) \ + 
((VirtualMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(VirtualMemory))) +#endif //__nvoc_virtual_mem_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_VirtualMemory(VirtualMemory**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_VirtualMemory(VirtualMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_VirtualMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_VirtualMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define virtmemMapTo(pVirtualMemory, pParams) virtmemMapTo_DISPATCH(pVirtualMemory, pParams) +#define virtmemUnmapFrom(pVirtualMemory, pParams) virtmemUnmapFrom_DISPATCH(pVirtualMemory, pParams) +#define virtmemCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) virtmemCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided) +#define virtmemControl(pMemory, pCallContext, pParams) virtmemControl_DISPATCH(pMemory, pCallContext, pParams) +#define virtmemUnmap(pMemory, pCallContext, pCpuMapping) virtmemUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping) +#define virtmemGetMemInterMapParams(pMemory, pParams) virtmemGetMemInterMapParams_DISPATCH(pMemory, pParams) +#define virtmemGetMemoryMappingDescriptor(pMemory, ppMemDesc) virtmemGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc) +#define virtmemGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) virtmemGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace) +#define virtmemShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) virtmemShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy) +#define virtmemControlFilter(pResource, pCallContext, pParams) virtmemControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define virtmemAddAdditionalDependants(pClient, pResource, pReference) virtmemAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define virtmemGetRefCount(pResource) virtmemGetRefCount_DISPATCH(pResource) +#define virtmemControl_Prologue(pResource, pCallContext, pParams) virtmemControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define virtmemCanCopy(pStandardMemory) virtmemCanCopy_DISPATCH(pStandardMemory) +#define virtmemIsReady(pMemory) virtmemIsReady_DISPATCH(pMemory) +#define virtmemCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) virtmemCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool) +#define virtmemPreDestruct(pResource) virtmemPreDestruct_DISPATCH(pResource) +#define virtmemControl_Epilogue(pResource, pCallContext, pParams) virtmemControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define virtmemControlLookup(pResource, pParams, ppEntry) virtmemControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define virtmemMap(pMemory, pCallContext, pParams, pCpuMapping) virtmemMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping) +#define virtmemAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) virtmemAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +NV_STATUS virtmemMapTo_IMPL(struct VirtualMemory *pVirtualMemory, struct RS_RES_MAP_TO_PARAMS *pParams); + +static inline NV_STATUS virtmemMapTo_DISPATCH(struct VirtualMemory *pVirtualMemory, struct RS_RES_MAP_TO_PARAMS *pParams) { + return pVirtualMemory->__virtmemMapTo__(pVirtualMemory, pParams); +} + +NV_STATUS virtmemUnmapFrom_IMPL(struct VirtualMemory *pVirtualMemory, struct 
RS_RES_UNMAP_FROM_PARAMS *pParams); + +static inline NV_STATUS virtmemUnmapFrom_DISPATCH(struct VirtualMemory *pVirtualMemory, struct RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pVirtualMemory->__virtmemUnmapFrom__(pVirtualMemory, pParams); +} + +static inline NV_STATUS virtmemCheckMemInterUnmap_DISPATCH(struct VirtualMemory *pMemory, NvBool bSubdeviceHandleProvided) { + return pMemory->__virtmemCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided); +} + +static inline NV_STATUS virtmemControl_DISPATCH(struct VirtualMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pMemory->__virtmemControl__(pMemory, pCallContext, pParams); +} + +static inline NV_STATUS virtmemUnmap_DISPATCH(struct VirtualMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) { + return pMemory->__virtmemUnmap__(pMemory, pCallContext, pCpuMapping); +} + +static inline NV_STATUS virtmemGetMemInterMapParams_DISPATCH(struct VirtualMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pMemory->__virtmemGetMemInterMapParams__(pMemory, pParams); +} + +static inline NV_STATUS virtmemGetMemoryMappingDescriptor_DISPATCH(struct VirtualMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) { + return pMemory->__virtmemGetMemoryMappingDescriptor__(pMemory, ppMemDesc); +} + +static inline NV_STATUS virtmemGetMapAddrSpace_DISPATCH(struct VirtualMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pMemory->__virtmemGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvBool virtmemShareCallback_DISPATCH(struct VirtualMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pResource->__virtmemShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS virtmemControlFilter_DISPATCH(struct VirtualMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__virtmemControlFilter__(pResource, pCallContext, pParams); +} + +static inline void virtmemAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct VirtualMemory *pResource, RsResourceRef *pReference) { + pResource->__virtmemAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 virtmemGetRefCount_DISPATCH(struct VirtualMemory *pResource) { + return pResource->__virtmemGetRefCount__(pResource); +} + +static inline NV_STATUS virtmemControl_Prologue_DISPATCH(struct VirtualMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__virtmemControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NvBool virtmemCanCopy_DISPATCH(struct VirtualMemory *pStandardMemory) { + return pStandardMemory->__virtmemCanCopy__(pStandardMemory); +} + +static inline NV_STATUS virtmemIsReady_DISPATCH(struct VirtualMemory *pMemory) { + return pMemory->__virtmemIsReady__(pMemory); +} + +static inline NV_STATUS virtmemCheckCopyPermissions_DISPATCH(struct VirtualMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) { + return pMemory->__virtmemCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool); +} + +static inline void virtmemPreDestruct_DISPATCH(struct VirtualMemory *pResource) { + pResource->__virtmemPreDestruct__(pResource); +} + +static inline void virtmemControl_Epilogue_DISPATCH(struct VirtualMemory *pResource, CALL_CONTEXT *pCallContext, 
struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__virtmemControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS virtmemControlLookup_DISPATCH(struct VirtualMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__virtmemControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS virtmemMap_DISPATCH(struct VirtualMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) { + return pMemory->__virtmemMap__(pMemory, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool virtmemAccessCallback_DISPATCH(struct VirtualMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__virtmemAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +NV_STATUS virtmemConstruct_IMPL(struct VirtualMemory *arg_pVirtualMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams); +#define __nvoc_virtmemConstruct(arg_pVirtualMemory, arg_pCallContext, arg_pParams) virtmemConstruct_IMPL(arg_pVirtualMemory, arg_pCallContext, arg_pParams) +void virtmemDestruct_IMPL(struct VirtualMemory *pVirtualMemory); +#define __nvoc_virtmemDestruct(pVirtualMemory) virtmemDestruct_IMPL(pVirtualMemory) +NV_STATUS virtmemReserveMempool_IMPL(struct VirtualMemory *pVirtualMemory, struct OBJGPU *arg0, NvHandle hDevice, NvU64 size, NvU32 pageSizeMask); +#ifdef __nvoc_virtual_mem_h_disabled +static inline NV_STATUS virtmemReserveMempool(struct VirtualMemory *pVirtualMemory, struct OBJGPU *arg0, NvHandle hDevice, NvU64 size, NvU32 pageSizeMask) { + NV_ASSERT_FAILED_PRECOMP("VirtualMemory was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_virtual_mem_h_disabled +#define virtmemReserveMempool(pVirtualMemory, arg0, hDevice, size, pageSizeMask) virtmemReserveMempool_IMPL(pVirtualMemory, arg0, hDevice, size, pageSizeMask) +#endif //__nvoc_virtual_mem_h_disabled + +NvBool virtmemMatchesVASpace_IMPL(struct VirtualMemory *pVirtualMemory, NvHandle hClient, NvHandle hVASpace); +#ifdef __nvoc_virtual_mem_h_disabled +static inline NvBool virtmemMatchesVASpace(struct VirtualMemory *pVirtualMemory, NvHandle hClient, NvHandle hVASpace) { + NV_ASSERT_FAILED_PRECOMP("VirtualMemory was disabled!"); + return NV_FALSE; +} +#else //__nvoc_virtual_mem_h_disabled +#define virtmemMatchesVASpace(pVirtualMemory, hClient, hVASpace) virtmemMatchesVASpace_IMPL(pVirtualMemory, hClient, hVASpace) +#endif //__nvoc_virtual_mem_h_disabled + +NV_STATUS virtmemGetByHandleAndDevice_IMPL(struct RsClient *pClient, NvHandle hMemory, NvHandle hDevice, struct VirtualMemory **ppVirtualMemory); +#define virtmemGetByHandleAndDevice(pClient, hMemory, hDevice, ppVirtualMemory) virtmemGetByHandleAndDevice_IMPL(pClient, hMemory, hDevice, ppVirtualMemory) +void virtmemGetAddressAndSize_IMPL(struct VirtualMemory *arg0, NvU64 *pVAddr, NvU64 *pSize); +#define virtmemGetAddressAndSize(arg0, pVAddr, pSize) virtmemGetAddressAndSize_IMPL(arg0, pVAddr, pSize) +#undef PRIVATE_FIELD + + +NV_STATUS virtmemAllocResources(OBJGPU *pGpu, struct MemoryManager *pMemoryManager, MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo); + +#endif + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_VIRTUAL_MEM_NVOC_H_ diff --git a/src/nvidia/generated/g_zbc_api_nvoc.c b/src/nvidia/generated/g_zbc_api_nvoc.c new file mode 100644 index 000000000..486cdd9bf --- 
/dev/null +++ b/src/nvidia/generated/g_zbc_api_nvoc.c @@ -0,0 +1,485 @@ +#define NVOC_ZBC_API_H_PRIVATE_ACCESS_ALLOWED +#include "nvoc/runtime.h" +#include "nvoc/rtti.h" +#include "nvtypes.h" +#include "nvport/nvport.h" +#include "nvport/inline/util_valist.h" +#include "utils/nvassert.h" +#include "g_zbc_api_nvoc.h" + +#ifdef DEBUG +char __nvoc_class_id_uniqueness_check_0x397ee3 = 1; +#endif + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ZbcApi; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource; + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource; + +void __nvoc_init_ZbcApi(ZbcApi*, RmHalspecOwner* ); +void __nvoc_init_funcTable_ZbcApi(ZbcApi*, RmHalspecOwner* ); +NV_STATUS __nvoc_ctor_ZbcApi(ZbcApi*, RmHalspecOwner* , struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +void __nvoc_init_dataField_ZbcApi(ZbcApi*, RmHalspecOwner* ); +void __nvoc_dtor_ZbcApi(ZbcApi*); +extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ZbcApi; + +static const struct NVOC_RTTI __nvoc_rtti_ZbcApi_ZbcApi = { + /*pClassDef=*/ &__nvoc_class_def_ZbcApi, + /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ZbcApi, + /*offset=*/ 0, +}; + +static const struct NVOC_RTTI __nvoc_rtti_ZbcApi_Object = { + /*pClassDef=*/ &__nvoc_class_def_Object, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ZbcApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ZbcApi_RsResource = { + /*pClassDef=*/ &__nvoc_class_def_RsResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ZbcApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ZbcApi_RmResourceCommon = { + /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ZbcApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ZbcApi_RmResource = { + /*pClassDef=*/ &__nvoc_class_def_RmResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ZbcApi, __nvoc_base_GpuResource.__nvoc_base_RmResource), +}; + +static const struct NVOC_RTTI __nvoc_rtti_ZbcApi_GpuResource = { + /*pClassDef=*/ &__nvoc_class_def_GpuResource, + /*dtor=*/ &__nvoc_destructFromBase, + /*offset=*/ NV_OFFSETOF(ZbcApi, __nvoc_base_GpuResource), +}; + +static const struct NVOC_CASTINFO __nvoc_castinfo_ZbcApi = { + /*numRelatives=*/ 6, + /*relatives=*/ { + &__nvoc_rtti_ZbcApi_ZbcApi, + &__nvoc_rtti_ZbcApi_GpuResource, + &__nvoc_rtti_ZbcApi_RmResource, + &__nvoc_rtti_ZbcApi_RmResourceCommon, + &__nvoc_rtti_ZbcApi_RsResource, + &__nvoc_rtti_ZbcApi_Object, + }, +}; + +const struct NVOC_CLASS_DEF __nvoc_class_def_ZbcApi = +{ + /*classInfo=*/ { + /*size=*/ sizeof(ZbcApi), + /*classId=*/ classId(ZbcApi), + /*providerId=*/ &__nvoc_rtti_provider, +#if NV_PRINTF_STRINGS_ALLOWED + /*name=*/ "ZbcApi", +#endif + }, + /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ZbcApi, + /*pCastInfo=*/ &__nvoc_castinfo_ZbcApi, + /*pExportInfo=*/ &__nvoc_export_info_ZbcApi +}; + +static NvBool __nvoc_thunk_GpuResource_zbcapiShareCallback(struct ZbcApi *pGpuResource, struct RsClient *pInvokingClient, struct 
RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ZbcApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy); +} + +static NV_STATUS __nvoc_thunk_GpuResource_zbcapiControl(struct ZbcApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ZbcApi_GpuResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_zbcapiUnmap(struct ZbcApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ZbcApi_GpuResource.offset), pCallContext, pCpuMapping); +} + +static NV_STATUS __nvoc_thunk_RmResource_zbcapiGetMemInterMapParams(struct ZbcApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ZbcApi_RmResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_zbcapiGetMemoryMappingDescriptor(struct ZbcApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ZbcApi_RmResource.offset), ppMemDesc); +} + +static NV_STATUS __nvoc_thunk_GpuResource_zbcapiGetMapAddrSpace(struct ZbcApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ZbcApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace); +} + +static NvHandle __nvoc_thunk_GpuResource_zbcapiGetInternalObjectHandle(struct ZbcApi *pGpuResource) { + return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ZbcApi_GpuResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_zbcapiControlFilter(struct ZbcApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RsResource.offset), pCallContext, pParams); +} + +static void __nvoc_thunk_RsResource_zbcapiAddAdditionalDependants(struct RsClient *pClient, struct ZbcApi *pResource, RsResourceRef *pReference) { + resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RsResource.offset), pReference); +} + +static NvU32 __nvoc_thunk_RsResource_zbcapiGetRefCount(struct ZbcApi *pResource) { + return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RmResource_zbcapiCheckMemInterUnmap(struct ZbcApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ZbcApi_RmResource.offset), bSubdeviceHandleProvided); +} + +static NV_STATUS __nvoc_thunk_RsResource_zbcapiMapTo(struct ZbcApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RsResource.offset), pParams); +} + +static NV_STATUS __nvoc_thunk_RmResource_zbcapiControl_Prologue(struct ZbcApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + 
return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_GpuResource_zbcapiGetRegBaseOffsetAndSize(struct ZbcApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ZbcApi_GpuResource.offset), pGpu, pOffset, pSize); +} + +static NvBool __nvoc_thunk_RsResource_zbcapiCanCopy(struct ZbcApi *pResource) { + return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_GpuResource_zbcapiInternalControlForward(struct ZbcApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ZbcApi_GpuResource.offset), command, pParams, size); +} + +static void __nvoc_thunk_RsResource_zbcapiPreDestruct(struct ZbcApi *pResource) { + resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RsResource.offset)); +} + +static NV_STATUS __nvoc_thunk_RsResource_zbcapiUnmapFrom(struct ZbcApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RsResource.offset), pParams); +} + +static void __nvoc_thunk_RmResource_zbcapiControl_Epilogue(struct ZbcApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RmResource.offset), pCallContext, pParams); +} + +static NV_STATUS __nvoc_thunk_RsResource_zbcapiControlLookup(struct ZbcApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RsResource.offset), pParams, ppEntry); +} + +static NV_STATUS __nvoc_thunk_GpuResource_zbcapiMap(struct ZbcApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_ZbcApi_GpuResource.offset), pCallContext, pParams, pCpuMapping); +} + +static NvBool __nvoc_thunk_RmResource_zbcapiAccessCallback(struct ZbcApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ZbcApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight); +} + +#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG) +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0) +#endif + +static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ZbcApi[] = +{ + { /* [0] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) zbcapiCtrlCmdSetZbcColorClear_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90960101u, + /*paramSize=*/ sizeof(NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ZbcApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "zbcapiCtrlCmdSetZbcColorClear" +#endif + }, + { /* [1] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + 
/*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) zbcapiCtrlCmdSetZbcDepthClear_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90960102u, + /*paramSize=*/ sizeof(NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ZbcApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "zbcapiCtrlCmdSetZbcDepthClear" +#endif + }, + { /* [2] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) zbcapiCtrlCmdGetZbcClearTable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90960103u, + /*paramSize=*/ sizeof(NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ZbcApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "zbcapiCtrlCmdGetZbcClearTable" +#endif + }, + { /* [3] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) zbcapiCtrlCmdSetZbcClearTable_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90960104u, + /*paramSize=*/ sizeof(NV9096_CTRL_SET_ZBC_CLEAR_TABLE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ZbcApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "zbcapiCtrlCmdSetZbcClearTable" +#endif + }, + { /* [4] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) zbcapiCtrlCmdSetZbcStencilClear_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90960105u, + /*paramSize=*/ sizeof(NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ZbcApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "zbcapiCtrlCmdSetZbcStencilClear" +#endif + }, + { /* [5] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) zbcapiCtrlCmdGetZbcClearTableSize_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + /*flags=*/ 0x210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90960106u, + /*paramSize=*/ sizeof(NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ZbcApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "zbcapiCtrlCmdGetZbcClearTableSize" +#endif + }, + { /* [6] */ +#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*pFunc=*/ (void (*)(void)) NULL, +#else + /*pFunc=*/ (void (*)(void)) zbcapiCtrlCmdGetZbcClearTableEntry_IMPL, +#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + /*flags=*/ 0x2210u, + /*accessRight=*/0x0u, + /*methodId=*/ 0x90960107u, + /*paramSize=*/ sizeof(NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS), + /*pClassInfo=*/ &(__nvoc_class_def_ZbcApi.classInfo), +#if NV_PRINTF_STRINGS_ALLOWED + /*func=*/ "zbcapiCtrlCmdGetZbcClearTableEntry" +#endif + }, + +}; + +const struct NVOC_EXPORT_INFO __nvoc_export_info_ZbcApi = +{ + /*numEntries=*/ 7, + /*pExportEntries=*/ __nvoc_exported_method_def_ZbcApi +}; + +void __nvoc_dtor_GpuResource(GpuResource*); +void __nvoc_dtor_ZbcApi(ZbcApi *pThis) { + __nvoc_zbcapiDestruct(pThis); + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); + PORT_UNREFERENCED_VARIABLE(pThis); +} + +void __nvoc_init_dataField_ZbcApi(ZbcApi *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal 
= &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); +} + +NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *); +NV_STATUS __nvoc_ctor_ZbcApi(ZbcApi *pThis, RmHalspecOwner *pRmhalspecowner, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status = NV_OK; + status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ZbcApi_fail_GpuResource; + __nvoc_init_dataField_ZbcApi(pThis, pRmhalspecowner); + + status = __nvoc_zbcapiConstruct(pThis, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_ctor_ZbcApi_fail__init; + goto __nvoc_ctor_ZbcApi_exit; // Success + +__nvoc_ctor_ZbcApi_fail__init: + __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource); +__nvoc_ctor_ZbcApi_fail_GpuResource: +__nvoc_ctor_ZbcApi_exit: + + return status; +} + +static void __nvoc_init_funcTable_ZbcApi_1(ZbcApi *pThis, RmHalspecOwner *pRmhalspecowner) { + RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal; + const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx; + PORT_UNREFERENCED_VARIABLE(pThis); + PORT_UNREFERENCED_VARIABLE(pRmhalspecowner); + PORT_UNREFERENCED_VARIABLE(rmVariantHal); + PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx); + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__zbcapiCtrlCmdSetZbcColorClear__ = &zbcapiCtrlCmdSetZbcColorClear_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__zbcapiCtrlCmdSetZbcDepthClear__ = &zbcapiCtrlCmdSetZbcDepthClear_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__zbcapiCtrlCmdGetZbcClearTable__ = &zbcapiCtrlCmdGetZbcClearTable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__zbcapiCtrlCmdSetZbcClearTable__ = &zbcapiCtrlCmdSetZbcClearTable_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__zbcapiCtrlCmdSetZbcStencilClear__ = &zbcapiCtrlCmdSetZbcStencilClear_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x210u) + pThis->__zbcapiCtrlCmdGetZbcClearTableSize__ = &zbcapiCtrlCmdGetZbcClearTableSize_IMPL; +#endif + +#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x2210u) + pThis->__zbcapiCtrlCmdGetZbcClearTableEntry__ = &zbcapiCtrlCmdGetZbcClearTableEntry_IMPL; +#endif + + pThis->__zbcapiShareCallback__ = &__nvoc_thunk_GpuResource_zbcapiShareCallback; + + pThis->__zbcapiControl__ = &__nvoc_thunk_GpuResource_zbcapiControl; + + pThis->__zbcapiUnmap__ = &__nvoc_thunk_GpuResource_zbcapiUnmap; + + pThis->__zbcapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_zbcapiGetMemInterMapParams; + + pThis->__zbcapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_zbcapiGetMemoryMappingDescriptor; + + pThis->__zbcapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_zbcapiGetMapAddrSpace; + + pThis->__zbcapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_zbcapiGetInternalObjectHandle; + + pThis->__zbcapiControlFilter__ = &__nvoc_thunk_RsResource_zbcapiControlFilter; + + pThis->__zbcapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_zbcapiAddAdditionalDependants; + + pThis->__zbcapiGetRefCount__ = 
&__nvoc_thunk_RsResource_zbcapiGetRefCount; + + pThis->__zbcapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_zbcapiCheckMemInterUnmap; + + pThis->__zbcapiMapTo__ = &__nvoc_thunk_RsResource_zbcapiMapTo; + + pThis->__zbcapiControl_Prologue__ = &__nvoc_thunk_RmResource_zbcapiControl_Prologue; + + pThis->__zbcapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_zbcapiGetRegBaseOffsetAndSize; + + pThis->__zbcapiCanCopy__ = &__nvoc_thunk_RsResource_zbcapiCanCopy; + + pThis->__zbcapiInternalControlForward__ = &__nvoc_thunk_GpuResource_zbcapiInternalControlForward; + + pThis->__zbcapiPreDestruct__ = &__nvoc_thunk_RsResource_zbcapiPreDestruct; + + pThis->__zbcapiUnmapFrom__ = &__nvoc_thunk_RsResource_zbcapiUnmapFrom; + + pThis->__zbcapiControl_Epilogue__ = &__nvoc_thunk_RmResource_zbcapiControl_Epilogue; + + pThis->__zbcapiControlLookup__ = &__nvoc_thunk_RsResource_zbcapiControlLookup; + + pThis->__zbcapiMap__ = &__nvoc_thunk_GpuResource_zbcapiMap; + + pThis->__zbcapiAccessCallback__ = &__nvoc_thunk_RmResource_zbcapiAccessCallback; +} + +void __nvoc_init_funcTable_ZbcApi(ZbcApi *pThis, RmHalspecOwner *pRmhalspecowner) { + __nvoc_init_funcTable_ZbcApi_1(pThis, pRmhalspecowner); +} + +void __nvoc_init_GpuResource(GpuResource*); +void __nvoc_init_ZbcApi(ZbcApi *pThis, RmHalspecOwner *pRmhalspecowner) { + pThis->__nvoc_pbase_ZbcApi = pThis; + pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object; + pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource; + pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon; + pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource; + pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource; + __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource); + __nvoc_init_funcTable_ZbcApi(pThis, pRmhalspecowner); +} + +NV_STATUS __nvoc_objCreate_ZbcApi(ZbcApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) { + NV_STATUS status; + Object *pParentObj; + ZbcApi *pThis; + RmHalspecOwner *pRmhalspecowner; + + pThis = portMemAllocNonPaged(sizeof(ZbcApi)); + if (pThis == NULL) return NV_ERR_NO_MEMORY; + + portMemSet(pThis, 0, sizeof(ZbcApi)); + + __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ZbcApi); + + if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY)) + { + pParentObj = dynamicCast(pParent, Object); + objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object); + } + else + { + pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL; + } + + if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL) + pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent); + NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT); + + __nvoc_init_ZbcApi(pThis, pRmhalspecowner); + status = __nvoc_ctor_ZbcApi(pThis, pRmhalspecowner, arg_pCallContext, arg_pParams); + if (status != NV_OK) goto __nvoc_objCreate_ZbcApi_cleanup; + + *ppThis = pThis; + return NV_OK; + +__nvoc_objCreate_ZbcApi_cleanup: + // do not call destructors here since the constructor already called them + portMemFree(pThis); + return status; +} + +NV_STATUS __nvoc_objCreateDynamic_ZbcApi(ZbcApi **ppThis, 
Dynamic *pParent, NvU32 createFlags, va_list args) { + NV_STATUS status; + struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *); + struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *); + + status = __nvoc_objCreate_ZbcApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams); + + return status; +} + diff --git a/src/nvidia/generated/g_zbc_api_nvoc.h b/src/nvidia/generated/g_zbc_api_nvoc.h new file mode 100644 index 000000000..ddfd0084d --- /dev/null +++ b/src/nvidia/generated/g_zbc_api_nvoc.h @@ -0,0 +1,310 @@ +#ifndef _G_ZBC_API_NVOC_H_ +#define _G_ZBC_API_NVOC_H_ +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_zbc_api_nvoc.h" + +#ifndef _ZBCAPI_H_ +#define _ZBCAPI_H_ + +#include "gpu/gpu_resource.h" +#include "gpu/gpu_halspec.h" +#include "rmapi/control.h" +#include "ctrl/ctrl9096.h" + +/*! 
+ * RM internal class representing GF100_ZBC_CLEAR + */ +#ifdef NVOC_ZBC_API_H_PRIVATE_ACCESS_ALLOWED +#define PRIVATE_FIELD(x) x +#else +#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x) +#endif +struct ZbcApi { + const struct NVOC_RTTI *__nvoc_rtti; + struct GpuResource __nvoc_base_GpuResource; + struct Object *__nvoc_pbase_Object; + struct RsResource *__nvoc_pbase_RsResource; + struct RmResourceCommon *__nvoc_pbase_RmResourceCommon; + struct RmResource *__nvoc_pbase_RmResource; + struct GpuResource *__nvoc_pbase_GpuResource; + struct ZbcApi *__nvoc_pbase_ZbcApi; + NV_STATUS (*__zbcapiCtrlCmdSetZbcColorClear__)(struct ZbcApi *, NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS *); + NV_STATUS (*__zbcapiCtrlCmdSetZbcDepthClear__)(struct ZbcApi *, NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS *); + NV_STATUS (*__zbcapiCtrlCmdGetZbcClearTable__)(struct ZbcApi *, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS *); + NV_STATUS (*__zbcapiCtrlCmdSetZbcClearTable__)(struct ZbcApi *, NV9096_CTRL_SET_ZBC_CLEAR_TABLE_PARAMS *); + NV_STATUS (*__zbcapiCtrlCmdSetZbcStencilClear__)(struct ZbcApi *, NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS *); + NV_STATUS (*__zbcapiCtrlCmdGetZbcClearTableSize__)(struct ZbcApi *, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS *); + NV_STATUS (*__zbcapiCtrlCmdGetZbcClearTableEntry__)(struct ZbcApi *, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS *); + NvBool (*__zbcapiShareCallback__)(struct ZbcApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *); + NV_STATUS (*__zbcapiControl__)(struct ZbcApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__zbcapiUnmap__)(struct ZbcApi *, struct CALL_CONTEXT *, struct RsCpuMapping *); + NV_STATUS (*__zbcapiGetMemInterMapParams__)(struct ZbcApi *, RMRES_MEM_INTER_MAP_PARAMS *); + NV_STATUS (*__zbcapiGetMemoryMappingDescriptor__)(struct ZbcApi *, struct MEMORY_DESCRIPTOR **); + NV_STATUS (*__zbcapiGetMapAddrSpace__)(struct ZbcApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *); + NvHandle (*__zbcapiGetInternalObjectHandle__)(struct ZbcApi *); + NV_STATUS (*__zbcapiControlFilter__)(struct ZbcApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + void (*__zbcapiAddAdditionalDependants__)(struct RsClient *, struct ZbcApi *, RsResourceRef *); + NvU32 (*__zbcapiGetRefCount__)(struct ZbcApi *); + NV_STATUS (*__zbcapiCheckMemInterUnmap__)(struct ZbcApi *, NvBool); + NV_STATUS (*__zbcapiMapTo__)(struct ZbcApi *, RS_RES_MAP_TO_PARAMS *); + NV_STATUS (*__zbcapiControl_Prologue__)(struct ZbcApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__zbcapiGetRegBaseOffsetAndSize__)(struct ZbcApi *, struct OBJGPU *, NvU32 *, NvU32 *); + NvBool (*__zbcapiCanCopy__)(struct ZbcApi *); + NV_STATUS (*__zbcapiInternalControlForward__)(struct ZbcApi *, NvU32, void *, NvU32); + void (*__zbcapiPreDestruct__)(struct ZbcApi *); + NV_STATUS (*__zbcapiUnmapFrom__)(struct ZbcApi *, RS_RES_UNMAP_FROM_PARAMS *); + void (*__zbcapiControl_Epilogue__)(struct ZbcApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *); + NV_STATUS (*__zbcapiControlLookup__)(struct ZbcApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **); + NV_STATUS (*__zbcapiMap__)(struct ZbcApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *); + NvBool (*__zbcapiAccessCallback__)(struct ZbcApi *, struct RsClient *, void *, RsAccessRight); + NvBool bZbcUsed; +}; + +#ifndef __NVOC_CLASS_ZbcApi_TYPEDEF__ +#define __NVOC_CLASS_ZbcApi_TYPEDEF__ +typedef struct 
ZbcApi ZbcApi; +#endif /* __NVOC_CLASS_ZbcApi_TYPEDEF__ */ + +#ifndef __nvoc_class_id_ZbcApi +#define __nvoc_class_id_ZbcApi 0x397ee3 +#endif /* __nvoc_class_id_ZbcApi */ + +extern const struct NVOC_CLASS_DEF __nvoc_class_def_ZbcApi; + +#define __staticCast_ZbcApi(pThis) \ + ((pThis)->__nvoc_pbase_ZbcApi) + +#ifdef __nvoc_zbc_api_h_disabled +#define __dynamicCast_ZbcApi(pThis) ((ZbcApi*)NULL) +#else //__nvoc_zbc_api_h_disabled +#define __dynamicCast_ZbcApi(pThis) \ + ((ZbcApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ZbcApi))) +#endif //__nvoc_zbc_api_h_disabled + + +NV_STATUS __nvoc_objCreateDynamic_ZbcApi(ZbcApi**, Dynamic*, NvU32, va_list); + +NV_STATUS __nvoc_objCreate_ZbcApi(ZbcApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams); +#define __objCreate_ZbcApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \ + __nvoc_objCreate_ZbcApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams) + +#define zbcapiCtrlCmdSetZbcColorClear(pZbcApi, pSetZBCClearParams) zbcapiCtrlCmdSetZbcColorClear_DISPATCH(pZbcApi, pSetZBCClearParams) +#define zbcapiCtrlCmdSetZbcDepthClear(pZbcApi, pSetZBCClearParams) zbcapiCtrlCmdSetZbcDepthClear_DISPATCH(pZbcApi, pSetZBCClearParams) +#define zbcapiCtrlCmdGetZbcClearTable(pZbcApi, pGetZBCClearTableParams) zbcapiCtrlCmdGetZbcClearTable_DISPATCH(pZbcApi, pGetZBCClearTableParams) +#define zbcapiCtrlCmdSetZbcClearTable(pZbcApi, pSetZBCClearTableParams) zbcapiCtrlCmdSetZbcClearTable_DISPATCH(pZbcApi, pSetZBCClearTableParams) +#define zbcapiCtrlCmdSetZbcStencilClear(pZbcApi, pSetZBCClearParams) zbcapiCtrlCmdSetZbcStencilClear_DISPATCH(pZbcApi, pSetZBCClearParams) +#define zbcapiCtrlCmdGetZbcClearTableSize(pZbcApi, pGetZBCClearTableSizeParams) zbcapiCtrlCmdGetZbcClearTableSize_DISPATCH(pZbcApi, pGetZBCClearTableSizeParams) +#define zbcapiCtrlCmdGetZbcClearTableEntry(pZbcApi, pGetZBCClearTableEntryParams) zbcapiCtrlCmdGetZbcClearTableEntry_DISPATCH(pZbcApi, pGetZBCClearTableEntryParams) +#define zbcapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) zbcapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) +#define zbcapiControl(pGpuResource, pCallContext, pParams) zbcapiControl_DISPATCH(pGpuResource, pCallContext, pParams) +#define zbcapiUnmap(pGpuResource, pCallContext, pCpuMapping) zbcapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping) +#define zbcapiGetMemInterMapParams(pRmResource, pParams) zbcapiGetMemInterMapParams_DISPATCH(pRmResource, pParams) +#define zbcapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) zbcapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc) +#define zbcapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) zbcapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace) +#define zbcapiGetInternalObjectHandle(pGpuResource) zbcapiGetInternalObjectHandle_DISPATCH(pGpuResource) +#define zbcapiControlFilter(pResource, pCallContext, pParams) zbcapiControlFilter_DISPATCH(pResource, pCallContext, pParams) +#define zbcapiAddAdditionalDependants(pClient, pResource, pReference) zbcapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference) +#define zbcapiGetRefCount(pResource) zbcapiGetRefCount_DISPATCH(pResource) +#define zbcapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) zbcapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided) +#define zbcapiMapTo(pResource, pParams) 
zbcapiMapTo_DISPATCH(pResource, pParams) +#define zbcapiControl_Prologue(pResource, pCallContext, pParams) zbcapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams) +#define zbcapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) zbcapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize) +#define zbcapiCanCopy(pResource) zbcapiCanCopy_DISPATCH(pResource) +#define zbcapiInternalControlForward(pGpuResource, command, pParams, size) zbcapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size) +#define zbcapiPreDestruct(pResource) zbcapiPreDestruct_DISPATCH(pResource) +#define zbcapiUnmapFrom(pResource, pParams) zbcapiUnmapFrom_DISPATCH(pResource, pParams) +#define zbcapiControl_Epilogue(pResource, pCallContext, pParams) zbcapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams) +#define zbcapiControlLookup(pResource, pParams, ppEntry) zbcapiControlLookup_DISPATCH(pResource, pParams, ppEntry) +#define zbcapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) zbcapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping) +#define zbcapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) zbcapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight) +static inline NV_STATUS zbcapiConstructHal_56cd7a(struct ZbcApi *pZbcApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + return NV_OK; +} + +#ifdef __nvoc_zbc_api_h_disabled +static inline NV_STATUS zbcapiConstructHal(struct ZbcApi *pZbcApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) { + NV_ASSERT_FAILED_PRECOMP("ZbcApi was disabled!"); + return NV_ERR_NOT_SUPPORTED; +} +#else //__nvoc_zbc_api_h_disabled +#define zbcapiConstructHal(pZbcApi, pCallContext, pParams) zbcapiConstructHal_56cd7a(pZbcApi, pCallContext, pParams) +#endif //__nvoc_zbc_api_h_disabled + +#define zbcapiConstructHal_HAL(pZbcApi, pCallContext, pParams) zbcapiConstructHal(pZbcApi, pCallContext, pParams) + +static inline void zbcapiDestruct_b3696a(struct ZbcApi *pZbcApi) { + return; +} + +#define __nvoc_zbcapiDestruct(pZbcApi) zbcapiDestruct_b3696a(pZbcApi) +NV_STATUS zbcapiCtrlCmdSetZbcColorClear_IMPL(struct ZbcApi *pZbcApi, NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS *pSetZBCClearParams); + +static inline NV_STATUS zbcapiCtrlCmdSetZbcColorClear_DISPATCH(struct ZbcApi *pZbcApi, NV9096_CTRL_SET_ZBC_COLOR_CLEAR_PARAMS *pSetZBCClearParams) { + return pZbcApi->__zbcapiCtrlCmdSetZbcColorClear__(pZbcApi, pSetZBCClearParams); +} + +NV_STATUS zbcapiCtrlCmdSetZbcDepthClear_IMPL(struct ZbcApi *pZbcApi, NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS *pSetZBCClearParams); + +static inline NV_STATUS zbcapiCtrlCmdSetZbcDepthClear_DISPATCH(struct ZbcApi *pZbcApi, NV9096_CTRL_SET_ZBC_DEPTH_CLEAR_PARAMS *pSetZBCClearParams) { + return pZbcApi->__zbcapiCtrlCmdSetZbcDepthClear__(pZbcApi, pSetZBCClearParams); +} + +NV_STATUS zbcapiCtrlCmdGetZbcClearTable_IMPL(struct ZbcApi *pZbcApi, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS *pGetZBCClearTableParams); + +static inline NV_STATUS zbcapiCtrlCmdGetZbcClearTable_DISPATCH(struct ZbcApi *pZbcApi, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_PARAMS *pGetZBCClearTableParams) { + return pZbcApi->__zbcapiCtrlCmdGetZbcClearTable__(pZbcApi, pGetZBCClearTableParams); +} + +NV_STATUS zbcapiCtrlCmdSetZbcClearTable_IMPL(struct ZbcApi *pZbcApi, NV9096_CTRL_SET_ZBC_CLEAR_TABLE_PARAMS *pSetZBCClearTableParams); + +static inline NV_STATUS zbcapiCtrlCmdSetZbcClearTable_DISPATCH(struct ZbcApi *pZbcApi, 
NV9096_CTRL_SET_ZBC_CLEAR_TABLE_PARAMS *pSetZBCClearTableParams) { + return pZbcApi->__zbcapiCtrlCmdSetZbcClearTable__(pZbcApi, pSetZBCClearTableParams); +} + +NV_STATUS zbcapiCtrlCmdSetZbcStencilClear_IMPL(struct ZbcApi *pZbcApi, NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS *pSetZBCClearParams); + +static inline NV_STATUS zbcapiCtrlCmdSetZbcStencilClear_DISPATCH(struct ZbcApi *pZbcApi, NV9096_CTRL_SET_ZBC_STENCIL_CLEAR_PARAMS *pSetZBCClearParams) { + return pZbcApi->__zbcapiCtrlCmdSetZbcStencilClear__(pZbcApi, pSetZBCClearParams); +} + +NV_STATUS zbcapiCtrlCmdGetZbcClearTableSize_IMPL(struct ZbcApi *pZbcApi, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS *pGetZBCClearTableSizeParams); + +static inline NV_STATUS zbcapiCtrlCmdGetZbcClearTableSize_DISPATCH(struct ZbcApi *pZbcApi, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_SIZE_PARAMS *pGetZBCClearTableSizeParams) { + return pZbcApi->__zbcapiCtrlCmdGetZbcClearTableSize__(pZbcApi, pGetZBCClearTableSizeParams); +} + +NV_STATUS zbcapiCtrlCmdGetZbcClearTableEntry_IMPL(struct ZbcApi *pZbcApi, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS *pGetZBCClearTableEntryParams); + +static inline NV_STATUS zbcapiCtrlCmdGetZbcClearTableEntry_DISPATCH(struct ZbcApi *pZbcApi, NV9096_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY_PARAMS *pGetZBCClearTableEntryParams) { + return pZbcApi->__zbcapiCtrlCmdGetZbcClearTableEntry__(pZbcApi, pGetZBCClearTableEntryParams); +} + +static inline NvBool zbcapiShareCallback_DISPATCH(struct ZbcApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) { + return pGpuResource->__zbcapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy); +} + +static inline NV_STATUS zbcapiControl_DISPATCH(struct ZbcApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pGpuResource->__zbcapiControl__(pGpuResource, pCallContext, pParams); +} + +static inline NV_STATUS zbcapiUnmap_DISPATCH(struct ZbcApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__zbcapiUnmap__(pGpuResource, pCallContext, pCpuMapping); +} + +static inline NV_STATUS zbcapiGetMemInterMapParams_DISPATCH(struct ZbcApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) { + return pRmResource->__zbcapiGetMemInterMapParams__(pRmResource, pParams); +} + +static inline NV_STATUS zbcapiGetMemoryMappingDescriptor_DISPATCH(struct ZbcApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) { + return pRmResource->__zbcapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc); +} + +static inline NV_STATUS zbcapiGetMapAddrSpace_DISPATCH(struct ZbcApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) { + return pGpuResource->__zbcapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace); +} + +static inline NvHandle zbcapiGetInternalObjectHandle_DISPATCH(struct ZbcApi *pGpuResource) { + return pGpuResource->__zbcapiGetInternalObjectHandle__(pGpuResource); +} + +static inline NV_STATUS zbcapiControlFilter_DISPATCH(struct ZbcApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__zbcapiControlFilter__(pResource, pCallContext, pParams); +} + +static inline void zbcapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ZbcApi *pResource, RsResourceRef *pReference) { + pResource->__zbcapiAddAdditionalDependants__(pClient, pResource, pReference); +} + +static inline NvU32 
zbcapiGetRefCount_DISPATCH(struct ZbcApi *pResource) { + return pResource->__zbcapiGetRefCount__(pResource); +} + +static inline NV_STATUS zbcapiCheckMemInterUnmap_DISPATCH(struct ZbcApi *pRmResource, NvBool bSubdeviceHandleProvided) { + return pRmResource->__zbcapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided); +} + +static inline NV_STATUS zbcapiMapTo_DISPATCH(struct ZbcApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) { + return pResource->__zbcapiMapTo__(pResource, pParams); +} + +static inline NV_STATUS zbcapiControl_Prologue_DISPATCH(struct ZbcApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + return pResource->__zbcapiControl_Prologue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS zbcapiGetRegBaseOffsetAndSize_DISPATCH(struct ZbcApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) { + return pGpuResource->__zbcapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize); +} + +static inline NvBool zbcapiCanCopy_DISPATCH(struct ZbcApi *pResource) { + return pResource->__zbcapiCanCopy__(pResource); +} + +static inline NV_STATUS zbcapiInternalControlForward_DISPATCH(struct ZbcApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) { + return pGpuResource->__zbcapiInternalControlForward__(pGpuResource, command, pParams, size); +} + +static inline void zbcapiPreDestruct_DISPATCH(struct ZbcApi *pResource) { + pResource->__zbcapiPreDestruct__(pResource); +} + +static inline NV_STATUS zbcapiUnmapFrom_DISPATCH(struct ZbcApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) { + return pResource->__zbcapiUnmapFrom__(pResource, pParams); +} + +static inline void zbcapiControl_Epilogue_DISPATCH(struct ZbcApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) { + pResource->__zbcapiControl_Epilogue__(pResource, pCallContext, pParams); +} + +static inline NV_STATUS zbcapiControlLookup_DISPATCH(struct ZbcApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) { + return pResource->__zbcapiControlLookup__(pResource, pParams, ppEntry); +} + +static inline NV_STATUS zbcapiMap_DISPATCH(struct ZbcApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) { + return pGpuResource->__zbcapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping); +} + +static inline NvBool zbcapiAccessCallback_DISPATCH(struct ZbcApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) { + return pResource->__zbcapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight); +} + +static inline NV_STATUS __nvoc_zbcapiConstruct(struct ZbcApi *arg_pZbcApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams) { + return zbcapiConstructHal(arg_pZbcApi, arg_pCallContext, arg_pParams); +} + +#undef PRIVATE_FIELD + + +#endif // _ZBCAPI_H_ + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _G_ZBC_API_NVOC_H_ diff --git a/src/nvidia/generated/rmconfig.h b/src/nvidia/generated/rmconfig.h new file mode 100644 index 000000000..9af1bc5fa --- /dev/null +++ b/src/nvidia/generated/rmconfig.h @@ -0,0 +1,820 @@ +// This file is automatically generated by rmconfig - DO NOT EDIT! +// +// defines to indicate enabled/disabled for all chips, features, classes, engines, and apis. 
+// +// Profile: shipping-gpus-openrm +// Template: templates/gt_rmconfig.h +// +// Chips: TU10X, GA100, GA102, GA103, GA104, GA106, GA107 +// + +#ifndef _RMCFG_H_ +#define _RMCFG_H_ + + +// +// CHIP families - enabled or disabled +// +#define RMCFG_CHIP_GF10X 0 +#define RMCFG_CHIP_GF11X 0 +#define RMCFG_CHIP_GF10XF 0 +#define RMCFG_CHIP_GK10X 0 +#define RMCFG_CHIP_GK11X 0 +#define RMCFG_CHIP_GK20X 0 +#define RMCFG_CHIP_GM10X 0 +#define RMCFG_CHIP_GM20X 0 +#define RMCFG_CHIP_GP10X 0 +#define RMCFG_CHIP_GV10X 0 +#define RMCFG_CHIP_GV11X 0 +#define RMCFG_CHIP_TU10X 1 +#define RMCFG_CHIP_GA10X 1 +#define RMCFG_CHIP_GA10XF 0 +#define RMCFG_CHIP_T12X 0 +#define RMCFG_CHIP_T13X 0 +#define RMCFG_CHIP_T21X 0 +#define RMCFG_CHIP_T18X 0 +#define RMCFG_CHIP_T19X 0 +#define RMCFG_CHIP_T23XG 0 +#define RMCFG_CHIP_T23XD 0 +#define RMCFG_CHIP_SIMS 0 + + +// +// CHIPS - enabled or disabled +// +#define RMCFG_CHIP_GM107 0 +#define RMCFG_CHIP_GM108 0 + +#define RMCFG_CHIP_GM200 0 +#define RMCFG_CHIP_GM204 0 +#define RMCFG_CHIP_GM206 0 + +#define RMCFG_CHIP_GP100 0 +#define RMCFG_CHIP_GP102 0 +#define RMCFG_CHIP_GP104 0 +#define RMCFG_CHIP_GP106 0 +#define RMCFG_CHIP_GP107 0 +#define RMCFG_CHIP_GP108 0 + +#define RMCFG_CHIP_GV100 0 + +#define RMCFG_CHIP_TU102 1 +#define RMCFG_CHIP_TU104 1 +#define RMCFG_CHIP_TU106 1 +#define RMCFG_CHIP_TU116 1 +#define RMCFG_CHIP_TU117 1 + +#define RMCFG_CHIP_GA100 1 +#define RMCFG_CHIP_GA102 1 +#define RMCFG_CHIP_GA103 1 +#define RMCFG_CHIP_GA104 1 +#define RMCFG_CHIP_GA106 1 +#define RMCFG_CHIP_GA107 1 +#define RMCFG_CHIP_GA10B 0 + +#define RMCFG_CHIP_GA102F 0 + +#define RMCFG_CHIP_T234 0 + +#define RMCFG_CHIP_T234D 0 + +#define RMCFG_CHIP_AMODEL 0 + +// +// Obsolete CHIPS +// +#define RMCFG_CHIP_GF100 0 +#define RMCFG_CHIP_GF100B 0 +#define RMCFG_CHIP_GF104 0 +#define RMCFG_CHIP_GF104B 0 +#define RMCFG_CHIP_GF106 0 +#define RMCFG_CHIP_GF106B 0 +#define RMCFG_CHIP_GF108 0 +#define RMCFG_CHIP_GF110D 0 +#define RMCFG_CHIP_GF110 0 +#define RMCFG_CHIP_GF117 0 +#define RMCFG_CHIP_GF118 0 +#define RMCFG_CHIP_GF119 0 +#define RMCFG_CHIP_GF110F 0 +#define RMCFG_CHIP_GF110F2 0 +#define RMCFG_CHIP_GF110F3 0 +#define RMCFG_CHIP_GK104 0 +#define RMCFG_CHIP_GK106 0 +#define RMCFG_CHIP_GK107 0 +#define RMCFG_CHIP_GK20A 0 +#define RMCFG_CHIP_GK110 0 +#define RMCFG_CHIP_GK110B 0 +#define RMCFG_CHIP_GK110C 0 +#define RMCFG_CHIP_GK208 0 +#define RMCFG_CHIP_GK208S 0 +#define RMCFG_CHIP_GV11B 0 +#define RMCFG_CHIP_T001_FERMI_NOT_EXIST 0 +#define RMCFG_CHIP_T124 0 +#define RMCFG_CHIP_T132 0 +#define RMCFG_CHIP_T210 0 +#define RMCFG_CHIP_T186 0 +#define RMCFG_CHIP_T194 0 +#define RMCFG_CHIP_T002_TURING_NOT_EXIST 0 + + +// +// CHIP aliases +// +#define RMCFG_CHIP_CLASSIC_GPUS 1 +#define RMCFG_CHIP_dFERMI 0 +#define RMCFG_CHIP_DFERMI 0 +#define RMCFG_CHIP_FERMI 0 +#define RMCFG_CHIP_FERMI_CLASSIC_GPUS 0 +#define RMCFG_CHIP_ALL 1 +#define RMCFG_CHIP_ALL_CLASSIC_GPUS 1 +#define RMCFG_CHIP_ALL_CHIPS 1 +#define RMCFG_CHIP_ALL_CHIPS_CLASSIC_GPUS 1 +#define RMCFG_CHIP_DISPLAYLESS 1 +#define RMCFG_CHIP_dKEPLER 0 +#define RMCFG_CHIP_DKEPLER 0 +#define RMCFG_CHIP_KEPLER 0 +#define RMCFG_CHIP_KEPLER_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dMAXWELL 0 +#define RMCFG_CHIP_DMAXWELL 0 +#define RMCFG_CHIP_MAXWELL 0 +#define RMCFG_CHIP_MAXWELL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dPASCAL 0 +#define RMCFG_CHIP_DPASCAL 0 +#define RMCFG_CHIP_PASCAL 0 +#define RMCFG_CHIP_PASCAL_CLASSIC_GPUS 0 +#define RMCFG_CHIP_dVOLTA 0 +#define RMCFG_CHIP_DVOLTA 0 +#define RMCFG_CHIP_VOLTA 0 +#define RMCFG_CHIP_VOLTA_CLASSIC_GPUS 0 
+#define RMCFG_CHIP_dTURING 1 +#define RMCFG_CHIP_DTURING 1 +#define RMCFG_CHIP_TURING 1 +#define RMCFG_CHIP_TURING_CLASSIC_GPUS 1 +#define RMCFG_CHIP_dAMPERE 1 +#define RMCFG_CHIP_DAMPERE 1 +#define RMCFG_CHIP_AMPERE 1 +#define RMCFG_CHIP_AMPERE_CLASSIC_GPUS 1 +#define RMCFG_CHIP_TEGRA_DGPU_AMPERE 0 +#define RMCFG_CHIP_TEGRA_DGPU 0 +#define RMCFG_CHIP_DFPGA 0 +#define RMCFG_CHIP_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_FERMI_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TEGRA 0 +#define RMCFG_CHIP_TEGRA_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_ALL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tKEPLER 0 +#define RMCFG_CHIP_TKEPLER 0 +#define RMCFG_CHIP_KEPLER_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tMAXWELL 0 +#define RMCFG_CHIP_TMAXWELL 0 +#define RMCFG_CHIP_MAXWELL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tPASCAL 0 +#define RMCFG_CHIP_TPASCAL 0 +#define RMCFG_CHIP_PASCAL_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tVOLTA 0 +#define RMCFG_CHIP_TVOLTA 0 +#define RMCFG_CHIP_VOLTA_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TURING_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_T23X 0 +#define RMCFG_CHIP_T23X_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_tAMPERE 0 +#define RMCFG_CHIP_TAMPERE 0 +#define RMCFG_CHIP_AMPERE_TEGRA_BIG_GPUS 0 +#define RMCFG_CHIP_TEGRA_NVDISP_GPUS 0 +#define RMCFG_CHIP_T23X_TEGRA_NVDISP_GPUS 0 +#define RMCFG_CHIP_TEGRA_TEGRA_NVDISP_GPUS 0 +#define RMCFG_CHIP_ALL_TEGRA_NVDISP_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS_TEGRA_NVDISP_GPUS 0 +#define RMCFG_CHIP_SIMULATION_GPUS 0 +#define RMCFG_CHIP_ALL_SIMULATION_GPUS 0 +#define RMCFG_CHIP_ALL_CHIPS_SIMULATION_GPUS 0 + + +// +// Features - enabled or disabled +// +#define RMCFG_FEATURE_PLATFORM_UNKNOWN 0 // Running on an unknown platform +#define RMCFG_FEATURE_PLATFORM_WINDOWS 0 // Running on Windows +#define RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM 0 // Running on Windows LDDM +#define RMCFG_FEATURE_PLATFORM_WINDOWS_VISTA 0 // aka PLATFORM_WINDOWS_LDDM +#define RMCFG_FEATURE_PLATFORM_UNIX 1 // Running on Unix +#define RMCFG_FEATURE_PLATFORM_DCE 0 // Running on Display Control Engine (DCE, an ARM Cortex R5 on Tegra) +#define RMCFG_FEATURE_PLATFORM_DOS 0 // Running on DOS/DJGPP +#define RMCFG_FEATURE_PLATFORM_SIM 0 // Running on Simulator +#define RMCFG_FEATURE_PLATFORM_MODS 0 // Running as part of MODS +#define RMCFG_FEATURE_PLATFORM_GSP 0 // Running as part of GSP Firmware +#define RMCFG_FEATURE_PLATFORM_MODS_WINDOWS 0 // Running as part of MODS on Windows +#define RMCFG_FEATURE_PLATFORM_MODS_UNIX 0 // Running as part of MODS on UNIX +#define RMCFG_FEATURE_PLATFORM_MODS_DOS 0 // Running as part of MODS on DOS +#define RMCFG_FEATURE_PLATFORM_MODS_DJGPP 0 // Running as part of MODS on DJGPP +#define RMCFG_FEATURE_ARCH_UNKNOWN 0 // unknown arch +#define RMCFG_FEATURE_ARCH_X86 0 // Intel x86, 32bit +#define RMCFG_FEATURE_ARCH_X64 0 // Intel 64bit +#define RMCFG_FEATURE_ARCH_RISCV64 0 // RISCV, 64bit +#define RMCFG_FEATURE_ARCH_AMD64 1 // AMD, 64bit +#define RMCFG_FEATURE_ARCH_IA64 0 // Itanium ia64 +#define RMCFG_FEATURE_ARCH_PPC 0 // Power PC +#define RMCFG_FEATURE_ARCH_PPC64LE 0 // 64-bit PPC little-endian +#define RMCFG_FEATURE_ARCH_ARM 0 // ARM +#define RMCFG_FEATURE_ARCH_ARM_V7 0 // ARM v7 +#define RMCFG_FEATURE_ARCH_AARCH64 0 // AArch64 +#define RMCFG_FEATURE_ARCH_ARGON 0 // argon?? +#define RMCFG_FEATURE_ARCH_AM926 0 // am926?? 
+#define RMCFG_FEATURE_RMCORE_BASE 1 // RMCORE Base +#define RMCFG_FEATURE_ORIN_PHYSICAL_RM 1 // Physical layer of RM, disabled only on Orin +#define RMCFG_FEATURE_KERNEL_RM 1 // Kernel layer of RM +#define RMCFG_FEATURE_NOTEBOOK 0 // Notebook support +#define RMCFG_FEATURE_MXM 0 // MXM Module Support (all versions) +#define RMCFG_FEATURE_ONSEMI_NB7NQ621M 1 // ONSEMI_NB7NQ621M Redriver Support +#define RMCFG_FEATURE_DCB_0X 1 // Fallback DCB routines +#define RMCFG_FEATURE_DCB_4X 1 // DCB4x (used on G8x and later) +#define RMCFG_FEATURE_XAPI 0 // Use XAPI for resman api calls +#define RMCFG_FEATURE_RMAPI_GRAVEYARD 1 // Use RMAPI Graveyard to translate deprecated APIs +#define RMCFG_FEATURE_HOTPLUG_POLLING 0 // HotPlug polling +#define RMCFG_FEATURE_MULTI_GPU 1 // Multiple GPUs managed by same RM instance +#define RMCFG_FEATURE_RM_BASIC_LOCK_MODEL 1 // Support for Basic Lock Model in RM +#define RMCFG_FEATURE_VIRTUALIZATION 0 // Detection and Guest RM Implementation within a Virtualization environment +#define RMCFG_FEATURE_PRESILICON 0 // For builds that can run on simulated or emulated GPU +#define RMCFG_FEATURE_GSP_CLIENT_RM 1 // GSP client RM +#define RMCFG_FEATURE_DCE_CLIENT_RM 0 // DCE client RM +#define RMCFG_FEATURE_PROTOBUF 1 // Protobuf data encoding for OCA data dumps +#define RMCFG_FEATURE_RELEASE_BUILD 1 // Release Build +#define RMCFG_FEATURE_VERIF_ONLY_CONTROLS 0 // Allow verify only control cmds to be used on verif builds(determined by this feature) +#define RMCFG_FEATURE_PAGE_RETIREMENT 1 // Offlining bad memory pages from the FB heap +#define RMCFG_FEATURE_PMA 1 // Physical memory allocator +#define RMCFG_FEATURE_DEVINIT_SCRIPT 0 // VBIOS scripting engine for sharing register sequences +#define RMCFG_FEATURE_OLD_DAC 1 // Legacy display support with dac code +#define RMCFG_FEATURE_CRC_POLLING 1 // GPU supports CRC Polling +#define RMCFG_FEATURE_DSI_INFO 0 // DSI information structures support +#define RMCFG_FEATURE_CLK2 1 // Tracks Clocks 2.0 project +#define RMCFG_FEATURE_GPU_LOW_POWER 1 // GPU low power Feature on Kepler and later +#define RMCFG_FEATURE_PC_VENDOR_SPECIFIC 1 // Vendor-specific code and features for PC +#define RMCFG_FEATURE_PEX_RESET_RECOVERY 1 // Enables the PEX Reset Recovery feature, which uses root/bridge port SBR to reset GPU when lost +#define RMCFG_FEATURE_HWBC 1 // Enables support bridge chip devices +#define RMCFG_FEATURE_SPARSE_TEXTURE 1 // Enables optimization and defaults for sparse texture +#define RMCFG_FEATURE_CFGEX_PERF_MODE 1 // legacy support for performance modes +#define RMCFG_FEATURE_TILED_RESOURCE_COMPR 1 +#define RMCFG_FEATURE_SYNC_GPU_BOOST 1 // Synchronized GPU Boost +#define RMCFG_FEATURE_NVSR_ON_NVDISPLAY 1 // NVSR on Nvdisplay +#define RMCFG_FEATURE_MANUAL_TRIGGER_BA_DMA_MODE 0 // Support for manually actuated BA DMA mode data collection. +#define RMCFG_FEATURE_RM_DRIVEN_BA_DMA_MODE 0 // Support for RM-driven BA DMA mode data collection. 
+#define RMCFG_FEATURE_VBLANK_CALLBACK 1 // Vblank callback functionality within RM +#define RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY 0 // Tegra SOC NvDisplay Driver +#define RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY_MINIMAL 0 // Enable only those parts of display code which are needed for Tegra SOC NvDisplay Driver +#define RMCFG_FEATURE_HEAD_REGIONAL_CRC 0 // Display Head Regional CRC support + + + +// +// Classes - enabled or disabled +// +#define RMCFG_CLASS_NV01_ROOT 1 +#define RMCFG_CLASS_NV1_ROOT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV01_NULL_OBJECT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV1_NULL_OBJECT 1 // aka NV01_ROOT +#define RMCFG_CLASS_NV01_ROOT_NON_PRIV 1 +#define RMCFG_CLASS_NV1_ROOT_NON_PRIV 1 // aka NV01_ROOT_NON_PRIV +#define RMCFG_CLASS_NV01_ROOT_CLIENT 1 +#define RMCFG_CLASS_FABRIC_MANAGER_SESSION 1 +#define RMCFG_CLASS_NV0020_GPU_MANAGEMENT 1 +#define RMCFG_CLASS_NV01_DEVICE_0 1 +#define RMCFG_CLASS_NV20_SUBDEVICE_0 1 +#define RMCFG_CLASS_NV2081_BINAPI 1 +#define RMCFG_CLASS_NV2082_BINAPI_PRIVILEGED 1 +#define RMCFG_CLASS_NV20_SUBDEVICE_DIAG 1 +#define RMCFG_CLASS_NV01_CONTEXT_DMA 1 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM 1 +#define RMCFG_CLASS_NV1_MEMORY_SYSTEM 1 // aka NV01_MEMORY_SYSTEM +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_PRIVILEGED 1 +#define RMCFG_CLASS_NV1_MEMORY_LOCAL_PRIVILEGED 1 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV01_MEMORY_PRIVILEGED 1 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV1_MEMORY_PRIVILEGED 1 // aka NV01_MEMORY_LOCAL_PRIVILEGED +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_USER 1 +#define RMCFG_CLASS_NV1_MEMORY_LOCAL_USER 1 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV01_MEMORY_USER 1 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV1_MEMORY_USER 1 // aka NV01_MEMORY_LOCAL_USER +#define RMCFG_CLASS_NV01_MEMORY_VIRTUAL 1 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM_DYNAMIC 1 // aka NV01_MEMORY_VIRTUAL +#define RMCFG_CLASS_NV1_MEMORY_SYSTEM_DYNAMIC 1 // aka NV01_MEMORY_VIRTUAL +#define RMCFG_CLASS_NV01_MEMORY_LOCAL_PHYSICAL 1 +#define RMCFG_CLASS_NV01_MEMORY_SYNCPOINT 0 +#define RMCFG_CLASS_NV01_MEMORY_SYSTEM_OS_DESCRIPTOR 1 +#define RMCFG_CLASS_NV01_MEMORY_DEVICELESS 1 +#define RMCFG_CLASS_NV01_MEMORY_FRAMEBUFFER_CONSOLE 1 +#define RMCFG_CLASS_NV01_MEMORY_HW_RESOURCES 1 +#define RMCFG_CLASS_NV01_MEMORY_LIST_SYSTEM 0 +#define RMCFG_CLASS_NV01_MEMORY_FLA 1 +#define RMCFG_CLASS_NV01_MEMORY_FABRIC_EXPORT 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_EXPORT_V2 0 +#define RMCFG_CLASS_NV01_MEMORY_FABRIC_IMPORT 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC 1 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_IMPORT_V2 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_EXPORTED_REF 0 +#define RMCFG_CLASS_NV_MEMORY_FABRIC_IMPORTED_REF 0 +#define RMCFG_CLASS_FABRIC_VASPACE_A 1 +#define RMCFG_CLASS_IO_VASPACE_A 1 +#define RMCFG_CLASS_NV01_NULL 1 +#define RMCFG_CLASS_NV1_NULL 1 // aka NV01_NULL +#define RMCFG_CLASS_NV01_EVENT 1 +#define RMCFG_CLASS_NV1_EVENT 1 // aka NV01_EVENT +#define RMCFG_CLASS_NV01_EVENT_KERNEL_CALLBACK 1 +#define RMCFG_CLASS_NV1_EVENT_KERNEL_CALLBACK 1 // aka NV01_EVENT_KERNEL_CALLBACK +#define RMCFG_CLASS_NV01_EVENT_OS_EVENT 1 +#define RMCFG_CLASS_NV1_EVENT_OS_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV01_EVENT_WIN32_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV1_EVENT_WIN32_EVENT 1 // aka NV01_EVENT_OS_EVENT +#define RMCFG_CLASS_NV01_EVENT_KERNEL_CALLBACK_EX 1 +#define RMCFG_CLASS_NV1_EVENT_KERNEL_CALLBACK_EX 1 // aka NV01_EVENT_KERNEL_CALLBACK_EX +#define RMCFG_CLASS_NV01_TIMER 1 +#define RMCFG_CLASS_NV1_TIMER 1 // 
aka NV01_TIMER +#define RMCFG_CLASS_KERNEL_GRAPHICS_CONTEXT 1 // Graphics Context in Kernel side +#define RMCFG_CLASS_NV50_CHANNEL_GPFIFO 1 +#define RMCFG_CLASS_GF100_CHANNEL_GPFIFO 1 +#define RMCFG_CLASS_KEPLER_CHANNEL_GPFIFO_A 1 +#define RMCFG_CLASS_UVM_CHANNEL_RETAINER 1 +#define RMCFG_CLASS_KEPLER_CHANNEL_GPFIFO_B 1 +#define RMCFG_CLASS_MAXWELL_CHANNEL_GPFIFO_A 1 +#define RMCFG_CLASS_PASCAL_CHANNEL_GPFIFO_A 1 +#define RMCFG_CLASS_VOLTA_CHANNEL_GPFIFO_A 1 +#define RMCFG_CLASS_TURING_CHANNEL_GPFIFO_A 1 +#define RMCFG_CLASS_AMPERE_CHANNEL_GPFIFO_A 1 +#define RMCFG_CLASS_NV04_SOFTWARE_TEST 1 +#define RMCFG_CLASS_NV4_SOFTWARE_TEST 1 // aka NV04_SOFTWARE_TEST +#define RMCFG_CLASS_VOLTA_USERMODE_A 1 +#define RMCFG_CLASS_TURING_USERMODE_A 1 +#define RMCFG_CLASS_AMPERE_USERMODE_A 1 +#define RMCFG_CLASS_NVC371_DISP_SF_USER 1 +#define RMCFG_CLASS_NVC372_DISPLAY_SW 1 +#define RMCFG_CLASS_NVC573_DISP_CAPABILITIES 1 +#define RMCFG_CLASS_NVC673_DISP_CAPABILITIES 1 +#define RMCFG_CLASS_NV04_DISPLAY_COMMON 1 +#define RMCFG_CLASS_NV50_DEFERRED_API_CLASS 1 +#define RMCFG_CLASS_MPS_COMPUTE 1 +#define RMCFG_CLASS_NVC570_DISPLAY 1 +#define RMCFG_CLASS_NVC57A_CURSOR_IMM_CHANNEL_PIO 1 +#define RMCFG_CLASS_NVC57B_WINDOW_IMM_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC57D_CORE_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC57E_WINDOW_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC670_DISPLAY 1 +#define RMCFG_CLASS_NVC671_DISP_SF_USER 1 +#define RMCFG_CLASS_NVC67A_CURSOR_IMM_CHANNEL_PIO 1 +#define RMCFG_CLASS_NVC67B_WINDOW_IMM_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC67D_CORE_CHANNEL_DMA 1 +#define RMCFG_CLASS_NVC67E_WINDOW_CHANNEL_DMA 1 +#define RMCFG_CLASS_NV9010_VBLANK_CALLBACK 1 +#define RMCFG_CLASS_GF100_PROFILER 1 // Profiler Client Support +#define RMCFG_CLASS_MAXWELL_PROFILER 1 // Base Profiler Class +#define RMCFG_CLASS_MAXWELL_PROFILER_DEVICE 1 // Device level Profiler Client Support +#define RMCFG_CLASS_GF100_SUBDEVICE_MASTER 1 +#define RMCFG_CLASS_GF100_ZBC_CLEAR 1 +#define RMCFG_CLASS_GF100_DISP_SW 1 +#define RMCFG_CLASS_GF100_TIMED_SEMAPHORE_SW 1 +#define RMCFG_CLASS_G84_PERFBUFFER 1 +#define RMCFG_CLASS_NV50_MEMORY_VIRTUAL 1 +#define RMCFG_CLASS_NV50_P2P 1 +#define RMCFG_CLASS_NV50_THIRD_PARTY_P2P 1 +#define RMCFG_CLASS_FERMI_TWOD_A 1 // FERMI Graphics 2D +#define RMCFG_CLASS_FERMI_VASPACE_A 1 // FERMI virtual address space +#define RMCFG_CLASS_GF100_HDACODEC 1 +#define RMCFG_CLASS_NVC4B0_VIDEO_DECODER 1 // Decoder Class for Turing +#define RMCFG_CLASS_NVC6B0_VIDEO_DECODER 1 // Decoder Class for Ampere +#define RMCFG_CLASS_NVC7B0_VIDEO_DECODER 1 // Decoder Class for Ampere +#define RMCFG_CLASS_NVC4B7_VIDEO_ENCODER 1 +#define RMCFG_CLASS_NVB4B7_VIDEO_ENCODER 1 +#define RMCFG_CLASS_NVC7B7_VIDEO_ENCODER 1 +#define RMCFG_CLASS_NVC4D1_VIDEO_NVJPG 1 +#define RMCFG_CLASS_NVC6FA_VIDEO_OFA 1 +#define RMCFG_CLASS_NVC7FA_VIDEO_OFA 1 +#define RMCFG_CLASS_KEPLER_INLINE_TO_MEMORY_B 1 // Kepler inline to memory +#define RMCFG_CLASS_FERMI_CONTEXT_SHARE_A 1 // Context Share class +#define RMCFG_CLASS_KEPLER_CHANNEL_GROUP_A 1 // Channel Group Class +#define RMCFG_CLASS_PASCAL_DMA_COPY_A 1 +#define RMCFG_CLASS_TURING_DMA_COPY_A 1 +#define RMCFG_CLASS_AMPERE_DMA_COPY_A 1 +#define RMCFG_CLASS_AMPERE_DMA_COPY_B 1 +#define RMCFG_CLASS_MAXWELL_DMA_COPY_A 1 +#define RMCFG_CLASS_ACCESS_COUNTER_NOTIFY_BUFFER 1 // Access Cntr Buffer for Gr +#define RMCFG_CLASS_MMU_FAULT_BUFFER 1 // Volta Fault Buffer for Gr +#define RMCFG_CLASS_TURING_A 1 // Turing Graphics +#define RMCFG_CLASS_TURING_COMPUTE_A 1 // Turing Graphics Compute +#define 
RMCFG_CLASS_AMPERE_A 1 // AmpereA (Graphics) +#define RMCFG_CLASS_AMPERE_COMPUTE_A 1 // AmpereComputeA (Graphics Compute) +#define RMCFG_CLASS_AMPERE_B 1 // AmpereB (Graphics) +#define RMCFG_CLASS_AMPERE_COMPUTE_B 1 // AmpereComputeB (Graphics Compute) +#define RMCFG_CLASS_AMPERE_SMC_PARTITION_REF 1 // Ampere SMC Partition Subscription +#define RMCFG_CLASS_AMPERE_SMC_EXEC_PARTITION_REF 1 // Ampere SMC Execution Partition Subscription +#define RMCFG_CLASS_AMPERE_SMC_CONFIG_SESSION 1 // Ampere SMC config session subscription +#define RMCFG_CLASS_NV0092_RG_LINE_CALLBACK 1 // RG line callback functions +#define RMCFG_CLASS_AMPERE_SMC_MONITOR_SESSION 1 // Ampere SMC monitor session subscription +#define RMCFG_CLASS_NV40_DEBUG_BUFFER 1 +#define RMCFG_CLASS_GT200_DEBUGGER 1 // CUDA Debugger support +#define RMCFG_CLASS_NV40_I2C 1 // I2C operations +#define RMCFG_CLASS_NV_E3_THREED 0 // Tegra 3D class +#define RMCFG_CLASS_NV0060_SYNC_GPU_BOOST 1 // Synchronized GPU Boost Class. Defines a set of GPUs for Synchronized Boost +#define RMCFG_CLASS_GP100_UVM_SW 1 // UVM SW class to support SW methods for fault cancel +#define RMCFG_CLASS_NV_EVENT_BUFFER 1 // Event buffer class used to share event data with UMD + + + +// +// MODULES - enabled or disabled +// +#define RMCFG_MODULE_Object 1 // Base class for NVOC objects +#define RMCFG_MODULE_OBJECT 1 // aka Object +#define RMCFG_MODULE_TRACEABLE 1 // Interface for CaptureState +#define RMCFG_MODULE_ENGSTATE 1 // Base class for engines with generic constructors, StateLoad, etc. +#define RMCFG_MODULE_HOSTENG 1 // Base class for host engines +#define RMCFG_MODULE_FLCNABLE 0 // Base class for engines requiring falcon +#define RMCFG_MODULE_PMUCLIENT 0 // Base class for engines that use PMU engine +#define RMCFG_MODULE_INTRABLE 1 // Base class to generate and service top-level interrupts +#define RMCFG_MODULE_MUTEXABLE 0 // Base class for engines that implements mutex +#define RMCFG_MODULE_GpuMutexMgr 0 // GPU Mutex Manager +#define RMCFG_MODULE_GPUMUTEXMGR 0 // aka GpuMutexMgr +#define RMCFG_MODULE_BIF 0 // Bus Interface +#define RMCFG_MODULE_KERNEL_BIF 1 // Bus Interface on Kernel(CPU) RM +#define RMCFG_MODULE_BUS 0 // Bus +#define RMCFG_MODULE_KERNEL_BUS 1 // Bus on Kernel(CPU) RM +#define RMCFG_MODULE_ClockManager 0 // Clock Manager +#define RMCFG_MODULE_CLOCKMANAGER 0 // aka ClockManager +#define RMCFG_MODULE_KERNEL_ClockManager 1 // Kernel controls for Clock Manager +#define RMCFG_MODULE_KERNEL_CLOCKMANAGER 1 // aka KERNEL_ClockManager +#define RMCFG_MODULE_DAC 0 // DAC Resource +#define RMCFG_MODULE_KERNEL_DISPLAY 1 // Display module on Kernel(CPU) RM +#define RMCFG_MODULE_DISP 0 // Display +#define RMCFG_MODULE_VIRT_MEM_ALLOCATOR 1 +#define RMCFG_MODULE_DPAUX 0 +#define RMCFG_MODULE_MEMORY_SYSTEM 0 // Memory System +#define RMCFG_MODULE_KERNEL_MEMORY_SYSTEM 1 // Kernel Memory System +#define RMCFG_MODULE_MEMORY_MANAGER 1 // Memory Manager +#define RMCFG_MODULE_FBFLCN 0 // FB falcon +#define RMCFG_MODULE_FBSR 1 // Frame Buffer Save/Restore +#define RMCFG_MODULE_KERNEL_FIFO 1 // Fifo Module on Kernel(CPU) RM +#define RMCFG_MODULE_FIFO 0 // aka. HOST +#define RMCFG_MODULE_SCHED 0 // Scheduler for runlist +#define RMCFG_MODULE_FLCN 0 // Falcon-derived engines +#define RMCFG_MODULE_KERNEL_FALCON 1 // Falcon on Kernel(CPU) RM. Used for booting Falcon cores. +#define RMCFG_MODULE_GR 0 // Graphic +#define RMCFG_MODULE_GR0 0 // aka GR +#define RMCFG_MODULE_KERNEL_GRAPHICS 1 // Graphic on Kernel(CPU) RM +#define RMCFG_MODULE_GRMGR 0 // Graphics manager. 
Used for maintaining Gr partitioning policies +#define RMCFG_MODULE_MIG_MANAGER 0 // MIG manager on Physical (GSP) RM. Used for maintaining device partitioning policies +#define RMCFG_MODULE_KERNEL_MIG_MANAGER 1 // MIG manager on Kernel (CPU) RM. Used for maintaining device partitioning policies +#define RMCFG_MODULE_KERNEL_GRAPHICS_MANAGER 1 // Graphics manager on Kernel (CPU) RM. Used for maintaining Gr partitioning policies +#define RMCFG_MODULE_HAL 1 // Hardware Abstraction Layer +#define RMCFG_MODULE_HEAD 0 // Display component: Head +#define RMCFG_MODULE_SF 0 // Display component: Serial Formatter, output protocol formatting +#define RMCFG_MODULE_DISPLAY_INSTANCE_MEMORY 1 +#define RMCFG_MODULE_KERNEL_HEAD 1 +#define RMCFG_MODULE_INTR 1 +#define RMCFG_MODULE_MC 0 +#define RMCFG_MODULE_KERNEL_MC 1 // Master Control-related code needed in Kernel RM +#define RMCFG_MODULE_PRIV_RING 0 +#define RMCFG_MODULE_KERNEL_PERF 1 // Performance module on Kernel(CPU) RM +#define RMCFG_MODULE_PERF 0 // Performance Monitor +#define RMCFG_MODULE_STEREO 0 // Stereo Viewing +#define RMCFG_MODULE_TMR 1 +#define RMCFG_MODULE_SEQ 0 // Sequencer for backlight and LVDS control +#define RMCFG_MODULE_VGA 0 // Video Graphics Array +#define RMCFG_MODULE_VBIOS 0 +#define RMCFG_MODULE_KERNEL_RC 1 // Robust Channels and Watchdog Kernel API +#define RMCFG_MODULE_RC 0 // Robust Channels +#define RMCFG_MODULE_NV_DEBUG_DUMP 1 // NV Debug +#define RMCFG_MODULE_SWENG 1 // Software Engine for all SW classes +#define RMCFG_MODULE_GPU 1 // GPU Control Object +#define RMCFG_MODULE_I2C 0 // i2c Serial Interface +#define RMCFG_MODULE_KERNEL_I2C 1 // Kernel controls for I2C +#define RMCFG_MODULE_SPI 0 // SPI Interface +#define RMCFG_MODULE_SMBPBI 0 // SMBus Post-Box Interface +#define RMCFG_MODULE_GPIO 0 // General Purpose I/O Pins +#define RMCFG_MODULE_KERNEL_GPIO 1 // Kernel controls for GPIO +#define RMCFG_MODULE_FAN 0 // General Purpose I/O Pins +#define RMCFG_MODULE_KERNEL_FAN 1 // Kernel controls for FAN +#define RMCFG_MODULE_FUSE 0 +#define RMCFG_MODULE_VOLT 0 +#define RMCFG_MODULE_KERNEL_VOLT 1 // Kernel controls for VOLT +#define RMCFG_MODULE_THERM 0 // Thermal Monitoring +#define RMCFG_MODULE_KERNEL_THERM 1 // Kernel controls Thermal Monitoring +#define RMCFG_MODULE_OR 0 // Display component: Output Resource +#define RMCFG_MODULE_PIOR 0 // Display component: Parallel Input Output Resource +#define RMCFG_MODULE_SOR 0 // Display component: Serial Output Resource +#define RMCFG_MODULE_DSI 0 // Display Serial Interface +#define RMCFG_MODULE_HDCP 0 // High-bandwidth Digital Content Protection +#define RMCFG_MODULE_HDMI 0 // High-Definition Multimedia Interface +#define RMCFG_MODULE_ISOHUB 0 // Display's memory read interface +#define RMCFG_MODULE_BSP 0 // Bit Stream Processor/NVDEC +#define RMCFG_MODULE_NVDEC 0 // aka BSP +#define RMCFG_MODULE_KERNEL_NVDEC 1 // NVDEC on Kernel(CPU) RM. Used for booting Falcon cores. 
+#define RMCFG_MODULE_CIPHER 0 +#define RMCFG_MODULE_CE 0 // Copy Engine +#define RMCFG_MODULE_KERNEL_CE 1 // Kernel Copy Engine +#define RMCFG_MODULE_PMU 0 // PMU peregrine core +#define RMCFG_MODULE_KERNEL_PMU 1 // PMU peregrine core on Kernel(CPU) RM +#define RMCFG_MODULE_GPS 0 // GPU Performance Scaling +#define RMCFG_MODULE_MSENC 0 // Video Encoder (MSENC) Engine +#define RMCFG_MODULE_KERNEL_NVENC 1 +#define RMCFG_MODULE_HDA 0 // High Definition Audio (HDA) Engine +#define RMCFG_MODULE_HDACODEC 0 // High Definition Audio (HDA) Codec Engine +#define RMCFG_MODULE_INFOROM 0 // InfoROM Engine +#define RMCFG_MODULE_KERNEL_INFOROM 1 // Kernel controls for InfoROM Engine +#define RMCFG_MODULE_LPWR 0 // Low Power Object. This objects manages all power saving features. +#define RMCFG_MODULE_KERNEL_LPWR 1 // Low Power Object. This objects manages all power saving features. +#define RMCFG_MODULE_PGCTRL 1 // Power Gating Controller (PGCTRL) Engine +#define RMCFG_MODULE_LPWRFSM 0 // LPWR FSM Object Engine + +#define RMCFG_MODULE_PGISLAND 1 // Power Gating Island (PGISLAND) +#define RMCFG_MODULE_AP 1 // Adaptive Power Object (AP) Engine +#define RMCFG_MODULE_PSI 1 // Phase State Indicator Engine. HW folks calls it as Power Saving Interface. +#define RMCFG_MODULE_CG 1 // Clock Gating Object Engine. +#define RMCFG_MODULE_RPPG 1 // RAM Periphery Power Gating Object Engine. +#define RMCFG_MODULE_EI 1 // Engine Idle Framework Object Engine. +#define RMCFG_MODULE_DPU 0 // Display Falcon +#define RMCFG_MODULE_PMGR 0 // PCB Manager engine +#define RMCFG_MODULE_KERNEL_PMGR 1 // Kernel controls for Pmgr +#define RMCFG_MODULE_SYS 1 // System +#define RMCFG_MODULE_OS 1 // OS Layer +#define RMCFG_MODULE_GPUMGR 1 // GPU Manager object +#define RMCFG_MODULE_HEAP 1 // Heap Engine Object +#define RMCFG_MODULE_BRIGHTC 0 // Backlight brightness control module +#define RMCFG_MODULE_GSYNCMGR 0 // GSYNC Manager +#define RMCFG_MODULE_OD 0 // Display component: Output Device +#define RMCFG_MODULE_DFP 0 // Display component: Display Flat Panel +#define RMCFG_MODULE_CRT 0 // Display component: Cathode ray tube +#define RMCFG_MODULE_DisplayPort 0 // Display component: DisplayPort +#define RMCFG_MODULE_DISPLAYPORT 0 // aka DisplayPort +#define RMCFG_MODULE_TMDS 0 // Display component: Transition Minimized Differential Signaling +#define RMCFG_MODULE_CL 1 // Core Logic +#define RMCFG_MODULE_RCDB 1 // RC Journal log DB +#define RMCFG_MODULE_SWINSTR 0 // Software Instrumentation +#define RMCFG_MODULE_GPUACCT 1 // GPU Accounting +#define RMCFG_MODULE_GRDBG 0 // Debugger Engine Object +#define RMCFG_MODULE_PSR 0 // Panel Self Refresh +#define RMCFG_MODULE_UVM 1 // Unified Virtual Memory - provides interface to separate UVM and verification support +#define RMCFG_MODULE_VGPUMGR 0 // Virtual GPU management +#define RMCFG_MODULE_SEC2 0 // New secure falcon +#define RMCFG_MODULE_KERNEL_SEC2 1 // SEC2 on Kernel(CPU) RM. Used for booting Falcon cores. 
+#define RMCFG_MODULE_PMS 0 // PMU ModeSet object +#define RMCFG_MODULE_GCX 0 // Idle power states of GPU +#define RMCFG_MODULE_LSFM 0 // Light Secure Falcon Manager object +#define RMCFG_MODULE_ACR 0 // Programs MMU to protect the region +#define RMCFG_MODULE_REFCNT 1 // Reference Counting +#define RMCFG_MODULE_GPULOG 0 // Logger for logging GPU related data +#define RMCFG_MODULE_FECS 0 // Front-end context switch +#define RMCFG_MODULE_HYPERVISOR 0 // Hypervisor object to support its native API +#define RMCFG_MODULE_VRRMGR 0 // VRR Management object +#define RMCFG_MODULE_GPCCS 0 // GPC context switch +#define RMCFG_MODULE_MISSING 0 // MISSING (placeholder) Engine +#define RMCFG_MODULE_VMM 1 // virtual memory manager +#define RMCFG_MODULE_VASPACE 1 // virtual address space +#define RMCFG_MODULE_GVASPACE 1 // GPU virtual address space +#define RMCFG_MODULE_AVASPACE 0 // AMODEL virtual address space +#define RMCFG_MODULE_IOVASPACE 1 // IOMMU virtual address space +#define RMCFG_MODULE_FABRICVASPACE 1 // FABRIC virtual address space +#define RMCFG_MODULE_MMU 1 // Memory Management Unit- HW interface +#define RMCFG_MODULE_GMMU 0 // GPU Memory Management Unit +#define RMCFG_MODULE_KERNEL_GMMU 1 // GPU Memory Management Unit on Kernel(CPU) RM +#define RMCFG_MODULE_VMMU 0 // Virtual Memory Management Unit (for vGPU) +#define RMCFG_MODULE_GPUGRP 1 // Group of GPU(s) that may or may not be in SLI +#define RMCFG_MODULE_KERNEL_HWPM 1 // Hardware Performance Monitor on Kernel(CPU) RM +#define RMCFG_MODULE_HWPM 0 // Hardware Performance Monitor +#define RMCFG_MODULE_NVLINK 0 // NVLINK High-speed GPU interconnect +#define RMCFG_MODULE_KERNEL_NVLINK 1 // Nvlink on Kernel(CPU) RM +#define RMCFG_MODULE_KERNEL_NVLINK 1 // Nvlink on Kernel(CPU) RM +#define RMCFG_MODULE_IOCTRL 0 // NVLINK Ioctrl +#define RMCFG_MODULE_HSHUB 0 // High Speed Hub +#define RMCFG_MODULE_KERNEL_HSHUB 1 // High Speed Hub on Kernel(CPU) RM +#define RMCFG_MODULE_KERNEL_HSHUB 1 // High Speed Hub on Kernel(CPU) RM +#define RMCFG_MODULE_GPUMON 1 // GPU Monitoring +#define RMCFG_MODULE_GPUBOOSTMGR 1 // Sync Gpu Boost Manager +#define RMCFG_MODULE_GRIDDISPLAYLESS 0 // GRID Displayless +#define RMCFG_MODULE_WINDOW 0 // NvDisplay WINDOW channel +#define RMCFG_MODULE_RPC 1 // RPC Engine for VGPU +#define RMCFG_MODULE_RPCSTRUCTURECOPY 0 // RPC structure copying for VGPU +#define RMCFG_MODULE_NVJPG 0 // Video JPEG (NVJPG) Engine +#define RMCFG_MODULE_KERNEL_NVJPG 1 +#define RMCFG_MODULE_GSP 0 // GPU system processor +#define RMCFG_MODULE_KERNEL_GSP 1 // GSP on Kernel(CPU) RM. Used for booting RM on GSP. 
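As an illustration (not part of the generated file or of this commit), the per-module and per-feature defines in this header are consumed through the RMCFG_MODULE_ENABLED()/RMCFG_FEATURE_ENABLED() helper macros declared near the end of this header; per the comment accompanying those macros, they can be used both in C code ('if') and in C-preprocessor directives ('#if'). A minimal usage sketch follows; the hypothetical*() functions are placeholders invented for illustration only, not real RM APIs.

/* Sketch of a hypothetical consumer of rmconfig.h (assumes this header is included). */
extern void hypotheticalKernelGspSetup(void);   /* placeholder, not a real RM API */
extern void hypotheticalUnixOnlyInit(void);     /* placeholder, not a real RM API */

static void hypotheticalInitPath(void)
{
#if RMCFG_MODULE_ENABLED(KERNEL_GSP)
    /* Compiled in only when the KERNEL_GSP module is enabled in this profile. */
    hypotheticalKernelGspSetup();
#endif

    if (RMCFG_FEATURE_ENABLED(PLATFORM_UNIX))
    {
        /* The macros expand to compile-time constants, so they can also gate
         * ordinary C 'if' statements; the compiler discards the dead branch. */
        hypotheticalUnixOnlyInit();
    }
}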
+#define RMCFG_MODULE_OFA 0 // Optical Flow Accelarator +#define RMCFG_MODULE_KERNEL_OFA 1 +#define RMCFG_MODULE_HOT_PLUG 0 // Display component: hot plug +#define RMCFG_MODULE_FABRIC 1 // NVLink Fabric +#define RMCFG_MODULE_GPUDB 1 // GPU DATABASE +#define RMCFG_MODULE_NNE 0 // Neural Net Engine (NNE) +#define RMCFG_MODULE_DCECLIENTRM 0 // DCE Client RM +#define RMCFG_MODULE_DCB 0 // Display Control Block for all display related data in VBIOS/DCB Image +#define RMCFG_MODULE_DISPMACRO 0 // DispMacro RM infrastructure for IED removal from VBIOS +#define RMCFG_MODULE_DISP_MGR 0 // Lid- and dock-related disp code for NOTEBOOK +#define RMCFG_MODULE_PLATFORM 1 // Object for platform related features + + + +// +// API's - enabled or disabled +// +#define RMCFG_API_NV04_ALLOC 1 +#define RMCFG_API_NVOS21_PARAMETERS 1 // aka NV04_ALLOC +#define RMCFG_API_NV_ESC_RM_ALLOC 1 // aka NV04_ALLOC +#define RMCFG_API_Nv04Alloc 1 // aka NV04_ALLOC +#define RMCFG_API_NvRmAlloc 1 // aka NV04_ALLOC +#define RMCFG_API_NV04_ALLOC_WITH_ACCESS 1 +#define RMCFG_API_NVOS64_PARAMETERS 1 // aka NV04_ALLOC_WITH_ACCESS +#define RMCFG_API_NV_ESC_RM_ALLOC 1 // aka NV04_ALLOC_WITH_ACCESS +#define RMCFG_API_Nv04AllocWithAccess 1 // aka NV04_ALLOC_WITH_ACCESS +#define RMCFG_API_NvRmAllocWithAccess 1 // aka NV04_ALLOC_WITH_ACCESS +#define RMCFG_API_NV01_ALLOC_MEMORY 1 +#define RMCFG_API_NVOS02_PARAMETERS 1 // aka NV01_ALLOC_MEMORY +#define RMCFG_API_NV_ESC_RM_ALLOC_MEMORY 1 // aka NV01_ALLOC_MEMORY +#define RMCFG_API_Nv01AllocMemory 1 // aka NV01_ALLOC_MEMORY +#define RMCFG_API_NvRmAllocMemory64 1 // aka NV01_ALLOC_MEMORY +#define RMCFG_API_NV01_ALLOC_OBJECT 1 +#define RMCFG_API_NVOS05_PARAMETERS 1 // aka NV01_ALLOC_OBJECT +#define RMCFG_API_NV_ESC_RM_ALLOC_OBJECT 1 // aka NV01_ALLOC_OBJECT +#define RMCFG_API_Nv01AllocObject 1 // aka NV01_ALLOC_OBJECT +#define RMCFG_API_NvRmAllocObject 1 // aka NV01_ALLOC_OBJECT +#define RMCFG_API_NV01_FREE 1 +#define RMCFG_API_NVOS00_PARAMETERS 1 // aka NV01_FREE +#define RMCFG_API_NV_ESC_RM_FREE 1 // aka NV01_FREE +#define RMCFG_API_Nv01Free 1 // aka NV01_FREE +#define RMCFG_API_NvRmFree 1 // aka NV01_FREE +#define RMCFG_API_NV04_UPDATE_CONTEXT_DMA 1 +#define RMCFG_API_NVOS37_PARAMETERS 1 // aka NV04_UPDATE_CONTEXT_DMA +#define RMCFG_API_NV_ESC_RM_UPDATE_CONTEXT_DMA 1 // aka NV04_UPDATE_CONTEXT_DMA +#define RMCFG_API_Nv03AllocChannelDma 1 // aka NV04_UPDATE_CONTEXT_DMA +#define RMCFG_API_NV04_VID_HEAP_CONTROL 1 +#define RMCFG_API_NVOS32_PARAMETERS 1 // aka NV04_VID_HEAP_CONTROL +#define RMCFG_API_NV_ESC_RM_VID_HEAP_CONTROL 1 // aka NV04_VID_HEAP_CONTROL +#define RMCFG_API_Nv04VidHeapControl 1 // aka NV04_VID_HEAP_CONTROL +#define RMCFG_API_NvRmVidHeapControl 1 // aka NV04_VID_HEAP_CONTROL +#define RMCFG_API_NV01_CONFIG_GET 0 +#define RMCFG_API_NVOS13_PARAMETERS 0 // aka NV01_CONFIG_GET +#define RMCFG_API_NV_ESC_RM_CONFIG_GET 0 // aka NV01_CONFIG_GET +#define RMCFG_API_Nv01ConfigGet 0 // aka NV01_CONFIG_GET +#define RMCFG_API_NvRmConfigGet 0 // aka NV01_CONFIG_GET +#define RMCFG_API_NV01_CONFIG_SET 0 +#define RMCFG_API_NVOS14_PARAMETERS 0 // aka NV01_CONFIG_SET +#define RMCFG_API_NV_ESC_RM_CONFIG_SET 0 // aka NV01_CONFIG_SET +#define RMCFG_API_Nv01ConfigSet 0 // aka NV01_CONFIG_SET +#define RMCFG_API_NvRmConfigSet 0 // aka NV01_CONFIG_SET +#define RMCFG_API_NV04_CONFIG_GET_EX 0 +#define RMCFG_API_NVOS_CONFIG_GET_EX_PARAMS 0 // aka NV04_CONFIG_GET_EX +#define RMCFG_API_NV_ESC_RM_CONFIG_GET_EX 0 // aka NV04_CONFIG_GET_EX +#define RMCFG_API_Nv04ConfigGetEx 0 // aka NV04_CONFIG_GET_EX +#define 
RMCFG_API_NvRmConfigGetEx 0 // aka NV04_CONFIG_GET_EX +#define RMCFG_API_NV04_CONFIG_SET_EX 0 +#define RMCFG_API_NVOS_CONFIG_SET_EX_PARAMS 0 // aka NV04_CONFIG_SET_EX +#define RMCFG_API_NV_ESC_RM_CONFIG_SET_EX 0 // aka NV04_CONFIG_SET_EX +#define RMCFG_API_Nv04ConfigSetEx 0 // aka NV04_CONFIG_SET_EX +#define RMCFG_API_NvRmConfigSetEx 0 // aka NV04_CONFIG_SET_EX +#define RMCFG_API_NV04_I2C_ACCESS 1 +#define RMCFG_API_NVOS_I2C_ACCESS_PARAMS 1 // aka NV04_I2C_ACCESS +#define RMCFG_API_NV_ESC_RM_I2C_ACCESS 1 // aka NV04_I2C_ACCESS +#define RMCFG_API_Nv04I2CAccess 1 // aka NV04_I2C_ACCESS +#define RMCFG_API_NvRmI2CAccess 1 // aka NV04_I2C_ACCESS +#define RMCFG_API_NV04_IDLE_CHANNELS 1 +#define RMCFG_API_NVOS30_PARAMETERS 1 // aka NV04_IDLE_CHANNELS +#define RMCFG_API_NV_ESC_RM_IDLE_CHANNELS 1 // aka NV04_IDLE_CHANNELS +#define RMCFG_API_Nv04IdleChannels 1 // aka NV04_IDLE_CHANNELS +#define RMCFG_API_NvRmIdleChannels 1 // aka NV04_IDLE_CHANNELS +#define RMCFG_API_NV04_MAP_MEMORY 1 +#define RMCFG_API_NVOS33_PARAMETERS 1 // aka NV04_MAP_MEMORY +#define RMCFG_API_NV_ESC_RM_MAP_MEMORY 1 // aka NV04_MAP_MEMORY +#define RMCFG_API_Nv04MapMemory 1 // aka NV04_MAP_MEMORY +#define RMCFG_API_NvRmMapMemory 1 // aka NV04_MAP_MEMORY +#define RMCFG_API_NV04_UNMAP_MEMORY 1 +#define RMCFG_API_NVOS34_PARAMETERS 1 // aka NV04_UNMAP_MEMORY +#define RMCFG_API_NV_ESC_RM_UNMAP_MEMORY 1 // aka NV04_UNMAP_MEMORY +#define RMCFG_API_Nv04UnmapMemory 1 // aka NV04_UNMAP_MEMORY +#define RMCFG_API_NvRmUnmapMemory 1 // aka NV04_UNMAP_MEMORY +#define RMCFG_API_NV04_MAP_MEMORY_DMA 1 +#define RMCFG_API_NVOS46_PARAMETERS 1 // aka NV04_MAP_MEMORY_DMA +#define RMCFG_API_NV_ESC_RM_MAP_MEMORY_DMA 1 // aka NV04_MAP_MEMORY_DMA +#define RMCFG_API_Nv04MapMemoryDma 1 // aka NV04_MAP_MEMORY_DMA +#define RMCFG_API_NvRmMapMemoryDma 1 // aka NV04_MAP_MEMORY_DMA +#define RMCFG_API_NV04_UNMAP_MEMORY_DMA 1 +#define RMCFG_API_NVOS47_PARAMETERS 1 // aka NV04_UNMAP_MEMORY_DMA +#define RMCFG_API_NV_ESC_RM_UNMAP_MEMORY_DMA 1 // aka NV04_UNMAP_MEMORY_DMA +#define RMCFG_API_Nv04UnmapMemoryDma 1 // aka NV04_UNMAP_MEMORY_DMA +#define RMCFG_API_NvRmUnmapMemoryDma 1 // aka NV04_UNMAP_MEMORY_DMA +#define RMCFG_API_NV04_ALLOC_CONTEXT_DMA 1 +#define RMCFG_API_NVOS39_PARAMETERS 1 // aka NV04_ALLOC_CONTEXT_DMA +#define RMCFG_API_NV_ESC_RM_ALLOC_CONTEXT_DMA2 1 // aka NV04_ALLOC_CONTEXT_DMA +#define RMCFG_API_Nv04AllocContextDma 1 // aka NV04_ALLOC_CONTEXT_DMA +#define RMCFG_API_NvRmAllocContextDma2 1 // aka NV04_ALLOC_CONTEXT_DMA +#define RMCFG_API_NV04_BIND_CONTEXT_DMA 1 +#define RMCFG_API_NVOS49_PARAMETERS 1 // aka NV04_BIND_CONTEXT_DMA +#define RMCFG_API_NV_ESC_RM_BIND_CONTEXT_DMA 1 // aka NV04_BIND_CONTEXT_DMA +#define RMCFG_API_Nv04BindContextDma 1 // aka NV04_BIND_CONTEXT_DMA +#define RMCFG_API_NvRmBindContextDma 1 // aka NV04_BIND_CONTEXT_DMA +#define RMCFG_API_NV04_CONTROL 1 +#define RMCFG_API_NVOS54_PARAMETERS 1 // aka NV04_CONTROL +#define RMCFG_API_NV_ESC_RM_CONTROL 1 // aka NV04_CONTROL +#define RMCFG_API_Nv04Control 1 // aka NV04_CONTROL +#define RMCFG_API_NvRmControl 1 // aka NV04_CONTROL +#define RMCFG_API_NV04_DUP_OBJECT 1 +#define RMCFG_API_NVOS55_PARAMETERS 1 // aka NV04_DUP_OBJECT +#define RMCFG_API_NV_ESC_RM_DUP_OBJECT 1 // aka NV04_DUP_OBJECT +#define RMCFG_API_Nv04DupObject 1 // aka NV04_DUP_OBJECT +#define RMCFG_API_NvRmDupObject 1 // aka NV04_DUP_OBJECT +#define RMCFG_API_NV04_DUP_OBJECT2 1 +#define RMCFG_API_NVOS55_PARAMETERS 1 // aka NV04_DUP_OBJECT2 +#define RMCFG_API_NV_ESC_RM_DUP_OBJECT 1 // aka NV04_DUP_OBJECT2 +#define 
RMCFG_API_Nv04DupObject 1 // aka NV04_DUP_OBJECT2 +#define RMCFG_API_NvRmDupObject2 1 // aka NV04_DUP_OBJECT2 +#define RMCFG_API_NV04_SHARE_OBJECT 1 +#define RMCFG_API_NVOS57_PARAMETERS 1 // aka NV04_SHARE_OBJECT +#define RMCFG_API_NV_ESC_RM_SHARE 1 // aka NV04_SHARE_OBJECT +#define RMCFG_API_Nv04Share 1 // aka NV04_SHARE_OBJECT +#define RMCFG_API_NvRmShare 1 // aka NV04_SHARE_OBJECT +#define RMCFG_API_NV04_GET_MEMORY_INFO 1 +#define RMCFG_API_NVOS58_PARAMETERS 1 // aka NV04_GET_MEMORY_INFO +#define RMCFG_API_NV_ESC_RM_GET_MEMORY_INFO 1 // aka NV04_GET_MEMORY_INFO +#define RMCFG_API_Nv04GetMemoryInfo 1 // aka NV04_GET_MEMORY_INFO +#define RMCFG_API_NV04_MAP_MEMORY_DMA_OFFSET 1 +#define RMCFG_API_NVOS59_PARAMETERS 1 // aka NV04_MAP_MEMORY_DMA_OFFSET +#define RMCFG_API_NV_ESC_RM_MAP_MEMORY_DMA_OFFSET 1 // aka NV04_MAP_MEMORY_DMA_OFFSET +#define RMCFG_API_Nv04MapMemoryDmaOffset 1 // aka NV04_MAP_MEMORY_DMA_OFFSET +#define RMCFG_API_NV04_UNMAP_MEMORY_DMA_OFFSET 1 +#define RMCFG_API_NVOS60_PARAMETERS 1 // aka NV04_UNMAP_MEMORY_DMA_OFFSET +#define RMCFG_API_NV_ESC_RM_UNMAP_MEMORY_DMA_OFFSET 1 // aka NV04_UNMAP_MEMORY_DMA_OFFSET +#define RMCFG_API_Nv04UnmapMemoryDmaOffset 1 // aka NV04_UNMAP_MEMORY_DMA_OFFSET +#define RMCFG_API_NV04_ADD_VBLANK_CALLBACK 1 +#define RMCFG_API_NVOS61_PARAMETERS 1 // aka NV04_ADD_VBLANK_CALLBACK +#define RMCFG_API_NV_ESC_RM_ADD_VBLANK_CALLBACK 1 // aka NV04_ADD_VBLANK_CALLBACK +#define RMCFG_API_Nv04AddVblankCallback 1 // aka NV04_ADD_VBLANK_CALLBACK +#define RMCFG_API_NvRmAddVblankCallback 1 // aka NV04_ADD_VBLANK_CALLBACK +#define RMCFG_API_NV04_ACCESS_REGISTRY 1 +#define RMCFG_API_NvRmReadRegistryDword 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NvRmWriteRegistryDword 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NvRmGetNumRegistryEntries 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NvRmDeleteRegistryEntry 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NvRmReadRegistryEntry 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NvRmReadRegistryBinary 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NvRmWriteRegistryBinary 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NVOS38_PARAMETERS 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NV_ESC_RM_ACCESS_REGISTRY 1 // aka NV04_ACCESS_REGISTRY +#define RMCFG_API_NV04_GET_EVENT_DATA 1 +#define RMCFG_API_NVOS41_PARAMETERS 1 // aka NV04_GET_EVENT_DATA +#define RMCFG_API_NV_ESC_RM_GET_EVENT_DATA 1 // aka NV04_GET_EVENT_DATA +#define RMCFG_API_NvRmGetEventData 1 // aka NV04_GET_EVENT_DATA +#define RMCFG_API_NV04_UPDATE_DEVICE_MAPPING_INFO 1 // Update Mapping Parameters (unix-only) +#define RMCFG_API_NVOS56_PARAMETERS 1 // aka NV04_UPDATE_DEVICE_MAPPING_INFO +#define RMCFG_API_NV_ESC_RM_UPDATE_DEVICE_MAPPING_INFO 1 // aka NV04_UPDATE_DEVICE_MAPPING_INFO +#define RMCFG_API_NVXX_CARD_INFO 1 +#define RMCFG_API_nv_ioctl_card_info_t 1 // aka NVXX_CARD_INFO +#define RMCFG_API_NV_ESC_CARD_INFO 1 // aka NVXX_CARD_INFO +#define RMCFG_API_NVXX_ENV_INFO 1 +#define RMCFG_API_nv_ioctl_env_info_t 1 // aka NVXX_ENV_INFO +#define RMCFG_API_NV_ESC_ENV_INFO 1 // aka NVXX_ENV_INFO +#define RMCFG_API_NVXX_ALLOC_OS_EVENT 1 +#define RMCFG_API_nv_ioctl_alloc_os_event_t 1 // aka NVXX_ALLOC_OS_EVENT +#define RMCFG_API_NV_ESC_ALLOC_OS_EVENT 1 // aka NVXX_ALLOC_OS_EVENT +#define RMCFG_API_NvRmAllocOsEvent 1 // aka NVXX_ALLOC_OS_EVENT +#define RMCFG_API_NVXX_FREE_OS_EVENT 1 +#define RMCFG_API_nv_ioctl_free_os_event_t 1 // aka NVXX_FREE_OS_EVENT +#define RMCFG_API_NV_ESC_FREE_OS_EVENT 1 // aka NVXX_FREE_OS_EVENT +#define 
RMCFG_API_NvRmFreeOsEvent 1 // aka NVXX_FREE_OS_EVENT +#define RMCFG_API_NVXX_STATUS_CODE 1 +#define RMCFG_API_nv_ioctl_status_code_t 1 // aka NVXX_STATUS_CODE +#define RMCFG_API_NV_ESC_STATUS_CODE 1 // aka NVXX_STATUS_CODE +#define RMCFG_API_NVXX_CHECK_VERSION_STR 1 +#define RMCFG_API_nv_ioctl_rm_api_version_t 1 // aka NVXX_CHECK_VERSION_STR +#define RMCFG_API_NV_ESC_CHECK_VERSION_STR 1 // aka NVXX_CHECK_VERSION_STR +#define RMCFG_API_NVXX_ATTACH_GPUS_TO_FD 1 +#define RMCFG_API_NvU32 1 // aka NVXX_ATTACH_GPUS_TO_FD +#define RMCFG_API_NV_ESC_ATTACH_GPUS_TO_FD 1 // aka NVXX_ATTACH_GPUS_TO_FD + + + +// Disable misspelling detection +#define __RMCFG_vet_enabled 0 + + + + + + + + +// Make sure the specified feature is defined and not a misspelling +// by checking the "_def" forms above which are all set to '1' for +// each defined chip, feature, etc, irrespective of it's enable/disable +// state. +#define _RMCFG_vet(x) 0 +#if __RMCFG_vet_enabled && defined(__GNUC__) // broken on MSVC +# undef _RMCFG_vet +# define _RMCFG_vet(x) ((__def_RMCFG ## x) ? 0 : (0 * (1/0))) +#endif + +// +// Compile-time constant macros to help with enabling or disabling code based +// on whether a feature (or chip or class or engine or ...) is enabled. +// May be used by both C code ('if') and C-preprocessor directives ('#if') +// + +#define RMCFG_CHIP_ENABLED(_chip) (RMCFG_CHIP_##_chip + _RMCFG_vet(_CHIP_ ## _chip)) +#define RMCFG_FEATURE_ENABLED(_feature) (RMCFG_FEATURE_##_feature + _RMCFG_vet(_FEATURE_ ## _feature)) +#define RMCFG_MODULE_ENABLED(_module) (RMCFG_MODULE_##_module + _RMCFG_vet(_MODULE_ ## _module)) +#define RMCFG_CLASS_ENABLED(_clss) (RMCFG_CLASS_##_clss + _RMCFG_vet(_CLASS_ ## _clss)) +#define RMCFG_API_ENABLED(_api) (RMCFG_API_##_api + _RMCFG_vet(_API_ ## _api)) + +#endif // _RMCFG_H_ diff --git a/src/nvidia/inc/kernel/compute/fabric.h b/src/nvidia/inc/kernel/compute/fabric.h new file mode 100644 index 000000000..9851ca321 --- /dev/null +++ b/src/nvidia/inc/kernel/compute/fabric.h @@ -0,0 +1,3 @@ + +#include "g_fabric_nvoc.h" + diff --git a/src/nvidia/inc/kernel/compute/fm_session_api.h b/src/nvidia/inc/kernel/compute/fm_session_api.h new file mode 100644 index 000000000..9a340cd28 --- /dev/null +++ b/src/nvidia/inc/kernel/compute/fm_session_api.h @@ -0,0 +1,3 @@ + +#include "g_fm_session_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/compute/mps_api.h b/src/nvidia/inc/kernel/compute/mps_api.h new file mode 100644 index 000000000..c7ff75176 --- /dev/null +++ b/src/nvidia/inc/kernel/compute/mps_api.h @@ -0,0 +1,3 @@ + +#include "g_mps_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/core/bin_data.h b/src/nvidia/inc/kernel/core/bin_data.h new file mode 100644 index 000000000..019dd4b5c --- /dev/null +++ b/src/nvidia/inc/kernel/core/bin_data.h @@ -0,0 +1,106 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _BINDATA_H +#define _BINDATA_H + +#include "core/core.h" + +/************************************************************************************************************** +* +* File: bindata.h +* +* Description: +* Bindata management APIs +* +**************************************************************************************************************/ + +// +// Public interface for accessing the acquired binary data +// + +// +// Binary data access handler +// +typedef struct BINDATA_RUNTIME_INFO BINDATA_RUNTIME_INFO, *PBINDATA_RUNTIME_INFO; + +// +// Public binary storage information +// +struct BINDATA_STORAGE; // currently no public data fields +typedef struct BINDATA_STORAGE BINDATA_STORAGE, *PBINDATA_STORAGE; + + +// +// Primitives +// +NV_STATUS bindataAcquire(const BINDATA_STORAGE *pBinStorage, PBINDATA_RUNTIME_INFO *ppBinInfo); +NV_STATUS bindataGetNextChunk(PBINDATA_RUNTIME_INFO pBinInfo, NvU8 *pBuffer, NvU32 nBytes); +void bindataRelease(PBINDATA_RUNTIME_INFO pBinInfo); + + +// +// Utilities +// +NV_STATUS bindataWriteToBuffer(const BINDATA_STORAGE *pBinStorage, NvU8 *pBuffer, NvU32 bufferSize); +NvU32 bindataGetBufferSize(const BINDATA_STORAGE *pBinStorage); + + +// +// Bindata Archive support +// +typedef struct +{ + const char* name; // string of file name or name tag + const PBINDATA_STORAGE pBinStorage; // pointer to the binary storage +} BINDATA_ARCHIVE_ENTRY; + +typedef struct +{ + NvU32 entryNum; + BINDATA_ARCHIVE_ENTRY entries[]; +} BINDATA_ARCHIVE; + + +// Bindata Archive API - get Bindata storage from a Bindata Archive +const BINDATA_STORAGE * bindataArchiveGetStorage(const BINDATA_ARCHIVE *pBinArchive, const char *bindataName); + +// +// Iterate over all BINDATA_STORAGE entries that have not been referenced so far +// Returns the pointer to unreferenced data or NULL if no more are available. +// Example usage: +// const BINDATA_STORAGE *iter = NULL; +// void *datablock; +// NvU32 size; +// while ((datablock = bindataGetNextUnreferencedStorage(&iter, &size))) { +// do_stuff(datablock, size); +// } +// +void* bindataGetNextUnreferencedStorage(const BINDATA_STORAGE **iter, NvU32 *pDataSize); +// +// Marks a given BINDATA_STORAGE as destroyed, making all subsequent attempts +// to access it fail and return NULL/0 +// +void bindataDestroyStorage(BINDATA_STORAGE *storage); + +#endif // _BINDATA_H diff --git a/src/nvidia/inc/kernel/core/core.h b/src/nvidia/inc/kernel/core/core.h new file mode 100644 index 000000000..edf41baba --- /dev/null +++ b/src/nvidia/inc/kernel/core/core.h @@ -0,0 +1,50 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __CORE_H__ +#define __CORE_H__ + +#include "core/prelude.h" + +/** + * @brief Global RM initialization + * + * The single entrypoint into the RM for all platforms. This will initialize cross + * platform RM subsystems and call into OS-specific init as needed. + * + * Must be called once and only once before any RM internal functions can be + * called. + * + * @return NV_OK if successful, error otherwise + */ +NV_STATUS coreInitializeRm(void); + +/** + * @brief Global RM shutdown + * + * Must be called once and only once when a driver is shutting down and no more + * RM internal functions will be called. + * + */ +void coreShutdownRm(void); + +#endif /* __CORE_H__ */ diff --git a/src/nvidia/inc/kernel/core/hal.h b/src/nvidia/inc/kernel/core/hal.h new file mode 100644 index 000000000..eaa931be8 --- /dev/null +++ b/src/nvidia/inc/kernel/core/hal.h @@ -0,0 +1,3 @@ + +#include "g_hal_nvoc.h" + diff --git a/src/nvidia/inc/kernel/core/hal_mgr.h b/src/nvidia/inc/kernel/core/hal_mgr.h new file mode 100644 index 000000000..b15d9cd2c --- /dev/null +++ b/src/nvidia/inc/kernel/core/hal_mgr.h @@ -0,0 +1,3 @@ + +#include "g_hal_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/core/info_block.h b/src/nvidia/inc/kernel/core/info_block.h new file mode 100644 index 000000000..f77da1566 --- /dev/null +++ b/src/nvidia/inc/kernel/core/info_block.h @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _INFO_BLOCK_H_ +#define _INFO_BLOCK_H_ + +// +// HAL private data management. +// +typedef struct ENG_INFO_LINK_NODE *PENG_INFO_LINK_NODE; +typedef struct ENG_INFO_LINK_NODE ENG_INFO_LINK_NODE; + +// new style typedef for info block functions, simple typedef. +// Used by hal .def files via INFO_BLOCK_GROUP template in Gpuhal.def +typedef void *EngGetInfoBlockFn(PENG_INFO_LINK_NODE pHead, NvU32 dataId); +typedef void *EngAddInfoBlockFn(PENG_INFO_LINK_NODE *ppHead, NvU32 dataId, NvU32 size); +typedef void EngDeleteInfoBlockFn(PENG_INFO_LINK_NODE *ppHead, NvU32 dataId); +typedef NvBool EngTestInfoBlockFn(PENG_INFO_LINK_NODE pHead, NvU32 dataId); + +// old style typedef for info block functions (ptr to fn) +// delete these 4 typedefs once all .def files are converted to use OBJECT_INTERFACES +typedef EngGetInfoBlockFn *EngGetInfoBlock; +typedef EngAddInfoBlockFn *EngAddInfoBlock; +typedef EngDeleteInfoBlockFn *EngDeleteInfoBlock; +typedef EngTestInfoBlockFn *EngTestInfoBlock; + + +struct ENG_INFO_LINK_NODE +{ + NvU32 dataId; + void *infoBlock; + PENG_INFO_LINK_NODE next; +}; + +void* getInfoPtr(PENG_INFO_LINK_NODE pHead, NvU32 dataId); +void* addInfoPtr(PENG_INFO_LINK_NODE *ppHead, NvU32 dataId, NvU32 size); +void deleteInfoPtr(PENG_INFO_LINK_NODE * ppHead, NvU32 dataId); +NvBool testInfoPtr(PENG_INFO_LINK_NODE, NvU32 dataId); + +#endif // _INFO_BLOCK_H_ diff --git a/src/nvidia/inc/kernel/core/locks.h b/src/nvidia/inc/kernel/core/locks.h new file mode 100644 index 000000000..de6ab641f --- /dev/null +++ b/src/nvidia/inc/kernel/core/locks.h @@ -0,0 +1,205 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE.
+ */ +#ifndef LOCKS_H +#define LOCKS_H + +#include "core/core.h" +#include "os/os.h" + +// Forward declarations +typedef struct OBJSYS OBJSYS; + +typedef enum +{ + GPU_LOCK_GRP_SUBDEVICE, // locks will be taken for subdevice only + GPU_LOCK_GRP_DEVICE, // locks will be taken for device only + GPU_LOCK_GRP_MASK, // locks will be taken for devices specified by the mask + GPU_LOCK_GRP_ALL // locks will be taken for all devices +} GPU_LOCK_GRP_ID; +typedef NvU32 GPU_MASK; + +// +// This structure is used to trace lock acquire/release activity. +// The calling IP is stored in a circular array. +// +#define MAX_TRACE_LOCK_CALLS 32 + +typedef enum +{ + lockTraceEmpty, + lockTraceAcquire, + lockTraceRelease, + lockTraceAlloc, + lockTraceFree +} LOCK_TRACE_TYPE; + +typedef struct +{ + LOCK_TRACE_TYPE type; + union { + GPU_MASK gpuMask; // For GPU locks + NvU32 lockModule; // For API lock + NvU32 value; + } data32; + union { + NvU16 gpuInst; // For GPU locks + NvU16 lockFlags; // For API lock + NvU16 value; + } data16; + NvBool bHighIrql; + NvU8 priority; + NvU64 callerRA; + NvU64 threadId; + NvU64 timestamp; +} LOCK_TRACE_ENTRY; + +typedef struct +{ + LOCK_TRACE_ENTRY entries[MAX_TRACE_LOCK_CALLS]; + NvU32 index; +} LOCK_TRACE_INFO; + +#define INSERT_LOCK_TRACE(plti, ra, t, d16, d32, ti, irql, pr, ts) \ +{ \ + (plti)->entries[(plti)->index].callerRA = (NvUPtr)ra; \ + (plti)->entries[(plti)->index].type = t; \ + (plti)->entries[(plti)->index].data16.value = d16; \ + (plti)->entries[(plti)->index].data32.value = d32; \ + (plti)->entries[(plti)->index].threadId = ti; \ + (plti)->entries[(plti)->index].timestamp = ts; \ + (plti)->entries[(plti)->index].bHighIrql = irql; \ + (plti)->entries[(plti)->index].priority = pr; \ + (plti)->index = ((plti)->index + 1) % MAX_TRACE_LOCK_CALLS; \ +} + +// +// Callers specify this value when they to lock all possible GPUs. +// +#define GPUS_LOCK_ALL (0xFFFFFFFF) + +// +// Flags for rmGpusLock[Acquire,Release] operations. +// + +// default no flags +#define GPUS_LOCK_FLAGS_NONE (0x00000000) +// conditional acquire; if lock is already held then return error +#define GPU_LOCK_FLAGS_COND_ACQUIRE NVBIT(0) +// acquire the lock in read (shared) mode, if applicable +#define GPU_LOCK_FLAGS_READ NVBIT(1) +// Attempt acquire even if it potentially violates the locking order +// But do not block in a way that could cause a deadlock +#define GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE NVBIT(2) +// Old name alias +#define GPUS_LOCK_FLAGS_COND_ACQUIRE GPU_LOCK_FLAGS_COND_ACQUIRE + +// +// RM Lock Related Functions +// +NV_STATUS rmLocksAlloc(OBJSYS *); +void rmLocksFree(OBJSYS *); + +NV_STATUS rmLocksAcquireAll(NvU32 module); +void rmLocksReleaseAll(void); + +NV_STATUS workItemLocksAcquire(NvU32 gpuInstance, NvU32 flags, NvU32 *pReleaseLocks, NvU32 *pGpuMask); +void workItemLocksRelease(NvU32 releaseLocks, NvU32 gpuMask); + +// +// Thread priority boosting and throttling: +// Used to temporarily increase the priority of a thread on Windows platforms +// in order to prevent starvation from the scheduler. +// +void threadPriorityStateAlloc(void); +void threadPriorityStateFree(void); + +//! Temporarily boost the priority of the current thread +void threadPriorityBoost(NvU64* pBoostPriority, NvU64 *pOriginalPriority); + +//! Gradually lower the priority of the current thread if it is boosted and sufficient time has elapsed +void threadPriorityThrottle(void); + +//! 
Restore the original priority of the current thread if it is boosted +void threadPriorityRestore(void); + +NV_STATUS rmGpuGroupLockGetMask(NvU32 gpuInst, GPU_LOCK_GRP_ID gpuGrpId, GPU_MASK* pGpuMask); + +// +// Defines for rmGpuLockSetOwner operation. +// +#define GPUS_LOCK_OWNER_PENDING_DPC_REFRESH (OS_THREAD_HANDLE)(-1) + +NV_STATUS rmGpuLockInfoInit(void); +void rmGpuLockInfoDestroy(void); +NV_STATUS rmGpuLockAlloc(NvU32); +void rmGpuLockFree(NvU32); +NV_STATUS rmGpuLocksAcquire(NvU32, NvU32); +NvU32 rmGpuLocksRelease(NvU32, OBJGPU *); +void rmGpuLocksFreeze(GPU_MASK); +void rmGpuLocksUnfreeze(GPU_MASK); +NV_STATUS rmGpuLockHide(NvU32); +void rmGpuLockShow(NvU32); +NvBool rmGpuLockIsOwner(void); +NvU32 rmGpuLocksGetOwnedMask(void); +NvBool rmGpuLockIsHidden(OBJGPU *); +NV_STATUS rmGpuLockSetOwner(OS_THREAD_HANDLE); +NV_STATUS rmGpuGroupLockAcquire(NvU32, GPU_LOCK_GRP_ID, NvU32, NvU32, GPU_MASK *); +NV_STATUS rmGpuGroupLockRelease(GPU_MASK, NvU32); +NvBool rmGpuGroupLockIsOwner(NvU32, GPU_LOCK_GRP_ID, GPU_MASK*); + +NvBool rmDeviceGpuLockIsOwner(NvU32); +NV_STATUS rmDeviceGpuLockSetOwner(OBJGPU *, OS_THREAD_HANDLE); +NV_STATUS rmDeviceGpuLocksAcquire(OBJGPU *, NvU32, NvU32); +NvU32 rmDeviceGpuLocksRelease(OBJGPU *, NvU32, OBJGPU *); + +NV_STATUS rmIntrMaskLockAlloc(NvU32 gpuInst); +void rmIntrMaskLockFree(NvU32 gpuInst); +/// @note The return value is always zero, not the actual IRQL +NvU64 rmIntrMaskLockAcquire(OBJGPU *pGpu); +void rmIntrMaskLockRelease(OBJGPU *pGpu, NvU64 oldIrql); + +// wrappers for handling lock-related NV_ASSERT_OR_RETURNs +#define LOCK_ASSERT_AND_RETURN(cond) NV_ASSERT_OR_ELSE_STR((cond), #cond, return NV_ERR_INVALID_LOCK_STATE) +#define IRQL_ASSERT_AND_RETURN(cond) NV_ASSERT_OR_ELSE_STR((cond), #cond, return NV_ERR_INVALID_IRQ_LEVEL) +#define LOCK_ASSERT_AND_RETURN_BOOL(cond, bRet) NV_ASSERT_OR_ELSE_STR((cond), #cond, return (bRet)) + +#define LOCK_METER_OP(f,l,t,d0,d1,d2) +#define LOCK_METER_DATA(t,d0,d1,d2) + +#define rmInitLockMetering() +#define rmDestroyLockMetering() + +// +// RM API lock definitions are handled by the rmapi module. Providing legacy +// rmApiLockXxx interface for temporary compatibility. CORERM-1370 +// +#include "rmapi/rmapi.h" + +#define API_LOCK_FLAGS_NONE RMAPI_LOCK_FLAGS_NONE +#define API_LOCK_FLAGS_COND_ACQUIRE RMAPI_LOCK_FLAGS_COND_ACQUIRE + +#define rmApiLockAcquire(flags, module) (rmapiLockAcquire(flags, module)) +static NV_INLINE NV_STATUS rmApiLockRelease(void) {rmapiLockRelease(); return NV_OK;} +#define rmApiLockIsOwner() (rmapiLockIsOwner()) + +#endif // LOCKS_H diff --git a/src/nvidia/inc/kernel/core/prelude.h b/src/nvidia/inc/kernel/core/prelude.h new file mode 100644 index 000000000..d6bdbe88a --- /dev/null +++ b/src/nvidia/inc/kernel/core/prelude.h @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __PRELUDE_H__ +#define __PRELUDE_H__ + +/* ------------------------ C library --------------------------------------- */ +#include // NULL + +/* ------------------------ SDK includes ------------------------------------ */ + +#include "nvtypes.h" +#include "nvrangetypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "nvlimits.h" +#include "nvos.h" + +#include "nvctassert.h" + +/* ------------------------ RM library and utils ---------------------------- */ +#include "nvport/nvport.h" +#include "nvoc/runtime.h" +#include "nvoc/utility.h" +#include "core/printf.h" +#include "core/strict.h" +#include "utils/nvassert.h" + +/* ------------------------ Code-generation --------------------------------- */ +#include "rmconfig.h" // RMCONFIG header generated by config/rmconfig.pl +#include "g_rmconfig_private.h" // resman-private hal setup such as: IsGK104(), etc. +#include "g_nvh_state.h" // pass enable/disable state to NVOC headers +#include "g_odb.h" +#include "g_hal.h" + +#include "rmcd.h" + +/* ------------------------ Common types ------------------------------------ */ +typedef NvU64 RmPhysAddr; // A physical address should be 64 bits + +typedef struct THREAD_STATE_NODE THREAD_STATE_NODE; // FW declare thread state + +/* ------------------------ Utility Macros ---------------------------------- */ + +// +// Power of 2 alignment. +// (Will give unexpected results if 'gran' is not a power of 2.) +// (v - v + gran) ensures that gran is upcasted to match v before +// the ~ operation, without explicitly having to typecast it. +// +#define RM_ALIGN_DOWN(v, gran) ((v) & ~(((v) - (v) + (gran)) - 1)) +#define RM_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~(((v) - (v) + (gran))-1)) +#define RM_IS_ALIGNED(v, gran) ((((gran) - 1) & (v)) == 0) + +#define RM_ALIGN_PTR_DOWN(p, gran) ((void *) RM_ALIGN_DOWN(((NvUPtr)p), (gran))) +#define RM_ALIGN_PTR_UP(p, gran) ((void *) RM_ALIGN_UP(((NvUPtr)p), (gran))) + +#define RM_PAGE_ALIGN_DOWN(value) RM_ALIGN_DOWN((value), RM_PAGE_SIZE) +#define RM_PAGE_ALIGN_UP(value) RM_ALIGN_UP((value), RM_PAGE_SIZE) + +#define NV_DELTA(a, b) (NV_MAX((a), (b)) - NV_MIN((a), (b))) // Okay for unsigned or signed + +#define NV_ROUNDUP(a,b) ((NV_CEIL(a,b))*(b)) +#define NV_ROUND_TO_QUANTA(a, quanta) (((quanta) == 0) ? (a): ((((a) + ((quanta) >> 1)) / (quanta)) * (quanta))) +#define NV_FLOOR_TO_QUANTA(a, quanta) (((a) / (quanta)) * (quanta)) +#define NV_SIZEOF32(x) (sizeof(x)) +#define NV_ARRAY_ELEMENTS(x) ((sizeof(x)/sizeof((x)[0]))) +#define NV_ARRAY_ELEMENTS32(x) ((NV_SIZEOF32(x)/NV_SIZEOF32((x)[0]))) +#define NV_BYTESWAP16(a) ((((a) & 0xff00)>>8) | \ + (((a) & 0x00ff)<<8)) +#define NV_BYTESWAP32(a) ((((a) & 0xff000000)>>24) | \ + (((a) & 0x00ff0000)>>8) | \ + (((a) & 0x0000ff00)<<8) | \ + (((a) & 0x000000ff)<<24)) +#define NV_TO_LOWER(c) (((c)>='A'&&(c)<='Z')?(c)+('a'-'A'):(c)) +#define NV_TO_UPPER(c) (((c)>='a'&&(c)<='z')?((c)-'a'+'A'):(c)) + +/*! + * Creates a byte mask for a word at given offset. 
+ * offset = 0 0xffffff00 + * offset = 1 0xffff00ff + * offset = 2 0xff00ffff + * offset = 3 0x00ffffff + * + * @param[in] offset Offset for the mask. + */ +#define NV_BYTE_MASK(offset) (~(0xff << ((offset)<<3))) + +// +// note: the following trick fails if (z-1) * y > max_int +// +// since the calculation contains (x % z) * y, +// and the maximum value of (x % z) is (z-1). +// +// selecting the smaller of x and y to be y reduces the chances +// of problems, but for big enough z, the problem will return... +// +#define OVERFLOW_CAREFUL_MUL_DIV(x, y, z) \ + ((x) > (y)) ? (((x) / (z)) * (y) + (((x) % (z)) * (y)) / (z)) : (((y) / (z)) * (x) + (((y) % (z)) * (x)) / (z)) + +#define MASK_BITS(n) (~(0xFFFFFFFF << (n))) + +#endif /* __PRELUDE_H__ */ diff --git a/src/nvidia/inc/kernel/core/printf.h b/src/nvidia/inc/kernel/core/printf.h new file mode 100644 index 000000000..28d3a388e --- /dev/null +++ b/src/nvidia/inc/kernel/core/printf.h @@ -0,0 +1,368 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2001-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _PRINTF_H_ +#define _PRINTF_H_ + +/* + * RM PRINTF definitions. 
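Back in prelude.h, the power-of-two alignment helpers and NV_BYTE_MASK defined above are easiest to read with a few worked values (a sketch; exampleAlignmentValues is not part of any header):

    static NV_INLINE void exampleAlignmentValues(void)
    {
        // 0x1003 rounded to a 4 KB (0x1000) granularity
        NV_ASSERT(RM_ALIGN_DOWN(0x1003, 0x1000) == 0x1000);
        NV_ASSERT(RM_ALIGN_UP(0x1003, 0x1000)   == 0x2000);
        NV_ASSERT(!RM_IS_ALIGNED(0x1003, 0x1000));

        // NV_BYTE_MASK clears the byte at the given offset, matching the table above
        NV_ASSERT(NV_BYTE_MASK(2) == 0xff00ffff);
    }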
+ * + * Provides RM internal definitions built on the generic nvprintf utilities + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvport/nvport.h" +#include "utils/nvprintf.h" +#include "nvlog/nvlog.h" + +#define DBG_FILE_LINE_FUNCTION NV_FILE_STR, __LINE__, NV_FUNCTION_STR + +/** + * @todo bug 1583359 - Move to NvPort compiler specifics + */ +#if defined(__GNUC__) || defined(__clang__) +#define NV_RETURN_ADDRESS() __builtin_return_address(0) +#else +#define NV_RETURN_ADDRESS() _ReturnAddress() +#endif + + +//****************************************************************************** +// BREAKPOINTS +//****************************************************************************** + +// NV_DBG_BREAKPOINT_ALLOWED can be overridden through CFLAGS +#if !defined(NV_DBG_BREAKPOINT_ALLOWED) +#if defined(DEBUG) || defined(ASSERT_BUILD) || defined(NV_MODS) || defined(QA_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) +#define NV_DBG_BREAKPOINT_ALLOWED 1 +#else +#define NV_DBG_BREAKPOINT_ALLOWED 0 +#endif +#endif // !defined(NV_DBG_BREAKPOINT_ALLOWED) + +#define NV_DEBUG_BREAK_FLAGS_RC 0:0 +#define NV_DEBUG_BREAK_FLAGS_RC_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_RC_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_ASSERT 1:1 +#define NV_DEBUG_BREAK_FLAGS_ASSERT_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_ASSERT_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK 2:2 +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_DBG_BREAK_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT 3:3 +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS 4:4 +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_POOL_TAGS_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_POWER_ON 5:5 +#define NV_DEBUG_BREAK_FLAGS_POWER_ON_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_POWER_ON_ENABLE (0x00000001) +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR 6:6 +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR_DISABLE (0x0) +#define NV_DEBUG_BREAK_FLAGS_SMU_ERROR_ENABLE (0x1) +#define NV_DEBUG_BREAK_FLAGS_CRASH 7:7 +#define NV_DEBUG_BREAK_FLAGS_CRASH_DISABLE (0x00000000) +#define NV_DEBUG_BREAK_FLAGS_CRASH_ENABLE (0x00000001) + +#define NV_DEBUG_BREAK_ATTRIBUTES 7:0 +#define NV_DEBUG_BREAK_ATTRIBUTES_NONE (0x00000000) +#define NV_DEBUG_BREAK_ATTRIBUTES_RC (0x00000001) +#define NV_DEBUG_BREAK_ATTRIBUTES_ASSERT (0x00000002) +#define NV_DEBUG_BREAK_ATTRIBUTES_DBG_BREAK (0x00000004) +#define NV_DEBUG_BREAK_ATTRIBUTES_GPU_TIMEOUT (0x00000008) +#define NV_DEBUG_BREAK_ATTRIBUTES_POOL_TAGS (0x00000010) +#define NV_DEBUG_BREAK_ATTRIBUTES_POWER_ON (0x00000020) +#define NV_DEBUG_BREAK_ATTRIBUTES_SMU_ERROR (0x00000040) +#define NV_DEBUG_BREAK_ATTRIBUTES_CRASH (0x00000080) + +// Checks RMINFO and OS config to see if triggering a breakpoint is ever allowed +NvBool nvDbgBreakpointEnabled(void); +// Flushes the logs before a breakpoint, so we can see all the prints. 
+void osFlushLog(void); + +#define DBG_ROUTINE() \ + do \ + { \ + if (nvDbgBreakpointEnabled()) \ + PORT_BREAKPOINT_ALWAYS(); \ + } while (0) + +#define REL_DBG_BREAKPOINT() \ + REL_DBG_BREAKPOINT_MSG("NVRM-RC: Nvidia GPU Error Detected\n") + +#if NV_DBG_BREAKPOINT_ALLOWED + +#if !NVCPU_IS_RISCV64 + +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) \ + do \ + { \ + NV_PRINTF(LEVEL_ERROR, "bp @ " NV_FILE_FMT ":%d\n", NV_FILE, __LINE__);\ + osFlushLog(); \ + DBG_ROUTINE(); \ + } while (0) + +#else // !NVCPU_IS_RISCV64 + +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) \ + do \ + { \ + NV_ASSERT_FAILED("DBG_BREAKPOINT"); \ + } while (0) + +#endif // !NVCPU_IS_RISCV64 + +#define DBG_BREAKPOINT() DBG_BREAKPOINT_EX(NULL, 0) + +#define DBG_BREAKPOINT_EX_ARGS_IGNORED 1 +#define REL_DBG_BREAKPOINT_MSG(msg) \ + do \ + { \ + PORT_DBG_PRINT_STRING_LITERAL(msg); \ + DBG_BREAKPOINT(); \ + } while (0) + +#else // !NV_DBG_BREAKPOINT_ALLOWED + +#define DBG_BREAKPOINT() +#define DBG_BREAKPOINT_EX(PGPU, LEVEL) +#define DBG_BREAKPOINT_EX_ARGS_IGNORED 1 + +#define REL_DBG_BREAKPOINT_MSG(msg) \ + do \ + { \ + PORT_DBG_PRINT_STRING_LITERAL(msg); \ + DBG_ROUTINE(); \ + } while (0) + + +#endif // NV_DBG_BREAKPOINT_ALLOWED + +#define DBG_BREAKPOINT_REASON(reason) DBG_BREAKPOINT() + +#define DBG_BREAKPOINT_ERROR_INFO(errorCategory, errorInfo) DBG_BREAKPOINT() + +//****************************************************************************** +// PRINTS +//****************************************************************************** + +#include "utils/nvprintf.h" + +#define MAX_ERROR_STRING 256 +#ifndef NVPORT_CHECK_PRINTF_ARGUMENTS +#define NVPORT_CHECK_PRINTF_ARGUMENTS(x,c) +#endif +// +// Prototypes +// +NvBool nvDbgInit(void); +void nvDbgDestroy(void); +void nvDbg_Printf (const char *file, int line, const char *function, int debuglevel, const char *s, ...) NVPORT_CHECK_PRINTF_ARGUMENTS(5, 6); + +// +// Like libc's vsnprintf(), nvDbg_vPrintf() invalidates its va_list argument. The va_list argument +// may not be reused after nvDbg_vPrintf() returns. If the va_list is needed after the +// nvDbg_vPrintf() call, create a copy of the va_list using va_copy(). +// The caller controls the lifetime of the va_list argument, and should free it using va_end. 
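As a concrete illustration of the va_copy() guidance above, a caller that needs to forward the same argument list twice could be structured like this standalone sketch (logTwice is a hypothetical helper, not part of this header):

    #include <stdarg.h>

    void logTwice(const char *file, int line, const char *func, int level,
                  const char *fmt, ...)
    {
        va_list args;
        va_list argsCopy;

        va_start(args, fmt);
        va_copy(argsCopy, args);                 // take a private copy first

        nvDbg_vPrintf(file, line, func, level, fmt, args);      // invalidates 'args'
        nvDbg_vPrintf(file, line, func, level, fmt, argsCopy);  // copy is still valid

        va_end(argsCopy);
        va_end(args);
    }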
+// +void nvDbg_vPrintf (const char *file, int line, const char *function, int debuglevel, const char *s, va_list args); +void nvDbg_PrintBuf(const char *file, int line, const char *function, int debgulevel, NvU8 buffer[], NvU32 bufsize); + +int nvDbgVsnprintf(char *dest, NvU32 destSize, const char *fmt, va_list args); +int nvDbgSnprintf (char *dest, NvU32 destSize, const char *fmt, ...); + +struct OBJGPU; +void nvDbgInitRmMsg(struct OBJGPU *); +// RmMsgPrefix return value +#define NVRM_MSG_PREFIX_NVRM NVBIT(0) +#define NVRM_MSG_PREFIX_FILE NVBIT(1) +#define NVRM_MSG_PREFIX_FUNCTION NVBIT(2) +#define NVRM_MSG_PREFIX_LINE NVBIT(3) +#define NVRM_MSG_PREFIX_OSTIMESTAMP NVBIT(4) +NvU32 RmMsgPrefix(NvU32 prefix, const char *filename, NvU32 linenumber, const char *function, char *str, NvU32 len); +// nvDbgRmMsgCheck return code +#define NVRM_MSG_NORMAL 0 // Use normal message handling (warnings/errors) +#define NVRM_MSG_HIDE 1 // Skip this message +#define NVRM_MSG_PRINT 2 // Force printing of this message +NvU32 nvDbgRmMsgCheck(const char *filename, NvU32 linenumber, const char *function, NvU32 level, const char *format, NvU32 *pPrefix); +void nvDbgDumpBufferBytes(void *pBuffer, NvU32 length); + + +#if NV_PRINTF_STRINGS_ALLOWED +#define DBG_STRING(str) str +#define DBG_INIT() nvDbgInit() +#define DBG_DESTROY() nvDbgDestroy() +#define DBG_VSNPRINTF(ptr_size_format_and_stuff) nvDbgVsnprintf ptr_size_format_and_stuff +#define DBG_PRINTBUF(dbglevel, buffer, bufsize) nvDbg_PrintBuf(DBG_FILE_LINE_FUNCTION, dbglevel, buffer, bufsize) +#define DBG_RMMSG_CHECK(level) (nvDbgRmMsgCheck(DBG_FILE_LINE_FUNCTION, level, NULL, NULL) == NVRM_MSG_PRINT) +#else // ! NV_PRINTF_STRINGS_ALLOWED -- debug printf strings not enabled +#define DBG_STRING(str) "" +#define DBG_INIT() (NV_TRUE) +#define DBG_DESTROY() +#define DBG_VSNPRINTF(ptr_size_format_and_stuff) +#define DBG_PRINTBUF(dbglevel, buffer, bufsize) +#define DBG_RMMSG_CHECK(level) (0) +#endif // NV_PRINTF_STRINGS_ALLOWED + + + +//****************************************************************************** +// POWER SANITY CHECKS +//****************************************************************************** +// +// Make sure the GPU is in full power or resuming from D3 state. Else, +// bailout from the calling function. An exception for systems, which support +// surprise removal feature. See Bugs 440565, 479003, and 499228.DO NOT IGNORE +// OR REMOVE THIS ASSERT. If you have problems with it, please talk to cplummer. +// +// bAllowWithoutSysmemAccess: Allow this RM Control when sysmem access is not available +// from the GPU. SHould be NV_TRUE only for NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS +// +// On systems supporting surprise removal, if the GPU is in D3 cold +// and still attached we would consider it a true D3 cold state +// and return NOT_FULL_POWER. See bug 1679965. 
+// +// +#define API_GPU_FULL_POWER_SANITY_CHECK(pGpu, bGpuAccess, bAllowWithoutSysmemAccess) \ + if ((!gpuIsGpuFullPower(pGpu)) && \ + (!(pGpu)->getProperty((pGpu), \ + PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \ + { \ + DBG_BREAKPOINT(); \ + if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \ + { \ + return NV_ERR_GPU_NOT_FULL_POWER; \ + } \ + else if (gpuIsSurpriseRemovalSupported(pGpu) && \ + (pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \ + { \ + return NV_ERR_GPU_NOT_FULL_POWER; \ + } \ + } \ + if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \ + { \ + return NV_ERR_GPU_NOT_FULL_POWER; \ + } + +#define API_GPU_FULL_POWER_SANITY_CHECK_OR_GOTO(pGpu, bGpuAccess, bAllowWithoutSysmemAccess, status, tag) \ + if ((!gpuIsGpuFullPower(pGpu)) && \ + (!(pGpu)->getProperty((pGpu), \ + PDB_PROP_GPU_IN_PM_RESUME_CODEPATH))) \ + { \ + DBG_BREAKPOINT(); \ + if (bGpuAccess || (!gpuIsSurpriseRemovalSupported(pGpu))) \ + { \ + status = NV_ERR_GPU_NOT_FULL_POWER; \ + goto tag; \ + } \ + else if (gpuIsSurpriseRemovalSupported(pGpu) && \ + (pGpu)->getProperty((pGpu), PDB_PROP_GPU_IS_CONNECTED)) \ + { \ + status = NV_ERR_GPU_NOT_FULL_POWER; \ + goto tag; \ + } \ + } \ + if (!(bAllowWithoutSysmemAccess) && !gpuCheckSysmemAccess(pGpu)) \ + { \ + return NV_ERR_GPU_NOT_FULL_POWER; \ + } + + +#if defined(PORT_IS_FUNC_SUPPORTED) +#if PORT_IS_FUNC_SUPPORTED(portMemExValidate) +#define DBG_VAL_PTR(p) portMemExValidate(p, NV_TRUE) +#endif +#endif +#ifndef DBG_VAL_PTR +#define DBG_VAL_PTR(p) +#endif + + +//******************************************************************************** +// +// NVRM_TRACE support +// low-overhead runtime state capture +// to enable, define USE_NVRM_TRACE (retail or debug builds) +// +//******************************************************************************** + +#ifdef USE_NVRM_TRACE + +NvU32 NVRM_TRACE_INIT(void); +NvU32 NVRM_TRACE_DISABLE(void); +void NVRM_TRACE_ENABLE(void); +void NVRM_TRACE_DUMP(void); +void NVRM_TRACE(NvU32); +void NVRM_TRACEV(NvU32 *,NvU32); +void NVRM_TRACE1(NvU32); +void NVRM_TRACE2(NvU32, NvU32); +void NVRM_TRACE3(NvU32, NvU32, NvU32); +void NVRM_TRACE4(NvU32, NvU32, NvU32, NvU32); +void NVRM_TRACE5(NvU32, NvU32, NvU32, NvU32, NvU32); + +// versions of reg read/write that log to trace buffer +//NvU32 NVRM_TRACE_REG_RD32(OBJGPU *, NvU32); +//void NVRM_TRACE_REG_WR32(OBJGPU *, NvU32, NvU32); + +// fifolog format looks like: +// 31:28 = unique file number +// 27:4 = file line number +// 1:0 = fifo state bits (bit1 = puller, bit0 = reassign) +#define FIFOLOG(fn,fa,fb) NVRM_TRACE2('FIFO', ((fn << 28) | (__LINE__ << 4) | \ + ((fa & 0x1) ? 1 : 0) << 1 | \ + ((fb & 0x1) ? 1 : 0)) ) + +#else // ! USE_NVRM_TRACE + +#define NVRM_TRACE_INIT() +#define NVRM_TRACE_DISABLE() 0 +#define NVRM_TRACE_ENABLE() +#define NVRM_TRACE_DUMP() +#define NVRM_TRACE(c0) +#define NVRM_TRACE1(c0) +#define NVRM_TRACE2(c0, c1) +#define NVRM_TRACE3(c0, c1, c2) +#define NVRM_TRACE4(c0, c1, c2, c3) +#define NVRM_TRACE5(c0, c1, c2, c3, c4) +#define FIFOLOG(a,b,c) + +#endif // ! 
USE_NVRM_TRACE + +#define NVRM_TRACE_ERROR(code, status) NVRM_TRACE3('EEEE', (code), (status)) +#define NVRM_TRACE_API(code, p0, p1, p2) NVRM_TRACE5('API ', (code), (p0), (p1), (p2)) + +void nvErrorLog(void *pVoid, NvU32 num, const char *pFormat, va_list arglist); +void nvErrorLog_va(void * pGpu, NvU32 num, const char * pFormat, ...); + +// memory allocation tracking data structs and globals +#define MAX_STACK_LEVEL 6 + +#ifdef __cplusplus +} +#endif + +#endif // _PRINTF_H_ diff --git a/src/nvidia/inc/kernel/core/strict.h b/src/nvidia/inc/kernel/core/strict.h new file mode 100644 index 000000000..d102e45ed --- /dev/null +++ b/src/nvidia/inc/kernel/core/strict.h @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __STRICT_H__ +#define __STRICT_H__ + +// +// RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_XYZ should be set +// before including any RM internal headers when disabling deprecated +// definitions is desired. +// +// For transition during refactoring, we might introduce new types and +// interfaces and use macros/wrappers to forward the old interface to +// the new one. +// +// Once a callsite is migrated to use the new interface it can use RM +// strict to disable the deprecated definitions to prevent changes from +// reintroducing calls to a deprecated interface within a cleansed +// module. +// +// Controlling disablement of deprecated definitions is versioned. This +// enables us to introduce new deprecated interfaces incrementally. +// Example, ModuleA might scrub to versionX (removal of OBJFB defns) but +// not versionY (removal of legacy CLI types). +// +// Flags to turn off deprecated definitions are intended to be +// temporary, once all modules remove references the deprecated +// definitions and knobs in this header should be deleted. +// +#ifdef RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_JAN_21_2020 +#define RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS 0 +#define RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS 0 +#endif + +// +// RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_XYZ should be set before +// including any RM internal headers when disabling "physical" definitions is +// desired. 
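As a sketch of how the deprecated-definitions knob above is meant to be consumed, a source file that has already been scrubbed of OBJFB and context-DMA references can opt in before pulling in any RM internal headers (the particular header shown is only an example):

    // At the very top of the scrubbed .c file, before any RM internal headers:
    #define RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_JAN_21_2020

    #include "core/core.h"   // deprecated OBJFB / context-DMA definitions are compiled out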
+// +// Physical definitions refers to interfaces/types that are only used by GSP-RM +// and VGPU-host, i.e.: not to be used by VGPU Client or GSP Client +// +#ifdef RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_JAN_21_2020 +#define RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS 0 +#endif + +// +// Default deprecated and "physical engine" definitions on unless specified +// +#ifndef RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_DEPRECATED_OBJFB_DEFINITIONS 1 +#endif + +#ifndef RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS 1 +#endif + +#ifndef RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_MEMORY_SYSTEM_DEFINITIONS 1 +#endif + +// +// "Physical engine" definitions not yet included in any version, but available +// for T234X. Should be defined to 0 before including any RM internal headers +// when disabling OBJDISP (and related) definitions is desired. +// +#ifndef RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 1 +#endif + +// +// Generate OBJGPU engine accessors (GPU_GET_FOO(pGpu)) for disabled engines. +// These will always return NULL, but will allow the code that references them +// to compile. +// +#ifndef RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS +#define RM_STRICT_CONFIG_EMIT_DISABLED_GPU_ENGINE_ACCESSORS 1 +#endif + +#endif /* __STRICT_H__ */ diff --git a/src/nvidia/inc/kernel/core/system.h b/src/nvidia/inc/kernel/core/system.h new file mode 100644 index 000000000..4e95886b9 --- /dev/null +++ b/src/nvidia/inc/kernel/core/system.h @@ -0,0 +1,3 @@ + +#include "g_system_nvoc.h" + diff --git a/src/nvidia/inc/kernel/core/thread_state.h b/src/nvidia/inc/kernel/core/thread_state.h new file mode 100644 index 000000000..6567989fe --- /dev/null +++ b/src/nvidia/inc/kernel/core/thread_state.h @@ -0,0 +1,217 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef THREAD_STATE_H +#define THREAD_STATE_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Defines and structures used for Thread State management * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "os/os.h" +#include "tls/tls.h" +#include "containers/map.h" +#include "containers/list.h" + +typedef struct OBJGPU OBJGPU; + +// +// Thread State Tracking structures and defines +// +typedef struct THREAD_TIMEOUT_STATE +{ + NvU64 enterTime; + NvU64 nonComputeTime; + NvU64 computeTime; + NvU64 nextCpuYieldTime; + NvU64 overrideTimeoutMsecs; + +} THREAD_TIMEOUT_STATE, *PTHREAD_TIMEOUT_STATE; + +typedef struct THREAD_STATE_FREE_CALLBACK +{ + void *pCbData; + void (*pCb)(void *pCbData); +} THREAD_STATE_FREE_CALLBACK; + +MAKE_LIST(THREAD_STATE_FREE_CB_LIST, THREAD_STATE_FREE_CALLBACK); + +typedef struct THREAD_STATE_NODE THREAD_STATE_NODE; + +struct THREAD_STATE_NODE +{ + OS_THREAD_HANDLE threadId; + /*! + * Thread sequencer id. This is a unique identifier for a given thread + * entry into the RM. This is separate from @ref threadId, as the threadId + * is really the OS's thread handle/pointer. In cases where the same + * physical thread is re-used (e.g. WORK_ITEMs are scheduled from a + * pre-allocated pool of worker threads), different RM threads will have the + * same threadId. + * + * This value is set by @ref threadStateInitXYZ() based off the global @ref + * THREAD_STATE_DB::threadSeqCntr. + */ + NvU32 threadSeqId; + NvBool bValid; + THREAD_TIMEOUT_STATE timeout; + NvU32 cpuNum; + NvU32 flags; + MapNode node; + + /*! + * If a callback is installed, threadStateFree() may block on it. + * + * The installed callbacks will be processed in FIFO order only. + * + * Only supported on non-ISR CPU RM paths. + */ + THREAD_STATE_FREE_CB_LIST cbList; +}; + +MAKE_INTRUSIVE_MAP(ThreadStateNodeMap, THREAD_STATE_NODE, node); + +typedef struct THREAD_STATE_DB_TIMEOUT +{ + NvU64 nonComputeTimeoutMsecs; + NvU64 computeTimeoutMsecs; + NvU32 computeGpuMask; + NvU32 flags; + +} THREAD_STATE_DB_TIMEOUT, *PTHREAD_STATE_DB_TIMEOUT; + +#define THREAD_STATE_TRACE_MAX_ENTRIES 8 + +typedef struct THREAD_STATE_TRACE_ENTRY +{ + NvU64 callerRA; + NvU32 flags; + +} THREAD_STATE_TRACE_ENTRY; + +typedef struct THREAD_STATE_TRACE_INFO +{ + NvU32 index; + THREAD_STATE_TRACE_ENTRY entries[THREAD_STATE_TRACE_MAX_ENTRIES]; + +} THREAD_STATE_TRACE_INFO; + +typedef struct THREAD_STATE_ISR_LOCKLESS +{ + THREAD_STATE_NODE **ppIsrThreadStateGpu; +} THREAD_STATE_ISR_LOCKLESS, *PTHREAD_STATE_ISR_LOCKLESS, **PPTHREAD_STATE_ISR_LOCKLESS; + +typedef struct THREAD_STATE_DB +{ + NvU32 setupFlags; + NvU32 maxCPUs; + /*! + * Thread state sequencer id counter. The last allocated thread state + * sequencer id via @ref threadStateInitXYZ(). + */ + NvU32 threadSeqCntr; + PORT_SPINLOCK *spinlock; + ThreadStateNodeMap dbRoot; + ThreadStateNodeMap dbRootPreempted; + THREAD_STATE_NODE **ppISRDeferredIntHandlerThreadNode; + PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode; + THREAD_STATE_DB_TIMEOUT timeout; + THREAD_STATE_TRACE_INFO traceInfo; +} THREAD_STATE_DB, *PTHREAD_STATE_DB; + +// +// This is the same for all OSes. This value was chosen because it is +// the minimum found on any OS at the time of this writing (May, 2008). +// +#define TIMEOUT_DEFAULT_OS_RESCHEDULE_INTERVAL_SECS 2 + +// +// The normal power transition requirement for Windows is 4 seconds. +// Use longer time to let OS fire timeout and ask recovery. 
+// +#define TIMEOUT_WDDM_POWER_TRANSITION_INTERVAL_MS 9800 + +// +// Thread State flags used for threadStateInitSetupFlags +// +#define THREAD_STATE_SETUP_FLAGS_NONE 0 +#define THREAD_STATE_SETUP_FLAGS_ENABLED NVBIT(0) +#define THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED NVBIT(1) +#define THREAD_STATE_SETUP_FLAGS_SLI_LOGIC_ENABLED NVBIT(2) +#define THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED NVBIT(3) +#define THREAD_STATE_SETUP_FLAGS_ASSERT_ON_TIMEOUT_ENABLED NVBIT(4) +#define THREAD_STATE_SETUP_FLAGS_ASSERT_ON_FAILED_LOOKUP_ENABLED NVBIT(5) +#define THREAD_STATE_SETUP_FLAGS_RESET_ON_TIMEOUT_ENABLED NVBIT(6) +#define THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED NVBIT(7) +#define THREAD_STATE_SETUP_FLAGS_PRINT_INFO_ENABLED NVBIT(31) + +// +// Thread State flags used for threadState[Init,Free] +// +#define THREAD_STATE_FLAGS_NONE 0 +#define THREAD_STATE_FLAGS_IS_ISR NVBIT(0) +#define THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER NVBIT(1) +#define THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER NVBIT(2) +#define THREAD_STATE_FLAGS_IS_ISR_LOCKLESS NVBIT(3) +#define THREAD_STATE_FLAGS_TIMEOUT_INITED NVBIT(5) +#define THREAD_STATE_FLAGS_PLACED_ON_PREEMPT_LIST NVBIT(6) +#define THREAD_STATE_FLAGS_DEVICE_INIT NVBIT(7) +#define THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED NVBIT(8) + +// These Threads run exclusively between a conditional acquire +#define THREAD_STATE_FLAGS_EXCLUSIVE_RUNNING (THREAD_STATE_FLAGS_IS_ISR | \ + THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER | \ + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER) + +#define THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING (THREAD_STATE_FLAGS_IS_ISR_DEFERRED_INT_HANDLER | \ + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER) + +NV_STATUS threadStateGlobalAlloc(void); +void threadStateGlobalFree(void); +void threadStateInitRegistryOverrides(OBJGPU *pGpu); +void threadStateInitSetupFlags(NvU32 flags); +NvU32 threadStateGetSetupFlags(void); + +void threadStateInitISRLockless(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateFreeISRLockless(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateInitISRAndDeferredIntHandler(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateFreeISRAndDeferredIntHandler(THREAD_STATE_NODE *, OBJGPU*, NvU32); +void threadStateInit(THREAD_STATE_NODE *pThreadNode, NvU32 flags); +void threadStateFree(THREAD_STATE_NODE *pThreadNode, NvU32 flags); + +NV_STATUS threadStateGetCurrent(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu); +NV_STATUS threadStateGetCurrentUnchecked(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu); +NV_STATUS threadStateInitTimeout(OBJGPU *pGpu, NvU32 timeoutUs, NvU32 flags); +NV_STATUS threadStateCheckTimeout(OBJGPU *pGpu, NvU64 *pElapsedTimeUs); +NV_STATUS threadStateResetTimeout(OBJGPU *pGpu); +void threadStateLogTimeout(OBJGPU *pGpu, NvU64 funcAddr, NvU32 lineNum); +void threadStateYieldCpuIfNecessary(OBJGPU *pGpu); +void threadStateSetTimeoutOverride(THREAD_STATE_NODE *, NvU64); + +NV_STATUS threadStateEnqueueCallbackOnFree(THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback); +void threadStateRemoveCallbackOnFree(THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback); +#endif // THREAD_STATE_H diff --git a/src/nvidia/inc/kernel/diagnostics/gpu_acct.h b/src/nvidia/inc/kernel/diagnostics/gpu_acct.h new file mode 100644 index 000000000..19855b1de --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/gpu_acct.h @@ -0,0 +1,3 @@ + +#include "g_gpu_acct_nvoc.h" + diff --git a/src/nvidia/inc/kernel/diagnostics/journal.h 
b/src/nvidia/inc/kernel/diagnostics/journal.h new file mode 100644 index 000000000..d19db84f4 --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/journal.h @@ -0,0 +1,3 @@ + +#include "g_journal_nvoc.h" + diff --git a/src/nvidia/inc/kernel/diagnostics/journal_structs.h b/src/nvidia/inc/kernel/diagnostics/journal_structs.h new file mode 100644 index 000000000..c435c8c20 --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/journal_structs.h @@ -0,0 +1,53 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef JOURNAL_STRUCTS_H +#define JOURNAL_STRUCTS_H 1 + +#include "nvcd.h" +#include "rmcd.h" + +// Meta Data to Describe an error block +typedef struct RMCD_ERROR_BLOCK { + NvU8 * pBlock; + NvU32 blockSize; + struct RMCD_ERROR_BLOCK * pNext; +} RMCD_ERROR_BLOCK; + +typedef struct RMERRORHEADER { + struct RMFIFOERRORELEMENT_V3 *pNextError; + RMCD_ERROR_BLOCK *pErrorBlock; + NvU32 GPUTag; + NvU32 ErrorNumber; +} RMERRORHEADER; + +typedef struct { + RMERRORHEADER ErrorHeader; + RmPrbInfo_RECORD_V2 RmPrbErrorData; +} RMPRBERRORELEMENT_V2; + +typedef struct RMFIFOERRORELEMENT_V3 { + RMERRORHEADER ErrorHeader; +} RMFIFOERRORELEMENT_V3; + +#endif /* ifndef JOURNAL_STRUCTS_H */ diff --git a/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h b/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h new file mode 100644 index 000000000..1a09575ed --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/nv_debug_dump.h @@ -0,0 +1,3 @@ + +#include "g_nv_debug_dump_nvoc.h" + diff --git a/src/nvidia/inc/kernel/diagnostics/profiler.h b/src/nvidia/inc/kernel/diagnostics/profiler.h new file mode 100644 index 000000000..26def56ee --- /dev/null +++ b/src/nvidia/inc/kernel/diagnostics/profiler.h @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _PROFILER_H_ +#define _PROFILER_H_ + +/*! + * @file profiler.h + * @brief Simple API to measure elapsed times in RM for profiling and statistics + * + * The primary goals of this API are to: + * 1. Be lightweight and have little-to-no setup required (built into release drivers) + * 2. Defer as much analysis as possible to the user of the data (keep it simple) + * 3. Provide sub-millisecond resolution if possible (medium-high granularity) + * + * This is intended mainly for coarse measurements of time-critical software + * sequences, such as GC6. For example, the measurements could be used to catch + * major latency regressions in a particular timing module. + * + * For more sophisticated profiling (e.g. for prospective analysis), use of an + * external profiling tool (e.g. xperf with ETW) is recommended instead. + */ + +#include "core/core.h" + +/*! + * Record containing the statistics of a single time module to be profiled + * periodically. + * + * This tracks the min/max elapsed time over all the measurement + * cycles, as well as the total elapsed time and number of cycles. + * To calculate the average elapsed time per cycle, divide total_ns by count. + * + * 64-bit precision integers are used to hold nanosecond resolution + * over long periods of time (e.g. greater than 4 seconds). + */ +typedef struct +{ + NvU64 count; //gpuId, data, RMTRACE_FUNC_PROG_ENTER); +// ... +// RMTRACE_MARKER_PROBE("About To Enter XXX", pGpu->gpuId, data, RMTRACE_FUNC_PROG_STEP); +// XXX(); +// ... +// BBB(); +// ... +// RMTRACE_MARKER_PROBE("AAA Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_EXIT); +// } +// +// +// void BBB() +// { +// RMTRACE_MARKER_PROBE("BBB Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_ENTER); +// ... +// CCC(); +// ... 
+// RMTRACE_MARKER_PROBE("BBB Function", pGpu->gpuId, data, RMTRACE_FUNC_PROG_EXIT); +// } +// +// With a tool (like EtwTool), we can generate below message automatically +// +// AAA Function (Enter) +// (0.1234ms) +// About to Enter XXX +// (0.0012ms) +// BBB Function (Enter) +// BBB Function (Leave) - 0.23ms +// AAA Function (Leave) -- 0.4111ms +// + +#define RMTRACE_FUNC_PROG_ENTER 0x0000 +#define RMTRACE_FUNC_PROG_EXIT 0x00FF +#define RMTRACE_FUNC_PROG_STEP 0x007F +#define RMTRACE_UNKNOWN_GPUID 0xFFFFFFFF +#define RMTRACE_UNUSED_PARAM 0 +#define RMTRACE_MAX_PRINT_BUFFER_SIZE 128 + +// +// Empty macros +// + +#define RMTRACE_INIT_NEW() +#define RMTRACE_DESTROY_NEW() +#define RMTRACE_SET_PTIMER_LOG(enable) +#define RMTRACE_IS_PTIMER_LOG_ENABLED() \ + NV_FALSE +#define RMTRACE_RMAPI(id, cmd) +#define RMTRACE_RMLOCK(id) +#define RMTRACE_DISP1(id, gpuId, param1) +#define RMTRACE_DISP2(id, gpuId, param1, param2) +#define RMTRACE_DISP3(id, gpuId, param1, param2, param3) +#define RMTRACE_DISP4(id, gpuId, param1, param2, param3, param4) +#define RMTRACE_DISP5(id, gpuId, param1, param2, param3, param4, param5) +#define RMTRACE_DISP6(id, gpuId, param1, param2, param3, param4, param5, param6) +#define RMTRACE_DISP_EDID(gpuId, publicId, connectedId, data, size) +#define RMTRACE_DISP_BRIGHTNESS_ENTRY(dispId, flags, blType, pwmInfoProvider, pwmInfoEntries, SBEnable, lmnProvider, lmnEntryCount, blPwmInfoSize, blPwmInfo) +#define RMTRACE_DISP_ERROR(id, gpuId, param1, param2, status) +#define RMTRACE_DISP_EXCEPTION(gpuId, param1, param2, param3, param4, param5) +#define RMTRACE_GPIO(id, _function, _state, _gpioPin, param) +#define RMTRACE_GPIO_LIST(id, count, list) +#define RMTRACE_I2C(id, gpuId, portId, address, indexSize, pIndex, dataSize, pData, status) +#define RMTRACE_I2C_SET_ACQUIRED(gpuId, portId, acquirer, status, curTime) +#define RMTRACE_I2C_ENUM_PORTS(gpuId, count, ports) +#define RMTRACE_GPU(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_RMJOURNAL(id, gpuId, type, group, key, count, firstTime, lastTime) +#define RMTRACE_POWER(id, gpuId, state, head, forcePerf, fastBootPowerState) +#define RMTRACE_PERF(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_THERM2(id, gpuId, param1, param2) +#define RMTRACE_THERM3(id, gpuId, param1, param2, param3) +#define RMTRACE_THERM6(id, gpuId, param1, param2, param3, param4, param5, param6) +#define RMTRACE_TIMEOUT(id, gpuId) +#define RMTRACE_VBIOS(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_VBIOS_ERROR(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_NVLOG(id, pData, dataSize) +#define RMTRACE_SBIOS(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_USBC0(id, gpuId) +#define RMTRACE_USBC1(id, gpuId, param1) +#define RMTRACE_USBC2(id, gpuId, param1, param2) +#define RMTRACE_USBC7(id, gpuId, param1, param2, param3, param4, param5, param6, param7) +#define RMTRACE_RMGENERAL(id, param1, param2, param3) +#define RMTRACE_NVTELEMETRY(id, gpuId, param1, param2, param3) +#define RMTRACE_NOCAT(id, gpuId, type, group, key, count, timeStamp) +#define RMTRACE_PRINT + + +#ifndef RMTRACE_FLAG_ENABLED +#define RMTRACE_FLAG_ENABLED (0) +#endif + +// +// Empty macros +// +#define RMTRACE_INIT() +#define RMTRACE_DESTROY() +#define RMTRACE_ENABLE(eventEventMask) +#define RMTRACE_PROBE(module, event) + +#define RMTRACE_PROBE1(module, event, dataType, data, dataSize) + +#define RMTRACE_PROBE2(module, 
event, dataType1, data1, dataSize1, dataType2, data2, dataSize2) + +#define RMTRACE_PROBE3(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3) + +#define RMTRACE_PROBE4(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4) + +#define RMTRACE_PROBE5(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5) + +#define RMTRACE_PROBE6(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6) + +#define RMTRACE_PROBE7(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6, \ + dataType7, data7, dataSize7) +#define RMTRACE_PROBE10(module, event, dataType1, data1, dataSize1, dataType2, data2, dataSize2, \ + dataType3, data3, dataSize3, dataType4, data4, dataSize4, \ + dataType5, data5, dataSize5, dataType6, data6, dataSize6, \ + dataType7, data7, dataSize7, dataType8, data8, dataSize8, \ + dataType9, data9, dataSize9, dataType10, data10, dataSize10) +#define RMTRACE_PROBE2_PRIMTYPE(module, event, type0, val0, type1, val1) +#define RMTRACE_PROBE3_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2) +#define RMTRACE_PROBE4_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3) +#define RMTRACE_PROBE5_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4) +#define RMTRACE_PROBE7_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4, type5, val5, type6, val6) +#define RMTRACE_PROBE10_PRIMTYPE(module, event, type0, val0, type1, val1, type2, val2, type3, val3, \ + type4, val4, type5, val5, type6, val6, type7, val7, type8, val8, \ + type9, val9) +#define RMTRACE_MARKER_PROBE(name, gpuId, payload, id) + + +#endif /* TRACER_H */ diff --git a/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h b/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h new file mode 100644 index 000000000..30d9aaf83 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/audio/hda_codec_api.h @@ -0,0 +1,3 @@ + +#include "g_hda_codec_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/bif/kernel_bif.h b/src/nvidia/inc/kernel/gpu/bif/kernel_bif.h new file mode 100644 index 000000000..379dc4126 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/bif/kernel_bif.h @@ -0,0 +1,3 @@ + +#include "g_kernel_bif_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/bus/kern_bus.h b/src/nvidia/inc/kernel/gpu/bus/kern_bus.h new file mode 100644 index 000000000..afb7d426f --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/bus/kern_bus.h @@ -0,0 +1,3 @@ + +#include "g_kern_bus_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/bus/p2p_api.h b/src/nvidia/inc/kernel/gpu/bus/p2p_api.h new file mode 100644 index 000000000..08ba38f1d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/bus/p2p_api.h @@ -0,0 +1,3 @@ + +#include "g_p2p_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/bus/third_party_p2p.h b/src/nvidia/inc/kernel/gpu/bus/third_party_p2p.h new file mode 100644 index 000000000..14b2dd0db --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/bus/third_party_p2p.h @@ -0,0 +1,3 @@ + +#include "g_third_party_p2p_nvoc.h" + diff --git 
a/src/nvidia/inc/kernel/gpu/ce/kernel_ce.h b/src/nvidia/inc/kernel/gpu/ce/kernel_ce.h new file mode 100644 index 000000000..d1452d6cd --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/ce/kernel_ce.h @@ -0,0 +1,3 @@ + +#include "g_kernel_ce_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/ce/kernel_ce_context.h b/src/nvidia/inc/kernel/gpu/ce/kernel_ce_context.h new file mode 100644 index 000000000..46ebd2de6 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/ce/kernel_ce_context.h @@ -0,0 +1,3 @@ + +#include "g_kernel_ce_context_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/ce/kernel_ce_gv100_private.h b/src/nvidia/inc/kernel/gpu/ce/kernel_ce_gv100_private.h new file mode 100644 index 000000000..0136475a1 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/ce/kernel_ce_gv100_private.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERNEL_CE_GV100_PRIVATE_H +#define KERNEL_CE_GV100_PRIVATE_H + +#define MAX_CE_CNT 15 + +/* + * sysmemLinks + * Represents the number of sysmem links detected + * This affects how many PCEs LCE0(sysmem read CE) + * and LCE1(sysmem write CE) should be mapped to + * maxLinksPerPeer + * Represents the maximum number of peer links + * between this GPU and all its peers. This affects + * how many PCEs LCE3(P2P CE) should be mapped to + * numPeers + * Represents the number of Peer GPUs discovered so far + * bSymmetric + * Represents whether the topology detected so far + * is symmetric i.e. has same number of links to all + * peers connected through nvlink. 
This affects how + * many PCEs to assign to LCEs3-5 (nvlink P2P CEs) + * bSwitchConfig + * Represents whether the config listed is intended + * for use with nvswitch systems + * pceLceMap + * Value of NV_CE_PCE2LCE_CONFIG0 register with the + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + * grceConfig + * Value of NV_CE_GRCE_CONFIG register with the + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + * exposeCeMask + * Mask of CEs to expose to clients for the above + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + */ +typedef struct NVLINK_CE_AUTO_CONFIG_TABLE +{ + NvU32 sysmemLinks; + NvU32 maxLinksPerPeer; + NvU32 numPeers; + NvBool bSymmetric; + NvBool bSwitchConfig; + NvU32 pceLceMap[MAX_CE_CNT]; + NvU32 grceConfig[MAX_CE_CNT]; + NvU32 exposeCeMask; +} NVLINK_CE_AUTO_CONFIG_TABLE; + +// +// General convention decided on between HW and SW: +// - CE2 is for SYSMEM reads +// - CE3 is for SYSMEM writes +// - CE4-8 are for P2P +// +#define NVLINK_SYSMEM_READ_LCE 2 +#define NVLINK_SYSMEM_WRITE_LCE 3 +#define NVLINK_MIN_P2P_LCE 4 +#define NVLINK_MAX_P2P_LCE 8 + +#endif diff --git a/src/nvidia/inc/kernel/gpu/ce/kernel_ce_private.h b/src/nvidia/inc/kernel/gpu/ce/kernel_ce_private.h new file mode 100644 index 000000000..fb74ea0da --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/ce/kernel_ce_private.h @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERNEL_CE_PRIVATE_H +#define KERNEL_CE_PRIVATE_H + +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +#define CE_GET_LOWEST_AVAILABLE_IDX(x) portUtilCountTrailingZeros32(x) + +/*! + * @brief Obtain relative CE index. + * + * @param localEngType NV2080_ENGINE_TYPE_ for this CE, or partition-local engine type. + * @param ceIdx CE index in 0..GPU_MAX_CES-1 + * + * @return NV_OK if the conversion is successful. 
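 *
 * For illustration, a control-call handler validating a client-supplied copy
 * engine could use it roughly as follows (a sketch; localCeType stands for a
 * hypothetical engine type taken from the client's parameters):
 *
 *     NvU32 ceIdx;
 *     NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
 *         ceIndexFromType(pGpu, hClient, localCeType, &ceIdx));
 *     // ceIdx is now the 0-based copy engine index derived via
 *     // NV2080_ENGINE_TYPE_COPY_IDX()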
+ */ +static NV_INLINE +NV_STATUS ceIndexFromType(OBJGPU *pGpu, NvHandle hClient, NvU32 localEngType, NvU32 *ceIdx) +{ + // + // If MIG is enabled, client passes a logical engineId w.r.t its own partition + // we need to convert this logical Id to a physical engine Id as we use it + // to set runlistId + // + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, &ref)); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + localEngType, + ceIdx)); + } + else + { + *ceIdx = localEngType; + } + + if (!NV2080_ENGINE_TYPE_IS_COPY(*ceIdx)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *ceIdx = NV2080_ENGINE_TYPE_COPY_IDX(*ceIdx); + return NV_OK; +} + +#endif // KERNEL_CE_PRIVATE_H diff --git a/src/nvidia/inc/kernel/gpu/ce/kernel_ce_shared.h b/src/nvidia/inc/kernel/gpu/ce/kernel_ce_shared.h new file mode 100644 index 000000000..fdabac4af --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/ce/kernel_ce_shared.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERNEL_CE_SHARED_H +#define KERNEL_CE_SHARED_H + +#include "gpu/gpu.h" + +/** + * Routines shared between CE and KCE. 
+ */ + +NvBool ceIsCeGrce(OBJGPU *pGpu, NvU32 ceEngineType); +NvU32 ceCountGrCe(OBJGPU *pGpu); + +#endif diff --git a/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h b/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h new file mode 100644 index 000000000..ad600f302 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/dce_client/dce_client.h @@ -0,0 +1,3 @@ + +#include "g_dce_client_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/deferred_api.h b/src/nvidia/inc/kernel/gpu/deferred_api.h new file mode 100644 index 000000000..43d23712d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/deferred_api.h @@ -0,0 +1,3 @@ + +#include "g_deferred_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/device/device.h b/src/nvidia/inc/kernel/gpu/device/device.h new file mode 100644 index 000000000..76faf3c17 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/device/device.h @@ -0,0 +1,3 @@ + +#include "g_device_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h b/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h new file mode 100644 index 000000000..eee436cb3 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/disp_capabilities.h @@ -0,0 +1,3 @@ + +#include "g_disp_capabilities_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/disp_channel.h b/src/nvidia/inc/kernel/gpu/disp/disp_channel.h new file mode 100644 index 000000000..146dfbbf4 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/disp_channel.h @@ -0,0 +1,3 @@ + +#include "g_disp_channel_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/disp_objs.h b/src/nvidia/inc/kernel/gpu/disp/disp_objs.h new file mode 100644 index 000000000..6dde0572b --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/disp_objs.h @@ -0,0 +1,3 @@ + +#include "g_disp_objs_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h b/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h new file mode 100644 index 000000000..5addedc69 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/disp_sf_user.h @@ -0,0 +1,3 @@ + +#include "g_disp_sf_user_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h b/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h new file mode 100644 index 000000000..60fbcb0da --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/head/kernel_head.h @@ -0,0 +1,3 @@ + +#include "g_kernel_head_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h b/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h new file mode 100644 index 000000000..94c9909e9 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/inst_mem/disp_inst_mem.h @@ -0,0 +1,3 @@ + +#include "g_disp_inst_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/kern_disp.h b/src/nvidia/inc/kernel/gpu/disp/kern_disp.h new file mode 100644 index 000000000..25be380e4 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/kern_disp.h @@ -0,0 +1,3 @@ + +#include "g_kern_disp_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h b/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h new file mode 100644 index 000000000..649c97976 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/kern_disp_max.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERN_DISP_MAX_H +#define KERN_DISP_MAX_H + +/*! + * @brief Defines max values used for the KernelDisplay Engine Object, + * including values shared by OBJDISP code. + */ + +#define OBJ_MAX_HEADS 4 +#define MAX_RG_LINE_CALLBACKS_PER_HEAD 2 +#define OBJ_MAX_DFPS 31 + +#endif // KERN_DISP_MAX_H diff --git a/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h b/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h new file mode 100644 index 000000000..0497469eb --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/kern_disp_type.h @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERN_DISP_TYPE_H +#define KERN_DISP_TYPE_H + +/****************************************************************************** +* +* Defines display type enums that can be used in the KernelDisplay object. 
+* +******************************************************************************/ + +typedef enum +{ + dispChnClass_Curs, + dispChnClass_Ovim, + dispChnClass_Base, + dispChnClass_Core, + dispChnClass_Ovly, + dispChnClass_Winim, + dispChnClass_Win, + dispChnClass_Supported +} DISPCHNCLASS; + +enum DISPLAY_ICC_BW_CLIENT +{ + DISPLAY_ICC_BW_CLIENT_RM, + DISPLAY_ICC_BW_CLIENT_EXT, // DD or MODS + NUM_DISPLAY_ICC_BW_CLIENTS +}; + +#endif // #ifndef KERN_DISP_TYPE_H diff --git a/src/nvidia/inc/kernel/gpu/disp/rg_line_callback/rg_line_callback.h b/src/nvidia/inc/kernel/gpu/disp/rg_line_callback/rg_line_callback.h new file mode 100644 index 000000000..58ed78596 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/rg_line_callback/rg_line_callback.h @@ -0,0 +1,3 @@ + +#include "g_rg_line_callback_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h b/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h new file mode 100644 index 000000000..0634ce89f --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef VBLANK_H +#define VBLANK_H + +#include "gpu/gpu.h" +/* ------------------------ Types definitions ------------------------------ */ +/*! + * Callback function prototype + */ +typedef NV_STATUS (*VBLANKCALLBACKPROC)(OBJGPU*, void *, NvU32, NvU32, NV_STATUS); + +typedef struct VBLANKCALLBACK +{ + VBLANKCALLBACKPROC Proc; + void *pObject; + NvBool bObjectIsChannelDescendant; + NvU32 Param1; + NvU32 Param2; + NvU32 VBlankCount; + NvU32 VBlankOffset; + NvU64 TimeStamp; + NvU32 MC_CallbackFlag; + NvU32 Flags; + NV_STATUS Status; + struct VBLANKCALLBACK *Next; + NvBool bImmediateCallback; + NvBool bIsVblankNotifyEnable; +}VBLANKCALLBACK; + +/* ------------------------ Macros & Defines ------------------------------- */ + +/*! + * Callback function registration flags + */ +#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT 0x00000001 +#define VBLANK_CALLBACK_FLAG_COMPLETE_ON_OBJECT_CLEANUP 0x00000002 +#define VBLANK_CALLBACK_FLAG_PERSISTENT 0x00000004 +#define VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP 0x00000010 +#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT 0x00000020 // Explicit request for the next vblank. 
+#define VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET 0x00000040 // Explicit request for the vblank offset from the current one +#define VBLANK_CALLBACK_FLAG_PROMOTE_TO_FRONT 0x00000080 // Promotes to being 'first', while still honoring VBlankCount +#define VBLANK_CALLBACK_FLAG_RELEASES_SEMAPHORE 0x00000100 // A flag for deadlock detection to check if this callback could release a semaphore +#define VBLANK_CALLBACK_FLAG_GUARANTEE_SAFETY 0x00000200 // This callback absolutely needs to run during vertical blank, even if it runs late as a consequence. +#define VBLANK_CALLBACK_FLAG_LOW_LATENCY__ISR_ONLY 0x08000000 // This means always process during ISR (never DPC.) Be careful! +#define VBLANK_CALLBACK_FLAG_LOW_LATENCY 0x10000000 // This now means ASAP, which could be ISR or DPC, depending on which happens first +#define VBLANK_CALLBACK_FLAG_MC_EXECUTE_ONCE 0x40000000 // A special flag for MultiChip configurations to have the callback execute only once +#define VBLANK_CALLBACK_FLAG_USER 0x80000000 + +/*! + * A little macro help for the CALLBACK_FLAG_MC_EXECUTE_ONCE flag above + */ +#define VBLANK_CALLBACK_EXECUTE_ONCE(x) (x & VBLANK_CALLBACK_FLAG_MC_EXECUTE_ONCE) + +/*! + * VBlank Service info gathering keep-alive in seconds. This value is the number of seconds the vblank service will run after a client request vblank info. + */ +#define VBLANK_INFO_GATHER_KEEPALIVE_SECONDS (5) + +/*! + * VBLANK SERVICE RELATED + * VBlank Service callback processing flags + * These two flags describe when to process the queues + */ + +#define VBLANK_STATE_PROCESS_NORMAL (0x00000000) // Process the requested queues if associated vblank interrupt is pending +#define VBLANK_STATE_PROCESS_IMMEDIATE (0x00000001) // Process the requested queues now, regardless of any vblank interrupt pending state + +/*! + * These three flags describe which queues to process + */ +#define VBLANK_STATE_PROCESS_LOW_LATENCY (0x00000002) // Process the low-latency vblank callback queue +#define VBLANK_STATE_PROCESS_NORMAL_LATENCY (0x00000004) // Process the normal-latency vblank callback queue + +#define VBLANK_STATE_PROCESS_ALL_CALLBACKS (VBLANK_STATE_PROCESS_LOW_LATENCY|VBLANK_STATE_PROCESS_NORMAL_LATENCY) // Process all callback (high and low latency) queues + +#define VBLANK_STATE_PROCESS_CALLBACKS_ONLY (0x00000008) // Process only the callback queue(s) and nothing else + +/*! + * set when called from an ISR; if VBlank() is in an ISR and there is + * more work to do, then VBlank() will not clear the pending bit + */ +#define VBLANK_STATE_PROCESS_CALLED_FROM_ISR (0x00000010) +#define VBLANK_STATE_PROCESS_CALLED_FROM_DPC (0x00000020) + +/*! 
Vblank Interrupt state */ +#define NV_HEAD_VBLANK_INTR_UNAVAILABLE (0x00000000) +#define NV_HEAD_VBLANK_INTR_AVAILABLE (0x00000001) +#define NV_HEAD_VBLANK_INTR_ENABLED (0x00000002) + +#endif // VBLANK_H diff --git a/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank_callback.h b/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank_callback.h new file mode 100644 index 000000000..205139417 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/disp/vblank_callback/vblank_callback.h @@ -0,0 +1,3 @@ + +#include "g_vblank_callback_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/eng_desc.h b/src/nvidia/inc/kernel/gpu/eng_desc.h new file mode 100644 index 000000000..bce623dc4 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/eng_desc.h @@ -0,0 +1,3 @@ + +#include "g_eng_desc_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/eng_state.h b/src/nvidia/inc/kernel/gpu/eng_state.h new file mode 100644 index 000000000..9f732f615 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/eng_state.h @@ -0,0 +1,3 @@ + +#include "g_eng_state_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/falcon/falcon_common.h b/src/nvidia/inc/kernel/gpu/falcon/falcon_common.h new file mode 100644 index 000000000..63b5ca42e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/falcon/falcon_common.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-202 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef FALCON_COMMON_H +#define FALCON_COMMON_H + +/*! + * Alignment/granularity of falcon memory blocks + */ +#define FLCN_BLK_ALIGNMENT (256) + +/*! + * Address/offset alignment required for falcon IMEM accesses + */ +#define FLCN_IMEM_ACCESS_ALIGNMENT (4) + +/*! + * Address/offset alignment required for falcon DMEM accesses + */ +#define FLCN_DMEM_ACCESS_ALIGNMENT (4) + +/*! + * Falcon IMEM block-size (as a power-of-2) + */ +#define FALCON_IMEM_BLKSIZE2 (8) + +/*! + * Falcon DMEM block-size (as a power-of-2) + */ +#define FALCON_DMEM_BLKSIZE2 (8) + +/*! + * Denotes invalid/absent VA for falcon ucode loading + */ +#define FLCN_DMEM_VA_INVALID 0xffffffff + +/*! 
+ * Default Falcon context buffer size + */ +#define FLCN_CTX_ENG_BUFFER_SIZE_HW 4096 + +#endif // FALCON_COMMON_H diff --git a/src/nvidia/inc/kernel/gpu/falcon/kernel_falcon.h b/src/nvidia/inc/kernel/gpu/falcon/kernel_falcon.h new file mode 100644 index 000000000..dc6ee7f32 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/falcon/kernel_falcon.h @@ -0,0 +1,3 @@ + +#include "g_kernel_falcon_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/channel_descendant.h b/src/nvidia/inc/kernel/gpu/fifo/channel_descendant.h new file mode 100644 index 000000000..09c3d1943 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/channel_descendant.h @@ -0,0 +1,3 @@ + +#include "g_channel_descendant_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/kernel_channel.h b/src/nvidia/inc/kernel/gpu/fifo/kernel_channel.h new file mode 100644 index 000000000..296103bdf --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/kernel_channel.h @@ -0,0 +1,3 @@ + +#include "g_kernel_channel_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/kernel_channel_group.h b/src/nvidia/inc/kernel/gpu/fifo/kernel_channel_group.h new file mode 100644 index 000000000..d1b3a01ac --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/kernel_channel_group.h @@ -0,0 +1,3 @@ + +#include "g_kernel_channel_group_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/kernel_channel_group_api.h b/src/nvidia/inc/kernel/gpu/fifo/kernel_channel_group_api.h new file mode 100644 index 000000000..ac6b42867 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/kernel_channel_group_api.h @@ -0,0 +1,3 @@ + +#include "g_kernel_channel_group_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/kernel_ctxshare.h b/src/nvidia/inc/kernel/gpu/fifo/kernel_ctxshare.h new file mode 100644 index 000000000..69c93f5a4 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/kernel_ctxshare.h @@ -0,0 +1,3 @@ + +#include "g_kernel_ctxshare_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/kernel_fifo.h b/src/nvidia/inc/kernel/gpu/fifo/kernel_fifo.h new file mode 100644 index 000000000..27fedbd46 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/kernel_fifo.h @@ -0,0 +1,3 @@ + +#include "g_kernel_fifo_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/kernel_sched_mgr.h b/src/nvidia/inc/kernel/gpu/fifo/kernel_sched_mgr.h new file mode 100644 index 000000000..e31a1111b --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/kernel_sched_mgr.h @@ -0,0 +1,3 @@ + +#include "g_kernel_sched_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/usermode_api.h b/src/nvidia/inc/kernel/gpu/fifo/usermode_api.h new file mode 100644 index 000000000..cac7af8af --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/usermode_api.h @@ -0,0 +1,3 @@ + +#include "g_usermode_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/fifo/uvm_channel_retainer.h b/src/nvidia/inc/kernel/gpu/fifo/uvm_channel_retainer.h new file mode 100644 index 000000000..c2c145a77 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/fifo/uvm_channel_retainer.h @@ -0,0 +1,3 @@ + +#include "g_uvm_channel_retainer_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu.h b/src/nvidia/inc/kernel/gpu/gpu.h new file mode 100644 index 000000000..29fdb1894 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu.h @@ -0,0 +1,3 @@ + +#include "g_gpu_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_access.h b/src/nvidia/inc/kernel/gpu/gpu_access.h new file mode 100644 index 000000000..127c38a2e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_access.h @@ -0,0 +1,381 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPU_ACCESS_H_ +#define _GPU_ACCESS_H_ + +#include "ioaccess/ioaccess.h" +#include "gpu/gpu_device_mapping.h" + +// Go straight at the memory or hardware. +#define MEM_RD08(a) (*(const volatile NvU8 *)(a)) +#define MEM_RD16(a) (*(const volatile NvU16 *)(a)) +#define MEM_RD32(a) (*(const volatile NvU32 *)(a)) +#define MEM_WR08(a, d) do { *(volatile NvU8 *)(a) = (d); } while (0) +#define MEM_WR16(a, d) do { *(volatile NvU16 *)(a) = (d); } while (0) +#define MEM_WR32(a, d) do { *(volatile NvU32 *)(a) = (d); } while (0) +#define MEM_WR64(a, d) do { *(volatile NvU64 *)(a) = (d); } while (0) + +// +// Define the signature of the register filter callback function +// +// flags can be optionally used for filters to decide whether to actually +// touch HW or not. flags should be OR'ed every time a new filter is found. (see objgpu.c) +// +typedef void (*GpuWriteRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 val, NvU32 accessSize, NvU32 flags); +typedef NvU32 (*GpuReadRegCallback)(OBJGPU *, void *, NvU32 addr, NvU32 accessSize, NvU32 flags); + +union GPUHWREG +{ + volatile NvV8 Reg008[1]; + volatile NvV16 Reg016[1]; + volatile NvV32 Reg032[1]; +}; + +typedef union GPUHWREG GPUHWREG; + +// +// Register filter record +// +// If REGISTER_FILTER_FLAGS_READ is set, then that means that the base RegRead +// function will not read the register, so the provided read callback function +// is expected to read the register and return the value. +// +// If REGISTER_FILTER_FLAGS_WRITE is set, then that means that the base RegWrite +// function will not write the register, so the provided callback write function +// is expected to write the given value to the register. +// +// It is an error to specify REGISTER_FILTER_FLAGS_READ and not provide a +// read callback function. +// +// It is an error to specify REGISTER_FILTER_FLAGS_WRITE and not provide a +// write callback function. 
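//
// Illustrative write filter (editorial sketch, not part of this header): a
// callback matching the GpuWriteRegCallback typedef above that observes writes
// to a register range. pRegAccess, rangeStart and rangeEnd are placeholders,
// and the argument order of regAddRegisterFilter() is inferred from the
// REGISTER_FILTER fields declared below.
//
//   static void _myWriteFilter(OBJGPU *pGpu, void *pParam, NvU32 addr,
//                              NvU32 val, NvU32 accessSize, NvU32 flags)
//   {
//       // Observe (or veto) the write; with REGISTER_FILTER_FLAGS_WRITE set,
//       // this callback is responsible for performing the write itself.
//   }
//
//   REGISTER_FILTER *pFilter;
//   NV_STATUS status = regAddRegisterFilter(pRegAccess,
//                                           REGISTER_FILTER_FLAGS_WRITE,
//                                           DEVICE_INDEX_GPU, 0,
//                                           rangeStart, rangeEnd,
//                                           _myWriteFilter, NULL,
//                                           NULL, &pFilter);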
+// +#define REGISTER_FILTER_FLAGS_READ (NVBIT(0)) +#define REGISTER_FILTER_FLAGS_WRITE (NVBIT(1)) +// filter is in the list but it is invalid and should be removed +#define REGISTER_FILTER_FLAGS_INVALID (NVBIT(2)) + +#define REGISTER_FILTER_FLAGS_VIRTUAL (0) +#define REGISTER_FILTER_FLAGS_READ_WRITE (REGISTER_FILTER_FLAGS_READ | REGISTER_FILTER_FLAGS_WRITE) + +typedef struct REGISTER_FILTER REGISTER_FILTER; + +struct REGISTER_FILTER +{ + REGISTER_FILTER *pNext; //!< pointer to next filter + NvU32 flags; //!< attributes of this filter + DEVICE_INDEX devIndex; //!< filter device + NvU32 devInstance; //!< filter device instance + NvU32 rangeStart; //!< filter range start (can overlap) + NvU32 rangeEnd; //!< filter range end (can overlap) + GpuWriteRegCallback pWriteCallback; //!< callback for write + GpuReadRegCallback pReadCallback; //!< callback for read + void *pParam; //!< pointer to param which gets passed to callbacks +}; + +typedef struct { + REGISTER_FILTER *pRegFilterList; // Active filters + REGISTER_FILTER *pRegFilterRecycleList; // Inactive filters + PORT_SPINLOCK * pRegFilterLock; // Thread-safe list management + NvU32 regFilterRefCnt; // Thread-safe list management + NvBool bRegFilterNeedRemove; // Thread-safe list garbage collection +} DEVICE_REGFILTER_INFO; + +typedef struct DEVICE_MAPPING +{ + GPUHWREG *gpuNvAddr; // CPU Virtual Address + RmPhysAddr gpuNvPAddr; // Physical Base Address + NvU32 gpuNvLength; // Length of the Aperture + NvU32 gpuNvSaveLength; + NvU32 gpuDeviceEnum; // Device ID NV_DEVID_* + NvU32 refCount; // refCount for the device map. + DEVICE_REGFILTER_INFO devRegFilterInfo; // register filter range list +} DEVICE_MAPPING; + +typedef struct +{ + IO_DEVICE parent; + OBJGPU *pGpu; + DEVICE_INDEX deviceIndex; + NvU32 instance; + // The following members are initialized in objgpu.c, + // but are not used anywhere. gpuApertureReg* functions + // fall back to DEVICE_MAPPING instead + GPUHWREG *gpuNvAddr; // CPU Virtual Address + RmPhysAddr gpuNvPAddr; // Physical Base Address + NvU32 gpuNvLength; // Length of Aperture + NvU32 gpuDeviceEnum; // Device ID NV_DEVID_* + NvU32 refCount; // refCount for the device map. + DEVICE_REGFILTER_INFO devRegFilterInfo; // register filter range list +} GPU_IO_DEVICE; + +typedef struct +{ + // Pointer to GPU linked to this RegisterAccess object + OBJGPU *pGpu; + + // HW register access tools + GPUHWREG *gpuFbAddr; + GPUHWREG *gpuInstAddr; + + // Register access profiling + NvU32 regReadCount; + NvU32 regWriteCount; +} RegisterAccess; + +/*! Init register IO access path */ +NV_STATUS regAccessConstruct(RegisterAccess *, OBJGPU *pGpu); + +/*! Shutdown register IO access path */ +void regAccessDestruct(RegisterAccess *); + +/*! Writes to 8 bit register */ +void regWrite008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV8); + +/*! Writes to 16 bit register */ +void regWrite016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV16); + +/*! Writes to 32 bit register, with thread state on the stack */ +void regWrite032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *); + +/*! Unicast register access, with thread state on the stack */ +void regWrite032Unicast(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvV32, THREAD_STATE_NODE *); + +/*! Reads from 8 bit register */ +NvU8 regRead008(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32); + +/*! Reads from 16 bit register */ +NvU16 regRead016(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32); + +/*! 
Reads from 32 bit register, with thread state on the stack */ +NvU32 regRead032(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, THREAD_STATE_NODE *); + +/*! Reads from 32 bit register and checks bit mask, with thread state on the stack */ +NvU32 regCheckRead032(RegisterAccess *, NvU32, NvU32, THREAD_STATE_NODE *); + +/*! Reads 32 bit register and polls bit field for specific value */ +NV_STATUS regRead032_AndPoll(RegisterAccess *, DEVICE_INDEX, NvU32, NvU32, NvU32); + +/*! Adds a register filter */ +NV_STATUS regAddRegisterFilter(RegisterAccess *, NvU32, DEVICE_INDEX, NvU32, NvU32, NvU32, GpuWriteRegCallback, GpuReadRegCallback, void *, REGISTER_FILTER **); + +/*! Removes register filter */ +void regRemoveRegisterFilter(RegisterAccess *, REGISTER_FILTER *); + +/*! Check status of read return value for GPU/bus errors */ +void regCheckAndLogReadFailure(RegisterAccess *, NvU32 addr, NvU32 mask, NvU32 value); + +// +// GPU register I/O macros. +// + +// +// GPU neutral macros typically used for register I/O. +// +#define GPU_DRF_SHIFT(drf) ((0?drf) % 32) +#define GPU_DRF_MASK(drf) (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32))) +#define GPU_DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define GPU_DRF_SHIFTMASK(drf) (GPU_DRF_MASK(drf)<<(GPU_DRF_SHIFT(drf))) +#define GPU_DRF_WIDTH(drf) ((1?drf) - (0?drf) + 1) + + +// Device independent macros +// Multiple device instance macros + +#define REG_INST_RD08(g,dev,inst,a) regRead008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a) +#define REG_INST_RD16(g,dev,inst,a) regRead016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a) +#define REG_INST_RD32(g,dev,inst,a) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, NULL) + +#define REG_INST_WR08(g,dev,inst,a,v) regWrite008(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v) +#define REG_INST_WR16(g,dev,inst,a,v) regWrite016(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v) +#define REG_INST_WR32(g,dev,inst,a,v) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL) +#define REG_INST_WR32_UC(g,dev,inst,a,v) regWrite032Unicast(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, NULL) + +#define REG_INST_RD32_EX(g,dev,inst,a,t) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, t) +#define REG_INST_WR32_EX(g,dev,inst,a,v,t) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, a, v, t) + +#define REG_INST_DEVIDX_RD32_EX(g,devidx,inst,a,t) regRead032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, t) +#define REG_INST_DEVIDX_WR32_EX(g,devidx,inst,a,v,t) regWrite032(GPU_GET_REGISTER_ACCESS(g), devidx, inst, a, v, t) + +// GPU macros defined in terms of DEV_ macros +#define GPU_REG_RD08(g,a) REG_INST_RD08(g,GPU,0,a) +#define GPU_REG_RD16(g,a) REG_INST_RD16(g,GPU,0,a) +#define GPU_REG_RD32(g,a) REG_INST_RD32(g,GPU,0,a) +#define GPU_CHECK_REG_RD32(g,a,m) regCheckRead032(GPU_GET_REGISTER_ACCESS(g),a,m,NULL) +#define GPU_REG_RD32_AND_POLL(g,r,m,v) regRead032_AndPoll(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_GPU, r, m, v) + +#define GPU_REG_WR08(g,a,v) REG_INST_WR08(g,GPU,0,a,v) +#define GPU_REG_WR16(g,a,v) REG_INST_WR16(g,GPU,0,a,v) +#define GPU_REG_WR32(g,a,v) REG_INST_WR32(g,GPU,0,a,v) +#define GPU_REG_WR32_UC(g,a,v) REG_INST_WR32_UC(g,GPU,0,a,v) + +// GPU macros for SR-IOV +#define GPU_VREG_RD32(g, a) GPU_REG_RD32(g, g->sriovState.virtualRegPhysOffset + a) +#define GPU_VREG_WR32(g, a, v) GPU_REG_WR32(g, g->sriovState.virtualRegPhysOffset + 
a, v) +#define GPU_VREG_RD32_EX(g,a,t) REG_INST_RD32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, t) +#define GPU_VREG_WR32_EX(g,a,v,t) REG_INST_WR32_EX(g, GPU, 0, g->sriovState.virtualRegPhysOffset + a, v, t) +#define GPU_VREG_FLD_WR_DRF_DEF(g,d,r,f,c) GPU_VREG_WR32(g, NV##d##r,(GPU_VREG_RD32(g,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) + +#define VREG_INST_RD32(g,dev,inst,a) regRead032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, NULL) +#define VREG_INST_WR32(g,dev,inst,a,v) regWrite032(GPU_GET_REGISTER_ACCESS(g), DEVICE_INDEX_##dev, inst, g->sriovState.virtualRegPhysOffset + a, v, NULL) +#define GPU_VREG_FLD_WR_DRF_NUM(g,d,r,f,n) VREG_INST_WR32(g,GPU,0,NV##d##r,(VREG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<sriovState.virtualRegPhysOffset + a) + +#define GPU_VREG_IDX_RD_DRF(g,d,r,i,f) (((GPU_VREG_RD32(g, NV ## d ## r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define GPU_VREG_FLD_IDX_WR_DRF_DEF(g,d,r,i,f,c) GPU_VREG_WR32(g, NV##d##r(i),(GPU_VREG_RD32(g,NV##d##r(i))&~(GPU_DRF_MASK(NV##d##r##f)<sriovState.virtualRegPhysOffset + a) +#define GPU_VREG_WR32(g, a, v) gpuRegWr32_dumpinfo(__FUNCTION__,#a,"(VREG)",g, g->sriovState.virtualRegPhysOffset + a, v) + +#endif // GPU_REGISTER_ACCESS_DUMP + +// +// Macros for register I/O +// +#define GPU_FLD_WR_DRF_NUM(g,d,r,f,n) REG_INST_WR32(g,GPU,0,NV##d##r,(REG_INST_RD32(g,GPU,0,NV##d##r)&~(GPU_DRF_MASK(NV##d##r##f)<>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define GPU_FLD_TEST_DRF_DEF(g,d,r,f,c) (GPU_REG_RD_DRF(g, d, r, f) == NV##d##r##f##c) +#define GPU_FLD_TEST_DRF_NUM(g,d,r,f,n) (GPU_REG_RD_DRF(g, d, r, f) == n) +#define GPU_FLD_IDX_TEST_DRF_DEF(g,d,r,f,c,i) (GPU_REG_IDX_RD_DRF(g, d, r, i, f) == NV##d##r##f##c) +#define GPU_FLD_2IDX_TEST_DRF_DEF(g,d,r,f,c,i,j) (GPU_REG_2IDX_RD_DRF(g, d, r, i, j, f) == NV##d##r##f##c) + +#define GPU_REG_RD_DRF_EX(g,d,r,f,t) (((GPU_REG_RD32_EX(g, NV ## d ## r, t))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) + +#define GPU_FLD_WR_DRF_NUM_EX(g,d,r,f,n,t) REG_INST_WR32_EX(g,GPU,0,NV##d##r,(REG_INST_RD32_EX(g,GPU,0,NV##d##r,t)&~(GPU_DRF_MASK(NV##d##r##f)<>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define GPU_REG_2IDX_RD_DRF(g,d,r,i,j,f) (((GPU_REG_RD32(g, NV ## d ## r(i, j)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) +#define GPU_REG_RD_DRF_IDX(g,d,r,f,i) (((GPU_REG_RD32(g, NV ## d ## r))>>GPU_DRF_SHIFT(NV ## d ## r ## f(i)))&GPU_DRF_MASK(NV ## d ## r ## f(i))) +#define GPU_REG_IDX_OFFSET_RD_DRF(g,d,r,i,o,f) (((GPU_REG_RD32(g, NV ## d ## r(i,o)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f)) + +// +// Macros that abstract the use of bif object to access GPU bus config registers +// This is the preferred set >= NV50 +// +#define GPU_BUS_CFG_RD32(g,r,d) gpuReadBusConfigReg_HAL(g, r, d) +#define GPU_BUS_CFG_WR32(g,r,d) gpuWriteBusConfigReg_HAL(g, r, d) +#define GPU_BUS_CFG_FLD_WR_DRF_DEF(g,x,d,r,f,c) GPU_BUS_CFG_WR32(g, NV##d##r,(x &~(GPU_DRF_MASK(NV##d##r##f)<>(31-(1?sf)+(0?sf))) +#define SF_SHIFTMASK(sf) (SF_MASK(sf) << SF_SHIFT(sf)) +#define SF_DEF(s,f,c) ((NV ## s ## f ## c)<>SF_SHIFT(NV ## s ## f))&SF_MASK(NV ## s ## f)) +#define SF_WIDTH(sf) ((1?sf) - (0?sf) + 1) +// This macro parses multi-word/array defines +#define SF_ARR32_VAL(s,f,arr) \ + (((arr)[SF_INDEX(NV ## s ## f)] >> SF_SHIFT(NV ## s ## f)) & SF_MASK(NV ## s ## f)) 
+#define FLD_SF_DEF(s,f,d,l) ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_DEF(s,f,d) +#define FLD_SF_NUM(s,f,n,l) ((l)&~(SF_MASK(NV##s##f) << SF_SHIFT(NV##s##f)))| SF_NUM(s,f,n) +#define FLD_SF_IDX_DEF(s,f,c,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_DEF(s,f,c,i)) +#define FLD_SF_IDX_NUM(s,f,n,i,l) (((l) & ~SF_SHIFTMASK(NV ## s ## f(i))) | SF_IDX_NUM(s,f,n,i)) + +#endif // _GPU_ACCESS_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_child_list.h b/src/nvidia/inc/kernel/gpu/gpu_child_list.h new file mode 100644 index 000000000..6c86a6e28 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_child_list.h @@ -0,0 +1,303 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// No include guards - this file is included multiple times, each time with a +// different definition for GPU_CHILD_SINGLE_INST and GPU_CHILD_GPU_CHILD_MULTI_INST +// +// Callers that will use the same definition for single- and multi- instance +// can define GPU_CHILD that will be used for both +// +#if defined(GPU_CHILD) +#if !defined(GPU_CHILD_SINGLE_INST) && !defined(GPU_CHILD_MULTI_INST) +#define GPU_CHILD_SINGLE_INST GPU_CHILD +#define GPU_CHILD_MULTI_INST GPU_CHILD +#else +#error "Must not define GPU_CHILD_{SINGLE,MULTI}_INST and GPU_CHILD at the same time" +#endif +#endif + +// +// GPU child list. All objects must inherit from OBJENGSTATE. Objects are +// constructed in the listed order and destructed in reverse order. Storage in +// OBJGPU and accessor macros (i.e.: GET_GPU_XXX) are generated from this list. 
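//
// Illustrative consumer (editorial sketch, not part of this header): a file
// that wants one pointer member per engine could define the two entry macros
// and include this list; the real OBJGPU definition may differ in detail.
//
//   #define GPU_CHILD_SINGLE_INST(className, accessor, maxInst, bEarly, bAlways, gpuField) \
//       className *gpuField;
//   #define GPU_CHILD_MULTI_INST(className, accessor, maxInst, bEarly, bAlways, gpuField)  \
//       className *gpuField[maxInst];
//   #include "gpu/gpu_child_list.h"   // the entry macros are #undef'd at the end of the list
//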
+// + +// +// Temporarily needed to generate stubs for disabled modules +// To be removed when the references to these modules are gone +// +#if defined(GPU_CHILD_LIST_DISABLED_ONLY) +#define GPU_CHILD_MODULE(_rmcfgModule) !RMCFG_MODULE_ENABLED(_rmcfgModule) +#else +#define GPU_CHILD_MODULE(_rmcfgModule) RMCFG_MODULE_ENABLED(_rmcfgModule) +#endif + + /* Class Name Accessor Name Max Instances bConstructEarly bAlwaysCreate OBJGPU Field */ +#if GPU_CHILD_MODULE(FUSE) + GPU_CHILD_SINGLE_INST( OBJFUSE, GPU_GET_FUSE, 1, NV_TRUE, NV_TRUE, pFuse ) +#endif +#if GPU_CHILD_MODULE(BIF) + GPU_CHILD_SINGLE_INST( OBJBIF, GPU_GET_BIF, 1, NV_TRUE, NV_FALSE, pBif ) +#endif +#if GPU_CHILD_MODULE(KERNEL_BIF) + GPU_CHILD_SINGLE_INST( KernelBif, GPU_GET_KERNEL_BIF, 1, NV_TRUE, NV_FALSE, pKernelBif ) +#endif +#if GPU_CHILD_MODULE(NNE) + GPU_CHILD_SINGLE_INST( OBJNNE, GPU_GET_NNE, 1, NV_TRUE, NV_FALSE, pNne ) +#endif +#if GPU_CHILD_MODULE(MC) + GPU_CHILD_SINGLE_INST( OBJMC, GPU_GET_MC, 1, NV_FALSE, NV_FALSE, pMc ) +#endif +#if GPU_CHILD_MODULE(KERNEL_MC) + GPU_CHILD_SINGLE_INST( KernelMc, GPU_GET_KERNEL_MC, 1, NV_FALSE, NV_FALSE, pKernelMc ) +#endif +#if GPU_CHILD_MODULE(PRIV_RING) + GPU_CHILD_SINGLE_INST( PrivRing, GPU_GET_PRIV_RING, 1, NV_FALSE, NV_FALSE, pPrivRing ) +#endif +#if GPU_CHILD_MODULE(INTR) + GPU_CHILD_SINGLE_INST( SwIntr, GPU_GET_SW_INTR, 1, NV_FALSE, NV_FALSE, pSwIntr ) +#endif +#if GPU_CHILD_MODULE(MEMORY_SYSTEM) + GPU_CHILD_SINGLE_INST( MemorySystem, GPU_GET_MEMORY_SYSTEM, 1, NV_FALSE, NV_FALSE, pMemorySystem ) +#endif +#if GPU_CHILD_MODULE(KERNEL_MEMORY_SYSTEM) + GPU_CHILD_SINGLE_INST( KernelMemorySystem, GPU_GET_KERNEL_MEMORY_SYSTEM, 1, NV_FALSE, NV_FALSE, pKernelMemorySystem ) +#endif +#if GPU_CHILD_MODULE(MEMORY_MANAGER) + GPU_CHILD_SINGLE_INST( MemoryManager, GPU_GET_MEMORY_MANAGER, 1, NV_FALSE, NV_FALSE, pMemoryManager ) +#endif +#if GPU_CHILD_MODULE(FBFLCN) + GPU_CHILD_SINGLE_INST( OBJFBFLCN, GPU_GET_FBFLCN, 1, NV_FALSE, NV_FALSE, pFbflcn ) +#endif +#if GPU_CHILD_MODULE(HSHUB) + GPU_CHILD_MULTI_INST ( OBJHSHUB, GPU_GET_HSHUB, GPU_MAX_HSHUBS, NV_FALSE, NV_FALSE, pHshub ) +#endif +#if GPU_CHILD_MODULE(SEQ) + GPU_CHILD_SINGLE_INST( OBJSEQ, GPU_GET_SEQ, 1, NV_FALSE, NV_TRUE, pSeq ) +#endif +#if GPU_CHILD_MODULE(GpuMutexMgr) + GPU_CHILD_SINGLE_INST( GpuMutexMgr, GPU_GET_MUTEX_MGR, 1, NV_FALSE, NV_TRUE, pMutexMgr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_DISPLAY) + GPU_CHILD_SINGLE_INST( KernelDisplay, GPU_GET_KERNEL_DISPLAY, 1, NV_FALSE, NV_FALSE, pKernelDisplay ) +#endif +#if GPU_CHILD_MODULE(DISP) + GPU_CHILD_SINGLE_INST( OBJDISP, GPU_GET_DISP, 1, NV_FALSE, NV_FALSE, pDisp ) +#endif +#if GPU_CHILD_MODULE(TMR) + GPU_CHILD_SINGLE_INST( OBJTMR, GPU_GET_TIMER, 1, NV_TRUE, NV_TRUE, pTmr ) +#endif +#if GPU_CHILD_MODULE(BUS) + GPU_CHILD_SINGLE_INST( OBJBUS, GPU_GET_BUS, 1, NV_FALSE, NV_FALSE, pBus ) +#endif +#if GPU_CHILD_MODULE(KERNEL_BUS) + GPU_CHILD_SINGLE_INST( KernelBus, GPU_GET_KERNEL_BUS, 1, NV_FALSE, NV_FALSE, pKernelBus ) +#endif +#if GPU_CHILD_MODULE(GMMU) + GPU_CHILD_SINGLE_INST( OBJGMMU, GPU_GET_GMMU, 1, NV_FALSE, NV_FALSE, pGmmu ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GMMU) + GPU_CHILD_SINGLE_INST( KernelGmmu, GPU_GET_KERNEL_GMMU, 1, NV_FALSE, NV_FALSE, pKernelGmmu ) +#endif +#if GPU_CHILD_MODULE(KERNEL_NVDEC) + GPU_CHILD_SINGLE_INST( KernelNvdec, GPU_GET_KERNEL_NVDEC, 1, NV_FALSE, NV_FALSE, pKernelNvdec ) +#endif +#if GPU_CHILD_MODULE(KERNEL_SEC2) + GPU_CHILD_SINGLE_INST( KernelSec2, GPU_GET_KERNEL_SEC2, 1, NV_FALSE, NV_FALSE, pKernelSec2 ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GSP) + 
GPU_CHILD_SINGLE_INST( KernelGsp, GPU_GET_KERNEL_GSP, 1, NV_FALSE, NV_FALSE, pKernelGsp ) +#endif +#if GPU_CHILD_MODULE(DCECLIENTRM) + GPU_CHILD_SINGLE_INST( OBJDCECLIENTRM, GPU_GET_DCECLIENTRM, 1, NV_FALSE, NV_FALSE, pDceclientrm ) +#endif +#if GPU_CHILD_MODULE(VIRT_MEM_ALLOCATOR) + GPU_CHILD_SINGLE_INST( VirtMemAllocator, GPU_GET_DMA, 1, NV_FALSE, NV_FALSE, pDma ) +#endif +#if GPU_CHILD_MODULE(GRMGR) + GPU_CHILD_SINGLE_INST( GraphicsManager, GPU_GET_GRMGR, 1, NV_FALSE, NV_TRUE, pGrMgr ) +#endif +#if GPU_CHILD_MODULE(MIG_MANAGER) + GPU_CHILD_SINGLE_INST( MIGManager, GPU_GET_MIG_MANAGER, 1, NV_FALSE, NV_TRUE, pMIGManager ) +#endif +#if GPU_CHILD_MODULE(KERNEL_MIG_MANAGER) + GPU_CHILD_SINGLE_INST( KernelMIGManager, GPU_GET_KERNEL_MIG_MANAGER, 1, NV_FALSE, NV_TRUE, pKernelMIGManager ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GRAPHICS_MANAGER) + GPU_CHILD_SINGLE_INST( KernelGraphicsManager, GPU_GET_KERNEL_GRAPHICS_MANAGER, 1, NV_FALSE, NV_TRUE, pKernelGraphicsManager ) +#endif +#if GPU_CHILD_MODULE(GR) + GPU_CHILD_MULTI_INST ( Graphics, GPU_GET_GR_UNSAFE, GPU_MAX_GRS, NV_FALSE, NV_FALSE, pGr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_GRAPHICS) + GPU_CHILD_MULTI_INST ( KernelGraphics, GPU_GET_KERNEL_GRAPHICS, GPU_MAX_GRS, NV_FALSE, NV_FALSE, pKernelGraphics ) +#endif +#if GPU_CHILD_MODULE(ClockManager) + GPU_CHILD_SINGLE_INST( ClockManager, GPU_GET_CLK_MGR, 1, NV_FALSE, NV_FALSE, pClk ) +#endif +#if GPU_CHILD_MODULE(FAN) + GPU_CHILD_SINGLE_INST( OBJFAN, GPU_GET_FAN, 1, NV_FALSE, NV_FALSE, pFan ) +#endif +#if GPU_CHILD_MODULE(PERF) + GPU_CHILD_SINGLE_INST( Perf, GPU_GET_PERF, 1, NV_FALSE, NV_FALSE, pPerf ) +#endif +#if GPU_CHILD_MODULE(KERNEL_PERF) + GPU_CHILD_SINGLE_INST( KernelPerf, GPU_GET_KERNEL_PERF, 1, NV_FALSE, NV_FALSE, pKernelPerf ) +#endif +#if GPU_CHILD_MODULE(THERM) + GPU_CHILD_SINGLE_INST( Therm, GPU_GET_THERM, 1, NV_FALSE, NV_FALSE, pTherm ) +#endif +#if GPU_CHILD_MODULE(BSP) + GPU_CHILD_MULTI_INST ( OBJBSP, GPU_GET_BSP, GPU_MAX_NVDECS, NV_FALSE, NV_FALSE, pBsp ) +#endif +#if GPU_CHILD_MODULE(CIPHER) + GPU_CHILD_SINGLE_INST( OBJCIPHER, GPU_GET_CIPHER, 1, NV_FALSE, NV_FALSE, pCipher ) +#endif +#if GPU_CHILD_MODULE(VBIOS) + GPU_CHILD_SINGLE_INST( OBJVBIOS, GPU_GET_VBIOS, 1, NV_FALSE, NV_TRUE, pVbios ) +#endif +#if GPU_CHILD_MODULE(DCB) + GPU_CHILD_SINGLE_INST( OBJDCB, GPU_GET_DCB, 1, NV_FALSE, NV_TRUE, pDcb ) +#endif +#if GPU_CHILD_MODULE(GPIO) + GPU_CHILD_SINGLE_INST( OBJGPIO, GPU_GET_GPIO, 1, NV_FALSE, NV_TRUE, pGpio ) +#endif +#if GPU_CHILD_MODULE(VOLT) + GPU_CHILD_SINGLE_INST( OBJVOLT, GPU_GET_VOLT, 1, NV_FALSE, NV_FALSE, pVolt ) +#endif +#if GPU_CHILD_MODULE(I2C) + GPU_CHILD_SINGLE_INST( OBJI2C, GPU_GET_I2C, 1, NV_FALSE, NV_TRUE, pI2c ) +#endif +#if GPU_CHILD_MODULE(SPI) + GPU_CHILD_SINGLE_INST( Spi, GPU_GET_SPI, 1, NV_FALSE, NV_TRUE, pSpi ) +#endif +#if GPU_CHILD_MODULE(KERNEL_RC) + GPU_CHILD_SINGLE_INST( KernelRc, GPU_GET_KERNEL_RC, 1, NV_FALSE, NV_TRUE, pKernelRc ) +#endif +#if GPU_CHILD_MODULE(RC) + GPU_CHILD_SINGLE_INST( OBJRC, GPU_GET_RC, 1, NV_FALSE, NV_TRUE, pRC ) +#endif +#if GPU_CHILD_MODULE(STEREO) + GPU_CHILD_SINGLE_INST( OBJSTEREO, GPU_GET_STEREO, 1, NV_FALSE, NV_TRUE, pStereo ) +#endif +#if GPU_CHILD_MODULE(INTR) + GPU_CHILD_SINGLE_INST( Intr, GPU_GET_INTR, 1, NV_FALSE, NV_TRUE, pIntr ) +#endif +#if GPU_CHILD_MODULE(DPAUX) + GPU_CHILD_SINGLE_INST( OBJDPAUX, GPU_GET_DPAUX, 1, NV_FALSE, NV_FALSE, pDpAux ) +#endif +#if GPU_CHILD_MODULE(PMU) + GPU_CHILD_SINGLE_INST( Pmu, GPU_GET_PMU, 1, NV_FALSE, NV_FALSE, pPmu ) +#endif +#if GPU_CHILD_MODULE(KERNEL_PMU) + 
GPU_CHILD_SINGLE_INST( KernelPmu, GPU_GET_KERNEL_PMU, 1, NV_FALSE, NV_FALSE, pKernelPmu ) +#endif +#if GPU_CHILD_MODULE(CE) + GPU_CHILD_MULTI_INST ( OBJCE, GPU_GET_CE, GPU_MAX_CES, NV_FALSE, NV_FALSE, pCe ) +#endif +#if GPU_CHILD_MODULE(KERNEL_CE) + GPU_CHILD_MULTI_INST ( KernelCE, GPU_GET_KCE, GPU_MAX_CES, NV_FALSE, NV_FALSE, pKCe ) +#endif +#if GPU_CHILD_MODULE(MSENC) + GPU_CHILD_MULTI_INST ( OBJMSENC, GPU_GET_MSENC, GPU_MAX_MSENCS, NV_FALSE, NV_FALSE, pMsenc ) +#endif +#if GPU_CHILD_MODULE(HDA) + GPU_CHILD_SINGLE_INST( OBJHDA, GPU_GET_HDA, 1, NV_FALSE, NV_FALSE, pHda ) +#endif +#if GPU_CHILD_MODULE(HDACODEC) + GPU_CHILD_SINGLE_INST( OBJHDACODEC, GPU_GET_HDACODEC, 1, NV_FALSE, NV_FALSE, pHdacodec ) +#endif +#if GPU_CHILD_MODULE(LPWR) + GPU_CHILD_SINGLE_INST( Lpwr, GPU_GET_LPWR, 1, NV_FALSE, NV_FALSE, pLpwr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_FIFO) + GPU_CHILD_SINGLE_INST( KernelFifo, GPU_GET_KERNEL_FIFO_UC, 1, NV_FALSE, NV_FALSE, pKernelFifo ) +#endif +#if GPU_CHILD_MODULE(FIFO) + GPU_CHILD_SINGLE_INST( OBJFIFO, GPU_GET_FIFO_UC, 1, NV_FALSE, NV_FALSE, pFifo ) +#endif +#if GPU_CHILD_MODULE(INFOROM) + GPU_CHILD_SINGLE_INST( OBJINFOROM, GPU_GET_INFOROM, 1, NV_FALSE, NV_TRUE, pInforom ) +#endif +#if GPU_CHILD_MODULE(PMGR) + GPU_CHILD_SINGLE_INST( Pmgr, GPU_GET_PMGR, 1, NV_FALSE, NV_FALSE, pPmgr ) +#endif +#if GPU_CHILD_MODULE(UVM) + GPU_CHILD_SINGLE_INST( OBJUVM, GPU_GET_UVM, 1, NV_FALSE, NV_FALSE, pUvm ) +#endif +#if GPU_CHILD_MODULE(NV_DEBUG_DUMP) + GPU_CHILD_SINGLE_INST( NvDebugDump, GPU_GET_NVD, 1, NV_FALSE, NV_TRUE, pNvd ) +#endif +#if GPU_CHILD_MODULE(GRDBG) + GPU_CHILD_SINGLE_INST( SMDebugger, GPU_GET_GRDBG, 1, NV_FALSE, NV_TRUE, pGrdbg ) +#endif +#if GPU_CHILD_MODULE(SEC2) + GPU_CHILD_SINGLE_INST( OBJSEC2, GPU_GET_SEC2, 1, NV_FALSE, NV_FALSE, pSec2 ) +#endif +#if GPU_CHILD_MODULE(LSFM) + GPU_CHILD_SINGLE_INST( OBJLSFM, GPU_GET_LSFM, 1, NV_FALSE, NV_FALSE, pLsfm ) +#endif +#if GPU_CHILD_MODULE(ACR) + GPU_CHILD_SINGLE_INST( OBJACR, GPU_GET_ACR, 1, NV_FALSE, NV_FALSE, pAcr ) +#endif +#if GPU_CHILD_MODULE(KERNEL_NVLINK) + GPU_CHILD_SINGLE_INST( KernelNvlink, GPU_GET_KERNEL_NVLINK, 1, NV_FALSE, NV_FALSE, pKernelNvlink ) +#endif +#if GPU_CHILD_MODULE(NVLINK) + GPU_CHILD_SINGLE_INST( Nvlink, GPU_GET_NVLINK, 1, NV_FALSE, NV_FALSE, pNvLink ) +#endif +#if GPU_CHILD_MODULE(GPULOG) + GPU_CHILD_SINGLE_INST( OBJGPULOG, GPU_GET_GPULOG, 1, NV_FALSE, NV_TRUE, pGpuLog ) +#endif +#if GPU_CHILD_MODULE(GPUMON) + GPU_CHILD_SINGLE_INST( OBJGPUMON, GPU_GET_GPUMON, 1, NV_FALSE, NV_TRUE, pGpuMon ) +#endif +#if GPU_CHILD_MODULE(HWPM) + GPU_CHILD_SINGLE_INST( OBJHWPM, GPU_GET_HWPM, 1, NV_FALSE, NV_FALSE, pHwpm ) +#endif +#if GPU_CHILD_MODULE(GRIDDISPLAYLESS) + GPU_CHILD_SINGLE_INST( OBJGRIDDISPLAYLESS, GPU_GET_GRIDDISPLAYLESS, 1, NV_FALSE, NV_FALSE, pGridDisplayless ) +#endif +#if GPU_CHILD_MODULE(SWENG) + GPU_CHILD_SINGLE_INST( OBJSWENG, GPU_GET_SWENG, 1, NV_FALSE, NV_FALSE, pSwEng ) +#endif +#if GPU_CHILD_MODULE(VMMU) + GPU_CHILD_SINGLE_INST( OBJVMMU, GPU_GET_VMMU, 1, NV_FALSE, NV_FALSE, pVmmu ) +#endif +#if GPU_CHILD_MODULE(NVJPG) + GPU_CHILD_MULTI_INST( OBJNVJPG, GPU_GET_NVJPG, GPU_MAX_NVJPGS, NV_FALSE, NV_FALSE, pNvjpg ) +#endif +#if GPU_CHILD_MODULE(GSP) + GPU_CHILD_SINGLE_INST( Gsp, GPU_GET_GSP, 1, NV_FALSE, NV_FALSE, pGsp ) +#endif +#if GPU_CHILD_MODULE(OFA) + GPU_CHILD_SINGLE_INST( OBJOFA, GPU_GET_OFA, 1, NV_FALSE, NV_FALSE, pOfa ) +#endif + +// Undefine the entry macros to simplify call sites +#undef GPU_CHILD +#undef GPU_CHILD_SINGLE_INST +#undef GPU_CHILD_MULTI_INST +#undef GPU_CHILD_MODULE 
+#undef GPU_CHILD_LIST_DISABLED_ONLY diff --git a/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h b/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h new file mode 100644 index 000000000..1a1835088 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_device_mapping.h @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPU_DEVICE_MAPPING_H_ +#define _GPU_DEVICE_MAPPING_H_ + +// Defines the enum type DEVICE_INDEX used for identifying the device type being accessed +typedef enum +{ + DEVICE_INDEX_GPU = 0, + DEVICE_INDEX_HOST1X, + DEVICE_INDEX_DISPLAY, + DEVICE_INDEX_DPAUX, + DEVICE_INDEX_MC, + DEVICE_INDEX_CLKRST, + DEVICE_INDEX_MSS_NVLINK, + DEVICE_INDEX_HDACODEC, + DEVICE_INDEX_EMC, + DEVICE_INDEX_FUSE, + DEVICE_INDEX_KFUSE, + DEVICE_INDEX_MIPICAL, + DEVICE_INDEX_MAX //Should always be the last entry +} DEVICE_INDEX; + +typedef enum +{ + SOC_DEV_MAPPING_DISP = 0, + SOC_DEV_MAPPING_DPAUX0, + SOC_DEV_MAPPING_DPAUX1, // Update NV_MAX_SOC_DPAUX_NUM_DEVICES if adding new DPAUX mappings + SOC_DEV_MAPPING_HDACODEC, + SOC_DEV_MAPPING_MIPICAL, + SOC_DEV_MAPPING_MAX // Keep this as last entry +} SOC_DEV_MAPPING; + +#define GPU_MAX_DEVICE_MAPPINGS (60) + +typedef struct +{ + DEVICE_INDEX deviceIndex; // DEVICE_INDEX_* + NvU32 devId; // NV_DEVID_* +} DEVICE_ID_MAPPING; + +#endif // _GPU_DEVICE_MAPPING_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_halspec.h b/src/nvidia/inc/kernel/gpu/gpu_halspec.h new file mode 100644 index 000000000..328774390 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_halspec.h @@ -0,0 +1,3 @@ + +#include "g_gpu_halspec_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_resource.h b/src/nvidia/inc/kernel/gpu/gpu_resource.h new file mode 100644 index 000000000..4f25bcbe1 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_resource.h @@ -0,0 +1,3 @@ + +#include "g_gpu_resource_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h b/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h new file mode 100644 index 000000000..a9bca9da2 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_resource_desc.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPU_RESOURCE_DESC_H_ +#define _GPU_RESOURCE_DESC_H_ + +#include "gpu/eng_desc.h" + +typedef struct GPU_RESOURCE_DESC +{ + NvU32 externalClassId; + ENGDESCRIPTOR engDesc; +} GPU_RESOURCE_DESC; + +// CLASSDESCRIPTOR is deprecated, please use GPU_RESOURCE_DESC +typedef struct GPU_RESOURCE_DESC CLASSDESCRIPTOR, *PCLASSDESCRIPTOR; + +#endif // _GPU_RESOURCE_DESC_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_timeout.h b/src/nvidia/inc/kernel/gpu/gpu_timeout.h new file mode 100644 index 000000000..037602b31 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_timeout.h @@ -0,0 +1,144 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPU_TIMEOUT_H_ +#define _GPU_TIMEOUT_H_ + +/* ------------------------ Includes ---------------------------------------- */ +#include "core/core.h" + + +/* ------------------------ Forward Definitions ----------------------------- */ +struct OBJGPU; + +/* ------------------------ Macros ------------------------------------------ */ +/*! + * @note GPU_TIMEOUT_DEFAULT is different per platform and can range anywhere + * from 2 to 30 secs depending on the GPU Mode and Platform. 
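 *
 * Illustrative poll loop (editorial sketch; the wrapper macros are the
 * deprecated ones defined at the end of this file, and "conditionMet" is a
 * hypothetical helper):
 *
 *   RMTIMEOUT timeout;
 *   gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
 *   while (!conditionMet(pGpu))
 *   {
 *       if (gpuCheckTimeout(pGpu, &timeout) == NV_ERR_TIMEOUT)
 *           return NV_ERR_TIMEOUT;
 *   }
 *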
+ * By default if GPU_TIMEOUT_DEFAULT is specified, we use the ThreadState + * unless explicitly told not to via GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE. + */ +#define GPU_TIMEOUT_DEFAULT 0 + +/*! + * gpuSetTimeout Flags - saved in pTimeout->flags + */ +#define GPU_TIMEOUT_FLAGS_DEFAULT NVBIT(0) //!< default timeout mechanism as set by platform +#define GPU_TIMEOUT_FLAGS_USE_THREAD_STATE NVBIT(1) //!< default timeout time used - use the ThreadState +#define GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE NVBIT(2) //!< even if default time was used - skip the ThreadState +#define GPU_TIMEOUT_FLAGS_OSTIMER NVBIT(3) //!< osGetCurrentTime() +#define GPU_TIMEOUT_FLAGS_OSDELAY NVBIT(4) //!< osDelay() +#define GPU_TIMEOUT_FLAGS_TMR NVBIT(5) //!< tmrGetCurrentTime() +#define GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG NVBIT(6) //!< bypass timeout logging in the RM journal +#define GPU_TIMEOUT_FLAGS_TMRDELAY NVBIT(7) //!< tmrDelay() +#define GPU_TIMEOUT_FLAGS_BYPASS_CPU_YIELD NVBIT(8) //!< don't explicitly let other threads run first +/*! + * gpuCheckTimeout Flags set in pTimeout->flags upon NV_ERR_TIMEOUT + */ +#define GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT NVBIT(30) +#define GPU_TIMEOUT_FLAGS_STATUS_THREAD_STATE_TIMEOUT NVBIT(31) + +/* ------------------------ Datatypes --------------------------------------- */ +/*! + * Timeout support. + */ +typedef struct +{ + NvU64 timeout; + NvU32 flags; + OBJGPU *pTmrGpu; //!< The GPU whose timer is used in SLI mode + // Defined only if flags is set to _TMR or _TMRDELAY +} RMTIMEOUT, +*PRMTIMEOUT; + +/*! + * @brief GPU timeout related data. + */ +typedef struct +{ + volatile NvBool bDefaultOverridden; + volatile NvBool bScaled; + volatile NvU32 defaultus; //!< Default timeout in us + volatile NvU32 defaultResetus; //!< Default timeout reset value in us + NvU32 defaultFlags; //!< Default timeout mode + NvU32 scale; //!< Emulation/Simulation multiplier + OBJGPU *pGpu; +} TIMEOUT_DATA; + +/*! + * @brief A prototype of the condition evaluation function required by the + * @ref gpuTimeoutCondWait_IMPL interface. + * + * @note Function is responsible for evaluation of the encapsulated condition + * as well as for triggering of required prerequisites (if any). + * For example if condition depends on a PMU issued message function + * should assure proper servicing of the PMU interrupts. + * + * @param[in] pGpu OBJGPU pointer for this conditional function + * @param[in] pVoid + * Void parameter pointer which can be used to pass in the + * pCondData from @ref gpuTimeoutCondWait_IMPL(). + * + * @return NV_TRUE + * Waited condition has happened and @ref + * gpuTimeoutCondWait_IMPL() may return to caller. + * @return NV_FALSE + * Waited condition has not happened and @ref + * gpuTimeoutCondWait_IMPL() should continue to wait until this + * interface returns NV_TRUE or timeout occurs (whichever occurs + * first). + */ +typedef NvBool GpuWaitConditionFunc(OBJGPU *pGpu, void *pVoid); + +/* ------------------------ Function Prototypes ----------------------------- */ + +void timeoutInitializeGpuDefault(TIMEOUT_DATA *pTD, OBJGPU *pGpu); + +void timeoutRegistryOverride(TIMEOUT_DATA *pTD, OBJGPU *pGpu); + +void timeoutOverride(TIMEOUT_DATA *pTD, NvBool bOverride, NvU32 timeoutMs); + +/*! Initialize the RMTIMEOUT structure with the selected timeout scheme. */ +void timeoutSet(TIMEOUT_DATA *, RMTIMEOUT *, NvU32 timeoutUs, NvU32 flags); + +/*! Check if the passed in RMTIMEOUT struct has expired. */ +NV_STATUS timeoutCheck(TIMEOUT_DATA *, RMTIMEOUT *, NvU32); + +/*! 
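 * Illustrative condition function (editorial sketch; "ENGINE_IDLE_COND" and
 * its fields are hypothetical) matching the GpuWaitConditionFunc prototype
 * above, suitable for passing to the routine declared below:
 *
 *   static NvBool _engineIdleCond(OBJGPU *pGpu, void *pVoid)
 *   {
 *       ENGINE_IDLE_COND *pCond = (ENGINE_IDLE_COND *)pVoid;
 *       return GPU_REG_RD32(pGpu, pCond->statusReg) == pCond->idleValue;
 *   }
 *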
Wait for the condition to become satisfied while checking for the timeout */ +NV_STATUS timeoutCondWait(TIMEOUT_DATA *, RMTIMEOUT *, GpuWaitConditionFunc *, void *pCondData, NvU32); + +/*! Scales timeout values depending on the environment we are running in. */ +static NV_INLINE NvU32 timeoutApplyScale(TIMEOUT_DATA *pTD, NvU32 timeout) +{ + return timeout * pTD->scale; +} + + +// Deprecated macros +#define gpuSetTimeout(g,a,t,c) timeoutSet(&(g)->timeoutData, t, a, c) +#define gpuCheckTimeout(g,t) timeoutCheck(&(g)->timeoutData, t, __LINE__) +#define gpuScaleTimeout(g,a) timeoutApplyScale(&(g)->timeoutData, a) +#define gpuTimeoutCondWait(g,a,b,t) timeoutCondWait(&(g)->timeoutData, t, a, b, __LINE__) + +#define GPU_ENG_RESET_TIMEOUT_VALUE(g, t) (t) + +#endif // _GPU_TIMEOUT_H_ diff --git a/src/nvidia/inc/kernel/gpu/gpu_uuid.h b/src/nvidia/inc/kernel/gpu/gpu_uuid.h new file mode 100644 index 000000000..884009425 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gpu_uuid.h @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _GPUUUID_H_ +#define _GPUUUID_H_ + +#include "core/core.h" +#include "nvCpuUuid.h" + +// +// GPU unique ID sizes. 
RM_SHA1_GID_SIZE uses the first 16 bytes of +// the SHA-1 digest (this is consistent with the way canonical UUIDs are +// constructed) +// +#define RM_SHA1_GID_SIZE 16 + +// UUID conversion routine: +NV_STATUS transformGidToUserFriendlyString(const NvU8 *pGidData, NvU32 gidSize, NvU8 **ppGidString, + NvU32 *pGidStrlen, NvU32 gidFlags); + +NV_STATUS nvGenerateGpuUuid(NvU16 chipId, NvU64 pdi, NvUuid *pUuid); + +NV_STATUS nvGenerateSmcUuid(NvU16 chipId, NvU64 pdi, + NvU32 swizzId, NvU32 syspipeId, NvUuid *pUuid); + +// 'G' 'P' 'U' '-'(x5), '\0x0', extra = 9 +#define NV_UUID_STR_LEN ((NV_UUID_LEN << 1) + 9) + +void nvGetSmcUuidString(const NvUuid *pUuid, char *pUuidStr); + +void nvGetGpuUuidString(const NvUuid *pUuid, char *pUuidStr); + +#endif // _GPUUUID_H_ diff --git a/src/nvidia/inc/kernel/gpu/gr/fecs_event_list.h b/src/nvidia/inc/kernel/gpu/gr/fecs_event_list.h new file mode 100644 index 000000000..14e188907 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gr/fecs_event_list.h @@ -0,0 +1,3 @@ + +#include "g_fecs_event_list_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gr/kernel_graphics.h b/src/nvidia/inc/kernel/gpu/gr/kernel_graphics.h new file mode 100644 index 000000000..97dc9d525 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gr/kernel_graphics.h @@ -0,0 +1,3 @@ + +#include "g_kernel_graphics_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_context.h b/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_context.h new file mode 100644 index 000000000..7e26d98b2 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_context.h @@ -0,0 +1,3 @@ + +#include "g_kernel_graphics_context_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_manager.h b/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_manager.h new file mode 100644 index 000000000..6d9a0c43c --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_manager.h @@ -0,0 +1,3 @@ + +#include "g_kernel_graphics_manager_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_object.h b/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_object.h new file mode 100644 index 000000000..11949689d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gr/kernel_graphics_object.h @@ -0,0 +1,3 @@ + +#include "g_kernel_graphics_object_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gr/kernel_sm_debugger_session.h b/src/nvidia/inc/kernel/gpu/gr/kernel_sm_debugger_session.h new file mode 100644 index 000000000..43ffeb62a --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gr/kernel_sm_debugger_session.h @@ -0,0 +1,3 @@ + +#include "g_kernel_sm_debugger_session_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/src/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h new file mode 100644 index 000000000..a0da69646 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef GSP_INIT_ARGS_H +#define GSP_INIT_ARGS_H + +#include "core/core.h" + +typedef struct { + RmPhysAddr cmdQueuePhysAddr; +} GSP_RMFS_INIT_ARGUMENTS; + +typedef struct { + RmPhysAddr sharedMemPhysAddr; + NvU32 pageTableEntryCount; + NvLength cmdQueueOffset; + NvLength statQueueOffset; +} MESSAGE_QUEUE_INIT_ARGUMENTS; + +typedef struct { + NvU32 oldLevel; + NvU32 flags; + NvBool bInPMTransition; +} GSP_SR_INIT_ARGUMENTS; + +/*! + * (Cached) GSP fw RM initialization arguments. + */ +typedef struct +{ + MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments; + GSP_SR_INIT_ARGUMENTS srInitArguments; +} GSP_ARGUMENTS_CACHED; + +#endif // GSP_INIT_ARGS_H diff --git a/src/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/src/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h new file mode 100644 index 000000000..47abb8e23 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef GSP_STATIC_CONFIG_H +#define GSP_STATIC_CONFIG_H + +// +// This header describes the set of static GPU configuration information +// that is collected during GSP RM init and made available to the +// CPU RM (aka GSP client) via the NV_RM_RPC_GET_STATIC_INFO() and +// NV_RM_RPC_GET_GSP_STATIC_INFO() calls. + +#include "ctrl/ctrl0080/ctrl0080gpu.h" +#include "ctrl/ctrl0080/ctrl0080gr.h" +#include "ctrl/ctrl2080/ctrl2080bios.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" + +#include "gpu/gpu.h" // COMPUTE_BRANDING_TYPE +#include "vgpu/rpc_headers.h" // MAX_GPC_COUNT +#include "platform/chipset/chipset.h" // BUSINFO + +typedef struct GspSMInfo_t +{ + NvU32 version; + NvU32 regBankCount; + NvU32 regBankRegCount; + NvU32 maxWarpsPerSM; + NvU32 maxThreadsPerWarp; + NvU32 geomGsObufEntries; + NvU32 geomXbufEntries; + NvU32 maxSPPerSM; + NvU32 rtCoreCount; +} GspSMInfo; + +// Fetched from GSP-RM into CPU-RM +typedef struct GspStaticConfigInfo_t +{ + NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE]; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo; + NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo; + NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT]; + NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT]; + NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo; + NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams; + COMPUTE_BRANDING_TYPE computeBranding; + + NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps; + NvU32 sriovMaxGfid; + + NvU64 engineCaps; + + GspSMInfo SM_info; + + NvBool poisonFuseEnabled; + + NvU64 fb_length; + NvU32 fbio_mask; + NvU32 fb_bus_width; + NvU32 fb_ram_type; + NvU32 fbp_mask; + NvU32 l2_cache_size; + + NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL]; + NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL]; + + NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvBool bGpuInternalSku; + NvBool bIsQuadroGeneric; + NvBool bIsQuadroAd; + NvBool bIsNvidiaNvs; + NvBool bIsVgx; + NvBool bGeforceSmb; + NvBool bIsTitan; + NvBool bIsTesla; + + NvU64 bar1PdeBase; + NvU64 bar2PdeBase; + + NvBool bVbiosValid; + NvU32 vbiosSubVendor; + NvU32 vbiosSubDevice; + + NvBool bPageRetirementSupported; + + NvBool bSplitVasBetweenServerClientRm; + + NvBool bClRootportNeedsNosnoopWAR; + + VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads; + VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution; + NvU64 displaylessMaxPixels; + + // Client handle for internal RMAPI control. + NvHandle hInternalClient; + + // Device handle for internal RMAPI control. + NvHandle hInternalDevice; + + // Subdevice handle for internal RMAPI control. 
+ NvHandle hInternalSubdevice; +} GspStaticConfigInfo; + +// Pushed from CPU-RM to GSP-RM +typedef struct GspSystemInfo +{ + NvU64 gpuPhysAddr; + NvU64 gpuPhysFbAddr; + NvU64 gpuPhysInstAddr; + NvU64 nvDomainBusDeviceFunc; + NvU64 simAccessBufPhysAddr; + NvU64 pcieAtomicsOpMask; + NvU64 consoleMemSize; + NvU32 pciConfigMirrorBase; + NvU32 pciConfigMirrorSize; + NvU8 oorArch; + NvU64 clPdbProperties; + NvU32 Chipset; + BUSINFO FHBBusInfo; +} GspSystemInfo; + + +#endif /* GSP_STATIC_CONFIG_H */ diff --git a/src/nvidia/inc/kernel/gpu/gsp/kernel_gsp.h b/src/nvidia/inc/kernel/gpu/gsp/kernel_gsp.h new file mode 100644 index 000000000..405bb8d9e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gsp/kernel_gsp.h @@ -0,0 +1,3 @@ + +#include "g_kernel_gsp_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/gsp/message_queue.h b/src/nvidia/inc/kernel/gpu/gsp/message_queue.h new file mode 100644 index 000000000..a6888b63d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gsp/message_queue.h @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * GSP MESSAGE QUEUE + */ + +#ifndef _MESSAGE_QUEUE_H_ +#define _MESSAGE_QUEUE_H_ + +typedef struct _message_queue_info MESSAGE_QUEUE_INFO; + +// CPU-side calls +NV_STATUS GspMsgQueueInit(OBJGPU *pGpu, MESSAGE_QUEUE_INFO **ppMQI); +NV_STATUS GspStatusQueueInit(OBJGPU *pGpu, MESSAGE_QUEUE_INFO **ppMQI); +void GspMsgQueueCleanup(MESSAGE_QUEUE_INFO **ppMQI); +NV_STATUS GspMsgQueueSendCommand(MESSAGE_QUEUE_INFO *pMQI, OBJGPU *pGpu); +NV_STATUS GspMsgQueueReceiveStatus(MESSAGE_QUEUE_INFO *pMQI); + +#endif // _MESSAGE_QUEUE_H_ diff --git a/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h b/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h new file mode 100644 index 000000000..a4607ee79 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/gsp/message_queue_priv.h @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * GSP MESSAGE QUEUE + */ + +#ifndef _MESSAGE_QUEUE_PRIV_H_ +#define _MESSAGE_QUEUE_PRIV_H_ + +#include "msgq/msgq.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + +// Shared memory layout. +// +// Each of the following are page aligned: +// Page table for entire shared memory layout. +// Command queue header +// Command queue entries +// Status queue header +// Status queue entries + +typedef struct GSP_MSG_QUEUE_ELEMENT +{ + NvU32 checkSum; // Set to value needed to make checksum always zero. + NvU32 seqNum; // Sequence number maintained by the message queue. + rpc_message_header_v rpc; +} GSP_MSG_QUEUE_ELEMENT; + +typedef struct _message_queue_info +{ + // Parameters + NvLength pageTableEntryCount; + NvLength pageTableSize; + NvLength commandQueueSize; + NvLength statusQueueSize; + + // Shared memory area. + MEMORY_DESCRIPTOR *pSharedMemDesc; + RmPhysAddr sharedMemPA; // Page table for all of shared mem. + void *pCommandQueue; + void *pStatusQueue; + rpc_message_header_v *pRpcMsgBuf; // RPC message buffer VA. + + void *pInitMsgBuf; // RPC message buffer VA. + RmPhysAddr initMsgBufPA; // RPC message buffer PA. + + // Other CPU-side fields + void *pWorkArea; + GSP_MSG_QUEUE_ELEMENT *pCmdQueueElement; // Working copy of command queue element. + void *pMetaData; + msgqHandle hQueue; // Do not allow requests when hQueue is null. + NvU32 txSeqNum; // Next sequence number for tx. + NvU32 rxSeqNum; // Next sequence number for rx. +} MESSAGE_QUEUE_INFO; + +// +// Most of the following defines resolve to compile-time constants. 
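// As a worked example (assuming RM_PAGE_SIZE is the usual 4 KB, as the
// "2 ^ 12 = 4096" alignment comments below suggest): GSP_MSG_QUEUE_ELEMENT_SIZE_MAX
// comes out to 64 KB, and GSP_MSG_QUEUE_BYTES_TO_ELEMENTS(10000) evaluates to 3,
// i.e. a 10000-byte RPC spans three 4 KB queue elements.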
+// +#define GSP_MSG_QUEUE_ELEMENT_SIZE_MIN RM_PAGE_SIZE +#define GSP_MSG_QUEUE_ELEMENT_SIZE_MAX (GSP_MSG_QUEUE_ELEMENT_SIZE_MIN * 16) +#define GSP_MSG_QUEUE_ELEMENT_HDR_SIZE NV_OFFSETOF(GSP_MSG_QUEUE_ELEMENT, rpc) + +#define GSP_MSG_QUEUE_RPC_SIZE_MAX \ + (GSP_MSG_QUEUE_ELEMENT_SIZE_MAX - GSP_MSG_QUEUE_ELEMENT_HDR_SIZE) + +#define GSP_MSG_QUEUE_BYTES_TO_ELEMENTS(b) \ + NV_DIV_AND_CEIL(b, GSP_MSG_QUEUE_ELEMENT_SIZE_MIN) + +#define GSP_MSG_QUEUE_ALIGN RM_PAGE_SHIFT // 2 ^ 12 = 4096 +#define GSP_MSG_QUEUE_ELEMENT_ALIGN RM_PAGE_SHIFT // 2 ^ 12 = 4096 +#define GSP_MSG_QUEUE_HEADER_SIZE RM_PAGE_SIZE +#define GSP_MSG_QUEUE_HEADER_ALIGN 4 // 2 ^ 4 = 16 + +#endif // _MESSAGE_QUEUE_PRIV_H_ diff --git a/src/nvidia/inc/kernel/gpu/host_eng/host_eng.h b/src/nvidia/inc/kernel/gpu/host_eng/host_eng.h new file mode 100644 index 000000000..ca3588808 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/host_eng/host_eng.h @@ -0,0 +1,3 @@ + +#include "g_host_eng_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/hwpm/profiler_v1.h b/src/nvidia/inc/kernel/gpu/hwpm/profiler_v1.h new file mode 100644 index 000000000..e9633aa5f --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/hwpm/profiler_v1.h @@ -0,0 +1,3 @@ + +#include "g_profiler_v1_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/hwpm/profiler_v2.h b/src/nvidia/inc/kernel/gpu/hwpm/profiler_v2.h new file mode 100644 index 000000000..72a06d496 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/hwpm/profiler_v2.h @@ -0,0 +1,3 @@ + +#include "g_profiler_v2_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/i2c/i2c_api.h b/src/nvidia/inc/kernel/gpu/i2c/i2c_api.h new file mode 100644 index 000000000..eeee33825 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/i2c/i2c_api.h @@ -0,0 +1,3 @@ + +#include "g_i2c_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/intr/engine_idx.h b/src/nvidia/inc/kernel/gpu/intr/engine_idx.h new file mode 100644 index 000000000..a87345adf --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/intr/engine_idx.h @@ -0,0 +1,164 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef ENGINE_IDX_H +#define ENGINE_IDX_H + +#include "utils/nvbitvector.h" + +/***************************************************************************\ +* * +* Module: engine_idx.h +* List of engines for use by INTR (and MC) modules. 
+* * +\***************************************************************************/ + +// +// Engine bits for use by various MC HAL routines +// +#define MC_ENGINE_IDX_NULL 0 // This must be 0 +#define MC_ENGINE_IDX_TMR 1 +#define MC_ENGINE_IDX_DISP 2 +#define MC_ENGINE_IDX_FB 3 +#define MC_ENGINE_IDX_FIFO 4 +#define MC_ENGINE_IDX_VIDEO 5 +#define MC_ENGINE_IDX_MD 6 +#define MC_ENGINE_IDX_BUS 7 +// UNUSED +#define MC_ENGINE_IDX_PMGR 9 +#define MC_ENGINE_IDX_VP2 10 +#define MC_ENGINE_IDX_CIPHER 11 +#define MC_ENGINE_IDX_BIF 12 +#define MC_ENGINE_IDX_PPP 13 +#define MC_ENGINE_IDX_PRIVRING 14 +#define MC_ENGINE_IDX_PMU 15 +#define MC_ENGINE_IDX_CE0 16 +#define MC_ENGINE_IDX_CE1 17 +#define MC_ENGINE_IDX_CE2 18 +#define MC_ENGINE_IDX_CE3 19 +#define MC_ENGINE_IDX_CE4 20 +#define MC_ENGINE_IDX_CE5 21 +#define MC_ENGINE_IDX_CE6 22 +#define MC_ENGINE_IDX_CE7 23 +#define MC_ENGINE_IDX_CE8 24 +#define MC_ENGINE_IDX_CE9 25 +#define MC_ENGINE_IDX_VIC 26 +#define MC_ENGINE_IDX_ISOHUB 27 +#define MC_ENGINE_IDX_VGPU 28 +#define MC_ENGINE_IDX_MSENC 29 +#define MC_ENGINE_IDX_MSENC1 30 +#define MC_ENGINE_IDX_MSENC2 31 +#define MC_ENGINE_IDX_C2C 32 +// UNUSED +#define MC_ENGINE_IDX_LTC 34 +#define MC_ENGINE_IDX_FBHUB 35 +#define MC_ENGINE_IDX_HDACODEC 36 +#define MC_ENGINE_IDX_GMMU 37 +#define MC_ENGINE_IDX_SEC2 38 +#define MC_ENGINE_IDX_FSP 39 +#define MC_ENGINE_IDX_NVLINK 40 +#define MC_ENGINE_IDX_GSP 41 +#define MC_ENGINE_IDX_NVJPG 42 +#define MC_ENGINE_IDX_NVJPEG MC_ENGINE_IDX_NVJPG +#define MC_ENGINE_IDX_NVJPEG0 MC_ENGINE_IDX_NVJPEG +#define MC_ENGINE_IDX_RESERVED43 43 +#define MC_ENGINE_IDX_RESERVED44 44 +#define MC_ENGINE_IDX_RESERVED45 45 +#define MC_ENGINE_IDX_RESERVED46 46 +#define MC_ENGINE_IDX_RESERVED47 47 +#define MC_ENGINE_IDX_RESERVED48 48 +#define MC_ENGINE_IDX_RESERVED49 49 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT 50 +#define MC_ENGINE_IDX_ACCESS_CNTR 51 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT 52 +#define MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR 53 +#define MC_ENGINE_IDX_NON_REPLAYABLE_FAULT_ERROR 54 +#define MC_ENGINE_IDX_INFO_FAULT 55 +#define MC_ENGINE_IDX_BSP 56 +#define MC_ENGINE_IDX_NVDEC MC_ENGINE_IDX_BSP +#define MC_ENGINE_IDX_NVDEC0 MC_ENGINE_IDX_NVDEC +#define MC_ENGINE_IDX_NVDEC1 57 +#define MC_ENGINE_IDX_NVDEC2 58 +#define MC_ENGINE_IDX_NVDEC3 59 +#define MC_ENGINE_IDX_NVDEC4 60 +#define MC_ENGINE_IDX_RESERVED61 61 +#define MC_ENGINE_IDX_RESERVED62 62 +#define MC_ENGINE_IDX_RESERVED63 63 +#define MC_ENGINE_IDX_CPU_DOORBELL 64 +#define MC_ENGINE_IDX_PRIV_DOORBELL 65 +#define MC_ENGINE_IDX_MMU_ECC_ERROR 66 +#define MC_ENGINE_IDX_BLG 67 +#define MC_ENGINE_IDX_PERFMON 68 +#define MC_ENGINE_IDX_BUF_RESET 69 +#define MC_ENGINE_IDX_XBAR 70 +#define MC_ENGINE_IDX_ZPW 71 +#define MC_ENGINE_IDX_OFA0 72 +#define MC_ENGINE_IDX_TEGRA 73 +#define MC_ENGINE_IDX_GR 74 +#define MC_ENGINE_IDX_GR0 MC_ENGINE_IDX_GR +#define MC_ENGINE_IDX_GR1 75 +#define MC_ENGINE_IDX_GR2 76 +#define MC_ENGINE_IDX_GR3 77 +#define MC_ENGINE_IDX_GR4 78 +#define MC_ENGINE_IDX_GR5 79 +#define MC_ENGINE_IDX_GR6 80 +#define MC_ENGINE_IDX_GR7 81 +#define MC_ENGINE_IDX_ESCHED 82 +#define MC_ENGINE_IDX_ESCHED__SIZE 64 +#define MC_ENGINE_IDX_GR_FECS_LOG 146 +#define MC_ENGINE_IDX_GR0_FECS_LOG MC_ENGINE_IDX_GR_FECS_LOG +#define MC_ENGINE_IDX_GR1_FECS_LOG 147 +#define MC_ENGINE_IDX_GR2_FECS_LOG 148 +#define MC_ENGINE_IDX_GR3_FECS_LOG 149 +#define MC_ENGINE_IDX_GR4_FECS_LOG 150 +#define MC_ENGINE_IDX_GR5_FECS_LOG 151 +#define MC_ENGINE_IDX_GR6_FECS_LOG 152 +#define MC_ENGINE_IDX_GR7_FECS_LOG 153 +#define MC_ENGINE_IDX_TMR_SWRL 
154 +#define MC_ENGINE_IDX_MAX 155 // This must be kept as the max bit if + // we need to add more engines +#define MC_ENGINE_IDX_INVALID 0xFFFFFFFF + +// Index GR reference +#define MC_ENGINE_IDX_GRn(x) (MC_ENGINE_IDX_GR0 + (x)) +#define MC_ENGINE_IDX_GRn_FECS_LOG(x) (MC_ENGINE_IDX_GR0_FECS_LOG + (x)) + +// Index CE reference +#define MC_ENGINE_IDX_CE(x) (MC_ENGINE_IDX_CE0 + (x)) + +// Index MSENC reference +#define MC_ENGINE_IDX_MSENCn(x) (MC_ENGINE_IDX_MSENC + (x)) + +// Index NVDEC reference +#define MC_ENGINE_IDX_NVDECn(x) (MC_ENGINE_IDX_NVDEC + (x)) + +// Index NVJPEG reference +#define MC_ENGINE_IDX_NVJPEGn(x) (MC_ENGINE_IDX_NVJPEG + (x)) + +// Index ESCHED reference +#define MC_ENGINE_IDX_ESCHEDn(x) (MC_ENGINE_IDX_ESCHED + (x)) + +MAKE_BITVECTOR(MC_ENGINE_BITVECTOR, MC_ENGINE_IDX_MAX); +typedef MC_ENGINE_BITVECTOR *PMC_ENGINE_BITVECTOR; + +#endif // ENGINE_IDX_H diff --git a/src/nvidia/inc/kernel/gpu/intr/intr.h b/src/nvidia/inc/kernel/gpu/intr/intr.h new file mode 100644 index 000000000..8d3e35362 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/intr/intr.h @@ -0,0 +1,3 @@ + +#include "g_intr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/intr/intr_service.h b/src/nvidia/inc/kernel/gpu/intr/intr_service.h new file mode 100644 index 000000000..2f23c137e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/intr/intr_service.h @@ -0,0 +1,3 @@ + +#include "g_intr_service_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/intr/swintr.h b/src/nvidia/inc/kernel/gpu/intr/swintr.h new file mode 100644 index 000000000..a0c3d9520 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/intr/swintr.h @@ -0,0 +1,3 @@ + +#include "g_swintr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/intrable/intrable.h b/src/nvidia/inc/kernel/gpu/intrable/intrable.h new file mode 100644 index 000000000..aa87efbb5 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/intrable/intrable.h @@ -0,0 +1,3 @@ + +#include "g_intrable_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mc/kernel_mc.h b/src/nvidia/inc/kernel/gpu/mc/kernel_mc.h new file mode 100644 index 000000000..349f9205f --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mc/kernel_mc.h @@ -0,0 +1,3 @@ + +#include "g_kernel_mc_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h b/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h new file mode 100644 index 000000000..42256664d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/context_dma.h @@ -0,0 +1,3 @@ + +#include "g_context_dma_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/fbsr.h b/src/nvidia/inc/kernel/gpu/mem_mgr/fbsr.h new file mode 100644 index 000000000..53d87977c --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/fbsr.h @@ -0,0 +1,3 @@ + +#include "g_fbsr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/fermi_dma.h b/src/nvidia/inc/kernel/gpu/mem_mgr/fermi_dma.h new file mode 100644 index 000000000..f5391fac1 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/fermi_dma.h @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef FERMI_DMA_H +#define FERMI_DMA_H + +#ifdef NV_MMU_PTE +#error "dev_mmu.h included before fermi_dma.h" +#endif +#ifdef NV_PLTCG +#error "dev_ltc.h included before fermi_dma.h" +#endif + +#include "gpu/mem_mgr/mem_desc.h" +#include "mem_mgr/gpu_vaspace.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" + +// TODO -- replace those FERMI_* define usages in RM code +#define FERMI_SMALL_PAGESIZE RM_PAGE_SIZE +#define FERMI_BIG_PAGESIZE_64K RM_PAGE_SIZE_64K +#define FERMI_BIG_PAGESIZE_128K RM_PAGE_SIZE_128K + +typedef struct DMAHALINFO_FERMI +{ + NvU32 vasReverse; + NvU32 compTagLineMultiplier; // Comptaglines increment by this value for VERIF only, see Bug 501651 +} DMAHALINFO_FERMI, *PDMAHALINFO_FERMI; + +#define DMA_GET_FERMI_INFOBLK(p) ((PDMAHALINFO_FERMI) getInfoPtr((p)->infoList, HAL_IMPL_GF100)) + +// +// From GF100 dev_mmu.ref: +// Each PDE maps a 64MB region of virtual memory when using 64KB big pages, or 128MB +// when using 128KB big pages. To map all 40b of virtual address space, the page +// directory consists of 16K entries when using 64KB big pages (64MB * 16K = 2^26 * 2^14 = 2^40), +// or 8K entries when using 128KB big pages (128MB * 8K = 2^27 * 2^13 = 2^40). +// +#define VASPACE_SIZE_MB_FERMI (1 << 20) +#define VASPACE_SIZE_FERMI (((NvU64)VASPACE_SIZE_MB_FERMI) << 20) + +// Alignment Defines for page tables +#define PDB_SHIFT_FERMI 12 // FERMITODO::DMA Will be there in dev_ram very soon +#define PDB_ALIGNMENT_FERMI (1 << PDB_SHIFT_FERMI) + +#endif // FERMI_DMA_H diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/heap.h b/src/nvidia/inc/kernel/gpu/mem_mgr/heap.h new file mode 100644 index 000000000..78d0f40eb --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/heap.h @@ -0,0 +1,3 @@ + +#include "g_heap_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h b/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h new file mode 100644 index 000000000..c0f3eb132 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/heap_base.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _HEAP_BASE_H_ +#define _HEAP_BASE_H_ + +#include "nvtypes.h" +#include "core/prelude.h" +#include "gpu/mem_mgr/mem_desc.h" + +// Contains the minimal set of resources used to compute a PTE kind +typedef struct _def_fb_alloc_page_format +{ + NvU32 attr; + NvU32 attr2; + NvU32 flags; + NvU32 kind; + NvU32 type; +} FB_ALLOC_PAGE_FORMAT; + +// +// FB allocation resources structure +// Need to be allocated from heap +// +typedef struct _def_fb_alloc_info +{ + NvU32 owner; + NvU32 hwResId; + NvU32 height; + NvU32 width; + NvU32 pitch; + NvU64 size; + NvU64 align; + NvU64 alignPad; + NvU64 pad; + NvU64 offset; + NvU32 internalflags; + NvU32 retAttr; + NvU32 retAttr2; + NvU32 format; + NvU32 comprCovg; + NvU32 zcullCovg; + NvU32 uncompressedKind; + NvU32 compPageShift; + NvU32 compressedKind; + NvU32 compTagLineMin; + NvU32 compPageIndexLo; + NvU32 compPageIndexHi; + NvU32 compTagLineMultiplier; + NvU32 startCovg; + NvU64 origSize; + NvU64 adjustedSize; + NvU64 desiredOffset; + + FB_ALLOC_PAGE_FORMAT * pageFormat; + + // Tracking client for VGPU + NvHandle hClient; + NvHandle hDevice; + + // These are only used in Vista + // no need yet for possAttr2 + NvU32 possAttr; // AllocHint, BindCompr + NvU32 ctagOffset; + + // Special flag for kernel allocations + NvBool bIsKernelAlloc; + + // + // Number of 4KB pages in the PTE array + // For contiguous allocation, this will be set to '1' + // + NvU64 pageCount4k; + + // denote that underlying physical allocation is contiguous or not + NvBool bContig; + + // + // Store the PTE Array to be used for allocating comptaglines + // If the NVOS32_ATTR_PHYSICALITY_CONTIGUOUS is set, it will only have + // one entry, otherwise it will have dynamically allocated memory + // This will track the pages in 4KB granularity + // + RmPhysAddr pteArray[1]; +} FB_ALLOC_INFO; + +// +// Contains information on the various hw resources (compr, etc...) that +// can be associated with a memory allocation. +// +typedef struct HWRESOURCE_INFO +{ + NvU32 attr; // NVOS32_ATTR_* + NvU32 attr2; // NVOS32_ATTR2_* + NvU32 comprCovg; // compression coverage + NvU32 ctagOffset; // comptag offset + NvU32 hwResId; + NvU32 refCount; + NvBool isVgpuHostAllocated; // used in vGPU guest RM to indicate if this HW resource is allocated by host RM or not. Used in Windows guest. 
+ NvBool isGuestAllocated; // used in vGPU host RM to indicate if this HW resource is allocated from LIST_OBJECT path on behalf of Linux guest. +} HWRESOURCE_INFO; + + +typedef struct PMA_ALLOC_INFO +{ + NvBool bContig; + NvU32 pageCount; + NvU32 pageSize; + NvU32 refCount; + NvU64 allocSize; + NvU32 flags; + // + // If bContig is TRUE, this array consists of one element. + // If bContig is FALSE, this array is actually larger and + // has one entry for each physical page in the allocation. + // As a result, this structure must be allocated from heap. + // + NvU64 pageArray[1]; + //!!! Place nothing behind pageArray!!! +} PMA_ALLOC_INFO; + +typedef struct MEMORY_ALLOCATION_REQUEST +{ + NV_MEMORY_ALLOCATION_PARAMS *pUserParams; + OBJGPU *pGpu; + NvHandle hMemory; // in: can be NULL (translates to 0) + NvU32 internalflags; // Extended flags ?! flags seem exhausted. + HWRESOURCE_INFO *pHwResource; // out: data copied in if non-NULL + MEMORY_DESCRIPTOR *pMemDesc; // in/out: allocate memdesc if NULL + PMA_ALLOC_INFO *pPmaAllocInfo[NV_MAX_SUBDEVICES]; // out: tracks the pre-allocated memory per GPU. + NvU32 classNum; + NvHandle hClient; + NvHandle hParent; +} MEMORY_ALLOCATION_REQUEST; + +typedef struct +{ + NvU64 address; + NvU32 type; +} BLACKLIST_ADDRESS; + +#endif //_HEAP_BASE_H_ diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h new file mode 100644 index 000000000..6050d4366 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_desc.h @@ -0,0 +1,3 @@ + +#include "g_mem_desc_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h new file mode 100644 index 000000000..811d9026d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_mgr.h @@ -0,0 +1,3 @@ + +#include "g_mem_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/mem_scrub.h b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_scrub.h new file mode 100644 index 000000000..f11130981 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_scrub.h @@ -0,0 +1,252 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/*********************** Memory Scrubber Routines **************************\ +* Defines and structures used for CE Physical Memory Scrubber * +\***************************************************************************/ + +#ifndef MEM_SCRUB_H +#define MEM_SCRUB_H + +#include "nvport/nvport.h" +#include "class/cl906f.h" +#include "class/cl906fsw.h" + +#include "nvctassert.h" +#include "vgpu/vgpu_guest_pma_scrubber.h" + +struct OBJGPU; +struct Heap; +struct OBJCHANNEL; + +#define RM_SUBCHANNEL 0x0 +#define MEMSET_PATTERN 0x00000000 +#define SCRUBBER_NUM_PAYLOAD_SEMAPHORES (2) +#define SCRUBBER_SEMAPHORE_SIZE_INBYTES (4) +#define SCRUBBER_CHANNEL_SEMAPHORE_SIZE (SCRUBBER_SEMAPHORE_SIZE_INBYTES *\ + SCRUBBER_NUM_PAYLOAD_SEMAPHORES) +#define SCRUBBER_CHANNEL_NOTIFIER_SIZE (sizeof(NvNotification) * NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1) + +#define SCRUBBER_VASPACE_BUFFER_SIZE 0x20000000ULL //512MB +#define SIZE_OF_ONE_MEMSET_BLOCK 0x60 +#define SCRUB_MAX_BYTES_PER_LINE 0xffffffffULL +#define MAX_SCRUB_ITEMS 4096 // 4K scrub items + +#define READ_SCRUBBER_PAYLOAD_SEMA(channel) MEM_RD32((NvU8*)channel->pbCpuVA +\ + channel->finishPayloadOffset) + +#define READ_SCRUBBER_PB_SEMA(channel) MEM_RD32((NvU8*)channel->pbCpuVA +\ + channel->semaOffset) + +#define WRITE_SCRUBBER_PB_SEMA(channel, val) MEM_WR32((NvU8*)channel->pbCpuVA +\ + channel->semaOffset, val); + +#define WRITE_SCRUBBER_PAYLOAD_SEMA(channel,val) MEM_WR32((NvU8*)channel->pbCpuVA +\ + channel->finishPayloadOffset, val); +// Use Incrementing Methods to save the PB Space +#define _NV_ASSERT_CONTIGUOUS_METHODS(a1, a2) NV_ASSERT((a2) - (a1) == 4) + +#define NV_PUSH_METHOD(OpType, SubCh, Method, Count) \ + (DRF_DEF(906F, _DMA, _SEC_OP, OpType) |\ + DRF_NUM(906F, _DMA, _METHOD_ADDRESS, (Method) >> 2) |\ + DRF_NUM(906F, _DMA, _METHOD_SUBCHANNEL, (SubCh)) |\ + DRF_NUM(906F, _DMA, _METHOD_COUNT, (Count))) + +#define NV_PUSH_DATA(Data) MEM_WR32(pPtr++, (Data)) + +#define _NV_PUSH_INC_1U(SubCh, a1,d1, Count) \ + do{ \ + NV_PUSH_DATA(NV_PUSH_METHOD(_INC_METHOD, SubCh, a1, Count));\ + NV_PUSH_DATA(d1); \ + } while(0) + +#define NV_PUSH_INC_1U(SubCh, a1,d1) \ + do{ \ + _NV_PUSH_INC_1U (SubCh, a1,d1, 1);\ + } while(0) + +#define NV_PUSH_INC_2U(SubCh, a1,d1, a2,d2) \ + do{ \ + _NV_ASSERT_CONTIGUOUS_METHODS(a1, a2);\ + _NV_PUSH_INC_1U(SubCh, a1,d1, 2); \ + NV_PUSH_DATA(d2); \ + } while(0) + +#define NV_PUSH_INC_3U(SubCh, a1,d1, a2,d2, a3,d3) \ + do{ \ + _NV_ASSERT_CONTIGUOUS_METHODS(a1,a2);\ + _NV_ASSERT_CONTIGUOUS_METHODS(a2,a3);\ + _NV_PUSH_INC_1U(SubCh, a1,d1, 3); \ + NV_PUSH_DATA(d2); \ + NV_PUSH_DATA(d3); \ + } while(0) + +#define NV_PUSH_INC_4U(SubCh, a1,d1, a2,d2, a3,d3, a4,d4) \ + do{ \ + _NV_ASSERT_CONTIGUOUS_METHODS(a1,a2);\ + _NV_ASSERT_CONTIGUOUS_METHODS(a2,a3);\ + _NV_ASSERT_CONTIGUOUS_METHODS(a3,a4);\ + _NV_PUSH_INC_1U(SubCh, a1,d1, 4); \ + NV_PUSH_DATA(d2); \ + NV_PUSH_DATA(d3); \ + NV_PUSH_DATA(d4); \ + } while(0) + + +// structure to store the details of a scrubbing work +typedef struct SCRUB_NODE { + // The 64 bit ID assigned to each work + NvU64 id; + // The base address from which the scrub to start + NvU64 base; + // The size of a scrub work + NvU64 size; +} SCRUB_NODE, *PSCRUB_NODE; + +// +// OBJMEMSCRUB OBJECT +// Memory scrubber struct encapsulates the CE Channel object, +// SCRUB_NODE array of size MAX_SCRUB_ITEMS, index to track +// the scrub work list. The scrubber data structures are +// synchronized using the mutex pScrubberMutex. 
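// A rough call-flow sketch using the interfaces declared later in this header
// (the pList/listSize locals below are illustrative assumptions, not fields of
// this object):
//
//     scrubberConstruct(pGpu, pHeap);                  // create the CE channel
//     scrubSubmitPages(pScrubber, chunkSize, pPages,   // queue scrub work
//                      pageCount, &pList, &listSize);
//     scrubCheckAndWaitForSize(pScrubber, numPages,    // wait for enough pages
//                              pageSize, &pList, &listSize);
//     scrubberDestruct(pGpu, pHeap, pScrubber);        // tear the scrubber down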
+// + +typedef struct OBJMEMSCRUB { + // Mutex for Scrubber Object + PORT_MUTEX *pScrubberMutex; + // Last completed work ID communicated to Client + NvU64 lastSeenIdByClient; + // The last ID assigned to a work + NvU64 lastSubmittedWorkId; + // Last ID checked with the HW scrubber + NvU64 lastSWSemaphoreDone; + // Size of the scrub list + NvLength scrubListSize; + // Pre-allocated Free Scrub List + PSCRUB_NODE pScrubList; + // Scrubber Channel + struct OBJCHANNEL *pChannel; + struct OBJGPU *pGpu; + VGPU_GUEST_PMA_SCRUB_BUFFER_RING vgpuScrubBuffRing; + NvBool bVgpuScrubberEnabled; +} OBJMEMSCRUB, *POBJMEMSCRUB; + +ct_assert(VGPU_GUEST_PMA_MAX_SCRUB_ITEMS == MAX_SCRUB_ITEMS); + +/** + * Constructs the memory scrubber object and signals + * RM to create CE channels for submitting scrubbing work + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap pointer + * + * @returns NV_STATUS on success. + * error, if something fails + */ + +NV_STATUS scrubberConstruct(struct OBJGPU *pGpu, struct Heap *pHeap); + +/** + * Destructs the scrubber + * 1. De-registers the scrubber from the PMA object + * 2. Free the scrubber list and scrubber lock + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap pointer + * @param[in] pScrubber OBJMEMSCRUB pointer + * + */ + +NV_STATUS vgpuAllocGuestPmaScrubberSharedBuffer(struct OBJGPU *pGpu, OBJMEMSCRUB *pScrubber); +void vgpuFreeGuestPmaScrubberSharedBuffer(struct OBJGPU *pGpu, OBJMEMSCRUB *pScrubber); + +void scrubberDestruct(struct OBJGPU *pGpu, struct Heap *pHeap, OBJMEMSCRUB *pMemscrub); + + +/** + * This function checks for the completed scrub work items, + * and populates the SCRUB_NODE in the array. + * @param[in] pScrubber OBJMEMSCRUB pointer + * @param[out] ppList SCRUB_NODE double pointer + * @param[out] pSize NvU64 pointer + * @returns NV_OK on success, + * NV_ERR_INSUFFICIENT_RESOURCES when the list allocation fails. + */ + +NV_STATUS scrubCheck(OBJMEMSCRUB *pScrubber, PSCRUB_NODE *ppList, NvU64 *size); + + +/** + * This function submits work to the memory scrubber. + * This function interface is changed to return a list of scrubbed pages to the + * client, since the scrubber work list resources are limited, if the submission + * page count is more than scrubber list resources the completed scrubbed pages + * are saved in the list and the submission progresses. 
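 *
 * A minimal call sketch (illustrative only; the pList/completedSize locals and
 * the surrounding caller are assumptions, not part of this header):
 *
 *     PSCRUB_NODE pList = NULL;
 *     NvU64       completedSize = 0;
 *     NV_STATUS   status = scrubSubmitPages(pScrubber, chunkSize, pPages,
 *                                           pageCount, &pList, &completedSize);
 *     // Per the description above, pList hands back any already-completed
 *     // scrub work that was saved off while this submission progressed.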
+ *
+ * @param[in] pScrubber OBJMEMSCRUB pointer
+ * @param[in] chunkSize NvU64 size of each page
+ * @param[in] pPages NvU64 array of base address
+ * @param[in] pageCount NvU64 number of pages
+ * @param[out] ppList SCRUB_NODE double pointer to hand off the list
+ * @param[out] pSize NvU64 pointer to store the size
+ *
+ * @returns NV_OK on success, NV_ERR_GENERIC on HW Failure
+ */
+
+NV_STATUS scrubSubmitPages(OBJMEMSCRUB *pScrubber, NvU64 chunkSize, NvU64* pages,
+ NvU64 pageCount, PSCRUB_NODE *ppList, NvU64 *size);
+
+/**
+ * This function waits for the memory scrubber to finish scrubbing the
+ * pages within the range [pagesStart, pagesEnd] for the array of pages
+ * of size pageCount.
+ *
+ * @param[in] pScrubber OBJMEMSCRUB pointer
+ * @param[in] pageSize NvU64 size of each page
+ * @param[in] pPages NvU64 pointer to store the base address
+ * @param[in] pageCount NvU64 number of pages in the array
+ *
+ * @returns NV_OK
+ */
+
+NV_STATUS scrubWaitPages(OBJMEMSCRUB *pScrubber, NvU64 chunkSize, NvU64* pages,
+ NvU32 pageCount);
+
+
+/**
+ * This function waits for the scrubber to finish scrubbing enough items
+ * to have numPages fully scrubbed and then saves the work items to the list
+ * passed to the client.
+ *
+ * @param[in] pScrubber OBJMEMSCRUB pointer
+ * @param[in] numPages the number of pages we should wait to be scrubbed
+ * @param[in] pageSize the page size
+ * @param[out] ppList SCRUB_NODE double pointer to return the saved list pointer
+ * @param[out] pSize NvU64 pointer to return the size of saved work.
+ *
+ * @returns NV_OK if at least one work is pending in the scrubber list
+ * NV_ERR_NO_MEMORY when no work is pending in the scrubber list
+ */
+NV_STATUS scrubCheckAndWaitForSize (OBJMEMSCRUB *pScrubber, NvU64 numPages,
+ NvU64 pageSize, PSCRUB_NODE *ppList, NvU64 *pSize);
+#endif // MEM_SCRUB_H
diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
new file mode 100644
index 000000000..c3d7feaab
--- /dev/null
+++ b/src/nvidia/inc/kernel/gpu/mem_mgr/mem_utils.h
@@ -0,0 +1,49 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#ifndef _MEM_UTILS_H_ +#define _MEM_UTILS_H_ + +#include "core/prelude.h" + +#define CLEAR_HAL_ATTR(a) \ + a = (a &~(DRF_NUM(OS32, _ATTR, _COMPR, 0x3) | \ + DRF_NUM(OS32, _ATTR, _TILED, 0x3) | \ + DRF_NUM(OS32, _ATTR, _ZCULL, 0x3))); + +#define CLEAR_HAL_ATTR2(a) \ + a = (a & ~(DRF_SHIFTMASK(NVOS32_ATTR2_ZBC) | \ + DRF_SHIFTMASK(NVOS32_ATTR2_GPU_CACHEABLE))); + +NvU64 memUtilsLeastCommonAlignment(NvU64 align1, NvU64 align2); + +void memUtilsInitFBAllocInfo(NV_MEMORY_ALLOCATION_PARAMS *pAllocParams, FB_ALLOC_INFO *pFbAllocInfo, + NvHandle hClient, NvHandle hDevice); + +NV_STATUS memUtilsAllocMemDesc(OBJGPU *pGpu, MEMORY_ALLOCATION_REQUEST *pAllocRequest, FB_ALLOC_INFO *pFbAllocInfo, + MEMORY_DESCRIPTOR **ppMemDesc, Heap *pHeap, NV_ADDRESS_SPACE addrSpace, + NvBool bContig, NvBool *bAllocedMemDesc); + +NV_STATUS memUtilsMemSetNoBAR2(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, NvU8 value); + +#endif //_MEM_UTILS_H_ diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/addrtree.h b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/addrtree.h new file mode 100644 index 000000000..a7fef86c2 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/addrtree.h @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @brief Implement PMA address tree + * + */ + +#ifndef ADDRTREE_H +#define ADDRTREE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "map_defines.h" + +// Declare this before its definition because it refers to itself +typedef struct addrtree_node ADDRTREE_NODE; + +struct addrtree_node +{ + NvU32 level; // The level this node belongs to + NvU32 numChildren; // The number of children in the children array + NvU64 frame; // The first frame this node holds + NvU64 state[PMA_BITS_PER_PAGE]; // Tracks the actual state for each map + NvU64 seeChild[PMA_BITS_PER_PAGE]; // Whether this node is partially allocated + // If it is partially allocated, we must go to the children + // to find the correct information. 
+ + ADDRTREE_NODE *parent; // The node's parent + ADDRTREE_NODE *children; // Pointer to an array of children +}; + +typedef struct addrtree_level +{ + NvU64 nodeCount; // Count of total number of nodes on this level + ADDRTREE_NODE *pNodeList; // Pointer to the start of the list of nodes on this level + NvU32 pageSizeShift; // Page size this level is tracking + NvU32 maxFramesPerNode; // The max number of this level frames per node +} ADDRTREE_LEVEL; + +typedef struct pma_addrtree +{ + NvU64 totalFrames; // Total number of 64KB frames being tracked + NvU32 levelCount; // Number of levels in this tree + ADDRTREE_LEVEL *levels; // List of levels in the tree + ADDRTREE_NODE *root; // Start of the node list + NvU64 numPaddingFrames; // Number of 64KB frames needed for padding for alignment + + NvU64 frameEvictionsInProcess; // Count of frame evictions in-process + PMA_STATS *pPmaStats; // Point back to the public struct in PMA structure + NvBool bProtected; // The memory segment tracked by this tree is protected (VPR/CPR) +} PMA_ADDRTREE; + +/*! + * @brief Initializes the addrtree for PMA uses + * + * Allocates the address tree structure for all the pages being managed in this tree. + * Address Tree implementation will use a default configuration for its own level + * structures. + * + * @param[in] numPages The number of pages being managed in this tree + * @param[in] addrBase The base address of this region. Required for addrtree alignment + * @param[in] pPmaStats Pointer to the PMA-wide stats structure + * @param[in] bProtected The tree tracks pages in protected memory + * + * @return PMA_ADDRTREE Pointer to the addrtree if succeeded, NULL otherwise + */ +void *pmaAddrtreeInit(NvU64 numFrames, NvU64 addrBase, PMA_STATS *pPmaStats, NvBool bProtected); + +/*! + * @brief Destroys the addrtree and free the memory + * + * @param[in] pMap The addrtree to destroy + * + * @return void + */ +void pmaAddrtreeDestroy(void *pMap); + +/*! + * @brief Get/set number of evicting frames + * Used for sanity checking in PMA layer as well as performance optimization + * for the map layer to scan faster. + */ +NvU64 pmaAddrtreeGetEvictingFrames(void *pMap); +void pmaAddrtreeSetEvictingFrames(void *pMap, NvU64 frameEvictionsInProcess); + + +/*! + * @brief Scans the addrtree for contiguous space that has the certain status. 
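 *
 * A call sketch (pTree and the locals here are illustrative assumptions):
 *
 *     NvU64 base = 0, allocated = 0;
 *     status = pmaAddrtreeScanContiguous(pTree, addrBase, rangeStart, rangeEnd,
 *                                        numPages, &base, pageSize, alignment,
 *                                        &allocated, NV_FALSE);
 *     // For the contiguous case the freeList argument receives a single entry,
 *     // as noted in the parameter description below.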
+ * + * @param[in] pMap The addrtree to be scanned + * @param[in] addrBase The base address of this region + * @param[in] rangeStart The start of the restricted range + * @param[in] rangeEnd The end of the restricted range + * @param[in] numPages The number of pages we are scanning for + * @param[out] freeList A list of free frame numbers -- contains only 1 element + * @param[in] pageSize Size of one page + * @param[in] alignment Alignment requested by client + * @param[out] pagesAllocated Number of pages this call allocated + * @param[in] bSkipEvict Whether it's ok to skip the scan for evictable pages + * + * @return NV_OK if succeeded + * @return NV_ERR_IN_USE if found pages that can be evicted + * @return NV_ERR_NO_MEMORY if no available pages could be found + */ +NV_STATUS pmaAddrtreeScanContiguous( + void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd, + NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment, + NvU64 *pagesAllocated, NvBool bSkipEvict); + +NV_STATUS pmaAddrtreeScanDiscontiguous( + void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd, + NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment, + NvU64 *pagesAllocated, NvBool bSkipEvict); + +void pmaAddrtreePrintTree(void *pMap, const char* str); + + +/*! + * @brief Changes the state & attrib bits specified by mask + * + * Changes the state of the bits given the physical frame number + * TODO: all four interfaces need to be merged from PMA level so we can remove them! + * + * @param[in] pMap The addrtree to change + * @param[in] frameNum The frame number to change + * @param[in] newState The new state to change to + * @param[in] newStateMask Specific bits to write + * + * @return void + */ +void pmaAddrtreeChangeState(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState); +void pmaAddrtreeChangeStateAttrib(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState, NvBool writeAttrib); +void pmaAddrtreeChangeStateAttribEx(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState,PMA_PAGESTATUS newStateMask); +void pmaAddrtreeChangePageStateAttrib(void * pMap, NvU64 startFrame, NvU32 pageSize, + PMA_PAGESTATUS newState, NvBool writeAttrib); + +/*! + * @brief Read the page state & attrib bits + * + * Read the state of the page given the physical frame number + * + * @param[in] pMap The addrtree to read + * @param[in] frameNum The frame number to read + * @param[in] readAttrib Read attribute bits as well + * + * @return PAGESTATUS of the frame + */ +PMA_PAGESTATUS pmaAddrtreeRead(void *pMap, NvU64 frameNum, NvBool readAttrib); + + +/*! + * @brief Gets the total size of specified PMA managed region. + * + * Gets the total size of current PMA managed region in the FB. + * + * @param[in] pMap Pointer to the addrtree for the region + * @param[in] pBytesTotal Pointer that will return total bytes for current region. + * + */ +void pmaAddrtreeGetSize(void *pMap, NvU64 *pBytesTotal); + + +/*! + * @brief Gets the size of the maximum free chunk of memory in specified region. + * + * Gets the size of the maximum free chunk of memory in the specified PMA managed + * region of the FB. + * + * @param[in] pMap Pointer to the addrtree for the region + * @param[in] pLargestFree Pointer that will return largest free in current region. + * + */ +void pmaAddrtreeGetLargestFree(void *pMap, NvU64 *pLargestFree); + +/*! + * @brief Returns the address range that is completely available for eviction. + * - Should be ALLOC_UNPIN. 
+ * In NUMA, OS manages memory and PMA will only track allocated memory in ALLOC_PIN + * and ALLOC_UNPIN state. FREE memory is managed by OS and cannot be tracked by PMA + * and hence PMA cannot consider FREE memory for eviction and can only consider frames + * in known state to PMA or eviction. ALLOC_PIN cannot be evicted and hence only ALLOC_UNPIN + * can be evictable. + * + * + * @param[in] pMap Pointer to the regmap for the region + * @param[in] addrBase Base address of the region + * @param[in] actualSize Size of the eviction range + * @param[in] pageSize Pagesize + * @param[out] evictStart Starting address of the eviction range + * @param[out] evictEnd End address of the eviction range. + * + * Returns: + * - NV_OK If there is evictable range of given size : actualSize + * + * - NV_ERR_NO_MEMORY if no contiguous range is evictable. + */ +NV_STATUS pmaAddrtreeScanContiguousNumaEviction(void *pMap, NvU64 addrBase, + NvLength actualSize, NvU64 pageSize, NvU64 *evictStart, NvU64 *evictEnd); + +#ifdef __cplusplus +} +#endif + +#endif // ADDRTREE_H diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/map_defines.h b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/map_defines.h new file mode 100644 index 000000000..0817d24d1 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/map_defines.h @@ -0,0 +1,143 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @brief Contains common defines between addrtree and regmap + */ + +#ifndef MAP_DEFINES_H +#define MAP_DEFINES_H + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#if !defined(NVWATCH) + +#endif + +#ifdef __cplusplus +extern "C" { +#endif + + +// Default page size 64KB +#define PMA_GRANULARITY 0x10000 +#define PMA_PAGE_SHIFT 16 + +// +// _PMA_1GB will cause overflows with an NvU32. It's bigger than NvU32 can store, +// but compilation still fails when using a NvU64 instead +// So just use bitshift. 
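// For example, 1ULL << _TREE_2MB yields the same value as _PMA_2MB below, and
// 1ULL << _TREE_512MB the same value as _PMA_512MB.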
+// 1 << _TREE_64KB == sizeof(1 frame) +// + +#define _TREE_64KB 16 +#define _TREE_128KB 17 +#define _TREE_2MB 21 +#define _TREE_128MB 27 +#define _TREE_512MB 29 +#define _TREE_32GB 35 +#define _TREE_2TB 40 + +// Defines shared between pma.c and regmap.c +#define _PMA_64KB (64 * 1024) +#define _PMA_128KB (128 * 1024) +#define _PMA_2MB (2 * 1024 * 1024) +#define _PMA_512MB (512 * 1024 * 1024) + +// Scanning function return code +#define EVICTABLE -2 +#define ALL_FREE -3 + +typedef NvU32 PMA_PAGESTATUS; + +#define MAP_IDX_ALLOC_UNPIN 0 +#define MAP_IDX_ALLOC_PIN 1 +#define MAP_IDX_EVICTING 2 +#define MAP_IDX_SCRUBBING 3 +#define MAP_IDX_PERSISTENT 4 +#define MAP_IDX_NUMA_REUSE 5 +#define MAP_IDX_BLACKLIST 6 + +#define STATE_FREE 0x00 +#define STATE_UNPIN NVBIT(MAP_IDX_ALLOC_UNPIN) +#define STATE_PIN NVBIT(MAP_IDX_ALLOC_PIN) +#define STATE_MASK (STATE_UNPIN | STATE_PIN | STATE_FREE) +#define STATE_COUNT 3 + +#define ATTRIB_EVICTING NVBIT(MAP_IDX_EVICTING) +#define ATTRIB_SCRUBBING NVBIT(MAP_IDX_SCRUBBING) +#define ATTRIB_PERSISTENT NVBIT(MAP_IDX_PERSISTENT) +#define ATTRIB_NUMA_REUSE NVBIT(MAP_IDX_NUMA_REUSE) +#define ATTRIB_BLACKLIST NVBIT(MAP_IDX_BLACKLIST) +#define ATTRIB_MASK (ATTRIB_EVICTING | ATTRIB_SCRUBBING \ + | ATTRIB_PERSISTENT | ATTRIB_NUMA_REUSE \ + | ATTRIB_BLACKLIST) + +#define MAP_MASK (STATE_MASK | ATTRIB_MASK) + +#define PMA_STATE_BITS_PER_PAGE 2 // Alloc & pinned state +#define PMA_ATTRIB_BITS_PER_PAGE 5 // Persistence, Scrubbing, Evicting, Reuse & Blacklisting attributes +#define PMA_BITS_PER_PAGE (PMA_STATE_BITS_PER_PAGE + PMA_ATTRIB_BITS_PER_PAGE) + +// +// Stores PMA-wide statistics. +// +// In NUMA mode "free" means "not allocated by PMA". Since the kernel owns +// memory, any amount of those pages could be allocated by the kernel but PMA +// does not have that visibility. The provided counts are thus an upper bound on +// the number of free pages. +// +typedef struct _PMA_STATS +{ + NvU64 num2mbPages; // PMA-wide total number of 2MB pages + NvU64 numFreeFrames; // PMA-wide free 64KB frame count + NvU64 numFree2mbPages; // PMA-wide free 2MB pages count +#if !defined(NVWATCH) +#endif // !defined(NVWATCH) +} PMA_STATS; + +// Stores blacklisting information passed in from heap layer +typedef struct +{ + NvU64 physOffset; // base address of blacklisted page + NvBool bIsDynamic; // True if page was dynamically blacklisted +} PMA_BLACKLIST_ADDRESS, *PPMA_BLACKLIST_ADDRESS; + +// +// Store the blacklist chunk information with the physical offset aligned to 64K, +// and whether the blacklist chunk is managed by RM or Client. +// +typedef struct +{ + NvU64 physOffset; // physical offset of blacklisted FB address + NvBool bIsDynamic; // True if the page was dynamically blacklisted + NvBool bIsValid; // If the blacklisted address is still managed by RM +} PMA_BLACKLIST_CHUNK; + + +#ifdef __cplusplus +} +#endif + +#endif // MAP_DEFINES_H diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/numa.h b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/numa.h new file mode 100644 index 000000000..5fa78dbbf --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/numa.h @@ -0,0 +1,77 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * This file defines the internal interfaces to the NUMA allocator component, + * used by parent module PMA. + * + * All interfaces here should only be used on PowerPC 9 systems with NUMA + * mode enabled, where GPU's framebuffer memory is onlined to the Linux kernel. + * + * Therefore, PMA just needs to sub-allocate from the Linux kernel for most + * allocation requests. However, PMA does keep states in order to support + * eviction and GPU scrubber. Please see below for more details. + */ + +#ifndef NUMA_H +#define NUMA_H + +#include "nvport/nvport.h" +#include "phys_mem_allocator.h" +#include "nvmisc.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/*! + * @brief Handles NUMA allocation by calling kernel APIs + * + * This function will implement a mixture of pass-through memory allocation + * from the Linux kernel as well as eviction from UVM if the Linux kernel + * does not have any free memory. + * + * From a high level, PMA keep a bitmap for all UVM unpinned (evictable) + * allocations and PMA will decide to kick off eviction based on the bitmap. + * + * Please note that GPU scrubber is used for any memory evicted and CPU scrubber + * is used for allocations coming from the Linux kernel. + * The perf implication is under further study. See bug #1999793. + */ +NV_STATUS pmaNumaAllocate(PMA *pPma, NvLength allocationCount, NvU32 pageSize, + PMA_ALLOCATION_OPTIONS *allocationOptions, NvU64 *pPages); + +/*! + * @brief Frees pages on a NUMA node. + * This function implements pass-through free calls to the Linux kernel. + * For UVM allocations PMA also updates the bitmap used for eviction. + */ +void pmaNumaFreeInternal(PMA *pPma, NvU64 *pPages, NvU64 pageCount, NvU64 size, NvU32 flag); + +void pmaNumaSetReclaimSkipThreshold(PMA *pPma, NvU32 skipReclaimPercent); +#ifdef __cplusplus +} +#endif + +#endif // NUMA_H diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h new file mode 100644 index 000000000..c2a4e29cd --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h @@ -0,0 +1,856 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @brief This file exposes the PMA interfaces. + * The PMA module interacts with the RM and UVM components. UVM will use the + * ops layer to call out to PMA, while RM directly calls into PMA. + * The PMA module takes a global lock to protect its internal structure. It + * uses a bitmap structure called regmap. + * + * @bug + * 1. status code -- decide if we need to add to global + * 2. suspend/resume -- might add one more function to support + * + * @TODO + * 1. external fragmentation + * 2. use new scrubber API and remove the initScrubbing atomic variable + */ + +#ifndef PHYS_MEM_ALLOCATOR_H +#define PHYS_MEM_ALLOCATOR_H + +#include "nvport/nvport.h" +#include "regmap.h" +#include "addrtree.h" +#include "nvmisc.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct OBJMEMSCRUB OBJMEMSCRUB; +typedef struct SCRUB_NODE SCRUB_NODE; + +#define PMA_REGION_SIZE 64 +#define PMA_ADDR2FRAME(addr, base) (((addr) - (base)) >> PMA_PAGE_SHIFT) +#define PMA_FRAME2ADDR(frame, base) ((base) + ((frame) << PMA_PAGE_SHIFT)) + +// +// These flags are used for initialization in order to set global PMA states, +// in case we need to wait for scrubber to be initialized or wait for a NUMA +// node being onlined, etc. +// +#define PMA_INIT_NONE NVBIT(0) +#define PMA_INIT_SCRUB_ON_FREE NVBIT(1) +#define PMA_INIT_NUMA NVBIT(2) +#define PMA_INIT_INTERNAL NVBIT(3) // Used after heap is removed +#define PMA_INIT_FORCE_PERSISTENCE NVBIT(4) +#define PMA_INIT_ADDRTREE NVBIT(5) + +// These flags are used for querying PMA's config and/or state. +#define PMA_QUERY_SCRUB_ENABLED NVBIT(0) +#define PMA_QUERY_SCRUB_VALID NVBIT(1) +#define PMA_QUERY_NUMA_ENABLED NVBIT(2) +#define PMA_QUERY_NUMA_ONLINED NVBIT(3) + +// +// When modifying flags, make sure they are compatible with the mirrored +// UVM_PMA_* flags in nv_uvm_types.h. 
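A quick illustrative sketch of the PMA_ADDR2FRAME/PMA_FRAME2ADDR macros above; the function name and addresses are hypothetical example values only.

    static inline void pmaExampleFrameMath(void)
    {
        NvU64 base  = 0x80000000ULL;               // hypothetical region base address
        NvU64 addr  = base + 3 * PMA_GRANULARITY;  // an address inside frame 3 of the region
        NvU64 frame = PMA_ADDR2FRAME(addr, base);  // yields frame index 3
        NvU64 back  = PMA_FRAME2ADDR(frame, base); // yields the 64KB-aligned address of frame 3
        (void)back;
    }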
+// +// Input flags +#define PMA_ALLOCATE_DONT_EVICT NVBIT(0) +#define PMA_ALLOCATE_PINNED NVBIT(1) +#define PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED NVBIT(2) +#define PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE NVBIT(3) +#define PMA_ALLOCATE_SPECIFY_REGION_ID NVBIT(4) +#define PMA_ALLOCATE_PREFER_SLOWEST NVBIT(5) +#define PMA_ALLOCATE_CONTIGUOUS NVBIT(6) +#define PMA_ALLOCATE_PERSISTENT NVBIT(7) +#define PMA_ALLOCATE_PROTECTED_REGION NVBIT(8) +#define PMA_ALLOCATE_FORCE_ALIGNMENT NVBIT(9) +#define PMA_ALLOCATE_NO_ZERO NVBIT(10) +#define PMA_ALLOCATE_TURN_BLACKLIST_OFF NVBIT(11) +#define PMA_ALLOCATE_ALLOW_PARTIAL NVBIT(12) + +// Output flags +#define PMA_ALLOCATE_RESULT_IS_ZERO NVBIT(0) + +// These are flags input to the pmaFreePages call +#define PMA_FREE_SKIP_SCRUB NVBIT(0) + +// State bits for debugging utilities like nvwatch +#define PMA_SCRUB_INITIALIZE 0 +#define PMA_SCRUB_IN_PROGRESS 1 +#define PMA_SCRUB_DONE 2 + +#define PMA_SCRUBBER_VALID 1 +#define PMA_SCRUBBER_INVALID 0 + +#define PMA_NUMA_NO_NODE -1 + +// Maximum blacklist entries possible +#define PMA_MAX_BLACKLIST_ENTRIES 512 + +typedef struct +{ + NvU32 flags; + NvU32 minimumSpeed; // valid if flags & PMA_ALLOCATE_SPECIFY_MININUM_SPEED + NvU64 physBegin, physEnd; // valid if flags & PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE + NvU32 regionId; // valid if flags & PMA_ALLOCATE_SPECIFY_REGION_ID + NvU64 alignment; // valid if flags & PMA_ALLOCATE_FORCE_ALIGNMENT + NvLength numPagesAllocated; // valid if flags & PMA_ALLOCATE_ALLOW_PARTIAL + + NvU32 resultFlags; // valid if the allocation function returns NV_OK +} PMA_ALLOCATION_OPTIONS; + +// +// Explanation: This struct will be provided when UVM/RM registers a region with PMA, +// after which the struct is stored locally in PMA. The internal "filter" function will +// use the information everytime a request comes in. +// +typedef struct +{ + NvU64 base; // Base/start address of the region + NvU64 limit; // Last/end address of region + NvU32 performance; // Relative performance. Higher is faster + NvBool bSupportCompressed; // Support compressed kinds + NvBool bSupportISO; // Support ISO (display, cursor, video) surfaces + NvBool bProtected; // Represents a protected region of memory. +} PMA_REGION_DESCRIPTOR; + +typedef struct _PMA_MAP_INFO PMA_MAP_INFO; +typedef struct _PMA PMA; + +// Range descriptors for managing persistent range lists +typedef struct _RANGELISTTYPE +{ + NvU64 base; + NvU64 limit; + struct _RANGELISTTYPE *pNext; +} RANGELISTTYPE, *PRANGELISTTYPE; + +/*! + * @brief Callbacks to UVM for eviction + */ +typedef NV_STATUS (*pmaEvictPagesCb_t)(void *ctxPtr, NvU32 pageSize, NvU64 *pPages, NvU32 count, NvU64 physBegin, NvU64 physEnd); +typedef NV_STATUS (*pmaEvictRangeCb_t)(void *ctxPtr, NvU64 physBegin, NvU64 physEnd); + +/*! + * @brief Pluggable data structure management. Currently we have regmap and address tree. 
+ */ +typedef void *(*pmaMapInit_t)(NvU64 numFrames, NvU64 addrBase, PMA_STATS *pPmaStats, NvBool bProtected); +typedef void (*pmaMapDestroy_t)(void *pMap); +typedef void (*pmaMapChangeState_t)(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState); +typedef void (*pmaMapChangeStateAttrib_t)(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState, NvBool writeAttrib); +typedef void (*pmaMapChangeStateAttribEx_t)(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState, PMA_PAGESTATUS newStateMask); +typedef void (*pmaMapChangePageStateAttrib_t)(void *pMap, NvU64 startFrame, NvU32 pageSize, PMA_PAGESTATUS newState, NvBool writeAttrib); +typedef PMA_PAGESTATUS (*pmaMapRead_t)(void *pMap, NvU64 frameNum, NvBool readAttrib); +typedef NV_STATUS (*pmaMapScanContiguous_t)(void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd, + NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment, + NvU64 *pagesAllocated, NvBool bSkipEvict); +typedef NV_STATUS (*pmaMapScanDiscontiguous_t)(void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd, + NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment, + NvU64 *pagesAllocated, NvBool bSkipEvict); +typedef void (*pmaMapGetSize_t)(void *pMap, NvU64 *pBytesTotal); +typedef void (*pmaMapGetLargestFree_t)(void *pMap, NvU64 *pLargestFree); +typedef NV_STATUS (*pmaMapScanContiguousNumaEviction_t)(void *pMap, NvU64 addrBase, NvLength actualSize, + NvU64 pageSize, NvU64 *evictStart, NvU64 *evictEnd); +typedef NvU64 (*pmaMapGetEvictingFrames_t)(void *pMap); +typedef void (*pmaMapSetEvictingFrames_t)(void *pMap, NvU64 frameEvictionsInProcess); + +struct _PMA_MAP_INFO +{ + NvU32 mode; + pmaMapInit_t pmaMapInit; + pmaMapDestroy_t pmaMapDestroy; + pmaMapChangeState_t pmaMapChangeState; + pmaMapChangeStateAttrib_t pmaMapChangeStateAttrib; + pmaMapChangeStateAttribEx_t pmaMapChangeStateAttribEx; + pmaMapChangePageStateAttrib_t pmaMapChangePageStateAttrib; + pmaMapRead_t pmaMapRead; + pmaMapScanContiguous_t pmaMapScanContiguous; + pmaMapScanDiscontiguous_t pmaMapScanDiscontiguous; + pmaMapGetSize_t pmaMapGetSize; + pmaMapGetLargestFree_t pmaMapGetLargestFree; + pmaMapScanContiguousNumaEviction_t pmaMapScanContiguousNumaEviction; + pmaMapGetEvictingFrames_t pmaMapGetEvictingFrames; + pmaMapSetEvictingFrames_t pmaMapSetEvictingFrames; +}; + +struct _PMA +{ + PORT_SPINLOCK *pPmaLock; // PMA-wide lock + PORT_MUTEX *pEvictionCallbacksLock; // Eviction callback registration lock + + // Only used when free scrub-on-free feature is turned on + PORT_RWLOCK *pScrubberValidLock; // A reader-writer lock to protect the scrubber valid bit + PORT_MUTEX *pAllocLock; // Used to protect page stealing in the allocation path + + // Region related states + NvU32 regSize; // Actual size of regions array + void * pRegions[PMA_REGION_SIZE]; // All the region maps stored as opaque pointers + NvU32 *pSortedFastFirst; // Pre-sorted array of region IDs + PMA_REGION_DESCRIPTOR *pRegDescriptors [PMA_REGION_SIZE]; // Stores the descriptions of each region + PMA_MAP_INFO *pMapInfo; // The pluggable layer for managing scanning + + // Allocation related states + void * evictCtxPtr; // Opaque context pointer for eviction callback + pmaEvictPagesCb_t evictPagesCb; // Discontiguous eviction callback + pmaEvictRangeCb_t evictRangeCb; // Contiguous eviction callback + NvU64 frameAllocDemand; // Frame count of allocations in-process + NvBool bForcePersistence; // Force all allocations to persist across suspend/resume + PMA_STATS pmaStats; // PMA statistics used for client heuristics + + // 
Scrubber related states + NvSPtr initScrubbing; // If the init scrubber has finished in this PMA + NvBool bScrubOnFree; // If "scrub on free" is enabled for this PMA object + NvSPtr scrubberValid; // If scrubber object is valid, using atomic variable to prevent races + OBJMEMSCRUB *pScrubObj; // Object to store the FreeScrub header + + // NUMA states + NvBool bNuma; // If we are allocating for a NUMA system + NvBool nodeOnlined; // If node is onlined + NvS32 numaNodeId; // Current Node ID, set at initialization. -1 means invalid + NvU64 coherentCpuFbBase; // Used to calculate FB offset from bus address + NvU64 coherentCpuFbSize; // Used for error checking only + NvU32 numaReclaimSkipThreshold; // percent value below which __GFP_RECLAIM will not be used. + + // Blacklist related states + PMA_BLACKLIST_CHUNK *pBlacklistChunks; // Tracking for blacklist pages + NvU32 blacklistCount; // Number of blacklist pages + NvBool bClientManagedBlacklist; // Blacklisted pages in PMA that will be taken over by Client +}; + +/*! + * @brief This must be called before any other PMA functions. Returns a PMA + * object for later use + * + * @param[in] gpuId The UVM global GPU ID. Defined in nvCpuUuid.h as a 16 + * byte digest. PMA will only store reference to it. + * @param[in] pma Pointer to PMA object being initialized + * @param[in] initFlags PMA initialization flags for special modes + * @return + * NV_ERR_INVALID_ARGUMENT: + * If the combination of initFlags is invalid. + * NV_ERR_NO_MEMORY: + * Internal memory allocation failed. + * NV_ERR_GENERIC: + * Unexpected error. We try hard to avoid returning this error + * code, because it is not very informative. + * + */ +NV_STATUS pmaInitialize(PMA *pPma, NvU32 initFlags); + + +/*! + * @brief Release a PMA object. Also frees memory. + * + * All eviction handlers must have been unregistered by this point. + * + * @param[in] pma Pointer to PMA object being destroyed. + */ +void pmaDestroy(PMA *pPma); + + +/*! + * @brief Queries PMA configuration and state. + * + * Any clients of PMA can query config and state with a valid PMA object. + * Querying at different times may return different values when states change. + * + * @param[in] pma Pointer to PMA object being destroyed. + * @param[in/out] pConfigs Configs/states to query. See PMA_QUERY_* above. + */ +NV_STATUS pmaQueryConfigs(PMA* pPma, NvU32 *pConfigs); + + +/*! + * @brief Attaches a region of physical memory to be managed by the PMA. + * + * Systems with floorswept memory configurations will have (multi gigabyte) + * holes in memory. Each contiguous region should be reported by this + * registration function. This is also intended to be used by systems with + * two speed memories. + * + * Note: Some 0FB configurations may choose to skip registering any regions. + * At most 64 regions may be registered with a single PMA. + * + * @param[in] id + * A unique value in the range [0, 64) that uniquely identifies this + * region. Passed in by RM in region-order. Should be continuous for + * best performance. + * + * @param[in] bAsyncEccScrub + * RM will set this when it is performing the initial ECC scrub + * asynchronously. All pages in the pma will be marked 'allocated pinned'. + * RM will call pmaFreeContiguous as memory is scrubbed. + * + * Until the scrub is complete (by calling pmaScrubComplete), no + * allocation calls will fail with out of memory. Instead, it will hang + * until scrubbing is complete. 
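A minimal sketch of the registration call documented here, assuming pmaInitialize has already succeeded; the region bounds, performance value, and helper name are illustrative, not values mandated by this API.

    static NV_STATUS pmaExampleRegisterOneRegion(PMA *pPma)
    {
        PMA_REGION_DESCRIPTOR desc;

        desc.base               = 0;                // hypothetical FB start address
        desc.limit              = (4ULL << 30) - 1; // hypothetical 4GB region
        desc.performance        = 100;              // relative speed, higher is faster
        desc.bSupportCompressed = NV_TRUE;
        desc.bSupportISO        = NV_TRUE;
        desc.bProtected         = NV_FALSE;

        // Region id 0, no async ECC scrub, and no blacklisted pages in this sketch.
        return pmaRegisterRegion(pPma, 0, NV_FALSE, &desc, 0, NULL);
    }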
One exception is when the client passes + * in the PMA_DONT_EVICT flag, in which case the call will actually fail + * regardless of whether RM is scrubbing memory. + * + * CAUTION! RM is responsible for ensuring black-listed pages are not + * marked free during the scrub. + * + * @param[in] regionState: + * Contains physical information about the region. + * + * @param[in] pBlacklistPageBase: + * List of base addresses of bad GPU pages. + * Each address is assumed to reference to a page of size + * PMA_GRANULARITY (64kb). + * + * Implementors note: PMA will simply mark these pages as "allocatedPinned". + * This list should be saved so that during pmaDestroy we can verify that only + * these pages are still allocated. + * + * @param[in] blacklistCount: + * Number of pages in above list. + * + * @return + * NV_ERR_NO_MEMORY: + * Internal memory allocation failed. + * NV_ERR_GENERIC: + * Unexpected error. We try hard to avoid returning this error code, + * because it is not very informative. + * + */ +NV_STATUS pmaRegisterRegion(PMA *pPma, NvU32 id, NvBool bAsyncEccScrub, + PMA_REGION_DESCRIPTOR *pRegionDesc, NvU32 blacklistCount, PPMA_BLACKLIST_ADDRESS pBlacklistPage); + + +/*! + * @brief Synchronous API for allocating pages from the PMA. + * PMA will decide which pma regions to allocate from based on the provided + * flags. PMA will also initiate UVM evictions to make room for this + * allocation unless prohibited by PMA_FLAGS_DONT_EVICT. UVM callers must pass + * this flag to avoid deadlock. Only UVM may allocate unpinned memory from this + * API and note that eviction callbacks need to be registered before that + * happens. + * + * Alignment of the allocated pages is guaranteed to be greater or equal to the + * requested page size. For contiguous allocations, a greater alignment can be + * specified with the PMA_ALLOCATE_FORCE_ALIGNMENT flag and the alignment + * allocation option. For non-contiguous allocations, it's an error to specify + * an alignment larger than the page size. + * + * For broadcast methods, PMA will guarantee the same physical frames are + * allocated on multiple GPUs, specified by the PMA objects passed in. + * + * Implementors note: + * If region registered with asyncEccScrub and pmaScrubComplete + * has not yet been issued then we cannot return NV_ERR_NO_MEMORY. + * We must instead drop the lock and wait until the next call to + * either pmaScrubComplete or pmaFreeContiguous/Pages to retry. + * Exception: PMA_ALLOCATE_DONT_EVICT + * + * @param[in] pPma + * The input PMA object + * + * @param[in] pageCount + * Number of pages to allocate. + * + * @param[in] pageSize + * 64kb, 128kb or 2mb. No other values are permissible. + * + * @param[in/out] allocationOptions + * Input flags: + * PMA_ALLOCATE_DONT_EVICT + * Do not evict in order to service this allocation. + * Do not wait for ECC scrub completion if out of memory. + * PMA_ALLOCATE_PINNED + * The allocation is pinned (RM must pass this) + * PMA_ALLOCATE_SPECIFY_MININUM_SPEED + * Only examines regions whose speed is greater than + * minimumSpeed. + * PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE + * Restrict the allowable physical address range for + * allocation to [physBegin, physEnd). + * PMA_ALLOCATE_SPECIFY_REGION_ID + * Only service allocations out of 'regionId'. The + * regionId is assigned in pmaRegisterRegion. + * PMA_ALLOCATE_PREFER_SLOWEST + * Prefer slower memory over faster. 
+ * PMA_ALLOCATE_CONTIGUOUS + * If this allocation is a contiguous allocation + * PMA_ALLOCATE_PERSISTENT + * If this allocation should persist across suspend/resume + * PMA_ALLOCATE_FORCE_ALIGNMENT + * Force a specific alignment of the allocation. For non-contiguous + * allocations has to be less or equal to the page size. + * + * Output flags: + * PMA_ALLOCATE_RESULT_IS_ZERO + * If the allocated pages have been scrubbed. + * + * @param[out] pPages + * Return array of base addresses of allocated pages. + * + * @return + * NV_ERR_NO_MEMORY: + * Internal memory allocation failed. + * NV_ERR_GENERIC: + * Unexpected error. We try hard to avoid returning this error + * code,because it is not very informative. + * + */ +NV_STATUS pmaAllocatePages(PMA *pPma, NvLength pageCount, NvU32 pageSize, + PMA_ALLOCATION_OPTIONS *pAllocationOptions, NvU64 *pPages); + +// allocate on multiple GPU, thus pmaCount +NV_STATUS pmaAllocatePagesBroadcast(PMA **pPma, NvU32 pmaCount, NvLength allocationCount, + NvU32 pageSize, PMA_ALLOCATION_OPTIONS *pAllocationOptions, NvU64 *pPages); + + +/*! + * @brief Marks previously unpinned pages as pinned. + * + * It will return an error and rollback any change if any page is not + * previously marked "unpinned". + * + * @param[in] pPages + * Array of base addresses of pages to pin + * + * @param[in] pageCount + * Number of pages to pin + * + * @param[in] pageSize + * Page size of each page being pinned + * + * @return + * NV_OK: + * The pages have been pinned successfully. + * + * NV_ERR_IN_USE: + * Some of the pages requested to be pinned are being evicted and thus + * cannot be pinned. None of the pages have been pinned and the caller + * needs to make sure the pages can be successfully evicted. + * + * NV_ERR_INVALID_STATE: + * Some of the pages requested to be pinned weren't in the allocated + * unpinned state. + * + * TODO some error for rollback + * + */ +NV_STATUS pmaPinPages(PMA *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize); + + +/*! + * @brief Marks previously pinned pages as unpinned. + * + * It will return an error and rollback any change if any page is not + * previously marked "pinned". Behaviour is undefined is any blacklisted + * pages are unpinned. + * + * @param[in] pPages + * Array of base addresses of pages to pin + * + * @param[in] pageCount + * Number of pages to pin + * + * @param[in] pageSize + * Page size of each page being unpinned + * + * @return + * NV_ERR_GENERIC: + * Unexpected error. We try hard to avoid returning this error + * code,because it is not very informative. + * TODO some error for rollback + * + */ +NV_STATUS pmaUnpinPages(PMA *pPma, NvU64 *pPages, NvLength pageCount, NvU32 pageSize); + + +/*! + * @brief Marks a list of pages as free. + * This operation is also used by RM to mark pages as "scrubbed" for the + * initial ECC sweep. This function does not fail. + * + * @param[in] pPages + * Array of base addresses of pages to free + * + * @param[in] pageCount + * Number of pages to free + * If the value is 1, this is a contiguous free + * + * @param[in] size + * When freeing contiguous memory, this is the total size; + * When freeing discontiguous memory, this is page size of each page. + * + * @param[in] flag + * PMA_FREE_SKIP_SCRUB + * This flag is used to disable scrub on free when PMA frees the page + * + * @return Void + * + */ +void pmaFreePages(PMA *pPma, NvU64 *pPages, NvU64 pageCount, NvU64 size, NvU32 flag); + +/*! + * @brief Clears scrubbing bit on PMA pages within the supplied range. 
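A short sketch of the allocate/free pair documented above, assuming a previously initialized PMA with registered regions; the page count, flags, and helper name are illustrative.

    static NV_STATUS pmaExampleAllocAndFree(PMA *pPma)
    {
        NvU64                  pages[4];
        PMA_ALLOCATION_OPTIONS options = {0};
        NV_STATUS              status;

        // Discontiguous, pinned allocation of four 64KB pages.
        options.flags = PMA_ALLOCATE_PINNED;

        status = pmaAllocatePages(pPma, 4, _PMA_64KB, &options, pages);
        if (status != NV_OK)
            return status;

        // PMA_ALLOCATE_RESULT_IS_ZERO in resultFlags means the pages were
        // already scrubbed when they were handed out.
        if ((options.resultFlags & PMA_ALLOCATE_RESULT_IS_ZERO) == 0)
        {
            // The caller would clear the pages here if it needs zeroed memory.
        }

        // Discontiguous free: pageCount > 1, so "size" is the per-page size.
        pmaFreePages(pPma, pages, 4, _PMA_64KB, 0);
        return NV_OK;
    }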
+ * + * @param[in] pma + * PMA object + * + * @param[in] rangeBase + * The start address + * + * @param[in] rangeLimit + * The end address + * + * @return + * void + */ +void pmaClearScrubRange(PMA *pPma, NvU64 rangeBase, NvU64 rangeLimit); + + +/*! + * @brief Notifies the PMA that the ECC scrub is complete. + * + * Until this function is called no PMA allocations will fail with + * "insufficient memory". They will hang and wait for the scrubbing. + * + * TODO consider suspend/resume behavior! + * + * Design Note: + * pmaRegisterRegion(w/ asyncEccScrub) leaves all pages as + * "allocated and pinned". + * + * As the ECC scrub progresses, RM will call PMA and "pmaFreeContiguous" + * the regions as they are scrubbed. + * + * @param[in] pma + * PMA object + * + * @return + * NV_ERR_GENERIC: + * Unexpected error. We try hard to avoid returning this error + * code,because it is not very informative. + * + */ +NV_STATUS pmaScrubComplete(PMA *pPma); + + +/*! + * Register the eviction callbacks. + * + * Only one set of callbacks can be registered at a time and they need to be + * unregistered with pmaUnregisterEvictionCb() before a new set can be + * registered. + * + * Note that eviction callbacks need to be registered before allocating any unpinned memory. + * + * See the documentation of the callbacks above for details of the eviction. + * + * @param[in] pma + * PMA object + * + * @param[in] evictPageCb + * The callback function for evicting pages at a time + * + * @param[in] evictRangeCb + * The callback function for evicting a range + * + * @param[in] ctxPtr + * The callback context pointer to be passed back on callback + * + * @return + * NV_ERR_INVALID_ARGUMENT: + * One of the callbacks or PMA object was NULL. + * + * NV_ERR_INVALID_STATE: + * Callbacks already registered. + */ +NV_STATUS pmaRegisterEvictionCb(PMA *pPma, pmaEvictPagesCb_t evictPagesCb, pmaEvictRangeCb_t evictRangeCb, void *ctxPtr); + + +/*! + * Unregister the eviction callbacks. + * + * Guarantees that all pending eviction callbacks complete before returning. + * + * All unpinned allocations must be freed before the callbacks are unregistered + * and the caller needs to guarantee that any pending eviction callbacks won't + * block on the thread unregistering the callbacks. + * + * The call does nothing if the PMA object is NULL. + * + * @param[in] pma + * PMA object. + */ +void pmaUnregisterEvictionCb(PMA *pPma); + +/*! + * @brief Returns information about the total FB memory. + * + * @param[in] pPma PMA pointer + * @param[in] pBytesTotal Pointer that will return the total FB memory size. + * + * @return + * void + */ +void pmaGetTotalMemory(PMA *pPma, NvU64 *pBytesTotal); + +/*! + * @brief Returns information about each region managed by PMA + * + * @param[in] pPma PMA pointer + * @param[out] pRegSize Pointer to size of region descriptor array + * @param[out] ppRegionDesc Pointer to the array of region descriptors + * + * @return + * NV_STATUS codes based on convention + */ +NV_STATUS pmaGetRegionInfo(PMA *pPma, NvU32 *pRegSize, PMA_REGION_DESCRIPTOR **ppRegionDesc); + +/*! + * @brief Returns information about the total free FB memory. + * + * @param[in] pPma PMA pointer + * @param[in] pBytesFree Pointer that will return the free FB memory size. + * + * @return + * void + */ +void pmaGetFreeMemory(PMA *pPma, NvU64 *pBytesFree); + +/*! + * @brief Returns information about the largest free FB memory chunk across all regions. 
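To make the eviction callback shapes above concrete, here is a sketch with hypothetical no-op callbacks; a real client (such as UVM) would migrate data out of the requested pages or range before returning NV_OK.

    static NV_STATUS exampleEvictPagesCb(void *ctxPtr, NvU32 pageSize, NvU64 *pPages,
                                         NvU32 count, NvU64 physBegin, NvU64 physEnd)
    {
        // Placeholder: evict each page in pPages[0..count) restricted to
        // [physBegin, physEnd] before returning success.
        (void)ctxPtr; (void)pageSize; (void)pPages; (void)count;
        (void)physBegin; (void)physEnd;
        return NV_OK;
    }

    static NV_STATUS exampleEvictRangeCb(void *ctxPtr, NvU64 physBegin, NvU64 physEnd)
    {
        // Placeholder: evict the whole physical range before returning success.
        (void)ctxPtr; (void)physBegin; (void)physEnd;
        return NV_OK;
    }

    static NV_STATUS pmaExampleRegisterEviction(PMA *pPma, void *pClientCtx)
    {
        // Must be done before any unpinned allocation is requested.
        return pmaRegisterEvictionCb(pPma, exampleEvictPagesCb, exampleEvictRangeCb, pClientCtx);
    }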
+ * + * @param[in] pPma PMA pointer + * @param[in] pLargestFree Pointer that will return the largest free FB memory size. + * @param[in] pRegionBase Pointer that will return the region base of largest free FB memory. + * @param[in] pLargestOffset Pointer that will return the offset in region for largest free FB memory. + * + * @return + * void + */ +void pmaGetLargestFree(PMA *pPma, NvU64 *pLargestFree, NvU64 *pRegionBase, NvU64 *pLargestOffset); + +/*! + * @brief Returns a list of PMA allocated blocks which has ATTRIB_PERSISTENT + * attribute set. It will be used by FBSR module to save/restore + * clients PMA allocations during system suspend/resume. + * + * @param[in] pPma PMA pointer + * @param[in/out] ppPersistList Pointer to list of persistent segments + * + * @return + * NV_OK Success + * NV_ERR_NO_MEMORY Failure to allocate list + */ +NV_STATUS pmaBuildPersistentList(PMA *pPma, PRANGELISTTYPE *ppPersistList); + + +/*! + * @brief Returns a list of all PMA allocated blocks. For all the PMA + * allocated blocks, either STATE_PIN or STATE_UNPIN attribute will + * be set. It will be used by FBSR module to save/restore clients + * PMA allocations for Unix GC-OFF based power management. + * + * @param[in] pPma PMA pointer + * @param[in/out] ppList Pointer to list of all the PMA allocated blocks. + * + * @return + * NV_OK Success + * NV_ERR_NO_MEMORY Failure to allocate list + */ +NV_STATUS pmaBuildAllocatedBlocksList(PMA *pPma, PRANGELISTTYPE *ppList); + + +/*! + * @brief Frees previously generated list by function pmaBuildPersistentList(). + * + * @param[in] pPma PMA pointer + * @param[in/out] ppPersistList Pointer to list of persistent segments + * + * @return + * void + */ +void pmaFreePersistentList(PMA *pPma, PRANGELISTTYPE *ppPersistList); + + +/*! + * @brief Frees previously generated list by function + * pmaBuildAllocatedBlocksList(). + * + * @param[in] pPma PMA pointer + * @param[in/out] ppList Pointer to list of all the PMA allocated blocks. + * + * @return + * void + */ +void pmaFreeAllocatedBlocksList(PMA *pPma, PRANGELISTTYPE *ppList); + +/*! + * @brief Registers a memory scrubber to PMA. Currently only used for + * the scrub-on-free feature. + * + * This function will take the PMA lock to protect against races between + * the the use of the MemScrub object and any Reg/Unreg calls. + * + * @param[in] pPma PMA pointer + * @param[in] pScrubObj Pointer to the scrubber + * + * @return + * NV_OK Success + * NV_INVALID_STATE When PMA is NULL or bMemScrub is NV_FALSE + */ +NV_STATUS pmaRegMemScrub(PMA *pPma, OBJMEMSCRUB *pScrubObj); + + +/*! + * @brief Unregisters the memory scrubber, when the scrubber is torn + * down. If any feature is turned on that would require this scrubber to be + * present, after this call, the PMA object will be unavailable to give out + * any free pages for use, until pmaRegMemScrub is called. + * + * This function will take the PMA lock to protect against races between + * the use of the MemScrub object and any Reg/Unreg calls. + * + * @param[in] pPma PMA pointer + * + * @return + * void + */ +void pmaUnregMemScrub(PMA *pPma); + + +/*! + * @brief Notifies PMA that node onlining is complete and we can make pass-through + * calls to OS. + * + * This function will take the PMA lock to protect against races between + * the use of online/offline calls. + * + * TODO: Under development! 
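As a sketch of how the persistent-list helpers above are meant to be used; the loop body is illustrative, and the node fields come from RANGELISTTYPE.

    static NV_STATUS pmaExampleWalkPersistentList(PMA *pPma)
    {
        PRANGELISTTYPE pList = NULL;
        PRANGELISTTYPE pNode;
        NV_STATUS      status;

        status = pmaBuildPersistentList(pPma, &pList);
        if (status != NV_OK)
            return status;

        // Each node describes one [base, limit] range whose pages carry
        // ATTRIB_PERSISTENT and must survive suspend/resume.
        for (pNode = pList; pNode != NULL; pNode = pNode->pNext)
        {
            NvU64 rangeBytes = pNode->limit - pNode->base + 1;
            (void)rangeBytes; // e.g. hand the range to FBSR for save/restore
        }

        pmaFreePersistentList(pPma, &pList);
        return NV_OK;
    }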
+ * Currently: + * - We online at hardcoded offset and size of PMA memory + * - Normal PMA allocation will go to the part of PMA that is not onlined + * + * Future: + * - Online all of PMA memory + * - PMA requests will suballocate from OS + * - Any mapping created over the PMA range will have to use kmap instead of ioremap + * + * Bug #1973975, 1858365, 1943187 + * + * @param[in] pPma PMA pointer + * @param[in] numaNodeId NUMA node ID that PMA is managing + * @param[in] coherentCpuFbBase The ATS aperture base corresponding to start of FB + * @param[in] coherentCpuFbSize The ATS aperture size. The actual size we + * onlined. This could be different from the HW + * ATS aperture size. + * + * @return + * NV_OK Success + * NV_INVALID_STATE When PMA is NULL or bNuma is NV_FALSE or nodeId is too big + */ +NV_STATUS pmaNumaOnlined(PMA *pPma, NvS32 numaNodeId, + NvU64 coherentCpuFbBase, NvU64 coherentCpuFbSize); + + +/*! + * @brief Notifies PMA that node offlining has started and PMA should start failing + * all allocation calls. + * + * This function will take the PMA lock to protect against races between + * the use of online/offline calls. + * + * @param[in] pPma PMA pointer + * + * @return + * void + */ +void pmaNumaOfflined(PMA *pPma); + +/*! + * @brief Returns client managed blacklisted pages in the PMA region + * + * @param[in] pPma PMA pointer + * @param[in] pChunks pointer to blacklist addresses in the PMA region + * @param[in] pPageSize pointer to Size of each blacklist page addresses + * @param[in] pNumChunks pointer to valid client managed blacklist pages + * + * @return + * void + */ +void pmaGetClientBlacklistedPages(PMA *pPma, NvU64 *pChunks, NvU32 *pPageSize, NvU32 *pNumChunks); + +/*! + * @brief Returns the PMA blacklist size in bytes for + * both statically and dynamically blacklisted pages. + * pDynamicBlacklistSize and pStaticBlacklistSize are only copied-out if non-NULL. + * + * @param[in] pPma PMA pointer + * @param[in] pDynamicBlacklistSize pointer to dynamic blacklist size (bytes) + * @param[in] pStaticBlacklistSize pointer to static blacklist size (bytes) + * + * @return + * void + */ +void pmaGetBlacklistSize(PMA *pPma, NvU32 *pDynamicBlacklistSize, NvU32 *pStaticBlacklistSize); + +/*! + * @brief Clear scrub bit for pages from the list of scrub items + * that fall in the base to base+size range to return these pages to PMA. + * + * @param[in] pPma PMA pointer + * @param[in] pPmaScrubList list of scrub items each with a id, base and size + * @param[in] count count of scrub items + * + * @return + * void + */ +void pmaClearScrubbedPages(PMA *pPma, SCRUB_NODE *pPmaScrubList, NvU64 count); + +/*! + * @brief Print states of all regions + */ +void pmaPrintMapState(PMA *pPma); + +/*! + * @brief Track the given physical address as blacklisted page in PMA. This call will blacklist + * the entire PMA page frame of size 64KB which contains the physical address. + * + * @param[in] pPma PMA pointer + * @param[in] physAddr Address of the blacklisted page + * + * Locking: + * - DO NOT call this function with the PMA lock already held. + * - This function will internally grab the PMA lock to update the attribute bits. 
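A small sketch combining the blacklist helpers in this header; the function name and address are hypothetical, and pmaAddToBlacklistTracking is invoked without the PMA lock held, per the locking note above.

    static void pmaExampleTrackBadPage(PMA *pPma, NvU64 badAddr)
    {
        NvU32 dynamicBytes = 0;
        NvU32 staticBytes  = 0;

        // Blacklists the entire 64KB frame containing badAddr.
        if (pmaAddToBlacklistTracking(pPma, badAddr) == NV_OK)
        {
            // Re-query the blacklist footprint in bytes after the update.
            pmaGetBlacklistSize(pPma, &dynamicBytes, &staticBytes);
        }
    }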
+ * + * @return + * void + */ +NV_STATUS pmaAddToBlacklistTracking(PMA *pPma, NvU64 physBase); + +#ifdef __cplusplus +} +#endif + +#endif // PHYS_MEM_ALLOCATOR_H diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h new file mode 100644 index 000000000..ec43669bb --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h @@ -0,0 +1,228 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef UTIL_H +#define UTIL_H + +#include "phys_mem_allocator.h" +#include "nvport/nvport.h" +#include "regmap.h" +#include "nvmisc.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// TODO See if this can be added to NvPort +#define pmaPortAtomicGet(ptr) portAtomicOrSize((ptr), 0) + +NvU32 findRegionID(PMA *pPma, NvU64 address); +void pmaPrintBlockStatus(PMA_PAGESTATUS blockStatus); +void pmaRegionPrint(PMA *pPma, PMA_REGION_DESCRIPTOR *pRegion, void *pMap); +NvBool pmaStateCheck(PMA *pPma); + +// Temporary putting these here. TODO refactor them in the next CL. +NV_STATUS _pmaEvictContiguous(PMA *pPma, void *pMap, NvU64 evictStart, NvU64 evictEnd); +NV_STATUS _pmaEvictPages(PMA *pPma, void *pMap, NvU64 *evictPages, NvU64 evictPageCount, + NvU64 *allocPages, NvU64 allocPageCount, NvU32 pageSize, NvU64 physBegin, + NvU64 physEnd); +void _pmaClearScrubBit(PMA *pPma, SCRUB_NODE *pPmaScrubList, NvU64 count); +NV_STATUS _pmaCheckScrubbedPages(PMA *pPma, NvU64 chunkSize, NvU64 *pPages, NvU32 pageCount); +NV_STATUS _pmaPredictOutOfMemory(PMA *pPma, NvLength allocationCount, NvU32 pageSize, + PMA_ALLOCATION_OPTIONS *allocationOptions); +NV_STATUS pmaSelector(PMA *pPma, PMA_ALLOCATION_OPTIONS *allocationOptions, NvS32 *regionList); +void _pmaReallocBlacklistPages (PMA *pPma, NvU32 regId, NvU64 rangeBegin, NvU64 rangeSize); +void _pmaFreeBlacklistPages (PMA *pPma, NvU32 regId, NvU64 rangeBegin, NvU64 rangeSize); +NvBool _pmaLookupBlacklistFrame (PMA *pPma, NvU32 regId, NvU64 frameNum); + +/*! + * @brief Marks a list of pages with the specified state and attributes. + * This operation is also used by RM to mark pages as "scrubbed" for the + * initial ECC sweep. This function does not fail. + * + * @param[in] base: + * Start of address range to pin. 
+ * + * @param[in] size: + * Region size in bytes + * Will try to align to 64KB page. For desired behavior, pass in aligned + * sizes. + * + * @param[in] pmaState: + * FREE, ALLOC_UNPIN, ALLOC_PIN + * PERSISTENT + * SCRUBBING + * EVICTING + * + * @param[in] pmaStateWriteMask: + * ALLOC_MASK + * PERSISTENT + * SCRUBBING + * EVICTING + * + * @return + * NV_ERR_GENERIC: + * Unexpected error. We try hard to avoid returning this error + * code,because it is not very informative. + * + */ +void pmaSetBlockStateAttribUnderPmaLock(PMA *pPma, NvU64 base, NvU64 size, + PMA_PAGESTATUS pmaState, PMA_PAGESTATUS pmaStateWriteMask); + +/*! + * @brief Marks a list of pages with the specified state and attributes. + * This operation is also used by RM to mark pages as "scrubbed" for the + * initial ECC sweep. This function does not fail. + * + * @param[in] base: + * Start of address range to pin. + * + * @param[in] size: + * Region size in bytes + * Will try to align to 64KB page. For desired behavior, pass in aligned + * sizes. + * + * @param[in] pmaState: + * FREE, ALLOC_UNPIN, ALLOC_PIN + * PERSISTENT + * SCRUBBING + * EVICTING + * + * @param[in] pmaStateWriteMask: + * ALLOC_MASK + * PERSISTENT + * SCRUBBING + * EVICTING + * + * @return + * NV_ERR_GENERIC: + * Unexpected error. We try hard to avoid returning this error + * code,because it is not very informative. + * + */ +void pmaSetBlockStateAttrib(PMA *pPma, NvU64 base, NvU64 size, + PMA_PAGESTATUS pmaState, PMA_PAGESTATUS pmaStateWriteMask); + +/* + * @brief Update the per region specific frame state statistics. This function helps + * keep a running count of number of frames that are STATE_FREE, STATE_UNPIN, STATE_PIN. + * It also keeps the 64KB free frame & 2MB pages statistics via pNumFree statistics. + * + * @param[in/out] pNumFree: + * Pointer to the statistic counter to update + * @param[in/out] numPages: + * The number of pages to adjust pNumFree by + * @param[in] oldState: + * The state the page was in + * + * @param[in] newState: + * The state the page will be in + */ +void pmaStatsUpdateState(NvU64 *pNumFree, NvU64 numPages, PMA_PAGESTATUS oldState, + PMA_PAGESTATUS newState); + +NvBool pmaIsEvictionPending(PMA *pPma); + +void pmaOsSchedule(void); + +/*! + * @brief Returns a list of PMA-managed blocks with the specified state and + * attributes. + * + * @param[in] pPma PMA pointer + * @param[in/out] ppList Pointer to list of segments having specified + * state and attributes + * @param[in] pageStatus PMA page state and attribute + * + * @return + * NV_OK Success + * NV_ERR_NO_MEMORY Failure to allocate list + */ +NV_STATUS pmaBuildList(PMA *pPma, PRANGELISTTYPE *ppList, + PMA_PAGESTATUS pageStatus); + +/*! + * @brief Frees previously generated list of PMA-managed blocks with + * function pmaBuildList() + * + * @param[in] pPma PMA pointer + * @param[in/out] ppList Pointer to list of PMA segments + * + * @return + * None + */ +void pmaFreeList(PMA *pPma, PRANGELISTTYPE *ppList); + +/*! 
+ * @brief Registers blacklisting information + * Called during pmaRegisterRegion to set the attribute for blacklisted pages + * + * @param[in] pPma PMA pointer + * @param[in] physAddrBase The base address of this address tree + * @param[in] pBlacklistPageBase Structure that contains the blacklisted pages + * @param[in] blacklistCount Number of blacklisted pages + * + * @return NV_OK + * NV_ERR_NO_MEMORY if memory allocation fails + */ +NV_STATUS pmaRegisterBlacklistInfo(PMA *pPma, NvU64 physAddrBase, + PPMA_BLACKLIST_ADDRESS pBlacklistPageBase, NvU32 blacklistCount); + +/*! + * @brief Query blacklisting states tracked by PMA + * + * @param[in] pPma PMA pointer + * @param[in] pBlacklistCount Pointer to store count of blacklisted pages + * @param[in] pbClientManagedBlacklist Pointer to store whether client manages blacklisting + * @param[in] ppBlacklistChunks Pointer to store the blacklisted chunks + * + * @return void + */ +void pmaQueryBlacklistInfo(PMA *pPma, NvU32 *pBlacklistCount, NvBool *pbClientManagedBlacklist, + PMA_BLACKLIST_CHUNK **ppBlacklistChunks); + +/*! + * @brief Update whether PMA has client managed blacklisting or not + * + * @param[in] pPma PMA pointer + * @param[in] bClientManagedBlacklist Whether PMA has client managed blacklisting + * When set to TRUE, PMA hands over control of blacklisted + * pages to the OS + * When set to FALSE, blacklisted pages are managed by PMA + * + * @return void + */ +void pmaSetClientManagedBlacklist(PMA *pPma, NvBool bClientManagedBlacklist); + + +/*! + * @brief Checks if the address is already present in the blacklist info + * + * @param[in] pPma PMA pointer + * @param[in] physAddress Physical address that needs to be blacklisted + + * @return NvBool + */ +NvBool pmaIsBlacklistingAddrUnique(PMA *pPma, NvU64 physAddress); + +#endif // UTIL_H diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/regmap.h b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/regmap.h new file mode 100644 index 000000000..f5d90b742 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/phys_mem_allocator/regmap.h @@ -0,0 +1,275 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @brief A bit map to keep track of FB frames + * + * @bug Return status needs to conform to NV_STATUS + * -- take a reference as output and always return NV_STATUS + * @bug Is enum a good choice for this? + * + */ + +#ifndef REGMAP_H +#define REGMAP_H + +#include "map_defines.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// +// Store the type here because we might use different algorithms for +// different types of memory scan +// +typedef struct pma_regmap +{ + NvU64 totalFrames; /* Total number of frames */ + NvU64 mapLength; /* Length of the map */ + NvU64 *map[PMA_BITS_PER_PAGE]; /* The bit map */ + NvU64 frameEvictionsInProcess; /* Count of frame evictions in-process */ + PMA_STATS *pPmaStats; /* Point back to the public struct in PMA structure */ + NvBool bProtected; /* The memory segment tracked by this regmap is protected (VPR/CPR) */ +} PMA_REGMAP; + +void pmaRegmapPrint(PMA_REGMAP *pMap); + + +/*! + * @brief Initializes the regmap for PMA uses + * + * Allocates a bit map for all the pages being managed in this map. All bits + * are initialized to be FREE. + * + * @param[in] numPages The number of pages being managed in this map + * @param[in] addrBase The base address of this region. Unused in regmap + * @param[in] pPmaPubStats The PMA stat struct to update + * @param[in] bProtected The map tracks pages in protected memory + * + * @return PMA_REGMAP Pointer to the regmap if succeeded, NULL otherwise + */ +void *pmaRegmapInit(NvU64 numPages, NvU64 addrBase, PMA_STATS *pPmaStats, NvBool bProtected); + + +/*! + * @brief Destroys the region map and free the memory + * + * @param[in] pMap The regmap to destroy + * + * @return void + */ +void pmaRegmapDestroy(void *pMap); + +/*! + * @brief Get/set number of evicting frames + * Used for sanity checking in PMA layer as well as performance optimization + * for the map layer to scan faster. + */ +NvU64 pmaRegmapGetEvictingFrames(void *pMap); +void pmaRegmapSetEvictingFrames(void *pMap, NvU64 frameEvictionsInProcess); + +/*! + * @brief Changes the recorded state bits + * + * Changes the state of the bits given the physical frame number + * + * @param[in] pMap The regmap to change + * @param[in] frameNum The frame number to change + * @param[in] newState The new state to change to + * + * @return void + */ +void pmaRegmapChangeState(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState); + +/*! + * @brief Changes the recorded state & attrib bits + * + * Changes the state of the bits given the physical frame number + * + * @param[in] pMap The regmap to change + * @param[in] frameNum The frame number to change + * @param[in] newState The new state to change to + * @param[in] writeAttrib Write attribute bits as well + * + * @return void + */ +void pmaRegmapChangeStateAttrib(void *pMap, NvU64 frameNum, + PMA_PAGESTATUS newState, NvBool writeAttrib); + +/*! + * @brief Changes the recorded state & attrib bits for an entire page + * + * Changes the state of the bits for an entire page given the physical + * frame number and the page size + * + * @param[in] pMap The regmap to change + * @param[in] frameNumStart The frame number to change + * @param[in] pageSize The page size of the page to change + * @param[in] newState The new state to change to + * @param[in] writeAttrib Write attribute bits as well + * + * @return void + */ +void pmaRegmapChangePageStateAttrib(void * pMap, NvU64 frameNumStart, NvU32 pageSize, + PMA_PAGESTATUS newState, NvBool writeAttrib); + +/*! 
+ * @brief Changes the state & attrib bits specified by mask + * + * Changes the state of the bits given the physical frame number + * + * @param[in] pMap The regmap to change + * @param[in] frameNum The frame number to change + * @param[in] newState The new state to change to + * @param[in] newStateMask Specific bits to write + * + * @return void + */ +void pmaRegmapChangeStateAttribEx(void *pMap, NvU64 frameNum, + PMA_PAGESTATUS newState, + PMA_PAGESTATUS newStateMask); + +/*! + * @brief Read the page state & attrib bits + * + * Read the state of the page given the physical frame number + * + * @param[in] pMap The regmap to read + * @param[in] frameNum The frame number to read + * @param[in] readAttrib Read attribute bits as well + * + * @return PAGESTATUS of the frame + */ +PMA_PAGESTATUS pmaRegmapRead(void *pMap, NvU64 frameNum, NvBool readAttrib); + + +/*! + * @brief Scan the list for contiguous space + * + * Scans the regmap for contiguous space that has the certain status. This + * function is optimized for performance if PMA_BITS_PER_PAGE is 2. It uses + * trailing-zero algorithm to determine which frame(s) has different status. + * + * @param[in] pMap The regmap to be scanned + * @param[in] addrBase The base address of this region + * @param[in] rangeStart The start of the restricted range + * @param[in] rangeEnd The end of the restricted range + * @param[in] numPages The number of pages we are scanning for + * @param[out] freeList A list of free frame numbers -- contains only 1 element + * @param[in] pageSize Size of one page + * @param[in] alignment Alignment requested by client + * @param[out] pagesAllocated Number of pages this call allocated + * @param[in] bSkipEvict Whether it's ok to skip the scan for evictable pages + * + * @return NV_OK if succeeded + * @return NV_ERR_IN_USE if found pages that can be evicted + * @return NV_ERR_NO_MEMORY if no available pages could be found + */ +NV_STATUS pmaRegmapScanContiguous( + void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd, + NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment, + NvU64 *pagesAllocated, NvBool bSkipEvict); + +/*! + * @brief Scan the list for any space + * + * Scans the regmap for numFrames number of frames, possibly scattered across + * the regmap. This function will allocate contiguous space whenever possible. + * Not optimized for performance yet. + * + * @param[in] pMap The regmap to be scanned + * @param[in] addrBase The base address of this region + * @param[in] rangeStart The start of the restricted range + * @param[in] rangeEnd The end of the restricted range + * @param[in] numFrames The number of frames we are scanning for + * @param[out] freeList A list of free frame numbers (allocated by client) + * @param[in] pageSize Size of one page + * @param[in] alignment Alignment requested by client. Has to be equal to the pageSize. + * @param[out] pagesAllocated Number of pages this call allocated + * @param[in] bSkipEvict Whether it's ok to skip the scan for evictable pages + * + * @return NV_OK if succeeded + * @return NV_ERR_IN_USE if found pages that can be evicted + * @return NV_ERR_NO_MEMORY if no available pages could be found + */ +NV_STATUS pmaRegmapScanDiscontiguous( + void *pMap, NvU64 addrBase, NvU64 rangeStart, NvU64 rangeEnd, + NvU64 numPages, NvU64 *freelist, NvU32 pageSize, NvU64 alignment, + NvU64 *pagesAllocated, NvBool bSkipEvict); + +/*! + * @brief Gets the total size of specified PMA managed region. + * + * Gets the total size of current PMA managed region in the FB. 
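An illustrative decode of the per-frame status returned by pmaRegmapRead above, using the STATE_*/ATTRIB_* bits from map_defines.h; the notion of "evictable" follows the ALLOC_UNPIN rule described in the NUMA eviction comments, and the helper name is hypothetical.

    static NvBool pmaExampleFrameIsEvictable(void *pMap, NvU64 frameNum)
    {
        // Read both the state and the attribute bits for this frame.
        PMA_PAGESTATUS status = pmaRegmapRead(pMap, frameNum, NV_TRUE);

        // Allocated-unpinned and not already being evicted.
        return ((status & STATE_MASK) == STATE_UNPIN) &&
               ((status & ATTRIB_EVICTING) == 0);
    }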
+ * + * @param[in] pMap Pointer to the regmap for the region + * @param[in] pBytesTotal Pointer that will return total bytes for current region. + * + */ +void pmaRegmapGetSize(void *pMap, NvU64 *pBytesTotal); + + +/*! + * @brief Gets the size of the maximum free chunk of memory in specified region. + * + * Gets the size of the maximum free chunk of memory in the specified PMA managed + * region of the FB. + * + * @param[in] pMap Pointer to the regmap for the region + * @param[in] pLargestFree Pointer that will return largest free in current region. + * + */ +void pmaRegmapGetLargestFree(void *pMap, NvU64 *pLargestFree); + + +/*! + * @brief Returns the address range that is completely available for eviction. + * - Should be ALLOC_UNPIN. + * In NUMA, OS manages memory and PMA will only track allocated memory in ALLOC_PIN + * and ALLOC_UNPIN state. FREE memory is managed by OS and cannot be tracked by PMA + * and hence PMA cannot consider FREE memory for eviction and can only consider frames + * in known state to PMA or eviction. ALLOC_PIN cannot be evicted and hence only ALLOC_UNPIN + * can be evictable. + * + * + * @param[in] pMap Pointer to the regmap for the region + * @param[in] addrBase Base address of the region + * @param[in] actualSize Size of the eviction range + * @param[in] pageSize Pagesize + * @param[out] evictStart Starting address of the eviction range + * @param[out] evictEnd End address of the eviction range. + * + * Returns: + * - NV_OK If there is evictable range of given size : actualSize + * + * - NV_ERR_NO_MEMORY if no contiguous range is evictable. + */ +NV_STATUS pmaRegMapScanContiguousNumaEviction(void *pMap, NvU64 addrBase, + NvLength actualSize, NvU64 pageSize, NvU64 *evictStart, NvU64 *evictEnd); + +#ifdef __cplusplus +} +#endif + +#endif // REGMAP_H diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/vaspace_api.h b/src/nvidia/inc/kernel/gpu/mem_mgr/vaspace_api.h new file mode 100644 index 000000000..4dc4feba0 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/vaspace_api.h @@ -0,0 +1,3 @@ + +#include "g_vaspace_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator.h b/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator.h new file mode 100644 index 000000000..92895456b --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator.h @@ -0,0 +1,3 @@ + +#include "g_virt_mem_allocator_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h b/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h new file mode 100644 index 000000000..779b608fa --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_mgr/virt_mem_allocator_common.h @@ -0,0 +1,151 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef VIRT_MEM_ALLOCATOR_COMMON_H +#define VIRT_MEM_ALLOCATOR_COMMON_H + +/********************************* DMA Manager *****************************\ +* * +* DMA object/engine management. * +* * +****************************************************************************/ + +#include "nvtypes.h" +#include "nvgputypes.h" +#include "nvstatus.h" + +typedef struct OBJGPU OBJGPU; +typedef struct ChannelDescendant ChannelDescendant; +typedef struct ContextDma ContextDma; +typedef struct Memory Memory; +typedef struct EVENTNOTIFICATION EVENTNOTIFICATION; +//--------------------------------------------------------------------------- +// +// Memory page defines. +// +// These correspond to the granularity understood by the hardware +// for address mapping; the system page size can be larger. +// +//--------------------------------------------------------------------------- +#define RM_PAGE_SIZE 4096 +#define RM_PAGE_SIZE_64K (64 * 1024) +#define RM_PAGE_SIZE_128K (128 * 1024) +#define RM_PAGE_MASK 0x0FFF +#define RM_PAGE_SHIFT 12 +#define RM_PAGE_SHIFT_64K 16 + +// Huge page size is 2 MB +#define RM_PAGE_SHIFT_HUGE 21 +#define RM_PAGE_SIZE_HUGE (1 << RM_PAGE_SHIFT_HUGE) +#define RM_PAGE_MASK_HUGE ((1 << RM_PAGE_SHIFT_HUGE) - 1) + +// 512MB page size +#define RM_PAGE_SHIFT_512M 29 +#define RM_PAGE_SIZE_512M (1 << RM_PAGE_SHIFT_512M) +#define RM_PAGE_MASK_512M (RM_PAGE_SIZE_512M - 1) + +//--------------------------------------------------------------------------- +// +// Memory page attributes. +// +// These attributes are used by software for page size mapping; +// Big pages can be of 64/128KB[Fermi/Kepler/Pascal] +// Huge page is 2 MB[Pascal+] +// 512MB page is Ampere+ +// Default page attribute lets driver decide the optimal page size +// +//--------------------------------------------------------------------------- +typedef enum +{ + RM_ATTR_PAGE_SIZE_DEFAULT = 0x0, + RM_ATTR_PAGE_SIZE_4KB = 0x1, + RM_ATTR_PAGE_SIZE_BIG = 0x2, + RM_ATTR_PAGE_SIZE_HUGE = 0x3, + RM_ATTR_PAGE_SIZE_512MB = 0x4, + RM_ATTR_PAGE_SIZE_INVALID = 0x5 +} +RM_ATTR_PAGE_SIZE; + +//--------------------------------------------------------------------------- +// +// Notification buffer structure. +// +//--------------------------------------------------------------------------- +typedef union _def_info_status_buffer +{ + struct + { + NvV16 OtherInfo16; + NvV16 Status; + } Info16Status_16; + + NvU32 Info16Status_32; + +} INFO16_STATUS; + +typedef struct _def_notification_buffer +{ + NvU32 TimeLo; + NvU32 TimeHi; + NvV32 OtherInfo32; + INFO16_STATUS Info16Status; +} NOTIFICATION, *PNOTIFICATION; + + +//--------------------------------------------------------------------------- +// +// Function prototypes. 
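As a rough sketch of how the page-size attributes above relate to the byte sizes defined earlier in this header: the helper name is hypothetical, RM_ATTR_PAGE_SIZE_BIG is shown as 64KB although big pages can also be 128KB on some chips, and DEFAULT/INVALID are mapped to 0 to mean "no fixed size".

    static NvU64 rmExampleAttrToPageSize(RM_ATTR_PAGE_SIZE attr)
    {
        switch (attr)
        {
            case RM_ATTR_PAGE_SIZE_4KB:   return RM_PAGE_SIZE;       // 4KB
            case RM_ATTR_PAGE_SIZE_BIG:   return RM_PAGE_SIZE_64K;   // 64KB here; 128KB on some GPUs
            case RM_ATTR_PAGE_SIZE_HUGE:  return RM_PAGE_SIZE_HUGE;  // 2MB
            case RM_ATTR_PAGE_SIZE_512MB: return RM_PAGE_SIZE_512M;  // 512MB
            default:                      return 0;                  // DEFAULT/INVALID: driver decides
        }
    }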
+// +//--------------------------------------------------------------------------- +void notifyMethodComplete(OBJGPU*, ChannelDescendant *, NvU32, NvV32, NvU32, NvU16, NV_STATUS); + +NV_STATUS notifyFillNotifier (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS); +NV_STATUS notifyFillNotifierOffset (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU64); +NV_STATUS notifyFillNotifierOffsetTimestamp(OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU64, NvU64); +NV_STATUS notifyFillNotifierArray (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierArrayTimestamp (OBJGPU*, ContextDma *, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +void notifyFillNOTIFICATION(OBJGPU *pGpu, + NOTIFICATION *pNotifyBuffer, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvBool TimeSupplied, + NvU64 Time); +NV_STATUS notifyFillNotifierGPUVA (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierGPUVATimestamp (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +NV_STATUS notifyFillNotifierMemory (OBJGPU*, Memory *, NvV32, NvV16, NV_STATUS, NvU32); +NV_STATUS notifyFillNotifierMemoryTimestamp(OBJGPU*, Memory *, NvV32, NvV16, NV_STATUS, NvU32, NvU64); +void notifyFillNvNotification(OBJGPU *pGpu, + NvNotification *pNotification, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvBool TimeSupplied, + NvU64 Time); + +NV_STATUS semaphoreFillGPUVA (OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV32, NvBool); +NV_STATUS semaphoreFillGPUVATimestamp(OBJGPU*, NvHandle, NvHandle, NvU64, NvV32, NvV32, NvBool, NvU64); + +RM_ATTR_PAGE_SIZE dmaNvos32ToPageSizeAttr(NvU32 attr, NvU32 attr2); + +#endif // VIRT_MEM_ALLOCATOR_COMMON_H diff --git a/src/nvidia/inc/kernel/gpu/mem_sys/kern_mem_sys.h b/src/nvidia/inc/kernel/gpu/mem_sys/kern_mem_sys.h new file mode 100644 index 000000000..ef709b5e8 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_sys/kern_mem_sys.h @@ -0,0 +1,3 @@ + +#include "g_kern_mem_sys_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mem_sys/zbc_api.h b/src/nvidia/inc/kernel/gpu/mem_sys/zbc_api.h new file mode 100644 index 000000000..fbc7e9f9f --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mem_sys/zbc_api.h @@ -0,0 +1,3 @@ + +#include "g_zbc_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mig_mgr/compute_instance_subscription.h b/src/nvidia/inc/kernel/gpu/mig_mgr/compute_instance_subscription.h new file mode 100644 index 000000000..9b62c3b65 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mig_mgr/compute_instance_subscription.h @@ -0,0 +1,3 @@ + +#include "g_compute_instance_subscription_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mig_mgr/gpu_instance_subscription.h b/src/nvidia/inc/kernel/gpu/mig_mgr/gpu_instance_subscription.h new file mode 100644 index 000000000..4bec62f33 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mig_mgr/gpu_instance_subscription.h @@ -0,0 +1,3 @@ + +#include "g_gpu_instance_subscription_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mig_mgr/kernel_mig_manager.h b/src/nvidia/inc/kernel/gpu/mig_mgr/kernel_mig_manager.h new file mode 100644 index 000000000..06ad82b21 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mig_mgr/kernel_mig_manager.h @@ -0,0 +1,3 @@ + +#include "g_kernel_mig_manager_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mig_mgr/mig_config_session.h b/src/nvidia/inc/kernel/gpu/mig_mgr/mig_config_session.h new file mode 100644 index 000000000..70da74a02 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mig_mgr/mig_config_session.h @@ -0,0 +1,3 @@ + 
+#include "g_mig_config_session_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mig_mgr/mig_monitor_session.h b/src/nvidia/inc/kernel/gpu/mig_mgr/mig_monitor_session.h new file mode 100644 index 000000000..af135eff8 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mig_mgr/mig_monitor_session.h @@ -0,0 +1,3 @@ + +#include "g_mig_monitor_session_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mmu/kern_gmmu.h b/src/nvidia/inc/kernel/gpu/mmu/kern_gmmu.h new file mode 100644 index 000000000..0ac4c599f --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mmu/kern_gmmu.h @@ -0,0 +1,3 @@ + +#include "g_kern_gmmu_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mmu/mmu_fault_buffer.h b/src/nvidia/inc/kernel/gpu/mmu/mmu_fault_buffer.h new file mode 100644 index 000000000..396e9f036 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mmu/mmu_fault_buffer.h @@ -0,0 +1,3 @@ + +#include "g_mmu_fault_buffer_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/mmu/mmu_trace.h b/src/nvidia/inc/kernel/gpu/mmu/mmu_trace.h new file mode 100644 index 000000000..8861a45c5 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mmu/mmu_trace.h @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef MMU_TRACE_H +#define MMU_TRACE_H + +#include "core/core.h" +#include "mem_mgr/vaspace.h" +#include "mmu/mmu_fmt.h" +#include "ctrl/ctrl83de.h" + +#define MMU_INVALID_ADDR (0xf) // All base addresses are aligned and 0xf is unaligned + +#define MMU_MAX_ENTRY_SIZE_BYTES 16 + +/* ------------------------ Types definitions ------------------------------ */ + +typedef union +{ + NvU8 v8[MMU_MAX_ENTRY_SIZE_BYTES ]; + NvU32 v32[MMU_MAX_ENTRY_SIZE_BYTES / sizeof(NvU32)]; + NvU64 v64[MMU_MAX_ENTRY_SIZE_BYTES / sizeof(NvU64)]; +} MMU_ENTRY; + +typedef NvBool (*MmuTraceCbIsPte)(const void *pFmt, const MMU_FMT_LEVEL *pFmtLevel, + const MMU_ENTRY *pEntry, NvBool *pValid); +typedef const void *(*MmuTraceCbGetFmtPde)(const void *pFmt, const MMU_FMT_LEVEL *pFmtLevel, + NvU32 sublevel); +typedef const void *(*MmuTraceCbGetFmtPte)(const void *pFmt); +typedef NvU64 (*MmuTraceCbGetPdePa)(OBJGPU *pGpu, const void *pFmtPde, const MMU_ENTRY *pPde); +typedef NvU64 (*MmuTraceCbGetPtePa)(OBJGPU *pGpu, const void *pFmtPte, const MMU_ENTRY *pPte); +typedef void (*MmuTraceCbPrintPdb)(OBJGPU *pGpu, OBJVASPACE *pVAS, NvU64 va, + NvU64 vaLimit); +typedef void (*MmuTraceCbPrintPde)(OBJGPU *pGpu, const void *pFmt, const MMU_FMT_LEVEL *pFmtLevel, + const MMU_ENTRY *pPde); +typedef void (*MmuTraceCbPrintPt)(OBJGPU *pGpu, const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPde, + const MMU_ENTRY *pPde); +typedef void (*MmuTraceCbPrintPte)(OBJGPU *pGpu, const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPte, + const MMU_ENTRY *pPte, NvU32 index); +typedef NvBool (*MmuTraceCbIsInvalidPdeOk)(OBJGPU *pGpu, const void *pFmt, const void *pFmtPde, + const MMU_ENTRY *pPde, NvU32 sublevel); +typedef NvU32 (*MmuTraceCbPdeAddrSpace)(const void *pFmtPde, const MMU_ENTRY *pPde); +typedef NvU32 (*MmuTraceCbPteAddrSpace)(const void *pFmtPte, const MMU_ENTRY *pPte); +typedef NvU32 (*MmuTraceCbSwToHwLevel)(const void *pFmt, NvU32 level); + +typedef enum +{ + MMU_TRACE_MODE_TRACE = 0, + MMU_TRACE_MODE_TRACE_VERBOSE = 1, + MMU_TRACE_MODE_TRANSLATE = 2, + MMU_TRACE_MODE_VALIDATE = 3, + MMU_TRACE_MODE_DUMP_RANGE = 4 +} MMU_TRACE_MODE, *PMMU_TRACE_MODE; + +typedef struct +{ + NvU64 pa; + NvU32 aperture; + NvBool valid; + NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS *pMapParams; + NvU64 validateCount; +} MMU_TRACE_ARG, *PMMU_TRACE_ARG; + +typedef struct +{ + MMU_TRACE_MODE mode; + NvU64 va; + NvU64 vaLimit; + PMMU_TRACE_ARG pArg; +} MMU_TRACE_PARAM, *PMMU_TRACE_PARAM; + +typedef struct +{ + MmuTraceCbIsPte isPte; + MmuTraceCbGetFmtPde getFmtPde; + MmuTraceCbGetFmtPte getFmtPte; + MmuTraceCbGetPdePa getPdePa; + MmuTraceCbGetPtePa getPtePa; + MmuTraceCbPrintPdb printPdb; + MmuTraceCbPrintPde printPde; + MmuTraceCbPrintPt printPt; + MmuTraceCbPrintPte printPte; + MmuTraceCbIsInvalidPdeOk isInvalidPdeOk; + MmuTraceCbPdeAddrSpace pdeAddrSpace; + MmuTraceCbPteAddrSpace pteAddrSpace; + MmuTraceCbSwToHwLevel swToHwLevel; +} MMU_TRACE_CALLBACKS; + +NV_STATUS mmuTrace(OBJGPU *pGpu, OBJVASPACE *pVAS, MMU_TRACE_PARAM *pParams); + +#endif // MMU_TRACE_H diff --git a/src/nvidia/inc/kernel/gpu/mmu/uvm_sw.h b/src/nvidia/inc/kernel/gpu/mmu/uvm_sw.h new file mode 100644 index 000000000..96c65afc5 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/mmu/uvm_sw.h @@ -0,0 +1,3 @@ + +#include "g_uvm_sw_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/nvdec/kernel_nvdec.h b/src/nvidia/inc/kernel/gpu/nvdec/kernel_nvdec.h new file mode 100644 index 000000000..3f3847994 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/nvdec/kernel_nvdec.h @@ -0,0 +1,3 @@ + +#include 
"g_kernel_nvdec_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/nvdec/kernel_nvdec_ctx.h b/src/nvidia/inc/kernel/gpu/nvdec/kernel_nvdec_ctx.h new file mode 100644 index 000000000..ed110b399 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/nvdec/kernel_nvdec_ctx.h @@ -0,0 +1,3 @@ + +#include "g_kernel_nvdec_ctx_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/nvenc/kernel_nvenc_ctx.h b/src/nvidia/inc/kernel/gpu/nvenc/kernel_nvenc_ctx.h new file mode 100644 index 000000000..738d3eaca --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/nvenc/kernel_nvenc_ctx.h @@ -0,0 +1,3 @@ + +#include "g_kernel_nvenc_ctx_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/nvjpg/kernel_nvjpg_ctx.h b/src/nvidia/inc/kernel/gpu/nvjpg/kernel_nvjpg_ctx.h new file mode 100644 index 000000000..b44e33f40 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/nvjpg/kernel_nvjpg_ctx.h @@ -0,0 +1,3 @@ + +#include "g_kernel_nvjpg_ctx_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/nvlink/kernel_ioctrl.h b/src/nvidia/inc/kernel/gpu/nvlink/kernel_ioctrl.h new file mode 100644 index 000000000..573161cc0 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/nvlink/kernel_ioctrl.h @@ -0,0 +1,3 @@ + +#include "g_kernel_ioctrl_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/nvlink/kernel_nvlink.h b/src/nvidia/inc/kernel/gpu/nvlink/kernel_nvlink.h new file mode 100644 index 000000000..dabd4387a --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/nvlink/kernel_nvlink.h @@ -0,0 +1,3 @@ + +#include "g_kernel_nvlink_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/ofa/kernel_ofa_ctx.h b/src/nvidia/inc/kernel/gpu/ofa/kernel_ofa_ctx.h new file mode 100644 index 000000000..0ddc1b963 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/ofa/kernel_ofa_ctx.h @@ -0,0 +1,3 @@ + +#include "g_kernel_ofa_ctx_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/perf/kern_cuda_limit.h b/src/nvidia/inc/kernel/gpu/perf/kern_cuda_limit.h new file mode 100644 index 000000000..dcd8ccf11 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/perf/kern_cuda_limit.h @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef KERNEL_CUDA_LIMIT_H +#define KERNEL_CUDA_LIMIT_H + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/device/device.h" + +/* ------------------------ Macros ----------------------------------------- */ + + +/* ------------------------ Datatypes -------------------------------------- */ + +/* -------------------- Function Prototypes -------------------------------- */ + +// Device CLI interfaces +NV_STATUS deviceKPerfCudaLimitCliDisable (Device *pDevice, OBJGPU *pGpu); + +#endif // KERNEL_CUDA_LIMIT_H diff --git a/src/nvidia/inc/kernel/gpu/perf/kern_perf.h b/src/nvidia/inc/kernel/gpu/perf/kern_perf.h new file mode 100644 index 000000000..2aabb87d7 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/perf/kern_perf.h @@ -0,0 +1,3 @@ + +#include "g_kern_perf_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/perf/kern_perf_1hz.h b/src/nvidia/inc/kernel/gpu/perf/kern_perf_1hz.h new file mode 100644 index 000000000..16efbf1bc --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/perf/kern_perf_1hz.h @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERNEL_PERF_1HZ_H +#define KERNEL_PERF_1HZ_H + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/gpu_resource.h" +#include "objtmr.h" + +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Datatypes -------------------------------------- */ +/*! + * This structure represents data for managing 1HZ Callback timer + */ +typedef struct +{ + /*! + * NV_TRUE if 1Hz callback is in progress + */ + NvBool b1HzTimerCallback; + + /*! + * TRUE if AllowMaxPerf and not in Hibernate/Standby + */ + NvBool bEnableTimerUpdates; +} KERNEL_PERF_1HZ; + +/* -------------------- Function Prototypes -------------------------------- */ +/*! + * @brief Handle 1Hz timer callback from SW interrupts + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pTmr OBJTMR pointer + * @param[in] *ptr timer callback ID + * + * @returns Always return NV_OK + * + */ +NV_STATUS kperfTimerProc(OBJGPU *pGpu, OBJTMR *pTmr, void *ptr); + +/*! + * Since the function tmrCancelCallback() needs a distinct value for POBJECT, + * we can not just use any value in the POBJECT field if we intend to use + * tmrCancelCallback() function. 
For scheduling Kernel Perf related callbacks we + * will use the unique value for the Kernel Perf by using the address of the function + * that will be called when timer elapses. + */ +#define TMR_POBJECT_KERNEL_PERF_1HZ ((void *)(kperfTimerProc)) + +#endif // KERNEL_PERF_1HZ_H diff --git a/src/nvidia/inc/kernel/gpu/perf/kern_perf_boost.h b/src/nvidia/inc/kernel/gpu/perf/kern_perf_boost.h new file mode 100644 index 000000000..8972a61b0 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/perf/kern_perf_boost.h @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERNEL_PERF_BOOST_H +#define KERNEL_PERF_BOOST_H + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/gpu_resource.h" +#include "objtmr.h" + +/* ------------------------ Macros ----------------------------------------- */ +/*! + * Maximum number of boost instances + */ +#define KERNEL_PERF_BOOST_HINT_COUNT 512 + +/*! + * Index in the boost array reserved for all CUDA clients + */ +#define KERNEL_PERF_BOOST_HINT_CUDA_CLIENT_INDEX (0x00000000) + +/* ------------------------ Datatypes -------------------------------------- */ +/*! + * Structure that represents one request for the Boost Hint from a specific + * client. + */ +typedef struct +{ + /*! + * Specifices the handle of the client who requested this boost + */ + NvHandle hClient; + + /*! + * Specifies the duration of the boost in seconds. This has to be less + * than NV2080_CTRL_KERNEL_PERF_BOOST_DURATION_MAX. + */ + NvU32 duration; + + /*! + * Specifies the actual command. _CLEAR is to clear existing boost. + * _BOOST_1LEVEL is to boost P-State one level higher. _BOOST_TO_MAX is to boost + * to the highest P-State. + * + * Note: _BOOST_1LEVEL is deprecated. + */ + NvU32 flags; + + /*! + * Specifies number of clients who requested + * CUDA boost. This is used only in case of the CUDA clients. + */ + NvU32 refCount; + + /*! + * NV_TRUE if the boost duration is infinite, NV_FALSE otherwise. + */ + NvBool bIsInfinite; +} KERNEL_PERF_BOOST_HINT; + +/*! + * Structure that represents array of Boosts + */ +typedef struct +{ + /*! 
+ * One entry of this array corresponds one boost request + */ + KERNEL_PERF_BOOST_HINT boostHints[KERNEL_PERF_BOOST_HINT_COUNT]; +} KERNEL_PERF_BOOST_HINTS; + +/* -------------------- Function Prototypes -------------------------------- */ +#endif // KERNEL_PERF_BOOST_H diff --git a/src/nvidia/inc/kernel/gpu/perf/kern_perf_gpuboostsync.h b/src/nvidia/inc/kernel/gpu/perf/kern_perf_gpuboostsync.h new file mode 100644 index 000000000..4c38cd1fa --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/perf/kern_perf_gpuboostsync.h @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef KERNEL_PERF_GPU_BOOST_SYNC_H +#define KERNEL_PERF_GPU_BOOST_SYNC_H + +/* ------------------------ Includes --------------------------------------- */ +#include "power/gpu_boost_mgr.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" + +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Datatypes -------------------------------------- */ +/* + * GPU Boost synchronization info + */ +struct KERNEL_PERF_GPU_BOOST_SYNC +{ + /*! + * Cached GPU Boost synchronization limits. + */ + NvU32 limits[NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM]; + + /*! + * Hysteresis value for GPU Boost synchronization hysteresis algorithm. + */ + NvU64 hysteresisus; + + /*! + * Hysteresis algorithm for SLI GPU Boost synchronization enabled + */ + NvBool bHystersisEnable; + + /*! + * SLI GPU Boost feature is enabled. + */ + NvBool bSliGpuBoostSyncEnable; + + /*! + * Bridgeless information, supports only MIO bridge + */ + NvBool bBridgeless; +}; + +/*! + * SLI GPU Boost synchronization sub-structure. + */ +typedef struct +{ + /*! + * Timestamp of previous GPU Boost synchronization loop. + */ + NvU64 prevChangeTsns; + + /*! + * Limits of previous GPU Boost synchronization loop. 
+ */ + NvU32 prevLimits[NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM]; +} SLI_GPU_BOOST_SYNC; + +/* ------------------------ Function Prototypes ---------------------------- */ +NV_STATUS kperfGpuBoostSyncStateUpdate(OBJGPUBOOSTMGR *pBoostMgr, NvU32 boostGroupId, NvBool bActivate); +NV_STATUS kPerfGpuBoostSyncBridgelessUpdateInfo(OBJGPU *pGpu, NvBool bBridgeless); + +#endif // KERNEL_PERF_GPU_BOOST_SYNC_H diff --git a/src/nvidia/inc/kernel/gpu/perf/kern_perf_pm.h b/src/nvidia/inc/kernel/gpu/perf/kern_perf_pm.h new file mode 100644 index 000000000..059b5cd4a --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/perf/kern_perf_pm.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef KERNEL_PERF_PM_H +#define KERNEL_PERF_PM_H + +/* ------------------------ Includes --------------------------------------- */ + +/* ------------------------ Macros ----------------------------------------- */ + +/* ------------------------ Datatypes -------------------------------------- */ + +/* -------------------- Function Prototypes -------------------------------- */ + +NV_STATUS kPerfPerfmonClientDeviceSet (NvHandle hClient, NvHandle hDevice, NvBool bReservation, NvBool bClientHandlesGrGating, + NvBool bRmHandlesIdleSlow); + +#endif // KERNEL_PERF_PM_H diff --git a/src/nvidia/inc/kernel/gpu/perf/kern_perfbuffer.h b/src/nvidia/inc/kernel/gpu/perf/kern_perfbuffer.h new file mode 100644 index 000000000..ad954a87b --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/perf/kern_perfbuffer.h @@ -0,0 +1,3 @@ + +#include "g_kern_perfbuffer_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/pmu/kern_pmu.h b/src/nvidia/inc/kernel/gpu/pmu/kern_pmu.h new file mode 100644 index 000000000..f9cbd224d --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/pmu/kern_pmu.h @@ -0,0 +1,3 @@ + +#include "g_kern_pmu_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/rc/kernel_rc.h b/src/nvidia/inc/kernel/gpu/rc/kernel_rc.h new file mode 100644 index 000000000..e51d75e85 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/rc/kernel_rc.h @@ -0,0 +1,3 @@ + +#include "g_kernel_rc_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/rc/kernel_rc_watchdog.h b/src/nvidia/inc/kernel/gpu/rc/kernel_rc_watchdog.h new file mode 100644 index 000000000..0d95cd026 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/rc/kernel_rc_watchdog.h @@ -0,0 +1,36 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef KERNEL_RC_WATCHDOG_H +#define KERNEL_RC_WATCHDOG_H 1 + + +typedef enum { + RMAPI_ENABLE_REQUEST = 0xa, + RMAPI_DISABLE_REQUEST = 0xb, + RMAPI_RELEASE_ALL_REQUESTS = 0xc, + RM_CLIENT_DESTRUCTION = 0xd, + RMAPI_SOFT_DISABLE_REQUEST = 0xe +} RC_CHANGE_WATCHDOG_STATE_OPERATION_TYPE; + +#endif // ifndef KERNEL_RC_WATCHDOG_H diff --git a/src/nvidia/inc/kernel/gpu/rc/kernel_rc_watchdog_private.h b/src/nvidia/inc/kernel/gpu/rc/kernel_rc_watchdog_private.h new file mode 100644 index 000000000..fffe04183 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/rc/kernel_rc_watchdog_private.h @@ -0,0 +1,115 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * + * Defines for watchdog component of kernel RC. + * + * Don't include this file directly, use kernel_rc.h. Watchdog will be split + * into its own component in future. Keeping defines in a separate file to aid + * that transition. See CORERM-2297 + */ +#ifndef KERNEL_RC_WATCHDOG_PRIVATE_H +#define KERNEL_RC_WATCHDOG_PRIVATE_H 1 + +#include "kernel/gpu/disp/kern_disp_max.h" + +#include "class/cl906f.h" // GF100_CHANNEL_GPFIFO + +#include "nvgputypes.h" +#include "nvlimits.h" +#include "nvtypes.h" + + +#define WATCHDOG_RESET_QUEUE_SIZE (4) + +// KernelWatchdog.flags +#define WATCHDOG_FLAGS_INITIALIZED NVBIT(0) // Fully initialized and ready +#define WATCHDOG_FLAGS_DISABLED NVBIT(1) // Disabled +#define WATCHDOG_FLAGS_ALLOC_UNCACHED_PCI NVBIT(2) // Alloc cached / uncached pushbuffer + +/*! Volatile watchdog state that is destroyed when watchdog is shutdown */ +typedef struct { + NvHandle hClient; + NvU32 runlistId; + NvU32 flags; + NvU32 deviceReset[WATCHDOG_RESET_QUEUE_SIZE]; + /*! Read Pointer for fifoWatchDog */ + NvU32 deviceResetRd; + // RmResetWatchdog + NvU32 deviceResetWr; + /*! Number of watchdog invocations */ + NvU32 count; + /*! Countdown for running thwap and stomp tests */ + NvU32 channelTestCountdown; + /*! Reset value for ChannelTestTimer */ + NvU32 channelTestInterval; + /*! Masks for RC testing */ + NvU32 thwapChannelMask; + NvU32 thwapRepeatMask; + NvU32 stompChannelMask; + NvU32 stompRepeatMask; + /*! Mask of allocations to fail for testing RC. See nvcm.h */ + NvU32 allocFailMask; + /*! Array of NvU32 to hold last vblank counter */ + NvU32 oldVblank[OBJ_MAX_HEADS]; + /*! 
Number of times that Vblank has failed to advance */ + NvU32 vblankFailureCount[OBJ_MAX_HEADS]; + NvNotification *notifiers[NV_MAX_SUBDEVICES]; + NvNotification *errorContext; + NvNotification *notifierToken; +} KernelWatchdog; + + +/*! Persistent watchdog state preserved across watchdog shutdowns */ +typedef struct { + NvS32 enableRequestsRefCount; + NvS32 disableRequestsRefCount; + NvS32 softDisableRequestsRefCount; + + /*! How long we wait for the notifier to come back after being run */ + NvU32 timeoutSecs; + /*! Seconds between when the Watchdog is run */ + NvU32 intervalSecs; + NvU64 notifyLimitTime; + NvU64 nextRunTime; + NvU64 resetLimitTime; +} KernelWatchdogPersistent; + + +/*! Watchdog channel info */ +typedef struct +{ + Nv906fControl *pControlGPFifo[NV_MAX_SUBDEVICES]; + NvU64 pGpuAddr; + NvU8 *pCpuAddr; + // Class engine ID needed for SetObject on Fermi+ + NvU32 classEngineID; + NvU32 gpEntry0[2]; + NvU32 gpEntry1[2]; + NvU32 pbBytes; + NvU32 class2dSubch; +} KernelWatchdogChannelInfo; + +#endif // ifndef KERNEL_RC_WATCHDOG_PRIVATE_H diff --git a/src/nvidia/inc/kernel/gpu/sec2/kernel_sec2.h b/src/nvidia/inc/kernel/gpu/sec2/kernel_sec2.h new file mode 100644 index 000000000..cf9977d5e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/sec2/kernel_sec2.h @@ -0,0 +1,3 @@ + +#include "g_kernel_sec2_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h b/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h new file mode 100644 index 000000000..c1a8628ce --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/subdevice/generic_engine.h @@ -0,0 +1,3 @@ + +#include "g_generic_engine_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h b/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h new file mode 100644 index 000000000..a9b688eec --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/subdevice/subdevice.h @@ -0,0 +1,3 @@ + +#include "g_subdevice_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/subdevice/subdevice_ctrl_rc.h b/src/nvidia/inc/kernel/gpu/subdevice/subdevice_ctrl_rc.h new file mode 100644 index 000000000..41cb26a76 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/subdevice/subdevice_ctrl_rc.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM, + RMCTRL_FLAGS(KERNEL_PRIVILEGED)) + NV_STATUS subdeviceCtrlCmdRcReadVirtualMem(Subdevice *pSubdevice, + NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *pReadVirtMemParam); + + // + // Note that if SMC is enabled, mig/monitor capability must be acquired + // to query Xids + // + // These two calls must be NON_PRIVILEGED, and should NOT ROUTE_TO_PHYSICAL. + // MIG monitor capability checks are performed on kernel side and the calls + // are manually RPCed to physical RM. + // + // Physical RM checks if the call it received was an internal call. In + // future, this should be migrated to a separate internal ctrl cmd. + // + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_GET_ERROR_COUNT, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS subdeviceCtrlCmdRcGetErrorCount(Subdevice *pSubdevice, + NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS *pErrorCount); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_GET_ERROR_V2, RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS subdeviceCtrlCmdRcGetErrorV2(Subdevice *pSubdevice, + NV2080_CTRL_RC_GET_ERROR_V2_PARAMS *pErrorParams); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_SET_CLEAN_ERROR_HISTORY, + RMCTRL_FLAGS(PRIVILEGED, ROUTE_TO_PHYSICAL)) + NV_STATUS subdeviceCtrlCmdRcSetCleanErrorHistory(Subdevice *pSubdevice); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS subdeviceCtrlCmdRcGetWatchdogInfo(Subdevice *pSubdevice, + NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS *pWatchdogInfoParams); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS subdeviceCtrlCmdRcDisableWatchdog(Subdevice *pSubdevice); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS subdeviceCtrlCmdRcSoftDisableWatchdog(Subdevice *pSubdevice); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS subdeviceCtrlCmdRcEnableWatchdog(Subdevice *pSubdevice); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS, + RMCTRL_FLAGS(NON_PRIVILEGED)) + NV_STATUS subdeviceCtrlCmdRcReleaseWatchdogRequests(Subdevice *pSubdevice); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT, + RMCTRL_FLAGS(KERNEL_PRIVILEGED, INTERNAL, ROUTE_TO_PHYSICAL)) + NV_STATUS subdeviceCtrlCmdInternalRcWatchdogTimeout(Subdevice *pSubdevice); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_SET_RC_RECOVERY, + RMCTRL_FLAGS(PRIVILEGED, ROUTE_TO_PHYSICAL)) + NV_STATUS subdeviceCtrlCmdSetRcRecovery(Subdevice *pSubdevice, + NV2080_CTRL_CMD_RC_RECOVERY_PARAMS *pRcRecovery); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_GET_RC_RECOVERY, + RMCTRL_FLAGS(PRIVILEGED, ROUTE_TO_PHYSICAL)) + NV_STATUS subdeviceCtrlCmdGetRcRecovery(Subdevice *pSubdevice, + NV2080_CTRL_CMD_RC_RECOVERY_PARAMS *pRcRecovery); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_GET_RC_INFO, + RMCTRL_FLAGS(PRIVILEGED, ROUTE_TO_PHYSICAL)) + NV_STATUS subdeviceCtrlCmdGetRcInfo(Subdevice *pSubdevice, + NV2080_CTRL_CMD_RC_INFO_PARAMS *pParams); + + RMCTRL_EXPORT(NV2080_CTRL_CMD_SET_RC_INFO, + RMCTRL_FLAGS(PRIVILEGED, ROUTE_TO_PHYSICAL)) + NV_STATUS subdeviceCtrlCmdSetRcInfo(Subdevice *pSubdevice, + NV2080_CTRL_CMD_RC_INFO_PARAMS *pParams); + diff --git a/src/nvidia/inc/kernel/gpu/subdevice/subdevice_diag.h b/src/nvidia/inc/kernel/gpu/subdevice/subdevice_diag.h new file mode 100644 index 000000000..549e7270e --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/subdevice/subdevice_diag.h @@ -0,0 +1,3 @@ + +#include "g_subdevice_diag_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/sw_test.h 
b/src/nvidia/inc/kernel/gpu/sw_test.h new file mode 100644 index 000000000..247f95b23 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/sw_test.h @@ -0,0 +1,3 @@ + +#include "g_sw_test_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/timed_sema.h b/src/nvidia/inc/kernel/gpu/timed_sema.h new file mode 100644 index 000000000..f8993e7f9 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/timed_sema.h @@ -0,0 +1,3 @@ + +#include "g_timed_sema_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/uvm/access_cntr_buffer.h b/src/nvidia/inc/kernel/gpu/uvm/access_cntr_buffer.h new file mode 100644 index 000000000..ad6985f51 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/uvm/access_cntr_buffer.h @@ -0,0 +1,3 @@ + +#include "g_access_cntr_buffer_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/uvm/uvm.h b/src/nvidia/inc/kernel/gpu/uvm/uvm.h new file mode 100644 index 000000000..cf04cab9a --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/uvm/uvm.h @@ -0,0 +1,3 @@ + +#include "g_uvm_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu/vbios/bios_types.h b/src/nvidia/inc/kernel/gpu/vbios/bios_types.h new file mode 100644 index 000000000..ad90a5543 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu/vbios/bios_types.h @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef BIOSTYPES_H_FILE +#define BIOSTYPES_H_FILE + + +/************************************************************************************************************** +* +* Description: +* Definitions of BIOS types defines used in parsing bios structures. +* +**************************************************************************************************************/ + +// We convert the bios data structures before they are used. They are read from bios, +// then each field is expanded into a 32 bit field into host byte order. +// Each data structure has a format string below that describes its packed +// layout in the rom. 
+// +// The format string looks like: +// +// count defaults to 1 +// item is one of +// b (byte) +// w 2 byte word +// d 4 byte word +// +// Eg: "1b3d" +// +// means 1 byte field followed by 3 dword (32 bit) fields +// + +#define bios_U008 NvU32 +#define bios_U016 NvU32 +#define bios_U032 NvU32 +#define bios_S008 NvS32 +#define bios_S016 NvS32 +#define bios_S032 NvS32 + +#endif // BIOSTYPES_H_FILE diff --git a/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h b/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h new file mode 100644 index 000000000..0a4dc41f9 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu_mgr/gpu_db.h @@ -0,0 +1,3 @@ + +#include "g_gpu_db_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h b/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h new file mode 100644 index 000000000..425a3031f --- /dev/null +++ b/src/nvidia/inc/kernel/gpu_mgr/gpu_group.h @@ -0,0 +1,3 @@ + +#include "g_gpu_group_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h b/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h new file mode 100644 index 000000000..f3acd0cb1 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu_mgr/gpu_mgmt_api.h @@ -0,0 +1,3 @@ + +#include "g_gpu_mgmt_api_nvoc.h" + diff --git a/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h b/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h new file mode 100644 index 000000000..068c74821 --- /dev/null +++ b/src/nvidia/inc/kernel/gpu_mgr/gpu_mgr.h @@ -0,0 +1,3 @@ + +#include "g_gpu_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/console_mem.h b/src/nvidia/inc/kernel/mem_mgr/console_mem.h new file mode 100644 index 000000000..a9582aef0 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/console_mem.h @@ -0,0 +1,3 @@ + +#include "g_console_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/ctx_buf_pool.h b/src/nvidia/inc/kernel/mem_mgr/ctx_buf_pool.h new file mode 100644 index 000000000..ad676aab4 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/ctx_buf_pool.h @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _CTX_BUF_POOL_H_ +#define _CTX_BUF_POOL_H_ + +/*! + * @file ctx_buf_pool.h + * + * @brief Describes interfaces used for creating RM memory pools for + * RM internal allocations like global (engine-specific) and + * local (context-specific) context buffers. 
+ */ + +/* ---------------------------------Includes ------------------------------------ */ +#include "core/core.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/pool_alloc.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + +// state of context buffer pools +struct CTX_BUF_POOL_INFO +{ + // + // Each array index corresponds to a pointer to memory pool with + // page size corresponding to RM_ATTR_PAGE_SIZE_* + // Pool corresponding to RM_ATTR_PAGE_SIZE_DEFAULT will be left unused + // + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemPool[RM_ATTR_PAGE_SIZE_INVALID]; +}; + +// List of all context buffers supported by memory pools +typedef enum CTX_BUF_ID +{ + CTX_BUF_ID_INST_MEM, + CTX_BUF_ID_RUNLIST, + CTX_BUF_ID_GR_GLOBAL, + CTX_BUF_ID_GR_LOCAL +}CTX_BUF_ID; + +// context buffer info +struct CTX_BUF_INFO +{ + NvU64 size; + NvU64 align; + RM_ATTR_PAGE_SIZE attr; + NvBool bContig; +}; +typedef struct CTX_BUF_INFO CTX_BUF_INFO; + +NV_STATUS ctxBufPoolInit(OBJGPU *pGpu, Heap *pHeap, CTX_BUF_POOL_INFO **ppCtxBufPool); +NV_STATUS ctxBufPoolReserve(OBJGPU *pGpu, CTX_BUF_POOL_INFO *pCtxBufPool, CTX_BUF_INFO *pBufInfoList, NvU32 bufCount); +NV_STATUS ctxBufPoolTrim(CTX_BUF_POOL_INFO *pCtxBufPool); +NV_STATUS ctxBufPoolAllocate(CTX_BUF_POOL_INFO *pCtxBufPool, PMEMORY_DESCRIPTOR pMemDesc); +NV_STATUS ctxBufPoolFree(CTX_BUF_POOL_INFO *pCtxBufPool, PMEMORY_DESCRIPTOR pMemDesc); +void ctxBufPoolRelease(CTX_BUF_POOL_INFO *pCtxBufPool); +void ctxBufPoolDestroy(CTX_BUF_POOL_INFO **ppCtxBufPool); +NvBool ctxBufPoolIsSupported(OBJGPU *pGpu); +NV_STATUS ctxBufPoolGetSizeAndPageSize(CTX_BUF_POOL_INFO *pCtxBufPool, OBJGPU *pGpu, NvU64 alignment, RM_ATTR_PAGE_SIZE attr, NvBool bContig, NvU64 *pSize, NvU32 *pPageSize); +NV_STATUS ctxBufPoolGetGlobalPool(OBJGPU *pGpu, CTX_BUF_ID bufId, NvU32 engineType, CTX_BUF_POOL_INFO **ppCtxBufPool); +NvBool ctxBufPoolIsScrubSkipped(CTX_BUF_POOL_INFO *pCtxBufPool); +void ctxBufPoolSetScrubSkip(CTX_BUF_POOL_INFO *pCtxBufPool, NvBool bSkipScrub); +#endif // _CTX_BUF_POOL_H_ diff --git a/src/nvidia/inc/kernel/mem_mgr/fabric_vaspace.h b/src/nvidia/inc/kernel/mem_mgr/fabric_vaspace.h new file mode 100644 index 000000000..4440a100e --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/fabric_vaspace.h @@ -0,0 +1,3 @@ + +#include "g_fabric_vaspace_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/fla_mem.h b/src/nvidia/inc/kernel/mem_mgr/fla_mem.h new file mode 100644 index 000000000..02649b18f --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/fla_mem.h @@ -0,0 +1,3 @@ + +#include "g_fla_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/gpu_vaspace.h b/src/nvidia/inc/kernel/mem_mgr/gpu_vaspace.h new file mode 100644 index 000000000..68aa142c5 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/gpu_vaspace.h @@ -0,0 +1,3 @@ + +#include "g_gpu_vaspace_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/hw_resources.h b/src/nvidia/inc/kernel/mem_mgr/hw_resources.h new file mode 100644 index 000000000..54e3d8541 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/hw_resources.h @@ -0,0 +1,3 @@ + +#include "g_hw_resources_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h b/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h new file mode 100644 index 000000000..2f075079f --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/io_vaspace.h @@ -0,0 +1,3 @@ + +#include "g_io_vaspace_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/mem.h b/src/nvidia/inc/kernel/mem_mgr/mem.h new file mode 100644 index 000000000..f2f8ce0df --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/mem.h @@ -0,0 
+1,3 @@ + +#include "g_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/mem_fabric.h b/src/nvidia/inc/kernel/mem_mgr/mem_fabric.h new file mode 100644 index 000000000..3589d083d --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/mem_fabric.h @@ -0,0 +1,3 @@ + +#include "g_mem_fabric_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/no_device_mem.h b/src/nvidia/inc/kernel/mem_mgr/no_device_mem.h new file mode 100644 index 000000000..7c6c85898 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/no_device_mem.h @@ -0,0 +1,3 @@ + +#include "g_no_device_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h b/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h new file mode 100644 index 000000000..8b9685b1e --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/os_desc_mem.h @@ -0,0 +1,3 @@ + +#include "g_os_desc_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/p2p.h b/src/nvidia/inc/kernel/mem_mgr/p2p.h new file mode 100644 index 000000000..cc2d1b17e --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/p2p.h @@ -0,0 +1,37 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _P2P_H_ +#define _P2P_H_ + +#include "core/core.h" + +NV_STATUS RmP2PGetPages (NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, OBJGPU **, void *, void (*)(void *), void *); +NV_STATUS RmP2PGetPagesWithoutCallbackRegistration (NvU64, NvU32, NvU64, NvU64, NvU64 *, NvU32 *, NvU32 *, NvU32 *, OBJGPU **, void *); +NV_STATUS RmP2PGetPagesPersistent (NvU64, NvU64, void **, NvU64 *, NvU32 *, void *, void *); +NV_STATUS RmP2PRegisterCallback (NvU64, NvU64, NvU64, void *, void (*)(void *), void *); +NV_STATUS RmP2PPutPages (NvU64, NvU32, NvU64, void *); +NV_STATUS RmP2PGetGpuByAddress (NvU64, NvU64, OBJGPU **); +NV_STATUS RmP2PPutPagesPersistent (void *, void *); + +#endif diff --git a/src/nvidia/inc/kernel/mem_mgr/phys_mem.h b/src/nvidia/inc/kernel/mem_mgr/phys_mem.h new file mode 100644 index 000000000..1f1b1885a --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/phys_mem.h @@ -0,0 +1,3 @@ + +#include "g_phys_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/pool_alloc.h b/src/nvidia/inc/kernel/mem_mgr/pool_alloc.h new file mode 100644 index 000000000..63e492887 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/pool_alloc.h @@ -0,0 +1,201 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RM_POOL_ALLOC_H_ +#define _RM_POOL_ALLOC_H_ + +/*! + * @file pool_alloc.h + * + * @brief Describes utilities for creating pools for RM internal usage, + * allocating memory from the pools, freeing memory to the pools + * and destroying the pools. + * Uses: + * 1. RM allocations for client page tables + * 2. Context-specific RM allocations like context buffers, instance memory, fault buffers, GR local buffers + * 3. Engine-specific RM allocations like runlist buffers and GR global buffers + */ + +/* ---------------------------------Includes ------------------------------------ */ +#include "core/core.h" + +/* ------------------------------- Public Interface ----------------------------- */ +/*! + * Static configurations for various pools. 
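+ *
+ * Illustrative only (hypothetical caller; pCtx is whatever user context the
+ * caller supplies, and error handling is omitted): a pool for 4KB context
+ * buffers would be configured by passing one of these modes to rmMemPoolSetup(), e.g.
+ *
+ *     RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserve = NULL;
+ *     rmMemPoolSetup(pCtx, &pMemReserve, POOL_CONFIG_CTXBUF_4K);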
+ */
+
+typedef enum
+{
+    POOL_CONFIG_GMMU_FMT_1    = 0, // configure pool for client page tables with version = GMMU_FMT_VERSION_1
+    POOL_CONFIG_GMMU_FMT_2    = 1, // configure pool for client page tables with version = GMMU_FMT_VERSION_2
+    POOL_CONFIG_CTXBUF_512M   = 2, // configure pool for RM internal allocations like ctx buffers with 512MB page size
+    POOL_CONFIG_CTXBUF_2M     = 3, // configure pool for RM internal allocations like ctx buffers with 2MB page size
+    POOL_CONFIG_CTXBUF_64K    = 4, // configure pool for RM internal allocations like ctx buffers with 64KB page size
+    POOL_CONFIG_CTXBUF_4K     = 5, // configure pool for RM internal allocations like ctx buffers with 4KB page size
+    POOL_CONFIG_MAX_SUPPORTED = 6
+
+}POOL_CONFIG_MODE;
+/* ------------------------------------ Datatypes ---------------------------------- */
+
+/*!
+ * Opaque library-defined state of a pool reserve.
+ */
+typedef struct RM_POOL_ALLOC_MEM_RESERVE_INFO RM_POOL_ALLOC_MEM_RESERVE_INFO;
+
+/*!
+ * Opaque user-defined state describing a block of physical memory.
+ * We reference these as the backing memory for the allocation.
+ */
+typedef struct RM_POOL_ALLOC_MEMDESC RM_POOL_ALLOC_MEMDESC;
+
+/* ------------------------------- Public Interface ----------------------------- */
+
+/*!
+ * @brief Sets up the memory pool and the tracking structure.
+ *
+ * @param[in] pCtx           Pointer to some user context data
+ * @param[in] ppMemReserve   Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data
+ * @param[in] configMode     Mode to configure the pool
+ *
+ * @return
+ *      NV_OK:
+ *          Setup completed successfully.
+ *      NV_ERR_NO_MEMORY:
+ *          Internal memory allocation failed.
+ */
+NV_STATUS rmMemPoolSetup(void *pCtx, RM_POOL_ALLOC_MEM_RESERVE_INFO **ppMemReserve, POOL_CONFIG_MODE configMode);
+
+/*!
+ * @brief Reserve memory for the allocation in vidmem. Physical frames are
+ *        added to the memory pool on an as-needed basis, only when a mapping
+ *        is made in the VAS.
+ *
+ * @param[in] pMemReserve    Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data
+ * @param[in] poolSize       Pool size
+ * @param[in] flags          VASpace flags to skip scrubbing in PMA for internal clients
+ *
+ * @return
+ *      NV_ERR_NO_MEMORY:
+ *          Internal memory allocation failed
+ *      NV_ERR_INVALID_ARGUMENT:
+ *          Invalid argument
+ */
+NV_STATUS rmMemPoolReserve(RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserve,
+                           NvU64 poolSize, NvU32 flags);
+
+/*!
+ * @brief Releases the memory pool memory to the PMA once all allocations are
+ *        returned to it.
+ *
+ * @param[in] pMemReserveInfo  Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data
+ * @param[in] flags            VASpace flags to skip scrubbing in PMA for internal clients
+ *
+ * @return
+ */
+void rmMemPoolRelease(RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, NvU32 flags);
+
+/*!
+ * @brief Returns any unused nodes from the topmost level of a pool hierarchy
+ *        back to PMA.
+ *
+ * @param[in] pMemReserveInfo  Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data
+ * @param[in] nodesToPreserve  Number of nodes to preserve in the topmost pool
+ * @param[in] flags            VASpace flags to skip scrubbing in PMA for internal clients
+ *
+ * @return
+ */
+void rmMemPoolTrim (RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo,
+                    NvU32 nodesToPreserve, NvU32 flags);
+
+/*!
+ * @brief Suballocate memory for an allocation from the pool created
+ *        by @see rmMemPoolReserve.
+ *
+ * @param[in] pMemReserveInfo  Pointer to RM_POOL_ALLOC_MEM_RESERVE_INFO data
+ * @param[in] pPoolMemDesc     Pointer to the allocation's mem descriptor
+ *
+ * @return
+ *      NV_ERR_NO_MEMORY:
+ *          Internal memory allocation failed.
+ * NV_ERR_GENERIC: + * Unexpected error. + */ +NV_STATUS rmMemPoolAllocate(RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + RM_POOL_ALLOC_MEMDESC *pPoolMemDesc); + +/*! + * @brief Returns the allocation's memory back to the pool from + * which it was borrowed. + * + * @param[in] pMemReserveInfo Pointer to RM_POOL_ALLOC_MEM_RESERVE_INFO data + * @param[in] pMemDesc Pointer to the allocations mem descriptor + * @param[in] flags VASpace flags to skip scrubbing in PMA for internal clients + * + * @return + */ +void rmMemPoolFree(RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + RM_POOL_ALLOC_MEMDESC *pPoolMemDesc, NvU32 flags); + +/*! + * @brief Destroys the memory pool once all allocations are returned + * back to it. + * + * @param[in] pMemReserveInfo Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data + * + * @return + */ +void rmMemPoolDestroy(RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo); + +/*! + * @brief Setup pool to skip scrubber. + * + * @param[in] pMemReserveInfo Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data + * @param[in] bSkipScrub skip scrubber + * + * @return + */ +void rmMemPoolSkipScrub(RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, NvBool bSkipScrub); + +/*! + * @brief Get pool setting for skipping scrubber. + * + * @param[in] pMemReserveInfo Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data + * + * @return + * NV_TRUE Scrubbing is skipped + * NV_FALSE Scrubbing not skipped + */ +NvBool rmMemPoolIsScrubSkipped(RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo); + +/*! + * @brief Get page size and chunk size for a pool + * + * @param[in] pMemReserveInfo Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data + * @param[out] chunkSize + * @param[out] pageSize + * + * @return + * NV_ERR_INVALID_ARGUMENT + * NV_OK + */ +NV_STATUS rmMemPoolGetChunkAndPageSize(RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, NvU64*, NvU32*); + +#endif //_RM_POOL_ALLOC_H_ diff --git a/src/nvidia/inc/kernel/mem_mgr/reg_mem.h b/src/nvidia/inc/kernel/mem_mgr/reg_mem.h new file mode 100644 index 000000000..89c238799 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/reg_mem.h @@ -0,0 +1,3 @@ + +#include "g_reg_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/standard_mem.h b/src/nvidia/inc/kernel/mem_mgr/standard_mem.h new file mode 100644 index 000000000..f4b5ecbcb --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/standard_mem.h @@ -0,0 +1,3 @@ + +#include "g_standard_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/system_mem.h b/src/nvidia/inc/kernel/mem_mgr/system_mem.h new file mode 100644 index 000000000..9ff0b1332 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/system_mem.h @@ -0,0 +1,3 @@ + +#include "g_system_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/vaddr_list.h b/src/nvidia/inc/kernel/mem_mgr/vaddr_list.h new file mode 100644 index 000000000..3aa6579d0 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/vaddr_list.h @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * Structures to track virtual memory mappings
+ */
+
+#ifndef VADDR_LIST_H
+#define VADDR_LIST_H
+
+#include "containers/map.h"
+#include "mem_mgr/vaspace.h"
+
+/*!
+ * Map info
+ */
+typedef struct
+{
+    /*!
+     * Indicates whether the caller should release the VA.
+     * Default is set to TRUE, i.e. the caller should free the VA.
+     * Use vaListSetManaged() to change the value.
+     */
+    NvBool bRelease;
+}VADDR_LIST_INFO;
+
+/*!
+ * Virtual memory info
+ */
+typedef struct
+{
+    /*! virtual address */
+    NvU64 vAddr;
+    /*! Number of active references to the virtual address */
+    NvU64 refCnt;
+    /*!
+     * To track common properties.
+     * Key 0 (pVAS == NULL) is used to store this info
+     */
+    VADDR_LIST_INFO *pVaListInfo;
+}VA_INFO;
+
+/*!
+ * Dictionary that tracks active virtual memory mappings.
+ * Indexed by the vaspace object pointer.
+ */
+MAKE_MAP(VA_LIST, VA_INFO);
+
+/*! Init the tracker object */
+NV_STATUS vaListInit(VA_LIST *);
+/*! Destroy the tracker object */
+void vaListDestroy(VA_LIST *);
+/*!
+ * Set VA lifecycle property.
+ * TRUE means the caller should free the VA, e.g. RM-managed mappings.
+ * FALSE means the caller shouldn't free the VA, e.g. UVM- or KMD-managed mappings.
+ */
+NV_STATUS vaListSetManaged(VA_LIST *, NvBool bManaged);
+NvBool vaListGetManaged(VA_LIST *);
+/*! Add a vas mapping to the tracker */
+NV_STATUS vaListAddVa(VA_LIST *, OBJVASPACE *, NvU64 vaddr);
+/*! Remove a vas mapping from the tracker */
+NV_STATUS vaListRemoveVa(VA_LIST *, OBJVASPACE *);
+/*! Get the vas mapping */
+NV_STATUS vaListFindVa(VA_LIST *, OBJVASPACE *, NvU64 *vaddr);
+/*!
Get the vas refCount */ +NV_STATUS vaListGetRefCount(VA_LIST *, OBJVASPACE *, NvU64 *refCount); +#endif // VADDR_LIST_H diff --git a/src/nvidia/inc/kernel/mem_mgr/vaspace.h b/src/nvidia/inc/kernel/mem_mgr/vaspace.h new file mode 100644 index 000000000..6910058a7 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/vaspace.h @@ -0,0 +1,3 @@ + +#include "g_vaspace_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/video_mem.h b/src/nvidia/inc/kernel/mem_mgr/video_mem.h new file mode 100644 index 000000000..d4d2d9f14 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/video_mem.h @@ -0,0 +1,3 @@ + +#include "g_video_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h b/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h new file mode 100644 index 000000000..ab20731ab --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/virt_mem_mgr.h @@ -0,0 +1,3 @@ + +#include "g_virt_mem_mgr_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/virt_mem_range.h b/src/nvidia/inc/kernel/mem_mgr/virt_mem_range.h new file mode 100644 index 000000000..4b56f48cf --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/virt_mem_range.h @@ -0,0 +1,3 @@ + +#include "g_virt_mem_range_nvoc.h" + diff --git a/src/nvidia/inc/kernel/mem_mgr/virtual_mem.h b/src/nvidia/inc/kernel/mem_mgr/virtual_mem.h new file mode 100644 index 000000000..07e9d16e5 --- /dev/null +++ b/src/nvidia/inc/kernel/mem_mgr/virtual_mem.h @@ -0,0 +1,3 @@ + +#include "g_virtual_mem_nvoc.h" + diff --git a/src/nvidia/inc/kernel/os/capability.h b/src/nvidia/inc/kernel/os/capability.h new file mode 100644 index 000000000..e2fa861a5 --- /dev/null +++ b/src/nvidia/inc/kernel/os/capability.h @@ -0,0 +1,46 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _OS_CAPABILITY_H_ +#define _OS_CAPABILITY_H_ + +// OS specific RM capabilities structure +typedef struct OS_RM_CAPS OS_RM_CAPS; + +// RM capabilities +#define NV_RM_CAP_SYS_BASE 0x0 +#define NV_RM_CAP_SYS_PROFILER_CONTEXT (NV_RM_CAP_SYS_BASE + 0) +#define NV_RM_CAP_SYS_PROFILER_DEVICE (NV_RM_CAP_SYS_BASE + 1) +#define NV_RM_CAP_SYS_SMC_CONFIG (NV_RM_CAP_SYS_BASE + 2) +#define NV_RM_CAP_SYS_SMC_MONITOR (NV_RM_CAP_SYS_BASE + 3) + +#define NV_RM_CAP_SMC_PARTITION_BASE 0x100 +#define NV_RM_CAP_SMC_PARTITION_ACCESS (NV_RM_CAP_SMC_PARTITION_BASE + 0) + +#define NV_RM_CAP_EXT_BASE 0x200 +#define NV_RM_CAP_EXT_FABRIC_MGMT (NV_RM_CAP_EXT_BASE + 0) + +#define NV_RM_CAP_SMC_EXEC_PARTITION_BASE 0x300 +#define NV_RM_CAP_SMC_EXEC_PARTITION_ACCESS (NV_RM_CAP_SMC_EXEC_PARTITION_BASE + 0) + +#endif diff --git a/src/nvidia/inc/kernel/os/nv_memory_type.h b/src/nvidia/inc/kernel/os/nv_memory_type.h new file mode 100644 index 000000000..34255c758 --- /dev/null +++ b/src/nvidia/inc/kernel/os/nv_memory_type.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_MEMORY_TYPE_H +#define NV_MEMORY_TYPE_H + +#define NV_MEMORY_NONCONTIGUOUS 0 +#define NV_MEMORY_CONTIGUOUS 1 + +#define NV_MEMORY_CACHED 0 +#define NV_MEMORY_UNCACHED 1 +#define NV_MEMORY_WRITECOMBINED 2 +#define NV_MEMORY_WRITEBACK 5 +#define NV_MEMORY_DEFAULT 6 +#define NV_MEMORY_UNCACHED_WEAK 7 + +#define NV_PROTECT_READABLE 1 +#define NV_PROTECT_WRITEABLE 2 +#define NV_PROTECT_READ_WRITE (NV_PROTECT_READABLE | NV_PROTECT_WRITEABLE) + +#endif /* NV_MEMORY_TYPE_H */ diff --git a/src/nvidia/inc/kernel/os/os.h b/src/nvidia/inc/kernel/os/os.h new file mode 100644 index 000000000..c58aa0cb4 --- /dev/null +++ b/src/nvidia/inc/kernel/os/os.h @@ -0,0 +1,3 @@ + +#include "g_os_nvoc.h" + diff --git a/src/nvidia/inc/kernel/os/os_stub.h b/src/nvidia/inc/kernel/os/os_stub.h new file mode 100644 index 000000000..11132d417 --- /dev/null +++ b/src/nvidia/inc/kernel/os/os_stub.h @@ -0,0 +1,87 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef OS_STUB_H +#define OS_STUB_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Extern definitions of all public stub function interfaces * +* * +\***************************************************************************/ + +#include "os/os.h" + +// +// Each of these stub functions returns a different type. Used to +// stub out function pointers in OBJOS. +// +OSQADbgRegistryInit stubOsQADbgRegistryInit; +OSnv_rdcr4 stubOsnv_rdcr4; +OSnv_rdxcr0 stubOsnv_rdxcr0; +OSnv_cpuid stubOsnv_cpuid; +OSnv_rdmsr stubOsnv_rdmsr; +OSnv_wrmsr stubOsnv_wrmsr; +OSRobustChannelsDefaultState stubOsRobustChannelsDefaultState; +OSSpinLoop stubOsSpinLoop; +OSDbgBreakpointEnabled stubOsDbgBreakpointEnabled; +OSQueueWorkItem stubOsQueueWorkItem; +OSQueueWorkItemWithFlags stubOsQueueWorkItemWithFlags; +OSQueueSystemWorkItem stubOsQueueSystemWorkItem; +OSSimEscapeWrite stubOsSimEscapeWrite; +OSSimEscapeWriteBuffer stubOsSimEscapeWriteBuffer; +OSSimEscapeRead stubOsSimEscapeRead; +OSSimEscapeReadBuffer stubOsSimEscapeReadBuffer; +OSCallACPI_MXMX stubOsCallACPI_MXMX; +OSCallACPI_DSM stubOsCallACPI_DSM; +OSCallACPI_DDC stubOsCallACPI_DDC; +OSCallACPI_BCL stubOsCallACPI_BCL; +OSCallACPI_ON stubOsCallACPI_ON; +OSCallACPI_OFF stubOsCallACPI_OFF; +OSCallACPI_NVHG_GPUON stubOsCallWMI_NVHG_GPUON; +OSCallACPI_NVHG_GPUOFF stubOsCallWMI_NVHG_GPUOFF; +OSCallACPI_NVHG_GPUSTA stubOsCallWMI_NVHG_GPUSTA; +OSCallACPI_NVHG_MXDS stubOsCallWMI_NVHG_MXDS; +OSCallACPI_NVHG_MXMX stubOsCallWMI_NVHG_MXMX; +OSCallACPI_NVHG_DOS stubOsCallWMI_NVHG_DOS; +OSCallACPI_NVHG_ROM stubOsCallWMI_NVHG_ROM; +OSCallACPI_NVHG_DCS stubOsCallWMI_NVHG_DCS; +OSCallACPI_DOD stubOsCallWMI_DOD; +OSCheckCallback stubOsCheckCallback; +OSRCCallback stubOsRCCallback; + +OSCallACPI_NBPS stubOsCallACPI_NBPS; +OSCallACPI_NBSL stubOsCallACPI_NBSL; +OSCallACPI_OPTM_GPUON stubOsCallWMI_OPTM_GPUON; +OSSetupVBlank stubOsSetupVBlank; +OSObjectEventNotification stubOsObjectEventNotification; +OSPageArrayGetPhysAddr stubOsPageArrayGetPhysAddr; +OSInternalReserveFreeCallback stubOsInternalReserveFreeCallback; +OSInternalReserveAllocCallback stubOsInternalReserveAllocCallback; +OSGetUefiVariable stubOsGetUefiVariable; +OSCallACPI_MXDS stubOsCallACPI_MXDS; +OSCallACPI_MXDM stubOsCallACPI_MXDM; +OSCallACPI_MXID stubOsCallACPI_MXID; +OSCallACPI_LRST stubOsCallACPI_LRST; + +#endif // OS_STUB_H diff --git 
a/src/nvidia/inc/kernel/platform/acpi_common.h b/src/nvidia/inc/kernel/platform/acpi_common.h new file mode 100644 index 000000000..37d55b7f3 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/acpi_common.h @@ -0,0 +1,112 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _ACPICOMMON_H_ +#define _ACPICOMMON_H_ + +#include "acpigenfuncs.h" +#include "core/core.h" +#include "rmconfig.h" + +NV_STATUS testIfDsmFuncSupported(OBJGPU *, ACPI_DSM_FUNCTION); +NV_STATUS testIfDsmSubFunctionEnabled(OBJGPU *, ACPI_DSM_FUNCTION, NvU32); +NV_STATUS remapDsmFunctionAndSubFunction(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *); +NV_STATUS getDsmGetObjectSubfunction(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *, NvU32*); +void cacheDsmSupportedFunction(OBJGPU *, ACPI_DSM_FUNCTION, NvU32, NvU32 *, NvU32); +NV_STATUS checkDsmCall(OBJGPU *, ACPI_DSM_FUNCTION *, NvU32 *, NvU32 *, NvU16 *); +void acpiDsmInit(OBJGPU *); +NV_STATUS getLicenseKey(OBJGPU *, NvU32, NvU32 *, NvU16 *); + +// useful macros +#if NV_PRINTF_ENABLED +#define DSMFuncStr(func) (func <= ACPI_DSM_FUNCTION_CURRENT ? DSMCalls[func] : DSMCalls[ACPI_DSM_FUNCTION_COUNT]) +#endif + +#define isDsmGetSuppFuncListCached(pGpu, acpiDsmFunction) (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus != DSM_FUNC_STATUS_UNKNOWN) +#define isDsmGetSuppFuncListFailed(pGpu, acpiDsmFunction) (pGpu->acpi.dsm[acpiDsmFunction].suppFuncStatus == DSM_FUNC_STATUS_FAILED) +#define isGenericDsmFunction(acpiDsmFunction) (acpiDsmFunction >= ACPI_DSM_FUNCTION_COUNT) +#define isGenericDsmSubFunction(acpiDsmSubFunction) (acpiDsmSubFunction >= NV_ACPI_GENERIC_FUNC_START) + + +#define NV_ACPI_ALL_FUNC_SUPPORT 0x00000000 // Common is supported subfunction. +#define NV_ACPI_ALL_FUNC_SUPPORTED NVBIT(NV_ACPI_ALL_FUNC_SUPPORT) // is common Function supported? +#define NV_ACPI_ALL_SUBFUNC_UNKNOWN 0xFFFFFFFF // Common define for unknown ACPI sub-function + +// All the callbacks (MXM, NBCI, NVHG) use the same bits. These are common. 
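The field definitions that follow use NVIDIA's "high:low" bit-range notation rather than pre-shifted masks: NV_ACPI_CALLBACKS_ARG_CURRENTPOWERSTATE 7:4, for example, names bits 7 through 4 of the 32-bit argument word, and values such as _NOTIFY or _ACTIVE_YES are field values to be placed in that range. RM itself normally manipulates these with the DRF-style helper macros (e.g. FLD_TEST_DRF_NUM, which appears later in this change). A minimal, hypothetical sketch of decoding such a field with plain shifts and masks; acpiCallbackField() and callbacksArg are illustrative names, not part of this header:

// Illustrative only: extract bits high..low from a 32-bit word, matching the
// "high:low" field definitions below.
static inline NvU32 acpiCallbackField(NvU32 value, NvU32 high, NvU32 low)
{
    return (NvU32)((value >> low) & ((((NvU64)1) << (high - low + 1)) - 1));
}

// e.g. acpiCallbackField(callbacksArg, 7, 4) reads the current power state
// field, and acpiCallbackField(callbacksArg, 8, 8) ==
// NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE_YES tests the stereo-active bit.
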
+#define NV_ACPI_CALLBACKS_ARG_POSTPOWERSTATE 2:2 +#define NV_ACPI_CALLBACKS_ARG_POSTPOWERSTATE_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_ARG_CURRENTPOWERSTATE 7:4 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE 8:8 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE_NO 0x00000000 +#define NV_ACPI_CALLBACKS_ARG_3DSTEREOSTATE_ACTIVE_YES 0x00000001 + + +#define NV_ACPI_CALLBACKS_RET_POSTPOWERSTATE 2:2 +#define NV_ACPI_CALLBACKS_RET_POSTPOWERSTATE_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_HOTPLUG 9:9 +#define NV_ACPI_CALLBACKS_RET_HOTPLUG_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_CONFIG 10:10 +#define NV_ACPI_CALLBACKS_RET_CONFIG_NOTIFY 0x00000001 +#define NV_ACPI_CALLBACKS_RET_3DSTEREOSTATE_ACTIVE 12:12 +#define NV_ACPI_CALLBACKS_RET_3DSTEREOSTATE_ACTIVE_NOTIFY 0x00000001 + +#define ACPI_NOTIFY_DOCK_EVENT 0x77 +#define ACPI_NOTIFY_PANEL_SWITCH 0x80 +#define ACPI_NOTIFY_DEVICE_HOTPLUG 0x81 +#define ACPI_NOTIFY_CYCLE_DISPLAY_HOTKEY 0x82 +#define ACPI_NOTIFY_NEXT_DISPLAY_HOTKEY 0x83 +#define ACPI_NOTIFY_PREV_DISPLAY_HOTKEY 0x84 +#define ACPI_NOTIFY_CYCLE_BRIGHTNESS_HOTKEY 0x85 +#define ACPI_NOTIFY_INC_BRIGHTNESS_HOTKEY 0x86 +#define ACPI_NOTIFY_DEC_BRIGHTNESS_HOTKEY 0x87 +#define ACPI_NOTIFY_ZERO_BRIGHTNESS_HOTKEY 0x88 +#define ACPI_NOTIFY_VIDEO_WAKEUP 0x90 + +#define ACPI_NOTIFY_GPS_STATUS_CHANGE 0xC0 + +#define ACPI_NOTIFY_BACKLIGHT_OFF 0xC1 +#define ACPI_NOTIFY_BACKLIGHT_ON 0xC2 + +#define ACPI_NOTIFY_POWER_LEVEL_D1 0xD1 +#define ACPI_NOTIFY_POWER_LEVEL_D2 0xD2 +#define ACPI_NOTIFY_POWER_LEVEL_D3 0xD3 +#define ACPI_NOTIFY_POWER_LEVEL_D4 0xD4 +#define ACPI_NOTIFY_POWER_LEVEL_D5 0xD5 + +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 + +#define NV_ACPI_DSM_READ_SIZE (4*1024) // 4K as per spec + +// **************************************************** +// For _ROM Get ROM Data Method +// **************************************************** +#define ROM_METHOD_MAX_RETURN_BUFFER_SIZE 0x1000 + +// these are debug strings for printing which DSM subfunction didn't work. +// These map directly to the ACPI_DSM_FUNCTION enum in gpu/gpu.h. +#if NV_PRINTF_ENABLED +extern const char * const DSMCalls[]; +#endif + +#endif // _ACPICOMMON_H_ + diff --git a/src/nvidia/inc/kernel/platform/chipset/chipset.h b/src/nvidia/inc/kernel/platform/chipset/chipset.h new file mode 100644 index 000000000..98c8136b0 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/chipset/chipset.h @@ -0,0 +1,3 @@ + +#include "g_chipset_nvoc.h" + diff --git a/src/nvidia/inc/kernel/platform/chipset/chipset_info.h b/src/nvidia/inc/kernel/platform/chipset/chipset_info.h new file mode 100644 index 000000000..87cb7f35a --- /dev/null +++ b/src/nvidia/inc/kernel/platform/chipset/chipset_info.h @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef CHIPSET_INFO_H +#define CHIPSET_INFO_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Chipset and Root Port information * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "platform/chipset/chipset.h" + +typedef struct CSINFO CSINFO; +typedef CSINFO *PCSINFO; + +typedef struct VENDORNAME VENDORNAME; +typedef VENDORNAME *PVENDORNAME; + +typedef struct RPINFO RPINFO; +typedef RPINFO *PRPINFO; + +typedef struct BRINFO BRINFO; +typedef RPINFO *PBRINFO; + +typedef struct ARMCSALLOWLISTINFO ARMCSALLOWLISTINFO; +typedef ARMCSALLOWLISTINFO *PARMCSALLOWLISTINFO; + +struct CSINFO +{ + NvU16 vendorID, deviceID; + NvU32 chipset; + const char *name; + NV_STATUS (*setupFunc)(OBJCL *); +}; + +extern CSINFO chipsetInfo[]; + +struct VENDORNAME +{ + NvU32 vendorID; + const char *name; +}; + +extern VENDORNAME vendorName[]; + +struct RPINFO +{ + NvU16 vendorID, deviceID; + NvU32 rpID; + NV_STATUS (*setupFunc)(OBJGPU *, OBJCL *); +}; + +extern RPINFO rootPortInfo[]; + +struct BRINFO +{ + NvU16 vendorID, deviceID; + NV_STATUS (*setupFunc)(OBJGPU *, OBJCL *); +}; + +extern BRINFO upstreamPortInfo[]; + +struct ARMCSALLOWLISTINFO +{ + NvU64 vendorID, deviceID; + NvU32 chipset; +}; + +extern ARMCSALLOWLISTINFO armChipsetAllowListInfo[]; + +void csGetInfoStrings(OBJCL *, NvU8 *, NvU8 *, NvU8 *, NvU8 *, NvU32); + +#endif // CHIPSET_INFO_H diff --git a/src/nvidia/inc/kernel/platform/chipset/pci_pbi.h b/src/nvidia/inc/kernel/platform/chipset/pci_pbi.h new file mode 100644 index 000000000..fbf36ba29 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/chipset/pci_pbi.h @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _PCIPBI_H_ +#define _PCIPBI_H_ + +#include "core/core.h" +#include "rmconfig.h" + +// +// Note: The implementation assumes that there is no contention for +// PBI_MUTEX, otherwise it returns an error if PBI_MUTEX is unavailable. +// The pciPbi interfaces follow the mutex model described in the PBI +// specification, but otherwise should not be considered thread-safe. +// + +// +// Exported interfaces +// + +NV_STATUS pciPbiReadUuid(void *handle, NvU8 *uuid); +NV_STATUS pciPbiGetFeature(void *handle, NvU32 *feature); + +#endif //_PCIPBI_H_ diff --git a/src/nvidia/inc/kernel/platform/cpu.h b/src/nvidia/inc/kernel/platform/cpu.h new file mode 100644 index 000000000..4f58c9ecd --- /dev/null +++ b/src/nvidia/inc/kernel/platform/cpu.h @@ -0,0 +1,29 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _CPU_H_ +#define _CPU_H_ + +void RmInitCpuInfo (void); + +#endif // _CPU_H_ diff --git a/src/nvidia/inc/kernel/platform/hwbc.h b/src/nvidia/inc/kernel/platform/hwbc.h new file mode 100644 index 000000000..c99f81e90 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/hwbc.h @@ -0,0 +1,135 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef HWBC_H +#define HWBC_H + +#include "gpu/gpu.h" // NBADDR, POBJGPU + +// HWBC_UPSTREAM_BUS_SPEED commands +#define HWBC_UPSTREAM_BUS_SPEED_GEN1PCIE 1 +#define HWBC_UPSTREAM_BUS_SPEED_GEN2PCIE 2 +#define HWBC_UPSTREAM_BUS_SPEED_GEN3PCIE 3 + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Module: HWBC.H * +* Hardware Broadcast related defines and structures. * +* * +\***************************************************************************/ +struct OBJCL; +typedef struct OBJHWBC *POBJHWBC; +typedef struct OBJHWBC OBJHWBC; +typedef struct HWBC_APERTURE *PHWBC_APERTURE; +typedef struct HWBC_APERTURE HWBC_APERTURE; + +// These values define maximum number of targets/apertures to be supported in +// the OBJHWBC object. +#define NUM_HWBC_TARGETS 4 +#define NUM_HWBC_APERTURES 3 + +#define PCI_P2P_PRE_BL 0x00000024 /* RW-4R */ +#define PCI_P2P_PRE_BL_B64BIT 3:0 /* C--VF */ +#define PCI_P2P_PRE_BL_B64BIT_YES 0x00000001 /* C---V */ +#define PCI_P2P_PRE_BL_PREFETCH_MEM_BASE 15:4 /* RWIUF */ +#define PCI_P2P_PRE_BL_L64BIT 19:16 /* C--VF */ +#define PCI_P2P_PRE_BL_L64BIT_YES 0x00000001 /* C---V */ +#define PCI_P2P_PRE_BL_PREFETCH_MEM_LIMIT 31:20 /* RWIUF */ +#define PCI_P2P_PRE_BU32 0x00000028 /* RW-4R */ +#define PCI_P2P_PRE_BU32_BASE_UPPER_BITS 31:0 /* RWIUF */ +#define PCI_P2P_PRE_LU32 0x0000002C /* RW-4R */ +#define PCI_P2P_PRE_LU32_LIMIT_UPPER_BITS 31:0 /* RWIUF */ + +#define BR03_REG(p, i) (p[NV_PES_XVU_ ## i / sizeof(*p)]) + +#define BR03_BAR0_SIZE (16*1024) +#define BR03_GPU_REGISTER_ALIAS_OFFSET 0x4FC000 + +NvBool objClSetPcieHWBC(OBJGPU *, OBJCL*); // Find all Broadcast resource in the higher hierarchy of the GPU + +// Disables ASPM on downstream ports of any BR04 A03 (or later) that is parent of device at 'bus'. +NV_STATUS Nvidia_BR04_disableDownstreamASPM(NvU8); + + +// +// Bridge resource type +// +typedef +enum { + HWBC_UNKNOWN = 0 +, HWBC_NVIDIA_BR03 +, HWBC_NVIDIA_BR04 +, HWBC_PLX_PEX8747 +} HWBC_RES_TYPE; + +struct OBJHWBC +{ + // what kind of BC resource + HWBC_RES_TYPE bcRes; + + NvU32 hwbcId; + + // the control device + // this would be the upstream port for BR03 or the host bridge for C19/CK804 + NBADDR ctrlDev; + + // any device has bus number between the minBus and maxBus(inclusive) is connected to this device + // this equals to the secondary bus number and subordinate bus number for a bridge (BR03) + NvU32 domain; + NvU8 minBus, maxBus; + + OBJHWBC *pSibling, *pFirstChild, *pParent; // link to siblings, the first child and parent + + NvU32 gpuMask; + + RmPhysAddr gpuPhysAddr; + + // + // BR04: This array is indexed by GPU instance number. If the GPU referred + // to by that instance is not behind this BR04 -1 is stored at that index; + // if it is behind this BR04 the downstream port it's behind is stored + // there. The information is necessary to determine which BR04s must be + // involved to broadcast between some set of GPUs, and also to determine + // how to program redirection windows for unicast access. 
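    // (Editorial note, not part of the original header: as a hypothetical
    //  illustration of this encoding, with pHwbc and gpuInstance as
    //  illustrative names,
    //      NvS8 dp = pHwbc->dpForGpuInstance[gpuInstance];
    //  would be negative when that GPU is not behind this BR04, and otherwise
    //  holds the index of the downstream port the GPU sits behind.)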
+ // + NvS8 dpForGpuInstance[NV_MAX_DEVICES]; + + // For mapping state + NvS8 mappingTarget; + NvU32 mappingCount; + + // Private data + NvBool hasPlxFirmwareInfo; + NvU32 fwVersion; + NvU8 fwOemVersion; + NvU8 plxRevision; + NvBool bNotInBoard; +}; + +void plxPex8747GetFirmwareInfo(OBJCL *pCl, OBJGPU *pGpu, OBJHWBC *pHWBC); + +// +// Hardware Broadcast error conditions +#define HWBC_ERROR_BR03_INVALID_BAR0 0 + +#endif // HWBC_H diff --git a/src/nvidia/inc/kernel/platform/p2p/p2p_caps.h b/src/nvidia/inc/kernel/platform/p2p/p2p_caps.h new file mode 100644 index 000000000..1bd045d18 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/p2p/p2p_caps.h @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef P2P_CAPS_H +#define P2P_CAPS_H + +typedef enum +{ + P2P_CONNECTIVITY_UNKNOWN = 0, + P2P_CONNECTIVITY_PCIE, + P2P_CONNECTIVITY_PCIE_BAR1, + P2P_CONNECTIVITY_NVLINK, + P2P_CONNECTIVITY_NVLINK_INDIRECT, + P2P_CONNECTIVITY_C2C, +} P2P_CONNECTIVITY; + +NV_STATUS p2pGetCaps (NvU32 gpuMask, NvBool *pP2PWriteCapable, NvBool *pP2PReadCapable, + P2P_CONNECTIVITY *pConnectivity); +NV_STATUS p2pGetCapsStatus (NvU32 gpuMask, NvU8 *pP2PWriteCapStatus, NvU8 *pP2PReadCapStatus, + P2P_CONNECTIVITY *pConnectivity + ); + +#endif // P2P_CAPS_H diff --git a/src/nvidia/inc/kernel/platform/pci_exp_table.h b/src/nvidia/inc/kernel/platform/pci_exp_table.h new file mode 100644 index 000000000..185bd6978 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/pci_exp_table.h @@ -0,0 +1,131 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef PCIEXPTBL_H +#define PCIEXPTBL_H + +// +// The VBIOS object comes from walking the PCI expansion code block +// The following structure holds the expansion code format. +// +#define PCI_EXP_ROM_SIGNATURE 0xaa55 +#define PCI_EXP_ROM_SIGNATURE_NV 0x4e56 // "VN" in word format +#define PCI_EXP_ROM_SIGNATURE_NV2 0xbb77 +#define IS_VALID_PCI_ROM_SIG(sig) ((sig == PCI_EXP_ROM_SIGNATURE) || \ + (sig == PCI_EXP_ROM_SIGNATURE_NV) || \ + (sig == PCI_EXP_ROM_SIGNATURE_NV2)) + +#define OFFSETOF_PCI_EXP_ROM_SIG 0x0 +#define OFFSETOF_PCI_EXP_ROM_NBSI_DATA_OFFSET 0x16 +#define OFFSETOF_PCI_EXP_ROM_PCI_DATA_STRUCT_PTR 0x18 + +#pragma pack(1) +typedef struct _PCI_EXP_ROM_STANDARD +{ + NvU16 sig; // 00h: ROM Signature 0xaa55 + NvU8 reserved [0x16]; // 02h: Reserved (processor architecture unique data) + NvU16 pciDataStrucPtr; // 18h: Pointer to PCI Data Structure + NvU32 sizeOfBlock; // 1Ah: +} PCI_EXP_ROM_STANDARD, *PPCI_EXP_ROM_STANDARD; +#pragma pack() + +#pragma pack(1) +typedef struct _PCI_EXP_ROM_NBSI +{ + NvU16 sig; // 00h: ROM Signature 0xaa55 + NvU8 reserved [0x14]; // 02h: Reserved (processor architecture unique data) + NvU16 nbsiDataOffset; // 16h: Offset from header to NBSI image + NvU16 pciDataStrucPtr; // 18h: Pointer to PCI Data Structure + NvU32 sizeOfBlock; // 1Ah: +} PCI_EXP_ROM_NBSI, *PPCI_EXP_ROM_NBSI; +#pragma pack() + +typedef union _PCI_EXP_ROM { + PCI_EXP_ROM_STANDARD standard; + PCI_EXP_ROM_NBSI nbsi; +} PCI_EXP_ROM, *PPCI_EXP_ROM; + +#define PCI_DATA_STRUCT_SIGNATURE 0x52494350 // "PCIR" in dword format +#define PCI_DATA_STRUCT_SIGNATURE_NV 0x5344504E // "NPDS" in dword format +#define PCI_DATA_STRUCT_SIGNATURE_NV2 0x53494752 // "RGIS" in dword format +#define IS_VALID_PCI_DATA_SIG(sig) ((sig == PCI_DATA_STRUCT_SIGNATURE) || \ + (sig == PCI_DATA_STRUCT_SIGNATURE_NV) || \ + (sig == PCI_DATA_STRUCT_SIGNATURE_NV2)) + +#define PCI_LAST_IMAGE NVBIT(7) +#define PCI_ROM_IMAGE_BLOCK_SIZE 512U + +#define OFFSETOF_PCI_DATA_STRUCT_SIG 0x0 +#define OFFSETOF_PCI_DATA_STRUCT_VENDOR_ID 0x4 +#define OFFSETOF_PCI_DATA_STRUCT_LEN 0xa +#define OFFSETOF_PCI_DATA_STRUCT_CLASS_CODE 0xd +#define OFFSETOF_PCI_DATA_STRUCT_CODE_TYPE 0x14 +#define OFFSETOF_PCI_DATA_STRUCT_IMAGE_LEN 0x10 +#define OFFSETOF_PCI_DATA_STRUCT_LAST_IMAGE 0x15 + +#pragma pack(1) +typedef struct _PCI_DATA_STRUCT +{ + NvU32 sig; // 00h: Signature, the string "PCIR" or NVIDIA's alternate "NPDS" + NvU16 vendorID; // 04h: Vendor Identification + NvU16 deviceID; // 06h: Device Identification + NvU16 deviceListPtr; // 08h: Device List Pointer + NvU16 pciDataStructLen; // 0Ah: PCI Data Structure Length + NvU8 pciDataStructRev; // 0Ch: PCI Data Structure Revision + NvU8 classCode[3]; // 0Dh: Class Code + NvU16 imageLen; // 10h: Image Length (units of 512 bytes) + NvU16 vendorRomRev; // 12h: Revision Level of the Vendor's ROM + NvU8 codeType; // 14h: holds NBSI_OBJ_CODE_TYPE (0x70) and others + NvU8 lastImage; // 15h: Last Image Indicator: bit7=1 is lastImage + NvU16 maxRunTimeImageLen; // 16h: Maximum Run-time Image Length (units of 512 
bytes) +} PCI_DATA_STRUCT, *PPCI_DATA_STRUCT; +#pragma pack() + +#define NV_PCI_DATA_EXT_SIG 0x4544504E // "NPDE" in dword format +#define NV_PCI_DATA_EXT_REV_10 0x100 // 1.0 +#define NV_PCI_DATA_EXT_REV_11 0x101 // 1.1 + +#define OFFSETOF_PCI_DATA_EXT_STRUCT_SIG 0x0 +#define OFFSETOF_PCI_DATA_EXT_STRUCT_LEN 0x6 +#define OFFSETOF_PCI_DATA_EXT_STRUCT_REV 0x4 +#define OFFSETOF_PCI_DATA_EXT_STRUCT_SUBIMAGE_LEN 0x8 +#define OFFSETOF_PCI_DATA_EXT_STRUCT_LAST_IMAGE 0xa +#define OFFSETOF_PCI_DATA_EXT_STRUCT_FLAGS 0xb + +#define PCI_DATA_EXT_STRUCT_FLAGS_CHECKSUM_DISABLED 0x04 + +#pragma pack(1) +typedef struct _NV_PCI_DATA_EXT_STRUCT +{ + NvU32 signature; // 00h: Signature, the string "NPDE" + NvU16 nvPciDataExtRev; // 04h: NVIDIA PCI Data Extension Revision + NvU16 nvPciDataExtLen; // 06h: NVIDIA PCI Data Extension Length + NvU16 subimageLen; // 08h: Sub-image Length + NvU8 privLastImage; // 0Ah: Private Last Image Indicator + NvU8 flags; // 0Bh: Private images enabled if bit0=1 +} NV_PCI_DATA_EXT_STRUCT, *PNV_PCI_DATA_EXT_STRUCT; +#pragma pack() + +#endif // PCIEXPTBL_H + + diff --git a/src/nvidia/inc/kernel/platform/platform.h b/src/nvidia/inc/kernel/platform/platform.h new file mode 100644 index 000000000..29a27e1d2 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/platform.h @@ -0,0 +1,3 @@ + +#include "g_platform_nvoc.h" + diff --git a/src/nvidia/inc/kernel/platform/sli/sli.h b/src/nvidia/inc/kernel/platform/sli/sli.h new file mode 100644 index 000000000..969069ba9 --- /dev/null +++ b/src/nvidia/inc/kernel/platform/sli/sli.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef RMSLI_H +#define RMSLI_H + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Private SLI related defines and structures. 
*
+*                                                                           *
+\***************************************************************************/
+
+#include "core/core.h"
+#include "nvlimits.h"
+#include "gpu_mgr/gpu_mgr.h"
+
+#define IsDeviceDestroyed(p)    (gpuGetDeviceInstance(p) == NV_MAX_DEVICES)
+
+// Unlinked SLI is implemented in RM clients
+#define IsUnlinkedSLIEnabled(p) ((p)->getProperty((p), PDB_PROP_GPU_RM_UNLINKED_SLI))
+
+#define IsSLIEnabled(p)         0
+#define NumSubDevices(p)        0
+
+#define SLI_LOOP_START(sliLoopFlags) { NvU32 loopIndex = 0; do {
+#define SLI_LOOP_END                 } while (loopIndex); }
+#define SLI_LOOP_BREAK               break
+#define SLI_LOOP_CONTINUE            continue
+#define SLI_LOOP_GOTO(loc)           { goto loc; }
+#define SLI_LOOP_RETURN(SLi_ret)     { return(SLi_ret); }
+#define SLI_LOOP_RETURN_VOID         { return; }
+
+// macro to use when declaring array vars that'll be used w/i SLI_LOOPs
+#define SLI_LOOP_ARRAY_SIZE (NV_MAX_SUBDEVICES+1)
+
+// macro to verify that arrays are properly sized
+#define VERIFY_SLI_LOOP_ARRAY_SIZE(arr)                                    \
+do {                                                                       \
+    if (sizeof(arr) > sizeof(void *))                                      \
+    {                                                                      \
+        NV_ASSERT(SLI_LOOP_ARRAY_SIZE == (sizeof(arr) / sizeof(arr[0])));  \
+    }                                                                      \
+} while (0)
+
+#endif // RMSLI_H
diff --git a/src/nvidia/inc/kernel/power/gpu_boost_mgr.h b/src/nvidia/inc/kernel/power/gpu_boost_mgr.h
new file mode 100644
index 000000000..d592ad80e
--- /dev/null
+++ b/src/nvidia/inc/kernel/power/gpu_boost_mgr.h
@@ -0,0 +1,3 @@
+
+#include "g_gpu_boost_mgr_nvoc.h"
+
diff --git a/src/nvidia/inc/kernel/rmapi/alloc_size.h b/src/nvidia/inc/kernel/rmapi/alloc_size.h
new file mode 100644
index 000000000..55734f2d6
--- /dev/null
+++ b/src/nvidia/inc/kernel/rmapi/alloc_size.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _ALLOC_SIZE_H_
+#define _ALLOC_SIZE_H_
+
+#include "nvstatus.h"
+
+/*
+ * rmapiGetClassAllocParamSize()
+ *
+ * Returns class size in number of bytes.
Returns zero + * if the specified class has no optional allocation parameters + * + */ +NV_STATUS rmapiGetClassAllocParamSize(NvU32 *pAllocParamSizeBytes, NvP64 pUserParams, NvBool *pBAllowNull, NvU32 hClass); + +#endif // _ALLOC_SIZE_H_ + diff --git a/src/nvidia/inc/kernel/rmapi/binary_api.h b/src/nvidia/inc/kernel/rmapi/binary_api.h new file mode 100644 index 000000000..9b461d57b --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/binary_api.h @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_binary_api_nvoc.h" + +#ifndef BINARY_API_H +#define BINARY_API_H + +#include "core/core.h" +#include "rmapi/resource.h" +#include "gpu/gpu_resource.h" +#include "resserv/rs_resource.h" +#include "rmapi/control.h" + +NVOC_PREFIX(binapi) class BinaryApi : GpuResource +{ +public: + NV_STATUS binapiConstruct(BinaryApi *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams) : + GpuResource(pCallContext, pParams); + + virtual NV_STATUS binapiControl(BinaryApi *pResource, CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +}; + +NVOC_PREFIX(binapipriv) class BinaryApiPrivileged : BinaryApi +{ +public: + NV_STATUS binapiprivConstruct(BinaryApiPrivileged *pResource, CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams) : + BinaryApi(pCallContext, pParams); + + virtual NV_STATUS binapiprivControl(BinaryApiPrivileged *pResource, CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + +}; + +#endif + diff --git a/src/nvidia/inc/kernel/rmapi/client.h b/src/nvidia/inc/kernel/rmapi/client.h new file mode 100644 index 000000000..bf1a4341a --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/client.h @@ -0,0 +1,3 @@ + +#include "g_client_nvoc.h" + diff --git a/src/nvidia/inc/kernel/rmapi/client_resource.h b/src/nvidia/inc/kernel/rmapi/client_resource.h new file mode 100644 index 000000000..405ee3ab7 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/client_resource.h @@ -0,0 +1,3 @@ + +#include "g_client_resource_nvoc.h" + diff --git a/src/nvidia/inc/kernel/rmapi/control.h b/src/nvidia/inc/kernel/rmapi/control.h new file mode 100644 index 000000000..eb6aa382b --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/control.h @@ -0,0 +1,159 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _CONTROL_H_ +#define _CONTROL_H_ + +#include "core/core.h" + +#include "resserv/rs_resource.h" +#include "resserv/resserv.h" + +#include "utils/nvmacro.h" +#include "rmapi/param_copy.h" + +struct NVOC_EXPORTED_METHOD_DEF; +typedef RS_RES_CONTROL_PARAMS_INTERNAL RmCtrlParams; + +// +// RmCtrlExecuteCookie +// +// This typedef describes the data used by the rmctrl cmd execution +// path. The data is filled at the beginning of rmControlCmdExecute() +// and used as necessary in the other stages. +// +struct RS_CONTROL_COOKIE +{ + // Rmctrl Command ID + NvU32 cmd; + + // Rmctrl Flags + NvU32 ctrlFlags; + + // Required Access Rights for this command + const RS_ACCESS_MASK rightsRequired; + + NvBool bFreeParamCopy; ///< Indicates that param copies should be cleaned up + NvBool bFreeEmbeddedCopy; ///< Indicates embedded param copies should be cleaned up + + RMAPI_PARAM_COPY paramCopy; + RMAPI_PARAM_COPY embeddedParamCopies[4]; // Up to 4 embedded pointers per one RmControl identified +}; +typedef RS_CONTROL_COOKIE RmCtrlExecuteCookie; + +// values for RmCtrlDeferredCmd.pending +#define RMCTRL_DEFERRED_FREE 0 // buffer is free +#define RMCTRL_DEFERRED_ACQUIRED 1 // buffer is acquired to fill in data +#define RMCTRL_DEFERRED_READY 2 // buffer is acquired and data has been copied. + +#define RMCTRL_DEFERRED_MAX_PARAM_SIZE 128 // 128 bytes internal buffer for rmctrl param + +typedef struct +{ + NvS32 volatile pending; + NvU32 cpuInst; + RmCtrlParams rmCtrlDeferredParams; + NvU8 paramBuffer[RMCTRL_DEFERRED_MAX_PARAM_SIZE]; // buffer to hold rmCtrlDeferredParams.pParams +} RmCtrlDeferredCmd; + +// catch commands misdirected to non-existent engines +#define VERIFY_OBJ_PTR(p) if (p == NULL) return NV_ERR_INVALID_ARGUMENT + +// macros to get/set/clear cap bits +#define RMCTRL_GET_CAP(tbl,cap,field) (((NvU8)tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)) +#define RMCTRL_SET_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) |= (0?cap##field)) +#define RMCTRL_CLEAR_CAP(tbl,cap,field) ((tbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) &= ~(0?cap##field)) + +// macros to AND/OR caps between two tables +#define RMCTRL_AND_CAP(finaltbl,tmptbl,tmp,cap,field) \ + tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] & tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 
0/0 : (1?cap##field)]) & (0?cap##field)); \ + finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \ + finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp; + +#define RMCTRL_OR_CAP(finaltbl,tmptbl,tmp,cap,field) \ + tmp = ((finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] | tmptbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)]) & (0?cap##field)); \ + finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] &= ~(0?cap##field); \ + finaltbl[((1?cap##field)>=cap##_TBL_SIZE) ? 0/0 : (1?cap##field)] |= tmp; + +// Whether the command ID is a NULL command? +// We allow NVXXXX_CTRL_CMD_NULL (0x00000000) as well as the +// per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 ) +#define RMCTRL_IS_NULL_CMD(cmd) ((cmd == NVXXXX_CTRL_CMD_NULL) || \ + (FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) && \ + FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd))) + +// top-level internal RM Control interface +NV_STATUS rmControl_Deferred(RmCtrlDeferredCmd *pRmCtrlDeferredCmd); + +// Helper functions for handling embedded parameter copies +NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmCtrlParams); +NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *pParamCopy, RmCtrlParams *pRmCtrlParams); + +#define RM_CLIENT_PTR_ACCESS_CHECK_READ NVBIT(0) +#define RM_CLIENT_PTR_ACCESS_CHECK_WRITE NVBIT(1) + +// +// For NVOC Exported functions +// +// RMCTRL_FLAGS(A, B, C) is expanded to +// 0 | RMCTRL_FLAGS_A | RMCTRL_FLAGS_B | RMCTRL_FLAGS_C +// +// ACCESS_RIGHTS(A, B, C) is expanded to +// 0 | NVBIT(RS_ACCESS_A) | NVBIT(RS_ACCESS_B) | NVBIT(RS_ACCESS_C) +// +#define RMCTRL_EXPORT(cmdId, ...) [[nvoc::export(cmdId, __VA_ARGS__)]] +#define _RMCTRL_PREP_FLAG_ARG(x) | NV_CONCATENATE(RMCTRL_FLAGS_, x) +#define RMCTRL_FLAGS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_FLAG_ARG, __VA_ARGS__)) +#define _RMCTRL_PREP_ACCESS_ARG(x) | NVBIT(NV_CONCATENATE(RS_ACCESS_, x)) +#define ACCESS_RIGHTS(...) (0 NV_FOREACH_ARG_NOCOMMA(_RMCTRL_PREP_ACCESS_ARG, __VA_ARGS__)) + +#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(ctrlFlags) \ + ( \ + (ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && \ + !RMCFG_FEATURE_PHYSICAL_RM \ + ) + +// per-rmcontrol flags values +#define RMCTRL_FLAGS_NONE 0x000000000 +#define RMCTRL_FLAGS_NO_STATIC 0x000000000 // internal to chip-config. TODO -- delete +#define RMCTRL_FLAGS_ONLY_IF_CMD_DEFINED 0x000000000 // internal to chip-config. 
TODO -- delete +#define RMCTRL_FLAGS_KERNEL_PRIVILEGED 0x000000000 +#define RMCTRL_FLAGS_NO_GPUS_LOCK 0x000000001 +#define RMCTRL_FLAGS_NO_GPUS_ACCESS 0x000000002 +#define RMCTRL_FLAGS_PRIVILEGED 0x000000004 +#define RMCTRL_FLAGS_HACK_USED_ON_MULTIPLE_CLASSES 0x000000008 +#define RMCTRL_FLAGS_NON_PRIVILEGED 0x000000010 +#define RMCTRL_FLAGS_BIG_PAYLOAD 0x000000020 +#define RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY 0x000000040 +#define RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED 0x000000100 // for Resserv Access Rights migration +#define RMCTRL_FLAGS_ROUTE_TO_PHYSICAL 0x000000200 +#define RMCTRL_FLAGS_INTERNAL 0x000000400 +#define RMCTRL_FLAGS_API_LOCK_READONLY 0x000000800 +#define RMCTRL_FLAGS_GPU_LOCK_READONLY 0x000001000 +#define RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST 0x000002000 +#define RMCTRL_FLAGS_CACHEABLE 0x000004000 +#define RMCTRL_FLAGS_COPYOUT_ON_ERROR 0x000008000 +#define RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS 0x000010000 + +#endif // _CONTROL_H_ + + diff --git a/src/nvidia/inc/kernel/rmapi/event.h b/src/nvidia/inc/kernel/rmapi/event.h new file mode 100644 index 000000000..3282443d4 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/event.h @@ -0,0 +1,3 @@ + +#include "g_event_nvoc.h" + diff --git a/src/nvidia/inc/kernel/rmapi/event_buffer.h b/src/nvidia/inc/kernel/rmapi/event_buffer.h new file mode 100644 index 000000000..2867ac338 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/event_buffer.h @@ -0,0 +1,3 @@ + +#include "g_event_buffer_nvoc.h" + diff --git a/src/nvidia/inc/kernel/rmapi/exports.h b/src/nvidia/inc/kernel/rmapi/exports.h new file mode 100644 index 000000000..d1a96247d --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/exports.h @@ -0,0 +1,130 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _EXPORTS_H +#define _EXPORTS_H + +#include "core/core.h" + +// +// !! Deprecated. Do not use these exported API functions. Instead use the +// User or Kernel ones below depending on if they are called from Kernel or +// User space. +// +// A User export is to be used for code paths originating from user space and +// MUST pass only user client handles and user-mode pointers. On most OSes, RM +// will sanity check the use of handles and pointers against incorrect or +// malicious use. 
+// +// A Kernel export is to be used for code paths originating from kernel space +// and MUST pass only kernel client handles and kernel-mode pointers. By default +// RM will skip any validation checks when a Kernel export is called. The onus +// is on the caller that only valid handles and pointers are passed. +// TBD. RM may enable the checks on debug builds or when a regkey is set. +// +// For more information refer to the Kernel_Client_Data_Validation wiki page +// +// WARNING!! RM has validation checks for handles and pointers. An incorrect use +// of export can cause RM failing the API. +// +void Nv01AllocMemory (NVOS02_PARAMETERS*); +void Nv01AllocObject (NVOS05_PARAMETERS*); +void Nv04Alloc (NVOS21_PARAMETERS*); +void Nv04AllocWithAccess (NVOS64_PARAMETERS*); +void Nv01Free (NVOS00_PARAMETERS*); +void Nv04Control (NVOS54_PARAMETERS*); +void Nv04VidHeapControl (NVOS32_PARAMETERS*); +void Nv04IdleChannels (NVOS30_PARAMETERS*); +void Nv04MapMemory (NVOS33_PARAMETERS*); +void Nv04UnmapMemory (NVOS34_PARAMETERS*); +void Nv04UpdateContextDma (NVOS37_PARAMETERS*); +void Nv04I2CAccess (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDma (NVOS39_PARAMETERS*); +void Nv04BindContextDma (NVOS49_PARAMETERS*); +void Nv04MapMemoryDma (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDma (NVOS47_PARAMETERS*); +void Nv04DupObject (NVOS55_PARAMETERS*); +void Nv04Share (NVOS57_PARAMETERS*); +void Nv04AddVblankCallback (NVOS61_PARAMETERS*); + +// exported "User" API functions +void Nv01AllocMemoryUser (NVOS02_PARAMETERS*); +void Nv01AllocObjectUser (NVOS05_PARAMETERS*); +void Nv04AllocUser (NVOS21_PARAMETERS*); +void Nv04AllocWithAccessUser (NVOS64_PARAMETERS*); +void Nv01FreeUser (NVOS00_PARAMETERS*); +void Nv04ControlUser (NVOS54_PARAMETERS*); +void Nv04VidHeapControlUser (NVOS32_PARAMETERS*); +void Nv04IdleChannelsUser (NVOS30_PARAMETERS*); +void Nv04MapMemoryUser (NVOS33_PARAMETERS*); +void Nv04UnmapMemoryUser (NVOS34_PARAMETERS*); +void Nv04UpdateContextDmaUser (NVOS37_PARAMETERS*); +void Nv04I2CAccessUser (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDmaUser (NVOS39_PARAMETERS*); +void Nv04BindContextDmaUser (NVOS49_PARAMETERS*); +void Nv04MapMemoryDmaUser (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDmaUser (NVOS47_PARAMETERS*); +void Nv04DupObjectUser (NVOS55_PARAMETERS*); +void Nv04ShareUser (NVOS57_PARAMETERS*); +void Nv04AddVblankCallbackUser (NVOS61_PARAMETERS*); + +// exported "Kernel" API functions +void Nv01AllocMemoryKernel (NVOS02_PARAMETERS*); +void Nv01AllocObjectKernel (NVOS05_PARAMETERS*); +void Nv04AllocKernel (NVOS21_PARAMETERS*); +void Nv04AllocWithAccessKernel (NVOS64_PARAMETERS*); +void Nv01FreeKernel (NVOS00_PARAMETERS*); +void Nv04ControlKernel (NVOS54_PARAMETERS*); +void Nv04VidHeapControlKernel (NVOS32_PARAMETERS*); +void Nv04IdleChannelsKernel (NVOS30_PARAMETERS*); +void Nv04MapMemoryKernel (NVOS33_PARAMETERS*); +void Nv04UnmapMemoryKernel (NVOS34_PARAMETERS*); +void Nv04UpdateContextDmaKernel (NVOS37_PARAMETERS*); +void Nv04I2CAccessKernel (NVOS_I2C_ACCESS_PARAMS*); +void Nv04AllocContextDmaKernel (NVOS39_PARAMETERS*); +void Nv04BindContextDmaKernel (NVOS49_PARAMETERS*); +void Nv04MapMemoryDmaKernel (NVOS46_PARAMETERS*); +void Nv04UnmapMemoryDmaKernel (NVOS47_PARAMETERS*); +void Nv04DupObjectKernel (NVOS55_PARAMETERS*); +void Nv04ShareKernel (NVOS57_PARAMETERS*); +void Nv04AddVblankCallbackKernel (NVOS61_PARAMETERS*); + +// exported "WithSecInfo" API functions +void Nv01AllocMemoryWithSecInfo (NVOS02_PARAMETERS*, API_SECURITY_INFO); +void Nv01AllocObjectWithSecInfo 
(NVOS05_PARAMETERS*, API_SECURITY_INFO); +void Nv04AllocWithSecInfo (NVOS21_PARAMETERS*, API_SECURITY_INFO); +void Nv04AllocWithAccessSecInfo (NVOS64_PARAMETERS*, API_SECURITY_INFO); +void Nv01FreeWithSecInfo (NVOS00_PARAMETERS*, API_SECURITY_INFO); +void Nv04ControlWithSecInfo (NVOS54_PARAMETERS*, API_SECURITY_INFO); +void Nv04VidHeapControlWithSecInfo (NVOS32_PARAMETERS*, API_SECURITY_INFO); +void Nv04IdleChannelsWithSecInfo (NVOS30_PARAMETERS*, API_SECURITY_INFO); +void Nv04MapMemoryWithSecInfo (NVOS33_PARAMETERS*, API_SECURITY_INFO); +void Nv04UnmapMemoryWithSecInfo (NVOS34_PARAMETERS*, API_SECURITY_INFO); +void Nv04I2CAccessWithSecInfo (NVOS_I2C_ACCESS_PARAMS*, API_SECURITY_INFO); +void Nv04AllocContextDmaWithSecInfo (NVOS39_PARAMETERS*, API_SECURITY_INFO); +void Nv04BindContextDmaWithSecInfo (NVOS49_PARAMETERS*, API_SECURITY_INFO); +void Nv04MapMemoryDmaWithSecInfo (NVOS46_PARAMETERS*, API_SECURITY_INFO); +void Nv04UnmapMemoryDmaWithSecInfo (NVOS47_PARAMETERS*, API_SECURITY_INFO); +void Nv04DupObjectWithSecInfo (NVOS55_PARAMETERS*, API_SECURITY_INFO); +void Nv04ShareWithSecInfo (NVOS57_PARAMETERS*, API_SECURITY_INFO); + +#endif // _EXPORTS_H diff --git a/src/nvidia/inc/kernel/rmapi/mapping_list.h b/src/nvidia/inc/kernel/rmapi/mapping_list.h new file mode 100644 index 000000000..55bc5ec73 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/mapping_list.h @@ -0,0 +1,175 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _MAPPING_LIST_H_ +#define _MAPPING_LIST_H_ + +#include +#include "containers/btree.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "rmapi/resource.h" + +struct P2PApi; +typedef struct VirtualMemory VirtualMemory; +typedef struct Memory Memory; + +// **************************************************************************** +// Type definitions +// **************************************************************************** + +// dma information definitions +typedef struct _def_client_dma_mapping_info CLI_DMA_MAPPING_INFO, *PCLI_DMA_MAPPING_INFO; +typedef struct _def_client_dma_mapping_info_iterator CLI_DMA_MAPPING_INFO_ITERATOR, *PCLI_DMA_MAPPING_INFO_ITERATOR; + +// mapping information definitions +typedef struct _def_client_dma_alloc_map_info CLI_DMA_ALLOC_MAP_INFO; + +// +// DMA memory mapping XXX keep around since needed by mapping.c +// We need to figure out what to do with this +// RS-TODO gradually remove this with inter-mapping cleanup +// +struct _def_client_dma_mapping_info +{ + NvHandle hDevice; + NvU64 DmaOffset; + void* KernelVAddr[NV_MAX_SUBDEVICES]; // Kernel's virtual address, if required + void* KernelPriv; // Token required to unmap the kernel mapping + NvU64 FbAperture[NV_MAX_SUBDEVICES]; // GPU aperture addresses, if required + NvU64 FbApertureLen[NV_MAX_SUBDEVICES]; // GPU aperture mapped lengths + MEMORY_DESCRIPTOR *pMemDesc; // Subregion to be mapped + NvU32 Flags; + struct P2PApi *pP2PInfo; + NvU32 gpuMask; + ADDRESS_TRANSLATION addressTranslation; + MEMORY_DESCRIPTOR *pBar1P2PVirtMemDesc; // The peer GPU mapped BAR1 region + MEMORY_DESCRIPTOR *pBar1P2PPhysMemDesc; // The peer GPU vidmem sub region + PCLI_DMA_MAPPING_INFO Next; + PCLI_DMA_MAPPING_INFO Prev; +}; + +// +// iterator object to enum CLI_DMA_MAPPING_INFO from 'pDmaMappingList' +// +struct _def_client_dma_mapping_info_iterator +{ + PNODE pDmaMappingList; // list of hDevices + PNODE pCurrentList; // current hDevice list entry, is list of pDmaMappings + PNODE pNextDmaMapping; // next pDmaMapping while iterating over the DmaOffsets +}; + +// +// DMA allocMapping +// +struct _def_client_dma_alloc_map_info +{ + CLI_DMA_MAPPING_INFO *pDmaMappingInfo; + struct VirtualMemory *pVirtualMemory; + struct Memory *pMemory; +}; + +// **************************************************************************** +// Function definitions +// **************************************************************************** + +// Client Memory Mappings +// +// CliUpdateMemoryMappingInfo - Fill in RsCpuMapping fields for system memory mappings +// +static inline NV_STATUS +CliUpdateMemoryMappingInfo +( + RsCpuMapping *pCpuMapping, + NvBool bKernel, + NvP64 cpuAddress, + NvP64 priv, + NvU64 cpuMapLength, + NvU32 flags +) +{ + if (pCpuMapping == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pCpuMapping->pPrivate->bKernel = bKernel; + pCpuMapping->length = cpuMapLength; + pCpuMapping->flags = flags; + pCpuMapping->processId = osGetCurrentProcess(); + pCpuMapping->pLinearAddress = cpuAddress; + pCpuMapping->pPrivate->pPriv = priv; + pCpuMapping->pPrivate->gpuAddress = -1; + pCpuMapping->pPrivate->gpuMapLength = -1; + + return NV_OK; +} + +// **************************************************************************** +// Device Memory Mappings +// **************************************************************************** + +// +// CliUpdateDeviceMemoryMapping - Fill in RsCpuMapping fields for device memory mappings +// +static inline NV_STATUS +CliUpdateDeviceMemoryMapping +( + RsCpuMapping 
*pCpuMapping, + NvBool bKernel, + NvP64 priv, + NvP64 cpuAddress, + NvU64 cpuMapLength, + NvU64 gpuAddress, + NvU64 gpuMapLength, + NvU32 flags +) +{ + if (pCpuMapping == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pCpuMapping->pPrivate->bKernel = bKernel; + pCpuMapping->length = cpuMapLength; + pCpuMapping->flags = flags; + pCpuMapping->processId = osGetCurrentProcess(); + pCpuMapping->pLinearAddress = cpuAddress; + pCpuMapping->pPrivate->pPriv = priv; + pCpuMapping->pPrivate->gpuAddress = gpuAddress; + pCpuMapping->pPrivate->gpuMapLength = gpuMapLength; + + return NV_OK; +} + +RsCpuMapping *CliFindMappingInClient (NvHandle, NvHandle, NvP64); + +// DMA Mappings +NV_STATUS intermapCreateDmaMapping (RsClient *, RsResourceRef *, NvHandle, NvHandle, PCLI_DMA_MAPPING_INFO *, NvU32); +NV_STATUS intermapRegisterDmaMapping (RsClient *, NvHandle, NvHandle, PCLI_DMA_MAPPING_INFO, NvU64, NvU32); +NV_STATUS intermapDelDmaMapping (RsClient *, NvHandle, NvHandle, NvU64, NvU32, NvBool*); +void intermapFreeDmaMapping (PCLI_DMA_MAPPING_INFO); + +NvBool CliGetDmaMappingInfo (NvHandle, NvHandle, NvHandle, NvU64, NvU32, PCLI_DMA_MAPPING_INFO*); +void CliGetDmaMappingIterator (PCLI_DMA_MAPPING_INFO *, PCLI_DMA_MAPPING_INFO_ITERATOR, PNODE pDmaMappingList); +void CliGetDmaMappingNext (PCLI_DMA_MAPPING_INFO *, PCLI_DMA_MAPPING_INFO_ITERATOR); + +// Unmap all DMA mappings between a memory resource and any DynamicMemory +NV_STATUS intermapUnmapDeviceMemoryDma (RsClient *, RsResourceRef *, NvHandle); + +#endif diff --git a/src/nvidia/inc/kernel/rmapi/nv_gpu_ops.h b/src/nvidia/inc/kernel/rmapi/nv_gpu_ops.h new file mode 100644 index 000000000..2116d66fa --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/nv_gpu_ops.h @@ -0,0 +1,272 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* + * nv_gpu_ops.h + * + * This file defines the interface between the common RM layer + * and the OS specific platform layers. 
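For readers of mapping_list.h above, a minimal sketch of how the DMA-mapping iterator helpers are meant to be used. Here pDmaMappingList stands for the per-device mapping btree (a PNODE) that real callers obtain elsewhere; it is an assumption for illustration.

// Hypothetical sketch: enumerate every CLI_DMA_MAPPING_INFO reachable from a
// DMA mapping list using the iterator declared in mapping_list.h.
CLI_DMA_MAPPING_INFO_ITERATOR it;
PCLI_DMA_MAPPING_INFO pDmaMapping = NULL;

CliGetDmaMappingIterator(&pDmaMapping, &it, pDmaMappingList);
while (pDmaMapping != NULL)
{
    // Inspect pDmaMapping->DmaOffset, pDmaMapping->pMemDesc, etc. here.
    CliGetDmaMappingNext(&pDmaMapping, &it);
}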
(Currently supported + * are Linux and KMD) + * + */ + +#ifndef _NV_GPU_OPS_H_ +#define _NV_GPU_OPS_H_ +#include "nvgputypes.h" +#include "nv_uvm_types.h" + +typedef struct gpuSession *gpuSessionHandle; +typedef struct gpuDevice *gpuDeviceHandle; +typedef struct gpuAddressSpace *gpuAddressSpaceHandle; +typedef struct gpuChannel *gpuChannelHandle; +typedef struct gpuObject *gpuObjectHandle; + +typedef struct gpuRetainedChannel_struct gpuRetainedChannel; + +NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session); + +NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session); + +NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session, + const gpuInfo *pGpuInfo, + const NvProcessorUuid *gpuGuid, + struct gpuDevice **device, + NvBool bCreateSmcPartition); + +NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device); + +NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device, + NvU64 vaBase, + NvU64 vaSize, + gpuAddressSpaceHandle *vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +NV_STATUS nvGpuOpsGetP2PCaps(gpuDeviceHandle device1, + gpuDeviceHandle device2, + getP2PCapsParams *p2pCaps); + +void nvGpuOpsAddressSpaceDestroy(gpuAddressSpaceHandle vaSpace); + +NV_STATUS nvGpuOpsMemoryAllocFb (gpuAddressSpaceHandle vaSpace, + NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo); + +NV_STATUS nvGpuOpsMemoryAllocSys (gpuAddressSpaceHandle vaSpace, + NvLength length, NvU64 *gpuOffset, gpuAllocInfo * allocInfo); + +NV_STATUS nvGpuOpsPmaAllocPages(void *pPma, + NvLength pageCount, + NvU32 pageSize, + gpuPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages); + +void nvGpuOpsPmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags); + +NV_STATUS nvGpuOpsPmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags); + +NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize); + +NV_STATUS nvGpuOpsChannelAllocate(gpuAddressSpaceHandle vaSpace, + const gpuChannelAllocParams *params, + gpuChannelHandle *channelHandle, + gpuChannelInfo *channelInfo); + +NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace, + NvHandle hSrcClient, NvHandle hSrcAllocation, NvLength length, NvU64 *gpuOffset); + +void nvGpuOpsChannelDestroy(struct gpuChannel *channel); + +void nvGpuOpsMemoryFree(gpuAddressSpaceHandle vaSpace, + NvU64 pointer); + +NV_STATUS nvGpuOpsMemoryCpuMap(gpuAddressSpaceHandle vaSpace, + NvU64 memory, NvLength length, + void **cpuPtr, NvU32 pageSize); + +void nvGpuOpsMemoryCpuUnMap(gpuAddressSpaceHandle vaSpace, + void* cpuPtr); + +NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device, + gpuCaps *caps); + +NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device, + gpuCesCaps *caps); + +NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuAddressSpace *dstVaSpace, + NvU64 *dstAddress); + +NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + gpuMemoryInfo *pGpuMemoryInfo); + +NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice, + NvHandle hSubDevice, NvU8 *gpuGuid, + unsigned guidLength); + +NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid, + const NvU8 *gpuUuid, + NvHandle *hClient, + NvHandle *hDevice, + NvHandle *hSubDevice); + +NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device, + NvHandle hPhysHandle); + +NV_STATUS nvGpuOpsGetAttachedGpus(NvU8 *guidList, unsigned *numGpus); + +NV_STATUS nvGpuOpsGetGpuInfo(const 
NvProcessorUuid *gpuUuid, + const gpuClientInfo *pGpuClientInfo, + gpuInfo *pGpuInfo); + +NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, unsigned uuidLength, NvU32 *pDeviceId, + NvU32 *pSubdeviceId); + +NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, NvBool bOwnInterrupts); + +NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device); + +NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel * channel, NvBool *bEccDbeSet); + +NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace * vaSpace, + NvU64 physAddress, unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid); + +NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace * vaSpace); + +NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace * vaSpace, void ** pFmt); + +NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace * vaSpace); + +NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo * fbInfo); + +NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo * eccInfo); + +NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, gpuFaultInfo *pFaultInfo); + +NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device, + gpuFaultInfo *pFaultInfo); + +NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, NvBool *hasPendingFaults); + +NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, void *faultBuffer, NvU32 *numFaults); + +NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device, + NvHandle hUserClient, + NvHandle hUserVASpace, + struct gpuAddressSpace **vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo); + +NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device, + void **pPma, + const UvmPmaStatistics **pPmaPubStats); + +NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsOwnAccessCntrIntr(struct gpuSession *session, + gpuAccessCntrInfo *pAccessCntrInfo, + NvBool bOwnInterrupts); + +NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo, + gpuAccessCntrConfig *pAccessCntrConfig); + +NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, gpuAccessCntrInfo *pAccessCntrInfo); + +NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1, + struct gpuDevice *device2, + NvHandle *hP2pObject); + +NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session, + NvHandle hP2pObject); + +NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace, + NvHandle hDupedMemory, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo); + +NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace, + NvHandle hClient, + NvHandle hChannel, + gpuRetainedChannel **retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo); + +void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel); + +NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel, + gpuChannelResourceBindParams *channelResourceBindParams); + +void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, NvBool bImmediate); + +NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo); + +NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device, + const void *pFaultPacket); + +// Private interface used for windows only + +// Interface used for SR-IOV heavy + +NV_STATUS 
nvGpuOpsPagingChannelAllocate(struct gpuDevice *device, + const gpuPagingChannelAllocParams *params, + gpuPagingChannelHandle *channelHandle, + gpuPagingChannelInfo *channelinfo); + +void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel); + +NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device, + NvU64 *dstAddress); + +void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device); + +NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel, + char *methodStream, + NvU32 methodStreamSize); + +#endif /* _NV_GPU_OPS_H_*/ diff --git a/src/nvidia/inc/kernel/rmapi/param_copy.h b/src/nvidia/inc/kernel/rmapi/param_copy.h new file mode 100644 index 000000000..06c9aaa0c --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/param_copy.h @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _PARAM_COPY_H_ +#define _PARAM_COPY_H_ + +// +// RMAPI_PARAM_COPY - a mechanism for getting user params in and out of resman. +// +// The struct RMAPI_PARAM_COPY keeps track of current API params for eventual +// copyout and free as needed. +// + +#include + +struct API_STATE +{ + NvP64 pUserParams; // ptr to params in client's addr space + void **ppKernelParams; // ptr to current 'pKernelParams' + NvU32 paramsSize; // # bytes + NvU32 flags; + NvBool bSizeValid; + const char *msgTag; +}; +typedef struct API_STATE RMAPI_PARAM_COPY; + +#define RMAPI_PARAM_COPY_FLAGS_NONE 0x00000000 +#define RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE NVBIT(0) +#define RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN NVBIT(1) +#define RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT NVBIT(2) +#define RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER NVBIT(3) +// +// Only set this if the paramsSize member of RMAPI_PARAM_COPY has been validated for +// correctness before calling apiParamAccess. There is a default cap on the +// largest size allowed in order to avoid huge memory allocations triggering +// out of memory scenarios if the user passes in a bogus size. +// +#define RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK NVBIT(4) +// +// 1MB is the largest size allowed for an embedded pointer accessed through +// apiParamAccess unless RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK is specified +// and the size is validated before calling apiParamsAcquire. 
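A minimal sketch of the acquire/release sequence that RMAPI_PARAM_COPY is described as supporting above. The variable names, and the meaning of the NvBool argument (taken here to indicate a user-mode source pointer), are assumptions for illustration only.

// Hypothetical sketch: copy an embedded user-mode array in, use it, then copy
// it back out and free the kernel-side buffer.
RMAPI_PARAM_COPY paramCopy;
NvU32           *pKernelCopy = NULL;
NV_STATUS        status;

RMAPI_PARAM_COPY_INIT(paramCopy, pKernelCopy, pUserArray /* NvP64 */, numElements, sizeof(NvU32));
status = rmapiParamsAcquire(&paramCopy, bUserModeArgs);   // allocates and (optionally) copies in
if (status != NV_OK)
    return status;

// ... operate on pKernelCopy[0 .. numElements-1] ...

status = rmapiParamsRelease(&paramCopy);                  // copies out (unless skipped) and frees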
+// +#define RMAPI_PARAM_COPY_MAX_PARAMS_SIZE (1*1024*1024) + +#if NV_PRINTF_STRINGS_ALLOWED +#define RMAPI_PARAM_COPY_MSG_TAG(x) x +#define RMAPI_PARAM_COPY_SET_MSG_TAG(paramCopy, theMsgTag) (paramCopy).msgTag = theMsgTag +#else +#define RMAPI_PARAM_COPY_MSG_TAG(x) ((const char *) 0) +#define RMAPI_PARAM_COPY_SET_MSG_TAG(paramCopy, theMsgTag) (paramCopy).msgTag = ((const char *) 0) +#endif + +// +// Initializes the RMAPI_PARAM_COPY structure. Sets bValid to false if calculating size +// caused an overflow. This makes the rmapiParamsAcquire() call fail with +// NV_ERR_INVALID_ARGUMENT. Since rmapiParamsAcquire() always directly follows +// this initialization, there is no need to make it return a status and +// duplicate error checking. +// +#define RMAPI_PARAM_COPY_INIT(paramCopy, pKernelParams, theUserParams, numElems, sizeOfElem) \ + do { \ + RMAPI_PARAM_COPY_SET_MSG_TAG((paramCopy), __FUNCTION__); \ + (paramCopy).ppKernelParams = (void **) &(pKernelParams); \ + (paramCopy).pUserParams = (theUserParams); \ + (paramCopy).flags = RMAPI_PARAM_COPY_FLAGS_NONE; \ + (paramCopy).bSizeValid = portSafeMulU32((numElems), (sizeOfElem), &(paramCopy).paramsSize); \ + } while(0) + +// Routines for alloc/copyin/copyout/free sequences +NV_STATUS rmapiParamsAcquire(RMAPI_PARAM_COPY *, NvBool); +NV_STATUS rmapiParamsRelease(RMAPI_PARAM_COPY *); + +NV_STATUS rmapiParamsCopyOut(const char *msgTag, void *pKernelParams, NvP64 pUserParams, NvU32 paramsSize, NvBool); +NV_STATUS rmapiParamsCopyIn(const char *msgTag, void *pKernelParams, NvP64 pUserParams, NvU32 paramsSize, NvBool); + +// Init copy_param structure +NV_STATUS rmapiParamsCopyInit(RMAPI_PARAM_COPY *, NvU32 hClass); + +#endif // _PARAM_COPY_H_ diff --git a/src/nvidia/inc/kernel/rmapi/resource.h b/src/nvidia/inc/kernel/rmapi/resource.h new file mode 100644 index 000000000..054e13aa6 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/resource.h @@ -0,0 +1,3 @@ + +#include "g_resource_nvoc.h" + diff --git a/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h b/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h new file mode 100644 index 000000000..cdbabe75f --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/resource_fwd_decls.h @@ -0,0 +1,3 @@ + +#include "g_resource_fwd_decls_nvoc.h" + diff --git a/src/nvidia/inc/kernel/rmapi/rmapi.h b/src/nvidia/inc/kernel/rmapi/rmapi.h new file mode 100644 index 000000000..fca7b7029 --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/rmapi.h @@ -0,0 +1,405 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RMAPI_H_ +#define _RMAPI_H_ + +#include "core/core.h" +#include "nvsecurityinfo.h" + +// +// Forward declarations +// +typedef struct _RM_API RM_API; +typedef struct RsServer RsServer; +typedef struct OBJGPU OBJGPU; +typedef struct RsResource RsResource; +typedef struct RsCpuMapping RsCpuMapping; +typedef struct CALL_CONTEXT CALL_CONTEXT; +typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR; +typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL; +typedef struct RS_LOCK_INFO RS_LOCK_INFO; +typedef NvU32 NV_ADDRESS_SPACE; + +extern RsServer g_resServ; + +/** + * Initialize RMAPI module. + * + * Must be called once and only once before any RMAPI functions can be called + */ +NV_STATUS rmapiInitialize(void); + +/** + * Shutdown RMAPI module + * + * Must be called once and only once when a driver is shutting down and no more + * RMAPI functions will be called. + */ +void rmapiShutdown(void); + +// Flags for rmapiLockAcquire +#define RMAPI_LOCK_FLAGS_NONE (0x00000000) // default no flags +#define RMAPI_LOCK_FLAGS_COND_ACQUIRE NVBIT(0) // conditional acquire; if lock is + // already held then return error +#define RMAPI_LOCK_FLAGS_READ NVBIT(1) // Acquire API lock for READ +#define RMAPI_LOCK_FLAGS_WRITE (0x00000000) // Acquire API lock for WRITE - Default + +/** + * Acquire the RM API Lock + * + * The API lock is a sleeping mutex that is used to serialize access to RM APIs + * by (passive-level) RM clients. + * + * The API lock is not used to protect state accessed by DPC and ISRs. For DPC + * and ISRs that GPU lock is used instead. For state controlled by clients, this + * often requires taking both API and GPU locks in API paths + * + * @param[in] flags RM_LOCK_FLAGS_* + * @param[in] module RM_LOCK_MODULES_* + */ +NV_STATUS rmapiLockAcquire(NvU32 flags, NvU32 module); + +/** + * Release RM API Lock + */ +void rmapiLockRelease(void); + +/** + * Check if current thread owns the API lock + */ +NvBool rmapiLockIsOwner(void); + + +/** + * Type of RM API client interface + */ +typedef enum +{ + RMAPI_EXTERNAL, // For clients external from RM TLS, locks, etc -- no default security attributes + RMAPI_EXTERNAL_KERNEL, // For clients external from TLS and locks but which still need default security attributes + RMAPI_MODS_LOCK_BYPASS, // Hack for MODS - skip RM locks but initialize TLS (bug 1808386) + RMAPI_API_LOCK_INTERNAL, // For clients that already have the TLS & API lock held -- security is RM internal + RMAPI_GPU_LOCK_INTERNAL, // For clients that have TLS, API lock, and GPU lock -- security is RM internal + RMAPI_STUBS, // All functions just return NV_ERR_NOT_SUPPORTED + RMAPI_TYPE_MAX +} RMAPI_TYPE; + +/** + * Query interface that can be used to perform operations through the + * client-level RM API + */ +RM_API *rmapiGetInterface(RMAPI_TYPE rmapiType); + +// Flags for RM_API::Alloc +#define RMAPI_ALLOC_FLAGS_NONE 0 +#define RMAPI_ALLOC_FLAGS_SKIP_RPC NVBIT(0) + +// Flags for RM_API::Free +#define RMAPI_FREE_FLAGS_NONE 0 + +/** + * Interface for performing operations through the RM API exposed to client + * drivers. Interface provides consistent view to the RM API while abstracting + * the individuals callers from specifying security attributes and/or from + * locking needs. 
For example, this interface can be used either before or after + * the API or GPU locks. + */ +struct _RM_API +{ + // Allocate a resource with default security attributes and local pointers (no NvP64) + NV_STATUS (*Alloc)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams); + + // Allocate a resource with default security attributes and local pointers (no NvP64) + // and client assigned handle + NV_STATUS (*AllocWithHandle)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, void *pAllocParams); + + // Allocate a resource + NV_STATUS (*AllocWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams, + NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo); + + // Free a resource with default security attributes + NV_STATUS (*Free)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject); + + // Free a resource + NV_STATUS (*FreeWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + NvU32 flags, API_SECURITY_INFO *pSecInfo); + + // Free a list of clients with default security attributes + NV_STATUS (*FreeClientList)(struct _RM_API *pRmApi, NvHandle *phClientList, NvU32 numClients); + + // Free a list of clients + NV_STATUS (*FreeClientListWithSecInfo)(struct _RM_API *pRmApi, NvHandle *phClientList, + NvU32 numClients, API_SECURITY_INFO *pSecInfo); + + // Invoke a control with default security attributes and local pointers (no NvP64) + NV_STATUS (*Control)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + void *pParams, NvU32 paramsSize); + + // Invoke a control + NV_STATUS (*ControlWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + NvP64 pParams, NvU32 paramsSize, NvU32 flags, API_SECURITY_INFO *pSecInfo); + + // Prefetch a control parameters into the control call cache (0000, 0080 and 2080 classes only) + NV_STATUS (*ControlPrefetch)(struct _RM_API *pRmApi, NvU32 cmd); + + // Dup an object with default security attributes + NV_STATUS (*DupObject)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, NvHandle *phObject, + NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags); + + // Dup an object + NV_STATUS (*DupObjectWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags, + API_SECURITY_INFO *pSecInfo); + + // Share an object with default security attributes + NV_STATUS (*Share)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy); + + // Share an object + NV_STATUS (*ShareWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, API_SECURITY_INFO *pSecInfo); + + // Map memory with default security attributes and local pointers (no NvP64). Provides + // RM internal implementation for NvRmMapMemory(). + NV_STATUS (*MapToCpu)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, void **ppCpuVirtAddr, NvU32 flags); + + // Map memory. Provides RM internal implementation for NvRmMapMemory(). 
+ NV_STATUS (*MapToCpuWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags, API_SECURITY_INFO *pSecInfo); + + // Unmap memory with default security attributes and local pointers (no NvP64) + NV_STATUS (*UnmapFromCpu)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, void *pLinearAddress, + NvU32 flags, NvU32 ProcessId); + + // Unmap memory + NV_STATUS (*UnmapFromCpuWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvP64 pLinearAddress, NvU32 flags, NvU32 ProcessId, API_SECURITY_INFO *pSecInfo); + + // Map dma memory with default security attributes. Provides RM internal implementation for NvRmMapMemoryDma(). + NV_STATUS (*Map)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset); + + // Map dma memory. Provides RM internal implementation for NvRmMapMemoryDma(). + NV_STATUS (*MapWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset, API_SECURITY_INFO *pSecInfo); + + // Unmap dma memory with default security attributes + NV_STATUS (*Unmap)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU32 flags, NvU64 dmaOffset); + + // Unmap dma memory + NV_STATUS (*UnmapWithSecInfo)(struct _RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU32 flags, NvU64 dmaOffset, API_SECURITY_INFO *pSecInfo); + + API_SECURITY_INFO defaultSecInfo; + NvBool bHasDefaultSecInfo; + NvBool bTlsInternal; + NvBool bApiLockInternal; + NvBool bRmSemaInternal; + NvBool bGpuLockInternal; + void *pPrivateContext; +}; + +// Called before any RM resource is freed +NV_STATUS rmapiFreeResourcePrologue(RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams); + +// Mark for deletion the client resources given a GPU mask +void rmapiSetDelPendingClientResourcesFromGpuMask(NvU32 gpuMask); + +// Delete the marked client resources +void rmapiDelPendingClients(void); +void rmapiDelPendingDevices(NvU32 gpuMask); +void rmapiReportLeakedDevices(NvU32 gpuMask); + +// +// Given a value, retrieves an array of client handles corresponding to clients +// with matching pOSInfo fields. The array is allocated dynamically, and is +// expected to be freed by the caller. +// +NV_STATUS rmapiGetClientHandlesFromOSInfo(void*, NvHandle**, NvU32*); + +// +// Base mapping routines for use by RsResource subclasses +// +NV_STATUS rmapiMapGpuCommon(RsResource *, CALL_CONTEXT *, RsCpuMapping *, OBJGPU *, NvU32, NvU32); +NV_STATUS rmapiValidateKernelMapping(RS_PRIV_LEVEL privLevel, NvU32 flags, NvBool *pbKernel); +NV_STATUS rmapiGetEffectiveAddrSpace(OBJGPU *pGpu, MEMORY_DESCRIPTOR *pMemDesc, NvU32 flags, NV_ADDRESS_SPACE *pAddrSpace); + +/** + * Deprecated RM API interfaces. Use RM_API instead. + */ +NV_STATUS RmUnmapMemoryDma(NvHandle, NvHandle, NvHandle, NvHandle, MEMORY_DESCRIPTOR*, NvU32, NvU64); +NV_STATUS RmConfigGetEx (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvBool); +NV_STATUS RmConfigSetEx (NvHandle, NvHandle, NvU32, NvP64, NvU32, NvBool); + +/** + * Control cache API. + * Every function except rmapiControlCacheInit and rmapiControlCacheFree is thread safe. 
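A minimal sketch of driving RM through the RM_API function table defined above, from a caller that already holds the API lock. The class, command and parameter structures are placeholders, not real values.

// Hypothetical sketch: allocate an object, issue a control on it, then free it.
RM_API   *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
NvHandle  hObject = 0;
NV_STATUS status;

status = pRmApi->Alloc(pRmApi, hClient, hParent, &hObject, SOME_CLASS, &allocParams);
if (status == NV_OK)
{
    status = pRmApi->Control(pRmApi, hClient, hObject, SOME_CTRL_CMD,
                             &ctrlParams, sizeof(ctrlParams));
    pRmApi->Free(pRmApi, hClient, hObject);
}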
+ */ +void rmapiControlCacheInit(void); +NvBool rmapiControlIsCacheable(NvU32 flags, NvBool isGSPClient); +void* rmapiControlCacheGet(NvHandle hClient, NvHandle hObject, NvU32 cmd); +NV_STATUS rmapiControlCacheSet(NvHandle hClient, NvHandle hObject, NvU32 cmd, + void* params, NvU32 paramsSize); +void rmapiControlCacheFree(void); +void rmapiControlCacheFreeClient(NvHandle hClient); +void rmapiControlCacheFreeObject(NvHandle hClient, NvHandle hObject); + +typedef struct _RM_API_CONTEXT { + NvU32 gpuMask; +} RM_API_CONTEXT; + +// +// Handler to do stuff that is required before invoking a RM API +// +NV_STATUS +rmapiPrologue +( + RM_API *pRmApi, + RM_API_CONTEXT *pContext +); + +// +// Handler to do stuff that is required after invoking a RM API +// +void +rmapiEpilogue +( + RM_API *pRmApi, + RM_API_CONTEXT *pContext +); + +void +rmapiInitLockInfo +( + RM_API *pRmApi, + NvHandle hClient, + RS_LOCK_INFO *pLockInfo +); + +// +// RM locking modules: 24-bit group bitmask, 8-bit subgroup id +// +// Lock acquires are tagged with a RM_LOCK_MODULE_* in order to partition +// the acquires into groups, which allows read-only locks to be +// enabled / disabled on a per-group basis (via apiLockMask and gpuLockMask +// in OBJSYS.) +// +// The groups are further partitioned into subgroups, which +// are used for lock profiling data collection. +// +#define RM_LOCK_MODULE_VAL(grp, subgrp) ((((grp) & 0xffffff) << 8) | ((subgrp) & 0xff)) +#define RM_LOCK_MODULE_GRP(val) (((val) >> 8) & 0xffffff) +// Grp SubGrp +#define RM_LOCK_MODULES_NONE RM_LOCK_MODULE_VAL(0x000000, 0x00) + +#define RM_LOCK_MODULES_WORKITEM RM_LOCK_MODULE_VAL(0x000001, 0x00) + +#define RM_LOCK_MODULES_CLIENT RM_LOCK_MODULE_VAL(0x000002, 0x00) + +#define RM_LOCK_MODULES_GPU_OPS RM_LOCK_MODULE_VAL(0x000004, 0x00) + +#define RM_LOCK_MODULES_OSAPI RM_LOCK_MODULE_VAL(0x000010, 0x00) +#define RM_LOCK_MODULES_STATE_CONFIG RM_LOCK_MODULE_VAL(0x000010, 0x01) +#define RM_LOCK_MODULES_EVENT RM_LOCK_MODULE_VAL(0x000010, 0x02) +#define RM_LOCK_MODULES_VBIOS RM_LOCK_MODULE_VAL(0x000010, 0x03) + +#define RM_LOCK_MODULES_MEM RM_LOCK_MODULE_VAL(0x000020, 0x00) +#define RM_LOCK_MODULES_MEM_FLA RM_LOCK_MODULE_VAL(0x000020, 0x01) +#define RM_LOCK_MODULES_MEM_PMA RM_LOCK_MODULE_VAL(0x000020, 0x02) + +#define RM_LOCK_MODULES_POWER RM_LOCK_MODULE_VAL(0x000040, 0x00) +#define RM_LOCK_MODULES_ACPI RM_LOCK_MODULE_VAL(0x000040, 0x01) +#define RM_LOCK_MODULES_DYN_POWER RM_LOCK_MODULE_VAL(0x000040, 0x02) + +#define RM_LOCK_MODULES_HYPERVISOR RM_LOCK_MODULE_VAL(0x000080, 0x00) +#define RM_LOCK_MODULES_VGPU RM_LOCK_MODULE_VAL(0x000080, 0x01) +#define RM_LOCK_MODULES_RPC RM_LOCK_MODULE_VAL(0x000080, 0x02) + +#define RM_LOCK_MODULES_DIAG RM_LOCK_MODULE_VAL(0x000100, 0x00) +#define RM_LOCK_MODULES_RC RM_LOCK_MODULE_VAL(0x000100, 0x01) + +#define RM_LOCK_MODULES_SLI RM_LOCK_MODULE_VAL(0x000200, 0x00) +#define RM_LOCK_MODULES_P2P RM_LOCK_MODULE_VAL(0x000200, 0x01) +#define RM_LOCK_MODULES_NVLINK RM_LOCK_MODULE_VAL(0x000200, 0x02) + +#define RM_LOCK_MODULES_HOTPLUG RM_LOCK_MODULE_VAL(0x000400, 0x00) +#define RM_LOCK_MODULES_DISP RM_LOCK_MODULE_VAL(0x000400, 0x01) +#define RM_LOCK_MODULES_KERNEL_RM_EVENTS RM_LOCK_MODULE_VAL(0x000400, 0x02) + +#define RM_LOCK_MODULES_GPU RM_LOCK_MODULE_VAL(0x000800, 0x00) +#define RM_LOCK_MODULES_GR RM_LOCK_MODULE_VAL(0x000800, 0x01) +#define RM_LOCK_MODULES_FB RM_LOCK_MODULE_VAL(0x000800, 0x02) +#define RM_LOCK_MODULES_FIFO RM_LOCK_MODULE_VAL(0x000800, 0x03) +#define RM_LOCK_MODULES_TMR RM_LOCK_MODULE_VAL(0x000800, 0x04) + +#define 
RM_LOCK_MODULES_I2C RM_LOCK_MODULE_VAL(0x001000, 0x00) +#define RM_LOCK_MODULES_GPS RM_LOCK_MODULE_VAL(0x001000, 0x01) +#define RM_LOCK_MODULES_SEC2 RM_LOCK_MODULE_VAL(0x001000, 0x02) +#define RM_LOCK_MODULES_THERM RM_LOCK_MODULE_VAL(0x001000, 0x03) +#define RM_LOCK_MODULES_INFOROM RM_LOCK_MODULE_VAL(0x001000, 0x04) + +#define RM_LOCK_MODULES_ISR RM_LOCK_MODULE_VAL(0x002000, 0x00) +#define RM_LOCK_MODULES_DPC RM_LOCK_MODULE_VAL(0x002000, 0x01) + +#define RM_LOCK_MODULES_INIT RM_LOCK_MODULE_VAL(0x004000, 0x00) +#define RM_LOCK_MODULES_STATE_LOAD RM_LOCK_MODULE_VAL(0x004000, 0x01) + +#define RM_LOCK_MODULES_STATE_UNLOAD RM_LOCK_MODULE_VAL(0x008000, 0x00) +#define RM_LOCK_MODULES_DESTROY RM_LOCK_MODULE_VAL(0x008000, 0x01) + +// +// ResServ lock flag translation +// +#define RM_LOCK_FLAGS_NONE 0 +#define RM_LOCK_FLAGS_NO_API_LOCK RS_LOCK_FLAGS_NO_TOP_LOCK +#define RM_LOCK_FLAGS_NO_CLIENT_LOCK RS_LOCK_FLAGS_NO_CLIENT_LOCK +#define RM_LOCK_FLAGS_NO_GPUS_LOCK RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1 +#define RM_LOCK_FLAGS_GPU_GROUP_LOCK RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2 +#define RM_LOCK_FLAGS_RM_SEMA RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3 + +// +// ResServ lock state translation +// +#define RM_LOCK_STATES_NONE 0 +#define RM_LOCK_STATES_API_LOCK_ACQUIRED RS_LOCK_STATE_TOP_LOCK_ACQUIRED +#define RM_LOCK_STATES_GPUS_LOCK_ACQUIRED RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED +#define RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED +#define RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK +#define RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED +#define RM_LOCK_STATES_RM_SEMA_ACQUIRED RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED + +// +// ResServ lock release translation +// +#define RM_LOCK_RELEASE_API_LOCK RS_LOCK_RELEASE_TOP_LOCK +#define RM_LOCK_RELEASE_CLIENT_LOCK RS_LOCK_RELEASE_CLIENT_LOCK +#define RM_LOCK_RELEASE_GPUS_LOCK RS_LOCK_RELEASE_CUSTOM_LOCK_1 +#define RM_LOCK_RELEASE_GPU_GROUP_LOCK RS_LOCK_RELEASE_CUSTOM_LOCK_2 +#define RM_LOCK_RELEASE_RM_SEMA RS_LOCK_RELEASE_CUSTOM_LOCK_3 + +#endif // _RMAPI_H_ diff --git a/src/nvidia/inc/kernel/rmapi/rmapi_utils.h b/src/nvidia/inc/kernel/rmapi/rmapi_utils.h new file mode 100644 index 000000000..2bb6df47f --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/rmapi_utils.h @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
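As a worked illustration of the RM_LOCK_MODULE_VAL() encoding defined in rmapi.h above: the 24-bit group mask occupies bits 31..8 and the 8-bit subgroup id occupies bits 7..0, so, for example,

// RM_LOCK_MODULES_RC == RM_LOCK_MODULE_VAL(0x000100, 0x01)
//                    == ((0x000100 & 0xffffff) << 8) | (0x01 & 0xff)
//                    == 0x00010001
// and RM_LOCK_MODULE_GRP(0x00010001) recovers the group mask 0x000100.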
+ */ +#ifndef RMAPI_UTILS_H +#define RMAPI_UTILS_H + +#include "rmapi/rmapi.h" + +// +// Alloc a client, device and subdevice handle for a gpu +// +NV_STATUS +rmapiutilAllocClientAndDeviceHandles +( + RM_API *pRmApi, + OBJGPU *pGpu, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +); + +// +// Free client, device and subdevice handles +// +void +rmapiutilFreeClientAndDeviceHandles +( + RM_API *pRmApi, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +); + +// +// Return NV_TRUE if the given external class ID is an INTERNAL_ONLY class +// +NvBool rmapiutilIsExternalClassIdInternalOnly(NvU32 externalClassId); + +#endif /* RMAPI_UTILS_H */ diff --git a/src/nvidia/inc/kernel/rmapi/rs_utils.h b/src/nvidia/inc/kernel/rmapi/rs_utils.h new file mode 100644 index 000000000..203d11b9e --- /dev/null +++ b/src/nvidia/inc/kernel/rmapi/rs_utils.h @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RS_UTILS_H_ +#define _RS_UTILS_H_ + +/** + * @defgroup RsUtilities + * + * Provides convenience utilities for resserv. Utility functions provide + * abstractions that take handles as inputs -- helpful for legacy code that + * passes hClient or hResource handles and not underlying objects. Desire + * is for pClient and RsResourceRef types to be used for new code instead of + * passing handles around and this utility module phased out. + * + * @{ + */ + +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" + +#include "rmapi/client.h" + +#include "containers/list.h" + +MAKE_LIST(ClientHandlesList, NvHandle); + +#define serverutilGetDerived(pRmClient, hResource, ppBaseRes, type) \ + (clientGetResource(staticCast((pRmClient), RsClient), \ + (hResource), \ + classId(type), \ + (ppBaseRes)) != NV_OK) \ + ? 
NULL \ + : dynamicCast(*(ppBaseRes), type) + +/** + * Get the reference to a resource + * @param[in] hClient Client handle + * @param[in] hResource The resource to lookup + * @param[out] ppResourceRef The reference to the resource + */ +NV_STATUS serverutilGetResourceRef(NvHandle hClient, NvHandle hObject, + RsResourceRef **ppResourceRef); + +/** + * Get the reference to a resource (with a type check) + * @param[in] hClient Client handle + * @param[in] hResource The resource to lookup + * @param[out] ppResourceRef The reference to the resource + */ +NV_STATUS serverutilGetResourceRefWithType(NvHandle hClient, NvHandle hObject, + NvU32 internalClassId, RsResourceRef **ppResourceRef); + +/** + * Get the reference to a resource (with a type and parent check) + * @param[in] hClient Client handle + * @param[in] hResource The resource to lookup + * @param[out] ppResourceRef The reference to the resource + */ +NV_STATUS serverutilGetResourceRefWithParent(NvHandle hClient, NvHandle hParent, NvHandle hObject, + NvU32 internalClassId, RsResourceRef **ppResourceRef); + +/** + * Find the first child object of given type + */ +RsResourceRef *serverutilFindChildRefByType(NvHandle hClient, NvHandle hParent, NvU32 internalClassId, NvBool bExactMatch); + + +/** + * Get an iterator to the elements in the client's resource map + * + * See clientRefIter for documentation on hScopedObject and iterType + */ +RS_ITERATOR serverutilRefIter(NvHandle hClient, NvHandle hScopedObject, NvU32 internalClassId, RS_ITER_TYPE iterType, NvBool bExactMatch); + +/** + * Get an iterator to the elements in the server's shared object map + */ +RS_SHARE_ITERATOR serverutilShareIter(NvU32 internalClassId); + +/** + * Get an iterator to the elements in the server's shared object map + */ +NvBool serverutilShareIterNext(RS_SHARE_ITERATOR* pIt); + +/** + * Validate that a given resource handle is well-formed and does not already + * exist under a given client. + */ +NvBool serverutilValidateNewResourceHandle(NvHandle, NvHandle); + +/** + * Generate an unused handle for a resource. The handle will be generated in the white-listed range that was + * specified when the client was allocated. + */ +NV_STATUS serverutilGenResourceHandle(NvHandle, NvHandle*); + +/** + * Get a client pointer from a client handle without taking any locks. + * + * @param[in] hClient The client to acquire + * @param[out] ppClient Pointer to the RmClient + */ +NV_STATUS serverutilGetClientUnderLock(NvHandle hClient, RmClient **ppClient); + +/** + * Get a client pointer from a client handle and lock it. + * + * @param[in] hClient The client to acquire + * @param[in] access LOCK_ACCESS_* + * @param[out] ppClient Pointer to the RmClient + */ +NV_STATUS serverutilAcquireClient(NvHandle hClient, LOCK_ACCESS_TYPE access, RmClient **ppClient); + +/** + * Unlock a client + * + * @param[in] access LOCK_ACCESS_* + * @param[in] pClient Pointer to the RmClient + */ +void serverutilReleaseClient(LOCK_ACCESS_TYPE access, RmClient *pClient); + +/** + * Get the first valid client pointer in resource server without taking any locks. + */ +RmClient **serverutilGetFirstClientUnderLock(void); + +/** + * Get the next valid client pointer in resource server without taking any locks. + * + * @param[in] ppClient Pointer returned by a previous call to + * serverutilGetFirstClientUnderLock or + * serverutilGetNextClientUnderLock + */ +RmClient **serverutilGetNextClientUnderLock(RmClient **pClient); + +/*! 
+ * @brief Retrieve all hClients allocated for the given (ProcID, SubProcessID) + * + * This function iterates through all the clients in the resource server and finds + * hClients allocated for the given (ProcID, SubProcessID) and returns them to + * the caller. + * + * @param[in] procID Process ID + * @param[in] subProcessID SubProcess ID + * @param[out] pClientList List in which the client handles are returned + * + * @return NV_STATUS + */ +NV_STATUS serverutilGetClientHandlesFromPid(NvU32 procID, NvU32 subProcessID, ClientHandlesList *pClientList); + +/** + * This is a filtering function intended to be used with refFindCpuMappingWithFilter. + * This filter will only match user mappings belonging to the current process. + * + * @param[in] ppMapping The mapping that is being filtered + */ +NvBool serverutilMappingFilterCurrentUserProc(RsCpuMapping *ppMapping); + +/** + * This is a filtering function intended to be used with refFindCpuMappingWithFilter. + * This filter will only match kernel mappings. + * + * @param[in] ppMapping The mapping that is being filtered + */ +NvBool serverutilMappingFilterKernel(RsCpuMapping *ppMapping); + +#endif diff --git a/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h b/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h new file mode 100644 index 000000000..2a07abc3a --- /dev/null +++ b/src/nvidia/inc/kernel/virtualization/hypervisor/hypervisor.h @@ -0,0 +1,3 @@ + +#include "g_hypervisor_nvoc.h" + diff --git a/src/nvidia/inc/lib/base_utils.h b/src/nvidia/inc/lib/base_utils.h new file mode 100644 index 000000000..27ab7b1a3 --- /dev/null +++ b/src/nvidia/inc/lib/base_utils.h @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef BASE_UTILS_H +#define BASE_UTILS_H + +#include "nvtypes.h" + +/*! + * @file + * @brief Various helper utility functions that have no other home. + */ + +NvU32 nvLogBase2(NvU64); + +// bit field helper functions +NvU32 nvBitFieldLSZero(NvU32 *, NvU32); +NvU32 nvBitFieldMSZero(NvU32 *, NvU32); +NvBool nvBitFieldTest(NvU32 *, NvU32, NvU32); +void nvBitFieldSet(NvU32 *, NvU32, NvU32, NvBool); + +// +// Sort an array of n elements/structures. +// Example: +// NvBool integerLess(void * a, void * b) +// { +// return *(int *)a < *(int *)b; +// } +// int array[1000]; +// ... 
+// nvMergeSort(array, sizeof(array)/sizeof(*array), sizeof(*array), integerLess); +// +void nvMergeSort(void * array, NvU32 n, void * tempBuffer, NvU32 elementSize, NvBool (*less)(void *, void *)); + +// +#define BASE10 (10) +#define BASE16 (16) + +// Do not conflict with libc naming +NvS32 nvStrToL(NvU8* pStr, NvU8** pEndStr, NvS32 base, NvU8 stopChar, NvU32 *numFound); + +// +// Returns bit mask of most significant bit of input +// +NvU64 nvMsb64(NvU64); + +// +// Converts unsigned long int to string +// +char * nvU32ToStr(NvU32 value, char *string, NvU32 radix); + +// +// Find the string length +// +NvU32 nvStringLen(const char * str); + +#endif // BASE_UTILS_H diff --git a/src/nvidia/inc/lib/protobuf/prb.h b/src/nvidia/inc/lib/protobuf/prb.h new file mode 100644 index 000000000..fb1f51f7a --- /dev/null +++ b/src/nvidia/inc/lib/protobuf/prb.h @@ -0,0 +1,358 @@ +/* + * Lightweight protocol buffers. + * + * Based on code taken from + * https://code.google.com/archive/p/lwpb/source/default/source + * + * The code there is licensed as Apache 2.0. However, NVIDIA has received the + * code from the original author under MIT license terms. + * + * + * Copyright 2009 Simon Kallweit + * Copyright 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
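Referring back to base_utils.h above: the declared prototype of nvMergeSort() takes a caller-supplied temporary buffer as its third argument, which the usage comment in that header omits. A corrected sketch follows, assuming the scratch buffer must be at least as large as the array being sorted.

// Hypothetical sketch matching the declared nvMergeSort() prototype.
static NvBool integerLess(void *a, void *b)
{
    return *(int *)a < *(int *)b;
}

int array[1000];
int temp[1000];   // caller-supplied scratch space (size is an assumption)
// ... fill array ...
nvMergeSort(array, sizeof(array) / sizeof(*array), temp,
            sizeof(*array), integerLess);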
+ */ + +#ifndef __PRB_H__ +#define __PRB_H__ + +// Make sure the generated files can see rmconfig.h + +#ifndef _RMCFG_H +#include "rmconfig.h" +#endif + +// Maximum depth of message embedding +#ifndef PRB_MAX_DEPTH +#define PRB_MAX_DEPTH 8 +#endif + +// Maximum number of required fields in a message +#ifndef PRB_MAX_REQUIRED_FIELDS +#define PRB_MAX_REQUIRED_FIELDS 16 +#endif + +// Provide enum names as strings +#ifndef PRB_ENUM_NAMES +#define PRB_ENUM_NAMES 0 +#endif + +#if PRB_ENUM_NAMES +#define PRB_MAYBE_ENUM_NAME(n) n, +#else +#define PRB_MAYBE_ENUM_NAME(n) +#endif + +// Provide field names as strings +#ifndef PRB_FIELD_NAMES +#define PRB_FIELD_NAMES 0 +#endif + +#if PRB_FIELD_NAMES +#define PRB_MAYBE_FIELD_NAME(n) n, +#else +#define PRB_MAYBE_FIELD_NAME(n) +#endif + +// Provide field default values +#ifndef PRB_FIELD_DEFAULTS +#define PRB_FIELD_DEFAULTS 0 +#endif + +#if PRB_FIELD_DEFAULTS +#define PRB_MAYBE_FIELD_DEFAULT_DEF(n) n +#define PRB_MAYBE_FIELD_DEFAULT(n) n, +#else +#define PRB_MAYBE_FIELD_DEFAULT_DEF(n) +#define PRB_MAYBE_FIELD_DEFAULT(n) +#endif + +// Provide message names as strings +#ifndef PRB_MESSAGE_NAMES +#define PRB_MESSAGE_NAMES 0 +#endif + +#if PRB_MESSAGE_NAMES +#define PRB_MAYBE_MESSAGE_NAME(n) n, +#else +#define PRB_MAYBE_MESSAGE_NAME(n) +#endif + +// Provide method names as strings +#ifndef PRB_METHOD_NAMES +#define PRB_METHOD_NAMES 0 +#endif + +#if PRB_METHOD_NAMES +#define PRB_MAYBE_METHOD_NAME(n) n, +#else +#define PRB_MAYBE_MESSAGE_NAME(n) +#endif + +// Provide service names as strings +#ifndef PRB_SERVICE_NAMES +#define PRB_SERVICE_NAMES 0 +#endif + +#if PRB_SERVICE_NAMES +#define PRB_MAYBE_SERVICE_NAME(n) n, +#else +#define PRB_MAYBE_SERVICE_NAME(n) +#endif + +// Field labels +#define PRB_REQUIRED 0 +#define PRB_OPTIONAL 1 +#define PRB_REPEATED 2 + +// Field value types +#define PRB_DOUBLE 0 +#define PRB_FLOAT 1 +#define PRB_INT32 2 +#define PRB_INT64 3 +#define PRB_UINT32 4 +#define PRB_UINT64 5 +#define PRB_SINT32 6 +#define PRB_SINT64 7 +#define PRB_FIXED32 8 +#define PRB_FIXED64 9 +#define PRB_SFIXED32 10 +#define PRB_SFIXED64 11 +#define PRB_BOOL 12 +#define PRB_ENUM 13 +#define PRB_STRING 14 +#define PRB_BYTES 15 +#define PRB_MESSAGE 16 + +// Field flags +#define PRB_HAS_DEFAULT (1 << 0) +#define PRB_IS_PACKED (1 << 1) +#define PRB_IS_DEPRECATED (1 << 2) + +typedef struct +{ + unsigned int label : 2; + unsigned int typ : 6; + unsigned int flags : 8; +} PRB_FIELD_OPTS; + +// Protocol buffer wire types +typedef enum +{ + WT_VARINT = 0, + WT_64BIT = 1, + WT_STRING = 2, + WT_32BIT = 5 +} WIRE_TYPE; + +// Protocol buffer wire values +typedef union +{ + NvU64 varint; + NvU64 int64; + struct { + NvU64 len; + const void *data; + } string; + NvU32 int32; +} WIRE_VALUE; + +typedef struct +{ + const char *str; + NvU32 len; +} PRB_VALUE_STRING; + +typedef struct +{ + NvU8 *data; + NvU32 len; +} PRB_VALUE_BYTES; + +typedef struct +{ + void *data; + NvU32 len; +} PRB_VALUE_MESSAGE; + +typedef union +{ + NvF64 double_; + NvF32 float_; + NvS32 int32; + NvS64 int64; + NvU32 uint32; + NvU64 uint64; + NvBool bool_; + PRB_VALUE_STRING string; + PRB_VALUE_BYTES bytes; + PRB_VALUE_MESSAGE message; + int enum_; + int null; +} PRB_VALUE; + +typedef struct +{ + int value; +#if PRB_ENUM_NAMES + const char *name; +#endif +} PRB_ENUM_MAPPING; + +typedef struct +{ + const PRB_ENUM_MAPPING *mappings; + NvU32 count; +#if PRB_ENUM_NAMES + const char *name; +#endif +} PRB_ENUM_DESC; + +struct PRB_MSG_DESC; + +//* Protocol buffer field descriptor +typedef struct PRB_FIELD_DESC +{ + 
NvU32 number; + PRB_FIELD_OPTS opts; + const struct PRB_MSG_DESC *msg_desc; + const PRB_ENUM_DESC *enum_desc; +#if PRB_FIELD_NAMES + const char *name; +#endif +#if PRB_FIELD_DEFAULTS + const PRB_VALUE *def; +#endif +} PRB_FIELD_DESC; + +//* Protocol buffer message descriptor +typedef struct PRB_MSG_DESC +{ + NvU32 num_fields; + const PRB_FIELD_DESC *fields; +#if PRB_MESSAGE_NAMES + const char *name; +#endif +} PRB_MSG_DESC; + +// Forward declaration +struct PRB_SERVICE_DESC; + +// Protocol buffer method descriptor +struct PRB_METHOD_DESC +{ + const struct PRB_SERVICE_DESC *service; + const PRB_MSG_DESC *req_desc; + const PRB_MSG_DESC *res_desc; +#if PRB_METHOD_NAMES + const char *name; +#endif +}; + +// Protocol buffer service descriptor +typedef struct PRB_SERVICE_DESC +{ + const NvU32 num_methods; + const struct PRB_METHOD_DESC *methods; +#if PRB_SERVICE_NAMES + const char *name; +#endif +} PRB_SERVICE_DESC; + +// Simple memory buffer +typedef struct +{ + NvU8 *base; + NvU8 *pos; + NvU8 *end; +} PRB_BUF; + +// Encoder interface +typedef struct +{ + PRB_BUF buf; + const PRB_FIELD_DESC *field_desc; + const PRB_MSG_DESC *msg_desc; +} PRB_ENCODER_STACK_FRAME; + +typedef NV_STATUS PrbBufferCallback(void *pEncoder, NvBool bBufferFull); + +typedef struct +{ + PRB_ENCODER_STACK_FRAME stack[PRB_MAX_DEPTH]; + PrbBufferCallback *pBufferCallback; + int depth; + int flags; +} PRB_ENCODER; + +// flags +#define PRB_ENCODE_DISABLED 0x01 +#define PRB_BUFFER_ALLOCATED 0x02 +#define PRB_PACKED_FRAME 0x04 +#define PRB_STUBBED_FIELD 0x08 +#define PRB_COUNT_ONLY 0x10 +#define PRB_FIXED_MODE 0x20 + +// Slop to allow for message headers, etc. +#define PRB_MSG_OVERHEAD 32 + +void prbEncStart(PRB_ENCODER *encoder, const PRB_MSG_DESC *msg_desc, + void *data, NvU32 len, PrbBufferCallback *pBufferCallback); +NV_STATUS prbEncStartAlloc(PRB_ENCODER *encoder, + const PRB_MSG_DESC *msg_desc, NvU32 len, + PrbBufferCallback *pBufferCallback); +void prbEncStartCount(PRB_ENCODER *encoder, + const PRB_MSG_DESC *msg_desc, NvU32 len); +void prbFreeAllocatedBuffer(PRB_ENCODER *encoder); +NvU32 prbEncFinish(PRB_ENCODER *encoder, void **buff); +NV_STATUS prbEncNestedStart(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc); +NV_STATUS prbEncNestedEnd(PRB_ENCODER *encoder); +NV_STATUS prbEncStubbedAddBytes(PRB_ENCODER *encoder, NvU8 *buffer, + NvU32 len); +NV_STATUS prbEncAddInt32(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvS32 int32); +NV_STATUS prbEncAddUInt32(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvU32 uint32); +NV_STATUS prbEncAddInt64(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvS64 int64); +NV_STATUS prbEncAddUInt64(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvU64 uint64); +NV_STATUS prbEncAddBool(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvBool boolVal); +NV_STATUS prbEncAddEnum(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + int enum_); +NV_STATUS prbEncAddString(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + const char *str); +NV_STATUS prbEncAddBytes(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + const NvU8 *data, NvU32 len); +NV_STATUS prbEncCatMsg(PRB_ENCODER *encoder, + void *pMsg, NvU32 len); +NvU32 prbEncBufLeft(PRB_ENCODER *encoder); + +#endif // __PRB_H__ diff --git a/src/nvidia/inc/lib/protobuf/prb_util.h b/src/nvidia/inc/lib/protobuf/prb_util.h new file mode 100644 index 000000000..d8f2fb483 --- /dev/null +++ b/src/nvidia/inc/lib/protobuf/prb_util.h @@ -0,0 +1,92 @@ +/* + * 
SPDX-FileCopyrightText: Copyright (c) 2000-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef PRB_UTIL_H +#define PRB_UTIL_H + +#include "kernel/diagnostics/journal_structs.h" +#include "kernel/gpu/gpu.h" + +#include "lib/protobuf/prb.h" +#include "nvstatus.h" +#include "nvtypes.h" + +typedef struct +{ + NvU32 offset; + NvU32 numRegs; + NvU32 stride; // Ignored if numRegs = 1 +} PRB_GPU_REG_TABLE; + +// Helper macro to help construct PRB_GPU_REG_TABLE entries from indexed registers. +#define PRB_GPU_REG_TBL_ENTRY(r) { r(0), r##__SIZE_1, r(1) - r(0) } + +// Helper macro to help construct PRB_GPU_REG_TABLE entries from indexed registers that are offset from a base register +#define PRB_GPU_REG_TBL_ENTRY_OFFSET(r, base) { r(0) + (base), r##__SIZE_1, r(1) - r(0) } + +typedef struct +{ + NvU32 offset; + NvU32 numRegs; + NvU32 stride; + NvU32 ilen; +} PRB_GPU_REG_INDEXED_TABLE; + +// Helper macro to help construct PRB_GPU_REG_INDEXED_TABLE entries +#define PRB_GRI_TBL_ENTRY(r, n, s) { r(0), n, s, r(1) - r(0) } + +// Macro that always returns 1. +// +// Used to mcheck registers and count them at the same time. Useful for passing +// counts into prbEncGpuRegs, etc. Note that this macro does NOT assert that +// the registers are adjacent. +// +#define PRB_COUNT_REG(r) (0?(r):1) + +// +// Worst case overhead per regs and mem message. +// Includes everything but the actual register to allow calculations +// for packed structures. 
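+//
+// For orientation, a minimal sketch of how the encoder declared in prb.h is
+// typically driven (illustrative only: PFOO_MSG, PFOO_INNER and
+// PFOO_INNER_VALUE stand in for descriptors normally produced by the
+// protobuf code generator, and buffer ownership/cleanup is not shown):
+//
+//     PRB_ENCODER enc;
+//     void       *pBuf = NULL;
+//     NvU32       len;
+//
+//     if (prbEncStartAlloc(&enc, PFOO_MSG, 256 + PRB_MSG_OVERHEAD, NULL) == NV_OK)
+//     {
+//         prbEncNestedStart(&enc, PFOO_INNER);         // open a sub-message field
+//         prbEncAddUInt32(&enc, PFOO_INNER_VALUE, 42); // append a scalar field
+//         prbEncNestedEnd(&enc);
+//         len = prbEncFinish(&enc, &pBuf);             // len bytes encoded at pBuf
+//     }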
+// +#define PRB_REGS_MSG_OVERHEAD 13 + +NV_STATUS prbEncGpuRegs(OBJGPU *pGpu, IO_APERTURE *pAperture, NvU64 offset, NvU32 numEntries, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncGpuRegImm(OBJGPU *pGpu, NvU64 offset, NvU32 reg, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncGpuRegOffset(OBJGPU *pGpu, IO_APERTURE *pAperture, const NvU32 *pOffset, NvU32 numEntries, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncGpuRegSliceOffset(OBJGPU *pGpu, IO_APERTURE *pAperture, const NvU32 *pOffset, NvU32 numEntries, NvU32 base, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncGpuRegTbl(OBJGPU *pGpu, IO_APERTURE *pAperture, const PRB_GPU_REG_TABLE *pTbl, NvU32 numEntries, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncGpuRegSliceTbl(OBJGPU *pGpu, IO_APERTURE *pAperture, const PRB_GPU_REG_TABLE *pTbl, NvU32 numEntries, NvU32 base, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncGpuRegSliceIndexedTbl(OBJGPU *pGpu, IO_APERTURE *pAperture, const PRB_GPU_REG_INDEXED_TABLE *pTbl, NvU32 numEntries, NvU32 base, NvU32 index, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncMem(NvU64 offset, NvU32 numRegs, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncInstMem(NvU64 physAddr, NvU8 *pVirtAddr, NvU32 numWords, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncInstMemImm(NvU64 physAddr, NvU32 data, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncPciConfigRegs(OBJGPU *pGpu, NvU64 index, NvU32 numRegs, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncPciReadDword(void *handle, NvU64 offset, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbEncGenExData(PRB_ENCODER *pPrbEnc, OBJGPU *pGpu, NvU32 chId, const PRB_FIELD_DESC *fieldDesc); +NV_STATUS prbWrapAndQueue(PRB_ENCODER *pPrbEnc, RMERRORHEADER *pErrorHeader, RMCD_ERROR_BLOCK **); +NV_STATUS prbSetupDclMsg(PRB_ENCODER *pPrbEnc, NvU32 len, const PRB_FIELD_DESC *fieldDesc); + +NV_STATUS prbEncUnwindNesting(PRB_ENCODER *pPrbEnc, NvU8 level); +static NV_FORCEINLINE NvU8 prbEncNestingLevel(PRB_ENCODER *pPrbEnc) { return pPrbEnc->depth; } + +#endif // PRB_UTIL_H diff --git a/src/nvidia/inc/lib/ref_count.h b/src/nvidia/inc/lib/ref_count.h new file mode 100644 index 000000000..c530d0ec3 --- /dev/null +++ b/src/nvidia/inc/lib/ref_count.h @@ -0,0 +1,3 @@ + +#include "g_ref_count_nvoc.h" + diff --git a/src/nvidia/inc/lib/zlib/inflate.h b/src/nvidia/inc/lib/zlib/inflate.h new file mode 100644 index 000000000..51f8b6486 --- /dev/null +++ b/src/nvidia/inc/lib/zlib/inflate.h @@ -0,0 +1,134 @@ +/* + Portions of this file are based on zlib. Subsequent additions by NVIDIA. + + Copyright (c) 2001-2021, NVIDIA CORPORATION. All rights reserved. + + zlib.h -- interface of the 'zlib' general purpose compression library + version 1.1.3, July 9th, 1998 + + Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. 
The origin of this software must not be misrepresented; you must not
+     claim that you wrote the original software. If you use this software
+     in a product, an acknowledgment in the product documentation would be
+     appreciated but is not required.
+  2. Altered source versions must be plainly marked as such, and must not be
+     misrepresented as being the original software.
+  3. This notice may not be removed or altered from any source distribution.
+
+  Jean-loup Gailly        Mark Adler
+  jloup@gzip.org          madler@alumni.caltech.edu
+
+  The data format used by the zlib library is described by RFCs (Request for
+  Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
+  (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
+*/
+#ifndef _INFLATE_H_
+#define _INFLATE_H_
+
+#include "nvos.h"
+
+#define NOMEMCPY 1
+
+typedef NvU8  uch;
+typedef NvU16 ush;
+typedef NvU32 ulg;
+
+#define GZ_SLIDE_WINDOW_SIZE 32768
+
+#define NEXTBYTE()  pGzState->inbuf[pGzState->inptr++]
+#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
+#define DUMPBITS(n) {b>>=(n);k-=(n);}
+
+/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
+#define BMAX 16         /* maximum bit length of any code (16 for explode) */
+#define N_MAX 288       /* maximum number of codes in any set */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+   that have 16-bit pointers (e.g. PC's in the small or medium model).
+   Valid extra bits are 0..13.  e == 15 is EOB (end of block), e == 16
+   means that v is a literal, 16 < e < 32 means that v is a pointer to
+   the next table, which codes e - 16 bits, and lastly e == 99 indicates
+   an unused code.  If a code with e == 99 is looked up, this implies an
+   error in the data. */
+struct huft {
+  uch e;                /* number of extra bits or operation */
+  uch b;                /* number of bits in this code or subcode */
+  union {
+    ush n;              /* literal, length base, or distance base */
+    struct huft *t;     /* pointer to next level of table */
+  } v;
+};
+
+/* The inflate algorithm uses a sliding 32K byte window on the uncompressed
+   stream to find repeated byte strings.  This is implemented here as a
+   circular buffer.  The index is updated simply by incrementing and then
+   and'ing with 0x7fff (32K-1). */
+/* It is left to other modules to supply the 32K area.  It is assumed
+   to be usable as if it were declared "uch slide[32768];" or as just
+   "uch *slide;" and then malloc'ed in the latter case.  The definition
+   must be in unzip.h, included above.
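+
+   For reference, the window-index wrap described above amounts to
+       w = (w + 1) & (WSIZE - 1);
+   i.e. masking with 0x7fff, where WSIZE == GZ_SLIDE_WINDOW_SIZE == 32768 is
+   defined just below.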
*/ +/* unsigned pGzState->wp; current position in slide */ +#define WSIZE GZ_SLIDE_WINDOW_SIZE +#define flush_output(w) (pGzState->wp=(w),flush_window(pGzState)) +#define Tracecv(A,B) +#define Tracevv(X) + +#define GZ_STATE_ITERATOR_OK 0 +#define GZ_STATE_ITERATOR_ERROR 1 +#define GZ_STATE_ITERATOR_END 2 + +#define GZ_STATE_HUFT_OK 0 +#define GZ_STATE_HUFT_INCOMP 1 +#define GZ_STATE_HUFT_ERROR 2 + +typedef struct { + unsigned int e; /* table entry flag/number of extra bits */ + unsigned int n, d; /* length and index for copy */ + unsigned int w; /* current window position */ + struct huft *t; /* pointer to table entry */ + ulg b; /* bit buffer */ + unsigned int k; /* number of bits in bit buffer */ + int continue_copy; /* last flush not finished*/ + unsigned int sn; /* used by inflated type 0 (stored) block */ +} GZ_INFLATE_CODES_STATE, *PGZ_INFLATE_CODES_STATE; + +typedef struct { + struct huft *tl; /* literal/length code table */ + struct huft *td; /* distance code table */ + NvS32 bl; /* lookup bits for tl */ + NvS32 bd; /* lookup bits for td */ + + NvU8 *inbuf,*outbuf; + NvU32 outBufSize; + NvU32 inptr,outptr; + NvU32 outLower,outUpper; + unsigned int wp; + unsigned int wp1; /* wp1 is index of first unflushed byte in slide window */ + unsigned int wp2; /* wp2 is index of last unflushed byte in slide window */ + uch *window; + + ulg bb; /* bit buffer */ + unsigned int bk; /* bits in bit buffer */ + int e; /* last block flag */ + + int newblock; /* start a new decompression block */ + NvU32 optSize; + GZ_INFLATE_CODES_STATE codesState; + +} GZ_INFLATE_STATE, *PGZ_INFLATE_STATE; + +NV_STATUS utilGzIterator(PGZ_INFLATE_STATE pGzState); +NV_STATUS utilGzAllocate(const NvU8 *zArray, NvU32 numTotalBytes, PGZ_INFLATE_STATE *ppGzState); +NvU32 utilGzGetData(PGZ_INFLATE_STATE pGzState, NvU32 offset, NvU32 size, NvU8 * outBuffer); +NV_STATUS utilGzDestroy(PGZ_INFLATE_STATE pGzState); + +#endif diff --git a/src/nvidia/inc/libraries/containers/btree.h b/src/nvidia/inc/libraries/containers/btree.h new file mode 100644 index 000000000..a463e0608 --- /dev/null +++ b/src/nvidia/inc/libraries/containers/btree.h @@ -0,0 +1,68 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _BTREE_H_ +#define _BTREE_H_ + +/*********************** Balanced Tree data structure **********************\ +* * +* Module: BTREE.H * +* API to BTREE routines. * +* * +\***************************************************************************/ + +// +// RED BLACK TREE structure. +// +#include "nvtypes.h" +#include "nvstatus.h" + +typedef struct NODE +{ + // public: + void *Data; + NvU64 keyStart; + NvU64 keyEnd; + + // private: + NvBool isRed; // !IsRed == IsBlack + struct NODE *parent; // tree links + struct NODE *left; + struct NODE *right; + +} NODE, *PNODE; + +//--------------------------------------------------------------------------- +// +// Function prototypes. +// +//--------------------------------------------------------------------------- + +NV_STATUS btreeInsert(PNODE, PNODE *); +NV_STATUS btreeUnlink(PNODE, PNODE *); +NV_STATUS btreeSearch(NvU64, PNODE *, PNODE); +NV_STATUS btreeEnumStart(NvU64, PNODE *, PNODE); +NV_STATUS btreeEnumNext(PNODE *, PNODE); +NV_STATUS btreeDestroyData(PNODE); +NV_STATUS btreeDestroyNodes(PNODE); + +#endif // _BTREE_H_ diff --git a/src/nvidia/inc/libraries/containers/eheap_old.h b/src/nvidia/inc/libraries/containers/eheap_old.h new file mode 100644 index 000000000..6c19e8b0e --- /dev/null +++ b/src/nvidia/inc/libraries/containers/eheap_old.h @@ -0,0 +1,116 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _EHEAP_H_ +#define _EHEAP_H_ + +/*! + * @brief + * EHEAP is an extent allocator. It is just an abstract E(xtent)Heap. 
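+ *
+ * The heap object is used through the function pointers it carries; for
+ * orientation only (a sketch, not taken from any particular caller):
+ *
+ *     NvU64 freeBytes;
+ *     pHeap->eheapGetFree(pHeap, &freeBytes);   // bytes still unallocated
+ *     pHeap->eheapFree(pHeap, offset);          // release a prior allocation
+ *     pHeap->eheapDestruct(pHeap);
+ *
+ * Allocation goes through eheapAlloc(), whose full argument list is given by
+ * the EHeapAlloc typedef below.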
+ */ + +#include "nvtypes.h" +#include "nvos.h" +#include "containers/btree.h" +#include "utils/nvrange.h" + +typedef struct OBJEHEAP *POBJEHEAP; +typedef struct OBJEHEAP OBJEHEAP; + +typedef struct EMEMBLOCK *PEMEMBLOCK; +typedef struct EMEMBLOCK +{ + NvU64 begin; + NvU64 end; + NvU64 align; + NvU32 growFlags; + NvU32 refCount; + NvU32 owner; + NODE node; + PEMEMBLOCK prevFree; + PEMEMBLOCK nextFree; + PEMEMBLOCK prev; + PEMEMBLOCK next; + void *pData; +} EMEMBLOCK; + +typedef NvBool EHeapOwnershipComparator(void*, void*); + +typedef NV_STATUS (*EHeapDestruct)(POBJEHEAP); +typedef NV_STATUS (*EHeapAlloc)(POBJEHEAP, NvU32, NvU32 *, NvU64 *, NvU64 *, NvU64 , NvU64, PEMEMBLOCK*, void*, EHeapOwnershipComparator*); +typedef NV_STATUS (*EHeapFree)(POBJEHEAP, NvU64); +typedef void (*EHeapInfo)(POBJEHEAP, NvU64 *, NvU64 *,NvU64 *, NvU64 *, NvU32 *, NvU64 *); +typedef void (*EHeapInfoForRange)(POBJEHEAP, NV_RANGE, NvU64 *, NvU64 *, NvU32 *, NvU64 *); +typedef NV_STATUS (*EHeapGetSize)(POBJEHEAP, NvU64 *); +typedef NV_STATUS (*EHeapGetFree)(POBJEHEAP, NvU64 *); +typedef NV_STATUS (*EHeapGetBase)(POBJEHEAP, NvU64 *); +typedef PEMEMBLOCK (*EHeapGetBlock)(POBJEHEAP, NvU64, NvBool bReturnFreeBlock); +typedef NV_STATUS (*EHeapSetAllocRange)(POBJEHEAP, NvU64 rangeLo, NvU64 rangeHi); +typedef NV_STATUS (*EHeapTraversalFn)(POBJEHEAP, void *pEnv, PEMEMBLOCK, NvU32 *pContinue, NvU32 *pInvalCursor); +typedef NV_STATUS (*EHeapTraverse)(POBJEHEAP, void *pEnv, EHeapTraversalFn, NvS32 direction); +typedef NvU32 (*EHeapGetNumBlocks)(POBJEHEAP); +typedef NV_STATUS (*EHeapGetBlockInfo)(POBJEHEAP, NvU32, NVOS32_HEAP_DUMP_BLOCK *); +typedef NV_STATUS (*EHeapSetOwnerIsolation)(POBJEHEAP, NvBool bEnable, NvU32 granularity); + +struct OBJEHEAP +{ + // Public heap interface methods + EHeapDestruct eheapDestruct; + EHeapAlloc eheapAlloc; + EHeapFree eheapFree; + EHeapInfo eheapInfo; + EHeapInfoForRange eheapInfoForRange; + EHeapGetSize eheapGetSize; + EHeapGetFree eheapGetFree; + EHeapGetBase eheapGetBase; + EHeapGetBlock eheapGetBlock; + EHeapSetAllocRange eheapSetAllocRange; + EHeapTraverse eheapTraverse; + EHeapGetNumBlocks eheapGetNumBlocks; + EHeapGetBlockInfo eheapGetBlockInfo; + EHeapSetOwnerIsolation eheapSetOwnerIsolation; + + // private data + NvU64 base; + NvU64 total; + NvU64 free; + NvU64 rangeLo; + NvU64 rangeHi; + NvBool bOwnerIsolation; + NvU32 ownerGranularity; + PEMEMBLOCK pBlockList; + PEMEMBLOCK pFreeBlockList; + NvU32 memHandle; + NvU32 numBlocks; + NvU32 sizeofMemBlock; + PNODE pBlockTree; + // user can specify num of EMEMBLOCK structs to + // be allocated at heap construction time so that + // we will not call portMemAllocNonPaged during eheapAlloc. + NvU32 numPreAllocMemStruct; + PEMEMBLOCK pFreeMemStructList; + PEMEMBLOCK pPreAllocAddr; +}; + +extern void constructObjEHeap(POBJEHEAP, NvU64, NvU64, NvU32, NvU32); + +#endif // _EHEAP_H_ diff --git a/src/nvidia/inc/libraries/containers/list.h b/src/nvidia/inc/libraries/containers/list.h new file mode 100644 index 000000000..1dc0ab7bf --- /dev/null +++ b/src/nvidia/inc/libraries/containers/list.h @@ -0,0 +1,331 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_LIST_H_ +#define _NV_CONTAINERS_LIST_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" + +/** + * @defgroup NV_CONTAINERS_LIST List + * + * @brief List (sequence) of user-defined values. + * + * @details Order of values is not necessarily increasing or sorted, but order is + * preserved across mutation. Please see + * http://en.wikipedia.org/wiki/Sequence for a formal definition. + * + * The provided interface is abstract, decoupling the user from the underlying + * list implementation. Two options are available with regard to memory + * management, intrusive and non-intrusive. Users can select either one based + * on different situations. Despite the two versions of the list, the following + * implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(1), + * * Unless stated otherwise. + * + * - Memory Usage: + * * \b O(N) memory is required for N values. + * * Intrusive and non-intrusive variants are provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. + */ + +#define MAKE_LIST(listTypeName, dataType) \ + typedef union listTypeName##Iter \ + { \ + dataType *pValue; \ + ListIterBase iter; \ + } listTypeName##Iter; \ + typedef union listTypeName \ + { \ + NonIntrusiveList real; \ + CONT_TAG_TYPE(ListBase, dataType, listTypeName##Iter); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } listTypeName + +#define DECLARE_LIST(listTypeName) \ + typedef union listTypeName##Iter listTypeName##Iter; \ + typedef union listTypeName listTypeName + +#define MAKE_INTRUSIVE_LIST(listTypeName, dataType, node) \ + typedef union listTypeName##Iter \ + { \ + dataType *pValue; \ + ListIterBase iter; \ + } listTypeName##Iter; \ + typedef union listTypeName \ + { \ + IntrusiveList real; \ + CONT_TAG_TYPE(ListBase, dataType, listTypeName##Iter); \ + CONT_TAG_INTRUSIVE(dataType, node); \ + } listTypeName \ + +#define DECLARE_INTRUSIVE_LIST(listTypeName) \ + typedef union listTypeName##Iter listTypeName##Iter; \ + typedef union listTypeName listTypeName + +/** +* @brief Internal node structure to embed within intrusive list values. 
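+*
+* A minimal sketch of the intended embedding (type names here are
+* illustrative only). Per the CONT_TAG_INTRUSIVE note in
+* containers/type_safety.h, the embedded node should not sit at offset 0 of
+* the value type:
+*
+*     typedef struct MyItem
+*     {
+*         NvU32    payload;
+*         ListNode node;      // link storage used by the intrusive list
+*     } MyItem;
+*
+*     MAKE_INTRUSIVE_LIST(MyItemList, MyItem, node);
+*
+*     MyItemList list;
+*     MyItem     item;
+*     listInitIntrusive(&list);
+*     listAppendExisting(&list, &item);   // item memory stays caller-owned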
+*/ +typedef struct ListNode ListNode; + +/** + * @brief Base type common to both intrusive and non-intrusive variants. + */ +typedef struct ListBase ListBase; + +/** + * @brief Non-intrusive list (container-managed memory). + */ +typedef struct NonIntrusiveList NonIntrusiveList; + +/** + * @brief Intrusive list (user-managed memory). + */ +typedef struct IntrusiveList IntrusiveList; + +/** + * @brief Iterator over a range of list values. + * + * See @ref iterators for usage details. + */ +typedef struct ListIterBase ListIterBase; + +struct ListNode +{ + /// @privatesection + ListNode *pPrev; + ListNode *pNext; +#if PORT_IS_CHECKED_BUILD + ListBase *pList; +#endif +}; + +struct ListIterBase +{ + void *pValue; + ListBase *pList; + ListNode *pNode; + ListNode *pLast; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +ListIterBase listIterRange_IMPL(ListBase *pList, void *pFirst, void *pLast); +CONT_VTABLE_DECL(ListBase, ListIterBase); + +struct ListBase +{ + CONT_VTABLE_FIELD(ListBase); + ListNode *pHead; + ListNode *pTail; + NvU32 count; + NvS32 nodeOffset; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +struct NonIntrusiveList +{ + ListBase base; + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; +}; + +struct IntrusiveList +{ + ListBase base; +}; + +#define listInit(pList, pAllocator) \ + listInit_IMPL(&((pList)->real), pAllocator, sizeof(*(pList)->valueSize)) + +#define listInitIntrusive(pList) \ + listInitIntrusive_IMPL(&((pList)->real), sizeof(*(pList)->nodeOffset)) + +#define listDestroy(pList) \ + CONT_DISPATCH_ON_KIND(pList, \ + listDestroy_IMPL((NonIntrusiveList*)&((pList)->real)), \ + listDestroyIntrusive_IMPL(&((pList)->real.base)), \ + contDispatchVoid_STUB()) + +#define listCount(pList) \ + listCount_IMPL(&((pList)->real).base) + +#define listInsertNew(pList, pNext) \ + CONT_CAST_ELEM(pList, \ + listInsertNew_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext))) + +#define listAppendNew(pList) \ + CONT_CAST_ELEM(pList, listAppendNew_IMPL(&(pList)->real)) + +#define listPrependNew(pList) \ + CONT_CAST_ELEM(pList, listPrependNew_IMPL(&(pList)->real)) + +#define listInsertValue(pList, pNext, pValue) \ + CONT_CAST_ELEM(pList, \ + listInsertValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext), \ + CONT_CHECK_ARG(pList, pValue))) + +#define listAppendValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listAppendValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listPrependValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listPrependValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listInsertExisting(pList, pNext, pValue) \ + listInsertExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pNext), \ + CONT_CHECK_ARG(pList, pValue)) + +#define listAppendExisting(pList, pValue) \ + listAppendExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listPrependExisting(pList, pValue) \ + listPrependExisting_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listRemove(pList, pValue) \ + CONT_DISPATCH_ON_KIND(pList, \ + listRemove_IMPL((NonIntrusiveList*)&((pList)->real), \ + CONT_CHECK_ARG(pList, pValue)), \ + listRemoveIntrusive_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue)), \ + contDispatchVoid_STUB()) + +#define listRemoveFirstByValue(pList, pValue) \ + listRemoveFirstByValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue)) + +#define listRemoveAllByValue(pList, pValue) \ + listRemoveAllByValue_IMPL(&(pList)->real, \ + 
CONT_CHECK_ARG(pList, pValue)) + +#define listClear(pList) \ + listDestroy(pList) + +#define listFindByValue(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listFindByValue_IMPL(&(pList)->real, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listHead(pList) \ + CONT_CAST_ELEM(pList, listHead_IMPL(&((pList)->real).base)) + +#define listTail(pList) \ + CONT_CAST_ELEM(pList, listTail_IMPL(&((pList)->real).base)) + +#define listNext(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listNext_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listPrev(pList, pValue) \ + CONT_CAST_ELEM(pList, \ + listPrev_IMPL(&((pList)->real).base, \ + CONT_CHECK_ARG(pList, pValue))) + +#define listIterAll(pList) \ + listIterRange(pList, listHead(pList), listTail(pList)) + +#define listIterRange(pList, pFirst, pLast) \ + CONT_ITER_RANGE(pList, &listIterRange_IMPL, \ + CONT_CHECK_ARG(pList, pFirst), CONT_CHECK_ARG(pList, pLast)) + +#define listIterNext(pIt) \ + listIterNext_IMPL(&((pIt)->iter)) + +void listInit_IMPL(NonIntrusiveList *pList, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize); +void listInitIntrusive_IMPL(IntrusiveList *pList, NvS32 nodeOffset); +void listDestroy_IMPL(NonIntrusiveList *pList); +void listDestroyIntrusive_IMPL(ListBase *pList); + +NvU32 listCount_IMPL(ListBase *pList); +void *listInsertNew_IMPL(NonIntrusiveList *pList, void *pNext); +void *listAppendNew_IMPL(NonIntrusiveList *pList); +void *listPrependNew_IMPL(NonIntrusiveList *pList); +void *listInsertValue_IMPL(NonIntrusiveList *pList, void *pNext, void *pValue); +void *listAppendValue_IMPL(NonIntrusiveList *pList, void *pValue); +void *listPrependValue_IMPL(NonIntrusiveList *pList, void *pValue); +void listInsertExisting_IMPL(IntrusiveList *pList, void *pNext, void *pValue); +void listAppendExisting_IMPL(IntrusiveList *pList, void *pValue); +void listPrependExisting_IMPL(IntrusiveList *pList, void *pValue); +void listRemove_IMPL(NonIntrusiveList *pList, void *pValue); +void listRemoveIntrusive_IMPL(ListBase *pList, void *pValue); +void listRemoveFirstByValue_IMPL(NonIntrusiveList *pList, void *pValue); +void listRemoveAllByValue_IMPL(NonIntrusiveList *pList, void *pValue); + +void *listFindByValue_IMPL(NonIntrusiveList *pList, void *pValue); +void *listHead_IMPL(ListBase *pList); +void *listTail_IMPL(ListBase *pList); +void *listNext_IMPL(ListBase *pList, void *pValue); +void *listPrev_IMPL(ListBase *pList, void *pValue); + +ListIterBase listIterAll_IMPL(ListBase *pList); +ListIterBase listIterRange_IMPL(ListBase *pList, void *pFirst, void *pLast); +NvBool listIterNext_IMPL(ListIterBase *pIt); + +static NV_FORCEINLINE ListNode * +listValueToNode(ListBase *pList, void *pValue) +{ + if (NULL == pList) return NULL; + if (NULL == pValue) return NULL; + return (ListNode*)((NvU8*)pValue + pList->nodeOffset); +} + +static NV_FORCEINLINE void * +listNodeToValue(ListBase *pList, ListNode *pNode) +{ + if (NULL == pList) return NULL; + if (NULL == pNode) return NULL; + return (NvU8*)pNode - pList->nodeOffset; +} + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_LIST_H_ diff --git a/src/nvidia/inc/libraries/containers/map.h b/src/nvidia/inc/libraries/containers/map.h new file mode 100644 index 000000000..b5f20a4d2 --- /dev/null +++ b/src/nvidia/inc/libraries/containers/map.h @@ -0,0 +1,300 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_MAP_H_ +#define _NV_CONTAINERS_MAP_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +/** + * @defgroup NV_CONTAINERS_MAP Map + * + * @brief Map (ordered) from 64-bit integer keys to user-defined values. + * + * @details The provided interface is abstract, decoupling the user from the + * underlying ordered map implementation. Two options are available with regard + * to memory management, intrusive and non-intrusive. Users can select either + * one based on different situations. Despite the two versions of the map, + * the following implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(log N), + * * Unless stated otherwise, + * * Where N is the number of values in the map. + * + * - Memory Usage: + * * \b O(N) memory is required for N values. + * * Intrusive and non-intrusive variants are provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. + * + */ + +#define MAKE_MAP(mapTypeName, dataType) \ + typedef union mapTypeName##Iter \ + { \ + dataType *pValue; \ + MapIterBase iter; \ + } mapTypeName##Iter; \ + typedef union mapTypeName \ + { \ + NonIntrusiveMap real; \ + CONT_TAG_TYPE(MapBase, dataType, mapTypeName##Iter); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } mapTypeName + +#define DECLARE_MAP(mapTypeName) \ + typedef union mapTypeName##Iter mapTypeName##Iter; \ + typedef union mapTypeName mapTypeName + +#define MAKE_INTRUSIVE_MAP(mapTypeName, dataType, node) \ + typedef union mapTypeName##Iter \ + { \ + dataType *pValue; \ + MapIterBase iter; \ + } mapTypeName##Iter; \ + typedef union mapTypeName \ + { \ + IntrusiveMap real; \ + CONT_TAG_TYPE(MapBase, dataType, mapTypeName##Iter); \ + CONT_TAG_INTRUSIVE(dataType, node); \ + } mapTypeName + +#define DECLARE_INTRUSIVE_MAP(mapTypeName) \ + typedef union mapTypeName##Iter mapTypeName##Iter; \ + typedef union mapTypeName mapTypeName + +/** + * @brief Internal node structure to embed within intrusive map values. + */ +typedef struct MapNode MapNode; + +/** + * @brief Base type common to both intrusive and non-intrusive variants. 
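+ *
+ * For orientation, a brief non-intrusive usage sketch (names are
+ * illustrative; the allocator is assumed to come from NvPort):
+ *
+ *     MAKE_MAP(NvU32Map, NvU32);
+ *
+ *     NvU32Map map;
+ *     NvU32   *pValue;
+ *     mapInit(&map, portMemAllocatorGetGlobalNonPaged());
+ *     pValue = mapInsertNew(&map, 0x100);   // container allocates the value
+ *     if (pValue != NULL)
+ *         *pValue = 42;
+ *     pValue = mapFind(&map, 0x100);        // NULL when the key is absent
+ *     mapDestroy(&map);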
+ */ +typedef struct MapBase MapBase; + +/** + * @brief Non-intrusive map (container-managed memory). + */ +typedef struct NonIntrusiveMap NonIntrusiveMap; + +/** + * @brief Intrusive map (user-managed memory). + */ +typedef struct IntrusiveMap IntrusiveMap; + +/** + * @brief Iterator over a range of map values. + * + * See @ref iterators for usage details. + */ +typedef struct MapIterBase MapIterBase; + +struct MapNode +{ + /// @privatesection + NvU64 key; + MapNode *pParent; + MapNode *pLeft; + MapNode *pRight; + NvBool bIsRed; +#if PORT_IS_CHECKED_BUILD + MapBase *pMap; +#endif +}; + +struct MapIterBase +{ + void *pValue; + MapBase *pMap; + MapNode *pNode; + MapNode *pLast; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +MapIterBase mapIterRange_IMPL(MapBase *pMap, void *pFirst, void *pLast); +CONT_VTABLE_DECL(MapBase, MapIterBase); + +struct MapBase +{ + CONT_VTABLE_FIELD(MapBase); + MapNode *pRoot; + NvS32 nodeOffset; + NvU32 count; +#if PORT_IS_CHECKED_BUILD + NvU32 versionNumber; +#endif +}; + +struct NonIntrusiveMap +{ + MapBase base; + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; +}; + +struct IntrusiveMap +{ + MapBase base; +}; + +#define mapInit(pMap, pAllocator) \ + mapInit_IMPL(&((pMap)->real), pAllocator, sizeof(*(pMap)->valueSize)) + +#define mapInitIntrusive(pMap) \ + mapInitIntrusive_IMPL(&((pMap)->real), sizeof(*(pMap)->nodeOffset)) + +#define mapDestroy(pMap) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapDestroy_IMPL((NonIntrusiveMap*)&((pMap)->real)), \ + mapDestroyIntrusive_IMPL(&((pMap)->real.base)), \ + contDispatchVoid_STUB()) + +#define mapCount(pMap) \ + mapCount_IMPL(&((pMap)->real).base) + +#define mapKey(pMap, pValue) \ + mapKey_IMPL(&((pMap)->real).base, pValue) + +#define mapInsertNew(pMap, key) \ + CONT_CAST_ELEM(pMap, mapInsertNew_IMPL(&(pMap)->real, key)) + +#define mapInsertValue(pMap, key, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapInsertValue_IMPL(&(pMap)->real, key, \ + CONT_CHECK_ARG(pMap, pValue))) + +#define mapInsertExisting(pMap, key, pValue) \ + mapInsertExisting_IMPL(&(pMap)->real, key, \ + CONT_CHECK_ARG(pMap, pValue)) + +#define mapRemove(pMap, pValue) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapRemove_IMPL((NonIntrusiveMap*)&((pMap)->real), \ + CONT_CHECK_ARG(pMap, pValue)), \ + mapRemoveIntrusive_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue)), \ + contDispatchVoid_STUB()) + +#define mapClear(pMap) \ + mapDestroy(pMap) + +#define mapRemoveByKey(pMap, key) \ + CONT_DISPATCH_ON_KIND(pMap, \ + mapRemoveByKey_IMPL((NonIntrusiveMap*)&((pMap)->real), key), \ + mapRemoveByKeyIntrusive_IMPL(&((pMap)->real).base, key), \ + contDispatchVoid_STUB()) + +#define mapFind(pMap, key) \ + CONT_CAST_ELEM(pMap, mapFind_IMPL(&((pMap)->real).base, key)) + +#define mapFindGEQ(pMap, keyMin) \ + CONT_CAST_ELEM(pMap, \ + mapFindGEQ_IMPL(&((pMap)->real).base, keyMin)) + +#define mapFindLEQ(pMap, keyMax) \ + CONT_CAST_ELEM(pMap, \ + mapFindLEQ_IMPL(&((pMap)->real).base, keyMax)) + +#define mapNext(pMap, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapNext_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue))) + +#define mapPrev(pMap, pValue) \ + CONT_CAST_ELEM(pMap, \ + mapPrev_IMPL(&((pMap)->real).base, \ + CONT_CHECK_ARG(pMap, pValue))) + +#define mapIterAll(pMap) \ + mapIterRange(pMap, mapFindGEQ(pMap, 0), mapFindLEQ(pMap, NV_U64_MAX)) + +#define mapIterRange(pMap, pFirst, pLast) \ + CONT_ITER_RANGE(pMap, &mapIterRange_IMPL, \ + CONT_CHECK_ARG(pMap, pFirst), CONT_CHECK_ARG(pMap, pLast)) + +#define mapIterNext(pIt) \ + 
mapIterNext_IMPL(&((pIt)->iter)) + +void mapInit_IMPL(NonIntrusiveMap *pMap, + PORT_MEM_ALLOCATOR *pAllocator, NvU32 valueSize); +void mapInitIntrusive_IMPL(IntrusiveMap *pMap, NvS32 nodeOffset); +void mapDestroy_IMPL(NonIntrusiveMap *pMap); +void mapDestroyIntrusive_IMPL(MapBase *pMap); + +NvU32 mapCount_IMPL(MapBase *pMap); +NvU64 mapKey_IMPL(MapBase *pMap, void *pValue); + +void *mapInsertNew_IMPL(NonIntrusiveMap *pMap, NvU64 key); +void *mapInsertValue_IMPL(NonIntrusiveMap *pMap, NvU64 key, void *pValue); +NvBool mapInsertExisting_IMPL(IntrusiveMap *pMap, NvU64 key, void *pValue); +void mapRemove_IMPL(NonIntrusiveMap *pMap, void *pValue); +void mapRemoveIntrusive_IMPL(MapBase *pMap, void *pValue); +void mapRemoveByKey_IMPL(NonIntrusiveMap *pMap, NvU64 key); +void mapRemoveByKeyIntrusive_IMPL(MapBase *pMap, NvU64 key); + +void *mapFind_IMPL(MapBase *pMap, NvU64 key); +void *mapFindGEQ_IMPL(MapBase *pMap, NvU64 keyMin); +void *mapFindLEQ_IMPL(MapBase *pMap, NvU64 keyMax); +void *mapNext_IMPL(MapBase *pMap, void *pValue); +void *mapPrev_IMPL(MapBase *pMap, void *pValue); + +MapIterBase mapIterAll_IMPL(MapBase *pMap); +NvBool mapIterNext_IMPL(MapIterBase *pIt); + +static NV_FORCEINLINE MapNode * +mapValueToNode(MapBase *pMap, void *pValue) +{ + if (NULL == pMap) return NULL; + if (NULL == pValue) return NULL; + return (MapNode*)((NvU8*)pValue + pMap->nodeOffset); +} + +static NV_FORCEINLINE void * +mapNodeToValue(MapBase *pMap, MapNode *pNode) +{ + if (NULL == pMap) return NULL; + if (NULL == pNode) return NULL; + return (NvU8*)pNode - pMap->nodeOffset; +} + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_MAP_H_ diff --git a/src/nvidia/inc/libraries/containers/multimap.h b/src/nvidia/inc/libraries/containers/multimap.h new file mode 100644 index 000000000..7231c8b98 --- /dev/null +++ b/src/nvidia/inc/libraries/containers/multimap.h @@ -0,0 +1,296 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_MULTIMAP_H_ +#define _NV_CONTAINERS_MULTIMAP_H_ + +// Contains mix of C/C++ declarations. +#include "containers/type_safety.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#include "containers/map.h" + +/** + * @defgroup NV_CONTAINERS_MULTIMAP Multimap + * + * @brief Two-layer multimap (ordered) from pairs of 64-bit unsigned integer + * keys to user-defined values. 
+ * + * @details The provided interface is abstract, decoupling the user from the + * underlying ordered multimap implementation. Currently, memory management is + * limited to non-intrusive container-managed memory. The following + * implementation constraints are guaranteed. + * + * - Time Complexity: + * * Operations are \b O(log M + log N), + * * Unless stated otherwise, + * * Where M is the number of submaps and N is the total number of values in + * the map. + * + * - Memory Usage: + * * \b O(M + N) memory is required for M submaps and N values. + * * Only a non-intrusive variant is provided. + * See @ref mem-ownership for further details. + * + * - Synchronization: + * * \b None. The container is not thread-safe. + * * Locking must be handled by the user if required. + * + */ + +#define MAKE_MULTIMAP(multimapTypeName, dataType) \ + typedef struct multimapTypeName##Leaf \ + { \ + dataType data; \ + MultimapNode node; \ + } multimapTypeName##Leaf; \ + MAKE_INTRUSIVE_MAP(multimapTypeName##Submap, multimapTypeName##Leaf, \ + node.submapNode); \ + MAKE_MAP(multimapTypeName##Supermap, multimapTypeName##Submap); \ + typedef union multimapTypeName##Iter \ + { \ + dataType *pValue; \ + MultimapIterBase iter; \ + } multimapTypeName##Iter; \ + typedef union multimapTypeName \ + { \ + CONT_TAG_TYPE(MultimapBase, dataType, multimapTypeName##Iter); \ + struct { MultimapBase base; } real; \ + struct \ + { \ + /* This field simply aligns map with the one in MultimapBase */ \ + CONT_VTABLE_FIELD(MultimapBase); \ + multimapTypeName##Supermap map; \ + } type; \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + struct {char _[NV_OFFSETOF(multimapTypeName##Leaf, node)];} *nodeOffset; \ + struct {char _[sizeof(multimapTypeName##Submap)];} *submapSize; \ + } multimapTypeName; + +#define DECLARE_MULTIMAP(multimapTypeName) \ + typedef struct multimapTypeName##Leaf multimapTypeName##Leaf; \ + DECLARE_INTRUSIVE_MAP(multimapTypeName##Submap); \ + DECLARE_MAP(multimapTypeName##Supermap); \ + typedef union multimapTypeName##Iter multimapTypeName##Iter; \ + typedef union multimapTypeName multimapTypeName + +/** + * @brief Internal node structure associated with multimap values. + */ +typedef struct MultimapNode MultimapNode; + +/** + * @brief Base type common to all multimap iterator types. + */ +typedef struct MultimapIterBase MultimapIterBase; + +/** + * @brief Base type common to all multimap types. 
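+ *
+ * For orientation, a usage sketch (illustrative names; the submap is created
+ * explicitly here, since this header does not state whether item insertion
+ * creates a missing submap on its own):
+ *
+ *     MAKE_MULTIMAP(NvU32Multimap, NvU32);
+ *
+ *     NvU32Multimap mm;
+ *     NvU32        *pValue;
+ *     multimapInit(&mm, portMemAllocatorGetGlobalNonPaged());
+ *     if (multimapFindSubmap(&mm, 1) == NULL)
+ *         multimapInsertSubmap(&mm, 1);
+ *     pValue = multimapInsertItemNew(&mm, 1, 0x10);  // keyed by (1, 0x10)
+ *     pValue = multimapFindItem(&mm, 1, 0x10);
+ *     multimapDestroy(&mm);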
+ */ +typedef struct MultimapBase MultimapBase; + +struct MultimapNode +{ + void *pSubmap; + MapNode submapNode; +}; + +struct MultimapIterBase +{ + void *pValue; + MultimapBase *pMultimap; + void *pNext; + void *pLast; +}; + +CONT_VTABLE_DECL(MultimapBase, MultimapIterBase); + +struct MultimapBase +{ + CONT_VTABLE_FIELD(MultimapBase); + NonIntrusiveMap map; + NvS32 multimapNodeOffset; + NvU32 itemCount; + NvU32 itemSize; +}; + + +#define multimapInit(pMultimap, pAllocator) \ + multimapInit_IMPL(&(pMultimap)->real.base, pAllocator, \ + sizeof(*(pMultimap)->valueSize), \ + sizeof(*(pMultimap)->nodeOffset), \ + sizeof(*(pMultimap)->submapSize)) + +#define multimapDestroy(pMultimap) \ + multimapDestroy_IMPL(&(pMultimap)->real.base) + +#define multimapClear(pMultimap) \ + multimapClear_IMPL(&(pMultimap)->real.base) + +#define multimapCountSubmaps(pMultimap) \ + mapCount(&(pMultimap)->type.map) + +#define multimapCountItems(pMultimap) \ + (pMultimap)->real.base.itemCount + +#define multimapFindSubmap(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmap_IMPL(&(pMultimap)->real.base, submapKey)) + +#define multimapFindSubmapLEQ(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmapLEQ_IMPL(&(pMultimap)->real.base, submapKey)) + +#define multimapFindSubmapGEQ(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapFindSubmapGEQ_IMPL(&(pMultimap)->real.base, submapKey)) + +#define multimapCountSubmapItems(pMultimap, pSubmap) \ + mapCount(pSubmap) + +#define multimapInsertItemNew(pMultimap, submapKey, itemKey) \ + CONT_CAST_ELEM(pMultimap, \ + multimapInsertItemNew_IMPL(&(pMultimap)->real.base, submapKey, itemKey)) + +#define multimapInsertItemValue(pMultimap, submapKey, itemKey, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + multimapInsertItemValue_IMPL(&(pMultimap)->real.base, \ + submapKey, itemKey, pValue)) + +#define multimapInsertSubmap(pMultimap, submapKey) \ + CONT_CAST_ELEM(&(pMultimap)->type.map, \ + multimapInsertSubmap_IMPL(&(pMultimap)->real.base, submapKey)) + +#define multimapFindItem(pMultimap, submapKey, itemKey) \ + CONT_CAST_ELEM(pMultimap, \ + multimapFindItem_IMPL(&(pMultimap)->real.base, submapKey, itemKey)) + +#define multimapRemoveItem(pMultimap, pValue) \ + multimapRemoveItem_IMPL(&(pMultimap)->real.base, pValue) + +#define multimapRemoveSubmap(pMultimap, pSubmap) \ + multimapRemoveSubmap_IMPL(&(pMultimap)->real.base, &(pSubmap)->real.base) + +#define multimapRemoveItemByKey(pMultimap, submapKey, itemKey) \ + multimapRemoveItemByKey_IMPL(&(pMultimap)->real.base, submapKey, itemKey) + +#define multimapNextItem(pMultimap, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + multimapNextItem_IMPL(&(pMultimap)->real.base, pValue)) + +#define multimapPrevItem(pMultimap, pValue) \ + CONT_CAST_ELEM(pMultimap, \ + multimapPrevItem_IMPL(&(pMultimap)->real.base, pValue)) + +#define multimapFirstItem(pMultimap) \ + CONT_CAST_ELEM(pMultimap, multimapFirstItem_IMPL(&(pMultimap)->real.base)) + +#define multimapLastItem(pMultimap) \ + CONT_CAST_ELEM(pMultimap, multimapLastItem_IMPL(&(pMultimap)->real.base)) + +#define multimapItemIterAll(pMultimap) \ + multimapItemIterRange(pMultimap, \ + multimapFirstItem(pMultimap), multimapLastItem(pMultimap)) + +#define multimapItemIterRange(pMultimap, pFirst, pLast) \ + CONT_ITER_RANGE(pMultimap, multimapItemIterRange_IMPL, \ + CONT_CHECK_ARG(pMultimap, pFirst), CONT_CHECK_ARG(pMultimap, pLast)) + +#define multimapSubmapIterItems(pMultimap, pSubmap) \ + 
multimapItemIterRange(pMultimap, \ + &mapFindGEQ(pSubmap, 0)->data, &mapFindLEQ(pSubmap, NV_U64_MAX)->data) + +#define multimapItemIterNext(pIt) \ + multimapItemIterNext_IMPL(&(pIt)->iter) + +#define multimapSubmapIterAll(pMultimap) \ + mapIterAll(&(pMultimap)->type.map) + +#define multimapSubmapIterRange(pMultimap, pFirst, pLast) \ + mapIterRange(&(pMultimap)->type.map, pFirst, pLast) + +#define multimapSubmapIterNext(pIt) \ + mapIterNext(pIt) + +#define multimapItemKey(pMultimap, pValue) \ + multimapValueToNode(&(pMultimap)->real.base, pValue)->submapNode.key + +#define multimapSubmapKey(pMultimap, pSubmap) \ + mapKey(&(pMultimap)->type.map, pSubmap) + +void multimapInit_IMPL(MultimapBase *pBase, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize, NvS32 nodeOffset, NvU32 submapSize); +void multimapRemoveSubmap_IMPL(MultimapBase *pMultimap, MapBase *submap); +void multimapDestroy_IMPL(MultimapBase *pBase); +void multimapClear_IMPL(MultimapBase *pBase); + +void *multimapInsertSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey); + +void *multimapFindSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey); +void *multimapFindSubmapLEQ_IMPL(MultimapBase *pBase, NvU64 submapKey); +void *multimapFindSubmapGEQ_IMPL(MultimapBase *pBase, NvU64 submapKey); + +void *multimapInsertItemNew_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey); +void *multimapInsertItemValue_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey, void *pValue); + +void *multimapFindItem_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey); + +void multimapRemoveItem_IMPL(MultimapBase *pBase, void *pLeaf); +void multimapRemoveItemByKey_IMPL(MultimapBase *pBase, NvU64 submapKey, + NvU64 itemKey); + +void *multimapNextItem_IMPL(MultimapBase *pBase, void *pValue); +void *multimapPrevItem_IMPL(MultimapBase *pBase, void *pValue); + +void *multimapFirstItem_IMPL(MultimapBase *pBase); +void *multimapLastItem_IMPL(MultimapBase *pBase); + +MultimapIterBase multimapItemIterRange_IMPL(MultimapBase *pBase, + void *pFirst, void *pLast); +NvBool multimapItemIterNext_IMPL(MultimapIterBase *pIt); + +static NV_FORCEINLINE MultimapNode * +multimapValueToNode(MultimapBase *pBase, void *pValue) +{ + if (NULL == pBase || NULL == pValue) return NULL; + + return (MultimapNode *)((NvU8*)pValue + pBase->multimapNodeOffset); +} +static NV_FORCEINLINE void * +multimapNodeToValue(MultimapBase *pBase, MultimapNode *pNode) +{ + if (NULL == pBase || NULL == pNode) return NULL; + + return (NvU8*)pNode - pBase->multimapNodeOffset; +} + +#ifdef __cplusplus +} +#endif + +#endif // _NV_CONTAINERS_MULTIMAP_H_ diff --git a/src/nvidia/inc/libraries/containers/queue.h b/src/nvidia/inc/libraries/containers/queue.h new file mode 100644 index 000000000..e23a30331 --- /dev/null +++ b/src/nvidia/inc/libraries/containers/queue.h @@ -0,0 +1,143 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef NV_CONTAINERS_QUEUE_H +#define NV_CONTAINERS_QUEUE_H + +#include "containers/type_safety.h" +#include "nvtypes.h" +#include "nvmisc.h" +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAKE_QUEUE_CIRCULAR(queueTypeName, dataType) \ + typedef struct queueTypeName##Iter_UNUSED \ + { \ + NvLength dummyElem; \ + } queueTypeName##Iter_UNUSED; \ + typedef union queueTypeName \ + { \ + Queue real; \ + CONT_TAG_TYPE(Queue, dataType, queueTypeName##Iter_UNUSED); \ + CONT_TAG_NON_INTRUSIVE(dataType); \ + } queueTypeName + +#define DECLARE_QUEUE_CIRCULAR(queueTypeName) \ + typedef struct queueTypeName##Iter_UNUSED queueTypeName##Iter_UNUSED; \ + typedef union queueTypeName queueTypeName + +struct Queue; +struct QueueContext; + +typedef void QueueCopyData(NvLength msgSize, NvLength opIdx, + struct QueueContext *pCtx, void *pClientData, + NvLength count, NvBool bCopyIn); + +typedef struct QueueContext { + QueueCopyData *pCopyData; // Function performing accesses to queue memory. + void *pData; // Private data. 
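+    //
+    // Expected (inferred) contract for pCopyData, based on the QueueCopyData
+    // signature above: move 'count' messages of 'msgSize' bytes each between
+    // the queue's backing storage, starting at element index 'opIdx', and
+    // 'pClientData'; 'bCopyIn' selects the direction (into the queue on the
+    // push path, out of it on the pop path). The non-managed push/pop entry
+    // points below route their accesses through this callback.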
+} QueueContext; + +typedef struct Queue { + NvLength capacity; // Queue Capacity + PORT_MEM_ALLOCATOR *pAllocator; // Set of functions used for managing queue memory + void *pData; // Queue memory, if managed by pAllocator + NvLength msgSize; // Message size produced by Producer + NvLength getIdx NV_ALIGN_BYTES(64);// GET index modified by Consumer + NvLength putIdx NV_ALIGN_BYTES(64);// PUT index modified by Producer +} Queue; + +//for future use (more possible queues - just an example, currently only CIRCULAR will get implemented) +typedef enum +{ + QUEUE_TYPE_CIRCULAR = 1, + //QUEUE_TYPE_LINEAR = 2, + //QUEUE_TYPE_PRIORITY = 3, +}QUEUE_TYPE; + +#define queueInit(pQueue, pAllocator, capacity) \ + circularQueueInit_IMPL(&((pQueue)->real), pAllocator, \ + capacity, sizeof(*(pQueue)->valueSize)) + +#define queueInitNonManaged(pQueue, capacity) \ + circularQueueInitNonManaged_IMPL(&((pQueue)->real), \ + capacity, sizeof(*(pQueue)->valueSize)) + +#define queueDestroy(pQueue) \ + circularQueueDestroy_IMPL(&((pQueue)->real)) + +#define queueCount(pQueue) \ + circularQueueCount_IMPL(&((pQueue)->real)) + +#define queueCapacity(pQueue) \ + circularQueueCapacity_IMPL(&((pQueue)->real)) + +#define queueIsEmpty(pQueue) \ + circularQueueIsEmpty_IMPL(&((pQueue)->real)) + +#define queuePush(pQueue, pElements, numElements) \ + circularQueuePush_IMPL(&(pQueue)->real, \ + CONT_CHECK_ARG(pQueue, pElements), numElements) + +#define queuePushNonManaged(pQueue, pCtx, pElements, numElements) \ + circularQueuePushNonManaged_IMPL(&(pQueue)->real, pCtx, \ + CONT_CHECK_ARG(pQueue, pElements), numElements) + +#define queuePeek(pQueue) \ + CONT_CAST_ELEM(pQueue, circularQueuePeek_IMPL(&((pQueue)->real))) + +#define queuePop(pQueue) \ + circularQueuePop_IMPL(&((pQueue)->real)) + +#define queuePopAndCopy(pQueue, pCopyTo) \ + circularQueuePopAndCopy_IMPL(&((pQueue)->real), \ + CONT_CHECK_ARG(pQueue, pCopyTo)) + +#define queuePopAndCopyNonManaged(pQueue, pCtx, pCopyTo) \ + circularQueuePopAndCopyNonManaged_IMPL(&((pQueue)->real), pCtx, \ + CONT_CHECK_ARG(pQueue, pCopyTo)) + +NV_STATUS circularQueueInit_IMPL(Queue *pQueue, PORT_MEM_ALLOCATOR *pAllocator, + NvLength capacity, NvLength msgSize); +NV_STATUS circularQueueInitNonManaged_IMPL(Queue *pQueue, NvLength capacity, + NvLength msgSize); +void circularQueueDestroy_IMPL(Queue *pQueue); +NvLength circularQueueCapacity_IMPL(Queue *pQueue); +NvLength circularQueueCount_IMPL(Queue *pQueue); +NvBool circularQueueIsEmpty_IMPL(Queue *pQueue); +NvLength circularQueuePush_IMPL(Queue *pQueue, void* pElements, NvLength numElements); +NvLength circularQueuePushNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, + void* pElements, NvLength numElements); +void* circularQueuePeek_IMPL(Queue *pQueue); +void circularQueuePop_IMPL(Queue *pQueue); +NvBool circularQueuePopAndCopy_IMPL(Queue *pQueue, void *pCopyTo); +NvBool circularQueuePopAndCopyNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, + void *pCopyTo); +#ifdef __cplusplus +} +#endif + +#endif // NV_CONTAINERS_QUEUE_H diff --git a/src/nvidia/inc/libraries/containers/type_safety.h b/src/nvidia/inc/libraries/containers/type_safety.h new file mode 100644 index 000000000..ad8b9a3bf --- /dev/null +++ b/src/nvidia/inc/libraries/containers/type_safety.h @@ -0,0 +1,254 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_CONTAINERS_TYPE_SAFETY_H_ +#define _NV_CONTAINERS_TYPE_SAFETY_H_ + +#include "nvtypes.h" +#include "nvport/nvport.h" + +// Check for typeof support. For now restricting to GNUC compilers. +#if defined(__GNUC__) +#define NV_TYPEOF_SUPPORTED 1 +#else +#define NV_TYPEOF_SUPPORTED 0 +#endif + +/** + * Tag a non-intrusive container union with the following info: + * valueSize : size of its element type for non-intrusive malloc + * kind : non-intrusive kind ID for static dispatch + */ +#define CONT_TAG_NON_INTRUSIVE(elemType) \ + struct {char _[sizeof(elemType)];} *valueSize; \ + struct {char _[CONT_KIND_NON_INTRUSIVE];} *kind + +/** + * Tag an intrusive container union with the following info: + * nodeOffset : offset of the data structure node within element type + * kind : intrusive kind ID for static dispatch + */ +// FIXME: Do not use this for any structure members with offset 0! +// The size of a 0 length array is undefined according to the C99 standard +// and we've seen non-zero values of sizeof(*nodeOffset) appear at runtime +// leading to corruption. Filed Bug 2858103 to track work against this. +#define CONT_TAG_INTRUSIVE(elemType, node) \ + struct {char _[NV_OFFSETOF(elemType, node)];} *nodeOffset; \ + struct {char _[CONT_KIND_INTRUSIVE];} *kind + + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Utility identity function for several type-safety mechanisms. + */ +static NV_FORCEINLINE void *contId(void *pValue) +{ + return pValue; +} + +#ifdef __cplusplus +} +#endif + +/** + * @def CONT_TAG_ELEM_TYPE + * Tag a container union with element type info. + */ + +/** + * @def CONT_CHECK_ARG + * Check that a value has a container's element type. + */ + +/** + * @def CONT_CAST_ELEM + * Cast a void pointer to a container's element type. + */ + +// With C++ we can use typedef and templates for 100% type safety. 
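+//
+// In both the C++ and the plain C paths below, the goal is to let the
+// generic void*-based _IMPL routines be wrapped with compile-time type
+// checks. As an illustration only (with a hypothetical list type built from
+// containers/list.h):
+//
+//     MAKE_LIST(NvU32List, NvU32);
+//     NvU32List list;
+//     NvU32    *pOk = listAppendNew(&list);   // CONT_CAST_ELEM yields NvU32 *
+//
+// whereas passing an NvF32 * to listAppendValue(&list, ...) is rejected at
+// compile time: the C path's CONT_CHECK_ARG performs the dummy assignment
+// (pCont)->elem = (pValue) inside a sizeof(), and the C++ path gets the same
+// check from the template parameter types.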
+#if defined(__cplusplus) && !defined(NV_CONTAINERS_NO_TEMPLATES)
+
+#define CONT_TAG_TYPE(contType, elemType, iterType)                          \
+    CONT_VTABLE_TAG(contType, elemType, iterType);                           \
+    typedef contType ContType;                                               \
+    typedef elemType ElemType;                                               \
+    typedef iterType IterType
+
+template <typename T>
+typename T::ElemType *CONT_CHECK_ARG(T *pCont, typename T::ElemType *pValue)
+{
+    return pValue;
+}
+
+template <typename T>
+typename T::ElemType *CONT_CAST_ELEM(T *pCont, void *pValue)
+{
+    return (typename T::ElemType *)pValue;
+}
+
+template <typename T, typename It>
+typename T::IterType CONT_ITER_RANGE
+(
+    T *pCont,
+    It (*pFunc)(typename T::ContType *, void *, void *),
+    void *pFirst,
+    void *pLast
+)
+{
+    typename T::IterType temp;
+    temp.iter = pFunc(&pCont->real.base, pFirst, pLast);
+    return temp;
+}
+
+template <typename T, typename It>
+typename T::IterType CONT_ITER_RANGE_INDEX
+(
+    T *pCont,
+    It (*pFunc)(typename T::ContType *, NvU64, NvU64),
+    NvU64 first,
+    NvU64 last
+)
+{
+    typename T::IterType temp;
+    temp.iter = pFunc(&pCont->real.base, first, last);
+    return temp;
+}
+
+// Without C++ we need more creativity. :)
+#else
+
+// Element tag is a pointer to the element type (no mem overhead in union).
+#define CONT_TAG_TYPE(contType, elemType, iterType)                          \
+    CONT_VTABLE_TAG(contType, elemType, iterType);                           \
+    elemType *elem;                                                          \
+    iterType *iter
+
+// Argument check uses sizeof to get error message without runtime overhead.
+#define CONT_CHECK_ARG(pCont, pValue) \
+    (sizeof((pCont)->elem = (pValue)) ? (pValue) : NULL)
+
+//
+// Return checks are more problematic, but typeof is perfect when available.
+// Without typeof we resort to a runtime vtable.
+//
+#if NV_TYPEOF_SUPPORTED
+
+#define CONT_CAST_ELEM(pCont, ret) ((typeof((pCont)->elem))(ret))
+
+//
+// The dummy contId prevents compilers from warning about incompatible
+// function casts. This is safe since we know the two return structures
+// are identical (modulo alpha-conversion).
+// +#define CONT_ITER_RANGE(pCont, pFunc, pFirst, pLast) \ + (((typeof(*(pCont)->iter)(*)(void *, void *, void *))contId(pFunc))( \ + pCont, pFirst, pLast)) + +#define CONT_ITER_RANGE_INDEX(pCont, pFunc, first, last) \ + (((typeof(*(pCont)->iter)(*)(void *, NvU64, NvU64))contId(pFunc))( \ + pCont, first, last)) + +#else + +#define CONT_CAST_ELEM(pCont, ret) ((pCont)->vtable->checkRet(ret)) + +#define CONT_ITER_RANGE(pCont, pFunc, pFirst, pLast) \ + ((pCont)->vtable->iterRange(&(pCont)->real.base, pFirst, pLast)) + +#define CONT_ITER_RANGE_RANGE(pCont, pFunc, first, last) \ + ((pCont)->vtable->iterRangeIndex(&(pCont)->real.base, first, last)) + +#endif + +#endif + +#if NV_TYPEOF_SUPPORTED + +#define CONT_VTABLE_DECL(contType, iterType) +#define CONT_VTABLE_DEFN(contType, contIterRange, contIterRangeIndex) +#define CONT_VTABLE_TAG(contType, elemType, iterType) +#define CONT_VTABLE_FIELD(contType) +#define CONT_VTABLE_INIT(contType, pCont) + +#else + +#define CONT_VTABLE_DECL(contType, iterType) \ + typedef struct \ + { \ + void *(*checkRet)(void *pValue); \ + iterType (*iterRange)(contType *pCont, void *pFirst, void *pLast); \ + iterType (*iterRangeIndex)(contType *pCont, NvU64 first, NvU64 last);\ + } contType##_VTABLE; \ + +#define CONT_VTABLE_DEFN(contType, contIterRange, contIterRangeIndex) \ + static const contType##_VTABLE g_##contType##_VTABLE = \ + { \ + contId, \ + contIterRange, \ + contIterRangeIndex, \ + } + +#define CONT_VTABLE_TAG(contType, elemType, iterType) \ + const struct \ + { \ + elemType *(*checkRet)(void *pValue); \ + iterType (*iterRange)(contType *pCont, void *pFirst, void *pLast); \ + iterType (*iterRangeIndex)(contType *pCont, NvU64 first, NvU64 last);\ + } *vtable + +#define CONT_VTABLE_FIELD(contType) const contType##_VTABLE *vtable + +#define CONT_VTABLE_INIT(contType, pCont) \ + ((pCont)->vtable = &g_##contType##_VTABLE) + +#endif + +enum CONT_KIND +{ + CONT_KIND_NON_INTRUSIVE = 1, + CONT_KIND_INTRUSIVE = 2, +}; + +/** + * Static dispatch uses sizeof with dummy arrays to select a path. + * + * With optimizations enabled the unused paths should be trimmed, so this + * should have zero overhead in release builds. +*/ +#define CONT_DISPATCH_ON_KIND(pCont, ret1, ret2, ret3) \ + ((sizeof(*(pCont)->kind) == CONT_KIND_NON_INTRUSIVE) ? (ret1) : \ + (sizeof(*(pCont)->kind) == CONT_KIND_INTRUSIVE) ? (ret2) : \ + (ret3)) + +/** + * Utility stub useful for the above ret3 argument (unreachable path). + * Add stubs for different return types as needed. + */ +static NV_FORCEINLINE void contDispatchVoid_STUB(void) +{ + PORT_BREAKPOINT(); +} + +#endif // _NV_CONTAINERS_TYPE_SAFETY_H_ diff --git a/src/nvidia/inc/libraries/eventbufferproducer.h b/src/nvidia/inc/libraries/eventbufferproducer.h new file mode 100644 index 000000000..3a7412e71 --- /dev/null +++ b/src/nvidia/inc/libraries/eventbufferproducer.h @@ -0,0 +1,177 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_EVENT_BUFFER_PRODUCER_H_ +#define _NV_EVENT_BUFFER_PRODUCER_H_ +#include "nvtypes.h" +#include "class/cl90cd.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* +* RECORD_BUFFER_INFO +* This structure holds information for the record buffer, which is a circular +* buffer with fixed sized records +* +* pHeader +* This is a shared header between the producer and consumer. +* It holds the get put pointers and overflow counts. +* +* recordBuffAddr +* This is the producer mapping to the record buffer. +* +* recordSize +* Size of each record in bytes. +* +* totalRecordCount +* Total number of records that this buffer can hold. +* +* bufferSize +* Total size of record buffer in bytes. +* +* notificationThreshold +* This felid specifies the number of records that the buffer can +* still hold before it gets full. +* Consumer is notified when this threshold is met. +* +*/ +typedef struct +{ + NV_EVENT_BUFFER_HEADER* pHeader; + NvP64 recordBuffAddr; + NvU32 recordSize; + NvU32 totalRecordCount; + NvU32 bufferSize; + NvU32 notificationThreshold; +} RECORD_BUFFER_INFO; + +/* +* VARDATA_BUFFER_INFO: +* This structure holds information for the variable length data buffer, +* which is a circular buffer with variable sized data records +* +* vardataBuffAddr +* This is the producer mapping to the vardata buffer. +* +* bufferSize +* Total size of vardata buffer in bytes. +* +* notificationThreshold +* This felid specifies the number of records that the buffer can +* still hold before it gets full. +* Consumer is notified when this threshold is met. +* +* get\put +* These are the get and put offsets for vardata buffer. +* These are not shared with the consumer. +* +* remainingSize +* Size in bytes remaining in the vardata buffer. +*/ +typedef struct +{ + NvP64 vardataBuffAddr; + NvU32 bufferSize; + NvU32 notificationThreshold; + NvU32 get; + NvU32 put; + NvU32 remainingSize; +} VARDATA_BUFFER_INFO; + +/* +* EVENT_BUFFER_PRODUCER_INFO: +* +* recordBuffer +* Record buffer information +* +* vardataBuffer +* Vardata buffer information +* +* notificationHandle +* notification handle used to notify the consumer +* +* isEnabled +* Data is added to the event buffer only if this flag is set +* Controlled by Consumer. +* +* isKeepNewest +* This flag is set if keepNewest mode is selected by the consumer. 
+*/ +typedef struct +{ + RECORD_BUFFER_INFO recordBuffer; + VARDATA_BUFFER_INFO vardataBuffer; + NvP64 notificationHandle; + NvBool isEnabled; + NvBool isKeepNewest; +} EVENT_BUFFER_PRODUCER_INFO; + +/* +* EVENT_BUFFER_PRODUCER_DATA: +* This structure holds data info to add a record in a buffer +* +* pPayload +* Pointer to the payload that needs to be added in the record buffer +* +* payloadSize +* Size of payload in bytes. +* +* pVardata +* Pointer to data that needs to be added in the vardata buffer +* +* vardataSize +* Size of vardata in bytes. +*/ +typedef struct +{ + NvP64 pPayload; + NvU32 payloadSize; + NvP64 pVardata; + NvU32 vardataSize; +} EVENT_BUFFER_PRODUCER_DATA; + +void eventBufferInitRecordBuffer(EVENT_BUFFER_PRODUCER_INFO *info, NV_EVENT_BUFFER_HEADER* pHeader, + NvP64 recordBuffAddr, NvU32 recordSize, NvU32 recordCount, NvU32 bufferSize, NvU32 notificationThreshold); + +void eventBufferInitVardataBuffer(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 vardataBuffAddr, + NvU32 bufferSize, NvU32 notificationThreshold); + +void eventBufferInitNotificationHandle(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 notificationHandle); +void eventBufferSetEnable(EVENT_BUFFER_PRODUCER_INFO *info, NvBool isEnabled); +void eventBufferSetKeepNewest(EVENT_BUFFER_PRODUCER_INFO *info, NvBool isKeepNewest); +void eventBufferUpdateRecordBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get); +void eventBufferUpdateVardataBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get); +NvU32 eventBufferGetRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info); +NvU32 eventBufferGetVardataBufferCount(EVENT_BUFFER_PRODUCER_INFO *info); + +void eventBufferProducerAddEvent(EVENT_BUFFER_PRODUCER_INFO* info, NvU16 eventType, NvU16 eventSubtype, + EVENT_BUFFER_PRODUCER_DATA *pData); + +NvBool eventBufferIsNotifyThresholdMet(EVENT_BUFFER_PRODUCER_INFO* info); + +#ifdef __cplusplus +}; /* extern "C" */ +#endif + +#endif //_NV_EVENT_BUFFER_PRODUCER_H_ diff --git a/src/nvidia/inc/libraries/field_desc.h b/src/nvidia/inc/libraries/field_desc.h new file mode 100644 index 000000000..92b7ed7ab --- /dev/null +++ b/src/nvidia/inc/libraries/field_desc.h @@ -0,0 +1,450 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef NV_FIELD_DESC_H +#define NV_FIELD_DESC_H + +#ifdef __cplusplus +extern "C" { +#endif + +/*! 
+ * @file + * @brief Simple runtime DRF-macro framework. + * + * Allows HAL definitions at the register/field level to maximize common code. + * Two primitive versions are provided - 32-bit and 64-bit. + */ +#include "nvtypes.h" +#include "nvmisc.h" +#include "compat.h" + +// Forward declarations. +typedef struct NV_FIELD_DESC32 NV_FIELD_DESC32; +typedef struct NV_FIELD_DESC64 NV_FIELD_DESC64; +typedef struct NV_FIELD_ENUM NV_FIELD_ENUM; +typedef struct NV_FIELD_ENUM_ENTRY NV_FIELD_ENUM_ENTRY; +typedef struct NV_FIELD_BOOL NV_FIELD_BOOL; + +/*! + * Descriptor for fields <= 32-bits in length. + * A field is assumed to fit within a 4-byte aligned region. + */ +struct NV_FIELD_DESC32 +{ + /*! + * Positive bitmask of the field, e.g. 0x6ULL for a field 2:1. + */ + NvU32 maskPos; + + /*! + * Negative bitmask of the field, e.g. ~0x6ULL for a field 2:1. + */ + NvU32 maskNeg; + + /*! + * Bit shift, e.g. 4 for a field 31:4. + */ + NvU16 shift; + + /*! + * Offset into the memory in bytes. + * This is needed for regions greater than 4 bytes. + */ + NvU16 offset; +}; + +#define NV_FIELD_DESC64_MAX_DISCONTIG_REGIONS 2 + +/*! + * Descriptor for fields <= 64-bits in length. + * A field is assumed to fit within an 8-byte aligned region of memory. + */ +struct NV_FIELD_DESC64 +{ + NvU32 regionCount; + + struct + { + /*! + * Positive bitmask of the field, e.g. 0x6ULL for a field 2:1. + */ + NvU64 maskPos; + + /*! + * Width of field, e.g. 0x2 for a field 2:1. + */ + NvU32 width; + + /*! + * Bit shift, e.g. 4 for a field 31:4. + */ + NvU32 shift; + + /*! + * Offset into the memory in bytes. + * This is needed for regions greater than 8 bytes. + */ + NvU32 offset; + } regions[NV_FIELD_DESC64_MAX_DISCONTIG_REGIONS]; +}; + +/*! + * Enumeration field value. + */ +struct NV_FIELD_ENUM_ENTRY +{ + NvU8 bValid; //!< Indicates the value is valid (for checking). + NvU8 value; //!< Raw (encoded) value. +}; + +/*! + * Enumeration field descriptor. + */ +struct NV_FIELD_ENUM +{ + NV_FIELD_DESC32 desc; + NvU8 numEntries; + const NV_FIELD_ENUM_ENTRY *entries; +}; + +/*! + * Boolean field descriptor. + */ +struct NV_FIELD_BOOL +{ + NV_FIELD_DESC32 desc; + NvBool bInvert; +}; + +// TODO: Move to nvmisc.h. +#ifndef DRF_OFFSET +#define DRF_OFFSET(drf) (((0?drf) / 32) * 4) +#endif + +#ifndef DRF_OFFSET64 +#define DRF_OFFSET64(drf) (((0?drf) / 64) * 8) +#endif + +// Utility macros to define field formats using HW manuals. 
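A minimal round-trip sketch of the 32-bit descriptor path defined below, assuming the RM headers pulled in by field_desc.h are on the include path. NV_HYPO_CTRL_MODE and NV_HYPO_CTRL_MODE_FAST are made-up, manual-style defines used only for illustration.

    #include "field_desc.h"

    /* Hypothetical HW-manual style field: bits 4:1 of a 32-bit register. */
    #define NV_HYPO_CTRL_MODE      4:1
    #define NV_HYPO_CTRL_MODE_FAST 0x3

    static NvU32 exampleFieldRoundTrip(void)
    {
        NV_FIELD_DESC32 fldMode;
        NvU32 reg = 0;

        /* Build the runtime descriptor from the manual-style define. */
        INIT_FIELD_DESC32(&fldMode, NV_HYPO_CTRL_MODE);

        /* Write the encoded value, then read it back through the descriptor. */
        nvFieldSet32(&fldMode, NV_HYPO_CTRL_MODE_FAST, (NvU8 *)&reg);
        return nvFieldGet32(&fldMode, (const NvU8 *)&reg); /* returns 0x3 */
    }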
+#define INIT_FIELD_DESC32(pField, drf) \ + do { \ + (pField)->maskPos = DRF_SHIFTMASK(drf); \ + (pField)->maskNeg = ~DRF_SHIFTMASK(drf); \ + (pField)->shift = DRF_SHIFT(drf); \ + (pField)->offset = DRF_OFFSET(drf); \ + } while (0) + +#define INIT_FIELD_DESC64(pField, drf) \ + do { \ + (pField)->regionCount = 1; \ + (pField)->regions[0].maskPos = DRF_SHIFTMASK64(drf); \ + (pField)->regions[0].width = (1?drf) - (0?drf) + 1; \ + (pField)->regions[0].shift = DRF_SHIFT64(drf); \ + (pField)->regions[0].offset = DRF_OFFSET64(drf); \ + } while (0) + + +/* + @note: + BEGIN_DISCONTIG_FIELD_DESC64(pField) + DRF_DISCONTIG_FIELD_DESC64(pField, NV_MMU_VER2_PTE_COMPTAGLINE) + DRF_DISCONTIG_FIELD_DESC64(pField, NV_MMU_VER2_PTE_ADDRESS_VID) + END_FIELD_DESC64_DISCONTIGUOUS(pField) + +*/ +#define BEGIN_DISCONTIG_FIELD_DESC64(pField) \ + pField->regionCount = 0; + +#define DRF_DISCONTIG_FIELD_DESC64(pField, drf) \ + do { \ + NV_ASSERT_CHECKED_PRECOMP(pField->regionCount < NV_FIELD_DESC64_MAX_DISCONTIG_REGIONS); \ + (pField)->regions[pField->regionCount].maskPos = DRF_SHIFTMASK64(drf); \ + (pField)->regions[pField->regionCount].width = (1?drf) - (0?drf) + 1; \ + (pField)->regions[pField->regionCount].shift = DRF_SHIFT64(drf); \ + (pField)->regions[pField->regionCount].offset = DRF_OFFSET64(drf); \ + pField->regionCount ++; \ + } while(0); + +#define END_FIELD_DESC64_DISCONTIGUOUS(pField) + + +#define INIT_FIELD_ENUM(pEnum, drf, count, pEntries) \ + do { \ + INIT_FIELD_DESC32(&(pEnum)->desc, drf); \ + (pEnum)->numEntries = count; \ + (pEnum)->entries = pEntries; \ + } while(0) + +#define INIT_FIELD_BOOL(pBool, drf) \ + do { \ + INIT_FIELD_DESC32(&(pBool)->desc, drf); \ + (pBool)->bInvert = (NvBool)!(drf##_TRUE); \ + } while(0) + +static NV_FORCEINLINE void +nvFieldEnumEntryInit(NV_FIELD_ENUM_ENTRY *pEntry, const NvU8 value) +{ + pEntry->bValid = NV_TRUE; + pEntry->value = value; +} + +/*! + * Test whether a 32-bit field descriptor is valid. + */ +static NV_FORCEINLINE NvBool +nvFieldIsValid32(const NV_FIELD_DESC32 *pField) +{ + if (0 != pField->maskPos) + { + NV_ASSERT_CHECKED_PRECOMP(pField->maskPos == ~pField->maskNeg); + NV_ASSERT_CHECKED_PRECOMP(0 != (NVBIT64(pField->shift) & pField->maskPos)); + NV_ASSERT_CHECKED_PRECOMP(0 == (pField->offset & 0x3)); + return NV_TRUE; + } + return NV_FALSE; +} + +/*! + * Test whether a 64-bit field descriptor is valid. + */ +static NV_FORCEINLINE NvBool +nvFieldIsValid64(const NV_FIELD_DESC64 *pField) +{ + NvU32 i; + NvU32 aggregate_mask = 0; + for (i = 0; i < pField->regionCount; i++) { + // Forbid empty registers + if (pField->regions[i].maskPos == 0) + return NV_FALSE; + + // Ensure that fields don't overlap + NV_ASSERT_CHECKED_PRECOMP((pField->regions[i].maskPos & aggregate_mask) == 0); + aggregate_mask |= pField->regions[i].maskPos; + + // Ensure that shift is bottom bit of maskPos + NV_ASSERT_CHECKED_PRECOMP(0 != (NVBIT64(pField->regions[i].shift) & pField->regions[i].maskPos)); + + // Ensure offset is quad-word aligned + NV_ASSERT_CHECKED_PRECOMP(0 == (pField->regions[i].offset & 0x7)); + } + return NV_TRUE; +} + +/*! + * Set a 32-bit field based on its descriptor. + * + * @param[in] pField Field format. + * @param[in] value Value to set within the entry. + * @param[in,out] pMem Existing memory to update of at least length (pField->offset + 4). 
+ */ +static NV_FORCEINLINE void +nvFieldSet32 +( + const NV_FIELD_DESC32 *pField, + const NvU32 value, + NvU8 *pMem +) +{ + NvU32 *pValue = (NvU32*)(pMem + pField->offset); + const NvU32 shifted = value << pField->shift; + + NV_ASSERT_CHECKED_PRECOMP(nvFieldIsValid32(pField)); + NV_ASSERT_CHECKED_PRECOMP((shifted >> pField->shift) == value); + NV_ASSERT_CHECKED_PRECOMP((shifted & pField->maskPos) == shifted); + + *pValue = (*pValue & pField->maskNeg) | shifted; +} + +/*! + * Set a 64-bit field based on its descriptor. + * + * @param[in] pField Field format. + * @param[in] value Value to set within the entry. + * @param[in,out] pMem Existing memory to update of at least length (pField->offset + 8). + */ +static NV_FORCEINLINE void +nvFieldSet64 +( + const NV_FIELD_DESC64 *pField, + NvU64 value, + NvU8 *pMem +) +{ + NvU32 i; + NV_ASSERT_CHECKED_PRECOMP(nvFieldIsValid64(pField)); + + for (i = 0; i < pField->regionCount; i++) + { + // Compute location and mask + NvU64 *pValue = (NvU64*)(pMem + pField->regions[i].offset); + const NvU64 shifted = value << pField->regions[i].shift; + + // Store the portion of the value that fits in this field + *pValue = (*pValue & ~pField->regions[i].maskPos) | + (shifted & pField->regions[i].maskPos); + + // Shift off the bits we just stored + value >>= pField->regions[i].width; + } + + // Ensure value doesn't overflow fiel + NV_ASSERT_CHECKED_PRECOMP(value == 0); +} + +/*! + * Encode and set an enum value based on its descriptor. + * + * @param[in] pEnum Enum format. + * @param[in] value Un-encoded value to set within the entry. + * @param[in,out] pMem Existing memory to update of at least length (pEnum->desc.offset + 4). + */ +static NV_FORCEINLINE void +nvFieldSetEnum +( + const NV_FIELD_ENUM *pEnum, + const NvU32 value, + NvU8 *pMem +) +{ + NV_ASSERT_CHECKED_PRECOMP(value < pEnum->numEntries); + NV_ASSERT_CHECKED_PRECOMP(pEnum->entries[value].bValid); + nvFieldSet32(&pEnum->desc, pEnum->entries[value].value, pMem); +} + +/*! + * Set an boolean field based on its descriptor. + * + * @param[in] pField Boolean field descriptor. + * @param[in] value Truth value. + * @param[in,out] pMem Existing memory to update of at least length (pField->desc.offset + 4). + */ +static NV_FORCEINLINE void +nvFieldSetBool +( + const NV_FIELD_BOOL *pField, + const NvBool value, + NvU8 *pMem +) +{ + nvFieldSet32(&pField->desc, value ^ pField->bInvert, pMem); +} + +/*! + * Get the value of a 32-bit field based on its descriptor. + * + * @param[in] pField Field format. + * @param[in] pMem Memory of at least length (pField->offset + 4). + * + * @returns the extracted value. + */ +static NV_FORCEINLINE NvU32 +nvFieldGet32 +( + const NV_FIELD_DESC32 *pField, + const NvU8 *pMem +) +{ + NV_ASSERT_CHECKED_PRECOMP(nvFieldIsValid32(pField)); + return (*(const NvU32*)(pMem + pField->offset) & pField->maskPos) >> pField->shift; +} + +/*! + * Get the value of a 64-bit field based on its descriptor. + * + * @param[in] pField Field format. + * @param[in] pMem Memory of at least length (pField->offset + 8). + * + * @returns the extracted value. 
+ */ +static NV_FORCEINLINE NvU64 +nvFieldGet64 +( + const NV_FIELD_DESC64 *pField, + const NvU8 *pMem +) +{ + NvU32 i, shift = 0; + NvU64 value = 0; + NV_ASSERT_CHECKED_PRECOMP(nvFieldIsValid64(pField)); + for (i = 0; i < pField->regionCount; i++) + { + NvU64 region_value = (*(const NvU64*)(pMem + pField->regions[i].offset) & + pField->regions[i].maskPos) >> pField->regions[i].shift; + + value |= region_value << shift; + + shift += pField->regions[i].width; + } + return value; +} + +/*! + * Get and decode an enum value based on its descriptor. + * + * @param[in] pEnum Enum format. + * @param[in] pMem Memory of at least length (pEnum->desc.offset + 4). + */ +static NV_FORCEINLINE NvU32 +nvFieldGetEnum +( + const NV_FIELD_ENUM *pEnum, + const NvU8 *pMem +) +{ + const NvU32 encoded = nvFieldGet32(&pEnum->desc, pMem); + NvU32 decoded; + for (decoded = 0; decoded < pEnum->numEntries; ++decoded) + { + if (pEnum->entries[decoded].bValid && + (pEnum->entries[decoded].value == encoded)) + { + return decoded; + } + } + NV_ASSERT_CHECKED_PRECOMP(0); + return 0; +} + +/*! + * Get an boolean field based on its descriptor. + * + * @param[in] pField Boolean field descriptor. + * @param[in] pMem Memory of at least length (pField->desc.offset + 4). + */ +static NV_FORCEINLINE NvBool +nvFieldGetBool +( + const NV_FIELD_BOOL *pField, + const NvU8 *pMem +) +{ + const NvU32 value = nvFieldGet32(&pField->desc, pMem); + NV_ASSERT_CHECKED_PRECOMP(value <= 1); + return (NvBool)(value ^ pField->bInvert); +} + +#ifdef __cplusplus +} +#endif + +#endif // NV_FIELD_DESC_H diff --git a/src/nvidia/inc/libraries/ioaccess/ioaccess.h b/src/nvidia/inc/libraries/ioaccess/ioaccess.h new file mode 100644 index 000000000..42a36295b --- /dev/null +++ b/src/nvidia/inc/libraries/ioaccess/ioaccess.h @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#ifndef _IO_ACCESS_H_
+#define _IO_ACCESS_H_
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+
+typedef struct IO_DEVICE IO_DEVICE, *PIO_DEVICE;
+typedef struct IO_APERTURE IO_APERTURE, *PIO_APERTURE;
+
+typedef NvU8 ReadReg008Fn(PIO_APERTURE a, NvU32 addr);
+typedef NvU16 ReadReg016Fn(PIO_APERTURE a, NvU32 addr);
+typedef NvU32 ReadReg032Fn(PIO_APERTURE a, NvU32 addr);
+typedef void WriteReg008Fn(PIO_APERTURE a, NvU32 addr, NvV8 value);
+typedef void WriteReg016Fn(PIO_APERTURE a, NvU32 addr, NvV16 value);
+typedef void WriteReg032Fn(PIO_APERTURE a, NvU32 addr, NvV32 value);
+typedef NvBool ValidRegFn(PIO_APERTURE a, NvU32 addr);
+
+#define REG_DRF_SHIFT(drf) ((0?drf) % 32)
+#define REG_DRF_MASK(drf) (0xFFFFFFFF>>(31-((1?drf) % 32)+((0?drf) % 32)))
+#define REG_DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<REG_DRF_SHIFT(NV ## d ## r ## f))
+#define REG_DRF_NUM(d,r,f,n) (((n)&REG_DRF_MASK(NV ## d ## r ## f))<<REG_DRF_SHIFT(NV ## d ## r ## f))
+#define REG_DRF_VAL(d,r,f,v) (((v)>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_DRF_SHIFTMASK(drf) (REG_DRF_MASK(drf)<<(REG_DRF_SHIFT(drf)))
+#define REG_DRF_WIDTH(drf) ((1?drf) - (0?drf) + 1)
+
+#define REG_RD08(ap, addr) (ap)->pDevice->pReadReg008Fn((ap), (addr))
+#define REG_RD16(ap, addr) (ap)->pDevice->pReadReg016Fn((ap), (addr))
+#define REG_RD32(ap, addr) (ap)->pDevice->pReadReg032Fn((ap), (addr))
+#define REG_WR08(ap, addr, val) (ap)->pDevice->pWriteReg008Fn((ap), (addr), (val))
+#define REG_WR16(ap, addr, val) (ap)->pDevice->pWriteReg016Fn((ap), (addr), (val))
+#define REG_WR32(ap, addr, val) (ap)->pDevice->pWriteReg032Fn((ap), (addr), (val))
+#define REG_WR32_UC(ap, addr, val) (ap)->pDevice->pWriteReg032UcFn((ap), (addr), (val))
+#define REG_VALID(ap, addr) (ap)->pDevice->pValidRegFn((ap), (addr))
+
+// Get the address of a register given the Aperture and offset.
+#define REG_GET_ADDR(ap, offset) ((ap)->baseAddress + (offset))
+
+//
+// Macros for register I/O
+//
+
+#define REG_FLD_WR_DRF_NUM(ap,d,r,f,n) REG_WR32(ap,NV##d##r,(REG_RD32(ap,NV##d##r)&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_NUM(d,r,f,n))
+#define REG_FLD_WR_DRF_DEF(ap,d,r,f,c) REG_WR32(ap,NV##d##r,(REG_RD32(ap,NV##d##r)&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_DEF(d,r,f,c))
+#define REG_RD_DRF(ap,d,r,f) (((REG_RD32(ap,NV ## d ## r))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_FLD_TEST_DRF_DEF(ap,d,r,f,c) (REG_RD_DRF(ap,d, r, f) == NV##d##r##f##c)
+#define REG_FLD_TEST_DRF_NUM(ap,d,r,f,n) (REG_RD_DRF(ap,d, r, f) == n)
+#define REG_FLD_IDX_TEST_DRF_DEF(ap,d,r,f,c,i) (REG_IDX_RD_DRF(ap, d, r, i, f) == NV##d##r##f##c)
+
+// Read/write a field or entire register of which there are several copies each accessed via an index
+#define REG_IDX_WR_DRF_NUM(ap,d,r,i,f,n) REG_WR32(ap,NV ## d ## r(i), REG_DRF_NUM(d,r,f,n))
+#define REG_IDX_WR_DRF_DEF(ap,d,r,i,f,c) REG_WR32(ap,NV ## d ## r(i), REG_DRF_DEF(d,r,f,c))
+#define REG_FLD_IDX_WR_DRF_NUM(ap,d,r,i,f,n) REG_WR32(ap,NV##d##r(i),(REG_RD32(ap,NV##d##r(i))&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_NUM(d,r,f,n))
+#define REG_FLD_IDX_WR_DRF_DEF(ap,d,r,i,f,c) REG_WR32(ap,NV##d##r(i),(REG_RD32(ap,NV##d##r(i))&~(REG_DRF_MASK(NV##d##r##f)<<REG_DRF_SHIFT(NV##d##r##f)))|REG_DRF_DEF(d,r,f,c))
+#define REG_IDX_RD_DRF(ap,d,r,i,f) (((REG_RD32(ap,NV ## d ## r(i)))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+#define REG_RD_DRF_IDX(ap,d,r,f,i) (((REG_RD32(ap,NV ## d ## r))>>REG_DRF_SHIFT(NV ## d ## r ## f(i)))&REG_DRF_MASK(NV ## d ## r ## f(i)))
+#define REG_IDX_OFFSET_RD_DRF(ap,d,r,i,o,f) (((REG_RD32(ap,NV ## d ## r(i,o)))>>REG_DRF_SHIFT(NV ## d ## r ## f))&REG_DRF_MASK(NV ## d ## r ## f))
+
+struct IO_DEVICE
+{
+    ReadReg008Fn *pReadReg008Fn;
+    ReadReg016Fn *pReadReg016Fn;
+    ReadReg032Fn *pReadReg032Fn;
+    WriteReg008Fn *pWriteReg008Fn;
+    WriteReg016Fn *pWriteReg016Fn;
+    WriteReg032Fn *pWriteReg032Fn;
+    WriteReg032Fn *pWriteReg032UcFn;
+    ValidRegFn *pValidRegFn;
+};
+
+struct IO_APERTURE
+{
+    PIO_DEVICE pDevice;    // Pointer to module specific IO_DEVICE
+    NvU32 baseAddress;     // register base address
+    NvU32 length;          // length of aperture
+};
+
+//---------------------------------------------------------------------------
+//
+// 
Function prototypes. +// +//--------------------------------------------------------------------------- + +NV_STATUS ioaccessCreateIOAperture +( + IO_APERTURE **ppAperture, + IO_APERTURE *pParentAperture, + IO_DEVICE *pDevice, + NvU32 offset, + NvU32 length +); + +NV_STATUS ioaccessInitIOAperture +( + IO_APERTURE *pAperture, + IO_APERTURE *pParentAperture, + IO_DEVICE *pDevice, + NvU32 offset, + NvU32 length +); + +void ioaccessDestroyIOAperture(IO_APERTURE *pAperture); + +#endif // _IO_ACCESS_H_ diff --git a/src/nvidia/inc/libraries/mmu/gmmu_fmt.h b/src/nvidia/inc/libraries/mmu/gmmu_fmt.h new file mode 100644 index 000000000..b26218cb6 --- /dev/null +++ b/src/nvidia/inc/libraries/mmu/gmmu_fmt.h @@ -0,0 +1,694 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_GMMU_FMT_H_ +#define _NV_GMMU_FMT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * @file gmmu_fmt.h + * + * @brief Defines a light abstraction over GPU MMU (GMMU) HW formats. + * + * There are two main components of the abstraction: + * 1. General topology of the format provided by mmu_fmt.h. + * 2. Description of the fields within PDEs and PTEs described by the + * field_desc.h and GMMU_FMT_P*E structs. + * + * The GMMU_FMT structure wraps these compoments together. + * + * The goals of this abstraction are: + * G1. Allow common page table management code to work across a variety + * of GMMU HW formats. + * G2. Provide as much low-level control as if directly using the HW + * manuals. + * G3. As close to DRF-macro efficiency as possible for critical paths. + * An example of a critical path is writing PTE values in a tight loop. + * On the other hand, PDE value writes (some which have more complex + * formats) occur orders of magnitude less frequently, and thus can + * afford more generality. + * + * One design consideration is how to distinguish + * MMU fields which are specific to certain architectures. + * + * The current approach is to describe the union of all fields + * across the supported formats. + * HW that does not support a given field must initialize the descriptor to + * zero (invalid) which will assert in the field setter/getter if used. + * + * While this introduces risk of "kitchen sink" syndrome, this approach was + * taken for the following reasons: + * 1. There are few fundamental feature differences between GMMU formats. 
+ * 2. GMMU formats change relatively infrequently (e.g. rarely per-chip). + */ + +#include "nvtypes.h" +#include "field_desc.h" +#include "mmu_fmt.h" + +// +// Defines needed by PCF programming in PTE V3. +// Index bits are used when callers set flags. The below defines are only used +// for the HW <-> SW translation. +// + +// +// Note: The following PCF patterns have not been verified in HW +// and have been currently added to help overcome issues wrt +// PCF patterns tested in rmtest. +// +// SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD +// SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE +// SW_MMU_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE +// SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD +// SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD +// SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE +// + +// Used by PDE +#define SW_MMU_PDE_PCF_INVALID_ATS_ALLOWED 0x00000201 +#define SW_MMU_PDE_PCF_INVALID_ATS_NOT_ALLOWED 0x00000001 +#define SW_MMU_PDE_PCF_SPARSE_ATS_ALLOWED 0x00000204 +#define SW_MMU_PDE_PCF_SPARSE_ATS_NOT_ALLOWED 0x00000004 + +#define SW_MMU_PDE_PCF_VALID_CACHED_ATS_ALLOWED 0x00000200 +#define SW_MMU_PDE_PCF_VALID_UNCACHED_ATS_ALLOWED 0x00000220 +#define SW_MMU_PDE_PCF_VALID_CACHED_ATS_NOT_ALLOWED 0x00000000 +#define SW_MMU_PDE_PCF_VALID_UNCACHED_ATS_NOT_ALLOWED 0x00000020 + +// Used by PTEs +#define SW_MMU_PTE_PCF_INVALID 0x00000001 +#define SW_MMU_PTE_PCF_NO_VALID_4KB_PAGE 0x00000002 +#define SW_MMU_PTE_PCF_SPARSE 0x00000004 +#define SW_MMU_PTE_PCF_MAPPING_NOWHERE 0x00000008 + +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACD 0x00000000 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_CACHED_ACE 0x00000010 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACD 0x00000020 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_ATOMIC_UNCACHED_ACE 0x00000030 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_CACHED_ACE 0x00000050 +#define SW_MMU_PTE_PCF_PRIVILEGE_RW_NO_ATOMIC_UNCACHED_ACE 0x00000070 +#define SW_MMU_PTE_PCF_PRIVILEGE_RO_ATOMIC_UNCACHED_ACE 0x000000B0 +#define SW_MMU_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_CACHED_ACE 0x000000D0 +#define SW_MMU_PTE_PCF_PRIVILEGE_RO_NO_ATOMIC_UNCACHED_ACE 0x000000F0 + +#define SW_MMU_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACD 0x00000100 +#define SW_MMU_PTE_PCF_REGULAR_RW_ATOMIC_CACHED_ACE 0x00000110 +#define SW_MMU_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACD 0x00000120 +#define SW_MMU_PTE_PCF_REGULAR_RW_ATOMIC_UNCACHED_ACE 0x00000130 + +#define SW_MMU_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACD 0x00000140 +#define SW_MMU_PTE_PCF_REGULAR_RW_NO_ATOMIC_CACHED_ACE 0x00000150 +#define SW_MMU_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACD 0x00000160 +#define SW_MMU_PTE_PCF_REGULAR_RW_NO_ATOMIC_UNCACHED_ACE 0x00000170 + +#define SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACD 0x00000180 +#define SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_CACHED_ACE 0x00000190 +#define SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACD 0x000001A0 +#define SW_MMU_PTE_PCF_REGULAR_RO_ATOMIC_UNCACHED_ACE 0x000001B0 + +#define SW_MMU_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACD 0x000001C0 +#define SW_MMU_PTE_PCF_REGULAR_RO_NO_ATOMIC_CACHED_ACE 0x000001D0 +#define SW_MMU_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACD 0x000001E0 +#define SW_MMU_PTE_PCF_REGULAR_RO_NO_ATOMIC_UNCACHED_ACE 0x000001F0 + +// +// Defines all toggles in either PTE or PDE PCF space +// Note: please do not change these defines without careful review! 
+// PTE and PDE defines are allowed to collide as they will not be +// processed in the same code paths anyway. +// +#define SW_MMU_PCF_INVALID_IDX 0 // Used for PDE and PTE +#define SW_MMU_PCF_NV4K_IDX 1 // PTE specific +#define SW_MMU_PCF_SPARSE_IDX 2 // Used for PDE and PTE +#define SW_MMU_PCF_NOMAPPING_IDX 3 // PTE specific + +#define SW_MMU_PCF_ACE_IDX 4 // PTE specific +#define SW_MMU_PCF_UNCACHED_IDX 5 // Used for PDE and PTE +#define SW_MMU_PCF_NOATOMIC_IDX 6 // PTE specific +#define SW_MMU_PCF_RO_IDX 7 // PTE specific + +#define SW_MMU_PCF_REGULAR_IDX 8 // PTE specific +#define SW_MMU_PCF_ATS_ALLOWED_IDX 9 // PDE specific + +// Forward declarations. +typedef union GMMU_ENTRY_VALUE GMMU_ENTRY_VALUE; +typedef struct GMMU_FIELD_APERTURE GMMU_FIELD_APERTURE; +typedef struct GMMU_FIELD_ADDRESS GMMU_FIELD_ADDRESS; +typedef struct GMMU_FMT GMMU_FMT; +typedef struct GMMU_FMT_PDE GMMU_FMT_PDE; +typedef struct GMMU_FMT_PDE_MULTI GMMU_FMT_PDE_MULTI; +typedef struct GMMU_FMT_PTE GMMU_FMT_PTE; +typedef struct GMMU_COMPR_INFO GMMU_COMPR_INFO; + +/*! + * Maximum size in bytes of page directory and table entries across + * the supported formats. + */ +#define GMMU_FMT_MAX_ENTRY_SIZE 16 + +/*! + * Maximum number of page levels across the supported formats. + */ +#define GMMU_FMT_MAX_LEVELS 6 + +/*! + * Default version specifier for API args to indicate no preference. + * This is not a real version number and not part of the + * enumeration array below. + */ +#define GMMU_FMT_VERSION_DEFAULT 0 + +/*! + * 2-level (40b VA) format supported Fermi through Maxwell. + * Still supported in Pascal HW as fallback. + */ +#define GMMU_FMT_VERSION_1 1 + +/*! + * 5-level (49b VA) format supported on Pascal+. + */ +#define GMMU_FMT_VERSION_2 2 + +/*! + * Maximum number of MMU versions supported. + */ +#define GMMU_FMT_MAX_VERSION_COUNT 2 + +/*! + * Array of format version numbers for enumeration utility. + */ +extern const NvU32 g_gmmuFmtVersions[GMMU_FMT_MAX_VERSION_COUNT]; + +/*! + * Maximum number of big page sizes supported by a single GPU. + */ +#define GMMU_FMT_MAX_BIG_PAGE_SIZES 2 + +/*! + * Array of big page shifts for enumeration utility. + */ +extern const NvU32 g_gmmuFmtBigPageShifts[GMMU_FMT_MAX_BIG_PAGE_SIZES]; + +/*! + * Convenience type for declaring generic temporary GMMU entry values. + */ +union GMMU_ENTRY_VALUE +{ + NvU8 v8[GMMU_FMT_MAX_ENTRY_SIZE / 1]; + NvU32 v32[GMMU_FMT_MAX_ENTRY_SIZE / 4]; + NvU64 v64[GMMU_FMT_MAX_ENTRY_SIZE / 8]; +}; + +/*! + * Top-level structure describing a GPU MMU format. + */ +struct GMMU_FMT +{ + NvU32 version; + + /*! + * Root of the page level topology (e.g. the root page directory). + */ + const MMU_FMT_LEVEL *pRoot; + + /*! + * Description of page directory entry fields common + * across page directory levels with a single sub-level. + */ + const GMMU_FMT_PDE *pPde; + + /*! + * Description of page directory entry fields common + * across page directory levels with two sub-levels. + */ + const GMMU_FMT_PDE_MULTI *pPdeMulti; + + /*! + * Description of page table entry fields common + * across all page table levels in the topology. + */ + const GMMU_FMT_PTE *pPte; + + /*! + * Indicates if the MMU HW supports sparse through the + * volatile field of each PDE/PTE. + */ + NvBool bSparseHwSupport; +}; + +/*! + * Physical apertures for the supported GMMU formats. + */ +typedef enum +{ + /*! + * Indicates an invalid aperture. + * @note Only supported for GPU PDEs to distinguish invalid sub-sevels. + */ + GMMU_APERTURE_INVALID, + + /*! + * GPU-local video memory (a.k.a. 
FB). + * @note Only supported for GPU PDEs and PTEs. + */ + GMMU_APERTURE_VIDEO, + + /*! + * GPU-peer video memory. + * @note Only supported for GPU PTEs. + * @note Peer index must be initialized in the appropriate address field. + */ + GMMU_APERTURE_PEER, + + /*! + * Non-coherent system memory. + * + * (GPU) MMU will NOT maintain coherence with CPU L2 cache. + * + * Higher-level APIs should only allow this when it is known + * the memory is not cacheable by CPU or the coherency is + * managed explicitly (e.g. w/ flushes in SW). + * Also consider that this path is not necessarily faster. + */ + GMMU_APERTURE_SYS_NONCOH, + + /*! + * Coherent system memory. + * + * (GPU) MMU will snoop CPU L2 cache if possible. + * TODO: Wiki link on arch differences. + * + * This is usually the safer choice over NONCOH since it works + * whether the memory is cached by CPU L2 or not. + * On some CPU architectures going through CPU L2 may + * even be faster than the non-coherent path. + */ + GMMU_APERTURE_SYS_COH, + + // Last value. + GMMU_APERTURE__COUNT +} GMMU_APERTURE; + +/*! + * Aperture field descriptor. + */ +struct GMMU_FIELD_APERTURE +{ + NV_FIELD_ENUM _enum; +}; + +#define INIT_FIELD_APERTURE(pAper, drf, _entries) \ + do { \ + INIT_FIELD_ENUM(&(pAper)->_enum, drf, GMMU_APERTURE__COUNT, _entries); \ + } while(0) + +/*! + * Encode and set a GMMU aperture enum value to a HW aperture field. + */ +static NV_FORCEINLINE void +gmmuFieldSetAperture +( + const GMMU_FIELD_APERTURE *pAperture, + const GMMU_APERTURE value, + NvU8 *pMem +) +{ + nvFieldSetEnum(&pAperture->_enum, value, pMem); +} + +/*! + * Get and decode a HW aperture field value to a GMMU aperture enum value. + */ +static NV_FORCEINLINE GMMU_APERTURE +gmmuFieldGetAperture +( + const GMMU_FIELD_APERTURE *pAperture, + const NvU8 *pMem +) +{ + return (GMMU_APERTURE)nvFieldGetEnum(&pAperture->_enum, pMem); +} + +/*! + * Address field descriptor. + */ +struct GMMU_FIELD_ADDRESS +{ + NV_FIELD_DESC64 desc; + NvU32 shift; +}; + +#define INIT_FIELD_ADDRESS(pAddr, drf, _shift) \ + do { \ + INIT_FIELD_DESC64(&(pAddr)->desc, drf); \ + (pAddr)->shift = _shift; \ + } while(0) + +/*! + * Encode (shift) and set a GMMU address field. + */ +static NV_FORCEINLINE void +gmmuFieldSetAddress +( + const GMMU_FIELD_ADDRESS *pField, + const NvU64 address, + NvU8 *pMem +) +{ + NV_ASSERT_CHECKED_PRECOMP(0 == (address & (NVBIT64(pField->shift) - 1))); + nvFieldSet64(&pField->desc, address >> pField->shift, pMem); +} + +/*! + * Get and decode (shift) a GMMU address field. + */ +static NV_FORCEINLINE NvU64 +gmmuFieldGetAddress +( + const GMMU_FIELD_ADDRESS *pField, + const NvU8 *pMem +) +{ + return nvFieldGet64(&pField->desc, pMem) << pField->shift; +} + +/*! + * Page directory entry (PDE) format. + */ +struct GMMU_FMT_PDE +{ + /*! + * Version information is needed to interpret fields differently. + * Should be always the same as version in GMMU_FMT above. + */ + NvU32 version; + + /*! + * Aperture field indicating which physical address space the sublevel resides. + */ + GMMU_FIELD_APERTURE fldAperture; + + /*! + * Physical address field when aperture is system memory. + */ + GMMU_FIELD_ADDRESS fldAddrSysmem; + + /*! + * Physical address field when aperture is video memory. + */ + GMMU_FIELD_ADDRESS fldAddrVidmem; + + /*! + * Physical address field (used by V3 format only). + */ + GMMU_FIELD_ADDRESS fldAddr; + + /*! + * Indicates GPU reads memory on every access to the + * next page directory/table level. 
+ * + * @note This is not the same as caching, and is ignored for some + * apertures on some chips. + * TODO: Wiki link to explain arch differences. + */ + NV_FIELD_BOOL fldVolatile; + + /*! + * PDE_PCF field for V3 format. + */ + NV_FIELD_DESC32 fldPdePcf; +}; + +/*! + * Get the PDE physical address field format for a given aperture. + */ +const GMMU_FIELD_ADDRESS *gmmuFmtPdePhysAddrFld( + const GMMU_FMT_PDE *pPde, + const GMMU_APERTURE aperture); + +/*! + * Multi (e.g. dual) page directory entry format. + */ +struct GMMU_FMT_PDE_MULTI +{ + /*! + * Reciprocal exponent field for partial sub-level size. + * Minimum size of each sub-level is FullLevelSize / (2 ^ sizeRecipExpMax). + */ + NV_FIELD_DESC32 fldSizeRecipExp; + + /*! + * Per-sub-level information. + */ + GMMU_FMT_PDE subLevels[MMU_FMT_MAX_SUB_LEVELS]; +}; + +/*! + * Retrieve the PDE format corresponding to a particular level and sub-level. + * + * @param[in] pFmt MMU format. + * @param[in] pLevel Level format. + * @param[in] subLevel Sub-level index <= MMU_FMT_MAX_SUB_LEVELS. + * + * @returns Sub-level PDE format or NULL if not a page directory level. + */ +const GMMU_FMT_PDE* gmmuFmtGetPde( + const GMMU_FMT *pFmt, + const MMU_FMT_LEVEL *pLevel, + const NvU32 subLevel); + +/*! + * Page table entry (PTE) format. + */ +struct GMMU_FMT_PTE +{ + /*! + * Version information is needed to interpret fields differently. + * Should be always the same as version in GMMU_FMT above. + */ + NvU32 version; + + /*! + * Field that determines if the PTE is valid. + */ + NV_FIELD_BOOL fldValid; + + /*! + * Aperture field indicating which the physical page resides. + */ + GMMU_FIELD_APERTURE fldAperture; + + /*! + * Physical address field when aperture is system memory. + */ + GMMU_FIELD_ADDRESS fldAddrSysmem; + + /*! + * Physical address field when aperture is video memory. + */ + GMMU_FIELD_ADDRESS fldAddrVidmem; + + /*! + * Physical address field when aperture is peer memory. + */ + GMMU_FIELD_ADDRESS fldAddrPeer; + + /*! + * Peer index field when aperture is peer memory. + */ + NV_FIELD_DESC32 fldPeerIndex; + + /*! + * Indicates GPU reads/writes memory on every access to the page. + * + * @note This is not the same as caching, and is ignored for some + * apertures on some chips. + * TODO: Wiki link to explain arch differences. + */ + NV_FIELD_BOOL fldVolatile; + + /*! + * Indicates to generate a read-only (RO) fault on writes. + * + * @note This does not affect L1 cache access if + * fldWriteDisable is supported. + */ + NV_FIELD_BOOL fldReadOnly; + + /*! + * Indicates to generate a write-only (WO) fault on L1 reads. + * @note Only supported on some GPU architectures. + */ + NV_FIELD_BOOL fldReadDisable; + + /*! + * Indicates to generate a read-only (WO) fault on L1 writes. + * @note Only supported on some GPU architectures. + */ + NV_FIELD_BOOL fldWriteDisable; + + /*! + * Indicates to fault on non-priviledged access. + */ + NV_FIELD_BOOL fldPrivilege; + + /*! + * See HW manuals. + */ + NV_FIELD_BOOL fldEncrypted; + + /*! + * Indicates to lock the PTE in the GPU TLBs, giving precedence over + * unlocked-PTEs. + * TLB invalidate will still evict the PTE. + */ + NV_FIELD_BOOL fldLocked; + + /*! + * TODO: TBD + */ + NV_FIELD_BOOL fldAtomicDisable; + + /*! + * Kind (storage format) field. + */ + NV_FIELD_DESC32 fldKind; + + /*! + * Compression tag field. + */ + NV_FIELD_DESC32 fldCompTagLine; + + /*! + * Compression tag sub-index field. + */ + NV_FIELD_DESC32 fldCompTagSubIndex; + + /*! + * PTE_PCF field for V3 format. 
+ */ + NV_FIELD_DESC32 fldPtePcf; +}; + +/*! + * Determine if an entry is a PTE or PDE based either on its static format or + * dynamic value. + * + * @param[in] pFmt MMU format. + * @param[in] pLevel Level format. + * @param[in] pEntry Entry value of size pLevel->entrySize. + * + * @returns true if the entry is a PTE, false if it is a PDE. + */ +NvBool gmmuFmtEntryIsPte( + const GMMU_FMT *pFmt, + const MMU_FMT_LEVEL *pLevel, + const NvU8 *pEntry); + +/*! + * Get the PTE physical address field format for a given aperture. + */ +const GMMU_FIELD_ADDRESS *gmmuFmtPtePhysAddrFld( + const GMMU_FMT_PTE *pPte, + const GMMU_APERTURE aperture); + +/*! + * GPU compression attributes for a physical surface. + * + * This info will be returned by RM from HW resource alloc API. + */ +struct GMMU_COMPR_INFO +{ + /*! + * log2 of compression page size. + */ + NvU32 compPageShift; + + /*! + * Compressed kind. + */ + NvU32 compressedKind; + + /*! + * Index of the first compression page relative to the surface. + * e.g. if the entire surface is compressed this is 0. + */ + NvU32 compPageIndexLo; + + /*! + * Index of the last compression page relative to the surface. + * e.g. (compPageIndexHi - compPageIndexLo + 1) is the number of comp + * tag lines used for the surface. + * CompPageIndex is tracked in a (1 << compPageShift) granularity + */ + NvU32 compPageIndexHi; + + /*! + * Starting comptag line to use at compPageIndexLo. + * Comptags are used contiguously up to the maximum + * compTagLineMin + (compPageIndexHi - compPageIndexLo). + */ + NvU32 compTagLineMin; + + /*! + * Granularity of comptagline assignment. + * Used for Verif only, Deprecated from Turing + */ + NvU32 compTagLineMultiplier; +}; + +/*! + * Update a PTE value's compression fields based + * on the legacy compression attributes of the surface being mapped. + * + * @param[in] pFmt MMU format. + * @param[in] pLevel Level format. + * @param[in] pCompr Compression info of the physical surface. + * @param[in] surfOffset Offset in bytes into the physical surface. + * @param[in] startPteIndex Starting pte index for comptagSubIndex calculation. + * @param[in] numPages Number of pages (PTEs) to update. + * @param[in,out] pEntries Array of PTE values to update of length + * numPages * pLevel->entrySize. + */ +void gmmuFmtInitPteCompTags( + const GMMU_FMT *pFmt, + const MMU_FMT_LEVEL *pLevel, + const GMMU_COMPR_INFO *pCompr, + const NvU64 surfOffset, + const NvU32 startPteIndex, + const NvU32 numPages, + NvU8 *pEntries); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/mmu/mmu_fmt.h b/src/nvidia/inc/libraries/mmu/mmu_fmt.h new file mode 100644 index 000000000..774cc3d28 --- /dev/null +++ b/src/nvidia/inc/libraries/mmu/mmu_fmt.h @@ -0,0 +1,226 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_MMU_FMT_H_ +#define _NV_MMU_FMT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * @file mmu_fmt.h + * + * @brief Defines an abstraction over general MMU HW formats. + * + * The main goal is to leverage common page table management + * code across a variety MMU HW formats. + */ +#include "nvtypes.h" +#include "nvmisc.h" +#include "compat.h" + +// +// Separate files for the types as they're included by CTRL definitions +// +#include "mmu_fmt_types.h" + +/*! + * Get bitmask of page sizes supported under a given MMU level. + * + * Example: For the root level this returns all the page sizes + * supported by the MMU format. + * + * @returns Bitmask of page sizes (sufficient since page sizes are power of 2). + */ +NvU64 mmuFmtAllPageSizes(const MMU_FMT_LEVEL *pLevel); + +/*! + * Get bitmask of the VA coverages for each level, starting at a given level. + * This is a superset of mmuFmtAllPageSizes, but includes page directory coverage bits. + * + * Example: For the root level this provides a summary of the VA breakdown. + * Each bit corresponds to the shift of a level in the format and + * the number bits set is equal to the total number of levels + * (including parallel sub-levels). + * + * @returns Bitmask of level VA coverages. + */ +NvU64 mmuFmtAllLevelCoverages(const MMU_FMT_LEVEL *pLevel); + +/*! + * Find a level with the given page shift. + * + * @param[in] pLevel Level format to start search. + * @param[in] pageShift log2(pageSize). + * + * @returns The level if found or NULL otherwise. + */ +const MMU_FMT_LEVEL *mmuFmtFindLevelWithPageShift( + const MMU_FMT_LEVEL *pLevel, + const NvU64 pageShift); + +/*! + * Find the parent level of a given level. + * + * @param[in] pRoot Root level format. + * @param[in] pLevel Child level format. + * @param[out] pSubLevel Returns the sub-level of the child within the parent if found. + * Can be NULL if not needed. + * + * @returns Parent level if found or NULL otherwise. + */ +const MMU_FMT_LEVEL *mmuFmtFindLevelParent( + const MMU_FMT_LEVEL *pRoot, + const MMU_FMT_LEVEL *pLevel, + NvU32 *pSubLevel); + +/*! + * Get the next sub-level format in a search for a particular level. + * + * @returns Next level if found or NULL otherwise. + */ +const MMU_FMT_LEVEL *mmuFmtGetNextLevel( + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_FMT_LEVEL *pTargetFmt); + +/*! + * Bitmask of VA covered by a given level. + * e.g. for the root level this is the maximum VAS limit. + */ +static NV_FORCEINLINE NvU64 +mmuFmtLevelVirtAddrMask(const MMU_FMT_LEVEL *pLevel) +{ + return NVBIT64(pLevel->virtAddrBitHi + 1) - 1; +} + +/*! + * Bitmask of VA covered by a single entry within a level. + * e.g. (page size - 1) for PTEs within this level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtEntryVirtAddrMask(const MMU_FMT_LEVEL *pLevel) +{ + return NVBIT64(pLevel->virtAddrBitLo) - 1; +} + +/*! + * Bitmask of VA that contains the entry index of a level. 
+ */ +static NV_FORCEINLINE NvU64 +mmuFmtEntryIndexVirtAddrMask(const MMU_FMT_LEVEL *pLevel) +{ + return mmuFmtLevelVirtAddrMask(pLevel) & ~mmuFmtEntryVirtAddrMask(pLevel); +} + +/*! + * Extract the entry index of a level from a virtual address. + */ +static NV_FORCEINLINE NvU32 +mmuFmtVirtAddrToEntryIndex(const MMU_FMT_LEVEL *pLevel, const NvU64 virtAddr) +{ + return (NvU32)((virtAddr & mmuFmtEntryIndexVirtAddrMask(pLevel)) >> pLevel->virtAddrBitLo); +} + +/*! + * Truncate a virtual address to the base of a level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtLevelVirtAddrLo(const MMU_FMT_LEVEL *pLevel, const NvU64 virtAddr) +{ + return virtAddr & ~mmuFmtLevelVirtAddrMask(pLevel); +} + +/*! + * Round a virtual address up to the limit covered by a level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtLevelVirtAddrHi(const MMU_FMT_LEVEL *pLevel, const NvU64 virtAddr) +{ + return mmuFmtLevelVirtAddrLo(pLevel, virtAddr) + mmuFmtLevelVirtAddrMask(pLevel); +} + +/*! + * Get the virtual address base of an entry index from the base virtual + * address of its level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtEntryIndexVirtAddrLo(const MMU_FMT_LEVEL *pLevel, const NvU64 vaLevelBase, + const NvU32 entryIndex) +{ + NV_ASSERT_CHECKED_PRECOMP(0 == (vaLevelBase & mmuFmtLevelVirtAddrMask(pLevel))); + return vaLevelBase + ((NvU64)entryIndex << pLevel->virtAddrBitLo); +} + +/*! + * Get the virtual address limit of an entry index from the base virtual + * address of its level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtEntryIndexVirtAddrHi(const MMU_FMT_LEVEL *pLevel, const NvU64 vaLevelBase, + const NvU32 entryIndex) +{ + return mmuFmtEntryIndexVirtAddrLo(pLevel, vaLevelBase, entryIndex) + + mmuFmtEntryVirtAddrMask(pLevel); +} + +/*! + * Get the page size for PTEs within a given MMU level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtLevelPageSize(const MMU_FMT_LEVEL *pLevel) +{ + return mmuFmtEntryVirtAddrMask(pLevel) + 1; +} + +/*! + * Extract the page offset of a virtual address based on a given MMU level. + */ +static NV_FORCEINLINE NvU64 +mmuFmtVirtAddrPageOffset(const MMU_FMT_LEVEL *pLevel, const NvU64 virtAddr) +{ + return virtAddr & mmuFmtEntryVirtAddrMask(pLevel); +} + +/*! + * Calculate the maximum number of entries contained by a given MMU level. + */ +static NV_FORCEINLINE NvU32 +mmuFmtLevelEntryCount(const MMU_FMT_LEVEL *pLevel) +{ + return NVBIT32(pLevel->virtAddrBitHi - pLevel->virtAddrBitLo + 1); +} + +/*! + * Calculate the maximum size in bytes of a given MMU level. + */ +static NV_FORCEINLINE NvU32 +mmuFmtLevelSize(const MMU_FMT_LEVEL *pLevel) +{ + return mmuFmtLevelEntryCount(pLevel) * pLevel->entrySize; +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/mmu/mmu_walk.h b/src/nvidia/inc/libraries/mmu/mmu_walk.h new file mode 100644 index 000000000..5a1f7400d --- /dev/null +++ b/src/nvidia/inc/libraries/mmu/mmu_walk.h @@ -0,0 +1,862 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_MMU_WALK_H_ +#define _NV_MMU_WALK_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * @file mmu_walk.h + * + * @brief Defines high-level utilities to manage, update, and query general MMU page tables. + * + * The MMU walk library provides a set of routines to manage and modify + * the page directories and tables of an MMU page level hierarchy. + * + * An instance of an MMU level hierarchy may be associated with a + * virtual address space (VAS) at a higher level, but this association + * is not handled by the library. + * + * @par State: + * The library requires SW state for each hierarchy being managed. + * Library users refer to this state through an opaque + * @ref MMU_WALK pointer, initialized by @ref mmuWalkCreate + * and destroyed by @ref mmuWalkDestroy. + * + * @par Locking: + * The library requires exclusive access to its @ref MMU_WALK + * state during each operation. + * It does NOT support concurrent operations on the same + * @ref MMU_WALK state, but each state is independent. + * Callers must also ensure any resources accessed by its + * callbacks are synchronized appropriately. + * + * @par Synchrony: + * While concurrent updates on the same hiearachy are not supported, + * the walker does support asynchronous/buffered updates. + * If the user callbacks support this strategy, it is possible to + * pipeline update operations with VAS access. + * + * @par Flushing: + * All access to page level memory and HW is abstracted by + * user callbacks. Therefore the walker does not enforce + * or schedule flushes or invalidation of caches/TLBs + * required for each operation. It is up to user callbacks to + * handle these appropriately. + * + * The basic operations of the MMU walker are mapping and unmapping + * ranges of VA, provided by @ref mmuWalkMap and @ref mmuWalkUnmap. + * + * These two operations have few restrictions on the allowed state transitions. + * The goal is to provide flexible primitives that enable the user + * to enforce specific (safer) policies above the walker. + * + * For example, it is possible to unmap a range that only partially + * overlaps existing mapped ranges. Similarly, existing mappings may be + * clobbered by new ones or moved from one page size to another. 
+ * + * However, the walker does ensure that state transitions are complete - + * stale mappings of different page size are cleared and + * intermediate states are minimized (though not fully atomic). + * + * The remaining APIs are for special features and/or tuning. + * + * Sparse textures (a.k.a. tiled resources) is supported through + * @ref mmuWalkSparsify and @ref mmuWalkUnsparsify. + * + * Pre-reservation (lock-down) of page level memory for external use + * and/or to force non-lazy page level allocation is supported through + * @ref mmuWalkReserveEntries and @ref mmuWalkReleaseEntries. + * + * External migration of page level memory is supported through + * @ref mmuWalkMigrateLevelInstance. This is needed only to migrate + * page level instances. The target physical memory mapped by the levels + * can be migrated with @ref mmuWalkMap (user must handle the copy part). + */ + +/* ------------------------ Includes --------------------------------------- */ +#include "nvtypes.h" +#include "nvstatus.h" +#include "mmu_fmt.h" + +/* ------------------------ Version --------------------------------------- */ +/*! + * MMU Walk Api version number. + * version 2 added bIgnoreChannelBusy parameter in MmuWalkCBUpdatePdb + * and mmuWalkMigrateLevelInstance. + * @note - Whenever any of this API changes increment this version number. This + * is required to maintain compatibility with external clients. + */ +#define MMU_WALK_API_VERSION 2 + +/* --------------------------- Datatypes ------------------------------------ */ + +/*! + * Opaque library-defined state for a single page level hierarchy backing a VAS. + */ +typedef struct MMU_WALK MMU_WALK; + +/*! + * Opaque user-defined state describing a block of physical memory. + * The library references these as the backing memory for page level instances. + */ +typedef struct MMU_WALK_MEMDESC MMU_WALK_MEMDESC; + +/*! + * Opaque user-defined state passed to all user callbacks. + */ +typedef struct MMU_WALK_USER_CTX MMU_WALK_USER_CTX; + +/*! + * State that a range of MMU page level entries can be filled to. + * @see MmuWalkCBFillEntries. + */ +typedef enum +{ + /*! + * The range is not valid and will generate a fault on access. + */ + MMU_WALK_FILL_INVALID, + + /*! + * Also known as the "zero" state. + * Writes are dropped and reads return zero when the range is accessed. + * + * @note Not all MMUs support this state/feature. + */ + MMU_WALK_FILL_SPARSE, + + /** + * No valid aligned 4K PTE state for a 64K PTE + * 64K big PTE state indicating that there is no valid 4K aligned PTEs + * + * @note This is not supported pre Volta. + */ + MMU_WALK_FILL_NV4K, +} MMU_WALK_FILL_STATE; + +/*! + * See @ref mmuWalkMap. + */ +typedef struct MMU_MAP_TARGET MMU_MAP_TARGET; + +/*! + * See @ref mmuWalkMap. + */ +typedef struct MMU_MAP_ITERATOR MMU_MAP_ITERATOR; + +/*! + * User callback to allocate backing physical memory for a page level instance. + * + * The contents of the memory need not be initialized. + * The walker will initialize entries before use. + * + * The walker calls this lazily when a page level instance is + * required for the operation taking place. + * It is up to user implementation whether to allocate new memory, + * pre-allocate, pool, etc. + * + * It is also up to user to determine the best physical aperture and + * attributes for the memory (e.g. for GPU whether to place in vidmem/sysmem). + * The walker only modifies the memory contents through the remaining + * callbacks below so access is entirely opaque. 
+ * + * This interface has several parameters that are required for + * specialized tuning of particular MMU formats, but for a simple + * user implementation most can be ignored. + * + * @param[in] vaBase First absolute VA covered by this level instance. + * This (+pLevelFmt) uniquely identifies the instance. + * @param[in] vaLimit Last absolute VA covered by this level instance + * required for the current operation. + * This may be used to implement dynamic growth + * for levels that support it (e.g. root page dir). + * @param[in] bTarget Indicates if this level instance is the target + * for the current operation. + * If bTarget is false it is usually not required + * to allocate memory, but it is required to maintain + * parallel partial size sub-levels. + * @param[in,out] ppMemDesc On input, the existing memory descriptor for this instance + * (may be NULL). + * This must NOT be modified or freed during this + * callback. The walker will call @ref MmuWalkCBLevelFree + * when this memory is no longer required. + * On output, new memory descriptor for this instance. + * Leaving the old memory descriptor is allowed if it + * already provides sufficient backing memory for the given VA range. + * If bTarget is true, this MUST be non-NULL on success. + * @param[in,out] pMemSize Old/new memory size in bytes. + * Can be used for dynamic root page directory growth + * or partial-size page tables. + * @param[in,out] pBChanged Indicates if the backing memory behind *ppMemDesc has + * changed (initially NV_FALSE). + * This must be set if either the *ppMemDesc pointer or + * *pMemSize change, but also allows for changes in + * physical aperture or location to be updated properly. + * + * @returns On failure the current walk operation will be aborted + * and the SW state rolled back. + * @returns The walker will only call this function + * prior to page level and HW state modifications. + */ +typedef NV_STATUS +MmuWalkCBLevelAlloc +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + const NvU64 vaLimit, + const NvBool bTarget, + MMU_WALK_MEMDESC **ppMemDesc, + NvU32 *pMemSize, + NvBool *pBChanged +); + +/*! + * User callback to free backing physical memory of an unused page level instance. + * + * The contents of the memory when freed are undefined. + * It is up to the user to zero the memory if required. + * + * The walker calls this aggressively when a page level instance is no longer + * required (on a commit or discard operation). + * It is up to user implementation whether to free immediately, + * cache for later re-use, etc. + * + * @param[in] vaBase First absolute VA covered by this level instance. + * This (+pLevelFmt) uniquely identifies the instance. + * @param[in] pOldMem Memory descriptor to free. + */ +typedef void +MmuWalkCBLevelFree +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + MMU_WALK_MEMDESC *pOldMem +); + +/*! + * User callback to initialize the HW root page directory pointer (PDB). + * In this context "PDB" stands for "page directory base (address)." + * + * Example: For GPU MMU this should update the instance blocks + * associated with the VAS. + * + * @returns NV_TRUE if the operation completed. + * @returns NV_FALSE if the operation must be retried later. See @ref mmuWalkContinue. + */ +typedef NvBool +MmuWalkCBUpdatePdb +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pRootFmt, + const MMU_WALK_MEMDESC *pRootMem, + const NvBool bIgnoreChannelBusy +); + +/*! 
+ * User callback to initialize a page directory entry to point to one or more + * sub-levels. + * + * @param[in] pLevelFmt Format of the parent level. + * @param[in] pLevelMem Memory descriptor of the parent level. + * @param[in] entryIndex Index of the PDE being initialized. + * @param[in] pSubLevels Array of sub-level memory descriptors of length + * pLevelFmt->numSubLevels. + * + * @returns NV_TRUE if the operation completed. + * @returns NV_FALSE if the operation must be retried later. See @ref mmuWalkContinue. + */ +typedef NvBool +MmuWalkCBUpdatePde +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndex, + const MMU_WALK_MEMDESC **pSubLevels +); + +/*! + * User callback to fill a range of entries with a constant state. + * + * @param[in] pLevelFmt Format of the level. + * @param[in] pLevelMem Memory descriptor of the level. + * @param[in] entryIndexLo First entry index to initialize. + * @param[in] entryIndexHi Last entry index to initialize. + * @param[in] fillState Constant state to initialize to. + * @param[in,out] pProgress Number of entries filled successfully (0 on input). + * + * @returns If (*pProgress == entryIndexHi - entryIndexLo + 1) then + * the operation completed successfully. + * @returns Otherwise the operation must be retried later for + * the remaining entries. See @ref mmuWalkContinue. + */ +typedef void +MmuWalkCBFillEntries +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + const MMU_WALK_FILL_STATE fillState, + NvU32 *pProgress +); + +/*! + * User callback to copy a range of entries between backing page level memory. + * + * @note This interface is only required if dynamic page level growth is + * supported (e.g. for the root page directory or partial page tables) + * or for page level migration (@ref mmuWalkMigrateLevelInstance). + * + * @param[in] pLevelFmt Format of the level. + * @param[in] pMemSrc Source memory descriptor of the level. + * @param[in] pMemDst Destination memory descriptor of the level. + * @param[in] entryIndexLo First entry index to copy. + * @param[in] entryIndexHi Last entry index to copy. + * @param[in,out] pProgress Number of entries copied successfully (0 on input). + * + * @returns If (*pProgress == entryIndexHi - entryIndexLo + 1) then + * the operation completed successfully. + * @returns Otherwise the operation must be retried later for + * the remaining entries. See @ref mmuWalkContinue. + */ +typedef void +MmuWalkCBCopyEntries +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pMemSrc, + const MMU_WALK_MEMDESC *pMemDst, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + NvU32 *pProgress +); + +/*! + * User callback to copy staging buffer to its final destination. + * + * If NULL is passed as the staging buffer on walker creation, + * this callback is ignored. Otherwise, this callback should + * perform a memcopy from the table located at the staging buffer + * memdesc to its final location in the buffer allocated for the + * actual table (in FB or otherwise). + * + * @param[in] pStagingBuffer Staging buffer PD/PTs are written to + * @param[in] pLevelBuffer Memdesc containing final location for + * PD/PT + * @param[in] entryIndexLo Start index of entries to be copied. + * @param[in] entryIndexHi End index (inclusive) of entries to be + * copied. 
+ * @param[in] tableSize Size of the current level of PD/PT, in + * entries. The offsets into the staging + * buffer are the entry indices taken + * modulo tableSize. + * @param[in] entrySize Size of each entry, in bytes + */ +typedef void +MmuWalkCBWriteBuffer +( + MMU_WALK_USER_CTX *pUserCtx, + MMU_WALK_MEMDESC *pStagingBuffer, + MMU_WALK_MEMDESC *pLevelBuffer, + NvU64 entryIndexLo, + NvU64 entryIndexHi, + NvU64 tableSize, + NvU64 entrySize +); + +/*! + * Bundles user-implemented callback pointers. + */ +typedef struct +{ + MmuWalkCBLevelAlloc *LevelAlloc; + MmuWalkCBLevelFree *LevelFree; + MmuWalkCBUpdatePdb *UpdatePdb; + MmuWalkCBUpdatePde *UpdatePde; + MmuWalkCBFillEntries *FillEntries; + MmuWalkCBCopyEntries *CopyEntries; + MmuWalkCBWriteBuffer *WriteBuffer; +} MMU_WALK_CALLBACKS; + + + +/*! + * Flags that affect walk library behavior. + */ +typedef struct +{ + /*! + * Indicates if the user implementation supports asynchronous/buffered + * updates, such that all callbacks that modify page level and/or HW state + * are buffered (e.g. to be committed by a later DMA/copy). + * + * The primary advantage of asynchronous mode is the potential to pipeline + * updates with other work. + * + * The main drawback of asynchronous mode is the amount of space required for + * the buffers is generally not known ahead of time (bounded, but potentially large). + * The walker library supports splitting a given operation into multiple + * pieces, each piece continuing where it left off until an operation is complete. + * This way the user can use a fixed or limited size buffer. + * + * Synchronous update mode (default) *requires* the callbacks to modify page level + * and HW state immediately. This is usually simpler to implement but + * less efficient. + */ + NvBool bAsynchronous : 1; + /** + * @brief Indicates if ATS is enabled. + * @details Should be setup as: + * gvaspaceIsAtsEnabled(pWalk->pUserCtx->pGVAS) + * Currently, from 8/2016, it is used to enable NV4K (no valid + * 4K PTE) in MMU walker + */ + NvBool bAtsEnabled : 1; + /** + * @brief Indicates if the iterative implementation should be used + * @details In certain situations like running the MMU Tracer or running + * on platforms like PPC, the recursive implementation of the + * MMU Walker consumes too much stack space. Enabling this option + * changes the MMU Walker to use iteration instead of recursion to + * reduce stack usage. + */ + NvBool bUseIterative : 1; +} MMU_WALK_FLAGS; + +/*! + * User callback to map a batch of entries during an @ref mmuWalkMap operation. + * + * A "batch" is a contiguous range of entries within a single page level instance. + * It is the responsibility of the callback to track the current + * page index into the target physical memory (if applicable). + * + * @note The reason this interface enforces batching is to amortize the cost + * of the function pointer (callback) flexibility. + * Some architectures (e.g. ARM) have performance issues with indirect + * function calls, and PTE init loop should be the critical path. + * + * @param[in] entryIndexLo First entry index to map. + * @param[in] entryIndexHi Last entry index to map. + * @param[in,out] pProgress Number of entries mapped successfully (0 on input). + * + * @returns If (*pProgress == entryIndexHi - entryIndexLo + 1) then + * the operation completed successfully. + * @returns Otherwise the operation must be retried later for + * the remaining entries. See @ref mmuWalkContinue. 
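+ *
+ * Purely as an illustration (userWritePte() and userNextPage() are
+ * hypothetical helpers), a typical implementation loops over the batch,
+ * writes one formatted PTE per entry from the iterator's current page,
+ * and reports its progress:
+ *
+ *     NvU32 i;
+ *     for (i = entryIndexLo; i <= entryIndexHi; i++)
+ *     {
+ *         userWritePte(pUserCtx, pLevelMem, i, userNextPage(pTarget->pIter));
+ *         (*pProgress)++;
+ *     }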
+ */ +typedef void +MmuWalkCBMapNextEntries +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_MAP_TARGET *pTarget, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + NvU32 *pProgress +); + +/*! + * Describes the physical memory (target) of an @ref mmuWalkMap operation. + */ +struct MMU_MAP_TARGET +{ + /*! + * Target level format. + */ + const MMU_FMT_LEVEL *pLevelFmt; + + /*! + * User-defined iterator for the physical pages being mapped. + * This may be context sensitive - e.g. it can contain a counter to track + * the current page index. The walker will always call this for consecutive + * increasing page indices across a single map operation. + * + * @note The lifetime of this pointer extends until the operation + * completes. Take care that it is not stack allocated if + * using @ref mmuWalkContinue from a different call stack later. + */ + MMU_MAP_ITERATOR *pIter; + + /*! + * Callback to map the batch of entries. + */ + MmuWalkCBMapNextEntries *MapNextEntries; +}; + +/*----------------------------Public Interface--------------------------------*/ + +/*! + * Create an initial walker library SW state. + * + * @param[in] pRootFmt MMU format of the root page level. + * @param[in] pUserCtx User-defined context passed to callbacks. + * @param[in] pCb User-implemented callback bundle. + * @param[in] flags Flags applying to this walker instance. + * @param[out] ppWalk Returned walker state. + * @param[in] pStagingBuffer Optional memdesc to stage PD/PT writes to. + */ +NV_STATUS +mmuWalkCreate +( + const MMU_FMT_LEVEL *pRootFmt, + MMU_WALK_USER_CTX *pUserCtx, + const MMU_WALK_CALLBACKS *pCb, + const MMU_WALK_FLAGS flags, + MMU_WALK **ppWalk, + MMU_WALK_MEMDESC *pStagingBuffer +); + +/*! + * Destroy a walker library SW state. + * + * This will free all remaining memory referenced by the walker, but it + * is recommended to enforce symmetric operations at a higher level + * to catch/report memory leaks. + */ +void +mmuWalkDestroy +( + MMU_WALK *pWalk +); + +/*! + * Map a range of VA to physical memory at an arbitrary page level. + * + * The VA range must be aligned to the MMU's smallest page size and + * to the largest page size of any previous mapping that overlaps. + * The VA range cannot cross a sparse boundary. + * + * @returns See @ref mmuWalkContinue. + */ +NV_STATUS +mmuWalkMap +( + MMU_WALK *pWalk, + const NvU64 vaLo, + const NvU64 vaHi, + const MMU_MAP_TARGET *pTarget +); + +/*! + * Return a range of VA to its unmapped state (invalid or sparse). + * + * The VA range must be aligned to the MMU's smallest page size and + * to the largest page size of any previous mappings that overlap. + * The VA range cannot cross a sparse boundary. + * + * @returns See @ref mmuWalkContinue. + */ +NV_STATUS +mmuWalkUnmap +( + MMU_WALK *pWalk, + const NvU64 vaLo, + const NvU64 vaHi +); + +/*! + * Set the unmapped state of a VA range to the sparse (zero) state. + * + * The VA range must be aligned to the MMU's smallest page size. + * The previous state of the entire range must be unmapped and non-sparse. + * The last parameter indicates whether the staging buffer and the WriteBuffer + * callback should be used. + * + * @returns See @ref mmuWalkContinue. + */ +NV_STATUS +mmuWalkSparsify +( + MMU_WALK *pWalk, + const NvU64 vaLo, + const NvU64 vaHi, + const NvBool bUseStagingBuffer +); + +/*! + * Return a range of VA to the invalid unmapped state. + * + * The VA range must exactly match a previously sparsified range. 
+ * Any mappings remaining within the range are cleared to invalid. + * + * @returns See @ref mmuWalkContinue. + */ +NV_STATUS +mmuWalkUnsparsify +( + MMU_WALK *pWalk, + const NvU64 vaLo, + const NvU64 vaHi +); + +/*! + * Reserve (lock-down) page level entries for a VA range. + * + * The VA range must be aligned to the target page size. + * The range may not overlap with an existing reserved range for the + * target page level, but reservation state between levels is independent. + * + * @note This does not change the effective state of the VA range. + * It only changes the state of the backing page level memory. + * + * @returns See @ref mmuWalkContinue. + */ +NV_STATUS +mmuWalkReserveEntries +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaLo, + const NvU64 vaHi, + const NvBool bInvalidate +); + +/*! + * Release page level entries previously reserved. + * + * The VA range must exactly match an existing reserved range for + * the target page level. + * + * @note This does not change the effective state of the VA range. + * It only changes the state of the backing page level memory. + * + * @returns See @ref mmuWalkContinue. + */ +NV_STATUS +mmuWalkReleaseEntries +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaLo, + const NvU64 vaHi +); + +/*! + * Commit the page directory entries for a VA range. + * + * Traverse the walker and rewrite the PDEs from the SW state. + * This won't trigger any new PDE allocations or state change. + * + * This call won't affect the PTEs. If needed, support can be added later. + * + * The VA range must be aligned to the MMU's smallest page size and + * to the largest page size of any previous mappings that overlap. + * The VA range cannot cross a sparse boundary. + * + * @returns See @ref mmuWalkContinue. + */ +NV_STATUS +mmuWalkCommitPDEs +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelTarget, + const NvU64 vaLo, + const NvU64 vaHi +); + +/*! + * Switches a page level instance from one memory location to another. + * + * This function commits the PDB to the HW if the level instance being + * migrated happens to be the PDB. + * + * @note This differs from @ref mmuWalkMigrateLevelInstance in that it + * copies/does not copy and updates/does not update parent PDE as + * specified by the caller. + * + * @returns + */ +NV_STATUS +mmuWalkModifyLevelInstance +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + MMU_WALK_MEMDESC *pNewMem, + const NvU32 memSize, + const NvBool bCopyEntries, + const NvBool bUpdatePde, + const NvBool bIgnoreChannelBusy +); + +/*! + * Migrate a page level instance from one memory location to another. + * + * The VA must be aligned to the base of an instance that has been + * previously allocated by the walker through @ref MmuWalkCBLevelAlloc. + * + * @note This does not change the effective state of any VA range. + * It only changes the state of the backing page level memory. + * It is a wrapper around @ref mmuWalkModifyLevelInstance. + * + * @returns See @ref mmuWalkContinue. + */ +NV_STATUS +mmuWalkMigrateLevelInstance +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + MMU_WALK_MEMDESC *pNewMem, + const NvU32 memSize, + const NvBool bIgnoreChannelBusy +); + +/*! + * Query a walker SW state for the page level instance memory descriptors + * backing a given virtual address and page size. + * The caller provides an array of memdesc pointers. 
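+ *
+ * For example (sketch only; obtaining the level format and walker state is
+ * outside the scope of this library):
+ *
+ *     const MMU_WALK_MEMDESC *pMem    = NULL;
+ *     NvU32                   memSize = 0;
+ *     status = mmuWalkGetPageLevelInfo(pWalk, pSmallPageTableFmt, virtAddr,
+ *                                      &pMem, &memSize);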
+ */ +NV_STATUS +mmuWalkGetPageLevelInfo +( + const MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 virtAddr, + const MMU_WALK_MEMDESC **ppMemDesc, + NvU32 *pMemSize +); + +/*! + * Force frees all page level instances. We may have to force free page levels + * in case of surprise removal. In the surprise removal case, we may end up + * with many failed unmappings once the GPU is off the bus. This might leave + * some of the MMU_WALK_LEVEL and MMU_WALK_LEVEL_INST objects to be in an + * allocated state. This function just iterates over the level instances at + * each level and force frees everything ignoring any outstanding valid, sparse + * and reserved entries.. + */ +void +mmuWalkLevelInstancesForceFree +( + MMU_WALK *pWalk +); + +/*! + * Continue a walker operation that was previously started. + * + * If a state changing operation on the walker returns + * NV_WARN_MORE_PROCESSING_REQUIRED, the user must call this function + * to continue processing once resources (e.g. pushbuffer space) + * are again available. + * + * Any operation-specific context passed to the walker when the operation + * is started continues to be referenced until the operation completes. + * + * @returns NV_OK if the operation has finished. For asynchronous mode, + * the user must call @ref mmuWalkCommit once the last update + * buffer has committed. + * + * @returns NV_WARN_MORE_PROCESSING_REQUIRED if more processing is + * required. As a pathological case the walker supports a + * 1-entry update buffer, but that is obviously not efficient. + * + * @returns Any other error codes indicate the walker is either + * not in a state that can continue (user bug, ignored) or + * there is an interal bug - either in walker or user + * callbacks. The latter case is a fatal error - there is no + * way for walker to recover from such situations as the + * SW/HW state has potentially lost consistency. + * This would require fully transactional updates + * that would both increase intermediate memory requirements and + * the probability of an internal bug :D. + * The user must decide how to handle this case (either ignore + * and hope for the best or reset/crash the context using + * this state). + */ +NV_STATUS +mmuWalkContinue +( + MMU_WALK *pWalk +); + +/*! + * Commit any pending SW state the walker is tracking and + * free references to unused page level instances. + * + * The user only needs to call this if supporting + * @ref MMU_WALK_FLAGS::bAsynchronous mode. + * Otherwise this will be called automatically once an operation completes. + * + * For buffered updates, the user must call this after the entire + * operation has completed - once @ref mmuWalkContinue returns NV_OK and the final + * update buffer has been committed to memory/HW (only then is it safe + * to free the unused level instances). + */ +void +mmuWalkCommit +( + MMU_WALK *pWalk +); + +/*! + * Get the user context of a walker state. + */ +MMU_WALK_USER_CTX * +mmuWalkGetUserCtx +( + const MMU_WALK *pWalk +); + +/*! + * Set the user context of a walker state. + */ +void +mmuWalkSetUserCtx +( + MMU_WALK *pWalk, + MMU_WALK_USER_CTX *pUserCtx +); + +/*! + * Get the user callbacks of a walker state. + */ +const MMU_WALK_CALLBACKS * +mmuWalkGetCallbacks +( + const MMU_WALK *pWalk +); + +/*! + * Set the user callbacks of a walker state. 
+ */ +void +mmuWalkSetCallbacks +( + MMU_WALK *pWalk, + const MMU_WALK_CALLBACKS *pCb +); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h b/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h new file mode 100644 index 000000000..a236b6ddf --- /dev/null +++ b/src/nvidia/inc/libraries/nvlog/internal/nvlog_printf_internal.h @@ -0,0 +1,149 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017,2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Internal macro definitions for NVLOG_PRINTF + * + * Macro magic example: (Assuming nothing gets compiled out) + * 0) NV_PRINTF(LEVEL_ERROR, "Bla %d %d", arg0, arg1) + * 1) NVLOG_PRINTF(GLOBAL, LEVEL_ERROR, "Bla %d %d", arg0, arg1)) + * - This gets picked up by the parser + * 2) _NVLOG_GET_PRINT + * 3) _NVLOG_GET_PRINT1(NVLOG_, NVLOG_FILEID, __LINE__, PRINT_REL, ___please_include_noprecomp_h___) + * 4) _NVLOG_GET_PRINT2(NVLOG_, 0xaaaaaa, 1024, PRINT_REL, ___please_include_noprecomp_h___) + * 5) NVLOG_0xaaaaaa_1024_PRINT_REL + * 6) NVLOG_PRINT(LEVEL_ERROR, 0xaaaaaa, 0x04001100, arg0, arg1) + * 7) NVLOG_PRINT2(LEVEL_ERROR) (0xaaaaaa, 0x04001100, arg0, arg1) + * 8) NVLOG_PRINT_LEVEL_0x4 (0xaaaaaa, 0x04001100, arg0, arg1) + * 9) nvLog_Printf4 (0xaaaaaa, 0x04001100, arg0, arg1) + * + */ + +// Compile time stubbing out output below NVLOG_LEVEL level +#define _NVLOG_NOTHING(...) ((void)0) + +// +// Use __COUNTER__ if available. If not, we can use __LINE__ since it is also +// monotonically rising. If __COUNTER__ is unavailable, we can't have inline +// functions using NvLog. +// +#if PORT_COMPILER_HAS_COUNTER +#define _NVLOG_COUNTER __COUNTER__ +#else +#define _NVLOG_COUNTER __LINE__ +#endif + +// +// NVLOG_PARSING is defined if the file is being compiled for the parser run +// +#if defined(NVLOG_PARSING) +// +// Since the '@' symbol is not found in C code, using it here makes it trivial +// for the parser code to extract the needed info from preprocessed source. +// +#define _NVLOG_PRINTF2(count, file, line, tag, route, level, format, ...) \ + NVLOG@@@count@@@file@@@line@@@level@@@tag@@@route@@@format@@@__VA_ARGS__@@@ + +#define _NVLOG_PRINTF(tag, route, level, format, ...) 
\ + _NVLOG_PRINTF2(_NVLOG_COUNTER, __FILE__, __LINE__, tag, route, level, format, __VA_ARGS__) + +#elif !NVLOG_ENABLED +#define _NVLOG_PRINTF _NVLOG_NOTHING + +#else // NVLOG_ENABLED && !defined(NVLOG_PARSING) + +#include "nvlog_inc.h" + +#ifdef NVLOG_STRINGS_ALLOWED +#define NVLOG_STRING(...) __VA_ARGS__ +#else +#define NVLOG_STRING(...) +#endif + +// +// One for every debug level, needed for compile time filtering. +// +typedef NV_STATUS NVLOG_PRINTF_PROTO(NvU32, NvU32, ...); +NVLOG_PRINTF_PROTO nvlogPrint_printf0; +NVLOG_PRINTF_PROTO nvlogPrint_printf1; +NVLOG_PRINTF_PROTO nvlogPrint_printf2; +NVLOG_PRINTF_PROTO nvlogPrint_printf3; +NVLOG_PRINTF_PROTO nvlogPrint_printf4; +NVLOG_PRINTF_PROTO nvlogPrint_printf5; +NVLOG_PRINTF_PROTO nvlogPrint_printf6; + +// This one is used for unknown debug level - It has an extra argument +NV_STATUS nvlogPrint_printf(NvU32 dbgLevel, NvU32 file, NvU32 line, ...); + + +#if NVLOG_LEVEL <= 0x0 +#define NVLOG_PRINT_LEVEL_0x0 nvlogPrint_printf0 +#else +#define NVLOG_PRINT_LEVEL_0x0 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x1 +#define NVLOG_PRINT_LEVEL_0x1 nvlogPrint_printf1 +#else +#define NVLOG_PRINT_LEVEL_0x1 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x2 +#define NVLOG_PRINT_LEVEL_0x2 nvlogPrint_printf2 +#else +#define NVLOG_PRINT_LEVEL_0x2 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x3 +#define NVLOG_PRINT_LEVEL_0x3 nvlogPrint_printf3 +#else +#define NVLOG_PRINT_LEVEL_0x3 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x4 +#define NVLOG_PRINT_LEVEL_0x4 nvlogPrint_printf4 +#else +#define NVLOG_PRINT_LEVEL_0x4 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x5 +#define NVLOG_PRINT_LEVEL_0x5 nvlogPrint_printf5 +#else +#define NVLOG_PRINT_LEVEL_0x5 _NVLOG_NOTHING +#endif +#if NVLOG_LEVEL <= 0x6 +#define NVLOG_PRINT_LEVEL_0x6 nvlogPrint_printf6 +#else +#define NVLOG_PRINT_LEVEL_0x6 _NVLOG_NOTHING +#endif +// For when the level isn't known at compile time +#define NVLOG_PRINT_LEVEL_ NVLOG_PRINT_LEVEL_UNKNOWN +#define NVLOG_PRINT_LEVEL_UNKNOWN nvlogPrint_printf + + +#define NVLOG_PRINT2(dbglvl) NVLOG_PRINT_LEVEL_ ## dbglvl +#define NVLOG_PRINT(level, ...) NVLOG_PRINT2(level)(__VA_ARGS__) + +#define _NVLOG_GET_PRINT2(prefix, x) prefix ##x +#define _NVLOG_GET_PRINT1(prefix, id) _NVLOG_GET_PRINT2(prefix, id) +#define _NVLOG_GET_PRINT _NVLOG_GET_PRINT1(NVLOG_PRINT_ID_, _NVLOG_COUNTER) + +#define _NVLOG_PRINTF(tag, route, level, format, ...) _NVLOG_GET_PRINT + +#endif // NVLOG_ENABLED && !defined(NVLOG_PARSING) diff --git a/src/nvidia/inc/libraries/nvlog/nvlog.h b/src/nvidia/inc/libraries/nvlog/nvlog.h new file mode 100644 index 000000000..00debf749 --- /dev/null +++ b/src/nvidia/inc/libraries/nvlog/nvlog.h @@ -0,0 +1,334 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _NVLOG_H_
+#define _NVLOG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "nvtypes.h"
+#include "nvstatus.h"
+
+/******************* Common Debug & Trace Defines **************************\
+*                                                                           *
+* Module: NVLOG.H                                                           *
+*                                                                           *
+\***************************************************************************/
+
+// Include common NvLog definitions
+#include "nvlog_defs.h"
+
+// Include printf definitions
+#include "nvlog/nvlog_printf.h"
+
+extern NVLOG_LOGGER NvLogLogger;
+extern NVLOG_PRINT_LOGGER NvLogPrintLogger;
+
+/********************************/
+/*****  Exported functions  *****/
+/********************************/
+
+
+/**
+ * @brief Global NvLog initialization function
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogInit(void *pData);
+
+/**
+ * @brief Update the NvLog configuration from the registry
+ *
+ */
+void nvlogUpdate(void);
+
+/**
+ * @brief Global NvLog deinitialization function
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogDestroy(void);
+
+/**
+ * @brief Allocate a new NvLog buffer
+ *
+ * @param[in]   size           Size of the buffer to allocate
+ * @param[in]   flags          Buffer flags, uses NVLOG_BUFFER_FLAGS_* DRFs
+ * @param[in]   tag            Tag for the new buffer, to identify it in a dump
+ * @param[out]  pBufferHandle  Handle of the newly created buffer
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogAllocBuffer(NvU32 size, NvU32 flags, NvU32 tag, NVLOG_BUFFER_HANDLE *pBufferHandle, ...);
+
+/**
+ * @brief Deallocate a buffer with the given handle
+ *
+ * @param[in]   hBuffer        Handle of the buffer to deallocate
+ */
+void nvlogDeallocBuffer(NVLOG_BUFFER_HANDLE hBuffer);
+
+/**
+ * @brief Write to a buffer with the given handle
+ *
+ * @param[in]   hBuffer        Handle of the buffer to write to
+ * @param[in]   pData          Pointer to the data to be written
+ * @param[in]   dataSize       Size of the data to be written
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogWriteToBuffer(NVLOG_BUFFER_HANDLE hBuffer, NvU8 *pData, NvU32 dataSize);
+
+/**
+ * @brief Extract a chunk of a buffer
+ *
+ * @param[in]     hBuffer      Handle of the buffer to extract
+ * @param[in]     chunkNum     Index (0-based) of the chunk to extract
+ * @param[in,out] pChunkSize   In  - Size of the chunk to extract
+ *                             Out - Size that was actually extracted, can be less
+ * @param[out]    pDest        Pointer to the memory the chunk will be copied to
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogExtractBufferChunk(NVLOG_BUFFER_HANDLE hBuffer, NvU32 chunkNum, NvU32 *pChunkSize, NvU8 *pDest);
+
+/**
+ * @brief Get the size of a specified buffer
+ *
+ * @param[in]   hBuffer        Handle of the buffer
+ * @param[out]  pSize          Buffer size.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferSize(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pSize);
+
+/**
+ * @brief Get the tag of a specified buffer.
+ *
+ * @param[in]   hBuffer        Handle of the buffer
+ * @param[out]  pTag           Buffer tag.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferTag(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pTag);
+
+/**
+ * @brief Get flags for a specified buffer.
+ *        Flag fields are defined as NVLOG_BUFFER_FLAGS_* in nvlog_defs.h
+ *
+ * @param[in]   hBuffer        Handle of the buffer
+ * @param[out]  pFlags         Buffer flags.
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferFlags(NVLOG_BUFFER_HANDLE hBuffer, NvU32 *pFlags);
+
+/**
+ * @brief Pause/resume logging to a specified buffer
+ *
+ * @param[in]   hBuffer        Handle of the buffer
+ * @param[in]   bPause         NV_TRUE - pause, NV_FALSE - resume
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogPauseLoggingToBuffer(NVLOG_BUFFER_HANDLE hBuffer, NvBool bPause);
+
+/**
+ * @brief Pause/resume logging to all buffers
+ *
+ * @param[in]   bPause         NV_TRUE - pause, NV_FALSE - resume
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogPauseAllLogging(NvBool bPause);
+
+/**
+ * @brief Get the handle of a buffer with the given tag
+ *
+ * @param[in]   tag            Tag of the buffer requested
+ * @param[out]  pBufferHandle  Handle of the buffer
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferHandleFromTag(NvU32 tag, NVLOG_BUFFER_HANDLE *pBufferHandle);
+
+/**
+ * @brief Copy a snapshot of a buffer's current contents into caller memory
+ *
+ * @param[in]   hBuffer        Handle of the buffer to snapshot
+ * @param[out]  pDest          Destination memory for the snapshot
+ * @param[in]   destSize       Size of the destination memory, in bytes
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS nvlogGetBufferSnapshot(NVLOG_BUFFER_HANDLE hBuffer, NvU8 *pDest, NvU32 destSize);
+
+
+/**
+ * @brief Dumps all logs into the kernel print log
+ *
+ * @note This will write to the log even if all other prints are disabled,
+ * including on external release builds. The output will be base64 encoded,
+ * will not be decodable without the database, and will pollute the logs.
+ * Use with caution.
+ *
+ * The format of the dump will be the same as the OS Crash Log dumps.
+ */
+void nvlogDumpToKernelLog(NvBool bDumpUnchangedBuffersOnlyOnce);
+
+//
+// The values returned by CheckFilter functions contain up to four buffers.
+// These indexes are in the local buffer array (i.e. in NVLOG_PRINT_LOGGER)
+// There can be more than 256 total NvLog buffers, but only 256 per subsystem.
+//
+#define NVLOG_FILTER_BUFFER_NONE  0xFF
+
+//
+// NvLog Print functions
+//
+
+/**
+ * @brief Check the filtering rules for a given DBG_PRINTF
+ *
+ * @param[in]   fileId         ID (name hash) of the file
+ * @param[in]   line           Line number of the print
+ * @param[in]   level          Debug level (DBG_LEVEL_*) of the print
+ * @param[in]   module         Debug module (DBG_MODULE_*) of the print
+ *
+ * @return 32 bits to indicate which of the print buffers to log to.
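+ *         Going by the note above NVLOG_FILTER_BUFFER_NONE, this mask packs
+ *         up to four 8-bit indices into the print logger's local buffer
+ *         array, with unused slots set to NVLOG_FILTER_BUFFER_NONE (this
+ *         reading is inferred from the surrounding definitions rather than
+ *         stated here).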
+ */ +NvU32 nvlogPrintCheckFilter(NvU32 fileId, NvU16 line, NvU32 level, NvU32 module); + +/** + * @brief Global NvLog Print initialization function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintInit(void); + +/** + * @brief NvLog Print update function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintUpdate(void); + +/** + * @brief Global NvLog Print deinitialization function + * + * @return NV_OK on success + */ +NV_STATUS nvlogPrintDestroy(void); + +/** + * @brief Global NvLog ETW capture state function + * + * @return NV_OK on success + */ +NV_STATUS nvlogETWCaptureState(void); + +// +// Global initialization macros +// +extern volatile NvU32 nvlogInitCount; +#define NVLOG_INIT(pData) \ + do \ + { \ + if (portAtomicIncrementU32(&nvlogInitCount) == 1) \ + { \ + nvlogInit(pData); \ + } \ + } while (0) + +#define NVLOG_UPDATE() \ + do \ + { \ + if (nvlogInitCount == 1) \ + { \ + nvlogUpdate(); \ + } \ + } while (0) + +#define NVLOG_DESTROY() \ + do \ + { \ + if (portAtomicDecrementU32(&nvlogInitCount) == 0) \ + { \ + nvlogDestroy(); \ + } \ + } while (0) + +/********************************/ +/****** NvLog Filtering *******/ +/********************************/ + +// +// Used both by print and regtrace functions. +// + +/** + * @brief Binary search the range array for a given number + * + * @param[in] ranges Range array to search + * @param[in] numRanges Size of the given array + * @param[in] num Number to search for. + * + * @return Number that is found in the given range. + * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogFindInRange16Array(NVLOG_RANGE_16 *ranges, NvU32 numRanges, NvU16 num); +/** + * @brief Binary search the range array for a given number + * + * @param[in] ranges Range array to search + * @param[in] numRanges Size of the given array + * @param[in] num Number to search for. + * + * @return Number that is found in the given range. + * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogFindInRange32Array(NVLOG_RANGE_32 *ranges, NvU32 numRanges, NvU32 num); + +// Returns the rules for the given fileId-lineNum pair +/** + * @brief Binary search the range array for a given number + * + * @param[in] pFileLineFilter File:line filter to check + * @param[in] fileId ID of the file to search + * @param[in] lineNum Line number to search in the file entry + * + * @return Number that is found for the given file:line. + * If no number is found, returns ~0 (0xFFFFFFFF) + */ +NvU32 nvlogGetFileLineFilterRules(NVLOG_FILELINE_FILTER *pFileLineFilter, NvU32 fileId, NvU16 lineNum); + + +/** + * @brief Dump nvlog to kernel log only if enabled (performs regkey and platform checks) + */ +void nvlogDumpToKernelLogIfEnabled(void); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _NVLOG_H_ diff --git a/src/nvidia/inc/libraries/nvlog/nvlog_printf.h b/src/nvidia/inc/libraries/nvlog/nvlog_printf.h new file mode 100644 index 000000000..aa5fb9bc6 --- /dev/null +++ b/src/nvidia/inc/libraries/nvlog/nvlog_printf.h @@ -0,0 +1,91 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief NvLog call that logs prints. + * + * This is the traditional NvLog component. When enabled, it will also activate + * preprocessing of all source files to detect calls to NVLOG_PRINTF, and + * generate a database to be used for decoding. + * + * This file just defines the macros used by NV_PRINTF and others clients + */ + +#ifndef _NVLOG_PRINTF_H_ +#define _NVLOG_PRINTF_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef NVLOG_ENABLED +/// @brief If zero, most of NvLog will be compiled out +#define NVLOG_ENABLED 0 +#endif + +#ifndef NVLOG_LEVEL +/// @brief Level below which all prints will be compiled out. +#define NVLOG_LEVEL 2 +#endif + +/// @brief Maximum number of arguments to NVLOG_PRINTF +#define NVLOG_MAX_ARGS 20 + +/** + * @brief Log this printf in NvLog internal binary buffers + * + * These calls are picked up by the NvLog parser, and are replaced with custom + * calls from the generated header. See @page nvlog-parser for details. + * + * @param tag - An identifier to help with offline filtering. Doesn't need to + * be defined anywhere. + * @param route - 8bit mask of buffers the print will be routed to. + * Use NVLOG_BUFFER_XXX constants + * @param level - Level at which to print. Calls with level < NVLOG_LEVEL will + * be compiled out. + * @param format - printf-like format string + * @param ... - printf arguments + */ +#define NVLOG_PRINTF(tag, route, level, format, ...) 
_NVLOG_PRINTF(tag, route, level, format, __VA_ARGS__) + +#define NVLOG_BUFFER_NULL 0x01 +#define NVLOG_BUFFER_RM 0x02 +#define NVLOG_BUFFER_RM_BOOT 0x04 +#define NVLOG_BUFFER_ETW 0x08 +#define NVLOG_BUFFER_KMD_BOOT 0x10 +#define NVLOG_BUFFER_KMD 0x20 +#define NVLOG_BUFFER_ERROR 0x40 +#define NVLOG_BUFFER_DD 0x80 + +#define NVLOG_ROUTE_RM (NVLOG_BUFFER_RM | NVLOG_BUFFER_RM_BOOT | NVLOG_BUFFER_ETW) +#define NVLOG_ROUTE_KMD (NVLOG_BUFFER_KMD | NVLOG_BUFFER_KMD_BOOT | NVLOG_BUFFER_ETW) +#define NVLOG_ROUTE_DD (NVLOG_BUFFER_DD | NVLOG_BUFFER_KMD_BOOT | NVLOG_BUFFER_ETW) + +#include "nvlog/internal/nvlog_printf_internal.h" + +#ifdef __cplusplus +} //extern "C" +#endif + +#endif // _NVLOG_PRINTF_H_ diff --git a/src/nvidia/inc/libraries/nvoc/object.h b/src/nvidia/inc/libraries/nvoc/object.h new file mode 100644 index 000000000..285c00911 --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/object.h @@ -0,0 +1,126 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#include "g_object_nvoc.h" + +#ifndef _NVOC_OBJECT_H_ +#define _NVOC_OBJECT_H_ + +#include "nvtypes.h" +#include "nvstatus.h" + +#include "nvoc/prelude.h" + +class Object; +struct NVOC_CLASS_INFO; + +/*! + * Tracks the head of an object's child list, and the next object in its + * parent's child list. + */ +struct NVOC_CHILD_TREE +{ + Object *pChild; + Object *pSibling; +}; + +//! The base class of all instantiable NVOC objects. +NVOC_PREFIX(obj) class Object +{ +public: + + //! Runtime ownership tree parent node. + Object *pParent; + + //! Runtime ownership tree child and sibling links. + struct NVOC_CHILD_TREE childTree; + + //! IP Version value. Temporary until NVOC-style HALs are rolled out. + NvU32 ipVersion; + + /*! + * @brief Add pChild as a child of this object. + * + * This method is wrapped by objCreate and typically doesn't need to be + * called directly. + * + * Asserts if pChild is already a child of any object. + */ + void objAddChild(Object *pObj, Object *pChild); + + /*! + * @brief Remove pChild as a child of this object. + * + * This method is wrapped by objDelete and typically doesn't need to be + * called directly. + * + * Asserts if pChild is not a child of this object. + */ + void objRemoveChild(Object *pObj, Object *pChild); + + /*! 
+ * @brief Gets the head of this object's child list from the child tree. + * + * This is a constant-time operation. + */ + Object *objGetChild(Object *pObj); + + /*! + * @brief Gets the next child of this object's parent from the child tree. + * + * This is a constant-time operation. + */ + Object *objGetSibling(Object *pObj); + + /*! + * @brief Gets the direct parent of this object. + * + * This is a constant-time operation. + */ + Object *objGetDirectParent(Object *pObj); +}; + +// +// IP versioning definitions are temporary until NVOC halspec support is +// finished. +// +// IP_VERSION format as defined by the hardware engines. +// A _MAJOR value of 0 means the object has no version number. +// + +#define NV_ODB_IP_VER_DEV 7:0 /* R-IVF */ +#define NV_ODB_IP_VER_ECO 15:8 /* R-IVF */ +#define NV_ODB_IP_VER_MINOR 23:16 /* R-IVF */ +#define NV_ODB_IP_VER_MAJOR 31:24 /* R-IVF */ + +#define IPVersion(pObj) staticCast((pObj), Object)->ipVersion +#define IsIPVersionValid(pObj) (DRF_VAL(_ODB, _IP_VER, _MAJOR, IPVersion(pObj)) != 0) +#define IsIPVersionOrLater(pObj, v0) (IPVersion(pObj) >= (v0)) +// v0 .. v1 inclusive +#define IsIPVersionInRange(pObj, v0, v1) ((IPVersion(pObj) >= (v0)) && (IPVersion(pObj) <= (v1))) + +#endif diff --git a/src/nvidia/inc/libraries/nvoc/prelude.h b/src/nvidia/inc/libraries/nvoc/prelude.h new file mode 100644 index 000000000..1e1b14b4c --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/prelude.h @@ -0,0 +1,255 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#ifndef _NVOC_PRELUDE_H_ +#define _NVOC_PRELUDE_H_ + +#include "utils/nvmacro.h" + +/* Calls the macro named in the first parameter with the rest of the given arguments. Written + * like this instead of just func(__VA_ARGS__) because some preprocessors treat __VA_ARGS__ + * as a single argument even when it contains commas. */ +#define NVOC_PP_CALL(func, ...) NV_EXPAND(func NV_EXPAND() (__VA_ARGS__)) + +/*! Macro to help specify prefixes on NVOC classes */ +#define NVOC_PREFIX(x) [[nvoc::prefix(x)]] + +/*! Macro to help specify NVOC classes attributes */ +#define NVOC_ATTRIBUTE(str) [[nvoc::classAttributes("\""#str"\"")]] + +/*! 
Macro to help specify properties on NVOC classes */ +#define NVOC_PROPERTY [[nvoc::property]] + +#ifndef NV_PRINTF_STRINGS_ALLOWED +#if defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD) +#define NV_PRINTF_STRINGS_ALLOWED 1 +#else +#define NV_PRINTF_STRINGS_ALLOWED 0 +#endif +#endif + +/*! + * @brief Gets a pointer to the NVOC_CLASS_INFO for the named NVOC class. + * + * This is similar to C++'s typeid macro. + */ +#define classInfo(x) reinterpretCast((&__nvoc_class_def_##x), const NVOC_CLASS_INFO *) + +/*! + * @brief Gets a unique integer identifier for the named NVOC class. + * + * This is similar to the hash_code of C++'s std::type_info. + */ +#define classId(x) __nvoc_class_id_##x + + +/*! NVOC class IDs will be no wider than NVOC_CLASS_ID_MAX_WIDTH bits. */ +#define NVOC_CLASS_ID_MAX_WIDTH 24 + +/*! + * @brief Statically casts pObj to a TYPE*. Fails to compile if the cast is invalid. + * + * This is similar to C++'s static_cast(pObj). + */ +#define staticCast(pObj, TYPE) ((pObj)? __staticCast_##TYPE((pObj)) : NULL) + +/*! + * @brief Statically casts pObj to a TYPE*. Fails to compile if the cast is invalid. + * + * This version staticCast() skips pointer check as a trade of better binary size and + * runtime efficiency. The caller is responsible to ensure pObj can never be NULL. + */ +#define staticCastNoPtrCheck(pObj, TYPE) __staticCast_##TYPE((pObj)) + +/*! + * @brief Dynamically casts pObj to a TYPE*. Returns NULL if the cast is invalid. + * + * This is similar to C++'s dynamic_cast(pObj). + */ +#define dynamicCast(pObj, TYPE) (__dynamicCast_##TYPE((pObj))) + +/*! + * @brief Reinterpret e as if it had type T. + * + * This is similar to C++'s reinterpret_cast(e). + */ +#define reinterpretCast(e, T) ((T)(e)) + +/*! + * NVOC_OBJ_CREATE_FLAGS* are used with objCreateWithFlags()/objCreateDynamicWithFlags(). + * + * NVOC_OBJ_CREATE_FLAGS_NONE + * Default behavior + * NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY + * Use halspec from parent without adding the new created object the child tree + */ +#define NVOC_OBJ_CREATE_FLAGS_NONE 0x0000u +#define NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY 0x0001u + +/*! + * @brief Create and construct a new object by class name. + * + * @param[out] ppNewObj A pointer to the new object + * @param[in] pParent A pointer to the object that should be the new object's parent, + * or NULL if the new object has no parent. + * @param[in] NAME The name of the class of the new object. + */ +/* MSVC suppresses trailing commas at the final expansion but not at intermediate expansions, so we + * need to put our trailing comma inside another macro so it will be eaten. Normally, one would + * just wrap the trailing comma and __VA_ARGS__ in NV_EXPAND, but Bullseye's preprocessor has + * trouble dealing with that properly, so instead we use an indirect macro caller that Bullseye + * seems to handle properly. This avoids producing a "too many arguments for macro" warning (C4002). */ +#define objCreate(ppNewObj, pParent, NAME, ...) \ + NVOC_PP_CALL(__objCreate_##NAME, (ppNewObj), (pParent), (NVOC_OBJ_CREATE_FLAGS_NONE), ##__VA_ARGS__) +#define objCreateWithFlags(ppNewObj, pParent, NAME, flags, ...) \ + NVOC_PP_CALL(__objCreate_##NAME, (ppNewObj), (pParent), (flags), ##__VA_ARGS__) + +/*! + * @brief Destruct and free an object and all of its children recursively. + * + * In C++, fields are destructed in reverse syntactic order. Similarly, in + * NVOC, runtime children are deleted in the reverse of the order they were + * added (usually reverse creation order). 
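+ *
+ * Hypothetical usage sketch (SOMECLASS stands in for any concrete NVOC class,
+ * and pParentObj for an existing parent object):
+ *
+ *     SOMECLASS *pObj   = NULL;
+ *     NV_STATUS  status = objCreate(&pObj, pParentObj, SOMECLASS);
+ *     if (status == NV_OK)
+ *     {
+ *         // ... use the object ...
+ *         objDelete(pObj);
+ *     }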
+ */ +#define objDelete(pObj) __nvoc_objDelete(staticCast((pObj), Dynamic)) + +/*! + * @brief Get the given object's class ID + */ +#define objGetClassId(pObj) __nvoc_objGetClassId(staticCast((pObj), Dynamic)) + +/*! + * @brief Get the given object's class info + */ +#define objGetClassInfo(pObj) __nvoc_objGetClassInfo(staticCast((pObj), Dynamic)) + +#if NV_PRINTF_STRINGS_ALLOWED +/*! + * Get the given class's name from its class info. + */ +#define objGetClassName(pObj) (objGetClassInfo((pObj))->name) +#endif + +/*! + * @brief Create and construct a new object by class ID. + * + * @param[out] ppNewObj A pointer to the new object + * @param[in] pParent A pointer to the object that should be the new object's parent, + * or NULL if the new object has no parent. + * @param[in] pClassInfo A pointer to the NVOC_CLASS_INFO for the desired class. + */ +#define objCreateDynamic(ppNewObj, pParent, pClassInfo, ...) \ + __nvoc_objCreateDynamic((ppNewObj), staticCast((pParent), Dynamic), \ + (pClassInfo), (NVOC_OBJ_CREATE_FLAGS_NONE), ##__VA_ARGS__) +#define objCreateDynamicWithFlags(ppNewObj, pParent, pClassInfo, flags, ...) \ + __nvoc_objCreateDynamic((ppNewObj), staticCast((pParent), Dynamic), \ + (pClassInfo), (flags), ##__VA_ARGS__) + +/*! + * @brief Cast any object supporting Run-Time Type Information (RTTI) to 'Dynamic'. + * + * Since '__nvoc_rtti' is always first, pObj == &(pObj)->__nvoc_rtti + * The purpose of this expression is to force a compile-time error if + * pObj does not contain RTTI information + */ +#define __staticCast_Dynamic(pObj) ((Dynamic*) &(pObj)->__nvoc_rtti) + + +/* + * Helper macros for "pObject->getProperty(pObject, prop)" + * + * The NVOC property macros are currently based on IOM's property macros. + * + * Property inheritance for IOM (Improved Object Model) is done by introducing + * 'prop##_BASE_CAST' and 'prop##_BASE_NAME'. For IOM, those are defined in + * generated file g_odb.h. For NVOC, they are defined in each class's generated + * header. + * + * In non-inheritance cases, getProperty/setProperty functions are equal to: + * #define getProperty(pObj, prop) prop // or pdb.prop for IOM + * #define setProperty(pObj, prop, val) prop = val // or pdb.prop = val for IOM + * + * Once the IOM model is phased out, these will become: + * #define getProperty(pObj, prop) pObj->prop + * #define setProperty(pObj, prop, val) pObj->prop = val + */ +#define getProperty(pObj, prop) prop##_BASE_CAST prop##_BASE_NAME +#define setProperty(pObj, prop, val) prop##_BASE_CAST prop##_BASE_NAME = val + +/*! Special NULL pointer for macros that expect to staticCast their parameter */ +#define NVOC_NULL_OBJECT ((Object*) NULL) + + +/*! + * @brief Wrapper of the Run-Time Type Information (RTTI) pointer. + * + * @details In effect, this is the base class (not Object) for all classes + * that support RTTI because the RTTI pointer is always first in memory, + */ +typedef struct { + const struct NVOC_RTTI *__nvoc_rtti; +} Dynamic; + +typedef NvU32 NVOC_CLASS_ID; + +typedef struct NVOC_RTTI_PROVIDER { + NvU32 dummy; +} NVOC_RTTI_PROVIDER; + +typedef const NVOC_RTTI_PROVIDER *NVOC_RTTI_PROVIDER_ID; + +//! Public metadata about an NVOC class definition. +typedef struct NVOC_CLASS_INFO +{ + const NvU32 size; + const NVOC_CLASS_ID classId; + const NVOC_RTTI_PROVIDER_ID providerId; +#if NV_PRINTF_STRINGS_ALLOWED + const char *name; +#endif +} NVOC_CLASS_INFO; + +/*! 
+ * @brief Wrapper of private field and private function + */ +#if defined(__clang__) // clang +#define NVOC_PRIVATE_FIELD(x) __attribute__((unavailable(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((unavailable(#x " is a private function"))) x +#elif defined(__INTEL_COMPILER) // icc +#pragma warning(error: 1786) // treat deprecated as error (globally affected) +#define NVOC_PRIVATE_FIELD(x) __attribute__((deprecated(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((deprecated(#x " is a private function"))) x +#elif defined(__GNUC__) || defined(__GNUG__) // gcc +#pragma GCC diagnostic error "-Wdeprecated-declarations" // treat deprecated as error (globally affected) +#define NVOC_PRIVATE_FIELD(x) __attribute__((deprecated(#x " is a private field"))) x +#define NVOC_PRIVATE_FUNCTION(x) __attribute__((error(#x " is a private function"))) x +#else // other +#define NVOC_PRIVATE_FIELD(x) x##_PRIVATE +#define NVOC_PRIVATE_FUNCTION(x) x##_PRIVATE +#endif + +#endif diff --git a/src/nvidia/inc/libraries/nvoc/rtti.h b/src/nvidia/inc/libraries/nvoc/rtti.h new file mode 100644 index 000000000..37cca27d0 --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/rtti.h @@ -0,0 +1,77 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. + */ + +#ifndef _NVOC_RTTI_H_ +#define _NVOC_RTTI_H_ + +#include "nvtypes.h" +#include "nvoc/runtime.h" +#include "nvport/inline/util_valist.h" + +typedef NV_STATUS (*NVOC_DYNAMIC_OBJ_CREATE)(Dynamic**, Dynamic *pParent, NvU32 createFlags, va_list); +typedef void (*NVOC_DYNAMIC_DTOR)(Dynamic*); + +// struct NVOC_CLASS_METADATA +// { +// // NvBool isMixedMode; +// // NvS32 ring; +// // const struct NVOC_EXPORTS *const pExportedClasses; +// }; + +// MSVC warning C4200 on "NVOC_CASTINFO::relatives": zero-sized array in struct/union +// Ignore the warning on VS2013+ +//! List of valid casts, needed for dynamicCast. +struct NVOC_CASTINFO +{ + const NvU32 numRelatives; + const struct NVOC_RTTI *const relatives[]; +}; + + + +//! Items unique to each NVOC class definition. Used to identify NVOC classes. 
+struct NVOC_CLASS_DEF { + const NVOC_CLASS_INFO classInfo; // public, defined in runtime.h; contains classId, size, and name + const NVOC_DYNAMIC_OBJ_CREATE objCreatefn; + const struct NVOC_CASTINFO *const pCastInfo; + const struct NVOC_EXPORT_INFO* const pExportInfo; +}; + +//! Items used to identify base class subobjects. +struct NVOC_RTTI // one per derived-ancestor relationship pair (and every derived class is also its own ancestor) +{ + const struct NVOC_CLASS_DEF *const pClassDef; // drives dynamicCast and objCreateDynamic, one per class + const NVOC_DYNAMIC_DTOR dtor; // __nvoc_destructFromBase for base substructures, real destructor for derived + const NvU32 offset; // 0 for derived +}; + + +void __nvoc_initRtti(Dynamic *pNewObject, const struct NVOC_CLASS_DEF *pClassDef); + + + +#endif diff --git a/src/nvidia/inc/libraries/nvoc/runtime.h b/src/nvidia/inc/libraries/nvoc/runtime.h new file mode 100644 index 000000000..410493a89 --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/runtime.h @@ -0,0 +1,115 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * This file is part of the NVOC runtime. 
+ */ + +#ifndef _NVOC_RUNTIME_H_ +#define _NVOC_RUNTIME_H_ + +#include "nvport/nvport.h" +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" + +#include "nvoc/object.h" + +#ifdef __cplusplus +extern "C" { +#endif + +NVOC_CLASS_ID __nvoc_objGetClassId(Dynamic *pObj); +const NVOC_CLASS_INFO *__nvoc_objGetClassInfo(Dynamic *pObj); + +void __nvoc_objDelete(Dynamic *pDynamic); + +NV_STATUS __nvoc_objCreateDynamic( + Dynamic **pNewObject, + Dynamic *pParent, + const NVOC_CLASS_INFO *pClassInfo, + NvU32 createFlags, + ...); + +Dynamic *__nvoc_dynamicCast(Dynamic *pFromObj, const NVOC_CLASS_INFO *pClassInfo); +Dynamic *__nvoc_dynamicCastById(Dynamic *pFromObj, NVOC_CLASS_ID classId); + +void __nvoc_destructFromBase(Dynamic *pDynamic); + +Dynamic *fullyDeriveWrapper(Dynamic *pDynamic); + +extern const NVOC_RTTI_PROVIDER __nvoc_rtti_provider; + +#define objFindAncestor(pObj, classId) objFindAncestor_IMPL(staticCast((pObj), Dynamic), classId) +#define objDynamicCastById(pObj, classId) objDynamicCastById_IMPL(staticCast((pObj), Dynamic), classId) +#define objFindAncestorOfType(TYPE, pObj) dynamicCast(objFindAncestor((pObj), classId(TYPE)), TYPE) +#define __nvoc_fullyDerive(pObj) __nvoc_fullyDerive_IMPL(staticCast((pObj), Dynamic)) +#define objFullyDerive(pObj) fullyDeriveWrapper(staticCast((pObj), Dynamic)) +#define objGetExportedMethodDef(pObj, methodId) objGetExportedMethodDef_IMPL(pObj, methodId) + +//! Contains data needed to call the exported method from kernel +struct NVOC_EXPORTED_METHOD_DEF +{ + void (*pFunc) (void); // Pointer to the method itself + NvU32 flags; // Export flags used for permission, method attribute verification (eg. NO_LOCK, PRIVILEGED...) + NvU32 accessRight; // Access rights required for this method + NvU32 methodId; // Id of the method in the class. Used for method identification. + NvU32 paramSize; // Size of the parameter structure that the method takes as the argument (0 if it takes no arguments) + const NVOC_CLASS_INFO* pClassInfo; // Class info for the parent class of the method + +#if NV_PRINTF_STRINGS_ALLOWED + const char *func; // Debug info +#endif +}; + +struct NVOC_EXPORT_INFO { + NvU32 numEntries; // Number of entries + const struct NVOC_EXPORTED_METHOD_DEF *pExportEntries; //An array of exported methods +}; + +/*! + * @brief Finds the closest ancestor of this object with the given class ID. + * + * This is a linear-time operation. + */ +Dynamic *objFindAncestor_IMPL(Dynamic *pDynamic, NVOC_CLASS_ID classId); + +/*! + * @brief Finds the exported method with the given method ID. + * + * If the method isn't found in the derived class, we search the ancestors. + * Returns NULL if the search is unsuccessful. + * This is a linear-time operation. + */ +const struct NVOC_EXPORTED_METHOD_DEF* objGetExportedMethodDef_IMPL(Dynamic* pObj, NvU32 methodId); + +/*! + * @brief Dynamic cast by class id + */ +Dynamic *objDynamicCastById_IMPL(Dynamic *pFromObj, NVOC_CLASS_ID classId); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif diff --git a/src/nvidia/inc/libraries/nvoc/utility.h b/src/nvidia/inc/libraries/nvoc/utility.h new file mode 100644 index 000000000..9adb2ac6e --- /dev/null +++ b/src/nvidia/inc/libraries/nvoc/utility.h @@ -0,0 +1,28 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVOC_UTILITY_H_ +#define _NVOC_UTILITY_H_ + + +#endif // _NVOC_UTILITY_H_ diff --git a/src/nvidia/inc/libraries/nvport/atomic.h b/src/nvidia/inc/libraries/nvport/atomic.h new file mode 100644 index 000000000..8922b730c --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/atomic.h @@ -0,0 +1,418 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Atomic module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_ATOMIC_H_ +#define _NVPORT_ATOMIC_H_ +/** + * @defgroup NVPORT_ATOMIC Atomic operations + * + * @brief This module contains atomic operations + * + * @note that mixing atomic and non-atomic modifications to the same memory + * location can have undefined behavior that varies from platform to platform. + * You are better off not trying it. + * + * @note All atomic operations actually impose at least a compiler memory + * barrier - either just on the variable manipulated, or on all globally + * accessible variables. This is just a consequence of the current + * implementations, and should not be relied on. If you need a memory barrier, + * use @ref portAtomicMemFenceFull. 
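+ *
+ * Illustrative sketch (editorial example, not part of the original header):
+ * a bounded increment built on the compare-and-swap primitive declared
+ * below. `pCount` and `LIMIT` are assumed names.
+ *
+ * ~~~{.c}
+ * NvS32 oldVal, newVal;
+ * do
+ * {
+ *     oldVal = *pCount;
+ *     if (oldVal >= LIMIT)
+ *         break;                    // already at the limit, nothing to do
+ *     newVal = oldVal + 1;
+ * } while (!portAtomicCompareAndSwapS32(pCount, newVal, oldVal));
+ * ~~~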
+ * + * @{ + */ + +/** See @ref PORT_UTIL_INLINE */ +#ifndef PORT_ATOMIC_INLINE +#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) +#define PORT_ATOMIC_INLINE PORT_INLINE +#if NVCPU_IS_64_BITS +#define PORT_ATOMIC64_INLINE PORT_INLINE +#else +#define PORT_ATOMIC64_INLINE +#endif +#else +#define PORT_ATOMIC_INLINE +#define PORT_ATOMIC64_INLINE +#endif +#endif + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Atomic addition on a signed 32b integer + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal += val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicAddS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicAddS32 +PORT_ATOMIC_INLINE NvU32 portAtomicAddU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic subtraction on a signed 32b integer + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal -= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicSubS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicSubS32 +PORT_ATOMIC_INLINE NvU32 portAtomicSubU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic set a signed 32b integer to the specified value + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal = val; + * ~~~ + * + * Once complete `val` will be visible in the location pointed to by `pVal` by all + * threads on all processors. + * + * @note On some platforms this operation is different from other atomic + * operations with respect to memory ordering. The best that can be guaranteed + * for this operation that it will behave as an acquire barrier. This + * means that operations occurring after this one in program order are + * guaranteed to not occur until the atomic operation is complete. It also + * means that it does not guarantee that previous stores are visible, or that + * previous loads have been satisfied. + * + */ +PORT_ATOMIC_INLINE void portAtomicSetS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicSetS32 +PORT_ATOMIC_INLINE void portAtomicSetU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic compare-and-swap on a signed 32b integer. + * + * A compare and swap is an atomic operation that reads a memory location, + * compares it to `oldVal` and if they are equal sets the memory location to + * `newVal`. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * if (*pVal == oldVal) + * { + * *pVal = newVal; + * return NV_TRUE; + * } + * return NV_FALSE; + * ~~~ + * + * @return NV_TRUE if the operation modified the target of `pVal`, NV_FALSE otherwise + * + */ +PORT_ATOMIC_INLINE NvBool portAtomicCompareAndSwapS32(volatile NvS32 *pVal, NvS32 newVal, NvS32 oldVal); +/// @brief Unsigned version of @ref portAtomicCompareAndSwapS32 +PORT_ATOMIC_INLINE NvBool portAtomicCompareAndSwapU32(volatile NvU32 *pVal, NvU32 newVal, NvU32 oldVal); + +/** + * @brief Atomic increments of a signed 32b integer. + * + * Adds one to the memory location pointed to by the parameter and returns the + * resulting value. 
+ * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * ++(*pVal); + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + * + */ +PORT_ATOMIC_INLINE NvS32 portAtomicIncrementS32(volatile NvS32 *pVal); +/// @brief Unsigned version of @ref portAtomicIncrementS32 +PORT_ATOMIC_INLINE NvU32 portAtomicIncrementU32(volatile NvU32 *pVal); + +/** + * @brief Atomic decrements of a signed 32b integer. + * + * Subtracts one to the memory location pointed to by the parameter and returns + * the resulting value. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * --(*pVal); + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicDecrementS32(volatile NvS32 *pVal); +/// @brief Unsigned version of @ref portAtomicDecrementS32 +PORT_ATOMIC_INLINE NvU32 portAtomicDecrementU32(volatile NvU32 *pVal); + + +/** + * @brief Atomic bitwise XOR on a signed 32b integer. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal ^= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicXorS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicXorS32 +PORT_ATOMIC_INLINE NvU32 portAtomicXorU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic bitwise OR on a signed 32b integer. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal |= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicOrS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicOrS32 +PORT_ATOMIC_INLINE NvU32 portAtomicOrU32(volatile NvU32 *pVal, NvU32 val); + +/** + * @brief Atomic bitwise AND on a signed 32b integer. + * + * This function is the atomic equivalent to: + * + * ~~~{.c} + * *pVal &= val; + * return *pVal; + * ~~~ + * + * @return the new value of `*pVal` + */ +PORT_ATOMIC_INLINE NvS32 portAtomicAndS32(volatile NvS32 *pVal, NvS32 val); +/// @brief Unsigned version of @ref portAtomicAndS32 +PORT_ATOMIC_INLINE NvU32 portAtomicAndU32(volatile NvU32 *pVal, NvU32 val); + + + +/** + * @name Memory Barrier functions + * @note Memory fence functions must be marked inline, so the compiler knows + * about the barrier and doesn't reorder instructions around the call. Thus, + * this is PORT_INLINE and not PORT_ATOMIC_INLINE. (Force inline not necessary) + * + * @note A given platform is allowed to implement the load/store barriers as + * full barriers instead, if the former isn't supported. Thus, you should only + * use @ref portAtomicMemoryFenceLoad and @ref portAtomicMemoryFenceStore for + * possible performance bonus over @ref portAtomicMemoryFenceFull. Don't write + * code that relies on those being load/store only barriers. + * + * @{ + */ + +/** + * @brief Creates a full HW and compiler memory barrier. + * + * A memory fence (memory barrier) imposes a sequential ordering on access to + * all globally accessible variables. That means that all accesses found before + * the fence will finish before any of those after it. + */ +PORT_INLINE void portAtomicMemoryFenceFull(void); +/** + * @brief Creates a HW and compiler load memory barrier. + * + * A load memory fence (memory barrier) imposes a sequential ordering on all + * loads to globally accessible variables. All loads found before the barrier + * will happen before any loads found after it. A load barrier has no effect on + * store operations. 
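+ *
+ * Illustrative sketch (editorial example, not part of the original header)
+ * of the usual pairing with @ref portAtomicMemoryFenceStore on the producer
+ * side; `payload`, `ready`, `computeValue()` and `use()` are assumed names.
+ *
+ * ~~~{.c}
+ * // producer side
+ * payload = computeValue();
+ * portAtomicMemoryFenceStore();     // make payload visible before the flag
+ * ready = 1;
+ *
+ * // consumer side
+ * while (ready == 0)
+ * {
+ *     // spin until the producer sets the flag
+ * }
+ * portAtomicMemoryFenceLoad();      // order the flag read before reading payload
+ * use(payload);
+ * ~~~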
+ */ +PORT_INLINE void portAtomicMemoryFenceLoad(void); +/** + * @brief Creates a HW and compiler store memory barrier. + * + * A store memory fence (memory barrier) imposes a sequential ordering on all + * stores to globally accessible variables. All stores found before the barrier + * will happen before any stores found after it. A store barrier has no effect + * on load operations. + */ +PORT_INLINE void portAtomicMemoryFenceStore(void); +/// @} End memory barrier functions + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +#ifndef PORT_ATOMIC_64_BIT_SUPPORTED +/// @note We support 64bit atomics on all 64bit systems (and some 32bit) +#define PORT_ATOMIC_64_BIT_SUPPORTED NVCPU_IS_64_BITS +#endif + +#define portAtomicExAddS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExSubS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExSetS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExCompareAndSwapS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExIncrementS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExDecrementS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExXorS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExOrS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED +#define portAtomicExAndS64_SUPPORTED PORT_ATOMIC_64_BIT_SUPPORTED + +#if PORT_ATOMIC_64_BIT_SUPPORTED +/** + * @brief Like @ref portAtomicAddS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExAddS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExAddS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExAddU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicSubS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExSubS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExSubS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExSubU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicSetS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE void portAtomicExSetS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExSetS64 +PORT_ATOMIC64_INLINE void portAtomicExSetU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicCompareAndSwapS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvBool portAtomicExCompareAndSwapS64(volatile NvS64 *pVal, NvS64 newVal, NvS64 oldVal); +/// @brief Unsigned version of @ref portAtomicExCompareAndSwapS64 +PORT_ATOMIC64_INLINE NvBool portAtomicExCompareAndSwapU64(volatile NvU64 *pVal, NvU64 newVal, NvU64 oldVal); +/** + * @brief Like @ref portAtomicIncrementS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExIncrementS64(volatile NvS64 *pVal); +/// @brief Unsigned version of @ref portAtomicExIncrementS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExIncrementU64(volatile NvU64 *pVal); +/** + * @brief Like @ref portAtomicDecrementS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExDecrementS64(volatile NvS64 *pVal); +/// @brief Unsigned version of @ref portAtomicExDecrementS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExDecrementU64(volatile NvU64 *pVal); +/** + * @brief Like @ref portAtomicXorS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExXorS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExXorS64 +PORT_ATOMIC64_INLINE NvU64 
portAtomicExXorU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicOrS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExOrS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExOrS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExOrU64(volatile NvU64 *pVal, NvU64 val); +/** + * @brief Like @ref portAtomicAndS32, except operating on 64bit integers + */ +PORT_ATOMIC64_INLINE NvS64 portAtomicExAndS64(volatile NvS64 *pVal, NvS64 val); +/// @brief Unsigned version of @ref portAtomicExAndS64 +PORT_ATOMIC64_INLINE NvU64 portAtomicExAndU64(volatile NvU64 *pVal, NvU64 val); + +#endif // PORT_ATOMIC_64_BIT_SUPPORTED + +/// @} End extended functions + +/** + * Platform-specific inline implementations + */ +#if NVOS_IS_LIBOS +#include "nvport/inline/atomic_libos.h" +#endif + +#if PORT_COMPILER_IS_GCC +#include "nvport/inline/atomic_gcc.h" +#elif PORT_COMPILER_IS_CLANG +#include "nvport/inline/atomic_clang.h" +#elif PORT_COMPILER_IS_MSVC +#include "nvport/inline/atomic_msvc.h" +#endif + + +/** + * @name Utility Functions + * + * These are utility functions for performing operations on pointer sized + * operands. While the 64bit functions are "extended", they should always be + * present on systems where pointers and NvLength are 64 bits. + * @{ + */ +#if !NVCPU_IS_64_BITS +#define portAtomicAddSize(a,b) (NvSPtr)portAtomicAddS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSubSize(a,b) (NvSPtr)portAtomicSubS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSetSize(a,b) portAtomicSetS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicCompareAndSwapSize(a,b,c) portAtomicCompareAndSwapS32((volatile NvSPtr *)a, (NvSPtr)b, (NvSPtr)c) +#define portAtomicIncrementSize(a) (NvSPtr)portAtomicIncrementS32((volatile NvSPtr *)a) +#define portAtomicDecrementSize(a) (NvSPtr)portAtomicDecrementS32((volatile NvSPtr *)a) +#define portAtomicXorSize(a,b) (NvSPtr)portAtomicXorS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicOrSize(a,b) (NvSPtr)portAtomicOrS32((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicAndSize(a,b) (NvSPtr)portAtomicAndS32((volatile NvSPtr *)a, (NvSPtr)b) +#else +#define portAtomicAddSize(a,b) (NvSPtr)portAtomicExAddS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSubSize(a,b) (NvSPtr)portAtomicExSubS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicSetSize(a,b) portAtomicExSetS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicCompareAndSwapSize(a,b,c) portAtomicExCompareAndSwapS64((volatile NvSPtr *)a, (NvSPtr)b, (NvSPtr)c) +#define portAtomicIncrementSize(a) (NvSPtr)portAtomicExIncrementS64((volatile NvSPtr *)a) +#define portAtomicDecrementSize(a) (NvSPtr)portAtomicExDecrementS64((volatile NvSPtr *)a) +#define portAtomicXorSize(a,b) (NvSPtr)portAtomicExXorS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicOrSize(a,b) (NvSPtr)portAtomicExOrS64((volatile NvSPtr *)a, (NvSPtr)b) +#define portAtomicAndSize(a,b) (NvSPtr)portAtomicExAndS64((volatile NvSPtr *)a, (NvSPtr)b) +#endif +/// @} + +#endif // _NVPORT_ATOMIC_H_ +/// @} diff --git a/src/nvidia/inc/libraries/nvport/core.h b/src/nvidia/inc/libraries/nvport/core.h new file mode 100644 index 000000000..26f8ec10f --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/core.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVPORT_CORE_H_ +#define _NVPORT_CORE_H_ + +/** + * @defgroup NVPORT_CORE Core Functions + * + * @brief These are core NvPort functions present in all configurations. + * @{ + */ +/** + * @brief Global initialization + * + * Must be called once and only once before any NvPort functions can be called + * + * If this function returns an error then calling any NvPort function will result + * in undefined behavior. + * + * + * @return NV_OK if successful, error otherwise + */ +NV_STATUS portInitialize(void); + +/** + * @brief Global shutdown + * + * Must be called once and only once when a driver is shutting down and no more + * NvPort functions will be called. + * + */ +void portShutdown(void); + +/** + * @brief Returns if NvPort is initialized + * + * This function can be called at any time. It returns if @ref portInitialize + * has been called (and @ref portShutdown has not). + */ +NvBool portIsInitialized(void); + +/// @} + +#endif // _NVPORT_CORE_H_ diff --git a/src/nvidia/inc/libraries/nvport/cpu.h b/src/nvidia/inc/libraries/nvport/cpu.h new file mode 100644 index 000000000..ac95ec023 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/cpu.h @@ -0,0 +1,637 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief CPU module public interface. + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_CPU_H_ +#define _NVPORT_CPU_H_ +/** + * @defgroup NVPORT_CPU CPU specifice operations. + * + * @brief This module contains cpu specific operations. + * + * @{ + */ +/** + * @brief Initialize global CPU module state. This function is called by + * @ref portInitialize. + */ +void portCpuInitialize(void); + +/** + * @brief Clean up global CPU module state. This function is called by + * @ref portShutdown + */ +void portCpuShutdown(void); + +/** + * @name Core Functions + * @{ + */ +/** + * @brief Read requested MSR + * + * @param [in] address Address of the MSR + * @param [out] *pValue Value of MSR + * + * @return NV_OK If successful. + */ +NV_STATUS portReadMsr(NvU32 address, NvU64 *pValue); + +/** + * @brief Write value to requested MSR + * + * @param [in] address Address of the MSR + * @param [in] value Value to be written + * + * @return NV_OK If successful. + */ +NV_STATUS portWriteMsr(NvU32 address, NvU64 value); + + /// @} End Core functions + + /** + * @name Extended Functions + * @{ + */ + /** + * @name Intel X86 Structures, unions and enums. + * @{ + */ + +/** +* @brief Structure representing Intel Processor's general +* features broken down into individual bit fields. +*/ +typedef struct PORT_CPU_INTEL_X86_FEATURES +{ + /// @{ + NvU32 SSE3 : 1; /**< Streaming SIMD Extensions 3.*/ + NvU32 PCLMULQDQ : 1; /**< PCLMULQDQ instruction.*/ + NvU32 DTES64 : 1; /**< 64-bit DS Area.*/ + NvU32 MONITOR : 1; /**< MONITOR/MWAIT.*/ + NvU32 DSCPL : 1; /**< CPL Qualified Debug Store.*/ + NvU32 VMX : 1; /**< Virtual Machine Extensions.*/ + NvU32 SMX : 1; /**< Safer Mode Extensions.*/ + NvU32 EIST : 1; /**< Enhanced Intel SpeedStep(R) technology*/ + NvU32 TM2 : 1; /**< Thermal Monitor 2.*/ + NvU32 SSSE3 : 1; /**< Supplemental Streaming SIMD Extensions 3*/ + NvU32 CNXTID : 1; /**< L1 Context ID*/ + NvU32 SDBG : 1; /**< IA32_DEBUG_INTERFACE MSR for silicon debug.*/ + NvU32 FMA : 1; /**< FMA extensions using YMM state.*/ + NvU32 CMPXCHG16B : 1; /**< CMPXCHG8B/CMPXCHG16B Compare and Exchange Bytes*/ + NvU32 xTPRUpdateControl : 1; /** supports changing + IA32_MISC_ENABLE[bit 23].*/ + NvU32 PDCM : 1; /**< Perfmon and Debug Capability: supports the performance + and debug feature indication MSR IA32_PERF_CAPABILITIES.*/ + NvU32 PCID : 1; /**< Process-context identifiers: Supports PCIDs and that + software may set CR4.PCIDE to 1.*/ + NvU32 DCA : 1; /**< Supports the ability to prefetch data from a memory mapped + device.*/ + NvU32 SSE41 : 1; /**< Supports SSE4.1.*/ + NvU32 SSE42 : 1; /**< Supports SSE4.2.*/ + NvU32 x2APIC : 1; /**< Support x2APIC.*/ + NvU32 MOVBE : 1; /**< Supports MOVBE instruction.*/ + NvU32 POPCNT : 1; /**< Supports the POPCNT instruction.*/ + NvU32 TSCDeadline : 1; /**< The processor's local APIC timer supports + one-shot operation using a TSC deadline value.*/ + NvU32 AES : 1; /**< Supports the AESNI instruction extensions.*/ + NvU32 XSAVE : 1; /**< Supports the XSAVE/XRSTOR processor extended states + feature, the XSETBV/XGETBV instructions, and XCR0.*/ + NvU32 OSXSAVE : 1; /**< the OS has set CR4.OSXSAVE[bit 18] to enable + 
XSETBV/XGETBV instructions to access XCR0 and to support + processor extended state management using + XSAVE/XRSTOR.*/ + NvU32 AVX : 1; /**< The processor supports the AVX instruction + extensions.*/ + NvU32 F16C : 1; /**< processor supports 16-bit floating-point conversion + instructions.*/ + NvU32 RDEND : 1; /**< Processor supports RDRAND instruction.*/ + NvU32 FPU : 1; /**< Floating Point Unit On-Chip.*/ + NvU32 VME : 1; /**< Virtual 8086 Mode Enhancements.*/ + NvU32 DE : 1; /**< Debugging Extensions.*/ + NvU32 PSE : 1; /**< Page Size Extension.*/ + NvU32 TSC : 1; /**< Time Stamp Counter.*/ + NvU32 MSR : 1; /**< Model Specific Registers RDMSR and WRMSR + Instructions.*/ + NvU32 PAE : 1; /**< Physical Address Extension.*/ + NvU32 MCE : 1; /**< Machine Check Exception.*/ + NvU32 CX8 : 1; /**< CMPXCHG8B Instruction.*/ + NvU32 APIC : 1; /**< APIC On-Chip.*/ + NvU32 SEP : 1; /**< SYSENTER and SYSEXIT Instructions.*/ + NvU32 MTRR : 1; /**< Memory Type Range Registers.*/ + NvU32 PGE : 1; /**< Page Global Bit*/ + NvU32 MCA : 1; /**< Machine Check Architecture.*/ + NvU32 CMOV : 1; /**< Conditional Move Instructions.*/ + NvU32 PAT : 1; /**< Page Attribute Table.*/ + NvU32 PSE36 : 1; /**< 36-Bit Page Size Extension.*/ + NvU32 PSN : 1; /**< 96-Bit Processor Serial Number.*/ + NvU32 CLFSH : 1; /**< CLFLUSH Instruction.*/ + NvU32 DEBUGS : 1; /**< Debug Store.*/ + NvU32 ACPI : 1; /**< Thermal Monitor and Software Controlled Clock + Facilities.*/ + NvU32 MMX : 1; /**< Intel MMX Technology.*/ + NvU32 FXSR : 1; /**< FXSAVE and FXRSTOR Instructions.*/ + NvU32 SSE : 1; /**< SSE Extensions.*/ + NvU32 SSE2 : 1; /**< SSE2 extensions.*/ + NvU32 SELFS : 1; /**< Self Snoop.*/ + NvU32 HTT : 1; /**< Max APIC IDs reserved field is Valid.*/ + NvU32 TM : 1; /**< Thermal Monitor.*/ + NvU32 PBE : 1; /**< Pending Break Enable.*/ + /// @} +} PORT_CPU_INTEL_X86_FEATURES; + +/** + * @brief Enum representing Intel processor family information. + * + */ +typedef enum PORT_CPU_INTEL_FAMILY +{ + PORT_CPU_INTEL_FAMILY_6 = 6, + PORT_CPU_INTEL_FAMILY_7 = 7 +} PORT_CPU_INTEL_FAMILY; + +/** + * @brief Enum representing Intel family 6 processor model information. + * + */ +typedef enum PORT_CPU_INTEL_FAMILY_6_MODEL +{ + PORT_CPU_INTEL_FAMLLY_6_MODEL_SANDYBRIDGE = 42, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SANDYBRIDGE_X = 45, + PORT_CPU_INTEL_FAMLLY_6_MODEL_IVYBRIDGE = 58, + PORT_CPU_INTEL_FAMLLY_6_MODEL_IVYBRIDGE_X = 62, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL = 60, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_X = 63, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_ULT = 69, + PORT_CPU_INTEL_FAMLLY_6_MODEL_HASWELL_GT3 = 70, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL = 61, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL_GT3 = 71, + PORT_CPU_INTEL_FAMLLY_6_MODEL_BROADWELL_X = 79, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE = 94, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE_MOBILE = 78, + PORT_CPU_INTEL_FAMLLY_6_MODEL_KABYLAKE = 158, + PORT_CPU_INTEL_FAMLLY_6_MODEL_KABYLAKE_MOBILE = 142, + PORT_CPU_INTEL_FAMLLY_6_MODEL_SKYLAKE_X = 85, + PORT_CPU_INTEL_FAMLLY_6_MODEL_CANNONLAKE_MOBILE = 102, + PORT_CPU_INTEL_FAMILY_6_MODEL_COMETLAKE_MOBILE = 166, + PORT_CPU_INTEL_FAMILY_6_MODEL_COMETLAKE = 165, + PORT_CPU_INTEL_FAMILY_6_MODEL_TIGERLAKE_MOBILE = 140, + PORT_CPU_INTEL_FAMILY_6_MODEL_TIGERLAKE = 141, +} PORT_CPU_INTEL_FAMILY_6_MODEL; + +/** + * @brief Union representing Intel processor family information. 
+ * + */ +typedef union PORT_CPU_INTEL_MODEL +{ + PORT_CPU_INTEL_FAMILY_6_MODEL family6; +} PORT_CPU_INTEL_MODEL; + +/** + * @brief Enum representing Intel processor type information. + * + */ +typedef enum PORT_CPU_INTEL_PROCESSOR_TYPE +{ + PORT_CPU_INTEL_PROCESSOR_TYPE_ORIGINAL_OEM = 0, + PORT_CPU_INTEL_PROCESSOR_TYPE_OVERDRIVE = 1, + PORT_CPU_INTEL_PROCESSOR_TYPE_DUAL_PROCESSOR = 2, + PORT_CPU_INTEL_PROCESSOR_TYPE_RESERVED = 3 +} PORT_CPU_INTEL_PROCESSOR_TYPE; + +/** + * @brief Structure representing Intel Processor's Threamal & Power Management + * features broken down into individual bit fields. + */ +typedef struct PORT_CPU_INTEL_TPM_FEATURES +{ + /// @{ + NvU32 DTS : 1; /**< Digital Temperature Sensor is supported if set.*/ + NvU32 IntelTurboBoost : 1; /**< Intel Turbo Boost Technology available.*/ + NvU32 ARAT : 1; /**< APIC-Timer-always-running feature is supported + if set.*/ + NvU32 PLN : 1; /**< Power limit notification controls are supported + if set.*/ + NvU32 ECMD : 1; /**< Clock modulation duty cycle extension is supported + if set.*/ + NvU32 PTM : 1; /**< Package thermal management is supported if set.*/ + NvU32 HWP : 1; /**< HWP base registers (IA32_PM_ENABLE[bit 0], + IA32_HWP_CAPABILITIES, IA32_HWP_REQUEST, IA32_HWP_STATUS) + are supported if set.*/ + NvU32 HWPNotification : 1; /**< IA32_HWP_INTERRUPT MSR is supported + if set.*/ + NvU32 HWPActivityWindow : 1; /**< IA32_HWP_REQUEST[bits 41:32] is + supported if set.*/ + NvU32 HWPEPP : 1; /**< HWP_Energy_Performance_Preference. + IA32_HWP_REQUEST[bits 31:24] is supported if set.*/ + NvU32 HWPPLR : 1; /**< HWP_Package_Level_Request. IA32_HWP_REQUEST_PKG MSR + is supported if set.*/ + NvU32 HDC : 1; /**< HDC base registers IA32_PKG_HDC_CTL, IA32_PM_CTL1, + IA32_THREAD_STALL MSRs are supported if set.*/ + NvU32 IntelTurboBoostMaxTech30 : 1; /**< Intel(R) Turbo Boost Max Technology + 3.0 available.*/ + NvU32 HWPCapabilities : 1; /**< Highest Performance change is supported + if set.*/ + NvU32 HWPPECI : 1; /**< HWP PECI override is supported if set.*/ + NvU32 FLEXHWP : 1; /**< Flexible HWP is supported if set.*/ + NvU32 FAM : 1; /**< Fast access mode for the IA32_HWP_REQUEST MSR is + supported if set.*/ + NvU32 ILPHWPRequest : 1; /**< Ignoring Idle Logical Processor HWP request + is supported if set.*/ + NvU32 NoOfInterruptThresholdsInDTS : 4; /**< Number of Interrupt Thresholds + in Digital Thermal Sensor.*/ + NvU32 HCFC : 1; /**< Hardware Coordination Feedback Capability + (Presence of IA32_MPERF and IA32_APERF). The capability to + provide a measure of delivered processor performance + (since last reset of the counters), as a percentage of the + expected processor performance when running at the TSC + frequency.*/ + NvU32 PEBP : 1; /**< The processor supports performance-energy bias + preference if CPUID.06H:ECX.SETBH[bit 3] is set and it also + implies the presence of a new architectural MSR called + IA32_ENERGY_PERF_BIAS (1B0H).*/ + /// @} +} PORT_CPU_INTEL_TPM_FEATURES; + +/** + * @brief Structure representing Intel Processor's Architecture Performance + * monitering features broken down into individual bit fields. 
+ */ +typedef struct PORT_CPU_INTEL_ARCH_PERF_MONITOR +{ + /// @{ + NvU32 versionId; /**< Version ID of architectural performance monitoring.*/ + NvU32 noOfGPPerfMonitoringCounters; /**< Number of general-purpose + performance monitoring counter per + logical processor.*/ + NvU32 bitWidthOfGPCounters; /** Bit width of general-purpose, performance + monitoring counter.*/ + NvU32 coreCycleEvent : 1; /**< Core cycle event available if 1.*/ + NvU32 instructionRetiredEvent : 1; /**< Instruction retired event + available if 1.*/ + NvU32 referenceCycelEvent : 1; /**< Reference cycles event available if 1.*/ + NvU32 lastLevelCacheRefEvent : 1; /**< Last-level cache reference event + available if 1.*/ + NvU32 lastLevelCacheMissEvent : 1; /**< Last-level cache misses event not + available if 1.*/ + NvU32 branchInstructionRetiredEvent : 1; /**< Branch instruction retired + event not available if 1.*/ + NvU32 branchMispredictRetiredEvent : 1; /**< Branch mispredict retired event + not available if 1.*/ + NvU32 noOfFixedFuncPerfCounters; /**< Number of fixed-function performance + counters (if Version ID > 1).*/ + NvU32 bitWidthOfFixedFuncPerfCounters; /**< Bit width of fixed-function + performance counters + (if Version ID > 1).*/ + /// @} +} PORT_CPU_INTEL_ARCH_PERF_MONITOR; + +/** + * @brief Structure representing Intel Processor version and features + * broken down into individual fields. + */ +typedef struct PORT_CPU_INTEL +{ + /// @{ + PORT_CPU_INTEL_FAMILY family; /**< Family of the Processor.*/ + PORT_CPU_INTEL_MODEL model; /**< Model of the Processor.*/ + PORT_CPU_INTEL_PROCESSOR_TYPE processorType; /**< Processor Type.*/ + NvU8 steppingId; /**< Stepping ID of the Processor.*/ + NvU8 brandIndex; /**< Numerical Index of Brand String Index Table + entry.*/ + NvU8 localApicId; /** Local APIC ID of the Processor.*/ + NvU32 threadCountPerCore; /**< Threads Per Core.*/ + NvU32 physicalCoreCount; /**< Physical Cores Per Package.*/ + NvU32 logicalCoreCount; /**< Logical Cores Per Package.*/ + PORT_CPU_INTEL_X86_FEATURES features; /**< General Features.*/ + PORT_CPU_INTEL_TPM_FEATURES tpmFeatures; /**< Thermal and Power Management + Features.*/ + PORT_CPU_INTEL_ARCH_PERF_MONITOR archPerfMonitor; /**< Architecture + Performance Monitoring + Features.*/ + /// @} +} PORT_CPU_INTEL; + +/// @} + +/** + * @name AMD X86 Structures, unions and enums. + * @{ + */ + +/** + * @brief Enum representing AMD processor family information. + * + */ +typedef enum PORT_CPU_AMD_FAMILY +{ + PORT_CPU_AMD_FAMILY_0 = 0, + PORT_CPU_AMD_FAMILY_1 = 1, + PORT_CPU_AMD_FAMILY_ZEN3 = 25 +} PORT_CPU_AMD_FAMILY; + +/** + * @brief Enum representing AMD processor family 0 model information. + * + */ +typedef enum PORT_CPU_AMD_FAMILY_0_MODEL +{ + PORT_CPU_AMD_FAMLLY_0_MODEL_X = 0, +} PORT_CPU_AMD_FAMILY_0_MODEL; + +/** + * @brief Union representing AMD processor family wise model information. + * + */ +typedef union PORT_CPU_AMD_MODEL +{ + PORT_CPU_AMD_FAMILY_0_MODEL family0; +} PORT_CPU_AMD_MODEL; + +/** + * @brief Structure representing AMD Processor's Threamal & Power Management + * features broken down into individual bit fields. + */ +typedef struct PORT_CPU_AMD_TPM_FEATURES +{ + /// @{ + NvU32 EffFreq : 1; /**< */ + /// @} +} PORT_CPU_AMD_TPM_FEATURES; + +/** + * @brief Structure representing AMD Processor version and features + * broken down into individual fields. 
+ */ +typedef struct PORT_CPU_AMD +{ + /// @{ + PORT_CPU_AMD_FAMILY family; /**< Family of the Processor.*/ + PORT_CPU_AMD_MODEL model; /**< Model of the Processor.*/ + NvU8 steppingId; /**< Stepping ID of the Processor.*/ + NvU8 brandIndex; /**< Numerical Index of Brand String Index Table + entry.*/ + NvU8 localApicId; /** Local APIC ID of the Processor.*/ + NvU32 threadCountPerCore; /**< Threads Per Core.*/ + NvU32 physicalCoreCount; /**< Physical Cores Per Package.*/ + NvU32 logicalCoreCount; /**< Logical Cores Per Package.*/ + PORT_CPU_AMD_TPM_FEATURES tpmFeatures; /**< Thermal and Power Management + Features.*/ + /// @} +} PORT_CPU_AMD; + +/// @} + +/** + * @name Generic CPU Information Structures, unions and enums. + * @{ + */ + +/** +*@brief Maximum length of Vendor ID Null terminated string. +*/ +#define PORT_CPU_VENDOR_ID_LENGTH 20 + +/** +*@brief Enum represening the Processor Architecture Type. +*/ +typedef enum PORT_CPU_TYPE +{ + /// @{ + PORT_CPU_TYPE_INTEL_X86 = 0, /**< Intel X86/X86-64 Architecture.*/ + PORT_CPU_TYPE_AMD_X86 = 1, /**< AMD X86/AMD64 Architecture.*/ + PORT_CPU_TYPE_ARM = 2 /**< ARM Architecture.*/ + /// @} +} PORT_CPU_TYPE; + +/** +*@brief Union represening the Abstract Processor data structure. +*/ +typedef union PORT_CPU +{ + PORT_CPU_AMD amd; + PORT_CPU_INTEL intel; +} PORT_CPU; + +/** + * @brief Structure representing processor information broken down into + * individual fields. + */ +typedef struct PORT_CPU_INFORMATION +{ + /// @{ + PORT_CPU_TYPE type; /**< Type of Architecture/CPU.*/ + char vendorId[PORT_CPU_VENDOR_ID_LENGTH]; /**< Null terminated Vendor Id + String.*/ + NvLength vendorIdLength; /**< Actual length of the null terminated Vendor + Id string.*/ + PORT_CPU cpu; /**< CPU specifice information.*/ + /// @} +} PORT_CPU_INFORMATION; + +/** + * @brief Structure representing processor logical topology information broken + * down into individual fields. + */ +typedef struct PORT_CPU_LOGICAL_TOPOLOGY +{ + /// @{ + NvU64 activeCpuCount; /**< Active Logical CPUs.*/ + NvU64 activeGroupCount; /**< Active Logical CPU Group count.*/ + NvU64 maxCpuCount; /**< Maximum Logical CPUs system can support*/ + NvU64 maxGroupCount; /**< Maximum Logical CPUs Groups system can support*/ + NvU64 maxCpuPerGroup; /**< Maximum Logical CPUs system can support per group*/ + /// @} +} PORT_CPU_LOGICAL_TOPOLOGY; + +/** + * @brief Structure representing a BAR descriptor for a PCIe device + */ +typedef struct PORT_CPU_BAR_DESC +{ + /// @{ + void *pBarAddr; /**< Starting virtual address of the BAR space */ + NvU64 physAddr; /**< Starting physical address of the BAR space */ + NvU32 barSize; /**< Size of BAR space */ + /// @} +} PORT_CPU_BAR_DESC; + +/// @} End Generic CPU Information Structures, unions and enums. + +/** + * @brief Get Logical Topology of CPU. + * @param[out] pCpuTopology PORT_CPU_LOGICAL_TOPOLOGY pointer. + * @return NV_OK If successful and cpu logical topology information + * in pCpuInfo structure. + */ +NV_STATUS portCpuGetLogicalTopology(PORT_CPU_LOGICAL_TOPOLOGY *pCpuTopology); +#define portCpuGetLogicalTopology_SUPPORTED (NVOS_IS_WINDOWS) + +/** + * @brief Get CPU Logical Topology Information. + * @param[out] pCpuInfo PORT_CPU_INFORMATION pointer. + * @return NV_OK If successful and CPU Information in pCpuInfo structure. 
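+ *
+ * Minimal usage sketch (editorial example, not part of the original header):
+ *
+ * ~~~{.c}
+ * PORT_CPU_INFORMATION cpuInfo;
+ * if (portCpuGetInfo(&cpuInfo) == NV_OK &&
+ *     cpuInfo.type == PORT_CPU_TYPE_INTEL_X86)
+ * {
+ *     // cpuInfo.cpu.intel.features and .tpmFeatures now describe the CPU
+ * }
+ * ~~~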
+ */ +NV_STATUS portCpuGetInfo(PORT_CPU_INFORMATION* pCpuInfo); +#define portCpuGetInfo_SUPPORTED (_X86_ || _AMD64_) + +/** + * @brief Get CPU information using CPUID Instruction (X86-64 Specifice) + * @param[out] pCpuInfo Pointer to array which return value + * cpuInfo[0] = EAX, + * cpuInfo[1] = EBX, + * cpuInfo[2] = ECX, + * cpuInfo[3] = EDX. + * @param[in] functionId Function Id of CPUID instruction to execute. + * @param[in] subfunctionId Sub-Function Id of CPUID instruction to execute. + * subfunctionId enables you to gather additional information about + * the processor + + * @return NV_OK if successful, otherwise return errors. + */ +NV_STATUS portCpuExCpuId(NvU32* pCpuInfo, NvU32 functionId, + NvU32 subfunctionId); +#define portCpuExCpuId_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) +/// @} End extended functions + +/** + * @brief Retrieve the current value and frequency of the performance counter + * + * @param[out] pFreq A pointer to a variable to which this routine writes the + * performance counter frequency, in ticks per second. + * This parameter is optional and can be NULL if the caller + * does not need the counter frequency value. + * + * @return The performance counter value in units of ticks + */ +NvS64 portCpuExQueryPerformanceCounter(NvS64 *pFreq); +#define portCpuExQueryPerformanceCounter_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Enable PMC read feature + */ +void portCpuExEnablePmc(void); +#define portCpuExEnablePmc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Read requested PMC register + * + * @param [in] address Address of the PMC register + * @param [out] *pValue Value of PMC register + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExReadPmc(NvU32 address, NvU64 *pValue); +#define portCpuExReadPmc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Fill in BAR descriptor of Integrated memory controller + * + * @param [in] pImcBarDesc Pointer to BAR descriptor structure + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExAllocImcBarDesc(PORT_CPU_BAR_DESC *pImcBarDesc); +#define portCpuExAllocImcBarDesc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Free BAR descriptor of Integrated memory controller + * + * @param [in] pImcBarDesc Pointer to BAR descriptor structure + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExFreeImcBarDesc(PORT_CPU_BAR_DESC *pImcBarDesc); +#define portCpuExFreeImcBarDesc_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Reset Performance monitoring counters + * + * @return NV_OK If successful. + */ +NV_STATUS portCpuExResetPmu(void); +#define portCpuExResetPmu_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Program Performance monitoring counters + * + * @param [in] numValidEvents Number of valid events in array pPerfEvents + * @param [in] pPerfEvents Array of events to be configured into general + * purpose performance monitoring counters(PMCs) + * + * @return NV_OK If successful. 
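+ *
+ * Illustrative sketch (editorial example, not part of the original header)
+ * of the reset/program/read flow; the event encoding and counter address
+ * below are assumed placeholder values.
+ *
+ * ~~~{.c}
+ * NvU32 events[1] = { 0x0 };                    // placeholder event encoding
+ * NvU64 count;
+ * if (portCpuExResetPmu() == NV_OK &&
+ *     portCpuExProgramPmu(1, events) == NV_OK &&
+ *     portCpuExReadPmc(0, &count) == NV_OK)     // placeholder PMC address
+ * {
+ *     // count holds the raw value of the programmed counter
+ * }
+ * ~~~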
+ */ +NV_STATUS portCpuExProgramPmu(NvU32 numValidEvents, NvU32 *pPerfEvents); +#define portCpuExProgramPmu_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Get number of DRAM reads in terms of bytes + * + * @param [out] pNumReads + * + * @return NV_OK If successful + */ +NV_STATUS portCpuExGetDRamReads(NvU64 *pNumReads); +#define portCpuExGetDRamReads_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Get number of DRAM writes in terms of bytes + * + * @param [out] pNumWrites + * + * @return NV_OK If successful + */ +NV_STATUS portCpuExGetDRamWrites(NvU64 *pNumWrites); +#define portCpuExGetDRamWrites_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Check if the given MSR is supported on the current processor + * + * @param [in] address Address of the MSR that needs to be checked + * + * @return NV_TRUE If MSR is supported + * NV_FALSE If MSR is not supported + */ +NvBool portCpuExIsMsrSupported(NvU32 address); +#define portCpuExIsMsrSupported_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +/** + * @brief Check if the current processor supports DRAM read/write request counting + * + * @return NV_TRUE If supported + * NV_FALSE If not supported + */ +NvBool portCpuExIsDramRwCountingSupported(void); +#define portCpuExIsDramRwCountingSupported_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS && (_X86_ || _AMD64_)) + +#endif // _NVPORT_CPU_H_ +/// @} diff --git a/src/nvidia/inc/libraries/nvport/crypto.h b/src/nvidia/inc/libraries/nvport/crypto.h new file mode 100644 index 000000000..ab320ce70 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/crypto.h @@ -0,0 +1,346 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Crypto module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_CRYPTO_H_ +#define _NVPORT_CRYPTO_H_ +/** + * @defgroup NVPORT_CRYPTO Cryptography operations + * + * @brief This module contains cryptographic and PRNG functions + * + * + * A note on terminology: + * + * Pseudorandom numbers are deterministic and reproducible. When given the same + * seed, they will always give the same sequence, across all platforms. 
They are + * not suitable for cryptography or any security sensitive operations. + * + * True random numbers are generated from hardware, and as such are completely + * nondeterministic. There is no support for setting a seed, and you can expect + * the output to always be different. Unlike pseudorandom numbers, true random + * output will always vary across different platforms. + * + * These numbers are suitable for security sensitive and cryptography operations. + * + * In case of kernelmode code, the entropy pool will contain bits that are not + * available to usermode clients. As a consequence, a usermode client cannot + * deplete the entropy pool to lower the security + * + * @note Unless ending with the "-Blocking" suffix, all functions are + * non-blocking. With regards to True Random numbers, this has a consequence + * that if there are insufficient bits in the entropy pool, they will be used + * to seed a custom PRNG which will provide the final output. A blocking + * version of some functions may be available as an extended function. + * + * @note As a general rule, you should always use the non-blocking version of a + * function, unless ALL the following conditions are satisfied: + * - First time booting a clean OS + * - No connection to the network + * - The GPU is not booted yet + * - Dealing with a remote machine (i.e. no direct mouse/keyboard input) + * - No HW random support (older CPUs) + * + * For additional information, see these links: + * - http://www.2uo.de/myths-about-urandom/ + * - https://bugs.ruby-lang.org/issues/9569 + * - http://security.stackexchange.com/questions/3936/is-a-rand-from-dev-urandom-secure-for-a-login-key + * + * @{ + */ + + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Initializes global CRYPTO module state + * + * This function is called by @ref portInitialize. It is available here in case + * it is needed to initialize the CRYPTO module without initializing all the + * others. e.g. for unit tests. + * + */ +void portCryptoInitialize(void); +/** + * @brief Destroys global CRYPTO module state + * + * This function is called by @ref portShutdown. It is available here in case + * it is needed to initialize the CRYPTO module without initializing all the + * others. e.g. for unit tests. + * + */ +void portCryptoShutdown(void); + +/** + * @brief A pseudorandom number generator object + */ +typedef struct PORT_CRYPTO_PRNG PORT_CRYPTO_PRNG; + + +/** + * @brief Construct a PRNG with the given seed. + * + * @warning These objects are not Cryptographically Secure, and thus not + * appropriate for any security sensitive operations. Use "True" random instead. + * + * The same seed will always result in the same sequence returned by + * @ref portCryptoPseudoRandomGeneratorGetU32, + * @ref portCryptoPseudoRandomGeneratorGetU64 and + * @ref portCryptoPseudoRandomGeneratorFillBuffer. This behavior is consistent + * across all platforms. The following code will always print the same thing: + * ~~~{.c} + * PORT_CRYPTO_PRNG *pPrng = portCryptoPseudoRandomGeneratorCreate(0xdeadbeef); + * if (pPrng) + * { + * NvU32 n = portCryptoPseudoRandomGeneratorGetU32(pPrng); + * portDbgPrintf("%u", n); + * portCryptoPseudoRandomGeneratorDestroy(pPrng); + * } + * ~~~ + * + * @return NULL if the construction failed, a PRNG object otherwise. 
+ * + */ +PORT_CRYPTO_PRNG *portCryptoPseudoRandomGeneratorCreate(NvU64 seed); +/** + * @brief Destroys an object created with + * @ref portCryptoPseudoRandomGeneratorCreate + * + */ +void portCryptoPseudoRandomGeneratorDestroy(PORT_CRYPTO_PRNG *pPrng); +/** + * @brief Returns a 32bit pseudorandom number from a given PRNG. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomGetU32 instead. + * + * @param [in] pPrng - Generator object. If NULL, the default one will be used. + * + */ +NvU32 portCryptoPseudoRandomGeneratorGetU32(PORT_CRYPTO_PRNG *pPrng); +/** + * @brief Returns a 64bit pseudorandom number from a given PRNG. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomGetU64 instead. + * + * @param [in] pPrng - Generator object. If NULL, the default one will be used + * + */ +NvU64 portCryptoPseudoRandomGeneratorGetU64(PORT_CRYPTO_PRNG *pPrng); +/** + * @brief Fills a user provided buffer with a pseudorandom sequence from a given + * PRNG + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomFillBuffer instead. + * + * @param [in] pPrng - Generator object. If NULL, the default one will be used + * + * @return NV_OK if successful; + * NV_ERR_INVALID_POINTER if pBuffer is NULL; + * + */ +NV_STATUS portCryptoPseudoRandomGeneratorFillBuffer(PORT_CRYPTO_PRNG *pPrng, NvU8 *pBuffer, NvLength bufSize); + +/** + * @brief Sets the PRNG seed of the global generator + * + * The same seed will always result in the same sequence returned by + * @ref portCryptoPseudoRandomGetU32, @ref portCryptoPseudoRandomGetU64 and + * @ref portCryptoPseudoRandomFillBuffer. This behavior is consistent across + * all platforms. The following code will print the same thing on all platforms: + * ~~~{.c} + * portCryptoPseudoRandomSetSeed(0xdeadbeef); + * NvU32 n = portCryptoPseudoRandomGetU32(); + * portDbgPrintf("%u", n); + * ~~~ + * + */ +void portCryptoPseudoRandomSetSeed(NvU64 seed); + +/** + * @brief Returns a 32bit pseudorandom number from global generator + * + * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorGetU32 with + * a NULL generator object. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomGetU32 instead. + * + */ +NvU32 portCryptoPseudoRandomGetU32(void); +/** + * @brief Returns a 64bit pseudorandom number + * + * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorGetU64 with + * a NULL generator object. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomGetU64 instead. + * + */ +NvU64 portCryptoPseudoRandomGetU64(void); +/** + * @brief Fills a user provided buffer with a pseudorandom sequence. + * + * This is equivalent to calling @ref portCryptoPseudoRandomGeneratorFillBuffer + * with a NULL generator object. + * + * @warning The numbers generated in this way are not appropriate for security + * sensitive operations. Use @ref portCryptoExTrueRandomFillBuffear instead. 
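+ *
+ * Minimal usage sketch (editorial example, not part of the original header);
+ * `cookie` is an assumed name.
+ *
+ * ~~~{.c}
+ * NvU8 cookie[16];
+ * if (portCryptoPseudoRandomFillBuffer(cookie, sizeof(cookie)) == NV_OK)
+ * {
+ *     // cookie holds 16 deterministic pseudorandom bytes (not secure)
+ * }
+ * ~~~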
+ * + * @return NV_OK if successful; + * NV_ERR_INVALID_POINTER if pBuffer is NULL; + * + */ +NV_STATUS portCryptoPseudoRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize); + +/** + * @brief Calculate the MD5 hash of a given buffer + * + * @param [in] pInBuffer - Input data. Must not be NULL. + * @param [in] bufSize - Size of input buffer, in bytes. + * @param [out] pOutBuffer - Output buffer. Must be at least 16 bytes in length + * + * @return NV_OK if successful. + */ +NV_STATUS portCryptoHashMD5(const NvU8 *pInBuffer, NvLength bufSize, NvU8 pOutBuffer[16]); +/** + * @brief Calculate the first 24 bits of the MD5 hash of a given buffer + * + * The 24 bits are interpreted as a full hash, and are stored as big endian. So, + * if the full hash was d41d8cd98f00b204e9800998ecf8427e, the short 24bit hash + * would be 0x00d41d8c. + * + * @param [in] pInBuffer - Input data. Must not be NULL. + * @param [in] bufSize - Size of input buffer, in bytes. + * @param [out] pOut - Output location. Only the lowest 24 bits are set. + * + * @return NV_OK if successful. + */ +NV_STATUS portCryptoHashMD5Short(const NvU8 *pInBuffer, NvLength bufSize, NvU32 *pOut); +/** + * @brief Convert a binary representation of the MD5 hash to a 32digit hex string + */ +NV_STATUS portCryptoHashMD5BinaryToHexString(const NvU8 pBinary[16], char pHexStr[33]); +/** + * @brief Convert a 32 digit hex string representation of the MD5 hash to binary + */ +NV_STATUS portCryptoHashMD5HexStringToBinary(const char *pHexStr, NvU8 pBinary[16]); + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ +#if defined(NV_MODS) || PORT_IS_KERNEL_BUILD +#define PORT_CRYPTO_TRUE_RANDOM_SUPPORTED 0 +#else +#define PORT_CRYPTO_TRUE_RANDOM_SUPPORTED 1 +#endif +/** + * @brief Returns a 32bit random number + * + * @note This function does not block, but rather combines the bits from the + * entropy pool with a PRNG to produce a random output of desired width. + * This is considered safe for most cryptographic applications. You can use + * @ref portCryptoExTrueRandomGetU32Blocking for a guaranteed high entropy output. + */ +NvU32 portCryptoExTrueRandomGetU32(void); +#define portCryptoExTrueRandomGetU32_SUPPORTED 0 +/** + * @brief Returns a 64bit random number + * + * @note This function does not block, but rather combines the bits from the + * entropy pool with a PRNG to produce a random output of desired width. + * This is considered safe for most cryptographic applications. You can use + * @ref portCryptoExTrueRandomGetU64Blocking for a guaranteed high entropy output. + */ +NvU64 portCryptoExTrueRandomGetU64(void); +#define portCryptoExTrueRandomGetU64_SUPPORTED 0 +/** + * @brief Fills a user provided buffer with a random sequence. + * + * @note This function does not block, but rather combines the bits from the + * entropy pool with a PRNG to produce a random output of desired width. This is + * considered safe for most cryptographic applications. You can use + * @ref portCryptoExTrueRandomFillBufferBlocking for a guaranteed high entropy + * output. + * + * @return NV_OK if successful; + * NV_ERR_INVALID_POINTER if pBuffer is NULL; + */ +NV_STATUS portCryptoExTrueRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize); +#define portCryptoExTrueRandomFillBuffer_SUPPORTED 0 + +#define PORT_CRYPTO_TRUE_RANDOM_BLOCKING_SUPPORTED (!PORT_IS_KERNEL_BUILD && !NVOS_IS_WINDOWS) + +/** + * @brief Returns a 32bit random number, possibly blocking the thread. 
+ *
+ * If there are not enough entropy bits available, the function will block until
+ * they are. Use @ref portCryptoExTrueRandomGetU32 unless you really need the
+ * entire result to be made exclusively of true random bits.
+ */
+NvU32 portCryptoExTrueRandomGetU32Blocking(void);
+#define portCryptoExTrueRandomGetU32Blocking_SUPPORTED 0
+/**
+ * @brief Returns a 64bit random number, possibly blocking the thread.
+ *
+ * If there are not enough entropy bits available, the function will block until
+ * they are. Use @ref portCryptoExTrueRandomGetU64 unless you really need the
+ * entire result to be made exclusively of true random bits.
+ */
+NvU64 portCryptoExTrueRandomGetU64Blocking(void);
+#define portCryptoExTrueRandomGetU64Blocking_SUPPORTED 0
+
+/**
+ * @brief Fills a user provided buffer with a random sequence,
+ * possibly blocking the thread.
+ *
+ * If there are not enough entropy bits available, the function will block until
+ * they are. Use @ref portCryptoExTrueRandomFillBuffer unless you really need the
+ * entire result to be made exclusively of true random bits.
+ */
+NV_STATUS portCryptoExTrueRandomFillBufferBlocking(NvU8 *pBuffer, NvLength bufSize);
+#define portCryptoExTrueRandomFillBufferBlocking_SUPPORTED 0
+
+/// @} End extended functions
+
+/// @}
+
+#endif // _NVPORT_CRYPTO_H_
+/// @}
diff --git a/src/nvidia/inc/libraries/nvport/debug.h b/src/nvidia/inc/libraries/nvport/debug.h
new file mode 100644
index 000000000..2240a263b
--- /dev/null
+++ b/src/nvidia/inc/libraries/nvport/debug.h
@@ -0,0 +1,314 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/**
+ * @file
+ * @brief Debug module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_DEBUG_H_
+#define _NVPORT_DEBUG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup NVPORT_DEBUG Debug Support Routines
+ * @brief This module provides debug support routines like breakpoints and prints.
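+ *
+ * A minimal usage sketch (pObject is a hypothetical pointer used only for
+ * illustration; @ref portDbgPrintf is an extended function that may not be
+ * supported on every platform):
+ * ~~~{.c}
+ * PORT_ASSERT_CHECKED(pObject != NULL);
+ * #if PORT_IS_FUNC_SUPPORTED(portDbgPrintf)
+ * portDbgPrintf("object at %p\n", pObject);
+ * #endif
+ * ~~~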
+ * @{ + */ + +/** @brief See @ref PORT_UTIL_INLINE */ +#ifndef PORT_DEBUG_INLINE +#define PORT_DEBUG_INLINE PORT_INLINE +#endif + +/** + * @name Core Functions + * @{ + * + * @note The breakpoint macro comes in several flavors: + * - @ref PORT_BREAKPOINT - + * Shouldn't be used directly + * - @ref PORT_BREAKPOINT_DEBUG - + * Causes a breakpoint in debug builds only, use for all debug purposes. + * - @ref PORT_BREAKPOINT_CHECKED - + * Causes a breakpoint in checked builds only, use when you want the + * @c int3 present in develop and release builds, such as QA builds. + * - @ref PORT_BREAKPOINT_ALWAYS - + * Always breaks, use only if you want to trigger @c int3 even on + * public release builds. + */ + + +/** + * @brief Prints a string to a platform dependent output stream + * + * This function will print the string where you would expect it for a given + * platform. In user space it will be standard out. In kernel space it will + * be the kernel debug log. + * + * Note NvPort does not provide advanced logging capabilities, only the ability + * to emit a string. For a more robust logging solution see the NvLog project. + * + */ +PORT_DEBUG_INLINE void portDbgPrintString(const char *str, NvLength length); + +/** + * @brief Convenience macro when printing a string literal. + */ +#define PORT_DBG_PRINT_STRING_LITERAL(s) portDbgPrintString(s, sizeof(s)-1) + +/** + * @def PORT_BREAKPOINT_DEBUG_ENABLED + * @brief Controls whether @ref PORT_BREAKPOINT_DEBUG is enabled or not + */ +#if !defined(PORT_BREAKPOINT_DEBUG_ENABLED) +#if defined(DEBUG) +#define PORT_BREAKPOINT_DEBUG_ENABLED 1 +#else +#define PORT_BREAKPOINT_DEBUG_ENABLED 0 +#endif +#endif + + +/** + * @def PORT_BREAKPOINT_DEBUG + * @brief Cause a breakpoint into the debugger only when + * @ref PORT_BREAKPOINT_DEBUG_ENABLED is defined. + * + * By default PORT_BREAKPOINT_DEBUG_ENABLED is set based on the value of DEBUG. + * However it is kept as a separate define so you can override separately if so + * desired. + */ +#if PORT_BREAKPOINT_DEBUG_ENABLED +#define PORT_BREAKPOINT_DEBUG PORT_BREAKPOINT +#else +#define PORT_BREAKPOINT_DEBUG() +#endif + +#define PORT_FILE_STR __FILE__ + +/// @cond NVPORT_INTERNAL +#if !defined(PORT_ASSERT_FAILED_USES_STRINGS) +#define PORT_ASSERT_FAILED_USES_STRINGS PORT_IS_CHECKED_BUILD +#endif + +#if PORT_ASSERT_FAILED_USES_STRINGS +#define _PORT_STRINGIFY2(x) #x +#define _PORT_STRINGIFY(x) _PORT_STRINGIFY2(x) +#define _PORT_ASSERT_MESSAGE(cond) "Assertion failed: \"" #cond "\" at " \ + PORT_FILE_STR ":" _PORT_STRINGIFY(__LINE__) "\n" +#else +#define _PORT_ASSERT_MESSAGE(cond) "Assertion failed" +#endif +/// @endcond + +/** + * @brief Causes a breakpoint if the condition evaluates to false. + */ +#define PORT_ASSERT(cond) \ + do \ + { \ + PORT_COVERAGE_PUSH_OFF(); \ + if (!(cond)) \ + { \ + PORT_DBG_PRINT_STRING_LITERAL(_PORT_ASSERT_MESSAGE(cond)); \ + PORT_BREAKPOINT(); \ + } \ + PORT_COVERAGE_POP(); \ + } while (0) + +/* + * Checks osDbgBreakpointEnabled and PDB_PROP_SYS_DEBUGGER_DISABLED + * to see if breakpoints are allowed + */ +NvBool nvDbgBreakpointEnabled(void); + +/** + * @def PORT_BREAKPOINT_CHECKED() + * @brief Causes a breakpoint in checked builds only + */ +/** + * @def PORT_ASSERT_CHECKED(x) + * @brief Causes an assert in checked builds only + */ +#if PORT_IS_CHECKED_BUILD + +/* + * TODO: defined(NVRM) && PORT_IS_KERNEL_BUILD && defined(NVWATCH) are all true + * when NvWatch is included in the Debug Linux AMD64 Mfg Mods build. + * This seems wrong... 
+ */ +#if defined(NVRM) && PORT_IS_KERNEL_BUILD == 1 && !defined(NVWATCH) +#define PORT_BREAKPOINT_CHECKED() \ + do \ + { \ + if (nvDbgBreakpointEnabled()) \ + PORT_BREAKPOINT(); \ + } while (0) +#else +#define PORT_BREAKPOINT_CHECKED() PORT_BREAKPOINT() +#endif +#define PORT_ASSERT_CHECKED(x) PORT_ASSERT(x) +#else // PORT_IS_CHECKED_BUILD +#define PORT_BREAKPOINT_CHECKED() +#define PORT_ASSERT_CHECKED(x) +#endif // PORT_IS_CHECKED_BUILD + +/** + * @brief Causes a breakpoint into the debugger regardless of build configuration. + * + * Note this is equivalent to just calling @ref PORT_BREAKPOINT. It is only + * included to provide an alternative to @ref PORT_BREAKPOINT_DEBUG that is + * consistent in look and usage. + */ +#define PORT_BREAKPOINT_ALWAYS PORT_BREAKPOINT + +/** + * @def PORT_COVERAGE_PUSH_OFF() + * @brief Saves the current coverage tracking state to a stack and disables it + * + * This is useful to do around some error checking code (e.g. "default:") so the + * bullseye tool doesn't take those branches into account when checking code + * coverage. + * + * - See @ref PORT_ASSERT for usage example. + * - See http://www.bullseye.com/help/build-exclude.html for more details. + */ +/** + * @def PORT_COVERAGE_PUSH_ON() + * @brief Saves the current coverage tracking state to a stack and enables it + */ +/** + * @def PORT_COVERAGE_POP() + * @brief Restores the last saved coverage tracking state + * + * See @ref PORT_ASSERT for usage example. + */ +#if defined(NV_BULLSEYE) +#define PORT_COVERAGE_PUSH_OFF() "BullseyeCoverage save off" +#define PORT_COVERAGE_PUSH_ON() "BullseyeCoverage save on" +#define PORT_COVERAGE_POP() "BullseyeCoverage restore" +#else +#define PORT_COVERAGE_PUSH_OFF() +#define PORT_COVERAGE_PUSH_ON() +#define PORT_COVERAGE_POP() +#endif + + + +/// @} End core functions + +/** + * @def NVPORT_CHECK_PRINTF_ARGUMENTS(a,b) + * @brief Compile time check that arguments conform to printf rules + */ +#if PORT_COMPILER_HAS_ATTRIBUTE_FORMAT +#define NVPORT_CHECK_PRINTF_ARGUMENTS(a,b) __attribute__((format(printf, a, b))) +#else +#define NVPORT_CHECK_PRINTF_ARGUMENTS(a,b) +#endif + +/** + * @name Extended Functions + * @{ + */ + +#if !defined(portDbgPrintf_SUPPORTED) +#define portDbgPrintf_SUPPORTED 0 +#endif +#if !defined(portDbgExPrintfLevel_SUPPORTED) +#define portDbgExPrintfLevel_SUPPORTED 0 +#endif + +#if PORT_IS_FUNC_SUPPORTED(portDbgPrintf) +/** + * @brief Prints a formatted string to using @ref portDbgPrintString + * + * The parameters are like those of printf(). + */ +PORT_DEBUG_INLINE void portDbgPrintf(const char *format, ...) NVPORT_CHECK_PRINTF_ARGUMENTS(1, 2); +#endif + +#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel) +/** + * @brief Similar to @ref portDbgPrintf, except that it passes the level to the + * underlying implementation. + * + * Some platforms (e.g. MODS) have an API where prints are given a level, and + * some tools may depend on certain prints being at a certain level. This + * function simply passes the level to that API- NvPort does not understand + * or filter these levels. + * + * @param level - An int representing the level at which to print. + */ +PORT_DEBUG_INLINE void portDbgExPrintfLevel(NvU32 level, const char *format, ...) 
NVPORT_CHECK_PRINTF_ARGUMENTS(2, 3); +#endif + +/// @} End extended functions + +// Include platform specific inline definitions + +#if NVOS_IS_QNX +#include "nvport/inline/debug_qnx.h" +#elif NVOS_IS_DCECORE +#include "nvport/inline/debug_dcecore.h" +#else + +#if PORT_IS_KERNEL_BUILD + +#if NVOS_IS_WINDOWS +#include "nvport/inline/debug_win_kernel.h" +#elif NVOS_IS_UNIX +#include "nvport/inline/debug_unix_kernel_os.h" +#elif NVOS_IS_LIBOS +#include "nvport/inline/debug_libos.h" +#else +#error "Unsupported target OS" +#endif + +#else // Usermode build + +#if NVOS_IS_WINDOWS +#include "nvport/inline/debug_win_user.h" +#elif NVOS_IS_UNIX +#include "nvport/inline/debug_unix_user.h" +#elif NVOS_IS_LIBOS +#include "nvport/inline/debug_libos.h" +#else +#error "Unsupported target OS" +#endif + +#endif // PORT_IS_KERNEL_BUILD +#endif // NV_MODS + +#ifdef __cplusplus +} +#endif //__cplusplus +#endif // _NVPORT_DEBUG_H_ +/// @} diff --git a/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h b/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h new file mode 100644 index 000000000..8d73e2f35 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/atomic_clang.h @@ -0,0 +1,472 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief Atomic functions implementations using clang compiler intrinsics + */ + +#ifndef _NVPORT_ATOMIC_CLANG_H_ +#define _NVPORT_ATOMIC_CLANG_H_ + + +#if !(defined(__clang__)) +#error "Unsupported compiler: This file can only be compiled by clang" +#endif + + +PORT_INLINE void +portAtomicMemoryFenceLoad(void) +{ + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); +} +PORT_INLINE void +portAtomicMemoryFenceStore(void) +{ + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); +} +PORT_INLINE void +portAtomicMemoryFenceFull(void) +{ + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); +} +PORT_INLINE void +portAtomicTimerBarrier(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("ISB" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("isync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("lfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence.i" : : : "memory"); +#else +#error "portAtomicTimerBarrier implementation not found" +#endif +} + +#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS + +PORT_ATOMIC_INLINE NvS32 +portAtomicAddS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvS32 *)pVal, val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicSubS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvS32 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC_INLINE void +portAtomicSetS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + __c11_atomic_store((_Atomic NvS32 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapS32 +( + volatile NvS32 *pVal, + NvS32 newVal, + NvS32 oldVal +) +{ + NvS32 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic NvS32 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicIncrementS32 +( + volatile NvS32 *pVal +) +{ + return portAtomicAddS32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicDecrementS32 +( + volatile NvS32 *pVal +) +{ + return portAtomicSubS32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicXorS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvS32 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicOrS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvS32 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicAndS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvS32 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + +PORT_ATOMIC_INLINE NvU32 +portAtomicAddU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvU32 *)pVal, val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicSubU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC_INLINE void +portAtomicSetU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + __c11_atomic_store((_Atomic NvU32 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapU32 +( + volatile NvU32 *pVal, + NvU32 newVal, + NvU32 oldVal +) +{ + NvU32 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic 
NvU32 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicIncrementU32 +( + volatile NvU32 *pVal +) +{ + return portAtomicAddU32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicDecrementU32 +( + volatile NvU32 *pVal +) +{ + return portAtomicSubU32(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicXorU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicOrU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicAndU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvU32 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + +#if NVCPU_IS_64_BITS + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAddS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExSubS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + __c11_atomic_store((_Atomic NvS64 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapS64 +( + volatile NvS64 *pVal, NvS64 newVal, NvS64 oldVal +) +{ + NvS64 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic NvS64 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExIncrementS64 +( + volatile NvS64 *pVal +) +{ + return portAtomicExAddS64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExDecrementS64 +( + volatile NvS64 *pVal +) +{ + return portAtomicExSubS64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExXorS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExOrS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAndS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvS64 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAddU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_add((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) + val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExSubU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_sub((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) - val; +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + __c11_atomic_store((_Atomic NvU64 *)pVal, val, __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapU64 +( + volatile NvU64 *pVal, NvU64 newVal, NvU64 oldVal +) +{ + NvU64 tmp = oldVal; // Needed so the compiler can still inline this function + return __c11_atomic_compare_exchange_strong((_Atomic NvU64 *)pVal, + &tmp, + newVal, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExIncrementU64 +( + volatile NvU64 *pVal +) +{ + return 
portAtomicExAddU64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExDecrementU64 +( + volatile NvU64 *pVal +) +{ + return portAtomicExSubU64(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExXorU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_xor((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) ^ val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExOrU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_or((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) | val; +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAndU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __c11_atomic_fetch_and((_Atomic NvU64 *)pVal, + val, __ATOMIC_SEQ_CST) & val; +} + + +#endif // NVCPU_IS_64_BITS + +#endif // PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS + +#endif // _NVPORT_ATOMIC_CLANG_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h b/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h new file mode 100644 index 000000000..0b00799b6 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/atomic_gcc.h @@ -0,0 +1,460 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief Atomic functions implementations using gcc compiler intrinsics + */ + +#ifndef _NVPORT_ATOMIC_GCC_H_ +#define _NVPORT_ATOMIC_GCC_H_ + + +PORT_INLINE void +portAtomicMemoryFenceStore(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("DMB ST" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("sync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("sfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence" : : : "memory"); +#else +#error "portAtomicMemoryFenceStore implementation not found" +#endif +} +PORT_INLINE void +portAtomicMemoryFenceLoad(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("DMB SY" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("sync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("lfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence" : : : "memory"); +#else +#error "portAtomicMemoryFenceLoad implementation not found" +#endif +} +PORT_INLINE void +portAtomicMemoryFenceFull(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("DMB SY" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("sync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("mfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence" : : : "memory"); +#else +#error "portAtomicMemoryFenceFull implementation not found" +#endif +} +PORT_INLINE void +portAtomicTimerBarrier(void) +{ +#if NVCPU_IS_FAMILY_ARM + __asm__ __volatile__ ("ISB" : : : "memory"); +#elif NVCPU_IS_PPC || NVCPU_IS_PPC64LE + __asm__ __volatile__ ("isync" : : : "memory"); +#elif NVCPU_IS_X86 || NVCPU_IS_X86_64 + __asm__ __volatile__ ("lfence" : : : "memory"); +#elif NVCPU_IS_RISCV64 + __asm__ __volatile__ ("fence.i" : : : "memory"); +#else +#error "portAtomicTimerBarrier implementation not found" +#endif +} + +#if PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS + +PORT_ATOMIC_INLINE NvS32 +portAtomicAddS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicSubS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE void +portAtomicSetS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapS32 +( + volatile NvS32 *pVal, + NvS32 newVal, + NvS32 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicIncrementS32 +( + volatile NvS32 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicDecrementS32 +( + volatile NvS32 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicXorS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicOrS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvS32 +portAtomicAndS32 +( + volatile NvS32 *pVal, + NvS32 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + + +PORT_ATOMIC_INLINE NvU32 +portAtomicAddU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicSubU32 +( + volatile NvU32 
*pVal, + NvU32 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE void +portAtomicSetU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC_INLINE NvBool +portAtomicCompareAndSwapU32 +( + volatile NvU32 *pVal, + NvU32 newVal, + NvU32 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicIncrementU32 +( + volatile NvU32 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicDecrementU32 +( + volatile NvU32 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicXorU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicOrU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC_INLINE NvU32 +portAtomicAndU32 +( + volatile NvU32 *pVal, + NvU32 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + + + +#if defined(NV_64_BITS) + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAddS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExSubS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapS64 +( + volatile NvS64 *pVal, + NvS64 newVal, + NvS64 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExIncrementS64 +( + volatile NvS64 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExDecrementS64 +( + volatile NvS64 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExXorS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExOrS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvS64 +portAtomicExAndS64 +( + volatile NvS64 *pVal, + NvS64 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAddU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_add_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExSubU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_sub_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE void +portAtomicExSetU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + while (!__sync_bool_compare_and_swap(pVal, *pVal, val)); +} + +PORT_ATOMIC64_INLINE NvBool +portAtomicExCompareAndSwapU64 +( + volatile NvU64 *pVal, + NvU64 newVal, + NvU64 oldVal +) +{ + return __sync_bool_compare_and_swap(pVal, oldVal, newVal); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExIncrementU64 +( + volatile NvU64 *pVal +) +{ + return __sync_add_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExDecrementU64 +( + volatile NvU64 *pVal +) +{ + return __sync_sub_and_fetch(pVal, 1); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExXorU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_xor_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExOrU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return 
__sync_or_and_fetch(pVal, val); +} + +PORT_ATOMIC64_INLINE NvU64 +portAtomicExAndU64 +( + volatile NvU64 *pVal, + NvU64 val +) +{ + return __sync_and_and_fetch(pVal, val); +} + +#endif // NV_64_BITS + +#endif // PORT_COMPILER_HAS_INTRINSIC_ATOMICS && !defined(NV_MODS) && !NVOS_IS_LIBOS +#endif // _NVPORT_ATOMIC_GCC_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h b/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h new file mode 100644 index 000000000..094f5440a --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/debug_unix_kernel_os.h @@ -0,0 +1,74 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief DEBUG module implementation for Unix kernelspace. + */ +#ifndef _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ +#define _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nv-kernel-interface-api.h" +void NV_API_CALL os_dbg_breakpoint(void); +void NV_API_CALL out_string(const char *str); +int NV_API_CALL nv_printf(NvU32 debuglevel, const char *format, ...); + +// No init/shutdown needed +#define portDbgInitialize() +#define portDbgShutdown() + + +PORT_DEBUG_INLINE void +portDbgPrintString +( + const char *str, + NvLength length +) +{ + out_string(str); +} + +#define portDbgPrintf(fmt, ...) nv_printf(0xFFFFFFFF, fmt, ##__VA_ARGS__) +#undef portDbgPrintf_SUPPORTED +#define portDbgPrintf_SUPPORTED 1 + +#define portDbgExPrintfLevel(level, fmt, ...) nv_printf(level, fmt, ##__VA_ARGS__) +#undef portDbgExPrintfLevel_SUPPORTED +#define portDbgExPrintfLevel_SUPPORTED 1 + +#define PORT_BREAKPOINT() os_dbg_breakpoint() + +#ifdef __cplusplus +} +#endif //__cplusplus +#endif // _NVPORT_DEBUG_UNIX_KERNEL_OS_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h b/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h new file mode 100644 index 000000000..5ebdae10e --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/memory_tracking.h @@ -0,0 +1,323 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief NvPort MEMORY module extension to track memory allocations + * + * This file is internal to NvPort MEMORY module. + * @cond NVPORT_INTERNAL + */ + +#ifndef _NVPORT_MEMORY_INTERNAL_H_ +#define _NVPORT_MEMORY_INTERNAL_H_ + +/** @brief Untracked paged memory allocation, platform specific */ +void *_portMemAllocPagedUntracked(NvLength lengthBytes); +/** @brief Untracked nonpaged memory allocation, platform specific */ +void *_portMemAllocNonPagedUntracked(NvLength lengthBytes); +/** @brief Untracked memory free, platform specific */ +void _portMemFreeUntracked(void *pMemory); +/** @brief Wrapper around pAlloc->_portAlloc() that tracks the allocation */ +void *_portMemAllocatorAlloc(PORT_MEM_ALLOCATOR *pAlloc, NvLength length); +/** @brief Wrapper around pAlloc->_portFree() that tracks the allocation */ +void _portMemAllocatorFree(PORT_MEM_ALLOCATOR *pAlloc, void *pMem); + + +typedef struct PORT_MEM_COUNTER +{ + volatile NvU32 activeAllocs; + volatile NvU32 totalAllocs; + volatile NvU32 peakAllocs; + volatile NvLength activeSize; + volatile NvLength totalSize; + volatile NvLength peakSize; +} PORT_MEM_COUNTER; + +typedef struct PORT_MEM_FENCE_HEAD +{ + PORT_MEM_ALLOCATOR *pAllocator; + NvLength blockSize; + NvU32 magic; +} PORT_MEM_FENCE_HEAD; + +typedef struct PORT_MEM_FENCE_TAIL +{ + NvU32 magic; +} PORT_MEM_FENCE_TAIL; + +typedef struct PORT_MEM_LIST +{ + struct PORT_MEM_LIST *pPrev; + struct PORT_MEM_LIST *pNext; +} PORT_MEM_LIST; + +#if PORT_MEM_TRACK_USE_CALLERINFO + +#if PORT_MEM_TRACK_USE_CALLERINFO_IP + +typedef NvU64 PORT_MEM_CALLERINFO; +#define PORT_MEM_CALLERINFO_MAKE ((NvU64)portUtilGetIPAddress()) + +#else // PORT_MEM_TRACK_USE_CALLERINFO_IP + +typedef struct PORT_MEM_CALLERINFO +{ + const char *file; + const char *func; + NvU32 line; +} PORT_MEM_CALLERINFO; + +/** @note Needed since not all compilers support automatic struct creation */ +static NV_INLINE PORT_MEM_CALLERINFO +_portMemCallerInfoMake +( + const char *file, + const char *func, + NvU32 line +) +{ + PORT_MEM_CALLERINFO callerInfo; + callerInfo.file = file; + callerInfo.func = func; + callerInfo.line = line; + return callerInfo; +} + +#define PORT_MEM_CALLERINFO_MAKE \ + _portMemCallerInfoMake(__FILE__, __FUNCTION__, __LINE__) +#endif // PORT_MEM_TRACK_USE_CALLERINFO_IP + +void *portMemAllocPaged_CallerInfo(NvLength, PORT_MEM_CALLERINFO); +void 
*portMemAllocNonPaged_CallerInfo(NvLength, PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreatePaged_CallerInfo(PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreateNonPaged_CallerInfo(PORT_MEM_CALLERINFO); +void portMemInitializeAllocatorTracking_CallerInfo(PORT_MEM_ALLOCATOR *, PORT_MEM_ALLOCATOR_TRACKING *, PORT_MEM_CALLERINFO); +void *_portMemAllocatorAlloc_CallerInfo(PORT_MEM_ALLOCATOR*, NvLength, PORT_MEM_CALLERINFO); +PORT_MEM_ALLOCATOR *portMemAllocatorCreateOnExistingBlock_CallerInfo(void *, NvLength, PORT_MEM_CALLERINFO); +#if portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +PORT_MEM_ALLOCATOR *portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo(void *, NvLength, void *, PORT_MEM_CALLERINFO); +#endif //portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#undef PORT_ALLOC +#define PORT_ALLOC(pAlloc, length) \ + _portMemAllocatorAlloc_CallerInfo(pAlloc, length, PORT_MEM_CALLERINFO_MAKE) + +#define portMemAllocPaged(size) \ + portMemAllocPaged_CallerInfo((size), PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocNonPaged(size) \ + portMemAllocNonPaged_CallerInfo((size), PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocatorCreatePaged() \ + portMemAllocatorCreatePaged_CallerInfo(PORT_MEM_CALLERINFO_MAKE) +#define portMemAllocatorCreateNonPaged() \ + portMemAllocatorCreateNonPaged_CallerInfo(PORT_MEM_CALLERINFO_MAKE) + +#define portMemInitializeAllocatorTracking(pAlloc, pTrack) \ + portMemInitializeAllocatorTracking_CallerInfo(pAlloc, pTrack, PORT_MEM_CALLERINFO_MAKE) + +#define portMemAllocatorCreateOnExistingBlock(pMem, size) \ + portMemAllocatorCreateOnExistingBlock_CallerInfo(pMem, size, PORT_MEM_CALLERINFO_MAKE) +#if portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#define portMemExAllocatorCreateLockedOnExistingBlock(pMem, size, pLock) \ + portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo(pMem, size, pLock,\ + PORT_MEM_CALLERINFO_MAKE) +#endif //portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED +#else +#define PORT_MEM_CALLERINFO_MAKE +#endif // CALLERINFO + + +#if PORT_MEM_TRACK_USE_FENCEPOSTS || PORT_MEM_TRACK_USE_ALLOCLIST || PORT_MEM_TRACK_USE_CALLERINFO +typedef struct PORT_MEM_HEADER +{ +#if PORT_MEM_TRACK_USE_CALLERINFO + PORT_MEM_CALLERINFO callerInfo; +#endif +#if PORT_MEM_TRACK_USE_ALLOCLIST + PORT_MEM_LIST list; +#endif +#if PORT_MEM_TRACK_USE_FENCEPOSTS + PORT_MEM_FENCE_HEAD fence; +#endif +} PORT_MEM_HEADER; + +typedef struct PORT_MEM_FOOTER +{ +#if PORT_MEM_TRACK_USE_FENCEPOSTS + PORT_MEM_FENCE_TAIL fence; +#endif +} PORT_MEM_FOOTER; + +#define PORT_MEM_ADD_HEADER_PTR(p) ((PORT_MEM_HEADER*)p + 1) +#define PORT_MEM_SUB_HEADER_PTR(p) ((PORT_MEM_HEADER*)p - 1) +#define PORT_MEM_STAGING_SIZE (sizeof(PORT_MEM_HEADER)+sizeof(PORT_MEM_FOOTER)) + +#else +#define PORT_MEM_ADD_HEADER_PTR(p) p +#define PORT_MEM_SUB_HEADER_PTR(p) p +#define PORT_MEM_STAGING_SIZE 0 +#endif + +struct PORT_MEM_ALLOCATOR_TRACKING +{ + PORT_MEM_ALLOCATOR *pAllocator; + struct PORT_MEM_ALLOCATOR_TRACKING *pPrev; + struct PORT_MEM_ALLOCATOR_TRACKING *pNext; + +#if PORT_MEM_TRACK_USE_COUNTER + PORT_MEM_COUNTER counter; +#endif +#if PORT_MEM_TRACK_USE_ALLOCLIST + PORT_MEM_LIST *pFirstAlloc; + void *listLock; +#endif +#if PORT_MEM_TRACK_USE_CALLERINFO + PORT_MEM_CALLERINFO callerInfo; +#endif +}; + + +#define portMemExTrackingGetActiveStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define portMemExTrackingGetTotalStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define portMemExTrackingGetPeakStats_SUPPORTED PORT_MEM_TRACK_USE_COUNTER +#define 
portMemExTrackingGetNext_SUPPORTED \ + (PORT_MEM_TRACK_USE_FENCEPOSTS & PORT_MEM_TRACK_USE_ALLOCLIST) + +#define portMemExValidate_SUPPORTED 0 +#define portMemExValidateAllocations_SUPPORTED 0 +#define portMemExFreeAll_SUPPORTED 0 + +/// @brief Actual size of an allocator structure, including internals +#define PORT_MEM_ALLOCATOR_SIZE \ + (sizeof(PORT_MEM_ALLOCATOR) + sizeof(PORT_MEM_ALLOCATOR_TRACKING)) + +#if defined(BIT) +#define NVIDIA_UNDEF_LEGACY_BIT_MACROS +#endif +#include "nvmisc.h" + +// +// Internal bitvector structures for allocators over existing blocks +// +#define PORT_MEM_BITVECTOR_CHUNK_SIZE 16U +typedef NvU8 PORT_MEM_BITVECTOR_CHUNK[PORT_MEM_BITVECTOR_CHUNK_SIZE]; +typedef struct +{ + // + // Points to a PORT_SPINLOCK that make memory thread safe. + // If this is not thread safe variant, then it is NULL. + // + void *pSpinlock; + // Points to after the bitvector, aligned to first chunk. + PORT_MEM_BITVECTOR_CHUNK *pChunks; + NvU32 numChunks; + // + // What follows are two bitvectors one next to another: + // - The first represents availability of chunks: 0=free, 1=allocated + // - The second represents allocation sizes: 1=last chunk of an allocation + // So the total size of this array is 2*numChunks bits + // The second vector continues immediately after the first, no alignment + // + // Example: numChunks = 8, 2 allocations of 3 chunks each: + // bits == |11111100| <- 2*3 chunks allocated, 2 free + // |00100100| <- Chunks 2 and 5 are last in allocation + // + NvU32 bits[NV_ANYSIZE_ARRAY]; +} PORT_MEM_BITVECTOR; + +/// @note the following can be used as arguments for static array size, so +/// they must be fully known at compile time - macros, not inline functions + +/// @brief Total number of chunks in a preallocated block of given size +#define PORT_MEM_PREALLOCATED_BLOCK_NUM_CHUNKS(size) \ + NV_DIV_AND_CEIL(size, PORT_MEM_BITVECTOR_CHUNK_SIZE) + +/// @brief Minimal nonaligned bookkeeping size required for a preallocated block +#define PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_NONALIGNED_EXTRA_SIZE \ + sizeof(PORT_MEM_ALLOCATOR) + sizeof(PORT_MEM_BITVECTOR) + +/// @brief Minimal bookkeeping size required for a preallocated block +#define PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE \ + NV_ALIGN_UP(PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_NONALIGNED_EXTRA_SIZE, \ + PORT_MEM_BITVECTOR_CHUNK_SIZE) + +/// @brief Number of chunks that can be tracked in the minimal bookkeeping size +#define PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS \ + (( \ + PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE - \ + sizeof(PORT_MEM_ALLOCATOR) - \ + NV_OFFSETOF(PORT_MEM_BITVECTOR, bits) \ + )*4U) + +// Although we can never execute the underflow branch, the compiler will complain +// if any constant expression results in underflow, even in dead code. +// Note: Skipping (parens) around a and b on purpose here. +#define _PORT_CEIL_NO_UNDERFLOW(a, b) (NV_DIV_AND_CEIL(b + a, b) - 1) + +/// @brief Required additional size for a given number of chunks +#define PORT_MEM_PREALLOCATED_BLOCK_SIZE_FOR_NONGRATIS_CHUNKS(num_chunks) \ + ((num_chunks > PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS) \ + ? 
_PORT_CEIL_NO_UNDERFLOW(num_chunks - PORT_MEM_PREALLOCATED_BLOCK_CHUNKS_GRATIS,\ + 4*PORT_MEM_BITVECTOR_CHUNK_SIZE) \ + * PORT_MEM_BITVECTOR_CHUNK_SIZE \ + : 0) + +/// @brief Total required bookkeeping size for a block of given useful size +#define PORT_MEM_PREALLOCATED_BLOCK_EXTRA_SIZE(size) \ + PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE + \ + PORT_MEM_PREALLOCATED_BLOCK_SIZE_FOR_NONGRATIS_CHUNKS( \ + PORT_MEM_PREALLOCATED_BLOCK_NUM_CHUNKS(size)) + +/** + * Macros for defining memory allocation wrappers. + * + * The function / file / line reference is not useful when portMemAlloc + * is called from a generic memory allocator function, such as the memCreate + * function in resman. + * + * These macros can be used to push the function /file / line reference up one + * level when defining a memory allocator function. In other words, log who + * calls memCreate instead of logging memCreate. + * + * These macros are also used throughout memory-tracking.c + */ +#if PORT_MEM_TRACK_USE_CALLERINFO + +#define PORT_MEM_CALLERINFO_PARAM _portMemCallerInfo +#define PORT_MEM_CALLERINFO_TYPE_PARAM \ + PORT_MEM_CALLERINFO PORT_MEM_CALLERINFO_PARAM +#define PORT_MEM_CALLERINFO_COMMA_PARAM ,PORT_MEM_CALLERINFO_PARAM +#define PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM ,PORT_MEM_CALLERINFO_TYPE_PARAM +#define PORT_MEM_CALLINFO_FUNC(f) f##_CallerInfo + +#else // PORT_MEM_TRACK_USE_CALLERINFO + +#define PORT_MEM_CALLERINFO_PARAM +#define PORT_MEM_CALLERINFO_TYPE_PARAM +#define PORT_MEM_CALLERINFO_COMMA_PARAM +#define PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +#define PORT_MEM_CALLINFO_FUNC(f) f + +#endif // PORT_MEM_TRACK_USE_CALLERINFO + +#endif // _NVPORT_MEMORY_INTERNAL_H_ +/// @endcond diff --git a/src/nvidia/inc/libraries/nvport/inline/safe_generic.h b/src/nvidia/inc/libraries/nvport/inline/safe_generic.h new file mode 100644 index 000000000..e851b7703 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/safe_generic.h @@ -0,0 +1,311 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// Disable warnings when constant expressions are always true/false, and +// some signed/unsigned mismatch. To get a common implementation for all safe +// functions, we need to rely on these. There is no undefined behavior here. 
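+//
+// As a reference point, the helpers generated below follow this usage pattern
+// (illustrative sketch; a, b and total are hypothetical NvU32 values):
+//
+//     NvU32 total;
+//     if (!portSafeAddU32(a, b, &total))
+//     {
+//         // Overflow: NV_FALSE was returned and total holds the wrapped sum.
+//     }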
+// +#if PORT_COMPILER_IS_MSVC +#pragma warning( disable : 4296) +#elif PORT_COMPILER_IS_GCC +// GCC 4.6+ needed for GCC diagnostic +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) +#pragma GCC diagnostic push +// Allow unknown pragmas to ignore unrecognized -W flags. +#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wtautological-constant-out-of-range-compare" +#pragma GCC diagnostic ignored "-Wsign-compare" +#pragma GCC diagnostic ignored "-Wtype-limits" +#else +// +// On older GCCs we declare this as a system header, which tells the compiler +// to ignore all warnings in it (this has no effect on the primary source file) +// +#pragma GCC system_header +#endif +#elif PORT_COMPILER_IS_CLANG +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wtautological-constant-out-of-range-compare" +#pragma clang diagnostic ignored "-Wsign-compare" +#pragma clang diagnostic ignored "-Wtype-limits" +#endif + +#define PORT_SAFE_OP(a, b, pRes, _op_, _US_) \ + ((sizeof(a) == 1) ? portSafe##_op_##_US_##8 (a, b, pRes) : \ + (sizeof(a) == 2) ? portSafe##_op_##_US_##16(a, b, pRes) : \ + (sizeof(a) == 4) ? portSafe##_op_##_US_##32(a, b, pRes) : \ + (sizeof(a) == 8) ? portSafe##_op_##_US_##64(a, b, pRes) : \ + NV_FALSE) + +#define PORT_SAFE_ADD_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Add, U) +#define PORT_SAFE_SUB_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Sub, U) +#define PORT_SAFE_MUL_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Mul, U) +#define PORT_SAFE_DIV_U(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Div, U) + +#define PORT_SAFE_ADD_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Add, S) +#define PORT_SAFE_SUB_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Sub, S) +#define PORT_SAFE_MUL_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Mul, S) +#define PORT_SAFE_DIV_S(a, b, pRes) PORT_SAFE_OP(a, b, pRes, Div, S) + +#define PORT_SAFE_ADD(a, b, pRes) PORT_SAFE_ADD_U(a, b, pRes) +#define PORT_SAFE_SUB(a, b, pRes) PORT_SAFE_SUB_U(a, b, pRes) +#define PORT_SAFE_MUL(a, b, pRes) PORT_SAFE_MUL_U(a, b, pRes) +#define PORT_SAFE_DIV(a, b, pRes) PORT_SAFE_DIV_U(a, b, pRes) + +//////////////////////////////////////////////////////////////////////////////// + +#define PORT_EXPAND(X) X +#define PORT_SAFE_MAX(t) PORT_EXPAND(NV_##t##_MAX) +#define PORT_SAFE_MIN(t) PORT_EXPAND(NV_##t##_MIN) + +// These constants should really be in nvtypes.h +#if !defined (NV_UPtr_MAX) +#if defined(NV_64_BITS) +#define NV_UPtr_MAX NV_U64_MAX +#define NV_Length_MAX NV_U64_MAX +#else +#define NV_UPtr_MAX NV_U32_MAX +#define NV_Length_MAX NV_U32_MAX +#endif +#define NV_UPtr_MIN 0 +#define NV_Length_MIN 0 +#endif + +#define PORT_WILL_OVERFLOW_UADD(a, b) ((a + b) < a) +#define PORT_WILL_OVERFLOW_USUB(a, b) (b > a) +#define PORT_WILL_OVERFLOW_UMUL(a, b, r) (a != 0 && b != (r/a)) + +/** @note Signed overflow is Undefined Behavior, which means we have to detect + * it before it actually happens. We can't do (a+b) unless we are sure it won't + * overflow. + */ +#define PORT_WILL_OVERFLOW_SADD(a, b, size) \ + ((b < 0) ? (a < (NV_S##size##_MIN - b)) : (a > (NV_S##size##_MAX - b))) + +#define PORT_WILL_OVERFLOW_SSUB(a, b, size) \ + ((b < 0) ? (a > (NV_S##size##_MAX + b)) : (a < (NV_S##size##_MIN + b))) + +#define PORT_MIN_MUL(x, s) ((x < 0) ? (NV_S##s##_MAX / x) : (NV_S##s##_MIN / x)) +#define PORT_MAX_MUL(x, s) ((x < 0) ? 
(NV_S##s##_MIN / x) : (NV_S##s##_MAX / x)) +#define PORT_WILL_OVERFLOW_SMUL(a, b, size) \ + (a != 0 && b != 0 && (a > PORT_MAX_MUL(b, size) || a < PORT_MIN_MUL(b, size))) + +#define PORT_SAFE_DIV_IMPL(a, b, pRes) \ + ((b == 0) ? NV_FALSE : ((*pRes = a / b), NV_TRUE)) + +#define PORT_SAFE_Add_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SADD(a, b, n) ? NV_FALSE : ((*pRes = a + b), NV_TRUE)) +#define PORT_SAFE_Sub_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SSUB(a, b, n) ? NV_FALSE : ((*pRes = a - b), NV_TRUE)) +#define PORT_SAFE_Mul_IMPL_S(a, b, pRes, n) \ + (PORT_WILL_OVERFLOW_SMUL(a, b, n) ? NV_FALSE : ((*pRes = a * b), NV_TRUE)) +#define PORT_SAFE_Div_IMPL_S(a, b, pRes, n) PORT_SAFE_DIV_IMPL(a, b, pRes) + +#define PORT_SAFE_Add_IMPL_U(a, b, pRes, n) \ + ((*pRes = a + b), ((*pRes < a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Sub_IMPL_U(a, b, pRes, n) \ + ((*pRes = a - b), ((b > a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Mul_IMPL_U(a, b, pRes, n) \ + ((*pRes = a * b), ((a != 0 && b != *pRes/a) ? NV_FALSE : NV_TRUE)) +#define PORT_SAFE_Div_IMPL_U(a, b, pRes, n) PORT_SAFE_DIV_IMPL(a, b, pRes) + + +#define PORT_SAFE_Add_IMPL_ PORT_SAFE_Add_IMPL_U +#define PORT_SAFE_Sub_IMPL_ PORT_SAFE_Sub_IMPL_U +#define PORT_SAFE_Mul_IMPL_ PORT_SAFE_Mul_IMPL_U +#define PORT_SAFE_Div_IMPL_ PORT_SAFE_Div_IMPL_U + +#define PORT_SAFE_CAST(a, b, t) \ + ((a < PORT_SAFE_MIN(t) || a > PORT_SAFE_MAX(t)) ? \ + NV_FALSE : \ + ((b = (Nv##t) a), NV_TRUE)) + + +#define PORT_SAFE_DEFINE_MATH_FUNC(_op_, _US_, _size_) \ + PORT_SAFE_INLINE NvBool \ + portSafe##_op_##_US_##_size_ \ + ( \ + Nv##_US_##_size_ x, \ + Nv##_US_##_size_ y, \ + Nv##_US_##_size_ *pRes \ + ) \ + { \ + return PORT_EXPAND(PORT_SAFE_##_op_##_IMPL_##_US_)(x, y, pRes, _size_);\ + } + + +#define PORT_SAFE_DEFINE_CAST_FUNC(_type_from_, _type_to_) \ + PORT_SAFE_INLINE NvBool \ + portSafe##_type_from_##To##_type_to_ \ + ( \ + Nv##_type_from_ data, \ + Nv##_type_to_ *pResult \ + ) \ + { \ + if (((data<0) && (PORT_SAFE_MIN(_type_to_) == 0 || \ + PORT_SAFE_MIN(_type_to_) > data)) \ + || data > PORT_SAFE_MAX(_type_to_)) \ + return NV_FALSE; \ + *pResult = (Nv##_type_to_) data; \ + return NV_TRUE; \ + } + + + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 8) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 16) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 32) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, S, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Div, S, 64) + + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 8) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 8) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 16) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 16) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 32) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 32) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, 64) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, 64) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, U, Ptr) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, U, Ptr) 
+PORT_SAFE_DEFINE_MATH_FUNC(Mul, U, Ptr) +PORT_SAFE_DEFINE_MATH_FUNC(Div, U, Ptr) + +PORT_SAFE_DEFINE_MATH_FUNC(Add, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Sub, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Mul, , Length) +PORT_SAFE_DEFINE_MATH_FUNC(Div, , Length) + + +PORT_SAFE_DEFINE_CAST_FUNC(S8, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S8, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S8, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S8, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(S16, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S16, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S16, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S16, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(S32, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S32, S16) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S32, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S32, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S32, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(S64, S8) +PORT_SAFE_DEFINE_CAST_FUNC(S64, S16) +PORT_SAFE_DEFINE_CAST_FUNC(S64, S32) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U8) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U16) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U32) +PORT_SAFE_DEFINE_CAST_FUNC(S64, U64) +PORT_SAFE_DEFINE_CAST_FUNC(S64, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(S64, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(U8, S8) + +PORT_SAFE_DEFINE_CAST_FUNC(U16, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U16, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U16, U8) + +PORT_SAFE_DEFINE_CAST_FUNC(U32, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U32, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U32, S32) +PORT_SAFE_DEFINE_CAST_FUNC(U32, U8) +PORT_SAFE_DEFINE_CAST_FUNC(U32, U16) + +PORT_SAFE_DEFINE_CAST_FUNC(U64, S8) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S16) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S32) +PORT_SAFE_DEFINE_CAST_FUNC(U64, S64) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U8) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U16) +PORT_SAFE_DEFINE_CAST_FUNC(U64, U32) +PORT_SAFE_DEFINE_CAST_FUNC(U64, UPtr) +PORT_SAFE_DEFINE_CAST_FUNC(U64, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S8) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S16) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S32) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, S64) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U8) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U16) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U32) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, U64) +PORT_SAFE_DEFINE_CAST_FUNC(UPtr, Length) + +PORT_SAFE_DEFINE_CAST_FUNC(Length, S8) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S16) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S32) +PORT_SAFE_DEFINE_CAST_FUNC(Length, S64) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U8) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U16) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U32) +PORT_SAFE_DEFINE_CAST_FUNC(Length, U64) +PORT_SAFE_DEFINE_CAST_FUNC(Length, UPtr) + + +#if PORT_COMPILER_IS_MSVC +#pragma warning( default : 4296) +#elif PORT_COMPILER_IS_GCC && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#pragma GCC diagnostic pop +#elif PORT_COMPILER_IS_CLANG +#pragma clang diagnostic pop +#endif diff --git a/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h b/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h new file mode 100644 index 000000000..be23a99a3 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/sync_tracking.h @@ -0,0 +1,211 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/** + * @file + * @brief SYNC debugging utilities + * + * If PORT_SYNC_PRINT_DEBUG_INFO is defined, the definitions in this file will + * cause all Sync operations to verbosely print out the actions performed. + */ + +#if defined(PORT_SYNC_PRINT_DEBUG_INFO) + +#if defined(PORT_SYNC_IMPL) + +#undef portSyncInitialize +#undef portSyncShutdown +#undef portSyncSpinlockInitialize +#undef portSyncSpinlockCreate +#undef portSyncSpinlockDestroy +#undef portSyncSpinlockAcquire +#undef portSyncSpinlockRelease +#undef portSyncMutexInitialize +#undef portSyncMutexCreate +#undef portSyncMutexDestroy +#undef portSyncMutexAcquire +#undef portSyncMutexRelease +#undef portSyncMutexAcquireConditional +#undef portSyncSemaphoreInitialize +#undef portSyncSemaphoreCreate +#undef portSyncSemaphoreDestroy +#undef portSyncSemaphoreAcquire +#undef portSyncSemaphoreRelease +#undef portSyncSemaphoreAcquireConditional + +#else + +#define portSyncInitialize() \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncInitialize() ", __FILE__, __LINE__); \ + portSyncInitialize(); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncShutdown() \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncShutdown() ", __FILE__, __LINE__); \ + portSyncShutdown(); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + + + +static NV_INLINE NV_STATUS _syncPrintReturnStatus(NV_STATUS status) +{ + portDbgPrintf("%s\n", nvstatusToString(status)); + return status; +} + +static NV_INLINE void *_syncPrintReturnPtr(void *ptr) +{ + portDbgPrintf("%p\n", ptr); + return ptr; +} + + +#define portSyncSpinlockInitialize(pSpinlock) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockInitialize(%p) - ", \ + __FILE__, __LINE__, pSpinlock),\ + _syncPrintReturnStatus(portSyncSpinlockInitialize(pSpinlock))) + +#define portSyncSpinlockCreate(pAllocator) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockCreate(%p) - ", \ + __FILE__, __LINE__, pAllocator),\ + _syncPrintReturnPtr(portSyncSpinlockCreate(pAllocator))) + +#define portSyncSpinlockDestroy(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockDestroy(%p) ",\ + __FILE__, __LINE__, pSpinlock); \ + portSyncSpinlockDestroy(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSpinlockAcquire(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockAcquire(%p) ",\ + __FILE__, __LINE__, pSpinlock); \ + 
portSyncSpinlockAcquire(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSpinlockRelease(pSpinlock) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSpinlockRelease(%p) ",\ + __FILE__, __LINE__, pSpinlock); \ + portSyncSpinlockRelease(pSpinlock); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + + + + +#define portSyncMutexInitialize(pMutex) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexInitialize(%p) - ", \ + __FILE__, __LINE__, pMutex),\ + _syncPrintReturnStatus(portSyncMutexInitialize(pMutex))) + +#define portSyncMutexCreate(pAllocator) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexCreate(%p) - ", \ + __FILE__, __LINE__, pAllocator),\ + _syncPrintReturnPtr(portSyncMutexCreate(pAllocator))) + +#define portSyncMutexDestroy(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexDestroy(%p) ",\ + __FILE__, __LINE__, pMutex); \ + portSyncMutexDestroy(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexAcquire(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexAcquire(%p) ",\ + __FILE__, __LINE__, pMutex); \ + portSyncMutexAcquire(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexRelease(pMutex) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexRelease(%p) ",\ + __FILE__, __LINE__, pMutex); \ + portSyncMutexRelease(pMutex); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncMutexAcquireConditional(pMutex) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncMutexAcquireConditional(%p) - ", \ + __FILE__, __LINE__, pMutex),\ + (portSyncMutexAcquireConditional(pMutex) ? \ + (portDbgPrintf("TRUE\n"),NV_TRUE) : (portDbgPrintf("FALSE\n"),NV_FALSE))) + + + + + +#define portSyncSemaphoreInitialize(pSemaphore, s, l) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreInitialize(%p, %u, %u) - ", \ + __FILE__, __LINE__, pSemaphore, s, l),\ + _syncPrintReturnStatus(portSyncSemaphoreInitialize(pSemaphore, s, l))) + +#define portSyncSemaphoreCreate(pAllocator, s, l) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreCreate(%p, %u, %u) - ", \ + __FILE__, __LINE__, pAllocator, s, l),\ + _syncPrintReturnPtr(portSyncSemaphoreCreate(pAllocator, s, l))) + +#define portSyncSemaphoreDestroy(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreDestroy(%p) ",\ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreDestroy(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreAcquire(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreAcquire(%p) ",\ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreAcquire(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreRelease(pSemaphore) \ + do { \ + portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreRelease(%p) ",\ + __FILE__, __LINE__, pSemaphore); \ + portSyncSemaphoreRelease(pSemaphore); \ + portDbgPrintf(" - Done;\n"); \ + } while (0) + +#define portSyncSemaphoreAcquireConditional(pSemaphore) \ + (portDbgPrintf("SYNC-DBG (%s:%d): portSyncSemaphoreAcquireConditional(%p) - ", \ + __FILE__, __LINE__, pSemaphore),\ + (portSyncSemaphoreAcquireConditional(pSemaphore) ? 
\ + (portDbgPrintf("TRUE\n"),NV_TRUE) : (portDbgPrintf("FALSE\n"),NV_FALSE))) + + +#endif // PORT_SYNC_IMPL +#endif // PORT_SYNC_PRINT_DEBUG_INFO diff --git a/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h b/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h new file mode 100644 index 000000000..e9c089c5c --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/util_gcc_clang.h @@ -0,0 +1,188 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util functions implementations using gcc and clang compiler intrinsics + */ + +#ifndef _NVPORT_UTIL_GCC_CLANG_H_ +#define _NVPORT_UTIL_GCC_CLANG_H_ + +// +// Disabling portUtilExGetStackTrace_SUPPORTED on all clients because the +// implementation is unsafe and generates warnings on new build compilers. +// +// From https://gcc.gnu.org/onlinedocs/gcc/Return-Address.html : +// Calling this function with a nonzero argument can have unpredictable effects, +// including crashing the calling program. As a result, calls that are considered +// unsafe are diagnosed when the -Wframe-address option is in effect. Such calls +// should only be made in debugging situations. +// +// If this feature is desirable, please replace the body of portUtilExGetStackTrace() +// with implementations that tie into native stacktrace reporting infrastructure +// of the platforms nvport runs on. 
+// +#define portUtilExGetStackTrace_SUPPORTED 0 +#define portUtilExGetStackTrace(_level) ((NvUPtr)0) + +#define portUtilGetReturnAddress() (NvUPtr)__builtin_return_address(0) + +#if NVCPU_IS_X86 || NVCPU_IS_X86_64 +#define NVPORT_DUMMY_LOOP() \ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + \ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + \ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + \ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause");\ + __asm__ __volatile__ ("pause"); +#else +#define NVPORT_DUMMY_LOOP() \ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + \ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + \ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + \ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop");\ + __asm__ __volatile__ ("nop"); +#endif + +#if (__GNUC__ < 4) || (NVCPU_IS_ARM) || (NVCPU_IS_X86 && PORT_IS_KERNEL_BUILD) || (NVCPU_IS_RISCV64) +#define PORT_UTIL_CLZ_CTX_NOT_DEFINED 1 +#else +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n) +{ + if (n == 0) + return 64; + + return __builtin_clzll(n); +} +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n) +{ + if (n == 0) + return 32; + + return __builtin_clz(n); +} + + +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n) +{ + if (n == 0) + return 64; + + return __builtin_ctzll(n); +} +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n) +{ + if (n == 0) + return 32; + + return __builtin_ctz(n); +} + +#endif + + +#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS) +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter() +{ + NvU32 lo; + NvU32 hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (lo | ((NvU64)hi << 32)); +} +#define portUtilExReadTimestampCounter_SUPPORTED 1 + +#elif NVCPU_IS_AARCH64 && !defined(NV_MODS) +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter() +{ + NvU64 ts = 0; + __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (ts)); + return ts; +} +#define portUtilExReadTimestampCounter_SUPPORTED 1 + +#elif NVCPU_IS_PPC64LE && !defined(NV_MODS) +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter() +{ + NvU64 ts; + __asm__ __volatile__ ("mfspr %0,268" : "=r"(ts)); + return ts; +} +#define portUtilExReadTimestampCounter_SUPPORTED 1 + +#elif NVCPU_IS_PPC && !defined(NV_MODS) +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter() +{ + NvU32 lo, hi, tmp; + __asm__ __volatile__ ( + "0:\n" + "mftbu %0\n" + "mftbl %1\n" + "mftbu %2\n" + "cmpw %0, %2\n" + "bne- 0b" + : "=r" (hi), "=r" (lo), "=r" (tmp) ); + return ((hi << 32) | lo); +} +#define portUtilExReadTimestampCounter_SUPPORTED 1 + +#else +#define portUtilExReadTimestampCounter_SUPPORTED 0 +#endif + +#endif // 
_NVPORT_UTIL_GCC_CLANG_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/util_generic.h b/src/nvidia/inc/libraries/nvport/inline/util_generic.h new file mode 100644 index 000000000..d9fe83a1b --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/util_generic.h @@ -0,0 +1,267 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief UTIL module generic crossplatform implementation + */ + +#ifndef _NVPORT_UTIL_GENERIC_H_ +#define _NVPORT_UTIL_GENERIC_H_ + +PORT_UTIL_INLINE NvBool +portUtilCheckOverlap +( + const NvU8 *pData0, + NvLength len0, + const NvU8 *pData1, + NvLength len1 +) +{ + return (pData0 >= pData1 && pData0 < (pData1 + len1)) || + (pData1 >= pData0 && pData1 < (pData0 + len0)); +} + +PORT_UTIL_INLINE NvBool +portUtilCheckAlignment +( + const void *address, + NvU32 align +) +{ + if (!portUtilIsPowerOfTwo(align)) + return NV_FALSE; + + return ((NvUPtr)address & (align-1)) == 0; +} + +PORT_UTIL_INLINE NvBool +portUtilIsPowerOfTwo +( + NvU64 num +) +{ + return (num & (num-1)) == 0; +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian16 +( + void *pBuf, + NvU16 value +) +{ + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 0) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian32 +( + void *pBuf, + NvU32 value +) +{ + *((NvU8*)pBuf + 3) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 0) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteLittleEndian64 +( + void *pBuf, + NvU64 value +) +{ + *((NvU8*)pBuf + 7) = (NvU8)(value >> 56); + *((NvU8*)pBuf + 6) = (NvU8)(value >> 48); + *((NvU8*)pBuf + 5) = (NvU8)(value >> 40); + *((NvU8*)pBuf + 4) = (NvU8)(value >> 32); + *((NvU8*)pBuf + 3) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 0) = (NvU8)(value); +} + +/* + * This function is designed 
to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian16 +( + void *pBuf, + NvU16 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 1) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian32 +( + void *pBuf, + NvU32 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 3) = (NvU8)(value); +} + +/* + * This function is designed to be able to make unaligned access + * (but might be slower because of the byte by byte access) + */ +PORT_UTIL_INLINE void +portUtilWriteBigEndian64 +( + void *pBuf, + NvU64 value +) +{ + *((NvU8*)pBuf + 0) = (NvU8)(value >> 56); + *((NvU8*)pBuf + 1) = (NvU8)(value >> 48); + *((NvU8*)pBuf + 2) = (NvU8)(value >> 40); + *((NvU8*)pBuf + 3) = (NvU8)(value >> 32); + *((NvU8*)pBuf + 4) = (NvU8)(value >> 24); + *((NvU8*)pBuf + 5) = (NvU8)(value >> 16); + *((NvU8*)pBuf + 6) = (NvU8)(value >> 8); + *((NvU8*)pBuf + 7) = (NvU8)(value); +} + +#if PORT_COMPILER_IS_GCC || PORT_COMPILER_IS_CLANG +#include "nvport/inline/util_gcc_clang.h" +#elif PORT_COMPILER_IS_MSVC +#include "nvport/inline/util_msvc.h" +#else +#error "Unsupported compiler" +#endif // switch + +#ifdef PORT_UTIL_CLZ_CTX_NOT_DEFINED +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n) +{ + NvU32 y; + + if (n == 0) + return 64; + + for (y = 0; !(n & 0x8000000000000000LL); y++) + n <<= 1; + + return y; +} +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n) +{ + NvU32 y; + + if (n == 0) + return 32; + + for (y = 0; !(n & 0x80000000); y++) + n <<= 1; + + return y; +} + +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n) +{ + NvU32 bz, b5, b4, b3, b2, b1, b0; + NvU64 y; + + y = n & (~n + 1); + bz = y ? 0 : 1; + b5 = (y & 0x00000000FFFFFFFFLL) ? 0 : 32; + b4 = (y & 0x0000FFFF0000FFFFLL) ? 0 : 16; + b3 = (y & 0x00FF00FF00FF00FFLL) ? 0 : 8; + b2 = (y & 0x0F0F0F0F0F0F0F0FLL) ? 0 : 4; + b1 = (y & 0x3333333333333333LL) ? 0 : 2; + b0 = (y & 0x5555555555555555LL) ? 0 : 1; + + return (bz + b5 + b4 + b3 + b2 + b1 + b0); +} +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n) +{ + NvU32 bz, b4, b3, b2, b1, b0; + NvU32 y; + + y = n & (~n + 1); + bz = y ? 0 : 1; + b4 = (y & 0x0000FFFF) ? 0 : 16; + b3 = (y & 0x00FF00FF) ? 0 : 8; + b2 = (y & 0x0F0F0F0F) ? 0 : 4; + b1 = (y & 0x33333333) ? 0 : 2; + b0 = (y & 0x55555555) ? 
0 : 1; + + return (bz + b4 + b3 + b2 + b1 + b0); +} +#endif + +static NV_FORCEINLINE void +portUtilSpin(void) +{ + NvU32 idx; + for (idx = 0; idx < 100; idx++) + { + NVPORT_DUMMY_LOOP(); + } +} + +#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS) && PORT_IS_MODULE_SUPPORTED(atomic) +static NV_FORCEINLINE NvU64 +portUtilExReadTimestampCounterSerialized(void) +{ + NvU64 val; + + portAtomicMemoryFenceLoad(); + val = portUtilExReadTimestampCounter(); + portAtomicMemoryFenceLoad(); + + return val; +} +#define portUtilExReadTimestampCounterSerialized_SUPPORTED 1 +#else +#define portUtilExReadTimestampCounterSerialized_SUPPORTED 0 +#endif + +#endif // _NVPORT_UTIL_GENERIC_H_ diff --git a/src/nvidia/inc/libraries/nvport/inline/util_valist.h b/src/nvidia/inc/libraries/nvport/inline/util_valist.h new file mode 100644 index 000000000..4d293c3fd --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/inline/util_valist.h @@ -0,0 +1,30 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief va_list declarations for all platforms + */ + +// We used to have custom implementations in here, but now we just take the standard ones +#include // define va_* diff --git a/src/nvidia/inc/libraries/nvport/memory.h b/src/nvidia/inc/libraries/nvport/memory.h new file mode 100644 index 000000000..dd5646e52 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/memory.h @@ -0,0 +1,962 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Memory module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_MEMORY_H_ +#define _NVPORT_MEMORY_H_ + +/** + * Platform-specific inline implementations + */ +#if NVOS_IS_LIBOS +#include "nvport/inline/memory_libos.h" +#endif + +/** + * @defgroup NVPORT_MEMORY Memory + * @brief This module contains memory management related functionality. + * + * @{ + */ + +/** + * @brief Single allocation description - forward reference. + */ +struct PORT_MEM_TRACK_ALLOC_INFO; +typedef struct PORT_MEM_TRACK_ALLOC_INFO PORT_MEM_TRACK_ALLOC_INFO; + + +/** + * @name Core Functions + * @{ + */ + + +/** + * @brief Initializes global Memory tracking structures. + * + * This function is called by @ref portInitialize. It is available here in case + * it is needed to initialize the MEMORY module without initializing all the + * others. e.g. for unit tests. + */ +void portMemInitialize(void); +/** + * @brief Destroys global Memory tracking structures, and checks for leaks + * + * This function is called by @ref portShutdown. It is available here in case + * it is needed to initialize the MEMORY module without initializing all the + * others. e.g. for unit tests. + * + * @param bForceSilent - Will not print the report, even if + * @ref PORT_MEM_TRACK_PRINT_LEVEL isn't PORT_MEM_TRACK_PRINT_LEVEL_SILENT + */ +void portMemShutdown(NvBool bForceSilent); + + +/** + * @brief Allocates pageable virtual memory of given size. + * + * Will allocate at least lengthBytes bytes and return a pointer to the + * allocated virtual memory. The caller will be able to both read and write + * the returned memory via standard pointer accesses. + * + * The memory is not guaranteed to be initialized before being returned to the + * caller. + * + * An allocation request of size 0 will result in a return value of NULL. + * + * @par Checked builds only: + * Requests of size 0 will breakpoint/assert. + * + * @par Undefined: + * It is possible this function will consume more than lengthBytes of virtual + * address space. However behavior is undefined if the caller attempts to read + * or write addresses beyond lengthBytes. + * + * @return Pointer to requested memory, NULL if allocation fails. + * + * @note Calling this function is identical to calling + * @ref PORT_ALLOC ( @ref portMemAllocatorGetGlobalPaged() , lengthBytes) + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +void *portMemAllocPaged(NvLength lengthBytes); + +/** + * @brief Allocates non-paged (i.e. pinned) memory. + * + * This function is essentially the same to @ref portMemAllocPaged except that + * the virtual memory once returned will always be resident in CPU memory. + * + * @return Pointer to requested memory, NULL if allocation fails. + * + * @note Calling this function is identical to calling + * @ref PORT_ALLOC ( @ref portMemAllocatorGetGlobalNonPaged() , lengthBytes) + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +void *portMemAllocNonPaged(NvLength lengthBytes); + +/** + * @brief Allocates non-paged (i.e. 
pinned) memory on the stack or the heap + * + * USE ONLY FOR MEMORY THAT ALLOCATED AND FREED IN THE SAME FUNCTION! + * + * This function allocates memory on the stack for platforms with a large stack. + * Otherwise it is defined to @ref portMemAllocNonPaged and @ref portMemFree. + */ +#define portMemExAllocStack(lengthBytes) __builtin_alloca(lengthBytes) +#define portMemExAllocStack_SUPPORTED PORT_COMPILER_IS_GCC + +#if portMemExAllocStack_SUPPORTED && NVOS_IS_LIBOS +#define portMemAllocStackOrHeap(lengthBytes) portMemExAllocStack(lengthBytes) +#define portMemFreeStackOrHeap(pData) +#else +#define portMemAllocStackOrHeap(size) portMemAllocNonPaged(size) +#define portMemFreeStackOrHeap(pData) portMemFree(pData) +#endif + +/** + * @brief Frees memory allocated by @ref portMemAllocPaged or @ref portMemAllocNonPaged. + * + * Frees either paged or non-paged virtual memory. The pointer passed in must + * have been the exact value returned by the allocation routine. + * + * Calling with NULL has no effect. + * + * @par Checked builds only: + * Will fill the memory with a pattern to help detect use after free.
+ * Will assert/breakpoint if the memory fenceposts have been corrupted + * + * @par Undefined: + * Freeing the same address multiple times results in undefined behavior.
+ * Accessing memory in the region freed by this function results in undefined + * behavior. It may generate a page fault, or if the memory has been + * reallocated (or kept around to optimize subsequent allocation requests) then + * the access may unexpectedly work. + * + * @pre Windows: IRQL <= APC_LEVEL (DISPATCH_LEVEL if freeing NonPaged memory) + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +void portMemFree(void *pData); + +/** + * @brief Copies data from one address to another. + * + * Copies srcSize bytes from pSource to pDestination, returning pDestination. + * pDestination should be at least destSize bytes, pSource at least srcSize. + * destSize should be equal or greater to srcSize. + * + * If destSize is 0, it is guaranteed to not access either buffer. + * + * @par Undefined: + * Behavior is undefined if memory regions referred to by pSource and + * pDestination overlap. + * + * @par Checked builds only: + * Will assert/breakpoint if the regions overlap.
+ * Will assert/breakpoint if destSize < srcSize
+ * Will assert/breakpoint if either pointer is NULL
+ *
+ * @return pDestination on success, NULL if the operation failed.
+ *
+ */
+void *portMemCopy(void *pDestination, NvLength destSize, const void *pSource, NvLength srcSize);
+
+/**
+ * @brief Moves data from one address to another.
+ *
+ * Copies memory from pSource to pDestination, returning pDestination.
+ * pDestination should be at least destSize bytes, pSource at least srcSize.
+ * destSize should be equal or greater to srcSize.
+ *
+ * If destSize is 0, it is guaranteed to not access either buffer.
+ *
+ * Unlike @ref portMemCopy this function allows the regions to overlap.
+ *
+ * @par Checked builds only:
+ * Will assert/breakpoint if destSize < srcSize
+ * Will assert/breakpoint if either pointer is NULL + * + * @return pDestination on success, NULL if the operation failed. + * + */ +void *portMemMove(void *pDestination, NvLength destSize, const void *pSource, NvLength srcSize); + +/** + * @brief Sets given memory to specified value. + * + * Writes lengthBytes bytes of data starting at pData with value. + * The buffer is assumed to have the size of at least lengthBytes. + * + * if lengthBytes is 0 it is guaranteed to not access pData. + * + * @return pData + */ +void *portMemSet(void *pData, NvU8 value, NvLength lengthBytes); + +/** + * @brief Sets given memory to specified pattern + * + * Fills lengthBytes of pData repeating the pPattern pattern. + * The pData buffer is assumed to have the size of at least lengthBytes. + * The pPattern buffer is assumed to have the size of at least patternBytes. + * + * If lengthBytes is 0 it is guaranteed to not access pData. + * @par Undefined: + * Behavior is undefined if patternBytes is zero.
+ * Behavior is undefined if pPattern and pData overlap. + * + * @return pData + */ +void *portMemSetPattern(void *pData, NvLength lengthBytes, const NvU8 *pPattern, NvLength patternBytes); + +/** + * @brief Compares two memory regions. + * + * This function does a byte by byte comparison of the 2 memory regions provided. + * + * It simultaneously scans pData0 and pData1 starting from byte 0 and going + * until lengthBytes bytes have been scanned or the bytes in pData0 and pData1 + * are not equal. + * + * The return value will be + * - 0 if all lengthBytes bytes are equal. + * - <0 if pData0 is less than pData1 for the first unequal byte. + * - >0 if pData0 is greater than pData1 for the first unequal byte. + * + * Both buffers are assumed to have the size of at least lengthBytes. + * + * @par Undefined: + * Behavior is undefined if memory regions referred to by pData0 and pData1 + * overlap.
+ * Behavior is undefined if lengthBytes is 0. + * + * @par Checked builds only: + * The function will return 0 and breakpoint/assert if there is overlap.
+ * The function will return 0 and breakpoint/assert if the length is 0. + */ +NvS32 portMemCmp(const void *pData0, const void *pData1, NvLength lengthBytes); + + +typedef struct PORT_MEM_ALLOCATOR PORT_MEM_ALLOCATOR; + +/** + * @brief Function signature for PORT_MEM_ALLOCATOR::alloc. + * + * Basic behavior is similar to @ref portMemAllocPaged. What type of memory + * is returned depends on the type of allocator that was created. + * + * Must be given the same instance of @ref PORT_MEM_ALLOCATOR as that which + * contains the calling function pointer. A different copy returned by the + * same function is not sufficient. Behavior is undefined if this is not done. + */ +typedef void *PortMemAllocatorAlloc(PORT_MEM_ALLOCATOR *pAlloc, NvLength length); + +/** + * @brief Function signature for PORT_MEM_ALLOCATOR::free. + * + * See @ref portMemFree for details. + * + * Must be given the same instance of @ref PORT_MEM_ALLOCATOR as that which + * contains the calling function pointer. A different copy returned by the + * same function is not sufficient. Behavior is undefined if this is not done. + * + * @par Checked builds only: + * Will assert if given a different pointer than the one the memory + * was allocated with. + */ +typedef void PortMemAllocatorFree(PORT_MEM_ALLOCATOR *pAlloc, void *pMemory); + +/** + * @brief Function signature for PORT_MEM_ALLOCATOR::release. + * + * This function is called by @ref portMemAllocatorRelease when the allocator is + * released. This is only needed when implementing custom allocators, to be able + * to clean up as necessary. + */ +typedef void PortMemAllocatorRelease(PORT_MEM_ALLOCATOR *pAlloc); + + +/** + * @brief Platform specific allocator implementation. + */ +typedef struct PORT_MEM_ALLOCATOR_IMPL PORT_MEM_ALLOCATOR_IMPL; + +/** + * @brief Opaque structure to hold all memory tracking information. + */ +typedef struct PORT_MEM_ALLOCATOR_TRACKING PORT_MEM_ALLOCATOR_TRACKING; + +/** + * @brief Initializes an allocator tracking structures. + * + * You only need to call this when creating a custom allocator. The functions + * declared in this file call this internally. + * + * @param pTracking - Pointer to an already allocated tracking structure. + */ +void portMemInitializeAllocatorTracking(PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_ALLOCATOR_TRACKING *pTracking); + +/** + * @brief A set of functions that can be used to manage a specific type of memory. + * + * The intent of the allocator paradigm is to allow for generic code to be + * given an instance of PORT_MEM_ALLOCATOR for use to create memory so it does + * not have to embed a policy decision in its implementation. It can also + * allow for the implementation of specialized allocators that can be leveraged + * through a generic interface. + * + * Don't call these functions directly, use @ref PORT_ALLOC and @ref PORT_FREE + * This is done to provide full tracking support for these calls. + */ +struct PORT_MEM_ALLOCATOR { + /** + * @brief see @ref PortMemAllocatorAlloc for documentation + */ + PortMemAllocatorAlloc *_portAlloc; + /** + * @brief see @ref PortMemAllocatorFree for documentation + */ + PortMemAllocatorFree *_portFree; + /** + * @brief see @ref PortMemAllocatorRelease for documentation + */ + PortMemAllocatorRelease *_portRelease; + /** + * @brief Pointer to tracking structure. + */ + PORT_MEM_ALLOCATOR_TRACKING *pTracking; + /** + * @brief Pointer to the platform specific implementation. 
+ */ + PORT_MEM_ALLOCATOR_IMPL *pImpl; +}; + +/** + * @brief Macro for calling the alloc method of an allocator object. + * + * Please use this instead of calling the methods directly, to ensure proper + * memory tracking in all cases. + * + * @pre Windows: IRQL <= APC_LEVEL(DISPATCH_LEVEL if allocating NonPaged memory) + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +#define PORT_ALLOC(pAlloc, length) _portMemAllocatorAlloc(pAlloc, length) +/** + * @brief Macro for calling the free method of an allocator object + * + * Please use this instead of calling the methods directly, to ensure proper + * memory tracking in all cases. + * + * @pre Windows: IRQL <= APC_LEVEL (DISPATCH_LEVEL if freeing NonPaged memory) + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +#define PORT_FREE(pAlloc, pMem) _portMemAllocatorFree(pAlloc, pMem) + +/** + * @brief Creates an allocator for paged memory. + * + * Returns an allocator instance where @ref PORT_ALLOC will behave + * like @ref portMemAllocPaged. Note the memory holding the PORT_MEM_ALLOCATOR + * instance may also be paged. + * + * @return NULL if creation failed. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +PORT_MEM_ALLOCATOR *portMemAllocatorCreatePaged(void); + +/** + * @brief Creates an allocator for non-paged memory. + * + * Returns an allocator instance where @ref PORT_ALLOC will + * behave like @ref portMemAllocNonPaged. Note the memory holding the + * PORT_MEM_ALLOCATOR instance will also be non-paged. + * + * @return NULL if creation failed. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +PORT_MEM_ALLOCATOR *portMemAllocatorCreateNonPaged(void); + +/** + * @brief Creates an allocator over an existing block of memory. + * + * Adds allocator bookkeeping information to an existing memory block, so that + * it can be used with the standard allocator interface. Some of the space of + * the preallocated block will be consumed for bookkeeping, so not all of the + * memory will be allocatable. + * + * Use this to create an allocator object on an ISR stack, so memory allocations + * can be done at DIQRL. + * + * @par Implementation details: + * The allocator allocates in chunks of 16 bytes, and uses a 2bit-vector to keep + * track of free chunks. Thus, the bookkeeping structures for a block of size N + * will take about N/64+sizeof(PORT_MEM_ALLOCATOR) bytes. + * Use @ref PORT_MEM_PREALLOCATED_BLOCK if you want to specify useful(allocable) + * size instead of total size. + * + * The allocator is only valid while the memory it was created on is valid. + * @ref portMemAllocatorRelease must be called on the allocator before the + * memory lifecycle ends. + * + * @return NULL if creation failed. + * + * @pre Usable at any IRQL/interrupt context + * @note Will not put the thread to sleep. + * @note This allocator is not thread safe. 
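+ *
+ * For example, a caller can provide its own storage and serve small
+ * allocations out of it (an illustrative sketch; the buffer name and the
+ * sizes are arbitrary):
+ * ~~~{.c}
+ * NvU8 block[PORT_MEM_PREALLOCATED_BLOCK(256)];
+ * PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorCreateOnExistingBlock(block, sizeof(block));
+ * if (pAlloc != NULL)
+ * {
+ *     void *pMem = PORT_ALLOC(pAlloc, 64);
+ *     PORT_FREE(pAlloc, pMem);
+ *     portMemAllocatorRelease(pAlloc);
+ * }
+ * ~~~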
+ */ +PORT_MEM_ALLOCATOR *portMemAllocatorCreateOnExistingBlock(void *pPreallocatedBlock, NvLength blockSizeBytes); + +/** + * @brief Extends the given size to fit the required bookkeeping information + * + * To be used when preallocating blocks that will be used to create an allocator + * Consider these two preallocated memory blocks: + * ~~~{.c} + * NvU8 xxx[1024]; + * NvU8 yyy[PORT_MEM_PREALLOCATED_BLOCK(1024)]; + * ~~~ + * Block @c xxx has a size of 1024, but only ~950 of that can be allocated. + * Block @c yyy has a size of ~1100, and exactly 1024 bytes can be allocated. + */ +#define PORT_MEM_PREALLOCATED_BLOCK(size) \ + (size + PORT_MEM_PREALLOCATED_BLOCK_EXTRA_SIZE(size)) + +/** + * @brief releases an allocator instance. + * + * This must be called to release any resources associated with the allocator. + * + * @par Checked builds only: + * Will assert if pAllocator has unfreed allocations + * + * @par Undefined: + * pAllocator must be an instance of PORT_MEM_ALLOCATOR that was provided by one + * of the portMemAllocatorCreate* functions. + * + * These limitations don't apply to allocators created using @ref portMemAllocatorCreateOnExistingBlock and + * @ref portMemExAllocatorCreateLockedOnExistingBlock. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +void portMemAllocatorRelease(PORT_MEM_ALLOCATOR *pAllocator); + +/** + * @brief Returns the pointer to the global nonpaged allocator. + * + * This allocator is always initialized does not need to be released. + * + * Allocations performed using this allocator are identical to the ones done + * by @ref portMemAllocNonPaged + */ +PORT_MEM_ALLOCATOR *portMemAllocatorGetGlobalNonPaged(void); +/** + * @brief Returns the pointer to the global paged allocator. + * + * This allocator is always initialized does not need to be released. + * + * Allocations performed using this allocator are identical to the ones done + * by @ref portMemAllocPaged + */ +PORT_MEM_ALLOCATOR *portMemAllocatorGetGlobalPaged(void); +/** + * @brief Prints the memory details gathered by whatever tracking mechanism is + * enabled. If pAllocator is NULL, it will print data for all allocators. + * + * @note Printing is done using portDbgPrintString, which prints regardless of + * build type and debug levels. + */ +void portMemPrintTrackingInfo(const PORT_MEM_ALLOCATOR *pAllocator); + +// @} End core functions + + +/** + * @name Extended Functions + * @{ + */ + +/** + * @brief Returns true if it is safe to allocate paged memory. + */ +NvBool portMemExSafeForPagedAlloc(void); +#define portMemExSafeForPagedAlloc_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Returns true if it is safe to allocate non-paged memory. + */ +NvBool portMemExSafeForNonPagedAlloc(void); +#define portMemExSafeForNonPagedAlloc_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Public allocator tracking information + */ +typedef struct PORT_MEM_TRACK_ALLOCATOR_STATS +{ + /** @brief Total number of allocations */ + NvU32 numAllocations; + /** @brief Total allocated bytes, including all staging */ + NvLength allocatedSize; + /** @brief Useful size of allocations - What was actually requested */ + NvLength usefulSize; + /** @brief Extra size allocated for tracking/debugging purposes */ + NvLength metaSize; +} PORT_MEM_TRACK_ALLOCATOR_STATS; + +/** + * @brief Returns the statistics of currently active allocations for the given + * allocator. 
+ * + * If pAllocator is NULL, it returns stats for all allocators, as well as the + * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged + */ +NV_STATUS portMemExTrackingGetActiveStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats); + +/** + * @brief Returns the statistics of all allocations made with the given + * allocator since it was created. + * + * If pAllocator is NULL, it returns stats for all allocators, as well as the + * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged + */ +NV_STATUS portMemExTrackingGetTotalStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats); + +/** + * @brief Returns the statistics of peak allocations made with the given + * allocator since it was created. + * + * Peak data reports each field independently. For example, if the peak data + * reports 100 allocations and 100000 bytes allocated, those two did not + * necessarily happen *at the same time*. It could also be that the allocator + * created 100 allocations of 1 byte each, then freed them and allocated a + * single 100000 bytes block. + * + * If pAllocator is NULL, it returns stats for all allocators, as well as the + * memory allocated with @ref portMemAllocPaged and @ref portMemAllocNonPaged + */ +NV_STATUS portMemExTrackingGetPeakStats(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOCATOR_STATS *pStats); + +/** + * @brief Cycles through the tracking infos for allocations by pAllocator + * If pAllocator is NULL, it will cycle through all allocations. + * + * @param [out] pInfo The info will be written to this buffer. + * @param [in, out] pIterator + * Should point to NULL the first time it is called. + * Every next call should pass the value returned by previous. + * To reset the loop, set the iterator to NULL. + * Upon writing the last range, the iterator will be set to NULL. + * The iterator is only valid until the next alloc/free from this allocator. + * There is no need to release the iterator in any way. + * + * @return NV_ERR_OBJECT_NOT_FOUND if no allocations exist. + */ +NV_STATUS portMemExTrackingGetNext(const PORT_MEM_ALLOCATOR *pAllocator, PORT_MEM_TRACK_ALLOC_INFO *pInfo, void **pIterator); + +/** + * @brief Copies from user memory to kernel memory. + * + * When accepting data as input from user space it is necessary to take + * additional precautions to access it safely and securely. This means copy + * the user data into a kernel buffer and then using that kernel buffer for all + * needed accesses. + * + * The function will fail if pUser is an invalid user space pointer or if the + * memory it refers to is less than length bytes long. A valid kernel pointer + * is interpreted as an invalid user pointer. + * @par Checked builds only: + * Will trigger a breakpoint if pUser is invalid userspace pointer + * + * The function will fail if pKernel is NULL. + * + * The function will fail if lengthBytes is 0. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pUser is invalid or pKernel is NULL + * - NV_ERR_INVALID_ARGUMENT if lengthBytes is 0 + */ +NV_STATUS portMemExCopyFromUser(const NvP64 pUser, void *pKernel, NvLength lengthBytes); +#define portMemExCopyFromUser_SUPPORTED PORT_IS_KERNEL_BUILD + + +/** + * @brief Copies from kernel memory to user memory. + * + * This is the reverse of @ref portMemExCopyFromUser. The copy in this case is + * from pKernel to pUser. + * + * See @ref portMemExCopyFromUser for more details. 
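+ *
+ * A minimal sketch of the copy-out direction (pUserBuf is assumed to be an
+ * NvP64 pointer received from user space):
+ * ~~~{.c}
+ * NvU32 result = 42;
+ * NV_STATUS status = portMemExCopyToUser(&result, pUserBuf, sizeof(result));
+ * ~~~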
+ * + */ +NV_STATUS portMemExCopyToUser(const void *pKernel, NvP64 pUser, NvLength lengthBytes); +#define portMemExCopyToUser_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Returns the size (in bytes) of a single memory page. + */ +NvLength portMemExGetPageSize(void); +#define portMemExGetPageSize_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Opaque container holding an allocation of physical system memory. + */ +typedef struct PORT_PHYSICAL_MEMDESC PORT_PHYSICAL_MEMDESC; + +/** + * @brief Creates a handle used to manage and manipulate a physical memory + * allocation. + * + * @param pAllocator the allocator to use the create the allocation's tracking + * structures. This allocator is *not* used to allocate physical memory. + * + * @return NULL if the allocation failed. + */ +PORT_PHYSICAL_MEMDESC *portMemExPhysicalDescCreate(PORT_MEM_ALLOCATOR *pAllocator); +#define portMemExPhysicalDescCreate_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Types of caching for physical memory mappings. + * + * In case a target architecture does not support a specific caching mode, + * the mapping call will fail. + * Specifying PORT_MEM_ANYCACHE lets the implementation pick a caching mode that + * is present on the target architecture. This way the mapping will not fail. + */ +typedef enum +{ + PORT_MEM_UNCACHED, + PORT_MEM_CACHED, + PORT_MEM_WRITECOMBINED, + PORT_MEM_ANYCACHE +} PortMemCacheMode; + +/** + * @brief Types of access protections for physical memory mappings. + */ +typedef enum +{ + PORT_MEM_PROT_NO_ACCESS = 0, + PORT_MEM_PROT_READ = 1, + PORT_MEM_PROT_WRITE = 2, + PORT_MEM_PROT_READ_WRITE = 3, + PORT_MEM_PROT_EXEC = 4, + PORT_MEM_PROT_READ_EXEC = 5, + PORT_MEM_PROT_WRITE_EXEC = 6, + PORT_MEM_PROT_READ_WRITE_EXEC = 7 +} PortMemProtectMode; + +/** + * @brief Populates a physical memory descriptor with backing pages. + * + * Populates a descriptor with physical pages. Pages will be zeroed. + */ +NV_STATUS portMemExPhysicalDescPopulate(PORT_PHYSICAL_MEMDESC *pPmd, NvLength sizeBytes, NvBool bContiguous); +#define portMemExPhysicalDescPopulate_SUPPORTED PORT_IS_KERNEL_BUILD + + +/** + * @brief allocates a PMD and populates it with memory + * + * This is a combination of @ref portMemExPhysicalDescCreate and @ref + * portMemExPhysicalDescPopulate. It should be the preferred method to allocate + * physical memory when it is possible to do it as a single step. Not only + * does the caller require less code and error handling but it allows the + * implementation the option to combine the tracking data into fewer + * allocations since it knows the size up front. + * + * @param [out] ppPmd - Pointer to the allocated PMD. + * @param pAllocator - Allocator to use when allocating the PMD + */ +NV_STATUS portMemExPhysicalDescCreateAndPopulate(PORT_MEM_ALLOCATOR *pAllocator, + PORT_PHYSICAL_MEMDESC **ppPmd, NvLength sizeBytes, NvBool bContiguous); +#define portMemExPhysicalDescCreateAndPopulate_SUPPORTED PORT_IS_KERNEL_BUILD + + +/** + * @brief Adds a contiguous memory range to the physical memory descriptor + * + * To describe a non-contiguous memory range, call this function once for every + * contiguous range. Range order will be determined by function call order, + * not the range addresses. 
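+ *
+ * For instance, a descriptor created with @ref portMemExPhysicalDescCreate
+ * could describe two discontiguous ranges like this (addresses and lengths
+ * are illustrative only):
+ * ~~~{.c}
+ * portMemExPhysicalDescribeRange(pPmd, 0x10000000, 0x1000);
+ * portMemExPhysicalDescribeRange(pPmd, 0x20000000, 0x2000);
+ * ~~~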
+ */ +NV_STATUS portMemExPhysicalDescribeRange(PORT_PHYSICAL_MEMDESC *pPmd, NvU64 start, NvLength length); +#define portMemExPhysicalDescribeRange_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Hands back the next contiguous memory range in the memory descriptor + * + * @param [out] pStart - Physical address of the range + * @param [out] pLength - Length of the range + * @param [in, out] pIterator + * Should point to NULL the first time it is called. + * Every next call should pass the value returned by previous. + * To reset the loop, set the iterator to NULL. + * Upon writing the last range, the iterator will be set to NULL. + * The iterator is valid until pPmd is destroyed. + * There is no need to release the iterator in any way. + * + * @return NV_ERR_OBJECT_NOT_FOUND if no ranges exist. + */ +NV_STATUS portMemExPhysicalGetNextRange(PORT_PHYSICAL_MEMDESC *pPmd, + NvU64 *pStart, NvLength *pLength, void **pIterator); +#define portMemExPhysicalGetNextRange_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Frees the memory descriptor and all tracking data. The descriptor must + * have been allocated with @ref portMemExPhysicalDescCreate or + * @ref portMemExPhysicalDescCreateAndPopulate + * + * Freed memory is not automatically unmapped. + * + * It is guaranteed that after memory has been freed, the original data can no + * longer be read in any way. + * @par Undefined: + * Accessing a mapping that has been freed results in undefined behavior. + */ +void portMemExPhysicalDescFree(PORT_PHYSICAL_MEMDESC *pPmd); +#define portMemExPhysicalDescFree_SUPPORTED PORT_IS_KERNEL_BUILD + + +/** + * @brief Frees physical memory allocated with @ref portMemExPhysicalDescPopulate + */ +void portMemExPhysicalFree(PORT_PHYSICAL_MEMDESC *pPmd); +#define portMemExPhysicalFree_SUPPORTED PORT_IS_KERNEL_BUILD + + +/** + * @brief Maps a region of a @ref PORT_PHYSICAL_MEMDESC + * + * @param [out] ppMapping - Virtual address where the physical memory is mapped + * @param offset - Offset of the physical memory where the region starts. + * The region must start on a page boundary. + * @param length - Length of the physical memory region. + * Needs to be a multiple of page size. + * @param protect - Mapping protections + * @param cacheMode - Mapping cache mode. + * Only PORT_MEM_ANYCACHE is guaranteed to be supported. + * + * @return NV_ERR_NOT_SUPPORTED if the specified cache mode is not supported by + * the current architecture. + */ +NV_STATUS portMemExPhysicalMap(PORT_PHYSICAL_MEMDESC *pPmd, + void **ppMapping, NvU64 offset, NvU64 length, + PortMemProtectMode protect, PortMemCacheMode cacheMode); +#define portMemExPhysicalMap_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Unmaps a region created with @ref portMemExPhysicalMap. + * + * @par Undefined: + * Accessing an unmapped memory is undefined, but it is guaranteed that the + * actual data can't be read/overwritten. + */ +NV_STATUS portMemExPhysicalUnmap(PORT_PHYSICAL_MEMDESC *pPmd, void *pMapping); +#define portMemExPhysicalUnmap_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Creates a thread safe allocator over an existing block of memory. + * + * @note See @ref portMemAllocatorCreateOnExistingBlock for other limitations. + * @note User should initialize @p pSpinlock and destroy it after it + * has finished using this allocator. 
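+ *
+ * A minimal sketch (assumes the sync module is available; the storage and
+ * sizes are arbitrary):
+ * ~~~{.c}
+ * static NvU8 block[PORT_MEM_PREALLOCATED_BLOCK(512)];
+ * void *pLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged());
+ * PORT_MEM_ALLOCATOR *pAlloc =
+ *     portMemExAllocatorCreateLockedOnExistingBlock(block, sizeof(block), pLock);
+ * ~~~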
+ */ +PORT_MEM_ALLOCATOR *portMemExAllocatorCreateLockedOnExistingBlock(void *pPreallocatedBlock, NvLength blockSizeBytes, void *pSpinlock); +#define portMemExAllocatorCreateLockedOnExistingBlock_SUPPORTED \ + (PORT_IS_MODULE_SUPPORTED(sync)) + + +/** + * @brief Maps the given physical address range to nonpaged system space. + * + * @param[in] start Specifies the starting physical address of the I/O + * range to be mapped. + * @param[in] byteSize Specifies the number of bytes to be mapped. + * + * @return The base virtual address that maps the base physical address for + * the range + */ +void *portMemExMapIOSpace(NvU64 start, NvU64 byteSize); +#define portMemExMapIOSpace_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS) + +/** + * @brief Unmaps a specified range of physical addresses previously mapped by + * portMapIOSpace + * + * @param[in] addr Pointer to the base virtual address to which the + * physical pages were mapped. + * @param[in] byteSize Specifies the number of bytes that were mapped. + */ +void portMemExUnmapIOSpace(void *addr, NvU64 byteSize); +#define portMemExUnmapIOSpace_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS) + +// @} End extended functions + + +/** + * @note Memory tracking is controlled through the following compile-time flags. + * The PORT_MEM_TRACK_USE_* constants should be defined to 0 or 1. + * If nothing is defined, the default values are assigned here. + */ +#if !defined(PORT_MEM_TRACK_USE_COUNTER) +/** + * @brief Use allocations counter for all allocators + * + * Allocation counter is lightweight and can detect if a leak is present. + * Default is always on. + */ +#define PORT_MEM_TRACK_USE_COUNTER 1 +#endif +#if !defined(PORT_MEM_TRACK_USE_FENCEPOSTS) +/** + * @brief Use fenceposts around all allocated blocks + * + * Fenceposts can detect out of bounds writes and improper free calls + * Default is on for checked builds (where it will assert if an error occurs) + */ +#define PORT_MEM_TRACK_USE_FENCEPOSTS PORT_IS_CHECKED_BUILD +#endif +#if !defined(PORT_MEM_TRACK_USE_ALLOCLIST) +/** + * @brief Keep a list of all allocations. + * + * Allocation lists can give more details about detected leaks, and allow + * cycling through all allocations. + * Default is off. + * @todo Perhaps enable for checked builds? + */ +#define PORT_MEM_TRACK_USE_ALLOCLIST 0 +#endif +#if !defined(PORT_MEM_TRACK_USE_CALLERINFO) +/** + * @brief Track file:line information for all allocations + * + * On release builds the filename hash is passed instead of the string. This + * requires NvLog to be enabled. + * Default is off. + */ +#define PORT_MEM_TRACK_USE_CALLERINFO 0 +#endif +/** + * @brief Track instruction pointer instead of function/file/line information + * for all allocations + * + * Has no effect unless PORT_MEM_TRACK_USE_CALLERINFO is also set. + */ +#if !defined(PORT_MEM_TRACK_USE_CALLERINFO_IP) +#if NVCPU_IS_RISCV64 +#define PORT_MEM_TRACK_USE_CALLERINFO_IP 1 +#else +#define PORT_MEM_TRACK_USE_CALLERINFO_IP 0 +#endif +#endif +#if !defined(PORT_MEM_TRACK_USE_LOGGING) +/** + * @brief Log all alloc and free calls to a binary NvLog buffer + * Requires NvLog to be enabled. + * + * Default is off. 
+ */ +#define PORT_MEM_TRACK_USE_LOGGING 0 +#endif + +/** @brief Nothing is printed unless @ref portMemPrintTrackingInfo is called */ +#define PORT_MEM_TRACK_PRINT_LEVEL_SILENT 0 +/** @brief Print when an error occurs and at shutdown */ +#define PORT_MEM_TRACK_PRINT_LEVEL_BASIC 1 +/** @brief Print at every alloc and free, and at any abnormal situation */ +#define PORT_MEM_TRACK_PRINT_LEVEL_VERBOSE 2 + +#if !defined(PORT_MEM_TRACK_PRINT_LEVEL) +#if PORT_IS_CHECKED_BUILD +#define PORT_MEM_TRACK_PRINT_LEVEL PORT_MEM_TRACK_PRINT_LEVEL_BASIC +#else +#define PORT_MEM_TRACK_PRINT_LEVEL PORT_MEM_TRACK_PRINT_LEVEL_SILENT +#endif // PORT_IS_CHECKED_BUILD +#endif // !defined(PORT_MEM_TRACK_PRINT_LEVEL) + +// Memory tracking header can redefine some functions declared here. +#include "nvport/inline/memory_tracking.h" + +/** + * @brief Single allocation description. + * + * Must be defined after memory_tracking.h is included for PORT_MEM_CALLERINFO. + */ +struct PORT_MEM_TRACK_ALLOC_INFO +{ +#if PORT_MEM_TRACK_USE_CALLERINFO + /** + * @brief Function / file / line or instruction pointer. + */ + PORT_MEM_CALLERINFO callerInfo; +#endif + /** + * @brief pointer to the allocated memory block. + */ + void *pMemory; + /** + * @brief Size of the allocated memory block + */ + NvLength size; + /** + * @brief Pointer to the allocator that allocated the memory. + * If the memory was allocated globally, this will be NULL + */ + PORT_MEM_ALLOCATOR *pAllocator; + /** + * @brief Timestamp of the allocation. Will be 0 if it wasn't logged. + */ + NvU64 timestamp; +}; + +/** + * @} + */ + +#endif // _NVPORT_MEMORY_H_ diff --git a/src/nvidia/inc/libraries/nvport/nvport.h b/src/nvidia/inc/libraries/nvport/nvport.h new file mode 100644 index 000000000..3049e4b68 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/nvport.h @@ -0,0 +1,262 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief NvPort global definitions + */ + +#ifndef _NVPORT_H_ +#define _NVPORT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @note nvport.h may be included through nvrm.h by projects which haven't yet + * configured their makefiles appropriately. These files don't use any NvPort + * features, so it's safe to define away this entire file instead of failing + * the build. This will be removed once NvPort becomes ubiquitous enough. 
+ */ +#if defined(PORT_IS_KERNEL_BUILD) + +#include +#include + +#if !defined(PORT_IS_KERNEL_BUILD) +#error "PORT_IS_KERNEL_BUILD must be defined to 0 or 1 by makefile" +#endif + +#if !defined(PORT_IS_CHECKED_BUILD) +#error "PORT_IS_CHECKED_BUILD must be defined to 0 or 1 by makefile" +#endif + +/** + * @defgroup NVPORT_CORE Core Functions + * @{ + */ + +/** + * @brief Helper macro to test if an extended function is supported + * + * Whether an extended function is supported or not is a compile time decision. + * Every function has an associated define that will look like this: + * + * ~~~{.c} + * #define portSomeFunction_SUPPORTED SOME_EXPRESSION + * ~~~ + * + * That will be evaluated by the preprocessor to either 0 or 1 (not supported + * or supported). If it evaluates to 0 then the symbol will not exist and the + * function cannot be referenced. + */ +#define PORT_IS_FUNC_SUPPORTED(function) function ## _SUPPORTED + +/** + * @brief Helper macro to test if a module is supported. The argument should be + * a lowercase module name, e.g. @c PORT_IS_MODULE_SUPPORTED(memory) + * + * Whether a module is included in the build is decided at compile time. + * Modules can either not support a given platform or be explicitly disabled + * through the Makefile. + * + * This define will be equal to 1 if the module is supported. + * If it evaluates to 0 or is not defined, then none of the module's symbols or + * defines will exist in the build. + */ +#define PORT_IS_MODULE_SUPPORTED(module) PORT_MODULE_ ## module + + +#if defined(__clang__) +#define PORT_COMPILER_IS_CLANG 1 +#define PORT_COMPILER_HAS_INTRINSIC_ATOMICS __has_builtin(__c11_atomic_fetch_add) +#define PORT_COMPILER_HAS_ATTRIBUTE_FORMAT __has_attribute(__format__) +#define PORT_COMPILER_HAS_COUNTER 1 +#else +#define PORT_COMPILER_IS_CLANG 0 +#endif + +#if defined(__GNUC__) && !defined(__clang__) +#define PORT_COMPILER_IS_GCC 1 +#define PORT_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#define PORT_COMPILER_HAS_INTRINSIC_ATOMICS (PORT_GCC_VERSION >= 40100) +#define PORT_COMPILER_HAS_ATTRIBUTE_FORMAT (PORT_GCC_VERSION >= 20300) +#define PORT_COMPILER_HAS_COUNTER (PORT_GCC_VERSION >= 40300) +#define PORT_COMPILER_HAS_INTRINSIC_CPUID 1 +#else +#define PORT_COMPILER_IS_GCC 0 +#endif + +#define PORT_COMPILER_IS_MSVC 0 + +#if !(PORT_COMPILER_IS_GCC || PORT_COMPILER_IS_CLANG || PORT_COMPILER_IS_MSVC) +#error "Unsupported compiler" +#endif + +// +// Need to define an IS_MODS macro that expands to 1 or 0 as defined(NV_MODS) +// is not entirely portable when used within a macro expansion. +// e.g. this would not always work: #define PORT_IS_MODS defined(NV_MODS) +// +#define PORT_IS_MODS 0 + +#ifndef PORT_INLINE +/** + * @brief Qualifier for all inline functions declared by NvPort. + * Modules will usually define PORT__INLINE which is either PORT_INLINE + * or nothing, depending whether the functions are being inlined in that module. 
+ */ +#define PORT_INLINE static NV_INLINE +#endif + + +/** + * @def PORT_CHECKED_ONLY(x) + * @brief Evaluates the argument only if it is a checked build + */ +#if PORT_IS_CHECKED_BUILD +#define PORT_CHECKED_ONLY(x) x +#else +#define PORT_CHECKED_ONLY(x) +#endif + +/** + * @def PORT_KERNEL_ONLY(x) + * @brief Evaluates the argument only if it is a kernel build + */ +#if PORT_IS_KERNEL_BUILD +#define PORT_KERNEL_ONLY(x) x +#else +#define PORT_KERNEL_ONLY(x) +#endif + +#ifndef PORT_INCLUDE_NEW_STYLE_ALIASES +/** + * @brief Switch to include aliases for objects and methods that conform to the + * new RM style. + * + * This switch will define type and method aliases for object types in NvPort. + * The current NvPort style object names are PORT_MODULE_OBJECT, while the + * methods are portModuleObjectMethod(). + * The update proposal dictates these to be PortModuleObject and objectMethod. + * + * @todo Currently we just alias the new names to the old ones. Once the coding + * style has been finalized, we should add a deprecation note to the old names, + * and do a mass search and replace. + */ +#define PORT_INCLUDE_NEW_STYLE_ALIASES 1 +#endif // PORT_INCLUDE_NEW_STYLE_ALIASES + +/** + * @brief Suppresses unused variable warnings + * @param x - Variable or argument name + * + * No compilation errors are reported by any compiler when we use + * the following definition. + * + * #define PORT_UNREFERENCED_VARIABLE(x) ((void)sizeof(&(x))) + * + * But Coverity reports BAD_SIZEOF error with this definition. + * Adding a Coverity annotation "coverity[bad_sizeof]" near + * the definition does not work. The preprocessor ignores all + * the comments and the Coverity annotation is also ignored + * as a legal comment. As a result, this annotation never ends + * up in the source code where this macro is used. Hence, we use + * two definitions of this macro - one for Coverity and the other + * for the rest of the targets. + * + * Coverity does not report any warnings for unused variables. + * Hence, we do nothing while building for Coverity. 
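+ *
+ * A minimal usage sketch (illustrative only; the handler below is a
+ * hypothetical caller, not part of NvPort):
+ * ~~~{.c}
+ * static void exampleEventHandler(NvU32 *pCounter, NvU32 reserved)
+ * {
+ *     PORT_UNREFERENCED_VARIABLE(reserved); // parameter kept for future use
+ *     (*pCounter)++;
+ * }
+ * ~~~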
+ */ +#if !defined(__COVERITY__) +#define PORT_UNREFERENCED_VARIABLE(x) ((void)sizeof(&(x))) +#else +#define PORT_UNREFERENCED_VARIABLE(x) +#endif + +/// @} + +#if PORT_IS_MODULE_SUPPORTED(core) +#include "nvport/core.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(example) +#include "nvport/example.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(atomic) +#include "nvport/atomic.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(debug) +#include "nvport/debug.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(util) +#include "nvport/util.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(memory) +#include "nvport/memory.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(sync) +#include "nvport/sync.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(safe) +#include "nvport/safe.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(mmio) +#include "nvport/mmio.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(thread) +#include "nvport/thread.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(time) +#include "nvport/time.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(crypto) +#include "nvport/crypto.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(string) +#include "nvport/string.h" +#endif + +#if PORT_IS_MODULE_SUPPORTED(cpu) +#include "nvport/cpu.h" +#endif + +#endif // defined(PORT_IS_KERNEL_BUILD) + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _NVPORT_H_ diff --git a/src/nvidia/inc/libraries/nvport/safe.h b/src/nvidia/inc/libraries/nvport/safe.h new file mode 100644 index 000000000..2847c71ff --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/safe.h @@ -0,0 +1,621 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Safe module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_SAFE_H_ +#define _NVPORT_SAFE_H_ + +#ifndef PORT_SAFE_INLINE +#define PORT_SAFE_INLINE PORT_INLINE +#endif + +/** + * @defgroup NVPORT_SAFE Safe integer operations module + * + * @brief This module contains functions for safe use of integer types, without + * concern for overflow errors + * + * All functions return true if the operation was performed successfully, + * and false if there was an overflow (or division by zero). + * The final value is handed back in both cases, but if the function returned + * false, the value handed back is undefined. 
+ * + * @note These functions should be used: + * - When operating on data passing through the trust boundary (e.g. RM API) + * - When operating on signed data types (where overflows are undefined!) + * - Instead of checking overflows manually + * For internal functions, it is recommended not to clutter the code with this. + * Usually an internal overflow is a bug, and it should be fixed up the stack. + * + * + * @note A couple of artificial examples: + * - GOOD - Data received from user, not to be trusted. + * ~~~{.c} + * NvU32 NV_APIENTRY NvRmBzero(NvU8 *mem, NvLength len) + * { + * NvUPtr uptr = (NvUPtr) mem; + * if (mem == NULL || !portSafeAddUPtr(uptr, len, &uptr)) + * return NV_ERR_INVALID_PARAMETER; + * while ((NvUPtr) mem != uptr) + * *mem++ = 0; + * return NV_OK; + * } + * ~~~ + * - GOOD - Internal RM function, allowed to crash if given invalid params + * ~~~{.c} + * void bzero(NvU8 *mem, NvLength len) + * { + * while (len > 0) + * mem[--len] = 0; + * } + * ~~~ + * - BAD - If you are already checking for overflows manually, use these functions + * ~~~{.c} + * NV_STATUS osAllocMemTracked(void **ppAddress, NvU32 size) + * { + * NvU32 paddedSize = size; + * // allocate three extra dwords to hold the size and some debug tags + * paddedSize += 3 * sizeof(NvU32); + * // check for the overflow after increasing the size + * if (paddedSize < size) + * return NV_ERR_INSUFFICIENT_RESOURCES; + * size = paddedSize; + * ... + * } + * ~~~ + * - GOOD - Use provided functions + * ~~~{.c} + * NV_STATUS osAllocMemTracked(void **ppAddress, NvU32 size) + * { + * if (!portSafeAddU32(size, 3*sizeof(NvU32), &size)) + * return NV_ERR_INSUFFICIENT_RESOURCES; + * ... + * } + * ~~~ + * + * @{ + */ + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Add two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS8(NvS8 augend, NvS8 addend, NvS8 *pResult); +/** + * @brief Subtract two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS8(NvS8 minuend, NvS8 subtrahend, NvS8 *pResult); +/** + * @brief Multiply two signed 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS8(NvS8 multiplicand, NvS8 multiplier, NvS8 *pResult); +/** + * @brief Divide two signed 8bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS8(NvS8 dividend, NvS8 divisor, NvS8 *pResult); + + +/** + * @brief Add two signed 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS16(NvS16 augend, NvS16 addend, NvS16 *pResult); +/** + * @brief Subtract two signed 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS16(NvS16 minuend, NvS16 subtrahend, NvS16 *pResult); +/** + * @brief Multiply two signed 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS16(NvS16 multiplicand, NvS16 multiplier, NvS16 *pResult); +/** + * @brief Divide two signed 16bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS16(NvS16 dividend, NvS16 divisor, NvS16 *pResult); + + +/** + * @brief Add two signed 32bit integers, writing the result to *pResult. 
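+ *
+ * A minimal sketch of the intended call pattern (the wrapper function and its
+ * error handling below are hypothetical):
+ * ~~~{.c}
+ * NV_STATUS exampleApplyOffset(NvS32 base, NvS32 offset, NvS32 *pOut)
+ * {
+ *     if (!portSafeAddS32(base, offset, pOut))
+ *         return NV_ERR_INVALID_PARAMETER; // signed overflow detected
+ *     return NV_OK;
+ * }
+ * ~~~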
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS32(NvS32 augend, NvS32 addend, NvS32 *pResult); +/** + * @brief Subtract two signed 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS32(NvS32 minuend, NvS32 subtrahend, NvS32 *pResult); +/** + * @brief Multiply two signed 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS32(NvS32 multiplicand, NvS32 multiplier, NvS32 *pResult); +/** + * @brief Divide two signed 32bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS32(NvS32 dividend, NvS32 divisor, NvS32 *pResult); + + +/** + * @brief Add two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddS64(NvS64 augend, NvS64 addend, NvS64 *pResult); +/** + * @brief Subtract two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubS64(NvS64 minuend, NvS64 subtrahend, NvS64 *pResult); +/** + * @brief Multiply two signed 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulS64(NvS64 multiplicand, NvS64 multiplier, NvS64 *pResult); +/** + * @brief Divide two signed 64bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivS64(NvS64 dividend, NvS64 divisor, NvS64 *pResult); + + + + +/** + * @brief Add two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU8(NvU8 augend, NvU8 addend, NvU8 *pResult); +/** + * @brief Subtract two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU8(NvU8 minuend, NvU8 subtrahend, NvU8 *pResult); +/** + * @brief Multiply two unsigned 8bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU8(NvU8 multiplicand, NvU8 multiplier, NvU8 *pResult); +/** + * @brief Divide two unsigned 8bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU8(NvU8 dividend, NvU8 divisor, NvU8 *pResult); + + +/** + * @brief Add two unsigned 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU16(NvU16 augend, NvU16 addend, NvU16 *pResult); +/** + * @brief Subtract two unsigned 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU16(NvU16 minuend, NvU16 subtrahend, NvU16 *pResult); +/** + * @brief Multiply two unsigned 16bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU16(NvU16 multiplicand, NvU16 multiplier, NvU16 *pResult); +/** + * @brief Divide two unsigned 16bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU16(NvU16 dividend, NvU16 divisor, NvU16 *pResult); + + +/** + * @brief Add two unsigned 32bit integers, writing the result to *pResult. 
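+ *
+ * A minimal sketch combining the unsigned 32bit helpers to size an allocation
+ * request (the wrapper function below is hypothetical):
+ * ~~~{.c}
+ * NV_STATUS exampleTotalSize(NvU32 count, NvU32 elemSize, NvU32 headerSize, NvU32 *pTotal)
+ * {
+ *     NvU32 payload;
+ *     if (!portSafeMulU32(count, elemSize, &payload) ||
+ *         !portSafeAddU32(payload, headerSize, pTotal))
+ *     {
+ *         return NV_ERR_INSUFFICIENT_RESOURCES; // 32bit overflow
+ *     }
+ *     return NV_OK;
+ * }
+ * ~~~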
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU32(NvU32 augend, NvU32 addend, NvU32 *pResult); +/** + * @brief Subtract two unsigned 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU32(NvU32 minuend, NvU32 subtrahend, NvU32 *pResult); +/** + * @brief Multiply two unsigned 32bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU32(NvU32 multiplicand, NvU32 multiplier, NvU32 *pResult); +/** + * @brief Divide two unsigned 32bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU32(NvU32 dividend, NvU32 divisor, NvU32 *pResult); + + +/** + * @brief Add two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddU64(NvU64 augend, NvU64 addend, NvU64 *pResult); +/** + * @brief Subtract two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubU64(NvU64 minuend, NvU64 subtrahend, NvU64 *pResult); +/** + * @brief Multiply two unsigned 64bit integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulU64(NvU64 multiplicand, NvU64 multiplier, NvU64 *pResult); +/** + * @brief Divide two unsigned 64bit integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivU64(NvU64 dividend, NvU64 divisor, NvU64 *pResult); + + +/** + * @brief Add two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddUPtr(NvUPtr augend, NvUPtr addend, NvUPtr *pResult); +/** + * @brief Subtract two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubUPtr(NvUPtr minuend, NvUPtr subtrahend, NvUPtr *pResult); +/** + * @brief Multiply two pointer-sized integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulUPtr(NvUPtr multiplicand, NvUPtr multiplier, NvUPtr *pResult); +/** + * @brief Divide two pointer-sized integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivUPtr(NvUPtr dividend, NvUPtr divisor, NvUPtr *pResult); + + +/** + * @brief Add two length integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeAddLength(NvLength augend, NvLength addend, NvLength *pResult); +/** + * @brief Subtract two length integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeSubLength(NvLength minuend, NvLength subtrahend, NvLength *pResult); +/** + * @brief Multiply two length integers, writing the result to *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeMulLength(NvLength multiplicand, NvLength multiplier, NvLength *pResult); +/** + * @brief Divide two length integers, writing the result to *pResult. + * + * @return NV_ERR_INVALID_PARAMETER if divisor is zero + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeDivLength(NvLength dividend, NvLength divisor, NvLength *pResult); + + + + + + +/** + * @brief Convert a 8bit signed integer to a 8bit unsigned integer *pResult. 
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU8(NvS8 data, NvU8 *pResult); +/** + * @brief Convert a 8bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU16(NvS8 data, NvU16 *pResult); +/** + * @brief Convert a 8bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU32(NvS8 data, NvU32 *pResult); +/** + * @brief Convert a 8bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToU64(NvS8 data, NvU64 *pResult); +/** + * @brief Convert a 8bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToUPtr(NvS8 data, NvUPtr *pResult); +/** + * @brief Convert a 8bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS8ToLength(NvS8 data, NvLength *pResult); + + +/** + * @brief Convert a 16bit signed integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToS8(NvS16 data, NvS8 *pResult); +/** + * @brief Convert a 16bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU8(NvS16 data, NvU8 *pResult); +/** + * @brief Convert a 16bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU16(NvS16 data, NvU16 *pResult); +/** + * @brief Convert a 16bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU32(NvS16 data, NvU32 *pResult); +/** + * @brief Convert a 16bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToU64(NvS16 data, NvU64 *pResult); +/** + * @brief Convert a 16bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToUPtr(NvS16 data, NvUPtr *pResult); +/** + * @brief Convert a 16bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS16ToLength(NvS16 data, NvLength *pResult); + +/** + * @brief Convert a 32bit signed integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToS8(NvS32 data, NvS8 *pResult); +/** + * @brief Convert a 32bit signed integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToS16(NvS32 data, NvS16 *pResult); +/** + * @brief Convert a 32bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU8(NvS32 data, NvU8 *pResult); +/** + * @brief Convert a 32bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU16(NvS32 data, NvU16 *pResult); +/** + * @brief Convert a 32bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU32(NvS32 data, NvU32 *pResult); +/** + * @brief Convert a 32bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToU64(NvS32 data, NvU64 *pResult); +/** + * @brief Convert a 32bit signed integer to a pointer-sized unsigned integer *pResult. 
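+ *
+ * A minimal sketch (hypothetical caller) showing how a negative offset is
+ * rejected instead of silently wrapping:
+ * ~~~{.c}
+ * NV_STATUS exampleResolveAddress(NvUPtr base, NvS32 userOffset, NvUPtr *pAddr)
+ * {
+ *     NvUPtr offset;
+ *     if (!portSafeS32ToUPtr(userOffset, &offset))
+ *         return NV_ERR_INVALID_PARAMETER; // userOffset was negative
+ *     if (!portSafeAddUPtr(base, offset, pAddr))
+ *         return NV_ERR_INVALID_PARAMETER; // address computation overflowed
+ *     return NV_OK;
+ * }
+ * ~~~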
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToUPtr(NvS32 data, NvUPtr *pResult); +/** + * @brief Convert a 32bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS32ToLength(NvS32 data, NvLength *pResult); + + +/** + * @brief Convert a 64bit signed integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS8(NvS64 data, NvS8 *pResult); +/** + * @brief Convert a 64bit signed integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS16(NvS64 data, NvS16 *pResult); +/** + * @brief Convert a 64bit signed integer to a 32bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToS32(NvS64 data, NvS32 *pResult); +/** + * @brief Convert a 64bit signed integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU8(NvS64 data, NvU8 *pResult); +/** + * @brief Convert a 64bit signed integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU16(NvS64 data, NvU16 *pResult); +/** + * @brief Convert a 64bit signed integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU32(NvS64 data, NvU32 *pResult); +/** + * @brief Convert a 64bit signed integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToU64(NvS64 data, NvU64 *pResult); +/** + * @brief Convert a 64bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToUPtr(NvS64 data, NvUPtr *pResult); +/** + * @brief Convert a 64bit signed integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeS64ToLength(NvS64 data, NvLength *pResult); + + + +/** + * @brief Convert a 8bit unsigned integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU8ToS8(NvU8 data, NvS8 *pResult); + +/** + * @brief Convert a 16bit unsigned integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToS8(NvU16 data, NvS8 *pResult); +/** + * @brief Convert a 16bit unsigned integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToS16(NvU16 data, NvS16 *pResult); +/** + * @brief Convert a 16bit unsigned integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU16ToU8(NvU16 data, NvU8 *pResult); + + +/** + * @brief Convert a 32bit unsigned integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS8(NvU32 data, NvS8 *pResult); +/** + * @brief Convert a 32bit unsigned integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS16(NvU32 data, NvS16 *pResult); +/** + * @brief Convert a 32bit unsigned integer to a 32bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToS32(NvU32 data, NvS32 *pResult); +/** + * @brief Convert a 32bit unsigned integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToU8(NvU32 data, NvU8 *pResult); +/** + * @brief Convert a 32bit unsigned integer to a 16bit unsigned integer *pResult. 
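+ *
+ * A minimal sketch (hypothetical caller) of narrowing a 32bit request into a
+ * 16bit field only when it fits:
+ * ~~~{.c}
+ * NV_STATUS exampleSetQueueDepth(NvU32 requested, NvU16 *pHwField)
+ * {
+ *     if (!portSafeU32ToU16(requested, pHwField))
+ *         return NV_ERR_INVALID_PARAMETER; // value does not fit in 16 bits
+ *     return NV_OK;
+ * }
+ * ~~~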
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU32ToU16(NvU32 data, NvU16 *pResult); + + +/** + * @brief Convert a 64bit unsigned integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS8(NvU64 data, NvS8 *pResult); +/** + * @brief Convert a 64bit unsigned integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS16(NvU64 data, NvS16 *pResult); +/** + * @brief Convert a 64bit unsigned integer to a 32bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS32(NvU64 data, NvS32 *pResult); +/** + * @brief Convert a 64bit unsigned integer to a 64bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToS64(NvU64 data, NvS64 *pResult); +/** + * @brief Convert a 64bit unsigned integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU8(NvU64 data, NvU8 *pResult); +/** + * @brief Convert a 64bit unsigned integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU16(NvU64 data, NvU16 *pResult); +/** + * @brief Convert a 64bit unsigned integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToU32(NvU64 data, NvU32 *pResult); +/** + * @brief Convert a 64bit unsigned integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToUPtr(NvU64 data, NvUPtr *pResult); +/** + * @brief Convert a 64bit unsigned integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeU64ToLength(NvU64 data, NvLength *pResult); + + +/** + * @brief Convert a pointer-sized unsigned integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS8(NvUPtr data, NvS8 *pResult); +/** + * @brief Convert a pointer-sized unsigned integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS16(NvUPtr data, NvS16 *pResult); +/** + * @brief Convert a pointer-sized unsigned integer to a 32bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS32(NvUPtr data, NvS32 *pResult); +/** + * @brief Convert a pointer-sized unsigned integer to a 64bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToS64(NvUPtr data, NvS64*pResult); +/** + * @brief Convert a pointer-sized unsigned integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU8(NvUPtr data, NvU8 *pResult); +/** + * @brief Convert a pointer-sized unsigned integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU16(NvUPtr data, NvU16 *pResult); +/** + * @brief Convert a pointer-sized unsigned integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU32(NvUPtr data, NvU32 *pResult); +/** + * @brief Convert a pointer-sized unsigned integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToU64(NvUPtr data, NvU64 *pResult); +/** + * @brief Convert a pointer-sized unsigned integer to a pointer-sized unsigned integer *pResult. 
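+ *
+ * A minimal sketch (hypothetical caller) of turning the distance between two
+ * pointers into an NvLength:
+ * ~~~{.c}
+ * NV_STATUS exampleSpanLength(NvUPtr start, NvUPtr end, NvLength *pLen)
+ * {
+ *     NvUPtr span;
+ *     if (!portSafeSubUPtr(end, start, &span))
+ *         return NV_ERR_INVALID_PARAMETER; // end lies below start
+ *     if (!portSafeUPtrToLength(span, pLen))
+ *         return NV_ERR_INVALID_PARAMETER; // span does not fit in NvLength
+ *     return NV_OK;
+ * }
+ * ~~~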
+ */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeUPtrToLength(NvUPtr data, NvLength *pResult); + + +/** + * @brief Convert a length-sized unsigned integer to a 8bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS8(NvLength data, NvS8 *pResult); +/** + * @brief Convert a length-sized unsigned integer to a 16bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS16(NvLength data, NvS16 *pResult); +/** + * @brief Convert a length-sized unsigned integer to a 32bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS32(NvLength data, NvS32 *pResult); +/** + * @brief Convert a length-sized unsigned integer to a 64bit signed integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToS64(NvLength data, NvS64 *pResult); +/** + * @brief Convert a length-sized unsigned integer to a 8bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU8(NvLength data, NvU8 *pResult); +/** + * @brief Convert a length-sized unsigned integer to a 16bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU16(NvLength data, NvU16 *pResult); +/** + * @brief Convert a length-sized unsigned integer to a 32bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU32(NvLength data, NvU32 *pResult); +/** + * @brief Convert a length-sized unsigned integer to a 64bit unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToU64(NvLength data, NvU64 *pResult); +/** + * @brief Convert a length-sized unsigned integer to a pointer-sized unsigned integer *pResult. + */ +PORT_SAFE_INLINE NvBool NV_FORCERESULTCHECK portSafeLengthToUPtr(NvLength data, NvUPtr *pResult); + + + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +// Place extended functions here + +/// @} End extended functions + + +#if NVOS_IS_WINDOWS +#include "nvport/inline/safe_win.h" +#else +#include "nvport/inline/safe_generic.h" +#endif + +#endif // _NVPORT_SAFE_H_ +/// @} diff --git a/src/nvidia/inc/libraries/nvport/string.h b/src/nvidia/inc/libraries/nvport/string.h new file mode 100644 index 000000000..cdc70dfc4 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/string.h @@ -0,0 +1,162 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+ /**
+ * @file
+ * @brief String module public interface
+ */
+
+#ifndef _NVPORT_H_
+#error "This file cannot be included directly. Include nvport.h instead."
+#endif
+
+#ifndef _NVPORT_STRING_H_
+#define _NVPORT_STRING_H_
+
+/**
+ * @defgroup NVPORT_STRING String module
+ *
+ * @brief This module contains string functionality used by other modules.
+ *
+ * @{
+ */
+
+/**
+ * @name Core Functions
+ * @{
+ */
+
+/**
+ * @brief Compare two strings, character by character.
+ *
+ * Will only compare length bytes. Both strings are assumed to be at least that
+ * long.
+ *
+ * Strings are allowed to overlap.
+ *
+ * @returns:
+ * - 0 if all bytes are equal
+ * - <0 if str1 is less than str2 for the first unequal byte.
+ * - >0 if str1 is greater than str2 for the first unequal byte.
+ * @par Undefined:
+ * Behavior is undefined if str1 or str2 is NULL.
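+ *
+ * A minimal sketch (hypothetical caller; both buffers are assumed to be at
+ * least 4 bytes long):
+ * ~~~{.c}
+ * NvBool exampleHasGpuPrefix(const char *pStr)
+ * {
+ *     return (portStringCompare(pStr, "GPU-", 4) == 0);
+ * }
+ * ~~~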
+ */
+NvS32 portStringCompare(const char *str1, const char *str2, NvLength length);
+/**
+ * @brief Copy a string.
+ *
+ * Will copy at most destSize bytes, stopping early if a null-terminator is found
+ * or if srcSize bytes are read from the source.
+ *
+ * Null character is always written at the end of the string.
+ *
+ * @param dest destination buffer, of at least destSize bytes (including null terminator).
+ * @param src source buffer, of at least srcSize bytes (including null terminator).
+ *
+ * @return Number of bytes copied, including the null terminator (at most
+ * min(destSize, srcSize)).
+ *
+ * @par Undefined:
+ * Behavior is undefined if the destination buffer holds fewer than destSize
+ * allocated bytes.
+ * Behavior is undefined if the destination and source buffers overlap.
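+ *
+ * A minimal usage sketch (the wrapper below is hypothetical):
+ * ~~~{.c}
+ * NvLength exampleStoreName(char *dest, NvLength destSize, const char *pName)
+ * {
+ *     // Copies at most destSize bytes and always NUL-terminates the destination.
+ *     return portStringCopy(dest, destSize, pName, portStringLength(pName) + 1);
+ * }
+ * ~~~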
+ */
+NvLength portStringCopy(char *dest, NvLength destSize, const char *src, NvLength srcSize);
+/**
+ * @brief Concatenate two strings
+ *
+ * Appends the cat string after the end of str. Will copy only until the str
+ * buffer is filled. str is assumed to point to a buffer of at least strSize bytes.
+ *
+ * Null character is always written at the end of the string.
+ *
+ * @return str if the concatenation succeeded.
+ *
+ * @par Undefined:
+ * Behavior is undefined if the destination (str) buffer holds fewer than
+ * strSize allocated bytes.
+ * Behavior is undefined if the destination and source buffers overlap.
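+ *
+ * A minimal sketch (hypothetical caller) of building a label in a fixed buffer:
+ * ~~~{.c}
+ * void exampleBuildLabel(char *label, NvLength labelSize)
+ * {
+ *     portStringCopy(label, labelSize, "GPU", 4);
+ *     // Appends only as much of the suffix as still fits in the label buffer.
+ *     portStringCat(label, labelSize, ":0", 3);
+ * }
+ * ~~~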
+ */ +char *portStringCat(char *str, NvLength strSize, const char *cat, NvLength catSize); + + +/** + * @brief Returns the index of the first NULL byte in the given string + * + */ + NvLength portStringLength(const char *str); + + + /** + * @brief Returns the index of the first NULL byte in the given string, it searches maxLength + * chars. If NULL byte is not found it returns maxLength. + * + */ + NvLength portStringLengthSafe(const char *str, NvLength maxLength); + + +/** + * @brief Converts a string from ASCII (8-bit) to UTF16 (16 bit) + * + * Can perform the conversion in place if dest == src. + * + * @returns The number of characters in destination buffer, without the null + * terminator (i.e. strlen(dest)) + */ +NvLength portStringConvertAsciiToUtf16(NvU16 *dest, NvLength destSize, const char *src, NvLength srcSize); + +/** + * @brief Writes the hexadecimal string representation of the buffer + * + * @returns The number of characters in destination buffer, without the null + * terminator (i.e. strlen(str)) + */ +NvLength portStringBufferToHex(char *str, NvLength strSize, const NvU8 *buf, NvLength bufSize); + +/** + * @brief Convert a binary buffer into readable group of hex digits + * + * @param groupCount - Number of groups + * @param groups - How to structure the groups (in number of hex chars) + * @param separator - Character to separate the groups + * + * For the traditional display of UUIDs, there would be five groups, {8,4,4,4,12} + * with the separator being '-'. + * + * @note odd numbers for group sizes are not supported, they will be rounded down + * + * @returns The number of characters in destination buffer, without the null + * terminator (i.e. strlen(str)) + */ +NvLength portStringBufferToHexGroups(char *str, NvLength strSize, const NvU8 *buf, NvLength bufSize, NvLength groupCount, const NvU32 *groups, const char *separator); + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +// Place extended functions here + +/// @} End extended functions + +#endif // _NVPORT_STRING_H_ +/// @} diff --git a/src/nvidia/inc/libraries/nvport/sync.h b/src/nvidia/inc/libraries/nvport/sync.h new file mode 100644 index 000000000..2db3b7e41 --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/sync.h @@ -0,0 +1,829 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief Sync module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_SYNC_H_ +#define _NVPORT_SYNC_H_ +/** + * @defgroup NVPORT_SYNC Synchronization + * @brief This module includes synchronization primitives. + * + * @note The module provides two types of constructors: + * - portSyncXXXInitialize initializes the structure in the caller provided + * memory. + * - portSyncXXXCreate takes a @ref PORT_MEM_ALLOCATOR object that is used to + * allocate the memory. This memory is freed when the object is destroyed. + * If running in kernel mode, the provided memory (or allocator) must be + * non-paged. The functions do not check this, and behavior is undefined if + * the object is allocated in paged memory. + * + * Typical usage of synchronization objects is: + * ~~~{.c} + * PORT_XXX *pXxx = portSyncXxxCreate(pAllocator); + * if (!pXxx) + * return NV_ERR_INSUFFICIENT_RESOURCES; + * + * portSyncXxxAcquire(pXxx); + * doCriticalSection(); + * portSyncXxxRelease(pXxx); + * portSyncXxxDestroy(pXxx); + * ~~~ + * + * @par Checked builds only: + * The functions will assert the needed IRQL/interrupt requirements. These are + * specified for every function in a "Precondition" block. + * + * @note The IRQL/interrupt context requirements listed in "Precondition" blocks + * are only valid for Kernel Mode builds of NvPort. Usermode builds have no such + * restrictions. + * @{ + */ + +#if !PORT_IS_MODULE_SUPPORTED(memory) +#error "NvPort SYNC module requires MEMORY module to be present." +#endif + +#if LOCK_VAL_ENABLED +#define PORT_SYNC_RENAME_SUFFIX _REAL +#include "inline/sync_rename.h" +#endif + +/** + * Platform-specific inline implementations + */ +#if NVOS_IS_LIBOS +#include "nvport/inline/sync_libos.h" +#endif + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Initializes global sYNC tracking structures + * + * This function is called by @ref portInitialize. It is available here in case + * it is needed to initialize the SYNC module without initializing all the + * others. e.g. for unit tests. + */ +void portSyncInitialize(void); + +/** + * @brief Destroys global sYNC tracking structures + * + * This function is called by @ref portShutdown. It is available here in case + * it is needed to initialize the SYNC module without initializing all the + * others. e.g. for unit tests. + */ +void portSyncShutdown(void); + +/** + * @brief A spinlock data type. + * + * For documentation on what a spinlock is and how it behaves see + * http://en.wikipedia.org/wiki/Spinlock + * + * - A valid spinlock is any which is non-NULL + * - Spinlocks are not recursive. + * - Spinlocks will not put the thread to sleep. + * - No pageable data or code can be accessed while holding a spinlock (@ref + * portMemAllocPaged). + * - Spinlocks can be used in ISRs. + * + * @par Undefined: + * The behavior is undefined if the spinlock is acquired by one thread and + * released by another. + */ +typedef struct PORT_SPINLOCK PORT_SPINLOCK; +/** + * @brief Size (in bytes) of the @ref PORT_SPINLOCK structure + */ +extern NvLength portSyncSpinlockSize; + +/** + * @brief Initializes a spinlock using caller provided memory. + * + * Spinlocks are initialized in the released state. After a spinlock is + * initialized it can only be freed or acquired. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncSpinlockDestroy. 
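+ *
+ * A minimal sketch of the caller-provided-memory pattern (pMem is assumed to
+ * point to at least portSyncSpinlockSize bytes of non-paged memory):
+ * ~~~{.c}
+ * PORT_SPINLOCK *pLock = (PORT_SPINLOCK *)pMem;
+ * if (portSyncSpinlockInitialize(pLock) != NV_OK)
+ *     return NV_ERR_INSUFFICIENT_RESOURCES;
+ *
+ * portSyncSpinlockAcquire(pLock);
+ * // ... non-paged critical section ...
+ * portSyncSpinlockRelease(pLock);
+ *
+ * portSyncSpinlockDestroy(pLock);
+ * ~~~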
+ * + * @par Undefined: + * Initializing a spinlock multiple times is undefined.
+ * Using a spinlock before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pSpinlock is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncSpinlockInitialize(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Creates a new spinlock using the provided allocator. The newly created + * spinlock is initialized, as if @ref portSyncSpinlockInitialize was called. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->alloc, which may have additional restrictions. + */ +PORT_SPINLOCK *portSyncSpinlockCreate(PORT_MEM_ALLOCATOR *pAllocator); + +/** + * @brief Destroys a spinlock created with @ref portSyncSpinlockInitialize or + * @ref portSyncSpinlockCreate + * + * This frees any internally allocated resources that may be associated with + * the spinlock. If the spinlock was created using @ref portSyncSpinlockCreate, + * the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized spinlock.
+ * Behavior is undefined if called on a currently acquired spinlock.
+ * Behavior is undefined if any operation is performed on a spinlock that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncSpinlockDestroy(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Acquires a spinlock + * + * Blocks until the spinlock is acquired. + * + * Recursive acquires are not allowed and will result in a deadlock. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is held by the current thread + * + * @pre Windows: Any IRQL + * @pre Unix: Interrupt context is OK. + * @note Will not put the thread to sleep. + * @post Will raise the IRQL / mask interrupts + */ +void portSyncSpinlockAcquire(PORT_SPINLOCK *pSpinlock); + +/** + * @brief Releases a spinlock acquired with @ref portSyncSpinlockAcquire. + * + * @par Checked builds only: + * Will assert if pSpinlock == NULL
+ * Will assert if the lock is not held by the current thread + * + * @par Undefined: + * Behavior is undefined if the spinlock has not previously been acquired. + * + * @pre Windows: Any IRQL + * @pre Unix: Interrupt context is OK. + * @post Will restore the IRQL / interrupts + */ +void portSyncSpinlockRelease(PORT_SPINLOCK *pSpinlock); + +/** + * @brief A mutex data type. + * + * A PORT_MUTEX is a classic mutex that follows the following rules. + * - Only a single thread can hold the mutex. + * - The thread that acquires the mutex must be the one to release it. + * - Failure to acquire the mutex may result in the thread blocking and not + * resuming until the mutex is available. + * - Failure of a thread to release a mutex before it exits can result in a + * deadlock if any other threads attempts to acquire it. + * - Mutexes are not recursive. + * - Mutexes may put the thread to sleep. + * + * Mutexes can be used on IRQL <= DISPATCH_LEVEL on Windows, and in + * non-interrupt context on Unix. + */ +typedef struct PORT_MUTEX PORT_MUTEX; + +/** + * @brief Size (in bytes) of the @ref PORT_MUTEX structure + */ +extern NvLength portSyncMutexSize; + +/** + * @brief Creates a new mutex using the provided allocator. The newly created + * mutex is initialized, as if @ref portSyncMutexInitialize was called. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->alloc, which may have additional restrictions. + */ +PORT_MUTEX *portSyncMutexCreate(PORT_MEM_ALLOCATOR *pAllocator); +/** + * @brief Initializes a mutex using caller provided memory. + * + * Mutexes are initialized in the released state. After a mutex is + * initialized it can only be freed or acquired. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncMutexDestroy. + * + * @par Checked builds only: + * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Initializing a mutex multiple times is undefined.
+ * Using a mutex before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pMutex is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncMutexInitialize(PORT_MUTEX *pMutex); +/** + * @brief Destroys a mutex created with @ref portSyncMutexInitialize or + * @ref portSyncMutexCreate + * + * This frees any internally allocated resources that may be associated with + * the mutex. If the mutex was created using @ref portSyncMutexCreate, + * the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized mutex.
+ * Behavior is undefined if the mutex is currently acquired and it is + * destroyed.
+ * Behavior is undefined if any operation is performed on a mutex that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncMutexDestroy(PORT_MUTEX *pMutex); + +/** + * @brief Acquires a mutex. + * + * If the mutex is already held a call will block and the thread may be put to + * sleep until it is released. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + */ +void portSyncMutexAcquire(PORT_MUTEX *pMutex); + +/** + * @brief Attempts to acquire a mutex without blocking. + * + * A call to this function will immediately return NV_TRUE with the mutex + * acquired by the calling thread if the mutex is not held by another thread. + * It will immediately return NV_FALSE if the mutex is held by another thread. + * + * If the mutex is held by the calling thread then this call will always fail. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +NvBool NV_FORCERESULTCHECK portSyncMutexAcquireConditional(PORT_MUTEX *pMutex); + +/** + * @brief Releases a mutex held by the current thread. + * + * A call to this function releases control of the mutex. Immediately on + * return of this function another thread will be allowed to acquire the mutex. + * + * @par Checked builds only: + * Will assert if pMutex == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Attempting to release a mutex not held by the current thread will result in + * undefined behavior + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +void portSyncMutexRelease(PORT_MUTEX *pMutex); + +PORT_INLINE void portSyncMutexReleaseOutOfOrder(PORT_MUTEX *pMutex) +{ + portSyncMutexRelease(pMutex); +} + +/** + * @brief Represents a semaphore data type. + * + * This behaves as you would expect a classic semaphore to. It follows the + * following rules: + * - A semaphore is initialized with a starting value + * - Acquiring the semaphore decrements the count. If the count is 0 it will + * block until the count is non-zero. + * - Releasing the semaphore increments the count. + * - A semaphore can be acquired or released by any thread and a + * acquire/release pair are not required to be from the same thread. + * - PORT_SEMAPHORE is a 32 bit semaphore. + * - Semaphores may put the thread to sleep. + * + * Semaphores have varying IRQL restrictions on Windows, which is documented for + * every function separately. + * They can only be used in non-interrupt context on Unix. + */ +typedef struct PORT_SEMAPHORE PORT_SEMAPHORE; +/** + * @brief Size (in bytes) of the @ref PORT_SEMAPHORE structure + */ +extern NvLength portSyncSemaphoreSize; + +/** + * @brief Initializes a semaphore using caller provided memory. + * + * Semaphores are initialized with startValue. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncSemaphoreDestroy. + * + * @par Checked builds only: + * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Initializing a semaphore multiple times is undefined.
+ * Using a semaphore before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pSemaphore is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: IRQL == PASSIVE_LEVEL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncSemaphoreInitialize(PORT_SEMAPHORE *pSemaphore, NvU32 startValue); +/** + * @brief Creates a new semaphore using the provided allocator. The newly + * created semaphore is initialized, as if @ref portSyncSemaphoreInitialize + * was called. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: IRQL == PASSIVE_LEVEL + * @pre Unix: Non-interrupt context + */ +PORT_SEMAPHORE *portSyncSemaphoreCreate(PORT_MEM_ALLOCATOR *pAllocator, NvU32 startValue); +/** + * @brief Destroys a semaphore created with @ref portSyncSemaphoreInitialize or + * @ref portSyncSemaphoreCreate + * + * This frees any internally allocated resources that may be associated with + * the semaphore. If the semaphore was created using + * @ref portSyncSemaphoreCreate, the memory will also be freed. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized semaphore.
+ * Behavior is undefined if the semaphore is currently acquired and it is + * destroyed.
+ * Behavior is undefined if any operation is performed on a semaphore that has + * been destroyed. + * + * @pre Windows: Any IRQL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncSemaphoreDestroy(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Acquires (decrements) a semaphore. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + */ +void portSyncSemaphoreAcquire(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Conditionally acquires a semaphore. + * + * A call to this function will immediately return NV_TRUE and acquire the + * semaphore if it can be done without blocking. + * + * It will immediately return NV_FALSE if acquiring the semaphore would require + * blocking. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +NvBool NV_FORCERESULTCHECK portSyncSemaphoreAcquireConditional(PORT_SEMAPHORE *pSemaphore); +/** + * @brief Releases (increments) a semaphore. + * + * @par Checked builds only: + * Will assert if pSemaphore == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + */ +void portSyncSemaphoreRelease(PORT_SEMAPHORE *pSemaphore); + +PORT_INLINE void portSyncSemaphoreReleaseOutOfOrder(PORT_SEMAPHORE *pSemaphore) +{ + portSyncSemaphoreRelease(pSemaphore); +} + +/** + * @brief Represents a readers-writer lock data type. + * + * AcquireRead and AcquireWrite will do a sleeping wait if the lock isn't + * immediately available. + * + * PORT_RWLOCK prevents starvation of both readers and writers. + * + * @par Undefined: + * Any irregular use will result in undefined behavior. This includes: + * - One thread acquiring both read and write locks + * - Performing operations on an uninitialized/destroyed lock + * - Releasing locks which weren't acquired by the releasing thread + * - Acquiring the same lock twice without releasing (it is not recursive) + * + * @note If you desire to upgrade the lock (shared->exclusive), you must first + * release the shared lock, then acquire the exclusive. + */ +typedef struct PORT_RWLOCK PORT_RWLOCK; +/** + * @brief Size (in bytes) of the @ref PORT_RWLOCK structure + */ +extern NvLength portSyncRwLockSize; + +/** + * @brief Initializes a RWLock using caller provided memory. + * + * On some platforms the underlying platform code may allocate memory. + * This memory will be freed upon calling @ref portSyncRwLockDestroy. + * + * @par Checked builds only: + * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Initializing a lock multiple times is undefined.
+ * Using a lock before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pLock is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + */ +NV_STATUS portSyncRwLockInitialize(PORT_RWLOCK *pLock); +/** + * @brief Creates and initializes a new RWLock using the provided allocator. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + */ +PORT_RWLOCK *portSyncRwLockCreate(PORT_MEM_ALLOCATOR *pAllocator); +/** + * @brief Acquires the read (shared) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, the thread will sleep. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + * @post Windows: Normal APCs will be disabled. + */ +void portSyncRwLockAcquireRead(PORT_RWLOCK *pLock); +/** + * @brief Conditionally acquires the read (shared) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, it will return NV_FALSE without + * blocking. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NV_TRUE if the lock was acquired. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be disabled if the lock was acquired. + */ +NvBool NV_FORCERESULTCHECK portSyncRwLockAcquireReadConditional(PORT_RWLOCK *pLock); +/** + * @brief Acquires the write (exclusive) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, the thread will sleep. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note May put the thread to sleep. + * @post Windows: Normal APCs will be disabled. + */ +void portSyncRwLockAcquireWrite(PORT_RWLOCK *pLock); +/** + * @brief Conditionally acquires the write (exclusive) lock on the given RW_LOCK + * + * If the lock cannot be immediately acquired, it will return NV_FALSE without + * blocking. + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NV_TRUE if the lock was acquired. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be disabled if the lock was acquired. + */ +NvBool NV_FORCERESULTCHECK portSyncRwLockAcquireWriteConditional(PORT_RWLOCK *pLock); +/** + * @brief Releases the read (shared) lock on the given RW_LOCK + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + * @pre Unix: Non-interrupt context + * @note Will not put the thread to sleep. + * @post Windows: Normal APCs will be re-enabled. + */ +void portSyncRwLockReleaseRead(PORT_RWLOCK *pLock); + +PORT_INLINE void portSyncRwLockReleaseReadOutOfOrder(PORT_RWLOCK *pLock) +{ + portSyncRwLockReleaseRead(pLock); +} + +/** + * @brief Releases the write (exclusive) lock on the given RW_LOCK + * + * @par Checked builds only: + * Will assert if pLock == NULL
+ * Will assert if the lock is not held by the current thread
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied.
+ *
+ * @pre Windows: IRQL <= DISPATCH_LEVEL
+ * @pre Unix: Non-interrupt context
+ * @note Will not put the thread to sleep.
+ * @post Windows: Normal APCs will be re-enabled.
+ */
+void portSyncRwLockReleaseWrite(PORT_RWLOCK *pLock);
+
+PORT_INLINE void portSyncRwLockReleaseWriteOutOfOrder(PORT_RWLOCK *pLock)
+{
+    portSyncRwLockReleaseWrite(pLock);
+}
+
+/**
+ * @brief Destroys a RWLock created with @ref portSyncRwLockCreate
+ *
+ * This frees any internally allocated resources that may be associated with
+ * the lock.
+ *
+ * @par Checked builds only:
+ * Will assert if pLock == NULL
+ * Will assert if the lock is being held
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Behavior is undefined if called on an uninitialized lock.
+ * Behavior is undefined if the lock is destroyed while it is still held.
+ * Behavior is undefined if any operation is performed on a lock that has + * been destroyed. + * + * @pre Windows: IRQL <= APC_LEVEL + * @pre Unix: Non-interrupt context + * @pre Calls pAllocator->free, which may have additional restrictions. + */ +void portSyncRwLockDestroy(PORT_RWLOCK *pLock); + + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +/** + * @brief Creates a new fast mutex using the provided allocator. The newly + * created mutex is initialized, as if @ref portSyncExFastMutexInitialize was + * called. + * + * See @ref portSyncExFastMutexInitialize for details on fast mutex objects. + * + * @par Checked builds only: + * Will assert if pAllocator == NULL
+ * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @return NULL on failed allocation / initialization. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + */ +PORT_MUTEX *portSyncExFastMutexCreate(PORT_MEM_ALLOCATOR *pAllocator); +/** + * @brief Initializes a fast mutex using caller provided memory. + * + * A fast mutex is a subtype of the @ref PORT_MUTEX object that is generally + * more performant, but cannot be acquired from DPCs (IRQL == DISPATCH_LEVEL), + * even when using @ref portSyncMutexAcquireConditional. + * + * Code allocating fast mutex objects must ensure that conditional acquire is + * never attempted at DISPATCH_LEVEL. In checked builds, an assert will be + * triggered if this is not satisfied. + * + * Other than the limitation above, fast mutex objects have the same interface + * as regular @ref PORT_MUTEX objects. + * + * @par Checked builds only: + * Will assert if the IRQL/interrupt context preconditions are not satisfied. + * + * @par Undefined: + * Initializing a mutex multiple times is undefined.
+ * Using a mutex before it is initialized results in undefined behavior. + * + * @return + * - NV_OK if successful + * - NV_ERR_INVALID_POINTER if pMutex is NULL + * - Can return other NV_STATUS values from the OS interface layer. + * + * @pre Windows: IRQL <= DISPATCH_LEVEL + */ +NV_STATUS portSyncExFastMutexInitialize(PORT_MUTEX *pMutex); + +// Fast mutexes only make sense on Windows kernel mode +#define portSyncExFastMutexCreate_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS) +#define portSyncExFastMutexInitialize_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS) + +/** + * @brief Returns true if it is safe to put the current thread to sleep. + * + * Safety in this case relates only to the current interrupt level, and does not + * take into account any locks held by the thread that may result in deadlocks. + */ +NvBool portSyncExSafeToSleep(void); +#define portSyncExSafeToSleep_SUPPORTED PORT_IS_KERNEL_BUILD +/** + * @brief Returns true if it is safe to wake other threads. + * + * Safety in this case relates only to the current interrupt level. + */ +NvBool portSyncExSafeToWake(void); +#define portSyncExSafeToWake_SUPPORTED PORT_IS_KERNEL_BUILD +/** + * @brief Returns the platform specific implementation of the interrupt level. + * + * On platforms that have multiplie interrupt levels (i.e. Windows), this will + * return the numeric representation that the underlying platform uses. + * + * If a platform only has a binary distinction, this will return 0 or 1. + * + * On platforms where the concept of interrupt levels does not exist, it will + * return 0. + */ +NvU64 portSyncExGetInterruptLevel(void); +#define portSyncExGetInterruptLevel_SUPPORTED PORT_IS_KERNEL_BUILD + +/** + * @brief Disable preemption on a given CPU + * + * After calling this function, the thread will not be scheduled out of the + * current CPU until a call to @ref portSyncExRestorePreemption is made. + * + * The thread may still be paused to service an IRQ on the same CPU, but upon + * completion, execution will resume on the same CPU. + * + * @pre Can be called at any IRQL/interrupt context + * @post Blocking calls are prohibited while preemption is disabled. + * + * @return Returns the previous preemption state, that should be passed onto + * @ref portSyncExRestorePreemption + */ +NvU64 portSyncExDisablePreemption(void); +/** + * @todo bug 1583359 - Implement for other platforms + * Only on Windows for now, needed for bug 1995797 + */ +#define portSyncExDisablePreemption_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS) + +/** + * @brief Restores the previous preemption state + * + * See @ref portSyncExDisablePreemption for details + */ +void portSyncExRestorePreemption(NvU64 preemptionState); +#define portSyncExRestorePreemption_SUPPORTED (PORT_IS_KERNEL_BUILD && NVOS_IS_WINDOWS) + + +/// @} End extended functions + +#include "nvport/inline/sync_tracking.h" + +#endif // _NVPORT_SYNC_H_ +/// @} diff --git a/src/nvidia/inc/libraries/nvport/thread.h b/src/nvidia/inc/libraries/nvport/thread.h new file mode 100644 index 000000000..5b9234f9c --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/thread.h @@ -0,0 +1,318 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Thread module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_THREAD_H_ +#define _NVPORT_THREAD_H_ + +/** + * Platform-specific inline implementations + */ +#if NVOS_IS_LIBOS +#include "nvport/inline/thread_libos.h" +#endif + +/** + * @defgroup NVPORT_THREAD Threading module + * + * @brief This module contains basic threading functionality. + * + * @{ + */ + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Opaque structure representing a thread. + * + * Structure is allocated on the stack. + */ +struct PORT_THREAD +{ + NvU64 threadId; +}; + +typedef struct PORT_THREAD PORT_THREAD; + +/** + * @brief Opaque structure representing a process. + * + * While this structure is opaque, you can still allocate it on the stack. + */ +struct PORT_PROCESS +{ + NvU64 pid; +}; + +typedef struct PORT_PROCESS PORT_PROCESS; + +/// @brief An invalid thread handle. Depencence on OS. +extern const PORT_THREAD PORT_THREAD_INVALID; +/// @brief An invalid process handle. Dependnce on OS. +extern const PORT_PROCESS PORT_PROCESS_INVALID; + +/** + * @brief returns true if the given thread handle is valid. + */ +NvBool portThreadIsValid(PORT_THREAD thread); + +/** + * @brief Get the handle of the currently executing thread. + * + * @note In case of win-user you need to destroy returned thread. + */ +PORT_THREAD portThreadGetCurrentThread(void); + +/** + * @brief Get the thread handle by the thread ID. + * + * This ID translates directly into the underlying platform's thread ID. + * + * @returns PORT_THREAD_INVALID if the ID is not valid, thread handle if it is. + * + * @note In case of win-user you need to destroy returned thread. + */ +PORT_THREAD portThreadGetThreadById(NvU64 threadId); + +/** + * @brief Get the id of the currently executing thread. + */ +NvU64 portThreadGetCurrentThreadId(void); + +/** + * @brief Get the process id of the currently executing thread. + */ +NvU64 portThreadGetCurrentProcessId(void); + +/** + * @brief Compare two thread handles + * + * @returns TRUE if the handles are equal. + */ +NvBool portThreadEqual(PORT_THREAD thread1, PORT_THREAD thread2); + +/** + * @brief A thread's "main" function. The arguments are passed through a single + * void*, which the thread can then cast accordingly. 
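+ *
+ * A minimal sketch of a thread body and the surrounding create/wait/destroy
+ * sequence (workerMain, runWorker and WORKER_ARGS are illustrative
+ * placeholders, not part of this interface):
+ *
+ * ~~~{.c}
+ * typedef struct { NvU32 iterations; } WORKER_ARGS;
+ *
+ * static NvS32 workerMain(void *pArg)
+ * {
+ *     WORKER_ARGS *pWork = (WORKER_ARGS *)pArg; // Cast the single void* argument
+ *     NvU32 i;
+ *     for (i = 0; i < pWork->iterations; i++)
+ *     {
+ *         // ... one unit of work ...
+ *     }
+ *     return 0; // Reported back through portThreadWaitToComplete
+ * }
+ *
+ * static void runWorker(void)
+ * {
+ *     PORT_THREAD thread;
+ *     WORKER_ARGS args = { 100 };
+ *     NvS32 status;
+ *
+ *     if (portThreadCreate(&thread, workerMain, &args) == NV_OK)
+ *     {
+ *         portThreadWaitToComplete(thread, &status); // Join: blocks until workerMain returns
+ *         portThreadDestroy(thread);                 // Free resources held by the thread
+ *     }
+ * }
+ * ~~~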
+ */ +typedef NvS32 (*PORT_THREAD_MAIN)(void *); + +/** + * @brief A thread constructor + * + * Creates a thread with the given main function and its argument. The created + * thread will immediately start executing. Any synchronization should be done + * in the thread body. + * + * @param [out] pThread - The new thread's handle + * @param [in] threadMain - The new thread's main() function + * @param [in] argument - The void* pointer to be passed into thread's main() + * + * @return NV_OK on success + * + * @todo Should we provide a flag to automatically destroy the thread when finished? + */ +NV_STATUS portThreadCreate(PORT_THREAD *pThread, PORT_THREAD_MAIN threadMain, void *argument); + +/** + * @brief A thread destructor + * + * Destroys the given thread, freeing any resources associated with it. If the + * specified thread has not finished its execution, it will block until it finishes. + * + * Will assert if called on a thread that hasn't been created using + * @ref portThreadCreate + */ +void portThreadDestroy(PORT_THREAD thread); + +/** + * @brief End execution of the current thread, returning the status. + * + * This behaves like the C standard exit(int) function - Execution is + * immediately stopped, without any stack unwinding. No resources allocated in + * the thread are freed. The status is returned to the parent thread. + * + * @par Kernel mode only: + * Will assert if called on a thread not created by @ref portThreadCreate. + * On usermode, this is acceptable (equivalent of calling exit() from main()) + */ +void portThreadExit(NvS32 status); + +/** + * @brief Block the current thread until the given thread has finished. + * + * Sometimes called a thread join operation. The current thread is suspended + * until threadToWaitOn has completed execution, either by returning from the + * main function, by calling @ref portThreadExit, or by being killed by @ref + * portThreadKill. + * + * The current thread can also be awoken by @ref portThreadWake. + * + * @param [out, optional] pReturnStatus - The finished thread's return status. + */ +NV_STATUS portThreadWaitToComplete(PORT_THREAD threadToWaitOn, NvS32 *pReturnStatus); + +/** + * @brief Move the current thread to the end of the run queue + * + * The OS schedules other waiting threads to run, before returning to the current thread. + * + * This function must not be called in interrupt context or raised IRQL. It may not be + * advisable to call this function while holding various RM locks. 
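+ *
+ * A minimal sketch of the usual polite-polling pattern (bDone is a
+ * hypothetical flag owned and eventually set by another thread):
+ *
+ * ~~~{.c}
+ * while (!bDone)
+ * {
+ *     portThreadYield(); // Let other runnable threads make progress
+ * }
+ * ~~~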
+ */ +void portThreadYield(void); + +/// @} End core functions + +/** + * @name Extended Functions + * @{ + */ + +/** + * @brief Get the priority of a given thread as an int + * + * The priority values are defined by the OS, but they can be compared with < and > + */ +NvU64 portThreadExGetPriority(NvU64 threadId); +#define portThreadExGetPriority_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS) + +/** + * @brief Set the priority of a given thread + * + * Only valid values are those returned by a previous call to @ref portThreadGetPriority, + * though not necessarily on the same thread object + */ +void portThreadExSetPriority(NvU64 threadId, NvU64 priority); +#define portThreadExSetPriority_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS) + +#if PORT_IS_FUNC_SUPPORTED(portThreadExGetPriority) +extern const NvU64 portThreadPriorityMin; +extern const NvU64 portThreadPriorityDefault; +extern const NvU64 portThreadPriorityLowRealtime; +extern const NvU64 portThreadPriorityMax; +#endif + +/** + * @brief Structure representing the processors affinity of the thread. + * + * A structure describes a thread affinity, which is a set of processors on + * which a thread is allowed to run. All of the processors in this set belong + * to the group that is identified by the cpuGroup member of the structure. + * The mask member contains an affinity mask that identifies the processors in + * the set of 64 processors. +*/ +typedef struct PORT_THREAD_AFFINITY +{ + NvU64 cpuGroup; + NvU64 mask; +} PORT_THREAD_AFFINITY; + +/** + * @brief Set the affinity of a current thread. + * @param [in] pAffinity - Pointer to affinity structure. + * @param [out] pPreAffinity - Pointer to Previous affinity structure. + * @return NV_OK If successful else return the following errors + * NV_ERR_INVALID_IRQ_LEVEL: IRQL is >= DISPATCH_LEVEL in Windows Drivers. + * NV_ERR_INVALID_ARGUMENT: Either of the passed arguments are NULL. + */ +NV_STATUS portThreadExSetAffinity(const PORT_THREAD_AFFINITY *pAffinity, + PORT_THREAD_AFFINITY *pPreAffinity); +#define portThreadExSetAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS) + +/** + * @brief Set the affinity of the current thread with input as logical core index + * + * @param [in] coreIndex Logical core to which affinity needs to be set. For + * systems with more than one group, client need to compute + * required core index. + * + * @param [out] pPrevAffinity Pointer to previous affinity + * + * @return NV_OK on success + */ +NV_STATUS portThreadExSetSystemGroupAffinity(NvU32 coreIndex, PORT_THREAD_AFFINITY* pPrevAffinity); +#define portThreadExSetSystemGroupAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS) + +/** + * @brief Restores the previous affinity of the current thread + * + * @param [in] pPrevAffinity Specifies the new system affinity of the current thread. 
+ Set this parameter to the value that was returned by a + previous call to the portThreadExSetSystemGroupAffinity + * + * @return NV_OK on success + */ +NV_STATUS portThreadExRevertToUserGroupAffinity(PORT_THREAD_AFFINITY* pPrevAffinity); +#define portThreadExRevertToUserGroupAffinity_SUPPORTED (NVOS_IS_WINDOWS && !PORT_IS_MODS) + +typedef enum PORT_THREAD_PROCESS_NOTIFY_EVENT +{ + PORT_THREAD_PROCESS_NOTIFY_EVENT_CREATE, + PORT_THREAD_PROCESS_NOTIFY_EVENT_EXIT +} PORT_THREAD_PROCESS_NOTIFY_EVENT; + +typedef void (*PORT_THREAD_PROCESS_NOTIFY_ROUTINE)(NvU64 processId, + PORT_THREAD_PROCESS_NOTIFY_EVENT notifyEvent, void *pPvtData); +/** + * @brief Register a callback function with nvport thread module to get thread + * create/exit event notifications. + * @param [in] pFunc Pointer to event callback function. + * @param [in] pPvtData Pointer to event callback function private data. + * @param [out] ppOldFunc Pointer to old event callback function. + * @param [out] ppPvtData Pointer to old event callback function private data. + * + * @return NV_OK on success + */ +NV_STATUS portThreadExRegisterProcessNotifyRoutine(PORT_THREAD_PROCESS_NOTIFY_ROUTINE pFunc, void *pPvtData, + PORT_THREAD_PROCESS_NOTIFY_ROUTINE *ppOldFunc, void **ppPvtData); +#define portThreadExRegisterProcessNotifyRoutine_SUPPORTED (NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !PORT_IS_MODS) + +/** + * @brief Unregister a callback function with nvport thread module to get thread + * create/exit event notifications. + * @param [in] pOldFunc Pointer to old event callback function which was returned + * by portThreadExRegisterProcessNotifyRoutine. + * @param [in] pOldPvtData Pointer to old event callback function private data which + * was returned by portThreadExRegisterProcessNotifyRoutine. + * + * @return NV_OK on success + */ +NV_STATUS portThreadExUnregisterProcessNotifyRoutine(PORT_THREAD_PROCESS_NOTIFY_ROUTINE pOldFunc, void* pOldPvtData); +#define portThreadExUnregisterProcessNotifyRoutine_SUPPORTED (NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !PORT_IS_MODS) +/// @} End extended functions + +#endif // _NVPORT_THREAD_H_ +/// @} + diff --git a/src/nvidia/inc/libraries/nvport/util.h b/src/nvidia/inc/libraries/nvport/util.h new file mode 100644 index 000000000..6234d618a --- /dev/null +++ b/src/nvidia/inc/libraries/nvport/util.h @@ -0,0 +1,254 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util module public interface + */ + +#ifndef _NVPORT_H_ +#error "This file cannot be included directly. Include nvport.h instead." +#endif + +#ifndef _NVPORT_UTIL_H_ +#define _NVPORT_UTIL_H_ + +/** + * @defgroup NVPORT_UTIL Utilities module + * + * @brief This module contains utility functions used by other modules. + * + * Generic implementation for all functions is in util-generic.h + * + * @{ + */ + +/** + * @def PORT_UTIL_INLINE + * + * @note There are ways to declare a function without qualifiers, and then + * redefine it as static/extern inline, but none work across all compilers that + * we use. The easiest solution is to just specify the qualifiers upon function + * declaration. We assume all these will be inline, but that can be changed + * through the makefile when adding non-inline implementations: + * MODULE_DEFINES += PORT_UTIL_INLINE + * MODULE_SOURCES += util-impl.c + */ +#ifndef PORT_UTIL_INLINE +#define PORT_UTIL_INLINE PORT_INLINE +#endif + +#if NVOS_IS_LIBOS +#include "nvport/inline/util_libos.h" +#endif + +/** + * @name Core Functions + * @{ + */ + +/** + * @brief Returns true if the two buffers overlap. + * + * Buffer length is specified in len0 and len1 params. + */ +PORT_UTIL_INLINE NvBool portUtilCheckOverlap(const NvU8 *pData0, NvLength len0, const NvU8 *pData1, NvLength len1); + +/** + * @brief Returns true if address is aligned to align bytes + * + * If align is not a power of two, it will return false. + */ +PORT_UTIL_INLINE NvBool portUtilCheckAlignment(const void *address, NvU32 align); + +/** + * @brief Returns true if num is a power of two. + */ +PORT_UTIL_INLINE NvBool portUtilIsPowerOfTwo(NvU64 num); + +/** + * @brief Write the 16bit number to pBuf in Little Endian + */ +PORT_UTIL_INLINE void portUtilWriteLittleEndian16(void *pBuf, NvU16 value); + +/** + * @brief Write the 32bit number to pBuf in Little Endian + */ +PORT_UTIL_INLINE void portUtilWriteLittleEndian32(void *pBuf, NvU32 value); + +/** + * @brief Write the 64bit number to pBuf in Little Endian + */ +PORT_UTIL_INLINE void portUtilWriteLittleEndian64(void *pBuf, NvU64 value); + +/** + * @brief Write the 16bit number to pBuf in Big Endian + */ +PORT_UTIL_INLINE void portUtilWriteBigEndian16(void *pBuf, NvU16 value); + +/** + * @brief Write the 32bit number to pBuf in Big Endian + */ +PORT_UTIL_INLINE void portUtilWriteBigEndian32(void *pBuf, NvU32 value); + +/** + * @brief Write the 64bit number to pBuf in Big Endian + */ +PORT_UTIL_INLINE void portUtilWriteBigEndian64(void *pBuf, NvU64 value); + +/** + * @brief Efficient spinloop body that doesn't waste power. + * + * This function will spin for a very short time, then return, so it should be + * called as: + * + * ~~~{.c} + * while (bShouldSpin) + * portUtilSpin(); + * ~~~ + */ +static NV_FORCEINLINE void portUtilSpin(void); + +/** + * @brief Returns true if the caller is currently in interrupt context. + * + * Interrupt context here means: + * - Unix: Interrupts are masked + * - Windows: IRQL > DISPATCH_LEVEL + */ +NvBool portUtilIsInterruptContext(void); + +/** + * @def portUtilGetReturnAddress() + * Returns the current function's return address. + */ + +/** + * @def portUtilGetIPAddress() + * Returns the current IP address. 
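+ * Here "IP" means the instruction pointer (program counter) of the calling
+ * code, not a network address; compare @ref portUtilExGetStackTrace, where
+ * level == 0 likewise gives the current IP.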
+ */ +NV_NOINLINE NvUPtr portUtilGetIPAddress(void); + +/** + * @brief Returns number of leading zeros - starting from MSB; + * + * Examples: + * portUtilCountLeadingZeros64(0) == 64 + * portUtilCountLeadingZeros64(1) == 63 + * portUtilCountLeadingZeros64(2) == 62 + * portUtilCountLeadingZeros64(0xFFFFFFFFFFFFFF00) == 0 + */ +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros64(NvU64 n); + +/** + * @brief Like @ref portUtilCountLeadingZeros64 but operating on 32bit ints + */ +PORT_UTIL_INLINE NvU32 portUtilCountLeadingZeros32(NvU32 n); + +/** + * @brief Returns number of trailing zeros - starting from LSB; + * + * Examples: + * portUtilCountTrailingZeros64(0) == 64 + * portUtilCountTrailingZeros64(1) == 0 + * portUtilCountTrailingZeros64(2) == 1 + * portUtilCountTrailingZeros64(0xFFFFFFFFFFFFFF00) == 8 + */ +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros64(NvU64 n); + +/** + * @brief Like @ref portUtilCountTrailingZeros64 but operating on 32bit ints + */ +PORT_UTIL_INLINE NvU32 portUtilCountTrailingZeros32(NvU32 n); + +/// @} End core functions + +#include /* NULL */ + +/** + * @name Extended Functions + * @{ + */ + +/** + * @brief Returns a return address up the stack of the current function. + * + * @param level The number of levels up the stack to go. + * level == 0 - Gives the current IP. + * level == 1 - The current function's return address, same as + * @ref portUtilGetReturnAddress + */ +NV_NOINLINE NvUPtr portUtilExGetStackTrace(NvU32 level); + +#define portUtilExSpinNs_SUPPORTED PORT_IS_MODULE_SUPPORTED(time) +#define portUtilExDelayMs_SUPPORTED PORT_IS_MODULE_SUPPORTED(time) + +/** + * @brief Spin for the given amount of nanoseconds. + * + * Utilizes @ref portUtilSpin to spin for the given duration, without putting + * the thread to sleep. + */ +void portUtilExSpinNs(NvU32 nanoseconds); + +/** + * @brief Delay the thread execution for the given duration in milliseconds. + * + * Unlike @ref portUtilSpinNs, this function may put the thread to sleep. + */ +void portUtilExDelayMs(NvU32 milliseconds); + +#if (NVCPU_IS_FAMILY_X86 || NVCPU_IS_PPC64LE || NVCPU_IS_PPC || NVCPU_IS_AARCH64) && !defined(NV_MODS) +/** + * @brief Gets the Time stamp counter. + * + * @note This function is not serialized, and can be reorder by cpu or compiler. + * @note On Intel "pre-Nehalem multi-core" cpu and all multi-socket cpu, time + * may not be synced on all the cores and this function may return timestamps + * that are not monotonically increasing. + * @note On some old Intel cpus (P3/P4), timestamp counter was not incremented + * at a fixed clock rate, but Intel fixed this with "invariant TSC" in late P4+ + * chips. + */ +PORT_UTIL_INLINE NvU64 portUtilExReadTimestampCounter(void); +#endif + +#if NVCPU_IS_FAMILY_X86 && !defined(NV_MODS) && PORT_IS_MODULE_SUPPORTED(atomic) +/** + * @brief Gets the Time stamp counter. + * + * Unlike @ref portUtilExReadTimestampCounter, this function serializes + * the reading of time stamp counter to prevent both compiler and cpu + * reordering. + * @note Other than serialization issue, this function has same issues as + * @ref portUtilExReadTimestampCounter. 
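+ *
+ * A minimal sketch of timing a code section in raw TSC cycles with the
+ * serialized variant (converting cycles to wall-clock time is platform
+ * specific and not shown):
+ *
+ * ~~~{.c}
+ * NvU64 start, cycles;
+ *
+ * start  = portUtilExReadTimestampCounterSerialized();
+ * // ... code being measured ...
+ * cycles = portUtilExReadTimestampCounterSerialized() - start;
+ * ~~~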
+ */ +static NV_FORCEINLINE NvU64 portUtilExReadTimestampCounterSerialized(void); +#endif +/// @} End extended functions + +#include "nvport/inline/util_generic.h" +#include "nvport/inline/util_valist.h" + +#endif // _NVPORT_UTIL_H_ +/// @} diff --git a/src/nvidia/inc/libraries/poolalloc.h b/src/nvidia/inc/libraries/poolalloc.h new file mode 100644 index 000000000..c1ef41831 --- /dev/null +++ b/src/nvidia/inc/libraries/poolalloc.h @@ -0,0 +1,289 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file poolalloc.h + * @brief This file contains the interfaces for pool allocator. + * A chained sub-allocator originally designed to sub-allocate GPU + * frame buffer given out by PMA (physical memory allocator). + * + * The only requirement of a node in the chained allocator is that the ratio + * between upSTreamPageSize and allocPageSize is less or equal to 64. + * + * @bug Make more abstract -- fix up the variable names + */ + + +#ifndef _NV_POOLALLOC_H_ +#define _NV_POOLALLOC_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvport/nvport.h" +#include "containers/list.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct poolnode POOLNODE; + +/*! + * Each node corresponds to one page of upStreamPageSize + * The pool allocator sub-allocates from each of these pages. + */ +struct poolnode +{ + NvU64 pageAddr; // Address of the page to sub-allocate + NvU64 bitmap; // The bit map for this page. Only used if the + // node represents a partially allocated node + POOLNODE *pParent; // The upstream pool node in case this node is + // allocated from the upper pool. + ListNode node; // For intrusive lists. +}; + +MAKE_INTRUSIVE_LIST(PoolNodeList, POOLNODE, node); + +/*! + * The handle contains a generic metadata field that is needed for fast + * access. In the case of a linked list implementation of the pool allocator, + * the metadata is the pointer to the node that contains the page it was + * sub-allocated from + */ +typedef struct poolallocHandle +{ + NvU64 address; // The base address for this chunk + void *pMetadata; // The opaque metadata for storing necessary info +} POOLALLOC_HANDLE; + + +// non-intrusive list of page handles +MAKE_LIST(PoolPageHandleList, POOLALLOC_HANDLE); + +/*! 
+ * @brief Callback function to upstream allocators for allocating new pages
+ *
+ * This function only allocates one page at a time right now
+ *
+ * @param[in]  ctxPtr   Provides context to upstream allocator
+ * @param[in]  pageSize Not really needed. For debugging only
+ * @param[out] pPage    The output page handle from upstream
+ *
+ * @return NV_OK if the page was successfully allocated
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid or size info is not a
+ *         multiple of SMALLEST_PAGE_SIZE
+ *
+ */
+typedef NV_STATUS (*allocCallback_t)(void *ctxPtr, NvU64 pageSize,
+                                     POOLALLOC_HANDLE *pPage);
+
+/*!
+ * @brief Callback function to upstream allocators for freeing unused pages
+ *
+ * This function only frees one page at a time right now
+ *
+ * @param[in] ctxPtr   Provides context to upstream allocator
+ * @param[in] pageSize Not really needed. For debugging only
+ * @param[in] pPage    The input page handle to be freed
+ *
+ */
+typedef void (*freeCallback_t)(void *ctxPtr, NvU64 pageSize, POOLALLOC_HANDLE *pPage);
+
+/*!
+ * Structure representing a pool.
+ */
+typedef struct poolalloc
+{
+    PoolNodeList freeList;      // List of nodes representing free pages
+    PoolNodeList fullList;      // List of nodes representing fully allocated pages
+    PoolNodeList partialList;   // List of nodes representing partially allocated pages
+
+    PORT_MEM_ALLOCATOR *pAllocator;
+
+    struct
+    {
+        allocCallback_t allocCb;  // Callback to upstream allocator
+        freeCallback_t freeCb;    // Callback to free pages
+        void *pUpstreamCtx;       // The context to pass to upstream allocator
+    } callBackInfo;
+
+    NvU32 upstreamPageSize;     // Page size for upstream allocations
+    NvU32 allocPageSize;        // Page size to give out
+    NvU32 ratio;                // Ratio == upstreamPageSize / allocPageSize
+    NvU32 flags;                // POOLALLOC_FLAGS_*
+} POOLALLOC;
+
+
+/*!
+ * Dump the lists maintained by the pools.
+ */
+void poolAllocPrint(POOLALLOC *pPool);
+
+/*!
+ * If _AUTO_POPULATE is set to ENABLE, poolAllocate will call the upstream
+ * function to repopulate the pool when it runs out of memory. If set to
+ * DISABLE, poolAllocate will fail when the pool runs out of memory.
+ * This is disabled by default for use cases like page tables or context
+ * buffers, because the upstream function can call into PMA with the GPU lock
+ * held, which has a possibility of deadlocking.
+ */
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE 1:0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_DEFAULT 0x0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_DISABLE 0x0
+#define NV_RMPOOL_FLAGS_AUTO_POPULATE_ENABLE 0x1
+
+/*!
+ * @brief This function initializes a pool allocator object
+ *
+ * This function establishes a link from this allocator to its upstream
+ * allocator by registering a callback function that lazily allocates memory
+ * if needed.
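+ *
+ * As a rough sketch (the upstream callback and context names below are
+ * placeholders, not part of this interface), a pool handing out 4 KB pages
+ * carved from 64 KB upstream pages could be set up and used as follows:
+ *
+ * ~~~{.c}
+ * POOLALLOC *pPool = poolInitialize(64 * 1024, 4 * 1024,
+ *                                   myUpstreamAlloc,  // allocCallback_t
+ *                                   myUpstreamFree,   // freeCallback_t
+ *                                   pMyUpstreamCtx,   // passed back to the callbacks
+ *                                   pPortAllocator,   // PORT_MEM_ALLOCATOR for bookkeeping
+ *                                   0);               // AUTO_POPULATE left at its default (disabled)
+ * if (pPool != NULL)
+ * {
+ *     POOLALLOC_HANDLE handle;
+ *     if ((poolReserve(pPool, 4) == NV_OK) &&
+ *         (poolAllocate(pPool, &handle) == NV_OK))
+ *     {
+ *         // ... use handle.address ...
+ *         poolFree(pPool, &handle);
+ *     }
+ *     poolDestroy(pPool);
+ * }
+ * ~~~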
+ *
+ * @param[in] upstreamPageSize The page size granularity managed by
+ *                             the allocator
+ * @param[in] allocPageSize    The page size to hand out
+ * @param[in] allocCb          The allocation callback function
+ * @param[in] freeCb           The free callback function
+ * @param[in] pUpstreamCtxPtr  The context pointer for the upstream
+ *                             allocator, passed back on callback
+ * @param[in] pAllocator       The PORT_MEM_ALLOCATOR used for the pool's
+ *                             internal structures
+ * @param[in] flags            POOLALLOC_FLAGS_*
+ *
+ * @return A pointer to a POOLALLOC structure if the initialization
+ *         succeeded; NULL otherwise
+ *
+ */
+
+POOLALLOC *poolInitialize(NvU32 upstreamPageSize, NvU32 allocPageSize,
+    allocCallback_t allocCb, freeCallback_t freeCb, void *pUpstreamCtxPtr,
+    PORT_MEM_ALLOCATOR *pAllocator, NvU32 flags);
+
+
+/*!
+ * @brief Reserves numPages pages from the upstream allocator. After the call,
+ * freeListSize will be greater than or equal to numPages.
+ *
+ * Since it will call into the upstream allocator, the page size of those
+ * pages will be the upstream page size.
+ *
+ * @param[in] pPool    The pool allocator
+ * @param[in] numPages Number of pages to reserve
+ *
+ * @return NV_OK if successful
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid
+ *
+ */
+NV_STATUS poolReserve(POOLALLOC *pPool, NvU64 numPages);
+
+
+/*!
+ * @brief This call will give back any free pages. After the call,
+ * freeListSize will be less than or equal to preserveNum.
+ *
+ * If the allocator holds preserveNum pages or fewer before the call, this
+ * function simply returns.
+ *
+ * @param[in] pPool       The pool allocator to trim from
+ * @param[in] preserveNum The number of pages that we try to preserve
+ */
+void poolTrim(POOLALLOC *pPool, NvU64 preserveNum);
+
+
+/*!
+ * @brief This function allocates memory from the allocator and returns one
+ * page of the fixed allocPageSize as specified in the initialization function
+ *
+ * The implementation does not guarantee the allocated pages are contiguous.
+ * Although there are no synchronization issues, if two allocations happen to
+ * lie across an upstream page boundary, the allocation will most likely be
+ * discontiguous.
+ *
+ * This function will also call back to the upstream allocator to get more
+ * pages if it does not have enough pages already reserved.
+ *
+ * @param[in]  pPool       The pool allocator
+ * @param[out] pPageHandle The allocation handle that contains address and
+ *                         metadata for optimization
+ *
+ * @return NV_OK if successful
+ *         NV_ERR_NO_MEMORY if allocator cannot allocate enough backing
+ *         NV_ERR_BAD_PARAM if any parameter is invalid
+ */
+NV_STATUS poolAllocate(POOLALLOC *pPool, POOLALLOC_HANDLE *pPageHandle);
+
+
+/*!
+ * @brief This function allocates memory from the allocator and returns numPages
+ * of the fixed allocPageSize as specified in the initialization function
+ *
+ * These pages are allocated contiguously and the single start address is
+ * returned. Although there are no synchronization issues, if the allocations
+ * happen to lie across upstream page boundaries, the allocation will most
+ * likely be discontiguous.
+ *
+ * This function will not call back to the upstream allocator to get more
+ * pages, as it relies on a single chunk of free pages to make contiguous
+ * allocations.
+ * So the max number of pages that can be allocated contiguously is the number of pages + * fit in upstream page size i.e the "ratio" of this pool + * + * @param[in] pPool The pool allocator + * @param[in] numPages The number of pages requested to be allocated + * @param[out] pPageHandleList The allocation handles that contain addresses and + * metadata for optimization + * + * @return NV_OK if successful + * NV_ERR_NO_MEMORY if allocator cannot allocate enough backing + * NV_ERR_BAD_PARAM if any parameter is invalid + */ +NV_STATUS poolAllocateContig(POOLALLOC *pPool, NvU32 numPages, PoolPageHandleList *pPageHandleList); + +/*! + * @brief This function frees the page based on the allocPageSize + * + * @param[in] pPool The pool allocator + * @param[out] pPageHandle The allocation handle that contains address and + * metadata for optimization + * + */ +void poolFree(POOLALLOC *pPool, POOLALLOC_HANDLE *pPageHandle); + + +/*! + * @brief Destroys the pool allocator and frees memory + */ +void poolDestroy(POOLALLOC *pPool); + +/*! + * @briefs Returns the lengths of a pool's lists + */ +void poolGetListLength(POOLALLOC *pPool, NvU32 *pFreeListLength, + NvU32 *pPartialListLength, NvU32 *pFullListLength); + +#ifdef __cplusplus +} +#endif + +#endif /* _NV_POOLALLOC_H_ */ diff --git a/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h b/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h new file mode 100644 index 000000000..e574ca395 --- /dev/null +++ b/src/nvidia/inc/libraries/prereq_tracker/prereq_tracker.h @@ -0,0 +1,3 @@ + +#include "g_prereq_tracker_nvoc.h" + diff --git a/src/nvidia/inc/libraries/resserv/resserv.h b/src/nvidia/inc/libraries/resserv/resserv.h new file mode 100644 index 000000000..7407e1cc3 --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/resserv.h @@ -0,0 +1,372 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_resserv_nvoc.h" + +#ifndef _RESSERV_H_ +#define _RESSERV_H_ + +#include "nvoc/object.h" + +#include "containers/list.h" +#include "containers/map.h" +#include "containers/multimap.h" + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvos.h" +#include "nvsecurityinfo.h" +#include "rs_access.h" + +#if LOCK_VAL_ENABLED +#include "lockval/lockval.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if (RS_STANDALONE) +#include + +#ifndef NV_PRINTF +extern int g_debugLevel; +#define NV_PRINTF(level, format, ...) if (g_debugLevel) { printf(format, ##__VA_ARGS__); } +#endif +#include "utils/nvprintf.h" +#endif + +// +// Forward declarations +// +typedef struct RsServer RsServer; +typedef struct RsDomain RsDomain; +typedef struct CLIENT_ENTRY CLIENT_ENTRY; +typedef struct RsResourceDep RsResourceDep; +typedef struct RsResourceRef RsResourceRef; +typedef struct RsInterMapping RsInterMapping; +typedef struct RsCpuMapping RsCpuMapping; + +// RS-TODO INTERNAL and EXTERNAL params should be different structures +typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS_INTERNAL; +typedef struct RS_CLIENT_FREE_PARAMS_INTERNAL RS_CLIENT_FREE_PARAMS; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS_INTERNAL; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_RES_ALLOC_PARAMS; +typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS_INTERNAL; +typedef struct RS_RES_DUP_PARAMS_INTERNAL RS_RES_DUP_PARAMS; +typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS_INTERNAL; +typedef struct RS_RES_SHARE_PARAMS_INTERNAL RS_RES_SHARE_PARAMS; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS_INTERNAL; +typedef struct RS_RES_ALLOC_PARAMS_INTERNAL RS_CLIENT_ALLOC_PARAMS; +typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS_INTERNAL; +typedef struct RS_RES_FREE_PARAMS_INTERNAL RS_RES_FREE_PARAMS; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS_INTERNAL; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_RES_CONTROL_PARAMS; +typedef struct RS_RES_CONTROL_PARAMS_INTERNAL RS_LEGACY_CONTROL_PARAMS; +typedef struct RS_LEGACY_ALLOC_PARAMS RS_LEGACY_ALLOC_PARAMS; +typedef struct RS_LEGACY_FREE_PARAMS RS_LEGACY_FREE_PARAMS; + +typedef struct RS_CPU_MAP_PARAMS RS_CPU_MAP_PARAMS; +typedef struct RS_CPU_UNMAP_PARAMS RS_CPU_UNMAP_PARAMS; +typedef struct RS_INTER_MAP_PARAMS RS_INTER_MAP_PARAMS; +typedef struct RS_INTER_UNMAP_PARAMS RS_INTER_UNMAP_PARAMS; + +// Forward declarations for structs defined by user +typedef struct RS_RES_MAP_TO_PARAMS RS_RES_MAP_TO_PARAMS; +typedef struct RS_RES_UNMAP_FROM_PARAMS RS_RES_UNMAP_FROM_PARAMS; +typedef struct RS_INTER_MAP_PRIVATE RS_INTER_MAP_PRIVATE; +typedef struct RS_INTER_UNMAP_PRIVATE RS_INTER_UNMAP_PRIVATE; +typedef struct RS_CPU_MAPPING_PRIVATE RS_CPU_MAPPING_PRIVATE; + +typedef struct RS_CPU_MAPPING_BACK_REF RS_CPU_MAPPING_BACK_REF; +typedef struct RS_INTER_MAPPING_BACK_REF RS_INTER_MAPPING_BACK_REF; +typedef struct RS_FREE_STACK RS_FREE_STACK; +typedef struct CALL_CONTEXT CALL_CONTEXT; +typedef struct ACCESS_CONTROL ACCESS_CONTROL; +typedef struct RS_ITERATOR RS_ITERATOR; +typedef struct RS_ORDERED_ITERATOR RS_ORDERED_ITERATOR; +typedef struct RS_SHARE_ITERATOR RS_SHARE_ITERATOR; +typedef struct API_STATE API_STATE; +typedef struct RS_LOCK_INFO RS_LOCK_INFO; +typedef struct RS_CONTROL_COOKIE RS_CONTROL_COOKIE; +typedef NV_STATUS RsCtrlFunc(struct RS_RES_CONTROL_PARAMS_INTERNAL*); + +class RsClient; +class RsResource; +class RsShared; + +MAKE_LIST(RsResourceRefList, 
RsResourceRef*); +MAKE_LIST(RsResourceList, RsResource*); +MAKE_LIST(RsHandleList, NvHandle); +MAKE_LIST(RsClientList, CLIENT_ENTRY*); +MAKE_LIST(RsShareList, RS_SHARE_POLICY); +MAKE_MULTIMAP(RsIndex, RsResourceRef*); + +typedef NV_STATUS (*CtrlImpl_t)(RsClient*, RsResource*, void*); + +typedef void *PUID_TOKEN; + +// +// Defines +// + +/// Domain handles must start at this base value +#define RS_DOMAIN_HANDLE_BASE 0xD0D00000 + +/// Client handles must start at this base value +#define RS_CLIENT_HANDLE_BASE 0xC1D00000 + +/// +/// Internal Client handles must start at this base value +/// at either of these two bases +/// +#define RS_CLIENT_INTERNAL_HANDLE_BASE 0xC1E00000 + +#define RS_CLIENT_INTERNAL_HANDLE_BASE_EX 0xC1F00000 + +// +// Print a warning if any client's resource count exceeds this +// threshold. Unless this was intentional, this is likely a client bug. +// +#define RS_CLIENT_RESOURCE_WARNING_THRESHOLD 100000 + + +/// 0xFFFF max client handles. +#define RS_CLIENT_HANDLE_BUCKET_COUNT 0x400 // 1024 +#define RS_CLIENT_HANDLE_BUCKET_MASK 0x3FF + + +/// The default maximum number of domains a resource server can allocate +#define RS_MAX_DOMAINS_DEFAULT 4096 + +/// The maximum length of a line of ancestry for resource references +#define RS_MAX_RESOURCE_DEPTH 6 + +/// RS_LOCK_FLAGS +#define RS_LOCK_FLAGS_NO_TOP_LOCK NVBIT(0) +#define RS_LOCK_FLAGS_NO_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_FLAGS_NO_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK NVBIT(5) +#define RS_LOCK_FLAGS_FREE_SESSION_LOCK NVBIT(6) + +/// RS_LOCK_STATE +#define RS_LOCK_STATE_TOP_LOCK_ACQUIRED NVBIT(0) +#define RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED NVBIT(1) +#define RS_LOCK_STATE_CUSTOM_LOCK_2_ACQUIRED NVBIT(2) +#define RS_LOCK_STATE_CUSTOM_LOCK_3_ACQUIRED NVBIT(3) +#define RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK NVBIT(6) +#define RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED NVBIT(7) +#define RS_LOCK_STATE_SESSION_LOCK_ACQUIRED NVBIT(8) + +/// RS_LOCK_RELEASE +#define RS_LOCK_RELEASE_TOP_LOCK NVBIT(0) +#define RS_LOCK_RELEASE_CLIENT_LOCK NVBIT(1) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_1 NVBIT(2) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_2 NVBIT(3) +#define RS_LOCK_RELEASE_CUSTOM_LOCK_3 NVBIT(4) +#define RS_LOCK_RELEASE_SESSION_LOCK NVBIT(5) + +/// API enumerations used for locking knobs +typedef enum +{ + RS_LOCK_CLIENT =0, + RS_LOCK_TOP =1, + RS_LOCK_RESOURCE =2, + RS_LOCK_CUSTOM_3 =3, +} RS_LOCK_ENUM; + +typedef enum +{ + RS_API_ALLOC_CLIENT = 0, + RS_API_ALLOC_RESOURCE = 1, + RS_API_FREE_RESOURCE = 2, + RS_API_MAP = 3, + RS_API_UNMAP = 4, + RS_API_INTER_MAP = 5, + RS_API_INTER_UNMAP = 6, + RS_API_COPY = 7, + RS_API_SHARE = 8, + RS_API_CTRL = 9, + RS_API_MAX, +} RS_API_ENUM; + +NV_STATUS indexAdd(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); +NV_STATUS indexRemove(RsIndex *pIndex, NvU32 index, RsResourceRef *pResourceRef); + +// +// Externs +// +/** + * NVOC wrapper for constructing resources of a given type + * + * @param[in] pAllocator Allocator for the resource object + * @param[in] pCallContext Caller context passed to resource constructor + * @param[inout] pParams Resource allocation parameters + * @param[out] ppResource New resource object + */ +extern NV_STATUS resservResourceFactory(PORT_MEM_ALLOCATOR *pAllocator, CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, RsResource **ppResource); + +/** + * NVOC wrapper for constructing an application-specific client. 
+ */ +extern NV_STATUS resservClientFactory(PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams, RsClient **ppRsClient); + +/** + * Validate the UID/PID security token of the current user against a client's security token. + * + * This will be obsolete after phase 1. + * + * @param[in] pClientToken + * @param[in] pCurrentToken + * + * @returns NV_OK if the current user's security token matches the client's security token + */ +extern NV_STATUS osValidateClientTokens(PSECURITY_TOKEN pClientToken, PSECURITY_TOKEN pCurrentToken); + +/** + * Get the security token of the current user for the UID/PID security model. + * + * This will be obsolete after phase 1. + */ +extern PSECURITY_TOKEN osGetSecurityToken(void); + +/** + * TLS entry id for call contexts. All servers will use the same id. + */ +#define TLS_ENTRY_ID_RESSERV_CALL_CONTEXT TLS_ENTRY_ID_RESSERV_1 + +// +// Structs +// +struct RS_FREE_STACK +{ + RS_FREE_STACK *pPrev; + RsResourceRef *pResourceRef; +}; + +struct CALL_CONTEXT +{ + RsServer *pServer; ///< The resource server instance that owns the client + RsClient *pClient; ///< Client that was the target of the call + RsResourceRef *pResourceRef; ///< Reference that was the target of the call + RsResourceRef *pContextRef; ///< Reference that may be used to provide more context [optional] + RS_LOCK_INFO *pLockInfo; ///< Saved locking context information for the call + API_SECURITY_INFO secInfo; + RS_RES_CONTROL_PARAMS_INTERNAL *pControlParams; ///< parameters of the call [optional] +}; + +typedef enum { + RS_ITERATE_CHILDREN, ///< Iterate over a RsResourceRef's children + RS_ITERATE_DESCENDANTS, ///< Iterate over a RsResourceRef's children, grandchildren, etc. (unspecified order) + RS_ITERATE_CACHED, ///< Iterate over a RsResourceRef's cache + RS_ITERATE_DEPENDANTS, ///< Iterate over a RsResourceRef's dependants +} RS_ITER_TYPE; + +typedef enum +{ + LOCK_ACCESS_READ, + LOCK_ACCESS_WRITE, +} LOCK_ACCESS_TYPE; + + + +/** + * Access control information. This information will be filled out by the user + * of the Resource Server when allocating a client or resource. 
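+ *
+ * A minimal sketch of filling this in for an unprivileged caller
+ * (RS_PRIV_LEVEL_USER is assumed to be one of the RS_PRIV_LEVEL enumerators
+ * defined outside this header):
+ *
+ * ~~~{.c}
+ * ACCESS_CONTROL accessControl;
+ *
+ * accessControl.privilegeLevel = RS_PRIV_LEVEL_USER;    // assumed enumerator
+ * accessControl.pSecurityToken = osGetSecurityToken();  // UID/PID security model token
+ * ~~~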
+ */ +struct ACCESS_CONTROL +{ + /** + * The privilege level of this access control + */ + RS_PRIV_LEVEL privilegeLevel; + + /** + * Opaque pointer for storing a security token + */ + PSECURITY_TOKEN pSecurityToken; +}; + +// +// Utility wrappers for locking validator +// +#if LOCK_VAL_ENABLED +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) \ + do { NV_ASSERT_OK(lockvalLockInit((lock), (lockClass), (inst))); } while(0) + +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do \ +{ \ + NV_ASSERT_OK(lockvalPreAcquire((validator))); \ + portSyncRwLockAcquireRead((lock)); \ + lockvalPostAcquire((validator), LOCK_VAL_RLOCK); \ +} while(0) + +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do \ +{ \ + NV_ASSERT_OK(lockvalPreAcquire((validator))); \ + portSyncRwLockAcquireWrite((lock)); \ + lockvalPostAcquire((validator), LOCK_VAL_WLOCK); \ +} while(0) + +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_RLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseRead((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do \ +{ \ + void *pLockValTlsEntry, *pReleasedLockNode; \ + if (bOutOfOrder) \ + NV_ASSERT_OK(lockvalReleaseOutOfOrder((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + else \ + NV_ASSERT_OK(lockvalRelease((validator), LOCK_VAL_WLOCK, &pLockValTlsEntry, &pReleasedLockNode)); \ + portSyncRwLockReleaseWrite((lock)); \ + lockvalMemoryRelease(pLockValTlsEntry, pReleasedLockNode); \ +} while(0) + +#else +#define RS_LOCK_VALIDATOR_INIT(lock, lockClass, inst) +#define RS_RWLOCK_ACQUIRE_READ(lock, validator) do { portSyncRwLockAcquireRead((lock)); } while(0) +#define RS_RWLOCK_ACQUIRE_WRITE(lock, validator) do { portSyncRwLockAcquireWrite((lock)); } while(0) +#define RS_RWLOCK_RELEASE_READ_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseRead((lock)); } while(0) +#define RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, bOutOfOrder) do { portSyncRwLockReleaseWrite((lock)); } while(0) +#endif + +#define RS_RWLOCK_RELEASE_READ(lock, validator) RS_RWLOCK_RELEASE_READ_EXT(lock, validator, NV_FALSE) +#define RS_RWLOCK_RELEASE_WRITE(lock, validator) RS_RWLOCK_RELEASE_WRITE_EXT(lock, validator, NV_FALSE) + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/resserv/rs_access_map.h b/src/nvidia/inc/libraries/resserv/rs_access_map.h new file mode 100644 index 000000000..e75da749a --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/rs_access_map.h @@ -0,0 +1,234 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef RS_ACCESS_MAP_H +#define RS_ACCESS_MAP_H + +#include "nvstatus.h" +#include "nvtypes.h" + +#include "containers/map.h" +#include "resserv/resserv.h" +#include "resserv/rs_access_rights.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*! + * @brief Returns the client's access mask for the resource, returning NULL if + * the resource is not owned by the client + * + * @param[in] pResourceRef The resource whose access mask is being checked + * @param[in] pClient The client accessing the resource + * + * @return The resource's access rights mask, or + * @return NULL if pClient does not own pResourceRef + */ +RS_ACCESS_MASK *rsAccessLookup(RsResourceRef *pResourceRef, RsClient *pClient); + +/*! + * @brief Fills a mask with all rights available to a client on a resource + * Gets both rights directly available through the access mask, + * as well as rights shared by the resource. + * + * @param[in] pResourceRef + * @param[in] pClient + * @param[out] pRightsShared The set of access rights available for this client on this resource + * + * @return none + */ +void rsAccessGetAvailableRights(RsResourceRef *pResourceRef, RsClient *pClient, + RS_ACCESS_MASK *pAvailableRights); + +/*! + * @brief Perform an access rights check on a target resource + * + * This function should be used to determine whether sufficient access + * rights are already present, NOT whether access rights should be granted. + * It will not update any state on its own. + * + * For each of the required access rights, the invoking client must hold + * that access right on the target resource. + * + * @param[in] pResourceRef A reference to the target resource for which we are + * checking access rights + * @param[in] pInvokingClient The client that is requesting access rights + * @param[in] pRightsRequired The set of access rights that the invoking client + * should have on the target resource + * + * @return NV_OK if the invoking client has the required access rights on the + * target resource + * @return NV_ERR_INSUFFICIENT_PERMISSIONS if the invoking client does not have + * the required access rights on the target resource + */ +NV_STATUS rsAccessCheckRights(RsResourceRef *pResourceRef, RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequired); + +/*! + * @brief Update what access rights are currently owned on a target resource + * based on the target resource's current privilege. 
+ * + * This function should be used to update the access rights currently owned + * by the target resource. Most access rights are only obtained once and don't + * disappear/reappear. However, the RS_ACCESS_FLAG_UNCACHED_CHECK flag can be + * used to indicate access rights that are present/not present based on the target + * resource's current level of privilege, NOT what the level of privilege was when + * the access right was initially requested. This function is useful for updating the + * which access rights are owned when accounting for uncached access rights. + * + * @param[in] pResourceRef A reference to the target resource for which we are + * checking access rights + * @param[in] pInvokingClient The client to check level of access with + * @param[in] pRightsToUpdate If non-NULL, only access rights set in this mask + * will be updated + * + * @return none + */ +void rsAccessUpdateRights(RsResourceRef *pResourceRef, RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsToUpdate); + +/*! + * @brief Searches a resource's share list for an entry equal to the + * passed in share policy, as defined by rsSharePolicyEquals + * + * @param[in] pShareList The RsShareList to be searched + * @param[in] pSharePolicy The policy to be found + * + * @return A pointer to the corresponding policy, or + * @return NULL if no matching entry is found + */ +RS_SHARE_POLICY *rsShareListLookup(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy); + +/*! + * @brief Adds a new share policy to a resource's share list, or merges into + * an existing policy, if possible + * + * @param[in] pShareList The RsShareList to be searched + * @param[in] pSharePolicy The policy to be added to the list, may be merged with + * another policy with a matching pSharePolicy->type and ->target. + * In this case, ->accessMask for the existing entry and the + * new pSharePolicy will be merged with a union operation. + * @param[out] pAccessMask The rights now shared for this share policy, may or + * may not match pSharePolicy->accessMask if merged with an existing policy. + * User may pass NULL, in which case nothing is written into this. + * + * @return NV_OK if the operation succeeded, + * @return NV_ERR_NO_MEMORY if a new element needed to be added to the list, but + * insufficient memory was present to allocate one + */ +NV_STATUS rsShareListInsert(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy, + RS_ACCESS_MASK *pAccessMask); + +/*! + * @brief Removes certain rights from being shared in a share policy entry + * from a resource's RsShareList. + * + * @param[in] pShareList The RsShareList to be searched + * @param[in] pSharePolicy The policy to be removed from the list, matched using + * pSharePolicy->type and ->target. Only rights specified in + * pSharePolicy->accessMask are revoked, others will remain. + * Use RS_ACCESS_MASK_FILL to for a full mask to revoke all rights. + * @param[out] pAccessMask The rights still shared for this share policy, may or + * may not be empty. + * User may pass NULL, in which case nothing is written into this. + * + * @return none + */ +void rsShareListRemove(RsShareList *pShareList, RS_SHARE_POLICY *pSharePolicy, + RS_ACCESS_MASK *pAccessMask); + +/*! + * @brief Copy one share list into another + * Note that this does not replace the Dst list if it is not empty, + * elements will be appended onto any existing list. 
+ * + * @param[in] pShareListDst The list to copy into + * @param[in] pShareListSrc The list to copy from + * + * @return NV_ERR_NO_MEMORY, NV_OK + */ +NV_STATUS rsShareListCopy(RsShareList *pShareListDst, RsShareList *pShareListSrc); + +/*! + * @brief Returns the list which should be used for a resource's sharing + * In order, selects either the resource's own list, the client's inherited + * list, or the server's global default list. + * + * @param[in] pResourceRef + * @param[in] pServer + * + * @return A pointer to the relevant share list + * @return NULL if no list is available, and no pServer was provided. + */ +RsShareList * rsAccessGetActiveShareList(RsResourceRef *pResourceRef, RsServer *pServer); + +/*! + * @brief Attempts to grant a set of requested access rights on this resource. + * + * This function will attempt to grant the rights specified in pRightsRequested + * to the client referred to by pClient. If successful, it will update the + * access rights of the target resource referred to by pResourceRef. + * + * The resAccessCallback method on the target resource will be invoked to + * perform checks. This requires that the target resource + * pResourceRef->pResource already be initialized. + * + * If pRightsRequested is non-NULL, then the call will return an error code if + * it is unable to grant any of the requested rights. + * + * If pRightsRequested is NULL, then the call will ignore any failure to + * grant, taking a "best-effort" approach to granting access rights. The + * rights requested will be determined as follows: + * + * - If pResourceRef is a client resource, the function will attempt to + * request all possible access rights + * - For any other resource, the function will attempt to request the + * same set of access rights held by the invoking client on the parent + * resource + * + * @param[in] pResourceRef The target resource reference on which access + * rights will be granted + * @param[in] pCallContext Information about the call context + * @param[in] pInvokingClient The client requesting the access right + * @param[in] pRightsRequested The set of access rights to attempt to grant, + * or NULL if no access rights were explicitly requested + * @param[in] pRightsRequired Any rights additionally required for the operation, + * will be requested if pRightsRequested is not specified. + * If specified, all rights in this mask must be granted for the call to succeed. + * @param[in] pAllocParams per-class allocation parameters passed into Alloc, + * NULL if this is not being called from the Alloc path. + * + * @return NV_OK if the access right should be granted + * @return NV_ERR_INSUFFICIENT_PERMISSIONS if access rights were + * explicitly requested, and the function failed to grant all of the + * requested access rights + */ +NV_STATUS rsAccessGrantRights(RsResourceRef *pResourceRef, CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, const RS_ACCESS_MASK *pRightsRequested, + const RS_ACCESS_MASK *pRightsRequired, void *pAllocParams); + +#ifdef __cplusplus +} +#endif + +#endif /* RS_ACCESS_MAP_H */ diff --git a/src/nvidia/inc/libraries/resserv/rs_access_rights.h b/src/nvidia/inc/libraries/resserv/rs_access_rights.h new file mode 100644 index 000000000..9ff6397e0 --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/rs_access_rights.h @@ -0,0 +1,167 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef RS_ACCESS_RIGHTS_H +#define RS_ACCESS_RIGHTS_H + +#include "nvstatus.h" +#include "nvtypes.h" +#include "nvmisc.h" + +// Part of this header in userspace, at sdk/nvidia/inc/rs_access.h +#include "rs_access.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/****************************************************************************/ +/* Access right flags */ +/****************************************************************************/ + +// +// The meaning of each access right flag is documented in +// drivers/resman/docs/rmapi/resource_server/rm_capabilities.adoc +// + +#define RS_ACCESS_FLAG_NONE 0U +#define RS_ACCESS_FLAG_ALLOW_KERNEL_PRIVILEGED NVBIT(1) +#define RS_ACCESS_FLAG_ALLOW_PRIVILEGED NVBIT(2) +#define RS_ACCESS_FLAG_UNCACHED_CHECK NVBIT(3) +#define RS_ACCESS_FLAG_ALLOW_OWNER NVBIT(4) + + +/****************************************************************************/ +/* Access right metadata */ +/****************************************************************************/ + +/*! + * @brief Metadata about each access right + * + * The ith entry in this array represents access right i. + */ +extern const RS_ACCESS_INFO g_rsAccessMetadata[RS_ACCESS_COUNT]; + + +/****************************************************************************/ +/* Access right macros */ +/****************************************************************************/ + +/*! + * @brief Initializer for an access mask. Avoid use if possible. + * + * To initialize an access mask, if possible, first zero-initialize it then + * add specific access rights at runtime. Zero-initialization can be performed + * with the RS_ACCESS_MASK_EMPTY static initializer, the RS_ACCESS_MASK_CLEAR() macro, + * or a memset. + * + * Only use this when a static initializer is TRULY needed, and when the code is + * generated by a script, not hardcoded. For instance, this is useful when + * statically initializing control call table entries. + * + * The ith argument will directly initialize the ith limb. An access right A + * should be placed in the limb SDK_RS_ACCESS_LIMB_INDEX(A). Each limb should be a + * mask of flags, where each flag is in the form SDK_RS_ACCESS_OFFSET_MASK(A), or 0 + * to indicate no flags. 
+ *
+ * For example, suppose we have access rights A, B, and C, where
+ *
+ *     SDK_RS_ACCESS_LIMB_INDEX(A) == 0
+ *     SDK_RS_ACCESS_LIMB_INDEX(B) == 2
+ *     SDK_RS_ACCESS_LIMB_INDEX(C) == 2
+ *
+ * In this case, the appropriate way to initialize a mask containing all
+ * three access rights is:
+ *
+ *     RS_ACCESS_MASK mask = RS_ACCESS_MASK_INITIALIZER
+ *     (
+ *         SDK_RS_ACCESS_OFFSET_MASK(A),
+ *         0,
+ *         SDK_RS_ACCESS_OFFSET_MASK(B) | SDK_RS_ACCESS_OFFSET_MASK(C)
+ *     );
+ */
+#define RS_ACCESS_MASK_INITIALIZER(...) { { __VA_ARGS__ } }
+
+/*!
+ * @brief Empty initializer for an access mask.
+ *
+ * An example of usage is as follows:
+ *
+ *     RS_ACCESS_MASK mask = RS_ACCESS_MASK_EMPTY;
+ */
+#define RS_ACCESS_MASK_EMPTY RS_ACCESS_MASK_INITIALIZER(0)
+
+
+/****************************************************************************/
+/*                         Access right functions                          */
+/****************************************************************************/
+
+/*!
+ * @brief Checks if one access rights mask is a subset of another
+ *
+ * @param[in] pRightsPresent The access rights that are held by some actor
+ * @param[in] pRightsRequired The access rights that must be a subset of
+ *                            the rights in pRightsPresent
+ *
+ * @return NV_TRUE if each of the access rights in pRightsRequired is also
+ *         present in pRightsPresent
+ * @return NV_FALSE otherwise
+ */
+NvBool rsAccessMaskIsSubset(const RS_ACCESS_MASK *pRightsPresent,
+                            const RS_ACCESS_MASK *pRightsRequired);
+
+/*!
+ * @brief Checks if an access right mask is empty
+ *
+ * @param[in] pAccessMask The mask to check for emptiness
+ *
+ * @return NV_TRUE if the mask contains no access rights
+ * @return NV_FALSE otherwise
+ */
+NvBool rsAccessMaskIsEmpty(const RS_ACCESS_MASK *pAccessMask);
+
+
+/*!
+ * @brief Converts an array of access rights into a mask
+ *
+ * This function is useful for processing a statically-initialized array of
+ * access rights, since it is not always desirable to directly statically
+ * initialize an access mask. One example of this use is the definitions used
+ * in resource_list.h.
+ *
+ * @param[out] pAccessMask The newly initialized access mask
+ * @param[in] pRightsArray An array of access right values
+ * @param[in] length The number of elements in pRightsArray
+ *
+ * @return none
+ */
+void rsAccessMaskFromArray(RS_ACCESS_MASK *pAccessMask,
+                           const RsAccessRight *pRightsArray,
+                           NvLength length);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RS_ACCESS_RIGHTS_H */
diff --git a/src/nvidia/inc/libraries/resserv/rs_client.h b/src/nvidia/inc/libraries/resserv/rs_client.h
new file mode 100644
index 000000000..f2a18d868
--- /dev/null
+++ b/src/nvidia/inc/libraries/resserv/rs_client.h
@@ -0,0 +1,509 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_rs_client_nvoc.h" + +#ifndef _RS_CLIENT_H_ +#define _RS_CLIENT_H_ + + +#include "resserv/resserv.h" +#include "nvport/nvport.h" +#include "resserv/rs_resource.h" +#include "containers/list.h" +#include "utils/nvrange.h" + +#define RS_UNIQUE_HANDLE_BASE (0xcaf00000) +#define RS_UNIQUE_HANDLE_RANGE (0x00080000) + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsClient + * @addtogroup RsClient + * @{*/ + +typedef enum { + CLIENT_TYPE_USER, + CLIENT_TYPE_KERNEL +} CLIENT_TYPE; + +typedef struct AccessBackRef +{ + NvHandle hClient; + NvHandle hResource; +} AccessBackRef; + +MAKE_LIST(AccessBackRefList, AccessBackRef); + +/** + * Information about a client + */ +NVOC_PREFIX(client) class RsClient : Object +{ +public: + /** + * The handle of this client + */ + NvHandle hClient; + + /** + * Kernel or user client + */ + CLIENT_TYPE type; + + /** + * Client is in a state where it can allocate new objects + */ + NvBool bActive; + + /** + * True if client tripped the resource count warning threshold + */ + NvBool bResourceWarning; + + /** + * Maps resource handle -> RsResourceRef + */ + RsRefMap resourceMap; + + /** + * Access right back reference list of pairs + * + * A map of all hResource's (with hClient to scope the handle) that have + * shared access rights with us. + */ + AccessBackRefList accessBackRefList; + + /** + * The first generated handle in the generated resource handle space + * + * It is an error for the handleRangeStart to be 0 because that is a + * reserved handle. + * + * The first generated handle is not necessarily the lowest possible handle + * because the handle generator may overflow. The lowest possible resource + * handle is 0x1. + * + * Generated handles will be of the form: handleRangeStart + [0, handleRangeSize) + */ + NvHandle handleRangeStart; + + /** + * The size of the generated resource handle space. + * + * It is an error for the handleRangeSize to be 0. + * + * Generated handles will be of the form: handleRangeStart + [0, handleRangeSize) + */ + NvHandle handleRangeSize; + + /** + * The handles in the restricted resource handle space. + */ + NV_RANGE handleRestrictRange; + + /** + * Index used to generate the next handle in the resource handle space + */ + NvHandle handleGenIdx; + + /** + * Ordered list of resources that are to be freed + */ + RsRefFreeList pendingFreeList; + + /** + * Information about recursive resource free calls is stored here + */ + RS_FREE_STACK *pFreeStack; + + /** + * Construct a client instance + * @param[in] pClient This client + * @param[in] pAllocator NvPort memory allocation interface for client memory allocations + * @param[in] pParams The allocation params + */ + NV_STATUS clientConstruct(RsClient *pClient, PORT_MEM_ALLOCATOR *pAllocator, RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + + /** + * Destruct a client instance and free all allocated resources + * @param[in] pClient This client + */ + void clientDestruct(RsClient *pClient); + + /** + * Get a resource pointer from a resource reference. 
No resource locks are taken. + * @param[in] pClient This client + * @param[in] pResourceRef The reference to the resource + * @param[out] ppResource Pointer to the resource + */ + NV_STATUS clientGetResourceByRef(RsClient *pClient, RsResourceRef *pResourceRef, RsResource **ppResource); + + /** + * Get a resource pointer from a resource handle. No resource locks are taken. + * @param[in] pClient This client + * @param[in] hResource Resource handle + * @param[in] internalClassId Expected internal class ID of object. Must match. + * @param[out] ppResource Pointer to the resource + */ + NV_STATUS clientGetResource(RsClient *pClient, NvHandle hResource, NvU32 internalClassId, RsResource **ppResource); + + /** + * Get the reference to a resource + * @param[in] pClient This client + * @param[in] hResource The resource to lookup + * @param[out] ppResourceRef The reference to the resource + */ + NV_STATUS clientGetResourceRef(RsClient *pClient, NvHandle hResource, RsResourceRef **ppResourceRef); + + /** + * Get the reference to a resource, but only if the passed in access rights are + * possessed by the invoking client. + * + * @param[in] pClient This client + * @param[in] hResource The resource to lookup + * @param[in] pRightsRequired The rights required for success + * @param[out] ppResourceRef The reference to the resource + */ + NV_STATUS clientGetResourceRefWithAccess(RsClient *pClient, NvHandle hResource, const RS_ACCESS_MASK *pRightsRequired, RsResourceRef **ppResourceRef); + + /** + * Get the reference to a resource (with a type check) + * @param[in] pClient This client + * @param[in] hResource The resource to lookup + * @param[in] internalClassId The internal resource class id + * @param[out] ppResourceRef The reference to the resource + */ + NV_STATUS clientGetResourceRefByType(RsClient *pClient, NvHandle hResource, NvU32 internalClassId, RsResourceRef **ppResourceRef); + + /** + * Validate that current process is allowed to use this client + * @param[in] pClient This client + * @param[in] pSecInfo Security info of the current API call + */ + virtual NV_STATUS clientValidate(RsClient *pClient, const API_SECURITY_INFO * pSecInfo); + + /** + * Allocate a resource in RM for this client + * @param[in] pClient This client + * @param[in] pServer + * @param[inout] pParams Resource allocation parameters + */ + NV_STATUS clientAllocResource(RsClient *pClient, RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + + /** + * Duplicate a resource reference into this client + * @param[in] pClient This client + * @param[in] pServer The resource server instance + * @param[inout] pParams Resource sharing parameters + */ + NV_STATUS clientCopyResource(RsClient *pClient, RsServer *pServer, RS_RES_DUP_PARAMS_INTERNAL *pParams); + + /** + * Free a resource for this client and updates resource reference book-keeping. + * If the resource has a non-zero reference count, only book-keeping will be updated. + * Resources should never be freed in control calls. 
+     *
+     * @param[in] pClient This client
+     * @param[in] pServer
+     * @param[in] pParams Resource destruction parameters
+     */
+    virtual NV_STATUS clientFreeResource(RsClient *pClient, RsServer *pServer, RS_RES_FREE_PARAMS_INTERNAL *pParams);
+
+    /**
+     * Remove a resource reference from the client's resource hashmap
+     * @param[in] pClient This client
+     * @param[in] pResourceRef The reference to free
+     */
+    virtual NV_STATUS clientDestructResourceRef(RsClient *pClient, RsServer *pServer, RsResourceRef *pResourceRef);
+
+    /**
+     * Unmap a mapping that belongs to a resource reference in this client.
+     * @param[in] pClient This client
+     * @param[in] pResourceRef The reference that owns the mapping
+     * @param[inout] ppCpuMapping The mapping to unmap
+     */
+    virtual NV_STATUS clientUnmapMemory(RsClient *pClient, RsResourceRef *pResourceRef,
+                                        RS_LOCK_INFO *pLockInfo, RsCpuMapping **ppCpuMapping,
+                                        API_SECURITY_INFO *pSecInfo);
+    /**
+     * Create an inter-mapping between two resources owned by this client
+     * Resserv only implements a stub, users should override this to fill their own MapTo params struct
+     *
+     * @param[in] pClient This client
+     * @param[in] pMapperRef The resource that can be used to create the mapping
+     * @param[in] pMappableRef The resource that can be mapped
+     * @param[in] pParams parameters describing the mapping
+     */
+    virtual NV_STATUS clientInterMap(RsClient *pClient, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pParams);
+
+    /**
+     * Unmap an inter-mapping between two resources owned by this client
+     * Resserv only implements a stub, users should override this to fill their own UnmapFrom params struct
+     *
+     * @param[in] pClient This client
+     * @param[in] pMapperRef The reference that was used to create the mapping
+     * @param[in] pParams parameters describing the unmapping
+     */
+    virtual void clientInterUnmap(RsClient *pClient, RsResourceRef *pMapperRef, RS_INTER_UNMAP_PARAMS *pParams);
+
+    /**
+     * Generate an unused handle for a resource. The handle will be generated in the white-listed range that was
+     * specified when the client was allocated.
+     *
+     * The handle generator will wrap-around when the number of handles generated is greater than handleRangeSize, and
+     * the generator will start at handle 0x1 if it overflows (0x0 is a reserved handle).
+     *
+     * The handle generator can generate up to 2^32-2 unique handles if handleRangeStart + handleRangeSize overflows
+     * (because 0x0 is a reserved handle). Otherwise, the handle generator can generate up to 2^32-1 unique handles.
+     *
+     * @param[in] pClient This client
+     * @param[out] pHandle The generated handle
+     *
+     */
+    NV_STATUS clientGenResourceHandle(RsClient *pClient, NvHandle *pHandle);
+
+    /**
+     * Validate that a given resource handle is well-formed and does not already
+     * exist under a given client.
+     *
+     * @param[in] pClient
+     * @param[in] hResource
+     * @param[in] bRestrict If true, fail validation for handles in the client's restricted range
+     */
+    virtual NV_STATUS clientValidateNewResourceHandle(RsClient *pClient, NvHandle hResource, NvBool bRestrict);
+
+    /**
+     * Wrapper that generates a resource handle if a handle of 0 is provided, or otherwise
+     * validates a handle that was provided.
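+     *
+     * Illustrative sketch (hResource and status are placeholder locals):
+     *
+     *     NvHandle hResource = 0;   // 0 asks the generator for a new handle
+     *     status = clientAssignResourceHandle(pClient, &hResource);
+     *     // On success, hResource holds either the validated caller-supplied
+     *     // handle or a freshly generated one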
+ * + * @param[in] pClient + * @param[inout] phResource + */ + NV_STATUS clientAssignResourceHandle(RsClient *pClient, NvHandle *phResource); + + /** + * Recursively generate a client's list of resources to free + * @param[in] pClient + * @param[in] pTarget The resource ref currently being processed + * @param[in] pReference The resource ref that this function was initially called on + * @param[in] bMove If NV_TRUE: Add/move the target to the front of the list + * If NV_FALSE: Add the target to the front of the list if it isn't already in the list + */ + NV_STATUS clientUpdatePendingFreeList(RsClient *pClient, RsResourceRef *pTarget, + RsResourceRef *pReference, NvBool bMove); + + /** + * Allow derived client classes to modify the generated list of resources to free + * before they are freed. + * @param[in] pClient + * @param[out] ppFirstLowPriRef A pointer to the first reference that is low priority + */ + virtual NV_STATUS clientPostProcessPendingFreeList(RsClient *pClient, RsResourceRef **ppFirstLowPriRef); + + /** + * Add a back reference to a client/resource pair that shared access with our client + * so we can remove that access entry on client destruction. + * @param[in] pClient This client + * @param[in] pResourceRef Resource reference that decided to share access with us + */ + NV_STATUS clientAddAccessBackRef(RsClient* pClient, RsResourceRef* pResourceRef); + + /** + * Remove all access map entries for all back references we stored so other clients + * reusing the same client handle won't get unauthorized access. Intended to be called + * during client destruction. + * @param[in] pClient This client + * @param[in] pServer Resource Server instance + */ + void clientFreeAccessBackRefs(RsClient *pClient, RsServer *pServer); + + /* + * Set the start handle and range for this client's handle generator. + * + * @note Supplying a range and size of 0 will set the generator to the default start handle and range + * @note The handle generator can only be set before any handle has been generated + * + * @param[in] pClient + * @param[in] handleRangeStart + * @param[in] handleRangeSize + */ + NV_STATUS clientSetHandleGenerator(RsClient *pClient, NvHandle handleRangeStart, NvHandle handleRangeSize); + + /** + * Verify whether a client is able to share a resource under a certain share policy + * + * @param[in] pClient Client attempting to share the resource + * @param[in] pReousrceRef The resource being shared + * @param[in] pSharePolicy The policy under which the resource is to be shared + * @param[in] pCallContext The context of the call intending to perform the share + */ + NV_STATUS clientCanShareResource(RsClient *pClient, RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, CALL_CONTEXT *pCallContext); + + /** + * Share access to a resource with other clients under the specified share policy. + * + * @param[in] pClient This client + * @param[in] pResourceRef Resource reference which is sharing access + * @param[in] pSharePolicy The policy under which the resource is sharing access + */ + virtual NV_STATUS clientShareResource(RsClient *pClient, RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext); + + /** + * Share access to a resource with other clients under the specified share policy. 
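+     *
+     * Sketch of a typical sharing flow (non-normative; sharePolicy and status
+     * are placeholders, with sharePolicy assumed to have its type, target and
+     * accessMask already filled in):
+     *
+     *     status = clientCanShareResource(pClient, pResourceRef, &sharePolicy, pCallContext);
+     *     if (status == NV_OK)
+     *         status = clientShareResource(pClient, pResourceRef, &sharePolicy, pCallContext);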
+ * + * @param[in] pClient This client + * @param[in] pResourceRef Resource reference which is sharing access + * @param[in] pSharePolicy The policy under which the resource is sharing access + */ + NV_STATUS clientShareResourceTargetClient(RsClient *pClient, RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, CALL_CONTEXT *pCallContext); + /* + * Set the start handle and range for this client's restricted handle + * range. This range of handles cannot be explicitly requested. Any + * restricted handles that are in the client's resource handle generator + * range can still be generated. + * + * @note Supplying a range and size of 0 will disable the restricted range + * @note The handle generator can only be set before any handle has been generated + * + * @param[in] pClient + * @param[in] handleRangeStart + * @param[in] handleRangeSize + */ + NV_STATUS clientSetRestrictedRange(RsClient *pClient, NvHandle handleRangeStart, NvU32 handleRangeSize); +}; + +/** + * Get an iterator to the elements in the client's resource map + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] type RS_ITERATE_CHILDREN, RS_ITERATE_DESCENDANTS, RS_ITERATE_CACHED, RS_ITERATE_DEPENDANTS + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + * + * @note If type=RS_ITERATE_CHILDREN, pScopeRef will restrict iteration to children of the scope ref + * @note If type=RS_ITERATE_DESCENDANTS, pScopeRef will restrict iteration to descendants of the scope ref + * @note If type=RS_ITERATE_CACHED, pScopeRef will restrict iteration to references cached by the scope ref + */ +RS_ITERATOR clientRefIter(RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, RS_ITER_TYPE type, NvBool bExactMatch); + +/** + * Get the next iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefIterNext(RsClient *pClient, RS_ITERATOR *pIt); + +/** + * Get an iterator to the elements in the client's resource map. + * + * This iterator will visit all descendants in pre-order according to the parent-child + * resource hierarchy. + * + * @param[in] pClient + * @param[in] pScopeRef Restrict the iteration based on this reference [optional] + * @param[in] internalClassId Only iterate over resources with this class id [optional] + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + */ +RS_ORDERED_ITERATOR clientRefOrderedIter(RsClient *pClient, RsResourceRef *pScopeRef, NvU32 internalClassId, NvBool bExactMatch); + +/** + * Get the next ordered iterator to the elements in the client's resource map + * @param[in] pClient + * @param[inout] pIt The iterator + */ +NvBool clientRefOrderedIterNext(RsClient *pClient, RS_ORDERED_ITERATOR *pIt); + + +/** + * RsResource interface to a RsClient + * + * This allows clients to be interfaced with as-if they were resources (e.g., + * to perform a control call on a client). + * + * An RsClientResource is automatically allocated under a client as a top-level + * object when that client is allocated and cannot be explicitly freed. Only + * one RsClientResource is permitted per-client. + * + * Any resource allocated under a client will be a descendant of the client + * proxy resource. 
+ * + */ +NVOC_PREFIX(clientres) class RsClientResource : RsResource +{ +public: + NV_STATUS clientresConstruct(RsClientResource* pClientRes, CALL_CONTEXT *pCallContext, RS_RES_ALLOC_PARAMS_INTERNAL *pParams) + : RsResource(pCallContext, pParams); + void clientresDestruct(RsClientResource* pClientRes); + +// private: + RsClient* pClient; +}; + +/** + * Client destruction parameters + */ +struct RS_CLIENT_FREE_PARAMS_INTERNAL +{ + NvHandle hDomain; ///< [in] The parent domain + NvHandle hClient; ///< [in] The client handle + NvBool bHiPriOnly; ///< [in] Only free high priority resources + NvU32 state; ///< [in] User-defined state + + RS_RES_FREE_PARAMS_INTERNAL *pResFreeParams; ///< [in] Necessary for locking state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * Return an iterator to a resource reference multi-map + * @param[in] pIndex The multi-map to iterate + * @param[in] index Return only the references belonging to this index + */ +RsIndexIter indexRefIter(RsIndex *pIndex, NvU32 index); + +/** + * Return an iterator to all resource references in a multi-map + * @param[in] pIndex The multi-map to iterate + */ +RsIndexIter indexRefIterAll(RsIndex *pIndex); + +/** + * Get the next iterator in a resource reference multi-map + * @param[in] pIt Iterator + */ +NvBool indexRefIterNext(RsIndexIter *pIt); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/resserv/rs_domain.h b/src/nvidia/inc/libraries/resserv/rs_domain.h new file mode 100644 index 000000000..856568c44 --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/rs_domain.h @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _RS_DOMAIN_H_ +#define _RS_DOMAIN_H_ + +#include "resserv/resserv.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsDomain + * @addtogroup RsDomain + * @{*/ + +/** + * @brief Domains are being re-worked + */ +struct RsDomain +{ + NvU32 dummy; +}; + +/** + * Construct a domain instance + * @param[in] pDomain This domain + * @param[in] pAllocator + * @param[in] hDomain The handle for this domain + * @param[in] hParentDomain The handle for the parent domain + * @param[in] pAccessControl The privileges of the domain + */ +NV_STATUS +domainConstruct +( + RsDomain *pDomain, + PORT_MEM_ALLOCATOR *pAllocator, + NvHandle hDomain, + NvHandle hParentDomain, + ACCESS_CONTROL *pAccessControl +); + +/** + * Destruct a domain instance + * @param[in] pDomain The domain to destruct + */ +NV_STATUS +domainDestruct +( + RsDomain *pDomain +); + +/* @} */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/resserv/rs_resource.h b/src/nvidia/inc/libraries/resserv/rs_resource.h new file mode 100644 index 000000000..8df3313f9 --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/rs_resource.h @@ -0,0 +1,829 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "g_rs_resource_nvoc.h" + +#ifndef _RS_RESOURCE_H_ +#define _RS_RESOURCE_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/object.h" +#include "resserv/rs_access_map.h" + +#ifdef __cplusplus +extern "C" { +#endif + +class RsSession; + +/** + * @defgroup RsResource + * @addtogroup RsResource + * @{*/ + +#define ALLOC_STATE_INTERNAL_CLIENT_HANDLE NVBIT(5) + +/* + * Locking operations for lock-metering + */ +#define RS_LOCK_TRACE_INVALID 1 +#define RS_LOCK_TRACE_ACQUIRE 1 +#define RS_LOCK_TRACE_RELEASE 2 +#define RS_LOCK_TRACE_ALLOC 3 +#define RS_LOCK_TRACE_FREE 4 +#define RS_LOCK_TRACE_CTRL 5 +#define RS_LOCK_TRACE_MAP 6 +#define RS_LOCK_TRACE_UNMAP 7 + +/** + * Context information for top-level, resource-level, and client-level locking + * operations + */ +struct RS_LOCK_INFO +{ + RsClient *pClient; ///< Pointer to client that was locked (if any) + RsClient *pSecondClient; ///< Pointer to second client, for dual-client locking + RsResourceRef *pContextRef; ///< User-defined reference + RsSession *pSession; ///< Session object to be locked, if any + NvU32 flags; ///< RS_LOCK_FLAGS_* + NvU32 state; ///< RS_LOCK_STATE_* + NvU32 gpuMask; + NvU8 traceOp; ///< RS_LOCK_TRACE_* operation for lock-metering + NvU32 traceClassId; ///< Class of initial resource that was locked for lock metering +}; + +struct RS_RES_ALLOC_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hParent; ///< [in] The handle of the resource's parent. This may be a client or another resource. + NvHandle hResource; ///< [inout] Server will assign a handle if this is 0, or else try the value provided + NvU32 externalClassId; ///< [in] External class ID of resource + NvHandle hDomain; ///< UNUSED + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + RsClient *pClient; ///< [out] Cached client + RsResourceRef *pResourceRef; ///< [out] Cached resource reference + NvU32 allocFlags; ///< [in] Allocation flags + NvU32 allocState; ///< [inout] Allocation state + API_SECURITY_INFO *pSecInfo; + + void *pAllocParams; ///< [in] Copied-in allocation parameters + + // ... Dupe alloc + RsClient *pSrcClient; ///< The client that is sharing the resource + RsResourceRef *pSrcRef; ///< Reference to the resource that will be shared + + RS_ACCESS_MASK *pRightsRequested; ///< [in] Access rights requested on the new resource + // Buffer for storing contents of user mask. Do not use directly, use pRightsRequested instead. + RS_ACCESS_MASK rightsRequestedCopy; + + RS_ACCESS_MASK *pRightsRequired; ///< [in] Access rights required to alloc this object type +}; + +struct RS_RES_DUP_PARAMS_INTERNAL +{ + NvHandle hClientSrc; ///< [in] The handle of the source resource's client + NvHandle hResourceSrc; ///< [in] The handle of the source resource. + NvHandle hClientDst; ///< [in] The handle of the destination resource's client (may be different from source client) + NvHandle hParentDst; ///< [in] The handle of the destination resource's parent. + NvHandle hResourceDst; ///< [inout] The handle of the destination resource. Generated if 0. 
+ void *pShareParams; ///< [in] Copied-in sharing parameters + NvU32 flags; ///< [in] Flags to denote special cases ( Bug: 2859347 to track removal) + // Internal use only + RsClient *pSrcClient; + RsResourceRef *pSrcRef; + API_SECURITY_INFO *pSecInfo; ///< [in] Security info + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state +}; + +struct RS_RES_SHARE_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the owner's client + NvHandle hResource; ///< [in] The handle of the resource. + RS_SHARE_POLICY *pSharePolicy; ///< [in] The policy to share with + + // Internal use only + API_SECURITY_INFO *pSecInfo; ///< [in] Security info + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state +}; + +#define RS_IS_COPY_CTOR(pParams) ((pParams)->pSrcRef != NULL) + +struct RS_RES_FREE_PARAMS_INTERNAL +{ + NvHandle hClient; ///< [in] The handle of the resource's client + NvHandle hResource; ///< [in] The handle of the resource + NvBool bInvalidateOnly; ///< [in] Free the resource, but don't release its handle + NvHandle hDomain; ///< UNUSED + + // Internal use only + NvBool bHiPriOnly; ///< [in] Only free if this is a high priority resources + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + NvU32 freeFlags; ///< [in] Flags for the free operation + NvU32 freeState; ///< [inout] Free state + RsResourceRef *pResourceRef; ///< [inout] Cached RsResourceRef + NV_STATUS status; ///< [out] Status of free operation + API_SECURITY_INFO *pSecInfo; ///< [in] Security info +}; + +struct NVOC_EXPORTED_METHOD_DEF; +class OBJGPU; +class OBJGPUGRP; + +// +// RS_RES_CONTROL_PARAMS +// +// This structure encapsulates data sent to the cmd-specific rmctrl +// handlers. Along with the arguments supplied by the requesting +// client (hClient, hObject, cmd, pParams, paramSize). +// +struct RS_RES_CONTROL_PARAMS_INTERNAL +{ + NvHandle hClient; // client-specified NV01_ROOT object handle + NvHandle hObject; // client-specified object handle + NvU32 cmd; // client-specified command # + NvU32 flags; // flags related to control call execution + void *pParams; // client-specified params (in kernel space) + NvU32 paramsSize; // client-specified size of pParams in bytes + + NvHandle hParent; // handle of hObject parent + OBJGPU *pGpu; // ptr to OBJGPU struct if applicable + OBJGPUGRP *pGpuGrp; // ptr to OBJGPUGRP struct if applicable + RsResourceRef *pResourceRef; // ptr to RsResourceRef if object is managed by + // Resource Server + API_SECURITY_INFO secInfo; // information on privilege level and pointer location (user/kernel) + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + RS_CONTROL_COOKIE *pCookie; + NvBool bInternal; // True if control call was not issued from an external client + NvBool bDeferredApi; // Indicates ctrl is being dispatched via deferred API + + struct RS_RES_CONTROL_PARAMS_INTERNAL *pLegacyParams; // RS-TODO removeme +}; + +struct RS_RES_DTOR_PARAMS +{ + CALL_CONTEXT *pFreeContext; + RS_RES_FREE_PARAMS_INTERNAL *pFreeParams; +}; + +/** + * Base class for all resources. Mostly a pure virtual interface which + * should be overridden to implement resource specific behavior. + */ +NVOC_PREFIX(res) class RsResource : Object +{ +public: +// private: + + /** + * Back-reference to the RsResourceRef that owns this object + */ + RsResourceRef *pResourceRef; + + /** + * Params for dtor + */ + RS_RES_DTOR_PARAMS dtorParams; + + /** + * Flag that indicates whether the RsResource was constructed. 
If params to + * resConstruct are null the Resource ctor and dtor will be skipped. This is + * only added for migration where the entire class hierarchy can't be + * converted at once. + * + * RS-TODO: Remove once migrations are finished (added initially for + * DmaObject) + */ + NvBool bConstructed; + +public: + + /** + * Resource initializer + * @param[in] pResource Resource object to init + * @param[in] pCallContext + * @param[in] params Resource create parameters + */ + NV_STATUS resConstruct(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + + /** + * Returns TRUE if the resource can be copied + */ + virtual NvBool resCanCopy(RsResource *pResource); + + /** + * Resource destructor + * @param[in] pResource Resource object to destruct + */ + void resDestruct(RsResource *pResource); + + /** + * Resource destructor prologue (occurs before mappings are torn-down) + * @param[in] pResource Resource object to destruct + */ + virtual void resPreDestruct(RsResource *pResource); + + /** + * Resource dtors take no parameters, so set them here. + * @param[in] pResource + * @param[in] pCallContext + * @param[in] params Resource destroy parameters + */ + NV_STATUS resSetFreeParams(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_FREE_PARAMS_INTERNAL *pParams); + + /** + * Resource dtors take no parameters, so get them here. + * @param[in] pResource + * @param[out] ppCallContext + * @param[out] ppParams Resource destroy parameters + */ + NV_STATUS resGetFreeParams(RsResource *pResource, CALL_CONTEXT **ppCallContext, RS_RES_FREE_PARAMS_INTERNAL **ppParams); + + /** + * Lookup a control call entry from a NVOC export table + * + * @param[in] pResource + * @param[in] pParams + * @param[out] ppEntry + */ + virtual NV_STATUS resControlLookup(RsResource *pResource, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + const struct NVOC_EXPORTED_METHOD_DEF **ppEntry); + + /** + * Dispatch resource control call + * @param[in] pResource + * @param[in] pCallContext + * @param[in] pParams + */ + virtual NV_STATUS resControl(RsResource *pResource, CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + + /** + * Early filter for control calls we don't want to service on a particular platform + * + * @param[in] pResource + * @param[in] pCallContext + * @param[in] pParams + */ + virtual NV_STATUS resControlFilter(RsResource *pResource, CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + + /** + * Operations performed right before the control call is executed. Default stubbed. + * + * @param[in] pResource + * @param[in] pCallContext + * @param[in] pParams + */ + virtual NV_STATUS resControl_Prologue(RsResource *pResource, CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + + /** + * Operations performed right after the control call is executed. No return value. (void) + * + * @param[in] pResource + * @param[in] pCallContext + * @param[in] pParams + */ + virtual void resControl_Epilogue(RsResource *pResource, CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams); + + /** + * Creates a mapping of the underlying resource in the physical address space of the requested process. + * + * The difference between serverResMap and resMap is that resMap provides a locked physical address + * and serverResMap creates a virtual mapping to the physical address. 
For virtualization, the + * tandem resource servers should be able to map a host physical address in a guest user space + * VA without any resource-specific VA mapping code. + * + * Not all resources support mapping. + * + * @param[in] pResource Resource to map + * @param[in] pCallContext + * @param[inout] pCpuMapping + */ + virtual NV_STATUS resMap(RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping); + + /** + * Release a virtual address mapping + * @param[in] pResource Resource to map + * @param[in] pCallContext + * @param[in] pCpuMapping + */ + virtual NV_STATUS resUnmap(RsResource *pResource, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping); + + /** + * Maps to this resource from another resource + * Not all resources can be mapped to, in such a case returns NV_ERR_INVALID_OBJECT_HANDLE + * + * @param[in] pResource + * @param[inout] pParams + */ + virtual NV_STATUS resMapTo(RsResource *pResource, RS_RES_MAP_TO_PARAMS *pParams); + + /** + * Unmaps a resource mapped to this resource + * Not all resources can be unmapped, in such a case returns NV_ERR_INVALID_OBJECT_HANDLE + * + * @param[in] pResource + * @param[in] pParams + */ + virtual NV_STATUS resUnmapFrom(RsResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams); + + /** + * Gets a refcount for any underlying shared resource + * @returns refcount + */ + virtual NvU32 resGetRefCount(RsResource *pResource); + + /** + * Decides whether the invoking client should be granted an access right on this resource. + * + * The purpose of providing this function is to provide subclassed resources the ability + * to set custom policies for granting access rights. These policies can be implemented + * based on the ambient privilege of the caller, such as the PID. + * + * @param[in] pResource The resource for which the access right will be granted + * @param[in] pInvokingClient The client requesting the access right + * @param[in] pAllocParams The alloc params struct passed into the alloc call, + * NULL if called from outside the Alloc path + * @param[in] accessRight The access right to be granted + * @returns NV_TRUE if the access right should be granted, and NV_FALSE otherwise + */ + virtual NvBool resAccessCallback(RsResource *pResource, RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight); + + /** + * Decides whether rights can be shared with a client under a certain policy. + * + * The purpose of this function is to provide subclasses the ability to set custom definitions + * for how certain policies will share. Certain share types can then be created to work based + * on components not stored directly in resserv, such as PID. + * + * @param[in] pResource The resource being shared + * @param[in] pInvokingClient The client being shared with + * @param[in] pParentRef dstParent if calling from DupObject, NULL otherwise + * @param[in] pSharePolicy The policy under which to share + * @returns NV_TRUE if the share policy applies and rights should be shared, NV_FALSE otherwise + */ + virtual NvBool resShareCallback(RsResource *pResource, RsClient *pInvokingClient, RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy); + + /** + * Adds dependants that aren't in childRefMap or depRefMap to the pending free list. + * + * Due to RAM constraints, some classes can add more dependants that aren't + * represented in childRefMap or depRefMap. They can override this function + * to put them in the pending free list while we are updating it. 
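+     *
+     * Sketch of what an override typically does for each extra dependant
+     * (pExtraDependantRef is a placeholder for such a reference):
+     *
+     *     clientUpdatePendingFreeList(pClient, pExtraDependantRef, pReference, NV_FALSE);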
+ * + * @param[in] pClient + * @param[in] pResource The RsResource with potential additional dependants + * @param[in] pReference The pReference to pass in to + * clientUpdatePendingFreeList() + */ + virtual void resAddAdditionalDependants(RsClient *pClient, RsResource *pResource, RsResourceRef *pReference); +}; + +/* @} */ + +class OBJGPU; + +/** + * @defgroup RsCpuMapping + * @addtogroup RsCpuMapping + * @{*/ +struct RsCpuMapping +{ + NvU64 offset; + NvU64 length; + NvU32 flags; + NvP64 pLinearAddress; + RsResourceRef *pContextRef; ///< Context resource that may be needed for the mapping + void *pContext; ///< Additional context data for the mapping + NvU32 processId; + + RS_CPU_MAPPING_PRIVATE *pPrivate; ///< Opaque struct allocated and freed by resserv on behalf of the user +}; +MAKE_LIST(RsCpuMappingList, RsCpuMapping); + +/** + * CPU mapping parameters + */ +struct RS_CPU_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; ///< [in] Offset into the resource + NvU64 length; ///< [in] Size of the region to map + NvP64 *ppCpuVirtAddr; + NvU32 flags; ///< [in] Resource-specific flags + + // Passed from RM into CpuMapping + NvU32 protect; ///< [in] Protection flags + NvBool bKernel; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU unmapping params for resource server tests + */ +struct RS_CPU_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; ///< [in] Address of mapped memory + NvU32 flags; ///< [in] Resource-specific flags + NvU32 processId; + + /// [in] hContext Handle of resource that provides a context for the mapping (e.g., subdevice for channel map) + NvHandle hContext; + + // RM-only + void *pProcessHandle; + + NvBool (*fnFilter)(RsCpuMapping*); ///< [in] Mapping-filter function + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info +}; + +/** + * CPU mapping back-reference + */ +struct RS_CPU_MAPPING_BACK_REF +{ + RsCpuMapping *pCpuMapping; ///< Mapping linked to this backref + RsResourceRef *pBackRef; ///< Resource reference with mapping +}; +MAKE_LIST(RsCpuMappingBackRefList, RS_CPU_MAPPING_BACK_REF); +/* @} */ + +/** + * @defgroup RsInterMapping + * @addtogroup RsInterMapping + * @{*/ +struct RS_INTER_MAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU64 offset; + NvU64 length; + NvU32 flags; + NvU64 dmaOffset; ///< [inout] RS-TODO rename this + void *pMemDesc; ///< [out] + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_MAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +struct RS_INTER_UNMAP_PARAMS +{ + NvHandle hClient; + NvHandle hMapper; + NvHandle hMappable; + NvHandle hDevice; + NvU32 flags; + NvU64 dmaOffset; ///< [in] RS-TODO rename this + void *pMemDesc; ///< MEMORY_DESCRIPTOR * + + // Internal use only + RS_LOCK_INFO *pLockInfo; ///< [inout] Locking flags and state + API_SECURITY_INFO *pSecInfo; ///< [in] Security Info + + RS_INTER_UNMAP_PRIVATE *pPrivate; ///< Opaque struct controlled by caller +}; + +/** + * Inter-mapping information + * Used to keep track of inter-mappings and unmap them on free + */ +struct 
RsInterMapping +{ + // RsResourceRef *pMapperRef ///< (Implied) the resource that created and owns this mapping (this resource) + RsResourceRef *pMappableRef; ///< The resource being mapped by the mapper (e.g. hMemory) + RsResourceRef *pContextRef; ///< A resource used to provide additional context for the mapping (e.g. hDevice) + NvU32 flags; ///< Flags passed when mapping, same flags also passed when unmapping + NvU64 dmaOffset; + void *pMemDesc; +}; +MAKE_LIST(RsInterMappingList, RsInterMapping); + +/** + * Inter-mapping back-reference + */ +struct RS_INTER_MAPPING_BACK_REF +{ + RsResourceRef *pMapperRef; ///< Resource reference with mapping + RsInterMapping *pMapping; ///< Pointer to the inter-mapping linked to this backref +}; +MAKE_LIST(RsInterMappingBackRefList, RS_INTER_MAPPING_BACK_REF); +/* @} */ + +typedef struct RS_RESOURCE_DESC RS_RESOURCE_DESC; +RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32); +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *); + +/** + * A reference to a resource that has been allocated in RM. + */ +struct RsResourceRef +{ + RsClient *pClient; ///< Pointer to the client that owns the ref + RsResource *pResource; ///< Pointer to the actual resource + NvHandle hResource; ///< Resource handle + struct RsResourceRef *pParentRef; ///< Parent resource reference + RsIndex childRefMap; ///< Child reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + + /** + * Cached reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * The resource reference cache is a one-way association between this resource reference and + * any other resource reference. Resource server does not populate the cache so it is up to the + * resource implementation to manage it. clientRefIter can be used to iterate this cache. + */ + RsIndex cachedRefMap; + + /** + * Dependants reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * A map of all resources that strongly depend on this resource. + */ + RsIndex depRefMap; + + /** + * Dependants back-reference multi-map: { internalClassId -> { handle -> RsResourceRef } } + * + * AKA dependencies map + * + * A map of all resources that this resource strongly depends on. + */ + RsIndex depBackRefMap; + + /** + * Policy under which this resource can be shared with other clients + */ + RsShareList sharePolicyList; + NvBool bSharePolicyListModified; + + /** + * A mask of the access rights that the owner client has on this object. 
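+     *
+     * Non-normative sketch of testing this mask (pRef and rightsNeeded are
+     * placeholders built by the caller):
+     *
+     *     if (rsAccessMaskIsSubset(&pRef->accessMask, &rightsNeeded))
+     *     {
+     *         // the owning client already holds every right in rightsNeeded
+     *     }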
+ */ + RS_ACCESS_MASK accessMask; + + const RS_RESOURCE_DESC *pResourceDesc; ///< Cached pointer to the resource descriptor + NvU32 internalClassId; ///< Internal resource class id + NvU32 externalClassId; ///< External resource class id + NvU32 depth; ///< The depth of this reference in the resource graph + NvBool bInvalidated; ///< Reference has been freed but not removed yet + + RsCpuMappingList cpuMappings; ///< List of CPU mappings to the resource from this resource reference + RsCpuMappingBackRefList backRefs; ///< List of references that have this reference as a mapping context + + RsInterMappingList interMappings; ///< List of inter-resource mappings created by this resource + RsInterMappingBackRefList interBackRefs; ///< List of inter-resource mappings this resource has been mapped into + + RsSession *pSession; ///< If set, this ref depends on a shared session + RsSession *pDependantSession; ///< If set, this ref is depended on by a shared session + + ListNode freeNode; ///< Links to the client's pendingFreeList +}; +MAKE_MAP(RsRefMap, RsResourceRef); +MAKE_INTRUSIVE_LIST(RsRefFreeList, RsResourceRef, freeNode); + + +// Iterator data structure to save state while walking through a list +struct RS_ITERATOR +{ + union + { + RsRefMapIter mapIt; ///< Map iterator for all resource references under a client + RsIndexIter idxIt; ///< Index iterator for child references of a resource reference + }; + + RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over + NvU8 type; ///< RS_ITERATE_* + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId +}; + +// Iterator data structure to save state while walking through a resource tree in pre-order +struct RS_ORDERED_ITERATOR +{ + NvS8 depth; ///< Depth of index stack; special value of -1 implies that the scope reference should be iterated over as well + RsIndexIter idxIt[RS_MAX_RESOURCE_DEPTH+1]; ///< Stack of index iterators for child references of a resource reference + + RsClient *pClient; + RsResourceRef *pScopeRef; ///< Reference to the resource that limits the scope of iteration + NvU32 internalClassId; + NvBool bExactMatch; ///< If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + + RsResourceRef *pResourceRef; ///< Resource ref that is being iterated over +}; + +/** + * Macro for looking up a reference from a resource + */ +#define RES_GET_REF(pResource) (staticCast((pResource), RsResource)->pResourceRef) + +/** + * Macro for looking up a resource handle from a resource + */ +#define RES_GET_HANDLE(pResource) (RES_GET_REF(pResource)->hResource) + +/** + * Macro for looking up a resource's external class from a resource + */ +#define RES_GET_EXT_CLASS_ID(pResource) (RES_GET_REF(pResource)->externalClassId) + +/** + * Macro for looking up a resource's parent handle from a resource + */ +#define RES_GET_PARENT_HANDLE(pResource) (RES_GET_REF(pResource)->pParentRef->hResource) + +/** + * Macro for looking up a client from a resource + */ +#define RES_GET_CLIENT(pResource) (RES_GET_REF(pResource)->pClient) + +/** + * Macro for looking up a client handle from a resource + */ +#define RES_GET_CLIENT_HANDLE(pResource) (RES_GET_REF(pResource)->pClient->hClient) + +/** + * Find a CPU mapping owned by a resource reference + * + * @param[in] pResourceRef + * 
@param[in] pAddress The CPU virtual address of the mapping to search for + * @param[out] ppMapping The returned mapping + */ +NV_STATUS refFindCpuMapping(RsResourceRef *pResourceRef, NvP64 pAddress, RsCpuMapping **ppMapping); + +/** + * Find a CPU mapping owned by a resource reference + * + * @param[in] pResourceRef + * @param[in] pAddress The CPU virtual address of the mapping to search for + * @param[in] fnFilter A user-provided filtering function that determines which mappings to ignore. + * If fnFilter is provided, then we will only return mappings for which fnFilter(mapping) returns NV_TRUE + * All mappings will be searched over if fnFilter is NULL. + * @param[out] ppMapping The returned mapping + * @param[in] fnFilter A user-provided filtering function that determines which mappings to ignore. + * If fnFilter is provided, then we will only return mappings for which fnFilter(mapping) returns NV_TRUE + * All mappings will be searched over if fnFilter is NULL. + */ +NV_STATUS refFindCpuMappingWithFilter(RsResourceRef *pResourceRef, NvP64 pAddress, NvBool (*fnFilter)(RsCpuMapping*), RsCpuMapping **ppMapping); + +/** + * Find the first child object of given type + * + * @param[in] pParentRef + * @param[in] internalClassId + * @param[in] bExactMatch If true, internalClassId must match exactly; if false, also match classes derived from the internalClassId + * @param[out] pResourceRef The returned RsResourceRef (Optional) + */ +NV_STATUS refFindChildOfType(RsResourceRef *pParentRef, NvU32 internalClassId, NvBool bExactMatch, RsResourceRef **ppResourceRef); + +/** + * Traverse up the reference parent-child hierarchy to find an ancestor reference of a given type + * + * @param[in] pDescendantRef + * @param[in] internalClassId + * @param[out] ppAncestorRef The returned RsResourceRef (Optional) + */ +NV_STATUS refFindAncestorOfType(RsResourceRef *pDescendantRef, NvU32 internalClassId, RsResourceRef **ppAncestorRef); + +/** + * Traverse up the reference parent-child hierarchy to find if a ref is a descendant of a given ancestor ref + * + * @param[in] pDescendantRef The node to start searching from (not included in the search) + * @param[in] pAncestorRef The node to search for in the parent-child hierarchy + */ +NvBool refHasAncestor(RsResourceRef *pDescendantRef, RsResourceRef *pAncestorRef); + +/** + * Add a new mapping to a reference's mapping list + * @param[in] pResourceRef The reference to add a mapping to + * @param[in] pMapParams The parameters used to initialize the mapping + * @param[in] pContextRef A reference to a resource that provides a context for the mapping + * @param[out] ppMapping Pointer to the allocated mapping [optional] + */ +NV_STATUS refAddMapping(RsResourceRef *pResourceRef, RS_CPU_MAP_PARAMS *pMapParams, + RsResourceRef *pContextRef, RsCpuMapping **ppMapping); + +/** + * Remove an existing mapping from a reference's mapping list and remove back-references to the mapping. + * @param[in] pResourceRef The reference to add a mapping to + * @param[in] pMapping Pointer to the allocated mapping + */ +void refRemoveMapping(RsResourceRef *pResourceRef, RsCpuMapping *pMapping); + +/** + * Allocate the user-controlled private pointer within the RsCpuMapping struct. 
+ * Resserv will call this function to alloc the private struct when the mapping is created + * @param[in] pMapParams The parameters which were used to create the mapping + * @param[inout] pMapping Pointer to the mapping whose private struct should be allocated + */ +NV_STATUS refAllocCpuMappingPrivate(RS_CPU_MAP_PARAMS *pMapParams, RsCpuMapping *pMapping); + +/** + * Free the user-controlled private pointer within the RsCpuMapping struct. + * Resserv will call this function to free the private struct when the mapping is removed + * @param[inout] pMapping Pointer to the mapping whose private struct should be freed + */ +void refFreeCpuMappingPrivate(RsCpuMapping *pMapping); + +/** + * Add a dependency between this resource reference and a dependent reference. + * If this reference is freed, the dependent will be invalidated and torn down. + * + * @note Dependencies are implicit between a parent resource reference and child resource reference + * @note No circular dependency checking is performed + */ +NV_STATUS refAddDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Remove the dependency between this resource reference and a dependent resource reference. + */ +NV_STATUS refRemoveDependant(RsResourceRef *pResourceRef, RsResourceRef *pDependantRef); + +/** + * Find, Add, or Remove an inter-mapping between two resources to the Mapper's list of inter-mappings + * Inter-mappings are stored in the Mapper, and are matched by both the MappableRef and offset. + * + * @param[in] pMapperRef The reference which owns the inter-mapping + * @param[in] pMappableRef The reference which was mapped from to create the inter-mapping + * If NULL, will be ignored while matching inter-mappings + * @param[in] dmaOffset The offset value assigned while mapping, used to identify mappings + * @param[in] pContextRef A reference used during mapping and locking for additional context, used to identify mappings + * @param[inout] ppMapping Writes the resulting inter-mapping, if successfully created (Add) or found (Find) + * @param[in] pMapping The inter-mapping to remove (Remove) + */ +NV_STATUS refFindInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, NvU64 dmaOffset, RsInterMapping **ppMapping); +NV_STATUS refAddInterMapping(RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RsResourceRef *pContextRef, RsInterMapping **ppMapping); +void refRemoveInterMapping(RsResourceRef *pMapperRef, RsInterMapping *pMapping); + +/** + * Store a resource reference in another reference's cache. 
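+ *
+ * A minimal usage sketch, added purely for illustration (it is not part of
+ * the original header); it pairs this call with refUncacheRef() using the
+ * parameter names documented below:
+ * ~~~{.c}
+ * if (refCacheRef(pParentRef, pResourceRef) == NV_OK)
+ * {
+ *     // ... later, when the reference should no longer be indexed ...
+ *     refUncacheRef(pParentRef, pResourceRef);
+ * }
+ * ~~~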
+ * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to store in the cache + */ +NV_STATUS refCacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Remove a resource reference from another reference's cache + * @param[in] pParentRef The resource reference that owns the cache + * @param[in] pResourceRef The resource reference to de-index + */ +NV_STATUS refUncacheRef(RsResourceRef *pParentRef, RsResourceRef *pResourceRef); + +/** + * Determine whether a reference is queued for removal + * @param[in] pResourceRef + * @param[in] pClient + */ +NvBool refPendingFree(RsResourceRef *pResourceRef, RsClient *pClient); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/resserv/rs_server.h b/src/nvidia/inc/libraries/resserv/rs_server.h new file mode 100644 index 000000000..17c561cb7 --- /dev/null +++ b/src/nvidia/inc/libraries/resserv/rs_server.h @@ -0,0 +1,928 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "g_rs_server_nvoc.h" + +#ifndef _RS_SERVER_H_ +#define _RS_SERVER_H_ + +#include "nvport/nvport.h" +#include "resserv/resserv.h" +#include "nvoc/runtime.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup RsServer + * @addtogroup RsServer + * @{*/ + +/** + * Book-keeping for individual client locks + */ +struct CLIENT_ENTRY +{ + PORT_RWLOCK *pLock; + RsClient *pClient; + NvHandle hClient; + NvU64 lockOwnerTid; ///< Thread id of the lock owner + +#if LOCK_VAL_ENABLED + LOCK_VAL_LOCK lockVal; +#endif +}; + +/** + * Base-class for objects that are shared among multiple + * RsResources (including RsResources from other clients) + */ +NVOC_PREFIX(shr) class RsShared : Object +{ +public: + NV_STATUS shrConstruct(RsShared *pShared); + void shrDestruct(RsShared *pShared); + + NvS32 refCount; + MapNode node; +}; +MAKE_INTRUSIVE_MAP(RsSharedMap, RsShared, node); + +/** + * Utility class for objects that can reference + * multiple client handle spaces. Free's and control calls + * that occur on objects which reference an RsSession will + * need to acquire pLock first. 
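+ *
+ * Purely as an illustration (classInfo(), dynamicCast() and the
+ * portSyncRwLock* calls come from NVOC/NvPort and are assumptions here, not
+ * something this header mandates), a session might be allocated through the
+ * shared-object interface and its lock taken around a dependent free or
+ * control call:
+ * ~~~{.c}
+ * RsShared  *pShare   = NULL;
+ * RsSession *pSession = NULL;
+ *
+ * if (serverAllocShare(pServer, classInfo(RsSession), &pShare) == NV_OK)
+ * {
+ *     pSession = dynamicCast(pShare, RsSession);
+ *     portSyncRwLockAcquireWrite(pSession->pLock);
+ *     // ... free or control the object that references this session ...
+ *     portSyncRwLockReleaseWrite(pSession->pLock);
+ * }
+ * ~~~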
+ */ +NVOC_PREFIX(session) class RsSession : RsShared +{ +public: + NV_STATUS sessionConstruct(RsSession *pSession); + void sessionDestruct(RsSession *pSession); + + NV_STATUS sessionAddDependant(RsSession *pSession, RsResourceRef *pResourceRef); + NV_STATUS sessionAddDependency(RsSession *pSession, RsResourceRef *pResourceRef); + virtual void sessionRemoveDependant(RsSession *pSession, RsResourceRef *pResourceRef); + virtual void sessionRemoveDependency(RsSession *pSession, RsResourceRef *pResourceRef); + + PORT_RWLOCK *pLock; +#if LOCK_VAL_ENABLED + LOCK_VAL_LOCK lockVal; +#endif + + NvBool bValid; + + RsResourceRefList dependencies; + RsResourceRefList dependants; +// private: + NV_STATUS sessionCheckLocksForAdd(RsSession *pSession, RsResourceRef *pResourceRef); + void sessionCheckLocksForRemove(RsSession *pSession, RsResourceRef *pResourceRef); +}; + +// Iterator data structure to save state while walking through a map +struct RS_SHARE_ITERATOR +{ + RsSharedMapIter mapIt; + NvU32 internalClassId; + RsShared *pShared; ///< Share that is being iterated over +}; + +/** + * Top-level structure that RMAPI and RM interface with + * + * This class is all that needs to be allocated to use the resource server + * library. + * + * The RsServer interface should be kept as narrow as possible. Map and + * MapTo are added because <1> the unmap variants operate in addresses and not + * handles and <2> having explicit knowledge of map operations in the server is + * helpful when dealing with multiple levels of address spaces (e.g., guest + * user-mode, guest kernel-mode, host kernel-mode). + */ +struct RsServer +{ + /** + * Privilege level determines what objects a server is allowed to allocate, and + * also determines whether additional handle validation needs to be performed. + */ + RS_PRIV_LEVEL privilegeLevel; + + RsClientList *pClientSortedList; ///< Bucket if linked List of clients (and their locks) owned by this server + NvU32 clientCurrentHandleIndex; + + NvBool bConstructed; ///< Determines whether the server is ready to be used + PORT_MEM_ALLOCATOR *pAllocator; ///< Allocator to use for all objects allocated by the server + + PORT_RWLOCK *pClientListLock; ///< Lock that needs to be taken when accessing the client list + + PORT_SPINLOCK *pShareMapLock; ///< Lock that needs to be taken when accessing the shared resource map + RsSharedMap shareMap; ///< Map of shared resources + +#if (RS_STANDALONE) + NvU64 topLockOwnerTid; ///< Thread id of top-lock owner + PORT_RWLOCK *pTopLock; ///< Top-level resource server lock + PORT_RWLOCK *pResLock; ///< Resource-level resource server lock +#if LOCK_VAL_ENABLED + LOCK_VAL_LOCK topLockVal; + LOCK_VAL_LOCK resLockVal; +#endif +#endif + + /// Print out a list of all resources that will be freed when a free request is made + NvBool bDebugFreeList; + + /// If true, control call param copies will be performed outside the top/api lock + NvBool bUnlockedParamCopy; + + /** + * Setting this flag to false disables any attempts to + * automatically acquire access rights or to control access to resources by + * checking for access rights. 
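+ *
+ * Illustrative sketch only (privilegeLevel is a placeholder, and this header
+ * does not prescribe when the flag is changed): a user of the library that
+ * does not want access-rights enforcement might clear the flag right after
+ * constructing the server:
+ * ~~~{.c}
+ * serverConstruct(pServer, privilegeLevel, 0);
+ * pServer->bRsAccessEnabled = NV_FALSE;
+ * ~~~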
+ */ + NvBool bRsAccessEnabled; + + /** + * Mask of interfaces (RS_API_*) that will use a read-only top lock by default + */ + NvU32 roTopLockApiMask; + + /// Share policies which clients default to when no other policies are used + RsShareList defaultInheritedSharePolicyList; + /// Share policies to apply to all shares, regardless of other policies + RsShareList globalInternalSharePolicyList; + + NvU32 internalHandleBase; + + NvU32 activeClientCount; + NvU64 activeResourceCount; +}; + +/** + * Construct a server instance. This must be performed before any other server + * operation. + * + * @param[in] pServer This server instance + * @param[in] privilegeLevel Privilege level for this resource server instance + * @param[in] maxDomains Maximum number of domains to support, or 0 for the default + */ +NV_STATUS serverConstruct(RsServer *pServer, RS_PRIV_LEVEL privilegeLevel, NvU32 maxDomains); + +/** + * Destroy a server instance. Destructing a server does not guarantee that child domains + * and clients will be appropriately freed. serverFreeDomain should be explicitly called + * on all allocated domains to ensure all clients and resources get cleaned up. + * + * @param[in] pServer This server instance + */ +NV_STATUS serverDestruct(RsServer *pServer); + +/** + * Allocate a domain handle. Domain handles are used to track clients created by a domain. + * + * @param[in] pServer This server instance + * @param[in] hParentDomain + * @param[in] pAccessControl + * @param[out] phDomain + * + */ +NV_STATUS serverAllocDomain(RsServer *pServer, NvU32 hParentDomain, ACCESS_CONTROL *pAccessControl, NvHandle *phDomain); + +/** + * Verify that the calling user is allowed to perform the access. This check only + * applies to calls from RING_USER or RING_KERNEL. No check is performed in + * RING_HOST. + * + * @param[in] pServer This server instance + * @param[in] hDomain + * @param[in] hClient + * + */ +NV_STATUS serverValidate(RsServer *pServer, NvU32 hDomain, NvHandle hClient); + +/** + * Verify that the domain has sufficient permission to allocate the given class. + * @param[in] pServer + * @param[in] hDomain + * @param[in] externalClassId External resource class id + */ +NV_STATUS serverValidateAlloc(RsServer *pServer, NvU32 hDomain, NvU32 externalClassId); + +/** + * Free a domain handle. All clients of this domain will be freed. + * + * @param[in] pServer This server instance + * @param[in] hDomain The handle of the domain to free + */ +NV_STATUS serverFreeDomain(RsServer *pServer, NvHandle hDomain); + +/** + * Allocate a client handle. A client handle is required to allocate resources. + * + * @param[in] pServer This server instance + * @param[inout] pParams Client allocation parameters + */ +NV_STATUS serverAllocClient(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pParams); + +/** + * Free a client handle. All resources references owned by the client will be + * freed. + * + * It is invalid to attempt to free a client from a user other than the one + * that allocated it. + * + * @param[in] pServer This server instance + * @param[in] pParams Client free params + */ +NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams); + +/** + * Free a list of client handles. All resources references owned by the client will be + * freed. All priority resources will be freed first across all listed clients. + * + * It is invalid to attempt to free a client from a user other than the one + * that allocated it. 
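+ *
+ * A minimal calling sketch, for illustration only (hClientA, hClientB and
+ * secInfo are placeholders, and 0 is just an example free state):
+ * ~~~{.c}
+ * NvHandle hClients[2] = { hClientA, hClientB };
+ *
+ * NV_STATUS status = serverFreeClientList(pServer, hClients, 2, 0, &secInfo);
+ * ~~~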
+ * + * @param[in] pServer This server instance + * @param[in] phClientList The list of client handles to free + * @param[in] numClients The number of clients in the list + * @param[in] freeState User-defined free state + * @param[in] pSecInfo Security Info + * + */ +NV_STATUS serverFreeClientList(RsServer *pServer, NvHandle *phClientList, NvU32 numClients, NvU32 freeState, API_SECURITY_INFO *pSecInfo); + +/** + * Allocate a resource. + * + * It is invalid to attempt to allocate a client from a user other than the one + * that allocated it. + * + * @param[in] pServer This server instance + * @param[inout] pParams The allocation parameters + */ +NV_STATUS serverAllocResource(RsServer *pServer, RS_RES_ALLOC_PARAMS *params); + +/** + * Allocate a ref-counted resource share. + * + * @param[in] pServer + * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared) + * @param[out] ppShare Allocated share + */ +NV_STATUS serverAllocShare(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, RsShared **ppShare); + +/** + * Allocate a ref-counted resource share with Halspec parent. + * + * @param[in] pServer + * @param[in] pClassInfo NVOC class info for the shared class (must derive from RsShared) + * @param[out] ppShare Allocated share + * @param[in] pHalspecParent Parent object whose Halspec can be used for the shared class object + */ +NV_STATUS serverAllocShareWithHalspecParent(RsServer *pServer, const NVOC_CLASS_INFO* pClassInfo, RsShared **ppShare, Object *pHalspecParent); + +/** + * Get the ref-count of a resource share. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NvS32 serverGetShareRefCount(RsServer *pServer, RsShared *pShare); + +/** + * Increment the ref-count of a resource share. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NV_STATUS serverRefShare(RsServer *pServer, RsShared *pShare); + +/** + * Decrement the ref-count of a resource share. If the ref-count + * has reached zero, the resource share will be freed. + * + * @param[in] pServer + * @param[in] pShare Resource share + */ +NV_STATUS serverFreeShare(RsServer *pServer, RsShared *pShare); + +/** + * Get an iterator to the elements in the server's shared object map + * @param[in] pServer + * @param[in] internalClassId If non-zero, only RsShared that are (or can be + * derived from) the specified class will be returned + */ +RS_SHARE_ITERATOR serverShareIter(RsServer *pServer, NvU32 internalClassId); + +/** + * Get an iterator to the elements in the server's shared object map + */ +NvBool serverShareIterNext(RS_SHARE_ITERATOR*); + + +/** + * Allocate a resource. Assumes top-level lock has been taken. + * + * It is invalid to attempt to allocate a client from a user other than the one + * that allocated it. User-implemented. + * + * @param[in] pServer This server instance + * @param[inout] pParams The allocation parameters + */ +extern NV_STATUS serverAllocResourceUnderLock(RsServer *pServer, RS_RES_ALLOC_PARAMS *pAllocParams); + +/** + * Call Free RPC for given resource. Assumes top-level lock has been taken. + * + * @param[in] pServer This server instance + * @param[inout] pFreeParams The Free parameters + */ +extern NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams); + +/** + * Copy-in parameters supplied by caller, and initialize API state. User-implemented. 
+ * @param[in] pServer + * @param[in] pAllocParams Resource allocation parameters + * @param[out] ppApiState User-defined API_STATE; should be allocated by this function + */ +extern NV_STATUS serverAllocApiCopyIn(RsServer *pServer, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, API_STATE **ppApiState); + +/** + * Copy-out parameters supplied by caller, and release API state. User-implemented. + * @param[in] pServer + * @param[in] status Status of allocation request + * @param[in] pApiState API_STATE for the allocation + */ +extern NV_STATUS serverAllocApiCopyOut(RsServer *pServer, NV_STATUS status, API_STATE *pApiState); + +/** + * Obtain a second client handle to lock if required for the allocation. + * @param[in] pParams Resource allocation parameters + * @param[in] phClient Client to lock, if any + */ +extern NV_STATUS serverLookupSecondClient(RS_RES_ALLOC_PARAMS_INTERNAL *pParams, NvHandle *phClient); + +/** + * Acquires a top-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + */ +extern NV_STATUS serverTopLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a top-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverTopLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a session lock. + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pResourceRef Resource reference to take session locks on + * @param[inout] pLockInfo Lock state + */ +extern NV_STATUS serverSessionLock_Prologue(LOCK_ACCESS_TYPE access, RsResourceRef *pResourceRef, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a session lock. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverSessionLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Acquires a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Output flags indicating the locks that need to be released + */ +extern NV_STATUS serverResLock_Prologue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * Releases a resource-level lock. User-implemented. + * @param[in] pServer + * @param[in] access LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +extern void serverResLock_Epilogue(RsServer *pServer, LOCK_ACCESS_TYPE access, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags); + +/** + * WAR for additional tasks that must be performed after resource-level locks are released. User-implemented. 
+ * @param[inout] status Allocation status + * @param[in] bClientAlloc Caller is attempting to allocate a client + * @param[inout] pParams Allocation parameters + */ +extern NV_STATUS serverAllocEpilogue_WAR(RsServer *pServer, NV_STATUS status, NvBool bClientAlloc, RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams); + +/** + * Free a resource reference and all of its descendants. This will decrease the + * resource's reference count. The resource itself will only be freed if there + * are no more references to it. + * + * It is invalid to attempt to free a resource from a user other than the one that allocated it. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Same as serverFreeResourceTree except the top-level lock is assumed to have been taken. + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags in the dup parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Dup parameters + */ +extern NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Updates the lock flags in the free parameters + * + * @param[in] pServer This server instance + * @param[in] pParams Free parameters + */ +extern NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams); + +/** + * Updates the lock flags for automatic inter-unmap during free + * + * @param[in] pServer This server instance + * @param[inout] pParams Unmap params, contained pLockInfo will be modified + */ +extern NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams); + +/** + * Initialize parameters for a recursive call to serverFreeResourceTree. User-implemented. + * @param[in] hClient + * @param[in] hResource + * @param[inout] pParams + */ +extern NV_STATUS serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams); + +/** + * Common operations performed after top locks and client locks are taken, but before + * the control call is executed. This includes validating the control call cookie, + * looking up locking flags, parameter copy-in, and taking resource locks. + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[in] pAccess Lock access type + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + */ +NV_STATUS serverControl_Prologue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE *pAccess, NvU32 *pReleaseFlags); + +/** + * Common operations performed after the control call is executed. This + * includes releasing locks and parameter copy-out. 
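+ *
+ * A hypothetical dispatcher (illustrative only; controlHandler is a
+ * placeholder for the exported method) pairs the prologue and epilogue
+ * around the actual control call:
+ * ~~~{.c}
+ * LOCK_ACCESS_TYPE access = LOCK_ACCESS_READ;
+ * NvU32 releaseFlags = 0;
+ *
+ * NV_STATUS status = serverControl_Prologue(pServer, pParams, &access, &releaseFlags);
+ * if (status == NV_OK)
+ *     status = controlHandler(pParams);
+ * status = serverControl_Epilogue(pServer, pParams, access, &releaseFlags, status);
+ * ~~~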
+ * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[in] pAccess Lock access type + * @param[inout] pReleaseFlags Flags that indicate which locks were taken + * @param[in] status Control call status + */ +NV_STATUS serverControl_Epilogue(RsServer *pServer, RS_RES_CONTROL_PARAMS_INTERNAL *pParams, LOCK_ACCESS_TYPE access, NvU32 *pReleaseFlags, NV_STATUS status); + +/** + * Initialize a NVOC export control call cookie + * + * @param[in] pExportedEntry + * @param[inout] pCookie + */ +extern void serverControl_InitCookie(const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, RS_CONTROL_COOKIE *pCookie); + +/** + * Validate a NVOC export control call cookie + * + * @param[in] pParams + * @param[inout] pCookie + */ +extern NV_STATUS serverControl_ValidateCookie(RS_RES_CONTROL_PARAMS_INTERNAL *pParams, RS_CONTROL_COOKIE *pCookie); + +/** + * Copy-in control call parameters + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[inout] pCookie Control call cookie + */ +extern NV_STATUS serverControlApiCopyIn(RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie); + +/** + * Copy-out control call parameters + * + * @param[in] pServer ResServ instance + * @param[in] pParams Control call parameters + * @param[inout] pCookie Control call cookie + */ +extern NV_STATUS serverControlApiCopyOut(RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus); + +/** + * Determine whether an API supports a read-only lock for a given lock + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] api RS_API* + */ +NvBool serverSupportsReadOnlyLock(RsServer *pServer, RS_LOCK_ENUM lock, RS_API_ENUM api); + +/** + * Determine whether the current thread has taken the RW API lock + * @param[in] pServer ResServ instance + */ +extern NvBool serverRwApiLockIsOwner(RsServer *pServer); + +/** + * Lookup locking flags for a resource alloc + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverAllocResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess); +/** + * + * Lookup level locking flags for a resource free + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverFreeResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for a resource copy + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Allocation parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverCopyResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for a resource access share + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Share parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverShareResourceLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking 
flags for a control call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Control call parameters + * @param[in] pCookie Control call cookie + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverControlLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for a map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Lookup locking flags for an unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams CPU unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for an inter-resource map call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource map parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterMapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * + * Lookup locking flags for an inter-resource unmap call + * + * @param[in] pServer ResServ instance + * @param[in] lock RS_LOCK_* + * @param[in] pParams Inter-resource unmap parameters + * @param[out] pAccess Computed lock access + */ +extern NV_STATUS serverInterUnmapLookupLockFlags(RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess); + +/** + * Fill the server's share policy lists with any default or global policies needed + */ +extern NV_STATUS serverInitGlobalSharePolicies(RsServer *pServer); + +/** + * Issue a control command to a resource + * + * @param[in] pServer This server instance + * @param[in] pParams Control parameters + */ +NV_STATUS serverControl(RsServer *pServer, RS_RES_CONTROL_PARAMS *pParams); + +/** + * Copy a resource owned by one client into another client. + * + * The clients must be in the same client handle space. The underlying + * resource is not duplicated, but it is refcounted so the resource will + * not be freed until the reference count hits zero. + * + * Copying a resource will fail if the user making the call does not own + * the source client. + * + * @param[in] pServer This server instance + * @param[inout] pParams Resource sharing parameters + */ +NV_STATUS serverCopyResource(RsServer *pServer, RS_RES_DUP_PARAMS *pParams); + +/** + * Share certain access rights to a resource with other clients using the provided share policy + * + * The policy entry passed in will be added to the object's share policy list. + * If the bRevoke is true, the policy will be removed instead. + * + * Sharing will fail if the user making the call does not own the source client. + * + * @param[in] pServer This server instance + * @param[in] pParams Resource sharing parameters + */ +NV_STATUS serverShareResourceAccess(RsServer *pServer, RS_RES_SHARE_PARAMS *pParams); + +/** + * Creates a CPU mapping of the resource in the virtual address space of the process. 
+ *
+ * Not all resources support mapping.
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hClient Client handle of the resource to map
+ * @param[in] hResource Handle of the resource to map
+ * @param[inout] pParams CPU mapping parameters
+ */
+NV_STATUS serverMap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_MAP_PARAMS *pParams);
+
+/**
+ * Release a CPU virtual address mapping
+ *
+ * @param[in] pServer This server instance
+ * @param[in] hClient Client handle of the resource to unmap
+ * @param[in] hResource Handle of the resource to unmap
+ * @param[in] pParams CPU unmapping parameters
+ */
+NV_STATUS serverUnmap(RsServer *pServer, NvHandle hClient, NvHandle hResource, RS_CPU_UNMAP_PARAMS *pParams);
+
+/**
+ * Pre-map operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in] pServer
+ * @param[inout] pMapParams CPU mapping parameters
+ */
+NV_STATUS serverMap_Prologue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams);
+
+/**
+ * Post-map operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in] pServer
+ * @param[inout] pMapParams CPU mapping parameters
+ */
+void serverMap_Epilogue(RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams);
+
+/**
+ * Pre-unmap operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in] pServer
+ * @param[inout] pUnmapParams CPU unmapping parameters
+ */
+NV_STATUS serverUnmap_Prologue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams);
+
+/**
+ * Post-unmap operations. Called with top/client locks acquired
+ * but not resource locks.
+ *
+ * @param[in] pServer
+ * @param[inout] pUnmapParams CPU unmapping parameters
+ */
+void serverUnmap_Epilogue(RsServer *pServer, RS_CPU_UNMAP_PARAMS *pUnmapParams);
+
+/**
+ * Creates an inter-mapping between two resources
+ *
+ * Not all resources support mapping.
+ *
+ * @param[in] pServer This server instance
+ * @param[inout] pParams Inter-mapping parameters
+ */
+NV_STATUS serverInterMap(RsServer *pServer, RS_INTER_MAP_PARAMS *pParams);
+
+/**
+ * Release an inter-mapping between two resources
+ *
+ * @param[in] pServer This server instance
+ * @param[in] pParams Inter-unmapping parameters
+ */
+NV_STATUS serverInterUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams);
+
+/**
+ * Pre-inter-map operations. Called with top/client locks acquired.
+ * This function acquires resource locks.
+ *
+ * @param[in] pServer
+ * @param[in] pMapperRef The resource that can be used to create the mapping
+ * @param[in] pMappableRef The resource that can be mapped
+ * @param[inout] pMapParams Inter-mapping parameters
+ * @param[inout] pReleaseFlags Flags that indicate which locks were taken
+ */
+NV_STATUS serverInterMap_Prologue(RsServer *pServer, RsResourceRef *pMapperRef, RsResourceRef *pMappableRef, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags);
+
+/**
+ * Post-inter-map operations. Called with top, client, and resource locks acquired.
+ * This function releases resource locks.
+ *
+ * @param[in] pServer
+ * @param[inout] pMapParams Inter-mapping parameters
+ * @param[inout] pReleaseFlags Flags that indicate which locks were taken
+ */
+void serverInterMap_Epilogue(RsServer *pServer, RS_INTER_MAP_PARAMS *pMapParams, NvU32 *pReleaseFlags);
+
+/**
+ * Pre-inter-unmap operations. Called with top, client, and resource locks acquired.
+ * + * @param[in] pServer + * @param[inout] pParams mapping parameters + */ +NV_STATUS serverInterUnmap_Prologue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Post-inter-unmap operations. Called with top, client, and resource locks acquired. + * + * @param[in] pServer + * @param[inout] pParams mapping parameters + */ +void serverInterUnmap_Epilogue(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pUnmapParams); + +/** + * Acquire a client pointer from a client handle. The caller is responsible for + * ensuring that lock ordering is not violated (otherwise there can be + * deadlock): clients must be locked in increasing order of client index (not + * handle). + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[out] ppClient Pointer to the RsClient + */ +NV_STATUS serverAcquireClient(RsServer *pServer, NvHandle hClient, LOCK_ACCESS_TYPE lockAccess, RsClient **ppClient); + +/** + * Release a client pointer + * + * @param[in] pServer This server instance + * @param[in] lockAccess LOCK_ACCESS_READ or LOCK_ACCESS_WRITE + * @param[in] pClient Pointer to the RsClient + */ +NV_STATUS serverReleaseClient(RsServer *pServer, LOCK_ACCESS_TYPE lockAccess, RsClient *pClient); + +/** + * Get a client pointer from a client handle without taking any locks. + * + * @param[in] pServer This server instance + * @param[in] hClient The client to acquire + * @param[out] ppClient Pointer to the RsClient + */ +NV_STATUS serverGetClientUnderLock(RsServer *pServer, NvHandle hClient, RsClient **ppClient); + +/** + * Get the count of clients allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU32 serverGetClientCount(RsServer *pServer); + +/** + * Get the count of resources allocated under this resource server + * + * @param[in] pServer This server instance + */ +NvU64 serverGetResourceCount(RsServer *pServer); + +/** + * Swap a TLS call context entry and increment the TLS entry refcount. + * A new TLS entry for call context will be allocated if necessary. + * + * @note This should be paired with a corresponding resservRestoreTlsCallContext call + */ +NV_STATUS resservSwapTlsCallContext(CALL_CONTEXT **ppOldCallContext, CALL_CONTEXT *pNewCallContext); + +/** + * Get the current TLS call context. This will not increment a refcount on the TLS entry. + */ +CALL_CONTEXT *resservGetTlsCallContext(void); + +/** + * Set a TLS call context entry and decrement the TLS entry refcount. + * @note This should be paired with a corresponding resservSwapTlsCallContext call + */ +NV_STATUS resservRestoreTlsCallContext(CALL_CONTEXT *pOldCallContext); + +/** + * Find a resource reference of a given type from the TLS call context + * @param[in] internalClassId Only return a reference if it matches this type + * @param[in] bSearchAncestors Search parents of the call context resource ref + */ +RsResourceRef *resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/inc/libraries/tls/tls.h b/src/nvidia/inc/libraries/tls/tls.h new file mode 100644 index 000000000..eb45c2dc9 --- /dev/null +++ b/src/nvidia/inc/libraries/tls/tls.h @@ -0,0 +1,345 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Thread local storage public interface + */ + +#include "nvport/nvport.h" + +#ifndef _NV_TLS_H_ +#define _NV_TLS_H_ + +/** + * @defgroup Thread local storage operations + * + * @brief This module contains thread local storage functionality used by other + * modules. + * + * @par Module dependencies: + * - NvPort (UTIL, ATOMIC, MEMORY, SYNC and THREAD modules) + * - NvContainers (Map) + * - NvUtils (NV_PRINTF and NV_ASSERT) + * + * @par TLS architecture: + * A base TLS allocation unit is an Entry (@ref TLS_ENTRY). Entries are local + * to a thread and are identified by a 64bit ID. Entries are lazy-allocated + * and refcounted. All entries for a given thread are organized in one Map - + * i.e. TLS has as many Maps active as there are threads; each map is + * inherently single-threaded. The Map for a given thread ID is obtained by + * searching a map of all threads with thread ID as key. + * The whole TLS system can be thought of as: + * map> + * + * @par Complexity: + * All operations are O(log(numActiveThreads) + log(numEntriesForGivenThread)) + * + * @par A note on ISRs and DPCs + * Interrupt Service Routines (and in some cases Deferred Procedure Calls) do + * not have their own thread IDs - they can have the same ID as a regular + * thread. Because of this, they are kept in a separate map indexed by their + * stack pointer instead of thread ID. Because getting the exact base of the + * ISR stack can be difficult, when searching we use the closest one, in the + * direction of stack growth. This assumes that the given entry always exists, + * so ISR thread entries are preallocated with @ref tlsIsrInit. + * + * An example of how this works: + * ~~~{.c} + * if (is_isr()) + * return isr_map.find(get_approx_sp()); + * else + * return thread_map.find(get_thread_id()); + * ~~~ + * The exact definition of is_isr() varies by platform, but generally means + * "if it does not have a unique thread ID". Threaded IRQs are not ISRs. + * + * @par Locking: + * Currently, TLS has two spinlocks - separate locks for ISR and passive + * thread maps. This will be changed to RW-spinlocks in the future. + * We cannot use RW sleeper locks in passive threads, since they may modify + * their IRQL and thus be unable to acquire them, even conditionally. + * + * In cases where ISRs are not allowed to acquire a spinlock at all, the ISR + * map is implemented in a lockless fashion. 
This is slower than the locked + * implementation (O(maxIsrs)), but works in cases where all locks are banned. + * + * + * @{ + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @note Only returned in cases of irregular order of public API calls. + */ +#define TLS_ERROR_VAL ~0 + +/** + * @brief Global TLS structure initialization. + * + * Must be called before any TLS functions can be called. + * + * If this function returns an error then calling any TLS function will result + * in undefined behavior. + * + * Called on RmInitRm(). + * @return NV_OK if successful; + * @return Error code otherwise. + * + */ +NV_STATUS tlsInitialize(void); + +/** + * @brief Global TLS structure termination. + * + * It frees resources allocated by tlsInitialize. + * Called on RmDestroyRm(). + * + */ +void tlsShutdown(void); + +enum { + TLS_ENTRY_ID_THREADSTATE, + TLS_ENTRY_ID_RESSERV_1, + TLS_ENTRY_ID_CURRENT_GPU_INSTANCE, + TLS_ENTRY_ID_PRIORITY, + TLS_ENTRY_ID_DYNAMIC, // dynamic allocations start here + TLS_ENTRY_ID_TAG_START = 0x100000 // Custom tags start here +}; +/** + * @brief Allocates a new entry spot and returns a unique entry ID. + * + * Ids are unique for all threads. + * + * @return 0 if all ids are used; + * @return unique id otherwise. + * + */ +NvU64 tlsEntryAlloc(void); + +/** + * @brief Get pointer to TLS entry for given @p entryId. + * + * This function increments the refCount of the given entry. + * + * @return NULL if @p entryId is invalid (Not returned by @ref tlsEntryAlloc), + * in case of not enough memory. + * @return Pointer to a void* the users can use to point to custom structure. + * + * Example usage: + * ~~~{.c} + * NvU64 id = tlsEntryAlloc(); + * MY_THREAD_DATA **ppData = tlsEntryAcquire(id); + * if (**ppData == NULL) + * *ppData = portMemAllocNonPaged(sizeof(MY_THREAD_DATA)) + * ~~~ + * + * @note On first call for given @p entryId, the dereferenced (user) pointer + * will be set to NULL - (*tlsEntryAcquire(x) == NULL) + * + */ +NvP64 *tlsEntryAcquire(NvU64 entryId); + +/** + * @brief Like @ref tlsEntryAcquire, but memory is allocated using @p pAllocator. + * + * @note Should be used only when performance is important in allocation or + * when a spinlock is acquired in a non ISR thread and there is a need for the tls. + * + * @note pAllocator should be thread safe. + */ +NvP64 *tlsEntryAcquireWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pAllocator); + +/** + * @brief Release the TLS entry for given @p entryId. + * + * This functions decrements the refCount of the given entry. + * + * @return refCount after releasing the structure if @p entryId is valid, + * @return TLS_ERROR_VAL if TLS entry for given @p entryId doesn't exist. + * + * ~~~{.c} + * if (tlsEntryRelease(id) == 0) + * portMemFree(*ppData); + * ~~~ + */ +NvU32 tlsEntryRelease(NvU64 entryId); + +/** + * @brief Like @ref tlsEntryRelease, but memory is allocated using @p pAllocator. + * + * @note Should be used only when performance is important in allocation or + * when a spinlock is acquired in a non ISR thread and there is a need for the tls. + * + * @note @p pAllocator should be thread safe. + */ +NvU32 tlsEntryReleaseWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pAllocator); + +/** + * @brief Get pointer to TLS data for given entryId. + * + * This function will not modify the refCount, and does not return a double + * pointer required to set the entry value. + * + * @return NULL if the entry doesn't exist. + * @return Otherwise pointer on user's custom structure. 
+ * + * Example usage: + * ~~~{.c} + * NvU64 id = tlsEntryAlloc(); + * MY_THREAD_DATA **ppData = tlsEntryAcquire(id); + * if (**ppData == NULL) + * { + * *ppData = portMemAllocNonPaged(sizeof(MY_THREAD_DATA)) + * *ppData->myData = 1; + * } + * MY_THREAD_DATA *pData = tlsEntryGet(id); + * if (pData->myData == 1) + * { + * ... + * } + * ~~~ + * + */ +NvP64 tlsEntryGet(NvU64 entryId); + +/** + * @brief Increment the refCount of given TLS entry. + * + * If an entry with given entryId doesn't exist, this function does nothing. + * + * This is useful when the code requires a call to a function that might call + * @ref tlsEntryRelease, but TLS should not be freed. An example might be when + * calling a function that acquires the GPU lock while already holding the lock. + * Currently, the code will temporarily release the lock, so the nested function + * acquires it again. Since rmGpuLock{Acquire,Release} acquires/releases TLS, + * this release could cause the data to be freed. + * + * @return TLS_ERROR_VAL if the entry doesn't exist. + * @return New TLS entry refCount, after increment. + * + */ +NvU32 tlsEntryReference(NvU64 entryId); + +/** + * @brief Decrement the refCount of given TLS entry. + * + * If an entry with given entryId doesn't exist, this function does nothing. + * See @ref tlsEntryReference for details. + * + * @return TLS_ERROR_VAL if the entry doesn't exist. + * @return New TLS entry refCount, after decrement. + * + */ +NvU32 tlsEntryUnreference(NvU64 entryId); + +/// @brief Size of memory to preallocate on ISR stack for TLS +#if PORT_IS_CHECKED_BUILD +// Checked builds have per-allocation overhead for tracking +#define TLS_ISR_ALLOCATOR_SIZE 512 +#else +#if defined(LOCK_VAL_ENABLED) + #define TLS_ISR_ALLOCATOR_SIZE 400 +#else + #define TLS_ISR_ALLOCATOR_SIZE 256 +#endif +#endif + +/** + * @brief Allocates thread id for current ISR thread. + * + * @note Function should be called on the beginning of ISR, as early as possible + * + */ +void tlsIsrInit(PORT_MEM_ALLOCATOR *pIsrAllocator); + +/** + * @brief Destroys thread id for current ISR thread. + * + * @note should be called at end of ISR. Must be NOINLINE because if it gets + * inlined and tlsIsrInit doesn't, SP order can be wrong. + */ +NV_NOINLINE void tlsIsrDestroy(PORT_MEM_ALLOCATOR *pIsrAllocator); + +/** + * @brief Returns allocator that can be used for allocations of memory in ISR + * threads. + * In case this function is called outside of ISR NULL will be returned. + * @note Should be called between tlsIsrInit and tlsIsrDestroy if you are in ISR, + * otherwise it will ASSERT and return NULL. + */ +PORT_MEM_ALLOCATOR *tlsIsrAllocatorGet(void); + +/** + * @brief Set if DPCs have a unique thread ID that can be acquired by + * @ref portThreadGetCurrentThreadId. Windows DPCs have the same thread ID + * as the thread they preempted, so they are treated like ISRs. + * + * This isn't used by the TLS implementation, but is needed to decide whether + * the DPCs should call @ref tlsIsrInit + */ +#if PORT_IS_KERNEL_BUILD && !defined(NV_MODS) && NVOS_IS_WINDOWS +#define TLS_DPC_HAVE_UNIQUE_ID 0 +#else +#define TLS_DPC_HAVE_UNIQUE_ID 1 +#endif + +/** + * @brief Set if threads can modify their own IRQL/interrupt context. + * On such builds, we cannot use @ref portUtilIsInterruptContext to decide + * whether a given thread is an ISR or a passive thread, and instead use a + * per-CPU ISR counter. 
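+ *
+ * Regardless of how ISR detection is implemented, an interrupt handler is
+ * expected to bracket its TLS usage with tlsIsrInit()/tlsIsrDestroy(). A
+ * purely illustrative sketch (backing the allocator with a stack buffer via
+ * portMemAllocatorCreateOnExistingBlock()/portMemAllocatorRelease() is an
+ * assumption, not a requirement of this header):
+ * ~~~{.c}
+ * NvU8 stackBuffer[TLS_ISR_ALLOCATOR_SIZE];
+ * PORT_MEM_ALLOCATOR *pIsrAllocator =
+ *     portMemAllocatorCreateOnExistingBlock(stackBuffer, sizeof(stackBuffer));
+ *
+ * tlsIsrInit(pIsrAllocator);
+ * // ... service the interrupt; tlsIsrAllocatorGet() may be used here ...
+ * tlsIsrDestroy(pIsrAllocator);
+ * portMemAllocatorRelease(pIsrAllocator);
+ * ~~~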
+ */ +#if PORT_IS_KERNEL_BUILD && (defined(NV_MODS) || NVOS_IS_WINDOWS) +#define TLS_THREADS_CAN_RAISE_IRQL 1 +#else +#define TLS_THREADS_CAN_RAISE_IRQL 0 +#endif + +/** + * @brief Set if ISRs are allowed to acquire a spinlock. On VMWare, the top + * level interrupt handler (ACK function) is not allowed to hold the spinlock + * for any amount of time (enforced by validation suite), so it uses a slower + * lockless implementation. + */ +#if PORT_IS_KERNEL_BUILD && NVOS_IS_VMWARE +#define TLS_ISR_CAN_USE_LOCK 0 +#else +#define TLS_ISR_CAN_USE_LOCK 1 +#endif + +/// @brief If set, a copy of THREAD_STATE_NODE pointer will be kept in TLS. +#ifndef TLS_MIRROR_THREADSTATE +#define TLS_MIRROR_THREADSTATE 0 +#endif + +#ifdef __cplusplus +} +#endif + +///@} + +#endif diff --git a/src/nvidia/inc/libraries/utils/nv_enum.h b/src/nvidia/inc/libraries/utils/nv_enum.h new file mode 100644 index 000000000..873200a49 --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nv_enum.h @@ -0,0 +1,684 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file nv_enum.h + * @brief A header providing code-generation tools to define an enumerated type + * from a specification of a potentially-nested enum of limited depth. + * @see confluence page "Proposal for Better Enum Generation (NV_ENUM) Version 1.0" + */ + +/** + * @defgroup NV_UTILS_ENUM Infrastructure for generating better enumerated values. + * + * @brief Generates symbols comprising an enumerated type given a list of + * entries provided via macro argument. + * + * This file exposes macro functions which generate enum types and associated + * metadata from an enum specification consisting of entry names and values, + * with support for nesting enums up to a maximum depth of 2. The associated + * metadata generated from the enum specification allows for conversion of + * contiguous enums (those without holes within their valid value range) to and + * from indices, iteration over enum values (for each loop) and runtime + * determination of whether a given integer is a valid enum value. Additionally, + * macros are provided to "export" an enum such that only the entry names and + * values are defined, and no name is given to the enum. 
This is useful in + * situations where two different APIs utilize the same enum definition, such as + * in the RM SDK where enums are disallowed in control call parameters, but are + * very much desired inside of the driver. + * @{ + */ + +#ifndef NV_ENUM_H_ +#define NV_ENUM_H_ + +#define NV_ENUM_MIN ((NvS64) 0) +#define NV_ENUM_MAX ((NvS64)0xFFFFFFFF) + +/** @brief Fully expands both arguments, then concatenates them. */ +#define NV_ENUM_CONCATENATE(a, b) NV_ENUM_CONCATENATE2(a, b) +#define NV_ENUM_CONCATENATE2(a, b) _NV_ENUM_CONCATENATE(a, b) +#define _NV_ENUM_CONCATENATE(a, b) a##b + +/** @brief Fully expands the given argument, then stringifies it. */ +#define NV_ENUM_STRINGIFY(s) _NV_ENUM_STRINGIFY(s) +#define _NV_ENUM_STRINGIFY(s) #s + +/** @brief Expands the given argument. */ +#define NV_EXPAND_ONE(x) x + +/** @brief Discards the given argument, expands to nothing. */ +#define NV_DISCARD_ONE(x) + + +/** + * @brief Fully expands both arguments, then attempts to invoke parameter `a` as + * a macro with parameter `b` as its argument. + * + * @param a : Macro Macro to apply + * @param b : Argument List Arguments to pass to macro `a`, surrounded by parentehses + */ +#define NV_ENUM_APPLY(a, b) _NV_ENUM_APPLY(a, b) +#define _NV_ENUM_APPLY(a, b) a b + +/** @brief expands to the Nth argument */ +#define NV_ENUM_A1(a, b, c, d, e, f) a +#define NV_ENUM_A2(a, b, c, d, e, f) b +#define NV_ENUM_A3(a, b, c, d, e, f) c +#define NV_ENUM_A4(a, b, c, d, e, f) d +#define NV_ENUM_A5(a, b, c, d, e, f) e +#define NV_ENUM_A6(a, b, c, d, e, f) f + +/** + * @brief Expands to an argument list containing 6 elements with argument `b` + * moved to the last place. + */ +#define NV_ENUM_DL_POP(a, b, c, d, e, f) (a, c, d, e, f, b) + +/** + * @brief Expands to argument list `l` with its first element replaced by + * parameter `r` + */ +#define NV_ENUM_NV_ENUM_REPLACE_1(r, l) (r, NV_ENUM_APPLY(NV_ENUM_A2, l), NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), NV_ENUM_APPLY(NV_ENUM_A5, l), NV_ENUM_APPLY(NV_ENUM_A6, l)) + +/** + * @brief Expands to argument list `l` with its first element replaced by + * parameter `r1`, its fifth argument replaced by parameter `r5`, and its + * sixth argument replaced by parameter `r6` + */ +#define NV_ENUM_REPLACE_3(r1, r5, r6, l) (r1, NV_ENUM_APPLY(NV_ENUM_A2, l), NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), r5, r6) + +/** + * @brief Expands to argument list `l` with its first element replaced by + * parameter `r1`, its second argument replaced by parameter `r2`, its + * fifth argument replaced by parameter `r5`, and its sixth argument + * replaced by parameter `r6` + */ +#define NV_ENUM_REPLACE_4(r1, r2, r5, r6, l) (r1, r2, NV_ENUM_APPLY(NV_ENUM_A3, l), NV_ENUM_APPLY(NV_ENUM_A4, l), r5, r6) + + +/*! + * @brief Convenience LISP-like wrappers for CAR and CDR + * + * @note For those unfamiliar with LISP, most LISP interpreters allow for + * convenient macros which expand to nested invocations of CAR and CDR, + * formed by specifying 'A' and 'D' in any order between 'C' and 'R'. A + * regular expression which identifies this pattern is: 'C(A|D)+R'. The + * order of operations is performed from right to left, e.g. CAAADR + * applies CDR, then CAR, then CAR, then CAR. These are used to unpack + * data at specific locations within nested lists, which this tool uses + * often. There is no such thing as a meta-macro in the c preprocessor, so + * we have defined the operations which we use frequently here. 
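+ *
+ * @note As a purely illustrative example (not taken from the original
+ *       comment), given a nested argument list the accessors unpack as:
+ * ~~~{.c}
+ * // NV_CAAR(((a, b, c, d, e, f), x, y, z, w, v))  expands to a
+ * // NV_CADAR(((a, b, c, d, e, f), x, y, z, w, v)) expands to b
+ * ~~~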
+ * + * @note instead of LISP-style structured lists which are formatted as + * records containing two elements each (e.g. (car, (cdr, ()))), this tool + * uses preprocessor argument lists (e.g. (car, cdr, etc)) because the + * former require proper recursion to deal with, which this tool does not + * have available to it. + * + * @note Because some compilers do not support variadic macros, we cannot use + * the generic versions of CAR and CDR here, so we have replaced them + * with very specific size-restricted versions. + */ +#define NV_CAAR(l) NV_ENUM_APPLY(NV_ENUM_A1, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADAR(l) NV_ENUM_APPLY(NV_ENUM_A2, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDAR(l) NV_ENUM_APPLY(NV_ENUM_A3, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDAR(l) NV_ENUM_APPLY(NV_ENUM_A4, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDDAR(l) NV_ENUM_APPLY(NV_ENUM_A5, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CADDDDDAR(l) NV_ENUM_APPLY(NV_ENUM_A6, NV_ENUM_APPLY(NV_ENUM_A1, l)) +#define NV_CAADR(l) NV_ENUM_APPLY(NV_ENUM_A1, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADADR(l) NV_ENUM_APPLY(NV_ENUM_A2, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDADR(l) NV_ENUM_APPLY(NV_ENUM_A3, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDDADR(l) NV_ENUM_APPLY(NV_ENUM_A4, NV_ENUM_APPLY(NV_ENUM_A2, l)) +#define NV_CADDDDADR(l) NV_ENUM_APPLY(NV_ENUM_A5, NV_ENUM_APPLY(NV_ENUM_A2, l)) + +/*! + * @brief Performs per-entry generation function, and either expands or extinguishes it + * + * @param dat__ Data List - Generation data table + * @param name Identifier - Name of enum entry + * @param value Integer Literal - Value for this entry + * + * @return the result of the generation function for this table, + * or nothing if this layer is being filtered (i.e. nested enum) + */ +#define NV_ENUM_ENTRY(dat__, name, value) \ + NV_ENUM_DAT_ENTRY(dat__) (NV_ENUM_DAT_GEN2(dat__) (dat__, name, value)) + +/*! + * @brief Expands enum entries within nested enum specification using an updated + * data list specification + * + * @note the Current Enum Name is concatenated with parameter `name` + * the function table has its first entry popped + * other variables are unchanged. 
+ * + * @param dat__ Data List - Generation data table + * @param name Token - String to append to previous enum name + * @param res_lo Integer Literal - Min value of this nested enum + * @param res_hi Integer Literal - Max value of this nested enum + * @param entries Macro - Nested Enum Specification + */ +#define NV_ENUM_NEST_EXPAND0(dat__, name, res_lo, res_hi, entries) \ + entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__))) +#define NV_ENUM_NEST_EXPAND1(dat__, name, res_lo, res_hi, entries) \ + entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__))) +#define NV_ENUM_NEST_EXPAND2(dat__, name, res_lo, res_hi, entries) \ + entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__))) +#define NV_ENUM_NEST_EXPAND3(dat__, name, res_lo, res_hi, entries) \ + entries(NV_ENUM_APPLY(NV_ENUM_DL_POP, NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_NV_ENUM_REPLACE_1(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), NV_ENUM_APPLY(NV_ENUM_A1, dat__)), dat__))) + +/*! + * @brief Performs all enum generation for the given nested enum specification + * + * @note the Current Enum Name is concatenated with parameter `name` + * the Nested Enum Name is updated to match the Current Enum Name + * Res. Min is updated with parameter `res_lo` + * Res. Max is updated with parameter `res_hi` + * the function table has its first entry popped + * other variables are unchanged + * + * @param dat__ Data List - Generation data table + * @param name Token - String to append to previous enum name + * @param res_lo Integer Literal - Min value of this nested enum + * @param res_hi Integer Literal - Max value of this nested enum + * @param entries Macro - Nested Enum Specification + */ +#define NV_ENUM_NEST_GEN(dat__, name, res_lo, res_hi, entries) \ + NV_ENUM_DAT_GEN1(dat__)( \ + NV_ENUM_APPLY( \ + NV_ENUM_DL_POP, \ + NV_ENUM_NV_ENUM_REPLACE_1( \ + NV_ENUM_REPLACE_4( \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), name), \ + res_lo, \ + res_hi, \ + NV_ENUM_APPLY( \ + NV_ENUM_A1, \ + dat__ \ + ) \ + ), \ + dat__ \ + ) \ + ), \ + entries \ + ) + +/*! + * @note Definition: Data List + * This tool packs information used in each depth of processing within a + * nested list, which is passed to each invocation of NV_ENUM_ENTRY and + * NV_ENUM_NEST. The format of this object is as follows: + * ( + * ( , <0-Depth Nested Enum Name>, , , , ) + * ( , , , ) + * ( , , , ) + * ( , , , ) + * ... 
+ * ) + * + * Root Enum Name: Name of enum passed to NV_ENUM DEF (unaffected by NV_ENUM_NEST) + * 0-Depth Nested Enum Name: Name of the enum at depth 0 (affected by NV_ENUM_NEST) + * Prefix: Text prepended to each entry name (no spaces) + * Current Enum Name: Name of the enum at current depth + * Reserved Min: The minimum allowable enum value at this depth + * Reserved Max: The maximum allowable enum value at this depth + * Entry Fn: macro called once per entry with the entry as an argument + * Nest Fn: Duplicate definition of NV_ENUM_NEST_EXPAND to WAR recursion limits + * Per-Enum Gen Fn: Function to call once per NV_ENUM_DEF or NV_ENUM_NEST + * Per-Entry Gen Fn: Function to call once per NV_ENUM_ENTRY + * + */ + +// +// Data list accessor macros +// + +/*! @brief Given data list, returns Current Enum Name */ +#define NV_ENUM_DAT_CURR_NAME(dat__) NV_CAAR(dat__) +/*! @brief Given data list, returns 0-depth nested enum name */ +#define NV_ENUM_DAT_NEST_NAME(dat__) NV_CADAR(dat__) +/*! @brief Given data list, returns Prefix */ +#define NV_ENUM_DAT_PREFIX(dat__) NV_CADDAR(dat__) +/*! @brief Given data List, returns Root Enum Name */ +#define NV_ENUM_DAT_ROOT_NAME(dat__) NV_CADDDAR(dat__) +/*! @brief Given data list, returns Res. Min at current depth */ +#define NV_ENUM_DAT_MIN(dat__) NV_CADDDDAR(dat__) +/*! @brief Given data list, returns Res. Max at current depth */ +#define NV_ENUM_DAT_MAX(dat__) NV_CADDDDDAR(dat__) +/*! @brief Given data list, returns Entry Fn at current depth */ +#define NV_ENUM_DAT_ENTRY(dat__) NV_CAADR(dat__) +/*! @brief Given data list, returns Nest Fn at current depth */ +#define NV_ENUM_NEST(dat__) NV_CADADR(dat__) +/*! @brief Given data list, returns Per-Enum Gen Fn at current depth */ +#define NV_ENUM_DAT_GEN1(dat__) NV_CADDADR(dat__) +/*! @brief Given data list, returns Per-Entry Gen Fn at current depth */ +#define NV_ENUM_DAT_GEN2(dat__) NV_CADDDADR(dat__) + +/*! + * @brief constructs a data list to be used for generation of the root enum + */ +#define NV_ENUM_DEPTH_0(name, prefix, gen1_fn, gen2_fn) \ + ( (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND3, gen1_fn, gen2_fn, unused, unused) \ + , (unused, unused, unused, unused, unused, unused) \ + ) + +/*! + * @brief constructs a data list to be used for generation of enums at depth 1 + */ +#define NV_ENUM_DEPTH_1(name, prefix, gen1_fn, gen2_fn) \ + ( (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \ + , (NV_DISCARD_ONE, NV_ENUM_NEST_GEN, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \ + , (unused, unused, unused, unused, unused, unused) \ + ) + +/*! 
+ * @brief constructs a data list to be used for generation of enums at depth 2 + */ +#define NV_ENUM_DEPTH_2(name, prefix, gen1_fn, gen2_fn) \ + ( (name, name, prefix, name, NV_ENUM_MIN, NV_ENUM_MAX) \ + , (NV_DISCARD_ONE, NV_ENUM_NEST_EXPAND0, gen1_fn, gen2_fn, unused, unused) \ + , (NV_DISCARD_ONE, NV_ENUM_NEST_GEN, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND1, gen1_fn, gen2_fn, unused, unused) \ + , (NV_EXPAND_ONE, NV_ENUM_NEST_EXPAND2, gen1_fn, gen2_fn, unused, unused) \ + , (unused, unused, unused, unused, unused, unused) \ + ) + +/// +/// Generator Functions +/// + + +/*! @brief Generates an enum type given the enum specification in entries */ +#define NV_ENUM_GEN_MAIN(dat__, entries) \ + enum NV_ENUM_DAT_CURR_NAME(dat__) { entries(dat__) }; + +/*! @brief Generates a single enum entry with the given name and value */ +#define NV_ENUM_GEN_MAIN_FN(dat__, entry_name, value) \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name) = value, + + +/*! @brief Generates an enum typedef for the given enum. All nested types receive the same typedef (i.e. the root enum */ +#define NV_ENUM_GEN_TYPEDEF(dat__, entries) \ + typedef enum NV_ENUM_DAT_ROOT_NAME(dat__) NV_ENUM_DAT_CURR_NAME(dat__); + +/*! @brief Does nothing. There is no per-entry generation for typedefs. */ +#define NV_ENUM_GEN_TYPEDEF_FN(dat__, entry_name, value) + + +/*! @brief Generates an enum with an added entry at the end to provide the enum size*/ +#define NV_ENUM_GEN_SIZE(dat__, entries) \ + enum { entries(dat__) NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE) }; + +/*! @brief Generates a single enum entry with __UNUSED appended. These values are not meant to be used. */ +#define NV_ENUM_GEN_SIZE_FN(dat__, entry_name, value) \ + NV_ENUM_CONCATENATE(NV_ENUM_CONCATENATE(NV_ENUM_DAT_NEST_NAME(dat__), entry_name), __UNUSED) = value, + + +/*! @brief Generates a conversion function from an enum value to string representation. */ +#define NV_ENUM_GEN_STRING(dat__, entries) \ + static inline const char * \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _TO_STRING) \ + ( \ + enum NV_ENUM_DAT_ROOT_NAME(dat__) in \ + ) \ + { \ + switch (in) \ + { \ + entries(dat__) \ + default: \ + break; \ + } \ + return NV_ENUM_STRINGIFY(NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __UNKNOWN)); \ + } + +/*! @brief Generates a case for the given enum entry, and its string representation. */ +#define NV_ENUM_GEN_STRING_FN(dat__, entry_name, value) \ + case NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name): \ + return NV_ENUM_STRINGIFY(NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name)); + + +/*! @brief Generates a conversion function from NvU32 to enum value. */ +#define NV_ENUM_GEN_FROM(dat__, entries) \ + static inline NV_STATUS \ + NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _FROM32) \ + ( \ + NvU32 in, \ + enum NV_ENUM_DAT_ROOT_NAME(dat__) *out \ + ) \ + { \ + switch (in) \ + { \ + entries(dat__) \ + if (out != NULL) \ + *out = ((enum NV_ENUM_DAT_ROOT_NAME(dat__))in); \ + return NV_OK; \ + default: \ + break; \ + } \ + return NV_ERR_OUT_OF_RANGE; \ + } + +/*! @brief Generates a case for the given enum entry. */ +#define NV_ENUM_GEN_FROM_FN(dat__, entry_name, value) \ + case NV_ENUM_CONCATENATE(NV_ENUM_DAT_PREFIX(dat__), entry_name): + + +/*! @brief Generates a struct constant containing the smallest value contained within the enum (plus one). 
*/ +#define NV_ENUM_GEN_LO(dat__, entries) \ + typedef struct { char lo[(1 * entries(dat__) 0 + 1)]; } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) ; + +/*! @brief Builds a portion of the expression calculating the smallest enum value. */ +#define NV_ENUM_GEN_LO_FN(dat__, entry_name, value) \ + (value)) + (0 * + + +/*! @brief Generates a struct constant containing the number of values contained within the enum. */ +#define NV_ENUM_GEN_COUNT(dat__, entries) \ + typedef struct { char count[(0 + entries(dat__) 0)]; } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) ; + +/*! @brief Builds a portion of the expression calculating the number of enum values. */ +#define NV_ENUM_GEN_COUNT_FN(dat__, entry_name, value) \ + 1 + + + +/*! @brief Generates a group of struct constants containing the above generated values. */ +#define NV_ENUM_GEN_META(dat__, entries) \ + typedef struct { char lo[sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) *) NULL)->lo)]; \ + char hi[NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE)];\ + char count[sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) *) NULL)->count)]; \ + char size[NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE)]; \ + char bContiguous[(sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __COUNT) *) NULL)->count) == (NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __SIZE) - sizeof(((NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __LO) *) NULL)->lo) + 1)) + 1]; \ + } NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), __META) ; + +/*! @brief unused. Nothing needs to be generated per-entry for this generator. */ +#define NV_ENUM_GEN_META_FN(dat__, entry_name, value) + +/*! @brief Generates a compile-time assertion. */ +#define NV_ENUM_GEN_ASSERT_MONOTONIC(dat__, entries) \ + typedef char NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _assert_monotonic)[ (2 * ((0 > entries(dat__) ((NvU32)-1)) == 0)) - 1 ]; + +/*! + * @brief Builds a portion of the expression asserting that all enum values + * must be declared in strictly monotonically increasing order. + */ +#define NV_ENUM_GEN_ASSERT_MONOTONIC_FN(dat__, entry_name, value) \ + value) + (value > + + +/*! @brief Generates a compile-time assertion. */ +#define NV_ENUM_GEN_ASSERT_IN_RANGE(dat__, entries) \ + typedef char NV_ENUM_CONCATENATE(NV_ENUM_DAT_CURR_NAME(dat__), _assert_in_range)[ (2 * ((1 * entries(dat__) 1) == 1)) - 1 ]; + +/*! + * @brief Builds a portion of the expression asserting that all nested enum + * values must be within the reserved range of their parent enum. + */ +#define NV_ENUM_GEN_ASSERT_IN_RANGE_FN(dat__, entry_name, value) \ + (((NvS64)value) >= NV_ENUM_DAT_MIN(dat__)) * (((NvS64)value) <= NV_ENUM_DAT_MAX(dat__)) * + + +/// +/// End of Generator Functions +/// + + +/*! 
+ * @brief Performs code generation for the given generator function pair
+ *
+ * @note This function must be updated if supporting deeper nesting in the future
+ *
+ * @param fn1 Macro - Per-Enum Gen Fn
+ * @param fn2 Macro - Per-Entry Gen Fn
+ * @param enum_name Token - Root Enum Name
+ * @param prefix Token - Prefix
+ * @param entries Macro - Enum Specification
+ */
+#define NV_ENUM_GENERATOR(fn1, fn2, enum_name, prefix, entries) \
+    fn1(NV_ENUM_DEPTH_0(enum_name, prefix, fn1, fn2), entries) \
+    entries(NV_ENUM_DEPTH_1(enum_name, prefix, fn1, fn2)) \
+    entries(NV_ENUM_DEPTH_2(enum_name, prefix, fn1, fn2)) \
+
+//
+// The Windows preprocessor crashes with "ran out of heap space" errors if the
+// preprocessed output from a single macro gets too large, so skip the
+// verification sanity asserts when running on Windows to increase the size of
+// representable enums
+//
+#if NVOS_IS_WINDOWS
+
+/*!
+ * @brief Generates an enum and associated metadata with the given enum name and prefix
+ *
+ * @param prefix Token - Prefix
+ * @param enum_name Token - Root Enum Name
+ * @param entries Macro - Enum Specification
+ */
+#define NV_ENUM_DEF_PREFIX(prefix, enum_name, entries) \
+    NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(enum_name, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_TYPEDEF, NV_ENUM_GEN_TYPEDEF_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_SIZE, NV_ENUM_GEN_SIZE_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_STRING, NV_ENUM_GEN_STRING_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_FROM, NV_ENUM_GEN_FROM_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_LO, NV_ENUM_GEN_LO_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_COUNT, NV_ENUM_GEN_COUNT_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_META, NV_ENUM_GEN_META_FN, enum_name, prefix, entries) \
+
+#else
+
+/*!
+ * @brief Generates an enum and associated metadata with the given enum name and prefix
+ *
+ * @param prefix Token - Prefix
+ * @param enum_name Token - Root Enum Name
+ * @param entries Macro - Enum Specification
+ */
+#define NV_ENUM_DEF_PREFIX(prefix, enum_name, entries) \
+    NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(enum_name, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_TYPEDEF, NV_ENUM_GEN_TYPEDEF_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_SIZE, NV_ENUM_GEN_SIZE_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_STRING, NV_ENUM_GEN_STRING_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_FROM, NV_ENUM_GEN_FROM_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_LO, NV_ENUM_GEN_LO_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_COUNT, NV_ENUM_GEN_COUNT_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_META, NV_ENUM_GEN_META_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_ASSERT_MONOTONIC, NV_ENUM_GEN_ASSERT_MONOTONIC_FN, enum_name, prefix, entries) \
+    NV_ENUM_GENERATOR(NV_ENUM_GEN_ASSERT_IN_RANGE, NV_ENUM_GEN_ASSERT_IN_RANGE_FN, enum_name, prefix, entries)
+
+
+
+#endif // NVOS_IS_WINDOWS
+
+#define NV_ENUM_NOTHING
+
+/*!
+ * @brief Generates an enum and associated metadata with the given enum name + * + * @param prefix Token - Prefix + * @param enum_name Token - Root Enum Name + * @param entries Macro - Enum Specification + */ +#define NV_ENUM_DEF(enum_name, entries) \ + NV_ENUM_DEF_PREFIX(NV_ENUM_NOTHING, enum_name, entries) + +/*! + * @brief Generates an exported enum with the given prefix + * + * @param prefix Token - Prefix + * @param entries Macro - Enum Specification + */ +#define NV_ENUM_EXPORT_PREFIX(prefix, entries) \ + NV_ENUM_GEN_MAIN(NV_ENUM_DEPTH_0(NV_ENUM_NOTHING, prefix, NV_ENUM_GEN_MAIN, NV_ENUM_GEN_MAIN_FN), entries) + +/*! + * @brief Generates an exported enum + * + * @param entries Macro - Enum Specification + */ +#define NV_ENUM_EXPORT(entries) \ + NV_ENUM_EXPORT_PREFIX( , entries) + + +/// +/// Runtime Utility Functions +/// + +/*! + * @brief Converrts an unsigned integer into an enum value, or returns error. + * + * @param[in] type identifier - Enum type name + * @param[in] value rvalue - integer value belonging to given enum + * @param[out] pResult pointer - Optional pointer to enum, updated with value on success + * + * @return NV_OK if the value belongs to the enum + * NV_ERR_OUT_OF_RANGE otherwise + */ +#define NV_ENUM_FROM32(type, value, pResult) \ + (NV_ENUM_CONCATENATE(type, _FROM32)((value), (pResult))) + +/*! + * @brief Returns a string representation of the name of the given enum value + * + * @param[in] type identifier - Enum type name + * @param[in] value rvalue - integer value belonging to given enum + * + * @return a string representing the given value + */ +#define NV_ENUM_TO_STRING(type, value) \ + (NV_ENUM_CONCATENATE(type, _TO_STRING)(value)) + +/*! + * @brief Queries whether the given value belongs to the given enum + * + * @param[in] type identifier - Enum type name + * @param[in] value rvalue - integer to check + * + * @return NV_TRUE if the given value is valid + * NV_FALSE otherwise + */ +#define NV_ENUM_IS(type, value) \ + (NV_OK == NV_ENUM_FROM32(type, (value), NULL)) + +/*! + * @brief Queries the value of the smallest enum entry + * + * @param[in] type identifier - Enum type name + */ +#define NV_ENUM_LO(type) \ + ((type)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->lo) - 1)) + +/*! + * @brief Queries the value of the largest enum entry + * + * @param[in] type identifier - Enum type name + */ +#define NV_ENUM_HI(type) \ + ((type)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->hi) - 1)) + +/*! + * @brief Queries the number of values between the first and last enum entries + * @note This value is large enough to use in an array declaration with enum + * entries used as indices into the array. + * + * @param[in] type identifier - Enum type name + */ +#define NV_ENUM_SIZE(type) \ + (sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->size)) + +/*! + * @brief Queries the number of values defined by the enum + * + * @param[in] type identifier - Enum type name + */ +#define NV_ENUM_COUNT(type) \ + (sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->count)) + +/*! + * @brief Queries whether or not the enum is defined contiguously (i.e. no holes) + * + * @param[in] type identifier - Enum type name + * + * @return NV_TRUE if each value between the lo and hi enum values are valid enum values + */ +#define NV_ENUM_IS_CONTIGUOUS(type) \ + ((NvBool)(sizeof(((NV_ENUM_CONCATENATE(type, __META) *) NULL)->bContiguous) - 1)) + +/*! 
+ * @brief Macros providing iteration over each value defined by the enum type + * @note Iteration is faster over contiguous enums + * + * @param[in] type identifier - Enum type name + * @param[in] value lvalue - iterator holding current enum value + */ +#define FOR_EACH_IN_ENUM(type, value) \ +{ \ + NvU32 localValue; \ + for (localValue = value = NV_ENUM_LO(type); localValue <= NV_ENUM_HI(type); (value) = (type) (++localValue)) \ + { \ + if (!NV_ENUM_IS_CONTIGUOUS(type) && !NV_ENUM_IS(type, localValue)) \ + continue; + +#define FOR_EACH_IN_ENUM_END \ + } \ +} + +/*! + * @brief Given the Nth enum value defined by the enum type, returns N + * @note Only supports contiguous enums + * + * @param[in] type identifier - Enum type name + * @param[in] value rvalue - integer value belonging to enum type + * + * @return the index at which the enum value was defined within the enum, or -1 + */ +#define NV_ENUM_TO_IDX(type, value) \ + ((NV_ENUM_IS_CONTIGUOUS(type) && NV_ENUM_IS(type, value)) ? ((value) - NV_ENUM_LO(type)) : ((NvU32)-1)) + +/*! + * @brief Returns the Nth enum value defined by the given type + * @note Only supports contiguous enums + * + * @param[in] type identifier - Enum type name + * @param[in] value rvalue - integer specifying entry index + * + * @return The Nth enum value defined within the enum, or NV_ENUM_SIZE(type) if non-existent + */ +#define NV_ENUM_FROM_IDX(type, idx) \ + ((type)((NV_ENUM_IS_CONTIGUOUS(type) && idx < NV_ENUM_COUNT(type)) ? (NV_ENUM_LO(type) + (idx)) : NV_ENUM_SIZE(type))) + +/// +/// End of Runtime Utility Functions +/// + +///@} +/// NV_UTILS_ENUM + +#endif // NV_ENUM_H_ diff --git a/src/nvidia/inc/libraries/utils/nvassert.h b/src/nvidia/inc/libraries/utils/nvassert.h new file mode 100644 index 000000000..b575373a6 --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nvassert.h @@ -0,0 +1,970 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief Utility assertion macros + * + * @see "NV_ASSERT" confluence page for additional documentation + */ + +#ifndef _NV_UTILS_ASSERT_H_ +#define _NV_UTILS_ASSERT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup NV_UTILS_ASSERT Utility Assertion Macros + * + * @brief Provides a light abstraction layer for common assert macro patterns. + * + * NvPort and NvPrintf are used for debug and logging primitives. 
+ * If an environment cannot use these directly then it can override + * the NV_PORT_HEADER and NV_PRINTF_HEADER defines in its makefile + * to point to appropriate replacements. + * @{ + */ +#include "nvstatus.h" +#include "nvmacro.h" + +// Include portability header, falling back to NvPort if not provided. +#ifndef NV_PORT_HEADER +#define NV_PORT_HEADER "nvport/nvport.h" +#endif +#include NV_PORT_HEADER + +// Include printf header, falling back to NvPrintf if not provided. +#ifndef NV_PRINTF_HEADER +#define NV_PRINTF_HEADER "utils/nvprintf.h" +#endif +#include NV_PRINTF_HEADER + +/* + * Use __builtin_expect to improve branch predictions on the GNU compiler. + * + * Note that these macros convert the parameter to bool. They should + * only be used in 'if' statements. + * + * '!= 0' is used (instead of a cast to NvBool or !!) to avoid 'will always + * evaluate as 'true'' warnings in some gcc versions. + */ +#if defined(__GNUC__) && __GNUC__ >= 3 +#define NV_LIKELY(expr) __builtin_expect(((expr) != 0), 1) +#define NV_UNLIKELY(expr) __builtin_expect(((expr) != 0), 0) +#else +#define NV_LIKELY(expr) ((expr) != 0) +#define NV_UNLIKELY(expr) ((expr) != 0) +#endif + +/* + * Set this to pass expression, function name, file name, and line number + * to the nvAssertFailed functions. + * + * NOTE: NV_PRINTF_STRINGS_ALLOWED defaults to: + * defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD) + * + * RM_ASSERT used this condition to decide whether to print assert strings: + * defined(DEBUG) || defined(ASSERT_BUILD) || defined(QA_BUILD) + */ +#if !defined(NV_ASSERT_FAILED_USES_STRINGS) +#if (NV_PRINTF_STRINGS_ALLOWED && (defined(DEBUG) || defined(ASSERT_BUILD) || defined(QA_BUILD))) +#define NV_ASSERT_FAILED_USES_STRINGS 1 +#else +#define NV_ASSERT_FAILED_USES_STRINGS 0 +#endif +#endif + +// Hook NV_ASSERT into RCDB. +#if !defined(NV_JOURNAL_ASSERT_ENABLE) +#if defined(NVRM) && (NVOS_IS_WINDOWS || NVOS_IS_UNIX || NVCPU_IS_RISCV64) && !defined(NVWATCH) && !defined(NV_MODS) +#define NV_JOURNAL_ASSERT_ENABLE 1 +#else +#define NV_JOURNAL_ASSERT_ENABLE 0 +#endif +#endif + +#if !defined(COVERITY_ASSERT_FAIL) +#if defined(__COVERITY__) +void __coverity_panic__(void); +#define COVERITY_ASSERT_FAIL() __coverity_panic__() +#else // defined(__COVERITY__) +#define COVERITY_ASSERT_FAIL() ((void) 0) +#endif // defined(__COVERITY__) +#endif // !defined(COVERITY_ASSERT_FAIL) + +const char *nvAssertStatusToString(NV_STATUS nvStatusIn); + +/* + * NV_ASSERT_FAILED, NV_ASSERT_OK_FAILED, NV_CHECK_FAILED, and NV_CHECK_OK_FAILED + * These macros are defined in three flavors: + * + * normal - expr/file/line are concatenated with format string for NVLOG. + * expr/file/line are passed in as parameters to a helper function + * for NV_PRINTF. + * + * normal for GSP-RM - expr/file/line are omitted, since each NV_PRINTF line + * already has them. NVLOG is not used. + * + * _FUNC - expr/file/line are passed in as parameters to a helper function + * for both NVLOG and NV_PRINTF. + * The _FUNC macros are used for pre-compiled headers on most platforms. + */ +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +void nvAssertInit(void); +void nvAssertDestroy(void); + +#if NV_JOURNAL_ASSERT_ENABLE +void nvAssertFailed(void); +void nvAssertOkFailed(NvU32 status); +#else +#define nvAssertFailed(...) +#define nvAssertOkFailed(...) 
+#endif + +#define NV_ASSERT_FAILED(exprStr) \ + do { \ + NV_PRINTF(LEVEL_ERROR, "Assertion failed: " exprStr "\n"); \ + nvAssertFailed(); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT(); \ + } while(0) + +#define NV_ASSERT_OK_FAILED(exprStr, status) \ + do { \ + NV_PRINTF(LEVEL_ERROR, "Assertion failed: %s (0x%08X) returned from " \ + exprStr "\n", nvAssertStatusToString(status), status); \ + nvAssertOkFailed(status); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT(); \ + } while(0) + +#define NV_CHECK_FAILED(level, exprStr) \ + NV_PRINTF(level, "Check failed: " exprStr "\n") + +#define NV_CHECK_OK_FAILED(level, exprStr, status) \ + NV_PRINTF(level, "Check failed: %s (0x%08X) returned from " exprStr "\n", \ + nvAssertStatusToString(status), status) + +#else // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +#if NV_ASSERT_FAILED_USES_STRINGS +#define NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr) , exprStr, __FILE__, __LINE__ +#define NV_ASSERT_FAILED_FUNC_PARAM(exprStr) exprStr, __FILE__, __LINE__ +#define NV_ASSERT_FAILED_FUNC_COMMA_TYPE ,const char *pszExpr, const char *pszFileName, NvU32 lineNum +#define NV_ASSERT_FAILED_FUNC_TYPE const char *pszExpr, const char *pszFileName, NvU32 lineNum +#else +#define NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr) , __LINE__ +#define NV_ASSERT_FAILED_FUNC_PARAM(exprStr) __LINE__ +#define NV_ASSERT_FAILED_FUNC_COMMA_TYPE , NvU32 lineNum +#define NV_ASSERT_FAILED_FUNC_TYPE NvU32 lineNum +#endif + +void nvAssertInit(void); +void nvAssertDestroy(void); + +// Helper function prototypes for _FAILED macros below. +#if NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE +void nvAssertFailed(NV_ASSERT_FAILED_FUNC_TYPE); +void nvAssertOkFailed(NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvCheckFailed(NvU32 level NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvCheckOkFailed(NvU32 level, NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvAssertFailedNoLog(NV_ASSERT_FAILED_FUNC_TYPE); +void nvAssertOkFailedNoLog(NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvCheckFailedNoLog(NvU32 level NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +void nvCheckOkFailedNoLog(NvU32 level, NvU32 status NV_ASSERT_FAILED_FUNC_COMMA_TYPE); +#else // NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE +#define nvAssertFailed(...) +#define nvAssertOkFailed(...) +#define nvCheckFailed(...) +#define nvCheckOkFailed(...) +#define nvAssertFailedNoLog(...) +#define nvAssertOkFailedNoLog(...) +#define nvCheckFailedNoLog(...) +#define nvCheckOkFailedNoLog(...) +#endif // NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE + +#define NV_ASSERT_LOG(level, fmt, ...) 
\ + NVLOG_PRINTF(NV_PRINTF_MODULE, NVLOG_ROUTE_RM, level, \ + NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__) + +#define NV_ASSERT_FAILED(exprStr) \ + do { \ + NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: " exprStr); \ + nvAssertFailedNoLog(NV_ASSERT_FAILED_FUNC_PARAM(exprStr)); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT_CHECKED(); \ + } while(0) + +#define NV_ASSERT_OK_FAILED(exprStr, status) \ + do { \ + NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: 0x%08X returned from " \ + exprStr, status); \ + nvAssertOkFailedNoLog(status \ + NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT_CHECKED(); \ + } while(0) + +#define NV_CHECK_FAILED(level, exprStr) \ + do { \ + NV_ASSERT_LOG(level, "Check failed: " exprStr); \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + nvCheckFailedNoLog(level \ + NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \ + } \ + } while(0) + +#define NV_CHECK_OK_FAILED(level, exprStr, status) \ + do { \ + NV_ASSERT_LOG(level, "Check failed: 0x%08X returned from " \ + exprStr, status); \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + nvCheckOkFailedNoLog(level, status \ + NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \ + } \ + } while(0) + +#define NV_ASSERT_FAILED_FUNC(exprStr) \ + do { \ + nvAssertFailed(NV_ASSERT_FAILED_FUNC_PARAM(exprStr)); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT_CHECKED(); \ + } while(0) + +#define NV_ASSERT_OK_FAILED_FUNC(exprStr, status) \ + do { \ + nvAssertOkFail(status NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)); \ + COVERITY_ASSERT_FAIL(); \ + PORT_BREAKPOINT_CHECKED(); \ + } while(0) + +#define NV_CHECK_FAILED_FUNC(level, exprStr) \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + nvCheckFailed(level NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)) \ + } + +#define NV_CHECK_OK_FAILED_FUNC(level, exprStr, status) \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + nvCheckOkFailed(level, status \ + NV_ASSERT_FAILED_FUNC_COMMA_PARAM(exprStr)) \ + } + +#endif // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +/* + * Defines for precompiled headers. + * + * On platforms other than GSP-RM, the _INLINE macros cannot be used inside + * precompiled headers due to conflicting NVLOG_PRINT_IDs. + */ +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) +#define NV_ASSERT_FAILED_PRECOMP NV_ASSERT_FAILED +#else +#define NV_ASSERT_FAILED_PRECOMP NV_ASSERT_FAILED_FUNC +#endif + +// ***************************************************************************** +// * NV_ASSERT family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Assert that an expression is true. If not, do the actions defined + * in NV_ASSERT_FAILED as well as an "other action": + * Print an error message in the debug output + * Log an error message in NvLog + * Mark as an error condition for coverity + * Breakpoint + * Log an assert record to the journal + * "Other action" as defined by each macro below. + * + * The actions are enabled or omitted based on platform and build, and the + * implementations are platform dependent. + * + * The expression is always evaluated even if assertion failures are not logged + * in the environment. Use @ref NV_ASSERT_CHECKED if the expression should only + * be evaluated in checked builds. + * + * USE GENEROUSLY FOR any condition that requires immediate action from NVIDIA. + * Expect to be ARBed on bugs when an assert you added shows up internally + * or in the field. 
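+ *
+ * For illustration only (the pointer name is hypothetical), a broken internal
+ * invariant is a good NV_ASSERT candidate:
+ * ~~~{.c}
+ * // RM allocated this object itself, so it must never be NULL here.
+ * NV_ASSERT(pKernelChannel != NULL);
+ * ~~~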
+ * + * DO NOT USE for normal run-time conditions, such as a user application + * passing a bad parameter. + */ + +/** + * Assert that an expression is true. + * + * @param[in] expr Expression that evaluates to a truth value. + */ +#define NV_ASSERT(expr) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, /* no other action */) + +/** + * Assert that an expression is true only in checked builds. + * + * @note The expression is only evaluated in checked builds so should + * not contain required side-effects. + * Also to prevent side effects, no "other action" is permitted. + * + * @param[in] expr Expression that evaluates to a truth value. + */ +#if PORT_IS_CHECKED_BUILD +#define NV_ASSERT_CHECKED(expr) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, /* no other action */) +#define NV_ASSERT_CHECKED_PRECOMP(expr) \ + NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, /* no other action */) +#else +#define NV_ASSERT_CHECKED(expr) ((void)0) +#define NV_ASSERT_CHECKED_PRECOMP(expr) ((void)0) +#endif + +/** + * Assert that an expression is true or else do something. + * + * This macro can't use NV_ASSERT_OR_ELSE_STR when NV_PRINTF is passed in as + * the elseStmt parameter. + * + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] elseStmt Statement to evaluate if the expression is false. + */ +#define NV_ASSERT_OR_ELSE(expr, elseStmt) \ + if (1) \ + { \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(!(expr))) \ + { \ + NV_ASSERT_FAILED(#expr); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } else ((void) 0) + +/** + * Assert that an expression is true or else goto a label. + * + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] label Label to jump to when the expression is false. + */ +#define NV_ASSERT_OR_GOTO(expr, label) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, goto label) + +/** + * Assert that an expression is true or else return a value. + * + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] retval Value to return if the expression is false. + */ +#define NV_ASSERT_OR_RETURN(expr, retval) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, return (retval)) + +/** + * Assert that an expression is true or else return void. + * + * @param[in] expr Expression that evaluates to a truth value. + */ +#define NV_ASSERT_OR_RETURN_VOID(expr) \ + NV_ASSERT_OR_ELSE_STR(expr, #expr, return) + +/** + * Assert that an expression is true or else do something. + * + * Although it can be used directly, this second level macro is designed to be + * called from other macros. Passing expr through multiple levels of macros + * before it is stringified expands it. This is especially bad for DRF macros, + * which result in an embedded %, breaking the format string in the + * NV_ASSERT_FAILED_INLINE macro defined above. The macros in this header + * always pass the stringified expr as a into the second level macros as + * a separate parameter. + * + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] exprStr Expression as a string. + * @param[in] elseStmt Statement to evaluate if the expression is false. 
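+ *
+ * As a minimal illustration (hypothetical parameter name), the wrapper macros
+ * above expand to uses of this second-level macro:
+ * ~~~{.c}
+ * NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_INVALID_ARGUMENT);
+ * // ...is shorthand for:
+ * NV_ASSERT_OR_ELSE_STR(pParams != NULL, "pParams != NULL",
+ *                       return (NV_ERR_INVALID_ARGUMENT));
+ * ~~~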
+ */ +#define NV_ASSERT_OR_ELSE_STR(expr, exprStr, elseStmt) \ + do \ + { \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(!(expr))) \ + { \ + NV_ASSERT_FAILED(exprStr); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } while (0) + +// ***************************************************************************** +// * NV_ASSERT_OK family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Assert that an expression evaluates to NV_OK. If not, do the actions defined + * in NV_ASSERT_OK_FAILED as well as an "other action": + * Print an error message in the debug output, including decoded NV_STATUS. + * Log an error message in NvLog. + * Mark as an error condition for coverity. + * Breakpoint. + * Log an assert record to the journal. + * "Other action" as defined by each macro below. + * + * The actions are enabled or omitted based on platform and build, and the + * implementations are platform dependent. + * + * The expression is always evaluated even if assertion failures are not logged + * in the environment. Use @ref NV_ASSERT_OK_CHECKED if the expression should + * only be evaluated in checked builds. + * + * USE GENEROUSLY FOR any condition that requires immediate action from NVIDIA. + * Expect to be ARBed on bugs when an assert you added shows up internally + * or in the field. + * + * DO NOT USE for normal run-time conditions, such as a user application + * passing a bad parameter. + */ + +/** + * Assert that an expression evaluates to NV_OK. + * + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#define NV_ASSERT_OK(expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \ + /* no other action */); \ + } while(0) + +/** + * Assert that an expression evaluates to NV_OK only in checked builds. + * + * @note The expression is only evaluated in checked builds so should + * not contain required side-effects. + * Also to prevent side effects, no "other action" is permitted, + * and the status parameter is omitted. + * + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#if PORT_IS_CHECKED_BUILD +#define NV_ASSERT_OK_CHECKED(expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \ + return rm_pvt_status); \ + } while(0) +#else +#define NV_ASSERT_OK_CHECKED(expr) ((void)0) +#endif + +/*! + * Call a function that returns NV_STATUS and assert that the + * return values is NV_OK. In case this was a first failure + * update global status @ref status. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#define NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \ + if (status == NV_OK) status = rm_pvt_status); \ + } while (0) + +/** + * Assert that an expression evaluates to NV_OK or else do something. + * + * This macro can't use NV_ASSERT_OK_OR_ELSE_STR when NV_PRINTF is passed in as + * the elseStmt parameter. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] elseStmt Statement to evaluate if the expression is false. 
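+ *
+ * Illustrative sketch (gpuInitEngine and the cleanup label are hypothetical):
+ * ~~~{.c}
+ * NV_STATUS status;
+ * NV_ASSERT_OK_OR_ELSE(status, gpuInitEngine(pGpu), goto cleanup);
+ * ~~~
+ * On failure, the decoded status is printed and logged before the else
+ * statement runs.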
+ */ +#define NV_ASSERT_OK_OR_ELSE(status, expr, elseStmt) \ + do \ + { \ + status = (expr); \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_ASSERT_OK_FAILED(#expr, status); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } while(0) + +/** + * Assert that an expression evaluates to NV_OK or else goto a label. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] label Label to jump to when the expression is false. +*/ +#define NV_ASSERT_OK_OR_GOTO(status, expr, label) \ + NV_ASSERT_OK_OR_ELSE_STR(status, expr, #expr, goto label); + +/** + * Assert that an expression evaluates to NV_TRUE or else goto a label. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] cond Condition that evaluates to either NV_TRUE or NV_FALSE. + * @param[in] error Error to be reflected in @ref status when @cond evaluates + to NV_FALSE. + * @param[in] label Label to jump to when @ref cond evaluates to NV_FALSE. +*/ +#define NV_ASSERT_TRUE_OR_GOTO(status, cond, error, label) \ + NV_ASSERT_OK_OR_ELSE_STR(status, ((cond) ? NV_OK : (error)), \ + #cond, goto label); + +/** + * Assert that an expression evaluates to NV_OK or else return the status. + * + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#define NV_ASSERT_OK_OR_RETURN(expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_ASSERT_OK_OR_ELSE_STR(rm_pvt_status, expr, #expr, \ + return rm_pvt_status); \ + } while(0) + +/** + * Assert that an expression evaluates to NV_OK or else do something. + * + * Although it can be used directly, this second level macro is designed to be + * called from other macros. Passing expr through multiple levels of macros + * before it is stringified expands it. This is especially bad for DRF macros, + * which result in an embedded %, breaking the format string in the + * NV_ASSERT_OK_FAILED_INLINE macro defined above. The macros in this header + * always pass the stringified expr as a into the second level macros as + * a separate parameter. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] exprStr Expression as a string. + * @param[in] elseStmt Statement to evaluate if the expression is false. + */ +#define NV_ASSERT_OK_OR_ELSE_STR(status, expr, exprStr, elseStmt) \ + do \ + { \ + status = (expr); \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_ASSERT_OK_FAILED(exprStr, status); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } while(0) + +// ***************************************************************************** +// * NV_CHECK family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Check that an expression is true. If not, do the following actions: + * Print a message in the debug output at user specified level. + * Log a message in NvLog at user specified level. + * "Other action" as defined by each macro below. + * + * The actions are enabled or omitted based on platform and build, and the + * implementations are platform dependent. + * + * The expression is always evaluated even if check failures are not logged + * in the environment. Use @ref NV_CHECK_CHECKED if the expression should only + * be evaluated in checked builds. 
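+ *
+ * For illustration (the capability flag is hypothetical), a missing optional
+ * capability can be noted at a low print level while execution continues:
+ * ~~~{.c}
+ * NV_CHECK(LEVEL_INFO, pCaps->bSupportsScatterGather);
+ * ~~~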
+ * + * USE FOR error conditions that DO NOT require immediate action from NVIDIA, + * but may be useful in diagnosing other issues. + */ + +/** + * Check that an expression is true. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + */ +#define NV_CHECK(level, expr) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, /* no other action */) + +/** + * Check that an expression is true only in checked builds. + * + * @note The expression is only evaluated in checked builds so should + * not contain required side-effects. + * Also to prevent side effects, no "other action" is permitted. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + */ +#if PORT_IS_CHECKED_BUILD +#define NV_CHECK_CHECKED(level, expr) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, /* no other action */) +#else +#define NV_CHECK_CHECKED(level, expr) ((void)0) +#endif + +/** + * Check that an expression is true or else do something. + * + * This macro can't use NV_CHECK_OR_ELSE_STR when NV_PRINTF is passed in as + * the elseStmt parameter. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] elseStmt Statement to evaluate if the expression is false. + */ +#define NV_CHECK_OR_ELSE(level, expr, elseStmt) \ + do \ + { \ + if (NV_UNLIKELY(!(expr))) \ + { \ + NV_CHECK_FAILED(level, #expr); \ + elseStmt; \ + } \ + } while (0) + +/** + * Check that an expression is true or else goto a label. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] label Label to jump to when the expression is false. + */ +#define NV_CHECK_OR_GOTO(level, expr, label) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, goto label) + +/** + * Check that an expression is true or else return a value. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] retval Value to return if the expression is false. + */ +#define NV_CHECK_OR_RETURN(level, expr, retval) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, return (retval)) + +/** + * Check that an expression is true or else return void. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + */ +#define NV_CHECK_OR_RETURN_VOID(level, expr) \ + NV_CHECK_OR_ELSE_STR(level, expr, #expr, return) + +/** + * Check that an expression is true or else do something. + * + * Although it can be used directly, this second level macro is designed to be + * called from other macros. Passing expr through multiple levels of macros + * before it is stringified expands it. This is especially bad for DRF macros, + * which result in an embedded %, breaking the format string in the + * NV_CHECK_FAILED_INLINE macro defined above. The macros in this header + * always pass the stringified expr as a into the second level macros as + * a separate parameter. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to a truth value. + * @param[in] exprStr Expression as a string. + * @param[in] elseStmt Statement to evaluate if the expression is false. 
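+ *
+ * Minimal illustration (hypothetical argument name):
+ * ~~~{.c}
+ * NV_CHECK_OR_RETURN(LEVEL_ERROR, size != 0, NV_ERR_INVALID_ARGUMENT);
+ * // ...is shorthand for:
+ * NV_CHECK_OR_ELSE_STR(LEVEL_ERROR, size != 0, "size != 0",
+ *                      return (NV_ERR_INVALID_ARGUMENT));
+ * ~~~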
+ */ +#define NV_CHECK_OR_ELSE_STR(level, expr, exprStr, elseStmt) \ + do \ + { \ + if (NV_UNLIKELY(!(expr))) \ + { \ + NV_CHECK_FAILED(level, exprStr); \ + elseStmt; \ + } \ + } while (0) + + +// ***************************************************************************** +// * NV_CHECK_OK family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Check that an expression evaluates to NV_OK. If not, do the following actions: + * Print a message in the debug output at user specified. + * Log a message in NvLog at user specified level. + * "Other action" as defined by each macro below. + * + * The actions are enabled or omitted based on platform and build, and the + * implementations are platform dependent. + * + * The expression is always evaluated even if assertion failures are not logged + * in the environment. Use @ref NV_ASSERT_OK_CHECKED if the expression should + * only be evaluated in checked builds. + * + * USE FOR error conditions that DO NOT require immediate action from NVIDIA, + * but may be useful in diagnosing other issues. + */ + +/** + * Check that an expression evaluates to NV_OK. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#define NV_CHECK_OK(status, level, expr) \ + do \ + { \ + NV_CHECK_OK_OR_ELSE_STR(status, level, expr, #expr, \ + /* no other action */); \ + } while(0) + +/** + * Check that an expression evaluates to NV_OK only in checked builds. + * + * @note The expression is only evaluated in checked builds so should + * not contain required side-effects. + * Also to prevent side effects, no "other action" is permitted, + * and the status parameter is omitted. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#if PORT_IS_CHECKED_BUILD +#define NV_CHECK_OK_CHECKED(level, expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr, \ + /* no other action */); \ + } while(0) +#else +#define NV_CHECK_OK_CHECKED(level, expr) ((void)0) +#endif + +/*! + * Call a function that returns NV_STATUS and check that the return values is + * NV_OK. If an error is returned, record the error code. In case this was a + * first failure update global status @ref status. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#define NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, level, expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr, \ + if (status == NV_OK) status = rm_pvt_status); \ + } while (0) + +/** + * Check that an expression evaluates to NV_OK or else do something. + * + * This macro can't use NV_CHECK_OK_OR_ELSE_STR when NV_PRINTF is passed in as + * the elseStmt parameter. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] elseStmt Statement to evaluate if the expression returns error. 
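+ *
+ * Illustrative sketch (memCreateMapping and bMapped are hypothetical):
+ * ~~~{.c}
+ * NV_STATUS status;
+ * NV_CHECK_OK_OR_ELSE(status, LEVEL_INFO, memCreateMapping(pMem),
+ *                     pMem->bMapped = NV_FALSE);
+ * ~~~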
+ */ +#define NV_CHECK_OK_OR_ELSE(status, level, expr, elseStmt) \ + do \ + { \ + status = (expr); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_CHECK_OK_FAILED(level, #expr, status); \ + elseStmt; \ + } \ + } while (0) + +/** + * Check that an expression evaluates to NV_OK or else goto a label. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] label Label to jump to when the expression returns error. + */ +#define NV_CHECK_OK_OR_GOTO(status, level, expr, label) \ + NV_CHECK_OK_OR_ELSE_STR(status, level, expr, #expr, goto label) + +/** + * Check that an expression evaluates to NV_OK or return the status. + * + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + */ +#define NV_CHECK_OK_OR_RETURN(level, expr) \ + do \ + { \ + NV_STATUS rm_pvt_status; \ + NV_CHECK_OK_OR_ELSE_STR(rm_pvt_status, level, expr, #expr, \ + return rm_pvt_status); \ + } while(0) + + +/** + * Check that an expression evaluates to NV_OK or else record the error code and + * do something. + * + * Although it can be used directly, this second level macro is designed to be + * called from other macros. Passing expr through multiple levels of macros + * before it is stringified expands it. This is especially bad for DRF macros, + * which result in an embedded %, breaking the format string in the + * NV_CHECK_OK_FAILED_INLINE macro defined above. The macros in this header + * always pass the stringified expr as a into the second level macros as + * a separate parameter. + * + * @param[in] status The NV_STATUS variable to capture the status + * @param[in] level NV_PRINTF LEVEL to print at + * @param[in] expr Expression that evaluates to an NV_STATUS. + * @param[in] exprStr Expression as a string. + * @param[in] elseStmt Statement to evaluate if the expression returns error. + */ +#define NV_CHECK_OK_OR_ELSE_STR(status, level, expr, exprStr, elseStmt) \ + do \ + { \ + status = (expr); \ + if (NV_UNLIKELY(NV_OK != status)) \ + { \ + NV_CHECK_OK_FAILED(level, exprStr, status); \ + elseStmt; \ + } \ + } while (0) + + +// ***************************************************************************** +// * NV_ASSERT_PRECOMP family of macros * +// ***************************************************************************** +/** + * General notes: + * + * Exactly the same as the NV_ASSERT macros, but always safe to use in + * precompiled headers. + * + * On platforms other than GSP-RM, the _INLINE macros cannot be used inside + * precompiled headers due to conflicting NVLOG_PRINT_IDs. The PRECOMP macros + * work around this issue by calling helper functions for NvLog. + * + * Hoping for a better solution, only the macro variants that are currently + * used in precompiled headers are defined. + * + * See the NV_ASSERT documentation above for parameters and use cases. 
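+ *
+ * Minimal sketch of use inside an inline helper that may be pulled into a
+ * precompiled header (the type and field are hypothetical):
+ * ~~~{.c}
+ * static inline NvU32 widgetGetCount(const WIDGET *pWidget)
+ * {
+ *     NV_ASSERT_OR_RETURN_PRECOMP(pWidget != NULL, 0);
+ *     return pWidget->count;
+ * }
+ * ~~~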
+ */ +#define NV_ASSERT_PRECOMP(expr) \ + NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, /* no other action */) + +#define NV_ASSERT_OR_RETURN_PRECOMP(expr, retval) \ + NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, return (retval)) + +#define NV_ASSERT_OR_RETURN_VOID_PRECOMP(expr) \ + NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, #expr, return) + +#define NV_ASSERT_OR_ELSE_STR_PRECOMP(expr, exprStr, elseStmt) \ + do \ + { \ + PORT_COVERAGE_PUSH_OFF(); \ + if (NV_UNLIKELY(!(expr))) \ + { \ + NV_ASSERT_FAILED_PRECOMP(exprStr); \ + elseStmt; \ + } \ + PORT_COVERAGE_POP(); \ + } while (0) + +/** + * @def NV_CHECKED_ONLY + * @brief Compile a piece of code only in checked builds. + * + * This macro helps avoid #ifdefs to improve readability but should be + * used sparingly. + * + * Cases that make heavy use of this should likely define a wrapper + * macro or other abstraction for the build variation. + * For example NV_CHECKED_ONLY(NV_PRINTF(...)) is not a good use case. + * + * A typical use case is declaring and setting a canary value: + * ~~~{.c} + * typedef struct + * { + * NV_CHECKED_ONLY(NvU32 canary); + * ... + * } MY_STRUCT; + * + * void initMyStruct(MY_STRUCT *pStruct) + * { + * NV_CHECKED_ONLY(pStruct->canaray = 0xDEADBEEF); + * ... + * } + * + * void destroyMyStruct(MY_STRUCT *pStruct) + * { + * NV_ASSERT_CHECKED(pStruct->canaray == 0xDEADBEEF); + * ... + * } + * ~~~ + */ +#if PORT_IS_CHECKED_BUILD +#define NV_CHECKED_ONLY NV_EXPAND +#else +#define NV_CHECKED_ONLY NV_DISCARD +#endif + +// Verify prerequisites are defined. +#ifndef PORT_IS_CHECKED_BUILD +#error "NV_PORT_HEADER must define PORT_IS_CHECKED_BUILD" +#endif +#ifndef PORT_BREAKPOINT_CHECKED +#error "NV_PORT_HEADER must define PORT_BREAKPOINT_CHECKED" +#endif +#ifndef PORT_COVERAGE_PUSH_OFF +#error "NV_PORT_HEADER must define PORT_COVERAGE_PUSH_OFF" +#endif +#ifndef PORT_COVERAGE_POP +#error "NV_PORT_HEADER must define PORT_COVERAGE_POP" +#endif +#ifndef NV_PRINTF +#error "NV_PRINTF_HEADER must define NV_PRINTF" +#endif + + +#ifdef __cplusplus +} +#endif //__cplusplus +/// @} +#endif // _NV_UTILS_ASSERT_H_ diff --git a/src/nvidia/inc/libraries/utils/nvbitvector.h b/src/nvidia/inc/libraries/utils/nvbitvector.h new file mode 100644 index 000000000..fccd2880e --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nvbitvector.h @@ -0,0 +1,476 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_UTILS_NV_BITVECTOR_H_ +#define _NV_UTILS_NV_BITVECTOR_H_ + +#include "nvport/nvport.h" +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "utils/nvassert.h" +#include "utils/nvrange.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// +// Note: This will need to be recalculated if the data size changes +// IDX(i) = (index & ~(MASK(num bits)) >> log2(num bits) +// +#define NV_BITVECTOR_IDX(index) (((index) & ~(0x3F)) >> 6) +#define NV_BITVECTOR_ARRAY_SIZE(last) (NV_BITVECTOR_IDX((last) - 1) + 1) +#define NV_BITVECTOR_BYTE_SIZE(last) (NV_BITVECTOR_ARRAY_SIZE((last)) * sizeof(NvU64)) +#define NV_BITVECTOR_OFFSET(index) ((index) & ((sizeof(NvU64) * 8) - 1)) + +/** + * \anchor NV_BITVECTOR_1 + * @defgroup NV_BITVECTOR NV_BITVECTOR + * + * @brief NV_BITVECTOR is a collection of individual consecutive bit flags + * packed within an array of 64-bit integers. Each derivative of the + * NV_BITVECTOR type may specify the number of queryable flags, and the + * array will be sized according to the minimum number of 64-bit integers + * required to hold the flags. + * + * @details NV_BITVECTOR is a general purpose data structure utility. + * It consists of a single (real) field, named \b qword. + * Flags within a NV_BITVECTOR are represented beginning with the LSB of + * index 0 of \b qword, and are packed fully within a single qword + * before expanding into a new qword. Derivatives of NV_BITVECTOR must + * provide a type name for the new type, and the first index outside of + * the range of the new type (this value must be greater than 0.) A + * bitvector with bits 63 and 64 raised is represented in memory in a + * little-endian system as follows: + * + * 63 NV_BITVECTOR_OFFSET(i) 0 + * .-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-. + * 0 |1 | + * 1 | 1| + * `-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-' + * + * Thus, in order to conceptually model an NV_BITVECTOR horizontally as + * a continual ordered list of bits, one would have to write the + * bitvector from highest index to lowest, and read from right to left. + * + * @note The unused bits within a derivative type of NV_BITVECTOR are reserved, + * and must not be depended upon to contain any consistent value. 
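+ *
+ * A minimal usage sketch (ENGINE_MASK and engineMaskDemo are hypothetical):
+ * ~~~{.c}
+ * MAKE_BITVECTOR(ENGINE_MASK, 64);   // flags 0..63 in one NvU64 of storage
+ *
+ * void engineMaskDemo(void)
+ * {
+ *     ENGINE_MASK mask;
+ *     NvU32       idx;
+ *
+ *     bitVectorClrAll(&mask);
+ *     bitVectorSet(&mask, 3);
+ *     bitVectorSet(&mask, 63);
+ *
+ *     FOR_EACH_IN_BITVECTOR(&mask, idx)
+ *     {
+ *         // visits idx == 3, then idx == 63
+ *     }
+ *     FOR_EACH_IN_BITVECTOR_END();
+ * }
+ * ~~~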
+ * + * @{ + */ +typedef struct NV_BITVECTOR NV_BITVECTOR; +struct NV_BITVECTOR +{ + NvU64 qword; +}; + +#define TYPEDEF_BITVECTOR(bitvectTypeName) \ + union bitvectTypeName; \ + typedef union bitvectTypeName bitvectTypeName; \ + +#define IMPL_BITVECTOR(bitvectTypeName, last_val) \ + union bitvectTypeName \ + { \ + NV_BITVECTOR real; \ + NvU64 qword[NV_BITVECTOR_ARRAY_SIZE(last_val)]; \ + struct \ + { \ + char _[last_val]; \ + char asrt[1 - 2 * !(last_val > 0)]; \ + } *last; \ + } + +#define MAKE_BITVECTOR(bitvectTypeName, last_val) \ + TYPEDEF_BITVECTOR(bitvectTypeName) \ + IMPL_BITVECTOR(bitvectTypeName, last_val) + +#define MAKE_ANON_BITVECTOR(last_val) \ + IMPL_BITVECTOR( , last_val) + +#define bitVectorSizeOf(pBitVector) \ + bitVectorSizeOf_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorClrAll(pBitVector) \ + bitVectorClrAll_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorClr(pBitVector, idx) \ + bitVectorClr_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (idx)) + +#define bitVectorClrRange(pBitVector, range) \ + bitVectorClrRange_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (range)) + +#define bitVectorSetAll(pBitVector) \ + bitVectorSetAll_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorSet(pBitVector, idx) \ + bitVectorSet_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (idx)) + +#define bitVectorSetRange(pBitVector, range) \ + bitVectorSetRange_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), (range)) + +#define bitVectorFromArrayU16(pBitVector, pArr, sz) \ + bitVectorFromArrayU16_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + (pArr), \ + (sz)) + +#define bitVectorTestAllSet(pBitVector) \ + bitVectorTestAllSet_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorTestAllCleared(pBitVector) \ + bitVectorTestAllCleared_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorTestEqual(pBitVectorA, pBitVectorB) \ + bitVectorTestEqual_IMPL(&((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorTestIsSubset(pBitVectorA, pBitVectorB) \ + bitVectorTestIsSubset_IMPL(&((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorTest(pBitVector, idx) \ + bitVectorTest_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + (idx)) + +#define bitVectorAnd(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorAnd_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorOr(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorOr_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorXor(pBitVectorDst, pBitVectorA, pBitVectorB) \ + bitVectorXor_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorA)->real), \ + sizeof(((pBitVectorA)->last->_)), \ + &((pBitVectorB)->real), \ + sizeof(((pBitVectorB)->last->_))) + +#define bitVectorComplement(pBitVectorDst, pBitVectorSrc) \ + 
bitVectorComplement_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorSrc)->real), \ + sizeof(((pBitVectorSrc)->last->_))) + +#define bitVectorCopy(pBitVectorDst, pBitVectorSrc) \ + bitVectorCopy_IMPL(&((pBitVectorDst)->real), \ + sizeof(((pBitVectorDst)->last->_)), \ + &((pBitVectorSrc)->real), \ + sizeof(((pBitVectorSrc)->last->_))) + +#define bitVectorCountTrailingZeros(pBitVector) \ + bitVectorCountTrailingZeros_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorCountLeadingZeros(pBitVector) \ + bitVectorCountLeadingZeros_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorCountSetBits(pBitVector) \ + bitVectorCountSetBits_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_))) + +#define bitVectorToRaw(pBitVector, pRawMask, rawMaskSize) \ + bitVectorToRaw_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + pRawMask, \ + rawMaskSize) + +#define bitVectorFromRaw(pBitVector, pRawMask, rawMaskSize) \ + bitVectorFromRaw_IMPL(&((pBitVector)->real), \ + sizeof(((pBitVector)->last->_)), \ + pRawMask, \ + rawMaskSize) + +#define FOR_EACH_IN_BITVECTOR(pBitVector, index) \ + { \ + MAKE_ANON_BITVECTOR(sizeof(((pBitVector)->last->_))) localMask; \ + bitVectorCopy(&localMask, (pBitVector)); \ + for ((index) = bitVectorCountTrailingZeros(&localMask); \ + !bitVectorTestAllCleared(&localMask); \ + bitVectorClr(&localMask, (index)), \ + (index) = bitVectorCountTrailingZeros(&localMask)) \ + { + +#define FOR_EACH_IN_BITVECTOR_END() \ + } \ + } + +#define FOR_EACH_IN_BITVECTOR_PAIR(pBitVectorA, indexA, pBitVectorB, indexB)\ + { \ + MAKE_ANON_BITVECTOR(sizeof(((pBitVectorA)->last->_))) localMaskA; \ + bitVectorCopy(&localMaskA, (pBitVectorA)); \ + MAKE_ANON_BITVECTOR(sizeof(((pBitVectorB)->last->_))) localMaskB; \ + bitVectorCopy(&localMaskB, (pBitVectorB)); \ + for ((indexA) = bitVectorCountTrailingZeros(&localMaskA), \ + (indexB) = bitVectorCountTrailingZeros(&localMaskB); \ + !bitVectorTestAllCleared(&localMaskA) && \ + !bitVectorTestAllCleared(&localMaskB); \ + bitVectorClr(&localMaskA, (indexA)), \ + bitVectorClr(&localMaskB, (indexB)), \ + (indexA) = bitVectorCountTrailingZeros(&localMaskA), \ + (indexB) = bitVectorCountTrailingZeros(&localMaskB)) \ + { + +#define FOR_EACH_IN_BITVECTOR_PAIR_END() \ + } \ + } + +NvU32 +bitVectorSizeOf_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorClrAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorClr_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +); + +NV_STATUS +bitVectorClrRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorSetAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorSet_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +); + +NV_STATUS +bitVectorSetRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorInv_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +); + +NV_STATUS +bitVectorInvRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +); + +NV_STATUS +bitVectorFromArrayU16_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 *pIndices, + NvU32 indicesSize +); + +NvBool +bitVectorTestAllSet_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvBool 
+bitVectorTestAllCleared_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvBool +bitVectorTestEqual_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NvBool +bitVectorTestIsSubset_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NvBool +bitVectorTest_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +); + +NV_STATUS +bitVectorAnd_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NV_STATUS +bitVectorOr_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NV_STATUS +bitVectorXor_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +); + +NV_STATUS +bitVectorComplement_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +); + +NV_STATUS +bitVectorCopy_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +); + +NvU32 +bitVectorCountTrailingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvU32 +bitVectorCountLeadingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NvU32 +bitVectorCountSetBits_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +); + +NV_STATUS +bitVectorToRaw_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + void *pRawMask, + NvU32 rawMaskize +); + +NV_STATUS +bitVectorFromRaw_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + const void *pRawMask, + NvU32 rawMaskSize +); + +#ifdef __cplusplus +} +#endif +///@} +/// NV_UTILS_BITVECTOR +#endif diff --git a/src/nvidia/inc/libraries/utils/nvmacro.h b/src/nvidia/inc/libraries/utils/nvmacro.h new file mode 100644 index 000000000..b6407eed1 --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nvmacro.h @@ -0,0 +1,251 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief Standard utility macros for some more advanced CPP operations + */ + +#ifndef _NV_UTILS_MACRO_H_ +#define _NV_UTILS_MACRO_H_ + +/** + * @defgroup NV_UTILS_MACRO Standard utility Macros + * + * @brief Implements commonly used macros for advanced CPP operations + * + * @{ + */ + +/** + * @brief Expands all arguments + */ +#define NV_EXPAND(...) __VA_ARGS__ +/** + * @brief Discards all arguments + */ +#define NV_DISCARD(...) + +/** + * @brief Fully expands the given argument, then stringifies it. + */ +#define NV_STRINGIFY(s) _NV_STRINGIFY(s) +/** + * @brief Fully expands both arguments, then concatenates them. + */ +#define NV_CONCATENATE(a, b) _NV_CONCATENATE(a, b) + +/** + * @brief Returns a number literal corresponding to the number of arguments. + * + * NV_NUM_ARGS(x) expands to 1 + * NV_NUM_ARGS(x,y,z) expands to 3 + * + * @warning Due to differences in standards, it is impossible to make this + * consistently return 0 when called without arguments. Thus, the behavior of + * NV_NUM_ARGS() is undefined, and shouldn't be counted on. + * If you do decide to use it: It usually returns 0, except when -std=c++11. + * + * @note Works for a maximum of 16 arguments + */ +#define NV_NUM_ARGS(...) _NV_NUM_ARGS(unused, ##__VA_ARGS__, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define _NV_NUM_ARGS(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, N, ...) N + +/** + * @brief Performs an operation on each of its arguments, except first + * + * @param what - Function or function-like macro that takes exactly one param. + * This will be called for other args: what(arg1), what(arg2), ... + * + * NV_FOREACH_ARG(foo, a, b, c) expands to: foo(a), foo(b), foo(c) + * #define INC(x) (x+1) + * NV_FOREACH_ARG(INC,0,1,2,3,4) expands to: (0+1), (1+1), (2+1), (3+1), (4+1) + * + * @note Works for a maximum of 16 arguments, not counting 'what' param + */ +#define NV_FOREACH_ARG(what, ...) \ + NV_CONCATENATE(_NV_FOREACH_ARG_, NV_NUM_ARGS(__VA_ARGS__)) (what, __VA_ARGS__) + +/** + * @brief Similar NV_FOREACH_ARG, but without comma in the expanded result + * + * @param what - Function or function-like macro that takes exactly one param. + * This will be called for other args: what(arg1) what(arg2) ... + * + * NV_FOREACH_ARG(foo, a, b, c) expands to: foo(a) foo(b) foo(c) + * #define OR(x) | (x) + * #define FLAGS(...) (0 NV_FOREACH_ARG_NOCOMMA(OR, __VA_ARGS__)) + * FLAGS(flag1, flag2, flag3) expands to: 0 | (flag1) | (flag2) | (flag3) + * + * @note Works for a maximum of 16 arguments, not counting 'what' param + */ +#define NV_FOREACH_ARG_NOCOMMA(what, ...) 
\ + NV_CONCATENATE(_NV_FOREACH_ARG_NOCOMMA_, NV_NUM_ARGS(__VA_ARGS__)) (what, __VA_ARGS__) + + +/** + * @brief Compile time evaluate a condition + * + * - If 'cond' evaluates to 1 at compile time, macro expands to 'then' + * - If 'cond' evaluates to 0 at compile time, macro expands to nothing + * - If 'cond' is undefined or evaluates to anything else, report a build error + */ +#define NV_STATIC_IF(cond, then) \ + NV_EXPAND(NV_CONCATENATE(NV_STATIC_IF_, NV_EXPAND(cond))) (then) + + +/** + * @brief Similar to @ref NV_STATIC_IF except condition is reversed + * + * - If 'cond' evaluates to 0 at compile time, macro expands to 'then' + * - If 'cond' evaluates to 1 at compile time, macro expands to nothing + * - If 'cond' is undefined or evaluates to anything else, report a build error + */ +#define NV_STATIC_IFNOT(cond, then) \ + NV_EXPAND(NV_CONCATENATE(NV_STATIC_IFNOT_, NV_EXPAND(cond))) (then) + + +/** + * @brief Similar to @ref NV_STATIC_IF except with both THEN and ELSE branches + * + * - If 'cond' evaluates to 1 at compile time, macro expands to 'then' + * - If 'cond' evaluates to 0 at compile time, macro expands to 'els' + * - If 'cond' is undefined or evaluates to anything else, report a build error + */ +#define NV_STATIC_IFELSE(cond, then, els) \ + NV_STATIC_IF(NV_EXPAND(cond), then) NV_STATIC_IFNOT(NV_EXPAND(cond), els) + +/// @} + +/// @cond NV_MACROS_IMPLEMENTATION + +#define _NV_STRINGIFY(s) #s +#define _NV_CONCATENATE(a, b) a##b + +#define NV_STATIC_IF_0(then) NV_DISCARD(then) +#define NV_STATIC_IF_1(then) NV_EXPAND(then) + +#define NV_STATIC_IFNOT_0(then) NV_EXPAND(then) +#define NV_STATIC_IFNOT_1(then) NV_DISCARD(then) + +// Iterating over empty list is unsupported. Give a semi-readable error. +#define _NV_FOREACH_ARG_0(X) NV_FOREACH_ERROR_argument_list_emtpy + +#define _NV_FOREACH_ARG_1(X, _1) \ + X(_1) +#define _NV_FOREACH_ARG_2(X, _1, _2) \ + X(_1), X(_2) +#define _NV_FOREACH_ARG_3(X, _1, _2, _3) \ + X(_1), X(_2), X(_3) +#define _NV_FOREACH_ARG_4(X, _1, _2, _3, _4) \ + X(_1), X(_2), X(_3), X(_4) +#define _NV_FOREACH_ARG_5(X, _1, _2, _3, _4, _5) \ + X(_1), X(_2), X(_3), X(_4), X(_5), +#define _NV_FOREACH_ARG_6(X, _1, _2, _3, _4, _5, _6) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6) +#define _NV_FOREACH_ARG_7(X, _1, _2, _3, _4, _5, _6, _7) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7) +#define _NV_FOREACH_ARG_8(X, _1, _2, _3, _4, _5, _6, _7, _8) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8) +#define _NV_FOREACH_ARG_9(X, _1, _2, _3, _4, _5, _6, _7, _8, _9) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9) +#define _NV_FOREACH_ARG_10(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10) +#define _NV_FOREACH_ARG_11(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11) +#define _NV_FOREACH_ARG_12(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12) +#define _NV_FOREACH_ARG_13(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13) +#define _NV_FOREACH_ARG_14(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14) +#define _NV_FOREACH_ARG_15(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) 
\ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14), X(_15) +#define _NV_FOREACH_ARG_16(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \ + X(_1), X(_2), X(_3), X(_4), X(_5), X(_6), X(_7), X(_8), X(_9), X(_10), X(_11), X(_12), X(_13), X(_14), X(_15), X(_16) + +// Iterating over empty list is unsupported. Give a semi-readable error. +#define _NV_FOREACH_ARG_NOCOMMA_0(X) NV_FOREACH_NOCOMMA_ERROR_argument_list_emtpy + +#define _NV_FOREACH_ARG_NOCOMMA_1(X, _1) \ + X(_1) +#define _NV_FOREACH_ARG_NOCOMMA_2(X, _1, _2) \ + X(_1) X(_2) +#define _NV_FOREACH_ARG_NOCOMMA_3(X, _1, _2, _3) \ + X(_1) X(_2) X(_3) +#define _NV_FOREACH_ARG_NOCOMMA_4(X, _1, _2, _3, _4) \ + X(_1) X(_2) X(_3) X(_4) +#define _NV_FOREACH_ARG_NOCOMMA_5(X, _1, _2, _3, _4, _5) \ + X(_1) X(_2) X(_3) X(_4) X(_5) +#define _NV_FOREACH_ARG_NOCOMMA_6(X, _1, _2, _3, _4, _5, _6) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) +#define _NV_FOREACH_ARG_NOCOMMA_7(X, _1, _2, _3, _4, _5, _6, _7) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) +#define _NV_FOREACH_ARG_NOCOMMA_8(X, _1, _2, _3, _4, _5, _6, _7, _8) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) +#define _NV_FOREACH_ARG_NOCOMMA_9(X, _1, _2, _3, _4, _5, _6, _7, _8, _9) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) +#define _NV_FOREACH_ARG_NOCOMMA_10(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) +#define _NV_FOREACH_ARG_NOCOMMA_11(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) +#define _NV_FOREACH_ARG_NOCOMMA_12(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) +#define _NV_FOREACH_ARG_NOCOMMA_13(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) +#define _NV_FOREACH_ARG_NOCOMMA_14(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14) +#define _NV_FOREACH_ARG_NOCOMMA_15(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14) X(_15) +#define _NV_FOREACH_ARG_NOCOMMA_16(X, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \ + X(_1) X(_2) X(_3) X(_4) X(_5) X(_6) X(_7) X(_8) X(_9) X(_10) X(_11) X(_12) X(_13) X(_14) X(_15) X(_16) + +/// @endcond + +/// @cond NV_MACROS_COMPILE_TIME_TESTS +// +// What follows are a couple of compile time smoke tests that will let us know +// if the given compiler does not properly implement these macros. +// These are disabled by default in the interest of compile time. +// +#if defined(NVMACRO_DO_COMPILETIME_TESTS) +#if NV_NUM_ARGS(a) != 1 +#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 1 args" +#endif +#if NV_NUM_ARGS(a,b,c,d) != 4 +#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 4 args" +#endif +#if NV_NUM_ARGS(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) != 16 +#error "[NvMacros CT Test] NV_NUM_ARGS fails when given 16 args" +#endif + +#define _NVMACRO_ADD_TYPE(x) int x +extern void _nvmacro_compiletime_test_func_proto1(NV_FOREACH_ARG(_NVMACRO_ADD_TYPE, aa, bb, cc)); + +#define _NVMACRO_ADD_TYPES(...) 
NV_FOREACH_ARG(_NVMACRO_ADD_TYPE, __VA_ARGS__) +extern void _nvmacro_compiletime_test_func_proto2(_NVMACRO_ADD_TYPES(a, b, c)); + +#endif // NVMACRO_DO_COMPILETIME_TESTS +/// @endcond + +#endif // _NV_UTILS_MACRO_H_ diff --git a/src/nvidia/inc/libraries/utils/nvprintf.h b/src/nvidia/inc/libraries/utils/nvprintf.h new file mode 100644 index 000000000..2588e0cba --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nvprintf.h @@ -0,0 +1,453 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file + * @brief Standard printf logging interface + */ + +#ifndef _NV_UTILS_PRINTF_H_ +#define _NV_UTILS_PRINTF_H_ + +#ifdef __cplusplus +extern "C" { +#endif + + +/// @defgroup NV_PRINTF_LEVELS Printf verbosity levels +/// @{ +/// @brief Prints at this level are discarded +#define LEVEL_SILENT 0x0 +/// @brief Verbose debug logging level (e.g. signaling function entry) +#define LEVEL_INFO 0x1 +/// @brief Standard debug logging level (e.g. Illegal ctrcall call) +#define LEVEL_NOTICE 0x2 +/// @brief Warning logging level (e.g. feature not supported) +#define LEVEL_WARNING 0x3 +/// @brief Error logging level (e.g. resource allocation failed) +#define LEVEL_ERROR 0x4 +/// @brief Recoverable HW error (e.g. RC events) +#define LEVEL_HW_ERROR 0x5 +/// @brief Unrecoverable error (e.g. Bus timeout) +#define LEVEL_FATAL 0x6 +/// @} + +// Used only in nvlogFilterApplyRule() +#define NV_LEVEL_MAX LEVEL_FATAL + +/** + * @def NV_PRINTF_LEVEL_ENABLED(level) + * @brief This macro evaluates to 1 if prints of a given level will be compiled. + * + * By default, it is available on all builds that allow strings + */ +#ifndef NV_PRINTF_LEVEL_ENABLED +#define NV_PRINTF_LEVEL_ENABLED(level) ((level) >= NV_PRINTF_LEVEL) +#endif + +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +/** + * GSPRM uses a different system for logging. + * The format strings, filename, line number, etc. are stored in a separate + * data segment that is not loaded on the GSP, but is available to the decoder. + */ + +#include "libos_log.h" + +/** + * Define NV_PRINTF_LEVEL to the minimum level for debug output. This is compared + * to the level for each NV_PRINT to cull them at compile time. 
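+ * For example, with NV_PRINTF_LEVEL set to LEVEL_NOTICE, an
+ * NV_PRINTF(LEVEL_INFO, ...) call fails the NV_PRINTF_LEVEL_ENABLED() check
+ * and is culled, while LEVEL_NOTICE and higher levels are kept.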
+ */ +#define NV_PRINTF_LEVEL LEVEL_NOTICE + +#undef NV_PRINTF_ENABLED +#define NV_PRINTF_ENABLED 1 + +#undef NV_PRINTF_STRINGS_ALLOWED +#define NV_PRINTF_STRINGS_ALLOWED 0 + +#define NV_PRINTF_STRING_SECTION LIBOS_SECTION_LOGGING + +#define MAKE_NV_PRINTF_STR(str) \ +({ \ + static NV_PRINTF_STRING_SECTION const char rm_pvt_str[] = str; \ + rm_pvt_str; \ +}) + +// NVLOG is not used on GSP-RM. +#undef NVLOG_LEVEL +#define NVLOG_LEVEL LEVEL_FATAL + +// Direct dmesg printing through NV_PRINTF_STRING is a no-op on GSP-RM +#define NV_PRINTF_STRING(module, level, format, ...) + +#if defined(GSP_PLUGIN_BUILD) + +void log_vgpu_log_entry(const NvU64 n_args, const NvU64 * args); + +#define NV_PRINTF(level, format, ...) do { \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + LIBOS_LOG_INTERNAL(log_vgpu_log_entry, LOG_LEVEL_ERROR, \ + format, ##__VA_ARGS__); \ + } \ +} while (0) + +#define NV_PRINTF_EX(module, level, format, ...) do { \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + LIBOS_LOG_INTERNAL(log_vgpu_log_entry, LOG_LEVEL_ERROR, \ + format, ##__VA_ARGS__); \ + } \ +} while (0) + +#define NVLOG_PRINTF(...) + +#else + +void log_rm_log_entry(const NvU64 n_args, const NvU64 * args); + +#define NV_PRINTF(level, format, ...) do { \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + LIBOS_LOG_INTERNAL(log_rm_log_entry, LOG_LEVEL_ERROR, \ + format, ##__VA_ARGS__); \ + } \ +} while (0) + +#define NV_PRINTF_EX(module, level, format, ...) do { \ + if (NV_PRINTF_LEVEL_ENABLED(level)) \ + { \ + LIBOS_LOG_INTERNAL(log_rm_log_entry, LOG_LEVEL_ERROR, \ + format, ##__VA_ARGS__); \ + } \ +} while (0) + +#endif // NVOC + +#else // defined(NVRM) && NVCPU_IS_RISCV64 + +/** + * @defgroup NV_UTILS_PRINTF Utility Printing Macros + * + * @brief Provides a light abstraction layer for printf logging. + * + * NvPort and NvLog are used for portability and logging primitives. + * If an environment cannot use these directly then it can override + * the NV_PORT_HEADER and NV_LOG_HEADER defines in its makefile + * to point to appropriate replacements. + * @{ + */ + +#ifndef NV_PRINTF_PREFIX +/** + * @brief Prefix to prepend to all messages printed by @ref NV_PRINTF. + */ +#define NV_PRINTF_PREFIX "" +#endif + +#ifndef NV_PRINTF_PREFIX_SEPARATOR +/** + * @brief Separator between prefix messages printed by @ref NV_PRINTF. + * + * If defined, it must be a single character followed by an optional space. + */ +#define NV_PRINTF_PREFIX_SEPARATOR "" +#endif + +#ifndef NV_PRINTF_ADD_PREFIX +/** + * @brief Apply the full prefix string to a format string. + * + * This is a function-like macro so it can support inserting arguments after the + * format string. Example: + * #define NV_PRINTF_ADD_PREFIX(fmt) "%s():"fmt, __FUNCTION__ + */ +#define NV_PRINTF_ADD_PREFIX(fmt) NV_PRINTF_PREFIX NV_PRINTF_PREFIX_SEPARATOR fmt +#endif + +// Include portability header, falling back to NvPort if not provided. +#ifndef NV_PORT_HEADER +#define NV_PORT_HEADER "nvport/nvport.h" +#endif +#include NV_PORT_HEADER + + +// Include logging header, falling back to NvLog if not provided. +#ifndef NV_LOG_HEADER +#define NV_LOG_HEADER "nvlog/nvlog_printf.h" +#endif +#include NV_LOG_HEADER + +#define NV_PRINTF_STRING_SECTION + +#define MAKE_NV_PRINTF_STR(str) str + +/** + * @def NV_PRINTF(level, format, args...) + * @brief Standard formatted printing/logging interface. + * + * @param level - Debug level to print at. One of @ref NV_PRINTF_LEVELS + * @param format - A standard printf format string. Must be a string literal. + * @param args... 
- Arguments for the format string literal, like regular printf + * + * The logging header can redefine the behavior, but the basic implementation + * will just print to standard output, like the printf function. + * + * This will print to the @ref NV_PRINTF_MODULE module. If the module is not + * defined, it will default to GLOBAL. Use @ref NV_PRINTF_EX to specify another + * module. + * + * This will prefix the prints with @ref NV_PRINTF_PREFIX string and function + * name. To specify a different (or no) prefix, use @ref NV_PRINTF_EX + * + * @note The format string must be a string literal. The level can be a variable, + * but it may have positive speed and size effects to use the above levels + * directly. + */ +#ifndef NV_PRINTF +#define NV_PRINTF(level, format, ...) \ + NV_PRINTF_EX(NV_PRINTF_MODULE, level, NV_PRINTF_ADD_PREFIX(format), ##__VA_ARGS__) +#endif + + + +/** + * @def NV_PRINTF_EX(module, level, format, args...) + * @brief Extended version of the standard @ref NV_PRINTF + * + * This interface allows you to explicitly specify the module to print to and + * doesn't perform any automatic prefixing. + * + * The logging header can redefine the behavior, but the basic implementation + * will just print to standard output, like the printf function. + * + * @note The format string must be a string literal. The level can be a variable, + * but it may have positive speed and size effects to use the above levels + * directly. + */ +#ifndef NV_PRINTF_EX +#define NV_PRINTF_EX(module, level, format, ...) \ + do \ + { \ + NVLOG_PRINTF(module, NVLOG_ROUTE_RM, level, format, ##__VA_ARGS__); \ + NV_PRINTF_STRING(module, level, format, ##__VA_ARGS__); \ + } while (0) +#endif + + +/** + * @def NV_PRINTF_STRINGS_ALLOWED + * @brief This switch controls whether strings are allowed to appear in the + * final binary. + * + * By default, strings are allowed on DEBUG and QA builds, and all MODS builds + */ +#ifndef NV_PRINTF_STRINGS_ALLOWED +#if defined(DEBUG) || defined(NV_MODS) || defined(QA_BUILD) +#define NV_PRINTF_STRINGS_ALLOWED 1 +#else +#define NV_PRINTF_STRINGS_ALLOWED 0 +#endif +#endif // NV_PRINTF_STRINGS_ALLOWED + + + +// +// Default values for the compile time switches: +// - Strings are allowed on DEBUG and QA builds, and all MODS builds +// - NV_PRINTF is only available if strings are allowed +// - All levels are available if NV_PRINTF is available. + + + +// +// Special handling for RM internal prints so we have equivalent functionality +// between NV_PRINTF and DBG_PRINTF. This is needed to seamlessly migrate RM to +// the new interface. The implementations will eventually be fully extracted and +// only depend on other common code, such as NvPort. +// +#if defined(NVRM) && !defined(NVWATCH) + +#undef NV_PRINTF_PREFIX +#define NV_PRINTF_PREFIX "NVRM" +#undef NV_PRINTF_PREFIX_SEPARATOR +#define NV_PRINTF_PREFIX_SEPARATOR ": " + +#if NV_PRINTF_STRINGS_ALLOWED + +// Declare internal RM print function: +// This is utDbg_Printf in unit tests and nvDbg_Printf in regular RM builds +#if defined(RM_UNITTEST) +#define NVRM_PRINTF_FUNCTION utDbg_Printf +#else +#define NVRM_PRINTF_FUNCTION nvDbg_Printf +#endif // defined(RM_UNITTEST) + +void NVRM_PRINTF_FUNCTION(const char *file, + int line, + const char *function, + int debuglevel, + const char *s, + ...) NVPORT_CHECK_PRINTF_ARGUMENTS(5, 6); + +#define NV_PRINTF_STRING(module, level, format, ...) 
\ + NVRM_PRINTF_FUNCTION(NV_FILE_STR, __LINE__, NV_FUNCTION_STR, level, format, ##__VA_ARGS__) + +#endif // NV_PRINTF_STRINGS_ALLOWED + +// RM always has printf enabled +#define NV_PRINTF_ENABLED 1 +#endif // defined(NVRM) && !defined(NVWATCH) + + +// +// Default definitions if none are specified +// + +/** + * @def NV_PRINTF_ENABLED + * @brief This macro evaluates to 1 if NV_PRINTF is available (either as regular + * debug prints or binary logging) + * + * By default, it is available on all builds that allow strings + */ +#ifndef NV_PRINTF_ENABLED +#define NV_PRINTF_ENABLED NV_PRINTF_STRINGS_ALLOWED +#endif + +#if NV_PRINTF_STRINGS_ALLOWED +#define NV_PRINTF_LEVEL LEVEL_INFO +#else +#define NV_PRINTF_LEVEL LEVEL_NOTICE +#endif + +/** + * @def NV_PRINTF_STRING(module, level, format, ...) + * @brief Prints the string to the given output, if strings are allowed. + */ +#ifndef NV_PRINTF_STRING +#if NV_PRINTF_STRINGS_ALLOWED +#define NV_PRINTF_STRING(module, level, format, ...) \ + portDbgPrintf(format, ##__VA_ARGS__) + +#if !defined(portDbgPrintf) && !PORT_IS_FUNC_SUPPORTED(portDbgPrintf) +#error "NV_PORT_HEADER must implement portDbgPrintf()" +#endif + +#else +#define NV_PRINTF_STRING(module, level, format, ...) +#endif +#endif // NV_PRINTF_STRING + +#ifndef NVLOG_PRINTF +#define NVLOG_PRINTF(...) +#endif + +#endif // defined(NVRM) && NVCPU_IS_RISCV64 + +/** + * @def NV_PRINTF_COND(condition, leveltrue, levelfalse, format, args...) + * @brief NV_PRINTF with conditional print level + * + * Splits NV_PRINTF calls with a print level based on a variable or ternary + * operation, to be handled by preprocessors to remove INFO-level prints + * + * If condition is true, uses leveltrue, else uses levelfalse + */ +#ifndef NV_PRINTF_COND +#define NV_PRINTF_COND(condition, leveltrue, levelfalse, format, ...) \ + do { \ + if (condition) \ + { \ + NV_PRINTF(leveltrue, format, ##__VA_ARGS__); \ + } \ + else \ + { \ + NV_PRINTF(levelfalse, format, ##__VA_ARGS__); \ + } \ + } while (0) +#endif + + +// +// NV_FILE and NV_FUNCTION macros are used to wrap the __FILE__ and __FUNCTION__ +// macros, respectively, to enable passing them as parameters on release builds +// without linking the strings into the object files. Instead, this will use +// NV_LOG and other utilities to pass values in a way that the same information +// can be decoded on retail builds. +// +// On non-release builds, the strings are directly referenced and included in +// the builds (just like their normal references in DBG_PRINTF() and +// DBG_BREAKPOINT()). +// +// In MODS builds, we allow all printfs, but don't automatically include the +// __FILE__ or __FUNCTION__ references. +// +#if NV_PRINTF_STRINGS_ALLOWED && (!defined(NV_MODS) || defined(SIM_BUILD) || defined(DEBUG) || defined(NV_MODS_INTERNAL)) +#define NV_FILE_STR __FILE__ +#define NV_FILE __FILE__ +#define NV_FILE_FMT "%s" +#define NV_FILE_TYPE const char * +#define NV_FUNCTION_STR __FUNCTION__ +#define NV_FUNCTION __FUNCTION__ +#define NV_FUNCTION_FMT "%s" +#define NV_FUNCTION_TYPE const char * +#else +#ifndef NV_FILE_STR +#define NV_FILE_STR "" +#endif +#ifdef NVLOG_FILEID +# define NV_FILE NVLOG_FILEID +#else +# define NV_FILE 0 +#endif +#define NV_FILE_FMT "" +#define NV_FILE_TYPE NvU32 +// +// A couple caveats on portUtilExGetStackTrace(): +// +// 1. portUtilExGetStackTrace is not supported on all builds. For example, see +// GCC support in util-gcc-clang.h. +// +// 2. portUtilExGetStackTrace(0) will give us the current IP, which is +// current_function()+offset. 
Commands such as `ln` in windbg can translate the +// IP into func+offset. But sometimes, due to inlining/optimizations, the +// current function at runtime is not the same as at compile time. In the +// inlining example, if a function using NV_FUNCTION is inlined, the pointer +// printed will be calling_function()+offset. +// +//#define NV_FUNCTION portUtilExGetStackTrace(0) +#define NV_FUNCTION_STR "" +#define NV_FUNCTION 0 +#define NV_FUNCTION_FMT "" +#define NV_FUNCTION_TYPE NvUPtr +#endif + +#ifdef __cplusplus +} +#endif //__cplusplus + +/// @} +#endif // _NV_UTILS_PRINTF_H_ diff --git a/src/nvidia/inc/libraries/utils/nvrange.h b/src/nvidia/inc/libraries/utils/nvrange.h new file mode 100644 index 000000000..f558e11ed --- /dev/null +++ b/src/nvidia/inc/libraries/utils/nvrange.h @@ -0,0 +1,282 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NV_UTILS_NV_RANGE_H_ +#define _NV_UTILS_NV_RANGE_H_ + +#include "nvtypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * \anchor NV_RANGE_1 + * @defgroup NV_RANGE NV_RANGE + * + * @brief Range is a sequence of unsigned 64 bit integers, represented by its + * lower and upper bounds, inclusive. + * + * @details Range is a general purpose data structure utility. + * It consist of two fields, lower and upper bound. + * It is assumed that both lower and upper bounds are \b inclusive. + * Range with lower bound greater than the upper bound is considered to + * be an empty range. + * + * @note If a range is declared like Range r = {0x0, 0x2} it consist + * of elements 0x0, 0x1 and 0x2 , i.e. Range = [lo, hi] ! + * + * > There are 4 possible options + * > -# (lo, hi) lo+1 .. hi-1 + * > -# [lo, hi) lo .. hi-1 + * > -# (lo, hi] lo+1 .. hi + * > -# [lo, hi] lo .. hi + * + * Notice that only option 4 is capable of describing a full range. + * Full range would be 0x0..NvU64_MAX, where + * NvU64_MAX = 0xFFFFFFFFFFFFFFFF. + * + * @{ + */ +typedef struct NV_RANGE NV_RANGE; +struct NV_RANGE +{ + /** Lower bound of the range, where range includes the lower bound.*/ + NvU64 lo; + /** Upper bound of the range, where range includes the upper bound.*/ + NvU64 hi; +}; + +static const NV_RANGE NV_RANGE_EMPTY = {1, 0}; + +/** + * @brief Checks if range is empty, i.e. range.lo > range.hi + * + * @returns NV_TRUE if range is empty, NV_FALSE otherwise. 
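+ *
+ * For instance (illustrative values), rangeIsEmpty(NV_RANGE_EMPTY) and
+ * rangeIsEmpty(rangeMake(5, 2)) both return NV_TRUE, while
+ * rangeIsEmpty(rangeMake(7, 7)) returns NV_FALSE, since a single-element
+ * range is not empty.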
+ */ +static NV_INLINE NvBool rangeIsEmpty(NV_RANGE range) +{ + return range.lo > range.hi; +} + +/** + * @brief Calculate range length in bytes. + * + * @warning If the range is max, i.e. from 0 to NvU64_MAX, calling this + * function would result in overflow since range length is calculated + * as hi-lo+1. + * + * @par Example: + * @snippet nv_range-test.cpp rangeLengthExample + */ +static NV_INLINE NvU64 rangeLength(NV_RANGE range) +{ + return rangeIsEmpty(range) ? 0 : range.hi - range.lo + 1; +} + +/** + * @brief Creates a range + * + * @details This is useful since on some compilers the following code won't + * work: `rangeLength({0, 100})`. + * However, `rangeLength(rangeMake(0, 100))` will always work. + * + * @Returns Range of elements from and including \a lo to and + * including \a hi, i.e. [lo, hi] + */ +static NV_INLINE NV_RANGE rangeMake(NvU64 lo, NvU64 hi) +{ + NV_RANGE rng = {lo, hi}; + return rng; +} + +/** + * @brief Check if the two given ranges are equal. + */ +static NV_INLINE NvBool rangeEquals(NV_RANGE range1, NV_RANGE range2) +{ + if (rangeIsEmpty(range1) && rangeIsEmpty(range2)) + { + return NV_TRUE; + } + + return (range1.lo == range2.lo) && (range1.hi == range2.hi); +} + +/** + * @brief Check if \a range1 contains \a range2. + * + * @param[in] range1 Container. + * @param[in] range2 Containee. + * + * @par Example: + * @snippet nv_range-test.cpp rangeContainsExample + */ +static NV_INLINE NvBool rangeContains(NV_RANGE range1, NV_RANGE range2) +{ + return (range1.lo <= range2.lo) &&(range1.hi >= range2.hi); +} + +/** + * @brief Checks if intersection of two ranges is not an empty range. + * + * @par Example: + * @snippet nv_range-test.cpp rangeOverlapExample + */ +static NV_INLINE NvBool rangeOverlaps(NV_RANGE range1, NV_RANGE range2) +{ + return (range1.lo <= range2.lo && range2.lo <= range1.hi) + || + (range1.lo <= range2.hi && range2.hi <= range1.hi) + || + (range2.lo <= range1.lo && range1.lo <= range2.hi) + || + (range2.lo <= range1.hi && range1.hi <= range2.hi); +} + +/** + * @brief Returns a range representing an intersection between two given ranges. + * + * @par Example: + * @snippet nv_range-test.cpp rangeOverlapExample + */ +static NV_INLINE NV_RANGE rangeIntersection(NV_RANGE range1, NV_RANGE range2) +{ + NV_RANGE intersect; + + if (rangeIsEmpty(range1) || rangeIsEmpty(range2)) + { + return NV_RANGE_EMPTY; + } + + intersect.lo = range1.lo < range2.lo ? range2.lo : range1.lo; + intersect.hi = range1.hi > range2.hi ? range2.hi : range1.hi; + + return intersect; +} + +/** + * @brief Compares two ranges. + * @returns 0 - \a range1's lower bound is equal to \a range2's lower bound, + * <0 - \a range1's lower bound is less than \a range2's lower bound, + * >0 - \a range2's lower bound is greater than \a range2's lower bound. + * + * @warning If function returns 0 that does not mean that ranges are equal, + * just that their lower bounds are equal! + * + * @par Example: + * @snippet nv_range-test.cpp rangeCompareExample + */ +static NV_INLINE NvS32 rangeCompare(NV_RANGE range1, NV_RANGE range2) +{ + if (rangeIsEmpty(range1) && rangeIsEmpty(range2)) + { + return 0; + } + + return range1.lo >= range2.lo ? (range1.lo == range2.lo ? 0 : 1) : -1; +} + +/** + * @brief Merge two ranges into one. + * + * @returns Merged range. If two ranges have no intersection + * the returned range will be empty. + * + * @note Empty range is range with lo > hi. 
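+ *
+ * As an illustration (values chosen here, not taken from the snippet):
+ * merging [2,5] with [4,9] yields [2,9], while merging [2,5] with [7,9]
+ * returns an empty range because the inputs do not overlap; mere adjacency
+ * is not merged (see rangeBorders()).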
+ * + * @par Example: + * @snippet nv_range-test.cpp rangeMergeExample + */ +static NV_INLINE NV_RANGE rangeMerge(NV_RANGE range1, NV_RANGE range2) +{ + NV_RANGE merged = NV_RANGE_EMPTY; + + if (rangeIsEmpty(range1) || rangeIsEmpty(range2) || !rangeOverlaps(range1, range2)) + { + return merged; + } + + merged.lo = range1.lo; + merged.hi = range1.hi; + + if (range2.lo < merged.lo) + { + merged.lo = range2.lo; + } + if (range2.hi > merged.hi) + { + merged.hi = range2.hi; + } + + return merged; +} + +/** + * @brief Checks if \a range1 borders with \a range2, i.e. \a range1.lo == + * \a range2.hi+1 or \a range2.lo == \a range1.hi+1 + * + * @note [a,b] borders with [b+1,c] where a < b < c + * + */ +static NV_INLINE NvBool rangeBorders(NV_RANGE range1, NV_RANGE range2) +{ + if (rangeIsEmpty(range1) || rangeIsEmpty(range2)) + { + return NV_FALSE; + } + + return (range1.hi + 1 == range2.lo) || (range2.hi + 1 == range1.lo); +} + +/** + * @brief Splits \a pBigRange + * + * @param[in] pBigRange Pointer to starting range. + * @param[in] rangeToSplit Range to split the first range over. + * @param[in] pSecondPartAfterSplit Second range after split. + * + * @par Example: + * @snippet nv_range-test.cpp rangeSplitExample + */ +static NV_INLINE NV_STATUS rangeSplit(NV_RANGE *pBigRange, + NV_RANGE rangeToSplit, NV_RANGE *pSecondPartAfterSplit) +{ + if (rangeIsEmpty(*pBigRange) || rangeIsEmpty(rangeToSplit) || + !rangeContains(*pBigRange, rangeToSplit)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pSecondPartAfterSplit->hi = pBigRange->hi; + pBigRange->hi = rangeToSplit.lo; + pSecondPartAfterSplit->lo = rangeToSplit.hi + 1; + + return NV_OK; +} + +#ifdef __cplusplus +} +#endif +///@} +/// NV_UTILS_RANGE +#endif diff --git a/src/nvidia/inc/os/dce_rm_client_ipc.h b/src/nvidia/inc/os/dce_rm_client_ipc.h new file mode 100644 index 000000000..9b1b5d0c0 --- /dev/null +++ b/src/nvidia/inc/os/dce_rm_client_ipc.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _OS_DCE_CLIENT_IPC_H_ +#define _OS_DCE_CLIENT_IPC_H_ + +// RM IPC Client Types + +#define DCE_CLIENT_RM_IPC_TYPE_SYNC 0x0 +#define DCE_CLIENT_RM_IPC_TYPE_EVENT 0x1 +#define DCE_CLIENT_RM_IPC_TYPE_MAX 0x2 + +void dceclientHandleAsyncRpcCallback(NvU32 handle, NvU32 interfaceType, + NvU32 msgLength, void *data, + void *usrCtx); +#endif diff --git a/src/nvidia/inc/physical/gpu/disp/disp.h b/src/nvidia/inc/physical/gpu/disp/disp.h new file mode 100644 index 000000000..6fa6d14ac --- /dev/null +++ b/src/nvidia/inc/physical/gpu/disp/disp.h @@ -0,0 +1,3 @@ + +#include "g_disp_nvoc.h" + diff --git a/src/nvidia/interface/acpigenfuncs.h b/src/nvidia/interface/acpigenfuncs.h new file mode 100644 index 000000000..54992f0bb --- /dev/null +++ b/src/nvidia/interface/acpigenfuncs.h @@ -0,0 +1,35 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvacpitypes.h" + +#ifndef _ACPIGENFUNCS_H_ +#define _ACPIGENFUNCS_H_ + +#define NV_ACPI_DSM_READ_SIZE (4*1024) + +#define NV_ACPI_GENERIC_FUNC_START 0x0200 +#define NV_ACPI_GENERIC_FUNC_COUNT 9 + +#endif // _ACPIGENFUNCS_H_ + diff --git a/src/nvidia/interface/deprecated/rmapi_deprecated.h b/src/nvidia/interface/deprecated/rmapi_deprecated.h new file mode 100644 index 000000000..78281d346 --- /dev/null +++ b/src/nvidia/interface/deprecated/rmapi_deprecated.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _RMAPI_DEPRECATED_H_ +#define _RMAPI_DEPRECATED_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvmisc.h" +#include "nvos.h" +#include "nvsecurityinfo.h" +// +// This file provides implementations for deprecated RM API by building on the +// modern APIs. The implementations support running in either +// user-mode or kernel-mode context and should have no dependencies on RM +// internals. +// + +/*! + * GSS legacy command masks + */ +#define RM_GSS_LEGACY_MASK 0x00008000 +#define RM_GSS_LEGACY_MASK_NON_PRIVILEGED 0x00008000 +#define RM_GSS_LEGACY_MASK_PRIVILEGED 0x0000C000 + + +typedef enum +{ + RMAPI_DEPRECATED_COPYIN, + RMAPI_DEPRECATED_COPYOUT, + RMAPI_DEPRECATED_COPYRELEASE, +} RMAPI_DEPRECATED_COPY_OP; + +typedef enum +{ + RMAPI_DEPRECATED_BUFFER_EMPLACE, // Use buffer passed into CopyUser + RMAPI_DEPRECATED_BUFFER_ALLOCATE // Allocate a new buffer in CopyUser +} RMAPI_DEPRECATED_BUFFER_POLICY; + +/** + * Fields are populated by the deprecated RM API caller. RmAlloc, RmControl, and + * RmFree should be routed to RM. pExtendedContext can hold any domain specific + * state needed by the RmAlloc/etc implementations. AllocMem/FreeMem are routed + * to OS layers for allocation/free-up of system memory. + */ +typedef struct _DEPRECATED_CONTEXT +{ + NV_STATUS (*RmAlloc)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams); + + NV_STATUS (*RmControl)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParams, NvU32 paramsSize); + + NV_STATUS (*RmFree)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hObject); + + NV_STATUS (*RmMapMemory)(struct _DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvHandle hDevice, + NvHandle hMemory, NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags); + + // Copies data in/out of user-mode address space. 
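+    // The buffer policy selects whether CopyUser fills a caller-provided
+    // buffer (_BUFFER_EMPLACE) or allocates one itself (_BUFFER_ALLOCATE);
+    // a COPYIN done with _BUFFER_ALLOCATE is expected to be paired with a
+    // COPYRELEASE that releases the kernel copy (see
+    // rmapi_deprecated_allocmemory.c for the typical pattern).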
+ NV_STATUS (*CopyUser)(struct _DEPRECATED_CONTEXT *pContext, RMAPI_DEPRECATED_COPY_OP op, + RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, NvP64 dataPtr, + NvU32 dataSize, void **ppKernelPtr); + void * (*AllocMem)(NvU32 length); + void (*FreeMem)(void *pAddress); + void *pExtendedContext; +} DEPRECATED_CONTEXT; + +/** + * List of deprecated APIs supported by this library + */ +void RmDeprecatedAllocObject(DEPRECATED_CONTEXT *pContext, NVOS05_PARAMETERS *pArgs); +void RmDeprecatedAddVblankCallback(DEPRECATED_CONTEXT *pContext, NVOS61_PARAMETERS *pArgs); +void RmDeprecatedAllocContextDma(DEPRECATED_CONTEXT *pContext, NVOS39_PARAMETERS *pArgs); +void RmDeprecatedBindContextDma(DEPRECATED_CONTEXT *pContext, NVOS49_PARAMETERS *pArgs); +void RmDeprecatedI2CAccess(DEPRECATED_CONTEXT *pContext, NVOS_I2C_ACCESS_PARAMS *pArgs); +void RmDeprecatedIdleChannels(DEPRECATED_CONTEXT *pContext, NVOS30_PARAMETERS *pArgs); +void RmDeprecatedVidHeapControl(DEPRECATED_CONTEXT *pContext, NVOS32_PARAMETERS *pArgs); +void RmDeprecatedAllocMemory(DEPRECATED_CONTEXT *pContext, NVOS02_PARAMETERS *pArgs); + + +/** + * List of utility functions (used within shims) + */ +typedef NV_STATUS (*RmDeprecatedControlHandler)(API_SECURITY_INFO*,DEPRECATED_CONTEXT*,NVOS54_PARAMETERS*); +RmDeprecatedControlHandler RmDeprecatedGetControlHandler(NVOS54_PARAMETERS *pArgs); + +NV_STATUS RmDeprecatedGetHandleParent(DEPRECATED_CONTEXT *pContext, NvHandle hClient, + NvHandle hObject, NvHandle *phParent); +NV_STATUS RmDeprecatedGetClassID(DEPRECATED_CONTEXT *pContext, NvHandle hClient, + NvHandle hObject, NvU32 *pClassId); +NV_STATUS RmDeprecatedFindOrCreateSubDeviceHandle(DEPRECATED_CONTEXT *pContext, NvHandle hClient, + NvHandle hDeviceOrSubDevice, NvHandle *pHSubDevice, + NvBool *pBMustFree); +NV_STATUS RmDeprecatedConvertOs32ToOs02Flags(NvU32 attr, NvU32 attr2, NvU32 os32Flags, NvU32 *pOs02Flags); +NV_STATUS RmDeprecatedConvertOs02ToOs32Flags(NvU32 os02Flags, NvU32 *pAttr, NvU32 *pAttr2, NvU32 *pOs32Flags); + +NV_STATUS RmDeprecatedGetOrAllocObject(DEPRECATED_CONTEXT *pContext, NvHandle hClient, NvU32 classId, NvHandle *pHObject); + +NV_STATUS RmCopyUserForDeprecatedApi(RMAPI_DEPRECATED_COPY_OP op,RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, + NvP64 dataPtr, NvU32 dataSize, void **ppKernelPtr, NvBool bUserModeArgs); +#endif diff --git a/src/nvidia/interface/deprecated/rmapi_deprecated_allocmemory.c b/src/nvidia/interface/deprecated/rmapi_deprecated_allocmemory.c new file mode 100644 index 000000000..757961fc5 --- /dev/null +++ b/src/nvidia/interface/deprecated/rmapi_deprecated_allocmemory.c @@ -0,0 +1,537 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "deprecated/rmapi_deprecated.h" + + +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR +#include "class/cl0070.h" // NV01_MEMORY_SYSTEM_DYNAMIC +#include "class/cl003f.h" // NV01_MEMORY_LOCAL_PRIVILEGED +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "class/cl00c2.h" // NV01_MEMORY_LOCAL_PHYSICAL +#include "class/cl84a0.h" // NV01_MEMORY_LIST_XXX +#include "class/cl0076.h" // NV01_MEMORY_FRAMEBUFFER_CONSOLE +#include "class/cl00f3.h" // NV01_MEMORY_FLA + +#include "ctrl/ctrl2080/ctrl2080fb.h" // NV2080_CTRL_FB_INFO + +typedef NV_STATUS RmAllocMemoryFunc( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +); + +typedef struct { + NvU32 hclass; // hClass value + RmAllocMemoryFunc *pFunc; // pointer to handler +} RmAllocMemoryEntry; + +static NV_STATUS _rmAllocMemorySystem(DEPRECATED_CONTEXT *, NvHandle, NvHandle, NvHandle, NvU32, + NvU32, NvP64 *, NvU64 *); +static NV_STATUS _rmAllocMemorySystemDynamic(DEPRECATED_CONTEXT *, NvHandle, NvHandle, NvHandle, NvU32, + NvU32, NvP64 *, NvU64 *); +static NV_STATUS _rmAllocMemorySystemOsDescriptor(DEPRECATED_CONTEXT *, NvHandle, NvHandle, NvHandle, NvU32, + NvU32, NvP64 *, NvU64 *); +static NV_STATUS _rmAllocMemoryLocalUser(DEPRECATED_CONTEXT *, NvHandle, NvHandle, NvHandle, NvU32, + NvU32, NvP64 *, NvU64 *); +static NV_STATUS _rmAllocMemoryLocalPrivileged(DEPRECATED_CONTEXT *, NvHandle, NvHandle, NvHandle, NvU32, + NvU32, NvP64 *, NvU64 *); +static NV_STATUS _rmAllocMemoryList(DEPRECATED_CONTEXT *, NvHandle, NvHandle, NvHandle, NvU32, + NvU32, NvP64 *, NvU64 *); +static NV_STATUS _rmAllocMemoryFramebufferConsole(DEPRECATED_CONTEXT *, NvHandle, NvHandle, + NvHandle, NvU32, NvU32, + NvP64 *, NvU64 *); + +static NV_STATUS _rmAllocMemoryFromFlaObject(DEPRECATED_CONTEXT *, NvHandle, NvHandle, + NvHandle, NvU32, NvU32, + NvP64 *, NvU64 *); + +static const RmAllocMemoryEntry rmAllocMemoryTable[] = +{ + { NV01_MEMORY_SYSTEM, _rmAllocMemorySystem }, + { NV01_MEMORY_SYSTEM_DYNAMIC, _rmAllocMemorySystemDynamic }, + { NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, _rmAllocMemorySystemOsDescriptor }, + { NV01_MEMORY_LOCAL_USER, _rmAllocMemoryLocalUser }, + { NV01_MEMORY_LOCAL_PRIVILEGED, _rmAllocMemoryLocalPrivileged }, + { NV01_MEMORY_LIST_SYSTEM, _rmAllocMemoryList }, + { NV01_MEMORY_LIST_FBMEM, _rmAllocMemoryList }, + { NV01_MEMORY_LIST_OBJECT, _rmAllocMemoryList }, + { NV01_MEMORY_FRAMEBUFFER_CONSOLE, _rmAllocMemoryFramebufferConsole }, + { NV01_MEMORY_FLA, _rmAllocMemoryFromFlaObject }, +}; + +static NvU32 rmAllocMemoryTableSize = sizeof(rmAllocMemoryTable) / sizeof(rmAllocMemoryTable[0]); + +void +RmDeprecatedAllocMemory +( + DEPRECATED_CONTEXT *pContext, + NVOS02_PARAMETERS *pArgs +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + // traverse through the table to match the hClass + for (i = 0; i < rmAllocMemoryTableSize; i++) + { + if (pArgs->hClass == rmAllocMemoryTable[i].hclass) + { + 
break; + } + } + + // check that we have a valid handler + if (i == rmAllocMemoryTableSize) + { + status = NV_ERR_INVALID_CLASS; + goto done; + } + + // call the function in rmAllocMemoryTable corresponding to hClass + status = rmAllocMemoryTable[i].pFunc(pContext, + pArgs->hRoot, + pArgs->hObjectParent, + pArgs->hObjectNew, + pArgs->hClass, + pArgs->flags, + &pArgs->pMemory, + &pArgs->limit); + +done: + pArgs->status = status; +} + +static NV_STATUS +_rmAllocMemorySystem +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +) +{ + NV_STATUS status; + NV_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + + if (DRF_VAL(OS02, _FLAGS, _LOCATION, flags) != NVOS02_FLAGS_LOCATION_PCI || + DRF_VAL(OS02, _FLAGS, _ALLOC, flags) == NVOS02_FLAGS_ALLOC_NONE) + { + return NV_ERR_INVALID_FLAGS; + } + + allocParams.owner = 0x7368696D; // 'shim' + allocParams.size = *pLimit + 1; + + if (FLD_TEST_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _MAP, flags)) + allocParams.type = NVOS32_TYPE_NOTIFIER; + else + allocParams.type = NVOS32_TYPE_IMAGE; + + if (RmDeprecatedConvertOs02ToOs32Flags(flags, &allocParams.attr, &allocParams.attr2, &allocParams.flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + *pAddress = NvP64_NULL; + + status = pContext->RmAlloc(pContext, hClient, hParent, &hMemory, hClass, &allocParams); + + if (status != NV_OK) + return status; + + // RmAllocMemory creates mappings by default + if (FLD_TEST_DRF(OS02, _FLAGS, _MAPPING, _DEFAULT, flags)) + { + status = pContext->RmMapMemory(pContext, hClient, hParent, hMemory, 0, *pLimit + 1, + pAddress, NV04_MAP_MEMORY_FLAGS_NONE); + + if (status != NV_OK) + { + pContext->RmFree(pContext, hClient, hMemory); + } + } + + return status; +} + +static NV_STATUS +_rmAllocMemorySystemDynamic +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +) +{ + NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS allocParams = {0}; + NV_STATUS status; + + // + // NvRmAllocMemory(NV01_MEMORY_SYSTEM_DYNAMIC) is used to allocate a hMemory + // under NV01_DEVICE_0. + // + // NvRmAlloc exposes the Device version with class + // NV01_MEMORY_SYSTEM_DYNAMIC. + // + + // This class does not allow DMA mappings. Use an illegal hVASpace handle. + allocParams.hVASpace = NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE; + + // Try with NV01_MEMORY_SYSTEM_DYNAMIC for NV01_DEVICE_0 parents + status = pContext->RmAlloc(pContext, hClient, hParent, &hMemory, + NV01_MEMORY_SYSTEM_DYNAMIC, &allocParams); + + *pLimit = allocParams.limit; + + return status; +} + +static NV_STATUS +_rmAllocMemorySystemOsDescriptor +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +) +{ + NV_STATUS status; + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + + // Don't support anything but PCI memory at the moment. + // Don't support default mappings, since they make no sense. 
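+    // The caller's pointer in *pAddress is treated as a user virtual address
+    // and wrapped as an OS descriptor below, and only _MAPPING_NO_MAP
+    // requests are accepted, so no CPU mapping is created by this path.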
+ if (DRF_VAL(OS02, _FLAGS, _LOCATION, flags) != NVOS02_FLAGS_LOCATION_PCI || + DRF_VAL(OS02, _FLAGS, _MAPPING, flags) != NVOS02_FLAGS_MAPPING_NO_MAP) + { + return NV_ERR_INVALID_FLAGS; + } + + allocParams.type = NVOS32_TYPE_IMAGE; + allocParams.descriptor = *pAddress; + allocParams.descriptorType = NVOS32_DESCRIPTOR_TYPE_VIRTUAL_ADDRESS; + allocParams.limit = *pLimit; + + if (RmDeprecatedConvertOs02ToOs32Flags(flags, &allocParams.attr, &allocParams.attr2, &allocParams.flags)) + { + return NV_ERR_INVALID_FLAGS; + } + + status = pContext->RmAlloc(pContext, hClient, hParent, &hMemory, hClass, &allocParams); + + return status; +} + +static NV_STATUS _rmAllocGetHeapSize +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hDevice, + NvU64 *pHeapSize +) +{ + NV2080_CTRL_FB_GET_INFO_PARAMS fbInfoParams = {0}; + NV2080_CTRL_FB_INFO fbInfoEntries[1] = {{0}}; + NV_STATUS status; + NvHandle hSubDevice; + NvBool bMustFreeSubDevice; + + status = RmDeprecatedFindOrCreateSubDeviceHandle(pContext, hClient, hDevice, + &hSubDevice, &bMustFreeSubDevice); + + if (status != NV_OK) + return status; + + fbInfoParams.fbInfoListSize = 1; + fbInfoParams.fbInfoList = NV_PTR_TO_NvP64(&fbInfoEntries); + + fbInfoEntries[0].index = NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE; + + status = pContext->RmControl(pContext, hClient, hSubDevice, + NV2080_CTRL_CMD_FB_GET_INFO, + &fbInfoParams, + sizeof(fbInfoParams)); + + + *pHeapSize = ((NvU64)fbInfoEntries[0].data << 10); + + if (bMustFreeSubDevice) + { + pContext->RmFree(pContext, hClient, hSubDevice); + } + + return status; +} + +static NV_STATUS +_rmAllocMemoryLocalUser +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +) +{ + NV_STATUS status; + NV_PHYSICAL_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + + // + // This was a poorly designed API. Non-root clients used it to query the + // heap size where we shouldn't have a hMemory and root clients used it to + // read/write offsets within video memory. + // + + // First attempt: try to allocate NV01_MEMORY_LOCAL_PHYSICAL + status = pContext->RmAlloc(pContext, hClient, hParent, &hMemory, NV01_MEMORY_LOCAL_PHYSICAL, &allocParams); + + if (status == NV_OK) + { + *pLimit = allocParams.memSize - 1; + return status; + } + else if (status == NV_ERR_INSUFFICIENT_PERMISSIONS) + { + // + // Second attempt: If client doesn't have permission (non-root) to view + // entire FB, query heap size with RmControls and allocate a dummy + // hMemory. We use NV01_MEMORY_SYSTEM_DYNAMIC because that API results + // in no underlying heap allocation. 
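+        // As in the privileged path above, the queried size is reported back
+        // to the caller through *pLimit.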
+ // + + NvU64 memSize; + NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS virtAllocParams = {0}; + + virtAllocParams.hVASpace = NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE; + + status = _rmAllocGetHeapSize(pContext, hClient, hParent, &memSize); + + if (status != NV_OK) + return status; + + *pLimit = memSize - 1; + + // + // Alloc dummy memory handle to keep client happy (non-root + // user-mode clients previously received hMemory of entire FB) + // + status = pContext->RmAlloc(pContext, hClient, hParent, &hMemory, + NV01_MEMORY_SYSTEM_DYNAMIC, &virtAllocParams); + } + + return status; +} + +static NV_STATUS +_rmAllocMemoryLocalPrivileged +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +) +{ + if (DRF_VAL(OS02, _FLAGS, _ALLOC, flags) != NVOS02_FLAGS_ALLOC_NONE) + return NV_ERR_INVALID_FLAGS; + + *pLimit = 0xFFFFFFFF; // not used by clients + + return pContext->RmAlloc(pContext, hClient, hParent, &hMemory, hClass, 0); +} + +static NV_STATUS +_rmAllocMemoryList +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +) +{ + NV_MEMORY_LIST_ALLOCATION_PARAMS allocParams = {0}; + Nv01MemoryList *pMemoryList = 0; + void *pPageArray = 0; + NvP64 pageArrayBase = NvP64_NULL; + NvU32 pageArraySize = 0; + NV_STATUS status; + + status = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYIN, RMAPI_DEPRECATED_BUFFER_ALLOCATE, + *pAddress, sizeof(Nv01MemoryList), (void**)&pMemoryList); + if (status != NV_OK) + goto done; + + pageArrayBase = NvP64_PLUS_OFFSET(*pAddress, NV_OFFSETOF(Nv01MemoryList, pageNumber)); + pageArraySize = sizeof(NvU64) * pMemoryList->pageCount; + + status = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYIN, RMAPI_DEPRECATED_BUFFER_ALLOCATE, + pageArrayBase, pageArraySize, &pPageArray); + if (status != NV_OK) + goto done; + + allocParams.pageNumberList = NV_PTR_TO_NvP64(pPageArray); + allocParams.limit = *pLimit; + allocParams.flagsOs02 = flags; + +#define COPY_FIELD(field) allocParams.field = pMemoryList->field + COPY_FIELD(hClient); + COPY_FIELD(hParent); + COPY_FIELD(hObject); + COPY_FIELD(hHwResClient); + COPY_FIELD(hHwResDevice); + COPY_FIELD(hHwResHandle); + COPY_FIELD(pteAdjust); + COPY_FIELD(type); + COPY_FIELD(flags); + COPY_FIELD(attr); + COPY_FIELD(attr2); + COPY_FIELD(height); + COPY_FIELD(width); + COPY_FIELD(format); + COPY_FIELD(comprcovg); + COPY_FIELD(zcullcovg); + COPY_FIELD(pageCount); + COPY_FIELD(heapOwner); + COPY_FIELD(guestId); + COPY_FIELD(rangeBegin); + COPY_FIELD(rangeEnd); + COPY_FIELD(pitch); + COPY_FIELD(ctagOffset); + COPY_FIELD(size); + COPY_FIELD(align); + + status = pContext->RmAlloc(pContext, hClient, hParent, &hMemory, hClass, &allocParams); + +done: + if (pPageArray) + { + pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYRELEASE, RMAPI_DEPRECATED_BUFFER_ALLOCATE, + pageArrayBase, pageArraySize, &pPageArray); + } + + if (pMemoryList) + { + pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYRELEASE, RMAPI_DEPRECATED_BUFFER_ALLOCATE, + *pAddress, sizeof(Nv01MemoryList), (void**)&pMemoryList); + } + + return status; +} + +static NV_STATUS +_rmAllocMemoryFromFlaObject +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +) +{ + NV_FLA_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + NV_FLA_MEMORY_ALLOCATION_PARAMS *pMemoryFla = 0; + NV_STATUS 
status; + + status = pContext->CopyUser(pContext, + RMAPI_DEPRECATED_COPYIN, + RMAPI_DEPRECATED_BUFFER_ALLOCATE, + *pAddress, sizeof(NV_FLA_MEMORY_ALLOCATION_PARAMS), + (void**)&pMemoryFla); + if (status != NV_OK) + goto done; + + allocParams.limit = *pLimit; + allocParams.flagsOs02 = flags; + +#define COPY_FLA_FIELD(field) allocParams.field = pMemoryFla->field + COPY_FLA_FIELD(type); + COPY_FLA_FIELD(flags); + COPY_FLA_FIELD(attr); + COPY_FLA_FIELD(attr2); + COPY_FLA_FIELD(base); + COPY_FLA_FIELD(align); + COPY_FLA_FIELD(hExportSubdevice); + COPY_FLA_FIELD(hExportHandle); + COPY_FLA_FIELD(hExportClient); + + status = pContext->RmAlloc(pContext, hClient, hParent, &hMemory, hClass, &allocParams); + +done: + if (pMemoryFla) + { + pContext->CopyUser(pContext, + RMAPI_DEPRECATED_COPYRELEASE, + RMAPI_DEPRECATED_BUFFER_ALLOCATE, + *pAddress, sizeof(NV_FLA_MEMORY_ALLOCATION_PARAMS), + (void**)&pMemoryFla); + } + + return status; +} + +static NV_STATUS +_rmAllocMemoryFramebufferConsole +( + DEPRECATED_CONTEXT *pContext, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory, + NvU32 hClass, + NvU32 flags, + NvP64 *pAddress, + NvU64 *pLimit +) +{ + return pContext->RmAlloc(pContext, hClient, hParent, &hMemory, hClass, 0); +} diff --git a/src/nvidia/interface/deprecated/rmapi_deprecated_control.c b/src/nvidia/interface/deprecated/rmapi_deprecated_control.c new file mode 100644 index 000000000..1107f5d84 --- /dev/null +++ b/src/nvidia/interface/deprecated/rmapi_deprecated_control.c @@ -0,0 +1,640 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#include "deprecated/rmapi_deprecated.h"
+
+#include "utils/nvmacro.h"
+#include "nvctassert.h"
+
+#include "ctrl/ctrl2080/ctrl2080bios.h" // NV2080_CTRL_CMD_BIOS_GET_INFO
+#include "ctrl/ctrl2080/ctrl2080bus.h" // NV2080_CTRL_CMD_BUS_GET_INFO
+#include "ctrl/ctrl2080/ctrl2080clk.h" // NV2080_CTRL_CMD_CLK_*
+#include "ctrl/ctrl2080/ctrl2080gpu.h" // NV2080_CTRL_CMD_GPU_GET_INFO
+#include "ctrl/ctrl2080/ctrl2080perf.h" // NV2080_CTRL_CMD_PERF_*
+#include "ctrl/ctrl0080/ctrl0080perf.h" // NV0080_CTRL_CMD_PERF_GET_CAPS
+#include "ctrl/ctrl0080/ctrl0080bsp.h" // NV0080_CTRL_CMD_BSP_GET_CAPS
+
+//
+// TODO - deprecation shim shouldn't depend on RM internals
+// Bug 3188307
+//
+#include "core/core.h"
+#include "gpu/gpu.h"
+#include "rmapi/rs_utils.h"
+#include "rmapi/rmapi.h"
+
+#include <stddef.h> // NULL
+
+/**
+ * Kernel-space deprecated control conversion.
+ * A conversion here is applicable to all RM clients on most platforms.
+ *
+ * See also arch/nvalloc/unix/lib/rmapi-user-deprecated-control.c
+ */
+
+#define V2_CONVERTER(cmd) NV_CONCATENATE(_ctrl_convert_v2, cmd)
+#define V3_CONVERTER(cmd) NV_CONCATENATE(_ctrl_convert_v3, cmd)
+
+// Need to declare them first to add to table below.
+// Maybe reshuffle this file around so that we don't need separate decl+def for these?
+static NV_STATUS V2_CONVERTER(_NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION)(API_SECURITY_INFO *pSecInfo, DEPRECATED_CONTEXT *pContextInternal, NVOS54_PARAMETERS *pArgs);
+static NV_STATUS V2_CONVERTER(_NV2080_CTRL_CMD_BIOS_GET_INFO)(API_SECURITY_INFO *pSecInfo, DEPRECATED_CONTEXT *pContextInternal, NVOS54_PARAMETERS *pArgs);
+static NV_STATUS V2_CONVERTER(_NV2080_CTRL_CMD_BUS_GET_INFO)(API_SECURITY_INFO *pSecInfo, DEPRECATED_CONTEXT *pContextInternal, NVOS54_PARAMETERS *pArgs);
+static NV_STATUS V2_CONVERTER(_NV0080_CTRL_CMD_BSP_GET_CAPS)(API_SECURITY_INFO *pSecInfo, DEPRECATED_CONTEXT *pContextInternal, NVOS54_PARAMETERS *pArgs);
+static NV_STATUS V2_CONVERTER(_NV2080_CTRL_CMD_GPU_GET_INFO)(API_SECURITY_INFO *pSecInfo, DEPRECATED_CONTEXT *pContextInternal, NVOS54_PARAMETERS *pArgs);
+
+typedef struct
+{
+ NvU32 cmd; // NVXXXX_CTRL_CMD_* value
+ RmDeprecatedControlHandler func; // pointer to handler
+ NvBool bSkipVGPU; // Do not apply to vGPU - GSP-TODO for better solution.
+} RmDeprecatedControlEntry;
+
+
+static const RmDeprecatedControlEntry rmDeprecatedControlTable[] =
+{
+ { NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION, V2_CONVERTER(_NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION), NV_FALSE},
+ { NV2080_CTRL_CMD_BIOS_GET_INFO, V2_CONVERTER(_NV2080_CTRL_CMD_BIOS_GET_INFO), NV_FALSE},
+ { NV2080_CTRL_CMD_BUS_GET_INFO, V2_CONVERTER(_NV2080_CTRL_CMD_BUS_GET_INFO), NV_FALSE},
+ { NV0080_CTRL_CMD_BSP_GET_CAPS, V2_CONVERTER(_NV0080_CTRL_CMD_BSP_GET_CAPS), NV_FALSE},
+ { NV2080_CTRL_CMD_GPU_GET_INFO, V2_CONVERTER(_NV2080_CTRL_CMD_GPU_GET_INFO), NV_FALSE},
+ { 0, NULL, NV_FALSE }
+};
+
+/*!
+ * Check whether the cmd is part of the GSS legacy
+ * commands.
+ */ +static NvBool IsGssLegacyCall(NvU32 cmd) +{ + return !!(cmd & RM_GSS_LEGACY_MASK); +} + +RmDeprecatedControlHandler RmDeprecatedGetControlHandler(NVOS54_PARAMETERS *pArgs) +{ + NvU32 i; + NV_STATUS nvStatus; + RsClient *pClient = NULL; + POBJGPU pGpu = NULL; + + NV_CHECK_OK_OR_ELSE(nvStatus, + LEVEL_ERROR, + serverGetClientUnderLock(&g_resServ, pArgs->hClient, &pClient), + return NULL); + + gpuGetByHandle(pClient, pArgs->hObject, NULL, &pGpu); + + // search rmDeprecatedControlTable for handler + for (i = 0; rmDeprecatedControlTable[i].cmd != 0; i++) + { + if (pArgs->cmd == rmDeprecatedControlTable[i].cmd) + { + RmDeprecatedControlHandler rmDeprecatedControlHandler = rmDeprecatedControlTable[i].func; + + NV_ASSERT_OR_ELSE_STR(pArgs->flags == 0, "IRQL and BYPASS_LOCK control calls currently unsupported for deprecated control calls", + return NULL); + + // + // Do not deprecate for vGPU + // GSP-TODO: short-term only; we need to find a better way than this exclusion or + // updating the ugly file resman/kernel/inc/vgpu/rm_plugin_shared_code.h + // + if (pGpu && IS_VIRTUAL(pGpu) && rmDeprecatedControlTable[i].bSkipVGPU) + { + return NULL; + } + + return rmDeprecatedControlHandler; + } + } + + // Check if the cmd is part of the legacy GSS control. + if (pGpu && IS_GSP_CLIENT(pGpu) && IsGssLegacyCall(pArgs->cmd)) + { + extern NV_STATUS RmGssLegacyRpcCmd(API_SECURITY_INFO*, DEPRECATED_CONTEXT*, NVOS54_PARAMETERS*); + return RmGssLegacyRpcCmd; + } + + // If no handler found, the control is not deprecated and can be routed normally + return NULL; +} + +// +// Perform deep copy in/out of the user arguments +// Handlers that just reroute controls without changing their param structures +// don't need to call these. +// + +#define CONTROL_PARAM_TOKEN_MAX_POINTERS 4 +typedef struct +{ + struct + { + void *pKernelParams; + NvP64 pUserParams; + NvU32 paramSize; + NvU32 paramStructOffset; + NvU32 listSizeOffset; + NvBool bListSizeIsCount; + } params[CONTROL_PARAM_TOKEN_MAX_POINTERS]; + NvU32 paramCount; +} CONTROL_PARAM_TOKEN; + +static void _ctrlparamsTokenInit +( + CONTROL_PARAM_TOKEN *pToken, + NvP64 pParams, + NvU32 paramSize +) +{ + // Don't want any non-header dependency, including nvport.. 
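As an illustrative sketch of how a converter describes an embedded pointer with this token, using the helper macros defined just below; the field names mirror the NV2080_CTRL_CMD_GPU_GET_INFO parameters handled later in this file, and pArgs is the converter's NVOS54_PARAMETERS argument:

    CONTROL_PARAM_TOKEN token;

    // Top-level params struct: copied in/out as one flat buffer.
    CTRL_PARAMS_TOKEN_INIT(token, NV2080_CTRL_GPU_GET_INFO_PARAMS, pArgs);

    // gpuInfoList is a user pointer to gpuInfoListSize elements of
    // NV2080_CTRL_GPU_INFO; record where the pointer and its size field live
    // so ctrlparamAcquire() can copy the list in and patch the pointer.
    CTRL_PARAMS_TOKEN_ADD_EMBEDDED(token, NV2080_CTRL_GPU_GET_INFO_PARAMS, pArgs,
                                   gpuInfoList, gpuInfoListSize,
                                   NV_TRUE /* size is an element count */,
                                   NV2080_CTRL_GPU_INFO);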
+ NVMISC_MEMSET(pToken, 0, sizeof(*pToken)); + pToken->params[0].pUserParams = pParams; + pToken->params[0].paramSize = paramSize; + pToken->params[0].paramStructOffset = 0; + pToken->params[0].listSizeOffset = (NvU32)~0; + pToken->params[0].bListSizeIsCount = NV_FALSE; + pToken->paramCount = 1; +} + +static NV_STATUS _ctrlparamsTokenAddEmbeddedPtr +( + CONTROL_PARAM_TOKEN *pToken, + NvU32 paramStructOffset, + NvU32 listSizeOffset, + NvBool bListSizeIsCount, + NvU32 listItemSize +) +{ + if (pToken->paramCount >= CONTROL_PARAM_TOKEN_MAX_POINTERS) + return NV_ERR_INSUFFICIENT_RESOURCES; + if (paramStructOffset >= pToken->params[0].paramSize) + return NV_ERR_INVALID_OFFSET; + if (!NV_IS_ALIGNED(paramStructOffset, 8)) + return NV_ERR_INVALID_OFFSET; + if (listItemSize == 0) + return NV_ERR_INVALID_ARGUMENT; + + pToken->params[pToken->paramCount].paramStructOffset = paramStructOffset; + pToken->params[pToken->paramCount].listSizeOffset = listSizeOffset; + pToken->params[pToken->paramCount].bListSizeIsCount = bListSizeIsCount; + pToken->params[pToken->paramCount].paramSize = listItemSize; + pToken->paramCount++; + return NV_OK; +} + +#define CTRL_PARAMS_TOKEN_INIT(token, paramType, pArgs) \ + _ctrlparamsTokenInit(&token, pArgs->params, sizeof(paramType)) + +#define CTRL_PARAMS_TOKEN_ADD_EMBEDDED(token, paramType, pArgs, listField, listSizeField, \ + listSizeIsCount, listElemType) \ + _ctrlparamsTokenAddEmbeddedPtr(&token, NV_OFFSETOF(paramType, listField), \ + NV_OFFSETOF(paramType, listSizeField), \ + listSizeIsCount, sizeof(listElemType)) + +static NV_STATUS ctrlparamAcquire +( + API_SECURITY_INFO *pSecInfo, + CONTROL_PARAM_TOKEN *pToken, + NVOS54_PARAMETERS *pArgs +) +{ + NV_STATUS status; + NvU32 i; + + if (pToken->paramCount == 0 || pToken->paramCount > CONTROL_PARAM_TOKEN_MAX_POINTERS) + return NV_ERR_INVALID_ARGUMENT; + + status = RmCopyUserForDeprecatedApi(RMAPI_DEPRECATED_COPYIN, + RMAPI_DEPRECATED_BUFFER_ALLOCATE, + pToken->params[0].pUserParams, + pToken->params[0].paramSize, + &pToken->params[0].pKernelParams, + pSecInfo->paramLocation == PARAM_LOCATION_USER); + if (status != NV_OK) + goto done; + + pArgs->params = NV_PTR_TO_NvP64(pToken->params[0].pKernelParams); + for (i = 1; i < pToken->paramCount; i++) + { + NvU32 offset = pToken->params[i].paramStructOffset; + + if (pToken->params[i].listSizeOffset != (NvU32)~0) + { + NvU32 listItemCount = *(NvU32*)((NvUPtr)pArgs->params + pToken->params[i].listSizeOffset); + + if (listItemCount == 0) + { + // + // User passed a zero sized list. Do not copy parameters. + // Let the RM control decide about its parameter policy. + // + continue; + } + + // + // Either the listItemCount is the actual count of items, in which + // case we need to read the size of individual items and (safely) + // multiply them, or it is the total count in bytes, in which case + // we just attempt to copyin all of it. + // + if (pToken->params[i].bListSizeIsCount) + { + // + // Overflow check: unsigned multiplication overflows are well defined + // so it is safe to first multiply and then perform the check. + // We already checked that listItemSize != 0 in _ctrlparamsTokenAddEmbeddedPtr. 
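To make the overflow check below concrete, a small worked case with purely illustrative values:

    NvU32 listItemSize  = 0x10;
    NvU32 listItemCount = 0x20000000;
    NvU32 total = listItemSize * listItemCount;                    // wraps to 0 in 32-bit arithmetic
    NvBool bOverflow = (listItemCount != (total / listItemSize));  // NV_TRUE here, so the request is rejected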
+ // + NvU32 listItemSize = pToken->params[i].paramSize; + pToken->params[i].paramSize = listItemSize * listItemCount; + if (listItemCount != (pToken->params[i].paramSize / listItemSize)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + else + { + pToken->params[i].paramSize = listItemCount; + } + } + + pToken->params[i].pUserParams = *(NvP64*)((NvUPtr)pArgs->params + offset); + status = RmCopyUserForDeprecatedApi(RMAPI_DEPRECATED_COPYIN, + RMAPI_DEPRECATED_BUFFER_ALLOCATE, + pToken->params[i].pUserParams, + pToken->params[i].paramSize, + &pToken->params[i].pKernelParams, + pSecInfo->paramLocation != PARAM_LOCATION_KERNEL); + if (status != NV_OK) + goto done; + + // Update the pointer in the top level struct to point to the internal copy + *(NvP64*)((NvUPtr)pArgs->params + offset) = NV_PTR_TO_NvP64(pToken->params[i].pKernelParams); + } +done: + return status; +} + +static void ctrlparamRelease +( + API_SECURITY_INFO *pSecInfo, + CONTROL_PARAM_TOKEN *pToken, + NVOS54_PARAMETERS *pArgs +) +{ + NvU32 i; + + if (pToken->params[0].pKernelParams == NULL) + return; + + for (i = pToken->paramCount - 1; i > 0; i--) + { + NvU32 offset = pToken->params[i].paramStructOffset; + NvU32 listItemCount = *(NvU32*)((NvUPtr)pArgs->params + pToken->params[i].listSizeOffset); + + if (pToken->params[i].pKernelParams == NULL) + continue; + + if (listItemCount == 0) + continue; + + // We already checked the item size in ctrlparamAcquire + + // Update the pointer in the top level struct back to external copy + *(NvP64*)((NvUPtr)pArgs->params + offset) = pToken->params[i].pUserParams; + + // The copy out could fail, but nothing we can do there. + RmCopyUserForDeprecatedApi(RMAPI_DEPRECATED_COPYOUT, + RMAPI_DEPRECATED_BUFFER_ALLOCATE, + pToken->params[i].pUserParams, + pToken->params[i].paramSize, + &pToken->params[i].pKernelParams, + pSecInfo->paramLocation != PARAM_LOCATION_KERNEL); + } + + RmCopyUserForDeprecatedApi(RMAPI_DEPRECATED_COPYOUT, + RMAPI_DEPRECATED_BUFFER_ALLOCATE, + pToken->params[0].pUserParams, + pToken->params[0].paramSize, + &pToken->params[0].pKernelParams, + pSecInfo->paramLocation == PARAM_LOCATION_USER); + pArgs->params = pToken->params[0].pUserParams; +} + +// +// Many controls have embedded pointers and a _v2 variant that has it flattened. +// This macro automatically generates the converter function so that the old +// API can be added to the graveyard and translated to the new one as needed. +// +#define CONVERT_TO_V2_EX(newCmd, oldParamsType, newParamsType, \ + oldListField, newListField, listSizeField, listSizeIsCount, listElemType, \ + set, get, customSet, customGet) \ + do { \ + NV_STATUS status; \ + CONTROL_PARAM_TOKEN token; \ + CTRL_PARAMS_TOKEN_INIT(token, oldParamsType, pArgs); \ + \ + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, \ + CTRL_PARAMS_TOKEN_ADD_EMBEDDED(token, oldParamsType, pArgs, oldListField, \ + listSizeField, listSizeIsCount, listElemType)); \ + status = ctrlparamAcquire(pSecInfo, &token, pArgs); \ + if (status == NV_OK) \ + { \ + oldParamsType *pParams = NvP64_VALUE(pArgs->params); \ + newParamsType *pParams2; \ + NvU32 listSize = listSizeIsCount ? 
/* overflow checked in ctrlparamAcquire() */ \ + pParams->listSizeField * sizeof(listElemType) : \ + pParams->listSizeField; \ + \ + pParams2 = portMemAllocNonPaged(sizeof(newParamsType)); \ + if (pParams2 == NULL) \ + { \ + NV_PRINTF(LEVEL_ERROR, "No memory for pParams2\n"); \ + ctrlparamRelease(pSecInfo, &token, pArgs); \ + return NV_ERR_NO_MEMORY; \ + } \ + if (sizeof(pParams2->newListField) < listSize) \ + { \ + NV_PRINTF(LEVEL_ERROR, "pParams2 static array too small\n"); \ + portMemFree(pParams2); \ + ctrlparamRelease(pSecInfo, &token, pArgs); \ + return NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + \ + if (set) \ + { \ + do { customSet; } while(0); \ + NVMISC_MEMCPY(pParams2->newListField, (NvU8*)(NvUPtr)pParams->oldListField, \ + listSize); \ + } \ + \ + status = pContextInternal->RmControl(pContextInternal, pArgs->hClient, \ + pArgs->hObject, \ + newCmd, \ + pParams2, \ + sizeof(newParamsType)); \ + if ((status == NV_OK) && get) \ + { \ + do { customGet; } while(0); \ + NVMISC_MEMCPY((NvU8*)(NvUPtr)pParams->oldListField, pParams2->newListField, \ + listSize); \ + } \ + portMemFree(pParams2); \ + } \ + ctrlparamRelease(pSecInfo, &token, pArgs); \ + return status; \ + } while (0) + +// +// The default version will copy any fields from the V1 control to the same offset in the V2 +// NOTE: This only works if the actual pointer/list field is the last field in the structure. +// If that is not the case, use CONVERT_TO_V2_EX and write custom field copy logic +// +#define CONVERT_TO_V2(newCmd, oldParamsType, newParamsType, oldListField, newListField, \ + listSizeField, listSizeIsCount, listElemType, set, get) \ + CONVERT_TO_V2_EX(newCmd, oldParamsType, newParamsType, oldListField, newListField, \ + listSizeField, listSizeIsCount, listElemType, set, get, \ + { \ + ct_assert(sizeof(*pParams) <= sizeof(*pParams2)); \ + NVMISC_MEMCPY(pParams2, pParams, sizeof(*pParams)); \ + }, \ + {} \ + ) + + +// TODO: Maybe move more of this to the macro above so we have a simple table, one row per V1->V2 converter. 
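To illustrate what these converters bridge: a V1 control typically exposes its list through an embedded user pointer plus a size field, while the matching _V2 control flattens the list into a fixed-size array inside the params struct. The structs below are a simplified, hypothetical sketch (the EXAMPLE_* names are not real ctrl definitions):

    // V1 style: embedded pointer, needs the deep copy-in/copy-out machinery above.
    typedef struct
    {
        NvU32 infoListSize;   // element count
        NvP64 infoList;       // user pointer to infoListSize elements
    } EXAMPLE_GET_INFO_PARAMS;

    // V2 style: flattened, so a single flat copy of the whole struct suffices.
    typedef struct
    {
        NvU32 infoListSize;
        EXAMPLE_INFO infoList[EXAMPLE_INFO_MAX_ENTRIES];   // hypothetical element type and bound
    } EXAMPLE_GET_INFO_V2_PARAMS;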
+static NV_STATUS V2_CONVERTER(_NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION) +( + API_SECURITY_INFO *pSecInfo, + DEPRECATED_CONTEXT *pContextInternal, + NVOS54_PARAMETERS *pArgs +) +{ + NV_STATUS status; + CONTROL_PARAM_TOKEN token; + NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams2; + + pParams2 = portMemAllocNonPaged(sizeof(*pParams2)); + + if (pParams2 == NULL) + { + NV_PRINTF(LEVEL_ERROR, "No memory for pParams2\n"); + return NV_ERR_NO_MEMORY; + } + + // First fetch build strings using the V2 ctrl call + status = pContextInternal->RmControl(pContextInternal, pArgs->hClient, + pArgs->hObject, + NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION_V2, + pParams2, sizeof(*pParams2)); + + if (status == NV_OK) + { + // + // Only use the deprecated copy-in macro's for the params struct, this control + // call has some legacy behavior that does NOT return an error if one or more + // of the embedded pointers is NULL + // + CTRL_PARAMS_TOKEN_INIT(token, NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS, pArgs); + status = ctrlparamAcquire(pSecInfo, &token, pArgs); + + if (status == NV_OK) + { + NvU32 maxSizeOfStrings; + NvU32 driverVersionBufferLen; + NvU32 versionBufferLen; + NvU32 titleBufferLen; + NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS *pParams = NvP64_VALUE(pArgs->params); + + // Get the maximum string size as per legacy behavior + driverVersionBufferLen = portStringLengthSafe(pParams2->driverVersionBuffer, + sizeof(pParams2->driverVersionBuffer)) + 1; + versionBufferLen = portStringLengthSafe(pParams2->versionBuffer, + sizeof(pParams2->versionBuffer)) + 1; + titleBufferLen = portStringLengthSafe(pParams2->titleBuffer, + sizeof(pParams2->titleBuffer)) + 1; + + maxSizeOfStrings = NV_MAX(driverVersionBufferLen, + NV_MAX(versionBufferLen, titleBufferLen)); + + // + // In the case that one or more of the embedded pointers is NULL, the user + // simply wants to know the maximum size of each of these strings so they know + // how much memory to dynamically allocate. + // + if (NvP64_VALUE(pParams->pDriverVersionBuffer) == NULL || + NvP64_VALUE(pParams->pVersionBuffer) == NULL || + NvP64_VALUE(pParams->pTitleBuffer) == NULL) + { + pParams->sizeOfStrings = maxSizeOfStrings; + portMemFree(pParams2); + ctrlparamRelease(pSecInfo, &token, pArgs); + return NV_OK; + } + + // Embedded char pointers aren't NULL, perform the copy out + else + { + // Ensure that the user-provided buffers are big enough + if (pParams->sizeOfStrings < maxSizeOfStrings) + { + portMemFree(pParams2); + ctrlparamRelease(pSecInfo, &token, pArgs); + return NV_ERR_INVALID_PARAM_STRUCT; + } + + // Copy the build version info to the client's memory. 
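The legacy semantics preserved here imply a two-pass calling pattern; a rough sketch, expressed with the DEPRECATED_CONTEXT interface used in this file (hClient and the three buffers are hypothetical, and each buffer must hold at least sizeOfStrings bytes):

    NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_PARAMS p = {0};
    NvU8 driverVersionBuf[256], versionBuf[256], titleBuf[256];   // assumed >= p.sizeOfStrings
    NV_STATUS status;

    // Pass 1: string pointers left NULL, so only sizeOfStrings is reported.
    status = pContext->RmControl(pContext, hClient, hClient,
                                 NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION, &p, sizeof(p));

    // Pass 2: supply buffers; p.sizeOfStrings already holds the size from pass 1.
    p.pDriverVersionBuffer = NV_PTR_TO_NvP64(driverVersionBuf);
    p.pVersionBuffer       = NV_PTR_TO_NvP64(versionBuf);
    p.pTitleBuffer         = NV_PTR_TO_NvP64(titleBuf);
    status = pContext->RmControl(pContext, hClient, hClient,
                                 NV0000_CTRL_CMD_SYSTEM_GET_BUILD_VERSION, &p, sizeof(p));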
+ if (rmapiParamsCopyOut(NULL, + pParams2->driverVersionBuffer, + pParams->pDriverVersionBuffer, + driverVersionBufferLen, + (pSecInfo->paramLocation != PARAM_LOCATION_KERNEL)) != NV_OK || + rmapiParamsCopyOut(NULL, + pParams2->versionBuffer, + pParams->pVersionBuffer, + versionBufferLen, + (pSecInfo->paramLocation != PARAM_LOCATION_KERNEL)) != NV_OK || + rmapiParamsCopyOut(NULL, + pParams2->titleBuffer, + pParams->pTitleBuffer, + titleBufferLen, + (pSecInfo->paramLocation != PARAM_LOCATION_KERNEL)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to copy out build version info to User Space.\n"); + status = NV_ERR_INVALID_PARAM_STRUCT; + } + else + { + // Copy out CL numbers + pParams->changelistNumber = pParams2->changelistNumber; + pParams->officialChangelistNumber = pParams2->officialChangelistNumber; + status = NV_OK; + } + } + } + + // Free embedded pointer memory + ctrlparamRelease(pSecInfo, &token, pArgs); + } + + // Free allocated memory + portMemFree(pParams2); + return status; +} + +static NV_STATUS V2_CONVERTER(_NV2080_CTRL_CMD_BIOS_GET_INFO) +( + API_SECURITY_INFO *pSecInfo, + DEPRECATED_CONTEXT *pContextInternal, + NVOS54_PARAMETERS *pArgs +) +{ + CONVERT_TO_V2(NV2080_CTRL_CMD_BIOS_GET_INFO_V2, + NV2080_CTRL_BIOS_GET_INFO_PARAMS, + NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS, + biosInfoList, + biosInfoList, + biosInfoListSize, + NV_TRUE, // List size is a count of elements + NV2080_CTRL_BIOS_INFO, + NV_TRUE, // Command SETs which info to fetch, do copy-in + NV_TRUE // Command GETs info, do copy-out + ); +} + +static NV_STATUS V2_CONVERTER(_NV2080_CTRL_CMD_BUS_GET_INFO) +( + API_SECURITY_INFO *pSecInfo, + DEPRECATED_CONTEXT *pContextInternal, + NVOS54_PARAMETERS *pArgs +) +{ + CONVERT_TO_V2(NV2080_CTRL_CMD_BUS_GET_INFO_V2, + NV2080_CTRL_BUS_GET_INFO_PARAMS, + NV2080_CTRL_BUS_GET_INFO_V2_PARAMS, + busInfoList, + busInfoList, + busInfoListSize, + NV_TRUE, // List size is a count of elements + NV2080_CTRL_BUS_INFO, + NV_TRUE, // Command SETs which info to fetch, do copy-in + NV_TRUE // Command GETs info, do copy-out + ); +} + +// Note: see check "listItemCount == 0" in ctrlparamAcquire(). For capsTblSize +// of 0, it will let it fall thru to here. So we need to explicitly check for +// capsTblSize here to not cause regression. +static NV_STATUS V2_CONVERTER(_NV0080_CTRL_CMD_BSP_GET_CAPS) +( + API_SECURITY_INFO *pSecInfo, + DEPRECATED_CONTEXT *pContextInternal, + NVOS54_PARAMETERS *pArgs +) +{ + CONVERT_TO_V2_EX(NV0080_CTRL_CMD_BSP_GET_CAPS_V2, // newCmd + NV0080_CTRL_BSP_GET_CAPS_PARAMS, // oldParamsType + NV0080_CTRL_BSP_GET_CAPS_PARAMS_V2,// newParamsType + capsTbl, // oldListField + capsTbl, // newListField + capsTblSize, // listSizeField + NV_TRUE, // listSizeIsCount + NvU8, // listElemType + NV_TRUE, // for below custom to run, this must be TRUE. + NV_TRUE, // return path, ie, copy out back to user + // Custom data in from user + { + if (pParams->capsTblSize != NV0080_CTRL_BSP_CAPS_TBL_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "pParams capsTblSize %d invalid\n", + pParams->capsTblSize); + portMemFree(pParams2); + return NV_ERR_INVALID_ARGUMENT; + } + pParams2->instanceId = pParams->instanceId; + }, + // Custom data out back to user + {} // array has been filled. 
+ ); +} + +static NV_STATUS V2_CONVERTER(_NV2080_CTRL_CMD_GPU_GET_INFO) +( + API_SECURITY_INFO *pSecInfo, + DEPRECATED_CONTEXT *pContextInternal, + NVOS54_PARAMETERS *pArgs +) +{ + CONVERT_TO_V2(NV2080_CTRL_CMD_GPU_GET_INFO_V2, + NV2080_CTRL_GPU_GET_INFO_PARAMS, + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS, + gpuInfoList, + gpuInfoList, + gpuInfoListSize, + NV_TRUE, // List size is a count of elements + NV2080_CTRL_GPU_INFO, + NV_TRUE, // Command SETs which info to fetch, do copy-in + NV_TRUE // Command GETs info, do copy-out + ); +} diff --git a/src/nvidia/interface/deprecated/rmapi_deprecated_misc.c b/src/nvidia/interface/deprecated/rmapi_deprecated_misc.c new file mode 100644 index 000000000..5d39ead76 --- /dev/null +++ b/src/nvidia/interface/deprecated/rmapi_deprecated_misc.c @@ -0,0 +1,313 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "deprecated/rmapi_deprecated.h" + +#include "class/cl0070.h" // NV01_MEMORY_VIRTUAL/NV01_MEMORY_SYSTEM_DYNAMIC + +#include "ctrl/ctrl0002.h" // NV01_CONTEXT_DMA_FROM_MEMORY +#include "ctrl/ctrl2080/ctrl2080i2c.h" // NV2080_CTRL_I2C_ACCESS_PARAMS +#include "ctrl/ctrl0000/ctrl0000gpu.h" // NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS +#include "ctrl/ctrl402c.h" // NV40_I2C + +void +RmDeprecatedAllocObject +( + DEPRECATED_CONTEXT *pContext, + NVOS05_PARAMETERS *pArgs +) +{ + pArgs->status = pContext->RmAlloc(pContext, pArgs->hRoot, pArgs->hObjectParent, + &pArgs->hObjectNew, pArgs->hClass, 0); +} + +void +RmDeprecatedAddVblankCallback +( + DEPRECATED_CONTEXT *pContext, + NVOS61_PARAMETERS *pArgs +) +{ + NV_STATUS status; + + if (pArgs->bAdd) + { + NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS vblankArgs = {0}; + + vblankArgs.pProc = pArgs->pProc; + vblankArgs.LogicalHead = pArgs->LogicalHead; + vblankArgs.pParm1 = pArgs->pParm1; + vblankArgs.pParm2 = pArgs->pParm2; + + status = pContext->RmAlloc(pContext, pArgs->hClient, pArgs->hDevice, + &pArgs->hVblank, NV9010_VBLANK_CALLBACK, &vblankArgs); + } + else + { + status = pContext->RmFree(pContext, pArgs->hClient, pArgs->hVblank); + } + + pArgs->status = status; +} + +void +RmDeprecatedAllocContextDma +( + DEPRECATED_CONTEXT *pContext, + NVOS39_PARAMETERS *pArgs +) +{ + NV_CONTEXT_DMA_ALLOCATION_PARAMS allocParams = {0}; + NV_STATUS status; + NvHandle hCtxDmaParent; + NvHandle hClient = pArgs->hObjectParent; + NvU32 class; + + // + // hMemory parent is used as the parent for the context dma. The legacy + // RmAllocContextDma2 unfortunately never took hParent as an argument. + // + status = RmDeprecatedGetHandleParent(pContext, hClient, pArgs->hMemory, &hCtxDmaParent); + if (status != NV_OK) + goto done; + + // + // A virtual ContextDma of a NV01_MEMORY_SYSTEM_DYNAMIC object is now implemented + // directly in the DynamicMemory object. We support allocation on the ContextDma + // path for API compatibility. 
+ // + status = RmDeprecatedGetClassID(pContext, hClient, pArgs->hMemory, &class); + if ((status == NV_OK) && (class == NV01_MEMORY_SYSTEM_DYNAMIC)) + { + NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS allocVirtualParams = { 0 }; + + // Apply limit + allocVirtualParams.offset = pArgs->offset; + allocVirtualParams.limit = pArgs->limit; + + // Use default address space for this context + allocVirtualParams.hVASpace = 0; + + status = pContext->RmAlloc(pContext, hClient, hCtxDmaParent, + &pArgs->hObjectNew, + NV01_MEMORY_VIRTUAL, + &allocVirtualParams); + goto done; + } + + allocParams.hSubDevice = pArgs->hSubDevice; + allocParams.flags = pArgs->flags; + allocParams.hMemory = pArgs->hMemory; + allocParams.offset = pArgs->offset; + allocParams.limit = pArgs->limit; + + status = pContext->RmAlloc(pContext, hClient, hCtxDmaParent, + &pArgs->hObjectNew, pArgs->hClass, &allocParams); + +done: + pArgs->status = status; +} + +void +RmDeprecatedBindContextDma +( + DEPRECATED_CONTEXT *pContext, + NVOS49_PARAMETERS *pArgs +) +{ + NV0002_CTRL_BIND_CONTEXTDMA_PARAMS bindParams = {0}; + NV_STATUS status; + NvU32 class; + + // Allow ContextDma promoted to DynamicMemory to be silently bound for compatibility + status = RmDeprecatedGetClassID(pContext, pArgs->hClient, pArgs->hCtxDma, &class); + if ((status == NV_OK) && (class == NV01_MEMORY_SYSTEM_DYNAMIC)) + { + pArgs->status = status; + return; + } + + bindParams.hChannel = pArgs->hChannel; + + status = pContext->RmControl(pContext, pArgs->hClient, pArgs->hCtxDma, NV0002_CTRL_CMD_BIND_CONTEXTDMA, + &bindParams, sizeof(bindParams)); + + pArgs->status = status; +} + +void +RmDeprecatedI2CAccess +( + DEPRECATED_CONTEXT *pContext, + NVOS_I2C_ACCESS_PARAMS *pArgs +) +{ + NV_STATUS status = NV_OK; + NV_STATUS statusTmp = NV_OK; + NV2080_CTRL_I2C_ACCESS_PARAMS *pControlParams = 0; + void *pEmbeddedParams = 0; + NvP64 orginalEmbeddedPtr = NvP64_NULL; + NvHandle hSubDevice; + NvBool bMustFreeSubDevice = NV_FALSE; + + // Param can be either a hSubdevice or hDevice, control is on subdevice + status = RmDeprecatedFindOrCreateSubDeviceHandle(pContext, pArgs->hClient, pArgs->hDevice, + &hSubDevice, &bMustFreeSubDevice); + if (status != NV_OK) + goto done; + + // + // Need to copy fields into local address space (if kernel) before we can + // call RmControl. DEPRECATED_CONTEXT::RmControl expects all params to be in + // local address space. 
+ //
+ status = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYIN, RMAPI_DEPRECATED_BUFFER_ALLOCATE,
+ pArgs->paramStructPtr, sizeof(NV2080_CTRL_I2C_ACCESS_PARAMS), (void**)&pControlParams);
+ if (status != NV_OK)
+ goto done;
+
+ if (pControlParams->dataBuffSize)
+ {
+ if (pControlParams->dataBuffSize <= NV402C_CTRL_I2C_MESSAGE_LENGTH_MAX)
+ {
+ status = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYIN, RMAPI_DEPRECATED_BUFFER_ALLOCATE,
+ pControlParams->data, pControlParams->dataBuffSize, &pEmbeddedParams);
+ if (status != NV_OK)
+ goto done;
+
+ orginalEmbeddedPtr = pControlParams->data;
+ pControlParams->data = NV_PTR_TO_NvP64(pEmbeddedParams);
+ }
+ else
+ {
+ // Invalid I2C access datasize, ignore the databuffer
+ // RS-TODO This should return an error, but ignore for now until KMD initializes their params
+ pControlParams->dataBuffSize = 0;
+ pControlParams->data = NvP64_NULL;
+ }
+ }
+
+ status = pContext->RmControl(pContext, pArgs->hClient, hSubDevice, NV2080_CTRL_CMD_I2C_ACCESS,
+ pControlParams, sizeof(NV2080_CTRL_I2C_ACCESS_PARAMS));
+
+done:
+ if (pEmbeddedParams)
+ {
+ // Restore original value before copy back
+ pControlParams->data = orginalEmbeddedPtr;
+
+ statusTmp = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYOUT, RMAPI_DEPRECATED_BUFFER_ALLOCATE,
+ pControlParams->data, pControlParams->dataBuffSize, &pEmbeddedParams);
+ if (status == NV_OK)
+ status = statusTmp;
+ }
+
+ if (pControlParams)
+ {
+ statusTmp = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYOUT, RMAPI_DEPRECATED_BUFFER_ALLOCATE,
+ pArgs->paramStructPtr, sizeof(NV2080_CTRL_I2C_ACCESS_PARAMS), (void **)&pControlParams);
+ if (status == NV_OK)
+ status = statusTmp;
+ }
+
+ if (bMustFreeSubDevice)
+ {
+ pContext->RmFree(pContext, pArgs->hClient, hSubDevice);
+ }
+
+ pArgs->status = status;
+}
+
+void
+RmDeprecatedIdleChannels
+(
+ DEPRECATED_CONTEXT *pContext,
+ NVOS30_PARAMETERS *pArgs
+)
+{
+ NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS params = {0};
+ NV_STATUS status;
+ void *phClients = 0;
+ void *phDevices = 0;
+ void *phChannels = 0;
+
+ params.hDevice = pArgs->hDevice;
+ params.hChannel = pArgs->hChannel;
+ params.numChannels = pArgs->numChannels;
+ params.flags = pArgs->flags;
+ params.timeout = pArgs->timeout;
+
+ // XXX this should have a max - but copying old behavior for now
+ if (DRF_VAL(OS30, _FLAGS, _CHANNEL, pArgs->flags) == NVOS30_FLAGS_CHANNEL_LIST &&
+ params.numChannels)
+ {
+ // Copy-in phClients
+ status = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYIN, RMAPI_DEPRECATED_BUFFER_ALLOCATE,
+ pArgs->phClients, pArgs->numChannels * sizeof(NvU32), &phClients);
+ if (status != NV_OK)
+ goto done;
+
+ params.phClients = NV_PTR_TO_NvP64(phClients);
+
+ // Copy-in phDevices
+ status = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYIN, RMAPI_DEPRECATED_BUFFER_ALLOCATE,
+ pArgs->phDevices, pArgs->numChannels * sizeof(NvU32), &phDevices);
+ if (status != NV_OK)
+ goto done;
+
+ params.phDevices = NV_PTR_TO_NvP64(phDevices);
+
+ // Copy-in phChannels
+ status = pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYIN, RMAPI_DEPRECATED_BUFFER_ALLOCATE,
+ pArgs->phChannels, pArgs->numChannels * sizeof(NvU32), &phChannels);
+ if (status != NV_OK)
+ goto done;
+
+ params.phChannels = NV_PTR_TO_NvP64(phChannels);
+ }
+
+ status = pContext->RmControl(pContext, pArgs->hClient, pArgs->hClient,
+ NV0000_CTRL_CMD_IDLE_CHANNELS,
+ &params, sizeof(params));
+
+done:
+
+ if (phClients)
+ {
+ pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYRELEASE, RMAPI_DEPRECATED_BUFFER_ALLOCATE,
+ pArgs->phClients,
pArgs->numChannels * sizeof(NvU32), &phClients); + } + + if (phDevices) + { + pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYRELEASE, RMAPI_DEPRECATED_BUFFER_ALLOCATE, + pArgs->phDevices, pArgs->numChannels * sizeof(NvU32), &phDevices); + } + + if (phChannels) + { + pContext->CopyUser(pContext, RMAPI_DEPRECATED_COPYRELEASE, RMAPI_DEPRECATED_BUFFER_ALLOCATE, + pArgs->phChannels, pArgs->numChannels * sizeof(NvU32), &phChannels); + } + pArgs->status = status; +} diff --git a/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c b/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c new file mode 100644 index 000000000..024a27495 --- /dev/null +++ b/src/nvidia/interface/deprecated/rmapi_deprecated_utils.c @@ -0,0 +1,421 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+#include "deprecated/rmapi_deprecated.h"
+
+#include "class/cl0080.h" // NV01_DEVICE_0
+#include "class/cl2080.h" // NV20_SUBDEVICE_0
+#include "ctrl/ctrl0000/ctrl0000client.h" // NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE
+#include "ctrl/ctrl0080/ctrl0080gpu.h" // NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE
+#include "nvos.h"
+
+NV_STATUS
+RmDeprecatedGetHandleParent
+(
+ DEPRECATED_CONTEXT *pContext,
+ NvHandle hClient,
+ NvHandle hObject,
+ NvHandle *phParent
+)
+{
+ NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS parentParams = {0};
+ NV_STATUS status;
+
+ parentParams.hObject = hObject;
+ parentParams.index = NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT;
+
+ status = pContext->RmControl(pContext, hClient, hClient, NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO,
+ &parentParams, sizeof(parentParams));
+
+ *phParent = parentParams.data.hResult;
+
+ return status;
+}
+
+NV_STATUS
+RmDeprecatedGetClassID
+(
+ DEPRECATED_CONTEXT *pContext,
+ NvHandle hClient,
+ NvHandle hObject,
+ NvU32 *pClassId
+)
+{
+ NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS classIdParams = {0};
+ NV_STATUS status;
+
+ classIdParams.hObject = hObject;
+ classIdParams.index = NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID;
+
+ status = pContext->RmControl(pContext, hClient, hClient,
+ NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO,
+ &classIdParams,
+ sizeof(classIdParams));
+
+ *pClassId = NvU64_LO32(classIdParams.data.iResult);
+
+ return status;
+}
+
+NV_STATUS
+RmDeprecatedFindOrCreateSubDeviceHandle
+(
+ DEPRECATED_CONTEXT *pContext,
+ NvHandle hClient,
+ NvHandle hDeviceOrSubDevice,
+ NvHandle *pHSubDevice,
+ NvBool *pBMustFree
+)
+{
+ NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM findParams = {0};
+ NV_STATUS status;
+ NvU32 classId;
+
+ //
+ // Step 1.) check if we already have a subdevice
+ //
+ status = RmDeprecatedGetClassID(pContext, hClient, hDeviceOrSubDevice, &classId);
+
+ if (status != NV_OK)
+ return status;
+
+ if (classId == NV20_SUBDEVICE_0)
+ {
+ *pBMustFree = NV_FALSE;
+ *pHSubDevice = hDeviceOrSubDevice;
+ return NV_OK;
+ }
+ else if (classId != NV01_DEVICE_0)
+ {
+ return NV_ERR_INVALID_ARGUMENT;
+ }
+
+ //
+ // Step 2.) check if there is a subdevice allocated under this device
+ //
+ findParams.subDeviceInst = 0;
+
+ status = pContext->RmControl(pContext, hClient, hDeviceOrSubDevice,
+ NV0080_CTRL_CMD_GPU_FIND_SUBDEVICE_HANDLE,
+ &findParams,
+ sizeof(findParams));
+
+ if (status == NV_OK && findParams.hSubDevice)
+ {
+ *pBMustFree = NV_FALSE;
+ *pHSubDevice = findParams.hSubDevice;
+ return status;
+ }
+
+ //
+ // Step 3.) if there is no device, we temporarily allocate a subdevice.
+ // Subdevice must be freed before we exit out to allow the client to reserve
+ // it if it chooses to do so later on.
+ //
+ *pBMustFree = NV_TRUE;
+
+ *pHSubDevice = 0;
+
+ status = pContext->RmAlloc(pContext, hClient, hDeviceOrSubDevice, pHSubDevice, NV20_SUBDEVICE_0, 0);
+
+ return status;
+}
+
+NV_STATUS RmDeprecatedGetOrAllocObject
+(
+ DEPRECATED_CONTEXT *pContext,
+ NvHandle hClient,
+ NvU32 classId,
+ NvHandle *pHObject
+)
+{
+ NV_STATUS status;
+
+ NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS params = {0};
+ params.hParent = *pHObject;
+ params.classId = classId;
+ status = pContext->RmControl(pContext, hClient, hClient,
+ NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE,
+ &params, sizeof(params));
+ // Object already exists, just return it
+ if (status == NV_OK && params.hObject != 0)
+ {
+ *pHObject = params.hObject;
+ }
+ else
+ {
+ // Object does not exist yet, allocate.
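Callers in this shim use the find-or-create subdevice helper above in a common pattern; roughly (the client and device handles are whatever the caller already holds, and the control issued against hSubDevice is whatever NV2080 control it needs):

    NvHandle  hSubDevice;
    NvBool    bMustFree;
    NV_STATUS status;

    status = RmDeprecatedFindOrCreateSubDeviceHandle(pContext, hClient, hDeviceOrSubDevice,
                                                     &hSubDevice, &bMustFree);
    if (status == NV_OK)
    {
        // ... issue an NV2080_CTRL_CMD_* control against hSubDevice ...

        // A temporarily allocated subdevice must not be left behind, so the
        // client can still allocate its own subdevice later.
        if (bMustFree)
            pContext->RmFree(pContext, hClient, hSubDevice);
    }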
+ void *pAllocParams = (void*)0; // TODO: Fill for classes that need them + status = pContext->RmAlloc(pContext, hClient, *pHObject, + pHObject, classId, pAllocParams); + } + return status; +} + +NV_STATUS +RmDeprecatedConvertOs32ToOs02Flags +( + NvU32 attr, + NvU32 attr2, + NvU32 os32Flags, + NvU32 *pOs02Flags +) +{ + NvU32 os02Flags = 0; + NV_STATUS rmStatus = NV_OK; + + switch (DRF_VAL(OS32, _ATTR, _PHYSICALITY, attr)) + { + case NVOS32_ATTR_PHYSICALITY_DEFAULT: // NVOS02 defaults to contiguous. + case NVOS32_ATTR_PHYSICALITY_CONTIGUOUS: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, os02Flags); + break; + } + case NVOS32_ATTR_PHYSICALITY_NONCONTIGUOUS: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PHYSICALITY, _NONCONTIGUOUS, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS32, _ATTR, _LOCATION, attr)) + { + case NVOS32_ATTR_LOCATION_PCI: + case NVOS32_ATTR_LOCATION_ANY: // NVOS02 defaults to PCI + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _LOCATION, _PCI, os02Flags); + break; + } + case NVOS32_ATTR_LOCATION_AGP: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _LOCATION, _AGP, os02Flags); + break; + } + case NVOS32_ATTR_LOCATION_VIDMEM: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _LOCATION, _VIDMEM, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS32, _ATTR, _COHERENCY, attr)) + { + case NVOS32_ATTR_COHERENCY_UNCACHED: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_CACHED: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _CACHED, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_WRITE_COMBINE: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_COMBINE, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_WRITE_THROUGH: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_THROUGH, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_WRITE_PROTECT: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_PROTECT, os02Flags); + break; + } + case NVOS32_ATTR_COHERENCY_WRITE_BACK: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _WRITE_BACK, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, attr2)) + { + case NVOS32_ATTR2_GPU_CACHEABLE_YES: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, os02Flags); + break; + } + case NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT: // NVOS02 defaults to non-cacheable + case NVOS32_ATTR2_GPU_CACHEABLE_NO: + { + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _NO, os02Flags); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + // VidHeapControl never creates a mapping + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _MAPPING, _NO_MAP, os02Flags); + if (os32Flags & NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _MAP, os02Flags); + else + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _NO_MAP, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, _YES, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, os02Flags); + + if (FLD_TEST_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, attr2)) + os02Flags = FLD_SET_DRF(OS02, _FLAGS, 
_ALLOC_NISO_DISPLAY, _YES, os02Flags); + else + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _NO, os02Flags); + + if (rmStatus == NV_OK) + { + *pOs02Flags = os02Flags; + } + + return rmStatus; +} + +NV_STATUS +RmDeprecatedConvertOs02ToOs32Flags +( + NvU32 os02Flags, + NvU32 *pAttr, + NvU32 *pAttr2, + NvU32 *pOs32Flags +) +{ + NvU32 os32Flags = 0; + NvU32 attr = 0, attr2 = 0; + NV_STATUS rmStatus = NV_OK; + + attr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT, attr); + + if (FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, os02Flags)) + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr); + else + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, attr); + + switch (DRF_VAL(OS02, _FLAGS, _LOCATION, os02Flags)) + { + case NVOS02_FLAGS_LOCATION_PCI: + { + attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, attr); + break; + } + case NVOS02_FLAGS_LOCATION_VIDMEM: + { + attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + switch (DRF_VAL(OS02, _FLAGS, _COHERENCY, os02Flags)) + { + case NVOS02_FLAGS_COHERENCY_UNCACHED: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _UNCACHED, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_CACHED: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _CACHED, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_COMBINE: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_COMBINE, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_THROUGH: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_THROUGH, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_PROTECT: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_PROTECT, attr); + break; + } + case NVOS02_FLAGS_COHERENCY_WRITE_BACK: + { + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _WRITE_BACK, attr); + break; + } + default: + { + rmStatus = NV_ERR_INVALID_FLAGS; + break; + } + } + + if (FLD_TEST_DRF(OS02, _FLAGS, _GPU_CACHEABLE, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES); + else + attr2 |= DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + + if (FLD_TEST_DRF(OS02, _FLAGS, _KERNEL_MAPPING, _MAP, os02Flags)) + os32Flags |= NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP; + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_NISO_DISPLAY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _YES); + else + attr2 |= DRF_DEF(OS32, _ATTR2, _NISO_DISPLAY, _NO); + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_USER_READ_ONLY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY); + + if (FLD_TEST_DRF(OS02, _FLAGS, _ALLOC_DEVICE_READ_ONLY, _YES, os02Flags)) + attr2 |= DRF_DEF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY); + + if (rmStatus == NV_OK) + { + *pOs32Flags = os32Flags; + *pAttr = attr; + *pAttr2 = attr2; + } + + return rmStatus; +} diff --git a/src/nvidia/interface/deprecated/rmapi_deprecated_vidheapctrl.c b/src/nvidia/interface/deprecated/rmapi_deprecated_vidheapctrl.c new file mode 100644 index 000000000..943d1d411 --- /dev/null +++ b/src/nvidia/interface/deprecated/rmapi_deprecated_vidheapctrl.c @@ -0,0 +1,660 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "deprecated/rmapi_deprecated.h" + + +#include "class/cl00b1.h" // NV01_MEMORY_HW_RESOURCES +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR +#include "class/cl50a0.h" // NV50_MEMORY_VIRTUAL +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER + +#include "ctrl/ctrl0041.h" // NV04_MEMORY +#include "ctrl/ctrl2080/ctrl2080fb.h" // NV2080_CTRL_FB_INFO + +typedef NV_STATUS RmVidHeapControlFunc(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); + +typedef struct { + NvU32 function; // NVOS32_FUNCTION_* value + RmVidHeapControlFunc *pFunc; // pointer to handler +} RmVidHeapControlEntry; + +// forward declarations +static NV_STATUS _nvos32FunctionAllocDepthWidthHeight(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionAllocSize(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionAllocSizeRange(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionAllocTiledPitchHeight(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionFree(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionInfo(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionDump(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionReleaseReacquireCompr(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionGetMemAlignment(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionHwAlloc(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionHwFree(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); +static NV_STATUS _nvos32FunctionAllocOsDesc(DEPRECATED_CONTEXT *, NVOS32_PARAMETERS *); + +static const RmVidHeapControlEntry rmVidHeapControlTable[] = { + + { NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT, _nvos32FunctionAllocDepthWidthHeight }, + { NVOS32_FUNCTION_ALLOC_SIZE, _nvos32FunctionAllocSize }, + { NVOS32_FUNCTION_ALLOC_SIZE_RANGE, _nvos32FunctionAllocSizeRange }, + { NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT, _nvos32FunctionAllocTiledPitchHeight }, + { NVOS32_FUNCTION_FREE, _nvos32FunctionFree }, + { NVOS32_FUNCTION_INFO, _nvos32FunctionInfo }, + { NVOS32_FUNCTION_DUMP, _nvos32FunctionDump }, + { NVOS32_FUNCTION_RELEASE_COMPR, _nvos32FunctionReleaseReacquireCompr }, + { NVOS32_FUNCTION_REACQUIRE_COMPR, _nvos32FunctionReleaseReacquireCompr 
}, + { NVOS32_FUNCTION_GET_MEM_ALIGNMENT, _nvos32FunctionGetMemAlignment }, + { NVOS32_FUNCTION_HW_ALLOC, _nvos32FunctionHwAlloc }, + { NVOS32_FUNCTION_HW_FREE, _nvos32FunctionHwFree }, + { NVOS32_FUNCTION_ALLOC_OS_DESCRIPTOR, _nvos32FunctionAllocOsDesc }, +}; + +static NvU32 rmVidHeapControlTableSize = sizeof(rmVidHeapControlTable) / sizeof(rmVidHeapControlTable[0]); + +void +RmDeprecatedVidHeapControl +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NvU32 i; + NV_STATUS status; + + // IVC heap is supported only on embedded platforms. + if (pArgs->ivcHeapNumber > NVOS32_IVC_HEAP_NUMBER_DONT_ALLOCATE_ON_IVC_HEAP) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + // search rmVidHeapControlTable for handler + for (i = 0; i < rmVidHeapControlTableSize; i++) + if (pArgs->function == rmVidHeapControlTable[i].function) + break; + + // check that we have a valid handler + if (i == rmVidHeapControlTableSize) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // issue the call + status = rmVidHeapControlTable[i].pFunc(pContext, pArgs); + +done: + pArgs->status = status; +} + +// Helper macros to convert old vidheapctrl param structs into NV_MEMORY_ALLOCATION_PARAMS +#define _COPY_IN(newField, oldField) allocParams.newField = pArgs->data.oldField; +#define _COPY_OUT(newField, oldField) pArgs->data.oldField = allocParams.newField; +#define _NO_COPY(newField, oldField) + +static NV_STATUS +_rmVidHeapControlAllocCommon +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs, + NvHandle hClient, + NvHandle hDevice, + NvHandle *phMemory, + NV_MEMORY_ALLOCATION_PARAMS *pUserParams +) +{ + NV_STATUS status; + NvU32 externalClassId; + + if (0 == (pUserParams->flags & NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED)) + { + // RmAlloc will generate a handle when hMemory = 0 + *phMemory = 0; + } + + pUserParams->hVASpace = pArgs->hVASpace; + + if (pUserParams->flags & NVOS32_ALLOC_FLAGS_VIRTUAL) + externalClassId = NV50_MEMORY_VIRTUAL; + else if (FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pUserParams->attr)) + externalClassId = NV01_MEMORY_LOCAL_USER; + else + externalClassId = NV01_MEMORY_SYSTEM; + + status = pContext->RmAlloc(pContext, hClient, hDevice, phMemory, externalClassId, + pUserParams); + + pArgs->free = 0; + pArgs->total = 0; + + return status; +} + +static NV_STATUS +_nvos32FunctionAllocDepthWidthHeight +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + NvU32 byteWidth; + NV_STATUS status = NV_OK; + + // For NV3, scanline alignment is 32 bytes. + byteWidth = ((pArgs->data.AllocDepthWidthHeight.width * pArgs->data.AllocDepthWidthHeight.depth) + 7) >> 3; + pArgs->data.AllocDepthWidthHeight.size = pArgs->data.AllocDepthWidthHeight.height * ((byteWidth + 31) & ~31); + + // Range begin/end are captured only if the appropriate flag bit is set. 
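As a worked example of the size computation above, with illustrative values (a 100-pixel-wide, 16-bit-deep, 64-line surface):

    NvU32 width = 100, depth = 16, height = 64;
    NvU32 byteWidth = ((width * depth) + 7) >> 3;      // 200 bytes of pixel data per line
    NvU64 size = height * ((byteWidth + 31) & ~31);    // 64 * 224 = 14336 bytes with 32-byte-aligned pitch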
+ if (pArgs->data.AllocDepthWidthHeight.flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) + { + allocParams.rangeLo = pArgs->data.AllocDepthWidthHeight.rangeBegin; + allocParams.rangeHi = pArgs->data.AllocDepthWidthHeight.rangeEnd; + } + else + { + allocParams.rangeLo = 0; + allocParams.rangeHi = 0; + } + + #define ALLOC_DEPTH_WIDTH_HEIGHT_PARAMS(_IN, _IN_OUT) \ + _IN(owner, AllocDepthWidthHeight.owner) \ + _IN(type, AllocDepthWidthHeight.type) \ + _IN(flags, AllocDepthWidthHeight.flags) \ + _IN(height, AllocDepthWidthHeight.height) \ + _IN(width, AllocDepthWidthHeight.width) \ + _IN_OUT(size, AllocDepthWidthHeight.size) \ + _IN(alignment, AllocDepthWidthHeight.alignment) \ + _IN_OUT(offset, AllocDepthWidthHeight.offset) \ + _IN_OUT(attr, AllocDepthWidthHeight.attr) \ + _IN_OUT(attr2, AllocDepthWidthHeight.attr2) \ + _IN_OUT(format, AllocDepthWidthHeight.format) \ + _IN_OUT(limit, AllocDepthWidthHeight.limit) \ + _IN_OUT(address, AllocDepthWidthHeight.address) \ + _IN_OUT(comprCovg, AllocDepthWidthHeight.comprCovg) \ + _IN_OUT(zcullCovg, AllocDepthWidthHeight.zcullCovg) \ + _IN_OUT(ctagOffset, AllocDepthWidthHeight.ctagOffset) + + ALLOC_DEPTH_WIDTH_HEIGHT_PARAMS(_COPY_IN, _COPY_IN); + + pArgs->data.AllocDepthWidthHeight.partitionStride = 256; + + // get memory + status = _rmVidHeapControlAllocCommon(pContext, + pArgs, + pArgs->hRoot, + pArgs->hObjectParent, + &pArgs->data.AllocDepthWidthHeight.hMemory, + &allocParams); + + ALLOC_DEPTH_WIDTH_HEIGHT_PARAMS(_NO_COPY, _COPY_OUT); + + return status; +} + +static NV_STATUS +_nvos32FunctionAllocSize +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + NV_STATUS status = NV_OK; + + // Range begin/end are captured only if the appropriate flag bit is set. 
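For readability, what the field-copy macro above expands to for a couple of fields; the (_COPY_IN, _COPY_IN) pass fills NV_MEMORY_ALLOCATION_PARAMS from the legacy struct, and the later (_NO_COPY, _COPY_OUT) pass writes only the _IN_OUT fields back:

    // ALLOC_DEPTH_WIDTH_HEIGHT_PARAMS(_COPY_IN, _COPY_IN) expands to assignments like:
    allocParams.owner = pArgs->data.AllocDepthWidthHeight.owner;
    allocParams.size  = pArgs->data.AllocDepthWidthHeight.size;

    // ALLOC_DEPTH_WIDTH_HEIGHT_PARAMS(_NO_COPY, _COPY_OUT) later expands to:
    pArgs->data.AllocDepthWidthHeight.size   = allocParams.size;
    pArgs->data.AllocDepthWidthHeight.offset = allocParams.offset;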
+ if (pArgs->data.AllocSize.flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) + { + allocParams.rangeLo = pArgs->data.AllocSize.rangeBegin; + allocParams.rangeHi = pArgs->data.AllocSize.rangeEnd; + } + else + { + allocParams.rangeLo = 0; + allocParams.rangeHi = 0; + } + +#define ALLOC_SIZE_PARAMS(_IN, _IN_OUT) \ + _IN(owner, AllocSize.owner) \ + _IN(type, AllocSize.type) \ + _IN(flags, AllocSize.flags) \ + _IN(height, AllocSize.height) \ + _IN(width, AllocSize.width) \ + _IN_OUT(size, AllocSize.size) \ + _IN(alignment, AllocSize.alignment) \ + _IN_OUT(offset, AllocSize.offset) \ + _IN_OUT(attr, AllocSize.attr) \ + _IN_OUT(attr2, AllocSize.attr2) \ + _IN_OUT(format, AllocSize.format) \ + _IN_OUT(limit, AllocSize.limit) \ + _IN_OUT(address, AllocSize.address) \ + _IN_OUT(comprCovg, AllocSize.comprCovg) \ + _IN_OUT(zcullCovg, AllocSize.zcullCovg) \ + _IN_OUT(ctagOffset, AllocSize.ctagOffset) + + ALLOC_SIZE_PARAMS(_COPY_IN, _COPY_IN); + + pArgs->data.AllocSize.partitionStride = 256; + + // get memory + status = _rmVidHeapControlAllocCommon(pContext, + pArgs, + pArgs->hRoot, + pArgs->hObjectParent, + &pArgs->data.AllocSize.hMemory, + &allocParams); + + ALLOC_SIZE_PARAMS(_NO_COPY, _COPY_OUT); + + return status; +} + +static NV_STATUS +_nvos32FunctionAllocSizeRange +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + NV_STATUS status = NV_OK; + +#define ALLOC_SIZE_RANGE_PARAMS(_IN, _IN_OUT) \ + _IN(owner, AllocSizeRange.owner) \ + _IN(type, AllocSizeRange.type) \ + _IN(flags, AllocSizeRange.flags) \ + _IN(rangeLo, AllocSizeRange.rangeBegin) \ + _IN(rangeHi, AllocSizeRange.rangeEnd) \ + _IN_OUT(size, AllocSizeRange.size) \ + _IN(alignment, AllocSizeRange.alignment) \ + _IN_OUT(offset, AllocSizeRange.offset) \ + _IN_OUT(attr, AllocSizeRange.attr) \ + _IN_OUT(attr2, AllocSizeRange.attr2) \ + _IN_OUT(format, AllocSizeRange.format) \ + _IN_OUT(limit, AllocSizeRange.limit) \ + _IN_OUT(address, AllocSizeRange.address) \ + _IN_OUT(comprCovg, AllocSizeRange.comprCovg) \ + _IN_OUT(zcullCovg, AllocSizeRange.zcullCovg) \ + _IN_OUT(ctagOffset, AllocSizeRange.ctagOffset) + + ALLOC_SIZE_RANGE_PARAMS(_COPY_IN, _COPY_IN); + + pArgs->data.AllocSizeRange.partitionStride = 256; + + // get memory + status = _rmVidHeapControlAllocCommon(pContext, + pArgs, + pArgs->hRoot, + pArgs->hObjectParent, + &pArgs->data.AllocSizeRange.hMemory, + &allocParams); + + ALLOC_SIZE_RANGE_PARAMS(_NO_COPY, _COPY_OUT); + + return status; +} + +static NV_STATUS +_nvos32FunctionAllocTiledPitchHeight +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + NV_STATUS status = NV_OK; + + pArgs->data.AllocTiledPitchHeight.size = + (NvU64)pArgs->data.AllocTiledPitchHeight.height * pArgs->data.AllocTiledPitchHeight.pitch; + + // Range begin/end are captured only if the appropriate flag bit is set. 
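The NvU64 cast in the tiled-pitch size computation above matters: the operands are narrower than 64 bits, so without the cast their product could wrap. A quick illustrative case:

    NvU32 height = 65536, pitch = 131072;
    NvU64 size = (NvU64)height * pitch;   // 0x200000000 (8 GiB); would wrap to 0 in 32-bit arithmetic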
+ if (pArgs->data.AllocTiledPitchHeight.flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) + { + allocParams.rangeLo = pArgs->data.AllocTiledPitchHeight.rangeBegin; + allocParams.rangeHi = pArgs->data.AllocTiledPitchHeight.rangeEnd; + } + else + { + allocParams.rangeLo = 0; + allocParams.rangeHi = 0; + } + +#define ALLOC_TILE_PITCH_PARAMS(_IN, _IN_OUT) \ + _IN(owner, AllocTiledPitchHeight.owner) \ + _IN(type, AllocTiledPitchHeight.type) \ + _IN(flags, AllocTiledPitchHeight.flags) \ + _IN(height, AllocTiledPitchHeight.height) \ + _IN(width, AllocTiledPitchHeight.width) \ + _IN(pitch, AllocTiledPitchHeight.pitch) \ + _IN_OUT(size, AllocTiledPitchHeight.size) \ + _IN(alignment, AllocTiledPitchHeight.alignment) \ + _IN_OUT(offset, AllocTiledPitchHeight.offset) \ + _IN_OUT(attr, AllocTiledPitchHeight.attr) \ + _IN_OUT(attr2, AllocTiledPitchHeight.attr2) \ + _IN_OUT(format, AllocTiledPitchHeight.format) \ + _IN_OUT(limit, AllocTiledPitchHeight.limit) \ + _IN_OUT(address, AllocTiledPitchHeight.address) \ + _IN_OUT(comprCovg, AllocTiledPitchHeight.comprCovg) \ + _IN_OUT(zcullCovg, AllocTiledPitchHeight.zcullCovg) \ + _IN_OUT(ctagOffset, AllocTiledPitchHeight.ctagOffset) + + ALLOC_TILE_PITCH_PARAMS(_COPY_IN, _COPY_IN); + + pArgs->data.AllocTiledPitchHeight.partitionStride = 256; + + // get memory + status = _rmVidHeapControlAllocCommon(pContext, + pArgs, + pArgs->hRoot, + pArgs->hObjectParent, + &pArgs->data.AllocTiledPitchHeight.hMemory, + &allocParams); + + ALLOC_TILE_PITCH_PARAMS(_NO_COPY, _COPY_OUT); + + return status; +} + +static NV_STATUS +_nvos32FunctionFree +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NvHandle hMemory; + NvHandle hClient = pArgs->hRoot; + + if ((pArgs->data.Free.flags & NVOS32_FREE_FLAGS_MEMORY_HANDLE_PROVIDED) == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + + hMemory = pArgs->data.Free.hMemory; + + return pContext->RmFree(pContext, hClient, hMemory); +} + +static NV_STATUS +_nvos32FunctionInfo +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV2080_CTRL_FB_GET_INFO_PARAMS fbInfoParams = {0}; + NV2080_CTRL_FB_INFO fbInfoEntries[6] = {{0}}; + NV_STATUS status; + NvHandle hSubDevice; + NvBool bMustFreeSubDevice; + NvHandle hClient = pArgs->hRoot; + NvHandle hDevice = pArgs->hObjectParent; + + status = RmDeprecatedFindOrCreateSubDeviceHandle(pContext, hClient, hDevice, + &hSubDevice, &bMustFreeSubDevice); + + if (status != NV_OK) + return status; + + fbInfoParams.fbInfoListSize = 6; + fbInfoParams.fbInfoList = NV_PTR_TO_NvP64(&fbInfoEntries); + + fbInfoEntries[0].index = NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE; + fbInfoEntries[1].index = NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE; + fbInfoEntries[2].index = NV2080_CTRL_FB_INFO_INDEX_FB_TAX_SIZE_KB; + fbInfoEntries[3].index = NV2080_CTRL_FB_INFO_INDEX_HEAP_BASE_KB; + fbInfoEntries[4].index = NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_SIZE_KB; + fbInfoEntries[5].index = NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_BASE_KB; + + status = pContext->RmControl(pContext, hClient, hSubDevice, + NV2080_CTRL_CMD_FB_GET_INFO, + &fbInfoParams, + sizeof(fbInfoParams)); + + pArgs->free = ((NvU64)fbInfoEntries[0].data << 10); + pArgs->total = ((NvU64)fbInfoEntries[1].data << 10); + pArgs->total += ((NvU64)fbInfoEntries[2].data << 10); // For vGPU, add FB tax incurred by host RM + pArgs->data.Info.base = ((NvU64)fbInfoEntries[3].data << 10); + pArgs->data.Info.size = ((NvU64)fbInfoEntries[4].data << 10); + pArgs->data.Info.offset = ((NvU64)fbInfoEntries[5].data << 10); + + if (bMustFreeSubDevice) + { + 
pContext->RmFree(pContext, hClient, hSubDevice); + } + + return status; +} + +static NV_STATUS +_nvos32FunctionDump +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + // Not supported since PMA + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS +_nvos32FunctionReleaseReacquireCompr +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV_STATUS status = NV_OK; + NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS updateParams = {0}; + + // We're using ReleaseCompr here when we have that _and_ ReacquireCompr to deal with in + // terms of args. AT least we can make sure they aren't broken runtime... + if ( !( ( NV_OFFSETOF(NVOS32_PARAMETERS, data.ReleaseCompr.flags) == + NV_OFFSETOF(NVOS32_PARAMETERS, data.ReacquireCompr.flags) ) && + ( NV_OFFSETOF(NVOS32_PARAMETERS, data.ReleaseCompr.hMemory) == + NV_OFFSETOF(NVOS32_PARAMETERS, data.ReacquireCompr.hMemory) ) && + ( NV_OFFSETOF(NVOS32_PARAMETERS, data.ReleaseCompr.owner) == + NV_OFFSETOF(NVOS32_PARAMETERS, data.ReacquireCompr.owner) ) ) ) + { + return NV_ERR_GENERIC; + } + + if (!(pArgs->data.ReleaseCompr.flags & NVOS32_RELEASE_COMPR_FLAGS_MEMORY_HANDLE_PROVIDED)) + return NV_ERR_INVALID_FLAGS; + + updateParams.bRelease = (pArgs->function == NVOS32_FUNCTION_RELEASE_COMPR); + + status = pContext->RmControl(pContext, pArgs->hRoot, pArgs->data.ReleaseCompr.hMemory, + NV0041_CTRL_CMD_UPDATE_SURFACE_COMPRESSION, + &updateParams, + sizeof(updateParams)); + + return status; +} + +static NV_STATUS +_nvos32FunctionGetMemAlignment +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS allocParams = {0}; + NV_STATUS status = NV_OK; + NvHandle hSubDevice; + NvBool bMustFreeSubDevice; + +#define GET_MEMORY_ALIGNMENT_PARAMS(_IN, _IN_OUT) \ + _IN_OUT(alignType, AllocHintAlignment.alignType) \ + _IN_OUT(alignAttr, AllocHintAlignment.alignAttr) \ + _IN_OUT(alignInputFlags, AllocHintAlignment.alignInputFlags) \ + _IN_OUT(alignSize, AllocHintAlignment.alignSize) \ + _IN_OUT(alignHeight, AllocHintAlignment.alignHeight) \ + _IN_OUT(alignWidth, AllocHintAlignment.alignWidth) \ + _IN_OUT(alignPitch, AllocHintAlignment.alignPitch) \ + _IN_OUT(alignPad, AllocHintAlignment.alignPad) \ + _IN_OUT(alignMask, AllocHintAlignment.alignMask) \ + _IN_OUT(alignOutputFlags[0], AllocHintAlignment.alignOutputFlags[0]) \ + _IN_OUT(alignOutputFlags[1], AllocHintAlignment.alignOutputFlags[1]) \ + _IN_OUT(alignOutputFlags[2], AllocHintAlignment.alignOutputFlags[2]) \ + _IN_OUT(alignOutputFlags[3], AllocHintAlignment.alignOutputFlags[3]) \ + _IN_OUT(alignBank[0], AllocHintAlignment.alignBank[0]) \ + _IN_OUT(alignBank[1], AllocHintAlignment.alignBank[1]) \ + _IN_OUT(alignBank[2], AllocHintAlignment.alignBank[2]) \ + _IN_OUT(alignBank[3], AllocHintAlignment.alignBank[3]) \ + _IN_OUT(alignKind, AllocHintAlignment.alignKind) \ + _IN_OUT(alignAdjust, AllocHintAlignment.alignAdjust) \ + _IN_OUT(alignAttr2, AllocHintAlignment.alignAttr2) + + GET_MEMORY_ALIGNMENT_PARAMS(_COPY_IN, _COPY_IN); + + // Param can be either a hSubdevice or hDevice, control is on subdevice + status = RmDeprecatedFindOrCreateSubDeviceHandle(pContext, pArgs->hRoot, pArgs->hObjectParent, + &hSubDevice, &bMustFreeSubDevice); + if (status != NV_OK) + return status; + + status = pContext->RmControl(pContext, pArgs->hRoot, hSubDevice, + NV2080_CTRL_CMD_FB_GET_MEM_ALIGNMENT, + &allocParams, + sizeof(allocParams)); + + + GET_MEMORY_ALIGNMENT_PARAMS(_NO_COPY, _COPY_OUT); + + if (bMustFreeSubDevice) + { + pContext->RmFree(pContext, 
pArgs->hRoot, hSubDevice); + } + + return status; +} + +static NV_STATUS +_nvos32FunctionHwAlloc +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS allocParams = {0}; + NV_STATUS status = NV_OK; + +#define ALLOC_HW_RESOURCES_PARAMS(_IN, _IN_OUT, _OUT) \ + _IN (owner, HwAlloc.allocOwner) \ + _IN_OUT(type, HwAlloc.allocType) \ + _IN (attr, HwAlloc.allocAttr) \ + _IN_OUT(flags, HwAlloc.allocInputFlags) \ + _IN_OUT(size, HwAlloc.allocSize) \ + _IN_OUT(height, HwAlloc.allocHeight) \ + _IN_OUT(width, HwAlloc.allocWidth) \ + _IN_OUT(pitch, HwAlloc.allocPitch) \ + _IN_OUT(alignment, HwAlloc.allocMask) \ + _IN_OUT(comprCovg, HwAlloc.allocComprCovg) \ + _IN_OUT(zcullCovg, HwAlloc.allocZcullCovg) \ + _IN_OUT(bindResultFunc, HwAlloc.bindResultFunc) \ + _IN_OUT(pHandle, HwAlloc.pHandle) \ + _OUT (attr, HwAlloc.retAttr) \ + _IN_OUT(kind, HwAlloc.kind) \ + _IN_OUT(osDeviceHandle, HwAlloc.osDeviceHandle) \ + _IN (attr2, HwAlloc.allocAttr2) \ + _OUT (attr2, HwAlloc.retAttr2) \ + _IN_OUT(compPageShift, HwAlloc.comprInfo.compPageShift) \ + _IN_OUT(compressedKind, HwAlloc.comprInfo.compressedKind) \ + _IN_OUT(compTagLineMin, HwAlloc.comprInfo.compTagLineMin) \ + _IN_OUT(compPageIndexLo, HwAlloc.comprInfo.compPageIndexLo) \ + _IN_OUT(compPageIndexHi, HwAlloc.comprInfo.compPageIndexHi) \ + _IN_OUT(compTagLineMultiplier, HwAlloc.comprInfo.compTagLineMultiplier) \ + _IN_OUT(uncompressedKind, HwAlloc.uncompressedKind) \ + _IN_OUT(allocAddr, HwAlloc.allocAddr) + + if (0 == (pArgs->data.AllocOsDesc.flags & NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED)) + { + // RmAlloc will generate a handle when hMemory = 0 + pArgs->data.HwAlloc.allochMemory = 0; + } + + ALLOC_HW_RESOURCES_PARAMS(_COPY_IN, _COPY_IN, _NO_COPY); + + status = pContext->RmAlloc(pContext, + pArgs->hRoot, + pArgs->hObjectParent, + &pArgs->data.HwAlloc.allochMemory, + NV01_MEMORY_HW_RESOURCES, + &allocParams); + + pArgs->data.HwAlloc.hResourceHandle = pArgs->data.HwAlloc.allochMemory; + + ALLOC_HW_RESOURCES_PARAMS(_NO_COPY, _COPY_OUT,_COPY_OUT); + + return status; +} + +static NV_STATUS +_nvos32FunctionHwFree +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NvHandle hMemory = pArgs->data.HwFree.hResourceHandle; + NvHandle hClient = pArgs->hRoot; + + return pContext->RmFree(pContext, hClient, hMemory); +} + +static NV_STATUS +_nvos32FunctionAllocOsDesc +( + DEPRECATED_CONTEXT *pContext, + NVOS32_PARAMETERS *pArgs +) +{ + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS allocParams = {0}; + NV_STATUS status = NV_OK; + + allocParams.type = pArgs->data.AllocOsDesc.type; + allocParams.flags = pArgs->data.AllocOsDesc.flags; + allocParams.attr = pArgs->data.AllocOsDesc.attr; + allocParams.attr2 = pArgs->data.AllocOsDesc.attr2; + allocParams.descriptor = pArgs->data.AllocOsDesc.descriptor; + allocParams.limit = pArgs->data.AllocOsDesc.limit; + allocParams.descriptorType = pArgs->data.AllocOsDesc.descriptorType; + + if (0 == (pArgs->data.AllocOsDesc.flags & NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED)) + { + // RmAlloc will generate a handle when hMemory = 0 + pArgs->data.AllocOsDesc.hMemory = 0; + } + + status = pContext->RmAlloc(pContext, + pArgs->hRoot, + pArgs->hObjectParent, + &pArgs->data.AllocOsDesc.hMemory, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + &allocParams); + + return status; +} diff --git a/src/nvidia/interface/deprecated/rmapi_gss_legacy_control.c b/src/nvidia/interface/deprecated/rmapi_gss_legacy_control.c new file mode 100644 index 000000000..ba913d169 --- /dev/null +++ 
b/src/nvidia/interface/deprecated/rmapi_gss_legacy_control.c @@ -0,0 +1,117 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "deprecated/rmapi_deprecated.h" + +#include "finn_rm_api.h" +#include "gpu/gpu.h" +#include "core/locks.h" + +/*! + * Some clients are still making these legacy GSS controls. We no longer support these in RM, + * but until all the numerous tools are updated to use alternative APIs, just forward all of them + * to GSP and let it deal with what is or isn't valid. +*/ +NV_STATUS RmGssLegacyRpcCmd +( + API_SECURITY_INFO *pSecInfo, + DEPRECATED_CONTEXT *pContextInternal, + NVOS54_PARAMETERS *pArgs +) +{ + OBJGPU *pGpu = NULL; + RsClient *pClient = NULL; + NV_STATUS status = NV_OK; + GPU_MASK gpuMaskRelease = 0; + void *pKernelParams = NULL; + + NV_ASSERT_OR_RETURN((pArgs->cmd & RM_GSS_LEGACY_MASK), + NV_ERR_INVALID_STATE); + + if (((pArgs->cmd & RM_GSS_LEGACY_MASK_PRIVILEGED) == RM_GSS_LEGACY_MASK_PRIVILEGED) && + (pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + NV_CHECK_OK_OR_ELSE(status, + LEVEL_ERROR, + serverGetClientUnderLock(&g_resServ, pArgs->hClient, &pClient), + return NV_ERR_INVALID_ARGUMENT); + + NV_CHECK_OK_OR_ELSE(status, + LEVEL_ERROR, + gpuGetByHandle(pClient, pArgs->hObject, NULL, &pGpu), + return NV_ERR_INVALID_ARGUMENT); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + if (pSecInfo->paramLocation == PARAM_LOCATION_USER) + { + pKernelParams = portMemAllocNonPaged(pArgs->paramsSize); + if (pKernelParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = portMemExCopyFromUser(pArgs->params, pKernelParams, pArgs->paramsSize); + if (status != NV_OK) + goto done; + } + else + { + pKernelParams = (void*)pArgs->params; + } + + status = rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_RPC, + &gpuMaskRelease); + if (status != NV_OK) + goto done; + + status = pRmApi->Control(pRmApi, + pArgs->hClient, + pArgs->hObject, + pArgs->cmd, + pKernelParams, + pArgs->paramsSize); + +done: + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + + if (pSecInfo->paramLocation == PARAM_LOCATION_USER) + { + if (status == NV_OK) + { + status = portMemExCopyToUser(pKernelParams, pArgs->params, pArgs->paramsSize); + } + 
portMemFree(pKernelParams); + } + + return status; +} diff --git a/src/nvidia/interface/nvRmReg.h b/src/nvidia/interface/nvRmReg.h new file mode 100644 index 000000000..00ceea0bf --- /dev/null +++ b/src/nvidia/interface/nvRmReg.h @@ -0,0 +1,2 @@ +// Files should include nvrm_registry.h directly +#include "nvrm_registry.h" diff --git a/src/nvidia/interface/nv_firmware_types.h b/src/nvidia/interface/nv_firmware_types.h new file mode 100644 index 000000000..90dd93f1e --- /dev/null +++ b/src/nvidia/interface/nv_firmware_types.h @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NV_FIRMWARE_TYPES_H +#define NV_FIRMWARE_TYPES_H + +typedef enum { + NV_FIRMWARE_MODE_DISABLED = 0, + NV_FIRMWARE_MODE_ENABLED = 1, + NV_FIRMWARE_MODE_DEFAULT = 2, + NV_FIRMWARE_MODE_INVALID = 0xFF +} NvFirmwareMode; + +#endif // NV_FIRMWARE_TYPES_H diff --git a/src/nvidia/interface/nv_uvm_types.h b/src/nvidia/interface/nv_uvm_types.h new file mode 100644 index 000000000..c0df2d4d0 --- /dev/null +++ b/src/nvidia/interface/nv_uvm_types.h @@ -0,0 +1,896 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +// +// This file provides common types for both UVM driver and RM's UVM interface. +// + +#ifndef _NV_UVM_TYPES_H_ +#define _NV_UVM_TYPES_H_ + +#include "nvtypes.h" +#include "nvstatus.h" +#include "nvgputypes.h" +#include "nvCpuUuid.h" + + +// +// Default Page Size if left "0" because in RM BIG page size is default & there +// are multiple BIG page sizes in RM. These defines are used as flags to "0" +// should be OK when user is not sure which pagesize allocation it wants +// +#define UVM_PAGE_SIZE_DEFAULT 0x0 +#define UVM_PAGE_SIZE_4K 0x1000 +#define UVM_PAGE_SIZE_64K 0x10000 +#define UVM_PAGE_SIZE_128K 0x20000 +#define UVM_PAGE_SIZE_2M 0x200000 +#define UVM_PAGE_SIZE_512M 0x20000000 + +// +// When modifying flags, make sure they are compatible with the mirrored +// PMA_* flags in phys_mem_allocator.h. +// +// Input flags +#define UVM_PMA_ALLOCATE_DONT_EVICT NVBIT(0) +#define UVM_PMA_ALLOCATE_PINNED NVBIT(1) +#define UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED NVBIT(2) +#define UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE NVBIT(3) +#define UVM_PMA_ALLOCATE_SPECIFY_REGION_ID NVBIT(4) +#define UVM_PMA_ALLOCATE_PREFER_SLOWEST NVBIT(5) +#define UVM_PMA_ALLOCATE_CONTIGUOUS NVBIT(6) +#define UVM_PMA_ALLOCATE_PERSISTENT NVBIT(7) +#define UVM_PMA_ALLOCATE_PROTECTED_REGION NVBIT(8) +#define UVM_PMA_ALLOCATE_FORCE_ALIGNMENT NVBIT(9) +#define UVM_PMA_ALLOCATE_NO_ZERO NVBIT(10) +#define UVM_PMA_ALLOCATE_TURN_BLACKLIST_OFF NVBIT(11) +#define UVM_PMA_ALLOCATE_ALLOW_PARTIAL NVBIT(12) + +// Output flags +#define UVM_PMA_ALLOCATE_RESULT_IS_ZERO NVBIT(0) + +// Input flags to pmaFree +#define UVM_PMA_FREE_IS_ZERO NVBIT(0) + +// +// Indicate that the PMA operation is being done from one of the PMA eviction +// callbacks. +// +// Notably this flag is currently used only by the UVM/RM interface and not +// mirrored in PMA. +// +#define UVM_PMA_CALLED_FROM_PMA_EVICTION 16384 + +#define UVM_UUID_LEN 16 +#define UVM_SW_OBJ_SUBCHANNEL 5 + +typedef unsigned long long UvmGpuPointer; + +// +// The following typedefs serve to explain the resources they point to. +// The actual resources remain RM internal and not exposed. +// +typedef struct uvmGpuSession_tag *uvmGpuSessionHandle; // gpuSessionHandle +typedef struct uvmGpuDevice_tag *uvmGpuDeviceHandle; // gpuDeviceHandle +typedef struct uvmGpuAddressSpace_tag *uvmGpuAddressSpaceHandle; // gpuAddressSpaceHandle +typedef struct uvmGpuChannel_tag *uvmGpuChannelHandle; // gpuChannelHandle +typedef struct uvmGpuCopyEngine_tag *uvmGpuCopyEngineHandle; // gpuObjectHandle + +typedef struct UvmGpuMemoryInfo_tag +{ + // Out: Memory layout. + NvU32 kind; + + // Out: Set to TRUE, if the allocation is in sysmem. + NvBool sysmem; + + // Out: Set to TRUE, if the allocation is a constructed + // under a Device or Subdevice. + // All permutations of sysmem and deviceDescendant are valid. + // !sysmem && !deviceDescendant implies a fabric allocation. + NvBool deviceDescendant; + + // Out: Page size associated with the phys alloc. + NvU32 pageSize; + + // Out: Set to TRUE, if the allocation is contiguous. + NvBool contig; + + // Out: Starting Addr if the allocation is contiguous. + // This is only valid if contig is NV_TRUE. + NvU64 physAddr; + + // Out: Total size of the allocation. + NvU64 size; + + // Out: Uuid of the GPU to which the allocation belongs. + // This is only valid if deviceDescendant is NV_TRUE. 
+    //       Note: If the allocation is owned by a device in
+    //       an SLI group and the allocation is broadcast
+    //       across the SLI group, this UUID will be any one
+    //       of the subdevices in the SLI group.
+    NvProcessorUuid uuid;
+} UvmGpuMemoryInfo;
+
+// Some resources must share the same virtual mappings across channels. A mapped
+// resource must be shared by a channel iff:
+//
+// 1) The channel belongs to a TSG (UvmGpuChannelInstanceInfo::bTsgChannel is
+//    NV_TRUE).
+//
+// 2) The channel is in the same TSG as all other channels sharing that mapping
+//    (UvmGpuChannelInstanceInfo::tsgId matches among channels).
+//
+// 3) The channel is in the same GPU address space as the other channels
+//    sharing that mapping.
+//
+// 4) The resource handle(s) match those of the shared mapping
+//    (UvmGpuChannelResourceInfo::resourceDescriptor and
+//    UvmGpuChannelResourceInfo::resourceId).
+typedef struct UvmGpuChannelResourceInfo_tag
+{
+    // Out: Ptr to the RM memDesc of the channel resource.
+    NvP64 resourceDescriptor;
+
+    // Out: RM ID of the channel resource.
+    NvU32 resourceId;
+
+    // Out: Alignment needed for the resource allocation.
+    NvU64 alignment;
+
+    // Out: Info about the resource allocation.
+    UvmGpuMemoryInfo resourceInfo;
+} UvmGpuChannelResourceInfo;
+
+typedef struct UvmGpuPagingChannelInfo_tag
+{
+    // Pointer to a shadow buffer mirroring the contents of the error notifier
+    // for the paging channel
+    NvNotification    *shadowErrorNotifier;
+} UvmGpuPagingChannelInfo;
+
+typedef enum
+{
+    UVM_GPU_CHANNEL_ENGINE_TYPE_GR = 1,
+    UVM_GPU_CHANNEL_ENGINE_TYPE_CE = 2,
+    UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2 = 3,
+} UVM_GPU_CHANNEL_ENGINE_TYPE;
+
+#define UVM_GPU_CHANNEL_MAX_RESOURCES 13
+
+typedef struct UvmGpuChannelInstanceInfo_tag
+{
+    // Out: Starting address of the channel instance.
+    NvU64 base;
+
+    // Out: Set to NV_TRUE, if the instance is in sysmem.
+    //      Set to NV_FALSE, if the instance is in vidmem.
+    NvBool sysmem;
+
+    // Out: Hardware runlist ID.
+    NvU32 runlistId;
+
+    // Out: Hardware channel ID.
+    NvU32 chId;
+
+    // Out: NV_TRUE if the channel belongs to a subcontext or NV_FALSE if it
+    //      belongs to a regular context.
+    NvBool bInSubctx;
+
+    // Out: ID of the subcontext to which the channel belongs.
+    NvU32 subctxId;
+
+    // Out: Whether the channel belongs to a TSG or not
+    NvBool bTsgChannel;
+
+    // Out: ID of the TSG to which the channel belongs
+    NvU32 tsgId;
+
+    // Out: Maximum number of subcontexts in the TSG to which the channel belongs
+    NvU32 tsgMaxSubctxCount;
+
+    // Out: Info of channel resources associated with the channel.
+    UvmGpuChannelResourceInfo resourceInfo[UVM_GPU_CHANNEL_MAX_RESOURCES];
+
+    // Out: Number of valid entries in resourceInfo array.
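+    //      (At most UVM_GPU_CHANNEL_MAX_RESOURCES, the size of the array above.)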
+ NvU32 resourceCount; + + // Out: Type of the engine the channel is bound to + NvU32 channelEngineType; + + // Out: Channel handle required to ring the doorbell + NvU32 workSubmissionToken; + + // Out: Address of the doorbell + volatile NvU32 *workSubmissionOffset; + + // Out: Channel handle to be used in the CLEAR_FAULTED method + NvU32 clearFaultedToken; + + // Out: Address of the NV_CHRAM_CHANNEL register required to clear the + // ENG_FAULTED/PBDMA_FAULTED bits after servicing non-replayable faults on + // Ampere+ GPUs + volatile NvU32 *pChramChannelRegister; + + // Out: SMC engine id to which the GR channel is bound, or zero if the GPU + // does not support SMC or it is a CE channel + NvU32 smcEngineId; + + // Out: Start of the VEID range assigned to the SMC engine the GR channel + // is bound to, or zero if the GPU does not support SMC or it is a CE + // channel + NvU32 smcEngineVeIdOffset; +} UvmGpuChannelInstanceInfo; + +typedef struct UvmGpuChannelResourceBindParams_tag +{ + // In: RM ID of the channel resource. + NvU32 resourceId; + + // In: Starting VA at which the channel resource is mapped. + NvU64 resourceVa; +} UvmGpuChannelResourceBindParams; + +typedef struct UvmGpuChannelInfo_tag +{ + volatile unsigned *gpGet; + volatile unsigned *gpPut; + UvmGpuPointer *gpFifoEntries; + unsigned numGpFifoEntries; + unsigned channelClassNum; + + // The errorNotifier is filled out when the channel hits an RC error. + NvNotification *errorNotifier; + + NvU32 hwRunlistId; + NvU32 hwChannelId; + + volatile unsigned *dummyBar1Mapping; + + // These values are filled by nvUvmInterfaceCopyEngineAlloc. The work + // submission token requires the channel to be bound to a runlist and that + // happens after CE allocation. + volatile NvU32 *workSubmissionOffset; + + // To be deprecated. See pWorkSubmissionToken below. + NvU32 workSubmissionToken; + + // + // This is the memory location where the most recently updated work + // submission token for this channel will be written to. After submitting + // new work and updating GP_PUT with the appropriate fence, the token must + // be read from this location before writing it to the workSubmissionOffset + // to kick off the new work. + // + volatile NvU32 *pWorkSubmissionToken; +} UvmGpuChannelInfo; + +typedef enum +{ + // This value must be passed by Pascal and pre-Pascal GPUs for those + // allocations for which a specific location cannot be enforced. + UVM_BUFFER_LOCATION_DEFAULT = 0, + + UVM_BUFFER_LOCATION_SYS = 1, + UVM_BUFFER_LOCATION_VID = 2, +} UVM_BUFFER_LOCATION; + +typedef struct UvmGpuChannelAllocParams_tag +{ + NvU32 numGpFifoEntries; + + // The next two fields store UVM_BUFFER_LOCATION values + NvU32 gpFifoLoc; + NvU32 gpPutLoc; + + // Index of the engine the channel will be bound to + // ignored if engineType is anything other than UVM_GPU_CHANNEL_ENGINE_TYPE_CE + NvU32 engineIndex; + + // interpreted as UVM_GPU_CHANNEL_ENGINE_TYPE + NvU32 engineType; +} UvmGpuChannelAllocParams; + +typedef struct UvmGpuPagingChannelAllocParams_tag +{ + // Index of the LCE engine the channel will be bound to, a zero-based offset + // from NV2080_ENGINE_TYPE_COPY0. + NvU32 engineIndex; +} UvmGpuPagingChannelAllocParams; + +// The max number of Copy Engines supported by a GPU. +// The gpu ops build has a static assert that this is the correct number. 
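+//
+// As a usage sketch (illustrative, not defined by this interface): consumers
+// typically walk the fixed-size caps array and skip unsupported entries,
+//
+//     for (i = 0; i < UVM_COPY_ENGINE_COUNT_MAX; i++)
+//     {
+//         if (!cesCaps.copyEngineCaps[i].supported)
+//             continue;
+//         // inspect .grce, .sysmem, .nvlinkP2p, .cePceMask, ...
+//     }
+//
+// where cesCaps is a UvmGpuCopyEnginesCaps (defined below).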
+#define UVM_COPY_ENGINE_COUNT_MAX 10 + +typedef struct +{ + // True if the CE is supported at all + NvBool supported:1; + + // True if the CE is synchronous with GR + NvBool grce:1; + + // True if the CE shares physical CEs with any other CE + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it + // again each time a GPU is registered. + NvBool shared:1; + + // True if the CE can give enhanced performance for SYSMEM reads over other CEs + NvBool sysmemRead:1; + + // True if the CE can give enhanced performance for SYSMEM writes over other CEs + NvBool sysmemWrite:1; + + // True if the CE can be used for SYSMEM transactions + NvBool sysmem:1; + + // True if the CE can be used for P2P transactions using NVLINK + NvBool nvlinkP2p:1; + + // True if the CE can be used for P2P transactions + NvBool p2p:1; + + // Mask of physical CEs assigned to this LCE + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it + // again each time a GPU is registered. + NvU32 cePceMask; +} UvmGpuCopyEngineCaps; + +typedef struct UvmGpuCopyEnginesCaps_tag +{ + // Supported CEs may not be contiguous + UvmGpuCopyEngineCaps copyEngineCaps[UVM_COPY_ENGINE_COUNT_MAX]; +} UvmGpuCopyEnginesCaps; + +typedef enum +{ + UVM_LINK_TYPE_NONE, + UVM_LINK_TYPE_PCIE, + UVM_LINK_TYPE_NVLINK_1, + UVM_LINK_TYPE_NVLINK_2, + UVM_LINK_TYPE_NVLINK_3, +} UVM_LINK_TYPE; + +typedef struct UvmGpuCaps_tag +{ + NvU32 sysmemLink; // UVM_LINK_TYPE + NvU32 sysmemLinkRateMBps; // See UvmGpuP2PCapsParams::totalLinkLineRateMBps + NvBool numaEnabled; + NvU32 numaNodeId; + + // On ATS systems, GPUs connected to different CPU sockets can have peer + // traffic. They are called indirect peers. However, indirect peers are + // mapped using sysmem aperture. In order to disambiguate the location of a + // specific memory address, each GPU maps its memory to a different window + // in the System Physical Address (SPA) space. The following fields contain + // the base + size of such window for the GPU. systemMemoryWindowSize + // different than 0 indicates that the window is valid. + // + // - If the window is valid, then we can map GPU memory to the CPU as + // cache-coherent by adding the GPU address to the window start. + // - If numaEnabled is NV_TRUE, then we can also convert the system + // addresses of allocated GPU memory to struct pages. + // + // TODO: Bug 1986868: fix window start computation for SIMICS + NvU64 systemMemoryWindowStart; + NvU64 systemMemoryWindowSize; + + // This tells if the GPU is connected to NVSwitch. On systems with NVSwitch + // all GPUs are connected to it. If connectedToSwitch is NV_TRUE, + // nvswitchMemoryWindowStart tells the base address for the GPU in the + // NVSwitch address space. It is used when creating PTEs of memory mappings + // to NVSwitch peers. 
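+    // Illustrative example (numbers are hypothetical): with
+    // nvswitchMemoryWindowStart = 0x8000000000, a local physical offset of
+    // 0x2000 would be reached by NVSwitch peers at 0x8000000000 + 0x2000 when
+    // their PTEs are built.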
+ NvBool connectedToSwitch; + NvU64 nvswitchMemoryWindowStart; +} UvmGpuCaps; + +typedef struct UvmGpuAddressSpaceInfo_tag +{ + NvU32 bigPageSize; + + NvBool atsEnabled; + + // Mapped registers that contain the current GPU time + volatile NvU32 *time0Offset; + volatile NvU32 *time1Offset; + + // Maximum number of subcontexts supported under this GPU address space + NvU32 maxSubctxCount; + + NvBool smcEnabled; + + NvU32 smcSwizzId; + + NvU32 smcGpcCount; +} UvmGpuAddressSpaceInfo; + +typedef struct UvmGpuAllocInfo_tag +{ + NvU64 rangeBegin; // Allocation will be made between + NvU64 rangeEnd; // rangeBegin & rangeEnd both included + NvU64 gpuPhysOffset; // Returns gpuPhysOffset if contiguous requested + NvU32 pageSize; // default is RM big page size - 64K or 128 K" else use 4K or 2M + NvU64 alignment; // Alignment of allocation + NvBool bContiguousPhysAlloc; // Flag to request contiguous physical allocation + NvBool bMemGrowsDown; // Causes RM to reserve physical heap from top of FB + NvBool bPersistentVidmem; // Causes RM to allocate persistent video memory + NvHandle hPhysHandle; // Handle for phys allocation either provided or retrieved +} UvmGpuAllocInfo; + +typedef enum +{ + UVM_VIRT_MODE_NONE = 0, // Baremetal or passthrough virtualization + UVM_VIRT_MODE_LEGACY = 1, // Virtualization without SRIOV support + UVM_VIRT_MODE_SRIOV_HEAVY = 2, // Virtualization with SRIOV Heavy configured + UVM_VIRT_MODE_SRIOV_STANDARD = 3, // Virtualization with SRIOV Standard configured + UVM_VIRT_MODE_COUNT = 4, +} UVM_VIRT_MODE; + +// !!! The following enums (with UvmRm prefix) are defined and documented in +// mm/uvm/interface/uvm_types.h and must be mirrored. Please refer to that file +// for more details. + +// UVM GPU mapping types +typedef enum +{ + UvmRmGpuMappingTypeDefault = 0, + UvmRmGpuMappingTypeReadWriteAtomic = 1, + UvmRmGpuMappingTypeReadWrite = 2, + UvmRmGpuMappingTypeReadOnly = 3, + UvmRmGpuMappingTypeCount = 4 +} UvmRmGpuMappingType; + +// UVM GPU caching types +typedef enum +{ + UvmRmGpuCachingTypeDefault = 0, + UvmRmGpuCachingTypeForceUncached = 1, + UvmRmGpuCachingTypeForceCached = 2, + UvmRmGpuCachingTypeCount = 3 +} UvmRmGpuCachingType; + +// UVM GPU format types +typedef enum { + UvmRmGpuFormatTypeDefault = 0, + UvmRmGpuFormatTypeBlockLinear = 1, + UvmRmGpuFormatTypeCount = 2 +} UvmRmGpuFormatType; + +// UVM GPU Element bits types +typedef enum { + UvmRmGpuFormatElementBitsDefault = 0, + UvmRmGpuFormatElementBits8 = 1, + UvmRmGpuFormatElementBits16 = 2, + // Cuda does not support 24-bit width + UvmRmGpuFormatElementBits32 = 4, + UvmRmGpuFormatElementBits64 = 5, + UvmRmGpuFormatElementBits128 = 6, + UvmRmGpuFormatElementBitsCount = 7 +} UvmRmGpuFormatElementBits; + +// UVM GPU Compression types +typedef enum { + UvmRmGpuCompressionTypeDefault = 0, + UvmRmGpuCompressionTypeEnabledNoPlc = 1, + UvmRmGpuCompressionTypeCount = 2 +} UvmRmGpuCompressionType; + +typedef struct UvmGpuExternalMappingInfo_tag +{ + // In: GPU caching ability. + UvmRmGpuCachingType cachingType; + + // In: Virtual permissions. + UvmRmGpuMappingType mappingType; + + // In: RM virtual mapping memory format + UvmRmGpuFormatType formatType; + + // In: RM virtual mapping element bits + UvmRmGpuFormatElementBits elementBits; + + // In: RM virtual compression type + UvmRmGpuCompressionType compressionType; + + // In: Size of the buffer to store PTEs (in bytes). + NvU64 pteBufferSize; + + // In: Pointer to a buffer to store PTEs. 
+ // Out: The interface will fill the buffer with PTEs + NvU64 *pteBuffer; + + // Out: Number of PTEs filled in to the buffer. + NvU64 numWrittenPtes; + + // Out: Number of PTEs remaining to be filled + // if the buffer is not sufficient to accommodate + // requested PTEs. + NvU64 numRemainingPtes; + + // Out: PTE size (in bytes) + NvU32 pteSize; +} UvmGpuExternalMappingInfo; + +typedef struct UvmGpuP2PCapsParams_tag +{ + // Out: peerId[i] contains gpu[i]'s peer id of gpu[1 - i]. Only defined if + // the GPUs are direct peers. + NvU32 peerIds[2]; + + // Out: UVM_LINK_TYPE + NvU32 p2pLink; + + // Out: optimalNvlinkWriteCEs[i] contains gpu[i]'s optimal CE for writing to + // gpu[1 - i]. The CE indexes are valid only if the GPUs are NVLink peers. + // + // The value returned by RM for this field may change when a GPU is + // registered with RM for the first time, so UVM needs to query it again + // each time a GPU is registered. + NvU32 optimalNvlinkWriteCEs[2]; + + // Out: Maximum unidirectional bandwidth between the peers in megabytes per + // second, not taking into account the protocols overhead. The reported + // bandwidth for indirect peers is zero. + NvU32 totalLinkLineRateMBps; + + // Out: True if the peers have a indirect link to communicate. On P9 + // systems, this is true if peers are connected to different NPUs that + // forward the requests between them. + NvU32 indirectAccess : 1; +} UvmGpuP2PCapsParams; + +// Platform-wide information +typedef struct UvmPlatformInfo_tag +{ + // Out: ATS (Address Translation Services) is supported + NvBool atsSupported; + +} UvmPlatformInfo; + +typedef struct UvmGpuClientInfo_tag +{ + NvHandle hClient; + + NvHandle hSmcPartRef; +} UvmGpuClientInfo; + +#define UVM_GPU_NAME_LENGTH 0x40 + +typedef struct UvmGpuInfo_tag +{ + // Printable gpu name + char name[UVM_GPU_NAME_LENGTH]; + + // Uuid of this gpu + NvProcessorUuid uuid; + + // Gpu architecture; NV2080_CTRL_MC_ARCH_INFO_ARCHITECTURE_* + NvU32 gpuArch; + + // Gpu implementation; NV2080_CTRL_MC_ARCH_INFO_IMPLEMENTATION_* + NvU32 gpuImplementation; + + // Host (gpfifo) class; *_CHANNEL_GPFIFO_*, e.g. KEPLER_CHANNEL_GPFIFO_A + NvU32 hostClass; + + // Copy engine (dma) class; *_DMA_COPY_*, e.g. KEPLER_DMA_COPY_A + NvU32 ceClass; + + // Compute class; *_COMPUTE_*, e.g. KEPLER_COMPUTE_A + NvU32 computeClass; + + // Set if GPU supports TCC Mode & is in TCC mode. + NvBool gpuInTcc; + + // Number of subdevices in SLI group. + NvU32 subdeviceCount; + + // Virtualization mode of this gpu. + NvU32 virtMode; // UVM_VIRT_MODE + + // NV_TRUE if this is a simulated/emulated GPU. NV_FALSE, otherwise. + NvBool isSimulated; + + // Number of GPCs + // If SMC is enabled, this is the currently configured number of GPCs for + // the given partition (also see the smcSwizzId field below). + NvU32 gpcCount; + + // Maximum number of GPCs; NV_SCAL_LITTER_NUM_GPCS + // This number is independent of the partition configuration, and can be + // used to conservatively size GPU-global constructs. + NvU32 maxGpcCount; + + // Number of TPCs + NvU32 tpcCount; + + // Maximum number of TPCs per GPC + NvU32 maxTpcPerGpcCount; + + // NV_TRUE if SMC is enabled on this GPU. + NvBool smcEnabled; + + // SMC partition ID (unique per GPU); note: valid when first looked up in + // nvUvmInterfaceGetGpuInfo(), but not guaranteed to remain valid. + // nvUvmInterfaceDeviceCreate() re-verifies the swizzId and fails if it is + // no longer valid. 
+    NvU32 smcSwizzId;
+
+    UvmGpuClientInfo smcUserClientInfo;
+
+} UvmGpuInfo;
+
+typedef struct UvmGpuFbInfo_tag
+{
+    // Max physical address that can be allocated by UVM. This excludes internal
+    // RM regions that are not registered with PMA either.
+    NvU64 maxAllocatableAddress;
+
+    NvU32 heapSize;         // RAM in KB available for user allocations
+    NvU32 reservedHeapSize; // RAM in KB reserved for internal RM allocation
+    NvBool bZeroFb;         // Zero FB mode enabled.
+} UvmGpuFbInfo;
+
+typedef struct UvmGpuEccInfo_tag
+{
+    unsigned eccMask;
+    unsigned eccOffset;
+    void *eccReadLocation;
+    NvBool *eccErrorNotifier;
+    NvBool bEccEnabled;
+} UvmGpuEccInfo;
+
+typedef struct UvmPmaAllocationOptions_tag
+{
+    NvU32 flags;
+    NvU32 minimumSpeed;         // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED
+    NvU64 physBegin, physEnd;   // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE
+    NvU32 regionId;             // valid if flags & UVM_PMA_ALLOCATE_SPECIFY_REGION_ID
+    NvU64 alignment;            // valid if flags & UVM_PMA_ALLOCATE_FORCE_ALIGNMENT
+    NvLength numPagesAllocated; // valid if flags & UVM_PMA_ALLOCATE_ALLOW_PARTIAL
+
+    NvU32 resultFlags;          // valid if the allocation function returns NV_OK
+} UvmPmaAllocationOptions;
+
+//
+// Mirrored in PMA (PMA_STATS)
+//
+typedef struct UvmPmaStatistics_tag
+{
+    volatile NvU64 numPages2m;      // PMA-wide 2MB pages count across all regions
+    volatile NvU64 numFreePages64k; // PMA-wide free 64KB page count across all regions
+    volatile NvU64 numFreePages2m;  // PMA-wide free 2MB pages count across all regions
+} UvmPmaStatistics;
+
+/*******************************************************************************
+    uvmEventSuspend
+    This function will be called by the GPU driver to signal to UVM that the
+    system is about to enter a sleep state. When it is called, the
+    following assumptions/guarantees are valid/made:
+
+      * User channels have been preempted and disabled
+      * UVM channels are still running normally and will continue to do
+        so until after this function returns control
+      * User threads are still running, but can no longer issue system
+        calls to the GPU driver
+      * Until exit from this function, UVM is allowed to make full use of
+        the GPUs under its control, as well as of the GPU driver
+
+    Upon return from this function, UVM may not access GPUs under its control
+    until the GPU driver calls uvmEventResume(). It may still receive
+    calls to uvmEventIsrTopHalf() during this time, which it should return
+    NV_ERR_NO_INTR_PENDING from. It will not receive any other calls.
+*/
+typedef NV_STATUS (*uvmEventSuspend_t) (void);
+
+/*******************************************************************************
+    uvmEventResume
+    This function will be called by the GPU driver to signal to UVM that the
+    system has exited a previously entered sleep state. When it is called,
+    the following assumptions/guarantees are valid/made:
+
+      * UVM is again allowed to make full use of the GPUs under its
+        control, as well as of the GPU driver
+      * UVM channels are running normally
+      * User channels are still preempted and disabled
+      * User threads are again running, but still cannot issue system
+        calls to the GPU driver, nor submit new work
+
+    Upon return from this function, UVM is expected to be fully functional.
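+
+    (Illustrative aside, not mandated by this header: the UVM driver normally
+    hands these callbacks to RM together in a single UvmOpsUvmEvents table,
+    defined further down in this file, e.g.
+
+        static struct UvmOpsUvmEvents g_uvmEvents =
+        {
+            .suspend     = uvmSuspendCb,      // hypothetical implementations
+            .resume      = uvmResumeCb,
+            .startDevice = uvmStartDeviceCb,
+            .stopDevice  = uvmStopDeviceCb,
+            .isrTopHalf  = uvmIsrTopHalfCb,
+        };
+
+    matching the typedefs documented here.)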
+*/ +typedef NV_STATUS (*uvmEventResume_t) (void); + +/******************************************************************************* + uvmEventStartDevice + This function will be called by the GPU driver once it has finished its + initialization to tell the UVM driver that this GPU has come up. +*/ +typedef NV_STATUS (*uvmEventStartDevice_t) (const NvProcessorUuid *pGpuUuidStruct); + +/******************************************************************************* + uvmEventStopDevice + This function will be called by the GPU driver to let UVM know that a GPU + is going down. +*/ +typedef NV_STATUS (*uvmEventStopDevice_t) (const NvProcessorUuid *pGpuUuidStruct); + +/******************************************************************************* + uvmEventIsrTopHalf_t + This function will be called by the GPU driver to let UVM know + that an interrupt has occurred. + + Returns: + NV_OK if the UVM driver handled the interrupt + NV_ERR_NO_INTR_PENDING if the interrupt is not for the UVM driver +*/ +#if defined (__linux__) +typedef NV_STATUS (*uvmEventIsrTopHalf_t) (const NvProcessorUuid *pGpuUuidStruct); +#else +typedef void (*uvmEventIsrTopHalf_t) (void); +#endif + +struct UvmOpsUvmEvents +{ + uvmEventSuspend_t suspend; + uvmEventResume_t resume; + uvmEventStartDevice_t startDevice; + uvmEventStopDevice_t stopDevice; + uvmEventIsrTopHalf_t isrTopHalf; +}; + +typedef struct UvmGpuFaultInfo_tag +{ + struct + { + // Register mappings obtained from RM + volatile NvU32* pFaultBufferGet; + volatile NvU32* pFaultBufferPut; + // Note: this variable is deprecated since buffer overflow is not a separate + // register from future chips. + volatile NvU32* pFaultBufferInfo; + volatile NvU32* pPmcIntr; + volatile NvU32* pPmcIntrEnSet; + volatile NvU32* pPmcIntrEnClear; + volatile NvU32* pPrefetchCtrl; + NvU32 replayableFaultMask; + // fault buffer cpu mapping and size + void* bufferAddress; + NvU32 bufferSize; + } replayable; + struct + { + // Shadow buffer for non-replayable faults on cpu memory. 
Resman copies + // here the non-replayable faults that need to be handled by UVM + void* shadowBufferAddress; + + // Execution context for the queue associated with the fault buffer + void* shadowBufferContext; + + // Fault buffer size + NvU32 bufferSize; + + // Preallocated stack for functions called from the UVM isr top half + void *isr_sp; + + // Preallocated stack for functions called from the UVM isr bottom half + void *isr_bh_sp; + } nonReplayable; + NvHandle faultBufferHandle; +} UvmGpuFaultInfo; + +typedef struct UvmGpuPagingChannel_tag +{ + struct gpuDevice *device; + NvNotification *errorNotifier; + NvHandle channelHandle; + NvHandle errorNotifierHandle; + void *pushStreamSp; +} UvmGpuPagingChannel, *UvmGpuPagingChannelHandle; + +typedef struct UvmGpuAccessCntrInfo_tag +{ + // Register mappings obtained from RM + // pointer to the Get register for the access counter buffer + volatile NvU32* pAccessCntrBufferGet; + // pointer to the Put register for the access counter buffer + volatile NvU32* pAccessCntrBufferPut; + // pointer to the Full register for the access counter buffer + volatile NvU32* pAccessCntrBufferFull; + // pointer to the hub interrupt + volatile NvU32* pHubIntr; + // pointer to interrupt enable register + volatile NvU32* pHubIntrEnSet; + // pointer to interrupt disable register + volatile NvU32* pHubIntrEnClear; + // mask for the access counter buffer + NvU32 accessCounterMask; + // access counter buffer cpu mapping and size + void* bufferAddress; + NvU32 bufferSize; + NvHandle accessCntrBufferHandle; + + // The Notification address in the access counter notification msg does not + // contain the correct upper bits 63-47 for GPA-based notifications. RM + // provides us with the correct offset to be added. + // See Bug 1803015 + NvU64 baseDmaSysmemAddr; +} UvmGpuAccessCntrInfo; + +typedef enum +{ + UVM_ACCESS_COUNTER_GRANULARITY_64K = 1, + UVM_ACCESS_COUNTER_GRANULARITY_2M = 2, + UVM_ACCESS_COUNTER_GRANULARITY_16M = 3, + UVM_ACCESS_COUNTER_GRANULARITY_16G = 4, +} UVM_ACCESS_COUNTER_GRANULARITY; + +typedef enum +{ + UVM_ACCESS_COUNTER_USE_LIMIT_NONE = 1, + UVM_ACCESS_COUNTER_USE_LIMIT_QTR = 2, + UVM_ACCESS_COUNTER_USE_LIMIT_HALF = 3, + UVM_ACCESS_COUNTER_USE_LIMIT_FULL = 4, +} UVM_ACCESS_COUNTER_USE_LIMIT; + +typedef struct UvmGpuAccessCntrConfig_tag +{ + NvU32 mimcGranularity; + + NvU32 momcGranularity; + + NvU32 mimcUseLimit; + + NvU32 momcUseLimit; + + NvU32 threshold; +} UvmGpuAccessCntrConfig; + +typedef UvmGpuChannelInfo gpuChannelInfo; +typedef UvmGpuChannelAllocParams gpuChannelAllocParams; +typedef UvmGpuCaps gpuCaps; +typedef UvmGpuCopyEngineCaps gpuCeCaps; +typedef UvmGpuCopyEnginesCaps gpuCesCaps; +typedef UvmGpuP2PCapsParams getP2PCapsParams; +typedef UvmGpuAddressSpaceInfo gpuAddressSpaceInfo; +typedef UvmGpuAllocInfo gpuAllocInfo; +typedef UvmGpuInfo gpuInfo; +typedef UvmGpuClientInfo gpuClientInfo; +typedef UvmGpuAccessCntrInfo gpuAccessCntrInfo; +typedef UvmGpuAccessCntrConfig gpuAccessCntrConfig; +typedef UvmGpuFaultInfo gpuFaultInfo; +typedef UvmGpuMemoryInfo gpuMemoryInfo; +typedef UvmGpuExternalMappingInfo gpuExternalMappingInfo; +typedef UvmGpuChannelResourceInfo gpuChannelResourceInfo; +typedef UvmGpuChannelInstanceInfo gpuChannelInstanceInfo; +typedef UvmGpuChannelResourceBindParams gpuChannelResourceBindParams; +typedef UvmGpuFbInfo gpuFbInfo; +typedef UvmGpuEccInfo gpuEccInfo; +typedef UvmGpuPagingChannel *gpuPagingChannelHandle; +typedef UvmGpuPagingChannelInfo gpuPagingChannelInfo; +typedef UvmGpuPagingChannelAllocParams 
gpuPagingChannelAllocParams; +typedef UvmPmaAllocationOptions gpuPmaAllocationOptions; + +#endif // _NV_UVM_TYPES_H_ diff --git a/src/nvidia/interface/nvacpitypes.h b/src/nvidia/interface/nvacpitypes.h new file mode 100644 index 000000000..4cb61150c --- /dev/null +++ b/src/nvidia/interface/nvacpitypes.h @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVACPITYPES_H_ +#define _NVACPITYPES_H_ + +typedef enum _ACPI_DSM_FUNCTION +{ + ACPI_DSM_FUNCTION_NBSI = 0, + ACPI_DSM_FUNCTION_NVHG, + ACPI_DSM_FUNCTION_MXM, + ACPI_DSM_FUNCTION_NBCI, + ACPI_DSM_FUNCTION_NVOP, + ACPI_DSM_FUNCTION_PCFG, + ACPI_DSM_FUNCTION_GPS_2X, + ACPI_DSM_FUNCTION_JT, + ACPI_DSM_FUNCTION_PEX, + ACPI_DSM_FUNCTION_NVPCF_2X, + ACPI_DSM_FUNCTION_GPS, + ACPI_DSM_FUNCTION_NVPCF, + // insert new DSM Functions here + ACPI_DSM_FUNCTION_COUNT, + ACPI_DSM_FUNCTION_CURRENT, // pseudo option to select currently available GUID which supports the subfunction. + ACPI_DSM_FUNCTION_INVALID = 0xFF +} ACPI_DSM_FUNCTION; + +#endif // _NVACPITYPES_H_ + diff --git a/src/nvidia/interface/nvrm_registry.h b/src/nvidia/interface/nvrm_registry.h new file mode 100644 index 000000000..4c22a6e81 --- /dev/null +++ b/src/nvidia/interface/nvrm_registry.h @@ -0,0 +1,1581 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1997-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef NVRM_REGISTRY_H +#define NVRM_REGISTRY_H + +#include "nvtypes.h" + +// +// Some shared defines with nvReg.h +// +#if defined(NV_UNIX) +#define NV4_REG_GLOBAL_BASE_KEY "" +#define NV4_REG_GLOBAL_BASE_PATH "_NV_" +#else +#define NV4_REG_GLOBAL_BASE_KEY HKEY_LOCAL_MACHINE +#define NV4_REG_GLOBAL_BASE_PATH "SOFTWARE\\NVIDIA Corporation\\Global" +#endif +#define NV4_REG_SUBKEY "NVidia" +#define NV4_REG_DISPLAY_DRIVER_SUBKEY "Display" +#define NV4_REG_RESOURCE_MANAGER_SUBKEY "System" + +// +// Globally overrides the memory type used to store surfaces. +// Used by all parts of the driver and stored in the hardware-specific key. +// Mirrored from nvReg.h +// +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE "GlobalSurfaceOverrides" +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_DISABLE (0x00000000) // Do not use global surface overrides +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_ENABLE (0x00000001) +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_RM_VALUE 1:0 +#define NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_RM_ENABLE 3:3 + + +#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT "RmDefaultTimeout" +// Type Dword +// Override default RM timeout. Measured in milliseconds. +// Not scaled for emulation + +#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS "RmDefaultTimeoutFlags" +#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSTIMER 4 +#define NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSDELAY 8 +// Type Dword +// Override default RM timeout flags to either OSDELAY or OSTIMER. + + +#define NV_REG_STR_SUPPRESS_CLASS_LIST "SuppressClassList" +// Type String +// A list of comma separated classes to suppress +// examples: +// 5097 +// 4097, 5097 +// etc + + +// +// Allow instance memory overrides. Some fields are chip specific +// and may not apply to all chips. Since there are many fields, +// this is spread across several DWORD registry keys. +// +// Type DWORD +// Encoding: +// DEFAULT RM determines +// COH Coherent system memory +// NCOH Non-coherent system memory +// VID Local video memory +// +#define NV_REG_STR_RM_INST_LOC "RMInstLoc" +#define NV_REG_STR_RM_INST_LOC_2 "RMInstLoc2" +#define NV_REG_STR_RM_INST_LOC_3 "RMInstLoc3" +#define NV_REG_STR_RM_INST_LOC_4 "RMInstLoc4" + +#define NV_REG_STR_RM_INST_LOC_DEFAULT (0x00000000) +#define NV_REG_STR_RM_INST_LOC_COH (0x00000001) +#define NV_REG_STR_RM_INST_LOC_NCOH (0x00000002) +#define NV_REG_STR_RM_INST_LOC_VID (0x00000003) + +#define NV_REG_STR_RM_INST_LOC_ALL_DEFAULT (0x00000000) +#define NV_REG_STR_RM_INST_LOC_ALL_COH (0x55555555) +#define NV_REG_STR_RM_INST_LOC_ALL_NCOH (0xAAAAAAAA) +#define NV_REG_STR_RM_INST_LOC_ALL_VID (0xFFFFFFFF) + +// +// Allow instance memory overrides. Some fields are chip specific +// and may not apply to all chips. Since there are many fields, +// this is spread across several DWORD registry keys. +// +// The registry keys are defined in nvrm_registry. +// Specific overrrides are defined here. 
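+//
+// Example (illustrative): each override below is a two-bit field, so setting
+// RMInstLoc to 0x00000003 forces only the context PTEs (field 1:0) into video
+// memory while leaving every other field at its default, and 0xFFFFFFFF
+// (_ALL_VID above) forces all of them into video memory.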
+// +// Type DWORD +// Encoding: +// DEFAULT RM determines +// COH Coherent system memory +// NCOH Non-coherent system memory +// VID Local video memory +// +#define NV_REG_STR_RM_INST_LOC_PTE 1:0 // Context PTE +#define NV_REG_STR_RM_INST_LOC_PTE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PTE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PTE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PTE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_BAR_PTE 3:2 // BAR PTE +#define NV_REG_STR_RM_INST_LOC_BAR_PTE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_BAR_PTE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_BAR_PTE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_BAR_PTE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_INSTBLK 5:4 // Instance block +#define NV_REG_STR_RM_INST_LOC_INSTBLK_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_INSTBLK_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_INSTBLK_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_INSTBLK_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_RAMFC 7:6 // RAMFC save area +#define NV_REG_STR_RM_INST_LOC_RAMFC_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_RAMFC_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_RAMFC_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_RAMFC_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_CACHE1 9:8 // CACHE1 save area +#define NV_REG_STR_RM_INST_LOC_CACHE1_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_CACHE1_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_CACHE1_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_CACHE1_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_GRCTX 11:10 // Graphics contxt +#define NV_REG_STR_RM_INST_LOC_GRCTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_GRCTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_GRCTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_GRCTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_RUNLIST 13:12 // Runlist +#define NV_REG_STR_RM_INST_LOC_RUNLIST_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_RUNLIST_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_RUNLIST_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_RUNLIST_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_DISPLAY 15:14 // Display +#define NV_REG_STR_RM_INST_LOC_DISPLAY_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_DISPLAY_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_DISPLAY_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_DISPLAY_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_USERD 17:16 // USERD +#define NV_REG_STR_RM_INST_LOC_USERD_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_USERD_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_USERD_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_USERD_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER 19:18 // EVENTBUFFER +#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define 
NV_REG_STR_RM_INST_LOC_EVENTBUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_EVENTBUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_UNUSED 21:20 // UNUSED +#define NV_REG_STR_RM_INST_LOC_UNUSED_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG 23:22 // Cipher exchange memory resources +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_CIPHER_XCHG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_PDE 25:24 // Context PDE +#define NV_REG_STR_RM_INST_LOC_PDE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PDE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PDE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PDE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_BAR_PDE 27:26 // BAR PDE +#define NV_REG_STR_RM_INST_LOC_BAR_PDE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_BAR_PDE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_BAR_PDE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_BAR_PDE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_PMUINST 29:28 // PMUINST +#define NV_REG_STR_RM_INST_LOC_PMUINST_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PMUINST_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PMUINST_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PMUINST_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_PMUUCODE 31:30 // PMU UCODE +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_PMUUCODE_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE 1:0 // Compbit backing store +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_COMPTAG_STORE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB 3:2 // Attribute Circular Buffer +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_ATTR_CB_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB 5:4 // Bundle Circular Buffer +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_BUNDLE_CB_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL 7:6 // Pagepool Buffer +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define 
NV_REG_STR_RM_INST_LOC_2_PAGEPOOL_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX 9:8 // Golden Context Image +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_GOLD_CTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX 11:10 // Bar context aperture +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_BAR_CTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE 13:12 // Power Rail Video Prediction +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMU_PWR_RAIL_VIDEO_PRED_BUFFER_SURFACE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH 15:14 // context patch +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_CTX_PATCH_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ 17:16 // MMU Read +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_MMU_READ_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE 19:18 // MMU Write +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_MMU_WRITE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_UNUSED 21:20 // Unused +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX 23:22 // zcull context buffer +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_ZCULLCTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMCTX 25:24 // PM context buffer +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMCTX_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG 27:26 // DPU Debug/Falctrace Buffer +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_NCOH 
NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_DPUDBG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMUPG 29:28 // PMU PG buffer +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMUPG_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER 31:30 +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_2_PMU_LOGGER_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE 1:0 // PG log surface +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PG_LOG_SURFACE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER 3:2 // Preemption buffer +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PREEMPT_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER 5:4 // GFXP BetaCB buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_BETACB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER 7:6 // GFXP Pagepool buffer +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_GFXP_PAGEPOOL_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE 9:8 // BSI RAM image +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_BSI_IMAGE_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP 11:10 // Priv whitelist buffer +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PRIV_ACCESS_MAP_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG 13:12 // SEC2 Debug/Falctrace Buffer +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_COH NV_REG_STR_RM_INST_LOC_COH +#define 
NV_REG_STR_RM_INST_LOC_3_SEC2DBG_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_SEC2DBG_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE 15:14 // FECS UCODE
+#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_FECS_UCODE_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER 17:16 // GFXP Spill buffer
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_SPILL_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE 19:18 // UVM Non-Replayable fault buffer
+#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_UVM_FAULT_BUFFER_NONREPLAYABLE_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE 21:20 // BAR scratch pages
+#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_BAR_SCRATCH_PAGE_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_FLCNINST 23:22 // FLCNINST
+#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_FLCNINST_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER 25:24 // RTVCB buffer
+#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_RTVCB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER 27:26 // GFXP RTVCB buffer
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_GFXP_RTVCB_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER 29:28 // Fault method buffer
+#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_VID NV_REG_STR_RM_INST_LOC_VID
+#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA 31:30 // PMU/DPU DMA transfers
+#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_3_PMU_DPU_DMA_VID NV_REG_STR_RM_INST_LOC_VID + +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC 1:0 // Display state cache buffer +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_DISP_SC_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER 3:2 // FIFO channel push buffer +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND 5:4 // Firmware security license command +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FW_SEC_LIC_COMMAND_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_VRDS 7:6 // VBIOS runtime data security +#define NV_REG_STR_RM_INST_LOC_4_VRDS_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_VRDS_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_VRDS_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_VRDS_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS 9:8 // Falcon uCode buffers +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_FLCN_UCODE_BUFFERS_VID NV_REG_STR_RM_INST_LOC_VID +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE 11:10 // UVM Replayable fault buffer +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_UVM_FAULT_BUFFER_REPLAYABLE_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of BARs. Default Setting +// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK +// +#define NV_REG_STR_RM_INST_LOC_4_BAR 13:12 // BAR Bind location +#define NV_REG_STR_RM_INST_LOC_4_BAR_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT +#define NV_REG_STR_RM_INST_LOC_4_BAR_COH NV_REG_STR_RM_INST_LOC_COH +#define NV_REG_STR_RM_INST_LOC_4_BAR_NCOH NV_REG_STR_RM_INST_LOC_NCOH +#define NV_REG_STR_RM_INST_LOC_4_BAR_VID NV_REG_STR_RM_INST_LOC_VID + +// +// Separately define instance block location of async CEs. 
Default Setting
+// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK
+//
+#define NV_REG_STR_RM_INST_LOC_4_CE 15:14 // Async CE Bind location
+#define NV_REG_STR_RM_INST_LOC_4_CE_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_4_CE_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_4_CE_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_4_CE_VID NV_REG_STR_RM_INST_LOC_VID
+
+//
+// Separately define instance block location of GR/GRCE. Default Setting
+// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK
+//
+#define NV_REG_STR_RM_INST_LOC_4_GR 17:16 // GR/GRCE Bind location
+#define NV_REG_STR_RM_INST_LOC_4_GR_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_4_GR_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_4_GR_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_4_GR_VID NV_REG_STR_RM_INST_LOC_VID
+
+//
+// Separately define instance block location of VEs. Default Setting
+// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK
+//
+#define NV_REG_STR_RM_INST_LOC_4_FALCON 19:18 // FALCON Bind location
+#define NV_REG_STR_RM_INST_LOC_4_FALCON_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_4_FALCON_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_4_FALCON_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_4_FALCON_VID NV_REG_STR_RM_INST_LOC_VID
+
+//
+// Separately define instance block location of HWPM PMA. Default Setting
+// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK
+//
+#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA 21:20 // HWPM PMA Bind location
+#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_4_HWPM_PMA_VID NV_REG_STR_RM_INST_LOC_VID
+
+//
+// Separately define instance block location of the FECS event buffer. Default Setting
+// reverts to NV_REG_STR_RM_INST_LOC_INSTBLK
+//
+#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF 23:22 // FECS EVENT buffer location
+#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_DEFAULT NV_REG_STR_RM_INST_LOC_DEFAULT
+#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_COH NV_REG_STR_RM_INST_LOC_COH
+#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_NCOH NV_REG_STR_RM_INST_LOC_NCOH
+#define NV_REG_STR_RM_INST_LOC_4_FECS_EVENT_BUF_VID NV_REG_STR_RM_INST_LOC_VID
+
+#define NV_REG_STR_RM_DISABLE_GSP_OFFLOAD "RmDisableGspOffload"
+#define NV_REG_STR_RM_DISABLE_GSP_OFFLOAD_FALSE (0x00000000)
+#define NV_REG_STR_RM_DISABLE_GSP_OFFLOAD_TRUE (0x00000001)
+// Type DWORD (Boolean)
+// Override any other settings and disable GSP-RM offload.
+
+
+#define NV_REG_STR_RM_MSG "RmMsg"
+// Type String: Set parameters for RM DBG_PRINTF. Only for builds with printfs enabled.
+// Encoding:
+// rule = [!][filename|function][:startline][-endline]
+// Format = rule[,rule]
+
+
+#define NV_REG_STR_RM_THREAD_STATE_SETUP_FLAGS "RmThreadStateSetupFlags"
+// Type DWORD
+// Enables or disables various ThreadState features
+// See resman/inc/kernel/core/thread_state.h for
+// THREAD_STATE_SETUP_FLAGS values.
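+
+//
+// Usage sketch (illustrative only, not part of the driver sources): the
+// NV_REG_STR_RM_INST_LOC* keys above are plain DWORDs built from packed 2-bit
+// location fields, each holding one of the generic _DEFAULT/_COH/_NCOH/_VID
+// values defined earlier in this file. For example, a value for the fourth
+// group that binds async CEs (bits 15:14) to non-coherent system memory and
+// GR/GRCE (bits 17:16) to video memory, leaving every other field at the RM
+// default, could be composed as:
+//
+//     NvU32 instLoc4 = 0;
+//     instLoc4 |= (NV_REG_STR_RM_INST_LOC_NCOH << 14);  // _4_CE field, bits 15:14
+//     instLoc4 |= (NV_REG_STR_RM_INST_LOC_VID  << 16);  // _4_GR field, bits 17:16
+//
+// The shift is the low bit of each field as documented above. A field left at
+// zero is taken here to mean _DEFAULT ("RM determines"), assuming the usual
+// encoding where _DEFAULT is 0. RM-side code can read a field back with the
+// DRF helpers from nvmisc.h, e.g. DRF_VAL(_REG_STR_RM, _INST_LOC_4, _CE, instLoc4).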
+ + +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER "RMEnableEventTracer" +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_DISABLE 0 +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_ENABLE 1 +#define NV_REG_STR_RM_ENABLE_EVENT_TRACER_DEFAULT NV_REG_STR_RM_ENABLE_EVENT_TRACER_DISABLE +// Type DWORD +// Encoding boolean +// Enable/Disable RM event tracing +// 0 - Disable RM event tracing +// 1 - Enable RM event tracing + + +#define NV_REG_STR_RM_COMPUTE_MODE_RULES "RmComputeModeRules" +// Type DWORD +// Saves the last compute mode rule set by the client. +// Encoding: +// Bits 31:0 : Last compute mode rule set by the client + + +#define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_1 "RMNvLogExtraBuffer1" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_2 "RMNvLogExtraBuffer2" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_3 "RMNvLogExtraBuffer3" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_4 "RMNvLogExtraBuffer4" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_5 "RMNvLogExtraBuffer5" +// #define NV_REG_STR_RM_NVLOG_EXTRA_BUFFER_6 "RMNvLogExtraBuffer6" +// Type DWORD +// Used to specify up to 6 additional logging buffers +// Encoding: +// _BUFFER_FLAGS +// x: uses NVLOG_BUFFER_FLAGS fields, for main nvlog buffer +// _BUFFER_SIZE +// n: Size of main buffer, in kilobytes + + +// Type DWORD +// This can be used for dumping NvLog buffers (in /var/log/vmkernel.log ), when +// we hit critical XIDs e.g 31/79. +#define NV_REG_STR_RM_DUMP_NVLOG "RMDumpNvLog" +#define NV_REG_STR_RM_DUMP_NVLOG_DEFAULT (0x00000000) +#define NV_REG_STR_RM_DUMP_NVLOG_DISABLE (0x00000000) +#define NV_REG_STR_RM_DUMP_NVLOG_ENABLE (0x00000001) + + +// +// Type DWORD +// RM external fabric management. +// +// RM currently uses nvlink core driver APIs which internally trigger +// link initialization and training. However, nvlink core driver now exposes a +// set of APIs for managing nvlink fabric externally (from user mode). +// +// When the regkey is enabled, RM will skip use of APIs which trigger +// link initialization and training. In that case, link training needs to be +// triggered externally. 
+// +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT "RMExternalFabricMgmt" +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE 0:0 +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT_MODE_DISABLE (0x00000000) + + +// +// Type DWORD +// BIT 1:0: All Data validation +// 0 - Default +// 1 - Validate the kernel data - enable all below +// 2 - Do not validate the kernel data - disable all below +// BIT 3:2: Buffer validation +// 0 - Default +// 1 - Validate the kernel buffers +// 2 - Do not validate the kernel buffers +// BIT 5:4: Handle validation +// 0 - Default +// 1 - Validate the handles +// 2 - Do not validate the handles +// BIT 7:6: Strict client validation +// 0 - Default +// 1 - Enable strict client validation +// 2 - Do not enable strict client validation +// +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION "RmValidateClientData" +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL 1:0 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_ALL_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS 3:2 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_KERNEL_BUFFERS_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE 5:4 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_HANDLE_DISABLED 0x00000002 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT 7:6 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_DEFAULT 0x00000000 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_ENABLED 0x00000001 +#define NV_REG_STR_RM_CLIENT_DATA_VALIDATION_STRICT_CLIENT_DISABLED 0x00000002 + + +// +// Type: Dword +// Encoding: +// 1 - Enable remote GPU +// 0 - Disable remote GPU +// +#define NV_REG_STR_RM_REMOTE_GPU "RMRemoteGpu" +#define NV_REG_STR_RM_REMOTE_GPU_ENABLE 0x00000001 +#define NV_REG_STR_RM_REMOTE_GPU_DISABLE 0x00000000 +#define NV_REG_STR_RM_REMOTE_GPU_DEFAULT NV_REG_STR_RM_REMOTE_GPU_DISABLE + + +// +// Type: DWORD +// +// This regkey configures thread priority boosting whenever +// the thread is holding a GPU lock. +// +#define NV_REG_STR_RM_PRIORITY_BOOST "RMPriorityBoost" +#define NV_REG_STR_RM_PRIORITY_BOOST_DISABLE 0x00000000 +#define NV_REG_STR_RM_PRIORITY_BOOST_ENABLE 0x00000001 +#define NV_REG_STR_RM_PRIORITY_BOOST_DEFAULT NV_REG_STR_RM_PRIORITY_BOOST_DISABLE + + +// +// Type: DWORD +// +// This regkey configures the delay (us) before a boosted thread is throttled +// down. +// +// Default value: 0 (Disable) +// +#define NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY "RMPriorityThrottleDelay" +#define NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY_DISABLE 0x00000000 + + +// +// Type DWORD +// Enable support for CUDA Stream Memory Operations in user-mode applications. 
+// +// BIT 0:0 - Feature enablement +// 0 - disable feature (default) +// 1 - enable feature +// +#define NV_REG_STR_RM_STREAM_MEMOPS "RmStreamMemOps" +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE 0:0 +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE_YES 1 +#define NV_REG_STR_RM_STREAM_MEMOPS_ENABLE_NO 0 + + +// +// Type DWORD: Enable read-only RMAPI locks for select interfaces +// +// Setting an interface to 0 will disable read-only API locks for that interface +// Setting an interface to 1 will enable read-only API locks for that interface, +// however, RM may still choose to take a read-write lock if it needs to. +// +#define NV_REG_STR_RM_READONLY_API_LOCK "RmRoApiLock" +#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE 1:1 +#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_ALLOC_RESOURCE_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE 2:2 +#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_FREE_RESOURCE_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_MAP 3:3 +#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MAP_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP 4:4 +#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_UNMAP_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP 5:5 +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_MAP_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP 6:6 +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_INTER_UNMAP_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_COPY 7:7 +#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_COPY_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE 8:8 +#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_SHARE_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL 9:9 +#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_CTRL_ENABLE (0x00000001) + + +// +// Type DWORD: Enable read-only RMAPI locks for select modules +// +// Setting an interface to 0 will disable read-only API locks for that module +// Setting an interface to 1 will enable read-only API locks for that module, +// however, RM may still choose to take a read-write lock if it needs to. 
+// +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE "RmRoApiLockModule" +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS 0:0 +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_GPU_OPS_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM 1:1 +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_API_LOCK_MODULE_WORKITEM_ENABLE (0x00000001) + + +// +// Type DWORD: Enable read-only GPU locks for select modules +// +// Setting an interface to 0 will disable read-only GPU locks for that module +// Setting an interface to 1 will enable read-only GPU locks for that module, +// however, RM may still choose to take a read-write lock if it needs to. +// +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE "RmRoGpuLockModule" +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS 0:0 +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_GPU_OPS_ENABLE (0x00000001) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM 1:1 +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_DEFAULT (0x00000000) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_DISABLE (0x00000000) +#define NV_REG_STR_RM_READONLY_GPU_LOCK_MODULE_WORKITEM_ENABLE (0x00000001) + + +// Enable support for CACHEABLE rmapi control flag +// 0: never cache any controls +// 1 (default): cache only ROUTE_TO_PHYSICAL controls, and only if GSP-RM is running +// 2: cache all controls +#define NV_REG_STR_RM_CACHEABLE_CONTROLS "RmEnableCacheableControls" +#define NV_REG_STR_RM_CACHEABLE_CONTROLS_DISABLE 0 +#define NV_REG_STR_RM_CACHEABLE_CONTROLS_GSP_ONLY 1 +#define NV_REG_STR_RM_CACHEABLE_CONTROLS_ENABLE 2 + +// Type DWORD +// This regkey forces for Maxwell+ that on FB Unload we wait for FB pull before issuing the +// L2 clean. WAR for bug 1032432 +#define NV_REG_STR_RM_L2_CLEAN_FB_PULL "RmL2CleanFbPull" +#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_ENABLED (0x00000000) +#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_DISABLED (0x00000001) +#define NV_REG_STR_RM_L2_CLEAN_FB_PULL_DEFAULT (0x00000000) + +// Enable backtrace dumping at assertion failure. +// If physical RM or RCDB is unavailable, then this regkey controls the behaviour of backtrace +// printing. +// 0: disable +// 1 (default): only print unique backtraces, identified by instruction pointer of the failed assert +// 2: print all +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE "RmPrintAssertBacktrace" +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_DISABLE 0 +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE 1 +#define NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_ENABLE 2 + + +// +// Type DWORD +// Used to enable no locking on copy +// +#define NV_REG_STR_RM_PARAM_COPY_NO_LOCK "RMParamCopyNoLock" + +// +// Type DWORD +// This regkey restricts profiling capabilities (creation of profiling objects +// and access to profiling-related registers) to admin only. 
+// 0 - (default - disabled) +// 1 - Enables admin check +// +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY "RmProfilingAdminOnly" +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY_FALSE 0x00000000 +#define NV_REG_STR_RM_PROFILING_ADMIN_ONLY_TRUE 0x00000001 + + +#define NV_REG_STR_GPU_BROKEN_FB "nvBrokenFb" +#define NV_REG_STR_GPU_BROKEN_FB_ALL_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_ALL_BROKEN 0xffffffff +#define NV_REG_STR_GPU_BROKEN_FB_DEFAULT NV_REG_STR_GPU_BROKEN_FB_ALL_OKAY +#define NV_REG_STR_GPU_BROKEN_FB_DEFAULT_GF100_A01 NV_REG_STR_GPU_BROKEN_FB_MEMORY_BROKEN +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY 0:0 +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_MEMORY_BROKEN 0x00000001 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU 1:1 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_CPU_BROKEN 0x00000001 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU 2:2 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU_OKAY 0x00000000 +#define NV_REG_STR_GPU_BROKEN_FB_REG_VIA_PMU_BROKEN 0x00000001 +// Type DWORD +// _ALL_OKAY: FB is not broken. All is okay. +// _ALL_BROKEN: FB is broken and no software will try to use it. +// _MEMORY: Memory itself can/cannot be accessed. (PDB_PROP_GPU_BROKEN_FB property) +// _REG_VIA_CPU: CPU can/cannot access FBPA/FBIO registers. (PDB_PROP_GPU_BROKEN_FB_REG_VIA_CPU property) +// _REG_VIA_PMU: PMU can/cannot access FBPA/FBIO registers. (PDB_PROP_GPU_BROKEN_FB_REG_VIA_PMU property) +// FBPA/FBIO register addresses are defined by gpuIsBrokenFbReg(). +// Note that if the CPU and the PMU can't access registers, then memory isn't going to work either. +// In other words, the only even number that makes sense for this regkey is zero. +// Default depends on the chip and mask revision. + +#define NV_REG_STR_OVERRIDE_FB_SIZE "OverrideFbSize" +// Type Dword +// Encoding Numeric Value +// Size in MB +// Used to reduce FB for testing memory management +// +#define NV_REG_STR_OVERRIDE_FB_SIZE_DEFAULT 0 + +// +// TYPE DWORD +// This regkey helps increase the size of RM reserved region. +// Exposed to clients for bug 2404337. +// Note: In GSP builds this key applies to the kernel (CPU) RM only. +// +#define NV_REG_STR_RM_INCREASE_RSVD_MEMORY_SIZE_MB "RMIncreaseRsvdMemorySizeMB" +#define NV_REG_STR_RM_INCREASE_RSVD_MEMORY_SIZE_MB_DEFAULT 0x0 + +// TYPE Dword +// Determines whether or not RM reserved space should be increased. +// 1 - Increases RM reserved space +// 0 - (default) Keeps RM reserved space as it is. + +#define NV_REG_STR_BUG_1698088_WAR "RMBug1698088War" +#define NV_REG_STR_BUG_1698088_WAR_ENABLE 0x00000001 +#define NV_REG_STR_BUG_1698088_WAR_DISABLE 0x00000000 +#define NV_REG_STR_BUG_1698088_WAR_DEFAULT NV_REG_STR_BUG_1698088_WAR_DISABLE + +// +// TYPE DWORD +// This regkey can be used to ignore upper memory on GM20X and later. If there +// is upper memory but this regkey is set to _YES, then RM will only expose the +// lower memory to clients. +// +// DEFAULT - Use the default setting of upper memory on GM20X-and-later. +// YES - Ignore upper memory on GM20X-and-later. 
+//
+#define NV_REG_STR_RM_IGNORE_UPPER_MEMORY "RMIgnoreUpperMemory"
+#define NV_REG_STR_RM_IGNORE_UPPER_MEMORY_DEFAULT (0x00000000)
+#define NV_REG_STR_RM_IGNORE_UPPER_MEMORY_YES (0x00000001)
+
+#define NV_REG_STR_RM_NO_ECC_FB_SCRUB "RMNoECCFBScrub"
+
+#define NV_REG_STR_RM_DISABLE_SCRUB_ON_FREE "RMDisableScrubOnFree"
+// Type DWORD
+// Encoding 0 (default) - Scrub on free
+// 1 - Disable Scrub on Free
+
+#define NV_REG_STR_RM_INIT_SCRUB "RMInitScrub"
+// Type DWORD
+// Encoding 1 - Scrub Fb during rminit irrespective of ECC capability
+
+#define NV_REG_STR_RM_DISABLE_ASYNC_MEM_SCRUB "RMDisableAsyncMemScrub"
+// Type DWORD
+// Encoding 0 (default) - Async memory scrubbing is enabled
+// 1 - Async memory scrubbing is disabled
+
+#define NV_REG_STR_RM_INCREASE_ECC_SCRUB_TIMEOUT "RM1441072"
+// Type DWORD
+// Encoding 0 (default) - Use default ECC Scrub Timeout
+// 1 - Increase ECC Scrub Timeout
+
+//
+// Type DWORD
+// Controls enable of PMA memory management instead of existing legacy
+// RM FB heap manager.
+//
+#define NV_REG_STR_RM_ENABLE_PMA "RMEnablePMA"
+#define NV_REG_STR_RM_ENABLE_PMA_YES (0x00000001)
+#define NV_REG_STR_RM_ENABLE_PMA_NO (0x00000000)
+
+//
+// Type DWORD
+// Controls management of client page tables by PMA on MODS.
+// Default enable. MODS will use regkey to override to disable feature.
+//
+#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES "RMEnablePmaManagedPtables"
+#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_YES (0x00000001)
+#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_NO (0x00000000)
+#define NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_DEFAULT (0x00000001)
+
+//
+// Type DWORD
+// Controls enable of Address Tree memory tracking instead of regmap
+// for the PMA memory manager.
+//
+#define NV_REG_STR_RM_ENABLE_ADDRTREE "RMEnableAddrtree"
+#define NV_REG_STR_RM_ENABLE_ADDRTREE_YES (0x00000001)
+#define NV_REG_STR_RM_ENABLE_ADDRTREE_NO (0x00000000)
+
+#define NV_REG_STR_RM_SCRUB_BLOCK_SHIFT "RMScrubBlockShift"
+// Type DWORD
+// Encoding Numeric Value
+// A value in the range 12 to 20 represents logbase2 of maxBlockSize for heap
+// scrubber. Any other value will be defaulted to 16 i.e. maxBlockSize = 64KB.
+
+#define NV_REG_STR_RM_INST_VPR "RMInstVPR"
+// Type DWORD
+// Encoding: takes effect for allocations in VIDEO memory
+// TRUE Make allocation in protected region
+// FALSE Make allocation in non-protected region (default)
+//
+#define NV_REG_STR_RM_INST_VPR_INSTBLK 0:0 // Instance block
+#define NV_REG_STR_RM_INST_VPR_INSTBLK_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_INSTBLK_TRUE (0x00000001)
+#define NV_REG_STR_RM_INST_VPR_RAMFC 1:1 // RAMFC save area
+#define NV_REG_STR_RM_INST_VPR_RAMFC_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_RAMFC_TRUE (0x00000001)
+#define NV_REG_STR_RM_INST_VPR_RUNLIST 2:2 // Runlist
+#define NV_REG_STR_RM_INST_VPR_RUNLIST_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_RUNLIST_TRUE (0x00000001)
+#define NV_REG_STR_RM_INST_VPR_MMU_READ 3:3 // MMU Debug Read
+#define NV_REG_STR_RM_INST_VPR_MMU_READ_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_MMU_READ_TRUE (0x00000001)
+#define NV_REG_STR_RM_INST_VPR_MMU_WRITE 4:4 // MMU Debug Write
+#define NV_REG_STR_RM_INST_VPR_MMU_WRITE_FALSE (0x00000000)
+#define NV_REG_STR_RM_INST_VPR_MMU_WRITE_TRUE (0x00000001)
+
+#define NV_REG_STR_RM_GPU_SURPRISE_REMOVAL "RMGpuSurpriseRemoval"
+// Type DWORD
+// Encoding boolean
+// If set, this will cause RM to mark the GPU as lost when it detects 0xFF from
+// register access.
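+
+//
+// Usage sketch (illustrative only, not part of the driver sources): RMInstVPR
+// packs one boolean per allocation type, so a value is just the OR of the
+// desired _TRUE bits shifted to each field's low bit. For example, to request
+// that the instance block (bit 0) and the RAMFC save area (bit 1) be placed in
+// the protected region while the remaining fields stay non-protected:
+//
+//     NvU32 instVpr = 0;
+//     instVpr |= (NV_REG_STR_RM_INST_VPR_INSTBLK_TRUE << 0);  // instance block in VPR
+//     instVpr |= (NV_REG_STR_RM_INST_VPR_RAMFC_TRUE   << 1);  // RAMFC save area in VPR
+//     // instVpr == 0x00000003, supplied as the "RMInstVPR" DWORD.
+//
+// As noted above, this only takes effect for allocations placed in video
+// memory.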
+ +#define NV_REG_STR_RM_NUM_FIFOS "RmNumFifos" +// Type Dword +// Override number of fifos (channels) on NV4X +#define NV_REG_STR_RM_NUM_FIFOS_COMPAT 0x00000020 +#define NV_REG_STR_RM_NUM_FIFOS_EXTENDED 0x00000200 + +#define NV_REG_STR_RM_SUPPORT_USERD_MAP_DMA "RMSupportUserdMapDma" +// Type DWORD +// Encoding: Boolean +// If set, allow MapMemoryDma calls to be made on channel objects + +// +// Type DWORD +// Encoding Numeric Value +// Overrides chipset-based P2P configurations. +// Only be used to test on internal issues +// +// P2P reads: +// 0 - Do not allow P2P reads +// 1 - Allow P2P reads +// 2 - Do not override chipset-selected config (default) +// P2P writes: +// 0 - Do not allow P2P writes +// 1 - Allow P2P writes +// 2 - Do not override chipset-selected config (default) +// +#define NV_REG_STR_CL_FORCE_P2P "ForceP2P" +#define NV_REG_STR_CL_FORCE_P2P_READ 1:0 +#define NV_REG_STR_CL_FORCE_P2P_READ_DISABLE 0x00000000 +#define NV_REG_STR_CL_FORCE_P2P_READ_ENABLE 0x00000001 +#define NV_REG_STR_CL_FORCE_P2P_READ_DEFAULT 0x00000002 +#define NV_REG_STR_CL_FORCE_P2P_WRITE 5:4 +#define NV_REG_STR_CL_FORCE_P2P_WRITE_DISABLE 0x00000000 +#define NV_REG_STR_CL_FORCE_P2P_WRITE_ENABLE 0x00000001 +#define NV_REG_STR_CL_FORCE_P2P_WRITE_DEFAULT 0x00000002 + +// +// Type DWORD +// Use this regkey to force RM to pick a P2P type. HW has to support the picked TYPE to take effect. +// e.g., TYPE_BAR1P2P will not work if HW does not support it. A call to create NV50_P2P object will +// will fail in such a case. +// +// TYPE_DEFAULT let RM to choose a P2P type. The priority is: +// C2C > NVLINK > BAR1P2P > mailbox P2P +// +// TYPE_C2C to use C2C P2P if it supports +// TYPE_NVLINK to use NVLINK P2P, including INDIRECT_NVLINK_P2P if it supports +// TYPE_BAR1P2P to use BAR1 P2P if it supports +// TYPE_MAILBOXP2P to use mailbox p2p if it supports +// +#define NV_REG_STR_RM_FORCE_P2P_TYPE "RMForceP2PType" +#define NV_REG_STR_RM_FORCE_P2P_TYPE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_MAILBOXP2P (0x00000001) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_BAR1P2P (0x00000002) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_NVLINK (0x00000003) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_C2C (0x00000004) +#define NV_REG_STR_RM_FORCE_P2P_TYPE_MAX NV_REG_STR_RM_FORCE_P2P_TYPE_C2C + +// +// Type: DWORD +// Enables/Disables the WAR for bug 1630288 where we disable 3rd-party peer mappings +// Disabled by default +// +#define NV_REG_STR_PEERMAPPING_OVERRIDE "PeerMappingOverride" +#define NV_REG_STR_PEERMAPPING_OVERRIDE_DEFAULT 0 + +#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED "P2PMailboxClientAllocated" +#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED_FALSE 0 +#define NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED_TRUE 1 +// Type Dword +// Overrides the P2P Mailbox allocation policy +// For testing only +// 0 - P2P Mailbox area is allocated by RM +// 1 - P2P Mailbox area is not allocated by RM, but by the client. + +#define NV_REG_STR_RM_MAP_P2P_PEER_ID "RMP2PPeerId" +// Type DWORD +// Encoding: +// Peer ID to use when mapping p2p to peer subdevice in p2p loopback mode +// Default: RM takes care of assigning peer ID. + +#define NV_REG_STR_OVERRIDE_GPU_NUMA_NODE_ID "RMOverrideGpuNumaNodeId" +// Type DWORD: +// Encoding -- NvS32 +// Override GPU NUMA Node ID assigned by OS + +// +// Type DWORD +// Numa allocations allow for skipping reclaim less than a specified memory occupancy threshold. 
+// This override allows that threshold to be tuned: the value supplied here is the percentage of
+// free memory below which the GFP_RECLAIM flag will be dropped.
+//
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE "RmNumaAllocSkipReclaimPercent"
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_DEFAULT 4
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_DISABLED 0
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MIN 0
+#define NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MAX 100
+
+//
+// Enable/Disable 64KB BAR1 mappings
+// 0 - Disable 64KB BAR1 mappings
+// 1 - Force/Enable 64KB BAR1 mappings
+//
+#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS "RM64KBBAR1Mappings"
+#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS_ENABLED 0x00000001
+#define NV_REG_STR_RM_64KB_BAR1_MAPPINGS_DISABLED 0x00000000
+
+#define NV_REG_STR_RM_BAR1_APERTURE_SIZE_MB "RMBar1ApertureSizeMB"
+// Type DWORD
+// Encoding Numeric Value
+// Overrides the size of the BAR1 aperture. Used to shrink BAR1. It cannot be
+// greater than the physical size of BAR1.
+
+#define NV_REG_STR_RM_BAR2_APERTURE_SIZE_MB "RMBar2ApertureSizeMB"
+// Type DWORD
+// Encoding Numeric Value
+// Overrides the size of the BAR2 aperture. Cannot be greater than the
+// physical size of BAR2 available to RM (which may be less than the total size
+// of BAR2). When this regkey is present we cap the total aperture size to the
+// RM aperture size. This can result in undefined behavior in environments that
+// rely on a virtual bar2 aperture shared between RM and VBIOS for VESA support.
+
+#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS)
+//
+// TYPE DWORD
+// This setting will override the BAR1 Big page size
+// This is used for interop testing for MODS
+//
+#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE "RMSetBAR1AddressSpaceBigPageSize"
+#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE_64k (64 * 1024)
+#define NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE_128k (128 * 1024)
+#endif //DEVELOP || DEBUG || NV_MODS
+
+// This regkey is to disable coherent path CPU->Nvlink/C2C->FB and force BAR path.
+#define NV_REG_STR_RM_FORCE_BAR_PATH "RMForceBarPath"
+// Type DWORD
+// Encoding 0 (default) - Enable Coherent C2C/NvLink Path
+// 1 - Force BAR Path
+
+//
+// Type: Dword
+// Encoding:
+// 0 - client RM allocated context buffer feature is disabled
+// 1 - client RM allocated context buffer feature is enabled
+//
+#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER "RMSetClientRMAllocatedCtxBuffer"
+#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER_DISABLED 0x00000000
+#define NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER_ENABLED 0x00000001
+
+//
+// Type: Dword
+// Encoding:
+// 0 - Split VA space management between server/client RM is disabled
+// 1 - Split VA space management between server/client RM is enabled
+//
+#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM "RMSplitVasMgmtServerClientRm"
+#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_DISABLED 0x00000000
+#define NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_ENABLED 0x00000001
+
+//
+// Restrict the VA range to be <= @ref VASPACE_SIZE_FERMI.
+// Used in cases where some engines support 49 bit VA and some don't.
+// Ignored if NVOS32_ALLOC_FLAGS_USE_BEGIN_END (DDMA_ALLOC_VASPACE_USE_RANGE) or
+// NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE (DMA_ALLOC_VASPACE_VA_FIXED) is set.
+// Default: OFF (0x0) +// Type: DWORD +// +#define NV_REG_STR_RM_RESTRICT_VA_RANGE "RMRestrictVARange" +#define NV_REG_STR_RM_RESTRICT_VA_RANGE_DEFAULT (0x0) +#define NV_REG_STR_RM_RESTRICT_VA_RANGE_ON (0x1) + +#define NV_REG_STR_RESERVE_PTE_SYSMEM_MB "RmReservePteSysmemMB" +// Type DWORD: +// Encoding -- Value = 0 -> Do not reserve sysmem for PTEs (default) +// Value > 0 -> Reserve ValueMB for PTEs when we run out of video and system memory +// + +// Type DWORD +// Contains the sysmem page size. +#define NV_REG_STR_RM_SYSMEM_PAGE_SIZE "RMSysmemPageSize" + +// +// Allows pages that are aligned to large page boundaries to be mapped as large +// pages. +// +#define NV_REG_STR_RM_ALLOW_SYSMEM_LARGE_PAGES "RMAllowSysmemLargePages" + +#define NV_REG_STR_FERMI_BIG_PAGE_SIZE "RMFermiBigPageSize" +#define NV_REG_STR_FERMI_BIG_PAGE_SIZE_64KB (64 * 1024) +#define NV_REG_STR_FERMI_BIG_PAGE_SIZE_128KB (128 * 1024) + +// +// TYPE DWORD +// This setting will disable big page size per address space +// +#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE "RmDisableBigPagePerAddressSpace" +#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE_FALSE (0x00000000) +#define NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE_TRUE (0x00000001) + +#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION "RMDisableNoncontigAlloc" +#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION_FALSE (0x00000000) +#define NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION_TRUE (0x00000001) +// Type DWORD: +// Encoding -- Boolean +// Disable noncontig vidmem allocation +// + +#define NV_REG_STR_RM_FBSR_PAGED_DMA "RmFbsrPagedDMA" +#define NV_REG_STR_RM_FBSR_PAGED_DMA_ENABLE 1 +#define NV_REG_STR_RM_FBSR_PAGED_DMA_DISABLE 0 +#define NV_REG_STR_RM_FBSR_PAGED_DMA_DEFAULT NV_REG_STR_RM_FBSR_PAGED_DMA_DISABLE +// Type Dword +// Encoding Numeric Value +// Enable the Paged DMA mode for FBSR +// 0 - Disable (default) +// 1 - Enable + +#define NV_REG_STR_RM_FBSR_FILE_MODE "RmFbsrFileMode" +#define NV_REG_STR_RM_FBSR_FILE_MODE_ENABLE 1 +#define NV_REG_STR_RM_FBSR_FILE_MODE_DISABLE 0 +#define NV_REG_STR_RM_FBSR_FILE_MODE_DEFAULT NV_REG_STR_RM_FBSR_FILE_MODE_DISABLE +// Type Dword +// Encoding Numeric Value +// Enable the File based power saving mode for Linux +// 0 - Disable (default) +// 1 - Enable + +#define NV_REG_STR_RM_FBSR_WDDM_MODE "RmFbsrWDDMMode" +#define NV_REG_STR_RM_FBSR_WDDM_MODE_ENABLE 1 +#define NV_REG_STR_RM_FBSR_WDDM_MODE_DISABLE 0 +#define NV_REG_STR_RM_FBSR_WDDM_MODE_DEFAULT NV_REG_STR_RM_FBSR_WDDM_MODE_DISABLE +// Type Dword +// Encoding Numeric Value +// Enable the WDDM power saving mode for FBSR +// 0 - Disable (default) +// 1 - Enable + +// Type DWORD: Disables HW fault buffers on Pascal+ chips +// Encoding : 1 -- TRUE +// : 0 -- False +// : Default -- False +#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER "RmDisableHwFaultBuffer" +#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_TRUE 0x00000001 +#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_FALSE 0x00000000 +#define NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER_DEFAULT 0x00000000 + +// +// Type: DWORD +// Encoding: +// 3 - Enable interrupt-based FECS context switch logging with bottom-half/APC fall-back +// 2 - Enable interrupt-based FECS context switch logging without bottom-half/APC fall-back +// 1 - Enable periodic FECS context switch logging +// 0 - Disable FECS context switch logging +// +// Note: Interrupt-based logging and periodic logging are mutually exclusive +// +#define NV_REG_STR_RM_CTXSW_LOG "RMCtxswLog" +#define NV_REG_STR_RM_CTXSW_LOG_DISABLE 0x00000000 
+#define NV_REG_STR_RM_CTXSW_LOG_ENABLE 0x00000001 +#define NV_REG_STR_RM_CTXSW_LOG_ENABLE_INTR 0x00000002 +#define NV_REG_STR_RM_CTXSW_LOG_ENABLE_INTR_APC 0x00000003 +#define NV_REG_STR_RM_CTXSW_LOG_DEFAULT NV_REG_STR_RM_CTXSW_LOG_DISABLE + +// +// Type: DWORD +// +// This regkey configures the maximum number of records that can be +// processed per DPC when using interrupt-based ctxsw logging +#define NV_REG_STR_RM_CTXSW_LOG_RECORDS_PER_INTR "RMCtxswLogMaxRecordsPerIntr" +#define NV_REG_STR_RM_CTXSW_LOG_RECORDS_PER_INTR_DEFAULT 0x30 + +// +// Type: DWORD +// Encoding: +// 0 - Disable more detailed debug INTR logs +// 1 - Enable more detailed debug INTR logs +// +#define NV_REG_STR_RM_INTR_DETAILED_LOGS "RMIntrDetailedLogs" +#define NV_REG_STR_RM_INTR_DETAILED_LOGS_DISABLE 0x00000000 +#define NV_REG_STR_RM_INTR_DETAILED_LOGS_ENABLE 0x00000001 + +#define NV_REG_STR_RM_LOCKING_MODE "RMLockingMode" +// Type DWORD +// Encoding enum +// Overrides what Locking Mode is in use. +// Default 0 +#define NV_REG_STR_RM_LOCKING_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_LOCKING_MODE_INTR_MASK (0x00000001) +#define NV_REG_STR_RM_LOCKING_MODE_LAZY_INTR_DISABLE (0x00000002) + +#define NV_REG_STR_RM_PER_INTR_DPC_QUEUING "RMDisablePerIntrDPCQueueing" +// Type DWORD +// This regkey is used to disable per interrupt DPC queuing. +// 0: Enable Per interrupt DPC Queuing +// 1: Disable Per interrupt DPC Queuing + +#define NV_REG_STR_INTR_STUCK_THRESHOLD "RM654663" +// Type DWORD +// Encoding NvU32 +// Number of iterations to see an interrupt in succession before considering it +// "stuck." +// Default - See INTR_STUCK_THRESHOLD + + +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR "RMProcessNonStallIntrInLocklessIsr" + +// Type: DWORD +// Enables/Disables processing of non-stall interrupts in lockless ISR for +// Linux only. +// Non-stall interrupts are processed by the function +// intrServiceNonStall_HAL(pIntr,pGpu, TRUE /* bProcess*/); where bProcess is TRUE which +// means that event list will be traversed to notify clients registered for it. 
+// Disabled by default +// + +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_DISABLE 0x00000000 +#define NV_REG_PROCESS_NONSTALL_INTR_IN_LOCKLESS_ISR_ENABLE 0x00000001 + +#define NV_REG_STR_RM_ROBUST_CHANNELS "RmRobustChannels" +#define NV_REG_STR_RM_ROBUST_CHANNELS_ENABLE 0x00000001 +#define NV_REG_STR_RM_ROBUST_CHANNELS_DISABLE 0x00000000 +#define NV_REG_STR_RM_ROBUST_CHANNELS_DEFAULT NV_REG_STR_RM_ROBUST_CHANNELS_DISABLE + +#define NV_REG_STR_RM_RC_WATCHDOG "RmRcWatchdog" +#define NV_REG_STR_RM_RC_WATCHDOG_ENABLE 0x00000001 +#define NV_REG_STR_RM_RC_WATCHDOG_DISABLE 0x00000000 +#define NV_REG_STR_RM_RC_WATCHDOG_DEFAULT NV_REG_STR_RM_RC_WATCHDOG_ENABLE + +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT "RmWatchDogTimeOut" +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_LOW 0x00000007 +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_HI 0x0000000C +#define NV_REG_STR_RM_WATCHDOG_TIMEOUT_DEFAULT NV_REG_STR_RM_WATCHDOG_TIMEOUT_LOW + +#define NV_REG_STR_RM_WATCHDOG_INTERVAL "RmWatchDogInterval" +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_LOW 0x00000007 +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_HI 0x0000000C +#define NV_REG_STR_RM_WATCHDOG_INTERVAL_DEFAULT NV_REG_STR_RM_WATCHDOG_INTERVAL_LOW + +#define NV_REG_STR_RM_DO_LOG_RC_EVENTS "RmLogonRC" +// Type Dword +// Encoding : 0 --> Skip Logging +// 1 --> Do log +// Enable/Disable Event Logging on RC errors +// Default is Disabled +#define NV_REG_STR_RM_DO_LOG_RC_ENABLE 0x00000001 +#define NV_REG_STR_RM_DO_LOG_RC_DISABLE 0x00000000 +#define NV_REG_STR_RM_DO_LOG_RC_DEFAULT NV_REG_STR_RM_DO_LOG_RC_DISABLE + +// Type Dword +// Encoding : 0 --> Skip Breakpoint +// nonzero --> Do Breakpoint +// Enable/Disable breakpoint on DEBUG resource manager on RC errors + +#define NV_REG_STR_RM_BREAK_ON_RC "RmBreakonRC" +#define NV_REG_STR_RM_BREAK_ON_RC_DISABLE 0x00000000 +#define NV_REG_STR_RM_BREAK_ON_RC_ENABLE 0x00000001 + +// Explicitly disable RmBreakOnRC for Retail and +// RMCFG_FEATURE_PLATFORM_GSP builds +#if ((defined(DEBUG) || defined(QA_BUILD)) && \ + (!defined(RMCFG_FEATURE_PLATFORM_GSP) || \ + (defined(RMCFG_FEATURE_PLATFORM_GSP) && !RMCFG_FEATURE_PLATFORM_GSP))) +#define NV_REG_STR_RM_BREAK_ON_RC_DEFAULT NV_REG_STR_RM_BREAK_ON_RC_ENABLE +#else +#define NV_REG_STR_RM_BREAK_ON_RC_DEFAULT NV_REG_STR_RM_BREAK_ON_RC_DISABLE +#endif + +// Volatile registry entries for previous driver version. +// Used to record driver unload/reload for debugging purposes. +#define NV_REG_STR_RM_RC_PREV_DRIVER_VERSION "RmRCPrevDriverVersion" +#define NV_REG_STR_RM_RC_PREV_DRIVER_BRANCH "RmRCPrevDriverBranch" +#define NV_REG_STR_RM_RC_PREV_DRIVER_CHANGELIST "RmRCPrevDriverChangelist" +#define NV_REG_STR_RM_RC_PREV_DRIVER_LOAD_COUNT "RmRCPrevDriverLoadCount" + +#define NV_REG_STR_USE_UNCACHED_PCI_MAPPINGS "UseUncachedPCIMappings" +// Type DWORD +// Encode -- Numeric Value +// Check to see if we are converting PCI mappings + +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING "RmCeUseGen4Mapping" +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING_TRUE 0x1 +#define NV_REG_STR_RM_CE_USE_GEN4_MAPPING_FALSE 0x0 +// Type Dword (Boolean) +// Encoding Numeric Value +// Use gen4 mapping that uses a HSHUB CE, if available +// Else, continue using FBHUB PCEs + +// Type Dword +// Enable PCE LCE auto config +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG "RmCeEnableAutoConfig" +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG_TRUE 0x1 +#define NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG_FALSE 0x0 + +// +// Type DWORD +// NVLINK control overrides. 
+// +// FORCE_DISABLE: Force disable NVLINK when the current default is ON (POR) +// +// TRAIN_AT_LOAD : Force train links during driver load +// +// FORCE_AUTOCONFIG : Force autoconfig training regardless of chiplib forced config links +// +// FORCE_ENABLE: Force enable NVLINK when the current default is OFF (bringup etc.) +// +// PARALLEL_TRAINING: Have the GPU endpoint parallelize link training +#define NV_REG_STR_RM_NVLINK_CONTROL "RMNvLinkControl" +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE 0:0 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_DISABLE_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD 1:1 +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_TRAIN_AT_LOAD_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN 2:2 +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_SKIP_TRAIN_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_RESERVED_0 7:3 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG 8:8 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_AUTOCONFIG_NO) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE 31:31 +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_NO (0x00000000) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_YES (0x00000001) +#define NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_DEFAULT (NV_REG_STR_RM_NVLINK_CONTROL_FORCE_ENABLE_NO) + +// +// Type DWORD +// Knob to control NVLink MINION +// +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL "RMNvLinkMinionControl" +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE 3:0 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_FORCE_ON (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_FORCE_OFF (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG 7:4 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_PHY_CONFIG_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS 11:8 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_DL_STATUS_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE 15:12 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITNEGOTIATE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE 19:16 +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_ENABLE (0x00000001) +#define 
NV_REG_STR_RM_NVLINK_MINION_CONTROL_INITOPTIMIZE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_MINION_CONTROL_RESERVED_0 31:20 + +// +// Type DWORD +// Knob to change NVLink link speed +// __LAST is same as highest supported speed +// NOTE: +// NVLINK_SPEED_CONTROL_SPEED_25G is exactly 25.00000Gbps on Pascal +// NVLINK_SPEED_CONTROL_SPEED_25G is exactly 25.78125Gbps on Volta and later +// NVLINK_SPEED_CONTROL_SPEED_2500000G is exactly 25.00000Gbps on all chips +// +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL "RMNvLinkSpeedControl" +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED 4:0 +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_16G (0x00000001) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_19_2G (0x00000002) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_20G (0x00000003) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_21G (0x00000004) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_22G (0x00000005) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_23G (0x00000006) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_24G (0x00000007) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25G (0x00000008) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25_78125G (0x00000008) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_25_00000G (0x00000009) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_16_14583G (0x0000000A) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_26_56250G (0x0000000B) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_27_34375G (0x0000000C) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_28_12500G (0x0000000D) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_32G (0x0000000E) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_40G (0x0000000F) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_50_00000G (0x00000010) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_53_12500G (0x00000011) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_FAULT (0x00000013) +#define NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED__LAST (0x00000013) + +// +// Type DWORD +// P2P Loopback over NVLINK will be enabled by default if RM +// detects loopback links. 
For P2P over PCIE, force disable +// P2P loopback over NVLINK using the following regkey +// +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK "RMNvLinkDisableP2PLoopback" +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_TRUE (0x00000001) +#define NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_FALSE (0x00000000) + +// +// Type DWORD +// Knob to control NVLink Link Power States +// +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL "RMNvLinkControlLinkPM" +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE 1:0 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_SINGLE_LANE_MODE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES 3:2 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_PROD_WRITES_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_RESERVED_0 5:4 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE 7:6 +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_ENABLE (0x00000001) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_L2_MODE_DISABLE (0x00000002) +#define NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL_RESERVED 31:8 + +// +// Type DWORD +// Knob to force lane disable and shutdown during driver unload +// The regkey will also cause a toggle of link reset on driver load +// The regkey should not be used in S/R paths +// +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN "RMNvLinkForceLaneshutdown" +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_TRUE (0x00000001) +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_FALSE (0x00000000) +#define NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_DEFAULT (NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_FALSE) + +// +// Type DWORD +// For links that are SYSMEM, use this device type for force configs +// Choose the value from NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_* +// +#define NV_REG_STR_RM_NVLINK_SYSMEM_DEVICE_TYPE "RMNvLinkForcedSysmemDeviceType" + +// +// Type DWORD +// NVLink Disable Link Overrides +// The supplied value is ANDed with the set of discovered +// (not necessarily connected) links to remove unwanted links. +// A value of DISABLE_ALL removes/disables all links on this device. +// A value of DISABLE_NONE removes no links. +// If not present, this regkey has no effect. +// +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS "RMNvLinkDisableLinks" +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS_DISABLE_ALL (0xFFFFFFFF) +#define NV_REG_STR_RM_NVLINK_DISABLE_LINKS_DISABLE_NONE (0x00000000) + +// +// Type DWORD +// NVLINK Enable Links Overrides +// Note that this control does not force enable links, rather, it should be +// used to disable or mask off SW discovered links supported by the HW. +// +// NOTE: THIS REGKEY HAS BEEN DEPRECATED IN RM, since it does NOT work +// with NVLink auto-configuration. 
Instead, please move to using +// the new regkey NV_REG_STR_RM_NVLINK_DISABLE_LINKS +// +#define NV_REG_STR_RM_NVLINK_ENABLE "RMNvLinkEnable" +#define NV_REG_STR_RM_NVLINK_ENABLE_IDX(i) (i):(i) +#define NV_REG_STR_RM_NVLINK_ENABLE_IDX__SIZE 32 +#define NV_REG_STR_RM_NVLINK_ENABLE_IDX_TRUE (0x00000001) +#define NV_REG_STR_RM_NVLINK_ENABLE_IDX_FALSE (0x00000000) + +// +// Type DWORD +// Knob to control NVLink Verbose Prints +// +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL "RMNvLinkverboseControlMask" +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT 0:0 +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_DEFAULT (0x00000000) +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_ON (0x00000001) +#define NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_OFF (0x00000000) + +// Type DWORD: +#define NV_REG_STR_RM_PCIE_LINK_SPEED "RMPcieLinkSpeed" +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2 1:0 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN2_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3 3:2 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN3_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4 5:4 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN4_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5 7:6 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_DEFAULT (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_ENABLE (0x00000001) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_ALLOW_GEN5_DISABLE (0x00000002) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD 31:31 +#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD_DISABLE (0x00000000) +#define NV_REG_STR_RM_PCIE_LINK_SPEED_LOCK_AT_LOAD_ENABLE (0x00000001) + +// +// Type DWORD +// +// +// This can be used as a per-device regkey or not, in which case the setting +// will apply to all devices. If this key is supplied as both a per-device and +// non-per-device regkey, the non-per-device option will apply first to all +// devices, and then the per-device key settings will apply, overriding the +// settings for the relevant devices. +// +// Encoding : 0 - Disable PCIe Relaxed Ordering TLP header bit setting. This is +// the default option. +// 1 - Try to enable PCIe Relaxed Ordering TLP header bit setting. +// Traverses the PCIe topology and only enables the header bit if +// it is safe to do so, with regard to all devices that could be +// affected. +// 2 - Forcibly enable PCIe Relaxed Ordering TLP header bit setting. +// Explicitly ignores the compatibility of the PCIe topology +// around the device or devices in question. +// +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING "RmSetPCIERelaxedOrdering" +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_DEFAULT 0x00000000 +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_DISABLE 0x00000000 +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_ENABLE 0x00000001 +#define NV_REG_STR_RM_SET_PCIE_TLP_RELAXED_ORDERING_FORCE_ENABLE 0x00000002 + +// Type DWORD +// This regkey overrides the default use case to optimize the GPU for. 
+// This regkey should not be used with the RMFermiBigPageSize regkey. +// This regkey should only be set by the RM. +#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX "RMOptimizeComputeOrSparseTex" +#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_DEFAULT (0x00000000) +#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_COMPUTE (0x00000001) +#define NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_SPARSE_TEX (0x00000002) + +#define NV_REG_STR_CL_ASLM_CFG "AslmCfg" +#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE 1:0 +#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_NO 0x00000000 +#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_YES 0x00000001 +#define NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_DEFAULT 0x00000002 +#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET 5:4 +#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_NO 0x00000000 +#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_YES 0x00000001 +#define NV_REG_STR_CL_ASLM_CFG_HOT_RESET_DEFAULT 0x00000002 +#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE 9:8 +#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_NO 0x00000000 +#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_YES 0x00000001 +#define NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_DEFAULT 0x00000002 +#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE 11:10 +#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_NO 0x00000000 +#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_YES 0x00000001 +#define NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_DEFAULT 0x00000002 +// Type Dword +// Encoding Numeric Value +// Overrides chipset-based ASLM configurations. +// +// NV link upgrade: +// 0 - Do not use NV link upgrade for ASLM +// 1 - Use NV link upgrade for ASLM +// 2 - Do not override chipset-selected config (default) +// Hot reset: +// 0 - Do not use hot reset for ASLM +// 1 - Use hot reset for ASLM +// 2 - Do not override chipset-selected config (default) +// Fast link upgrade: +// 0 - Do not use fast link upgrade for ASLM +// 1 - Use fast link upgrade for ASLM +// 2 - Do not override chipset-selected config (default) +// Gen2 link width upgrade: +// 0 - Do not use Gen2 link upgrade for ASLM +// 1 - Use Gen2 link upgrade for ASLM +// 2 - Do not override chipset-selected config (default) + +#define NV_REG_STR_RM_DISABLE_BR03_FLOW_CONTROL "MB_DisableBr03FlowControl" +// Type DWORD +// Encoding 1 -> Do not program BR03 flow control registers +// 0 -> Setup BR03 flow control registers +// Determine whether we need to program BR03 flow control registers, in objcl.c + +#define NV_REG_STR_RM_FORCE_ENABLE_GEN2 "RmForceEnableGen2" +#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_NO 0 +#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_YES 1 +#define NV_REG_STR_RM_FORCE_ENABLE_GEN2_DEFAULT NV_REG_STR_RM_FORCE_ENABLE_GEN2_NO +// Type DWORD: On some platform, Gen2 is disabled to work around system problems. +// This key is to force enabling Gen2 for testing or other purpose. It is +// ineffective on platforms not Gen2 capable. 
+// Encoding boolean: +// 0 - Do Nothing +// 1 - Force Enable Gen2 (to invalidate PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED) +// + +#endif // NVRM_REGISTRY_H diff --git a/src/nvidia/interface/rmapi/src/finn_rm_api.c b/src/nvidia/interface/rmapi/src/finn_rm_api.c new file mode 100644 index 000000000..434b6e6b0 --- /dev/null +++ b/src/nvidia/interface/rmapi/src/finn_rm_api.c @@ -0,0 +1,7771 @@ +#include "finn_rm_api.h" +#include "ctrl/ctrl0000/ctrl0000nvd.h" +#include "ctrl/ctrl0080/ctrl0080dma.h" +#include "ctrl/ctrl0080/ctrl0080fb.h" +#include "ctrl/ctrl0080/ctrl0080fifo.h" +#include "ctrl/ctrl0080/ctrl0080gpu.h" +#include "ctrl/ctrl0080/ctrl0080gr.h" +#include "ctrl/ctrl0080/ctrl0080host.h" +#include "ctrl/ctrl0080/ctrl0080msenc.h" +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrl2080/ctrl2080i2c.h" +#include "ctrl/ctrl2080/ctrl2080nvd.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" +#include "ctrl/ctrl2080/ctrl2080rc.h" +#include "ctrl/ctrl402c.h" +#include "ctrl/ctrl83de/ctrl83dedebug.h" +#include "ctrl/ctrlb06f.h" + +#include <stddef.h> +#if defined(NVRM) /* Kernel Mode */ +#include "nvport/nvport.h" +#else /* User Mode */ +#include <stdlib.h> +#include <string.h> +#endif + + +// +// This file was generated with FINN, an NVIDIA coding tool. +// + +/* + * Serialization helper macros. These make field copying code more readable. + * Size is provided explicitly for cross-platform compatibility. + */ + +// Allocate memory. +#if defined(FINN_MALLOC) /* Use override from Makefile */ +#elif defined(NVRM) /* Kernel Mode */ +#define FINN_MALLOC(size) portMemAllocNonPaged(size) + +#else /* User Mode */ +#define FINN_MALLOC(size) malloc(size) +#endif + +// Free allocated memory. +#if defined(FINN_FREE) /* Use override from Makefile */ +#elif defined(NVRM) /* Kernel Mode */ +#define FINN_FREE(buf) portMemFree(buf) + +#else /* User Mode */ +#define FINN_FREE(buf) free(buf) +#endif + +// Set memory region to all zeroes. +#if defined(FINN_MEMZERO) /* Use override from Makefile */ +#elif defined(NVRM) /* Kernel Mode */ +#define FINN_MEMZERO(buf, size) portMemSet(buf, 0, size) + +#else /* User Mode */ +#define FINN_MEMZERO(buf, size) memset(buf, 0, size) +#endif + +// Copy nonoverlapping memory region. +#if defined(FINN_MEMCPY) /* Use override from Makefile */ +#elif defined(NVRM) /* Kernel Mode */ +#define FINN_MEMCPY(dst, src, size) portMemCopy(dst, size, src, size) + +#else /* User Mode */ +#define FINN_MEMCPY(dst, src, size) memcpy(dst, src, size) +#endif + +// Report an error. +#if defined(FINN_ERROR) /* Use override from Makefile */ +#elif defined(NVRM) /* Kernel Mode */ +#define FINN_ERROR(err) /* No-op */ + +#else /* User Mode */ +#define FINN_ERROR(err) /* No-op */ +#endif + +// Copy val into buf as type and increment buf by size. +#define FINN_COPY_TO_BUFFER(buf, val, type, size) \ + do { \ + *((type *)(buf)) = (val); \ + (buf) += (size); \ + } while(0) + +// Copy buf into var as type and increment buf by size. +#define FINN_COPY_FROM_BUFFER(var, buf, type, size) \ + do { \ + (var) = *((type *)(buf)); \ + (buf) += (size); \ + } while(0) + +// Copy size bytes from src to dst and increment dst by size. +#define FINN_MEMCPY_TO_BUFFER(dst, src, size) \ + do { \ + FINN_MEMCPY((dst), (src), (size)); \ + (dst) += (size); \ + } while(0) + +// Copy size bytes from src to dst and increment src by size.
+#define FINN_MEMCPY_FROM_BUFFER(dst, src, size) \ + do { \ + FINN_MEMCPY((dst), (src), (size)); \ + (src) += (size); \ + } while(0) + +// Set ptr to buf as type and increment buf by size. +#define FINN_SET_PTR_TO_BUFFER(ptr, buf, type, size) \ + do { \ + (ptr) = (type)(NvUPtr)(buf); \ + (buf) += (size); \ + } while(0) + +// Align a byte pointer up to the 8-byte boundary. +#define FINN_ALIGN_UP_BYTE_PTR(ptr) \ + do { \ + (ptr) = (NvU8 *)(((NvUPtr)(ptr) + 7) &~ 7); \ + } while(0) \ + +NV_STATUS FinnRmApiSerializeInternal(NvU64 interface, NvU64 message, const char *src, char **dst, NvLength dst_size, NvBool seri_up); +NV_STATUS FinnRmApiDeserializeInternal(char * const *src, NvLength src_size, char *dst, NvLength dst_size, NvBool deser_up); + +static NV_STATUS FinnNv01RootNvdSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv01RootNvdDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_ROOT_NVD *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv01RootNvdGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv01RootNvdGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv01Device0DmaSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv01Device0DmaDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_DMA *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv01Device0DmaGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv01Device0DmaGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv01Device0FbSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv01Device0FbDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_FB *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv01Device0FbGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv01Device0FbGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv01Device0FifoSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv01Device0FifoDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_FIFO *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv01Device0FifoGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv01Device0FifoGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv01Device0GpuSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv01Device0GpuDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_GPU *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv01Device0GpuGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv01Device0GpuGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv01Device0GrSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv01Device0GrDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_GR *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv01Device0GrGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv01Device0GrGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv01Device0HostSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv01Device0HostDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_HOST *dst, NvLength dst_size, NvBool 
deser_up); +static NvU64 FinnNv01Device0HostGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv01Device0HostGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv01Device0MsencSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv01Device0MsencDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_MSENC *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv01Device0MsencGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv01Device0MsencGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv20Subdevice0CeSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv20Subdevice0CeDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_CE *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv20Subdevice0CeGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv20Subdevice0CeGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv20Subdevice0GpuSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv20Subdevice0GpuDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_GPU *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv20Subdevice0GpuGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv20Subdevice0GpuGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv20Subdevice0I2cSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv20Subdevice0I2cDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_I2C *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv20Subdevice0I2cGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv20Subdevice0I2cGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv20Subdevice0NvdSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv20Subdevice0NvdDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_NVD *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv20Subdevice0NvdGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv20Subdevice0NvdGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv20Subdevice0PerfSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv20Subdevice0PerfDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_PERF *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv20Subdevice0PerfGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv20Subdevice0PerfGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv20Subdevice0RcSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv20Subdevice0RcDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_RC *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv20Subdevice0RcGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnNv20Subdevice0RcGetUnserializedSize(NvU64 message); +static NV_STATUS FinnNv40I2cI2cSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnNv40I2cI2cDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV40_I2C_I2C *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnNv40I2cI2cGetSerializedSize(NvU64 
message, const NvP64 src); +static NvU64 FinnNv40I2cI2cGetUnserializedSize(NvU64 message); +static NV_STATUS FinnGt200DebuggerDebugSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnGt200DebuggerDebugDeserialize(NvU8 **src, const NvU8 *src_max, FINN_GT200_DEBUGGER_DEBUG *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnGt200DebuggerDebugGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnGt200DebuggerDebugGetUnserializedSize(NvU64 message); +static NV_STATUS FinnMaxwellChannelGpfifoAGpfifoSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS FinnMaxwellChannelGpfifoAGpfifoDeserialize(NvU8 **src, const NvU8 *src_max, FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO *dst, NvLength dst_size, NvBool deser_up); +static NvU64 FinnMaxwellChannelGpfifoAGpfifoGetSerializedSize(NvU64 message, const NvP64 src); +static NvU64 FinnMaxwellChannelGpfifoAGpfifoGetUnserializedSize(NvU64 message); + +static NV_STATUS Nv0000CtrlNvdGetDumpParamsSerialize(const NV0000_CTRL_NVD_GET_DUMP_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0000CtrlNvdGetDumpParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0000_CTRL_NVD_GET_DUMP_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0000CtrlNvdGetDumpParamsGetSerializedSize(const NV0000_CTRL_NVD_GET_DUMP_PARAMS *src); +static NV_STATUS Nv0080CtrlDmaUpdatePde2PageTableParamsSerialize(const NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlDmaUpdatePde2PageTableParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlDmaUpdatePde2PageTableParamsGetSerializedSize(const NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *src); +static NV_STATUS Nv0080CtrlDmaUpdatePde2ParamsSerialize(const NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlDmaUpdatePde2ParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlDmaUpdatePde2ParamsGetSerializedSize(const NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *src); +static NV_STATUS Nv0080CtrlFbGetCapsParamsSerialize(const NV0080_CTRL_FB_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlFbGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FB_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlFbGetCapsParamsGetSerializedSize(const NV0080_CTRL_FB_GET_CAPS_PARAMS *src); +static NV_STATUS Nv0080CtrlFifoGetCapsParamsSerialize(const NV0080_CTRL_FIFO_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlFifoGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlFifoGetCapsParamsGetSerializedSize(const NV0080_CTRL_FIFO_GET_CAPS_PARAMS *src); +static NV_STATUS Nv0080CtrlFifoChannelSerialize(const NV0080_CTRL_FIFO_CHANNEL *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlFifoChannelDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FIFO_CHANNEL *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlFifoChannelGetSerializedSize(const 
NV0080_CTRL_FIFO_CHANNEL *src); +static NV_STATUS Nv0080CtrlFifoStartSelectedChannelsParamsSerialize(const NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlFifoStartSelectedChannelsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlFifoStartSelectedChannelsParamsGetSerializedSize(const NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *src); +static NV_STATUS Nv0080CtrlFifoGetChannellistParamsSerialize(const NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlFifoGetChannellistParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlFifoGetChannellistParamsGetSerializedSize(const NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *src); +static NV_STATUS Nv0080CtrlGpuGetClasslistParamsSerialize(const NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlGpuGetClasslistParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlGpuGetClasslistParamsGetSerializedSize(const NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *src); +static NV_STATUS Nv0080CtrlGrGetCapsParamsSerialize(const NV0080_CTRL_GR_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlGrGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_GR_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlGrGetCapsParamsGetSerializedSize(const NV0080_CTRL_GR_GET_CAPS_PARAMS *src); +static NV_STATUS Nv0080CtrlHostGetCapsParamsSerialize(const NV0080_CTRL_HOST_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlHostGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_HOST_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlHostGetCapsParamsGetSerializedSize(const NV0080_CTRL_HOST_GET_CAPS_PARAMS *src); +static NV_STATUS Nv0080CtrlMsencGetCapsParamsSerialize(const NV0080_CTRL_MSENC_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv0080CtrlMsencGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_MSENC_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv0080CtrlMsencGetCapsParamsGetSerializedSize(const NV0080_CTRL_MSENC_GET_CAPS_PARAMS *src); +static NV_STATUS Nv2080CtrlCeGetCapsParamsSerialize(const NV2080_CTRL_CE_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv2080CtrlCeGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_CE_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv2080CtrlCeGetCapsParamsGetSerializedSize(const NV2080_CTRL_CE_GET_CAPS_PARAMS *src); +static NV_STATUS Nv2080CtrlGpuGetEnginesParamsSerialize(const NV2080_CTRL_GPU_GET_ENGINES_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv2080CtrlGpuGetEnginesParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv2080CtrlGpuGetEnginesParamsGetSerializedSize(const NV2080_CTRL_GPU_GET_ENGINES_PARAMS *src); +static 
NV_STATUS Nv2080CtrlGpuGetEngineClasslistParamsSerialize(const NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv2080CtrlGpuGetEngineClasslistParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv2080CtrlGpuGetEngineClasslistParamsGetSerializedSize(const NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *src); +static NV_STATUS Nv2080CtrlGpumonSamplesSerialize(const NV2080_CTRL_GPUMON_SAMPLES *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up, NvU64 interface, NvU64 message); +static NV_STATUS Nv2080CtrlGpumonSamplesDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_GPUMON_SAMPLES *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv2080CtrlGpumonSamplesGetSerializedSize(const NV2080_CTRL_GPUMON_SAMPLES *src); +static NV_STATUS Nv2080CtrlI2cAccessParamsSerialize(const NV2080_CTRL_I2C_ACCESS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv2080CtrlI2cAccessParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_I2C_ACCESS_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv2080CtrlI2cAccessParamsGetSerializedSize(const NV2080_CTRL_I2C_ACCESS_PARAMS *src); +static NV_STATUS Nv2080CtrlNvdGetDumpParamsSerialize(const NV2080_CTRL_NVD_GET_DUMP_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv2080CtrlNvdGetDumpParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_NVD_GET_DUMP_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv2080CtrlNvdGetDumpParamsGetSerializedSize(const NV2080_CTRL_NVD_GET_DUMP_PARAMS *src); +static NV_STATUS Nv2080CtrlRcReadVirtualMemParamsSerialize(const NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv2080CtrlRcReadVirtualMemParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv2080CtrlRcReadVirtualMemParamsGetSerializedSize(const NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *src); +static NV_STATUS Nv402cCtrlI2cIndexedParamsSerialize(const NV402C_CTRL_I2C_INDEXED_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cIndexedParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_INDEXED_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cIndexedParamsGetSerializedSize(const NV402C_CTRL_I2C_INDEXED_PARAMS *src); +static NV_STATUS Nv402cCtrlI2cTransactionTypeValueToId(NvU8 **buf, const NvU8 *buf_max, NvU64 convert_size); +static NV_STATUS Nv402cCtrlI2cTransactionTypeIdtoValue(NvU8 **buf, const NvU8 *buf_max, NvU64 convert_size); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusQuickRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusQuickRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataSmbusQuickRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cByteRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cByteRwDeserialize(NvU8 
**src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataI2cByteRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cBlockRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cBlockRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataI2cBlockRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusByteRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusByteRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataSmbusByteRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusWordRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusWordRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataSmbusWordRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cBufferRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cBufferRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataI2cBufferRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusBlockRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusBlockRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataSmbusBlockRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusProcessCallSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusProcessCallDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataSmbusProcessCallGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallDeserialize(NvU8 **src, 
const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataReadEdidDdcSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionDataReadEdidDdcDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionDataReadEdidDdcGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *src); +static NV_STATUS Nv402cCtrlI2cTransactionDataSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up, NV402C_CTRL_I2C_TRANSACTION_TYPE transType); +static NV_STATUS Nv402cCtrlI2cTransactionDataDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA *dst, NvLength dst_size, NvBool deser_up, NV402C_CTRL_I2C_TRANSACTION_TYPE transType); +static NvU64 Nv402cCtrlI2cTransactionDataGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA *src, NV402C_CTRL_I2C_TRANSACTION_TYPE transType); +static NV_STATUS Nv402cCtrlI2cTransactionParamsSerialize(const NV402C_CTRL_I2C_TRANSACTION_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv402cCtrlI2cTransactionParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv402cCtrlI2cTransactionParamsGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_PARAMS *src); +static NV_STATUS Nv83deCtrlDebugReadMemoryParamsSerialize(const NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv83deCtrlDebugReadMemoryParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv83deCtrlDebugReadMemoryParamsGetSerializedSize(const NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *src); +static NV_STATUS Nv83deCtrlDebugWriteMemoryParamsSerialize(const NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nv83deCtrlDebugWriteMemoryParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nv83deCtrlDebugWriteMemoryParamsGetSerializedSize(const NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *src); +static NV_STATUS Nvb06fCtrlGetEngineCtxDataParamsSerialize(const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nvb06fCtrlGetEngineCtxDataParamsDeserialize(NvU8 **src, const NvU8 *src_max, 
NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nvb06fCtrlGetEngineCtxDataParamsGetSerializedSize(const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *src); +static NV_STATUS Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsSerialize(const NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up); +static NV_STATUS Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsDeserialize(NvU8 **src, const NvU8 *src_max, NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS *dst, NvLength dst_size, NvBool deser_up); +static NvU64 Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsGetSerializedSize(const NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS *src); + +NV_STATUS FinnRmApiSerializeUp(NvU64 interface, NvU64 message, const void *src, NvU8 **dst, NvLength dst_size) +{ + return FinnRmApiSerializeInternal(interface, message, (const char *) src, (char **) dst, dst_size / sizeof(NvU8), NV_TRUE); +} + +NV_STATUS FinnRmApiSerializeDown(NvU64 interface, NvU64 message, const void *src, NvU8 **dst, NvLength dst_size) +{ + return FinnRmApiSerializeInternal(interface, message, (const char *) src, (char **) dst, dst_size / sizeof(NvU8), NV_FALSE); +} + +NV_STATUS FinnRmApiDeserializeDown(NvU8 **src, NvLength src_size, void *dst, NvLength dst_size) +{ + return FinnRmApiDeserializeInternal((char **) src, src_size / sizeof(NvU8), (char *) dst, dst_size, NV_FALSE); +} + +NV_STATUS FinnRmApiDeserializeUp(NvU8 * const *src, NvLength src_size, void *dst, NvLength dst_size) +{ + return FinnRmApiDeserializeInternal((char **) src, src_size / sizeof(NvU8), (char *) dst, dst_size, NV_TRUE); +} + + +NV_STATUS FinnRmApiSerializeInternal(NvU64 interface, NvU64 message, const char *src, char **dst, NvLength dst_size, NvBool seri_up) +{ + const char *dst_max = *dst + dst_size; + + // Input validation + if (!src || !dst || !(*dst) || !dst_size) + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + + // Forward to interface-specific routine + switch (interface) + { + case FINN_INTERFACE_ID(FINN_NV01_ROOT_NVD): + return FinnNv01RootNvdSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_DMA): + return FinnNv01Device0DmaSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FB): + return FinnNv01Device0FbSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FIFO): + return FinnNv01Device0FifoSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GPU): + return FinnNv01Device0GpuSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GR): + return FinnNv01Device0GrSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_HOST): + return FinnNv01Device0HostSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_MSENC): + return FinnNv01Device0MsencSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_CE): + return FinnNv20Subdevice0CeSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_GPU): + return FinnNv20Subdevice0GpuSerialize(message, src, (NvU8 **) dst, (const NvU8 *) 
dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_I2C): + return FinnNv20Subdevice0I2cSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_NVD): + return FinnNv20Subdevice0NvdSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_PERF): + return FinnNv20Subdevice0PerfSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_RC): + return FinnNv20Subdevice0RcSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_NV40_I2C_I2C): + return FinnNv40I2cI2cSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_GT200_DEBUGGER_DEBUG): + return FinnGt200DebuggerDebugSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + case FINN_INTERFACE_ID(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO): + return FinnMaxwellChannelGpfifoAGpfifoSerialize(message, src, (NvU8 **) dst, (const NvU8 *) dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + + +NV_STATUS FinnRmApiDeserializeInternal(char * const *src, NvLength src_size, char *dst, NvLength dst_size, NvBool deser_up) +{ + const char *src_max = *src + src_size; + + // Input validation + if (!src || !(*src) || !src_size || !dst || !dst_size) + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + + if (((NvU64*)(*src))[0] != FINN_SERIALIZATION_VERSION) + { + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + return NV_ERR_LIB_RM_VERSION_MISMATCH; + } + + if (((NvU64*)(*src))[1] > src_size || ((NvU64 *)(*src))[1] < (4 * sizeof(NvU64))) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Forward to interface-specific routine + switch (((NvU64 *)(*src))[2]) + { + case FINN_INTERFACE_ID(FINN_NV01_ROOT_NVD): + return FinnNv01RootNvdDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV01_ROOT_NVD *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_DMA): + return FinnNv01Device0DmaDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV01_DEVICE_0_DMA *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FB): + return FinnNv01Device0FbDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV01_DEVICE_0_FB *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FIFO): + return FinnNv01Device0FifoDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV01_DEVICE_0_FIFO *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GPU): + return FinnNv01Device0GpuDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV01_DEVICE_0_GPU *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GR): + return FinnNv01Device0GrDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV01_DEVICE_0_GR *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_HOST): + return FinnNv01Device0HostDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV01_DEVICE_0_HOST *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_MSENC): + return FinnNv01Device0MsencDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV01_DEVICE_0_MSENC *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_CE): + return FinnNv20Subdevice0CeDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV20_SUBDEVICE_0_CE *) 
dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_GPU): + return FinnNv20Subdevice0GpuDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV20_SUBDEVICE_0_GPU *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_I2C): + return FinnNv20Subdevice0I2cDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV20_SUBDEVICE_0_I2C *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_NVD): + return FinnNv20Subdevice0NvdDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV20_SUBDEVICE_0_NVD *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_PERF): + return FinnNv20Subdevice0PerfDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV20_SUBDEVICE_0_PERF *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_RC): + return FinnNv20Subdevice0RcDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV20_SUBDEVICE_0_RC *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_NV40_I2C_I2C): + return FinnNv40I2cI2cDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_NV40_I2C_I2C *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_GT200_DEBUGGER_DEBUG): + return FinnGt200DebuggerDebugDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_GT200_DEBUGGER_DEBUG *) dst, dst_size, deser_up); + case FINN_INTERFACE_ID(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO): + return FinnMaxwellChannelGpfifoAGpfifoDeserialize((NvU8 **) src, (const NvU8 *) src_max, (FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +NvU64 FinnRmApiGetSerializedSize(NvU64 interface, NvU64 message, const NvP64 src) +{ + // Input validation + if (!src) + return 0; + + // Forward to interface-specific routine + switch (interface) + { + case FINN_INTERFACE_ID(FINN_NV01_ROOT_NVD): + return FinnNv01RootNvdGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_DMA): + return FinnNv01Device0DmaGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FB): + return FinnNv01Device0FbGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FIFO): + return FinnNv01Device0FifoGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GPU): + return FinnNv01Device0GpuGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GR): + return FinnNv01Device0GrGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_HOST): + return FinnNv01Device0HostGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_MSENC): + return FinnNv01Device0MsencGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_CE): + return FinnNv20Subdevice0CeGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_GPU): + return FinnNv20Subdevice0GpuGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_I2C): + return FinnNv20Subdevice0I2cGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_NVD): + return FinnNv20Subdevice0NvdGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_PERF): + return FinnNv20Subdevice0PerfGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_RC): + return FinnNv20Subdevice0RcGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_NV40_I2C_I2C): + return FinnNv40I2cI2cGetSerializedSize(message, src); + case 
FINN_INTERFACE_ID(FINN_GT200_DEBUGGER_DEBUG): + return FinnGt200DebuggerDebugGetSerializedSize(message, src); + case FINN_INTERFACE_ID(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO): + return FinnMaxwellChannelGpfifoAGpfifoGetSerializedSize(message, src); + default: + return 0; + } +} + +NvU64 FinnRmApiGetUnserializedSize(NvU64 interface, NvU64 message) +{ + // Forward to interface-specific routine + switch (interface) + { + case FINN_INTERFACE_ID(FINN_NV01_ROOT_NVD): + return FinnNv01RootNvdGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_DMA): + return FinnNv01Device0DmaGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FB): + return FinnNv01Device0FbGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_FIFO): + return FinnNv01Device0FifoGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GPU): + return FinnNv01Device0GpuGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_GR): + return FinnNv01Device0GrGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_HOST): + return FinnNv01Device0HostGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV01_DEVICE_0_MSENC): + return FinnNv01Device0MsencGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_CE): + return FinnNv20Subdevice0CeGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_GPU): + return FinnNv20Subdevice0GpuGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_I2C): + return FinnNv20Subdevice0I2cGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_NVD): + return FinnNv20Subdevice0NvdGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_PERF): + return FinnNv20Subdevice0PerfGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_RC): + return FinnNv20Subdevice0RcGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_NV40_I2C_I2C): + return FinnNv40I2cI2cGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_GT200_DEBUGGER_DEBUG): + return FinnGt200DebuggerDebugGetUnserializedSize(message); + case FINN_INTERFACE_ID(FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO): + return FinnMaxwellChannelGpfifoAGpfifoGetUnserializedSize(message); + default: + return 0; + } +} + +static NV_STATUS FinnNv01RootNvdSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0000_CTRL_NVD_GET_DUMP_PARAMS): + return Nv0000CtrlNvdGetDumpParamsSerialize((const NV0000_CTRL_NVD_GET_DUMP_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv01RootNvdDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_ROOT_NVD *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV0000_CTRL_NVD_GET_DUMP_PARAMS): + return Nv0000CtrlNvdGetDumpParamsDeserialize(src, src_max, (NV0000_CTRL_NVD_GET_DUMP_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv01RootNvdGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0000_CTRL_NVD_GET_DUMP_PARAMS): + return Nv0000CtrlNvdGetDumpParamsGetSerializedSize(NvP64_VALUE(src)); + 
default: + return 0; + } +} + +static NvU64 FinnNv01RootNvdGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0000_CTRL_NVD_GET_DUMP_PARAMS): + return sizeof(NV0000_CTRL_NVD_GET_DUMP_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv01Device0DmaSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS): + return Nv0080CtrlDmaUpdatePde2ParamsSerialize((const NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv01Device0DmaDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_DMA *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS): + return Nv0080CtrlDmaUpdatePde2ParamsDeserialize(src, src_max, (NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv01Device0DmaGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS): + return Nv0080CtrlDmaUpdatePde2ParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv01Device0DmaGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS): + return sizeof(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv01Device0FbSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_FB_GET_CAPS_PARAMS): + return Nv0080CtrlFbGetCapsParamsSerialize((const NV0080_CTRL_FB_GET_CAPS_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv01Device0FbDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_FB *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV0080_CTRL_FB_GET_CAPS_PARAMS): + return Nv0080CtrlFbGetCapsParamsDeserialize(src, src_max, (NV0080_CTRL_FB_GET_CAPS_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv01Device0FbGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_FB_GET_CAPS_PARAMS): + return Nv0080CtrlFbGetCapsParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv01Device0FbGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_FB_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_FB_GET_CAPS_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv01Device0FifoSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // 
Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CAPS_PARAMS): + return Nv0080CtrlFifoGetCapsParamsSerialize((const NV0080_CTRL_FIFO_GET_CAPS_PARAMS *) src, dst, dst_max, seri_up); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS): + return Nv0080CtrlFifoStartSelectedChannelsParamsSerialize((const NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *) src, dst, dst_max, seri_up); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS): + return Nv0080CtrlFifoGetChannellistParamsSerialize((const NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv01Device0FifoDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_FIFO *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CAPS_PARAMS): + return Nv0080CtrlFifoGetCapsParamsDeserialize(src, src_max, (NV0080_CTRL_FIFO_GET_CAPS_PARAMS *) dst, dst_size, deser_up); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS): + return Nv0080CtrlFifoStartSelectedChannelsParamsDeserialize(src, src_max, (NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *) dst, dst_size, deser_up); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS): + return Nv0080CtrlFifoGetChannellistParamsDeserialize(src, src_max, (NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv01Device0FifoGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CAPS_PARAMS): + return Nv0080CtrlFifoGetCapsParamsGetSerializedSize(NvP64_VALUE(src)); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS): + return Nv0080CtrlFifoStartSelectedChannelsParamsGetSerializedSize(NvP64_VALUE(src)); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS): + return Nv0080CtrlFifoGetChannellistParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv01Device0FifoGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_FIFO_GET_CAPS_PARAMS); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS): + return sizeof(NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS); + case FINN_MESSAGE_ID(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS): + return sizeof(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv01Device0GpuSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS): + return Nv0080CtrlGpuGetClasslistParamsSerialize((const NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv01Device0GpuDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_GPU *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case 
FINN_MESSAGE_ID(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS): + return Nv0080CtrlGpuGetClasslistParamsDeserialize(src, src_max, (NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv01Device0GpuGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS): + return Nv0080CtrlGpuGetClasslistParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv01Device0GpuGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS): + return sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv01Device0GrSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_GR_GET_CAPS_PARAMS): + return Nv0080CtrlGrGetCapsParamsSerialize((const NV0080_CTRL_GR_GET_CAPS_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv01Device0GrDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_GR *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV0080_CTRL_GR_GET_CAPS_PARAMS): + return Nv0080CtrlGrGetCapsParamsDeserialize(src, src_max, (NV0080_CTRL_GR_GET_CAPS_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv01Device0GrGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_GR_GET_CAPS_PARAMS): + return Nv0080CtrlGrGetCapsParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv01Device0GrGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_GR_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_GR_GET_CAPS_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv01Device0HostSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_HOST_GET_CAPS_PARAMS): + return Nv0080CtrlHostGetCapsParamsSerialize((const NV0080_CTRL_HOST_GET_CAPS_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv01Device0HostDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_HOST *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV0080_CTRL_HOST_GET_CAPS_PARAMS): + return Nv0080CtrlHostGetCapsParamsDeserialize(src, src_max, (NV0080_CTRL_HOST_GET_CAPS_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv01Device0HostGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case 
FINN_MESSAGE_ID(NV0080_CTRL_HOST_GET_CAPS_PARAMS): + return Nv0080CtrlHostGetCapsParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv01Device0HostGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_HOST_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_HOST_GET_CAPS_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv01Device0MsencSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_MSENC_GET_CAPS_PARAMS): + return Nv0080CtrlMsencGetCapsParamsSerialize((const NV0080_CTRL_MSENC_GET_CAPS_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv01Device0MsencDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV01_DEVICE_0_MSENC *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV0080_CTRL_MSENC_GET_CAPS_PARAMS): + return Nv0080CtrlMsencGetCapsParamsDeserialize(src, src_max, (NV0080_CTRL_MSENC_GET_CAPS_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv01Device0MsencGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_MSENC_GET_CAPS_PARAMS): + return Nv0080CtrlMsencGetCapsParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv01Device0MsencGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV0080_CTRL_MSENC_GET_CAPS_PARAMS): + return sizeof(NV0080_CTRL_MSENC_GET_CAPS_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv20Subdevice0CeSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_CE_GET_CAPS_PARAMS): + return Nv2080CtrlCeGetCapsParamsSerialize((const NV2080_CTRL_CE_GET_CAPS_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv20Subdevice0CeDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_CE *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV2080_CTRL_CE_GET_CAPS_PARAMS): + return Nv2080CtrlCeGetCapsParamsDeserialize(src, src_max, (NV2080_CTRL_CE_GET_CAPS_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv20Subdevice0CeGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_CE_GET_CAPS_PARAMS): + return Nv2080CtrlCeGetCapsParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv20Subdevice0CeGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_CE_GET_CAPS_PARAMS): + return sizeof(NV2080_CTRL_CE_GET_CAPS_PARAMS); + default: + return 0; + } +} + 
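The generated routines above all follow the same two-level dispatch: an interface-level Serialize/Deserialize/GetSerializedSize function switches on the FINN message ID and forwards to the matching per-message routine, falling back to NV_ERR_NOT_SUPPORTED (or a size of 0) when the ID is unknown. The self-contained sketch below mirrors only that shape; every name in it (ToyStatus, ToyInterfaceSerialize, ToyFooParams, ...) is hypothetical and is not part of the FINN-generated API or of this commit.

/*
 * Minimal sketch of the dispatch pattern used by the generated code:
 * an interface-level routine switches on a message ID and forwards to
 * a message-specific handler, returning "not supported" for unknown IDs.
 * All names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef int ToyStatus;
#define TOY_OK            0
#define TOY_NOT_SUPPORTED 1

#define TOY_MSG_FOO 0x1

typedef struct { uint32_t a; uint32_t b; } ToyFooParams;

/* Message-specific routine: copies the fixed-size struct into the buffer. */
static ToyStatus ToySerializeFoo(const ToyFooParams *src, uint8_t *dst, size_t dst_len)
{
    if (dst_len < sizeof(*src))
        return TOY_NOT_SUPPORTED; /* stand-in for a buffer-too-small error */
    memcpy(dst, src, sizeof(*src));
    return TOY_OK;
}

/* Interface-level routine: forward to the message-specific routine. */
static ToyStatus ToyInterfaceSerialize(uint64_t message, const void *src,
                                       uint8_t *dst, size_t dst_len)
{
    switch (message)
    {
        case TOY_MSG_FOO:
            return ToySerializeFoo((const ToyFooParams *)src, dst, dst_len);
        default:
            return TOY_NOT_SUPPORTED;
    }
}

int main(void)
{
    ToyFooParams params = { 1, 2 };
    uint8_t buffer[sizeof(params)];

    if (ToyInterfaceSerialize(TOY_MSG_FOO, &params, buffer, sizeof(buffer)) == TOY_OK)
        printf("serialized %zu bytes\n", sizeof(params));
    return 0;
}

In the generated code the same forwarding structure is repeated once per interface (DMA, FB, FIFO, GPU, and so on), which is why the functions differ only in the message IDs they accept and the per-message routines they call.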
+static NV_STATUS FinnNv20Subdevice0GpuSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINES_PARAMS): + return Nv2080CtrlGpuGetEnginesParamsSerialize((const NV2080_CTRL_GPU_GET_ENGINES_PARAMS *) src, dst, dst_max, seri_up); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS): + return Nv2080CtrlGpuGetEngineClasslistParamsSerialize((const NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv20Subdevice0GpuDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_GPU *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINES_PARAMS): + return Nv2080CtrlGpuGetEnginesParamsDeserialize(src, src_max, (NV2080_CTRL_GPU_GET_ENGINES_PARAMS *) dst, dst_size, deser_up); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS): + return Nv2080CtrlGpuGetEngineClasslistParamsDeserialize(src, src_max, (NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv20Subdevice0GpuGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINES_PARAMS): + return Nv2080CtrlGpuGetEnginesParamsGetSerializedSize(NvP64_VALUE(src)); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS): + return Nv2080CtrlGpuGetEngineClasslistParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv20Subdevice0GpuGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINES_PARAMS): + return sizeof(NV2080_CTRL_GPU_GET_ENGINES_PARAMS); + case FINN_MESSAGE_ID(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS): + return sizeof(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv20Subdevice0I2cSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_I2C_ACCESS_PARAMS): + return Nv2080CtrlI2cAccessParamsSerialize((const NV2080_CTRL_I2C_ACCESS_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv20Subdevice0I2cDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_I2C *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV2080_CTRL_I2C_ACCESS_PARAMS): + return Nv2080CtrlI2cAccessParamsDeserialize(src, src_max, (NV2080_CTRL_I2C_ACCESS_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv20Subdevice0I2cGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_I2C_ACCESS_PARAMS): + return Nv2080CtrlI2cAccessParamsGetSerializedSize(NvP64_VALUE(src)); + default: + 
return 0; + } +} + +static NvU64 FinnNv20Subdevice0I2cGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_I2C_ACCESS_PARAMS): + return sizeof(NV2080_CTRL_I2C_ACCESS_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv20Subdevice0NvdSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_NVD_GET_DUMP_PARAMS): + return Nv2080CtrlNvdGetDumpParamsSerialize((const NV2080_CTRL_NVD_GET_DUMP_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv20Subdevice0NvdDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_NVD *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV2080_CTRL_NVD_GET_DUMP_PARAMS): + return Nv2080CtrlNvdGetDumpParamsDeserialize(src, src_max, (NV2080_CTRL_NVD_GET_DUMP_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv20Subdevice0NvdGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_NVD_GET_DUMP_PARAMS): + return Nv2080CtrlNvdGetDumpParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv20Subdevice0NvdGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_NVD_GET_DUMP_PARAMS): + return sizeof(NV2080_CTRL_NVD_GET_DUMP_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv20Subdevice0PerfSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM): + return Nv2080CtrlGpumonSamplesSerialize((const NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM *) src, dst, dst_max, seri_up, FINN_INTERFACE_ID(FINN_NV20_SUBDEVICE_0_PERF), FINN_MESSAGE_ID(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM)); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv20Subdevice0PerfDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_PERF *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM): + return Nv2080CtrlGpumonSamplesDeserialize(src, src_max, (NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv20Subdevice0PerfGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM): + return Nv2080CtrlGpumonSamplesGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv20Subdevice0PerfGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case 
FINN_MESSAGE_ID(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM): + return sizeof(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_PARAM); + default: + return 0; + } +} + +static NV_STATUS FinnNv20Subdevice0RcSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS): + return Nv2080CtrlRcReadVirtualMemParamsSerialize((const NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv20Subdevice0RcDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV20_SUBDEVICE_0_RC *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS): + return Nv2080CtrlRcReadVirtualMemParamsDeserialize(src, src_max, (NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv20Subdevice0RcGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS): + return Nv2080CtrlRcReadVirtualMemParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnNv20Subdevice0RcGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS): + return sizeof(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnNv40I2cI2cSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_INDEXED_PARAMS): + return Nv402cCtrlI2cIndexedParamsSerialize((const NV402C_CTRL_I2C_INDEXED_PARAMS *) src, dst, dst_max, seri_up); + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_TRANSACTION_PARAMS): + return Nv402cCtrlI2cTransactionParamsSerialize((const NV402C_CTRL_I2C_TRANSACTION_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnNv40I2cI2cDeserialize(NvU8 **src, const NvU8 *src_max, FINN_NV40_I2C_I2C *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_INDEXED_PARAMS): + return Nv402cCtrlI2cIndexedParamsDeserialize(src, src_max, (NV402C_CTRL_I2C_INDEXED_PARAMS *) dst, dst_size, deser_up); + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_TRANSACTION_PARAMS): + return Nv402cCtrlI2cTransactionParamsDeserialize(src, src_max, (NV402C_CTRL_I2C_TRANSACTION_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnNv40I2cI2cGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_INDEXED_PARAMS): + return Nv402cCtrlI2cIndexedParamsGetSerializedSize(NvP64_VALUE(src)); + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_TRANSACTION_PARAMS): + return Nv402cCtrlI2cTransactionParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + 
} +} + +static NvU64 FinnNv40I2cI2cGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_INDEXED_PARAMS): + return sizeof(NV402C_CTRL_I2C_INDEXED_PARAMS); + case FINN_MESSAGE_ID(NV402C_CTRL_I2C_TRANSACTION_PARAMS): + return sizeof(NV402C_CTRL_I2C_TRANSACTION_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnGt200DebuggerDebugSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS): + return Nv83deCtrlDebugReadMemoryParamsSerialize((const NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *) src, dst, dst_max, seri_up); + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS): + return Nv83deCtrlDebugWriteMemoryParamsSerialize((const NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnGt200DebuggerDebugDeserialize(NvU8 **src, const NvU8 *src_max, FINN_GT200_DEBUGGER_DEBUG *dst, NvLength dst_size, NvBool deser_up) +{ + // Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS): + return Nv83deCtrlDebugReadMemoryParamsDeserialize(src, src_max, (NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *) dst, dst_size, deser_up); + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS): + return Nv83deCtrlDebugWriteMemoryParamsDeserialize(src, src_max, (NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnGt200DebuggerDebugGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS): + return Nv83deCtrlDebugReadMemoryParamsGetSerializedSize(NvP64_VALUE(src)); + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS): + return Nv83deCtrlDebugWriteMemoryParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnGt200DebuggerDebugGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS): + return sizeof(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS); + case FINN_MESSAGE_ID(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS): + return sizeof(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS); + default: + return 0; + } +} + +static NV_STATUS FinnMaxwellChannelGpfifoAGpfifoSerialize(NvU64 message, const char *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS): + return Nvb06fCtrlGetEngineCtxDataParamsSerialize((const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *) src, dst, dst_max, seri_up); + case FINN_MESSAGE_ID(NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS): + return Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsSerialize((const NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS *) src, dst, dst_max, seri_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NV_STATUS FinnMaxwellChannelGpfifoAGpfifoDeserialize(NvU8 **src, const NvU8 *src_max, FINN_MAXWELL_CHANNEL_GPFIFO_A_GPFIFO *dst, NvLength dst_size, NvBool deser_up) +{ + 
// Forward to message-specific routine + switch (((NvU64 *)(*src))[3]) + { + case FINN_MESSAGE_ID(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS): + return Nvb06fCtrlGetEngineCtxDataParamsDeserialize(src, src_max, (NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *) dst, dst_size, deser_up); + case FINN_MESSAGE_ID(NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS): + return Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsDeserialize(src, src_max, (NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS *) dst, dst_size, deser_up); + default: + { + FINN_ERROR(NV_ERR_NOT_SUPPORTED); + return NV_ERR_NOT_SUPPORTED; + } + } +} + +static NvU64 FinnMaxwellChannelGpfifoAGpfifoGetSerializedSize(NvU64 message, const NvP64 src) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS): + return Nvb06fCtrlGetEngineCtxDataParamsGetSerializedSize(NvP64_VALUE(src)); + case FINN_MESSAGE_ID(NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS): + return Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsGetSerializedSize(NvP64_VALUE(src)); + default: + return 0; + } +} + +static NvU64 FinnMaxwellChannelGpfifoAGpfifoGetUnserializedSize(NvU64 message) +{ + // Forward to message-specific routine + switch (message) + { + case FINN_MESSAGE_ID(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS): + return sizeof(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS); + case FINN_MESSAGE_ID(NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS): + return sizeof(NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS); + default: + return 0; + } +} + +static NV_STATUS Nv0000CtrlNvdGetDumpParamsSerialize(const NV0000_CTRL_NVD_GET_DUMP_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0000CtrlNvdGetDumpParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x6; // Interface ID + header->message = 0x2; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->component, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->size, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->size < 0 || src->size > NV0000_CTRL_NVD_MAX_DUMP_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pBuffer); + + if (src->pBuffer) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pBuffer, (src->size)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0000CtrlNvdGetDumpParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0000_CTRL_NVD_GET_DUMP_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the 
destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0000_CTRL_NVD_GET_DUMP_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->component, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->size, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->size < 0 || dst->size > NV0000_CTRL_NVD_MAX_DUMP_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pBuffer, pos, (dst->size)); + else + { + // Validate variable length buffer length + if (pos + ((dst->size)) > src_max || + pos + ((dst->size)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pBuffer, pos, NvP64, (dst->size)); + } + } + else + { + if (!deser_up) + dst->pBuffer = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0000CtrlNvdGetDumpParamsGetSerializedSize(const NV0000_CTRL_NVD_GET_DUMP_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pBuffer) + { + size += (src->size); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlDmaUpdatePde2PageTableParamsSerialize(const NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlDmaUpdatePde2PageTableParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->physAddr, NvU64, 8); + + FINN_COPY_TO_BUFFER(pos, src->numEntries, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->aperture, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlDmaUpdatePde2PageTableParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->physAddr, pos, NvU64, 8); + + FINN_COPY_FROM_BUFFER(dst->numEntries, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->aperture, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlDmaUpdatePde2PageTableParamsGetSerializedSize(const NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *src) +{ + // Suppress used-variable warnings. + (void) src; + + // This struct is static and its size is known at compile time. 
+ return 56; +} + +static NV_STATUS Nv0080CtrlDmaUpdatePde2ParamsSerialize(const NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlDmaUpdatePde2ParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x8018; // Interface ID + header->message = 0xf; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x3f; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->pdeIndex, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->flags, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->hVASpace, NvHandle, 4); + + FINN_COPY_TO_BUFFER(pos, src->subDeviceId, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Bounded nested fields + for (NvU64 i = 0; i < (NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE); ++i) + { + status = Nv0080CtrlDmaUpdatePde2PageTableParamsSerialize(&src->ptParams[i], &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + } + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pPdeBuffer); + + if (src->pPdeBuffer) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pPdeBuffer, 8); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlDmaUpdatePde2ParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x3f) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->pdeIndex, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->flags, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->hVASpace, pos, NvHandle, 4); + + FINN_COPY_FROM_BUFFER(dst->subDeviceId, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Bounded nested fields + for (NvU64 i = 0; i < (NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE); ++i) + { + status = Nv0080CtrlDmaUpdatePde2PageTableParamsDeserialize(&pos, src_max, &dst->ptParams[i], dst_size, deser_up); + if (status != NV_OK) + goto exit; + } + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pPdeBuffer, pos, 8); + else + { + FINN_SET_PTR_TO_BUFFER(dst->pPdeBuffer, pos, NvP64, 8); + } + } + else + { + if (!deser_up) + dst->pPdeBuffer = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlDmaUpdatePde2ParamsGetSerializedSize(const NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 168; + + // Add sizes that require runtime calculation + size += 112; + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pPdeBuffer) + { + size += 8; + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlFbGetCapsParamsSerialize(const NV0080_CTRL_FB_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlFbGetCapsParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x8013; // Interface ID + header->message = 0x1; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->capsTblSize, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->capsTblSize < 0 || src->capsTblSize > NV0080_CTRL_FB_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->capsTbl); + + if (src->capsTbl) + { + FINN_MEMCPY_TO_BUFFER(pos, src->capsTbl, (src->capsTblSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlFbGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FB_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_FB_GET_CAPS_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->capsTblSize, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->capsTblSize < 0 || dst->capsTblSize > NV0080_CTRL_FB_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->capsTbl, pos, (dst->capsTblSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->capsTblSize)) > src_max || + pos + ((dst->capsTblSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->capsTbl, pos, NvP64, (dst->capsTblSize)); + } + } + else + { + if (!deser_up) + dst->capsTbl = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlFbGetCapsParamsGetSerializedSize(const NV0080_CTRL_FB_GET_CAPS_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->capsTbl) + { + size += (src->capsTblSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlFifoGetCapsParamsSerialize(const NV0080_CTRL_FIFO_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlFifoGetCapsParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x8017; // Interface ID + header->message = 0x1; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->capsTblSize, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->capsTblSize < 0 || src->capsTblSize > NV0080_CTRL_FIFO_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->capsTbl); + + if (src->capsTbl) + { + FINN_MEMCPY_TO_BUFFER(pos, src->capsTbl, (src->capsTblSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlFifoGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FIFO_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_FIFO_GET_CAPS_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->capsTblSize, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->capsTblSize < 0 || dst->capsTblSize > NV0080_CTRL_FIFO_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->capsTbl, pos, (dst->capsTblSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->capsTblSize)) > src_max || + pos + ((dst->capsTblSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->capsTbl, pos, NvP64, (dst->capsTblSize)); + } + } + else + { + if (!deser_up) + dst->capsTbl = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlFifoGetCapsParamsGetSerializedSize(const NV0080_CTRL_FIFO_GET_CAPS_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->capsTbl) + { + size += (src->capsTblSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlFifoChannelSerialize(const NV0080_CTRL_FIFO_CHANNEL *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlFifoChannelGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x1; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->hChannel, NvHandle, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlFifoChannelDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FIFO_CHANNEL *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_FIFO_CHANNEL) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x1) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->hChannel, pos, NvHandle, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlFifoChannelGetSerializedSize(const NV0080_CTRL_FIFO_CHANNEL *src) +{ + // Suppress used-variable warnings. + (void) src; + + // This struct is static and its size is known at compile time. 
+ return 48; +} + +static NV_STATUS Nv0080CtrlFifoStartSelectedChannelsParamsSerialize(const NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlFifoStartSelectedChannelsParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x8017; // Interface ID + header->message = 0x5; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->fifoStartChannelListSize, NvU32, 4); + + FINN_MEMCPY_TO_BUFFER(pos, src->channelHandle, 32); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->fifoStartChannelList); + + if (src->fifoStartChannelList) + { + // Align + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + for (NvU64 i = 0; i < (src->fifoStartChannelListSize); ++i) + { + status = Nv0080CtrlFifoChannelSerialize(&(((const NV0080_CTRL_FIFO_CHANNEL *) (NvP64_VALUE(src->fifoStartChannelList)))[i]), &pos, dst_max, seri_up); + + if (status != NV_OK) + goto exit; + } + + // Free memory that was allocated during downward deserialization. + if (seri_up && src->fifoStartChannelList) + FINN_FREE(src->fifoStartChannelList); + } + + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlFifoStartSelectedChannelsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->fifoStartChannelListSize, pos, NvU32, 4); + + FINN_MEMCPY_FROM_BUFFER(dst->channelHandle, pos, 32); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + // Align + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Caller must set up the pointers when deserializing down. 
+ if (!deser_up) + { + // Variable element size + NvU64 element_size = Nv0080CtrlFifoChannelGetSerializedSize(NvP64_VALUE(dst->fifoStartChannelList)); + + // Validate variable length buffer length + if (element_size * (dst->fifoStartChannelListSize) < element_size || + pos + (element_size * (dst->fifoStartChannelListSize)) > src_max || + pos + (element_size * (dst->fifoStartChannelListSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // FINN-generated code allocates memory and sets pointer when deserializing down. + dst->fifoStartChannelList = FINN_MALLOC((sizeof(NV0080_CTRL_FIFO_CHANNEL) * (dst->fifoStartChannelListSize))); + if (!dst->fifoStartChannelList) + { + status = NV_ERR_NO_MEMORY; + FINN_ERROR(NV_ERR_NO_MEMORY); + goto exit; + } + + FINN_MEMZERO(dst->fifoStartChannelList, (sizeof(NV0080_CTRL_FIFO_CHANNEL) * (dst->fifoStartChannelListSize))); + + } + + // Otherwise the pointer must be provided. + else if (!dst->fifoStartChannelList) + { + status = NV_ERR_INVALID_POINTER; + FINN_ERROR(NV_ERR_INVALID_POINTER); + goto exit; + } + + for (NvU64 i = 0; i < (dst->fifoStartChannelListSize); ++i) + { + // Deserialize each element. + status = Nv0080CtrlFifoChannelDeserialize(&pos, src_max, &(((NV0080_CTRL_FIFO_CHANNEL *) (NvP64_VALUE(dst->fifoStartChannelList)))[i]), sizeof(NV0080_CTRL_FIFO_CHANNEL), deser_up); + if (status != NV_OK) + goto exit; + } + } + + // Data is not present, set to NULL. + else + { + if (!deser_up) + dst->fifoStartChannelList = NULL; + } + + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlFifoStartSelectedChannelsParamsGetSerializedSize(const NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 80; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->fifoStartChannelList) + { + // Alignment + size = (size + 7) & ~7; + size += Nv0080CtrlFifoChannelGetSerializedSize((const NV0080_CTRL_FIFO_CHANNEL *) src->fifoStartChannelList) * ((src->fifoStartChannelListSize)); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlFifoGetChannellistParamsSerialize(const NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlFifoGetChannellistParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x8017; // Interface ID + header->message = 0xd; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->numChannels, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pChannelHandleList); + + if (src->pChannelHandleList) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pChannelHandleList, (src->numChannels) * 4); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Set data presence byte + *(pos++) = !!(src->pChannelList); + + if (src->pChannelList) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pChannelList, (src->numChannels) * 4); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlFifoGetChannellistParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->numChannels, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pChannelHandleList, pos, (dst->numChannels) * 4); + else + { + // Validate variable length buffer length + if ((dst->numChannels) * 4 < 4 || + pos + ((dst->numChannels) * 4) > src_max || + pos + ((dst->numChannels) * 4) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pChannelHandleList, pos, NvP64, (dst->numChannels) * 4); + } + } + else + { + if (!deser_up) + dst->pChannelHandleList = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pChannelList, pos, (dst->numChannels) * 4); + else + { + // Validate variable length buffer length + if ((dst->numChannels) * 4 < 4 || + pos + ((dst->numChannels) * 4) > src_max || + pos + ((dst->numChannels) * 4) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pChannelList, pos, NvP64, (dst->numChannels) * 4); + } + } + else + { + if (!deser_up) + dst->pChannelList = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlFifoGetChannellistParamsGetSerializedSize(const NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. + if (src->pChannelHandleList) + { + size += (src->numChannels) * 4; + } + + // Add padding + size = (size + 7) &~ 7; + + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pChannelList) + { + size += (src->numChannels) * 4; + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlGpuGetClasslistParamsSerialize(const NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlGpuGetClasslistParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x8002; // Interface ID + header->message = 0x1; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->numClasses, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->classList); + + if (src->classList) + { + FINN_MEMCPY_TO_BUFFER(pos, src->classList, (src->numClasses) * 4); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlGpuGetClasslistParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->numClasses, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->classList, pos, (dst->numClasses) * 4); + else + { + // Validate variable length buffer length + if ((dst->numClasses) * 4 < 4 || + pos + ((dst->numClasses) * 4) > src_max || + pos + ((dst->numClasses) * 4) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->classList, pos, NvP64, (dst->numClasses) * 4); + } + } + else + { + if (!deser_up) + dst->classList = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlGpuGetClasslistParamsGetSerializedSize(const NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->classList) + { + size += (src->numClasses) * 4; + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlGrGetCapsParamsSerialize(const NV0080_CTRL_GR_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlGrGetCapsParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x8011; // Interface ID + header->message = 0x2; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->capsTblSize, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->capsTbl); + + if (src->capsTbl) + { + FINN_MEMCPY_TO_BUFFER(pos, src->capsTbl, (src->capsTblSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlGrGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_GR_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_GR_GET_CAPS_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->capsTblSize, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->capsTbl, pos, (dst->capsTblSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->capsTblSize)) > src_max || + pos + ((dst->capsTblSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->capsTbl, pos, NvP64, (dst->capsTblSize)); + } + } + else + { + if (!deser_up) + dst->capsTbl = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlGrGetCapsParamsGetSerializedSize(const NV0080_CTRL_GR_GET_CAPS_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. + if (src->capsTbl) + { + size += (src->capsTblSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlHostGetCapsParamsSerialize(const NV0080_CTRL_HOST_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlHostGetCapsParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x8014; // Interface ID + header->message = 0x1; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->capsTblSize, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->capsTblSize < 0 || src->capsTblSize > NV0080_CTRL_HOST_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->capsTbl); + + if (src->capsTbl) + { + FINN_MEMCPY_TO_BUFFER(pos, src->capsTbl, (src->capsTblSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS 
Nv0080CtrlHostGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_HOST_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_HOST_GET_CAPS_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->capsTblSize, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->capsTblSize < 0 || dst->capsTblSize > NV0080_CTRL_HOST_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->capsTbl, pos, (dst->capsTblSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->capsTblSize)) > src_max || + pos + ((dst->capsTblSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->capsTbl, pos, NvP64, (dst->capsTblSize)); + } + } + else + { + if (!deser_up) + dst->capsTbl = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlHostGetCapsParamsGetSerializedSize(const NV0080_CTRL_HOST_GET_CAPS_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->capsTbl) + { + size += (src->capsTblSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv0080CtrlMsencGetCapsParamsSerialize(const NV0080_CTRL_MSENC_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv0080CtrlMsencGetCapsParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x801b; // Interface ID + header->message = 0x1; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->capsTblSize, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->capsTblSize < 0 || src->capsTblSize > NV0080_CTRL_MSENC_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->capsTbl); + + if (src->capsTbl) + { + FINN_MEMCPY_TO_BUFFER(pos, src->capsTbl, (src->capsTblSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv0080CtrlMsencGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV0080_CTRL_MSENC_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV0080_CTRL_MSENC_GET_CAPS_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->capsTblSize, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->capsTblSize < 0 || dst->capsTblSize > NV0080_CTRL_MSENC_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->capsTbl, pos, (dst->capsTblSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->capsTblSize)) > src_max || + pos + ((dst->capsTblSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->capsTbl, pos, NvP64, (dst->capsTblSize)); + } + } + else + { + if (!deser_up) + dst->capsTbl = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv0080CtrlMsencGetCapsParamsGetSerializedSize(const NV0080_CTRL_MSENC_GET_CAPS_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->capsTbl) + { + size += (src->capsTblSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv2080CtrlCeGetCapsParamsSerialize(const NV2080_CTRL_CE_GET_CAPS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv2080CtrlCeGetCapsParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x20802a; // Interface ID + header->message = 0x1; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->ceEngineType, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->capsTblSize, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->capsTblSize < 0 || src->capsTblSize > NV2080_CTRL_CE_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->capsTbl); + + if (src->capsTbl) + { + FINN_MEMCPY_TO_BUFFER(pos, src->capsTbl, (src->capsTblSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv2080CtrlCeGetCapsParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_CE_GET_CAPS_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV2080_CTRL_CE_GET_CAPS_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->ceEngineType, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->capsTblSize, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->capsTblSize < 0 || dst->capsTblSize > NV2080_CTRL_CE_CAPS_TBL_SIZE) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->capsTbl, pos, (dst->capsTblSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->capsTblSize)) > src_max || + pos + ((dst->capsTblSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->capsTbl, pos, NvP64, (dst->capsTblSize)); + } + } + else + { + if (!deser_up) + dst->capsTbl = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv2080CtrlCeGetCapsParamsGetSerializedSize(const NV2080_CTRL_CE_GET_CAPS_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->capsTbl) + { + size += (src->capsTblSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv2080CtrlGpuGetEnginesParamsSerialize(const NV2080_CTRL_GPU_GET_ENGINES_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv2080CtrlGpuGetEnginesParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x208001; // Interface ID + header->message = 0x23; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->engineCount, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->engineList); + + if (src->engineList) + { + FINN_MEMCPY_TO_BUFFER(pos, src->engineList, (src->engineCount) * 4); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv2080CtrlGpuGetEnginesParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_GPU_GET_ENGINES_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV2080_CTRL_GPU_GET_ENGINES_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->engineCount, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->engineList, pos, (dst->engineCount) * 4); + else + { + // Validate variable length buffer length + if ((dst->engineCount) * 4 < 4 || + pos + ((dst->engineCount) * 4) > src_max || + pos + ((dst->engineCount) * 4) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->engineList, pos, NvP64, (dst->engineCount) * 4); + } + } + else + { + if (!deser_up) + dst->engineList = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv2080CtrlGpuGetEnginesParamsGetSerializedSize(const NV2080_CTRL_GPU_GET_ENGINES_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->engineList) + { + size += (src->engineCount) * 4; + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv2080CtrlGpuGetEngineClasslistParamsSerialize(const NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv2080CtrlGpuGetEngineClasslistParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x208001; // Interface ID + header->message = 0x24; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->engineType, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->numClasses, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->classList); + + if (src->classList) + { + FINN_MEMCPY_TO_BUFFER(pos, src->classList, (src->numClasses) * 4); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv2080CtrlGpuGetEngineClasslistParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->engineType, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->numClasses, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->classList, pos, (dst->numClasses) * 4); + else + { + // Validate variable length buffer length + if ((dst->numClasses) * 4 < 4 || + pos + ((dst->numClasses) * 4) > src_max || + pos + ((dst->numClasses) * 4) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->classList, pos, NvP64, (dst->numClasses) * 4); + } + } + else + { + if (!deser_up) + dst->classList = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv2080CtrlGpuGetEngineClasslistParamsGetSerializedSize(const NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->classList) + { + size += (src->numClasses) * 4; + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv2080CtrlGpumonSamplesSerialize(const NV2080_CTRL_GPUMON_SAMPLES *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up, NvU64 interface, NvU64 message) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv2080CtrlGpumonSamplesGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = interface; // Interface ID + header->message = message; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x1f; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->bufSize, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->count, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->tracker, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->type, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pSamples); + + if (src->pSamples) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pSamples, (src->bufSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv2080CtrlGpumonSamplesDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_GPUMON_SAMPLES *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV2080_CTRL_GPUMON_SAMPLES) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x1f) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->bufSize, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->count, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->tracker, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->type, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pSamples, pos, (dst->bufSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->bufSize)) > src_max || + pos + ((dst->bufSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pSamples, pos, NvP64, (dst->bufSize)); + } + } + else + { + if (!deser_up) + dst->pSamples = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv2080CtrlGpumonSamplesGetSerializedSize(const NV2080_CTRL_GPUMON_SAMPLES *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 56; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pSamples) + { + size += (src->bufSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv2080CtrlI2cAccessParamsSerialize(const NV2080_CTRL_I2C_ACCESS_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv2080CtrlI2cAccessParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x208006; // Interface ID + header->message = 0x10; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x1ff; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->token, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->cmd, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->port, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->flags, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->status, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->dataBuffSize, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->dataBuffSize < 0 || src->dataBuffSize > NV2080_CTRL_I2C_MAX_ENTRIES) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_TO_BUFFER(pos, src->speed, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->encrClientID, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->data); + + if (src->data) + { + FINN_MEMCPY_TO_BUFFER(pos, src->data, (src->dataBuffSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv2080CtrlI2cAccessParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_I2C_ACCESS_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV2080_CTRL_I2C_ACCESS_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x1ff) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->token, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->cmd, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->port, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->flags, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->status, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->dataBuffSize, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->dataBuffSize < 0 || dst->dataBuffSize > NV2080_CTRL_I2C_MAX_ENTRIES) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_FROM_BUFFER(dst->speed, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->encrClientID, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->data, pos, (dst->dataBuffSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->dataBuffSize)) > src_max || + pos + ((dst->dataBuffSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->data, pos, NvP64, (dst->dataBuffSize)); + } + } + else + { + if (!deser_up) + dst->data = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv2080CtrlI2cAccessParamsGetSerializedSize(const NV2080_CTRL_I2C_ACCESS_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 72; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->data) + { + size += (src->dataBuffSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv2080CtrlNvdGetDumpParamsSerialize(const NV2080_CTRL_NVD_GET_DUMP_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv2080CtrlNvdGetDumpParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x208024; // Interface ID + header->message = 0x2; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->component, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->size, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pBuffer); + + if (src->pBuffer) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pBuffer, (src->size)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv2080CtrlNvdGetDumpParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV2080_CTRL_NVD_GET_DUMP_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV2080_CTRL_NVD_GET_DUMP_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->component, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->size, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pBuffer, pos, (dst->size)); + else + { + // Validate variable length buffer length + if (pos + ((dst->size)) > src_max || + pos + ((dst->size)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pBuffer, pos, NvP64, (dst->size)); + } + } + else + { + if (!deser_up) + dst->pBuffer = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv2080CtrlNvdGetDumpParamsGetSerializedSize(const NV2080_CTRL_NVD_GET_DUMP_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. + if (src->pBuffer) + { + size += (src->size); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv2080CtrlRcReadVirtualMemParamsSerialize(const NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv2080CtrlRcReadVirtualMemParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x208022; // Interface ID + header->message = 0x4; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0xf; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->virtAddress, NvU64, 8); + + FINN_COPY_TO_BUFFER(pos, src->hChannel, NvHandle, 4); + + FINN_COPY_TO_BUFFER(pos, src->bufferSize, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->bufferPtr); + + if (src->bufferPtr) + { + FINN_MEMCPY_TO_BUFFER(pos, src->bufferPtr, (src->bufferSize)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv2080CtrlRcReadVirtualMemParamsDeserialize(NvU8 **src, const NvU8 *src_max, 
NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0xf) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->virtAddress, pos, NvU64, 8); + + FINN_COPY_FROM_BUFFER(dst->hChannel, pos, NvHandle, 4); + + FINN_COPY_FROM_BUFFER(dst->bufferSize, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->bufferPtr, pos, (dst->bufferSize)); + else + { + // Validate variable length buffer length + if (pos + ((dst->bufferSize)) > src_max || + pos + ((dst->bufferSize)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->bufferPtr, pos, NvP64, (dst->bufferSize)); + } + } + else + { + if (!deser_up) + dst->bufferPtr = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv2080CtrlRcReadVirtualMemParamsGetSerializedSize(const NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 56; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->bufferPtr) + { + size += (src->bufferSize); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv402cCtrlI2cIndexedParamsSerialize(const NV402C_CTRL_I2C_INDEXED_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cIndexedParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x402c01; // Interface ID + header->message = 0x2; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0xff; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->flags, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->indexLength, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->indexLength < 0 || src->indexLength > NV402C_CTRL_I2C_INDEX_LENGTH_MAX) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_TO_BUFFER(pos, src->messageLength, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->address, NvU16, 2); + + FINN_COPY_TO_BUFFER(pos, src->portId, NvU8, 1); + + FINN_COPY_TO_BUFFER(pos, src->bIsWrite, NvU8, 1); + + FINN_MEMCPY_TO_BUFFER(pos, src->index, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pMessage); + + if (src->pMessage) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pMessage, (src->messageLength)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cIndexedParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_INDEXED_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_INDEXED_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0xff) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->flags, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->indexLength, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->indexLength < 0 || dst->indexLength > NV402C_CTRL_I2C_INDEX_LENGTH_MAX) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_FROM_BUFFER(dst->messageLength, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->address, pos, NvU16, 2); + + FINN_COPY_FROM_BUFFER(dst->portId, pos, NvU8, 1); + + FINN_COPY_FROM_BUFFER(dst->bIsWrite, pos, NvU8, 1); + + FINN_MEMCPY_FROM_BUFFER(dst->index, pos, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pMessage, pos, (dst->messageLength)); + else + { + // Validate variable length buffer length + if (pos + ((dst->messageLength)) > src_max || + pos + ((dst->messageLength)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pMessage, pos, NvP64, (dst->messageLength)); + } + } + else + { + if (!deser_up) + dst->pMessage = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cIndexedParamsGetSerializedSize(const NV402C_CTRL_I2C_INDEXED_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 64; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. + if (src->pMessage) + { + size += (src->messageLength); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv402cCtrlI2cTransactionTypeValueToId(NvU8 **buf, const NvU8 *buf_max, NvU64 convert_size) +{ + NV402C_CTRL_I2C_TRANSACTION_TYPE *pEnum = NULL; + NvU8 *buf_end = *buf + convert_size; + + // Bounds checking before overwriting data + if (buf_end > buf_max) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Convert each enum value to its corresponding ID. 
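+ // The conversion happens in place: each NV402C_CTRL_I2C_TRANSACTION_TYPE entry is
+ // overwritten with its wire ID (0 through 10), and any value outside the known set
+ // fails with NV_ERR_INVALID_ARGUMENT.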
+ while (*buf < buf_end) + { + pEnum = (NV402C_CTRL_I2C_TRANSACTION_TYPE *)*buf; + + switch (*pEnum) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW: + *pEnum = 0; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW: + *pEnum = 1; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + *pEnum = 2; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + *pEnum = 3; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW: + *pEnum = 4; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW: + *pEnum = 5; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + *pEnum = 6; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL: + *pEnum = 7; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL: + *pEnum = 8; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + *pEnum = 9; + break; + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + *pEnum = 10; + break; + default: + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + } + + *buf += sizeof(NV402C_CTRL_I2C_TRANSACTION_TYPE); + } + + return NV_OK; +} + +static NV_STATUS Nv402cCtrlI2cTransactionTypeIdtoValue(NvU8 **buf, const NvU8 *buf_max, NvU64 convert_size) +{ + NvU32 *pID = NULL; + NvU8 *buf_end = *buf + convert_size; + + // Bounds checking before overwriting data + if (buf_end > buf_max) + { + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + return NV_ERR_BUFFER_TOO_SMALL; + } + + // Convert each ID to its corresponding enum value. + while (*buf < buf_end) + { + pID = (NvU32 *)*buf; + + switch (*pID) + { + case 0: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW; + break; + case 1: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW; + break; + case 2: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW; + break; + case 3: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW; + break; + case 4: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW; + break; + case 5: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW; + break; + case 6: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW; + break; + case 7: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL; + break; + case 8: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL; + break; + case 9: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW; + break; + case 10: + *pID = NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC; + break; + default: + { + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + return NV_ERR_INVALID_ARGUMENT; + } + } + + *buf += sizeof(NvU32); + } + + return NV_OK; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusQuickRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataSmbusQuickRwGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 
0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->warFlags, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->bWrite, NvBool, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusQuickRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->warFlags, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->bWrite, pos, NvBool, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataSmbusQuickRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_QUICK_RW *src) +{ + // Suppress used-variable warnings. + (void) src; + + // This struct is static and its size is known at compile time. 
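+    // 48 = 40-byte serialization header (FINN_RM_API plus one NvU64 field mask)
+    // + warFlags (4) + bWrite (1), padded up to the next 8-byte boundary.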
+ return 48; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cByteRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataI2cByteRwGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x3; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->bWrite, NvBool, 1); + + FINN_COPY_TO_BUFFER(pos, src->message, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cByteRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x3) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->bWrite, pos, NvBool, 1); + + FINN_COPY_FROM_BUFFER(dst->message, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataI2cByteRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BYTE_RW *src) +{ + // Suppress used-variable warnings. + (void) src; + + // This struct is static and its size is known at compile time. 
+ return 48; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cBlockRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataI2cBlockRwGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->messageLength, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->bWrite, NvBool, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pMessage); + + if (src->pMessage) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pMessage, (src->messageLength)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cBlockRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->messageLength, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->bWrite, pos, NvBool, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pMessage, pos, (dst->messageLength)); + else + { + // Validate variable length buffer length + if (pos + ((dst->messageLength)) > src_max || + pos + ((dst->messageLength)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pMessage, pos, NvP64, (dst->messageLength)); + } + } + else + { + if (!deser_up) + dst->pMessage = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataI2cBlockRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
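+    // The message buffer is carried inline after the presence byte, so only
+    // messageLength additional bytes are needed when pMessage is non-NULL.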
+ if (src->pMessage) + { + size += (src->messageLength); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusByteRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataSmbusByteRwGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->bWrite, NvBool, 1); + + FINN_COPY_TO_BUFFER(pos, src->registerAddress, NvU8, 1); + + FINN_COPY_TO_BUFFER(pos, src->message, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusByteRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->bWrite, pos, NvBool, 1); + + FINN_COPY_FROM_BUFFER(dst->registerAddress, pos, NvU8, 1); + + FINN_COPY_FROM_BUFFER(dst->message, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataSmbusByteRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BYTE_RW *src) +{ + // Suppress used-variable warnings. + (void) src; + + // This struct is static and its size is known at compile time. 
+ return 48; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusWordRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataSmbusWordRwGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->message, NvU16, 2); + + FINN_COPY_TO_BUFFER(pos, src->bWrite, NvBool, 1); + + FINN_COPY_TO_BUFFER(pos, src->registerAddress, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusWordRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->message, pos, NvU16, 2); + + FINN_COPY_FROM_BUFFER(dst->bWrite, pos, NvBool, 1); + + FINN_COPY_FROM_BUFFER(dst->registerAddress, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataSmbusWordRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_WORD_RW *src) +{ + // Suppress used-variable warnings. + (void) src; + + // This struct is static and its size is known at compile time. 
+ return 48; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cBufferRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataI2cBufferRwGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x1f; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->warFlags, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->messageLength, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->bWrite, NvBool, 1); + + FINN_COPY_TO_BUFFER(pos, src->registerAddress, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pMessage); + + if (src->pMessage) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pMessage, (src->messageLength)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataI2cBufferRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x1f) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->warFlags, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->messageLength, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->bWrite, pos, NvBool, 1); + + FINN_COPY_FROM_BUFFER(dst->registerAddress, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pMessage, pos, (dst->messageLength)); + else + { + // Validate variable length buffer length + if (pos + ((dst->messageLength)) > src_max || + pos + ((dst->messageLength)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pMessage, pos, NvP64, (dst->messageLength)); + } + } + else + { + if (!deser_up) + dst->pMessage = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataI2cBufferRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 56; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pMessage) + { + size += (src->messageLength); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusBlockRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataSmbusBlockRwGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0xf; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->messageLength, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->bWrite, NvBool, 1); + + FINN_COPY_TO_BUFFER(pos, src->registerAddress, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pMessage); + + if (src->pMessage) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pMessage, (src->messageLength)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusBlockRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0xf) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->messageLength, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->bWrite, pos, NvBool, 1); + + FINN_COPY_FROM_BUFFER(dst->registerAddress, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pMessage, pos, (dst->messageLength)); + else + { + // Validate variable length buffer length + if (pos + ((dst->messageLength)) > src_max || + pos + ((dst->messageLength)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pMessage, pos, NvP64, (dst->messageLength)); + } + } + else + { + if (!deser_up) + dst->pMessage = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataSmbusBlockRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pMessage) + { + size += (src->messageLength); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusProcessCallSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataSmbusProcessCallGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->writeMessage, NvU16, 2); + + FINN_COPY_TO_BUFFER(pos, src->readMessage, NvU16, 2); + + FINN_COPY_TO_BUFFER(pos, src->registerAddress, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusProcessCallDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->writeMessage, pos, NvU16, 2); + + FINN_COPY_FROM_BUFFER(dst->readMessage, pos, NvU16, 2); + + FINN_COPY_FROM_BUFFER(dst->registerAddress, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataSmbusProcessCallGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_PROCESS_CALL *src) +{ + // Suppress used-variable warnings. + (void) src; + + // This struct is static and its size is known at compile time. 
+ return 48; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x1f; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->writeMessageLength, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->writeMessageLength < 0 || src->writeMessageLength > NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_TO_BUFFER(pos, src->readMessageLength, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->readMessageLength < 0 || src->readMessageLength > NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_TO_BUFFER(pos, src->registerAddress, NvU8, 1); + + FINN_MEMCPY_TO_BUFFER(pos, src->writeMessage, 32); + + FINN_MEMCPY_TO_BUFFER(pos, src->readMessage, 32); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x1f) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->writeMessageLength, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->writeMessageLength < 0 || dst->writeMessageLength > NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_FROM_BUFFER(dst->readMessageLength, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->readMessageLength < 0 || dst->readMessageLength > NV402C_CTRL_I2C_BLOCK_PROCESS_PROTOCOL_MAX) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_FROM_BUFFER(dst->registerAddress, pos, NvU8, 1); + + FINN_MEMCPY_FROM_BUFFER(dst->writeMessage, pos, 32); + + FINN_MEMCPY_FROM_BUFFER(dst->readMessage, pos, 32); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_PROCESS_CALL *src) +{ + // Suppress used-variable warnings. + (void) src; + + // This struct is static and its size is known at compile time. 
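+    // 120 = 40-byte serialization header + writeMessageLength (4) + readMessageLength (4)
+    // + registerAddress (1) + writeMessage (32) + readMessage (32), padded to 8 bytes.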
+ return 120; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x3f; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->warFlags, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->indexLength, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (src->indexLength < 0 || src->indexLength > NV402C_CTRL_I2C_INDEX_LENGTH_MAX) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_TO_BUFFER(pos, src->messageLength, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->bWrite, NvBool, 1); + + FINN_MEMCPY_TO_BUFFER(pos, src->index, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pMessage); + + if (src->pMessage) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pMessage, (src->messageLength)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x3f) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->warFlags, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->indexLength, pos, NvU32, 4); + + // Range validation, rewind buffer + pos -= 4; + + if (dst->indexLength < 0 || dst->indexLength > NV402C_CTRL_I2C_INDEX_LENGTH_MAX) + { + status = NV_ERR_OUT_OF_RANGE; + FINN_ERROR(NV_ERR_OUT_OF_RANGE); + goto exit; + } + + pos += 4; + + + FINN_COPY_FROM_BUFFER(dst->messageLength, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->bWrite, pos, NvBool, 1); + + FINN_MEMCPY_FROM_BUFFER(dst->index, pos, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pMessage, pos, (dst->messageLength)); + else + { + // Validate variable length buffer length + if (pos + ((dst->messageLength)) > src_max || + pos + ((dst->messageLength)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pMessage, pos, NvP64, (dst->messageLength)); + } + } + else + { + if (!deser_up) + dst->pMessage = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 64; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pMessage) + { + size += (src->messageLength); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataReadEdidDdcSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataReadEdidDdcGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0xf; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->messageLength, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->segmentNumber, NvU8, 1); + + FINN_COPY_TO_BUFFER(pos, src->registerAddress, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pMessage); + + if (src->pMessage) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pMessage, (src->messageLength)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataReadEdidDdcDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0xf) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->messageLength, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->segmentNumber, pos, NvU8, 1); + + FINN_COPY_FROM_BUFFER(dst->registerAddress, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pMessage, pos, (dst->messageLength)); + else + { + // Validate variable length buffer length + if (pos + ((dst->messageLength)) > src_max || + pos + ((dst->messageLength)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pMessage, pos, NvP64, (dst->messageLength)); + } + } + else + { + if (!deser_up) + dst->pMessage = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataReadEdidDdcGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pMessage) + { + size += (src->messageLength); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataSerialize(const NV402C_CTRL_I2C_TRANSACTION_DATA *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up, NV402C_CTRL_I2C_TRANSACTION_TYPE transType) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionDataGetSerializedSize(src, transType); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + + // + // Non-message type has no interface/message ID + // + + // Field bitmasks + header->fieldMask[0] = 0x7ff; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Field copying based on union selector + switch (transType) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusQuickRwSerialize(&src->smbusQuickData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW: + { + status = Nv402cCtrlI2cTransactionDataI2cByteRwSerialize(&src->i2cByteData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusByteRwSerialize(&src->smbusByteData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusWordRwSerialize(&src->smbusWordData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL: + { + status = Nv402cCtrlI2cTransactionDataSmbusProcessCallSerialize(&src->smbusProcessData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL: + { + status = Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallSerialize(&src->smbusBlockProcessData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + { + status = Nv402cCtrlI2cTransactionDataI2cBlockRwSerialize(&src->i2cBlockData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + { + status = Nv402cCtrlI2cTransactionDataI2cBufferRwSerialize(&src->i2cBufferData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusBlockRwSerialize(&src->smbusBlockData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwSerialize(&src->smbusMultibyteRegisterData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + { + status = 
Nv402cCtrlI2cTransactionDataReadEdidDdcSerialize(&src->edidData, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + } + + // Align + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionDataDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_DATA *dst, NvLength dst_size, NvBool deser_up, NV402C_CTRL_I2C_TRANSACTION_TYPE transType) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_DATA) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x7ff) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Field copying based on union selector + switch (transType) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusQuickRwDeserialize(&pos, src_max, &dst->smbusQuickData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW: + { + status = Nv402cCtrlI2cTransactionDataI2cByteRwDeserialize(&pos, src_max, &dst->i2cByteData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusByteRwDeserialize(&pos, src_max, &dst->smbusByteData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusWordRwDeserialize(&pos, src_max, &dst->smbusWordData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL: + { + status = Nv402cCtrlI2cTransactionDataSmbusProcessCallDeserialize(&pos, src_max, &dst->smbusProcessData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL: + { + status = Nv402cCtrlI2cTransactionDataSmbusBlockProcessCallDeserialize(&pos, src_max, &dst->smbusBlockProcessData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + { + status = Nv402cCtrlI2cTransactionDataI2cBlockRwDeserialize(&pos, src_max, &dst->i2cBlockData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + { + status = Nv402cCtrlI2cTransactionDataI2cBufferRwDeserialize(&pos, src_max, &dst->i2cBufferData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusBlockRwDeserialize(&pos, src_max, 
&dst->smbusBlockData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + { + status = Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwDeserialize(&pos, src_max, &dst->smbusMultibyteRegisterData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + { + status = Nv402cCtrlI2cTransactionDataReadEdidDdcDeserialize(&pos, src_max, &dst->edidData, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + } + + // Align + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionDataGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_DATA *src, NV402C_CTRL_I2C_TRANSACTION_TYPE transType) +{ + // Start with the header size + NvU64 size = 40; + + // Calculate size based on union selector + switch (transType) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_QUICK_RW: + { + size += 48; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BYTE_RW: + { + size += 48; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BYTE_RW: + { + size += 48; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_WORD_RW: + { + size += 48; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_PROCESS_CALL: + { + size += 48; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_PROCESS_CALL: + { + size += 120; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + { + size += Nv402cCtrlI2cTransactionDataI2cBlockRwGetSerializedSize((const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BLOCK_RW *) &(src->i2cBlockData)); + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + { + size += Nv402cCtrlI2cTransactionDataI2cBufferRwGetSerializedSize((const NV402C_CTRL_I2C_TRANSACTION_DATA_I2C_BUFFER_RW *) &(src->i2cBufferData)); + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + { + size += Nv402cCtrlI2cTransactionDataSmbusBlockRwGetSerializedSize((const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_BLOCK_RW *) &(src->smbusBlockData)); + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + { + size += Nv402cCtrlI2cTransactionDataSmbusMultibyteRegisterBlockRwGetSerializedSize((const NV402C_CTRL_I2C_TRANSACTION_DATA_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW *) &(src->smbusMultibyteRegisterData)); + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + { + size += Nv402cCtrlI2cTransactionDataReadEdidDdcGetSerializedSize((const NV402C_CTRL_I2C_TRANSACTION_DATA_READ_EDID_DDC *) &(src->edidData)); + break; + } + default: + { + break; + } + } + + // Add padding for alignment + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv402cCtrlI2cTransactionParamsSerialize(const NV402C_CTRL_I2C_TRANSACTION_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv402cCtrlI2cTransactionParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) 
+ { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x402c01; // Interface ID + header->message = 0x5; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x1f; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->flags, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->transType, NV402C_CTRL_I2C_TRANSACTION_TYPE, 4); + + // Rewind buffer for conversion + pos -= 4; + + status = Nv402cCtrlI2cTransactionTypeValueToId(&pos, dst_max, 4); + if (status != NV_OK) + goto exit; + + FINN_COPY_TO_BUFFER(pos, src->deviceAddress, NvU16, 2); + + FINN_COPY_TO_BUFFER(pos, src->portId, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + status = Nv402cCtrlI2cTransactionDataSerialize(&src->transData, &pos, dst_max, seri_up, src->transType); + if (status != NV_OK) + goto exit; + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv402cCtrlI2cTransactionParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV402C_CTRL_I2C_TRANSACTION_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV402C_CTRL_I2C_TRANSACTION_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x1f) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->flags, pos, NvU32, 4); + + status = Nv402cCtrlI2cTransactionTypeIdtoValue(&pos, src_max, 4); + if (status != NV_OK) + goto exit; + + // Rewind buffer after conversion + pos -= 4; + + FINN_COPY_FROM_BUFFER(dst->transType, pos, NV402C_CTRL_I2C_TRANSACTION_TYPE, 4); + + FINN_COPY_FROM_BUFFER(dst->deviceAddress, pos, NvU16, 2); + + FINN_COPY_FROM_BUFFER(dst->portId, pos, NvU8, 1); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + status = Nv402cCtrlI2cTransactionDataDeserialize(&pos, src_max, &dst->transData, dst_size, deser_up, dst->transType); + if (status != NV_OK) + goto exit; + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv402cCtrlI2cTransactionParamsGetSerializedSize(const NV402C_CTRL_I2C_TRANSACTION_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 56; + + // Add sizes that require runtime calculation + size += Nv402cCtrlI2cTransactionDataGetSerializedSize((const NV402C_CTRL_I2C_TRANSACTION_DATA *) &(src->transData), + src->transType); + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv83deCtrlDebugReadMemoryParamsSerialize(const NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv83deCtrlDebugReadMemoryParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x83de03; // Interface ID + header->message = 0x15; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0xf; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->offset, NvU64, 8); + + FINN_COPY_TO_BUFFER(pos, src->hMemory, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->length, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->buffer); + + if (src->buffer) + { + FINN_MEMCPY_TO_BUFFER(pos, src->buffer, (src->length)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv83deCtrlDebugReadMemoryParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + 
FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0xf) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->offset, pos, NvU64, 8); + + FINN_COPY_FROM_BUFFER(dst->hMemory, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->length, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->buffer, pos, (dst->length)); + else + { + // Validate variable length buffer length + if (pos + ((dst->length)) > src_max || + pos + ((dst->length)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->buffer, pos, NvP64, (dst->length)); + } + } + else + { + if (!deser_up) + dst->buffer = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv83deCtrlDebugReadMemoryParamsGetSerializedSize(const NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 56; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
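+    // This mirrors the unbounded-field layout emitted by
+    // Nv83deCtrlDebugReadMemoryParamsSerialize above: the one-byte data
+    // presence flag was counted by ++size, and a non-NULL buffer contributes
+    // src->length payload bytes before the final 8-byte alignment padding.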
+ if (src->buffer) + { + size += (src->length); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nv83deCtrlDebugWriteMemoryParamsSerialize(const NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nv83deCtrlDebugWriteMemoryParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0x83de03; // Interface ID + header->message = 0x16; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0xf; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->offset, NvU64, 8); + + FINN_COPY_TO_BUFFER(pos, src->hMemory, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->length, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->buffer); + + if (src->buffer) + { + FINN_MEMCPY_TO_BUFFER(pos, src->buffer, (src->length)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nv83deCtrlDebugWriteMemoryParamsDeserialize(NvU8 **src, const NvU8 *src_max, NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0xf) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->offset, pos, NvU64, 8); + + FINN_COPY_FROM_BUFFER(dst->hMemory, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->length, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->buffer, pos, (dst->length)); + else + { + // Validate variable length buffer length + if (pos + ((dst->length)) > src_max || + pos + ((dst->length)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->buffer, pos, NvP64, (dst->length)); + } + } + else + { + if (!deser_up) + dst->buffer = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nv83deCtrlDebugWriteMemoryParamsGetSerializedSize(const NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 56; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->buffer) + { + size += (src->length); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nvb06fCtrlGetEngineCtxDataParamsSerialize(const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nvb06fCtrlGetEngineCtxDataParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0xb06f01; // Interface ID + header->message = 0xc; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x7; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_TO_BUFFER(pos, src->engineID, NvU32, 4); + + FINN_COPY_TO_BUFFER(pos, src->size, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Set data presence byte + *(pos++) = !!(src->pEngineCtxBuff); + + if (src->pEngineCtxBuff) + { + FINN_MEMCPY_TO_BUFFER(pos, src->pEngineCtxBuff, (src->size)); + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nvb06fCtrlGetEngineCtxDataParamsDeserialize(NvU8 **src, const NvU8 *src_max, NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. 
They must match the expected values for now + if (header->fieldMask[0] != 0x7) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Static size fields + FINN_COPY_FROM_BUFFER(dst->engineID, pos, NvU32, 4); + + FINN_COPY_FROM_BUFFER(dst->size, pos, NvU32, 4); + + // Align after static size fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Unbounded fields + // Check data presence byte + if (*(pos++)) + { + if (deser_up) + FINN_MEMCPY_FROM_BUFFER(dst->pEngineCtxBuff, pos, (dst->size)); + else + { + // Validate variable length buffer length + if (pos + ((dst->size)) > src_max || + pos + ((dst->size)) < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + FINN_SET_PTR_TO_BUFFER(dst->pEngineCtxBuff, pos, NvP64, (dst->size)); + } + } + else + { + if (!deser_up) + dst->pEngineCtxBuff = NULL; + } + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nvb06fCtrlGetEngineCtxDataParamsGetSerializedSize(const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *src) +{ + // Start with the portion of the size known at compile time. + NvU64 size = 48; + + // Add sizes that require runtime calculation + // Increment size to account for the data presence byte. + ++size; + + // For non-NULL pointers, proceed to size calculation. 
+ if (src->pEngineCtxBuff) + { + size += (src->size); + } + + // Add padding + size = (size + 7) &~ 7; + + return size; +} + +static NV_STATUS Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsSerialize(const NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS *src, NvU8 **dst, const NvU8 *dst_max, NvBool seri_up) +{ + NvU8 *pos = *dst; + FINN_RM_API *header = (FINN_RM_API *)pos; + NvU64 serializedSize = Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsGetSerializedSize(src); + NV_STATUS status = NV_OK; + + // Validate buffer size + if (pos + serializedSize > dst_max) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Clear destination buffer + FINN_MEMZERO(pos, serializedSize); + + // Serialization header + header->version = FINN_SERIALIZATION_VERSION; // Serialization version + header->payloadSize = serializedSize; // Serialized size + header->interface = 0xb06f01; // Interface ID + header->message = 0xd; // Message ID + + // Field bitmasks + header->fieldMask[0] = 0x1; + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Unbounded fields + status = Nvb06fCtrlGetEngineCtxDataParamsSerialize(&src->params, &pos, dst_max, seri_up); + if (status != NV_OK) + goto exit; + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + +exit: + *dst = pos; + return status; +} + +static NV_STATUS Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsDeserialize(NvU8 **src, const NvU8 *src_max, NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS *dst, NvLength dst_size, NvBool deser_up) +{ + NvU8 *pos = *src; + FINN_RM_API *header = (FINN_RM_API *)pos; + NV_STATUS status = NV_OK; + + // Check that the destination struct fits within the destination buffer + // and that the declared size fits within the source buffer + if (sizeof(NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS) > dst_size || + header->payloadSize < (sizeof(FINN_RM_API) + sizeof(NvU64)) || + pos + header->payloadSize > src_max || + pos + header->payloadSize < pos) + { + status = NV_ERR_BUFFER_TOO_SMALL; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_BUFFER_TOO_SMALL); + goto exit; + } + + // Validate the field bitmasks. They must match the expected values for now + if (header->fieldMask[0] != 0x1) + { + status = NV_ERR_LIB_RM_VERSION_MISMATCH; + pos = (NvU8 *) &header->fieldMask; + FINN_ERROR(NV_ERR_LIB_RM_VERSION_MISMATCH); + goto exit; + } + + // Jump past header + pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64)); + + // Unbounded fields + status = Nvb06fCtrlGetEngineCtxDataParamsDeserialize(&pos, src_max, &dst->params, dst_size, deser_up); + if (status != NV_OK) + goto exit; + + // Align after unbounded fields + pos = (NvU8*)(((NvU64)pos + 7) &~ 7); + + // Check that the declared size matches the serialization outcome + if (header->payloadSize != (NvU64) (pos - *src)) + { + status = NV_ERR_INVALID_ARGUMENT; + pos = (NvU8 *) &header->payloadSize; + FINN_ERROR(NV_ERR_INVALID_ARGUMENT); + goto exit; + } + +exit: + *src = pos; + return status; +} + +static NvU64 Nvb06fCtrlCmdMigrateEngineCtxDataFinnParamsGetSerializedSize(const NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA_FINN_PARAMS *src) +{ + // Start with the portion of the size known at compile time. 
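+    // Presumably 40 == sizeof(FINN_RM_API) + sizeof(NvU64): the serialization
+    // header plus the single field-mask word skipped by the
+    // "pos += sizeof(FINN_RM_API) + (1 * sizeof(NvU64))" step above. This
+    // message has no static-size fields of its own; the nested
+    // NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS payload is added at runtime below.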
+ NvU64 size = 40; + + // Add sizes that require runtime calculation + size += Nvb06fCtrlGetEngineCtxDataParamsGetSerializedSize((const NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS *) &(src->params)); + // Add padding + size = (size + 7) &~ 7; + + return size; +} + diff --git a/src/nvidia/interface/rmp2pdefines.h b/src/nvidia/interface/rmp2pdefines.h new file mode 100644 index 000000000..2ef8458d6 --- /dev/null +++ b/src/nvidia/interface/rmp2pdefines.h @@ -0,0 +1,31 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _RMP2PDEFINES_H_ +#define _RMP2PDEFINES_H_ + +#define NVRM_P2P_PAGESIZE_SMALL_4K (4 << 10) +#define NVRM_P2P_PAGESIZE_BIG_64K (64 << 10) +#define NVRM_P2P_PAGESIZE_BIG_128K (128 << 10) + +#endif diff --git a/src/nvidia/kernel/inc/dbgbuffer.h b/src/nvidia/kernel/inc/dbgbuffer.h new file mode 100644 index 000000000..ab081b14c --- /dev/null +++ b/src/nvidia/kernel/inc/dbgbuffer.h @@ -0,0 +1,3 @@ + +#include "g_dbgbuffer_nvoc.h" + diff --git a/src/nvidia/kernel/inc/objrpc.h b/src/nvidia/kernel/inc/objrpc.h new file mode 100644 index 000000000..184a7d80c --- /dev/null +++ b/src/nvidia/kernel/inc/objrpc.h @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// #ifndef NVOC +// #include "g_objrpc_nvoc.h" +// #endif + +#ifndef _OBJRPC_H_ +#define _OBJRPC_H_ + +#include "vgpu/rpc_headers.h" +#include "diagnostics/nv_debug_dump.h" +#include "ctrl/ctrl2080/ctrl2080event.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080gpu.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080rc.h" // rmcontrol params (from hal) +#include "ctrl/ctrl2080/ctrl2080perf.h" // rmcontrol params (from hal) +#include "ctrl/ctrl0080/ctrl0080fb.h" // rmcontrol params (from hal) +#include "ctrl/ctrl0080/ctrl0080dma.h" // rmcontrol params (from hal) +#include "gpu/gsp/message_queue.h" + + +typedef struct GSP_FIRMWARE GSP_FIRMWARE; +typedef struct _object_vgpu OBJVGPU, *POBJVGPU; + +#include "g_rpc_hal.h" // For RPC_HAL_IFACES +#include "g_rpc_odb.h" // For RPC_HAL_IFACES + +struct OBJRPC{ + OBJECT_BASE_DEFINITION(RPC); + + struct { + NvU32 ipVersion; + }__nvoc_pbase_Object[1]; // This nested structure mechanism is to bypass NVOC + + // Message buffer fields + NvU32 *message_buffer; + NvU32 *message_buffer_priv; + MEMORY_DESCRIPTOR *pMemDesc_mesg; + NvU32 maxRpcSize; + + // UVM Message buffer fields + NvU32 *message_buffer_uvm; + NvU32 *message_buffer_priv_uvm; + MEMORY_DESCRIPTOR *pMemDesc_mesg_uvm; + + // Buffer for initial GSP message. + void *init_msg_buf; + RmPhysAddr init_msg_buf_pa; + + /* Message Queue */ + struct _message_queue_info *pMessageQueueInfo; + RmPhysAddr messageQueuePhysMem; + +}; + +// +// Utility macros for composing RPC messages. +// See for message formats. +// A message has a fixed-format header and optionally a variable length +// parameter after the header. +// + +#define vgpu_rpc_message_header_v ((rpc_message_header_v*)(pRpc->message_buffer)) +#define rpc_message (vgpu_rpc_message_header_v->rpc_message_data) + +static inline void _objrpcAssignIpVersion(struct OBJRPC* pRpc, NvU32 ipVersion) +{ + pRpc->__nvoc_pbase_Object->ipVersion = ipVersion; +} + +// Initialize and free RPC infrastructure +NV_STATUS initRpcInfrastructure_VGPU(OBJGPU *pGpu); +NV_STATUS freeRpcInfrastructure_VGPU(OBJGPU *pGpu); +OBJRPC *initRpcObject(OBJGPU *pGpu); +void rpcSetIpVersion(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 ipVersion); +void rpcObjIfacesSetup(OBJRPC *pRpc); +void rpcRmApiSetup(OBJGPU *pGpu); +NV_STATUS rpcWriteCommonHeader(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 func, NvU32 paramLength); +NV_STATUS rpcWriteCommonHeaderSim(OBJGPU *pGpu); +NV_STATUS _allocRpcMemDesc(OBJGPU *pGpu, NvU64 size, NvBool bContig, NV_ADDRESS_SPACE addrSpace, MEMORY_DESCRIPTOR **ppMemDesc, void **ppMemBuffer, void **ppMemBufferPriv); +void _freeRpcMemDesc(OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, void **ppMemBuffer, void **ppMemBufferPriv); +NV_STATUS vgpuGspSetupBuffers(OBJGPU *pGpu); +void vgpuGspTeardownBuffers(OBJGPU *pGpu); + +// +// OBJGPU RPC member accessors. +// Historically, they have been defined inline by the following macros. +// These definitions were migrated to gpu.c in order to avoid having to include object headers in +// this file. 
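+//
+// A minimal usage sketch (the real call sites are the NV_RM_RPC_* macros in
+// vgpu/rpc.h): fetch the per-GPU RPC object and NULL-check it before issuing
+// an RPC, e.g.
+//
+//     OBJRPC *pRpc = GPU_GET_RPC(pGpu);
+//     if (pRpc != NULL)
+//         status = rpcGetStaticInfo_HAL(pGpu, pRpc);
+//     else
+//         status = NV_ERR_INSUFFICIENT_RESOURCES;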
+// + +OBJRPC *gpuGetGspClientRpc(OBJGPU*); +OBJRPC *gpuGetVgpuRpc(OBJGPU*); +OBJRPC *gpuGetRpc(OBJGPU*); + +#define GPU_GET_GSPCLIENT_RPC(u) gpuGetGspClientRpc(u) +#define GPU_GET_VGPU_RPC(u) gpuGetVgpuRpc(u) +#define GPU_GET_RPC(u) gpuGetRpc(u) + +#endif // _OBJRPC_H_ diff --git a/src/nvidia/kernel/inc/objsweng.h b/src/nvidia/kernel/inc/objsweng.h new file mode 100644 index 000000000..fd46e5d05 --- /dev/null +++ b/src/nvidia/kernel/inc/objsweng.h @@ -0,0 +1,3 @@ + +#include "g_objsweng_nvoc.h" + diff --git a/src/nvidia/kernel/inc/objtmr.h b/src/nvidia/kernel/inc/objtmr.h new file mode 100644 index 000000000..4b47b5020 --- /dev/null +++ b/src/nvidia/kernel/inc/objtmr.h @@ -0,0 +1,3 @@ + +#include "g_objtmr_nvoc.h" + diff --git a/src/nvidia/kernel/inc/sweng/dispsw.h b/src/nvidia/kernel/inc/sweng/dispsw.h new file mode 100644 index 000000000..63597365c --- /dev/null +++ b/src/nvidia/kernel/inc/sweng/dispsw.h @@ -0,0 +1,3 @@ + +#include "g_dispsw_nvoc.h" + diff --git a/src/nvidia/kernel/inc/syncgpuboost.h b/src/nvidia/kernel/inc/syncgpuboost.h new file mode 100644 index 000000000..054726d78 --- /dev/null +++ b/src/nvidia/kernel/inc/syncgpuboost.h @@ -0,0 +1,3 @@ + +#include "g_syncgpuboost_nvoc.h" + diff --git a/src/nvidia/kernel/inc/tmr.h b/src/nvidia/kernel/inc/tmr.h new file mode 100644 index 000000000..fd73d0eed --- /dev/null +++ b/src/nvidia/kernel/inc/tmr.h @@ -0,0 +1,3 @@ + +#include "g_tmr_nvoc.h" + diff --git a/src/nvidia/kernel/inc/vgpu/rpc.h b/src/nvidia/kernel/inc/vgpu/rpc.h new file mode 100644 index 000000000..c95edf165 --- /dev/null +++ b/src/nvidia/kernel/inc/vgpu/rpc.h @@ -0,0 +1,740 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Declarations for the RPC module. +// +// Description: +// This module declares the RPC interface functions/macros. 
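+//      When NV_RM_STUB_RPC is non-zero, the NV_RM_RPC_* entry points below
+//      collapse to inline no-op stubs; otherwise they expand to the real
+//      GSP-client / vGPU RPC plumbing.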
+// +//****************************************************************************** + +#ifndef __vgpu_dev_nv_rpc_h__ +#define __vgpu_dev_nv_rpc_h__ + +#include "class/cl84a0.h" +#include "rpc_headers.h" +#include "gpu/dce_client/dce_client.h" +#include "objrpc.h" +#include "rpc_vgpu.h" + +#include "vgpu_events.h" + +#include "kernel/gpu/fifo/kernel_fifo.h" + +typedef struct ContextDma ContextDma; + +#define NV_RM_STUB_RPC 0 + +#if NV_RM_STUB_RPC + +static inline void NV_RM_RPC_ALLOC_SHARE_DEVICE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_MEMORY(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_CHANNEL(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_OBJECT(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MAP_MEMORY_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_UNMAP_MEMORY_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_SUBDEVICE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_DUP_OBJECT(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_IDLE_CHANNELS(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_ALLOC_EVENT(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_CONTROL(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_MANAGE_HW_RESOURCE_BIND(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SET_GUEST_SYSTEM_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_PERF_GET_PSTATE_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_PERF_GET_VIRTUAL_PSTATE_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_PERF_GET_LEVEL_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_UNLOADING_GUEST_DRIVER(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_GPU_EXEC_REG_OPS(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_GET_STATIC_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_REGISTER_VIRTUAL_EVENT_BUFFER(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_UPDATE_BAR_PDE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SET_PAGE_DIRECTORY(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_UNSET_PAGE_DIRECTORY(OBJGPU *pGpu, ...) { return; } + +static inline void NV_RM_RPC_GET_GSP_STATIC_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_GSP_SET_SYSTEM_INFO(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SET_REGISTRY(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SUBDEV_EVENT_SET_NOTIFICATION(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_GSP_MSG_TIMING(OBJGPU *pGpu, ...) { return; } + +static inline void NV_RM_RPC_VGPU_PF_REG_READ32(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION(OBJGPU *pGpu, ...) { return; } + +// RPC free stubs +static inline void NV_RM_RPC_SIM_FREE_INFRA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_FREE(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_FREE_ON_ERROR(OBJGPU *pGpu, ...) { return; } + +// Simulation stubs +static inline void NV_RM_RPC_SIM_LOAD_ESCAPE_FUNCTIONS(OBJOS *pOS, ...) { return; } +static inline void NV_RM_RPC_SIM_ADD_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) 
{ return; } +static inline void NV_RM_RPC_SIM_UPDATE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_DELETE_DISP_CONTEXT_DMA(OBJGPU *pGpu, ...) { return; } +static inline void NV_RM_RPC_SIM_UPDATE_DISP_CHANNEL_INFO(OBJGPU *pGpu, ...) { return; } + +#else // NV_RM_STUB_RPC + +#define NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, status) \ + do \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV0000_ALLOC_PARAMETERS root_alloc_params = {0}; \ + \ + root_alloc_params.hClient = hclient; \ + \ + if (!IsT234D(pGpu)) \ + { \ + RmClient *pClient = NULL; \ + \ + /* Get process ID from the client database */ \ + if (NV_OK == serverutilGetClientUnderLock(hclient, &pClient)) \ + { \ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); \ + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); \ + \ + if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL) \ + { \ + root_alloc_params.processID = KERNEL_PID; \ + } \ + else \ + { \ + root_alloc_params.processID = pClient->ProcID; \ + NV_ASSERT(root_alloc_params.processID == osGetCurrentProcess()); \ + } \ + } \ + else \ + NV_ASSERT(0); \ + } \ + \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, NV01_NULL_OBJECT, \ + NV01_NULL_OBJECT, NV01_ROOT, \ + &root_alloc_params); \ + \ + if (status == NV_OK) \ + { \ + NV0080_ALLOC_PARAMETERS device_alloc_params = {0}; \ + \ + device_alloc_params.hClientShare = hclientshare; \ + device_alloc_params.hTargetClient = htargetclient; \ + device_alloc_params.hTargetDevice = htargetdevice; \ + device_alloc_params.flags = allocflags; \ + device_alloc_params.vaSpaceSize = vasize; \ + \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hclient, hdevice, \ + hclass, &device_alloc_params); \ + } \ + else \ + NV_ASSERT(0); \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_MEMORY(pGpu, hclient, hdevice, hmemory, hclass, \ + flags, pmemdesc, status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) \ + && (!(IS_VIRTUAL_WITH_SRIOV(pGpu) && \ + !gpuIsWarBug200577889SriovHeavyEnabled(pGpu) && \ + !NV_IS_MODS))) { \ + if (IS_GSP_CLIENT(pGpu) && IsT234D(pGpu)) \ + { \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV_MEMORY_LIST_ALLOCATION_PARAMS listAllocParams = {0}; \ + listAllocParams.pteAdjust = pmemdesc->PteAdjust; \ + listAllocParams.format = memdescGetPteKind(pmemdesc); \ + listAllocParams.size = pmemdesc->Size; \ + listAllocParams.pageCount = pmemdesc->PageCount; \ + listAllocParams.pageNumberList = memdescGetPteArray(pmemdesc, AT_GPU); \ + listAllocParams.hClient = NV01_NULL_OBJECT; \ + listAllocParams.hParent = NV01_NULL_OBJECT; \ + listAllocParams.hObject = NV01_NULL_OBJECT; \ + listAllocParams.limit = pmemdesc->Size - 1; \ + listAllocParams.flagsOs02 = (DRF_DEF(OS02,_FLAGS,_MAPPING,_NO_MAP) | \ + DRF_DEF(OS02,_FLAGS,_PHYSICALITY,_NONCONTIGUOUS) | \ + (flags & DRF_SHIFTMASK(NVOS02_FLAGS_COHERENCY))); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, \ + hmemory, NV01_MEMORY_LIST_SYSTEM, &listAllocParams); \ + } \ + else \ + { \ + status = rpcAllocMemory_HAL(pGpu, pRpc, hclient, hdevice, hmemory, \ + hclass, flags, pmemdesc); \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_MAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, offset, length, flags, \ + dmaoffset, status) \ + do \ + { \ 
+ OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) && \ + !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \ + status = rpcMapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, offset, \ + length, flags, dmaoffset); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + + +#define NV_RM_RPC_UNMAP_MEMORY_DMA(pGpu, hclient, hdevice, hdma, hmemory, flags, dmaoffset, \ + status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL) && \ + !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) \ + status = rpcUnmapMemoryDma_HAL(pGpu, pRpc, hclient, hdevice, hdma, hmemory, \ + flags, dmaoffset); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_IDLE_CHANNELS(pGpu, phclients, phdevices, phchannels, \ + nentries, flags, timeout, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcIdleChannels_HAL(pGpu, pRpc, phclients, phdevices, \ + phchannels, nentries, flags, timeout); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_ALLOC_SHARE_DEVICE(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + NV_RM_RPC_ALLOC_SHARE_DEVICE_FWCLIENT(pGpu, hclient, hdevice, hclientshare, htargetclient, htargetdevice, hclass, \ + allocflags, vasize, vamode, status); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_CONTROL(pGpu, hClient, hObject, cmd, pParams, paramSize, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->Control(pRmApi, hClient, hObject, cmd, \ + pParams, paramSize); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_ALLOC_CHANNEL(pGpu, hclient, hparent, hchannel, hclass, \ + pGpfifoAllocParams, pchid, status) \ + do \ + { \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hparent, hchannel, \ + hclass, pGpfifoAllocParams); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_OBJECT(pGpu, hclient, hchannel, hobject, hclass, params, status)\ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hchannel, hobject, \ + hclass, params); \ + } \ + } while (0) + +#define NV_RM_RPC_FREE(pGpu, hclient, hparent, hobject, status) \ + do \ + { \ + (void) hparent; \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + 
NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->Free(pRmApi, hclient, hobject); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_FREE_ON_ERROR(pGpu, hclient, hparent, hobject) \ + do \ + { \ + (void) hparent; \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + /* used in failure cases, macro doesn't overwrite rmStatus */ \ + if (pRpc != NULL) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + pRmApi->Free(pRmApi, hclient, hobject); \ + } \ + } \ + while (0) + +#define NV_RM_RPC_ALLOC_EVENT(pGpu, hclient, hparentclient, hchannel, hobject, \ + hevent, hclass, idx, status) \ + do \ + { \ + (void) hchannel; \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV0005_ALLOC_PARAMETERS allocParams = {0}; \ + allocParams.hParentClient = hparentclient; \ + allocParams.hClass = hclass; \ + allocParams.notifyIndex = idx | NV01_EVENT_CLIENT_RM; \ + allocParams.data = 0; \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, \ + hobject, hevent, \ + hclass, &allocParams); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_ALLOC_SUBDEVICE(pGpu, hclient, hdevice, hsubdevice, \ + hclass, subDeviceInst, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + NV2080_ALLOC_PARAMETERS alloc_params = {0}; \ + \ + alloc_params.subDeviceId = subDeviceInst; \ + \ + status = pRmApi->AllocWithHandle(pRmApi, hclient, hdevice, hsubdevice, \ + hclass, &alloc_params); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_DUP_OBJECT(pGpu, hclient, hparent, hobject, hclient_src, \ + hobject_src, flags, bAutoFreeRpc, pDstRef, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); \ + status = pRmApi->DupObject(pRmApi, hclient, hparent, \ + &hobject, hclient_src, \ + hobject_src, flags); \ + if ((bAutoFreeRpc) && (pDstRef != NULL) && (status == NV_OK)) \ + { \ + RmResource *pRmResource; \ + pRmResource = dynamicCast(((RsResourceRef*)pDstRef)->pResource, RmResource); \ + pRmResource->bRpcFree = NV_TRUE; \ + } \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_VGPU_PF_REG_READ32(pGpu, address, value, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + NV_ASSERT(IS_GSP_CLIENT(pGpu)); \ + status = rpcVgpuPfRegRead32_HAL(pGpu, pRpc, address, value, 0); \ + } else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +/* + * manage HW resources RPC macro + */ +#define NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(pGpu, hclient, hdevice, hresource, \ + pfballocinfo, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcHwResourceAlloc(pGpu, pRpc, hclient, hdevice, \ + hresource, 
pfballocinfo); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(pGpu, hclient, hdevice, hresource, \ + flags, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcHwResourceFree(pGpu, pRpc, hclient, hdevice, \ + hresource, flags); \ + if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_MANAGE_HW_RESOURCE_BIND(pGpu, hclient, hdevice, hresource, \ + virtaddr, physaddr, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcHwResourceBind(pGpu, pRpc, hclient, hdevice, \ + hresource, virtaddr, physaddr); \ + if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_SIM_LOAD_ESCAPE_FUNCTIONS(pos) \ + do \ + { \ + NV_ASSERT(pos); \ + /* load simulation escape read/write routines */ \ + pos->osSimEscapeRead = RmRpcSimEscapeRead; \ + pos->osSimEscapeWrite = RmRpcSimEscapeWrite; \ + } \ + while(0) + +/* outgoing updates to the plugin */ +#define NV_RM_RPC_SIM_ADD_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma, channelnum) \ + do \ + { \ + NV_STATUS status; \ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \ + status = RmRpcSimAddDisplayContextDma(pGpu, hclient, pcontextdma, channelnum); \ + NV_ASSERT(status == NV_OK); \ + SLI_LOOP_END \ + } \ + while(0) + +#define NV_RM_RPC_SIM_UPDATE_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma, physaddrnew, \ + physlimitnew, pagesize, ptekind) \ + do \ + { \ + NV_STATUS status; \ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \ + status = RmRpcSimUpdateDisplayContextDma(pGpu, hclient, pcontextdma, physaddrnew,\ + physlimitnew, pagesize, ptekind); \ + NV_ASSERT(status == NV_OK); \ + SLI_LOOP_END \ + } \ + while(0) + +#define NV_RM_RPC_SIM_DELETE_DISP_CONTEXT_DMA(pGpu, hclient, pcontextdma) \ + do \ + { \ + NV_STATUS status; \ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \ + status = RmRpcSimDeleteDisplayContextDma(pGpu, hclient, pcontextdma); \ + NV_ASSERT(status == NV_OK); \ + SLI_LOOP_END \ + } \ + while(0) + +#define NV_RM_RPC_SIM_UPDATE_DISP_CHANNEL_INFO(pGpu, hclient, pcontextdma, channelnum) \ + do \ + { \ + NV_STATUS status; \ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) \ + status = RmRpcSimUpdateDispChannelInfo(pGpu, hclient, pcontextdma, channelnum); \ + NV_ASSERT(status == NV_OK); \ + SLI_LOOP_END \ + } \ + while(0) + +/* + * free RPC infrastructure for simulation (not VGPU object) + */ +#define NV_RM_RPC_SIM_FREE_INFRA(pGpu, status) \ + do \ + { \ + NV_ASSERT(status == NV_OK); \ + status = RmRpcSimFreeInfra(pGpu); \ + } \ + while (0) + +#define NV_RM_RPC_SET_GUEST_SYSTEM_INFO(pGpu, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcSetGuestSystemInfo(pGpu, pRpc); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_PERF_GET_VIRTUAL_PSTATE_INFO(pGpu, hClient, hObject, pParams, \ + pClkInfos, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = RmRpcPerfGetVirtualPstateInfo(pGpu, pRpc, hClient, hObject,\ + pParams, pClkInfos); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while(0) + +#define NV_RM_RPC_UNLOADING_GUEST_DRIVER(pGpu, status, bSuspend, 
bGc6Entering, newPMLevel) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcUnloadingGuestDriver_HAL(pGpu, pRpc, bSuspend, bGc6Entering, newPMLevel); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_GPU_EXEC_REG_OPS(pGpu, hClient, hObject, pParams, pRegOps, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcGpuExecRegOps_HAL(pGpu, pRpc, hClient, hObject, pParams, pRegOps); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } \ + while (0) + +#define NV_RM_RPC_GET_STATIC_INFO(pGpu, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcGetStaticInfo_HAL(pGpu, pRpc); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_REGISTER_VIRTUAL_EVENT_BUFFER(pGpu, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcRegisterVirtualEventBuffer_HAL(pGpu, pRpc, hClient, hSubdevice, hEventBuffer, hBufferHeader, hRecordBuffer, recordSize, recordCount); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_UPDATE_BAR_PDE(pGpu, barType, entryValue, entryLevelShift, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcUpdateBarPde_HAL(pGpu, pRpc, barType, entryValue, entryLevelShift); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_SET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcSetPageDirectory_HAL(pGpu, pRpc, hClient, hDevice, pParams); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_UNSET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcUnsetPageDirectory_HAL(pGpu, pRpc, hClient, hDevice, pParams); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION(pGpu, operation, status) \ + do \ + { \ + /* Call into RPC layer */ \ + OBJRPC *pRpc; \ + pRpc = GPU_GET_RPC(pGpu); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + { \ + status = rpcPmaScrubberSharedBufferGuestPagesOperation_HAL(pGpu, pRpc, operation); \ + } \ + } \ + while (0) + +// +// DCE_CLIENT_RM specific RPCs +// + +#define NV_RM_RPC_DCE_RM_INIT(pGpu, bInit, status) do {} while (0) + +// +// GSP_CLIENT_RM specific RPCs +// + +#define NV_RM_RPC_GET_GSP_STATIC_INFO(pGpu, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcGetGspStaticInfo_HAL(pGpu, pRpc); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_GSP_SET_SYSTEM_INFO(pGpu, status) \ + 
do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcGspSetSystemInfo_HAL(pGpu, pRpc); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_SET_REGISTRY(pGpu, status) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcSetRegistry_HAL(pGpu, pRpc); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(pGpu, status, pPrbEnc, pNvDumpState, \ + component) \ + do \ + { \ + OBJRPC *pRpc = GPU_GET_RPC(pGpu); \ + NV_ASSERT(pRpc != NULL); \ + if ((status == NV_OK) && (pRpc != NULL)) \ + status = rpcDumpProtobufComponent_HAL(pGpu, pRpc, pPrbEnc, \ + pNvDumpState, component); \ + else if (pRpc == NULL) \ + status = NV_ERR_INSUFFICIENT_RESOURCES; \ + } while (0) + +#define NV_RM_RPC_RMFS_INIT(pGpu, statusQueueMemDesc, status) do {} while(0) + +#define NV_RM_RPC_RMFS_CLOSE_QUEUE(pGpu, status) do {} while(0) + +#define NV_RM_RPC_RMFS_CLEANUP(pGpu, status) do {} while(0) + +#define NV_RM_RPC_RMFS_TEST(pGpu, numReps, testData1, testData2, \ + testData3, status) do {} while(0) + +static inline NV_STATUS RmRpcSimFreeInfra(OBJGPU *pGpu, ...) { return NV_OK; } +static inline NV_STATUS RmRpcSimAddDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimUpdateDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimDeleteDisplayContextDma(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimUpdateDispChannelInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcHwResourceAlloc(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcHwResourceFree(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcHwResourceBind(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcPerfGetPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcPerfGetCurrentPstate(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcPerfGetVirtualPstateInfo(OBJGPU *pGpu, ...) { return NV_ERR_NOT_SUPPORTED; } + +static inline NV_STATUS RmRpcSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 index, + NvU32 count, NvU32 *data) { return NV_ERR_NOT_SUPPORTED; } +static inline NV_STATUS RmRpcSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 index, + NvU32 count, NvU32 data) { return NV_ERR_NOT_SUPPORTED; } + +NV_STATUS RmRpcSetGuestSystemInfo(OBJGPU *pGpu, OBJRPC *pRpc); + +/*! + * Defines the size of the GSP sim access buffer. + */ +#define GSP_SIM_ACCESS_BUFFER_SIZE 0x4000 + +/*! + * Defines the structure used to pass SimRead data from Kernel to Physical RM. 
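+ *
+ * The data array holds GSP_SIM_ACCESS_BUFFER_SIZE NvU32 words of simulator
+ * read results; seq is presumably a sequence counter used to signal when a
+ * new batch of results has been published. Both members are volatile so the
+ * consumer rereads them from memory on every access.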
+ */ +typedef struct SimAccessBuffer +{ + volatile NvU32 data[GSP_SIM_ACCESS_BUFFER_SIZE]; + volatile NvU32 seq; +} SimAccessBuffer; + +#endif // NV_RM_STUB_RPC + +#endif // __vgpu_dev_nv_rpc_h__ diff --git a/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h new file mode 100644 index 000000000..52bf33c44 --- /dev/null +++ b/src/nvidia/kernel/inc/vgpu/rpc_global_enums.h @@ -0,0 +1,236 @@ +#ifndef _RPC_GLOBAL_ENUMS_H_ +#define _RPC_GLOBAL_ENUMS_H_ + +#ifndef X +# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC, +# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + X(RM, NOP) // 0 + X(RM, SET_GUEST_SYSTEM_INFO) // 1 + X(RM, ALLOC_ROOT) // 2 + X(RM, ALLOC_DEVICE) // 3 deprecated + X(RM, ALLOC_MEMORY) // 4 + X(RM, ALLOC_CTX_DMA) // 5 + X(RM, ALLOC_CHANNEL_DMA) // 6 + X(RM, MAP_MEMORY) // 7 + X(RM, BIND_CTX_DMA) // 8 deprecated + X(RM, ALLOC_OBJECT) // 9 + X(RM, FREE) //10 + X(RM, LOG) //11 + X(RM, ALLOC_VIDMEM) //12 + X(RM, UNMAP_MEMORY) //13 + X(RM, MAP_MEMORY_DMA) //14 + X(RM, UNMAP_MEMORY_DMA) //15 + X(RM, GET_EDID) //16 + X(RM, ALLOC_DISP_CHANNEL) //17 + X(RM, ALLOC_DISP_OBJECT) //18 + X(RM, ALLOC_SUBDEVICE) //19 + X(RM, ALLOC_DYNAMIC_MEMORY) //20 + X(RM, DUP_OBJECT) //21 + X(RM, IDLE_CHANNELS) //22 + X(RM, ALLOC_EVENT) //23 + X(RM, SEND_EVENT) //24 + X(RM, REMAPPER_CONTROL) //25 deprecated + X(RM, DMA_CONTROL) //26 + X(RM, DMA_FILL_PTE_MEM) //27 + X(RM, MANAGE_HW_RESOURCE) //28 + X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated + X(RM, CREATE_FB_SEGMENT) //30 + X(RM, DESTROY_FB_SEGMENT) //31 + X(RM, ALLOC_SHARE_DEVICE) //32 + X(RM, DEFERRED_API_CONTROL) //33 + X(RM, REMOVE_DEFERRED_API) //34 + X(RM, SIM_ESCAPE_READ) //35 + X(RM, SIM_ESCAPE_WRITE) //36 + X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37 + X(RM, FREE_VIDMEM_VIRT) //38 + X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP + X(RM, PERF_GET_PERFMON_SAMPLE) //40 + X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated + X(RM, PERF_GET_LEVEL_INFO) //42 + X(RM, MAP_SEMA_MEMORY) //43 + X(RM, UNMAP_SEMA_MEMORY) //44 + X(RM, SET_SURFACE_PROPERTIES) //45 + X(RM, CLEANUP_SURFACE) //46 + X(RM, UNLOADING_GUEST_DRIVER) //47 + X(RM, TDR_SET_TIMEOUT_STATE) //48 + X(RM, SWITCH_TO_VGA) //49 + X(RM, GPU_EXEC_REG_OPS) //50 + X(RM, GET_STATIC_INFO) //51 + X(RM, ALLOC_VIRTMEM) //52 + X(RM, UPDATE_PDE_2) //53 + X(RM, SET_PAGE_DIRECTORY) //54 + X(RM, GET_STATIC_PSTATE_INFO) //55 + X(RM, TRANSLATE_GUEST_GPU_PTES) //56 + X(RM, RESERVED_57) //57 + X(RM, RESET_CURRENT_GR_CONTEXT) //58 + X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59 + X(RM, GET_ENGINE_UTILIZATION) //60 + X(RM, UPDATE_GPU_PDES) //61 + X(RM, GET_ENCODER_CAPACITY) //62 + X(RM, VGPU_PF_REG_READ32) //63 + X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64 + X(GSP, GET_GSP_STATIC_INFO) //65 + X(RM, RMFS_INIT) //66 + X(RM, RMFS_CLOSE_QUEUE) //67 + X(RM, RMFS_CLEANUP) //68 + X(RM, RMFS_TEST) //69 + X(RM, UPDATE_BAR_PDE) //70 + X(RM, CONTINUATION_RECORD) //71 + X(RM, GSP_SET_SYSTEM_INFO) //72 + X(RM, SET_REGISTRY) //73 + X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated + X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated + X(GSP, GSP_RM_CONTROL) //76 + X(RM, GET_STATIC_INFO2) //77 + X(RM, DUMP_PROTOBUF_COMPONENT) //78 + X(RM, UNSET_PAGE_DIRECTORY) //79 + X(RM, GET_CONSOLIDATED_STATIC_INFO) //80 + X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated + X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated + X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) //83 deprecated + X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated + X(RM, CTRL_SET_VGPU_FB_USAGE) 
//85 + X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86 + X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87 + X(RM, CTRL_RESET_CHANNEL) //88 + X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89 + X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90 + X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91 + X(RM, CTRL_PERF_BOOST) //92 + X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93 + X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94 + X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95 + X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96 + X(RM, CTRL_GPFIFO_SCHEDULE) //97 + X(RM, CTRL_SET_TIMESLICE) //98 + X(RM, CTRL_PREEMPT) //99 + X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100 + X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101 + X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102 + X(GSP, GSP_RM_ALLOC) //103 + X(RM, CTRL_GET_P2P_CAPS_V2) //104 + X(RM, CTRL_CIPHER_AES_ENCRYPT) //105 + X(RM, CTRL_CIPHER_SESSION_KEY) //106 + X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107 + X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108 + X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109 + X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110 + X(RM, CTRL_GPU_PROMOTE_CTX) //111 + X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112 + X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113 + X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114 + X(RM, CTRL_GPU_INITIALIZE_CTX) //115 + X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116 + X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117 + X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118 + X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119 + X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120 + X(RM, CTRL_GET_CE_PCE_MASK) //121 + X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122 + X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123 + X(RM, CTRL_GET_NVLINK_STATUS) //124 + X(RM, CTRL_GET_P2P_CAPS) //125 + X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126 + X(RM, RESERVED_0) //127 + X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128 + X(RM, CTRL_RESERVE_HWPM_LEGACY) //129 + X(RM, CTRL_B0CC_EXEC_REG_OPS) //130 + X(RM, CTRL_BIND_PM_RESOURCES) //131 + X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132 + X(RM, CTRL_DBG_RESUME_CONTEXT) //133 + X(RM, CTRL_DBG_EXEC_REG_OPS) //134 + X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135 + X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136 + X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137 + X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138 + X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139 + X(RM, CTRL_ALLOC_PMA_STREAM) //140 + X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141 + X(RM, CTRL_FB_GET_INFO_V2) //142 + X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143 + X(RM, CTRL_GR_GET_CTX_BUFFER_INFO) //144 + X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145 + X(RM, CTRL_GPU_EVICT_CTX) //146 + X(RM, CTRL_FB_GET_FS_INFO) //147 + X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148 + X(RM, CTRL_STOP_CHANNEL) //149 + X(RM, CTRL_GR_PC_SAMPLING_MODE) //150 + X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151 + X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152 + X(RM, CTRL_FREE_PMA_STREAM) //153 + X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154 + X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155 + X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156 + X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157 + X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158 + X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159 + X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160 + X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161 + X(UVM, UVM_PAGING_CHANNEL_MAP) //162 + X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163 + X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164 + X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165 + X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166 + X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) //167 + X(RM, DCE_RM_INIT) //168 + X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169 + X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) 
//170 + X(RM, GET_PLCABLE_ADDRESS_KIND) //171 + X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172 + X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173 + X(RM, CTRL_GET_MMU_DEBUG_MODE) //174 + X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175 + X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176 + X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177 + X(RM, DISABLE_CHANNELS) //178 + X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179 + X(RM, CTRL_FABRIC_MEM_STATS) //180 + X(RM, SAVE_HIBERNATION_DATA) //181 + X(RM, RESTORE_HIBERNATION_DATA) //182 + X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183 + X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184 + X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185 + X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186 + X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187 + X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188 + X(RM, NUM_FUNCTIONS) //END +#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +}; +# undef X +# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H +#endif + +// RPC Events. Used by GSP-RM. +#ifndef E +# define E(RPC) NV_VGPU_MSG_EVENT_##RPC, +# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +enum { +#endif + E(FIRST_EVENT = 0x1000) // 0x1000 + E(GSP_INIT_DONE) // 0x1001 + E(GSP_RUN_CPU_SEQUENCER) // 0x1002 + E(POST_EVENT) // 0x1003 + E(RC_TRIGGERED) // 0x1004 + E(MMU_FAULT_QUEUED) // 0x1005 + E(OS_ERROR_LOG) // 0x1006 + E(RG_LINE_INTR) // 0x1007 + E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008 + E(SIM_READ) // 0x1009 + E(SIM_WRITE) // 0x100a + E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b + E(UCODE_LIBOS_PRINT) // 0x100c + E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d + E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e + E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f + E(VGPU_CONFIG) // 0x1010 + E(DISPLAY_MODESET) // 0x1011 + E(NUM_EVENTS) // END +#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +}; +# undef E +# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H +#endif + +#endif /*_RPC_GLOBAL_ENUMS_H_*/ diff --git a/src/nvidia/kernel/inc/vgpu/rpc_headers.h b/src/nvidia/kernel/inc/vgpu/rpc_headers.h new file mode 100644 index 000000000..edc37a744 --- /dev/null +++ b/src/nvidia/kernel/inc/vgpu/rpc_headers.h @@ -0,0 +1,230 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __vgpu_rpc_nv_headers_h__ +#define __vgpu_rpc_nv_headers_h__ + +#include "ctrl/ctrl0080/ctrl0080perf.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +#include "nvstatus.h" + +#define MAX_GPC_COUNT 32 + +/* + * Maximum number of RegOps that can be accommodated within one RPC call + * due to RPC message buffer size being limited to 4k + */ +#define VGPU_MAX_REGOPS_PER_RPC 100 + +#define VGPU_RESERVED_HANDLE_BASE 0xCAF3F000 +#define VGPU_RESERVED_HANDLE_RANGE 0x1000 + +#define VGPU_CALC_PARAM_OFFSET(prev_offset, prev_params) (prev_offset + NV_ALIGN_UP(sizeof(prev_params), sizeof(NvU32))) + +/* + * Message header (in buffer addressed by ring entry) + * + * If message is invalid (bad length or signature), signature and length + * are forced to be valid (if in range of descriptor) and result is set to + * NV_VGPU_RESULT_INVALID_MESSAGE_FORMAT. Otherwise, signature, length, and + * function are always unchanged and result is always set. + * + * The function message header, if defined, immediately follows the main message + * header. + */ +#define NV_VGPU_MSG_HEADER_VERSION_MAJOR 31:24 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MINOR 23:16 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MAJOR_TOT 0x00000003 /* R---D */ +#define NV_VGPU_MSG_HEADER_VERSION_MINOR_TOT 0x00000000 /* R---D */ +/* signature must equal valid value */ +#define NV_VGPU_MSG_SIGNATURE_VALID 0x43505256 /* RW--V */ + +#include "rpc_global_enums.h" + +/* result code */ +/* codes below 0xFF000000 must match exactly the NV_STATUS codes in nvos.h */ +#define NV_VGPU_MSG_RESULT__RM NV_ERR_GENERIC:0x00000000 /* RW--D */ +#define NV_VGPU_MSG_RESULT_SUCCESS NV_OK +#define NV_VGPU_MSG_RESULT_CARD_NOT_PRESENT NV_ERR_CARD_NOT_PRESENT +#define NV_VGPU_MSG_RESULT_DUAL_LINK_INUSE NV_ERR_DUAL_LINK_INUSE +#define NV_VGPU_MSG_RESULT_GENERIC NV_ERR_GENERIC +#define NV_VGPU_MSG_RESULT_GPU_NOT_FULL_POWER NV_ERR_GPU_NOT_FULL_POWER +#define NV_VGPU_MSG_RESULT_IN_USE NV_ERR_IN_USE +#define NV_VGPU_MSG_RESULT_INSUFFICIENT_RESOURCES NV_ERR_INSUFFICIENT_RESOURCES +#define NV_VGPU_MSG_RESULT_INVALID_ACCESS_TYPE NV_ERR_INVALID_ACCESS_TYPE +#define NV_VGPU_MSG_RESULT_INVALID_ARGUMENT NV_ERR_INVALID_ARGUMENT +#define NV_VGPU_MSG_RESULT_INVALID_BASE NV_ERR_INVALID_BASE +#define NV_VGPU_MSG_RESULT_INVALID_CHANNEL NV_ERR_INVALID_CHANNEL +#define NV_VGPU_MSG_RESULT_INVALID_CLASS NV_ERR_INVALID_CLASS +#define NV_VGPU_MSG_RESULT_INVALID_CLIENT NV_ERR_INVALID_CLIENT +#define NV_VGPU_MSG_RESULT_INVALID_COMMAND NV_ERR_INVALID_COMMAND +#define NV_VGPU_MSG_RESULT_INVALID_DATA NV_ERR_INVALID_DATA +#define NV_VGPU_MSG_RESULT_INVALID_DEVICE NV_ERR_INVALID_DEVICE +#define NV_VGPU_MSG_RESULT_INVALID_DMA_SPECIFIER NV_ERR_INVALID_DMA_SPECIFIER +#define NV_VGPU_MSG_RESULT_INVALID_EVENT NV_ERR_INVALID_EVENT +#define NV_VGPU_MSG_RESULT_INVALID_FLAGS NV_ERR_INVALID_FLAGS +#define NV_VGPU_MSG_RESULT_INVALID_FUNCTION NV_ERR_INVALID_FUNCTION +#define NV_VGPU_MSG_RESULT_INVALID_HEAP NV_ERR_INVALID_HEAP +#define NV_VGPU_MSG_RESULT_INVALID_INDEX NV_ERR_INVALID_INDEX +#define NV_VGPU_MSG_RESULT_INVALID_LIMIT NV_ERR_INVALID_LIMIT +#define NV_VGPU_MSG_RESULT_INVALID_METHOD NV_ERR_INVALID_METHOD +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_BUFFER NV_ERR_INVALID_OBJECT_BUFFER +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_ERROR NV_ERR_INVALID_OBJECT +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_HANDLE NV_ERR_INVALID_OBJECT_HANDLE +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_NEW NV_ERR_INVALID_OBJECT_NEW +#define 
NV_VGPU_MSG_RESULT_INVALID_OBJECT_OLD NV_ERR_INVALID_OBJECT_OLD +#define NV_VGPU_MSG_RESULT_INVALID_OBJECT_PARENT NV_ERR_INVALID_OBJECT_PARENT +#define NV_VGPU_MSG_RESULT_INVALID_OFFSET NV_ERR_INVALID_OFFSET +#define NV_VGPU_MSG_RESULT_INVALID_OWNER NV_ERR_INVALID_OWNER +#define NV_VGPU_MSG_RESULT_INVALID_PARAM_STRUCT NV_ERR_INVALID_PARAM_STRUCT +#define NV_VGPU_MSG_RESULT_INVALID_PARAMETER NV_ERR_INVALID_PARAMETER +#define NV_VGPU_MSG_RESULT_INVALID_POINTER NV_ERR_INVALID_POINTER +#define NV_VGPU_MSG_RESULT_INVALID_REGISTRY_KEY NV_ERR_INVALID_REGISTRY_KEY +#define NV_VGPU_MSG_RESULT_INVALID_STATE NV_ERR_INVALID_STATE +#define NV_VGPU_MSG_RESULT_INVALID_STRING_LENGTH NV_ERR_INVALID_STRING_LENGTH +#define NV_VGPU_MSG_RESULT_INVALID_XLATE NV_ERR_INVALID_XLATE +#define NV_VGPU_MSG_RESULT_IRQ_NOT_FIRING NV_ERR_IRQ_NOT_FIRING +#define NV_VGPU_MSG_RESULT_MULTIPLE_MEMORY_TYPES NV_ERR_MULTIPLE_MEMORY_TYPES +#define NV_VGPU_MSG_RESULT_NOT_SUPPORTED NV_ERR_NOT_SUPPORTED +#define NV_VGPU_MSG_RESULT_OPERATING_SYSTEM NV_ERR_OPERATING_SYSTEM +#define NV_VGPU_MSG_RESULT_PROTECTION_FAULT NV_ERR_PROTECTION_FAULT +#define NV_VGPU_MSG_RESULT_TIMEOUT NV_ERR_TIMEOUT +#define NV_VGPU_MSG_RESULT_TOO_MANY_PRIMARIES NV_ERR_TOO_MANY_PRIMARIES +#define NV_VGPU_MSG_RESULT_IRQ_EDGE_TRIGGERED NV_ERR_IRQ_EDGE_TRIGGERED +#define NV_VGPU_MSG_RESULT_GUEST_HOST_DRIVER_MISMATCH NV_ERR_LIB_RM_VERSION_MISMATCH + +/* + * codes above 0xFF000000 and below 0xFF100000 must match one-for-one + * the vmiop_error_t codes in vmioplugin.h, with 0xFF000000 added. + */ +#define NV_VGPU_MSG_RESULT__VMIOP 0xFF000007:0xFF000000 /* RW--D */ +#define NV_VGPU_MSG_RESULT_VMIOP_INVAL 0xFF000001 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_RESOURCE 0xFF000002 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_RANGE 0xFF000003 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_READ_ONLY 0xFF000004 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_NOT_FOUND 0xFF000005 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_NO_ADDRESS_SPACE 0xFF000006 /* RW--V */ +#define NV_VGPU_MSG_RESULT_VMIOP_TIMEOUT 0xFF000007 /* RW--V */ +/* RPC-specific error codes */ +#define NV_VGPU_MSG_RESULT__RPC 0xFF100007:0xFF100000 /* RW--D */ +#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_FUNCTION 0xFF100001 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_INVALID_MESSAGE_FORMAT 0xFF100002 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_HANDLE_NOT_FOUND 0xFF100003 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_HANDLE_EXISTS 0xFF100004 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_RM_ERROR 0xFF100005 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_UNKNOWN_VMIOP_ERROR 0xFF100006 /* RW--V */ +#define NV_VGPU_MSG_RESULT_RPC_RESERVED_HANDLE 0xFF100007 /* RW--V */ +/* RPC-specific code in result for incomplete request */ +#define NV_VGPU_MSG_RESULT_RPC_PENDING 0xFFFFFFFF /* RW--V */ +/* shared union field */ +#define NV_VGPU_MSG_UNION_INIT 0x00000000 /* RW--V */ + +/* + * common PTEDESC message defines (used w/ ALLOC_MEMORY, ALLOC_VIDMEM, FILL_PTE_MEM) + */ +#define NV_VGPU_PTEDESC_INIT 0x00000000 /* RWI-V */ +#define NV_VGPU_PTEDESC__PROD 0x00000000 /* RW--V */ +#define NV_VGPU_PTEDESC_IDR_NONE 0x00000000 /* RW--V */ +#define NV_VGPU_PTEDESC_IDR_SINGLE 0x00000001 /* RW--V */ +#define NV_VGPU_PTEDESC_IDR_DOUBLE 0x00000002 /* RW--V */ +#define NV_VGPU_PTEDESC_IDR_TRIPLE 0x00000003 /* RW--V */ + +#define NV_VGPU_PTE_PAGE_SIZE 0x1000 /* R---V */ +#define NV_VGPU_PTE_SIZE 4 /* R---V */ +#define NV_VGPU_PTE_INDEX_SHIFT 10 /* R---V */ +#define NV_VGPU_PTE_INDEX_MASK 0x3FF /* R---V */ + +#define NV_VGPU_PTE_64_PAGE_SIZE 
0x1000 /* R---V */ +#define NV_VGPU_PTE_64_SIZE 8 /* R---V */ +#define NV_VGPU_PTE_64_INDEX_SHIFT 9 /* R---V */ +#define NV_VGPU_PTE_64_INDEX_MASK 0x1FF /* R---V */ + +/* + * LOG message + */ +#define NV_VGPU_LOG_LEVEL_FATAL 0x00000000 /* RW--V */ +#define NV_VGPU_LOG_LEVEL_ERROR 0x00000001 /* RW--V */ +#define NV_VGPU_LOG_LEVEL_NOTICE 0x00000002 /* RW--V */ +#define NV_VGPU_LOG_LEVEL_STATUS 0x00000003 /* RW--V */ +#define NV_VGPU_LOG_LEVEL_DEBUG 0x00000004 /* RW--V */ + +/* + * Enums specifying the BAR number that we are going to update its PDE + */ +typedef enum +{ + NV_RPC_UPDATE_PDE_BAR_1, + NV_RPC_UPDATE_PDE_BAR_2, + NV_RPC_UPDATE_PDE_BAR_INVALID, +} NV_RPC_UPDATE_PDE_BAR_TYPE; + +/* + * UVM method stream guest pages operation + */ +typedef enum +{ + NV_RPC_GUEST_PAGE_MAP, + NV_RPC_GUEST_PAGE_UNMAP, +} NV_RPC_GUEST_PAGE_OPERATION; + +/* + * UVM method stream guest page size + */ +typedef enum +{ + NV_RPC_GUEST_PAGE_SIZE_4K, + NV_RPC_GUEST_PAGE_SIZE_UNSUPPORTED, +} NV_RPC_GUEST_PAGE_SIZE; + +/* + * UVM paging channel VASPACE operation + */ +typedef enum +{ + UVM_PAGING_CHANNEL_VASPACE_ALLOC, + UVM_PAGING_CHANNEL_VASPACE_FREE, +} UVM_PAGING_CHANNEL_VASPACE_OPERATION; + +typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS +{ + NvU32 headIndex; + NvU32 maxHResolution; + NvU32 maxVResolution; +} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS; + +typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS +{ + NvU32 numHeads; + NvU32 maxNumHeads; +} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS; + + +/* + * Maximum guest pages that can be mapped for UVM method stream + */ +#define UVM_METHOD_STREAM_MAX_GUEST_PAGES_v1C_05 500 + +#define PMA_SCRUBBER_SHARED_BUFFER_MAX_GUEST_PAGES_v1F_0C 500 + +#endif // __vgpu_rpc_nv_headers_h__ diff --git a/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h b/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h new file mode 100644 index 000000000..f9789daf2 --- /dev/null +++ b/src/nvidia/kernel/inc/vgpu/rpc_vgpu.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __vgpu_dev_nv_rpc_vgpu_h__ +#define __vgpu_dev_nv_rpc_vgpu_h__ + +static NV_INLINE void NV_RM_RPC_ALLOC_LOCAL_USER(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_ALLOC_VIDMEM(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_ALLOC_VIRTMEM(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_MAP_MEMORY(OBJGPU *pGpu, ...) 
{ } +static NV_INLINE void NV_RM_RPC_UNMAP_MEMORY(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_READ_EDID(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_DMA_FILL_PTE_MEM(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_CREATE_FB_SEGMENT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_DESTROY_FB_SEGMENT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_DEFERRED_API_CONTROL(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_REMOVE_DEFERRED_API(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_FREE_VIDMEM_VIRT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_LOG(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SET_GUEST_SYSTEM_INFO_EXT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_ENGINE_UTILIZATION(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_MAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_UNMAP_SEMA_MEMORY(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SET_SURFACE_PROPERTIES(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_CLEANUP_SURFACE(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SWITCH_TO_VGA(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_TDR_SET_TIMEOUT_STATE(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_STATIC_INFO(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_CONSOLIDATED_GR_STATIC_INFO(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_STATIC_PSTATE_INFO(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_UPDATE_PDE_2(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_TRANSLATE_GUEST_GPU_PTES(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SET_SEMA_MEM_VALIDATION_STATE(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_RESET_CURRENT_GR_CONTEXT(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_ENCODER_CAPACITY(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_STATIC_INFO2(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_ALLOC_CONTEXT_DMA(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_GET_PLCABLE_ADDRESS_KIND(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_UPDATE_GPU_PDES(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_DISABLE_CHANNELS(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_SAVE_HIBERNATION_DATA(OBJGPU *pGpu, ...) { } +static NV_INLINE void NV_RM_RPC_RESTORE_HIBERNATION_DATA(OBJGPU *pGpu, ...) { } + +#endif // __vgpu_dev_nv_rpc_vgpu_h__ diff --git a/src/nvidia/kernel/inc/vgpu/sdk-structures.h b/src/nvidia/kernel/inc/vgpu/sdk-structures.h new file mode 100644 index 000000000..d69982b8f --- /dev/null +++ b/src/nvidia/kernel/inc/vgpu/sdk-structures.h @@ -0,0 +1,272 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _RPC_SDK_STRUCTURES_H_ +#define _RPC_SDK_STRUCTURES_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include "rpc_headers.h" +#include "nvctassert.h" + +typedef struct vmiopd_SM_info { + NvU32 version; + NvU32 regBankCount; + NvU32 regBankRegCount; + NvU32 maxWarpsPerSM; + NvU32 maxThreadsPerWarp; + NvU32 geomGsObufEntries; + NvU32 geomXbufEntries; + NvU32 maxSPPerSM; + NvU32 rtCoreCount; +} VMIOPD_GRSMINFO; + +// NV_SCAL_FAMILY_MAX_FBPS 16 +#define MAX_FBPS 16 //Maximum number of FBPs + +#define OBJ_MAX_HEADS 4 + +#define MAX_NVDEC_ENGINES 5 // Maximum number of NVDEC engines + +// NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES(256) / NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES(32) +#define MAX_ITERATIONS_DEVICE_INFO_TABLE 8 + +// NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES(512) / NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES(64) +#define MAX_ITERATIONS_DYNAMIC_BLACKLIST 8 + +#define NV0000_GPUACCT_RPC_PID_MAX_QUERY_COUNT 1000 + +#define NV2080_CTRL_CLK_ARCH_MAX_DOMAINS_v1E_0D 32 + +#define NV_RM_RPC_NO_MORE_DATA_TO_READ 0 +#define NV_RM_RPC_MORE_RPC_DATA_TO_READ 1 + +//Maximum EXEC_PARTITIONS +#define NVC637_CTRL_MAX_EXEC_PARTITIONS_v18_05 8 + +//Maximum ECC Addresses +#define NV2080_CTRL_ECC_GET_LATEST_ECC_ADDRESSES_MAX_COUNT_v18_04 32 + +#define NV2080_CTRL_NVLINK_MAX_LINKS_v15_02 6 +#define NV2080_CTRL_NVLINK_MAX_LINKS_v1A_18 12 + +#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v15_02 8 +#define NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE_v1F_0D 9 + +#define NV2080_CTRL_FB_FS_INFO_MAX_QUERIES_v1A_1D 96 +#define NV2080_CTRL_FB_FS_INFO_MAX_QUERY_SIZE_v1A_1D 24 +#define NV2080_CTRL_GRMGR_GR_FS_INFO_MAX_QUERIES_v1A_1D 96 +#define NV2080_CTRL_GRMGR_MAX_SMC_IDS_v1A_1D 8 + +#define NV0080_CTRL_GR_INFO_MAX_SIZE_1B_04 (0x0000002C) +#define NV0080_CTRL_GR_INFO_MAX_SIZE_1C_01 (0x00000030) +#define NV0080_CTRL_GR_INFO_MAX_SIZE_1E_02 (0x00000032) +#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES_1B_04 8 +#define NV2080_CTRL_INTERNAL_GR_MAX_SM_v1B_05 256 +#define NV2080_CTRL_INTERNAL_GR_MAX_SM_v1E_03 240 +#define NV2080_CTRL_INTERNAL_GR_MAX_GPC_v1B_05 8 +#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT_v1B_05 0x19 +#define NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT_v1C_03 10 +#define NV2080_CTRL_INTERNAL_GR_MAX_GPC_v1C_03 12 +#define NV2080_CTRL_MC_GET_STATIC_INTR_TABLE_MAX_v1E_09 32 + +// Defined this intermediate RM-RPC structure for making RPC call from Guest as +// we have the restriction of passing max 4kb of data to plugin and the +// NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS is way more than that. +// This structure is similar to NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS +// RM control structure. 
+// Added passIndex member to identify from which index (in the full RM pid list +// on host)onwards the data needs to be read. Caller should initialize passIndex +// to NV_RM_RPC_MORE_RPC_DATA_TO_READ, and keep making RPC calls until the +// passIndex value is returned as NV_RM_RPC_NO_MORE_DATA_TO_READ by the RPC. +typedef struct +{ + NvU32 gpuId; + NvU32 passIndex; + NvU32 pidTbl[NV0000_GPUACCT_RPC_PID_MAX_QUERY_COUNT]; + NvU32 pidCount; +} NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_RPC_EX; + +typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG_v03_00[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES]; + +typedef NvV32 NvRmctrlCmd; + +struct pte_desc +{ + NvU32 idr:2; + NvU32 reserved1:14; + NvU32 length:16; + union { + NvU64 pte; // PTE when IDR==0; PDE when IDR > 0 + NvU64 pde; // PTE when IDR==0; PDE when IDR > 0 + } pte_pde[] NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0 +}; + +/* + * VGPU_CACHED_RMCTRL_LIST + * + * This macro contains the list of RmCtrls which return static values and can be cached in + * guest RM. + * + * To cache a RmCtrl, add it to VGPU_CACHED_RMCTRL_LIST in the format: + * VGPU_CACHED_RMCTRL_ENTRY(, ) + */ + +#define VGPU_CACHED_RMCTRL_LIST \ + VGPU_CACHED_RMCTRL_ENTRY(NV2080_CTRL_CMD_PERF_VPSTATES_GET_INFO, NV2080_CTRL_PERF_VPSTATES_INFO) \ + VGPU_CACHED_RMCTRL_ENTRY(NV2080_CTRL_CMD_GPU_GET_MAX_SUPPORTED_PAGE_SIZE, NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS) + +enum VGPU_CACHED_RMCTRL_INDICES +{ + #define VGPU_CACHED_RMCTRL_ENTRY(ctrlCmd,type) \ + VGPU_CACHED_RMCTRL_IDX_##ctrlCmd, + + VGPU_CACHED_RMCTRL_LIST + + #undef VGPU_CACHED_RMCTRL_ENTRY + + VGPU_CACHED_RMCTRL_IDX_COUNT, +}; + +typedef struct vgpu_cached_rmctrl +{ + void *ptr; + NvBool bCached; + NV_STATUS status; +}vgpu_cached_rmctrl; + +typedef struct vgpu_cached_rmctrl_list +{ + vgpu_cached_rmctrl vgpu_cached_rmctrls[VGPU_CACHED_RMCTRL_IDX_COUNT]; +} vgpu_cached_rmctrl_list; + +typedef struct VGPU_BSP_CAPS +{ + NvU8 capsTbl[NV0080_CTRL_BSP_CAPS_TBL_SIZE]; +} VGPU_BSP_CAPS; + +#define VGPU_PAGE_SIZE 4096 +#define NUM_MFN_PAGES 16 + +typedef struct HYPERV_SHARED_MEMORY_DESCRIPTOR +{ + union + { + struct + { + NvU32 shm_lock; + NvU64 vmbus_packet_id NV_ALIGN_BYTES(8); + }; + + char control_page[VGPU_PAGE_SIZE]; + }; + + NvU32 mfn_data[NUM_MFN_PAGES * VGPU_PAGE_SIZE / sizeof(NvU32)]; + +} HYPERV_SHARED_MEMORY_DESCRIPTOR; + +#define HYPERV_SHM_MFN_WRITE_WAIT 0 +#define HYPERV_SHM_MFN_WRITE_COMPLETE 1 + +#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v15_01 (0x00000014) +#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v1A_04 (0x00000014) +#define NV2080_CTRL_GPU_ECC_UNIT_COUNT_v1C_09 (0x00000016) + +#define NV2080_ENGINE_TYPE_LAST_v18_01 (0x0000002a) +#define NV2080_ENGINE_TYPE_LAST_v1C_09 (0x00000034) + +#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE_v1A_0F (0x00000033) +#define NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE_v1C_09 (0x00000034) + +//Maximum GMMU_FMT_LEVELS +#define GMMU_FMT_MAX_LEVELS_v05_00 5 +#define GMMU_FMT_MAX_LEVELS_v1A_12 6 + +//Maximum MMU FMT sub levels +#define MMU_FMT_MAX_SUB_LEVELS_v09_02 2 + +//Maximum number of supported TDP clients +#define NV2080_CTRL_PERF_RATED_TDP_CLIENT_NUM_CLIENTS_v1A_1F 5 + +//Maximum number of SMs whose error state can be read in single call +#define NV83DE_CTRL_DEBUG_MAX_SMS_PER_CALL_v16_03 100 + +// Workaround for bug 200702083 (#15) +#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1A_15 0x2F +#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1A_24 0x33 +#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1E_01 0x35 +#define NV2080_CTRL_FB_INFO_MAX_LIST_SIZE_1F_0F 0x36 
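The paging scheme described earlier in this header for NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_RPC_EX amounts to a simple fetch loop on the guest side: issue the RPC repeatedly and accumulate pidTbl entries until the host reports no more data. The sketch below is a minimal illustration of that loop only; collectAccountingPids() and issueGpuacctPidsRpc() are hypothetical names standing in for the real guest RPC plumbing, which is not part of this header.

static NV_STATUS
collectAccountingPids(NvU32 gpuId, NvU32 *pOutPids, NvU32 outCapacity, NvU32 *pOutCount)
{
    NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS_RPC_EX params;
    NV_STATUS status;
    NvU32 i;

    *pOutCount = 0;

    portMemSet(&params, 0, sizeof(params));
    params.gpuId     = gpuId;
    // Initialize passIndex as described in the structure comment above.
    params.passIndex = NV_RM_RPC_MORE_RPC_DATA_TO_READ;

    do
    {
        // Each pass returns at most NV0000_GPUACCT_RPC_PID_MAX_QUERY_COUNT PIDs,
        // keeping every individual RPC within the 4K message buffer limit.
        status = issueGpuacctPidsRpc(&params);   // hypothetical transport helper
        if (status != NV_OK)
            return status;

        // Append this pass's PIDs to the caller's buffer.
        for (i = 0; (i < params.pidCount) && (*pOutCount < outCapacity); i++)
            pOutPids[(*pOutCount)++] = params.pidTbl[i];
    } while (params.passIndex != NV_RM_RPC_NO_MORE_DATA_TO_READ);

    return NV_OK;
}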
+ +#define NV2080_CTRL_PERF_MAX_LIMITS_v1C_0B 0x100 + +// Maximum number of guest addresses that can be queried in one RPC. +// The number below is the maximum count of guest addresses whose +// state can be returned in a single 4K (RPC page size) iteration. +#define GET_PLCABLE_MAX_GUEST_ADDRESS_v1D_05 60 + +// +// Versioned define for +// NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES +// +#define NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_MAX_RUNQUEUES_v1E_07 2 + +// Versioned define for +// NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT +#define NV2080_CTRL_PCIE_SUPPORTED_GPU_ATOMICS_OP_TYPE_COUNT_v1F_08 13 + +#endif /*_RPC_SDK_STRUCTURES_H_*/ + diff --git a/src/nvidia/kernel/inc/vgpu/vgpu_events.h b/src/nvidia/kernel/inc/vgpu/vgpu_events.h new file mode 100644 index 000000000..2205e1651 --- /dev/null +++ b/src/nvidia/kernel/inc/vgpu/vgpu_events.h @@ -0,0 +1,124 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Declarations for the VGPU event module. +// +// Description: +// This module declares the VGPU event interface functions/macros. 
+// +//****************************************************************************** + +#ifndef VGPU_EVENTS_H +#define VGPU_EVENTS_H + +#include "rmconfig.h" + +#include "ctrl/ctrlc637.h" +#include "ctrl/ctrl2080/ctrl2080bios.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrl2080/ctrl2080gr.h" +#include "ctrl/ctrl0080/ctrl0080nvjpg.h" + +#include "vgpu/rpc_headers.h" + +#include "gpu/device/device.h" + +#include "vgpu/sdk-structures.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +typedef MC_ENGINE_BITVECTOR *PMC_ENGINE_BITVECTOR; +typedef struct HOST_VGPU_DEVICE HOST_VGPU_DEVICE, KERNEL_HOST_VGPU_DEVICE; +typedef struct _object_vgpu OBJVGPU, *POBJVGPU; + +// Create and destroy OBJVGPU *object +NV_STATUS vgpuCreateObject(OBJGPU *pGpu); +void vgpuDestructObject(OBJGPU *pGpu); + +// Check if a VGPU event is pending +NvBool vgpuGetPendingEvent(OBJGPU *pGpu, THREAD_STATE_NODE *pThreadState); + +// Service VGPU events +void vgpuService(OBJGPU *pGpu); + +// Overwrite registry keys +void vgpuInitRegistryOverWrite(OBJGPU *pGpu); + +// Get the device pointer from the calling context +Device *vgpuGetCallingContextDevice(OBJGPU *pGpu); + +// Stubs for virtualization-disabled builds +static inline NV_STATUS vgpuGetCallingContextHostVgpuDevice(OBJGPU *pGpu, HOST_VGPU_DEVICE **ppHostVgpuDevice) +{ + *ppHostVgpuDevice = NULL; + return NV_OK; +} + +static inline NV_STATUS vgpuGetCallingContextKernelHostVgpuDevice(OBJGPU *pGpu, KERNEL_HOST_VGPU_DEVICE **ppKernelHostVgpuDevice) +{ + *ppKernelHostVgpuDevice = NULL; + return NV_OK; +} + +static inline NV_STATUS vgpuGetCallingContextGfid(OBJGPU *pGpu, NvU32 *pGfid) +{ + *pGfid = GPU_GFID_PF; + return NV_OK; +} + +static inline NV_STATUS vgpuIsCallingContextPlugin(OBJGPU *pGpu, NvBool *pIsCallingContextPlugin) +{ + *pIsCallingContextPlugin = NV_FALSE; + return NV_OK; +} + +static inline NV_STATUS vgpuGetGfidFromDeviceInfo(OBJGPU *pGpu, Device *pDevice, NvU32 *pGfid) +{ + *pGfid = GPU_GFID_PF; + return NV_OK; +} + +// Update Interrupt using shared memory through vGPU +void vgpuUpdateShmIntr(OBJGPU *pGpu, NvU32 offset, NvU32 value, THREAD_STATE_NODE *pThreadState); + +// Check if SW stalling interrupt is pending, using shared memory +NV_STATUS vgpuShmIsSwPending(OBJGPU *pGpu, NvU32 *isSwPending); + +// Check if non-stalling interrupts are enabled, using shared memory +NV_STATUS vgpuShmIsNonStallEnabled(OBJGPU *pGpu, NvU32 *isNonStallEnabled); + +// Check if non-stall interrupts are pening, using shared memory +NV_STATUS vgpuIsNonStallPending(OBJGPU *pGpu, PMC_ENGINE_BITVECTOR pEngines); + +// Service non-stalling interrupts using shared memory +NV_STATUS vgpuServiceNonStall(OBJGPU *pGpu, PMC_ENGINE_BITVECTOR pEngines); + +// Initialize and free event infrastructure +NV_STATUS _setupEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu); +NV_STATUS _teardownEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu); +NV_STATUS _setupGspEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu); +void _teardownGspEventInfrastructure(OBJGPU *pGpu, OBJVGPU *pVGpu); + +#endif // VGPU_EVENTS_H diff --git a/src/nvidia/kernel/inc/vgpu/vgpu_guest_pma_scrubber.h b/src/nvidia/kernel/inc/vgpu/vgpu_guest_pma_scrubber.h new file mode 100644 index 000000000..adca3c137 --- /dev/null +++ b/src/nvidia/kernel/inc/vgpu/vgpu_guest_pma_scrubber.h @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Declarations for VGPU PMA Guest Scrubber Shared memory structures. +// +// Description: +// This module declares the shared memory structures for VGPU PMA Guest +// Scrubber. +// +//****************************************************************************** + +#ifndef __vgpu_pma_guest_scrubber_h__ +#define __vgpu_pma_guest_scrubber_h__ + +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + +#define VGPU_GUEST_PMA_MAX_SCRUB_ITEMS 4096 +#define VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_SIZE ((sizeof(VGPU_SCRUB_NODE) * VGPU_GUEST_PMA_MAX_SCRUB_ITEMS) + RM_PAGE_SIZE) +#define VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_PFNS (NV_DIV_AND_CEIL(VGPU_GUEST_PMA_SCRUBBER_SHARED_BUFFER_SIZE, RM_PAGE_SIZE)) + +typedef struct VGPU_SCRUB_NODE { + volatile NvU32 workId; // The 32 bit ID assigned to each work + volatile NvU64 base; // The base address from which the scrub to start + volatile NvU64 size; // The size of a scrub work +} VGPU_SCRUB_NODE; + +typedef struct VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER { + volatile NvU32 lastSubmittedWorkId; + volatile NvU32 lastSWSemaphoreDone; + volatile NvU64 scrubberGetIdx; + volatile NvU64 scrubberPutIdx; +} VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER; + +typedef struct VGPU_GUEST_PMA_SCRUB_BUFFER_RING { + VGPU_GUEST_PMA_SCRUB_BUFFER_RING_HEADER *pScrubBuffRingHeader; + VGPU_SCRUB_NODE *pScrubList; +} VGPU_GUEST_PMA_SCRUB_BUFFER_RING; + +#endif // __vgpu_pma_guest_scrubber_h__ + diff --git a/src/nvidia/kernel/inc/vgpu/vgpu_version.h b/src/nvidia/kernel/inc/vgpu/vgpu_version.h new file mode 100644 index 000000000..5add972b5 --- /dev/null +++ b/src/nvidia/kernel/inc/vgpu/vgpu_version.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __vgpu_vgpu_version_h__ +#define __vgpu_vgpu_version_h__ + +/* VGX interface version */ +#define NV_RPC_VERSION_NUMBER_MAJOR 31:24 /* R---D */ +#define NV_RPC_VERSION_NUMBER_MINOR 23:16 /* R---D */ + +#define RPC_VERSION_FROM_VGX_VERSION(major, minor) ( DRF_NUM(_RPC, _VERSION_NUMBER, _MAJOR, major) | \ + DRF_NUM(_RPC, _VERSION_NUMBER, _MINOR, minor)) +#define VGX_MAJOR_VERSION_NUMBER 0x1F +#define VGX_MINOR_VERSION_NUMBER 0x0F + +// The NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL macros are auto-generated using the value from rpc-structures.def file. +#define AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION +#include "g_rpc-structures.h" +#undef AUTOGENERATE_RPC_MIN_SUPPORTED_VERSION_INFORMATION + +#endif // __vgpu_vgpu_version_h__ diff --git a/src/nvidia/kernel/nvd/nv/dbgbuffer.c b/src/nvidia/kernel/nvd/nv/dbgbuffer.c new file mode 100644 index 000000000..801b0f28e --- /dev/null +++ b/src/nvidia/kernel/nvd/nv/dbgbuffer.c @@ -0,0 +1,167 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os/os.h" +#include "dbgbuffer.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/client.h" + +NV_STATUS +dbgbufConstruct_IMPL +( + DebugBufferApi *pDebugBufferApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDebugBufferApi); + NvDebugDump *pNvd = GPU_GET_NVD(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NV00DB_ALLOCATION_PARAMETERS *pUserParams = pParams->pAllocParams; + + // Allocate a memory descriptor and backing memory for this historical buffer + status = nvdAllocDebugBuffer(pGpu, pNvd, pUserParams->tag, &pUserParams->size, &pMemDesc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "DebugBuffer object could not be allocated.\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + pDebugBufferApi->pMemDesc = pMemDesc; + + return status; +} + +void +dbgbufDestruct_IMPL +( + DebugBufferApi *pDebugBufferApi +) +{ + NV_STATUS status; + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDebugBufferApi); + NvDebugDump *pNvd = GPU_GET_NVD(pGpu); + + resGetFreeParams(staticCast(pDebugBufferApi, RsResource), &pCallContext, &pParams); + + // Unlink and free historical buffer + status = nvdFreeDebugBuffer(pGpu, pNvd, pDebugBufferApi->pMemDesc); + NV_ASSERT(status == NV_OK); + + pParams->status = status; +} + +NV_STATUS +dbgbufMap_IMPL +( + DebugBufferApi *pDebugBufferApi, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + NV_STATUS status; + NvBool bKernel; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + + status = rmapiValidateKernelMapping(rmclientGetCachedPrivilege(pClient), + pCpuMapping->flags, + &bKernel); + if (status != NV_OK) + return status; + + pCpuMapping->processId = osGetCurrentProcess(); + + // Map entire buffer (no offsets supported) + return memdescMap(pDebugBufferApi->pMemDesc, + 0, + pDebugBufferApi->pMemDesc->Size, + bKernel, + pCpuMapping->pPrivate->protect, + &pCpuMapping->pLinearAddress, + &pCpuMapping->pPrivate->pPriv); +} + +NV_STATUS +dbgbufUnmap_IMPL +( + DebugBufferApi *pDebugBufferApi, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + NV_STATUS status; + NvBool bKernel; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + + status = rmapiValidateKernelMapping(rmclientGetCachedPrivilege(pClient), + pCpuMapping->flags, + &bKernel); + if (status != NV_OK) + return status; + + memdescUnmap(pDebugBufferApi->pMemDesc, + bKernel, + pCpuMapping->processId, + pCpuMapping->pLinearAddress, + pCpuMapping->pPrivate->pPriv); + + return NV_OK; +} + +NV_STATUS +dbgbufGetMapAddrSpace_IMPL +( + DebugBufferApi *pDebugBufferApi, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDebugBufferApi); + NV_ADDRESS_SPACE addrSpace; + PMEMORY_DESCRIPTOR pMemDesc = pDebugBufferApi->pMemDesc; + + if (pMemDesc == NULL) + return NV_ERR_INVALID_OBJECT; + + NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, pMemDesc, mapFlags, &addrSpace)); + + if (pAddrSpace) + *pAddrSpace = addrSpace; + + return NV_OK; +} + +NV_STATUS +dbgbufGetMemoryMappingDescriptor_IMPL +( + DebugBufferApi *pDebugBufferApi, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + *ppMemDesc = pDebugBufferApi->pMemDesc; + return NV_OK; +} diff --git a/src/nvidia/kernel/nvd/nv/nvdctrl.c b/src/nvidia/kernel/nvd/nv/nvdctrl.c new file mode 100644 index 000000000..7f154d94b --- /dev/null +++ 
b/src/nvidia/kernel/nvd/nv/nvdctrl.c @@ -0,0 +1,306 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvdump.h" +#include "os/os.h" +#include "diagnostics/nv_debug_dump.h" +#include "kernel/gpu/gpu_resource.h" +#include "kernel/gpu/subdevice/subdevice.h" + +#include "lib/protobuf/prb.h" +#include "g_nvdebug_pb.h" +#include "lib/protobuf/prb_util.h" +#include "diagnostics/journal.h" + +// +// NVD RM SubDevice Controls +// + +/*! + * @brief Get Dump Size. Returns an estimate of the number of bytes in the dump + * that can be used to allocate a buffer. The size is based on the component + * argument. + * + * @param[in] pSubDevice + * @param[in] pDumpSizeParams + * + * @returns NV_OK on success + */ +NV_STATUS +subdeviceCtrlCmdNvdGetDumpSize_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_NVD_GET_DUMP_SIZE_PARAMS *pDumpSizeParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvDebugDump *pNvd = GPU_GET_NVD(pGpu); + NVDUMP_BUFFER nvDumpBuffer = {0}; + NV_STATUS rmStatus; + + // Allow for the largest possible dump size, if needed + nvDumpBuffer.size = NVDUMP_MAX_DUMP_SIZE; + + rmStatus = nvdDumpComponent(pGpu, + pNvd, + pDumpSizeParams->component, + &nvDumpBuffer, + NVDUMP_BUFFER_COUNT, + NULL); + + pDumpSizeParams->size = nvDumpBuffer.curNumBytes; + + return rmStatus; +} + +/*! + * @brief Get Dump. Returns a dump that includes the component specified + * when the conditions in the trigger are set. + * + * @param[in] pSubDevice + * @param[in] pDumpParams + * + * @returns NV_OK on success + */ +NV_STATUS +subdeviceCtrlCmdNvdGetDump_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_NVD_GET_DUMP_PARAMS *pDumpParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvDebugDump *pNvd = GPU_GET_NVD(pGpu); + NVDUMP_BUFFER nvDumpBuffer = {0}; + NV_STATUS rmStatus = NV_OK; + + nvDumpBuffer.size = pDumpParams->size; + nvDumpBuffer.address = pDumpParams->pBuffer; + + // Dump the component + rmStatus = nvdDumpComponent(pGpu, + pNvd, + pDumpParams->component, + &nvDumpBuffer, + NVDUMP_BUFFER_PROVIDED, + NULL); + + pDumpParams->size = nvDumpBuffer.curNumBytes; + return rmStatus; + +} +/*! +* @brief helper function to convert timestamps from hi res timer to time in ms since 1970 +* OCA records time in tick since boot. 
so in order to convert to a time stamp we need to +* convert the ticks to ms & add it to the boot time. +* +* @returns time since 1970 in ms +*/ +static NvU64 createTimestampFromTimer(NvU64 timerVal) +{ + NvU32 currTimeSec = 0; + NvU32 currTimeUsec = 0; + NvU64 currTimeMsec; + + NvU64 timeSinceBootNsec = 0; + NvU64 timeSinceBootMsec = 0; + + NvU64 timerFreq; + NvU64 timeValMsec; + + NvU64 timestampMs; + + // get all the current time info. + osGetCurrentTick(&timeSinceBootNsec); // get the time since boot in ns + osGetCurrentTime(&currTimeSec, &currTimeUsec); // get the current time + timerFreq = osGetTimestampFreq(); // get the ticks/second. + + // convert everything to the same base (ms) + // convert the time value from ticks to ms since boot. + timeValMsec = (timerVal * 1000) / timerFreq; + + // scale time since boot to from ns to ms + timeSinceBootMsec = timeSinceBootNsec / 1000000; + + // put it together in ms + currTimeMsec = currTimeSec; // need to move this to the 64 bit value + currTimeMsec *= 1000; // before multiply to avoid overflow. + currTimeMsec += currTimeUsec / 1000; + + // put it all together. + timestampMs = currTimeMsec - timeSinceBootMsec; // determine boot time. + timestampMs += timeValMsec; // add in the timeVal since boot + return timestampMs; +} + +/*! +* @brief Get the NOCAT journal Rpt. Returns the entries in the NOCAT Journal +* +* @returns NV_OK on success +*/ +NV_STATUS +subdeviceCtrlCmdNvdGetNocatJournalRpt_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_NVD_GET_NOCAT_JOURNAL_PARAMS *pReportParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcdb = SYS_GET_RCDB(pSys); + + NvU32 idx; + NV_STATUS status; + NvU32 flags; + + if (pRcdb == NULL) + { + return NV_ERR_INVALID_STATE; + } + + // start with a clean slate + flags = pReportParams->flags; + portMemSet(pReportParams, 0, sizeof(*pReportParams)); + pReportParams->flags = flags; + + // get reports until we run out of reports or run out of space. + for (idx = 0; idx < NV2080_NOCAT_JOURNAL_MAX_JOURNAL_RECORDS; idx++) + { + status = rcdbReportNextNocatJournalEntry(&pReportParams->journalRecords[idx]); + + if (status != NV_OK) + { + if ((status == NV_ERR_OBJECT_NOT_FOUND) || (idx != 0)) + { + // call to get the next record failed, + // either we have run out of records, + // or we have put at least one record into report. + // we will call that a success so we report the records we have, or a 0 count. + // NOTE -- NvAPI translates OBJECT_NOT_FOUND to a general NVAPI_ERROR, + // so the caller can not tell the reason for the failure is we ran out of records. + // that is why we are translating that to a success here. + status = NV_OK; + } + break; + } + // fix up the time stamp + pReportParams->journalRecords[idx].timeStamp = + createTimestampFromTimer(pReportParams->journalRecords[idx].timeStamp); + } + if (status == NV_OK) + { + //update the counters. + pReportParams->nocatRecordCount = idx; + pReportParams->nocatOutstandingRecordCount = rcdbGetNocatOutstandingCount(pRcdb); + + // add in the activity counters. + portMemCopy(pReportParams->activityCounters, NV_SIZEOF32(pReportParams->activityCounters), + pRcdb->nocatJournalDescriptor.nocatEventCounters, + NV_SIZEOF32(pRcdb->nocatJournalDescriptor.nocatEventCounters)); + } + return status; +} + +/*! 
+* @brief Set the NOCAT TDR data collected by KMD in the NOCAT journal record +* +* @returns NV_OK on success +*/ +NV_STATUS +subdeviceCtrlCmdNvdSetNocatJournalData_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_NVD_SET_NOCAT_JOURNAL_DATA_PARAMS* pReportParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcdb = SYS_GET_RCDB(pSys); + + switch (pReportParams->dataType) + { + case NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_TDR_REASON: + rcdbSetNocatTdrReason(&pReportParams->nocatJournalData.tdrReason); + break; + + case NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_INSERT_RECORD: + { + NOCAT_JOURNAL_PARAMS newEntry; + + portMemSet(&newEntry, 0, sizeof(newEntry)); + + // fill in the newEntry structure with the data from the insertData. + newEntry.recType = pReportParams->nocatJournalData.insertData.recType; + newEntry.pSource = (char *)pReportParams->nocatJournalData.insertData.source; + newEntry.bugcheck = pReportParams->nocatJournalData.insertData.bugcheck; + newEntry.subsystem = pReportParams->nocatJournalData.insertData.subsystem; + newEntry.errorCode = pReportParams->nocatJournalData.insertData.errorCode; + + // for now we are not supporting external events with diag buffers. + newEntry.pDiagBuffer = NULL; + newEntry.diagBufferLen = 0; + newEntry.pFaultingEngine = (char *)pReportParams->nocatJournalData.insertData.faultingEngine; + + // do we want to allow NULL strings? + if (FLD_TEST_DRF(2080_CTRL, _NOCAT_INSERT, _ALLOW_NULL_STR, _NO, + pReportParams->nocatJournalData.insertData.flags)) + { + if (pReportParams->nocatJournalData.insertData.source[0] != '\0') + { + // don't pass in a pointer to null source string. + newEntry.pSource = NULL; + } + if (pReportParams->nocatJournalData.insertData.faultingEngine[0] != '\0') + { + // don't pass in a pointer to null faulting engine string. + newEntry.pFaultingEngine = NULL; + } + } + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_INSERT_RECORDS_IDX]++; + rcdbNocatInsertNocatError(NULL, &newEntry); + } + break; + + case NV2080_CTRL_NOCAT_JOURNAL_DATA_TYPE_SET_TAG: + if ((pReportParams->nocatJournalData.tagData.tag[0] == '\0') || + FLD_TEST_DRF(2080_CTRL, _NOCAT_TAG, _CLEAR, _YES, + pReportParams->nocatJournalData.insertData.flags)) + { + // clear the tag + portMemSet(pRcdb->nocatJournalDescriptor.tag, 0, + sizeof(pRcdb->nocatJournalDescriptor.tag)); + } + else + { + // save the tag + portStringCopy((char *)pRcdb->nocatJournalDescriptor.tag, + NV2080_NOCAT_JOURNAL_MAX_STR_LEN, + (char *)pReportParams->nocatJournalData.tagData.tag, + portStringLength((char *)pReportParams->nocatJournalData.tagData.tag) + 1); + } + break; + + default: + break; + } + return NV_OK; +} + diff --git a/src/nvidia/kernel/vgpu/nv/rpc.c b/src/nvidia/kernel/vgpu/nv/rpc.c new file mode 100644 index 000000000..4e4ee5205 --- /dev/null +++ b/src/nvidia/kernel/vgpu/nv/rpc.c @@ -0,0 +1,1842 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Description: +// This module implements RPC send and receive ring buffers. +// +//****************************************************************************** + +#include "os/os.h" +#include "core/system.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu/bif/kernel_bif.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "nvVer.h" +#include "nvBldVer.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "platform/chipset/chipset.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "rmapi/alloc_size.h" +#include "rmapi/rs_utils.h" +#include "rmapi/client_resource.h" +#include "gpu/gsp/kernel_gsp.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "vgpu/vgpu_version.h" +#include "vgpu/rpc.h" +#include "vgpu/vgpu_events.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "finn_rm_api.h" + +#define SDK_ALL_CLASSES_INCLUDE_FULL_HEADER +#include "g_allclasses.h" +#undef SDK_ALL_CLASSES_INCLUDE_FULL_HEADER + +#define RPC_STRUCTURES +#define RPC_GENERIC_UNION +#include "g_rpc-structures.h" +#undef RPC_STRUCTURES +#undef RPC_GENERIC_UNION + +#define RPC_MESSAGE_STRUCTURES +#define RPC_MESSAGE_GENERIC_UNION +#include "g_rpc-message-header.h" +#undef RPC_MESSAGE_STRUCTURES +#undef RPC_MESSAGE_GENERIC_UNION + +#include "g_rpc_private.h" + + +#include "gpu/gsp/message_queue_priv.h" + +static NvBool bProfileRPC = NV_FALSE; + +typedef struct rpc_meter_list +{ + RPC_METER_ENTRY rpcData; + struct rpc_meter_list *pNext; +} RPC_METER_LIST; + +typedef struct rpc_meter_head +{ + RPC_METER_LIST *pHead; + RPC_METER_LIST *pTail; +} RPC_METER_HEAD; + +static RPC_METER_HEAD rpcMeterHead; +static NvU32 rpcProfilerEntryCount; + +typedef struct rpc_vgx_version +{ + NvU32 majorNum; + NvU32 minorNum; +} RPC_VGX_VERSION; + +static RPC_VGX_VERSION rpcVgxVersion; + +void rpcSetIpVersion(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 ipVersion) +{ + OBJHAL *pHal = GPU_GET_HAL(pGpu); + PMODULEDESCRIPTOR pMod = objhalGetModuleDescriptor(pHal); + IGRP_IP_VERSIONS_TABLE_INFO info = {0}; + + _objrpcAssignIpVersion(pRpc, ipVersion); + pMod->pHalSetIfaces->rpcHalIfacesSetupFn(&pRpc->_hal); + info.pGpu = pGpu; + info.pDynamic = (void*) pRpc; + rpc_iGrp_ipVersions_getInfo_HAL(pRpc, &info); + info.ifacesWrapupFn(&info); + +} + +NV_STATUS rpcConstruct_IMPL(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS 
rmStatus = NV_OK; + return rmStatus; +} + +void rpcDestroy_IMPL(OBJGPU *pGpu, OBJRPC *pRpc) +{ +} + +NV_STATUS rpcSendMessage_IMPL(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_PRINTF(LEVEL_ERROR, "virtual function not implemented.\n"); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS rpcRecvPoll_IMPL(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 expectedFunc) +{ + NV_PRINTF(LEVEL_ERROR, "virtual function not implemented.\n"); + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _issueRpcAndWait(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + RPC_METER_LIST *pNewEntry = NULL; + + // should not be called in broadcast mode + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + if (bProfileRPC) + { + // Create a new entry for our RPC profiler + pNewEntry = portMemAllocNonPaged(sizeof(RPC_METER_LIST)); + if (pNewEntry == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate RPC meter memory!\n"); + NV_ASSERT(0); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + portMemSet(pNewEntry, 0, sizeof(RPC_METER_LIST)); + + if (rpcMeterHead.pHead == NULL) + rpcMeterHead.pHead = pNewEntry; + else + rpcMeterHead.pTail->pNext = pNewEntry; + + rpcMeterHead.pTail = pNewEntry; + + pNewEntry->rpcData.rpcDataTag = vgpu_rpc_message_header_v->function; + + rpcProfilerEntryCount++; + + osGetPerformanceCounter(&pNewEntry->rpcData.startTimeInNs); + } + + status = rpcSendMessage(pGpu, pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "rpcSendMessage failed with status 0x%08x for fn %d!\n", + status, vgpu_rpc_message_header_v->function); + NV_ASSERT(0); + // + // It has been observed that returning NV_ERR_BUSY_RETRY in a bad state (RPC + // buffers full and not being serviced) can make things worse, i.e. turn RPC + // failures into app hangs such that even nvidia-bug-report.sh gets stuck. + // Avoid this for now while still returning the correct error in other cases. + // + return (status == NV_ERR_BUSY_RETRY) ? NV_ERR_GENERIC : status; + } + + status = rpcRecvPoll(pGpu, pRpc, vgpu_rpc_message_header_v->function); + if (status != NV_OK) + { + if (status == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_ERROR, "rpcRecvPoll timedout for fn %d!\n", + vgpu_rpc_message_header_v->function); + } + else + { + NV_PRINTF(LEVEL_ERROR, "rpcRecvPoll failed with status 0x%08x for fn %d!\n", + status, vgpu_rpc_message_header_v->function); + } + NV_ASSERT(0); + return status; + } + + if (bProfileRPC) + osGetPerformanceCounter(&pNewEntry->rpcData.endTimeInNs); + + // Now check if RPC really succeeded + if (vgpu_rpc_message_header_v->rpc_result != NV_VGPU_MSG_RESULT_SUCCESS) + { + NV_PRINTF(LEVEL_WARNING, "RPC failed with status 0x%08x for fn %d!\n", + vgpu_rpc_message_header_v->rpc_result, + vgpu_rpc_message_header_v->function); + + if (vgpu_rpc_message_header_v->rpc_result < DRF_BASE(NV_VGPU_MSG_RESULT__VMIOP)) + return vgpu_rpc_message_header_v->rpc_result; + + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +static NV_STATUS _issueRpcAsync(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status; + + // should not be called in broadcast mode + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + status = rpcSendMessage(pGpu, pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "rpcSendMessage failed with status 0x%08x for fn %d!\n", + status, vgpu_rpc_message_header_v->function); + NV_ASSERT(0); + // + // It has been observed that returning NV_ERR_BUSY_RETRY in a bad state (RPC + // buffers full and not being serviced) can make things worse, i.e. 
turn RPC + // failures into app hangs such that even nvidia-bug-report.sh gets stuck. + // Avoid this for now while still returning the correct error in other cases. + // + return (status == NV_ERR_BUSY_RETRY) ? NV_ERR_GENERIC : status; + } + + return NV_OK; +} + +static NV_STATUS _issueRpcLarge +( + OBJGPU *pGpu, + OBJRPC *pRpc, + NvU32 bufSize, + const void *pBuffer, + NvBool bBidirectional, + NvBool bWait +) +{ + NvU8 *pBuf8 = (NvU8 *)pBuffer; + NV_STATUS nvStatus = NV_OK; + NvU32 expectedFunc = vgpu_rpc_message_header_v->function; + NvU32 entryLength; + NvU32 remainingSize = bufSize; + NvU32 recordCount = 0; + + // should not be called in broadcast mode + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + // Copy the initial buffer + entryLength = NV_MIN(bufSize, pRpc->maxRpcSize); + + if ((NvU8 *)vgpu_rpc_message_header_v != pBuf8) + portMemCopy(vgpu_rpc_message_header_v, entryLength, pBuf8, entryLength); + + // Set the correct length for this queue entry. + vgpu_rpc_message_header_v->length = entryLength; + + nvStatus = rpcSendMessage(pGpu, pRpc); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "rpcSendMessage failed with status 0x%08x for fn %d!\n", + nvStatus, expectedFunc); + NV_ASSERT(0); + // + // It has been observed that returning NV_ERR_BUSY_RETRY in a bad state (RPC + // buffers full and not being serviced) can make things worse, i.e. turn RPC + // failures into app hangs such that even nvidia-bug-report.sh gets stuck. + // Avoid this for now while still returning the correct error in other cases. + // + return (nvStatus == NV_ERR_BUSY_RETRY) ? NV_ERR_GENERIC : nvStatus; + } + remainingSize -= entryLength; + pBuf8 += entryLength; + + // Copy the remaining buffers + entryLength = pRpc->maxRpcSize - sizeof(rpc_message_header_v); + while (remainingSize != 0) + { + if (entryLength > remainingSize) + entryLength = remainingSize; + + portMemCopy(rpc_message, entryLength, pBuf8, entryLength); + + // Set the correct length for this queue entry. + vgpu_rpc_message_header_v->length = entryLength + sizeof(rpc_message_header_v); + vgpu_rpc_message_header_v->function = NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD; + + nvStatus = rpcSendMessage(pGpu, pRpc); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "rpcSendMessage failed with status 0x%08x for fn %d continuation record (remainingSize=0x%x)!\n", + nvStatus, expectedFunc, remainingSize); + NV_ASSERT(0); + // + // It has been observed that returning NV_ERR_BUSY_RETRY in a bad state (RPC + // buffers full and not being serviced) can make things worse, i.e. turn RPC + // failures into app hangs such that even nvidia-bug-report.sh gets stuck. + // Avoid this for now while still returning the correct error in other cases. + // + return (nvStatus == NV_ERR_BUSY_RETRY) ? NV_ERR_GENERIC : nvStatus; + } + + remainingSize -= entryLength; + pBuf8 += entryLength; + recordCount++; + } + + if (!bWait) + { + // In case of Async RPC, we are done here. + return nvStatus; + } + + // Always receive at least one.. 
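+    //
+    // Receive phase: poll for the first response frame; for bidirectional
+    // transfers the remaining data comes back as CONTINUATION_RECORD frames
+    // that are copied back into the caller's buffer below until
+    // remainingSize reaches zero.
+    //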
+ nvStatus = rpcRecvPoll(pGpu, pRpc, expectedFunc); + if (nvStatus != NV_OK) + { + if (nvStatus == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_ERROR, "rpcRecvPoll timedout for fn %d!\n", + vgpu_rpc_message_header_v->function); + } + else + { + NV_PRINTF(LEVEL_ERROR, "rpcRecvPoll failed with status 0x%08x for fn %d!\n", + nvStatus, vgpu_rpc_message_header_v->function); + } + NV_ASSERT(0); + return nvStatus; + } + + pBuf8 = (NvU8 *)pBuffer; + remainingSize = bufSize; + entryLength = NV_MIN(bufSize, pRpc->maxRpcSize); + + if (((NvU8 *)vgpu_rpc_message_header_v != pBuf8) && bBidirectional) + portMemCopy(pBuf8, entryLength, vgpu_rpc_message_header_v, entryLength); + + remainingSize -= entryLength; + pBuf8 += entryLength; + + // For bidirectional transfer messages, need to receive all other frames as well + if (bBidirectional && (recordCount > 0)) + { + while (remainingSize > 0) + { + nvStatus = rpcRecvPoll(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD); + if (nvStatus != NV_OK) + { + if (nvStatus == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_ERROR, + "rpcRecvPoll timedout for fn %d continuation record (remainingSize=0x%x)!\n", + vgpu_rpc_message_header_v->function, remainingSize); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "rpcRecvPoll failed with status 0x%08x for fn %d continuation record! (remainingSize=0x%x)\n", + nvStatus, vgpu_rpc_message_header_v->function, remainingSize); + } + NV_ASSERT(0); + return nvStatus; + } + entryLength = vgpu_rpc_message_header_v->length - sizeof(rpc_message_header_v); + NV_CHECK_OR_RETURN(LEVEL_ERROR, entryLength <= pRpc->maxRpcSize, NV_ERR_INVALID_STATE); + + if (entryLength > remainingSize) + entryLength = remainingSize; + + portMemCopy(pBuf8, entryLength, rpc_message, entryLength); + remainingSize -= entryLength; + pBuf8 += entryLength; + recordCount--; + } + vgpu_rpc_message_header_v->function = expectedFunc; + NV_ASSERT(recordCount == 0); + } + + // Now check if RPC really succeeded + if (vgpu_rpc_message_header_v->rpc_result != NV_VGPU_MSG_RESULT_SUCCESS) + { + NV_PRINTF(LEVEL_WARNING, "RPC failed with status 0x%08x for fn %d!\n", + vgpu_rpc_message_header_v->rpc_result, + vgpu_rpc_message_header_v->function); + + if (vgpu_rpc_message_header_v->rpc_result < DRF_BASE(NV_VGPU_MSG_RESULT__VMIOP)) + return vgpu_rpc_message_header_v->rpc_result; + + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +static NV_STATUS _issueRpcAndWaitLarge +( + OBJGPU *pGpu, + OBJRPC *pRpc, + NvU32 bufSize, + const void *pBuffer, + NvBool bBidirectional +) +{ + return _issueRpcLarge(pGpu, pRpc, bufSize, pBuffer, + bBidirectional, + NV_TRUE); //bWait +} + +static NV_STATUS _issueRpcAsyncLarge +( + OBJGPU *pGpu, + OBJRPC *pRpc, + NvU32 bufSize, + const void *pBuffer +) +{ + return _issueRpcLarge(pGpu, pRpc, bufSize, pBuffer, + NV_FALSE, //bBidirectional + NV_FALSE); //bWait +} + +static NV_STATUS _issuePteDescRpc +( + OBJGPU *pGpu, + OBJRPC *pRpc, + NvU32 offsetToPTE, + NvU32 pageCount, + RmPhysAddr *guestPages, + NvBool physicallyContiguous +) +{ + rpc_message_header_v *pHdr = vgpu_rpc_message_header_v; + void *pAllocatedRecord = NULL; + struct pte_desc *pPteDesc; + NvU64 contigBase; + NV_STATUS nvStatus = NV_OK; + NvU32 recordSize; + NvU32 i; + DMA_PAGE_ARRAY pageArray; + + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pRpc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(guestPages != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pHdr != NULL, NV_ERR_INVALID_ARGUMENT); + + recordSize = offsetToPTE + NV_OFFSETOF(struct pte_desc, 
pte_pde[0].pte) + + (pageCount * NV_VGPU_PTE_64_SIZE); + + if (recordSize > pRpc->maxRpcSize) + { + // Multiple queue entries. Create a temporary buffer for the PTEs. + pAllocatedRecord = portMemAllocNonPaged(recordSize); + if (pAllocatedRecord == NULL) + { + NV_PRINTF(LEVEL_ERROR, "no memory for allocated record\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Copy in the record so far. + portMemCopy(pAllocatedRecord, pHdr->length, pHdr, pHdr->length); + + // Point to the allocated record. + pHdr = (rpc_message_header_v *)pAllocatedRecord; + } + + dmaPageArrayInit(&pageArray, guestPages, pageCount); + + pPteDesc = (struct pte_desc *)NvP64_PLUS_OFFSET(pHdr, offsetToPTE); + pPteDesc->idr = NV_VGPU_PTEDESC_IDR_NONE; + pPteDesc->length = pageCount; + contigBase = (dmaPageArrayGetPhysAddr(&pageArray, 0) >> RM_PAGE_SHIFT); + + for (i = 0; i < pageCount; i++) + { + if (physicallyContiguous) + pPteDesc->pte_pde[i].pte = contigBase + i; + else + pPteDesc->pte_pde[i].pte = + (dmaPageArrayGetPhysAddr(&pageArray, i) >> RM_PAGE_SHIFT); + } + + nvStatus = _issueRpcAndWaitLarge(pGpu, pRpc, recordSize, pHdr, NV_FALSE); + + portMemFree(pAllocatedRecord); + + return nvStatus; +} + +NV_STATUS rpcAllocMemory_v13_01(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, NvU32 hClass, + NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc) +{ + NV_STATUS status = NV_OK; + + if (pMemDesc == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: AllocMemory: pMemDesc arg was NULL\n"); + return NV_ERR_GENERIC; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, sizeof(rpc_alloc_memory_v13_01)); + if (status != NV_OK) + return status; + + rpc_message->alloc_memory_v13_01.hClient = hClient; + rpc_message->alloc_memory_v13_01.hDevice = hDevice; + rpc_message->alloc_memory_v13_01.hMemory = hMemory; + rpc_message->alloc_memory_v13_01.hClass = hClass; + rpc_message->alloc_memory_v13_01.flags = flags; + rpc_message->alloc_memory_v13_01.pteAdjust = pMemDesc->PteAdjust; + rpc_message->alloc_memory_v13_01.format = memdescGetPteKind(pMemDesc); + rpc_message->alloc_memory_v13_01.length = pMemDesc->Size; + rpc_message->alloc_memory_v13_01.pageCount = (NvU32)pMemDesc->PageCount; + + if (IS_GSP_CLIENT(pGpu)) + { + status = _issuePteDescRpc(pGpu, pRpc, + NV_OFFSETOF(rpc_message_header_v, rpc_message_data[0].alloc_memory_v13_01.pteDesc), + pMemDesc->PageCount, + memdescGetPteArray(pMemDesc, AT_GPU), + memdescGetContiguity(pMemDesc, AT_GPU)); + } + + return status; +} + +NV_STATUS rpcMapMemoryDma_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, NvHandle hDma, NvHandle hMemory, + NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset) +{ + NV_STATUS status; + NVOS46_PARAMETERS_v03_00 *rpc_params = &rpc_message->map_memory_dma_v03_00.params; + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_MAP_MEMORY_DMA, sizeof(rpc_map_memory_dma_v03_00)); + if (status != NV_OK) + return status; + + rpc_params->hClient = hClient; + rpc_params->hDevice = hDevice; + rpc_params->hDma = hDma; + rpc_params->hMemory = hMemory; + rpc_params->flags = flags; + + rpc_params->offset = offset; + rpc_params->length = length; + rpc_params->dmaOffset = *pDmaOffset; + + status = _issueRpcAndWait(pGpu, pRpc); + + if (status == NV_OK) + { + *pDmaOffset = rpc_params->dmaOffset; + } + return status; +} + +NV_STATUS rpcUnmapMemoryDma_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, NvHandle hDma, + NvHandle hMemory, NvU32 flags, NvU64 pDmaOffset) +{ + 
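+    // Mirrors rpcMapMemoryDma_v03_00 above: fill in the NVOS47 parameters and
+    // issue a synchronous UNMAP_MEMORY_DMA RPC. Unlike the map path, no DMA
+    // offset is copied back to the caller.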
NV_STATUS status; + NVOS47_PARAMETERS_v03_00 *rpc_params = &rpc_message->unmap_memory_dma_v03_00.params; + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY_DMA, sizeof(rpc_unmap_memory_dma_v03_00)); + if (status != NV_OK) + return status; + + rpc_params->hClient = hClient; + rpc_params->hDevice = hDevice; + rpc_params->hDma = hDma; + rpc_params->hMemory = hMemory; + rpc_params->flags = flags; + rpc_params->dmaOffset = pDmaOffset; + + status = _issueRpcAndWait(pGpu, pRpc); + return status; +} + +/* max entries is how many 3 DWORD entries fit in what remains of the message_buffer */ +#define IDLE_CHANNELS_MAX_ENTRIES_v03_00 \ + ((pRpc->maxRpcSize - (sizeof(rpc_message_header_v) + sizeof(rpc_idle_channels_v03_00))) / sizeof(idle_channel_list_v03_00)) + +NV_STATUS rpcIdleChannels_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle *phClients, NvHandle *phDevices, NvHandle *phChannels, + NvU32 numEntries, NvU32 flags, NvU32 timeout) +{ + NV_STATUS status; + NvU32 i; + + if (numEntries > IDLE_CHANNELS_MAX_ENTRIES_v03_00) + { + // unable to fit all the entries in the message buffer + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: IdleChannels: requested %u entries (but only room for %u)\n", + numEntries, (NvU32)IDLE_CHANNELS_MAX_ENTRIES_v03_00); + return NV_ERR_GENERIC; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_IDLE_CHANNELS, + sizeof(rpc_idle_channels_v03_00) + numEntries * sizeof(idle_channel_list_v03_00)); + if (status != NV_OK) + return status; + + rpc_message->idle_channels_v03_00.flags = flags; + rpc_message->idle_channels_v03_00.timeout = timeout; + rpc_message->idle_channels_v03_00.nchannels = numEntries; + + for (i = 0; i < numEntries; i++) + { + rpc_message->idle_channels_v03_00.channel_list[i].phClient = ((NvU32) phClients[i]); + rpc_message->idle_channels_v03_00.channel_list[i].phDevice = ((NvU32) phDevices[i]); + rpc_message->idle_channels_v03_00.channel_list[i].phChannel = ((NvU32) phChannels[i]); + } + + status = _issueRpcAndWait(pGpu, pRpc); + return status; +} + +NV_STATUS RmRpcSetGuestSystemInfo(OBJGPU *pGpu, OBJRPC *pRpc) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS status = NV_OK; + NvS32 message_buffer_remaining; + NvU32 data_len; + + // + // Skip RPC version handshake if we've already done it on one GPU. + // + // For GSP: Multi GPU setup can have pre-Turing GPUs + // and GSP offload is disabled for all pre-Turing GPUs. + // Don't skip RPC version handshake if rpcVgxVersion.majorNum is not set + // + if (pGpuMgr->numGpuHandles > 1) + { + if (rpcVgxVersion.majorNum != 0) + { + NV_PRINTF(LEVEL_INFO, + "NVRM_RPC: Skipping RPC version handshake for instance 0x%x\n", + gpuGetInstance(pGpu)); + goto skip_ver_handshake; + } + else if (!IS_GSP_CLIENT(pGpu)) + { + status = NV_ERR_GENERIC; + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: RPC version handshake already failed. 
Bailing out for device" + " instance 0x%x\n", gpuGetInstance(pGpu)); + goto skip_ver_handshake; + } + } + + rpcVgxVersion.majorNum = 0; + rpcVgxVersion.minorNum = 0; + + message_buffer_remaining = pRpc->maxRpcSize - (sizeof(rpc_message_header_v) + + sizeof(rpc_set_guest_system_info_v)); + + if (message_buffer_remaining < 0) + { + // unable to fit the data in the message buffer + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: SetGuestSystemInfo: Insufficient space on message buffer\n"); + + return NV_ERR_BUFFER_TOO_SMALL; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO, + sizeof(rpc_set_guest_system_info_v)); + if (status != NV_OK) + return status; + + if(sizeof(NV_VERSION_STRING) < NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE) + { + data_len = NV_ROUNDUP((NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE), sizeof(NvU32)); + rpc_message->set_guest_system_info_v.guestDriverVersionBufferLength = data_len; + portStringCopy(rpc_message->set_guest_system_info_v.guestDriverVersion, + sizeof(rpc_message->set_guest_system_info_v.guestDriverVersion), + (const char*)NV_VERSION_STRING, data_len); + } + else + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + if(sizeof(NV_BUILD_BRANCH_VERSION) < NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE) + { + data_len = NV_ROUNDUP((NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE), sizeof(NvU32)); + rpc_message->set_guest_system_info_v.guestVersionBufferLength = data_len; + portStringCopy(rpc_message->set_guest_system_info_v.guestVersion, + sizeof(rpc_message->set_guest_system_info_v.guestVersion), + (const char*)NV_BUILD_BRANCH_VERSION, data_len); + } + else + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + if (sizeof(NV_DISPLAY_DRIVER_TITLE) < NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE) + { + data_len = NV_ROUNDUP((NV0000_CTRL_CMD_SYSTEM_GET_VGX_SYSTEM_INFO_BUFFER_SIZE), sizeof(NvU32)); + rpc_message->set_guest_system_info_v.guestTitleBufferLength = data_len; + portStringCopy(rpc_message->set_guest_system_info_v.guestTitle, + sizeof(rpc_message->set_guest_system_info_v.guestTitle), + (const char*)NV_DISPLAY_DRIVER_TITLE, data_len); + } + else + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + rpc_message->set_guest_system_info_v.guestClNum = NV_BUILD_CHANGELIST_NUM; + rpc_message->set_guest_system_info_v.vgxVersionMajorNum = VGX_MAJOR_VERSION_NUMBER; + rpc_message->set_guest_system_info_v.vgxVersionMinorNum = VGX_MINOR_VERSION_NUMBER; + + status = _issueRpcAndWait(pGpu, pRpc); + + if ((status == NV_OK) && (vgpu_rpc_message_header_v->rpc_result_private != NV_OK)) + { + status = vgpu_rpc_message_header_v->rpc_result_private; + if ((rpc_message->set_guest_system_info_v.vgxVersionMajorNum != VGX_MAJOR_VERSION_NUMBER) || + (rpc_message->set_guest_system_info_v.vgxVersionMinorNum != VGX_MINOR_VERSION_NUMBER)) + { + if (RPC_VERSION_FROM_VGX_VERSION(rpc_message->set_guest_system_info_v.vgxVersionMajorNum, + rpc_message->set_guest_system_info_v.vgxVersionMinorNum) >= + RPC_VERSION_FROM_VGX_VERSION(NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MAJOR, + NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MINOR)) + { + NV_PRINTF(LEVEL_WARNING, + "NVRM_RPC: SetGuestSystemInfo: Guest VGX version (%d.%d) is newer than " + "the host VGX version (%d.%d)\n" + "NVRM_RPC: SetGuestSystemInfo: Retrying with the VGX version requested " + "by the host.\n", VGX_MAJOR_VERSION_NUMBER, + VGX_MINOR_VERSION_NUMBER, + rpc_message->set_guest_system_info_v.vgxVersionMajorNum, + rpc_message->set_guest_system_info_v.vgxVersionMinorNum); + 
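+                //
+                // The host wrote the VGX version it supports back into the
+                // message buffer, so re-issuing the same RPC retries the
+                // handshake with the host-requested version.
+                //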
status = _issueRpcAndWait(pGpu, pRpc); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: SetGuestSystemInfo: The host version (%d.%d) is too old.\n" + "NVRM_RPC: SetGuestSystemInfo: Minimum required host version is %d.%d.\n", + rpc_message->set_guest_system_info_v.vgxVersionMajorNum, + rpc_message->set_guest_system_info_v.vgxVersionMinorNum, + NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MAJOR, + NV_VGPU_GRIDSW_VERSION_MIN_SUPPORTED_INTERNAL_MINOR); + + NV_RM_RPC_LOG(pGpu, "######## Guest NVIDIA Driver Information: ########", NV_VGPU_LOG_LEVEL_NOTICE); + NV_RM_RPC_LOG(pGpu, "Driver Version: "NV_VERSION_STRING, NV_VGPU_LOG_LEVEL_NOTICE); + NV_RM_RPC_LOG(pGpu, "Incompatible Guest/Host drivers: Host VGX version is older than the minimum version " + "supported by the Guest. Disabling vGPU.", NV_VGPU_LOG_LEVEL_ERROR); + } + } + } + + if (status == NV_OK) + { + rpcVgxVersion.majorNum = rpc_message->set_guest_system_info_v.vgxVersionMajorNum; + rpcVgxVersion.minorNum = rpc_message->set_guest_system_info_v.vgxVersionMinorNum; + } + +skip_ver_handshake: + if (status == NV_OK) + { + rpcSetIpVersion(pGpu, pRpc, + RPC_VERSION_FROM_VGX_VERSION(rpcVgxVersion.majorNum, + rpcVgxVersion.minorNum)); + + NV_RM_RPC_SET_GUEST_SYSTEM_INFO_EXT(pGpu, status); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "SET_GUEST_SYSTEM_INFO_EXT : failed.\n"); + } + } + + return status; +} + +NV_STATUS rpcUnloadingGuestDriver_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvBool bSuspend, NvBool bGc6Entering, NvU32 newPMLevel) +{ + NV_STATUS status = NV_OK; + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, 0); + if (status != NV_OK) + return status; + + status = _issueRpcAndWait(pGpu, pRpc); + + return status; +} + + +NV_STATUS rpcUnloadingGuestDriver_v1F_07(OBJGPU *pGpu, OBJRPC *pRpc, NvBool bSuspend, NvBool bGc6Entering, NvU32 newPMLevel) +{ + NV_STATUS status = NV_OK; + NvU32 headerLength = sizeof(rpc_message_header_v) + sizeof(rpc_unloading_guest_driver_v1F_07); + if (headerLength > pRpc->maxRpcSize) + { + NV_PRINTF(LEVEL_ERROR, + "Unloading guest driver parameters size (0x%x) exceed message_buffer " + "size (0x%x)\n", headerLength, pRpc->maxRpcSize); + + NV_ASSERT(0); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(rpc_unloading_guest_driver_v1F_07)); + if (status != NV_OK) + return status; + rpc_message->unloading_guest_driver_v1F_07.bSuspend = bSuspend; + rpc_message->unloading_guest_driver_v1F_07.bGc6Entering = bGc6Entering; + rpc_message->unloading_guest_driver_v1F_07.newLevel = newPMLevel; + + status = _issueRpcAndWait(pGpu, pRpc); + + return status; +} + +NV_STATUS rpcGpuExecRegOps_v12_01(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hObject, + NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS *pParams, + NV2080_CTRL_GPU_REG_OP *pRegOps) +{ + NV_STATUS status; + NvU32 i, j, regOpsExecuted = 0; + + if (pParams == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pParams->regOpCount == 0) + { + NV_PRINTF(LEVEL_ERROR,"RegOps RPC failed: Invalid regOp count - requested 0x%x regOps\n", pParams->regOpCount); + return NV_ERR_INVALID_ARGUMENT; + } + + /* RPC message buffer can accomodate a maximum of VGPU_MAX_REGOPS_PER_RPC regops only. + * This value must be adjusted(if required) in case of any change to the internal + * RegOps RPC structures. 
+ */ + if (pRpc->maxRpcSize < + (sizeof(rpc_message_header_v) + + sizeof(rpc_gpu_exec_reg_ops_v12_01) + + VGPU_MAX_REGOPS_PER_RPC * sizeof(NV2080_CTRL_GPU_REG_OP_v03_00))) { + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: rpcGpuExecRegOps_v12_01: Insufficient space on message buffer\n"); + return NV_ERR_BUFFER_TOO_SMALL; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_GPU_EXEC_REG_OPS, + sizeof(rpc_gpu_exec_reg_ops_v12_01)); + if (status != NV_OK) + return status; + + rpc_message->gpu_exec_reg_ops_v12_01.hClient = hClient; + rpc_message->gpu_exec_reg_ops_v12_01.hObject = hObject; + + // copy params into the message buffer + rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.hClientTarget = pParams->hClientTarget; + rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.hChannelTarget = pParams->hChannelTarget; + rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.regOpCount = pParams->regOpCount; + rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.grRouteInfo.flags = pParams->grRouteInfo.flags; + rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.grRouteInfo.route = pParams->grRouteInfo.route; + + while (regOpsExecuted < pParams->regOpCount){ + for (i = 0, j = regOpsExecuted; i < VGPU_MAX_REGOPS_PER_RPC && j < pParams->regOpCount; i++, j++) + { + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regOp = pRegOps[j].regOp; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regType = pRegOps[j].regType; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regStatus = pRegOps[j].regStatus; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regQuad = pRegOps[j].regQuad; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regGroupMask = pRegOps[j].regGroupMask; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regSubGroupMask = pRegOps[j].regSubGroupMask; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regOffset = pRegOps[j].regOffset; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regValueHi = pRegOps[j].regValueHi; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regValueLo = pRegOps[j].regValueLo; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regAndNMaskHi = pRegOps[j].regAndNMaskHi; + rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regAndNMaskLo = pRegOps[j].regAndNMaskLo; + } + rpc_message->gpu_exec_reg_ops_v12_01.params.reg_op_params.regOpCount = i; + + status = _issueRpcAndWait(pGpu, pRpc); + + if (status == NV_OK) + { + status = vgpu_rpc_message_header_v->rpc_result_private; + if (status == NV_OK) + { + for (i = 0, j = regOpsExecuted; i < VGPU_MAX_REGOPS_PER_RPC && j < pParams->regOpCount; i++, j++) + { + pRegOps[j].regStatus = rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regStatus; + pRegOps[j].regValueHi = rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regValueHi; + pRegOps[j].regValueLo = rpc_message->gpu_exec_reg_ops_v12_01.params.operations[i].regValueLo; + } + } + else + { + NV_PRINTF(LEVEL_ERROR,"RegOps RPC failed: skipping 0x%x regOps\n", pParams->regOpCount - regOpsExecuted); + } + } + regOpsExecuted = j; + } + + return status; +} + +NV_STATUS rpcGetStaticInfo_v17_05(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v18_03(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v18_04(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS 
rpcGetStaticInfo_v18_0E(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v18_10(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v18_11(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v18_13(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v18_16(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v19_00(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v1A_00(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetStaticInfo_v1A_05(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_OK; + return status; +} + +NV_STATUS rpcGetGspStaticInfo_v14_00(OBJGPU *pGpu, OBJRPC *pRpc) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (IS_GSP_CLIENT(pGpu)) + { + NvU32 headerLength; + GspStaticConfigInfo *pSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + GspStaticConfigInfo *rpcInfo = (GspStaticConfigInfo *)&rpc_message->get_gsp_static_info_v14_00.data; + + NV_ASSERT_OR_RETURN(pSCI, NV_ERR_INVALID_POINTER); + + headerLength = sizeof(rpc_message_header_v) + + sizeof(GspStaticConfigInfo); + if (headerLength > pRpc->maxRpcSize) + { + NV_PRINTF(LEVEL_ERROR, + "Gsp static info parameters size (0x%x) exceed message_buffer size (0x%x)\n", + headerLength, pRpc->maxRpcSize); + + NV_ASSERT(0); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, + sizeof(GspStaticConfigInfo)); + if (status != NV_OK) + return status; + + status = _issueRpcAndWait(pGpu, pRpc); + NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, status); + + // Copy info + portMemCopy(pSCI, sizeof(*pSCI), rpcInfo, sizeof(*rpcInfo)); + } + + return status; +} + +NV_STATUS rpcUpdateBarPde_v15_00(OBJGPU *pGpu, OBJRPC *pRpc, NV_RPC_UPDATE_PDE_BAR_TYPE barType, NvU64 entryValue, NvU64 entryLevelShift) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (IS_GSP_CLIENT(pGpu)) + { + UpdateBarPde_v15_00 *rpc_params = &rpc_message->update_bar_pde_v15_00.info; + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, + sizeof(rpc_update_bar_pde_v15_00)); + if (status != NV_OK) + { + return status; + } + + rpc_params->barType = barType; + rpc_params->entryValue = entryValue; + rpc_params->entryLevelShift = entryLevelShift; + + status = _issueRpcAndWait(pGpu, pRpc); + } + + return status; +} + +NV_STATUS rpcSetPageDirectory_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams) +{ + NV_STATUS status = NV_OK; + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v03_00 *rpc_params = &rpc_message->set_page_directory_v03_00.params; + + if (hypervisorIsType(OS_HYPERVISOR_HYPERV)) + { + if (!FLD_TEST_DRF(0080, _CTRL_DMA_SET_PAGE_DIRECTORY, _FLAGS_APERTURE, _VIDMEM, pParams->flags)) + { + NV_ASSERT(0); + return NV_ERR_NOT_SUPPORTED; + } + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY, sizeof(rpc_set_page_directory_v03_00)); + if (status != NV_OK) + return status; + + rpc_message->set_page_directory_v03_00.hClient = hClient; + rpc_message->set_page_directory_v03_00.hDevice = hDevice; + + rpc_params->physAddress = pParams->physAddress; + rpc_params->numEntries = 
pParams->numEntries; + rpc_params->flags = pParams->flags; + rpc_params->hVASpace = pParams->hVASpace; + rpc_params->chId = pParams->chId; + rpc_params->subDeviceId = pParams->subDeviceId; + + status = _issueRpcAndWait(pGpu, pRpc); + + return status; + +} + +NV_STATUS rpcSetPageDirectory_v1E_05(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams) +{ + NV_STATUS status = NV_OK; + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS_v1E_05 *rpc_params = &rpc_message->set_page_directory_v1E_05.params; + + if (hypervisorIsType(OS_HYPERVISOR_HYPERV)) + { + if (!FLD_TEST_DRF(0080, _CTRL_DMA_SET_PAGE_DIRECTORY, _FLAGS_APERTURE, _VIDMEM, pParams->flags)) + { + NV_ASSERT(0); + return NV_ERR_NOT_SUPPORTED; + } + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY, sizeof(rpc_set_page_directory_v1E_05)); + if (status != NV_OK) + return status; + + rpc_message->set_page_directory_v1E_05.hClient = hClient; + rpc_message->set_page_directory_v1E_05.hDevice = hDevice; + + rpc_params->physAddress = pParams->physAddress; + rpc_params->numEntries = pParams->numEntries; + rpc_params->flags = pParams->flags; + rpc_params->hVASpace = pParams->hVASpace; + rpc_params->chId = pParams->chId; + rpc_params->subDeviceId = pParams->subDeviceId; + rpc_params->pasid = pParams->pasid; + + status = _issueRpcAndWait(pGpu, pRpc); + + return status; + +} + +NV_STATUS rpcUnsetPageDirectory_v03_00(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams) +{ + NV_STATUS status = NV_OK; + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v03_00 *rpc_params = &rpc_message->unset_page_directory_v03_00.params; + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNSET_PAGE_DIRECTORY, sizeof(rpc_unset_page_directory_v03_00)); + if (status != NV_OK) + return status; + + rpc_message->unset_page_directory_v03_00.hClient = hClient; + rpc_message->unset_page_directory_v03_00.hDevice = hDevice; + + rpc_params->hVASpace = pParams->hVASpace; + rpc_params->subDeviceId = pParams->subDeviceId; + + status = _issueRpcAndWait(pGpu, pRpc); + + return status; +} + +NV_STATUS rpcUnsetPageDirectory_v1E_05(OBJGPU *pGpu, OBJRPC *pRpc, NvHandle hClient, NvHandle hDevice, + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams) +{ + NV_STATUS status = NV_OK; + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS_v1E_05 *rpc_params = &rpc_message->unset_page_directory_v1E_05.params; + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_UNSET_PAGE_DIRECTORY, sizeof(rpc_unset_page_directory_v1E_05)); + if (status != NV_OK) + return status; + + rpc_message->unset_page_directory_v1E_05.hClient = hClient; + rpc_message->unset_page_directory_v1E_05.hDevice = hDevice; + + rpc_params->hVASpace = pParams->hVASpace; + rpc_params->subDeviceId = pParams->subDeviceId; + + status = _issueRpcAndWait(pGpu, pRpc); + + return status; +} + +NV_STATUS rpcVgpuPfRegRead32_v15_00(OBJGPU *pGpu, + OBJRPC *pRpc, + NvU64 address, + NvU32 *value, + NvU32 grEngId) +{ + + NV_STATUS status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_VGPU_PF_REG_READ32, + sizeof(rpc_vgpu_pf_reg_read32_v15_00)); + if (status != NV_OK) + return status; + + rpc_message->vgpu_pf_reg_read32_v15_00.address = address; + rpc_message->vgpu_pf_reg_read32_v15_00.grEngId = grEngId; + + status = _issueRpcAndWait(pGpu, pRpc); + + if (status == NV_OK) + { + *value = rpc_message->vgpu_pf_reg_read32_v15_00.value; + } + + return status; +} + +/* + * 
Tells GSP-RM about the overall system environment, such as what physical + * memory addresses to use. + * + * Note that this is an asynchronous RPC. It is stuffed into the message queue + * before the GSP is booted. + */ +NV_STATUS rpcGspSetSystemInfo_v17_00 +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (IS_GSP_CLIENT(pGpu)) + { + GspSystemInfo *rpcInfo = (GspSystemInfo *)&rpc_message->gsp_set_system_info_v17_00.data; + const NvU32 messageLength = sizeof(rpc_message_header_v) + sizeof(*rpcInfo); + + if (messageLength > pRpc->maxRpcSize) + { + NV_PRINTF(LEVEL_ERROR, + "GSP_SET_SYSTEM_INFO parameters size (0x%x) exceed message_buffer size (0x%x)\n", + messageLength, pRpc->maxRpcSize); + + NV_ASSERT(0); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + status = rpcWriteCommonHeader(pGpu, pRpc, + NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, + sizeof(GspSystemInfo)); + if (status != NV_OK) + return status; + + rpcInfo->gpuPhysAddr = pGpu->busInfo.gpuPhysAddr; + rpcInfo->gpuPhysFbAddr = pGpu->busInfo.gpuPhysFbAddr; + rpcInfo->gpuPhysInstAddr = pGpu->busInfo.gpuPhysInstAddr; + rpcInfo->nvDomainBusDeviceFunc = pGpu->busInfo.nvDomainBusDeviceFunc; + rpcInfo->oorArch = (NvU8)pGpu->busInfo.oorArch; + + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + if (pKernelBif != NULL) + { + NV_ASSERT_OK(kbifGetPciConfigSpacePriMirror_HAL(pGpu, pKernelBif, + &rpcInfo->pciConfigMirrorBase, + &rpcInfo->pciConfigMirrorSize)); + } + + if (IS_SIMULATION(pGpu)) + { + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + rpcInfo->simAccessBufPhysAddr = memdescGetPhysAddr(pKernelGsp->pMemDesc_simAccessBuf, AT_CPU, 0); + } + else + { + rpcInfo->simAccessBufPhysAddr = 0; + } + rpcInfo->consoleMemSize = GPU_GET_MEMORY_MANAGER(pGpu)->Ram.ReservedConsoleDispMemSize; + + OBJCL *pCl = SYS_GET_CL(SYS_GET_INSTANCE()); + if (pCl != NULL) + { + clSyncWithGsp(pCl, rpcInfo); + } + status = _issueRpcAsync(pGpu, pRpc); + } + + return status; +} + +/* + * Transfers registry entries from CPU-RM to GSP-RM during init. + * + * Note that this is an asynchronous RPC. It is stuffed into the message queue + * before the GSP is booted. + */ +NV_STATUS rpcSetRegistry_v17_00 +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (IS_GSP_CLIENT(pGpu)) + { + NvU32 regTableSize; + NvU32 totalSize; + NvU32 remainingMessageSize; + PACKED_REGISTRY_TABLE *pRegTable; + rpc_message_header_v *largeRpcBuffer = NULL; + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, 0); + if (status != NV_OK) + return status; + + remainingMessageSize = pRpc->maxRpcSize - sizeof(rpc_message_header_v); + + // Compute size of registry table + status = osPackageRegistry(pGpu, NULL, ®TableSize); + if (status != NV_OK) + return status; + + // + // SET_REGISTRY is async RPC. If registry table exceeds size of + // message queue, we won't be able to send complete data and it's + // time to evaluate alternate implementations. Some ways to resolve + // this issue are use bigger queue, use sync RPC or allocate dedicated + // memory for sharing regkey table with GSP-RM. 
+ // + totalSize = sizeof(rpc_message_header_v) + regTableSize; + NV_ASSERT(totalSize < pRpc->pMessageQueueInfo->commandQueueSize); + + // Find out if we need to issue large RPC + if (regTableSize > remainingMessageSize) + { + largeRpcBuffer = portMemAllocNonPaged(totalSize); + if (largeRpcBuffer == NULL) + return NV_ERR_NO_MEMORY; + + portMemCopy(largeRpcBuffer, totalSize, + vgpu_rpc_message_header_v, sizeof(rpc_message_header_v)); + + pRegTable = (PACKED_REGISTRY_TABLE *)(&largeRpcBuffer->rpc_message_data); + } + else + { + pRegTable = (PACKED_REGISTRY_TABLE *)&rpc_message; + } + + status = osPackageRegistry(pGpu, pRegTable, ®TableSize); + if (status != NV_OK) + return status; + + if (largeRpcBuffer != NULL) + { + status = _issueRpcAsyncLarge(pGpu, pRpc, totalSize, largeRpcBuffer); + portMemFree(largeRpcBuffer); + } + else + { + vgpu_rpc_message_header_v->length = totalSize; + status = _issueRpcAsync(pGpu, pRpc); + } + } + + return status; +} + +NV_STATUS rpcDumpProtobufComponent_v18_12 +( + OBJGPU *pGpu, + OBJRPC *pRpc, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + NVDUMP_COMPONENT component +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + if (IS_GSP_CLIENT(pGpu)) + { + rpc_dump_protobuf_component_v18_12 *rpc_params = &rpc_message->dump_protobuf_component_v18_12; + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_DUMP_PROTOBUF_COMPONENT, + sizeof(*rpc_params)); + if (status != NV_OK) + return status; + + rpc_params->component = component; + rpc_params->nvDumpType = pNvDumpState->nvDumpType; + rpc_params->countOnly = ((pPrbEnc->flags & PRB_COUNT_ONLY) != 0); + rpc_params->bugCheckCode = pNvDumpState->bugCheckCode; + rpc_params->internalCode = pNvDumpState->internalCode; + rpc_params->bufferSize = NV_MIN(pRpc->maxRpcSize, prbEncBufLeft(pPrbEnc)); + + status = _issueRpcAndWait(pGpu, pRpc); + + // Add blob to protobuf. 
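+        //
+        // On success the reply carries the encoded dump bytes in
+        // rpc_params->blob; append them to the caller's protobuf encoder.
+        //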
+ if ((status == NV_OK) && rpc_params->bufferSize > 0) + status = prbEncStubbedAddBytes(pPrbEnc, rpc_params->blob, rpc_params->bufferSize); + } + + return status; +} + +NV_STATUS rpcRmfsInit_v15_00 +( + OBJGPU *pGpu, + OBJRPC *pRpc, + PMEMORY_DESCRIPTOR pStatusQueueMemDesc +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + return status; +} + +NV_STATUS rpcRmfsCloseQueue_v15_00 +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + return status; +} + +NV_STATUS rpcRmfsCleanup_v15_00 +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + return status; +} + +NV_STATUS rpcRmfsTest_v15_00 +( + OBJGPU *pGpu, + OBJRPC *pRpc, + NvU32 numReps, + NvU32 flags, + NvU32 testData1, + NvU32 testData2 +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + return status; +} + +#if NV_PRINTF_STRINGS_ALLOWED +void osAssertFailed(void); +#define RPC_LOCK_DEBUG_DUMP_STACK() \ + do { \ + static NvU64 previousRetAddr; \ + NvU64 retAddr = (NvU64)NV_RETURN_ADDRESS(); \ + if (retAddr != previousRetAddr) \ + { \ + previousRetAddr = retAddr; \ + osAssertFailed(); \ + } \ + /* Add an assert so it shows as test score regression */ \ + NV_ASSERT_FAILED("RPC locking violation - see kernel_log.txt"); \ + } while(0) +#else +#define RPC_LOCK_DEBUG_DUMP_STACK() +#endif + +NV_STATUS rpcRmApiControl_GSP +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParamStructPtr, + NvU32 paramsSize +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_message_header_v *large_message_copy = NULL; + rpc_gsp_rm_control_v03_00 *rpc_params = &rpc_message->gsp_rm_control_v03_00; + + const NvU32 fixed_param_size = sizeof(rpc_message_header_v) + sizeof(*rpc_params); + NvU32 message_buffer_remaining = pRpc->maxRpcSize - fixed_param_size; + NvU32 rpc_params_size; + NvU32 total_size; + + const NvU32 interface_id = (DRF_VAL(XXXX, _CTRL_CMD, _CLASS, cmd) << 8) | + DRF_VAL(XXXX, _CTRL_CMD, _CATEGORY, cmd); + const NvU32 message_id = DRF_VAL(XXXX, _CTRL_CMD, _INDEX, cmd); + NvU8 *pSerBuffer = NULL; + NvU32 serializedSize = 0; + NvU32 origParamsSize = paramsSize; + NvU32 gpuMaskRelease = 0; + + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + NV_PRINTF(LEVEL_WARNING, "Calling RPC RmControl 0x%08x without adequate locks!\n", cmd); + RPC_LOCK_DEBUG_DUMP_STACK(); + + NV_ASSERT_OK_OR_RETURN( + rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease)); + } + + // Attempt to calculate the serialized size of the param struct using FINN. + serializedSize = FinnRmApiGetSerializedSize(interface_id, message_id, pParamStructPtr); + + // If successful, this is a serializable API and rpc_params->params is a serialized buffer. + // otherwise this is a flat API and paramsSize is the param struct size + if (serializedSize != 0) + { + // Allocate twice the amount to account for the return buffer + paramsSize = 2 * serializedSize; + } + + // Initialize these values now that paramsSize is known + rpc_params_size = sizeof(*rpc_params) + paramsSize; + total_size = fixed_param_size + paramsSize; + + // Write the header assuming one record. 
If continuation records are used, + // then the length in the header will be overwritten by _issueRpcAndWaitLarge + NV_ASSERT_OK_OR_GOTO(status, + rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, rpc_params_size), + done); + + rpc_params->hClient = hClient; + rpc_params->hObject = hObject; + rpc_params->cmd = cmd; + rpc_params->paramsSize = paramsSize; + + // If we have a big payload control, we need to make a local copy... + if (message_buffer_remaining < paramsSize) + { + large_message_copy = portMemAllocNonPaged(total_size); + NV_ASSERT_OR_ELSE(large_message_copy != NULL, {status = NV_ERR_NO_MEMORY; goto done; }); + portMemCopy(large_message_copy, total_size, vgpu_rpc_message_header_v, fixed_param_size); + rpc_params = &large_message_copy->rpc_message_data->gsp_rm_control_v03_00; + message_buffer_remaining = total_size - fixed_param_size; + } + + // If this is a serializable API, attempt to serialize the param struct using FINN, + // otherwise do a flat memcpy + if (serializedSize != 0) + { + rpc_params->serialized = NV_TRUE; + pSerBuffer = (NvU8 *)rpc_params->params; + + // Serialize into the first half of the RPC buffer. + status = FinnRmApiSerializeDown(interface_id, message_id, pParamStructPtr, + &pSerBuffer, message_buffer_remaining); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GspRmControl: Serialization failed for cmd 0x%x with status %s (0x%x) at index 0x%llx\n", + cmd, nvAssertStatusToString(status), status, + (NvUPtr)(pSerBuffer - (NvU8 *)rpc_params->params)); + goto done; + } + } + else + { + rpc_params->serialized = NV_FALSE; + + if (paramsSize != 0) + { + if (pParamStructPtr == NULL) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + else + { + // Serialize RM control - for now a simple memcpy, could be more when FINN comes + if (portMemCopy(rpc_params->params, message_buffer_remaining, pParamStructPtr, paramsSize) == NULL) + { + status = NV_ERR_BUFFER_TOO_SMALL; + goto done; + } + } + } + else if (pParamStructPtr != NULL) + { + NV_PRINTF(LEVEL_ERROR, "Bad params: ptr " NvP64_fmt " size: 0x%x\n", + pParamStructPtr, paramsSize); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + else + { + // + // paramsSize = 0 and pParamStructPtr == NULL + // rpc_params->params is static, cannot be set to NULL. + // We will allow rpc_params->paramsSize = 0 and + // rpc_params->params != NULL from here, but in + // _rpcGspRmControl() have the logic that + // pc_params->paramsSize = 0 means no params. + // + } + } + + // Issue RPC + if (large_message_copy) + { + status = _issueRpcAndWaitLarge(pGpu, pRpc, total_size, large_message_copy, NV_TRUE); + } + else + { + status = _issueRpcAndWait(pGpu, pRpc); + } + + if (status == NV_OK) + { + // If FINN was used to serialize the params, they must be deserialized on the way back, + // otherwise do a flat memcpy + if (serializedSize != 0) + { + NvU8 *pRetBuffer = pSerBuffer; + + // Deserialize from the second half of the RPC buffer. 
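+            //
+            // pSerBuffer was advanced past the serialized request by
+            // FinnRmApiSerializeDown, so it now points at the second half of
+            // the doubled buffer, where the serialized reply is expected.
+            //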
+ status = FinnRmApiDeserializeUp(&pSerBuffer, paramsSize / 2, + pParamStructPtr, origParamsSize); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GspRmControl: Deserialization failed for cmd 0x%x with status %s (0x%x) at index 0x%llx\n", + cmd, nvAssertStatusToString(status), status, + (NvUPtr)(pSerBuffer - pRetBuffer)); + goto done; + } + } + else + { + if (paramsSize != 0) + { + portMemCopy(pParamStructPtr, paramsSize, rpc_params->params, paramsSize); + } + } + + if (rpc_params->status != NV_OK) + status = rpc_params->status; + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "GspRmControl failed: hClient=0x%08x; hObject=0x%08x; cmd=0x%08x; paramsSize=0x%08x; paramsStatus=0x%08x; status=0x%08x\n", + hClient, hObject, cmd, paramsSize, rpc_params->status, status); + } + +done: + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + // Free the local copy we might have allocated above + portMemFree(large_message_copy); + + return status; +} + +NV_STATUS rpcRmApiAlloc_GSP +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + rpc_gsp_rm_alloc_v03_00 *rpc_params = &rpc_message->gsp_rm_alloc_v03_00; + NvU32 paramsSize; + NvBool bNullAllowed; + + const NvU32 fixed_param_size = sizeof(rpc_message_header_v) + sizeof(*rpc_params); + const NvU32 message_buffer_remaining = pRpc->maxRpcSize - fixed_param_size; + + NvU32 gpuMaskRelease = 0; + + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + NV_PRINTF(LEVEL_WARNING, "Calling RPC RmAlloc 0x%04x without adequate locks!\n", hClass); + RPC_LOCK_DEBUG_DUMP_STACK(); + NV_ASSERT_OK_OR_RETURN( + rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease)); + } + + NV_ASSERT_OK_OR_GOTO(status, + rmapiGetClassAllocParamSize(¶msSize, NV_PTR_TO_NvP64(pAllocParams), &bNullAllowed, hClass), + done); + + // TODO CORERM-2934: Remove this when all client allocations take NV0000_ALLOC_PARAMETERS. + // Manually set paramsSize for client as a temporary WAR for bug 3183091, so that NV0000_ALLOC_PARAMETERS + // can be passed as pAllocParams while NvHandle is still listed in resource_list.h. + if ((hClass == NV01_ROOT) || (hClass == NV01_ROOT_CLIENT)) + { + paramsSize = sizeof(NV0000_ALLOC_PARAMETERS); + } + + if (pAllocParams == NULL && !bNullAllowed) + { + NV_PRINTF(LEVEL_ERROR, "NULL allocation params not allowed for class 0x%x\n", hClass); + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + NV_ASSERT_OK_OR_GOTO(status, + rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, + sizeof(rpc_gsp_rm_alloc_v03_00)), + done); + + rpc_params->hClient = hClient; + rpc_params->hParent = hParent; + rpc_params->hObject = hObject; + rpc_params->hClass = hClass; + rpc_params->paramsSize = pAllocParams ? 
paramsSize : 0; + + // Serialize RM alloc - for now a simple memcpy, could be more when FINN comes + if (paramsSize > 0) + { + if (portMemCopy(rpc_params->params, message_buffer_remaining, pAllocParams, paramsSize) == NULL) + { + status = NV_ERR_BUFFER_TOO_SMALL; + goto done; + } + } + + status = _issueRpcAndWait(pGpu, pRpc); + + if (status == NV_OK) + { + // Deserialize + if (paramsSize > 0) + { + portMemCopy(pAllocParams, paramsSize, rpc_params->params, paramsSize); + } + } + else + { + NV_PRINTF(LEVEL_ERROR, + "GspRmAlloc failed: hClient=0x%08x; hParent=0x%08x; hObject=0x%08x; hClass=0x%08x; paramsSize=0x%08x; paramsStatus=0x%08x; status=0x%08x\n", + hClient, hParent, hObject, hClass, paramsSize, rpc_params->status, status); + status = rpc_params->status; + } + +done: + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + return status; +} + +NV_STATUS rpcRmApiDupObject_GSP +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + NVOS55_PARAMETERS_v03_00 *rpc_params = &rpc_message->dup_object_v03_00.params; + NV_STATUS status; + NvU32 gpuMaskRelease = 0; + + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + NV_PRINTF(LEVEL_WARNING, "Calling RPC RmDupObject without adequate locks!\n"); + RPC_LOCK_DEBUG_DUMP_STACK(); + NV_ASSERT_OK_OR_RETURN( + rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease)); + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_DUP_OBJECT, sizeof(rpc_dup_object_v03_00)); + if (status != NV_OK) + goto done; + + rpc_params->hClient = hClient; + rpc_params->hParent = hParent; + rpc_params->hObject = *phObject; + rpc_params->hClientSrc = hClientSrc; + rpc_params->hObjectSrc = hObjectSrc; + rpc_params->flags = flags; + + status = _issueRpcAndWait(pGpu, pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GspRmDupObject failed: hClient=0x%08x; hParent=0x%08x; hObject=0x%08x; hClientSrc=0x%08x; hObjectSrc=0x%08x; flags=0x%08x; paramsStatus=0x%08x; status=0x%08x\n", + hClient, hParent, *phObject, hClientSrc, hObjectSrc, flags, rpc_params->status, status); + } +done: + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + return status; +} + +NV_STATUS rpcRmApiFree_GSP +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +) +{ + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + NVOS00_PARAMETERS_v03_00 *rpc_params = &rpc_message->free_v03_00.params; + NV_STATUS status = NV_OK; + NvU32 gpuMaskRelease = 0; + + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + NV_PRINTF(LEVEL_WARNING, "Calling RPC RmFree without adequate locks!\n"); + RPC_LOCK_DEBUG_DUMP_STACK(); + NV_ASSERT_OK_OR_RETURN( + rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_RPC, &gpuMaskRelease)); + } + + status = rpcWriteCommonHeader(pGpu, pRpc, NV_VGPU_MSG_FUNCTION_FREE, sizeof(rpc_free_v03_00)); + if (status != NV_OK) + goto done; + + rpc_params->hRoot = hClient; + rpc_params->hObjectParent = NV01_NULL_OBJECT; + rpc_params->hObjectOld = hObject; + + status = _issueRpcAndWait(pGpu, pRpc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GspRmFree failed: hClient=0x%08x; hObject=0x%08x; paramsStatus=0x%08x; 
status=0x%08x\n", + hClient, hObject, rpc_params->status, status); + } +done: + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + return status; +} diff --git a/src/nvidia/nv-kernel.ld b/src/nvidia/nv-kernel.ld new file mode 100644 index 000000000..89ce36699 --- /dev/null +++ b/src/nvidia/nv-kernel.ld @@ -0,0 +1,35 @@ +/* + * resman linker script + * + * Linking nv-kernel.o has several problems: + * + * (1) We build with '-ffunction-sections -fdata-sections' to put each + * function and data into separate ELF sections, so that the linker + * can distinguish separate functions and garbage collect dead code + * ('--gc-sections'). The linker is supposed to then merge sections + * together (e.g., all the ".text.*" into ".text", all the ".data.*" + * sections into ".data"). The linker doesn't seem to do this when + * linking a relocatable object file. + * + * (2) g++ puts inline functions, vtables, template functions, etc, in + * separate ".gnu.linkonce.*" sections. Duplicates are supposed to get + * collapsed at link time. The linker doesn't seem to do this when + * linking a relocatable object file. + * + * Resolve both of these problems by defining our own naive linker + * script to do the merging described above. + */ + +SECTIONS { + + .text : { *(.text) *(.text.*) *(.gnu.linkonce.t.*) } + + .data : { *(.data) *(.data.*) } + + .rodata : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r.*) } + + .bss : { *(.bss) *(.bss.*) } + + /* The rest of the sections ("orphaned sections") will just be copied from + the input to the output */ +} diff --git a/src/nvidia/src/kernel/compute/fabric.c b/src/nvidia/src/kernel/compute/fabric.c new file mode 100644 index 000000000..5e38729be --- /dev/null +++ b/src/nvidia/src/kernel/compute/fabric.c @@ -0,0 +1,66 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @file + * @brief This file contains the functions managing the NVLink fabric + */ + +#include "os/os.h" +#include "compute/fabric.h" + +void +fabricSetFmSessionFlags_IMPL +( + Fabric *pFabric, + NvU32 flags +) +{ + pFabric->flags = flags; +} + +NvU32 +fabricGetFmSessionFlags_IMPL +( + Fabric *pFabric +) +{ + return pFabric->flags; +} + +NV_STATUS +fabricConstruct_IMPL +( + Fabric *pFabric +) +{ + return NV_OK; +} + +void +fabricDestruct_IMPL +( + Fabric *pFabric +) +{ +} diff --git a/src/nvidia/src/kernel/compute/fm_session_api.c b/src/nvidia/src/kernel/compute/fm_session_api.c new file mode 100644 index 000000000..8d2d4b669 --- /dev/null +++ b/src/nvidia/src/kernel/compute/fm_session_api.c @@ -0,0 +1,215 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/*! 
+ * @file + * @brief This file contains the functions managing the FmSession + */ + +#include "core/core.h" +#include "os/os.h" +#include "compute/fm_session_api.h" +#include "class/cl000f.h" +#include "resserv/rs_client.h" +#include "core/system.h" +#include "core/locks.h" +#include "compute/fabric.h" +#include "Nvcm.h" +#include "gpu_mgr/gpu_mgr.h" +#include "kernel/gpu/gpu.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" +#include "rmapi/client.h" + +static void +_clearOutstandingComputeChannels(void) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuMask = 0; + NvU32 gpuCount = 0; + NvU32 gpuInstance = 0; + RM_API *pRmApi; + + NV_ASSERT(rmGpuLockIsOwner()); + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + if (pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_RECOVER_ALL_COMPUTE_CONTEXTS, + NULL, + 0) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to recover all compute channels for GPU %d\n", + pGpu->gpuInstance); + } + } +} + +static void +_clearFmState(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Fabric *pFabric = SYS_GET_FABRIC(pSys); + NvU32 flags = fabricGetFmSessionFlags(pFabric); + + if (!pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED)) + { + NV_PRINTF(LEVEL_INFO, + "Fabric manager state is already cleared.\n"); + return; + } + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED, NV_FALSE); + + NV_PRINTF(LEVEL_INFO, "Fabric manager state is cleared.\n"); + + if (FLD_TEST_REF(NV000F_FLAGS_CHANNEL_RECOVERY, _ENABLED, flags)) + { + _clearOutstandingComputeChannels(); + } +} + +NV_STATUS +fmsessionapiConstruct_IMPL +( + FmSessionApi *pFmSessionApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Fabric *pFabric = SYS_GET_FABRIC(pSys); + NvHandle hClient = pCallContext->pClient->hClient; + NV000F_ALLOCATION_PARAMETERS *pAllocParams = pParams->pAllocParams; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + osRmCapInitDescriptor(&pFmSessionApi->dupedCapDescriptor); + + if ((pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL) && !RMCFG_FEATURE_PLATFORM_MODS) + { + NV_PRINTF(LEVEL_ERROR, + "only supported for usermode clients\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED)) + { + NV_PRINTF(LEVEL_ERROR, "duplicate object creation\n"); + return NV_ERR_STATE_IN_USE; + } + + status = osRmCapAcquire(NULL, NV_RM_CAP_EXT_FABRIC_MGMT, + pAllocParams->capDescriptor, + &pFmSessionApi->dupedCapDescriptor); + + // + // On platforms where capability isn't implemented, + // enforce the admin-only check. 
+ // + if (status == NV_ERR_NOT_SUPPORTED) + { + if (rmclientIsAdminByHandle(hClient, pCallContext->secInfo.privLevel)) + { + status = NV_OK; + } + else + { + NV_PRINTF(LEVEL_ERROR, "insufficient permissions\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + else if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Capability validation failed\n"); + return status; + } + + if (pFabric != NULL) + { + + fabricSetFmSessionFlags(pFabric, pAllocParams->flags); + } + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED, NV_TRUE); + + return NV_OK; +} + +void +fmsessionapiDestruct_IMPL +( + FmSessionApi *pFmSessionApi +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + NV_PRINTF(LEVEL_INFO, "Fabric manager is shutting down.\n"); + + _clearFmState(); + + osRmCapRelease(pFmSessionApi->dupedCapDescriptor); + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED, NV_FALSE); +} + +NV_STATUS +fmsessionapiCtrlCmdSetFmState_IMPL +( + FmSessionApi *pFmSessionApi +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED)) + { + NV_PRINTF(LEVEL_INFO, + "Fabric manager state is already set.\n"); + return NV_OK; + } + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED, NV_TRUE); + + NV_PRINTF(LEVEL_INFO, "Fabric manager state is set.\n"); + + return NV_OK; +} + +NV_STATUS +fmsessionapiCtrlCmdClearFmState_IMPL +( + FmSessionApi *pFmSessionApi +) +{ + _clearFmState(); + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/compute/mps_api.c b/src/nvidia/src/kernel/compute/mps_api.c new file mode 100644 index 000000000..f1d43cdd3 --- /dev/null +++ b/src/nvidia/src/kernel/compute/mps_api.c @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing the MpsApi + * + *****************************************************************************/ + +#include "compute/mps_api.h" +#include "class/cl900e.h" + +NV_STATUS +mpsApiConstruct_IMPL +( + MpsApi *pMpsApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +mpsApiDestruct_IMPL +( + MpsApi *pMpsApi +) +{ + return; +} diff --git a/src/nvidia/src/kernel/core/bin_data.c b/src/nvidia/src/kernel/core/bin_data.c new file mode 100644 index 000000000..5cd2e1f59 --- /dev/null +++ b/src/nvidia/src/kernel/core/bin_data.c @@ -0,0 +1,383 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * @brief Bindata APIs implememtation + */ + +#include "core/bin_data.h" +#include "bin_data_pvt.h" +#include "os/os.h" +#include "nvRmReg.h" + +/* + * Private helper functions + */ +static NV_STATUS _bindataWriteStorageToBuffer(const BINDATA_STORAGE *pBinStorage, NvU8 *pBuffer); + + +/*! + * Initialize a BINDATA_RUNTIME_INFO structure for use, this function does not allocate any + * memory for data storage, only the data structure itself. + * + * @param[in] pBinStorage The BINDATA_STORAGE structure related to this + * binary resource. + * + * @param[out] ppBinInfo Location where the prepared BINDATA_RUNTIME_INFO data structure + * will be stored to. + * + * @return 'NV_OK' If all initialization operations were successful. 
+ * + */ +NV_STATUS +bindataAcquire +( + const BINDATA_STORAGE *pBinStorage, + PBINDATA_RUNTIME_INFO *ppBinInfo +) +{ + NV_STATUS status = NV_OK; + PBINDATA_RUNTIME_INFO pBinInfo = NULL; + + // paged memory access check + osPagedSegmentAccessCheck(); + + NV_ASSERT_OR_RETURN(ppBinInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBinStorage != NULL, NV_ERR_INVALID_ARGUMENT); + + // resource data should never be NULL + NV_ASSERT_OR_RETURN(((const BINDATA_STORAGE_PVT *) pBinStorage)->pData != NULL, NV_ERR_INVALID_ARGUMENT); + + + // allocate memory for the internal structure + pBinInfo = portMemAllocNonPaged(sizeof(BINDATA_RUNTIME_INFO)); + if (pBinInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, + "Memory allocation of %u bytes failed, return code %u\n", + (NvU32)sizeof(BINDATA_RUNTIME_INFO), status); + DBG_BREAKPOINT(); + goto FAIL; + } + + portMemSet(pBinInfo, 0, sizeof(BINDATA_RUNTIME_INFO)); + + pBinInfo->pBinStoragePvt = (const BINDATA_STORAGE_PVT *) pBinStorage; + + // if resource is compressed, also initialize the GZ state struct + if (pBinInfo->pBinStoragePvt->bCompressed) + { + if ((status = utilGzAllocate((NvU8*)(pBinInfo->pBinStoragePvt->pData), + pBinInfo->pBinStoragePvt->actualSize, + &(pBinInfo->pGzState))) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "gz state allocation faileded, return code %u\n", + status); + DBG_BREAKPOINT(); + goto FAIL; + } + NV_ASSERT(pBinInfo->pGzState); + } + + *ppBinInfo = pBinInfo; + + return status; + +FAIL: + portMemFree(pBinInfo); + + return status; +} + +/*! + * Acquire helper function to implement data decompression. This function + * inflates the amount of bytes given by nBytes and write to buffer. + * + * This helper does NOT allocate any memory, so buffer is assumed to have + * at least nBytes in size. + * + * Being exposed to public interface, this function, however, is only designed + * for special uses. + * + * @param[in] pBinInfo Bindata runtime information + * @param[out] pBuffer Buffer area for writing the acquired data + * @param[in] nBytes Number of bytes to acquire (chunk size) + * + * @return 'NV_OK' If the chunk acquire was successful. + * + */ +NV_STATUS +bindataGetNextChunk +( + PBINDATA_RUNTIME_INFO pBinInfo, + NvU8 *pBuffer, + NvU32 nBytes +) +{ + NvU32 nBytesInflated; + + // paged memory access check + osPagedSegmentAccessCheck(); + + NV_ASSERT_OR_RETURN(pBinInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBuffer != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(nBytes + pBinInfo->currDataPos <= pBinInfo->pBinStoragePvt->actualSize, + NV_ERR_INVALID_ARGUMENT); + + // if the resource is compressed, the pGzState structure must be initialized + if (pBinInfo->pBinStoragePvt->bCompressed == NV_TRUE && pBinInfo->pGzState == NULL) + { + NV_PRINTF(LEVEL_ERROR, "must call bindataAcquire() first!\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_DATA; + } + + if (pBinInfo->pBinStoragePvt->bCompressed) + { + if ((nBytesInflated = utilGzGetData(pBinInfo->pGzState, + pBinInfo->currDataPos, + nBytes, + pBuffer)) != nBytes) + { + NV_PRINTF(LEVEL_ERROR, + "failed to get inflated data, got %u bytes, expecting %u\n", + nBytesInflated, nBytes); + DBG_BREAKPOINT(); + return NV_ERR_INFLATE_COMPRESSED_DATA_FAILED; + } + } + else + { + portMemCopy(pBuffer, nBytes, (NvU8*)(pBinInfo->pBinStoragePvt->pData) + pBinInfo->currDataPos, nBytes); + } + + pBinInfo->currDataPos += nBytes; + + return NV_OK; +} + +/*! + * Release the previously acquired binary resource. 
+ * + * @param[in] pBinInfo Bindata runtime information + * + * @return void + */ +void +bindataRelease +( + PBINDATA_RUNTIME_INFO pBinInfo +) +{ + if (pBinInfo == NULL) + { + return; + } + + if (pBinInfo->pGzState != NULL) + { + utilGzDestroy(pBinInfo->pGzState); + } + + portMemFree(pBinInfo); +} + +/*! + * Retrieve data from Bindata storage and write it to the given memory buffer. When + * file overriding feature is enabled and the file exists in the target directory, + * copy the file to buffer instead. + * + * @param[in] pBinStorage Bindata storage + * @param[in] pBuffer Pointer of given buffer + * + * @return 'NV_OK' If the ucode was written to memory buffer successfully + */ +NV_STATUS +bindataWriteToBuffer +( + const BINDATA_STORAGE *pBinStorage, + NvU8 *pBuffer, + NvU32 bufferSize +) +{ + // paged memory access check + osPagedSegmentAccessCheck(); + + NV_ASSERT_OR_RETURN(bufferSize >= bindataGetBufferSize(pBinStorage), NV_ERR_BUFFER_TOO_SMALL); + + return _bindataWriteStorageToBuffer(pBinStorage, pBuffer); +} + + +/*! + * Retrieve data from Bindata storage and write it to the given memory buffer. + * + * @param[in] pBinStorage Bindata storage + * @param[in] pBuffer Pointer of given buffer + * + * @return 'NV_OK' If the ucode was written to memory buffer successfully + */ +NV_STATUS +_bindataWriteStorageToBuffer +( + const BINDATA_STORAGE *pBinStorage, + NvU8 *pBuffer +) +{ + NV_STATUS status = NV_OK; + PBINDATA_RUNTIME_INFO pBinInfo = NULL; + + NV_ASSERT_OR_RETURN(pBinStorage != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBuffer != NULL, NV_ERR_INVALID_ARGUMENT); + + if ((status = bindataAcquire(pBinStorage, &pBinInfo)) != NV_OK) + { + DBG_BREAKPOINT(); + goto EXIT; + } + + if ((status = bindataGetNextChunk(pBinInfo, + pBuffer, + pBinInfo->pBinStoragePvt->actualSize)) != NV_OK) + { + DBG_BREAKPOINT(); + goto EXIT; + } + +EXIT: + + bindataRelease(pBinInfo); + return status; +} + + +NvU32 +bindataGetBufferSize +( + const BINDATA_STORAGE *pBinStorage +) +{ + // paged memory access check + osPagedSegmentAccessCheck(); + + if (pBinStorage == NULL) + { + return 0; + } + + return ((const BINDATA_STORAGE_PVT *) pBinStorage)->actualSize; +} + + +/*! 
+ * Get Bindata storage from the given Bindata Archive + * + * @param[in] pBinArchive Pointer to Bindata Archive + * @param[in] binName Name of the bindata + * + * @return Pointer of the target Bindata Storage or + * NULL if cannot find the target of the given name + */ +const BINDATA_STORAGE * +bindataArchiveGetStorage( + const BINDATA_ARCHIVE *pBinArchive, + const char *binName +) +{ + // paged memory access check + osPagedSegmentAccessCheck(); + + if ((pBinArchive == NULL) || (binName == NULL)) + { + return NULL; + } + + NvU32 i; + NvLength len = portStringLength(binName) + 1; + for (i = 0 ; i < pBinArchive->entryNum; i++) + { + if (portStringCompare(binName, pBinArchive->entries[i].name, len) == 0) + { + bindataMarkReferenced(pBinArchive->entries[i].pBinStorage); + return pBinArchive->entries[i].pBinStorage; + } + } + return NULL; +} + + +// File Overriding Feature is only enabled under MODS + +void bindataMarkReferenced(const BINDATA_STORAGE *pBinStorage) +{ + if (BINDATA_IS_MUTABLE) + { + // Cast away the constness + BINDATA_STORAGE_PVT *pMutablePvt = (BINDATA_STORAGE_PVT *)pBinStorage; + NV_ASSERT(pMutablePvt->pData != NULL || pMutablePvt->actualSize != 0); + pMutablePvt->bReferenced = NV_TRUE; + } +} + +void* bindataGetNextUnreferencedStorage(const BINDATA_STORAGE **iter, NvU32 *pDataSize) +{ + extern BINDATA_STORAGE_PVT g_bindata_pvt; + extern const NvU32 g_bindata_pvt_count; + + const BINDATA_STORAGE_PVT *iterPvt = *(const BINDATA_STORAGE_PVT **)iter; + const BINDATA_STORAGE_PVT *firstPvt = &g_bindata_pvt; + const BINDATA_STORAGE_PVT *lastPvt = firstPvt + g_bindata_pvt_count - 1; + + // This API makes no sense if the data is const, so just bail out early. + NV_ASSERT_OR_RETURN(BINDATA_IS_MUTABLE, NULL); + + if (iterPvt == NULL || (iterPvt >= firstPvt && iterPvt < lastPvt)) + { + // Passing in NULL means start iterating. + iterPvt = (iterPvt == NULL) ? firstPvt : (iterPvt + 1); + while (iterPvt <= lastPvt) + { + if (!iterPvt->bReferenced && iterPvt->pData != NULL) + { + *iter = (const BINDATA_STORAGE *)iterPvt; + *pDataSize = iterPvt->compressedSize; + return (void*)iterPvt->pData; + } + iterPvt++; + } + } + + *iter = NULL; + *pDataSize = 0; + return NULL; +} + +void bindataDestroyStorage(BINDATA_STORAGE *storage) +{ + BINDATA_STORAGE_PVT *pBindataPvt = (BINDATA_STORAGE_PVT *)storage; + pBindataPvt->pData = NULL; + pBindataPvt->actualSize = 0; + pBindataPvt->compressedSize = 0; +} diff --git a/src/nvidia/src/kernel/core/bin_data_pvt.h b/src/nvidia/src/kernel/core/bin_data_pvt.h new file mode 100644 index 000000000..2ef4b9ee7 --- /dev/null +++ b/src/nvidia/src/kernel/core/bin_data_pvt.h @@ -0,0 +1,91 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _BINDATA_PRIVATE_H +#define _BINDATA_PRIVATE_H + +#include "core/core.h" +#include "lib/zlib/inflate.h" +#include "core/bin_data.h" + +/************************************************************************************************************** +* +* File: bin_data_private.h +* +* Description: +* Private data structure for binary data management API +* +**************************************************************************************************************/ + +// +// WARNING: This header should not be included directly (outside of bindata +// impl) +// +// TODO: Clean up the references that have snuck in and move outside of the +// public module directory +// + + +// +// Private data structure for binary data management +// + +// +// Binary data management static information (generated by bindata.pl) +// +typedef struct +{ + NvU32 actualSize; // size of (uncompressed) pData + NvU32 compressedSize; // size of (compressed) pData array + const void * pData; // pointer to the raw binary (whether compressed or not) data + NvBool bCompressed : 1; // is compressed? + NvBool bFileOverrideSupported : 1; // contain information for file overriding? + NvBool bReferenced : 1; // Has this data been referenced before? +} BINDATA_STORAGE_PVT, *PBINDATA_STORAGE_PVT; + +// +// Binary data management runtime information +// +struct BINDATA_RUNTIME_INFO +{ + const BINDATA_STORAGE_PVT *pBinStoragePvt; // pointer to the static init struct + PGZ_INFLATE_STATE pGzState; // used by gzip + NvU32 currDataPos; // position where next chunk acquire should start at +}; + +// +// This knob controls whether the data will be placed into .rodata section and +// be considered constant for the lifetime of RM, or if it can be modified +// during execution. Right now, we only need to modify it on GSP to reclaim +// the memory as general purpose heap. +// +#define BINDATA_IS_MUTABLE RMCFG_FEATURE_PLATFORM_GSP +#if BINDATA_IS_MUTABLE +#define BINDATA_CONST +#else +#define BINDATA_CONST const +#endif + +void bindataMarkReferenced(const BINDATA_STORAGE *pBinStorage); + +#endif // _BINDATA_PRIVATE_H diff --git a/src/nvidia/src/kernel/core/hal/hal.c b/src/nvidia/src/kernel/core/hal/hal.c new file mode 100644 index 000000000..bbde4fb3f --- /dev/null +++ b/src/nvidia/src/kernel/core/hal/hal.c @@ -0,0 +1,121 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/system.h" +#include "core/hal_mgr.h" +#include "core/hal.h" + +#include "vgpu/rpc.h" + +#include "g_hal_private.h" + +PMODULEDESCRIPTOR +objhalGetModuleDescriptor_IMPL(OBJHAL *thisHal) +{ + return &thisHal->moduleDescriptor; +} + +// +// registerHalModule() is referred by functions in generated file g_hal_private.h +// So, placed it here instead of gt_hal_register.h to avoid duplications of this +// function as g_hal_private.h is included by several files +// +NV_STATUS +registerHalModule(NvU32 halImpl, const HAL_IFACE_SETUP *pHalSetIfaces) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHALMGR *pHalMgr = SYS_GET_HALMGR(pSys); + OBJHAL *pHal; + PMODULEDESCRIPTOR pMod; + NV_STATUS rmStatus; + + // create a HAL object + rmStatus = halmgrCreateHal(pHalMgr, halImpl); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + // retrieve the HAL object + pHal = HALMGR_GET_HAL(pHalMgr, halImpl); + NV_ASSERT(pHal); + + // init the iface descriptor lists + pMod = objhalGetModuleDescriptor(pHal); + + // point to rmconfig structure that can init our engines' interfaces + pMod->pHalSetIfaces = pHalSetIfaces; + + return NV_OK; +} + +// Helper to install IP_VERSIONS function pointers into pObj->hal fn ptr table +// based on IP_VER register value. +// Uses tables and code in g_FOO_private.h (generated by rmconfig) + +NV_STATUS ipVersionsSetupHal +( + OBJGPU *pGpu, + void * pDynamic_v, // eg: pDisp + IGrp_ipVersions_getInfo getInfoFn // eg: disp_iGrp_ipVersions_getInfo() +) +{ + IGRP_IP_VERSIONS_TABLE_INFO info; + const IGRP_IP_VERSIONS_ENTRY *pVer; + const IGRP_IP_VERSION_RANGE *pRange; + NV_STATUS rmStatus; + Dynamic *pDynamic = (Dynamic*)pDynamic_v; + Object *pObj = dynamicCast(pDynamic, Object); + + // nothing to do if IP_VERSION is invalid + if ( ! IsIPVersionValid(pObj)) + return NV_OK; + + info.pGpu = pGpu; + info.pDynamic = pDynamic; + + // call into the hal to finish filling in the table + rmStatus = getInfoFn(&info); + if (rmStatus != NV_OK) + return rmStatus; + + // perform setup for *all* matching variants + for (pVer = info.pTable; pVer < info.pTable + info.numEntries; pVer++) + { + // Each version has 1 or more "version ranges". + // Invoke this version's setup fn if any of it's ranges match. 
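+        //
+        // For example (hypothetical values): with IPVersion(pObj) == 0x0402 and
+        // an entry whose range is v0 = 0x0400 .. v1 = 0x04ff, that entry's
+        // ifacesInstallFn runs once and the entry's remaining ranges are
+        // skipped via the break below.
+        //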
+ for (pRange = pVer->pRanges; pRange < (pVer->pRanges + pVer->numRanges); pRange++) + { + if ((IPVersion(pObj) >= pRange->v0) && (IPVersion(pObj) <= pRange->v1)) + { + pVer->ifacesInstallFn(&info); + break; + } + } + } + + // invoke rmconfig-generated wrapup function to handle any overrides & verification + rmStatus = info.ifacesWrapupFn(&info); + + return rmStatus; +} diff --git a/src/nvidia/src/kernel/core/hal/hals_all.c b/src/nvidia/src/kernel/core/hal/hals_all.c new file mode 100644 index 000000000..10feb000f --- /dev/null +++ b/src/nvidia/src/kernel/core/hal/hals_all.c @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Rotuines ***************************\ +* * +* Module: hals_all.c * +* Hal interface init routines for files generated by rmconfig * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/eng_desc.h" + +#include "vgpu/rpc.h" + +// +// These defines affect what we see in halgen generated headers. +// +// If RMCFG_ENGINE_SETUP is not already defined, then setup for +// a monolithic hal. +// +// The per-gpu-family hal setups #include this file with the RMCFG +// setup defines already defined to pull in just the interfaces +// needed for that gpu family. +// + +#if ! defined(RMCFG_ENGINE_SETUP) + +# define RMCFG_ENGINE_SETUP 1 // pull in per-gpu engine interface's + +# define RMCFG_HAL_SETUP_ALL 1 // monolithic - ALL configured gpus' support in this file +# define RMCFG_HAL_SUPPORT_ALL 1 // not required, but keeps us honest + +#endif // ! defined RMCFG_ENGINE_SETUP + +// Pull in generated code to setup each engine's hal interfaces for each gpu +#include "g_hal_register.h" +#include "g_hal_private.h" + diff --git a/src/nvidia/src/kernel/core/hal/hals_stub.c b/src/nvidia/src/kernel/core/hal/hals_stub.c new file mode 100644 index 000000000..24702e945 --- /dev/null +++ b/src/nvidia/src/kernel/core/hal/hals_stub.c @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * @brief Stubs for HAL routines that are not implement for all GPUs + */ + +#include "gpu/gpu.h" + +#include "vgpu/rpc.h" +#include "vgpu/vgpu_events.h" + +typedef struct OBJGPIO_HAL OBJGPIO_HAL, *POBJGPIO_HAL; +typedef struct GPIO_HAL_IFACES GPIO_HAL_IFACES; +typedef struct _def_gpio_pin_state GPIO_PIN_STATE, *PGPIO_PIN_STATE; +typedef struct _GPIO_FUNC_LIST_ITEM GPIO_FUNC_LIST_ITEM, *PGPIO_FUNC_LIST_ITEM; + +// pull in stub interfaces generated by rmconfig +#include "g_hal_stubs.h" diff --git a/src/nvidia/src/kernel/core/hal/info_block.c b/src/nvidia/src/kernel/core/hal/info_block.c new file mode 100644 index 000000000..25ccd9580 --- /dev/null +++ b/src/nvidia/src/kernel/core/hal/info_block.c @@ -0,0 +1,171 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* + * @file + * @brief Implementation for info block utility + */ + +#include "os/os.h" +#include "core/info_block.h" + +// +// getInfoPtr +// +// Return pointer to HAL implementation-specific private data info block. 
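+//
+// Illustrative use of an info block list (the engine pointer, list-head field
+// and data ID below are hypothetical placeholders):
+//
+//     MY_PRIVATE *pPriv = addInfoPtr(&pEng->infoList, MY_DATA_ID, sizeof(*pPriv));
+//     ...
+//     pPriv = getInfoPtr(pEng->infoList, MY_DATA_ID);   // look it up later
+//     ...
+//     deleteInfoPtr(&pEng->infoList, MY_DATA_ID);       // free it at teardown
+//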
+// +void * +getInfoPtr(PENG_INFO_LINK_NODE head, NvU32 dataId) +{ + PENG_INFO_LINK_NODE curNode = head; + + while (curNode && (curNode->dataId != dataId)) + curNode = curNode->next; + + if (curNode == NULL) + return NULL; + + return curNode->infoBlock; +} + +// +// testInfoPtr +// +// Returns info weather HAL implementation-specific private data info block is allocated. +// +NvBool +testInfoPtr(PENG_INFO_LINK_NODE head, NvU32 dataId) +{ + PENG_INFO_LINK_NODE curNode = head; + + while (curNode && (curNode->dataId != dataId)) + curNode = curNode->next; + + if (curNode == NULL) + return NV_FALSE; + + return NV_TRUE; +} + +// +// createLinkNode +// +// Allocate and initialize new info block. +// +static PENG_INFO_LINK_NODE +createLinkNode(NvU32 dataId, NvU32 size) +{ + PENG_INFO_LINK_NODE newNode; + NV_STATUS rmStatus; + + newNode = portMemAllocNonPaged(sizeof(ENG_INFO_LINK_NODE)); + if (newNode == NULL) + { + rmStatus = NV_ERR_NO_MEMORY; + NV_ASSERT(rmStatus == NV_OK); + return NULL; + } + + portMemSet(newNode, 0, sizeof(ENG_INFO_LINK_NODE)); + + newNode->infoBlock = portMemAllocNonPaged(size); + if (newNode->infoBlock == NULL) + { + rmStatus = NV_ERR_NO_MEMORY; + portMemFree(newNode); + NV_ASSERT(rmStatus == NV_OK); + return NULL; + } + + portMemSet(newNode->infoBlock, 0, size); + + newNode->dataId = dataId; + + return newNode; +} + +// +// addInfoPtr +// +// Create new HAL privata data block and add it to specified list. +// +void * +addInfoPtr(PENG_INFO_LINK_NODE *head, NvU32 dataId, NvU32 size) +{ + PENG_INFO_LINK_NODE curNode = *head; + PENG_INFO_LINK_NODE newNode = createLinkNode(dataId, size); + + if (newNode == NULL) + return NULL; + + while (curNode && curNode->next) + curNode = curNode->next; + + if (!curNode) + *head = newNode; + else + curNode->next = newNode; + + return newNode->infoBlock; +} + +// +// deleteInfoPtr +// +// Destroy HAL privata data block and remove it from specified list. +// +void +deleteInfoPtr(PENG_INFO_LINK_NODE *head, NvU32 dataId) +{ + PENG_INFO_LINK_NODE curNode = *head; + + if (!curNode) + return ; + + // check list head + if (curNode->dataId == dataId) + { + *head = curNode->next; + NV_ASSERT(curNode->infoBlock); + portMemFree(curNode->infoBlock); + portMemFree(curNode); + return ; + } + + // search for it + while (curNode->next && (curNode->next->dataId != dataId)) + curNode = curNode->next; + + if (curNode->next) + { + PENG_INFO_LINK_NODE delNode; + + delNode = curNode->next; + curNode->next = curNode->next->next; + NV_ASSERT(delNode->infoBlock); + portMemFree(delNode->infoBlock); + portMemFree(delNode); + } + + return ; +} diff --git a/src/nvidia/src/kernel/core/hal_mgr.c b/src/nvidia/src/kernel/core/hal_mgr.c new file mode 100644 index 000000000..65b7272ef --- /dev/null +++ b/src/nvidia/src/kernel/core/hal_mgr.c @@ -0,0 +1,229 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/hal_mgr.h" +#include "core/hal.h" + +#include "g_hal_archimpl.h" + +// NOTE: string arguments only get used when NV_PRINTF_STRINGS_ALLOWED is true. +#if NV_PRINTF_STRINGS_ALLOWED +static const char *_halmgrGetStringRepForHalImpl(OBJHALMGR *pHalMgr, HAL_IMPLEMENTATION); +#endif + +NV_STATUS +halmgrConstruct_IMPL +( + OBJHALMGR *pHalMgr +) +{ + HAL_IMPLEMENTATION i; + + // + // Make sure all the possible handles to the Hal Objects + // have been zeroed out. Also initialize the implementation + // and public id's in the hal descriptor list. + // + for (i = 0; i < HAL_IMPL_MAXIMUM; i++) + pHalMgr->pHalList[i] = NULL; + + return NV_OK; +} + +void +halmgrDestruct_IMPL +( + OBJHALMGR *pHalMgr +) +{ + NvU32 i; + + for (i = 0; i < HAL_IMPL_MAXIMUM; i++) + { + objDelete(pHalMgr->pHalList[i]); + pHalMgr->pHalList[i] = NULL; + } +} + +NV_STATUS +halmgrCreateHal_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 halImpl +) +{ + OBJHAL *pHal; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(halImpl < HAL_IMPL_MAXIMUM, NV_ERR_INVALID_ARGUMENT); + + status = objCreate(&pHal, pHalMgr, OBJHAL); + if (status != NV_OK) + return status; + + // Store away the object pointer for this particular HAL object + pHalMgr->pHalList[halImpl] = pHal; + + return NV_OK; +} + +POBJHAL +halmgrGetHal_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 halImpl +) +{ + if (halImpl < HAL_IMPL_MAXIMUM) + return pHalMgr->pHalList[halImpl]; + else + return NULL; +} + +static NvBool +_halmgrIsTegraSupported +( + NvU32 publicHalID, + NvU32 socChipID +) +{ + NvU32 chipid, majorRev; + + chipid = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _CHIPID, socChipID); + majorRev = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _MAJORREV, socChipID); + + // WAR: The majorrev of t234 shows 0xa on fmodel instead of 0x4 + if ((chipid == 0x23) && (majorRev == 0xa)) + { + majorRev = 0x4; + } + + // Convert to the HIDREV field format of chip-config + return ((chipid << 4) | majorRev) == chipID[publicHalID].hidrev; +} + +static NvBool +_halmgrIsChipSupported +( + OBJHALMGR *pHalMgr, + NvU32 publicHalID, + NvU32 pPmcBoot0, + NvU32 pPmcBoot42 +) +{ + NvBool retVal = NV_FALSE; + + if (chipID[publicHalID].hidrev) + return _halmgrIsTegraSupported(publicHalID, pPmcBoot0); + + if (pPmcBoot42) + { + if ((DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pPmcBoot42) == chipID[publicHalID].arch) && + (DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, pPmcBoot42) == chipID[publicHalID].impl)) + { + retVal = 
NV_TRUE; + } + } + else + { + // Fail safely on older GPUs where pPmcBoot42 is not supported + retVal = NV_FALSE; + } + + return retVal; +} + +NV_STATUS +halmgrGetHalForGpu_IMPL +( + OBJHALMGR *pHalMgr, + NvU32 pPmcBoot0, + NvU32 pPmcBoot42, + NvU32 *pHalImpl +) +{ + HAL_IMPLEMENTATION halImpl; + OBJHAL *pHal; + + for (halImpl = 0; halImpl < HAL_IMPL_MAXIMUM; halImpl++) + { + pHal = pHalMgr->pHalList[halImpl]; + + // skip impls that have no hal object + if (pHal == NULL) + continue; + + if (_halmgrIsChipSupported(pHalMgr, halImpl, pPmcBoot0, pPmcBoot42)) + { + *pHalImpl = halImpl; + +#if NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "Matching %s = 0x%x to HAL_IMPL_%s\n", + pPmcBoot42 ? "PMC_BOOT_42" : "PMC_BOOT_0", + pPmcBoot42 ? pPmcBoot42 : pPmcBoot0, + _halmgrGetStringRepForHalImpl(pHalMgr, halImpl)); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "Matching 0x%x to %u\n", + pPmcBoot42 ? "PMC_BOOT_42" : "PMC_BOOT_0", + halImpl); +#endif // NV_PRINTF_STRINGS_ALLOWED + + return NV_OK; + } + } + + return NV_ERR_NOT_SUPPORTED; +} + +// NOTE: string arguments only get used when NV_PRINTF_STRINGS_ALLOWED is true. +#if NV_PRINTF_STRINGS_ALLOWED +static const char * +_halmgrGetStringRepForHalImpl +( + OBJHALMGR *pHalMgr, + HAL_IMPLEMENTATION halImpl +) +{ + const char *chipName = "UNKNOWN"; + static const struct + { + HAL_IMPLEMENTATION halImpl; + const char *name; + } halImplNames[] = { HAL_IMPL_NAME_LIST }; // generated by rmconfig into g_hal.h + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS32(halImplNames); i++) + { + if (halImplNames[i].halImpl == halImpl) + { + chipName = halImplNames[i].name; + break; + } + } + + return chipName; +} +#endif + diff --git a/src/nvidia/src/kernel/core/locks.c b/src/nvidia/src/kernel/core/locks.c new file mode 100644 index 000000000..669726446 --- /dev/null +++ b/src/nvidia/src/kernel/core/locks.c @@ -0,0 +1,1813 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This module contains the multiprocessor synchronization +* framework provided to the RM kernel. 
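+*
+*       A minimal usage sketch (illustrative only; the exact signature of the
+*       release counterpart is not shown in this excerpt and is assumed here):
+*
+*           if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE,
+*                                 RM_LOCK_MODULES_NONE) == NV_OK)
+*           {
+*               // ... access GPU state under the GPU locks ...
+*               rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);  // assumed signature
+*           }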
+* +******************************************************************************/ + +#include "core/core.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "diagnostics/tracer.h" +#include "objtmr.h" +#include +#include +#include +#include "kernel/gpu/intr/intr.h" +#include + +// +// GPU lock +// +// The GPU lock is used to synchronize access across all IRQ levels. +// +// Synchronization is done using a binary semaphore. See bug 1716608 for details +// +typedef struct +{ + PORT_SEMAPHORE * pWaitSema; // Binary semaphore. See bug 1716608 + volatile NvS32 count; + volatile NvBool bRunning; + volatile NvBool bSignaled; + OS_THREAD_HANDLE threadId; + NvU16 priority; + NvU16 priorityPrev; + NvU64 timestamp; +} GPULOCK; + +// +// GPU lock info +// +// This structure contains all the state needed +// to manage the GPU locks. +// +// Access to this structure's fields is regulated by both pLock spinlock and +// the global RM API lock. +// The lock order is logically: API -> Spinlock -> Individual GPU locks, +// however, it is possible to release and reacquire the spinlock while holding +// a GPU lock. This is acceptable because no waits will be performed with +// the spinlock held. +// +typedef struct +{ + // + // Spinlock that protects access to everything in this structure. + // + PORT_SPINLOCK * pLock; + + // + // Mask of GPUs that can be locked. Starts out as zero and + // then modified as GPUs are attached to the system. + // Requires holding API, pLock or any GPU to read, API+pLock+GPUs to write. + // + NvU32 gpusLockableMask; + + // + // Mask of GPUs that have been "hidden" by the rmGpuLockHide routine. + // Atomically read/written + // + NvU32 volatile gpusHiddenMask; + + // + // Mask of GPUs currently locked. + // Requires holding pLock to read or write + // + NvU32 gpusLockedMask; + + // + // Mask of GPU locks currently frozen. + // + NvU32 gpusFreezeMask; + + // + // Tracks largest valid gpuInstance for which a lock has been allocated. + // This is useful to pare down the # of loop iterations we need to make + // when searching for lockable GPUs. + // Requires holding API, pLock or any GPU to read, API+pLock+GPUs to write. + // + NvU32 maxLockableGpuInst; + + // + // Array of per-GPU locks indexed by gpuInstance. + // + GPULOCK gpuLocks[NV_MAX_DEVICES]; + + // + // Lock call trace info. + // + LOCK_TRACE_INFO traceInfo; +} GPULOCKINFO; + +static GPULOCKINFO rmGpuLockInfo; + +static NV_STATUS _rmGpuLocksAcquire(NvU32, NvU32, NvU32, void *, NvU32 *); +static NvU32 _rmGpuLocksRelease(NvU32, NvU32, OBJGPU *, void *); +static NvBool _rmGpuLockIsOwner(NvU32); + +// +// rmGpuLockInfoInit +// +// Initialize GPU lock info state. +// +NV_STATUS +rmGpuLockInfoInit(void) +{ + portMemSet(&rmGpuLockInfo, 0, sizeof(rmGpuLockInfo)); + + rmGpuLockInfo.pLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (rmGpuLockInfo.pLock == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + rmGpuLockInfo.maxLockableGpuInst = (NvU32)-1; + + return NV_OK; +} + +// +// rmGpuLockInfoDestroy +// +// Initialize GPU lock info state. +// +void +rmGpuLockInfoDestroy(void) +{ + // + // We expect all locks to have been freed by this point. + // + NV_ASSERT_OR_RETURN_VOID(rmGpuLockInfo.gpusLockableMask == 0); + + if (rmGpuLockInfo.pLock != NULL) + portSyncSpinlockDestroy(rmGpuLockInfo.pLock); +} + +// +// rmGpuLockAlloc +// +// Allocate GPU lock. 
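+//
+// The newly created per-GPU lock starts out free (count == 1) and only becomes
+// visible to the acquire/release paths once the instance is added to
+// gpusLockableMask below; an intr mask lock for the same instance is allocated
+// alongside it.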
+// +NV_STATUS +rmGpuLockAlloc(NvU32 gpuInst) +{ + GPULOCK *pGpuLock; + NvU32 gpuMask, gpuLockedMask; + NV_STATUS status; + NvU64 threadId = ~0; + NvU64 timestamp; + + // validate gpuInst argument + NV_ASSERT_OR_RETURN((gpuInst < NV_MAX_DEVICES), NV_ERR_INVALID_ARGUMENT); + + pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst]; + + // test to make sure lock hasn't already been allocated + NV_ASSERT_OR_RETURN(((rmGpuLockInfo.gpusLockableMask & NVBIT(gpuInst)) == 0), + NV_ERR_INVALID_STATE); + + // TODO: RM-1492 MODS does not hold API lock when allocating GPUs. + NV_ASSERT(rmApiLockIsOwner()); + + // allocate intr mask lock + status = rmIntrMaskLockAlloc(gpuInst); + if (status != NV_OK) + return status; + + // clear struct for good measure and then init everything + portMemSet(pGpuLock, 0, sizeof(*pGpuLock)); + + pGpuLock->pWaitSema = portSyncSemaphoreCreate(portMemAllocatorGetGlobalNonPaged(), 0); + if (pGpuLock->pWaitSema == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + pGpuLock->count = 1; + pGpuLock->bRunning = NV_FALSE; + pGpuLock->bSignaled = NV_FALSE; + pGpuLock->threadId = ~(NvU64)0; + + // + // Before updating the gpusLockableMask value we need to grab the + // locks for all *other* GPUs. This ensures that the gpusLockableMask + // value cannot change in between acquire/release calls issued by + // a different thread. Reading this is safe under API lock. + // + gpuMask = rmGpuLockInfo.gpusLockableMask; + + // LOCK: acquire GPU locks + status = _rmGpuLocksAcquire(gpuMask, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT, + NV_RETURN_ADDRESS(), &gpuLockedMask); + if (status == NV_WARN_NOTHING_TO_DO) + { + // Verify that this is a valid case - i.e. we're attaching first GPU. + NV_ASSERT(gpuMask == 0); + status = NV_OK; + } + if (status != NV_OK) + goto done; + + portSyncSpinlockAcquire(rmGpuLockInfo.pLock); + // add the GPU to the lockable mask + rmGpuLockInfo.gpusLockableMask |= NVBIT(gpuInst); + + // save this gpuInst if it's the largest we've seen so far + if (rmGpuLockInfo.maxLockableGpuInst == (NvU32)-1) + { + rmGpuLockInfo.maxLockableGpuInst = gpuInst; + } + else + { + if (gpuInst > rmGpuLockInfo.maxLockableGpuInst) + rmGpuLockInfo.maxLockableGpuInst = gpuInst; + } + + + threadId = portThreadGetCurrentThreadId(); + osGetCurrentTick(×tamp); + INSERT_LOCK_TRACE(&rmGpuLockInfo.traceInfo, + NV_RETURN_ADDRESS(), + lockTraceAlloc, + gpuInst, 0, + threadId, + 0, 0, + timestamp); + + portSyncSpinlockRelease(rmGpuLockInfo.pLock); + + // UNLOCK: release GPU locks + _rmGpuLocksRelease(gpuLockedMask, GPUS_LOCK_FLAGS_NONE, NULL, NV_RETURN_ADDRESS()); + +done: + if (status != NV_OK) + { + if (pGpuLock->pWaitSema) + portSyncSemaphoreDestroy(pGpuLock->pWaitSema); + + // free intr mask lock + rmIntrMaskLockFree(gpuInst); + } + + return status; +} + +// +// rmGpuLockFree +// +// We call this routine with the API lock held, but not the GPUs +// +// This routine must always free the lock. +// +void +rmGpuLockFree(NvU32 gpuInst) +{ + NvU32 i; + GPULOCK *pGpuLock; + NV_STATUS status; + NvU32 gpuMask, gpuAttachMask, gpuLockedMask; + NvU64 threadId = ~0; + NvU64 timestamp; + + // validate gpuInst argument + NV_ASSERT_OR_RETURN_VOID((gpuInst < NV_MAX_DEVICES)); + // TODO: RM-1492 MODS does not hold API lock when allocating GPUs. + NV_ASSERT(rmApiLockIsOwner()); + + pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst]; + + // + // Don't acquire/release gpu locks for the gpus which had been detached and + // destroyed. 
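+    // For example (hypothetical masks): with gpusLockableMask = 0x7 and
+    // gpuAttachMask = 0x5 after instance 1 was detached, gpuMask below becomes
+    // 0x5, so only instances 0 and 2 are locked while instance 1's lock is
+    // being freed.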
+ // NOTE: The gpuInst GPU has already been detached at this point, so gpuMask + // will not include it. + // + gpumgrGetGpuAttachInfo(NULL, &gpuAttachMask); + // Reading rmGpuLockInfo.gpusLockableMask is safe under API lock + gpuMask = (rmGpuLockInfo.gpusLockableMask & gpuAttachMask); + + // LOCK: acquire GPU locks + status = _rmGpuLocksAcquire(gpuMask, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_DESTROY, + NV_RETURN_ADDRESS(), &gpuLockedMask); + if (status != NV_OK && status != NV_WARN_NOTHING_TO_DO) + return; + + portSyncSpinlockAcquire(rmGpuLockInfo.pLock); + // remove the GPU from the lockable mask + rmGpuLockInfo.gpusLockableMask &= ~NVBIT(gpuInst); + + threadId = portThreadGetCurrentThreadId(); + osGetCurrentTick(×tamp); + INSERT_LOCK_TRACE(&rmGpuLockInfo.traceInfo, + NV_RETURN_ADDRESS(), + lockTraceFree, + gpuInst, 0, + threadId, + 0, 0, + timestamp); + + // + // Reset max lockable gpuInstance if necessary. + // + if (gpuInst == rmGpuLockInfo.maxLockableGpuInst) + { + if (rmGpuLockInfo.gpusLockableMask != 0) + { + for (i = rmGpuLockInfo.maxLockableGpuInst; i != (NvU32)-1; i--) + { + if (rmGpuLockInfo.gpusLockableMask & NVBIT(i)) + break; + } + + rmGpuLockInfo.maxLockableGpuInst = i; + } + else + { + // no locks left so start over + rmGpuLockInfo.maxLockableGpuInst = (NvU32)-1; + } + } + portSyncSpinlockRelease(rmGpuLockInfo.pLock); + + // UNLOCK: release GPU locks + _rmGpuLocksRelease(gpuLockedMask, GPUS_LOCK_FLAGS_NONE, NULL, NV_RETURN_ADDRESS()); + + if (pGpuLock->pWaitSema) + { + // + // At this point, we may still have threads waiting on the semaphore, + // and possibly one thread holding the lock. + // Wake up all threads that are waiting, and wait until the holding one + // is done. + // + while (pGpuLock->count <= 0) // volatile read + { + portSyncSemaphoreRelease(pGpuLock->pWaitSema); + osSchedule(); // Yield execution + portSyncSemaphoreAcquire(pGpuLock->pWaitSema); + } + portSyncSemaphoreDestroy(pGpuLock->pWaitSema); + } + + portMemSet(pGpuLock, 0, sizeof(*pGpuLock)); + + // free intr mask lock + rmIntrMaskLockFree(gpuInst); +} + +// +// _gpuLocksAcquireDisableInterrupts +// +// Disable GPUs Interrupts thus blocking the ISR from +// entering. +// +static void _gpuLocksAcquireDisableInterrupts(NvU32 gpuInst, NvU32 flags) +{ + OBJGPU *pGpu = gpumgrGetGpu(gpuInst); + + // + // Handle case where we're asked to acquire a lock for a GPU that + // has been removed from the ODB (e.g. from RmCleanupNvAdapter). + // + if (pGpu == NULL) + return; + + // if hidden GPU then we skip out... + if (rmGpuLockIsHidden(pGpu)) + return; + + if (osIsSwPreInitOnly(pGpu->pOsGpuInfo)) + return; + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + return; + + + if (osLockShouldToggleInterrupts(pGpu)) + { + Intr *pIntr = GPU_GET_INTR(pGpu); + NvBool isIsr = !!(flags & GPUS_LOCK_FLAGS_COND_ACQUIRE); + NvBool bBcEnabled = gpumgrGetBcEnabledStatus(pGpu); + + // Always disable intrs for cond code + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + osDisableInterrupts(pGpu, isIsr); + + if ((pIntr != NULL) && pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING) && + (isIsr == NV_FALSE) ) + { + NvU64 oldIrql; + NvU32 intrMaskFlags; + + oldIrql = rmIntrMaskLockAcquire(pGpu); + + intrMaskFlags = intrGetIntrMaskFlags(pIntr); + intrMaskFlags &= ~INTR_MASK_FLAGS_ISR_SKIP_MASK_UPDATE; + intrSetIntrMaskFlags(pIntr, intrMaskFlags); + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + // During non-cond RM code, allow some intrs to come in. 
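+                // Either only the display/vblank interrupt is re-enabled here,
+                // or the pre-computed set of engines allowed to interrupt while
+                // RM holds the lock (intrMask.engMaskUnblocked) is programmed.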
+ if (pIntr->getProperty(pIntr, PDB_PROP_INTR_SIMPLIFIED_VBLANK_HANDLING_FOR_CTRL_TREE)) + { + intrSetDisplayInterruptEnable_HAL(pGpu, pIntr, NV_TRUE, NULL /* threadstate */); + } + else + { + intrSetIntrMask_HAL(pGpu, pIntr, &pIntr->intrMask.engMaskUnblocked, NULL /* threadstate */); + } + } + else + { + // Lazy case - we will lazily disable Intrs via the ISR as seen + } + + intrSetIntrEnInHw_HAL(pGpu, pIntr, intrGetIntrEn(pIntr), NULL /* threadstate */); + + rmIntrMaskLockRelease(pGpu, oldIrql); + } + + gpumgrSetBcEnabledStatus(pGpu, bBcEnabled); + } +} + +// +// _rmGpuLocksAcquire +// +// Acquire the set of locks specified by gpuMask in ascending gpuInstance +// order. +// +static NV_STATUS +_rmGpuLocksAcquire(NvU32 gpuMask, NvU32 flags, NvU32 module, void *ra, NvU32 *pGpuLockedMask) +{ + NV_STATUS status = NV_OK; + NvU32 gpuInst; + NvU32 gpuMaskLocked = 0; + GPULOCK *pGpuLock; + NvBool bHighIrql, bCondAcquireCheck; + NvU32 maxLockableGpuInst; + NvU64 threadId = portThreadGetCurrentThreadId(); + NvU64 priority = 0; + NvU64 priorityPrev = 0; + NvU64 timestamp; + NvBool bLockAll = NV_FALSE; + + bHighIrql = (portSyncExSafeToSleep() == NV_FALSE); + bCondAcquireCheck = ((flags & GPUS_LOCK_FLAGS_COND_ACQUIRE) != 0); + + if (pGpuLockedMask) + *pGpuLockedMask = 0; + + threadPriorityBoost(&priority, &priorityPrev); + portSyncSpinlockAcquire(rmGpuLockInfo.pLock); + + // + // If caller wishes to lock all GPUs then convert incoming mask + // to set that are actually lockable. + // + if (gpuMask == GPUS_LOCK_ALL) + { + gpuMask = rmGpuLockInfo.gpusLockableMask; + bLockAll = NV_TRUE; + } + + // + // We may get a gpuMask of zero during setup of the first GPU attached. + // + if (gpuMask == 0) + { + status = NV_WARN_NOTHING_TO_DO; + goto done; + } + + // + // If a read-only lock was requested, check to see if the module is allowed + // to take read-only locks + // + if ((flags & GPU_LOCK_FLAGS_READ) && (module != RM_LOCK_MODULES_NONE)) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + if ((pSys->gpuLockModuleMask & NVBIT(module)) == 0) + { + flags &= ~RMAPI_LOCK_FLAGS_READ; + } + } + + if ((gpuMask & rmGpuLockInfo.gpusLockableMask) != gpuMask) + { + NV_PRINTF(LEVEL_WARNING, + "Attempting to lock GPUs (mask=%08x) that are not lockable (mask=%08x). Will skip non-lockables.\n", + gpuMask, rmGpuLockInfo.gpusLockableMask); + gpuMask &= rmGpuLockInfo.gpusLockableMask; + // Nothing to do if no requested GPUs are lockable + if (gpuMask == 0) + { + status = NV_ERR_INVALID_REQUEST; + goto done; + } + } + // Cache global variable so it doesn't change in the middle of the loop. + maxLockableGpuInst = rmGpuLockInfo.maxLockableGpuInst; + if (maxLockableGpuInst >= NV_MAX_DEVICES) + { + DBG_BREAKPOINT(); + status = NV_ERR_INVALID_STATE; + goto done; + } + + if (flags & GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE) + { + NvU32 ownedMask = rmGpuLocksGetOwnedMask(); + + // In safe mode, we never attempt to acquire locks we already own.. + gpuMask &= ~ownedMask; + // If we already own everything we need, just bail early. 
+ if (gpuMask == 0) + { + status = NV_WARN_NOTHING_TO_DO; + goto done; + } + + // If we own a higher order lock than one of the needed ones, we are + // violating the locking order and need to do a conditional acquire + // clz32(0) == ctz(0) == 32: + // owned=0b00110000, needed=0b00001100: (4 < (32-28)), bCond=FALSE + // owned=0b00110010, needed=0b00001100: (1 < (32-28)), bCond=TRUE + // owned=0b00010000, needed=0b11000011: (4 < (32-24)), bCond=TRUE + // owned=0b00000000, needed=0b00001100: (32 < (32-28)), bCond=FALSE + // owned=0b00000001, needed=0b00000000: (0 < (32-32)), bCond=FALSE + if (portUtilCountTrailingZeros32(ownedMask) < (32-portUtilCountLeadingZeros32(gpuMask))) + { + bCondAcquireCheck = NV_TRUE; + } + } + + // + // There are two scenarios where we want to check to see if all of + // the target locks are available before proceeding: + // + // 1- this is a conditional acquire call that must fail if any + // of the locks aren't currently available + // 2- we are called at an elevated IRQL and cannot sleep waiting + // for a lock + // + if (bCondAcquireCheck || bHighIrql) + { + for (gpuInst = 0; + gpuInst <= maxLockableGpuInst; + gpuInst++) + { + if ((gpuMask & NVBIT(gpuInst)) == 0) + continue; + + pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst]; + + // + // The conditional check takes precedence here. + // + if (bCondAcquireCheck) + { + if (pGpuLock->bRunning == NV_TRUE) + { + status = NV_ERR_STATE_IN_USE; + goto done; + } + } + else if (bHighIrql) + { + if (pGpuLock->count <= 0) + { + status = NV_ERR_STATE_IN_USE; + goto done; + } + } + } + } + + // + // Now (attempt) to acquire the locks... + // + for (gpuInst = 0; + gpuInst <= maxLockableGpuInst; + gpuInst++) + { + // skip any not in the mask + if ((gpuMask & NVBIT(gpuInst)) == 0) + continue; + + // + // We might have released the spinlock while sleeping on a previous + // semaphore, so check if current GPU wasn't deleted during that time + // + if ((NVBIT(gpuInst) & rmGpuLockInfo.gpusLockableMask) == 0) + { + NV_PRINTF(LEVEL_NOTICE, + "GPU lock %d freed while we were waiting on a previous lock\n", + gpuInst); + continue; + } + + pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst]; + + // + // Check to see if the lock is not free...we should only fall into this + // case if we can actually tolerate waiting for it. + // + if (!bCondAcquireCheck && (pGpuLock->count <= 0)) + { + // + // Assert that this is not already the owner of the GpusLock + // (the lock will cause a hang if acquired recursively) + // + // Note that conditional acquires of the semaphore return + // IN_USE if the lock is already taken by this or another + // thread (ie they don't hang). + // + // We have to place the assert after the conditional acquire + // check, otherwise it could happen that: + // + // 1. We acquire the semaphore in one DPC function, but don't + // release it before finishing (a later DPC will + // rmGpuLockSetOwner and then release it). + // 2. A DPC timer function sneaks in, tries to grab the lock + // conditionally. + // 3. On Vista, the timer DPC could be running under the same + // threadid as the first DPC, so the code will believe that + // the timer DPC is the owner, triggering the assert with + // a false positive. 
+ // + // (the scenario described above causing the false positive + // assert, happens with the stack trace: + // osTimerCallback->osRun1HzCallbacksNow->>rmGpuLocksAcquire) + // + if (_rmGpuLockIsOwner(NVBIT(gpuInst))) + { + NV_PRINTF(LEVEL_INFO, + "GPU lock is already acquired by this thread\n"); + NV_ASSERT(0); + // + // TODO: RM-1493 undo previous acquires + // + status = NV_ERR_STATE_IN_USE; + goto done; + } + + portAtomicDecrementS32(&pGpuLock->count); + do + { + portSyncSpinlockRelease(rmGpuLockInfo.pLock); + portSyncSemaphoreAcquire(pGpuLock->pWaitSema); + portSyncSpinlockAcquire(rmGpuLockInfo.pLock); + + if ((rmGpuLockInfo.gpusLockableMask & NVBIT(gpuInst)) == 0) + { + NV_PRINTF(LEVEL_WARNING, + "GPU lock %d freed while threads were still waiting.\n", + gpuInst); + // Can't release the semaphore while a spinlock is held + portSyncSpinlockRelease(rmGpuLockInfo.pLock); + portSyncSemaphoreRelease(pGpuLock->pWaitSema); + // Loop assumes spinlock is held, so reacquire + portSyncSpinlockAcquire(rmGpuLockInfo.pLock); + portAtomicIncrementS32(&pGpuLock->count); + // Skip this GPU, keep trying any others. + goto next_gpu_instance; + } + pGpuLock->bSignaled = NV_FALSE; + } + while (pGpuLock->bRunning); + } + else + { + portAtomicDecrementS32(&pGpuLock->count); + } + + // indicate that we are running + pGpuLock->bRunning = NV_TRUE; + + // save off thread that owns this GPUs lock + osGetCurrentThread(&pGpuLock->threadId); + + // now disable interrupts + _gpuLocksAcquireDisableInterrupts(gpuInst, flags); + + // mark this one as locked + gpuMaskLocked |= NVBIT(gpuInst); + + // add acquire record to GPUs lock trace + osGetCurrentTick(×tamp); + INSERT_LOCK_TRACE(&rmGpuLockInfo.traceInfo, + ra, + lockTraceAcquire, + gpuInst, gpuMask, + threadId, + bHighIrql, + (NvU16)priority, + timestamp); + pGpuLock->priority = priority; + pGpuLock->priorityPrev = priorityPrev; + pGpuLock->timestamp = timestamp; + +next_gpu_instance: + ; + } + + // update gpusLockedMask + rmGpuLockInfo.gpusLockedMask |= gpuMaskLocked; + + // Log any changes to the GPU configuration due to race conditions + if (maxLockableGpuInst != rmGpuLockInfo.maxLockableGpuInst) + { + NV_PRINTF(LEVEL_NOTICE, + "Max lockable instance changed from %d to %d\n", + maxLockableGpuInst, rmGpuLockInfo.maxLockableGpuInst); + } + if (gpuMaskLocked != gpuMask) + { + NV_PRINTF(LEVEL_WARNING, + "Locked a different GPU mask (0x%08x) than requested (0x%08x) @ %p.\n", + gpuMaskLocked, gpuMask, ra); + } + + // Log any case where we wanted to but failed to lock ALL GPUs. + if (bLockAll) + { + NV_ASSERT(gpuMaskLocked == rmGpuLockInfo.gpusLockedMask); + NV_ASSERT(gpuMaskLocked == rmGpuLockInfo.gpusLockableMask); + } + + if (pGpuLockedMask) + *pGpuLockedMask = gpuMaskLocked; + + RMTRACE_RMLOCK(_GPUS_LOCK_ACQUIRE); + +done: + portSyncSpinlockRelease(rmGpuLockInfo.pLock); + + if (status != NV_OK) + { + threadPriorityRestore(); + return status; + } + + return (gpuMaskLocked == 0) ? NV_WARN_NOTHING_TO_DO : NV_OK; +} + +// +// rmGpuLocksAcquire +// +NV_STATUS +rmGpuLocksAcquire(NvU32 flags, NvU32 module) +{ + NV_STATUS status; + NvU32 gpusLockedMask = 0; + + status = _rmGpuLocksAcquire(GPUS_LOCK_ALL, flags, module, + NV_RETURN_ADDRESS(), &gpusLockedMask); + // + // Since the request was to acquire locks for all GPUs, if there are none + // we consider the request fulfilled. + // Set to NV_OK since most callers check for NV_OK explicitly. 
+ // + if (status == NV_WARN_NOTHING_TO_DO) + status = NV_OK; + // + // If we successfully locked all GPUs but there's still more not locked + // it means that an additional GPU was added in the meantime somehow. + // Release everything and try again (once only to prevent infinite loops) + // + if (status == NV_OK && gpusLockedMask != rmGpuLockInfo.gpusLockableMask) + { + // + // On Windows, at high IRQL we can't signal the semaphore. So we + // use a second pGpu to schedule a DPC to do that. Pick one that + // we've already locked for that purpose. + // + OBJGPU *pDpcGpu = gpumgrGetGpu(portUtilCountTrailingZeros32(gpusLockedMask)); + + if (_rmGpuLocksRelease(gpusLockedMask, flags, pDpcGpu, NV_RETURN_ADDRESS()) == NV_SEMA_RELEASE_SUCCEED) + { + // All locks successfully released without a DPC scheduled, can re-attempt. + status = _rmGpuLocksAcquire(GPUS_LOCK_ALL, flags, module, NV_RETURN_ADDRESS(), &gpusLockedMask); + // If it happened again, just release and return + if (status == NV_OK && gpusLockedMask != rmGpuLockInfo.gpusLockableMask) + { + _rmGpuLocksRelease(gpusLockedMask, flags, pDpcGpu, NV_RETURN_ADDRESS()); + status = NV_ERR_INVALID_LOCK_STATE; + } + } + else + { + status = NV_ERR_INVALID_LOCK_STATE; + } + } + + if (status == NV_OK) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + if (pCallContext != NULL) + { + NV_ASSERT(pCallContext->pLockInfo != NULL); + if (pCallContext->pLockInfo != NULL) + pCallContext->pLockInfo->state |= RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + } + } + + return status; +} + +// +// rmGpuGroupLockAcquire: Takes lock for only those gpus specified by the gpuGrpId +// +NV_STATUS +rmGpuGroupLockAcquire +( + NvU32 gpuInst, + GPU_LOCK_GRP_ID gpuGrpId, + NvU32 flags, + NvU32 module, + GPU_MASK* pGpuMask +) +{ + NvU32 status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // + // QuadroSync (previously known as GSync) is a cross GPU feature that + // synchronizes display across multiple GPUs. See changelist 16809243. If + // GSync is enabled, acquire locks for all GPUs. + // + const NvBool bGSync = pSys->getProperty(pSys, PDB_PROP_SYS_IS_GSYNC_ENABLED); + + if (pGpuMask == NULL) + return NV_ERR_INVALID_ARGUMENT; + + if (bGSync) + { + *pGpuMask = GPUS_LOCK_ALL; + } + else + { + status = rmGpuGroupLockGetMask(gpuInst, gpuGrpId, pGpuMask); + } + if (status != NV_OK) + return status; + + status = _rmGpuLocksAcquire(*pGpuMask, flags, module, NV_RETURN_ADDRESS(), pGpuMask); + if (status == NV_WARN_NOTHING_TO_DO) + { + // + // Callers using SAFE_LOCK_UPGRADE will often consider this normal, + // so silence the print if the flag is passed. + // + NV_PRINTF_COND((flags & GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE), LEVEL_INFO, LEVEL_NOTICE, + "Nothing to lock for gpuInst=%d, gpuGrpId=%d, gpuMask=0x%08x\n", + gpuInst, gpuGrpId, *pGpuMask); + status = NV_OK; + } + + if ((status == NV_OK) && bGSync) + { + GPU_MASK deviceGpuMask = 0; + rmGpuGroupLockGetMask(gpuInst, GPU_LOCK_GRP_DEVICE, &deviceGpuMask); + // Verify that we actually locked *this* device, not all others. + if ((*pGpuMask & deviceGpuMask) != deviceGpuMask) + { + // + // On Windows, at high IRQL we can't signal the semaphore. So we + // use a second pGpu to schedule a DPC to do that. Pick one that + // we've already locked for that purpose. 
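+            // portUtilCountTrailingZeros32() selects the lowest-numbered locked
+            // instance; e.g. (hypothetically) *pGpuMask == 0x0C picks GPU
+            // instance 2.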
+ // + OBJGPU *pDpcGpu = gpumgrGetGpu(portUtilCountTrailingZeros32(*pGpuMask)); + + _rmGpuLocksRelease(*pGpuMask, flags, pDpcGpu, NV_RETURN_ADDRESS()); + // Notify that pGpu is gone and finish + status = NV_ERR_INVALID_DEVICE; + } + } + + return status; +} + +// +// rmGpuGroupLockIsOwner +// +// Checks if current thread already owns locks for a given gpu group. +// If NV_TRUE, it returns the current group mask +// +NvBool +rmGpuGroupLockIsOwner(NvU32 gpuInst, GPU_LOCK_GRP_ID gpuGrpId, GPU_MASK* pGpuMask) +{ + NvBool bIsOwner = NV_FALSE; + NvBool bReleaseSpinlock = NV_FALSE; + + if (rmGpuGroupLockGetMask(gpuInst, gpuGrpId, pGpuMask) != NV_OK) + return NV_FALSE; + + if ((gpuGrpId == GPU_LOCK_GRP_ALL) && (*pGpuMask == GPUS_LOCK_ALL)) + { + bReleaseSpinlock = NV_TRUE; + portSyncSpinlockAcquire(rmGpuLockInfo.pLock); + *pGpuMask = rmGpuLockInfo.gpusLockableMask; + } + + bIsOwner = _rmGpuLockIsOwner(*pGpuMask); + + if (bReleaseSpinlock) + portSyncSpinlockRelease(rmGpuLockInfo.pLock); + + return bIsOwner; +} + +// +// rmDeviceGpuLocksAcquire +// +NV_STATUS +rmDeviceGpuLocksAcquire(OBJGPU *pGpu, NvU32 flags, NvU32 module) +{ + NvU32 gpuMask; + NvU32 gpuLockedMask = 0; + NV_STATUS status; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // + // Make sure that the GPU is in usable condition before continuing further. + // If pGpu is invalid or NULL, using pGpu structure members can cause a crash. + // It is possible that another thread has teared-down the GPU as part of TDR recovery procedure. + // gpumgrGetGpuMask function called immediately after the check accesses pGpu->deviceInstance. + // If pGpu is invalid, this will cause a crash. + // See bugs 200183282, 200118671 + // + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + return NV_ERR_INVALID_DEVICE; + + // XXX: TOCTTOU issue here, but nothing to be done. Use rmGpuGroupLockXxx API instead. + // TODO: RM-1140 - Migrate to the other API and delete this one. + gpuMask = gpumgrGetGpuMask(pGpu); + + // + // QuadroSync (previously known as GSync) is a cross GPU feature that + // synchronizes display across multiple GPUs. See changelist 16809243. If + // GSync is enabled, acquire locks for all GPUs. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_IS_GSYNC_ENABLED)) + { + status = _rmGpuLocksAcquire(GPUS_LOCK_ALL, flags, module, + NV_RETURN_ADDRESS(), &gpuLockedMask); + + // Verify that we actually locked *this* pGpu, not all others. + if (status == NV_OK && ((gpuLockedMask & gpuMask) != gpuMask)) + { + // + // On Windows, at high IRQL we can't signal the semaphore. So we + // use a second pGpu to schedule a DPC to do that. Pick one that + // we've already locked for that purpose. + // + OBJGPU *pDpcGpu = gpumgrGetGpu(portUtilCountTrailingZeros32(gpuLockedMask)); + + _rmGpuLocksRelease(gpuLockedMask, flags, pDpcGpu, NV_RETURN_ADDRESS()); + // Notify that pGpu is gone and finish + status = NV_ERR_INVALID_DEVICE; + } + } + else + { + status = _rmGpuLocksAcquire(gpuMask, flags, module, + NV_RETURN_ADDRESS(), &gpuLockedMask); + // + // Verify that the SLI configuration did not change for this pGpu + // + if (status == NV_OK) + { + if (gpuMask != gpumgrGetGpuMask(pGpu)) + { + _rmGpuLocksRelease(gpuLockedMask, flags, pGpu, NV_RETURN_ADDRESS()); + status = NV_ERR_INVALID_DEVICE; + } + } + } + + // Even if we get NV_OK, there are a couple of edge cases to handle + if (status == NV_OK) + { + // + // Currently, release-and-free sequence for GPU locks is not atomic, so + // we could theoretically acquire a perfectly valid lock for a non-existent + // device. 
In this case, just release and return error. + // + if (!gpumgrIsGpuPointerValid(pGpu)) + { + // We don't need a pDpcGpu here as this can't happen at DIRQL. + _rmGpuLocksRelease(gpuLockedMask, flags, NULL, NV_RETURN_ADDRESS()); + status = NV_ERR_INVALID_DEVICE; + } + else + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + if (pCallContext != NULL) + { + NV_ASSERT(pCallContext->pLockInfo != NULL); + if (pCallContext->pLockInfo != NULL) + pCallContext->pLockInfo->state |= RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED; + } + } + } + // + // We're trying to acquire the lock for a specific device. If we get + // NOTHING_TO_DO, that means the device has been detached and pGpu is no + // longer valid. + // + if (status == NV_WARN_NOTHING_TO_DO) + status = NV_ERR_INVALID_DEVICE; + + return status; +} + +// +// _gpuLocksReleaseEnableInterrupts +// +// Enable GPUs Interrupts thus allowing the ISR to +// enter. +// +static void _gpuLocksReleaseEnableInterrupts(NvU32 gpuInst, NvU32 flags) +{ + OBJGPU *pGpu; + + pGpu = gpumgrGetGpu(gpuInst); + + // + // Handle case where we're asked to release a lock for a GPU + // after it has been removed from the GpuMgr (e.g. from RmCleanUpNvAdapter). + // + if (pGpu == NULL) + return; + + // if hidden GPU then we skip out... + if (rmGpuLockIsHidden(pGpu)) + return; + + if (osIsSwPreInitOnly(pGpu->pOsGpuInfo)) + return; + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + return; + + if (osLockShouldToggleInterrupts(pGpu)) + { + Intr *pIntr = GPU_GET_INTR(pGpu); + + // Make sure we only enable interrupts unicast + NvBool bBcEnabled = gpumgrGetBcEnabledStatus(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + if ((pIntr != NULL) && + pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING) ) + { + NvU64 oldIrql; + NvU32 intrMaskFlags; + MC_ENGINE_BITVECTOR engines; + + oldIrql = rmIntrMaskLockAcquire(pGpu); + + intrMaskFlags = intrGetIntrMaskFlags(pIntr); + intrMaskFlags |= INTR_MASK_FLAGS_ISR_SKIP_MASK_UPDATE; + intrSetIntrMaskFlags(pIntr, intrMaskFlags); + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + // Allow all intrs to be reflected and come in. 
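+                // This is the release-side counterpart of the acquire path,
+                // which had restricted the mask to intrMask.engMaskUnblocked
+                // while the locks were held.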
+ bitVectorSetAll(&engines); + intrSetIntrMask_HAL(pGpu, pIntr, &engines, NULL /* threadstate */); + } + else + { + // Lazy case - Enable all engine intrs that may have been disabled via the ISR + bitVectorClrAll(&pIntr->intrMask.engMaskIntrsDisabled); + } + + rmIntrMaskLockRelease(pGpu, oldIrql); + } + else + { + if ((pGpu->bIsSOC == NV_FALSE) && !IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) + { + NV_ASSERT(intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL) != INTERRUPT_TYPE_HARDWARE); + } + } + osEnableInterrupts(pGpu); + + gpumgrSetBcEnabledStatus(pGpu, bBcEnabled); + } +} + +static void _gpuLocksReleaseHandleDeferredWork(NvU32 gpuMask) +{ + OBJGPU *pGpu; + NvU32 i = 0; + NvU32 gpuInstance = 0; + + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (GPU_GET_INTR(pGpu) == NULL) + continue; + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + continue; + + for (i = 0; i < MAX_DEFERRED_CMDS; i++) + { + if (pGpu->pRmCtrlDeferredCmd[i].pending == RMCTRL_DEFERRED_READY) + { + // ignore failure here since caller won't be able to receive it + if (rmControl_Deferred(&pGpu->pRmCtrlDeferredCmd[i]) != NV_OK) + { + NV_ASSERT(0); + } + } + } + + // Delay reset until we have the lock + osHandleDeferredRecovery(pGpu); + + // + // Bug 1798647: Defer osIsr until semaphore release when contention is high + // osIsr does not perform its task if it is unable to acquire its locks + // immediately. In Multi-GPU environment where contention on locks is high, + // interrupts are getting starved. Ensure that osIsr is not starved by + // deferring it to lock release time if lock is not available. + // This WAR should be removed once per-GPU locks are implemented. + // + osDeferredIsr(pGpu); + } +} + +// +// _rmGpuLocksRelease +// +// Release the locks specified by gpuMask in descending gpuInstance order. +// +static NvU32 +_rmGpuLocksRelease(NvU32 gpuMask, NvU32 flags, OBJGPU *pDpcGpu, void *ra) +{ + static volatile NvU32 bug200413011_WAR_WakeUpMask; + GPULOCK *pGpuLock; + NvU32 gpuMaskWakeup = 0; + NvU32 gpuInst; + NvU32 highestInstanceInGpuMask; + NvBool bSemaCanWake = portSyncExSafeToWake(); + NvBool bHighIrql = (portSyncExSafeToSleep() == NV_FALSE); + NvU64 threadId = portThreadGetCurrentThreadId(); + NvU64 priority = 0; + NvU64 priorityPrev = 0; + NvU64 timestamp; + NV_STATUS status; + + // + // We may get a gpuMask of zero during setup of the first GPU attached. + // + if (gpuMask == 0) + return NV_OK; + + // + // The lock(s) being released must not be frozen. + // Log all attempts to do so, but don't bail early to enable recovery paths. + // + NV_ASSERT((rmGpuLockInfo.gpusFreezeMask & gpuMask) == 0); + + // + // Important test. Caller must own the lock(s) it is attempting to + // release. + // In some error recovery cases we want to be able to force release a lock. + // Log all such issues, but don't bail early to enable recovery paths. + // + NV_ASSERT(_rmGpuLockIsOwner(gpuMask)); + + _gpuLocksReleaseHandleDeferredWork(gpuMask); + + threadPriorityBoost(&priority, &priorityPrev); + portSyncSpinlockAcquire(rmGpuLockInfo.pLock); + + if ((gpuMask & rmGpuLockInfo.gpusLockableMask) != gpuMask) + { + NV_PRINTF(LEVEL_WARNING, + "Releasing nonlockable GPU (already went through teardown). gpuMask = 0x%08x, gpusLockableMask = 0x%08x.\n", + gpuMask, rmGpuLockInfo.gpusLockableMask); + } + if ((gpuMask & rmGpuLockInfo.gpusLockedMask) != gpuMask) + { + NV_PRINTF(LEVEL_WARNING, + "Attempting to release unlocked GPUs. gpuMask = 0x%08x, gpusLockedMask = 0x%08x. 
Will skip them.\n",
+                  gpuMask, rmGpuLockInfo.gpusLockedMask);
+        gpuMask &= rmGpuLockInfo.gpusLockedMask;
+
+        if (gpuMask == 0)
+        {
+            NV_PRINTF(LEVEL_WARNING, "No more GPUs to release after skipping");
+            portSyncSpinlockRelease(rmGpuLockInfo.pLock);
+            status = NV_OK;
+            goto done;
+        }
+    }
+
+    // Find the highest GPU instance that's locked, to be used for loop bounds
+    highestInstanceInGpuMask = 31 - portUtilCountLeadingZeros32(gpuMask);
+    if (highestInstanceInGpuMask > rmGpuLockInfo.maxLockableGpuInst)
+    {
+        NV_PRINTF(LEVEL_WARNING, "GPU mask for release (0x%08x) has higher instance than maxLockableGpuInst (%d)\n",
+                  highestInstanceInGpuMask, rmGpuLockInfo.maxLockableGpuInst);
+    }
+
+    //
+    // When we release locks we do it in reverse order.
+    //
+    // In this pass we check to see how many of the locks we are to release
+    // have something waiting. If any of them do, then we queue up a DPC
+    // to handle the release of all of them.
+    //
+    for (gpuInst = highestInstanceInGpuMask;
+         gpuInst != (NvU32)-1;
+         gpuInst--)
+    {
+        if ((gpuMask & NVBIT(gpuInst)) == 0)
+            continue;
+
+        pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst];
+
+        if (pGpuLock->count < 0)
+        {
+            if (!pGpuLock->bSignaled)
+            {
+                gpuMaskWakeup |= NVBIT(gpuInst);
+                if (bSemaCanWake)
+                    pGpuLock->bSignaled = NV_TRUE;
+            }
+        }
+    }
+
+    //
+    // Check to see if we can safely release the locks.
+    //
+    // We can do this if there are no threads waiting on any of them
+    // or if we're running at a processor level that will allow us to
+    // wake any such threads.
+    //
+    // Put another way, the only time we *cannot* do this is when something
+    // is waiting and we are running at an elevated processor level (i.e.
+    // we're here from a call in our ISR).
+    //
+    if (gpuMaskWakeup == 0 || bSemaCanWake)
+    {
+        for (gpuInst = highestInstanceInGpuMask;
+             gpuInst != (NvU32)-1;
+             gpuInst--)
+        {
+            if ((gpuMask & NVBIT(gpuInst)) == 0)
+                continue;
+
+            pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst];
+
+            // now enable interrupts
+            _gpuLocksReleaseEnableInterrupts(gpuInst, flags);
+
+            // indicate that the API is not running
+            NV_ASSERT(pGpuLock->threadId == threadId);
+            pGpuLock->bRunning = NV_FALSE;
+            pGpuLock->threadId = ~(NvU64)0;
+
+            portAtomicIncrementS32(&pGpuLock->count);
+            NV_ASSERT(pGpuLock->count <= 1);
+
+            // update gpusLockedMask
+            rmGpuLockInfo.gpusLockedMask &= ~NVBIT(gpuInst);
+
+            // add release record to GPUs lock trace
+            osGetCurrentTick(&timestamp);
+            INSERT_LOCK_TRACE(&rmGpuLockInfo.traceInfo,
+                              ra,
+                              lockTraceRelease,
+                              gpuInst, gpuMask,
+                              threadId,
+                              bHighIrql,
+                              (NvU8)priority,
+                              timestamp);
+        }
+    }
+
+    RMTRACE_RMLOCK(_GPUS_LOCK_RELEASE);
+
+    portSyncSpinlockRelease(rmGpuLockInfo.pLock);
+
+    if (bSemaCanWake)
+    {
+        NvU32 extraWakeUp;
+        do { extraWakeUp = bug200413011_WAR_WakeUpMask; }
+        while (!portAtomicCompareAndSwapU32(&bug200413011_WAR_WakeUpMask, 0, extraWakeUp));
+        gpuMaskWakeup |= extraWakeUp;
+    }
+
+    //
+    // Handle wake up(s).
+    //
+    if (gpuMaskWakeup != 0)
+    {
+        if (bSemaCanWake)
+        {
+            for (gpuInst = highestInstanceInGpuMask;
+                 gpuInst != (NvU32)-1;
+                 gpuInst--)
+            {
+                if ((gpuMaskWakeup & NVBIT(gpuInst)) == 0)
+                    continue;
+
+                pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst];
+                if (pGpuLock->pWaitSema)
+                    portSyncSemaphoreRelease(pGpuLock->pWaitSema);
+                else
+                    DBG_BREAKPOINT();
+            }
+            status = NV_SEMA_RELEASE_NOTIFIED;
+            goto done;
+        }
+        else
+        {
+            if (pDpcGpu == NULL)
+            {
+                NV_PRINTF(LEVEL_ERROR,
+                          "Releasing GPU locks (mask:0x%08x) at raised IRQL without a DPC GPU at %p. 
Attempting to recover..\n", + gpuMask, ra); + portAtomicOrU32(&bug200413011_WAR_WakeUpMask, gpuMaskWakeup); + status = NV_SEMA_RELEASE_FAILED; + goto done; + } + // Use a dpc to release the locks. + NV_ASSERT((gpuMask == gpumgrGetGpuMask(pDpcGpu)) || + (gpuMask == rmGpuLockInfo.gpusLockedMask)); + if (gpuMask == gpumgrGetGpuMask(pDpcGpu)) + { + status = osGpuLocksQueueRelease(pDpcGpu, DPC_RELEASE_SINGLE_GPU_LOCK); + goto done; + } + else + { + status = osGpuLocksQueueRelease(pDpcGpu, DPC_RELEASE_ALL_GPU_LOCKS); + goto done; + } + } + } + + status = NV_SEMA_RELEASE_SUCCEED; + +done: + threadPriorityRestore(); + + return status; +} + +// +// rmGpuLocksRelease +// NOTE: This function assumes ALL GPUs were previously locked by rmGpuLocksAcquire +// Under this assumption, it is accessing gpusLockedMask and gpusLockableMask, as +// these cannot changed if all GPUs are already locked. +// If any GPU is not locked (due to API misuse most likely), this function is +// open to various race conditions. +// +NvU32 +rmGpuLocksRelease(NvU32 flags, OBJGPU *pDpcGpu) +{ + NvU32 gpuMask; + NvU32 rc; + CALL_CONTEXT *pCallContext; + + // + // It's possible that we're asked to release locks when there are none. + // This can happen when a release attempt happens and the GPU(s) in the + // mask have been hidden with rmGpuLockHide(). In such cases we won't + // even bother trying to do the release. + // + if (rmGpuLockInfo.gpusLockedMask == 0) + return NV_SEMA_RELEASE_SUCCEED; + + // Only attempt to release the ones that are both locked and lockable + gpuMask = rmGpuLockInfo.gpusLockedMask & rmGpuLockInfo.gpusLockableMask; + + if (gpuMask == 0) + { + NV_PRINTF(LEVEL_WARNING, + "Attempting to release nonlockable GPUs. gpuMask = 0x%08x, gpusLockableMask = 0x%08x\n", + gpuMask, rmGpuLockInfo.gpusLockableMask); + return NV_SEMA_RELEASE_FAILED; + } + rc = _rmGpuLocksRelease(gpuMask, flags, pDpcGpu, NV_RETURN_ADDRESS()); + + pCallContext = resservGetTlsCallContext(); + if (pCallContext != NULL) + { + NV_ASSERT(pCallContext->pLockInfo != NULL); + if (pCallContext->pLockInfo != NULL) + pCallContext->pLockInfo->state &= ~RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + } + + return rc; +} + +// +// rmGpuLocksFreeze: Freezes locks for those GPUs specified in the mask +// +void +rmGpuLocksFreeze(GPU_MASK gpuMask) +{ + rmGpuLockInfo.gpusFreezeMask |= gpuMask; +} + +// +// rmGpuLocksUnfreeze: Unfreezes locks for those GPUs specified in the mask +// +void +rmGpuLocksUnfreeze(GPU_MASK gpuMask) +{ + rmGpuLockInfo.gpusFreezeMask &= ~gpuMask; +} + +// +// rmGpuGroupLockRelease: Releases lock for only those gpus specified in the mask +// +NV_STATUS +rmGpuGroupLockRelease(GPU_MASK gpuMask, NvU32 flags) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pDpcGpu = NULL; + + // + // QuadroSync (previously known as GSync) is a cross GPU feature that + // synchronizes display across multiple GPUs. See changelist 16809243. If + // GSync is enabled, acquire locks for all GPUs. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_IS_GSYNC_ENABLED)) + { + // See note for rmGpuLocksRelease() - assumes locks are actually held. 
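+        // With GSync enabled the GPUs were locked as a single group, so
+        // release everything currently locked rather than only the caller's mask.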
+ gpuMask = rmGpuLockInfo.gpusLockedMask; + + pDpcGpu = gpumgrGetGpu(portUtilCountTrailingZeros32(gpuMask)); + } + + return _rmGpuLocksRelease(gpuMask, flags, pDpcGpu, NV_RETURN_ADDRESS()); +} + +// +// rmDeviceGpuLocksRelease +// +NvU32 +rmDeviceGpuLocksRelease(OBJGPU *pGpu, NvU32 flags, OBJGPU *pDpcGpu) +{ + NvU32 gpuMask; + NvU32 rc; + OBJSYS *pSys = SYS_GET_INSTANCE(); + CALL_CONTEXT *pCallContext; + + // + // QuadroSync (previously known as GSync) is a cross GPU feature that + // synchronizes display across multiple GPUs. See changelist 16809243. If + // GSync is enabled, acquire locks for all GPUs. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_IS_GSYNC_ENABLED)) + { + // See note for rmGpuLocksRelease() - assumes locks are actually held. + gpuMask = rmGpuLockInfo.gpusLockedMask; + } + else + { + gpuMask = gpumgrGetGpuMask(pGpu); + } + + if (gpuMask == 0) + { + return NV_SEMA_RELEASE_SUCCEED; + } + + rc = _rmGpuLocksRelease(gpuMask, flags, pDpcGpu, NV_RETURN_ADDRESS()); + + pCallContext = resservGetTlsCallContext(); + if (pCallContext != NULL) + { + NV_ASSERT(pCallContext->pLockInfo != NULL); + if (pCallContext->pLockInfo != NULL) + pCallContext->pLockInfo->state &= ~RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED; + } + + return rc; +} + +// +// rmGpuLockHide +// +// Hide the given gpuMask from the GPU lock acquire and release +// +NV_STATUS +rmGpuLockHide(NvU32 gpuMask) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuInst = 0; + Intr *pIntr; + + // We should not be getting intrs on a Hidden GPU + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInst)) != NULL) + { + // XXX - return failure for this? + pIntr = GPU_GET_INTR(pGpu); + NV_ASSERT(intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL) == INTERRUPT_TYPE_DISABLED); + } + + portAtomicOrU32(&rmGpuLockInfo.gpusHiddenMask, gpuMask); + + return NV_OK; +} + +// +// rmGpuLockShow +// +// Allow the given gpuMask to be acted upon by the GPU lock acquire and release +// +void +rmGpuLockShow(NvU32 gpuMask) +{ + portAtomicAndU32(&rmGpuLockInfo.gpusHiddenMask, ~gpuMask); +} + +NvBool rmGpuLockIsHidden(OBJGPU *pGpu) +{ + return ((rmGpuLockInfo.gpusHiddenMask & NVBIT(pGpu->gpuInstance)) != 0); +} + +// +// _rmGpuLockIsOwner +// +// Returns NV_TRUE if calling thread currently owns specified set of locks. +// +static NvBool +_rmGpuLockIsOwner(NvU32 gpuMask) +{ + NvU32 gpuInst; + OS_THREAD_HANDLE threadId; + OS_THREAD_HANDLE lockedThreadId; + NvU32 highestInstanceInGpuMask; + + if (gpuMask == 0) + return NV_FALSE; + + osGetCurrentThread(&threadId); + + // Can't use rmGpuLockInfo.maxLockableGpuInst since we may not hold the lock + highestInstanceInGpuMask = 31 - portUtilCountLeadingZeros32(gpuMask); + for (gpuInst = portUtilCountTrailingZeros32(gpuMask); + gpuInst <= highestInstanceInGpuMask; + gpuInst++) + { + // skip any not lockable + if ((gpuMask & NVBIT(gpuInst)) == 0) + continue; + + lockedThreadId = rmGpuLockInfo.gpuLocks[gpuInst].threadId; + if (lockedThreadId != threadId) + return NV_FALSE; + } + + return NV_TRUE; +} + +// +// rmGpuLockIsOwner +// +// Returns NV_TRUE if calling thread currently owns all lockable GPUs. +// +NvBool +rmGpuLockIsOwner(void) +{ + return (_rmGpuLockIsOwner(rmGpuLockInfo.gpusLockableMask)); +} + +// +// rmGpuLocksGetOwnedMask +// +// Returns mask of locks currently owned by the calling thread. 
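+// Unlike rmGpuLockIsOwner(), partial ownership is reported here: each bit is
+// set only if the calling thread holds that particular GPU's lock.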
+// +NvU32 +rmGpuLocksGetOwnedMask(void) +{ + NvU32 gpuMask = 0; + NvU32 gpuInst; + OS_THREAD_HANDLE threadId; + OS_THREAD_HANDLE lockedThreadId; + NvU32 lockableMask; + NvU32 highestInstanceInGpuMask; + + lockableMask = rmGpuLockInfo.gpusLockableMask; + if (lockableMask == 0) + return 0; + + osGetCurrentThread(&threadId); + + // Can't use rmGpuLockInfo.maxLockableGpuInst since we may not hold the lock + highestInstanceInGpuMask = 31 - portUtilCountLeadingZeros32(lockableMask); + for (gpuInst = portUtilCountTrailingZeros32(lockableMask); + gpuInst <= highestInstanceInGpuMask; + gpuInst++) + { + // skip any not lockable + if ((lockableMask & NVBIT(gpuInst)) == 0) + continue; + + lockedThreadId = rmGpuLockInfo.gpuLocks[gpuInst].threadId; + if (lockedThreadId == threadId) + gpuMask |= NVBIT(gpuInst); + } + return gpuMask; +} + +// +// rmDeviceGpuLockIsOwner +// + +NvBool +rmDeviceGpuLockIsOwner(NvU32 gpuInst) +{ + return (_rmGpuLockIsOwner(gpumgrGetGrpMaskFromGpuInst(gpuInst))); +} + +// +// rmGpuLockSetOwner +// +// The GPUs lock is acquired in the ISR, then released in the DPC. +// Since the threadId changes between the ISR and DPC, we need to +// refresh the owning thread in the DPC to be accurate. +// +// NOTE: Assumes all GPU locks are owned by a thread that is no longer using them +// (or no longer exists). +// +NV_STATUS +rmGpuLockSetOwner(OS_THREAD_HANDLE threadId) +{ + GPULOCK *pGpuLock; + NvU32 gpuInst; + NvU32 maxLockableGpuInst; + + maxLockableGpuInst = rmGpuLockInfo.maxLockableGpuInst; + if (maxLockableGpuInst >= NV_MAX_DEVICES) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + for (gpuInst = 0; gpuInst <= maxLockableGpuInst; gpuInst++) + { + // skip any not lockable + if ((rmGpuLockInfo.gpusLockableMask & NVBIT(gpuInst)) == 0) + continue; + + pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst]; + + if (threadId != GPUS_LOCK_OWNER_PENDING_DPC_REFRESH) + { + NV_ASSERT_OR_RETURN(pGpuLock->threadId == GPUS_LOCK_OWNER_PENDING_DPC_REFRESH, NV_ERR_INVALID_STATE); + } + pGpuLock->threadId = threadId; + } + + return NV_OK; +} + +// +// rmDeviceGpuLockSetOwner +// +// NOTE: Assumes the locks in question are owned by a thread that is no longer +// using them (or no longer exists). Also assumes that at least one lock belongs +// to pGpu. + +NV_STATUS +rmDeviceGpuLockSetOwner(OBJGPU *pGpu, OS_THREAD_HANDLE threadId) +{ + GPULOCK *pGpuLock; + NvU32 gpuInst; + NvU32 gpuMask; + NvU32 maxLockableGpuInst; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (pSys->getProperty(pSys, PDB_PROP_SYS_IS_GSYNC_ENABLED)) + { + gpuMask = rmGpuLockInfo.gpusLockedMask; + } + else + { + gpuMask = gpumgrGetGpuMask(pGpu); + } + + maxLockableGpuInst = rmGpuLockInfo.maxLockableGpuInst; + if (maxLockableGpuInst >= NV_MAX_DEVICES) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + for (gpuInst = 0; gpuInst <= maxLockableGpuInst; gpuInst++) + { + // skip any not lockable + if ((gpuMask & NVBIT(gpuInst)) == 0) + continue; + + pGpuLock = &rmGpuLockInfo.gpuLocks[gpuInst]; + + if (threadId != GPUS_LOCK_OWNER_PENDING_DPC_REFRESH) + { + NV_ASSERT_OR_RETURN(pGpuLock->threadId == GPUS_LOCK_OWNER_PENDING_DPC_REFRESH, NV_ERR_INVALID_STATE); + } + pGpuLock->threadId = threadId; + } + + return NV_OK; +} + +// +// WAR for bug 200288016: In some cases due to GSYNC updates a worker thread +// does not release all the locks it acquired. This is an attempt at recovery. 
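+// Any GPU lock still tagged with this thread's ID is force-released below.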
+// +void bug200288016_WAR_ReleaseAllOwnedLocked(void) +{ + NvU32 gpuInst; + NvU32 gpuMask = 0; + OS_THREAD_HANDLE threadId; + + osGetCurrentThread(&threadId); + for (gpuInst = 0; gpuInst < NV_MAX_DEVICES; gpuInst++) + { + if (rmGpuLockInfo.gpuLocks[gpuInst].threadId == threadId) + gpuMask |= NVBIT(gpuInst); + } + + if (gpuMask != 0) + { + NV_PRINTF(LEVEL_ERROR, + "Worker thread finished without releasing all locks. gpuMask=%x\n", + gpuMask); + _rmGpuLocksRelease(gpuMask, GPUS_LOCK_FLAGS_NONE, NULL, NV_RETURN_ADDRESS()); + } +} + + + +// +// IntrMask Locking Support +// +typedef struct INTR_MASK_LOCK +{ + PORT_SPINLOCK *pLock; +} INTR_MASK_LOCK; + +static INTR_MASK_LOCK intrMaskLock[NV_MAX_DEVICES]; + +NV_STATUS rmIntrMaskLockAlloc(NvU32 gpuInst) +{ + intrMaskLock[gpuInst].pLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + return (intrMaskLock[gpuInst].pLock == NULL) ? NV_ERR_INSUFFICIENT_RESOURCES : NV_OK; +} + +void rmIntrMaskLockFree(NvU32 gpuInst) +{ + if (intrMaskLock[gpuInst].pLock != NULL) + portSyncSpinlockDestroy(intrMaskLock[gpuInst].pLock); +} + +NvU64 rmIntrMaskLockAcquire(OBJGPU *pGpu) +{ + if ((pGpu != NULL) && (intrMaskLock[pGpu->gpuInstance].pLock != NULL)) + portSyncSpinlockAcquire(intrMaskLock[pGpu->gpuInstance].pLock); + return 0; +} + +void rmIntrMaskLockRelease(OBJGPU *pGpu, NvU64 oldIrql) +{ + if ((pGpu != NULL) && (intrMaskLock[pGpu->gpuInstance].pLock != NULL)) + portSyncSpinlockRelease(intrMaskLock[pGpu->gpuInstance].pLock); +} + diff --git a/src/nvidia/src/kernel/core/locks_common.c b/src/nvidia/src/kernel/core/locks_common.c new file mode 100644 index 000000000..0f84bfe51 --- /dev/null +++ b/src/nvidia/src/kernel/core/locks_common.c @@ -0,0 +1,307 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/system.h" +#include "os/os.h" +#include "tls/tls.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" + +static NvBool s_bRmLocksAllocated = NV_FALSE; + +NV_STATUS +rmLocksAlloc(OBJSYS *pSys) +{ + NV_STATUS status; + + s_bRmLocksAllocated = NV_FALSE; + + // legacy lock model : RM system semaphore + status = osAllocRmSema(&pSys->pSema); + if (status != NV_OK) + return status; + + // RM_BASIC_LOCK_MODEL : GPU lock info (ISR/DPC synchronization) + status = rmGpuLockInfoInit(); + if (status != NV_OK) + { + osFreeRmSema(&pSys->pSema); + return status; + } + rmInitLockMetering(); + + s_bRmLocksAllocated = NV_TRUE; + + return status; +} + +void +rmLocksFree(OBJSYS *pSys) +{ + if (s_bRmLocksAllocated) + { + rmDestroyLockMetering(); + rmGpuLockInfoDestroy(); + osFreeRmSema(pSys->pSema); + + s_bRmLocksAllocated = NV_FALSE; + } +} + +/*! + * @brief Acquires all of the locks necessary to execute RM code safely + * + * Other threads and client APIs will be blocked from executing while the locks + * are held, so the locks should not be held longer than necessary. The locks + * should not be held across long HW delays. + * + * @returns NV_OK if locks are acquired successfully + * NV_ERR_INVALID_LOCK_STATE if locks cannot be acquired + */ +NV_STATUS +rmLocksAcquireAll(NvU32 module) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (osAcquireRmSemaForced(pSys->pSema) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the RM lock!\n"); + return NV_ERR_INVALID_LOCK_STATE; + } + + if (rmApiLockAcquire(API_LOCK_FLAGS_NONE, module) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the API lock!\n"); + osReleaseRmSema(pSys->pSema, NULL); + return NV_ERR_INVALID_LOCK_STATE; + } + + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, module) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire the GPU lock!\n"); + rmApiLockRelease(); + osReleaseRmSema(pSys->pSema, NULL); + return NV_ERR_INVALID_LOCK_STATE; + } + + return NV_OK; +} + +/*! 
+ * @brief Releases the locks acquired by rmLocksAcquireAll + */ +void +rmLocksReleaseAll(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + rmApiLockRelease(); + osReleaseRmSema(pSys->pSema, NULL); +} + + +NV_STATUS +workItemLocksAcquire(NvU32 gpuInstance, NvU32 flags, NvU32 *pReleaseLocks, NvU32 *pGpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPU *pGpu; + NvU32 grp; + NV_STATUS status = NV_OK; + + *pReleaseLocks = 0; + *pGpuMask = 0; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + { + status = osAcquireRmSema(pSys->pSema); + if (status != NV_OK) + goto done; + + *pReleaseLocks |= OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA; + } + + if ((flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO)) + { + NvU32 apiLockFlags = RMAPI_LOCK_FLAGS_NONE; + NvU32 releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW; + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO) + { + apiLockFlags = RMAPI_LOCK_FLAGS_READ; + releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO; + } + + status = rmApiLockAcquire(apiLockFlags, RM_LOCK_MODULES_WORKITEM); + if (status != NV_OK) + goto done; + + *pReleaseLocks |= releaseFlags; + } + + if ((flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RO) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RO)) + { + NvU32 gpuLockFlags = GPUS_LOCK_FLAGS_NONE; + NvU32 releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW; + + if (((flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RO) || + (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RO)) && + (pSys->gpuLockModuleMask & RM_LOCK_MODULE_GRP(RM_LOCK_MODULES_WORKITEM))) + { + gpuLockFlags = GPU_LOCK_FLAGS_READ; + releaseFlags = OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO; + } + + if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) + grp = GPU_LOCK_GRP_ALL; + else if (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_DEVICE_RW) + grp = GPU_LOCK_GRP_DEVICE; + else // (flags & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPU_GROUP_SUBDEVICE_RW) + grp = GPU_LOCK_GRP_SUBDEVICE; + + status = rmGpuGroupLockAcquire(gpuInstance, grp, gpuLockFlags, + RM_LOCK_MODULES_WORKITEM, pGpuMask); + if (status != NV_OK) + goto done; + + // All of these call into the same function, just share the flag + *pReleaseLocks |= releaseFlags; + + pGpu = gpumgrGetGpu(gpuInstance); + if (pGpu == NULL) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (flags & OS_QUEUE_WORKITEM_FLAGS_FULL_GPU_SANITY) + { + if (!FULL_GPU_SANITY_CHECK(pGpu) || + !pGpu->getProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED)) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, + "GPU isn't full power! gpuInstance = 0x%x.\n", + gpuInstance); + goto done; + } + } + + if (flags & OS_QUEUE_WORKITEM_FLAGS_FOR_PM_RESUME) + { + if (!FULL_GPU_SANITY_FOR_PM_RESUME(pGpu)) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, + "GPU isn't full power and isn't in resume codepath! 
gpuInstance = 0x%x.\n", + gpuInstance); + goto done; + } + } + } + +done: + if (status != NV_OK) + { + workItemLocksRelease(*pReleaseLocks, *pGpuMask); + *pReleaseLocks = 0; + } + return status; +} + +void +workItemLocksRelease(NvU32 releaseLocks, NvU32 gpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RW) + { + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + } + + if (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_GPUS_RO) + { + rmGpuGroupLockRelease(gpuMask, GPU_LOCK_FLAGS_READ); + } + + if ((releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RW) || + (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_API_RO)) + { + rmApiLockRelease(); + } + + if (releaseLocks & OS_QUEUE_WORKITEM_FLAGS_LOCK_SEMA) + { + osReleaseRmSema(pSys->pSema, NULL); + } +} + +// +// rmGpuGroupLockGetMask +// +// Given a GPU group ID this function returns the MASK for all GPUS in that group +// We skip the lookup for GPU_LOCK_GRP_MASK as that implies that the caller is aware of the mask +// +NV_STATUS +rmGpuGroupLockGetMask(NvU32 gpuInst, GPU_LOCK_GRP_ID gpuGrpId, GPU_MASK *pGpuMask) +{ + switch (gpuGrpId) + { + case GPU_LOCK_GRP_SUBDEVICE: + *pGpuMask = NVBIT(gpuInst); + break; + + case GPU_LOCK_GRP_DEVICE: + *pGpuMask = gpumgrGetGrpMaskFromGpuInst(gpuInst); + break; + + case GPU_LOCK_GRP_MASK: + break; + + case GPU_LOCK_GRP_ALL: + *pGpuMask = GPUS_LOCK_ALL; + break; + + default: + NV_ASSERT_FAILED("Unexpected gpuGrpId in gpu lock get mask"); + return NV_ERR_INVALID_ARGUMENT; + } + return NV_OK; +} + + +void threadPriorityStateAlloc(void) {} +void threadPriorityStateFree(void) {} +void threadPriorityThrottle(void) {} +void threadPriorityBoost(NvU64 *p, NvU64 *o) {} +void threadPriorityRestore(void) {} + diff --git a/src/nvidia/src/kernel/core/system.c b/src/nvidia/src/kernel/core/system.c new file mode 100644 index 000000000..a7b08cc93 --- /dev/null +++ b/src/nvidia/src/kernel/core/system.c @@ -0,0 +1,684 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Rotuines ***************************\ +* * +* System Object Function Definitions. 
* +* * +\***************************************************************************/ + +#include "core/core.h" +#include "core/system.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "core/locks.h" +#include "os/os.h" +#include "nvrm_registry.h" +#include "core/thread_state.h" +#include "diagnostics/tracer.h" +#include "rmosxfac.h" +#include "tls/tls.h" +#include "rmapi/rmapi.h" +#include "rmapi/client.h" +#include "core/hal_mgr.h" +#include "nvoc/rtti.h" + +#include "platform/chipset/chipset.h" +#include "platform/cpu.h" +#include "platform/platform.h" +#include "diagnostics/gpu_acct.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "diagnostics/journal.h" +#include "power/gpu_boost_mgr.h" +#include "compute/fabric.h" +#include "gpu_mgr/gpu_db.h" + +// local static functions +static NV_STATUS _sysCreateOs(OBJSYS *); +static NV_STATUS _sysCreateChildObjects(OBJSYS *); +static void _sysDeleteChildObjects(OBJSYS *); +static void _sysNvSwitchDetection(OBJSYS *pSys); +static void _sysInitStaticConfig(OBJSYS *pSys);; + +// Global pointer to instance of OBJSYS +OBJSYS *g_pSys = NULL; + +typedef struct +{ + NvLength childOffset; + const NVOC_CLASS_INFO *pClassInfo; + NvBool bDynamicConstruct; +} sysChildObject; + +static sysChildObject sysChildObjects[] = +{ + { NV_OFFSETOF(OBJSYS, pHalMgr), classInfo(OBJHALMGR), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pPfm), classInfo(OBJPFM), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pOS), classInfo(OBJOS), NV_FALSE }, // OS: Wrapper macros must be enabled to use :CONSTRUCT. + { NV_OFFSETOF(OBJSYS, pCl), classInfo(OBJCL), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pGpuMgr), classInfo(OBJGPUMGR), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pGpuAcct), classInfo(GpuAccounting), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pRcDB), classInfo(OBJRCDB), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pVmm), classInfo(OBJVMM), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pGpuBoostMgr), classInfo(OBJGPUBOOSTMGR), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pFabric), classInfo(Fabric), NV_TRUE }, + { NV_OFFSETOF(OBJSYS, pGpuDb), classInfo(GpuDb), NV_TRUE }, +}; + +NV_STATUS +sysConstruct_IMPL(OBJSYS *pSys) +{ + NV_STATUS status; + OBJOS *pOS; + NvU32 sec = 0; + NvU32 uSec = 0; + + g_pSys = pSys; + + RMTRACE_INIT(); + RMTRACE_INIT_NEW(); + + _sysInitStaticConfig(pSys); + + status = _sysCreateChildObjects(pSys); + if (status != NV_OK) + { + goto failed; + } + + // Use the monotonic system clock for a unique value + pOS = SYS_GET_OS(pSys); + osGetCurrentTime(&sec, &uSec); + pSys->rmInstanceId = (NvU64)sec * 1000000 + (NvU64)uSec; + + if (!pOS->osRmInitRm(pOS)) + { + status = NV_ERR_GENERIC; + goto failed; + } + + _sysNvSwitchDetection(pSys); + + // init cpu state + RmInitCpuInfo(); + + // allocate locks, semaphores, whatever + status = rmLocksAlloc(pSys); + if (status != NV_OK) + goto failed; + + status = threadStateGlobalAlloc(); + if (status != NV_OK) + goto failed; + + status = rmapiInitialize(); + if (status != NV_OK) + goto failed; + + return NV_OK; + +failed: + _sysDeleteChildObjects(pSys); + + g_pSys = NULL; + + threadStateGlobalFree(); + + rmapiShutdown(); + rmLocksFree(pSys); + + return status; +} + +void +sysDestruct_IMPL(OBJSYS *pSys) +{ + // + // Any of these operations might fail but go ahead and + // attempt to free remaining resources before complaining. 
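+    // Teardown runs roughly in the reverse order of sysConstruct_IMPL.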
+ // + listDestroy(&g_clientListBehindGpusLock); + listDestroy(&g_userInfoList); + + rmapiShutdown(); + osSyncWithRmDestroy(); + threadStateGlobalFree(); + rmLocksFree(pSys); + + // + // Free child objects + // + _sysDeleteChildObjects(pSys); + + g_pSys = NULL; + + RMTRACE_DESTROY(); + RMTRACE_DESTROY_NEW(); + +} + +// +// Create static system object offspring. +// +static NV_STATUS +_sysCreateChildObjects(OBJSYS *pSys) +{ + NV_STATUS status = NV_OK; + NvU32 i, n; + + n = NV_ARRAY_ELEMENTS32(sysChildObjects); + + for (i = 0; i < n; i++) + { + if (sysChildObjects[i].bDynamicConstruct) + { + NvLength offset = sysChildObjects[i].childOffset; + Dynamic **ppChild = reinterpretCast(reinterpretCast(pSys, NvU8*) + offset, Dynamic**); + Dynamic *pNewObj; + status = objCreateDynamic(&pNewObj, pSys, sysChildObjects[i].pClassInfo); + + if (status == NV_OK) + { + *ppChild = pNewObj; + } + } + else + { + // + // More cases should NOT be added to this list. OBJOS needs to be + // cleaned up to use the bDynamicConstruct path then this hack can + // be removed. + // + switch (sysChildObjects[i].pClassInfo->classId) + { + case classId(OBJOS): + status = _sysCreateOs(pSys); + break; + default: + NV_ASSERT(0); + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (status == NV_ERR_NOT_SUPPORTED) + status = NV_OK; + if (status != NV_OK) break; + } + + return status; +} + +static void +_sysDeleteChildObjects(OBJSYS *pSys) +{ + int i; + + osRmCapUnregister(&pSys->pOsRmCaps); + + for (i = NV_ARRAY_ELEMENTS32(sysChildObjects) - 1; i >= 0; i--) + { + NvLength offset = sysChildObjects[i].childOffset; + Dynamic **ppChild = reinterpretCast(reinterpretCast(pSys, NvU8*) + offset, Dynamic**); + objDelete(*ppChild); + *ppChild = NULL; + } +} + +static void +_sysRegistryOverrideResourceServer +( + OBJSYS *pSys, + OBJGPU *pGpu +) +{ + NvU32 data32; + + // Set read-only API lock override + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_READONLY_API_LOCK, + &data32) == NV_OK) + { + NvU32 apiMask = 0; + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _ALLOC_RESOURCE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_ALLOC_RESOURCE); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _FREE_RESOURCE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_FREE_RESOURCE); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _MAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_MAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _UNMAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_UNMAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _INTER_MAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_INTER_MAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _INTER_UNMAP, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_INTER_UNMAP); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _CTRL, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_CTRL); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _COPY, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_COPY); + + if (FLD_TEST_DRF(_REG_STR_RM, _READONLY_API_LOCK, _SHARE, _ENABLE, data32)) + apiMask |= NVBIT(RS_API_SHARE); + + pSys->apiLockMask = apiMask; + } + else + { + pSys->apiLockMask = NVBIT(RS_API_CTRL); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_READONLY_API_LOCK_MODULE, + &data32) == NV_OK) + { + pSys->apiLockModuleMask = data32; + } + else + { + pSys->apiLockModuleMask = RM_LOCK_MODULE_GRP(RM_LOCK_MODULES_CLIENT); + } +} + +static void +_sysRegistryOverrideExternalFabricMgmt +( + 
OBJSYS *pSys, + OBJGPU *pGpu +) +{ + NvU32 data32; + + // Set external fabric management property + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_EXTERNAL_FABRIC_MGMT, + &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _EXTERNAL_FABRIC_MGMT, _MODE, _ENABLE, data32)) + { + NV_PRINTF(LEVEL_INFO, + "Enabling external fabric management.\n"); + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_TRUE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _EXTERNAL_FABRIC_MGMT, _MODE, _DISABLE, data32)) + { + NV_PRINTF(LEVEL_INFO, + "Disabling external fabric management.\n"); + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_FALSE); + } + } +} + +void +sysEnableExternalFabricMgmt_IMPL +( + OBJSYS *pSys +) +{ + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, NV_TRUE); + + NV_PRINTF(LEVEL_INFO, + "Enabling external fabric management for Proxy NvSwitch systems.\n"); +} + +void +sysForceInitFabricManagerState_IMPL +( + OBJSYS *pSys +) +{ + // + // We should only allow force init if there is not way to run fabric + // manager. For example, HGX-2 virtualization use-case. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT) || + pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED)) + { + NV_ASSERT(0); + return; + } + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED, NV_TRUE); + + NV_PRINTF(LEVEL_INFO, + "Forcing fabric manager's state as initialized to unblock clients.\n"); +} + +static void +_sysNvSwitchDetection +( + OBJSYS *pSys +) +{ + + if (osIsNvswitchPresent()) + { + pSys->setProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT, NV_TRUE); + + NV_PRINTF(LEVEL_INFO, "NvSwitch is found in the system\n"); + + sysEnableExternalFabricMgmt(pSys); + } +} + +/*! + * @brief Initialize static system configuration data. 
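+ *
+ * The static configuration block is zeroed and then populated by the OS
+ * layer via osInitSystemStaticConfig().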
+ * + * @param[in] pSys SYSTEM object pointer + */ +static void +_sysInitStaticConfig(OBJSYS *pSys) +{ + portMemSet(&pSys->staticConfig, 0, sizeof(pSys->staticConfig)); + osInitSystemStaticConfig(&pSys->staticConfig); +} + +NV_STATUS +coreInitializeRm(void) +{ + NV_STATUS status; + OBJSYS *pSys = NULL; + + // + // Initialize libraries used by RM + // + + // Portable runtime init + status = portInitialize(); + if (status != NV_OK) + return status; + + // Required before any NvLog (NV_PRINTF) calls + NVLOG_INIT(NULL); + + // Required before any NV_PRINTF() calls + if (!DBG_INIT()) + { + status = NV_ERR_GENERIC; + return status; + } + + // + // Initialize OBJSYS which spawns all the RM internal modules + // + status = objCreate(&pSys, NVOC_NULL_OBJECT, OBJSYS); + + nvAssertInit(); + + return status; + } + +void +coreShutdownRm(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // + // Destruct OBJSYS which frees all the RM internal modules + // + objDelete(pSys); + + // + // Deinitalize libraries used by RM + // + nvAssertDestroy(); + + DBG_DESTROY(); + + NVLOG_DESTROY(); + + portShutdown(); +} + +// Obsolete RM init function -- code should migrate to new interfaces +NvS32 +RmInitRm(void) +{ + return (coreInitializeRm() == NV_OK); +} + +// Obsolete RM destroy function -- code should migrate to new interfaces +NvS32 +RmDestroyRm(void) +{ + coreShutdownRm(); + return NV_TRUE; +} + +static NV_STATUS +_sysCreateOs(OBJSYS *pSys) +{ + OBJOS *pOS; + NV_STATUS status; + + // RMCONFIG: only if OS is enabled :-) + RMCFG_MODULE_ENABLED_OR_BAIL(OS); + + status = objCreate(&pOS, pSys, OBJOS); + if (status != NV_OK) + { + return status; + } + + status = constructObjOS(pOS); + if (status != NV_OK) + { + objDelete(pOS); + return status; + } + + status = osRmCapRegisterSys(&pSys->pOsRmCaps); + if (status != NV_OK) + { + // + // Device objects needed for some access rights failed + // This is not system-critical since access rights are currently disabled, + // so continue booting, just log error. + // + // RS-TODO make this fail once RM Capabilities are enabled (Bug 2549938) + // + NV_PRINTF(LEVEL_ERROR, "RM Access Sys Cap creation failed: 0x%x\n", status); + } + + pSys->pOS = pOS; + + return NV_OK; +} + +NV_STATUS +sysCaptureState_IMPL(OBJSYS *pSys) +{ + return NV_OK; +} + +OBJOS* +sysGetOs_IMPL(OBJSYS *pSys) +{ + if (pSys->pOS) + return pSys->pOS; + + // + // A special case for any early 'get-object' calls for the OS + // object before there is an OS object. Some RC code called on + // DBG_BREAKPOINT assumes an OS object exists, and can cause a crash. + // + PORT_BREAKPOINT_ALWAYS(); + + return NULL; +} + +void +sysInitRegistryOverrides_IMPL +( + OBJSYS *pSys +) +{ + OBJGPU *pGpu = NULL; + NvU32 data32 = 0; + + if (pSys->getProperty(pSys, + PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED)) + { + // The registry overrides, if any, have already been applied. + return; + } + + // Get some GPU - as of now we need some gpu to read registry. + pGpu = gpumgrGetSomeGpu(); + if (pGpu == NULL) + { + // Too early call ! we can not read the registry. 
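+        // The "initialized" property is left unset, so the overrides will be
+        // applied on a later call once a GPU has been attached.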
+ return; + } + + if ((osReadRegistryDword(pGpu, + NV_REG_STR_RM_ENABLE_EVENT_TRACER, &data32) == NV_OK) && data32 ) + { + RMTRACE_ENABLE(data32); + } + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_CLIENT_DATA_VALIDATION, &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _KERNEL_BUFFERS, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _KERNEL_BUFFERS, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _HANDLE, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _HANDLE, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _STRICT_CLIENT, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _STRICT_CLIENT, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_FALSE); + } + + if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _ALL, _ENABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_TRUE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_TRUE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_TRUE); + } + else if (FLD_TEST_DRF(_REG_STR_RM, _CLIENT_DATA_VALIDATION, _ALL, _DISABLED, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE, NV_FALSE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS, NV_FALSE); + pSys->setProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT, NV_FALSE); + } + } + + pSys->setProperty(pSys, PDB_PROP_SYS_REGISTRY_OVERRIDES_INITIALIZED, NV_TRUE); + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_STREAM_MEMOPS, + &data32) == NV_OK) + { + if (FLD_TEST_DRF(_REG_STR_RM, _STREAM_MEMOPS, _ENABLE, _YES, data32)) + { + pSys->setProperty(pSys, PDB_PROP_SYS_ENABLE_STREAM_MEMOPS, NV_TRUE); + } + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_PRIORITY_BOOST, + &data32) == NV_OK) + { + if (data32 == NV_REG_STR_RM_PRIORITY_BOOST_DISABLE) + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_BOOST, NV_FALSE); + else + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_BOOST, NV_TRUE); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_PRIORITY_THROTTLE_DELAY, + &data32) == NV_OK) + { + pSys->setProperty(pSys, PDB_PROP_SYS_PRIORITY_THROTTLE_DELAY_US, data32); + } + + _sysRegistryOverrideExternalFabricMgmt(pSys, pGpu); + _sysRegistryOverrideResourceServer(pSys, pGpu); + + if ((data32 = osGetReleaseAssertBehavior()) != 0) + { + NvBool bRmAssertBehaviorBugcheck = NV_FALSE; +#if defined(DEVELOP) + bRmAssertBehaviorBugcheck = ((data32 & RM_ASSERT_BEHAVIOR_BUGCHECK_DEVELOP) == RM_ASSERT_BEHAVIOR_BUGCHECK_DEVELOP); +#elif !defined(DEBUG) // !DEVELOP and !DEBUG = RELEASE + bRmAssertBehaviorBugcheck = ((data32 & RM_ASSERT_BEHAVIOR_BUGCHECK_RELEASE) == RM_ASSERT_BEHAVIOR_BUGCHECK_RELEASE); +#endif + pSys->setProperty(pSys, PDB_PROP_SYS_BSOD_ON_ASSERT, bRmAssertBehaviorBugcheck); + } + + if (osBugCheckOnTimeoutEnabled()) + { + pSys->setProperty(pSys, PDB_PROP_SYS_BUGCHECK_ON_TIMEOUT, NV_TRUE); + } +} + +void +sysApplyLockingPolicy_IMPL(OBJSYS *pSys) +{ + 
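+    //
+    // Publish the read-only API lock mask (set up from the registry in
+    // _sysRegistryOverrideResourceServer()) to Resource Server.
+    //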
g_resServ.roTopLockApiMask = pSys->apiLockMask;
+}
+
+NV_STATUS
+sysSyncExternalFabricMgmtWAR_IMPL
+(
+    OBJSYS *pSys,
+    OBJGPU *pGpu
+)
+{
+    NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS params;
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_STATUS status = NV_OK;
+
+    params.bExternalFabricMgmt = pSys->getProperty(pSys,
+                                     PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED);
+
+    status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalClient,
+                             NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT,
+                             &params, sizeof(params));
+
+    return status;
+}
diff --git a/src/nvidia/src/kernel/core/thread_state.c b/src/nvidia/src/kernel/core/thread_state.c
new file mode 100644
index 000000000..5c081ff54
--- /dev/null
+++ b/src/nvidia/src/kernel/core/thread_state.c
@@ -0,0 +1,1257 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +//***************************************************************************** +// +// This file contains code used for Thread State management +// +// Terminology: +// +// ISR: First level interrupt handler, acknowledge function (VMK) +// +// Deferred INT handler: DPC (Windows), Bottom-half (*nux), Interrupt handler (VMK) +// +//***************************************************************************** + +#include "core/core.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "os/os.h" +#include "containers/map.h" +#include "nvrm_registry.h" +#include "gpu/gpu.h" +#include "gpu/gpu_timeout.h" + +#include "diagnostics/journal.h" + +THREAD_STATE_DB threadStateDatabase; + +static void _threadStatePrintInfo(THREAD_STATE_NODE *pThreadNode) +{ + if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_PRINT_INFO_ENABLED) == 0) + return; + + if (pThreadNode != NULL) + { + NV_PRINTF(LEVEL_NOTICE, "Thread state:\n"); + NV_PRINTF(LEVEL_NOTICE, + "threadId: 0x%llx flags: 0x0%x\n", + pThreadNode->threadId, + pThreadNode->flags); + + NV_PRINTF(LEVEL_NOTICE, + "enterTime: 0x%llx Limits: nonComputeTime: 0x%llx computeTime: 0x%llx\n", + pThreadNode->timeout.enterTime, + pThreadNode->timeout.nonComputeTime, + pThreadNode->timeout.computeTime); + } +} + +static void _threadStateFreeProcessWork(THREAD_STATE_NODE *pThreadNode) +{ + PORT_UNREFERENCED_VARIABLE(pThreadNode); +} + +/** + * @brief allocate threadState which is per-cpu and per-GPU, only supporting lockless ISR + * + * @param[in/out] ppIsrlocklessThreadNode + * + * @return NV_OK if success, error otherwise + * + */ +static NV_STATUS _threadStateAllocPerCpuPerGpu(PPTHREAD_STATE_ISR_LOCKLESS ppIsrlocklessThreadNode) +{ + NvU32 allocSize; + PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode; + NvS32 i; + NvU32 coreCount = osGetMaximumCoreCount(); + + // Bug 789767 + threadStateDatabase.maxCPUs = 32; + if (coreCount > threadStateDatabase.maxCPUs) + threadStateDatabase.maxCPUs = coreCount; + + allocSize = threadStateDatabase.maxCPUs * sizeof(PTHREAD_STATE_ISR_LOCKLESS); + + pIsrlocklessThreadNode = portMemAllocNonPaged(allocSize); + if (pIsrlocklessThreadNode == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pIsrlocklessThreadNode, 0, allocSize); + allocSize = NV_MAX_DEVICES * sizeof(THREAD_STATE_NODE *); + + // Allocate thread node for each gpu per cpu. 
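+    // If any per-CPU slot fails to allocate, the slots allocated so far and
+    // the top-level table are freed before returning NV_ERR_NO_MEMORY.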
+ for (i = 0; i < (NvS32)threadStateDatabase.maxCPUs; i++) + { + pIsrlocklessThreadNode[i].ppIsrThreadStateGpu = portMemAllocNonPaged(allocSize); + if (pIsrlocklessThreadNode[i].ppIsrThreadStateGpu == NULL) + { + for (--i; i >= 0; --i) + portMemFree(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu); + + portMemFree(pIsrlocklessThreadNode); + return NV_ERR_NO_MEMORY; + } + else + { + portMemSet(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu, 0, allocSize); + } + } + *ppIsrlocklessThreadNode = pIsrlocklessThreadNode; + return NV_OK; +} + +/** + * @brief free threadState which is per-cpu and per-GPU, only working for lockless ISR + * + * @param[in/out] pIsrlocklessThreadNode + * + */ +static void _threadStateFreePerCpuPerGpu(PTHREAD_STATE_ISR_LOCKLESS pIsrlocklessThreadNode) +{ + NvU32 i; + // Free any memory we allocated + if (pIsrlocklessThreadNode) + { + for (i = 0; i < threadStateDatabase.maxCPUs; i++) + portMemFree(pIsrlocklessThreadNode[i].ppIsrThreadStateGpu); + portMemFree(pIsrlocklessThreadNode); + } +} + +/** + * @brief the main function to allocate the threadState + * + * @return NV_OK if the entire global threadState is created successfully, + * and an appropriate ERROR otherwise. + * + */ +NV_STATUS threadStateGlobalAlloc(void) +{ + NV_STATUS rmStatus; + NvU32 allocSize; + + NV_ASSERT(tlsInitialize() == NV_OK); + + // Init the thread sequencer id counter to 0. + threadStateDatabase.threadSeqCntr = 0; + + threadStateDatabase.spinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (threadStateDatabase.spinlock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + allocSize = NV_MAX_DEVICES * sizeof(THREAD_STATE_NODE *); + threadStateDatabase.ppISRDeferredIntHandlerThreadNode = portMemAllocNonPaged(allocSize); + if (threadStateDatabase.ppISRDeferredIntHandlerThreadNode == NULL) + { + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + return NV_ERR_NO_MEMORY; + } + portMemSet(threadStateDatabase.ppISRDeferredIntHandlerThreadNode, 0, allocSize); + + rmStatus = _threadStateAllocPerCpuPerGpu(&threadStateDatabase.pIsrlocklessThreadNode); + if (rmStatus != NV_OK) + { + portMemFree(threadStateDatabase.ppISRDeferredIntHandlerThreadNode); + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + return rmStatus; + } + + mapInitIntrusive(&threadStateDatabase.dbRoot); + mapInitIntrusive(&threadStateDatabase.dbRootPreempted); + + return rmStatus; +} + +void threadStateGlobalFree(void) +{ + // Disable all threadState usage once the spinlock is freed + threadStateDatabase.setupFlags = THREAD_STATE_SETUP_FLAGS_NONE; + + // Free any memory we allocated + _threadStateFreePerCpuPerGpu(threadStateDatabase.pIsrlocklessThreadNode); + threadStateDatabase.pIsrlocklessThreadNode = NULL; + + portMemFree(threadStateDatabase.ppISRDeferredIntHandlerThreadNode); + threadStateDatabase.ppISRDeferredIntHandlerThreadNode = NULL; + + if (threadStateDatabase.spinlock != NULL) + { + portSyncSpinlockDestroy(threadStateDatabase.spinlock); + threadStateDatabase.spinlock = NULL; + } + + mapDestroy(&threadStateDatabase.dbRoot); + mapDestroy(&threadStateDatabase.dbRootPreempted); + + tlsShutdown(); +} + +void threadStateInitRegistryOverrides(OBJGPU *pGpu) +{ + NvU32 flags; + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_THREAD_STATE_SETUP_FLAGS, &flags) == NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Overriding threadStateDatabase.setupFlags from 0x%x to 0x%x\n", + threadStateDatabase.setupFlags, flags); + threadStateDatabase.setupFlags = flags; + } +} + +void threadStateInitSetupFlags(NvU32 
flags) +{ + threadStateDatabase.timeout.nonComputeTimeoutMsecs = 0; + threadStateDatabase.timeout.computeTimeoutMsecs = 0; + threadStateDatabase.timeout.computeGpuMask = 0; + threadStateDatabase.setupFlags = flags; +} + +NvU32 threadStateGetSetupFlags(void) +{ + return threadStateDatabase.setupFlags; +} + +// +// Sets the nextCpuYieldTime field to a value that corresponds to a +// short time in the future. This value represents the next time that +// the osScheduler may be invoked, during long waits. +// +static void _threadStateSetNextCpuYieldTime(THREAD_STATE_NODE *pThreadNode) +{ + NvU64 timeInNs; + osGetCurrentTick(&timeInNs); + + pThreadNode->timeout.nextCpuYieldTime = timeInNs + + (TIMEOUT_DEFAULT_OS_RESCHEDULE_INTERVAL_SECS) * 1000000 * 1000; +} + +void threadStateYieldCpuIfNecessary(OBJGPU *pGpu) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pThreadNode = NULL; + NvU64 timeInNs; + + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if ((rmStatus == NV_OK) && pThreadNode ) + { + osGetCurrentTick(&timeInNs); + if (timeInNs >= pThreadNode->timeout.nextCpuYieldTime) + { + if (NV_OK == osSchedule()) + { + NV_PRINTF(LEVEL_WARNING, "Yielding\n"); + } + + _threadStateSetNextCpuYieldTime(pThreadNode); + } + } +} + +static NV_STATUS _threadNodeInitTime(THREAD_STATE_NODE *pThreadNode) +{ + NV_STATUS rmStatus = NV_OK; + NvU64 timeInNs; + NvBool firstInit; + NvU64 computeTimeoutMsecs; + NvU64 nonComputeTimeoutMsecs; + NvBool bIsDpcOrIsr = !!(pThreadNode->flags & + (THREAD_STATE_FLAGS_IS_ISR | + THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING | + THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)); + + // + // _threadNodeInitTime() is used both for the first init and + // threadStateResetTimeout(). We can tell the two apart by checking whether + // enterTime has been initialized already. + // + firstInit = (pThreadNode->timeout.enterTime == 0); + + computeTimeoutMsecs = threadStateDatabase.timeout.computeTimeoutMsecs; + nonComputeTimeoutMsecs = threadStateDatabase.timeout.nonComputeTimeoutMsecs; + + // + // If we are in DPC or ISR contexts, we need to timeout the driver before OS + // mechanisms kick in and panic the kernel + // + if (bIsDpcOrIsr) + { + // + // Note that MODS does not have interrupt timeout requirements and there are + // existing code paths that violates the timeout + // + computeTimeoutMsecs = 500; + nonComputeTimeoutMsecs = 500; + } + + osGetCurrentTick(&timeInNs); + + if (firstInit) + { + // + // Save off the time we first entered the RM. 
We do not + // want to reset this if we call threadStateResetTimeout() + // + pThreadNode->timeout.enterTime = timeInNs; + } + + if (pThreadNode->timeout.overrideTimeoutMsecs) + { + nonComputeTimeoutMsecs = pThreadNode->timeout.overrideTimeoutMsecs; + computeTimeoutMsecs = pThreadNode->timeout.overrideTimeoutMsecs; + } + + _threadStateSetNextCpuYieldTime(pThreadNode); + + if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + pThreadNode->timeout.nonComputeTime = timeInNs + (nonComputeTimeoutMsecs * 1000 * 1000); + pThreadNode->timeout.computeTime = timeInNs + (computeTimeoutMsecs * 1000 * 1000); + } + else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY) + { + // Convert from msecs (1,000) to usecs (1,000,000) + pThreadNode->timeout.nonComputeTime = nonComputeTimeoutMsecs * 1000; + pThreadNode->timeout.computeTime = computeTimeoutMsecs * 1000; + } + else + { + NV_PRINTF(LEVEL_INFO, + "Bad threadStateDatabase.timeout.flags: 0x%x!\n", + threadStateDatabase.timeout.flags); + + rmStatus = NV_ERR_INVALID_STATE; + } + + return rmStatus; +} + +static void _getTimeoutDataFromGpuMode( + OBJGPU *pGpu, + THREAD_STATE_NODE *pThreadNode, + NvU64 **ppThreadNodeTime, + NvU64 *pThreadStateDatabaseTimeoutMsecs) +{ + if (pGpu) + { + if (threadStateDatabase.timeout.computeGpuMask & NVBIT(pGpu->gpuInstance)) + { + *ppThreadNodeTime = &pThreadNode->timeout.computeTime; + } + else + { + *ppThreadNodeTime = &pThreadNode->timeout.nonComputeTime; + } + + *pThreadStateDatabaseTimeoutMsecs = + NV_MAX(threadStateDatabase.timeout.computeTimeoutMsecs, threadStateDatabase.timeout.nonComputeTimeoutMsecs); + } +} + +// +// The logic in _threadNodeCheckTimeout() should closely resemble +// that of _gpuCheckTimeout(). +// +static NV_STATUS _threadNodeCheckTimeout(OBJGPU *pGpu, THREAD_STATE_NODE *pThreadNode, NvU64 *pElapsedTimeUs) +{ + NV_STATUS rmStatus = NV_OK; + NvU64 threadStateDatabaseTimeoutMsecs = 0; + NvU64 *pThreadNodeTime = NULL; + NvU64 timeInNs; + + if (pGpu) + { + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "API_GPU_ATTACHED_SANITY_CHECK failed!\n"); + return NV_ERR_TIMEOUT; + } + } + + _getTimeoutDataFromGpuMode(pGpu, pThreadNode, &pThreadNodeTime, + &threadStateDatabaseTimeoutMsecs); + if ((threadStateDatabaseTimeoutMsecs == 0) || + (pThreadNodeTime == NULL)) + { + NV_PRINTF(LEVEL_ERROR, + "threadStateDatabaseTimeoutMsecs or pThreadNodeTime was NULL!\n"); + return NV_ERR_INVALID_STATE; + } + + osGetCurrentTick(&timeInNs); + if (pElapsedTimeUs) + { + *pElapsedTimeUs = (timeInNs - pThreadNode->timeout.enterTime) / 1000; + } + + if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + if (timeInNs >= *pThreadNodeTime) + { + NV_PRINTF(LEVEL_ERROR, + "_threadNodeCheckTimeout: currentTime: %llx >= %llx\n", + timeInNs, *pThreadNodeTime); + + rmStatus = NV_ERR_TIMEOUT; + } + } + else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY) + { + osDelayUs(100); + *pThreadNodeTime -= NV_MIN(100, *pThreadNodeTime); + if (*pThreadNodeTime == 0) + { + rmStatus = NV_ERR_TIMEOUT; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "_threadNodeCheckTimeout: Unsupported timeout.flags: 0x%x!\n", + threadStateDatabase.timeout.flags); + + rmStatus = NV_ERR_INVALID_STATE; + } + + if (rmStatus == NV_ERR_TIMEOUT) + { + // Report the time this Thread entered the RM + _threadStatePrintInfo(pThreadNode); + + // This is set via osGetTimeoutParams per platform + NV_PRINTF(LEVEL_ERROR, + "_threadNodeCheckTimeout: Timeout was set to: %lld msecs!\n", + 
threadStateDatabaseTimeoutMsecs); + + if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ASSERT_ON_TIMEOUT_ENABLED) + { + NV_ASSERT(0); + } + + if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_RESET_ON_TIMEOUT_ENABLED) + { + threadStateResetTimeout(pGpu); + } + } + + return rmStatus; +} + +static void _threadStateFreeInvokeCallbacks +( + THREAD_STATE_NODE *pThreadNode +) +{ + THREAD_STATE_FREE_CALLBACK *pCbListNode; + + NV_ASSERT_OR_RETURN_VOID(pThreadNode->flags & + THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED); + + // Start from head to maintain FIFO semantics. + while ((pCbListNode = listHead(&pThreadNode->cbList)) != NULL) + { + (*pCbListNode->pCb)(pCbListNode->pCbData); + listRemove(&pThreadNode->cbList, pCbListNode); + } +} + +static void _threadStateLogInitCaller(THREAD_STATE_NODE *pThreadNode, NvU64 funcAddr) +{ + threadStateDatabase.traceInfo.entries[threadStateDatabase.traceInfo.index].callerRA = funcAddr; + threadStateDatabase.traceInfo.entries[threadStateDatabase.traceInfo.index].flags = pThreadNode->flags; + threadStateDatabase.traceInfo.index = + (threadStateDatabase.traceInfo.index + 1) % THREAD_STATE_TRACE_MAX_ENTRIES; +} + +/** + * @brief Initialize a threadState for regular threads (non-interrupt context) + * + * @param[in/out] pThreadNode + * @param[in] flags + * + */ +void threadStateInit(THREAD_STATE_NODE *pThreadNode, NvU32 flags) +{ + NV_STATUS rmStatus; + NvU64 funcAddr; + + // Isrs should be using threadStateIsrInit(). + NV_ASSERT((flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS | + THREAD_STATE_FLAGS_IS_ISR | + THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)) == 0); + + // Check to see if ThreadState is enabled + if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED)) + return; + + portMemSet(pThreadNode, 0, sizeof(*pThreadNode)); + pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr); + pThreadNode->cpuNum = osGetCurrentProcessorNumber(); + pThreadNode->flags = flags; + + // + // The thread state free callbacks are only supported in the non-ISR paths + // as they invoke memory allocation routines. + // + listInit(&pThreadNode->cbList, portMemAllocatorGetGlobalNonPaged()); + pThreadNode->flags |= THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED; + + rmStatus = _threadNodeInitTime(pThreadNode); + if (rmStatus == NV_OK) + pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED; + + rmStatus = osGetCurrentThread(&pThreadNode->threadId); + if (rmStatus != NV_OK) + return; + + NV_ASSERT_OR_RETURN_VOID(pThreadNode->cpuNum < threadStateDatabase.maxCPUs); + + funcAddr = (NvU64) (NV_RETURN_ADDRESS()); + + portSyncSpinlockAcquire(threadStateDatabase.spinlock); + if (!mapInsertExisting(&threadStateDatabase.dbRoot, (NvU64)pThreadNode->threadId, pThreadNode)) + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + // Place in the Preempted List if threadId is already present in the API list + if (mapInsertExisting(&threadStateDatabase.dbRootPreempted, (NvU64)pThreadNode->threadId, pThreadNode)) + { + pThreadNode->flags |= THREAD_STATE_FLAGS_PLACED_ON_PREEMPT_LIST; + pThreadNode->bValid = NV_TRUE; + rmStatus = NV_OK; + } + else + { + // Reset the threadId as insertion failed on both maps. 
bValid is already NV_FALSE + pThreadNode->threadId = 0; + portSyncSpinlockRelease(threadStateDatabase.spinlock); + return; + } + } + else + { + pThreadNode->bValid = NV_TRUE; + rmStatus = NV_OK; + } + + _threadStateLogInitCaller(pThreadNode, funcAddr); + + portSyncSpinlockRelease(threadStateDatabase.spinlock); + + _threadStatePrintInfo(pThreadNode); + + NV_ASSERT(rmStatus == NV_OK); + threadPriorityStateAlloc(); + + if (TLS_MIRROR_THREADSTATE) + { + THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE); + NV_ASSERT_OR_RETURN_VOID(pTls != NULL); + if (*pTls != NULL) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: Nested threadState inits detected. Previous threadState node is %p, new is %p\n", + *pTls, pThreadNode); + } + *pTls = pThreadNode; + } +} + +/** + * @brief Initialize a threadState for locked ISR and Bottom-half + * + * @param[in/out] pThreadNode + * @param[in] pGpu + * @param[in] flags THREAD_STATE_FLAGS_IS_ISR or THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING + * + */ +void threadStateInitISRAndDeferredIntHandler +( + THREAD_STATE_NODE *pThreadNode, + OBJGPU *pGpu, + NvU32 flags +) +{ + NV_STATUS rmStatus; + + NV_ASSERT(pGpu); + + // should be using threadStateIsrInit(). + NV_ASSERT(flags & (THREAD_STATE_FLAGS_IS_ISR | THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)); + + portMemSet(pThreadNode, 0, sizeof(*pThreadNode)); + pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr); + pThreadNode->cpuNum = osGetCurrentProcessorNumber(); + pThreadNode->flags = flags; + + rmStatus = _threadNodeInitTime(pThreadNode); + + if (rmStatus == NV_OK) + pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED; + + if (TLS_MIRROR_THREADSTATE) + { + THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE); + NV_ASSERT_OR_GOTO(pTls != NULL, TlsMirror_Exit); + if (*pTls != NULL) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: Nested threadState inits detected. Previous threadState node is %p, new is %p\n", + *pTls, pThreadNode); + } + *pTls = pThreadNode; + } +TlsMirror_Exit: + + rmStatus = osGetCurrentThread(&pThreadNode->threadId); + if (rmStatus != NV_OK) + return; + + threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance] = pThreadNode; +} + +/** + * @brief Initialize a threadState for lockless ISR + * + * @param[in/out] pThreadNode + * @param[in] pGpu + * @param[in] flags THREAD_STATE_FLAGS_IS_ISR_LOCKLESS + * + */ +void threadStateInitISRLockless(THREAD_STATE_NODE *pThreadNode, OBJGPU *pGpu, NvU32 flags) +{ + NV_STATUS rmStatus; + PTHREAD_STATE_ISR_LOCKLESS pThreadStateIsrLockless; + + NV_ASSERT(flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS); + + // Check to see if ThreadState is enabled + if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED)) + return; + + portMemSet(pThreadNode, 0, sizeof(*pThreadNode)); + pThreadNode->threadSeqId = portAtomicIncrementU32(&threadStateDatabase.threadSeqCntr); + pThreadNode->cpuNum = osGetCurrentProcessorNumber(); + pThreadNode->flags = flags; + + rmStatus = _threadNodeInitTime(pThreadNode); + if (rmStatus == NV_OK) + pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED; + + if (TLS_MIRROR_THREADSTATE) + { + THREAD_STATE_NODE **pTls = (THREAD_STATE_NODE **)tlsEntryAcquire(TLS_ENTRY_ID_THREADSTATE); + NV_ASSERT_OR_GOTO(pTls != NULL, TlsMirror_Exit); + if (*pTls != NULL) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: Nested threadState inits detected. 
Previous threadState node is %p, new is %p\n", + *pTls, pThreadNode); + } + *pTls = pThreadNode; + } +TlsMirror_Exit: + + rmStatus = osGetCurrentThread(&pThreadNode->threadId); + if (rmStatus != NV_OK) + return; + + NV_ASSERT_OR_RETURN_VOID(pThreadNode->cpuNum < threadStateDatabase.maxCPUs); + + // + // We use a cpu/gpu indexed structure to store the threadNode pointer + // instead of a tree indexed by threadId because threadId is no longer + // unique in an isr. We also need to index by both cpu num and gpu instance + // because isrs can prempt one another, and run on the same processor + // at the same time. + // + pThreadStateIsrLockless = &threadStateDatabase.pIsrlocklessThreadNode[pThreadNode->cpuNum]; + NV_ASSERT(pThreadStateIsrLockless->ppIsrThreadStateGpu[pGpu->gpuInstance] == NULL); + pThreadStateIsrLockless->ppIsrThreadStateGpu[pGpu->gpuInstance] = pThreadNode; +} + +/** + * @brief Free the thread state for locked ISR and bottom-half + * + * @param[in/out] pThreadNode + * @param[in] pGpu + * @param[in] flags THREAD_STATE_FLAGS_IS_ISR or THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING + * + */ +void threadStateFreeISRAndDeferredIntHandler +( + THREAD_STATE_NODE *pThreadNode, + OBJGPU *pGpu, + NvU32 flags +) +{ + NV_STATUS rmStatus; + + NV_ASSERT_OR_RETURN_VOID(pGpu && + (flags & (THREAD_STATE_FLAGS_IS_ISR | THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING))); + + if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED)) + return; + + // Process any work needed before exiting. + _threadStateFreeProcessWork(pThreadNode); + + if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED) + { + rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/); + NV_ASSERT(rmStatus == NV_OK); + } + + threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance] = NULL; + + if (TLS_MIRROR_THREADSTATE) + { + NvU32 r; + THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE)); + NV_ASSERT(pTlsNode); + if (pTlsNode != pThreadNode) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n", + pTlsNode, pThreadNode); + } + r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE); + if (r != 0) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n", + r); + } + } +} + +/** + * @brief Free the thread state for a regular thread + * + * @param[in/out] pThreadNode + * @param[in] flags + * + */ +void threadStateFree(THREAD_STATE_NODE *pThreadNode, NvU32 flags) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pNode; + ThreadStateNodeMap *pMap; + + NV_ASSERT((flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS | + THREAD_STATE_FLAGS_IS_ISR | + THREAD_STATE_FLAGS_DEFERRED_INT_HANDLER_RUNNING)) == 0); + + // Check to see if ThreadState is enabled + if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED)) + return; + + if (!(flags & THREAD_STATE_FLAGS_EXCLUSIVE_RUNNING)) + { + // + // Do not do this for exclusive running threads as all the info + // is not filled in. + // + if (!pThreadNode->bValid && pThreadNode->threadId == 0) + return; + } + + _threadStateFreeInvokeCallbacks(pThreadNode); + + listDestroy(&pThreadNode->cbList); + + // Process any work needed before exiting. 
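/*
 * [Editor's note: illustrative sketch, not part of the original change.]
 * threadStateInit() and threadStateFree() are expected to bracket an RM
 * entry point on the same stack-allocated node; the flags value here is
 * illustrative:
 *
 *     THREAD_STATE_NODE threadState;
 *     threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
 *     // ... RM work; long waits poll threadStateCheckTimeout() ...
 *     threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
 *
 * The ISR/DPC init variants above pair the same way with
 * threadStateFreeISRAndDeferredIntHandler() / threadStateFreeISRLockless().
 */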
+ _threadStateFreeProcessWork(pThreadNode); + + if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED) + { + rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/); + NV_ASSERT(rmStatus == NV_OK); + } + + portSyncSpinlockAcquire(threadStateDatabase.spinlock); + if (pThreadNode->flags & THREAD_STATE_FLAGS_PLACED_ON_PREEMPT_LIST) + { + pMap = &threadStateDatabase.dbRootPreempted; + } + else + { + pMap = &threadStateDatabase.dbRoot; + } + + pNode = mapFind(pMap, (NvU64)pThreadNode->threadId); + + if (pNode != NULL) + { + mapRemove(pMap, pThreadNode); + pThreadNode->bValid = NV_FALSE; + rmStatus = NV_OK; + } + else + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + } + + portSyncSpinlockRelease(threadStateDatabase.spinlock); + + _threadStatePrintInfo(pThreadNode); + + NV_ASSERT(rmStatus == NV_OK); + + threadPriorityStateFree(); + + if (TLS_MIRROR_THREADSTATE) + { + NvU32 r; + THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE)); + NV_ASSERT(pTlsNode); + if (pTlsNode != pThreadNode) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n", + pTlsNode, pThreadNode); + } + r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE); + if (r != 0) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n", + r); + } + } +} + +/** + * @brief Free thread state for lockless ISR + * + * @param[in/out] pThreadNode + * @param[in] pGpu + * @param[in] flags + * + */ +void threadStateFreeISRLockless(THREAD_STATE_NODE *pThreadNode, OBJGPU *pGpu, NvU32 flags) +{ + NV_STATUS rmStatus = NV_OK; + PTHREAD_STATE_ISR_LOCKLESS pThreadStateIsrlockless; + + NV_ASSERT(flags & (THREAD_STATE_FLAGS_IS_ISR_LOCKLESS | THREAD_STATE_FLAGS_IS_ISR)); + NV_ASSERT(pThreadNode->cpuNum == osGetCurrentProcessorNumber()); + + // Check to see if ThreadState is enabled + if (!(threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED)) + return; + + // Process any work needed before exiting. + _threadStateFreeProcessWork(pThreadNode); + + if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_CHECK_TIMEOUT_AT_FREE_ENABLED) + { + rmStatus = _threadNodeCheckTimeout(NULL /*pGpu*/, pThreadNode, NULL /*pElapsedTimeUs*/); + NV_ASSERT(rmStatus == NV_OK); + } + + pThreadStateIsrlockless = &threadStateDatabase.pIsrlocklessThreadNode[pThreadNode->cpuNum]; + NV_ASSERT(pThreadStateIsrlockless->ppIsrThreadStateGpu[pGpu->gpuInstance] != NULL); + pThreadStateIsrlockless->ppIsrThreadStateGpu[pGpu->gpuInstance] = NULL; + + if (TLS_MIRROR_THREADSTATE) + { + NvU32 r; + THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE)); + NV_ASSERT(pTlsNode); + if (pTlsNode != pThreadNode) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: pTlsNode=%p, pThreadNode=%p\n", + pTlsNode, pThreadNode); + } + r = tlsEntryRelease(TLS_ENTRY_ID_THREADSTATE); + if (r != 0) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: tlsEntryRelease returned %d (this is likely due to nested threadStateInit() calls)\n", + r); + } + } +} + +/** + * @brief Get the thread state with given + * + * @param[in] threadId + * @param[in] pGpu + * @param[out] ppThreadNode + * + * @return NV_OK if we are able to locate the thread state with , + * NV_ERR_OBJECT_NOT_FOUND if we can't find inside map + * NV_ERR_INVALID_STATE if the thread state is not enabled or the CPU has + * been hotpluged. 
+ */ +static NV_STATUS _threadStateGet +( + OS_THREAD_HANDLE threadId, + OBJGPU *pGpu, + THREAD_STATE_NODE **ppThreadNode +) +{ + THREAD_STATE_NODE *pNode; + + // Check to see if ThreadState is enabled + if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED) == NV_FALSE) + { + *ppThreadNode = NULL; + return NV_ERR_INVALID_STATE; + } + else + { + NvU32 cpuNum = osGetCurrentProcessorNumber(); + THREAD_STATE_NODE *pIsrlocklessThreadNode; + THREAD_STATE_NODE *pISRDeferredIntHandlerNode; + + if (cpuNum >= threadStateDatabase.maxCPUs) + { + NV_ASSERT(0); + *ppThreadNode = NULL; + return NV_ERR_INVALID_STATE; + } + + // + // Several threadState call sites will not pass a pGpu b/c it is not + // easily available, and they are not running in interrupt context. + // _threadStateGet() only needs to pGpu for getting the thread node + // when called for an isr, so that site has assumed it will never + // be in interrupt context. + // + if (pGpu) + { + // Check to see if the this is an lockless ISR running thread. + pIsrlocklessThreadNode = threadStateDatabase.pIsrlocklessThreadNode[cpuNum].ppIsrThreadStateGpu[pGpu->gpuInstance]; + if (pIsrlocklessThreadNode && (pIsrlocklessThreadNode->threadId == threadId)) + { + *ppThreadNode = pIsrlocklessThreadNode; + return NV_OK; + } + + // Check to see if the this is an ISR or bottom-half thread + pISRDeferredIntHandlerNode = threadStateDatabase.ppISRDeferredIntHandlerThreadNode[pGpu->gpuInstance]; + if (pISRDeferredIntHandlerNode && (pISRDeferredIntHandlerNode->threadId == threadId)) + { + *ppThreadNode = pISRDeferredIntHandlerNode; + return NV_OK; + } + } + } + + // Try the Preempted list first before trying the API list + portSyncSpinlockAcquire(threadStateDatabase.spinlock); + pNode = mapFind(&threadStateDatabase.dbRootPreempted, (NvU64) threadId); + if (pNode == NULL) + { + // Not found on the Preempted, try the API list + pNode = mapFind(&threadStateDatabase.dbRoot, (NvU64) threadId); + } + portSyncSpinlockRelease(threadStateDatabase.spinlock); + + *ppThreadNode = pNode; + if (pNode != NULL) + { + NV_ASSERT((*ppThreadNode)->threadId == threadId); + return NV_OK; + } + else + { + return NV_ERR_OBJECT_NOT_FOUND; + } +} + +NV_STATUS threadStateGetCurrentUnchecked(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu) +{ + NV_STATUS rmStatus; + OS_THREAD_HANDLE threadId; + + // Check to see if ThreadState is enabled + if ((threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ENABLED) == NV_FALSE) + { + *ppThreadNode = NULL; + return NV_ERR_INVALID_STATE; + } + + rmStatus = osGetCurrentThread(&threadId); + if (rmStatus == NV_OK) + { + rmStatus = _threadStateGet(threadId, pGpu, ppThreadNode); + } + + // Assert if the current lookup failed - Please add the stack from this assert to bug 690089. 
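/*
 * [Editor's note: illustrative aside, not part of the original change.]
 * Lookup order used by _threadStateGet() above: the per-CPU/per-GPU
 * lockless-ISR slot, then the per-GPU ISR/DPC slot (both consulted only
 * when a pGpu is supplied), then the preempted map, and finally the main
 * API map. The first node matching the current threadId wins.
 */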
+ if (threadStateDatabase.setupFlags & THREAD_STATE_SETUP_FLAGS_ASSERT_ON_FAILED_LOOKUP_ENABLED) + { + NV_PRINTF(LEVEL_ERROR, + "threadState[Init,Free] call may be missing from this RM entry point!\n"); + NV_ASSERT(rmStatus == NV_OK); + } + + return rmStatus; +} + +NV_STATUS threadStateGetCurrent(THREAD_STATE_NODE **ppThreadNode, OBJGPU *pGpu) +{ + NV_STATUS status = threadStateGetCurrentUnchecked(ppThreadNode, pGpu); + + if (TLS_MIRROR_THREADSTATE) + { + THREAD_STATE_NODE *pTlsNode = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_THREADSTATE)); + + if ((status == NV_OK) && (pTlsNode != *ppThreadNode)) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: pTlsNode=%p, *ppThreadNode=%p; ThreadID = %llx (NvPort:%llx), sp=%p\n", + pTlsNode, *ppThreadNode, + (NvU64)(*ppThreadNode)->threadId, + portThreadGetCurrentThreadId(), &status); + + } + else if ((status != NV_OK) && (pTlsNode != NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: TLS / threadState mismatch: ThreadNode not found (status=0x%08x), but found in TLS:%p (tid=%llx;sp=%p)\n", + status, pTlsNode, + portThreadGetCurrentThreadId(), &status); + } + } + + return status; +} + +// +// Sets the timeout value and method of timeout +// +NV_STATUS threadStateInitTimeout(OBJGPU *pGpu, NvU32 timeoutUs, NvU32 flags) +{ + NvU32 timeoutMsecs = (timeoutUs / 1000); + NvU32 gpuMode = gpuGetMode(pGpu); + NvU32 scaleIgnored = 0; + NvU32 flagsIgnored = 0; + NvU32 perOSTimeoutUs = 999; // What we'll see if osGetTimeoutParams ever fails + + if (gpuMode == NV_GPU_MODE_GRAPHICS_MODE) + { + threadStateDatabase.timeout.nonComputeTimeoutMsecs = timeoutMsecs; + threadStateDatabase.timeout.computeGpuMask &= ~NVBIT(pGpu->gpuInstance); + } + else + { + threadStateDatabase.timeout.computeGpuMask |= NVBIT(pGpu->gpuInstance); + } + // + // Initializing the compute timeout limits in all cases, but use + // per-OS values: + // + osGetTimeoutParams(pGpu, &perOSTimeoutUs, &scaleIgnored, &flagsIgnored); + timeoutMsecs = (perOSTimeoutUs / 1000); + timeoutMsecs = gpuScaleTimeout(pGpu, timeoutMsecs); + + threadStateDatabase.timeout.computeTimeoutMsecs = timeoutMsecs; + threadStateDatabase.timeout.flags = flags; + + return NV_OK; +} + +// +// Resets the current threadId time +// +NV_STATUS threadStateResetTimeout(OBJGPU *pGpu) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pThreadNode = NULL; + + // Check to see if ThreadState Timeout is enabled + if ((threadStateDatabase.setupFlags & + THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED) == NV_FALSE) + { + return NV_ERR_INVALID_STATE; + } + + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if ((rmStatus == NV_OK) && pThreadNode ) + { + // Reset the timeout + rmStatus = _threadNodeInitTime(pThreadNode); + if (rmStatus == NV_OK) + { + pThreadNode->flags |= THREAD_STATE_FLAGS_TIMEOUT_INITED; + _threadStatePrintInfo(pThreadNode); + } + } + + return rmStatus; +} + +void threadStateLogTimeout(OBJGPU *pGpu, NvU64 funcAddr, NvU32 lineNum) +{ + // Log the Timeout in the RM Journal + RmRC2GpuTimeout3_RECORD* pRec = NULL; + + rcdbAddAssertJournalRecWithLine(pGpu, lineNum, (void**)&pRec, + RmGroup, RmRC2GpuTimeout_V3, + sizeof(RmRC2GpuTimeout3_RECORD), + DRF_DEF(_RM, _ASSERT, _TYPE, _INFO), + funcAddr); + + // If this is release and we have RmBreakOnRC on -- Stop +#ifndef DEBUG + OBJSYS *pSys = SYS_GET_INSTANCE(); + if (DRF_VAL(_DEBUG, _BREAK_FLAGS, _GPU_TIMEOUT, pSys->debugFlags) == + NV_DEBUG_BREAK_FLAGS_GPU_TIMEOUT_ENABLE) + { + DBG_BREAKPOINT(); + } +#endif +} + +// +// Checks the current threadId time against a set timeout period +// 
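/*
 * [Editor's note: illustrative sketch, not part of the original change.]
 * threadStateCheckTimeout() below is normally driven from a wait loop,
 * roughly as follows; hwConditionMet() is a hypothetical stand-in for the
 * actual hardware poll:
 *
 *     while (!hwConditionMet(pGpu))
 *     {
 *         threadStateYieldCpuIfNecessary(pGpu);
 *         if (threadStateCheckTimeout(pGpu, NULL) == NV_ERR_TIMEOUT)
 *             return NV_ERR_TIMEOUT;
 *     }
 */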
+NV_STATUS threadStateCheckTimeout(OBJGPU *pGpu, NvU64 *pElapsedTimeUs) +{ + NV_STATUS rmStatus; + THREAD_STATE_NODE *pThreadNode = NULL; + + if (pElapsedTimeUs) + *pElapsedTimeUs = 0; + + // + // Make sure the DB has been initialized, we have a valid threadId, + // and that the Timeout logic is enabled + // + if ((threadStateDatabase.setupFlags & + THREAD_STATE_SETUP_FLAGS_TIMEOUT_ENABLED) == NV_FALSE) + { + return NV_ERR_INVALID_STATE; + } + if (threadStateDatabase.timeout.flags == 0) + { + return NV_ERR_INVALID_STATE; + } + + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if ((rmStatus == NV_OK) && pThreadNode ) + { + if (pThreadNode->flags & THREAD_STATE_FLAGS_TIMEOUT_INITED) + { + rmStatus = _threadNodeCheckTimeout(pGpu, pThreadNode, pElapsedTimeUs); + } + else + { + rmStatus = NV_ERR_INVALID_STATE; + } + } + + return rmStatus; +} + +// +// Set override timeout value for specified thread +// +void threadStateSetTimeoutOverride(THREAD_STATE_NODE *pThreadNode, NvU64 newTimeoutMs) +{ + NvU64 timeInNs; + + pThreadNode->timeout.overrideTimeoutMsecs = newTimeoutMs; + + osGetCurrentTick(&timeInNs); + + _threadStateSetNextCpuYieldTime(pThreadNode); + + if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + pThreadNode->timeout.nonComputeTime = timeInNs + (newTimeoutMs * 1000 * 1000); + pThreadNode->timeout.computeTime = timeInNs + (newTimeoutMs * 1000 * 1000); + } + else if (threadStateDatabase.timeout.flags & GPU_TIMEOUT_FLAGS_OSDELAY) + { + // Convert from msecs (1,000) to usecs (1,000,000) + pThreadNode->timeout.nonComputeTime = newTimeoutMs * 1000; + pThreadNode->timeout.computeTime = newTimeoutMs * 1000; + } +} + +NV_STATUS threadStateEnqueueCallbackOnFree +( + THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback +) +{ + THREAD_STATE_FREE_CALLBACK *pCbListNode; + + if (!(pThreadNode->flags & THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED)) + return NV_ERR_INVALID_OPERATION; + + if ((pThreadNode == NULL) || (pCallback == NULL) || + (pCallback->pCb == NULL)) + return NV_ERR_INVALID_ARGUMENT; + + // Add from tail to maintain FIFO semantics. + pCbListNode = listAppendNew(&pThreadNode->cbList); + if (pCbListNode == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pCbListNode->pCb = pCallback->pCb; + pCbListNode->pCbData = pCallback->pCbData; + + return NV_OK; +} + +void threadStateRemoveCallbackOnFree +( + THREAD_STATE_NODE *pThreadNode, + THREAD_STATE_FREE_CALLBACK *pCallback +) +{ + THREAD_STATE_FREE_CALLBACK *pCbListNode; + + NV_ASSERT_OR_RETURN_VOID(pThreadNode->flags & + THREAD_STATE_FLAGS_STATE_FREE_CB_ENABLED); + + // + // Remove doesn't need to obey FIFO semantics. + // + // Must remove only one entry per call to be symmetric with + // threadStateEnqueueCallbackOnFree(). It is caller's responsibility to + // invoke this API repeatedly as needed. + // + for (pCbListNode = listHead(&pThreadNode->cbList); + pCbListNode != NULL; + pCbListNode = listNext(&pThreadNode->cbList, pCbListNode)) + { + if ((pCbListNode->pCb == pCallback->pCb) && + (pCbListNode->pCbData = pCallback->pCbData)) + { + listRemove(&pThreadNode->cbList, pCbListNode); + return; + } + } +} diff --git a/src/nvidia/src/kernel/diagnostics/gpu_acct.c b/src/nvidia/src/kernel/diagnostics/gpu_acct.c new file mode 100644 index 000000000..143d9ef4d --- /dev/null +++ b/src/nvidia/src/kernel/diagnostics/gpu_acct.c @@ -0,0 +1,1399 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "diagnostics/gpu_acct.h" +#include "objtmr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "containers/map.h" +#include "containers/list.h" +#include "ctrl/ctrl0000/ctrl0000gpuacct.h" +#include "ctrl/ctrl0000/ctrl0000gpu.h" // NV0000_CTRL_GPU_MAX_ATTACHED_GPUS +#include "virtualization/hypervisor/hypervisor.h" +#include "rmapi/rmapi.h" +#include "rmapi/rmapi_utils.h" + +static NV_STATUS gpuacctInitState(GpuAccounting *); +static NvU64 gpuacctGetCurrTime(void); +static NV_STATUS gpuacctAddProcEntry(GPU_ACCT_PROC_DATA_STORE *, GPUACCT_PROC_ENTRY *, NvBool); +static NV_STATUS gpuacctRemoveProcEntry(GPU_ACCT_PROC_DATA_STORE *, GPUACCT_PROC_ENTRY *); +static NV_STATUS gpuacctLookupProcEntry(GPU_ACCT_PROC_DATA_STORE *, NvU32, GPUACCT_PROC_ENTRY **); +static NV_STATUS gpuacctAllocProcEntry(GPU_ACCT_PROC_DATA_STORE *, NvU32, NvU32, GPUACCT_PROC_ENTRY **); +static NV_STATUS gpuacctFreeProcEntry(GPU_ACCT_PROC_DATA_STORE *, GPUACCT_PROC_ENTRY *); +static NV_STATUS gpuacctCleanupDataStore(GPU_ACCT_PROC_DATA_STORE *); +static NV_STATUS gpuacctDestroyDataStore(GPU_ACCT_PROC_DATA_STORE *); +static NV_STATUS gpuacctInitDataStore(GPU_ACCT_PROC_DATA_STORE *); +static NV_STATUS gpuacctStartTimerCallbacks(OBJGPU *, GPUACCT_GPU_INSTANCE_INFO *); +static void gpuacctStopTimerCallbacks(OBJGPU *, GPUACCT_GPU_INSTANCE_INFO *); +static NV_STATUS gpuacctSampleGpuUtil(OBJGPU *, OBJTMR *, TMR_EVENT *); + +/*! + * Constrcutor for gpu accounting class. + * + * @param[in,out] pGpuAcct GPUACCT object pointer + * + * @return NV_OK If successfully constructed. + * @return Other + * Bubbles up errors from: + * * gpuacctInitState + */ +NV_STATUS +gpuacctConstruct_IMPL +( + GpuAccounting *pGpuAcct +) +{ + NV_STATUS status = NV_OK; + + status = gpuacctInitState(pGpuAcct); + + return status; +} + +/*! + * Initializes the data store. + * + * @param[in] pDS Pointer to data store. + * + * @return NV_OK + */ +static NV_STATUS +gpuacctInitDataStore +( + GPU_ACCT_PROC_DATA_STORE *pDS +) +{ + NV_STATUS status = NV_OK; + + mapInitIntrusive(&pDS->procMap); + listInitIntrusive(&pDS->procList); + + return status; +} + +/*! + * Initializes GPUACCT state. + * + * @param[in] pGpuAcct GPUACCT object pointer + * + * @return NV_OK Upon successful initialization of GPUACCT. 
+ * @return Other + * Bubbles up errors from: + * * gpuacctInitDataStore + */ +static NV_STATUS gpuacctInitState +( + GpuAccounting *pGpuAcct +) +{ + NV_STATUS status = NV_OK; + NvU32 i, j; + + for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++) + { + pGpuAcct->gpuInstanceInfo[i].pTmrEvent = NULL; + pGpuAcct->gpuInstanceInfo[i].pSamplesParams = NULL; + + status = gpuacctInitDataStore(&pGpuAcct->gpuInstanceInfo[i].deadProcAcctInfo); + if (status != NV_OK) + { + goto gpuacctInitState_cleanup; + } + + status = gpuacctInitDataStore(&pGpuAcct->gpuInstanceInfo[i].liveProcAcctInfo); + if (status != NV_OK) + { + goto gpuacctInitState_cleanup; + } + } + + return status; + +gpuacctInitState_cleanup: + + for (j = 0; j <= i; j++) + { + gpuacctDestroyDataStore(&pGpuAcct->gpuInstanceInfo[j].deadProcAcctInfo); + gpuacctDestroyDataStore(&pGpuAcct->gpuInstanceInfo[j].liveProcAcctInfo); + } + + return status; +} + +/*! + * Cleans up data store. + * + * @param[in] pDS Pointer to data store. + * + * @return NV_OK. + * @return NV_ERR_INVALID_ARGUMENT. + */ +static NV_STATUS +gpuacctCleanupDataStore +( + GPU_ACCT_PROC_DATA_STORE *pDS +) +{ + NV_ASSERT_OR_RETURN(pDS != NULL, NV_ERR_INVALID_ARGUMENT); + + GPU_ACCT_PROC_LISTIter iter = listIterAll(&pDS->procList); + while (listIterNext(&iter)) + { + GPUACCT_PROC_ENTRY *pEntry = iter.pValue; + if (pEntry) + { + gpuacctFreeProcEntry(pDS, pEntry); + } + } + + return NV_OK; +} + +/*! + * Destroys data store. + * + * @param[in] pDS Pointer to data store which needs to be destroyed. + * + * @return NV_OK. + */ +static NV_STATUS +gpuacctDestroyDataStore +( + GPU_ACCT_PROC_DATA_STORE *pDS +) +{ + NV_STATUS status = gpuacctCleanupDataStore(pDS); + + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + mapDestroy(&pDS->procMap); + listDestroy(&pDS->procList); + + return NV_OK; +} + +/*! + * Destructor + * + * @param[in] pGpuAcct GPUACCT object pointer + */ +void gpuacctDestruct_IMPL +( + GpuAccounting *pGpuAcct +) +{ + NvU32 i; + + for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++) + { + gpuacctDestroyDataStore(&pGpuAcct->gpuInstanceInfo[i].deadProcAcctInfo); + gpuacctDestroyDataStore(&pGpuAcct->gpuInstanceInfo[i].liveProcAcctInfo); + } +} + +/*! + * Allocates an entry for a process and push it in the data store. + * + * @param[in] pDS Pointer to data store where process entry is to be added. + * @param[in] pid PID of the process. + * @param[in] procType Type of the process. + * @param[out] ppEntry Pointer to process entry. + * + * @return NV_OK + * @return Other + * Bubbles up errors from: + * * portMemAllocNonPaged + * * gpuacctAddProcEntry + */ +static NV_STATUS +gpuacctAllocProcEntry +( + GPU_ACCT_PROC_DATA_STORE *pDS, + NvU32 pid, + NvU32 procType, + GPUACCT_PROC_ENTRY **ppEntry +) +{ + GPUACCT_PROC_ENTRY *pEntry; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(ppEntry != NULL, NV_ERR_INVALID_ARGUMENT); + *ppEntry = NULL; + + pEntry = portMemAllocNonPaged(sizeof(GPUACCT_PROC_ENTRY)); + if (pEntry == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(pEntry, 0, sizeof(GPUACCT_PROC_ENTRY)); + + pEntry->procId = pid; + pEntry->procType = procType; + + status = gpuacctAddProcEntry(pDS, pEntry, NV_TRUE); + if (status != NV_OK) + { + goto out; + } + + *ppEntry = pEntry; + +out: + if (status != NV_OK) + { + portMemFree(pEntry); + } + return status; +} + +/*! + * Frees a process entry from the data store. + * + * @param[in] pDS Pointer to data store where process entry is stored. + * @param[in] pEntry Pointer to process entry. 
+ * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + */ +static NV_STATUS +gpuacctFreeProcEntry +( + GPU_ACCT_PROC_DATA_STORE *pDS, + GPUACCT_PROC_ENTRY *pEntry +) +{ + NV_STATUS status = gpuacctRemoveProcEntry(pDS, pEntry); + + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + portMemFree(pEntry); + + return NV_OK; +} + +/*! + * Looks up an entry for a process in the data store. + * + * @param[in] pDS Pointer to data store where process entry will be looked. + * @param[in] pid PID of the process. + * @param[out] ppEntry Pointer to process entry. + * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + */ +static NV_STATUS +gpuacctLookupProcEntry +( + GPU_ACCT_PROC_DATA_STORE *pDS, + NvU32 pid, + GPUACCT_PROC_ENTRY **ppEntry +) +{ + NV_ASSERT_OR_RETURN(ppEntry != NULL, NV_ERR_INVALID_ARGUMENT); + *ppEntry = NULL; + + *ppEntry = (GPUACCT_PROC_ENTRY *)mapFind(&pDS->procMap, pid); + + return NV_OK; +} + +/*! + * Removes a process entry from the data store. + * + * @param[in] pDS Pointer to data store where process entry is stored. + * @param[in] pEntry Pointer to process entry. + * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + */ +static NV_STATUS +gpuacctRemoveProcEntry +( + GPU_ACCT_PROC_DATA_STORE *pDS, + GPUACCT_PROC_ENTRY *pEntry +) +{ + NV_ASSERT_OR_RETURN(pDS != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_INVALID_ARGUMENT); + + mapRemove(&pDS->procMap, pEntry); + listRemove(&pDS->procList, pEntry); + + return NV_OK; +} + +/*! + * Adds a process entry in the data store. + * + * @note If data store is full, the entry at the head will be removed and freed. + * + * @param[in] pDS Pointer to data store where process entry will be added. + * @param[in] pEntry Pointer to process entry. + * + * @return NV_OK + */ +static NV_STATUS +gpuacctAddProcEntry +( + GPU_ACCT_PROC_DATA_STORE *pDS, + GPUACCT_PROC_ENTRY *pEntry, + NvBool isLiveProcEntry +) +{ + NvU32 maxProcLimit; + GPUACCT_PROC_ENTRY *pOldEntry; + + NV_ASSERT_OR_RETURN(pDS != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_INVALID_ARGUMENT); + + maxProcLimit = isLiveProcEntry ? NV_MAX_LIVE_ACCT_PROCESS : NV_MAX_DEAD_ACCT_PROCESS; + + if (listCount(&pDS->procList) >= maxProcLimit) + { + pOldEntry = NULL; + pOldEntry = listHead(&pDS->procList); + if (pOldEntry) + { + gpuacctFreeProcEntry(pDS, pOldEntry); + } + } + + if (mapInsertExisting(&pDS->procMap, pEntry->procId, pEntry) == NV_TRUE) + { + listAppendExisting(&pDS->procList, pEntry); + } + else + { + return NV_ERR_INSERT_DUPLICATE_NAME; + } + return NV_OK; +} + +/*! + * Finds the process entry for input PMU sample's pid/subpid. + * + * @param[in] pGpuInstanceInfo Pointer to GPU node. + * @param[in] pid Input pid. + * @param[in] subPid Input sub pid. + * @param[out] ppEntry Double pointer to return proc entry structure. + * + */ +static NV_STATUS +gpuacctFindProcEntryFromPidSubpid +( + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo, + NvU32 pid, + NvU32 subPid, + GPUACCT_PROC_ENTRY **ppEntry) +{ + NV_STATUS status; + NvU32 pidToSearch; + GPU_ACCT_PROC_DATA_STORE *pDS; + + if (pGpuInstanceInfo == NULL || ppEntry == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *ppEntry = NULL; // Set the output pointer to NULL. + pDS = NULL; // Initialize data store pointer to NULL. + pidToSearch = 0; + status = NV_OK; + + { + // It's a process running on GPU, return data store for GPU. 
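/*
 * [Editor's note: illustrative aside, not part of the original change.]
 * Each data store keeps its entries in both an intrusive map (pid lookup)
 * and an intrusive list (insertion order). gpuacctAddProcEntry() above
 * evicts from the list head when the NV_MAX_LIVE/DEAD_ACCT_PROCESS limit
 * is reached, so the oldest entry goes first: with a limit of 2 and pids
 * {10, 20} already stored, adding pid 30 frees the entry for pid 10 before
 * appending pid 30 at the tail.
 */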
+ pDS = &pGpuInstanceInfo->liveProcAcctInfo; + pidToSearch = pid; + } + + if (pDS != NULL) + { + status = gpuacctLookupProcEntry(pDS, pidToSearch, ppEntry); + } + + return status; +} + +/*! + * Samples the gpu utilization and updates running average for all + * processes running in one GPU instance. + * + * @note This is a self scheduling callback that's executed every 1 second. + * + * @param[in] pGpu GPU object pointer. + * @param[in] pTmr Timer object pointer. + * @param[in] pContext Pointer to process entry. + * + * @return NV_OK + * @return NV_ERR_INVALID_STATE + * @return Other + * Bubbles up errors from: + * * tmrScheduleCallbackRelSec + */ +static NV_STATUS +gpuacctSampleGpuUtil +( + OBJGPU *pGpu, + OBJTMR *pTmr, + TMR_EVENT *pTmrEvent +) +{ + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo = pTmrEvent->pUserData; + NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS *pParams; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(!IS_GSP_CLIENT(pGpu), NV_ERR_INVALID_STATE); + + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (pGpuInstanceInfo == NULL || pTmr == NULL || pGpuInstanceInfo->pSamplesParams == NULL) + { + NV_PRINTF(LEVEL_ERROR, "NULL objects found\n"); + return NV_ERR_INVALID_STATE; + } + + pParams = pGpuInstanceInfo->pSamplesParams; + portMemSet(pParams, 0, sizeof(*pParams)); + pParams->type = NV2080_CTRL_GPUMON_SAMPLE_TYPE_PERFMON_UTIL; + pParams->bufSize = sizeof(pParams->samples); + pParams->tracker = 0; + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2, + pParams, + sizeof(NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GET_GPUMON_PERFMON_UTIL_SAMPLES failed with status : %d\n", + status); + goto gpuacctSampleGpuUtil_out; + } + + gpuacctProcessGpuUtil(pGpuInstanceInfo, &pParams->samples[0]); + +gpuacctSampleGpuUtil_out: + + // Reschedule + status = tmrEventScheduleRelSec(pTmr, pGpuInstanceInfo->pTmrEvent, 1); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error sheduling callback for util 0x%x\n", + status); + return status; + } + + return status; +} + +/*! + * Processes gpu utilization samples, updating running average for all + * processes running in one GPU instance. + * + * @param[in] pGpuInstanceInfo GPUACCT gpu instance info. + * @param[in] pUtilSampleBuffer perfmon samples collected for the GPU + * + */ +void +gpuacctProcessGpuUtil +( + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo, + NV2080_CTRL_PERF_GPUMON_PERFMON_UTIL_SAMPLE *pUtilSampleBuffer +) +{ + GPUACCT_PROC_ENTRY *pEntry; + NV_STATUS status = NV_OK; + NvU64 maxTimeStamp; + NvU32 index; + + maxTimeStamp = 0; + + for (index = 0; index < NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL; ++index) + { + if (pUtilSampleBuffer[index].base.timeStamp <= pGpuInstanceInfo->lastUpdateTimestamp) + { + // Skip this input sample entry if the sample has already been processed + // in previous gpuacctSampleGpuUtil() call. + continue; + } + + // Find max timestamp in the input pUtilSampleBuffer pmon sample set. + maxTimeStamp = NV_MAX(maxTimeStamp, pUtilSampleBuffer[index].base.timeStamp); + + pGpuInstanceInfo->totalSampleCount++; + + pEntry = NULL; + + // If the PMU sample entry's pid or subpid is invalid, then we won't find the + // pid-subpid entry in data store, so skip processing this PMU gr sample. 
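/*
 * [Editor's note: illustrative aside, not part of the original change.]
 * Overall flow: the 1-second timer callback gpuacctSampleGpuUtil() above
 * fetches a batch of GPUMON perfmon samples through the internal RM
 * control call and hands them to gpuacctProcessGpuUtil(), which adds each
 * sample's gr/fb utilization into the owning process entry's running sums
 * (sumUtil, sumFbUtil). Averages are computed only at query time, in
 * gpuacctGetProcAcctInfo(), by dividing those sums by the number of
 * samples observed while the process was being accounted.
 */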
+ if (pUtilSampleBuffer[index].gr.procId != NV2080_GPUMON_PID_INVALID && + pUtilSampleBuffer[index].gr.subProcessID != NV2080_GPUMON_PID_INVALID) + { + // Find data store in which we should look up the PMU gr sample's pid/subpid. + status = gpuacctFindProcEntryFromPidSubpid(pGpuInstanceInfo, + pUtilSampleBuffer[index].gr.procId, + pUtilSampleBuffer[index].gr.subProcessID, + &pEntry); + if (status == NV_OK && pEntry != NULL) + { + pEntry->sumUtil += pUtilSampleBuffer[index].gr.util; + + NV_PRINTF(LEVEL_INFO, "pid=%d subPid=%d util=%4d sumUtil=%lld sampleCount=%u (total=%u)\n", + pUtilSampleBuffer[index].gr.procId, + pUtilSampleBuffer[index].gr.subProcessID, + pUtilSampleBuffer[index].gr.util, + pEntry->sumUtil, + pGpuInstanceInfo->totalSampleCount - pEntry->startSampleCount, + pGpuInstanceInfo->totalSampleCount); + } + } + + // If the PMU sample entry's pid or subpid is invalid, then we won't find the + // pid-subpid entry in data store, so skip processing this PMU fb sample. + if (pUtilSampleBuffer[index].fb.procId != NV2080_GPUMON_PID_INVALID && + pUtilSampleBuffer[index].fb.subProcessID != NV2080_GPUMON_PID_INVALID) + { + // If GR sample and FB sample are of same pid-subpid, no need to find the proc entry again. + if (pUtilSampleBuffer[index].gr.procId != pUtilSampleBuffer[index].fb.procId || + pUtilSampleBuffer[index].gr.subProcessID != pUtilSampleBuffer[index].fb.subProcessID) + { + // Find data store in which we should look up the PMU fb sample's pid/subpid. + pEntry = NULL; + status = gpuacctFindProcEntryFromPidSubpid(pGpuInstanceInfo, + pUtilSampleBuffer[index].fb.procId, + pUtilSampleBuffer[index].fb.subProcessID, + &pEntry); + } + if (status == NV_OK && pEntry != NULL) + { + pEntry->sumFbUtil += pUtilSampleBuffer[index].fb.util; + } + } + } + + // Update max time stamp found in current input sample. + pGpuInstanceInfo->lastUpdateTimestamp = maxTimeStamp; +} + +/*! + * Starts gpu accounting for the process. + * + * @note This functions schedules a 1 second self scheduling timer callback + * that computes running average of gpu utilization until stopped. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in] gpuInstance gpu instance. + * @param[in] pid PID of the process. + * @param[in] subPid Pid of the process running on guest VM. + * + * @return NV_OK + * @return NV_ERR_INVALID_STATE + * @return NV_ERR_STATE_IN_USE + * @return Other + * Bubbles up errors from: + * * gpuacctLookupProcEntry + * * gpuacctAllocProcEntry + */ +NV_STATUS +gpuacctStartGpuAccounting_IMPL +( + GpuAccounting *pGpuAcct, + NvU32 gpuInstance, + NvU32 pid, + NvU32 subPid +) +{ + OBJGPU *pGpu; + NvU32 vmIndex; + NvU32 searchPid; + NV_STATUS status = NV_OK; + GPUACCT_PROC_ENTRY *pEntry = NULL; + GPU_ACCT_PROC_DATA_STORE *pDS = NULL; + + pGpu = gpumgrGetGpu(gpuInstance); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_STATE); + + GPUACCT_GPU_INSTANCE_INFO *gpuInstanceInfo = &pGpuAcct->gpuInstanceInfo[gpuInstance]; + + vmIndex = NV_INVALID_VM_INDEX; + pDS = &gpuInstanceInfo->liveProcAcctInfo; + + NV_ASSERT_OR_RETURN(pDS != NULL, NV_ERR_INVALID_STATE); + + searchPid = (vmIndex == NV_INVALID_VM_INDEX) ? pid : subPid; + + status = gpuacctLookupProcEntry(pDS, searchPid, &pEntry); + // If pid entry already exists, increment refcount and return. + if (pEntry != NULL) + { + goto out; + } + + // Create entry for the incoming pid. 
+ status = gpuacctAllocProcEntry(pDS, searchPid, + NV_GPUACCT_PROC_TYPE_CPU, &pEntry); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_NO_MEMORY); + + pEntry->isGuestProcess = (vmIndex == NV_INVALID_VM_INDEX) ? NV_FALSE : NV_TRUE; + + pEntry->startTime = gpuacctGetCurrTime(); + + pEntry->startSampleCount = gpuInstanceInfo->totalSampleCount; + + NV_PRINTF(LEVEL_INFO, "pid=%d startSampleCount=%u\n", + searchPid, pEntry->startSampleCount); + +out: + if (subPid != NV_INVALID_VM_PID) + { + pEntry->refCount++; + } + return status; +} + +/*! + * Stops gpu accounting for the process. + * + * @note Stops the timer for computing gpu utilization and moves the process + * entry from list of live processes to dead processes. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in] gpuInstance gpu instance. + * @param[in] pid PID of the process. + * @param[in] subPid Pid of the process running on guest VM. + * + * @return NV_OK + * @return NV_ERR_INVALID_STATE + * @return NV_ERR_STATE_IN_USE + * @return Other + * Bubbles up errors from: + * * gpuacctLookupProcEntry + * * gpuacctRemoveProcEntry + * * gpuacctAddProcEntry + */ +NV_STATUS +gpuacctStopGpuAccounting_IMPL +( + GpuAccounting *pGpuAcct, + NvU32 gpuInstance, + NvU32 pid, + NvU32 subPid +) +{ + OBJGPU *pGpu; + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo; + GPU_ACCT_PROC_DATA_STORE *pLiveDS; + GPU_ACCT_PROC_DATA_STORE *pDeadDS; + GPUACCT_PROC_ENTRY *pEntry; + GPUACCT_PROC_ENTRY *pOldEntry; + NV_STATUS status; + NvU32 searchPid; + NvU32 vmIndex; + + pGpu = gpumgrGetGpu(gpuInstance); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_STATE); + + pGpuInstanceInfo = &pGpuAcct->gpuInstanceInfo[gpuInstance]; + + vmIndex = NV_INVALID_VM_INDEX; + + if (vmIndex == NV_INVALID_VM_INDEX) + { + // Delete the pid from live process list only if subpid is zero. + if (subPid != 0) + { + return NV_OK; + } + pLiveDS = &pGpuInstanceInfo->liveProcAcctInfo; + pDeadDS = &pGpuInstanceInfo->deadProcAcctInfo; + + searchPid = pid; + } + + status = gpuacctLookupProcEntry(pLiveDS, searchPid, &pEntry); + if (status != NV_OK) + { + return status; + } + + if (pEntry == NULL) + { + return NV_ERR_INVALID_STATE; + } + + // return and continue accounting unless this is the last request. + if (--pEntry->refCount != 0) + { + return NV_OK; + } + + pEntry->endTime = gpuacctGetCurrTime(); + pEntry->totalSampleCount = pGpuInstanceInfo->totalSampleCount - pEntry->startSampleCount; + + NV_PRINTF(LEVEL_INFO, "pid=%d\n", searchPid); + + // Move process entry to dead process list for process that run on gpu. + if (pEntry->procType == NV_GPUACCT_PROC_TYPE_GPU) + { + // Only keep the latest entry for the pid. + status = gpuacctLookupProcEntry(pDeadDS, searchPid, &pOldEntry); + if (status != NV_OK) + { + return status; + } + + if (pOldEntry != NULL) + { + status = gpuacctFreeProcEntry(pDeadDS, pOldEntry); + if (status != NV_OK) + { + return status; + } + } + + // Move the entry to dead procs data store + status = gpuacctRemoveProcEntry(pLiveDS, pEntry); + if (status != NV_OK) + { + return status; + } + + status = gpuacctAddProcEntry(pDeadDS, pEntry, NV_FALSE); + if (status != NV_OK) + { + portMemFree(pEntry); + return status; + } + } + else + { + status = gpuacctFreeProcEntry(pLiveDS, pEntry); + if (status != NV_OK) + { + return status; + } + } + + return status; +} + +/*! + * Updates peak (high-water mark) FB utilization of the process. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in] gpuInstance gpu instance. 
+ * @param[in] pid PID of the process. + * @param[in] subPid PID of the process running on guest VM. + * @param[in] fbUsage Current FB usage of the process. + * + * @return NV_OK + * @return NV_ERR_INVALID_STATE + * @return Other + * Bubbles up errors from: + * * gpuacctLookupProcEntry + */ +NV_STATUS +gpuacctUpdateProcPeakFbUsage_IMPL +( + GpuAccounting *pGpuAcct, + NvU32 gpuInstance, + NvU32 pid, + NvU32 subPid, + NvU64 fbUsage +) +{ + GPUACCT_PROC_ENTRY *pEntry; + GPU_ACCT_PROC_DATA_STORE *pDS = NULL; + NV_STATUS status; + + pDS = &pGpuAcct->gpuInstanceInfo[gpuInstance].liveProcAcctInfo; + status = gpuacctLookupProcEntry(pDS, pid, &pEntry); + + if (status != NV_OK) + { + return status; + } + + if (pEntry == NULL) + { + return NV_ERR_INVALID_STATE; + } + + pEntry->procType = NV_GPUACCT_PROC_TYPE_GPU; + + if (fbUsage > pEntry->maxFbUsage) + { + pEntry->maxFbUsage = fbUsage; + } + + return status; +} + +/*! + * Set process type. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in] gpuInstance gpu instance. + * @param[in] pid PID of the process. + * @param[in] subPid PID of the process running on guest VM. + * @param[in] procType Type of the process. + * + * @return NV_OK + * @return NV_ERR_INVALID_STATE + * @return Other + * Bubbles up errors from: + * * gpuacctLookupProcEntry + */ +NV_STATUS +gpuacctSetProcType_IMPL +( + GpuAccounting *pGpuAcct, + NvU32 gpuInstance, + NvU32 pid, + NvU32 subPid, + NvU32 procType +) +{ + GPUACCT_PROC_ENTRY *pEntry; + GPU_ACCT_PROC_DATA_STORE *pDS = NULL; + NV_STATUS status; + + pDS = &pGpuAcct->gpuInstanceInfo[gpuInstance].liveProcAcctInfo; + status = gpuacctLookupProcEntry(pDS, pid, &pEntry); + + if (status != NV_OK) + { + return status; + } + + if (pEntry == NULL) + { + return NV_ERR_INVALID_STATE; + } + + pEntry->procType = procType; + + return status; +} + +/*! + * Gets GPU accounting info for the process. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in,out] pParams NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS pointer. + * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + * @return NV_ERR_INVALID_STATE + * @return NV_ERR_OBJECT_NOT_FOUND + * @return Other + * Bubbles up errors from: + * * gpuacctLookupProcEntry + */ +NV_STATUS +gpuacctGetProcAcctInfo_IMPL +( + GpuAccounting *pGpuAcct, + NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS *pParams +) +{ + GPUACCT_PROC_ENTRY *pEntry; + GPU_ACCT_PROC_DATA_STORE *pDS = NULL; + NV_STATUS status; + OBJGPU *pGpu; + NvU32 vmIndex; + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo; + NvU32 sampleCount; + NvBool isLiveProcess; + + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_INVALID_ARGUMENT); + + pGpu = gpumgrGetGpuFromId(pParams->gpuId); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + + pGpuInstanceInfo = &pGpuAcct->gpuInstanceInfo[pGpu->gpuInstance]; + vmIndex = NV_INVALID_VM_INDEX; + + isLiveProcess = NV_FALSE; + + // Try finding process entry in dead process list. + if (vmIndex == NV_INVALID_VM_INDEX) + { + pDS = &pGpuInstanceInfo->deadProcAcctInfo; + } + NV_ASSERT_OR_RETURN(pDS != NULL, NV_ERR_INVALID_STATE); + + if (vmIndex == NV_INVALID_VM_INDEX) + { + status = gpuacctLookupProcEntry(pDS, pParams->pid, &pEntry); + } + else + { + status = gpuacctLookupProcEntry(pDS, pParams->subPid, &pEntry); + } + + if (status != NV_OK) + { + return status; + } + + // If process entry not found in dead process list, try finding in live + // process list. 
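/*
 * [Editor's note: worked example, not part of the original change.]
 * The division a little further below yields a percentage, assuming each
 * GPUMON sample reports utilization in hundredths of a percent (which the
 * final /= 100 suggests). For instance, a process that accumulated
 * sumUtil = 540000 over 120 samples reports:
 *
 *     gpuUtil = (540000 / 120) / 100 = 45   // i.e. 45% average GPU utilization
 */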
+ if (pEntry == NULL) + { + isLiveProcess = NV_TRUE; + + if (vmIndex == NV_INVALID_VM_INDEX) + { + pDS = &pGpuInstanceInfo->liveProcAcctInfo; + } + NV_ASSERT_OR_RETURN(pDS != NULL, NV_ERR_INVALID_STATE); + + if (vmIndex == NV_INVALID_VM_INDEX) + { + status = gpuacctLookupProcEntry(pDS, pParams->pid, &pEntry); + } + else + { + status = gpuacctLookupProcEntry(pDS, pParams->subPid, &pEntry); + } + if (status != NV_OK) + { + return status; + } + + if ((pEntry == NULL) || (pEntry->procType != NV_GPUACCT_PROC_TYPE_GPU)) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + } + + pParams->maxFbUsage = pEntry->maxFbUsage; + pParams->gpuUtil = pEntry->gpuUtil; + pParams->fbUtil = pEntry->fbUtil; + pParams->startTime = pEntry->startTime; + pParams->endTime = pEntry->endTime; + + sampleCount = isLiveProcess == NV_TRUE ? + pGpuInstanceInfo->totalSampleCount - pEntry->startSampleCount: + pEntry->totalSampleCount; + if (sampleCount) + { + pParams->gpuUtil = (NvU32)(pEntry->sumUtil / sampleCount); + pParams->gpuUtil /= 100; + + pParams->fbUtil = (NvU32)(pEntry->sumFbUtil / sampleCount); + pParams->fbUtil /= 100; + } + + return NV_OK; +} + +/*! + * Gets all the pids for which accounting data is available. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in,out] pParams NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS pointer. + * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + * @return NV_ERR_INVALID_STATE + */ +NV_STATUS +gpuacctGetAcctPids_IMPL +( + GpuAccounting *pGpuAcct, + NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS *pParams +) +{ + GPUACCT_PROC_ENTRY *pEntry; + GPU_ACCT_PROC_LIST *pList; + OBJGPU *pGpu; + NvU32 count; + NvU32 vmIndex; + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo; + + ct_assert((NV_MAX_LIVE_ACCT_PROCESS + NV_MAX_DEAD_ACCT_PROCESS) <= NV0000_GPUACCT_PID_MAX_COUNT); + + if (pParams == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pGpu = gpumgrGetGpuFromId(pParams->gpuId); + if (pGpu == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pGpuInstanceInfo = &pGpuAcct->gpuInstanceInfo[pGpu->gpuInstance]; + count = 0; + vmIndex = NV_INVALID_VM_INDEX; + + if (vmIndex == NV_INVALID_VM_INDEX) + { + pList = &pGpuInstanceInfo->deadProcAcctInfo.procList; + } + NV_ASSERT_OR_RETURN(pList != NULL, NV_ERR_INVALID_STATE); + + GPU_ACCT_PROC_LISTIter iter = listIterAll(pList); + while (listIterNext(&iter)) + { + pEntry = iter.pValue; + if (pEntry) + { + pParams->pidTbl[count++] = pEntry->procId; + } + } + + if (vmIndex == NV_INVALID_VM_INDEX) + { + pList = &pGpuInstanceInfo->liveProcAcctInfo.procList; + } + NV_ASSERT_OR_RETURN(pList != NULL, NV_ERR_INVALID_STATE); + + iter = listIterAll(pList); + while (listIterNext(&iter)) + { + pEntry = iter.pValue; + if (pEntry && pEntry->procType == NV_GPUACCT_PROC_TYPE_GPU) + { + pParams->pidTbl[count++] = pEntry->procId; + } + } + + pParams->pidCount = count; + + return NV_OK; +} + +/*! + * Gets accounting mode. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in] gpuInstance gpu instance. + * @param[in,out] pGetAcctModeParams NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS pointer. 
+ * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + */ +NV_STATUS +gpuacctGetAccountingMode_IMPL +( + GpuAccounting *pGpuAcct, + NvU32 gpuInstance, + NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS *pGetAcctModeParams +) +{ + OBJGPU *pGpu; + NvU32 vmPid; + + NV_ASSERT_OR_RETURN(pGetAcctModeParams != NULL, NV_ERR_INVALID_ARGUMENT); + + pGpu = gpumgrGetGpu(gpuInstance); + if (pGpu == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // Set vmPid if we are on VGX host and pParams->pid is non zero. + // pParams->pid will be set when the RM control call is coming + // from VGPU plugin, otherwise will be 0. + vmPid = (hypervisorIsVgxHyper() && (pGetAcctModeParams->pid != 0)) ? + pGetAcctModeParams->pid : + NV_INVALID_VM_PID; + + if (vmPid == NV_INVALID_VM_PID) + { + pGetAcctModeParams->state = pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON) ? + NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED : + NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED; + } + + return NV_OK; +} + +/*! + * Starts timer callbacks. + * + * @param[in] pGpu + * @param[in] pGpuInstanceInfo GPUACCT gpu instance info. + * + * @return NV_OK + * @return NV_ERR_INVALID_STATE + * @return NV_ERR_NO_MEMORY + */ +static NV_STATUS +gpuacctStartTimerCallbacks +( + OBJGPU *pGpu, + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NV_STATUS status = NV_OK; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON)) + return NV_OK; + + if (pTmr == NULL && !IS_GSP_CLIENT(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "NULL pTmr object found\n"); + return NV_ERR_INVALID_STATE; + } + + pGpuInstanceInfo->pSamplesParams = + portMemAllocNonPaged(sizeof(*pGpuInstanceInfo->pSamplesParams)); + if (pGpuInstanceInfo->pSamplesParams == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, + "Failed to allocate memory for sample params\n"); + goto done; + } + + if (IS_GSP_CLIENT(pGpu)) + goto done; + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + tmrEventCreate(pTmr, &pGpuInstanceInfo->pTmrEvent, gpuacctSampleGpuUtil, + pGpuInstanceInfo, TMR_FLAGS_NONE), + done); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + tmrEventScheduleRelSec(pTmr, pGpuInstanceInfo->pTmrEvent, 1), + done); + + done: + if (status == NV_OK) + pGpu->setProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON, NV_TRUE); + else + gpuacctStopTimerCallbacks(pGpu, pGpuInstanceInfo); + + return status; +} + +/*! + * Stops timer callbacks. + * + * @param[in] pGpu + * @param[in] pGpuInstanceInfo GPUACCT gpu instance info. + */ +static void +gpuacctStopTimerCallbacks +( + OBJGPU *pGpu, + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + if (IS_GSP_CLIENT(pGpu)) + goto done; + + if (pTmr != NULL && pGpuInstanceInfo->pTmrEvent != NULL) + { + tmrEventDestroy(pTmr, pGpuInstanceInfo->pTmrEvent); + pGpuInstanceInfo->pTmrEvent = NULL; + } + + portMemFree(pGpuInstanceInfo->pSamplesParams); + pGpuInstanceInfo->pSamplesParams = NULL; + + done: + pGpu->setProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON, NV_FALSE); +} + +/*! + * Enables accounting for the gpu. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in] gpuInstance gpu instance. + * @param[in,out] pSetAcctModeParams NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS pointer. 
+ * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + */ +NV_STATUS +gpuacctEnableAccounting_IMPL +( + GpuAccounting *pGpuAcct, + NvU32 gpuInstance, + NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *pSetAcctModeParams +) +{ + OBJGPU *pGpu; + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pSetAcctModeParams != NULL, NV_ERR_INVALID_ARGUMENT); + + pGpu = gpumgrGetGpu(gpuInstance); + if (pGpu == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // + // Accounting mode is not supported if SMC is enabled. + // TODO:CUDANVML-43 tracks a top-level task to make accounting mode + // MIG aware. + // + if (IS_MIG_ENABLED(pGpu)) + return NV_ERR_NOT_SUPPORTED; + + // Set vmPid if we are on VGX host and pParams->pid is non zero. + // pParams->pid will be set when the RM control call is coming + // from VGPU plugin, otherwise will be 0. + NvU32 vmPid = (hypervisorIsVgxHyper() && (pSetAcctModeParams->pid != 0)) ? + pSetAcctModeParams->pid : + NV_INVALID_VM_PID; + + pGpuInstanceInfo = &pGpuAcct->gpuInstanceInfo[pGpu->gpuInstance]; + + if (vmPid == NV_INVALID_VM_PID) + { + status = gpuacctStartTimerCallbacks(pGpu, pGpuInstanceInfo); + if (status != NV_OK) + return status; + } + + return status; +} + +/*! + * Disables accounting for the gpu and cleans up accounting data. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in] gpuInstance gpu instance. + * @param[in,out] pSetAcctModeParams NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS pointer. + * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + * @return NV_ERR_INVALID_STATE + */ +NV_STATUS +gpuacctDisableAccounting_IMPL +( + GpuAccounting *pGpuAcct, + NvU32 gpuInstance, + NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *pSetAcctModeParams +) +{ + GPU_ACCT_PROC_DATA_STORE *pDS = NULL; + OBJGPU *pGpu; + NvU32 vmPid; + GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo; + + NV_ASSERT_OR_RETURN(pSetAcctModeParams != NULL, NV_ERR_INVALID_ARGUMENT); + + pGpu = gpumgrGetGpu(gpuInstance); + if (pGpu == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // Set vmPid if we are on VGX host and pParams->pid is non zero. + // pParams->pid will be set when the RM control call is coming + // from VGPU plugin, otherwise will be 0. + vmPid = (hypervisorIsVgxHyper() && (pSetAcctModeParams->pid != 0)) ? + pSetAcctModeParams->pid : + NV_INVALID_VM_PID; + + // On VGX host, accounting mode will be enabled by default, so that we can + // start gathering accounting data regardless of when guest comes and goes. + // Don't allow user to disable accounting mode on VGX host. + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON) && + hypervisorIsVgxHyper() && (vmPid == NV_INVALID_VM_PID)) + { + return NV_ERR_NOT_SUPPORTED; + } + + pGpuInstanceInfo = &pGpuAcct->gpuInstanceInfo[gpuInstance]; + + if (vmPid == NV_INVALID_VM_PID) + { + gpuacctStopTimerCallbacks(pGpu, pGpuInstanceInfo); + + pDS = &pGpuInstanceInfo->liveProcAcctInfo; + } + NV_ASSERT_OR_RETURN(pDS != NULL, NV_ERR_INVALID_STATE); + + // remove (stale) entries from live process data store. + gpuacctCleanupDataStore(pDS); + + return NV_OK; +} + +/*! + * Clears accounting data for the gpu. + * + * @param[in] pGpuAcct GPUACCT object pointer + * @param[in] gpuInstance gpu instance. + * @param[in,out] pClearAcctDataParams NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS pointer. 
+ * + * @return NV_OK + * @return NV_ERR_INVALID_ARGUMENT + * @return NV_ERR_INVALID_STATE + */ +NV_STATUS +gpuacctClearAccountingData_IMPL +( + GpuAccounting *pGpuAcct, + NvU32 gpuInstance, + NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS *pClearAcctDataParams +) +{ + NvU32 vmPid; + GPUACCT_GPU_INSTANCE_INFO *gpuInstanceInfo = &pGpuAcct->gpuInstanceInfo[gpuInstance]; + + NV_ASSERT_OR_RETURN(pClearAcctDataParams != NULL, NV_ERR_INVALID_ARGUMENT); + + // Set vmPid if we are on VGX host and pParams->pid is non zero. + // pParams->pid will be set when the RM control call is coming + // from VGPU plugin, otherwise will be 0. + vmPid = (hypervisorIsVgxHyper() && (pClearAcctDataParams->pid != 0)) ? + pClearAcctDataParams->pid : + NV_INVALID_VM_PID; + + if (vmPid == NV_INVALID_VM_PID) + { + gpuacctCleanupDataStore(&gpuInstanceInfo->deadProcAcctInfo); + } + + return NV_OK; +} + +/*! + * Returns the time in micro seconds. + * + */ +static NvU64 gpuacctGetCurrTime +( + void +) +{ + NvU64 currTime = 0x00000000ffffffff; + NvU32 currTimeHi, currTimeLo; + osGetCurrentTime(&currTimeHi, &currTimeLo); + + currTime = currTime & currTimeHi; + currTime = (currTime * 1000000) + currTimeLo; + + return currTime; +} + diff --git a/src/nvidia/src/kernel/diagnostics/journal.c b/src/nvidia/src/kernel/diagnostics/journal.c new file mode 100644 index 000000000..1f28af5e8 --- /dev/null +++ b/src/nvidia/src/kernel/diagnostics/journal.c @@ -0,0 +1,4219 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/***************************** HW State Routines ***************************\ +* * +* RM robust error journal (formerly RCDB) * +* * +\***************************************************************************/ + +#include "gpu_mgr/gpu_mgr.h" +#include "nvRmReg.h" +#include "nvBldVer.h" +#include "nvVer.h" +#include "os/os.h" +#include "core/system.h" +#include "gpu/gpu.h" +#include "diagnostics/journal.h" +#include "platform/chipset/chipset.h" +#include "diagnostics/nv_debug_dump.h" +#include "diagnostics/tracer.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" +#include "vgpu/rpc.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "nvdevid.h" + + + +#include "ctrl/ctrl0000/ctrl0000nvd.h" + +#include "nvlimits.h" +#include "Nvcm.h" + +#include "lib/protobuf/prb_util.h" +#include "g_all_dcl_pb.h" +#include "g_nvdebug_pb.h" +#include "nv_ref.h" + +#define NOCAT_UNKNOWN_STR "*** unknown ***" +#define NOCAT_NA_STR "N/A" +#define NOCAT_LEGACY_STR "LEGACY" +#define NOCAT_FULLCHIP_TDR_STR "FULL CHIP RESET" +#define NOCAT_BUS_RESET_TDR_STR "BUS RESET" +#define NOCAT_GC6_RESET_TDR_STR "GC6 RESET" +#define NOCAT_NORMAL_TDR_STR "NORMAL TDR" +#define NOCAT_UCODE_RESET_TDR_STR "UCODE RESET" +#define NOCAT_SURPRISE_REMOVAL_TDR_STR "SURPRISE REMOVAL" +#define NOCAT_DEFAULT_TAG_VALUE_STR "prod" +#define NOCAT_DEFAULT_TDR_REASON_SRC_STR "KMD" +#define NOCAT_FBSIZETESTED 0x10 + +// Reducing size to 4K for reducing non-paged allocations on win8 +#define NVDUMP_DEBUGGER_BUFFER_SIZE (4 * 1024) + +#define JOURNAL_BUFFER_SIZE_DEFAULT (4 * 1024) + +#define JOURNAL_ASSERT_RECORD_QUALIFYING_STACK_ENTRIES 5 + +static void nvdDebuggerControlFunc(void); + +#if (defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX)) && !defined(NV_MODS) +#if !defined(DEBUG) && !defined(QA_BUILD) +static NvBool rcdProbeGpuPresent(OBJGPU *pGpu, NvU64 ip); +static NvBool rcdProbeAllGpusPresent(NvU64 ip); +static volatile NvS32 probeGpuRecursion = 0; +#endif +#endif +static NvU32 _rcdbGetOcaRecordSizeWithHeader(Journal *pRcDB, RMCD_RECORD_TYPE type); +static volatile NvS32 concurrentRingBufferAccess = 0; +static volatile NvS32 assertListRecursion = 0; +static void rcdbFindRingBufferForType(Journal *pRcDB, RMCD_RECORD_TYPE recType, RING_BUFFER_LOG **ppRingBuffer); +static NV_STATUS _rcdbGetNocatJournalRecord(OBJRCDB* pRcdb, + NvU32 id, NvBool bExactMatch, + RmRCCommonJournal_RECORD** ppReturnedCommon, RM_NOCAT_JOURNAL_ENTRY** ppReturnedJournal); +static NV_STATUS _rcdbNocatReportAssert(OBJGPU* pGpu, RmRCCommonAssert_RECORD* pAssert); + +// Global flag to make sure we never re-enter the nvLog code. +#if defined(DEBUG) || defined(ASSERT_BUILD) || defined(QA_BUILD) || ((defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX)) && !defined(NV_MODS)) +static volatile NvS32 nvLogRecursion = 0; +#endif + +// NvDump interface config - communicates with external kernel debuggers +NVDUMP_EXPORT volatile NV_DECLARE_ALIGNED(NVDUMP_CONFIG, 8) nvDumpConfig = +{ + NVDUMP_CONFIG_SIGNATURE, // sigHead + NvP64_NULL, // debuggerControlFuncAddr + { NvP64_NULL, NVDUMP_DEBUGGER_BUFFER_SIZE, 0 }, // buffer + 0, // gpuSelect + NVDUMP_COMPONENT_SYS_ALL, // component + NVDUMP_STATUS_IDLE, // dumpStatus + NV_OK, // rmStatus + + NVDUMP_CONFIG_SIGNATURE // sigTail +}; + +void +rcdbDestruct_IMPL(Journal *pRcDB) +{ + EVENT_JOURNAL *pJournal = &pRcDB->Journal; + + // Deallocate NvDebug debugger dump buffer. 
+ if (nvDumpConfig.buffer.address != NvP64_NULL) + { + portMemFree(NvP64_VALUE(nvDumpConfig.buffer.address)); + nvDumpConfig.buffer.address = NvP64_NULL; + } + + // Delete Journal and Btree + if (pJournal->pBuffer != NULL) + { + portMemFree(pJournal->pBuffer); + portMemFree(pJournal->AssertList.ppList); + + // clear journal of anything + portMemSet(pJournal, 0, sizeof(EVENT_JOURNAL)); + } + + rcdbClearErrorHistory(pRcDB); + + rcdbDestroyRingBufferCollection(pRcDB); + + portMemFree(pRcDB->previousDriverVersion); + pRcDB->previousDriverVersion = NULL; + + portMemFree(pRcDB->previousDriverBranch); + pRcDB->previousDriverBranch = NULL; +} + +static void +_initJournal(EVENT_JOURNAL *pJournal, NvU32 size) +{ + // verify we are not abandoning any memory allocations. + NV_ASSERT(NULL == pJournal->pBuffer); + NV_ASSERT(NULL == (NvU8*) pJournal->AssertList.ppList); + + // init the Journal to an empty buffer. + pJournal->pBuffer = NULL; + pJournal->BufferSize = 0; + pJournal->pFree = pJournal->pBuffer; + pJournal->BufferRemaining = pJournal->BufferSize; + pJournal->pCurrCollection = NULL; + pJournal->RecordCount = 0; + + // init the assert list to an empty buffer. + pJournal->AssertList.ppList = NULL; + pJournal->AssertList.Size = 0; + pJournal->AssertList.Count = 0; + pJournal->AssertList.QualifyingStackSize = JOURNAL_ASSERT_RECORD_QUALIFYING_STACK_ENTRIES; + + // allocate and initialize journal buffer memory + pJournal->pBuffer = portMemAllocNonPaged(size); + if (pJournal->pBuffer != NULL ) + { + pJournal->BufferSize = size; + pJournal->pFree = pJournal->pBuffer; + pJournal->BufferRemaining = pJournal->BufferSize; + + // if the journal is large enough to hold at least one assert record, + // init the assert list as well. + if (sizeof(RmRCCommonAssert_RECORD) <= pJournal->BufferSize) + { + pJournal->AssertList.Size = pJournal->BufferSize / sizeof(RmRCCommonAssert_RECORD); + pJournal->AssertList.ppList = portMemAllocNonPaged(pJournal->AssertList.Size * sizeof(pJournal->AssertList.ppList[0])); + if (pJournal->AssertList.ppList == NULL ) + { + NV_PRINTF(LEVEL_ERROR, + "Failure to allocate RC assert tracking buffer \n"); + pJournal->AssertList.Size = 0; + } + } + } + else + { + NV_PRINTF(LEVEL_ERROR, "Failure to allocate RC journal buffer \n"); + } +} + +NV_STATUS +rcdbConstruct_IMPL(Journal *pRcDB) +{ + EVENT_JOURNAL *pJournal = &pRcDB->Journal; + RING_BUFFER_LOG_COLLECTION *pRingBufferColl = &pRcDB->RingBufferColl; + NvU32 i; + void *pBuffer; + + _initJournal(pJournal, JOURNAL_BUFFER_SIZE_DEFAULT); + + portMemSet(pRingBufferColl, 0x00, sizeof(pRcDB->RingBufferColl)); + + pRcDB->BugcheckCount = 0; + + // Allocate NvDebug debugger dump buffer. + pBuffer = portMemAllocNonPaged(nvDumpConfig.buffer.size); + if (pBuffer != NULL) + { + nvDumpConfig.buffer.address = NV_SIGN_EXT_PTR_TO_NvP64(pBuffer); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate NVD debugger dump buffer\n"); + } + + // Initialize NvDebug debugger function address. + nvDumpConfig.debuggerControlFuncAddr = NV_SIGN_EXT_PTR_TO_NvP64(nvdDebuggerControlFunc); + + // + // Create RC Diagnostic report Wrap Buffer + // + if (NULL == rcdbCreateRingBuffer(pRcDB, RmRcDiagReport, MAX_RCDB_RCDIAG_WRAP_BUFF)) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate RC Diagnostic Ring Buffer\n"); + } + // init the RC error report data + pRcDB->RcErrRptNextIdx = 0; + pRcDB->RcErrRptRecordsDropped = NV_FALSE; + + // Initialize RC Error Counters. 
+ for ( i = 0 ; i < MAX_RC_ERROR_COUNTER ; i++) + { + pRcDB->rcErrorCounterArray[i].rcErrorType = RC_ERROR_COUNTER_TYPE_INVALID; + pRcDB->rcErrorCounterArray[i].rcErrorCount = 0; + pRcDB->rcErrorCounterArray[i].rcLastCHID = INVALID_CHID; + pRcDB->rcErrorCounterArray[i].rcLastTime = 0; + } + pRcDB->rcErrorCounterArray[RC_ERROR_COUNTER_OTHER_INDEX].rcErrorType = RC_ERROR_COUNTER_OTHER_TYPE; + + // clear the Nocat Queue descriptors & counters + portMemSet(&pRcDB->nocatJournalDescriptor, 0x00, NV_SIZEOF32(pRcDB->nocatJournalDescriptor)); + portMemSet(pRcDB->nocatJournalDescriptor.lastRecordId, 0xff, NV_SIZEOF32(pRcDB->nocatJournalDescriptor.lastRecordId)); + pRcDB->nocatJournalDescriptor.nocatLastRecordType = NV2080_NOCAT_JOURNAL_REC_TYPE_UNKNOWN; + pRcDB->nocatJournalDescriptor.cacheFreshnessPeriodticks = NOCAT_CACHE_FRESHNESS_PERIOD_MS; + pRcDB->nocatJournalDescriptor.cacheFreshnessPeriodticks *= osGetTimestampFreq(); + pRcDB->nocatJournalDescriptor.cacheFreshnessPeriodticks /= 1000ULL; + + // + // Create NOCAT report Wrap Buffer + // + if (NULL == rcdbCreateRingBuffer(pRcDB, RmNocatReport, MAX_RCDB_NOCAT_WRAP_BUFF)) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate NOCAT Ring Buffer\n"); + } + + return NV_OK; +} + +// +// Retrieve the previous driver version from volatile registry entires +// and then save the current driver version for next time. +// +NV_STATUS rcdbSavePreviousDriverVersion_IMPL +( + OBJGPU *pGpu, + Journal *pRcDB +) +{ + NV_STATUS nvStatus = NV_OK; + + NvU32 regEntrySize = 0; + NvU32 changeListNum = NV_LAST_OFFICIAL_CHANGELIST_NUM; + + // Only run this code only once each time the driver is loaded. + if (pRcDB->bPrevDriverCodeExecuted) + return NV_OK; + + pRcDB->bPrevDriverCodeExecuted = NV_TRUE; + + // + // Get the previous driver version information + // from volatile registry settings. + // + nvStatus = osReadRegistryVolatileSize(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_VERSION, ®EntrySize); + + // Early exit if this platform does not support volatile registry. + if (nvStatus == NV_ERR_NOT_SUPPORTED) + return NV_OK; + + if ((NV_OK == nvStatus) && (0 != regEntrySize)) + { + // + // Previous driver version is there, so assume all previous driver + // information is there as well. 
+ // + pRcDB->previousDriverVersion = portMemAllocNonPaged(regEntrySize + 1); + if (pRcDB->previousDriverVersion == NULL) + { + nvStatus = NV_ERR_NO_MEMORY; + DBG_BREAKPOINT(); + goto rcdbSavePreviousDriverVersion_writeRegistry; + } + + nvStatus = osReadRegistryVolatile(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_VERSION, + (NvU8 *)pRcDB->previousDriverVersion, + regEntrySize); + if (nvStatus != NV_OK) + { + DBG_BREAKPOINT(); + goto rcdbSavePreviousDriverVersion_writeRegistry; + } + pRcDB->previousDriverVersion[regEntrySize] = 0; + + nvStatus = osReadRegistryVolatileSize(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_BRANCH, ®EntrySize); + if ((nvStatus != NV_OK) || (0 == regEntrySize)) + { + DBG_BREAKPOINT(); + goto rcdbSavePreviousDriverVersion_writeRegistry; + } + + pRcDB->previousDriverBranch = portMemAllocNonPaged(regEntrySize + 1); + if (pRcDB->previousDriverBranch == NULL) + { + nvStatus = NV_ERR_NO_MEMORY; + DBG_BREAKPOINT(); + goto rcdbSavePreviousDriverVersion_writeRegistry; + } + + nvStatus = osReadRegistryVolatile(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_BRANCH, + (NvU8 *)pRcDB->previousDriverBranch, + regEntrySize); + if (nvStatus != NV_OK) + { + DBG_BREAKPOINT(); + goto rcdbSavePreviousDriverVersion_writeRegistry; + } + pRcDB->previousDriverBranch[regEntrySize] = 0; + + nvStatus = osReadRegistryVolatile(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_CHANGELIST, + (NvU8 *)&pRcDB->prevDriverChangelist, + sizeof(pRcDB->prevDriverChangelist)); + if (nvStatus != NV_OK) + { + DBG_BREAKPOINT(); + goto rcdbSavePreviousDriverVersion_writeRegistry; + } + + nvStatus = osReadRegistryVolatile(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_LOAD_COUNT, + (NvU8 *)&pRcDB->driverLoadCount, + sizeof(pRcDB->driverLoadCount)); + if (nvStatus != NV_OK) + { + DBG_BREAKPOINT(); + goto rcdbSavePreviousDriverVersion_writeRegistry; + } + } + + // Always write out the driver info, even if there was an error reading it. +rcdbSavePreviousDriverVersion_writeRegistry: + pRcDB->driverLoadCount++; + + osWriteRegistryVolatile(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_VERSION, + (NvU8 *)NV_VERSION_STRING, + sizeof(NV_VERSION_STRING)); + + osWriteRegistryVolatile(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_BRANCH, + (NvU8 *)NV_BUILD_BRANCH_VERSION, + sizeof(NV_BUILD_BRANCH_VERSION)); + + osWriteRegistryVolatile(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_CHANGELIST, + (NvU8 *)&changeListNum, + sizeof(changeListNum)); + + osWriteRegistryVolatile(pGpu, + NV_REG_STR_RM_RC_PREV_DRIVER_LOAD_COUNT, + (NvU8 *)&pRcDB->driverLoadCount, + sizeof(pRcDB->driverLoadCount)); + + return nvStatus; +} + +NV_STATUS rcdbAddAssertJournalRecWithLine(void *pVoidGpu, NvU32 lineNum, void** ppRec, NvU8 jGroup, NvU8 type, NvU16 size, NvU32 level, NvU64 key) +{ + OBJSYS *pSys; + Journal *pRcDB; + OBJGPU *pPossibleNULLGpu; + JOURNAL_ASSERT_LIST *pAssertList; + RmRCCommonAssert_RECORD newAssertRec; + RmRCCommonAssert_RECORD *pAssertRec; + NV_STATUS rmStatus = NV_ERR_GENERIC; + NvU32 i; + + // + // Note: we allow NULL pGpu here, as many clients (such as KMD) + // do not have access to pGpu. And much of the RM does not provide this either. + // + pPossibleNULLGpu = reinterpretCast(pVoidGpu, OBJGPU *); + + pSys = SYS_GET_INSTANCE(); + if (!pSys) + { + return NV_ERR_INVALID_STATE; + } + + pRcDB = SYS_GET_RCDB(pSys); + if (!pRcDB) + { + return NV_ERR_INVALID_STATE; + } + + pAssertList = &pRcDB->Journal.AssertList; + + *ppRec = NULL; + + RMTRACE_PROBE4_PRIMTYPE(rcjournal, assertlog, NvU32, (pPossibleNULLGpu ? 
pPossibleNULLGpu->gpuId : 0), NvU8, type, NvU32, level, NvU64, key); + + // create a local instance of the Assert record. + portMemSet(&newAssertRec, 0x00, sizeof(newAssertRec)); + rcdbSetCommonJournalRecord(pPossibleNULLGpu, &newAssertRec.common); + newAssertRec.count = 1; + newAssertRec.breakpointAddrHint = key; + newAssertRec.lineNum = lineNum; + + if (pRcDB->getProperty(pRcDB, PDB_PROP_RCDB_COMPRESS)) + { + // search for a pre-existing assert record with the same stack + for (i = 0; i < pAssertList->Count; ++i) + { + pAssertRec = pAssertList->ppList[i]; + if ((newAssertRec.breakpointAddrHint == pAssertRec->breakpointAddrHint) && + (0 == portMemCmp(newAssertRec.callStack, pAssertRec->callStack, + sizeof(newAssertRec.callStack[0]) * pAssertList->QualifyingStackSize))) + { + pAssertRec->count++; + pAssertRec->lastTimeStamp = newAssertRec.common.timeStamp; + + rmStatus = NV_OK; + break; + } + } + } + + if (rmStatus != NV_OK) + { + // Discard to avoid reentry from messing up record array. + if (portAtomicIncrementS32(&assertListRecursion) == 1) + { + rmStatus = rcdbAllocNextJournalRec(pRcDB, (NVCD_RECORD **)&pAssertRec, jGroup, type, size); + if (NV_OK == rmStatus) + { + // the Header is filled in when the record is allocated, so update the local instance header. + newAssertRec.common.Header = pAssertRec->common.Header; + *pAssertRec = newAssertRec; + if (pAssertList->Count < pAssertList->Size) + { + pAssertList->ppList[pAssertList->Count] = pAssertRec; + ++(pAssertList->Count); + } + else + { + // based on the way the assert list size is calculated this should never happen.... + NV_PRINTF(LEVEL_ERROR, + "failed to insert tracking for assert record\n"); + } + } + } + portAtomicDecrementS32(&assertListRecursion); + } + + if (rmStatus == NV_OK) + { + RMTRACE_RMJOURNAL(_ASSERTLOG, (pPossibleNULLGpu ? pPossibleNULLGpu->gpuId : RMTRACE_UNKNOWN_GPUID), + type, + jGroup, + key, + pAssertRec->count, + pAssertRec->common.timeStamp, + pAssertRec->lastTimeStamp); + *ppRec = pAssertRec; + + _rcdbNocatReportAssert(pPossibleNULLGpu, pAssertRec); + } + else + { + _rcdbNocatReportAssert(pPossibleNULLGpu, &newAssertRec); + } + + return rmStatus; +} + +NV_STATUS rcdbAddAssertJournalRec(void *pVoidGpu, void** ppRec, NvU8 jGroup, NvU8 type, NvU16 size, NvU32 level, NvU64 key) +{ + return rcdbAddAssertJournalRecWithLine(pVoidGpu, NV_RM_ASSERT_UNKNOWN_LINE_NUM, ppRec, jGroup, type, size, level, key); +} +// Populate stateMask with flags that represent the power state and other useful things. +static NvU64 _getCommonJournalStateMask(OBJGPU *pGpu) +{ + return 0; +} + +// Fill in the common portion of the journal structure. 
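+// Every journal record starts with this common header: a timestamp, the owning
+// GPU's ID as the GPU tag, the current thread handle as the CPU tag, and the
+// power/state mask derived above.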
+void +rcdbSetCommonJournalRecord +( + OBJGPU *pGpu, + RmRCCommonJournal_RECORD *pRec +) +{ + OS_THREAD_HANDLE threadId; + + pRec->timeStamp = osGetTimestamp(); + pRec->GPUTag = 0; + pRec->CPUTag = 0; + pRec->stateMask = 0; + + if (pGpu) + { + pRec->GPUTag = pGpu->gpuId; + pRec->stateMask = _getCommonJournalStateMask(pGpu); + } + + if (NV_OK == osGetCurrentThread(&threadId)) + { + pRec->CPUTag = (NvU64)threadId; + } +} + +NV_STATUS +rcdbAddBugCheckRec_IMPL +( + OBJGPU *pGpu, + Journal *pRcDB, + NvU32 bugCheckCode +) +{ + RmJournalBugcheck_RECORD *pRec; + NV_STATUS rmStatus; + + rmStatus = rcdbAllocNextJournalRec(pRcDB, + (NVCD_RECORD **)&pRec, + RmGroup, + RmJournalBugCheck, + sizeof(*pRec)); + if (NV_OK == rmStatus) + { + rcdbSetCommonJournalRecord(pGpu, &pRec->common); + pRec->bugCheckCode = bugCheckCode; + } + + pRcDB->BugcheckCount++; + + return rmStatus; +} + +NV_STATUS +rcdbAddPowerStateRec_IMPL +( + OBJGPU *pGpu, + Journal *pRcDB, + NvU32 powerEvent, + NvU32 state, + NvU32 fastBootPowerState +) +{ + RmPowerState_RECORD newRmDiagWrapBuffRec; + + // Create Records, then write it. + newRmDiagWrapBuffRec.powerState = state; + newRmDiagWrapBuffRec.powerEvent = powerEvent; + newRmDiagWrapBuffRec.fastBootPowerState = fastBootPowerState; + rcdbAddRecToRingBuffer(pGpu, pRcDB, RmPowerState, + sizeof(RmPowerState_RECORD), (NvU8 *)&newRmDiagWrapBuffRec); + return NV_OK; +} + +NV_STATUS +rcdbGetRcDiagRecBoundaries_IMPL +( + Journal *pRcDB, + NvU16 *pStart, + NvU16 *pEnd, + NvU32 owner, + NvU32 processId +) +{ + NV_STATUS status = NV_ERR_MISSING_TABLE_ENTRY; + RmRCCommonJournal_RECORD *pCommon; + RmRcDiag_RECORD *pRecord = NULL; + RING_BUFFER_LOG *pRingBuffer = NULL; + NvU32 i; + NvU16 logicalStartIdx; + NvU16 start = 0; + NvU16 end = 0; + NvBool foundStart = NV_FALSE; + NvBool foundEnd = NV_FALSE; + + // scan the buffer to find all the qualified records & return the + // first & last indicies of the qualified records found. + + // Get the Diag Report Ring buffer. + rcdbFindRingBufferForType(pRcDB, RmRcDiagReport, &pRingBuffer); + + // attempt to claim ownership + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + // get the logical start of the buffer. + logicalStartIdx = pRingBuffer->headIndex; + + // run thru all the entries in the buffer, start to end, until we find the start & end of the range we are looking for. + for (i = 0; i < pRingBuffer->numEntries; ++i) + { + // get a pointer to the record from the buffer. + pCommon = (RmRCCommonJournal_RECORD *)(((NvU8 *)pRingBuffer->pBuffer) + (_rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport) * ((logicalStartIdx + i) % pRingBuffer->maxEntries))); + pRecord = (RmRcDiag_RECORD*) &(pCommon[1]); + + // check to see if the record qualifies + if (((RCDB_RCDIAG_DEFAULT_OWNER != owner) && (pRecord->owner != owner) && (NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID != owner)) + || ((NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_PROCESS_ID != processId) && (pRecord->processId != processId))) + { + continue; + } + switch (foundStart) + { + case NV_FALSE: + // check if this is a start record. + // we want the first record to be a start record to insure that all the reports that are in the range are complete + // (I.E. we didn't wrap over the first record of a report) + if (0 != (pRecord->flags & NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_FIRST)) + { + // yes save the idx as the first Idx, & note that we found the start of the range. 
+ start = pRecord->idx; + foundStart = NV_TRUE; + } + // fall thru to check if the start of the report is also the end of the report. + + case NV_TRUE: + // check if this is an end record. + // we want the last record in the range to be an end record to insure that all the reports that are in the range are complete + // (Note -- in the case of end records, this should only be an issue if we are interrupting the collection of a report) + if (0 != (pRecord->flags & NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_POS_LAST)) + { + // save the idx as the last idx & continue scanning until we have checked all the records. + // the last idx saved will be the last idx. + end = pRecord->idx; + foundEnd = foundStart; + } + break; + + default: // should not happen.... + return NV_ERR_GENERIC; + break; + } + } + // checking end is sufficient, because end can't be set w/o start being set first. + if (foundEnd) + { + // we found a complete range, mark us as succeeding. + status = NV_OK; + + // pass up the results. + if (NULL != pEnd) + { + *pEnd = end; + } + if (NULL != pStart) + { + *pStart = start; + } + } + } + else + { + // the buffer is currently busy. + status = NV_ERR_BUSY_RETRY; + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + return status; +} + +NV_STATUS +rcdbAddRcDiagRec_IMPL +( + OBJGPU *pGpu, + Journal *pRcDB, + RmRcDiag_RECORD *pRmDiagWrapBuffRec +) +{ + NvU32 usec; + + // Create Records, then write it. + pRmDiagWrapBuffRec->idx = (pRcDB->RcErrRptNextIdx)++; + if (MAX_RCDB_RCDIAG_ENTRIES < pRmDiagWrapBuffRec->count) + { + NV_ASSERT_FAILED("Diag report to large for buffer"); + pRmDiagWrapBuffRec->data[MAX_RCDB_RCDIAG_ENTRIES - 1].offset = 0; + pRmDiagWrapBuffRec->data[MAX_RCDB_RCDIAG_ENTRIES - 1].tag = NV0000_CTRL_CMD_NVD_RCERR_RPT_REG_OVERFLOWED; + pRmDiagWrapBuffRec->data[MAX_RCDB_RCDIAG_ENTRIES - 1].value = pRmDiagWrapBuffRec->count - MAX_RCDB_RCDIAG_ENTRIES + 1; + pRmDiagWrapBuffRec->count = MAX_RCDB_RCDIAG_ENTRIES; + } + osGetCurrentTime(&(pRmDiagWrapBuffRec->timeStamp), &usec); + + rcdbAddRecToRingBuffer(pGpu, pRcDB, RmRcDiagReport, + sizeof(RmRcDiag_RECORD), (NvU8 *) pRmDiagWrapBuffRec); + + pRcDB->RcErrRptRecordsDropped |= pRcDB->RcErrRptNextIdx >= MAX_RCDB_RCDIAG_WRAP_BUFF; + return NV_OK; +} + +NV_STATUS +_rcdbInternalGetRcDiagRec +( + Journal *pRcDB, + NvU16 reqIdx, + RmRCCommonJournal_RECORD **ppRmDiagWrapBuffRec, + NvU32 owner, + NvU32 processId +) +{ + RmRCCommonJournal_RECORD *pCommon; + RmRcDiag_RECORD* pRecord = NULL; + NV_STATUS status = NV_ERR_INVALID_INDEX; + RING_BUFFER_LOG *pRingBuffer = NULL; + + NvU32 i; + + // assume we will fail. + *ppRmDiagWrapBuffRec = NULL; + + // Find the ring buffer for the diag reports + rcdbFindRingBufferForType(pRcDB, RmRcDiagReport, &pRingBuffer); + + // is the requested record in the buffer? + if ((NvU16)(pRcDB->RcErrRptNextIdx - reqIdx) <= pRingBuffer->numEntries) + { + // calculate the location of the record. + // find the record just past the last record in the buffer. to use as the initial offset. + i = pRingBuffer->headIndex + pRingBuffer->numEntries; + + // subtract off the diff between the next idx to be used & the requested idx. + i -= pRcDB->RcErrRptNextIdx - reqIdx; + + // wrap the offset to the size of the buffer. + i %= pRingBuffer->maxEntries; + + // get a pointer to the record from the buffer. 
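+        // each ring buffer slot holds a common journal header immediately
+        // followed by the RmRcDiag_RECORD payload, hence &pCommon[1] below.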
+ pCommon = (RmRCCommonJournal_RECORD *)(((NvU8 *)pRingBuffer->pBuffer) + (_rcdbGetOcaRecordSizeWithHeader(pRcDB, RmRcDiagReport) * i)); + pRecord = (RmRcDiag_RECORD*) &(pCommon[1]); + + // verify we have the record that was requested. + NV_ASSERT_OR_RETURN(pRecord->idx == reqIdx, NV_ERR_INVALID_INDEX); + + // we found the requested Index, + // check to see if the record qualifies + if (((RCDB_RCDIAG_DEFAULT_OWNER == owner) || (pRecord->owner == owner) || (NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID == owner)) + && ((NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_PROCESS_ID == processId) || (pRecord->processId == processId))) + { + // combination of ANY_OWNER_ID && ANY_PROCESS_ID is not valid + if (NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_OWNER_ID == owner && NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_PROCESS_ID == processId) + { + status = NV_ERR_INSUFFICIENT_PERMISSIONS; + goto exit; + } + // we found a record that fully qualifies + *ppRmDiagWrapBuffRec = pCommon; + status = NV_OK; + } + else + { + // we found the record, but it does not pass the filter. + status = NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } +exit: + return status; +} +NV_STATUS +rcdbGetRcDiagRec_IMPL +( + Journal *pRcDB, + NvU16 reqIdx, + RmRCCommonJournal_RECORD **ppRmDiagWrapBuffRec, + NvU32 owner, + NvU32 processId +) +{ + NV_STATUS status = NV_ERR_INVALID_INDEX; + + if (ppRmDiagWrapBuffRec == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + // assume we will fail. + *ppRmDiagWrapBuffRec = NULL; + + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + status = _rcdbInternalGetRcDiagRec(pRcDB, reqIdx, ppRmDiagWrapBuffRec, owner, processId); + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + return status; +} + +// +// The function to set context data for all the RmRcDiag_RECORDs in a specified range +// +NV_STATUS +rcdbUpdateRcDiagRecContext_IMPL +( + Journal *pRcDB, + NvU16 rangeStartIdx, + NvU16 rangeEndIdx, + NvU32 processId, + NvU32 owner +) +{ + RmRCCommonJournal_RECORD *pCommon = NULL; + RmRcDiag_RECORD* pRecord = NULL; + NV_STATUS status = NV_OK; + NV_STATUS recStatus = NV_ERR_OUT_OF_RANGE; + + NvU16 i; + + // go from the start index thru the end index. + // note we use != because the indicies will wrap. + for (i = rangeStartIdx; i != (NvU16)(rangeEndIdx + 1U); i++) + { + recStatus = rcdbGetRcDiagRec(pRcDB, i, &pCommon, RCDB_RCDIAG_DEFAULT_OWNER, NV0000_CTRL_CMD_NVD_RCERR_RPT_ANY_PROCESS_ID); + if (NV_OK != recStatus) + { + // something went wrong, + // record the status & skip this record. + status = recStatus; + continue; + } + // get the pointer to the diag record. 
+ pRecord = (RmRcDiag_RECORD*) &(pCommon[1]); + + pRecord->owner = owner; + pRecord->processId = processId; + } + return status; +} + +// +// size must include NVCD_RECORD size too +// +NV_STATUS rcdbAllocNextJournalRec_IMPL(Journal *pRcDB, NVCD_RECORD** ppRec, NvU8 jGroup, NvU8 type, NvU16 size) +{ + EVENT_JOURNAL *pJournal = &pRcDB->Journal; + + if ( ppRec == NULL ) + return NV_ERR_GENERIC; + + if ( pJournal->pBuffer == NULL || pJournal->BufferSize == 0 ) + return NV_ERR_GENERIC; + + if ( size == 0 || pJournal->BufferRemaining < size ) + { + return NV_ERR_GENERIC; + } + + *ppRec = (NVCD_RECORD*)(pJournal->pFree); + + (*ppRec)->cRecordGroup = jGroup; + (*ppRec)->cRecordType = type; + (*ppRec)->wRecordSize = size; + + if ( pJournal->pCurrCollection ) + { + pJournal->pCurrCollection->NumRecords++; + pJournal->pCurrCollection->Header.wRecordSize += size; + } + else + { + // standalone record (not part of collection) - increase total count + pJournal->RecordCount++; + } + + pJournal->pFree += size; + pJournal->BufferRemaining -= size; + + return NV_OK; +} + +NV_STATUS rcdbClearErrorHistory_IMPL(Journal *pRcDB) +{ + SYS_ERROR_INFO *pSysErrorInfo = &pRcDB->ErrorInfo; + RMFIFOERRORELEMENT_V3* pFifoErrorInfo; + RMFIFOERRORELEMENT_V3* pFreeErrorInfo; + + // Wait until any errors currently being reported are complete + while (!portAtomicCompareAndSwapU32(&pSysErrorInfo->InUse, 1, 0)) + { + // We're not going to sleep, but safe to sleep also means safe to spin.. + NV_ASSERT_OR_RETURN(portSyncExSafeToSleep(), NV_ERR_INVALID_STATE); + portUtilSpin(); + } + + pFifoErrorInfo = (RMFIFOERRORELEMENT_V3*) pSysErrorInfo->pErrorList; + while (NULL != pFifoErrorInfo) + { + pFreeErrorInfo = pFifoErrorInfo; + pFifoErrorInfo = pFifoErrorInfo->ErrorHeader.pNextError; + rcdbDeleteErrorElement(pRcDB, pFreeErrorInfo); + } + + pSysErrorInfo->ErrorCount = 0x0; + pSysErrorInfo->LogCount = 0x0; + pSysErrorInfo->pErrorList = NULL; + + portAtomicSetU32(&pSysErrorInfo->InUse, 0); + return NV_OK; +} + + +NV_STATUS rcdbDeleteErrorElement_IMPL(Journal *pRcDB, void *pDelete) +{ + RMFIFOERRORELEMENT_V3* pFifoDelete = (RMFIFOERRORELEMENT_V3*)pDelete; + RMCD_ERROR_BLOCK* pErrorBlock; + RMCD_ERROR_BLOCK* pOldErrorBlock; + + // Free Additional Error Block + for (pErrorBlock = pFifoDelete->ErrorHeader.pErrorBlock; pErrorBlock != NULL;) + { + pOldErrorBlock = pErrorBlock; + pErrorBlock = pErrorBlock->pNext; + portMemFree(pOldErrorBlock->pBlock); + portMemFree(pOldErrorBlock); + } + + // Free Error Collector + portMemFree(pFifoDelete); + + return NV_OK; +} + +// Frees up the all the ring buffers +void rcdbDestroyRingBufferCollection_IMPL(Journal *pRcDB) +{ + RING_BUFFER_LOG_COLLECTION *pRingBufferColl = &pRcDB->RingBufferColl; + NvU32 i; + RING_BUFFER_LOG* pCurrentBuffer = pRingBufferColl->pFirstEntry; + + for (i = 0; i < pRingBufferColl->NumRingBuffers; i++) + { + RING_BUFFER_LOG* pTempCurrentBuffer = pCurrentBuffer; + + NV_ASSERT(pCurrentBuffer != NULL); + NV_ASSERT(pCurrentBuffer->pBuffer != NULL); + + portMemFree(pCurrentBuffer->pBuffer); + + pCurrentBuffer = pCurrentBuffer->pNextRingBuffer; + + // Free the current ring buffer entry. 
+ portMemFree(pTempCurrentBuffer); + } + + // pCurrentBuffer should be NULL if our accounting of NumEntries is correct + NV_ASSERT(pCurrentBuffer == NULL); + + portMemSet(pRingBufferColl, 0x00, sizeof(*pRingBufferColl)); +} + + +static NvU32 _rcdbInsertJournalRecordToList (RmRCCommonJournal_RECORD *pList, RmRCCommonJournal_RECORD *pRecord); +static void _rcdbDumpCommonJournalRecord(PRB_ENCODER *pPrbEnc,const PRB_FIELD_DESC *pFieldDesc,PRmRCCommonJournal_RECORD pRec); + +/*! + * @brief Initialize the GPU accessible flag + * + * @param[in] pGPU + * @param[in] pRcDB + * + * @return NV_OK + */ +NV_STATUS +rcdbDumpInitGpuAccessibleFlag_IMPL +( + OBJGPU *pGpu, + Journal *pRcDB +) +{ + pRcDB->nvDumpState.bGpuAccessible = + pRcDB->nvDumpState.bRMLock && + !pGpu->bIsSOC && + !IS_VIRTUAL(pGpu) && + gpuIsGpuFullPower(pGpu) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST); + + // The GPU should be there... but make sure. + if (pRcDB->nvDumpState.bGpuAccessible) + { + if (GPU_REG_RD32(pGpu, NV_PMC_BOOT_0) != pGpu->chipId0) + { + pRcDB->nvDumpState.bGpuAccessible = NV_FALSE; + } + } + + return NV_OK; +} + +/*! + * @brief Performs a dump of the specified system component into the given buffer. + * + * @param[in] pSys The system object + * @param[in] component NVDUMP_IS_SYS_COMPONENT(component) must be true. + * @param[in, out] pBuffer Buffer to populate with dump results + * @param[in] policy Policy for buffer allocation: use this one, allocate one or count + * @param[in, out] pBufferCallback Callback function for use with fixed-sized buffer encoding. + * If this is NULL then pBuffer->size is assumed to be large + * enough for the whole dump. Otherwise pBufferCallback is called + * when the buffer is full or when a message ends, allowing the + * the callback to construct the whole buffer piece by piece. + * + * @return NV_OK on success and specific error status on failure + */ +NV_STATUS +rcdbDumpComponent_IMPL +( + OBJRCDB *pRcDB, + NvU32 component, + NVDUMP_BUFFER *pBuffer, + NVDUMP_BUFFER_POLICY policy, + PrbBufferCallback *pBufferCallback +) +{ + NVD_STATE *pNvDumpState = &pRcDB->nvDumpState; + void *pBuff; + PRB_ENCODER encoder; + NV_STATUS status = NV_OK; + NvU8 startingDepth; + + // Validate arguments. + NV_ASSERT_OR_RETURN(pBuffer != NULL, NV_ERR_INVALID_ARGUMENT); + + // Make sure we were not reentered. + if (pNvDumpState->bDumpInProcess) + return NV_ERR_STATE_IN_USE; + + // Initialize dump state. + pNvDumpState->bDumpInProcess = NV_TRUE; + pNvDumpState->bugCheckCode = 0; + pNvDumpState->internalCode = NVD_ERROR_CODE(NVD_EXTERNALLY_GENERATED, 0); + pNvDumpState->bRMLock = rmApiLockIsOwner(); + pNvDumpState->bGpuAccessible = NV_FALSE; + pNvDumpState->initialbufferSize = pBuffer->size; + pNvDumpState->nvDumpType = NVD_DUMP_TYPE_API; + + // Clear dump buffer. + pBuffer->curNumBytes = 0; + + // Start encoding protobuf dump message. 
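+    // NVDUMP_BUFFER_PROVIDED encodes into the caller-supplied buffer,
+    // NVDUMP_BUFFER_ALLOCATE has the encoder allocate one, and
+    // NVDUMP_BUFFER_COUNT only measures how large the dump would be.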
+ switch (policy) + { + case NVDUMP_BUFFER_PROVIDED: + prbEncStart(&encoder, NVDEBUG_NVDUMP, NvP64_VALUE(pBuffer->address), + pBuffer->size, pBufferCallback); + break; + case NVDUMP_BUFFER_ALLOCATE: + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + prbEncStartAlloc(&encoder, NVDEBUG_NVDUMP, + pBuffer->size, pBufferCallback)); + break; + case NVDUMP_BUFFER_COUNT: + prbEncStartCount(&encoder, NVDEBUG_NVDUMP, NVDUMP_MAX_DUMP_SIZE); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + startingDepth = prbEncNestingLevel(&encoder); + + switch (component) + { + case NVDUMP_COMPONENT_SYS_RCDB: + { + NV_CHECK_OK(status, LEVEL_ERROR, + rcdbDumpSystemFunc(pRcDB, &encoder, pNvDumpState)); + break; + } + case NVDUMP_COMPONENT_SYS_SYSINFO: + { + NV_CHECK_OK(status, LEVEL_ERROR, + rcdbDumpSystemInfo(pRcDB, &encoder, pNvDumpState)); + break; + } + case NVDUMP_COMPONENT_SYS_ALL: + { + NV_CHECK_OK(status, LEVEL_ERROR, + rcdbDumpSystemInfo(pRcDB, &encoder, pNvDumpState)); + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + rcdbDumpSystemFunc(pRcDB, &encoder, pNvDumpState)); + break; + } + default: + { + NV_PRINTF(LEVEL_ERROR, + "called with invalid component %u selected.\n", + component); + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + prbEncUnwindNesting(&encoder, startingDepth)); + + { + NvU32 gpu; + OBJGPU *pGpu; + + for (gpu = 0; gpu < NV_MAX_DEVICES; gpu++) + { + pGpu = gpumgrGetGpu(gpu); + + if ((pGpu != NULL) && IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(pGpu, status, &encoder, + pNvDumpState, component); + + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + prbEncUnwindNesting(&encoder, startingDepth)); + } + } + } + + // Finish encoding protobuf dump message. + pBuffer->curNumBytes = prbEncFinish(&encoder, &pBuff); + pBuffer->address = NV_SIGN_EXT_PTR_TO_NvP64(pBuff); + pNvDumpState->bDumpInProcess = NV_FALSE; + + return status; +} + +static NV_STATUS +_rcdbGetTimeInfo +( + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + const PRB_FIELD_DESC *pFieldDesc +) +{ + NvU64 timeSinceBoot; + NvU32 sec; + NvU32 usec; + NV_STATUS nvStatus = NV_OK; + NvU8 startingDepth = prbEncNestingLevel(pPrbEnc); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, pFieldDesc)); + + prbEncAddUInt64(pPrbEnc, + NVDEBUG_SYSTEMINFO_TIMEINFO_TIMESTAMP_FREQ, + osGetTimestampFreq()); + + // Add Timestamp + prbEncAddUInt64(pPrbEnc, + NVDEBUG_SYSTEMINFO_TIMEINFO_TIMESTAMP_DUMP, + osGetTimestamp()); + osGetCurrentTime(&sec, &usec); + prbEncAddUInt64(pPrbEnc, + NVDEBUG_SYSTEMINFO_TIMEINFO_SYSTEM_TIME_DUMP, + (NvU64)sec * 1000000 + usec); + + // Add time since boot in seconds. + osGetCurrentTick(&timeSinceBoot); + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_TIMEINFO_TIME_SINCE_BOOT_SEC, + (NvU32)(timeSinceBoot / 1000000000ULL)); + + // Unwind the protobuf to the correct depth. 
+ NV_CHECK_OK(nvStatus, LEVEL_ERROR, + prbEncUnwindNesting(pPrbEnc, startingDepth)); + + return nvStatus; +} + +static const char * GPU_NA_UUID = "N/A"; + +NV_STATUS +rcdbDumpSystemInfo_IMPL +( + OBJRCDB *pRcDB, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState +) +{ + OBJGPU *pGpu; + NvU8 *pGidString; + NvU32 gpu; + NvU32 numGpus; + NvU32 gidStrlen; + NvU32 sizeStr; + NV_STATUS nvStatus = NV_OK; + NvBool bRelease; + NvU8 startingDepth = prbEncNestingLevel(pPrbEnc); + + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + OBJGPU *pParent; + NvU32 gpuIndex; + NvU32 gpuMask; + NvBool bGpuDone[NV_MAX_DEVICES]; + + // All of this stuff should run OK even without the RM lock. + // No need to check pRcDB->nvDumpState.bNoRMLock; + + switch (DRF_VAL(_NVD, _ERROR_CODE, _MAJOR, pNvDumpState->internalCode)) + { + case NVD_GPU_GENERATED: + case NVD_SKIP_ZERO: + // don't report on these internal codes. + return NV_OK; + break; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_NVDUMP_SYSTEM_INFO)); + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + _rcdbGetTimeInfo(pPrbEnc, pNvDumpState, NVDEBUG_SYSTEMINFO_TIME_INFO), + External_Cleanup); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_BUGCHECK_COUNT, + pRcDB->BugcheckCount); + + // Add NorthBridge Info + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_SYSTEMINFO_NORTHBRIDGE_INFO), + External_Cleanup); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO_ID, + pCl->FHBBusInfo.vendorID | + (pCl->FHBBusInfo.deviceID << 16)); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_NORTHBRIDGEINFO_SSID, + pCl->FHBBusInfo.subvendorID | + (pCl->FHBBusInfo.subdeviceID << 16)); + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, // NVDEBUG_SYSTEMINFO_NORTHBRIDGE_INFO + prbEncNestedEnd(pPrbEnc), + External_Cleanup); + + //CPU Info + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_SYSTEMINFO_CPU_INFO), + External_Cleanup); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_CPUINFO_CPU_TYPE, + pSys->cpuInfo.type); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_CPUINFO_CPU_CAPS, + pSys->cpuInfo.caps); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_CPUINFO_NUM_CPU_CORES, + pSys->cpuInfo.numPhysicalCpus); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_CPUINFO_NUM_LOGICAL_CPUS, + pSys->cpuInfo.numLogicalCpus); + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, // NVDEBUG_SYSTEMINFO_CPU_INFO + prbEncNestedEnd(pPrbEnc), + External_Cleanup); + + //GPU Info + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_SYSTEMINFO_GPU_INFO), + External_Cleanup); + + // Count the number of GPUs and List the gpuIds + numGpus = 0; + for (gpu = 0; gpu < NV_MAX_DEVICES; gpu++) + { + const NvU32 gidFlags = + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1); + + pGpu = gpumgrGetGpu(gpu); + + if (pGpu) + { + numGpus++; + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_GPUINFO_GPU_ID, + pGpu->gpuId); + + nvStatus = gpuGetGidInfo(pGpu, &pGidString, + &gidStrlen, gidFlags); + if (NV_OK == nvStatus) + { + prbEncAddBytes(pPrbEnc, + NVDEBUG_SYSTEMINFO_GPUINFO_GPU_UUID, + pGidString, gidStrlen); + portMemFree(pGidString); + } + else if (pGpu->gpuUuid.isInitialized) + { + prbEncAddBytes(pPrbEnc, + NVDEBUG_SYSTEMINFO_GPUINFO_GPU_UUID, + pGpu->gpuUuid.uuid, sizeof(pGpu->gpuUuid.uuid)); + } + else + { + prbEncAddString(pPrbEnc, + NVDEBUG_SYSTEMINFO_GPUINFO_GPU_UUID, + GPU_NA_UUID); + } + 
+ prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_GPUINFO_DEVICE_ID, + pGpu->idInfo.PCIDeviceID); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_GPUINFO_PMCBOOT0, + pGpu->chipId0); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_GPUINFO_SUBDEV_ID, + pGpu->idInfo.PCISubDeviceID); + } + } + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_GPUINFO_NUM_GPUS, + numGpus); + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, // NVDEBUG_SYSTEMINFO_GPU_INFO + prbEncNestedEnd(pPrbEnc), + External_Cleanup); + + //OS Info + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_SYSTEMINFO_OS_INFO), + External_Cleanup); + + nvStatus = osGetVersionDump(pPrbEnc); + if (nvStatus != NV_OK) + goto External_Cleanup; + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, // NVDEBUG_SYSTEMINFO_OS_INFO + prbEncNestedEnd(pPrbEnc), + External_Cleanup); + + // Driver Info + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_SYSTEMINFO_DRIVER_INFO), + External_Cleanup); + + sizeStr = (sizeof("RELEASE") < sizeof(NV_DISPLAY_DRIVER_TITLE) ? + sizeof("RELEASE") : + sizeof(NV_DISPLAY_DRIVER_TITLE)); + + if (portMemCmp(NV_DISPLAY_DRIVER_TITLE, "RELEASE", sizeStr) == 0) + bRelease = NV_TRUE; + else + bRelease = NV_FALSE; + + prbEncAddBool(pPrbEnc, + NVDEBUG_SYSTEMINFO_DRIVERINFO_IS_RELEASE, + bRelease); + + prbEncAddString(pPrbEnc, + NVDEBUG_SYSTEMINFO_DRIVERINFO_VERSION, + NV_VERSION_STRING); + + prbEncAddString(pPrbEnc, + NVDEBUG_SYSTEMINFO_DRIVERINFO_BRANCH, + NV_BUILD_BRANCH_VERSION); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_DRIVERINFO_CHANGELIST, + NV_LAST_OFFICIAL_CHANGELIST_NUM); + + // Only write previous driver version if loaded more than once. + if (pRcDB->driverLoadCount > 1) + { + if (pRcDB->previousDriverVersion != NULL) + { + prbEncAddString(pPrbEnc, + NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_VERSION, + pRcDB->previousDriverVersion); + } + + if (pRcDB->previousDriverBranch != NULL) + { + prbEncAddString(pPrbEnc, + NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_BRANCH, + pRcDB->previousDriverBranch); + } + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_DRIVERINFO_PREVIOUS_CHANGELIST, + pRcDB->prevDriverChangelist); + } + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_DRIVERINFO_LOAD_COUNT, + pRcDB->driverLoadCount); + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, // NVDEBUG_SYSTEMINFO_DRIVER_INFO + prbEncNestedEnd(pPrbEnc), + External_Cleanup); + + // Dump an table of + // Master GPU -- gpuId + // List all gpus involved by gpuIds + portMemSet(bGpuDone, NV_FALSE, sizeof(bGpuDone)); + for (gpu = 0; gpu < NV_MAX_DEVICES; gpu++) + { + pGpu = gpumgrGetGpu(gpu); + + if ((pGpu) && (bGpuDone[gpu] == NV_FALSE)) + { + pParent = gpumgrGetParentGPU(pGpu); + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_SYSTEMINFO_GPU_CONFIG), + External_Cleanup); + + prbEncAddUInt32(pPrbEnc, NVDEBUG_SYSTEMINFO_CONFIG_MASTER_ID, pParent->gpuId); + gpuMask = gpumgrGetGpuMask(pGpu); + gpuIndex = 0; + pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + while (pGpu) + { + prbEncAddUInt32(pPrbEnc, NVDEBUG_SYSTEMINFO_CONFIG_GPU_ID, pGpu->gpuId); + + // gpuIndex is either the next or the MAX + bGpuDone[gpuIndex - 1] = NV_TRUE; + pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + } + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, // NVDEBUG_SYSTEMINFO_GPU_CONFIG + prbEncNestedEnd(pPrbEnc), + External_Cleanup); + } + } + + // Error state + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_SYSTEMINFO_ERROR_STATE), + External_Cleanup); + + 
prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_ERRORSTATE_BUGCHECK_CODE, + pNvDumpState->bugCheckCode); + + prbEncAddBool(pPrbEnc, + NVDEBUG_SYSTEMINFO_ERRORSTATE_GOT_RM_LOCK, + pNvDumpState->bRMLock); + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_SYSTEMINFO_ERRORSTATE_DUMP_BUFFER_SIZE, + pNvDumpState->initialbufferSize); + + // + // prbEncNestedEnd for NVDEBUG_SYSTEMINFO_ERROR_STATE and + // NVDEBUG_NVDUMP_SYSTEM_INFO are handled by prbEncUnwindNesting. + // + +External_Cleanup: + // Unwind the protobuf to the correct depth. + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + prbEncUnwindNesting(pPrbEnc, startingDepth)); + + return nvStatus; +} + +// +// Routine to dump RcDB Debug Info +// +NV_STATUS +rcdbDumpSystemFunc_IMPL +( + OBJRCDB *pRcDB, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState +) +{ + OBJGPU *pGpu = gpumgrGetSomeGpu(); + + switch (DRF_VAL(_NVD, _ERROR_CODE, _MAJOR, pNvDumpState->internalCode)) + { + case NVD_GPU_GENERATED: + case NVD_SKIP_ZERO: + // don't report on these internal codes. + return NV_OK; + break; + } + + rcdbDumpJournal(pRcDB, pGpu, pPrbEnc, pNvDumpState, NVDEBUG_NVDUMP_DCL_MSG); + if (pGpu != NULL) + { + rcdbDumpErrorCounters(pRcDB, pGpu, pPrbEnc); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "no GPU - won't dump ring buffers or journal\n"); + } + + return NV_OK; +} + +static NvU32 +_rcdbInsertErrorHistoryToList(RmRCCommonJournal_RECORD *pList, NVD_STATE *pNvDumpState) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + SYS_ERROR_INFO *pSysErrorInfo = &pRcDB->ErrorInfo; + RMPRBERRORELEMENT_V2* pPrbErrorElement; + RMCD_ERROR_BLOCK* pErrorBlock; + NV_STATUS status = NV_OK; + + // + // If we are called from the OCA dump, make sure we have the rm lock. + // TO DO: Try to dump as much as possible without the lock. + // + if (!pNvDumpState->bRMLock) + return NV_OK; + + // Get Past Exceptions + pPrbErrorElement = (RMPRBERRORELEMENT_V2*)pSysErrorInfo->pErrorList; + while (NULL != pPrbErrorElement) + { + pErrorBlock = pPrbErrorElement->ErrorHeader.pErrorBlock; + switch (pPrbErrorElement->RmPrbErrorData.common.Header.cRecordType) + { + case RmPrbErrorInfo_V2: + _rcdbInsertJournalRecordToList (pList, &(pPrbErrorElement->RmPrbErrorData.common)); + break; + + case RmPrbFullDump_V2: + // + // Full crash dumps are a single NvDebug.NvDump message, and + // should be contained in a single block. 
+ // + if (pErrorBlock != NULL) + { + if (pErrorBlock->pNext != NULL) + { + NV_PRINTF(LEVEL_WARNING, + "only one error block expected!\n"); + } + _rcdbInsertJournalRecordToList (pList, &(pPrbErrorElement->RmPrbErrorData.common)); + } + break; + default: + // Can only handle protobuf formatted messages + NV_PRINTF(LEVEL_ERROR, "unknown error element type: %d\n", + pPrbErrorElement->RmPrbErrorData.common.Header.cRecordType); + break; + } + pPrbErrorElement = (RMPRBERRORELEMENT_V2*)pPrbErrorElement->ErrorHeader.pNextError; + } + return status; +} + +static void +_rcdbDumpCommonJournalRecord +( + PRB_ENCODER *pPrbEnc, + const PRB_FIELD_DESC *pFieldDesc, + RmRCCommonJournal_RECORD *pRec +) +{ + NV_STATUS nvStatus = NV_OK; + + NV_CHECK_OK(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, pFieldDesc)); + + if (nvStatus == NV_OK) + { + if (pRec->timeStamp != 0) + prbEncAddUInt64(pPrbEnc, JOURNAL_COMMON_TIME_STAMP, pRec->timeStamp); + if (pRec->GPUTag != 0) + prbEncAddUInt32(pPrbEnc, JOURNAL_COMMON_GPU_TAG, pRec->GPUTag); + if (pRec->CPUTag != 0) + prbEncAddUInt64(pPrbEnc, JOURNAL_COMMON_CPU_TAG, pRec->CPUTag); + if (pRec->stateMask != 0) + prbEncAddUInt64(pPrbEnc, JOURNAL_COMMON_STATE_MASK, pRec->stateMask); + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc)); + } +} + +static void +rcdbDumpCommonAssertRecord +( + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + RmRCCommonAssert_RECORD *pRec, + NvU32 type +) +{ + NvU32 i; + + prbEncAddUInt32(pPrbEnc, JOURNAL_ASSERT_TYPE, type); + + if (pRec->lastTimeStamp != 0) + prbEncAddUInt64(pPrbEnc, JOURNAL_ASSERT_LAST_TIME_STAMP, pRec->lastTimeStamp); + + prbEncAddUInt64(pPrbEnc, JOURNAL_ASSERT_BREAKPOINT_ADDR_HINT, pRec->breakpointAddrHint); + + // if there is a line number, add it to the message. + if (pRec->lineNum != NV_RM_ASSERT_UNKNOWN_LINE_NUM) + prbEncAddUInt32(pPrbEnc, JOURNAL_ASSERT_SOURCE_LINE, pRec->lineNum); + + if (pRec->count != 1) + prbEncAddUInt32(pPrbEnc, JOURNAL_ASSERT_COUNT, pRec->count); + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pRec->callStack); i++) + { + if (pRec->callStack[i] == 0) + break; + + prbEncAddUInt64(pPrbEnc, JOURNAL_ASSERT_CALL_STACK, pRec->callStack[i]); + } +} + +static NV_STATUS +_rcdbDumpDclMsgRecord( + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + const PRB_FIELD_DESC *pFieldDesc, + RmRCCommonJournal_RECORD *pDclRecord + ) +{ + NV_STATUS nvStatus = NV_OK; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, pFieldDesc)); + + _rcdbDumpCommonJournalRecord(pPrbEnc, DCL_DCLMSG_COMMON, pDclRecord); + + switch (pDclRecord->Header.cRecordType) + { + case RmRC2SwDbgBreakpoint_V3: + case RmRC2SwRmAssert_V3: + { + RmRC2SwRmAssert3_RECORD* pRecord = (RmRC2SwRmAssert3_RECORD*)pDclRecord; + + NV_CHECK_OK(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, DCL_DCLMSG_JOURNAL_ASSERT)); + if (nvStatus == NV_OK) + { + rcdbDumpCommonAssertRecord(pPrbEnc, pNvDumpState, + &pRecord->commonAssert, pDclRecord->Header.cRecordType); + + prbEncAddUInt32(pPrbEnc, JOURNAL_ASSERT_LEVEL, pRecord->level); + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc)); + } + break; + } + case RmRC2GpuTimeout_V3: + { + RmRC2GpuTimeout3_RECORD* pRecord = (RmRC2GpuTimeout3_RECORD*)pDclRecord; + + NV_CHECK_OK(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, DCL_DCLMSG_JOURNAL_ASSERT)); + if (nvStatus == NV_OK) + { + rcdbDumpCommonAssertRecord(pPrbEnc, pNvDumpState, pRecord, pDclRecord->Header.cRecordType); + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc)); + } + break; + } + case RmBadRead_V2: + { + 
RmRC2BadRead2_RECORD* pRecord = (RmRC2BadRead2_RECORD*)pDclRecord; + + NV_CHECK_OK(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, DCL_DCLMSG_JOURNAL_BADREAD)); + if (nvStatus == NV_OK) + { + prbEncAddUInt32(pPrbEnc, JOURNAL_BADREAD_MEMORY_SPACE, pRecord->MemorySpace); + prbEncAddUInt32(pPrbEnc, JOURNAL_BADREAD_OFFSET, pRecord->Offset); + prbEncAddUInt32(pPrbEnc, JOURNAL_BADREAD_MASK, pRecord->Mask); + prbEncAddUInt32(pPrbEnc, JOURNAL_BADREAD_VALUE, pRecord->Value); + prbEncAddUInt32(pPrbEnc, JOURNAL_BADREAD_REASON, pRecord->Reason); + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc)); + } + break; + } + case RmDclMsg: + { + RM_DATA_COLLECTION_RECORD *pRecord = (RM_DATA_COLLECTION_RECORD*) pDclRecord; + // Add the bytes after RM_DATA_COLLECTION_RECORD + prbEncAddBytes(pPrbEnc, pRecord->fieldDesc, (void *) (pRecord + 1), + pRecord->common.Header.wRecordSize - sizeof(*pRecord)); + break; + } + case RmJournalEngDump: + { + RM_DATA_COLLECTION_RECORD *pRecord = (RM_DATA_COLLECTION_RECORD*) pDclRecord; + // Add the bytes after RM_DATA_COLLECTION_RECORD + prbEncCatMsg(pPrbEnc, (void *)(pRecord + 1), + pRecord->common.Header.wRecordSize - sizeof(*pRecord)); + break; + } + case RmJournalBugCheck: + { + RmJournalBugcheck_RECORD* pRecord = (RmJournalBugcheck_RECORD*)pDclRecord; + NV_CHECK_OK(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, DCL_DCLMSG_JOURNAL_BUGCHECK)); + if (nvStatus == NV_OK) + { + prbEncAddUInt32(pPrbEnc, JOURNAL_BUGCHECK_CODE, pRecord->bugCheckCode); + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc)); + } + break; + } + case RmPrbErrorInfo_V2: + case RmPrbFullDump_V2: + { + RMPRBERRORELEMENT_V2* pRecord = (RMPRBERRORELEMENT_V2*)((NvU8 *)pDclRecord + - NV_OFFSETOF(RMPRBERRORELEMENT_V2, RmPrbErrorData)); + RMCD_ERROR_BLOCK* pErrorBlock; + + for (pErrorBlock = pRecord->ErrorHeader.pErrorBlock; + (pErrorBlock != NULL); pErrorBlock = pErrorBlock->pNext) + { + prbEncCatMsg(pPrbEnc, (void *)pErrorBlock->pBlock, + pErrorBlock->blockSize); + } + break; + } + case RmNocatReport: + { + // currently not added to the OCA dump + break; + } + + default: + // These are the only ones we know about + NV_PRINTF(LEVEL_ERROR, + "unknown Dcl Record entry type: %d\n", + pDclRecord->Header.cRecordType); + break; + } + + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc)); + return 0; +} + +static NvU32 +_rcdbInsertJournalRecordToList (RmRCCommonJournal_RECORD *pList, RmRCCommonJournal_RECORD *pRecord) +{ + RmRCCommonJournal_RECORD *pCurrentRecord = pList; + RmRCCommonJournal_RECORD *pNextRecord; + + if ((NULL != pList) && (NULL != pRecord)) + { + for (pNextRecord = (RmRCCommonJournal_RECORD *)pList->pNext; pNextRecord != pList; pNextRecord = (RmRCCommonJournal_RECORD *)pNextRecord->pNext) + { + if (pRecord->timeStamp < pNextRecord->timeStamp) + { + break; + } + pCurrentRecord = pNextRecord; + } + pRecord->pNext = pCurrentRecord->pNext; + pCurrentRecord->pNext = (NvU8 *)pRecord; + } + return 0; +} + +// Todo: format the records into a protobuf DCL record at the source +static NvU32 +rcdbInsertRingBufferToList( + Journal *pRcDB, + RmRCCommonJournal_RECORD *pList, + RING_BUFFER_LOG *pRingBuffer +) +{ + RmRCCommonJournal_RECORD *pCommon; + NvU32 recordSize; + NvU32 i; + + recordSize = _rcdbGetOcaRecordSizeWithHeader(pRcDB, pRingBuffer->entryType); + + // + // Order does not matter here because the record will be inserted into the + // list based on the time of the record, not its postion in the buffer. 
+ // + for (i = 0; i < pRingBuffer->numEntries; i++) + { + pCommon = (RmRCCommonJournal_RECORD *)(((NvU8 *)pRingBuffer->pBuffer) + (recordSize * i)); + + _rcdbInsertJournalRecordToList (pList, pCommon); + } + + return 0; // return value should be discarded +} + +static NvU32 +rcdbInsertRingBufferCollectionToList( + Journal *pRcDB, + RmRCCommonJournal_RECORD *pList) +{ + RING_BUFFER_LOG_COLLECTION *pRingBufferColl = &pRcDB->RingBufferColl; + RING_BUFFER_LOG *pCurrentBuffer; + NvU32 i; + + + pCurrentBuffer = pRingBufferColl->pFirstEntry; + for (i = 0; i < pRingBufferColl->NumRingBuffers; i++) + { + NvU32 recSize = pCurrentBuffer->bufferSize; + + NV_ASSERT(pCurrentBuffer->maxEntries * + _rcdbGetOcaRecordSizeWithHeader(pRcDB, pCurrentBuffer->entryType) == + pCurrentBuffer->bufferSize); + + if (recSize > 0) + { + rcdbInsertRingBufferToList (pRcDB, pList, pCurrentBuffer); + } + pCurrentBuffer = pCurrentBuffer->pNextRingBuffer; + } + + // Assert that we traversed through the entire list. + NV_ASSERT(pCurrentBuffer == NULL); + + // return value should be ignored + return 0; +} + +NvU32 +rcdbDumpJournal_IMPL +( + OBJRCDB *pRcDB, + OBJGPU *pGpu, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + const PRB_FIELD_DESC *pFieldDesc +) +{ + OS_DRIVER_BLOCK DriverBlock; + EVENT_JOURNAL *pJournal = &pRcDB->Journal; + NvU8 *pJournalBuff = pJournal->pBuffer; + RmRCCommonJournal_RECORD *pRecord; + NvU32 recSize; + NV_STATUS nvStatus = NV_OK; + RmRCCommonJournal_RECORD List; + + // It is OK to dump the journal entries without the RM lock. + // No need to check pRcDB->nvDumpState.bNoRMLock; + + recSize = pJournal->BufferSize - pJournal->BufferRemaining; + + if (NULL != pGpu) + { + // + // Add RVA Header, even when there are no journal records. + // This header is required to resolve code addresses using the PDB file. + // We can log code addresses outside of the journal entries. + // + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedStart(pPrbEnc, pFieldDesc)); + if (nvStatus == NV_OK) + { + NV_CHECK_OK(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, DCL_DCLMSG_JOURNAL_RVAHEADER)); + if (nvStatus == NV_OK) + { + portMemSet(&DriverBlock, 0x00, sizeof(DriverBlock)); + osGetDriverBlock(pGpu->pOsGpuInfo, &DriverBlock); + prbEncAddUInt64(pPrbEnc, JOURNAL_RVAHEADER_DRIVER_START, (NvU64)DriverBlock.driverStart); + prbEncAddUInt32(pPrbEnc, JOURNAL_RVAHEADER_OFFSET, DriverBlock.offset); + prbEncAddUInt32(pPrbEnc, JOURNAL_RVAHEADER_POINTER_SIZE, sizeof(pJournal)); + prbEncAddUInt64(pPrbEnc, JOURNAL_RVAHEADER_UNIQUE_ID_HIGH, *((NvU64*) DriverBlock.unique_id)); + prbEncAddUInt64(pPrbEnc, JOURNAL_RVAHEADER_UNIQUE_ID_LOW, *((NvU64*) (DriverBlock.unique_id + 8))); + prbEncAddUInt32(pPrbEnc, JOURNAL_RVAHEADER_AGE, DriverBlock.age); + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc)); + } + NV_CHECK_OK(nvStatus, LEVEL_ERROR, prbEncNestedEnd(pPrbEnc)); + } + } + + // init the list to an empty state + portMemSet(&List, 0x00, sizeof(List)); + List.pNext = (NvU8 *)&List; + + // + // Don't dump the ring buffers if something is adding to them. + // If we can dump the ring buffers, hold the lock for them until the + // dump is complete to insure that a record is not changed mid-dump. 
+ // + if (portAtomicIncrementS32(&concurrentRingBufferAccess) != 1) + { + // + // If IRQL is low, spin until it gets available + // + if (!osIsRaisedIRQL() && (NULL != pGpu)) + { + RMTIMEOUT timeout; + NV_STATUS status = NV_OK; + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + do { + portAtomicDecrementS32(&concurrentRingBufferAccess); + + if (NV_ERR_TIMEOUT == status) + { + NV_PRINTF(LEVEL_ERROR, + "timed out waiting for Rm journal ring buffer to be available\n"); + DBG_BREAKPOINT(); + return 0; + } + status = gpuCheckTimeout(pGpu, &timeout); + osSpinLoop(); + } while (portAtomicIncrementS32(&concurrentRingBufferAccess) != 1); + } + else + { + NV_ASSERT_FAILED("Ring Buffer unavailable for dump at high irql."); + } + } + + rcdbInsertRingBufferCollectionToList (pRcDB, &List); + + _rcdbInsertErrorHistoryToList(&List, pNvDumpState); + + // Skip if size is smaller than a header + while (recSize > sizeof(RmRCCommonJournal_RECORD)) + { + pRecord = (RmRCCommonJournal_RECORD *)pJournalBuff; + + if (pRecord->Header.cRecordGroup != RmGroup) + { + // We only log RM related data + NV_ASSERT(pRecord->Header.cRecordGroup == RmGroup); + break; + } + + // Just a safety net... + if (pRecord->Header.wRecordSize > recSize) + { + break; + } + _rcdbInsertJournalRecordToList (&List, pRecord); + + recSize -= pRecord->Header.wRecordSize; + pJournalBuff += pRecord->Header.wRecordSize; + } + + + // dump out the records that have been added to the list. + for (pRecord = (RmRCCommonJournal_RECORD *)List.pNext; pRecord != &List; pRecord = (RmRCCommonJournal_RECORD *)pRecord->pNext) + { + _rcdbDumpDclMsgRecord(pPrbEnc, pNvDumpState, pFieldDesc, pRecord); + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + + // return value should be ignored + return 0; +} + +NvU32 +rcdbDumpErrorCounters_IMPL(Journal *pRcDB, OBJGPU *pGpu, PRB_ENCODER *pPrbEnc) +{ + NvU32 i; + NvU32 rcErrTyp = RC_ERROR_COUNTER_TYPE_INVALID; + NV_STATUS nvStatus = NV_OK; + NvU8 startingDepth = prbEncNestingLevel(pPrbEnc); + + // Opens NVDEBUG_NVDUMP_DCL_MSG + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_NVDUMP_DCL_MSG), + cleanupAndExit); + + for (i = 0; i <= RC_ERROR_COUNTER_OTHER_INDEX; i++) + { + // For Counters + rcErrTyp = pRcDB->rcErrorCounterArray[i].rcErrorType; + if (rcErrTyp != RC_ERROR_COUNTER_TYPE_INVALID) + { + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, DCL_DCLMSG_RCCOUNTER), + cleanupAndExit); + + // Write Power Event + prbEncAddUInt32(pPrbEnc, RC_RCCOUNTER_RCERRORTYPE, rcErrTyp); + + // Write Power State + prbEncAddUInt32(pPrbEnc, RC_RCCOUNTER_COUNT, pRcDB->rcErrorCounterArray[i].rcErrorCount); + + // Dump the channel ID and the last time when this error occurred on this channel ID + prbEncAddUInt32(pPrbEnc, RC_RCCOUNTER_RCLASTCHID, pRcDB->rcErrorCounterArray[i].rcLastCHID); + prbEncAddUInt64(pPrbEnc, RC_RCCOUNTER_RCLASTTIME, pRcDB->rcErrorCounterArray[i].rcLastTime); + + NV_CHECK_OK_OR_GOTO(nvStatus, LEVEL_ERROR, + prbEncNestedEnd(pPrbEnc), + cleanupAndExit); + } + } // For Counters + + // Close NVDEBUG_NVDUMP_DCL_MSG handled by prbEncUnwindNesting. 
+ +cleanupAndExit: + // Unwind the protobuff to inital depth + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + prbEncUnwindNesting(pPrbEnc, startingDepth)); + + return 0; +} + +static void +_rcdbAddRmGpuDumpCallback +( + void *pData +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status; + + NvU32 gpuInstance = *((NvU32 *)pData); + status = osAcquireRmSema(pSys->pSema); + if (status == NV_OK) + { + // LOCK: acquire API lock + status = rmApiLockAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_DIAG); + if (status == NV_OK) + { + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_DIAG); + if (status == NV_OK) + { + Journal *pRcDB = SYS_GET_RCDB(pSys); + OBJGPU *pGpu = gpumgrGetGpu(gpuInstance); + + // + // Mark the Journal object as in the deferred dump path so we won't + // re-attempt again. + // + pRcDB->setProperty(pRcDB, PDB_PROP_RCDB_IN_DEFERRED_DUMP_CODEPATH, NV_TRUE); + + status = rcdbAddRmGpuDump(pGpu); + NV_ASSERT(status == NV_OK); + + pRcDB->setProperty(pRcDB, PDB_PROP_RCDB_IN_DEFERRED_DUMP_CODEPATH, NV_FALSE); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to acquire the GPU locks!\n"); + } + // UNLOCK: release API lock + rmApiLockRelease(); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to acquire the API lock!\n"); + } + osReleaseRmSema(pSys->pSema, NULL); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to acquire the OS semaphore!\n"); + } +} + +static NV_STATUS +nvdDebuggerBufferCallback(void *pEncoder, NvBool bBufferFull) +{ + if (bBufferFull) + { + nvDumpConfig.dumpStatus = NVDUMP_STATUS_DUMP_BUFFER_FULL; + } + else + { + nvDumpConfig.dumpStatus = NVDUMP_STATUS_DUMP_END_OF_MSG; + } + + return NV_OK; +} + +/*! + * @brief NvDebug kernel debugger dump control + * + * Allows external kernel debuggers to control the RM's dump interface + * without assuming anything about the current system state. + * + * WARNING! This function should never be called directly! + * + * If correctly setup, a kernel debugger will place a processor + * hardware watchpoint on the nvDumpConfig.handshake variable. + * Each time this is written to, the debugger will break and get a chance + * to examine the rest of the nvDumpConfig state. + * + * @return This function should never return! External debugger should abort it! + */ +static void +nvdDebuggerControlFunc(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + OBJGPU *pGpu = NULL; + NvDebugDump *pNvd = NULL; + NVDUMP_BUFFER *pBuffer = (NVDUMP_BUFFER *)&nvDumpConfig.buffer; // discard volatile + + // Process actions while debugger provides work to do. 
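+    //
+    // (For reference only, as a hedged sketch of the debugger side: a
+    //  debugger driving this loop would roughly
+    //      1. set gpuSelect/component, then dumpStatus = NVDUMP_STATUS_COUNT_REQUESTED
+    //      2. wait for NVDUMP_STATUS_COUNT_COMPLETE and size its buffer
+    //      3. set dumpStatus = NVDUMP_STATUS_DUMP_REQUESTED with the buffer provided
+    //      4. wait for NVDUMP_STATUS_DUMP_COMPLETE and read the encoded dump
+    //      5. set dumpStatus = NVDUMP_STATUS_IDLE so the loop exits into the
+    //         trap below, where the debugger aborts this function.)
+    //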
+ while (nvDumpConfig.dumpStatus != NVDUMP_STATUS_IDLE) + { + nvDumpConfig.rmStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, + "Dump triggered: gpuSelect=%u, component=%u, dumpStatus=%u\n", + nvDumpConfig.gpuSelect, nvDumpConfig.component, + nvDumpConfig.dumpStatus); + + if (NVDUMP_IS_GPU_COMPONENT(nvDumpConfig.component)) + { + pGpu = gpumgrGetGpu(nvDumpConfig.gpuSelect); + pNvd = GPU_GET_NVD(pGpu); + + switch (nvDumpConfig.dumpStatus) + { + case NVDUMP_STATUS_COUNT_REQUESTED: + nvDumpConfig.rmStatus = nvdDumpComponent( + pGpu, pNvd, nvDumpConfig.component, pBuffer, + NVDUMP_BUFFER_COUNT, NULL); + nvDumpConfig.dumpStatus = NVDUMP_STATUS_COUNT_COMPLETE; + break; + case NVDUMP_STATUS_DUMP_REQUESTED: + nvDumpConfig.rmStatus = nvdDumpComponent( + pGpu, pNvd, nvDumpConfig.component, pBuffer, + NVDUMP_BUFFER_PROVIDED, &nvdDebuggerBufferCallback); + nvDumpConfig.dumpStatus = NVDUMP_STATUS_DUMP_COMPLETE; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid dumpStatus %u\n", + nvDumpConfig.dumpStatus); + nvDumpConfig.rmStatus = NV_ERR_INVALID_STATE; + nvDumpConfig.dumpStatus = NVDUMP_STATUS_ERROR; + break; + } + } + else if (NVDUMP_IS_SYS_COMPONENT(nvDumpConfig.component)) + { + switch (nvDumpConfig.dumpStatus) + { + case NVDUMP_STATUS_COUNT_REQUESTED: + nvDumpConfig.rmStatus = rcdbDumpComponent(pRcDB, + nvDumpConfig.component, pBuffer, + NVDUMP_BUFFER_COUNT, NULL); + nvDumpConfig.dumpStatus = NVDUMP_STATUS_COUNT_COMPLETE; + break; + case NVDUMP_STATUS_DUMP_REQUESTED: + nvDumpConfig.rmStatus = rcdbDumpComponent(pRcDB, + nvDumpConfig.component, pBuffer, + NVDUMP_BUFFER_PROVIDED, &nvdDebuggerBufferCallback); + nvDumpConfig.dumpStatus = NVDUMP_STATUS_DUMP_COMPLETE; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid dumpStatus %u\n", + nvDumpConfig.dumpStatus); + nvDumpConfig.rmStatus = NV_ERR_INVALID_STATE; + nvDumpConfig.dumpStatus = NVDUMP_STATUS_ERROR; + + break; + } + } + else + { + NV_PRINTF(LEVEL_ERROR, "Invalid component %u\n", + nvDumpConfig.component); + nvDumpConfig.rmStatus = NV_ERR_INVALID_PARAM_STRUCT; + nvDumpConfig.dumpStatus = NVDUMP_STATUS_ERROR; + } + } + + // Ensure we really don't exit this function without debugger. + while (1) + { + NV_PRINTF(LEVEL_ERROR, "Should never reach this point!\n"); + DBG_BREAKPOINT(); + } +} + +/*! + * @brief Release Build NV_ASSERT function + * + * @details Called by NV_ASSERT when the assertion fails. + * By putting this logic in its own function, we save on binary size. + */ +#if (defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX) || RMCFG_FEATURE_PLATFORM_GSP) && !defined(NV_MODS) +static void _rcdbRmAssert(NvU32 level, NvU32 lineNum, NvU64 ip) +{ + RmRC2SwRmAssert3_RECORD* pRec = NULL; + if (rcdbAddAssertJournalRecWithLine(NULL, lineNum, (void **)&pRec, RmGroup, + RmRC2SwRmAssert_V3, sizeof(RmRC2SwRmAssert3_RECORD), + level, ip) == NV_OK) + { + pRec->level = level; + } + +#if !defined(DEBUG) && !defined(QA_BUILD) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Add assert to NvLog. But skip when nvLog asserts to avoid stack overflow. + if (portAtomicIncrementS32(&nvLogRecursion) == 1) + { + // check for GPU lost. + rcdProbeAllGpusPresent(ip); + } + portAtomicDecrementS32(&nvLogRecursion); + + if ((pSys != NULL) && ((NV_DEBUG_BREAK_ATTRIBUTES_ASSERT) & + DRF_VAL(_DEBUG, _BREAK, _ATTRIBUTES, pSys->debugFlags))) + { + REL_DBG_BREAKPOINT_MSG("NVRM-RC: Nvidia Release NV_ASSERT Break\n"); + } + } + + // If enabled bugcheck on assert + osDbgBugCheckOnAssert(); + +#endif +} + +// +// Some param-less wrappers for rcdbXxxEx() functions. 
+// If the params are not needed, calling these functions saves on binary size +// +void rcdbRmAssert(NvU32 LineNum, NvU64 ip) { _rcdbRmAssert(0, LineNum, ip); } +void rcdbRmAssertStatus(NvU32 status, NvU32 LineNum, NvU64 ip) { _rcdbRmAssert(status, LineNum, ip); } + +#endif // (defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX) || RMCFG_FEATURE_PLATFORM_GSP) && !defined(NV_MODS) + +#if (defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX)) && !defined(NV_MODS) + +/*! + * @brief Release Build DBGBREAKPOINT() function + * + * @details Called by DBGBREAKPOINT when the assertion fails. + * By putting this logic in its own function, we save on binary size. + */ +static void _rcdbDbgBreakEx(void *pGpu, NvU32 lineNum, NvU32 level, NvU64 ip) +{ + RmRC2SwRmAssert3_RECORD* pRec = NULL; + if (rcdbAddAssertJournalRecWithLine(pGpu, lineNum, (void**)&pRec, RmGroup, + RmRC2SwDbgBreakpoint_V3, sizeof(RmRC2SwRmAssert3_RECORD), level, ip) == NV_OK) + { + pRec->level = level; + } + +#if !defined(DEBUG) && !defined(QA_BUILD) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Add assert to NvLog. But skip when nvLog asserts to avoid stack overflow. + if (portAtomicIncrementS32(&nvLogRecursion) == 1) + { + NV_PRINTF(LEVEL_ERROR, "Breakpoint at 0x%llx.\n", ip); + } + portAtomicDecrementS32(&nvLogRecursion); + + if ((pSys != NULL) && ((NV_DEBUG_BREAK_ATTRIBUTES_DBG_BREAK) & + DRF_VAL(_DEBUG, _BREAK, _ATTRIBUTES, pSys->debugFlags))) + { + REL_DBG_BREAKPOINT_MSG("NVRM-RC: Nvidia Release Debug Break\n"); + } + } +#endif + + // If enabled bugcheck on assert + osDbgBugCheckOnAssert(); +} + +void rcdbDbgBreak(NvU64 ip) { _rcdbDbgBreakEx(NULL, NV_RM_ASSERT_UNKNOWN_LINE_NUM, 0, ip); } +void rcdbDbgBreakGpu(void *pGpu, NvU64 ip) { _rcdbDbgBreakEx(pGpu, NV_RM_ASSERT_UNKNOWN_LINE_NUM, 0, ip); } +void rcdbDbgBreakStatus(NvU32 status, NvU64 ip) { _rcdbDbgBreakEx(NULL, NV_RM_ASSERT_UNKNOWN_LINE_NUM, status, ip); } +void rcdbDbgBreakEx(void *pGpu, NvU32 status, NvU64 ip) { _rcdbDbgBreakEx(pGpu, NV_RM_ASSERT_UNKNOWN_LINE_NUM, status, ip); } + +#endif + +NV_STATUS +rcdbAddRmEngDump +( + OBJGPU *pGpu, + NvU32 component +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + NvDebugDump *pNvd = GPU_GET_NVD(pGpu); + NVDUMP_BUFFER nvDumpBuffer = {0}; + RM_DATA_COLLECTION_RECORD *pRec; + NV_STATUS rmStatus; + NvU16 totalSize; + + nvDumpBuffer.size = NVDUMP_MAX_DUMP_SIZE; + + rmStatus = nvdDumpComponent(pGpu, pNvd, component, &nvDumpBuffer, + NVDUMP_BUFFER_ALLOCATE, NULL); + if (rmStatus != NV_OK) + { + goto rcdbAddRmEngDump_error_handle; + } + + totalSize = (NvU16)(nvDumpBuffer.curNumBytes + sizeof(*pRec)); + //align to 8 bytes to keep the readability of RM journal + totalSize = (totalSize + 0x7) & ~0x7; + // check for overflow + if (((NvU32)totalSize) < nvDumpBuffer.curNumBytes + sizeof(*pRec)) + { + goto rcdbAddRmEngDump_error_handle; + } + + rmStatus = rcdbAllocNextJournalRec(pRcDB, (NVCD_RECORD **)&pRec, RmGroup, + RmJournalEngDump, totalSize); + if (rmStatus != NV_OK) + { + goto rcdbAddRmEngDump_error_handle; + } + rcdbSetCommonJournalRecord(pGpu, &pRec->common); + + // copy the dump buffer right after the RM_DATA_COLLECTION_RECORD struct + portMemCopy((void *)(pRec + 1), nvDumpBuffer.curNumBytes, NvP64_VALUE(nvDumpBuffer.address), nvDumpBuffer.curNumBytes); + + pRec->fieldDesc = NVDEBUG_NVDUMP_GPU_INFO; + +rcdbAddRmEngDump_error_handle: + if (nvDumpBuffer.address != NvP64_NULL) + { + portMemFree(NvP64_VALUE(nvDumpBuffer.address)); + } + + return rmStatus; +} + + +// Finds the ring buffer for a 
corresponding type. Returns error if not allocated. +static void +rcdbFindRingBufferForType +( + Journal *pRcDB, + RMCD_RECORD_TYPE recType, + RING_BUFFER_LOG **ppRingBuffer +) +{ + NvU32 i; + RING_BUFFER_LOG *pCurrentRingBuffer = NULL; + RING_BUFFER_LOG_COLLECTION *pRingBufferColl = &pRcDB->RingBufferColl; + + NV_ASSERT(ppRingBuffer != NULL); + *ppRingBuffer = NULL; + + // + // Loop through our ring buffer collection, and find the + // ring buffer corresponding to our type. + // + pCurrentRingBuffer = pRingBufferColl->pFirstEntry; + for (i = 0; i < pRingBufferColl->NumRingBuffers; i++) + { + NV_ASSERT(pCurrentRingBuffer != NULL); + if (pCurrentRingBuffer->entryType == recType) + { + *ppRingBuffer = pCurrentRingBuffer; + return; + } + pCurrentRingBuffer = pCurrentRingBuffer->pNextRingBuffer; + } + + NV_PRINTF(LEVEL_INFO, "Ring Buffer not found for type %d\n", recType); + return; +} + +// +// Creates a ring buffer capable of holding "maxEntries" number of entries, and +// adds it to the ring buffer collection. +// Returns a pointer to the created ring buffer so that individual modules can +// examine the data on-demand easily. +// +//PRINT_BUFFER_LOG +NvU8 * +rcdbCreateRingBuffer_IMPL +( + Journal *pRcDB, + RMCD_RECORD_TYPE type, + NvU32 maxEntries +) +{ + NV_STATUS status; + RING_BUFFER_LOG_COLLECTION *pRingBufferColl = &pRcDB->RingBufferColl; + RING_BUFFER_LOG *pRingBuffer; + NvU8* pBuffer = NULL; + NvU32 bufferSize, entrySize; + + rcdbFindRingBufferForType(pRcDB, type, &pRingBuffer); + + entrySize = _rcdbGetOcaRecordSizeWithHeader(pRcDB, type); + if (entrySize == 0) + { + NV_ASSERT(entrySize != 0); + return NULL; + } + + // We need to store maxEntries number of entries. Check for overflow too + if (portSafeMulU32(maxEntries, entrySize, &bufferSize) == NV_FALSE) + { + return NULL; + } + + if (pRingBuffer != NULL) + { + NvU32 totalSize; + + if (portSafeAddU32(bufferSize, pRingBuffer->bufferSize, &totalSize) == NV_FALSE) + { + return NULL; + } + + bufferSize = totalSize; + pRingBuffer->refCount++; + + // + // XXX The collect-all design of the ring buffers allows for + // interleaved entries for different GPUs. This makes it + // hard to dynamically shrink any given ring buffer as GPUs are + // torn down, and requires that an upper bound be placed on + // the buffer's size. + // + // The upper bound, as chosen, is somewhat arbitrary, but at + // the time of this writing, consistent with the use of + // this interface (i.e. the number of entries for each type is + // the same for each GPU). + // + if (bufferSize > pRingBuffer->maxBufferSize) + return NULL; + } + else + { + pRingBuffer = portMemAllocNonPaged(sizeof(RING_BUFFER_LOG)); + if (pRingBuffer == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_ASSERT(status == NV_OK); + return NULL; + } + + portMemSet(pRingBuffer, 0x00, sizeof(*pRingBuffer)); + pRingBuffer->refCount = 1; + } + + pBuffer = portMemAllocNonPaged(bufferSize); + if (pBuffer == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_ASSERT(status == NV_OK); + pRingBuffer->refCount--; + if (pRingBuffer->pBuffer == NULL) + { + portMemFree(pRingBuffer); + } + return NULL; + } + + // Now, initialize the entries the RING_BUFFER structure. + pRingBuffer->maxEntries += maxEntries; + + // Add the ring buffer to the beginning of the ring buffer collection. 
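+    //
+    // (The branch below handles two cases: a brand-new ring buffer is linked
+    //  at the head of the collection, while a preexisting buffer that is
+    //  being grown for an additional GPU has its backing store replaced by
+    //  the larger allocation.)
+    //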
+ if (pRingBuffer->pBuffer == NULL) + { + if (portSafeMulU32(bufferSize, NV_MAX_DEVICES, &pRingBuffer->maxBufferSize) == NV_FALSE) + { + pRingBuffer->refCount--; + if (pRingBuffer->pBuffer == NULL) + { + portMemFree(pRingBuffer); + } + + portMemFree(pBuffer); + return NULL; + } + + pRingBuffer->maxBufferSize = (bufferSize * NV_MAX_DEVICES); + pRingBuffer->entryType = type; + pRingBuffer->pNextRingBuffer = pRingBufferColl->pFirstEntry; + pRingBufferColl->pFirstEntry = pRingBuffer; + pRingBufferColl->NumRingBuffers++; + } + else + { + NvU32 copySize; + + if (portSafeSubU32(bufferSize, pRingBuffer->bufferSize, ©Size) == NV_FALSE) + { + pRingBuffer->refCount--; + if (pRingBuffer->pBuffer == NULL) + { + portMemFree(pRingBuffer); + } + + portMemFree(pBuffer); + return NULL; + } + + portMemCopy(pBuffer, copySize, pRingBuffer->pBuffer, copySize); + portMemFree(pRingBuffer->pBuffer); + } + + pRingBuffer->bufferSize = bufferSize; + pRingBuffer->pBuffer = pBuffer; + return (NvU8 *)pRingBuffer; +} + +void +rcdbDestroyRingBuffer_IMPL +( + Journal *pRcDB, + RMCD_RECORD_TYPE type +) +{ + RING_BUFFER_LOG_COLLECTION *pRingBufferColl = &pRcDB->RingBufferColl; + RING_BUFFER_LOG *pRingBuffer, *pCurrentRingBuffer; + NvU32 i; + + rcdbFindRingBufferForType(pRcDB, type, &pRingBuffer); + if (pRingBuffer == NULL) + return; + + if (--pRingBuffer->refCount > 0) + return; + + pCurrentRingBuffer = pRingBufferColl->pFirstEntry; + if (pCurrentRingBuffer == pRingBuffer) + { + pRingBufferColl->pFirstEntry = pCurrentRingBuffer->pNextRingBuffer; + } + else + { + for (i = 0; i < pRingBufferColl->NumRingBuffers; i++) + { + if (pCurrentRingBuffer->pNextRingBuffer == pRingBuffer) + { + pCurrentRingBuffer->pNextRingBuffer = + pRingBuffer->pNextRingBuffer; + break; + } + pCurrentRingBuffer = pCurrentRingBuffer->pNextRingBuffer; + } + } + + portMemFree(pRingBuffer->pBuffer); + portMemFree(pRingBuffer); + + pRingBufferColl->NumRingBuffers--; +} + +/* +** _rcdbAllocRecFromRingBuffer allocates a buffer entry from the +** specified ring buffer. +** +** parameters: +** pGpu a pointer to the GPU object associated with the entry. +** pRcdb a pointer toe the Journal that contains the ring buffers +** type the record type to locate a buffer for. +** recordSize the size of the expected record +** +** notes: +** it is assumed the caller has successfully acquired the concurrentRingBufferAccess lock. +** failure to do so can result in concurrency issues. +*/ +RmRCCommonJournal_RECORD* +_rcdbAllocRecFromRingBuffer +( + OBJGPU *pGpu, + Journal *pRcDB, + RMCD_RECORD_TYPE type +) +{ + RING_BUFFER_LOG *pRingBuffer = NULL; + NvU32 newItemIndex; + RmRCCommonJournal_RECORD + *pCommon = NULL; + + // Find the ring buffer for this entry in the collection. + rcdbFindRingBufferForType(pRcDB, type, &pRingBuffer); + + if (pRingBuffer == NULL) + { + NV_ASSERT(0); + // + // There is no ring buffer allocated for this type. + // Nothing we can do about it. + // + return NULL; + } + + newItemIndex = (pRingBuffer->numEntries + pRingBuffer->headIndex) % pRingBuffer->maxEntries; + + // prepend the rmJournalCommon record to record. + pCommon = (RmRCCommonJournal_RECORD*)(pRingBuffer->pBuffer + (_rcdbGetOcaRecordSizeWithHeader(pRcDB, type) * newItemIndex)); + pCommon->Header.cRecordGroup = RmGroup; + pCommon->Header.cRecordType = type; + pCommon->Header.wRecordSize = (NvU16)_rcdbGetOcaRecordSizeWithHeader(pRcDB, type); + rcdbSetCommonJournalRecord(pGpu, pCommon); + + // Increment the number of entries or advance the head index. 
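+    //
+    // (Worked example of the wrap-around below, assuming maxEntries == 4 and
+    //  an initially empty buffer: the first four allocations fill slots 0..3
+    //  while numEntries grows to 4 and headIndex stays 0; the fifth
+    //  allocation computes newItemIndex = (4 + 0) % 4 = 0, reusing the
+    //  oldest slot, and headIndex advances to 1.)
+    //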
+ if (pRingBuffer->numEntries < pRingBuffer->maxEntries) + { + ++pRingBuffer->numEntries; + } + else + { + ++(pRingBuffer->headIndex); + if (pRingBuffer->headIndex >= pRingBuffer->maxEntries) + { + pRingBuffer->headIndex = 0; + } + } + return pCommon; +} + +/* +** rcdbAddRecToRingBuffer_IMPL allocates a buffer entry from the +** specified ring buffer & copies the supplied data buffer into it. +** +** parameters: +** pGpu a pointer to the GPU object associated with the entry. +** pRcdb a pointer toe the Journal that contains the ring buffers +** type the record type to locate a buffer for. +** recordSize the size of the expected record +** pRecord a pointer to the data that will populate the new ring buffer entry. +** +** notes: +*/ +void +rcdbAddRecToRingBuffer_IMPL +( + OBJGPU *pGpu, + Journal *pRcDB, + RMCD_RECORD_TYPE type, + NvU32 recordSize, + NvU8 *pRecord +) +{ + RmRCCommonJournal_RECORD + *pCommon; + + NV_ASSERT(recordSize == rcdbGetOcaRecordSize(pRcDB, type)); + + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + pCommon = _rcdbAllocRecFromRingBuffer(pGpu, pRcDB, type); + if (pCommon != NULL) + { + // copy the record to follow the common header. + portMemCopy(&(pCommon[1]), recordSize, pRecord, recordSize); + } + } + portAtomicDecrementS32(&concurrentRingBufferAccess); +} + +// Non-hal function to return the sizes of records that are not chip dependent. +NvU32 rcdbGetOcaRecordSize_IMPL(Journal *pRcDB, RMCD_RECORD_TYPE type) +{ + switch(type) + { + case RmRcDiagReport: + return sizeof(RmRcDiag_RECORD); + break; + case RmNocatReport: + return sizeof(RM_NOCAT_JOURNAL_ENTRY); + break; + default: + return 0; + } +} +static NvU32 _rcdbGetOcaRecordSizeWithHeader(Journal *pRcDB, RMCD_RECORD_TYPE type) +{ + NvU32 recSz; + + recSz = rcdbGetOcaRecordSize(pRcDB, type); + if (0 < recSz) + { + recSz += sizeof(RmRCCommonJournal_RECORD); + } + + // + // On architecture like RISC-V, loads/stores need to be aligned to the + // request size (1, 2, 4, 8-byte). Here, OCA record and header are stored + // in a ring buffer, hence total recSz needs to be 8-byte aligned for both + // producer (GSP RM) and consumer (CPU RM) of this data. + // + return NV_ALIGN_UP(recSz, 8); +} + + +NV_STATUS +rcdbAddRmGpuDump +( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + NvDebugDump *pNvd = GPU_GET_NVD(pGpu); + NVD_STATE *pNvDumpState = &pRcDB->nvDumpState; + SYS_ERROR_INFO *pSysErrorInfo = &pRcDB->ErrorInfo; + RMPRBERRORELEMENT_V2 *pPrbErrorInfo = NULL; + RMPRBERRORELEMENT_V2 *pErrorList = NULL; + RMCD_ERROR_BLOCK *pNewErrorBlock = NULL; + RMERRORHEADER *pErrorHeader = NULL; + PRB_ENCODER prbEnc; + NvU32 bufferUsed; + NvU8 *pBuf = NULL; + + // + // The deferred dump codepath will block out other dumps until the DPC can + // be executed. If this is the deferred callback attempting to do the dump, + // carry on. + // + if (pNvDumpState->bDumpInProcess && + !pRcDB->getProperty(pRcDB, PDB_PROP_RCDB_IN_DEFERRED_DUMP_CODEPATH)) + { + return NV_ERR_STATE_IN_USE; + } + + prbEnc.depth = 0; + pNvDumpState->bDumpInProcess = NV_TRUE; + pNvDumpState->nvDumpType = NVD_DUMP_TYPE_OCA; + pNvDumpState->bRMLock = rmApiLockIsOwner(); + + rcdbDumpInitGpuAccessibleFlag(pGpu, pRcDB); + + // + // General process: + // 1. Start the protobuf encoder in ALLOCATE mode, and dump the data + // 2. Allocate an error element to stick in the Journal list + // 3. Add the protobuf dump to the error element + // 4. 
Put the error element at the end of the error list on OBJRCDB + // + status = prbEncStartAlloc(&prbEnc, NVDEBUG_NVDUMP, NVDUMP_MAX_DUMP_SIZE, + NULL); + if (status != NV_OK) + { + // + // If we couldn't allocate the memory, it may be because we're at a + // raised IRQL. It's not a great idea to be gathering a bunch of state + // from the interrupt context anyway, so queue a work item to come back + // later and try again. + // + OBJOS *pOS = SYS_GET_OS(pSys); + NvU32 *pGpuInstance = NULL; + + // + // If that's what we've already done and we're still failing, bail out + // to avoid an infinite fail/queue-work-item loop. + // + if (pRcDB->getProperty(pRcDB, PDB_PROP_RCDB_IN_DEFERRED_DUMP_CODEPATH)) + { + NV_PRINTF(LEVEL_ERROR, + "deferred GPU dump encoder init failed (status = 0x%x)\n", + status); + goto done; + } + + NV_PRINTF(LEVEL_INFO, "deferring GPU dump for normal context\n"); + + // + // This will be freed by the OS work item layer. We pass the GPU + // instance as the data separately because if the GPU has fallen off + // the bus, the OS layer may refuse to execute work items attached to + // it. Instead, use the system work item interface and handle the GPU + // ourselves. + // + pGpuInstance = portMemAllocNonPaged(sizeof(NvU32)); + if (pGpuInstance == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + *pGpuInstance = gpuGetInstance(pGpu); + status = pOS->osQueueSystemWorkItem(_rcdbAddRmGpuDumpCallback, + pGpuInstance); + if (status != NV_OK) + { + portMemFree(pGpuInstance); + goto done; + } + + // + // Since we've queued the work item, leave the dump state marked as in + // use to prevent other interrupts and codepaths from attempting to + // initiate the dump and/or queue a new work item. + // + return NV_WARN_MORE_PROCESSING_REQUIRED; + } + + status = nvdDumpAllEngines(pGpu, pNvd, &prbEnc, pNvDumpState); + if (status != NV_OK) + { + // + // If the dump failed somewhere, unwind the encoder and then drop + // through to finish it out so we can get the pointer to the + // allocated buffer to free. 
+ // + while (prbEnc.depth > 1) + { + prbEncNestedEnd(&prbEnc); + } + } + + bufferUsed = prbEncFinish(&prbEnc, (void **)&pBuf); + + if (status != NV_OK) + { + goto done; + } + + // Allocate and initialize the error element + pPrbErrorInfo = portMemAllocNonPaged(sizeof(RMPRBERRORELEMENT_V2)); + if (pPrbErrorInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(pPrbErrorInfo, 0, sizeof(RMPRBERRORELEMENT_V2)); + pPrbErrorInfo->RmPrbErrorData.common.Header.cRecordGroup = RmGroup; + pPrbErrorInfo->RmPrbErrorData.common.Header.cRecordType = RmPrbFullDump_V2; + pPrbErrorInfo->RmPrbErrorData.common.Header.wRecordSize = sizeof(RMPRBERRORELEMENT_V2); + rcdbSetCommonJournalRecord(pGpu, &(pPrbErrorInfo->RmPrbErrorData.common)); + pErrorHeader = &pPrbErrorInfo->ErrorHeader; + pErrorHeader->pErrorBlock = NULL; + + // + // Allocate and initialize the error "block" associated with this protobuf + // dump + // + pNewErrorBlock = portMemAllocNonPaged(sizeof(RMCD_ERROR_BLOCK)); + if (pNewErrorBlock == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(pNewErrorBlock, 0, sizeof(RMCD_ERROR_BLOCK)); + pNewErrorBlock->pBlock = pBuf; + pNewErrorBlock->blockSize = bufferUsed; + pNewErrorBlock->pNext = NULL; + pErrorHeader->pErrorBlock = pNewErrorBlock; + + // Add the error element to the Journal list + if (pSysErrorInfo->pErrorList != NULL) + { + pErrorList = (RMPRBERRORELEMENT_V2*)pSysErrorInfo->pErrorList; + while (pErrorList->ErrorHeader.pNextError != NULL) + { + pErrorList = (RMPRBERRORELEMENT_V2*)pErrorList->ErrorHeader.pNextError; + } + + pErrorList->ErrorHeader.pNextError = (RMFIFOERRORELEMENT_V3*)pPrbErrorInfo; + } + else + { + pSysErrorInfo->pErrorList = pPrbErrorInfo; + } + + pSysErrorInfo->ErrorCount++; + +done: + if (status != NV_OK) + { + if (pBuf != NULL) + { + portMemFree(pPrbErrorInfo); + portMemFree(pBuf); + } + } + + pNvDumpState->bDumpInProcess = NV_FALSE; + return status; +} + +#if (defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX)) && !defined(NV_MODS) +#if !defined(DEBUG) && !defined(QA_BUILD) +/* + */ +NvBool +rcdProbeGpuPresent( + OBJGPU *pGpu, + NvU64 ip +) +{ + NvU32 testValue; + NvBool bFoundLostGpu = NV_FALSE; + + // protect against recursion when probing the GPU. + if (portAtomicIncrementS32(&probeGpuRecursion) == 1) + { + if (NULL != pGpu) + { + // is the GPU we are checking allready reported lost? + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST)) + { + testValue = GPU_CHECK_REG_RD32(pGpu, NV_PMC_BOOT_0, (~(pGpu->chipId0))); + if (testValue == GPU_REG_VALUE_INVALID) + { + // there shouldn't be a need to make a journal entry, + // as that should have been done by GPU_CHECK_REG_RD32 + + // Add GPU lost detection to to NvLog. + // But skip when nvLog asserts to avoid stack overflow. 
+#if defined(DEBUG) || defined(QA_BUILD) || ((defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX)) && !defined(NV_MODS)) + if (portAtomicIncrementS32(&nvLogRecursion) == 1) +#endif + { + NV_PRINTF(LEVEL_ERROR, + "found GPU %d (0x%p) inaccessible After assert\n", + pGpu->gpuInstance, pGpu); + } +#if defined(DEBUG) || defined(QA_BUILD) || ((defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX)) && !defined(NV_MODS)) + portAtomicDecrementS32(&nvLogRecursion); +#endif + bFoundLostGpu = NV_TRUE; + } + } + } + } + portAtomicDecrementS32(&probeGpuRecursion); + return bFoundLostGpu; +} + +NvBool +rcdProbeAllGpusPresent( + NvU64 ip +) +{ + NvBool bFoundLostGpu = NV_FALSE; + OBJGPU *pGpu; + NvU32 gpuMask; + NvU32 gpuIndex = 0; + + gpumgrGetGpuAttachInfo(NULL, &gpuMask); + pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + while (pGpu) + { + bFoundLostGpu = bFoundLostGpu || rcdProbeGpuPresent(pGpu, ip); + pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + } + return bFoundLostGpu; +} +#endif // !defined(DEBUG) && !defined(QA_BUILD) +#endif // (defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX)) && !defined(NV_MODS) + +void +rcdbAddCrashedFalcon +( + Falcon *pFlcn +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + + pRcDB->pCrashedFlcn = pFlcn; +} + + +/* +** _rcdbNocatCollectContext records the context of the GPU at the time the error is reported. +** +** parameters: +** pGpu pointer to GPU to be reported on. +** pContext pointer to context structure to be filled in. +** +** returns: +** NV_ERR_INVALID_ARGUMENT -- pContext is NULL +*/ +NV_STATUS +_rcdbNocatCollectContext(OBJGPU *pGpu, Journal* pRcdb, NV2080_NOCAT_JOURNAL_GPU_STATE* pContext) +{ + NV2080_NOCAT_JOURNAL_GPU_STATE* pContextCache = NULL; + const char *pTag; + + if (pRcdb == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // determine which tag to use. + if (pRcdb->nocatJournalDescriptor.tag[0] != '\0') + { + pTag = (char *)pRcdb->nocatJournalDescriptor.tag; + } + else + { + pTag = NOCAT_DEFAULT_TAG_VALUE_STR; + } + if (pGpu == NULL) + { + // w/o a GPU the only thing we can do is set the tag. + if (pContext != NULL) + { + portMemSet(pContext, 0, sizeof(*pContext)); + + portStringCopy((char *)pContext->tag, + NV2080_NOCAT_JOURNAL_MAX_STR_LEN, + pTag, + portStringLength(pTag) + 1); + } + return NV_OK; + } +#if NOCAT_COLLECT_PERF + pGpuCache = &(pGpu->nocatGpuCache); +#endif + pContextCache = &(pRcdb->nocatJournalDescriptor.nocatGpuState); + + // insert tag if we have one. 
+ portStringCopy((char *)pContextCache->tag, + NV2080_NOCAT_JOURNAL_MAX_STR_LEN, + pTag, + portStringLength(pTag) + 1); + + if (!pContextCache->bValid) + { + pContextCache->deviceId = (NvU16)(DRF_VAL(_PCI, _DEVID, _DEVICE, pGpu->idInfo.PCIDeviceID)); + pContextCache->vendorId = (NvU16)(DRF_VAL(_PCI, _SUBID, _VENDOR, pGpu->idInfo.PCIDeviceID)); + pContextCache->subsystemVendor = (NvU16)(DRF_VAL(_PCI, _SUBID, _VENDOR, pGpu->idInfo.PCISubDeviceID)); + pContextCache->subsystemId = (NvU16)(DRF_VAL(_PCI, _SUBID, _DEVICE, pGpu->idInfo.PCISubDeviceID)); + pContextCache->revision = pGpu->idInfo.PCIRevisionID; + pContextCache->type = pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_MOBILE); + + portStringCopy((char *)pContextCache->vbiosProject, NV2080_NOCAT_JOURNAL_MAX_STR_LEN, + NOCAT_UNKNOWN_STR, portStringLength(NOCAT_UNKNOWN_STR) + 1); + + if (!osIsRaisedIRQL()) + { + + pContextCache->bValid = NV_TRUE; + } + } + if (pContext != NULL) + { + portMemSet(pContext, 0, sizeof(*pContext)); + + *pContext = *pContextCache; + + pContext->bFullPower = gpuIsGpuFullPower(pGpu); + pContext->bInGc6Reset = pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET); + pContext->bInFullchipReset = pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET); + pContext->bInSecBusReset = pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET); + } + return NV_OK; +} + +/* +** _rcdbSetTdrReason translates the reason code to a string & puts that string +** in the provided buffer. +** +** parameters: +** tdrReason the reason code for the TDR +** pTdrReasonStr pointer to the place to copy the reason string to +** maxLen the size of the buffer pointed to in pTdrReasonStr. +** +*/ +void _rcdbSetTdrReason +( + Journal *pRcdb, + NvU32 tdrReason, + char *pTdrReasonStr, + NvU32 maxLen +) +{ + const char *pTmpStr; + + // validate inputs. + if (pRcdb == NULL) + { + return; + } + + // is there a string buffer & is it large enough to hold more than a NULL string + if ((pTdrReasonStr == NULL) || (maxLen < 2)) + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_PARAM_IDX]++; + return; + } + switch (tdrReason) + { + case NV2080_CTRL_NOCAT_TDR_TYPE_NONE: + pTmpStr = NOCAT_NA_STR; + break; + case NV2080_CTRL_NOCAT_TDR_TYPE_LEGACY: + pTmpStr = NOCAT_LEGACY_STR; + break; + case NV2080_CTRL_NOCAT_TDR_TYPE_FULLCHIP: + pTmpStr = NOCAT_FULLCHIP_TDR_STR; + break; + case NV2080_CTRL_NOCAT_TDR_TYPE_BUSRESET: + pTmpStr = NOCAT_BUS_RESET_TDR_STR; + break; + case NV2080_CTRL_NOCAT_TDR_TYPE_GC6_RESET: + pTmpStr = NOCAT_GC6_RESET_TDR_STR; + break; + case NV2080_CTRL_NOCAT_TDR_TYPE_SURPRISE_REMOVAL: + pTmpStr = NOCAT_SURPRISE_REMOVAL_TDR_STR; + break; + case NV2080_CTRL_NOCAT_TDR_TYPE_UCODE_RESET: + pTmpStr = NOCAT_UCODE_RESET_TDR_STR; + break; + default: + pTmpStr = NOCAT_UNKNOWN_STR; + break; + } + portStringCopy(pTdrReasonStr, maxLen, + pTmpStr, portStringLength(pTmpStr) + 1); +} + +/* +** _rcdbAllocNocatJournalRecord allocates a buffer entry from the Journal ring buffer +** for the specified type +** +** parameters: +** pGpu a pointer to the GPU object associated with the entry. +** pRcdb a pointer toe the Journal that contains the ring buffers +** type the record type to locate a buffer for. +** +** returns a pointer to a record in the ring buffer, or NULL if a record could not be allocated. +** +** notes: +** it is assumed the caller has successfully acquired the concurrentRingBufferAccess lock. +** the lock should be held until access the buffer is completed. +** failure to do so can result in concurrency issues. 
+** +** if successful, the buffer that is returned is cleared & an id assigned. +*/ +RM_NOCAT_JOURNAL_ENTRY* _rcdbAllocNocatJournalRecord +( + OBJGPU *pGpu, + OBJRCDB *pRcdb, + RmRCCommonJournal_RECORD **ppCommon +) +{ + nocatQueueDescriptor *pDesc = NULL; + RmRCCommonJournal_RECORD* pCommon; + RM_NOCAT_JOURNAL_ENTRY * pNocatEntry = NULL; + + // make sure someone has the lock. + if (concurrentRingBufferAccess == 0) + { + return NULL; + } + + pDesc = &pRcdb->nocatJournalDescriptor; + + // Get the next record from the appropriate nocat ring buffer. + pCommon = _rcdbAllocRecFromRingBuffer( + pGpu, + pRcdb, + RmNocatReport); + + if (pCommon != NULL) + { + // advance the pointer past the common header. + pNocatEntry = (RM_NOCAT_JOURNAL_ENTRY*)(((NvU8*)pCommon) + NV_SIZEOF32(RmRCCommonJournal_RECORD)); + + // clear the record & assign an id. + portMemSet(pNocatEntry, 0, NV_SIZEOF32(*pNocatEntry)); + pNocatEntry->id = pDesc->nextRecordId++; + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_ALLOCATED_IDX]++; + } + else + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_ALLOC_FAILED_IDX]++; + } + if (ppCommon != NULL) + { + *ppCommon = pCommon; + } + return pNocatEntry; +} + +/* +** _rcdbGetNocatJournalRecord returns a pointer to the requested record, +** or optionally the oldest record if the requested one is not available. +** +** parameters: +** pRcdb a pointer toe the Journal that contains the ring buffers +** id id of the record we are looking for +** bExactMatch indicates if we want an exact match, or the closest record. +** ppCommon a pointer to a pointer that will hold the pointer to +** the common part of the record. +** this can be NULL +** ppReturnedNocatEntry +** a pointer to a pointer that will hold the pointer to +** the nocat part of the record +** this can be NULL +** +** notes: +** it is assumed the caller has successfully acquired the concurrentRingBufferAccess lock. +** the lock should be held until access the buffer is completed. +** failure to do so can result in concurrency issues. +*/ +NV_STATUS +_rcdbGetNocatJournalRecord +( + OBJRCDB *pRcdb, + NvU32 reqId, + NvBool bExactMatch, + RmRCCommonJournal_RECORD + **ppReturnedCommon, + RM_NOCAT_JOURNAL_ENTRY + **ppReturnedNocatEntry +) +{ + nocatQueueDescriptor *pDesc; + RmRCCommonJournal_RECORD *pCommon = NULL; + RM_NOCAT_JOURNAL_ENTRY *pNocatEntry = NULL; + RING_BUFFER_LOG *pRingBuffer = NULL; + NvS32 offset; + NvS32 idx; + + // make sure someone has the lock. + if (concurrentRingBufferAccess == 0) + { + return NV_ERR_BUSY_RETRY; + } + + // is there anything to do + if ((ppReturnedCommon == NULL) && (ppReturnedNocatEntry == NULL)) + { + return NV_OK; + } + + // validate inputs. + if (pRcdb == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + pDesc = &pRcdb->nocatJournalDescriptor; + + // assume we will fail + if (ppReturnedCommon != NULL) + { + *ppReturnedCommon = NULL; + } + if (ppReturnedNocatEntry != NULL) + { + *ppReturnedNocatEntry = NULL; + } + + // if there is nothing in the buffer, + // we can't return a record. 
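+    // (Sketch of the id-to-slot math used further below, under assumed
+    //  values nextRecordId == 10, numEntries == 4, headIndex == 2, i.e. the
+    //  buffer holds ids 6..9 with the oldest in slot 2: requesting id 8
+    //  gives offset = 10 - 8 = 2 and idx = (2 + 4 - 2) % 4 = 0, which is
+    //  the slot holding id 8.)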
+ if ((pDesc->nextRecordId - pDesc->nextReportedId) == 0) + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_MISSED_IDX]++; + return NV_ERR_OBJECT_NOT_FOUND; + } + + // Find the ring buffer for the diag reports + rcdbFindRingBufferForType(pRcdb, RmNocatReport, &pRingBuffer); + if (pRingBuffer == NULL) + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_MISSED_IDX]++; + return NV_ERR_OBJECT_NOT_FOUND; + } + // determine how far back from the head our record should be. + offset = pDesc->nextRecordId - reqId; + + // start of from the next record we will replace. + // this will be the oldest buffer in the record, + // or the next empty record, either way, we will wrap to the right one + idx = pRingBuffer->headIndex; + + // is the requested record in the buffer? + if ((0 <= offset) && ((NvU16)offset <= pRingBuffer->numEntries)) + { + // back out the offset from the newest/empty record. + idx += pRingBuffer->numEntries - offset; + } + else if (bExactMatch) + { + // the record is not in the buffer, & we weren't asked for the closest match. + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_MISSED_IDX]++; + return NV_ERR_OBJECT_NOT_FOUND; + } + // wrap the idx to the current size of the buffer. + idx %= pRingBuffer->numEntries; + + // get a pointer to the common record & the record from the buffer. + pCommon = (RmRCCommonJournal_RECORD*)(((NvU8*)pRingBuffer->pBuffer) + (_rcdbGetOcaRecordSizeWithHeader(pRcdb, RmNocatReport) * idx)); + + // get a pointer to the data that follows the common header, that is the record data. + pNocatEntry = (RM_NOCAT_JOURNAL_ENTRY*)(((NvU8*)pCommon) + NV_SIZEOF32(RmRCCommonJournal_RECORD)); + + // pass the record along + if (ppReturnedCommon != NULL) + { + *ppReturnedCommon = pCommon; + } + if (ppReturnedNocatEntry != NULL) + { + *ppReturnedNocatEntry = pNocatEntry; + } + return NV_OK; +} + +/* +** _rcdbGetNewestNocatJournalRecordForType returns a pointer to the newest record for the +** specified type if there is one. +** +** parameters: +** pRcdb a pointer toe the Journal that contains the ring buffers +** type type of record we want. +** ppCommon a pointer to a pointer that will hold the pointer to +** the common part of the record. +** this can be NULL +** ppCommon a pointer to a pointer that will hold the pointer to +** the nocat part of the record +** this can be NULL +** +** notes: +** it is assumed the caller has successfully acquired the concurrentRingBufferAccess lock. +** the lock should be held until access the buffer is completed. +** failure to do so can result in concurrency issues. +*/ +NV_STATUS +_rcdbGetNewestNocatJournalRecordForType +( + OBJRCDB *pRcdb, + NvU32 type, + RmRCCommonJournal_RECORD + **ppReturnedCommon, + RM_NOCAT_JOURNAL_ENTRY + **ppReturnedNocatEntry +) +{ + if (type >= NV2080_NOCAT_JOURNAL_REC_TYPE_COUNT) + { + // we failed + if (ppReturnedCommon != NULL) + { + *ppReturnedCommon = NULL; + } + if (ppReturnedNocatEntry != NULL) + { + *ppReturnedNocatEntry = NULL; + } + return NV_ERR_OBJECT_NOT_FOUND; + } + return _rcdbGetNocatJournalRecord(pRcdb, pRcdb->nocatJournalDescriptor.lastRecordId[type], NV_TRUE, + ppReturnedCommon, ppReturnedNocatEntry); +} + +/* +** rcdbReportNextNocatJournalEntry fills in the provided Nocat Journal record with the next record +** to be reported, then updates the last reported id. 
+** +** parameters: +** pReturnedNocatEntry a pointer to the buffer where the journal record will be transferred to +** +** returns: +** NV_OK -- the record was successfully updated with the next record to report. +** NV_ERR_INVALID_ARGUMENT -- the provided pointer is NULL +** NV_ERR_OBJECT_NOT_FOUND -- we could not locate a record to report. +** +** notes: +** we are transferring the record to the target location here instead of passing a pointer +** to insure the data is transferred while we hold the concurrentRingBufferAccess lock. +** failure to do so can result in concurrency issues. +** +** priority is determined by the record journal queue values. the lower value has +** higher priority. +** +** now that we have moved from a single entry, to a queue, we need to +** consume the entry once we report it +** +*/ +NV_STATUS +rcdbReportNextNocatJournalEntry +( + NV2080_NOCAT_JOURNAL_RECORD + *pReturnedNocatEntry +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcdb = SYS_GET_RCDB(pSys); + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + nocatQueueDescriptor *pDesc; + RmRCCommonJournal_RECORD *pCommon = NULL; + RM_NOCAT_JOURNAL_ENTRY *pNocatEntry = NULL; + + // validate inputs. + if (pRcdb == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_REQUESTED_IDX]++; + + if (pReturnedNocatEntry == NULL) + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_PARAM_IDX]++; + return NV_ERR_INVALID_ARGUMENT; + } + portMemSet(pReturnedNocatEntry, 0, NV_SIZEOF32(*pReturnedNocatEntry)); + + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + pDesc = &pRcdb->nocatJournalDescriptor; + _rcdbGetNocatJournalRecord(pRcdb, + pDesc->nextReportedId, NV_FALSE, + &pCommon, &pNocatEntry); + if ((pCommon != NULL) && (pNocatEntry != NULL)) + { + // we have a record, push it into the return buffer + pReturnedNocatEntry->GPUTag = pCommon->GPUTag; + + // copy over the data into the supplied buffer. + pReturnedNocatEntry->loadAddress = pDesc->loadAddress; + pReturnedNocatEntry->timeStamp = pCommon->timeStamp; + pReturnedNocatEntry->stateMask = pCommon->stateMask; + pReturnedNocatEntry->nocatGpuState = pNocatEntry->nocatGpuState; + pReturnedNocatEntry->nocatJournalEntry = pNocatEntry->nocatJournalEntry; + + // check if we lost any records. + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES1_IDX] += + pNocatEntry->id - pDesc->nextReportedId; + + // update the NocatJournalNextReportedId + pDesc->nextReportedId = pNocatEntry->id + 1; + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_REPORTED_IDX]++; + + status = NV_OK; + } + } + else + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BUSY_IDX]++; + status = NV_ERR_BUSY_RETRY; + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + if (pRcdb->nocatJournalDescriptor.journalLocked) + { + pRcdb->nocatJournalDescriptor.journalLocked = rcdbGetNocatOutstandingCount(pRcdb) > 0; + } + return status; +} + +/* +** rcdbGetNocatOutstandingCount returns the number of NOCAT events that have +** been recorded since the last reported record. +** +** parameters: +** pRcdb -- a pointer to the Journal object. +** +** returns: +** number of NOCAT events that have been recorded since the last reported record. +** or NV_U32_MAX if a NULL journal object pointer is provided. 
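+**      (for example, nextRecordId == 12 with nextReportedId == 9 yields a
+**      count of 3)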
+** +** notes: +** the returned count includes records that have been dropped due to wrapping. +** +*/ +NvU32 +rcdbGetNocatOutstandingCount(Journal *pRcdb) +{ + NvU32 count = NV_U32_MAX; + if (pRcdb != NULL) + { + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + count = pRcdb->nocatJournalDescriptor.nextRecordId + - pRcdb->nocatJournalDescriptor.nextReportedId; + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + } + return count; +} + +/* +** _rcdbSendNocatJournalNotification sends an ETW Notification that a NOCAT Journal record has been posted. +** +** parameters: +** pGpu -- a pointer to the GPU object associated with teh new entry +** (may be NULL) +** pRcdb -- a pointer to the Journal object NOCAT is using. +** posted -- the number of records posted since the last record that was retrieved. +** pCommon -- a pointer to the common record header associated with the record. +** type -- the record type +** +** returns: +** NV_OK -- the call to post the record was made. +** note that the call to post the record does not return a status, +** so we do not know if the call was successful. +** NV_ERR_INVALID_ARGUMENT -- one of the required pointers is NULL +** +*/ +NV_STATUS +_rcdbSendNocatJournalNotification +( + OBJGPU *pGpu, + Journal *pRcdb, + NvU32 posted, + RmRCCommonJournal_RECORD *pCommon, + NvU32 type +) +{ + if ((pCommon == NULL) || (pRcdb == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + RMTRACE_NOCAT(_REPORT_PENDING, (pGpu ? pGpu->gpuId : RMTRACE_UNKNOWN_GPUID), + RmNocatReport, + posted, + type, + rcdbGetNocatOutstandingCount(pRcdb), + pCommon->timeStamp); + + // count the number of notifications. + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_RES2_IDX]++; + return NV_OK; +} + +/* +** rcdbInitNocatGpuCache_IMPL initializes a per GPU cache held in the GPU object to be used by NOCAT +** +** parameters: +** pGpu -- a pointer to the GPU Object the containing the cache +** +** notes: +** this function: +** * caches the driver load address +** * allocates a small block of memory in the frame buffer for testing +** * initializes the GPU context cache +** +*/ +void rcdbInitNocatGpuCache_IMPL(OBJGPU *pGpu) +{ + OS_DRIVER_BLOCK driverBlock; + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcdb = SYS_GET_RCDB(pSys); +#if NOCAT_PROBE_FB_MEMORY + NvU8 *pCpuPtr; +#endif + if (pGpu == NULL) + { + return; + } + portMemSet(&driverBlock, 0x00, sizeof(driverBlock)); + if (osGetDriverBlock(pGpu->pOsGpuInfo, &driverBlock) == NV_OK) + { + pRcdb->nocatJournalDescriptor.loadAddress = (NvU64)driverBlock.driverStart; + } + +#if NOCAT_PROBE_FB_MEMORY + // Allocate some memory for virtual BAR2 testing + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM) && !IsAMODEL(pGpu)) + { + memdescCreateExisting(&pGpu->nocatGpuCache.fbTestMemDesc, + pGpu, NOCAT_FBSIZETESTED, ADDR_FBMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE); + if (memdescAlloc(&pGpu->nocatGpuCache.fbTestMemDesc) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate vidmem for NOCAT bar2 testing\n"); + return; + } + pCpuPtr = kbusMapRmAperture_HAL(pGpu, &pGpu->nocatGpuCache.fbTestMemDesc); + if (pCpuPtr == NULL) + { + memdescFree(&pGpu->nocatGpuCache.fbTestMemDesc); + memdescDestroy(&pGpu->nocatGpuCache.fbTestMemDesc); + pGpu->nocatGpuCache.pCpuPtr = NULL; + return; + } + pGpu->nocatGpuCache.pCpuPtr = pCpuPtr; + } +#endif + // initialize the context cache + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + _rcdbNocatCollectContext(pGpu, pRcdb, 
NULL); + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + + return; +} + +/* +** rcdbCleanupNocatGpuCache_IMPL returns per GPU resources used by NOCAT. +** +** parameters: +** pGpu -- a pointer to the GPU Object the containing the cache +** +** notes: +** This will free up the FB test window if allocated, and clear out the cache +** +*/ +void rcdbCleanupNocatGpuCache_IMPL(OBJGPU *pGpu) +{ +#if NOCAT_PROBE_FB_MEMORY + if (pGpu == NULL) + { + return; + } + if (pGpu->nocatGpuCache.pCpuPtr != NULL) + { + kbusUnmapRmApertureWithFlags_HAL(pGpu, &pGpu->nocatGpuCache.fbTestMemDesc, + &pGpu->nocatGpuCache.pCpuPtr, TRANSFER_FLAGS_NONE); + memdescFree(&pGpu->nocatGpuCache.fbTestMemDesc); + memdescDestroy(&pGpu->nocatGpuCache.fbTestMemDesc); + } + portMemSet(&pGpu->nocatGpuCache, 0, sizeof(pGpu->nocatGpuCache)); +#endif + + return; +} + + +/* +** rcdbNocatPostError records a reported NOCAT error +** +** parameters: +** pGpu Pointer to GPU associated with the error +** may be NULL if there is no GPU associated with the error +** if NULL the primary GPU is used +** pNewEntry A pointer to a structure that contains all the available data for the report +*/ +NvU32 +rcdbNocatInsertNocatError( + OBJGPU *pGpu, + NOCAT_JOURNAL_PARAMS *pNewEntry +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcdb = SYS_GET_RCDB(pSys); + NvBool bInsertRecord; +#if(NOCAT_PROBE_FB_MEMORY) + NvBool bCheckFBState = NV_FALSE; +#endif + RmRCCommonJournal_RECORD *pCommon = NULL; + RM_NOCAT_JOURNAL_ENTRY *pNocatEntry = NULL; + NvU32 id = INVALID_RCDB_NOCAT_ID; + const char *pSource = NULL; + NvU32 diagBufferLen = 0; + const char *pFaultingEngine = NULL; + + // validate inputs. + if (pRcdb == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_REQ_IDX]++; + if (pNewEntry == NULL) + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BAD_PARAM_IDX]++; + return 0; + } + + // perform any record type specific setup + switch (pNewEntry->recType) + { + case NV2080_NOCAT_JOURNAL_REC_TYPE_BUGCHECK: +#if(NOCAT_PROBE_FB_MEMORY) + bCheckFBState = NV_TRUE; +#endif + // fall thru + + case NV2080_NOCAT_JOURNAL_REC_TYPE_TDR: + // lock the journal so we don't wrap over the record we are inserting. + bInsertRecord = NV_TRUE; + pRcdb->nocatJournalDescriptor.journalLocked = NV_TRUE; + break; + + case NV2080_NOCAT_JOURNAL_REC_TYPE_RC: +#if(NOCAT_PROBE_FB_MEMORY) + bCheckFBState = NV_TRUE; +#endif + // check if we should insert the record + bInsertRecord = !pRcdb->nocatJournalDescriptor.journalLocked; + pSource = "RC Error"; + break; + + case NV2080_NOCAT_JOURNAL_REC_TYPE_ASSERT: + // check if we should insert the record + bInsertRecord = !pRcdb->nocatJournalDescriptor.journalLocked; + + pSource = "ASSERT"; + break; + + case NV2080_NOCAT_JOURNAL_REC_TYPE_ENGINE: + // check if we should insert the record + bInsertRecord = !pRcdb->nocatJournalDescriptor.journalLocked; + break; + + case NV2080_NOCAT_JOURNAL_REC_TYPE_UNKNOWN: + default: + return NV_FALSE; + break; + } + if (bInsertRecord) + { + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + // start recording this new record by allocating a record from the buffer. + pNocatEntry = _rcdbAllocNocatJournalRecord(pGpu, pRcdb, &pCommon); + if (pNocatEntry != NULL) + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECTED_IDX]++; + + // save the record Id for the type. 
+ pRcdb->nocatJournalDescriptor.lastRecordId[pNewEntry->recType] = + pRcdb->nocatJournalDescriptor.lastRecordId[NV2080_NOCAT_JOURNAL_REC_TYPE_ANY] = + pRcdb->nocatJournalDescriptor.nextRecordId - 1; + + // set the type. + pNocatEntry->nocatJournalEntry.recType = pNewEntry->recType; + + // set bugcheck + pNocatEntry->nocatJournalEntry.bugcheck = pNewEntry->bugcheck; + + // get context + _rcdbNocatCollectContext(pGpu, pRcdb, &(pNocatEntry->nocatGpuState)); + +#if(NOCAT_PROBE_FB_MEMORY) + if ((bCheckFBState) + && (pGpu != NULL) + && (pGpu->nocatGpuCache.pCpuPtr != NULL)) + { + switch (kbusVerifyBar2_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), + &pGpu->nocatGpuCache.fbTestMemDesc, pGpu->nocatGpuCache.pCpuPtr, 0, NOCAT_FBSIZETESTED)) + { + case NV_OK: // everything passed + break; + + case NV_ERR_MEMORY_ERROR: // BAR 0 failed & BAR 2 was not checked, or BAR 2 failed + // for now we don't know which BAR failed, so mark both. + // but only one BAR failed. + // (if BAR 0 Failed, BAR 2 was not checked) + pCommon->stateMask |= + NV_RM_JOURNAL_STATE_MASK_VIDMEM_FAILED_BAR0 + | NV_RM_JOURNAL_STATE_MASK_VIDMEM_FAILED_BAR2; + break; + + default: // some other processing error cause us to not test the BAR + break; + } + } +#endif + // is there a valid string for source? + // (non NULL ptr & more than just a termination) + if ((pNewEntry->pSource != NULL) && (pNewEntry->pSource[0] != '\0')) + { + // yes, use that. + pSource = pNewEntry->pSource; + } + // the caller did not supply a source, + // did we set a default source based on record type? + else if (pSource == NULL) + { + // no, supply the unknown string for source. + pSource = NOCAT_UNKNOWN_STR; + } + portStringCopy((char*)pNocatEntry->nocatJournalEntry.source, + NV2080_NOCAT_JOURNAL_MAX_STR_LEN, + pSource, + portStringLength(pSource) + 1); + + pNocatEntry->nocatJournalEntry.subsystem = pNewEntry->subsystem; + pNocatEntry->nocatJournalEntry.errorCode = pNewEntry->errorCode; + + if ((pNewEntry->pDiagBuffer != NULL) && (pNewEntry->diagBufferLen != 0)) + { + // checking length here as we don't want portMemCopy to assert + if (pNewEntry->diagBufferLen < NV_ARRAY_ELEMENTS(pNocatEntry->nocatJournalEntry.diagBuffer)) + { + diagBufferLen = pNewEntry->diagBufferLen; + } + else + { + // make best effort + diagBufferLen = NV_ARRAY_ELEMENTS(pNocatEntry->nocatJournalEntry.diagBuffer); + } + portMemCopy(pNocatEntry->nocatJournalEntry.diagBuffer, + NV_SIZEOF32(pNocatEntry->nocatJournalEntry.diagBuffer), + pNewEntry->pDiagBuffer, diagBufferLen); + } + pNocatEntry->nocatJournalEntry.diagBufferLen = diagBufferLen; + + pFaultingEngine = pNewEntry->pFaultingEngine != NULL ? + pNewEntry->pFaultingEngine : NOCAT_UNKNOWN_STR; + + portStringCopy((char*)pNocatEntry->nocatJournalEntry.faultingEngine, + NV2080_NOCAT_JOURNAL_MAX_STR_LEN, + pFaultingEngine, portStringLength(pFaultingEngine) + 1); + + _rcdbSetTdrReason(pRcdb, pNewEntry->tdrReason, + (char*)pNocatEntry->nocatJournalEntry.tdrReason, + NV_SIZEOF32(pNocatEntry->nocatJournalEntry.tdrReason)); + } + else + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_FAILED_IDX]++; + } + } + else + { + // we are busy, so we can't insert the record, count the record as dropped & count the busy. 
+ pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BUSY_IDX]++; + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_REQ_DROPPED_IDX]++; + bInsertRecord = NV_FALSE; + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + } + else + { + // we are dropping the record, count that. + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_COLLECT_REQ_DROPPED_IDX]++; + } + // no matter what happened, trigger the event to indicate a record was processed. + _rcdbSendNocatJournalNotification(pGpu, pRcdb, bInsertRecord, pCommon, pNewEntry->recType); + + return id; +} + +/* +** rcdbNocatInsertBugcheck is the interface to record a bugcheck NOCAT report +** +** parameters: +** deviceInstance The instance of the GPU associated with the bugcheck. +** bugcheck The bugcheck number +*/ +NvU32 +rcdbNocatInsertBugcheck +( + NvU32 deviceInstance, + NvU32 bugCheckCode) +{ + NOCAT_JOURNAL_PARAMS newEntry; + + portMemSet(&newEntry, 0, sizeof(newEntry)); + newEntry.recType = NV2080_NOCAT_JOURNAL_REC_TYPE_BUGCHECK; + newEntry.bugcheck = bugCheckCode; + newEntry.pSource = "OS"; + newEntry.errorCode = bugCheckCode; + return rcdbNocatInsertNocatError(gpumgrGetGpu(deviceInstance), &newEntry); +} + +/* +** rcdbNocatInitEngineErrorEvent initializes a parameter structure for an engine error event +** +** parameters: +** pNewEntry Pointer to event parameter structure to be initialized +*/ +NV_STATUS +rcdbNocatInitEngineErrorEvent +( + NOCAT_JOURNAL_PARAMS *pNewEntry +) +{ + if (pNewEntry == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + portMemSet(pNewEntry, 0, sizeof(*pNewEntry)); + pNewEntry->recType = NV2080_NOCAT_JOURNAL_REC_TYPE_ENGINE; + return NV_OK; +} + +/* +** rcdbNocatInsertEngineError records a reported NOCAT error from an engine, +** +** parameters: +** pGpu Pointer to GPU associated with the error +** may be NULL if there is no GPU associated with the error +** if NULL the primary GPU is used +** pSource A string indicating the reporting source of the error. +** if NULL, a default values will be used +** subsystem The optional subsystem ID used by the source to identify the error +** errorCode The error code +** pDiagBuffer A pointer to the diagnostic buffer associated with the error +** may be NULL +** diagBufferLen The size of the diagnostic buffer +** if the size exceeds the supported diagBuffer size, the buffer contents will be truncated to fit. 
+*/ +NvU32 +rcdbNocatInsertEngineError( + OBJGPU *pGpu, + const char *pSource, + NvU32 subsystem, + NvU64 errorCode, + NvU8 *pDiagBuffer, + NvU32 diagBufferLen +) +{ + NOCAT_JOURNAL_PARAMS newEntry; + + rcdbNocatInitEngineErrorEvent(&newEntry); + newEntry.pSource = pSource; + newEntry.subsystem = subsystem; + newEntry.errorCode = errorCode; + newEntry.pDiagBuffer = pDiagBuffer; + newEntry.diagBufferLen = diagBufferLen; + return rcdbNocatInsertNocatError(pGpu, &newEntry); +} + +/* +** rcdbNocatInitEngineErrorEvent initializes a parameter structure for an engine error event +** +** parameters: +** pNewEntry Pointer to event parameter structure to be initialized +*/ +NV_STATUS +rcdbNocatInitTDRErrorEvent +( + NOCAT_JOURNAL_PARAMS *pNewEntry +) +{ + if (pNewEntry == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + portMemSet(pNewEntry, 0, sizeof(*pNewEntry)); + pNewEntry->recType = NV2080_NOCAT_JOURNAL_REC_TYPE_TDR; + return NV_OK; +} + +/* +** rcdbNocatInsertTDRError records an TDR error, +** +** parameters: +** pGpu Pointer to GPU associated with the error +** may be NULL if there is no GPU associated with the error +** if NULL the primary GPU is used +** pSource A string indicating the reporting source of the error. +** if NULL, a default values will be used +** subsystem The optional subsystem ID used by the source to identify the error +** errorCode The error code +** TDRBucket The TDR bucket +** pDiagBuffer A pointer to the diagnostic buffer associated with the error +** may be NULL +** diagBufferLen The size of the diagnostic buffer +** if the size exceeds the supported diagBuffer size, +** the buffer contents will be truncated to fit. +** tdrReason A reason code for the TDR +** pFaultingApp A pointer to the faulting app name if known +*/ +NvU32 +rcdbNocatInsertTDRError +( + OBJGPU *pGpu, + const char *pSource, + NvU32 subsystem, + NvU64 errorCode, + NvU32 TdrBucket, + NvU8 *pDiagBuffer, + NvU32 diagBufferLen, + NvU32 tdrReason, + const char *pFaultingEngine +) +{ + NOCAT_JOURNAL_PARAMS newEntry; + + rcdbNocatInitTDRErrorEvent(&newEntry); + newEntry.pSource = pSource; + newEntry.subsystem = subsystem; + newEntry.errorCode = errorCode; + newEntry.pDiagBuffer = pDiagBuffer; + newEntry.diagBufferLen = diagBufferLen; + newEntry.pFaultingEngine = pFaultingEngine; + return rcdbNocatInsertNocatError(pGpu, &newEntry); +} +NV_STATUS +rcdbNocatInitRCErrorEvent +( + NOCAT_JOURNAL_PARAMS *pNewEntry +) +{ + if (pNewEntry == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + portMemSet(pNewEntry, 0, sizeof(*pNewEntry)); + pNewEntry->recType = NV2080_NOCAT_JOURNAL_REC_TYPE_RC; + pNewEntry->pSource = "RC ERROR"; + return NV_OK; +} + +/* +** _rcdbNocatReportAssert adds an assert record. +** +** parameters: +** pGpu Pointer to GPU associated with the error +** may be NULL +** pAssertRec A pointer to the assert to report +*/ +NV_STATUS +_rcdbNocatReportAssert +( + OBJGPU *pGpu, + RmRCCommonAssert_RECORD *pAssertRec +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcdb = SYS_GET_RCDB(pSys); + NOCAT_JOURNAL_PARAMS newEntry; + RM_NOCAT_ASSERT_DIAG_BUFFER diagBuffer; + RM_NOCAT_ASSERT_DIAG_BUFFER *pDiagData; + NvU32 idx; + RM_NOCAT_JOURNAL_ENTRY *pNocatEntry = NULL; + NvU32 gpuCnt= 0; + OBJGPU *pTmpGpu = gpumgrGetGpu(0); + + // validate inputs. 
+ if ((pRcdb == NULL) || (pAssertRec == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + if (pGpu == NULL) + { + // we don't have a GPU, if there is only 1 GPU, + // we can safely use it for logging this assert + gpumgrGetGpuAttachInfo(&gpuCnt, NULL); + if (gpuCnt == 1) + { + pGpu = pTmpGpu; + } + } + + // start off assuming we will be recording a report + portMemSet(&newEntry, 0, sizeof(newEntry)); + newEntry.recType = NV2080_NOCAT_JOURNAL_REC_TYPE_ASSERT; + newEntry.pSource = "ASSERT"; + + // save the assert point as the error code. + newEntry.errorCode = + (NvU32)((pAssertRec->breakpointAddrHint - pRcdb->nocatJournalDescriptor.loadAddress) + & 0xffffffff); + + // put the line number in the upper 32 bits. + newEntry.errorCode |= ((NvU64)pAssertRec->lineNum) << 32; + + for (idx = 0; idx < NV_ARRAY_ELEMENTS32(pAssertRec->callStack); idx++) + { + diagBuffer.callStack[idx] = + (NvU32)((pAssertRec->callStack[idx] - pRcdb->nocatJournalDescriptor.loadAddress) + & 0xffffffff); + } + // initialize count + diagBuffer.count = 1; + + // setup the pointer to our diag buffer & its length + newEntry.pDiagBuffer = (NvU8 *)&diagBuffer; + + newEntry.diagBufferLen = NV_SIZEOF32(diagBuffer); + + // is the last thing we logged an assert, & is this the same assert? + if ((pRcdb->nocatJournalDescriptor.lastRecordId[NV2080_NOCAT_JOURNAL_REC_TYPE_ASSERT] + == pRcdb->nocatJournalDescriptor.lastRecordId[NV2080_NOCAT_JOURNAL_REC_TYPE_ANY]) + && (0 == portMemCmp(&pRcdb->nocatJournalDescriptor.lastAssertData, + diagBuffer.callStack, // same stack + NV_SIZEOF32(diagBuffer.callStack)))) + { + // it is the same as the last assert we logged. so don't log it again. + // but see if we can increment the counter in an unreported assert. + // check if the last record is also an assert + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + // get the last record from the buffer + _rcdbGetNewestNocatJournalRecordForType(pRcdb, + NV2080_NOCAT_JOURNAL_REC_TYPE_ANY, + NULL, &pNocatEntry); + if (pNocatEntry != NULL) + { + // is it an assert? + if (pNocatEntry->nocatJournalEntry.recType == (NV2080_NOCAT_JOURNAL_REC_TYPE_ASSERT)) + { + // increment the count + pDiagData = (RM_NOCAT_ASSERT_DIAG_BUFFER*)&pNocatEntry->nocatJournalEntry.diagBuffer; + pDiagData->count++; + } + } + } + else + { + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_BUSY_IDX]++; + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + } + else + { + // we are logging this assert, save off the stack so we can use it to + // compare against future asserts. + portMemCopy(&pRcdb->nocatJournalDescriptor.lastAssertData, + NV_SIZEOF32(pRcdb->nocatJournalDescriptor.lastAssertData), + &diagBuffer, NV_SIZEOF32(diagBuffer)); + rcdbNocatInsertNocatError(pGpu, &newEntry); + } + + return NV_OK; +} + +/* +** rcdbNocatInsertRMCDErrorEvent creates an event from an RMCD error block +** +** parameters: +** pGpu pointer to GPU object associated with the error +** recType the type of event to create +** pSource a pointer to the source string +** subsystem the subsystem associated with the event. 
+** errorCode error code for the event +** pFault pointer to a faulting component identifier associated with the error +*/ +NvU32 rcdbNocatInsertRMCDErrorEvent(OBJGPU *pGpu, NvU32 recType, + const char *pSource, NvU32 subsystem, NvU64 errorCode, const char *pFault, + RMCD_ERROR_BLOCK *pRcdError) +{ + NOCAT_JOURNAL_PARAMS newEntry; + + portMemSet(&newEntry, 0, sizeof(newEntry)); + newEntry.recType = recType; + newEntry.pSource = pSource; + newEntry.subsystem = subsystem; + newEntry.errorCode = errorCode; + newEntry.pFaultingEngine = pFault; + if (pRcdError != NULL) + { + newEntry.pDiagBuffer = (NvU8 * )pRcdError->pBlock; + newEntry.diagBufferLen = pRcdError->blockSize; + } + return rcdbNocatInsertNocatError(pGpu, &newEntry); +} + +/* +** rcdbSetNocatTdrReason sets the TDR reason code in the most recent TDR record if there is one, +** otherwise, it creates one with the reason code. +** +** parameters: +** pReasonData the data supplied with including the reason code. +** if a TDR record exists, the reason will be added to the existing record, +** otherwise the rest of the data will be used to create a new TDR event. +*/ +NV_STATUS rcdbSetNocatTdrReason +( + NV2080CtrlNocatJournalDataTdrReason *pReasonData +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcdb = SYS_GET_RCDB(pSys); + RM_NOCAT_JOURNAL_ENTRY* pNocatEntry = NULL; + + // validate inputs. + if ((pRcdb == NULL) || (pReasonData == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATE_REQ_IDX]++; + + if (portAtomicIncrementS32(&concurrentRingBufferAccess) == 1) + { + // see if there is a TDR record. + _rcdbGetNewestNocatJournalRecordForType(pRcdb, + NV2080_NOCAT_JOURNAL_REC_TYPE_TDR, + NULL, &pNocatEntry); + if (pNocatEntry != NULL) + { + // there is, set the reason. + _rcdbSetTdrReason(pRcdb, pReasonData->reasonCode, + (char *)pNocatEntry->nocatJournalEntry.tdrReason, + NV_SIZEOF32(pNocatEntry->nocatJournalEntry.tdrReason)); + pRcdb->nocatJournalDescriptor.nocatEventCounters[NV2080_NOCAT_JOURNAL_REPORT_ACTIVITY_UPDATED_IDX]++; + } + } + portAtomicDecrementS32(&concurrentRingBufferAccess); + + // if we did not get a TDR record, create one. + // we need to do it after the ring buffers are released. + if (pNocatEntry == NULL) + { + NOCAT_JOURNAL_PARAMS newEntry; + + portMemSet(&newEntry, 0, sizeof(newEntry)); + newEntry.recType = NV2080_NOCAT_JOURNAL_REC_TYPE_TDR; + newEntry.pSource = (char *)pReasonData->source; + newEntry.subsystem = pReasonData->subsystem; + newEntry.errorCode = pReasonData->errorCode; + newEntry.tdrReason = pReasonData->reasonCode; + return rcdbNocatInsertNocatError(NULL, &newEntry); + } + return NV_OK; +} diff --git a/src/nvidia/src/kernel/diagnostics/nv_debug_dump.c b/src/nvidia/src/kernel/diagnostics/nv_debug_dump.c new file mode 100644 index 000000000..83a830168 --- /dev/null +++ b/src/nvidia/src/kernel/diagnostics/nv_debug_dump.c @@ -0,0 +1,774 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "diagnostics/journal.h" +#include "diagnostics/nv_debug_dump.h" +#include "diagnostics/journal.h" +#include "nvdump.h" +#include "vgpu/rpc.h" + +#include "lib/protobuf/prb.h" +#include "lib/protobuf/prb_util.h" +#include "g_all_dcl_pb.h" +#include "g_nvdebug_pb.h" + +static NV_STATUS prbAppendSubMsg(PRB_ENCODER *pPrbEnc, NvU32 tag, NvU8 *buffer, NvU32 size); + +NV_STATUS nvdConstructEngine_IMPL(OBJGPU *pGpu, NvDebugDump *pNvd, ENGDESCRIPTOR engDesc) +{ + pNvd->pHeadDebugBuffer = NULL; + return NV_OK; +} + +void nvdDestruct_IMPL(NvDebugDump *pNvd) +{ + OBJGPU *pGpu = ENG_GET_GPU(pNvd); + + // + // All debug buffers should have been freed by now + // (clidb takes care of calling nvdFreeDebugBuffer for each + // buffer individually). + // + NV_ASSERT(pNvd->pHeadDebugBuffer == NULL); + + nvdEngineRelease(pGpu, pNvd); +} + +/*! 
+ * @brief + * + * + */ +NV_STATUS +nvdEngineSignUp_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd, + NvdDumpEngineFunc *pDumpEngineFunc, + NvU32 engDesc, + NvU32 flags, + void *pvData +) +{ + NVD_ENGINE_CALLBACK *pEngineCallback; + NVD_ENGINE_CALLBACK *pWalk; + NVD_ENGINE_CALLBACK *pBack; + NvU32 priority = REF_VAL(NVD_ENGINE_FLAGS_PRIORITY, flags); + + pEngineCallback = portMemAllocNonPaged(sizeof(NVD_ENGINE_CALLBACK)); + if (pEngineCallback == NULL) + return NV_ERR_NO_MEMORY; + + pEngineCallback->pDumpEngineFunc = pDumpEngineFunc; + pEngineCallback->engDesc = engDesc; + pEngineCallback->flags = flags; + pEngineCallback->pvData = pvData; + pEngineCallback->pNext = NULL; + + if (pNvd->pCallbacks == NULL) + { + pNvd->pCallbacks = pEngineCallback; + } + else + { + pWalk = pNvd->pCallbacks; + pBack = NULL; + + // Insert in Priority Order + while ((pWalk != NULL) && (REF_VAL(NVD_ENGINE_FLAGS_PRIORITY, pWalk->flags) >= priority)) + { + pBack = pWalk; + pWalk = pWalk->pNext; + } + + // At Head + if (pBack == NULL) + { + pEngineCallback->pNext = pNvd->pCallbacks; + pNvd->pCallbacks = pEngineCallback; + } + else + { + pEngineCallback->pNext = pBack->pNext; + pBack->pNext = pEngineCallback; + } + } + + return NV_OK; +} + +NV_STATUS +nvdEngineRelease_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd +) +{ + NVD_ENGINE_CALLBACK *pEngineCallback; + NVD_ENGINE_CALLBACK *pWalk; + + for (pWalk = pNvd->pCallbacks; pWalk != NULL;) + { + pEngineCallback = pWalk; + pWalk = pWalk->pNext; + portMemFree(pEngineCallback); + } + + return NV_OK; +} + +NV_STATUS +nvdFindEngine_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd, + NvU32 engDesc, + NVD_ENGINE_CALLBACK **ppEngineCallback +) +{ + NVD_ENGINE_CALLBACK *pEngineCallback; + NV_STATUS rmStatus = NV_ERR_MISSING_TABLE_ENTRY; + + *ppEngineCallback = NULL; + + for (pEngineCallback = pNvd->pCallbacks; pEngineCallback != NULL; + pEngineCallback = pEngineCallback->pNext) + { + if (pEngineCallback->engDesc == engDesc) + { + *ppEngineCallback = pEngineCallback; + rmStatus = NV_OK; + break; + } + } + + return rmStatus; +} + +static NV_STATUS +nvdEngineDumpCallbackHelper +( + OBJGPU *pGpu, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + NVD_ENGINE_CALLBACK *pEngineCallback +) +{ + NV_STATUS nvStatus = NV_OK; + NvU8 startingDepth = prbEncNestingLevel(pPrbEnc); + + if (!IS_GSP_CLIENT(pGpu) || + !FLD_TEST_REF(NVD_ENGINE_FLAGS_SOURCE, _GSP, pEngineCallback->flags)) + { + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + pEngineCallback->pDumpEngineFunc(pGpu, pPrbEnc, + pNvDumpState, pEngineCallback->pvData)); + + // Check the protobuf depth and unwind to the correct depth. + NV_ASSERT(startingDepth == prbEncNestingLevel(pPrbEnc)); + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + prbEncUnwindNesting(pPrbEnc, startingDepth)); + } + + if (IS_GSP_CLIENT(pGpu) && + !FLD_TEST_REF(NVD_ENGINE_FLAGS_SOURCE, _CPU, pEngineCallback->flags)) + { + NV_RM_RPC_DUMP_PROTOBUF_COMPONENT(pGpu, nvStatus, pPrbEnc, + pNvDumpState, pEngineCallback->engDesc); + + // Check the protobuf depth and unwind to the correct depth. 
+ NV_ASSERT(startingDepth == prbEncNestingLevel(pPrbEnc)); + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + prbEncUnwindNesting(pPrbEnc, startingDepth)); + } + + return nvStatus; +} + +NV_STATUS +nvdDoEngineDump_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + NvU32 engDesc +) +{ + NVD_ENGINE_CALLBACK *pEngineCallback; + NV_STATUS nvStatus = NV_OK; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_NVDUMP_GPU_INFO)); + + // + // Always dump the GPU engine first. The GPU engine dump saves gpuID in the + // the OCA buffer. + // + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_SILENT, + nvdFindEngine(pGpu, pNvd, NVDUMP_COMPONENT_ENG_GPU, &pEngineCallback)); + + if (pEngineCallback != NULL) + { + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + nvdEngineDumpCallbackHelper(pGpu, pPrbEnc, pNvDumpState, pEngineCallback)); + } + + // Now we can dump the requested engine. + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_SILENT, + nvdFindEngine(pGpu, pNvd, engDesc, &pEngineCallback)); + + if (pEngineCallback != NULL) + { + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + nvdEngineDumpCallbackHelper(pGpu, pPrbEnc, pNvDumpState, pEngineCallback)); + } + + prbEncNestedEnd(pPrbEnc); + + return nvStatus; +} + +NV_STATUS +nvdDumpAllEngines_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState +) +{ + NVD_ENGINE_CALLBACK *pEngineCallback; + NV_STATUS nvStatus = NV_OK; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_NVDUMP_GPU_INFO)); + + for (pEngineCallback = pNvd->pCallbacks; + (prbEncBufLeft(pPrbEnc) > 0) && (pEngineCallback != NULL); + pEngineCallback = pEngineCallback->pNext) + { + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + nvdEngineDumpCallbackHelper(pGpu, pPrbEnc, pNvDumpState, pEngineCallback)); + + // Check to see if GPU is inaccessible + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_INACCESSIBLE)) + { + pNvDumpState->bGpuAccessible = NV_FALSE; + } + } + + prbEncNestedEnd(pPrbEnc); + + return nvStatus; +} + +/*! + * @brief Performs a dump of the specified GPU component into the given buffer. + * + * @param[in] pGpu Parent GPU object + * @param[in] component NVDUMP_IS_GPU_COMPONENT(component) + * @param[in, out] pBuffer Buffer to populate with dump results + * @param[in] policy Policy for buffer allocation: use this one, allocate one or count + * @param[in, out] pBufferCallback Callback function for use with fixed-sized buffer encoding. + * If this is NULL then pBuffer->size is assumed to be large + * enough for the whole dump. Otherwise pBufferCallback is called + * when the buffer is full or when a message ends, allowing the + * the callback to construct the whole buffer piece by piece. + * + * @return NV_OK on success and specific error status on failure + */ +NV_STATUS +nvdDumpComponent_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd, + NvU32 component, + NVDUMP_BUFFER *pBuffer, + NVDUMP_BUFFER_POLICY policy, + PrbBufferCallback *pBufferCallback +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + PRB_ENCODER encoder; + NVD_STATE *pNvDumpState = &pRcDB->nvDumpState; + NV_STATUS status = NV_OK; + void *pBuff; + + // Validate arguments. + NV_ASSERT_OR_RETURN(pBuffer != NULL, NV_ERR_INVALID_ARGUMENT); + + // Clear dump buffer. + pBuffer->curNumBytes = 0; + + // Make sure we were not reentered. 
+ if (pNvDumpState->bDumpInProcess) + return NV_ERR_STATE_IN_USE; + + // Initialize dump state. + pNvDumpState->bDumpInProcess = NV_TRUE; + pNvDumpState->bugCheckCode = 0; + pNvDumpState->internalCode = NVD_ERROR_CODE(NVD_EXTERNALLY_GENERATED, 0); + pNvDumpState->bRMLock = NV_TRUE; // Assume we have the RM lock. + pNvDumpState->bGpuAccessible = NV_FALSE; + pNvDumpState->initialbufferSize = pBuffer->size; + pNvDumpState->nvDumpType = NVD_DUMP_TYPE_JOURNAL; + + rcdbDumpInitGpuAccessibleFlag(pGpu, pRcDB); + + // Start encoding protobuf dump message. + switch (policy) + { + case NVDUMP_BUFFER_PROVIDED: + prbEncStart(&encoder, NVDEBUG_NVDUMP, NvP64_VALUE(pBuffer->address), + pBuffer->size, pBufferCallback); + break; + case NVDUMP_BUFFER_ALLOCATE: + status = prbEncStartAlloc(&encoder, NVDEBUG_NVDUMP, pBuffer->size, + pBufferCallback); + if (status != NV_OK) + return status; + break; + case NVDUMP_BUFFER_COUNT: + prbEncStartCount(&encoder, NVDEBUG_NVDUMP, NVDUMP_MAX_DUMP_SIZE); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + switch (component) + { + case NVDUMP_COMPONENT_DEBUG_BUFFERS: + { + status = nvdDumpDebugBuffers(pGpu, pNvd, &encoder); + break; + } + case NVDUMP_COMPONENT_ENG_GPU: + case NVDUMP_COMPONENT_ENG_MC: + case NVDUMP_COMPONENT_ENG_FIFO: + case NVDUMP_COMPONENT_ENG_GRAPHICS: + case NVDUMP_COMPONENT_ENG_FB: + case NVDUMP_COMPONENT_ENG_DISP: + case NVDUMP_COMPONENT_ENG_FAN: + case NVDUMP_COMPONENT_ENG_THERMAL: + case NVDUMP_COMPONENT_ENG_FUSE: + case NVDUMP_COMPONENT_ENG_VBIOS: + case NVDUMP_COMPONENT_ENG_PERF: + case NVDUMP_COMPONENT_ENG_BUS: + case NVDUMP_COMPONENT_ENG_PMU: + case NVDUMP_COMPONENT_ENG_CE: + case NVDUMP_COMPONENT_ENG_LPWR: + case NVDUMP_COMPONENT_ENG_NVD: + case NVDUMP_COMPONENT_ENG_VOLT: + case NVDUMP_COMPONENT_ENG_CLK: + case NVDUMP_COMPONENT_ENG_SEC2: + case NVDUMP_COMPONENT_ENG_NVLINK: + case NVDUMP_COMPONENT_ENG_BSP: + case NVDUMP_COMPONENT_ENG_DPU: + case NVDUMP_COMPONENT_ENG_FBFLCN: + case NVDUMP_COMPONENT_ENG_HDA: + case NVDUMP_COMPONENT_ENG_MSENC: + case NVDUMP_COMPONENT_ENG_GSP: + { + status = nvdDoEngineDump(pGpu, + pNvd, + &encoder, + pNvDumpState, + component); + break; + } + case NVDUMP_COMPONENT_ENG_ALL: + { + status = nvdDumpAllEngines(pGpu, + pNvd, + &encoder, + pNvDumpState); + break; + } + default: + { + NV_PRINTF(LEVEL_ERROR, + "called with invalid component %u selected.\n", + component); + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + // Finish encoding protobuf dump message. + pBuffer->curNumBytes = prbEncFinish(&encoder, &pBuff); + pBuffer->address = NV_SIGN_EXT_PTR_TO_NvP64(pBuff); + pNvDumpState->bDumpInProcess = NV_FALSE; + + return status; +} + +/*! + * @brief Performs a dump of the debug buffers + * + * @param[in] pGpu Parent GPU object + * @param[in] pNvd Parent NVD object + * @param[in] encoder Protobuf encoder to use + * + * @return NV_OK on success and specific error status on failure + */ +NV_STATUS +nvdDumpDebugBuffers_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd, + PRB_ENCODER *pPrbEnc +) +{ + NVD_DEBUG_BUFFER *pCurrent = pNvd->pHeadDebugBuffer; + NV_STATUS status = NV_OK; + NV_STATUS endStatus = NV_OK; + NvP64 pUmdBuffer = NvP64_NULL; + NvP64 priv = NvP64_NULL; + NvU32 bufSize = 0; + + status = prbEncNestedStart(pPrbEnc, NVDEBUG_NVDUMP_DCL_MSG); + if (status != NV_OK) + return status; + + while (pCurrent != NULL) + { + bufSize = (NvU32)pCurrent->pMemDesc->Size; + + // Map DebugBuffer to a kernel address + status = memdescMap(pCurrent->pMemDesc, 0, bufSize, NV_TRUE, // Kernel mapping? 
+ NV_PROTECT_READABLE, &pUmdBuffer, &priv); + if (status != NV_OK) + break; + + status = prbAppendSubMsg(pPrbEnc, pCurrent->tag, NvP64_VALUE(pUmdBuffer), bufSize); + + // Unmap DebugBuffer address + memdescUnmap(pCurrent->pMemDesc, NV_TRUE, // Kernel mapping? + osGetCurrentProcess(), pUmdBuffer, priv); + + // Check the error state AFTER unmapping the memory desc + if (status != NV_OK) + break; + + pCurrent = pCurrent->pNext; + } + + endStatus = prbEncNestedEnd(pPrbEnc); + return (status != NV_OK) ? status : endStatus; +} + +static +NV_STATUS +prbAppendSubMsg +( + PRB_ENCODER *pPrbEnc, + NvU32 tag, + NvU8 *buffer, + NvU32 size +) +{ + NVDUMP_SUB_ALLOC_HEADER *header = NULL; + NvU8 *pCurrent = buffer; + NvU8 *subAlloc = NULL; + NV_STATUS status = NV_OK; + NV_STATUS endStatus = NV_OK; + NvU32 subMsgLen = 0; + NvU32 i; + + // Create field descriptor + const PRB_FIELD_DESC field_desc = { + tag, + { + PRB_OPTIONAL, + PRB_MESSAGE, + PRB_STUBBED_FIELD, + }, + 0, + 0, + PRB_MAYBE_FIELD_NAME("") + PRB_MAYBE_FIELD_DEFAULT(0) + }; + + // Start encoding the new nested message + status = prbEncNestedStart(pPrbEnc, &field_desc); + if (status != NV_OK) + return status; + + for (i = 0; i < NVDUMP_DEBUG_BUFFER_MAX_SUBALLOCATIONS; i++) + { + header = (NVDUMP_SUB_ALLOC_HEADER *)pCurrent; + subAlloc = pCurrent + sizeof(NVDUMP_SUB_ALLOC_HEADER); + + // If valid, copy contents + if (header->flags & NVDUMP_SUB_ALLOC_VALID) + { + subMsgLen = header->end - header->start; + status = prbEncStubbedAddBytes(pPrbEnc, subAlloc, subMsgLen); + if (status != NV_OK) + goto done; + } + if (!(header->flags & NVDUMP_SUB_ALLOC_HAS_MORE)) + break; + + pCurrent = subAlloc + header->length; + } + +done: + endStatus = prbEncNestedEnd(pPrbEnc); + return (status != NV_OK) ? status : endStatus; +}; + +/*! + * @brief Routine to dump miscellaneous debug info. + * + * Consider this a temporary area for debug fields that we need to check in + * quickly. Move them out once we decide where they really belong and get + * the required code reviews. + * + * @param[in] pGpu GPU object + * @param[in, out] pPrbEnc ProtoBuf encoder + * @param[in] pNvDumpState NV Dump State + * @param[in] pvData Opaque parameter passed to nvdEngineSignUp. + * + * @return NV_OK on success and specific error status on failure + */ +static +NV_STATUS +_nvdDumpEngineFunc +( + OBJGPU *pGpu, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + void *pvData +) +{ + switch (DRF_VAL(_NVD, _ERROR_CODE, _MAJOR, pNvDumpState->internalCode)) + { + case NVD_GPU_GENERATED: + case NVD_SKIP_ZERO: + // don't report on these internal codes. + return NV_OK; + break; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_GPUINFO_ENG_NVD)); + + prbEncNestedEnd(pPrbEnc); + + return NV_OK; +} + +NV_STATUS +nvdStateInitLocked_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd +) +{ + + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + + rcdbSavePreviousDriverVersion(pGpu, pRcDB); + + nvdEngineSignUp(pGpu, + pNvd, + _nvdDumpEngineFunc, + NVDUMP_COMPONENT_ENG_NVD, + REF_DEF(NVD_ENGINE_FLAGS_PRIORITY, _MED) | + REF_DEF(NVD_ENGINE_FLAGS_SOURCE, _GSP), + (void *)pNvd); + + return NV_OK; +} + +/*! + * @brief Creates a memory descriptor, and allocates memory for a debug buffer. 
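 *
 * Illustrative call pattern (editor's sketch, not part of this change; it
 * assumes the usual non-_IMPL wrapper name and uses a made-up tag and size):
 *     MEMORY_DESCRIPTOR *pMemDesc = NULL;
 *     NvU32 size = 0x1000;
 *     NV_STATUS status = nvdAllocDebugBuffer(pGpu, pNvd,
 *                            NvU32_BUILD('D','B','G','0'), &size, &pMemDesc);
 * On success *pMemDesc describes the backing allocation, and size may have
 * been clamped down to NVDUMP_DEBUG_BUFFER_MAX_SIZE.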
+ * + * @param[in] pGpu Parent GPU object + * @param[inout]size Size of requested buffer / Actual size allocated + * @param[out] pMemDesc Memory descriptor for the allocated buffer + * + * @return NV_OK on success and specific error status on failure + */ +NV_STATUS +nvdAllocDebugBuffer_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd, + NvU32 tag, + NvU32 *pSize, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + NVD_DEBUG_BUFFER *pNewDebugBuffer; + NV_STATUS status = NV_OK; + + // Make the result NULL in case of a failure. + *ppMemDesc = NULL; + + // Reduce large buffer requests to a max allowed size + if (*pSize > NVDUMP_DEBUG_BUFFER_MAX_SIZE) { + *pSize = NVDUMP_DEBUG_BUFFER_MAX_SIZE; + } + + // Create memory descriptor + status = memdescCreate(&pMemDesc, pGpu, *pSize, 0, NV_TRUE, ADDR_SYSMEM, + NV_MEMORY_WRITECOMBINED, MEMDESC_FLAGS_NONE); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "nvdAllocDebugBuffer - memdescCreate Failed: %x\n", + status); + return status; + } + + // Allocate backing memory + status = memdescAlloc(pMemDesc); + if (status != NV_OK) { + + // Destroy the memory descriptor + memdescDestroy(pMemDesc); + + NV_PRINTF(LEVEL_ERROR, "nvdAllocDebugBuffer - memdescAlloc Failed: %x\n", + status); + return status; + } + + // Create new link in debug buffer list + pNewDebugBuffer = portMemAllocNonPaged(sizeof(NVD_DEBUG_BUFFER)); + if (pNewDebugBuffer == NULL) { + + // Free backing memory + memdescFree(pMemDesc); + + // Destroy the memory descriptor + memdescDestroy(pMemDesc); + + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, + "nvdAllocDebugBuffer - portMemAllocNonPaged Failed: %x\n", status); + return status; + } + + // Insert new link at the front of the list + portMemSet(pNewDebugBuffer, 0, sizeof(NVD_DEBUG_BUFFER)); + pNewDebugBuffer->tag = tag; + pNewDebugBuffer->pMemDesc = pMemDesc; + pNewDebugBuffer->pNext = pNvd->pHeadDebugBuffer; + pNvd->pHeadDebugBuffer = pNewDebugBuffer; + + // Return memory descriptor + *ppMemDesc = pMemDesc; + + return NV_OK; +} + +/*! + * @brief Frees the memory associated with the provided debug buffer. + * + * @param[in] pGpu Parent GPU object + * @param[in] pMemDesc Memory descriptor + * + * @return NV_OK on success and specific error status on failure + */ +NV_STATUS +nvdFreeDebugBuffer_IMPL +( + OBJGPU *pGpu, + NvDebugDump *pNvd, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NVD_DEBUG_BUFFER *pCurrDebugBuffer = pNvd->pHeadDebugBuffer; + NVD_DEBUG_BUFFER *pPrevDebugBuffer = NULL; + + while (pCurrDebugBuffer != NULL) { + if (pCurrDebugBuffer->pMemDesc == pMemDesc) { + + // Target node is the head + if (pPrevDebugBuffer == NULL) { + pNvd->pHeadDebugBuffer = pCurrDebugBuffer->pNext; + } + + // Target node is some other node + else { + pPrevDebugBuffer->pNext = pCurrDebugBuffer->pNext; + } + + // Free the backing memory, mem descriptor, and list node + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + portMemFree(pCurrDebugBuffer); + + return NV_OK; + } + pPrevDebugBuffer = pCurrDebugBuffer; + pCurrDebugBuffer = pCurrDebugBuffer->pNext; + } + + // Mem descriptor not found + NV_PRINTF(LEVEL_ERROR, + "nvdFreeDebugBuffer - Memory Descriptor not found in list!\n"); + return NV_ERR_GENERIC; +} + + +/*! + * @brief Returns a simple ENUM for OCA Dump Buffer Size + * + * @param[in] pGpu Parent GPU object + * + * @return NVD_DUMP_SIZE as define in nv_debug_dump.h + * NVD_DUMP_SIZE_JOURNAL_WRITE, // Very small records only. 
+ * NVD_DUMP_SIZE_SMALL, // Assume 8K - 512 K total + * NVD_DUMP_SIZE_MEDIUM, // Mini Dump >512K + * NVD_DUMP_SIZE_LARGE // Megs of space + */ +NVD_DUMP_SIZE +nvDumpGetDumpBufferSizeEnum +( + NVD_STATE *pNvDumpState +) +{ + if (pNvDumpState->nvDumpType == NVD_DUMP_TYPE_JOURNAL) // Check Journal first + { + return NVD_DUMP_SIZE_JOURNAL_WRITE; + } + else if (pNvDumpState->initialbufferSize < 0x80000) // Less than 512K, small MD + { + return NVD_DUMP_SIZE_SMALL; + } + else if (pNvDumpState->initialbufferSize < 0x200000) // Less than 2M, large MD + { + return NVD_DUMP_SIZE_MEDIUM; + } + // else must be really large // > 2M = full dump + return NVD_DUMP_SIZE_LARGE; + +} diff --git a/src/nvidia/src/kernel/diagnostics/nvlog.c b/src/nvidia/src/kernel/diagnostics/nvlog.c new file mode 100644 index 000000000..677d3726e --- /dev/null +++ b/src/nvidia/src/kernel/diagnostics/nvlog.c @@ -0,0 +1,727 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlog/nvlog.h" +#include "nvrm_registry.h" +#include "os/os.h" +#include "diagnostics/tracer.h" +#include "tls/tls.h" +#include "core/locks.h" + +// +// Buffer push method declarations +// +NvBool nvlogRingBufferPush (NVLOG_BUFFER *pBuffer, NvU8 *pData, NvU32 dataSize); +NvBool nvlogNowrapBufferPush(NVLOG_BUFFER *pBuffer, NvU8 *pData, NvU32 dataSize); +NvBool nvlogStringBufferPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize); +NvBool nvlogKernelLogPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize); + +static void _printBase64(NvU8 *pData, NvU32 dataSize); +static NV_STATUS _allocateNvlogBuffer(NvU32 size, NvU32 flags, NvU32 tag, + NVLOG_BUFFER **ppBuffer); +static void _deallocateNvlogBuffer(NVLOG_BUFFER *pBuffer); + +volatile NvU32 nvlogInitCount; +static void *nvlogRegRoot; + +// Zero (null) buffer definition. +static NVLOG_BUFFER _nvlogZeroBuffer = +{ + {nvlogStringBufferPush}, + 0, + NvU32_BUILD('l','l','u','n'), + 0, + 0, + 0 +}; + +NVLOG_LOGGER NvLogLogger = +{ + NVLOG_LOGGER_VERSION, + + // Default buffers + { + // The 0th buffer just prints to the screen in debug builds. + &_nvlogZeroBuffer + }, + + // Next available slot + 1, + + // Free slots + NVLOG_MAX_BUFFERS-1, + + // Main lock, must be allocated at runtime. 
+ NULL +}; + +#define NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer) \ + ((hBuffer < NVLOG_MAX_BUFFERS) && (NvLogLogger.pBuffers[hBuffer] != NULL)) + +NV_STATUS +nvlogInit(void *pData) +{ + nvlogRegRoot = pData; + portInitialize(); + NvLogLogger.mainLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (NvLogLogger.mainLock == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + tlsInitialize(); + return NV_OK; +} + +void nvlogUpdate() { +} + +NV_STATUS +nvlogDestroy() +{ + NvU32 i; + + tlsShutdown(); + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + nvlogDeallocBuffer(i); + } + if (NvLogLogger.mainLock != NULL) + { + portSyncSpinlockDestroy(NvLogLogger.mainLock); + NvLogLogger.mainLock = NULL; + } + + /// @todo Destructor should return void. + portShutdown(); + return NV_OK; +} + +static NV_STATUS +_allocateNvlogBuffer +( + NvU32 size, + NvU32 flags, + NvU32 tag, + NVLOG_BUFFER **ppBuffer +) +{ + NVLOG_BUFFER *pBuffer; + NVLOG_BUFFER_PUSHFUNC pushfunc; + + // Sanity check on some invalid combos: + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _EXPANDABLE, _YES, flags)) + { + // Only nonwrapping buffers can be expanded + if (!FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, flags)) + return NV_ERR_INVALID_ARGUMENT; + // Full locking required to expand the buffer. + if (!FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _LOCKING, _FULL, flags)) + return NV_ERR_INVALID_ARGUMENT; + } + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _SYSTEMLOG, flags)) + { + // System log does not need to allocate memory for buffer. + pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogKernelLogPush; + size = 0; + } + else + { + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _RING, flags)) + { + pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogRingBufferPush; + } + else if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _NOWRAP, flags)) + { + pushfunc = (NVLOG_BUFFER_PUSHFUNC) nvlogNowrapBufferPush; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _NONPAGED, _YES, flags)) + pBuffer = portMemAllocNonPaged(sizeof(*pBuffer) + size); + else + pBuffer = portMemAllocPaged(sizeof(*pBuffer) + size); + + if (!pBuffer) + return NV_ERR_NO_MEMORY; + + portMemSet(pBuffer, 0, sizeof(*pBuffer) + size); + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _OCA, _YES, flags)) + { + osAddRecordForCrashLog(pBuffer, NV_OFFSETOF(NVLOG_BUFFER, data) + size); + } + + pBuffer->push.fn = pushfunc; + pBuffer->size = size; + pBuffer->flags = flags; + pBuffer->tag = tag; + + *ppBuffer = pBuffer; + + return NV_OK; +} + +static void +_deallocateNvlogBuffer +( + NVLOG_BUFFER *pBuffer +) +{ + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _OCA, _YES, pBuffer->flags)) + osDeleteRecordForCrashLog(pBuffer); + + portMemFree(pBuffer); +} + +NV_STATUS +nvlogAllocBuffer +( + NvU32 size, + NvU32 flags, + NvU32 tag, + NVLOG_BUFFER_HANDLE *pBufferHandle, + ... 
+) +{ + NVLOG_BUFFER *pBuffer; + NV_STATUS status; + + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _TYPE, _SYSTEMLOG, flags)) + { + } + else + { + NV_ASSERT_OR_RETURN(NvLogLogger.totalFree > 0, + NV_ERR_INSUFFICIENT_RESOURCES); + } + + status = _allocateNvlogBuffer(size, flags, tag, &pBuffer); + + if (status != NV_OK) + { + return status; + } + + portSyncSpinlockAcquire(NvLogLogger.mainLock); + + if (NvLogLogger.nextFree < NVLOG_MAX_BUFFERS) + { + NvLogLogger.pBuffers[NvLogLogger.nextFree] = pBuffer; + *pBufferHandle = NvLogLogger.nextFree++; + NvLogLogger.totalFree--; + } + else + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Find the next slot in the buffers array + while (NvLogLogger.nextFree < NVLOG_MAX_BUFFERS) + { + if (NvLogLogger.pBuffers[NvLogLogger.nextFree] != NULL) + NvLogLogger.nextFree++; + else break; + } + portSyncSpinlockRelease(NvLogLogger.mainLock); + + if (status != NV_OK) + { + portMemFree(pBuffer); + } + + return status; +} + +void +nvlogDeallocBuffer +( + NVLOG_BUFFER_HANDLE hBuffer +) +{ + NVLOG_BUFFER *pBuffer; + + if ((hBuffer == 0) || !NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer)) + return; + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + pBuffer->flags = FLD_SET_DRF(LOG_BUFFER, _FLAGS, _DISABLED, + _YES, pBuffer->flags); + + while (pBuffer->threadCount > 0) { /*spin*/ } + portSyncSpinlockAcquire(NvLogLogger.mainLock); + NvLogLogger.pBuffers[hBuffer] = NULL; + NvLogLogger.nextFree = NV_MIN(hBuffer, NvLogLogger.nextFree); + NvLogLogger.totalFree++; + portSyncSpinlockRelease(NvLogLogger.mainLock); + + _deallocateNvlogBuffer(pBuffer); +} + +NV_STATUS +nvlogWriteToBuffer +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU8 *pData, + NvU32 size +) +{ + NvBool status; + NVLOG_BUFFER *pBuffer; + + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pData != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + // Normal condition when fetching nvLog from NV0000_CTRL_CMD_NVD_GET_NVLOG. + if (FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _DISABLED, _YES, pBuffer->flags)) + return NV_ERR_NOT_READY; + + portAtomicIncrementS32(&pBuffer->threadCount); + status = pBuffer->push.fn(pBuffer, pData, size); + // Get pBuffer from the handle again, as it might have realloc'd + portAtomicDecrementS32(&NvLogLogger.pBuffers[hBuffer]->threadCount); + + return (status == NV_TRUE) ? 
NV_OK : NV_ERR_BUFFER_TOO_SMALL; +} + + + +NV_STATUS +nvlogExtractBufferChunk +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 chunkNum, + NvU32 *pChunkSize, + NvU8 *pDest +) +{ + NVLOG_BUFFER *pBuffer; + NvU32 index; + + NV_ASSERT_OR_RETURN(*pChunkSize > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDest != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + index = chunkNum * (*pChunkSize); + NV_ASSERT_OR_RETURN(index <= pBuffer->size, NV_ERR_OUT_OF_RANGE); + *pChunkSize = NV_MIN(*pChunkSize, (pBuffer->size - index)); + + portSyncSpinlockAcquire(NvLogLogger.mainLock); + portMemCopy(pDest, *pChunkSize, &pBuffer->data[index], *pChunkSize); + portSyncSpinlockRelease(NvLogLogger.mainLock); + + return NV_OK; +} + + +NV_STATUS +nvlogGetBufferSize +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pSize +) +{ + NV_ASSERT_OR_RETURN(pSize != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pSize = NvLogLogger.pBuffers[hBuffer]->size; + return NV_OK; +} + +NV_STATUS +nvlogGetBufferTag +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pTag +) +{ + NV_ASSERT_OR_RETURN(pTag != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pTag = NvLogLogger.pBuffers[hBuffer]->tag; + return NV_OK; +} + +NV_STATUS +nvlogGetBufferFlags +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU32 *pFlags +) +{ + NV_ASSERT_OR_RETURN(pFlags != NULL, NV_ERR_INVALID_POINTER); + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + *pFlags = NvLogLogger.pBuffers[hBuffer]->flags; + return NV_OK; +} + + +NV_STATUS +nvlogPauseLoggingToBuffer +( + NVLOG_BUFFER_HANDLE hBuffer, + NvBool bPause +) +{ + NVLOG_BUFFER *pBuffer; + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + pBuffer->flags = (bPause) + ? 
FLD_SET_DRF(LOG, _BUFFER_FLAGS, _DISABLED, _YES, pBuffer->flags) + : FLD_SET_DRF(LOG, _BUFFER_FLAGS, _DISABLED, _NO, pBuffer->flags); + + return NV_OK; +} + + +NV_STATUS +nvlogPauseAllLogging +( + NvBool bPause +) +{ + return NV_OK; +} + +NV_STATUS +nvlogGetBufferHandleFromTag +( + NvU32 tag, + NVLOG_BUFFER_HANDLE *pBufferHandle +) +{ + NvU32 i; + + NV_ASSERT_OR_RETURN(pBufferHandle != NULL, NV_ERR_INVALID_POINTER); + + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + if (NvLogLogger.pBuffers[i] != NULL) + { + if (NvLogLogger.pBuffers[i]->tag == tag) + { + *pBufferHandle = i; + return NV_OK; + } + } + } + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +nvlogGetBufferSnapshot +( + NVLOG_BUFFER_HANDLE hBuffer, + NvU8 *pDest, + NvU32 destSize +) +{ + NVLOG_BUFFER *pBuffer; + + NV_ASSERT_OR_RETURN(NVLOG_IS_VALID_BUFFER_HANDLE(hBuffer), + NV_ERR_INVALID_OBJECT_HANDLE); + + NV_ASSERT_OR_RETURN(pDest != NULL, NV_ERR_INVALID_POINTER); + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + + NV_ASSERT_OR_RETURN(destSize >= NVLOG_BUFFER_SIZE(pBuffer), + NV_ERR_BUFFER_TOO_SMALL); + + portSyncSpinlockAcquire(NvLogLogger.mainLock); + portMemCopy(pDest, NVLOG_BUFFER_SIZE(pBuffer), pBuffer, NVLOG_BUFFER_SIZE(pBuffer)); + portSyncSpinlockRelease(NvLogLogger.mainLock); + + return NV_OK; +} + + + +NvBool +nvlogRingBufferPush +( + NVLOG_BUFFER *pBuffer, + NvU8 *pData, + NvU32 dataSize +) +{ + NvU32 writeSize; + NvU32 oldPos; + NvU32 lock = DRF_VAL(LOG, _BUFFER_FLAGS, _LOCKING, pBuffer->flags); + + if (lock != NVLOG_BUFFER_FLAGS_LOCKING_NONE) + portSyncSpinlockAcquire(NvLogLogger.mainLock); + + oldPos = pBuffer->pos; + pBuffer->extra.ring.overflow += (pBuffer->pos + dataSize) / pBuffer->size; + pBuffer->pos = (pBuffer->pos + dataSize) % pBuffer->size; + + // State locking does portMemCopy unlocked. + if (lock == NVLOG_BUFFER_FLAGS_LOCKING_STATE) + portSyncSpinlockRelease(NvLogLogger.mainLock); + + while (dataSize > 0) + { + writeSize = NV_MIN(pBuffer->size - oldPos, dataSize); + portMemCopy(&pBuffer->data[oldPos], writeSize, pData, writeSize); + oldPos = 0; + dataSize -= writeSize; + pData += writeSize; + } + + if (lock == NVLOG_BUFFER_FLAGS_LOCKING_FULL) + portSyncSpinlockRelease(NvLogLogger.mainLock); + + return NV_TRUE; +} + +NvBool +nvlogNowrapBufferPush +( + NVLOG_BUFFER *pBuffer, + NvU8 *pData, + NvU32 dataSize +) +{ + NvU32 oldPos; + NvU32 lock = DRF_VAL(LOG, _BUFFER_FLAGS, _LOCKING, pBuffer->flags); + + if (pBuffer->pos + dataSize >= pBuffer->size) + { + NvBool bExpandable = FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _EXPANDABLE, _YES, pBuffer->flags); + NvBool bNonPaged = FLD_TEST_DRF(LOG_BUFFER, _FLAGS, _NONPAGED, _YES, pBuffer->flags); + + // Expandable buffer, and we are at IRQL where we can do realloc + if (bExpandable && + ((bNonPaged && portMemExSafeForNonPagedAlloc()) || (!bNonPaged && portMemExSafeForPagedAlloc()))) + { + NVLOG_BUFFER *pNewBuffer; + NvU32 i; + NvU32 newSize = pBuffer->size * 2; + NvU32 allocSize = sizeof(*pBuffer) + newSize; + + pNewBuffer = bNonPaged ? portMemAllocNonPaged(allocSize) : portMemAllocPaged(allocSize); + if (pNewBuffer == NULL) + return NV_FALSE; + + // + // Two threads couid have entered this block at the same time, and + // both will have allocated their own bigger buffer. Only the one + // that takes the spinlock first should do the copy and the swap. 
+ // + portSyncSpinlockAcquire(NvLogLogger.mainLock); + // Check if this buffer is still there and was not swapped for a bigger one + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + if (NvLogLogger.pBuffers[i] == pBuffer) + break; + } + if (i == NVLOG_MAX_BUFFERS) + { + // Another thread has already expanded the buffer, bail out. + // TODO: Maybe we could store the handle and then try again? + portSyncSpinlockRelease(NvLogLogger.mainLock); + return NV_FALSE; + } + + portMemCopy(pNewBuffer, allocSize, pBuffer, sizeof(*pBuffer)+pBuffer->size); + pNewBuffer->size = newSize; + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + if (NvLogLogger.pBuffers[i] == pBuffer) + NvLogLogger.pBuffers[i] = pNewBuffer; + } + portSyncSpinlockRelease(NvLogLogger.mainLock); + + // + // Before we can free this buffer, we need to make sure any threads + // that were still accessing it are done. Spin on volatile threadCount + // NOTE: threadCount includes the current thread too. + // + while (pBuffer->threadCount > 1) { /*spin*/ } + portMemFree(pBuffer); + pBuffer = pNewBuffer; + } + else + { + return NV_FALSE; + } + } + + if (lock != NVLOG_BUFFER_FLAGS_LOCKING_NONE) + portSyncSpinlockAcquire(NvLogLogger.mainLock); + + oldPos = pBuffer->pos; + pBuffer->pos = oldPos + dataSize; + + // State locking does portMemCopy unlocked. + if (lock == NVLOG_BUFFER_FLAGS_LOCKING_STATE) + portSyncSpinlockRelease(NvLogLogger.mainLock); + + portMemCopy(&pBuffer->data[oldPos], dataSize, pData, dataSize); + + if (lock == NVLOG_BUFFER_FLAGS_LOCKING_FULL) + portSyncSpinlockRelease(NvLogLogger.mainLock); + + return NV_TRUE; +} + +NvBool +nvlogStringBufferPush +( + NVLOG_BUFFER *unused, + NvU8 *pData, + NvU32 dataSize +) +{ + return NV_TRUE; +} + +// +// Prints the buffer encoded as base64, with a prefix for easy grepping. +// Base64 allows the padding characters ('=') to appear anywhere, not just at +// the end, so it is fine to print buffers one at a time without merging. 
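// For example (editor's illustration, derived from the code below): a two-byte
// payload { 0x4E, 0x56 } ("NV") is emitted on its own line as
//     nvrm-nvlog: TlY=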
+// +static void _printBase64(NvU8 *pData, NvU32 dataSize) +{ + const NvU8 base64_key[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + NvU8 output[64+1]; // 64 bas64 characters per line of output + NvU32 i; + + do + { + i = 0; + while (i < (sizeof(output)-1) && (dataSize > 0)) + { + output[i++] = base64_key[pData[0] >> 2]; + if (dataSize == 1) + { + output[i++] = base64_key[(pData[0] << 4) & 0x3F]; + output[i++] = '='; + output[i++] = '='; + dataSize = 0; + break; + } + + output[i++] = base64_key[((pData[0] << 4) & 0x3F) | (pData[1] >> 4)]; + if (dataSize == 2) + { + output[i++] = base64_key[(pData[1] << 2) & 0x3F]; + output[i++] = '='; + dataSize = 0; + break; + } + + output[i++] = base64_key[((pData[1] << 2) & 0x3F) | (pData[2] >> 6)]; + output[i++] = base64_key[pData[2] & 0x3F]; + + pData += 3; + dataSize -= 3; + } + output[i] = 0; + portDbgPrintf("nvrm-nvlog: %s\n", output); + } while (dataSize > 0); +} + +NvBool nvlogKernelLogPush(NVLOG_BUFFER *unused, NvU8 *pData, NvU32 dataSize) +{ + PORT_UNREFERENCED_VARIABLE(unused); + _printBase64(pData, dataSize); + return NV_TRUE; +} + +void nvlogDumpToKernelLog(NvBool bDumpUnchangedBuffersOnlyOnce) +{ + NvU32 i; + static NvU32 lastDumpPos[NVLOG_MAX_BUFFERS]; + + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + NVLOG_BUFFER *pBuf = NvLogLogger.pBuffers[i]; + + if (pBuf && pBuf->size) + { + if (bDumpUnchangedBuffersOnlyOnce) + { + NvU32 pos = pBuf->pos + (pBuf->size * pBuf->extra.ring.overflow); + + //Dump the buffer only if it's contents have changed + if (lastDumpPos[i] != pos) + { + lastDumpPos[i] = pos; + _printBase64((NvU8*)pBuf, NVLOG_BUFFER_SIZE(pBuf)); + } + } + else + { + _printBase64((NvU8*)pBuf, NVLOG_BUFFER_SIZE(pBuf)); + } + } + } +} + +void nvlogDumpToKernelLogIfEnabled(void) +{ + NvU32 dumpNvlogValue; + + // Debug and develop builds already dump everything as it happens. +#if defined(DEBUG) || defined(DEVELOP) + return; +#endif + + // Enable only if the regkey has been set + if (osReadRegistryDword(NULL, NV_REG_STR_RM_DUMP_NVLOG, &dumpNvlogValue) != NV_OK) + return; + + if (dumpNvlogValue != NV_REG_STR_RM_DUMP_NVLOG_ENABLE) + return; + + nvlogDumpToKernelLog(NV_FALSE); +} + diff --git a/src/nvidia/src/kernel/diagnostics/nvlog_printf.c b/src/nvidia/src/kernel/diagnostics/nvlog_printf.c new file mode 100644 index 000000000..9a2b0d305 --- /dev/null +++ b/src/nvidia/src/kernel/diagnostics/nvlog_printf.c @@ -0,0 +1,1750 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************************************************************\ +* * +* Description: Common debug print defines and functions * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "core/system.h" +#include "os/os.h" // to pick up declarations for osDelay() and osDelayUs() +#include "nvrm_registry.h" + +#include // NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE + +static int inttodecfmtstr(NvS64 sval, char *dest, int fieldwidth, int flags); +static int uinttohexfmtstr(NvU64 uval, char *dest, int fieldwidth, int flags); +static int strtofmtstr(const char *src, char *dest, char *destLimit, int fieldwidth, int precision, int flags); + +#if 0 +static int float64todecfmtstr(F064 f64val, NvU8 *dest, int fieldwidth, int precision, int flags); +#endif + +// +// Numeric & string conversion flags (used if you call the 'XtoYfmtstr' routines directly) +// +enum { + DONTTERMINATE = 1, // Don't null-terminate the string if this flag is set + UNSIGNED_F = 2, // Force an unsigned number conversion (other sign options are ignored) + PLUSSIGN_F = 4, // For signed numbers >= 0, force a '+' in the sign position + SPACESIGN_F = 8, // For signed numbers >= 0, force a space in the sign position + LEFTALIGN_F = 16, // Left-justify the result in the destination field (overrides zero fill) + ZEROFILL_F = 32, // Use leading zeros for padding to a field width + LOWERCASE_F = 64 // Use lower case hex digits: a-f instead of A-F +}; + +// +// nvDbgBreakpointEnabled - Returns true if triggering a breakpoint is allowed +// +NvBool osDbgBreakpointEnabled(void); +NvBool nvDbgBreakpointEnabled() +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + if (pSys != NULL) + { + if (pSys->getProperty(pSys, PDB_PROP_SYS_DEBUGGER_DISABLED)) + return NV_FALSE; + } + return osDbgBreakpointEnabled(); +} + +#if NV_PRINTF_STRINGS_ALLOWED +static PORT_SPINLOCK *_nv_dbg_lock = NULL; +static char _nv_dbg_string[MAX_ERROR_STRING]; + +// +// nvDbgInit - Allocate the printf spinlock +// +NvBool +nvDbgInit(void) +{ + if (NULL != _nv_dbg_lock) + { + // already initialized + return NV_TRUE; + } + if (portInitialize() != NV_OK) + return NV_FALSE; + + _nv_dbg_lock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (_nv_dbg_lock == NULL) + return NV_FALSE; + else + return NV_TRUE; +} + +// +// nvDbgDestroy - Free the printf spinlock +// +void +nvDbgDestroy(void) +{ + if (NULL != _nv_dbg_lock) + { + portSyncSpinlockDestroy(_nv_dbg_lock); + _nv_dbg_lock = NULL; + portShutdown(); + } +} + +// +// nvDbg_PrintMsg - Common message control for two flavors of printf +// +// Differences for mods builds. +// * Mods has its own messaging system, and we always pass messages +// to the mods unless RmMsg explicitly wants to hide a message. +// * Mods requires messages even when the debugger is not enabled. +// * Sorry for the #ifdefs, but RmMsg complicates the code enough +// that it is nice to have one implementation. 
+// +static NvBool +nvDbg_PrintMsg +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + NvBool *pForce, + NvU32 *pPrefix +) +{ + NvU32 rc; + int debuglevel_min; + +#if defined(DEVELOP) || defined(DEBUG) || defined(QA_BUILD) + debuglevel_min = LEVEL_NOTICE; +#else + debuglevel_min = LEVEL_ERROR; +#endif + + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if ((NULL == pSys) || (pSys->getProperty(pSys, PDB_PROP_SYS_DEBUGGER_DISABLED))) + { + return NV_FALSE; + } + + // + // Message is filtered by an explicit RmMsg rule + // + rc = nvDbgRmMsgCheck(filename, linenumber, (char *)function, debuglevel, printf_format, pPrefix); + switch (rc) + { + case NVRM_MSG_HIDE: + // Hide this error message + return NV_FALSE; + + case NVRM_MSG_PRINT: + // Force this error message + *pForce = NV_TRUE; + return NV_TRUE; + + case NVRM_MSG_NORMAL: + default: + if (debuglevel >= debuglevel_min) + { + return NV_TRUE; + } + break; + } + return NV_FALSE; +} + +void nvDbg_Printf +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + ... +) +{ + va_list arglist; + va_start(arglist, printf_format); + nvDbg_vPrintf(filename, linenumber, function, debuglevel, printf_format, arglist); + va_end(arglist); +} + +// +// Internal function to prepare _nv_dbg_string for printing. +// Should only be called while _nv_dbg_lock is held. +// +static void +_nvDbgPrepareString +( + const char *file, + int line, + const char *func, + const char *fmt, + NvU32 prefix, + va_list arglist +) +{ + NvU32 len = 0; + + // + // If RmMsg has added a prefix, skip the standard NV_PRINTF_PREFIX. + // If there is no prefix, don't include the RmMsg prefix. + // + if (portStringCompare(fmt, NV_PRINTF_PREFIX, sizeof(NV_PRINTF_PREFIX) - 1) == 0) + { + len = RmMsgPrefix(prefix, file, line, func, _nv_dbg_string, MAX_ERROR_STRING); + fmt += sizeof(NV_PRINTF_PREFIX) - 1; + } + + nvDbgVsnprintf(_nv_dbg_string + len, MAX_ERROR_STRING - len, fmt, arglist); +} + +// +// Temporary helper to map LEVEL_xxx constants to a platform specific level. +// +#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel) +static NvU32 _nvDbgLevelToPlatformLevel(NvBool bForce, NvU32 level) +{ + return bForce ? LEVEL_FATAL : level; +} +#endif + +// +// Some varargs interfaces need a va_list interface, but still +// want the common output buffer and the RmMsg handling. 
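// For example (editor's sketch; myDbgPrintf is a hypothetical caller, not part
// of this change), such an interface would forward its va_list like this:
//
//     void myDbgPrintf(const char *fmt, ...)
//     {
//         va_list args;
//         va_start(args, fmt);
//         nvDbg_vPrintf(__FILE__, __LINE__, __FUNCTION__, LEVEL_NOTICE, fmt, args);
//         va_end(args);
//     }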
+// +void nvDbg_vPrintf +( + const char *filename, + int linenumber, + const char *function, + int debuglevel, + const char *printf_format, + va_list arglist +) +{ + NvBool force = NV_FALSE; + NvU32 prefix = 0; + + if (nvDbg_PrintMsg(filename, linenumber, function, debuglevel, printf_format, &force, &prefix)) + { + portSyncSpinlockAcquire(_nv_dbg_lock); + _nvDbgPrepareString(filename, linenumber, function, printf_format, prefix, arglist); +#if PORT_IS_FUNC_SUPPORTED(portDbgExPrintfLevel) + portDbgExPrintfLevel(_nvDbgLevelToPlatformLevel(force, debuglevel), + "%.*s", MAX_ERROR_STRING, _nv_dbg_string); +#else + portDbgPrintString(_nv_dbg_string, MAX_ERROR_STRING); +#endif + portSyncSpinlockRelease(_nv_dbg_lock); + } +} + + +#define IS_PRINT(c) (((c) >= 0x20) && ((c) <= 0x7E)) + +void nvDbg_PrintBuf +( + const char *file, + int line, + const char *function, + int dbglevel, + NvU8 buffer[], + NvU32 bufsize +) +{ + NvU32 i, j; + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf [BEGIN]")); + for (i = 0; i < bufsize; i += 16) + { + nvDbg_Printf(file, line, function, dbglevel, "\n"); + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf 0x%p "), buffer + i); + for (j = 0; j < 16; j++) + { + if ((i + j) < bufsize) + { + nvDbg_Printf(file, line, function, dbglevel, "%02x", *(buffer + i + j)); + } + else + { + nvDbg_Printf(file, line, function, dbglevel, " "); + } + } + nvDbg_Printf(file, line, function, dbglevel, " "); + for (j = 0; j < 16; j++) + { + if ((i + j) < bufsize) + { + nvDbg_Printf(file, line, function, dbglevel, "%c", IS_PRINT(*(buffer + i + j))? *(buffer + i + j) : '.'); + } + else + { + nvDbg_Printf(file, line, function, dbglevel, " "); + } + } + } + nvDbg_Printf(file, line, function, dbglevel, "\n"); + nvDbg_Printf(file, line, function, dbglevel, NV_PRINTF_ADD_PREFIX("printBuf [END]\n")); +} + +#endif + +#define TMPBUF_SIZE 63 +//====================================================================================== +// +// nvDbgVsnprintf() +// +//====================================================================================== +int nvDbgVsnprintf(char *dest, NvU32 destSize, const char *fmt, va_list args) +{ + int ch, precision, flags; + NvU32 fieldwidth; + int longlong; + NvS32 s32val; + NvU32 u32val; + NvS64 s64val; + NvU64 u64val; + + const char *f; + const char *specptr; + char *d; + char *strpval; + char *destLimit; + void *pval; + char tmpBuf[TMPBUF_SIZE + 1]; + NvU32 tmpSize; + + if (dest == NULL || destSize == 0) + { + return(0); // If we don't have a destination, we didn't do any characters + } + + f = fmt; + d = dest; + destLimit = dest + destSize - 1; + dest[destSize - 1] = 0; + + while ((ch = *f++) != '\0') + { + if (ch != '%') + { + if (d < destLimit) + { + *d++ = (NvU8)ch; + } + continue; + } + longlong = NV_FALSE; + specptr = f - 1; // Save a pointer to the '%' specifier, in case of syntax errors + ch = *f++; + + // revert to correct printf behavior for % + // from printf.3 regarding '%' format character: + // % A `%' is written. No argument is converted. The complete conversion specification is `%%'. 
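        // For example (editor's illustration): nvDbgSnprintf(buf, sizeof(buf), "%d%%", 50)
        // produces "50%" and returns 3.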
+ if (ch == '%') { + if (d < destLimit) + { + *d++ = (NvU8)ch; + } + continue; + } + + flags = DONTTERMINATE; // Don't terminate substrings -- we'll null-terminate when we're all done + // Check for left-alignment + if (ch == '-') { + flags |= LEFTALIGN_F; + ch = *f++; + } + // Check for using a plus sign for non-negative numbers + if (ch == '+') { + flags |= PLUSSIGN_F; + ch = *f++; + } + // Check for using a space character (sign place-holder) for non-negative numbers + if (ch == ' ') { + flags |= SPACESIGN_F; + ch = *f++; + } + // Check for leading zero fill + if (ch == '0') { + flags |= ZEROFILL_F; + // Don't bump the character pointer in case '0' was the only digit + } + // Collect the field width specifier + if (ch == '*') { + // Field width specified by separate argument + fieldwidth = va_arg(args, int); + ch = *f++; + } + else { + fieldwidth = 0; // Default field width + while (ch >= '0' && ch <= '9') { + fieldwidth = fieldwidth * 10 + ch - '0'; + ch = *f++; + } + } + + // Check for a precision specifier + precision = -1; // Default unspecified precision + if (ch == '.') { // We have a precision specifier, skip the '.' + ch = *f++; + if (ch == '*') { + // precision specified by separate argument + precision = va_arg(args, int); + ch = *f++; + } + else { + while (ch >= '0' && ch <= '9') { + precision = precision * 10 + ch - '0'; + ch = *f++; + } + } + } + + if (ch == 'l') { + ch = *f++; + if (ch == 'l') { + longlong = NV_TRUE; + ch = *f++; + } + } + + // Perform the conversion operation + switch (ch) { + case 'c': // Copy an ASCII character + u32val = va_arg(args, int); + if (d < destLimit) + { + *d++ = (NvU8) u32val; + } + break; + case 'u': // Copy a formatted, unsigned decimal number + flags |= UNSIGNED_F; + if (fieldwidth > TMPBUF_SIZE) + { + fieldwidth = TMPBUF_SIZE; + } + if ( longlong ) // long long specifier "llu" or "lld" + { + u64val = va_arg(args, unsigned long long); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr(u64val, tmpBuf, fieldwidth, flags); + } + else + { + u32val = va_arg(args, unsigned int); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr((NvU64)u32val, tmpBuf, fieldwidth, flags); + } + if (d < destLimit) + { + tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d); + portMemCopy(d, tmpSize, tmpBuf, tmpSize); + d += tmpSize; + } + break; + case 'd': // Copy a formatted, signed decimal number + if (fieldwidth > TMPBUF_SIZE) + { + fieldwidth = TMPBUF_SIZE; + } + if ( longlong ) // long long specifier "llu" or "lld" + { + s64val = va_arg(args, long long); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr(s64val, tmpBuf, fieldwidth, flags); + } + else + { + s32val = va_arg(args, int); + // Format the number, increment the dest pointer by the characters copied + tmpSize = inttodecfmtstr((NvS64)s32val, tmpBuf, fieldwidth, flags); + } + if (d < destLimit) + { + tmpSize = (d + tmpSize) < destLimit ? 
tmpSize : (NvU32)(destLimit - d); + portMemCopy(d, tmpSize, tmpBuf, tmpSize); + d += tmpSize; + } + break; + case 'x': // Copy a formatted, lower-case hexadecimal number + flags |= LOWERCASE_F; + case 'X': // Copy a formatted, upper-case hexadecimal number + if (fieldwidth > TMPBUF_SIZE) + { + fieldwidth = TMPBUF_SIZE; + } + if ( longlong ) // long long specifier "llx" or "llX" + { + u64val = va_arg(args, long long); + // Format the number, increment the dest pointer by the characters copied + tmpSize = uinttohexfmtstr(u64val, tmpBuf, fieldwidth, flags); + } + else + { + u32val = va_arg(args, int); + // Format the number, increment the dest pointer by the characters copied + tmpSize = uinttohexfmtstr((NvU64)u32val, tmpBuf, fieldwidth, flags); + } + if (d < destLimit) + { + tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d); + portMemCopy(d, tmpSize, tmpBuf, tmpSize); + d += tmpSize; + } + break; + case 'p': // Copy a formatted pointer value + if (fieldwidth > TMPBUF_SIZE) + { + fieldwidth = TMPBUF_SIZE; + } + pval = va_arg(args, void *); + tmpSize = uinttohexfmtstr((NvU64)((NvUPtr)pval), tmpBuf, fieldwidth, flags); + if (d < destLimit) + { + tmpSize = (d + tmpSize) < destLimit ? tmpSize : (NvU32)(destLimit - d); + portMemCopy(d, tmpSize, tmpBuf, tmpSize); + d += tmpSize; + } + break; + case 's': // Copy a formatted string + strpval = va_arg(args, char *); + d += strtofmtstr(strpval, d, destLimit, fieldwidth, precision, flags); + break; + case 0: // Gracefully handle premature end-of-string + f--; // Back up, now f points to the null character again + default: // Unexpected conversion operator, so just echo to the destination + while (specptr < f) + { + if (d < destLimit) + { + *d++ = *specptr; + } + specptr++; + } + if (ch == 0) + { + goto stringdone; + } + break; + } + } + +stringdone: + if (d <= destLimit) + { + *d = '\0'; // Null-terminate the string + } + return((int)(d - dest)); // Return the number of characters we [might] transferred +} + +int nvDbgSnprintf(char *dest, NvU32 destSize, const char *fmt, ...) +{ + va_list arglist; + int len; + + va_start(arglist, fmt); + len = nvDbgVsnprintf(dest, destSize, fmt, arglist); + va_end(arglist); + + return len; +} + +enum { // Padding option definitions + PRESPACE_O = 1, + PREZERO_O = 2, + POSTSPACE_O = 4 +}; + +#define NUMBUFSIZE 20 // Should be enough for 64-bit integers in decimal or hex + +//====================================================================================== +// +// inttodecfmtstr() +// +// This takes a signed integer value and converts it to a formatted decimal string, +// using options (field width and flags) like those provided by sprintf(). The 32-bit +// number is assumed to be signed unless the UNSIGNED_F flag is set. Look at the code +// for dbugsprintf() above to see which formatting options are implemented. 
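// For example (editor's illustration, derived from the code below):
//     inttodecfmtstr(-42, dest, 6, ZEROFILL_F)  produces "-00042"
//     inttodecfmtstr( 42, dest, 6, PLUSSIGN_F)  produces "   +42"
// Both calls return 6, the number of characters written (excluding the null).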
+// +//====================================================================================== +static int inttodecfmtstr(NvS64 sval, char *dest, int fieldwidth, int flags) +{ + int i, digitcount, destcount; + int sign, signchar; + int fillcount; + int pad_options; + NvU64 uval, quotient, remainder; + char *intdigp; + char nbuf[NUMBUFSIZE]; + + signchar = ' '; // avoid compiler init warning + // Process the sign-related options + if (flags & UNSIGNED_F) { // Unsigned conversion + sign = 0; // No sign character + } else { // We're doing a signed conversion + sign = 1; // Assume we'll have a sign character + if (sval < 0) { + signchar = '-'; + sval = -sval; // Make the number positive now so we can 'digitize' it + } else { // sval >= 0 + if (flags & PLUSSIGN_F) + signchar = '+'; + else if (flags & SPACESIGN_F) + signchar = ' '; + else + sign = 0; // No sign character + } + } + uval = sval; // Do unsigned math from here on out + + // Convert the number into ASCII decimal digits in our local buffer, counting them + intdigp = &nbuf[NUMBUFSIZE]; // Point past the last character in the buffer + digitcount = 0; // Nothing written to our local buffer yet + do { + quotient = uval / 10; + remainder = uval - quotient * 10; + *--intdigp = (NvU8) (remainder + '0'); // Put the digit into the next lower buffer slot + digitcount++; + uval = quotient; + } while (uval > 0); + + // Process the field-padding options + pad_options = 0; // Assume we won't be doing any padding + fillcount = fieldwidth - (sign + digitcount); // Account for the sign, if used + if (fillcount > 0) { // We need to do left or right padding + if (flags & LEFTALIGN_F) { + pad_options = POSTSPACE_O; + } else { // Right-aligned, fill with zeros or spaces + if (flags & ZEROFILL_F) + pad_options = PREZERO_O; + else + pad_options = PRESPACE_O; + } + } + + destcount = 0; // Nothing written out to the destination yet + + // Copy any leading spaces + if (pad_options & PRESPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = ' '; + destcount += fillcount; + } + // Copy the sign character, if any + if (sign) { + *dest++ = (char)signchar; + destcount++; + } + // Copy any leading zeros + if (pad_options & PREZERO_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = '0'; + destcount += fillcount; + } + // Copy the decimal digits from our local buffer + for (i = 0; i < digitcount; i++) + *dest++ = *intdigp++; + destcount += digitcount; + + // Copy any trailing spaces + if (pad_options & POSTSPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = ' '; + destcount += fillcount; + } + if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to + *dest = 0; + return(destcount); // Return the character count, not including the null +} + +//====================================================================================== +// +// uinttohexfmtstr() +// +// This takes an unsigned integer value and converts it to a formatted hexadecimal +// string, using options (field width and flags) like those provided by sprintf(). Look +// at the code for dbugsprintf() above to see which formatting options are implemented. 
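+//
+// Illustrative usage (not part of the driver source; values are chosen only to
+// show the field-width and flag handling implemented below):
+//
+//     char buf[NUMBUFSIZE];
+//     uinttohexfmtstr(0x1A2B, buf, 8, ZEROFILL_F);               // buf = "00001A2B"
+//     uinttohexfmtstr(0x1A2B, buf, 8, ZEROFILL_F | LOWERCASE_F); // buf = "00001a2b"
+//     uinttohexfmtstr(0x1A2B, buf, 0, 0);                        // buf = "1A2B"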
+// +//====================================================================================== +static int uinttohexfmtstr(NvU64 uval, char *dest, int fieldwidth, int flags) +{ + int i, digitcount, destcount; + int c, hexadjust; + int fillcount; + char fillchar = ' '; + int pad_options; + char *intdigp; + char nbuf[NUMBUFSIZE]; + + hexadjust = 'A' - '9' - 1; + if (flags & LOWERCASE_F) + hexadjust += 'a' - 'A'; + + // Convert the number into ASCII hex digits in our local buffer, counting them + intdigp = &nbuf[NUMBUFSIZE]; // Point past the last character in the buffer + digitcount = 0; // Nothing written to our local buffer yet + do { + c = (int)(uval % 16) + '0'; + if (c > '9') /* A-F */ + c += hexadjust; + *--intdigp = (NvU8)c; // Put the digit into the next lower buffer slot + digitcount++; + uval /= 16; + } while (uval > 0); + + // Process the field-padding options + pad_options = 0; // Assume we won't be doing any padding + fillcount = fieldwidth - digitcount; // No sign to worry about + if (fillcount > 0) { // We need to do left or right padding + fillchar = ' '; // Most common fill character is the space + if (flags & LEFTALIGN_F) { + pad_options = POSTSPACE_O; + } else { // Right-aligned, fill with zeros or spaces + if (flags & ZEROFILL_F) { + pad_options = PREZERO_O; + fillchar = '0'; + } else { + pad_options = PRESPACE_O; + } + } + } + + destcount = 0; // Nothing written out to the destination yet + + // Copy any leading zeros or spaces + if (pad_options & (PREZERO_O | PRESPACE_O)) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = fillchar; + destcount += fillcount; + } + // Copy the hex digits from our local buffer + for (i = 0; i < digitcount; i++) + *dest++ = *intdigp++; + destcount += digitcount; + + // Copy any trailing spaces + if (pad_options & POSTSPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = fillchar; + destcount += fillcount; + } + if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to + *dest = 0; + return(destcount); // Return the character count, not including the null +} + + +#if 0 + +//====================================================================================== +// +// float64todecfmtstr() +// +// This takes a 64-bit floating-point value and converts it to a formatted decimal +// string, using options (field width, precision, and flags) like those provided by +// sprintf(). Look at the code for dbugsprintf() above to see which formatting options +// are implemented. +// +//====================================================================================== +static int float64todecfmtstr(F064 f64val, NvU8 *dest, int fieldwidth, int precision, int flags) +{ + int i, firstcount, destcount; + int sign, signchar, decpt; + int fillcount; + int pad_options; + int reducecount, loopdigits, digitsleft; + NvU32 u32val, quotient, remainder; + F064 f64mant9 = 0.0, f64mant9factor = 0.0, fone = 0.0, ften = 0.0, fbillion = 0.0, powerof10 = 0.0; + NvU8 *digp; + NvU8 nbuf[NUMBUFSIZE]; // This only needs to hold the first 9 digits of the integer part + + // Process the sign-related options + sign = 1; // Assume at first we'll have a sign character + if (f64val < 0.0) { + signchar = '-'; + f64val = -f64val; // Make the number positive now so we can 'digitize' it + } else { // f64val >= 0.0 + if (flags & PLUSSIGN_F) + signchar = '+'; + else if (flags & SPACESIGN_F) + signchar = ' '; + else + sign = 0; // No sign character + } + + // Round the number to N decimal places. 
We add 0.5 x 10^(-N), which is + // equivalent to adding 1 / (2*10^N). We'll use this latter formula. + fone = 1.0; // Keep the compiler from always loading these constants from memory + ften = 10.0; + powerof10 = fone; // 10 ^ 0 + for (i = 0; i < precision; i++) + powerof10 *= ften; // Build 10 ^ N + f64val += fone / (2.0 * powerof10); + // f64val now contains the properly rounded number + + f64mant9 = f64val; // Start hunting for the mantissa's 9 uppermost decimal digits + fbillion = 1e9; // Keep it in a register + f64mant9factor = fone; + // Reduce the mantissa to less than 1 billion, so it will fit in a 32-bit integer + for (reducecount = 0; f64mant9 >= fbillion; reducecount++) { + f64mant9 /= fbillion; + f64mant9factor *= fbillion; + } + + // Process the highest 32-bits of the mantissa so we can count those digits first + + f64mant9 = f64val / f64mant9factor; // Grab highest 9 integer decimal digits + u32val = (NvU32) f64mant9; // Drop any fraction + f64mant9 = u32val; // Now we have a float with only an integer part + f64val -= f64mant9 * f64mant9factor; // Subtract out the previous high digits + f64mant9factor /= fbillion; // Adjust our division factor + + // Convert the binary into ASCII decimal digits in our local buffer, counting them + digp = &nbuf[NUMBUFSIZE]; // Point past the last char. of these 9 digits + firstcount = 0; // No digits of the first 32-bit integer part yet + do { + quotient = u32val / 10; + remainder = u32val - quotient * 10; + *--digp = (NvU8) (remainder + '0'); // Put the digit into the next lower buffer slot + firstcount++; + u32val = quotient; + } while (u32val > 0); + + // Figure out whether we'll have a decimal point + decpt = (precision > 0); // Don't use a decimal point if no fractional part + + // Process the field-padding options + pad_options = 0; // Assume we won't be doing any padding + // We have the information we need to calculate how many output characters we'll have + fillcount = fieldwidth - (sign + firstcount + (reducecount * 9) + decpt + precision); + if (fillcount > 0) { // We need to do left or right padding + if (flags & LEFTALIGN_F) { + pad_options = POSTSPACE_O; + } else { // Right-aligned, fill with zeros or spaces + if (flags & ZEROFILL_F) + pad_options = PREZERO_O; + else + pad_options = PRESPACE_O; + } + } + + destcount = 0; // Nothing written out to the destination yet + + // Copy any leading spaces + if (pad_options & PRESPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = ' '; + destcount += fillcount; + } + // Copy the sign character, if any + if (sign) { + *dest++ = signchar; + destcount++; + } + // Copy any leading zeros + if (pad_options & PREZERO_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = '0'; + destcount += fillcount; + } + // Copy the highest chunk of integer digits from the local buffer + for (i = 0; i < firstcount; i++) + *dest++ = *digp++; + destcount += firstcount; + + // Now we need to convert the remaining integer digits, if any + for (i = 0; i < reducecount; i++) { + f64mant9 = f64val / f64mant9factor; // Grab 9 more decimal digits + u32val = (NvU32) f64mant9; // Drop any fraction + f64mant9 = u32val; // Now we have a float with only an integer part + f64val -= f64mant9 * f64mant9factor; // Subtract out the previous high digits + f64mant9factor /= fbillion; // Adjust our division factor + // Convert the integer part into ASCII decimal digits, directly to the destination + dest += 9; // Point past the last char. 
of this 9-digit chunk + digp = dest; + for (loopdigits = 0; loopdigits < 9; loopdigits++) { + quotient = u32val / 10; + remainder = u32val - quotient * 10; + *--digp = (NvU8) (remainder + '0'); // Put the digit into the next lower buffer slot + u32val = quotient; + } + destcount += 9; + } + // f64val has only the fractional part now + + if (!decpt) + goto checktrailing; // Skip the laborious fraction-processing part + + // Copy the decimal point + *dest++ = '.'; + destcount++; + + // Similar to how we handled the integer part processing, we'll process up to + // 9 digits at a time, by multiplying the fraction by a power of 10, + // converting to an integer, and converting digits to the destination. + + digitsleft = precision; + do { + loopdigits = digitsleft; + if (loopdigits > 9) + loopdigits = 9; + powerof10 = fone; // 10 ^ 0 + for (i = 0; i < loopdigits; i++) + powerof10 *= ften; // Build 10 ^ N + f64val *= powerof10; // Push some fractional digits into the integer part + u32val = (NvU32) f64val; // Conversion truncates any remaining fraction + f64val -= u32val; // Remove the integer part, leave remaining fraction digits + digp = dest + loopdigits; // Point past the last char. of this chunk + for (i = 0; i < loopdigits; i++) { + quotient = u32val / 10; + remainder = u32val - quotient * 10; + *--digp = (NvU8) (remainder + '0'); // Put the digit into the next lower buffer slot + u32val = quotient; + } + dest += loopdigits; + destcount += loopdigits; + digitsleft -= loopdigits; + } while (digitsleft > 0); + +checktrailing: + // Copy any trailing spaces + if (pad_options & POSTSPACE_O) { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + *dest++ = ' '; + destcount += fillcount; + } + if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to + *dest = 0; + return(destcount); // Return the character count, not including the null +} + +#endif // 0 + +//====================================================================================== +// +// strtofmtstr() +// +// This takes a source C string and converts it to a formatted output C string, +// using options (field width, precision, and flags) like those provided by sprintf(). Look at +// the code for nvDbgVsnprintf() above to see which formatting options are implemented. +// +// fieldwidth - minimum total characters to output (including pad) +// precision - maximum characters from src to output; or entire string if negative +//====================================================================================== +static int strtofmtstr(const char *src, char *dest, char *destLimit, int fieldwidth, int precision, int flags) +{ + int i, srclen; + int fillcount; + char fillchar = ' '; + int pad_options; + const char *s; + char *d; + + // Make sure we have a source string to work with + if (src == NULL) + { + src = ""; + } + + // For padding calculations, we need to know the source string length + for (s = src, srclen = 0; *s != 0; s++) + srclen++; + + // But truncated to precision, if specified. 
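+    // (Illustrative: for a "%.3s" conversion with src "hello", precision is 3,
+    // so srclen is clamped from 5 to 3 and only "hel" is copied below.)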
+ if(precision >= 0 && srclen > precision) + srclen = precision; + + // Process the field-padding options + pad_options = 0; // Assume we won't be doing any padding + fillcount = fieldwidth - srclen; + + if (fillcount > 0) { // We need to do left or right padding + fillchar = ' '; // Most common fill character is the space + if (flags & LEFTALIGN_F) { + pad_options = POSTSPACE_O; + } else { // Right-aligned, fill with zeros or spaces + if (flags & ZEROFILL_F) { + pad_options = PREZERO_O; + fillchar = '0'; + } else { + pad_options = PRESPACE_O; + } + } + } + + s = src; + d = dest; + + // Copy any leading zeros or spaces + if (pad_options & (PREZERO_O | PRESPACE_O)) + { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + { + if (d < destLimit) + { + *d++ = fillchar; + } + } + } + // Copy the characters from the source string + for (i = 0; i < srclen; i++) + { + if (d < destLimit) + { + *d++ = *s++; + } + } + + // Copy any trailing spaces + if (pad_options & POSTSPACE_O) + { + for (i = 0; i < fillcount; i++) // Copy the pad character(s) + { + if (d < destLimit) + { + *d++ = fillchar; + } + } + } + + if ((flags & DONTTERMINATE) == 0) // Null-terminate the string unless requested not to + *d = 0; + return((int)(d - dest)); // Return the character count, not including the null +} + +//******************************************************************************** +// +// NVRM_TRACE support +// low-overhead runtime state capture +// to enable, define USE_NVRM_TRACE (retail or debug builds) +// +//******************************************************************************** + +#ifdef USE_NVRM_TRACE + +// TODO: It would be really cool if we could +// find a way to tie this to a board instance. +// Or at least provide some way to tag entries +// relative to the board. It would be even +// more cool if each entry was of the form: +// +// TIMESTAMP : CPU# : BOARD# : LINE# : FILENAME : +// +// where could be either a string or a value. +// + +// the number of trace words in the ring buffer. +// a trace event can be 1 or more words +#define NVRM_TRACE_ENTRIES (2048) +// a typedef for the trace table. Add a pad to simplify +// bounds checking +typedef NvU32 NVRM_TRACE_t[NVRM_TRACE_ENTRIES + 16]; + +NvU32 NVRM_tracing = 0; // enabled or not? +// a type'd ptr to the table. 
This may make it easier for your debugger +// to dump out the table (definitely helps on the mac) +NVRM_TRACE_t *NVRM_TRACE_GTRACE; +// actual table pointer +NvU32 *NVRM_TRACE_table; +// current index into the table +NvU32 NVRM_TRACE_idx; + +#define NVRM_TRACE_BUMP(inc) { NVRM_TRACE_idx += (inc); \ + if (NVRM_TRACE_idx >= NVRM_TRACE_ENTRIES) NVRM_TRACE_idx = 0; \ + NVRM_TRACE_table[NVRM_TRACE_idx] = '****'; \ + } + +NvU32 NVRM_TRACE_INIT() +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + // skip out if already initialized + if (NVRM_TRACE_table) + goto done; + + // + // allocate the table + // depending on when you call NVRM_TRACE_INIT, might not be able to + // use portMemAllocNonPaged() + // + NVRM_TRACE_table = portMemAllocNonPaged(sizeof(NVRM_TRACE_t)); + + // clear the table + if (NVRM_TRACE_table) + portMemSet(NVRM_TRACE_table, 0, sizeof(NVRM_TRACE_t)); + + NVRM_TRACE_GTRACE = (void *) NVRM_TRACE_table; + + NV_PRINTF(LEVEL_ERROR, + "trace table at 0x%x through 0x%x, idx at 0x%x\n", + NVRM_TRACE_table, &NVRM_TRACE_table[NVRM_TRACE_ENTRIES], + &NVRM_TRACE_idx); + + if (NVRM_TRACE_table != NULL) + { + NVRM_tracing = 1; + } + + done: + return NVRM_tracing; +} + +NvU32 NVRM_TRACE_DISABLE(void) +{ + NvU32 previous = NVRM_tracing; + NVRM_tracing = 0; + return previous; +} + +void NVRM_TRACE_ENABLE(void) +{ + NVRM_tracing = 1; +} + +void NVRM_TRACE(NvU32 value) +{ + if ( ! NVRM_tracing) return; + if (NVRM_TRACE_table == (NvU32 *) 0) + if ( ! NVRM_TRACE_INIT()) + return; + + NVRM_TRACE_table[NVRM_TRACE_idx] = value; + NVRM_TRACE_BUMP(1); +} + +void NVRM_TRACE1(NvU32 value) +{ + if ( ! NVRM_tracing) return; + if (NVRM_TRACE_table == (NvU32 *) 0) + if ( ! NVRM_TRACE_INIT()) + return; + + NVRM_TRACE_table[NVRM_TRACE_idx] = value; + NVRM_TRACE_BUMP(1); +} + +void NVRM_TRACE2(NvU32 value1, NvU32 value2) +{ + if ( ! NVRM_tracing) return; + if (NVRM_TRACE_table == (NvU32 *) 0) + if ( ! NVRM_TRACE_INIT()) + return; + + NVRM_TRACE_table[NVRM_TRACE_idx] = value1; + NVRM_TRACE_table[NVRM_TRACE_idx+1] = value2; + NVRM_TRACE_BUMP(2); +} + +void NVRM_TRACE3(NvU32 value1, NvU32 value2, NvU32 value3) +{ + if ( ! NVRM_tracing) return; + if (NVRM_TRACE_table == (NvU32 *) 0) + if ( ! NVRM_TRACE_INIT()) + return; + + NVRM_TRACE_table[NVRM_TRACE_idx] = value1; + NVRM_TRACE_table[NVRM_TRACE_idx+1] = value2; + NVRM_TRACE_table[NVRM_TRACE_idx+2] = value3; + NVRM_TRACE_BUMP(3); +} + +void NVRM_TRACE4(NvU32 value1, NvU32 value2, NvU32 value3, NvU32 value4) +{ + if ( ! NVRM_tracing) return; + if (NVRM_TRACE_table == (NvU32 *) 0) + if ( ! NVRM_TRACE_INIT()) + return; + + NVRM_TRACE_table[NVRM_TRACE_idx] = value1; + NVRM_TRACE_table[NVRM_TRACE_idx+1] = value2; + NVRM_TRACE_table[NVRM_TRACE_idx+2] = value3; + NVRM_TRACE_table[NVRM_TRACE_idx+3] = value4; + NVRM_TRACE_BUMP(4); +} + +void NVRM_TRACE5(NvU32 value1, NvU32 value2, NvU32 value3, NvU32 value4, NvU32 value5) +{ + if ( ! NVRM_tracing) return; + if (NVRM_TRACE_table == (NvU32 *) 0) + if ( ! 
NVRM_TRACE_INIT()) + return; + + NVRM_TRACE_table[NVRM_TRACE_idx] = value1; + NVRM_TRACE_table[NVRM_TRACE_idx+1] = value2; + NVRM_TRACE_table[NVRM_TRACE_idx+2] = value3; + NVRM_TRACE_table[NVRM_TRACE_idx+3] = value4; + NVRM_TRACE_table[NVRM_TRACE_idx+4] = value5; + NVRM_TRACE_BUMP(5); +} + +#ifndef ACTUAL_REG_RD32 +#define ACTUAL_REG_RD32 GPU_REG_RD32 +#endif +#ifndef ACTUAL_REG_WR32 +#define ACTUAL_REG_WR32 GPU_REG_WR32 +#endif +#ifndef ACTUAL_REG_RD08 +#define ACTUAL_REG_RD08 GPU_REG_RD08 +#endif +#ifndef ACTUAL_REG_WR08 +#define ACTUAL_REG_WR08 GPU_REG_WR08 +#endif + +NvU32 NVRM_TRACE_REG_RD32(OBJGPU *pGpu, NvU32 offset) +{ + NvU32 value = ACTUAL_REG_RD32(pGpu, offset); + NVRM_TRACE3('RD32', offset, value); + return value; +} + +void NVRM_TRACE_REG_WR32(OBJGPU *pGpu, NvU32 offset, NvU32 value) +{ + ACTUAL_REG_WR32(pGpu, offset, value); + NVRM_TRACE3('WR32', offset, value); +} + +NvU8 NVRM_TRACE_REG_RD08(OBJGPU *pGpu, NvU32 offset) +{ + NvU32 value = ACTUAL_REG_RD08(pGpu, offset); + NVRM_TRACE3('RD08', offset, value); + return (NvU8) value; +} + +void NVRM_TRACE_REG_WR08(OBJGPU *pGpu, NvU32 offset, NvU8 value) +{ + ACTUAL_REG_WR08(pGpu, offset, value); + NVRM_TRACE3('WR08', offset, value); +} + +void NVRM_TRACEV(NvU32 *values, NvU32 numValues) +{ + NvU32 n; + + if ( ! NVRM_tracing) return; + if (NVRM_TRACE_table == (NvU32 *) 0) + if ( ! NVRM_TRACE_INIT()) + return; + + for ( n = 0; n < numValues; n++ ) + NVRM_TRACE_table[NVRM_TRACE_idx+n] = values[n]; + + NVRM_TRACE_BUMP(n); +} + +void NVRM_TRACE_DUMP(void) +{ + int i; + static int dumping = 0; + + // No table? + if ( ! NVRM_TRACE_table) return; + + // don't nest while dumping this + if (dumping) return; + dumping = 1; + + NVRM_TRACE_DISABLE(); + + for (i=0; i <= NVRM_TRACE_ENTRIES; i += 8) + { + NV_PRINTF(LEVEL_ERROR, "%x %x %x %x %x %x %x %x\n", + NVRM_TRACE_table[i + 0], NVRM_TRACE_table[i + 1], + NVRM_TRACE_table[i + 2], NVRM_TRACE_table[i + 3], + NVRM_TRACE_table[i + 4], NVRM_TRACE_table[i + 5], + NVRM_TRACE_table[i + 6], NVRM_TRACE_table[i + 7]); + } + dumping = 0; +} + +#endif // USE_NVRM_TRACE + +#if NV_PRINTF_STRINGS_ALLOWED +// +// String matching helper for nvDbgRmMsgCheck. +// strstr with the length of the pattern string +// passed in. +// + +static const char *nv_strnstr +( + const char *str, + const char *pat, + int patlen +) +{ + int len; + + // Should be NULL, but this makes noun optional + if (pat == NULL) + { + return str; + } + + while (*str) + { + len = 0; + while (len < patlen) + { + if (str[len] != pat[len]) + break; + len++; + } + if (len == patlen) + { + return str; + } + str++; + } + return NULL; +} + +// +// Buffer to store RmMsg string. This is stored in bss +// so it can be updated in the debugger dynamically. +// +char RmMsg[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE]; + +// +// nvDbgRmMsgCheck +// Override priority of debug printf based on file, function with optional +// line ranges. Rules are matched on each printf. Rules are applied left to +// right and the final result is the cumulative result of all rules. 
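+// For example (illustrative), the string "!,dmanv50.c" first disables all printfs
+// and then re-enables those in dmanv50.c, since a later matching rule overrides
+// an earlier one for the callsites it hits.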
+// +// Format +// rule = [!][filename|function][:startline][-endline][@level][^prefix] +// Format = rule[,rule] +// +// See RmMsg wiki for detailed documentation + +// Examples: +// "dmanv50.c" - enable all printfs in dmanv50.c +// "fifoAlloc_NV50" - enable all printfs in function fifoAlloc_NV50 +// "!fifoAlloc_NV50" - disable all printfs in function fifoAlloc_NV50 +// "dmanv50.c:150" - enable printf on line 150 of dmanv50.c +// "dmanv50.c:100-200" - enable printf on lines 100-200 in dmanv50.c +// "dmanv50.c:100-200,!dmanv50:125" - same but disable printf on line 125 +// "fifo^*" - enable verbose prefix for fifo +// ":" - enable all printfs +// "!" - disable all printfs (dangerous!) +// +NvU32 +nvDbgRmMsgCheck +( + const char * filename, + NvU32 linenumber, + const char * function, + NvU32 debuglevel, + const char * printf_format, + NvU32 * pPrefix +) +{ + enum { NOUN, STARTLINE, ENDLINE, LEVEL, PREFIX } state; + int status = NVRM_MSG_NORMAL; + int inc; + char *noun; + NvU32 nounlen; + NvU32 startline; + NvU32 endline; + NvU32 level; + NvU32 prefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION; + NvU32 tempPrefix; + char *p; + + // Handle the normal case quickly. + if (RmMsg[0] == '\0') + { + goto done; + } + + p = RmMsg; + + while (*p != '\0') + { + // Initial default state for this rule + inc = 1; + noun = NULL; + nounlen = 0; + startline = 0; + endline = 0x7fffffff; + tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION; + level = LEVEL_INFO; // default to everything + state = NOUN; + + for (; *p != '\0' && *p != ','; p++) + { + if (*p == ':') + { + state = STARTLINE; + continue; + } + else if (*p == '-') + { + state = ENDLINE; + endline = 0; + continue; + } + else if (*p == '!' && !noun) + { + state = NOUN; + inc = 0; + continue; + } + else if (*p == '@') + { + state = LEVEL; + level = 0; + continue; + } + else if (*p == '^') + { + state = PREFIX; + tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FUNCTION; + continue; + } + switch (state) + { + case NOUN: + if (noun == NULL) + { + noun = p; + } + nounlen++; + break; + case STARTLINE: + if ((*p >= '0') && (*p <= '9')) + { + startline *= 10; + startline += *p - '0'; + endline = startline; // only one line + } + break; + case ENDLINE: + if ((*p >= '0') && (*p <= '9')) + { + endline *= 10; + endline += *p - '0'; + } + break; + case LEVEL: + if ((*p >= '0') && (*p <= '9')) + { + level *= 10; + level += *p - '0'; + } + break; + case PREFIX: + switch (*p) + { + case '*': + tempPrefix = NVRM_MSG_PREFIX_NVRM | NVRM_MSG_PREFIX_FILE | + NVRM_MSG_PREFIX_LINE | NVRM_MSG_PREFIX_FUNCTION | + NVRM_MSG_PREFIX_OSTIMESTAMP; + break; + case 'n': + tempPrefix |= NVRM_MSG_PREFIX_NVRM; + break; + case 'N': + tempPrefix &= ~NVRM_MSG_PREFIX_NVRM; + break; + case 'c': + tempPrefix |= NVRM_MSG_PREFIX_FILE; + break; + case 'C': + tempPrefix &= ~NVRM_MSG_PREFIX_FILE; + break; + case 'l': + tempPrefix |= NVRM_MSG_PREFIX_LINE; + break; + case 'L': + tempPrefix &= ~NVRM_MSG_PREFIX_LINE; + break; + case 'f': + tempPrefix |= NVRM_MSG_PREFIX_FUNCTION; + break; + case 'F': + tempPrefix &= ~NVRM_MSG_PREFIX_FUNCTION; + break; + case 't': + tempPrefix |= NVRM_MSG_PREFIX_OSTIMESTAMP; + break; + case 'T': + tempPrefix &= ~NVRM_MSG_PREFIX_OSTIMESTAMP; + break; + } + break; + default: // ignore any trainling words + break; + } + } + + // Does the last rule hit + if (((nv_strnstr(filename, noun, nounlen) != NULL) || + (nv_strnstr(function, noun, nounlen) != NULL)) && + (linenumber >= startline) && + (linenumber <= endline) && + (debuglevel >= level)) + { + 
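+            // The rule matched this callsite: take its enable/disable decision
+            // and its prefix settings (a later matching rule may override both).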
status = inc ? NVRM_MSG_PRINT : NVRM_MSG_HIDE; + prefix = tempPrefix; + } + + if (*p == '\0') + { + break; + } + p++; + } + +done: + if (pPrefix != NULL) + { + *pPrefix = prefix; + } + + return status; +} + +// +// RmMsgPrefix - Add the RmMsg prefix to the passed in string, returning +// the length of the formatted string. +// +// Format: "NVRM file linenum function timestamp: " +// +NvU32 +RmMsgPrefix +( + NvU32 prefix, + const char *filename, + NvU32 linenumber, + const char *function, + char *str, + NvU32 totalLen +) +{ + const char *space = ""; + NvU32 len = 0; + NvU32 sec, usec; + + *str = '\0'; + + if (prefix & NVRM_MSG_PREFIX_NVRM) + { + portStringCopy(str + len, totalLen - len, NV_PRINTF_PREFIX, sizeof(NV_PRINTF_PREFIX)); + len += sizeof(NV_PRINTF_PREFIX) - 1; + space = " "; + } + + if (prefix & NVRM_MSG_PREFIX_FILE) + { + len += nvDbgSnprintf(str + len, totalLen - len, "%s%s", space, filename); + space = " "; + } + + if (prefix & NVRM_MSG_PREFIX_LINE) + { + len += nvDbgSnprintf(str + len, totalLen - len, "%s%d", space, linenumber); + space = " "; + } + + if (prefix & NVRM_MSG_PREFIX_FUNCTION) + { + len += nvDbgSnprintf(str + len, totalLen - len, "%s%s", space, function); + space = " "; + } + + if (prefix & NVRM_MSG_PREFIX_OSTIMESTAMP) + { + osGetCurrentTime(&sec, &usec); + + len += nvDbgSnprintf(str + len, totalLen - len, "%s%d.%06d", space, sec, usec); + } + + return len; +} + +// +// Initialize RmMsg from the registry. Skip if the string was initialized +// already initialized (from the debugger). +// Called from the platform specific platform code. +// +void nvDbgInitRmMsg(OBJGPU *pGpu) +{ + NvU32 len = NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE; + + if (RmMsg[0] == '\0') + { + if (osReadRegistryString(pGpu, NV_REG_STR_RM_MSG, + (NvU8*)RmMsg, &len) != NV_OK) + { + len = NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE; + } + } +} + +#else // #else ! NV_PRINTF_STRINGS_ALLOWED + +void nvDbgInitRmMsg(OBJGPU *pGpu) +{ +} + +NvU32 +nvDbgRmMsgCheck +( + const char * filename, + NvU32 linenumber, + const char * function, + NvU32 debuglevel, + const char * printf_format, + NvU32 * pPrefix +) +{ + return NVRM_MSG_HIDE; +} + +#endif // #if NV_PRINTF_STRINGS_ALLOWED + +/*! + * @brief Does a byte by byte dump of the buffer passed. + * + * @param[in] pBuffer Pointer to the buffer to dump. + * @param[in] length Length of the buffer to dump (in # of bytes). + */ +void +nvDbgDumpBufferBytes +( + void *pBuffer, + NvU32 length +) +{ + NvU8 *s = (NvU8 *)pBuffer; + NvU32 remainingBytes = length % 16; + NvU32 i; + + NV_PRINTF(LEVEL_ERROR, + " x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf\n"); + + for (i = 0; i < (length / 16); i++) + { + + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12], s[13], s[14], s[15]); + + s += 16; + } + + /* + * 16 statement switch, so that these are added to nvlog correctly. + */ + switch (remainingBytes) + { + case 0: + default: + break; + case 1: + NV_PRINTF(LEVEL_ERROR, + "%p %02x .. .. .. .. .. .. .. .. .. .. .. .. .. .. ..\n", + s, s[0]); + break; + case 2: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x .. .. .. .. .. .. .. .. .. .. .. .. .. ..\n", + s, s[0], s[1]); + break; + case 3: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x .. .. .. .. .. .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2]); + break; + case 4: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. .. .. 
..\n", + s, s[0], s[1], s[2], s[3]); + break; + case 5: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4]); + break; + case 6: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5]); + break; + case 7: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6]); + break; + case 8: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7]); + break; + case 9: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8]); + break; + case 10: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9]); + break; + case 11: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10]); + break; + case 12: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11]); + break; + case 13: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12]); + break; + case 14: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x .. ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12], s[13]); + break; + case 15: + NV_PRINTF(LEVEL_ERROR, + "%p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x ..\n", + s, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], s[8], + s[9], s[10], s[11], s[12], s[13], s[14]); + break; + } +} + diff --git a/src/nvidia/src/kernel/diagnostics/profiler.c b/src/nvidia/src/kernel/diagnostics/profiler.c new file mode 100644 index 000000000..a21e49026 --- /dev/null +++ b/src/nvidia/src/kernel/diagnostics/profiler.c @@ -0,0 +1,227 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** ODB State Routines **************************\ +* * +* Simple API to measure elapsed times in RM for profiling * +* * +\***************************************************************************/ + +#include "diagnostics/profiler.h" +#include "os/os.h" + +static void _rmProfStopTime(RM_PROF_STATS *pStats, NvU64 stop_ns); + +/*! + * @brief Start measuring elapsed time for a specific profiling module. + * + * @param[in,out] pStats Profiling stats for the module + */ +void +rmProfStart +( + RM_PROF_STATS *pStats +) +{ + NV_ASSERT_OR_RETURN_VOID(pStats != NULL); + + if (pStats->start_ns != 0) + { + NV_PRINTF(LEVEL_ERROR, + "Starting time measurement that is already started\n"); + // + // No breakpoint since this isn't fatal by itself. + // Most likely there was an error that propagated up the stack before + // the measurement was stopped on the last cycle. + // + // In that case, restarting the measurement is probably the right thing + // to do anyway. + // + } + osGetPerformanceCounter(&pStats->start_ns); +} + +/*! + * @brief Stop measuring elapsed time for a specific profiling module and + * update the module's statistics. + * + * @param[in,out] pStats Profiling stats for the module + */ +void +rmProfStop +( + RM_PROF_STATS *pStats +) +{ + NvU64 stop_ns; + + NV_ASSERT_OR_RETURN_VOID(pStats != NULL); + + osGetPerformanceCounter(&stop_ns); + _rmProfStopTime(pStats, stop_ns); +} + +/*! + * Same as #rmProfStop but parameterized by the stop time. + */ +static void +_rmProfStopTime +( + RM_PROF_STATS *pStats, + NvU64 stop_ns +) +{ + NV_ASSERT_OR_RETURN_VOID(pStats != NULL); + + if (pStats->start_ns == 0) + { + NV_PRINTF(LEVEL_ERROR, + "Stopping time measurement that is already stopped\n"); + DBG_BREAKPOINT(); + // + // Breakpoint since this case is more serious - something is likely + // wrong with the profiling code. Also return early so the bogus + // measurement is not recorded. + // + return; + } + RM_PROF_RECORD(pStats, stop_ns - pStats->start_ns); + pStats->start_ns = 0; +} + +/*! + * @brief Manually update the statistics for one cycle of a specific profiling + * module. + * + * @param[in,out] pStats Profiling stats for the module + * @param[in] time_ns Elapsed time in nanoseconds for this cycle. + */ +void +rmProfRecord +( + RM_PROF_STATS *pStats, + NvU64 time_ns +) +{ + NV_ASSERT_OR_RETURN_VOID(pStats != NULL); + + if (pStats->count == 0 || time_ns < pStats->min_ns) + { + pStats->min_ns = time_ns; + } + if (pStats->count == 0 || time_ns > pStats->max_ns) + { + pStats->max_ns = time_ns; + } + pStats->total_ns += time_ns; + pStats->count += 1; +} + +/*! + * @brief Start measuring time for the specified profiling group (begin a new cycle). + * + * @param[out] pGroup Profiling group structure to be used. + * @param[in/out] pTotal Optional stats for the whole group duration (may be NULL). + * @param[in/out] pFirst First module of the group. + */ +void +rmProfGroupStart +( + RM_PROF_GROUP *pGroup, + RM_PROF_STATS *pTotal, + RM_PROF_STATS *pFirst +) +{ + NV_ASSERT_OR_RETURN_VOID(pGroup != NULL); + NV_ASSERT_OR_RETURN_VOID(pFirst != NULL); + + // Start profiling the first module. + RM_PROF_START(pFirst); + + // Reuse the first modules' start time for the total module. 
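+    // (Reusing the value avoids a second osGetPerformanceCounter() read and keeps
+    // the group total exactly aligned with the first module's start.)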
+ if (pTotal != NULL) + { + pTotal->start_ns = pFirst->start_ns; + } + + // Initialize the group structure. + pGroup->pTotal = pTotal; + pGroup->pLast = pFirst; +} + +/*! + * @brief Continue profiling the next module of a profiling group. + * + * @param[in/out] pGroup Profiling group. + * @param[in/out] pNext Next module of the group. + */ +void +rmProfGroupNext +( + RM_PROF_GROUP *pGroup, + RM_PROF_STATS *pNext +) +{ + NV_ASSERT_OR_RETURN_VOID(pGroup != NULL); + NV_ASSERT_OR_RETURN_VOID(pGroup->pLast != NULL); + NV_ASSERT_OR_RETURN_VOID(pNext != NULL); + + // Start profiling the next module. + RM_PROF_START(pNext); + + // Reuse the next modules' start time for the last module's stop time. + _rmProfStopTime(pGroup->pLast, pNext->start_ns); + + // Update the group structure. + pGroup->pLast = pNext; +} + +/*! + * @brief Stop profiling a cycle of a profiling group (ends both the last and total modules). + * + * @param[in] pGroup Profiling group. + */ +void +rmProfGroupStop +( + RM_PROF_GROUP *pGroup +) +{ + NvU64 stop_ns; + + NV_ASSERT_OR_RETURN_VOID(pGroup != NULL); + NV_ASSERT_OR_RETURN_VOID(pGroup->pLast != NULL); + + osGetPerformanceCounter(&stop_ns); + + // Reuse the same stop time for both last and total module. + _rmProfStopTime(pGroup->pLast, stop_ns); + if (pGroup->pTotal != NULL) + { + _rmProfStopTime(pGroup->pTotal, stop_ns); + } + + // Clear the group structure. + pGroup->pTotal = NULL; + pGroup->pLast = NULL; +} diff --git a/src/nvidia/src/kernel/disp/disp_sw.c b/src/nvidia/src/kernel/disp/disp_sw.c new file mode 100644 index 000000000..f9ec5e7eb --- /dev/null +++ b/src/nvidia/src/kernel/disp/disp_sw.c @@ -0,0 +1,180 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "sweng/dispsw.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu_mgr/gpu_mgr.h" +#include "rmapi/control.h" +#include "rmapi/mapping_list.h" +#include "gpu/device/device.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/head/kernel_head.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "ctrl/ctrl2080/ctrl2080internal.h" + +#include "class/cl9072.h" + +//--------------------------------------------------------------------------- +// +// Class object creation and destruction +// +//--------------------------------------------------------------------------- + +NV_STATUS +dispswConstruct_IMPL +( + DispSwObject *pDispSw, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispSw); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 hClient = pGpu->hInternalClient; + NvU32 hSubdevice = pGpu->hInternalSubdevice; + NV_STATUS status; + + NV2080_CTRL_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES_PARAMS ctrlParams = { 0 }; + NV9072_ALLOCATION_PARAMETERS *pUserParams = pParams->pAllocParams; + + if (!pUserParams) + { + NV_ASSERT(pUserParams); + return (NV_ERR_INVALID_ARGUMENT); + } + + if (!pKernelDisplay) + { + NV_PRINTF(LEVEL_ERROR, "Display is not enabled, can't create class\n"); + return (NV_ERR_INVALID_ARGUMENT); + } + + status = pRmApi->Control(pRmApi, hClient, hSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_ACTIVE_DISPLAY_DEVICES, + &ctrlParams, sizeof(ctrlParams)); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "RPC error, can't get the displaymask and number of heads\n"); + return status; + } + + if (pUserParams->logicalHeadId >= ctrlParams.numHeads) + { + NV_PRINTF(LEVEL_ERROR, "invalid logical head number: %d\n", + pUserParams->logicalHeadId); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (pUserParams->displayMask) + { + if (!(pUserParams->displayMask & ctrlParams.displayMask)) + { + NV_PRINTF(LEVEL_ERROR, "Device not active: 0x%08x, RM display mask: 0x%08x\n", + pUserParams->displayMask, ctrlParams.displayMask); + return NV_ERR_INVALID_ARGUMENT; + } + } + + pDispSw->DispCommon.DispObject = pDispSw; + pDispSw->DispCommon.Head = pUserParams->logicalHeadId; + + return (NV_OK); +} + +void +dispswDestruct_IMPL +( + DispSwObject *pDispSw +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pDispSw, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + KernelHead *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, pDispSw->DispCommon.Head); + + chandesIsolateOnDestruct(pChannelDescendant); + + // + // Cleanup any pending VBlank callbacks + // + // These should only be set up on the parent + // + NV_ASSERT(gpumgrIsParentGPU(pGpu)); + kheadDeleteVblankCallback(pGpu, pKernelHead, &(pDispSw->NotifyOnVBlank.Callback)); + kheadDeleteVblankCallback(pGpu, pKernelHead, &(pDispSw->DispCommon.Semaphore.ReleaseCallback)); +} + +NV_STATUS dispswReleaseSemaphoreAndNotifierFill +( + OBJGPU *pGpu, + NvU64 gpuVA, + NvU32 vaSpace, + NvU32 releasevalue, + NvU32 flags, + NvU32 completionStatus, + NvHandle hClient, + NvHandle hEvent +) +{ + CLI_DMA_MAPPING_INFO *pDmaMappingInfo; + NvBool bFound = NV_FALSE; + NV_STATUS status; + + if (flags & F_SEMAPHORE_ADDR_VALID) + { + bFound = CliGetDmaMappingInfo(hClient, + hEvent, + vaSpace, + gpuVA, + gpumgrGetDeviceGpuMask(pGpu->deviceInstance), + &pDmaMappingInfo); + if (!bFound) + return NV_ERR_INVALID_ADDRESS; + 
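+        // Mapping found: fall through to the common success return at the end
+        // of the function.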
} + else if (flags & F_SEMAPHORE_RELEASE) + { + status = semaphoreFillGPUVA(pGpu, + hClient, + vaSpace, + gpuVA, + releasevalue, + 0 /* Index */, + NV_TRUE); + return status; + } + else if (flags & F_NOTIFIER_FILL) + { + status = notifyFillNotifierGPUVA(pGpu, + hClient, + vaSpace, + gpuVA, + releasevalue, /* Info32 */ + 0, /* Info16 */ + completionStatus, + NV9072_NOTIFIERS_NOTIFY_ON_VBLANK /* Index */); + return status; + } + return NV9072_NOTIFICATION_STATUS_DONE_SUCCESS; +} diff --git a/src/nvidia/src/kernel/gpu/arch/ampere/kern_gpu_ga100.c b/src/nvidia/src/kernel/gpu/arch/ampere/kern_gpu_ga100.c new file mode 100644 index 000000000..74c989a99 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/arch/ampere/kern_gpu_ga100.c @@ -0,0 +1,281 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "published/ampere/ga100/dev_fb.h" +#include "published/ampere/ga100/dev_vm.h" + +/*! + * @brief Clear FBHUB POISON Interrupt state for Bug 2924523. + * This HAL handles the CPU interrupt tree + * + * @param[in] pGpu OBJGPU pointer + * + * @return NV_OK if success, else appropriate NV_STATUS code + */ +NV_STATUS +gpuClearFbhubPoisonIntrForBug2924523_GA100_KERNEL +( + OBJGPU *pGpu +) +{ + NvU32 intrVector = NV_PFB_FBHUB_POISON_INTR_VECTOR_HW_INIT; + NvU32 reg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + NvU32 bit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector); + NvV32 intr = 0; + + if (pGpu == NULL) + return NV_OK; + + // + // Check if FBHUB Poison interrupt got triggered before RM Init due + // to VBIOS IFR on GA100. If yes, clear the FBHUB Interrupt. This WAR is + // required for Bug 2924523 as VBIOS IFR causes FBHUB Poison intr. + // + intr = GPU_VREG_RD32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(reg), + NULL) & + NVBIT(bit); + + if (intr != 0) + { + NV_PRINTF(LEVEL_ERROR, "FBHUB Interrupt detected = 0x%X. Clearing it.\n", intr); + + // Clear FBHUB Poison interrupt + GPU_VREG_WR32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(reg), + NVBIT(bit), + NULL); + } + + return NV_OK; +} + +/*! 
+ * @brief Returns FLA VASpace Size for Ampere + * + * @param[in] pGpu OBJGPU pointer + * @param[in] bNvSwitchVirtualization boolean + * + * @returns NvU64 -> size of FLA VASpace + */ +NvU64 +gpuGetFlaVasSize_GA100 +( + POBJGPU pGpu, + NvBool bNvswitchVirtualization +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 totalFbSize = (pMemoryManager->Ram.fbTotalMemSizeMb << 20); + + if (bNvswitchVirtualization || totalFbSize <= NVBIT64(36)) + { + return 0x2000000000; // 128GB + } + else + { + return (totalFbSize * 2); + } +} + +// +// List of GPU children that present for the chip. List entries contain$ +// {CLASS-ID, # of instances} pairs, e.g.: {CE, 2} is 2 instance of OBJCE. This$ +// list controls only engine presence. Order is defined by$ +// gpuGetChildrenOrder_HAL.$ +// +// IMPORTANT: This function is to be deleted. Engine removal should instead be$ +// handled by ConstructEngine returning NV_ERR_NOT_SUPPORTED. PLEASE DO NOT$ +// FORK THIS LIST!$ +// +// List entries contain {CLASS-ID, # of instances} pairs. +// + +static const GPUCHILDPRESENT gpuChildrenPresent_GA100[] = +{ + { classId(OBJSWENG), 1 }, + { classId(OBJUVM), 1 }, + { classId(OBJACR), 1 }, + { classId(OBJBIF), 1 }, + { classId(KernelBif), 1 }, + { classId(OBJBSP), 5 }, + { classId(OBJBUS), 1 }, + { classId(KernelBus), 1 }, + { classId(OBJCE), 10 }, + { classId(KernelCE), 10 }, + { classId(OBJCIPHER), 1 }, + { classId(ClockManager), 1 }, + { classId(OBJDISP), 1 }, + { classId(KernelDisplay), 1 }, + { classId(VirtMemAllocator), 1 }, + { classId(OBJDPAUX), 1 }, + { classId(OBJFAN), 1 }, + { classId(OBJHSHUB), 2 }, + { classId(MemorySystem), 1 }, + { classId(KernelMemorySystem), 1 }, + { classId(MemoryManager), 1 }, + { classId(OBJFBFLCN), 1 }, + { classId(KernelFifo), 1 }, + { classId(OBJFIFO), 1 }, + { classId(OBJGMMU), 1 }, + { classId(KernelGmmu), 1}, + { classId(OBJGPULOG), 1 }, + { classId(OBJGPUMON), 1 }, + { classId(GraphicsManager), 1 }, + { classId(MIGManager), 1}, + { classId(KernelMIGManager), 1 }, + { classId(KernelGraphicsManager), 1 }, + { classId(Graphics), 8 }, + { classId(KernelGraphics), 8 }, + { classId(OBJHDACODEC), 1 }, + { classId(OBJHWPM), 1 }, + { classId(OBJINFOROM), 1 }, + { classId(Intr), 1 }, + { classId(Lpwr ), 1 }, + { classId(OBJLSFM), 1 }, + { classId(OBJMC), 1 }, + { classId(KernelMc), 1 }, + { classId(PrivRing), 1 }, + { classId(SwIntr), 1 }, + { classId(OBJNVJPG), 1 }, + { classId(NvDebugDump), 1 }, + { classId(KernelNvlink), 1 }, + { classId(Nvlink), 1 }, + { classId(Perf), 1 }, + { classId(KernelPerf), 1 }, + { classId(Pmgr), 1 }, + { classId(Pmu), 1 }, + { classId(KernelPmu), 1 }, + { classId(OBJSEC2), 1 }, + { classId(Gsp), 1 }, + { classId(Therm), 1 }, + { classId(OBJTMR), 1 }, + { classId(OBJVOLT), 1 }, + { classId(OBJGRIDDISPLAYLESS), 1 }, + { classId(OBJFAS), 1 }, + { classId(OBJVMMU), 1 }, + { classId(OBJOFA), 1 }, + { classId(KernelNvdec), 1 }, + { classId(KernelSec2), 1 }, + { classId(KernelGsp), 1 }, +}; + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_GA100(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildrenPresent_GA100); + return gpuChildrenPresent_GA100; +} + +// +// List of GPU children that present for the chip. List entries contain$ +// {CLASS-ID, # of instances} pairs, e.g.: {CE, 2} is 2 instance of OBJCE. This$ +// list controls only engine presence. Order is defined by$ +// gpuGetChildrenOrder_HAL.$ +// +// IMPORTANT: This function is to be deleted. 
Engine removal should instead be$ +// handled by ConstructEngine returning NV_ERR_NOT_SUPPORTED. PLEASE DO NOT$ +// FORK THIS LIST!$ +// +// List entries contain {CLASS-ID, # of instances} pairs. +// +static const GPUCHILDPRESENT gpuChildrenPresent_GA102[] = +{ + {classId(OBJSWENG), 1}, + {classId(OBJUVM), 1}, + {classId(OBJACR), 1}, + {classId(OBJBIF), 1}, + {classId(KernelBif), 1}, + {classId(OBJNNE), 1}, + {classId(OBJBSP), 2}, + {classId(OBJBUS), 1}, + {classId(KernelBus), 1}, + {classId(OBJCE), 5}, + {classId(KernelCE), 5}, + {classId(OBJCIPHER), 1}, + {classId(ClockManager), 1}, + {classId(OBJDISP), 1}, + {classId(KernelDisplay), 1}, + {classId(VirtMemAllocator), 1}, + {classId(OBJDPAUX), 1}, + {classId(OBJFAN), 1}, + {classId(OBJHSHUB), 2 }, + {classId(MemorySystem), 1}, + {classId(KernelMemorySystem), 1}, + {classId(MemoryManager), 1}, + {classId(OBJFBFLCN), 1}, + {classId(KernelFifo), 1 }, + {classId(OBJFIFO), 1}, + {classId(OBJGMMU), 1}, + {classId(KernelGmmu), 1}, + {classId(OBJGPULOG), 1}, + {classId(OBJGPUMON), 1}, + {classId(GraphicsManager), 1 }, + {classId(MIGManager), 1}, + {classId(KernelMIGManager), 1}, + {classId(KernelGraphicsManager), 1}, + {classId(Graphics), 1}, + {classId(KernelGraphics), 1}, + {classId(OBJHDACODEC), 1}, + {classId(OBJHWPM), 1}, + {classId(OBJINFOROM), 1}, + {classId(Intr), 1}, + {classId(Lpwr ), 1}, + {classId(OBJLSFM), 1}, + {classId(OBJMC), 1}, + {classId(KernelMc), 1}, + {classId(PrivRing), 1}, + {classId(SwIntr), 1}, + {classId(OBJMSENC), 1}, + {classId(NvDebugDump), 1}, + {classId(KernelNvlink), 1}, + {classId(Nvlink), 1}, + {classId(Perf), 1}, + {classId(KernelPerf), 1 }, + {classId(Pmgr), 1}, + {classId(Pmu), 1}, + {classId(KernelPmu), 1}, + {classId(OBJSEC2), 1}, + {classId(Gsp), 1}, + {classId(Therm), 1}, + {classId(OBJTMR), 1}, + {classId(OBJVOLT), 1}, + {classId(OBJGRIDDISPLAYLESS), 1}, + {classId(OBJFAS), 1}, + {classId(OBJVMMU), 1}, + {classId(OBJOFA), 1 }, + {classId(KernelNvdec), 1}, + {classId(KernelSec2), 1}, + {classId(KernelGsp), 1}, +}; + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_GA102(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildrenPresent_GA102); + return gpuChildrenPresent_GA102; +} + diff --git a/src/nvidia/src/kernel/gpu/arch/maxwell/kern_gpu_gm107.c b/src/nvidia/src/kernel/gpu/arch/maxwell/kern_gpu_gm107.c new file mode 100644 index 000000000..f12fdd2cd --- /dev/null +++ b/src/nvidia/src/kernel/gpu/arch/maxwell/kern_gpu_gm107.c @@ -0,0 +1,506 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "gpu/bif/kernel_bif.h" + +#include "published/maxwell/gm107/dev_bus.h" +#include "published/maxwell/gm107/dev_nv_xve.h" +#include "published/maxwell/gm107/dev_nv_xve1.h" + + +/*! + * @brief gpuReadBusConfigRegEx_GM107 + * + * param[in] pGpu The GPU object pointer + * param[in] index NvU32 + * param[in] *data NvU32 * + * param[in] pThreadState thread state node pointer + */ +NV_STATUS +gpuReadBusConfigRegEx_GM107 +( + OBJGPU *pGpu, + NvU32 index, + NvU32 *data, + THREAD_STATE_NODE *pThreadState +) +{ + + if (index > (PCIE_CONFIG_SPACE_SIZE - sizeof(NvU32))) + { + NV_PRINTF(LEVEL_ERROR, + "Offset 0x%08x exceeds range!\n", + index); + NV_ASSERT(0); + return NV_ERR_GENERIC; + } + + *data = GPU_REG_RD32_EX(pGpu, DEVICE_BASE(NV_PCFG) + index, pThreadState); + + return NV_OK; +} + +/*! + * @brief gpuReadBusConfigReg_GM107() + * + * param[in] pGpu The GPU object pointer + * param[in] index NvU32 + * param[in] *data NvU32 * + */ +NV_STATUS +gpuReadBusConfigReg_GM107 +( + OBJGPU *pGpu, + NvU32 index, + NvU32 *data +) +{ + return gpuReadBusConfigRegEx_HAL(pGpu, index, data, NULL); +} + +/*! + * @brief gpuWriteBusConfigReg_GM107 + * + * param[in] pGpu The GPU object pointer + * param[in] index NvU32 + * param[in] value NvU32 + */ +NV_STATUS +gpuWriteBusConfigReg_GM107 +( + OBJGPU *pGpu, + NvU32 index, + NvU32 value +) +{ + + if (index > (PCIE_CONFIG_SPACE_SIZE - sizeof(NvU32))) + { + NV_PRINTF(LEVEL_ERROR, + "Offset 0x%08x exceeds range!\n", + index); + NV_ASSERT(0); + return NV_ERR_GENERIC; + } + + GPU_REG_WR32(pGpu, DEVICE_BASE(NV_PCFG) + index, value); + + return NV_OK; +} + +NV_STATUS +gpuReadFunctionConfigReg_GM107 +( + OBJGPU *pGpu, + NvU32 function, + NvU32 index, + NvU32 *data +) +{ + NvU32 retval; + + if (index > (PCIE_CONFIG_SPACE_SIZE - sizeof(NvU32))) + { + NV_PRINTF(LEVEL_ERROR, "Offset 0x%08x exceeds range!\n", index); + + return NV_ERR_GENERIC; + } + + switch (function) + { + case 0: + { + retval = GPU_REG_RD32(pGpu, DEVICE_BASE(NV_PCFG) + index); + break; + } + + case 1: + { + if (IS_FMODEL(pGpu)) + { + // + // Function 1 is not modeled on fmodel + // + *data = 0; + return NV_OK; + } + else + { + retval = GPU_REG_RD32(pGpu, DEVICE_BASE(NV_PCFG1) + index); + } + break; + } + + default: + NV_PRINTF(LEVEL_ERROR, + "attempt to read cfg space of non-existant function %x\n", + function); + return NV_ERR_GENERIC; + } + + *data = retval; + return NV_OK; +} + + +NV_STATUS +gpuWriteFunctionConfigReg_GM107 +( + OBJGPU *pGpu, + NvU32 function, + NvU32 index, + NvU32 data +) +{ + return gpuWriteFunctionConfigRegEx_HAL(pGpu, function, index, data, + NULL /* threadstate */); +} + +NV_STATUS +gpuWriteFunctionConfigRegEx_GM107 +( + OBJGPU *pGpu, + NvU32 function, + NvU32 index, + NvU32 data, + THREAD_STATE_NODE *pThreadState +) +{ + if (index > (PCIE_CONFIG_SPACE_SIZE - sizeof(NvU32))) + { + NV_PRINTF(LEVEL_ERROR, "Offset 0x%08x exceeds range!\n", index); + + return NV_ERR_INVALID_ARGUMENT; + } + + switch (function) + { + case 0: + { + GPU_REG_WR32_EX(pGpu, DEVICE_BASE(NV_PCFG) + index, data, pThreadState); + break; + } + + case 1: + { + // + // Function 1 is not modeled on fmodel + // + if (!IS_FMODEL(pGpu)) + { + GPU_REG_WR32_EX(pGpu, DEVICE_BASE(NV_PCFG1) + index, data, pThreadState); + } + 
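+            // (On fmodel the function-1 write is skipped entirely, mirroring the
+            // read path above, which returns 0 for function 1.)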
break; + } + + default: + NV_PRINTF(LEVEL_ERROR, + "attempt to read cfg space of non-existant function %x\n", + function); + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +/*! + * @brief Perform gpu-dependent error handling for error during register read sanity check + * + * @param[in] pGpu GPU object pointer + * @param[in] addr Value address + * @param[in] value Value read during check + */ +void +gpuHandleSanityCheckRegReadError_GM107 +( + OBJGPU *pGpu, + NvU32 addr, + NvU32 value +) +{ +#if NV_PRINTF_ENABLED + // + // Read the interrupt status using the direct OS reg read call so we don't recurs + // if we happen to see GPU_READ_PRI_ERROR_CODE there as well (bug 799876) + // + NvU32 intr = osGpuReadReg032(pGpu, NV_PBUS_INTR_0); + + // To be sure, filter this down further by checking the related pri interrupts: + if (FLD_TEST_DRF(_PBUS, _INTR_0, _PRI_SQUASH, _PENDING, intr) || + FLD_TEST_DRF(_PBUS, _INTR_0, _PRI_FECSERR, _PENDING, intr) || + FLD_TEST_DRF(_PBUS, _INTR_0, _PRI_TIMEOUT, _PENDING, intr)) + { +#if NV_PRINTF_STRINGS_ALLOWED + const char *errorString = "Unknown SYS_PRI_ERROR_CODE"; + + gpuGetSanityCheckRegReadError_HAL(pGpu, value, + &errorString); + NV_PRINTF(LEVEL_ERROR, + "Possible bad register read: addr: 0x%x, regvalue: 0x%x, error code: %s\n", + addr, value, errorString); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_ERROR, + "Possible bad register read: addr: 0x%x, regvalue: 0x%x\n", + addr, value); +#endif // NV_PRINTF_STRINGS_ALLOWED + } +#endif // NV_PRINTF_ENABLED +} + +void +gpuGetIdInfo_GM107(OBJGPU *pGpu) +{ + NvU32 data; + + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_REV_ID, &data)) + { + NV_PRINTF(LEVEL_ERROR, "unable to read NV_XVE_REV_ID\n"); + return; + } + + // we only need the FIB and MASK values + pGpu->idInfo.PCIRevisionID = (data & ~GPU_DRF_SHIFTMASK(NV_XVE_REV_ID_CLASS_CODE)); + + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_SUBSYSTEM, &data)) + { + NV_PRINTF(LEVEL_ERROR, "unable to read NV_XVE_SUBSYSTEM\n"); + return; + } + pGpu->idInfo.PCISubDeviceID = data; + + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_ID, &data)) + { + NV_PRINTF(LEVEL_ERROR, "unable to read NV_XVE_ID\n"); + return; + } + + pGpu->idInfo.PCIDeviceID = data; + +} + +// GM200 used on all later GPUs + +// +// Lists the order of GPU children for engine state transitions (StateInit, StateLoad, +// StateUnload and StateDestroy). This list controls only the engine order. Engine +// presence is defined by gpuGetChildrenPresent_HAL. Engines in this list that aren't in the +// gpuGetChildrenPresent_HAL list are ignored. +// +// List entries contain {CLASS-ID, flags} pairs. +// +// Valid flags are: +// GCO_ALL - entry is used for all list types +// GCO_LIST_INIT - entry is used for init ordering (DO NOT USE) +// GCO_LIST_LOAD - entry is used for load and postload ordering (DO NOT USE) +// GCO_LIST_UNLOAD - entry is used for unload and preunload ordering (DO NOT USE) +// GCO_LIST_DESTROY - entry is used for destroy order (DO NOT USE) +// +// For UNLOAD and DESTROY the list is played back in reverse from LOAD and INIT. +// +// IMPORTANT: +// <1> GCO_ALL is the recommended flag to use for all engine types. Engines should +// always have a consist order. If there are complicated dependencies that cannot +// be resolved using this list, please use callbacks (such as fifoAddSchedulingHandler) +// <1> DO NOT FORK THIS LIST. The goal is to have a single ordered list across all +// chips. 
Inconsistent ordering makes it challenging to modify shared code to work +// across all variations. +// +static const GPUCHILDORDER +gpuChildOrderList_GM200[] = +{ + {classId(OBJBIF), GCO_ALL}, + {classId(KernelBif), GCO_ALL}, + {classId(OBJNNE), GCO_ALL}, + {classId(NvDebugDump), GCO_ALL}, + {classId(ClockManager), GCO_ALL}, + {classId(Pmgr), GCO_ALL}, + {classId(OBJVOLT), GCO_ALL}, + {classId(OBJMC), GCO_ALL}, + {classId(KernelMc), GCO_ALL}, + {classId(PrivRing), GCO_ALL}, + {classId(SwIntr), GCO_ALL}, + {classId(Intr), GCO_ALL}, + {classId(OBJTMR), GCO_ALL}, + {classId(Therm), GCO_ALL}, + {classId(OBJHSHUB), GCO_ALL}, + {classId(MemorySystem), GCO_ALL}, + {classId(KernelMemorySystem), GCO_ALL}, + {classId(MemoryManager), GCO_ALL}, + {classId(Nvlink), GCO_ALL}, + {classId(KernelNvlink), GCO_ALL}, + {classId(OBJHDACODEC), GCO_ALL}, + {classId(OBJGMMU), GCO_ALL}, + {classId(KernelGmmu), GCO_ALL}, + {classId(OBJVMMU), GCO_ALL}, + {classId(KernelNvdec), GCO_ALL}, + {classId(KernelSec2), GCO_ALL}, + {classId(KernelGsp), GCO_ALL}, + {classId(OBJBUS), GCO_ALL}, + {classId(KernelBus), GCO_ALL}, + {classId(OBJLSFM), GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY}, // LOAD LSFM must be before ACR and any managed Falcon. + {classId(OBJACR), GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY}, + {classId(Pmu), GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY}, + {classId(KernelPmu), GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY}, + {classId(Gsp), GCO_ALL}, + {classId(OBJFSP), GCO_ALL}, + {classId(KernelFsp), GCO_ALL}, + {classId(OBJFBFLCN), GCO_ALL}, + {classId(Lpwr ), GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY}, + {classId(Perf), GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY}, // LOAD Perf is after PMU for perfmon_sampling to work + {classId(KernelPerf), GCO_LIST_LOAD | GCO_LIST_UNLOAD | GCO_LIST_DESTROY}, + {classId(OBJDISP), GCO_LIST_DESTROY}, + {classId(KernelDisplay), GCO_LIST_DESTROY}, + {classId(OBJHDA), GCO_LIST_DESTROY}, + {classId(OBJFAN), GCO_LIST_DESTROY}, + {classId(VirtMemAllocator), GCO_ALL}, + {classId(OBJDISP), GCO_LIST_INIT}, + {classId(KernelDisplay), GCO_LIST_INIT}, + {classId(OBJHDA), GCO_LIST_INIT}, + {classId(OBJFAN), GCO_LIST_INIT}, + {classId(GraphicsManager), GCO_ALL}, + {classId(MIGManager), GCO_ALL}, + {classId(KernelMIGManager), GCO_ALL}, + {classId(KernelGraphicsManager), GCO_ALL}, + {classId(Graphics), GCO_ALL}, // INIT GR has to be initialized before LSFM because + // the ucode image pointers needed by LSFM are only + // known after GR has loaded the netlist. + + {classId(KernelGraphics), GCO_ALL}, + {classId(OBJLSFM), GCO_LIST_INIT}, + {classId(OBJACR), GCO_LIST_INIT}, + {classId(Pmu), GCO_LIST_INIT}, + {classId(KernelPmu), GCO_LIST_INIT}, + {classId(Lpwr ), GCO_LIST_INIT}, + {classId(Perf), GCO_LIST_INIT}, + {classId(KernelPerf), GCO_LIST_INIT}, + {classId(OBJBSP), GCO_ALL}, + {classId(OBJCIPHER), GCO_ALL}, + {classId(OBJDISP), GCO_LIST_LOAD | GCO_LIST_UNLOAD}, // LOAD Display is *after* cipher so that hdcp keys can be loaded . + {classId(KernelDisplay), GCO_LIST_LOAD | GCO_LIST_UNLOAD}, // LOAD Display is *after* cipher so that hdcp keys can be loaded . 
+ {classId(OBJHDA), GCO_LIST_LOAD | GCO_LIST_UNLOAD}, + {classId(OBJFAN), GCO_LIST_LOAD | GCO_LIST_UNLOAD}, + {classId(OBJCE), GCO_ALL}, + {classId(KernelCE), GCO_ALL}, + {classId(OBJMSENC), GCO_ALL}, + {classId(OBJNVJPG), GCO_ALL}, + {classId(OBJOFA), GCO_ALL}, + {classId(OBJSEC2), GCO_ALL}, + {classId(KernelFifo), GCO_ALL}, + {classId(OBJFIFO), GCO_ALL}, + {classId(OBJDPAUX), GCO_ALL}, + {classId(OBJINFOROM), GCO_ALL}, + {classId(OBJUVM), GCO_ALL}, + {classId(OBJGPULOG), GCO_LIST_INIT | GCO_LIST_LOAD}, + {classId(OBJGPUMON), GCO_ALL}, + {classId(OBJGPULOG), GCO_LIST_UNLOAD | GCO_LIST_DESTROY}, + {classId(OBJHWPM), GCO_ALL}, + {classId(OBJSWENG), GCO_ALL}, + {classId(OBJGRIDDISPLAYLESS), GCO_ALL}, +}; + +const GPUCHILDORDER * +gpuGetChildrenOrder_GM200(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildOrderList_GM200); + return gpuChildOrderList_GM200; +} + +// +// List of GPU children that present for the chip. List entries contain +// {CLASS-ID, # of instances} pairs, e.g.: {CE, 2} is 2 instance of OBJCE. This +// list controls only engine presence. Order is defined by +// gpuGetChildrenOrder_HAL. +// +// IMPORTANT: This function is to be deleted. Engine removal should instead be +// handled by ConstructEngine returning NV_ERR_NOT_SUPPORTED. PLEASE DO NOT +// FORK THIS LIST! +// +// List entries contain {CLASS-ID, # of instances} pairs. +// +static const GPUCHILDPRESENT gpuChildrenPresent_GM200[] = +{ + {classId(OBJSWENG), 1}, + {classId(OBJACR), 1}, + {classId(OBJBIF), 1}, + {classId(KernelBif), 1}, + {classId(OBJBSP), 1}, + {classId(OBJBUS), 1}, + {classId(KernelBus), 1}, + {classId(OBJCE), 3}, + {classId(KernelCE), 3}, + {classId(OBJCIPHER), 1}, + {classId(ClockManager), 1}, + {classId(OBJDISP), 1}, + {classId(KernelDisplay), 1}, + {classId(VirtMemAllocator), 1}, + {classId(OBJDPAUX), 1}, + {classId(OBJFAN), 1}, + {classId(OBJHSHUB), 1}, + {classId(MemorySystem), 1}, + {classId(KernelMemorySystem), 1}, + {classId(MemoryManager), 1}, + {classId(KernelFifo), 1}, + {classId(OBJFIFO), 1}, + {classId(OBJGMMU), 1}, + {classId(KernelGmmu), 1}, + {classId(OBJGPULOG), 1}, + {classId(OBJGPUMON), 1}, + {classId(GraphicsManager), 1}, + {classId(MIGManager), 1}, + {classId(KernelMIGManager), 1}, + {classId(KernelGraphicsManager), 1}, + {classId(Graphics), 1}, + {classId(KernelGraphics), 1}, + {classId(OBJHDA), 1}, + {classId(OBJHDACODEC), 1}, + {classId(OBJHWPM), 1}, + {classId(OBJINFOROM), 1}, + {classId(Intr), 1}, + {classId(Lpwr ), 1}, + {classId(OBJLSFM), 1}, + {classId(OBJMC), 1}, + {classId(KernelMc), 1}, + {classId(PrivRing), 1}, + {classId(SwIntr), 1}, + {classId(OBJMSENC), 2}, + {classId(NvDebugDump), 1}, + {classId(Perf), 1}, + {classId(KernelPerf), 1}, + {classId(Pmgr), 1}, + {classId(Pmu), 1}, + {classId(KernelPmu), 1}, + {classId(OBJSEC2), 1}, + {classId(Therm), 1}, + {classId(OBJTMR), 1}, + {classId(OBJVOLT), 1}, + {classId(OBJGRIDDISPLAYLESS), 1}, +}; + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_GM200(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildrenPresent_GM200); + return gpuChildrenPresent_GM200; +} + diff --git a/src/nvidia/src/kernel/gpu/arch/turing/kern_gpu_tu102.c b/src/nvidia/src/kernel/gpu/arch/turing/kern_gpu_tu102.c new file mode 100644 index 000000000..1352e072d --- /dev/null +++ b/src/nvidia/src/kernel/gpu/arch/turing/kern_gpu_tu102.c @@ -0,0 +1,295 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES + * SPDX-License-Identifier: MIT + * + * Permission is 
hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gpu/gpu.h"
+#include "published/turing/tu102/dev_vm.h"
+#include "published/turing/tu102/hwproject.h"
+
+/*!
+ * @brief Determines whether this GPU mode needs to be initialized with an offset
+ * to access the registers defined in dev_vm.ref.
+ *
+ * @param[in] pGpu OBJGPU pointer
+ *
+ * @returns NvU32 - physical offset needed to access virtual registers in host
+ */
+NvU32
+gpuGetVirtRegPhysOffset_TU102(OBJGPU *pGpu)
+{
+    // No offset is needed only in the case of a VF in SR-IOV
+    if (IS_VIRTUAL_WITH_SRIOV(pGpu))
+        return 0;
+    else
+        return DRF_BASE(NV_VIRTUAL_FUNCTION_FULL_PHYS_OFFSET);
+}
+
+/*!
+ * @brief Returns the physical address width for the given @ref NV_ADDRESS_SPACE
+ */
+NvU32 gpuGetPhysAddrWidth_TU102
+(
+    OBJGPU *pGpu,
+    NV_ADDRESS_SPACE addrSp
+)
+{
+    // Currently this function supports only sysmem addresses
+    NV_ASSERT_OR_RETURN(ADDR_SYSMEM == addrSp, 0);
+
+    return NV_CHIP_EXTENDED_SYSTEM_PHYSICAL_ADDRESS_BITS;
+}
+
+//
+// List of GPU children that are present for the chip. List entries contain
+// {CLASS-ID, # of instances} pairs, e.g.: {CE, 2} is 2 instances of OBJCE. This
+// list controls only engine presence. Order is defined by
+// gpuGetChildrenOrder_HAL.
+//
+// IMPORTANT: This function is to be deleted. Engine removal should instead be
+// handled by ConstructEngine returning NV_ERR_NOT_SUPPORTED. PLEASE DO NOT
+// FORK THIS LIST!
+//
+// List entries contain {CLASS-ID, # of instances} pairs.
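+//
+// Illustrative consumer sketch (hypothetical; the loop body and any struct
+// field layout are assumed for illustration only): callers obtain this table
+// through the HAL and walk it, roughly as:
+//
+//     NvU32 numEntries;
+//     const GPUCHILDPRESENT *pList = gpuGetChildrenPresent_TU102(pGpu, &numEntries);
+//
+//     for (NvU32 i = 0; i < numEntries; i++)
+//     {
+//         // each pList[i] names one child class and how many instances of it
+//         // the chip exposes
+//     }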
+// +static const GPUCHILDPRESENT gpuChildrenPresent_TU102[] = +{ + {classId(OBJSWENG), 1}, + {classId(OBJUVM), 1}, + {classId(OBJACR), 1}, + {classId(OBJBIF), 1}, + {classId(KernelBif), 1}, + {classId(OBJBSP), 1}, + {classId(OBJBUS), 1}, + {classId(KernelBus), 1}, + {classId(OBJCE), 9}, + {classId(KernelCE), 9}, + {classId(OBJCIPHER), 1}, + {classId(ClockManager), 1}, + {classId(OBJDISP), 1}, + {classId(KernelDisplay), 1}, + {classId(VirtMemAllocator), 1}, + {classId(OBJDPAUX), 1}, + {classId(OBJFAN), 1}, + {classId(OBJHSHUB), 1}, + {classId(MemorySystem), 1}, + {classId(KernelMemorySystem), 1}, + {classId(MemoryManager), 1}, + {classId(OBJFBFLCN), 1}, + {classId(KernelFifo), 1}, + {classId(OBJFIFO), 1}, + {classId(OBJGMMU), 1}, + {classId(KernelGmmu), 1}, + {classId(OBJGPULOG), 1}, + {classId(OBJGPUMON), 1}, + {classId(GraphicsManager), 1}, + {classId(MIGManager), 1}, + {classId(KernelMIGManager), 1}, + {classId(KernelGraphicsManager), 1}, + {classId(Graphics), 1}, + {classId(KernelGraphics), 1}, + {classId(OBJHDACODEC), 1}, + {classId(OBJHWPM), 1}, + {classId(OBJINFOROM), 1}, + {classId(Intr), 1}, + {classId(Lpwr ), 1}, + {classId(OBJLSFM), 1}, + {classId(OBJMC), 1}, + {classId(KernelMc), 1}, + {classId(PrivRing), 1}, + {classId(SwIntr), 1}, + {classId(OBJMSENC), 1}, + {classId(NvDebugDump), 1}, + {classId(KernelNvlink), 1}, + {classId(Nvlink), 1}, + {classId(Perf), 1}, + {classId(KernelPerf), 1}, + {classId(Pmgr), 1}, + {classId(Pmu), 1}, + {classId(KernelPmu), 1}, + {classId(OBJSEC2), 1}, + {classId(Gsp), 1}, + {classId(Therm), 1}, + {classId(OBJTMR), 1}, + {classId(OBJVOLT), 1}, + {classId(OBJGRIDDISPLAYLESS), 1}, + {classId(OBJFAS), 1}, + {classId(OBJVMMU), 1}, + {classId(KernelNvdec), 1}, + {classId(KernelSec2), 1}, + {classId(KernelGsp), 1}, +}; + +static const GPUCHILDPRESENT gpuChildrenPresent_TU104[] = +{ + {classId(OBJSWENG), 1}, + {classId(OBJUVM), 1}, + {classId(OBJACR), 1}, + {classId(OBJBIF), 1}, + {classId(KernelBif), 1}, + {classId(OBJBSP), 2}, + {classId(OBJBUS), 1}, + {classId(KernelBus), 1}, + {classId(OBJCE), 9}, + {classId(KernelCE), 9}, + {classId(OBJCIPHER), 1}, + {classId(ClockManager), 1}, + {classId(OBJDISP), 1}, + {classId(KernelDisplay), 1}, + {classId(VirtMemAllocator), 1}, + {classId(OBJDPAUX), 1}, + {classId(OBJFAN), 1}, + {classId(OBJHSHUB), 1}, + {classId(MemorySystem), 1}, + {classId(KernelMemorySystem), 1}, + {classId(MemoryManager), 1}, + {classId(OBJFBFLCN), 1}, + {classId(KernelFifo), 1}, + {classId(OBJFIFO), 1}, + {classId(OBJGMMU), 1}, + {classId(KernelGmmu), 1}, + {classId(OBJGPULOG), 1}, + {classId(OBJGPUMON), 1}, + {classId(GraphicsManager), 1}, + {classId(MIGManager), 1}, + {classId(KernelMIGManager), 1}, + {classId(KernelGraphicsManager), 1}, + {classId(Graphics), 1}, + {classId(KernelGraphics), 1}, + {classId(OBJHDACODEC), 1}, + {classId(OBJHWPM), 1}, + {classId(OBJINFOROM), 1}, + {classId(Intr), 1}, + {classId(Lpwr ), 1}, + {classId(OBJLSFM), 1}, + {classId(OBJMC), 1}, + {classId(KernelMc), 1}, + {classId(PrivRing), 1}, + {classId(SwIntr), 1}, + {classId(OBJMSENC), 1}, + {classId(NvDebugDump), 1}, + {classId(KernelNvlink), 1}, + {classId(Nvlink), 1}, + {classId(Perf), 1}, + {classId(KernelPerf), 1}, + {classId(Pmgr), 1}, + {classId(Pmu), 1}, + {classId(KernelPmu), 1}, + {classId(OBJSEC2), 1}, + {classId(Gsp), 1}, + {classId(Therm), 1}, + {classId(OBJTMR), 1}, + {classId(OBJVOLT), 1}, + {classId(OBJGRIDDISPLAYLESS), 1}, + {classId(OBJFAS), 1}, + {classId(OBJVMMU), 1}, + {classId(KernelNvdec), 1}, + {classId(KernelSec2), 1}, + 
{classId(KernelGsp), 1}, +}; + +static const GPUCHILDPRESENT gpuChildrenPresent_TU106[] = +{ + {classId(OBJSWENG), 1}, + {classId(OBJUVM), 1}, + {classId(OBJACR), 1}, + {classId(OBJBIF), 1}, + {classId(KernelBif), 1}, + {classId(OBJBSP), 3}, + {classId(OBJBUS), 1}, + {classId(KernelBus), 1}, + {classId(OBJCE), 9}, + {classId(KernelCE), 9}, + {classId(OBJCIPHER), 1}, + {classId(ClockManager), 1}, + {classId(OBJDISP), 1}, + {classId(KernelDisplay), 1}, + {classId(VirtMemAllocator), 1}, + {classId(OBJDPAUX), 1}, + {classId(OBJFAN), 1}, + {classId(MemorySystem), 1}, + {classId(KernelMemorySystem), 1}, + {classId(MemoryManager), 1}, + {classId(OBJFBFLCN), 1}, + {classId(KernelFifo), 1}, + {classId(OBJFIFO), 1}, + {classId(OBJGMMU), 1}, + {classId(KernelGmmu), 1}, + {classId(OBJGPULOG), 1}, + {classId(OBJGPUMON), 1}, + {classId(GraphicsManager), 1}, + {classId(MIGManager), 1}, + {classId(KernelMIGManager), 1}, + {classId(KernelGraphicsManager), 1}, + {classId(Graphics), 1}, + {classId(KernelGraphics), 1}, + {classId(OBJHDACODEC), 1}, + {classId(OBJHWPM), 1}, + {classId(OBJINFOROM), 1}, + {classId(Intr), 1}, + {classId(Lpwr ), 1}, + {classId(OBJLSFM), 1}, + {classId(OBJMC), 1}, + {classId(KernelMc), 1}, + {classId(PrivRing), 1}, + {classId(SwIntr), 1}, + {classId(OBJMSENC), 1}, + {classId(NvDebugDump), 1}, + {classId(KernelNvlink), 1}, + {classId(Nvlink), 1}, + {classId(Perf), 1}, + {classId(KernelPerf), 1}, + {classId(Pmgr), 1}, + {classId(Pmu), 1}, + {classId(KernelPmu), 1}, + {classId(OBJSEC2), 1}, + {classId(Gsp), 1}, + {classId(Therm), 1}, + {classId(OBJTMR), 1}, + {classId(OBJVOLT), 1}, + {classId(OBJGRIDDISPLAYLESS), 1}, + {classId(OBJFAS), 1}, + {classId(OBJVMMU), 1}, + {classId(KernelNvdec), 1}, + {classId(KernelSec2), 1}, + {classId(KernelGsp), 1}, +}; + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_TU102(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildrenPresent_TU102); + return gpuChildrenPresent_TU102; +} + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_TU104(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildrenPresent_TU104); + return gpuChildrenPresent_TU104; +} + +const GPUCHILDPRESENT * +gpuGetChildrenPresent_TU106(OBJGPU *pGpu, NvU32 *pNumEntries) +{ + *pNumEntries = NV_ARRAY_ELEMENTS32(gpuChildrenPresent_TU106); + return gpuChildrenPresent_TU106; +} + diff --git a/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c b/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c new file mode 100644 index 000000000..b3af417d6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/audio/hda_codec_api.c @@ -0,0 +1,34 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/audio/hda_codec_api.h" + +NV_STATUS hdacodecConstruct_IMPL +( + Hdacodec *pHdacodecApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/bif/arch/ampere/kernel_bif_ga100.c b/src/nvidia/src/kernel/gpu/bif/arch/ampere/kernel_bif_ga100.c new file mode 100644 index 000000000..d522e3384 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bif/arch/ampere/kernel_bif_ga100.c @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* ------------------------ Includes ---------------------------------------- */ +#include "gpu/bif/kernel_bif.h" + + +/* ------------------------ Public Functions -------------------------------- */ + +/*! + * @brief Apply WAR for bug 3208922 - disable P2P on Ampere NB + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + */ +void +kbifApplyWARBug3208922_GA100 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + if (IsMobile(pGpu)) + { + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED, NV_TRUE); + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED, NV_TRUE); + } +} diff --git a/src/nvidia/src/kernel/gpu/bif/arch/maxwell/kernel_bif_gm107.c b/src/nvidia/src/kernel/gpu/bif/arch/maxwell/kernel_bif_gm107.c new file mode 100644 index 000000000..08e503cd5 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bif/arch/maxwell/kernel_bif_gm107.c @@ -0,0 +1,632 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* ------------------------- System Includes -------------------------------- */ +#include "gpu/gpu.h" +#include "gpu/bif/kernel_bif.h" +#include "platform/chipset/chipset.h" + +#include "published/maxwell/gm107/dev_nv_xve.h" + +// Defines for C73 chipset registers +#ifndef NV_XVR_VEND_XP1 +#define NV_XVR_VEND_XP1 0x00000F04 /* RW-4R */ + +#define NV_XVR_VEND_XP1_IGNORE_L0S 23:23 /* RWIVF */ +#define NV_XVR_VEND_XP1_IGNORE_L0S_INIT 0x00000000 /* RWI-V */ +#define NV_XVR_VEND_XP1_IGNORE_L0S__PROD 0x00000000 /* RW--V */ +#define NV_XVR_VEND_XP1_IGNORE_L0S_EN 0x00000001 /* RW--V */ +#endif + + +/* ------------------------ Public Functions -------------------------------- */ + +/*! + * @brief Get PCIe config test registers + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + */ +void +kbifGetPcieConfigAccessTestRegisters_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 *pciStart, + NvU32 *pcieStart +) +{ + *pciStart = NV_XVE_ID; + *pcieStart = NV_XVE_VCCAP_HDR; +} + +/*! + * @brief Verify PCIe config test registers + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * + * @return NV_OK + */ +NV_STATUS +kbifVerifyPcieConfigAccessTestRegisters_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 nvXveId, + NvU32 nvXveVccapHdr +) +{ + NvU32 data; + + GPU_BUS_CFG_RD32(pGpu, NV_XVE_ID, &data); + + if (FLD_TEST_DRF(_XVE, _ID, _VENDOR, _NVIDIA, data)) + { + if (data != nvXveId) + return NV_ERR_NOT_SUPPORTED; + + GPU_BUS_CFG_RD32(pGpu, NV_XVE_VCCAP_HDR, &data); + + if (FLD_TEST_DRF(_XVE, _VCCAP_HDR, _ID, _VC, data) && + FLD_TEST_DRF(_XVE, _VCCAP_HDR, _VER, _1, data)) + { + if (data != nvXveVccapHdr) + return NV_ERR_NOT_SUPPORTED; + return NV_OK; + } + } + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Re-arm MSI + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif Kernel BIF object pointer + */ +void +kbifRearmMSI_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NV_STATUS status = gpuSanityCheckRegisterAccess(pGpu, 0, NULL); + + if (status != NV_OK) + { + return; + } + + // The 32 byte value doesn't matter, HW only looks at the offset. + osGpuWriteReg032(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_CYA_2, 0); +} + +/*! 
+ * @brief Check if MSI is enabled in HW + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * + * @return True if MSI enabled else False + */ +NvBool +kbifIsMSIEnabledInHW_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 data32; + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_MSI_CTRL, &data32)) + { + NV_PRINTF(LEVEL_ERROR, "unable to read NV_XVE_MSI_CTRL\n"); + } + + return FLD_TEST_DRF(_XVE, _MSI_CTRL, _MSI, _ENABLE, data32); +} + +/*! + * @brief Check if access to PCI config space is enabled + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif Kernel BIF object pointer + * + * @return True if access to PCI config space is enabled + */ +NvBool +kbifIsPciIoAccessEnabled_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 data = 0; + + if (NV_OK == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEV_CTRL, &data)) + { + if (FLD_TEST_DRF(_XVE, _DEV_CTRL, _CMD_IO_SPACE, _ENABLED, data)) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/*! + * @brief Check if device is a 3D controller + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif Kernel BIF object pointer + * + * @return True if device is a 3D controller + */ +NvBool +kbifIs3dController_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 data = 0; + + if (NV_OK == GPU_BUS_CFG_RD32(pGpu, NV_XVE_REV_ID, &data)) + { + if (FLD_TEST_DRF(_XVE, _REV_ID, _CLASS_CODE, _3D, data)) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/*! + * @brief Enable/disable no snoop for GPU + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif Kernel BIF object pointer + * @param[in] bEnable True if No snoop needs to be enabled + * + * @return NV_OK If no snoop modified as requested + */ +NV_STATUS +kbifEnableNoSnoop_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvBool bEnable +) +{ + NvU8 fieldVal; + NvU32 regVal; + + regVal = GPU_REG_RD32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_DEVICE_CONTROL_STATUS); + + fieldVal = bEnable ? 1 : 0; + regVal = FLD_SET_DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, + _ENABLE_NO_SNOOP, fieldVal, regVal); + + GPU_REG_WR32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_DEVICE_CONTROL_STATUS, regVal); + + return NV_OK; +} + +/*! + * @brief Enables Relaxed Ordering PCI-E Capability in the PCI Config Space + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif Kernel BIF object pointer + */ +void +kbifPcieConfigEnableRelaxedOrdering_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 xveDevCtrlStatus; + + if(NV_ERR_GENERIC == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus)) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n"); + DBG_BREAKPOINT(); + } + else + { + GPU_BUS_CFG_FLD_WR_DRF_DEF(pGpu, xveDevCtrlStatus, _XVE, _DEVICE_CONTROL_STATUS, + _ENABLE_RELAXED_ORDERING, _INIT); + } +} + +/*! 
+ * @brief Disables Relaxed Ordering PCI-E Capability in the PCI Config Space + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif Kernel BIF object pointer + */ +void +kbifPcieConfigDisableRelaxedOrdering_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 xveDevCtrlStatus; + + if(NV_ERR_GENERIC == GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus)) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n"); + DBG_BREAKPOINT(); + } + else + { + xveDevCtrlStatus = FLD_SET_DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, + _ENABLE_RELAXED_ORDERING, 0, xveDevCtrlStatus); + GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, xveDevCtrlStatus); + } +} + +/*! + * @brief Get XVE status bits + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * @param[out] pBits PCIe error status values + * @param[out] pStatus Full XVE status + * + * @return NV_OK + */ +NV_STATUS +kbifGetXveStatusBits_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 *pBits, + NvU32 *pStatus +) +{ + // control/status reg + NvU32 xveDevCtrlStatus; + + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus)) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n"); + } + if ( pBits == NULL ) + return NV_ERR_GENERIC; + + *pBits = 0; + + // The register read above returns garbage on fmodel, so just return. + if (IS_FMODEL(pGpu)) + { + if (pStatus) + { + *pStatus = 0; + } + return NV_OK; + } + + if (pStatus) + *pStatus = xveDevCtrlStatus; + + if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _CORR_ERROR_DETECTED, 1)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR; + if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _NON_FATAL_ERROR_DETECTED, 1)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR; + if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _FATAL_ERROR_DETECTED, 1)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR; + if (xveDevCtrlStatus & DRF_NUM(_XVE, _DEVICE_CONTROL_STATUS, _UNSUPP_REQUEST_DETECTED, 1)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST; + + if (pKernelBif->EnteredRecoverySinceErrorsLastChecked) + { + pKernelBif->EnteredRecoverySinceErrorsLastChecked = NV_FALSE; + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_ENTERED_RECOVERY; + } + + return NV_OK; +} + +/*! + * @brief Clear the XVE status bits + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * @param[out] pStatus Full XVE status + * + * @return NV_OK + */ +NV_STATUS +kbifClearXveStatus_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 *pStatus +) +{ + NvU32 xveDevCtrlStatus; + + if (pStatus) + { + xveDevCtrlStatus = *pStatus; + if (xveDevCtrlStatus == 0) + { + return NV_OK; + } + } + else + { + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, &xveDevCtrlStatus)) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to read NV_XVE_DEVICE_CONTROL_STATUS!\n"); + } + } + + GPU_BUS_CFG_WR32(pGpu, NV_XVE_DEVICE_CONTROL_STATUS, xveDevCtrlStatus); + + return NV_OK; +} + +/*! 
+ * @brief Get XVE AER bits + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * @param[out] pBits PCIe AER error status values + * + * @return NV_OK + */ +NV_STATUS +kbifGetXveAerBits_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 *pBits +) +{ + NvU32 xveAerUncorr; + NvU32 xveAerCorr; + + if (pBits == NULL) + { + return NV_ERR_GENERIC; + } + + *pBits = 0; + + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_AER_UNCORR_ERR, &xveAerUncorr)) + { + NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_AER_UNCORR_ERR\n"); + return NV_ERR_GENERIC; + } + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_AER_CORR_ERR, &xveAerCorr)) + { + NV_PRINTF(LEVEL_ERROR, "Unable to read NV_XVE_AER_CORR_ERR\n"); + return NV_ERR_GENERIC; + } + + // The register read above returns garbage on fmodel, so just return. + if (IS_FMODEL(pGpu)) + { + return NV_OK; + } + + if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _DLINK_PROTO_ERR, _ACTIVE, xveAerUncorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR; + if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _POISONED_TLP, _ACTIVE, xveAerUncorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP; + if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _CPL_TIMEOUT, _ACTIVE, xveAerUncorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT; + if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _UNEXP_CPL, _ACTIVE, xveAerUncorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL; + if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _MALFORMED_TLP, _ACTIVE, xveAerUncorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP; + if (FLD_TEST_DRF(_XVE, _AER_UNCORR_ERR, _UNSUPPORTED_REQ, _ACTIVE, xveAerUncorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ; + + if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RCV_ERR, _ACTIVE, xveAerCorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR; + if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _BAD_TLP, _ACTIVE, xveAerCorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP; + if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _BAD_DLLP , _ACTIVE, xveAerCorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP; + if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RPLY_ROLLOVER, _ACTIVE, xveAerCorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER; + if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _RPLY_TIMEOUT, _ACTIVE, xveAerCorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT; + if (FLD_TEST_DRF(_XVE, _AER_CORR_ERR, _ADVISORY_NONFATAL, _ACTIVE, xveAerCorr)) + *pBits |= NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL; + + return NV_OK; +} + +/*! 
+ * @brief Clear the XVE AER bits + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * @param[in] bits PCIe AER error status values + * + * @return NV_OK + */ +NV_STATUS +kbifClearXveAer_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 bits +) +{ + NvU32 xveAerUncorr = 0; + NvU32 xveAerCorr = 0; + + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR) + xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _DLINK_PROTO_ERR, _CLEAR, xveAerUncorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP) + xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _POISONED_TLP, _CLEAR, xveAerUncorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT) + xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _CPL_TIMEOUT, _CLEAR, xveAerUncorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL) + xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _UNEXP_CPL, _CLEAR, xveAerUncorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP) + xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _MALFORMED_TLP, _CLEAR, xveAerUncorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ) + xveAerUncorr = FLD_SET_DRF(_XVE, _AER_UNCORR_ERR, _UNSUPPORTED_REQ, _CLEAR, xveAerUncorr); + + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR) + xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RCV_ERR, _CLEAR, xveAerCorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP) + xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _BAD_TLP, _CLEAR, xveAerCorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP) + xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _BAD_DLLP, _CLEAR, xveAerCorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER) + xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RPLY_ROLLOVER, _CLEAR, xveAerCorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT) + xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _RPLY_TIMEOUT, _CLEAR, xveAerCorr); + if (bits & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL) + xveAerCorr = FLD_SET_DRF(_XVE, _AER_CORR_ERR, _ADVISORY_NONFATAL, _CLEAR, xveAerCorr); + + if (xveAerUncorr != 0) + { + GPU_BUS_CFG_WR32(pGpu, NV_XVE_AER_UNCORR_ERR, xveAerUncorr); + } + if (xveAerCorr != 0) + { + GPU_BUS_CFG_WR32(pGpu, NV_XVE_AER_CORR_ERR, xveAerCorr); + } + + return NV_OK; +} + +/*! + * @brief Returns the BAR0 offset and size of the PCI config space mirror + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif Kernel BIF object pointer + * @param[out] pBase BAR0 offset of the PCI config space mirror + * @param[out] pSize Size in bytes of the PCI config space mirror + * + * @returns NV_OK + */ +NV_STATUS +kbifGetPciConfigSpacePriMirror_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 *pBase, + NvU32 *pSize +) +{ + *pBase = DEVICE_BASE(NV_PCFG); + *pSize = DEVICE_EXTENT(NV_PCFG) - DEVICE_BASE(NV_PCFG) + 1; + return NV_OK; +} + +/*! + * @brief C73 chipset WAR + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif Kernel BIF object pointer + */ +void +kbifExecC73War_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJCL *pCl = SYS_GET_CL(pSys); + NvU32 val; + + if (CS_NVIDIA_C73 == pCl->Chipset) + { + // + // Turn off L0s on the chipset which are required by the suspend/resume + // cycles in Vista. See bug 400044 for more details. 
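+        //
+        // The root port register is updated through one of two paths below:
+        // a direct CPU mapping of the root port's config space when the OS
+        // allows such mappings, or osPciReadDword()/osPciWriteDword() config
+        // cycles when PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS is set.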
+ // + + // vAddr is a mapped cpu virtual addr into the root ports config space. + if (!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS) && + (pGpu->gpuClData.rootPort.vAddr != 0)) + { + val = MEM_RD32((NvU8*)pGpu->gpuClData.rootPort.vAddr+NV_XVR_VEND_XP1); + val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val); + MEM_WR32((NvU8*)pGpu->gpuClData.rootPort.vAddr+NV_XVR_VEND_XP1, val); + } + else if (pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS) && + pGpu->gpuClData.rootPort.addr.valid) + { + val = osPciReadDword(pGpu->gpuClData.rootPort.addr.handle, NV_XVR_VEND_XP1); + val = FLD_SET_DRF(_XVR, _VEND_XP1, _IGNORE_L0S, _EN, val); + osPciWriteDword(pGpu->gpuClData.rootPort.addr.handle, NV_XVR_VEND_XP1, val); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Cannot turn off L0s on C73 chipset, suspend/resume may fail (Bug 400044).\n"); + DBG_BREAKPOINT(); + } + } +} + +NV_STATUS +kbifGetBusOptionsAddr_GM107 +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + BUS_OPTIONS options, + NvU32 *addrReg +) +{ + NV_STATUS status = NV_OK; + + switch (options) + { + case BUS_OPTIONS_DEV_CONTROL_STATUS: + *addrReg = NV_XVE_DEVICE_CONTROL_STATUS; + break; + case BUS_OPTIONS_LINK_CONTROL_STATUS: + *addrReg = NV_XVE_LINK_CONTROL_STATUS; + break; + case BUS_OPTIONS_LINK_CAPABILITIES: + *addrReg = NV_XVE_LINK_CAPABILITIES; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid register type passed 0x%x\n", + options); + status = NV_ERR_GENERIC; + break; + } + return status; +} diff --git a/src/nvidia/src/kernel/gpu/bif/arch/turing/kernel_bif_tu102.c b/src/nvidia/src/kernel/gpu/bif/arch/turing/kernel_bif_tu102.c new file mode 100644 index 000000000..b9c5514c8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bif/arch/turing/kernel_bif_tu102.c @@ -0,0 +1,87 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/* ------------------------- System Includes -------------------------------- */ +#include "gpu/bif/kernel_bif.h" +#include "gpu/gpu.h" + +#include "published/turing/tu102/dev_nv_xve.h" +#include "published/turing/tu102/dev_vm.h" + + +/* ------------------------ Public Functions -------------------------------- */ + +/*! 
+ * @brief Check if MSIX is enabled in HW + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * + * @return True if MSIX enabled else False + */ +NvBool +kbifIsMSIXEnabledInHW_TU102 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 data32; + + if (IS_VIRTUAL(pGpu)) + { + // SR-IOV guests only support MSI-X + return IS_VIRTUAL_WITH_SRIOV(pGpu); + } + else + { + if (GPU_BUS_CFG_RD32(pGpu, NV_XVE_MSIX_CAP_HDR, &data32) != NV_OK) + { + NV_ASSERT_FAILED("Unable to read NV_XVE_MSIX_CAP_HDR\n"); + return NV_FALSE; + } + return FLD_TEST_DRF(_XVE, _MSIX_CAP_HDR, _ENABLE, _ENABLED, data32); + } +} + +/*! + * @brief Disables P2P reads/writes on VF + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + */ +void +kbifDisableP2PTransactions_TU102 +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED, NV_TRUE); + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED, NV_TRUE); + } +} + diff --git a/src/nvidia/src/kernel/gpu/bif/kernel_bif.c b/src/nvidia/src/kernel/gpu/bif/kernel_bif.c new file mode 100644 index 000000000..6ff7a6c44 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bif/kernel_bif.c @@ -0,0 +1,1026 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/*! + * @file + * @brief Provides KERNEL only functions for OBJBIF + */ + +/* ------------------------ Includes ---------------------------------------- */ +#include "gpu/bif/kernel_bif.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/gpu.h" +#include "gpu/intr/intr.h" +#include "os/os.h" +#include "platform/chipset/chipset.h" +#include "core/locks.h" +#include "nvrm_registry.h" +#include "diagnostics/tracer.h" +#include "nvpcie.h" +#include "vgpu/vgpu_events.h" + +/* ------------------------ Macros ------------------------------------------ */ +/* ------------------------ Compile Time Checks ----------------------------- */ +/* ------------------------ Static Function Prototypes ---------------------- */ +static void _kbifInitRegistryOverrides(OBJGPU *, KernelBif *); +static void _kbifCheckIfGpuExists(OBJGPU *, void*); + + +/* ------------------------ Public Functions -------------------------------- */ + +/*! 
+ * @brief KernelBif Constructor + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * @param[in] engDesc Engine descriptor + */ +NV_STATUS +kbifConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + ENGDESCRIPTOR engDesc +) +{ + // Initialize registry overrides + _kbifInitRegistryOverrides(pGpu, pKernelBif); + + // WAR for Bug 3208922 - disables P2P on Ampere NB + kbifApplyWARBug3208922_HAL(pGpu, pKernelBif); + + // Disables P2P on VF + kbifDisableP2PTransactions_HAL(pGpu, pKernelBif); + + // Used to track when the link has gone into Recovery, which can cause CEs. + pKernelBif->EnteredRecoverySinceErrorsLastChecked = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief KernelBif Constructor + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + */ +NV_STATUS +kbifStateInitLocked_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJCL *pCl = SYS_GET_CL(pSys); + + // Return early if GPU is connected to an unsupported chipset + if (pCl->getProperty(pCl, PDB_PROP_CL_UNSUPPORTED_CHIPSET)) + { + return NV_ERR_NOT_COMPATIBLE; + } + + // Initialize BIF static info + kbifStaticInfoInit(pGpu, pKernelBif); + + // Initialize DMA caps + kbifInitDmaCaps(pGpu, pKernelBif); + + // Check for OS w/o usable PAT support + if ((kbifGetBusIntfType_HAL(pKernelBif) == + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) && + pOS->getProperty(pOS, PDB_PROP_OS_PAT_UNSUPPORTED)) + { + NV_PRINTF(LEVEL_INFO, + "BIF disabling noncoherent on OS w/o usable PAT support\n"); + + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_SUPPORT_NONCOHERENT, NV_FALSE); + } + + return NV_OK; +} + +/*! + * @brief KernelBif state load + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * @param[in] flags GPU state flag + */ +NV_STATUS +kbifStateLoad_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 flags +) +{ + NV_PRINTF(LEVEL_INFO, "BIF DMA Caps: %08x\n", kbifGetDmaCaps(pGpu, pKernelBif)); + + // Apply C73 chipset WAR + kbifExecC73War_HAL(pGpu, pKernelBif); + + // Check for stale PCI-E dev ctrl/status errors and AER errors + kbifClearConfigErrors(pGpu, pKernelBif, NV_TRUE, KBIF_CLEAR_XVE_AER_ALL_MASK); + + kbifInitPcieDeviceControlStatus(pGpu, pKernelBif); + + // + // A vGPU cannot disappear and these accesses are + // particularly expensive on vGPUs + // + if (pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_CHECK_IF_GPU_EXISTS_DEF) && + !IS_VIRTUAL(pGpu)) + { + osSchedule1SecondCallback(pGpu, _kbifCheckIfGpuExists, NULL, NV_OS_1HZ_REPEAT); + } + + return NV_OK; +} + +/*! + * @brief KernelBif state unload + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * @param[in] flags GPU state flag + */ +NV_STATUS +kbifStateUnload_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvU32 flags +) +{ + return NV_OK; +} + +/*! 
+ * @brief Initialize DMA caps + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + */ +void +kbifInitDmaCaps_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + pKernelBif->dmaCaps = REF_DEF(BIF_DMA_CAPS_NOSNOOP, _CTXDMA); + + // Set the coherency cap on host RM based on the chipset + if (IsAMODEL(pGpu) || + pCl->getProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT)) + { + pKernelBif->dmaCaps |= REF_DEF(BIF_DMA_CAPS_SNOOP, _CTXDMA); + } +} + +NvU32 +kbifGetDmaCaps_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 retval; + + // Start out with system specific DMA caps + retval = pKernelBif->dmaCaps; + + // If noncoherent support is disabled, mask out SNOOP caps + if (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_SUPPORT_NONCOHERENT)) + { + retval &= ~DRF_SHIFTMASK(BIF_DMA_CAPS_NOSNOOP); + } + + return retval; +} + +/*! + * @brief Initialize BIF static info in Kernel object through RPC + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + */ +NV_STATUS +kbifStaticInfoInit_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NV2080_CTRL_INTERNAL_BIF_GET_STATIC_INFO_PARAMS *pStaticInfo; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status = NV_OK; + + // Allocate memory for the command parameter + pStaticInfo = portMemAllocNonPaged(sizeof(*pStaticInfo)); + if (pStaticInfo == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate pStaticInfo for KernelBif"); + status = NV_ERR_NO_MEMORY; + goto kBifStaticInfoInit_IMPL_exit; + } + portMemSet(pStaticInfo, 0, sizeof(*pStaticInfo)); + + // Send the command + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_BIF_GET_STATIC_INFO, + pStaticInfo, sizeof(*pStaticInfo)), + kBifStaticInfoInit_IMPL_exit); + + // Initialize Kernel object fields with RPC response + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_PCIE_GEN4_CAPABLE, + pStaticInfo->bPcieGen4Capable); + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_IS_C2C_LINK_UP, + pStaticInfo->bIsC2CLinkUp); + pKernelBif->dmaWindowStartAddress = pStaticInfo->dmaWindowStartAddress; + +kBifStaticInfoInit_IMPL_exit: + portMemFree(pStaticInfo); + + return status; +} + +/*! + * @brief Initialize PCI-E config space bits based on chipset and GPU support. + */ +void +kbifInitPcieDeviceControlStatus +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + kbifEnableExtendedTagSupport_HAL(pGpu, pKernelBif); + + // + // Bug 382675 and 482867: Many SBIOSes default to disabling relaxed + // ordering on GPUs, we want to always force it back on unless + // the upstream root port is known to be broken with respect to this + // feature. + // + if (!pCl->getProperty(pCl, PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE)) + { + kbifPcieConfigEnableRelaxedOrdering_HAL(pGpu, pKernelBif); + } + else + { + kbifPcieConfigDisableRelaxedOrdering_HAL(pGpu, pKernelBif); + } + + if (!pCl->getProperty(pCl, PDB_PROP_CL_NOSNOOP_NOT_CAPABLE) && + !pCl->getProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR)) + { + // Bug 393398 - Re-enable DEVICE_CONTROL_STATUS_ENABLE_NO_SNOOP + kbifEnableNoSnoop_HAL(pGpu, pKernelBif, NV_TRUE); + } + else + { + // + // Check for NO_SNOOP P2P bug on specific chipset. More info in bug 332764. + // Check for NO_SNOOP enabled by default on specific CPU. Refer bug 1511622. 
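+        // In either of these cases NO_SNOOP is explicitly disabled below.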
+ // + kbifEnableNoSnoop_HAL(pGpu, pKernelBif, NV_FALSE); + } +} + +/*! + * @brief Check and rearm MSI + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * + * @return NV_TRUE if MSI is enabled + * NV_FALSE if MSI is disabled + */ +void +kbifCheckAndRearmMSI_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + Intr *pIntr = GPU_GET_INTR(pGpu); + + if (kbifIsMSIEnabled(pGpu, pKernelBif)) + { + if (!IS_VIRTUAL(pGpu)) + { + // Send EOI to rearm + if (pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_USE_CONFIG_SPACE_TO_REARM_MSI)) + { + kbifRearmMSI_HAL(pGpu, pKernelBif); + } + else + { + intrRetriggerTopLevel_HAL(pGpu, pIntr); + } + } + } + else if (kbifIsMSIXEnabled(pGpu, pKernelBif)) + { + intrRetriggerTopLevel_HAL(pGpu, pIntr); + } +} + +/*! + * @brief Checks if MSI is enabled. Prefers to check the SW cache, but if + * uncached, checks HW state and updates the SW cache for future use + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * + * @return NV_TRUE if MSI is enabled + * NV_FALSE if MSI is disabled + */ +NvBool +kbifIsMSIEnabled_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + // + // Bug 418883: We shall rely upon value cached at boot, for the value + // should not change during execution. If however, we must ever change + // this back to be read at every ISR, we *must* read the value through + // PCI CFG cycles. + // + if (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_MSI_CACHED)) + { + if (kbifIsMSIEnabledInHW_HAL(pGpu, pKernelBif)) + { + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_IS_MSI_ENABLED, NV_TRUE); + + if (IS_VIRTUAL(pGpu)) + { + // vGPU guests want an FYI print that re-arming is not required + NV_PRINTF(LEVEL_WARNING, + "MSI is enabled for vGPU, but no need to re-ARM\n"); + } + } + else + { + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_IS_MSI_ENABLED, NV_FALSE); + } + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_IS_MSI_CACHED, NV_TRUE); + } + + return pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_MSI_ENABLED); +} + +/*! + * @brief Checks if MSI-X is enabled. Prefers to check the SW cache, but if + * uncached, checks HW state and updates the SW cache for future use + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + * + * @return NV_TRUE if MSI is enabled + * NV_FALSE if MSI is disabled + */ +NvBool +kbifIsMSIXEnabled_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + if (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_MSIX_CACHED)) + { + if (kbifIsMSIXEnabledInHW_HAL(pGpu, pKernelBif)) + { + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_IS_MSIX_ENABLED, NV_TRUE); + } + else + { + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_IS_MSIX_ENABLED, NV_FALSE); + } + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_IS_MSIX_CACHED, NV_TRUE); + } + return pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_MSIX_ENABLED); +} + +/*! + * @brief Clear PCIe HW PCIe config space error counters. + * All of these should be cleared using config cycles. 
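+ *
+ * Typically invoked from kbifStateLoad() with bClearStatus = NV_TRUE and
+ * xveAerFlagsMask = KBIF_CLEAR_XVE_AER_ALL_MASK to scrub any stale device
+ * status and AER errors at load time.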
+ * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + */ +void +kbifClearConfigErrors_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NvBool bClearStatus, + NvU32 xveAerFlagsMask +) +{ + NvU32 xveStatusFlags = 0; + NvU32 xveStatus = 0; + NvU32 xveAerFlags = 0; + + if ((bClearStatus) && + (kbifGetXveStatusBits_HAL(pGpu, pKernelBif, &xveStatusFlags, &xveStatus) == NV_OK) && + (xveStatusFlags != 0)) + { + NV_PRINTF(LEVEL_WARNING, "PCI-E device status errors pending (%08X):\n", + xveStatusFlags); +#ifdef DEBUG + if ( xveStatusFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR ) + { + NV_PRINTF(LEVEL_WARNING, " _CORR_ERROR_DETECTED\n"); + } + if ( xveStatusFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR ) + { + NV_PRINTF(LEVEL_WARNING, " _NON_FATAL_ERROR_DETECTED\n"); + } + if ( xveStatusFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR ) + { + NV_PRINTF(LEVEL_WARNING, " _FATAL_ERROR_DETECTED\n"); + } + if ( xveStatusFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST ) + { + NV_PRINTF(LEVEL_WARNING, " _UNSUPP_REQUEST_DETECTED\n"); + } +#endif + NV_PRINTF(LEVEL_WARNING, "Clearing these errors..\n"); + kbifClearXveStatus_HAL(pGpu, pKernelBif, &xveStatus); + } + + if ((xveAerFlagsMask) && + (kbifGetXveAerBits_HAL(pGpu, pKernelBif, &xveAerFlags) == NV_OK)) + { + xveAerFlags &= xveAerFlagsMask; + + if (xveAerFlags != 0) + { + NV_PRINTF(LEVEL_WARNING, + "PCI-E device AER errors pending (%08X):\n", + xveAerFlags); +#ifdef DEBUG + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_TRAINING_ERR) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_TRAINING_ERR\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_DLINK_PROTO_ERR) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_DLINK_PROTO_ERR\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_POISONED_TLP) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_POISONED_TLP\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_FC_PROTO_ERR) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_FC_PROTO_ERR\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_TIMEOUT) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_CPL_TIMEOUT\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_CPL_ABORT) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_CPL_ABORT\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNEXP_CPL) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_UNEXP_CPL\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_RCVR_OVERFLOW) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_RCVR_OVERFLOW\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_MALFORMED_TLP) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_MALFORMED_TLP\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_ECRC_ERROR) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_ECRC_ERROR\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_UNCORR_UNSUPPORTED_REQ) + { + NV_PRINTF(LEVEL_WARNING, " _AER_UNCORR_UNSUPPORTED_REQ\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RCV_ERR) + { + NV_PRINTF(LEVEL_WARNING, " _AER_CORR_RCV_ERR\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_TLP) + { + NV_PRINTF(LEVEL_WARNING, " _AER_CORR_BAD_TLP\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_BAD_DLLP) + { + NV_PRINTF(LEVEL_WARNING, " _AER_CORR_BAD_DLLP\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_ROLLOVER) + 
{ + NV_PRINTF(LEVEL_WARNING, " _AER_CORR_RPLY_ROLLOVER\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_RPLY_TIMEOUT) + { + NV_PRINTF(LEVEL_WARNING, " _AER_CORR_RPLY_TIMEOUT\n"); + } + if (xveAerFlags & NV2080_CTRL_BUS_INFO_PCIE_LINK_AER_CORR_ADVISORY_NONFATAL) + { + NV_PRINTF(LEVEL_WARNING, " _AER_CORR_ADVISORY_NONFATAL\n"); + } +#endif + NV_PRINTF(LEVEL_WARNING, "Clearing these errors..\n"); + kbifClearXveAer_HAL(pGpu, pKernelBif, xveAerFlags); + } + } +} + +/*! + * @brief The PCI bus family means it has the concept of bus/dev/func + * and compatible PCI config space. + */ +NvBool +kbifIsPciBusFamily_IMPL +( + KernelBif *pKernelBif +) +{ + NvU32 busType = kbifGetBusIntfType_HAL(pKernelBif); + + return ((busType == NV2080_CTRL_BUS_INFO_TYPE_PCI) || + (busType == NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) || + (busType == NV2080_CTRL_BUS_INFO_TYPE_FPCI)); +} + +/*! + * @brief Regkey Overrides for Bif + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelBif BIF object pointer + */ +static void +_kbifInitRegistryOverrides +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 data32; + + // P2P Override + pKernelBif->p2pOverride = BIF_P2P_NOT_OVERRIDEN; + if (osReadRegistryDword(pGpu, NV_REG_STR_CL_FORCE_P2P, &data32) == NV_OK) + { + pKernelBif->p2pOverride = data32; + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED, FLD_TEST_DRF(_REG_STR, _CL_FORCE_P2P, _READ, _DISABLE, data32)); + pKernelBif->setProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED, FLD_TEST_DRF(_REG_STR, _CL_FORCE_P2P, _WRITE, _DISABLE, data32)); + } + + // P2P force type override + pKernelBif->forceP2PType = NV_REG_STR_RM_FORCE_P2P_TYPE_DEFAULT; + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_FORCE_P2P_TYPE, &data32) == NV_OK && + (data32 <= NV_REG_STR_RM_FORCE_P2P_TYPE_MAX)) + { + pKernelBif->forceP2PType = data32; + } + + // Peer Mapping override + pKernelBif->peerMappingOverride = NV_REG_STR_PEERMAPPING_OVERRIDE_DEFAULT; + if (osReadRegistryDword(pGpu, NV_REG_STR_PEERMAPPING_OVERRIDE, &data32) == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "allow peermapping reg key = %d\n", data32); + pKernelBif->peerMappingOverride = !!data32; + } +} + +/*! 
+ * Callback function to check if GPU exists + * + * @param[in] pGpu GPU object pointer + * @param[in] rsvd Reserved field + */ +static void +_kbifCheckIfGpuExists +( + OBJGPU *pGpu, + void *rsvd +) +{ + if (FULL_GPU_SANITY_CHECK(pGpu)) + { + if (gpuVerifyExistence_HAL(pGpu) != NV_OK) + { + osRemove1SecondRepeatingCallback(pGpu, _kbifCheckIfGpuExists, NULL); + } + } +} + +static NvU32 +kbifGetGpuLinkCapabilities +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 addrLinkCap = 0; + NvU32 data = 0; + + if (NV_OK != kbifGetBusOptionsAddr_HAL(pGpu, pKernelBif, BUS_OPTIONS_LINK_CAPABILITIES, &addrLinkCap)) + { + return 0; + } + + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, addrLinkCap, &data)) + { + NV_PRINTF(LEVEL_ERROR, "Unable to read %x\n", addrLinkCap); + return 0; + } + + return data; +} + +static NvU32 +kbifGetGpuLinkControlStatus +( + OBJGPU *pGpu, + KernelBif *pKernelBif +) +{ + NvU32 addrLinkControlStatus = 0; + NvU32 data = 0; + + if (NV_OK != kbifGetBusOptionsAddr_HAL(pGpu, pKernelBif, BUS_OPTIONS_LINK_CONTROL_STATUS, &addrLinkControlStatus)) + { + return 0; + } + + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, addrLinkControlStatus, &data )) + { + NV_PRINTF(LEVEL_ERROR, "Unable to read %x\n", addrLinkControlStatus); + return 0; + } + + return data; +} + +static NvBool +_doesBoardHaveMultipleGpusAndSwitch(OBJGPU *pGpu) +{ + if (((gpuIsMultiGpuBoard(pGpu, NULL, NULL)) || + (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_GEMINI)))&& + ((pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_PLX_PRESENT)) || + (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR03_PRESENT)) || + (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR04_PRESENT)))) + { + return NV_TRUE; + } + else + { + return NV_FALSE; + } +} + +NV_STATUS +kbifControlGetPCIEInfo_IMPL +( + OBJGPU *pGpu, + KernelBif *pKernelBif, + NV2080_CTRL_BUS_INFO *pBusInfo +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + NvU32 index = pBusInfo->index; + NvU32 data = 0; + + if (kbifGetBusIntfType_HAL(pKernelBif) != NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) + { + // KMD cannot handle error codes for this ctrl call, hence returning + // NV_OK, once KMD fixes the bug:3545197, RM can return NV_ERR_NOT_SUPPORTED + return NV_OK; + } + + switch (index) + { + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS: + { + data = kbifGetGpuLinkCapabilities(pGpu, pKernelBif); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CAPS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CAPS: + { + if (_doesBoardHaveMultipleGpusAndSwitch(pGpu)) + { + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.boardUpstreamPort, + CL_PCIE_LINK_CAP, &data) != NV_OK) + { + data = 0; + } + } + else + { + data = kbifGetGpuLinkCapabilities(pGpu, pKernelBif); + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_GEN_INFO: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_GEN_INFO: + { + NvU32 temp; + + if (_doesBoardHaveMultipleGpusAndSwitch(pGpu)) + { + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.boardUpstreamPort, + CL_PCIE_LINK_CTRL_STATUS, &temp) != NV_OK) + { + data = 0; + break; + } + else + { + temp = REF_VAL(NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED, temp); + if (temp == NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_32000MBPS) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _CURR_LEVEL, _GEN5, data); + } + else if (temp == NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_16000MBPS) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _CURR_LEVEL, _GEN4, data); + } + else if (temp == 
NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_8000MBPS) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _CURR_LEVEL, _GEN3, data); + } + else if (temp == NV2080_CTRL_BUS_INFO_PCIE_LINK_CTRL_STATUS_LINK_SPEED_5000MBPS) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _CURR_LEVEL, _GEN2, data); + } + else + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _CURR_LEVEL, _GEN1, data); + } + } + + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.boardUpstreamPort, + CL_PCIE_LINK_CAP, &temp) != NV_OK) + { + data = 0; + break; + } + else + { + temp = REF_VAL(NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED, temp); + if (temp == NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_32000MBPS) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _GEN, _GEN5, data); + } + else if (temp == NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_16000MBPS) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _GEN, _GEN4, data); + } + else if (temp == NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_8000MBPS) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _GEN, _GEN3, data); + } + else if (temp == NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_5000MBPS) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _GEN, _GEN2, data); + } + else + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_PCIE_LINK_CAP, + _GEN, _GEN1, data); + } + } + } + else + { + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV2080_CTRL_BUS_INFO busInfo = {0}; + NV_STATUS rmStatus = NV_OK; + + busInfo.index = NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO; + + if ((rmStatus = kbusSendBusInfo(pGpu, GPU_GET_KERNEL_BUS(pGpu), &busInfo)) != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Squashing rmStatus: %x \n", rmStatus); + rmStatus = NV_OK; + busInfo.data = 0; + } + data = busInfo.data; + } + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CAPS: + { + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.rootPort, + CL_PCIE_LINK_CAP, &data) != NV_OK) + { + data = 0; + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS: + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR03_PRESENT) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR04_PRESENT)) + { + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.boardDownstreamPort, + CL_PCIE_LINK_CAP, &data) != NV_OK) + { + data = 0; + } + } + else + { + // no br03/br04, same as link from RC + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.rootPort, + CL_PCIE_LINK_CAP, &data) != NV_OK) + { + data = 0; + } + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS: + { + data = kbifGetGpuLinkControlStatus(pGpu, pKernelBif); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CTRL_STATUS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CTRL_STATUS: + { + if (_doesBoardHaveMultipleGpusAndSwitch(pGpu)) + { + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.boardUpstreamPort, + CL_PCIE_LINK_CTRL_STATUS, &data) != NV_OK) + { + data = 0; + } + } + else + { + data = kbifGetGpuLinkControlStatus(pGpu, pKernelBif); + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CTRL_STATUS: + { + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.rootPort, + CL_PCIE_LINK_CTRL_STATUS, + &data) != NV_OK) + { + data = 0; + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CTRL_STATUS: + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR03_PRESENT) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR04_PRESENT)) + { + if 
(clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.boardDownstreamPort, + CL_PCIE_LINK_CTRL_STATUS, &data) != NV_OK) + { + data = 0; + } + } + else + { + // no br03/br04, same as link from RC + if (clPcieReadPortConfigReg(pGpu, pCl, + &pGpu->gpuClData.rootPort, + CL_PCIE_LINK_CTRL_STATUS, + &data) != NV_OK) + { + data = 0; + } + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_ERRORS: + { + NvU32 xveStatus = 0; + + if (pKernelBif != NULL) + { + if (kbifGetXveStatusBits_HAL(pGpu, pKernelBif, &data, &xveStatus) != NV_OK) + { + data = 0; + break; + } + if (kbifClearXveStatus_HAL(pGpu, pKernelBif, &xveStatus) != NV_OK) + { + data = 0; + } + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_ERRORS: + { + NvU32 clStatus = 0; + + if (clPcieReadDevCtrlStatus(pGpu, pCl, &data, &clStatus) != NV_OK) + { + data = 0; + break; + } + if (clPcieClearDevCtrlStatus(pGpu, pCl, &clStatus) != NV_OK) + { + data = 0; + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER: + { + if (pKernelBif != NULL) + { + if (kbifGetXveAerBits_HAL(pGpu, pKernelBif, &data) != NV_OK) + { + data = 0; + break; + } + if (kbifClearXveAer_HAL(pGpu, pKernelBif, data) != NV_OK) + { + data = 0; + } + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_MSI_INFO: + { + if (kbifIsMSIEnabledInHW_HAL(pGpu, pKernelBif)) + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_MSI, + _STATUS, _ENABLED, data); + } + else + { + data = FLD_SET_DRF(2080, _CTRL_BUS_INFO_MSI, + _STATUS, _DISABLED, data); + } + break; + } + + default: + break; + } + + pBusInfo->data = data; + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/bus/arch/ampere/kern_bus_ga100.c b/src/nvidia/src/kernel/gpu/bus/arch/ampere/kern_bus_ga100.c new file mode 100644 index 000000000..0dcbc27c4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/arch/ampere/kern_bus_ga100.c @@ -0,0 +1,1167 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" + +#include +#include // FABRIC_VASPACE_A +#include "rmapi/rs_utils.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/device/device.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "rmapi/rmapi.h" +#include "core/locks.h" +#include "vgpu/rpc.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/fabric_vaspace.h" +#include "mem_mgr/virt_mem_mgr.h" + +#include "published/ampere/ga100/dev_ram.h" // NV_RAMIN_ALLOC_SIZE +#include "ctrl/ctrl2080/ctrl2080fla.h" // NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK + +#define NVLNK_FABRIC_ADDR_GRANULARITY 36 + +/*! + * @brief Sets up the FLA state for the GPU. This function will allocate a RM + * client, which will allocate the FERMI_VASPACE_A object, and binds the PDB + * to MMU. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] base VASpace base + * @param[in] size VASpace size + + * + * @return NV_OK if successful + */ +NV_STATUS +kbusAllocateFlaVaspace_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 base, + NvU64 size +) +{ + NV_STATUS status = NV_OK; + OBJVMM *pVmm = SYS_GET_VMM(SYS_GET_INSTANCE()); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NV0080_ALLOC_PARAMETERS nv0080AllocParams = {0}; + NV2080_ALLOC_PARAMETERS nv2080AllocParams = {0}; + NV_VASPACE_ALLOCATION_PARAMETERS vaParams = {0}; + INST_BLK_INIT_PARAMS pInstblkParams = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + RsClient *pClient; + NvBool bAcquireLock = NV_FALSE; + + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size != 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(!pKernelBus->flaInfo.bFlaAllocated, NV_ERR_INVALID_ARGUMENT); + + pKernelBus->flaInfo.base = base; + pKernelBus->flaInfo.size = size; + + if (gpuIsSriovEnabled(pGpu)) + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + if (pKernelNvlink != NULL && + knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink)) + { + pKernelBus->flaInfo.bFlaRangeRegistered = NV_TRUE; + return status; + } + } + + //Allocate the client in RM which owns the FLAVASpace + status = pRmApi->AllocWithHandle(pRmApi, NV01_NULL_OBJECT, NV01_NULL_OBJECT, NV01_NULL_OBJECT, + NV01_ROOT, &pKernelBus->flaInfo.hClient); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + status = serverGetClientUnderLock(&g_resServ, pKernelBus->flaInfo.hClient, &pClient); + NV_ASSERT_OR_GOTO(status == NV_OK, free_client); + + status = serverutilGenResourceHandle(pKernelBus->flaInfo.hClient, &pKernelBus->flaInfo.hDevice); + NV_ASSERT_OR_GOTO(status == NV_OK, free_client); + + // Allocate a device handle + nv0080AllocParams.deviceId = gpuGetDeviceInstance(pGpu); + status = pRmApi->AllocWithHandle(pRmApi, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hClient, + pKernelBus->flaInfo.hDevice, NV01_DEVICE_0, + &nv0080AllocParams); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed creating device, status=0x%x\n", status); + goto free_client; + } + + + status = serverutilGenResourceHandle(pKernelBus->flaInfo.hClient, &pKernelBus->flaInfo.hSubDevice); + NV_ASSERT_OR_GOTO(status == NV_OK, free_client); + + //Allocate a sub device handle + nv2080AllocParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + status = pRmApi->AllocWithHandle(pRmApi, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hDevice, + pKernelBus->flaInfo.hSubDevice, NV20_SUBDEVICE_0, + 
&nv2080AllocParams); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed creating sub-device, status=0x%x\n", + status); + goto free_client; + } + + // Allocate the FERMI_VASPACE_A FLA VASpace + vaParams.index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; + vaParams.vaBase = base; + vaParams.vaSize = size; + vaParams.flags |= NV_VASPACE_ALLOCATION_FLAGS_IS_FLA; + + // Generate a vaspace handle for FERMI_VASPACE_A object allocation + status = serverutilGenResourceHandle(pKernelBus->flaInfo.hClient, &pKernelBus->flaInfo.hFlaVASpace); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed generating vaspace handle, status=0x%x\n", status); + goto free_client; + } + + if (rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + bAcquireLock = NV_TRUE; + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + // Allocate a FERMI_VASPACE_A object and associate it with hFlaVASpace + status = pRmApi->AllocWithHandle(pRmApi, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hDevice, + pKernelBus->flaInfo.hFlaVASpace, FERMI_VASPACE_A, + &vaParams); + if (bAcquireLock) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, + rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM_FLA)); + bAcquireLock = NV_FALSE; + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed allocating vaspace, status=0x%x\n", + status); + goto free_client; + } + + // Allocate a FABRIC_VASPACE_A object + status = vmmCreateVaspace(pVmm, FABRIC_VASPACE_A, pGpu->gpuId, gpumgrGetGpuMask(pGpu), + base, base + size - 1, 0, 0, NULL, 0, + &pGpu->pFabricVAS); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed allocating fabric vaspace, status=0x%x\n", + status); + goto free_client; + } + + // + // For SRIOV Heavy enabled guests, VAS PTs are managed by host + // Enabling the same path for GSP-RM offload, where VAS is managed in GSP-RM + // + if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + NV2080_CTRL_FLA_RANGE_PARAMS params = {0}; + params.mode = NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_INITIALIZE; + params.base = base; + params.size = size; + params.hVASpace = pKernelBus->flaInfo.hFlaVASpace; + NV_RM_RPC_CONTROL(pGpu, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hSubDevice, + NV2080_CTRL_CMD_FLA_RANGE, + ¶ms, sizeof(params), status); + + if (status != NV_OK) + { + goto free_client; + } + + status = vaspaceGetByHandleOrDeviceDefault(pClient, + pKernelBus->flaInfo.hDevice, + pKernelBus->flaInfo.hFlaVASpace, + &pKernelBus->flaInfo.pFlaVAS); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed getting the vaspace from handle, status=0x%x\n", + status); + goto free_rpc; + } + } + else + { + // Get the FLA VASpace associated with hFlaVASpace + status = vaspaceGetByHandleOrDeviceDefault(pClient, + pKernelBus->flaInfo.hDevice, + pKernelBus->flaInfo.hFlaVASpace, + &pKernelBus->flaInfo.pFlaVAS); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed getting the vaspace from handle, status=0x%x\n", + status); + goto free_client; + } + + // Pin the VASPACE page directory for pFlaVAS before writing the instance block + status = vaspacePinRootPageDir(pKernelBus->flaInfo.pFlaVAS, pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed pinning down FLAVASpace, status=0x%x\n", + status); + goto unpin_rootpagedir; + } + + if (pGpu->pFabricVAS != NULL) + { + // Pin the VASPACE page directory for pFabricVAS before writing the instance block + status = 
vaspacePinRootPageDir(pGpu->pFabricVAS, pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed pinning down fabric vaspace, status=0x%x\n", + status); + goto unpin_rootpagedir; + } + } + + // Construct instance block + status = kbusConstructFlaInstBlk_HAL(pGpu, pKernelBus, GPU_GFID_PF); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed constructing instblk for FLA, status=0x%x\n", + status); + goto free_instblk; + } + + // Instantiate Inst Blk for pFlaVAS + status = kgmmuInstBlkInit(pKernelGmmu, + pKernelBus->flaInfo.pInstblkMemDesc, + pKernelBus->flaInfo.pFlaVAS, FIFO_PDB_IDX_BASE, + &pInstblkParams); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed instantiating instblk for FLA, status=0x%x\n", + status); + goto free_instblk; + } + } + pKernelBus->flaInfo.bFlaAllocated = NV_TRUE; + + // + // For SRIOV PF/VF system, always check for P2P allocation to determine whether + // this function is allowed to bind FLA + // + if (gpuIsSriovEnabled(pGpu) || IS_VIRTUAL(pGpu)) + { + if (gpuCheckIsP2PAllocated_HAL(pGpu)) + { + status = kbusSetupBindFla(pGpu, pKernelBus, pGpu->sriovState.pP2PInfo->gfid); + } + else + { + NV_PRINTF(LEVEL_INFO, "Skipping binding FLA, because no P2P GFID is" + " validated yet\n"); + } + } + else + { + status = kbusSetupBindFla(pGpu, pKernelBus, GPU_GFID_PF); + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed binding instblk for FLA, status=0x%x\n", status); + goto free_instblk; + } + return status; + +free_rpc: + { + NV2080_CTRL_FLA_RANGE_PARAMS params = {0}; + params.mode = NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_DESTROY; + NV_RM_RPC_CONTROL(pGpu, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hSubDevice, + NV2080_CTRL_CMD_FLA_RANGE, + ¶ms, sizeof(params), status); + goto free_client; + } + +free_instblk: + kbusDestructFlaInstBlk_HAL(pGpu, pKernelBus); + +unpin_rootpagedir: + if (pKernelBus->flaInfo.pFlaVAS != NULL) + { + vaspaceUnpinRootPageDir(pKernelBus->flaInfo.pFlaVAS, pGpu); + } + + if (pGpu->pFabricVAS != NULL) + { + vaspaceUnpinRootPageDir(pGpu->pFabricVAS, pGpu); + } + +free_client: + if (pGpu->pFabricVAS != NULL) + { + vmmDestroyVaspace(pVmm, pGpu->pFabricVAS); + pGpu->pFabricVAS = NULL; + } + + pRmApi->Free(pRmApi, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hClient); + pKernelBus->flaInfo.bFlaAllocated = NV_FALSE; + + NV_PRINTF(LEVEL_ERROR, "failed allocating FLA VASpace status=0x%x\n", + status); + + return status; +} + +/*! + * @brief Sets up the Host Managed FLA state for the GPU. + * This function will manage bare minimum resources in host RM such as + * allocating PDB, constructing instance memory block in subheap of the vGPU device, + * binding the PDB with the VASpace. 
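+ * Unlike kbusAllocateFlaVaspace_GA100, the client, device, subdevice and
+ * VASpace handles are supplied by the caller and are only cached here; this
+ * function does not allocate them.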
+ * + * @param[in] hClient Client handle which owns the FLA resources + * @param[in] hDevice Device handle associated with FLA VAS + * @param[in] hSubdevice SubDevice handle associated with FLA VAS + * @param[in] hVASpace FLA Vaspace handle + * @param[in] base VASpace base + * @param[in] size VASpace size + * @param[in] gfid Calling Context + * + * @return NV_OK if successful + */ +NV_STATUS +kbusAllocateHostManagedFlaVaspace_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvHandle hClient, + NvHandle hDevice, + NvHandle hSubdevice, + NvHandle hVASpace, + NvU64 base, + NvU64 size, + NvU32 gfid +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + OBJVMM *pVmm = SYS_GET_VMM(SYS_GET_INSTANCE()); + INST_BLK_INIT_PARAMS pInstblkParams = {0}; + RsClient *pClient; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size != 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(IS_GFID_VF(pGpu), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(hClient != NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(hDevice != NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(hSubdevice != NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(hVASpace != NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(!pKernelBus->flaInfo.bFlaAllocated, NV_ERR_INVALID_ARGUMENT); + + pKernelBus->flaInfo.base = base; + pKernelBus->flaInfo.size = size; + pKernelBus->flaInfo.hClient = hClient; + pKernelBus->flaInfo.hDevice = hDevice; + pKernelBus->flaInfo.hSubDevice = hSubdevice; + pKernelBus->flaInfo.hFlaVASpace = hVASpace; + + status = serverGetClientUnderLock(&g_resServ, pKernelBus->flaInfo.hClient, &pClient); + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + + status = vaspaceGetByHandleOrDeviceDefault(pClient, + pKernelBus->flaInfo.hDevice, + pKernelBus->flaInfo.hFlaVASpace, + &pKernelBus->flaInfo.pFlaVAS); + + // Allocate a FABRIC_VASPACE_A object + status = vmmCreateVaspace(pVmm, FABRIC_VASPACE_A, pGpu->gpuId, gpumgrGetGpuMask(pGpu), + base, base + size - 1, 0, 0, NULL, 0, + &pGpu->pFabricVAS); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed allocating fabric vaspace, status=0x%x\n", + status); + goto cleanup; + } + + // Pin the VASPACE page directory for pFabricVAS before writing the instance block + status = vaspacePinRootPageDir(pGpu->pFabricVAS, pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed pinning down fabric vaspace, status=0x%x\n", + status); + goto cleanup; + } + + // Pin the VASPACE page directory for Legacy VAS before writing the instance block + status = vaspacePinRootPageDir(pKernelBus->flaInfo.pFlaVAS, pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed pinning down legacy vaspace, status=0x%x\n", + status); + goto unpin_fabric_page_dir; + } + + // Construct instance block + status = kbusConstructFlaInstBlk_HAL(pGpu, pKernelBus, gfid); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed constructing instblk for FLA, status=0x%x\n", + status); + goto unpin_legacy_page_dir; + } + + // Instantiate Inst Blk for FLA + status = kgmmuInstBlkInit(pKernelGmmu, + pKernelBus->flaInfo.pInstblkMemDesc, + pKernelBus->flaInfo.pFlaVAS, FIFO_PDB_IDX_BASE, + &pInstblkParams); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed instantiating instblk for FLA, status=0x%x\n", + status); + goto free_instblk; + } + + pKernelBus->flaInfo.bFlaAllocated = NV_TRUE; + + return status; + +free_instblk: + kbusDestructFlaInstBlk_HAL(pGpu, pKernelBus); + 
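+    //
+    // Unpin the page directories pinned during setup before the fabric
+    // VASpace is destroyed under the "cleanup" label below.
+    //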
+unpin_legacy_page_dir: + if (pKernelBus->flaInfo.pFlaVAS != NULL) + { + vaspaceUnpinRootPageDir(pKernelBus->flaInfo.pFlaVAS, pGpu); + } + +unpin_fabric_page_dir: + if (pGpu->pFabricVAS != NULL) + { + vaspaceUnpinRootPageDir(pGpu->pFabricVAS, pGpu); + } + +cleanup: + if (pGpu->pFabricVAS != NULL) + { + vmmDestroyVaspace(pVmm, pGpu->pFabricVAS); + pGpu->pFabricVAS = NULL; + } + + pKernelBus->flaInfo.bFlaAllocated = NV_FALSE; + return status; +} + +/*! + * @brief Initialize the FLA data structure in OBJGPU + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] base VASpace base + * @param[in] size VASpace size + * + * @return NV_OK if successful + */ +NV_STATUS +kbusInitFla_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 base, + NvU64 size +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NV_STATUS status = NV_OK; + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS params; + + portMemSet(&pKernelBus->flaInfo, 0, sizeof(pKernelBus->flaInfo)); + + // Initialize FLA State Info if possible + // 1. FLA is by default enabled for GA100, + // 2. Disable FLA when MIG is enabled + // Currently MIG is persistent state, so GPU reboot will happpen, when MIG is being enabled/disabled + // so when the GPU reboot happens with a modified state, don't enable FLA. This is decent WAR for bug: 2568634 + // 3. Disable FLA when SLI is enabled + // Bug: 2985556, re-enable once we fix this bug. + // + if (((NULL != pKernelMIGManager) && !kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)) || + (IsSLIEnabled(pGpu))) + { + NV_PRINTF(LEVEL_INFO, "FLA is disabled, gpu %x is in MIG/SLI mode \n", pGpu->gpuInstance); + pKernelBus->bFlaSupported = NV_FALSE; + return NV_OK; + } + else // for all non-MIG configs, FLA is supported + { + NV_PRINTF(LEVEL_INFO, "Enabling FLA_SUPPORTED to TRUE, gpu: %x ...\n", pGpu->gpuInstance); + pKernelBus->bFlaSupported = NV_TRUE; + } + + // + // FLA VAspace is allocated from CPU, so no need to do anything + // in GSP except setting the property + // + if (RMCFG_FEATURE_PLATFORM_GSP) + return NV_OK; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + // Nvswitch virtualization enabled + if (pKernelNvlink != NULL && knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink)) + { + portMemSet(¶ms, 0, sizeof(params)); + params.bGet = NV_TRUE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_GET_SET_NVSWITCH_FLA_ADDR, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to get the NVSwitch FLA address\n"); + return status; + } + + size = gpuGetFlaVasSize_HAL(pGpu, NV_TRUE); + + status = knvlinkSetUniqueFlaBaseAddress(pGpu, pKernelNvlink, params.addr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Failed to enable FLA for GPU: %x\n", pGpu->gpuInstance); + return status; + } + base = params.addr; + } + else + { + return status; + } + } + else // direct connected systems + { + if (hypervisorIsVgxHyper()) + { + NV_PRINTF(LEVEL_INFO, "Skipping the FLA initialization in Host vGPU \n"); + return NV_OK; + } + if (!size) + { + size = gpuGetFlaVasSize_HAL(pGpu, NV_FALSE); + base = pGpu->gpuInstance * size; + } + } + NV_ASSERT_OK_OR_RETURN(kbusAllocateFlaVaspace_HAL(pGpu, pKernelBus, base, size)); + return status; +} + +/*! + * @brief Destruct the FLA data structure and associated resources. 
+ * Since all the resources are associated with the RM client, + * all resources will be destroyed by Resource server. + * Note: kbusDestroyFla can be called from different places + * 1. For direct connected systems, RM unload will call this function. + */ +void +kbusDestroyFla_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // TODO: if there are dangling resources, cleanup here + if ((pKernelBus->flaInfo.pFlaVAS != NULL) || (pGpu->pFabricVAS != NULL)) + { + if (pKernelBus->flaInfo.bFlaBind) + { + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_STATUS status = NV_OK; + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS params = {0}; + + params.flaAction = NV2080_CTRL_FLA_ACTION_UNBIND; + + NV_RM_RPC_CONTROL(pGpu, pKernelBus->flaInfo.hClient, + pKernelBus->flaInfo.hSubDevice, + NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK, + ¶ms, sizeof(params), status); + + NV_ASSERT(status == NV_OK); + pKernelBus->flaInfo.bFlaBind = NV_FALSE; + } + } + + if (pKernelBus->flaInfo.bFlaAllocated) + { + if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + NV2080_CTRL_FLA_RANGE_PARAMS params = {0}; + NV_STATUS status = NV_OK; + params.mode = NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_DESTROY; + NV_RM_RPC_CONTROL(pGpu, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hSubDevice, + NV2080_CTRL_CMD_FLA_RANGE, + ¶ms, sizeof(params), status); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "RPC to host failed with status: 0x%x\n", status); + } + + // + // For SRIOV-Heavy, Instance block is allocated in host, so only destroying the + // vaspace + // + if (pGpu->pFabricVAS != NULL) + { + vmmDestroyVaspace(pVmm, pGpu->pFabricVAS); + pGpu->pFabricVAS = NULL; + } + + pRmApi->Free(pRmApi, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hClient); + portMemSet(&pKernelBus->flaInfo, 0, sizeof(pKernelBus->flaInfo)); + } + else + { + if (pKernelBus->flaInfo.pFlaVAS != NULL) + { + vaspaceUnpinRootPageDir(pKernelBus->flaInfo.pFlaVAS, pGpu); + } + + if (pGpu->pFabricVAS != NULL) + { + vaspaceUnpinRootPageDir(pGpu->pFabricVAS, pGpu); + } + + kbusDestructFlaInstBlk_HAL(pGpu, pKernelBus); + + if (pGpu->pFabricVAS != NULL) + { + vmmDestroyVaspace(pVmm, pGpu->pFabricVAS); + pGpu->pFabricVAS = NULL; + } + + pRmApi->Free(pRmApi, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hClient); + portMemSet(&pKernelBus->flaInfo, 0, sizeof(pKernelBus->flaInfo)); + } + + pKernelBus->flaInfo.bFlaAllocated = NV_FALSE; + } + } +} + +void +kbusDestroyHostManagedFlaVaspace_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + + NV_PRINTF(LEVEL_INFO, "Freeing the FLA client: 0x%x FLAVASpace:%x, gpu:%x \n", + pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hFlaVASpace, pGpu->gpuInstance); + + if (pKernelBus->flaInfo.pFlaVAS) + { + NV_ASSERT(pKernelBus->flaInfo.bFlaBind == NV_FALSE); + if (pKernelBus->flaInfo.bFlaAllocated) + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + vaspaceUnpinRootPageDir(pKernelBus->flaInfo.pFlaVAS, pGpu); + + if (pGpu->pFabricVAS != NULL) + { + vaspaceUnpinRootPageDir(pGpu->pFabricVAS, pGpu); + vmmDestroyVaspace(pVmm, pGpu->pFabricVAS); + pGpu->pFabricVAS = NULL; + } + + kbusDestructFlaInstBlk_HAL(pGpu, pKernelBus); + pKernelBus->flaInfo.hClient = NV01_NULL_OBJECT; + pKernelBus->flaInfo.hDevice = NV01_NULL_OBJECT; + 
pKernelBus->flaInfo.hSubDevice = NV01_NULL_OBJECT; + pKernelBus->flaInfo.hFlaVASpace = NV01_NULL_OBJECT; + pKernelBus->flaInfo.pFlaVAS = NULL; + pKernelBus->flaInfo.bFlaAllocated = NV_FALSE; + + if (pKernelNvlink == NULL || !knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink)) + { + pKernelBus->flaInfo.bFlaRangeRegistered = NV_FALSE; + pKernelBus->flaInfo.base = 0; + pKernelBus->flaInfo.size = 0; + } + } + } +} + +/*! + * @brief This function will return the OBJVASPACE for the FLA VAS. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in/out] ppVAS OBJVASPACE double pointer + * + * @return NV_ERR_NOT_SUPPORTED, if FLA is not supported, + * else NV_OK + */ +NV_STATUS +kbusGetFlaVaspace_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + *ppVAS = NULL; + + // Return NV_ERR_NOT_SUPPORTED if nvlink is force disabled using cmd line args + if (!IS_VIRTUAL(pGpu) && pKernelNvlink == NULL) + { + NV_PRINTF(LEVEL_WARNING, "Nvlink is not supported in this GPU: %x \n", pGpu->gpuInstance); + return NV_ERR_NOT_SUPPORTED; + } + + // Return NV_ERR_NOT_SUPPORTED when we are in MIG mode + if ((pKernelMIGManager != NULL) && !kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)) + { + NV_PRINTF(LEVEL_WARNING, "FLA is not supported with MIG enabled, GPU: %x \n", pGpu->gpuInstance); + return NV_ERR_NOT_SUPPORTED; + } + + if (!kbusIsFlaSupported(pKernelBus)) + { + NV_PRINTF(LEVEL_WARNING, "FLA is not supported, GPU: %x\n", pGpu->gpuInstance); + return NV_ERR_NOT_SUPPORTED; + } + + if (!IS_VIRTUAL(pGpu) && !kbusIsFlaEnabled(pKernelBus)) + { + if (!gpuIsSriovEnabled(pGpu) && !IS_VIRTUAL(pGpu)) + { + NV_PRINTF(LEVEL_WARNING, "FLA is not enabled, GPU: %x\n", pGpu->gpuInstance); + return NV_ERR_NOT_SUPPORTED; + } + } + // + // when FLA init moves to P2P object creation time, any client trying to get + // FLA VAS reference, needs to be returned NV_ERR_NOT_SUPPORTED. In that case, only + // way to determine is to check if links are trained in the system. Since we dont have an easy + // way to do the checks, currently we can assume that Nvlinks will not be disabled outside of MIG + // + + *ppVAS = pKernelBus->flaInfo.pFlaVAS; + + NV_PRINTF(LEVEL_INFO, "returning the vas: %p for GPU: %x start: 0x%llx, limit:0x%llx \n", + pKernelBus->flaInfo.pFlaVAS, pGpu->gpuInstance, pKernelBus->flaInfo.pFlaVAS->vasStart, + pKernelBus->flaInfo.pFlaVAS->vasLimit); + + return status; +} + +/*! + * @brief Constructor for the Instance Memory block for FLA VASpace. This will + * allocate the memory descriptor for the IMB. 
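+ * The block is carved out of FB (NV_RAMIN_ALLOC_SIZE bytes, uncached); for
+ * VF GFIDs it is additionally marked as owned by the current device so it
+ * can be placed in the vGPU device's subheap.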
+ * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @return NV_OK, if successful + */ +NV_STATUS +kbusConstructFlaInstBlk_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU32 aperture; + NvU64 size; + NvU32 cpuCacheAttrib; + NvU32 flags = MEMDESC_FLAGS_NONE; + + // Inst Blocks are by default in FB + aperture = ADDR_FBMEM; + cpuCacheAttrib = NV_MEMORY_UNCACHED; + size = NV_RAMIN_ALLOC_SIZE; + + if (gpuIsWarBug200577889SriovHeavyEnabled(pGpu) && IS_GFID_PF(gfid)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (IS_GFID_VF(gfid)) + flags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + + // create the memdesc for instance block + status = memdescCreate(&pKernelBus->flaInfo.pInstblkMemDesc, pGpu, + size, 0, NV_TRUE, + aperture, cpuCacheAttrib, flags); + + NV_ASSERT(status == NV_OK); + + status = memdescAlloc(pKernelBus->flaInfo.pInstblkMemDesc); + + NV_ASSERT(status == NV_OK); + + // Initialize the memdesc to zero + status = memmgrMemDescMemSet(pMemoryManager, + pKernelBus->flaInfo.pInstblkMemDesc, + 0, + TRANSFER_FLAGS_NONE); + NV_ASSERT(status == NV_OK); + + return status; +} + +/*! + * @brief Destruct the Instance memory block allocated for FLA VAS + * + * @param[in] pGpu + * @param[in] pKernelBus + */ +void +kbusDestructFlaInstBlk_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + // Free the FLA Inst Blk MemDesc + if (pKernelBus->flaInfo.pInstblkMemDesc != NULL) + { + memdescFree(pKernelBus->flaInfo.pInstblkMemDesc); + memdescDestroy(pKernelBus->flaInfo.pInstblkMemDesc); + pKernelBus->flaInfo.pInstblkMemDesc = NULL; + } +} + +/*! + * @brief Function to determine if the mapping can be direct mapped or BAR mapped + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc Memory Descriptor pointer + * @param[in] mapFlags Flags used for mapping + * @param[in] bDirectSysMappingAllowed boolean to return the result + * + * returns NV_ERR_INVALID_ARGUMENT, if the reflected mapping is requested + * NV_OK, otherwise + */ +NV_STATUS +kbusIsDirectMappingAllowed_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 mapFlags, + NvBool *bDirectSysMappingAllowed +) +{ + *bDirectSysMappingAllowed = NV_FALSE; + + if (DRF_VAL(OS33, _FLAGS, _MAPPING, mapFlags) == NVOS33_FLAGS_MAPPING_REFLECTED) + { + NV_PRINTF(LEVEL_WARNING, "BAR allocation trying to request reflected mapping, " + "by passing the map flags, failing the request \n"); + } + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED)) + { + NV_PRINTF(LEVEL_WARNING, "BAR allocation trying to request reflected mapping, " + "by setting ENCRYPTED flag in memdesc, failing the request \n"); + return NV_ERR_INVALID_ARGUMENT; + } + + *bDirectSysMappingAllowed = NV_TRUE; + return NV_OK; +} + +/*! 
+ * @brief Returns the Nvlink peer ID from pGpu0 to pGpu1 + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[out] nvlinkPeer NvU32 pointer + * + * return NV_OK on success + */ +NV_STATUS +kbusGetNvlinkP2PPeerId_GA100 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 *nvlinkPeer +) +{ + KernelNvlink *pKernelNvlink0 = GPU_GET_KERNEL_NVLINK(pGpu0); + NV_STATUS status = NV_OK; + + if (nvlinkPeer == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *nvlinkPeer = BUS_INVALID_PEER; + + // If the chip does not support NVLink, then return + if (pKernelNvlink0 == NULL) + { + return NV_OK; + } + + // + // If NVLINK topology is forced and the forced configuration has peer links, + // get the peer ID from the table + // + if (knvlinkIsForcedConfig(pGpu0, pKernelNvlink0) || + pKernelNvlink0->bRegistryLinkOverride) + { + if (knvlinkGetPeersNvlinkMaskFromHshub(pGpu0, pKernelNvlink0) != 0) + { + *nvlinkPeer = kbusGetPeerIdFromTable_HAL(pGpu0, pKernelBus0, + pGpu0->gpuInstance, + pGpu1->gpuInstance); + + if (*nvlinkPeer == BUS_INVALID_PEER) + { + return NV_ERR_INVALID_REQUEST; + } + } + return NV_OK; + } + + // + // NVLINK topology is not forced. Get the NVLink P2P peer ID for NVLink + // auto-config. + // + + // Return if there are no NVLink connections to the remote GPU + if (pKernelNvlink0->peerLinkMasks[gpuGetInstance(pGpu1)] == 0) + { + return NV_OK; + } + + // Return if a peer ID is already allocated for P2P from pGpu0 to pGpu1 + *nvlinkPeer = kbusGetPeerId_HAL(pGpu0, pKernelBus0, pGpu1); + if (*nvlinkPeer != BUS_INVALID_PEER) + { + return NV_OK; + } + + // + // Peer ID 0 is used for the following use-cases: + // 1. If the GPU is connected to itself through NVLink (loopback) + // 2. If the GPU is connected to the other GPU through NVSwitch + // + // On NVSwitch systems, peer ID 0 might not be available only if: + // 1. PCIe P2P is allowed along with NVLink P2P on NVSWitch systems + // 2. Mix of direct NVLink and NVSwitch connections is supported + // None of the above hold true currently + // + if ((pGpu0 == pGpu1) || + knvlinkIsGpuConnectedToNvswitch(pGpu0, pKernelNvlink0)) + { + *nvlinkPeer = 0; + + goto kbusGetNvlinkP2PPeerId_end; + } + + // If no peer ID has been assigned yet, find the first unused peer ID + if (*nvlinkPeer == BUS_INVALID_PEER) + { + *nvlinkPeer = kbusGetUnusedPeerId_HAL(pGpu0, pKernelBus0); + + // If could not find a free peer ID, return error + if (*nvlinkPeer == BUS_INVALID_PEER) + { + NV_PRINTF(LEVEL_WARNING, + "GPU%d: peerID not available for NVLink P2P\n", + pGpu0->gpuInstance); + return NV_ERR_GENERIC; + } + + goto kbusGetNvlinkP2PPeerId_end; + } + +kbusGetNvlinkP2PPeerId_end: + + // Reserve the peer ID for NVLink use + status = kbusReserveP2PPeerIds_HAL(pGpu0, pKernelBus0, NVBIT(*nvlinkPeer)); + + return status; +} + +/** + *@brief Select whether RM needs to use direct mapping or BAR mapping + * This function is a WAR for bug: 2494500, where FB hangs if SW issues + * reflected accesses. 
RM should select direct mapping for any accesses + * other than FB + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc MEMORY_DESCRIPTOR pointer + * @param[in/out] pbAllowDirectMap NvBool pointer + * + *@returns NV_OK, if supported + * NV_ERR_NOT_SUPPORTED, otherwise + */ +NV_STATUS +kbusUseDirectSysmemMap_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvBool *pbAllowDirectMap +) +{ + *pbAllowDirectMap = NV_FALSE; + + if((memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM)) + { + *pbAllowDirectMap = NV_TRUE; + } + + return NV_OK; +} + +/*! + * @brief Validates FLA base address. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param flaBaseAddr + * + * @returns On success, NV_OK. + * On failure, returns NV_ERR_XXX. + */ +NV_STATUS +kbusValidateFlaBaseAddress_GA100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 flaBaseAddr +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 fbSizeBytes; + + fbSizeBytes = pMemoryManager->Ram.fbTotalMemSizeMb << 20; + + // + // Ampere SKUs will be paired with NVSwitches (Limerock) supporting 2K + // mapslots that can cover 64GB each. Make sure that the fabric base + // address being used is valid to cover whole frame buffer. + // + + // Check if fabric address is aligned to mapslot size. + if (flaBaseAddr & (NVBIT64(NVLNK_FABRIC_ADDR_GRANULARITY) - 1)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Align fbSize to mapslot size. + fbSizeBytes = RM_ALIGN_UP(fbSizeBytes, NVBIT64(NVLNK_FABRIC_ADDR_GRANULARITY)); + + // Make sure the address range doesn't go beyond the limit, (2K * 64GB). + if ((flaBaseAddr + fbSizeBytes) > NVBIT64(NV_BUS_FLA_VASPACE_ADDR_HI)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +/*! + * @brief Validates FLA Range allocated in the GPU when FM registers itself to RM + * This is useful when FM gets killed/crashed during the app runtime and can + * re-spawn at any point later. We don't do any client validation, since FM is + * a privileged process managed by sysadmin. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] flaBaseAddr NvU64 address + * @param[in] flaSize NvU64 Size + * + * @returns NV_TRUE, if flaBaseAddr & flaSize matches the existing FLA VAS allocation + * else, NV_FALSE + * + */ + NvBool + kbusVerifyFlaRange_GA100 + ( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 flaBaseAddr, + NvU64 flaSize + ) + { + if ((pKernelBus->flaInfo.base != flaBaseAddr) || (pKernelBus->flaInfo.size != flaSize)) + return NV_FALSE; + + NV_PRINTF(LEVEL_INFO, "FLA base: %llx, size: %llx is verified \n", flaBaseAddr, flaSize); + return NV_TRUE; + } diff --git a/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c b/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c new file mode 100644 index 000000000..664049cd7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c @@ -0,0 +1,4415 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/bif/kernel_bif.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/gpu_vaspace.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "core/system.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "rmapi/rs_utils.h" +#include "vgpu/rpc.h" +#include "nvRmReg.h" + +#include "gpu/mem_mgr/fermi_dma.h" + +#include "published/maxwell/gm107/dev_ram.h" +#include "published/maxwell/gm107/dev_bus.h" +#include "published/maxwell/gm107/dev_mmu.h" + +#include "class/cl90f1.h" // FERMI_VASPACE_A + +// +// forwards +// +static NV_STATUS _kbusInitP2P_GM107(OBJGPU *, KernelBus *); +static NV_STATUS _kbusDestroyP2P_GM107(OBJGPU *, KernelBus *); +static void _kbusLinkP2P_GM107(OBJGPU *, KernelBus *); + +static NvU32 _kbusGetSizeOfBar2PageDir_GM107(NvU64 vaBase, NvU64 vaLimit, NvU64 vaPerEntry, NvU32 entrySize); + +static NV_STATUS _kbusBar0TunnelCb_GM107(void *pPrivData, NvU64 addr, void *pData, NvU64 size, NvBool bRead); + +NV_STATUS _kbusMapAperture_GM107(OBJGPU *, PMEMORY_DESCRIPTOR, OBJVASPACE *, NvU64, NvU64 *, + NvU64 *, NvU32 mapFlags, NvHandle hClient); +NV_STATUS _kbusUnmapAperture_GM107(OBJGPU *, OBJVASPACE *, PMEMORY_DESCRIPTOR, NvU64); +MEMORY_DESCRIPTOR* kbusCreateStagingMemdesc(OBJGPU *pGpu); + +// This is the peer number assignment for SLI with +// 8 GPUs. 
The peer ID's should be symmetrical +static const NvU32 peerNumberTable_GM107[8][8] = +{ + {0, 0, 1, 2, 3, 4, 5, 6}, + {0, 0, 2, 3, 4, 5, 6, 7}, + {1, 2, 0, 4, 5, 6, 7, 0}, + {2, 3, 4, 0, 6, 7, 0, 1}, + {3, 4, 5, 6, 0, 0, 1, 2}, + {4, 5, 6, 7, 0, 0, 2, 3}, + {5, 6, 7, 0, 1, 2, 0, 4}, + {6, 7, 0, 1, 2, 3, 4, 0} +}; + +// Helper function to create a staging buffer memdesc with a size of one page +MEMORY_DESCRIPTOR* +kbusCreateStagingMemdesc(OBJGPU *pGpu) +{ + return NULL; +} + +NV_STATUS +kbusConstructHal_GM107(OBJGPU *pGpu, KernelBus *pKernelBus) +{ + + NV_PRINTF(LEVEL_INFO, "Entered \n"); + + pKernelBus->p2pPcie.writeMailboxBar1Addr = PCIE_P2P_INVALID_WRITE_MAILBOX_ADDR; + + pKernelBus->bar2[GPU_GFID_PF].pdeBase = 0xdeadbeef; + pKernelBus->bar2[GPU_GFID_PF].pteBase = 0xdeadbeef; + + pKernelBus->bar2[GPU_GFID_PF].cpuInvisibleBase = 0; + pKernelBus->bar2[GPU_GFID_PF].cpuInvisibleLimit = 0; + + pKernelBus->virtualBar2[GPU_GFID_PF].pVASpaceHeap = NULL; + pKernelBus->virtualBar2[GPU_GFID_PF].pMapListMemory = NULL; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)) + { + pKernelBus->bFbFlushDisabled = NV_TRUE; + } + + // + // Conditions to disable CPU pointer for flushing VBAR2: + // 1. If inst_in_sys is passed in (regkey setting) + // 2. If FB flushing is disabled (brokenFB or regkey setting) + // 3. If we are on GSP firmware + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM) || + kbusIsFbFlushDisabled(pKernelBus) || + RMCFG_FEATURE_PLATFORM_GSP) + { + pKernelBus->bReadCpuPointerToFlush = NV_FALSE; + } + + // indicate that Bar2 is not initialized yet + pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping = NULL; + + pKernelBus->numPeers = P2P_MAX_NUM_PEERS; + + // + // Default apertures for BAR2 PTEs/PDEsr + // + pKernelBus->PTEBAR2Aperture = ADDR_FBMEM; + pKernelBus->PTEBAR2Attr = NV_MEMORY_WRITECOMBINED; + pKernelBus->PDEBAR2Aperture = ADDR_FBMEM; + pKernelBus->PDEBAR2Attr = NV_MEMORY_WRITECOMBINED; + + return NV_OK; +} + +NV_STATUS +kbusStatePreInitLocked_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NV_PRINTF(LEVEL_INFO, "gpu:%d\n", pGpu->gpuInstance); + + // kbusInitBarsSize_HAL for VGPU is called in early phase + if (! IS_VIRTUAL(pGpu)) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kbusInitBarsSize_HAL(pGpu, pKernelBus)); + } + + kbusDetermineBar1Force64KBMapping(pKernelBus); + + kbusDetermineBar1ApertureLength(pKernelBus, GPU_GFID_PF); + + if (NV_OK != kbusConstructVirtualBar2_HAL(pGpu, pKernelBus, GPU_GFID_PF)) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +/*! + * Stub eheap free for address reuse case below. This allows us to not if the code. + */ +static NV_STATUS nullEHeapFree(OBJEHEAP *thisHeap, NvU64 offset) +{ + return NV_OK; +} + +/*! + * @brief program the default BAR0 window based on the mode we are running at. + */ +static void +kbusSetupDefaultBar0Window +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + NvU64 offsetBar0; + + // + // Initialize BAR0 window to the last 1MB of FB. Since this is where it + // should already be positioned by the vbios, this should not be an issue. 
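+    // In the default case (reserved memory not at the bottom of FB), a
+    // hypothetical 16384 MB FB address space puts the window at
+    // (16384 << 20) - DRF_SIZE(NV_PRAMIN) = 0x3FFF00000, i.e. the final 1MB.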
+ // Do NOT ever move this BAR0 window away from the last 1MB since it's + // shared with the vbios + // + if (pMemorySystemConfig->bReservedMemAtBottom) + { + offsetBar0 = memmgrGetRsvdMemoryBase(pMemoryManager); + } + else + { + offsetBar0 = (pMemoryManager->Ram.fbAddrSpaceSizeMb << 20) - DRF_SIZE(NV_PRAMIN); + } + + // + // However, when running in L2 cache only mode, there is typically + // less than 1MB of L2 cache, so just position the BAR0 either at + // the start of FB or at the end of L2 depending on how big the + // window is compared to the size of L2. We want to make sure that + // the window overlaps reserved memory. + // + if (gpuIsCacheOnlyModeEnabled(pGpu) || + !(pMemorySystemConfig->bFbpaPresent)) + { + if (pMemorySystemConfig->l2CacheSize < DRF_SIZE(NV_PRAMIN)) + { + // L2 Cache size is < BAR0 window size so just set it offset to 0 + offsetBar0 = 0; + } + else + { + // + // L2 Cache size is > BAR0 window, so position it at the end of L2 to + // make sure it overlaps reserved memory, which is at the end of L2 + // + offsetBar0 = pMemorySystemConfig->l2CacheSize - DRF_SIZE(NV_PRAMIN); + } + } + + if (!IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + DEVICE_MAPPING *pDeviceMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + pKernelBus->pUncachedBar0Window = (NvU8*) &pDeviceMapping->gpuNvAddr->Reg008[DRF_BASE(NV_PRAMIN)]; + pKernelBus->pDefaultBar0Pointer = pKernelBus->pUncachedBar0Window; + pKernelBus->physicalBar0WindowSize = DRF_SIZE(NV_PRAMIN); + + kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, offsetBar0); + + } +} + +/*! + * @brief kbusStateInit routine for Kernel RM functionality. + */ +NV_STATUS +kbusStateInitLockedKernel_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + NvU32 data; + + if ((pKernelBif != NULL) && (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED) || + !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED))) + { + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_MAP_P2P_PEER_ID, &data) == NV_OK) + { + pKernelBus->p2pMapSpecifyId = NV_TRUE; + pKernelBus->p2pMapPeerId = data; + } + else + { + pKernelBus->p2pMapSpecifyId = NV_FALSE; + } + + if (gpumgrGetGpuLinkCount(pGpu->gpuInstance) > 0) + { + if (!kbusIsP2pInitialized(pKernelBus) && + !kbusIsP2pMailboxClientAllocated(pKernelBus)) + { + _kbusInitP2P_GM107(pGpu, pKernelBus); + } + } + else + { + pKernelBus->bP2pInitialized = NV_TRUE; + } + } + + kbusSetupDefaultBar0Window(pGpu, pKernelBus); + + // + // Initialize BAR2 before initializing BAR1. That way, we can use BAR2 + // rather than BAR0 to set up the BAR1 page table. This is faster because + // BAR2 can be write-combined + // + NV_ASSERT_OK_OR_RETURN(kbusInitBar2_HAL(pGpu, pKernelBus, GPU_GFID_PF)); + + // Verify that BAR2 and the MMU actually works + (void) kbusVerifyBar2_HAL(pGpu, pKernelBus, NULL, NULL, 0, 0); + + if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + vgpuGspSetupBuffers(pGpu); + } + + // + // For "unsupported" mmu invalidate skipping mode, we align virtual BAR2 + // to avoid false TLB fills and disallow address reuse. 
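+    // Address reuse is prevented by routing the heap's free callback to the
+    // nullEHeapFree stub defined above, so freed virtual BAR2 ranges are
+    // never handed back out.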
+ // + if (pDma->getProperty(pDma, PDB_PROP_DMA_MMU_INVALIDATE_DISABLE)) + { + pKernelBus->virtualBar2[GPU_GFID_PF].vAlignment = 16 * RM_PAGE_SIZE; + pKernelBus->virtualBar2[GPU_GFID_PF].pVASpaceHeap->eheapFree = nullEHeapFree; + } + + return NV_OK; +} + +NV_STATUS +kbusStateInitLocked_IMPL(OBJGPU *pGpu, KernelBus *pKernelBus) +{ + // Nothing to be done in guest for the paravirtualization case. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return NV_OK; + } + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + NV_ASSERT_OK_OR_RETURN(kbusInitBar2_HAL(pGpu, pKernelBus, GPU_GFID_PF)); + } + + NV_ASSERT_OK_OR_RETURN(kbusStateInitLockedKernel_HAL(pGpu, pKernelBus)); + + NV_ASSERT_OK_OR_RETURN(kbusStateInitLockedPhysical_HAL(pGpu, pKernelBus)); + + return NV_OK; +} + +NV_STATUS +kbusStatePostLoad_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + + if ( ! IS_GPU_GC6_STATE_EXITING(pGpu)) + { + // Bar1 is created once per Gpu on each Gpu call to kbusStatePostLoad_GM107 + if ((status = kbusInitBar1_HAL(pGpu, pKernelBus, GPU_GFID_PF)) != NV_OK) + { + return status; + } + } + + if ((pKernelBif != NULL) + && + // RM managed P2P or restoring the HW state for OS resume + (!kbusIsP2pMailboxClientAllocated(pKernelBus) || + (flags & GPU_STATE_FLAGS_PM_TRANSITION)) + && + (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED) || + !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED))) + { + _kbusLinkP2P_GM107(pGpu, pKernelBus); + } + + return status; +} + +NV_STATUS +kbusStatePreUnload_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 flags +) +{ + if (!((flags & GPU_STATE_FLAGS_PRESERVING) || + IS_GPU_GC6_STATE_ENTERING(pGpu))) + { + // + // Get rid of the bars if this is not PM. There were/are issues with user-mode + // OGL on XP not knowing that the system has enter suspend and so continuing to + // run (and issue APIs, touch bar1 resources, whatever). Therefore we cannot + // teardown bar1 path when entering suspend. + // + kbusDestroyBar1_HAL(pGpu, pKernelBus, GPU_GFID_PF); + } + + return NV_OK; +} + +NV_STATUS +kbusStateUnload_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + + if ((pKernelBif != NULL) + && + (!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED) || + !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED)) + && + // RM managed P2P or unconfiguring HW P2P for OS suspend/hibernate + (!kbusIsP2pMailboxClientAllocated(pKernelBus) || + (flags & GPU_STATE_FLAGS_PM_TRANSITION))) + { + kbusUnlinkP2P_HAL(pGpu, pKernelBus); + } + + if (flags & GPU_STATE_FLAGS_PRESERVING) + { + if (!IS_GPU_GC6_STATE_ENTERING(pGpu)) + { + status = kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, GPU_GFID_PF); + // Do not use BAR2 physical mode for bootstrapping BAR2 across S/R. + pKernelBus->bUsePhysicalBar2InitPagetable = NV_FALSE; + } + } + else + { + // Clear write mailbox data window info. + pKernelBus->p2pPcie.writeMailboxBar1Addr = PCIE_P2P_INVALID_WRITE_MAILBOX_ADDR; + pKernelBus->p2pPcie.writeMailboxTotalSize = 0; + } + + pKernelBus->cachedBar0WindowVidOffset = 0x0; + + return status; +} + +/*! + * @brief Init BAR1. + * + * - Inits FERMI BUS HALINFO Bar1 structure + * - Sets up BAR1 address space + * - The function is skipped during GC6 cycle. 
It can update page table in + * VIDMEM/SYSMEM but all register access should be avoid in the function + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK on success, or rm_status from called functions on failure. + */ +NV_STATUS +kbusInitBar1_GM107(OBJGPU *pGpu, KernelBus *pKernelBus, NvU32 gfid) +{ + OBJEHEAP *pVASpaceHeap = NULL; + NV_STATUS rmStatus = NV_OK; + NvU64 apertureVirtAddr, apertureVirtLength; + NvU64 vaRangeMax; + NvU32 vaflags; + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + NvU32 vaSpaceBigPageSize = 0; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + NvU32 gpuMask = 0; + NvBool bSmoothTransitionEnabled = ((pGpu->uefiScanoutSurfaceSizeInMB != 0) && + RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM); + + vaRangeMax = pKernelBus->bar1[gfid].apertureLength - 1; + + // + // In sleep-resume, BAR1 is not destroyed - but we would have to rebind the BAR1. + // Since that's done already. Exit early from here. + // + if (pKernelBus->bar1[gfid].pVAS != NULL) + { + return rmStatus; + } + + // + // For BAR address spaces, leave a valid PTE pointed to page 0. + // According to page 196 of revision 2.1 of the PCI spec, prefetchable + // memory must have no side effects on reads, return all bytes on reads + // regardless of byte enables, and host bridges can merge processor + // writes without errors. + // + // Setting this is done by a combination of two steps. Sparsify the VAS + // to prevent faults during CPU access and set FULL_PTE. + // + // For front door simulation and mods emulation however this leads to an + // excessive amount of time updating BAR1 PTEs. So for mods in simulation + // and emulation we don't set the FULL_PTE flag. The VMA code will only + // validate the used parts of the PDE in this case, but will make sure to + // leave one unused scratch page at the end of the valid range. + // + vaflags = VASPACE_FLAGS_BAR | VASPACE_FLAGS_BAR_BAR1; + vaflags |= VASPACE_FLAGS_ALLOW_ZERO_ADDRESS; // BAR1 requires a zero VAS base. + vaflags |= VASPACE_FLAGS_ENABLE_VMM; + +#if defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + { + NvU32 data32 = 0; + // + // The BAR1 page size can be only configured for mods verification. + // for mods only we will override the default bar1 big page size if this regkey is set. + // This is the mods plan for testing interop between clients with multiple + // big page sizes. 
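+        // Only the 64K and 128K values are honored by the switch below; any
+        // other value falls back to the arch default big page size.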
+ // + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_SET_BAR1_ADDRESS_SPACE_BIG_PAGE_SIZE, &data32) == NV_OK) + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + if (kgmmuIsPerVaspaceBigPageEn(pKernelGmmu)) + { + vaSpaceBigPageSize = data32; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Arch doesnt support BAR1 Big page Override- Using defaults\n"); + NV_ASSERT(0); + vaSpaceBigPageSize = 0; + } + } + } +#endif // defined(DEVELOP) || defined(DEBUG) || defined(NV_MODS) + + switch (vaSpaceBigPageSize) + { + case FERMI_BIG_PAGESIZE_64K: + vaflags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _64K); + break; + case FERMI_BIG_PAGESIZE_128K: + vaflags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _128K); + break; + default: + vaflags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + break; + } + + // + // kbusIsP2pMailboxClientAllocated: + // The client allocates the mailbox area + // It is not safe to disable smooth transition from RM as it assumed to be enabled in KMD + // + if (kbusIsP2pMailboxClientAllocated(pKernelBus)) + { + // KMD requires smooth transition to have a reverse BAR1 VA space + if (bSmoothTransitionEnabled) + vaflags |= VASPACE_FLAGS_REVERSE; + } + else + { + // + // Smooth transition is enabled + // Bug# 3208922: For BAR1 range > 4gig on notebooks. + // For BAR1 range less than 4gig, otherwise + // + if (bSmoothTransitionEnabled && (IsMobile(pGpu) || (vaRangeMax < NV_U32_MAX))) + { + // + // If UEFI scanoutsurface size is configured to be non-zero, + // we are going to move all BAR1 vaspace requests to not + // conflict the UEFI scanout surface at offset 0 to the higher + // address range. + // + // P2P mailbox registers are 34 bit wide and hence can only address + // first 16 GiG of BAR1 due to the limited address width. Hence, + // they cannot be moved to the top of the BAR1 always. + // + // We are restricting this feature only to those SKUs which + // has BAR1 aperture within 4gig range, because this feature is + // notebook only, and the expectation is the BAR1 va range will + // not be that huge. Once BAR1 va range crosses 4gig (eventhough smaller? + // than 16 gig), we may have to revisit p2p mailbox and expand it to + // full fb range - as there will be new features such as dynamic BAR1. + // + // Choosing the smallest 4gig range for now. + // + vaflags |= VASPACE_FLAGS_REVERSE; + } + else + { + bSmoothTransitionEnabled = NV_FALSE; + pGpu->uefiScanoutSurfaceSizeInMB = 0; + } + } + + if (IS_GFID_VF(gfid)) + { + vaflags |= VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR; + } + + gpuMask = NVBIT(pGpu->gpuInstance); + + rmStatus = vmmCreateVaspace(pVmm, FERMI_VASPACE_A, 0, gpuMask, + 0, vaRangeMax, 0, 0, NULL, + vaflags, &pKernelBus->bar1[gfid].pVAS); + if (NV_OK != rmStatus) + { + NV_PRINTF(LEVEL_ERROR, + "Could not construct BAR1 VA space object.\n"); + pKernelBus->bar1[gfid].pVAS = NULL; + DBG_BREAKPOINT(); + return rmStatus; + } + + // Restrict normal BAR1 alloc to be within the aperture + pVASpaceHeap = vaspaceGetHeap(pKernelBus->bar1[gfid].pVAS); + + + // + // Reduce BAR1 VA space by FERMI_SMALL_PAGESIZE for host overfetch bug + // WAR (Bug 529932/525381). (FERMI_SMALL_PAGESIZE is sufficient to + // avoid big pagesize allocations at the end of BAR1 VA space.) 
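//
// Editor's note - illustrative sketch only, not part of this change. The
// statements that follow shrink the allocatable BAR1 range by one small page
// so the final 4K page is never handed out (host overfetch WAR described
// above). A standalone sketch of that range computation; the "example_" names
// are hypothetical:
//
#include <stdint.h>

#define EXAMPLE_SMALL_PAGE_SIZE 0x1000ull  /* 4 KiB */

// Returns the inclusive upper bound of the allocatable range for an aperture
// of 'apertureLength' bytes, keeping the last small page unused.
static uint64_t example_usable_va_limit(uint64_t apertureLength)
{
    uint64_t vaRangeMax = apertureLength - 1;      // inclusive limit of the aperture
    return vaRangeMax - EXAMPLE_SMALL_PAGE_SIZE;   // exclude the final 4K page
}
// e.g. a 256 MiB BAR1 gives a limit of 0x0FFFEFFF instead of 0x0FFFFFFF.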
+ // + vaRangeMax -= FERMI_SMALL_PAGESIZE; + rmStatus = pVASpaceHeap->eheapSetAllocRange(pVASpaceHeap, + 0, vaRangeMax); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to set BAR1 alloc range to aperture size!\n"); + goto kbusInitBar1_failed; + } + + // + // Make sure the aperture length we are using not larger than the maximum length available. + // Usually, bar1.apertureLength should be equal to the return value of kbusGetPciBarSize, however, + // in L2 cache only mode, the aperture length being used may have been overridden to a smaller size, + // so take that into account in the assert. + // + NV_ASSERT(pKernelBus->bar1[gfid].apertureLength <= kbusGetPciBarSize(pKernelBus, 1)); + + // + // If we need to preserve a console mapping at the start of BAR1, we + // need to allocate the VA space before anything else gets allocated. + // + if (IS_GFID_PF(gfid) && + (kbusIsPreserveBar1ConsoleEnabled(pKernelBus) || bSmoothTransitionEnabled)) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 bar1VAOffset = 0; + NvU64 fbPhysOffset = 0; + NvU64 consoleSize = 0; + PMEMORY_DESCRIPTOR pConsoleMemDesc = NULL; + MEMORY_DESCRIPTOR memdesc; + + if (bSmoothTransitionEnabled) + { + // + // Smooth transition - The physical fb offset 0 to uefiScanoutSurfaceSize(InMB) should be identity mapped. + // The lower FB region at offset 0 is owned by PMA and OS in wddm and hence RM will not reserve the physical + // FB memory but only describe it. + // + pConsoleMemDesc = &memdesc; + memdescCreateExisting(pConsoleMemDesc, pGpu, pGpu->uefiScanoutSurfaceSizeInMB * 1024 * 1024, ADDR_FBMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE); + memdescDescribe(pConsoleMemDesc, ADDR_FBMEM, 0, pGpu->uefiScanoutSurfaceSizeInMB * 1024 * 1024); + pConsoleMemDesc->_pageSize = RM_PAGE_SIZE; + } + else if (kbusIsPreserveBar1ConsoleEnabled(pKernelBus)) + { + pConsoleMemDesc = memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager); + } + + if (pConsoleMemDesc) + { + consoleSize = memdescGetSize(pConsoleMemDesc); + + NV_PRINTF(LEVEL_INFO, + "preserving console BAR1 mapping (0x%llx)\n", + consoleSize); + + rmStatus = kbusMapFbAperture_HAL(pGpu, pKernelBus, pConsoleMemDesc, fbPhysOffset, + &bar1VAOffset, &consoleSize, + BUS_MAP_FB_FLAGS_MAP_UNICAST | BUS_MAP_FB_FLAGS_MAP_OFFSET_FIXED, + NV01_NULL_OBJECT); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "cannot preserve console mapping in BAR1 (0x%llx, 0x%x)\n", + consoleSize, rmStatus); + goto kbusInitBar1_failed; + } + + // + // The reserved console is assumed by the console-driving code to + // be at offset 0 of BAR1; anything else will break it. + // NOTE: Since BUS_MAP_FB_FLAGS_MAP_OFFSET_FIXED is passed we should never + // get here. But this is legacy code leaving it here. 
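//
// Editor's note - illustrative sketch only, not part of this change. The
// mapping above pins the reserved console surface at BAR1 offset 0 by
// requesting a fixed offset before any other BAR1 allocation is made; the
// check that follows is only a sanity check. A standalone sketch of
// "reserve a fixed range before general allocation", with a deliberately
// trivial bump allocator; the "example_" names are hypothetical:
//
#include <stdbool.h>
#include <stdint.h>

typedef struct
{
    uint64_t nextFreeOffset;   // everything below this offset is already taken
    uint64_t limit;            // inclusive upper bound of the space
} example_range_allocator;

// Succeeds only while nothing else has been allocated yet, mirroring the
// requirement that the console reservation happens first.
static bool example_reserve_fixed(example_range_allocator *a, uint64_t base, uint64_t size)
{
    if (size == 0 || base != a->nextFreeOffset || base + size - 1 > a->limit)
        return false;
    a->nextFreeOffset = base + size;
    return true;
}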
+ // + if (bar1VAOffset != 0) + { + NV_PRINTF(LEVEL_ERROR, + "expected console @ BAR1 offset 0 (0x%llx, 0x%x)\n", + bar1VAOffset, rmStatus); + DBG_BREAKPOINT(); + kbusUnmapFbAperture_HAL(pGpu, pKernelBus, pConsoleMemDesc, + bar1VAOffset, consoleSize, + BUS_MAP_FB_FLAGS_MAP_UNICAST | BUS_MAP_FB_FLAGS_PRE_INIT); + goto kbusInitBar1_failed; + } + + pKernelBus->bBar1ConsolePreserved = NV_TRUE; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "no console memdesc available to preserve\n"); + DBG_BREAKPOINT(); + goto kbusInitBar1_failed; + } + } + + // Reserve space for max number of peers regardless of SLI config + if ((!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED) || + !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED)) + && + IS_GFID_PF(gfid) + && + !kbusIsP2pMailboxClientAllocated(pKernelBus)) + { + rmStatus = kbusAllocP2PMailboxBar1_HAL(pGpu, pKernelBus, gfid, vaRangeMax); + + if (NV_OK != rmStatus) + { + goto kbusInitBar1_failed; + } + } + + // + // BAR1 vaspace is sparsified during vaspace creation + // and hence pdb is allocated during vaspace and destroyed + // when BAR1 is destroyed. During power-save restore cycle + // BAR1 is not destroyed, but only the instance memory is + // unbound and put in physical mode and rebound upon restore. + // Hence pdb of BAR1 is static and can be updated here during + // initialization instead of previously in mmu update pdb. + // + rmStatus = kbusBar1InstBlkVasUpdate_HAL(pGpu, pKernelBus); + + if (NV_OK != rmStatus) + { + goto kbusInitBar1_failed; + } + + kbusPatchBar1Pdb_HAL(pGpu, pKernelBus); + + apertureVirtAddr = pKernelBus->p2pPcie.writeMailboxBar1Addr; + apertureVirtLength = pKernelBus->p2pPcie.writeMailboxTotalSize; + + // + // Copy the mailbox setup to other GPUs + // + // This SLI_LOOP is only necessary because _kbusLinkP2P_GM107 is called + // after each call to kbusInitBar1_GM107 in the function busStatePostLoad_GM107. + // _kbusLinkP2P_GM107 requires that the writeMailboxAddr of every GPU be set, but + // that can only happen after kbusInitbar1_GM107 is called on every GPU. In the + // future, if we can separate the function that kbusInitBar1_GM107 is called in + // and the function that _kbusLinkP2P_GM107 is called in. Then, all of the + // kbusInitBar1_GM107 calls can finish and create writeMailboxes, and we can + // remove this SLI_LOOP. + // + if (gpumgrIsParentGPU(pGpu) && + !kbusIsP2pMailboxClientAllocated(pKernelBus)) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + pKernelBus->p2pPcie.writeMailboxBar1Addr = apertureVirtAddr; + pKernelBus->p2pPcie.writeMailboxTotalSize = apertureVirtLength; + } + SLI_LOOP_END + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + } + +kbusInitBar1_failed: + if (rmStatus != NV_OK) + { + kbusDestroyBar1_HAL(pGpu, pKernelBus, gfid); + } + + if (!bSmoothTransitionEnabled || (rmStatus != NV_OK)) + { + pGpu->uefiScanoutSurfaceSizeInMB = 0; + } + + return rmStatus; +} + +/*! + * @brief Destroy BAR1 + * + * Destroys Bar1 VA Space. BAR1 vaspace is not destroyed during + * Power save. + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK always. 
+ */ +NV_STATUS +kbusDestroyBar1_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + + if (pKernelBus->bar1[gfid].pVAS != NULL) + { + + // Remove the P2P write mailbox alloc, if it exists + if ((pKernelBus->p2pPcie.writeMailboxBar1Addr != PCIE_P2P_INVALID_WRITE_MAILBOX_ADDR) && + IS_GFID_PF(gfid)) + { + if (!kbusIsP2pMailboxClientAllocated(pKernelBus)) + vaspaceFree(pKernelBus->bar1[gfid].pVAS, pKernelBus->p2pPcie.writeMailboxBar1Addr); + pKernelBus->p2pPcie.writeMailboxBar1Addr = PCIE_P2P_INVALID_WRITE_MAILBOX_ADDR; + pKernelBus->p2pPcie.writeMailboxTotalSize = 0; + } + + // Remove the preserved BAR1 console mapping, if it exists + if (pKernelBus->bBar1ConsolePreserved && IS_GFID_PF(gfid)) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + PMEMORY_DESCRIPTOR pConsoleMemDesc = + memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager); + + if (pConsoleMemDesc != NULL) + { + NvU64 consoleSize = memdescGetSize(pConsoleMemDesc); + + kbusUnmapFbAperture_HAL(pGpu, pKernelBus, pConsoleMemDesc, + 0, consoleSize, BUS_MAP_FB_FLAGS_MAP_UNICAST | BUS_MAP_FB_FLAGS_PRE_INIT); + } + else if (pGpu->uefiScanoutSurfaceSizeInMB) + { + vaspaceFree(pKernelBus->bar1[gfid].pVAS, 0); + } + else + { + NV_ASSERT(pConsoleMemDesc != NULL); + } + + pKernelBus->bBar1ConsolePreserved = NV_FALSE; + } + + vmmDestroyVaspace(pVmm, pKernelBus->bar1[gfid].pVAS); + + pKernelBus->bar1[gfid].pVAS = NULL; + } + + if (IS_GFID_VF(gfid) && (pKernelBus->bar1[gfid].pInstBlkMemDesc != NULL)) + { + memdescFree(pKernelBus->bar1[gfid].pInstBlkMemDesc); + memdescDestroy(pKernelBus->bar1[gfid].pInstBlkMemDesc); + pKernelBus->bar1[gfid].pInstBlkMemDesc = NULL; + } + + return status; +} + +/*! + * @brief Initialize BAR2 + * + * 1. Setup Bar2 VA Space. + * 2. Setup Bar2 in HW. + * 3. Host over fetch WAR. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] gfid GFID for VF + * + * @returns NV_OK on success. + */ +NV_STATUS +kbusInitBar2_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + NV_STATUS status = NV_OK; + + // + // Nothing to be done in guest in the paravirtualization case or + // if guest is running in SRIOV heavy mode. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + status = kbusSetupBar2CpuAperture_HAL(pGpu, pKernelBus, gfid); + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + + if (KBUS_BAR2_ENABLED(pKernelBus)) + { + status = kbusSetupBar2GpuVaSpace_HAL(pGpu, pKernelBus, gfid); + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + } + + status = kbusCommitBar2_HAL(pGpu, pKernelBus, GPU_STATE_DEFAULT); + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + + if (IS_GFID_PF(gfid)) + { + pKernelBus->bIsBar2Initialized = NV_TRUE; + } + +cleanup: + if (status != NV_OK) + { + kbusDestroyBar2_HAL(pGpu, pKernelBus, gfid); + } + + return status; +} + +/*! + * @brief Destroy BAR2 + * + * 1. Tear down BAR2 Cpu Aperture. + * 2. Destroy Bar2 Gpu VA Space. + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK on success. + */ +NV_STATUS +kbusDestroyBar2_GM107(OBJGPU *pGpu, KernelBus *pKernelBus, NvU32 gfid) +{ + NV_STATUS status = NV_OK; + + if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) && IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + vgpuGspTeardownBuffers(pGpu); + } + + // + // Nothing to be done in guest in the paravirtualization case or + // if guest is running in SRIOV heavy mode. 
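//
// Editor's note - illustrative sketch only, not part of this change.
// kbusInitBar2_GM107 above initializes BAR2 in stages (CPU aperture, GPU VA
// space, commit) and unwinds everything through a single cleanup path on any
// failure. A standalone sketch of that goto-cleanup pattern; the "example_"
// step functions are hypothetical stubs:
//
#include <stdbool.h>

static bool example_step_a(void)    { return true;  }  // hypothetical stage 1
static bool example_step_b(void)    { return false; }  // hypothetical stage 2 (fails here)
static void example_teardown(void)  { /* undoes whatever the stages set up */ }

static int example_staged_init(void)
{
    int status = 0;

    if (!example_step_a()) { status = -1; goto cleanup; }
    if (!example_step_b()) { status = -1; goto cleanup; }

cleanup:
    if (status != 0)
    {
        // One teardown path handles every partial-failure case.
        example_teardown();
    }
    return status;
}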
+ // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + if (kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, gfid) != NV_OK) + { + status = NV_ERR_GENERIC; + } + + if (KBUS_BAR2_ENABLED(pKernelBus)) + { + if (kbusTeardownBar2GpuVaSpace_HAL(pGpu, pKernelBus, gfid) != NV_OK) + { + status = NV_ERR_GENERIC; + } + } + + if (IS_GFID_PF(gfid)) + { + pKernelBus->bIsBar2Initialized = NV_FALSE; + } + + // + // In cache only mode, do a video memory flush after unbinding BARS to + // make sure that during capture, we don't get stuck waiting on L2. + // This could probably just be done all the time, but currently limiting + // to cache only mode. + // + if (gpuIsCacheOnlyModeEnabled(pGpu) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB)) + { + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY); + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY); + } + + return status; +} + +/*! + * @brief Setup BAR2 aperture for CPU access + * + * 1. Acquire BAR2 CPU mapping. + * 2. Initialize BAR2 GPU vaspace. + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK on success. + */ +NV_STATUS +kbusSetupBar2CpuAperture_GM107(OBJGPU *pGpu, KernelBus *pKernelBus, NvU32 gfid) +{ + NV_STATUS status = NV_OK; + + // Nothing to be done in guest in the paravirtualization case. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || IS_GFID_VF(gfid) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + if (pKernelBus->virtualBar2[gfid].pCpuMapping != NULL) + { + NV_PRINTF(LEVEL_ERROR, "BAR2 already initialized!\n"); + return NV_ERR_GENERIC; + } + + if (0 == pKernelBus->bar2[gfid].pteBase) + { + NV_PRINTF(LEVEL_ERROR, + "BAR2 pteBase not initialized by fbPreInit_FERMI!\n"); + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + !gpuIsCacheOnlyModeEnabled(pGpu)) + { + pKernelBus->virtualBar2[gfid].pCpuMapping = NULL; + return NV_OK; + } + + if (KBUS_BAR2_TUNNELLED(pKernelBus)) + { + // + // Since GK20A doesn't support BAR2 accesses we tunnel all RM BAR2 accesses + // through the BAR0 window. For this we register a callback function with the + // OS layer which is called when RM accesses an address in the CPU BAR2 VA range. + // We skip the normal stuff we do init BAR2 (like init-ing BAR2 inst block) since + // they are not needed for GK20A. + // + + // + // Map bar2 space -- only map the space we use in the RM. Some 32b OSes are *cramped* + // for kernel virtual addresses. On GK20A, we just alloc CPU VA space since there is no + // actual bar2, and tunnel the "fake" bar2 accesses through the bar0 window. 
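//
// Editor's note - illustrative sketch only, not part of this change. In the
// tunnelled case described above, there is no real BAR2, so the code that
// follows allocates a plain CPU buffer and registers an OS memory-access
// filter over its address range; accesses to that range are then redirected
// through the BAR0 window. A standalone sketch of such a range-filter lookup;
// the "example_" names are hypothetical:
//
#include <stddef.h>
#include <stdint.h>

typedef void (*example_access_cb)(void *ctx, uint64_t offsetInRange);

typedef struct
{
    uint64_t          base;   // first byte covered by the filter
    uint64_t          limit;  // last byte covered by the filter (inclusive)
    example_access_cb cb;     // called instead of touching the memory directly
    void             *ctx;
} example_mem_filter;

// Returns nonzero if the access was claimed by the filter.
static int example_filter_access(const example_mem_filter *f, uint64_t addr)
{
    if (addr < f->base || addr > f->limit)
        return 0;
    f->cb(f->ctx, addr - f->base);  // e.g. route the access through the BAR0 window
    return 1;
}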
+ // + pKernelBus->virtualBar2[gfid].pCpuMapping = portMemAllocNonPaged(pKernelBus->bar2[gfid].rmApertureLimit + 1); + if (pKernelBus->virtualBar2[gfid].pCpuMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, "- Unable to map bar2!\n"); + DBG_BREAKPOINT(); + return NV_ERR_NO_MEMORY; + } + + // + // Call the OS add mem filter routine now that bar2 is mapped + // Currently this is used to route bar2 accesses through bar0 on gk20A + // + status = osMemAddFilter((NvU64)((NvUPtr)(pKernelBus->virtualBar2[gfid].pCpuMapping)), + (NvU64)((NvUPtr)(pKernelBus->virtualBar2[gfid].pCpuMapping)) + + (pKernelBus->bar2[gfid].rmApertureLimit + 1), + _kbusBar0TunnelCb_GM107, + (void *)pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Cannot add os mem filter for bar2 tunneling\n"); + DBG_BREAKPOINT(); + goto cleanup; + } + } + else + { + // + // Map bar2 space -- only map the space we use in the RM. Some 32b OSes are *cramped* + // for kernel virtual addresses. + // + if (NV_OK != osMapPciMemoryKernelOld(pGpu, pKernelBus->bar2[gfid].physAddr, + (pKernelBus->bar2[gfid].rmApertureLimit + 1), + NV_PROTECT_READ_WRITE, + (void**)&(pKernelBus->virtualBar2[gfid].pCpuMapping), + NV_MEMORY_WRITECOMBINED)) + { + NV_PRINTF(LEVEL_ERROR, "- Unable to map bar2!\n"); + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + + NV_PRINTF_COND(IS_EMULATION(pGpu), LEVEL_NOTICE, LEVEL_INFO, + "BAR0 Base Cpu Mapping @ 0x%p and BAR2 Base Cpu Mapping @ 0x%p\n", + pGpu->deviceMappings[0].gpuNvAddr->Reg032, + pKernelBus->virtualBar2[gfid].pCpuMapping); + + + } + +cleanup: + if (status != NV_OK) + { + kbusTeardownBar2CpuAperture_HAL(pGpu, pKernelBus, gfid); + } + + return status; +} + +/*! + * @brief Tear down BAR2 CPU aperture + * + * 1. Release BAR2 GPU vaspace mappings. + * 2. Release BAR2 CPU mapping. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] gfid + * + * @returns NV_OK on success. + */ +NV_STATUS +kbusTeardownBar2CpuAperture_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + // Nothing to be done in guest in the paravirtualization case. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || IS_GFID_VF(gfid) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + if (KBUS_BAR2_TUNNELLED(pKernelBus)) + { + // Unmap bar2 space + if (pKernelBus->virtualBar2[gfid].pCpuMapping) + { + // Remove the memory access filter + osMemRemoveFilter((NvU64)((NvUPtr)(pKernelBus->virtualBar2[gfid].pCpuMapping))); + portMemFree(pKernelBus->virtualBar2[gfid].pCpuMapping); + pKernelBus->virtualBar2[gfid].pCpuMapping = NULL; + } + } + else + { + if (pKernelBus->virtualBar2[gfid].pPageLevels) + { + memmgrMemDescEndTransfer(GPU_GET_MEMORY_MANAGER(pGpu), + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc, + TRANSFER_FLAGS_NONE); + pKernelBus->virtualBar2[gfid].pPageLevels = NULL; + } + + kbusDestroyCpuPointerForBusFlush_HAL(pGpu, pKernelBus); + + kbusFlushVirtualBar2_HAL(pGpu, pKernelBus, NV_FALSE, gfid); + + if (pKernelBus->virtualBar2[gfid].pCpuMapping) + { + osUnmapPciMemoryKernelOld(pGpu, (void*)pKernelBus->virtualBar2[gfid].pCpuMapping); + // Mark the BAR as un-initialized so that a later call + // to initbar2 can succeed. + pKernelBus->virtualBar2[gfid].pCpuMapping = NULL; + } + + // + // make sure that the bar2 mode is physical so that the vesa extended + // linear framebuffer works after driver unload. Clear other bits to force + // vid. 
+ // + // if BROKEN_FB, merely rewriting this to 0 (as it already was) causes + // FBACKTIMEOUT -- don't do it (Bug 594539) + // + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB)) + { + GPU_FLD_WR_DRF_DEF(pGpu, _PBUS, _BAR2_BLOCK, _MODE, _PHYSICAL); + // bug 1738008: temporary fix to unblock -inst_in_sys argument + // we tried to correct bar2 unbind sequence but didn't fix the real issue + // will fix this soon 4/8/16 + GPU_REG_RD32(pGpu, NV_PBUS_BAR2_BLOCK); + } + } + + return NV_OK; +} + +/*! + * @brief Setup BAR2 GPU vaspace + * + * 1. Allocate & initialize BAR2 GPU vaspace page directories & tables. + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK on success. + */ +NV_STATUS +kbusSetupBar2GpuVaSpace_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + NV_STATUS status = NV_OK; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + MMU_WALK *pWalk = NULL; + MMU_WALK_FLAGS walkFlags = {0}; + MMU_WALK_USER_CTX userCtx = {0}; + const MMU_FMT_LEVEL *pLevelFmt = NULL; + NvU64 origVidOffset = 0; + OBJEHEAP *pVASpaceHeap; + MEMORY_DESCRIPTOR *pPageLevelsMemDesc = NULL; + NvU32 allocSize; + + // + // Nothing to be done in guest in the paravirtualization case or if + // if guest is running in SRIOV heavy mode. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + if (IS_GFID_VF(gfid)) + { + // + // VF BAR2 instance block cannot by in PF sysmem as the latter + // is not mapped into VF's IOMMU domain + // + NV_ASSERT_OR_RETURN(pKernelBus->InstBlkAperture == ADDR_FBMEM, NV_ERR_INVALID_ARGUMENT); + + if ((status = memdescCreate(&pKernelBus->bar2[gfid].pInstBlkMemDesc, + pGpu, + GF100_BUS_INSTANCEBLOCK_SIZE, + GF100_BUS_INSTANCEBLOCK_SIZE, + NV_TRUE, + pKernelBus->InstBlkAperture, + pKernelBus->InstBlkAttr, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE)) != NV_OK) + { + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + + status = memdescAlloc(pKernelBus->bar2[gfid].pInstBlkMemDesc); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + pKernelBus->bar2[gfid].instBlockBase = + memdescGetPhysAddr(pKernelBus->bar2[gfid].pInstBlkMemDesc, + AT_GPU, 0); + } + // Add the reserved memory base, converting from relative to absolute addresses. + else + { + if (ADDR_FBMEM == pKernelBus->PDEBAR2Aperture) + pKernelBus->bar2[gfid].pdeBase += memmgrGetRsvdMemoryBase(pMemoryManager); + if (ADDR_FBMEM == pKernelBus->PTEBAR2Aperture) + pKernelBus->bar2[gfid].pteBase += memmgrGetRsvdMemoryBase(pMemoryManager); + } + + if (IS_GFID_PF(gfid)) + { + // Setup BAR0 window for page directory/table updates during BAR2 bootstrap + status = kbusSetupBar0WindowBeforeBar2Bootstrap_HAL(pGpu, pKernelBus, &origVidOffset); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + } + + // Get Bar2 VA limit. + pKernelBus->bar2[gfid].vaLimit = kbusGetVaLimitForBar2_HAL(pGpu, pKernelBus); + + // + // Reduce BAR2 VA space by FERMI_SMALL_PAGESIZE for host overfetch + // bug WAR (Bug 529932/525381); the last BAR2 page will remain + // mapped to the scratch page. 
+ // + pVASpaceHeap = pKernelBus->virtualBar2[gfid].pVASpaceHeap; + + if (pVASpaceHeap != NULL) + { + if (pVASpaceHeap->eheapSetAllocRange(pVASpaceHeap, pKernelBus->bar2[gfid].rmApertureBase, + pKernelBus->bar2[gfid].rmApertureLimit - FERMI_SMALL_PAGESIZE) != NV_OK) + { + DBG_BREAKPOINT(); + } + } + + allocSize = kbusGetSizeOfBar2PageDirs_HAL(pGpu, pKernelBus) + + kbusGetSizeOfBar2PageTables_HAL(pGpu, pKernelBus); + + if (pKernelBus->PDEBAR2Aperture == ADDR_FBMEM) + { + // + // The page directories and page tables should all be within + // the same type of memory. + // + NV_ASSERT_OR_GOTO(pKernelBus->PDEBAR2Aperture == pKernelBus->PTEBAR2Aperture, + cleanup); + + status = memdescCreate(&pPageLevelsMemDesc, pGpu, + allocSize, + RM_PAGE_SIZE, + NV_TRUE, + pKernelBus->PDEBAR2Aperture, + pKernelBus->PDEBAR2Attr, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + + if (IS_GFID_VF(gfid)) + { + status = memdescAlloc(pPageLevelsMemDesc); + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + + pKernelBus->bar2[gfid].pdeBase = memdescGetPhysAddr(pPageLevelsMemDesc, + AT_GPU, 0); + + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc = pPageLevelsMemDesc; + + pKernelBus->bar2[gfid].pteBase = pKernelBus->bar2[gfid].pdeBase + + kbusGetSizeOfBar2PageDirs_HAL(pGpu, pKernelBus); + + pKernelBus->bar2[gfid].pteBase = NV_ROUNDUP(pKernelBus->bar2[gfid].pteBase, RM_PAGE_SIZE); + + pKernelBus->virtualBar2[gfid].pPageLevels = kbusMapRmAperture_HAL(pGpu, + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc); + NV_ASSERT_OR_RETURN(pKernelBus->virtualBar2[gfid].pPageLevels, + NV_ERR_INSUFFICIENT_RESOURCES); + } + } + + // Get the MMU format for BAR2. + pKernelBus->bar2[gfid].pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_DEFAULT, 0); + NV_ASSERT_OR_GOTO(NULL != pKernelBus->bar2[gfid].pFmt, cleanup); + walkFlags.bUseIterative = gpuIsIterativeMmuWalkerEnabled(pGpu); + + // + // Initialize/allocate walker staging buffer only if PTEs in FBMEM + // and we are currently bootstrapping BAR2. + // + if (pKernelBus->bar2[gfid].pWalkStagingBuffer == NULL && + pKernelBus->PTEBAR2Aperture == ADDR_FBMEM && + pKernelBus->bar2[gfid].bBootstrap) + { + pKernelBus->bar2[gfid].pWalkStagingBuffer = kbusCreateStagingMemdesc(pGpu); + } + + // Create the MMU_WALKER state + status = mmuWalkCreate(pKernelBus->bar2[gfid].pFmt->pRoot, + NULL, + &g_bar2WalkCallbacks, + walkFlags, + &pWalk, + (struct MMU_WALK_MEMDESC *) pKernelBus->bar2[gfid].pWalkStagingBuffer); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + pKernelBus->bar2[gfid].pWalk = pWalk; + + // We want to lock the small page table + pLevelFmt = mmuFmtFindLevelWithPageShift(pKernelBus->bar2[gfid].pFmt->pRoot, + RM_PAGE_SHIFT); + + // Setup walk user context. + userCtx.pGpu = pGpu; + userCtx.gfid = gfid; + + // Pre-reserve and init 4K tables through BAR0 window (bBootstrap) mode. 
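//
// Editor's note - illustrative sketch only, not part of this change. In the
// VF path above, the BAR2 page directories and page tables share one
// contiguous allocation: the directories start at the base and the PTE region
// starts at the next 4K boundary after them. A standalone sketch of that
// layout computation; the "example_" names and sizes are hypothetical:
//
#include <stdint.h>

#define EXAMPLE_RM_PAGE_SIZE 0x1000ull

static uint64_t example_round_up(uint64_t value, uint64_t align)
{
    return (value + align - 1) & ~(align - 1);
}

// Given the physical base of the combined allocation and the byte size of the
// page-directory region, returns the physical base of the page-table region.
static uint64_t example_pte_base(uint64_t pdeBase, uint64_t pageDirBytes)
{
    return example_round_up(pdeBase + pageDirBytes, EXAMPLE_RM_PAGE_SIZE);
}
// e.g. pdeBase = 0x100000, pageDirBytes = 0x1800  ->  pteBase = 0x102000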
+ mmuWalkSetUserCtx(pWalk, &userCtx); + + if (pKernelBus->bar2[gfid].cpuVisibleLimit != 0) + { + status = mmuWalkReserveEntries(pWalk, pLevelFmt, pKernelBus->bar2[gfid].cpuVisibleBase, + pKernelBus->bar2[gfid].cpuVisibleLimit, NV_FALSE); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + status = mmuWalkSparsify(pWalk, pKernelBus->bar2[gfid].cpuVisibleBase, pKernelBus->bar2[gfid].cpuVisibleLimit, NV_TRUE); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + } + + if (pKernelBus->bar2[gfid].cpuInvisibleLimit != 0) + { + status = mmuWalkReserveEntries(pWalk, pLevelFmt, pKernelBus->bar2[gfid].cpuInvisibleBase, + pKernelBus->bar2[gfid].cpuInvisibleLimit, NV_FALSE); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + status = mmuWalkSparsify(pWalk, pKernelBus->bar2[gfid].cpuInvisibleBase, pKernelBus->bar2[gfid].cpuInvisibleLimit, NV_TRUE); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + } + + NV_PRINTF(LEVEL_INFO, "(BAR2 0x%llx, PDB 0x%llx): vaLimit = 0x%llx\n", + pKernelBus->bar2[gfid].instBlockBase, pKernelBus->bar2[gfid].pdeBase, + pKernelBus->bar2[gfid].vaLimit); + + if (NULL != pKernelBus->bar2[gfid].pPDEMemDescForBootstrap) + { + memdescSetPageSize(pKernelBus->bar2[gfid].pPDEMemDescForBootstrap, AT_GPU, + FERMI_SMALL_PAGESIZE); + pKernelBus->virtualBar2[gfid].pPDB = pKernelBus->bar2[gfid].pPDEMemDescForBootstrap; + } + else + { + memdescSetPageSize(pKernelBus->bar2[gfid].pPDEMemDesc, AT_GPU, + FERMI_SMALL_PAGESIZE); + pKernelBus->virtualBar2[gfid].pPDB = pKernelBus->bar2[gfid].pPDEMemDesc; + } + + // + // Setup a memdesc that covers all of BAR2's page levels. + // + // The following is based on _bar2WalkCBLevelAlloc(). + // + if (IS_GFID_PF(gfid)) + { + switch (pKernelBus->PDEBAR2Aperture) + { + default: + case ADDR_FBMEM: + if (pPageLevelsMemDesc != NULL) + { + memdescDescribe(pPageLevelsMemDesc, + pKernelBus->PDEBAR2Aperture, + pKernelBus->bar2[gfid].pdeBase, + allocSize); + } + break; + + case ADDR_SYSMEM: + // + // In SYSMEM, page level instances are allocated one at a time. It is + // not guaranteed that they are contiguous. Thus, SYSMEM page level + // instances are dynamically mapped-in via memmap as needed instead of + // having one static mapping. + // + pPageLevelsMemDesc = NULL; + break; + } + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc = pPageLevelsMemDesc; + } + + kbusPatchBar2Pdb_HAL(pGpu, pKernelBus); + +cleanup: + + if (IS_GFID_VF(gfid) && (pKernelBus->virtualBar2[gfid].pPageLevels != NULL)) + { + kbusUnmapRmAperture_HAL(pGpu, + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc, + &pKernelBus->virtualBar2[gfid].pPageLevels, NV_TRUE); + pKernelBus->virtualBar2[gfid].pPageLevels = NULL; + } + + if (pWalk != NULL) + { + mmuWalkSetUserCtx(pWalk, NULL); + } + + if (!kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus) && IS_GFID_PF(gfid)) + { + // Restore BAR0 window after BAR2 bootstrap + kbusRestoreBar0WindowAfterBar2Bootstrap_HAL(pGpu, pKernelBus, origVidOffset); + } + + if (status != NV_OK) + { + if (kbusTeardownBar2GpuVaSpace_HAL(pGpu, pKernelBus, gfid) != NV_OK) + { + DBG_BREAKPOINT(); + } + } + + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus)) + { + pKernelBus->bar2[gfid].bBootstrap = NV_FALSE; + } + + return status; +} + +/*! + * @brief Destroy BAR2 GPU vaspace + * + * 1. Free BAR2 GPU vaspace page directories & tables. + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK on success. 
+ */ +NV_STATUS +kbusTeardownBar2GpuVaSpace_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + NV_STATUS status = NV_OK; + + // + // Nothing to be done in the guest in the paravirtualization case or if + // guest is running SRIOV heavy mode. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + if (NULL != pKernelBus->bar2[gfid].pWalk) + { + const MMU_FMT_LEVEL *pLevelFmt = NULL; + MMU_WALK_USER_CTX userCtx = {0}; + NvU64 origVidOffset = 0; + + pLevelFmt = mmuFmtFindLevelWithPageShift(pKernelBus->bar2[gfid].pFmt->pRoot, RM_PAGE_SHIFT); + + userCtx.pGpu = pGpu; + + mmuWalkSetUserCtx(pKernelBus->bar2[gfid].pWalk, &userCtx); + + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus) || IS_GFID_VF(gfid)) + { + mmuWalkLevelInstancesForceFree(pKernelBus->bar2[gfid].pWalk); + } + else + { + status = kbusSetupBar0WindowBeforeBar2Bootstrap_HAL(pGpu, pKernelBus, &origVidOffset); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + if (pKernelBus->bar2[gfid].cpuVisibleLimit != 0) + { + status = mmuWalkUnmap(pKernelBus->bar2[gfid].pWalk, pKernelBus->bar2[gfid].cpuVisibleBase, pKernelBus->bar2[gfid].cpuVisibleLimit); + NV_ASSERT(NV_OK == status); + mmuWalkReleaseEntries(pKernelBus->bar2[gfid].pWalk, pLevelFmt, pKernelBus->bar2[gfid].cpuVisibleBase, pKernelBus->bar2[gfid].cpuVisibleLimit); + } + + if (pKernelBus->bar2[gfid].cpuInvisibleLimit != 0) + { + status = mmuWalkUnmap(pKernelBus->bar2[gfid].pWalk, pKernelBus->bar2[gfid].cpuInvisibleBase, pKernelBus->bar2[gfid].cpuInvisibleLimit); + NV_ASSERT(NV_OK == status); + mmuWalkReleaseEntries(pKernelBus->bar2[gfid].pWalk, pLevelFmt, pKernelBus->bar2[gfid].cpuInvisibleBase, pKernelBus->bar2[gfid].cpuInvisibleLimit); + } + + kbusRestoreBar0WindowAfterBar2Bootstrap_HAL(pGpu, pKernelBus, origVidOffset); + } + + mmuWalkSetUserCtx(pKernelBus->bar2[gfid].pWalk, NULL); + + mmuWalkDestroy(pKernelBus->bar2[gfid].pWalk); + pKernelBus->bar2[gfid].pWalk = NULL; + pKernelBus->bar2[gfid].pPDEMemDesc = NULL; + pKernelBus->bar2[gfid].pPDEMemDescForBootstrap = NULL; + pKernelBus->virtualBar2[gfid].pPTEMemDesc = NULL; + + // Free staging buffer + memdescFree(pKernelBus->bar2[gfid].pWalkStagingBuffer); + memdescDestroy(pKernelBus->bar2[gfid].pWalkStagingBuffer); + pKernelBus->bar2[gfid].pWalkStagingBuffer = NULL; + + if (IS_GFID_VF(gfid) && (pKernelBus->virtualBar2[gfid].pPageLevels != NULL)) + { + kbusUnmapRmAperture_HAL(pGpu, + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc, + &pKernelBus->virtualBar2[gfid].pPageLevels, + NV_TRUE); + pKernelBus->virtualBar2[gfid].pPageLevels = NULL; + } + + // Free the overall page levels memdesc. + if (pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc != NULL) + { + memdescFree(pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc); + memdescDestroy(pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc); + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc = NULL; + } + + if (IS_GSP_CLIENT(pGpu)) + { + // + // Normally virtualBar2.pPDB (which equals to the memDesc + // of BAR2 root directory) gets freed when BAR2 page table got + // destroyed. But in RM-offload, virtualBar2.pPDB in CPU-RM + // is patched to GSP-RM's address, thus it won't be freed when + // destroying BAR2 page table. So we need to explicitly free it + // at destruct time. 
+ // + if (pKernelBus->virtualBar2[gfid].pPDB != NULL) + { + memdescFree(pKernelBus->virtualBar2[gfid].pPDB); + memdescDestroy(pKernelBus->virtualBar2[gfid].pPDB); + pKernelBus->virtualBar2[gfid].pPDB = NULL; + } + + // + // No more need for CPU-RM's page table, thus requesting GSP-RM to + // delete the PDE3[0] value from GSP-RM's page table (by wrinting 0 + // to GSP-RM's PDE3[0]. + // + NV_RM_RPC_UPDATE_BAR_PDE(pGpu, NV_RPC_UPDATE_PDE_BAR_2, 0, pKernelBus->bar2[gfid].pFmt->pRoot->virtAddrBitLo, status); + } + + if (IS_GFID_VF(gfid) && (pKernelBus->bar2[gfid].pInstBlkMemDesc != NULL)) + { + memdescFree(pKernelBus->bar2[gfid].pInstBlkMemDesc); + memdescDestroy(pKernelBus->bar2[gfid].pInstBlkMemDesc); + pKernelBus->bar2[gfid].pInstBlkMemDesc = NULL; + } + } + + return status; +} + +/*! + * @brief Setup BAR0 window for BAR2 setup + * + * We point the BAR0 window to the start of the BAR2 page directory + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[out] pOrigVidOffset Location to Save the original BAR0 window offset + * + * @returns NV_OK on success. + */ +NV_STATUS +kbusSetupBar0WindowBeforeBar2Bootstrap_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 *pOrigVidOffset +) +{ + NV_STATUS status = NV_OK; + + // Check that Bar2 Page Dir starts at or after bar0 window vid offset + if (ADDR_FBMEM == pKernelBus->PDEBAR2Aperture || + ADDR_FBMEM == pKernelBus->PTEBAR2Aperture) + { + // Right now, PDE needs to be in FBMEM for BAR0 window to work. + NV_ASSERT_OR_RETURN(ADDR_FBMEM == pKernelBus->PDEBAR2Aperture, NV_ERR_NOT_SUPPORTED); + + // Save original BAR0 window base (restored in cleanup). + *pOrigVidOffset = kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus); + + // Set BAR0 window base to memory region reserved for BAR2 page level instances. + status = kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, + pKernelBus->bar2[GPU_GFID_PF].pdeBase & ~0xffffULL); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // Get BAR0 window offset to be used for BAR2 init. + pKernelBus->bar2[GPU_GFID_PF].bar2OffsetInBar0Window = + (pKernelBus->bar2[GPU_GFID_PF].pdeBase - kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus)) + + NV_PRAMIN_DATA008(0); + } + + pKernelBus->bar2[GPU_GFID_PF].bBootstrap = NV_TRUE; + + return NV_OK; +} + +/*! + * @brief Restore BAR0 window after BAR2 setup + * + * Restore the BAR0 window to the original offset + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] origVidOffset Location to restore the original BAR0 window offset + * + * @returns NV_OK on success. + */ +void +kbusRestoreBar0WindowAfterBar2Bootstrap_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 origVidOffset +) +{ + NV_ASSERT(pKernelBus->bar2[GPU_GFID_PF].bBootstrap); + pKernelBus->bar2[GPU_GFID_PF].bBootstrap = NV_FALSE; + + if (ADDR_FBMEM == pKernelBus->PDEBAR2Aperture || + ADDR_FBMEM == pKernelBus->PTEBAR2Aperture) + { + NV_STATUS status; + status = kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, origVidOffset); + NV_ASSERT(NV_OK == status); + pKernelBus->bar2[GPU_GFID_PF].bar2OffsetInBar0Window = 0; + } +} + +/*! + * Defines the data needed to iterate over the last level during map VA op. + * Note: Used only in the new VMM code path. + */ +struct MMU_MAP_ITERATOR +{ + /*! + * @copydoc GMMU_FMT + */ + const GMMU_FMT *pFmt; + + /*! + * Physical aperture of the pages. + */ + GMMU_APERTURE aperture; + + /*! + * Physical pages to map. Always points to 4K-sized pages. + */ + DMA_PAGE_ARRAY *pPageArray; + + /*! + * The index of pPageArray that needs to be mapped. 
+ */ + NvU32 currIdx; + + /*! + * Physical address of the last page mapped. + */ + NvU64 physAddr; + + /*! + * Template used to initialize PTEs. Contains values that do not change + * across one map operation. + */ + GMMU_ENTRY_VALUE pteTemplate; + + /*! + * The PTE physical address field to use based on the PTE aperture. + */ + const GMMU_FIELD_ADDRESS *pAddrField; +}; + +static void +_busWalkCBMapNextEntries_UpdatePhysAddr +( + OBJGPU *pGpu, + GMMU_ENTRY_VALUE *pEntryValue, + MMU_MAP_ITERATOR *pIter, + const NvU64 pageSize +) +{ + // Update the PTE with the physical address. + if (pIter->currIdx < pIter->pPageArray->count) + { + pIter->physAddr = dmaPageArrayGetPhysAddr(pIter->pPageArray, + pIter->currIdx); + pIter->physAddr = NV_ALIGN_DOWN64(pIter->physAddr, pageSize); + } + else + { + // + // As BAR2 page tables are physically contiguous, physAddr can be + // incremented. + // + // Should not be the first page (currIdx == 0) being mapped. + // + NV_ASSERT_OR_RETURN_VOID((pIter->pPageArray->count == 1) && + (pIter->currIdx > 0)); + pIter->physAddr += pageSize; + } + + gmmuFieldSetAddress(pIter->pAddrField, + kgmmuEncodePhysAddr(GPU_GET_KERNEL_GMMU(pGpu), + pIter->aperture, pIter->physAddr, NVLINK_INVALID_FABRIC_ADDR), + pEntryValue->v8); + + // + // pPageArray deals in 4K-pages. Increment by the ratio of mapping page + // size to 4K. + // + pIter->currIdx += (NvU32)(pageSize / RM_PAGE_SIZE); +} + +/*! + * Implementation of @ref MmuWalkCBMapNextEntries for BAR2 + */ +static void +_kbusWalkCBMapNextEntries_RmAperture +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_MAP_TARGET *pTarget, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + NvU32 *pProgress +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + MMU_MAP_ITERATOR *pIter = pTarget->pIter; + const MMU_FMT_LEVEL *pLevelFmt = pTarget->pLevelFmt; + NvU8 *pMap = NULL; + void *pPriv = NULL; + MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR*)pLevelMem; + const NvU64 pageSize = mmuFmtLevelPageSize(pLevelFmt); + NV_STATUS status = NV_OK; + GMMU_ENTRY_VALUE entryValue; + NvU32 entryIndex; + NvU32 entryOffset; + NvU32 sizeInDWord = 0; + NvU64 entry = 0; + NvU32 gfid = pUserCtx->gfid; + + NV_PRINTF(LEVEL_INFO, "[GPU%u]: PA 0x%llX, Entries 0x%X-0x%X\n", + pUserCtx->pGpu->gpuInstance, + memdescGetPhysAddr(pMemDesc, AT_GPU, 0), entryIndexLo, + entryIndexHi); + + // + // Initialize the PTE with the template. The template contains the values + // that do not change across PTEs for this map operation. + // + portMemCopy(entryValue.v8, sizeof(pIter->pteTemplate), pIter->pteTemplate.v8, sizeof(pIter->pteTemplate)); + + if (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) + { + if (pKernelBus->virtualBar2[gfid].pPageLevels != NULL) + { + // + // Determine the start of the desired page level offset from + // CPU mapping to the start of the BAR2 VAS page levels. + // + if (pKernelBus->bar2[gfid].bMigrating) + { + // In the migration phase. HW is using the page tables at bottom of FB. + NV_ASSERT_OR_RETURN_VOID(NULL != pKernelBus->virtualBar2[gfid].pPageLevelsForBootstrap); + pMap = memdescGetPhysAddr(pMemDesc, AT_GPU, 0) - + pKernelBus->bar2[gfid].pdeBaseForBootstrap + + pKernelBus->virtualBar2[gfid].pPageLevelsForBootstrap; + } + else + { + // Migration is done. HW is using the page tables at top of FB. 
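//
// Editor's note - illustrative sketch only, not part of this change. The pMap
// computations above and below convert a page-table instance's FB physical
// address into a CPU pointer by rebasing it against the start of the mapped
// page-level region (cpuPtr = cpuBase + (phys - regionBasePhys)). A
// standalone sketch of that rebasing; the "example_" names are hypothetical:
//
#include <stdint.h>

// 'regionBasePhys' is the FB physical address where the page-level region
// starts and 'cpuBase' is where that same region is mapped for the CPU.
static uint8_t *example_page_level_cpu_ptr(uint64_t instancePhys,
                                           uint64_t regionBasePhys,
                                           uint8_t *cpuBase)
{
    return cpuBase + (instancePhys - regionBasePhys);
}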
+ pMap = memdescGetPhysAddr(pMemDesc, AT_GPU, 0) - + pKernelBus->bar2[gfid].pdeBase + + pKernelBus->virtualBar2[gfid].pPageLevels; + } + + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++) + { + // Update the PTE with the physical address. + _busWalkCBMapNextEntries_UpdatePhysAddr(pGpu, + &entryValue, + pIter, + pageSize); + + entryOffset = entryIndex * pLevelFmt->entrySize; + + // Commit to memory. + portMemCopy(pMap + entryOffset, pLevelFmt->entrySize, entryValue.v8, pLevelFmt->entrySize); + } + } + else if (pKernelBus->bar2[gfid].bBootstrap) + { + + for ( entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++ ) + { + // Update the PTE with the physical address. + _busWalkCBMapNextEntries_UpdatePhysAddr(pGpu, + &entryValue, + pIter, + pageSize); + + entryOffset = entryIndex * pLevelFmt->entrySize; + + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus)) + { + pMap = kbusCpuOffsetInBar2WindowGet(pGpu, pKernelBus, pMemDesc); + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + portMemCopy(pMap + entryOffset, + pLevelFmt->entrySize, entryValue.v8, + pLevelFmt->entrySize); + } + else + { + // Use BAR0 or nvlink if available + sizeInDWord = (NvU32)NV_CEIL(pLevelFmt->entrySize, sizeof(NvU32)); + NvU64 entryStart = memdescGetPhysAddr(pMemDesc, FORCE_VMMU_TRANSLATION(pMemDesc, AT_GPU), entryOffset); + NvU32 i; + NvU8 *pMapping = NULL; + + if (pKernelBus->coherentCpuMapping.bCoherentCpuMapping) + { + NV_ASSERT_OR_RETURN_VOID(pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING)); + pMapping = kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc); + NV_ASSERT_OR_RETURN_VOID(pMapping != NULL); + for (i = 0; i < sizeInDWord; i++) + { + MEM_WR32(pMapping + entryOffset + sizeof(NvU32)*i, + entryValue.v32[i]); + } + kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc); + } + else + { + for (i = 0; i < sizeInDWord; i++) + { + // BAR0 write. + status = kbusMemAccessBar0Window_HAL(pGpu, pKernelBus, + (entryStart + (sizeof(NvU32) * i)), + &entryValue.v32[i], + sizeof(NvU32), + NV_FALSE, + ADDR_FBMEM); + NV_ASSERT_OR_RETURN_VOID(NV_OK == status); + } + } + + entry = entryStart; + } + } + // + // Use PRAMIN flush to make sure that BAR0 writes has reached the memory + // + if (pKernelBus->bar2[gfid].bBootstrap && + !kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus) && + !RMCFG_FEATURE_PLATFORM_GSP) + { + NvU32 data = 0; + NvU32 i; + for (i = 0; i < sizeInDWord; i++) + { + NV_ASSERT_OR_RETURN_VOID(kbusMemAccessBar0Window_HAL(pGpu, pKernelBus, + (entry + (sizeof(NvU32) * i)), &data, sizeof(NvU32), + NV_TRUE, ADDR_FBMEM) == NV_OK); + } + } + } + else + { + // + // We are migrating and old page tables are currently active. So, PTE + // updates should be made in the old page tables at the bottom of FB. + // + NV_ASSERT_OR_RETURN_VOID(pKernelBus->bar2[gfid].bMigrating); + NV_ASSERT_OR_RETURN_VOID(NULL == pKernelBus->virtualBar2[gfid].pPageLevels); + NV_ASSERT_OR_RETURN_VOID(NULL != pKernelBus->virtualBar2[gfid].pPageLevelsForBootstrap); + + pMap = memdescGetPhysAddr(pMemDesc, AT_GPU, 0) - + pKernelBus->bar2[gfid].pdeBaseForBootstrap + + pKernelBus->virtualBar2[gfid].pPageLevelsForBootstrap; + + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++) + { + // Update the PTE with the physical address. + _busWalkCBMapNextEntries_UpdatePhysAddr(pGpu, + &entryValue, + pIter, + pageSize); + + entryOffset = entryIndex * pLevelFmt->entrySize; + + // Commit to memory. 
+ portMemCopy(pMap + entryOffset, pLevelFmt->entrySize, entryValue.v8, pLevelFmt->entrySize); + } + } + } + else + { + NV_ASSERT(memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM && + pKernelBus->virtualBar2[gfid].pPageLevels == NULL); + + // Plain old memmap. + status = memdescMapOld(pMemDesc, 0, + pMemDesc->Size, + NV_TRUE, // kernel, + NV_PROTECT_READ_WRITE, + (void **)&pMap, + &pPriv); + NV_ASSERT_OR_RETURN_VOID(NV_OK == status); + + for ( entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++ ) + { + // Update the PTE with the physical address. + _busWalkCBMapNextEntries_UpdatePhysAddr(pGpu, + &entryValue, + pIter, + pageSize); + + entryOffset = entryIndex * pLevelFmt->entrySize; + + // Memory-mapped write. + portMemCopy(pMap + entryOffset, + pLevelFmt->entrySize, + entryValue.v8, + pLevelFmt->entrySize); + } + + memdescUnmapOld(pMemDesc, 1, 0, pMap, pPriv); + } + + *pProgress = entryIndexHi - entryIndexLo + 1; +} + +/*! + * @brief Third level of RmAperture support. This routine writes BAR2 PTEs. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc The memory area to copy from. + * @param[in] vaddr Offset into bar2 to program + * @param[in] vaSize Amount of VA to write (can be greater than pMemDesc size) + * @param[in] flags Defined by UPDATE_RM_APERTURE_FLAGS_* + * + * @returns NV_OK on success, failure in some bootstrapping cases. + */ +NV_STATUS +kbusUpdateRmAperture_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + PMEMORY_DESCRIPTOR pMemDesc, + NvU64 vaddr, + NvU64 vaSize, + NvU32 flags +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + PMEMORY_DESCRIPTOR pSubDevMemDesc; + NV_STATUS status = NV_OK; + NvBool bInvalidate = !!(flags & UPDATE_RM_APERTURE_FLAGS_INVALIDATE); + NvBool bDiscard = !!(flags & UPDATE_RM_APERTURE_FLAGS_DISCARD); + NvBool bSparsify = !!(flags & UPDATE_RM_APERTURE_FLAGS_SPARSIFY); + MMU_MAP_TARGET mapTarget = {0}; + MMU_MAP_ITERATOR mapIter = {0}; + MMU_WALK_USER_CTX userCtx = {0}; + DMA_PAGE_ARRAY pageArray; + NvU64 origVidOffset = 0; + NvU64 vaLo; + NvU64 vaHi; + NvU32 gfid; + const NvU32 pageSize = FERMI_SMALL_PAGESIZE; + const GMMU_FMT *pFmt; + ADDRESS_TRANSLATION addressTranslation; + NvBool bCallingContextPlugin; + + // + // In case of SR-IOV heavy, host RM must update VF BAR2 page tables + // only for CPU invisible range. VF BAR2's CPU visible range is not + // in use on host RM. + // + if (!(flags & UPDATE_RM_APERTURE_FLAGS_CPU_INVISIBLE_RANGE)) + { + gfid = GPU_GFID_PF; + } + else + { + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + NV_ASSERT_OK_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin)); + if (bCallingContextPlugin) + { + gfid = GPU_GFID_PF; + } + } + + pFmt = pKernelBus->bar2[gfid].pFmt; + + // Math below requires page-sized va. + if (vaSize == 0 || vaSize & RM_PAGE_MASK) + { + NV_PRINTF(LEVEL_ERROR, "unsupported VA size (0x%llx)\n", vaSize); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + // Currently don't do anything at unmap. + if (bDiscard && !bSparsify) + return NV_OK; + + vaLo = NV_ALIGN_DOWN64(vaddr, pageSize); + vaHi = NV_ALIGN_UP64(vaddr + vaSize, pageSize) - 1; + pSubDevMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + // + // In case of SR-IOV heavy, host RM updates VF BAR2, so + // if the update is for VF BAR2 (IS_GFID_PF(gfid) is false), + // use GPA, otherwise use SPA. 
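//
// Editor's note - illustrative sketch only, not part of this change.
// kbusUpdateRmAperture_GM107 above rejects sizes that are not a multiple of
// 4K and then expands the request to whole-page bounds (vaLo rounded down,
// vaHi rounded up to the last byte of the final page). A standalone sketch of
// that alignment math; the "example_" names are hypothetical:
//
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 0x1000ull
#define EXAMPLE_PAGE_MASK (EXAMPLE_PAGE_SIZE - 1)

static int example_page_bounds(uint64_t vaddr, uint64_t size,
                               uint64_t *pVaLo, uint64_t *pVaHi)
{
    if (size == 0 || (size & EXAMPLE_PAGE_MASK))
        return -1;                                                          // must be a page multiple
    *pVaLo = vaddr & ~EXAMPLE_PAGE_MASK;                                    // round down to page start
    *pVaHi = ((vaddr + size + EXAMPLE_PAGE_MASK) & ~EXAMPLE_PAGE_MASK) - 1; // last byte of final page
    return 0;
}
// e.g. vaddr = 0x12345000, size = 0x2000  ->  vaLo = 0x12345000, vaHi = 0x12346FFF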
+ // + if (IS_GFID_PF(gfid)) + { + addressTranslation = FORCE_VMMU_TRANSLATION(pSubDevMemDesc, AT_GPU); + } + else + { + addressTranslation = AT_GPU; + } + + dmaPageArrayInitFromMemDesc(&pageArray, pSubDevMemDesc, addressTranslation); + userCtx.pGpu = pGpu; + userCtx.gfid = gfid; + mmuWalkSetUserCtx(pKernelBus->bar2[gfid].pWalk, &userCtx); + + if (bSparsify) + { + NV_PRINTF(LEVEL_INFO, + "mmuWalkSparsify pwalk=%p, vaLo=%llx, vaHi = %llx\n", + pKernelBus->bar2[gfid].pWalk, vaLo, vaHi); + + status = mmuWalkSparsify(pKernelBus->bar2[gfid].pWalk, vaLo, vaHi, NV_FALSE); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "mmuWalkSparsify status=%x pwalk=%p, vaLo=%llx, vaHi = %llx\n", + status, pKernelBus->bar2[gfid].pWalk, vaLo, vaHi); + } + } + else + { + // MMU_MAP_CTX + mapTarget.pLevelFmt = mmuFmtFindLevelWithPageShift(pFmt->pRoot, + BIT_IDX_32(pageSize)); + mapTarget.pIter = &mapIter; + mapTarget.MapNextEntries = _kbusWalkCBMapNextEntries_RmAperture; + + // MMU_MAP_ITER + mapIter.pFmt = pFmt; + mapIter.aperture = kgmmuGetMemAperture(pKernelGmmu, pMemDesc); + mapIter.pPageArray = &pageArray; + + // + // Setup a template PTE with those values that will not change across + // PTEs during mapping. + // + nvFieldSetBool(&pFmt->pPte->fldValid, NV_TRUE, mapIter.pteTemplate.v8); + { + nvFieldSetBool(&pFmt->pPte->fldVolatile, memdescGetVolatility(pMemDesc), mapIter.pteTemplate.v8); + } + + gmmuFieldSetAperture(&pFmt->pPte->fldAperture, + mapIter.aperture, + mapIter.pteTemplate.v8); + + // + // Determine the PTE physical address field to use based on the PTE + // aperture. Physical addresses themselves will get added to the PTE + // during mapping. + // + mapIter.pAddrField = + gmmuFmtPtePhysAddrFld(pFmt->pPte, + gmmuFieldGetAperture( + &pFmt->pPte->fldAperture, + mapIter.pteTemplate.v8)); + + + // Write PTE kind. + nvFieldSet32(&pFmt->pPte->fldKind, memdescGetPteKind(pMemDesc), + mapIter.pteTemplate.v8); + + // + // We haven't yet self-mapped the BAR2 page tables. + // This call is to do the same. + // So keep BAR2 in bootstrap mode to allow BAR0 window updates. + // + if ((ADDR_FBMEM == pKernelBus->PDEBAR2Aperture || + ADDR_FBMEM == pKernelBus->PTEBAR2Aperture) && + !kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus) && + pKernelBus->virtualBar2[gfid].pPageLevels == NULL && IS_GFID_PF(gfid)) + { + status = kbusSetupBar0WindowBeforeBar2Bootstrap_HAL(pGpu, pKernelBus, &origVidOffset); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + } + status = mmuWalkMap(pKernelBus->bar2[gfid].pWalk, vaLo, vaHi, &mapTarget); + NV_ASSERT(NV_OK == status); + } + + mmuWalkSetUserCtx(pKernelBus->bar2[gfid].pWalk, NULL); + + if (pKernelBus->bar2[gfid].bBootstrap && + !kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus)) + { + kbusRestoreBar0WindowAfterBar2Bootstrap_HAL(pGpu, pKernelBus, origVidOffset); + } + + // + // Synchronize BAR2 address space to memory and then invalidate TLB + // to invalidate any cached PTEs. + // + if (bInvalidate) + { + osFlushCpuWriteCombineBuffer(); + + // PCIE_READ kbusFlush is more efficient and preferred. When not ready, use kbusSendSysmembar(). 
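//
// Editor's note - illustrative sketch only, not part of this change. The
// mapping setup above builds one template PTE (valid bit, volatility,
// aperture, kind) and then only the physical address is patched per entry
// while the walker covers the range. A standalone sketch of that
// template-and-stamp pattern, using a simplified 64-bit "PTE" layout that is
// purely hypothetical, as are the "example_" names:
//
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PTE_VALID      (1ull << 0)
#define EXAMPLE_PTE_VOLATILE   (1ull << 1)
#define EXAMPLE_PTE_ADDR_MASK  (~0xFFFull)   // bits [63:12] hold the 4K-aligned address

static void example_write_ptes(uint64_t *pteArray, size_t count,
                               uint64_t firstPhysAddr, uint64_t pageSize)
{
    // Fields that do not change across the map operation go in the template.
    const uint64_t pteTemplate = EXAMPLE_PTE_VALID | EXAMPLE_PTE_VOLATILE;

    for (size_t i = 0; i < count; i++)
    {
        uint64_t physAddr = firstPhysAddr + i * pageSize;   // contiguous pages
        pteArray[i] = pteTemplate | (physAddr & EXAMPLE_PTE_ADDR_MASK);
    }
}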
+ if (pKernelBus->pReadToFlush != NULL) + { + NvU32 flushFlag = BUS_FLUSH_USE_PCIE_READ | + kbusGetFlushAperture(pKernelBus, + memdescGetAddressSpace(pKernelBus->virtualBar2[gfid].pPTEMemDesc)); + kbusFlush_HAL(pGpu, pKernelBus, flushFlag); + } + else + { + kbusSendSysmembar(pGpu, pKernelBus); + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + kgmmuInvalidateTlb_HAL(pGpu, pKernelGmmu, + pKernelBus->virtualBar2[gfid].pPDB, + pKernelBus->virtualBar2[gfid].flags, + PTE_DOWNGRADE, 0, + NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS); + SLI_LOOP_END + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + } + + return status; +} + +/** + * @brief This function is used to return the BAR1 VA space. + * BAR1 VA space per-GPU, no longer shared + */ +OBJVASPACE *kbusGetBar1VASpace_GM107(OBJGPU *pGpu, KernelBus *pKernelBus) +{ + NvU32 gfid; + NvBool bCallingContextPlugin; + + NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, NULL); + NV_ASSERT_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin) == NV_OK, NULL); + if (bCallingContextPlugin || !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + gfid = GPU_GFID_PF; + } + + return pKernelBus->bar1[gfid].pVAS; +} + +NV_STATUS +kbusMapFbAperture_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 offset, + NvU64 *pAperOffset, + NvU64 *pLength, + NvU32 flags, + NvHandle hClient +) +{ + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + OBJVASPACE *pVAS; + NV_STATUS rmStatus = NV_OK; + NV_STATUS failStatus = NV_OK; + OBJGPU *pLoopGpu = NULL; + NvU64 newAperOffset = 0; + // Track which gpus have mapped so we can free in case of error + NvU32 gpuMappingSuccessMask = 0; + + NV_ASSERT((flags & BUS_MAP_FB_FLAGS_FERMI_INVALID) == 0); + + pVAS = kbusGetBar1VASpace_HAL(pGpu, pKernelBus); + + // Set BC to enabled in UC flag not passed + if ((IsSLIEnabled(pGpu) && ((flags & BUS_MAP_FB_FLAGS_MAP_UNICAST) == 0)) && + ((flags & BUS_MAP_FB_FLAGS_PRE_INIT) == 0)) + { + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + flags |= BUS_MAP_FB_FLAGS_MAP_UNICAST; + } + else + { + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + + // Call _kbusMapAperture_GM107 multiple times in UC for BC mapping + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + pLoopGpu = pGpu; + + pVAS = kbusGetBar1VASpace_HAL(pGpu, pKernelBus); + rmStatus = _kbusMapAperture_GM107(pGpu, pMemDesc, + pVAS, offset, pAperOffset, + pLength, flags, hClient); + + // + // Ensure that all returned VA offsets are the same on each GPU + // The _OFFSET_FIXED flag ensures this is true unless one GPU has + // no free extent starting at the bar1 vAddr mapped by the parent + // GPU. + // + // This can and should be updated later to enable multiple Bar1 vAddr + // returns. The client functions must then be updated to handle + // multiple returns, and the OFFSET_FIXED flag can be removed from here + // and /resman/kernel/inc/gpu/bus/kern_bus.h. 
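//
// Editor's note - illustrative sketch only, not part of this change. The
// broadcast mapping code that follows records which GPUs mapped successfully
// in a bitmask so that, if a later GPU fails, only the GPUs that actually
// mapped are unwound. A standalone sketch of that track-and-unwind pattern;
// the "example_" per-index map/unmap helpers are hypothetical stubs:
//
#include <stdbool.h>
#include <stdint.h>

static bool example_map_one(uint32_t idx)   { return idx < 2; }  // pretend the third index fails
static void example_unmap_one(uint32_t idx) { (void)idx; }

static bool example_map_all(uint32_t count)
{
    uint32_t successMask = 0;

    for (uint32_t i = 0; i < count; i++)
    {
        if (!example_map_one(i))
        {
            // Unwind only the indices that actually mapped.
            for (uint32_t j = 0; j < count; j++)
            {
                if (successMask & (1u << j))
                    example_unmap_one(j);
            }
            return false;
        }
        successMask |= (1u << i);
    }
    return true;
}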
+ // + if (gpuMappingSuccessMask == 0) + { + newAperOffset = *pAperOffset; + flags |= BUS_MAP_FB_FLAGS_MAP_OFFSET_FIXED; + } + else + { + NV_ASSERT(newAperOffset == *pAperOffset); + } + + if (rmStatus != NV_OK) + { + SLI_LOOP_BREAK; + } + gpuMappingSuccessMask |= pGpu->gpuInstance; + } + SLI_LOOP_END + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + + if (rmStatus == NV_OK) + { + return rmStatus; + } + + NV_PRINTF(LEVEL_ERROR, + "Failed: [GPU%u] Could not map pAperOffset: 0x%llx\n", + pLoopGpu->gpuInstance, newAperOffset); + + // Unmap mapped addresses after BC mapping failure in SLI + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE) + { + if ((NVBIT(pGpu->gpuInstance) & gpuMappingSuccessMask) == 0) + { + SLI_LOOP_CONTINUE; + } + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + failStatus = kbusUnmapFbAperture_HAL(pGpu, pKernelBus, + pMemDesc, newAperOffset, + *pLength, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + // Failure to unmap mapped address + if (failStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "[GPU%u] Could not unmap on failure to map Bar1\n", + pGpu->gpuInstance); + } + } + SLI_LOOP_END + + return rmStatus; +} + +NV_STATUS +kbusUnmapFbAperture_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 aperOffset, + NvU64 length, + NvU32 flags +) +{ + NV_STATUS rmStatus = NV_OK; + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + OBJVASPACE *pVAS = NULL; + OBJGPU *pLoopGpu = NULL; + + NV_ASSERT(pMemDesc); + + aperOffset &= ~RM_PAGE_MASK; + + // Set BC to enabled if UC flag not passed + if ((IsSLIEnabled(pGpu) && ((flags & BUS_MAP_FB_FLAGS_MAP_UNICAST) == 0)) && + ((flags & BUS_MAP_FB_FLAGS_PRE_INIT) == 0)) + { + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + } + else + { + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + + // Call _kbusUnmapAperture_GM107 in UC for each GPU when BC is called + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + pLoopGpu = pGpu; + pVAS = kbusGetBar1VASpace_HAL(pGpu, pKernelBus); + + if (pVAS == NULL) + { + rmStatus = NV_ERR_GENERIC; + SLI_LOOP_BREAK; + } + memdescFlushCpuCaches(pGpu, pMemDesc); + rmStatus = _kbusUnmapAperture_GM107(pGpu, pVAS, pMemDesc, aperOffset); + + if (rmStatus != NV_OK) + { + SLI_LOOP_BREAK; + } + } + SLI_LOOP_END + + if (rmStatus == NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "unmapped BAR1 offset 0x%llx\n", + aperOffset); + } + else + { + NV_PRINTF(LEVEL_ERROR, "[GPU%u] Unable to unmap aperOffset: 0x%llx\n", + pLoopGpu->gpuInstance, aperOffset); + } + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + + return rmStatus; +} + +/*! + * @brief Lower level FB flush to push pending writes to FB/sysmem + * + * NOTE: Must be called inside a SLI loop + * + * @param[in] pGpu + * @param[in] KernelBus + * @param[in] flags Flags to indicate aperture and other behaviors + * @return NV_OK on success + * + */ +NV_STATUS +kbusFlushSingle_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 flags +) +{ + NvBool bCoherentCpuMapping = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING); + + // + // Nothing to be done in the guest in the paravirtualization case or + // if guest is running in SRIOV heavy mode. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + if (bCoherentCpuMapping) + { + // + // This function issues an HWSYNC. This is needed for synchronizing read/writes + // with NVLINK mappings. 
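//
// Editor's note - illustrative sketch only, not part of this change. The
// BUS_FLUSH_VIDEO_MEMORY path that follows forces outstanding posted writes
// to reach FB by issuing a dummy read through a CPU mapping of FB offset 0:
// the read cannot complete until prior writes on that path have landed. A
// standalone sketch of that read-back idiom; the "example_" name is
// hypothetical:
//
#include <stdint.h>

// 'pReadToFlush' is assumed to be a CPU mapping of some FB location.
static void example_flush_by_readback(const volatile uint32_t *pReadToFlush)
{
    volatile uint32_t data = *pReadToFlush;  // read forces prior posted writes to complete
    (void)data;                              // the value itself is irrelevant
}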
+ // + portAtomicMemoryFenceFull(); + return NV_OK; + } + + if (flags & BUS_FLUSH_SYSTEM_MEMORY) + { + portAtomicMemoryFenceFull(); + } + + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu) || API_GPU_IN_RECOVERY_SANITY_CHECK(pGpu) || + !API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + // + // When the GPU is in full chip reset or lost + // We cannot expect to flush successfully so early return here + // + return NV_OK; + } + + if (flags & BUS_FLUSH_VIDEO_MEMORY) + { + // + // Read the FB address 0 in order to trigger a flush. + // This will not work with reflected mappings so only enable on VOLTA+ + // Note SRIOV guest does not have access to uflush register. + // + // TODO: remove the BUS_FLUSH_USE_PCIE_READ flag from RM and do this + // everywhere since it's faster than uflush. + // + if (IS_VIRTUAL(pGpu) || + (kbusIsReadCpuPointerToFlushEnabled(pKernelBus) && + (flags & BUS_FLUSH_USE_PCIE_READ))) + { + volatile NvU32 data; + NV_ASSERT(pKernelBus->pReadToFlush != NULL || pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping != NULL); + + if (pKernelBus->pReadToFlush != NULL) + { + data = MEM_RD32(pKernelBus->pReadToFlush); + } + else if (pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping != NULL) + { + // + // pReadToFlush is still not ready for use. So, use pCpuMapping + // instead which should already be mapped to FB addr 0 as + // BAR2 is in physical mode right now. + // + data = MEM_RD32(pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping); + } + (void) data; + return NV_OK; + } + else + { + if (IS_GSP_CLIENT(pGpu)) + { + // + // on GSP client, we only support PCIE_READ to do flush + // a sysmembar flush should call kbusSendSysmembarSingle_HAL explicitly + // + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_PATH); + } + else + { + return kbusSendSysmembarSingle_HAL(pGpu, pKernelBus); + } + } + } + + return NV_OK; +} + +/*! + * @brief Properly flush written PDEs, PTEs, or other + * instance memory data or context buffers. See bug 152868 + * + * NOTE: Must call kbusFlush BEFORE any calls to busInvalidate + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] flags NvU32 flags to indicate flush behavior + * + */ +NV_STATUS +kbusFlush_GM107(OBJGPU *pGpu, KernelBus *pKernelBus, NvU32 flags) +{ + NV_STATUS status = NV_OK; + + // Nothing to be done in guest in the paravirtualization case. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return NV_OK; + } + + if (kbusIsFbFlushDisabled(pKernelBus)) + { + // Eliminate FB flushes, but keep mmu invalidates + NV_PRINTF(LEVEL_INFO, "disable_fb_flush flag, skipping flush.\n"); + return status; + } + + // Wait for the flush to flow through + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY); + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + if (kbusFlushSingle_HAL(pGpu, pKernelBus, flags) == NV_ERR_TIMEOUT) + { + status = NV_ERR_TIMEOUT; + } + SLI_LOOP_END; + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + return status; +} + +// +// _kbusMapAperture_GM107 +// Helper function: Given offset and range, alloc VA address space and update it. 
+// +NV_STATUS +_kbusMapAperture_GM107 +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + OBJVASPACE *pVAS, + NvU64 offset, + NvU64 *pAperOffset, + NvU64 *pLength, + NvU32 mapFlags, + NvHandle hClient +) +{ + NV_STATUS rmStatus = NV_ERR_GENERIC; + VirtMemAllocator *pDma; + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + NvU32 flags = DRF_DEF(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, _FALSE); + MEMORY_DESCRIPTOR *pTempMemDesc; + NvU32 swizzId = KMIGMGR_SWIZZID_INVALID; + + // Ensure that the BAR1 VA space is the same across all subdevices + if (IsSLIEnabled(pGpu) && ((mapFlags & BUS_MAP_FB_FLAGS_MAP_UNICAST) == 0)) + { + pGpu = gpumgrGetParentGPU(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + } + + if (mapFlags & BUS_MAP_FB_FLAGS_MAP_OFFSET_FIXED) + { + flags = FLD_SET_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, flags); + } + + pDma = GPU_GET_DMA(pGpu); + + // + // Valid client handle should be associated with a BAR1 mapping request if SMC memory + // partitioning is enabled. That's because BAR1 VA space is split among SMC partitions. + // + // Internal allocations like RM allocated USERD which require BAR1 mapping are done during RM init + // before SMC is enabled and BAR1 VA space is split. So they should work despite not having + // an associated hClient and also such BAR VA space allocations should happen before BAR1 is split. + // + if (IS_MIG_IN_USE(pGpu)) + { + MIG_INSTANCE_REF ref; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + NV_ASSERT_OR_RETURN(hClient != NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OK_OR_RETURN(kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, &ref)); + swizzId = ref.pKernelMIGGpuInstance->swizzId; + } + + if (memdescGetCpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + flags = FLD_SET_DRF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE, flags); + } + + if (mapFlags & BUS_MAP_FB_FLAGS_MAP_DOWNWARDS) + { + flags = FLD_SET_DRF(OS46, _FLAGS, _DMA_OFFSET_GROWS, _DOWN, flags); + } + + // Disable the encryption if DIRECT mapping is requested, currently it is just for testing purpose + if (mapFlags & BUS_MAP_FB_FLAGS_DISABLE_ENCRYPTION) + { + // !!!! Nasty hack + // + // NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP used to convey the encryption info to dmaAllocMapping_HAL(). + // Since we have no bit fields left in NVOS46_FLAGS_* to specify encryption info. + // This is applicable to FERMI+ chips. + // + // NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP is _NV50 specific, and is not used in FERMI+. + // NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_DEFAULT means use default encryption status + // NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_1 means disable encryption + flags = FLD_SET_DRF(OS46, _FLAGS, _PTE_COALESCE_LEVEL_CAP, _1, flags); + } + + NV_ASSERT(!((mapFlags & BUS_MAP_FB_FLAGS_READ_ONLY) && + (mapFlags & BUS_MAP_FB_FLAGS_WRITE_ONLY))); + if (mapFlags & BUS_MAP_FB_FLAGS_READ_ONLY) + { + flags = FLD_SET_DRF(OS46, _FLAGS, _ACCESS, _READ_ONLY, flags); + } + else if (mapFlags & BUS_MAP_FB_FLAGS_WRITE_ONLY) + { + flags = FLD_SET_DRF(OS46, _FLAGS, _ACCESS, _WRITE_ONLY, flags); + } + + rmStatus = memdescCreateSubMem(&pTempMemDesc, pMemDesc, pGpu, offset, *pLength); + if (NV_OK == rmStatus) + { + rmStatus = dmaAllocMapping_HAL(pGpu, pDma, pVAS, pTempMemDesc, pAperOffset, flags, NULL, swizzId); + memdescFree(pTempMemDesc); + memdescDestroy(pTempMemDesc); + } + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + + return rmStatus; +} + +// +// _kbusUnmapAperture_GM107 +// Helper function: Given offset and range, free VA address space. 
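+// The aperOffset passed in is expected to be the BAR1 offset previously handed
+// out by _kbusMapAperture_GM107; kbusUnmapFbAperture_GM107 masks it down to
+// RM_PAGE_SIZE alignment before calling this helper.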
+// +NV_STATUS +_kbusUnmapAperture_GM107 +( + OBJGPU *pGpu, + OBJVASPACE *pVAS, + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 aperOffset +) +{ + NV_STATUS rmStatus = NV_OK; + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + + rmStatus = dmaFreeMapping_HAL(pGpu, pDma, pVAS, aperOffset, pMemDesc, 0, NULL); + + return rmStatus; +} + +NV_STATUS +_kbusInitP2P_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBusUnused +) +{ + NV_STATUS status = NV_OK; + KernelBus *pLocalKernelBus; + KernelBus *pRemoteKernelBus; + + NvU32 deviceInstance, gpuMask; + OBJGPU *pLocalGpu, *pRemoteGpu; + NvU32 localGpuInstance, remoteGpuInstance; + NvU32 localPeerIndex, remotePeerIndex, localPeerCount, remotePeerCount; + NvU32 numSubdevices; + + deviceInstance = gpuGetDeviceInstance(pGpu); + gpuMask = gpumgrGetDeviceGpuMask(deviceInstance); + numSubdevices = gpumgrGetSubDeviceCount(gpuMask); + + if ((numSubdevices < 1) || (numSubdevices > P2P_MAX_NUM_PEERS)) + { + NV_PRINTF(LEVEL_ERROR, + "Fermi only supports P2P with up to 8 subdevices in SLI configuration.\n"); + return NV_ERR_GENERIC; + } + + // Link all the GPUs. + localGpuInstance = 0; + localPeerIndex = 0; + localPeerCount = 0; + + while ((pLocalGpu = gpumgrGetNextGpu(gpuMask, &localGpuInstance)) != NULL) + { + pLocalKernelBus = GPU_GET_KERNEL_BUS(pLocalGpu); + + remoteGpuInstance = localGpuInstance; + remotePeerIndex = localPeerIndex + 1; + remotePeerCount = 0; + + while ((pRemoteGpu = gpumgrGetNextGpu(gpuMask, &remoteGpuInstance)) != NULL) + { + NvU32 locPeerId; + NvU32 remPeerId; + + NV_ASSERT(localPeerIndex != remotePeerIndex); + NV_ASSERT((localPeerCount < P2P_MAX_NUM_PEERS) && + (remotePeerCount < P2P_MAX_NUM_PEERS)); + + pRemoteKernelBus = GPU_GET_KERNEL_BUS(pRemoteGpu); + + locPeerId = kbusGetPeerIdFromTable_HAL(pLocalGpu, pLocalKernelBus, + localPeerIndex, remotePeerIndex); + remPeerId = kbusGetPeerIdFromTable_HAL(pRemoteGpu, pRemoteKernelBus, + remotePeerIndex, localPeerIndex); + + NV_ASSERT((locPeerId < P2P_MAX_NUM_PEERS) && + (remPeerId < P2P_MAX_NUM_PEERS)); + + pLocalKernelBus->p2pPcie.peerNumberMask[pRemoteGpu->gpuInstance] |= + NVBIT(locPeerId); + pRemoteKernelBus->p2pPcie.peerNumberMask[pLocalGpu->gpuInstance] |= + NVBIT(remPeerId); + + pLocalKernelBus->p2pPcie.busPeer[locPeerId].refCount++; + pLocalKernelBus->p2pPcie.busPeer[locPeerId].remotePeerId = remPeerId; + pRemoteKernelBus->p2pPcie.busPeer[remPeerId].refCount++; + pRemoteKernelBus->p2pPcie.busPeer[remPeerId].remotePeerId = locPeerId; + + remotePeerIndex++; + remotePeerCount++; + } + + pLocalKernelBus->bP2pInitialized = NV_TRUE; + + localPeerIndex++; + localPeerCount++; + } + + return status; +} + +NV_STATUS +_kbusDestroyP2P_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NV_STATUS status = NV_OK; + + OBJGPU *pRemoteGpu; + KernelBus *pRemoteKernelBus; + NvU32 i; + + + // Clear all peer numbers. 
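+    //
+    // For every remote GPU that still has a PCIE or NVLINK peer mapping, drop
+    // the refcounts and clear the peer-ID mask bits on both the local and the
+    // remote KernelBus, undoing the bookkeeping done in _kbusInitP2P_GM107 and
+    // _kbusLinkP2P_GM107.
+    //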
+ for (i = 0; i < NV_MAX_DEVICES; i++) + { + if (pKernelBus->p2pPcie.peerNumberMask[i] != 0) + { + NvU32 locPeerId, remPeerId, gpuInst; + + pRemoteGpu = gpumgrGetGpu(i); + NV_ASSERT_OR_RETURN(pRemoteGpu != NULL, NV_ERR_INVALID_STATE); + pRemoteKernelBus = GPU_GET_KERNEL_BUS(pRemoteGpu); + locPeerId = kbusGetPeerId_HAL(pGpu, pKernelBus, pRemoteGpu); + remPeerId = kbusGetPeerId_HAL(pRemoteGpu, pRemoteKernelBus, pGpu); + + NV_ASSERT_OR_RETURN(locPeerId < P2P_MAX_NUM_PEERS, + NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(remPeerId < P2P_MAX_NUM_PEERS, + NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pRemoteKernelBus->p2pPcie.busPeer[remPeerId].remotePeerId == locPeerId, + NV_ERR_INVALID_STATE); + + pKernelBus->p2pPcie.busPeer[locPeerId].refCount--; + pRemoteKernelBus->p2pPcie.busPeer[remPeerId].refCount--; + + gpuInst = gpuGetInstance(pGpu); + pKernelBus->p2pPcie.peerNumberMask[i] &= ~NVBIT(locPeerId); + pRemoteKernelBus->p2pPcie.peerNumberMask[gpuInst] &= ~NVBIT(remPeerId); + + // That should have been the only peer ID associated with the remote + NV_ASSERT(pKernelBus->p2pPcie.peerNumberMask[i] == 0); + NV_ASSERT(pRemoteKernelBus->p2pPcie.peerNumberMask[gpuInst] == 0); + } + + // Clear NVlink related data structures as well. + if (kbusGetNvlinkPeerNumberMask_HAL(pGpu, pKernelBus, i) != 0) + { + NvU32 locPeerId, remPeerId, gpuInst; + + pRemoteGpu = gpumgrGetGpu(i); + NV_ASSERT_OR_RETURN(pRemoteGpu != NULL, NV_ERR_INVALID_STATE); + pRemoteKernelBus = GPU_GET_KERNEL_BUS(pRemoteGpu); + locPeerId = kbusGetPeerId_HAL(pGpu, pKernelBus, pRemoteGpu); + remPeerId = kbusGetPeerId_HAL(pRemoteGpu, pRemoteKernelBus, pGpu); + gpuInst = gpuGetInstance(pGpu); + + NV_ASSERT_OR_RETURN(locPeerId < P2P_MAX_NUM_PEERS, + NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(remPeerId < P2P_MAX_NUM_PEERS, + NV_ERR_INVALID_STATE); + + pKernelBus->p2p.busNvlinkMappingRefcountPerGpu[i]--; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerGpu[gpuInst]--; + pKernelBus->p2p.busNvlinkPeerNumberMask[i] &= ~NVBIT(locPeerId); + pRemoteKernelBus->p2p.busNvlinkPeerNumberMask[gpuInst] &= ~NVBIT(remPeerId); + pKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[locPeerId]--; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[remPeerId]--; + } + } + + for (i = 0; i < P2P_MAX_NUM_PEERS; ++i) + { + if (pKernelBus->p2pPcie.busPeer[i].refCount) + { + NV_PRINTF(LEVEL_ERROR, + "non-zero peer refcount(%d) on GPU 0x%x peer %d\n", + pKernelBus->p2pPcie.busPeer[i].refCount, pGpu->gpuInstance, i); + } + pKernelBus->p2pPcie.busPeer[i].refCount = 0; + } + + pKernelBus->bP2pInitialized = NV_FALSE; + + return status; +} + + +// +// Link P2P for all GPUs +// +void +_kbusLinkP2P_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + OBJGPU *pRemoteGpu; + NV_STATUS status; + NvU32 i; + + for ( i = 0; i < NV_MAX_DEVICES; ++i) + { + if ((pKernelBus->p2pPcie.peerNumberMask[i] != 0) || + (kbusGetNvlinkPeerNumberMask_HAL(pGpu, pKernelBus, i) != 0)) + { + pRemoteGpu = gpumgrGetGpu(i); + NV_ASSERT(pRemoteGpu != NULL); + + // + // If there is a loopback mapping pRemoteGpu will return !fullPower + // since we are currently in the process of resuming it. + // Therefore, we special case it and restore the mapping anyways. 
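+            // (The "pRemoteGpu == pGpu" check below is what covers that
+            // loopback case.)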
+ // + if (gpuIsGpuFullPower(pRemoteGpu) || + pRemoteGpu == pGpu) + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + KernelNvlink *pRemoteKernelNvlink = GPU_GET_KERNEL_NVLINK(pRemoteGpu); + NvU32 locPeerId = kbusGetPeerId_HAL(pGpu, pKernelBus, pRemoteGpu); + NvU32 remPeerId = kbusGetPeerId_HAL(pRemoteGpu, GPU_GET_KERNEL_BUS(pRemoteGpu), pGpu); + + NV_ASSERT(locPeerId < P2P_MAX_NUM_PEERS); + NV_ASSERT(remPeerId < P2P_MAX_NUM_PEERS); + NV_ASSERT(pKernelBus->p2pPcie.busPeer[locPeerId].remotePeerId == remPeerId); + + if ((pKernelNvlink != NULL) && (pRemoteKernelNvlink != NULL) && + (knvlinkGetP2pConnectionStatus(pGpu, pKernelNvlink, pRemoteGpu) == NV_OK)) + { + // + // These variables should only be updated for RM Managed P2P. + // And only once during RmInit, not during resume as while + // going to S3/S4, these variables are not cleared. + // + if (!kbusIsP2pMailboxClientAllocated(pKernelBus) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH)) + { + KernelBus *pRemoteKernelBus = GPU_GET_KERNEL_BUS(pRemoteGpu); + + pKernelBus->p2p.busNvlinkPeerNumberMask[pRemoteGpu->gpuInstance] |= + NVBIT(locPeerId); + pRemoteKernelBus->p2p.busNvlinkPeerNumberMask[pGpu->gpuInstance] |= + NVBIT(remPeerId); + pKernelBus->p2p.busNvlinkMappingRefcountPerGpu[pRemoteGpu->gpuInstance]++; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerGpu[pGpu->gpuInstance]++; + pKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[locPeerId]++; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[remPeerId]++; + } + + // Train the links to ACTIVE + if ((knvlinkTrainP2pLinksToActive(pGpu, pRemoteGpu, pKernelNvlink)) != NV_OK) + { + NV_ASSERT(0); + } + + // Use NVLINK if available + knvlinkSetupPeerMapping_HAL(pGpu, pKernelNvlink, pRemoteGpu, locPeerId); + knvlinkSetupPeerMapping_HAL(pRemoteGpu, pRemoteKernelNvlink, pGpu, remPeerId); + } + else + { + RM_API *pRmApi; + NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS params; + + // + // Fall back to PCIe otherwise + // We only expect one PCIE peer ID per remote GPU for SLI + // + NV_ASSERT(nvPopCount32(pKernelBus->p2pPcie.peerNumberMask[i]) == 1); + + kbusSetupMailboxes_HAL(pGpu, pKernelBus, + pRemoteGpu, GPU_GET_KERNEL_BUS(pRemoteGpu), + locPeerId, remPeerId); + kbusSetupMailboxes_HAL(pRemoteGpu, GPU_GET_KERNEL_BUS(pRemoteGpu), + pGpu, pKernelBus, + remPeerId, locPeerId); + // Program the registers + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + portMemSet(¶ms, 0, sizeof(params)); + params.programPciePeerMask = NVBIT32(locPeerId); + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + ¶ms, + sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error in programming the local PEER_CONNECTION_CFG registers\n"); + } + pRmApi = GPU_GET_PHYSICAL_RMAPI(pRemoteGpu); + portMemSet(¶ms, 0, sizeof(params)); + params.programPciePeerMask = NVBIT32(remPeerId); + status = pRmApi->Control(pRmApi, + pRemoteGpu->hInternalClient, + pRemoteGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + ¶ms, + sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error in programming the remote PEER_CONNECTION_CFG registers\n"); + } + } + } + } + } +} + +static NV_STATUS +kbusSendMemsysDisableNvlinkPeers +( + OBJGPU *pGpu +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + return NV_OK; + + return pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + 
NV2080_CTRL_CMD_INTERNAL_MEMSYS_DISABLE_NVLINK_PEERS, + NULL, 0); +} + +// +// Unlink P2P for all GPUs +// +void +kbusUnlinkP2P_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + KernelBus *pRemoteKernelBus; + OBJGPU *pRemoteGpu; + NvU32 i; + + for ( i = 0; i < NV_MAX_DEVICES; ++i) + { + if ((pKernelBus->p2pPcie.peerNumberMask[i] != 0) || + (kbusGetNvlinkPeerNumberMask_HAL(pGpu, pKernelBus, i) != 0)) + { + pRemoteGpu = gpumgrGetGpu(i); + if (pRemoteGpu == NULL) + { + // + // There is a P2P mapping involving an unloaded GPU + // Has NV50_P2P been properly freed ? + // + NV_PRINTF(LEVEL_ERROR, "There is a P2P mapping involving an unloaded GPU\n"); + continue; + } + + pRemoteKernelBus = GPU_GET_KERNEL_BUS(pRemoteGpu); + + if (gpuIsGpuFullPower(pRemoteGpu) && + kbusIsP2pInitialized(pRemoteKernelBus)) + { + // + // NVLINK mappings are static and cannot be torn down, but make + // sure we tear down any PCIe P2P mappings created. + // + if (pKernelBus->p2pPcie.peerNumberMask[i] != 0) + { + NvU32 locPeerId = kbusGetPeerId_HAL(pGpu, pKernelBus, pRemoteGpu); + NvU32 remPeerId = kbusGetPeerId_HAL(pRemoteGpu, pRemoteKernelBus, pGpu); + + // We only expect one PCIE peer ID per remote GPU for SLI + NV_ASSERT(nvPopCount32(pKernelBus->p2pPcie.peerNumberMask[i]) == 1); + + NV_ASSERT(locPeerId < P2P_MAX_NUM_PEERS); + NV_ASSERT(remPeerId < P2P_MAX_NUM_PEERS); + NV_ASSERT(pKernelBus->p2pPcie.busPeer[locPeerId].remotePeerId == remPeerId); + + kbusDestroyMailbox(pGpu, pKernelBus, pRemoteGpu, locPeerId); + kbusDestroyMailbox(pRemoteGpu, pRemoteKernelBus, pGpu, remPeerId); + } + + // + // Instead just disable the NVLINK peers + // + NV_ASSERT_OK(kbusSendMemsysDisableNvlinkPeers(pGpu)); + NV_ASSERT_OK(kbusSendMemsysDisableNvlinkPeers(pRemoteGpu)); + } + } + } +} + +/*! + * @brief Calculates the memory needed for allocating BAR2 Page Tables. + * + * Size calculation is optimized for @ref GMMU_FMT_VER_1 due to + * large % overhead of full Page Table size over the size + * actually needed for BAR2. UVM replayable fault buffer size is + * also accomodated in this calculation. + * + * @return Size in Bytes, needed for BAR2 Page Tables. + */ +NvU32 +kbusGetSizeOfBar2PageTables_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const GMMU_FMT *pFmt = NULL; + NvU64 vaLimit; + NvU32 numPgTblsCeil; + NvU32 numPgTblsFloor; + NvU32 pgTblSize; + NvU32 numEntries; + NvU64 vaPerEntry; + const MMU_FMT_LEVEL *pPgTbl = NULL; + NvU32 gfid; + NvU32 cpuVisibleApertureSize = 0; + NvU32 cpuInisibleApertureSize = 0; + + NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, 0); + + // Return 0 from the guest in the paravirtualization case. 
+ if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return 0; + } + + // Get the @ref GMMU_FMT for this chip + NV_ASSERT_OR_RETURN(NULL != (pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_DEFAULT, 0)), 0); + + // Get 4K page size Page Table + pPgTbl = mmuFmtFindLevelWithPageShift(pFmt->pRoot, RM_PAGE_SHIFT); + + if (pKernelBus->bar2[gfid].cpuVisibleLimit != 0) + cpuVisibleApertureSize = pKernelBus->bar2[gfid].cpuVisibleLimit - pKernelBus->bar2[gfid].cpuVisibleBase + 1; + if (pKernelBus->bar2[gfid].cpuInvisibleLimit != 0) + cpuInisibleApertureSize = pKernelBus->bar2[gfid].cpuInvisibleLimit - pKernelBus->bar2[gfid].cpuInvisibleBase + 1; + + vaLimit = cpuVisibleApertureSize + cpuInisibleApertureSize; + + + numPgTblsCeil = (NvU32)(NV_CEIL(vaLimit, NVBIT64(pPgTbl->virtAddrBitHi + 1))); + numPgTblsFloor = (NvU32)vaLimit / NVBIT64(pPgTbl->virtAddrBitHi + 1); + + // + // Let's optimize the space caculation on GMMU_FMT_VER_1 + // if the Page Table is not fully used. + // + if (0 == numPgTblsFloor) + { + vaPerEntry = mmuFmtEntryVirtAddrMask(pPgTbl) + 1; + numEntries = (NvU32)(NV_CEIL(vaLimit, vaPerEntry)); + pgTblSize = numEntries * pPgTbl->entrySize; + pKernelBus->bar2[gfid].pageTblSize = pgTblSize; + } + else + { + pKernelBus->bar2[gfid].pageTblSize = mmuFmtLevelSize(pPgTbl); + pgTblSize = numPgTblsCeil * pKernelBus->bar2[gfid].pageTblSize; + } + + pKernelBus->bar2[gfid].numPageTbls = numPgTblsCeil; + + return pgTblSize; +} + +void +kbusStateDestroy_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 offsetBar0; + + (void)kbusDestroyBar2_HAL(pGpu, pKernelBus, GPU_GFID_PF); + + // Bind the BAR0 window to its default location + // note: we can't move the window for all intents and purposes since VBIOS + // will also use the window at arbitrary locations (eg during an SMI event + if (pMemoryManager->Ram.fbAddrSpaceSizeMb) + { + offsetBar0 = (pMemoryManager->Ram.fbAddrSpaceSizeMb << 20) - DRF_SIZE(NV_PRAMIN); + (void)kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, offsetBar0); + } + + // Unmap BAR0 Writecombined Window + if(pKernelBus->pWriteCombinedBar0Window != NULL) + { + osUnmapPciMemoryKernelOld(pGpu, (void*)pKernelBus->pWriteCombinedBar0Window); + pKernelBus->pWriteCombinedBar0Window = NULL; + pKernelBus->pDefaultBar0Pointer = pKernelBus->pUncachedBar0Window; + } + + NV_PRINTF(LEVEL_INFO, "FLA Supported: %x \n", kbusIsFlaSupported(pKernelBus)); + + // clean up FLA here + // if FLA supported & enabled FLA VAS + if (IS_VIRTUAL(pGpu) && kbusIsFlaSupported(pKernelBus)) + { + NV_PRINTF(LEVEL_INFO, "Trying to destroy FLA VAS\n"); + kbusDestroyFla_HAL(pGpu, pKernelBus); + } + // + // clean up private info block + // + + if ((pKernelBif != NULL) && ((!pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED) || + !pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED)) && + (kbusIsP2pInitialized(pKernelBus)))) + { + (void)_kbusDestroyP2P_GM107(pGpu, pKernelBus); + } +} + +// +// +// Tests BAR2 against BAR0. +// If memDescIn is NULL, a test mem desc is created and map/unmapped. +// If memDescIn is not NULL and provided, this method assumes that it has +// already been alloc'ed and mapping/unmapping is handled outside +// this method. 
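+//
+// The verification runs in two phases (see the banners below):
+//  1. BAR0 window sanity: write SAMPLEDATA to the test surface through the
+//     BAR0 window, evict L2, and read it back through the same window.
+//  2. MMU translation: write SAMPLEDATA through the virtual BAR2 mapping,
+//     evict L2 and read it back through the BAR0 window, then write new data
+//     through the BAR0 window and read it back through BAR2.
+// Any mismatch fails the test with NV_ERR_MEMORY_ERROR.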
+// +NV_STATUS +kbusVerifyBar2_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + PMEMORY_DESCRIPTOR pMemDescIn, + NvU8 *pCpuPtrIn, + NvU64 offset, + NvU64 size +) +{ + MEMORY_DESCRIPTOR memDesc, *pMemDesc = NULL; + NvU8 *pOffset = NULL; + NvU32 index = 0; + NvU64 bar0Window = 0; + NvU64 testMemoryOffset = 0; + NvU32 testMemorySize = 0; + NV_STATUS status = NV_OK; + NvU32 testData = 0; + NvU32 temp = 0; + NV_ADDRESS_SPACE testAddrSpace = ADDR_FBMEM; + NV_ADDRESS_SPACE oldAddrSpace = ADDR_FBMEM; + NvBool bIsStandaloneTest; + const NvU32 SAMPLEDATA = 0xabcdabcd; + const NvU32 FBSIZETESTED = 0x10; + NvU64 bar0TestAddr = 0; + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NvU32 flagsClean = 0; + + // + // kbusVerifyBar2 will test BAR0 against sysmem on Tegra; otherwise skip + // the test if inst_in_sys is used + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM) && + !IsTEGRA(pGpu)) + { + return NV_OK; + } + + // In L2 Cache only mode or FB broken, don't verify Bar2 + if (gpuIsCacheOnlyModeEnabled(pGpu) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) || + kbusIsBar2TestSkipped(pKernelBus)) + { + return NV_OK; + } + + NV_PRINTF(LEVEL_INFO, "\n"); + + flagsClean = NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_ALL | + NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_CLEAN; + if (kmemsysIsL2CleanFbPull(pKernelMemorySystem)) + { + flagsClean |= NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_WAIT_FB_PULL; + } + + if (pMemDescIn && pCpuPtrIn) + { + if ((size + offset) > pMemDescIn->Size) + { + NV_PRINTF(LEVEL_ERROR, + "input offset 0x%llx size 0x%llx exceeds surface size 0x%llx\n", + offset, size, pMemDescIn->Size); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + bIsStandaloneTest = NV_FALSE; + pOffset = pCpuPtrIn; + pMemDesc = pMemDescIn; + } + else + { + offset = 0; + size = FBSIZETESTED; + // Allocate some memory to test virtual BAR2 with + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM)) + { + memdescCreateExisting(&memDesc, pGpu, size, ADDR_SYSMEM, pGpu->instCacheOverride, MEMDESC_FLAGS_NONE); + } + else + { + memdescCreateExisting(&memDesc, pGpu, size, ADDR_FBMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE); + } + status = memdescAlloc(&memDesc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not allocate vidmem to test bar2 with\n"); + DBG_BREAKPOINT(); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + bIsStandaloneTest = NV_TRUE; + pOffset = kbusMapRmAperture_HAL(pGpu, &memDesc); + if (pOffset == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto kbusVerifyBar2_failed; + } + pMemDesc = &memDesc; + } + testMemoryOffset = memdescGetPhysAddr(pMemDesc, AT_GPU, 0) + offset; + testMemorySize = NvU64_LO32(size); + testAddrSpace = kgmmuGetHwPteApertureFromMemdesc(GPU_GET_KERNEL_GMMU(pGpu), pMemDesc); + + // ========================================================== + // Does the BAR0 window work? 
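+    //
+    // The window is addressed by programming NV_PBUS_BAR0_WINDOW (_BASE with
+    // the 64KB-aligned FB address, _TARGET with the aperture) and then
+    // accessing DRF_BASE(NV_PRAMIN) + (addr & 0xffff) through BAR0. For a
+    // hypothetical address 0x12345678 that is roughly:
+    //
+    //     GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _BASE, 0x12345678 >> 16);
+    //     data = GPU_REG_RD32(pGpu, DRF_BASE(NV_PRAMIN) + (0x12345678 & 0xffff));
+    //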
+ + NV_PRINTF_COND(IS_EMULATION(pGpu), LEVEL_NOTICE, LEVEL_INFO, "Testing BAR0 window...\n"); + + bar0Window = kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus); + oldAddrSpace = DRF_VAL( _PBUS, _BAR0_WINDOW, _TARGET, GPU_REG_RD32(pGpu, NV_PBUS_BAR0_WINDOW)); + bar0TestAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _BASE, NvU64_LO32(bar0TestAddr >> 16)); + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _TARGET, testAddrSpace); + testData = GPU_REG_RD32(pGpu, DRF_BASE(NV_PRAMIN) + NvU64_LO32(bar0TestAddr & 0xffff)); + + GPU_REG_WR32(pGpu, DRF_BASE(NV_PRAMIN) + NvU64_LO32(bar0TestAddr & 0xffff), SAMPLEDATA); + + if (GPU_REG_RD32(pGpu, DRF_BASE(NV_PRAMIN) + NvU64_LO32(bar0TestAddr & 0xffff)) != SAMPLEDATA) + { + // + // Ideally, this should hit the L2 cache and even if memory is bad, + // unless something in the path up to L2 is messed up, we should not + // get here. + // + NV_PRINTF(LEVEL_ERROR, + "Pre-L2 invalidate evict: Address 0x%llx programmed through the bar0 " + "window with value 0x%x did not read back the last write.\n", + bar0TestAddr, SAMPLEDATA); + DBG_BREAKPOINT_REASON(NV_ERR_MEMORY_ERROR); + status = NV_ERR_MEMORY_ERROR; + goto kbusVerifyBar2_failed; + } + + // + // Evict L2 to ensure that the next read doesn't hit L2 and mistakenly + // assume that the BAR0 window to vidmem works + // + status = kmemsysSendL2InvalidateEvict(pGpu, pKernelMemorySystem, flagsClean); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "L2 evict failed\n"); + goto kbusVerifyBar2_failed; + } + + if (GPU_REG_RD32(pGpu, DRF_BASE(NV_PRAMIN) + NvU64_LO32(bar0TestAddr & 0xffff)) != SAMPLEDATA) + { + NV_PRINTF(LEVEL_ERROR, + "Post-L2 invalidate evict: Address 0x%llx programmed through the bar0 " + "window with value 0x%x did not read back the last write\n", + bar0TestAddr, SAMPLEDATA); + if (IS_EMULATION(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, + "Setup a trigger on write with a 3 quarters post " + "trigger capture\n"); + NV_PRINTF(LEVEL_ERROR, + "and search for the last bar0 window write not returning the same value" + " in a subsequent read\n"); + } + DBG_BREAKPOINT_REASON(NV_ERR_MEMORY_ERROR); + status = NV_ERR_MEMORY_ERROR; + goto kbusVerifyBar2_failed; + } + + NV_PRINTF_COND(IS_EMULATION(pGpu), LEVEL_NOTICE, LEVEL_INFO, "Bar0 window tests successfully\n"); + GPU_REG_WR32(pGpu, DRF_BASE(NV_PRAMIN) + NvU64_LO32(bar0TestAddr & 0xffff), testData); + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _BASE, NvU64_LO32(bar0Window >> 16)); + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _TARGET, oldAddrSpace); + + if ((testAddrSpace == NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY) || + (testAddrSpace == NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY)) + { + // Flush GPU write before proceeding to next test (otherwise it may stomp over following CPU writes) + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | BUS_FLUSH_USE_PCIE_READ); + } + // ========================================================== + + + // ========================================================== + // Does MMU's translation logic work? 
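+    //
+    // Writes issued through the virtual BAR2 mapping (pOffset) only land in the
+    // test surface if the BAR2 page tables translate correctly, so reading the
+    // same physical pages back through the BAR0 window (and later the reverse
+    // direction) exercises the GMMU translation path both ways.
+    //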
+ NV_PRINTF(LEVEL_INFO, + "MMUTest Writing test data through virtual BAR2 starting at bar2 offset" + " (%p - %p) = %p and of size 0x%x\n", (NvU8 *)pOffset, + (NvU8 *)pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping, + (NvU8 *)(pOffset - pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping), + testMemorySize); + NV_PRINTF_COND(IS_EMULATION(pGpu), LEVEL_NOTICE, LEVEL_INFO, + "MMUTest The physical address being targetted is 0x%llx\n", + testMemoryOffset); + for(index = 0; index < testMemorySize; index += 4) + { + MEM_WR32( pOffset + index, SAMPLEDATA ); + } + // Flush the bar2 writes + // A uflush should not be required since a bar0 window read follows after this + if ((testAddrSpace == NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY) || + (testAddrSpace == NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY)) + { + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | BUS_FLUSH_USE_PCIE_READ); + } + osFlushCpuWriteCombineBuffer(); + + status = kmemsysSendL2InvalidateEvict(pGpu, pKernelMemorySystem, flagsClean); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "L2 evict failed\n"); + goto kbusVerifyBar2_failed; + } + + // Readback through the bar0 window + bar0Window = kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus); + oldAddrSpace = DRF_VAL( _PBUS, _BAR0_WINDOW, _TARGET, GPU_REG_RD32(pGpu, NV_PBUS_BAR0_WINDOW)); + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _BASE, NvU64_LO32(testMemoryOffset >> 16)); + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _TARGET, testAddrSpace); + + NV_PRINTF(LEVEL_INFO, + "bar0Window = 0x%llx, testMemoryOffset = 0x%llx, testAddrSpace = %d, " + "_PBUS_BAR0_WINDOW = 0x%08x\n", bar0Window, testMemoryOffset, + testAddrSpace, GPU_REG_RD32(pGpu, NV_PBUS_BAR0_WINDOW)); + + temp = (DRF_BASE(NV_PRAMIN) + (NvU32)(testMemoryOffset & 0xffff)); + for(index = 0; index < testMemorySize; index += 4) + { + NvU32 bar0WindowData = GPU_REG_RD32(pGpu, temp + index); + if (bar0WindowData != SAMPLEDATA) + { + NV_PRINTF(LEVEL_ERROR, + "MMUTest BAR0 window offset 0x%x returned garbage 0x%x\n", + temp + index, bar0WindowData); + NV_PRINTF_COND(IS_EMULATION(pGpu), LEVEL_ERROR, LEVEL_INFO, + "Setup a trigger for write and in the waves search" + " the last few bar2 virtual writes mixed with bar0 window reads\n"); + DBG_BREAKPOINT_REASON(NV_ERR_MEMORY_ERROR); + status = NV_ERR_MEMORY_ERROR; + goto kbusVerifyBar2_failed; + } + // Write through the BAR0 window to be readback through BAR2 later + GPU_REG_WR32(pGpu, temp + index, SAMPLEDATA + 0x10); + } + + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _BASE, NvU64_LO32(bar0Window >> 16)); + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _TARGET, oldAddrSpace); + + status = kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | BUS_FLUSH_USE_PCIE_READ); + + // Bail now if we have encountered any error + if (status != NV_OK) + { + goto kbusVerifyBar2_failed; + } + + status = kmemsysSendL2InvalidateEvict(pGpu, pKernelMemorySystem, flagsClean); + if (NV_OK != status) + { + goto kbusVerifyBar2_failed; + } + + // Verify BAR2 virtual reads + for(index = 0; index < testMemorySize; index +=4) + { + temp = MEM_RD32(pOffset + index); + if (temp != (SAMPLEDATA + 0x10)) + { + NV_PRINTF(LEVEL_ERROR, + "MMUTest BAR2 Read of virtual addr 0x%x returned garbage 0x%x\n", + (NvU32)(pOffset - pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping) + index, + temp); + DBG_BREAKPOINT_REASON(NV_ERR_MEMORY_ERROR); + status = NV_ERR_MEMORY_ERROR; + goto kbusVerifyBar2_failed; + } + } + +kbusVerifyBar2_failed: + if (bIsStandaloneTest) + { + if (pOffset != NULL) + { + 
kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pOffset, NV_TRUE); + } + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + + if (status == NV_OK) + { + NV_PRINTF_COND(IS_EMULATION(pGpu), LEVEL_NOTICE, LEVEL_INFO, "BAR2 virtual test passes\n"); + } + + return status; +} + +/*! + * @brief Inits physical address of Bar1 and Bar2 structures + * + * @param[in] KernelBus + */ +NV_STATUS +kbusInitBarsBaseInfo_GM107 +( + KernelBus *pKernelBus +) +{ + // pKernelBus->pciBars[] should be initialized before the function gets called + NV_ASSERT_OR_RETURN(pKernelBus->pciBars[BUS_BAR_1] != 0, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelBus->pciBars[BUS_BAR_2] != 0, NV_ERR_INVALID_STATE); + + pKernelBus->bar1[GPU_GFID_PF].physAddr = pKernelBus->pciBars[BUS_BAR_1]; + pKernelBus->bar2[GPU_GFID_PF].physAddr = pKernelBus->pciBars[BUS_BAR_2]; + + return NV_OK; +} + +/** + * @brief Set BAR1/BAR2 virtual aperture size and BAR2 CPU visible limit + * + * @param pGpu + * @param pKernelBus + * @param gfid + * + * @return + */ +NV_STATUS kbusSetBarsApertureSize_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + NvU32 data32; + + // + // Setup BAR1 aperture size only for GFID_VF + // GFID_PF is done in StateInit phase + // + if (IS_GFID_VF(gfid)) + { + kbusDetermineBar1ApertureLength(pKernelBus, gfid); + } + + // + // Setup BAR2 aperture size + // Check to see if a BAR2 aperture size override has been specified. + // + if (((NV_OK == osReadRegistryDword(pGpu, NV_REG_STR_RM_BAR2_APERTURE_SIZE_MB, + &data32))) && data32 && data32 <= BUS_BAR2_RM_APERTURE_MB) + { + // Set the BAR2 aperture size based on the override + pKernelBus->bar2[gfid].rmApertureLimit = (data32 << 20) - 1; + // + // This shrinks the bar2 page table and has the side-effect of not + // configuring the upper part of bar2 used for VESA access (because we + // only apply override if < BUS_BAR2_RM_APERTURE_MB). + // + pKernelBus->bar2[gfid].cpuVisibleLimit = (data32 << 20) - 1; + } + else + { + // + // For simulation mods we limit BAR2 size to decrease PTE init time. + // Backdoor fmodel/RTL could use the standard settings, but want to + // keep the code path the same for emulation. With a 8MB BAR2 we do + // not expect instance memory to evict a cached mapping. + // + if ((IS_SIM_MODS(GPU_GET_OS(pGpu)) && IS_SILICON(pGpu) == 0) || (!RMCFG_FEATURE_PLATFORM_MODS && IS_SIMULATION(pGpu))) + { + pKernelBus->bar2[gfid].rmApertureLimit = ((BUS_BAR2_RM_APERTURE_MB >> 1) << 20) - 1; // 8MB + pKernelBus->bar2[gfid].cpuVisibleLimit = pKernelBus->bar2[gfid].rmApertureLimit; // No VESA space + } + else + { + pKernelBus->bar2[gfid].cpuVisibleLimit = (BUS_BAR2_APERTURE_MB << 20) - 1; + pKernelBus->bar2[gfid].rmApertureLimit = (BUS_BAR2_RM_APERTURE_MB << 20) - 1; + } + } + + return NV_OK; +} + +/*! + * @brief Calculates the memory needed for allocating a BAR2 Page Dir for a given VA range + * + * @param[in] vaPerEntry The VA span of one entry within the Page Dir + * whose size is needed. + * @param[in] entrySize The size of one PDE within the Page Dir of interest. + * + * @return RM_PAGE_SIZE aligned size in Bytes, needed for the BAR2 Page Dir. 
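+ *
+ * Rough worked example (hypothetical values): with vaBase = 0, vaLimit = 32MB,
+ * vaPerEntry = 2MB and entrySize = 8, the level needs NV_CEIL(32MB, 2MB) = 16
+ * entries, i.e. 128 bytes, which NV_ROUNDUP pads to a single RM_PAGE_SIZE
+ * (4KB) page.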
+ */ +static NvU32 _kbusGetSizeOfBar2PageDir_GM107 +( + NvU64 vaBase, + NvU64 vaLimit, + NvU64 vaPerEntry, + NvU32 entrySize +) +{ + NvU32 size; + NvU32 numEntries; + NvU64 vaBaseAligned; + + NV_ASSERT_OR_RETURN(0 != entrySize, 0); + NV_ASSERT_OR_RETURN(0 != vaPerEntry, 0); + + // + // Calculate number of entries needed within this level to represent + // the entire BAR2 aperture VA range, then align to 4K + // + vaBaseAligned = vaBase & ~(vaPerEntry - 1); + numEntries = (NvU32)NV_CEIL(vaLimit - vaBaseAligned, vaPerEntry); + size = numEntries * entrySize; + size = NV_ROUNDUP(size, RM_PAGE_SIZE); + + return size; +} + +/*! + * @brief Calculates the memory needed for allocating BAR2 Page Dirs + * + * Size calculation considers all Page Levels defined in @ref GMMU_FMT. + * Assumes Cpu visible region always starts before the invisible region. + * + * @return RM_PAGE_SIZE aligned size in Bytes, needed for all BAR2 Page Dirs. + */ +NvU32 kbusGetSizeOfBar2PageDirs_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const GMMU_FMT *pFmt = NULL; + NvU32 size = 0; + const MMU_FMT_LEVEL *pLevel = NULL; + NvU64 bar2VaLimit = kbusGetVaLimitForBar2_HAL(pGpu, pKernelBus); + NvU16 i; + NvU32 gfid; + NvBool bContiguous; + + NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, 0); + + // Return 0 from the guest in the paravirtualization case. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return 0; + } + + // Get the @ref GMMU_FMT for this chip + NV_ASSERT_OR_RETURN(NULL != (pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_DEFAULT, 0)), 0); + pLevel = pFmt->pRoot; + + // Cache the size of the root Page Dir, once. + pKernelBus->bar2[gfid].pageDirSize = _kbusGetSizeOfBar2PageDir_GM107(pKernelBus->bar2[gfid].cpuVisibleBase, + bar2VaLimit, + mmuFmtEntryVirtAddrMask(pLevel) + 1, + pLevel->entrySize); + + // Accumulate size for all Page Directories. + pKernelBus->bar2[gfid].numPageDirs = 0; + bContiguous = (pKernelBus->bar2[gfid].cpuVisibleLimit + 1 == pKernelBus->bar2[gfid].cpuInvisibleBase) || + pKernelBus->bar2[gfid].cpuInvisibleLimit == 0; + + for (i = 0; (i < GMMU_FMT_MAX_LEVELS - 1); i++) + { + NvU32 levelSize = 0; + NvU64 vaPerEntry = mmuFmtEntryVirtAddrMask(pLevel) + 1; + + if (!bContiguous) + { + // + // Avoid double reserving size for page dir when visible and invisible bar2 share the same page directory + // In this case we treat them as contiguous. + // + if ((pKernelBus->bar2[gfid].cpuVisibleLimit & ~(vaPerEntry - 1)) == + (pKernelBus->bar2[gfid].cpuInvisibleBase & ~(vaPerEntry - 1))) + { + levelSize += _kbusGetSizeOfBar2PageDir_GM107(pKernelBus->bar2[gfid].cpuVisibleBase, + bar2VaLimit, + vaPerEntry, + pLevel->entrySize); + } + else + { + levelSize += _kbusGetSizeOfBar2PageDir_GM107(pKernelBus->bar2[gfid].cpuInvisibleBase, + pKernelBus->bar2[gfid].cpuInvisibleLimit, + vaPerEntry, + pLevel->entrySize); + + levelSize += _kbusGetSizeOfBar2PageDir_GM107(pKernelBus->bar2[gfid].cpuVisibleBase, + pKernelBus->bar2[gfid].cpuVisibleLimit, + vaPerEntry, + pLevel->entrySize); + } + } + else + { + levelSize = _kbusGetSizeOfBar2PageDir_GM107(pKernelBus->bar2[gfid].cpuVisibleBase, + bar2VaLimit, + vaPerEntry, + pLevel->entrySize); + } + + // Get the number of directories we need to initialize from the level size. + pKernelBus->bar2[gfid].numPageDirs += levelSize >> RM_PAGE_SHIFT; + size += levelSize; + + // If there's one sublevel choose that. 
+ if (1 == pLevel->numSubLevels) + { + pLevel = &(pLevel->subLevels[0]); + } + else + { + // Choose the 4K page size sublevel. + pLevel = &(pLevel->subLevels[1]); + } + NV_ASSERT_OR_RETURN(NULL != pLevel, 0); + + // Stop accumulating size if we've exhausted all Page Dirs. + if (pLevel->bPageTable && (0 == pLevel->numSubLevels)) + { + break; + } + } + + return size; +} + +/*! + * @brief Tunnel bar2 accesses through bar0 window. + * + * This routine is used to re-direct the bar2 accesses which were mapped as + * type BUSBARMAP_TYPE_BAR through the bar0 window. This is a callback + * routine called by osMem[Rd|Wr]*, portMemSet and portMemCopy routines when they + * detect an address is in the bar2 range. + * + * @param[in] *pPrivData - Void pointer to callback-user-defined data. + * For the purpose here pPrivData just contains + * a pointer to pGpu + * @param[in] addr - The address to be tunneled. + * @param[in/out] *pData - Pointer to the data to be read/written. + * @param[in] size - Size of the data to be read/written. + * @param[in] bRead - Read/Write indicator. + * + * @returns NV_OK - if tunneling is successful. + * NV_ERR_INVALID_ARGUMENT if the addr argument is not valid + */ +static NV_STATUS +_kbusBar0TunnelCb_GM107 +( + void *pPrivData, + NvU64 addr, + void *pData, + NvU64 size, + NvBool bRead +) +{ + OBJGPU *pGpu = reinterpretCast(pPrivData, OBJGPU *); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + VirtualBar2MapListIter it; + NvU32 offset; + + it = listIterAll(&pKernelBus->virtualBar2[GPU_GFID_PF].usedMapList); + while (listIterNext(&it)) + { + VirtualBar2MapEntry *pMap = it.pValue; + + // Check if there is a valid mapping for the address passed-in + if (addr >= (NvU64)((NvUPtr)pMap->pRtnPtr) && + (addr + size - 1) < ((NvU64)((NvUPtr)pMap->pRtnPtr) + pMap->pMemDesc->Size)) + { + // Re-direct the access through bar0 window + offset = (NvU32)(addr - (NvU64)((NvUPtr)pMap->pRtnPtr)); + return kbusMemAccessBar0Window_HAL( + pGpu, + pKernelBus, + memdescGetPhysAddr(pMap->pMemDesc, FORCE_VMMU_TRANSLATION(pMap->pMemDesc, AT_GPU), offset), + pData, + size, + bRead, + memdescGetAddressSpace(pMap->pMemDesc)); + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NvU64 +kbusGetBAR0WindowAddress_GM107 +( + KernelBus *pKernelBus +) +{ + return NV_PRAMIN_DATA008(0); +} + + + /*! + * @brief Returns the first available peer Id + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NvU32 first free peer Id + */ +NvU32 +kbusGetUnusedPeerId_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NvU32 peerId; + + for (peerId = 0; peerId < pKernelBus->numPeers; peerId++) + { + if ((pKernelBus->p2pPcie.busPeer[peerId].refCount == 0) && + (!pKernelBus->p2pPcie.busPeer[peerId].bReserved)) + { + return peerId; + } + } + + return BUS_INVALID_PEER; +} + +/*! + * @brief Returns the first available PCIE peer Id + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NvU32 first free peer Id + */ +NvU32 +kbusGetUnusedPciePeerId_GM107 +( + OBJGPU* pGpu, + KernelBus* pKernelBus +) +{ + return kbusGetUnusedPeerId_HAL(pGpu, pKernelBus); +} + + + /*! 
+ * @brief Returns the peer number from pGpu (Local) to pGpuPeer + * + * @param[in] pGpu Local + * @param[in] pKernelBus Local + * @param[in] pGpuPeer Remote + * + * @returns NvU32 bus peer number + */ +NvU32 +kbusGetPeerId_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + OBJGPU *pGpuPeer +) +{ + NvU32 gpuPeerInst = gpuGetInstance(pGpuPeer); + NvU32 peerId; + + if (pKernelBus->p2pPcie.peerNumberMask[gpuPeerInst] == 0) + { + return BUS_INVALID_PEER; + } + + peerId = pKernelBus->p2pPcie.peerNumberMask[gpuPeerInst]; + LOWESTBITIDX_32(peerId); + + return peerId; +} + +/*! + * @brief Returns whether or not the given peerId is valid for the given GPU. + * + * @returns NV_OK if the peerId corresponds to an active peer mapping + * NV_ERR_INVALID_INDEX otherwise + */ +NV_STATUS +kbusIsPeerIdValid_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 peerId +) +{ + NV_ASSERT_OR_RETURN(peerId < P2P_MAX_NUM_PEERS, NV_ERR_INVALID_INDEX); + if (pKernelBus->p2pPcie.peerNumberMask[gpuGetInstance(pGpu)] & NVBIT(peerId)) + return NV_OK; + return NV_ERR_INVALID_INDEX; +} + +/*! +* @brief Gets the BAR2 GMMU walker object +* +* @param[in] pKernelBus +* +* @returns MMU_WALK * Pointer to BAR2 MMU walker +*/ +MMU_WALK * +kbusGetBar2GmmuWalker_GM107 +( + KernelBus *pKernelBus +) +{ + OBJGPU* pGpu = ENG_GET_GPU(pKernelBus); + NvU32 gfid; + + NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, NULL); + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NULL; + } + return pKernelBus->bar2[gfid].pWalk; +} + +/*! +* @brief Gets the BAR2 GMMU format descriptor +* +* @param[in] pKernelBus +* +* @returns const GMMU_FMT * Pointer to BAR2 GMMU format +*/ +const GMMU_FMT * +kbusGetBar2GmmuFmt_GM107 +( + KernelBus *pKernelBus +) +{ + OBJGPU* pGpu = ENG_GET_GPU(pKernelBus); + NvU32 gfid; + + NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, NULL); + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NULL; + } + return pKernelBus->bar2[gfid].pFmt; +} + +/*! + * brief Returns the peer ID corresponding to the peer indexes + * from the peer ID table + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] locPeerIdx Local peer Index + * @param[in] remPeerIdx Remote peer Index + * + * return NvU32 peerID from the table using given peer indexes + */ +NvU32 +kbusGetPeerIdFromTable_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 locPeerIdx, + NvU32 remPeerIdx +) +{ + if (locPeerIdx >= P2P_MAX_NUM_PEERS || + remPeerIdx >= P2P_MAX_NUM_PEERS) + { + NV_PRINTF(LEVEL_ERROR, + "Peer number table doesn't support >%u GPUs\n", + P2P_MAX_NUM_PEERS); + + return BUS_INVALID_PEER; + } + + return peerNumberTable_GM107[locPeerIdx][remPeerIdx]; +} + +// +// Description: This function fills in an object array describing +// the offsets to and addresses in the PCI bus. +// +void +kbusInitPciBars_GM107 +( + KernelBus *pKernelBus +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelBus); + + pKernelBus->pciBars[0] = pGpu->busInfo.gpuPhysAddr; + pKernelBus->pciBars[1] = pGpu->busInfo.gpuPhysFbAddr; + pKernelBus->pciBars[2] = pGpu->busInfo.gpuPhysInstAddr; + + if (! 
IsAMODEL(pGpu)) + { + // Classic dGPUs + pKernelBus->totalPciBars = BUS_NUM_BARS; + pKernelBus->pciBars[3] = pGpu->busInfo.gpuPhysIoAddr; + } + else + { + // AMODEL doesn't have IO BAR + pKernelBus->totalPciBars = 3; + } +} + +NV_STATUS +kbusSetBAR0WindowVidOffset_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 vidOffset +) +{ + NV_ASSERT( (vidOffset & 0xffff)==0 ); + NV_ASSERT( (vidOffset >> 16) <= DRF_MASK(NV_PBUS_BAR0_WINDOW_BASE) ); + + // RM initialises cachedBar0WindowVidOffset with 0. Refresh its value with + // current NV_PBUS_BAR0_WINDOW_BASE. + if (pKernelBus->cachedBar0WindowVidOffset == 0) + { + pKernelBus->cachedBar0WindowVidOffset = ((NvU64) GPU_REG_RD_DRF(pGpu, _PBUS, _BAR0_WINDOW, _BASE)) << 16; + } + + // Update only if the new offset is different from the cached value + if (pKernelBus->cachedBar0WindowVidOffset != vidOffset) + { + NV_PRINTF(LEVEL_INFO, + "mapping BAR0_WINDOW to VID:%x'%08x\n", + NvU64_HI32(vidOffset), NvU64_LO32(vidOffset)); + + GPU_FLD_WR_DRF_NUM(pGpu, _PBUS, _BAR0_WINDOW, _BASE, NvU64_LO32(vidOffset >> 16)); + GPU_FLD_WR_DRF_DEF(pGpu, _PBUS, _BAR0_WINDOW, _TARGET, _VID_MEM); + + pKernelBus->cachedBar0WindowVidOffset = vidOffset; + } + + return (NV_OK); +} + +NvU64 +kbusGetBAR0WindowVidOffset_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NvU64 vidOffset; + + // RM initialises cachedBar0WindowVidOffset with 0. Refresh its value with + // current NV_PBUS_BAR0_WINDOW_BASE. + if (pKernelBus->cachedBar0WindowVidOffset == 0) + { + pKernelBus->cachedBar0WindowVidOffset = ((NvU64) GPU_REG_RD_DRF(pGpu, _PBUS, _BAR0_WINDOW, _BASE)) << 16; + } + + vidOffset = pKernelBus->cachedBar0WindowVidOffset; + + return (vidOffset); +} + +/*! + * Mem read/write through the bar0 window. + * + * This routine is used to re-direct the bar2 accesses which were mapped as + * type BUSBARMAP_TYPE_BAR through the bar0 window. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] physAddr - physical address of the accessed memory + * @param[in] accessSize - Size of the data to be read/written + * @param[in] bRead - Read or Write flag + * @param[in] addrSpace - aperture of the accessed memory + * @returns NV_STATUS + */ +NV_STATUS +kbusMemAccessBar0Window_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 physAddr, + void *pData, + NvU64 accessSize, + NvBool bRead, + NV_ADDRESS_SPACE addrSpace +) +{ + NvU64 bar0WindowOffset; + NvU64 bar0WindowOrig; + NvBool bRestoreWindow = NV_FALSE; + + // The following code assumes aperture to be VID_MEM (or that vidmem/sysmem are same). 
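+    //
+    // If physAddr does not fall inside the currently programmed window, the
+    // window is moved to the 64KB-aligned base of physAddr. For a hypothetical
+    // physAddr of 0x12345678 that means a window base of 0x12340000 and a
+    // bar0WindowOffset of 0x5678.
+    //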
+ NV_ASSERT(gpuIsUnifiedMemorySpaceEnabled(pGpu) || (addrSpace == ADDR_FBMEM)); + + bar0WindowOrig = kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus); + bar0WindowOffset = physAddr - bar0WindowOrig; + + if (bar0WindowOffset + accessSize > DRF_SIZE(NV_PRAMIN)) + { + kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, (physAddr & ~0xffff)); + bar0WindowOffset = physAddr - kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus); + bRestoreWindow = NV_TRUE; + } + + if (bRead) + { + // Read access + switch (accessSize) + { + case 1: + *((NvU8 *)pData) = (NvU8)GPU_REG_RD08(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset)); + break; + case 2: + *((NvU16 *)pData) = (NvU16)GPU_REG_RD16(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset)); + break; + case 4: + *((NvU32 *)pData) = (NvU32)GPU_REG_RD32(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset)); + break; + case 8: + // TO DO: Create GPU_REG_RD64 + *((NvU32 *)pData) = (NvU32)GPU_REG_RD32(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset)); + *((NvU32 *)pData + 1) = (NvU32)GPU_REG_RD32(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset + 4)); + break; + + default: + NV_ASSERT(0); + return NV_ERR_GENERIC; + } + } + else + { + // Write access + switch (accessSize) + { + case 1: + GPU_REG_WR08(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset), (NvU8)(*((NvU8 *)pData) & 0xff)); + break; + case 2: + GPU_REG_WR16(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset), (NvU16)(*((NvU16 *)pData) & 0xffff)); + break; + case 4: + GPU_REG_WR32(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset), *((NvU32 *)pData)); + break; + case 8: + // TO DO: Create GPU_REG_WR64 + GPU_REG_WR32(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset), *((NvU32 *)pData)); + GPU_REG_WR32(pGpu, NV_PRAMIN_DATA008(bar0WindowOffset + 4), *((NvU32 *)pData + 1)); + break; + + default: + NV_ASSERT(0); + return NV_ERR_GENERIC; + } + } + + // + // The Bar0 window will be restored after Bar2 bootstrap + // so check if we can skip restoring the window to avoid + // these extra register writes to adjust the WINDOW which may + // cause a timeout failure on some GA10X fmodel environment tests. + // By skipping the restore function here we ensure the following + // Bar2 PT writes have the Bar0 window already set up. + // + if (bRestoreWindow && !pKernelBus->bar2[GPU_GFID_PF].bBootstrap) + { + NV_ASSERT_OK_OR_RETURN(kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, bar0WindowOrig)); + } + + return NV_OK; +} + +/*! + * Optimized memcopy through the bar0 window. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] physAddr - physical address of the accessed memory + * @param[in] pSysmem - sysmem buffer to read from/write to + * @param[in] size - Size of the data to be read/written + * @param[in] bRead - Read into sysmem buffer or write to it + * @param[in] addrSpace - aperture of the accessed memory + * @returns NV_STATUS + */ +NV_STATUS +kbusMemCopyBar0Window_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + RmPhysAddr physAddr, + void *pSysmem, + NvLength size, + NvBool bRead +) +{ + NV_STATUS ret = NV_ERR_NOT_SUPPORTED; + NvLength copied = 0; + NvU8 *pSysmemBuf = pSysmem; + NvU64 fbCopyOffset = physAddr; + const NvLength windowSize = DRF_SIZE(NV_PRAMIN); + + NV_CHECK_OR_RETURN(LEVEL_INFO, size > 0, NV_OK); + + do + { + NvU64 praminFbBase = NV_ALIGN_DOWN64(fbCopyOffset, 0x10000); + NvLength praminOffset = fbCopyOffset - praminFbBase; + NvU8 *pPramin = ((NvU8 *)pGpu->registerAccess.gpuInstAddr) + praminOffset; + NvLength copySize = NV_MIN(size - copied, windowSize - praminOffset); + NvU8 *pSource = bRead ? pPramin : pSysmemBuf; + NvU8 *pDest = bRead ? 
pSysmemBuf : pPramin; + + ret = kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, praminFbBase); + NV_ASSERT_OK(ret); + + // TODO: use MMIO-safe memcopy abstraction if provided + portMemCopy(pDest, copySize, pSource, copySize); + osSchedule(); + + fbCopyOffset += copySize; + pSysmemBuf += copySize; + copied += copySize; + } + while (copied < size); + + return ret; +} + +/*! + * @brief Function to determine if the mapping can be direct mapped or BAR mapped + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc Memory Descriptor pointer + * @param[in] mapFlags Flags used for mapping + * @param[in] bDirectSysMappingAllowed boolean to return the result + * + * returns NV_OK, since HW supports reflected mappings + */ +NV_STATUS +kbusIsDirectMappingAllowed_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 mapFlags, + NvBool *bDirectSysMappingAllowed +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool bAllowReflectedMapping = NV_FALSE; + NvU32 pteKind = memdescGetPteKind(pMemDesc); + + // + // Bug 2033948: Will remove supporting reflected mapping for Z surfaces in sysmem, + // as soon as MODS implements Z swizzling. Only for MODS. + // + if (pKernelBus->bAllowReflectedMappingAccess && + memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_Z, pteKind)) + { + bAllowReflectedMapping = NV_TRUE; + } + + *bDirectSysMappingAllowed = + (!(bAllowReflectedMapping) && + (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED)) && + (memdescGetGpuCacheAttrib(pMemDesc) != NV_MEMORY_CACHED) && + (DRF_VAL(OS33, _FLAGS, _MAPPING, mapFlags) != NVOS33_FLAGS_MAPPING_REFLECTED)); + + return NV_OK; +} + +/** + *! + * @brief Determine if we should use a direct mapping. + * + * RM tries to pick the most efficient mapping possible. For frame buffer addresses, + * we have no choice, they must be mapped through BAR2. For system memory we prefer + * to use direct mappings on dGPU as reflected transactions can lead the PCIE bus to + * deadlock. + * + * The conditions in which we choose to map system memory through BAR2 are: + * - Running swap endian and we need BAR2 to do byte swapping + * - Allowed by verification BAR2_SYSMEM_ENABLE property + * - Memory is GPU cached + * + * Allocated is required for a direct system memory map on some platforms + * as extra information is needed to complete the mapping request. + * User allocated system memory must be Direct mapped (and NOT reflected mapping). + * But, memDesc for user allocated memory may have allocated flag as false. + * So, adding check for the same. + * + * RM does not map block linear or compressed buffers. If those come up + * we will have to check for them. + * + * We now allow mapping on Allocated memdescs & as well as submemdescs. + * The Parent descriptor check is added to handle some specific cases where + * memDesc is not allocated and doesn't have a parent. i.e when GMMU PTEs are + * allocated from Reserved Sysmem Heap, we use memdescDescribe() to populate the + * PTE memdesc. This happens in WinXP, and needs to be reflected BAR2 mapped. + * + * On Tegra we don't want to go via BAR2 (i.e tunneled via BAR0), since it is + * expensive. BUS cache maintenance code will ensure coherency b/w CPU & GPU in + * Tegra. We can even have dGPU use this path in future. 
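+ *
+ * Condensed (illustrative, mirroring the checks below): a direct sysmem mapping
+ * is used when the memory is not ADDR_FBMEM, the verification BAR2-sysmem
+ * access property is not enabled, the descriptor is allocated (or has a parent,
+ * or is EXT_PAGE_ARRAY / PEER_IO memory), and the memory is GPU-uncached or we
+ * are on Tegra.
+ *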
+ * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc MEMORY_DESCRIPTOR pointer + * @param[in/out] pbAllowDirectMap NvBool pointer + * + *@returns NV_OK, if supported + * NV_ERR_NOT_SUPPORTED, otherwise + */ +NV_STATUS +kbusUseDirectSysmemMap_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvBool *pbAllowDirectMap +) +{ + *pbAllowDirectMap = NV_FALSE; + + if((memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM) && + (!kbusIsBar2SysmemAccessEnabled(pKernelBus)) && + (pMemDesc->Allocated || memdescGetParentDescriptor(pMemDesc) || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM) || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) && + ((memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) || IsTEGRA(pGpu))) + { + *pbAllowDirectMap = NV_TRUE; + } + + return NV_OK; +} + +/*! + * Update BAR1 instance block VAS state and rebind it to HW. + */ +NV_STATUS +kbusBar1InstBlkVasUpdate_GM107 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + OBJVASPACE *pBar1VAS = kbusGetBar1VASpace_HAL(pGpu, pKernelBus); + INST_BLK_INIT_PARAMS params = {0}; + NvU32 gfid; + NV_STATUS status = NV_OK; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + // Nothing to be done in the guest in the paravirtualization case. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + if (IS_GFID_VF(gfid) && + pKernelBus->bar1[gfid].pInstBlkMemDesc == NULL) + { + // + // VF BAR1 instance block cannot by in PF sysmem as the latter + // is not mapped into VF's IOMMU domain + // + NV_ASSERT_OR_RETURN(pKernelBus->InstBlkAperture == ADDR_FBMEM, NV_ERR_INVALID_ARGUMENT); + + if ((status = memdescCreate(&pKernelBus->bar1[gfid].pInstBlkMemDesc, + pGpu, + GF100_BUS_INSTANCEBLOCK_SIZE, + GF100_BUS_INSTANCEBLOCK_SIZE, + NV_TRUE, + pKernelBus->InstBlkAperture, + pKernelBus->InstBlkAttr, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE)) != NV_OK) + { + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + + status = memdescAlloc(pKernelBus->bar1[gfid].pInstBlkMemDesc); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + status = memmgrMemDescMemSet(pMemoryManager, + pKernelBus->bar1[gfid].pInstBlkMemDesc, + 0, + TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + pKernelBus->bar1[gfid].instBlockBase = + memdescGetPhysAddr(pKernelBus->bar1[gfid].pInstBlkMemDesc, + AT_GPU, 0); + } + + // Initialize the instance block VAS state. + NV_ASSERT_OK_OR_RETURN( + kgmmuInstBlkInit(pKernelGmmu, pKernelBus->bar1[gfid].pInstBlkMemDesc, pBar1VAS, + FIFO_PDB_IDX_BASE, ¶ms)); + + // + // (Re-)bind instance block so host fetches the new VAS state. + // Flush to ensure host sees the latest. + // + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY); + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm200.c b/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm200.c new file mode 100644 index 000000000..a13f56a74 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/arch/maxwell/kern_bus_gm200.c @@ -0,0 +1,926 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "platform/chipset/chipset.h" +#include "mem_mgr/vaspace.h" +#include "gpu/gpu.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/bus/p2p_api.h" +#include "gpu/bif/kernel_bif.h" + +#include "published/maxwell/gm200/dev_nv_p2p.h" + +// Defines for PCIE P2P +#define PCIE_P2P_MAX_WRITE_MAILBOX_ADDR \ + ((PCIE_P2P_WRITE_MAILBOX_SIZE << DRF_SIZE(NV_P2P_WMBOX_ADDR_ADDR)) - \ + PCIE_P2P_WRITE_MAILBOX_SIZE) + +/*! + * @brief Setup the mailboxes of 2 GPUs so that the local GPU can access remote GPU. + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[in] local2Remote Local peer ID of pRemoteGpu on pLocalGpu + * @param[in] remote2Local Remote peer ID of pLocalGpu on pRemoteGpu + * + * @return void + */ +void +kbusSetupMailboxes_GM200 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 local2Remote, + NvU32 remote2Local +) +{ + PMEMORY_DESCRIPTOR *ppMemDesc = NULL; + RmPhysAddr localP2PDomainRemoteAddr; + RmPhysAddr remoteP2PDomainLocalAddr; + RmPhysAddr remoteWMBoxLocalAddr; + NvU64 remoteWMBoxAddrU64; + NvBool bNeedWarBug999673 = kbusNeedWarForBug999673_HAL(pGpu0, pKernelBus0, pGpu1) || + kbusNeedWarForBug999673_HAL(pGpu1, pKernelBus1, pGpu0); + RM_API *pRmApi0 = GPU_GET_PHYSICAL_RMAPI(pGpu0); + RM_API *pRmApi1 = GPU_GET_PHYSICAL_RMAPI(pGpu1); + NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS params0 = {0}; + NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS params1 = {0}; + NV_STATUS status; + + NV_ASSERT_OR_RETURN_VOID(local2Remote < P2P_MAX_NUM_PEERS); + NV_ASSERT_OR_RETURN_VOID(remote2Local < P2P_MAX_NUM_PEERS); + + // Ensure we have the correct bidirectional peer mapping + NV_ASSERT_OR_RETURN_VOID(pKernelBus1->p2pPcie.busPeer[remote2Local].remotePeerId == + local2Remote); + NV_ASSERT_OR_RETURN_VOID(pKernelBus0->p2pPcie.busPeer[local2Remote].remotePeerId == + remote2Local); + + ppMemDesc = &pKernelBus0->p2pPcie.busPeer[local2Remote].pRemoteWMBoxMemDesc; + remoteWMBoxLocalAddr = kbusSetupMailboxAccess_HAL(pGpu1, pKernelBus1, + pGpu0, remote2Local, + ppMemDesc); + NV_ASSERT_OR_RETURN_VOID(remoteWMBoxLocalAddr != ~0ULL); + + ppMemDesc = &pKernelBus1->p2pPcie.busPeer[remote2Local].pRemoteP2PDomMemDesc; + localP2PDomainRemoteAddr = kbusSetupP2PDomainAccess_HAL(pGpu0, + pKernelBus0, + 
pGpu1, + ppMemDesc); + NV_ASSERT_OR_RETURN_VOID(localP2PDomainRemoteAddr != ~0ULL); + + ppMemDesc = &pKernelBus0->p2pPcie.busPeer[local2Remote].pRemoteP2PDomMemDesc; + remoteP2PDomainLocalAddr = kbusSetupP2PDomainAccess_HAL(pGpu1, + pKernelBus1, + pGpu0, + ppMemDesc); + NV_ASSERT_OR_RETURN_VOID(remoteP2PDomainLocalAddr != ~0ULL); + + // Setup the local GPU to access remote GPU's FB. + + // 0. Set write mail box data window on remote visible GPU to be + // used by local GPU. + remoteWMBoxAddrU64 = pKernelBus1->p2pPcie.writeMailboxBar1Addr + + PCIE_P2P_WRITE_MAILBOX_SIZE * remote2Local; + + // Write mailbox data window needs to be 64KB aligned. + NV_ASSERT((remoteWMBoxAddrU64 & 0xFFFF) == 0); + + // Setup PCIE P2P Mailbox on local GPU + params0.local2Remote = local2Remote; + params0.remote2Local = remote2Local; + params0.localP2PDomainRemoteAddr = localP2PDomainRemoteAddr; + params0.remoteP2PDomainLocalAddr = remoteP2PDomainLocalAddr; + params0.remoteWMBoxLocalAddr = remoteWMBoxLocalAddr; + params0.p2pWmbTag = 0; + params0.bNeedWarBug999673 = bNeedWarBug999673; + + status = pRmApi0->Control(pRmApi0, + pGpu0->hInternalClient, + pGpu0->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL, + ¶ms0, + sizeof(NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_LOCAL_PARAMS)); + NV_ASSERT(status == NV_OK); + + // Setup PCIE P2P Mailbox on remote GPU + params1.local2Remote = local2Remote; + params1.remote2Local = remote2Local; + params1.localP2PDomainRemoteAddr = localP2PDomainRemoteAddr; + params1.remoteP2PDomainLocalAddr = remoteP2PDomainLocalAddr; + params1.remoteWMBoxAddrU64 = remoteWMBoxAddrU64; + params1.p2pWmbTag = params0.p2pWmbTag; + + status = pRmApi1->Control(pRmApi1, + pGpu1->hInternalClient, + pGpu1->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE, + ¶ms1, + sizeof(NV2080_CTRL_CMD_INTERNAL_BUS_SETUP_P2P_MAILBOX_REMOTE_PARAMS)); + NV_ASSERT(status == NV_OK); + + kbusWriteP2PWmbTag_HAL(pGpu1, pKernelBus1, remote2Local, params0.p2pWmbTag); +} + +void +kbusWriteP2PWmbTag_GM200 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 remote2Local, + NvU64 p2pWmbTag +) +{ + // See bug 3558208 comment 34 and 50 + GPU_REG_RD32(pGpu, NV_P2P_WREQMB_L(remote2Local)); + GPU_REG_WR32(pGpu, NV_P2P_WREQMB_L(remote2Local), NvU64_LO32(p2pWmbTag)); + GPU_REG_WR32(pGpu, NV_P2P_WREQMB_H(remote2Local), NvU64_HI32(p2pWmbTag)); +} + +RmPhysAddr +kbusSetupP2PDomainAccess_GM200 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + PMEMORY_DESCRIPTOR *ppP2PDomMemDesc +) +{ + return kbusSetupPeerBarAccess(pGpu0, pGpu1, + pGpu0->busInfo.gpuPhysAddr + DRF_BASE(NV_P2P), + DRF_SIZE(NV_P2P), ppP2PDomMemDesc); +} + +/*! 
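+ * For illustration: the window this helper exposes to the remote peer is a
+ * fixed-size slice of the local GPU's BAR1 write-mailbox area. A rough sketch
+ * of the physical address handed to kbusSetupPeerBarAccess() below (peerId
+ * stands in for the local2Remote argument):
+ *
+ *     RmPhysAddr base = gpumgrGetGpuPhysFbAddr(pGpu0) +
+ *                       pKernelBus0->p2pPcie.writeMailboxBar1Addr +
+ *                       (RmPhysAddr)PCIE_P2P_WRITE_MAILBOX_SIZE * peerId;
+ *     // one PCIE_P2P_WRITE_MAILBOX_SIZE-byte window per peer;
+ *     // kbusSetupMailboxes_GM200() asserts the per-peer offset is 64KB aligned
+ *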
+ * @brief Creates a mapping for the remote peer to access its mailbox in + * the local GPU's BAR1 + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] local2Remote Peer ID (local to remote) + * @param[out] ppWMBoxMemDesc + * + */ +RmPhysAddr +kbusSetupMailboxAccess_GM200 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + NvU32 local2Remote, + PMEMORY_DESCRIPTOR *ppWMBoxMemDesc +) +{ + return kbusSetupPeerBarAccess(pGpu0, pGpu1, + gpumgrGetGpuPhysFbAddr(pGpu0) + + pKernelBus0->p2pPcie.writeMailboxBar1Addr + + PCIE_P2P_WRITE_MAILBOX_SIZE * local2Remote, + PCIE_P2P_WRITE_MAILBOX_SIZE, ppWMBoxMemDesc); +} + +void +kbusDestroyPeerAccess_GM200 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 peerNum +) +{ + if (pKernelBus->p2pPcie.busPeer[peerNum].pRemoteWMBoxMemDesc != NULL) + { + memdescDestroy(pKernelBus->p2pPcie.busPeer[peerNum].pRemoteWMBoxMemDesc); + pKernelBus->p2pPcie.busPeer[peerNum].pRemoteWMBoxMemDesc = NULL; + } + + if (pKernelBus->p2pPcie.busPeer[peerNum].pRemoteP2PDomMemDesc != NULL) + { + memdescDestroy(pKernelBus->p2pPcie.busPeer[peerNum].pRemoteP2PDomMemDesc); + pKernelBus->p2pPcie.busPeer[peerNum].pRemoteP2PDomMemDesc = NULL; + } +} + +/*! + * @brief Returns the P2P mailbox attributes such as size, aligment, max offset. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[out] pMailboxAreaSize NvU32 pointer. Can be NULL + * @param[out] pMailboxAlignment NvU32 pointer. Can be NULL + * @param[out] pMailboxMaxOffset64KB NvU32 pointer. Can be NULL + * + * Returns the P2P mailbox attributes such as: + * - pMailboxAreaSize: total size + * - pMailboxAlignment: aligment + * - pMailboxMaxOffset: max supported offset + * + * return void + */ +void +kbusGetP2PMailboxAttributes_GM200 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32* pMailboxAreaSize, + NvU32* pMailboxAlignmentSize, + NvU32* pMailboxBar1MaxOffset64KB +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + + // Initialize null values by default + if (pMailboxAreaSize != NULL) + { + *pMailboxAreaSize = 0; + } + if (pMailboxAlignmentSize != NULL) + { + *pMailboxAlignmentSize = 0; + } + if (pMailboxBar1MaxOffset64KB != NULL) + { + *pMailboxBar1MaxOffset64KB = 0; + } + + if (pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED) && + pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED)) + { + // Return null values + return; + } + + // Retrieve attributes + if (pMailboxAreaSize != NULL) + { + *pMailboxAreaSize = PCIE_P2P_WRITE_MAILBOX_SIZE * P2P_MAX_NUM_PEERS; + } + + if (pMailboxAlignmentSize != NULL) + { + // Write mailbox data window needs to be 64KB aligned. + *pMailboxAlignmentSize = 0x10000; + } + + if (pMailboxBar1MaxOffset64KB != NULL) + { + // Max offset, exclusive + *pMailboxBar1MaxOffset64KB = + NvU64_LO32( + (PCIE_P2P_MAX_WRITE_MAILBOX_ADDR + PCIE_P2P_WRITE_MAILBOX_SIZE) >> 16 + ); + } + + return; +} + +/*! 
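+ * The connection type is carried in the attributes word; on GM200 only the
+ * PCIe mailbox path is handled here. A minimal caller-side sketch, assuming
+ * pGpu0/pGpu1 and their KernelBus pointers are in scope and using the usual
+ * DRF helpers (leaving both peer IDs as BUS_INVALID_PEER lets RM assign them
+ * dynamically):
+ *
+ *     NvU32     peer0 = BUS_INVALID_PEER;
+ *     NvU32     peer1 = BUS_INVALID_PEER;
+ *     NvU32     attr  = DRF_DEF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _PCIE);
+ *     NV_STATUS status;
+ *
+ *     status = kbusCreateP2PMapping_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1,
+ *                                       &peer0, &peer1, attr);
+ *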
+ * @brief Create PCIE Mailbox P2P mapping between 2 GPUs + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[out] peer0 NvU32 pointer, peerId on pGpu0 + * @param[out] peer1 NvU32 pointer, peerId on pGpu1 + * @param[in] attributes Sepcial attributes for the mapping + * + * @return NV_STATUS + */ +NV_STATUS +kbusCreateP2PMapping_GM200 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 *peer0, + NvU32 *peer1, + NvU32 attributes +) +{ + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _PCIE, attributes)) + { + return kbusCreateP2PMappingForMailbox_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes); + } + + NV_PRINTF(LEVEL_ERROR, "P2P type %d is not supported\n", DRF_VAL(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, attributes)); + + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Create PCIE (not NVLINK) P2P mapping between 2 GPUs + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[out] peer0 Peer ID (local to remote) + * @param[out] peer1 Peer ID (remote to local) + * @param[in] attributes Unused + * + * @return NV_STATUS + */ +NV_STATUS +kbusCreateP2PMappingForMailbox_GM200 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 *peer0, + NvU32 *peer1, + NvU32 attributes +) +{ + RM_API *pRmApi; + NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS params; + NvU32 gpuInst0, gpuInst1; + + if (peer0 == NULL || peer1 == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + gpuInst0 = gpuGetInstance(pGpu0); + gpuInst1 = gpuGetInstance(pGpu1); + + // Is a specific peer ID mapping requested? + if ((*peer0 != BUS_INVALID_PEER) && (*peer1 != BUS_INVALID_PEER)) + { + NV_ASSERT_OR_RETURN(*peer0 < P2P_MAX_NUM_PEERS, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(*peer1 < P2P_MAX_NUM_PEERS, NV_ERR_INVALID_ARGUMENT); + // + // Ensure that if the requested peer ID is already in use, it + // corresponds to the requested remote GPU. 
+ // + if (!pKernelBus0->p2pPcie.busPeer[*peer0].bReserved && + !pKernelBus1->p2pPcie.busPeer[*peer1].bReserved) + { + if ((pKernelBus0->p2pPcie.busPeer[*peer0].refCount == 0) && + (pKernelBus1->p2pPcie.busPeer[*peer1].refCount == 0)) + { + goto busCreateP2PMapping_setupMapping; + } + + if (((pKernelBus0->p2pPcie.peerNumberMask[gpuInst1] & NVBIT(*peer0)) != 0) && + ((pKernelBus1->p2pPcie.peerNumberMask[gpuInst0] & NVBIT(*peer1)) != 0)) + { + pKernelBus0->p2pPcie.busPeer[*peer0].refCount++; + pKernelBus1->p2pPcie.busPeer[*peer1].refCount++; + + NV_ASSERT(pKernelBus0->p2pPcie.busPeer[*peer0].remotePeerId == *peer1); + NV_ASSERT(pKernelBus1->p2pPcie.busPeer[*peer1].remotePeerId == *peer0); + + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu0); + portMemSet(¶ms, 0, sizeof(params)); + params.programPciePeerMask = NVBIT32(*peer0); + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, + pGpu0->hInternalClient, + pGpu0->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + ¶ms, + sizeof(params))); + + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu1); + portMemSet(¶ms, 0, sizeof(params)); + params.programPciePeerMask = NVBIT32(*peer1); + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, + pGpu1->hInternalClient, + pGpu1->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + ¶ms, + sizeof(params))); + + return NV_OK; + } + } + + NV_PRINTF(LEVEL_WARNING, + "explicit peer IDs %u and %u requested for GPU%u and GPU%u are not " + "available, will assign dynamically\n", *peer0, *peer1, + gpuInst0, gpuInst1); + } + + // Does a mapping already exist between these GPUs? + if ((pKernelBus0->p2pPcie.peerNumberMask[gpuInst1] != 0) && + (pKernelBus1->p2pPcie.peerNumberMask[gpuInst0] != 0)) + { + *peer0 = pKernelBus0->p2pPcie.peerNumberMask[gpuInst1]; + LOWESTBITIDX_32(*peer0); + NV_ASSERT_OR_RETURN(*peer0 < P2P_MAX_NUM_PEERS, + NV_ERR_INVALID_STATE); + + *peer1 = pKernelBus0->p2pPcie.busPeer[*peer0].remotePeerId; + NV_ASSERT_OR_RETURN(*peer1 < P2P_MAX_NUM_PEERS, + NV_ERR_INVALID_STATE); + + NV_ASSERT_OR_RETURN(pKernelBus1->p2pPcie.busPeer[*peer1].remotePeerId == *peer0, + NV_ERR_INVALID_STATE); + + pKernelBus0->p2pPcie.busPeer[*peer0].refCount++; + pKernelBus1->p2pPcie.busPeer[*peer1].refCount++; + + NV_ASSERT(!pKernelBus0->p2pPcie.busPeer[*peer0].bReserved); + NV_ASSERT(!pKernelBus1->p2pPcie.busPeer[*peer1].bReserved); + + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu0); + portMemSet(¶ms, 0, sizeof(params)); + params.programPciePeerMask = NVBIT32(*peer0); + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, + pGpu0->hInternalClient, + pGpu0->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + ¶ms, + sizeof(params))); + + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu1); + portMemSet(¶ms, 0, sizeof(params)); + params.programPciePeerMask = NVBIT32(*peer1); + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, + pGpu1->hInternalClient, + pGpu1->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + ¶ms, + sizeof(params))); + + return NV_OK; + } + + // We'd better not hit this case (one gpu has mapping and other doesn't). 
+ NV_ASSERT((pKernelBus0->p2pPcie.peerNumberMask[gpuInst1] == 0) && + (pKernelBus1->p2pPcie.peerNumberMask[gpuInst0] == 0)); + + *peer0 = BUS_INVALID_PEER; + *peer1 = BUS_INVALID_PEER; + + // If we're in loopback mode, check for specified peer ID + if ((pGpu0 == pGpu1) && pKernelBus0->p2pMapSpecifyId) + { + if ((pKernelBus0->p2pPcie.busPeer[pKernelBus0->p2pMapPeerId].refCount == 0) && + (!pKernelBus0->p2pPcie.busPeer[pKernelBus0->p2pMapPeerId].bReserved) && + (pKernelBus1->p2pPcie.busPeer[pKernelBus1->p2pMapPeerId].refCount == 0)) + { + *peer0 = *peer1 = pKernelBus0->p2pMapPeerId; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "- ERROR: Peer ID %d is already in use. Default RM P2P mapping will be used.\n", + pKernelBus0->p2pMapPeerId); + } + } + + // + // These loops will handle loop back (pGpu0 == pGpu1) since they will find + // the same free peer twice on the same GPU. + // + if (*peer0 == BUS_INVALID_PEER) + { + *peer0 = kbusGetUnusedPciePeerId_HAL(pGpu0, pKernelBus0); + } + + if (*peer1 == BUS_INVALID_PEER) + { + *peer1 = kbusGetUnusedPciePeerId_HAL(pGpu1, pKernelBus1); + } + + // couldn't find an available peer on both gpus + if (*peer0 == BUS_INVALID_PEER || + *peer1 == BUS_INVALID_PEER) + { + *peer0 = BUS_INVALID_PEER; + *peer1 = BUS_INVALID_PEER; + NV_PRINTF(LEVEL_ERROR, "no peer IDs available\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + +busCreateP2PMapping_setupMapping: + pKernelBus0->p2pPcie.busPeer[*peer0].remotePeerId = *peer1; + pKernelBus0->p2pPcie.peerNumberMask[gpuInst1] |= NVBIT(*peer0); + pKernelBus1->p2pPcie.busPeer[*peer1].remotePeerId = *peer0; + pKernelBus1->p2pPcie.peerNumberMask[gpuInst0] |= NVBIT(*peer1); + + NV_ASSERT(pKernelBus0->p2pPcie.busPeer[*peer0].refCount == 0); + NV_ASSERT(!pKernelBus0->p2pPcie.busPeer[*peer0].bReserved); + NV_ASSERT(pKernelBus1->p2pPcie.busPeer[*peer1].refCount == 0); + NV_ASSERT(!pKernelBus1->p2pPcie.busPeer[*peer1].bReserved); + + // + // Note if this is loopback we will have a refCount of 2. This will be + // accounted for in the free. + // + pKernelBus0->p2pPcie.busPeer[*peer0].refCount++; + pKernelBus1->p2pPcie.busPeer[*peer1].refCount++; + + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu0); + portMemSet(¶ms, 0, sizeof(params)); + params.programPciePeerMask = NVBIT32(*peer0); + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, + pGpu0->hInternalClient, + pGpu0->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + ¶ms, + sizeof(params))); + + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu1); + portMemSet(¶ms, 0, sizeof(params)); + params.programPciePeerMask = NVBIT32(*peer1); + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, + pGpu1->hInternalClient, + pGpu1->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + ¶ms, + sizeof(params))); + + NV_PRINTF(LEVEL_INFO, + "added PCIe P2P mapping between GPU%u (peer %u) and GPU%u (peer %u)\n", + gpuInst0, *peer0, gpuInst1, *peer1); + + kbusSetupMailboxes_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, *peer0, *peer1); + kbusSetupMailboxes_HAL(pGpu1, pKernelBus1, pGpu0, pKernelBus0, *peer1, *peer0); + + return NV_OK; +} + +/*! + * Does it need P2P WAR for bug 999673? 
+ * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pRemoteGpu + * + * @return NvBool + * + */ +NvBool +kbusNeedWarForBug999673_GM200 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + OBJGPU *pRemoteGpu +) +{ + OBJCL *pCl = SYS_GET_CL(SYS_GET_INSTANCE()); + NvU8 pciSwitchBus = 0; + + // Return if WAR is not needed + if (!pCl->getProperty(pCl, PDB_PROP_CL_BUG_999673_P2P_ARBITRARY_SPLIT_WAR)) + { + return NV_FALSE; + } + + // See if we have a known bridge + clFindCommonDownstreamBR(pGpu, pRemoteGpu, pCl, &pciSwitchBus); + if (pciSwitchBus != 0xFF) + { + // P2P does not go through the chipset needing the WAR. + return NV_FALSE; + } + + return NV_TRUE; +} + +/*! + * @brief Remove the P2P mapping to a given peer GPU + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[in] peer0 Peer ID (local to remote) + * @param[in] peer1 Peer ID (remote to local) + * @param[in] attributes Sepcial attributes for the mapping + * + * return NV_OK on success + */ +NV_STATUS +kbusRemoveP2PMapping_GM200 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 peer0, + NvU32 peer1, + NvU32 attributes +) +{ + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _PCIE, attributes)) + { + return kbusRemoveP2PMappingForMailbox_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes); + } + + NV_PRINTF(LEVEL_ERROR, "P2P type %d is not supported\n", DRF_VAL(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, attributes)); + + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Create a P2P mapping to a given peer GPU + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[out] peer0 Peer ID (local to remote) + * @param[out] peer1 Peer ID (remote to local) + * @param[in] attributes Sepcial attributes for the mapping + * + * return NV_OK on success + */ +NV_STATUS +kbusRemoveP2PMappingForMailbox_GM200 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 peer0, + NvU32 peer1, + NvU32 attributes +) +{ + NvU32 gpuInst0 = gpuGetInstance(pGpu0); + NvU32 gpuInst1 = gpuGetInstance(pGpu1); + + // a non-existent mapping + if(peer0 == BUS_INVALID_PEER || + peer1 == BUS_INVALID_PEER) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Do the peer IDs correspond to the expected GPUs? + if (((pKernelBus0->p2pPcie.peerNumberMask[gpuInst1] & NVBIT(peer0)) == 0) || + ((pKernelBus1->p2pPcie.peerNumberMask[gpuInst0] & NVBIT(peer1)) == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // a programming error somewhere in RM. + // A mapping exists with a refCount == 0 + // + if (pKernelBus0->p2pPcie.busPeer[peer0].refCount == 0 || + pKernelBus1->p2pPcie.busPeer[peer1].refCount == 0) + { + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + + // + // Again a programming error. The mapping should have the same refCount on + // both GPUs. 
+ // + if (pKernelBus0->p2pPcie.busPeer[peer0].refCount != + pKernelBus1->p2pPcie.busPeer[peer1].refCount) + { + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + + + --pKernelBus1->p2pPcie.busPeer[peer1].refCount; + if (--pKernelBus0->p2pPcie.busPeer[peer0].refCount == 0) + { + NV_PRINTF(LEVEL_INFO, + "Removing mapping GPU %d Peer %d <-> GPU %d Peer %d\n", + gpuInst0, peer0, gpuInst1, peer1); + + pKernelBus0->p2pPcie.peerNumberMask[gpuInst1] &= ~NVBIT(peer0); + pKernelBus1->p2pPcie.peerNumberMask[gpuInst0] &= ~NVBIT(peer1); + + kbusDestroyMailbox(pGpu0, pKernelBus0, pGpu1, peer0); + kbusDestroyMailbox(pGpu1, pKernelBus1, pGpu0, peer1); + } + else + { + NV_PRINTF(LEVEL_INFO, + "Decremented refCount for Mapping GPU %d Peer %d <-> GPU %d Peer %d " + "New Count: %d\n", gpuInst0, peer0, gpuInst1, peer1, + pKernelBus0->p2pPcie.busPeer[peer0].refCount); + } + + return NV_OK; +} + +/*! + * @brief Reserve peer IDs for nvlink usage + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] peerMask Mask of peer IDs to reserve + * + * return NV_OK on success + */ +NV_STATUS +kbusReserveP2PPeerIds_GM200 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 peerMask +) +{ + NvU32 peerId = 0; + + FOR_EACH_INDEX_IN_MASK(32, peerId, peerMask) + { + NV_PRINTF(LEVEL_INFO, + "reserving peer ID %u on GPU%u for NVLINK/C2C use\n", peerId, + gpuGetInstance(pGpu)); + if (pKernelBus->p2pPcie.busPeer[peerId].refCount != 0) + { + return NV_ERR_IN_USE; + } + + pKernelBus->p2pPcie.busPeer[peerId].bReserved = NV_TRUE; + } + FOR_EACH_INDEX_IN_MASK_END; + + return NV_OK; +} + +/*! + * @brief Sets the BAR1 P2P mailbox address and size + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] mailboxBar1Addr NvU64 + * @param[in] mailboxTotalSize NvU32 + * + * @returns NV_STATUS + */ +NV_STATUS +kbusSetP2PMailboxBar1Area_GM200 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU64 mailboxBar1Addr, + NvU32 mailboxTotalSize +) +{ + NvU32 mailboxAreaSizeReq; + NvU32 mailboxAlignmentSizeReq; + NvU32 mailboxBar1MaxOffset64KBReq; + + + if (!kbusIsP2pMailboxClientAllocated(pKernelBus)) + { + // P2P mailbox area already allocated by RM. Nothing to do. + return NV_OK; + } + + if (mailboxTotalSize == 0) + { + NV_PRINTF(LEVEL_ERROR, "P2P mailbox area size is not set\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + kbusGetP2PMailboxAttributes_HAL(pGpu, pKernelBus, &mailboxAreaSizeReq, &mailboxAlignmentSizeReq, &mailboxBar1MaxOffset64KBReq); + + // Mailbox size + NV_ASSERT_OR_RETURN(mailboxTotalSize == mailboxAreaSizeReq, NV_ERR_INVALID_ARGUMENT); + // Mailbox alignment + NV_ASSERT_OR_RETURN((mailboxBar1Addr & (mailboxAlignmentSizeReq - 1)) == 0, NV_ERR_INVALID_ARGUMENT); + // Mailbox offset limit + NV_ASSERT_OR_RETURN((mailboxBar1Addr + mailboxTotalSize) < (((NvU64)mailboxBar1MaxOffset64KBReq) << 16), + NV_ERR_INVALID_ARGUMENT); + + if (pKernelBus->p2pPcie.writeMailboxBar1Addr != PCIE_P2P_INVALID_WRITE_MAILBOX_ADDR) + { + NV_ASSERT_OR_RETURN(mailboxBar1Addr == pKernelBus->p2pPcie.writeMailboxBar1Addr, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(mailboxTotalSize == pKernelBus->p2pPcie.writeMailboxTotalSize, NV_ERR_INVALID_ARGUMENT); + return NV_OK; + } + + pKernelBus->p2pPcie.writeMailboxBar1Addr = mailboxBar1Addr; + pKernelBus->p2pPcie.writeMailboxTotalSize = mailboxTotalSize; + pKernelBus->bP2pInitialized = NV_TRUE; + + return NV_OK; +} + + +/*! 
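+ * This is the teardown counterpart of kbusSetP2PMailboxBar1Area_GM200() above;
+ * the area is only released once no peer holds a refCount on it. For reference,
+ * a client-allocated mailbox area must satisfy the checks the setter performs,
+ * roughly (with NvU32 areaSize, alignment, maxOffset64KB):
+ *
+ *     kbusGetP2PMailboxAttributes_HAL(pGpu, pKernelBus,
+ *                                     &areaSize, &alignment, &maxOffset64KB);
+ *     // mailboxTotalSize == areaSize
+ *     // (mailboxBar1Addr & (alignment - 1)) == 0
+ *     // mailboxBar1Addr + mailboxTotalSize < ((NvU64)maxOffset64KB << 16)
+ *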
+ * @brief Unset the BAR1 P2P mailbox address and size + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns void + */ +void +kbusUnsetP2PMailboxBar1Area_GM200 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NvU32 i; + + if (!kbusIsP2pMailboxClientAllocated(pKernelBus)) + { + // P2P mailbox area already allocated by RM. Nothing to do. + return; + } + + for (i = 0; i < P2P_MAX_NUM_PEERS; ++i) + { + if (pKernelBus->p2pPcie.busPeer[i].refCount) + break; + } + + if (i == P2P_MAX_NUM_PEERS) + { + pKernelBus->p2pPcie.writeMailboxBar1Addr = PCIE_P2P_INVALID_WRITE_MAILBOX_ADDR; + pKernelBus->p2pPcie.writeMailboxTotalSize = 0; + pKernelBus->bP2pInitialized = NV_FALSE; + } + + return; +} + +NV_STATUS +kbusAllocP2PMailboxBar1_GM200 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid, + NvU64 vaRangeMax +) +{ + OBJGPU *pParentGpu; + NvU64 vaAllocMax; + NV_STATUS status = NV_OK; + + VAS_ALLOC_FLAGS flags = {0}; + + pParentGpu = gpumgrGetParentGPU(pGpu); + + if (!gpumgrIsParentGPU(pGpu)) + { + flags.bFixedAddressAllocate = NV_TRUE; + pKernelBus->p2pPcie.writeMailboxBar1Addr = GPU_GET_KERNEL_BUS(pParentGpu)->p2pPcie.writeMailboxBar1Addr; + } + + pKernelBus->p2pPcie.writeMailboxTotalSize = + PCIE_P2P_WRITE_MAILBOX_SIZE * P2P_MAX_NUM_PEERS; + vaAllocMax = NV_MIN(vaRangeMax, + PCIE_P2P_MAX_WRITE_MAILBOX_ADDR + PCIE_P2P_WRITE_MAILBOX_SIZE - 1); + + status = vaspaceAlloc(pKernelBus->bar1[gfid].pVAS, + pKernelBus->p2pPcie.writeMailboxTotalSize, + PCIE_P2P_WRITE_MAILBOX_SIZE, + 0, vaAllocMax, + 0, + flags, + &pKernelBus->p2pPcie.writeMailboxBar1Addr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "cannot allocate vaspace for P2P write mailboxes (0x%x)\n", + status); + goto kbusAllocP2PMailboxBar1_failed; + } + + NV_ASSERT(GPU_GET_KERNEL_BUS(pParentGpu)->p2pPcie.writeMailboxBar1Addr == pKernelBus->p2pPcie.writeMailboxBar1Addr); + + NV_PRINTF(LEVEL_INFO, + "[GPU%u] P2P write mailboxes allocated at BAR1 addr = 0x%llx\n", + gpuGetInstance(pGpu), pKernelBus->p2pPcie.writeMailboxBar1Addr); + +kbusAllocP2PMailboxBar1_failed: + if (status != NV_OK) + { + pKernelBus->p2pPcie.writeMailboxBar1Addr = PCIE_P2P_INVALID_WRITE_MAILBOX_ADDR; + pKernelBus->p2pPcie.writeMailboxTotalSize = 0; + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/bus/arch/pascal/kern_bus_gp100.c b/src/nvidia/src/kernel/gpu/bus/arch/pascal/kern_bus_gp100.c new file mode 100644 index 000000000..d1bbeccae --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/arch/pascal/kern_bus_gp100.c @@ -0,0 +1,752 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "mem_mgr/vaspace.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/bus/p2p_api.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" + +/*! + * @brief Create a P2P mapping to a given peer GPU + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[out] peer0 Peer ID (local to remote) + * @param[out] peer1 Peer ID (remote to local) + * @param[in] attributes Sepcial attributes for the mapping + * + * return NV_OK on success + */ +NV_STATUS +kbusCreateP2PMapping_GP100 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 *peer0, + NvU32 *peer1, + NvU32 attributes +) +{ + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _NVLINK, attributes) || + FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _NVLINK_INDIRECT, attributes)) + { + return kbusCreateP2PMappingForNvlink_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes); + } + + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _PCIE, attributes)) + { + return kbusCreateP2PMappingForMailbox_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes); + } + + NV_PRINTF(LEVEL_ERROR, "P2P type %d is not supported\n", DRF_VAL(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, attributes)); + + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Create a P2P mapping to a given peer GPU + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[out] peer0 Peer ID (local to remote) + * @param[out] peer1 Peer ID (remote to local) + * @param[in] attributes Sepcial attributes for the mapping + * + * return NV_OK on success + */ +NV_STATUS +kbusCreateP2PMappingForNvlink_GP100 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 *peer0, + NvU32 *peer1, + NvU32 attributes +) +{ + NvU32 gpu0Instance = gpuGetInstance(pGpu0); + NvU32 gpu1Instance = gpuGetInstance(pGpu1); + NvBool bLoopback = (pGpu0 == pGpu1); + KernelNvlink *pKernelNvlink0 = GPU_GET_KERNEL_NVLINK(pGpu0); + KernelNvlink *pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + NV_STATUS status = NV_OK; + NvU32 nvlinkPeer0 = BUS_INVALID_PEER; + NvU32 nvlinkPeer1 = BUS_INVALID_PEER; + + NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS params; + + if (peer0 == NULL || peer1 == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Get the peer ID pGpu0 should use for P2P over NVLINK to pGpu1 + if ((status = kbusGetNvlinkP2PPeerId_HAL(pGpu0, pKernelBus0, + pGpu1, pKernelBus1, + &nvlinkPeer0)) != NV_OK) + { + return status; + } + + // Get the peer ID pGpu1 should use for P2P over NVLINK to pGpu0 + if ((status = kbusGetNvlinkP2PPeerId_HAL(pGpu1, pKernelBus1, + pGpu0, pKernelBus0, + &nvlinkPeer1)) != NV_OK) + { + return status; + } + + if ((pKernelNvlink0 == NULL) || (pKernelNvlink1 == NULL) || + (nvlinkPeer0 == BUS_INVALID_PEER) || (nvlinkPeer1 == BUS_INVALID_PEER)) + { + return NV_ERR_INVALID_REQUEST; + } + + // Set the default RM mapping if peer id's are not explicitly provided + if (*peer0 == BUS_INVALID_PEER || *peer1 == BUS_INVALID_PEER) + { + if (bLoopback) + { 
+ if (pKernelBus0->p2pMapSpecifyId) + { + *peer0 = *peer1 = pKernelBus0->p2pMapPeerId; + } + else + { + // If no static mapping is found, set peer id as 0 for loopback + *peer0 = *peer1 = 0; + } + } + else + { + *peer0 = nvlinkPeer0; + *peer1 = nvlinkPeer1; + } + + NV_PRINTF(LEVEL_INFO, "- P2P: Using Default RM mapping for P2P.\n"); + } + + // + // Does the mapping already exist between the given pair of GPUs using the peerIDs + // *peer0 and *peer1 respectively ? + // + if ((pKernelBus0->p2p.busNvlinkPeerNumberMask[gpu1Instance] & NVBIT(*peer0)) && + (pKernelBus1->p2p.busNvlinkPeerNumberMask[gpu0Instance] & NVBIT(*peer1))) + { + // + // Increment the mapping refcount per peerID - since there is another usage + // of a mapping that is using this peerID + // + pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[*peer0]++; + pKernelBus1->p2p.busNvlinkMappingRefcountPerPeerId[*peer1]++; + + // + // Increment the mapping refcount per GPU - since there is another usage of + // the mapping to the given remote GPU + // + pKernelBus0->p2p.busNvlinkMappingRefcountPerGpu[gpu1Instance]++; + pKernelBus1->p2p.busNvlinkMappingRefcountPerGpu[gpu0Instance]++; + + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _LINK_TYPE, _SPA, attributes)) + { + pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[*peer0]++; + pKernelBus1->p2p.busNvlinkMappingRefcountPerPeerId[*peer1]++; + } + + NV_PRINTF(LEVEL_INFO, + "- P2P: Peer mapping is already in use for gpu instances %x and %x " + "with peer id's %d and %d. Increasing the mapping refcounts for the" + " peer IDs to %d and %d respectively.\n", + gpu0Instance, gpu1Instance, *peer0, *peer1, + pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[*peer0], + pKernelBus1->p2p.busNvlinkMappingRefcountPerPeerId[*peer1]); + + return NV_OK; + } + + // + // Reached here implies the mapping between the given pair of GPUs using the peerIDs + // *peer0 and *peer1 does not exist. Create the mapping + // + + // If we're in loopback mode check for specified peer ID arg from RM or MODS + if (bLoopback && pKernelBus0->p2pMapSpecifyId) + { + if ((pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[pKernelBus0->p2pMapPeerId] == 0) && + (pKernelBus1->p2p.busNvlinkMappingRefcountPerPeerId[pKernelBus1->p2pMapPeerId] == 0)) + { + *peer0 = *peer1 = pKernelBus0->p2pMapPeerId; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "- ERROR: Peer ID %d is already in use. 
Default RM P2P mapping " + "will be used for loopback connection.\n", + pKernelBus0->p2pMapPeerId); + } + } + + // Set the peer IDs in the corresponding peer number masks + pKernelBus0->p2p.busNvlinkPeerNumberMask[gpu1Instance] |= NVBIT(*peer0); + pKernelBus1->p2p.busNvlinkPeerNumberMask[gpu0Instance] |= NVBIT(*peer1); + + // + // Increment the mapping refcount per peerID - since there is a new mapping that + // will use this peerID + // + pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[*peer0]++; + pKernelBus1->p2p.busNvlinkMappingRefcountPerPeerId[*peer1]++; + + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _LINK_TYPE, _SPA, attributes)) + { + pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[*peer0]++; + pKernelBus1->p2p.busNvlinkMappingRefcountPerPeerId[*peer1]++; + } + + // + // Increment the mapping refcount per GPU - since there a new mapping now to the + // given remote GPU + // + pKernelBus0->p2p.busNvlinkMappingRefcountPerGpu[gpu1Instance]++; + pKernelBus1->p2p.busNvlinkMappingRefcountPerGpu[gpu0Instance]++; + + NV_PRINTF(LEVEL_INFO, + "added NVLink P2P mapping between GPU%u (peer %u) and GPU%u (peer %u)\n", + gpu0Instance, *peer0, gpu1Instance, *peer1); + + portMemSet(¶ms, 0, sizeof(params)); + params.peerMask = NVBIT(*peer0); + params.bEnable = NV_TRUE; + + // Set the NVLink USE_NVLINK_PEER fields in the LTCS registers for GPU0 + status = knvlinkExecGspRmRpc(pGpu0, pKernelNvlink0, + NV2080_CTRL_CMD_NVLINK_ENABLE_NVLINK_PEER, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d Failed to ENABLE USE_NVLINK_PEER for peer%d\n", + gpuGetInstance(pGpu0), *peer0); + + return status; + } + + portMemSet(¶ms, 0, sizeof(params)); + params.peerMask = NVBIT(*peer1); + params.bEnable = NV_TRUE; + + // Set the NVLink USE_NVLINK_PEER fields in the LTCS registers for GPU1 + status = knvlinkExecGspRmRpc(pGpu1, pKernelNvlink1, + NV2080_CTRL_CMD_NVLINK_ENABLE_NVLINK_PEER, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d Failed to ENABLE USE_NVLINK_PEER for peer%d\n", + gpuGetInstance(pGpu1), *peer1); + + return status; + } + + // Enable the peer configuration in the HSHUB config registers + knvlinkSetupPeerMapping_HAL(pGpu0, pKernelNvlink0, pGpu1, *peer0); + knvlinkSetupPeerMapping_HAL(pGpu1, pKernelNvlink1, pGpu0, *peer1); + + return NV_OK; +} + +/*! 
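+ * Refcount bookkeeping sketch for the removal below (the mirror image of the
+ * create path above; SPA-addressed mappings carry one extra per-peer-ID count):
+ *
+ *     busNvlinkMappingRefcountPerPeerId[peerId]--;       // per peer ID
+ *     busNvlinkMappingRefcountPerGpu[peerGpuInst]--;     // per remote GPU
+ *     if (busNvlinkMappingRefcountPerGpu[peerGpuInst] == 0)
+ *         busNvlinkPeerNumberMask[peerGpuInst] &= ~NVBIT(peerId);
+ *     // once busNvlinkMappingRefcountPerPeerId[peerId] reaches 0, the peer ID
+ *     // is unused: USE_NVLINK_PEER is cleared and the HSHUB mapping torn down
+ *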
+ * @brief Remove NVLink mapping to a given peer GPU + * + * @param[in] pGpu0 (Local) + * @param[in] pKernelBus0 (Local) + * @param[in] pGpu1 (Remote) + * @param[in] peerId peerID + * + * return NV_OK on success + */ +NV_STATUS +kbusRemoveNvlinkPeerMapping_GP100 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + NvU32 peerId, + NvU32 attributes +) +{ + NV_STATUS status = NV_OK; + NvU32 peerGpuInst = gpuGetInstance(pGpu1); + + // If no peer mapping exists between the GPUs, return NV_WARN_NOTHING_TO_DO + if ((pKernelBus0->p2p.busNvlinkPeerNumberMask[peerGpuInst] & NVBIT(peerId)) == 0) + { + return NV_WARN_NOTHING_TO_DO; + } + + // A programming error somewhere in RM: mapping exists with a zero refcount + if ((pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[peerId] == 0) || + (pKernelBus0->p2p.busNvlinkMappingRefcountPerGpu[peerGpuInst] == 0)) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + // Decrement the mapping refcount associated with the peerID + pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[peerId]--; + + // Decrement the mapping refcount for the given remote GPU1 + pKernelBus0->p2p.busNvlinkMappingRefcountPerGpu[peerGpuInst]--; + + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _LINK_TYPE, _SPA, attributes)) + { + pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[peerId]--; + } + + // + // If mapping refcount to remote GPU1 is 0, this implies the peerID is no + // longer used for P2P from GPU0 to GPU1. Update busNvlinkPeerNumberMask + // + if (pKernelBus0->p2p.busNvlinkMappingRefcountPerGpu[peerGpuInst] == 0) + { + NV_PRINTF(LEVEL_INFO, + "Removing mapping for GPU%u peer %u (GPU%u)\n", + gpuGetInstance(pGpu0), peerId, peerGpuInst); + + pKernelBus0->p2p.busNvlinkPeerNumberMask[peerGpuInst] &= ~NVBIT(peerId); + } + + // + // Can the peerID be freed? The peer ID can *only* be freed if it is not being + // used for P2P to any GPU. Check the mapping refcount for the given peerID + // + if (pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[peerId] == 0) + { + KernelNvlink *pKernelNvlink0 = GPU_GET_KERNEL_NVLINK(pGpu0); + + NV_ASSERT_OR_RETURN(pKernelNvlink0 != NULL, NV_ERR_NOT_SUPPORTED); + NV_ASSERT(pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[peerId] == 0); + NV_PRINTF(LEVEL_INFO, + "PeerID %u is not being used for P2P from GPU%d to any other " + "remote GPU. 
Can be freed\n", + peerId, gpuGetInstance(pGpu0)); + + // Before removing the NVLink peer mapping in HSHUB flush both ends + kbusFlush_HAL(pGpu0, pKernelBus0, BUS_FLUSH_VIDEO_MEMORY); + kbusFlush_HAL(pGpu1, GPU_GET_KERNEL_BUS(pGpu1), BUS_FLUSH_VIDEO_MEMORY); + + NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + params.peerMask = NVBIT(peerId); + params.bEnable = NV_FALSE; + + // Unset the NVLink USE_NVLINK_PEER fields in the LTCS registers for GPU0 + status = knvlinkExecGspRmRpc(pGpu0, pKernelNvlink0, + NV2080_CTRL_CMD_NVLINK_ENABLE_NVLINK_PEER, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d Failed to UNSET USE_NVLINK_PEER for peer%d\n", + gpuGetInstance(pGpu0), peerId); + + return status; + } + + // Disable the peer configuration in the HSHUB config registers + if ((pKernelNvlink0->getProperty(pKernelNvlink0, + PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING)) && + (!knvlinkIsForcedConfig(pGpu0, pKernelNvlink0))) + { + status = knvlinkRemoveMapping_HAL(pGpu0, pKernelNvlink0, NV_FALSE, NVBIT(peerId), + NV_FALSE /* bL2Entry */); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d Failed to remove hshub mapping for peer%d\n", + gpuGetInstance(pGpu0), peerId); + + return status; + } + } + + // Call knvlinkUpdateCurrentConfig to flush settings to the registers + status = knvlinkUpdateCurrentConfig(pGpu0, pKernelNvlink0); + } + + return status; +} + +/*! + * @brief Remove the P2P mapping to a given peer GPU + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[in] peer0 Peer ID (local to remote) + * @param[in] peer1 Peer ID (remote to local) + * @param[in] attributes Sepcial attributes for the mapping + * + * return NV_OK on success + */ +NV_STATUS +kbusRemoveP2PMapping_GP100 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 peer0, + NvU32 peer1, + NvU32 attributes +) +{ + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _NVLINK, attributes) || + FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _NVLINK_INDIRECT, attributes)) + { + return kbusRemoveP2PMappingForNvlink_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes); + } + + if (FLD_TEST_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _PCIE, attributes)) + { + return kbusRemoveP2PMappingForMailbox_HAL(pGpu0, pKernelBus0, pGpu1, pKernelBus1, peer0, peer1, attributes); + } + + NV_PRINTF(LEVEL_ERROR, "P2P type %d is not supported\n", DRF_VAL(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, attributes)); + + return NV_ERR_NOT_SUPPORTED; +} + +/*! 
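+ * Removal is symmetric: the NVLink peer mapping is torn down on both GPUs and,
+ * once a peer ID's mapping refcount reaches zero (and the GPU has
+ * PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING set), the peer ID reserved for
+ * NVLink is released again. Roughly:
+ *
+ *     kbusRemoveNvlinkPeerMapping_HAL(pGpu0, pKernelBus0, pGpu1, peer0, attributes);
+ *     kbusRemoveNvlinkPeerMapping_HAL(pGpu1, pKernelBus1, pGpu0, peer1, attributes);
+ *     if (pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[peer0] == 0)
+ *         kbusUnreserveP2PPeerIds_HAL(pGpu0, pKernelBus0, NVBIT(peer0));
+ *     // ...and likewise for pGpu1 / peer1
+ *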
+ * @brief Remove the P2P mapping to a given peer GPU + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[in] peer0 Peer ID (local to remote) + * @param[in] peer1 Peer ID (remote to local) + * @param[in] attributes Sepcial attributes for the mapping + */ +NV_STATUS +kbusRemoveP2PMappingForNvlink_GP100 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 peer0, + NvU32 peer1, + NvU32 attributes +) +{ + KernelNvlink *pKernelNvlink0 = GPU_GET_KERNEL_NVLINK(pGpu0); + KernelNvlink *pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + NV_STATUS status = NV_OK; + + // If there's no NVLink mapping, fall back to PCIe + if ((pKernelNvlink0 == NULL) || (pKernelNvlink1 == NULL) || + ((pKernelBus0->p2p.busNvlinkPeerNumberMask[pGpu1->gpuInstance] & NVBIT(peer0)) == 0) || + ((pKernelBus1->p2p.busNvlinkPeerNumberMask[pGpu0->gpuInstance] & NVBIT(peer1)) == 0)) + { + return NV_ERR_INVALID_STATE; + } + + // NVLink mapping exists, remove the NVLink mapping + NV_ASSERT_OK_OR_RETURN(kbusRemoveNvlinkPeerMapping_HAL(pGpu0, pKernelBus0, pGpu1, peer0, attributes)); + NV_ASSERT_OK_OR_RETURN(kbusRemoveNvlinkPeerMapping_HAL(pGpu1, pKernelBus1, pGpu0, peer1, attributes)); + + // + // The P2P mapping for both the GPUs have been destroyed. If the mapping refcount + // for the given peer IDs is zero, then unreserve the peer IDs + // + + if ((pKernelBus0->p2p.busNvlinkMappingRefcountPerPeerId[peer0] == 0) && + pKernelNvlink0->getProperty(pKernelNvlink0, PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING)) + { + // Free the reserved peer ID since its no longer used + status = kbusUnreserveP2PPeerIds_HAL(pGpu0, pKernelBus0, NVBIT(peer0)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d: Failed to unreserve peer ID mask 0x%x\n", + pGpu0->gpuInstance, NVBIT(peer0)); + return status; + } + } + + if ((pKernelBus1->p2p.busNvlinkMappingRefcountPerPeerId[peer1] == 0) && + pKernelNvlink1->getProperty(pKernelNvlink1, PDB_PROP_KNVLINK_DECONFIG_HSHUB_ON_NO_MAPPING)) + { + // Free the reserved peer ID since its no longer used + status = kbusUnreserveP2PPeerIds_HAL(pGpu1, pKernelBus1, NVBIT(peer1)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d: Failed to unreserve peer ID mask 0x%x\n", + pGpu1->gpuInstance, NVBIT(peer1)); + return status; + } + } + + return status; +} + +/*! 
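+ * Lookup sketch: the NVLink peer ID is the lowest bit set in the per-remote-GPU
+ * peer number mask; an empty mask means no NVLink mapping exists and the query
+ * falls through to the PCIe (GM107) lookup. Note that LOWESTBITIDX_32()
+ * converts the mask to a bit index in place:
+ *
+ *     NvU32 peerId = pKernelBus->p2p.busNvlinkPeerNumberMask[gpuGetInstance(pGpuPeer)];
+ *     if (peerId == 0)
+ *         return kbusGetPeerId_GM107(pGpu, pKernelBus, pGpuPeer);   // PCIe path
+ *     LOWESTBITIDX_32(peerId);
+ *     return peerId;
+ *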
+ * @brief Returns the peer number from pGpu (Local) to pGpuPeer + * + * @param[in] pGpu Local + * @param[in] pKernelBus Local + * @param[in] pGpuPeer Remote + * + * @returns NvU32 bus peer number + */ +NvU32 +kbusGetPeerId_GP100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + OBJGPU *pGpuPeer +) +{ + extern NvU32 kbusGetPeerId_GM107(OBJGPU *pGpu, KernelBus *pKernelBus, OBJGPU *pPeerGpu); + NvU32 gpuPeerInst = gpuGetInstance(pGpuPeer); + NvU32 peerId = pKernelBus->p2p.busNvlinkPeerNumberMask[gpuPeerInst]; + + if (peerId == 0) + { + NV_PRINTF(LEVEL_INFO, + "NVLINK P2P not set up between GPU%u and GPU%u, checking for PCIe P2P...\n", + gpuGetInstance(pGpu), gpuPeerInst); + return kbusGetPeerId_GM107(pGpu, pKernelBus, pGpuPeer); + } + + LOWESTBITIDX_32(peerId); + return peerId; +} + +/** + * @brief Returns if the given peerId is a valid for a given GPU + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] peerId The peer identifier + * + * @return return NV_OK is valid + */ +NV_STATUS +kbusIsPeerIdValid_GP100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 peerId +) +{ + extern NV_STATUS kbusIsPeerIdValid_GM107(OBJGPU *pGpu, KernelBus *pKernelBus, NvU32 peerId); + + NV_ASSERT_OR_RETURN(peerId < P2P_MAX_NUM_PEERS, NV_ERR_INVALID_INDEX); + + if (pKernelBus->p2p.busNvlinkPeerNumberMask[gpuGetInstance(pGpu)] & NVBIT(peerId)) + return NV_OK; + + return kbusIsPeerIdValid_GM107(pGpu, pKernelBus, peerId); +} + +/*! + * @brief Returns the Nvlink peer ID from pGpu0 to pGpu1 + * + * @param[in] pGpu0 (local GPU) + * @param[in] pKernelBus0 (local GPU) + * @param[in] pGpu1 (remote GPU) + * @param[in] pKernelBus1 (remote GPU) + * @param[out] nvlinkPeer NvU32 pointer + * + * return NV_OK on success + */ +NV_STATUS +kbusGetNvlinkP2PPeerId_GP100 +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + KernelBus *pKernelBus1, + NvU32 *nvlinkPeer +) +{ + KernelNvlink *pKernelNvlink0 = GPU_GET_KERNEL_NVLINK(pGpu0); + NV_STATUS status = NV_OK; + + if (nvlinkPeer == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *nvlinkPeer = BUS_INVALID_PEER; + + // + // Use the NVLINK-specific unique ID of the GPU (related to link ID) for + // the peer ID. We expect that this will remain the same across multiple + // runs, so the peer ID should be consistent. + // + // Note: this may not cover peer IDs explicitly requested by the client. + // It is assumed that explicit peer IDs are only used for NVLINK P2P, and + // no PCIe P2P will be used. 
+ // + if ((pKernelNvlink0 != NULL) && + (knvlinkGetPeersNvlinkMaskFromHshub(pGpu0, pKernelNvlink0) != 0)) + { + if (knvlinkIsForcedConfig(pGpu0, pKernelNvlink0) || + pKernelNvlink0->bRegistryLinkOverride) + { + *nvlinkPeer = kbusGetPeerIdFromTable_HAL(pGpu0, pKernelBus0, + pGpu0->gpuInstance, + pGpu1->gpuInstance); + + if (*nvlinkPeer == BUS_INVALID_PEER) + { + return NV_ERR_INVALID_REQUEST; + } + } + else + { + *nvlinkPeer = kbusGetPeerId_HAL(pGpu0, pKernelBus0, pGpu1); + if (*nvlinkPeer != BUS_INVALID_PEER) + { + return NV_OK; + } + + // Reserve GPU0 peer IDs for NVLINK use + if (!pKernelBus0->p2p.bNvlinkPeerIdsReserved) + { + NvU32 idMask = knvlinkGetUniquePeerIdMask_HAL(pGpu0, pKernelNvlink0); + + // + // If NVLINK is topology is not forced, idMask will be non-zero + // if nvlinks are detected during topology discovery in core lib + // + if (idMask != 0) + { + // Reserve GPU0 peer IDs for NVLINK use + status = kbusReserveP2PPeerIds_HAL(pGpu0, pKernelBus0, idMask); + if (status != NV_OK) + { + return status; + } + pKernelBus0->p2p.bNvlinkPeerIdsReserved = NV_TRUE; + } + } + *nvlinkPeer = knvlinkGetUniquePeerId_HAL(pGpu0, pKernelNvlink0, pGpu1); + } + } + + return status; +} +/*! + * @brief Unreserve peer IDs reserved for nvlink usage + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] peerMask Mask of peer IDs to reserve + * + * return NV_OK on success + */ +NV_STATUS +kbusUnreserveP2PPeerIds_GP100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 peerMask +) +{ + NvU32 peerId = 0; + + FOR_EACH_INDEX_IN_MASK(32, peerId, peerMask) + { + if (pKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peerId] > 0) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%u: Cannot unreserve peerId %u. Nvlink refcount > 0\n", + gpuGetInstance(pGpu), peerId); + + return NV_ERR_IN_USE; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + peerId = 0; + + FOR_EACH_INDEX_IN_MASK(32, peerId, peerMask) + { + NV_PRINTF(LEVEL_INFO, + "Unreserving peer ID %u on GPU%u reserved for NVLINK \n", + peerId, gpuGetInstance(pGpu)); + + if (pKernelBus->p2pPcie.busPeer[peerId].refCount != 0) + { + return NV_ERR_IN_USE; + } + + pKernelBus->p2pPcie.busPeer[peerId].bReserved = NV_FALSE; + } + FOR_EACH_INDEX_IN_MASK_END; + + return NV_OK; +} + +/*! + * @brief Return the NvLink peer number mask for that peer ID + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] peerId peer IDs + * + * return NvU32 Nvlink peer number mask for that peer ID + */ +NvU32 +kbusGetNvlinkPeerNumberMask_GP100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 peerId +) +{ + if (peerId >= NV_MAX_DEVICES) + { + NV_PRINTF(LEVEL_ERROR, + "Invalid peerId value: %d\n", peerId); + return 0; + } + + return (pKernelBus->p2p.busNvlinkPeerNumberMask[peerId]); +} diff --git a/src/nvidia/src/kernel/gpu/bus/arch/turing/kern_bus_tu102.c b/src/nvidia/src/kernel/gpu/bus/arch/turing/kern_bus_tu102.c new file mode 100644 index 000000000..05807cfb7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/arch/turing/kern_bus_tu102.c @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/bus/kern_bus.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" + +/*! + * @brief Returns the first available peer Id excluding the nvlink peerIds + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NvU32 first free peer Id + */ +NvU32 +kbusGetUnusedPciePeerId_TU102 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + NvU32 nvlinkIdMask = 0; + NvU32 peerId; + + if ((pKernelNvlink != NULL) && + (pKernelNvlink->getProperty(pKernelNvlink, + PDB_PROP_KNVLINK_WAR_BUG_3471679_PEERID_FILTERING))) + { + // + // Get the mask of NvLink peerIds, to exclude them from the + // peerIds PCIE P2P is using. + // Pre-Ampere GPUs use a static peerId assignment reserved in + // busGetNvlinkP2PPeerId_GP100() and we need to make sure the + // PCIE and nvLink P2P assignments do not collide. + // Make this Windows + Turing only until bug 3471679 is fixed. + // + nvlinkIdMask = knvlinkGetUniquePeerIdMask_HAL(pGpu, pKernelNvlink); + } + + for (peerId = 0; peerId < pKernelBus->numPeers; peerId++) + { + if ((pKernelBus->p2pPcie.busPeer[peerId].refCount == 0) && + (!pKernelBus->p2pPcie.busPeer[peerId].bReserved) && + ((BIT(peerId) & nvlinkIdMask) == 0)) + { + return peerId; + } + } + return BUS_INVALID_PEER; +} diff --git a/src/nvidia/src/kernel/gpu/bus/arch/volta/kern_bus_gv100.c b/src/nvidia/src/kernel/gpu/bus/arch/volta/kern_bus_gv100.c new file mode 100644 index 000000000..52ce2e50e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/arch/volta/kern_bus_gv100.c @@ -0,0 +1,290 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "os/os.h" + +/*! + * @brief Sets up a memdesc and a CPU pointer to the bottom + * of FB that will be used for issuing reads in order + * to flush pending writes to FB. + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK on success + */ +NV_STATUS +kbusSetupCpuPointerForBusFlush_GV100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NV_STATUS status = NV_OK; + + // Nothing to be done in paravirtualized guest or if we don't want to do CPU reads for flushing. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + !kbusIsReadCpuPointerToFlushEnabled(pKernelBus)) + { + return NV_OK; + } + + status = memdescCreate(&pKernelBus->pFlushMemDesc, pGpu, + RM_PAGE_SIZE, + RM_PAGE_SIZE, + NV_TRUE, + ADDR_FBMEM, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_LOST_ON_SUSPEND); + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + + // Allocate memory from reserved heap for flush + status = memdescAlloc(pKernelBus->pFlushMemDesc); + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + + // + // Please note this is a long-lived BAR2 mapping by design. + // The mapping is used for flushing all future vidmem writes on BAR2. + // + pKernelBus->pReadToFlush = memmgrMemDescBeginTransfer(GPU_GET_MEMORY_MANAGER(pGpu), + pKernelBus->pFlushMemDesc, + TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING); + if (pKernelBus->pReadToFlush == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + NV_ASSERT_OR_GOTO(pKernelBus->pReadToFlush != NULL, cleanup); + } + + return NV_OK; +cleanup: + kbusDestroyCpuPointerForBusFlush_HAL(pGpu, pKernelBus); + return status; +} + +/*! + * @brief Destroys the memdesc and frees the CPU pointer to the bottom of + * FB that was used for issuing reads in order to trigger bus flushes. + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns + */ +void +kbusDestroyCpuPointerForBusFlush_GV100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + if (pKernelBus->pReadToFlush != NULL) + { + memmgrMemDescEndTransfer(GPU_GET_MEMORY_MANAGER(pGpu), + pKernelBus->pFlushMemDesc, + TRANSFER_FLAGS_DEFER_FLUSH); + pKernelBus->pReadToFlush = NULL; + } + + memdescFree(pKernelBus->pFlushMemDesc); + memdescDestroy(pKernelBus->pFlushMemDesc); + pKernelBus->pFlushMemDesc = NULL; +} + +/** + * Helper function to map coherent cpu mapping. + * + * @param[in] pGpu Pointer to GPU + * @param[in] pKernelBus Kernel bus pointer + * @param[in] pMemDesc Pointer to memdesc that is to be mapped. 
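+ *
+ * The lookup is a range check against the pre-established coherent CPU
+ * windows; the returned pointer is the matching window's CPU mapping plus the
+ * memdesc's offset within that window, roughly (the real code goes through the
+ * NvP64 helpers):
+ *
+ *     offset = startAddr - coherentCpuMapping.physAddr[i];
+ *     ptr    = (NvU8 *)coherentCpuMapping.pCpuMapping[i] + offset;
+ *
+ * Each successful map also bumps coherentCpuMapping.refcnt[i], which callers
+ * balance with kbusUnmapCoherentCpuMapping_GV100().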
+ * + * @return cpu pointer if success + * NULL on other errors + */ +NvU8* +kbusMapCoherentCpuMapping_GV100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + PMEMORY_DESCRIPTOR pMemDesc +) +{ + RmPhysAddr startAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + NvU64 size = memdescGetSize(pMemDesc); + RmPhysAddr endAddr = startAddr + size - 1; + RmPhysAddr rangeStart = 0; + RmPhysAddr rangeEnd = 0; + RmPhysAddr offset = 0; + NvU32 i = 0; + + for (i = COHERENT_CPU_MAPPING_WPR; i < pKernelBus->coherentCpuMapping.nrMapping; ++i) + { + // Check if requested mem in the mappings. + rangeStart = pKernelBus->coherentCpuMapping.physAddr[i]; + rangeEnd = pKernelBus->coherentCpuMapping.physAddr[i] + pKernelBus->coherentCpuMapping.size[i] - 1; + offset = 0; + + if (rangeStart <= startAddr && endAddr <= rangeEnd) + { + NV_ASSERT_OR_RETURN( + pKernelBus->coherentCpuMapping.pCpuMapping[i] != NvP64_NULL, NvP64_NULL); + + // Get the offset of the region + offset = startAddr - pKernelBus->coherentCpuMapping.physAddr[i]; + pKernelBus->coherentCpuMapping.refcnt[i]++; + return (NvU8 *)NvP64_VALUE( + ((NvUPtr)pKernelBus->coherentCpuMapping.pCpuMapping[i] + + (NvUPtr)offset)); + } + } + + NV_ASSERT_FAILED("No mappings found"); + return NvP64_NULL; +} + +/** + * Helper function to unmap coherent cpu mapping + * + * @param[in] pGpu Pointer to GPU + * @param[in] pKernelBus Kernel bus pointer + * @param[in] pMemDesc Pointer to memdesc + * + * @return void + */ +void +kbusUnmapCoherentCpuMapping_GV100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + PMEMORY_DESCRIPTOR pMemDesc +) +{ + RmPhysAddr startAddr = pMemDesc->_pteArray[0] + pMemDesc->PteAdjust; + NvU64 size = memdescGetSize(pMemDesc); + RmPhysAddr endAddr = startAddr + size - 1; + NvU32 i = 0; + + NV_ASSERT(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); + + for (i = COHERENT_CPU_MAPPING_WPR; i < pKernelBus->coherentCpuMapping.nrMapping; ++i) + { + RmPhysAddr rangeStart = pKernelBus->coherentCpuMapping.physAddr[i]; + RmPhysAddr rangeEnd = pKernelBus->coherentCpuMapping.physAddr[i] + + pKernelBus->coherentCpuMapping.size[i] - 1; + if (rangeStart <= startAddr && endAddr <= rangeEnd) + { + NV_ASSERT_OR_RETURN_VOID(pKernelBus->coherentCpuMapping.refcnt[i] != 0); + pKernelBus->coherentCpuMapping.refcnt[i]--; + break; + } + } + + if (i == pKernelBus->coherentCpuMapping.nrMapping) + { + NV_ASSERT_FAILED("No mappings found"); + } + + // Flush the memory since caller writes to the FB + kbusFlush_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), BUS_FLUSH_VIDEO_MEMORY); + + return; +} +/** + * Destroy coherent cpu mapping to ACR region. + * + * This needs to be done only for P9 and not SHH. In SHH, CPU prefetches + * to WPR region because of the CPU mapping doesn't result in SW visible error + * unlike P9. 
+ * + * @param[in] pGpu Pointer to Gpu + * @param[in] pKernelBus Kernel bus pointer + * + * @return void + */ +void kbusTeardownCoherentCpuMappingAcr_GV100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + if (pKernelBus->coherentCpuMapping.bCoherentCpuMapping) + { + NV_ASSERT_OR_RETURN_VOID(pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING)); + NV_ASSERT_OR_RETURN_VOID( pKernelBus->coherentCpuMapping.refcnt[COHERENT_CPU_MAPPING_WPR] == 0); + + osFlushGpuCoherentCpuCacheRange(pGpu->pOsGpuInfo, + (NvUPtr)pKernelBus->coherentCpuMapping.pCpuMapping[COHERENT_CPU_MAPPING_WPR], + pKernelBus->coherentCpuMapping.size[COHERENT_CPU_MAPPING_WPR]); + + osUnmapPciMemoryKernel64(pGpu, + pKernelBus->coherentCpuMapping.pCpuMapping[COHERENT_CPU_MAPPING_WPR]); + pKernelBus->coherentCpuMapping.pCpuMapping[COHERENT_CPU_MAPPING_WPR] = NvP64_NULL; + } +} + +/*! + * @brief Destroy coherent cpu mapping. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelBus Kernel bus pointer + * @param[in] bFlush Flush CPU cache or not + * + * @return 'NV_OK' if successful, an RM error code otherwise. + */ +void +kbusTeardownCoherentCpuMapping_GV100 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvBool bFlush +) +{ + NvU32 i = 0; + + if (!pKernelBus->coherentCpuMapping.bCoherentCpuMapping) + return; + + for (i = COHERENT_CPU_MAPPING_WPR; i < pKernelBus->coherentCpuMapping.nrMapping; ++i) + { + NV_ASSERT_OR_RETURN_VOID(pKernelBus->coherentCpuMapping.refcnt[i] == 0); + + if (pKernelBus->coherentCpuMapping.pCpuMapping[i] != NvP64_NULL) + { + if (bFlush) + { + osFlushGpuCoherentCpuCacheRange(pGpu->pOsGpuInfo, + (NvUPtr)pKernelBus->coherentCpuMapping.pCpuMapping[i], + pKernelBus->coherentCpuMapping.size[i]); + } + + osUnmapPciMemoryKernel64(pGpu, pKernelBus->coherentCpuMapping.pCpuMapping[i]); + pKernelBus->coherentCpuMapping.pCpuMapping[i] = NvP64_NULL; + } + } + + pKernelBus->coherentCpuMapping.bCoherentCpuMapping = NV_FALSE; +} diff --git a/src/nvidia/src/kernel/gpu/bus/kern_bus.c b/src/nvidia/src/kernel/gpu/bus/kern_bus.c new file mode 100644 index 000000000..a58b73a66 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/kern_bus.c @@ -0,0 +1,1251 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/io_vaspace.h" +#include "mem_mgr/gpu_vaspace.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/bus/kern_bus.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "platform/chipset/chipset.h" +#include "rmapi/client.h" + +#include "gpu/subdevice/subdevice.h" +#include "gpu/gsp/gsp_static_config.h" +#include "vgpu/rpc.h" + +#include "nvRmReg.h" + +static NV_STATUS kbusInitRegistryOverrides(OBJGPU *pGpu, KernelBus *pKernelBus); + +NV_STATUS +kbusConstructEngine_IMPL(OBJGPU *pGpu, KernelBus *pKernelBus, ENGDESCRIPTOR engDesc) +{ + NV_STATUS status; + + if (IsAMPEREorBetter(pGpu) && pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM)) + { + pKernelBus->bBar1PhysicalModeEnabled = NV_TRUE; + } + + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + pKernelBus->bUsePhysicalBar2InitPagetable = NV_TRUE; + } + + // allocate HAL private info block + status = kbusConstructHal_HAL(pGpu, pKernelBus); + if (status != NV_OK) + return status; + + kbusInitRegistryOverrides(pGpu, pKernelBus); + + kbusInitPciBars_HAL(pKernelBus); + + // Special handle for VGPU. WAR for bug 3458057, bug 3458029 + if (IS_VIRTUAL(pGpu)) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kbusInitBarsSize_HAL(pGpu, pKernelBus)); + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kbusInitBarsBaseInfo_HAL(pKernelBus)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kbusSetBarsApertureSize_HAL(pGpu, pKernelBus, GPU_GFID_PF)); + + return NV_OK; +} + +/*! + * @brief Initialize all registry overrides for this object + * + * @param[in] pGpu + * @param[in,out] pKernelBus + */ +static NV_STATUS +kbusInitRegistryOverrides(OBJGPU *pGpu, KernelBus *pKernelBus) +{ + NvU32 data32; + + switch (DRF_VAL(_REG_STR_RM, _INST_LOC, _BAR_PTE, pGpu->instLocOverrides)) + { + default: + case NV_REG_STR_RM_INST_LOC_BAR_PTE_DEFAULT: + // Do not override on default.. + break; + case NV_REG_STR_RM_INST_LOC_BAR_PTE_VID: + pKernelBus->PTEBAR2Aperture = ADDR_FBMEM; + pKernelBus->PTEBAR2Attr = NV_MEMORY_WRITECOMBINED; + break; + + case NV_REG_STR_RM_INST_LOC_BAR_PTE_COH: + if (gpuIsBarPteInSysmemSupported(pGpu) || !gpuIsRegUsesGlobalSurfaceOverridesEnabled(pGpu)) + { + pKernelBus->PTEBAR2Aperture = ADDR_SYSMEM; + pKernelBus->PTEBAR2Attr = NV_MEMORY_CACHED; + } + else + { + // + // BAR PTEs in sysmem is not supported on all hardware. + // HW bug 415430. Once fixed, this property will be set on supported GPUs. + // On unsupported GPUs where the GlobalSurfaceOverrides regkey is used, show a warning and don't override. + // + NV_PRINTF(LEVEL_WARNING, + "BAR PTEs not supported in sysmem. Ignoring global override request.\n"); + } + break; + + case NV_REG_STR_RM_INST_LOC_BAR_PTE_NCOH: + if (gpuIsBarPteInSysmemSupported(pGpu) || !gpuIsRegUsesGlobalSurfaceOverridesEnabled(pGpu)) + { + pKernelBus->PTEBAR2Aperture = ADDR_SYSMEM; + pKernelBus->PTEBAR2Attr = NV_MEMORY_UNCACHED; + } + else + { + // BAR PTEs in sysmem is not supported on current hardware. See above. + NV_PRINTF(LEVEL_WARNING, + "BAR PTEs not supported in sysmem. Ignoring global override request.\n"); + } + break; + } + + NV_PRINTF(LEVEL_INFO, "Using aperture %d for BAR2 PTEs\n", + pKernelBus->PTEBAR2Aperture); + + switch (DRF_VAL(_REG_STR_RM, _INST_LOC, _BAR_PDE, pGpu->instLocOverrides)) + { + default: + case NV_REG_STR_RM_INST_LOC_BAR_PDE_DEFAULT: + // Do not override on default. 
+ break; + case NV_REG_STR_RM_INST_LOC_BAR_PDE_VID: + pKernelBus->PDEBAR2Aperture = ADDR_FBMEM; + pKernelBus->PDEBAR2Attr = NV_MEMORY_WRITECOMBINED; + break; + + case NV_REG_STR_RM_INST_LOC_BAR_PDE_COH: + if (gpuIsBarPteInSysmemSupported(pGpu) || !gpuIsRegUsesGlobalSurfaceOverridesEnabled(pGpu)) + { + pKernelBus->PDEBAR2Aperture = ADDR_SYSMEM; + pKernelBus->PDEBAR2Attr = NV_MEMORY_CACHED; + } + else + { + // BAR PDEs in sysmem is not supported on all hardware. See above. + NV_PRINTF(LEVEL_WARNING, + "BAR PDEs not supported in sysmem. Ignoring global override request.\n"); + } + break; + + case NV_REG_STR_RM_INST_LOC_BAR_PDE_NCOH: + if (gpuIsBarPteInSysmemSupported(pGpu) || !gpuIsRegUsesGlobalSurfaceOverridesEnabled(pGpu)) + { + pKernelBus->PDEBAR2Aperture = ADDR_SYSMEM; + pKernelBus->PDEBAR2Attr = NV_MEMORY_UNCACHED; + } + else + { + // BAR PDEs in sysmem is not supported on all hardware. See above. + NV_PRINTF(LEVEL_WARNING, + "BAR PDEs not supported in sysmem. Ignoring global override request.\n"); + } + break; + } + + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM && !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TCC_MODE)) + { + // + // Aligns to unlinked SLI: Volta and up + // Next: Plan for all GPUs after validation + // + pKernelBus->bP2pMailboxClientAllocated = + pKernelBus->bP2pMailboxClientAllocatedBug3466714VoltaAndUp; + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_P2P_MAILBOX_CLIENT_ALLOCATED, &data32) == NV_OK) + { + pKernelBus->bP2pMailboxClientAllocated = !!data32; + } + + return NV_OK; +} + +/** + * @brief Gets the BAR1 VA range for a client + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] hClient Client handle + * @param[out] pBar1VARange BAR1 VA range + */ + +NV_STATUS +kbusGetBar1VARangeForClient_IMPL(OBJGPU *pGpu, KernelBus *pKernelBus, NvHandle hClient, NV_RANGE *pBar1VARange) +{ + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + OBJVASPACE *pBar1VAS = kbusGetBar1VASpace_HAL(pGpu, pKernelBus); + + NV_ASSERT_OR_RETURN(pBar1VAS != NULL, NV_ERR_INVALID_STATE); + + *pBar1VARange = rangeMake(vaspaceGetVaStart(pBar1VAS), vaspaceGetVaLimit(pBar1VAS)); + + if ((pKernelMIGManager != NULL) && kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager) && + !rmclientIsCapableByHandle(hClient, NV_RM_CAP_SYS_SMC_MONITOR) && + !kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + MIG_INSTANCE_REF ref; + + *pBar1VARange = memmgrGetMIGPartitionableBAR1Range(pGpu, pMemoryManager); + + NV_ASSERT_OK_OR_RETURN(kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, &ref)); + NV_ASSERT_OK_OR_RETURN(kmemsysSwizzIdToMIGMemRange(pGpu, pKernelMemorySystem, ref.pKernelMIGGpuInstance->swizzId, + *pBar1VARange, pBar1VARange)); + } + return NV_OK; +} + +RmPhysAddr +kbusSetupPeerBarAccess_IMPL +( + OBJGPU *pLocalGpu, + OBJGPU *pRemoteGpu, + RmPhysAddr base, + NvU64 size, + PMEMORY_DESCRIPTOR *ppMemDesc +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pMemDesc = *ppMemDesc; + IOVAMAPPING *pIovaMapping; + + NV_ASSERT_OR_RETURN(((base & RM_PAGE_MASK) == 0), ~0ULL); + + if (pMemDesc == NULL) + { + status = memdescCreate(&pMemDesc, pLocalGpu, size, 0, NV_TRUE, + ADDR_SYSMEM, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE); + NV_ASSERT_OR_RETURN(status == NV_OK, ~0ULL); + + memdescDescribe(pMemDesc, ADDR_SYSMEM, base, size); + } + else + { + NV_ASSERT_OR_RETURN( + 
(memdescGetPhysAddr(pMemDesc, AT_GPU, 0) == base) && + (memdescGetSize(pMemDesc) == size), ~0ULL); + } + + // + // Even if IOMMU-remapping fails (which it shouldn't), try to continue + // using the CPU physical address. In most cases, this is still sufficient. + // + status = memdescMapIommu(pMemDesc, pRemoteGpu->busInfo.iovaspaceId); + NV_ASSERT(status == NV_OK); + + pIovaMapping = memdescGetIommuMap(pMemDesc, pRemoteGpu->busInfo.iovaspaceId); + + *ppMemDesc = pMemDesc; + + if (pIovaMapping == NULL) + { + NV_PRINTF(LEVEL_INFO, + "no IOVA mapping found for pre-existing P2P domain memdesc\n"); + return memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + } + + return pIovaMapping->iovaArray[0]; +} + +/*! + * @brief Get the bus flush aperture flag for the NV_ADDRESS_SPACE + * For use with the kbusFlush_HAL() api + * + * @param[in] addrSpace NV_ADDRESS_SPACE + * + * @returns bush flush aperture flag + */ +NvU32 kbusGetFlushAperture_IMPL(KernelBus *pKernelBus, NV_ADDRESS_SPACE addrSpace) +{ + return (addrSpace == ADDR_FBMEM) ? BUS_FLUSH_VIDEO_MEMORY : BUS_FLUSH_SYSTEM_MEMORY; +} + +void +kbusDestruct_IMPL(KernelBus *pKernelBus) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelBus); + + // + // We need to clean-up the memory resources for BAR2 as late as possible, + // and after all memory descriptors have been reclaimed. + // + kbusDestructVirtualBar2_HAL(pGpu, pKernelBus, NV_TRUE, GPU_GFID_PF); + + return; +} + +/*! Send sysmembar to all sub-devices */ +NV_STATUS +kbusSendSysmembar_IMPL +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NV_STATUS status = NV_OK; + + // Nothing to be done in guest in the paravirtualization case. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return NV_OK; + } + + if (kbusIsFbFlushDisabled(pKernelBus)) + { + // Eliminate FB flushes, but keep mmu invalidates + NV_PRINTF(LEVEL_INFO, "disable_fb_flush flag, skipping flush.\n"); + return NV_OK; + } + + // Wait for the flush to flow through + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY); + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + if (kbusSendSysmembarSingle_HAL(pGpu, pKernelBus) == NV_ERR_TIMEOUT) + { + status = NV_ERR_TIMEOUT; + } + SLI_LOOP_END; + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + return status; +} + +/** + * @brief Send sysmembar to a single sub-devices + * Trigger RPC to Physical RM. + * + * @param[in] pGpu + * @param[in] pKernelBus + */ +NV_STATUS +kbusSendSysmembarSingle_KERNEL +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status; + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_BUS_FLUSH_WITH_SYSMEMBAR, + NULL, 0); + + return status; +} + +/*! + * @brief Commit BAR2 + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] flags GPU state flag (not used by Kernel RM) + * + * @returns NV_OK on success. + */ +NV_STATUS +kbusCommitBar2_KERNEL +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 flags +) +{ + // we will initialize bar2 to the default big page size of the system + NV_ASSERT_OK_OR_RETURN(kbusInitVirtualBar2_HAL(pGpu, pKernelBus)); + NV_ASSERT_OK_OR_RETURN(kbusSetupCpuPointerForBusFlush_HAL(pGpu, pKernelBus)); + + return NV_OK; +} + +/*! 
Get pci bar size in BYTE */ +NvU64 +kbusGetPciBarSize_IMPL(KernelBus *pKernelBus, NvU32 index) +{ + if (index >= pKernelBus->totalPciBars) + { + NV_PRINTF(LEVEL_ERROR, "bad index 0x%x\n", index); + return 0; + } + + return pKernelBus->pciBarSizes[index]; +} + +RmPhysAddr +kbusGetPciBarOffset_IMPL(KernelBus *pKernelBus, NvU32 index) +{ + RmPhysAddr offset = 0x0; + + if (index < pKernelBus->totalPciBars) + { + offset = pKernelBus->pciBars[index]; + } + else + { + NV_PRINTF(LEVEL_ERROR, "bad index 0x%x\n", index); + } + + return offset; +} + +/** + * @brief Determine bBar1Force64KBMapping base on regkey and bar1 size + * Determine if 64KB mappings need to be forced based on total BAR1 size. + * Default threshold is 256MB unless overridden by regkey + * Force 64KB for SKUs with BAR1 size <= 256MB + * + * @param[in] pKernelBus + */ +void +kbusDetermineBar1Force64KBMapping_IMPL +( + KernelBus *pKernelBus +) +{ + OBJGPU* pGpu = ENG_GET_GPU(pKernelBus); + NvU32 data; + + pKernelBus->bBar1Force64KBMapping = NV_TRUE; + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_64KB_BAR1_MAPPINGS, + &data) == NV_OK) + { + if (data == NV_REG_STR_RM_64KB_BAR1_MAPPINGS_DISABLED) + { + pKernelBus->bBar1Force64KBMapping = NV_FALSE; + } + } + else + { + NvU32 bar1SizeMB; + bar1SizeMB = (NvU32)(kbusGetPciBarSize(pKernelBus, 1) >> 20); + + if (bar1SizeMB > 256) + { + pKernelBus->bBar1Force64KBMapping = NV_FALSE; + } + } +} + + +/** + * @brief Determine bar1[gfid].apertureLength base on regkey and bar1 size + * + * @param[in] pKernelBus + * @param[in] gfid + */ +void +kbusDetermineBar1ApertureLength_IMPL +( + KernelBus *pKernelBus, + NvU32 gfid +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelBus); + NvU32 data32; + + if (IS_GFID_VF(gfid)) + { + pKernelBus->bar1[gfid].apertureLength = pGpu->sriovState.vfBarSize[1]; + } + else + { + pKernelBus->bar1[gfid].apertureLength = kbusGetPciBarSize(pKernelBus, 1); + } + + // We can shrink BAR1 using this reg key but cannot grow it. + if (((NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_BAR1_APERTURE_SIZE_MB, &data32))) && + data32 && (((NvU64)data32 << 20) < pKernelBus->bar1[gfid].apertureLength)) + { + // Set BAR1 aperture length based on the override + pKernelBus->bar1[gfid].apertureLength = (NvU64) data32 << 20; + } + +} + +/*! + * @brief Initialize pciBarSizes[], set pKernelBus->bPciBarSizesValid + * Trigger an internal RMAPI to get the data from Physical RM. + * + * @param[in] pGpu + * @param[in] pKernelBus + */ +NV_STATUS +kbusInitBarsSize_KERNEL +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS params; + NvU32 i; + + NV_ASSERT( ! pKernelBus->bPciBarSizesValid); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_BUS_GET_PCI_BAR_INFO, + ¶ms, sizeof(params))); + + for (i = 0; i< params.pciBarCount; i++) + { + pKernelBus->pciBarSizes[i] = params.pciBarInfo[i].barSizeBytes; + } + + pKernelBus->bPciBarSizesValid = NV_TRUE; + + return NV_OK; +} + +/*! 
+ * @brief Remove P2P mapping to a given peer GPU + * + * @param[in] pGpu0 (Local) + * @param[in] pKernelBus0 (Local) + * @param[in] pGpu1 (Remote) + * @param[in] peerIdx + * + * return NV_OK on success + */ +void +kbusDestroyMailbox_IMPL +( + OBJGPU *pGpu0, + KernelBus *pKernelBus0, + OBJGPU *pGpu1, + NvU32 peerIdx +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu0); + NvBool bNeedWarBug999673 = kbusNeedWarForBug999673_HAL(pGpu0, pKernelBus0, pGpu1) || + kbusNeedWarForBug999673_HAL(pGpu1, GPU_GET_KERNEL_BUS(pGpu1), pGpu0); + NV2080_CTRL_INTERNAL_BUS_DESTROY_P2P_MAILBOX_PARAMS busParams = {0}; + NV2080_CTRL_INTERNAL_HSHUB_PEER_CONN_CONFIG_PARAMS hshubParams = {0}; + NV_STATUS status; + + kbusDestroyPeerAccess_HAL(pGpu0, pKernelBus0, peerIdx); + + busParams.peerIdx = peerIdx; + busParams.bNeedWarBug999673 = bNeedWarBug999673; + status = pRmApi->Control(pRmApi, pGpu0->hInternalClient, pGpu0->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_BUS_DESTROY_P2P_MAILBOX, + &busParams, sizeof(busParams)); + NV_ASSERT(status == NV_OK); + + // Create a peer mask for each peer to program their respective peer_connection_cfg registers + hshubParams.invalidatePeerMask = NVBIT32(peerIdx); + // Program connection_cfg registers + status = pRmApi->Control(pRmApi, pGpu0->hInternalClient, pGpu0->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_PEER_CONN_CONFIG, + &hshubParams, sizeof(hshubParams)); + NV_ASSERT(status == NV_OK); +} + +NvU8 * +kbusCpuOffsetInBar2WindowGet_IMPL +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT_OR_RETURN(NULL != pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping, NULL); + NV_ASSERT_OR_RETURN(ADDR_FBMEM == pMemDesc->_addressSpace, NULL); + + return (NvU8 *)(pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping + + memdescGetPhysAddr(pMemDesc, AT_GPU, 0)); +} + +/*! + * Calculates the BAR2 VA limit (in Byte units) which usually means the + * cpuVisible area limit in CPU-RM. Can be safely called only after + * kbusSetBarsApertureSize_HAL is executed. + * + * @param pGpu + * @param pKernelBus + * + * @return VA limit of BAR2 + */ +NvU64 +kbusGetVaLimitForBar2_KERNEL +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NvU64 limit = pKernelBus->bar2[GPU_GFID_PF].cpuVisibleLimit; + + NV_PRINTF(LEVEL_INFO, "va limit: 0x%llx\n", limit); + + // + // pKernelBus->bar2.vaLimit is set by this function. + // Assert to ensure that this value doesn't get changed. + // + NV_ASSERT(pKernelBus->bar2[GPU_GFID_PF].vaLimit == 0 || pKernelBus->bar2[GPU_GFID_PF].vaLimit == limit); + + return limit; +} + +/*! + * Patch CPU-RM's SW cache of BAR1 PDB to GSP-RM's BAR1 PDB so that CPU-RM can + * do TLB invalidation to correct VA space. 
+ * + * @param pGpu + * @param pKernelBus + * + * @return NV_OK if PDB is updated successfully + * Or bubble up the error code returned by the callees + */ +NV_STATUS +kbusPatchBar1Pdb_GSPCLIENT +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NV_STATUS status = NV_OK; + OBJGVASPACE *pGVAS = dynamicCast(pKernelBus->bar1[GPU_GFID_PF].pVAS, OBJGVASPACE); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + const MMU_FMT_LEVEL *pRootFmt = pGpuState->pFmt->pRoot; + NvU32 rootSize = pRootFmt->entrySize; + MMU_WALK_USER_CTX userCtx = {0}; + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(&pMemDesc, pGpu, rootSize, RM_PAGE_SIZE, NV_TRUE, ADDR_FBMEM, + kgmmuGetPTEAttr(pKernelGmmu), MEMDESC_FLAGS_NONE)); + + memdescDescribe(pMemDesc, ADDR_FBMEM, pGSCI->bar1PdeBase, rootSize); + memdescSetPageSize(pMemDesc, VAS_ADDRESS_TRANSLATION(pKernelBus->bar1[GPU_GFID_PF].pVAS), RM_PAGE_SIZE); + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + // + // Modify the CPU-RM's walker state with the new backing memory. + // This is intended to replace CPU-RM's PDB by GSP-RM's PDB. + // + status = mmuWalkModifyLevelInstance(pGpuState->pWalk, + pRootFmt, + vaspaceGetVaStart(pKernelBus->bar1[GPU_GFID_PF].pVAS), + (MMU_WALK_MEMDESC*)pMemDesc, + mmuFmtLevelSize(pRootFmt), + NV_TRUE, + NV_TRUE, + NV_FALSE); + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Failed to modify CPU-RM's BAR1 PDB to GSP-RM's BAR1 PDB.\n"); + return status; + } + + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_DOWNGRADE); + + return status; +} + +/*! + * Patch CPU-RM's SW cache of BAR2 PDB to GSP-RM's BAR2 PDB so that CPU-RM can + * do TLB invalidation to correct VA space. + * + * For the BAR2 support in RM-offload model, CPU-RM owns the VA range under + * PDE3[0] and GSP-RM owns the VA range under PDE3[1]. GSP-RM and CPU-RM + * establish their own BAR2 page tables respectively. After CPU-RM establishes + * its own table, it passes its PDE3[0] value to GSP-RM, then GSP-RM will fill + * this value to PDE3[0] of GSP-RM's table (only GSP-RM's BAR2 table will be + * bound to HW) so that HW sees single BAR2 page table for both GSP-RM and + * CPU-RM. 
+ * + * @param pGpu + * @param pKernelBus + * + * @return NV_OK if PDB is updated successfully + * Or bubble up the error code returned by the callees + */ +NV_STATUS +kbusPatchBar2Pdb_GSPCLIENT +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NV_STATUS status = NV_OK; + PMEMORY_DESCRIPTOR pMemDesc; + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + NvU64 entryValue; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(&pMemDesc, pGpu, pKernelBus->bar2[GPU_GFID_PF].pageDirSize, RM_PAGE_SIZE, NV_TRUE, + ADDR_FBMEM, pKernelBus->PDEBAR2Attr, MEMDESC_FLAGS_NONE)); + + memdescDescribe(pMemDesc, ADDR_FBMEM, pGSCI->bar2PdeBase, pKernelBus->bar2[GPU_GFID_PF].pageDirSize); + + // Update CPU-RM's SW cache of PDB to GSP-RM's PDB address + pKernelBus->virtualBar2[GPU_GFID_PF].pPDB = pMemDesc; + + // + // BAR2 page table is not yet working at this point, so retrieving the + // PDE3[0] of BAR2 page table via BAR0_WINDOW + // + entryValue = GPU_REG_RD32(pGpu, (NvU32)pKernelBus->bar2[GPU_GFID_PF].bar2OffsetInBar0Window) | + ((NvU64)GPU_REG_RD32(pGpu, (NvU32)pKernelBus->bar2[GPU_GFID_PF].bar2OffsetInBar0Window + 4) << 32); + + // + // Provide the PDE3[0] value to GSP-RM so that GSP-RM can merge CPU-RM's + // page table to GSP-RM's page table + // + NV_RM_RPC_UPDATE_BAR_PDE(pGpu, NV_RPC_UPDATE_PDE_BAR_2, entryValue, pKernelBus->bar2[GPU_GFID_PF].pFmt->pRoot->virtAddrBitLo, status); + + return NV_OK; +} + +/*! + * @brief Helper function to trigger RPC to Physical RM to unbind FLA VASpace + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @return NV_OK if successful + */ +NV_STATUS +kbusSetupUnbindFla_KERNEL +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NV_STATUS status = NV_OK; + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS params = { 0 }; + + if (!pKernelBus->flaInfo.bFlaBind) + return NV_OK; + + params.flaAction = NV2080_CTRL_FLA_ACTION_UNBIND; + + NV_RM_RPC_CONTROL(pGpu, pKernelBus->flaInfo.hClient, + pKernelBus->flaInfo.hSubDevice, + NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK, + ¶ms, sizeof(params), status); + + pKernelBus->flaInfo.bFlaBind = NV_FALSE; + pKernelBus->bFlaEnabled = NV_FALSE; + + return status; +} + +/*! 
+ * @brief Helper function to extract information from FLA data structure and + * to trigger RPC to Physical RM to BIND FLA VASpace + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] gfid GFID + * + * @return NV_OK if successful + */ +NV_STATUS +kbusSetupBindFla_KERNEL +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU32 gfid +) +{ + NV_STATUS status = NV_OK; + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS params = {0}; + + if (!gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + MEMORY_DESCRIPTOR *pMemDesc; + RmPhysAddr imbPhysAddr; + NvU32 addrSpace; + + pMemDesc = pKernelBus->flaInfo.pInstblkMemDesc; + imbPhysAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + addrSpace = memdescGetAddressSpace(pMemDesc); + NV2080_CTRL_FLA_ADDRSPACE paramAddrSpace = NV2080_CTRL_FLA_ADDRSPACE_FBMEM; + + switch(addrSpace) + { + case ADDR_FBMEM: + paramAddrSpace = NV2080_CTRL_FLA_ADDRSPACE_FBMEM; + break; + case ADDR_SYSMEM: + paramAddrSpace = NV2080_CTRL_FLA_ADDRSPACE_SYSMEM; + break; + } + params.imbPhysAddr = imbPhysAddr; + params.addrSpace = paramAddrSpace; + } + params.flaAction = NV2080_CTRL_FLA_ACTION_BIND; + NV_RM_RPC_CONTROL(pGpu, pKernelBus->flaInfo.hClient, + pKernelBus->flaInfo.hSubDevice, + NV2080_CTRL_CMD_FLA_SETUP_INSTANCE_MEM_BLOCK, + ¶ms, sizeof(params), status); + // Since FLA state is tracked in the Guest, Guest RM needs to set it here + pKernelBus->flaInfo.bFlaBind = NV_TRUE; + pKernelBus->bFlaEnabled = NV_TRUE; + + return status; +} + +/*! + * @brief Checks whether an engine is available or not. + * + * The 'engine' is an engine descriptor + * This function is different from busProbeRegister in a sense that it doesn't + * rely on timeouts after a read of a register in the reg space for engine. + * Instead, it + * - Return TRUE for all engines which are must present in GPU. + * - Get information about CE, MSENC, NVJPG and OFA engines from plugin. + * - Rest engines are determined from HAL creation data. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelBus KernelBus pointer + * @param[in] engDesc ENGDESCRIPTOR pointer used to check Engine presence + * + * @returns NV_TRUE if engine is available. + * NV_FALSE if engine is not available or floorswept. + * + */ +NvBool +kbusCheckEngine_KERNEL +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + ENGDESCRIPTOR engDesc +) +{ + NvU64 engineList; + NvBool bSupported; + + if (!RMCFG_FEATURE_VIRTUALIZATION && !RMCFG_FEATURE_GSP_CLIENT_RM) + return NV_TRUE; + + { + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + if (pGSCI == NULL) + { + return NV_FALSE; + } + engineList = pGSCI->engineCaps; + } + + switch (engDesc) + { + case ENG_LSFM: + case ENG_PMU: + case ENG_CLK: + case ENG_ACR: + case ENG_DISP: + return NV_FALSE; + // + // This function is used in two environments: + // (a) vGPU where display is not yet supported. + // (b) RM offload (Kernel RM) where display is supported. + // + case ENG_KERNEL_DISPLAY: + if (IS_GSP_CLIENT(pGpu)) + return NV_TRUE; + else + return NV_FALSE; + + case ENG_BIF: + case ENG_KERNEL_BIF: + case ENG_MC: + case ENG_KERNEL_MC: + case ENG_PRIV_RING: + case ENG_SW_INTR: + case ENG_TMR: + case ENG_DMA: + case ENG_BUS: + case ENG_GR(0): + case ENG_CIPHER: + case ENG_INTR: + case ENG_GPULOG: + case ENG_GPUMON: + case ENG_FIFO: + return NV_TRUE; + + case ENG_CE(0): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY0))) ? NV_TRUE: NV_FALSE); + case ENG_CE(1): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY1))) ? 
NV_TRUE: NV_FALSE); + case ENG_CE(2): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY2))) ? NV_TRUE: NV_FALSE); + case ENG_CE(3): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY3))) ? NV_TRUE: NV_FALSE); + case ENG_CE(4): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY4))) ? NV_TRUE: NV_FALSE); + case ENG_CE(5): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY5))) ? NV_TRUE: NV_FALSE); + case ENG_CE(6): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY6))) ? NV_TRUE: NV_FALSE); + case ENG_CE(7): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY7))) ? NV_TRUE: NV_FALSE); + case ENG_CE(8): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY8))) ? NV_TRUE: NV_FALSE); + case ENG_CE(9): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_COPY9))) ? NV_TRUE: NV_FALSE); + case ENG_MSENC(0): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVENC0))) ? NV_TRUE: NV_FALSE); + case ENG_MSENC(1): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVENC1))) ? NV_TRUE: NV_FALSE); + case ENG_MSENC(2): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVENC2))) ? NV_TRUE: NV_FALSE); + case ENG_SEC2: + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_SEC2))) ? NV_TRUE: NV_FALSE); + case ENG_NVDEC(0): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVDEC0))) ? NV_TRUE: NV_FALSE); + case ENG_NVDEC(1): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVDEC1))) ? NV_TRUE: NV_FALSE); + case ENG_NVDEC(2): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVDEC2))) ? NV_TRUE: NV_FALSE); + case ENG_OFA: + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_OFA))) ? NV_TRUE: NV_FALSE); + case ENG_NVDEC(3): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVDEC3))) ? NV_TRUE: NV_FALSE); + case ENG_NVDEC(4): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVDEC4))) ? NV_TRUE: NV_FALSE); + case ENG_NVJPEG(0): + return ((engineList & (NVBIT64(NV2080_ENGINE_TYPE_NVJPEG0))) ? NV_TRUE: NV_FALSE); + case ENG_GR(1): + case ENG_GR(2): + case ENG_GR(3): + case ENG_GR(4): + case ENG_GR(5): + case ENG_GR(6): + case ENG_GR(7): + { + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + NV_ASSERT_OR_RETURN(pKernelFifo != NULL, NV_FALSE); + + if (kfifoCheckEngine_HAL(pGpu, pKernelFifo, engDesc, &bSupported) == NV_OK) + return bSupported; + else + return NV_FALSE; + } + + case ENG_INVALID: + NV_PRINTF(LEVEL_ERROR, + "Query for ENG_INVALID considered erroneous: %d\n", + engDesc); + return NV_TRUE; + // + // Check if engine descriptor is supported by current GPU. + // Callee must not send engine descriptor which are not on + // HAL lists of GPU. So Add ASSERT there. + // + default: + bSupported = gpuIsEngDescSupported(pGpu, engDesc); + + if (!bSupported) + { + NV_PRINTF(LEVEL_ERROR, "Unable to check engine ID: %d\n", + engDesc); + NV_ASSERT(bSupported); + } + return bSupported; + } +} + +// +// kbusGetDeviceCaps +// +// This routine gets cap bits in unicast. If bCapsInitialized is passed as +// NV_FALSE, the caps will be copied into pHostCaps without OR/ANDing. Otherwise, +// the caps bits for the current GPU will be OR/ANDed together with pHostCaps to +// create a single set of caps that accurately represents the functionality of +// the device. 
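Editor's aside (not part of the original change): the OR/AND merge rule described in the comment above can be seen with a small standalone sketch. The cap names below are illustrative placeholders; only the AND-for-features / OR-for-workarounds behavior mirrors the RMCTRL_AND_CAP / RMCTRL_OR_CAP usage in kbusGetDeviceCaps_IMPL.

#include <stdio.h>
#include <stdint.h>

/* Conceptual sketch only: feature caps are ANDed across sub-GPUs, bug-workaround
 * caps are ORed. Bit positions are made up for illustration. */
#define CAP_VIRTUAL_P2P   (1u << 0)  /* feature cap: merged with AND */
#define CAP_CPU_WRITE_WAR (1u << 1)  /* workaround cap: merged with OR */

int main(void)
{
    uint32_t gpu0 = CAP_VIRTUAL_P2P;    /* supports virtual P2P, WAR not needed */
    uint32_t gpu1 = CAP_CPU_WRITE_WAR;  /* no virtual P2P, WAR needed */

    uint32_t merged = ((gpu0 & gpu1) & CAP_VIRTUAL_P2P) |   /* feature survives only if every GPU has it */
                      ((gpu0 | gpu1) & CAP_CPU_WRITE_WAR);  /* workaround applies if any GPU needs it */

    printf("merged caps: 0x%x\n", merged);  /* prints 0x2: WAR bit set, P2P bit cleared */
    return 0;
}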
+// +void +kbusGetDeviceCaps_IMPL +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvU8 *pHostCaps, + NvBool bCapsInitialized +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + NvU8 tempCaps[NV0080_CTRL_HOST_CAPS_TBL_SIZE], temp; + NvBool bVirtualP2P; + NvBool bExplicitCacheFlushRequired; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + portMemSet(tempCaps, 0, NV0080_CTRL_HOST_CAPS_TBL_SIZE); + + /*! On KEPLER+, mailbox protocol based P2P transactions goes through virtual to + * physical translation (on request side) */ + bVirtualP2P = IsdMAXWELLorBetter(pGpu); + if (bVirtualP2P) + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_HOST_CAPS, _VIRTUAL_P2P); + + /*! DMAs to/from cached memory need to have the cache flushed explicitly */ + bExplicitCacheFlushRequired = NVCPU_IS_ARM && + (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS_UNIX); + if (bExplicitCacheFlushRequired || + (!pCl->getProperty(pCL, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT))) + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_HOST_CAPS, _EXPLICIT_CACHE_FLUSH_REQD); + + if ((pCl->FHBBusInfo.vendorID == PCI_VENDOR_ID_NVIDIA) && + ((pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR04_PRESENT)) || + ((pCl->FHBBusInfo.deviceID >= NVIDIA_C73_CPU_PCI_0_DEVICE_ID_SLI2) && + (pCl->FHBBusInfo.deviceID <= NVIDIA_C73_CPU_PCI_0_DEVICE_ID_RESERVED_3)))) + { + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_HOST_CAPS, _CPU_WRITE_WAR_BUG_420495); + } + + // the RM always supports GPU-coherent mappings + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_HOST_CAPS, _GPU_COHERENT_MAPPING_SUPPORTED); + + // If we don't have existing caps with which to reconcile, then just return + if (!bCapsInitialized) + { + portMemCopy(pHostCaps, NV0080_CTRL_HOST_CAPS_TBL_SIZE, tempCaps, NV0080_CTRL_HOST_CAPS_TBL_SIZE); + return; + } + + // factor in this GPUs caps: all these are feature caps, so use AND + RMCTRL_AND_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _P2P_4_WAY); + RMCTRL_AND_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _P2P_8_WAY); + RMCTRL_AND_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _VIRTUAL_P2P); + RMCTRL_AND_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _GPU_COHERENT_MAPPING_SUPPORTED); + + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _SEMA_ACQUIRE_BUG_105665); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _SYS_SEMA_DEADLOCK_BUG_148216); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _SLOWSLI); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _SEMA_READ_ONLY_BUG); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _MEM2MEM_BUG_365782); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _LARGE_NONCOH_UPSTR_WRITE_BUG_114871); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _LARGE_UPSTREAM_WRITE_BUG_115115); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _SEP_VIDMEM_PB_NOTIFIERS_BUG_83923); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _P2P_DEADLOCK_BUG_203825); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _COMPRESSED_BL_P2P_BUG_257072); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _CROSS_BLITS_BUG_270260); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _CPU_WRITE_WAR_BUG_420495); + RMCTRL_OR_CAP(pHostCaps, tempCaps, temp, + NV0080_CTRL_HOST_CAPS, _BAR1_READ_DEADLOCK_BUG_511418); + + return; +} + +NV_STATUS +kbusMapFbApertureByHandle_IMPL +( + OBJGPU *pGpu, + KernelBus 
*pKernelBus, + NvHandle hClient, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + NvU64 *pBar1Va +) +{ + NV_STATUS status; + RsClient *pClient = NULL; + RsResourceRef *pSrcMemoryRef = NULL; + Memory *pSrcMemory = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvU64 fbApertureOffset = 0; + NvU64 fbApertureLength = size; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pClient)); + + status = clientGetResourceRef(pClient, hMemory, &pSrcMemoryRef); + if (status != NV_OK) + { + return status; + } + + pSrcMemory = dynamicCast(pSrcMemoryRef->pResource, Memory); + if (pSrcMemory == NULL) + { + return NV_ERR_INVALID_OBJECT; + } + + pMemDesc = pSrcMemory->pMemDesc; + + if (memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM) + { + return NV_ERR_INVALID_ARGUMENT; + } + + status = kbusMapFbAperture_HAL(pGpu, pKernelBus, pMemDesc, offset, + &fbApertureOffset, &fbApertureLength, + BUS_MAP_FB_FLAGS_MAP_UNICAST, hClient); + if (status != NV_OK) + { + return status; + } + + NV_ASSERT_OR_GOTO(fbApertureLength >= size, failed); + + if ((!NV_IS_ALIGNED64(fbApertureOffset, osGetPageSize())) || + (!NV_IS_ALIGNED64(fbApertureLength, osGetPageSize()))) + { + status = NV_ERR_NOT_SUPPORTED; + goto failed; + } + + *pBar1Va = gpumgrGetGpuPhysFbAddr(pGpu) + fbApertureOffset; + + if (!NV_IS_ALIGNED64(*pBar1Va, osGetPageSize())) + { + status = NV_ERR_INVALID_ADDRESS; + goto failed; + } + + return NV_OK; + +failed: + // Note: fbApertureLength is not used by kbusUnmapFbAperture_HAL(), so it's passed as 0 + kbusUnmapFbAperture_HAL(pGpu, pKernelBus, pMemDesc, + fbApertureOffset, 0, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + + return status; +} + +NV_STATUS +kbusUnmapFbApertureByHandle_IMPL +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NvHandle hClient, + NvHandle hMemory, + NvU64 bar1Va +) +{ + NV_STATUS status; + RsClient *pClient = NULL; + RsResourceRef *pSrcMemoryRef = NULL; + Memory *pSrcMemory = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pClient)); + + status = clientGetResourceRef(pClient, hMemory, &pSrcMemoryRef); + if (status != NV_OK) + { + return status; + } + + pSrcMemory = dynamicCast(pSrcMemoryRef->pResource, Memory); + if (pSrcMemory == NULL) + { + return NV_ERR_INVALID_OBJECT; + } + + pMemDesc = pSrcMemory->pMemDesc; + + // Note: fbApertureLength is not used by kbusUnmapFbAperture_HAL(), so it's passed as 0 + status = kbusUnmapFbAperture_HAL(pGpu, pKernelBus, pMemDesc, + bar1Va - gpumgrGetGpuPhysFbAddr(pGpu), + 0, BUS_MAP_FB_FLAGS_MAP_UNICAST); + if (status != NV_OK) + { + return status; + } + + return NV_OK; +} + +/*! + * Helper function to determine if the requested GET_BUS_INFO ctrl call needs to be served + * by GSP/host, then send RPC to GSP/host. Otherwise return directly so that the caller can + * continue the execution on CPU. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in/out] pBusInfo Pointer to NV2080_CTRL_BUS_INFO which specifies the index we want to query + * + * @returns RPC status + */ +NV_STATUS +kbusSendBusInfo_IMPL +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + NV2080_CTRL_BUS_INFO *pBusInfo +) +{ + NV_STATUS status = NV_OK; + NV2080_CTRL_BUS_GET_INFO_V2_PARAMS busGetInfoParams = {0}; + + busGetInfoParams.busInfoList[0] = *pBusInfo; + busGetInfoParams.busInfoListSize = 1; + + NV_RM_RPC_CONTROL(pGpu, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_BUS_GET_INFO_V2, + &busGetInfoParams, + sizeof(busGetInfoParams), + status); + + pBusInfo->data = busGetInfoParams.busInfoList[0].data; + return status; +} diff --git a/src/nvidia/src/kernel/gpu/bus/kern_bus_ctrl.c b/src/nvidia/src/kernel/gpu/bus/kern_bus_ctrl.c new file mode 100644 index 000000000..7075d63f6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/kern_bus_ctrl.c @@ -0,0 +1,651 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "nv_ref.h" +#include "platform/chipset/chipset.h" +#include "os/os.h" +#include "core/system.h" +#include "core/locks.h" +#include "gpu/bif/kernel_bif.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "vgpu/rpc.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/subdevice/subdevice_diag.h" + +#include "ctrl/ctrl2080/ctrl2080bus.h" +#include "ctrl/ctrl208f/ctrl208fbus.h" + +static NvU32 +kbusControlGetCaps +( + OBJGPU *pGpu +) +{ + NvU32 caps = 0; + + // if the Chip is integrated. 
+ if ( IsTEGRA(pGpu) ) + { + caps |= NV2080_CTRL_BUS_INFO_CAPS_CHIP_INTEGRATED; + } + + return caps; +} + +static NV_STATUS +_kbusGetHostCaps(OBJGPU *pGpu, NvU8 *pHostCaps) +{ + NV_STATUS status = NV_OK; + NvBool bCapsInitialized = NV_FALSE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + if (pKernelBus == NULL) + { + status = NV_ERR_INVALID_POINTER; + SLI_LOOP_BREAK; + } + + kbusGetDeviceCaps(pGpu, pKernelBus, pHostCaps, bCapsInitialized); + bCapsInitialized = NV_TRUE; + } + SLI_LOOP_END + + return status; +} + +static NV_STATUS +_getAspmL1FlagsSendRpc +( + OBJGPU *pGpu, + NvBool *bCyaMaskL1, + NvBool *bEnableAspmDtL1 +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS rmStatus; + NV2080_CTRL_INTERNAL_BIF_GET_ASPM_L1_FLAGS_PARAMS *pBifAspmL1Flags; + + // Allocate memory for the command parameter + pBifAspmL1Flags = portMemAllocNonPaged(sizeof(*pBifAspmL1Flags)); + if (pBifAspmL1Flags == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate pBifAspmL1Flags."); + rmStatus = NV_ERR_NO_MEMORY; + goto _getAspmL1FlagsSendRpc_exit; + } + portMemSet(pBifAspmL1Flags, 0, sizeof(*pBifAspmL1Flags)); + + // Send RPC to GSP to get physical BIF PDBs + rmStatus = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_BIF_GET_ASPM_L1_FLAGS, + pBifAspmL1Flags, sizeof(*pBifAspmL1Flags)); + + if (NV_OK != rmStatus) + { + NV_PRINTF(LEVEL_ERROR, + "Error 0x%x receiving bus ASPM disable flags from GSP.\n", rmStatus); + goto _getAspmL1FlagsSendRpc_exit; + } + + *bCyaMaskL1 = pBifAspmL1Flags->bCyaMaskL1; + *bEnableAspmDtL1 = pBifAspmL1Flags->bEnableAspmDtL1; + +_getAspmL1FlagsSendRpc_exit: + portMemFree(pBifAspmL1Flags); + return rmStatus; +} + +// +// HOST RM Device Controls +// +NV_STATUS +deviceCtrlCmdHostGetCaps_IMPL +( + Device *pDevice, + NV0080_CTRL_HOST_GET_CAPS_PARAMS *pHostCapsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + // sanity check array size + if (pHostCapsParams->capsTblSize != NV0080_CTRL_HOST_CAPS_TBL_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n", + pHostCapsParams->capsTblSize, + NV0080_CTRL_HOST_CAPS_TBL_SIZE); + return NV_ERR_INVALID_ARGUMENT; + } + + return _kbusGetHostCaps(pGpu, NvP64_VALUE(pHostCapsParams->capsTbl)); +} + +// +// HOST RM Device Controls +// +NV_STATUS +deviceCtrlCmdHostGetCapsV2_IMPL +( + Device *pDevice, + NV0080_CTRL_HOST_GET_CAPS_V2_PARAMS *pHostCapsParamsV2 +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU8 *pHostCaps = pHostCapsParamsV2->capsTbl; + NV_STATUS rmStatus = NV_OK; + + rmStatus = _kbusGetHostCaps(pGpu, pHostCaps); + + return rmStatus; +} + +// +// BUS RM SubDevice Controls +// +NV_STATUS +subdeviceCtrlCmdBusGetPciInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS *pPciInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + + if (pKernelBif == NULL || !kbifIsPciBusFamily(pKernelBif)) + { + return NV_ERR_NOT_SUPPORTED; + } + + pPciInfoParams->pciDeviceId = pGpu->idInfo.PCIDeviceID; + pPciInfoParams->pciSubSystemId = pGpu->idInfo.PCISubDeviceID; + pPciInfoParams->pciRevisionId = pGpu->idInfo.PCIRevisionID; + + // + // Return device ID field. We no longer support probing past the BR02 bridge. 
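Editor's illustration (not part of the original change): a rough caller-side sketch of this control, using the pRmApi->Control() pattern that appears elsewhere in this file. The command constant NV2080_CTRL_CMD_BUS_GET_PCI_INFO and the hClient/hSubdevice handles are assumptions for illustration; only the params type comes from this function's prototype.

    NV2080_CTRL_BUS_GET_PCI_INFO_PARAMS pciParams = {0};
    NV_STATUS status = pRmApi->Control(pRmApi, hClient, hSubdevice,
                                       NV2080_CTRL_CMD_BUS_GET_PCI_INFO,   /* assumed command name */
                                       &pciParams, sizeof(pciParams));
    /* On success, pciParams.pciDeviceId, .pciSubSystemId and .pciRevisionId hold the
     * values filled in from pGpu->idInfo below. */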
+    //
+    pPciInfoParams->pciExtDeviceId = REF_VAL(NV_CONFIG_PCI_NV_0_DEVICE_ID, pPciInfoParams->pciDeviceId);
+
+    return NV_OK;
+}
+
+NV_STATUS
+subdeviceCtrlCmdBusGetAspmDisableFlags_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_BUS_GET_ASPM_DISABLE_FLAGS_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJCL *pCl = SYS_GET_CL(pSys);
+    NV_STATUS rmStatus;
+    NvBool bCyaMaskL1, bEnableAspmDtL1;
+
+    // Send RPC to GSP to obtain BIF PDB values.
+    rmStatus = _getAspmL1FlagsSendRpc(pGpu, &bCyaMaskL1, &bEnableAspmDtL1);
+    if (NV_OK != rmStatus)
+    {
+        return rmStatus;
+    }
+
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_L1_MASK_REGKEY_OVERRIDE] = bCyaMaskL1;
+    // This flag corresponds to the deprecated PDB_PROP_OS_RM_MAKES_POLICY_DECISIONS property, which always returns TRUE on non-MACOSX.
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_OS_RM_MAKES_POLICY_DECISIONS] = NV_TRUE;
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_BEHIND_BRIDGE] = pGpu->getProperty(pGpu, PDB_PROP_GPU_BEHIND_BRIDGE);
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_UNSUPPORTED] = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED);
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED] = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED);
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY] = pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY);
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_CL_ASPM_L1_CHIPSET_DISABLED] = pCl->getProperty(pCl, PDB_PROP_CL_ASPM_L1_CHIPSET_DISABLED);
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY] = pCl->getProperty(pCl, PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY);
+    pParams->aspmDisableFlags[NV2080_CTRL_ASPM_DISABLE_FLAGS_BIF_ENABLE_ASPM_DT_L1] = bEnableAspmDtL1;
+
+    return NV_OK;
+}
+
+NV_STATUS
+subdeviceCtrlCmdBusGetNvlinkPeerIdMask_IMPL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_BUS_GET_NVLINK_PEER_ID_MASK_PARAMS *pParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice);
+    KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu);
+    NvU32 gfid;
+
+    LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner());
+
+    // This control call should always run in context of a VF.
+ NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + if (IS_GFID_PF(gfid)) + { + return NV_ERR_NOT_SUPPORTED; + } + + portMemCopy((void *)pParams->nvlinkPeerIdMask, + sizeof(pParams->nvlinkPeerIdMask), + (void *)pKernelBus->p2p.busNvlinkPeerNumberMask, + sizeof(pParams->nvlinkPeerIdMask)); + + return NV_OK; +} + +static NV_STATUS +getBusInfos(OBJGPU *pGpu, NV2080_CTRL_BUS_INFO *pBusInfos, NvU32 busInfoListSize) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + NV_STATUS status = NV_OK; + NvU32 i = 0; + + for (i = 0; i < busInfoListSize; i++) + { + NvBool bSendRpc = NV_FALSE; + + switch (pBusInfos[i].index) + { + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_INFO: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_WIDTH_SWITCH_ERROR_COUNT: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_SPEED_SWITCH_ERROR_COUNT: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_CYA_ASPM: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS_CLEAR: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS_CLEAR: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED_CLEAR: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS_CLEAR: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS_CLEAR: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS_CLEAR: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS_CLEAR: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS_CLEAR: + bSendRpc = IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu); + break; + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_ASLM_STATUS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS: + bSendRpc = IS_GSP_CLIENT(pGpu); + break; + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER: + case NV2080_CTRL_BUS_INFO_INDEX_MSI_INFO: + bSendRpc = IS_VIRTUAL(pGpu); + break; + } + + if (bSendRpc) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kbusSendBusInfo(pGpu, GPU_GET_KERNEL_BUS(pGpu), &pBusInfos[i])); + continue; + } + + switch (pBusInfos[i].index) + { + case NV2080_CTRL_BUS_INFO_INDEX_TYPE: + { + pBusInfos[i].data = kbifGetBusIntfType_HAL(pKernelBif); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_INTLINE: + { + pBusInfos[i].data = pGpu->busInfo.IntLine; + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_CAPS: + { + pBusInfos[i].data = kbusControlGetCaps(pGpu); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS: + { + } + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CAPS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CAPS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_GEN_INFO: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CAPS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CTRL_STATUS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_CTRL_STATUS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_UPSTREAM_LINK_CTRL_STATUS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_DOWNSTREAM_LINK_CTRL_STATUS: + case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_ERRORS: + case 
NV2080_CTRL_BUS_INFO_INDEX_PCIE_ROOT_LINK_ERRORS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN_INFO:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GEN2_INFO:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_AER:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CAPS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_GEN_INFO:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_BOARD_LINK_CTRL_STATUS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_ASLM_STATUS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_WIDTH_SWITCH_ERROR_COUNT:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_LINK_SPEED_SWITCH_ERROR_COUNT:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_CYA_ASPM:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_LINECODE_ERRORS_CLEAR:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CRC_ERRORS_CLEAR:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NAKS_RECEIVED_CLEAR:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FAILED_L0S_EXITS_CLEAR:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CORRECTABLE_ERRORS_CLEAR:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_NONFATAL_ERRORS_CLEAR:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_FATAL_ERRORS_CLEAR:
+            case NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_UNSUPPORTED_REQUESTS_CLEAR:
+            case NV2080_CTRL_BUS_INFO_INDEX_MSI_INFO:
+            {
+                if (kbifIsPciBusFamily(pKernelBif))
+                {
+                    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kbifControlGetPCIEInfo(pGpu, pKernelBif, &pBusInfos[i]));
+                }
+                else
+                {
+                    status = NV_ERR_NOT_SUPPORTED;
+                }
+                break;
+            }
+            case NV2080_CTRL_BUS_INFO_INDEX_COHERENT_DMA_FLAGS:
+            {
+                pBusInfos[i].data = 0;
+
+                if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
+                {
+                    //
+                    // On SOC Display all of the (system) memory that nvdisplay HW needs
+                    // to read from/write to must be allocated as non-coherent. It doesn't matter
+                    // whether this system memory is accessed over the NISO interface
+                    // (e.g., for pushbuffers, semaphores, notifiers, etc) or
+                    // over the ISO interface (for window/cursor surface pixel data,
+                    // LUT entries, etc). on Orin, there's an ISO2AXI SHIM that ISOHUB will
+                    // go through to interface with the memory subsystem on Orin,
+                    // and a similar NISO2AXI SHIM will also exist for FE as well.
+                    //
+                    break;
+                }
+
+                if ((pKernelBif != NULL) &&
+                    FLD_TEST_REF(BIF_DMA_CAPS_SNOOP, _CTXDMA, kbifGetDmaCaps(pGpu, pKernelBif)))
+                {
+                    pBusInfos[i].data |=
+                        DRF_DEF(2080,
+                                _CTRL_BUS_INFO_COHERENT_DMA_FLAGS,
+                                _CTXDMA,
+                                _TRUE);
+                }
+
+                if (pDma->gpuGartCaps & DMA_GPU_GART_CAPS_SNOOP)
+                {
+                    pBusInfos[i].data |=
+                        DRF_DEF(2080,
+                                _CTRL_BUS_INFO_COHERENT_DMA_FLAGS,
+                                _GPUGART,
+                                _TRUE);
+                }
+                break;
+            }
+            case NV2080_CTRL_BUS_INFO_INDEX_NONCOHERENT_DMA_FLAGS:
+            {
+                pBusInfos[i].data = 0;
+
+                if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY))
+                {
+                    //
+                    // On SOC Display all of the (system) memory that nvdisplay HW needs
+                    // to read from/write to must be allocated as non-coherent.
it doesn't matter + // whether this system memory is accessed over the NISO inteface + // (e.g., for pushbuffers, semaphores, notifiers, etc) or + // over the ISO interface (for window/cursor surface pixel data, + // LUT entries, etc). on Orin, there's an ISO2AXI SHIM that ISOHUB will + // go through to interface with the memory subsystem on Orin, + // and a similar NISO2AXI SHIM will also exist for FE as well. + // + pBusInfos[i].data |= + DRF_DEF(2080, + _CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS, + _CTXDMA, + _TRUE); + break; + } + + if ((pKernelBif != NULL) && + FLD_TEST_REF(BIF_DMA_CAPS_NOSNOOP, _CTXDMA, kbifGetDmaCaps(pGpu, pKernelBif))) + { + pBusInfos[i].data |= + DRF_DEF(2080, + _CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS, + _CTXDMA, + _TRUE); + } + + if (pDma->gpuGartCaps & DMA_GPU_GART_CAPS_NOSNOOP) + { + pBusInfos[i].data |= + DRF_DEF(2080, + _CTRL_BUS_INFO_NONCOHERENT_DMA_FLAGS, + _GPUGART, + _TRUE); + } + + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE: + { + pBusInfos[i].data = (NvU32)(pKernelGmmu->maxVASize >> 20); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_SIZE_HI: + { + pBusInfos[i].data = (NvU32)((pKernelGmmu->maxVASize >> 20) >> 32); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_GPU_GART_FLAGS: + { + pBusInfos[i].data = + DRF_DEF(2080, + _CTRL_BUS_INFO_GPU_GART_FLAGS, + _UNIFIED, + _TRUE); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_BUS_NUMBER: + { + if (kbifIsPciBusFamily(pKernelBif)) + { + pBusInfos[i].data = gpuGetBus(pGpu); + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_DEVICE_NUMBER: + { + if (kbifIsPciBusFamily(pKernelBif)) + { + pBusInfos[i].data = gpuGetDevice(pGpu); + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_INTERFACE_TYPE: + case NV2080_CTRL_BUS_INFO_INDEX_GPU_INTERFACE_TYPE: + { + // + // We no longer support AGP/PCIe bridges so Bus/GPU interface + // types are the same + // + pBusInfos[i].data = kbifGetBusIntfType_HAL(pKernelBif); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_DOMAIN_NUMBER: + { + pBusInfos[i].data = gpuGetDomain(pGpu); + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE: + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_NVLINK_SYSMEM)) + { + pBusInfos[i].data = NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK; + } + else if (pGpu->getProperty(pGpu, PDB_PROP_GPU_C2C_SYSMEM)) + { + pBusInfos[i].data = NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_C2C; + } + else + { + pBusInfos[i].data = NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_PCIE; + } + break; + } + default: + { + pBusInfos[i].data = 0; + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + if (status != NV_OK) + { + break; + } + } + + return status; +} + +NV_STATUS +subdeviceCtrlCmdBusGetInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_BUS_GET_INFO_PARAMS *pBusInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + if ((pBusInfoParams->busInfoListSize == 0) || + (NvP64_VALUE(pBusInfoParams->busInfoList) == NULL)) + { + return NV_OK; + } + + return getBusInfos(pGpu, NvP64_VALUE(pBusInfoParams->busInfoList), pBusInfoParams->busInfoListSize); +} + +NV_STATUS +subdeviceCtrlCmdBusGetInfoV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_BUS_GET_INFO_V2_PARAMS *pBusInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + if ((pBusInfoParams->busInfoListSize > NV2080_CTRL_BUS_INFO_MAX_LIST_SIZE) || + (pBusInfoParams->busInfoListSize == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return getBusInfos(pGpu, 
(NV2080_CTRL_BUS_INFO*)pBusInfoParams->busInfoList, pBusInfoParams->busInfoListSize); +} + +NV_STATUS +subdeviceCtrlCmdBusGetPciBarInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_BUS_GET_PCI_BAR_INFO_PARAMS *pBarInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 i; + + if (!kbifIsPciBusFamily(GPU_GET_KERNEL_BIF(pGpu))) + { + return NV_ERR_NOT_SUPPORTED; + } + + // store the number of valid bars + pBarInfoParams->pciBarCount = pKernelBus->totalPciBars; + + NV_ASSERT(pBarInfoParams->pciBarCount <= NV2080_CTRL_BUS_MAX_PCI_BARS); + + // store info for each of the valid bars + for (i = 0; i < pBarInfoParams->pciBarCount; i++) + { + pBarInfoParams->pciBarInfo[i].flags = 0; + pBarInfoParams->pciBarInfo[i].barSize = (NvU32)(pKernelBus->pciBarSizes[i] >> 20); + pBarInfoParams->pciBarInfo[i].barSizeBytes = pKernelBus->pciBarSizes[i]; + pBarInfoParams->pciBarInfo[i].barOffset = pKernelBus->pciBars[i]; + } + + // clear remaining entries to zero + for (; i < NV2080_CTRL_BUS_MAX_PCI_BARS; i++) + { + pBarInfoParams->pciBarInfo[i].flags = 0; + pBarInfoParams->pciBarInfo[i].barSize = 0; + pBarInfoParams->pciBarInfo[i].barSizeBytes = 0; + pBarInfoParams->pciBarInfo[i].barOffset = 0; + } + + return NV_OK; +} + +NV_STATUS +diagapiCtrlCmdBusIsBar1Virtual_IMPL +( + DiagApi *pDiagApi, + NV208F_CTRL_BUS_IS_BAR1_VIRTUAL_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + pParams->bIsVirtual = !kbusIsBar1PhysicalModeEnabled(pKernelBus); + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/bus/kern_bus_vbar2.c b/src/nvidia/src/kernel/gpu/bus/kern_bus_vbar2.c new file mode 100644 index 000000000..0b98044a3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/kern_bus_vbar2.c @@ -0,0 +1,1183 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief Common Virtual BAR2 support. Because of this we cannot + * include any chip specific headers. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "mem_mgr/io_vaspace.h" +#include "vgpu/vgpu_events.h" + +// Prototypes for static functions +static NV_STATUS _kbusConstructVirtualBar2Heaps(KernelBus *pKernelBus, NvU32 gfid); +static NV_STATUS _kbusConstructVirtualBar2Lists(KernelBus *pKernelBus, NvU32 gfid); +static void _kbusDestructVirtualBar2Heaps(KernelBus *pKernelBus, NvU32 gfid); +static void _kbusDestructVirtualBar2Lists(KernelBus *pKernelBus, NvU32 gfid); +static void _freeRmApertureMap_VBAR2(OBJGPU *, KernelBus *, VirtualBar2MapEntry *, NvU32 flags); +static MemDescDestroyCallBack _kbusReleaseRmAperture_wrapper; + +/*! + * Internal function to allocate various bar2 heaps + * @returns NV_STATUS + * + * @note This function handles the memory cleanup for heaps for failure paths. + */ +static NV_STATUS _kbusConstructVirtualBar2Heaps(KernelBus *pKernelBus, NvU32 gfid) +{ + NV_STATUS status = NV_OK; + + // + // Setup eheap for RM bar2 space management. + // + // The number of maximum eheap region descriptors needed is 2 times the number + // of cached BAR2 mappings, as the worst case is (alloc,free,alloc,free, etc.) + // in VA space + // + + NV_ASSERT_OK_OR_GOTO(status, + kbusConstructVirtualBar2CpuVisibleHeap_HAL(pKernelBus, gfid), cleanup); + + NV_ASSERT_OK_OR_GOTO(status, + kbusConstructVirtualBar2CpuInvisibleHeap_HAL(pKernelBus, gfid), cleanup); + +cleanup: + if (NV_OK != status) + _kbusDestructVirtualBar2Heaps(pKernelBus, gfid); + return status; +} + +/*! + * Internal function to allocate various bar2 lists + * @returns NV_STATUS + * + * @note This function handles the memory cleanup for failure paths. + */ +static NV_STATUS _kbusConstructVirtualBar2Lists(KernelBus *pKernelBus, NvU32 gfid) +{ + // + // TODO: This if() will go away when kbusConstructVirtualBar2 is moved back to kbusConstruct + // from kbusStatePreInit(). + // + if (pKernelBus->virtualBar2[gfid].pMapListMemory == NULL) + { + NvU32 i; + + // Pre-alloc the mapping list used for bar2 allocations + listInitIntrusive(&pKernelBus->virtualBar2[gfid].freeMapList); + listInitIntrusive(&pKernelBus->virtualBar2[gfid].cachedMapList); + listInitIntrusive(&pKernelBus->virtualBar2[gfid].usedMapList); + + pKernelBus->virtualBar2[gfid].pMapListMemory = portMemAllocNonPaged( + sizeof(VirtualBar2MapEntry) * BUS_BAR2_MAX_MAPPINGS); + if (pKernelBus->virtualBar2[gfid].pMapListMemory == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc bar2 mapping list!\n"); + DBG_BREAKPOINT(); + _kbusDestructVirtualBar2Lists(pKernelBus, gfid); + return NV_ERR_NO_MEMORY; + } + portMemSet(pKernelBus->virtualBar2[gfid].pMapListMemory, 0, sizeof(VirtualBar2MapEntry) * BUS_BAR2_MAX_MAPPINGS); + + // Initialize the free mapping list + for (i = 0; i < BUS_BAR2_MAX_MAPPINGS; i++) + { + listAppendExisting(&pKernelBus->virtualBar2[gfid].freeMapList, &(pKernelBus->virtualBar2[gfid].pMapListMemory[i])); + } + } + return NV_OK; +} + +/*! + * Initialize common virtual BAR2 data structures. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] gfid + * + * @returns None + */ +NV_STATUS +kbusConstructVirtualBar2_VBAR2(OBJGPU *pGpu, KernelBus *pKernelBus, NvU32 gfid) +{ + NV_STATUS status = NV_OK; + + // + // TODO: Enable these when cpu invisible heap gets separated from bar2 virtual heap + // construction and virtual bar2 heap construction moves under kbusConstruct. 
+ // +// NV_ASSERT_OR_RETURN(NULL == pKernelBus->virtualBar2.pMapListMemory, NV_ERR_INVALID_STATE); +// NV_ASSERT_OR_RETURN(NULL == pKernelBus->virtualBar2.pVASpaceHeap, NV_ERR_INVALID_STATE); +// NV_ASSERT_OR_RETURN(NULL == pKernelBus->virtualBar2.pVASpaceHiddenHeap, NV_ERR_INVALID_STATE); + + // + // GSP-RM and VF in SRIOV heavy mode don't use the cpuVisible BAR2, + // so no need to construct the BAR2 lists + // + if (!RMCFG_FEATURE_PLATFORM_GSP && IS_GFID_PF(gfid)) + { + // Construct the various lists needed by BAR2 + status = _kbusConstructVirtualBar2Lists(pKernelBus, gfid); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + } + + // Construct various eheaps needed by BAR2 + status = _kbusConstructVirtualBar2Heaps(pKernelBus, gfid); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // Default to 4KB alignment + pKernelBus->virtualBar2[gfid].vAlignment = RM_PAGE_SIZE; + + // Used for issuing TLB invalidates + pKernelBus->virtualBar2[gfid].flags = VASPACE_FLAGS_BAR | VASPACE_FLAGS_BAR_BAR2; + pKernelBus->virtualBar2[gfid].pPDB = NULL; + +#if (NV_PRINTF_ENABLED) + pKernelBus->virtualBar2[gfid].mapCount = 0; + pKernelBus->virtualBar2[gfid].cacheHit = 0; + pKernelBus->virtualBar2[gfid].evictions = 0; +#endif + + return status; +} + +/*! + * Allocate and construct the cpu-visible bar2 heap + * + * @param pKernelBus + * @param bfid + * + * @return NV_OK or bubble up the returned error code from the callee + */ +NV_STATUS +kbusConstructVirtualBar2CpuVisibleHeap_VBAR2 +( + KernelBus *pKernelBus, + NvU32 gfid +) +{ + if (IS_GFID_VF(gfid)) + { + return NV_OK; + } + + // + // TODO: This if() will go away when kbusConstructVirtualBar2 is moved back to kbusConstruct + // from kbusStatePreInit(). + // + if (pKernelBus->virtualBar2[gfid].pVASpaceHeap == NULL) + { + pKernelBus->virtualBar2[gfid].pVASpaceHeap = portMemAllocNonPaged(sizeof(OBJEHEAP)); + if (pKernelBus->virtualBar2[gfid].pVASpaceHeap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc bar2 eheap!\n"); + DBG_BREAKPOINT(); + return NV_ERR_NO_MEMORY; + } + constructObjEHeap(pKernelBus->virtualBar2[gfid].pVASpaceHeap, + (pKernelBus->bar2[gfid].cpuVisibleBase), + (pKernelBus->bar2[gfid].cpuVisibleLimit + 1), + 0, + BUS_BAR2_MAX_MAPPINGS * 2); + } + return NV_OK; +} + +/*! + * Allocate and construct the cpu-invisible bar2 heap + * + * @param pKernelBus + * @param gfid + * + * @return NV_OK or bubble up the returned error code from the callee + */ +NV_STATUS +kbusConstructVirtualBar2CpuInvisibleHeap_VBAR2 +( + KernelBus *pKernelBus, + NvU32 gfid +) +{ + // + // TODO: Move the cpu invisible Heap construction out of BAR2 construction and into kbusPreInit + // so that virtual BAR2 can be constructed during kbusConstruct + // + // Setup eheap for Hidden bar2 space management only if Invisible region is required + // Hidden heap doesn't require any pre-allocated memory structs. + // + if (pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap == NULL) + { + if (pKernelBus->bar2[gfid].cpuInvisibleLimit > pKernelBus->bar2[gfid].cpuInvisibleBase) + { + pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap = portMemAllocNonPaged(sizeof(OBJEHEAP)); + if (pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Unable to alloc hidden bar2 eheap!\n"); + DBG_BREAKPOINT(); + return NV_ERR_NO_MEMORY; + } + constructObjEHeap(pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap, + pKernelBus->bar2[gfid].cpuInvisibleBase, + (pKernelBus->bar2[gfid].cpuInvisibleLimit + 1), 0, 0); + } + } + return NV_OK; +} + +/*! 
+ * Internal function to destroy all heap objects under bar2 + * @returns void + */ +static void _kbusDestructVirtualBar2Heaps(KernelBus *pKernelBus, NvU32 gfid) +{ + if (NULL != pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap) + { + pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap->eheapDestruct(pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap); + portMemFree(pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap); + pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap = NULL; + } + + if (NULL != pKernelBus->virtualBar2[gfid].pVASpaceHeap) + { + pKernelBus->virtualBar2[gfid].pVASpaceHeap->eheapDestruct(pKernelBus->virtualBar2[gfid].pVASpaceHeap); + portMemFree(pKernelBus->virtualBar2[gfid].pVASpaceHeap); + pKernelBus->virtualBar2[gfid].pVASpaceHeap = NULL; + } +} + +/*! + * Internal function to clean up various mapping lists + * @returns void + */ +static void _kbusDestructVirtualBar2Lists(KernelBus *pKernelBus, NvU32 gfid) +{ + listDestroy(&pKernelBus->virtualBar2[gfid].freeMapList); + listDestroy(&pKernelBus->virtualBar2[gfid].cachedMapList); + listDestroy(&pKernelBus->virtualBar2[gfid].usedMapList); + + portMemFree(pKernelBus->virtualBar2[gfid].pMapListMemory); + pKernelBus->virtualBar2[gfid].pMapListMemory = NULL; +} + +/*! + * Clean-up and free virtual BAR2 SW resources + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] shutdown True if shutting down + * @param[in] gfid + * + * @returns void + */ +void +kbusDestructVirtualBar2_VBAR2(OBJGPU *pGpu, KernelBus *pKernelBus, NvBool shutdown, NvU32 gfid) +{ + kbusFlushVirtualBar2_HAL(pGpu, pKernelBus, shutdown, gfid); + _kbusDestructVirtualBar2Lists(pKernelBus, gfid); + _kbusDestructVirtualBar2Heaps(pKernelBus, gfid); + + if (IS_GFID_PF(gfid)) + { + NV_PRINTF(LEVEL_INFO, + "MapCount: %d Bar2 Hits: %d Evictions: %d\n", + pKernelBus->virtualBar2[gfid].mapCount, + pKernelBus->virtualBar2[gfid].cacheHit, + pKernelBus->virtualBar2[gfid].evictions); + } +} + +/*! + * Clean-up virtual cache structures. + * + * Verify that there are no leaked or unreleased mappings. + * + * When shutting down the RM we should not have any outstanding memory descriptors + * remaining in BAR2, so allow an error check for this. + * + * When suspending we only need to release them as a memory descriptor may live across + * a resume, but we don't want to save the BAR2 mappings as BAR2 is destroyed and + * rebuilt on resume. We use this call directly on suspend as we don't need to reclaim + * data structures, just flush the cached mappings. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] True if shutting down + * + * @returns None + */ +void +kbusFlushVirtualBar2_VBAR2(OBJGPU *pGpu, KernelBus *pKernelBus, NvBool shutdown, NvU32 gfid) +{ + if (IS_GFID_VF(gfid)) + { + return; + } + + // Enforce RM unmapping up all BAR2 mappings + NV_ASSERT(listCount(&pKernelBus->virtualBar2[gfid].usedMapList) == 0); + + // There should be no unreleased mappings at shutdown + NV_ASSERT(!shutdown || (listCount(&pKernelBus->virtualBar2[gfid].cachedMapList) == 0)); + + // Release memory descriptors we still have cached + while (listCount(&pKernelBus->virtualBar2[gfid].cachedMapList)) + { + VirtualBar2MapEntry *pMap = listHead(&pKernelBus->virtualBar2[gfid].cachedMapList); + + NV_ASSERT(pMap->pMemDesc != NULL); + + _freeRmApertureMap_VBAR2(pGpu, pKernelBus, pMap, + UPDATE_RM_APERTURE_FLAGS_INVALIDATE | UPDATE_RM_APERTURE_FLAGS_DISCARD); + } +} + +/*! + * @brief one-time init of BAR2 Virtual Memory Manager. + * + * Sets up CPU pointer to the page tables at the top of FB. 
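+ *
+ * When the BAR2 page tables live in FBMEM, a persistent CPU mapping to them
+ * is created through memmgrMemDescBeginTransfer(); when they live in SYSMEM
+ * they are mapped on demand instead and pPageLevels is left NULL.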
+ * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK on success, relevant error code otherwise + */ +NV_STATUS +kbusInitVirtualBar2_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NvU32 gfid; + MEMORY_DESCRIPTOR *pMemDesc; + NV_STATUS status = NV_OK; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + pMemDesc = pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc; + + if (KBUS_BAR2_TUNNELLED(pKernelBus)) + { + return NV_OK; + } + + if ((pMemDesc != NULL) && + (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM)) + { + // Get a CPU pointer to BAR2 page tables + pKernelBus->virtualBar2[gfid].pPageLevels = + memmgrMemDescBeginTransfer(GPU_GET_MEMORY_MANAGER(pGpu), + pMemDesc, + TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING); + NV_ASSERT_OR_RETURN(pKernelBus->virtualBar2[gfid].pPageLevels, + NV_ERR_INSUFFICIENT_RESOURCES); + } + else + { + // + // In SYSMEM, page level instances are allocated one at a time. It is + // not guaranteed that they are contiguous. Thus, SYSMEM page level + // instances are dynamically mapped-in via memmap as needed instead of + // having one static mapping. + // + NV_ASSERT(pMemDesc == NULL); + pKernelBus->virtualBar2[gfid].pPageLevels = NULL; + } + + return status; +} + +/*! + * @brief Sets up CPU pointer to the temporary page tables setup at + * the bottom of FB. + * + * Sets up CPU pointer to the temporary page tables at the bottom of FB. + * + * @param[in] pGpu + * @param[in] pKernelBus + * + * @returns NV_OK on success, relevant error code otherwise + */ +NV_STATUS +kbusPreInitVirtualBar2_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus +) +{ + NvU32 gfid; + MEMORY_DESCRIPTOR *pMemDesc; + NV_STATUS status = NV_OK; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + pMemDesc = pKernelBus->virtualBar2[gfid].pPageLevelsMemDescForBootstrap; + + if (KBUS_BAR2_TUNNELLED(pKernelBus)) + { + return NV_OK; + } + + if ((pMemDesc != NULL) && + (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM)) + { + // Get a fast CPU pointer to BAR2 page tables (either direct or BAR2). + pKernelBus->virtualBar2[gfid].pPageLevelsForBootstrap = kbusMapRmAperture_HAL(pGpu, + pMemDesc); + NV_ASSERT_OR_RETURN(pKernelBus->virtualBar2[gfid].pPageLevelsForBootstrap, + NV_ERR_INSUFFICIENT_RESOURCES); + } + + return status; +} + +/*! + * @brief Helper routine to clean-up a unreferenced mapping + * + * Mapping will be moved from the cached list to the free list. 
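+ *
+ * Unless BAR2 is tunnelled or has no CPU mapping, the PTEs covering the
+ * mapping are invalidated via kbusUpdateRmAperture_HAL(); the VA range is
+ * then returned to the BAR2 eheap and the memdesc destroy callback is
+ * detached before the entry is put back on the free list.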
+ * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMap Mapping to delete + * @param[in] flags Flags for kbusUpdateRmAperture_HAL + * + * @returns None + */ +static void +_freeRmApertureMap_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + VirtualBar2MapEntry *pMap, + NvU32 flags +) +{ + OBJEHEAP *pVASpaceHeap = pKernelBus->virtualBar2[GPU_GFID_PF].pVASpaceHeap; + EMEMBLOCK *pBlockFree; + NvU64 vAddr, vAddrSize; + + listRemove(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList, pMap); + + if (!KBUS_BAR2_TUNNELLED(pKernelBus) && pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping) + { + pBlockFree = pVASpaceHeap->eheapGetBlock(pVASpaceHeap, pMap->vAddr, NV_FALSE); + + if (pBlockFree != NULL) + { + vAddr = pBlockFree->begin; + vAddrSize = pBlockFree->end - vAddr + 1; + + kbusUpdateRmAperture_HAL(pGpu, pKernelBus, + pMap->pMemDesc, vAddr, vAddrSize, flags); + } + } + + pVASpaceHeap->eheapFree(pVASpaceHeap, pMap->vAddr); + + memdescRemoveDestroyCallback(pMap->pMemDesc, &pMap->memDescCallback); + pMap->pMemDesc = NULL; + + listPrependExisting(&pKernelBus->virtualBar2[GPU_GFID_PF].freeMapList, pMap); +} + +/*! + * Second level of the RmAperture support for when a mapping is going to be in BAR2. + * + * Multiple mappings of a single MEMORY_DESCRIPTOR is now refernced counted in + * the memory descriptor code. + * + * If this requests needs to update PTEs, call kbusUpdateRmAperture(). + * + * It operates on a single GPU. SLI is handled above this call. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc The memory descriptor being mapped + * + * @returns A CPU pointer to the memory + */ +static NvU8 * +kbusMapBar2ApertureCached_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + VirtualBar2MapEntry *pMapNew; + OBJEHEAP *pVASpaceHeap = NULL; + NvU64 vAddr = 0; + NvU32 allocFlags = 0; + NvU64 allocSize = 0; + NvBool bEvictNeeded = NV_FALSE; + VirtualBar2MapListIter it; + + NV_ASSERT(pMemDesc->pGpu == pGpu); + +#if NV_PRINTF_ENABLED + pKernelBus->virtualBar2[GPU_GFID_PF].mapCount++; +#endif + + // + // Reject a illegal memdesc. Mappings that are too big will fail when + // they can't find space in the eheap. + // + NV_ASSERT_OR_RETURN((pMemDesc->Size != 0) && (pMemDesc->PageCount != 0), NULL); + + NV_ASSERT_OR_RETURN(pKernelBus->virtualBar2[GPU_GFID_PF].pVASpaceHeap != NULL, NULL); + + // + // Check the cached list for a recently used mapping + // + it = listIterAll(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList); + while (listIterNext(&it)) + { + VirtualBar2MapEntry *pMap = it.pValue; + + NV_ASSERT(pMap->pMemDesc); + + if (pMap->pMemDesc == pMemDesc) + { + // Move the mapping from the cached list to the used list + listRemove(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList, pMap); + listPrependExisting(&pKernelBus->virtualBar2[GPU_GFID_PF].usedMapList, pMap); + +#if NV_PRINTF_ENABLED + pKernelBus->virtualBar2[GPU_GFID_PF].cacheHit++; +#endif + return pMap->pRtnPtr; + } + } + + // + // We didn't find an existing mapping. If there are no free mappings + // list entries available, bail here + // + if ((listCount(&pKernelBus->virtualBar2[GPU_GFID_PF].freeMapList) == 0) && + (listCount(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList) == 0)) + { + NV_PRINTF(LEVEL_ERROR, "No free bar2 mapping struct left!\n"); + DBG_BREAKPOINT(); + return NULL; + } + + // + // Pack persistent mappings at the end of BAR2 space to avoid + // fragmentation. 
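+    // Long-lived mappings are therefore allocated top-down
+    // (NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN), keeping the bottom of the
+    // heap contiguous for the short-lived mappings that are allocated and
+    // evicted frequently.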
+ // + if (flags & TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING) + { + allocFlags |= NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN; + } + + // + // Allocate VA SPACE + // + pVASpaceHeap = pKernelBus->virtualBar2[GPU_GFID_PF].pVASpaceHeap; + allocSize = pMemDesc->PageCount << RM_PAGE_SHIFT; + bEvictNeeded = + (NV_OK != pVASpaceHeap->eheapAlloc(pVASpaceHeap, VAS_EHEAP_OWNER_NVRM, + &allocFlags, &vAddr, &allocSize, + pKernelBus->virtualBar2[GPU_GFID_PF].vAlignment, + pKernelBus->virtualBar2[GPU_GFID_PF].vAlignment, + NULL, NULL, NULL)); + + if (bEvictNeeded) + { + // + // Is a single mapping big enough to fit the new request? If so, lets evict it. + // Search in reverse to find the oldest mapping. + // + VirtualBar2MapEntry *pMap; + + for (pMap = listTail(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList); + pMap != NULL; + pMap = listPrev(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList, pMap)) + { + NV_ASSERT(pMap->pMemDesc != NULL); + if (pMap->pMemDesc->PageCount >= pMemDesc->PageCount) + { +#if NV_PRINTF_ENABLED + pKernelBus->virtualBar2[GPU_GFID_PF].evictions++; +#endif + _freeRmApertureMap_VBAR2(pGpu, pKernelBus, pMap, + UPDATE_RM_APERTURE_FLAGS_INVALIDATE | UPDATE_RM_APERTURE_FLAGS_DISCARD); + bEvictNeeded = NV_FALSE; + break; + } + } + + // + // If no single allocation has enough room, free all cached mappings and + // hope we get enough contiguous VASpace. + // + if (bEvictNeeded) + { + while (listCount(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList)) + { + VirtualBar2MapEntry *pMap = listHead(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList); + + NV_ASSERT(pMap->pMemDesc != NULL); + +#if NV_PRINTF_ENABLED + pKernelBus->virtualBar2[GPU_GFID_PF].evictions++; +#endif + _freeRmApertureMap_VBAR2(pGpu, pKernelBus, pMap, + UPDATE_RM_APERTURE_FLAGS_INVALIDATE | UPDATE_RM_APERTURE_FLAGS_DISCARD); + } + } + + // try to reallocate BAR2|CPU space via the eheap + if ( NV_OK != pVASpaceHeap->eheapAlloc(pVASpaceHeap, VAS_EHEAP_OWNER_NVRM, &allocFlags, &vAddr, + &allocSize, + pKernelBus->virtualBar2[GPU_GFID_PF].vAlignment, + pKernelBus->virtualBar2[GPU_GFID_PF].vAlignment, + NULL, NULL, NULL) ) + { + NV_PRINTF(LEVEL_ERROR, + "Not enough contiguous BAR2 VA space left allocSize %llx!\n", + allocSize); + DBG_BREAKPOINT(); + return NULL; + } + } + + // + // Allocate pMap - evict oldest (last) cached entry if no free entries + // + if (listCount(&pKernelBus->virtualBar2[GPU_GFID_PF].freeMapList) == 0) + { +#if NV_PRINTF_ENABLED + pKernelBus->virtualBar2[GPU_GFID_PF].evictions++; +#endif + _freeRmApertureMap_VBAR2(pGpu, pKernelBus, + listTail(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList), + UPDATE_RM_APERTURE_FLAGS_INVALIDATE | UPDATE_RM_APERTURE_FLAGS_DISCARD); + } + pMapNew = listHead(&pKernelBus->virtualBar2[GPU_GFID_PF].freeMapList); + + listRemove(&pKernelBus->virtualBar2[GPU_GFID_PF].freeMapList, pMapNew); + + // Update the page tables + if (pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping == NULL || + (!KBUS_BAR2_TUNNELLED(pKernelBus) && + NV_OK != kbusUpdateRmAperture_HAL(pGpu, pKernelBus, pMemDesc, vAddr, + pMemDesc->PageCount * RM_PAGE_SIZE, + UPDATE_RM_APERTURE_FLAGS_INVALIDATE))) + { + pVASpaceHeap->eheapFree(pVASpaceHeap, vAddr); + listPrependExisting(&pKernelBus->virtualBar2[GPU_GFID_PF].freeMapList, pMapNew); + return NULL; + } + + // Fill in the new mapping data + pMapNew->pRtnPtr = pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping + NvU64_LO32(vAddr) + pMemDesc->PteAdjust; // CPU pointer + pMapNew->pMemDesc = pMemDesc; + pMapNew->vAddr = vAddr; + + // Request notification when 
this memDesc is destroyed + pMapNew->memDescCallback.destroyCallback = &_kbusReleaseRmAperture_wrapper; + pMapNew->memDescCallback.pObject = (void *)pKernelBus; + memdescAddDestroyCallback(pMemDesc, &pMapNew->memDescCallback); + + listPrependExisting(&pKernelBus->virtualBar2[GPU_GFID_PF].usedMapList, pMapNew); + + return pMapNew->pRtnPtr; +} + +/*! + * This is a wrapper function to trigger kbusReleaseRmAperture_HAL(). + * This must be kept compat with MemDescDestroyCallBack. + */ +static void +_kbusReleaseRmAperture_wrapper +( + OBJGPU *pGpu, + void *pObject, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + KernelBus *pKernelBus = reinterpretCast(pObject, KernelBus*); + kbusReleaseRmAperture_HAL(ENG_GET_GPU(pKernelBus), pKernelBus, pMemDesc); +} + +/*! + * Second level of the RmAperture support for when a mapping is going to be in BAR2. + * We don't update PTEs here unless SPARSIFY flag is passed, just leave the mapping + * cached and move on. This is faster and we may get to reuse them later. + * + * This is common code shared by all chips after NV50 + * + * @param[out] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc Memory descriptor to unmap + * @param[in] flags TRANSFER_FLAGS + * @returns None + */ +static void +kbusUnmapBar2ApertureCached_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + VirtualBar2MapListIter it; + + it = listIterAll(&pKernelBus->virtualBar2[GPU_GFID_PF].usedMapList); + while (listIterNext(&it)) + { + VirtualBar2MapEntry *pMap = it.pValue; + + if (pMap->pMemDesc == pMemDesc) + { + // + // Remove from used list and move to the end start of the cached list. + // Remapping of recent buffers is common. + // + listRemove(&pKernelBus->virtualBar2[GPU_GFID_PF].usedMapList, pMap); + listPrependExisting(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList, pMap); + + if (flags & TRANSFER_FLAGS_DESTROY_MAPPING) + { + _freeRmApertureMap_VBAR2(pGpu, pKernelBus, pMap, + UPDATE_RM_APERTURE_FLAGS_INVALIDATE | UPDATE_RM_APERTURE_FLAGS_SPARSIFY); + } + + return; + } + } + + // Whoops, we didn't find the mapping region - something's wrong! + NV_PRINTF(LEVEL_ERROR, "can't find mapping struct!\n"); + DBG_BREAKPOINT(); +} + +/*! + * @brief Rubber-stamp scratch mapping as valid + */ +NvU8 * +kbusValidateBar2ApertureMapping_SCRATCH +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU8 *pCpu +) +{ + return pCpu; +} + +/*! + * @brief validate existing BAR2 mapping is still valid vs GPU reset + * + * @returns Existing or updated scratch buffer pointer + */ +NvU8 * +kbusValidateBar2ApertureMapping_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU8 *pCpu +) +{ + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu) && + !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GPU_IN_RESET)) + { + // + // Release existing mapping and replace it with a new mapping. + // + // The callee is responsbile for updating the pointer + // after it is validated. We cannot handle fixing stale + // pointers to allocated before a GPU reset here. + // + kbusUnmapBar2ApertureWithFlags_HAL(pGpu, pKernelBus, pMemDesc, &pCpu, + TRANSFER_FLAGS_NONE); + return kbusMapBar2Aperture_HAL(pGpu, pKernelBus, pMemDesc, + TRANSFER_FLAGS_NONE); + } + + return pCpu; +} + +/*! 
+ * @brief validate existing BAR2 mapping is still valid vs GPU reset + * + * @returns Existing or updated scratch buffer pointer + */ +NvU8 * +kbusValidateBar2ApertureMapping_VBAR2_SRIOV +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU8 *pCpu +) +{ + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + return kbusValidateBar2ApertureMapping_SCRATCH(pGpu, pKernelBus, pMemDesc, pCpu); + + return kbusValidateBar2ApertureMapping_VBAR2(pGpu, pKernelBus, pMemDesc, pCpu); +} + +/*! + * @brief Fake BAR2 map API to a scratch buffer. + * + * Use for old VGPU w/o SRIOV guard cases, and when we are recovering from TDR. + */ +NvU8 * +kbusMapBar2Aperture_SCRATCH +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + return portMemAllocNonPaged((NvU32)pMemDesc->Size); +} + +/*! + * Dynamically map memory either a virtual BAR2 or with a directly CPU + * mapping. This is the HAL entry point. + * + * This is common code shared by all chips after NV50 + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc Map this memory descriptor + * @param[in] flags Subset of TRANSFER_FLAGS + * + * @returns Master CPU pointer and an SLI set of CPU pointers + * + * @todo When using BAR2 this routine could not fail, but now with direct maps it can. + */ +NvU8 * +kbusMapBar2Aperture_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + // + // If the gpu is no longer in a state where any gpu access is allowed, + // create some dummy system memory and return the pointer to the + // caller. All of the caller operations should now become nops. Only + // reads of this data might cause problems. + // + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_GPU_IN_RESET, NV_TRUE); + return kbusMapBar2Aperture_SCRATCH(pGpu, pKernelBus, pMemDesc, flags); + } + +#if 0 // Useful for finding leaks + NV_PRINTF(LEVEL_ERROR, + "memDesc %p from function %p\n", + pMemDesc, __builtin_return_address(0)); +#endif + + // + // Raise warning on encountering Reflected Mapping on setups with sysmem nvlink. + // On 0 FB systems, Reflected mapping may be used, so don't raise warning for that. + // + if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && + (pGpu->getProperty(pGpu, PDB_PROP_GPU_NVLINK_SYSMEM)) && + !(pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB))) + { + // + // Reflected mapping is deprecated and may cause GPU to enter deadlock + // on certain systems and result into GPU fall off the bus. (B1829446) + // If you see any hangs after this print, please fix the allocation + // code in client for the memory tracked by this memDesc to avoid + // reflected mapping. + // + NV_PRINTF(LEVEL_ERROR, + "GPU %d: Warning: Reflected Mapping Found: MapType = BAR and " + "AddressSpace = SYSMEM.\n", pGpu->gpuInstance); + NV_ASSERT(0); + } + + // Call the lower-level routine + return kbusMapBar2ApertureCached_VBAR2(pGpu, pKernelBus, pMemDesc, flags); +} + +/*! + * @brief SRIOV BAR2 map filter to decide between SRIOV and classic VGPU behavior + * + * Turing/GA100 can run in both modes, so we need the dynamic check. 
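+ *
+ * Classic (non-SRIOV) vGPU guests and the SRIOV-heavy WAR case fall back to
+ * the scratch-buffer implementation; all other configurations use the real
+ * virtual BAR2 mapping path.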
+ */ +NvU8 * +kbusMapBar2Aperture_VBAR2_SRIOV +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + return kbusMapBar2Aperture_SCRATCH(pGpu, pKernelBus, pMemDesc, flags); + + return kbusMapBar2Aperture_VBAR2(pGpu, pKernelBus, pMemDesc, flags); +} + +/*! + * @brief Fake BAR2 unmap API to a scratch buffer. + * + * Use for old VGPU w/o SRIOV guard cases, and when we are recovering from TDR. + */ +void +kbusUnmapBar2ApertureWithFlags_SCRATCH +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU8 **pCpuPtr, + NvU32 flags +) +{ + portMemFree(*pCpuPtr); + kbusFlush_HAL(pGpu, pKernelBus, kbusGetFlushAperture(pKernelBus, memdescGetAddressSpace(pMemDesc))); +} + +/*! + * @brief Unmap instance memory, reversing kbusMapRmAperture_VBAR2 + * + * If a Destroy flag is passed, actually clear the PTE mappings, and don't + * leave on the cached free list. + * + * The value of *pCpuPtr must be the same as the value returned from + * kbusMapRmAperture_VBAR2 when the original mapping was performed. + * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc Unmap this memory descriptor + * @param[in] pCpuPtr CPU VA previously returned by busMapRmAperture_VBAR2 + * @param[in] flags Bitfield of flags to perform various operations + * + * @returns None + */ +void +kbusUnmapBar2ApertureWithFlags_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU8 **pCpuPtr, + NvU32 flags +) +{ + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + // Free the dummy data we allocated earlier. + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GPU_IN_RESET)) + { + kbusUnmapBar2ApertureWithFlags_SCRATCH(pGpu, pKernelBus, pMemDesc, pCpuPtr, flags); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_GPU_IN_RESET, NV_FALSE); + return; + } + // + // Let a map created before the reset go through the normal path + // to clear out the memory. + // + } + + NV_ASSERT(!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GPU_IN_RESET)); + + // Call the lower-level routine + kbusUnmapBar2ApertureCached_VBAR2(pGpu, pKernelBus, pMemDesc, flags); +} + +/*! + * @brief SRIOV BAR2 unmap filter to decide between SRIOV and classic VGPU behavior + * + * Turing/GA100 can run in both modes, so we need the dynamic check. + */ +void +kbusUnmapBar2ApertureWithFlags_VBAR2_SRIOV +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc, + NvU8 **pCpuPtr, + NvU32 flags +) +{ + // If SR-IOV is enabled, BAR2 mappings are managed by the guest. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + kbusUnmapBar2ApertureWithFlags_SCRATCH(pGpu, pKernelBus, pMemDesc, pCpuPtr, flags); + return; + } + + kbusUnmapBar2ApertureWithFlags_VBAR2(pGpu, pKernelBus, pMemDesc, pCpuPtr, flags); +} + +/*! + * Release cached memory descriptor so the memory descriptor can be freed. + * + * This is called from the memdescDestroy/memdescRelease path when ending the + * life of a memory descriptor. + * + * We assume this should be on the free list and already unmapped. If this + * doesn't happen it will show up as a leaked mapping when shutting down. On + * debug drivers we check used list to help pinpoint source of a leaked + * mapping. 
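+ *
+ * A match on the cached list is freed immediately; a match found on the used
+ * list (checked on debug builds only) is reported as a leak and then freed
+ * as well.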
+ * + * @param[in] pGpu + * @param[in] pKernelBus + * @param[in] pMemDesc Map this memory descriptor + */ +void +kbusReleaseRmAperture_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + VirtualBar2MapListIter it; + + it = listIterAll(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList); + while (listIterNext(&it)) + { + VirtualBar2MapEntry *pMap = it.pValue; + + if (pMap->pMemDesc == pMemDesc) + { + _freeRmApertureMap_VBAR2(pGpu, pKernelBus, pMap, + UPDATE_RM_APERTURE_FLAGS_INVALIDATE | UPDATE_RM_APERTURE_FLAGS_DISCARD); + return; + } + } + +#ifdef DEBUG + it = listIterAll(&pKernelBus->virtualBar2[GPU_GFID_PF].usedMapList); + while (listIterNext(&it)) + { + VirtualBar2MapEntry *pMap = it.pValue; + + if (pMap->pMemDesc == pMemDesc) + { + NV_PRINTF(LEVEL_ERROR, + "Leaked mapping detected. Mapping not unmapped before memdescDestroy call.\n"); + DBG_BREAKPOINT(); + + // Must be on cached listed to be freed + listRemove(&pKernelBus->virtualBar2[GPU_GFID_PF].usedMapList, pMap); + listAppendExisting(&pKernelBus->virtualBar2[GPU_GFID_PF].cachedMapList, pMap); + + _freeRmApertureMap_VBAR2(pGpu, pKernelBus, pMap, + UPDATE_RM_APERTURE_FLAGS_INVALIDATE | UPDATE_RM_APERTURE_FLAGS_DISCARD); + + break; + } + } +#endif +} + +NV_STATUS kbusMapCpuInvisibleBar2Aperture_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + PMEMORY_DESCRIPTOR pMemDesc, + NvU64 *pVaddr, + NvU64 allocSize, + NvU32 allocFlags, + NvU32 gfid +) +{ + OBJEHEAP *pVASpaceHiddenHeap = pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap; + NV_STATUS status; + + status = pVASpaceHiddenHeap->eheapAlloc(pVASpaceHiddenHeap, VAS_EHEAP_OWNER_NVRM, + &allocFlags, pVaddr, &allocSize, + pKernelBus->virtualBar2[gfid].vAlignment, + pKernelBus->virtualBar2[gfid].vAlignment, + NULL, NULL, NULL); + + if (status != NV_OK) + { + goto done; + } + + if (IS_GFID_VF(gfid) && (pKernelBus->virtualBar2[gfid].pPageLevels == NULL)) + { + pKernelBus->virtualBar2[gfid].pPageLevels = kbusMapRmAperture_HAL(pGpu, + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc); + NV_ASSERT_OR_RETURN(pKernelBus->virtualBar2[gfid].pPageLevels, + NV_ERR_INSUFFICIENT_RESOURCES); + } + + status = kbusUpdateRmAperture_HAL(pGpu, pKernelBus, pMemDesc, *pVaddr, + pMemDesc->PageCount * RM_PAGE_SIZE, UPDATE_RM_APERTURE_FLAGS_INVALIDATE | + UPDATE_RM_APERTURE_FLAGS_CPU_INVISIBLE_RANGE); + + if (IS_GFID_VF(gfid) && (pKernelBus->virtualBar2[gfid].pPageLevels != NULL)) + { + kbusUnmapRmAperture_HAL(pGpu, + pKernelBus->virtualBar2[gfid].pPageLevelsMemDesc, + &pKernelBus->virtualBar2[gfid].pPageLevels, NV_TRUE); + pKernelBus->virtualBar2[gfid].pPageLevels = NULL; + } + + if (status != NV_OK) + { + pVASpaceHiddenHeap->eheapFree(pVASpaceHiddenHeap, *pVaddr); + *pVaddr = 0; + } + +done: + return status; +} + +void kbusUnmapCpuInvisibleBar2Aperture_VBAR2 +( + OBJGPU *pGpu, + KernelBus *pKernelBus, + PMEMORY_DESCRIPTOR pMemDesc, + NvU64 vAddr, + NvU32 gfid +) +{ + OBJEHEAP *pVASpaceHiddenHeap = pKernelBus->virtualBar2[gfid].pVASpaceHiddenHeap; + + if (!pVASpaceHiddenHeap) + { + return; + } + + pVASpaceHiddenHeap->eheapFree(pVASpaceHiddenHeap, vAddr); +} + diff --git a/src/nvidia/src/kernel/gpu/bus/p2p.c b/src/nvidia/src/kernel/gpu/bus/p2p.c new file mode 100644 index 000000000..85117c6f2 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/p2p.c @@ -0,0 +1,1465 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include +#include "gpu/gpu.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "kernel/mem_mgr/p2p.h" +#include "os/os.h" +#include "mem_mgr/vaspace.h" +#include "gpu/bus/third_party_p2p.h" +#include "gpu/device/device.h" +#include "vgpu/rpc.h" +#include "vgpu/vgpu_events.h" +#include "gpu/bus/kern_bus.h" +#include "class/cl503c.h" + + +static NvBool _isSpaceAvailableForBar1P2PMapping(OBJGPU *, Subdevice *, NvHandle, NvU64); + +static +NV_STATUS RmP2PValidateSubDevice +( + ThirdPartyP2P *pThirdPartyP2P, + OBJGPU **ppGpu +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pThirdPartyP2P->pSubdevice); + if (pGpu == NULL) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + + *ppGpu = pGpu; + return NV_OK; +} + +/*! + * @brief frees given third party p2p memory extent + */ +static +NV_STATUS _freeMappingExtentInfo +( + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfo +) +{ + if (pExtentInfo == NULL) + return NV_OK; + + if (pExtentInfo->pMemDesc != NULL) + memdescDestroy(pExtentInfo->pMemDesc); + + portMemFree(pExtentInfo); + + return NV_OK; +} + +/*! + * @brief Constructs a new third party p2p memory extent + */ +static +NV_STATUS _constructMappingExtentInfo +( + NvU64 address, + NvU64 offset, + NvU64 length, + NvU64 fbApertureOffset, + MEMORY_DESCRIPTOR *pMemDesc, + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO *ppExtentInfo +) +{ + NV_STATUS status; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfo; + MEMORY_DESCRIPTOR *pNewMemDesc; + + NV_ASSERT_OR_RETURN((ppExtentInfo != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMemDesc != NULL), NV_ERR_INVALID_ARGUMENT); + + *ppExtentInfo = NULL; + + pExtentInfo = portMemAllocNonPaged( + sizeof(CLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO)); + if (pExtentInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto out; + } + + status = memdescCreateSubMem(&pNewMemDesc, pMemDesc, NULL, offset, length); + if (status != NV_OK) + { + goto out; + } + + portMemSet(pExtentInfo, 0, sizeof(*pExtentInfo)); + + pExtentInfo->address = address; + pExtentInfo->length = length; + pExtentInfo->fbApertureOffset = fbApertureOffset; + pExtentInfo->pMemDesc = pNewMemDesc; + pExtentInfo->refCount = 1; + + *ppExtentInfo = pExtentInfo; + +out: + if (status != NV_OK) + _freeMappingExtentInfo(pExtentInfo); + + return status; +} + +/*! 
+ * @brief Creates a new third party p2p memory extent + */ +static +NV_STATUS _createThirdPartyP2PMappingExtent +( + NvU64 address, + NvU64 length, + NvU64 offset, + NvHandle hClient, + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo, + CLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO_LIST *pList, + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + Subdevice *pSubDevice, + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO *ppExtentInfo, + NvU64 *pMappingStart, + NvU64 *pMappingLength +) +{ + NvU64 fbApertureOffset = 0; + NvU64 fbApertureMapLength = RM_ALIGN_UP(length, NVRM_P2P_PAGESIZE_BIG_64K); + NV_STATUS status; + KernelBus *pKernelBus; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfoTmp; + RsClient *pClient; + Device *pDevice; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + NV_ASSERT_OR_RETURN(status == NV_OK, NV_ERR_INVALID_ARGUMENT); + + status = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + NV_ASSERT_OR_RETURN(status == NV_OK, NV_ERR_INVALID_STATE); + + NV_PRINTF(LEVEL_INFO, "New allocation for address: 0x%llx\n", address); + + NV_ASSERT_OR_RETURN((pDevice != NULL), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN((ppExtentInfo != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pList != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMappingStart != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMappingLength != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMemDesc != NULL), NV_ERR_INVALID_ARGUMENT); + + *ppExtentInfo = NULL; + + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + // + // By the time the mapping extent is created, the range has been already + // verified to be correct has to fit in the memdesc. + // + NV_ASSERT(offset < memdescGetSize(pMemDesc)); + + status = _constructMappingExtentInfo(address, offset, + fbApertureMapLength, 0, pMemDesc, ppExtentInfo); + if (status != NV_OK) + { + goto out; + } + + if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + NV_RM_RPC_MAP_MEMORY(pGpu, hClient, + RES_GET_HANDLE(pDevice), + pVidmemInfo->hMemory, + offset, + fbApertureMapLength, + 0, + &fbApertureOffset, status); + } + else + { + status = kbusMapFbAperture_HAL(pGpu, pKernelBus, (*ppExtentInfo)->pMemDesc, 0, + &fbApertureOffset, &fbApertureMapLength, + BUS_MAP_FB_FLAGS_MAP_UNICAST, hClient); + } + if (status != NV_OK) + { + goto out; + } + + (*ppExtentInfo)->fbApertureOffset = fbApertureOffset; + + for (pExtentInfoTmp = listHead(pList); + pExtentInfoTmp != NULL; + pExtentInfoTmp = listNext(pList, pExtentInfoTmp)) + { + if (pExtentInfoTmp->address > address) + break; + } + + if (pExtentInfoTmp == NULL) + listAppendExisting(pList, *ppExtentInfo); + else + listInsertExisting(pList, pExtentInfoTmp, *ppExtentInfo); + + pSubDevice->P2PfbMappedBytes += fbApertureMapLength; + *pMappingLength = length; + *pMappingStart = 0; // starts at zero in the current allocation. + +out: + if ((status != NV_OK) && (*ppExtentInfo != NULL)) + { + NV_STATUS tmpStatus = NV_OK; + + if (fbApertureMapLength != 0) + { + if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + NV_RM_RPC_UNMAP_MEMORY(pGpu, hClient, + RES_GET_HANDLE(pDevice), + pVidmemInfo->hMemory, + 0, + fbApertureOffset, tmpStatus); + } + else + { + tmpStatus = kbusUnmapFbAperture_HAL(pGpu, pKernelBus, + (*ppExtentInfo)->pMemDesc, + fbApertureOffset, + fbApertureMapLength, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + } + NV_ASSERT(tmpStatus == NV_OK); + } + + _freeMappingExtentInfo(*ppExtentInfo); + } + return status; +} + +/*! 
+ * @brief Reuse an existing third party p2p allocation. + * + * Determines offset in the current allocation and its size that can + * be reused in the new mapping. + */ +static +NV_STATUS _reuseThirdPartyP2PMappingExtent +( + NvU64 address, + NvU64 length, + NvHandle hClient, + CLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO_LIST *pList, + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + Subdevice *pSubDevice, + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO *ppExtentInfo, + NvU64 *pMappingStart, + NvU64 *pMappingLength +) +{ + NvU64 mappingStart; + NvU64 mappingLength; + NV_STATUS status = NV_OK; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfo = NULL; + + NV_ASSERT_OR_RETURN((ppExtentInfo != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pList != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMappingStart != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMappingLength != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMemDesc != NULL), NV_ERR_INVALID_ARGUMENT); + + NV_PRINTF(LEVEL_INFO, "Reuse allocation for address: 0x%llx\n", address); + + pExtentInfo = *ppExtentInfo; + + mappingStart = address - pExtentInfo->address; + mappingLength = NV_MIN((pExtentInfo->length - mappingStart), length); + + *pMappingLength = mappingLength; + *pMappingStart = mappingStart; + + pExtentInfo->refCount++; + + return status; +} + +/*! + * @brief Frees an existing third party P2P mapping + * + * Iterates over all the p2p allocations that are used in the mapping and + * decrements its refcount. If P2p allocation's refcount has reached zero, + * it is freed and usage of FB for p2p is appropriately adjusted. + */ +static +NV_STATUS RmThirdPartyP2PMappingFree +( + NvHandle hClient, + OBJGPU *pGpu, + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo, + PCLI_THIRD_PARTY_P2P_INFO pThirdPartyP2PInfo, + Subdevice *pSubDevice, + PCLI_THIRD_PARTY_P2P_MAPPING_INFO pMappingInfo +) +{ + NV_STATUS status = NV_OK; + KernelBus *pKernelBus; + NvU64 length; + NvU64 mappingLength; + NvU64 address; + NvU64 startOffset; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfo = NULL; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfoNext = NULL; + RsClient *pClient; + Device *pDevice; + + NV_ASSERT_OR_RETURN((pGpu != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMappingInfo != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pSubDevice != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pThirdPartyP2PInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + NV_ASSERT_OR_RETURN(status == NV_OK, NV_ERR_INVALID_ARGUMENT); + + status = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + NV_ASSERT_OR_RETURN(status == NV_OK, NV_ERR_INVALID_STATE); + + NV_ASSERT_OR_RETURN((pDevice != NULL), NV_ERR_INVALID_STATE); + + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + length = pMappingInfo->length; + address = pMappingInfo->address; + + for(pExtentInfo = pMappingInfo->pStart; (pExtentInfo != NULL) && (length != 0); + pExtentInfo = pExtentInfoNext) + { + pExtentInfoNext = listNext(&pVidmemInfo->mappingExtentList, pExtentInfo); + startOffset = address - pExtentInfo->address; + mappingLength = NV_MIN(length, (pExtentInfo->length - startOffset)); + + address += mappingLength; + length -= mappingLength; + pExtentInfo->refCount--; + if (pExtentInfo->refCount == 0) + { + if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + NV_RM_RPC_UNMAP_MEMORY(pGpu, hClient, + RES_GET_HANDLE(pDevice), + pVidmemInfo->hMemory, + 0, + 
pExtentInfo->fbApertureOffset, status); + } + else + { + status = kbusUnmapFbAperture_HAL(pGpu, pKernelBus, + pExtentInfo->pMemDesc, + pExtentInfo->fbApertureOffset, + pExtentInfo->length, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + } + NV_ASSERT(status == NV_OK); + + listRemove(&pVidmemInfo->mappingExtentList, pExtentInfo); + + pSubDevice->P2PfbMappedBytes -= pExtentInfo->length; + _freeMappingExtentInfo(pExtentInfo); + } + } + NV_ASSERT(length == 0); + + pMappingInfo->pStart = NULL; + pMappingInfo->length = 0; + + return status; +} + +/*! + * @brief Gets BAR1 mapped pages. + * + * The function creates mappings from BAR1 VASpace for registered third party + * P2P allocations, so the pages returned by this function are BAR1 addresses, + * BAR1 base + BAR1 VAs returned by RM. + * Note that PCLI_THIRD_PARTY_P2P_MAPPING_INFO is also updated to track these + * BAR1 addresses in order to reuse them across multiple allocations. + */ +static +NV_STATUS RmThirdPartyP2PBAR1GetPages +( + NvU64 address, + NvU64 length, + NvU64 offset, + NvHandle hClient, + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo, + NvU64 **ppPhysicalAddresses, + NvU32 **ppWreqMbH, + NvU32 **ppRreqMbH, + NvU32 *pEntries, + OBJGPU *pGpu, + Subdevice *pSubDevice, + PCLI_THIRD_PARTY_P2P_MAPPING_INFO pMappingInfo, + PCLI_THIRD_PARTY_P2P_INFO pThirdPartyP2PInfo +) +{ + NV_STATUS status = NV_OK; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfoLoop = NULL; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfo = NULL; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 mappingLength = 0; + NvU64 mappingOffset = 0; + NvU64 lengthReq = 0; + NvU64 lastAddress; + NvU32 entries = 0; + NvU64 fbApertureOffset; + NvU64 physicalFbAddress; + NvBool bFound; + + NV_ASSERT_OR_RETURN((pGpu != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pMappingInfo != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pSubDevice != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pThirdPartyP2PInfo != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((ppPhysicalAddresses != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pEntries != NULL), NV_ERR_INVALID_ARGUMENT); + + NV_PRINTF(LEVEL_INFO, + "Requesting Bar1 mappings for address: 0x%llx, length: 0x%llx\n", + address, length); + *pEntries = 0; + + pMappingInfo->length = 0; + pMappingInfo->address = address; + + pExtentInfoLoop = listHead(&pVidmemInfo->mappingExtentList); + + while (length > 0) + { + bFound = NV_FALSE; + lengthReq = length; + for(; pExtentInfoLoop != NULL; pExtentInfoLoop = listNext(&pVidmemInfo->mappingExtentList, pExtentInfoLoop)) + { + if ((address >= pExtentInfoLoop->address) && + (address < + (pExtentInfoLoop->address + pExtentInfoLoop->length))) + { + bFound = NV_TRUE; + break; + } + else if (address < pExtentInfoLoop->address) + { + // + // create new allocation for addresses that are not overlapping + // with the next allocation. 
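+                // The request is clamped so that the new extent ends where
+                // the next existing extent begins; the remaining tail is
+                // picked up on a later loop iteration by reusing that extent.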
+ // + if ((address + length) > pExtentInfoLoop->address) + { + lengthReq = pExtentInfoLoop->address - address; + } + break; + } + } + + pExtentInfo = pExtentInfoLoop; + + if (!bFound) + { + // Check if there is still space in BAR1 to map this length + if (!_isSpaceAvailableForBar1P2PMapping(pGpu, pSubDevice, hClient, lengthReq)) + { + NV_PRINTF(LEVEL_ERROR, + "no space for BAR1 mappings, length: 0x%llx \n", lengthReq); + + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto out; + } + + pMemDesc = pVidmemInfo->pMemDesc; + status = _createThirdPartyP2PMappingExtent( + address, lengthReq, offset, hClient, + pVidmemInfo, + &pVidmemInfo->mappingExtentList, pMemDesc, pGpu, + pSubDevice, &pExtentInfo, + &mappingOffset, &mappingLength); + if (NV_OK != status) + { + goto out; + } + } + else + { + pMemDesc = pExtentInfo->pMemDesc; + status = _reuseThirdPartyP2PMappingExtent( + address, lengthReq, hClient, + &pVidmemInfo->mappingExtentList, pMemDesc, pGpu, + pSubDevice, &pExtentInfo, + &mappingOffset, &mappingLength); + if (NV_OK != status) + { + goto out; + } + } + + if (pMappingInfo->pStart == NULL) + pMappingInfo->pStart = pExtentInfo; + + // fill page table entries + fbApertureOffset = pExtentInfo->fbApertureOffset + mappingOffset; + lastAddress = (address + mappingLength - 1); + while (address < lastAddress) + { + if (ppWreqMbH != NULL && ppRreqMbH != NULL) + { + (*ppWreqMbH)[entries] = 0; + (*ppRreqMbH)[entries] = 0; + } + + physicalFbAddress = gpumgrGetGpuPhysFbAddr(pGpu); + (*ppPhysicalAddresses)[entries] = (physicalFbAddress + + fbApertureOffset); + fbApertureOffset += NVRM_P2P_PAGESIZE_BIG_64K; + address += NVRM_P2P_PAGESIZE_BIG_64K; + offset += NVRM_P2P_PAGESIZE_BIG_64K; + entries++; + } + + length -= mappingLength; + pMappingInfo->length += mappingLength; + + } + + *pEntries = entries; + +out: + if (status != NV_OK) + { + RmThirdPartyP2PMappingFree(hClient, pGpu, pVidmemInfo, pThirdPartyP2PInfo, + pSubDevice, pMappingInfo); + } + return status; +} + +/*! + * @brief Gets pages adjusted by NVLink aperture base (GPAs). + */ +static +NV_STATUS RmThirdPartyP2PNVLinkGetPages +( + OBJGPU *pGpu, + OBJVASPACE *pVAS, + NvU64 address, + NvU64 length, + NvU64 offset, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 **ppWreqMbH, + NvU32 **ppRreqMbH, + NvU64 **ppPhysicalAddresses, + NvU32 *pEntries +) +{ + NvU64 lastAddress; + NvU32 entries = 0; + RmPhysAddr physAddr; + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + NV_ASSERT(!(address & (NVRM_P2P_PAGESIZE_BIG_64K - 1))); + NV_ASSERT(!(length & (NVRM_P2P_PAGESIZE_BIG_64K - 1))); + NV_ASSERT(!(offset & (NVRM_P2P_PAGESIZE_BIG_64K - 1))); + + lastAddress = (address + length - 1); + while (address < lastAddress) + { + physAddr = memdescGetPhysAddr(pMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), offset); + + (*ppWreqMbH)[entries] = 0; + (*ppRreqMbH)[entries] = 0; + + (*ppPhysicalAddresses)[entries] = pKernelMemorySystem->coherentCpuFbBase + physAddr; + + address += NVRM_P2P_PAGESIZE_BIG_64K; + offset += NVRM_P2P_PAGESIZE_BIG_64K; + entries++; + } + + *pEntries = entries; + + return NV_OK; +} + +/*! 
+ * @brief Gets pages for the given VidmemInfo + */ +static +NV_STATUS RmP2PGetPagesUsingVidmemInfo +( + NvU64 address, + NvU64 length, + NvU64 offset, + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvU64 **ppPhysicalAddresses, + NvU32 **ppWreqMbH, + NvU32 **ppRreqMbH, + NvU32 *pEntries, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData, + OBJGPU *pGpu, + Subdevice *pSubDevice, + CLI_THIRD_PARTY_P2P_VASPACE_INFO *pVASpaceInfo, + ThirdPartyP2P *pThirdPartyP2PInfo, + CLI_THIRD_PARTY_P2P_VIDMEM_INFO *pVidmemInfo +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pMemDesc; + Device *pDevice; + RsClient *pClient; + OBJVASPACE *pVAS; + CLI_THIRD_PARTY_P2P_MAPPING_INFO *pMappingInfo = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + NV_ASSERT_OR_RETURN(status == NV_OK, NV_ERR_INVALID_ARGUMENT); + + status = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + NV_ASSERT_OR_RETURN(status == NV_OK, NV_ERR_INVALID_STATE); + + if (pVASpaceInfo != NULL) + { + NV_ASSERT_OK_OR_RETURN( + vaspaceGetByHandleOrDeviceDefault(pClient, RES_GET_HANDLE(pDevice), + pVASpaceInfo->hVASpace, &pVAS)); + } + + pMemDesc = pVidmemInfo->pMemDesc; + + status = CliGetThirdPartyP2PMappingInfoFromKey(hClient, + hThirdPartyP2P, pVidmemInfo->hMemory, + pPlatformData, &pMappingInfo); + if (status == NV_ERR_OBJECT_NOT_FOUND) + { + status = CliAddThirdPartyP2PMappingInfo(hClient, hThirdPartyP2P, + pVidmemInfo->hMemory, pPlatformData, pFreeCallback, pData, &pMappingInfo); + } + if (status != NV_OK) + { + return status; + } + + switch(pThirdPartyP2PInfo->type) + { + case CLI_THIRD_PARTY_P2P_TYPE_BAR1: + status = RmThirdPartyP2PBAR1GetPages(address, length, offset, hClient, + pVidmemInfo, ppPhysicalAddresses, + ppWreqMbH, ppRreqMbH, pEntries, + pGpu, pSubDevice, pMappingInfo, + pThirdPartyP2PInfo); + break; + case CLI_THIRD_PARTY_P2P_TYPE_NVLINK: + status = RmThirdPartyP2PNVLinkGetPages(pGpu, pVAS, address, length, + offset, pMemDesc, ppWreqMbH, + ppRreqMbH, ppPhysicalAddresses, + pEntries); + break; + default: + status = NV_ERR_NOT_SUPPORTED; + break; + } + + return status; +} + +/*! + * @brief Gets pages or validates address range. + * + * If the argument "ppPhysicalAddresses" is NULL, + * the function just validates the address range. 
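+ *
+ * Validation only checks that [address, address + length) falls inside a
+ * vidmem range registered with the third-party P2P object; no BAR1 mappings
+ * are created in that case.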
+ */ +static +NV_STATUS RmP2PValidateAddressRangeOrGetPages +( + NvU64 address, + NvU64 length, + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvU64 **ppPhysicalAddresses, + NvU32 **ppWreqMbH, + NvU32 **ppRreqMbH, + NvU32 *pEntries, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData, + OBJGPU *pGpu, + Subdevice *pSubDevice, + PCLI_THIRD_PARTY_P2P_VASPACE_INFO pVASpaceInfo, + PCLI_THIRD_PARTY_P2P_INFO pThirdPartyP2PInfo +) +{ + CLI_THIRD_PARTY_P2P_VIDMEM_INFO *pVidmemInfo; + NV_STATUS status; + NvU64 offset; + + status = CliGetThirdPartyP2PVidmemInfoFromAddress(hClient, + hThirdPartyP2P, address, length, &offset, + &pVidmemInfo); + if (status != NV_OK) + { + return status; + } + + // Range validation is done at this point, so return if only validation was requested + if (ppPhysicalAddresses == NULL) + { + return NV_OK; + } + + status = RmP2PGetPagesUsingVidmemInfo(address, length, offset, hClient, + hThirdPartyP2P, ppPhysicalAddresses, + ppWreqMbH, ppRreqMbH, pEntries, + pPlatformData, pFreeCallback, + pData, pGpu, pSubDevice, pVASpaceInfo, + pThirdPartyP2PInfo, pVidmemInfo); + if (status != NV_OK) + { + return status; + } + + return NV_OK; +} + +static +NV_STATUS RmP2PGetVASpaceInfoWithoutToken +( + NvU64 address, + NvU64 length, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData, + ThirdPartyP2P *pThirdPartyP2P, + PCLI_THIRD_PARTY_P2P_VASPACE_INFO *ppVASpaceInfo +) +{ + NV_STATUS status; + PCLI_THIRD_PARTY_P2P_VASPACE_INFO pVASpaceInfo = NULL; + NvBool bFound = NV_FALSE; + NvHandle hClient, hThirdPartyP2P; + Subdevice *pSubdevice; + OBJGPU *pGpu; + + hClient = pThirdPartyP2P->hClient; + hThirdPartyP2P = pThirdPartyP2P->hThirdPartyP2P; + pSubdevice = pThirdPartyP2P->pSubdevice; + + status = RmP2PValidateSubDevice(pThirdPartyP2P, &pGpu); + if (NV_OK != status) + { + return status; + } + + if ((pThirdPartyP2P->type == CLI_THIRD_PARTY_P2P_TYPE_PROPRIETARY) && + !(pThirdPartyP2P->flags & CLI_THIRD_PARTY_P2P_FLAGS_INITIALIZED)) + { + return NV_ERR_INVALID_STATE; + } + + while (1) + { + status = thirdpartyp2pGetNextVASpaceInfo(pThirdPartyP2P, &pVASpaceInfo); + if (status != NV_OK) + { + if (bFound) + { + status = NV_OK; + } + return status; + } + + // + // Passing NULL for arguments to prevent looking up or + // updating mapping info in range validation. 
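+        // If more than one registered VA space matches the range, the lookup
+        // is ambiguous and NV_ERR_GENERIC is returned below rather than
+        // picking one arbitrarily.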
+ // + status = RmP2PValidateAddressRangeOrGetPages(address, length, hClient, + hThirdPartyP2P, NULL, NULL, + NULL, NULL, pPlatformData, + pFreeCallback, pData, pGpu, + pSubdevice, pVASpaceInfo, + pThirdPartyP2P); + if ((NV_OK == status) && bFound) + { + return NV_ERR_GENERIC; + } + else if (NV_OK == status) + { + bFound = NV_TRUE; + } + + if (NULL != ppVASpaceInfo) + { + *ppVASpaceInfo = pVASpaceInfo; + } + } + + return status; +} + +static +NV_STATUS RmP2PGetInfoWithoutToken +( + NvU64 address, + NvU64 length, + void *pPlatformData, + PCLI_THIRD_PARTY_P2P_INFO *ppThirdPartyP2PInfo, + PCLI_THIRD_PARTY_P2P_VASPACE_INFO *ppVASpaceInfo, + OBJGPU *pGpu +) +{ + NV_STATUS status; + PCLI_THIRD_PARTY_P2P_INFO pThirdPartyP2PInfo = NULL; + PCLI_THIRD_PARTY_P2P_VASPACE_INFO pVASpaceInfo = NULL; + NvBool bFound = NV_FALSE; + NvU32 processId = osGetCurrentProcess(); + + while (1) + { + RmClient *pClient; + status = CliNextThirdPartyP2PInfoWithPid(pGpu, + processId, + 0, + &pClient, + &pThirdPartyP2PInfo); + if (NV_OK != status) + { + if (bFound) + { + status = NV_OK; + } + break; + } + + if ((pThirdPartyP2PInfo->type == CLI_THIRD_PARTY_P2P_TYPE_PROPRIETARY) && + !(pThirdPartyP2PInfo->flags & CLI_THIRD_PARTY_P2P_FLAGS_INITIALIZED)) + { + status = NV_ERR_INVALID_STATE; + continue; + } + + if (0 == length) + { + // PutPages + status = CliGetThirdPartyP2PPlatformData(pThirdPartyP2PInfo, + pPlatformData); + } + else + { + // GetPages + status = RmP2PGetVASpaceInfoWithoutToken(address, + length, + pPlatformData, + NULL, + NULL, + pThirdPartyP2PInfo, + &pVASpaceInfo); + if (NV_OK == status) + { + *ppVASpaceInfo = pVASpaceInfo; + } + } + + if (NV_OK == status) + { + if (bFound) + { + status = NV_ERR_GENERIC; + break; + } + else + { + bFound = NV_TRUE; + if (NULL != ppThirdPartyP2PInfo) + { + *ppThirdPartyP2PInfo = pThirdPartyP2PInfo; + } + } + } + } + + return status; +} + +static NvBool _isSpaceAvailableForBar1P2PMapping( + OBJGPU *pGpu, + Subdevice *pSubDevice, + NvHandle hClient, + NvU64 length +) +{ + NvU64 bar1SizeBytes; + NvU64 fbAvailableBytes; + GETBAR1INFO bar1Info; + NV_STATUS status; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + status = memmgrGetBAR1InfoForClient_HAL(pGpu, pMemoryManager, hClient, &bar1Info); + if (status != NV_OK) + return NV_FALSE; + + // Convert Bar1 size to bytes as reported size is in KB. 
+ bar1SizeBytes = ((NvU64)bar1Info.bar1Size) << 10; + + if (bar1SizeBytes < pSubDevice->P2PfbMappedBytes) + { + DBG_BREAKPOINT(); + return NV_FALSE; + } + + fbAvailableBytes = (bar1SizeBytes - pSubDevice->P2PfbMappedBytes); + return (fbAvailableBytes >= (CLI_THIRD_PARTY_P2P_BAR1_RESERVE + length)); +} + +static NV_STATUS _rmP2PGetPages( + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 address, + NvU64 length, + NvU64 *pPhysicalAddresses, + NvU32 *pWreqMbH, + NvU32 *pRreqMbH, + NvU32 *pEntries, + OBJGPU **ppGpu, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData +) +{ + NV_STATUS status; + NvHandle hClient, hThirdPartyP2P; + OBJGPU *pGpu; + ThirdPartyP2P *pThirdPartyP2P; + Subdevice *pSubdevice; + PCLI_THIRD_PARTY_P2P_VASPACE_INFO pVASpaceInfo = NULL; + + if (address & (NVRM_P2P_PAGESIZE_BIG_64K - 1)) + { + NV_PRINTF(LEVEL_ERROR, + "invalid argument in RmP2PGetPages, address=%llx is not aligned\n", + address); + return NV_ERR_INVALID_ARGUMENT; + } + + if (0 != p2pToken) + { + status = CliGetThirdPartyP2PInfoFromToken(p2pToken, + &pThirdPartyP2P); + } + else + { + status = RmP2PGetInfoWithoutToken(address, + length, + pPlatformData, + &pThirdPartyP2P, + &pVASpaceInfo, + NULL); + } + if (status != NV_OK) + { + return status; + } + hClient = pThirdPartyP2P->hClient; + hThirdPartyP2P = pThirdPartyP2P->hThirdPartyP2P; + pSubdevice = pThirdPartyP2P->pSubdevice; + + if ((pThirdPartyP2P->type == CLI_THIRD_PARTY_P2P_TYPE_PROPRIETARY) && + !(pThirdPartyP2P->flags & CLI_THIRD_PARTY_P2P_FLAGS_INITIALIZED)) + { + status = NV_ERR_INVALID_STATE; + goto failed; + } + + status = RmP2PValidateSubDevice(pThirdPartyP2P, &pGpu); + if (status != NV_OK) + { + goto failed; + } + + if (0 != vaSpaceToken) + { + status = thirdpartyp2pGetVASpaceInfoFromToken(pThirdPartyP2P, vaSpaceToken, &pVASpaceInfo); + if (status != NV_OK) + { + goto failed; + } + } + + if (pVASpaceInfo == NULL) + { + status = NV_ERR_INVALID_STATE; + goto failed; + } + + status = RmP2PValidateAddressRangeOrGetPages(address, length, hClient, + hThirdPartyP2P, &pPhysicalAddresses, + &pWreqMbH, &pRreqMbH, pEntries, + pPlatformData, pFreeCallback, + pData, pGpu, pSubdevice, + pVASpaceInfo, pThirdPartyP2P); + if (status != NV_OK) + { + goto failed; + } + + if (ppGpu != NULL) + { + *ppGpu = pGpu; + } + + return NV_OK; +failed: + thirdpartyp2pDelMappingInfoByKey(pThirdPartyP2P, pPlatformData, NV_FALSE); + + return status; +} + +static +CLI_THIRD_PARTY_P2P_VIDMEM_INFO* _createOrReuseVidmemInfoPersistent +( + OBJGPU *pGpu, + NvU64 address, + NvU64 length, + NvU64 *pOffset, + ThirdPartyP2P *pThirdPartyP2P, + ThirdPartyP2P *pThirdPartyP2PInternal +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + CLI_THIRD_PARTY_P2P_VIDMEM_INFO *pVidmemInfo = NULL; + CLI_THIRD_PARTY_P2P_VIDMEM_INFO *pVidmemInfoInternal = NULL; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + Memory *pMemoryInternal; + RsClient *pClientInternal; + Device *pDevice; + NvU64 offset = 0; + NvHandle hMemoryDuped = 0; + NV_STATUS status; + NvBool bMemDuped = NV_FALSE; + + // + // Note: hMemory is duped(memory is ref-counted) only once for the first time. + // All subsequent get_pages_persistent() requests reuse the same VidmemInfo. + // Mappings are ref-counted using ExtentInfo in the MappingInfoList. + // The duped handle is freed when the MappingInfoList is empty in + // put_pages_persistent() path. 
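+    // In effect the dup keeps the user's allocation alive for as long as any
+    // persistent mapping still references it, independent of the lifetime of
+    // the user's own handle.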
+ // + + // + // Get user client's ThirdPartyP2P's VidmemInfo + // Needed to get user's offset and hMemory + // + status = CliGetThirdPartyP2PVidmemInfoFromAddress(pThirdPartyP2P->hClient, + pThirdPartyP2P->hThirdPartyP2P, + address, + length, + &offset, + &pVidmemInfo); + if (status != NV_OK) + { + goto failed; + } + + *pOffset = offset; + + // + // Check if an internal VidmemInfo already exists. + // Every VidmemInfo is assigned a unique ID and the internal ThirdPartyP2P + // object's AddressRangeTree is keyed at user client's VidmemInfo ID instead + // of the VA. This is because the VA could have been reassigned to another + // phys allocation. + // + status = CliGetThirdPartyP2PVidmemInfoFromId(pThirdPartyP2PInternal->hClient, + pThirdPartyP2PInternal->hThirdPartyP2P, + pVidmemInfo->id, + &pVidmemInfoInternal); + if (status == NV_OK) + { + return pVidmemInfoInternal; + } + else if (status != NV_ERR_OBJECT_NOT_FOUND) + { + goto failed; + } + + pClientInternal = RES_GET_CLIENT(pThirdPartyP2PInternal); + + status = deviceGetByGpu(pClientInternal, pGpu, NV_TRUE, &pDevice); + if (status != NV_OK) + { + goto failed; + } + + // Dupe user client's hMemory + status = pRmApi->DupObject(pRmApi, + pMemoryManager->hClient, + RES_GET_HANDLE(pDevice), + &hMemoryDuped, + pThirdPartyP2P->hClient, + pVidmemInfo->hMemory, + 0); + if (status != NV_OK) + { + goto failed; + } + + bMemDuped = NV_TRUE; + + status = memGetByHandleAndDevice(pClientInternal, + hMemoryDuped, + RES_GET_HANDLE(pDevice), + &pMemoryInternal); + if (status != NV_OK) + { + goto failed; + } + + // + // Add a new VidmemInfo with the address field as user's VidmemInfo ID + // and length = 1. This is because keyStart and keyEnd for internal + // AddressRangeTree should be the user's VidmemInfo ID. + // + status = CliAddThirdPartyP2PVidmemInfo(pMemoryManager->hClient, + pThirdPartyP2PInternal->hThirdPartyP2P, + hMemoryDuped, + pVidmemInfo->id, + 1, + pVidmemInfo->offset, + pMemoryInternal); + if (status != NV_OK) + { + goto failed; + } + + // Fetch the newly added VidmemInfo to return. 
+ status = CliGetThirdPartyP2PVidmemInfoFromId(pThirdPartyP2PInternal->hClient, + pThirdPartyP2PInternal->hThirdPartyP2P, + pVidmemInfo->id, + &pVidmemInfoInternal); + if (status != NV_OK) + { + goto failed; + } + + return pVidmemInfoInternal; + +failed: + if (bMemDuped) + { + pRmApi->Free(pRmApi, pMemoryManager->hClient, hMemoryDuped); + } + + return NULL; +} + +NV_STATUS RmP2PGetPagesPersistent( + NvU64 address, + NvU64 length, + void **p2pObject, + NvU64 *pPhysicalAddresses, + NvU32 *pEntries, + void *pPlatformData, + void *pGpuInfo +) +{ + OBJGPU *pGpu = (OBJGPU *) pGpuInfo; + ThirdPartyP2P *pThirdPartyP2P = NULL; + ThirdPartyP2P *pThirdPartyP2PInternal = NULL; + CLI_THIRD_PARTY_P2P_VASPACE_INFO *pVASpaceInfo = NULL; + CLI_THIRD_PARTY_P2P_VIDMEM_INFO *pVidmemInfo = NULL; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 offset = 0; + NV_STATUS status; + + status = RmP2PGetInfoWithoutToken(address, length, NULL, + &pThirdPartyP2P, &pVASpaceInfo, pGpu); + if (status != NV_OK) + { + return status; + } + + status = CliGetThirdPartyP2PInfo(pMemoryManager->hClient, + pMemoryManager->hThirdPartyP2P, + &pThirdPartyP2PInternal); + if (status != NV_OK) + { + return status; + } + + pVidmemInfo = _createOrReuseVidmemInfoPersistent(pGpu, address, length, &offset, + pThirdPartyP2P, + pThirdPartyP2PInternal); + if (pVidmemInfo == NULL) + { + return NV_ERR_INVALID_STATE; + } + + status = RmP2PGetPagesUsingVidmemInfo(address, length, offset, + pMemoryManager->hClient, + pThirdPartyP2PInternal->hThirdPartyP2P, + &pPhysicalAddresses, NULL, NULL, + pEntries, pPlatformData, NULL, NULL, + pGpu, pThirdPartyP2PInternal->pSubdevice, + NULL, pThirdPartyP2PInternal, pVidmemInfo); + if (status != NV_OK) + { + // Cleanup MappingInfo if it was allocated + thirdpartyp2pDelMappingInfoByKey(pThirdPartyP2PInternal, pPlatformData, NV_FALSE); + + // + // The cleanup with thirdpartyp2pDelMappingInfoByKey() above is not enough + // since creating MappingInfo with pPlatformData could have failed. + // Cleanup of the internal VidmemInfo is still needed since pPlatformData + // lookup would fail and the VidmemInfo is not available for cleanup via + // thirdpartyp2pDelPersistentMappingInfoByKey(). + // + CliDelThirdPartyP2PVidmemInfoPersistent(pThirdPartyP2PInternal, pVidmemInfo); + + return status; + } + + // + // Update p2pObject as the internal ThirdPartyP2P object + // which will be used by nvidia_p2p_put_pages() to look up mappings. 
+ // + *p2pObject = (void *) pThirdPartyP2PInternal; + + return NV_OK; +} + +NV_STATUS RmP2PGetPages( + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 address, + NvU64 length, + NvU64 *pPhysicalAddresses, + NvU32 *pWreqMbH, + NvU32 *pRreqMbH, + NvU32 *pEntries, + OBJGPU **ppGpu, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData +) +{ + if (pFreeCallback == NULL || pData == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "invalid argument(s) in RmP2PGetPages, pFreeCallback=%p pData=%p\n", + pFreeCallback, pData); + return NV_ERR_INVALID_ARGUMENT; + } + + return _rmP2PGetPages(p2pToken, vaSpaceToken, address, length, + pPhysicalAddresses, pWreqMbH, pRreqMbH, + pEntries, ppGpu, pPlatformData, + pFreeCallback, pData); +} + +NV_STATUS RmP2PGetPagesWithoutCallbackRegistration( + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 address, + NvU64 length, + NvU64 *pPhysicalAddresses, + NvU32 *pWreqMbH, + NvU32 *pRreqMbH, + NvU32 *pEntries, + OBJGPU **ppGpu, + void *pPlatformData +) +{ + return _rmP2PGetPages(p2pToken, vaSpaceToken, address, length, + pPhysicalAddresses, pWreqMbH, pRreqMbH, + pEntries, ppGpu, pPlatformData, + NULL, NULL); +} + +NV_STATUS RmP2PGetGpuByAddress( + NvU64 address, + NvU64 length, + OBJGPU **ppGpu +) +{ + ThirdPartyP2P *pThirdPartyP2P = NULL; + CLI_THIRD_PARTY_P2P_VASPACE_INFO *pVASpaceInfo = NULL; + OBJGPU *pGpu = NULL; + MemoryManager *pMemoryManager = NULL; + NV_STATUS status = NV_OK; + + status = RmP2PGetInfoWithoutToken(address, length, NULL, + &pThirdPartyP2P, &pVASpaceInfo, NULL); + if (status != NV_OK) + { + return status; + } + + status = RmP2PValidateSubDevice(pThirdPartyP2P, &pGpu); + if (status != NV_OK) + { + return status; + } + + pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + // Unsupported configs/platforms for persistent mappings + if (IS_MIG_ENABLED(pGpu) || + IS_VIRTUAL(pGpu) || + NVCPU_IS_PPC64LE || + pMemoryManager->hThirdPartyP2P == 0) + { + return NV_ERR_NOT_SUPPORTED; + } + + *ppGpu = pGpu; + + return status; +} + +NV_STATUS RmP2PRegisterCallback( + NvU64 p2pToken, + NvU64 address, + NvU64 length, + void *pPlatformData, + void (*pFreeCallback)(void *pData), + void *pData +) +{ + NV_STATUS status; + NvHandle hClient, hThirdPartyP2P; + ThirdPartyP2P *pThirdPartyP2P; + PCLI_THIRD_PARTY_P2P_VASPACE_INFO pVASpaceInfo = NULL; + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + NvU64 offset; + + if (0 != p2pToken) + { + status = CliGetThirdPartyP2PInfoFromToken(p2pToken, + &pThirdPartyP2P); + } + else + { + status = RmP2PGetInfoWithoutToken(address, + 0, + pPlatformData, + &pThirdPartyP2P, + &pVASpaceInfo, + NULL); + } + if (status != NV_OK) + { + return status; + } + + hClient = pThirdPartyP2P->hClient; + hThirdPartyP2P = pThirdPartyP2P->hThirdPartyP2P; + + status = CliGetThirdPartyP2PVidmemInfoFromAddress(hClient, hThirdPartyP2P, + address, length, &offset, + &pVidmemInfo); + if (status != NV_OK) + { + return status; + } + + hClient = pThirdPartyP2P->hClient; + hThirdPartyP2P = pThirdPartyP2P->hThirdPartyP2P; + + return CliRegisterThirdPartyP2PMappingCallback(hClient, hThirdPartyP2P, + pVidmemInfo->hMemory, + pPlatformData, pFreeCallback, + pData); +} + +NV_STATUS RmP2PPutPagesPersistent( + void *p2pObject, + void *pPlatformData +) +{ + NV_STATUS status; + ThirdPartyP2P *pThirdPartyP2P = NULL; + + pThirdPartyP2P = (ThirdPartyP2P *)(p2pObject); + + if ((pThirdPartyP2P->type == CLI_THIRD_PARTY_P2P_TYPE_PROPRIETARY) && + !(pThirdPartyP2P->flags & CLI_THIRD_PARTY_P2P_FLAGS_INITIALIZED)) + { + return NV_ERR_INVALID_STATE; + } + + status = 
thirdpartyp2pDelPersistentMappingInfoByKey(pThirdPartyP2P, pPlatformData, NV_TRUE); + + NV_ASSERT(status == NV_OK); + + return status; +} + +NV_STATUS RmP2PPutPages( + NvU64 p2pToken, + NvU32 vaSpaceToken, + NvU64 address, + void *pPlatformData +) +{ + NV_STATUS status; + ThirdPartyP2P *pThirdPartyP2P; + + if (0 != p2pToken) + { + status = CliGetThirdPartyP2PInfoFromToken(p2pToken, + &pThirdPartyP2P); + } + else + { + status = RmP2PGetInfoWithoutToken(address, + 0, + pPlatformData, + &pThirdPartyP2P, + NULL, NULL); + } + if (status != NV_OK) + { + return status; + } + + if ((pThirdPartyP2P->type == CLI_THIRD_PARTY_P2P_TYPE_PROPRIETARY) && + !(pThirdPartyP2P->flags & CLI_THIRD_PARTY_P2P_FLAGS_INITIALIZED)) + { + return NV_ERR_INVALID_STATE; + } + + status = thirdpartyp2pDelMappingInfoByKey(pThirdPartyP2P, pPlatformData, NV_TRUE); + NV_ASSERT(status == NV_OK); + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/bus/p2p_api.c b/src/nvidia/src/kernel/gpu/bus/p2p_api.c new file mode 100644 index 000000000..e4c06084b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/p2p_api.c @@ -0,0 +1,957 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/bus/p2p_api.h" +#include "gpu/bus/third_party_p2p.h" +#include "platform/p2p/p2p_caps.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "nvRmReg.h" +#include "rmapi/rs_utils.h" +#include "vgpu/rpc.h" +#include "vgpu/vgpu_events.h" + +#include "class/cl503b.h" +#include //FERMI_VASPACE_A + +/*! + * @brief Binds local BFID for SR-IOV P2P requests + * + * NOTE: This call will be dispatched to the Physical RM of the + * GPU represented by pGpu. Be sure to pass the GPU + * you are intending to program (local or remote). 
+ *
+ * @param[in] pGpu GPU to dispatch the bind call to
+ * @param[in] gfid GFID to bind in the P2P source GPU
+ * @param[in] peerId Peer ID of the P2P destination GPU
+ */
+static NV_STATUS
+s_p2papiBindLocalGfid(OBJGPU *pGpu, NvU32 gfid, NvU32 peerId)
+{
+ NV_STATUS status;
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+ NV2080_CTRL_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P_PARAMS params = {0};
+
+ params.localGfid = gfid;
+ params.peerId = peerId;
+
+ status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+ NV2080_CTRL_CMD_INTERNAL_BUS_BIND_LOCAL_GFID_FOR_P2P,
+ &params, sizeof(params));
+ return status;
+}
+
+/*!
+ * @brief Binds remote GFID for SR-IOV P2P requests
+ *
+ * NOTE: This call will be dispatched to the Physical RM of the
+ * GPU represented by pGpu. Be sure to pass the GPU
+ * you are intending to program (local or remote).
+ *
+ * @param[in] pGpu GPU to dispatch the bind call to
+ * @param[in] gfid GFID to bind in the P2P destination GPU
+ */
+static NV_STATUS
+s_p2papiBindRemoteGfid(OBJGPU *pGpu, NvU32 gfid)
+{
+
+ NV_STATUS status;
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+ NV2080_CTRL_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P_PARAMS params = {0};
+
+ params.remoteGfid = gfid;
+
+ status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+ NV2080_CTRL_CMD_INTERNAL_BUS_BIND_REMOTE_GFID_FOR_P2P,
+ &params, sizeof(params));
+ return status;
+}
+
+
+NV_STATUS
+p2papiConstruct_IMPL
+(
+ P2PApi *pP2PApi,
+ CALL_CONTEXT *pCallContext,
+ RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+ NvHandle hClient;
+ NvHandle hP2P;
+ Subdevice *pSubDevice;
+ Subdevice *pPeerSubDevice;
+ NvU32 subDevicePeerIdMask;
+ NvU32 peerSubDevicePeerIdMask;
+ RsClient *pClient;
+ NvU32 peer1;
+ NvU32 peer2;
+ NvHandle hDevice;
+ NvHandle hPeerDevice;
+ NvHandle hSubDevice;
+ NvHandle hPeerSubDevice;
+ PNODE pNode;
+ OBJGPU *pGpu;
+ OBJGPU *pLocalGpu;
+ KernelBus *pLocalKernelBus;
+ KernelNvlink *pLocalKernelNvlink;
+ OBJGPU *pRemoteGpu;
+ KernelBus *pRemoteKernelBus;
+ KernelNvlink *pRemoteKernelNvlink;
+ NV_STATUS status;
+ NvU32 gpuMask;
+ NvBool bP2PWriteCapable = NV_FALSE;
+ NvBool bP2PReadCapable = NV_FALSE;
+ NV503B_ALLOC_PARAMETERS *pNv503bAllocParams = pParams->pAllocParams;
+ NvU32 gfid;
+ Device *pLocalDevice;
+ NvBool bRegisteredP2P = NV_FALSE;
+ NvBool bRegisteredPeerP2P = NV_FALSE;
+ NvU32 flags = pNv503bAllocParams->flags;
+ NvBool bSpaAccessOnly = FLD_TEST_DRF(503B, _FLAGS, _P2P_TYPE, _SPA, flags);
+ P2P_CONNECTIVITY p2pConnectionType = P2P_CONNECTIVITY_UNKNOWN;
+
+ hClient = pParams->hClient;
+ hP2P = pParams->hResource;
+ subDevicePeerIdMask = pNv503bAllocParams->subDevicePeerIdMask;
+ peerSubDevicePeerIdMask = pNv503bAllocParams->peerSubDevicePeerIdMask;
+
+ status = serverGetClientUnderLock(&g_resServ, hClient, &pClient);
+ if (status != NV_OK)
+ return NV_ERR_INVALID_ARGUMENT;
+
+ status = subdeviceGetByHandle(pClient, pNv503bAllocParams->hSubDevice, &pSubDevice);
+ if (status != NV_OK)
+ return NV_ERR_INVALID_ARGUMENT;
+
+ status = subdeviceGetByHandle(pClient, pNv503bAllocParams->hPeerSubDevice, &pPeerSubDevice);
+ if (status != NV_OK)
+ return NV_ERR_INVALID_ARGUMENT;
+
+ if (pNv503bAllocParams->subDevicePeerIdMask)
+ {
+ if (!ONEBITSET(pNv503bAllocParams->subDevicePeerIdMask))
+ {
+ return NV_ERR_INVALID_ARGUMENT;
+ }
+ }
+
+ if (pNv503bAllocParams->peerSubDevicePeerIdMask)
+ {
+ if (!ONEBITSET(pNv503bAllocParams->peerSubDevicePeerIdMask))
+ {
+ return NV_ERR_INVALID_ARGUMENT;
+ }
+ }
+
+ // Ensure any loopback requests match
+ if 
(pNv503bAllocParams->hSubDevice == pNv503bAllocParams->hPeerSubDevice) + { + if (pNv503bAllocParams->subDevicePeerIdMask != pNv503bAllocParams->peerSubDevicePeerIdMask) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + // validate client + if (dynamicCast(pClient, RmClient) == NULL) + return NV_ERR_INVALID_CLIENT; + + hSubDevice = RES_GET_HANDLE(pSubDevice); + hPeerSubDevice = RES_GET_HANDLE(pPeerSubDevice); + + // Find the gpu for the subdevices passed to us + if (CliSetSubDeviceContext(hClient, hSubDevice, &hDevice, &pLocalGpu) != NV_OK || + NULL == pLocalGpu) + { + NV_PRINTF(LEVEL_ERROR, "Failed to find GPU for hSubDevice (0x%08x)\n", + hSubDevice); + return NV_ERR_INVALID_ARGUMENT; + } + + if (CliSetSubDeviceContext(hClient, hPeerSubDevice, &hPeerDevice, &pRemoteGpu) != NV_OK || + NULL == pRemoteGpu) + { + NV_PRINTF(LEVEL_ERROR, "Failed to find GPU for hSubDevice (0x%08x)\n", + hPeerSubDevice); + return NV_ERR_INVALID_ARGUMENT; + } + + API_GPU_FULL_POWER_SANITY_CHECK(pLocalGpu, NV_TRUE, NV_FALSE); + API_GPU_FULL_POWER_SANITY_CHECK(pRemoteGpu, NV_TRUE, NV_FALSE); + + if (gpuIsApmFeatureEnabled(pLocalGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // SPA peer only supported when we support ATS + if (bSpaAccessOnly && (!pLocalGpu->getProperty(pLocalGpu, PDB_PROP_GPU_ATS_SUPPORTED))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pLocalKernelBus = GPU_GET_KERNEL_BUS(pLocalGpu); + pRemoteKernelBus = GPU_GET_KERNEL_BUS(pRemoteGpu); + + // + // Allocate P2P PCIE Mailbox areas if all of the following conditions occur: + // - P2P reads or/and writes are supported + // - The P2P connection is PCIE Mailbox based + // + gpuMask = NVBIT(pLocalGpu->gpuInstance) | NVBIT(pRemoteGpu->gpuInstance); + if ((p2pGetCaps(gpuMask, &bP2PWriteCapable, &bP2PReadCapable, &p2pConnectionType) == NV_OK) && + (bP2PWriteCapable || bP2PReadCapable) && + (p2pConnectionType == P2P_CONNECTIVITY_PCIE)) + { + status = kbusSetP2PMailboxBar1Area_HAL(pLocalGpu, pLocalKernelBus, + pNv503bAllocParams->mailboxBar1Addr, + pNv503bAllocParams->mailboxTotalSize); + NV_ASSERT_OK_OR_RETURN(status); + + status = kbusSetP2PMailboxBar1Area_HAL(pRemoteGpu, pRemoteKernelBus, + pNv503bAllocParams->mailboxBar1Addr, + pNv503bAllocParams->mailboxTotalSize); + NV_ASSERT_OK_OR_RETURN(status); + } + + // Process any specific peer id requests for peer 1 + if (subDevicePeerIdMask) + { + peer1 = BIT_IDX_32(subDevicePeerIdMask); + } + else + { + peer1 = BUS_INVALID_PEER; + } + + // Process any specific peer id requests for peer 2 + if (peerSubDevicePeerIdMask) + { + peer2 = BIT_IDX_32(peerSubDevicePeerIdMask); + } + else + { + peer2 = BUS_INVALID_PEER; + } + + if (!IS_VIRTUAL(pLocalGpu)) + { + if (!(bP2PWriteCapable || bP2PReadCapable)) + { + NV_PRINTF(LEVEL_ERROR, + "ERROR: P2P is Disabled, cannot create mappings\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // Train links to high speed. 
+ pLocalKernelNvlink = GPU_GET_KERNEL_NVLINK(pLocalGpu); + pRemoteKernelNvlink = GPU_GET_KERNEL_NVLINK(pRemoteGpu); + + if (pLocalKernelNvlink && pRemoteKernelNvlink) + { + status = knvlinkTrainFabricLinksToActive(pLocalGpu, pLocalKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "link training between GPU%u and SWITCH failed with status %x\n", + pLocalGpu->gpuInstance, status); + return status; + } + + status = knvlinkTrainFabricLinksToActive(pRemoteGpu, pRemoteKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "link training between GPU%u and SWITCH failed with status %x\n", + pRemoteGpu->gpuInstance, status); + return status; + } + } + } + + // check to see if a p2p mapping between these two subdevices already exist + if (NV_OK == btreeSearch(hPeerSubDevice, &pNode, + pSubDevice->pP2PMappingList) && + !IsGP100orBetter(pLocalGpu)) + { + NV_PRINTF(LEVEL_INFO, + "Mapping already exists between the two subdevices (0x%08x), (0x%08x). " + "Multiple mappings not supported on pre-PASCAL GPUs\n", + hSubDevice, hPeerSubDevice); + return NV_ERR_INVALID_ARGUMENT; + } + + pP2PApi->Node.keyStart = hP2P; + pP2PApi->Node.keyEnd = hP2P; + pP2PApi->Node.Data = pP2PApi; + pP2PApi->peer1 = pSubDevice; + pP2PApi->peer2 = pPeerSubDevice; + pP2PApi->attributes = DRF_NUM(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, p2pConnectionType); + pP2PApi->attributes |= bSpaAccessOnly ? DRF_DEF(_P2PAPI, _ATTRIBUTES, _LINK_TYPE, _SPA) : + DRF_DEF(_P2PAPI, _ATTRIBUTES, _LINK_TYPE, _GPA); + mapInit(&pP2PApi->dmaMappingMap, portMemAllocatorGetGlobalNonPaged()); + + // store away the p2pinfo within subdevice info for easy retrieval + status = subdeviceAddP2PApi(pSubDevice, pP2PApi); + if (NV_OK != status) + goto fail; + + bRegisteredP2P = NV_TRUE; + + // for loopback on same subdevice, we only need to store it once + if (hSubDevice != hPeerSubDevice) + { + status = subdeviceAddP2PApi(pPeerSubDevice, pP2PApi); + if (NV_OK != status) + goto fail; + + bRegisteredPeerP2P = NV_TRUE; + } + + if (!IS_VIRTUAL(pLocalGpu)) + { + // setup the p2p resources + status = kbusCreateP2PMapping_HAL(pLocalGpu, pLocalKernelBus, pRemoteGpu, + pRemoteKernelBus, &peer1, &peer2, + pP2PApi->attributes); + if (NV_OK != status) + goto fail; + } + + pGpu = pLocalGpu; + + if (IS_VIRTUAL_WITH_SRIOV(pGpu) && + gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + NvU32 gpu0Instance = gpuGetInstance(pLocalGpu); + NvU32 gpu1Instance = gpuGetInstance(pRemoteGpu); + + // loopback request + if (pNv503bAllocParams->hSubDevice == pNv503bAllocParams->hPeerSubDevice) + { + peer1 = peer2 = 0; + } + else + { + // Check if a peer ID is already allocated for P2P from pLocalGpu to pRemoteGpu + peer1 = kbusGetPeerId_HAL(pLocalGpu, pLocalKernelBus, pRemoteGpu); + + // Check if a peer ID is already allocated for P2P from pRemoteGpu to pLocalGpu + peer2 = kbusGetPeerId_HAL(pRemoteGpu, pRemoteKernelBus, pLocalGpu); + } + + if (peer1 != BUS_INVALID_PEER && peer2 != BUS_INVALID_PEER) + { + goto update_mask; + } + else if (peer1 == BUS_INVALID_PEER && peer2 == BUS_INVALID_PEER) + { + // Get the peer ID pGpu0 should use for P2P over NVLINK to pGpu1i + peer1 = kbusGetUnusedPeerId_HAL(pLocalGpu, pLocalKernelBus); + // If could not find a free peer ID, return error + if (peer1 == BUS_INVALID_PEER) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d: peerID not available for NVLink P2P\n", + gpu0Instance); + status = NV_ERR_GENERIC; + goto fail; + } + + // Reserve the peer ID for NVLink use + status = kbusReserveP2PPeerIds_HAL(pLocalGpu, pLocalKernelBus, 
NVBIT(peer1)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to reserve peer1, status=0x%x\n", status); + goto fail; + } + + // Get the peer ID pGpu1 should use for P2P over NVLINK to pGpu0 + peer2 = kbusGetUnusedPeerId_HAL(pRemoteGpu, pRemoteKernelBus); + // If could not find a free peer ID, return error + if (peer2 == BUS_INVALID_PEER) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d: peerID not available for NVLink P2P\n", + gpu1Instance); + status = NV_ERR_GENERIC; + goto fail; + } + + // Reserve the peer ID for NVLink use + status = kbusReserveP2PPeerIds_HAL(pRemoteGpu, pRemoteKernelBus, NVBIT(peer2)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to reserve peer2, status=0x%x\n", status); + goto fail; + } + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Unexpected state, either of the peer ID is invalid \n"); + status = NV_ERR_GENERIC; + goto fail; + } + +update_mask: + // + // Does the mapping already exist between the given pair of GPUs using the peerIDs + // peer1 and peer2 respectively ? + // + if ((pLocalKernelBus->p2p.busNvlinkPeerNumberMask[gpu1Instance] & NVBIT(peer1)) && + (pRemoteKernelBus->p2p.busNvlinkPeerNumberMask[gpu0Instance] & NVBIT(peer2))) + { + // + // Increment the mapping refcount per peerID - since there is another usage + // of a mapping that is using this peerID + // + pLocalKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peer1]++; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peer2]++; + + // + // Increment the mapping refcount per GPU - since there is another usage of + // the mapping to the given remote GPU + // + pLocalKernelBus->p2p.busNvlinkMappingRefcountPerGpu[gpu1Instance]++; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerGpu[gpu0Instance]++; + + if (bSpaAccessOnly) + { + pLocalKernelBus->p2p.busNvlinkMappingRefcountPerPeerIdSpa[peer1]++; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peer2]++; + } + + NV_PRINTF(LEVEL_INFO, + "- P2P: Peer mapping is already in use for gpu instances %x and %x " + "with peer id's %d and %d. Increasing the mapping refcounts for the" + " peer IDs to %d and %d respectively.\n", + gpu0Instance, gpu1Instance, peer1, peer2, + pLocalKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peer1], + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peer2]); + + goto update_params; + } + + // + // Reached here implies the mapping between the given pair of GPUs using the peerIDs + // peer1 and peer2 does not exist. 
Create the mapping + // + + // Set the peer IDs in the corresponding peer number masks + pLocalKernelBus->p2p.busNvlinkPeerNumberMask[gpu1Instance] |= NVBIT(peer1); + pRemoteKernelBus->p2p.busNvlinkPeerNumberMask[gpu0Instance] |= NVBIT(peer2); + + // + // Increment the mapping refcount per peerID - since there is a new mapping that + // will use this peerID + // + pLocalKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peer1]++; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peer2]++; + + if (bSpaAccessOnly) + { + pLocalKernelBus->p2p.busNvlinkMappingRefcountPerPeerIdSpa[peer1]++; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerPeerId[peer2]++; + } + + // + // Increment the mapping refcount per GPU - since there a new mapping now to the + // given remote GPU + // + pLocalKernelBus->p2p.busNvlinkMappingRefcountPerGpu[gpu1Instance]++; + pRemoteKernelBus->p2p.busNvlinkMappingRefcountPerGpu[gpu0Instance]++; + + NV_PRINTF(LEVEL_INFO, + "added NVLink P2P mapping between GPU%u (peer %u) and GPU%u (peer %u)\n", + gpu0Instance, peer1, gpu1Instance, peer2); + +update_params: + pNv503bAllocParams->subDevicePeerIdMask = NVBIT(peer1); + pNv503bAllocParams->peerSubDevicePeerIdMask = NVBIT(peer2); + + // Update connection type for SRIOV. + pP2PApi->attributes = FLD_SET_DRF(_P2PAPI, _ATTRIBUTES, _CONNECTION_TYPE, _NVLINK, pP2PApi->attributes); + } + + pP2PApi->peerId1 = peer1; + pP2PApi->peerId2 = peer2; + + if (IS_VIRTUAL(pLocalGpu)) + { + NV_RM_RPC_ALLOC_OBJECT(pLocalGpu, + pParams->hClient, + pParams->hParent, + pParams->hResource, + pParams->externalClassId, + pNv503bAllocParams, + status); + if (status != NV_OK) + goto fail; + } + + // + // program the GFID for HSHUB when, + // 1. In hypervisor mode, + // 2. SRIOV is enabled + // + if (!IS_VIRTUAL(pLocalGpu)) + { + NV_ASSERT_OK(deviceGetByHandle(pClient, hDevice, &pLocalDevice)); + NV_ASSERT_OK_OR_RETURN(vgpuGetGfidFromDeviceInfo(pLocalGpu, pLocalDevice, &gfid)); + + { + if (!bSpaAccessOnly) + { + if (gpuIsSriovEnabled(pLocalGpu)) + { + NV_PRINTF(LEVEL_INFO, "Trying to register GPU:%x gfid: %x for P2P access with peerId: %x\n", + pLocalGpu->deviceInstance, gfid, peer1); + NV_ASSERT_OK_OR_RETURN(s_p2papiBindLocalGfid(pLocalGpu, gfid, peer1)); + if (hSubDevice != hPeerSubDevice) + { + NV_PRINTF(LEVEL_INFO, "Trying to register GPU:%x gfid: %x for remote access \n", + pLocalGpu->deviceInstance, gfid); + + NV_ASSERT_OK_OR_RETURN(s_p2papiBindRemoteGfid(pLocalGpu, gfid)); + } + } + + if (gpuIsSriovEnabled(pRemoteGpu)) + { + if (hDevice != hPeerDevice) + { + Device *pRemoteDevice; + + NV_ASSERT_OK(deviceGetByHandle(pClient, hPeerDevice, &pRemoteDevice)); + NV_ASSERT_OK_OR_RETURN(vgpuGetGfidFromDeviceInfo(pRemoteGpu, pRemoteDevice, &gfid)); + } + + if (hSubDevice != hPeerSubDevice) + { + NV_PRINTF(LEVEL_INFO, "Trying to register GPU:%x gfid: %x for P2P access with peerId: %x\n", + pRemoteGpu->deviceInstance, gfid, peer2); + + NV_ASSERT_OK_OR_RETURN(s_p2papiBindLocalGfid(pRemoteGpu, gfid, peer2)); + } + + NV_PRINTF(LEVEL_INFO, "Trying to register GPU:%x gfid: %x for remote access \n", + pRemoteGpu->deviceInstance, gfid); + + NV_ASSERT_OK_OR_RETURN(s_p2papiBindRemoteGfid(pRemoteGpu, gfid)); + } + } + } + } + + // + // For SRIOV system, always check for P2P allocation to determine whether + // this function is allowed to bind FLA + // + if (pLocalKernelBus->flaInfo.bFlaAllocated && !pLocalKernelBus->flaInfo.bFlaBind) + { + if (!IS_VIRTUAL(pLocalGpu)) + { + goto remote_fla_bind; + } + NV_ASSERT_OK(deviceGetByHandle(pClient, hDevice, &pLocalDevice)); 
+ NV_ASSERT_OK_OR_RETURN(vgpuGetGfidFromDeviceInfo(pLocalGpu, pLocalDevice, &gfid)); + + status = kbusSetupBindFla_HAL(pLocalGpu, pLocalKernelBus, gfid); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed binding instblk for FLA, status=0x%x\n", status); + goto fail; + } + } + +remote_fla_bind: + + if (hDevice != hPeerDevice) + { + if (pRemoteKernelBus->flaInfo.bFlaAllocated && !pRemoteKernelBus->flaInfo.bFlaBind) + { + if (!IS_VIRTUAL(pRemoteGpu)) + { + return status; + } + Device *pRemoteDevice; + NV_ASSERT_OK(deviceGetByHandle(pClient, hPeerDevice, &pRemoteDevice)); + NV_ASSERT_OK_OR_RETURN(vgpuGetGfidFromDeviceInfo(pRemoteGpu, pRemoteDevice, &gfid)); + + status = kbusSetupBindFla_HAL(pRemoteGpu, pRemoteKernelBus, gfid); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed binding instblk for FLA, status=0x%x\n", status); + goto fail; + } + } + } + return status; + +fail: + + if (bRegisteredPeerP2P) + subdeviceDelP2PApi(pPeerSubDevice, pP2PApi); + + if (bRegisteredP2P) + subdeviceDelP2PApi(pSubDevice, pP2PApi); + + return status; +} + +void +p2papiDestruct_IMPL +( + P2PApi *pP2PApi +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + NvHandle hClient; + + resGetFreeParams(staticCast(pP2PApi, RsResource), &pCallContext, &pParams); + hClient = pParams->hClient; + + // remove any resources associated with this P2P object before freeing it + pParams->status = CliInvalidateP2PInfo(hClient, pP2PApi); +} + +NV_STATUS CliAddP2PDmaMappingInfo +( + NvHandle hClient, + NvHandle hDevice, + NvU32 subDeviceInst, + NvHandle hPeerDevice, + NvU32 peerSubDeviceInst, + PCLI_DMA_MAPPING_INFO pDmaMapping +) +{ + PNODE pNode; + NV_STATUS status; + PCLI_P2P_INFO pP2PInfo = NULL; + Subdevice *pSubDevice; + Subdevice *pPeerSubDevice; + PCLI_P2P_DMA_MAPPING_INFO pP2PDmaMappingInfo; + NvHandle hSubDevice, hPeerSubDevice; + RsClient *pClient; + + if (NV_OK != serverGetClientUnderLock(&g_resServ, hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + if (NULL == pDmaMapping) + return NV_ERR_INVALID_ARGUMENT; + + // Find the subdevices for local and peer devices + status = subdeviceGetByInstance(pClient, + hDevice, + subDeviceInst, + &pSubDevice); + if ((NV_OK != status) || (NULL == pSubDevice)) + return status; + + hSubDevice = RES_GET_HANDLE(pSubDevice); + + status = subdeviceGetByInstance(pClient, + hPeerDevice, + peerSubDeviceInst, + &pPeerSubDevice); + if ((NV_OK != status) || (NULL == pPeerSubDevice)) + return status; + + hPeerSubDevice = RES_GET_HANDLE(pPeerSubDevice); + + // + // Find a P2P object that maps the two subdevices in question, just use the + // first available. If no such object exists, then this dmaMapping cannot + // be made. + // + status = btreeSearch(hPeerSubDevice, &pNode, pSubDevice->pP2PMappingList); + if (status == NV_OK) + { + PCLI_P2P_INFO_LIST *pP2PInfoList = pNode->Data; + + NV_ASSERT(listHead(pP2PInfoList) != NULL); + pP2PInfo = *listHead(pP2PInfoList); + } + + if (pP2PInfo == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "No P2P mapping between subdevices (0x%08x)and (0x%08x) on the client (0x%08x)\n", + hSubDevice, hPeerSubDevice, hClient); + return status; + } + + pNode = NULL; + + // + // It may happen that subdevices on both sides of P2P object will try + // to map the same virtual address value. We handle this by using + // CLI_P2P_DMA_MAPPING_INFO to store references to each DMA mapping at the + // address. 
+ // + pP2PDmaMappingInfo = mapFind(&pP2PInfo->dmaMappingMap, pDmaMapping->DmaOffset); + + if (pP2PDmaMappingInfo == NULL) + { + pP2PDmaMappingInfo = mapInsertNew(&pP2PInfo->dmaMappingMap, pDmaMapping->DmaOffset); + if (pP2PDmaMappingInfo == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + portMemSet(pP2PDmaMappingInfo, 0, sizeof(CLI_P2P_DMA_MAPPING_INFO)); + } + else + { + if (pP2PDmaMappingInfo->pPeer1Info != NULL && + pP2PDmaMappingInfo->pPeer2Info != NULL) + { + NV_PRINTF(LEVEL_ERROR, "P2P DMA mapping is already allocated!\n"); + return NV_ERR_INVALID_REQUEST; + } + } + + if (pSubDevice == pP2PInfo->peer1) + { + pP2PDmaMappingInfo->pPeer1Info = pDmaMapping; + } + else + { + NV_ASSERT(pSubDevice == pP2PInfo->peer2); + pP2PDmaMappingInfo->pPeer2Info = pDmaMapping; + } + + pDmaMapping->pP2PInfo = pP2PInfo; + return NV_OK; +} + +NV_STATUS CliInvalidateP2PInfo +( + NvHandle hClient, + PCLI_P2P_INFO pP2PInfo +) +{ + OBJGPU *pLocalGpu; + KernelBus *pLocalKernelBus; + OBJGPU *pRemoteGpu; + KernelBus *pRemoteKernelBus; + RsClient *pClient; + NvHandle hSubDevice; + NvHandle hPeerSubDevice; + NvHandle hDevice; + NvHandle hPeerDevice; + NV_STATUS status = NV_OK; + + if (NV_OK != serverGetClientUnderLock(&g_resServ, hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + if (NULL == pP2PInfo) + return NV_ERR_INVALID_OBJECT_HANDLE; + + if (NULL == pP2PInfo->peer1 || NULL == pP2PInfo->peer2) + return NV_OK; + + hSubDevice = RES_GET_HANDLE(pP2PInfo->peer1); + hPeerSubDevice = RES_GET_HANDLE(pP2PInfo->peer2); + + // Find the gpu for the subdevices of this P2P object + if (CliSetSubDeviceContext(hClient, hSubDevice, &hDevice, &pLocalGpu) != NV_OK || + NULL == pLocalGpu) + { + NV_PRINTF(LEVEL_ERROR, "Failed to find GPU for hSubDevice (0x%08x)\n", + hSubDevice); + return NV_ERR_INVALID_DEVICE; + } + if (CliSetSubDeviceContext(hClient, hPeerSubDevice, &hPeerDevice, &pRemoteGpu) != NV_OK || + NULL == pRemoteGpu) + { + NV_PRINTF(LEVEL_ERROR, "Failed to find GPU for hSubDevice (0x%08x)\n", + hPeerSubDevice); + return NV_ERR_INVALID_DEVICE; + } + + pLocalKernelBus = GPU_GET_KERNEL_BUS(pLocalGpu); + pRemoteKernelBus = GPU_GET_KERNEL_BUS(pRemoteGpu); + + if (!IS_VIRTUAL(pLocalGpu)) + { + // remove any mailbox resources associated with this mapping + status = kbusRemoveP2PMapping_HAL(pLocalGpu, pLocalKernelBus, + pRemoteGpu, pRemoteKernelBus, + pP2PInfo->peerId1, pP2PInfo->peerId2, + pP2PInfo->attributes); + } + + subdeviceDelP2PApi(pP2PInfo->peer1, pP2PInfo); + if (hSubDevice != hPeerSubDevice) + { + subdeviceDelP2PApi(pP2PInfo->peer2, pP2PInfo); + } + + pP2PInfo->peer1 = NULL; + pP2PInfo->peer2 = NULL; + mapDestroy(&pP2PInfo->dmaMappingMap); + + kbusUnsetP2PMailboxBar1Area_HAL(pLocalGpu, pLocalKernelBus); + kbusUnsetP2PMailboxBar1Area_HAL(pRemoteGpu, pRemoteKernelBus); + + return status; +} + +NV_STATUS CliDelP2PDmaMappingInfo +( + NvHandle hClient, + PCLI_DMA_MAPPING_INFO pDmaMapping +) +{ + PCLI_P2P_INFO pP2PInfo; + PCLI_P2P_DMA_MAPPING_INFO pP2PDmaMappingInfo; + + NV_ASSERT_OR_RETURN(pDmaMapping != NULL, NV_ERR_INVALID_ARGUMENT); + + pP2PInfo = pDmaMapping->pP2PInfo; + if (NULL == pP2PInfo || mapCount(&pP2PInfo->dmaMappingMap) == 0) + return NV_ERR_INVALID_ARGUMENT; + + pP2PDmaMappingInfo = mapFind(&pP2PInfo->dmaMappingMap, pDmaMapping->DmaOffset); + if (pP2PDmaMappingInfo != NULL) + { + if (pP2PDmaMappingInfo->pPeer1Info == pDmaMapping) + { + pP2PDmaMappingInfo->pPeer1Info = NULL; + } + else if (pP2PDmaMappingInfo->pPeer2Info == pDmaMapping) + { + pP2PDmaMappingInfo->pPeer2Info = NULL; + } + + if 
(pP2PDmaMappingInfo->pPeer1Info == NULL && + pP2PDmaMappingInfo->pPeer2Info == NULL) + { + mapRemove(&pP2PInfo->dmaMappingMap, pP2PDmaMappingInfo); + } + } + + pDmaMapping->pP2PInfo = NULL; + return NV_OK; +} + +NV_STATUS CliUpdateP2PDmaMappingInList +( + NvHandle hClient, + PCLI_DMA_MAPPING_INFO pDmaMapping, + NvU64 dmaOffset +) +{ + PCLI_P2P_INFO pP2PInfo; + PCLI_P2P_DMA_MAPPING_INFO pP2PDmaMappingInfo; + PCLI_P2P_DMA_MAPPING_INFO pNewP2PDmaMappingInfo; + PCLI_DMA_MAPPING_INFO pPeer1Info = NULL; + PCLI_DMA_MAPPING_INFO pPeer2Info = NULL; + + NV_ASSERT_OR_RETURN(pDmaMapping != NULL, NV_ERR_INVALID_ARGUMENT); + + pP2PInfo = pDmaMapping->pP2PInfo; + if (NULL == pP2PInfo || mapCount(&pP2PInfo->dmaMappingMap) == 0) + return NV_ERR_INVALID_ARGUMENT; + + pP2PDmaMappingInfo = mapFind(&pP2PInfo->dmaMappingMap, pDmaMapping->DmaOffset); + if (pP2PDmaMappingInfo != NULL) + { + // Cache the old values + pPeer1Info = pP2PDmaMappingInfo->pPeer1Info; + pPeer2Info = pP2PDmaMappingInfo->pPeer2Info; + + // free the old dma mapping Info + mapRemove(&pP2PInfo->dmaMappingMap, pP2PDmaMappingInfo); + + // allocate the new object and insert in the list + pNewP2PDmaMappingInfo = mapInsertNew(&pP2PInfo->dmaMappingMap, dmaOffset); + if (pNewP2PDmaMappingInfo == NULL) + { + if (mapFind(&pP2PInfo->dmaMappingMap, dmaOffset) != NULL) + { + return NV_ERR_INSERT_DUPLICATE_NAME; + } + else + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + portMemSet(pNewP2PDmaMappingInfo, 0, sizeof(CLI_P2P_DMA_MAPPING_INFO)); + pNewP2PDmaMappingInfo->pPeer1Info = pPeer1Info; + pNewP2PDmaMappingInfo->pPeer2Info = pPeer2Info; + + } + + return NV_OK; +} + +NV_STATUS CliFreeSubDeviceP2PList +( + Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext +) +{ + RsClient *pRsClient = pCallContext->pClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + PNODE pNode; + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (pResourceRef == NULL) + return NV_OK; + + while (NULL != (pNode = pSubdevice->pP2PMappingList)) + { + PCLI_P2P_INFO_LIST *pP2PInfoList = pNode->Data; + PCLI_P2P_INFO *ppP2PInfo; + PCLI_P2P_INFO *ppP2PInfoNext; + + for (ppP2PInfo = listHead(pP2PInfoList); + ppP2PInfo != NULL; + ppP2PInfo = ppP2PInfoNext) + { + ppP2PInfoNext = listNext(pP2PInfoList, ppP2PInfo); + + status = pRmApi->Free(pRmApi, pRsClient->hClient, (NvHandle)(*ppP2PInfo)->Node.keyStart); + if (NV_OK != status) + { + return status; + } + } + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/bus/third_party_p2p.c b/src/nvidia/src/kernel/gpu/bus/third_party_p2p.c new file mode 100644 index 000000000..05753cea1 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/third_party_p2p.c @@ -0,0 +1,1183 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/bus/third_party_p2p.h" +#include "platform/p2p/p2p_caps.h" +#include "gpu/bus/kern_bus.h" +#include "mem_mgr/mem.h" +#include "rmapi/rs_utils.h" +#include "vgpu/rpc.h" +#include "os/os.h" + +#include "class/cl503c.h" + +// +// A monotonic counter as ID that's assigned to every new VidmemInfo. +// This is used to get internal VidmemInfo for persistent mappings. +// +static volatile NvU64 vidmemInfoId = 0; + +// +// We make sure that only one instance of NV50_THIRD_PARTY_P2P can be active at +// a time per client per GPU. It simplifies tuple(VA,size) tracking/validation +// in SW. For example, detecting duplicate/overlapping tuples. +// +NV_STATUS +thirdpartyp2pConstruct_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV503C_ALLOC_PARAMETERS *pNv503cAllocParams = pParams->pAllocParams; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hThirdPartyP2P = pParams->hResource; + NvU32 flags = 0; + OBJGPU *pGpu; + CLI_THIRD_PARTY_P2P_TYPE type; + RsResourceRef *pSubdeviceRef = pCallContext->pResourceRef->pParentRef; + Subdevice *pSubdevice; + NvU64 p2pToken = 0; + NvU32 peerIndex = 0; + NvU32 pidIndex = 0; + NV_STATUS status = NV_OK; + NvU32 pid = osGetCurrentProcess(); + + pSubdevice = dynamicCast(pSubdeviceRef->pResource, Subdevice); + if (pSubdevice == NULL) + return NV_ERR_INVALID_OBJECT_PARENT; + + pGpu = GPU_RES_GET_GPU(pThirdPartyP2P); + if (pGpu == NULL) + return NV_ERR_INVALID_OBJECT_PARENT; + + if (gpuIsApmFeatureEnabled(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + + if (pNv503cAllocParams != NULL) + { + flags = pNv503cAllocParams->flags; + } + + switch(DRF_VAL(503C, _ALLOC_PARAMETERS_FLAGS, _TYPE, flags)) + { + case NV503C_ALLOC_PARAMETERS_FLAGS_TYPE_BAR1: + type = CLI_THIRD_PARTY_P2P_TYPE_BAR1; + break; + case NV503C_ALLOC_PARAMETERS_FLAGS_TYPE_NVLINK: + type = CLI_THIRD_PARTY_P2P_TYPE_NVLINK; + break; + case NV503C_ALLOC_PARAMETERS_FLAGS_TYPE_PROPRIETARY: + type = CLI_THIRD_PARTY_P2P_TYPE_PROPRIETARY; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + if (type == CLI_THIRD_PARTY_P2P_TYPE_BAR1) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + p2pToken = CLI_ENCODEP2PTOKEN(pGpu->gpuId, hClient); + } + else if (type == CLI_THIRD_PARTY_P2P_TYPE_NVLINK) + { + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING)) + { + return NV_ERR_INVALID_STATE; + } + + p2pToken = CLI_ENCODEP2PTOKEN(pGpu->gpuId, hClient); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + + pThirdPartyP2P->hClient = hClient; + pThirdPartyP2P->hThirdPartyP2P = hThirdPartyP2P; + pThirdPartyP2P->type = type; + pThirdPartyP2P->pSubdevice = pSubdevice; + pThirdPartyP2P->peerIndex = peerIndex; + pThirdPartyP2P->p2pToken = p2pToken; + pThirdPartyP2P->pDestroyCallback = NULL; + pThirdPartyP2P->pData = NULL; + pThirdPartyP2P->pAddressRangeTree = 
NULL; + pThirdPartyP2P->Node.keyStart = hThirdPartyP2P; + pThirdPartyP2P->Node.keyEnd = hThirdPartyP2P; + pThirdPartyP2P->Node.Data = (void*)pThirdPartyP2P; + portMemSet(pThirdPartyP2P->pidClientList, 0, sizeof(pThirdPartyP2P->pidClientList)); + mapInit(&pThirdPartyP2P->vaSpaceInfoMap, portMemAllocatorGetGlobalNonPaged()); + mapInitIntrusive(&pThirdPartyP2P->vidmemInfoMap); + + status = NV_ERR_OBJECT_NOT_FOUND; + for (pidIndex = 0; pidIndex < CLI_THIRD_PARTY_P2P_MAX_CLIENT; pidIndex++) + { + if (0 == pThirdPartyP2P->pidClientList[pidIndex].pid) + { + pThirdPartyP2P->pidClientList[pidIndex].pid = pid; + pThirdPartyP2P->pidClientList[pidIndex].hClient = hClient; + status = NV_OK; + break; + } + } + + NV_ASSERT(status == NV_OK); + return status; +} + +static inline +NV_STATUS gpuFullPowerSanityCheck(OBJGPU *pGpu, NvBool bGpuAccess) +{ + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, bGpuAccess, NV_FALSE); + return NV_OK; +} + +void +thirdpartyp2pDestruct_IMPL +( + ThirdPartyP2P *pThirdPartyP2P +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pThirdPartyP2P); + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + + resGetFreeParams(staticCast(pThirdPartyP2P, RsResource), &pCallContext, &pParams); + + pParams->status = gpuFullPowerSanityCheck(pGpu, NV_TRUE); + if (pParams->status != NV_OK) + { + return; + } + + mapDestroy(&pThirdPartyP2P->vaSpaceInfoMap); + + pVidmemInfo = mapFindGEQ(&pThirdPartyP2P->vidmemInfoMap, 0); + while (pVidmemInfo != NULL) + { + status = CliDelThirdPartyP2PVidmemInfo(pThirdPartyP2P, + pVidmemInfo->hMemory); + NV_ASSERT(status == NV_OK); + pVidmemInfo = mapFindGEQ(&pThirdPartyP2P->vidmemInfoMap, 0); + } + + // + // After destroying all of the vidmem info entries, there shouldn't remain + // any entries in the address range tree. 
+ // + NV_ASSERT(pThirdPartyP2P->pAddressRangeTree == NULL); + + if (pThirdPartyP2P->pDestroyCallback != NULL) + { + pThirdPartyP2P->pDestroyCallback(pThirdPartyP2P->pData); + } + + pParams->status = status; +} + +NV_STATUS CliGetThirdPartyP2PInfo +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + ThirdPartyP2P **ppThirdPartyP2P +) +{ + RsResourceRef *pThirdPartyP2PRef; + RsClient *pRsClient; + NV_ASSERT_OR_RETURN((ppThirdPartyP2P != NULL), NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, hThirdPartyP2P, &pThirdPartyP2PRef)); + *ppThirdPartyP2P = dynamicCast(pThirdPartyP2PRef->pResource, ThirdPartyP2P); + + if (*ppThirdPartyP2P == NULL) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} + +NV_STATUS CliGetThirdPartyP2PInfoFromToken +( + NvU64 p2pToken, + ThirdPartyP2P **ppThirdPartyP2P +) +{ + ThirdPartyP2P *pThirdPartyP2P; + RmClient **ppClient; + RmClient *pClient; + + NV_ASSERT_OR_RETURN((ppThirdPartyP2P != NULL), NV_ERR_INVALID_ARGUMENT); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + RS_ITERATOR it; + RsClient *pRsClient; + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(ThirdPartyP2P), RS_ITERATE_DESCENDANTS, NV_TRUE); + while (clientRefIterNext(pRsClient, &it)) + { + pThirdPartyP2P = dynamicCast(it.pResourceRef->pResource, ThirdPartyP2P); + if (pThirdPartyP2P->p2pToken == p2pToken) + { + *ppThirdPartyP2P = pThirdPartyP2P; + return NV_OK; + } + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +static +NV_STATUS CliGetPlatformDataMatchFromVidMem +( + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo, + void *pPlatformData +) +{ + NV_STATUS status; + PNODE pNode; + + status = btreeSearch((NvU64)(NvUPtr)pPlatformData, + &pNode, pVidmemInfo->pMappingInfoList); + return status; +} + +NV_STATUS CliGetThirdPartyP2PPlatformData +( + ThirdPartyP2P *pThirdPartyP2P, + void *platformData +) +{ + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + CLI_THIRD_PARTY_P2P_VIDMEM_INFO_MAPIter vidMemMapIter = mapIterAll(&pThirdPartyP2P->vidmemInfoMap); + + while (mapIterNext(&vidMemMapIter)) + { + pVidmemInfo = vidMemMapIter.pValue; + if (CliGetPlatformDataMatchFromVidMem(pVidmemInfo, platformData) == NV_OK) + { + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS CliNextThirdPartyP2PInfoWithPid +( + OBJGPU *pGpu, + NvU32 pid, + NvHandle hClient, + RmClient **ppClientOut, + ThirdPartyP2P **ppThirdPartyP2P +) +{ + ThirdPartyP2P *pThirdPartyP2P; + RmClient **ppClient; + RmClient *pClient; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + RsClient *pRsClient; + RS_ITERATOR it, devIt, subDevIt; + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (pRsClient->type == CLIENT_TYPE_KERNEL) + { + continue; + } + + devIt = clientRefIter(pRsClient, NULL, classId(Device), + RS_ITERATE_CHILDREN, NV_TRUE); + while(clientRefIterNext(pRsClient, &devIt)) + { + Device *pDevice = dynamicCast(devIt.pResourceRef->pResource, Device); + OBJGPU *pGpuFromDevice = GPU_RES_GET_GPU(pDevice); + + if ((pGpu != NULL) && (pGpu != pGpuFromDevice)) + { + continue; + } + + subDevIt = clientRefIter(pRsClient, devIt.pResourceRef, classId(Subdevice), + RS_ITERATE_CHILDREN, NV_TRUE); + while(clientRefIterNext(pRsClient, &subDevIt)) + { + it = 
clientRefIter(pRsClient, subDevIt.pResourceRef, + classId(ThirdPartyP2P), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(pRsClient, &it)) + { + pThirdPartyP2P = dynamicCast(it.pResourceRef->pResource, ThirdPartyP2P); + if (NULL == *ppThirdPartyP2P) + { + if (thirdpartyp2pIsValidClientPid(pThirdPartyP2P, pid, hClient)) + { + *ppClientOut = pClient; + *ppThirdPartyP2P = pThirdPartyP2P; + return NV_OK; + } + } + else if (pThirdPartyP2P->p2pToken == + (*ppThirdPartyP2P)->p2pToken) + { + *ppClientOut = NULL; + *ppThirdPartyP2P = NULL; + } + } + } + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS CliAddThirdPartyP2PVASpace +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvHandle hVASpace, + NvU32 *pVASpaceToken +) +{ + RsClient *pRsClient; + ThirdPartyP2P *pThirdPartyP2P; + CLI_THIRD_PARTY_P2P_VASPACE_INFO vaSpaceInfo; + NvU32 vaSpaceToken; + NV_STATUS status; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + NV_ASSERT_OR_RETURN((pVASpaceToken != NULL), NV_ERR_INVALID_ARGUMENT); + + status = CliGetThirdPartyP2PInfo(hClient, hThirdPartyP2P, + &pThirdPartyP2P); + if (status != NV_OK || pThirdPartyP2P == NULL) + { + return NV_ERR_INVALID_OBJECT; + } + + portMemSet(&vaSpaceInfo, 0, sizeof(CLI_THIRD_PARTY_P2P_VASPACE_INFO)); + + for (vaSpaceToken = 0xfe00; vaSpaceToken < 0xff00; vaSpaceToken++) + { + if (mapFind(&pThirdPartyP2P->vaSpaceInfoMap, vaSpaceToken) == NULL) + break; + } + if (vaSpaceToken == 0xff00) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + vaSpaceInfo.hClient = hClient; + vaSpaceInfo.hThirdPartyP2P = hThirdPartyP2P; + vaSpaceInfo.hVASpace = hVASpace; + vaSpaceInfo.vaSpaceToken = vaSpaceToken; + + if (mapInsertValue(&pThirdPartyP2P->vaSpaceInfoMap, hVASpace, &vaSpaceInfo) == NULL) + { + if (mapFind(&pThirdPartyP2P->vaSpaceInfoMap, hVASpace) != NULL) + { + return NV_ERR_INSERT_DUPLICATE_NAME; + } + else + { + return NV_ERR_NO_MEMORY; + } + } + + if (hVASpace != 0) + { + RsResourceRef *pP2PRef; + RsResourceRef *pVASpaceRef; + if ((clientGetResourceRef(pRsClient, hThirdPartyP2P, &pP2PRef) == NV_OK) && + (clientGetResourceRef(pRsClient, hVASpace, &pVASpaceRef) == NV_OK)) + { + refAddDependant(pVASpaceRef, pP2PRef); + } + } + + *pVASpaceToken = vaSpaceToken; + + return NV_OK; +} + +NV_STATUS CliDelThirdPartyP2PVASpace +( + ThirdPartyP2P *pThirdPartyP2P, + NvHandle hVASpace +) +{ + PCLI_THIRD_PARTY_P2P_VASPACE_INFO pVASpaceInfo; + + pVASpaceInfo = mapFind(&pThirdPartyP2P->vaSpaceInfoMap, hVASpace); + + if (pVASpaceInfo == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + mapRemove(&pThirdPartyP2P->vaSpaceInfoMap, pVASpaceInfo); + + return NV_OK; +} + +NV_STATUS thirdpartyp2pGetNextVASpaceInfo_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + PCLI_THIRD_PARTY_P2P_VASPACE_INFO *ppVASpaceInfo +) +{ + if (*ppVASpaceInfo == NULL) + { + *ppVASpaceInfo = mapFindGEQ(&pThirdPartyP2P->vaSpaceInfoMap, 0); + } + else + { + *ppVASpaceInfo = mapNext(&pThirdPartyP2P->vaSpaceInfoMap, *ppVASpaceInfo); + } + + if (*ppVASpaceInfo == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + else + { + return NV_OK; + } +} + +NV_STATUS thirdpartyp2pGetVASpaceInfoFromToken_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + NvU32 vaSpaceToken, + PCLI_THIRD_PARTY_P2P_VASPACE_INFO *ppVASpaceInfo +) +{ + PCLI_THIRD_PARTY_P2P_VASPACE_INFO pVASpaceInfo; + CLI_THIRD_PARTY_P2P_VASPACE_INFO_MAPIter vaSpaceInfoIter = mapIterAll(&pThirdPartyP2P->vaSpaceInfoMap); + + NV_ASSERT_OR_RETURN((ppVASpaceInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + while(mapIterNext(&vaSpaceInfoIter)) 
+ { + pVASpaceInfo = vaSpaceInfoIter.pValue; + + if (pVASpaceInfo->vaSpaceToken == vaSpaceToken) + { + *ppVASpaceInfo = pVASpaceInfo; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS CliAddThirdPartyP2PVidmemInfo +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvHandle hMemory, + NvU64 address, + NvU64 size, + NvU64 offset, + Memory *pMemory +) +{ + NV_STATUS status; + ThirdPartyP2P *pThirdPartyP2P; + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + + NV_ASSERT_OR_RETURN((pMemory != NULL), NV_ERR_INVALID_ARGUMENT); + + status = CliGetThirdPartyP2PInfo(hClient, hThirdPartyP2P, &pThirdPartyP2P); + if (status != NV_OK) + { + return status; + } + + pVidmemInfo = portMemAllocNonPaged(sizeof(CLI_THIRD_PARTY_P2P_VIDMEM_INFO)); + if (pVidmemInfo == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pVidmemInfo, 0, sizeof(CLI_THIRD_PARTY_P2P_VIDMEM_INFO)); + + listInitIntrusive(&pVidmemInfo->mappingExtentList); + + if (!mapInsertExisting(&pThirdPartyP2P->vidmemInfoMap, hMemory, pVidmemInfo)) + { + if (mapFind(&pThirdPartyP2P->vidmemInfoMap, hMemory) != NULL) + { + status = NV_ERR_INSERT_DUPLICATE_NAME; + } + else + { + status = NV_ERR_INVALID_STATE; + } + portMemFree(pVidmemInfo); + return status; + } + + pVidmemInfo->offset = offset; + + pVidmemInfo->addressRangeNode.keyStart = address; + pVidmemInfo->addressRangeNode.keyEnd = address + size - 1; + pVidmemInfo->addressRangeNode.Data = pVidmemInfo; + + status = btreeInsert(&pVidmemInfo->addressRangeNode, + &pThirdPartyP2P->pAddressRangeTree); + if (status != NV_OK) + { + mapRemove(&pThirdPartyP2P->vidmemInfoMap, pVidmemInfo); + portMemFree(pVidmemInfo); + return status; + } + + pVidmemInfo->hClient = hClient; + pVidmemInfo->hThirdPartyP2P = hThirdPartyP2P; + pVidmemInfo->hMemory = hMemory; + pVidmemInfo->pMemDesc = pMemory->pMemDesc; + pVidmemInfo->id = portAtomicExIncrementU64(&vidmemInfoId); + + return NV_OK; +} + +// For persistent mappings, free VidmemInfo if it's not used by any clients. +void CliDelThirdPartyP2PVidmemInfoPersistent +( + ThirdPartyP2P *pThirdPartyP2P, + CLI_THIRD_PARTY_P2P_VIDMEM_INFO *pVidmemInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NODE *pNode = NULL; + + NV_ASSERT((pVidmemInfo != NULL) && (pThirdPartyP2P != NULL)); + + btreeEnumStart(0, &pNode, pVidmemInfo->pMappingInfoList); + if (pNode == NULL) + { + pRmApi->Free(pRmApi, pThirdPartyP2P->hClient, pVidmemInfo->hMemory); + } +} + +NV_STATUS CliDelThirdPartyP2PVidmemInfo +( + ThirdPartyP2P *pThirdPartyP2P, + NvHandle hMemory +) +{ + NV_STATUS status; + PNODE pNode; + PCLI_THIRD_PARTY_P2P_MAPPING_INFO pMappingInfo; + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + void *pKey; + NvBool bPendingMappings = NV_FALSE; + + pVidmemInfo = mapFind(&pThirdPartyP2P->vidmemInfoMap, hMemory); + if (pVidmemInfo == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + pNode = pVidmemInfo->pMappingInfoList; + while (pNode != NULL) + { + pMappingInfo = pNode->Data; + pKey = (void *)(NvUPtr)pNode->keyStart; + + if (pMappingInfo->pFreeCallback != NULL) + { + pMappingInfo->pFreeCallback(pMappingInfo->pData); + } + + status = thirdpartyp2pDelMappingInfoByKey(pThirdPartyP2P, pKey, NV_FALSE); + NV_ASSERT(status == NV_OK); + + pNode = pVidmemInfo->pMappingInfoList; + + bPendingMappings = NV_TRUE; + } + + // RSYNC is needed only if there are outstanding mappings. 
+ if (bPendingMappings) + { + osWaitForIbmnpuRsync(pVidmemInfo->pMemDesc->pGpu->pOsGpuInfo); + } + + mapRemove(&pThirdPartyP2P->vidmemInfoMap, pVidmemInfo); + + status = btreeUnlink(&pVidmemInfo->addressRangeNode, + &pThirdPartyP2P->pAddressRangeTree); + NV_ASSERT(status == NV_OK); + + portMemFree(pVidmemInfo); + + return NV_OK; +} + +NV_STATUS CliGetThirdPartyP2PVidmemInfoFromAddress +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvU64 address, + NvU64 length, + NvU64 *pOffset, + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO *ppVidmemInfo +) +{ + NV_STATUS status; + PNODE pNode; + ThirdPartyP2P *pThirdPartyP2P; + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + + NV_ASSERT_OR_RETURN((pOffset != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((ppVidmemInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + status = CliGetThirdPartyP2PInfo(hClient, hThirdPartyP2P, + &pThirdPartyP2P); + if (status != NV_OK) + { + return status; + } + + status = btreeSearch(address, &pNode, + pThirdPartyP2P->pAddressRangeTree); + if (status != NV_OK) + { + return status; + } + + pVidmemInfo = pNode->Data; + + if (address + length - 1 > pVidmemInfo->addressRangeNode.keyEnd) + return NV_ERR_INVALID_ARGUMENT; + + *ppVidmemInfo = pVidmemInfo; + + // + // Adjust offset w.r.t. the memdesc associated with PCLI_THIRD_PARTY_P2P_VIDMEM_INFO, + // so that it can be safely consumed by memdescGetPhysAddr. + // + *pOffset = pVidmemInfo->offset + + (address - pVidmemInfo->addressRangeNode.keyStart); + + return NV_OK; +} + +NV_STATUS CliGetThirdPartyP2PVidmemInfoFromId +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvU64 id, + CLI_THIRD_PARTY_P2P_VIDMEM_INFO **ppVidmemInfo +) +{ + NV_STATUS status; + PNODE pNode; + ThirdPartyP2P *pThirdPartyP2P; + + NV_ASSERT_OR_RETURN((ppVidmemInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + status = CliGetThirdPartyP2PInfo(hClient, hThirdPartyP2P, + &pThirdPartyP2P); + if (status != NV_OK) + { + return status; + } + + status = btreeSearch(id, &pNode, pThirdPartyP2P->pAddressRangeTree); + if (status != NV_OK) + { + return status; + } + + *ppVidmemInfo = pNode->Data; + + return NV_OK; +} + +NV_STATUS CliRegisterThirdPartyP2PMappingCallback +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvHandle hMemory, + void *pKey, + THIRD_PARTY_P2P_VIDMEM_FREE_CALLBACK *pFreeCallback, + void *pData +) +{ + NV_STATUS status; + PCLI_THIRD_PARTY_P2P_MAPPING_INFO pMappingInfo; + + NV_ASSERT_OR_RETURN((pFreeCallback != NULL), NV_ERR_INVALID_ARGUMENT); + + status = CliGetThirdPartyP2PMappingInfoFromKey(hClient, hThirdPartyP2P, + hMemory, pKey, &pMappingInfo); + if (status != NV_OK) + { + return status; + } + + NV_ASSERT_OR_RETURN((pMappingInfo->pFreeCallback == NULL), NV_ERR_INVALID_STATE); + + pMappingInfo->pFreeCallback = pFreeCallback; + pMappingInfo->pData = pData; + + return NV_OK; +} + +NV_STATUS CliAddThirdPartyP2PMappingInfo +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvHandle hMemory, + void *pKey, + THIRD_PARTY_P2P_VIDMEM_FREE_CALLBACK *pFreeCallback, + void *pData, + PCLI_THIRD_PARTY_P2P_MAPPING_INFO *ppMappingInfo +) +{ + NV_STATUS status; + ThirdPartyP2P *pThirdPartyP2P; + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + PCLI_THIRD_PARTY_P2P_MAPPING_INFO pMappingInfo; + + NV_ASSERT_OR_RETURN((pKey != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((ppMappingInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + status = CliGetThirdPartyP2PInfo(hClient, hThirdPartyP2P, + &pThirdPartyP2P); + if (status != NV_OK) + { + return status; + } + + pVidmemInfo = mapFind(&pThirdPartyP2P->vidmemInfoMap, hMemory); 
+ if (pVidmemInfo == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + pMappingInfo = portMemAllocNonPaged(sizeof(CLI_THIRD_PARTY_P2P_MAPPING_INFO)); + if (pMappingInfo == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pMappingInfo, 0, sizeof(CLI_THIRD_PARTY_P2P_MAPPING_INFO)); + + pMappingInfo->Node.keyStart = (NvU64)(NvUPtr)pKey; + pMappingInfo->Node.keyEnd = (NvU64)(NvUPtr)pKey; + pMappingInfo->Node.Data = pMappingInfo; + + status = btreeInsert(&pMappingInfo->Node, &pVidmemInfo->pMappingInfoList); + if (status != NV_OK) + { + portMemFree(pMappingInfo); + return status; + } + + pMappingInfo->pFreeCallback = pFreeCallback; + pMappingInfo->pData = pData; + + *ppMappingInfo = pMappingInfo; + + return NV_OK; +} + +NV_STATUS CliGetThirdPartyP2PMappingInfoFromKey +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvHandle hMemory, + void *pKey, + PCLI_THIRD_PARTY_P2P_MAPPING_INFO *ppMappingInfo +) +{ + NV_STATUS status; + ThirdPartyP2P *pThirdPartyP2P; + PNODE pNode; + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + + NV_ASSERT_OR_RETURN((pKey != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((ppMappingInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + status = CliGetThirdPartyP2PInfo(hClient, hThirdPartyP2P, + &pThirdPartyP2P); + if (status != NV_OK) + { + return status; + } + + pVidmemInfo = mapFind(&pThirdPartyP2P->vidmemInfoMap, hMemory); + if (pVidmemInfo == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + status = btreeSearch((NvU64)(NvUPtr)pKey, + &pNode, pVidmemInfo->pMappingInfoList); + if (status != NV_OK) + { + return status; + } + + *ppMappingInfo = pNode->Data; + + return NV_OK; +} + +static NV_STATUS _thirdpartyp2pDelMappingInfoByKey +( + ThirdPartyP2P *pThirdPartyP2P, + void *pKey, + NvBool bIsRsyncNeeded, + CLI_THIRD_PARTY_P2P_VIDMEM_INFO **ppVidmemInfo +) +{ + NV_STATUS status; + PNODE pNode; + PCLI_THIRD_PARTY_P2P_VIDMEM_INFO pVidmemInfo; + PCLI_THIRD_PARTY_P2P_MAPPING_INFO pMappingInfo; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfo; + PCLI_THIRD_PARTY_P2P_MAPPING_EXTENT_INFO pExtentInfoNext; + Subdevice *pSubdevice; + OBJGPU *pGpu = GPU_RES_GET_GPU(pThirdPartyP2P); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU64 length; + NvU64 mappingLength; + NvU64 address; + NvU64 startOffset; + CLI_THIRD_PARTY_P2P_VIDMEM_INFO_MAPIter vidMemMapIter; + + NV_ASSERT_OR_RETURN((pKey != NULL), NV_ERR_INVALID_ARGUMENT); + + pSubdevice = pThirdPartyP2P->pSubdevice; + + GPU_RES_SET_THREAD_BC_STATE(pThirdPartyP2P); + + vidMemMapIter = mapIterAll(&pThirdPartyP2P->vidmemInfoMap); + while (mapIterNext(&vidMemMapIter)) + { + pVidmemInfo = vidMemMapIter.pValue; + + status = btreeSearch((NvU64)(NvUPtr)pKey, + &pNode, pVidmemInfo->pMappingInfoList); + if (status == NV_OK) + { + pMappingInfo = pNode->Data; + length = pMappingInfo->length; + address = pMappingInfo->address; + + for(pExtentInfo = pMappingInfo->pStart; (pExtentInfo != NULL) && (length != 0); + pExtentInfo = pExtentInfoNext) + { + pExtentInfoNext = listNext(&pVidmemInfo->mappingExtentList, pExtentInfo); + startOffset = address - pExtentInfo->address; + mappingLength = NV_MIN(length, (pExtentInfo->length - startOffset)); + + address += mappingLength; + length -= mappingLength; + pExtentInfo->refCount--; + if (pExtentInfo->refCount == 0) + { + NV_PRINTF(LEVEL_INFO, + "Freeing P2P mapping for gpu VA: 0x%llx, length: 0x%llx\n", + pExtentInfo->address, pExtentInfo->length); + + if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + NV_RM_RPC_UNMAP_MEMORY(pGpu, pThirdPartyP2P->hClient, + 
RES_GET_PARENT_HANDLE(pSubdevice), + pVidmemInfo->hMemory, + 0, + pExtentInfo->fbApertureOffset, status); + } + else + { + status = kbusUnmapFbAperture_HAL(pGpu, pKernelBus, + pExtentInfo->pMemDesc, + pExtentInfo->fbApertureOffset, + pExtentInfo->length, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + } + NV_ASSERT(status == NV_OK); + + listRemove(&pVidmemInfo->mappingExtentList, pExtentInfo); + + pSubdevice->P2PfbMappedBytes -= pExtentInfo->length; + memdescDestroy(pExtentInfo->pMemDesc); + portMemFree(pExtentInfo); + } + } + NV_ASSERT(length == 0); + + status = btreeUnlink(&pMappingInfo->Node, + &pVidmemInfo->pMappingInfoList); + if (status == NV_OK) + { + portMemFree(pMappingInfo); + } + + if (bIsRsyncNeeded) + { + osWaitForIbmnpuRsync(pVidmemInfo->pMemDesc->pGpu->pOsGpuInfo); + } + + // + // For persistent mappings, we return the VidmemInfo and clean up + // the internal ThirdPartyP2P object and duped memory handle. + // + if (ppVidmemInfo != NULL) + { + *ppVidmemInfo = pVidmemInfo; + break; + } + } + } + + + return NV_OK; +} + +NV_STATUS thirdpartyp2pDelMappingInfoByKey_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + void *pKey, + NvBool bIsRsyncNeeded +) +{ + return _thirdpartyp2pDelMappingInfoByKey(pThirdPartyP2P, + pKey, + bIsRsyncNeeded, + NULL); +} + +NV_STATUS thirdpartyp2pDelPersistentMappingInfoByKey_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + void *pKey, + NvBool bIsRsyncNeeded +) +{ + CLI_THIRD_PARTY_P2P_VIDMEM_INFO *pVidmemInfo = NULL; + + NV_ASSERT_OK_OR_RETURN( + _thirdpartyp2pDelMappingInfoByKey(pThirdPartyP2P, pKey, + bIsRsyncNeeded, &pVidmemInfo)); + + CliDelThirdPartyP2PVidmemInfoPersistent(pThirdPartyP2P, pVidmemInfo); + + return NV_OK; +} + +NV_STATUS +CliAddThirdPartyP2PClientPid +( + NvHandle hClient, + NvHandle hThirdPartyP2P, + NvU32 pid, + NvU32 client +) +{ + RsResourceRef *pThirdPartyP2PRef; + ThirdPartyP2P *pThirdPartyP2P; + NvU32 pidIndex; + RsClient *pClient; + + NV_ASSERT_OK_OR_RETURN( + serverGetClientUnderLock(&g_resServ, hClient, &pClient)); + + NV_ASSERT_OK_OR_RETURN( + clientGetResourceRef(pClient, + hThirdPartyP2P, + &pThirdPartyP2PRef)); + + pThirdPartyP2P = dynamicCast(pThirdPartyP2PRef->pResource, ThirdPartyP2P); + if (pThirdPartyP2P == NULL) + { + return NV_ERR_INVALID_OBJECT; + } + + // Do not register another client if one already exists for this PID + if (thirdpartyp2pIsValidClientPid(pThirdPartyP2P, pid, client)) + { + return NV_OK; + } + NV_ASSERT_OR_RETURN(!thirdpartyp2pIsValidClientPid(pThirdPartyP2P, pid, 0), NV_ERR_STATE_IN_USE); + + + for (pidIndex = 0; pidIndex < CLI_THIRD_PARTY_P2P_MAX_CLIENT; pidIndex++) + { + if (0 == pThirdPartyP2P->pidClientList[pidIndex].pid) + { + pThirdPartyP2P->pidClientList[pidIndex].pid = pid; + pThirdPartyP2P->pidClientList[pidIndex].hClient = client; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NvBool +thirdpartyp2pIsValidClientPid_IMPL +( + ThirdPartyP2P* pThirdPartyP2P, + NvU32 pid, + NvHandle hClient +) +{ + NvU32 pidIndex; + + for (pidIndex = 0; pidIndex < CLI_THIRD_PARTY_P2P_MAX_CLIENT; pidIndex++) + { + if (pid == pThirdPartyP2P->pidClientList[pidIndex].pid && + (hClient == 0 || hClient == pThirdPartyP2P->pidClientList[pidIndex].hClient)) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +NV_STATUS +CliDelThirdPartyP2PClientPid +( + RmClient *pClient, + NvHandle hThirdPartyP2P, + NvU32 pid, + NvU32 client +) +{ + NvU32 pidIndex; + RsResource *pRes; + ThirdPartyP2P *pThirdPartyP2P; + + pThirdPartyP2P = serverutilGetDerived(pClient, hThirdPartyP2P, &pRes, ThirdPartyP2P); + if (pThirdPartyP2P 
== NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + for (pidIndex = 0; pidIndex < CLI_THIRD_PARTY_P2P_MAX_CLIENT; pidIndex++) + { + if (pid == pThirdPartyP2P->pidClientList[pidIndex].pid && + client == pThirdPartyP2P->pidClientList[pidIndex].hClient) + { + pThirdPartyP2P->pidClientList[pidIndex].pid = 0; + pThirdPartyP2P->pidClientList[pidIndex].hClient = 0; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +CliUnregisterFromThirdPartyP2P +( + RmClient *pClient +) +{ + NV_STATUS status = NV_OK; + ThirdPartyP2P *pThirdPartyP2P = NULL; + NvU32 pid; + NvHandle hClient = staticCast(pClient, RsClient)->hClient; + + pid = pClient->ProcID; + + while (1) + { + RmClient *pThirdPartyP2PClient; + status = CliNextThirdPartyP2PInfoWithPid(NULL, pid, hClient, &pThirdPartyP2PClient, &pThirdPartyP2P); + if (status != NV_OK) + { + return NV_OK; + } + + CliDelThirdPartyP2PClientPid(pThirdPartyP2PClient, + pThirdPartyP2P->hThirdPartyP2P, + pid, + hClient); + } + + return status; +} + +void +CliUnregisterMemoryFromThirdPartyP2P +( + Memory *pMemory +) +{ + RsClient *pRsClient = RES_GET_CLIENT(pMemory); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + ThirdPartyP2P *pThirdPartyP2P; + Device *pDevice = pMemory->pDevice; + RsResourceRef *pDeviceRef = RES_GET_REF(pDevice); + RS_ITERATOR subDevIt; + RS_ITERATOR it; + + subDevIt = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), + RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(pRsClient, &subDevIt)) + { + it = clientRefIter(pRsClient, subDevIt.pResourceRef, classId(ThirdPartyP2P), \ + RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(pRsClient, &it)) + { + pThirdPartyP2P = dynamicCast(it.pResourceRef->pResource, ThirdPartyP2P); + if (pThirdPartyP2P == NULL) + continue; + + (void)CliDelThirdPartyP2PVidmemInfo(pThirdPartyP2P, hMemory); + } + } +} diff --git a/src/nvidia/src/kernel/gpu/bus/third_party_p2p_ctrl.c b/src/nvidia/src/kernel/gpu/bus/third_party_p2p_ctrl.c new file mode 100644 index 000000000..fda6b63a8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/bus/third_party_p2p_ctrl.c @@ -0,0 +1,196 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2011-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include // FERMI_VASPACE_A +#include +#include "gpu/bus/third_party_p2p.h" +#include "mem_mgr/mem.h" +#include "rmapi/rs_utils.h" +#include "vgpu/rpc.h" + +NV_STATUS +thirdpartyp2pCtrlCmdRegisterVaSpace_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + NV503C_CTRL_REGISTER_VA_SPACE_PARAMS *pRegisterVaSpaceParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pThirdPartyP2P); + NvHandle hObject = RES_GET_HANDLE(pThirdPartyP2P); + NvU32 vaSpaceToken; + NV_STATUS status; + OBJGPU *pGpu; + + pGpu = GPU_RES_GET_GPU(pThirdPartyP2P); + if (pGpu == NULL) + return NV_ERR_INVALID_OBJECT_PARENT; + + + status = CliAddThirdPartyP2PVASpace(hClient, + hObject, + pRegisterVaSpaceParams->hVASpace, + &vaSpaceToken); + if (status == NV_OK) + { + { + pRegisterVaSpaceParams->vaSpaceToken = vaSpaceToken; + } + } + + return status; +} + +NV_STATUS +thirdpartyp2pCtrlCmdUnregisterVaSpace_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + NV503C_CTRL_UNREGISTER_VA_SPACE_PARAMS *pUnregisterVaSpaceParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu; + + pGpu = GPU_RES_GET_GPU(pThirdPartyP2P); + if (pGpu == NULL) + return NV_ERR_INVALID_OBJECT_PARENT; + + status = CliDelThirdPartyP2PVASpace(pThirdPartyP2P, + pUnregisterVaSpaceParams->hVASpace); + + return status; +} + +NV_STATUS +thirdpartyp2pCtrlCmdRegisterVidmem_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + NV503C_CTRL_REGISTER_VIDMEM_PARAMS *pRegisterVidmemParams +) +{ + Memory *pMemory; + RsClient *pClient = RES_GET_CLIENT(pThirdPartyP2P); + NvHandle hObject = RES_GET_HANDLE(pThirdPartyP2P); + NvHandle hDevice; + NvU64 address = pRegisterVidmemParams->address; + NvU64 size = pRegisterVidmemParams->size; + NvU64 offset = pRegisterVidmemParams->offset; + NV_STATUS status = NV_OK; + OBJGPU *pGpu; + + pGpu = GPU_RES_GET_GPU(pThirdPartyP2P); + if (pGpu == NULL) + return NV_ERR_INVALID_OBJECT_PARENT; + + hDevice = RES_GET_PARENT_HANDLE(pThirdPartyP2P->pSubdevice); + + status = memGetByHandleAndDevice(pClient, + pRegisterVidmemParams->hMemory, + hDevice, + &pMemory); + if (status != NV_OK) + { + return status; + } + + if (memdescGetAddressSpace(pMemory->pMemDesc) != ADDR_FBMEM) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (size == 0) + return NV_ERR_INVALID_ARGUMENT; + + if (size & (NVRM_P2P_PAGESIZE_BIG_64K - 1)) + return NV_ERR_INVALID_ARGUMENT; + + if (address & (NVRM_P2P_PAGESIZE_BIG_64K - 1)) + return NV_ERR_INVALID_ARGUMENT; + + if (offset & (NVRM_P2P_PAGESIZE_BIG_64K - 1)) + return NV_ERR_INVALID_ARGUMENT; + + // Check for overflow + if (address + size < address || size + offset < size) + return NV_ERR_INVALID_ARGUMENT; + + if (memdescGetSize(pMemory->pMemDesc) < offset + size) + return NV_ERR_INVALID_ARGUMENT; + + status = CliAddThirdPartyP2PVidmemInfo(pClient->hClient, + hObject, + pRegisterVidmemParams->hMemory, + address, + size, + offset, + pMemory); + if (status != NV_OK) + { + return status; + } + + return status; +} + +NV_STATUS +thirdpartyp2pCtrlCmdUnregisterVidmem_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + NV503C_CTRL_UNREGISTER_VIDMEM_PARAMS *pUnregisterVidmemParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu; + + pGpu = GPU_RES_GET_GPU(pThirdPartyP2P); + if (pGpu == NULL) + return NV_ERR_INVALID_OBJECT_PARENT; + + status = CliDelThirdPartyP2PVidmemInfo(pThirdPartyP2P, + pUnregisterVidmemParams->hMemory); + return status; +} + +NV_STATUS +thirdpartyp2pCtrlCmdRegisterPid_IMPL +( + ThirdPartyP2P *pThirdPartyP2P, + NV503C_CTRL_REGISTER_PID_PARAMS *pRegisterPidParams +) +{ + NvHandle hClient = 
RES_GET_CLIENT_HANDLE(pThirdPartyP2P); + NvHandle hObject = RES_GET_HANDLE(pThirdPartyP2P); + RmClient *pClient; + NvU32 pid; + NV_STATUS status; + + NV_ASSERT_OK_OR_RETURN(serverutilGetClientUnderLock(pRegisterPidParams->hClient, &pClient)); + pid = pClient->ProcID; + + status = CliAddThirdPartyP2PClientPid(hClient, + hObject, + pid, + pRegisterPidParams->hClient); + return status; +} diff --git a/src/nvidia/src/kernel/gpu/ce/arch/ampere/kernel_ce_ga100.c b/src/nvidia/src/kernel/gpu/ce/arch/ampere/kernel_ce_ga100.c new file mode 100644 index 000000000..31dac69e8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/ce/arch/ampere/kernel_ce_ga100.c @@ -0,0 +1,1169 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "ctrl/ctrl2080/ctrl2080nvlink.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/nvlink/kernel_nvlink.h" +#include "gpu/ce/kernel_ce_private.h" +#include "gpu/gpu.h" +#include "gpu/bif/kernel_bif.h" +#include "platform/chipset/chipset.h" + +#include "published/ampere/ga100/dev_ce.h" +#include "published/ampere/ga100/dev_nv_xve.h" +#include "published/ampere/ga100/dev_nv_xve_addendum.h" + +#define MAX_CE_CNT 18 +#define NV_CE_INVALID_TOPO_IDX 0xFFFF + +// Ampere + +#define NV_CE_MIN_PCE_PER_SYS_LINK 2 +#define NV_CE_MIN_PCE_PER_PEER_LINK 1 + +// Defines for PCE-LCE mapping algorithm +#define NV_CE_LCE_MASK_INIT 0xFFFFFFFF +#define NV_CE_SYS_ALLOWED_LCE_MASK 0x0C +#define NV_CE_GRCE_ALLOWED_LCE_MASK 0x03 +#define NV_CE_EVEN_ASYNC_LCE_MASK 0x55555550 +#define NV_CE_ODD_ASYNC_LCE_MASK 0xAAAAAAA0 +#define NV_CE_MAX_LCE_MASK 0x3FF +#define NV_CE_MAX_GRCE 2 +#define NV_CE_NUM_HSHUB_PCES 16 +#define NV_CE_PCE_STRIDE 3 +#define NV_CE_SYS_LCE_ALLOWED_HSPCE_CONFIG 0x8 +#define NV_CE_NUM_DEFAULT_PCES 2 + +#define NV_CE_SYS_ALLOWED_LCE_MASK 0x0C +#define NV_CE_GRCE_ALLOWED_LCE_MASK 0x03 +#define NV_CE_EVEN_ASYNC_LCE_MASK 0x55555550 +#define NV_CE_ODD_ASYNC_LCE_MASK 0xAAAAAAA0 +#define NV_CE_MAX_LCE_MASK 0x3FF + +static void _ceGetAlgorithmPceIndex(OBJGPU *, KernelCE*, NvU32 *, NvU32 *, NvBool *, NvU8 *); + +/* + * sysmemLinks + * Represents the number of sysmem links detected + * This affects how many PCEs LCE0(sysmem read CE) + * and LCE1(sysmem write CE) should be mapped to + * maxLinksPerPeer + * Represents the maximum number of peer links + * between this GPU and all its peers. 
This affects + * how many PCEs LCE3(P2P CE) should be mapped to + * numPeers + * Represents the number of Peer GPUs discovered so far + * bSymmetric + * Represents whether the topology detected so far + * is symmetric i.e. has same number of links to all + * peers connected through nvlink. This affects how + * many PCEs to assign to LCEs3-5 (nvlink P2P CEs) + * bSwitchConfig + * Represents whether the config listed is intended + * for use with nvswitch systems + * pceLceMap + * Value of NV_CE_PCE2LCE_CONFIG0 register with the + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + * grceConfig + * Value of NV_CE_GRCE_CONFIG register with the + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + * exposeCeMask + * Mask of CEs to expose to clients for the above + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + */ +typedef struct NVLINK_CE_AUTO_CONFIG_TABLE +{ + NvU32 sysmemLinks; + NvU32 maxLinksPerPeer; + NvU32 numPeers; + NvBool bSymmetric; + NvBool bSwitchConfig; + NvU32 pceLceMap[MAX_CE_CNT]; + NvU32 grceConfig[MAX_CE_CNT]; + NvU32 exposeCeMask; +} NVLINK_CE_AUTO_CONFIG_TABLE; + +/* + * Table for setting the PCE2LCE mapping for WAR configs that cannot be implemented + * using the algorithm because the config does not conform to the algorithm's set + * of requirements/assumptions +*/ +static NVLINK_CE_AUTO_CONFIG_TABLE nvLinkCeAutoConfigTable_GA100[] = +{ + +// +// #systmem #max #peers Symmetric Switch PCE-LCE GRCE exposeCe +// links (links/peer) Config? Config Map Config Mask +// + +// Default minimal configuration - NOTE: do not add entrys before this + {0x0, 0x0, 0x0, NV_FALSE, NV_FALSE, {0xF,0xF,0xF,0xF,0xF,0xF,0xF,0xF,0xF, + 0xF,0xF,0xF,0xF,0xF,0xF,0xF,0x2,0x3}, {0xF,0xF}, 0xF} +}; + +/*! + * @brief Returns the size of the GRCE_CONFIG register array + * + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * @return NV_CE_GRCE_CONFIG__SIZE_1 + * + */ +NvU32 +kceGetGrceConfigSize1_GA100 +( + KernelCE * pKCe +) +{ + return NV_CE_GRCE_CONFIG__SIZE_1; +} + +/*! 
+ * @brief Returns the size of the PCE2LCE register array + * + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * @return NV_CE_PCE2LCE_CONFIG__SIZE_1 + * + */ +NvU32 +kceGetPce2lceConfigSize1_GA100 +( + KernelCE * pKCe +) +{ + return NV_CE_PCE2LCE_CONFIG__SIZE_1; +} + +/** + * Return the pce-lce mappings and grce config + * reg values when nvlink topology is NOT forced + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[out] pPceLceMap Stores the pce-lce mappings + * @param[out] pGrceConfig Stores the grce configuration + * @param[out] pExposeCeMask Mask of CEs to expose to clients + * + * @return NV_OK on success + */ +NV_STATUS +kceGetNvlinkAutoConfigCeValues_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NvU32 *pPceLceMap, + NvU32 *pGrceConfig, + NvU32 *pExposeCeMask +) +{ + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + OBJGPU *pRemoteGpu = NULL; + NV_STATUS status = NV_OK; + NvU32 gpuMask = 0; + NvU32 sysmemLinks = 0; + NvU32 numPeers = 0; + NvBool bSymmetric = NV_TRUE; + NvBool bCurrentTopoMax = NV_FALSE; + NvU32 maxLinksPerPeer = 0; + NvU32 gpuInstance = 0; + NvU32 topoIdx = NV_CE_INVALID_TOPO_IDX; + NvU32 pce2lceConfigSize1 = kceGetPce2lceConfigSize1_HAL(pKCe); + NvU32 grceConfigSize1 = kceGetGrceConfigSize1_HAL(pKCe); + NvBool bEntryExists; + NvU32 pceIdx, grceIdx, i; + NVLINK_TOPOLOGY_PARAMS currentTopo; + + NV_ASSERT_OR_RETURN(!RMCFG_FEATURE_PLATFORM_GSP, NV_ERR_NOT_SUPPORTED); + + if ((pPceLceMap == NULL) || (pGrceConfig == NULL) || (pExposeCeMask == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pKernelNvlink == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + portMemSet(¤tTopo, 0, sizeof(currentTopo)); + + // Initialize pPceLceMap with no mappings + for (pceIdx = 0; pceIdx < pce2lceConfigSize1; pceIdx++) + { + pPceLceMap[pceIdx] = NV_CE_PCE2LCE_CONFIG_PCE_ASSIGNED_LCE_NONE; + } + + // Bug 200283711: Use the largest of all chips in allocating these arrays + NvU32 localPceLceMap[NV_CE_PCE2LCE_CONFIG__SIZE_1_MAX]; + NvU32 localGrceConfig[MAX_CE_CNT]; + NvU32 localExposeCeMask = 0; + + // Initialize to ASSIGNED_LCE_NONE + for (i = 0; i < pce2lceConfigSize1; i++) + { + localPceLceMap[i] = NV_CE_PCE2LCE_CONFIG_PCE_ASSIGNED_LCE_NONE; + } + + sysmemLinks = knvlinkGetNumLinksToSystem(pGpu, pKernelNvlink); + + if (gpuGetNumCEs(pGpu) == 0) + { + return NV_ERR_NOT_SUPPORTED; + } + + (void)gpumgrGetGpuAttachInfo(NULL, &gpuMask); + + // Get the max{nvlinks/peer, for all connected peers} + while ((pRemoteGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + NvU32 numLinksToPeer = knvlinkGetNumLinksToPeer(pGpu, pKernelNvlink, + pRemoteGpu); + if (numLinksToPeer == 0) + { + continue; + } + + numPeers++; + + // + // The topology remains symmetric if this is either the first GPU we've + // seen connected over NVLINK, or the number of links connected to this + // peer is the same as the maximum number of links connected to any peer + // seen so far. + // + bSymmetric = (bSymmetric && + ((maxLinksPerPeer == 0) || + (maxLinksPerPeer == numLinksToPeer))); + + if (numLinksToPeer > maxLinksPerPeer) + { + maxLinksPerPeer = numLinksToPeer; + } + } + + currentTopo.sysmemLinks = sysmemLinks; + currentTopo.maxLinksPerPeer = maxLinksPerPeer; + currentTopo.numPeers = numPeers; + currentTopo.bSymmetric = bSymmetric; + currentTopo.bSwitchConfig = knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink); + + // + // Check if the current config exists in the table + // Here, we only fill exposeCeMask. 
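+    // If no table entry matches, the PCE-LCE mapping is derived algorithmically below.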
+ // + bEntryExists = kceGetAutoConfigTableEntry_HAL(pGpu, pKCe, ¤tTopo, nvLinkCeAutoConfigTable_GA100, + NV_ARRAY_ELEMENTS(nvLinkCeAutoConfigTable_GA100), + &topoIdx, &localExposeCeMask); + if (bEntryExists) + { + // Since entry exists, fill local variables with the associated table entry + for (pceIdx = 0; pceIdx < pce2lceConfigSize1; pceIdx++) + { + localPceLceMap[pceIdx] = nvLinkCeAutoConfigTable_GA100[topoIdx].pceLceMap[pceIdx]; + } + for (grceIdx = 0; grceIdx < grceConfigSize1; grceIdx++) + { + localGrceConfig[grceIdx] = nvLinkCeAutoConfigTable_GA100[topoIdx].grceConfig[grceIdx]; + } + } + else + { + // + // There is no table entry - use algorithm to determine mapping + // Here the currentTopo struct comes with pce-lce & grce mappings & exposeCeMask + // + + status = kceGetMappings_HAL(pGpu, pKCe, ¤tTopo, + localPceLceMap, localGrceConfig, &localExposeCeMask); + } + + // Get the largest topology that has been cached + bEntryExists = gpumgrGetSystemNvlinkTopo(gpuGetDBDF(pGpu), ¤tTopo); + + // Is this the largest topology that we've ever seen compared to the cached one? + bCurrentTopoMax = kceIsCurrentMaxTopology_HAL(pGpu, pKCe, ¤tTopo, &localExposeCeMask, &topoIdx); + + if (bCurrentTopoMax) + { + // + // Replace cached state with current config + // Store the state globally in gpumgr so that we can preserve the topology + // info across GPU loads. + // Preserving across GPU loads enables UVM to optimize perf + // + for (pceIdx = 0; pceIdx < pce2lceConfigSize1; pceIdx++) + { + currentTopo.maxPceLceMap[pceIdx] = localPceLceMap[pceIdx]; + } + for (grceIdx = 0; grceIdx < grceConfigSize1; grceIdx++) + { + currentTopo.maxGrceConfig[grceIdx] = localGrceConfig[grceIdx]; + } + currentTopo.maxExposeCeMask = localExposeCeMask; + + if (topoIdx != NV_CE_INVALID_TOPO_IDX) + { + // Only if we used table to determine config, store this value + currentTopo.maxTopoIdx = topoIdx; + currentTopo.sysmemLinks = nvLinkCeAutoConfigTable_GA100[topoIdx].sysmemLinks; + currentTopo.maxLinksPerPeer = nvLinkCeAutoConfigTable_GA100[topoIdx].maxLinksPerPeer; + currentTopo.numPeers = nvLinkCeAutoConfigTable_GA100[topoIdx].numPeers; + currentTopo.bSymmetric = nvLinkCeAutoConfigTable_GA100[topoIdx].bSymmetric; + currentTopo.bSwitchConfig = nvLinkCeAutoConfigTable_GA100[topoIdx].bSwitchConfig; + } + gpumgrUpdateSystemNvlinkTopo(gpuGetDBDF(pGpu), ¤tTopo); + } + + NV_PRINTF(LEVEL_INFO, "GPU%d : RM Configured Values for CE Config\n", gpuGetInstance(pGpu)); + + // Now, fill up the information to return. We'll always return max config information. 
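+    // currentTopo.max* now holds either the newly computed mapping or the larger cached one.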
+ for (pceIdx = 0; pceIdx < pce2lceConfigSize1; pceIdx++) + { + pPceLceMap[pceIdx] = currentTopo.maxPceLceMap[pceIdx]; + NV_PRINTF(LEVEL_INFO, "PCE-LCE map: PCE %d LCE 0x%x\n", pceIdx, pPceLceMap[pceIdx]); + } + + for (grceIdx = 0; grceIdx < grceConfigSize1; grceIdx++) + { + NvU32 grceSharedLce = currentTopo.maxGrceConfig[grceIdx]; + + if (grceSharedLce != 0xF) + { + // GRCE is shared + pGrceConfig[grceIdx] = DRF_NUM(_CE, _GRCE_CONFIG, _SHARED, 1) | + DRF_NUM(_CE, _GRCE_CONFIG, _SHARED_LCE, grceSharedLce); + } + else + { + // GRCE got its own PCE + pGrceConfig[grceIdx] = DRF_NUM(_CE, _GRCE_CONFIG, _SHARED, 0); + } + NV_PRINTF(LEVEL_INFO, "GRCE Config: GRCE %d LCE 0x%x\n", grceIdx, pGrceConfig[grceIdx]); + } + + *pExposeCeMask = currentTopo.maxExposeCeMask; + NV_PRINTF(LEVEL_INFO, "exposeCeMask = 0x%x\n", *pExposeCeMask); + + return status; +} + +/** + * @brief Check if current config's topology is larger than cached one + * Return NV_TRUE if yes, else return NV_FALSE + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[in] pCurrentTopo NVLINK_TOPOLOGY_INFO pointer + * @param[in] pLocalExposeCeMask Pointer to caller HSHUB ID + * @param[in/out] pTopoIdx NvU32 pointer to topology index, if it exists + */ +NvBool +kceIsCurrentMaxTopology_GA100 +( + OBJGPU *pGpu, + KernelCE * pKCe, + NVLINK_TOPOLOGY_PARAMS * pCurrentTopo, + NvU32 *pLocalExposeCeMask, + NvU32 *pTopoIdx +) +{ + if (pCurrentTopo->maxExposeCeMask & ~(*pLocalExposeCeMask)) + { + // + // Current config's exposeCeMask is a subset of cached maxExposeCeMask + // Hence, we will return NV_FALSE and use cached state config + // + if (*pTopoIdx != NV_CE_INVALID_TOPO_IDX) + { + *pTopoIdx = pCurrentTopo->maxTopoIdx; + } + *pLocalExposeCeMask = pCurrentTopo->maxExposeCeMask; + return NV_FALSE; + } + + // + // Current config is equal or a superset of cached maxExposeCeMask + // This means that the topology has increased and hence we should + // cache the current config as the max config. Return NV_TRUE to do so + // + return NV_TRUE; +} + +/** + * @brief This function returns the pceIndex for a particular link ID + * Must always be called with the hshub ID for the calling link ID + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[in] pceAvailableMaskPerHshub Pointer to CEs available per HSHUB + * @param[out] pceIndex Pointer to caller pceIndex + * @param[out] pHshubId Pointer to caller HSHUB ID + * @param[out] pFirstIter Pointer to iteration value + */ +static void +_ceGetAlgorithmPceIndex +( + OBJGPU *pGpu, + KernelCE* pKCe, + NvU32 *pceAvailableMaskPerHshub, + NvU32 *pceIndex, + NvBool *pBFirstIter, + NvU8 *pHshubId +) +{ + NV_STATUS status = NV_OK; + + // 1. Apply PCE striding + if ((*pBFirstIter) != NV_TRUE) + { + *pceIndex += NV_CE_PCE_STRIDE; + } + *pBFirstIter = NV_FALSE; + + if(!(NVBIT32(*pceIndex) & pceAvailableMaskPerHshub[*pHshubId])) + { + // + // 2. We couldn't find an applicable strided PCE in given HSHUB + // So, we'll assign the next consecutive PCE on the same HSHUB + // + *pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(pceAvailableMaskPerHshub[*pHshubId]); + if(!(NVBIT32(*pceIndex) & pceAvailableMaskPerHshub[*pHshubId])) + { + // 3. 
If this is not a valid PCE on given HSHUB, assign PCE from alternative HSHUB + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + if (pKernelNvlink != NULL) + { + NV2080_CTRL_INTERNAL_HSHUB_NEXT_HSHUB_ID_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.hshubId = *pHshubId; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_INTERNAL_HSHUB_NEXT_HSHUB_ID, + (void *)¶ms, sizeof(params)); + + NV_ASSERT_OK(status); + if (status == NV_OK) + { + *pHshubId = params.hshubId; + } + } + + *pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(pceAvailableMaskPerHshub[*pHshubId]); + if(!(NVBIT32(*pceIndex) & pceAvailableMaskPerHshub[*pHshubId])) + { + // If we've reached this point, then we have no more available PCEs to assign + NV_PRINTF(LEVEL_ERROR, "No more available PCEs to assign!\n"); + NV_ASSERT(0); + } + } + } + return; +} + +/** + * @brief Use the algorithm to determine all the mappings for + * the given GPU. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[out] pLocalPceLceMap Pointer to PCE-LCE array + * @param[out] pLocalGrceMap Pointer to GRCE array + * @param[out] pLocalExposeCeMask Pointer to LCE Mask + * + * Returns NV_TRUE if algorithm ran to completion with no erros + */ + +NV_STATUS +kceGetMappings_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NVLINK_TOPOLOGY_PARAMS *pTopoParams, + NvU32 *pLocalPceLceMap, + NvU32 *pLocalGrceMap, + NvU32 *pExposeCeMask +) +{ + NvU32 lceMask = 0; + NvU32 fbPceMask = 0; + NV_STATUS status = NV_OK; + NvU32 pceIndex, lceIndex, grceIdx; + + // Prepare the per-HSHUB/FBHUB available PCE mask + status = kceGetAvailableHubPceMask(pGpu, pTopoParams); + + // A. Start with assigning PCEs for "SYSMEM" + status = kceMapPceLceForSysmemLinks_HAL(pGpu, pKCe, + pTopoParams->pceAvailableMaskPerHshub, + pLocalPceLceMap, + pExposeCeMask, + pTopoParams->fbhubPceMask); + if (status == NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "No sysmem connections on this chip (PCIe or NVLink)!\n"); + } + + // B. Assign PCEs to "PEER"s + status = kceMapPceLceForNvlinkPeers_HAL(pGpu, pKCe, + pTopoParams->pceAvailableMaskPerHshub, + pLocalPceLceMap, + pExposeCeMask); + if (status == NV_WARN_NOTHING_TO_DO) + { + // If there's no NVLink peers available, still expose an additional async LCE + status = kceMapAsyncLceDefault_HAL(pGpu, pKCe, + pTopoParams->pceAvailableMaskPerHshub, + pLocalPceLceMap, + pExposeCeMask, + NV_CE_NUM_DEFAULT_PCES); + } + + // C. Lastly, do the assignment for "GRCE"s + lceMask = kceGetGrceSupportedLceMask_HAL(pGpu, pKCe); + + // Get the FBHUB PCE mask + fbPceMask = pTopoParams->fbhubPceMask; + + // Store lceMask in the exposeCeMask before moving on + *pExposeCeMask |= lceMask; + + for (grceIdx = 0; grceIdx < NV_CE_MAX_GRCE; grceIdx++) + { + // + // Check if we are sharing GRCEs + // On Ampere, GRCEs can only use FBHUB PCEs + // So, we need to check if the FBHUB PCEs have already been assigned. 
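+        // If the FBHUB PCE already maps to an exposed LCE, the GRCE shares that LCE.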
+ // + pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(fbPceMask); + fbPceMask &= (~(NVBIT32(pceIndex))); + + if ((NVBIT32(pLocalPceLceMap[pceIndex])) & *pExposeCeMask) + { + // GRCE is shared - set the status and shared LCE # in register field + lceIndex = pLocalPceLceMap[pceIndex]; + pLocalGrceMap[grceIdx] = DRF_NUM(_CE, _GRCE_CONFIG, _SHARED, 1) | + DRF_NUM(_CE, _GRCE_CONFIG, _SHARED_LCE, lceIndex); + + if ((kceIsGen4orHigherSupported_HAL(pGpu, pKCe)) || (pKCe->bUseGen4Mapping == NV_TRUE)) + { + kceApplyGen4orHigherMapping_HAL(pGpu, pKCe, + &pLocalPceLceMap[0], + &pTopoParams->pceAvailableMaskPerHshub[0], + lceIndex, + pceIndex); + } + } + else + { + // GRCE got its own FBHUB PCE + // Store the LCE in associated PCE for GRCE + lceIndex = CE_GET_LOWEST_AVAILABLE_IDX(lceMask); + pLocalPceLceMap[pceIndex] = lceIndex; + lceMask &= (~(NVBIT32(lceIndex))); + // Reflect non-sharing status in register field + pLocalGrceMap[grceIdx] = DRF_NUM(_CE, _GRCE_CONFIG, _SHARED, 0) | + DRF_DEF(_CE, _GRCE_CONFIG, _SHARED_LCE, _NONE); + } + } + + return NV_OK; +} + +/** + * @brief This function assigns PCE-LCE mappings for sysmem + * for the following two cases - + * 1. PCIe links - assign FBHUB PCEs + * 2. NVLinks - assign HSHUB PCEs + * If sysLinkMask is 0, then we assume that sysmem is over PCIe. + * Else, follow step 2 as above. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[in] pceAvailableMaskPerHshub Pointer to CEs available per HSHUB + * @param[out] pLocalPceLceMap Pointer to PCE-LCE array + * @param[out] pLocalExposeCeMask Pointer to LCE Mask + * + * Returns NV_OK if successful in assigning PCEs and LCEs for sysmem links + */ +NV_STATUS +kceMapPceLceForSysmemLinks_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NvU32 *pceAvailableMaskPerHshub, + NvU32 *pLocalPceLceMap, + NvU32 *pLocalExposeCeMask, + NvU32 fbPceMask +) +{ + NvU32 lceMask = 0; + NvU32 pceMask = 0; + NvU32 numTotalPces = 0; + NvBool bFirstIter = NV_FALSE; + NvU32 numPcePerLink, tempFbPceMask; + NvU32 lceIndex, pceIndex; + NvU32 linkId, i; + NvU8 hshubId; + NV_STATUS status; + + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS paramsNvlinkMask; + NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS paramsHshubId; + + NV_ASSERT_OR_RETURN(pKernelNvlink != NULL, NV_ERR_NOT_SUPPORTED); + + portMemSet(¶msNvlinkMask, 0, sizeof(paramsNvlinkMask)); + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK, + (void *)¶msNvlinkMask, sizeof(paramsNvlinkMask)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to determine PCEs and LCEs for sysmem links\n"); + return status; + } + + lceMask = kceGetSysmemSupportedLceMask_HAL(pGpu, pKCe); + + // + // Assign FBHUB PCEs when sysmem is over PCIE because PCIE + // accesses are not supported over HSHUB PCEs + // + if (paramsNvlinkMask.sysmemLinkMask == 0) + { + // Store lceMask in the exposeCeMask before moving on + *pLocalExposeCeMask |= lceMask; + + tempFbPceMask = fbPceMask; + while(tempFbPceMask) + { + lceIndex = CE_GET_LOWEST_AVAILABLE_IDX(lceMask); + pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(tempFbPceMask); + pLocalPceLceMap[pceIndex] = lceIndex; + // Clear the lowest set bits to get to the next index + tempFbPceMask &= (tempFbPceMask - 1); + lceMask &= (lceMask - 1); + } + + return NV_OK; + } + + // If sysmem is over NVlink, assign HSHUB PCEs + numPcePerLink = NV_CE_MIN_PCE_PER_SYS_LINK; + + portMemSet(¶msHshubId, 0, sizeof(paramsHshubId)); + 
paramsHshubId.linkMask = paramsNvlinkMask.sysmemLinkMask; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS, + (void *)¶msHshubId, sizeof(paramsHshubId)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to determine Hshub Id for sysmem links"); + return status; + } + + FOR_EACH_INDEX_IN_MASK(32, linkId, paramsNvlinkMask.sysmemLinkMask) + { + hshubId = paramsHshubId.hshubIds[linkId]; + pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(pceAvailableMaskPerHshub[hshubId]); + bFirstIter = NV_TRUE; + for (i = 0; i < numPcePerLink; i++) + { + _ceGetAlgorithmPceIndex(pGpu, pKCe, pceAvailableMaskPerHshub, &pceIndex, &bFirstIter, &hshubId); + pceMask |= NVBIT32(pceIndex); + numTotalPces++; + // Clear out the assigned PCE + pceAvailableMaskPerHshub[hshubId] &= (~(NVBIT32(pceIndex))); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // + // Now, enter the PCE-LCE assignment - alternatively assign PCEs + // to each of the 2 LCEs for sysmem + // + for (i = 0; i < (numTotalPces/NV_CE_MIN_PCE_PER_SYS_LINK); i++) + { + NvU32 tempLceMask = lceMask; + while(tempLceMask) + { + pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(pceMask); + lceIndex = CE_GET_LOWEST_AVAILABLE_IDX(tempLceMask); + + pLocalPceLceMap[pceIndex] = lceIndex; + + pceMask &= (pceMask - 1); + tempLceMask &= (tempLceMask - 1); + } + + // Store lceMask in the exposeCeMask before moving on + *pLocalExposeCeMask |= lceMask; + } + + return NV_OK; +} + +/** + * @brief Returns mask of LCEs that can be assigned to sysmem connection + * where the index of corresponding set bit indicates the LCE index + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * Returns the mask of LCEs valid for SYSMEM connections + */ +NvU32 +kceGetSysmemSupportedLceMask_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + return (NV_CE_SYS_ALLOWED_LCE_MASK & NV_CE_MAX_LCE_MASK); +} + +/** + * @brief This function assigns PCE-LCE mappings for NVLink peers + * Based on HSHUBs that the links associated with a peer connect to, + * algorithm will attempt to assign a PCE from associated HSHUB taking into + * account striding as well. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[in] pceAvailableMaskPerHshub Pointer to CEs available per HSHUB + * @param[out] pLocalPceLceMap Pointer to PCE-LCE array + * @param[out] pLocalExposeCeMask Pointer to LCE Mask + * + * Returns NV_OK if successful in assigning PCEs and LCEs for each of the NVLink peers + */ +NV_STATUS +kceMapPceLceForNvlinkPeers_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NvU32 *pceAvailableMaskPerHshub, + NvU32 *pLocalPceLceMap, + NvU32 *pLocalExposeCeMask +) +{ + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status = NV_OK; + NvU32 lceMask = 0; + NvU32 pceMask = 0; + NvU32 peerLinkMask = 0; + NvBool bFirstIter = NV_FALSE; + NvBool bPeerAssigned = NV_FALSE; + NvU32 peerAvailableLceMask = NV_CE_LCE_MASK_INIT; + OBJGPU *pRemoteGpu; + NvU32 numPcePerLink; + NvU32 lceIndex, pceIndex; + NvU32 linkId, gpuMask, gpuInstance = 0, i; + NvU8 hshubId, prevHshubId; + + NV2080_CTRL_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS_PARAMS params; + + if (pKernelNvlink == NULL) + { + return NV_WARN_NOTHING_TO_DO; + } + + peerAvailableLceMask = kceGetNvlinkPeerSupportedLceMask_HAL(pGpu, pKCe, peerAvailableLceMask); + + if (knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink)) + { + // + // On NVSwitch systems, we only create 1 aperture for all p2p connections. 
+ // For PCE2LCE mapping, we should only assign 1 LCE for this connection. + // + // Since we mark the loopback connections in peerLinkMasks with the appropriate + // links (see _nvlinkUpdateSwitchLinkMasks), we can use that to calculate + // the PCE2LCE config. + // + gpuMask = NVBIT32(pGpu->gpuInstance); + } + else + { + // On direct connected systems, we'll loop over each GPU in the system + // and assign a peer LCE for each connection + (void)gpumgrGetGpuAttachInfo(NULL, &gpuMask); + } + while ((pRemoteGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + NvU32 numLinksToPeer = knvlinkGetNumLinksToPeer(pGpu, pKernelNvlink, + pRemoteGpu); + if (numLinksToPeer == 0) + { + continue; + } + + pceMask = 0; + lceMask = 0; + + if (peerAvailableLceMask == 0) + { + // + // peerAvailableLceMask is initialized to even async LCEs at the + // top of the function. + // As a result, if at any point in the loop, this mask == 0, + // it implies we have used up all even async LCEs and should move to + // using odd async LCEs. + // + peerAvailableLceMask = kceGetNvlinkPeerSupportedLceMask_HAL(pGpu, pKCe, peerAvailableLceMask); + } + // Each peer gets 1 LCE + lceIndex = CE_GET_LOWEST_AVAILABLE_IDX(peerAvailableLceMask); + lceMask |= NVBIT32(lceIndex); + + // Clear out the chosen LCE + peerAvailableLceMask &= (~(NVBIT32(lceIndex))); + + peerLinkMask = knvlinkGetLinkMaskToPeer(pGpu, pKernelNvlink, pRemoteGpu); + numPcePerLink = NV_CE_MIN_PCE_PER_PEER_LINK; + prevHshubId = 0xFF; + + portMemSet(¶ms, 0, sizeof(params)); + params.linkMask = peerLinkMask; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_HSHUB_ID_FOR_LINKS, + (void *)¶ms, sizeof(params)); + NV_ASSERT_OK_OR_RETURN(status); + + FOR_EACH_INDEX_IN_MASK(32, linkId, peerLinkMask) + { + hshubId = params.hshubIds[linkId]; + if (hshubId != prevHshubId) + { + pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(pceAvailableMaskPerHshub[hshubId]); + bFirstIter = NV_TRUE; + } + for (i = 0; i < numPcePerLink; i++) + { + _ceGetAlgorithmPceIndex(pGpu, pKCe, pceAvailableMaskPerHshub, &pceIndex, &bFirstIter, &hshubId); + pceMask |= NVBIT32(pceIndex); + // Clear out the assigned PCE + pceAvailableMaskPerHshub[hshubId] &= (~(NVBIT32(pceIndex))); + prevHshubId = hshubId; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // Now, assign the PCE-LCE association for the current peer + if (pceMask != 0) + { + // We just need atleast one peer to set this to TRUE + bPeerAssigned = NV_TRUE; + + FOR_EACH_INDEX_IN_MASK(32, pceIndex, pceMask) + { + pLocalPceLceMap[pceIndex] = lceIndex; + NV_PRINTF(LEVEL_INFO, "GPU%d <-> GPU%d PCE Index: %d LCE Index: %d\n", + pGpu->gpuInstance, pRemoteGpu->gpuInstance, pceIndex, lceIndex); + } + FOR_EACH_INDEX_IN_MASK_END; + + // Store lceMask in the exposeCeMask before moving on + *pLocalExposeCeMask |= lceMask; + } + + // + // Bug 200659256 - Looping over GPUs rather than peers (CL 28776130) + // does not handle multi-GPUs/Peer as is the case on switch systems. + // We must only take this loop once on switch systems to account for this. 
+ // If we need to support multiple peer connections with switch systems + // in the future, this code must be revisited + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + break; + } + } + + if (bPeerAssigned == NV_FALSE) + { + status = NV_WARN_NOTHING_TO_DO; + } + + return status; +} + +/** + * @brief This function assigns 2 PCEs to an additional LCE over the GRCEs and async LCEs + * for sysmem, since some clients rely on LCE 4 also being turned on when there + * are no NVLink peers + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[in] pceAvailableMaskPerHshub Pointer to CEs available per HSHUB + * @param[out] pLocalPceLceMap Pointer to PCE-LCE array + * @param[out] pLocalExposeCeMask Pointer to LCE Mask + * + * Returns NV_OK if successful in assigning PCEs to a default async LCE (>= 4) + */ +NV_STATUS +kceMapAsyncLceDefault_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NvU32 *pceAvailableMaskPerHshub, + NvU32 *pLocalPceLceMap, + NvU32 *pLocalExposeCeMask, + NvU32 numDefaultPces +) +{ + NvU32 peerAvailableLceMask = NV_CE_LCE_MASK_INIT; + NvU32 lceMask = 0; + NvU32 pceMask = 0; + NvU32 lceIndex, pceIndex, hshubId, i; + + peerAvailableLceMask = kceGetNvlinkPeerSupportedLceMask_HAL(pGpu, pKCe, peerAvailableLceMask); + + // Pick from the 1st HSHUB - HSHUB 0 will not be floorswept + hshubId = 0; + + // + // If no peers were found, then no async LCEs (>= 4) will be turned on. + // However, some clients rely on LCE 4 being present even without any + // NVLink peers being found. So, turn on the 1st available async LCE (>= 4) + // Reference bug 3042556 + // + lceIndex = CE_GET_LOWEST_AVAILABLE_IDX(peerAvailableLceMask); + lceMask |= NVBIT32(lceIndex); + // Clear out the chosen LCE + peerAvailableLceMask &= (~(NVBIT32(lceIndex))); + + // Assign PCEs to this LCE based on input request + for (i = 0; i < numDefaultPces; i++) + { + pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(pceAvailableMaskPerHshub[hshubId]); + pceMask |= NVBIT32(pceIndex); + pceAvailableMaskPerHshub[hshubId] &= (~(NVBIT32(pceIndex))); + } + + FOR_EACH_INDEX_IN_MASK(32, pceIndex, pceMask) + { + pLocalPceLceMap[pceIndex] = lceIndex; + NV_PRINTF(LEVEL_INFO, "GPU%d <-> GPU%d PCE Index: %d LCE Index: %d\n", + pGpu->gpuInstance, pGpu->gpuInstance, pceIndex, lceIndex); + } + FOR_EACH_INDEX_IN_MASK_END; + + // Store lceMask in the exposeCeMask before moving on + *pLocalExposeCeMask |= lceMask; + + return NV_OK; +} + +/** + * @brief Returns mask of LCEs that can be assigned to NVLink peers + * where the index of corresponding set bit indicates the LCE index + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * Returns the mask of LCEs valid for NVLink peers + */ +NvU32 +kceGetNvlinkPeerSupportedLceMask_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NvU32 peerAvailableLceMask +) +{ + // + // Start with assigning even async LCEs first as they are sized to accommodate + // more no. of PCEs versus odd async LCEs + // Hence, if caller is using this call to get 1st set of async LCEs for NVLink + // peers, then caller should initialize peerAvailableLceMask to NV_CE_LCE_MASK_INIT. + // Else we will run out of async LCEs since we will directly assign the odd async LCEs + // and there's no wraparound or detection mechanism is place. 
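+    // In short: calls made with the initial mask get the even async LCEs; all later calls get the odd ones.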
+ // + if (peerAvailableLceMask == NV_CE_LCE_MASK_INIT) + { + return (NV_CE_EVEN_ASYNC_LCE_MASK & NV_CE_MAX_LCE_MASK); + } + + return NV_CE_ODD_ASYNC_LCE_MASK & NV_CE_MAX_LCE_MASK; +} + +/** + * @brief Returns mask of LCEs that can be assigned for GRCEs + * where the index of corresponding set bit indicates the LCE index + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * Returns the mask of LCEs valid for GRCEs + */ +NvU32 +kceGetGrceSupportedLceMask_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + return (NV_CE_GRCE_ALLOWED_LCE_MASK & NV_CE_MAX_LCE_MASK); +} + +/** + * @brief This function checks for root port gen speed or GPU + * gen speed to determine if we should apply gen4+ mapping + * or gen3- mapping + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + */ +NvBool +kceIsGen4orHigherSupported_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + OBJCL *pCl = SYS_GET_CL(pSys); + NvU8 genSpeed = 0; + NV_STATUS status = NV_OK; + NvBool bIsGen4orHigher = NV_FALSE; + NvU32 regVal; + + if (IS_PASSTHRU(pGpu) && (pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_PCIE_GEN4_CAPABLE))) + { + // + // On passthrough the root port is commonly not accessible or fake. To + // handle this case, we support the hypervisor explicitly communicating + // the speed to us through emulated config space. See + // bug 2927491 for more details. + // + NvU32 passthroughEmulatedConfig = osPciReadDword(osPciInitHandle(gpuGetDomain(pGpu), + gpuGetBus(pGpu), + gpuGetDevice(pGpu), + 0, NULL, NULL), + NV_XVE_PASSTHROUGH_EMULATED_CONFIG); + NvU32 rootPortSpeed = DRF_VAL(_XVE, _PASSTHROUGH_EMULATED_CONFIG, _ROOT_PORT_SPEED, passthroughEmulatedConfig); + + // 0 means the config is not being emulated and we assume gen4 + bIsGen4orHigher = (rootPortSpeed == 0 || rootPortSpeed >= 4); + + if (rootPortSpeed != 0) + { + NV_PRINTF(LEVEL_INFO, "Root port speed from emulated config space = %d\n", rootPortSpeed); + } + } + else + { + status = clPcieGetRootGenSpeed(pGpu, pCl, &genSpeed); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not get root gen speed - check for GPU gen speed!\n"); + // Check for GPU gen speed + regVal = GPU_REG_RD32(pGpu, DEVICE_BASE(NV_PCFG) + NV_XVE_LINK_CONTROL_STATUS); + genSpeed = DRF_VAL(_XVE, _LINK_CONTROL_STATUS, _LINK_SPEED, regVal); + } + NV_PRINTF(LEVEL_INFO, "Gen Speed = %d\n", genSpeed); + + if ((genSpeed >= 0x4)) + { + bIsGen4orHigher = NV_TRUE; + } + } + return bIsGen4orHigher; +} + +/** + * @brief This function applies the gen4+ mapping i.e. switches the LCE passed in + * to use an HSHUB PCE (if available) instead of an FBHUB PCE. 
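+ *        If an HSHUB PCE is found, the original FBHUB PCE assignment is cleared.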
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[in] pLocalPceLceMap Pointer to PCE-LCE array + * @param[in] pceAvailableMaskPerHshub Pointer to CEs available per HSHUB + * @param[in] lceIndex LCE index for which mapping is being determined + * @param[in] currPceIndex PCE index for which mapping is being determined + */ +void +kceApplyGen4orHigherMapping_GA100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NvU32 *pLocalPceLceMap, + NvU32 *pceAvailableMaskPerHshub, + NvU32 lceIndex, + NvU32 currPceIndex +) +{ + NvBool hsPceAssigned = NV_FALSE; + NvU32 fbPceIndex, hshubId; + NV_STATUS status; + + if (NVBIT32(lceIndex) & NV_CE_SYS_LCE_ALLOWED_HSPCE_CONFIG) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_HSHUB_GET_NUM_UNITS_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + + NV_ASSERT_OK_OR_ELSE(status, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_HSHUB_GET_NUM_UNITS, + ¶ms, + sizeof(params)), + params.numHshubs = 0); + + // GA100: If LCE 3, then move this to an HSHUB PCE, if available + fbPceIndex = currPceIndex; + for (hshubId = 0; hshubId < params.numHshubs; hshubId++) + { + if (pceAvailableMaskPerHshub[hshubId] != 0) + { + // We still have HS PCEs available + hsPceAssigned = NV_TRUE; + currPceIndex = CE_GET_LOWEST_AVAILABLE_IDX(pceAvailableMaskPerHshub[hshubId]); + pLocalPceLceMap[currPceIndex] = lceIndex; + // Clear out the assigned PCE + pceAvailableMaskPerHshub[hshubId] &= (~(NVBIT32(currPceIndex))); + break; + } + } + } + if (hsPceAssigned == NV_TRUE) + { + pLocalPceLceMap[fbPceIndex] = NV_CE_PCE2LCE_CONFIG_PCE_ASSIGNED_LCE_NONE; + } +} diff --git a/src/nvidia/src/kernel/gpu/ce/arch/ampere/kernel_ce_ga102.c b/src/nvidia/src/kernel/gpu/ce/arch/ampere/kernel_ce_ga102.c new file mode 100644 index 000000000..b5c92bdfe --- /dev/null +++ b/src/nvidia/src/kernel/gpu/ce/arch/ampere/kernel_ce_ga102.c @@ -0,0 +1,196 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "ctrl/ctrl2080/ctrl2080nvlink.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/nvlink/kernel_nvlink.h" +#include "gpu/ce/kernel_ce_private.h" +#include "gpu/gpu.h" + +#include "published/ampere/ga102/dev_ce.h" + +#define NV_CE_SYS_ALLOWED_LCE_MASK 0x0C +#define NV_CE_GRCE_ALLOWED_LCE_MASK 0x03 +#define NV_CE_EVEN_ASYNC_LCE_MASK 0x00000010 +#define NV_CE_ODD_ASYNC_LCE_MASK 0x00000000 +#define NV_CE_MAX_LCE_MASK 0x1F +#define NV_CE_MAX_GRCE 2 + +/*! + * @brief Returns the size of the PCE2LCE register array + * + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * @return NV_CE_PCE2LCE_CONFIG__SIZE_1 + * + */ +NvU32 +kceGetPce2lceConfigSize1_GA102 +( + KernelCE *pKCe +) +{ + return NV_CE_PCE2LCE_CONFIG__SIZE_1; +} + +/** + * @brief This function assigns PCE-LCE mappings for sysmem + * for the following two cases - + * 1. PCIe links - assign FBHUB PCEs + * 2. NVLinks - not POR for GA10X chips - not supported + * If sysLinkMask is 0, then we assume that sysmem is over PCIe. + * Else, follow step 2 as above. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @param[in] pceAvailableMaskPerHshub Pointer to CEs available per HSHUB + * @param[out] pLocalPceLceMap Pointer to PCE-LCE array + * @param[out] pLocalExposeCeMask Pointer to LCE Mask + * + * Returns NV_OK if successful in determining PCEs and LCEs for sysmem links + */ +NV_STATUS +kceMapPceLceForSysmemLinks_GA102 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NvU32 *pceAvailableMaskPerHshub, + NvU32 *pLocalPceLceMap, + NvU32 *pLocalExposeCeMask, + NvU32 fbPceMask +) +{ + NvU32 lceMask = 0; + NvU32 tempFbPceMask; + NvU32 lceIndex, pceIndex; + NV_STATUS status; + + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS params; + + NV_ASSERT_OR_RETURN(pKernelNvlink != NULL, NV_ERR_NOT_SUPPORTED); + portMemSet(¶ms, 0, sizeof(params)); + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to determine PCEs and LCEs for sysmem links\n"); + return status; + } + + lceMask = kceGetSysmemSupportedLceMask_HAL(pGpu, pKCe); + + // + // Assign FBHUB PCEs when sysmem is over PCIE because PCIE + // accesses are not supported over HSHUB PCEs + // + if (params.sysmemLinkMask == 0) + { + // Store lceMask in the exposeCeMask before moving on + *pLocalExposeCeMask |= lceMask; + + tempFbPceMask = fbPceMask; + while(tempFbPceMask) + { + lceIndex = CE_GET_LOWEST_AVAILABLE_IDX(lceMask); + pceIndex = CE_GET_LOWEST_AVAILABLE_IDX(tempFbPceMask); + pLocalPceLceMap[pceIndex] = lceIndex; + // Clear the lowest set bits to get to the next index + tempFbPceMask &= (tempFbPceMask - 1); + lceMask &= (lceMask - 1); + } + } + else + { + // Print error message, do not assign PCEs and simply return + NV_PRINTF(LEVEL_ERROR, + "Sysmem over NVLink is not POR!\n"); + } + + return NV_OK; +} + +/** + * @brief Returns mask of LCEs that can be assigned to sysmem connection + * where the index of corresponding set bit indicates the LCE index + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * Returns the mask of LCEs valid for SYSMEM connections + */ +NvU32 +kceGetSysmemSupportedLceMask_GA102 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + return (NV_CE_SYS_ALLOWED_LCE_MASK & NV_CE_MAX_LCE_MASK); +} + +/** + * @brief Returns mask of LCEs that can be 
assigned to NVLink peers + * where the index of corresponding set bit indicates the LCE index + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * Returns the mask of LCEs valid for NVLink peers + */ +NvU32 +kceGetNvlinkPeerSupportedLceMask_GA102 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NvU32 peerAvailableLceMask +) +{ + // There is no odd async LCE on GA10X - only LCE 4 + return (NV_CE_EVEN_ASYNC_LCE_MASK & NV_CE_MAX_LCE_MASK); +} + +/** + * @brief Returns mask of LCEs that can be assigned for GRCEs + * where the index of corresponding set bit indicates the LCE index + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * + * Returns the mask of LCEs valid for GRCEs + */ +NvU32 +kceGetGrceSupportedLceMask_GA102 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + return (NV_CE_GRCE_ALLOWED_LCE_MASK & NV_CE_MAX_LCE_MASK); +} diff --git a/src/nvidia/src/kernel/gpu/ce/arch/pascal/kernel_ce_gp100.c b/src/nvidia/src/kernel/gpu/ce/arch/pascal/kernel_ce_gp100.c new file mode 100644 index 000000000..815320d14 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/ce/arch/pascal/kernel_ce_gp100.c @@ -0,0 +1,232 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "ctrl/ctrl2080/ctrl2080nvlink.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/ce/kernel_ce_private.h" +#include "gpu/gpu.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" + +NV_STATUS kceStateLoad_GP100(OBJGPU *pGpu, KernelCE *pKCe, NvU32 flags) +{ + if (!IS_VIRTUAL(pGpu) && !pGpu->bIsKCeMapInitialized) + { + NV_ASSERT_OK_OR_RETURN(kceTopLevelPceLceMappingsUpdate(pGpu, pKCe)); + pGpu->bIsKCeMapInitialized = NV_TRUE; + } + + return NV_OK; +} + +NV_STATUS kceStateUnload_GP100(OBJGPU *pGpu, KernelCE *pKCe, NvU32 flags) +{ + // Apply mappings again at resume, bug 3456067 + pGpu->bIsKCeMapInitialized = NV_FALSE; + + // On vgpu, sync with the mappings of PF at resume + pGpu->bIsCeMapInitialized = NV_FALSE; + + return NV_OK; +} + +/*! 
+ * Determine if CE should be used for sysmem read + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @returns NV_TRUE if true + */ +NvBool +kceIsCeSysmemRead_GP100 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + NvU32 sysmemReadCE; + NvU32 sysmemWriteCE; + NvU32 nvlinkP2PCeMask; + NvU32 gpuMask = NVBIT(pGpu->gpuInstance); + + // Initialize to maximum CEs available + sysmemReadCE = gpuGetNumCEs(pGpu); + + NV_ASSERT_OK(kceGetCeFromNvlinkConfig(pGpu, pKCe, + gpuMask, + &sysmemReadCE, + &sysmemWriteCE, + &nvlinkP2PCeMask)); + + return (sysmemReadCE == pKCe->publicID); +} + +/*! + * Determine if CE should be used for sysmem write + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @returns NV_TRUE if true + */ +NvBool +kceIsCeSysmemWrite_GP100 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + NvU32 sysmemReadCE; + NvU32 sysmemWriteCE; + NvU32 nvlinkP2PCeMask; + NvU32 gpuMask = NVBIT(pGpu->gpuInstance); + + // Initialize to maximum CEs available + sysmemWriteCE = gpuGetNumCEs(pGpu); + + kceGetCeFromNvlinkConfig(pGpu, pKCe, + gpuMask, + &sysmemReadCE, + &sysmemWriteCE, + &nvlinkP2PCeMask); + + return (sysmemWriteCE == pKCe->publicID); +} + +/*! + * Determine if CE should be used for NVLink P2P + * @param[in] pGpu OBJGPU pointer + * @param[in] pKCe KernelCE pointer + * @returns NV_TRUE if true + */ +NvBool +kceIsCeNvlinkP2P_GP100 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + NvU32 sysmemReadCE; + NvU32 sysmemWriteCE; + NvU32 nvlinkP2PCeMask = 0; + NvU32 gpuMask = NVBIT(pGpu->gpuInstance); + + kceGetCeFromNvlinkConfig(pGpu, pKCe, + gpuMask, + &sysmemReadCE, + &sysmemWriteCE, + &nvlinkP2PCeMask); + + return (NVBIT(pKCe->publicID) & nvlinkP2PCeMask ? NV_TRUE : NV_FALSE); +} + +/** + * @brief Get the Max Nvlink Topology + * + * Compares the current topology to the cached topology and + * returns the auto config table index to the max nvlink config seen by this GPU + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pCe OBJCE pointer + * @param[in] pCurrentTopo NVLINK_TOPOLOGY_INFO pointer + * @param[in] pAutoConfigTable NVLINK_CE_AUTO_CONFIG_TABLE pointer + * @param[in] autoConfigNumEntries NvU32 num entries within pAutoConfigTable + * @param[out] pLargestTopoIdx NvU32 pointer + * + * Returns NV_TRUE if entry exists for either current topo or cached topo + */ +NvBool +kceGetNvlinkMaxTopoForTable_GP100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NVLINK_TOPOLOGY_PARAMS *pCurrentTopo, + void *pAutoConfigTable, + NvU32 autoConfigNumEntries, + NvU32 *pLargestTopoIdx +) +{ + NvU32 cachedTopoIdx = 0; + NvU32 currentTopoIdx = 0; + NvBool bCachedIdxExists, bCurrentIdxExists; + NvU32 currentExposeCeMask, cachedExposeCeMask; + NVLINK_TOPOLOGY_PARAMS cachedTopo; + + // + // If exposeCeMask from current config is a subset of the cached topology, + // then use the cached topology data. + // We do this to ensure that we don't revoke CEs that we have exposed prevously. 
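+    // Otherwise the current config's index is reported as the largest topology.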
+ //
+ gpumgrGetSystemNvlinkTopo(gpuGetDBDF(pGpu), &cachedTopo);
+
+ bCachedIdxExists = kceGetAutoConfigTableEntry_HAL(pGpu, pKCe, &cachedTopo,
+ pAutoConfigTable, autoConfigNumEntries, &cachedTopoIdx,
+ &cachedExposeCeMask);
+
+ bCurrentIdxExists = kceGetAutoConfigTableEntry_HAL(pGpu, pKCe, pCurrentTopo,
+ pAutoConfigTable, autoConfigNumEntries, &currentTopoIdx,
+ &currentExposeCeMask);
+
+ if (bCachedIdxExists && bCurrentIdxExists)
+ {
+ // Both topologies are in the table
+ if (cachedExposeCeMask & ~currentExposeCeMask)
+ {
+ // Current topo's exposeCeMask is a subset of cached topo exposeCeMask
+ *pLargestTopoIdx = cachedTopoIdx;
+ }
+ else
+ {
+ // Current topo's exposeCeMask is equal or superset of cached topo exposeCeMask
+ *pLargestTopoIdx = currentTopoIdx;
+
+ if (cachedExposeCeMask != currentExposeCeMask)
+ {
+ //
+ // Current topo's exposeCeMask is superset of cached topo exposeCeMask
+ //
+ // This means the topology has increased. We must clear previous
+ // optimal CE recommendations to ensure we can recommend the
+ // correct optimal CE recommendations going forward.
+ // See Bug 2051735 for details.
+ //
+
+ kceClearAssignedNvlinkPeerMasks_HAL(pGpu, pKCe);
+ }
+ }
+ }
+ else if (bCachedIdxExists)
+ {
+ // only cached topo is in table
+ *pLargestTopoIdx = cachedTopoIdx;
+ }
+ else if (bCurrentIdxExists)
+ {
+ // only current topo is in table
+ *pLargestTopoIdx = currentTopoIdx;
+ }
+ else
+ {
+ // Neither are in table
+ return NV_FALSE;
+ }
+
+ return NV_TRUE;
+}
diff --git a/src/nvidia/src/kernel/gpu/ce/arch/turing/kernel_ce_tu102.c b/src/nvidia/src/kernel/gpu/ce/arch/turing/kernel_ce_tu102.c
new file mode 100644
index 000000000..765d1e481
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/ce/arch/turing/kernel_ce_tu102.c
@@ -0,0 +1,286 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */ + +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "ctrl/ctrl2080/ctrl2080nvlink.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/nvlink/kernel_nvlink.h" +#include "gpu/ce/kernel_ce_private.h" +#include "gpu/gpu.h" + +#include "published/turing/tu102/dev_ce.h" + +#define NVLINK_CE_AUTO_CONFIG_TABLE_DEFAULT_ENTRY 0 + +#define MAX_CE_CNT 15 + +/* + * sysmemLinks + * Represents the number of sysmem links detected + * This affects how many PCEs LCE0(sysmem read CE) + * and LCE1(sysmem write CE) should be mapped to + * maxLinksPerPeer + * Represents the maximum number of peer links + * between this GPU and all its peers. This affects + * how many PCEs LCE3(P2P CE) should be mapped to + * numPeers + * Represents the number of Peer GPUs discovered so far + * bSymmetric + * Represents whether the topology detected so far + * is symmetric i.e. has same number of links to all + * peers connected through nvlink. This affects how + * many PCEs to assign to LCEs3-5 (nvlink P2P CEs) + * bSwitchConfig + * Represents whether the config listed is intended + * for use with nvswitch systems + * pceLceMap + * Value of NV_CE_PCE2LCE_CONFIG0 register with the + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + * grceConfig + * Value of NV_CE_GRCE_CONFIG register with the + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + * exposeCeMask + * Mask of CEs to expose to clients for the above + * above values for sysmemLinks, maxLinksPerPeer, + * numLinks and bSymmetric + */ +typedef struct +{ + NvU32 sysmemLinks; + NvU32 maxLinksPerPeer; + NvU32 numPeers; + NvBool bSymmetric; + NvBool bSwitchConfig; + NvU32 pceLceMap[MAX_CE_CNT]; + NvU32 grceConfig[MAX_CE_CNT]; + NvU32 exposeCeMask; +} NVLINK_CE_AUTO_CONFIG_TABLE; + +/* + * Table for setting the PCE2LCE mapping +*/ +static NVLINK_CE_AUTO_CONFIG_TABLE nvLinkCeAutoConfigTable_TU102[] = +{ + +// +// #systmem #max #peers Symmetric Switch PCE-LCE GRCE exposeCe +// links (links/peer) Config? 
Config Map Config Mask
+//
+
+// Default minimal configuration - NOTE: do not add entries before this
+ {0x0, 0x0, 0x0, NV_TRUE, NV_FALSE, {0xF,0x4,0x2,0x3,0xF,0xF,0xF,0xF,0xF}, {0x4,0x3}, 0x1F},
+
+ {0x0, 0x1, 0x1, NV_TRUE, NV_FALSE, {0xF,0x4,0x2,0x3,0xF,0xF,0xF,0xF,0xF}, {0x3,0x4}, 0x1F},
+ {0x0, 0x2, 0x1, NV_TRUE, NV_FALSE, {0x3,0x4,0x4,0x2,0xF,0xF,0xF,0xF,0xF}, {0x3,0x2}, 0x1F},
+};
+
+/**
+ * Return the pce-lce mappings and grce config
+ * reg values when nvlink topology is NOT forced
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] pKCe KernelCE pointer
+ * @param[out] pPceLceMap Stores the pce-lce mappings
+ * @param[out] pGrceConfig Stores the grce configuration
+ * @param[out] pExposeCeMask Mask of CEs to expose to clients
+ *
+ * @return NV_OK on success
+ */
+NV_STATUS
+kceGetNvlinkAutoConfigCeValues_TU102
+(
+ OBJGPU *pGpu,
+ KernelCE *pKCe,
+ NvU32 *pPceLceMap,
+ NvU32 *pGrceConfig,
+ NvU32 *pExposeCeMask
+)
+{
+ KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);
+ OBJGPU *pRemoteGpu = NULL;
+ NV_STATUS status = NV_OK;
+ NvU32 gpuMask = 0;
+ NvU32 sysmemLinks = 0;
+ NvU32 numPeers = 0;
+ NvBool bSymmetric = NV_TRUE;
+ NvU32 maxLinksPerPeer = 0;
+ NvU32 gpuInstance = 0;
+ NvU32 topoIdx = 0;
+ NvU32 pce2lceConfigSize1 = kceGetPce2lceConfigSize1_HAL(pKCe);
+ NvU32 grceConfigSize1 = kceGetGrceConfigSize1_HAL(pKCe);
+ NvBool bEntryExists;
+ NvU32 pceIdx, grceIdx;
+ NVLINK_TOPOLOGY_PARAMS currentTopo = { 0 };
+
+ if ((pPceLceMap == NULL) || (pGrceConfig == NULL) || (pExposeCeMask == NULL))
+ {
+ return NV_ERR_INVALID_ARGUMENT;
+ }
+
+ if (pKernelNvlink == NULL)
+ {
+ return NV_ERR_NOT_SUPPORTED;
+ }
+
+ // Initialize pPceLceMap with no mappings
+ for (pceIdx = 0; pceIdx < pce2lceConfigSize1; pceIdx++)
+ {
+ pPceLceMap[pceIdx] = NV_CE_PCE2LCE_CONFIG_PCE_ASSIGNED_LCE_NONE;
+ }
+
+ sysmemLinks = knvlinkGetNumLinksToSystem(pGpu, pKernelNvlink);
+
+ if (gpuGetNumCEs(pGpu) == 0)
+ {
+ return NV_ERR_NOT_SUPPORTED;
+ }
+
+ (void)gpumgrGetGpuAttachInfo(NULL, &gpuMask);
+
+ // Get the max{nvlinks/peer, for all connected peers}
+ while ((pRemoteGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+ {
+ NvU32 numLinksToPeer = knvlinkGetNumLinksToPeer(pGpu, pKernelNvlink,
+ pRemoteGpu);
+ if (numLinksToPeer == 0)
+ {
+ continue;
+ }
+
+ numPeers++;
+
+ //
+ // The topology remains symmetric if this is either the first GPU we've
+ // seen connected over NVLINK, or the number of links connected to this
+ // peer is the same as the maximum number of links connected to any peer
+ // seen so far.
+ //
+ bSymmetric = (bSymmetric &&
+ ((maxLinksPerPeer == 0) ||
+ (maxLinksPerPeer == numLinksToPeer)));
+
+ if (numLinksToPeer > maxLinksPerPeer)
+ {
+ maxLinksPerPeer = numLinksToPeer;
+ }
+ }
+
+ currentTopo.sysmemLinks = sysmemLinks;
+ currentTopo.maxLinksPerPeer = maxLinksPerPeer;
+ currentTopo.numPeers = numPeers;
+ currentTopo.bSymmetric = bSymmetric;
+ currentTopo.bSwitchConfig = knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink);
+
+ // Use largest topology seen by this GPU
+ bEntryExists = kceGetNvlinkMaxTopoForTable_HAL(pGpu, pKCe, &currentTopo, nvLinkCeAutoConfigTable_TU102,
+ NV_ARRAY_ELEMENTS(nvLinkCeAutoConfigTable_TU102), &topoIdx);
+
+ if (!bEntryExists)
+ {
+ NV_PRINTF(LEVEL_INFO,
+ "GPU%d : NVLINK config not found in PCE2LCE table - using default entry\n",
+ gpuGetInstance(pGpu));
+ topoIdx = NVLINK_CE_AUTO_CONFIG_TABLE_DEFAULT_ENTRY;
+ }
+
+ //
+ // Store the state globally in gpumgr so that we can preserve the topology
+ // info across GPU loads.
+ // Preserving across GPU loads enables UVM to optimize perf
+ //
+ currentTopo.sysmemLinks = nvLinkCeAutoConfigTable_TU102[topoIdx].sysmemLinks;
+ currentTopo.maxLinksPerPeer = nvLinkCeAutoConfigTable_TU102[topoIdx].maxLinksPerPeer;
+ currentTopo.numPeers = nvLinkCeAutoConfigTable_TU102[topoIdx].numPeers;
+ currentTopo.bSymmetric = nvLinkCeAutoConfigTable_TU102[topoIdx].bSymmetric;
+ currentTopo.bSwitchConfig = nvLinkCeAutoConfigTable_TU102[topoIdx].bSwitchConfig;
+
+ gpumgrUpdateSystemNvlinkTopo(gpuGetDBDF(pGpu), &currentTopo);
+
+ for (pceIdx = 0; pceIdx < pce2lceConfigSize1; pceIdx++)
+ {
+ pPceLceMap[pceIdx] = nvLinkCeAutoConfigTable_TU102[topoIdx].pceLceMap[pceIdx];
+ }
+
+ for (grceIdx = 0; grceIdx < grceConfigSize1; grceIdx++)
+ {
+ NvU32 grceSharedLce = nvLinkCeAutoConfigTable_TU102[topoIdx].grceConfig[grceIdx];
+
+ if (grceSharedLce != 0xF)
+ {
+ // GRCE is shared
+ pGrceConfig[grceIdx] = DRF_NUM(_CE, _GRCE_CONFIG, _SHARED, 1) |
+ DRF_NUM(_CE, _GRCE_CONFIG, _SHARED_LCE, grceSharedLce);
+ }
+ else
+ {
+ // GRCE got its own PCE
+ pGrceConfig[grceIdx] = DRF_NUM(_CE, _GRCE_CONFIG, _SHARED, 0);
+ }
+ }
+ *pExposeCeMask = nvLinkCeAutoConfigTable_TU102[topoIdx].exposeCeMask;
+
+ NV_PRINTF(LEVEL_INFO,
+ "GPU%d : RM Configured Values for CE Config : pceLceMap = "
+ "0x%01x%01x%01x%01x%01x%01x%01x%01x%01x, grceConfig = 0x%01x%01x, "
+ "exposeCeMask = 0x%08x gpuMask = 0x%08x\n",
+ gpuGetInstance(pGpu), pPceLceMap[8], pPceLceMap[7],
+ pPceLceMap[6], pPceLceMap[5], pPceLceMap[4], pPceLceMap[3],
+ pPceLceMap[2], pPceLceMap[1], pPceLceMap[0],
+ nvLinkCeAutoConfigTable_TU102[topoIdx].grceConfig[1],
+ nvLinkCeAutoConfigTable_TU102[topoIdx].grceConfig[0],
+ *pExposeCeMask, gpuMask);
+
+ return status;
+}
+
+/*!
+ * @brief Returns the size of the GRCE_CONFIG register array
+ *
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] pCe OBJCE pointer
+ *
+ * @return NV_CE_GRCE_CONFIG__SIZE_1
+ *
+ */
+NvU32 kceGetGrceConfigSize1_TU102(KernelCE* kce)
+{
+ return NV_CE_GRCE_CONFIG__SIZE_1;
+}
+
+/*!
+ * @brief Returns the size of the PCE2LCE register array
+ *
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] pCe OBJCE pointer
+ *
+ * @return NV_CE_PCE2LCE_CONFIG__SIZE_1
+ *
+ */
+NvU32 kceGetPce2lceConfigSize1_TU102(KernelCE* kce)
+{
+ return NV_CE_PCE2LCE_CONFIG__SIZE_1;
+}
diff --git a/src/nvidia/src/kernel/gpu/ce/arch/volta/kernel_ce_gv100.c b/src/nvidia/src/kernel/gpu/ce/arch/volta/kernel_ce_gv100.c
new file mode 100644
index 000000000..66eef162f
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/ce/arch/volta/kernel_ce_gv100.c
@@ -0,0 +1,264 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ctrl/ctrl2080/ctrl2080ce.h"
+#include "ctrl/ctrl2080/ctrl2080nvlink.h"
+#include "gpu/ce/kernel_ce.h"
+#include "gpu/nvlink/kernel_nvlink.h"
+#include "gpu/ce/kernel_ce_private.h"
+#include "gpu/ce/kernel_ce_gv100_private.h"
+#include "gpu/gpu.h"
+
+NV_STATUS kceGetP2PCes_GV100(KernelCE *pKCe, OBJGPU *pGpu, NvU32 gpuMask, NvU32 *nvlinkP2PCeMask)
+{
+ NvU32 gpuCount = gpumgrGetSubDeviceCount(gpuMask);
+ NvU32 maxPces = 0;
+
+ *nvlinkP2PCeMask = 0;
+
+ // If GPU count = 1, return all possible nvlink P2P CEs
+ if (gpuCount == 1)
+ {
+ *nvlinkP2PCeMask |= NVBIT(NVLINK_MIN_P2P_LCE);
+ for (NvU32 i = NVLINK_MIN_P2P_LCE; i < gpuGetNumCEs(pGpu); i++)
+ {
+ *nvlinkP2PCeMask |= NVBIT(i);
+
+ }
+ }
+ else
+ {
+ KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);
+ KernelCE *pKCeMatch = NULL;
+ KernelCE *pKCeSubMatch = NULL;
+ KernelCE *pKCeMaxPces = NULL;
+ KernelCE *pTargetCe = NULL;
+ KernelCE *pKCeLoop = NULL;
+ NvU32 numLinks = 0;
+ NvU32 gpuInstance = 0;
+ OBJGPU *pRemoteGpu = NULL;
+ NvBool bSwitchConfig = NV_FALSE;
+
+ if (pKernelNvlink != NULL)
+ {
+ bSwitchConfig = knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink);
+
+ // Get the remote GPU
+ while ((pRemoteGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+ {
+ if (pRemoteGpu != pGpu)
+ break;
+ }
+
+ NV_ASSERT_OR_RETURN(pRemoteGpu != NULL, NV_ERR_INVALID_STATE);
+ gpuInstance = gpuGetInstance(pRemoteGpu);
+
+ numLinks = knvlinkGetNumLinksToPeer(pGpu, pKernelNvlink, pRemoteGpu);
+ }
+
+ //
+ // Look for an LCE that is already assigned to this peer,
+ // or greedily allocate the first available one.
+ //
+ if (numLinks == 0)
+ {
+ *nvlinkP2PCeMask = NVBIT32(NVLINK_SYSMEM_WRITE_LCE);
+ NV_PRINTF(LEVEL_INFO,
+ "GPU %d Peer %d has no links (could be an indirect peer). Sysmem LCE assigned %d!\n",
+ gpuGetInstance(pGpu), gpuInstance, NVLINK_SYSMEM_WRITE_LCE);
+ return NV_OK;
+ }
+
+ for (NvU32 i = NVLINK_MIN_P2P_LCE; i < gpuGetNumCEs(pGpu); i++)
+ {
+ pKCeLoop = GPU_GET_KCE(pGpu, i);
+ if (pKCeLoop == NULL || pKCeLoop->bStubbed)
+ {
+ continue;
+ }
+
+ NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS params = {0};
+
+ // If we don't find a match, will use LCE with most PCEs
+ params.ceEngineType = NV2080_ENGINE_TYPE_COPY(pKCeLoop->publicID);
+ NV_STATUS rmStatus = knvlinkExecGspRmRpc(pGpu, pKernelNvlink,
+ NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK,
+ (void *)&params, sizeof(params));
+ NV_ASSERT_OK_OR_RETURN(rmStatus);
+ NvU32 numPces = nvPopCount32(params.pceMask);
+
+ //
+ // Only save the LCE with the maximum PCEs if it is not
+ // already assigned or for nvswitch configurations where
+ // all peers will get the max PCE.
+ //
+ if ((numPces > maxPces) &&
+ (bSwitchConfig || (pKCeLoop->nvlinkPeerMask == 0)))
+ {
+ pKCeMaxPces = pKCeLoop;
+ maxPces = numPces;
+ }
+
+ if ((pKCeLoop->nvlinkPeerMask & NVBIT(gpuInstance)) != 0)
+ {
+ // LCE is already assigned to this peer
+ pKCeMatch = pKCeLoop;
+ break;
+ }
+ else if (pKCeLoop->nvlinkPeerMask != 0)
+ {
+ // LCE is assigned to another peer
+ continue;
+ }
+ else
+ {
+ //
+ // LCE is not assigned yet; check if its configuration
+ // matches what we need for this peer
+ //
+ if (numPces == numLinks)
+ {
+ pKCeMatch = (pKCeMatch == NULL) ?
pKCeLoop : pKCeMatch; + } + else if (numPces < numLinks) + { + pKCeSubMatch = (pKCeSubMatch == NULL) ? pKCeLoop : pKCeSubMatch; + } + } + } + + // + // Prioritize an unused LCE with numPce to numLink match + // then, an unused LCE with numPces < numLinks + // finally, fall back to the LCE with max numPces + // + // For nvswitch, optimal LCE is always LCE with max PCE + // + + if (pKCeMatch != NULL && !bSwitchConfig) + { + pTargetCe = pKCeMatch; + } + else if (pKCeSubMatch != NULL && !bSwitchConfig) + { + pTargetCe = pKCeSubMatch; + } + else if (pKCeMaxPces != NULL) + { + pTargetCe = pKCeMaxPces; + } + + if (pTargetCe != NULL) + { + // assign LCE to peer + if (pTargetCe->nvlinkPeerMask == 0) + { + pTargetCe->nvlinkPeerMask = NVBIT(gpuInstance); + } + + NV_PRINTF(LEVEL_INFO, + "GPU %d Assigning Peer %d to LCE %d\n", + gpuGetInstance(pGpu), gpuInstance, + pTargetCe->publicID); + + *nvlinkP2PCeMask = NVBIT(pTargetCe->publicID); + } + } + + return NV_OK; +} + +void kceGetSysmemRWLCEs_GV100(KernelCE* pKCe, NvU32 *rd, NvU32 *wr) +{ + *rd = NVLINK_SYSMEM_READ_LCE; + *wr = NVLINK_SYSMEM_WRITE_LCE; +} + +/* + * Look up entry in NVLINK_CE_AUTO_CONFIG_TABLE + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pCe OBJCE pointer + * @param[in] pCurrentTopo NVLINK_TOPOLOGY_INFO pointer + * @param[in] pAutoConfigTable NVLINK_CE_AUTO_CONFIG_TABLE pointer + * @param[in] autoConfigNumEntries NvU32 num entries within pAutoConfigTable + * @param[out] pIdx NvU32 pointer + * @param[out] pExposeCeMask NvU32 pointer + * + * Returns: NV_TRUE if entry is found + * NV_FALSE otheriwse +*/ +NvBool +kceGetAutoConfigTableEntry_GV100 +( + OBJGPU *pGpu, + KernelCE *pKCe, + NVLINK_TOPOLOGY_PARAMS *pCurrentTopo, + NVLINK_CE_AUTO_CONFIG_TABLE *pTable, + NvU32 autoConfigNumEntries, + NvU32 *pIdx, + NvU32 *pExposeCeMask +) +{ + NvU32 i; + + for (i = 0; i < autoConfigNumEntries; i++) + { + if ((pTable[i].sysmemLinks == pCurrentTopo->sysmemLinks ) && + (pTable[i].maxLinksPerPeer == pCurrentTopo->maxLinksPerPeer) && + (pTable[i].bSymmetric == pCurrentTopo->bSymmetric ) && + (pTable[i].bSwitchConfig == pCurrentTopo->bSwitchConfig ) && + ((pTable[i].numPeers == pCurrentTopo->numPeers) || (pCurrentTopo->bSwitchConfig))) + { + *pIdx = i; + *pExposeCeMask = pTable[i].exposeCeMask; + return NV_TRUE; + } + } + return NV_FALSE; +} + +/* + * Clear the nvlinkPeerMasks from each P2P CE + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pCe OBJCE pointer + */ +void +kceClearAssignedNvlinkPeerMasks_GV100 +( + OBJGPU *pGpu, + KernelCE *pKCe +) +{ + KernelCE *pCeLoop = NULL; + NvU32 i; + + for (i = NVLINK_MIN_P2P_LCE; i < gpuGetNumCEs(pGpu); i++) + { + pCeLoop = GPU_GET_KCE(pGpu, i); + + if (pCeLoop) + pCeLoop->nvlinkPeerMask = 0; + } +} diff --git a/src/nvidia/src/kernel/gpu/ce/kernel_ce.c b/src/nvidia/src/kernel/gpu/ce/kernel_ce.c new file mode 100644 index 000000000..aaf76c203 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/ce/kernel_ce.c @@ -0,0 +1,540 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/locks.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/ce/kernel_ce_private.h" +#include "gpu/eng_desc.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" +#include "kernel/gpu/intr/intr_service.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "nvRmReg.h" + +NV_STATUS kceConstructEngine_IMPL(OBJGPU *pGpu, KernelCE *pKCe, ENGDESCRIPTOR engDesc) +{ + NV_ASSERT_OR_RETURN(!RMCFG_FEATURE_PLATFORM_GSP, NV_ERR_NOT_SUPPORTED); + + NvU32 thisPublicID = GET_CE_IDX(engDesc); + + NV_PRINTF(LEVEL_INFO, "KernelCE: thisPublicID = %d\n", thisPublicID); + + pKCe->publicID = thisPublicID; + pKCe->bIsAutoConfigEnabled = NV_TRUE; + pKCe->bUseGen4Mapping = NV_FALSE; + + NvU32 data32 = 0; + if ((osReadRegistryDword(pGpu, NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG, &data32) == NV_OK) && + (data32 == NV_REG_STR_RM_CE_ENABLE_AUTO_CONFIG_FALSE)) + { + NV_PRINTF(LEVEL_INFO, "Disable CE Auto PCE-LCE Config\n"); + pKCe->bIsAutoConfigEnabled = NV_FALSE; + } + + if ((osReadRegistryDword(pGpu, NV_REG_STR_RM_CE_USE_GEN4_MAPPING, &data32) == NV_OK) && + (data32 == NV_REG_STR_RM_CE_USE_GEN4_MAPPING_TRUE)) + { + NV_PRINTF(LEVEL_INFO, "GEN4 mapping will use a HSHUB PCE (if available) for PCIe!\n"); + pKCe->bUseGen4Mapping = NV_TRUE; + } + + // OBJCE::isPresent would compute this first + pGpu->numCEs++; + + return NV_OK; +} + +NvBool kceIsPresent_IMPL(OBJGPU *pGpu, KernelCE *kce) +{ + // Use bus/fifo to detemine if LCE(i) is present. + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvBool present = NV_FALSE; + + NV_ASSERT_OR_RETURN(pKernelBus != NULL, NV_FALSE); + present = kbusCheckEngine_HAL(pGpu, pKernelBus, ENG_CE(kce->publicID)); + + NV_PRINTF(LEVEL_INFO, "KCE %d / %d: present=%d\n", kce->publicID, + pGpu->numCEs > 0 ? pGpu->numCEs - 1 : pGpu->numCEs, present); + + return present; +} + +NvBool kceIsNewMissingEngineRemovalSequenceEnabled_IMPL(OBJGPU *pGpu, KernelCE *pKCe) +{ + return NV_TRUE; +} + +static void printCaps(KernelCE *pKCe, NvU32 engineType, const NvU8 *capsTbl) +{ + NV_PRINTF(LEVEL_INFO, "LCE%d caps (engineType = %d)\n", pKCe->publicID, engineType); +#define PRINT_CAP(cap) NV_PRINTF(LEVEL_INFO, #cap ":%d\n", (RMCTRL_GET_CAP(capsTbl, NV2080_CTRL_CE_CAPS, cap) != 0) ? 
1 : 0) + + PRINT_CAP(_CE_GRCE); + PRINT_CAP(_CE_SHARED); + PRINT_CAP(_CE_SYSMEM_READ); + PRINT_CAP(_CE_SYSMEM_WRITE); + PRINT_CAP(_CE_NVLINK_P2P); + PRINT_CAP(_CE_SYSMEM); + PRINT_CAP(_CE_P2P); + PRINT_CAP(_CE_BL_SIZE_GT_64K_SUPPORTED); + PRINT_CAP(_CE_SUPPORTS_NONPIPELINED_BL); + PRINT_CAP(_CE_SUPPORTS_PIPELINED_BL); + +} + +static void kceGetNvlinkCaps(OBJGPU *pGpu, KernelCE *pKCe, NvU8 *pKCeCaps) +{ + if (kceIsCeSysmemRead_HAL(pGpu, pKCe)) + RMCTRL_SET_CAP(pKCeCaps, NV2080_CTRL_CE_CAPS, _CE_SYSMEM_READ); + + if (kceIsCeSysmemWrite_HAL(pGpu, pKCe)) + RMCTRL_SET_CAP(pKCeCaps, NV2080_CTRL_CE_CAPS, _CE_SYSMEM_WRITE); + + if (kceIsCeNvlinkP2P_HAL(pGpu, pKCe)) + RMCTRL_SET_CAP(pKCeCaps, NV2080_CTRL_CE_CAPS, _CE_NVLINK_P2P); +} + +NV_STATUS kceGetDeviceCaps_IMPL(OBJGPU *pGpu, KernelCE *pKCe, NvU32 engineType, NvU8 *pKCeCaps) +{ + if (pKCe->bStubbed) + { + NV_PRINTF(LEVEL_INFO, "Skipping stubbed CE %d\n", pKCe->publicID); + return NV_ERR_NOT_SUPPORTED; + } + + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // + // Since some CE capabilities depend on the nvlink topology, + // trigger topology detection before updating the CE caps + // + if (pKernelNvlink != NULL && !knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink); + } + + portMemSet(pKCeCaps, 0, NV2080_CTRL_CE_CAPS_TBL_SIZE); + + NV2080_CTRL_CE_GET_CAPS_V2_PARAMS physicalCaps; + portMemSet(&physicalCaps, 0, sizeof(physicalCaps)); + + physicalCaps.ceEngineType = NV2080_ENGINE_TYPE_COPY(pKCe->publicID); + NV_PRINTF(LEVEL_INFO, "Querying caps for LCE(%d)\n", pKCe->publicID); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_CE_GET_PHYSICAL_CAPS, + &physicalCaps, + sizeof(physicalCaps))); + + portMemCopy(pKCeCaps, + NV2080_CTRL_CE_CAPS_TBL_SIZE, + physicalCaps.capsTbl, + NV2080_CTRL_CE_CAPS_TBL_SIZE); + + if (pKernelNvlink != NULL) + kceGetNvlinkCaps(pGpu, pKCe, pKCeCaps); + + printCaps(pKCe, engineType, pKCeCaps); + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdCeGetAllCaps_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CE_GET_ALL_CAPS_PARAMS *pCeCapsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMIGManager *pKernelMIGManager = NULL; + MIG_INSTANCE_REF migRef; + + ct_assert(ENG_CE__SIZE_1 <= sizeof(pCeCapsParams->capsTbl) / sizeof(pCeCapsParams->capsTbl[0])); + + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // + // Since some CE capabilities depend on the nvlink topology, + // trigger topology detection before updating the CE caps + // + if (pKernelNvlink != NULL && !knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink); + } + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, &migRef)); + } + + portMemSet(pCeCapsParams, 0, sizeof(pCeCapsParams)); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_CE_GET_ALL_PHYSICAL_CAPS, + pCeCapsParams, + sizeof(*pCeCapsParams))); + + for (NvU32 i = 0; i < ENG_CE__SIZE_1; i++) + { + KernelCE *pKCe = GPU_GET_KCE(pGpu, i); + if (pKCe == NULL || pKCe->bStubbed) + { + NV_PRINTF(LEVEL_INFO, "Skipping 
missing or stubbed CE %d\n", i);
+ continue;
+ }
+
+ if (IS_MIG_IN_USE(pGpu) &&
+ !kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, NV2080_ENGINE_TYPE_COPY(i), migRef))
+ {
+ NV_PRINTF(LEVEL_INFO, "Skipping CE%d that is not in the MIG instance\n", i);
+ continue;
+ }
+
+ pCeCapsParams->present |= BIT(i);
+
+ NvU8 *pKCeCaps = pCeCapsParams->capsTbl[i];
+
+ if (pKernelNvlink != NULL)
+ kceGetNvlinkCaps(pGpu, pKCe, pKCeCaps);
+ }
+
+ return NV_OK;
+}
+
+/*!
+ * Determine appropriate CEs for sysmem read/write and P2P over NVLINK.
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] pKCe OBJCE pointer
+ * @param[in] gpuMask Mask of GPUs for determining P2P CEs
+ * @param[out] pSysmemReadCE Pointer to fill out the LCE for sysmem read
+ * @param[out] pSysmemWriteCE Pointer to fill out the LCE for sysmem write
+ * @param[out] nvlinkP2PCeMask Pointer to fill out the mask of LCEs for NVLink P2P
+ */
+NV_STATUS
+kceGetCeFromNvlinkConfig_IMPL
+(
+ OBJGPU *pGpu,
+ KernelCE *pKCe,
+ NvU32 gpuMask,
+ NvU32 *pSysmemReadCE,
+ NvU32 *pSysmemWriteCE,
+ NvU32 *nvlinkP2PCeMask
+)
+{
+ NV_STATUS rmStatus;
+ NvU32 gpuCount;
+ NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS nvlinkCapsParams = {0};
+ NvU8 *nvlinkCaps;
+
+ gpuCount = gpumgrGetSubDeviceCount(gpuMask);
+ NV_CHECK_OR_RETURN(LEVEL_ERROR, !IsGP100(pGpu) || gpuCount <= 2, NV_ERR_INVALID_STATE);
+
+ rmStatus = knvlinkCtrlCmdBusGetNvlinkCaps(pGpu, &nvlinkCapsParams);
+ NV_ASSERT_OK_OR_RETURN(rmStatus);
+
+ nvlinkCaps = (NvU8*)&nvlinkCapsParams.capsTbl;
+
+ // Check if GPU supports NVLink
+ if (NV2080_CTRL_NVLINK_GET_CAP(nvlinkCaps, NV2080_CTRL_NVLINK_CAPS_SUPPORTED))
+ {
+ // Check if GPU supports NVLink for SYSMEM
+ if (NV2080_CTRL_NVLINK_GET_CAP(nvlinkCaps, NV2080_CTRL_NVLINK_CAPS_SYSMEM_ACCESS))
+ kceGetSysmemRWLCEs(pKCe, pSysmemReadCE, pSysmemWriteCE);
+
+ // Check if GPU supports NVLink for P2P
+ if (NV2080_CTRL_NVLINK_GET_CAP(nvlinkCaps, NV2080_CTRL_NVLINK_CAPS_P2P_SUPPORTED))
+ rmStatus = kceGetP2PCes(pKCe, pGpu, gpuMask, nvlinkP2PCeMask);
+ }
+
+ return rmStatus;
+}
+
+NV_STATUS kceUpdateClassDB_KERNEL(OBJGPU *pGpu, KernelCE *pKCe)
+{
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+ NV2080_CTRL_CE_UPDATE_CLASS_DB_PARAMS params;
+
+ NV_STATUS status = pRmApi->Control(pRmApi,
+ pGpu->hInternalClient,
+ pGpu->hInternalSubdevice,
+ NV2080_CTRL_CMD_CE_UPDATE_CLASS_DB,
+ &params,
+ sizeof(params));
+ NV_ASSERT_OK_OR_RETURN(status);
+
+ // For each LCE, check if it is stubbed out in GSP-RM
+ for (NvU32 i = 0; i < gpuGetNumCEs(pGpu); i++)
+ {
+ KernelCE *pKCe = GPU_GET_KCE(pGpu, i);
+
+ if (pKCe)
+ {
+ NvBool stubbed = ((BIT(i) & params.stubbedCeMask)) != 0;
+ // If this CE has no PCEs assigned, remove it from classDB
+ if (stubbed)
+ {
+ NV_PRINTF(LEVEL_INFO, "Stubbing CE %d\n", i);
+ pKCe->bStubbed = NV_TRUE;
+
+ status = gpuDeleteClassFromClassDBByEngTag(pGpu, ENG_CE(i));
+ }
+ else
+ {
+ // If a new CE needs to be added because of the new mappings
+ NV_PRINTF(LEVEL_INFO, "Unstubbing CE %d\n", i);
+ pKCe->bStubbed = NV_FALSE;
+
+ status = gpuAddClassToClassDBByEngTag(pGpu, ENG_CE(i));
+ }
+ }
+ }
+
+ NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, gpuUpdateEngineTable(pGpu));
+
+ return NV_OK;
+}
+
+/**
+ * @brief Provides an opportunity to register some IntrService during intrStateInit.
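+ *
+ * The record registered here is indexed by MC_ENGINE_IDX_CE(publicID); it is
+ * the entry consulted when kceServiceNotificationInterrupt (below) handles
+ * the nonstall interrupt for this copy engine.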
+ */ +void +kceRegisterIntrService_IMPL +( + OBJGPU *pGpu, + KernelCE *pKCe, + IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX] +) +{ + NvU16 engineIdx = MC_ENGINE_IDX_CE(pKCe->publicID); + + NV_ASSERT(pRecords[engineIdx].pNotificationService == NULL); + pRecords[engineIdx].bFifoWaiveNotify = NV_FALSE; + pRecords[engineIdx].pNotificationService = staticCast(pKCe, IntrService); +} + +/** + * @brief Services the nonstall interrupt. + * + * @param[in] pGpu + * @param[in] pKCe The IntrService object registered to handle the engineIdx nonstall interrupt. + * @param[in] pParams + * + */ +NV_STATUS +kceServiceNotificationInterrupt_IMPL +( + OBJGPU *pGpu, + KernelCE *pKCe, + IntrServiceServiceNotificationInterruptArguments *pParams +) +{ + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pParams->engineIdx == MC_ENGINE_IDX_CE(pKCe->publicID), NV_ERR_GENERIC); + + NV_PRINTF(LEVEL_INFO, "for CE%d\n", pKCe->publicID); + + MODS_ARCH_REPORT(NV_ARCH_EVENT_NONSTALL_CE, "%s", "processing CE nonstall interrupt\n"); + + NV_ASSERT(NV2080_NOTIFIERS_CE(pKCe->publicID)); + + kceNonstallIntrCheckAndClear_HAL(pGpu, pKCe, pParams->pThreadState); + + // Wake up channels waiting on this event + engineNonStallIntrNotify(pGpu, + NV2080_ENGINE_TYPE_COPY0 + NV2080_NOTIFIERS_CE(pKCe->publicID) - NV2080_NOTIFIERS_CE0); + + return NV_OK; +} + +NV_STATUS kceTopLevelPceLceMappingsUpdate_IMPL(OBJGPU *pGpu, KernelCE *pKCe) +{ + NvU32 pceLceMap[NV2080_CTRL_MAX_PCES] = {0}; + NvU32 grceConfig[NV2080_CTRL_MAX_GRCES] = {0}; + NvU32 exposeCeMask = 0; + NvBool bUpdateNvlinkPceLce = NV_FALSE; + NV_STATUS status = NV_OK; + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // + // Sync class DB before proceeding with the algorithm. + // This is needed if mapping update previously originated in physical RM + // + NV_ASSERT_OK_OR_RETURN(kceUpdateClassDB_HAL(pGpu, pKCe)); + + if (pKernelNvlink && !knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + // + // If not GSP-RM, get the auto-config PCE-LCE mappings for NVLink topology. + // This should work fine on CPU-RM and monolithic RM. + // + + // Set bUpdateNvlinkPceLce to auto-config status + bUpdateNvlinkPceLce = pKCe->bIsAutoConfigEnabled; + + if (bUpdateNvlinkPceLce) + { + status = kceGetNvlinkAutoConfigCeValues_HAL(pGpu, pKCe, pceLceMap, + grceConfig, &exposeCeMask); + if (status == NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_INFO, + "CE AutoConfig is not supported. Skipping PCE2LCE update\n"); + + bUpdateNvlinkPceLce = NV_FALSE; + } + else + { + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to get auto-config PCE-LCE mappings. Return\n"); + return status; + } + } + } + } + + // + // Pass these values to the ceUpdatePceLceMappings_HAL. + // + // bUpdateNvlinkPceLce will have the following values: + // a. NV_FALSE when auto-config is disabled through regkey. + // b. NV_FALSE when NVLink does not exist or NVLink topology is forced. + // ceUpdatePceLceMappings_HAL still have to be called because PCE-LCE + // mappings can be updated for reasons other than NVLink topology. + // c. NV_TRUE when (a) and (b) does not hold true and platform is CPU-RM + // or monolithic RM. For GSP-RM, value is NV_FALSE because GSP-RM does + // not store the NVLink topology. + // + // exposeCeMask will be 0x0 when bUpdateNvlinkPceLce is NV_FALSE. 
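+ //
+ // Note: when bUpdateNvlinkPceLce is NV_FALSE, the pceLceMap/grceConfig
+ // members of params are never written below, so zero-initializing params
+ // first (for example with portMemSet(&params, 0, sizeof(params))) would keep
+ // the control-call payload independent of stack contents.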
+ //
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+ NV2080_CTRL_CE_UPDATE_PCE_LCE_MAPPINGS_PARAMS params;
+
+ if (bUpdateNvlinkPceLce)
+ {
+ portMemCopy(params.pceLceMap,
+ sizeof(params.pceLceMap),
+ pceLceMap,
+ sizeof(pceLceMap));
+
+ portMemCopy(params.grceConfig,
+ sizeof(params.grceConfig),
+ grceConfig,
+ sizeof(grceConfig));
+ }
+
+ params.exposeCeMask = exposeCeMask;
+ params.bUpdateNvlinkPceLce = bUpdateNvlinkPceLce;
+
+ // For GSP clients, the update needs to be routed through ctrl call
+ status = pRmApi->Control(pRmApi,
+ pGpu->hInternalClient,
+ pGpu->hInternalSubdevice,
+ NV2080_CTRL_CMD_CE_UPDATE_PCE_LCE_MAPPINGS,
+ &params,
+ sizeof(params));
+ if (status != NV_OK)
+ {
+ NV_PRINTF(LEVEL_ERROR,
+ "Failed to update PCE-LCE mappings. Return\n");
+ return status;
+ }
+
+ //
+ // After the mappings are updated, LCEs which do not have PCEs mapped need
+ // to be stubbed out and LCEs which have PCEs need to be un-stubbed. This
+ // happens as a part of ceUpdatePceLceMappings_HAL which gets executed in
+ // GSP/monolithic RM. For CPU-RM, have to call this function explicitly.
+ //
+ status = kceUpdateClassDB_HAL(pGpu, pKCe);
+
+ return status;
+}
+
+NV_STATUS kceGetFaultMethodBufferSize_IMPL(OBJGPU *pGpu, NvU32 *size)
+{
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+ NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS params;
+
+ NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice,
+ NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE, &params, sizeof(params)));
+
+ *size = params.size;
+ return NV_OK;
+}
+
+/*!
+ * Gets the HSHUB/FBHUB PCE masks and updates NVLINK_TOPOLOGY_PARAMS.
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] pTopoParams Pointer to NVLINK_TOPOLOGY_PARAMS
+ *
+ * Returns NV_OK if array is filled successfully.
+ * NV_ERR_INVALID_ARGUMENT if pTopoParams is NULL or array size is not equal.
+ */
+NV_STATUS
+kceGetAvailableHubPceMask_IMPL
+(
+ OBJGPU *pGpu,
+ NVLINK_TOPOLOGY_PARAMS *pTopoParams
+)
+{
+ RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+ NV2080_CTRL_CE_GET_HUB_PCE_MASK_PARAMS params;
+
+ NV_ASSERT_OR_RETURN(pTopoParams != NULL, NV_ERR_INVALID_ARGUMENT);
+ ct_assert(NV_ARRAY_ELEMENTS(pTopoParams->pceAvailableMaskPerHshub) == NV_ARRAY_ELEMENTS(params.hshubPceMasks));
+
+ NV_ASSERT_OK_OR_RETURN(
+ pRmApi->Control(pRmApi,
+ pGpu->hInternalClient,
+ pGpu->hInternalSubdevice,
+ NV2080_CTRL_CMD_CE_GET_HUB_PCE_MASK,
+ &params,
+ sizeof(params))
+ );
+
+ portMemCopy(pTopoParams->pceAvailableMaskPerHshub,
+ sizeof(pTopoParams->pceAvailableMaskPerHshub),
+ params.hshubPceMasks,
+ sizeof(pTopoParams->pceAvailableMaskPerHshub));
+ pTopoParams->fbhubPceMask = params.fbhubPceMask;
+
+ return NV_OK;
+}
diff --git a/src/nvidia/src/kernel/gpu/ce/kernel_ce_context.c b/src/nvidia/src/kernel/gpu/ce/kernel_ce_context.c
new file mode 100644
index 000000000..bacdf251e
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/ce/kernel_ce_context.c
@@ -0,0 +1,206 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/ce/kernel_ce_context.h" +#include "gpu/ce/kernel_ce_private.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "os/os.h" +#include "resserv/rs_client.h" + +#include "class/clb0b5.h" +#include "class/clb0b5sw.h" +#include "class/clc0b5.h" +#include "class/clc0b5sw.h" +#include "class/clc1b5.h" +#include "class/clc1b5sw.h" +#include "class/clc3b5.h" +#include "class/clc3b5sw.h" +#include "class/clc5b5.h" +#include "class/clc5b5sw.h" +#include "class/clc6b5.h" +#include "class/clc6b5sw.h" +#include "class/clc7b5.h" + +/* + * This function returns an engine descriptor corresponding to the class + * and engine instance passed in. + * + * @params[in] externalClassId Id of classs being allocated + * @params[in] pAllocParams void pointer containing creation parameters. + * + * @returns + * ENG_INVALID, if creation params are NULL or for unknown engine, and returns + * the right engine descriptor otherwise. 
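+ *
+ * For example (illustrative only): a TURING_DMA_COPY_A allocation whose
+ * NVB0B5_ALLOCATION_PARAMETERS use version 1 with
+ * engineType = NV2080_ENGINE_TYPE_COPY(2) selects engine instance 2 and,
+ * once ceIndexFromType() has applied any MIG remapping, typically resolves
+ * to ENG_CE(2).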
+ */ +ENGDESCRIPTOR +kceGetEngineDescFromAllocParams(OBJGPU *pGpu, NvU32 externalClassId, void *pAllocParams) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + NvU32 engineInstance = 0; + + NV_ASSERT(pAllocParams); + + if (IsAMODEL(pGpu)) + { + // On AMODEL CopyEngine is allocated using OBJGR + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + + NV_ASSERT_OK( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + pCallContext->pClient->hClient, + &ref)); + + NV_ASSERT_OK( + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_GR(0), + &engineInstance)); + return ENG_GR(NV2080_ENGINE_TYPE_GR_IDX(engineInstance)); + } + return ENG_GR(0); + } + + switch (externalClassId) + { + case MAXWELL_DMA_COPY_A: + case PASCAL_DMA_COPY_A: + case PASCAL_DMA_COPY_B: + case VOLTA_DMA_COPY_A: + case TURING_DMA_COPY_A: + case AMPERE_DMA_COPY_A: + case AMPERE_DMA_COPY_B: + + { + NVB0B5_ALLOCATION_PARAMETERS *pNvA0b5CreateParms = pAllocParams; + + switch (pNvA0b5CreateParms->version) + { + case NVB0B5_ALLOCATION_PARAMETERS_VERSION_0: + { + NV_PRINTF(LEVEL_INFO, + "Version = 0, using engineType (=%d) as CE instance\n", + pNvA0b5CreateParms->engineType); + engineInstance = pNvA0b5CreateParms->engineType; + break; + } + + case NVB0B5_ALLOCATION_PARAMETERS_VERSION_1: + { + NvU32 i; + + // Loop over supported engines + for (i = 0; i < NV2080_ENGINE_TYPE_COPY_SIZE; i++) + { + if (pNvA0b5CreateParms->engineType == NV2080_ENGINE_TYPE_COPY(i)) + { + engineInstance = i; + break; + } + } + + // Make sure we found something we support + if (i == NV2080_ENGINE_TYPE_COPY_SIZE) + { + NV_PRINTF(LEVEL_ERROR, + "Unknown engine type %d requested\n", + pNvA0b5CreateParms->engineType); + return ENG_INVALID; + } + + NV_PRINTF(LEVEL_INFO, + "Version = 1, using engineType=%d\n", + pNvA0b5CreateParms->engineType); + + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "Unknown version = %d\n", + pNvA0b5CreateParms->version); + return ENG_INVALID; + } + } + break; + } + + default: + { + return ENG_INVALID; + } + } + + NV_STATUS status = ceIndexFromType(pGpu, pCallContext->pClient->hClient, + NV2080_ENGINE_TYPE_COPY(engineInstance), &engineInstance); + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Class %d, CE%d\n", externalClassId, engineInstance); + return ENG_CE(engineInstance); + } + else + NV_PRINTF(LEVEL_ERROR, "Failed to determine CE number\n"); + + return ENG_INVALID; +} + +NV_STATUS +kcectxConstruct_IMPL +( + KernelCeContext *pKCeContext, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pKCeContext, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + NvU32 ceIdx = GET_CE_IDX(pChannelDescendant->resourceDesc.engDesc); + + // + // Don't do anything for AMODEL + // + if (IsAMODEL(pGpu)) + { + return NV_OK; + } + + NV_ASSERT_OR_RETURN(GPU_GET_KCE(pGpu, ceIdx), NV_ERR_INVALID_PARAMETER); + + return NV_OK; +} + +void +kcectxDestruct_IMPL +( + KernelCeContext *pKCeContext +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pKCeContext, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + + if (IsAMODEL(pGpu) || IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + return; + + chandesIsolateOnDestruct(pChannelDescendant); +} diff --git a/src/nvidia/src/kernel/gpu/ce/kernel_ce_ctrl.c b/src/nvidia/src/kernel/gpu/ce/kernel_ce_ctrl.c new file mode 100644 index 000000000..f13039e51 --- 
/dev/null +++ b/src/nvidia/src/kernel/gpu/ce/kernel_ce_ctrl.c @@ -0,0 +1,170 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/locks.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/ce/kernel_ce_private.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" +#include "vgpu/rpc.h" + +// +// CE RM Device Controls +// + +// +// NV2080_CTRL_CMD_CE_GET_CAPS passes userspace pointer for copyin/out. +// NV2080_CTRL_CMD_CE_GET_CAPS_V2 stores data inline. +// + +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdCeGetCaps_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CE_GET_CAPS_PARAMS *pCeCapsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelCE *pKCe; + NvU32 ceNumber; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // sanity check array size + if (pCeCapsParams->capsTblSize != NV2080_CTRL_CE_CAPS_TBL_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n", + pCeCapsParams->capsTblSize, NV2080_CTRL_CE_CAPS_TBL_SIZE); + return NV_ERR_INVALID_ARGUMENT; + } + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to get blacklist information from host RM + // + if (IS_VIRTUAL(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV2080_CTRL_CE_GET_CAPS_V2_PARAMS ceCapsv2Params = { 0 }; + + ceCapsv2Params.ceEngineType = pCeCapsParams->ceEngineType; + + NV_RM_RPC_CONTROL(pGpu, pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + NV2080_CTRL_CMD_CE_GET_CAPS_V2, + &ceCapsv2Params, + sizeof(ceCapsv2Params), + status); + + if (status == NV_OK) + { + portMemCopy(NvP64_VALUE(pCeCapsParams->capsTbl), + (sizeof(NvU8) * NV2080_CTRL_CE_CAPS_TBL_SIZE), + ceCapsv2Params.capsTbl, + (sizeof(NvU8) * NV2080_CTRL_CE_CAPS_TBL_SIZE)); + } + return status; + } + + NV_ASSERT_OK_OR_RETURN(ceIndexFromType(pGpu, RES_GET_CLIENT_HANDLE(pSubdevice), pCeCapsParams->ceEngineType, &ceNumber)); + + pKCe = GPU_GET_KCE(pGpu, ceNumber); + + // Return an unsupported error for not present or stubbed CEs as they are + // not supposed to be user visible and cannot be allocated anyway. 
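+ //
+ // (For reference, an illustrative V1 caller supplies the table itself:
+ // capsTblSize set to NV2080_CTRL_CE_CAPS_TBL_SIZE and capsTbl pointing at an
+ // NvU8 buffer of that size; the size check above enforces this contract.)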
+ if (!pKCe) + { + return NV_ERR_NOT_SUPPORTED; + } + + // now fill in caps for this CE + return kceGetDeviceCaps(pGpu, pKCe, pCeCapsParams->ceEngineType, NvP64_VALUE(pCeCapsParams->capsTbl)); +} + +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdCeGetCapsV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CE_GET_CAPS_V2_PARAMS *pCeCapsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelCE *pKCe; + NvU32 ceNumber; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + NV_PRINTF(LEVEL_INFO, "NV2080_CTRL_CE_GET_CAPS_V2 ceEngineType = %d\n", pCeCapsParams->ceEngineType); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to get blacklist information from host RM + // + if (IS_VIRTUAL(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + return status; + } + + NV_ASSERT_OK_OR_RETURN(ceIndexFromType(pGpu, RES_GET_CLIENT_HANDLE(pSubdevice), pCeCapsParams->ceEngineType, &ceNumber)); + + pKCe = GPU_GET_KCE(pGpu, ceNumber); + + // Return an unsupported error for not present or stubbed CEs as they are + // not supposed to be user visible and cannot be allocated anyway. + if (!pKCe) + { + NV_PRINTF(LEVEL_INFO, "Skipping stubbed CE %d\n", ceNumber); + return NV_ERR_NOT_SUPPORTED; + } + + // now fill in caps for this CE + return kceGetDeviceCaps(pGpu, pKCe, pCeCapsParams->ceEngineType, NvP64_VALUE(pCeCapsParams->capsTbl)); +} diff --git a/src/nvidia/src/kernel/gpu/ce/kernel_ce_shared.c b/src/nvidia/src/kernel/gpu/ce/kernel_ce_shared.c new file mode 100644 index 000000000..4aceef1d9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/ce/kernel_ce_shared.c @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "gpu/bus/kern_bus.h" + +NvBool ceIsCeGrce(OBJGPU *pGpu, NvU32 ceEngineType) +{ + NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS partnerParams = {0}; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + if (IsAMODEL(pGpu) || IsT234D(pGpu)) + return NV_FALSE; + + NV_ASSERT_OR_RETURN(NV2080_ENGINE_TYPE_IS_COPY(ceEngineType), NV_FALSE); + + NvU32 i; + NV_STATUS status = NV_OK; + + partnerParams.engineType = ceEngineType; + partnerParams.numPartners = 0; + + // See if the hal wants to handle this + status = kfifoGetEnginePartnerList_HAL(pGpu, pKernelFifo, &partnerParams); + if (status != NV_OK) + { + // For channels that the hal didnt handle, we should just return + // all of the supported engines except for the target engine. + // + // Update the engine Database + status = gpuUpdateEngineTable(pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not update the engine db. This is fatal\n"); + DBG_BREAKPOINT(); + return NV_FALSE; + } + + // Make sure it all will fit + if (pGpu->engineDB.size > NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS) + { + NV_PRINTF(LEVEL_ERROR, + "PartnerList space too small. This is fatal\n"); + DBG_BREAKPOINT(); + return NV_FALSE; + } + + // Copy over all of the engines except the target + for (i = 0; i < pGpu->engineDB.size; i++) + { + // Skip the engine handed in + if (pGpu->engineDB.pType[i] != partnerParams.engineType ) + { + partnerParams.partnerList[partnerParams.numPartners++] = pGpu->engineDB.pType[i]; + } + } + } + + // check if gr is in the partnerList + for (i = 0; i < partnerParams.numPartners; i++) + { + if (partnerParams.partnerList[i] == NV2080_ENGINE_TYPE_GRAPHICS) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +NvU32 ceCountGrCe(OBJGPU *pGpu) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 engIdx; + NvU32 grCeCount; + + if (pKernelBus == NULL || IsAMODEL(pGpu)) + return 0; + + grCeCount = 0; + + // + // bug 2188230: Until FIFO reports max instance id for a given engine type, + // start by iterating over all CE indices supported by POBJGPU + // + for (engIdx = 0; engIdx < GPU_MAX_CES; ++engIdx) + { + if (kbusCheckEngine_HAL(pGpu, pKernelBus, ENG_CE(engIdx)) && + ceIsCeGrce(pGpu, NV2080_ENGINE_TYPE_COPY(engIdx))) + { + grCeCount++; + } + } + + return grCeCount; +} diff --git a/src/nvidia/src/kernel/gpu/deferred_api.c b/src/nvidia/src/kernel/gpu/deferred_api.c new file mode 100644 index 000000000..7a09a5642 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/deferred_api.c @@ -0,0 +1,693 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/deferred_api.h" +#include "mem_mgr/vaspace.h" +#include "class/cl5080.h" +#include "ctrl/ctrl2080.h" +#include "vgpu/rpc.h" +#include "rmapi/control.h" +#include "core/locks.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/rs_utils.h" +#include "resserv/rs_server.h" + +// Support Routines +static NV_STATUS Class5080AddDeferredApi( + PDEFERRED_API_OBJECT pDeferredApiObject, + NvHandle hClient, + NvHandle hDeferredApi, + NV5080_CTRL_DEFERRED_API_PARAMS *pDeferredApi, + NvU32 size, + NvBool bUserModeArgs +) +{ + NV_STATUS rmStatus = NV_OK; + PDEFERRED_API_INFO pCliDeferredApi; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (NV_OK != serverGetClientUnderLock(&g_resServ, hClient, NULL)) + return NV_ERR_INVALID_CLIENT; + + // validate handle + if (!serverutilValidateNewResourceHandle(hClient, hDeferredApi)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + // allocate a new Deferred Api and add to the client + pCliDeferredApi = portMemAllocNonPaged(sizeof(DEFERRED_API_INFO) + size); + if (NULL != pCliDeferredApi) + { + // initialize the entry + pCliDeferredApi->Client = hClient; + pCliDeferredApi->Handle = hDeferredApi; + pCliDeferredApi->pDeferredApiInfo = (NvU8 *)pCliDeferredApi + sizeof(DEFERRED_API_INFO); + pCliDeferredApi->Node.keyStart = pCliDeferredApi->Handle; + pCliDeferredApi->Node.keyEnd = pCliDeferredApi->Handle; + pCliDeferredApi->Node.Data = pCliDeferredApi; + pCliDeferredApi->privLevel = pCallContext->secInfo.privLevel; + pCliDeferredApi->Flags = 0; + portMemCopy(pCliDeferredApi->pDeferredApiInfo, size, pDeferredApi, size); + + // link in the new entry + rmStatus = btreeInsert(&pCliDeferredApi->Node, &pDeferredApiObject->DeferredApiList); + if (rmStatus != NV_OK) + { + portMemFree(pCliDeferredApi); + pCliDeferredApi = NULL; + } + } + else + return NV_ERR_NO_MEMORY; + + return rmStatus; + +} // end of Class5080AddDeferredApi() + +static NV_STATUS Class5080AddDeferredApiV2( + PDEFERRED_API_OBJECT pDeferredApiObject, + NvHandle hClient, + NvHandle hDeferredApi, + NV5080_CTRL_DEFERRED_API_V2_PARAMS *pDeferredApi, + NvU32 size +) +{ + NV_STATUS rmStatus = NV_OK; + PDEFERRED_API_INFO pCliDeferredApi; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (NV_OK != serverGetClientUnderLock(&g_resServ, hClient, NULL)) + return NV_ERR_INVALID_CLIENT; + + // validate handle + if (!serverutilValidateNewResourceHandle(hClient, hDeferredApi)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + rmStatus = Class5080GetDeferredApiInfo(pDeferredApiObject, hDeferredApi, &pCliDeferredApi); + + // Object already exists + if (NV_OK == rmStatus) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + // allocate a new Deferred Api and add to the client + pCliDeferredApi = portMemAllocNonPaged(sizeof(DEFERRED_API_INFO) + size); + if (NULL != 
pCliDeferredApi) + { + // initialize the entry + pCliDeferredApi->Client = hClient; + pCliDeferredApi->Handle = hDeferredApi; + pCliDeferredApi->pDeferredApiInfo = (NvU8 *)pCliDeferredApi + sizeof(DEFERRED_API_INFO); + pCliDeferredApi->Node.keyStart = pCliDeferredApi->Handle; + pCliDeferredApi->Node.keyEnd = pCliDeferredApi->Handle; + pCliDeferredApi->Node.Data = pCliDeferredApi; + pCliDeferredApi->Flags = 0; + pCliDeferredApi->privLevel = pCallContext->secInfo.privLevel; + portMemCopy(pCliDeferredApi->pDeferredApiInfo, size, pDeferredApi, size); + + // link in the new entry + rmStatus = btreeInsert(&pCliDeferredApi->Node, &pDeferredApiObject->DeferredApiList); + + if (rmStatus != NV_OK) + { + portMemFree(pCliDeferredApi); + pCliDeferredApi = NULL; + } + } + else + return NV_ERR_NO_MEMORY; + + return rmStatus; + +} // end of Class5080AddDeferredApiV2() + +NV_STATUS Class5080GetDeferredApiInfo( + PDEFERRED_API_OBJECT pDeferredApiObject, + NvHandle hDeferredApi, + PDEFERRED_API_INFO * ppCliDeferredApi +) +{ + PNODE pNode; + + if (btreeSearch(hDeferredApi, &pNode, pDeferredApiObject->DeferredApiList) == NV_OK) + { + *ppCliDeferredApi = pNode->Data; + return NV_OK; + } + + return NV_ERR_INVALID_DATA; + +} // end of Class5080GetDeferredApiInfo() + +static NV_STATUS Class5080DelDeferredApi( + PDEFERRED_API_OBJECT pDeferredApiObject, + NvHandle hDeferredApi +) +{ + PDEFERRED_API_INFO pDeferredApi = NULL; + NV_STATUS status; + PNODE pNode; + + // remove the event from the client database + if (NV_OK == Class5080GetDeferredApiInfo(pDeferredApiObject, + hDeferredApi, &pDeferredApi)) + { + status = btreeSearch(hDeferredApi, &pNode, pDeferredApiObject->DeferredApiList); + if (status != NV_OK) + return NV_ERR_GENERIC; + + status = btreeUnlink(pNode, &pDeferredApiObject->DeferredApiList); + if (status == NV_OK) + { + NV5080_CTRL_DEFERRED_API_PARAMS *pDeferredApiParams; + pDeferredApiParams = (NV5080_CTRL_DEFERRED_API_PARAMS *)pDeferredApi->pDeferredApiInfo; + + if (pDeferredApi->Flags & DEFERRED_API_INFO_FLAGS_HAS_PRIVATE_DATA_ALLOC) + { + portMemFree((void *)NvP64_VALUE(pDeferredApi->pDeferredPrivateData)); + } + + if (DRF_VAL(5080_CTRL, _CMD_DEFERRED_API, _FLAGS_WAIT_FOR_TLB_FLUSH, pDeferredApiParams->flags) == + NV5080_CTRL_CMD_DEFERRED_API_FLAGS_WAIT_FOR_TLB_FLUSH_TRUE) + { + // decrement count, if API was waiting on a TLB flush, but never saw one + if ((pDeferredApi->Flags & DEFERRED_API_INFO_FLAGS_HAS_EXECUTED) && + !(pDeferredApi->Flags & DEFERRED_API_INFO_FLAGS_HAS_TLB_FLUSHED)) + { + pDeferredApiObject->NumWaitingOnTLBFlush--; + } + } + + // free the list element + portMemFree(pDeferredApi); + return NV_OK; + } + } + + return NV_ERR_GENERIC; + +} // end of Class5080DelDeferredApi() + +static NV_STATUS _Class5080UpdateTLBFlushState( + PDEFERRED_API_OBJECT pDeferredApiObject +) +{ + PNODE pNode; + PDEFERRED_API_INFO pCliDeferredApi; + NV5080_CTRL_DEFERRED_API_PARAMS *pDeferredApi; + + btreeEnumStart(0, &pNode, pDeferredApiObject->DeferredApiList); + while (pNode && + pDeferredApiObject->NumWaitingOnTLBFlush) + { + pCliDeferredApi = pNode->Data; + pDeferredApi = (NV5080_CTRL_DEFERRED_API_PARAMS *) pCliDeferredApi->pDeferredApiInfo; + + // update any APIs with WAIT_FOR_TLB_FLUSH set + if (DRF_VAL(5080_CTRL, _CMD_DEFERRED_API, _FLAGS_WAIT_FOR_TLB_FLUSH, pDeferredApi->flags) == + NV5080_CTRL_CMD_DEFERRED_API_FLAGS_WAIT_FOR_TLB_FLUSH_TRUE) + { + // check if API has EXECUTED and newly TLB_FLUSHED + if ((pCliDeferredApi->Flags & DEFERRED_API_INFO_FLAGS_HAS_EXECUTED) && + !(pCliDeferredApi->Flags & 
DEFERRED_API_INFO_FLAGS_HAS_TLB_FLUSHED)) + { + pCliDeferredApi->Flags |= DEFERRED_API_INFO_FLAGS_HAS_TLB_FLUSHED; + pDeferredApiObject->NumWaitingOnTLBFlush--; + + btreeEnumNext(&pNode, pDeferredApiObject->DeferredApiList); + + // check if API can now be implicitly deleted + if (DRF_VAL(5080_CTRL, _CMD_DEFERRED_API, _FLAGS_DELETE, pDeferredApi->flags) == + NV5080_CTRL_CMD_DEFERRED_API_FLAGS_DELETE_IMPLICIT) + { + Class5080DelDeferredApi(pDeferredApiObject, pCliDeferredApi->Handle); + } + continue; + } + } + btreeEnumNext(&pNode, pDeferredApiObject->DeferredApiList); + } + + return NV_OK; + +} // end of Class5080UpdateTLBFlushState() + +//--------------------------------------------------------------------------- +// +// Class object creation and destruction +// +//--------------------------------------------------------------------------- + +NV_STATUS +defapiConstruct_IMPL +( + DeferredApiObject *pDeferredApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + if (pParams->pAllocParams != NULL) + { + NV5080_ALLOC_PARAMS *pAllocParams = pParams->pAllocParams; + if (pAllocParams->notifyCompletion) + { + staticCast(pDeferredApi, ChannelDescendant)->notifyAction = NV_OS_WRITE_THEN_AWAKEN; + staticCast(pDeferredApi, ChannelDescendant)->bNotifyTrigger = NV_TRUE; + } + } + + return NV_OK; +} + +void +defapiDestruct_IMPL +( + DeferredApiObject *pDeferredApi +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pDeferredApi, ChannelDescendant); + PNODE pNode; + PDEFERRED_API_INFO pCliDeferredApi; + + chandesIsolateOnDestruct(pChannelDescendant); + + // Free All Outstanding API on the btree + btreeEnumStart(0, &pNode, pDeferredApi->DeferredApiList); + while (pNode) + { + pCliDeferredApi = pNode->Data; + + btreeEnumNext(&pNode, pDeferredApi->DeferredApiList); + Class5080DelDeferredApi(pDeferredApi, pCliDeferredApi->Handle); + } +} + +NV_STATUS +defapiCtrlCmdDeferredApi_IMPL +( + DeferredApiObject *pDeferredApiObj, + NV5080_CTRL_DEFERRED_API_PARAMS *pDeferredApi +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + OBJGPU *pGpu = GPU_RES_GET_GPU(pDeferredApiObj); + NV_STATUS status = NV_OK; + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, there is nothing to do at this point in the + // guest OS (where IS_VIRTUAL(pGpu) is true). + // + if (IS_VIRTUAL(pGpu)) + { + NV_RM_RPC_DEFERRED_API_CONTROL(pGpu, + RES_GET_CLIENT_HANDLE(pDeferredApiObj), + RES_GET_PARENT_HANDLE(pDeferredApiObj), + RES_GET_HANDLE(pDeferredApiObj), + (void *)pDeferredApi, + sizeof(NV5080_CTRL_DEFERRED_API_PARAMS), + status); + return status; + } + + return Class5080AddDeferredApi(pDeferredApiObj, + RES_GET_CLIENT_HANDLE(pDeferredApiObj), + pDeferredApi->hApiHandle, + pDeferredApi, + sizeof(NV5080_CTRL_DEFERRED_API_PARAMS), + (pCallContext->secInfo.paramLocation != PARAM_LOCATION_KERNEL)); +} + +NV_STATUS +defapiCtrlCmdDeferredApiV2_IMPL +( + DeferredApiObject *pDeferredApiObj, + NV5080_CTRL_DEFERRED_API_V2_PARAMS *pDeferredApi +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDeferredApiObj); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, there is nothing to do at this point in the + // guest OS (where IS_VIRTUAL(pGpu) is true). 
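+    // The control parameters are simply forwarded to the host RM via the
+    // RPC issued below.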
+ // + if (IS_VIRTUAL(pGpu)) + { + NV_RM_RPC_DEFERRED_API_CONTROL(pGpu, + RES_GET_CLIENT_HANDLE(pDeferredApiObj), + RES_GET_PARENT_HANDLE(pDeferredApiObj), + RES_GET_HANDLE(pDeferredApiObj), + (void *)pDeferredApi, + sizeof(NV5080_CTRL_DEFERRED_API_V2_PARAMS), + status); + return status; + } + + return Class5080AddDeferredApiV2(pDeferredApiObj, + RES_GET_CLIENT_HANDLE(pDeferredApiObj), + pDeferredApi->hApiHandle, + pDeferredApi, + sizeof(NV5080_CTRL_DEFERRED_API_V2_PARAMS)); +} + +NV_STATUS +defapiCtrlCmdRemoveApi_IMPL +( + DeferredApiObject *pDeferredApiObj, + NV5080_CTRL_REMOVE_API_PARAMS *pRemoveApi +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDeferredApiObj); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, there is nothing to do at this point in the + // guest OS (where IS_VIRTUAL(pGpu) is true). + // + if (IS_VIRTUAL(pGpu)) + { + NV_RM_RPC_REMOVE_DEFERRED_API(pGpu, + RES_GET_CLIENT_HANDLE(pDeferredApiObj), + RES_GET_PARENT_HANDLE(pDeferredApiObj), + RES_GET_HANDLE(pDeferredApiObj), + pRemoveApi->hApiHandle, status); + return status; + } + + return Class5080DelDeferredApi(pDeferredApiObj, + pRemoveApi->hApiHandle); +} + +static NV_STATUS +_class5080DeferredApiV2 +( + OBJGPU *pGpu, + ChannelDescendant *Object, + PMETHOD Method, + NvU32 Offset, + NvU32 Data +) +{ + PDEFERRED_API_OBJECT pDeferredApiObject = dynamicCast(Object, DeferredApiObject); + PDEFERRED_API_INFO pCliDeferredApi = NULL; + NV5080_CTRL_DEFERRED_API_PARAMS *pDeferredApi; + NV_STATUS rmStatus = NV_OK; + NvU32 paramSize = 0; + NvHandle hDevice; + NvBool bIsCtrlCall = NV_TRUE; + + rmStatus = Class5080GetDeferredApiInfo(pDeferredApiObject, + Data, &pCliDeferredApi); + if (rmStatus == NV_OK) + { + pDeferredApi = pCliDeferredApi->pDeferredApiInfo; + + switch (pDeferredApi->cmd) + { + case NV2080_CTRL_CMD_GPU_INITIALIZE_CTX: + paramSize = sizeof(NV2080_CTRL_GPU_INITIALIZE_CTX_PARAMS); + break; + + case NV2080_CTRL_CMD_GPU_PROMOTE_CTX: + paramSize = sizeof(NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS); + break; + + case NV2080_CTRL_CMD_GPU_EVICT_CTX: + paramSize = sizeof(NV2080_CTRL_GPU_EVICT_CTX_PARAMS); + break; + + case NV2080_CTRL_CMD_FIFO_UPDATE_CHANNEL_INFO: + paramSize = sizeof(NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS); + break; + + case NV2080_CTRL_CMD_DMA_INVALIDATE_TLB: + { + OBJGPU *pTgtGpu; + RsClient *pClientVA; + + bIsCtrlCall = NV_FALSE; + + rmStatus = serverGetClientUnderLock(&g_resServ, pDeferredApi->hClientVA, &pClientVA); + if (rmStatus != NV_OK) + break; + + if (CliSetSubDeviceContext( + pDeferredApi->hClientVA, + pDeferredApi->hDeviceVA, + &hDevice, + &pTgtGpu) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to find target gpu from hClient(%x), hDevice(%x)\n", + pDeferredApi->hClientVA, pDeferredApi->hDeviceVA); + + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + else + { + OBJVASPACE *pVAS = NULL; + rmStatus = vaspaceGetByHandleOrDeviceDefault(pClientVA, + hDevice, + pDeferredApi->api_bundle.InvalidateTlb.hVASpace, + &pVAS); + if (NV_OK == rmStatus) + { + vaspaceInvalidateTlb(pVAS, pTgtGpu, PTE_UPGRADE); + + if (pDeferredApiObject->NumWaitingOnTLBFlush) + rmStatus = _Class5080UpdateTLBFlushState(pDeferredApiObject); + } + } + break; + } + + case NV2080_CTRL_CMD_FB_SET_GPU_CACHE_ALLOC_POLICY_V2: + paramSize = sizeof(NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS); + break; + + case NV2080_CTRL_CMD_GR_CTXSW_ZCULL_BIND: + paramSize = sizeof(NV2080_CTRL_GR_CTXSW_ZCULL_BIND_PARAMS); + break; + + case NV2080_CTRL_CMD_GR_CTXSW_PM_BIND: + paramSize = 
sizeof(NV2080_CTRL_GR_CTXSW_PM_BIND_PARAMS); + break; + + case NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND: + paramSize = sizeof(NV2080_CTRL_GR_CTXSW_PREEMPTION_BIND_PARAMS); + break; + + case NV2080_CTRL_CMD_FB_SET_GPU_CACHE_PROMOTION_POLICY: + paramSize = sizeof(NV2080_CTRL_FB_GPU_CACHE_PROMOTION_POLICY_PARAMS); + break; + + default: + bIsCtrlCall = NV_FALSE; + paramSize = 0; + NV_PRINTF(LEVEL_ERROR, "Unknown or Unimplemented Command %x\n", + pDeferredApi->cmd); + NV_ASSERT(0); + rmStatus = NV_ERR_INVALID_ARGUMENT; + break; + } + + if (bIsCtrlCall) + { + RmCtrlParams rmCtrlParams; + RmCtrlExecuteCookie rmCtrlExecuteCookie = {0}; + RS_LOCK_INFO lockInfo = {0}; + Subdevice *pSubdevice; + RsClient *pRsClient; + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + LOCK_ACCESS_TYPE access; + NvU32 releaseFlags = 0; + + portMemSet(&rmCtrlParams, 0, sizeof(RmCtrlParams)); + rmCtrlParams.hClient = pCliDeferredApi->Client; + rmCtrlParams.pGpu = pGpu; + rmCtrlParams.cmd = pDeferredApi->cmd; + rmCtrlParams.flags = 0; + rmCtrlParams.pParams = &pDeferredApi->api_bundle; + rmCtrlParams.paramsSize = paramSize; + rmCtrlParams.secInfo.privLevel = pCliDeferredApi->privLevel; + rmCtrlParams.secInfo.paramLocation = PARAM_LOCATION_KERNEL; + rmCtrlParams.pCookie = &rmCtrlExecuteCookie; + rmCtrlParams.pLockInfo = &lockInfo; + rmCtrlParams.bDeferredApi = NV_TRUE; + + lockInfo.flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + + rmCtrlParams.flags |= NVOS54_FLAGS_LOCK_BYPASS; + + // In case of deferred API, the parameters are already copied + // from user space to kernel space when the deferred API is registered + // So the IRQL_RAISED flag is set to avoid to second copy of paramaters + if ((RMCFG_FEATURE_RM_BASIC_LOCK_MODEL && osIsRaisedIRQL()) || + hypervisorIsVgxHyper()) + { + rmCtrlParams.flags |= NVOS54_FLAGS_IRQL_RAISED; + } + + rmStatus = serverGetClientUnderLock(&g_resServ, pCliDeferredApi->Client, &pRsClient); + if (rmStatus != NV_OK) + { + goto cleanup; + } + + rmStatus = subdeviceGetByGpu(pRsClient, pGpu, &pSubdevice); + if (rmStatus != NV_OK) + { + goto cleanup; + } + + rmStatus = resControlLookup(staticCast(pSubdevice, RsResource), &rmCtrlParams, &pEntry); + if (rmStatus != NV_OK) + { + goto cleanup; + } + + NV_ASSERT(pEntry != NULL); + // Initialize the execution cookie + serverControl_InitCookie(pEntry, &rmCtrlExecuteCookie); + + rmStatus = serverControl_Prologue(&g_resServ, &rmCtrlParams, &access, &releaseFlags); + + if (rmStatus == NV_OK) + { + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pResourceRef = RES_GET_REF(pSubdevice); + callContext.pClient = pRsClient; + callContext.secInfo = rmCtrlParams.secInfo; + callContext.pServer = &g_resServ; + callContext.pControlParams = &rmCtrlParams; + callContext.pLockInfo = rmCtrlParams.pLockInfo; + resservSwapTlsCallContext(&pOldContext, &callContext); + + if (pEntry->paramSize == 0) + { + typedef NV_STATUS (*CONTROL_EXPORT_FNPTR_NO_PARAMS)(void*); + CONTROL_EXPORT_FNPTR_NO_PARAMS pFunc = ((CONTROL_EXPORT_FNPTR_NO_PARAMS) pEntry->pFunc); + rmStatus = pFunc((void*)pSubdevice); + } + else + { + typedef NV_STATUS (*CONTROL_EXPORT_FNPTR)(void*, void*); + CONTROL_EXPORT_FNPTR pFunc = ((CONTROL_EXPORT_FNPTR) pEntry->pFunc); + rmStatus = pFunc((void*)pSubdevice, rmCtrlParams.pParams); + } + + resservRestoreTlsCallContext(pOldContext); + } + + rmStatus = serverControl_Epilogue(&g_resServ, &rmCtrlParams, access, &releaseFlags, rmStatus); + } + +cleanup: + + 
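+    // Mark the entry as executed. The implicit-delete handling below either
+    // frees it immediately or, when it must wait for a TLB flush, defers the
+    // free to _Class5080UpdateTLBFlushState().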
pCliDeferredApi->Flags |= DEFERRED_API_INFO_FLAGS_HAS_EXECUTED; + + if (DRF_VAL(5080_CTRL, _CMD_DEFERRED_API, _FLAGS_DELETE, pDeferredApi->flags) == + NV5080_CTRL_CMD_DEFERRED_API_FLAGS_DELETE_IMPLICIT) + { + // delete implicitly, unless WAIT_FOR_TLB_FLUSH is also NV_TRUE + if (DRF_VAL(5080_CTRL, _CMD_DEFERRED_API, _FLAGS_WAIT_FOR_TLB_FLUSH, pDeferredApi->flags) == + NV5080_CTRL_CMD_DEFERRED_API_FLAGS_WAIT_FOR_TLB_FLUSH_TRUE) + { + pDeferredApiObject->NumWaitingOnTLBFlush++; + } + else + { + Class5080DelDeferredApi(pDeferredApiObject, Data); + } + } + } + + return rmStatus; +} + +static METHOD Nv50DeferredApi[] = +{ + { mthdNoOperation, 0x0100, 0x0103 }, + { _class5080DeferredApiV2, 0x0200, 0x0203 }, +}; + +NV_STATUS defapiGetSwMethods_IMPL +( + DeferredApiObject *pDeferredApi, + METHOD **ppMethods, + NvU32 *pNumMethods +) +{ + *ppMethods = Nv50DeferredApi; + *pNumMethods = NV_ARRAY_ELEMENTS32(Nv50DeferredApi); + return NV_OK; +} + +NvBool defapiIsSwMethodStalling_IMPL +( + DeferredApiObject *pDeferredApi, + NvU32 hDeferredApi +) +{ + PDEFERRED_API_INFO pCliDeferredApi = NULL; + NV5080_CTRL_DEFERRED_API_PARAMS * pDeferredApiParams; + + NV_STATUS rmStatus = Class5080GetDeferredApiInfo(pDeferredApi, + hDeferredApi, &pCliDeferredApi); + if (rmStatus == NV_OK) + { + pDeferredApiParams = pCliDeferredApi->pDeferredApiInfo; + + // Clear the PBDMA interrupt before executing the software method. + if (pDeferredApiParams->cmd == NV2080_CTRL_CMD_FIFO_UPDATE_CHANNEL_INFO) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + diff --git a/src/nvidia/src/kernel/gpu/device.c b/src/nvidia/src/kernel/gpu/device.c new file mode 100644 index 000000000..0eedc106e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/device.c @@ -0,0 +1,652 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This is a device resource implementation. 
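+*        A Device object backs each NV01_DEVICE_0 allocation made by an
+*        RM client.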
+* +******************************************************************************/ + + + +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu/device/device.h" + +#include "class/cl0080.h" +#include "core/locks.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" + +#include "rmapi/rs_utils.h" +#include "nvsecurityinfo.h" + +#include "gpu/gr/kernel_sm_debugger_session.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "Nvcm.h" +#include "diagnostics/gpu_acct.h" +#include "gpu/perf/kern_cuda_limit.h" + +static NV_STATUS _deviceTeardown(Device *pDevice, CALL_CONTEXT *pCallContext); +static NV_STATUS _deviceTeardownRef(Device *pDevice, CALL_CONTEXT *pCallContext); + +NV_STATUS +deviceConstruct_IMPL +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV0080_ALLOC_PARAMETERS *pNv0080AllocParams = pParams->pAllocParams; + NvU32 deviceInst, flags, vaMode; + NvU32 deviceClass = pParams->externalClassId; + NvHandle hClientShare; + NvHandle hTargetClient = NV01_NULL_OBJECT; + NvHandle hTargetDevice = NV01_NULL_OBJECT; + NvU64 vaSize = 0; + NV_STATUS rmStatus = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJGPU *pGpu; + NvU64 vaStartInternal = 0; + NvU64 vaLimitInternal = 0; + NvU32 physicalAllocFlags; + + if (pNv0080AllocParams == NULL) + { + deviceInst = pParams->externalClassId - NV01_DEVICE_0; + hClientShare = NV01_NULL_OBJECT; + flags = 0; + vaSize = 0; + vaMode = 0; + } + else + { + deviceInst = pNv0080AllocParams->deviceId; + hClientShare = pNv0080AllocParams->hClientShare; + hTargetClient = pNv0080AllocParams->hTargetClient; + hTargetDevice = pNv0080AllocParams->hTargetDevice; + flags = pNv0080AllocParams->flags; + vaSize = pNv0080AllocParams->vaSpaceSize; + vaMode = pNv0080AllocParams->vaMode; + + // valid only if NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS is flagged. + if (flags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + vaStartInternal = pNv0080AllocParams->vaStartInternal; + vaLimitInternal = pNv0080AllocParams->vaLimitInternal; + + if ((vaLimitInternal < vaStartInternal) || (vaLimitInternal == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + } + + // validate device instance + if (gpumgrIsDeviceInstanceValid(deviceInst) != NV_OK) + { + return NV_ERR_INVALID_CLASS; + } + + // Make sure this device has not been disabled + if (gpumgrIsDeviceEnabled(deviceInst) == NV_FALSE) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // add new device to client and set the device context + rmStatus = deviceInit(pDevice, pCallContext, pParams->hClient, pParams->hResource, deviceInst, + hClientShare, hTargetClient, hTargetDevice, vaSize, vaStartInternal, vaLimitInternal, + flags, vaMode); + if (rmStatus != NV_OK) + return rmStatus; + + pGpu = GPU_RES_GET_GPU(pDevice); + + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + if (!osIsGpuAccessible(pGpu)) + { + // Delete the device from the client since we should not be allocating it + _deviceTeardownRef(pDevice, pCallContext); + _deviceTeardown(pDevice, pCallContext); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + // + // Make sure this device is not in fullchip reset on OSes where it is + // restricted. 
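+    // Such allocations are rejected below with NV_ERR_GPU_IN_FULLCHIP_RESET.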
+ // + if (pOS->getProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + // Delete the device from the client since we should not be allocating it + _deviceTeardownRef(pDevice, pCallContext); + _deviceTeardown(pDevice, pCallContext); + return NV_ERR_GPU_IN_FULLCHIP_RESET; + } + + { + // + // If using thwap to generate an allocation failure here, fail the alloc + // right away + // + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + if (pKernelRc != NULL && + !krcTestAllowAlloc(pGpu, pKernelRc, + NV_ROBUST_CHANNEL_ALLOCFAIL_DEVICE)) + { + _deviceTeardownRef(pDevice, pCallContext); + _deviceTeardown(pDevice, pCallContext); + return NV_ERR_GENERIC; + } + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + physicalAllocFlags = flags & ~(NV_DEVICE_ALLOCATION_FLAGS_PLUGIN_CONTEXT + | NV_DEVICE_ALLOCATION_FLAGS_HOST_VGPU_DEVICE); + + NV_RM_RPC_ALLOC_SHARE_DEVICE(pGpu, pParams->hParent, pParams->hResource, pDevice->hClientShare, + hTargetClient, hTargetDevice, deviceClass, physicalAllocFlags, vaSize, vaMode, rmStatus); + if (rmStatus != NV_OK) + { + return rmStatus; + } + } + + return rmStatus; +} // end of deviceConstruct_IMPL + +void +deviceDestruct_IMPL +( + Device *pDevice +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + NV_STATUS rmStatus = NV_OK; + NV_STATUS tmpStatus; + NvHandle hClient; + NODE *pNode; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + resGetFreeParams(staticCast(pDevice, RsResource), &pCallContext, &pParams); + + hClient = pCallContext->pClient->hClient; + + NV_PRINTF(LEVEL_INFO, " type: device\n"); + + LOCK_METER_DATA(FREE_DEVICE, 0, 0, 0); + + // Free all device memory + btreeEnumStart(0, &pNode, pDevice->DevMemoryTable); + while (pNode != NULL) + { + Memory *pMemory = pNode->Data; + btreeEnumNext(&pNode, pDevice->DevMemoryTable); + + tmpStatus = pRmApi->Free(pRmApi, hClient, RES_GET_HANDLE(pMemory)); + if ((tmpStatus != NV_OK) && (rmStatus == NV_OK)) + rmStatus = tmpStatus; + } + + // free the device + if (_deviceTeardownRef(pDevice, pCallContext) != NV_OK || + _deviceTeardown(pDevice, pCallContext) != NV_OK) + { + tmpStatus = NV_ERR_INVALID_OBJECT_HANDLE; + if (tmpStatus != NV_OK && rmStatus == NV_OK) + rmStatus = tmpStatus; + } + + // + // If the client was created, but never had any devices successfully + // attached, we'll get here. 
The client's device structure will have + // been created, but pGpu will be NULL if the device was later found + // to be non-existent + // + if (GPU_RES_GET_GPU(pDevice)) + { + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + // vGpu support + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NvHandle hDevice = pResourceRef->hResource; + + if (rmStatus == NV_OK) + { + NV_RM_RPC_FREE(pGpu, hClient, hClient, hDevice, rmStatus); + } + + if (rmStatus != NV_OK) + { + pParams->status = rmStatus; + return; + } + + NV_RM_RPC_FREE(pGpu, hClient, NV01_NULL_OBJECT, hClient, rmStatus); + } + } +} // end of deviceDestruct_IMPL + +NV_STATUS +deviceControl_IMPL +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + + // + // Some assertions to make RMCTRL to NVOC migration smooth + // Those will be removed at the end of ctrl0080.def migration + // + NV_ASSERT_OR_RETURN(pParams->hClient == RES_GET_CLIENT_HANDLE(pDevice), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pParams->hObject == RES_GET_HANDLE(pDevice), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pParams->hParent == RES_GET_PARENT_HANDLE(pDevice), NV_ERR_INVALID_STATE); + + pParams->pGpuGrp = GPU_RES_GET_GPUGRP(pDevice); + return gpuresControl_IMPL(staticCast(pDevice, GpuResource), + pCallContext, pParams); +} + +NV_STATUS +deviceInternalControlForward_IMPL +( + Device *pDevice, + NvU32 command, + void *pParams, + NvU32 size +) +{ + return gpuresInternalControlForward_IMPL(staticCast(pDevice, GpuResource), command, pParams, size); +} + +// +// add a device with specified handle, instance num, within a specified client +// (hClientShare also specified) +// +NV_STATUS +deviceInit_IMPL +( + Device *pDevice, + CALL_CONTEXT *pCallContext, + NvHandle hClient, + NvHandle hDevice, + NvU32 deviceInst, + NvHandle hClientShare, + NvHandle hTargetClient, + NvHandle hTargetDevice, + NvU64 vaSize, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 allocFlags, + NvU32 vaMode +) +{ + OBJGPU *pGpu; + NV_STATUS status; + GpuResource *pGpuResource = staticCast(pDevice, GpuResource); + Device *pExistingDevice; + NvU32 gpuInst; + + if (deviceInst >= NV_MAX_DEVICES) + return NV_ERR_INVALID_ARGUMENT; + + // Check if device inst already allocated, fail if this call succeeds + status = deviceGetByInstance(pCallContext->pClient, deviceInst, &pExistingDevice); + if (status == NV_OK) + { + // + // RS-TODO: Status code should be NV_ERR_STATE_IN_USE, however keeping + // existing code from CliAllocElement (for now) + // + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Look up GPU and GPU Group + gpuInst = gpumgrGetPrimaryForDevice(deviceInst); + + if ((pGpu = gpumgrGetGpu(gpuInst)) == NULL) + { + return NV_ERR_INVALID_STATE; + } + + pDevice->hTargetClient = hTargetClient; + pDevice->hTargetDevice = hTargetDevice; + pDevice->pHostVgpuDevice = NULL; + pDevice->pKernelHostVgpuDevice = NULL; + + pDevice->deviceInst = deviceInst; + + // Update VA Mode + pDevice->vaMode = vaMode; + + gpuresSetGpu(pGpuResource, pGpu, NV_TRUE); + + status = deviceSetClientShare(pDevice, hClientShare, vaSize, + vaStartInternal, vaLimitInternal, allocFlags); + if (NV_OK != status) + goto done; + + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys); + RsClient *pRsClient = pCallContext->pClient; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON)) + { + // Try to start accounting for 
this procId/SubProcessId. + // If gpuacctStartGpuAccounting() fails, just assert and print error. + // gpuacctStartGpuAccounting() is not a major failure, we will continue with deviceInit() as normal. + if ((pRsClient->type == CLIENT_TYPE_USER) && (gpuacctStartGpuAccounting(pGpuAcct, + pGpu->gpuInstance, pClient->ProcID, pClient->SubProcessID) != NV_OK)) + { + NV_ASSERT(0); + NV_PRINTF(LEVEL_ERROR, + "gpuacctStartGpuAccounting() failed for procId : %d and SubProcessID : " + "%d. Ignoring the failure and continuing.\n", + pClient->ProcID, pClient->SubProcessID); + } + } + } + +done: + if (status != NV_OK) + { + deviceRemoveFromClientShare(pDevice); + } + + return status; +} // end of deviceInit_IMPL() + +// +// delete a device with a specified handle within a client +// +static NV_STATUS +_deviceTeardown +( + Device *pDevice, + CALL_CONTEXT *pCallContext +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + PORT_UNREFERENCED_VARIABLE(pGpu); + + deviceRemoveFromClientShare(pDevice); + + // DM-TODO: Force the client to move to Unicast... + NV_STATUS status = deviceKPerfCudaLimitCliDisable(pDevice, pGpu); + + // Adding status check here, but not returning it as we do not want to + // introduce any change in functionality. + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR,"Disable of Cuda limit activation failed"); + DBG_BREAKPOINT(); + } + + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys); + RsClient *pRsClient = pCallContext->pClient; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + + if ((pRsClient->type == CLIENT_TYPE_USER) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON)) + { + gpuacctStopGpuAccounting(pGpuAcct, + pGpu->gpuInstance, pClient->ProcID, pClient->SubProcessID); + } + } + + return NV_OK; +} + +static NV_STATUS _deviceTeardownRef +( + Device *pDevice, + CALL_CONTEXT *pCallContext +) +{ + + return NV_OK; +} + +NV_STATUS +deviceGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hDevice, + Device **ppDevice +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppDevice = NULL; + + status = clientGetResourceRef(pClient, hDevice, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDevice = dynamicCast(pResourceRef->pResource, Device); + + return (*ppDevice) ? 
NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +deviceGetByInstance_IMPL +( + RsClient *pClient, + NvU32 deviceInstance, + Device **ppDevice +) +{ + RS_ITERATOR it; + Device *pDevice; + + *ppDevice = NULL; + + it = clientRefIter(pClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(it.pClient, &it)) + { + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + if ((pDevice != NULL) && (deviceInstance == pDevice->deviceInst)) + { + *ppDevice = pDevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +deviceGetByGpu_IMPL +( + RsClient *pClient, + OBJGPU *pGpu, + NvBool bAnyInGroup, + Device **ppDevice +) +{ + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + NV_STATUS status; + + status = deviceGetByInstance(pClient, deviceInstance, ppDevice); + if (status != NV_OK) + return status; + + // If pGpu is not the primary GPU return failure + if (!bAnyInGroup && pGpu != GPU_RES_GET_GPU(*ppDevice)) + { + *ppDevice = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + + return NV_OK; +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * WARNING: This function is deprecated! Please use deviceGetByHandle. + */ +Device * +CliGetDeviceInfo +( + NvHandle hClient, + NvHandle hDevice +) +{ + RsClient *pClient; + NV_STATUS status; + Device *pDevice; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NULL; + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + + return (status == NV_OK) ? pDevice : NULL; +} + +/** + * WARNING: This function is deprecated and use is *strongly* discouraged + * (especially for new code!) + * + * From the function name (CliSetGpuContext) it appears as a simple accessor but + * violates expectations by modifying the SLI BC threadstate (calls to + * GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully managed + * by the caller. + * + * Instead of using this routine, please use deviceGetByHandle then call + * GPU_RES_GET_GPU, GPU_RES_GET_GPUGRP, GPU_RES_SET_THREAD_BC_STATE as needed. + * + * Note that GPU_RES_GET_GPU supports returning a pGpu for both pDevice, + * pSubdevice, the base pResource type, and any resource that inherits from + * GpuResource. That is, instead of using CliSetGpuContext or + * CliSetSubDeviceContext, please use following pattern to look up the pGpu: + * + * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource) + * + * To set the threadstate, please use: + * + * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource); + */ +NV_STATUS +CliSetGpuContext +( + NvHandle hClient, + NvHandle hDevice, + OBJGPU **ppGpu, + OBJGPUGRP **ppGpuGrp +) +{ + Device *pDevice; + RsClient *pClient; + NV_STATUS status; + + if (ppGpuGrp != NULL) + *ppGpuGrp = NULL; + + if (ppGpu != NULL) + *ppGpu = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + if (status != NV_OK) + return status; + + if (ppGpu != NULL) + *ppGpu = GPU_RES_GET_GPU(pDevice); + + if (ppGpuGrp != NULL) + *ppGpuGrp = GPU_RES_GET_GPUGRP(pDevice); + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + return NV_OK; +} + +/** + * WARNING: This function is deprecated! 
Please use gpuGetByRef() + */ +POBJGPU +CliGetGpuFromContext +( + RsResourceRef *pContextRef, + NvBool *pbBroadcast +) +{ + NV_STATUS status; + OBJGPU *pGpu; + + status = gpuGetByRef(pContextRef, pbBroadcast, &pGpu); + + return (status == NV_OK) ? pGpu : NULL; +} + +/** + * WARNING: This function is deprecated! Please use gpuGetByHandle() + */ +POBJGPU +CliGetGpuFromHandle +( + NvHandle hClient, + NvHandle hResource, + NvBool *pbBroadcast +) +{ + RsClient *pClient; + NV_STATUS status; + OBJGPU *pGpu; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NULL; + + status = gpuGetByHandle(pClient, hResource, pbBroadcast, &pGpu); + + return (status == NV_OK) ? pGpu : NULL; +} diff --git a/src/nvidia/src/kernel/gpu/device_ctrl.c b/src/nvidia/src/kernel/gpu/device_ctrl.c new file mode 100644 index 000000000..0dc6915ad --- /dev/null +++ b/src/nvidia/src/kernel/gpu/device_ctrl.c @@ -0,0 +1,370 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * device (NV01_DEVICE_0) class. Device-level control calls + * are broadcasted to all GPUs within the device. + */ + +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "core/system.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "kernel/gpu/rc/kernel_rc.h" + + + +// +// This rmctrl MUST NOT touch hw since it's tagged as NO_GPUS_ACCESS in ctrl0080.def +// RM allow this type of rmctrl to go through when GPU is not available. +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdGpuGetClasslist_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS *pClassListParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpuGetClassList(pGpu, &pClassListParams->numClasses, + NvP64_VALUE(pClassListParams->classList), ENG_INVALID); +} + +// +// This rmctrl MUST NOT touch hw since it's tagged with flag NO_GPUS_ACCESS in device.h +// RM allow this type of rmctrl to go through when GPU is not available. 
+// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdGpuGetClasslistV2_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS *pClassListParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pClassListParams->numClasses = NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE; + + return gpuGetClassList(pGpu, &pClassListParams->numClasses, + pClassListParams->classList, ENG_INVALID); +} + +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdGpuGetNumSubdevices_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS *pSubDeviceCountParams +) +{ + pSubDeviceCountParams->numSubDevices = 1; + + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 gpuMask, index; + NvBool bEnable; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJGPU *pTmpGpu; + + if (NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED == + pParams->newState) + { + bEnable = NV_TRUE; + } + else if (NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED == + pParams->newState) + { + bEnable = NV_FALSE; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Get the gpuMask for the device pGpu belongs to + gpuMask = gpumgrGetGpuMask(pGpu); + + index = 0; + while ((pTmpGpu = gpumgrGetNextGpu(gpuMask, &index)) != NULL) + { + if (bEnable) + { + pGpuMgr->persistentSwStateGpuMask |= NVBIT(pTmpGpu->gpuInstance); + pTmpGpu->setProperty(pTmpGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, + NV_TRUE); + } + else + { + pGpuMgr->persistentSwStateGpuMask &= ~NVBIT(pTmpGpu->gpuInstance); + pTmpGpu->setProperty(pTmpGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE, + NV_FALSE); + } + + // Set/Clear OS-specific persistence flags + osModifyGpuSwStatePersistence(pTmpGpu->pOsGpuInfo, bEnable); + } + + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PERSISTENT_SW_STATE)) + { + pParams->swStatePersistence = + NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_ENABLED; + } + else + { + pParams->swStatePersistence = + NV0080_CTRL_GPU_SW_STATE_PERSISTENCE_DISABLED; + } + + return NV_OK; +} + +/*! + * @brief This Command is used to get the virtualization mode of GPU. GPU + * can be in NMOS/VGX/host-vGPU/host-vSGA mode. + * + * @return Returns NV_STATUS + * NV_OK If GPU is present. + * NV_ERR_INVALID_ARGUMENT If GPU is not present. + */ +NV_STATUS +deviceCtrlCmdGpuGetVirtualizationMode_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (IS_VIRTUAL(pGpu)) + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_VGX; + } + else if (IS_PASSTHRU(pGpu)) + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NMOS; + } + else + { + pParams->virtualizationMode = + NV0080_CTRL_GPU_VIRTUALIZATION_MODE_NONE; + } + + NV_PRINTF(LEVEL_INFO, "Virtualization Mode: %x\n", + pParams->virtualizationMode); + + return NV_OK; +} + +/*! 
+ * @brief This command is used to find a subdevice handle by subdeviceinst + */ +NV_STATUS +deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM *pParams +) +{ + NV_STATUS status; + Subdevice *pSubdevice; + + status = subdeviceGetByInstance(RES_GET_CLIENT(pDevice), + RES_GET_HANDLE(pDevice), + pParams->subDeviceInst, + &pSubdevice); + + if (status == NV_OK) + { + pParams->hSubDevice = RES_GET_HANDLE(pSubdevice); + } + + return status; +} + +/*! + * @brief Get the GPU's sparse texture compute mode setting information. + * + * This setting indicates how the RM should set the large page size for the + * GPU, based on which use case it should optimize for. + * + * @param[in, out] pModeParams Pointer to struct of user params. + * defaultSetting: The default use case to optimize for on this + * GPU. + * currentSetting: The use case that the large page size was + * optimized for on this GPU, on the last driver + * load. + * pendingSetting: The use case that the large page size will + * be optimized for on this GPU, on the next + * driver reload. + * + * @returns NV_STATUS + * NV_OK Success + */ +NV_STATUS +deviceCtrlCmdGpuGetSparseTextureComputeMode_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *pModeParams +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + status = gpuGetSparseTextureComputeMode(pGpu, + &pModeParams->defaultSetting, + &pModeParams->currentSetting, + &pModeParams->pendingSetting); + + return status; +} + +/*! + * @brief Set the GPU's sparse texture compute mode setting to apply on the + * next driver load. + * + * This setting indicates how the RM should set the large page size for the + * GPU, based on which use case it should optimize for. + * + * @param[in, out] pModeParams Pointer to struct of user params. + * setting: The use case that the large page size should + * be optimized for on this GPU, on the next + * driver reload. + * + * @returns NV_STATUS + * NV_ERR_INVALID_ARGUMENT The specified setting is invalid + * NV_OK Success + */ +NV_STATUS +deviceCtrlCmdGpuSetSparseTextureComputeMode_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_SET_SPARSE_TEXTURE_COMPUTE_MODE_PARAMS *pModeParams +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // + // In SLI, both GPUs will have the same setting for sparse texture/compute + // mode. Individual toggling is not allowed. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + status = gpuSetSparseTextureComputeMode(pGpu, pModeParams->setting); + } + SLI_LOOP_END + + return status; +} + +/*! + * @brief Get the GPU's VGX capability depending upon state of VGX hardware fuse. + * + * @returns NV_STATUS + * NV_OK Success + */ +NV_STATUS +deviceCtrlCmdGpuGetVgxCaps_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_GET_VGX_CAPS_PARAMS *pParams +) +{ + pParams->isVgx = NV_FALSE; + + return NV_OK; +} + +/* + * @brief Request per-VF BAR1 resizing and, subsequently, the number + * of VFs that can be created. 
The request will take a per-VF + * BAR1 size in MB and calculate the number of possible VFs + * + * @param[in] pParams NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS + * pointer detailing the per-VF BAR1 size and + * number of VFs + */ + +NV_STATUS +deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL +( + Device *pDevice, + NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + return gpuSetVFBarSizes_HAL(pGpu, pParams); +} diff --git a/src/nvidia/src/kernel/gpu/device_share.c b/src/nvidia/src/kernel/gpu/device_share.c new file mode 100644 index 000000000..9b09f4caa --- /dev/null +++ b/src/nvidia/src/kernel/gpu/device_share.c @@ -0,0 +1,391 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "os/os.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "mem_mgr/vaspace.h" +#include "core/system.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu_mgr/gpu_group.h" +#include "class/cl00f2.h" // IO_VASPACE_A +#include "gpu/mem_mgr/vaspace_api.h" +#include "rmapi/rs_utils.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "gpu/mem_mgr/virt_mem_allocator.h" + +#include "gpu_mgr/gpu_mgr.h" + +#include "gpu/mmu/kern_gmmu.h" + +/*! + * @brief Save client share allocation information for this device + * + * Save client share allocation information for this device. The + * client share is actually allocated as a result of CliGetVASpace() + * before the VAShare is actually used. 
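+ * The VA space itself is created or attached lazily by
+ * deviceInitClientShare().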
+ * + * @param[in] pDevice + * @param[in] hClientShare RM client specified share handle + * @param[in] deviceAllocFlags Allocation flags from RM client + * + * @returns NV_STATUS + */ +NV_STATUS +deviceSetClientShare_IMPL +( + Device *pDevice, + NvHandle hClientShare, + NvU64 vaSize, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 deviceAllocFlags +) +{ + pDevice->pVASpace = NULL; + pDevice->hClientShare = hClientShare; + pDevice->deviceAllocFlags = deviceAllocFlags; + pDevice->deviceInternalAllocFlags = 0; + pDevice->vaSize = vaSize; + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + pDevice->vaStartInternal = vaStartInternal; + pDevice->vaLimitInternal = vaLimitInternal; + } + + if ((deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE) && (vaSize == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +/*! + * @brief Initialize the device VASPACE + */ +static NV_STATUS +deviceInitClientShare +( + Device *pDevice, + NvHandle hClientShare, + NvU64 vaSize, + NvU32 deviceAllocFlags, + NvU32 deviceAllocInternalFlags +) +{ + Device *pShareDevice; + RsClient *pClientShare; + OBJVASPACE *pVAS = NULL; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU32 gpuMask = gpumgrGetGpuMask(pGpu); + NvU32 vaspaceClass = 0; + + pDevice->pVASpace = NULL; + + // Set broadcast state for thread + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + // + // Share "default" behavior is defined by "share w/null", which + // attaches to the global address space. + // + if (hClientShare == NV01_NULL_OBJECT) + { + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + status = gpugrpGetGlobalVASpace(pGpuGrp, &pVAS); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + vaspaceIncRefCnt(pVAS); + status = NV_OK; + } + + // + // "Force a new share" behavior is defined by "share w/myself" + // + else if (hClientShare == RES_GET_CLIENT_HANDLE(pDevice)) + { + NvU32 flags = VASPACE_FLAGS_DEFAULT_PARAMS; + NvU64 vaLimit; + + flags |= (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SHARED_MANAGEMENT) ? 
+ VASPACE_FLAGS_SHARED_MANAGEMENT : 0; + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE) + { + flags |= VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE; + } + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS) + { + flags |= VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS; + } + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_SIZE) + { + vaLimit = pDevice->vaSize - 1; + } + else + { + flags |= VASPACE_FLAGS_DEFAULT_SIZE; // only needed for Tesla + vaLimit = 0; + } + + if ( (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k) && + (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k) ) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_64k) + { + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + else if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_BIG_PAGE_SIZE_128k) + { + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + else + { + // will cause it to use the default size + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + } + + if (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_RESTRICT_RESERVED_VALIMITS) + { + flags |= VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS; + NV_ASSERT(pDevice->vaStartInternal); + NV_ASSERT(pDevice->vaLimitInternal); + } + else + { + NV_ASSERT(!pDevice->vaStartInternal); + NV_ASSERT(!pDevice->vaLimitInternal); + } + + // + // NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED will be removed once CUDA phases out + // and uses the ctrl call NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE + // to set privileged address space + // + if ((deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_IS_MIRRORED) + || (deviceAllocInternalFlags & NV_DEVICE_INTERNAL_ALLOCATION_FLAGS_ENABLE_PRIVILEGED_VASPACE) + ) + { + flags |= VASPACE_FLAGS_SET_MIRRORED; + } + if (NULL != GPU_GET_KERNEL_GMMU(pGpu)) + vaspaceClass = kgmmuGetVaspaceClass_HAL(GPU_GET_KERNEL_GMMU(pGpu)); + if (NULL == GPU_GET_KERNEL_GMMU(pGpu) && (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY) || IsDFPGA(pGpu))) + vaspaceClass = IO_VASPACE_A; + else if (vaspaceClass == 0) + { + NV_ASSERT(0); + return NV_ERR_OBJECT_NOT_FOUND; + } + + flags |= VASPACE_FLAGS_ENABLE_VMM; + + // + // Page tables are allocated in guest subheap only inside non SRIOV guests + // and on host RM. + // + if (!gpuIsSplitVasManagementServerClientRmEnabled(pGpu) || + !IS_VIRTUAL(pGpu)) + { + flags |= VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR; + } + + // + // XXX NV_DEVICE_ALLOCATION_FLAGS_VASPACE_PTABLE_PMA_MANAGED should not + // be exposed to clients. It should be the default RM behavior. + // + // Until it is made the default, certain clients such as OpenGL + // might still need PTABLE allocations to go through PMA, so this + // flag has been temporary exposed. + // + // See bug 1880192 + // + // Note: Some clients (including scrubber) depend on page tables not + // being PMA managed, so if this is made the default then an opt-out + // flag should still be exposed, or some other solution implemented. 
+ // See bug 2844476 + // + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + if (memmgrIsPmaInitialized(pMemoryManager) && + memmgrAreClientPageTablesPmaManaged(pMemoryManager) && + (deviceAllocFlags & NV_DEVICE_ALLOCATION_FLAGS_VASPACE_PTABLE_PMA_MANAGED)) + { + flags |= VASPACE_FLAGS_PTETABLE_PMA_MANAGED; + } + status = vmmCreateVaspace(pVmm, vaspaceClass, 0, gpuMask, 0, + vaLimit, pDevice->vaStartInternal, + pDevice->vaLimitInternal, NULL, flags, &pVAS); + if (NV_OK != status) + { + NV_ASSERT(0); + return status; + } + } + + // + // Try to attach to another clients VA Share. Validate client and pull the + // share information off the first device. + // + else + { + status = serverGetClientUnderLock(&g_resServ, hClientShare, &pClientShare); + if (status != NV_OK) + return status; + + // + // If the share client doesn't have a device allocated for this GPU, + // there's no address space to share. + // + status = deviceGetByInstance(pClientShare, pDevice->deviceInst, &pShareDevice); + if (status != NV_OK) + return status; + + // Init target share if needed + if (pShareDevice->pVASpace == NULL) + { + status = deviceInitClientShare(pShareDevice, + pShareDevice->hClientShare, + pShareDevice->vaSize, + pShareDevice->deviceAllocFlags, + pShareDevice->deviceInternalAllocFlags); + if (status != NV_OK) + return status; + } + + pVAS = pShareDevice->pVASpace; + vaspaceIncRefCnt(pVAS); + } + + pDevice->pVASpace = pVAS; + return status; +} + + +/*! + * @brief Detach this pDevice from the share group + */ +void +deviceRemoveFromClientShare_IMPL +( + Device *pDevice +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + + if (pDevice->pVASpace != NULL) + { + vmmDestroyVaspace(pVmm, pDevice->pVASpace); + pDevice->pVASpace = NULL; + } +} + +NV_STATUS +deviceGetDefaultVASpace_IMPL +( + Device *pDevice, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + + // + // There are some cases in SLI transitions where we allocate + // a device before the hal is initialized. + // + if (pDevice->pVASpace == NULL) + { + status = deviceInitClientShare(pDevice, + pDevice->hClientShare, + pDevice->vaSize, + pDevice->deviceAllocFlags, + pDevice->deviceInternalAllocFlags); + } + + *ppVAS = pDevice->pVASpace; + + return status; +} + +/*! + * @brief Associate the given address space object as the default VASpace + * + * This function will associate the given address space object as the + * default vaspace of the parent device. 
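+ * The call fails if the device already has a default VA space associated.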
+ * + * @param[in] hClient RM client + * @param[in] hDevice RM device under this client + * @param[in] hVASpace VASpace object handle that is under this device + * + * @returns NV_STATUS or NV_ERR_INVALID_OBJECT_HANDLE + */ +NV_STATUS +deviceSetDefaultVASpace_IMPL +( + Device *pDevice, + NvHandle hVASpace +) +{ + NV_STATUS status = NV_OK; + VaSpaceApi *pVaSpaceApi = NULL; + RsResourceRef *pResourceRef; + + if (hVASpace == NV01_NULL_OBJECT) + return NV_ERR_INVALID_ARGUMENT; + + status = serverutilGetResourceRefWithParent(RES_GET_CLIENT_HANDLE(pDevice), + RES_GET_HANDLE(pDevice), + hVASpace, + classId(VaSpaceApi), + &pResourceRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid object handle 0x%x pEntry %p\n", + hVASpace, pVaSpaceApi); + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + pVaSpaceApi = dynamicCast(pResourceRef->pResource, VaSpaceApi); + + if (pDevice->pVASpace != NULL) + { + NV_PRINTF(LEVEL_ERROR, "device already has an Associated VASPace\n"); + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + // associate the vaspace as default + pDevice->pVASpace = pVaSpaceApi->pVASpace; + vaspaceIncRefCnt(pVaSpaceApi->pVASpace); + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c b/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c new file mode 100644 index 000000000..cd935a2c7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/arch/v03/kern_disp_0300.c @@ -0,0 +1,272 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" + +#include "disp/v03_00/dev_disp.h" + +#include "class/clc371.h" +#include "class/clc373.h" + +NV_STATUS +kdispGetChannelNum_v03_00 +( + KernelDisplay *pKernelDisplay, + DISPCHNCLASS channelClass, + NvU32 channelInstance, + NvU32 *pChannelNum +) +{ + NV_STATUS status = NV_ERR_INVALID_ARGUMENT; + + if (pChannelNum == NULL) + return NV_ERR_INVALID_ARGUMENT; + + const KernelDisplayStaticInfo *pStaticInfo = pKernelDisplay->pStaticInfo; + NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE); + + switch (channelClass) + { + case dispChnClass_Curs: + if ((channelInstance < NV_PDISP_CHN_NUM_CURS__SIZE_1) && + (channelInstance < NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS__SIZE_1)) + { + if (FLD_IDX_TEST_DRF(_PDISP, _FE_HW_SYS_CAP, _HEAD_EXISTS, channelInstance, _YES, pStaticInfo->feHwSysCap)) + { + *pChannelNum = NV_PDISP_CHN_NUM_CURS(channelInstance); + status = NV_OK; + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + } + break; + + case dispChnClass_Winim: + if (channelInstance < NV_PDISP_CHN_NUM_WINIM__SIZE_1) + { + if (pStaticInfo->windowPresentMask & NVBIT32(channelInstance)) + { + *pChannelNum = NV_PDISP_CHN_NUM_WINIM(channelInstance); + status = NV_OK; + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + } + break; + + case dispChnClass_Core: + *pChannelNum = NV_PDISP_CHN_NUM_CORE; + status = NV_OK; + break; + + case dispChnClass_Win: + if (channelInstance < NV_PDISP_CHN_NUM_WIN__SIZE_1) + { + if (pStaticInfo->windowPresentMask & NVBIT32(channelInstance)) + { + *pChannelNum = NV_PDISP_CHN_NUM_WIN(channelInstance); + status = NV_OK; + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + } + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Unknown channel class %x\n", channelClass); + status = NV_ERR_INVALID_CHANNEL; + DBG_BREAKPOINT(); + break; + } + + return status; +} + +/*! + * @brief Get the register base address for display capabilities registers + * + * @param pGpu + * @param pKernelDisplay + * @param[out] pOffset NvU32 pointer to return base offset + * @param[out] pSize NvU32 pointer to return size + */ +void +kdispGetDisplayCapsBaseAndSize_v03_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + { + // Tegra offsets needs to be subtracted with -0x610000. + *pOffset = DRF_BASE(NV_PDISP_FE_SW) + + kdispGetBaseOffset_HAL(pGpu, pKernelDisplay); + } + + if (pSize) + { + *pSize = sizeof(NvC373DispCapabilities_Map); + } +} + +/*! + * @brief Get the register base address for SF user space. + * + * @param pGpu + * @param pKernelDisplay + * @param[out] pOffset NvU32 pointer to return base offset + * @param[out] pSize NvU32 pointer to return size + */ +void +kdispGetDisplaySfUserBaseAndSize_v03_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + { + // Tegra offsets needs to be subtracted with -0x610000. + *pOffset = DRF_BASE(NV_PDISP_SF_USER_0) + + kdispGetBaseOffset_HAL(pGpu, pKernelDisplay); + } + + if (pSize) + { + *pSize = sizeof(NvC371DispSfUserMap); + } +} + +/*! 
+ * @brief Get the register base address and size of channel user area + * + * @param pGpu + * @param pKernelDisplay + * @param[in] channelClass Class of the channel + * @param[in] channelInstance Channel instance # + * @param[out] pOffset User space bease address + * @param[out] pSize User space length (optional) + * + * @return NV_STATUS + */ +NV_STATUS +kdispGetDisplayChannelUserBaseAndSize_v03_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + DISPCHNCLASS channelClass, + NvU32 channelInstance, + NvU32 *pOffset, + NvU32 *pSize +) +{ + NvU32 dispChannelNum; + NV_STATUS status; + + if (pOffset == NULL) + return NV_ERR_INVALID_ARGUMENT; + + status = kdispGetChannelNum_HAL(pKernelDisplay, channelClass, channelInstance, &dispChannelNum); + if (status != NV_OK) + return status; + + NV_ASSERT(dispChannelNum < NV_UDISP_FE_CHN_ASSY_BASEADR__SIZE_1); + + *pOffset = NV_UDISP_FE_CHN_ASSY_BASEADR(dispChannelNum); + + // + // The user are size for Core Channel is 64KB (32K for Armed and 32k for Assembly), + // and all other channels are 4KB (2K for Armed and 2k for Assembly). + // + if (pSize != NULL) + { + switch (channelClass) + { + case dispChnClass_Curs: + *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(dispChannelNum); + break; + + case dispChnClass_Winim: + *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(dispChannelNum); + break; + + case dispChnClass_Core: + *pSize = (NV_UDISP_FE_CHN_ARMED_BASEADR_CORE - NV_UDISP_FE_CHN_ASSY_BASEADR_CORE) * 2; + break; + + case dispChnClass_Win: + *pSize = NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(dispChannelNum + 1) - NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(dispChannelNum); + break; + + default: + break; + } + } + + return NV_OK; +} + +/*! + * @brief Validate selected sw class. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelDisplay KernelDisplay object pointer + * @param[in] swClass Selected class name + */ +NV_STATUS +kdispSelectClass_v03_00_KERNEL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 swClass +) +{ + if (!gpuIsClassSupported(pGpu, swClass)) + { + NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", swClass); + return NV_ERR_INVALID_CLASS; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0400.c b/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0400.c new file mode 100644 index 000000000..97ccba272 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/arch/v04/kern_disp_0400.c @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" + +#include "published/disp/v04_00/dev_disp.h" + +/*! + * @brief Get the VGA workspace base address, if valid. + * + */ +NvBool +kdispGetVgaWorkspaceBase_v04_00 +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU64 *pOffset +) +{ + NvU32 vgaReg = GPU_REG_RD32(pGpu, NV_PDISP_VGA_WORKSPACE_BASE); + + if (FLD_TEST_DRF(_PDISP, _VGA_WORKSPACE_BASE, _STATUS, _VALID, vgaReg)) + { + *pOffset = GPU_DRF_VAL(_PDISP, _VGA_WORKSPACE_BASE, _ADDR, vgaReg) << 16; + return NV_TRUE; + } + + return NV_FALSE; +} diff --git a/src/nvidia/src/kernel/gpu/disp/arch/v04/kernel_head_gpu.c b/src/nvidia/src/kernel/gpu/disp/arch/v04/kernel_head_gpu.c new file mode 100644 index 000000000..5010082ef --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/arch/v04/kernel_head_gpu.c @@ -0,0 +1,63 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/disp/head/kernel_head.h" +#include "published/disp/v04_00/dev_disp.h" + +void kheadResetPendingVblank_v04_00_KERNEL(OBJGPU *pGpu, KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) +{ + NvU32 writeIntr = 0; + + writeIntr = DRF_DEF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _LAST_DATA, _RESET); + + GPU_REG_WR32(pGpu, NV_PDISP_FE_EVT_STAT_HEAD_TIMING(pKernelHead->PublicId), + writeIntr); +} + +void kheadResetPendingVblankForKernel_v04_00_KERNEL(OBJGPU *pGpu, KernelHead *pKernelHead, THREAD_STATE_NODE *pThreadState) +{ + kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState); +} + +NvU32 kheadReadPendingVblank_v04_00_KERNEL(OBJGPU *pGpu, KernelHead *pKernelHead, NvU32 headIntrMask) +{ + + NvU32 intr = GPU_REG_RD32(pGpu, NV_PDISP_FE_RM_INTR_DISPATCH); + + if (!FLD_IDX_TEST_DRF(_PDISP, _FE_RM_INTR_DISPATCH, _HEAD_TIMING, pKernelHead->PublicId, _PENDING, intr)) + return headIntrMask; + + intr = GPU_REG_RD32(pGpu, NV_PDISP_FE_EVT_STAT_HEAD_TIMING(pKernelHead->PublicId)); + + if (FLD_TEST_DRF(_PDISP, _FE_EVT_STAT_HEAD_TIMING, _LAST_DATA, _PENDING, intr)) + { + headIntrMask |= headIntr_vblank; + } + if (headIntrMask & headIntr_vblank) + { + return NVBIT(pKernelHead->PublicId); + } + + return headIntr_none; +} + diff --git a/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c b/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c new file mode 100644 index 000000000..41316cff6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_capabilities.c @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispCapabilities class. 
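+* It records the capabilities register window (base offset and length) at construction and reports it through dispcapGetRegBaseOffsetAndSize.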
+* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" + +#include "gpu/gpu.h" +#include "gpu/disp/disp_capabilities.h" +#include "gpu/disp/kern_disp.h" + +NV_STATUS +dispcapConstruct_IMPL +( + DispCapabilities *pDispCapabilities, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispCapabilities); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + // Set display caps RegBase offsets + kdispGetDisplayCapsBaseAndSize_HAL(pGpu, pKernelDisplay, + &pDispCapabilities->ControlOffset, + &pDispCapabilities->ControlLength); + + return NV_OK; +} + +NV_STATUS +dispcapGetRegBaseOffsetAndSize_IMPL +( + DispCapabilities *pDispCapabilities, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + { + *pOffset = pDispCapabilities->ControlOffset; + } + if (pSize) + { + *pSize = pDispCapabilities->ControlLength; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/disp_channel.c b/src/nvidia/src/kernel/gpu/disp/disp_channel.c new file mode 100644 index 000000000..b2c1c8de3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_channel.c @@ -0,0 +1,722 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispChannel and its derived classes. 
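+* This covers channel allocation (PIO and DMA), channel grabbing, user-area mapping, context DMA bind/unbind, and channel teardown.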
+* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" + +#include "gpu/device/device.h" +#include "gpu/gpu_resource.h" +#include "gpu/disp/disp_channel.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/mem_mgr/context_dma.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "vgpu/rpc.h" + +static void +dispchnParseAllocParams +( + DispChannel *pDispChannel, + void *pAllocParams, + NvU32 *pChannelInstance, + NvHandle *pHObjectBuffer, + NvU32 *pInitialGetPutOffset, + NvBool *pAllowGrabWithinSameClient, + NvBool *pConnectPbAtGrab +) +{ + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = NULL; + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = NULL; + + *pAllowGrabWithinSameClient = NV_FALSE; + *pConnectPbAtGrab = NV_FALSE; + + if (pDispChannel->bIsDma) + { + pDmaChannelAllocParams = pAllocParams; + *pChannelInstance = pDmaChannelAllocParams->channelInstance; + *pHObjectBuffer = pDmaChannelAllocParams->hObjectBuffer; + *pInitialGetPutOffset = pDmaChannelAllocParams->offset; + + if (FLD_TEST_DRF(50VAIO_CHANNELDMA_ALLOCATION, _FLAGS, + _CONNECT_PB_AT_GRAB, _YES, + pDmaChannelAllocParams->flags)) + { + *pConnectPbAtGrab = NV_TRUE; + } + + if (pDmaChannelAllocParams->hObjectNotify != 0) + { + NV_PRINTF(LEVEL_WARNING, "Error notifier parameter is not used in Display channel allocation.\n"); + } + } + else + { + pPioChannelAllocParams = pAllocParams; + *pChannelInstance = pPioChannelAllocParams->channelInstance; + *pHObjectBuffer = 0; // No one should look at this. So, 0 should be fine. + *pInitialGetPutOffset = 0; // No one should look at this. So, 0 should be fine. + + if (pPioChannelAllocParams->hObjectNotify != 0) + { + NV_PRINTF(LEVEL_WARNING, "Error notifier parameter is not used in Display channel allocation.\n"); + } + } +} + +NV_STATUS +dispchnConstruct_IMPL +( + DispChannel *pDispChannel, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvU32 isDma +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NV_STATUS rmStatus = NV_OK; + NvU32 channelInstance; + NvHandle hObjectBuffer; + NvBool bIsDma = !!isDma; + NvU32 initialGetPutOffset; + NvBool allowGrabWithinSameClient; + NvBool connectPbAtGrab; + DISPCHNCLASS internalDispChnClass; + void *pAllocParams = pParams->pAllocParams; + RsResourceRef *pParentRef = RES_GET_REF(pDispChannel)->pParentRef; + DispObject *pDispObject = dynamicCast(pParentRef->pResource, DispObject); + ContextDma *pBufferContextDma = NULL; + NvU32 hClass = RES_GET_EXT_CLASS_ID(pDispChannel); + + NV_ASSERT_OR_RETURN(pDispObject, NV_ERR_INVALID_OBJECT_HANDLE); + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // + // Make sure this channel class is supported on this chip. 
+ // Need to have the check below since, the switch in RmAlloc + // doesn't tell if the current chip supports the class + // + if (!gpuIsClassSupported(pGpu, RES_GET_EXT_CLASS_ID(pDispChannel))) + { + NV_PRINTF(LEVEL_ERROR, "Unsupported class in\n"); + return NV_ERR_INVALID_CLASS; + } + + // Move params into RM's address space + pDispChannel->pDispObject = pDispObject; + pDispChannel->bIsDma = bIsDma; + dispchnParseAllocParams(pDispChannel, pAllocParams, + &channelInstance, + &hObjectBuffer, + &initialGetPutOffset, + &allowGrabWithinSameClient, + &connectPbAtGrab); + + rmStatus = kdispGetIntChnClsForHwCls(pKernelDisplay, + RES_GET_EXT_CLASS_ID(pDispChannel), + &internalDispChnClass); + if (rmStatus != NV_OK) + return rmStatus; + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + { + rmStatus = kdispSetPushBufferParamsToPhysical_HAL(pGpu, + pKernelDisplay, + pDispChannel, + hObjectBuffer, + pBufferContextDma, + hClass, + channelInstance, + internalDispChnClass); + if (rmStatus != NV_OK) + return rmStatus; + } + SLI_LOOP_END + + // Acquire the underlying HW resources + rmStatus = kdispAcquireDispChannelHw_HAL(pKernelDisplay, + pDispChannel, + channelInstance, + hObjectBuffer, + initialGetPutOffset, + allowGrabWithinSameClient, + connectPbAtGrab); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel[0x%x] alloc failed. Return status = 0x%x\n", + channelInstance, rmStatus); + + return rmStatus; + } + + // Channel allocation is successful, initialize new channel's data structures + pDispChannel->DispClass = internalDispChnClass; + pDispChannel->InstanceNumber = channelInstance; + dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu); + + // Map memory for parent GPU + rmStatus = kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel); + + // setup to return pControl to client + if (pDispChannel->bIsDma) + { + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = pAllocParams; + pDmaChannelAllocParams->pControl = pDispChannel->pControl; + } + else + { + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = pAllocParams; + pPioChannelAllocParams->pControl = pDispChannel->pControl; + } + + return rmStatus; +} + +// +// Performs grab operation for a channel. +// +// Pre-Volta Linux swapgroups is the only remaining use of channel grabbing. +// Bug 2869820 is tracking the transition of swapgroups from requiring this +// RM feature. +// +NV_STATUS +dispchnGrabChannel_IMPL +( + DispChannel *pDispChannel, + NvHandle hClient, + NvHandle hParent, + NvHandle hChannel, + NvU32 hClass, + void *pAllocParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NvU32 channelInstance; + NvHandle hObjectBuffer; + NvU32 initialGetPutOffset; + NvBool allowGrabWithinSameClient; + NvBool connectPbAtGrab; + ContextDma *pBufferContextDma = NULL; + DISPCHNCLASS internalDispChnClass; + + if (RES_GET_PARENT_HANDLE(pDispChannel) != hParent) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel grab failed because of bad display parent 0x%x\n", + hParent); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_OBJECT_PARENT; + } + + // Move params into RM's address space + dispchnParseAllocParams(pDispChannel, pAllocParams, + &channelInstance, + &hObjectBuffer, + &initialGetPutOffset, + &allowGrabWithinSameClient, + &connectPbAtGrab); + + // + // The handle already exists in our DB. 
+ // The supplied params must be same as what we already have with us + // + if (RES_GET_EXT_CLASS_ID(pDispChannel) != hClass || + pDispChannel->InstanceNumber != channelInstance) + { + NV_PRINTF(LEVEL_ERROR, + "Information supplied for handle 0x%x doesn't match that in RM's client DB\n", + hChannel); + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + rmStatus = kdispGetIntChnClsForHwCls(pKernelDisplay, + hClass, + &internalDispChnClass); + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + { + rmStatus = kdispSetPushBufferParamsToPhysical_HAL(pGpu, + pKernelDisplay, + pDispChannel, + hObjectBuffer, + pBufferContextDma, + hClass, + channelInstance, + internalDispChnClass); + if (rmStatus != NV_OK) + return rmStatus; + } + SLI_LOOP_END + + // Acquire the underlying HW resources + rmStatus = kdispAcquireDispChannelHw_HAL(pKernelDisplay, + pDispChannel, + channelInstance, + hObjectBuffer, + initialGetPutOffset, + allowGrabWithinSameClient, + connectPbAtGrab); + + // setup to return pControl to client + if (pDispChannel->bIsDma) + { + NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *pDmaChannelAllocParams = pAllocParams; + pDmaChannelAllocParams->pControl = pDispChannel->pControl; + } + else + { + NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *pPioChannelAllocParams = pAllocParams; + pPioChannelAllocParams->pControl = pDispChannel->pControl; + } + + return rmStatus; +} + +NV_STATUS +dispchnGetRegBaseOffsetAndSize_IMPL +( + DispChannel *pDispChannel, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + if (pOffset) + *pOffset = pDispChannel->ControlOffset; + + if (pSize) + *pSize = pDispChannel->ControlLength; + + return NV_OK; +} + +void +dispchnSetRegBaseOffsetAndSize_IMPL +( + DispChannel *pDispChannel, + OBJGPU *pGpu +) +{ + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + (void)kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay, + pDispChannel->DispClass, + pDispChannel->InstanceNumber, + &pDispChannel->ControlOffset, + &pDispChannel->ControlLength); + + // Tegra offsets needs to be subtracted with -0x610000. + pDispChannel->ControlOffset += kdispGetBaseOffset_HAL(pGpu, pKernelDisplay); +} + +/*! + * @brief Maps channel user area for parent GPU. + */ +NV_STATUS +kdispMapDispChannel_IMPL +( + KernelDisplay *pKernelDisplay, + DispChannel *pDispChannel +) +{ + NV_STATUS rmStatus; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + RsClient *pClient = RES_GET_CLIENT(pDispChannel); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pRmClient); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + // + // Only need the map for the parent GPU since we require the client to + // use RmMapMemory for subdevice channel mapping. + // + rmStatus = osMapGPU(pGpu, privLevel, + pDispChannel->ControlOffset, + pDispChannel->ControlLength, + NV_PROTECT_READ_WRITE, + &pDispChannel->pControl, + &pDispChannel->pPriv); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel[0x%x] mapping failed. Return status = 0x%x\n", + pDispChannel->InstanceNumber, rmStatus); + + (void) pRmApi->Free(pRmApi, + RES_GET_CLIENT_HANDLE(pDispChannel), + RES_GET_HANDLE(pDispChannel)); + + return rmStatus; + } + + return NV_OK; +} + +/*! + * @brief Unbinds Context DMAs and unmaps channel user area for the given channel. 
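+ * Any bound ContextDma under the channel's parent Device is given a chance to unbind from this channel before the user-space mapping is released.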
+ */ +void kdispUnbindUnmapDispChannel_IMPL +( + KernelDisplay *pKernelDisplay, + DispChannel *pDispChannel +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + RsClient *pClient = RES_GET_CLIENT(pDispChannel); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pRmClient); + RS_ITERATOR ContextDmaIt; + + // Unbind all context dmas bound to this channel + ContextDmaIt = clientRefIter(pClient, RES_GET_REF(GPU_RES_GET_DEVICE(pDispChannel)), classId(ContextDma), RS_ITERATE_DESCENDANTS, NV_TRUE); + while (clientRefIterNext(ContextDmaIt.pClient, &ContextDmaIt)) + { + ContextDma *pContextDma; + + pContextDma = dynamicCast(ContextDmaIt.pResourceRef->pResource, ContextDma); + if (pContextDma == NULL) + continue; + + // Quickly skip unbound ContextDmas + if (ctxdmaIsBound(pContextDma)) + { + // Ignore unbind status as the ContextDma may not be bound to this channel + (void)dispchnUnbindCtx(pDispChannel, pGpu, pContextDma); + } + } + + // Unmap the channel + osUnmapGPU(pGpu->pOsGpuInfo, privLevel, pDispChannel->pControl, + pDispChannel->ControlLength, pDispChannel->pPriv); +} + +void +dispchnDestruct_IMPL +( + DispChannel *pDispChannel +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispChannel); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + LOCK_METER_DATA(FREE_CHANNEL_DISP, pDispChannel->DispClass, 0, 0); + + // + // Before freeing the CORE channel, make sure all satellite channels are + // torn down. This is currently necessary on UNIX to deal with cases + // where X (i.e. the userspace display driver) terminates before other + // RM clients with satellite channel allocations, e.g. OpenGL clients with + // BASE channel allocations. 
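+ // The loop below walks every client's Device resources on this GPU and frees any non-core DispChannel it finds before continuing with the core channel teardown.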
+ // + if ((pDispChannel->DispClass == dispChnClass_Core) && + pKernelDisplay->bWarPurgeSatellitesOnCoreFree) + { + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + Device *pDevice; + OBJGPU *pTmpGpu; + DispChannel *pTmpDispChannel; + + NV_ASSERT(gpuIsGpuFullPower(pGpu)); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(it.pClient, &it)) + { + RS_ITERATOR dispIt; + RsResourceRef *pResourceRef; + DispObject *pDispObject; + + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + pTmpGpu = GPU_RES_GET_GPU(pDevice); + if (pTmpGpu != pGpu) + continue; + + rmStatus = dispobjGetByDevice(pRsClient, pDevice, &pDispObject); + if (rmStatus != NV_OK) + continue; + + pResourceRef = RES_GET_REF(pDispObject); + + dispIt = clientRefIter(pRsClient, pResourceRef, classId(DispChannel), RS_ITERATE_CHILDREN, NV_FALSE); + + while (clientRefIterNext(dispIt.pClient, &dispIt)) + { + pTmpDispChannel = dynamicCast(dispIt.pResourceRef->pResource, DispChannel); + + if (pTmpDispChannel->DispClass != dispChnClass_Core) + { + rmStatus = pRmApi->Free(pRmApi, + RES_GET_CLIENT_HANDLE(pTmpDispChannel), + RES_GET_HANDLE(pTmpDispChannel)); + + if (rmStatus == NV_OK) + { + // Client's resource map has been modified, re-snap iterators + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + dispIt = clientRefIter(pRsClient, it.pResourceRef, classId(DispChannel), RS_ITERATE_DESCENDANTS, NV_FALSE); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Failed to free satellite DispChannel 0x%x!\n", + RES_GET_HANDLE(pTmpDispChannel)); + } + } + } + } + } + } + + // + // Unbind all context dmas bound to this channel, unmap the channel and + // finally release HW resources. + // + kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel); + rmStatus = kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel); + + if (rmStatus != NV_OK) + { + // Try to avoid returning error codes on free under new resource server design + NV_ASSERT(0); + } +} + +NV_STATUS +dispchnpioConstruct_IMPL +( + DispChannelPio *pDispChannelPio, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +dispchndmaConstruct_IMPL +( + DispChannelDma *pDispChannelDma, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +dispchnGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hDisplayChannel, + DispChannel **ppDispChannel +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppDispChannel = NULL; + + status = clientGetResourceRef(pClient, hDisplayChannel, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDispChannel = dynamicCast(pResourceRef->pResource, DispChannel); + + return (*ppDispChannel) ? 
NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +// +// Bind the DMA context to a display channel +// +NV_STATUS +dispchnBindCtx_IMPL +( + DispChannel *pDispChannel, + OBJGPU *pGpu, + ContextDma *pContextDma +) +{ + NV_STATUS rmStatus = NV_OK; + KernelDisplay *pKernelDisplay; + DisplayInstanceMemory *pInstMem; + + // + // Enforce alignment requirements + // ISO ctx dmas need to be a multiple of 256B and 256B aligned + // NISO ctx dmas need to be a multiple of 4K and 4K aligned + // We can only ensure common minimum -- 4K alignment and 4K size + // Limit alignment is handled by rounding up in lower-level code. + // This will be in hw in future. + // + if (pContextDma->pMemDesc->PteAdjust != 0) + { + NV_PRINTF(LEVEL_ERROR, + "ISO ctx dmas must be 4K aligned. PteAdjust = 0x%x\n", + pContextDma->pMemDesc->PteAdjust); + return NV_ERR_INVALID_OFFSET; + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay); + + rmStatus = instmemBindContextDma(pGpu, pInstMem, pContextDma, pDispChannel); + if (rmStatus != NV_OK) + { + SLI_LOOP_RETURN(rmStatus); + } + + SLI_LOOP_END + + return NV_OK; +} + +NV_STATUS +dispchnUnbindCtx_IMPL +( + DispChannel *pDispChannel, + OBJGPU *pGpu, + ContextDma *pContextDma +) +{ + NV_STATUS rmStatus = NV_OK; + KernelDisplay *pKernelDisplay; + DisplayInstanceMemory *pInstMem; + NvBool bFound = NV_FALSE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay); + + rmStatus = instmemUnbindContextDma(pGpu, pInstMem, pContextDma, pDispChannel); + if (rmStatus == NV_OK) + { + bFound = NV_TRUE; + } + + SLI_LOOP_END + + return bFound ? NV_OK : NV_ERR_INVALID_STATE; +} + +NV_STATUS +kdispSetPushBufferParamsToPhysical_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + DispChannel *pDispChannel, + NvHandle hObjectBuffer, + ContextDma *pBufferContextDma, + NvU32 hClass, + NvU32 channelInstance, + DISPCHNCLASS internalDispChnClass +) +{ + RsClient *pClient = RES_GET_CLIENT(pDispChannel); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS rmStatus = NV_OK; + NvU32 dispChannelNum; + NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS pushBufferParams = {0}; + + rmStatus = kdispGetChannelNum_HAL(pKernelDisplay, internalDispChnClass, channelInstance, &dispChannelNum); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + pushBufferParams.hclass = hClass; + pushBufferParams.channelInstance = channelInstance; + + if (pDispChannel->bIsDma) + { + rmStatus = ctxdmaGetByHandle(pClient, hObjectBuffer, &pBufferContextDma); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "disp channel[0x%x] didn't have valid ctxdma 0x%x\n", + channelInstance, hObjectBuffer); + return rmStatus; + } + + pushBufferParams.limit = pBufferContextDma->Limit; + pushBufferParams.addressSpace = memdescGetAddressSpace(pBufferContextDma->pMemDesc); + if ((pushBufferParams.addressSpace != ADDR_SYSMEM) && (pushBufferParams.addressSpace != ADDR_FBMEM)) + { + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + // Generate PUSHBUFFER_ADDR. 
Shift the addr to get the size in 4KB + pushBufferParams.physicalAddr = memdescGetPhysAddr(memdescGetMemDescFromGpu(pBufferContextDma->pMemDesc, pGpu), AT_GPU, 0); + pushBufferParams.cacheSnoop= pBufferContextDma->CacheSnoop; + pushBufferParams.valid = NV_TRUE; + } + else + { + pushBufferParams.valid = NV_FALSE; + } + + pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER, + &pushBufferParams, sizeof(pushBufferParams)); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c b/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c new file mode 100644 index 000000000..92a8bd6b8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c @@ -0,0 +1,210 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /** + * @file disp_common_kern_ctrl_minimal.c implements rmctrls which + * (a) are declared in disp_common_ctrl_minimal.h; i.e. + * (i) are dispcmnCtrlCmd* functions + * (ii) which are used by Tegra SOC NVDisplay and/or OS layer; and + * (b) are implemented in Kernel RM. 
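+ * The controls below cover hotplug/unplug state reporting, display bandwidth allocation, and fake DP interrupt generation.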
+ */ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/disp_objs.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" + +NV_STATUS +dispcmnCtrlCmdSystemGetHotplugUnplugState_IMPL +( + DispCommon *pDispCommon, + NV0073_CTRL_SYSTEM_GET_HOTPLUG_UNPLUG_STATE_PARAMS *pHotplugParams +) +{ + NvHandle hDevice = RES_GET_PARENT_HANDLE(pDispCommon); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(DISPAPI_GET_GPU(pDispCommon)); + NvU32 hotPlugMask = 0; + NvU32 hotUnplugMask = 0; + NV_STATUS status; + + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pDispCommon), + RES_GET_HANDLE(pDispCommon), + NV0073_CTRL_CMD_INTERNAL_GET_HOTPLUG_UNPLUG_STATE, + pHotplugParams, + sizeof(*pHotplugParams)); + + hotPlugMask = pHotplugParams->hotPlugMask; + hotUnplugMask = pHotplugParams->hotUnplugMask; + pHotplugParams->hotPlugMask = 0; + pHotplugParams->hotUnplugMask = 0; + + if (status != NV_OK) + { + return status; + } + + if ((hotPlugMask != 0) || (hotUnplugMask != 0)) + { + RmClient **ppClient; + RsClient *pRsClient; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pRsClient = staticCast(*ppClient, RsClient); + DispCommon *pDispCommonLoop; + + dispcmnGetByDevice(pRsClient, hDevice, &pDispCommonLoop); + if (pDispCommonLoop == NULL) + continue; + + pDispCommonLoop->hotPlugMaskToBeReported |= hotPlugMask & (~(pDispCommonLoop->hotPlugMaskToBeReported & hotUnplugMask)); + pDispCommonLoop->hotUnplugMaskToBeReported |= hotUnplugMask & (~(pDispCommonLoop->hotUnplugMaskToBeReported & hotPlugMask)); + } + } + + pHotplugParams->hotPlugMask = pDispCommon->hotPlugMaskToBeReported; + pHotplugParams->hotUnplugMask = pDispCommon->hotUnplugMaskToBeReported; + pDispCommon->hotPlugMaskToBeReported = 0; + pDispCommon->hotUnplugMaskToBeReported = 0; + + return status; +} + +/*! + * @brief Allocate display bandwidth. + */ +NV_STATUS +dispcmnCtrlCmdSystemAllocateDisplayBandwidth_IMPL +( + DispCommon *pDispCommon, + NV0073_CTRL_SYSTEM_ALLOCATE_DISPLAY_BANDWIDTH_PARAMS *pParams +) +{ + OBJGPU *pGpu; + KernelDisplay *pKernelDisplay; + NV_STATUS status; + + // client gave us a subdevice #: get right pGpu for it + status = dispapiSetUnicastAndSynchronize_HAL( + staticCast(pDispCommon, DisplayApi), + DISPAPI_GET_GPUGRP(pDispCommon), + &pGpu, + pParams->subDeviceInstance); + if (status != NV_OK) + { + return status; + } + + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + return kdispArbAndAllocDisplayBandwidth_HAL(pGpu, + pKernelDisplay, + DISPLAY_ICC_BW_CLIENT_EXT, + pParams->averageBandwidthKBPS, + pParams->floorBandwidthKBPS); +} + +NV_STATUS +dispcmnCtrlCmdDpGenerateFakeInterrupt_IMPL +( + DispCommon *pDispCommon, + NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PARAMS *pParams +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pDispCommon); + NvU32 displayId = pParams->displayId; + NvU32 interruptType = pParams->interruptType; + NV_STATUS status = NV_OK; + + // get target pGpu + status = dispapiSetUnicastAndSynchronize_HAL( + staticCast(pDispCommon, DisplayApi), + DISPAPI_GET_GPUGRP(pDispCommon), + &pGpu, + pParams->subDeviceInstance); + if (status != NV_OK) + { + return status; + } + + NV_ASSERT_OR_RETURN(pParams->displayId, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pGpu, NV_ERR_INVALID_ARGUMENT); + + // Send a DP IRQ (short pulse) to a registered client. 
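+ // For the IRQ case, the eDP panel power state is queried first; if the panel is off, the request fails instead of raising an NV2080_NOTIFIERS_DP_IRQ event.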
+ if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_IRQ) + { + Nv2080DpIrqNotification params = {0}; + params.displayId = displayId; + + // Check eDP power state; if off, return an error. + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV0073_CTRL_DP_GET_EDP_DATA_PARAMS edpData; + + portMemSet(&edpData, 0, sizeof(edpData)); + + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pDispCommon), + RES_GET_HANDLE(pDispCommon), + NV0073_CTRL_CMD_DP_GET_EDP_DATA, + &edpData, + sizeof(edpData)); + + if (status == NV_OK && FLD_TEST_DRF(0073_CTRL_DP, _GET_EDP_DATA, _PANEL_POWER, _OFF, edpData.data)) + { + return NV_ERR_GENERIC; + } + + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_DP_IRQ, ¶ms, sizeof(params), 0, 0); + } + else if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG || + interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG) + { + Nv2080HotplugNotification hotplugNotificationParams; + portMemSet(&hotplugNotificationParams, 0, sizeof(hotplugNotificationParams)); + + if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_PLUG) + { + hotplugNotificationParams.plugDisplayMask = displayId; + hotplugNotificationParams.unplugDisplayMask = 0; + } + else if (interruptType == NV0073_CTRL_CMD_DP_GENERATE_FAKE_INTERRUPT_UNPLUG) + { + hotplugNotificationParams.plugDisplayMask = 0; + hotplugNotificationParams.unplugDisplayMask = displayId; + } + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_HOTPLUG, + &hotplugNotificationParams, sizeof(hotplugNotificationParams), 0, 0); + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c b/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c new file mode 100644 index 000000000..393f9eb17 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c @@ -0,0 +1,98 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/disp_objs.h" +#include "class/cl5070.h" +#include "mem_mgr/mem.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" + +NV_STATUS +dispobjCtrlCmdEventSetTrigger_IMPL +( + DispObject *pDispObject +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pDispObject); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + kdispNotifyEvent(pGpu, pKernelDisplay, NV5070_NOTIFIERS_SW, NULL, 0, 0, 0); + + return NV_OK; +} + +NV_STATUS +dispobjCtrlCmdEventSetMemoryNotifies_IMPL +( + DispObject *pDispObject, + NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pDispObject); + DisplayApi *pDisplayApi = staticCast(pDispObject, DisplayApi); + RsClient *pClient = RES_GET_CLIENT(pDispObject); + Memory *pMemory; + NvU32 *pNotifyActions, i; + + // error check subDeviceInstance + if (pSetMemoryNotifiesParams->subDeviceInstance >= gpumgrGetSubDeviceCountFromGpu(pGpu)) + { + NV_PRINTF(LEVEL_INFO, "bad subDeviceInstance 0x%x\n", + pSetMemoryNotifiesParams->subDeviceInstance); + return NV_ERR_INVALID_ARGUMENT; + } + + pNotifyActions = pDisplayApi->pNotifyActions[pSetMemoryNotifiesParams->subDeviceInstance]; + + // ensure there's no pending notifications + for (i = 0; i < pDisplayApi->numNotifiers; i++) + { + if (pNotifyActions[i] != NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + return NV_ERR_STATE_IN_USE; + } + } + + if (pSetMemoryNotifiesParams->hMemory == NV01_NULL_OBJECT) + { + pDisplayApi->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pDisplayApi->pNotifierMemory = NULL; + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, pSetMemoryNotifiesParams->hMemory, &pMemory)); + + if (pMemory->pMemDesc->Size < sizeof(NvNotification) * pDisplayApi->numNotifiers) + { + return NV_ERR_INVALID_LIMIT; + } + + pDisplayApi->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pDisplayApi->pNotifierMemory = pMemory; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/disp_objs.c b/src/nvidia/src/kernel/gpu/disp/disp_objs.c new file mode 100644 index 000000000..0fb4c6143 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_objs.c @@ -0,0 +1,750 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing the display - both Disp and DispCommon +* entries with their insides (DispChannelList and DispDmaControlList) +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "core/locks.h" +#include "resserv/rs_client.h" + +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/disp_channel.h" +#include "gpu/disp/kern_disp.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "kernel/gpu/intr/intr.h" + +#include "class/cl0073.h" // NV04_DISPLAY_COMMON +#include "class/cl5070.h" // NV50_DISPLAY +#include "class/clc370.h" // NVC370_DISPLAY + +NV_STATUS +dispapiConstruct_IMPL +( + DisplayApi *pDisplayApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + CLASSDESCRIPTOR *pClassDescriptor; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + OBJGPU *pGpu; + KernelDisplay *pKernelDisplay; + NvBool bBcResource; + NvU32 i; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // Use gpuGetByRef instead of GpuResource because it will work even if resource + // isn't a GpuResource. + status = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if (status != NV_OK) + return status; + + // Find class in class db (verifies class is valid for this GPU) + status = gpuGetClassByClassId(pGpu, pParams->externalClassId, &pClassDescriptor); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "bad class 0x%x\n", pParams->externalClassId); + return NV_ERR_INVALID_CLASS; + } + + // Check display is enabled (i.e. not displayless) + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + if (pKernelDisplay == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + for (i = 0; i < NV2080_MAX_SUBDEVICES; i++) + pDisplayApi->pNotifyActions[i] = NULL; + + pDisplayApi->pGpuInRmctrl = NULL; + pDisplayApi->pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + pDisplayApi->bBcResource = bBcResource; + pDisplayApi->hNotifierMemory = NV01_NULL_OBJECT; + pDisplayApi->pNotifierMemory = NULL; + + gpuSetThreadBcState(pGpu, bBcResource); + + return status; +} + +void +dispapiDestruct_IMPL +( + DisplayApi *pDisplayApi +) +{ + NvU32 i; + + // Free notify actions memory if it's been allocated + for (i = 0; i < NV2080_MAX_SUBDEVICES; i++) + { + portMemFree(pDisplayApi->pNotifyActions[i]); + pDisplayApi->pNotifyActions[i] = NULL; + } +} + +static NV_STATUS +_dispapiNotifierInit +( + DisplayApi *pDisplayApi, + NvU32 numNotifiers, + NvU32 disableCmd +) +{ + NvU32 i, j; + NV_STATUS status = NV_OK; + + pDisplayApi->numNotifiers = numNotifiers; + + for (i = 0; i < NV2080_MAX_SUBDEVICES; i++) + { + // get memory for pNotifyActions table + pDisplayApi->pNotifyActions[i] = portMemAllocNonPaged( + pDisplayApi->numNotifiers * sizeof(NvU32)); + if (pDisplayApi->pNotifyActions[i] != NULL) + { + // default actions for each notifier type is disabled + for (j = 0; j < pDisplayApi->numNotifiers; j++) + { + pDisplayApi->pNotifyActions[i][j] = disableCmd; + } + } + else + { + goto fail; + } + } + + return status; + +fail: + // first release any notifyActions memory + for (i = 0; i < NV2080_MAX_SUBDEVICES; i++) + { + portMemFree(pDisplayApi->pNotifyActions[i]); + pDisplayApi->pNotifyActions[i] = NULL; + } + + return NV_ERR_INSUFFICIENT_RESOURCES; +} + +NV_STATUS +dispobjConstructHal_IMPL +( + DispObject *pDispObject, + CALL_CONTEXT *pCallContext, 
+ RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + DisplayApi *pDisplayApi = staticCast(pDispObject, DisplayApi); + Device *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device); + GpuResource *pGpuResource = staticCast(pDevice, GpuResource); + OBJGPU *pGpu = pGpuResource->pGpu; + NV_STATUS rmStatus = NV_ERR_INVALID_STATE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + { + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + rmStatus = kdispSelectClass_HAL(pGpu, pKernelDisplay, pCallContext->pResourceRef->externalClassId); + + if (rmStatus != NV_OK) + { + // If the operation fails, it should fail on the first try + NV_ASSERT(gpumgrIsParentGPU(pGpu)); + SLI_LOOP_BREAK; + } + } + SLI_LOOP_END; + + if (rmStatus != NV_OK) + return rmStatus; + + if(dynamicCast(pDisplayApi, NvDispApi)) + { + rmStatus = _dispapiNotifierInit(pDisplayApi, + NVC370_NOTIFIERS_MAXCOUNT, + NVC370_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE); + } + else + { + rmStatus = _dispapiNotifierInit(pDisplayApi, + NV5070_NOTIFIERS_MAXCOUNT, + NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE); + } + + return rmStatus; +} + +NV_STATUS +dispobjConstruct_IMPL +( + DispObject *pDispObject, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + pDispObject->rmFreeFlags = NV5070_CTRL_SET_RMFREE_FLAGS_NONE; + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return dispobjConstructHal_HAL(pDispObject, pCallContext, pParams); +} + +NV_STATUS +dispobjGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hDispObject, + DispObject **ppDispObject +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pClient, hDispObject, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDispObject = dynamicCast(pResourceRef->pResource, DispObject); + + return (*ppDispObject) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +dispobjGetByDevice_IMPL +( + RsClient *pClient, + Device *pDevice, + DispObject **ppDispObject +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = refFindChildOfType(RES_GET_REF(pDevice), classId(DispObject), NV_FALSE /*bExactMatch*/, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDispObject = dynamicCast(pResourceRef->pResource, DispObject); + + return (*ppDispObject) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +// +// Most display control calls take a subDeviceInstance argument. +// We need to verify that this argument is valid and then use it to +// locate the correct OBJGPU for the particular subdevice. 
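+// The helper below also switches the resolved GPU to unicast (broadcast disabled) so the control call only touches the requested subdevice.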
+// +NV_STATUS +dispapiSetUnicastAndSynchronize_KERNEL +( + DisplayApi *pDisplayApi, + OBJGPUGRP *pGpuGroup, + OBJGPU **ppGpu, + NvU32 subDeviceInstance +) +{ + NV_STATUS nvStatus = NV_OK; + + nvStatus = gpugrpGetGpuFromSubDeviceInstance(pGpuGroup, subDeviceInstance, ppGpu); + if (nvStatus != NV_OK) + return nvStatus; + + gpumgrSetBcEnabledStatus(*ppGpu, NV_FALSE); + + return nvStatus; +} + +NV_STATUS +dispapiControl_Prologue_IMPL +( + DisplayApi *pDisplayApi, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams +) +{ + NvU32 subdeviceIndex; + NV_STATUS status; + RmResource *pResource = staticCast(pDisplayApi, RmResource); + + if (dynamicCast(pDisplayApi, DispCommon)) + { + Device *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device); + GpuResource *pGpuResource = staticCast(pDevice, GpuResource); + + pResource->rpcGpuInstance = gpuGetInstance(pGpuResource->pGpu); + pDisplayApi->pGpuInRmctrl = pGpuResource->pGpu; + return rmresControl_Prologue_IMPL(pResource, pCallContext, pRmCtrlParams); + } + + // Read the subdevice ID out and swap GPU pointer + if (dynamicCast(pDisplayApi, NvDispApi)) + { + NVC370_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams; + + // + // All non-NULL disp control 5070 methods have + // NVC370_CTRL_CMD_BASE_PARAMS as their first member. + // + if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NVC370_CTRL_CMD_BASE_PARAMS))) + { + status = NV_ERR_INVALID_PARAM_STRUCT; + goto done; + } + subdeviceIndex = pBaseParameters->subdeviceIndex; + } + else if (dynamicCast(pDisplayApi, DispSwObj)) + { + NVC372_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams; + + // + // All non-NULL disp control C372 methods have + // NVC372_CTRL_CMD_BASE_PARAMS as their first member. + // + if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NVC372_CTRL_CMD_BASE_PARAMS))) + { + status = NV_ERR_INVALID_PARAM_STRUCT; + goto done; + } + subdeviceIndex = pBaseParameters->subdeviceIndex; + } + else + { + NV5070_CTRL_CMD_BASE_PARAMS *pBaseParameters = pRmCtrlParams->pParams; + + // + // All non-NULL disp control 5070 methods have + // NV5070_CTRL_CMD_BASE_PARAMS as their first member. 
+ // + if ((pBaseParameters == NULL) || (pRmCtrlParams->paramsSize < sizeof(NV5070_CTRL_CMD_BASE_PARAMS))) + { + status = NV_ERR_INVALID_PARAM_STRUCT; + goto done; + } + subdeviceIndex = pBaseParameters->subdeviceIndex; + } + + status = dispapiSetUnicastAndSynchronize_HAL(pDisplayApi, + pRmCtrlParams->pGpuGrp, + &pRmCtrlParams->pGpu, + subdeviceIndex); + + if (status == NV_OK) + { + pResource->rpcGpuInstance = gpuGetInstance(pRmCtrlParams->pGpu); + pDisplayApi->pGpuInRmctrl = pRmCtrlParams->pGpu; + return rmresControl_Prologue_IMPL(pResource, pCallContext, pRmCtrlParams); + } + +done: + return status; +} + +void +dispapiControl_Epilogue_IMPL +( + DisplayApi *pDisplayApi, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams +) +{ + if (dynamicCast(pDisplayApi, DispCommon) == NULL) + { + RmResource *pResource = staticCast(pDisplayApi, RmResource); + pResource->rpcGpuInstance = ~0; + } + + pDisplayApi->pGpuInRmctrl = NULL; +} + +NV_STATUS +dispapiControl_IMPL +( + DisplayApi *pDisplayApi, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + Intr *pIntr; + NV_STATUS status = NV_OK; + Device *pDevice = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, Device); + GpuResource *pGpuResource = staticCast(pDevice, GpuResource); + RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams; + OBJGPU *pGpu = pGpuResource->pGpu; + + NV_PRINTF(LEVEL_INFO, "class: 0x%x cmd 0x%x\n", + RES_GET_EXT_CLASS_ID(pDisplayApi), + pRmCtrlParams->cmd); + + pRmCtrlParams->pGpu = pGpu; + pRmCtrlParams->pGpuGrp = pGpuResource->pGpuGrp; + + gpuSetThreadBcState(pGpu, NV_TRUE); + + pIntr = GPU_GET_INTR(pGpu); + if (pIntr != NULL) + { + bitVectorClrAll(&pIntr->helperEngineMask); + bitVectorSet(&pIntr->helperEngineMask, MC_ENGINE_IDX_GR); + bitVectorSet(&pIntr->helperEngineMask, MC_ENGINE_IDX_DISP); + bitVectorSet(&pIntr->helperEngineMask, MC_ENGINE_IDX_FIFO); + } + + status = resControl_IMPL(staticCast(pDisplayApi, RsResource), + pCallContext, pParams); + + if (pIntr != NULL) + { + bitVectorClrAll(&pIntr->helperEngineMask); + } + + return status; +} + +NV_STATUS +dispswobjConstruct_IMPL +( + DispSwObj *pDispSwObj, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + return NV_OK; +} + +NV_STATUS +dispcmnConstruct_IMPL +( + DispCommon *pDispCommon, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + DisplayApi *pDisplayApi = staticCast(pDispCommon, DisplayApi); + + // + // Not adding the priv-level check for this class + // as it is being used by OpenGL from userspace.Once the Cleanup is done from the OpenGL + // we can add the priv level check here below + // + + pDispCommon->hotPlugMaskToBeReported = 0; + pDispCommon->hotUnplugMaskToBeReported = 0; + + return _dispapiNotifierInit(pDisplayApi, + NV0073_NOTIFIERS_MAXCOUNT, + NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE); +} + +NV_STATUS +dispcmnGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hDispCommon, + DispCommon **ppDispCommon +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pClient, hDispCommon, &pResourceRef); + if (status != NV_OK) + return status; + + *ppDispCommon = dynamicCast(pResourceRef->pResource, DispCommon); + + return (*ppDispCommon) ? 
NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +void +dispcmnGetByDevice_IMPL +( + RsClient *pClient, + NvHandle hDevice, + DispCommon **ppDispCommon +) +{ + Device *pDevice; + RsResourceRef *pResourceRef; + + *ppDispCommon = NULL; /* return failure by default */ + + if (deviceGetByHandle(pClient, hDevice, &pDevice) != NV_OK) + return; + + if (refFindChildOfType(RES_GET_REF(pDevice), + classId(DispCommon), + NV_FALSE, + &pResourceRef) != NV_OK) + return; + + *ppDispCommon = dynamicCast(pResourceRef->pResource, DispCommon); +} + +/** + * @brief Return NV_TRUE if RmFree() needs to preserve the HW, otherwise NV_FALSE + * + * @param[in] DispObject Pointer + */ +NvBool dispobjGetRmFreeFlags_IMPL(DispObject *pDispObject) +{ + return !!(pDispObject->rmFreeFlags & NV5070_CTRL_SET_RMFREE_FLAGS_PRESERVE_HW); +} + +/** + * @brief Clears the RmFree() temporary flags + * + * @param[in] DispObject Pointer + * + * @return void + */ +void dispobjClearRmFreeFlags_IMPL(DispObject *pDispObject) +{ + pDispObject->rmFreeFlags = NV5070_CTRL_SET_RMFREE_FLAGS_NONE; +} + +NV_STATUS +nvdispapiConstruct_IMPL +( + NvDispApi *pNvdispApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * @warning This function is deprecated! Please use dispchnGetByHandle. + */ +NV_STATUS +CliFindDispChannelInfo +( + NvHandle hClient, + NvHandle hDispChannel, + DispChannel **ppDispChannel, + NvHandle *phParent +) +{ + RsClient *pClient; + NV_STATUS status; + + *ppDispChannel = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NV_ERR_INVALID_CLIENT; + + status = dispchnGetByHandle(pClient, hDispChannel, ppDispChannel); + if (status != NV_OK) + return status; + + if (phParent) + *phParent = RES_GET_PARENT_HANDLE(*ppDispChannel); + + return NV_OK; +} + +/** + * @warning This function is deprecated! Please use dispcmnGetByHandle. + */ +NvBool +CliGetDispCommonInfo +( + NvHandle hClient, + NvHandle hDispCommon, + DisplayApi **ppDisplayApi +) +{ + RsClient *pClient; + NV_STATUS status; + DispCommon *pDispCommon; + + *ppDisplayApi = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NV_FALSE; + + status = dispcmnGetByHandle(pClient, hDispCommon, &pDispCommon); + if (status != NV_OK) + return NV_FALSE; + + *ppDisplayApi = staticCast(pDispCommon, DisplayApi); + + return NV_TRUE; +} + +/** + * @warning This function is deprecated! Please use dispobjGetByHandle. + */ +NvBool +CliGetDispInfo +( + NvHandle hClient, + NvHandle hObject, + DisplayApi **pDisplayApi +) +{ + if (!pDisplayApi) + return NV_FALSE; + + *pDisplayApi = CliGetDispFromDispHandle(hClient, hObject); + + return *pDisplayApi ? NV_TRUE : NV_FALSE; +} + +/** + * @warning This function is deprecated! Please use dispobjGetByHandle. 
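+ * Resolves the client under lock and returns the DispObject's DisplayApi interface, or NULL on failure.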
+ */ +DisplayApi * +CliGetDispFromDispHandle +( + NvHandle hClient, + NvHandle hDisp +) +{ + RsClient *pClient; + NV_STATUS status; + DispObject *pDispObject; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NULL; + + status = dispobjGetByHandle(pClient, hDisp, &pDispObject); + if (status != NV_OK) + return NULL; + + return staticCast(pDispObject, DisplayApi); +} + +// +// DISP Event RM Controls +// +NV_STATUS +dispapiCtrlCmdEventSetNotification_IMPL +( + DisplayApi *pDisplayApi, + NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams +) +{ + OBJGPU *pGpu = DISPAPI_GET_GPU(pDisplayApi); + NvU32 *pNotifyActions; + NV_STATUS status = NV_OK; + PEVENTNOTIFICATION pEventNotifications = inotifyGetNotificationList(staticCast(pDisplayApi, INotifier)); + + // NV01_EVENT must have been plugged into this subdevice + if (pEventNotifications == NULL) + { + NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION); + return NV_ERR_INVALID_STATE; + } + + // error check event index + if (pSetEventParams->event >= pDisplayApi->numNotifiers) + { + NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event); + return NV_ERR_INVALID_ARGUMENT; + } + + // error check subDeviceInstance + if (pSetEventParams->subDeviceInstance >= gpumgrGetSubDeviceMaxValuePlus1(pGpu)) + { + NV_PRINTF(LEVEL_INFO, "bad subDeviceInstance 0x%x\n", + pSetEventParams->subDeviceInstance); + return NV_ERR_INVALID_ARGUMENT; + } + + pNotifyActions = pDisplayApi->pNotifyActions[pSetEventParams->subDeviceInstance]; + + switch (pSetEventParams->action) + { + case NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE: + case NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT: + { + // must be in disabled state to transition to an active state + if (pNotifyActions[pSetEventParams->event] != NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + status = NV_ERR_INVALID_STATE; + break; + } + + // bind hEvent to particular subdeviceInst + status = bindEventNotificationToSubdevice(pEventNotifications, + pSetEventParams->hEvent, + pSetEventParams->subDeviceInstance); + if (status != NV_OK) + return status; + + pNotifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + + case NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE: + { + pNotifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c b/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c new file mode 100644 index 000000000..7c1bb0002 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/disp_sf_user.c @@ -0,0 +1,87 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This file contains functions managing DispSfUser class. +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" + +#include "gpu/gpu.h" +#include "gpu/disp/disp_sf_user.h" +#include "gpu/disp/kern_disp.h" + +NV_STATUS +dispsfConstruct_IMPL +( + DispSfUser *pDispSfUser, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDispSfUser); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + if (pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_USER_ROOT) + { + NV_PRINTF(LEVEL_ERROR, + "Failure allocating display class 0x%08x: Only root(admin)/kernel clients are allowed\n", + pParams->externalClassId); + + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Set sf user RegBase offset + kdispGetDisplaySfUserBaseAndSize_HAL(pGpu, pKernelDisplay, + &pDispSfUser->ControlOffset, + &pDispSfUser->ControlLength); + + return NV_OK; +} + +NV_STATUS +dispsfGetRegBaseOffsetAndSize_IMPL +( + DispSfUser *pDispSfUser, + OBJGPU *pGpu, + NvU32* pOffset, + NvU32* pSize +) +{ + if (pOffset) + { + *pOffset = pDispSfUser->ControlOffset; + } + + if (pSize) + { + *pSize = pDispSfUser->ControlLength; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c b/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c new file mode 100644 index 000000000..6b8bfc177 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/head/kernel_head.c @@ -0,0 +1,419 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 +#include "gpu/disp/head/kernel_head.h" +#include "objtmr.h" + +NV_STATUS +kheadConstruct_IMPL(KernelHead *pKernelHead) +{ + return NV_OK; +} + +NvU32 +kheadGetVblankTotalCounter_IMPL +( + KernelHead *pKernelHead +) +{ + return pKernelHead->Vblank.Counters.Total; +} + +void +kheadSetVblankTotalCounter_IMPL +( + KernelHead *pKernelHead, + NvU32 counter +) +{ + pKernelHead->Vblank.Counters.Total = counter; +} + +NvU32 +kheadGetVblankLowLatencyCounter_IMPL +( + KernelHead *pKernelHead +) +{ + return pKernelHead->Vblank.Counters.LowLatency; +} + +void +kheadSetVblankLowLatencyCounter_IMPL +( + KernelHead *pKernelHead, + NvU32 counter +) +{ + pKernelHead->Vblank.Counters.LowLatency = counter; +} + +NvU32 +kheadGetVblankNormLatencyCounter_IMPL +( + KernelHead *pKernelHead +) +{ + return pKernelHead->Vblank.Counters.NormLatency; +} + +void +kheadSetVblankNormLatencyCounter_IMPL +( + KernelHead *pKernelHead, + NvU32 counter +) +{ + pKernelHead->Vblank.Counters.NormLatency = counter; +} + +static NvBool +kheadIsVblankCallbackDue +( + VBLANKCALLBACK *pCallback, + NvU32 state, + NvU64 time, + NvU32 vblankCount +) +{ + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP) + { + // + // Time stamp based call backs don't have a valid vblank count + // vblank might be delayed and we might see only one vblank instead of two. + // so, count doesn't make sense in case of TS. + // and since the semantics is flip on vblank at TS >= TS specified, we can't + // use tmrCallbacks (they might flip outside vblank) + // + return (time >= pCallback->TimeStamp); + } + else + { + // + // These are now guaranteed to be sorted by VBlank + // and, now all have a VBlankCount to make processing simpler + // in this function, 'due' means "the next time the queue's counter is incremented, + // will it be time to process this callback?" This definition requires us to add 1 to + // the current vblankCount during the comparison. + // + if (VBLANK_STATE_PROCESS_IMMEDIATE & state) + { + return NV_TRUE; + } + + // Persistent callbacks that want to run every vblank + if ((pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) && (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT)) + { + return NV_TRUE; + } + + // Every other callback whose time has come. + if (pCallback->VBlankCount == 1+vblankCount) + { + // Some callbacks might have become due, but only want ISR time exclusively (no DPC) + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY__ISR_ONLY) + { + if (!(state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR)) + { + // Callback explicitly wants ISR time for its processing. + return NV_FALSE; + } + } + + return NV_TRUE; + } + } + return NV_FALSE; +} + +NvU32 +kheadCheckVblankCallbacksQueued_IMPL +( + OBJGPU *thisGpu, + KernelHead *pKernelHead, + NvU32 state, + NvU32 *expiring +) +{ + OBJTMR *pTmr; + NvU64 time; + NvU32 queues = 0; + + pTmr = GPU_GET_TIMER(thisGpu); + tmrGetCurrentTime(pTmr, &time); + + if (expiring) + { + *expiring = 0; + } + // + // return a union of queues (represented by VBLANK_STATE_PROCESS_XXX_LATENCY flags,) + // that are nonempty, i.e. have at least one callback. + // optionally, also return (via 'expiring', when non-NULL) which of those non-empty queues contain + // callbacks that are due to be processed, the next time that queue's counter gets incremented. 
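+    // For example: with state = VBLANK_STATE_PROCESS_LOW_LATENCY and a single
+    // non-persistent, count-based callback queued on pListLL whose VBlankCount
+    // equals Counters.LowLatency + 1, the return value is
+    // VBLANK_STATE_PROCESS_LOW_LATENCY and *expiring (if provided) has the same
+    // bit set; a callback due further in the future leaves *expiring at 0.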
+ // + if ( (pKernelHead->Vblank.Callback.pListLL) && + (state & VBLANK_STATE_PROCESS_LOW_LATENCY) ) + { + queues |= VBLANK_STATE_PROCESS_LOW_LATENCY; + + if (expiring) + { + NvU32 vblankCount; + VBLANKCALLBACK *pCallback; + + vblankCount = pKernelHead->Vblank.Counters.LowLatency; + pCallback = pKernelHead->Vblank.Callback.pListLL; + + do + { + if (kheadIsVblankCallbackDue(pCallback, state, time, vblankCount)) + { + *expiring |= VBLANK_STATE_PROCESS_LOW_LATENCY; + } + pCallback = pCallback->Next; + } + while (pCallback && !(*expiring & VBLANK_STATE_PROCESS_LOW_LATENCY)); + } + } + + if ( (pKernelHead->Vblank.Callback.pListNL) && + (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY) ) + { + queues |= VBLANK_STATE_PROCESS_NORMAL_LATENCY; + + if (expiring) + { + NvU32 vblankCount; + VBLANKCALLBACK *pCallback; + + vblankCount = pKernelHead->Vblank.Counters.NormLatency; + pCallback = pKernelHead->Vblank.Callback.pListNL; + + do + { + if (kheadIsVblankCallbackDue(pCallback, state, time, vblankCount)) + { + *expiring |= VBLANK_STATE_PROCESS_NORMAL_LATENCY; + } + + pCallback = pCallback->Next; + } + while (pCallback && !(*expiring & VBLANK_STATE_PROCESS_NORMAL_LATENCY)); + } + } + + return queues & state; +} +NvU32 +kheadReadVblankIntrState_IMPL +( + OBJGPU *pGpu, + KernelHead *pKernelHead +) +{ + // Check to make sure that our SW state grooves with the HW state + if (kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead)) + { + // HW is enabled, check if SW state is not enabled + if (pKernelHead->Vblank.IntrState != NV_HEAD_VBLANK_INTR_ENABLED) + { + NV_PRINTF(LEVEL_ERROR, + "Head %d: HW: %d != SW: %d! Fixing SW State...\n", + pKernelHead->PublicId, NV_HEAD_VBLANK_INTR_ENABLED, + pKernelHead->Vblank.IntrState); + pKernelHead->Vblank.IntrState = NV_HEAD_VBLANK_INTR_ENABLED; + } + } + else + { + // + // If HW is not enabled, SW state would depend on whether head is + // driving display. Check for both the SW states and base the + // SW state decision on head initialized state. + // If head is initialized SW state should be AVAILABLE else + // UNAVAILABLE. + // + if ((pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_ENABLED) || + (pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_UNAVAILABLE)) + { + NvU32 state = NV_HEAD_VBLANK_INTR_UNAVAILABLE; + + // + // We should say HW not enabled is AVAILABLE or UNAVAILABLE + // So, we'll base the correct decision on whether or not + // this head is driving any display. + // + if (kheadGetDisplayInitialized_HAL(pGpu, pKernelHead)) + { + state = NV_HEAD_VBLANK_INTR_AVAILABLE; + } + + if (state != pKernelHead->Vblank.IntrState) + { + NV_PRINTF(LEVEL_ERROR, + "Head %d: HW: %d != SW: %d! Fixing SW State...\n", + pKernelHead->PublicId, state, pKernelHead->Vblank.IntrState); + pKernelHead->Vblank.IntrState = state; + } + } + else if (pKernelHead->Vblank.IntrState == NV_HEAD_VBLANK_INTR_AVAILABLE) + { + // + // If HW is not enabled and head is not driving any display then + // the SW state should be UNAVAILABLE + // + if (!kheadGetDisplayInitialized_HAL(pGpu, pKernelHead)) + { + NV_PRINTF(LEVEL_ERROR, + "Head %d: HW: %d != SW: %d! 
Fixing SW State...\n", + pKernelHead->PublicId, NV_HEAD_VBLANK_INTR_UNAVAILABLE, pKernelHead->Vblank.IntrState); + pKernelHead->Vblank.IntrState = NV_HEAD_VBLANK_INTR_UNAVAILABLE; + } + } + } + + return pKernelHead->Vblank.IntrState; +} + +void +kheadWriteVblankIntrState_IMPL +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + NvU32 newstate +) +{ + NvU32 previous; + NvBool enablehw = NV_FALSE; // Dont update the hw by default + NvBool updatehw = NV_FALSE; // Dont enable the hw by default + + // Get the previous state for various other stuff + previous = pKernelHead->Vblank.IntrState; + + // Make sure we really support the requested next state + if ( (newstate != NV_HEAD_VBLANK_INTR_UNAVAILABLE) && + (newstate != NV_HEAD_VBLANK_INTR_AVAILABLE) && + (newstate != NV_HEAD_VBLANK_INTR_ENABLED) ) + { + NV_PRINTF(LEVEL_ERROR, "Unknown state %x requested on head %d.\n", + newstate, pKernelHead->PublicId); + return; + } + + // Spew where we were and where we are going for tracking... +#if defined(DEBUG) + + NV_PRINTF(LEVEL_INFO, "Changing vblank state on pGpu=%p head %d: ", pGpu, + pKernelHead->PublicId); + + switch(previous) + { + case NV_HEAD_VBLANK_INTR_UNAVAILABLE: + NV_PRINTF(LEVEL_INFO, "UNAVAILABLE -> "); + break; + case NV_HEAD_VBLANK_INTR_AVAILABLE: + NV_PRINTF(LEVEL_INFO, "AVAILABLE -> "); + break; + case NV_HEAD_VBLANK_INTR_ENABLED: + NV_PRINTF(LEVEL_INFO, "ENABLED -> "); + break; + default: + NV_PRINTF(LEVEL_INFO, "UNKNOWN -> "); + break; + } + + switch(newstate) + { + case NV_HEAD_VBLANK_INTR_UNAVAILABLE: + NV_PRINTF(LEVEL_INFO, "UNAVAILABLE\n"); + break; + case NV_HEAD_VBLANK_INTR_AVAILABLE: + NV_PRINTF(LEVEL_INFO, "AVAILABLE\n"); + break; + case NV_HEAD_VBLANK_INTR_ENABLED: + NV_PRINTF(LEVEL_INFO, "ENABLED\n"); + break; + default: + NV_PRINTF(LEVEL_INFO, "UNKNOWN\n"); + break; + } + +#endif + + // Move to the new state + switch(newstate) + { + // Move to the unavailable state. This has an implied disabled state. + case NV_HEAD_VBLANK_INTR_UNAVAILABLE: + + // If the hw is on, turn it off + if (previous == NV_HEAD_VBLANK_INTR_ENABLED) + { + enablehw = NV_FALSE; + updatehw = NV_TRUE; + } + break; + + // Move to the available state. This has an implied disabled state. + case NV_HEAD_VBLANK_INTR_AVAILABLE: + + // If the hw is on, turn it off + if (previous == NV_HEAD_VBLANK_INTR_ENABLED) + { + enablehw = NV_FALSE; + updatehw = NV_TRUE; + } + break; + + // Move to the enabled state. This has an implied available state. + case NV_HEAD_VBLANK_INTR_ENABLED: + + // If the hw was off, turn it on + if (previous != NV_HEAD_VBLANK_INTR_ENABLED) + { + enablehw = NV_TRUE; + updatehw = NV_TRUE; + } + break; + + default: + // We REALLY should never get here with the correct filtering above. + NV_PRINTF(LEVEL_ERROR, "Unknown state %x requested on head %d.\n", + newstate, pKernelHead->PublicId); + DBG_BREAKPOINT(); + return; + break; + } + + // Update the sw state + pKernelHead->Vblank.IntrState = newstate; + + // Update the hw + if (updatehw) + { + kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, enablehw); + } +} + diff --git a/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c b/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c new file mode 100644 index 000000000..7c2373559 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c @@ -0,0 +1,346 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Display Instance Memory Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/context_dma.h" +#include "disp/v03_00/dev_disp.h" + +/*! + * @brief Get display instance memory and hash table size + * + * @param[in] pGpu + * @param[in] PInstMem + * @param[out] pTotalInstMemSize pointer to instance memory size + * @param[out] pHashTableSize pointer to hash table size + * + * @return void + */ +void +instmemGetSize_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 *pTotalInstMemSize, + NvU32 *pHashTableSize +) +{ + if (pTotalInstMemSize != NULL) + { + *pTotalInstMemSize = (NV_UDISP_HASH_LIMIT - NV_UDISP_HASH_BASE + 1) + + (NV_UDISP_OBJ_MEM_LIMIT - NV_UDISP_OBJ_MEM_BASE + 1); + } + + if (pHashTableSize != NULL) + { + *pHashTableSize = (NV_UDISP_HASH_LIMIT - NV_UDISP_HASH_BASE + 1); + } +} + +NvU32 +instmemGetHashTableBaseAddr_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem +) +{ + return NV_UDISP_HASH_BASE; +} + +/*! 
Check if the instance memory pointer is valid */ +NvBool +instmemIsValid_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 offset +) +{ + return (((offset << 5) < NV_UDISP_OBJ_MEM_LIMIT) && + ((offset << 5) > NV_UDISP_HASH_LIMIT)); +} + +NV_STATUS +instmemHashFunc_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvHandle hClient, + NvHandle hContextDma, + NvU32 dispChannelNum, + NvU32 *pResult +) +{ + NV_ASSERT_OR_RETURN(pResult, NV_ERR_INVALID_ARGUMENT); + + // channel id is the range of 0-80 (as defined by the NV_PDISP_CHN_NUM_*) + NV_ASSERT(!(dispChannelNum >> 7)); + + // + // The hash function for display will be: + // hContextDma[9:0] + // ^ hContextDma[19:10] + // ^ hContextDma[29:20] + // ^ {hClient[7:0], hContextDma[31:30]} + // ^ {dispChannelNum[3:0], hClient[13:8]} + // ^ {7'h00, dispChannelNum[6:4]} + // + *pResult = ((hContextDma >> 0) & 0x3FF) ^ + ((hContextDma >> 10) & 0x3FF) ^ + ((hContextDma >> 20) & 0x3FF) ^ + (((hClient & 0xFF) << 2) | (hContextDma >> 30)) ^ + (((dispChannelNum & 0xF) << 6) | ((hClient >> 8) & 0x3F))^ + ((dispChannelNum >> 4) & 0x7); + + return NV_OK; +} + +/*! Generate hash table data */ +NvU32 +instmemGenerateHashTableData_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 hClient, + NvU32 offset, + NvU32 dispChannelNum +) +{ + return (SF_NUM(_UDISP, _HASH_TBL_CLIENT_ID, hClient) | + SF_NUM(_UDISP, _HASH_TBL_INSTANCE, offset) | + SF_NUM(_UDISP, _HASH_TBL_CHN, dispChannelNum)); +} + +/*! Write the Context DMA to display instance memory */ +NV_STATUS +instmemCommitContextDma_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma +) +{ + MEMORY_DESCRIPTOR *pMemDesc = memdescGetMemDescFromGpu(pContextDma->pMemDesc, pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + RmPhysAddr FrameAddr, Limit; + RmPhysAddr FrameAddr256Align; + RmPhysAddr Limit256Align; + NvU32 ctxDMAFlag; + NvU32 instoffset; + NvU8 *pInstMemCpuVA; + NvU32 kind; + NvBool bIsSurfaceBl = NV_FALSE; + TRANSFER_SURFACE dest = {0}; + + // This function must be called in unicast. + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + instoffset = pContextDma->Instance[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] << 4; + NV_ASSERT_OR_RETURN(instoffset, NV_ERR_INVALID_OBJECT); + + FrameAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + Limit = FrameAddr + pContextDma->Limit; + + kind = memdescGetPteKindForGpu(pMemDesc, pGpu); + + // Cannot bind a Z surface to display. Bug 439965. 
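+    // (The ctx dma kind written below only distinguishes _PITCH from
+    // _BLOCKLINEAR, so depth (Z) PTE kinds are rejected here with
+    // NV_ERR_INVALID_ARGUMENT.)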
+ if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_Z, kind)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Set surface format + // + ctxDMAFlag = 0; + + bIsSurfaceBl = memmgrIsSurfaceBlockLinear_HAL(pMemoryManager, pContextDma->pMemory, + kind, pContextDma->Flags); + + if (bIsSurfaceBl) + { + ctxDMAFlag |= SF_DEF(_DMA, _KIND, _BLOCKLINEAR); + } + else + { + ctxDMAFlag |= SF_DEF(_DMA, _KIND, _PITCH); + } + + if (pContextDma->bReadOnly) + { + ctxDMAFlag |= SF_DEF(_DMA, _ACCESS, _READ_ONLY); + } + else + { + ctxDMAFlag |= SF_DEF(_DMA, _ACCESS, _READ_AND_WRITE); + } + + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_SYSMEM: + case ADDR_REGMEM: + // SOC Display always need _PHYSICAL_NVM flag to be set as display is not over PCI + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_NVM); + } + else + { + if (pContextDma->CacheSnoop) + ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_PCI_COHERENT); + else + ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_PCI); + } + break; + case ADDR_FBMEM: + ctxDMAFlag |= SF_DEF(_DMA, _TARGET_NODE, _PHYSICAL_NVM); + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid address space: %d\n", + memdescGetAddressSpace(pMemDesc)); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = instoffset; + + pInstMemCpuVA = memmgrMemBeginTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC); + if (pInstMemCpuVA == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_TARGET_NODE), ctxDMAFlag); // word 0 + + // Address in disp ctxdma is 256B aligned + FrameAddr256Align = FrameAddr >> 8; + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_BASE_LO), // word 1 + NvU64_LO32(FrameAddr256Align)); + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_BASE_HI), // word 2 + NvU64_HI32(FrameAddr256Align)); + + Limit256Align = Limit >> 8; + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_LO), // word 3 + NvU64_LO32(Limit256Align)); + MEM_WR32(pInstMemCpuVA + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_HI), // word 4 + NvU64_HI32(Limit256Align)); + + memmgrMemEndTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC); + + return NV_OK; +} + +/*! + * @brief Update the Context DMA already in display instance memory + * + * NOTE: this control call may be called at high IRQL on WDDM. 
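+ *       See the related note in instmemStateLoad_IMPL about keeping a
+ *       persistent instance-memory mapping for paths that cannot create a
+ *       BAR2 mapping at control-call time.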
+ */ +NV_STATUS +instmemUpdateContextDma_v03_00 +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + NvU64 *pNewAddress, + NvU64 *pNewLimit, + NvHandle hMemory, + NvU32 comprInfo +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU8 *pInst; + NvU32 instoffset; + TRANSFER_SURFACE dest = {0}; + + // Must use comprInfo to specify kind + NV_CHECK_OR_RETURN(LEVEL_SILENT, hMemory == NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + + instoffset = pContextDma->Instance[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] << 4; + NV_ASSERT(instoffset); + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = instoffset; + + pInst = memmgrMemBeginTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC | TRANSFER_FLAGS_SHADOW_INIT_MEM); + if (pInst == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto exit; + } + + if (pNewAddress != NULL) + { + // Address in disp ctxdma is 256B aligned + NvU64 newAddress256Align = (*pNewAddress) >> 8; + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_BASE_LO), + NvU64_LO32(newAddress256Align)); + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_BASE_HI), + NvU64_HI32(newAddress256Align)); + } + + if (pNewLimit != NULL) + { + NvU64 newLimit256Align = (*pNewLimit) >> 8; + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_LO), + NvU64_LO32(newLimit256Align)); + MEM_WR32(pInst + SF_OFFSET(NV_DMA_ADDRESS_LIMIT_HI), + NvU64_HI32(newLimit256Align)); + } + + if (comprInfo != NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_NONE) + { + NvU32 word = MEM_RD32(pInst + SF_OFFSET(NV_DMA_KIND)); + + if (comprInfo == NV0002_CTRL_CMD_UPDATE_CONTEXTDMA_FLAGS_USE_COMPR_INFO_FORMAT_BLOCK_LINEAR) + { + word = FLD_SF_DEF(_DMA, _KIND, _BLOCKLINEAR, word); + } + else + { + word = FLD_SF_DEF(_DMA, _KIND, _PITCH, word); + } + + MEM_WR32(pInst + SF_OFFSET(NV_DMA_KIND), word); + } + + memmgrMemEndTransfer(pMemoryManager, &dest, NV_DMA_SIZE, + TRANSFER_FLAGS_SHADOW_ALLOC | TRANSFER_FLAGS_SHADOW_INIT_MEM); + +exit: + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c b/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c new file mode 100644 index 000000000..515e53a58 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/inst_mem/disp_inst_mem.c @@ -0,0 +1,884 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/**************************** Instmem Rotuines *****************************\ +* * +* Display instance memory object function Definitions. * +* * +\***************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/disp_channel.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/mem_mgr/context_dma.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "os/nv_memory_type.h" +#include "os/os.h" + +/*! + * Display Context DMA instance memory is always 2 16B blocks in size on all chips. There + * is no HW support for scatter lists. Instance memory should be naturally aligned. + */ +#define DISPLAY_CONTEXT_DMA_INST_SIZE 2 +#define DISPLAY_CONTEXT_DMA_INST_ALIGN 2 + +#define DISP_INST_MEM_EHEAP_OWNER NvU32_BUILD('i','n','s','t') + +/*! + * A hardware display hash table entry. + */ +typedef struct +{ + NvHandle ht_ObjectHandle; + NvV32 ht_Context; +} DISP_HW_HASH_TABLE_ENTRY; + + +/*! @brief Constructor */ +NV_STATUS +instmemConstruct_IMPL +( + DisplayInstanceMemory *pInstMem +) +{ + pInstMem->pInstMem = NULL; + pInstMem->pAllocedInstMemDesc = NULL; + pInstMem->pInstMemDesc = NULL; + pInstMem->pHashTable = NULL; + pInstMem->pInstHeap = NULL; + + return NV_OK; +} + + +/*! + * @brief Instmem destructor + */ +void +instmemDestruct_IMPL +( + DisplayInstanceMemory *pInstMem +) +{ +} + +/*! @brief Initialized heap related files in display instance memory */ +static NV_STATUS +instmemInitBitmap +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 instMemSize, + NvU32 hashTableSize +) +{ + NV_STATUS status = NV_OK; + NvU64 base, limit; + NvU64 allocSize, allocOffset; + NvU32 allocFlags; + NvU32 freeInstMemBase; + NvU32 freeInstMemSize; + NvU32 freeInstMemMax; + + // + // Locate and size the free instance area. This is the base where + // allocations should start and size of the allocatable inst mem. + // Initially hash table is the only entity that's allocated. + // + freeInstMemBase = hashTableSize >> 4; + freeInstMemSize = instMemSize - hashTableSize; + freeInstMemMax = (freeInstMemSize / 16) & ~0x07; + + // Allocate the Instmem heap manager + pInstMem->pInstHeap = portMemAllocNonPaged(sizeof(OBJEHEAP)); + if (pInstMem->pInstHeap == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Unable to allocate instance memory heap manager.\n"); + status = NV_ERR_NO_MEMORY; + goto exit; + } + portMemSet(pInstMem->pInstHeap, 0x00, sizeof(OBJEHEAP)); + + NV_PRINTF(LEVEL_INFO, "FB Free Size = 0x%x\n", freeInstMemSize); + NV_PRINTF(LEVEL_INFO, "FB Free Inst Base = 0x%x\n", freeInstMemBase); + NV_PRINTF(LEVEL_INFO, "FB Free Inst Max = 0x%x\n", + freeInstMemMax + freeInstMemBase); + + // + // Construct the Instmem heap manager - Pre-allocate mgmt structures + // to avoid dynamic allocation and allow bind/unbind at high IRQL + // on Windows. Size to fill hash table + NULL instance. + // + base = freeInstMemBase; + limit = freeInstMemBase + freeInstMemMax + 1; + constructObjEHeap( + pInstMem->pInstHeap, + base, + limit, + 0, // sizeofMemBlock + pInstMem->nHashTableEntries + 1); // numPreAllocMemStruct + + // Reserve instance 0 as the NULL instance. 
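+    // An instance value of 0 is treated as "invalid/unbound" elsewhere (e.g.
+    // _instmemFreeContextDma() returns early for it and the hash table encodes
+    // it as NV_UDISP_HASH_TBL_INSTANCE_INVALID), so the first block is claimed
+    // up front with a fixed-address allocation and never handed out to a real
+    // context dma.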
+ allocSize = 1; + allocOffset = base; + allocFlags = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + status = pInstMem->pInstHeap->eheapAlloc( + pInstMem->pInstHeap, // thisHeap + DISP_INST_MEM_EHEAP_OWNER, // owner + &allocFlags, // flags + &allocOffset, // offset + &allocSize, // size + 1, // offsetAlign + 1, // sizeAlign + NULL, // ppMemBlock + NULL, // isolation id + NULL); // callback ownership checker + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "eheapAlloc failed for instance memory heap manager.\n"); + status = NV_ERR_NO_MEMORY; + } + +exit: + return status; +} + +/*! @brief Initialized hash table related files in display instance memory */ +static NV_STATUS +instmemInitHashTable +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 hashTableSize +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + pInstMem->nHashTableEntries = hashTableSize / sizeof(DISP_HW_HASH_TABLE_ENTRY); + pInstMem->hashTableBaseAddr = instmemGetHashTableBaseAddr_HAL(pGpu, pInstMem); + + // Allocate Hash Table structure. + pInstMem->pHashTable = portMemAllocNonPaged(pInstMem->nHashTableEntries * + sizeof(SW_HASH_TABLE_ENTRY)); + if (pInstMem->pHashTable == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Unable to allocate hash table.\n"); + status = NV_ERR_NO_MEMORY; + goto exit; + } + + // Initialize Hash Table. + for (i = 0; i < pInstMem->nHashTableEntries; i++) + { + pInstMem->pHashTable[i].pContextDma = NULL; + } + +exit: + return status; +} + +/*! + * @brief Save instance memory parameters + * + * For dGPU called from mem_mgr initialization with reserved frame buffer memory. For SOC + * we dynamically allocate system memory later. + */ +void +instmemSetMemory_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NV_ADDRESS_SPACE dispInstMemAddrSpace, + NvU32 dispInstMemAttr, + NvU64 dispInstMemBase, + NvU32 dispInstMemSize +) +{ + pInstMem->instMemAddrSpace = dispInstMemAddrSpace; + pInstMem->instMemAttr = dispInstMemAttr; + pInstMem->instMemBase = dispInstMemBase; + pInstMem->instMemSize = dispInstMemSize; +} + +/*! @brief Initialize instance memory descriptor */ +static NV_STATUS +instmemInitMemDesc +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 instMemSize +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // + // FB reserved memory logic not be getting called for Tegra system memory scanout. + // So as InstMem Desc is not getting initialized, currently hardcoding + // dispInstMemAttr to NV_MEMORY_CACHED this needs to be set based on system configuration/registry parameter. + // + instmemSetMemory(pGpu, pInstMem, + ADDR_SYSMEM, NV_MEMORY_CACHED, + 0 /* base */, instMemSize); + } + else if (IS_GSP_CLIENT(pGpu)) + { + // ToDO: Need to respect RM overrides and keep monolithic design same as offload. 
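+        // The base passed here is relative to the FB reserved-memory carve-out;
+        // memmgrGetRsvdMemoryBase() is added when the memdesc is described in
+        // the ADDR_FBMEM case below.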
+ instmemSetMemory(pGpu, pInstMem, + ADDR_FBMEM, NV_MEMORY_WRITECOMBINED, + 0 , instMemSize); + } + + switch (pInstMem->instMemAddrSpace) + { + default: + case ADDR_FBMEM: + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreate(&pInstMem->pInstMemDesc, pGpu, + pInstMem->instMemSize, + DISP_INST_MEM_ALIGN, + NV_TRUE, pInstMem->instMemAddrSpace, + pInstMem->instMemAttr, + MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO), + exit); + + memdescDescribe(pInstMem->pInstMemDesc, + ADDR_FBMEM, + memmgrGetRsvdMemoryBase(pMemoryManager) + pInstMem->instMemBase, + pInstMem->instMemSize); + } + break; + + case ADDR_SYSMEM: + { + // + // memdescAlloc won't (currently) honor a request for sysmem alloc alignment! Overallocate + // and round up the address to work around this. + // + // Create a sub-memdesc to the aligned block. This keeps the alignment calculation local + // to this function. + // + NvU64 base; + NvU64 offset; + NvBool bContig = NV_TRUE; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // + // On Orin, display FE goes through the NISO SMMU to read + // from Instance Memory. As such, there's absolutely no + // reason why we need a contiguous allocation for Instance + // Memory. + // + bContig = NV_FALSE; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreate(&pInstMem->pAllocedInstMemDesc, pGpu, + instMemSize + (DISP_INST_MEM_ALIGN - RM_PAGE_SIZE), + DISP_INST_MEM_ALIGN, + bContig, pInstMem->instMemAddrSpace, + pInstMem->instMemAttr, + MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO), + exit); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescAlloc(pInstMem->pAllocedInstMemDesc), + exit); + + base = memdescGetPhysAddr(pInstMem->pAllocedInstMemDesc, AT_GPU, 0); + offset = RM_ALIGN_UP(base, DISP_INST_MEM_ALIGN) - base; + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescCreateSubMem(&pInstMem->pInstMemDesc, pInstMem->pAllocedInstMemDesc, + pGpu, + offset, + instMemSize), + exit); + } + break; + } + +exit: + // Clean-up is handled by the caller + return status; +} + +/*! 
@brief Free all memory allocations done for display instance memory */ +static void +instmemDestroy +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem +) +{ + // Free up the inst mem descriptors + if (pInstMem->pInstMemDesc != NULL) + { + memdescDestroy(pInstMem->pInstMemDesc); + pInstMem->pInstMemDesc = NULL; + } + if (pInstMem->pAllocedInstMemDesc != NULL) + { + memdescFree(pInstMem->pAllocedInstMemDesc); + memdescDestroy(pInstMem->pAllocedInstMemDesc); + pInstMem->pAllocedInstMemDesc = NULL; + } + + if (pInstMem->pInstHeap != NULL) + { + pInstMem->pInstHeap->eheapDestruct(pInstMem->pInstHeap); + portMemFree(pInstMem->pInstHeap); + pInstMem->pInstHeap = NULL; + } + + portMemFree(pInstMem->pHashTable); + pInstMem->pHashTable = NULL; +} + +NV_STATUS +instmemStateInitLocked_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem +) +{ + NV_STATUS status = NV_OK; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 hClient = pGpu->hInternalClient; + NvU32 hSubdevice = pGpu->hInternalSubdevice; + NvU32 instMemSize, hashTableSize; + NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS ctrlParams; + + instmemGetSize_HAL(pGpu, pInstMem, &instMemSize, &hashTableSize); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitHashTable(pGpu, pInstMem, hashTableSize), exit); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitBitmap(pGpu, pInstMem, instMemSize, hashTableSize), exit); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemInitMemDesc(pGpu, pInstMem, instMemSize), exit); + + // Make internal RPC to write the instance memory register + ctrlParams.instMemAddrSpace = memdescGetAddressSpace(pInstMem->pInstMemDesc); + ctrlParams.instMemCpuCacheAttr = memdescGetCpuCacheAttrib(pInstMem->pInstMemDesc); + ctrlParams.instMemPhysAddr = memdescGetPhysAddr(pInstMem->pInstMemDesc, AT_GPU, 0); + ctrlParams.instMemSize = memdescGetSize(pInstMem->pInstMemDesc); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, hClient, hSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM, + &ctrlParams, sizeof(ctrlParams)), exit); + +exit: + if (status != NV_OK) + { + instmemDestroy(pGpu, pInstMem); + } + + return status; +} + +void +instmemStateDestroy_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem +) +{ + instmemDestroy(pGpu, pInstMem); +} + +NV_STATUS +instmemStateLoad_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 flags +) +{ + NvBool bPersistent; + + // + // We keep a persistent mapping to instance memory in two cases: + // * Windows issues bind/update/unbind control calls with with BYPASS_LOCK, + // so we cannot generate a new BAR2 mapping at control call time. + // * System memory backing. + // + bPersistent = (pInstMem->instMemAddrSpace == ADDR_SYSMEM); + if (bPersistent) + { + // + // Windows issues bind/update/unbind control calls with BYPASS_LOCK, + // so we generate a new BAR2 mapping control call time. + // + pInstMem->pInstMem = memdescMapInternal(pGpu, pInstMem->pInstMemDesc, + TRANSFER_FLAGS_PERSISTENT_CPU_MAPPING); + if (pInstMem->pInstMem == NULL) + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +NV_STATUS +instmemStateUnload_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 flags +) +{ + if (pInstMem->pInstMem != NULL) + { + memdescUnmapInternal(pGpu, pInstMem->pInstMemDesc, TRANSFER_FLAGS_NONE); + pInstMem->pInstMem = NULL; + } + + return NV_OK; +} + +/*! + * @brief Reserve a chunk of display instance memory (will always be for Context DMAs). 
+ * @return offset from the base of display instance memory (not base of FB). + */ +static NV_STATUS +_instmemReserveContextDma +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 *offset +) +{ + NV_STATUS rmStatus; + NvU64 allocSize = DISPLAY_CONTEXT_DMA_INST_SIZE; // size << 4; + NvU64 allocOffset; + NvU32 allocFlags = 0; + + *offset = 0; + + rmStatus = pInstMem->pInstHeap->eheapAlloc( + pInstMem->pInstHeap, // thisHeap + DISP_INST_MEM_EHEAP_OWNER, // owner + &allocFlags, // flags + &allocOffset, // offset + &allocSize, // size + DISPLAY_CONTEXT_DMA_INST_ALIGN, // offsetAlign + DISPLAY_CONTEXT_DMA_INST_ALIGN, // sizeAlign + NULL, // ppMemBlock + NULL, // isolation id + NULL); // callback ownership checker + + // return the allocation offset if successful + if (rmStatus == NV_OK) + { + *offset = (NvU32)allocOffset; + } + else + { + rmStatus = NV_ERR_NO_MEMORY; + } + + return rmStatus; +} + +/*! + * @brief Free display instance memory reserved for Context DMA. + */ +static NV_STATUS +_instmemFreeContextDma +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + NvU32 offset +) +{ + // + // If instance is already set to 0, then it has already been freed. This can + // happen in some cases when a mode switch is happening while MIDI is playing + // using the timer alarm notifies. Ignoring this case can potentially cause a + // protection fault, so be careful. + // + if (offset == 0) + return NV_OK; + + if (pInstMem->pInstHeap == NULL) + return NV_OK; + + pInstMem->pInstHeap->eheapFree( + pInstMem->pInstHeap, // thisHeap + offset); // offset + + return NV_OK; +} + +static NV_STATUS +_instmemRemoveHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 htEntry, entryOffset; + TRANSFER_SURFACE dest = {0}; + DISP_HW_HASH_TABLE_ENTRY entry; + + for (htEntry = 0; htEntry < pInstMem->nHashTableEntries; htEntry++) + { + if ( (pInstMem->pHashTable[htEntry].pContextDma == pContextDma) && + (pInstMem->pHashTable[htEntry].pDispChannel == pDispChannel)) + { + pInstMem->pHashTable[htEntry].pContextDma = NULL; + pInstMem->pHashTable[htEntry].pDispChannel = NULL; + + // + // If we found the entry, clear the inst mem copy of the entry + // Start with offset of base of inst mem + // Add offset of base of hash table from base of inst mem + // Add the offset of entry from base of hash table + // + entryOffset = pInstMem->hashTableBaseAddr + + (sizeof(DISP_HW_HASH_TABLE_ENTRY) * htEntry); + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = entryOffset; + + entry.ht_ObjectHandle = 0; + entry.ht_Context = instmemGenerateHashTableData_HAL(pGpu, pInstMem, + 0 /* client id */, + 0 /* NV_UDISP_HASH_TBL_INSTANCE_INVALID */, + 0 /* dispChannelNum */); + + NV_ASSERT_OK_OR_RETURN(memmgrMemWrite(pMemoryManager, &dest, &entry, sizeof(entry), + TRANSFER_FLAGS_NONE)); + + return NV_OK; + } + } + + return NV_ERR_INVALID_STATE; +} + +static NV_STATUS +_instmemAddHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel, + NvU32 offset +) +{ + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pContextDma); + NvU32 entryOffset, dispChannelNum; + NvU32 Limit, i, Entry; + NvHandle handle = RES_GET_HANDLE(pContextDma); + NvU32 hash; + NV_STATUS status; + TRANSFER_SURFACE dest = {0}; + 
DISP_HW_HASH_TABLE_ENTRY entry; + + status = kdispGetChannelNum_HAL(pKernelDisplay, pDispChannel->DispClass, pDispChannel->InstanceNumber, &dispChannelNum); + if (status != NV_OK) + return status; + + // Query HAL for starting entry for this pair. + instmemHashFunc_HAL(pGpu, pInstMem, hClient, RES_GET_HANDLE(pContextDma), dispChannelNum, &hash); + + // + // Since all the ctx dmas are 32 byte aligned, we don't need to + // store offsets in bytes. We store "which 32 byte chunk" does the + // ctx dma reside in. So, right shift the whole thing by 5 after + // left shifting by 4 (need to left shift by 4 since internally we + // track offsets in 16 byte chunks + // + offset >>= (5 - 4); // offset <<= 4; followed by offset >>= 5 + + if (offset == 0) //NV_UDISP_HASH_TBL_INSTANCE_INVALID + { + NV_PRINTF(LEVEL_ERROR, "Instance pointer is invalid!!\n"); + return (NV_ERR_GENERIC); + } + + // + // Make sure instance memory pointer is valid as well. That is, + // it's within the mask range of possible instance values + // + NV_ASSERT(instmemIsValid_HAL(pGpu, pInstMem, offset)); + + // Make sure that hash is valid as well. + NV_ASSERT(hash < pInstMem->nHashTableEntries); + + // + // Search table for free slot. + // + // Here's the Old way that we did this - Allows for arbitrary sized hash tables + // + // Limit = hash + pDispHalPvtInfo->pPram[ChID].nHashTableEntries; // loop over whole table + // Entry = hash; + // while(Entry < Limit) + // { + // if (pDispHalPvtInfo->pPram[ChID].pHashTable[Entry].Object == NULL) + // break; + // + // // + // // if we just checked the last entry and have more entries + // // to check for empty, wrap search back to beginning of table + // // + // if (Entry == (pDispHalPvtInfo->pPram[ChID].nHashTableEntries-1) && + // ((Entry + 1) < Limit)) + // { + // Limit = Limit - Entry - 1; // -1 since we count the one we just checked + // Entry = 0; + // continue; + // } + // + // Entry++; + // } + // + // But since we know that this hash table is always 512 in size, let's go ahead + // and make this assumption to make the loops faster. Or even better, lets just + // make sure that the Hash Depth is a power of 2. That way, we can use + // nHashTableEntries - 1 as the mask of what entries are valid - and this allows for any + // nHashTableEntries that is a power of 2. + // + NV_ASSERT(!(pInstMem->nHashTableEntries & (pInstMem->nHashTableEntries - 1))); + + Limit = hash + pInstMem->nHashTableEntries; // loop over whole table + Entry = hash; + for (i = hash; i < Limit; i++) + { + // Mask off high bits of i since we loop the hash table. + Entry = i & (pInstMem->nHashTableEntries - 1); + if (pInstMem->pHashTable[Entry].pContextDma == NULL) + break; + } + + if (pInstMem->pHashTable[Entry].pContextDma != NULL) + { + NV_PRINTF(LEVEL_ERROR, "Display Hash table is FULL!!\n"); + return NV_ERR_TOO_MANY_PRIMARIES; + } + + entryOffset = pInstMem->hashTableBaseAddr + + (Entry * sizeof(DISP_HW_HASH_TABLE_ENTRY)); + + // Add object to the Hash Table. 
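+    // Two copies are kept in sync: the SW shadow entry below (used by
+    // _instmemProbeHashEntry()/_instmemRemoveHashEntry()) and the HW entry
+    // written into instance memory via memmgrMemWrite(), which lets display
+    // resolve the (client, ctx dma handle, channel) tuple to an instance offset.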
+ pInstMem->pHashTable[Entry].pContextDma = pContextDma; + pInstMem->pHashTable[Entry].pDispChannel = pDispChannel; + + dest.pMemDesc = pInstMem->pInstMemDesc; + dest.offset = entryOffset; + + entry.ht_ObjectHandle = handle; + + // Note that we have full 32 bit client id at this point and we only need to tell hw the lower 14 bits + entry.ht_Context = instmemGenerateHashTableData_HAL( + pGpu, + pInstMem, + hClient, + offset, + dispChannelNum); + + NV_ASSERT_OK_OR_RETURN(memmgrMemWrite(pMemoryManager, &dest, &entry, sizeof(entry), + TRANSFER_FLAGS_NONE)); + + return NV_OK; +} + +/*! + * @brief Is the this ContextDma bound to this DispChannel + */ +static NV_STATUS +_instmemProbeHashEntry +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NvU32 dispChannelNum; + NV_STATUS status; + NvU32 hash = 0; + NvU32 limit; + NvU32 i; + + status = kdispGetChannelNum_HAL(pKernelDisplay, pDispChannel->DispClass, pDispChannel->InstanceNumber, &dispChannelNum); + if (status == NV_OK) + { + instmemHashFunc_HAL(pGpu, pInstMem, + RES_GET_CLIENT_HANDLE(pContextDma), + RES_GET_HANDLE(pContextDma), + dispChannelNum, &hash); + } + + // Hash table must be a power of 2 currently + NV_ASSERT(!(pInstMem->nHashTableEntries & (pInstMem->nHashTableEntries - 1))); + + limit = hash + pInstMem->nHashTableEntries; // loop over whole table + + for (i = hash; i < limit; i++) { NvU32 htEntry = i & (pInstMem->nHashTableEntries - 1); + + if ((pInstMem->pHashTable[htEntry].pDispChannel == pDispChannel) && + (pInstMem->pHashTable[htEntry].pContextDma == pContextDma)) + { + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +/*! + * @brief Bind the ContextDma to the given Display Channel + */ +NV_STATUS +instmemBindContextDma_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + NvU32 gpuSubDevInst; + NV_STATUS status; + + gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + // Production SW requires each context is bound only once + status = _instmemProbeHashEntry(pGpu, pInstMem, pContextDma, pDispChannel); + if (status == NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "The ctx dma (0x%x) has already been bound\n", + RES_GET_HANDLE(pContextDma)); + status = NV_ERR_STATE_IN_USE; + goto exit; + } + + if (pContextDma->InstRefCount[gpuSubDevInst] == 0) + { + // Reserve inst mem space for this ctx dma + status = _instmemReserveContextDma(pGpu, pInstMem, &(pContextDma->Instance[gpuSubDevInst])); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to alloc space in disp inst mem for ctx dma 0x%x\n", + RES_GET_HANDLE(pContextDma)); + goto exit; + } + + // Call into HAL to write inst mem with the ctx dma info + status = instmemCommitContextDma_HAL(pGpu, pInstMem, pContextDma); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to commit ctx dma (0x%x) to inst mem\n", + RES_GET_HANDLE(pContextDma)); + _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]); + pContextDma->Instance[gpuSubDevInst] = 0; + goto exit; + } + } + + // + // Now add the hash table entry for this ctx dma + // We loop around this call instead of looping at MEM_WR level because we + // also want to propagate the SW hash table. 
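+    // If no free slot is left, _instmemAddHashEntry() fails with
+    // NV_ERR_TOO_MANY_PRIMARIES and the instance block reserved above is
+    // released again in the error path below.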
+ // + status = _instmemAddHashEntry(pGpu, pInstMem, + pContextDma, + pDispChannel, + pContextDma->Instance[gpuSubDevInst]); + if (status != NV_OK) + { + if (pContextDma->InstRefCount[gpuSubDevInst] == 0) + { + instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma); + _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]); + pContextDma->Instance[gpuSubDevInst] = 0; + } + goto exit; + } + + // We have one more reference to the context DMA in instance memory now. + pContextDma->InstRefCount[gpuSubDevInst]++; + +exit: + + return status; +} + +/*! + * @brief Unbind the ContextDma from the given Display Channel + */ +NV_STATUS +instmemUnbindContextDma_IMPL +( + OBJGPU *pGpu, + DisplayInstanceMemory *pInstMem, + ContextDma *pContextDma, + DispChannel *pDispChannel +) +{ + NvU32 gpuSubDevInst; + NV_STATUS status; + + gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + // If ContextDma is not bound to this subdevice, there is no bookkeeping to do + status = _instmemRemoveHashEntry(pGpu, pInstMem, pContextDma, pDispChannel); + if (status == NV_OK) + { + NV_ASSERT(pContextDma->InstRefCount[gpuSubDevInst]); + if (pContextDma->InstRefCount[gpuSubDevInst]) + { + pContextDma->InstRefCount[gpuSubDevInst]--; + + // Remove DMA object if this is the last binding + if (pContextDma->InstRefCount[gpuSubDevInst] == 0) + { + instmemDecommitContextDma_HAL(pGpu, pInstMem, pContextDma); + _instmemFreeContextDma(pGpu, pInstMem, pContextDma->Instance[gpuSubDevInst]); + pContextDma->Instance[gpuSubDevInst] = 0; + } + } + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/disp/kern_disp.c b/src/nvidia/src/kernel/gpu/disp/kern_disp.c new file mode 100644 index 000000000..b68a084df --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/kern_disp.c @@ -0,0 +1,979 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** +* +* Kernel Display Module +* This file contains functions managing display on CPU RM +* +******************************************************************************/ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "resserv/resserv.h" +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "os/os.h" + +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/disp/head/kernel_head.h" +#include "gpu/disp/disp_objs.h" +#include "gpu_mgr/gpu_mgr.h" +#include "objtmr.h" +#include "core/locks.h" + +#include "kernel/gpu/intr/engine_idx.h" + +#include "ctrl/ctrl2080.h" + +#include "class/cl5070.h" +#include "class/cl917a.h" +#include "class/cl917b.h" +#include "class/cl917c.h" +#include "class/cl917d.h" +#include "class/cl917e.h" +#include "class/cl927c.h" +#include "class/cl927d.h" +#include "class/cl947d.h" +#include "class/cl957d.h" +#include "class/cl977d.h" +#include "class/cl987d.h" +#include "class/clc37a.h" +#include "class/clc37b.h" +#include "class/clc37d.h" +#include "class/clc37e.h" +#include "class/clc57a.h" +#include "class/clc57b.h" +#include "class/clc57d.h" +#include "class/clc57e.h" +#include "class/clc67a.h" +#include "class/clc67b.h" +#include "class/clc67d.h" +#include "class/clc67e.h" + +#include "gpu/disp/rg_line_callback/rg_line_callback.h" + +NV_STATUS +kdispConstructEngine_IMPL(OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + ENGDESCRIPTOR engDesc) +{ + NV_STATUS status; + + // + // NOTE: DO NOT call IpVersion _HAL functions in ConstructEngine. + // IP version based _HAL functions can only be used starting StatePreInit. + // Long-term: RM offload initialization will be moved earlier so KernelDisplay + // has the ability to use IP version HAL functions even in construct phase. + // + + // + // Sanity check: the only time KERNEL_DISPLAY module should be enabled + // while DISP is disabled is on KERNEL_ONLY build. + // + NV_ASSERT(IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu) || RMCFG_MODULE_DISP); + + // + // We also need to check if we are in certain configurations which can't + // even attempt a control call to DISP. + // + if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IS_MISSING)) + return NV_ERR_NOT_SUPPORTED; + + // Create children + pKernelDisplay->pInst = NULL; + status = kdispConstructInstMem_HAL(pKernelDisplay); + if (status != NV_OK) + { + return status; + } + + status = kdispConstructKhead(pKernelDisplay); + + // We defer checking whether DISP has been disabled some other way until + // StateInit, when we can do a control call. + + return status; +} + +void +kdispDestruct_IMPL +( + KernelDisplay *pKernelDisplay +) +{ + // Destroy children + kdispDestructInstMem_HAL(pKernelDisplay); + kdispDestructKhead(pKernelDisplay); +} + +/*! Constructor for DisplayInstanceMemory */ +NV_STATUS +kdispConstructInstMem_IMPL +( + KernelDisplay *pKernelDisplay +) +{ + NV_STATUS status; + DisplayInstanceMemory *pInst; + + status = objCreate(&pInst, pKernelDisplay, DisplayInstanceMemory); + if (status != NV_OK) + { + return status; + } + + pKernelDisplay->pInst = pInst; + return NV_OK; +} + +/*! Destructor for DisplayInstanceMemory */ +void +kdispDestructInstMem_IMPL +( + KernelDisplay *pKernelDisplay +) +{ + if (pKernelDisplay->pInst != NULL) + { + objDelete(pKernelDisplay->pInst); + pKernelDisplay->pInst = NULL; + } +} + +/*! 
Constructor for Kernel head */ +NV_STATUS +kdispConstructKhead_IMPL +( + KernelDisplay *pKernelDisplay +) +{ + NV_STATUS status; + KernelHead *pKernelHead; + NvU8 headIdx; + + for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++) + { + status = objCreate(&pKernelHead, pKernelDisplay, KernelHead); + if (status != NV_OK) + { + return status; + } + + pKernelDisplay->pKernelHead[headIdx] = pKernelHead; + pKernelDisplay->pKernelHead[headIdx]->PublicId = headIdx; + } + return NV_OK; +} + +/*! Destructor for Kernel head */ +void +kdispDestructKhead_IMPL +( + KernelDisplay *pKernelDisplay +) +{ + NvU8 headIdx; + + for (headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++) + { + if (pKernelDisplay->pKernelHead[headIdx] != NULL) + { + objDelete(pKernelDisplay->pKernelHead[headIdx]); + pKernelDisplay->pKernelHead[headIdx] = NULL; + } + } +} + +NV_STATUS +kdispStatePreInitLocked_IMPL(OBJGPU *pGpu, + KernelDisplay *pKernelDisplay) +{ + NV_STATUS status; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 hClient = pGpu->hInternalClient; + NvU32 hSubdevice = pGpu->hInternalSubdevice; + NV2080_CTRL_INTERNAL_DISPLAY_GET_IP_VERSION_PARAMS ctrlParams; + + status = pRmApi->Control(pRmApi, hClient, hSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_IP_VERSION, + &ctrlParams, sizeof(ctrlParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Failed to read display IP version (FUSE disabled), status=0x%x\n", + status); + return status; + } + + // NOTE: KernelDisplay IpVersion _HAL functions can only be called after this point. + status = gpuInitDispIpHal(pGpu, ctrlParams.ipVersion); + + return status; +} + +NV_STATUS +kdispStateInitLocked_IMPL(OBJGPU *pGpu, + KernelDisplay *pKernelDisplay) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status = NV_OK; + KernelDisplayStaticInfo *pStaticInfo; + + pStaticInfo = portMemAllocNonPaged(sizeof(KernelDisplayStaticInfo)); + if (pStaticInfo == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate KernelDisplayStaticInfo"); + status = NV_ERR_NO_MEMORY; + goto exit; + } + portMemSet(pStaticInfo, 0, sizeof(*pStaticInfo)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO, + pStaticInfo, sizeof(*pStaticInfo)), + exit); + + pKernelDisplay->pStaticInfo = pStaticInfo; + pStaticInfo = NULL; + + if (pKernelDisplay->pInst != NULL) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + instmemStateInitLocked(pGpu, pKernelDisplay->pInst), exit); + } + + if (pKernelDisplay->getProperty(pKernelDisplay, PDB_PROP_KDISP_IMP_ENABLE)) + { + // NOTE: Fills IMP parameters and populate those to disp object in Tegra + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kdispImportImpData_HAL(pKernelDisplay), exit); + } + +exit: + portMemFree(pStaticInfo); + + return status; +} + +void +kdispStateDestroy_IMPL(OBJGPU *pGpu, + KernelDisplay *pKernelDisplay) +{ + if (pKernelDisplay->pInst != NULL) + { + instmemStateDestroy(pGpu, pKernelDisplay->pInst); + } + + portMemFree((void*) pKernelDisplay->pStaticInfo); + pKernelDisplay->pStaticInfo = NULL; +} + +NV_STATUS +kdispStateLoad_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + + if (pKernelDisplay->pInst != NULL) + status = instmemStateLoad(pGpu, pKernelDisplay->pInst, flags); + + return status; +} + +NV_STATUS +kdispStateUnload_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + + if 
(pKernelDisplay->pInst != NULL) + status = instmemStateUnload(pGpu, pKernelDisplay->pInst, flags); + + return status; +} + +/*! Get and Populate IMP init data for Tegra */ +NV_STATUS +kdispImportImpData_IMPL(KernelDisplay *pKernelDisplay) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 hClient = pGpu->hInternalClient; + NvU32 hSubdevice = pGpu->hInternalSubdevice; + NV2080_CTRL_INTERNAL_DISPLAY_SET_IMP_INIT_INFO_PARAMS params; + NvU32 simulationMode; + + // + // FPGA has different latency characteristics, and the current code latency + // models that IMP uses for silicon will not work for FPGA, so keep IMP + // disabled by default on Tegra FPGA. + // + simulationMode = osGetSimulationMode(); + if (simulationMode == NV_SIM_MODE_TEGRA_FPGA) + { + pKernelDisplay->setProperty(pDisp, PDB_PROP_KDISP_IMP_ENABLE, NV_FALSE); + return NV_OK; + } + + NV_ASSERT_OK_OR_RETURN(osTegraSocGetImpImportData(¶ms.tegraImpImportData)); + + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, hClient, hSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_SET_IMP_INIT_INFO, + ¶ms, sizeof(params))); + + return NV_OK; +} + +/*! Get internal enum equivalent of the HW class number */ +NV_STATUS +kdispGetIntChnClsForHwCls_IMPL +( + KernelDisplay *pKernelDisplay, + NvU32 hwClass, + DISPCHNCLASS *pDispChnClass +) +{ + // sanity check + if (pDispChnClass == NULL) + return NV_ERR_INVALID_ARGUMENT; + + switch (hwClass) + { + case NV917A_CURSOR_CHANNEL_PIO: + case NVC37A_CURSOR_IMM_CHANNEL_PIO: + case NVC57A_CURSOR_IMM_CHANNEL_PIO: + case NVC67A_CURSOR_IMM_CHANNEL_PIO: + *pDispChnClass = dispChnClass_Curs; + break; + + case NV917B_OVERLAY_IMM_CHANNEL_PIO: + *pDispChnClass = dispChnClass_Ovim; + break; + + case NV917C_BASE_CHANNEL_DMA: + case NV927C_BASE_CHANNEL_DMA: + *pDispChnClass = dispChnClass_Base; + break; + + case NV917D_CORE_CHANNEL_DMA: + case NV927D_CORE_CHANNEL_DMA: + case NV947D_CORE_CHANNEL_DMA: + case NV957D_CORE_CHANNEL_DMA: + case NV977D_CORE_CHANNEL_DMA: + case NV987D_CORE_CHANNEL_DMA: + case NVC37D_CORE_CHANNEL_DMA: + case NVC57D_CORE_CHANNEL_DMA: + case NVC67D_CORE_CHANNEL_DMA: + *pDispChnClass = dispChnClass_Core; + break; + + case NV917E_OVERLAY_CHANNEL_DMA: + *pDispChnClass = dispChnClass_Ovly; + break; + + case NVC37B_WINDOW_IMM_CHANNEL_DMA: + case NVC57B_WINDOW_IMM_CHANNEL_DMA: + case NVC67B_WINDOW_IMM_CHANNEL_DMA: + *pDispChnClass = dispChnClass_Winim; + break; + + case NVC37E_WINDOW_CHANNEL_DMA: + case NVC57E_WINDOW_CHANNEL_DMA: + case NVC67E_WINDOW_CHANNEL_DMA: + *pDispChnClass = dispChnClass_Win; + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Unknown channel class %x\n", hwClass); + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +void +kdispNotifyEvent_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 notifyIndex, + void *pNotifyParams, + NvU32 notifyParamsSize, + NvV32 info32, + NvV16 info16 +) +{ + PEVENTNOTIFICATION pEventNotifications; + NvU32 *pNotifyActions; + NvU32 disableCmd, singleCmd; + NvU32 subDeviceInst; + RS_SHARE_ITERATOR it = serverutilShareIter(classId(NotifShare)); + + // search notifiers with events hooked up for this gpu + while (serverutilShareIterNext(&it)) + { + RsShared *pShared = it.pShared; + DisplayApi *pDisplayApi; + INotifier *pNotifier; + Device *pDevice; + NotifShare *pNotifierShare = dynamicCast(pShared, NotifShare); + + if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL)) + continue; + + pNotifier = pNotifierShare->pNotifier; + pDisplayApi = dynamicCast(pNotifier, DisplayApi); 
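+        // serverutilShareIter() visits every NotifShare in the resource server,
+        // so anything that is not a DisplayApi notifier (dynamicCast() returns
+        // NULL) or that belongs to a different GPU is skipped below.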
+ + // Only notify matching GPUs + if (pDisplayApi == NULL) + continue; + + pDevice = dynamicCast(RES_GET_REF(pDisplayApi)->pParentRef->pResource, Device); + + if (GPU_RES_GET_GPU(pDevice) != pGpu) + continue; + + gpuSetThreadBcState(GPU_RES_GET_GPU(pDevice), pDisplayApi->bBcResource); + + disableCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + singleCmd = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + + // get notify actions list + subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pNotifyActions = pDisplayApi->pNotifyActions[subDeviceInst]; + if (pNotifyActions == NULL) + { + continue; + } + + // get event list + pEventNotifications = inotifyGetNotificationList(pNotifier); + if (pEventNotifications == NULL) + { + continue; + } + + // skip if client not "listening" to events of this type + if (pNotifyActions[notifyIndex] == disableCmd) + { + continue; + } + + if (pDisplayApi->hNotifierMemory != NV01_NULL_OBJECT && + pDisplayApi->pNotifierMemory != NULL) + { + notifyFillNotifierMemory(pGpu, pDisplayApi->pNotifierMemory, info32, info16, + NV5070_NOTIFICATION_STATUS_DONE_SUCCESS, notifyIndex); + } + + // ping events bound to subdevice associated with pGpu + osEventNotification(pGpu, pEventNotifications, + (notifyIndex | OS_EVENT_NOTIFICATION_INDEX_MATCH_SUBDEV), + pNotifyParams, notifyParamsSize); + + // reset if single shot notify action + if (pNotifyActions[notifyIndex] == singleCmd) + { + pNotifyActions[notifyIndex] = disableCmd; + } + } +} + +void +kdispSetWarPurgeSatellitesOnCoreFree_IMPL +( + KernelDisplay *pKernelDisplay, + NvBool value +) +{ + pKernelDisplay->bWarPurgeSatellitesOnCoreFree = value; +} + +NV_STATUS +kdispRegisterRgLineCallback_IMPL +( + KernelDisplay *pKernelDisplay, + RgLineCallback *pRgLineCallback, + NvU32 head, + NvU32 rgIntrLine, + NvBool bEnable +) +{ + NV_ASSERT_OR_RETURN(head < OBJ_MAX_HEADS, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD, NV_ERR_INVALID_ARGUMENT); + + RgLineCallback **slot = &pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine]; + + if (bEnable && *slot == NULL) + { + *slot = pRgLineCallback; + } + else if (!bEnable && *slot == pRgLineCallback) + { + *slot = NULL; + } + else + { + // + // OBJDISP is the authority for *allocating* these "slots"; + // KernelDisplay trusts it as an allocator. + // If we try to register a callback in an existing slot, or free an + // empty slot, it means OBJDISP has created conflicting allocations or + // has allowed a double-free. (Or RgLineCallback has provided invalid + // parameters.) + // + NV_ASSERT_FAILED("Invalid KernelDisplay state for RgLineCallback"); + return NV_ERR_INVALID_STATE; + } + + return NV_OK; +} + +void +kdispInvokeRgLineCallback_KERNEL +( + KernelDisplay *pKernelDisplay, + NvU32 head, + NvU32 rgIntrLine, + NvBool bIsIrqlIsr +) +{ + NV_ASSERT_OR_RETURN_VOID(head < OBJ_MAX_HEADS); + NV_ASSERT_OR_RETURN_VOID(rgIntrLine < MAX_RG_LINE_CALLBACKS_PER_HEAD); + + RgLineCallback *pCallbackObject = pKernelDisplay->rgLineCallbackPerHead[head][rgIntrLine]; + + if (pCallbackObject != NULL) + { + rglcbInvoke(pCallbackObject, bIsIrqlIsr); + } + else if (IS_GSP_CLIENT(ENG_GET_GPU(pKernelDisplay))) + { + // + // For offloaded RM case, getting a callback invocation without a registered callback could + // happen during or after deregistration: there might already have been an event in the + // queue by the time we asked physical RM to deconfigure the interrupt. 
+ // + // Because this could lead to an A-B-A situation where a new callback is registered to the + // same slot and invoked in place of the old callback, we must assert against this case. + // To avoid this, RgLineCallback must drain the client RM event queue after deconfiguring + // the interrupt and before calling kdispRegisterRgLineCallback to deregister the callback. + // + NV_ASSERT_FAILED("got RgLineCallback invocation for null callback"); + } + else + { + // + // For the monolithic RM case, getting a callback invocation without a registered callback + // could happen during registration: after configuring hardware for the interrupt, but + // before registering the callback with KernelDisplay, the interrupt could be handled. + // + // This is not a bug in and of itself as it is harmless and expected. On the other hand we + // would not expect to see this warning in the log more than a few times per registration, + // e.g. if it were printed for every single interrupt, as the callback ought to be fully + // registered before excessively many interrupts are handled. + // + NV_PRINTF(LEVEL_WARNING, "got RgLineCallback invocation for null callback\n"); + } +} + +#define HOTPLUG_PROFILE 0 + +#if HOTPLUG_PROFILE + + #define ISR_TSTAMP_SIZE 18000 /* 5 minutes (5*60Hz*60)*/ + + NvU32 timeStampIndexISR = ISR_TSTAMP_SIZE-1; + + tmr_tstamp_u timeStampStartISR[ISR_TSTAMP_SIZE]; + tmr_tstamp_u timeStampDeltaISR[ISR_TSTAMP_SIZE]; + +#endif + +void +kdispServiceVblank_KERNEL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + NvU32 headmask, + NvU32 state, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 pending, check_pending, pending_checked; + NvU32 Head; + NvU32 maskNonEmptyQueues[OBJ_MAX_HEADS]; // array of masks of VBLANK_STATE_PROCESS_XXX_LATENCY bits, indicating which queues are non-empty + NvU32 unionNonEmptyQueues = 0; // mask of VBLANK_STATE_PROCESS_XXX_LATENCY bits, union of queue states of all heads w/ pending vblank ints + NvU32 Count = 0; + NvU32 i, skippedcallbacks; + NvU32 maskCallbacksStillPending = 0; + KernelHead *pKernelHead = NULL; + +#if HOTPLUG_PROFILE + OBJTMR *pTmr; + pTmr = GPU_GET_TIMER(pGpu); + if (++timeStampIndexISR >= ISR_TSTAMP_SIZE) + timeStampIndexISR = 0; + + tmrGetCurrentTime(pTmr, &timeStampStartISR[timeStampIndexISR].time32.hi, &timeStampStartISR[timeStampIndexISR].time32.lo); + + // For the ISR we want to know how much time since the last ISR. + if (timeStampIndexISR) + { + NvU64 temp64; + + temp64 = timeStampStartISR[timeStampIndexISR].time64; + temp64 -= timeStampStartISR[timeStampIndexISR-1].time64; + + timeStampDeltaISR[timeStampIndexISR].time64 = temp64; + } +#endif + + + // If the caller failed to spec which queue, figure they wanted all of them + if (!(state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) ) + { + state |= VBLANK_STATE_PROCESS_ALL_CALLBACKS; + } + + // If the headmask is 0, we should process all heads + if (headmask == 0) + { + headmask = 0xFFFFFFFF; + } + + // + // If we are being asked to process the callbacks now, regardless of the true irqspending, + // we force the pending mask to the head mask passed in. + // + if (state & VBLANK_STATE_PROCESS_IMMEDIATE) + { + pending = headmask; + } + else + { + // We're here because at least one of the PCRTC bits MAY be pending. + pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState); + } + + // No sense in doing anything if there is nothing pending. 
+    if (pending == 0)
+    {
+        return;
+    }
+
+    //
+    // We want to check for pending service now and then we check again each
+    // time through the loop. Keep these separate.
+    //
+    check_pending = pending;
+
+    // We have not checked anything yet
+    pending_checked = 0;
+
+    // Start with head 0
+    Head = 0;
+
+    //
+    // We keep scanning all supported heads, and if we have something pending,
+    // check the associated queues
+    //
+    while(pending_checked != pending)
+    {
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+
+        // Move on if this crtc's interrupt isn't pending...
+        if ( (headmask & check_pending & ~pending_checked) & NVBIT(Head))
+        {
+            // Track that we have now checked this head
+            pending_checked |= NVBIT(Head);
+
+            // If our queues are empty, we can bail early
+            maskNonEmptyQueues[Head] = kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, state, NULL);
+            unionNonEmptyQueues |= maskNonEmptyQueues[Head];
+
+            // This function will check to see if there are callback states in which the
+            // caller has skipped execution.
+            skippedcallbacks = ((state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) ^ VBLANK_STATE_PROCESS_ALL_CALLBACKS);
+            skippedcallbacks |= (state & (VBLANK_STATE_PROCESS_CALLED_FROM_ISR | VBLANK_STATE_PROCESS_IMMEDIATE));
+
+            // Now let's see if there are callbacks pending on the skipped callbacks
+            maskCallbacksStillPending |= NVBIT(Head) * !!kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, skippedcallbacks, NULL);
+        }
+
+        // Don't check for new interrupts if we are in immediate mode
+        if (!(state & VBLANK_STATE_PROCESS_IMMEDIATE) )
+        {
+            pending = kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, pThreadState);
+        }
+
+        // If there was a change in the pending state, we should recheck everything
+        if (check_pending != pending)
+        {
+            // We need to recheck heads that were not pending before
+            check_pending = pending;
+            Head = 0;
+        }
+        else
+        {
+            // Nothing changed, so move on to the next head
+            Head++;
+        }
+
+        // Make sure we don't waste time on heads that don't exist
+        if (Head >= OBJ_MAX_HEADS)
+        {
+            break;
+        }
+    }
+
+    if (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR)
+    {
+        // store off which heads have pending vblank interrupts, for comparison at the next DPC time.
+        pKernelDisplay->isrVblankHeads = pending;
+
+    }
+
+    // increment the per-head vblank total counter, for any head with a pending vblank intr
+    for (Head=0; Head < OBJ_MAX_HEADS; Head++)
+    {
+        // Move on if this crtc's interrupt isn't pending...
+        if ((pending & NVBIT(Head)) == 0)
+        {
+            continue;
+        }
+
+        pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head);
+        //
+        // increment vblank counters, as appropriate.
+        //
+
+        // Track the fact that we passed through here. This keeps the RC manager happy.
+        Count = kheadGetVblankTotalCounter_HAL(pKernelHead) + 1;
+        kheadSetVblankTotalCounter_HAL(pKernelHead, Count);
+
+        //
+        // Update the vblank counter if we are single chip or multichip master.
+        // We now have two queues, so we need to have two vblank counters.
+        //
+
+        // did they ask for processing of low-latency work?
+        if (state & VBLANK_STATE_PROCESS_LOW_LATENCY /* & maskNonEmptyQueues[Head]*/)
+        {
+            //
+            // don't let the DPC thread increment the low-latency counter.
+            // otherwise, the counter will frequently increment at double the
+            // expected rate, breaking things like swapInterval.
+ // + // XXX actually, there is one case where it would be OK for the DPC + // thread to increment this counter: if the DPC thread could ascertain + // that 'pending & NVBIT(Head)' represented a new interrupt event, and + // not simply the one that the ISR left uncleared in PCRTC_INTR_0, for + // the purpose of causing this DPC thread to get queued. + // Not sure how to do that. + // + if ( !(state & VBLANK_STATE_PROCESS_CALLED_FROM_DPC) || (pending & NVBIT(Head) & ~pKernelDisplay->isrVblankHeads) ) + { + // either we were called from the ISR, or vblank is asserted in DPC when it wasn't in the ISR + + // low latency queue requested, and this isn't a DPC thread. + Count = kheadGetVblankLowLatencyCounter_HAL(pKernelHead) + 1; + kheadSetVblankLowLatencyCounter_HAL(pKernelHead, Count); + } + } + + // did they ask for processing of normal-latency work? + if (state & VBLANK_STATE_PROCESS_NORMAL_LATENCY /* & maskNonEmptyQueues[Head]*/) + { + // processing of the normal latency queue requested + Count = kheadGetVblankNormLatencyCounter_HAL(pKernelHead) + 1; + kheadSetVblankNormLatencyCounter_HAL(pKernelHead, Count); + } + } + + // + // If we have nothing to process (no work to do in queue), + // we can bail early. We got here for some reason, so make + // sure we clear the interrupts. + // + + if (!unionNonEmptyQueues) + { + // all queues (belonging to heads with pending vblank ints) are empty. + kheadResetPendingVblankForKernel_HAL(pGpu, pKernelHead, pThreadState); + return; + } + + NVRM_TRACE('VBLK'); + + // + // Although we have separate handlers for each head, attempt to process all + // interrupting heads now. What about DPCs schedule already? + // + for (Head = 0; Head < OBJ_MAX_HEADS; Head++) + { + pKernelHead = KDISP_GET_HEAD(pKernelDisplay, Head); + // Move on if this crtc's interrupt isn't pending... + if ((pending & NVBIT(Head)) == 0) + { + continue; + } + + // Process the callback list for this Head... + kheadProcessVblankCallbacks_HAL(pGpu, pKernelHead, state); + } + + // + // if there are still callbacks pending, and we are in an ISR, + // then don't clear PCRTC_INTR; XXXar why would we *ever* want + // to clear PCRTC_INTR if there are still things pending? + // + if ( (maskCallbacksStillPending) && + (state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR) ) + { + // + // there are still callbacks pending; don't clear + // PCRTC_INTR, yet. The expectation is that the OS layer + // will see that interrupts are still pending and queue a + // DPC/BottomHalf/whatever to service the rest of the + // vblank callback queues + // + for(i=0; i< OBJ_MAX_HEADS; i++) + { + pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i); + kheadResetPendingVblankForKernel_HAL(pGpu, pKernelHead, pThreadState); + } + } + else + { + // reset the VBlank intrs we've handled, and don't reset the vblank intrs we haven't. 
+ for(i=0; i< OBJ_MAX_HEADS; i++) + { + pKernelHead = KDISP_GET_HEAD(pKernelDisplay, i); + if (pending & NVBIT(i) & ~maskCallbacksStillPending) + { + kheadResetPendingVblank_HAL(pGpu, pKernelHead, pThreadState); + } + } + } + + return; + NVRM_TRACE('vblk'); +} + +NvU32 kdispReadPendingVblank_KERNEL(OBJGPU *pGpu, KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *pThreadState) +{ + KernelHead *pKernelHead; + NvU32 headIntrMask; + NvU32 pending = 0; + NvU8 headIdx; + + for(headIdx = 0; headIdx < OBJ_MAX_HEADS; headIdx++) + { + pKernelHead = KDISP_GET_HEAD(pKernelDisplay, headIdx); + headIntrMask = headIntr_none; + pending |= kheadReadPendingVblank_HAL(pGpu, pKernelHead, headIntrMask); + } + return pending; +} + +/** + * @brief Provides an opportunity to register some IntrService during intrStateInit. + */ +void +kdispRegisterIntrService_IMPL +( + OBJGPU *pGpu, + KernelDisplay *pKernelDisplay, + IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX] +) +{ + NvU32 engineIdx = MC_ENGINE_IDX_DISP; + NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL); + pRecords[engineIdx].pInterruptService = staticCast(pKernelDisplay, IntrService); +} + +/*! + * @brief Route modeset start/end notification to kernel RM + * + * Physical RM is expected to send a "start" notification at the beginning of + * every display modeset (supervisor interrupt sequence), and an "end" + * notification at the end. However, if physical RM detects back-to-back + * modesets, the intervening "end" notification MAY be skipped; in this case, + * the "start" notification for the next modeset serves as the "end notification + * for the previous modeset. + * + * Kernel RM will use the notification to update the BW allocation for display. + * The ICC call that is required to update the BW allocation cannot be made + * from physical RM. + * + * @param[in] pKernelDisplay KernelDisplay pointer + * @param[in] bModesetStart NV_TRUE -> start of modeset; + * NV_FALSE -> end of modeset + * @param[in] minRequiredIsoBandwidthKBPS Min ISO BW required by IMP (KB/sec) + * @param[in] minRequiredFloorBandwidthKBPS Min dramclk freq * pipe width (KB/sec) + */ +void +kdispInvokeDisplayModesetCallback_KERNEL +( + KernelDisplay *pKernelDisplay, + NvBool bModesetStart, + NvU32 minRequiredIsoBandwidthKBPS, + NvU32 minRequiredFloorBandwidthKBPS +) +{ + NV_STATUS status; + + NV_PRINTF(LEVEL_INFO, + "Kernel RM received \"%s of modeset\" notification " + "(minRequiredIsoBandwidthKBPS = %u, minRequiredFloorBandwidthKBPS = %u)\n", + bModesetStart ? "start" : "end", + minRequiredIsoBandwidthKBPS, + minRequiredFloorBandwidthKBPS); + + OBJGPU *pGpu = ENG_GET_GPU(pKernelDisplay); + status = + kdispArbAndAllocDisplayBandwidth_HAL(pGpu, + pKernelDisplay, + DISPLAY_ICC_BW_CLIENT_RM, + minRequiredIsoBandwidthKBPS, + minRequiredFloorBandwidthKBPS); + // + // The modeset cannot be aborted, so, if there is an error, no recovery + // is possible. + // + NV_ASSERT_OK(status); +} diff --git a/src/nvidia/src/kernel/gpu/disp/rg_line_callback/rg_line_callback.c b/src/nvidia/src/kernel/gpu/disp/rg_line_callback/rg_line_callback.c new file mode 100644 index 000000000..d84e99708 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/rg_line_callback/rg_line_callback.c @@ -0,0 +1,135 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define RM_STRICT_CONFIG_EMIT_DISP_ENGINE_DEFINITIONS 0 + +#include "gpu/disp/rg_line_callback/rg_line_callback.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/device/device.h" +#include "gpu/disp/disp_objs.h" +#include "rmapi/client.h" +#include "gpu/gpu.h" + +static NV_STATUS +_registerRgLineCallback +( + RgLineCallback *pRgLineCallback, + NvBool bEnableRgLineIntr +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = NULL; + KernelDisplay *pKernelDisplay; + RM_API *pRmApi; + NvU32 hClient; + NvU32 hSubdevice; + NV2080_CTRL_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR_PARAMS ctrlParams; + + if (pRgLineCallback->pCallbkFn == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Trying to register/un-register a NULL RG line callback\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // Get the right pGpu from subdevice instance given by client + status = dispapiSetUnicastAndSynchronize_HAL(staticCast(pRgLineCallback->pDispCommon, DisplayApi), + DISPAPI_GET_GPUGRP(pRgLineCallback->pDispCommon), + &pGpu, + pRgLineCallback->subDeviceInstance); + if (status != NV_OK) + return status; + + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NV_ASSERT(pKernelDisplay != NULL); + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + hClient = pGpu->hInternalClient; + hSubdevice = pGpu->hInternalSubdevice; + + ctrlParams.head = pRgLineCallback->head; + ctrlParams.rgLineNum = pRgLineCallback->rgLineNum; + ctrlParams.intrLine = pRgLineCallback->rgIntrLine; + ctrlParams.bEnable = bEnableRgLineIntr; + + status = pRmApi->Control(pRmApi, hClient, hSubdevice, + NV2080_CTRL_CMD_INTERNAL_DISPLAY_SETUP_RG_LINE_INTR, + &ctrlParams, sizeof(ctrlParams)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, status); + + if (bEnableRgLineIntr) + { + pRgLineCallback->rgIntrLine = ctrlParams.intrLine; + } + + status = kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, + pRgLineCallback->head, pRgLineCallback->rgIntrLine, bEnableRgLineIntr); + + return status; +} + +NV_STATUS +rglcbConstruct_IMPL +( + RgLineCallback *pRgLineCallback, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS *pAllocParams = pParams->pAllocParams; + + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + pRgLineCallback->subDeviceInstance = pAllocParams->subDeviceInstance; + pRgLineCallback->head = pAllocParams->head; + pRgLineCallback->rgLineNum = 
pAllocParams->rgLineNum; + pRgLineCallback->pCallbkFn = pAllocParams->pCallbkFn; + pRgLineCallback->pCallbkParams = pAllocParams->pCallbkParams; + pRgLineCallback->rgIntrLine = ~0; + + pRgLineCallback->pDispCommon = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, DispCommon); + + return _registerRgLineCallback(pRgLineCallback, NV_TRUE); + +} + +void +rglcbDestruct_IMPL +( + RgLineCallback *pRgLineCallback +) +{ + _registerRgLineCallback(pRgLineCallback, NV_FALSE); +} + +void +rglcbInvoke_IMPL +( + RgLineCallback *pRgLineCallback, + NvBool bIsIrqlIsr +) +{ + (pRgLineCallback->pCallbkFn)(pRgLineCallback->rgIntrLine, pRgLineCallback->pCallbkParams, bIsIrqlIsr); +} diff --git a/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c b/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c new file mode 100644 index 000000000..a4a78f5b7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank.c @@ -0,0 +1,650 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/disp/vblank_callback/vblank.h" + +#include "kernel/gpu/disp/head/kernel_head.h" +#include "kernel/gpu/disp/kern_disp.h" +#include "kernel/gpu/gpu.h" +#include "kernel/os/os.h" + +#include "objtmr.h" + +void +kheadAddVblankCallback_IMPL +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + VBLANKCALLBACK *pCallback +) +{ + NvBool OktoAdd = NV_TRUE; + VBLANKCALLBACK *pCheck = NULL; + VBLANKCALLBACK *pNext = NULL; + VBLANKCALLBACK *pPrev = NULL; + VBLANKCALLBACK *pList = NULL; + NvU32 Count; + NvBool vblankIntrIsBeingGenerated = NV_FALSE; + + // + // If callback needs vblank safety, make it low-latency, persistent and promote-to-front. + // The callback is responsible for clearing its own persistence & safety flags, + // once it achieves its raison d'etre, within it's own particular idiom. 
+ // + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_GUARANTEE_SAFETY) + { + pCallback->Flags |= VBLANK_CALLBACK_FLAG_PERSISTENT; + pCallback->Flags |= VBLANK_CALLBACK_FLAG_LOW_LATENCY; + pCallback->Flags |= VBLANK_CALLBACK_FLAG_PROMOTE_TO_FRONT; + } + + // Cache the requested queue and its current vblank count + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY) + { + pList = pKernelHead->Vblank.Callback.pListLL; + Count = pKernelHead->Vblank.Counters.LowLatency; + } + else + { + pList = pKernelHead->Vblank.Callback.pListNL; + Count = pKernelHead->Vblank.Counters.NormLatency; + } + + // + // If this callback is supposed to fire at a specific vblank count, + // then that count (VBlankCount) better be in the future still. + // + NV_ASSERT(!(pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT) || + (pCallback->VBlankCount > Count) ); + + NV_PRINTF(LEVEL_INFO, "headAddVblankCallback: pGpu=%p cb=%p\n", pGpu, + pCallback); + NV_PRINTF(LEVEL_INFO, + " cbproc=%p cbobj=%p p1=0x%x p2=0x%x count=0x%x flags=0x%x offset=0x%x\n", + pCallback->Proc, pCallback->pObject, pCallback->Param1, + pCallback->Param2, pCallback->VBlankCount, pCallback->Flags, + pCallback->VBlankOffset); + + if (kheadReadVblankIntrState(pGpu, pKernelHead) != NV_HEAD_VBLANK_INTR_UNAVAILABLE) + vblankIntrIsBeingGenerated = NV_TRUE; + + if ( vblankIntrIsBeingGenerated || (pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) ) + { + pCheck = pList; + + // + // Check that the list doesn't become a circular queue of one element, which can happen in multichip, if the a method + // is called twice on multiple devices. If this happens, we'll be in an infinite loop in the while(Callback) below. + // + while (NULL != pCheck) + { + if (pCheck == pCallback) + { + // + // It is expected that we may try to add the same callback again, as we may not get a + // dacdisable (which deletes callbacks) between modesets and/or dacenables. + // + NV_PRINTF(LEVEL_INFO, + "headAddVblankCallback: VblankCallback already on the Callback List\n"); + OktoAdd = NV_FALSE; + } + pCheck = pCheck->Next; + } + + if (OktoAdd) + { + // + // Best-effort test to verify that this callback is not already part of any callback list + // (the test won't detect callbacks added twice at the end of two lists) + // + NV_ASSERT(pCallback->Next == NULL); + + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT) + { + // We set the target to the one that the caller supplied. + Count = pCallback->VBlankCount; + } + else if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP) + { + // We don't know which vblank would correspond to the timestamp, so just add it to end of list. + Count = 0xFFFFFFFF; + } + else if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET) + { + // We set the target to the current plus the offset that the caller supplied. + Count += pCallback->VBlankOffset; + pCallback->VBlankCount = Count; + + // If we are persistent, we should convert the vblank offset flag to a vblank count flag. + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) + { + pCallback->Flags &= ~VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET; + pCallback->Flags |= VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT; + } + } + else if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT) + { + // We set the target to the current plus one (the next vblank) + Count += 1; + pCallback->VBlankCount = Count; + } + else + { + // + // We set the target to the current plus one (the next vblank). 
+ // We use this case when we dont know the request or legacy support. + // + Count += 1; + pCallback->VBlankCount = Count; + } + + // These are now guaranteed to be sorted by VBlank + pPrev = NULL; + pNext = pList; + + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_PROMOTE_TO_FRONT) + { + // To the front of the group that shares the same 'VBlankCount' value + while ((NULL != pNext) && (Count > pNext->VBlankCount)) + { + pPrev = pNext; + pNext = pNext->Next; + } + } + else + { + // To the back of the group that shares the same 'VBlankCount' value + while ((NULL != pNext) && (Count >= pNext->VBlankCount)) + { + pPrev = pNext; + pNext = pNext->Next; + } + } + + // Are we at the head? + if (pPrev == NULL) + { + pCallback->Next = pList; + + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY) + { + pKernelHead->Vblank.Callback.pListLL = pCallback; + } + else + { + pKernelHead->Vblank.Callback.pListNL = pCallback; + } + } + else // In the middle or tail + { + pPrev->Next = pCallback; + pCallback->Next = pNext; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "VBlankCallback discarded in dacCRTCAddVblankCallback to avoid infinite loop\n"); + } + } + else + { + // call it now + if (pCallback->Proc) + { + NV_PRINTF(LEVEL_INFO, + "headAddVblankCallback: immediate invocation\n"); + pCallback->bImmediateCallback = NV_TRUE; + + // Force it to appear to be on the correct VBlankCount + pCallback->VBlankCount = Count; + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_USER) + { + // This is a user call back, they don't get a pointer to our pDev or Object data structs. + pCallback->Proc(NULL, + NULL, + pCallback->Param1, + pCallback->Param2, + pCallback->Status); + } + else + { + // + // this callback was scheduled when a trace was being conducted, + // turn tracing back to make sure that we record this callback's + // register operations too, so the trace will be complete + // DON'T LOG USER CALLBACKS, not RM activity. (plus the tracing system + // requires a pDev ptr to find its own data structures) + // + pCallback->Proc(pGpu, + pCallback->pObject, + pCallback->Param1, + pCallback->Param2, + pCallback->Status); + } + } + } + + // After all of that, if at least one callback is scheduled, head is enabled and the vblank is AVAILABLE, enable it now. + if (vblankIntrIsBeingGenerated) + { + if ( (pKernelHead->Vblank.Callback.pListLL) || + (pKernelHead->Vblank.Callback.pListNL) ) + { + if (kheadReadVblankIntrState(pGpu, pKernelHead) != NV_HEAD_VBLANK_INTR_ENABLED) + { + kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_ENABLED); + NV_PRINTF(LEVEL_INFO, + "headAddVblankCallback: Changed vblank stat to ENABLED\n"); + } + } + } +} + +void +kheadDeleteVblankCallback_IMPL +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + VBLANKCALLBACK *pCallback +) +{ + VBLANKCALLBACK *pList = NULL; + NvBool enabled = NV_FALSE; + NvU32 Count; + + // Cache the requested queue and its current vblank count + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY) + { + pList = pKernelHead->Vblank.Callback.pListLL; + Count = pKernelHead->Vblank.Counters.LowLatency; + } + else + { + pList = pKernelHead->Vblank.Callback.pListNL; + Count = pKernelHead->Vblank.Counters.NormLatency; + } + + // Disable VBlank (if it is even on) while we scan/process the callback list + enabled = kheadReadVblankIntrEnable(pGpu, pKernelHead); + + if (enabled) + { + kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_AVAILABLE); + } + + // Search the list and remove this Callback entry + if (pList == pCallback) + { + // + // Found it. 
+ // Unlink it now. If we call it, it may try to add itself again, and wont be able to. + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY) + { + pKernelHead->Vblank.Callback.pListLL = pCallback->Next; + } + else + { + pKernelHead->Vblank.Callback.pListNL = pCallback->Next; + } + + // + // Should the callback be executed as part of the object destroy? + // (safe to do, since we already hold the necessary lock). + // + if ( (pCallback->Proc) && + (pCallback->Flags & VBLANK_CALLBACK_FLAG_COMPLETE_ON_OBJECT_CLEANUP) ) + { + // Force it to appear to be on the correct VBlankCount + pCallback->VBlankCount = Count; + + // This is a user call back, they don't get a pointer to our pDev or Object data structs. + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_USER) + { + pCallback->Proc(NULL, + NULL, + pCallback->Param1, + pCallback->Param2, + pCallback->Status); + } + else + { + pCallback->Proc(pGpu, + pCallback->pObject, + pCallback->Param1, + pCallback->Param2, + pCallback->Status); + } + } + } + else + { + VBLANKCALLBACK *pPrev = pList; + + while (pPrev) + { + if (pPrev->Next == pCallback) + { + // + // Found it. + // Should the callback be executed as part of the object destroy? + // (safe to do, since we already hold the necessary lock). + // + if ( (pCallback->Proc) && + (pCallback->Flags & VBLANK_CALLBACK_FLAG_COMPLETE_ON_OBJECT_CLEANUP) ) + { + // Force it to appear to be on the correct VBlankCount + pCallback->VBlankCount = Count; + + // This is a user call back, they don't get a pointer to our pDev or Object data structs. + if (pCallback->Flags & VBLANK_CALLBACK_FLAG_USER) + { + pCallback->Proc(NULL, + NULL, + pCallback->Param1, + pCallback->Param2, + pCallback->Status); + } + else + { + pCallback->Proc(pGpu, + pCallback->pObject, + pCallback->Param1, + pCallback->Param2, + pCallback->Status); + } + } + + pPrev->Next = pCallback->Next; + break; + } + pPrev = pPrev->Next; + } + } + pCallback->Next = NULL; + + // Check to see if there are no callbacks scheduled for this head + if (!(pKernelHead->Vblank.Callback.pListLL) && + !(pKernelHead->Vblank.Callback.pListNL) ) + { + // + // Since there are no callbacks scheduled, then we don't need + // to reenable anything. + // + enabled = NV_FALSE; + } + + // Restore VBlank enable + if (enabled) + { + kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_ENABLED); + } +} + +void +kheadProcessVblankCallbacks_IMPL +( + OBJGPU *pGpu, + KernelHead *pKernelHead, + NvU32 state +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + VBLANKCALLBACK *pCallback = NULL; + VBLANKCALLBACK *pNext = NULL; + VBLANKCALLBACK **ppPrev = NULL; + NvBool done = NV_FALSE; + NvBool removed = NV_FALSE; + NvBool queueDPC = NV_FALSE; + NvU32 newstate; + NvU32 Count = 0; + NvU64 time = 0; + + // If the caller failed to spec which queue, figure they wanted all of them + if ((state & VBLANK_STATE_PROCESS_ALL_CALLBACKS) == 0) + { + state |= VBLANK_STATE_PROCESS_ALL_CALLBACKS; + } + + // Keep a local copy we can mess with + newstate = state; + + // We may have more then one queue to process, so this is the main loop. + while (!done) + { + // Select the next queue to process. Give priority to the low latency folks. + if (newstate & VBLANK_STATE_PROCESS_LOW_LATENCY) + { + // We dont want to come back here again. 
+            newstate &= ~VBLANK_STATE_PROCESS_LOW_LATENCY;
+
+            // Grab the low latency queue and vblank count
+            pCallback = pKernelHead->Vblank.Callback.pListLL;
+            ppPrev = &pKernelHead->Vblank.Callback.pListLL;
+            Count = pKernelHead->Vblank.Counters.LowLatency;
+        }
+        else if (newstate & VBLANK_STATE_PROCESS_NORMAL_LATENCY)
+        {
+            // We don't want to come back here again.
+            newstate &= ~VBLANK_STATE_PROCESS_NORMAL_LATENCY;
+
+            // Grab the normal latency queue and vblank count
+            pCallback = pKernelHead->Vblank.Callback.pListNL;
+            ppPrev = &pKernelHead->Vblank.Callback.pListNL;
+            Count = pKernelHead->Vblank.Counters.NormLatency;
+        }
+        else
+        {
+            // We appear to have gone through all of the queues
+            done = NV_TRUE;
+        }
+
+        // If we are not done, process the next callback queue
+        if (!done)
+        {
+            while (pCallback)
+            {
+                pNext = pCallback->Next;
+
+                if ( (pCallback->Flags & VBLANK_CALLBACK_FLAG_LOW_LATENCY__ISR_ONLY) && !(state & VBLANK_STATE_PROCESS_CALLED_FROM_ISR) )
+                {
+                    // someone doesn't want this low-latency callback being processed at DPC time.
+                    ppPrev = &pCallback->Next;
+                }
+                else if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_TIMESTAMP)
+                {
+                    //
+                    // Timestamp-based callbacks don't have a valid vblank count.
+                    // Vblank might be delayed and we might see only one vblank instead of two.
+                    // So, count doesn't make sense in case of TS.
+                    // And since the semantics is flip on vblank at TS >= TS specified, we can't
+                    // use tmrCallbacks (they might flip outside vblank)
+                    //
+
+                    // Only re-read the time if we don't already know the result
+                    if (time < pCallback->TimeStamp)
+                    {
+                        tmrGetCurrentTime(pTmr, &time);
+                    }
+
+                    if (time >= pCallback->TimeStamp)
+                    {
+                        //
+                        // Unlink it before we call it. Otherwise, it may
+                        // try to add itself again, and won't be able to.
+                        //
+                        pCallback->Next = NULL;
+                        *ppPrev = pNext;
+
+                        // We better have something to do if we are wasting time reading TS
+                        NV_ASSERT(pCallback->Proc);
+
+                        pCallback->Proc(pGpu,
+                                        pCallback->pObject,
+                                        pCallback->Param1,
+                                        pCallback->Param2,
+                                        pCallback->Status);
+                        queueDPC = NV_TRUE;
+                    }
+                    else
+                    {
+                        ppPrev = &pCallback->Next;
+                    }
+                }
+                else
+                {
+                    if ( (pCallback->VBlankCount == Count) ||
+                         ((pCallback->VBlankCount + 1) == Count) ||
+                         (VBLANK_STATE_PROCESS_IMMEDIATE == state) )
+                    {
+                        pCallback->VBlankCount = Count;
+
+                        removed = NV_FALSE;
+
+                        //
+                        // If this is not a persistent callback, unlink it before we call it.
+                        // Otherwise, it may try to add itself again, and won't be able to.
+                        //
+                        if ( !(pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) )
+                        {
+                            pCallback->Next = NULL;
+                            *ppPrev = pNext;
+                            removed = NV_TRUE;
+                        }
+
+                        // Call the function now
+                        if (pCallback->Proc)
+                        {
+                            if (pCallback->Flags & VBLANK_CALLBACK_FLAG_USER)
+                            {
+                                //
+                                // DON'T LOG USER CALLBACKS, not RM activity. (plus the tracing system
+                                // requires a pDev ptr to find its own data structures)
+                                //
+
+                                // This is a user callback; they don't get a pointer to our pDev or Object data structs.
+                                pCallback->Proc(NULL,
+                                                NULL,
+                                                pCallback->Param1,
+                                                pCallback->Param2,
+                                                pCallback->Status);
+                                queueDPC = NV_TRUE;
+                            }
+                            else
+                            {
+                                //
+                                // This callback was scheduled when a trace was being conducted;
+                                // turn tracing back on to make sure that we record this callback's
+                                // register operations too, so the trace will be complete
+                                //
+                                pCallback->Proc(pGpu,
+                                                pCallback->pObject,
+                                                pCallback->Param1,
+                                                pCallback->Param2,
+                                                pCallback->Status);
+                                queueDPC = NV_TRUE;
+                            }
+                        }
+
+                        // If this is a persistent callback, make sure to update its time to run if we are not multichip and not the last chip
+                        if ( (pCallback->Flags & VBLANK_CALLBACK_FLAG_PERSISTENT) )
+                        {
+                            //
+                            // So, it appears there are those that like to update vblank counts and such within the callback.
+                            // This is fine I suppose, but we don't promise that this order is sorted then.
+                            // Anyway, it may be that the callback updated the vblank offset also, so update that now.
+                            // We should never see an OFFSET and PERSISTENT within the process loop.
+                            //
+                            if (pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET)
+                            {
+                                // We set the target to the current plus the offset that the caller supplied.
+                                pCallback->VBlankCount = Count + pCallback->VBlankOffset;
+
+                                // We are persistent, so we should convert the vblank offset flag to a vblank count flag.
+                                pCallback->Flags &= ~VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_OFFSET;
+                                pCallback->Flags |= VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT;
+                            }
+
+                            //
+                            // If the vblank count has already been specified, we don't need to increment
+                            // the vblank count.
+                            //
+                            if ( !(pCallback->Flags & VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_COUNT) )
+                            {
+                                pCallback->VBlankCount = Count + 1;
+                            }
+
+                            // Leave in callback chain.
+                            ppPrev = &pCallback->Next;
+                        }
+                        else
+                        {
+                            if (!removed)
+                            {
+                                //
+                                // Yes, the proper way to terminate a persistent callback from within a callback is
+                                // to make it non-persistent. This is what the cursor functions do, and so we should
+                                // check again after the callback.
+                                //
+                                pCallback->Next = NULL;
+                                *ppPrev = pNext;
+                                removed = NV_TRUE;
+                            }
+                        }
+                    }
+                    // This condition arises at wrap time which is about every 331 days at 150 Hz
+                    else
+                    {
+                        //
+                        // MK: A callback may increase its vblank count as part of its execution. Since the
+                        // callback list is only sorted at insertion time, this can render the list
+                        // unsorted. So, we need to read the remaining entries in the list.
+                        //
+                        ppPrev = &pCallback->Next;
+
+                    }
+                }
+
+                pCallback = pNext;
+            }
+        }
+    }
+
+    if (queueDPC)
+    {
+    }
+
+    // After all of that, if the callback lists are null and the vblank is ENABLED, move it to AVAILABLE now.
+    if (!(pKernelHead->Vblank.Callback.pListLL) &&
+        !(pKernelHead->Vblank.Callback.pListNL) )
+    {
+        if (kheadReadVblankIntrState(pGpu, pKernelHead) == NV_HEAD_VBLANK_INTR_ENABLED)
+        {
+            kheadWriteVblankIntrState(pGpu, pKernelHead, NV_HEAD_VBLANK_INTR_AVAILABLE);
+
+            NV_PRINTF(LEVEL_INFO,
+                      "Changed vblank state on head %d to AVAILABLE\n",
+                      pKernelHead->PublicId);
+        }
+    }
+}
diff --git a/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank_callback.c b/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank_callback.c
new file mode 100644
index 000000000..3e604e7dc
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/disp/vblank_callback/vblank_callback.c
@@ -0,0 +1,154 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c)2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* VblankCallback Module +* This file contains functions managing the vblank callback. +* +******************************************************************************/ + +#include "gpu/disp/vblank_callback/vblank_callback.h" +#include "os/os.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/head/kernel_head.h" + +static NV_STATUS +_vblankCallback +( + OBJGPU *pGpu, + void *pObject, + NvU32 Parm1, + NvU32 Parm2, + NV_STATUS rmStatus +) +{ + VblankCallback *pVblankCallback = (VblankCallback *)pObject; + if (pVblankCallback->CallBack.bIsVblankNotifyEnable) + { + pVblankCallback->pProc(pVblankCallback->pParm1, pVblankCallback->pParm2); + } + return NV_OK; +} + +NV_STATUS +vblcbConstruct_IMPL +( + VblankCallback *pVblankCallback, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJGPU *pGpu = GPU_RES_GET_GPU(pVblankCallback); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + KernelHead *pKernelHead = NULL; + NV_STATUS status = NV_OK; + NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS *pAllocParams = pParams->pAllocParams; + + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (pKernelDisplay == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + pKernelHead = KDISP_GET_HEAD(pKernelDisplay, pAllocParams->LogicalHead); + + if (pKernelHead == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pVblankCallback->LogicalHead = pAllocParams->LogicalHead; + pVblankCallback->pProc = pAllocParams->pProc; + pVblankCallback->pParm1 = pAllocParams->pParm1; + pVblankCallback->pParm2 = pAllocParams->pParm2; + pVblankCallback->CallBack.Flags = VBLANK_CALLBACK_FLAG_LOW_LATENCY | VBLANK_CALLBACK_FLAG_PERSISTENT | VBLANK_CALLBACK_FLAG_SPECIFIED_VBLANK_NEXT; + pVblankCallback->CallBack.Proc = _vblankCallback; + pVblankCallback->CallBack.pObject = pVblankCallback; + pVblankCallback->CallBack.bObjectIsChannelDescendant = NV_FALSE; + pVblankCallback->CallBack.Param1 = 0; + pVblankCallback->CallBack.Param2 = 0; + pVblankCallback->CallBack.VBlankCount = 0; + pVblankCallback->CallBack.VBlankOffset = 0; + pVblankCallback->CallBack.TimeStamp = 0; + pVblankCallback->CallBack.MC_CallbackFlag = 0; + pVblankCallback->CallBack.Status = NV_OK; + 
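//
+    // Notification delivery starts out enabled; the NV9010 set-vblank-notification
+    // control (vblcbCtrlSetVBlankNotification below) only toggles this flag while
+    // leaving the callback itself registered.
+    //
+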
pVblankCallback->CallBack.bIsVblankNotifyEnable = NV_TRUE; + pVblankCallback->CallBack.Next = NULL; + + kheadAddVblankCallback(pGpu, pKernelHead, &pVblankCallback->CallBack); + status = pOS->osSetupVBlank(pGpu, pAllocParams->pProc, pAllocParams->pParm1, pAllocParams->pParm2, pAllocParams->LogicalHead, &pVblankCallback->CallBack); + + if (status != NV_OK) + { + kheadDeleteVblankCallback(pGpu, pKernelHead, &pVblankCallback->CallBack); + } + + return status; +} + +void +vblcbDestruct_IMPL +( + VblankCallback *pVblankCallback +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJGPU *pGpu = GPU_RES_GET_GPU(pVblankCallback); + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + KernelHead *pKernelHead = KDISP_GET_HEAD(pKernelDisplay, pVblankCallback->LogicalHead); + + pOS->osSetupVBlank(pGpu, NULL, NULL, NULL, pVblankCallback->LogicalHead, NULL); + kheadDeleteVblankCallback(pGpu, pKernelHead, &pVblankCallback->CallBack); +} + +NV_STATUS +vblcbCtrlSetVBlankNotification_IMPL +( + VblankCallback *pVblankCallback, + NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_INVALID_ARGUMENT; + if (pVblankCallback->CallBack.Proc != NULL) + { + if (pParams->bSetVBlankNotifyEnable) + { + pVblankCallback->CallBack.bIsVblankNotifyEnable = NV_TRUE; + } + else + { + pVblankCallback->CallBack.bIsVblankNotifyEnable = NV_FALSE; + } + status = NV_OK; + } + return status; +} + diff --git a/src/nvidia/src/kernel/gpu/eng_state.c b/src/nvidia/src/kernel/gpu/eng_state.c new file mode 100644 index 000000000..05de436f7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/eng_state.c @@ -0,0 +1,547 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/gpu.h" +#include "gpu/eng_state.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "core/locks.h" + +#include "gpu/bus/kern_bus.h" + +// Function pointer wrapper +#define engstateStatePreInitUnlocked_Fnptr(pEngstate) pEngstate->__engstateStatePreInitUnlocked__ +#define engstateStateInitUnlocked_Fnptr(pEngstate) pEngstate->__engstateStateInitUnlocked__ + +NV_STATUS +engstateConstructBase_IMPL +( + OBJENGSTATE *pEngstate, + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc +) +{ + pEngstate->pGpu = pGpu; + pEngstate->engDesc = engDesc; + pEngstate->currentState = ENGSTATE_STATE_UNDEFINED; + +#if NV_PRINTF_STRINGS_ALLOWED + nvDbgSnprintf(pEngstate->name, sizeof(pEngstate->name), "%s:%d", + objGetClassName(pEngstate), ENGDESC_FIELD(pEngstate->engDesc, _INST)); +#endif + return NV_OK; +} + +void +engstateLogStateTransitionPre_IMPL +( + OBJENGSTATE *pEngstate, + ENGSTATE_STATE targetState, + ENGSTATE_TRANSITION_DATA *pData +) +{ + ENGSTATE_STATS *stats = &pEngstate->stats[targetState]; + NV_ASSERT_OR_RETURN_VOID(targetState < ENGSTATE_STATE_COUNT); + + // First call, init + portMemSet(stats, 0, sizeof(ENGSTATE_STATS)); + portMemSet(pData, 0, sizeof(ENGSTATE_TRANSITION_DATA)); + osGetPerformanceCounter(&pData->transitionStartTimeNs); + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS memstats = {0}; + portMemExTrackingGetActiveStats(NULL, &memstats); + + pData->memoryAllocCount = (NvS64) memstats.numAllocations; + pData->memoryAllocSize = (NvS64) memstats.usefulSize; + } +#endif +} + +void +engstateLogStateTransitionPost_IMPL +( + OBJENGSTATE *pEngstate, + ENGSTATE_STATE targetState, + ENGSTATE_TRANSITION_DATA *pData +) +{ + ENGSTATE_STATS *stats = &pEngstate->stats[targetState]; + NvU64 endTimeNs; + + NV_ASSERT_OR_RETURN_VOID(targetState < ENGSTATE_STATE_COUNT); + + osGetPerformanceCounter(&endTimeNs); + stats->transitionTimeUs = (endTimeNs - pData->transitionStartTimeNs) / 1000; + +#if NV_PRINTF_STRINGS_ALLOWED + const char *stateStrings[ENGSTATE_STATE_COUNT] = + { + "Undefined", + "Construct", + "Pre-Init", + "Init", + "Pre-Load", + "Load", + "Post-Load", + "Pre-Unload", + "Unload", + "Post-Unload", + "Destroy" + }; + ct_assert(ENGSTATE_STATE_COUNT == 11); + + NV_PRINTF(LEVEL_INFO, + "Engine %s state change: %s -> %s, took %uus\n", + engstateGetName(pEngstate), + stateStrings[pEngstate->currentState], stateStrings[targetState], + stats->transitionTimeUs); +#else + NV_PRINTF(LEVEL_INFO, + "Engine 0x%06x:%d state change: %d -> %d, took %uus\n", + objGetClassId(pEngstate), ENGDESC_FIELD(pEngstate->engDesc, _INST), + pEngstate->currentState, targetState, + stats->transitionTimeUs); +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS memstats = {0}; + portMemExTrackingGetActiveStats(NULL, &memstats); + + stats->memoryAllocCount = (NvS32)((NvS64)memstats.numAllocations - pData->memoryAllocCount); + stats->memoryAllocSize = (NvS32)((NvS64)memstats.usefulSize - pData->memoryAllocSize); + + NV_PRINTF(LEVEL_INFO, " Memory usage change: %d allocations, %d bytes\n", + stats->memoryAllocCount, stats->memoryAllocSize); + } +#endif + + pEngstate->currentState = targetState; +} + +const char * +engstateGetName_IMPL +( + OBJENGSTATE *pEngstate +) +{ +#if NV_PRINTF_STRINGS_ALLOWED + return pEngstate->name; +#else + return ""; +#endif +} + +/*! 
+ * @brief generic constructor + */ +NV_STATUS +engstateConstructEngine_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + ENGDESCRIPTOR engDesc +) +{ + return NV_OK; +} + +/*! + * @brief destructor + */ +void +engstateDestruct_IMPL +( + OBJENGSTATE *pEngstate +) +{ + portMemFree(pEngstate->pOriginalTunableState); + pEngstate->pOriginalTunableState = NULL; +} + +/*! + * @brief init missing engine + */ +void +engstateInitMissing_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + return; +} + +/*! + * @brief Wrapper around StatePreInitUnlocked and StatePreInitLocked + */ +NV_STATUS +engstateStatePreInit_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + /* Check if we overrode the unlocked variant */ + if ((engstateStatePreInitUnlocked_Fnptr(pEngstate) != + engstateStatePreInitUnlocked_IMPL)) + { + NV_STATUS status, lockStatus; + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + status = engstateStatePreInitUnlocked(pGpu, pEngstate); + + lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + + if (status == NV_OK) + status = lockStatus; + if (status != NV_OK) + return status; + } + + return engstateStatePreInitLocked(pGpu, pEngstate); +} + +/*! + * @brief state pre-init locked + */ +NV_STATUS +engstateStatePreInitLocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state pre-init unlocked + */ +NV_STATUS +engstateStatePreInitUnlocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief Wrapper around StateInitUnlocked and StateInitLocked + */ +NV_STATUS +engstateStateInit_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + /* Check if we overrode the unlocked variant */ + if (engstateStateInitUnlocked_Fnptr(pEngstate) != engstateStateInitUnlocked_IMPL) + { + NV_STATUS status, lockStatus; + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + status = engstateStateInitUnlocked(pGpu, pEngstate); + lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_INIT); + + if (status == NV_OK) + status = lockStatus; + if (status != NV_OK) + return status; + } + + return engstateStateInitLocked(pGpu, pEngstate); +} + +/*! + * @brief state init locked + */ +NV_STATUS +engstateStateInitLocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state init unlocked + */ +NV_STATUS +engstateStateInitUnlocked_IMPL(OBJGPU *pGpu, OBJENGSTATE *pEngstate) +{ + return NV_OK; +} + +/*! + * @brief state pre-load + */ +NV_STATUS +engstateStatePreLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state load + */ +NV_STATUS +engstateStateLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state post-load + */ +NV_STATUS +engstateStatePostLoad_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state unload + */ +NV_STATUS +engstateStateUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state pre-unload + */ +NV_STATUS +engstateStatePreUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! + * @brief state post-unload + */ +NV_STATUS +engstateStatePostUnload_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + NvU32 flags +) +{ + return NV_OK; +} + +/*! 
+ * @brief state destroy + */ +void +engstateStateDestroy_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ +} + +/*! + * @brief allocates a tunable state structure + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[out] ppTunableState + */ +NV_STATUS +engstateAllocTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void **ppTunableState +) +{ + *ppTunableState = NULL; + return NV_OK; +} + +/*! + * @brief frees a tunable state structure + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[in] pTunableState + */ +void +engstateFreeTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunableState +) +{ + portMemFree(pTunableState); +} + +/*! + * @brief fills pTunableState with the current state + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[out] pTunableState + */ +NV_STATUS +engstateGetTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunableState +) +{ + return NV_OK; +} + +/*! + * @brief sets the current state to values in pTunableState + * + * @param[in] pGpu + * @param[in,out] pEngstate + * @param[in] pTunableState + */ +NV_STATUS +engstateSetTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunableState +) +{ + return NV_OK; +} + +/*! + * @brief modifies pTunableState to be compatible with pEngstate->pOriginalTunableState + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[in,out] pTunableState + */ +NV_STATUS +engstateReconcileTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunableState +) +{ + return NV_OK; +} + +/*! + * @brief returns NV_ERR_GENERIC if two tunable states are incompatible + * + * @param[in] pGpu + * @param[in] pEngstate + * @param[in] pTunables1 + * @param[in] pTunables2 + */ +NV_STATUS +engstateCompareTunableState_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate, + void *pTunables1, + void *pTunables2 +) +{ + return NV_OK; +} + +/*! + * @brief returns the ENGDESCRIPTOR associated with this ENGSTATE + * + * @param[in] pEngstate + */ +ENGDESCRIPTOR +engstateGetDescriptor_IMPL +( + OBJENGSTATE *pEngstate +) +{ + return pEngstate->engDesc; +} + +/*! + * @brief checks for presence of the hardware associated with this ENGSTATE + * + * @param[in] pGpu + * @param[in] pEngstate + */ +NvBool +engstateIsPresent_IMPL +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NV_ASSERT(pEngstate != NULL); + return kbusCheckEngine_HAL(pGpu, pKernelBus, pEngstate->engDesc); +} + + +/*! + * @brief returns the FIFO associated with this ENGSTATE + * + * @param[in] pEngstate + */ +OBJFIFO * +engstateGetFifo_IMPL +( + OBJENGSTATE *pEngstate +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pEngstate); + + return GPU_GET_FIFO(pGpu); +} + diff --git a/src/nvidia/src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga100.c b/src/nvidia/src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga100.c new file mode 100644 index 000000000..3496e9fc9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga100.c @@ -0,0 +1,79 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides the implementation for all GA100+ specific KernelFalcon + * interfaces. + */ + +#include "gpu/falcon/kernel_falcon.h" + +#include "published/ampere/ga100/dev_falcon_v4.h" + +/*! + * Retrigger an interrupt message from the engine to the NV_CTRL tree + */ +void +kflcnIntrRetrigger_GA100 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_INTR_RETRIGGER(0), + DRF_DEF(_PFALCON, _FALCON_INTR_RETRIGGER, _TRIGGER, _TRUE)); +} + +/*! + * Mask a IMEM address to have only the BLK and OFFSET bits set. + * + * @param[in] addr IMEM address + */ +NvU32 +kflcnMaskImemAddr_GA100 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 addr +) +{ + return (addr & (DRF_SHIFTMASK(NV_PFALCON_FALCON_IMEMC_OFFS) | + DRF_SHIFTMASK(NV_PFALCON_FALCON_IMEMC_BLK))); +} + +/*! + * Mask a DMEM address to have only the BLK and OFFSET bits set. + * + * @param[in] addr DMEM address + */ +NvU32 +kflcnMaskDmemAddr_GA100 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 addr +) +{ + return (addr & (DRF_SHIFTMASK(NV_PFALCON_FALCON_DMEMC_OFFS) | + DRF_SHIFTMASK(NV_PFALCON_FALCON_DMEMC_BLK))); +} diff --git a/src/nvidia/src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga102.c b/src/nvidia/src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga102.c new file mode 100644 index 000000000..980b95f2a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga102.c @@ -0,0 +1,237 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides the implementation for all GA102+ specific KernelFalcon + * interfaces. + */ + +#include "gpu/falcon/kernel_falcon.h" +#include "os/os.h" + +#include "published/ampere/ga102/dev_falcon_v4.h" +#include "published/ampere/ga102/dev_falcon_v4_addendum.h" +#include "published/ampere/ga102/dev_riscv_pri.h" + + +#define PRE_RESET_PRE_SILICON_TIMEOUT_US 300000 +#define PRE_RESET_TIMEOUT_US 150 + + +/*! + * Function to check if RISCV is active + */ +NvBool +kflcnIsRiscvActive_GA10X +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + NvU32 val = kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_CPUCTL); + + return FLD_TEST_DRF(_PRISCV, _RISCV_CPUCTL, _ACTIVE_STAT, _ACTIVE, val); +} + +/*! + * Program BCR register of RISCV + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFlcn KernelFalcon pointer + * @param[in] bBRFetch BR_FETCH field value of BCR register + */ +void +kflcnRiscvProgramBcr_GA102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvBool bBRFetch +) +{ + NvU32 bcr; + + bcr = DRF_DEF(_PRISCV_RISCV, _BCR_CTRL, _CORE_SELECT, _RISCV) | + DRF_DEF(_PRISCV_RISCV, _BCR_CTRL, _VALID, _TRUE) | + DRF_NUM(_PRISCV_RISCV, _BCR_CTRL, _BRFETCH, bBRFetch); + + kflcnRiscvRegWrite_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_BCR_CTRL, bcr); +} + +/*! + * Switch the core to FALCON. Releases priv lockdown. + * Should not be called while in reset. See bug 200586493. + */ +void kflcnSwitchToFalcon_GA10X +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + NvU32 bcrCtrl; + RMTIMEOUT timeout; + NV_STATUS status = NV_OK; + + // If RISC-V is not enabled, then core must already be in Falcon + if (!kflcnIsRiscvCpuEnabled_HAL(pGpu, pKernelFlcn)) + return; + + bcrCtrl = kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_BCR_CTRL); + + if (FLD_TEST_DRF(_PRISCV_RISCV, _BCR_CTRL, _CORE_SELECT, _FALCON, bcrCtrl)) + return; + + kflcnRiscvRegWrite_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_BCR_CTRL, + DRF_DEF(_PRISCV_RISCV, _BCR_CTRL, _CORE_SELECT, _FALCON)); + + // Wait for Peregrine to report VALID, indicating that the core switch is successful + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + for (;;) + { + bcrCtrl = kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_BCR_CTRL); + if (FLD_TEST_DRF(_PRISCV_RISCV, _BCR_CTRL, _VALID, _TRUE, bcrCtrl)) + break; + + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + status = NV_ERR_GPU_IN_FULLCHIP_RESET; + else if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + status = NV_ERR_GPU_IS_LOST; + else + status = gpuCheckTimeout(pGpu, &timeout); + + if (status != NV_OK) + break; + } + + if (status != NV_OK) + { + NV_ASSERT_OK_FAILED("Failed to switch core to Falcon mode", status); + } +} + +/*! + * Pre-Reset sequence for Falcon/RiscV core. + * + * Read RESET_READY bit of HWCFG2 register. + * Bug 3419321: This sometimes may not get set by HW, so use time out. 
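+ * The wait is therefore best-effort: RESET_READY is polled until it asserts
+ * or the timeout expires (150 us on silicon, 300 ms on emulation), and NV_OK
+ * is returned either way so the reset sequence can continue.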
+ */ +NV_STATUS +kflcnPreResetWait_GA10X +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + NvU32 hwcfg2; + RMTIMEOUT timeout; + NvU32 flags = GPU_TIMEOUT_FLAGS_DEFAULT | GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG; + + if (!IS_SILICON(pGpu) && !IS_EMULATION(pGpu)) + { + return NV_OK; + } + + hwcfg2 = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_HWCFG2); + + if (IS_SILICON(pGpu)) + { + gpuSetTimeout(pGpu, PRE_RESET_TIMEOUT_US, &timeout, flags); + } + else if (IS_EMULATION(pGpu)) + { + gpuSetTimeout(pGpu, PRE_RESET_PRE_SILICON_TIMEOUT_US, &timeout, flags); + } + + while (!FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG2, _RESET_READY, _TRUE, hwcfg2)) + { + if (gpuCheckTimeout(pGpu, &timeout) == NV_ERR_TIMEOUT) + { + break; + } + + osSpinLoop(); + + hwcfg2 = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_HWCFG2); + } + + return NV_OK; +} + +/*! + * Wait for Falcon memory scrubbing to finish. + * + * Receives a Gpu pointer and a void pointer that must be to a KernelFalcon + * object, to facilitate use with gpuTimeoutCondWait. + * + * @param pGpu OBJGPU pointer + * @param pVoid KernelFalcon pointer + */ +static NvBool +_kflcnWaitForScrubbingToFinish(OBJGPU *pGpu, void *pVoid) +{ + NvBool bResult = NV_FALSE; + NvU32 dmaCtrl = 0; + KernelFalcon *pKernelFlcn = reinterpretCast(pVoid, KernelFalcon *); + + dmaCtrl = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_HWCFG2); + + if (FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG2, _MEM_SCRUBBING, _DONE, dmaCtrl)) + { + bResult = NV_TRUE; + } + + return bResult; +} + +NV_STATUS +kflcnWaitForResetToFinish_GA102(OBJGPU *pGpu, KernelFalcon *pKernelFlcn) +{ + // Skip the wait if we are in the reset path + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + return NV_ERR_GPU_IN_FULLCHIP_RESET; + + // + // We could potentially bypass the polling if we are going to read from IMEM or DMEM. + // But waiting ensures we avoid pri timouts. See bug 623410. + // + return gpuTimeoutCondWait(pGpu, _kflcnWaitForScrubbingToFinish, pKernelFlcn, NULL); +} + +/*! + * Read the IRQ status of the RISCV Falcon. + * + * @return IRQ status mask + */ +NvU32 +kflcnReadIntrStatus_GA102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + return ((kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQSTAT) & + kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQMASK) & + kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQDEST)) | + (kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQSTAT) & + kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_IRQMASK) & + kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_IRQDEST))); +} diff --git a/src/nvidia/src/kernel/gpu/falcon/arch/turing/kernel_falcon_tu102.c b/src/nvidia/src/kernel/gpu/falcon/arch/turing/kernel_falcon_tu102.c new file mode 100644 index 000000000..a51ca68c5 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/falcon/arch/turing/kernel_falcon_tu102.c @@ -0,0 +1,457 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides the implementation for all TU102+ specific KernelFalcon interfaces. + */ + +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/mc/kernel_mc.h" +#include "os/os.h" + +#include "published/turing/tu102/dev_riscv_pri.h" +#include "published/turing/tu102/dev_falcon_v4.h" +#include "published/turing/tu102/dev_fbif_v4.h" + +/*! + * Read a Falcon register. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFlcn KernelFalcon pointer + * @param[in] offset Offset into the Falcon register space. + * + * @returns The value of the register. + */ +NvU32 +kflcnRegRead_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 offset +) +{ + return REG_INST_DEVIDX_RD32_EX(pGpu, DEVICE_INDEX_GPU, 0, + pKernelFlcn->registerBase + offset, NULL); +} + +/*! + * Write a Falcon register. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFlcn KernelFalcon pointer + * @param[in] offset Offset into the Falcon register space. + * @param[in] data Data to write to the register. + */ +void +kflcnRegWrite_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 offset, + NvU32 data +) +{ + REG_INST_DEVIDX_WR32_EX(pGpu, DEVICE_INDEX_GPU, 0, + pKernelFlcn->registerBase + offset, data, NULL); +} + +/*! + * Read a RISCV register. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFlcn KernelFalcon pointer + * @param[in] offset Offset into the RISCV register space. + * + * @returns The value of the register. + */ +NvU32 +kflcnRiscvRegRead_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 offset +) +{ + return REG_INST_DEVIDX_RD32_EX(pGpu, DEVICE_INDEX_GPU, 0, + pKernelFlcn->riscvRegisterBase + offset, NULL); +} + +/*! + * Write a RISCV register. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFlcn KernelFalcon pointer + * @param[in] offset Offset into the RISCV register space. + * @param[in] data Data to write to the register. + */ +void +kflcnRiscvRegWrite_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 offset, + NvU32 data +) +{ + REG_INST_DEVIDX_WR32_EX(pGpu, DEVICE_INDEX_GPU, 0, + pKernelFlcn->riscvRegisterBase + offset, data, NULL); +} + +/*! + * Check the existence of RISCV CPU. 
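+ *
+ * Reads NV_PFALCON_FALCON_HWCFG2 and reports whether its _RISCV field is
+ * _ENABLE, i.e. whether this Falcon has a RISC-V core available.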
+ */ +NvBool +kflcnIsRiscvCpuEnabled_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + NvU32 reg = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_HWCFG2); + + return FLD_TEST_DRF(_PFALCON, _FALCON_HWCFG2, _RISCV, _ENABLE, reg); +} + +/*! + * Function to check if RISCV is active. + */ +NvBool +kflcnIsRiscvActive_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + NvU32 val; + + val = kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_CORE_SWITCH_RISCV_STATUS); + + return FLD_TEST_DRF(_PRISCV, _RISCV_CORE_SWITCH_RISCV_STATUS, _ACTIVE_STAT, _ACTIVE, val); +} + +/*! + * Perform a reset of the Falcon. + */ +void +kflcnReset_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + kflcnEnable_HAL(pGpu, pKernelFlcn, NV_FALSE); + kflcnEnable_HAL(pGpu, pKernelFlcn, NV_TRUE); +} + +/*! + * Does a reset of the Falcon using secure reset and switches to Falcon mode. + */ +void +kflcnSecureReset_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + NV_ASSERT_OR_RETURN_VOID(kflcnPreResetWait(pGpu, pKernelFlcn) == NV_OK); + NV_ASSERT_OK(kflcnResetHw(pGpu, pKernelFlcn)); + + kflcnWaitForResetToFinish_HAL(pGpu, pKernelFlcn); + + kflcnSwitchToFalcon_HAL(pGpu, pKernelFlcn); +} + +void +_kflcnClearInterrupts(OBJGPU *pGpu, KernelFalcon *pKernelFlcn) +{ + // Delay 1us in case engine is still resetting. + osDelayUs(1); + + // Clear Interrupts + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQMCLR, 0xffffffff); +} + +/*! + * Enable or disable the Falcon to FALCON mode. + */ +void +kflcnEnable_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvBool bEnable +) +{ + KernelMc *pKernelMc = GPU_GET_KERNEL_MC(pGpu); + + if (!bEnable) + { + // Switch to Falcon to release lockdown + kflcnSwitchToFalcon_HAL(pGpu, pKernelFlcn); + + _kflcnClearInterrupts(pGpu, pKernelFlcn); + + // Disable in PMC if engine is present in PMC + if (pKernelFlcn->pmcEnableMask > 0) + { + kmcWritePmcEnableReg_HAL(pGpu, pKernelMc, pKernelFlcn->pmcEnableMask, + NV_FALSE, pKernelFlcn->bIsPmcDeviceEngine); + // Read back to create enough of a delay + kmcReadPmcEnableReg_HAL(pGpu, pKernelMc, pKernelFlcn->bIsPmcDeviceEngine); + kmcReadPmcEnableReg_HAL(pGpu, pKernelMc, pKernelFlcn->bIsPmcDeviceEngine); + } + else + { + kflcnSecureReset(pGpu, pKernelFlcn); + } + } + else + { + // Enable in PMC if engine is present in PMC + if (pKernelFlcn->pmcEnableMask > 0) + { + kmcWritePmcEnableReg_HAL(pGpu, pKernelMc, pKernelFlcn->pmcEnableMask, + NV_TRUE, pKernelFlcn->bIsPmcDeviceEngine); + // Read back to create enough of a delay + kmcReadPmcEnableReg_HAL(pGpu, pKernelMc, pKernelFlcn->bIsPmcDeviceEngine); + kmcReadPmcEnableReg_HAL(pGpu, pKernelMc, pKernelFlcn->bIsPmcDeviceEngine); + } + else + { + kflcnSecureReset(pGpu, pKernelFlcn); + } + + kflcnSwitchToFalcon_HAL(pGpu, pKernelFlcn); + + kflcnWaitForResetToFinish_HAL(pGpu, pKernelFlcn); + + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_RM, + pGpu->chipId0); + } +} + +/*! + * Start a Falcon CPU. + */ +void +kflcnStartCpu_TU102(OBJGPU *pGpu, KernelFalcon *pKernelFlcn) +{ + if (FLD_TEST_DRF(_PFALCON, _FALCON_CPUCTL, _ALIAS_EN, _TRUE, + kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_CPUCTL))) + { + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_CPUCTL_ALIAS, + DRF_DEF(_PFALCON, _FALCON_CPUCTL_ALIAS, _STARTCPU, _TRUE)); + } + else + { + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_CPUCTL, + DRF_DEF(_PFALCON, _FALCON_CPUCTL, _STARTCPU, _TRUE)); + } +} + +/*! + * Disables context requirement of Falcon. 
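+ *
+ * Sets _ALLOW_PHYS_NO_CTX in NV_PFALCON_FBIF_CTL so physical addresses are
+ * accepted without a channel context, and clears NV_PFALCON_FALCON_DMACTL so
+ * DMA requests can be issued without a block bind.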
+ */ +void +kflcnDisableCtxReq_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + NvU32 data = 0; + + data = GPU_REG_RD32(pGpu, pKernelFlcn->fbifBase + NV_PFALCON_FBIF_CTL); + data = FLD_SET_DRF(_PFALCON, _FBIF_CTL, _ALLOW_PHYS_NO_CTX, _ALLOW, data); + + // Allow physical address without CTX + GPU_REG_WR32(pGpu, pKernelFlcn->fbifBase + NV_PFALCON_FBIF_CTL, data); + + // Allow issue DMA request without block bind + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMACTL, 0x0); +} + +/*! + * Checks if Falcon memory scrubbing is finished. + * + * @param pGpu OBJGPU pointer + * @param pVoid void pointer to a KernelFalcon instance + */ +static NvBool +_kflcnMemScrubbingFinished +( + OBJGPU *pGpu, + void *pVoid +) +{ + NvBool bResult = NV_FALSE; + NvU32 dmaCtrl = 0; + KernelFalcon *pKernelFlcn = reinterpretCast(pVoid, KernelFalcon *); + + dmaCtrl = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMACTL); + + if (FLD_TEST_DRF(_PFALCON, _FALCON_DMACTL, _DMEM_SCRUBBING, _DONE, dmaCtrl) && + FLD_TEST_DRF(_PFALCON, _FALCON_DMACTL, _IMEM_SCRUBBING, _DONE, dmaCtrl)) + { + bResult = NV_TRUE; + } + + return bResult; +} + +/*! + * Wait for Falcon reset to finish. + */ +NV_STATUS +kflcnWaitForResetToFinish_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + // Skip the wait if we are in the GPU reset path + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + return NV_ERR_GPU_IN_FULLCHIP_RESET; + + // + // We could potentially bypass the polling if we are going to read from IMEM or DMEM. + // But waiting ensures we avoid pri timouts. See bug 623410. + // + return gpuTimeoutCondWait(pGpu, _kflcnMemScrubbingFinished, pKernelFlcn, NULL); +} + +/*! + * Wait for Falcon to halt. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFlcn KernelFalcon pointer + * @param[in] timeoutUs Timeout value + * + * @returns NV_ERR_TIMEOUT if falcon fails to halt. + */ +NV_STATUS +kflcnWaitForHalt_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 timeoutUs, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + RMTIMEOUT timeout; + + gpuSetTimeout(pGpu, timeoutUs, &timeout, flags); + + while (!FLD_TEST_DRF(_PFALCON, _FALCON, _CPUCTL_HALTED, _TRUE, + kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_CPUCTL))) + { + status = gpuCheckTimeout(pGpu, &timeout); + if (status == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_ERROR, "Timeout waiting for Falcon to halt\n"); + DBG_BREAKPOINT(); + break; + } + osSpinLoop(); + } + + return status; +} + +/*! + * Read the IRQ status of the RISCV Falcon. + * + * @return IRQ status mask + */ +NvU32 +kflcnReadIntrStatus_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn +) +{ + return ((kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQSTAT) & + kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQMASK) & + kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQDEST)) | + (kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IRQSTAT) & + kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_IRQMASK) & + kflcnRiscvRegRead_HAL(pGpu, pKernelFlcn, NV_PRISCV_RISCV_IRQDEST))); +} + + +/*! + * Mask a IMEM address to have only the BLK and OFFSET bits set. + * + * @param[in] addr IMEM address + */ +NvU32 +kflcnMaskImemAddr_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 addr + +) +{ + return (addr & (DRF_SHIFTMASK(NV_PFALCON_FALCON_IMEMC_OFFS) | + DRF_SHIFTMASK(NV_PFALCON_FALCON_IMEMC_BLK))); +} + +/*! + * Mask a DMEM address to have only the BLK and OFFSET bits set. 
+ * + * @param[in] addr DMEM address + */ +NvU32 +kflcnMaskDmemAddr_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 addr +) +{ + return (addr & (DRF_SHIFTMASK(NV_PFALCON_FALCON_DMEMC_OFFS) | + DRF_SHIFTMASK(NV_PFALCON_FALCON_DMEMC_BLK))); +} + +void gkflcnNonstallIntrCheckAndClear_TU102(OBJGPU *pGpu, GenericKernelFalcon *pGKF, THREAD_STATE_NODE *pThreadState) +{ + NvU32 registerBase = staticCast(pGKF, KernelFalcon)->registerBase; + NvU32 intr, clearBits; + + NV_ASSERT(registerBase != 0); + + intr = GPU_REG_RD32_EX(pGpu, registerBase + NV_PFALCON_FALCON_IRQSTAT, + pThreadState); + + if (DRF_VAL( _PFALCON_FALCON, _IRQSTAT, _SWGEN1, intr)) + { + NV_PRINTF(LEVEL_INFO, "Handling Trap Interrupt\n"); + + // Clear interrupt + clearBits = DRF_NUM(_PFALCON_FALCON, _IRQSTAT, _SWGEN1, 1); + GPU_REG_WR32_EX(pGpu, registerBase + NV_PFALCON_FALCON_IRQSCLR, + clearBits, pThreadState); + } +} diff --git a/src/nvidia/src/kernel/gpu/falcon/kernel_falcon.c b/src/nvidia/src/kernel/gpu/falcon/kernel_falcon.c new file mode 100644 index 000000000..b1bfea398 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/falcon/kernel_falcon.c @@ -0,0 +1,368 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/sec2/kernel_sec2.h" +#include "gpu/nvdec/kernel_nvdec.h" +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/fifo/kernel_fifo.h" +#include "gpu/fifo/kernel_channel.h" +#include "gpu/fifo/kernel_channel_group.h" +#include "gpu/fifo/kernel_channel_group_api.h" +#include "gpu/intr/intr.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "mem_mgr/gpu_vaspace.h" +#include "mem_mgr/ctx_buf_pool.h" +#include "rmapi/rmapi.h" + + +void kflcnConfigureEngine_IMPL(OBJGPU *pGpu, KernelFalcon *pKernelFalcon, KernelFalconEngineConfig *pFalconConfig) +{ + pKernelFalcon->registerBase = pFalconConfig->registerBase; + pKernelFalcon->riscvRegisterBase = pFalconConfig->riscvRegisterBase; + pKernelFalcon->fbifBase = pFalconConfig->fbifBase; + pKernelFalcon->bBootFromHs = pFalconConfig->bBootFromHs; + pKernelFalcon->pmcEnableMask = pFalconConfig->pmcEnableMask; + pKernelFalcon->bIsPmcDeviceEngine = pFalconConfig->bIsPmcDeviceEngine; + pKernelFalcon->physEngDesc = pFalconConfig->physEngDesc; + pKernelFalcon->ctxAttr = pFalconConfig->ctxAttr; + pKernelFalcon->ctxBufferSize = pFalconConfig->ctxBufferSize; + pKernelFalcon->addrSpaceList = pFalconConfig->addrSpaceList; + + NV_PRINTF(LEVEL_INFO, "for physEngDesc 0x%x\n", pKernelFalcon->physEngDesc); +} + +KernelFalcon *kflcnGetKernelFalconForEngine_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR physEngDesc) +{ + // + // Check for any special objects that are instantiated as GPU children. + // Otherwise, OBJGPU keeps track of all falcons as reported by GSP + // + switch (physEngDesc) + { + // this list is mirrored in subdeviceCtrlCmdInternalGetConstructedFalconInfo_IMPL + case ENG_SEC2: return staticCast(GPU_GET_KERNEL_SEC2(pGpu), KernelFalcon); + case ENG_GSP: return staticCast(GPU_GET_KERNEL_GSP(pGpu), KernelFalcon); + case ENG_NVDEC(0): return staticCast(GPU_GET_KERNEL_NVDEC(pGpu), KernelFalcon); + default: + return staticCast(gpuGetGenericKernelFalconForEngine(pGpu, physEngDesc), KernelFalcon); + } +} + + +static NvBool _kflcnNeedToAllocContext(OBJGPU *pGpu, KernelChannel *pKernelChannel) +{ + NvU32 gfid = kchannelGetGfid(pKernelChannel); + + // + // In case of vGPU, when client allocated ctx buffer feature enabled, vGPU guest + // RM will alloc all FLCN context buffers for VF channels. + // But, for PF channels (IS_GFID_PF(gfid) is TRUE), host RM needs to allocate the + // FLCN buffers. 
+ // + if (!gpuIsClientRmAllocatedCtxBufferEnabled(pGpu) || IS_GFID_VF(gfid)) + return NV_FALSE; + + return NV_TRUE; +} + +static NV_STATUS _kflcnAllocAndMapCtxBuffer +( + OBJGPU *pGpu, + KernelFalcon *pKernelFalcon, + KernelChannel *pKernelChannel +) +{ + MEMORY_DESCRIPTOR *pCtxMemDesc = NULL; + CTX_BUF_POOL_INFO *pCtxBufPool = NULL; + KernelChannelGroup *pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup; + OBJGVASPACE *pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE); + NvU8 *pInstMem; + NV_STATUS status = NV_OK; + NvU64 flags = MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + + if (kchannelIsCtxBufferAllocSkipped(pKernelChannel)) + return NV_OK; + + kchangrpGetEngineContextMemDesc(pGpu, pKernelChannelGroup, &pCtxMemDesc); + if (pCtxMemDesc != NULL) + { + NV_PRINTF(LEVEL_ERROR, "This channel already has a falcon engine instance on engine %d:%d\n", + ENGDESC_FIELD(pKernelFalcon->physEngDesc, _CLASS), + ENGDESC_FIELD(pKernelFalcon->physEngDesc, _INST)); + return NV_OK; + } + + if (ctxBufPoolIsSupported(pGpu) && pKernelChannelGroup->pCtxBufPool != NULL) + { + flags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + pCtxBufPool = pKernelChannelGroup->pCtxBufPool; + } + + // + // Setup an engine context and initialize. + // + NV_ASSERT_OK_OR_RETURN(memdescCreate(&pCtxMemDesc, pGpu, + pKernelFalcon->ctxBufferSize, + FLCN_BLK_ALIGNMENT, + NV_TRUE, + ADDR_UNKNOWN, + pKernelFalcon->ctxAttr, + flags)); + NV_ASSERT_OK_OR_GOTO(status, + memdescSetCtxBufPool(pCtxMemDesc, pCtxBufPool), + done); + NV_ASSERT_OK_OR_GOTO(status, + memdescAllocList(pCtxMemDesc, memdescU32ToAddrSpaceList(pKernelFalcon->addrSpaceList)), + done); + + pInstMem = memdescMapInternal(pGpu, pCtxMemDesc, 0); + if (pInstMem != NULL) + { + // Clear the engine context buffer + NvU32 i; + for (i = 0; i < pKernelFalcon->ctxBufferSize; i += 4) + { + MEM_WR32(pInstMem + i, 0); + } + memdescUnmapInternal(pGpu, pCtxMemDesc, 0); + } + else + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + NV_ASSERT_OK_OR_GOTO(status, + kchannelSetEngineContextMemDesc(pGpu, pKernelChannel, pKernelFalcon->physEngDesc, pCtxMemDesc), + done); + + if (!gvaspaceIsExternallyOwned(pGVAS)) + { + NV_ASSERT_OK_OR_GOTO(status, + kchannelMapEngineCtxBuf(pGpu, pKernelChannel, pKernelFalcon->physEngDesc), + done); + } + +done: + if (status != NV_OK) + { + memdescFree(pCtxMemDesc); + memdescDestroy(pCtxMemDesc); + } + + return status; +} + +static NV_STATUS _kflcnPromoteContext +( + OBJGPU *pGpu, + KernelFalcon *pKernelFalcon, + KernelChannel *pKernelChannel +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + RsClient *pClient = RES_GET_CLIENT(pKernelChannel); + Subdevice *pSubdevice; + NvU64 addr; + NvU32 engineType; + ENGINE_CTX_DESCRIPTOR *pEngCtx; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS rmCtrlParams = {0}; + + NV_ASSERT_OK_OR_RETURN(subdeviceGetByGpu(pClient, pGpu, &pSubdevice)); + NV_ASSERT_OR_RETURN(gpumgrGetSubDeviceInstanceFromGpu(pGpu) == 0, NV_ERR_INVALID_STATE); + + pEngCtx = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[0]; + NV_ASSERT_OR_RETURN(pEngCtx != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OK_OR_RETURN(vaListFindVa(&pEngCtx->vaList, pKernelChannel->pVAS, &addr)); + + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + ENGINE_INFO_TYPE_ENG_DESC, pKernelFalcon->physEngDesc, + ENGINE_INFO_TYPE_NV2080, &engineType)); + + rmCtrlParams.hClient = pClient->hClient; + rmCtrlParams.hObject = RES_GET_HANDLE(pKernelChannel); + rmCtrlParams.hChanClient = 
pClient->hClient; + rmCtrlParams.virtAddress = addr; + rmCtrlParams.size = pKernelFalcon->ctxBufferSize; + rmCtrlParams.engineType = engineType; + rmCtrlParams.ChID = pKernelChannel->ChID; + + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, pClient->hClient, RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, &rmCtrlParams, sizeof(rmCtrlParams))); + + return NV_OK; +} + + +NV_STATUS kflcnAllocContext_IMPL +( + OBJGPU *pGpu, + KernelFalcon *pKernelFalcon, + KernelChannel *pKernelChannel, + NvU32 classNum +) +{ + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + + if (!_kflcnNeedToAllocContext(pGpu, pKernelChannel)) + return NV_OK; + + NV_ASSERT_OR_RETURN(gpuIsClassSupported(pGpu, classNum), NV_ERR_INVALID_OBJECT); + + NV_ASSERT_OK_OR_RETURN(_kflcnAllocAndMapCtxBuffer(pGpu, pKernelFalcon, pKernelChannel)); + + return _kflcnPromoteContext(pGpu, pKernelFalcon, pKernelChannel); +} + +NV_STATUS kflcnFreeContext_IMPL +( + OBJGPU *pGpu, + KernelFalcon *pKernelFalcon, + KernelChannel *pKernelChannel, + NvU32 classNum +) +{ + MEMORY_DESCRIPTOR *pCtxMemDesc = NULL; + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + + if (!_kflcnNeedToAllocContext(pGpu, pKernelChannel)) + return NV_OK; + + if (kchannelIsCtxBufferAllocSkipped(pKernelChannel)) + return NV_OK; + + kchangrpGetEngineContextMemDesc(pGpu, + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup, + &pCtxMemDesc); + + if (pCtxMemDesc == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "The channel 0x%x does not have a falcon engine instance for engDesc=0x%x\n", + kchannelGetDebugTag(pKernelChannel), pKernelFalcon->physEngDesc); + return NV_OK; + } + + kchannelUnmapEngineCtxBuf(pGpu, pKernelChannel, pKernelFalcon->physEngDesc); + kchannelSetEngineContextMemDesc(pGpu, pKernelChannel, pKernelFalcon->physEngDesc, NULL); + memdescFree(pCtxMemDesc); + memdescDestroy(pCtxMemDesc); + + return NV_OK; +} + +NV_STATUS gkflcnConstruct_IMPL +( + GenericKernelFalcon *pGenericKernelFalcon, + OBJGPU *pGpu, + KernelFalconEngineConfig *pFalconConfig +) +{ + KernelFalcon *pKernelFalcon = staticCast(pGenericKernelFalcon, KernelFalcon); + if (pFalconConfig != NULL) + { + kflcnConfigureEngine(pGpu, pKernelFalcon, pFalconConfig); + } + return NV_OK; +} + +NV_STATUS gkflcnResetHw_IMPL(OBJGPU *pGpu, GenericKernelFalcon *pGenKernFlcn) +{ + NV_ASSERT_FAILED("This should only be called on full KernelFalcon implementations"); + return NV_ERR_NOT_SUPPORTED; +} +NvBool gkflcnIsEngineInReset_IMPL(OBJGPU *pGpu, GenericKernelFalcon *pGenKernFlcn) +{ + NV_ASSERT_FAILED("This should only be called on full KernelFalcon implementations"); + return NV_FALSE; +} + +void gkflcnRegisterIntrService_IMPL(OBJGPU *pGpu, GenericKernelFalcon *pGenericKernelFalcon, IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX]) +{ + KernelFalcon *pKernelFalcon = staticCast(pGenericKernelFalcon, KernelFalcon); + NV_ASSERT_OR_RETURN_VOID(pKernelFalcon); + + NV_PRINTF(LEVEL_INFO, "physEngDesc 0x%x\n", pKernelFalcon->physEngDesc); + + if (!IS_NVDEC(pKernelFalcon->physEngDesc) && + pKernelFalcon->physEngDesc != ENG_OFA && + !IS_NVJPEG(pKernelFalcon->physEngDesc) && + !IS_MSENC(pKernelFalcon->physEngDesc)) + return; + + // Register to handle nonstalling interrupts of the corresponding physical falcon in kernel rm + if (pKernelFalcon->physEngDesc != ENG_INVALID) + { + NvU32 mcIdx = MC_ENGINE_IDX_NULL; + + NV_STATUS status = kfifoEngineInfoXlate_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + ENGINE_INFO_TYPE_ENG_DESC, pKernelFalcon->physEngDesc, + ENGINE_INFO_TYPE_MC, &mcIdx); 
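+        // The ENG_DESC is translated to the MC interrupt index used to key
+        // pRecords; if no MC entry exists for this engine, registration is skipped.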
+ + NV_ASSERT_OR_RETURN_VOID(status == NV_OK); + + NV_PRINTF(LEVEL_INFO, "Registering 0x%x/0x%x to handle nonstall intr\n", pKernelFalcon->physEngDesc, mcIdx); + + NV_ASSERT(pRecords[mcIdx].pNotificationService == NULL); + pRecords[mcIdx].bFifoWaiveNotify = NV_FALSE; + pRecords[mcIdx].pNotificationService = staticCast(pGenericKernelFalcon, IntrService); + } +} + +NV_STATUS gkflcnServiceNotificationInterrupt_IMPL(OBJGPU *pGpu, GenericKernelFalcon *pGenericKernelFalcon, IntrServiceServiceNotificationInterruptArguments *pParams) +{ + NvU32 idxMc = pParams->engineIdx; + NvU32 idx2080 = NV2080_ENGINE_TYPE_NULL; + + NV_PRINTF(LEVEL_INFO, "nonstall intr for MC 0x%x\n", idxMc); + + if (MC_ENGINE_IDX_NVDECn(0) <= idxMc && + idxMc < MC_ENGINE_IDX_NVDECn(NV2080_ENGINE_TYPE_NVDEC_SIZE)) + { + NvU32 nvdecIdx = idxMc - MC_ENGINE_IDX_NVDECn(0); + idx2080 = NV2080_ENGINE_TYPE_NVDEC(nvdecIdx); + } else if (idxMc == MC_ENGINE_IDX_OFA0) + idx2080 = NV2080_ENGINE_TYPE_OFA; + else if (MC_ENGINE_IDX_NVJPEGn(0) <= idxMc && + idxMc < MC_ENGINE_IDX_NVJPEGn(NV2080_ENGINE_TYPE_NVJPEG_SIZE)) + { + NvU32 nvjpgIdx = idxMc - MC_ENGINE_IDX_NVJPEGn(0); + idx2080 = NV2080_ENGINE_TYPE_NVJPEG(nvjpgIdx); + } else if (MC_ENGINE_IDX_MSENCn(0) <= idxMc && + idxMc < MC_ENGINE_IDX_MSENCn(NV2080_ENGINE_TYPE_NVENC_SIZE)) + { + NvU32 msencIdx = idxMc - MC_ENGINE_IDX_MSENCn(0); + idx2080 = NV2080_ENGINE_TYPE_NVENC(msencIdx); + } + + NV_ASSERT_OR_RETURN(idx2080 != NV2080_ENGINE_TYPE_NULL, NV_ERR_INVALID_STATE); + + gkflcnNonstallIntrCheckAndClear_HAL(pGpu, pGenericKernelFalcon, pParams->pThreadState); + + // Wake up channels waiting on this event + engineNonStallIntrNotify(pGpu, idx2080); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_channel_ga100.c b/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_channel_ga100.c new file mode 100644 index 000000000..4f3a936e0 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_channel_ga100.c @@ -0,0 +1,47 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/fifo/kernel_channel.h" +#include "published/ampere/ga100/dev_ram.h" + +/** + * @brief Verify that the given userd physical address is of the correct size + * + * @param[in] pKernelChannel KernelChannel pointer + * @param[in] userdAddrLo low USERD physical address bits + * @param[in] userdAddrHi high USERD physical address bits + * + * @returns NV_TRUE if the given userd physical address is of the correct size + NV_FALSE otherwise + */ +NvBool +kchannelIsUserdAddrSizeValid_GA100 +( + KernelChannel *pKernelChannel, + NvU32 userdAddrLo, + NvU32 userdAddrHi +) +{ + return ((userdAddrLo & SF_MASK(NV_RAMRL_ENTRY_CHAN_USERD_PTR_LO)) == userdAddrLo) && + ((userdAddrHi & SF_MASK(NV_RAMRL_ENTRY_CHAN_USERD_PTR_HI_HW)) == userdAddrHi); +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_fifo_ga100.c b/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_fifo_ga100.c new file mode 100644 index 000000000..a756011cf --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_fifo_ga100.c @@ -0,0 +1,347 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/ce/kernel_ce_shared.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/bus/kern_bus.h" + +#include "vgpu/vgpu_events.h" + +#include "published/ampere/ga100/dev_ram.h" +#include "published/ampere/ga100/dev_ctrl.h" + +/** + * @brief Translates between 2 engine values + * + * To iterate through a value for all engines call with inType of + * ENGINE_INFO_TYPE_INVALID for 0 through fifoGetNumEngines(). 
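+ *
+ * On GA100, requests involving ENGINE_INFO_TYPE_INTR return
+ * NV_ERR_NOT_SUPPORTED. When MIG is in use, MMU fault IDs for GR engines are
+ * translated through the GR engine's VEID range, since the device info table
+ * only describes the fault ID of GR0.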
+ * + * @param pGpu + * @param pKernelFifo + * @param[in] inType ENGINE_INFO_TYPE_* + * @param[in] inVal + * @param[in] outType ENGINE_INFO_TYPE_* + * @param[out] pOutVal + */ +NV_STATUS +kfifoEngineInfoXlate_GA100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + ENGINE_INFO_TYPE inType, + NvU32 inVal, + ENGINE_INFO_TYPE outType, + NvU32 *pOutVal +) +{ + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + // We no longer store ENGINE_INFO_TYPE_INTR on Ampere+ (bug 24110055) + if (inType == ENGINE_INFO_TYPE_INTR || outType == ENGINE_INFO_TYPE_INTR) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // We need extra logic for translation when SMC is enabled and input or output is + // MMU_FAULT_ID because device Info cannot translate MMU_FAULT_ID to/from any type for GR > 0 + // + if (IS_MIG_IN_USE(pGpu)) + { + NvU32 baseGrFaultId; + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_GV100(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_ENG_DESC, ENG_GR(0), + ENGINE_INFO_TYPE_MMU_FAULT_ID, &baseGrFaultId)); + + if (inType == ENGINE_INFO_TYPE_MMU_FAULT_ID) + { + NvU32 subctxId, grIdx; + NvU32 maxSubctx = kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, NV_FALSE); + + // check if input fault ID corresponds to GR + if ((inVal >= baseGrFaultId) && (inVal < (baseGrFaultId + maxSubctx))) + { + subctxId = inVal - baseGrFaultId; + NV_ASSERT_OK_OR_RETURN(kgrmgrGetGrIdxForVeid(pGpu, pKernelGraphicsManager, subctxId, &grIdx)); + inVal = NV2080_ENGINE_TYPE_GR(grIdx); + inType = ENGINE_INFO_TYPE_NV2080; + } + } + + if (outType == ENGINE_INFO_TYPE_MMU_FAULT_ID) + { + NvU32 engineId, grIdx, startSubctxId; + + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_GV100(pGpu, pKernelFifo, inType, inVal, + ENGINE_INFO_TYPE_NV2080, &engineId)); + + // check if engineId corresponding to input is GR + if (NV2080_ENGINE_TYPE_IS_GR(engineId)) + { + grIdx = NV2080_ENGINE_TYPE_GR_IDX(engineId); + NV_ASSERT_OK_OR_RETURN(kgrmgrGetVeidBaseForGrIdx(pGpu, pKernelGraphicsManager, grIdx, &startSubctxId)); + *pOutVal = baseGrFaultId + startSubctxId; + return NV_OK; + } + } + } + + return kfifoEngineInfoXlate_GV100(pGpu, pKernelFifo, inType, inVal, outType, pOutVal); +} + + +/*! + * @brief Get the local maximum number of subctx allowed in this TSG + * + * @param pGpu + * @param pKernelFifo + * @param[in] pKernelChannelGroup + * @param[in] bLegacyMode Is TSG in legacy mode. + */ +NvU32 +kfifoChannelGroupGetLocalMaxSubcontext_GA100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + KernelChannelGroup *pKernelChannelGroup, + NvBool bLegacyMode +) +{ + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_ARGUMENT); + + if (IS_MIG_IN_USE(pGpu) && !bLegacyMode && + NV2080_ENGINE_TYPE_IS_GR(pKernelChannelGroup->engineType)) + { + NvU32 grIdx = NV2080_ENGINE_TYPE_GR_IDX( + pKernelChannelGroup->engineType); + return nvPopCount64(pKernelGraphicsManager->grIdxVeidMask[grIdx]); + } + + // In SMC-Legacy mode, revert to pre-Ampere behavior + return kfifoChannelGroupGetLocalMaxSubcontext_GM107(pGpu, pKernelFifo, + pKernelChannelGroup, + bLegacyMode); +} + +/*! 
+ * @brief Clear USERD memory + */ +void +kfifoSetupUserD_GA100 +( + KernelFifo *pKernelFifo, + NvU8 *pUserD +) +{ + NV_ASSERT_OR_RETURN_VOID(pUserD != NULL); + + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_PUT ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GET ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_REF ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_PUT_HI ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_TOP_LEVEL_GET ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_TOP_LEVEL_GET_HI ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GET_HI ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GP_GET ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GP_PUT ), 0 ); +} + +/*! + * @brief Update the usermode doorbell register with work submit token to notify + * host that work is available on this channel. + * + * @param[in] pGpu + * @param[in] pKernelFifo + * @param[in] workSubmitToken Token to update the doorbell with + * @param[in] runlistId Runlist ID + */ +NV_STATUS +kfifoUpdateUsermodeDoorbell_GA100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 workSubmitToken, + NvU32 runlistId +) +{ + // + // Updating the usermode doorbell is different for CPU vs. GSP. + // + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + return kfifoUpdateUsermodeDoorbell_TU102(pGpu, pKernelFifo, workSubmitToken, runlistId); + } + else + { + return kfifoUpdateInternalDoorbellForUsermode_HAL(pGpu, pKernelFifo, workSubmitToken, runlistId); + } + + return NV_OK; +} + +/*! + * @brief Construct the worksubmit token. Caller cannot make assumption about this handle. + * + * @param[in] pGpu + * @param[in] pKernelFifo + * @param[in] pKernelChannel + * @param[out] pGeneratedToken Store the generated token + * @param[in] bUsedForHost Used on Host RM + * + */ +NV_STATUS +kfifoGenerateWorkSubmitToken_GA100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + KernelChannel *pKernelChannel, + NvU32 *pGeneratedToken, + NvBool bUsedForHost +) +{ + NvU32 chId; + NvU32 gfId; + NvU32 runlistId; + NvU32 val = 0; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + + NV_ASSERT_OR_RETURN(pGeneratedToken != NULL, NV_ERR_INVALID_OBJECT); + NV_ASSERT_OR_RETURN((pKernelChannel->pKernelChannelGroupApi != NULL) && + (pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup != NULL), + NV_ERR_INVALID_STATE); + + chId = pKernelChannel->ChID; + + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfId)); + + // TODO: Remove check on Ampere. Bug 200606706. + if (!bUsedForHost && IS_GFID_VF(gfId)) + { + NvU32 vChId; + + NV_ASSERT_OK_OR_RETURN(kfifoGetVChIdForSChId_HAL(pGpu, pKernelFifo, + chId, gfId, + kchannelGetEngineType(pKernelChannel), + &vChId)); + chId = vChId; + } + + // TODO: Remove, on Ampere channels should be set to a valid runlist before allocation. Bug 200606706. 
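+        // The runlist id is folded into the token below, so a channel that is
+        // not yet bound to a runlist cannot be given a valid token (e.g.
+        // runlistId 1 and chId 0x2a produce _RUNLIST_ID = 1, _VECTOR = 0x2a).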
+ if (!kchannelIsRunlistSet(pGpu, pKernelChannel)) + { + NV_PRINTF(LEVEL_NOTICE, + "FAILED Channel 0x%x is not assigned to runlist yet\n", + chId); + return NV_ERR_INVALID_STATE; + } + + runlistId = kchannelGetRunlistId(pKernelChannel); + + // Here we construct token to be a concatenation of runlist id and channel id + val = FLD_SET_DRF_NUM(_CTRL, _VF_DOORBELL, _RUNLIST_ID, runlistId, val); + val = FLD_SET_DRF_NUM(_CTRL, _VF_DOORBELL, _VECTOR, chId, val); + + NV_PRINTF(LEVEL_INFO, + "Generated workSubmitToken 0x%x for channel 0x%x runlist 0x%x\n", + val, chId, runlistId); + } + else // RMCFG_FEATURE_PLATFORM_GSP + { + NV_ASSERT_OK_OR_RETURN(kfifoGenerateInternalWorkSubmitToken_HAL(pGpu, pKernelFifo, pKernelChannel)); + } + + *pGeneratedToken = val; + + return NV_OK; +} + +/** + * @brief Get the runlist base shift amount + * + * @param pKernelFifo + * + * @return shift amount + */ +NvU32 +kfifoRunlistGetBaseShift_GA100 +( + KernelFifo *pKernelFifo +) +{ + return NV_RAMRL_ENTRY_BASE_SHIFT; +} + +/*! + * Special function to be used early when the CHID_MGRs aren't and cannot be + * constructed in all cases. Do not use otherwise + */ +NvU32 +kfifoGetMaxCeChannelGroups_GA100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + ENGDESCRIPTOR eng = 0; + NvU32 deviceIndex; + const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + NvU32 maxCeChannels = 0; + + // If called before kfifoConstructEngineList has executed + if (pEngineInfo == NULL) + return 0; + + // + // We can't use chidMgr here as this gets called before chidMgr is completely initialized + // Use device info table instead + // + for (deviceIndex = 0; deviceIndex < pEngineInfo->engineInfoListSize; deviceIndex++) + { + eng = pEngineInfo->engineInfoList[deviceIndex].engineData[ENGINE_INFO_TYPE_ENG_DESC]; + + // All GR CE use the same pool as GR + if ((eng == ENG_GR(0)) || + (IS_CE(eng) && + (!ceIsCeGrce(pGpu, pEngineInfo->engineInfoList[deviceIndex].engineData[ENGINE_INFO_TYPE_NV2080])))) + { + maxCeChannels += kfifoRunlistQueryNumChannels_HAL(pGpu, pKernelFifo, 0); + } + } + + // override max channels if we can run out of BAR2 page tables + if (kbusIsBug2751296LimitBar2PtSize(GPU_GET_KERNEL_BUS(pGpu))) + { + // 2k for GR CE and 2k for the rest + maxCeChannels = 4096; + } + + return maxCeChannels; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_fifo_ga102.c b/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_fifo_ga102.c new file mode 100644 index 000000000..d0d1a5227 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/ampere/kernel_fifo_ga102.c @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "published/ampere/ga102/dev_ram.h" + +/** + * @brief Get the runlist base shift amount + * + * @param pKernelFifo + * + * @return shift amount + */ +NvU32 +kfifoRunlistGetBaseShift_GA102 +( + KernelFifo *pKernelFifo +) +{ + return NV_RAMRL_ENTRY_BASE_SHIFT; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_channel_gm107.c b/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_channel_gm107.c new file mode 100644 index 000000000..57bddbefe --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_channel_gm107.c @@ -0,0 +1,741 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/mem_mgr/mem.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "class/cl906f.h" + +#include "published/maxwell/gm107/dev_ram.h" +#include "published/maxwell/gm107/dev_mmu.h" + +static NV_STATUS _kchannelCreateRMUserdMemDesc(OBJGPU *pGpu, KernelChannel *pKernelChannel); + +static NV_STATUS _kchannelDestroyRMUserdMemDesc(OBJGPU *pGpu, KernelChannel *pKernelChannel); + +/*! + * The reason this is a hal method is because it primarily concerns with returning EngineID + * and unfortunately, the defines for these are not standard across chips. + * + * Reason we have a classEngineID concat is to present an opaque handle that clients can do + * setobject with directly. Some of them may also require to know the class, engine IDs. 
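+ *
+ * The returned classEngineID packs the class into the NV906F SET_OBJECT
+ * _NVCLASS field; the engine ID is resolved from the object's engine
+ * descriptor via gpuXlateEngDescToClientEngineId.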
+ */ +NV_STATUS +kchannelGetClassEngineID_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvHandle handle, + NvU32 *pClassEngineID, + NvU32 *pClassID, + NvU32 *pEngineID +) +{ + NV_STATUS status = NV_OK; + NvU32 halEngineTag = 0; + NvU32 hwEngineID = 0; + NvU32 classID; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + ChannelDescendant *pObject = NULL; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, kchannelFindChildByHandle(pKernelChannel, handle, &pObject)); + NV_ASSERT_OR_RETURN(pObject != NULL, NV_ERR_OBJECT_NOT_FOUND); + + *pClassID = classID = RES_GET_EXT_CLASS_ID(pObject); + halEngineTag = pObject->resourceDesc.engDesc; + + status = kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, ENGINE_INFO_TYPE_ENG_DESC, + halEngineTag, ENGINE_INFO_TYPE_FIFO_TAG, &hwEngineID); + + if (halEngineTag == ENG_SW) + { + classID = pObject->classID; + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + ": Invalid Engine Tag %x associated with object handle = %x\n", + halEngineTag, handle); + return NV_ERR_OBJECT_NOT_FOUND; + } + + status = gpuXlateEngDescToClientEngineId(pGpu, halEngineTag, pEngineID); + + if (status == NV_OK) + { + *pClassEngineID = DRF_NUM(906F, _SET_OBJECT, _NVCLASS, classID); + } + + NV_PRINTF(LEVEL_INFO, + "class ID: 0x%08x engine id 0x%08x classEngine ID: 0x%08x\n", + classID, hwEngineID, *pClassEngineID); + + return status; +} + +NV_STATUS +kchannelEnableVirtualContext_GM107 +( + KernelChannel *pKernelChannel +) +{ + pKernelChannel->bSkipCtxBufferAlloc = NV_TRUE; + + return NV_OK; +} + +/*! + * Create the sub memory descriptor from preallocated USERD memory + * allocated by RM for a channel + */ +static NV_STATUS +_kchannelCreateRMUserdMemDesc +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + NV_STATUS status = NV_OK; + NvU32 userdSize; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + const PREALLOCATED_USERD_INFO *pUserdInfo = kfifoGetPreallocatedUserdInfo(pKernelFifo); + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + MEMORY_DESCRIPTOR **ppUserdSubdevMemDesc = + &pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]; + + kfifoGetUserdSizeAlign_HAL(pKernelFifo, &userdSize, NULL); + + status = memdescCreateSubMem(ppUserdSubdevMemDesc, + pUserdInfo->userdPhysDesc[subdevInst], + pGpu, + pKernelChannel->ChID * userdSize, + userdSize ); + return status; +} + +/*! Delete prealloc userd submemdesc for the channel */ +static NV_STATUS +_kchannelDestroyRMUserdMemDesc +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + MEMORY_DESCRIPTOR **ppUserdSubdevMemDesc = + &pKernelChannel->pUserdSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + if ((ppUserdSubdevMemDesc != NULL) && (*ppUserdSubdevMemDesc != NULL)) + { + memdescFree(*ppUserdSubdevMemDesc); + memdescDestroy(*ppUserdSubdevMemDesc); + *ppUserdSubdevMemDesc = NULL; + } + + return NV_OK; +} + +/*! + * @brief Create and alloc channel instance mem, + * ramfc and userd subdevice memdescs. 
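+ *
+ * The instance block is allocated first, the RAMFC is then carved out of it
+ * as a sub-memdesc, and finally a per-subdevice USERD sub-memdesc is created
+ * unless the client allocated USERD itself.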
+ * + * @param pGpu[in] OBJGPU pointer + * @param pKernelChannel[in] KernelChannel pointer + * @param flags[in] Flags + * @param verifFlags[in] verifFlags + * + * @returns NV_STATUS + */ +NV_STATUS kchannelAllocMem_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU32 Flags, + NvU32 verifFlags +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NV_STATUS status = NV_OK; + NvU32 CpuCacheAttrib; + FIFO_INSTANCE_BLOCK *pInstanceBlock = NULL; + NvU32 userdSize; + NvU64 instMemSize; + NvU64 instMemAlign; + NvBool bInstProtectedMem; + const NV_ADDRESS_SPACE *pInstAllocList; + CTX_BUF_POOL_INFO *pChannelBufPool = NULL; + NvU64 allocFlags = MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + NvU32 scgType; + NvU32 runqueue; + KernelChannelGroup *pKernelChannelGroup; + NvU32 subdevInst; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup; + + scgType = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_THREAD, Flags); + runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, Flags); + + if (!kfifoValidateSCGTypeAndRunqueue_HAL(pKernelFifo, scgType, runqueue)) + return NV_ERR_INVALID_ARGUMENT; + + kfifoGetUserdSizeAlign_HAL(pKernelFifo, &userdSize, NULL); + + status = kfifoGetInstMemInfo_HAL(pKernelFifo, &instMemSize, &instMemAlign, + &bInstProtectedMem, &CpuCacheAttrib, &pInstAllocList); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to get instance memory info!\n"); + goto fail; + } + + /// Alloc Instance block + if (IsSLIEnabled(pGpu) || IS_GSP_CLIENT(pGpu)) + { + pInstAllocList = ADDRLIST_FBMEM_ONLY; + CpuCacheAttrib = NV_MEMORY_UNCACHED; + } + + // check for allocating VPR memory + if (bInstProtectedMem) + allocFlags |= MEMDESC_ALLOC_FLAGS_PROTECTED; + + pChannelBufPool = pKernelChannelGroup->pChannelBufPool; + if (pChannelBufPool != NULL) + allocFlags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pInstanceBlock = pKernelChannel->pFifoHalData[subdevInst]; + + if (pInstanceBlock == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Instance block is NULL for hClient 0x%x Channel 0x%x!\n", + RES_GET_CLIENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel)); + SLI_LOOP_BREAK; + } + + status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu, + instMemSize, instMemAlign, NV_TRUE, + ADDR_UNKNOWN, CpuCacheAttrib, allocFlags); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to allocate instance memory descriptor!\n"); + SLI_LOOP_BREAK; + } + + if ((memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc) == ADDR_SYSMEM) && + (gpuIsInstanceMemoryAlwaysCached(pGpu))) + { + memdescSetGpuCacheAttrib(pInstanceBlock->pInstanceBlockDesc, NV_MEMORY_CACHED); + } + + if (pChannelBufPool != NULL) + { + status = memdescSetCtxBufPool(pInstanceBlock->pInstanceBlockDesc, pChannelBufPool); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + SLI_LOOP_BREAK; + } + } + + status = memdescAllocList(pInstanceBlock->pInstanceBlockDesc, pInstAllocList); + if (status == NV_OK) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + // Initialize the instance block of the channel with zeros + status = memmgrMemDescMemSet(pMemoryManager, + pInstanceBlock->pInstanceBlockDesc, + 0, + TRANSFER_FLAGS_NONE); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + SLI_LOOP_BREAK; + } + + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Instance block 
allocation for hClient 0x%x hChannel 0x%x failed\n", + RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_HANDLE(pKernelChannel)); + SLI_LOOP_BREAK; + } + + /// Alloc RAMFC Desc + status = memdescCreateSubMem(&pInstanceBlock->pRamfcDesc, + pInstanceBlock->pInstanceBlockDesc, + pGpu, 0, DRF_SIZE( NV_RAMIN_RAMFC ) / 8); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate memdesc for RAMFC\n"); + SLI_LOOP_BREAK; + } + + // TODO: Move this elsewhere. + if (!pKernelChannel->bClientAllocatedUserD) + { + NV_ASSERT(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst] == NULL); + + status = _kchannelCreateRMUserdMemDesc(pGpu, pKernelChannel); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate sub memdesc for USERD\n"); // TODO SLI BREAK + SLI_LOOP_BREAK; + } + } + + status = kchannelCreateUserMemDesc_HAL(pGpu, pKernelChannel); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "kchannelCreateUserMemDesc failed \n"); + SLI_LOOP_BREAK; + } + + NV_PRINTF(LEVEL_INFO, + "hChannel 0x%x hClient 0x%x, Class ID 0x%x " + "Instance Block @ 0x%llx (%s %x) " + "USERD @ 0x%llx " + "for subdevice %d\n", + RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel), + memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0), + memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)), + (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)), + (pKernelChannel->pUserdSubDeviceMemDesc[subdevInst] == NULL) ? 0x0LL : + memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst], AT_GPU, 0LL), + subdevInst); + + SLI_LOOP_END + +fail: + // Just a note about our failure path, null and unallocated + // memdescFrees are allowed so this is not a bug. + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not create Channel\n"); + DBG_BREAKPOINT(); + } + + return status; +} + +/*! + * @brief Free and destroy channel memdescs + * created during channel alloc mem + * + * @param pGpu[in] OBJGPU pointer + * @param pKernelChannel[in] KernelChannel pointer + * + * @return void + */ +void +kchannelDestroyMem_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + FIFO_INSTANCE_BLOCK *pInstanceBlock = NULL; + NvU32 subdevInst; + + NV_ASSERT_OR_RETURN_VOID(pKernelChannel != NULL); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + if (!pKernelChannel->bClientAllocatedUserD) + { + _kchannelDestroyRMUserdMemDesc(pGpu, pKernelChannel); + } + + pInstanceBlock = pKernelChannel->pFifoHalData[subdevInst]; + if (pInstanceBlock != NULL) + { + // Release RAMFC sub memdesc + if (pInstanceBlock->pRamfcDesc != NULL) + { + memdescFree(pInstanceBlock->pRamfcDesc); + memdescDestroy(pInstanceBlock->pRamfcDesc); + pInstanceBlock->pRamfcDesc = NULL; + } + + // Release Inst block Desc + if (pInstanceBlock->pInstanceBlockDesc != NULL) + { + memdescFree(pInstanceBlock->pInstanceBlockDesc); + memdescDestroy(pInstanceBlock->pInstanceBlockDesc); + pInstanceBlock->pInstanceBlockDesc = NULL; + } + } + + // Remove USERD memDescs + if (pKernelChannel->pInstSubDeviceMemDesc[subdevInst] != NULL) + { + memdescDestroy(pKernelChannel->pInstSubDeviceMemDesc[subdevInst]); + pKernelChannel->pInstSubDeviceMemDesc[subdevInst] = NULL; + } + + SLI_LOOP_END + + return; +} + +/** + * @brief reserves a hardware channel slot + * + * Only responsible for indicating a hardware channel is in use, does not set + * any other software state. 
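+ *
+ * For vGPU guests without SR-IOV the caller-provided ChID is taken as-is
+ * (CHANNEL_HW_ID_ALLOC_MODE_PROVIDED); otherwise the CHID heap grows upward,
+ * optionally pinned to a specific USERD page/index by the OS04 flags.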
+ * + * @param pGpu + * @param[in] pKernelChannel the pre-allocated KernelChannel + * @param[in] hClient + * @param[in] allocMode CHANNEL_HW_ID_ALLC_MODE_* + * @param[in] ChID + * @param[in] bForceInternalIdx true if requesting specific index within USERD page + * @param[in] internalIdx requested index within USERD page when bForceInternalIdx + * true + */ +NV_STATUS +kchannelAllocHwID_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvHandle hClient, + NvU32 Flags, + NvU32 verifFlags2, + NvU32 ChID +) +{ + NV_STATUS status; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CHID_MGR *pChidMgr = NULL; + NvU32 internalIdx = 0; + NvU32 userdPageIdx = 0; + NvBool bForceInternalIdx = NV_FALSE; + NvBool bForceUserdPage = NV_FALSE; + CHANNEL_HW_ID_ALLOC_MODE allocMode = CHANNEL_HW_ID_ALLOC_MODE_GROW_UP; + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + // per runlist channel heap is supported for sriov only + NV_CHECK_OR_RETURN(LEVEL_ERROR, + !kfifoIsPerRunlistChramEnabled(pKernelFifo), + NV_ERR_INVALID_STATE); + + pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, + CHIDMGR_RUNLIST_ID_LEGACY); + + NV_CHECK_OR_RETURN(LEVEL_INFO, + kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, + pChidMgr, ChID) == NULL, + NV_OK); + allocMode = CHANNEL_HW_ID_ALLOC_MODE_PROVIDED; + } + else + { + if (FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_USERD_INDEX_PAGE_FIXED, _TRUE, Flags)) + { + bForceUserdPage = NV_TRUE; + userdPageIdx = DRF_VAL(OS04, _FLAGS, _CHANNEL_USERD_INDEX_PAGE_VALUE, Flags); + internalIdx = DRF_VAL(OS04, _FLAGS, _CHANNEL_USERD_INDEX_VALUE, Flags); + + NV_ASSERT_OR_RETURN(FLD_TEST_DRF(OS04, + _FLAGS, + _CHANNEL_USERD_INDEX_FIXED, + _FALSE, + Flags), + NV_ERR_INVALID_STATE); + } + + if (FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_USERD_INDEX_FIXED, _TRUE, Flags)) + { + bForceInternalIdx = NV_TRUE; + internalIdx = DRF_VAL(OS04, _FLAGS, _CHANNEL_USERD_INDEX_VALUE, Flags); + } + } + + pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, kchannelGetRunlistId(pKernelChannel)); + + status = kfifoChidMgrAllocChid(pGpu, + pKernelFifo, + pChidMgr, + hClient, + allocMode, + bForceInternalIdx, + internalIdx, + bForceUserdPage, + userdPageIdx, + ChID, + pKernelChannel); + + return status; +} + +/** + * @brief Releases a hardware channel ID. + * + * Not responsible for freeing any software state beyond that which indicates a + * hardware channel is in use. 
+ * + * @param pGpu + * @param pKernelChannel + */ +NV_STATUS +kchannelFreeHwID_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + NV_STATUS status; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, + kchannelGetRunlistId(pKernelChannel)); + EMEMBLOCK *pFifoDataBlock; + + pFifoDataBlock = pChidMgr->pFifoDataHeap->eheapGetBlock( + pChidMgr->pFifoDataHeap, + pKernelChannel->ChID, + NV_FALSE); + NV_ASSERT_OR_RETURN(pFifoDataBlock, NV_ERR_OBJECT_NOT_FOUND); + NV_ASSERT(pFifoDataBlock->pData == pKernelChannel); + + status = kfifoChidMgrFreeChid(pGpu, pKernelFifo, pChidMgr, pKernelChannel->ChID); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to Free Channel From Heap: %d\n", + pKernelChannel->ChID); + DBG_BREAKPOINT(); + } + + return status; +} + +NV_STATUS +kchannelGetUserdInfo_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU64 *userBase, + NvU64 *offset, + NvU64 *length +) +{ + NV_STATUS status; + NvU64 bar1MapOffset; + NvU32 bar1MapSize; + CLI_CHANNEL_CLASS_INFO classInfo; + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + NvBool bCoherentCpuMapping = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING); + + CliGetChannelClassInfo(RES_GET_EXT_CLASS_ID(pKernelChannel), &classInfo); + + switch (classInfo.classType) + { + case CHANNEL_CLASS_TYPE_GPFIFO: + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + // USERD is not pre-allocated in BAR1 so there is no offset/userBase + NV_ASSERT_OR_RETURN(!pKernelChannel->bClientAllocatedUserD, + NV_ERR_INVALID_REQUEST); + + status = kchannelGetUserdBar1MapOffset_HAL(pGpu, + pKernelChannel, + &bar1MapOffset, + &bar1MapSize); + if (status == NV_OK) + { + *offset = bar1MapOffset; + *length = bar1MapSize; + + if (userBase) + { + if (bCoherentCpuMapping) + { + NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)); + *userBase = pKernelMemorySystem->coherentCpuFbBase; + } + else + { + *userBase = gpumgrGetGpuPhysFbAddr(pGpu); + } + } + } + break; + + default: + NV_PRINTF(LEVEL_ERROR, + "class = %x not supported for user base mapping\n", + RES_GET_EXT_CLASS_ID(pKernelChannel)); + status = NV_ERR_GENERIC; + break; + } + return status; +} + +// +// Takes as input a Channel * and returns the BAR1 offset that this channel's +// USERD has been mapped to. Also returns the size of the BAR1 mapping that +// pertains to this channel. The BAR1 map of all USERDs should have already +// been setup before the first channel was created. +// +// For example, USERD of 40 channels have been mapped at BAR1 offset 0x100. +// USERD of one channel is of size 4k. In which case this function will return +// ( 0x100 + ( 0x1000 * 0xa ) ) if the input ChID = 0xa. +// +NV_STATUS +kchannelGetUserdBar1MapOffset_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU64 *bar1MapOffset, + NvU32 *bar1MapSize +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + const PREALLOCATED_USERD_INFO *pUserdInfo = kfifoGetPreallocatedUserdInfo(pKernelFifo); + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + // + // only supported when bUsePerRunlistChannelRam is disabled. + // We don't pre-allocate userd for all channels across all runlists; we expect + // clients to have moved to client allocated userd. 
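+    // With per-runlist channel RAM each runlist gets its own ChID space, so the
+    // ChID-based offset computed below would no longer identify a unique
+    // pre-allocated USERD slot.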
+ // + NV_ASSERT_OR_RETURN(!kfifoIsPerRunlistChramEnabled(pKernelFifo), + NV_ERR_NOT_SUPPORTED); + + if (pUserdInfo->userdBar1MapSize == 0) + { + NV_PRINTF(LEVEL_ERROR, + "fifoGetUserdBar1Offset_GF100: BAR1 map of USERD has not " + "been setup yet\n"); + NV_ASSERT(0); + return NV_ERR_GENERIC; + } + + kfifoGetUserdSizeAlign_HAL(pKernelFifo, bar1MapSize, NULL); + + *bar1MapOffset = pKernelChannel->ChID * *bar1MapSize + + pUserdInfo->userdBar1MapStartOffset; + + NV_ASSERT((*bar1MapOffset + *bar1MapSize) <= + (pUserdInfo->userdBar1MapStartOffset + + pUserdInfo->userdBar1MapSize)); + + return NV_OK; +} + +/*! + * @brief Creates a memory descriptor to be used for creating a GPU mapped MMIO + * region for a given channel. + */ +NV_STATUS +kchannelCreateUserMemDesc_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NV_STATUS status; + MEMORY_DESCRIPTOR *pSubDevInstMemDesc = NULL; + MEMORY_DESCRIPTOR **ppMemDesc = + &pKernelChannel->pInstSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + status = kfifoChannelGetFifoContextMemDesc_HAL(pGpu, pKernelFifo, pKernelChannel, + FIFO_CTX_INST_BLOCK, &pSubDevInstMemDesc); + + if (status != NV_OK) + return status; + + NV_ASSERT_OR_RETURN(pSubDevInstMemDesc, NV_ERR_OBJECT_NOT_FOUND); + + status = memdescCreate(ppMemDesc, pGpu, RM_PAGE_SIZE, 0, + memdescGetContiguity(pSubDevInstMemDesc, AT_GPU), + memdescGetAddressSpace(pSubDevInstMemDesc), + memdescGetCpuCacheAttrib(pSubDevInstMemDesc), + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + + if (status != NV_OK) + return status; + + NV_ASSERT(*ppMemDesc); + + memdescDescribe(*ppMemDesc, memdescGetAddressSpace(pSubDevInstMemDesc), + memdescGetPhysAddr(pSubDevInstMemDesc, AT_GPU, 0), RM_PAGE_SIZE); + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, *ppMemDesc, AT_GPU, RM_ATTR_PAGE_SIZE_4KB); + + memdescSetPteKind(*ppMemDesc, NV_MMU_PTE_KIND_SMHOST_MESSAGE); + + return NV_OK; +} + +/** + * @brief Retrieves the engine ID (NV_PFIFO_ENGINE_*) a given channel is operating on. + * + * This value will not be valid for a channel that has not been scheduled. + * + * @param pGpu + * @param pKernelChannel + * @param[out] pEngDesc + */ +NV_STATUS +kchannelGetEngine_GM107 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU32 *pEngDesc +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + if (pEngDesc == NULL) + return NV_ERR_INVALID_ARGUMENT; + + NV_PRINTF(LEVEL_INFO, "0x%x\n", kchannelGetDebugTag(pKernelChannel)); + + *pEngDesc = kchannelGetRunlistId(pKernelChannel); + + // This will pick the first engine on this runlist (may not be the only one). + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_RUNLIST, *pEngDesc, + ENGINE_INFO_TYPE_ENG_DESC, pEngDesc)); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_channel_group_gm107.c b/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_channel_group_gm107.c new file mode 100644 index 000000000..c19669ca9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_channel_group_gm107.c @@ -0,0 +1,62 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "core/core.h"
+
+#include "kernel/gpu/fifo/kernel_channel_group.h"
+
+/**
+ * @brief Gets the default runlist id to use for TSGs allocated with no engines on them.
+ *
+ * @param[in] pGpu
+ * @param[in] pKernelChannelGroup - TSG to retrieve default runlist id for
+ */
+NvU32
+kchangrpGetDefaultRunlist_GM107
+(
+    OBJGPU             *pGpu,
+    KernelChannelGroup *pKernelChannelGroup
+)
+{
+    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
+    NvU32 runlistId = INVALID_RUNLIST_ID;
+    ENGDESCRIPTOR engDesc = ENG_GR(0);
+
+    if (NV2080_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType))
+    {
+        // if translation fails, default is GR0
+        NV_ASSERT_OK(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
+                                              ENGINE_INFO_TYPE_NV2080,
+                                              pKernelChannelGroup->engineType,
+                                              ENGINE_INFO_TYPE_ENG_DESC,
+                                              &engDesc));
+    }
+
+    // if translation fails, default is INVALID_RUNLIST_ID
+    NV_ASSERT_OK(
+        kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
+                                 ENGINE_INFO_TYPE_ENG_DESC, engDesc,
+                                 ENGINE_INFO_TYPE_RUNLIST, &runlistId));
+
+    return runlistId;
+}
diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_fifo_gm107.c b/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_fifo_gm107.c
new file mode 100644
index 000000000..1695a4b83
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/fifo/arch/maxwell/kernel_fifo_gm107.c
@@ -0,0 +1,1506 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/gpu/fifo/kernel_sched_mgr.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mmu/kern_gmmu.h" + +#include "nvRmReg.h" + +#include "vgpu/rpc.h" +#include "gpu/bus/kern_bus.h" + +#include "published/maxwell/gm107/dev_ram.h" +#include "published/maxwell/gm107/dev_mmu.h" + +/*! Construct kfifo object */ +NV_STATUS +kfifoConstructHal_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NV_STATUS status; + PREALLOCATED_USERD_INFO *pUserdInfo = &pKernelFifo->userdInfo; + + if (FLD_TEST_DRF(_REG_STR_RM, _INST_VPR, _INSTBLK, _TRUE, pGpu->instVprOverrides)) + { + pKernelFifo->bInstProtectedMem = NV_TRUE; + } + + // Instance Memory + switch (DRF_VAL( _REG_STR_RM, _INST_LOC, _INSTBLK, pGpu->instLocOverrides)) + { + default: + case NV_REG_STR_RM_INST_LOC_INSTBLK_DEFAULT: + if (kfifoIsMixedInstmemApertureDefAllowed(pKernelFifo)) + pKernelFifo->pInstAllocList = ADDRLIST_FBMEM_PREFERRED; + else + pKernelFifo->pInstAllocList = ADDRLIST_FBMEM_ONLY; + + pKernelFifo->InstAttr = NV_MEMORY_UNCACHED; + break; + case NV_REG_STR_RM_INST_LOC_INSTBLK_VID: + pKernelFifo->pInstAllocList = ADDRLIST_FBMEM_ONLY; + pKernelFifo->InstAttr = NV_MEMORY_UNCACHED; + break; + case NV_REG_STR_RM_INST_LOC_INSTBLK_COH: + pKernelFifo->pInstAllocList = ADDRLIST_SYSMEM_ONLY; + pKernelFifo->InstAttr = NV_MEMORY_CACHED; + break; + case NV_REG_STR_RM_INST_LOC_INSTBLK_NCOH: + pKernelFifo->pInstAllocList = ADDRLIST_SYSMEM_ONLY; + pKernelFifo->InstAttr = NV_MEMORY_UNCACHED; + break; + } + + // USERD + pUserdInfo->userdAperture = ADDR_FBMEM; + pUserdInfo->userdAttr = NV_MEMORY_WRITECOMBINED; + memdescOverrideInstLoc(DRF_VAL( _REG_STR_RM, _INST_LOC, _USERD, pGpu->instLocOverrides), + "USERD", + &pUserdInfo->userdAperture, + &pUserdInfo->userdAttr); + + // Create child object KernelSchedMgr + if (kfifoIsSchedSupported(pKernelFifo)) + { + pKernelFifo->pKernelSchedMgr = NULL; + status = objCreate(&pKernelFifo->pKernelSchedMgr, pKernelFifo, KernelSchedMgr); + if (status != NV_OK) + { + pKernelFifo->pKernelSchedMgr = NULL; + return status; + } + kschedmgrConstructPolicy(pKernelFifo->pKernelSchedMgr, pGpu); + } + + return NV_OK; +} + +/** + * @brief Allocate a page for dummy page directory + * + * On GV100, PDB corresponding to subcontexts that are freed + * will point to a dummy page directory instead of setting it to NULL + * Here we allocate a page for this page directory + */ +static NV_STATUS +_kfifoAllocDummyPage +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NV_STATUS status = NV_OK; + NvU32 flags = MEMDESC_FLAGS_NONE; + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + + if (bBcState) + { + flags |= MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE; + } + + // Using instance block attributes to allocate dummy page + status = memdescCreate(&pKernelFifo->pDummyPageMemDesc, pGpu, + RM_PAGE_SIZE, + 0, + NV_FALSE, + ADDR_UNKNOWN, + pKernelFifo->InstAttr, + flags); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not memdescCreate for dummy page\n"); + DBG_BREAKPOINT(); + return status; + } + + status = memdescAllocList(pKernelFifo->pDummyPageMemDesc, 
pKernelFifo->pInstAllocList); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate dummy page\n"); + DBG_BREAKPOINT(); + memdescDestroy(pKernelFifo->pDummyPageMemDesc); + pKernelFifo->pDummyPageMemDesc = NULL; + } + + return status; +} + +/** + * @brief Free the page used for dummy page directory + */ +static void +_kfifoFreeDummyPage +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + // Free dummy page memdesc + memdescFree(pKernelFifo->pDummyPageMemDesc); + memdescDestroy(pKernelFifo->pDummyPageMemDesc); + pKernelFifo->pDummyPageMemDesc = NULL; +} + +NV_STATUS +kfifoStatePostLoad_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + const PREALLOCATED_USERD_INFO *pUserdInfo = kfifoGetPreallocatedUserdInfo(pKernelFifo); + + if (!(flags & GPU_STATE_FLAGS_PRESERVING)) + { + // Prealloc USERD + NV_ASSERT_OK_OR_RETURN(kfifoPreAllocUserD_HAL(pGpu, pKernelFifo)); + + if (gpumgrIsParentGPU(pGpu)) + { + if (kfifoIsZombieSubctxWarEnabled(pKernelFifo)) + { + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + + status = _kfifoAllocDummyPage(pGpu, pKernelFifo); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to allocate dummy page for zombie subcontexts\n"); + DBG_BREAKPOINT(); + gpumgrSetBcEnabledStatus(pGpu, bBcState); + return status; + } + + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + NV2080_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB_PARAMS params; + MEMORY_DESCRIPTOR *pDummyPageMemDesc = kfifoGetDummyPageMemDesc(pKernelFifo); + + portMemSet(¶ms, 0, sizeof(params)); + + params.base = memdescGetPhysAddr(pDummyPageMemDesc, AT_GPU, 0);; + params.size = pDummyPageMemDesc->Size; + params.addressSpace = memdescGetAddressSpace(pDummyPageMemDesc); + params.cacheAttrib = memdescGetCpuCacheAttrib(pDummyPageMemDesc); + + NV_RM_RPC_CONTROL(pGpu, + pGpu->hDefaultClientShare, + pGpu->hDefaultClientShareSubDevice, + NV2080_CTRL_CMD_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB, + ¶ms, + sizeof(params), + status); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "RM control call to setup zombie subctx failed, status 0x%x\n", status); + DBG_BREAKPOINT(); + return status; + } + } + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + } + } + + // Since we have successfully setup BAR1 USERD rsvd memory + // lets inform hw (only if the snoop is not disabled.) + kfifoSetupBar1UserdSnoop_HAL(pGpu, pKernelFifo, NV_TRUE, pUserdInfo->userdBar1MapStartOffset); + + if (IS_GSP_CLIENT(pGpu) || IS_VIRTUAL(pGpu)) + { + status = kfifoTriggerPostSchedulingEnableCallback(pGpu, pKernelFifo); + if (status != NV_OK) + return status; + } + + return status; +} + +NV_STATUS +kfifoStatePreUnload_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + NvU32 sliLoopReentrancy; + + NV_PRINTF(LEVEL_INFO, "start\n"); + + if (!(flags & GPU_STATE_FLAGS_PRESERVING) && gpumgrIsParentGPU(pGpu)) + { + NvBool bBcState = NV_FALSE; + + if (kfifoIsZombieSubctxWarEnabled(pKernelFifo)) + { + _kfifoFreeDummyPage(pGpu, pKernelFifo); + } + + // Notify the handlers that the channel will soon be disabled. 
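+    // (Any callbacks registered for the pre-scheduling-disable event run here,
+    // before BAR1 USERD snooping is torn down below.)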
+        status = kfifoTriggerPreSchedulingDisableCallback(pGpu, pKernelFifo);
+
+        // Enable broadcast on SLI
+        bBcState = gpumgrGetBcEnabledStatus(pGpu);
+        gpumgrSetBcEnabledStatus(pGpu, NV_TRUE);
+
+        // As we have forced here SLI broadcast mode, temporarily reset the reentrancy count
+        sliLoopReentrancy = gpumgrSLILoopReentrancyPop(pGpu);
+
+        // Ask host to stop snooping
+        kfifoSetupBar1UserdSnoop_HAL(pGpu, pKernelFifo, NV_FALSE, 0);
+
+        // Restore the reentrancy count
+        gpumgrSLILoopReentrancyPush(pGpu, sliLoopReentrancy);
+
+        // Restore prior broadcast state
+        gpumgrSetBcEnabledStatus(pGpu, bBcState);
+    }
+
+    if (!(flags & GPU_STATE_FLAGS_PRESERVING))
+    {
+        // Free preallocated userd
+        kfifoFreePreAllocUserD_HAL(pGpu, pKernelFifo);
+    }
+
+    return status;
+}
+
+/**
+ * Returns the default timeslice (in us) for a channelgroup as defined by hardware.
+ */
+NvU64
+kfifoChannelGroupGetDefaultTimeslice_GM107
+(
+    KernelFifo *pKernelFifo
+)
+{
+    return NV_RAMRL_ENTRY_TIMESLICE_TIMEOUT_128 << NV_RAMRL_ENTRY_TIMESLICE_SCALE_3;
+}
+
+/*! Get size and alignment requirements for instance memory */
+NV_STATUS
+kfifoGetInstMemInfo_GM107
+(
+    KernelFifo *pKernelFifo,
+    NvU64 *pSize,
+    NvU64 *pAlignment,
+    NvBool *pbInstProtectedMem,
+    NvU32 *pInstAttr,
+    const NV_ADDRESS_SPACE **ppInstAllocList
+)
+{
+    NV_ASSERT_OR_RETURN(pSize != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pAlignment != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    *pSize = NV_RAMIN_ALLOC_SIZE;
+    *pAlignment = 1 << NV_RAMIN_BASE_SHIFT;
+
+    if(pbInstProtectedMem != NULL)
+        *pbInstProtectedMem = pKernelFifo->bInstProtectedMem;
+
+    if(pInstAttr != NULL)
+        *pInstAttr = pKernelFifo->InstAttr;
+
+    if(ppInstAllocList != NULL)
+        *ppInstAllocList = pKernelFifo->pInstAllocList;
+
+    return NV_OK;
+}
+
+/*! Gets instance block size and offset align for instance memory */
+void
+kfifoGetInstBlkSizeAlign_GM107
+(
+    KernelFifo *pKernelFifo,
+    NvU32 *pSize,
+    NvU32 *pShift
+)
+{
+    *pSize = NV_RAMIN_ALLOC_SIZE;
+    *pShift = NV_RAMIN_BASE_SHIFT;
+
+    return;
+}
+
+/*!
+ * @brief Gets the default runlist id to use for channels allocated with no engines on them.
+ *
+ * @param[in] pGpu
+ * @param[in] pKernelFifo
+ * @param[in] engineType - Engine type of the channel to retrieve default runlist id for
+ */
+NvU32
+kfifoGetDefaultRunlist_GM107
+(
+    OBJGPU *pGpu,
+    KernelFifo *pKernelFifo,
+    NvU32 engineType
+)
+{
+    NvU32 runlistId = INVALID_RUNLIST_ID;
+    ENGDESCRIPTOR engDesc = ENG_GR(0);
+
+    if (NV2080_ENGINE_TYPE_IS_VALID(engineType))
+    {
+        // if translation fails, default is ENG_GR(0)
+        NV_ASSERT_OK(
+            kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
+                                     ENGINE_INFO_TYPE_NV2080, engineType,
+                                     ENGINE_INFO_TYPE_ENG_DESC, &engDesc));
+    }
+
+    // if translation fails, default is INVALID_RUNLIST_ID
+    NV_ASSERT_OK(
+        kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
+                                 ENGINE_INFO_TYPE_ENG_DESC, engDesc,
+                                 ENGINE_INFO_TYPE_RUNLIST, &runlistId));
+
+    return runlistId;
+}
+
+/**
+ * @brief Programs a channel's runlist id to a given value
+ *
+ * Verifies that the requested engine is valid based on the current channel's
+ * state. Does not bind the channel to the runlist in sw or hw. @ref kfifoRunlistSetId.
+ * + * @param pGpu + * @param pKernelFifo + * @param[in/out] pKernelChannel + * @param[in] runlistId runlist ID to use + */ +NV_STATUS +kfifoRunlistSetId_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + KernelChannel *pKernelChannel, + NvU32 runlistId +) +{ + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + if ((runlistId != kchannelGetRunlistId(pKernelChannel)) && + kchannelIsRunlistSet(pGpu, pKernelChannel)) + { + NV_PRINTF(LEVEL_ERROR, + "Channel has already been assigned a runlist incompatible with this " + "engine (requested: 0x%x current: 0x%x).\n", runlistId, + kchannelGetRunlistId(pKernelChannel)); + return NV_ERR_INVALID_STATE; + } + + // + // For TSG channel, the RL should support TSG. + // We relax this requirement if the channel is TSG wrapped by RM. + // In that case, RM won't write the TSG header in the RL. + // + if (!kfifoRunlistIsTsgHeaderSupported_HAL(pGpu, pKernelFifo, runlistId) && + (pKernelChannel->pKernelChannelGroupApi != NULL) && + !pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm) + { + NV_PRINTF(LEVEL_ERROR, "Runlist does not support TSGs\n"); + return NV_ERR_INVALID_STATE; + } + + // If you want to set runlistId of channel - first set it on TSG + if (pKernelChannel->pKernelChannelGroupApi != NULL) + { + // Change TSG runlist if channel is the only one + if (pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->chanCount == 1 || + !pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bRunlistAssigned) + { + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->runlistId = runlistId; + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bRunlistAssigned = NV_TRUE; + } + else + { + NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->runlistId == + runlistId, + NV_ERR_INVALID_STATE); + } + } + + kchannelSetRunlistId(pKernelChannel, runlistId); + kchannelSetRunlistSet(pGpu, pKernelChannel, NV_TRUE); + return NV_OK; +} + +/** + * @brief Programs a channel's runlist id given the engine tag + * + * Verifies that the requested engine is valid based on the current channel's + * state. Does not bind the channel to the runlist in sw or hw. @ref kfifoRunlistSetIdByEngine. + * + * @param pGpu + * @param pKernelFifo + * @param[in/out] pKernelChannel + * @param[in] engDesc + */ +NV_STATUS +kfifoRunlistSetIdByEngine_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + KernelChannel *pKernelChannel, + NvU32 engDesc +) +{ + NvU32 runlistId; + NV_STATUS status; + NvU32 subctxType = 0; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + kfifoGetSubctxType_HAL(pGpu, pKernelFifo, pKernelChannel, &subctxType); + + if (!kfifoValidateEngineAndRunqueue_HAL(pGpu, pKernelFifo, engDesc, kchannelGetRunqueue(pKernelChannel))) + return NV_ERR_INVALID_ARGUMENT; + + if (!kfifoValidateEngineAndSubctxType_HAL(pGpu, pKernelFifo, engDesc, subctxType)) + return NV_ERR_INVALID_ARGUMENT; + + // + // SW objects can go on any runlist so we defer committing of runlist ID to + // scheduling or another object's allocation. 
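+    // This is why ENG_SW and ENG_BUS simply return NV_OK below without
+    // committing a runlist id.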
+    //
+    if ((engDesc == ENG_SW) || (engDesc == ENG_BUS))
+        return NV_OK;
+
+    NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, ENGINE_INFO_TYPE_ENG_DESC,
+                                                    engDesc, ENGINE_INFO_TYPE_RUNLIST, &runlistId));
+
+    status = kfifoRunlistSetId_HAL(pGpu, pKernelFifo, pKernelChannel, runlistId);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Unable to program runlist for %s\n",
+                  kfifoGetEngineName_HAL(pKernelFifo, ENGINE_INFO_TYPE_ENG_DESC, engDesc));
+    }
+
+    return status;
+}
+
+NV_STATUS
+kfifoChannelGetFifoContextMemDesc_GM107
+(
+    OBJGPU *pGpu,
+    KernelFifo *pKernelFifo,
+    KernelChannel *pKernelChannel,
+    FIFO_CTX engineState,
+    MEMORY_DESCRIPTOR **ppMemDesc
+)
+{
+    FIFO_INSTANCE_BLOCK *pInstanceBlock;
+
+    /* UVM calls nvGpuOpsGetChannelInstanceMemInfo
+     * which calls current function to fetch FIFO_CTX_INST_BLOCK */
+    /* Currently, UVM is supported on SRIOV vGPUs only. */
+    if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
+        return NV_OK;
+
+    NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL);
+
+    pInstanceBlock = pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)];
+    if (pInstanceBlock == NULL)
+        return NV_ERR_INVALID_STATE;
+
+    switch (engineState)
+    {
+        case FIFO_CTX_INST_BLOCK:
+            *ppMemDesc = pInstanceBlock->pInstanceBlockDesc;
+            break;
+
+        case FIFO_CTX_RAMFC:
+            *ppMemDesc = pInstanceBlock->pRamfcDesc;
+            break;
+
+        default:
+            NV_PRINTF(LEVEL_ERROR,
+                      "bad engineState 0x%x on engine 0x%x\n",
+                      engineState, ENG_FIFO);
+            DBG_BREAKPOINT();
+            return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(*ppMemDesc));
+
+    NV_PRINTF(LEVEL_INFO,
+              "Channel %d engine 0x%x engineState 0x%x *ppMemDesc %p\n",
+              kchannelGetDebugTag(pKernelChannel), ENG_FIFO, engineState, *ppMemDesc);
+
+    return NV_OK;
+}
+
+/**
+ * @brief lookup the kernelchannel data associated with a given instance address/target
+ *
+ * @param[in] pGpu OBJGPU pointer
+ * @param[in] pKernelFifo KernelFifo pointer
+ * @param[in] pInst INST_BLOCK_DESC pointer
+ * @param[out] ppKernelChannel KernelChannel ptr
+ */
+NV_STATUS
+kfifoConvertInstToKernelChannel_GM107
+(
+    OBJGPU *pGpu,
+    KernelFifo *pKernelFifo,
+    INST_BLOCK_DESC *pInst,
+    KernelChannel **ppKernelChannel
+)
+{
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    KernelChannel *pKernelChannel = NULL;
+    FIFO_INSTANCE_BLOCK *pInstanceBlock;
+    MEMORY_DESCRIPTOR instMemDesc;
+    NV_ADDRESS_SPACE instAperture;
+    CHANNEL_ITERATOR chanIt;
+
+    NV_ASSERT_OR_RETURN(pInst != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(ppKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);
+
+    *ppKernelChannel = NULL;
+
+    switch (pInst->aperture)
+    {
+        case INST_BLOCK_APERTURE_SYSTEM_COHERENT_MEMORY:
+        case INST_BLOCK_APERTURE_SYSTEM_NON_COHERENT_MEMORY:
+            instAperture = ADDR_SYSMEM;
+            break;
+        case INST_BLOCK_APERTURE_VIDEO_MEMORY:
+            instAperture = ADDR_FBMEM;
+            break;
+        default:
+            NV_PRINTF(LEVEL_ERROR, "unknown inst target 0x%x\n", pInst->aperture);
+            DBG_BREAKPOINT();
+            return NV_ERR_INVALID_ADDRESS;
+    }
+
+    //
+    // The MMU_PTE version of aperture is what the HW should always
+    // report for an instance block. Compare the SW defines against
+    // these values here.
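+    // (VERIFY_INST_BLOCK_APERTURE below checks the INST_BLOCK_APERTURE_* defines
+    // against the NV_MMU_PTE_APERTURE_* encodings.)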
+ // + VERIFY_INST_BLOCK_APERTURE(NV_MMU_PTE_APERTURE_VIDEO_MEMORY, + NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY, + NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY); + + memdescCreateExisting(&instMemDesc, pGpu, NV_RAMIN_ALLOC_SIZE, + ADDR_UNKNOWN, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + + memdescDescribe(&instMemDesc, instAperture, pInst->address, NV_RAMIN_ALLOC_SIZE); + + kfifoGetChannelIterator(pGpu, pKernelFifo, &chanIt); + while (kfifoGetNextKernelChannel(pGpu, pKernelFifo, &chanIt, &pKernelChannel) == NV_OK) + { + NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue); + + pInstanceBlock = pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + + if (pInstanceBlock != NULL && + pInstanceBlock->pInstanceBlockDesc != NULL && + kchannelGetGfid(pKernelChannel) == pInst->gfid && + memmgrComparePhysicalAddresses_HAL(pGpu, pMemoryManager, + kgmmuGetHwPteApertureFromMemdesc(GPU_GET_KERNEL_GMMU(pGpu), + pInstanceBlock->pInstanceBlockDesc), + memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, + AT_GPU, 0), + kgmmuGetHwPteApertureFromMemdesc(GPU_GET_KERNEL_GMMU(pGpu), + &instMemDesc), + memdescGetPhysAddr(&instMemDesc, AT_GPU, 0))) + { + *ppKernelChannel = pKernelChannel; + memdescDestroy(&instMemDesc); + return NV_OK; + } + } + + NV_PRINTF(LEVEL_INFO, + "No channel found for instance 0x%016llx (target 0x%x)\n", + memdescGetPhysAddr(&instMemDesc, AT_GPU, 0), + kgmmuGetHwPteApertureFromMemdesc(GPU_GET_KERNEL_GMMU(pGpu), &instMemDesc)); + memdescDestroy(&instMemDesc); + + return NV_ERR_INVALID_CHANNEL; +} + +/** + * @brief Translates between 2 engine values + * + * To iterate through a value for all engines call with inType of + * ENGINE_INFO_TYPE_INVALID for 0 through fifoGetNumEngines(). + * + * @param pGpu + * @param pKernelFifo + * @param[in] inType ENGINE_INFO_TYPE_* + * @param[in] inVal + * @param[in] outType ENGINE_INFO_TYPE_* + * @param[out] pOutVal + */ +NV_STATUS +kfifoEngineInfoXlate_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + ENGINE_INFO_TYPE inType, + NvU32 inVal, + ENGINE_INFO_TYPE outType, + NvU32 *pOutVal +) +{ + const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + NvU32 i; + + NV_ASSERT_OR_RETURN(pOutVal, NV_ERR_INVALID_ARGUMENT); + + // PBDMA_ID can only be inType + NV_ASSERT_OR_RETURN(outType != ENGINE_INFO_TYPE_PBDMA_ID, + NV_ERR_INVALID_ARGUMENT); + + if (pEngineInfo == NULL) + { + NV_ASSERT_OK_OR_RETURN(kfifoConstructEngineList_HAL(pGpu, pKernelFifo)); + + pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + NV_ASSERT_OR_RETURN(pEngineInfo != NULL, NV_ERR_INVALID_STATE); + } + + if (inType == ENGINE_INFO_TYPE_INVALID) + { + NV_ASSERT_OR_RETURN(inVal < pEngineInfo->engineInfoListSize, + NV_ERR_INVALID_ARGUMENT); + *pOutVal = pEngineInfo->engineInfoList[inVal].engineData[outType]; + return NV_OK; + } + + for (i = 0; i < pEngineInfo->engineInfoListSize; ++i) + { + FIFO_ENGINE_LIST *pFifoEngineList = &pEngineInfo->engineInfoList[i]; + NvBool bFound = NV_FALSE; + + if (inType == ENGINE_INFO_TYPE_PBDMA_ID) + { + NvU32 j; + for (j = 0; j < pFifoEngineList->numPbdmas; ++j) + { + if (pFifoEngineList->pbdmaIds[j] == inVal) + { + bFound = NV_TRUE; + break; + } + } + } + else if (pFifoEngineList->engineData[inType] == inVal) + { + bFound = NV_TRUE; + } + + if (bFound) + { + *pOutVal = pFifoEngineList->engineData[outType]; + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +/** + * @brief Get the local maximum number of subctx allowed in this TSG + */ +NvU32 +kfifoChannelGroupGetLocalMaxSubcontext_GM107 +( + 
OBJGPU *pGpu, + KernelFifo *pKernelFifo, + KernelChannelGroup *pKernelChannelGroup, + NvBool bLegacyMode +) +{ + // Pre-AMPERE, each channel group has the global maximum available + return kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, bLegacyMode); +} + +void +kfifoSetupUserD_GM107 +( + KernelFifo *pKernelFifo, + NvU8 *pUserD +) +{ + NV_ASSERT_OR_RETURN_VOID(pUserD != NULL); + + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_PUT ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GET ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_REF ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_PUT_HI ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_REF_THRESHOLD ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GP_TOP_LEVEL_GET ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GP_TOP_LEVEL_GET_HI ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GET_HI ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GP_GET ), 0 ); + MEM_WR32( pUserD + SF_OFFSET( NV_RAMUSERD_GP_PUT ), 0 ); +} +/** + * @brief return number of HW engines + * + * Can be used to loop over all engines in the system by looping from 0 + * through the value returned by this function and then using + * kfifoEngineInfoXlate() with an input type of ENGINE_INFO_TYPE_INVALID. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFifo KernelFifo pointer + * + * @returns number of HW engines present on chip. + */ +NvU32 +kfifoGetNumEngines_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + + if (pEngineInfo == NULL) + { + NV_ASSERT_OR_RETURN(kfifoConstructEngineList_HAL(pGpu, pKernelFifo) == NV_OK, 0); + + pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + NV_ASSERT_OR_RETURN(pEngineInfo != NULL, 0); + } + + NV_ASSERT(pEngineInfo->engineInfoListSize); + + // we don't count the SW engine entry at the end of the list + return pEngineInfo->engineInfoListSize-1; +} + +/** + * @brief Retrieves the name of the engine corresponding to the given @ref ENGINE_INFO_TYPE + * + * @param pKernelFifo + * @param[in] inType + * @param[in] inVal + * + * @returns a string + */ +const char * +kfifoGetEngineName_GM107 +( + KernelFifo *pKernelFifo, + ENGINE_INFO_TYPE inType, + NvU32 inVal +) +{ + const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + NvU32 i; + + if (inType == ENGINE_INFO_TYPE_INVALID) + { + NV_ASSERT_OR_RETURN (inVal < pEngineInfo->engineInfoListSize, NULL); + return pEngineInfo->engineInfoList[inVal].engineName; + } + for (i = 0; i < pEngineInfo->engineInfoListSize; ++i) + { + if (pEngineInfo->engineInfoList[i].engineData[inType] == inVal) + { + return pEngineInfo->engineInfoList[i].engineName; + } + } + + return "UNKNOWN"; +} + +/** + * @brief Returns the maximum possible number of runlists. + * + * Returns a number which represents the limit of any runlistId indexed + * registers in hardware. Does not necessarily return how many runlists are + * active. In the range of 0..kfifoGetMaxNumRunlists() there may be runlists + * that are not used. 
+ * + * @param pGpu + * @param pKernelFifo + */ +NvU32 +kfifoGetMaxNumRunlists_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + + // We use bit-masks of these values + NV_ASSERT(pEngineInfo->maxNumRunlists <= 32); + + return pEngineInfo->maxNumRunlists; +} + +NV_STATUS +kfifoGetEnginePbdmaIds_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + ENGINE_INFO_TYPE type, + NvU32 val, + NvU32 **ppPbdmaIds, + NvU32 *pNumPbdmas +) +{ + const ENGINE_INFO *pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + NvU32 i; + + if (pEngineInfo == NULL) + { + NV_ASSERT_OK_OR_RETURN(kfifoConstructEngineList_HAL(pGpu, pKernelFifo)); + + pEngineInfo = kfifoGetEngineInfo(pKernelFifo); + NV_ASSERT_OR_RETURN(pEngineInfo != NULL, NV_ERR_INVALID_STATE); + } + + if (type == ENGINE_INFO_TYPE_INVALID) + { + NV_ASSERT_OR_RETURN(val < pEngineInfo->engineInfoListSize, NV_ERR_INVALID_ARGUMENT); + *ppPbdmaIds = pEngineInfo->engineInfoList[val].pbdmaIds; + *pNumPbdmas = pEngineInfo->engineInfoList[val].numPbdmas; + return NV_OK; + } + + for (i = 0; i < pEngineInfo->engineInfoListSize; i++) + { + if (pEngineInfo->engineInfoList[i].engineData[type] == val) + { + *ppPbdmaIds = pEngineInfo->engineInfoList[i].pbdmaIds; + *pNumPbdmas = pEngineInfo->engineInfoList[i].numPbdmas; + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +/** + * @brief finds all engines on the same pbdma as the input + * + * pPartnerListParams->partnershipClassId is currently ignored. + * + * @param pGpu + * @param pKernelFifo + * @param[in/out] pPartnerListParams engineType is input, partnerList/numPartners are ouput + * + * @returns NV_OK if successful, error otherwise + */ +NV_STATUS +kfifoGetEnginePartnerList_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams +) +{ + const NvU32 numEngines = kfifoGetNumEngines_HAL(pGpu, pKernelFifo); + NvU32 i; + NvU32 srcRunlist; + NvU32 runlist; + NvU32 nv2080type; + NvU32 *pSrcPbdmaIds; + NvU32 numSrcPbdmaIds; + NvU32 srcPbdmaId; + NvU32 *pPbdmaIds; + NvU32 numPbdmaIds; + NvU32 numClasses = 0; + ENGDESCRIPTOR engDesc; + + if (pPartnerListParams->runqueue >= kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo)) + return NV_ERR_INVALID_ARGUMENT; + + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, + pPartnerListParams->engineType, + ENGINE_INFO_TYPE_RUNLIST, + &srcRunlist)); + + NV_ASSERT_OK_OR_RETURN(kfifoGetEnginePbdmaIds_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, + pPartnerListParams->engineType, + &pSrcPbdmaIds, + &numSrcPbdmaIds)); + + pPartnerListParams->numPartners = 0; + + // Get the PBDMA ID for the runqueue-th runqueue + if (pPartnerListParams->runqueue >= numSrcPbdmaIds) + { + return NV_ERR_INVALID_ARGUMENT; + } + srcPbdmaId = pSrcPbdmaIds[pPartnerListParams->runqueue]; + + // + // Find all engines sharing a runlist with the input engine, add each to + // the output array. 
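+    // An engine qualifies as a partner only if it has classes in the class DB,
+    // shares the runlist, and is served by the same PBDMA as the requested
+    // runqueue; the input engine itself is excluded.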
+ // + for (i = 0; i < numEngines; i++) + { + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_INVALID, i, + ENGINE_INFO_TYPE_ENG_DESC, &engDesc)); + + NV_ASSERT_OK_OR_RETURN(gpuGetClassList(pGpu, &numClasses, NULL, engDesc)); + if (numClasses == 0) + { + NV_PRINTF(LEVEL_INFO, + "EngineID %x is not part classDB, skipping\n", + engDesc); + continue; + } + + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_INVALID, i, + ENGINE_INFO_TYPE_RUNLIST, &runlist)); + + if (runlist == srcRunlist) + { + NvU32 j; + + NV_ASSERT_OK_OR_RETURN(kfifoGetEnginePbdmaIds_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_INVALID, i, + &pPbdmaIds, &numPbdmaIds)); + + for (j = 0; j < numPbdmaIds; j++) + { + if (pPbdmaIds[j] == srcPbdmaId) + { + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_INVALID, i, + ENGINE_INFO_TYPE_NV2080, &nv2080type)); + + // Don't include input in output list + if (nv2080type != pPartnerListParams->engineType) + { + pPartnerListParams->partnerList[pPartnerListParams->numPartners++] = nv2080type; + + if (pPartnerListParams->numPartners >= NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS) + return NV_ERR_INVALID_ARGUMENT; + } + } + } + } + } + + return NV_OK; +} + +/** + * @brief Check if the runlist has TSG support + * + * Currently, we only enable the TSG runlist for GR + * + * @return NV_TRUE if TSG is supported, NV_FALSE if not + */ +NvBool +kfifoRunlistIsTsgHeaderSupported_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 runlistId +) +{ + NvU32 tmp_runlist; + + if (kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, ENGINE_INFO_TYPE_ENG_DESC, + ENG_GR(0), ENGINE_INFO_TYPE_RUNLIST, &tmp_runlist) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "can't find runlist ID for engine ENG_GR(0)!\n"); + NV_ASSERT(0); + return NV_FALSE; + } + + return tmp_runlist == runlistId; +} + +/** + * @brief Get the runlist entry size + * + * @param pKernelFifo + * + * @return size in bytes + */ +NvU32 +kfifoRunlistGetEntrySize_GM107 +( + KernelFifo *pKernelFifo +) +{ + return NV_RAMRL_ENTRY_SIZE; +} + +/** + * @brief Get the runlist base shift amount + * + * @param pKernelFifo + * + * @return shift amount + */ +NvU32 +kfifoRunlistGetBaseShift_GM107 +( + KernelFifo *pKernelFifo +) +{ + return NV_RAMRL_BASE_SHIFT; +} + +/** + * @brief Pre-allocate BAR1 userd space + * + * @param pGpu + * @param pKernelFifo + * + * @returns NV_STATUS + */ +NV_STATUS +kfifoPreAllocUserD_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + OBJGPU *pParentGpu = gpumgrGetParentGPU(pGpu); + KernelFifo *pParentKernelFifo = GPU_GET_KERNEL_FIFO(pParentGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvBool bCoherentCpuMapping = NV_FALSE; + NV_STATUS status = NV_OK; + NvU64 temp = 0; + NvU32 userdSize; + NvU32 userdShift; + NvU32 numChannels; + NvBool bFifoFirstInit; + NvU32 flags = MEMDESC_FLAGS_NONE; + NvU32 mapFlags = BUS_MAP_FB_FLAGS_MAP_DOWNWARDS | + BUS_MAP_FB_FLAGS_MAP_UNICAST; + NvU32 currentGpuInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, 0); + + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + PREALLOCATED_USERD_INFO *pUserdInfo = &pParentKernelFifo->userdInfo; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + // We don't support RM allocated USERD for vGPU guest with SRIOV + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + return NV_OK; + } + + 
bCoherentCpuMapping = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING); + + if (pUserdInfo->userdBar1CpuPtr == NULL) + { + bFifoFirstInit = NV_TRUE; + } + else + { + mapFlags |= BUS_MAP_FB_FLAGS_MAP_OFFSET_FIXED; + bFifoFirstInit = NV_FALSE; + } + + // + // Allocate the physical memory associated with the UserD if this is + // the first GPU to init fifo. This relies on the assumption that + // UserD is shared physmem. + // + if (bFifoFirstInit) + { + pUserdInfo->userdBar1MapStartOffset = 0; + pUserdInfo->userdBar1MapSize = 0; + + // This is a WAR for HW bug 600241 + if (pUserdInfo->userdAperture == ADDR_SYSMEM) + { + pKernelFifo->bUserdInSystemMemory = NV_TRUE; + } + } + + kfifoGetUserdSizeAlign_HAL(pKernelFifo, &userdSize, &userdShift); + + numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + + // Alloc USERD of size numChannels * sizeof( USERD ) for each gpu + status = memdescCreate(&pUserdInfo->userdPhysDesc[currentGpuInst], pGpu, + userdSize * numChannels, + 1ULL << userdShift, + NV_TRUE, + pUserdInfo->userdAperture, + pUserdInfo->userdAttr, + flags); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not memdescCreate for USERD for %x #channels\n", + numChannels); + DBG_BREAKPOINT(); + goto fail; + } + temp = pUserdInfo->userdPhysDesc[currentGpuInst]->Size; + + // + // For vGPU, do not allocate USERD memory in guest. + // vGPU does all HW management in host, so host RM will + // allocate the real USERD memory. + // + if (IS_VIRTUAL(pGpu)) + { + // Force page size to 4KB to match host phys access + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, + pUserdInfo->userdPhysDesc[currentGpuInst], + AT_GPU, RM_ATTR_PAGE_SIZE_4KB); + if (bFifoFirstInit) + { + pUserdInfo->userdBar1MapStartOffset = kfifoGetUserdBar1MapStartOffset_HAL(pGpu, pKernelFifo); + } + } + else + { + status = memdescAlloc(pUserdInfo->userdPhysDesc[currentGpuInst]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not allocate USERD for %x #channels\n", + numChannels); + DBG_BREAKPOINT(); + goto fail; + } + + // Force page size to 4KB in broadcast to match host phys access + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, pUserdInfo->userdPhysDesc[currentGpuInst], + AT_GPU, RM_ATTR_PAGE_SIZE_4KB); + + // + // If coherent link is available, just get a coherent mapping to USERD and + // lie about the BAR1 offset, since we are not using BAR1 + // TODO: Make these bar1 offsets unicast on each gpu as well + // + if (bCoherentCpuMapping && + (memdescGetAddressSpace(pUserdInfo->userdPhysDesc[currentGpuInst]) == ADDR_FBMEM)) + { + + NV_PRINTF(LEVEL_INFO, "Mapping USERD with coherent link.\n"); + NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)); + NV_ASSERT(pUserdInfo->userdPhysDesc[currentGpuInst]->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); + + if (bFifoFirstInit) + { + pUserdInfo->userdBar1MapStartOffset = pUserdInfo->userdPhysDesc[currentGpuInst]->_pteArray[0] + + pUserdInfo->userdPhysDesc[currentGpuInst]->PteAdjust; + } + } + else + { + // vGpu may boot with partitioning enabled but that's not true for host RM + if ((pKernelMIGManager != NULL) && kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager)) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, "Pre-allocated USERD is not supported with MIG\n"); + DBG_BREAKPOINT(); + goto fail; + } + // Now BAR1 map it + status = kbusMapFbAperture_HAL(pGpu, pKernelBus, pUserdInfo->userdPhysDesc[currentGpuInst], 0, + &pUserdInfo->userdBar1MapStartOffset, + &temp, mapFlags | 
BUS_MAP_FB_FLAGS_PRE_INIT, NV01_NULL_OBJECT); + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not map USERD to BAR1\n"); + DBG_BREAKPOINT(); + goto fail; + } + + // Add current GPU to list of GPUs referencing pFifo userD bar1 + pUserdInfo->userdBar1RefMask |= NVBIT(pGpu->gpuInstance); + } + + if (bFifoFirstInit) + { + pUserdInfo->userdBar1MapSize = NvU64_LO32(temp); + + if (bCoherentCpuMapping && + (memdescGetAddressSpace(pUserdInfo->userdPhysDesc[currentGpuInst]) == ADDR_FBMEM)) + { + pUserdInfo->userdBar1CpuPtr = kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, + pUserdInfo->userdPhysDesc[currentGpuInst]); + status = pUserdInfo->userdBar1CpuPtr == NULL ? NV_ERR_GENERIC : NV_OK; + } + else + { + // Cpu map the BAR1 snoop range + status = osMapPciMemoryKernelOld(pGpu, gpumgrGetGpuPhysFbAddr(pGpu) + + pUserdInfo->userdBar1MapStartOffset, + pUserdInfo->userdBar1MapSize, + NV_PROTECT_READ_WRITE, + (void**)&pUserdInfo->userdBar1CpuPtr, + NV_MEMORY_UNCACHED); + } + + if ((pUserdInfo->userdBar1CpuPtr == NULL) && (status != NV_OK)) + { + NV_PRINTF(LEVEL_ERROR, "Could not cpu map BAR1 snoop range\n"); + DBG_BREAKPOINT(); + goto fail; + } + } + + NV_PRINTF(LEVEL_INFO, + "USERD Preallocated phys @ 0x%llx bar1 offset @ 0x%llx of size 0x%x\n", + memdescGetPhysAddr(pUserdInfo->userdPhysDesc[currentGpuInst], AT_GPU, 0), + pUserdInfo->userdBar1MapStartOffset, + pUserdInfo->userdBar1MapSize); + + return status; + +fail: + kfifoFreePreAllocUserD_HAL(pGpu, pKernelFifo); + + return status; +} + +/** + * @brief Free the pre-allocated BAR1 userd space + * + * @param pGpu + * @param pKernelFifo + * + * @returns NV_STATUS + */ +void +kfifoFreePreAllocUserD_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + OBJGPU *pParentGpu = gpumgrGetParentGPU(pGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 currentGpuInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + KernelFifo *pParentKernelFifo = GPU_GET_KERNEL_FIFO(pParentGpu); + PREALLOCATED_USERD_INFO *pUserdInfo = &pParentKernelFifo->userdInfo; + NvBool bCoherentCpuMapping = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) && + (memdescGetAddressSpace(pUserdInfo->userdPhysDesc[currentGpuInst]) == ADDR_FBMEM); + + // We don't support RM allocated USERD for vGPU guest with SRIOV + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + return; + } + + if (gpumgrGetBcEnabledStatus(pGpu)) + { + DBG_BREAKPOINT(); + } + + if (bCoherentCpuMapping) + { + NV_PRINTF(LEVEL_INFO, "Unmapping USERD from NVLINK.\n"); + NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)); + } + + if (pUserdInfo->userdBar1CpuPtr) + { + if (bCoherentCpuMapping) + { + kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, + pUserdInfo->userdPhysDesc[currentGpuInst]); + } + else + { + osUnmapPciMemoryKernelOld(pGpu, pUserdInfo->userdBar1CpuPtr); + } + + pUserdInfo->userdBar1CpuPtr = NULL; + } + + if (pUserdInfo->userdBar1MapSize) + { + if ((!IS_VIRTUAL(pGpu)) && (!bCoherentCpuMapping)) + { + if ((pUserdInfo->userdBar1RefMask & NVBIT(pGpu->gpuInstance)) != 0) + { + // + // Unmap in UC for each GPU with a pKernelFifo userd + // reference mapped through bar1 + // + kbusUnmapFbAperture_HAL(pGpu, pKernelBus, + pUserdInfo->userdPhysDesc[currentGpuInst], + pUserdInfo->userdBar1MapStartOffset, + pUserdInfo->userdBar1MapSize, + BUS_MAP_FB_FLAGS_MAP_UNICAST | BUS_MAP_FB_FLAGS_PRE_INIT); + pUserdInfo->userdBar1RefMask &= (~NVBIT(pGpu->gpuInstance)); + } + + } + } + + // Unallocated memdescFrees are allowed. 
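+    // (Calling memdescFree on a descriptor that was never allocated is safe;
+    // memdescDestroy below then releases the descriptor itself.)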
+ memdescFree(pUserdInfo->userdPhysDesc[currentGpuInst]); + memdescDestroy(pUserdInfo->userdPhysDesc[currentGpuInst]); + pUserdInfo->userdPhysDesc[currentGpuInst] = NULL; + NV_PRINTF(LEVEL_INFO, "Freeing preallocated USERD phys and bar1 range\n"); +} + +// +// Returns the BAR1 offset and size of the entire USERD mapping. +// +NV_STATUS +kfifoGetUserdBar1MapInfo_GM107 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU64 *pBar1MapOffset, + NvU32 *pBar1MapSize +) +{ + const PREALLOCATED_USERD_INFO *pUserdInfo = kfifoGetPreallocatedUserdInfo(pKernelFifo); + + // We don't support RM allocated USERD in vGPU guest with SRIOV + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + *pBar1MapOffset = 0; + *pBar1MapSize = 0; + + return NV_OK; + } + + if (pUserdInfo->userdBar1MapSize == 0 ) + { + NV_PRINTF(LEVEL_ERROR, "BAR1 map of USERD has not been setup yet\n"); + NV_ASSERT( 0 ); + return NV_ERR_GENERIC; + } + + *pBar1MapOffset = pUserdInfo->userdBar1MapStartOffset; + *pBar1MapSize = pUserdInfo->userdBar1MapSize; + + return NV_OK; +} + +/** + * @brief Determines the aperture and attribute of memory where userd is located. + * + * @param pKernelFifo[in] + * @param pUserdAperture[out] + * @param pUserdAttribute[out] + * + * @returns NV_STATUS + */ +NV_STATUS +kfifoGetUserdLocation_GM107 +( + KernelFifo *pKernelFifo, + NvU32 *pUserdAperture, + NvU32 *pUserdAttribute +) +{ + const PREALLOCATED_USERD_INFO *pUserdInfo = kfifoGetPreallocatedUserdInfo(pKernelFifo); + + NV_ASSERT_OR_RETURN(pUserdAperture != NULL && pUserdAttribute != NULL, + NV_ERR_INVALID_POINTER); + + *pUserdAperture = pUserdInfo->userdAperture; + *pUserdAttribute = pUserdInfo->userdAttr; + + return NV_OK; +} + +/** + * @brief Returns size/address shift for USERD's BAR1 mapping + * + * @param pKernelFifo + * @param[out] pSize populated with USERD size if non-null + * @param[out] pAddrShift populated with USERD address shift if non-null + */ +void +kfifoGetUserdSizeAlign_GM107 +( + KernelFifo *pKernelFifo, + NvU32 *pSize, + NvU32 *pAddrShift +) +{ + if (pSize != NULL) + *pSize = 1<ChID; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + // + // In case of vGPU with SR-IOV, host RM is currently generating token using + // virtual chid that was allocated inside the guest. This needs to change + // once the guest starts managing its own channels. The guest would then + // generate its own tokens. 
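+    // (The system chid is translated to the guest's virtual chid below before
+    // being packed into the token.)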
+ // + if (!bUsedForHost && IS_GFID_VF(gfid)) + { + NvU32 vChId; + + NV_ASSERT_OK_OR_RETURN(kfifoGetVChIdForSChId_HAL(pGpu, pKernelFifo, + chId, gfid, + kchannelGetEngineType(pKernelChannel), + &vChId)); + chId = vChId; + } + + if (!kchannelIsRunlistSet(pGpu, pKernelChannel)) + { + NV_PRINTF(LEVEL_ERROR, + "FAILED Channel 0x%x is not assigned to runlist yet\n", + kchannelGetDebugTag(pKernelChannel)); + return NV_ERR_INVALID_STATE; + } + + // Here we construct token to be a concatenation of runlist id and channel id + val = FLD_SET_DRF_NUM(_CTRL, _VF_DOORBELL, _RUNLIST_ID, kchannelGetRunlistId(pKernelChannel), val); + val = FLD_SET_DRF_NUM(_CTRL, _VF_DOORBELL, _VECTOR, chId, val); + *pGeneratedToken = val; + + NV_PRINTF(LEVEL_INFO, + "Generated workSubmitToken 0x%x for channel 0x%x runlist 0x%x\n", + *pGeneratedToken, chId, kchannelGetRunlistId(pKernelChannel)); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_channel_group_gv100.c b/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_channel_group_gv100.c new file mode 100644 index 000000000..0f2836df2 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_channel_group_gv100.c @@ -0,0 +1,321 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/virtualization/hypervisor/hypervisor.h" + +#include "nvRmReg.h" + +#include "gpu/bus/kern_bus.h" + +/*! 
+ * @brief Allocate buffer to save/restore faulting engine methods + */ +NV_STATUS +kchangrpAllocFaultMethodBuffers_GV100 +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup +) +{ + NV_STATUS status = NV_OK; + NvU32 bufSizeInBytes = 0; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 runQueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + NvU32 index = 0; + NvU32 faultBufApert = ADDR_SYSMEM; + NvU32 faultBufAttr = NV_MEMORY_CACHED; + NvU8 *pRmMapAddr = NULL; + NvU64 memDescFlags = MEMDESC_FLAGS_LOST_ON_SUSPEND; + HW_ENG_FAULT_METHOD_BUFFER *pFaultMthdBuf = NULL; + NvU32 gfid = pKernelChannelGroup->gfid; + + // + // Allocate method buffer if applicable + // For SR-IOV, Guest RM allocates the mthd buffers, no work done by host-RM + // For SR-IOV HEAVY and legacy vGpu, mthd buffers allocated by host RM, + // For GSP config, method buffer allocation is done by CPU-RM + // + // Skip method buffer allocation for the rest + // + if ((IS_GFID_VF(gfid) && !IS_SRIOV_HEAVY(pGpu)) || // SRIOV guest on Host + RMCFG_FEATURE_PLATFORM_GSP || // GSP-RM + IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || // legacy vgpu + IS_SRIOV_HEAVY_GUEST(pGpu)) // SRIOV-heavy guest + { + return NV_OK; + } + + // Pre-reqs + NV_ASSERT_OR_RETURN((pKernelChannelGroup->pMthdBuffers != NULL), NV_ERR_INVALID_STATE); + + // Calculate size of buffer + NV_ASSERT_OK_OR_RETURN(gpuGetCeFaultMethodBufferSize(pGpu, &bufSizeInBytes)); + NV_ASSERT((bufSizeInBytes > 0)); + + if (IS_SRIOV_HEAVY_HOST(pGpu)) + { + // + // In case of SRIOV heavy mode host RM is allocating fault method buffers + // on behalf of guest. As VF channels cannot use sysmem allocated in the + // host, force fault buffer aperture to vid mem. + // + faultBufApert = ADDR_FBMEM; + faultBufAttr = NV_MEMORY_CACHED; + memDescFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + else + { + // Get the right aperture/attribute + faultBufApert = ADDR_SYSMEM; + faultBufAttr = NV_MEMORY_CACHED; + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC_3, _FAULT_METHOD_BUFFER, pGpu->instLocOverrides3), + "fault method buffer", &faultBufApert, &faultBufAttr); + if (faultBufApert == ADDR_FBMEM) + memDescFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + + // Allocate buffer for each runqueue + for (index = 0; index < runQueues; index++) + { + pFaultMthdBuf = &(pKernelChannelGroup->pMthdBuffers[index]); + + // Allocate and initialize MEMDESC + status = memdescCreate(&(pFaultMthdBuf->pMemDesc), pGpu, bufSizeInBytes, 0, + NV_FALSE, faultBufApert, faultBufAttr, memDescFlags); + if (status != NV_OK) + { + DBG_BREAKPOINT(); + goto fail; + } + + status = memdescAlloc(pFaultMthdBuf->pMemDesc); + if (status != NV_OK) + { + DBG_BREAKPOINT(); + memdescDestroy(pFaultMthdBuf->pMemDesc); + pFaultMthdBuf->pMemDesc = NULL; + goto fail; + } + + // Map the buffer to RM + pRmMapAddr = kbusMapRmAperture_HAL(pGpu, pFaultMthdBuf->pMemDesc); + if (!pRmMapAddr) + { + status = NV_ERR_INVALID_ADDRESS; + goto fail; + } + + // Memset to 0 + portMemSet(pRmMapAddr, 0, bufSizeInBytes); + + // Unmap the buffer from RM + kbusUnmapRmAperture_HAL(pGpu, pFaultMthdBuf->pMemDesc, &(pRmMapAddr), + NV_TRUE); + pRmMapAddr = NULL; + + pFaultMthdBuf->bar2Addr = 0; + } + + return status; + +fail: + kchangrpFreeFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup); + return status; +} + +/*! 
+ * @brief Free method buffers + */ +NV_STATUS +kchangrpFreeFaultMethodBuffers_GV100 +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup +) +{ + NV_STATUS status = NV_OK; + HW_ENG_FAULT_METHOD_BUFFER *pFaultMthdBuf = NULL; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 runQueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + NvU32 index = 0; + NvU32 gfid = pKernelChannelGroup->gfid; + + + // + // Free the method buffer if applicable + // For SR-IOV, Guest RM allocates the mthd buffers but later RPCs into the + // host, and populates the data structure, but it should be free-d only by + // guest RM. + // For SR-IOV HEAVY and legacy vGpu, mthd buffers should be free-d by host RM, + // For GSP config, we need to free the method buffer in GSP-RM + // + // Skip free for the rest + // + if ((IS_GFID_VF(gfid) && !IS_SRIOV_HEAVY(pGpu)) || // SRIOV guest on Host + IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || // legacy vgpu + IS_SRIOV_HEAVY_GUEST(pGpu)) // SRIOV-heavy guest + { + return NV_OK; + } + + NV_ASSERT_OR_RETURN((pKernelChannelGroup->pMthdBuffers != NULL), NV_ERR_INVALID_STATE); + + // Free method buffer memdesc if allocated + for (index = 0; index < runQueues; index++) + { + pFaultMthdBuf = &((pKernelChannelGroup->pMthdBuffers)[index]); + if ((pFaultMthdBuf != NULL) && (pFaultMthdBuf->pMemDesc != NULL)) + { + // Free the memory + memdescFree(pFaultMthdBuf->pMemDesc); + memdescDestroy(pFaultMthdBuf->pMemDesc); + pFaultMthdBuf->pMemDesc = NULL; + } + } + + return status; +} + +/*! + * @brief Map method buffer to invisible BAR2 region + */ +NV_STATUS +kchangrpMapFaultMethodBuffers_GV100 +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup, + NvU32 runqueue +) +{ + NvU32 gfid = pKernelChannelGroup->gfid; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 runQueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + HW_ENG_FAULT_METHOD_BUFFER *pFaultMthdBuf = NULL; + + // + // Map method buffer to invisible BAR2 if applicable + // For SR-IOV, Guest RM maps the mthd buffers, no work done by host-RM + // For SR-IOV HEAVY and legacy vGpu, mthd buffers mapped to BAR2 by host RM, + // For GSP config, method buffer BAR2 mapping is done by GSP-RM + // + // Skip method buffer allocation for the rest + // + if ((IS_GFID_VF(gfid) && !IS_SRIOV_HEAVY(pGpu)) || // SRIOV guest on Host + IS_GSP_CLIENT(pGpu) || // CPU-RM + IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || // legacy vgpu + IS_SRIOV_HEAVY_GUEST(pGpu)) // SRIOV-heavy guest + { + return NV_OK; + } + + // Pre-reqs + NV_ASSERT_OR_RETURN((pKernelChannelGroup->pMthdBuffers != NULL), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN((runqueue < runQueues), NV_ERR_INVALID_STATE); + + // Get method buffer handle + pFaultMthdBuf = &(pKernelChannelGroup->pMthdBuffers[runqueue]); + + // Skip mapping if memdesc not allocated or if bar2 mapping has already been done + if ((pFaultMthdBuf->pMemDesc == NULL) || (pFaultMthdBuf->bar2Addr != 0)) + { + return NV_OK; + } + + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, pFaultMthdBuf->pMemDesc, + AT_GPU, RM_ATTR_PAGE_SIZE_4KB); + + // Map the buffer to BAR2 invisible region + NV_ASSERT_OK_OR_RETURN(kbusMapCpuInvisibleBar2Aperture_HAL(pGpu, pKernelBus, + pFaultMthdBuf->pMemDesc, + &(pFaultMthdBuf->bar2Addr), + pFaultMthdBuf->pMemDesc->Size, + 0, + gfid)); + + NV_PRINTF(LEVEL_INFO, + "Allocating Method buffer with Bar2Addr LO 0x%08x Bar2Addr " + "HI 0x%08x runqueue 0x%0x\n", + 
NvU64_LO32(pFaultMthdBuf->bar2Addr), + NvU64_HI32(pFaultMthdBuf->bar2Addr), runqueue); + + return NV_OK; +} + +/*! + * @brief Unmap method buffers from CPU invisible BAR2 + */ +NV_STATUS +kchangrpUnmapFaultMethodBuffers_GV100 +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup, + NvU32 runqueue +) +{ + NvU32 gfid = pKernelChannelGroup->gfid; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 runQueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + HW_ENG_FAULT_METHOD_BUFFER *pFaultMthdBuf = NULL; + + // + // Unmap the method buffer if applicable + // For SR-IOV, Guest RM maps and is unmapped only by guest-RM + // For SR-IOV HEAVY and legacy vGpu, mthd buffers should be unmapped by host RM, + // For GSP config, method buffer BAR2 unmapping is done on GSP-RM + // + // Skip unmap for the rest + // + if ((IS_GFID_VF(gfid) && !IS_SRIOV_HEAVY(pGpu)) || // SRIOV guest on Host + IS_GSP_CLIENT(pGpu) || // CPU-RM + IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || // legacy vgpu + IS_SRIOV_HEAVY_GUEST(pGpu)) // SRIOV-heavy guest + { + return NV_OK; + } + + // Pre-reqs + NV_ASSERT_OR_RETURN((pKernelChannelGroup->pMthdBuffers != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((runqueue < runQueues), NV_ERR_INVALID_STATE); + + // Get method buffer handle + pFaultMthdBuf = &(pKernelChannelGroup->pMthdBuffers[runqueue]); + + // TODO: Check to be removed as part of fix for Bug 200691429 + if (!RMCFG_FEATURE_PLATFORM_GSP) + NV_ASSERT_OR_RETURN((pFaultMthdBuf->pMemDesc != NULL), NV_ERR_INVALID_STATE); + + // Unmap method buffer from bar2 invisible region and reset bar2addr + if (pFaultMthdBuf->bar2Addr != 0) + { + kbusUnmapCpuInvisibleBar2Aperture_HAL(pGpu, pKernelBus, pFaultMthdBuf->pMemDesc, + pFaultMthdBuf->bar2Addr, gfid); + pFaultMthdBuf->bar2Addr = 0; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_channel_gv100.c b/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_channel_gv100.c new file mode 100644 index 000000000..79b31c7d8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_channel_gv100.c @@ -0,0 +1,288 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/mem_mgr/mem.h" +#include "kernel/gpu/mmu/kern_gmmu.h" + +#include "published/volta/gv100/dev_pbdma.h" +#include "rmapi/rs_utils.h" + +/** + * @brief Verify that the given userd physical address is of the correct size + * + * @param[in] pKernelChannel KernelChannel pointer + * @param[in] userdAddrLo low USERD physical address bits + * @param[in] userdAddrHi high USERD physical address bits + * + * @returns NV_TRUE if the given userd physical address is of the correct size + NV_FALSE otherwise + */ +NvBool +kchannelIsUserdAddrSizeValid_GV100 +( + KernelChannel *pKernelChannel, + NvU32 userdAddrLo, + NvU32 userdAddrHi +) +{ + return ((userdAddrLo & DRF_MASK(NV_PPBDMA_USERD_ADDR)) == userdAddrLo) && + ((userdAddrHi & DRF_MASK(NV_PPBDMA_USERD_HI_ADDR)) == userdAddrHi); +} + +/** + * @brief Create the memory descriptor for USERD memory allocated + * by client using memory handle + * + * + * @param[in] pGpu + * @param[in] pKernelChannel + * @param[in] hClient + * @param[in] pUserdMemory + * @param[in] pUserdOffset + * + * @returns NV_STATUS + */ +NV_STATUS +kchannelCreateUserdMemDescBc_GV100 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvHandle hClient, + NvHandle *phUserdMemory, + NvU64 *pUserdOffset +) +{ + NV_STATUS rmStatus = NV_OK; + + if (phUserdMemory[0] != 0) + { + NvU32 iter = 0; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + iter = IsSLIEnabled(pGpu) ? gpumgrGetSubDeviceInstanceFromGpu(pGpu) : 0; + + NvHandle hUserdMemory = phUserdMemory[iter]; + NvU64 userdOffset = pUserdOffset[iter]; + + if (!hUserdMemory) + { + // + // pUserdMemory[iter] may be 0 for non-zero iter in case USERD is in FB + // The following hack will be removed once clients fix this. + // See bug 1659362, comment 26. 
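+            // When this happens, the handle and offset supplied for subdevice 0
+            // are reused below as a fallback so the channel can still be built.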
+ // + NV_PRINTF(LEVEL_ERROR, + "User provided memory info for index %d is NULL\n", + iter); + NV_PRINTF(LEVEL_ERROR, + "NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS needs to have all subdevice info\n"); + + hUserdMemory = phUserdMemory[0]; + userdOffset = pUserdOffset[0]; + + } + + rmStatus = kchannelCreateUserdMemDesc_HAL(pGpu, pKernelChannel, hClient, + hUserdMemory, + userdOffset, + NULL, NULL); + + if (rmStatus != NV_OK) + { + SLI_LOOP_BREAK; + } + + SLI_LOOP_END + + + if (rmStatus != NV_OK) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + kchannelDestroyUserdMemDesc_HAL(pGpu, pKernelChannel); + SLI_LOOP_END + } + else + { + pKernelChannel->bClientAllocatedUserD = NV_TRUE; + } + } + return rmStatus; +} + +/** + * @brief Create the memory descriptor for USERD memory allocated + * by client using memory handle + * + * + * @param[in] pGpu + * @param[in] pKernelChannel + * @param[in] hClient + * @param[in] pUserdMemory + * @param[in] pUserdOffset + * @param[out] pUserdAddr (optional) returns the USERD PA + * @param[out] pUserdAper (optional) returns the USERD aperture + * + * @returns NV_STATUS + */ +NV_STATUS +kchannelCreateUserdMemDesc_GV100 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvHandle hClient, + NvHandle hUserdMemory, + NvU64 userdOffset, + NvU64 *pUserdAddr, + NvU32 *pUserdAper +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + RsResourceRef *pUserdMemoryRef; + Memory *pUserdMemory = NULL; + PMEMORY_DESCRIPTOR pUserdMemDescForSubDev = NULL; + PMEMORY_DESCRIPTOR pUserdSubMemDesc = NULL; + RmPhysAddr userdAddr = 0; + NvU32 userdSize = 0; + NV_STATUS status = NV_OK; + NvU32 userdShift; + NvU32 userdAddrLo; + NvU32 userdAddrHi; + NvU32 userdAlignment; + NvU32 pageSize; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + pKernelChannel->pUserdSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = NULL; + + // Get the userd size + kfifoGetUserdSizeAlign_HAL(pKernelFifo, &userdSize, &userdShift); + userdAlignment = (NvU32) (1ULL << userdShift); + + if (serverutilGetResourceRefWithType(hClient, + hUserdMemory, + classId(Memory), + &pUserdMemoryRef) != NV_OK) + { + NV_ASSERT(pUserdMemoryRef); + return NV_ERR_OBJECT_NOT_FOUND; + } + + pUserdMemory = dynamicCast(pUserdMemoryRef->pResource, Memory); + + // Get the userd memdesc for the gpu + pUserdMemDescForSubDev = memdescGetMemDescFromGpu(pUserdMemory->pMemDesc, pGpu); + + // check that the memory is not VPR + if (memdescGetFlag(pUserdMemDescForSubDev, MEMDESC_ALLOC_FLAGS_PROTECTED)) + { + return NV_ERR_INVALID_FLAGS; + } + + userdAddr = memdescGetPhysAddr(pUserdMemDescForSubDev, + AT_GPU, + userdOffset); + + userdAddrLo = NvU64_LO32(userdAddr) >> userdShift; + userdAddrHi = NvU64_HI32(userdAddr); + + // Check that the physical address is of the correct size + if (!kchannelIsUserdAddrSizeValid_HAL(pKernelChannel, userdAddrLo, userdAddrHi)) + { + NV_PRINTF(LEVEL_ERROR, + "physical addr size of userdAddrHi=0x%08x, userAddrLo=0x%08x is incorrect!\n", + userdAddrHi, userdAddrLo); + + NV_ASSERT(0); + return NV_ERR_INVALID_ADDRESS; + } + + // + // USERD page size should be RM_PAGE_SIZE for BAR2 mapping to not waste size. + // Client allocated userds can be of larger pagesizes + // submemdesc uses parent page size to determine "actual size" which gets + // used by bar2 mapper. + // Therefore override the size here and restore it later below. 
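+    // Note: the override below is temporary; the original page size is restored
+    // immediately after the sub-memory descriptor is created.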
+ // + pageSize = memdescGetPageSize(pUserdMemDescForSubDev, AT_GPU); + memdescSetPageSize(pUserdMemDescForSubDev, AT_GPU, RM_PAGE_SIZE); + + // The userd memory descriptor may be shared across channels or gpus. + // Create a Sub-memory descriptor and ref count the base memory desc. + status = memdescCreateSubMem(&pUserdSubMemDesc, + pUserdMemDescForSubDev, pGpu, + userdOffset, + userdSize); + // restore the pagesize + memdescSetPageSize(pUserdMemDescForSubDev, AT_GPU, pageSize); + + if (status != NV_OK) + { + return status; + } + + // check alignment + if ((pUserdMemory->pMemDesc->Alignment < userdAlignment) && + (pUserdMemory->pMemDesc->Alignment != 0)) + { + memdescDestroy(pUserdSubMemDesc); + return NV_ERR_INVALID_ADDRESS; + } + + pKernelChannel->pUserdSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = pUserdSubMemDesc; + + if (status != NV_OK) + { + DBG_BREAKPOINT(); + } + + if (pUserdAddr != NULL) + *pUserdAddr = userdAddr; + + if (pUserdAper != NULL) + *pUserdAper = kgmmuGetHwPteApertureFromMemdesc(GPU_GET_KERNEL_GMMU(pGpu), pUserdMemDescForSubDev); + + return status; +} + +/** + * @brief Delete the memory descriptors for userd memory allocated + * by client + */ +NV_STATUS +kchannelDestroyUserdMemDesc_GV100 +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + if (pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]) + { + memdescDestroy(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]); + pKernelChannel->pUserdSubDeviceMemDesc[subdevInst] = NULL; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_fifo_gv100.c b/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_fifo_gv100.c new file mode 100644 index 000000000..c066661a7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/volta/kernel_fifo_gv100.c @@ -0,0 +1,351 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_ctxshare.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/rmapi/rmapi.h" +#include "kernel/gpu/gpu.h" + +#include "nvRmReg.h" + +#include "published/volta/gv100/dev_ram.h" + +#include "ctrl/ctrlc36f.h" +#include "class/clc361.h" // VOLTA_USERMODE_A + +/** + * Returns the default timeslice (in us) for a channelgroup as defined by hardware. + */ +NvU64 +kfifoChannelGroupGetDefaultTimeslice_GV100 +( + KernelFifo *pKernelFifo +) +{ + return NV_RAMRL_ENTRY_TSG_TIMESLICE_TIMEOUT_128 << NV_RAMRL_ENTRY_TSG_TIMESLICE_SCALE_3; +} + +/** +* @brief Get the work submit token to be used to ring the doorbell +* +* @param[in] pKernelFifo +* @param[in] hClient +* @param[in] hChannel +* @param[out] pWorkSubmitToken: Pointer to where the +* updated token should be stored. +* TODO: Remove this call and let caller directly invoke the +* token generation HAL. +*/ +NV_STATUS +kfifoRmctrlGetWorkSubmitToken_GV100 +( + KernelFifo *pKernelFifo, + NvHandle hClient, + NvHandle hChannel, + NvU32 *pWorkSubmitToken +) +{ + NV_STATUS rmStatus; + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS params; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + portMemSet(¶ms, 0, sizeof(params)); + + if (pWorkSubmitToken == NULL) + { + NV_PRINTF(LEVEL_WARNING, "FAILED to get work submit token.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + rmStatus = pRmApi->Control(pRmApi, hClient, hChannel, + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN, + ¶ms, sizeof(params)); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Unable to get work submit token.\n"); + return rmStatus; + } + *pWorkSubmitToken = params.workSubmitToken; + return NV_OK; +} + +/** + * @brief Translates between 2 engine values + * + * To iterate through a value for all engines call with inType of + * ENGINE_INFO_TYPE_INVALID for 0 through fifoGetNumEngines(). 
+ * + * @param pGpu + * @param pKernelFifo + * @param[in] inType ENGINE_INFO_TYPE_* + * @param[in] inVal + * @param[in] outType ENGINE_INFO_TYPE_* + * @param[out] pOutVal + */ +NV_STATUS +kfifoEngineInfoXlate_GV100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + ENGINE_INFO_TYPE inType, + NvU32 inVal, + ENGINE_INFO_TYPE outType, + NvU32 *pOutVal +) +{ + extern NV_STATUS kfifoEngineInfoXlate_GM107(OBJGPU *pGpu, KernelFifo *pKernelFifo, ENGINE_INFO_TYPE inType, NvU32 inVal, ENGINE_INFO_TYPE outType, NvU32 *pOutVal); + + // GR supports a range of faults ids + if (inType == ENGINE_INFO_TYPE_MMU_FAULT_ID) + { + NvU32 grFaultId; + // RM-SMC AMPERE-TODO this translation must be extended to work with SMC + NvU32 maxSubctx = kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, NV_FALSE); + + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_GM107(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_ENG_DESC, ENG_GR(0), + ENGINE_INFO_TYPE_MMU_FAULT_ID, &grFaultId)); + + if ((inVal >= grFaultId) && (inVal < (grFaultId + maxSubctx))) + { + inVal = grFaultId; + } + } + + return kfifoEngineInfoXlate_GM107(pGpu, pKernelFifo, inType, inVal, outType, pOutVal); +} + +/* + * @brief Gives the maxinum number of channels allowed per channel group + */ +NvU32 +kfifoGetMaxChannelGroupSize_GV100 +( + KernelFifo *pKernelFifo +) +{ + return NV_RAMRL_ENTRY_TSG_LENGTH_MAX; +} + +/** + * @brief Get usermode register offset and size + * + * @param[in] pGpu + * @param[in] pKernelFifo + * @param[out] offset + * @param[out] size + */ +NV_STATUS +kfifoGetUsermodeMapInfo_GV100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU64 *pOffset, + NvU32 *pSize +) +{ + NvU32 offset; + + NV_ASSERT_OK_OR_RETURN(gpuGetRegBaseOffset_HAL(pGpu, NV_REG_BASE_USERMODE, &offset)); + + if (pOffset) + *pOffset = offset; + + if (pSize) + *pSize = DRF_SIZE(NVC361); + + return NV_OK; +} +/** + * @brief Get the maximum number of subcontext within a TSG. + * + * @param pKernelFifo + */ +NvU32 +kfifoGetMaxSubcontext_GV100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvBool bLegacyMode +) +{ + extern NvU32 kfifoGetMaxSubcontext_GM200(OBJGPU *pGpu, KernelFifo *pKernelFifo, NvBool bLegacyMode); + + if (bLegacyMode || + !kfifoIsSubcontextSupported(pKernelFifo) || + !pGpu || // there is no GPU for some strange reason + IsDFPGA(pGpu)) // dFPGA doesn't have GR + { + return 2; + } + + if (pKernelFifo->maxSubcontextCount == 0) + { + NvU32 maxVeid = kfifoGetMaxSubcontextFromGr_HAL(pGpu, pKernelFifo); + + // Verify that subcontext mask array is properly sized + NV_ASSERT_OR_RETURN(maxVeid / 32 <= SUBCTX_MASK_ARRAY_SIZE, 0); + pKernelFifo->maxSubcontextCount = maxVeid; + } + + return pKernelFifo->maxSubcontextCount; +} + +/** + * @brief Get the subcontext type info for the channel + * + * @param[in] pGpu + * @param[in] pKernelFifo + * @param[in] pKernelChannel + * @param[out] pSubctxType + */ + +void +kfifoGetSubctxType_GV100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + KernelChannel *pKernelChannel, + NvU32 *pSubctxType +) +{ + NvU32 subctxType; + KernelCtxShare *pKernelCtxShare = NULL; + + NV_ASSERT_OR_RETURN_VOID(pKernelChannel != NULL); + + // In case of lite channel mode there is no subcontext associated with a TSG. 
Return SYNC in such cases + if (kfifoIsLiteModeEnabled_HAL(pGpu, pKernelFifo)) + { + pSubctxType = NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC; + return; + } + + NV_ASSERT(pKernelChannel->subctxId != FIFO_PDB_IDX_BASE); + + KernelChannelGroup *pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup; + PEMEMBLOCK pBlock = pKernelChannelGroup->pSubctxIdHeap->eheapGetBlock( + pKernelChannelGroup->pSubctxIdHeap, + pKernelChannel->subctxId, + NV_FALSE); + + if (pBlock == NULL) + { + NV_PRINTF(LEVEL_ERROR, "subcontext not allocated for this TSG\n"); + NV_ASSERT(pBlock); + return; + } + + pKernelCtxShare = (KernelCtxShare *)pBlock->pData; + subctxType = DRF_VAL(_CTXSHARE, _ALLOCATION_FLAGS, _SUBCONTEXT, pKernelCtxShare->flags); + if (subctxType == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SPECIFIED) + { + subctxType = NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC; + } + + if (pSubctxType) + { + *pSubctxType = subctxType; + } +} + +/** + * @brief Get the runlist entry size + * + * @param pKernelFifo + * + * @return size in bytes + */ +NvU32 +kfifoRunlistGetEntrySize_GV100 +( + KernelFifo *pKernelFifo +) +{ + return NV_RAMRL_ENTRY_SIZE; +} +/*! + * @brief Calculates the size of all fault method buffers + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFifo KernelFifo pointer + * @param[in] bCalcForFbRsvd NV_TRUE if this calculation is requested by FB calc + * + * @return Size in bytes + */ +NvU32 +kfifoCalcTotalSizeOfFaultMethodBuffers_GV100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvBool bCalcForFbRsvd +) +{ + NvU32 maxChannelGroups = 0; + NvU32 runQueues = 0; + NvU32 totalSize = 0; + + // Return 0 from guest in the paravirtualization case. + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return 0; + } + + maxChannelGroups = kfifoGetMaxCeChannelGroups_HAL(pGpu, pKernelFifo); + runQueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + + NV_ASSERT_OK(gpuGetCeFaultMethodBufferSize(pGpu, &totalSize)); + + totalSize *= maxChannelGroups * runQueues; + + // FB reserved memory is required only when FB aperture is enabled for this buffer. + if ((bCalcForFbRsvd) && (DRF_VAL( _REG_STR_RM, _INST_LOC_3, _FAULT_METHOD_BUFFER, pGpu->instLocOverrides3 ) != + NV_REG_STR_RM_INST_LOC_3_FAULT_METHOD_BUFFER_VID)) + { + totalSize = 0; + } + + return totalSize; +} + +/*! + * Special function to be used early when the CHID_MGRs aren't and cannot be + * constructed in all cases. Do not use otherwise + */ +NvU32 +kfifoGetMaxCeChannelGroups_GV100 +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NvU32 numChannels = kfifoRunlistQueryNumChannels_HAL(pGpu, pKernelFifo, 0); + + if (pKernelFifo->bNumChannelsOverride) + numChannels = NV_MIN(pKernelFifo->numChannelsOverride, numChannels); + + return numChannels; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/arch/volta/usermode_api_gv100.c b/src/nvidia/src/kernel/gpu/fifo/arch/volta/usermode_api_gv100.c new file mode 100644 index 000000000..ee1c88bbe --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/arch/volta/usermode_api_gv100.c @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/usermode_api.h" +#include "kernel/gpu/fifo/kernel_fifo.h" + +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl003f.h" // NV01_MEMORY_LOCAL_PRIVILEGED + +NV_STATUS +usrmodeConstructHal_GV100 +( + UserModeApi *pUserModeApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemory = staticCast(pUserModeApi, Memory); + NV_STATUS status = NV_OK; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + OBJGPU *pGpu = pMemory->pGpu; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU64 offset = 0; + NvU32 size = 0; + NvU32 attr = 0; + NvU32 attr2 = 0; + + attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr); + attr = FLD_SET_DRF(OS32, _ATTR, _COHERENCY, _CACHED, attr); + + attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, attr2 ); + + NV_ASSERT_OK_OR_RETURN(kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, &offset, &size)); + + status = memCreateMemDesc(pGpu, &pMemDesc, ADDR_REGMEM, + offset, size, attr, attr2); + + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK, NV_TRUE); + + status = memConstructCommon(pMemory, NV01_MEMORY_LOCAL_PRIVILEGED, + 0, pMemDesc, 0, NULL, 0, 0, 0, 0, NVOS32_MEM_TAG_NONE, NULL); + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/channel_descendant.c b/src/nvidia/src/kernel/gpu/fifo/channel_descendant.c new file mode 100644 index 000000000..5af82ca56 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/channel_descendant.c @@ -0,0 +1,337 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/locks.h" +#include "rmapi/event.h" +#include "rmapi/rmapi.h" +#include "rmapi/rmapi_utils.h" +#include "kernel/gpu/falcon/kernel_falcon.h" +#include "kernel/gpu/fifo/channel_descendant.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +NV_STATUS +chandesConstruct_IMPL +( + ChannelDescendant *pChannelDescendant, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + PARAM_TO_ENGDESC_FUNCTION *pParamToEngDescFn +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pParentRef = pResourceRef->pParentRef; + RsResource *pParent = NULL; + KernelChannel *pKernelChannel; + CLASSDESCRIPTOR internalClassDescriptor; + PCLASSDESCRIPTOR pClassDescriptor; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + + pParent = pParentRef->pResource; + if (pParent == NULL) + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + pKernelChannel = dynamicCast(pParent, KernelChannel); + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_OBJECT_PARENT); + + // Bad class creation can happen when GPU is in low power because class DB is invalid + NV_ASSERT(gpuIsGpuFullPower(pGpu)); + + NV_ASSERT(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // + // If debug mode is enabled on this GPU, check if the GPU is occupied by a + // long running compute object. + // + if (gpuIsDebuggerActive_HAL(pGpu)) + { + return NV_ERR_BUSY_RETRY; + } + + // + // Lookup the CLASSDESCRIPTOR (external class id, engine tuple) + // + pClassDescriptor = NULL; + + // + // Check if we have moved to per engine channel Ram, if yes we should have + // engineType set in channel + // + if (kfifoIsPerRunlistChramEnabled(pKernelFifo) && + (kchannelGetEngineType(pKernelChannel) == NV2080_ENGINE_TYPE_NULL)) + { + NV_PRINTF(LEVEL_ERROR, + "Channel should have engineType associated with it\n"); + return NV_ERR_INVALID_OBJECT_PARENT; + } + + // + // From Ampere onwards a client can't create a channel without engineType, + // so engineType from channel gets first priority while determining class + // descriptor. For legacy chips, we will fall back to user-allocated params + // or default engine determination based on classId + // TO-DO - Restrict this for MIG and Ampere only however these checks should + // be removed once we move to per engine chid management. + // + if (kfifoIsHostEngineExpansionSupported(pKernelFifo) && + NV2080_ENGINE_TYPE_IS_VALID(kchannelGetEngineType(pKernelChannel)) && + (gpuIsCCorApmFeatureEnabled(pGpu) || bMIGInUse)) + { + if (rmapiutilIsExternalClassIdInternalOnly(pParams->externalClassId)) + { + ENGDESCRIPTOR engDesc; + + // + // Internal classes do not appear in the classdb, as they are not + // allowed to be allocated directly from usermode. Use the channel's + // engine type to determine the engdesc and don't bother checking + // the classdb. 
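+            // The stack-local internalClassDescriptor below stands in for the
+            // classdb entry that internal classes do not have.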
+ // + NV_ASSERT_OK_OR_RETURN( + kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, kchannelGetEngineType(pKernelChannel), + ENGINE_INFO_TYPE_ENG_DESC, &engDesc)); + portMemSet(&internalClassDescriptor, 0, sizeof(internalClassDescriptor)); + internalClassDescriptor.externalClassId = pParams->externalClassId; + internalClassDescriptor.engDesc = engDesc; + pClassDescriptor = &internalClassDescriptor; + } + else + { + // bypass this check for ENG_SW classes, as they can be under any engine + status = gpuGetClassByClassId(pGpu, pParams->externalClassId, &pClassDescriptor); + if ((status != NV_OK) || (pClassDescriptor->engDesc != ENG_SW)) + { + NvU32 engDesc; + NvU32 engineId = kchannelGetEngineType(pKernelChannel); + // detect the GRCE case where we may be allocating a CE object on GR channel + if ((status == NV_OK) && IS_CE(pClassDescriptor->engDesc) && NV2080_ENGINE_TYPE_IS_GR(engineId)) + { + // + // Get the partner CE of GR engine based on runqueue of this channel + // Use this partner CE alongside externalClassId to fetch the correct class descriptor + // + NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS partnerParams = {0}; + partnerParams.engineType = engineId; + partnerParams.runqueue = kchannelGetRunqueue(pKernelChannel); + NV_ASSERT_OK_OR_RETURN(kfifoGetEnginePartnerList_HAL(pGpu, pKernelFifo, &partnerParams)); + NV_ASSERT_OR_RETURN(partnerParams.numPartners == 1, NV_ERR_INVALID_STATE); + engineId = partnerParams.partnerList[0]; + } + + // Get the engDesc from engineType + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, + engineId, + ENGINE_INFO_TYPE_ENG_DESC, + &engDesc)); + + status = gpuGetClassByEngineAndClassId(pGpu, pParams->externalClassId, + engDesc, &pClassDescriptor); + } + } + } + else if ((pParams->pAllocParams != NULL) && (pParamToEngDescFn != NULL)) + { + // + // For classes like copy engine the per-engine code determines which + // engine index to use based on the allocation params. + // + ENGDESCRIPTOR engDesc = pParamToEngDescFn(pGpu, pParams->externalClassId, + pParams->pAllocParams); + + if (rmapiutilIsExternalClassIdInternalOnly(pParams->externalClassId)) + { + // + // Internal classes do not appear in the classdb, as they are not + // allowed to be allocated directly from usermode. Use the internal + // class's paramToEngDescFn to determine the engdesc and don't + // bother checking the classdb. 
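+            // engDesc was already derived from the allocation parameters via
+            // pParamToEngDescFn above, so it is used directly here.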
+ // + portMemSet(&internalClassDescriptor, 0, sizeof(internalClassDescriptor)); + internalClassDescriptor.externalClassId = pParams->externalClassId; + internalClassDescriptor.engDesc = engDesc; + pClassDescriptor = &internalClassDescriptor; + status = NV_OK; + } + else if (engDesc != ENG_INVALID) + { + status = gpuGetClassByEngineAndClassId(pGpu, pParams->externalClassId, + engDesc, &pClassDescriptor); + } + else + { + status = NV_ERR_INVALID_CLASS; + } + } + else + { + status = gpuGetClassByClassId(pGpu, pParams->externalClassId, &pClassDescriptor); + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "bad class 0x%x\n", pParams->externalClassId); + return NV_ERR_INVALID_CLASS; + } + + ENGDESCRIPTOR engDesc = pClassDescriptor->engDesc; + + // + // Verify the engine exists + // + if (IS_GR(engDesc)) + { + // + // Graphics engine can be disabled on kernel RM, so instead we are checking + // the existence of KernelGraphics engine here when engDesc = ENG_GR(X) + // + engDesc = MKENGDESC(classId(KernelGraphics), GET_GR_IDX(engDesc)); + } + else if (IS_CE(engDesc) && gpuGetEngstate(pGpu, engDesc) == NULL) + { + // If CE is missing, check for KCE instead + engDesc = MKENGDESC(classId(KernelCE), GET_CE_IDX(engDesc)); + } + + void *pEngObject = gpuGetEngstate(pGpu, engDesc); + // + // In a kernel-only config, falcons are represented by KernelFalcons and do not have an + // engstate. + // + if (pEngObject == NULL) + pEngObject = kflcnGetKernelFalconForEngine(pGpu, engDesc); + + if (pEngObject == NULL) + { + NV_PRINTF(LEVEL_ERROR, "engine is missing for class 0x%x\n", + pParams->externalClassId); + return NV_ERR_INVALID_CLASS; + } + + pChannelDescendant->pKernelChannel = pKernelChannel; + + pChannelDescendant->resourceDesc.externalClassId = pClassDescriptor->externalClassId; + pChannelDescendant->resourceDesc.engDesc = pClassDescriptor->engDesc; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + status = kfifoRunlistSetIdByEngine_HAL(pGpu, pKernelFifo, pKernelChannel, pChannelDescendant->resourceDesc.engDesc); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Invalid object allocation request on channel:0x%08x\n", + kchannelGetDebugTag(pKernelChannel)); + SLI_LOOP_RETURN(status); + } + SLI_LOOP_END + + status = kchannelRegisterChild(pKernelChannel, pChannelDescendant); + + if (status != NV_OK) + { + return status; + } + + return NV_OK; +} + +void +chandesDestruct_IMPL +( + ChannelDescendant *pChannelDescendant +) +{ + NV_STATUS status; + + // scrub event references for this object + CliDelObjectEvents(RES_GET_CLIENT_HANDLE(pChannelDescendant), RES_GET_HANDLE(pChannelDescendant)); + + status = kchannelDeregisterChild(pChannelDescendant->pKernelChannel, pChannelDescendant); + NV_ASSERT(status == NV_OK); + + chandesDestroy_HAL(pChannelDescendant); +} + +NV_STATUS +chandesGetSwMethods_IMPL +( + ChannelDescendant *pChannelDescendant, + METHOD **ppMethods, + NvU32 *pNumMethods +) +{ + // Default behavior is SW methods not supported. Subclasses can implement + // handlers if required. 
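+    // A subclass that does support SW methods would instead fill *ppMethods with
+    // its METHOD table, set *pNumMethods to the entry count, and return NV_OK.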
+ return NV_ERR_NOT_SUPPORTED; +} + +//--------------------------------------------------------------------------- +// +// Helpers for ChannelDescendant::chandesGetSwMethods method table +// +//--------------------------------------------------------------------------- + +NV_STATUS mthdNoOperation +( + OBJGPU *pGpu, + ChannelDescendant *Object, + PMETHOD Method, + NvU32 Offset, + NvU32 Data +) +{ + NV_PRINTF(LEVEL_INFO, "Method NoOperation: Class=0x%x Data=0x%x\n", + Object->resourceDesc.externalClassId, Data); + return (NV_OK); +} + +/* + * Check whether the software method should? * stall the PBDMA through the execution + * of the software method. By default - yes + */ +NvBool chandesIsSwMethodStalling_IMPL(ChannelDescendant *pChannelDescendant, NvU32 hHandle) +{ + return NV_TRUE; +} + +NV_STATUS +chandesCheckMemInterUnmap_IMPL +( + ChannelDescendant *pChannelDescendant, + NvBool bSubdeviceHandleProvided +) +{ + if (bSubdeviceHandleProvided) + { + NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of non-memory objects not supported.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_channel.c b/src/nvidia/src/kernel/gpu/fifo/kernel_channel.c new file mode 100644 index 000000000..8f880eea7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_channel.c @@ -0,0 +1,4342 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/fifo/kernel_channel.h" + +#include "kernel/core/locks.h" +#include "kernel/diagnostics/gpu_acct.h" +#include "kernel/gpu/device/device.h" +#include "kernel/gpu/fifo/kernel_ctxshare.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/mem_mgr/context_dma.h" +#include "kernel/gpu/mem_mgr/heap.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "kernel/mem_mgr/ctx_buf_pool.h" +#include "kernel/mem_mgr/gpu_vaspace.h" +#include "kernel/rmapi/event.h" +#include "kernel/rmapi/rmapi.h" +#include "kernel/rmapi/rs_utils.h" +#include "kernel/virtualization/hypervisor/hypervisor.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" + +#include "class/cl0090.h" // KERNEL_GRAPHICS_CONTEXT +#include "class/cl906fsw.h" // GF100_GPFIFO +#include "class/cla06c.h" // KEPLER_CHANNEL_GROUP_A +#include "class/cla06f.h" // KEPLER_CHANNEL_GPFIFO_A +#include "class/cla06fsw.h" // KEPLER_CHANNEL_GPFIFO_A +#include "class/cla16f.h" // KEPLER_CHANNEL_GPFIFO_B +#include "class/cla16fsw.h" // KEPLER_CHANNEL_GPFIFO_B +#include "class/clb06f.h" // MAXWELL_CHANNEL_GPFIFO_A +#include "class/clb06fsw.h" // MAXWELL_CHANNEL_GPFIFO_A +#include "class/clc06f.h" // PASCAL_CHANNEL_GPFIFO_A +#include "class/clc06fsw.h" // PASCAL_CHANNEL_GPFIFO_A +#include "class/clc36f.h" // VOLTA_CHANNEL_GPFIFO_A +#include "class/clc36fsw.h" // VOLTA_CHANNEL_GPFIFO_A +#include "class/clc46f.h" // TURING_CHANNEL_GPFIFO_A +#include "class/clc46fsw.h" // TURING_CHANNEL_GPFIFO_A +#include "class/clc56f.h" // AMPERE_CHANNEL_GPFIFO_A +#include "class/clc56fsw.h" // AMPERE_CHANNEL_GPFIFO_A +#include "class/clc572.h" // PHYSICAL_CHANNEL_GPFIFO + +#include "ctrl/ctrl906f.h" +#include "ctrl/ctrlc46f.h" + +#include "Nvcm.h" +#include "libraries/resserv/resserv.h" +#include "libraries/resserv/rs_client.h" +#include "libraries/resserv/rs_resource.h" +#include "libraries/resserv/rs_server.h" +#include "nvRmReg.h" +#include "nvstatuscodes.h" +#include "vgpu/rpc.h" + +// Instmem static functions +static NV_STATUS _kchannelAllocHalData(OBJGPU *pGpu, KernelChannel *pKernelChannel); +static void _kchannelFreeHalData(OBJGPU *pGpu, KernelChannel *pKernelChannel); +static NV_STATUS _kchannelAllocOrDescribeInstMem( + KernelChannel *pKernelChannel, + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams); +static NV_STATUS _kchannelDescribeMemDescsFromParams( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams); +static NV_STATUS _kchannelDescribeMemDescsHeavySriov(OBJGPU *pGpu, KernelChannel *pKernelChannel); +static NV_STATUS _kchannelSendChannelAllocRpc( + KernelChannel *pKernelChannel, + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams, + KernelChannelGroup *pKernelChannelGroup, + NvBool bFullSriov); + +static NV_STATUS _kchannelSetupNotifyActions(KernelChannel *pKernelChannel, + NvU32 classNum); +static void _kchannelCleanupNotifyActions(KernelChannel *pKernelChannel); +static NV_STATUS _kchannelNotifyOfChid(OBJGPU *pGpu, KernelChannel *pKernelChannel, RsClient *pRsClient); +static NV_STATUS _kchannelGetUserMemDesc(OBJGPU *pGpu, KernelChannel *pKernelChannel, PMEMORY_DESCRIPTOR *ppMemDesc); +static void _kchannelUpdateFifoMapping(KernelChannel *pKernelChannel, + OBJGPU *pGpu, + NvBool bKernel, + NvP64 cpuAddress, + NvP64 priv, + NvU64 cpuMapLength, + NvU32 flags, + NvHandle 
hSubdevice, + RsCpuMapping *pMapping); + +/*! + * @brief Construct a new KernelChannel, which also creates a Channel. + * + * @param[in,out] pCallContext The call context + * @param[in,out] pParams Params for the *_CHANNEL_GPFIFO class + * object being created + * + * @returns NV_OK on success, specific error code on failure. + */ +NV_STATUS +kchannelConstruct_IMPL +( + KernelChannel *pKernelChannel, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + OBJSYS *pSys = SYS_GET_INSTANCE(); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + RsClient *pRsClient = pCallContext->pClient; + RmClient *pRmClient = NULL; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pKernelCtxShareRef = NULL; + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + NvHandle hClient = pRsClient->hClient; + NvHandle hParent = pResourceRef->pParentRef->hResource; + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams = pParams->pAllocParams; + RsResourceRef *pChanGrpRef = NULL; + KernelChannelGroupApi *pKernelChannelGroupApi = NULL; + NvHandle hKernelCtxShare = pChannelGpfifoParams->hContextShare; + NvBool bTsgAllocated = NV_FALSE; + NvHandle hChanGrp = NV01_NULL_OBJECT; + RsResourceRef *pDeviceRef = NULL; + NvBool bMIGInUse; + KernelChannelGroup *pKernelChannelGroup = NULL; + NvU32 chID = ~0; + NvU32 flags = pChannelGpfifoParams->flags; + NvU32 globalEngineType = NV2080_ENGINE_TYPE_NULL; + NvU32 verifFlags2 = 0; + NvBool bChidAllocated = NV_FALSE; + NvBool bLockAcquired = NV_FALSE; + NvBool bNotifyActionsSetup = NV_FALSE; + CTX_BUF_POOL_INFO *pChannelBufPool = NULL; + CTX_BUF_INFO bufInfo = {0}; + NvBool bRpcAllocated = NV_FALSE; + NvBool bFullSriov = IS_VIRTUAL_WITH_SRIOV(pGpu) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu); + NvBool bAddedToGroup = NV_FALSE; + NvU64 errContextMemDescFlags = 0; + NvU32 callingContextGfid; + + // We only support physical channels. 
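+    // Channels requested with any other _CHANNEL_TYPE flag are rejected here
+    // with NV_ERR_NOT_SUPPORTED.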
+ NV_ASSERT_OR_RETURN(FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_TYPE, _PHYSICAL, flags), + NV_ERR_NOT_SUPPORTED); + + pKernelChannel->refCount = 1; + pKernelChannel->bIsContextBound = NV_FALSE; + pKernelChannel->nextObjectClassID = 0; + pKernelChannel->hVASpace = pChannelGpfifoParams->hVASpace; + pKernelChannel->subctxId = 0; + pKernelChannel->bSkipCtxBufferAlloc = FLD_TEST_DRF(OS04, _FLAGS, + _SKIP_CTXBUFFER_ALLOC, _TRUE, flags); + pKernelChannel->cid = portAtomicIncrementU32(&pSys->currentCid); + pKernelChannel->runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, flags); + pKernelChannel->engineType = NV2080_ENGINE_TYPE_NULL; + pChannelGpfifoParams->cid = pKernelChannel->cid; + NV_ASSERT_OK_OR_GOTO(status, refFindAncestorOfType(pResourceRef, classId(Device), &pDeviceRef), cleanup); + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &callingContextGfid)); + + // Internal fields must be cleared when RMAPI call is from client + if (!hypervisorIsVgxHyper() || IS_GSP_CLIENT(pGpu)) + pChannelGpfifoParams->hPhysChannelGroup = NV01_NULL_OBJECT; + pChannelGpfifoParams->internalFlags = 0; + portMemSet(&pChannelGpfifoParams->errorNotifierMem, 0, + sizeof pChannelGpfifoParams->errorNotifierMem); + portMemSet(&pChannelGpfifoParams->eccErrorNotifierMem, 0, + sizeof pChannelGpfifoParams->eccErrorNotifierMem); + pChannelGpfifoParams->ProcessID = 0; + pChannelGpfifoParams->SubProcessID = 0; + + pRmClient = dynamicCast(pRsClient, RmClient); + if (pRmClient == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pKernelChannel->pUserInfo = pRmClient->pUserInfo; + + // + // GSP-RM needs privilegeLevel passed in as an alloc param because it cannot + // check pRmClient for kernel/admin. + // Other platforms check pRmClient to determine privilegeLevel. + // + if (RMCFG_FEATURE_PLATFORM_GSP) + { + // Guest-RM clients can allocate a privileged channel to perform + // actions such as updating page tables in physical mode or scrubbing. + // Security for these channels is enforced by VMMU and IOMMU + if (gpuIsSriovEnabled(pGpu) && IS_GFID_VF(callingContextGfid) && + FLD_TEST_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, flags)) + { + pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN; + } + else + { + pKernelChannel->privilegeLevel = + DRF_VAL(_KERNELCHANNEL, _ALLOC_INTERNALFLAGS, _PRIVILEGE, pChannelGpfifoParams->internalFlags); + } + pKernelChannel->ProcessID = pChannelGpfifoParams->ProcessID; + pKernelChannel->SubProcessID = pChannelGpfifoParams->SubProcessID; + } + else + { + RS_PRIV_LEVEL privLevel = pCallContext->secInfo.privLevel; + if (privLevel >= RS_PRIV_LEVEL_KERNEL) + { + pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL; + pChannelGpfifoParams->flags = FLD_SET_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, pChannelGpfifoParams->flags); + } + else if (rmclientIsAdmin(pRmClient, privLevel) || hypervisorCheckForObjectAccess(hClient)) + { + pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN; + pChannelGpfifoParams->flags = FLD_SET_DRF(OS04, _FLAGS, _PRIVILEGED_CHANNEL, _TRUE, pChannelGpfifoParams->flags); + } + else + { + pKernelChannel->privilegeLevel = NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER; + } + + pKernelChannel->ProcessID = pRmClient->ProcID; + pKernelChannel->SubProcessID = pRmClient->SubProcessID; + } + + // Context share and vaspace handles can't be active at the same time. 
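+    // A context share already carries its own VA space, so a channel may supply
+    // either hContextShare or hVASpace, never both.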
+ if ((hKernelCtxShare != NV01_NULL_OBJECT) && (pKernelChannel->hVASpace != NV01_NULL_OBJECT)) + { + NV_PRINTF(LEVEL_ERROR, + "Both context share and vaspace handles can't be valid at the same time\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + bMIGInUse = IS_MIG_IN_USE(pGpu); + + // + // The scrubber is allocated by Kernel RM in offload mode, and is disabled + // completely on GSP, so it is not possible for GSP to determine whether + // this allocation should be allowed or not. CPU RM can and should properly + // check this. + // + if (IS_MIG_ENABLED(pGpu) && !RMCFG_FEATURE_PLATFORM_GSP && !bMIGInUse) + { + NvBool bTopLevelScrubberEnabled = NV_FALSE; + NvBool bTopLevelScrubberConstructed = NV_FALSE; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (memmgrIsPmaInitialized(pMemoryManager)) + { + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU32 pmaConfigs = PMA_QUERY_SCRUB_ENABLED | PMA_QUERY_SCRUB_VALID; + NV_ASSERT_OK(pmaQueryConfigs(&pHeap->pmaObject, &pmaConfigs)); + bTopLevelScrubberEnabled = (pmaConfigs & PMA_QUERY_SCRUB_ENABLED) != 0x0; + bTopLevelScrubberConstructed = (pmaConfigs & PMA_QUERY_SCRUB_VALID) != 0x0; + } + + // + // Exception: Top level scrubber must be initialized before + // GPU instances can be created, and therefore must be allowed to + // create a CE context if the scrubber is supported. + // + + if (!bTopLevelScrubberEnabled || bTopLevelScrubberConstructed || + !kchannelCheckIsKernel(pKernelChannel)) + { + NV_PRINTF(LEVEL_ERROR, + "Channel allocation not allowed when MIG is enabled without GPU instancing\n"); + return NV_ERR_INVALID_STATE; + } + } + + + + // Find the TSG, or create the TSG if we need to wrap it + status = clientGetResourceRefByType(pRsClient, hParent, + classId(KernelChannelGroupApi), + &pChanGrpRef); + if (status != NV_OK) + { + NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS tsgParams = { 0 }; + + // Context share can only be used with a TSG channel + if (hKernelCtxShare != NV01_NULL_OBJECT) + { + NV_PRINTF(LEVEL_ERROR, + "Non-TSG channels can't use context share\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + tsgParams.hVASpace = pKernelChannel->hVASpace; + tsgParams.engineType = pChannelGpfifoParams->engineType; + // vGpu plugin context flag should only be set if context is plugin + if (gpuIsSriovEnabled(pGpu)) + { + tsgParams.bIsCallingContextVgpuPlugin = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_VGPU_PLUGIN_CONTEXT, _TRUE, pChannelGpfifoParams->flags); + } + // + // Internally allocate a TSG to wrap this channel. There is no point + // in mirroring this allocation in the host, as the channel is + // already mirrored. 
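+        // RMAPI_ALLOC_FLAGS_SKIP_RPC in the allocation below keeps this internal
+        // TSG allocation local to this RM instance.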
+ // + status = pRmApi->AllocWithSecInfo(pRmApi, + hClient, + hParent, + &pChannelGpfifoParams->hPhysChannelGroup, + KEPLER_CHANNEL_GROUP_A, + NV_PTR_TO_NvP64(&tsgParams), + RMAPI_ALLOC_FLAGS_SKIP_RPC, + NvP64_NULL, + &pRmApi->defaultSecInfo); + + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + bTsgAllocated = NV_TRUE; + hChanGrp = pChannelGpfifoParams->hPhysChannelGroup; + + status = clientGetResourceRefByType(pRsClient, hChanGrp, + classId(KernelChannelGroupApi), + &pChanGrpRef); + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + + pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource, + KernelChannelGroupApi); + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + pKernelChannelGroup->bAllocatedByRm = NV_TRUE; + } + else + { + hChanGrp = hParent; + pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource, + KernelChannelGroupApi); + if (pKernelChannelGroupApi == NULL || + pKernelChannelGroupApi->pKernelChannelGroup == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Invalid KernelChannelGroup* for channel 0x%x\n", + pResourceRef->hResource); + status = NV_ERR_INVALID_POINTER; + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + } + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + + // TSG channel should specify a context share object, rather than vaspace directly + if (pKernelChannel->hVASpace != NV01_NULL_OBJECT) + { + NV_PRINTF(LEVEL_ERROR, + "TSG channels can't use an explicit vaspace\n"); + status = NV_ERR_INVALID_ARGUMENT; + NV_ASSERT_OR_GOTO(status == NV_OK, cleanup); + } + } + pKernelChannel->pKernelChannelGroupApi = pKernelChannelGroupApi; + + NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE); + + // + // Reserve memory for channel instance block from PMA + // into a pool tied to channel's parent TSG. + // RM will later allocate memory for instance block from this pool. + // + pChannelBufPool = pKernelChannelGroup->pChannelBufPool; + if (pChannelBufPool != NULL) + { + NvBool bIsScrubSkipped; + NvBool bRequestScrubSkip = FLD_TEST_DRF(OS04, _FLAGS, _CHANNEL_SKIP_SCRUBBER, _TRUE, pChannelGpfifoParams->flags); + + if (bRequestScrubSkip) + { + if (!kchannelCheckIsKernel(pKernelChannel)) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_ERROR, "Only kernel priv clients can skip scrubber\n"); + goto cleanup; + } + + // + // If this is first channel in the TSG then setup ctx buf pool to skip scrubbing. + // For subsequent channels, setting should match with ctx buf pool's state. + // + if (pKernelChannelGroup->chanCount == 0) + { + ctxBufPoolSetScrubSkip(pChannelBufPool, NV_TRUE); + NV_PRINTF(LEVEL_INFO, "Skipping scrubber for all allocations on this context\n"); + } + } + + bIsScrubSkipped = ctxBufPoolIsScrubSkipped(pChannelBufPool); + if (bIsScrubSkipped ^ bRequestScrubSkip) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_ERROR, "Mismatch between channel and parent TSG's policy on skipping scrubber\n"); + NV_PRINTF(LEVEL_ERROR, "scrubbing %s skipped for TSG and %s for channel\n", (bIsScrubSkipped ? "is" : "is not"), + (bRequestScrubSkip ? 
"is" : "is not")); + goto cleanup; + } + NV_ASSERT_OK_OR_GOTO(status, + kfifoGetInstMemInfo_HAL(pKernelFifo, &bufInfo.size, &bufInfo.align, NULL, NULL, NULL), + cleanup); + bufInfo.attr = RM_ATTR_PAGE_SIZE_DEFAULT; + NV_ASSERT_OK_OR_GOTO(status, ctxBufPoolReserve(pGpu, pChannelBufPool, &bufInfo, 1), cleanup); + } + else + { + NV_PRINTF(LEVEL_INFO, "Not using ctx buf pool\n"); + } + + //-------------------------------------------------------------------------- + // we acquire the GPU lock below. + // From here down do not return early, use goto cleanup + //-------------------------------------------------------------------------- + + NV_ASSERT_OK_OR_GOTO(status, + rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO), + cleanup); + bLockAcquired = NV_TRUE; + + // + // Initialize the notification indices used for different notifications + // + pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR] + = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR; + pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN] + = NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN; + + // Bake channel group error handlers into the channel + pKernelChannel->hErrorContext = pChannelGpfifoParams->hObjectError; + pKernelChannel->hEccErrorContext = pChannelGpfifoParams->hObjectEccError; + + if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT) + { + pKernelChannel->hErrorContext = ( + pKernelChannel->pKernelChannelGroupApi->hErrorContext); + } + if (pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT) + { + pKernelChannel->hEccErrorContext = ( + pKernelChannel->pKernelChannelGroupApi->hEccErrorContext); + } + + if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT) + { + pKernelChannel->errorContextType = ERROR_NOTIFIER_TYPE_NONE; + } + else if (!RMCFG_FEATURE_PLATFORM_GSP) + { + NV_ASSERT_OK(kchannelGetNotifierInfo(pGpu, pRsClient, + pKernelChannel->hErrorContext, + &pKernelChannel->pErrContextMemDesc, + &pKernelChannel->errorContextType, + &pKernelChannel->errorContextOffset)); + NV_ASSERT(pKernelChannel->errorContextType != + ERROR_NOTIFIER_TYPE_NONE); + } + if (pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT) + { + pKernelChannel->eccErrorContextType = ERROR_NOTIFIER_TYPE_NONE; + } + else if (!RMCFG_FEATURE_PLATFORM_GSP) + { + NV_ASSERT_OK(kchannelGetNotifierInfo(pGpu, pRsClient, + pKernelChannel->hEccErrorContext, + &pKernelChannel->pEccErrContextMemDesc, + &pKernelChannel->eccErrorContextType, + &pKernelChannel->eccErrorContextOffset)); + NV_ASSERT(pKernelChannel->eccErrorContextType != + ERROR_NOTIFIER_TYPE_NONE); + } + + if (IS_GSP_CLIENT(pGpu) || bFullSriov) + { + if (pKernelChannel->hErrorContext != NV01_NULL_OBJECT) + { + pChannelGpfifoParams->errorNotifierMem.base = ( + memdescGetPhysAddr(pKernelChannel->pErrContextMemDesc, + AT_GPU, 0) + + pKernelChannel->errorContextOffset); + pChannelGpfifoParams->errorNotifierMem.size = ( + pKernelChannel->pErrContextMemDesc->Size - + pKernelChannel->errorContextOffset); + pChannelGpfifoParams->errorNotifierMem.addressSpace = + memdescGetAddressSpace(pKernelChannel->pErrContextMemDesc); + pChannelGpfifoParams->errorNotifierMem.cacheAttrib = + memdescGetCpuCacheAttrib(pKernelChannel->pErrContextMemDesc); + + } + if (pKernelChannel->hEccErrorContext != NV01_NULL_OBJECT) + { + pChannelGpfifoParams->eccErrorNotifierMem.base = ( + memdescGetPhysAddr(pKernelChannel->pEccErrContextMemDesc, + AT_GPU, 0) + + pKernelChannel->eccErrorContextOffset); + pChannelGpfifoParams->eccErrorNotifierMem.size = ( + 
pKernelChannel->pEccErrContextMemDesc->Size - + pKernelChannel->eccErrorContextOffset); + pChannelGpfifoParams->eccErrorNotifierMem.addressSpace = + memdescGetAddressSpace(pKernelChannel->pEccErrContextMemDesc); + pChannelGpfifoParams->eccErrorNotifierMem.cacheAttrib = + memdescGetCpuCacheAttrib(pKernelChannel->pEccErrContextMemDesc); + } + + pChannelGpfifoParams->internalFlags = FLD_SET_DRF_NUM( + _KERNELCHANNEL_ALLOC, _INTERNALFLAGS, _ERROR_NOTIFIER_TYPE, + pKernelChannel->errorContextType, + pChannelGpfifoParams->internalFlags); + pChannelGpfifoParams->internalFlags = FLD_SET_DRF_NUM( + _KERNELCHANNEL_ALLOC, _INTERNALFLAGS, _ECC_ERROR_NOTIFIER_TYPE, + pKernelChannel->eccErrorContextType, + pChannelGpfifoParams->internalFlags); + } + else if (RMCFG_FEATURE_PLATFORM_GSP) + { + pKernelChannel->errorContextType = DRF_VAL(_KERNELCHANNEL_ALLOC, + _INTERNALFLAGS, _ERROR_NOTIFIER_TYPE, + pChannelGpfifoParams->internalFlags); + pKernelChannel->eccErrorContextType = DRF_VAL(_KERNELCHANNEL_ALLOC, + _INTERNALFLAGS, _ECC_ERROR_NOTIFIER_TYPE, + pChannelGpfifoParams->internalFlags); + + errContextMemDescFlags = MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + if (gpuIsSriovEnabled(pGpu) && + IS_GFID_VF(callingContextGfid) && + !pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bIsCallingContextVgpuPlugin) + { + errContextMemDescFlags |= MEMDESC_FLAGS_GUEST_ALLOCATED; + } + + if (pKernelChannel->errorContextType != ERROR_NOTIFIER_TYPE_NONE) + { + NV_ASSERT_OK_OR_GOTO(status, + memdescCreate(&pKernelChannel->pErrContextMemDesc, + pGpu, + pChannelGpfifoParams->errorNotifierMem.size, + 1 /* Alignment */, + NV_TRUE /* PhysicallyContiguous */, + ADDR_UNKNOWN, + NV_MEMORY_UNCACHED, + errContextMemDescFlags), + cleanup); + memdescDescribe(pKernelChannel->pErrContextMemDesc, + pChannelGpfifoParams->errorNotifierMem.addressSpace, + pChannelGpfifoParams->errorNotifierMem.base, + pChannelGpfifoParams->errorNotifierMem.size); + } + if (pKernelChannel->eccErrorContextType != ERROR_NOTIFIER_TYPE_NONE) + { + NV_ASSERT_OK_OR_GOTO(status, + memdescCreate(&pKernelChannel->pEccErrContextMemDesc, + pGpu, + pChannelGpfifoParams->eccErrorNotifierMem.size, + 1 /* Alignment */, + NV_TRUE /* PhysicallyContiguous */, + ADDR_UNKNOWN, + NV_MEMORY_UNCACHED, + errContextMemDescFlags), + cleanup); + memdescDescribe(pKernelChannel->pEccErrContextMemDesc, + pChannelGpfifoParams->eccErrorNotifierMem.addressSpace, + pChannelGpfifoParams->eccErrorNotifierMem.base, + pChannelGpfifoParams->eccErrorNotifierMem.size); + } + } + // + // The error context types should be set on all RM configurations + // (GSP/baremetal/CPU-GSP client) + // + NV_ASSERT(pKernelChannel->errorContextType != ERROR_NOTIFIER_TYPE_UNKNOWN); + NV_ASSERT(pKernelChannel->eccErrorContextType != + ERROR_NOTIFIER_TYPE_UNKNOWN); + + // Get KernelCtxShare (supplied or legacy) + if (hKernelCtxShare) + { + // + // Get object pointers from supplied hKernelCtxShare. + // If hKernelCtxShare is nonzero, the ChannelGroup is not internal either, + // so it should have the same parent as hParent. 
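+        // The lookup below resolves hKernelCtxShare to a KernelCtxShareApi
+        // reference and rejects it with NV_ERR_INVALID_OBJECT_PARENT if its
+        // parent is not the same TSG (hParent) this channel is allocated under.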
+ // + NV_ASSERT_OR_ELSE(!pKernelChannelGroup->bLegacyMode, + status = NV_ERR_INVALID_STATE; + goto cleanup); + + NV_ASSERT_OK_OR_GOTO(status, + clientGetResourceRefByType(pRsClient, hKernelCtxShare, classId(KernelCtxShareApi), &pKernelCtxShareRef), + cleanup); + + // Check that the parent matches + NV_ASSERT_OR_ELSE((pKernelCtxShareRef->pParentRef) && (pKernelCtxShareRef->pParentRef->hResource == hParent), + status = NV_ERR_INVALID_OBJECT_PARENT; goto cleanup); + } + else + { + NvU32 subctxFlag; + NvHandle hLegacyKernelCtxShare; + + // Set this ChannelGroup to legacy mode and get the KernelCtxShare from it. + if (!pKernelChannelGroup->bLegacyMode) + { + NV_ASSERT_OK_OR_GOTO(status, + kchangrpapiSetLegacyMode(pKernelChannelGroupApi, + pGpu, pKernelFifo, hClient), + cleanup); + } + + subctxFlag = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_THREAD, flags); + hLegacyKernelCtxShare = (subctxFlag == + NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC) ? + pKernelChannelGroupApi->hLegacykCtxShareSync : + pKernelChannelGroupApi->hLegacykCtxShareAsync; + NV_ASSERT_OK_OR_GOTO(status, + clientGetResourceRefByType(pRsClient, + hLegacyKernelCtxShare, + classId(KernelCtxShareApi), + &pKernelCtxShareRef), + cleanup); + } + + pKernelChannel->pKernelCtxShareApi = dynamicCast(pKernelCtxShareRef->pResource, KernelCtxShareApi); + NV_ASSERT_OR_ELSE(pKernelChannel->pKernelCtxShareApi != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup); + NV_ASSERT_OR_ELSE(pKernelChannel->pKernelCtxShareApi->pShareData != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup); + pKernelChannel->pVAS = pKernelChannel->pKernelCtxShareApi->pShareData->pVAS; + NV_ASSERT_OR_ELSE(pKernelChannel->pVAS != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup); + + if (kfifoIsPerRunlistChramSupportedInHw(pKernelFifo)) + { + // TSG should always have a valid engine Id. 
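+        // With per-runlist channel RAM, the ChID is drawn from the runlist
+        // selected by the engine type, so the TSG must already carry a valid
+        // engine. A channel-supplied engineType (converted to a global ID when
+        // MIG is active) must match the TSG's engine or the alloc fails.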
+ if (!NV2080_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType)) + { + NV_ASSERT( + NV2080_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType)); + status = NV_ERR_INVALID_STATE; + goto cleanup; + } + + if (NV2080_ENGINE_TYPE_IS_VALID(pChannelGpfifoParams->engineType)) + { + globalEngineType = pChannelGpfifoParams->engineType; + // Convert it to global engine id if MIG is enabled + if (bMIGInUse) + { + MIG_INSTANCE_REF ref; + + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref), + cleanup); + + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + pChannelGpfifoParams->engineType, + &globalEngineType), + cleanup); + } + + // Throw an error if TSG engine Id does NOT match with channel engine Id + if (globalEngineType != pKernelChannelGroup->engineType) + { + NV_PRINTF(LEVEL_ERROR, + "Engine type of channel = 0x%x not compatible with engine type of TSG = 0x%x\n", + pChannelGpfifoParams->engineType, + pKernelChannelGroup->engineType); + + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + } + + // Assign the engine type from the parent TSG + pKernelChannel->engineType = pKernelChannelGroup->engineType; + } + + // Determine initial runlist ID (based on engine type if provided or inherited from TSG) + pKernelChannel->runlistId = kfifoGetDefaultRunlist_HAL(pGpu, pKernelFifo, pKernelChannel->engineType); + + // Set TLS state and BAR0 window if we are working with Gr + if (bMIGInUse && NV2080_ENGINE_TYPE_IS_GR(pKernelChannel->engineType)) + { + NV_ASSERT_OK(kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, pRsClient->hClient, + &pKernelChannel->partitionRef)); + } + + // Allocate the ChId (except legacy VGPU which allocates ChID on the host) + if (!IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + status = kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient, + flags, verifFlags2, chID); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id %d for hClient %d hKernelChannel %d \n", + chID, hClient, pResourceRef->hResource); + DBG_BREAKPOINT(); + goto cleanup; + + } + + chID = pKernelChannel->ChID; + bChidAllocated = NV_TRUE; + } + + // + // RPC alloc the channel in legacy VGPU / Heavy SRIOV so that instmem details can be gotten from it + // + if (IS_VIRTUAL(pGpu) && (!bFullSriov)) + { + NV_ASSERT_OK_OR_GOTO(status, + _kchannelSendChannelAllocRpc(pKernelChannel, + pChannelGpfifoParams, + pKernelChannelGroup, + bFullSriov), + cleanup); + bRpcAllocated = NV_TRUE; + } + + // Legacy VGPU: allocate chid that the host provided + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + chID = pKernelChannel->ChID; + + status = kchannelAllocHwID_HAL(pGpu, pKernelChannel, hClient, + flags, verifFlags2, chID); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error in Allocating channel id %d for hClient %d hKernelChannel %d \n", + chID, hClient, pResourceRef->hResource); + chID = ~0; + DBG_BREAKPOINT(); + goto cleanup; + } + + bChidAllocated = NV_TRUE; + } + + // + // Do instmem setup here + // (Requires the channel to be created on the host if legacy VGPU / Heavy SRIOV. + // Does not require a Channel object.) 
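+    // _kchannelAllocOrDescribeInstMem either allocates instance block, RAMFC
+    // and USERD memory locally (baremetal, GSP client, SRIOV host) or only
+    // builds memdescs describing memory provided elsewhere (GSP firmware,
+    // full-SRIOV guest, or Heavy SRIOV via RPC).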
+ // + NV_ASSERT_OK_OR_GOTO(status, + _kchannelAllocOrDescribeInstMem(pKernelChannel, pChannelGpfifoParams), + cleanup); + + // Join the channel group here + NV_ASSERT_OK_OR_GOTO(status, + kchangrpAddChannel(pGpu, pKernelChannelGroup, pKernelChannel), + cleanup); + bAddedToGroup = NV_TRUE; + + // Assign to the same runlistId as the KernelChannelGroup if it's already determined + if (pKernelChannelGroup->bRunlistAssigned) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + NV_ASSERT_OK_OR_ELSE(status, + kfifoRunlistSetId_HAL(pGpu, + GPU_GET_KERNEL_FIFO(pGpu), + pKernelChannel, + pKernelChannelGroup->runlistId), + SLI_LOOP_GOTO(cleanup)); + } + SLI_LOOP_END + } + + // Allocate the physical channel + NV_ASSERT_OK_OR_GOTO(status, + kchannelAllocChannel_HAL(pKernelChannel, pChannelGpfifoParams), + cleanup); + + // Set up pNotifyActions + _kchannelSetupNotifyActions(pKernelChannel, pResourceRef->externalClassId); + bNotifyActionsSetup = NV_TRUE; + + // Initialize the userd length + if (!pKernelChannel->bClientAllocatedUserD) + { + NvU64 temp_offset; + + kchannelGetUserdInfo_HAL(pGpu, + pKernelChannel, + NULL, + &temp_offset, + &pKernelChannel->userdLength); + } + else + { + kfifoGetUserdSizeAlign_HAL(pKernelFifo, (NvU32*)&pKernelChannel->userdLength, NULL); + } + + // Set GPU accounting + if (RMCFG_MODULE_GPUACCT && + pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON)) + { + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(SYS_GET_INSTANCE()); + + gpuacctSetProcType(pGpuAcct, + pGpu->gpuInstance, + pRmClient->ProcID, + pRmClient->SubProcessID, + NV_GPUACCT_PROC_TYPE_GPU); + } + + // + // RPC to allocate the channel on GSPFW/host. + // (Requires a Channel object but only for hPhysChannel.) + // + if (IS_GSP_CLIENT(pGpu) || bFullSriov) + { + NV_ASSERT_OK_OR_GOTO(status, + _kchannelSendChannelAllocRpc(pKernelChannel, + pChannelGpfifoParams, + pKernelChannelGroup, + bFullSriov), + cleanup); + bRpcAllocated = NV_TRUE; + } + + if (kfifoIsPerRunlistChramEnabled(pKernelFifo) || + (gpuIsCCorApmFeatureEnabled(pGpu) || bMIGInUse)) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + KernelFifo *pTempKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + // + // If we have a separate channel RAM for each runlist then we need to set + // runlistId as we already picked a chID from channel RAM based on this runlistId. + // This will also ensure runlistId is not overridden later to a different value + // + NV_ASSERT_OK_OR_GOTO(status, + kfifoRunlistSetId_HAL(pGpu, pTempKernelFifo, pKernelChannel, pKernelChannel->runlistId), + cleanup); + } + SLI_LOOP_END; + } + + // + // If we alloced this group, we want to free KernelChannel first, + // so we should set KernelChannel as its dependent. + // + if (bTsgAllocated) + { + NV_ASSERT_OK_OR_GOTO(status, refAddDependant(pChanGrpRef, pResourceRef), cleanup); + } + + // We depend on VASpace if it was provided + if (pKernelChannel->hVASpace != NV01_NULL_OBJECT) + { + RsResourceRef *pVASpaceRef = NULL; + + NV_ASSERT_OK_OR_GOTO(status, clientGetResourceRef(pRsClient, pKernelChannel->hVASpace, &pVASpaceRef), cleanup); + NV_ASSERT_OR_ELSE(pVASpaceRef != NULL, status = NV_ERR_INVALID_OBJECT; goto cleanup); + + NV_ASSERT_OK_OR_GOTO(status, refAddDependant(pVASpaceRef, pResourceRef), cleanup); + } + + // + // If KernelCtxShare was provided, we depend on it (and if we created it then we + // also want KernelChannel to be freed first.) 
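+    // refAddDependant() records a Resource Server dependency so that the
+    // channel is freed before the context share it references when the client
+    // tears resources down in bulk.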
+ // + if (pKernelChannel->pKernelCtxShareApi != NULL) + { + NV_ASSERT_OK_OR_GOTO( + status, + refAddDependant(RES_GET_REF(pKernelChannel->pKernelCtxShareApi), pResourceRef), + cleanup); + } + + pKernelChannel->hKernelGraphicsContext = pKernelChannelGroupApi->hKernelGraphicsContext; + if (pKernelChannel->hKernelGraphicsContext != NV01_NULL_OBJECT) + { + KernelGraphicsContext *pKernelGraphicsContext; + + NV_ASSERT_OK_OR_GOTO(status, + kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext), + cleanup); + + NV_ASSERT_OK_OR_GOTO(status, + refAddDependant(RES_GET_REF(pKernelGraphicsContext), pResourceRef), + cleanup); + } + + if (pChannelGpfifoParams->hObjectError != 0) + { + NV_ASSERT_OK_OR_GOTO( + status, + _kchannelNotifyOfChid(pGpu, pKernelChannel, pRsClient), + cleanup); + } + +cleanup: + if (bLockAcquired) + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + + // These fields are only needed internally; clear them here + pChannelGpfifoParams->hPhysChannelGroup = 0; + pChannelGpfifoParams->internalFlags = 0; + portMemSet(&pChannelGpfifoParams->errorNotifierMem, 0, + sizeof pChannelGpfifoParams->errorNotifierMem); + portMemSet(&pChannelGpfifoParams->eccErrorNotifierMem, 0, + sizeof pChannelGpfifoParams->eccErrorNotifierMem); + pChannelGpfifoParams->ProcessID = 0; + pChannelGpfifoParams->SubProcessID = 0; + + // Free the allocated resources if there was an error + if (status != NV_OK) + { + if (bNotifyActionsSetup) + { + _kchannelCleanupNotifyActions(pKernelChannel); + } + + if (bAddedToGroup) + { + kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel); + } + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + // Free memdescs created during construct on GSP path. + memdescFree(pKernelChannel->pErrContextMemDesc); + memdescDestroy(pKernelChannel->pErrContextMemDesc); + memdescFree(pKernelChannel->pEccErrContextMemDesc); + memdescDestroy(pKernelChannel->pEccErrContextMemDesc); + } + pKernelChannel->pErrContextMemDesc = NULL; + pKernelChannel->pEccErrContextMemDesc = NULL; + + if (bRpcAllocated) + { + NV_RM_RPC_FREE_ON_ERROR(pGpu, hClient, hParent, RES_GET_HANDLE(pKernelChannel)); + } + + _kchannelFreeHalData(pGpu, pKernelChannel); + + if (pChannelBufPool != NULL) + { + ctxBufPoolRelease(pChannelBufPool); + } + + if (bTsgAllocated) + { + pRmApi->Free(pRmApi, hClient, hChanGrp); + } + + if (bChidAllocated) + { + kchannelFreeHwID_HAL(pGpu, pKernelChannel); + } + } + + return status; +} + +void +kchannelDestruct_IMPL +( + KernelChannel *pKernelChannel +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + NvHandle hClient; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + NV_STATUS status = NV_OK; + KernelChannelGroup *pKernelChannelGroup = NULL; + + resGetFreeParams(staticCast(pKernelChannel, RsResource), &pCallContext, &pParams); + hClient = pCallContext->pClient->hClient; + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + // Free memdescs created during construct on GSP path. 
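+        // memdescFree() drops any backing allocation while memdescDestroy()
+        // releases the descriptor object itself; on GSP these descriptors were
+        // created in kchannelConstruct from the notifier params, so both steps
+        // apply here.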
+ memdescFree(pKernelChannel->pErrContextMemDesc); + memdescDestroy(pKernelChannel->pErrContextMemDesc); + memdescFree(pKernelChannel->pEccErrContextMemDesc); + memdescDestroy(pKernelChannel->pEccErrContextMemDesc); + } + pKernelChannel->pErrContextMemDesc = NULL; + pKernelChannel->pEccErrContextMemDesc = NULL; + + // GSP and vGPU support + if ((IS_GSP_CLIENT(pGpu) || IS_VIRTUAL(pGpu))) + { + // + // GSP: + // + // Method buffer is allocated by CPU-RM during TSG construct + // but mapped to invisible BAR2 in GSP during channel construct + // During Free, first the BAR2 mapping must be unmapped in GSP + // and then freeing of method buffer should be done on CPU. + // This RPC call is especially required for the internal channel case + // where channelDestruct calls free for its TSG + // + NV_RM_RPC_FREE(pGpu, + hClient, + RES_GET_PARENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel), + status); + } + + { + KernelGraphicsContext *pKernelGraphicsContext; + + // Perform GR ctx cleanup tasks on channel destruction + if (kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext) == NV_OK) + shrkgrctxDetach(pGpu, pKernelGraphicsContext->pShared, pKernelGraphicsContext, pKernelChannel); + } + + _kchannelCleanupNotifyActions(pKernelChannel); + + _kchannelFreeHalData(pGpu, pKernelChannel); + + NV_ASSERT(pKernelChannel->pKernelChannelGroupApi != NULL); + + pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup; + + NV_ASSERT(pKernelChannelGroup != NULL); + + // remove channel from the group + kchangrpRemoveChannel(pGpu, pKernelChannelGroup, pKernelChannel); + + // Free the Ctx Buf pool + if (pKernelChannelGroup->pChannelBufPool != NULL) + { + ctxBufPoolRelease(pKernelChannelGroup->pChannelBufPool); + } + + // Free the channel group, if we alloced it + if (pKernelChannelGroup->bAllocatedByRm) + { + pRmApi->Free(pRmApi, hClient, + RES_GET_HANDLE(pKernelChannel->pKernelChannelGroupApi)); + pKernelChannelGroup = NULL; + pKernelChannel->pKernelChannelGroupApi = NULL; + } + + kchannelFreeHwID_HAL(pGpu, pKernelChannel); + + NV_ASSERT(pKernelChannel->refCount == 1); +} + +NV_STATUS +kchannelMap_IMPL +( + KernelChannel *pKernelChannel, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu; + NV_STATUS rmStatus; + RsClient *pRsClient = pCallContext->pClient; + RmClient *pRmClient = dynamicCast(pRsClient, RmClient); + GpuResource *pGpuResource; + + NV_ASSERT_OR_RETURN(!pKernelChannel->bClientAllocatedUserD, NV_ERR_INVALID_REQUEST); + + rmStatus = gpuresGetByDeviceOrSubdeviceHandle(pRsClient, + pCpuMapping->pContextRef->hResource, + &pGpuResource); + if (rmStatus != NV_OK) + return rmStatus; + + pGpu = GPU_RES_GET_GPU(pGpuResource); + GPU_RES_SET_THREAD_BC_STATE(pGpuResource); + + // If the flags are fifo default then offset/length passed in + if (DRF_VAL(OS33, _FLAGS, _FIFO_MAPPING, pCpuMapping->flags) == NVOS33_FLAGS_FIFO_MAPPING_DEFAULT) + { + // Validate the offset and limit passed in. 
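+        // The requested window must lie entirely within USERD:
+        // offset < userdLength, length != 0, and offset + length <= userdLength.
+        // For example (hypothetical sizes), with a 0x200-byte USERD an offset
+        // of 0x1F0 with length 0x10 is accepted, while length 0x20 would run
+        // past the limit and fail with NV_ERR_INVALID_LIMIT.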
+ if (pCpuMapping->offset >= pKernelChannel->userdLength) + return NV_ERR_INVALID_BASE; + if (pCpuMapping->length == 0) + return NV_ERR_INVALID_LIMIT; + if (pCpuMapping->offset + pCpuMapping->length > pKernelChannel->userdLength) + return NV_ERR_INVALID_LIMIT; + } + else + { + pCpuMapping->offset = 0x0; + pCpuMapping->length = pKernelChannel->userdLength; + } + + rmStatus = kchannelMapUserD(pGpu, pKernelChannel, + rmclientGetCachedPrivilege(pRmClient), + pCpuMapping->offset, + pCpuMapping->pPrivate->protect, + &pCpuMapping->pLinearAddress, + &(pCpuMapping->pPrivate->pPriv)); + + if (rmStatus != NV_OK) + return rmStatus; + + // Save off the mapping + _kchannelUpdateFifoMapping(pKernelChannel, + pGpu, + (pRsClient->type == CLIENT_TYPE_KERNEL), + pCpuMapping->pLinearAddress, + pCpuMapping->pPrivate->pPriv, + pCpuMapping->length, + pCpuMapping->flags, + pCpuMapping->pContextRef->hResource, + pCpuMapping); + + return NV_OK; +} + +NV_STATUS +kchannelUnmap_IMPL +( + KernelChannel *pKernelChannel, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu; + RsClient *pRsClient = pCallContext->pClient; + RmClient *pRmClient = dynamicCast(pRsClient, RmClient); + + if (pKernelChannel->bClientAllocatedUserD) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + + pGpu = pCpuMapping->pPrivate->pGpu; + + kchannelUnmapUserD(pGpu, + pKernelChannel, + rmclientGetCachedPrivilege(pRmClient), + &pCpuMapping->pLinearAddress, + &pCpuMapping->pPrivate->pPriv); + + return NV_OK; +} + +NV_STATUS +kchannelGetMapAddrSpace_IMPL +( + KernelChannel *pKernelChannel, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 userdAperture; + NvU32 userdAttribute; + + NV_ASSERT_OK_OR_RETURN(kfifoGetUserdLocation_HAL(pKernelFifo, + &userdAperture, + &userdAttribute)); + if (pAddrSpace) + *pAddrSpace = userdAperture; + + return NV_OK; +} + +NV_STATUS +kchannelGetMemInterMapParams_IMPL +( + KernelChannel *pKernelChannel, + RMRES_MEM_INTER_MAP_PARAMS *pParams +) +{ + OBJGPU *pGpu = pParams->pGpu; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + MEMORY_DESCRIPTOR *pSrcMemDesc = NULL; + NV_STATUS status; + + if (pParams->bSubdeviceHandleProvided) + { + NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of USERD not supported.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (!kfifoIsUserdMapDmaSupported(pKernelFifo)) + return NV_ERR_INVALID_OBJECT_HANDLE; + + status = _kchannelGetUserMemDesc(pGpu, pKernelChannel, &pSrcMemDesc); + if (status != NV_OK) + return status; + + pParams->pSrcMemDesc = pSrcMemDesc; + pParams->pSrcGpu = pSrcMemDesc->pGpu; + + return NV_OK; +} + +NV_STATUS +kchannelCheckMemInterUnmap_IMPL +( + KernelChannel *pKernelChannel, + NvBool bSubdeviceHandleProvided +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + if (bSubdeviceHandleProvided) + { + NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of channels not supported.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + + if (!kfifoIsUserdMapDmaSupported(pKernelFifo)) + return NV_ERR_INVALID_OBJECT_HANDLE; + + return NV_OK; +} + +/** + * @brief Creates an iterator to iterate all channels in a given scope. + * + * Iterates over all channels under a given scope. For a device it will loop + * through all channels that are descendants of the device (including children + * of channel groups). 
For a channel group it will only iterate over the + * channels within that group. Ordering is ensured for channel group. + * All channels within a channel group will be iterated together before moving to + * another channel group or channel. + * + * @param[in] pClient + * @param[in] pScopeRef The resource that defines the scope of iteration + */ +RS_ORDERED_ITERATOR +kchannelGetIter +( + RsClient *pClient, + RsResourceRef *pScopeRef +) +{ + return clientRefOrderedIter(pClient, pScopeRef, classId(KernelChannel), NV_TRUE); +} + +/** + * @brief Given a client, parent, and KernelChannel handle retrieves the + * KernelChannel object + * + * @param[in] hClient + * @param[in] hParent Device or Channel Group parent + * @param[in] hKernelChannel + * @param[out] ppKernelChannel Valid iff NV_OK is returned. + * + * @return NV_OK if successful, appropriate error otherwise + */ +NV_STATUS +CliGetKernelChannelWithDevice +( + NvHandle hClient, + NvHandle hParent, + NvHandle hKernelChannel, + KernelChannel **ppKernelChannel +) +{ + RsClient *pRsClient; + RsResourceRef *pParentRef; + RsResourceRef *pResourceRef; + KernelChannel *pKernelChannel; + + if (ppKernelChannel == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *ppKernelChannel = NULL; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, hKernelChannel, &pResourceRef)); + + pKernelChannel = dynamicCast(pResourceRef->pResource, KernelChannel); + NV_CHECK_OR_RETURN(LEVEL_INFO, pKernelChannel != NULL, NV_ERR_OBJECT_NOT_FOUND); + + pParentRef = pResourceRef->pParentRef; + NV_CHECK_OR_RETURN(LEVEL_INFO, pParentRef != NULL, NV_ERR_OBJECT_NOT_FOUND); + + // + // Check that the parent matches requested handle. Parent handle can be a + // device or a ChannelGroup. The first case can match either, the second + // matches a Device when the parent is a ChannelGroup. + // + NV_CHECK_OR_RETURN(LEVEL_INFO, (pParentRef->hResource == hParent) || + (RES_GET_HANDLE(GPU_RES_GET_DEVICE(pKernelChannel)) == hParent), + NV_ERR_OBJECT_NOT_FOUND); + + *ppKernelChannel = pKernelChannel; + return NV_OK; +} // end of CliGetKernelChannelWithDevice() + + +/** + * @brief Given a classNum this routine returns various sdk specific values for + * that class. 
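+ * For example, KEPLER_CHANNEL_GPFIFO_A reports the NVA06F notifier count and
+ * event-action constants, while an unrecognized class falls through to the
+ * CHANNEL_CLASS_TYPE_DMA defaults with zeroed fields.
+ *
+ * A minimal usage sketch (mirroring kchannelNotifyGeneric_IMPL below):
+ *
+ *   CLI_CHANNEL_CLASS_INFO classInfo;
+ *   CliGetChannelClassInfo(RES_GET_EXT_CLASS_ID(pKernelChannel), &classInfo);
+ *   if (notifyIndex < classInfo.notifiersMaxCount)
+ *       ... // notifier index is valid for this channel class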
+ * + * @param[in] classNum + * @param[out] pClassInfo + */ +void +CliGetChannelClassInfo +( + NvU32 classNum, + CLI_CHANNEL_CLASS_INFO *pClassInfo +) +{ + switch (classNum) + { + case GF100_CHANNEL_GPFIFO: + { + pClassInfo->notifiersMaxCount = NV906F_NOTIFIERS_MAXCOUNT; + pClassInfo->eventActionDisable = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + pClassInfo->eventActionSingle = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + pClassInfo->eventActionRepeat = NV906F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + pClassInfo->rcNotifierIndex = NV906F_NOTIFIERS_RC; + pClassInfo->classType = CHANNEL_CLASS_TYPE_GPFIFO; + break; + } + case KEPLER_CHANNEL_GPFIFO_A: + { + pClassInfo->notifiersMaxCount = NVA06F_NOTIFIERS_MAXCOUNT; + pClassInfo->eventActionDisable = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + pClassInfo->eventActionSingle = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + pClassInfo->eventActionRepeat = NVA06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + pClassInfo->rcNotifierIndex = NVA06F_NOTIFIERS_RC; + pClassInfo->classType = CHANNEL_CLASS_TYPE_GPFIFO; + break; + } + case KEPLER_CHANNEL_GPFIFO_B: + { + pClassInfo->notifiersMaxCount = NVA16F_NOTIFIERS_MAXCOUNT; + pClassInfo->eventActionDisable = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + pClassInfo->eventActionSingle = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + pClassInfo->eventActionRepeat = NVA16F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + pClassInfo->rcNotifierIndex = NVA16F_NOTIFIERS_RC; + pClassInfo->classType = CHANNEL_CLASS_TYPE_GPFIFO; + break; + } + case MAXWELL_CHANNEL_GPFIFO_A: + { + pClassInfo->notifiersMaxCount = NVB06F_NOTIFIERS_MAXCOUNT; + pClassInfo->eventActionDisable = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + pClassInfo->eventActionSingle = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + pClassInfo->eventActionRepeat = NVB06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + pClassInfo->rcNotifierIndex = NVB06F_NOTIFIERS_RC; + pClassInfo->classType = CHANNEL_CLASS_TYPE_GPFIFO; + break; + } + case PASCAL_CHANNEL_GPFIFO_A: + { + pClassInfo->notifiersMaxCount = NVC06F_NOTIFIERS_MAXCOUNT; + pClassInfo->eventActionDisable = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + pClassInfo->eventActionSingle = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + pClassInfo->eventActionRepeat = NVC06F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + pClassInfo->rcNotifierIndex = NVC06F_NOTIFIERS_RC; + pClassInfo->classType = CHANNEL_CLASS_TYPE_GPFIFO; + break; + } + case VOLTA_CHANNEL_GPFIFO_A: + { + pClassInfo->notifiersMaxCount = NVC36F_NOTIFIERS_MAXCOUNT; + pClassInfo->eventActionDisable = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + pClassInfo->eventActionSingle = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + pClassInfo->eventActionRepeat = NVC36F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + pClassInfo->rcNotifierIndex = NVC36F_NOTIFIERS_RC; + pClassInfo->classType = CHANNEL_CLASS_TYPE_GPFIFO; + break; + } + case TURING_CHANNEL_GPFIFO_A: + { + pClassInfo->notifiersMaxCount = NVC46F_NOTIFIERS_MAXCOUNT; + pClassInfo->eventActionDisable = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + pClassInfo->eventActionSingle = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + pClassInfo->eventActionRepeat = NVC46F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + pClassInfo->rcNotifierIndex = NVC46F_NOTIFIERS_RC; + pClassInfo->classType = CHANNEL_CLASS_TYPE_GPFIFO; + break; + } + case AMPERE_CHANNEL_GPFIFO_A: + { + pClassInfo->notifiersMaxCount = 
NVC56F_NOTIFIERS_MAXCOUNT; + pClassInfo->eventActionDisable = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + pClassInfo->eventActionSingle = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + pClassInfo->eventActionRepeat = NVC56F_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + pClassInfo->rcNotifierIndex = NVC56F_NOTIFIERS_RC; + pClassInfo->classType = CHANNEL_CLASS_TYPE_GPFIFO; + break; + } + + // + // Does not make sense. Call with the class type from the client not the + // internal type + // + case PHYSICAL_CHANNEL_GPFIFO: + NV_PRINTF(LEVEL_ERROR, + "Invalid class for CliGetChannelClassInfo\n"); + + default: + { + pClassInfo->notifiersMaxCount = 0; + pClassInfo->eventActionDisable = 0; + pClassInfo->eventActionSingle = 0; + pClassInfo->eventActionRepeat = 0; + pClassInfo->rcNotifierIndex = 0; + pClassInfo->classType = CHANNEL_CLASS_TYPE_DMA; + break; + } + } +} + + +/** + * @brief Returns the next KernelChannel from the iterator. + * + * Iterates over runlist IDs and ChIDs and returns the next KernelChannel found + * on the heap, if any. + * + * (error guaranteed if pointer is NULL; non-NULL pointer guaranteed if NV_OK) + * + * @param[in] pGpu + * @param[in] pIt the channel iterator + * @param[out] ppKernelChannel returns a KernelChannel * + * + * @return NV_OK if the returned pointer is valid or error + */ +NV_STATUS kchannelGetNextKernelChannel +( + OBJGPU *pGpu, + CHANNEL_ITERATOR *pIt, + KernelChannel **ppKernelChannel +) +{ + KernelChannel *pKernelChannel; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + if (ppKernelChannel == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *ppKernelChannel = NULL; + + while (pIt->runlistId < pIt->numRunlists) + { + CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, pIt->runlistId); + + if (pChidMgr == NULL) + { + pIt->runlistId++; + continue; + } + + pIt->numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + while (pIt->physicalChannelID < pIt->numChannels) + { + pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, + pChidMgr, pIt->physicalChannelID); + pIt->physicalChannelID++; + + // + // This iterator can be used during an interrupt, when a KernelChannel may + // be in the process of being destroyed. Don't return it if so. + // + if (pKernelChannel == NULL) + continue; + if (!kchannelIsValid_HAL(pKernelChannel)) + continue; + + *ppKernelChannel = pKernelChannel; + return NV_OK; + } + + pIt->runlistId++; + // Reset channel index to 0 for next runlist + pIt->physicalChannelID = 0; + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +/** + * @brief Finds the corresponding KernelChannel given client and channel handle + * + * Looks in client object store for the channel handle. Scales with total + * number of registered objects in the client, not just the number of channels. 
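+ * Unlike CliGetKernelChannelWithDevice(), no parent handle is checked; the
+ * handle is simply resolved and dynamically cast to KernelChannel, so a
+ * non-channel handle returns NV_ERR_INVALID_CHANNEL.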
+ * + * @param[in] hClient + * @param[in] hKernelChannel a KernelChannel Channel handle + * @param[out] ppKernelChannel + * + * @return NV_STATUS + */ +NV_STATUS +CliGetKernelChannel +( + NvHandle hClient, + NvHandle hKernelChannel, + KernelChannel **ppKernelChannel +) +{ + NV_STATUS status; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + + *ppKernelChannel = NULL; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + status = clientGetResourceRef(pRsClient, hKernelChannel, &pResourceRef); + if (status != NV_OK) + { + return status; + } + + *ppKernelChannel = dynamicCast(pResourceRef->pResource, KernelChannel); + NV_CHECK_OR_RETURN(LEVEL_INFO, + *ppKernelChannel != NULL, + NV_ERR_INVALID_CHANNEL); + return NV_OK; +} + +/*! + * @brief Notify client that channel is stopped. + * + * @param[in] pKernelChannnel + */ +NV_STATUS +kchannelNotifyRc_IMPL +( + KernelChannel *pKernelChannel +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + NvU32 engineID = NV2080_ENGINE_TYPE_NULL; + NV_STATUS rmStatus = NV_OK; + + if (IS_GFID_VF(kchannelGetGfid(pKernelChannel))) + { + NV_PRINTF(LEVEL_INFO, "Notification for channel 0x%x stop is already performed on guest-RM\n", + kchannelGetDebugTag(pKernelChannel)); + return NV_OK; + } + + if (pKernelChannel->hErrorContext == NV01_NULL_OBJECT && + pKernelChannel->hEccErrorContext == NV01_NULL_OBJECT) + { + NV_PRINTF(LEVEL_WARNING, "Channel 0x%x has no notifier set\n", + kchannelGetDebugTag(pKernelChannel)); + return NV_OK; + } + + if (NV2080_ENGINE_TYPE_IS_VALID(kchannelGetEngineType(pKernelChannel))) + { + engineID = kchannelGetEngineType(pKernelChannel); + } + rmStatus = krcErrorSetNotifier(pGpu, GPU_GET_KERNEL_RC(pGpu), + pKernelChannel, + ROBUST_CHANNEL_PREEMPTIVE_REMOVAL, + engineID, + RC_NOTIFIER_SCOPE_CHANNEL); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to set error notifier for channel 0x%x with error 0x%x.\n", + kchannelGetDebugTag(pKernelChannel), rmStatus); + } + return rmStatus; +} + +/** + * @brief Writes notifier specified by index + * + * @param[in] pKernelChannel + * @param[in] notifyIndex + * @param[in] pNotifyParams + * @parms[in] notifyParamsSize + */ +void kchannelNotifyGeneric_IMPL +( + KernelChannel *pKernelChannel, + NvU32 notifyIndex, + void *pNotifyParams, + NvU32 notifyParamsSize +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + ContextDma *pContextDma; + EVENTNOTIFICATION *pEventNotification; + CLI_CHANNEL_CLASS_INFO classInfo; + + CliGetChannelClassInfo(RES_GET_EXT_CLASS_ID(pKernelChannel), &classInfo); + + // validate notifyIndex + NV_CHECK_OR_RETURN_VOID(LEVEL_INFO, notifyIndex < classInfo.notifiersMaxCount); + + // handle notification if client wants it + if (pKernelChannel->pNotifyActions[notifyIndex] != classInfo.eventActionDisable) + { + // get notifier context dma for the channel + if (ctxdmaGetByHandle(RES_GET_CLIENT(pKernelChannel), + pKernelChannel->hErrorContext, + &pContextDma) == NV_OK) + { + // make sure it's big enough + if (pContextDma->Limit >= + ((classInfo.notifiersMaxCount * sizeof (NvNotification)) - 1)) + { + // finally, write out the notifier + notifyFillNotifierArray(pGpu, pContextDma, + 0x0, 0x0, 0x0, + notifyIndex); + } + } + } + + // handle event if client wants it + pEventNotification = inotifyGetNotificationList(staticCast(pKernelChannel, INotifier)); + if (pEventNotification != NULL) + { + // ping any events on the list of type notifyIndex + osEventNotification(pGpu, pEventNotification, notifyIndex, pNotifyParams, 
notifyParamsSize); + } + + // reset if single shot notify action + if (pKernelChannel->pNotifyActions[notifyIndex] == classInfo.eventActionSingle) + pKernelChannel->pNotifyActions[notifyIndex] = classInfo.eventActionDisable; + + return; +} + +/*! + * @brief Stop channel and notify client + * + * @param[in] pKernelChannnel + * @param[in] pStopChannelParams + */ +NV_STATUS +kchannelCtrlCmdStopChannel_IMPL +( + KernelChannel *pKernelChannel, + NVA06F_CTRL_STOP_CHANNEL_PARAMS *pStopChannelParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + RES_GET_HANDLE(pKernelChannel), + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + rmStatus); + if (rmStatus != NV_OK) + return rmStatus; + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + kchannelFwdToInternalCtrl_HAL(pGpu, + pKernelChannel, + NVA06F_CTRL_CMD_INTERNAL_STOP_CHANNEL, + pRmCtrlParams)); + } + + NV_ASSERT_OK_OR_RETURN(kchannelNotifyRc_HAL(pKernelChannel)); + + return NV_OK; +} + +/*! + * @brief Helper to get type and memdesc of a channel notifier (memory/ctxdma) + */ +NV_STATUS +kchannelGetNotifierInfo +( + OBJGPU *pGpu, + RsClient *pRsClient, + NvHandle hErrorContext, + MEMORY_DESCRIPTOR **ppMemDesc, + ErrorNotifierType *pNotifierType, + NvU64 *pOffset +) +{ + NvHandle hDevice; + Device *pDevice = NULL; + ContextDma *pContextDma = NULL; + Memory *pMemory = NULL; + + NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_PARAMETER); + NV_ASSERT_OR_RETURN(pNotifierType != NULL, NV_ERR_INVALID_PARAMETER); + + *ppMemDesc = NULL; + *pNotifierType = ERROR_NOTIFIER_TYPE_UNKNOWN; + *pOffset = 0; + + if (hErrorContext == NV01_NULL_OBJECT) + { + *pNotifierType = ERROR_NOTIFIER_TYPE_NONE; + return NV_OK; + } + + NV_ASSERT_OK_OR_RETURN(deviceGetByInstance(pRsClient, + gpuGetDeviceInstance(pGpu), + &pDevice)); + hDevice = RES_GET_HANDLE(pDevice); + + if (memGetByHandleAndDevice(pRsClient, hErrorContext, hDevice, &pMemory) == + NV_OK) + { + if (memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_VIRTUAL) + { + // + // GPUVA case: Get the underlying DMA mapping in this case. In GSP + // client mode + SLI, GSP won't be able to write to notifiers on + // other GPUs. 
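+            // The notifier offset inside the mapping is recovered as
+            // (notifyGpuVA - DmaOffset) and must leave room for a full
+            // NOTIFICATION structure; the mapping's memdesc is then reported
+            // to the caller as a CTXDMA-style notifier.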
+ // + NvU64 offset; + NvU32 subdeviceInstance; + NvU64 notifyGpuVA = memdescGetPhysAddr(pMemory->pMemDesc, + AT_GPU_VA, 0); + CLI_DMA_MAPPING_INFO *pDmaMappingInfo; + NvBool bFound; + + bFound = CliGetDmaMappingInfo( + pRsClient->hClient, + RES_GET_HANDLE(pDevice), + RES_GET_HANDLE(pMemory), + notifyGpuVA, + gpumgrGetDeviceGpuMask(pGpu->deviceInstance), + &pDmaMappingInfo); + + if (!bFound) + { + NV_PRINTF(LEVEL_ERROR, + "Cannot find DMA mapping for GPU_VA notifier\n"); + return NV_ERR_INVALID_STATE; + } + + offset = notifyGpuVA - pDmaMappingInfo->DmaOffset; + if (offset + sizeof(NOTIFICATION) > pDmaMappingInfo->pMemDesc->Size) + { + NV_PRINTF(LEVEL_ERROR, + "Notifier does not fit within DMA mapping for GPU_VA\n"); + return NV_ERR_INVALID_STATE; + } + + subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu( + gpumgrGetParentGPU(pGpu)); + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE) + if (IsSLIEnabled(pGpu) && IS_GSP_CLIENT(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "GSP does not support SLI\n"); + return NV_ERR_NOT_SUPPORTED; + } + SLI_LOOP_END + + if (!pDmaMappingInfo->KernelVAddr[subdeviceInstance]) + { + NV_PRINTF(LEVEL_ERROR, + "Kernel VA addr mapping not present for notifier\n"); + return NV_ERR_INVALID_STATE; + } + *ppMemDesc = pDmaMappingInfo->pMemDesc; + // The notifier format here is struct NOTIFICATION, same as ctxdma + *pNotifierType = ERROR_NOTIFIER_TYPE_CTXDMA; + *pOffset = offset; + } + else + { + *ppMemDesc = pMemory->pMemDesc; + *pNotifierType = ERROR_NOTIFIER_TYPE_MEMORY; + } + return NV_OK; + } + + if (ctxdmaGetByHandle(pRsClient, hErrorContext, &pContextDma) == NV_OK) + { + *ppMemDesc = pContextDma->pMemDesc; + *pNotifierType = ERROR_NOTIFIER_TYPE_CTXDMA; + return NV_OK; + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +/*! + * @brief Check if the client that owns this channel is kernel. + * + * This replaces using call context for privilege checking, + * and is callable from both CPU and GSP. + * + * @param[in] pGpu + * @param[in] pKernelChannel + * + * @returns NV_TRUE if owned by kernel or NV_FALSE. + */ +NvBool +kchannelCheckIsKernel_IMPL +( + KernelChannel *pKernelChannel +) +{ + return pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL; +} + +/*! + * @brief Check if the client that owns this channel is admin. + * + * This replaces using call context for admin privilege checking, + * but is callable from both CPU and GSP. + * + * @param[in] pGpu + * @param[in] pKernelChannel + * + * @returns NV_TRUE if owned by admin or NV_FALSE. + */ +NvBool +kchannelCheckIsAdmin_IMPL +( + KernelChannel *pKernelChannel +) +{ + return (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL) || + (pKernelChannel->privilegeLevel == NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN); +} + + +/*! + * @brief Check if the channel is bound to its resources. + * + * This is to make sure channel went through the UVM registration step before it can be scheduled. + * This applies only to UVM owned channels. + * + * @param[in] pKernelChannel + * @param[in] pGVAS + * + * @returns NV_TRUE if bound. 
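+ *          Channels on an externally owned (UVM-managed) GVAS that target a
+ *          GR engine are reported not schedulable until their allocations are
+ *          bound (bIsContextBound is set).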
+ */ +NvBool +kchannelIsSchedulable_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + OBJGVASPACE *pGVAS = NULL; + NvU32 engineId = 0; + NvU32 gfId; + + gfId = kchannelGetGfid(pKernelChannel); + if (IS_GFID_VF(gfId)) + { + NV_PRINTF(LEVEL_INFO, "Check for channel schedulability for channel 0x%x is already performed on guest-RM\n", + kchannelGetDebugTag(pKernelChannel)); + return NV_TRUE; + } + + pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE); + + // + // It should be an error to have allocated and attempt to schedule a + // channel without having allocated a GVAS. We ignore this check on + // AMODEL, which has its own dummy AVAS. + // + NV_ASSERT_OR_RETURN(pGVAS != NULL || IS_MODS_AMODEL(pGpu), NV_FALSE); + + NV_ASSERT_OR_RETURN(kchannelGetEngine_HAL(pGpu, pKernelChannel, &engineId) == NV_OK, NV_FALSE); + + if (pGVAS != NULL && gvaspaceIsExternallyOwned(pGVAS) && IS_GR(engineId) && !pKernelChannel->bIsContextBound) + { + NV_PRINTF(LEVEL_ERROR, + "Cannot schedule externally-owned channel with unbound allocations :0x%x!\n", + kchannelGetDebugTag(pKernelChannel)); + return NV_FALSE; + } + return NV_TRUE; +} + +// Alloc pFifoHalData +static NV_STATUS +_kchannelAllocHalData +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + portMemSet(pKernelChannel->pFifoHalData, 0, sizeof(pKernelChannel->pFifoHalData)); + + // Alloc 1 page of instmem per GPU instance + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = portMemAllocNonPaged(sizeof(FIFO_INSTANCE_BLOCK)); + + NV_ASSERT_OR_ELSE(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] != NULL, + SLI_LOOP_GOTO(failed)); + + portMemSet(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)], 0, sizeof(FIFO_INSTANCE_BLOCK)); + + SLI_LOOP_END + + return NV_OK; + +failed: + DBG_BREAKPOINT(); + _kchannelFreeHalData(pGpu, pKernelChannel); + return NV_ERR_NO_MEMORY; +} + +// Free memdescs and pFifoHalData, if any +static void +_kchannelFreeHalData +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + // Unmap / delete memdescs + kchannelDestroyMem_HAL(pGpu, pKernelChannel); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + kchannelDestroyUserdMemDesc(pGpu, pKernelChannel); + + // Free pFifoHalData + portMemFree(pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]); + pKernelChannel->pFifoHalData[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = NULL; + SLI_LOOP_END +} + +// Returns the proper VerifFlags for kchannelAllocMem +static NvU32 +_kchannelgetVerifFlags +( + OBJGPU *pGpu, + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams +) +{ + NvU32 verifFlags = 0; + + return verifFlags; +} + +// Allocate and describe instance memory +static NV_STATUS +_kchannelAllocOrDescribeInstMem +( + KernelChannel *pKernelChannel, + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + KernelChannelGroupApi *pKernelChannelGroupApi = pKernelChannel->pKernelChannelGroupApi; + KernelChannelGroup *pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + NvU32 gfid = pKernelChannelGroup->gfid; + NV_STATUS status; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + + // Alloc pFifoHalData + NV_ASSERT_OK_OR_RETURN(_kchannelAllocHalData(pGpu, pKernelChannel)); + + // + // GSP RM and host RM on full SRIOV setup will not be aware of 
the client allocated userd handles, + // translate the handle on client GSP. GSP RM or host RM on full SRIOV setup will get the translated + // addresses which it will later memdescribe. + // + // However it is still client allocated userd from GSP RM or host RM on full SRIOV setup + // perspective so set the flag accordingly. + // + if (!RMCFG_FEATURE_PLATFORM_GSP && + !(IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + pKernelChannel->bClientAllocatedUserD = NV_FALSE; + NV_ASSERT_OK_OR_GOTO(status, + kchannelCreateUserdMemDescBc_HAL(pGpu, pKernelChannel, hClient, + pChannelGpfifoParams->hUserdMemory, + pChannelGpfifoParams->userdOffset), + failed); + } + else + { + pKernelChannel->bClientAllocatedUserD = NV_TRUE; + } + + // Alloc/describe instmem memdescs depending on platform + if (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + // On Heavy SRIOV, describe memdescs using RPC + NV_ASSERT_OK_OR_GOTO(status, + _kchannelDescribeMemDescsHeavySriov(pGpu, pKernelChannel), + failed); + } + else if (RMCFG_FEATURE_PLATFORM_GSP || + (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + // On GSPFW or non-heavy SRIOV, describe memdescs from params + NV_ASSERT_OK_OR_GOTO(status, + _kchannelDescribeMemDescsFromParams(pGpu, pKernelChannel, pChannelGpfifoParams), + failed); + } + else if (!IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + // On baremetal, GSP client, or SRIOV host, alloc mem + NV_ASSERT_OK_OR_GOTO(status, + kchannelAllocMem_HAL(pGpu, + pKernelChannel, + pChannelGpfifoParams->flags, + _kchannelgetVerifFlags(pGpu, pChannelGpfifoParams)), + failed); + } + + // Setup USERD + if (IS_VIRTUAL(pGpu)) + { + PMEMORY_DESCRIPTOR pUserdSubDeviceMemDesc = + pKernelChannel->pUserdSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + NvBool bFullSriov = IS_VIRTUAL_WITH_SRIOV(pGpu) && + !gpuIsWarBug200577889SriovHeavyEnabled(pGpu); + + if (pUserdSubDeviceMemDesc && + (memdescGetAddressSpace(pUserdSubDeviceMemDesc) == ADDR_SYSMEM)) + { + NvP64 pUserDMem = NvP64_NULL; + NvP64 pPriv = NvP64_NULL; + + NV_ASSERT_OK_OR_GOTO(status, + memdescMap(pUserdSubDeviceMemDesc, 0, + memdescGetSize(pUserdSubDeviceMemDesc), NV_TRUE, + NV_PROTECT_READ_WRITE, &pUserDMem, &pPriv), + failed); + + kfifoSetupUserD_HAL(pKernelFifo, KERNEL_POINTER_FROM_NvP64(NvU8 *, pUserDMem)); + + memdescUnmap(pUserdSubDeviceMemDesc, + NV_TRUE, + osGetCurrentProcess(), + pUserDMem, + pPriv); + } // Clear Userd if it is in FB for SRIOV environment without BUG 200577889 + else if (pUserdSubDeviceMemDesc && (memdescGetAddressSpace(pUserdSubDeviceMemDesc) == ADDR_FBMEM) + && bFullSriov) + { + NvU8 *pUserDMem; + + pUserDMem = kbusMapRmAperture_HAL(pGpu, + pKernelChannel->pUserdSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]); + + NV_ASSERT_OR_ELSE(pUserDMem != NULL, status = NV_ERR_INSUFFICIENT_RESOURCES; goto failed); + + kfifoSetupUserD_HAL(pKernelFifo, pUserDMem); + + kbusUnmapRmAperture_HAL(pGpu, pUserdSubDeviceMemDesc, &pUserDMem, + NV_TRUE); + } + } + return NV_OK; + +failed: + _kchannelFreeHalData(pGpu, pKernelChannel); + return status; +} + +/** + * @brief Create and describe channel instance memory ramfc and userd memdescs + * Done using info in pChanGpfifoParams + * + * @param pGpu : OBJGPU pointer + * @param pKernelChannel : KernelChannel pointer + * @param pChanGpfifoParams : Pointer to channel allocation params + */ +static NV_STATUS +_kchannelDescribeMemDescsFromParams +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + 
NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams +) +{ + NV_STATUS status = NV_OK; + FIFO_INSTANCE_BLOCK *pInstanceBlock = NULL; + NvU32 subDevInst; + NvU32 gfid = GPU_GFID_PF; + NvU32 runqueue; + KernelChannelGroupApi *pKernelChannelGroupApi = + pKernelChannel->pKernelChannelGroupApi; + + NV_ASSERT_OR_RETURN((pKernelChannelGroupApi != NULL), NV_ERR_INVALID_STATE); + gfid = pKernelChannelGroupApi->pKernelChannelGroup->gfid; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_PLATFORM_GSP || + (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)), + NV_ERR_INVALID_STATE); + + NV_ASSERT_OR_RETURN((pChannelGpfifoParams != NULL), NV_ERR_INVALID_ARGUMENT); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + pInstanceBlock = (FIFO_INSTANCE_BLOCK*) pKernelChannel->pFifoHalData[subDevInst]; + + // Create memory descriptor for the instance memory + status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu, + pChannelGpfifoParams->instanceMem.size, 1 , NV_TRUE, + ADDR_UNKNOWN, pChannelGpfifoParams->instanceMem.cacheAttrib, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate instance memory descriptor!\n"); + SLI_LOOP_RETURN(status); + } + + memdescDescribe(pInstanceBlock->pInstanceBlockDesc, pChannelGpfifoParams->instanceMem.addressSpace, + pChannelGpfifoParams->instanceMem.base, pChannelGpfifoParams->instanceMem.size); + + + // Create memory descriptor for the ramfc + status = memdescCreate(&pInstanceBlock->pRamfcDesc, pGpu, + pChannelGpfifoParams->ramfcMem.size, 1 , NV_TRUE, + ADDR_UNKNOWN, pChannelGpfifoParams->ramfcMem.cacheAttrib, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate instance memory descriptor!\n"); + SLI_LOOP_RETURN(status); + } + + memdescDescribe(pInstanceBlock->pRamfcDesc, pChannelGpfifoParams->ramfcMem.addressSpace, + pChannelGpfifoParams->ramfcMem.base, pChannelGpfifoParams->ramfcMem.size); + + // Create userd memory descriptor + status = memdescCreate(&pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], pGpu, + pChannelGpfifoParams->userdMem.size, 1 , NV_TRUE, + ADDR_UNKNOWN, pChannelGpfifoParams->userdMem.cacheAttrib, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate instance memory descriptor!\n"); + SLI_LOOP_RETURN(status); + } + + memdescDescribe(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], + pChannelGpfifoParams->userdMem.addressSpace, + pChannelGpfifoParams->userdMem.base, pChannelGpfifoParams->userdMem.size); + + if (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + /* + * For full SRIOV, guest RM allocates and sends istance, ramfc and userd memory. 
+ * Set MEMDESC_FLAGS_GUEST_ALLOCATED flag in memory descriptor + */ + memdescSetFlag(pInstanceBlock->pInstanceBlockDesc, MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE); + memdescSetFlag(pInstanceBlock->pRamfcDesc, MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE); + memdescSetFlag(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], MEMDESC_FLAGS_GUEST_ALLOCATED, NV_TRUE); + } + + // Create method buffer memory descriptor + runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, pChannelGpfifoParams->flags); + if (IS_GFID_VF(gfid) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + pKernelChannelGroupApi->pKernelChannelGroup->pMthdBuffers[runqueue] + .bar2Addr = pChannelGpfifoParams->mthdbufMem.base; + } + else if (pKernelChannelGroupApi->pKernelChannelGroup + ->pMthdBuffers[runqueue].pMemDesc == NULL) + { + NV_ASSERT(pChannelGpfifoParams->mthdbufMem.size > 0); + NV_ASSERT(pChannelGpfifoParams->mthdbufMem.base != 0); + status = memdescCreate(&pKernelChannelGroupApi->pKernelChannelGroup + ->pMthdBuffers[runqueue].pMemDesc, + pGpu, + pChannelGpfifoParams->mthdbufMem.size, + 1, + NV_TRUE, + ADDR_UNKNOWN, + pChannelGpfifoParams->mthdbufMem.cacheAttrib, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate instance memory descriptor!\n"); + SLI_LOOP_RETURN(status); + } + memdescDescribe(pKernelChannelGroupApi->pKernelChannelGroup + ->pMthdBuffers[runqueue].pMemDesc, + pChannelGpfifoParams->mthdbufMem.addressSpace, + pChannelGpfifoParams->mthdbufMem.base, + pChannelGpfifoParams->mthdbufMem.size); + } + + NV_PRINTF(LEVEL_INFO, + "hChannel 0x%x hClient 0x%x, Class ID 0x%x " + "Instance Block @ 0x%llx (%s %x) " + "USERD @ 0x%llx " + "for subdevice %d\n", + RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel), + memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0), + memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)), + (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)), + (pKernelChannel->pUserdSubDeviceMemDesc[subDevInst] == NULL) ? 0x0LL : + memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], AT_GPU, 0LL), subDevInst); + + SLI_LOOP_END + + return status; +} + +/** + * @brief Create and describe channel instance memory ramfc and userd memdescs + * Done using RPC for Heavy SRIOV guest + * + * @param pGpu : OBJGPU pointer + * @param pKernelChannel : KernelChannel pointer + */ +static NV_STATUS +_kchannelDescribeMemDescsHeavySriov +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + NV_STATUS status = NV_OK; + FIFO_INSTANCE_BLOCK *pInstanceBlock = NULL; + NvU32 subDevInst; + NvHandle hSubDevice = 0; + NvU32 apert = ADDR_UNKNOWN; + NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS memInfoParams; + + NV_ASSERT_OR_RETURN(IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu), + NV_ERR_INVALID_STATE); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + pInstanceBlock = (FIFO_INSTANCE_BLOCK*) pKernelChannel->pFifoHalData[subDevInst]; + + // + // In SRIOV enabled systems, MMU fault interrupts for guest contexts are received and handled in guests. + // Inorder to correctly find the faulting channel, faulting instance address has be compared with list of allocated channels. 
+ // But since contexts are currently allocated in host during channelConstruct, we need + // context info from host and save it locally for the above channel lookup to pass. This piece of code uses GET_CHANNEL_MEM_INFO + // to fetch the info and update pFifoHalData with the relevant details. + // + + portMemSet(&memInfoParams, 0, sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS)); + memInfoParams.hChannel = RES_GET_HANDLE(pKernelChannel); + + + if ((status = CliGetSubDeviceHandleFromGpu(RES_GET_CLIENT_HANDLE(pKernelChannel), + pGpu, &hSubDevice)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to get subdevice handle.\n"); + DBG_BREAKPOINT(); + SLI_LOOP_RETURN(status); + } + + NV_RM_RPC_CONTROL(pGpu, + RES_GET_CLIENT_HANDLE(pKernelChannel), + hSubDevice, + NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO, + &memInfoParams, + sizeof(NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS), + status); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "RM Control call to fetch channel meminfo failed, hKernelChannel 0x%x\n", + RES_GET_HANDLE(pKernelChannel)); + DBG_BREAKPOINT(); + SLI_LOOP_RETURN(status); + } + + // Find the aperture + if (memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM) + { + apert = ADDR_FBMEM; + } + else if ((memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH) || + (memInfoParams.chMemInfo.inst.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH)) + { + apert = ADDR_SYSMEM; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Unknown aperture, hClient 0x%x, hKernelChannel 0x%x\n", + RES_GET_CLIENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel)); + status = NV_ERR_INVALID_ARGUMENT; + DBG_BREAKPOINT(); + SLI_LOOP_RETURN(status); + } + + status = memdescCreate(&pInstanceBlock->pInstanceBlockDesc, pGpu, + memInfoParams.chMemInfo.inst.size, 1 , NV_TRUE, + ADDR_UNKNOWN, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate instance memory descriptor!\n"); + SLI_LOOP_RETURN(status); + } + + memdescDescribe(pInstanceBlock->pInstanceBlockDesc, apert, memInfoParams.chMemInfo.inst.base, memInfoParams.chMemInfo.inst.size); + + NV_PRINTF(LEVEL_INFO, + "hChannel 0x%x hClient 0x%x, Class ID 0x%x " + "Instance Block @ 0x%llx (%s %x) " + "USERD @ 0x%llx " + "for subdevice %d\n", + RES_GET_HANDLE(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel), + memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0), + memdescGetApertureString(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)), + (NvU32)(memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc)), + (pKernelChannel->pUserdSubDeviceMemDesc[subDevInst] == NULL) ? 
0x0LL : + memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subDevInst], AT_GPU, 0LL), subDevInst); + + SLI_LOOP_END + + return status; +} + +static NV_STATUS +_kchannelSendChannelAllocRpc +( + KernelChannel *pKernelChannel, + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGpfifoParams, + KernelChannelGroup *pKernelChannelGroup, + NvBool bFullSriov +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pRpcParams; + NV_STATUS status = NV_OK; + + pRpcParams = portMemAllocNonPaged(sizeof(*pRpcParams)); + NV_ASSERT_OR_RETURN(pRpcParams != NULL, NV_ERR_NO_MEMORY); + portMemSet(pRpcParams, 0, sizeof(*pRpcParams)); + + pRpcParams->hObjectError = pChannelGpfifoParams->hObjectError; + pRpcParams->hObjectBuffer = 0; + pRpcParams->gpFifoOffset = pChannelGpfifoParams->gpFifoOffset; + pRpcParams->gpFifoEntries = pChannelGpfifoParams->gpFifoEntries; + pRpcParams->flags = pChannelGpfifoParams->flags; + pRpcParams->hContextShare = pChannelGpfifoParams->hContextShare; + pRpcParams->hVASpace = pChannelGpfifoParams->hVASpace; + pRpcParams->engineType = pChannelGpfifoParams->engineType; + pRpcParams->subDeviceId = pChannelGpfifoParams->subDeviceId; + pRpcParams->hObjectEccError = pChannelGpfifoParams->hObjectEccError; + pRpcParams->hPhysChannelGroup = pChannelGpfifoParams->hPhysChannelGroup; + pRpcParams->internalFlags = pChannelGpfifoParams->internalFlags; + + portMemCopy((void*)pRpcParams->hUserdMemory, + sizeof(NvHandle) * NV2080_MAX_SUBDEVICES, + (const void*)pChannelGpfifoParams->hUserdMemory, + sizeof(NvHandle) * NV2080_MAX_SUBDEVICES); + + portMemCopy((void*)pRpcParams->userdOffset, + sizeof(NvU64) * NV2080_MAX_SUBDEVICES, + (const void*)pChannelGpfifoParams->userdOffset, + sizeof(NvU64) * NV2080_MAX_SUBDEVICES); + + // + // These fields are only filled out for GSP client or full SRIOV + // i.e. 
the guest independently allocs ChID and instmem + // + if (IS_GSP_CLIENT(pGpu) || bFullSriov) + { + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + FIFO_INSTANCE_BLOCK *pInstanceBlock = pKernelChannel->pFifoHalData[subdevInst]; + NvU32 runqueue = DRF_VAL(OS04, _FLAGS, _GROUP_CHANNEL_RUNQUEUE, pChannelGpfifoParams->flags); + + NV_ASSERT_OR_ELSE(pInstanceBlock != NULL, status = NV_ERR_INVALID_STATE; goto cleanup); + + portMemCopy(&pRpcParams->errorNotifierMem, + sizeof pRpcParams->errorNotifierMem, + &(pChannelGpfifoParams->errorNotifierMem), + sizeof pChannelGpfifoParams->errorNotifierMem); + portMemCopy(&pRpcParams->eccErrorNotifierMem, + sizeof pRpcParams->eccErrorNotifierMem, + &(pChannelGpfifoParams->eccErrorNotifierMem), + sizeof pChannelGpfifoParams->eccErrorNotifierMem); + + // Fill the instance block + if (pInstanceBlock) + { + pRpcParams->instanceMem.base = + memdescGetPhysAddr(pInstanceBlock->pInstanceBlockDesc, AT_GPU, 0); + pRpcParams->instanceMem.size = pInstanceBlock->pInstanceBlockDesc->Size; + pRpcParams->instanceMem.addressSpace = + memdescGetAddressSpace(pInstanceBlock->pInstanceBlockDesc); + pRpcParams->instanceMem.cacheAttrib = + memdescGetCpuCacheAttrib(pInstanceBlock->pInstanceBlockDesc); + + pRpcParams->ramfcMem.base = + memdescGetPhysAddr(pInstanceBlock->pRamfcDesc, AT_GPU, 0); + pRpcParams->ramfcMem.size = pInstanceBlock->pRamfcDesc->Size; + pRpcParams->ramfcMem.addressSpace = + memdescGetAddressSpace(pInstanceBlock->pRamfcDesc); + pRpcParams->ramfcMem.cacheAttrib = + memdescGetCpuCacheAttrib(pInstanceBlock->pRamfcDesc); + } + + // Fill the userd memory descriptor + if (pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]) + { + pRpcParams->userdMem.base = + memdescGetPhysAddr(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst], AT_GPU, 0); + pRpcParams->userdMem.size = pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]->Size; + pRpcParams->userdMem.addressSpace = + memdescGetAddressSpace(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]); + pRpcParams->userdMem.cacheAttrib = + memdescGetCpuCacheAttrib(pKernelChannel->pUserdSubDeviceMemDesc[subdevInst]); + } + + // Fill the method buffer memory descriptor + if (pKernelChannelGroup->pMthdBuffers != NULL && + pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc != NULL) + { + if (bFullSriov) + { + pRpcParams->mthdbufMem.base = + pKernelChannelGroup->pMthdBuffers[runqueue].bar2Addr; + pRpcParams->mthdbufMem.size = + pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc->Size; + pRpcParams->mthdbufMem.addressSpace = ADDR_VIRTUAL; + pRpcParams->mthdbufMem.cacheAttrib = 0; + } + else + { + pRpcParams->mthdbufMem.base = memdescGetPhysAddr( + pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc, + AT_GPU, 0); + pRpcParams->mthdbufMem.size = + pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc->Size; + pRpcParams->mthdbufMem.addressSpace = memdescGetAddressSpace( + pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc); + pRpcParams->mthdbufMem.cacheAttrib = memdescGetCpuCacheAttrib( + pKernelChannelGroup->pMthdBuffers[runqueue].pMemDesc); + } + } + + if (IS_GSP_CLIENT(pGpu)) + { + // + // Setting these param flags will make the Physical RMAPI use our + // ChID (which is already decided) + // + + NvU32 numChannelsPerUserd = NVBIT(DRF_SIZE(NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE)); // 1<<3 -> 4K / 512B + + pRpcParams->flags = FLD_SET_DRF(OS04, _FLAGS, + _CHANNEL_USERD_INDEX_FIXED, _FALSE, pRpcParams->flags); + pRpcParams->flags = FLD_SET_DRF(OS04, _FLAGS, + _CHANNEL_USERD_INDEX_PAGE_FIXED, _TRUE, 
pRpcParams->flags); + pRpcParams->flags = FLD_SET_DRF_NUM(OS04, _FLAGS, + _CHANNEL_USERD_INDEX_VALUE, pKernelChannel->ChID % numChannelsPerUserd, pRpcParams->flags); + pRpcParams->flags = FLD_SET_DRF_NUM(OS04, _FLAGS, + _CHANNEL_USERD_INDEX_PAGE_VALUE, pKernelChannel->ChID / numChannelsPerUserd, pRpcParams->flags); + + // GSP client needs to pass in privilege level as an alloc param since GSP-RM cannot check this + pRpcParams->internalFlags = + FLD_SET_DRF_NUM(_KERNELCHANNEL, _ALLOC_INTERNALFLAGS, _PRIVILEGE, + pKernelChannel->privilegeLevel, pRpcParams->internalFlags); + pRpcParams->ProcessID = pKernelChannel->ProcessID; + pRpcParams->SubProcessID= pKernelChannel->SubProcessID; + } + } + + NV_RM_RPC_ALLOC_CHANNEL(pGpu, RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_PARENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel), + pRpcParams, &pKernelChannel->ChID, status); + NV_ASSERT_OR_ELSE(status == NV_OK, goto cleanup); + + NV_PRINTF(LEVEL_INFO, + "Alloc Channel chid %d, hClient:0x%x, " + "hParent:0x%x, hObject:0x%x, hClass:0x%x\n", pKernelChannel->ChID, + RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_PARENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel), RES_GET_EXT_CLASS_ID(pKernelChannel)); + +cleanup: + portMemFree(pRpcParams); + + return status; +} + +/*! + * @brief Bind a single channel to a runlist + * + * This is a helper function for kchannelCtrlCmdBind and kchangrpapiCtrlCmdBind + */ +NV_STATUS kchannelBindToRunlist_IMPL +( + KernelChannel *pKernelChannel, + NvU32 localEngineType, + ENGDESCRIPTOR engineDesc +) +{ + OBJGPU *pGpu; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + pGpu = GPU_RES_GET_GPU(pKernelChannel); + + // copied from setRunlistIdByEngineType + if ((engineDesc == ENG_SW) || (engineDesc == ENG_BUS)) + { + return NV_OK; + } + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. 
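/*
 * Illustrative sketch, not part of the original change above: how the USERD
 * index flags set earlier in _kchannelSendChannelAllocRpc decompose a channel
 * ID into a USERD page number and an index within that page. This assumes the
 * INDEX_VALUE field is 3 bits wide, i.e. 8 channels share one 4 KiB USERD page
 * of 512-byte entries, as hinted by the "1<<3 -> 4K / 512B" comment; plain C
 * types stand in for NvU32 and the values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
    unsigned channelsPerUserdPage = 1u << 3;            /* NVBIT(DRF_SIZE(...)) == 8 */
    unsigned chid = 27;                                  /* hypothetical channel ID   */

    unsigned userdIndex = chid % channelsPerUserdPage;   /* -> 3: entry within the page */
    unsigned userdPage  = chid / channelsPerUserdPage;   /* -> 3: which USERD page      */

    printf("ChID %u -> USERD page %u, index %u\n", chid, userdPage, userdIndex);
    return 0;
}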
+ // + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NVA06F_CTRL_BIND_PARAMS params; + + params.engineType = localEngineType; + + NV_RM_RPC_CONTROL(pGpu, + RES_GET_CLIENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel), + NVA06F_CTRL_CMD_BIND, + ¶ms, + sizeof(params), + status); + + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + + status = kfifoRunlistSetIdByEngine_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + pKernelChannel, engineDesc); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to set RunlistID 0x%08x for channel 0x%08x\n", + engineDesc, kchannelGetDebugTag(pKernelChannel)); + SLI_LOOP_BREAK; + } + + SLI_LOOP_END; + + return status; +} + +// +// channelCtrlCmdEventSetNotification +// +// This command handles set notification operations for all tesla, +// fermi, kepler, and maxwell based gpfifo classes: +// +// NV50_DISPLAY (Class: NV5070) +// GF100_CHANNEL_GPFIFO (Class: NV906F) +// KEPLER_CHANNEL_GPFIFO_A (Class: NVA06F) +// KEPLER_CHANNEL_GPFIFO_B (Class: NVA16F) +// KEPLER_CHANNEL_GPFIFO_C (Class: NVA26F) +// MAXWELL_CHANNEL_GPFIFO_A (Class: NVB06F) +// PASCAL_CHANNEL_GPFIFO_A (Class: NVC06F) +// +NV_STATUS +kchannelCtrlCmdEventSetNotification_IMPL +( + KernelChannel *pKernelChannel, + NV906F_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams +) +{ + CLI_CHANNEL_CLASS_INFO classInfo; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + // NV01_EVENT must have been plugged into this subdevice + if (inotifyGetNotificationList(staticCast(pKernelChannel, INotifier)) == NULL) + { + NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", pRmCtrlParams->cmd); + return NV_ERR_INVALID_STATE; + } + + // get channel class-specific properties + CliGetChannelClassInfo(REF_VAL(NVXXXX_CTRL_CMD_CLASS, pRmCtrlParams->cmd), + &classInfo); + + if (pSetEventParams->event >= classInfo.notifiersMaxCount) + { + NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event); + return NV_ERR_INVALID_ARGUMENT; + } + + if ((pSetEventParams->action == classInfo.eventActionSingle) || + (pSetEventParams->action == classInfo.eventActionRepeat)) + { + // must be in disabled state to transition to an active state + if (pKernelChannel->pNotifyActions[pSetEventParams->event] != classInfo.eventActionDisable) + { + return NV_ERR_INVALID_STATE; + } + + pKernelChannel->pNotifyActions[pSetEventParams->event] = pSetEventParams->action; + } + else if (pSetEventParams->action == classInfo.eventActionDisable) + { + pKernelChannel->pNotifyActions[pSetEventParams->event] = pSetEventParams->action; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +NV_STATUS +kchannelCtrlCmdGetClassEngineid_IMPL +( + KernelChannel *pKernelChannel, + NV906F_CTRL_GET_CLASS_ENGINEID_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NV_STATUS status = NV_OK; + + // + // MODS uses hObject 0 to figure out if this call is supported or not. + // In SRIOV VF scenario, plugin asserts if host returns an error code + // for a control call. Adding a temporary work around till MODS submits + // a proper fix. 
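/*
 * Illustrative sketch, not part of the diff: the transition rule enforced by
 * kchannelCtrlCmdEventSetNotification above, restated over plain C enums.
 * SINGLE/REPEAT may only be entered from DISABLE; DISABLE may always be set;
 * anything else is rejected. Names here are hypothetical.
 */
#include <stdbool.h>

typedef enum { ACTION_DISABLE, ACTION_SINGLE, ACTION_REPEAT } NotifyAction;

static bool setNotifyAction(NotifyAction *pCurrent, NotifyAction requested)
{
    if (requested == ACTION_SINGLE || requested == ACTION_REPEAT)
    {
        if (*pCurrent != ACTION_DISABLE)
            return false;           /* must be disabled before arming */
        *pCurrent = requested;
        return true;
    }
    if (requested == ACTION_DISABLE)
    {
        *pCurrent = requested;      /* disabling is always allowed */
        return true;
    }
    return false;                   /* unknown action */
}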
+ // + if (pParams->hObject == NV01_NULL_OBJECT) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + RES_GET_HANDLE(pKernelChannel), + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + NV_ASSERT_OK_OR_RETURN( + kchannelGetClassEngineID_HAL(pGpu, pKernelChannel, pParams->hObject, + &pParams->classEngineID, + &pParams->classID, + &pParams->engineID)); + + if (IS_MIG_IN_USE(pGpu) && + kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, pParams->engineID)) + { + MIG_INSTANCE_REF ref; + NvU32 localEngineType; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + RES_GET_CLIENT_HANDLE(pKernelChannel), + &ref)); + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + pParams->engineID, + &localEngineType)); + + NV_PRINTF(LEVEL_INFO, "Overriding global engine type 0x%x to local engine type 0x%x due to MIG\n", + pParams->engineID, localEngineType); + pParams->engineID = localEngineType; + } + + return status; +} + +NV_STATUS +kchannelCtrlCmdResetChannel_IMPL +( + KernelChannel *pKernelChannel, + NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS *pResetChannelParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + if (!(pRmCtrlParams->bInternal || + pResetChannelParams->resetReason < + NV906F_CTRL_CMD_RESET_CHANNEL_REASON_ENUM_MAX)) + { + return NV_ERR_INVALID_PARAMETER; + } + + // + // All real hardware management is done in the host. + // Do an RPC to the host to do the hardware update and return. 
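/*
 * Illustrative sketch, not part of the change: the routing split applied just
 * below (and by several other control calls in this file), restated in plain
 * C. A kernel RM running as a vGPU guest or as a GSP client forwards the
 * request over RPC; a monolithic RM converts it into an internal control
 * handled by the physical half. The helper name is hypothetical.
 */
#include <stdbool.h>

typedef enum { ROUTE_RPC_TO_HOST, ROUTE_INTERNAL_CTRL } CtrlRoute;

static CtrlRoute routeChannelCtrl(bool bIsVirtual, bool bIsGspClient)
{
    /* vGPU guests and GSP clients defer real hardware management elsewhere */
    if (bIsVirtual || bIsGspClient)
        return ROUTE_RPC_TO_HOST;

    /* otherwise forward to the internal (physical RM) control handler */
    return ROUTE_INTERNAL_CTRL;
}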
+ // + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + RES_GET_HANDLE(pKernelChannel), + NV906F_CTRL_CMD_RESET_CHANNEL, + pResetChannelParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + // + // Do an internal control call to do channel reset + // on Host (Physical) RM + // + return kchannelFwdToInternalCtrl_HAL(pGpu, + pKernelChannel, + NVA06F_CTRL_CMD_INTERNAL_RESET_CHANNEL, + pRmCtrlParams); +} + +// +// channelCtrlCmdEventSetTrigger +// +// This command handles set trigger operations for all kepler and maxwell based +// gpfifo classes: +// +// KEPLER_CHANNEL_GPFIFO_A (Class: NVA06F) +// KEPLER_CHANNEL_GPFIFO_B (Class: NVA16F) +// KEPLER_CHANNEL_GPFIFO_C (Class: NVA26F) +// MAXWELL_CHANNEL_GPFIFO_A (Class: NVB06F) +// PASCAL_CHANNEL_GPFIFO_A (Class: NVC06F) +// +NV_STATUS +kchannelCtrlCmdEventSetTrigger_IMPL +( + KernelChannel *pKernelChannel +) +{ + kchannelNotifyGeneric(pKernelChannel, NVA06F_NOTIFIERS_SW, NULL, 0); + + return NV_OK; +} + +NV_STATUS +kchannelCtrlCmdGpFifoSchedule_IMPL +( + KernelChannel *pKernelChannel, + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + NV_STATUS rmStatus = NV_OK; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + // + // Bug 1737765: Prevent Externally Owned Channels from running unless bound + // It is possible for clients to allocate and schedule channels while + // skipping the UVM registration step which binds the appropriate + // allocations in RM. We need to fail channel scheduling if the channels + // have not been registered with UVM. + // This check is performed on baremetal, CPU-RM and guest-RM + // + NV_ASSERT_OR_RETURN(kchannelIsSchedulable_HAL(pGpu, pKernelChannel), NV_ERR_INVALID_STATE); + + // + // If this was a host-only channel we'll have never set the runlist id, so + // force it here to ensure it is immutable now that the channel is scheduled. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + kchannelSetRunlistSet(pGpu, pKernelChannel, NV_TRUE); + SLI_LOOP_END + + + // + // All real hardware management is done in the host. + // Do an RPC to the host to do the hardware update and return. 
+ // + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + + NV_RM_RPC_CONTROL(pGpu, + RES_GET_CLIENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel), + NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + rmStatus); + + return rmStatus; + } + + // + // Do an internal control call to do channel reset + // on Host (Physical) RM + // + return kchannelFwdToInternalCtrl_HAL(pGpu, + pKernelChannel, + NVA06F_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE, + pRmCtrlParams); +} + +NV_STATUS +kchannelCtrlCmdGetEngineCtxSize_IMPL +( + KernelChannel *pKernelChannel, + NVB06F_CTRL_GET_ENGINE_CTX_SIZE_PARAMS *pCtxSizeParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +kchannelCtrlCmdSetErrorNotifier_IMPL +( + KernelChannel *pKernelChannel, + NVA06F_CTRL_SET_ERROR_NOTIFIER_PARAMS *pSetErrorNotifierParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + Device *pDevice; + RsClient *pClient = RES_GET_CLIENT(pKernelChannel); + RC_NOTIFIER_SCOPE scope; + NV_STATUS rmStatus = NV_OK; + + rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Cannot find device for client 0x%x\n", + pClient->hClient); + return NV_ERR_INVALID_DEVICE; + } + + NV_PRINTF(LEVEL_INFO, + "calling setErrorNotifier on channel: 0x%x, broadcast to TSG: %s\n", + kchannelGetDebugTag(pKernelChannel), + pSetErrorNotifierParams->bNotifyEachChannelInTSG ? "true" : "false"); + + scope = pSetErrorNotifierParams->bNotifyEachChannelInTSG ? + RC_NOTIFIER_SCOPE_TSG : + RC_NOTIFIER_SCOPE_CHANNEL; + + rmStatus = krcErrorSetNotifier(pGpu, GPU_GET_KERNEL_RC(pGpu), + pKernelChannel, + ROBUST_CHANNEL_GR_ERROR_SW_NOTIFY, + kchannelGetEngineType(pKernelChannel), + scope); + return rmStatus; +} + +NV_STATUS +kchannelCtrlCmdBind_IMPL +( + KernelChannel *pKernelChannel, + NVA06F_CTRL_BIND_PARAMS *pParams +) +{ + NvU32 globalEngineType; + NvU32 localEngineType; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + NV_STATUS rmStatus = NV_OK; + ENGDESCRIPTOR engineDesc; + + if (!pParams) + return NV_ERR_INVALID_ARGUMENT; + + // Check if channel belongs to TSG that is not internal RM TSG + if (!pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm) + { + // This may be valid request if we added new channel to TSG that is + // already running. In that case we just have to check that it uses + // the same runlist as whole TSG. 
+ // We do that in fifoRunlistSetId() + NV_PRINTF(LEVEL_INFO, + "Bind requested for channel %d belonging to TSG %d.\n", + kchannelGetDebugTag(pKernelChannel), + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->grpID); + } + + localEngineType = globalEngineType = pParams->engineType; + + if (bMIGInUse) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, localEngineType, + &globalEngineType)); + + } + + NV_PRINTF(LEVEL_INFO, "Binding Channel %d to Engine %d\n", + kchannelGetDebugTag(pKernelChannel), globalEngineType); + + // Translate globalEnginetype -> enginedesc + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + gpuXlateClientEngineIdToEngDesc(pGpu, globalEngineType, &engineDesc)); + + if (rmStatus == NV_OK) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kchannelBindToRunlist(pKernelChannel, localEngineType, engineDesc)); + } + + return rmStatus; +} + +NV_STATUS +kchannelCtrlCmdSetInterleaveLevel_IMPL +( + KernelChannel *pKernelChannel, + NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + NV_STATUS status = NV_OK; + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + RES_GET_CLIENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel), + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, NV_ERR_NOT_SUPPORTED); + } + + status = kchangrpSetInterleaveLevel(pGpu, pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup, pParams->channelInterleaveLevel); + + return status; +} + +NV_STATUS +kchannelCtrlCmdGetInterleaveLevel_IMPL +( + KernelChannel *pKernelChannel, + NVA06F_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + + pParams->channelInterleaveLevel = + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pInterleaveLevel[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + + return NV_OK; +} + +NV_STATUS +kchannelCtrlCmdGpfifoGetWorkSubmitToken_IMPL +( + KernelChannel *pKernelChannel, + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS *pTokenParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NvBool bIsMIGEnabled = IS_MIG_ENABLED(pGpu); + NvBool bIsModsVgpu = IS_VIRTUAL(pGpu) && NV_IS_MODS; + NvBool bIsVgpuRpcNeeded = bIsModsVgpu || (IS_VIRTUAL(pGpu) && + !(IS_VIRTUAL_WITH_SRIOV(pGpu) && !bIsMIGEnabled && + kfifoIsPerRunlistChramEnabled(pKernelFifo))); + // + // vGPU: + // + // Since host is taking care of channel allocations for the guest + // we must call into the host to get the worksubmit token. This + // should go away once the guest starts managing its own channels. 
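/*
 * Illustrative sketch, not part of the diff: the bIsVgpuRpcNeeded predicate
 * computed above, restated in plain C. The RPC is skipped only for a non-MODS
 * SR-IOV guest that is not running MIG and owns its own per-runlist channel
 * RAM; in every other virtual configuration the work submit token must come
 * from the host. The helper name is hypothetical.
 */
#include <stdbool.h>

static bool isWorkSubmitTokenRpcNeeded(bool bIsVirtual,
                                       bool bIsMods,
                                       bool bIsSriov,
                                       bool bIsMigEnabled,
                                       bool bPerRunlistChram)
{
    bool bIsModsVgpu        = bIsVirtual && bIsMods;
    bool bGuestManagesChram = bIsSriov && !bIsMigEnabled && bPerRunlistChram;

    return bIsModsVgpu || (bIsVirtual && !bGuestManagesChram);
}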
+ // + // RPC not needed for SR-IOV vGpu + // + if (bIsVgpuRpcNeeded) + { + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + RES_GET_HANDLE(pKernelChannel), + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + rmStatus); + // + // All done if error or for non-MODS vGPU guest (host did notification in RPC). + // GSP FW is not able to perform the notification, nor is MODS vGPU host, + // so it still needs to be handled by the client/guest outside the RPC. + // + if (rmStatus != NV_OK || + (IS_VIRTUAL(pGpu) && !NV_IS_MODS)) + { + return rmStatus; + } + } + + // + // For GSP client or MODS vGPU guest, pTokenParams->workSubmitToken already filled by RPC. + // For baremetal RM, generate it here. + // + if (!bIsModsVgpu) + { + NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE); + rmStatus = kfifoGenerateWorkSubmitToken_HAL(pGpu, pKernelFifo, pKernelChannel, + &pTokenParams->workSubmitToken, + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bIsCallingContextVgpuPlugin); + NV_CHECK_OR_RETURN(LEVEL_INFO, rmStatus == NV_OK, rmStatus); + } + + rmStatus = kchannelNotifyWorkSubmitToken(pGpu, pKernelChannel, pTokenParams->workSubmitToken); + return rmStatus; +} + +NV_STATUS +kchannelCtrlCmdGpfifoSetWorkSubmitTokenNotifIndex_IMPL +( + KernelChannel *pKernelChannel, + NVC36F_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX_PARAMS *pParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvBool bIsMIGEnabled = IS_MIG_ENABLED(pGpu); + NvBool bIsVgpuRpcNeeded = IS_VIRTUAL(pGpu) && + !(IS_VIRTUAL_WITH_SRIOV(pGpu) && !bIsMIGEnabled && + kfifoIsPerRunlistChramEnabled(pKernelFifo)); + + // + // vGPU: + // + // Since vgpu plugin is required to update notifier for guest, send an RPC + // to host RM for the plugin to hook. + // RPC not needed for SR-IOV vGpu. + // + // GSP-RM: + // + // Notification is done in CPU-RM, so RPC is not made to FW-RM. + // + if (!NV_IS_MODS && bIsVgpuRpcNeeded) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + RES_GET_HANDLE(pKernelChannel), + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + rmStatus); + return rmStatus; + } + + rmStatus = kchannelUpdateWorkSubmitTokenNotifIndex(pGpu, pKernelChannel, pParams->index); + return rmStatus; +} + +NV_STATUS +kchannelRegisterChild_IMPL +( + KernelChannel *pKernelChannel, + ChannelDescendant *pObject +) +{ + NvU16 firstObjectClassID; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + // + // On recent GPU architectures such as FERMI, SetObject operations + // require an EngineID:ClassID tuple as an argument, rather than + // an object handle. In order to be able to differentiate between + // different instances of any given software class, the ClassID + // field needs to be unique within the FIFO context. The code below + // attempts to find a qualifying 16-bit ClassID. 
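/*
 * Illustrative sketch, not RM code: the wrap-around search for a free 16-bit
 * class ID used by kchannelRegisterChild below, shown over a toy "already in
 * use" table instead of the channel's ChannelDescendant list. 0 is reserved
 * and skipped; if the search wraps back to its starting point, every ID is
 * taken. The helper name and table are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static bool findFreeClassId(const bool inUse[65536], uint16_t *pNextId)
{
    const uint16_t first = *pNextId;

    for (;;)
    {
        (*pNextId)++;                  /* uint16_t arithmetic wraps naturally */
        if (*pNextId == first)
            return false;              /* searched the full 16-bit space */
        if (*pNextId == 0)
            continue;                  /* 0 is never handed out */
        if (!inUse[*pNextId])
            return true;               /* found a free, non-zero class ID */
    }
}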
+ // + if (pObject->resourceDesc.engDesc == ENG_SW) + { + RS_ORDERED_ITERATOR it; + RsClient *pClient; + ChannelDescendant *pMatchingObject = NULL; + + firstObjectClassID = pKernelChannel->nextObjectClassID; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, RES_GET_CLIENT_HANDLE(pKernelChannel), &pClient)); + + do + { + if (++pKernelChannel->nextObjectClassID == firstObjectClassID) + { + NV_PRINTF(LEVEL_ERROR, "channel %08x:%08x: out of handles!\n", + RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_HANDLE(pKernelChannel)); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + if (pKernelChannel->nextObjectClassID == 0) + continue; + + it = clientRefOrderedIter(pClient, RES_GET_REF(pKernelChannel), classId(ChannelDescendant), NV_FALSE); + + while (clientRefOrderedIterNext(pClient, &it)) + { + pMatchingObject = dynamicCast(it.pResourceRef->pResource, ChannelDescendant); + NV_ASSERT_OR_ELSE(pMatchingObject != NULL, continue); + + if ((pMatchingObject->resourceDesc.engDesc == ENG_SW) && + (pMatchingObject->classID == pKernelChannel->nextObjectClassID)) + { + break; + } + + pMatchingObject = NULL; + } + } + while (pMatchingObject != NULL); + + pObject->classID = pKernelChannel->nextObjectClassID; + } + + return kfifoAddObject_HAL(pGpu, pKernelFifo, pObject); +} + +NV_STATUS +kchannelDeregisterChild_IMPL +( + KernelChannel *pKernelChannel, + ChannelDescendant *pObject +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + status = kfifoDeleteObject_HAL(pGpu, pKernelFifo, pObject); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not delete hal resources with object\n"); + DBG_BREAKPOINT(); + } + + return status; +} + +void +kchannelGetChildIterator +( + KernelChannel *pKernelChannel, + NvU32 classID, + NvU32 engineID, + KernelChannelChildIterator *pIter +) +{ + RsClient *pClient = RES_GET_CLIENT(pKernelChannel); + NV_ASSERT_OR_RETURN_VOID(pIter != NULL); + + portMemSet(pIter, 0, sizeof(*pIter)); + pIter->classID = classID; + pIter->engineID = engineID; + pIter->rsIter = clientRefOrderedIter(pClient, RES_GET_REF(pKernelChannel), classId(ChannelDescendant), NV_FALSE); +} + +ChannelDescendant * +kchannelGetNextChild +( + KernelChannelChildIterator *pIter +) +{ + ChannelDescendant *pChild; + + NV_ASSERT_OR_RETURN(pIter != NULL, NULL); + + while (clientRefOrderedIterNext(pIter->rsIter.pClient, &pIter->rsIter)) + { + pChild = dynamicCast(pIter->rsIter.pResourceRef->pResource, ChannelDescendant); + NV_ASSERT_OR_RETURN(pChild != NULL, NULL); + + // Continue to the next child if it doesn't match these filters: + if (pIter->engineID != pChild->resourceDesc.engDesc) + continue; + if (pIter->classID != 0) + { + if ((RES_GET_EXT_CLASS_ID(pChild) != pIter->classID) && + (pChild->classID != pIter->classID)) + continue; + } + + // Yield this matching child + return pChild; + } + + return NULL; +} + +ChannelDescendant * +kchannelGetOneChild +( + KernelChannel *pKernelChannel, + NvU32 classID, + NvU32 engineID +) +{ + KernelChannelChildIterator iter; + + kchannelGetChildIterator(pKernelChannel, classID, engineID, &iter); + return kchannelGetNextChild(&iter); +} + +/** + * @brief Gets object iterator for a channel or channel group + * + * @param[in] pKernelChannel + * @param[in] classNum + * @param[in] engDesc + * @param[out] pIt + * + */ +void +kchannelGetChildIterOverGroup +( + KernelChannel *pKernelChannel, + NvU32 classNum, + NvU32 engDesc, + KernelChannelChildIterOverGroup *pIt +) +{ + 
NV_ASSERT_OR_RETURN_VOID(pIt != NULL); + portMemSet(pIt, 0, sizeof(*pIt)); + + NV_ASSERT_OR_RETURN_VOID(pKernelChannel != NULL); + + pIt->classNum = classNum; + pIt->engDesc = engDesc; + + pIt->channelNode.pKernelChannel = + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pKernelChannel; + pIt->channelNode.pNext = + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pNext; + + kchannelGetChildIterator(pIt->channelNode.pKernelChannel, pIt->classNum, pIt->engDesc, &pIt->kchannelIter); +} + +/** + * @brief Get the next object based on given class/engine tag. + * When the class number is 0, it is ignored. + * + * @param[in] pIt + * + * Returns: found child or NULL + * + */ +ChannelDescendant * +kchannelGetNextChildOverGroup +( + KernelChannelChildIterOverGroup *pIt +) +{ + PCHANNEL_NODE pHead = NULL; + ChannelDescendant *pObject = NULL; + + NV_ASSERT_OR_RETURN(pIt != NULL, NULL); + + // Start iterating from the given object (if any) of the given channel. + pHead = &pIt->channelNode; + + while ((pHead != NULL) && (pHead->pKernelChannel != NULL)) + { + pObject = kchannelGetNextChild(&pIt->kchannelIter); + + if (pObject != NULL) + break; + + // + // If there are no more objects to inspect in the given channel, + // move to the next channel (if any, for TSGs). + // + pHead = pHead->pNext; + if (pHead != NULL) + { + NV_ASSERT_OR_ELSE(pHead->pKernelChannel != NULL, break); + // Re-initialize the channeldescendant iterator based on this channel + kchannelGetChildIterator(pHead->pKernelChannel, pIt->classNum, pIt->engDesc, &pIt->kchannelIter); + } + } + + // Cache off the next channel to start searching from in future iterations. + pIt->channelNode.pKernelChannel = pHead ? pHead->pKernelChannel : NULL; + pIt->channelNode.pNext = pHead ? pHead->pNext : NULL; + + return pObject; +} + +NV_STATUS +kchannelFindChildByHandle +( + KernelChannel *pKernelChannel, + NvHandle hResource, + ChannelDescendant **ppObject +) +{ + RsClient *pClient = RES_GET_CLIENT(pKernelChannel); + RsResourceRef *pResourceRef = NULL; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, clientGetResourceRef(pClient, hResource, &pResourceRef)); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pResourceRef->pParentRef->hResource == RES_GET_HANDLE(pKernelChannel), NV_ERR_OBJECT_NOT_FOUND); + + *ppObject = dynamicCast(pResourceRef->pResource, ChannelDescendant); + NV_CHECK_OR_RETURN(LEVEL_ERROR, *ppObject != NULL, NV_ERR_OBJECT_NOT_FOUND); + + return NV_OK; +} + +static NV_STATUS +_kchannelClearVAList +( + OBJGPU *pGpu, + VA_LIST *pVaList, + NvBool bUnmap +) +{ + VA_LISTIter it; + NV_STATUS status; + + // + // Subcontext handling + // We need to unmap the mappings on all the subcontext, since the this call will be made only on one of the TSG channels. 
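/*
 * Usage sketch (assumes the surrounding RM environment, so it is not a
 * standalone program): walking every SW-engine ChannelDescendant across all
 * channels of the TSG that pKernelChannel belongs to, using the iterator
 * defined above. A classNum of 0 means "any class".
 */
{
    KernelChannelChildIterOverGroup it;
    ChannelDescendant *pChild;

    kchannelGetChildIterOverGroup(pKernelChannel, 0 /* any class */, ENG_SW, &it);

    while ((pChild = kchannelGetNextChildOverGroup(&it)) != NULL)
    {
        /* visit pChild; e.g. compare pChild->classID against a candidate ID */
    }
}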
+ // + it = mapIterAll(pVaList); + while (mapIterNext(&it)) + { + OBJVASPACE *pVAS = (OBJVASPACE *) NvP64_VALUE(mapKey(pVaList, it.pValue)); + VA_INFO *pVaInfo; + NvU64 vaddr = 0, vaddrCached = 0; + + if (pVAS == NULL) + { + // Ignore the special node + continue; + } + + pVaInfo = (VA_INFO *)it.pValue; + vaddrCached = pVaInfo->vAddr; + + pVaInfo->refCnt = 1; + + status = vaListRemoveVa(pVaList, pVAS); + NV_ASSERT(status == NV_OK); + + status = vaListFindVa(pVaList, pVAS, &vaddr); + if (status != NV_OK) + { + if (status == NV_ERR_OBJECT_NOT_FOUND) + status = NV_OK; + else + { + NV_PRINTF(LEVEL_ERROR, "vaListFindVa failed with unexpected status = 0x%x\n", status); + NV_ASSERT(0); + } + it = mapIterAll(pVaList); + + if (bUnmap) + { + dmaUnmapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, vaddrCached); + } + } + } + + return NV_OK; +} + +/** + * @brief Set or clear the Engine Context Memdesc. + * + * Should be committed to hardware after this using channelCommitEngineContext(). + * Should be unmapped before cleared/changed using kchannelUnmapEngineCtxBuf() + * + * @param[in] pGpu + * @param[in] pKernelChannel + * @param[in] engine + * @param[in] pMemDesc the new memdesc to assign, or NULL to clear + * + * Returns: status + */ +NV_STATUS +kchannelSetEngineContextMemDesc_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU32 engine, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_STATUS status = NV_OK; + ENGINE_CTX_DESCRIPTOR *pEngCtxDesc; + KernelChannelGroup *pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup; + + NV_PRINTF(LEVEL_INFO, + "ChID %x engine 0x%x pMemDesc %p\n", + kchannelGetDebugTag(pKernelChannel), engine, pMemDesc); + + NV_ASSERT_OR_RETURN(engine != ENG_FIFO, NV_ERR_INVALID_PARAMETER); + + if (IS_GR(engine)) + { + NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel)); + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + + // Get or allocate the EngCtxDesc + pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + + if (pEngCtxDesc == NULL && pMemDesc == NULL) + { + // There is no need to clean up or alloc anything. + SLI_LOOP_CONTINUE; + } + + if (pEngCtxDesc != NULL) + { + // Cleanup for the engine context that existed before + if (pEngCtxDesc->pMemDesc != NULL) + { + memdescFree(pEngCtxDesc->pMemDesc); + memdescDestroy(pEngCtxDesc->pMemDesc); + } + + // + // If the VAList is not managed by RM, remove all VAs here. + // If the VAList is managed by RM, unmap and remove all VAs in kchannelUnmapEngineCtxBuf() + // + if (!vaListGetManaged(&pEngCtxDesc->vaList)) + { + status = _kchannelClearVAList(pGpu, &pEngCtxDesc->vaList, NV_FALSE); + NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail)); + } + + // + // Free all subcontext headers in TSG. This usually happens while last channel in TSG is being freed. + // we make sure all GR objects have already been freed before this point(see kgrctxUnmapAssociatedCtxBuffers). + // Other cases where this is exercised are special cases like golden image init where ctx buffers are swapped, + // control calls which force ctx buffer unmap, re-initializing of virtual ctxs, evicting a ctx, fifoStateDestroy etc. 
+ // + if(IS_GR(engine)) + { + status = kchangrpFreeGrSubcontextHdrs_HAL(pGpu, pKernelChannelGroup); + } + } + else + { + NV_ASSERT_OK_OR_ELSE(status, + kchangrpAllocEngineContextDescriptor(pGpu, pKernelChannelGroup), + SLI_LOOP_GOTO(fail)); + pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + NV_ASSERT_OR_ELSE(pEngCtxDesc != NULL, status = NV_ERR_NO_MEMORY; SLI_LOOP_GOTO(fail)); + } + + if (pMemDesc != NULL) + { + // We are setting a memdesc + if (pMemDesc->Allocated > 0) + pMemDesc->Allocated++; + memdescAddRef(pMemDesc); + + if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL) + { + NvU64 virtAddr; + + // Since the memdesc is already virtual, we do not manage it + status = vaListSetManaged(&pEngCtxDesc->vaList, NV_FALSE); + NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail)); + + // memdescGetPhysAddr of a virtual memdesc is a virtual addr + virtAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + status = vaListAddVa(&pEngCtxDesc->vaList, pKernelChannel->pVAS, virtAddr); + NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail)); + } + } + + // Assign the memdesc (or NULL) + pEngCtxDesc->pMemDesc = pMemDesc; + pEngCtxDesc->engDesc = engine; + + SLI_LOOP_END + +fail: + return status; +} + +/** + * @brief Unmaps everything from the Engine Context Memdesc. + * + * @param[in] pGpu + * @param[in] pKernelChannel + * @param[in] engine + * + * Returns: status + */ +NV_STATUS +kchannelUnmapEngineCtxBuf_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU32 engine +) +{ + NV_STATUS status = NV_OK; + ENGINE_CTX_DESCRIPTOR *pEngCtxDesc; + + NV_PRINTF(LEVEL_INFO, + "ChID %x engine 0x%x\n", + kchannelGetDebugTag(pKernelChannel), engine); + + NV_ASSERT_OR_RETURN(engine != ENG_FIFO, NV_ERR_INVALID_PARAMETER); + + if (IS_GR(engine)) + { + NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel)); + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pEngCtxDesc = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + + // EngCtxDesc and MemDesc will be here, or else nothing can be mapped + if ((pEngCtxDesc == NULL) || (pEngCtxDesc->pMemDesc == NULL)) + { + SLI_LOOP_CONTINUE; + } + + // Only unmaps if RM manages this VAList + if (!vaListGetManaged(&pEngCtxDesc->vaList)) + { + SLI_LOOP_CONTINUE; + } + + // Clear VA list, including unmap + NV_ASSERT(memdescGetAddressSpace(memdescGetMemDescFromGpu(pEngCtxDesc->pMemDesc, pGpu)) != ADDR_VIRTUAL); + status = _kchannelClearVAList(pGpu, &pEngCtxDesc->vaList, NV_TRUE); + NV_ASSERT_OR_ELSE(status == NV_OK, SLI_LOOP_GOTO(fail)); + + SLI_LOOP_END + +fail: + return status; +} + +// Check that BcState stays consistent for GR channel engine context +NV_STATUS +kchannelCheckBcStateCurrent_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ +#define KERNEL_CHANNEL_BCSTATE_UNINITIALIZED (0) +#define KERNEL_CHANNEL_BCSTATE_DISABLED (1) +#define KERNEL_CHANNEL_BCSTATE_ENABLED (2) + + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + NvU8 channelBcStateEnum = bBcState ? KERNEL_CHANNEL_BCSTATE_ENABLED : KERNEL_CHANNEL_BCSTATE_DISABLED; + + NV_PRINTF( + LEVEL_INFO, + "GPU = %d, ChID = %d, bcStateCurrent = %d, channelBcStateEnum = %d\n", + pGpu->gpuInstance, + kchannelGetDebugTag(pKernelChannel), + pKernelChannel->bcStateCurrent, + channelBcStateEnum); + + // Check that the BC status did not change - 0 = first call, 1 = disable, 2 = enable. 
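/*
 * Illustrative sketch, not part of the change: the latch-and-verify pattern
 * used just below, in plain C. The first caller records whether broadcast
 * (BC) mode was enabled; every later caller must observe the same mode,
 * otherwise the engine context was touched with inconsistent BC state. Names
 * and types here are simplified stand-ins.
 */
#include <stdbool.h>

enum { BCSTATE_UNINITIALIZED = 0, BCSTATE_DISABLED = 1, BCSTATE_ENABLED = 2 };

static bool checkBcStateCurrent(unsigned char *pLatchedState, bool bBcEnabled)
{
    unsigned char current = bBcEnabled ? BCSTATE_ENABLED : BCSTATE_DISABLED;

    if (*pLatchedState == BCSTATE_UNINITIALIZED)
        *pLatchedState = current;      /* first call: latch the observed mode */

    return *pLatchedState == current;  /* later calls: must match the latch */
}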
+ if (pKernelChannel->bcStateCurrent == KERNEL_CHANNEL_BCSTATE_UNINITIALIZED) + { + pKernelChannel->bcStateCurrent = channelBcStateEnum; + } + NV_ASSERT_OR_RETURN(pKernelChannel->bcStateCurrent == channelBcStateEnum, NV_ERR_INVALID_STATE); + + return NV_OK; +} + +// Map the Engine Context Memdesc and add it's VAddr +NV_STATUS +kchannelMapEngineCtxBuf_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU32 engine +) +{ + OBJVASPACE *pVAS = NULL; + NV_STATUS status = NV_OK; + ENGINE_CTX_DESCRIPTOR *pEngCtx; + NvU64 addr; + MEMORY_DESCRIPTOR *pTempMemDesc; + OBJGVASPACE *pGVAS; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + NV_ASSERT_OR_RETURN(engine != ENG_FIFO, NV_ERR_INVALID_ARGUMENT); + + if (IS_GR(engine)) + { + NV_ASSERT_OK_OR_RETURN(kchannelCheckBcStateCurrent(pGpu, pKernelChannel)); + } + + NV_PRINTF(LEVEL_INFO, "ChID %d engine %s (0x%x) \n", + kchannelGetDebugTag(pKernelChannel), + kfifoGetEngineName_HAL(GPU_GET_KERNEL_FIFO(pGpu), ENGINE_INFO_TYPE_ENG_DESC, engine), + engine); + + pVAS = pKernelChannel->pVAS; + pGVAS = dynamicCast(pVAS, OBJGVASPACE); + NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_INVALID_STATE); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + pEngCtx = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + NV_ASSERT_OR_ELSE(pEngCtx != NULL, status = NV_ERR_INVALID_STATE; goto fail); + + pTempMemDesc = pEngCtx->pMemDesc; + NV_ASSERT_OR_ELSE(pTempMemDesc != NULL, status = NV_ERR_INVALID_STATE; goto fail); + + // + // For virtual context, UMD has already alloced/mapped the engine context. + // So simply get the vaddr + // + + status = vaListFindVa(&pEngCtx->vaList, pVAS, &addr); + if (status == NV_OK) + { + // VAddr already exists and needs no action + SLI_LOOP_CONTINUE; + } + else if (status == NV_ERR_OBJECT_NOT_FOUND) + { + NvU32 flags = DMA_ALLOC_VASPACE_NONE; + if (gvaspaceIsExternallyOwned(pGVAS)) + { + // We should never land up here if VA space is externally owned! + NV_ASSERT_FAILED("Externally owned object not found"); + status = NV_ERR_INVALID_OPERATION; + goto fail; + } + + kfifoGetCtxBufferMapFlags_HAL(pGpu, pKernelFifo, engine, &flags); + + status = dmaMapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, pTempMemDesc, &addr, + flags, DMA_UPDATE_VASPACE_FLAGS_NONE); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not map context buffer for engine 0x%x\n", + engine); + goto fail; + } + else + { + status = vaListAddVa(&pEngCtx->vaList, pVAS, addr); + NV_ASSERT(status == NV_OK); + } + } + else + { + NV_ASSERT_OK_FAILED("vaListFindVa", status); + goto fail; + } + +fail: + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + SLI_LOOP_END + + return status; +} + +/** + * @brief Updates the notifier index with which to update the work submit + * notifier on request. 
+ * + * @param[IN] pGpu OBJGPU + * @param[in] pKernelChannel KernelChannel + * @param[in] index Updated notifier index + * + * @return NV_OK + * NV_ERR_OUT_OF_RANGE if index is beyond the bounds of the notifier + */ +NV_STATUS +kchannelUpdateWorkSubmitTokenNotifIndex_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU32 index +) +{ + NvHandle hNotifier; + RsClient *pClient = RES_GET_CLIENT(pKernelChannel); + Device *pDevice; + Memory *pMemory; + ContextDma *pContextDma; + NvU32 addressSpace; + NV_STATUS status; + + hNotifier = pKernelChannel->hErrorContext; + + // Clobbering error notifier index is illegal + NV_CHECK_OR_RETURN(LEVEL_INFO, index != NV_CHANNELGPFIFO_NOTIFICATION_TYPE_ERROR, + NV_ERR_INVALID_ARGUMENT); + + status = deviceGetByInstance(pClient, gpuGetDeviceInstance(pGpu), &pDevice); + if (status != NV_OK) + return NV_ERR_INVALID_DEVICE; + + if (NV_OK == memGetByHandleAndDevice(pClient, hNotifier, RES_GET_HANDLE(pDevice), &pMemory)) + { + addressSpace = memdescGetAddressSpace(pMemory->pMemDesc); + + NV_CHECK_OR_RETURN(LEVEL_INFO, pMemory->Length >= ((index + 1) * sizeof(NvNotification)), + NV_ERR_OUT_OF_RANGE); + switch (addressSpace) + { + case ADDR_VIRTUAL: + { + NvU64 physAddr = memdescGetPhysAddr(pMemory->pMemDesc, AT_GPU_VA, 0); + PCLI_DMA_MAPPING_INFO pDmaMappingInfo; + + NV_CHECK_OR_RETURN(LEVEL_INFO, + CliGetDmaMappingInfo(pClient->hClient, + RES_GET_HANDLE(pDevice), + RES_GET_HANDLE(pMemory), + physAddr, + gpumgrGetDeviceGpuMask(pGpu->deviceInstance), + &pDmaMappingInfo), + NV_ERR_GENERIC); + + NV_CHECK_OR_RETURN(LEVEL_INFO, pDmaMappingInfo->pMemDesc->Size >= ((index + 1) * sizeof(NvNotification)), + NV_ERR_OUT_OF_RANGE); + break; + } + case ADDR_FBMEM: + // fall through + case ADDR_SYSMEM: + // Covered by check prior to switch/case + break; + default: + return NV_ERR_NOT_SUPPORTED; + } + } + else if (NV_OK == ctxdmaGetByHandle(pClient, hNotifier, &pContextDma)) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= (((index + 1) * sizeof(NvNotification)) - 1), + NV_ERR_OUT_OF_RANGE); + } + else + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN] + = index; + + return NV_OK; +} + +/** + * @brief Updates the work submit notifier passed to the channel during channel + * creation with the new work submit token. + * + * @param[IN] pGpu OBJGPU + * @param[in] pKernelChannel KernelChannel + * @param[in] token Work submit token to notify clients of + * + * @return NV_OK on successful notify + * NV_OK if client has not set up the doorbell notifier. This should + * be an error once all clients have been updated. 
+ */ +NV_STATUS +kchannelNotifyWorkSubmitToken_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvU32 token +) +{ + NV_STATUS status = NV_OK; + NvHandle hNotifier; + RsClient *pClient = RES_GET_CLIENT(pKernelChannel); + Device *pDevice; + Memory *pMemory; + ContextDma *pContextDma; + NvU32 addressSpace; + NvU16 notifyStatus = 0x0; + NvU32 index; + + hNotifier = pKernelChannel->hErrorContext; + index = pKernelChannel->notifyIndex[NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN]; + + status = deviceGetByInstance(pClient, gpuGetDeviceInstance(pGpu), &pDevice); + if (status != NV_OK) + return NV_ERR_INVALID_DEVICE; + + notifyStatus = + FLD_SET_DRF(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _IN_PROGRESS, _TRUE, notifyStatus); + notifyStatus = + FLD_SET_DRF_NUM(_CHANNELGPFIFO, _NOTIFICATION_STATUS, _VALUE, 0xFFFF, notifyStatus); + + if (NV_OK == memGetByHandleAndDevice(pClient, hNotifier, RES_GET_HANDLE(pDevice), &pMemory)) + { + addressSpace = memdescGetAddressSpace(pMemory->pMemDesc); + + // + // If clients did not allocate enough memory for the doorbell + // notifier, return NV_OK so as not to regress older clients + // + NV_CHECK_OR_RETURN(LEVEL_INFO, pMemory->Length >= ((index + 1) * sizeof(NvNotification)), NV_OK); + switch (addressSpace) + { + case ADDR_VIRTUAL: + { + NvU64 physAddr = memdescGetPhysAddr(pMemory->pMemDesc, AT_GPU_VA, 0); + PCLI_DMA_MAPPING_INFO pDmaMappingInfo; + + NV_CHECK_OR_RETURN(LEVEL_INFO, + CliGetDmaMappingInfo(pClient->hClient, + RES_GET_HANDLE(pDevice), + RES_GET_HANDLE(pMemory), + physAddr, + gpumgrGetDeviceGpuMask(pGpu->deviceInstance), + &pDmaMappingInfo), + NV_OK); + + // + // If clients did not map enough memory for the doorbell + // notifier, return NV_OK so as not to regress older clients + // + NV_CHECK_OR_RETURN(LEVEL_INFO, pDmaMappingInfo->pMemDesc->Size >= ((index + 1) * sizeof(NvNotification)), NV_OK); + + status = notifyFillNotifierGPUVA(pGpu, pClient->hClient, RES_GET_HANDLE(pMemory), physAddr, + token, 0x0U, notifyStatus, index); + break; + } + case ADDR_FBMEM: + // fall through + case ADDR_SYSMEM: + status = notifyFillNotifierMemory(pGpu, pMemory, token, 0x0U, notifyStatus, index); + break; + default: + status = NV_ERR_NOT_SUPPORTED; + break; + } + } + else if (NV_OK == ctxdmaGetByHandle(pClient, hNotifier, &pContextDma)) + { + // + // If clients did not allocate enough memory for the doorbell + // notifier, return NV_OK so as not to regress older clients + // + NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= (((index + 1) * sizeof(NvNotification)) - 1), NV_OK); + + status = notifyFillNotifierOffset(pGpu, pContextDma, token, 0x0U, notifyStatus, + (sizeof(NvNotification) * index)); + + } + + return status; +} + +/** + * @brief Alloc and set up pNotifyActions + * + * @param[in] pKernelChannel + * @param[in] classNuml Channel class + * + * @return NV_OK or error code + */ +static NV_STATUS +_kchannelSetupNotifyActions +( + KernelChannel *pKernelChannel, + NvU32 classNum +) +{ + CLI_CHANNEL_CLASS_INFO classInfo; + + // Allocate notifier action table for the maximum supported by this class + CliGetChannelClassInfo(classNum, &classInfo); + if (classInfo.notifiersMaxCount > 0) + { + pKernelChannel->pNotifyActions = portMemAllocNonPaged( + classInfo.notifiersMaxCount * sizeof(*pKernelChannel->pNotifyActions)); + if (pKernelChannel->pNotifyActions == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pKernelChannel->pNotifyActions, 0, + classInfo.notifiersMaxCount * sizeof(*pKernelChannel->pNotifyActions)); + } + + return NV_OK; +} // end of 
_kchannelSetupNotifyActions() + +/** + * @brief Cleans up pNotifyActions + * + * @param[in] pKernelChannel + */ +static void +_kchannelCleanupNotifyActions +( + KernelChannel *pKernelChannel +) +{ + // free memory associated with notify actions table + portMemFree(pKernelChannel->pNotifyActions); + pKernelChannel->pNotifyActions = NULL; +} // end of _kchannelCleanupNotifyActions() + +static NV_STATUS +_kchannelNotifyOfChid +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + RsClient *pRsClient +) +{ + ContextDma *pContextDma; + + // + // Return the chid to the drivers in the error context DMA + // + // We need to update this when virtual channel gets mapped in. + // + + if ((ctxdmaGetByHandle(pRsClient, pKernelChannel->hErrorContext, &pContextDma)) == NV_OK) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, pContextDma->Limit >= sizeof(NvNotification) - 1, NV_ERR_INVALID_ARGUMENT); + notifyFillNotifier(pGpu, pContextDma, pKernelChannel->ChID, 0, NV_OK); + } + + return NV_OK; +} + +NvU32 +kchannelGetGfid_IMPL +( + KernelChannel *pKernelChannel +) +{ + return pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->gfid; +} + +NvBool +kchannelIsCpuMapped +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] & + KERNEL_CHANNEL_SW_STATE_CPU_MAP); +} + +void +kchannelSetCpuMapped +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvBool bCpuMapped +) +{ + if (bCpuMapped) + { + pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |= + KERNEL_CHANNEL_SW_STATE_CPU_MAP; + } + else + { + pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &= + ~(KERNEL_CHANNEL_SW_STATE_CPU_MAP); + } +} + +NvBool +kchannelIsRunlistSet +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel +) +{ + return !!(pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] & + KERNEL_CHANNEL_SW_STATE_RUNLIST_SET); +} + +void +kchannelSetRunlistSet +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NvBool bRunlistSet +) +{ + if (bRunlistSet) + { + pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] |= + KERNEL_CHANNEL_SW_STATE_RUNLIST_SET; + } + else + { + pKernelChannel->swState[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] &= + ~(KERNEL_CHANNEL_SW_STATE_RUNLIST_SET); + } +} + +NV_STATUS +kchannelGetChannelPhysicalState_KERNEL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + NV_STATUS status = NV_OK; + + // Get the physical state from GSP + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + NV_ASSERT_OK_OR_RETURN(status); + + return NV_OK; +} + +NV_STATUS +kchannelMapUserD_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + RS_PRIV_LEVEL privLevel, + NvU64 offset, + NvU32 protect, + NvP64 *ppCpuVirtAddr, + NvP64 *ppPriv +) +{ + NV_STATUS status = NV_OK; + NvU64 userBase; + NvU64 userOffset; + NvU64 userSize; + NvU32 cachingMode = NV_MEMORY_UNCACHED; + + // if USERD is allocated by client + if (pKernelChannel->bClientAllocatedUserD) + { + return NV_OK; + } + + status = kchannelGetUserdInfo_HAL(pGpu, pKernelChannel, + &userBase, &userOffset, &userSize); + + if (status != NV_OK) + return status; + + + if (userBase == pGpu->busInfo.gpuPhysAddr) + { + // Create a mapping of BAR0 + status = 
osMapGPU(pGpu, privLevel, NvU64_LO32(userOffset+offset), + NvU64_LO32(userSize), protect, ppCpuVirtAddr, ppPriv); + goto done; + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING)) + { + cachingMode = NV_MEMORY_CACHED; + } + + // + // If userBase is not bar0, then it is bar1 and we create a regular memory + // mapping. + // + if (privLevel >= RS_PRIV_LEVEL_KERNEL) + { + status = osMapPciMemoryKernel64(pGpu, userBase + userOffset + offset, + userSize, protect, ppCpuVirtAddr, cachingMode); + } + else + { + status = osMapPciMemoryUser(pGpu->pOsGpuInfo, + userBase + userOffset + offset, + userSize, protect, ppCpuVirtAddr, + ppPriv, cachingMode); + } + if (!((status == NV_OK) && *ppCpuVirtAddr)) + { + NV_PRINTF(LEVEL_ERROR, + "BAR1 offset 0x%llx for USERD of channel %x could not be cpu mapped\n", + userOffset, kchannelGetDebugTag(pKernelChannel)); + } + +done: + + // Indicate channel is mapped + if (status == NV_OK) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + kchannelSetCpuMapped(pGpu, pKernelChannel, NV_TRUE); + SLI_LOOP_END + } + + return status; +} + +void +kchannelUnmapUserD_IMPL +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + RS_PRIV_LEVEL privLevel, + NvP64 *ppCpuVirtAddr, + NvP64 *ppPriv +) +{ + NV_STATUS status; + NvU64 userBase; + NvU64 userOffset; + NvU64 userSize; + + if (pKernelChannel->bClientAllocatedUserD) + { + return; + } + + status = kchannelGetUserdInfo_HAL(pGpu, pKernelChannel, + &userBase, &userOffset, &userSize); + + NV_ASSERT_OR_RETURN_VOID(status == NV_OK); + + if (userBase == pGpu->busInfo.gpuPhysAddr) + { + osUnmapGPU(pGpu->pOsGpuInfo, privLevel, *ppCpuVirtAddr, + NvU64_LO32(userSize), *ppPriv); + } + else + { + // GF100+ + // Unmap Cpu virt mapping + if (privLevel >= RS_PRIV_LEVEL_KERNEL) + { + osUnmapPciMemoryKernel64(pGpu, *ppCpuVirtAddr); + } + else + { + osUnmapPciMemoryUser(pGpu->pOsGpuInfo, *ppCpuVirtAddr, + userSize, *ppPriv); + } + } + + // Indicate channel is !mapped + kchannelSetCpuMapped(pGpu, pKernelChannel, NV_FALSE); + return; +} + +static NV_STATUS +_kchannelGetUserMemDesc +( + OBJGPU *pGpu, + KernelChannel *pKernelChannel, + PMEMORY_DESCRIPTOR *ppMemDesc +) +{ + NV_ASSERT_OR_RETURN(ppMemDesc != NULL, NV_ERR_INVALID_STATE); + *ppMemDesc = NULL; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_STATE); + + *ppMemDesc = pKernelChannel->pInstSubDeviceMemDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + + return *ppMemDesc ? NV_OK : NV_ERR_INVALID_STATE; +} + +/*! + * @brief Retrieve a KernelChannel from either a KernelChannel or TSG handle. KernelChannel is + * checked first. If TSG is provided, the head of the TSG is returned. 
+ * + * @param[in] hClient Client + * @param[in] hDual NvHandle either to TSG or to KernelChannel + * @param[out] ppKernelChannel Referenced KernelChannel + */ +NV_STATUS +kchannelGetFromDualHandle_IMPL +( + NvHandle hClient, + NvHandle hDual, + KernelChannel **ppKernelChannel +) +{ + KernelChannel *pKernelChannel; + RsResourceRef *pChanGrpRef; + + NV_ASSERT_OR_RETURN(ppKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + *ppKernelChannel = NULL; + + if (CliGetKernelChannel(hClient, hDual, &pKernelChannel) == NV_OK) + { + *ppKernelChannel = pKernelChannel; + return NV_OK; + } + + if (CliGetChannelGroup(hClient, hDual, &pChanGrpRef, NULL) == NV_OK) + { + KernelChannelGroupApi *pKernelChannelGroupApi = dynamicCast( + pChanGrpRef->pResource, + KernelChannelGroupApi); + + NV_ASSERT_OR_RETURN( + (pKernelChannelGroupApi != NULL) && + (pKernelChannelGroupApi->pKernelChannelGroup != NULL), + NV_ERR_INVALID_ARGUMENT); + + if (pKernelChannelGroupApi->pKernelChannelGroup->chanCount == 0) + return NV_ERR_INVALID_ARGUMENT; + + *ppKernelChannel = + pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead->pKernelChannel; + NV_ASSERT_OR_RETURN(*ppKernelChannel != NULL, NV_ERR_INVALID_STATE); + + return NV_OK; + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +/*! + * @brief Retrieve a KernelChannel from either a KernelChannel or TSG handle. KernelChannel is + * checked first. If TSG is provided, the head of the TSG is returned. If + * KernelChannel handle is provided, it must not be part of a client-allocated TSG. + * + * @param[in] hClient Client + * @param[in] hDual NvHandle either to TSG or to bare Channel + * @param[out] ppKernelChannel Referenced KernelChannel + */ +NV_STATUS +kchannelGetFromDualHandleRestricted_IMPL +( + NvHandle hClient, + NvHandle hDual, + KernelChannel **ppKernelChannel +) +{ + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kchannelGetFromDualHandle(hClient, hDual, ppKernelChannel)); + if ((RES_GET_HANDLE(*ppKernelChannel) == hDual) && + (((*ppKernelChannel)->pKernelChannelGroupApi->pKernelChannelGroup != NULL) && + !(*ppKernelChannel)->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm)) + { + NV_PRINTF(LEVEL_ERROR, "channel handle 0x%08x is part of a channel group, not allowed!\n", + RES_GET_HANDLE(*ppKernelChannel)); + return NV_ERR_INVALID_ARGUMENT; + } + return NV_OK; +} + +static void +_kchannelUpdateFifoMapping +( + KernelChannel *pKernelChannel, + OBJGPU *pGpu, + NvBool bKernel, + NvP64 cpuAddress, + NvP64 priv, + NvU64 cpuMapLength, + NvU32 flags, + NvHandle hSubdevice, + RsCpuMapping *pMapping +) +{ + pMapping->pPrivate->pGpu = pGpu; + pMapping->pPrivate->bKernel = bKernel; + pMapping->processId = osGetCurrentProcess(); + pMapping->pLinearAddress = cpuAddress; + pMapping->pPrivate->pPriv = priv; + pMapping->length = cpuMapLength; + pMapping->flags = flags; + pMapping->pContext = (void*)(NvUPtr)pKernelChannel->ChID; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_channel_group.c b/src/nvidia/src/kernel/gpu/fifo/kernel_channel_group.c new file mode 100644 index 000000000..09eb708f3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_channel_group.c @@ -0,0 +1,735 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_fifo.h" + +#include "ctrl/ctrla06c.h" // NVA06C_CTRL_INTERLEAVE_LEVEL_* + +// Static functions +static void _kchangrpFreeAllEngCtxDescs(OBJGPU *pGpu, KernelChannelGroup *pKernelChannelGroup); + +NV_STATUS +kchangrpConstruct_IMPL(KernelChannelGroup *pKernelChannelGroup) +{ + return NV_OK; +} + +void +kchangrpDestruct_IMPL(KernelChannelGroup *pKernelChannelGroup) +{ + return; +} + +void +kchangrpSetState_IMPL +( + KernelChannelGroup *pKernelChannelGroup, + NvU32 subdevice, + CHANNELGROUP_STATE state +) +{ + ct_assert(CHANNELGROUP_STATE_COUNT <= 8 * sizeof(NvU32)); + pKernelChannelGroup->pStateMask[subdevice] |= NVBIT(state); +} + +void +kchangrpClearState_IMPL +( + KernelChannelGroup *pKernelChannelGroup, + NvU32 subdevice, + CHANNELGROUP_STATE state +) +{ + ct_assert(CHANNELGROUP_STATE_COUNT <= 8 * sizeof(NvU32)); + pKernelChannelGroup->pStateMask[subdevice] &= ~NVBIT(state); +} + +NvBool +kchangrpIsStateSet_IMPL +( + KernelChannelGroup *pKernelChannelGroup, + NvU32 subdevice, + CHANNELGROUP_STATE state +) +{ + ct_assert(CHANNELGROUP_STATE_COUNT <= 8 * sizeof(NvU32)); + return !!(pKernelChannelGroup->pStateMask[subdevice] & NVBIT(state)); +} + +/** + * @brief Allocates sw state for channel group + * + * This allocates and initiazes sw state for channel group. + * No channels are added to this at this point. It will be done + * on alloc of a channel to this group using kchangrpAddChannelToGroup. + * + * All channels in a group share VASpace and KernelCtxShare. ChannelGroup + * alloc can be passed in a valid ctx share pointer. If not, we + * will allocate a new ctx share. 
+ * + * This function is not called in broadcast mode + * + * @param pGpu + * @param pChanGrp + * @param[in] pVAS + * @param[in] gfid + * @param[in] engineType + * @returns NV_OK on success + */ +NV_STATUS +kchangrpInit_IMPL +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup, + OBJVASPACE *pVAS, + NvU32 gfid +) +{ + NV_STATUS status = NV_OK; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CHID_MGR *pChidMgr = NULL; + NvU32 grpID = 0; + NvU32 maxSubctx; + NvU32 index; + NvBool bMapFaultMthdBuffers = NV_FALSE; + NvU32 runlistId = 0; + NvU32 runQueues = 0; + NvU32 subDeviceCount = gpumgrGetSubDeviceMaxValuePlus1(pGpu); + + // Initialize subctx bitmasks, state mask and interleave level + { + NvU32 subDeviceCount = gpumgrGetSubDeviceMaxValuePlus1(pGpu); + + pKernelChannelGroup->ppSubctxMask = portMemAllocNonPaged( + subDeviceCount * (sizeof *pKernelChannelGroup->ppSubctxMask)); + pKernelChannelGroup->ppZombieSubctxMask = portMemAllocNonPaged( + subDeviceCount * (sizeof *pKernelChannelGroup->ppZombieSubctxMask)); + pKernelChannelGroup->pStateMask = portMemAllocNonPaged( + subDeviceCount * (sizeof *pKernelChannelGroup->pStateMask)); + pKernelChannelGroup->pInterleaveLevel = portMemAllocNonPaged( + subDeviceCount * (sizeof *pKernelChannelGroup->pInterleaveLevel)); + + NV_ASSERT_OR_ELSE((pKernelChannelGroup->ppSubctxMask != NULL && + pKernelChannelGroup->ppZombieSubctxMask != NULL && + pKernelChannelGroup->pStateMask != NULL && + pKernelChannelGroup->pInterleaveLevel != NULL), + status = NV_ERR_NO_MEMORY; goto failed); + + portMemSet(pKernelChannelGroup->ppSubctxMask, 0, + subDeviceCount * (sizeof *pKernelChannelGroup->ppSubctxMask)); + portMemSet(pKernelChannelGroup->ppZombieSubctxMask, 0, + subDeviceCount * (sizeof *pKernelChannelGroup->ppZombieSubctxMask)); + portMemSet(pKernelChannelGroup->pStateMask, 0, + subDeviceCount * (sizeof *pKernelChannelGroup->pStateMask)); + portMemSet(pKernelChannelGroup->pInterleaveLevel, 0, + subDeviceCount * (sizeof *pKernelChannelGroup->pInterleaveLevel)); + } + + // Determine initial runlist for this TSG, using engine type if provided + pKernelChannelGroup->runlistId = kchangrpGetDefaultRunlist_HAL(pGpu, pKernelChannelGroup); + + if (kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + // + // pKernelChannelGroup->engineType should hold the valid engine type if + // bUsePerRunlistChram is set. 
If it is not set, + // it will be ignored by the subsequent call to fifoChidMgrGet anyway + // + NV_ASSERT_OK_OR_RETURN( + kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, + pKernelChannelGroup->engineType, + ENGINE_INFO_TYPE_RUNLIST, + &runlistId)); + } + + pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistId); + + NV_ASSERT_OK_OR_RETURN(kfifoChidMgrAllocChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, &grpID)); + + pKernelChannelGroup->grpID = grpID; + pKernelChannelGroup->timesliceUs = kfifoChannelGroupGetDefaultTimeslice_HAL(pKernelFifo); + + NV_ASSERT_OK_OR_GOTO(status, + kfifoChannelGroupSetTimeslice(pGpu, pKernelFifo, pKernelChannelGroup, + pKernelChannelGroup->timesliceUs, NV_TRUE), + failed); + + NV_ASSERT_OK_OR_GOTO(status, + kfifoChannelListCreate(pGpu, pKernelFifo, &pKernelChannelGroup->pChanList), + failed); + + // Alloc space for one ENGINE_CTX_DESCRIPTOR* per subdevice) + pKernelChannelGroup->ppEngCtxDesc = portMemAllocNonPaged(subDeviceCount * sizeof(ENGINE_CTX_DESCRIPTOR *)); + NV_ASSERT_OR_ELSE(pKernelChannelGroup->ppEngCtxDesc != NULL, status = NV_ERR_NO_MEMORY; goto failed); + portMemSet(pKernelChannelGroup->ppEngCtxDesc, 0, subDeviceCount * sizeof(ENGINE_CTX_DESCRIPTOR *)); + + pKernelChannelGroup->pSubctxIdHeap = portMemAllocNonPaged(sizeof(OBJEHEAP)); + if (pKernelChannelGroup->pSubctxIdHeap == NULL) + { + NV_CHECK(LEVEL_ERROR, pKernelChannelGroup->pSubctxIdHeap != NULL); + status = NV_ERR_NO_MEMORY; + goto failed; + } + + + maxSubctx = kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo, + pKernelChannelGroup, + NV_FALSE); + + constructObjEHeap(pKernelChannelGroup->pSubctxIdHeap, + 0, + maxSubctx, + sizeof(KernelCtxShare *), + 0); + + // Subcontext mode is now enabled on all chips. + pKernelChannelGroup->bLegacyMode = NV_FALSE; + + // We cache the TSG VAS to support legacy mode + pKernelChannelGroup->pVAS = pVAS; + pKernelChannelGroup->gfid = gfid; + + // Get number of runqueues + runQueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + NV_ASSERT((runQueues > 0)); + + // Allocate method buffer struct. 
One per runqueue + pKernelChannelGroup->pMthdBuffers = NULL; + pKernelChannelGroup->pMthdBuffers = portMemAllocNonPaged( + (sizeof(HW_ENG_FAULT_METHOD_BUFFER) * runQueues)); + if (pKernelChannelGroup->pMthdBuffers == NULL) + { + NV_CHECK(LEVEL_ERROR, pKernelChannelGroup->pMthdBuffers != NULL); + status = NV_ERR_NO_MEMORY; + goto failed; + } + portMemSet(pKernelChannelGroup->pMthdBuffers, + 0, + (sizeof(HW_ENG_FAULT_METHOD_BUFFER) * runQueues)); + + // Allocate method buffer memory + status = kchangrpAllocFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + " Fault method buffer allocation failed for group ID 0x%0x with status 0x%0x\n", + grpID, status); + DBG_BREAKPOINT(); + goto failed; + } + + // + // Map method buffer to invisible BAR2 + // Skipped for GSP since its done in channel setup + // + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + bMapFaultMthdBuffers = NV_TRUE; + + for (index = 0; index < runQueues; index++) + { + status = kchangrpMapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, index); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + " Fault method buffer BAR2 mapping failed for group ID 0x%0x with status 0x%0x\n", + grpID, status); + DBG_BREAKPOINT(); + goto failed; + } + } + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + // make sure zombie subcontext mask is reset + for (index = 0; index < SUBCTX_MASK_ARRAY_SIZE; index++) + pKernelChannelGroup->ppZombieSubctxMask[gpumgrGetSubDeviceInstanceFromGpu(pGpu)][index] = 0; + SLI_LOOP_END + + // + // Add the channel group to the table that keeps track of + // tuples + // + if (mapInsertExisting(pChidMgr->pChanGrpTree, grpID, pKernelChannelGroup)) + { + status = NV_OK; + } + else + { + status = NV_ERR_INVALID_STATE; + } + + NV_ASSERT(status == NV_OK); + + return NV_OK; + +failed: + if (pKernelChannelGroup->pSubctxIdHeap != NULL) + { + pKernelChannelGroup->pSubctxIdHeap->eheapDestruct( + pKernelChannelGroup->pSubctxIdHeap); + portMemFree(pKernelChannelGroup->pSubctxIdHeap); + pKernelChannelGroup->pSubctxIdHeap = NULL; + } + + _kchangrpFreeAllEngCtxDescs(pGpu, pKernelChannelGroup); + + if (pKernelChannelGroup->pChanList != NULL) + { + kfifoChannelListDestroy(pGpu, pKernelFifo, pKernelChannelGroup->pChanList); + pKernelChannelGroup->pChanList = NULL; + } + + if (pKernelChannelGroup->pMthdBuffers != NULL) + { + if (bMapFaultMthdBuffers) + { + for (index = 0; index < runQueues; index++) + { + kchangrpUnmapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, index); + } + } + + kchangrpFreeFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup); + + portMemFree(pKernelChannelGroup->pMthdBuffers); + pKernelChannelGroup->pMthdBuffers = NULL; + } + + if (pChidMgr != NULL) + kfifoChidMgrFreeChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, grpID); + + portMemFree(pKernelChannelGroup->ppSubctxMask); + pKernelChannelGroup->ppSubctxMask = NULL; + portMemFree(pKernelChannelGroup->ppZombieSubctxMask); + pKernelChannelGroup->ppZombieSubctxMask = NULL; + portMemFree(pKernelChannelGroup->pStateMask); + pKernelChannelGroup->pStateMask = NULL; + portMemFree(pKernelChannelGroup->pInterleaveLevel); + pKernelChannelGroup->pInterleaveLevel = NULL; + + return status; +} + +/** + * @brief Frees sw state for channel group + * + * This frees sw state for channel group. 
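+ * Specifically, it releases the subcontext ID heap, the per-subdevice engine
+ * context descriptors, the channel list, the fault method buffers (where they
+ * are owned by this RM instance), and the per-subdevice subctx/state/interleave
+ * arrays.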
+ * + * This function is not called in broadcast mode + * + * @param pGpu + * @param pKernelChannelGroup + * + * @returns NV_OK on success + */ +NV_STATUS +kchangrpDestroy_IMPL +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup +) +{ + NV_STATUS status; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CHID_MGR *pChidMgr; + KernelChannelGroup *pKernelChannelGroupTemp; + NvU64 maxSubctx; + NvU64 numFreeSubctx; + NvU32 runlistId = 0; + NvU32 runQueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + NvU32 index; + + if (kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + // + // pKernelChannelGroup->engineType should hold the valid engine type if + // bUsePerRunlistChram is set. If it is not set, + // it will be ignored by the subsequent call to fifoChidMgrGet anyway + // + NV_ASSERT_OK_OR_RETURN( + kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, + pKernelChannelGroup->engineType, + ENGINE_INFO_TYPE_RUNLIST, + &runlistId)); + } + + pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistId); + + // If already destroyed, nothing to be done + if (pKernelChannelGroup == NULL) + return NV_OK; + + // There should be no channels in this group + NV_ASSERT(pKernelChannelGroup->chanCount == 0); + + status = pKernelChannelGroup->pSubctxIdHeap->eheapGetSize( + pKernelChannelGroup->pSubctxIdHeap, + &maxSubctx); + NV_ASSERT(status == NV_OK); + + status = pKernelChannelGroup->pSubctxIdHeap->eheapGetFree( + pKernelChannelGroup->pSubctxIdHeap, + &numFreeSubctx); + NV_ASSERT(status == NV_OK); + + // + // Resource Server should have already freed any client allocated or legacy + // pre-allocated context shares + // + NV_ASSERT(maxSubctx == kfifoChannelGroupGetLocalMaxSubcontext_HAL( + pGpu, + pKernelFifo, + pKernelChannelGroup, + pKernelChannelGroup->bLegacyMode)); + NV_ASSERT(maxSubctx == numFreeSubctx); + + pKernelChannelGroup->pSubctxIdHeap->eheapDestruct( + pKernelChannelGroup->pSubctxIdHeap); + portMemFree(pKernelChannelGroup->pSubctxIdHeap); + pKernelChannelGroup->pSubctxIdHeap = NULL; + + _kchangrpFreeAllEngCtxDescs(pGpu, pKernelChannelGroup); + + kfifoChannelListDestroy(pGpu, pKernelFifo, pKernelChannelGroup->pChanList); + pKernelChannelGroup->pChanList= NULL; + + // Remove this from the that we maintain in OBJFIFO + pKernelChannelGroupTemp = mapFind(pChidMgr->pChanGrpTree, pKernelChannelGroup->grpID); + if (pKernelChannelGroupTemp == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not find channel group %d\n", + pKernelChannelGroup->grpID); + return NV_ERR_OBJECT_NOT_FOUND; + } + mapRemove(pChidMgr->pChanGrpTree, pKernelChannelGroupTemp); + + // Release the free grpID + kfifoChidMgrFreeChannelGroupHwID(pGpu, pKernelFifo, pChidMgr, pKernelChannelGroup->grpID); + + // + // Free the method buffer if applicable + // For SR-IOV, Guest RM allocates the mthd buffers but later RPCs into the + // host, and populates the data structure, but it should be free-d only by + // guest RM. + // In host RM, we only need to free the memory allocated for pMthdBuffers. + // In case of SRIOV heavy, we need to free the method buffer in host RM. 
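+ // Hence the gate below: the buffers are unmapped/freed here only when the
+ // gfid is the PF or the SRIOV-heavy WAR (bug 200577889) is enabled; the
+ // pMthdBuffers array itself is freed unconditionally afterwards.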
+ // + if ((IS_GFID_PF(pKernelChannelGroup->gfid) || + gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) && + pKernelChannelGroup->pMthdBuffers) + { + // + // Unmap method buffer from invisible BAR2 + // Skipped for GSP since its done during fifoFree + // + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + for (index = 0; index < runQueues; index++) + { + kchangrpUnmapFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup, index); + } + } + + kchangrpFreeFaultMethodBuffers_HAL(pGpu, pKernelChannelGroup); + } + + portMemFree(pKernelChannelGroup->pMthdBuffers); + pKernelChannelGroup->pMthdBuffers = NULL; + + portMemFree(pKernelChannelGroup->ppSubctxMask); + pKernelChannelGroup->ppSubctxMask = NULL; + portMemFree(pKernelChannelGroup->ppZombieSubctxMask); + pKernelChannelGroup->ppZombieSubctxMask = NULL; + portMemFree(pKernelChannelGroup->pStateMask); + pKernelChannelGroup->pStateMask = NULL; + portMemFree(pKernelChannelGroup->pInterleaveLevel); + pKernelChannelGroup->pInterleaveLevel = NULL; + + return NV_OK; +} + + +/** + * @brief Adds channel to a channel group + * + * This function is not called in broadcast mode + * + * @returns NV_OK on success + */ +NV_STATUS +kchangrpAddChannel_IMPL +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup, + KernelChannel *pKernelChannel +) +{ + NV_STATUS status; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 maxChanCount; + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + KernelCtxShare *pKernelCtxShare; + + + NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_POINTER); + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_POINTER); + + maxChanCount = kfifoGetMaxChannelGroupSize_HAL(pKernelFifo); + if (pKernelChannelGroup->chanCount == maxChanCount) + { + NV_PRINTF(LEVEL_ERROR, + "There are already max %d channels in this group\n", + maxChanCount); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + NV_ASSERT_OR_RETURN(pKernelChannel->pKernelCtxShareApi != NULL, NV_ERR_INVALID_STATE); + pKernelCtxShare = pKernelChannel->pKernelCtxShareApi->pShareData; + NV_ASSERT_OR_RETURN(pKernelCtxShare != NULL, NV_ERR_INVALID_STATE); + + // If runlist was set on channel - assert that it's the same runlist as TSG, + if (kchannelIsRunlistSet(pGpu, pKernelChannel)) + NV_ASSERT_OR_RETURN(pKernelChannelGroup->runlistId == + kchannelGetRunlistId(pKernelChannel), + NV_ERR_INVALID_STATE); + + pKernelChannel->subctxId = pKernelCtxShare->subctxId; + + NV_PRINTF(LEVEL_INFO, + "Channel 0x%x within TSG 0x%x is using subcontext 0x%x\n", + kchannelGetDebugTag(pKernelChannel), pKernelChannelGroup->grpID, pKernelChannel->subctxId); + + status = kfifoChannelListAppend(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + pKernelChannel, + pKernelChannelGroup->pChanList); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not add channel to channel list\n"); + return status; + } + pKernelChannelGroup->chanCount++; + + // Initialize channel's interleave level to match TSG's + NV_ASSERT_OK_OR_RETURN( + kchangrpSetInterleaveLevel(pGpu, + pKernelChannelGroup, + pKernelChannelGroup->pInterleaveLevel[subdevInst])); + + return NV_OK; +} + + +/** + * @brief Removes channel from a channel group + * + * This function is not called in broadcast mode + * + * @param pGpu + * @param pKernelChannelGroup + * @param[in] pKernelChannel + * + * @returns NV_OK on success + */ +NV_STATUS +kchangrpRemoveChannel_IMPL +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup, + KernelChannel *pKernelChannel +) +{ + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pKernelChannelGroup != 
NULL, NV_ERR_INVALID_POINTER); + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_POINTER); + + status = kfifoChannelListRemove(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + pKernelChannel, + pKernelChannelGroup->pChanList); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Could not remove channel from channel list\n"); + return status; + } + + if (pKernelChannelGroup->chanCount == 0) + { + NV_PRINTF(LEVEL_ERROR, "Channelcount in channel group not right!!!\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + pKernelChannelGroup->chanCount--; + + if (pKernelChannelGroup->chanCount == 0) + { + pKernelChannelGroup->bRunlistAssigned = NV_FALSE; + + // + // Unmap method buffer from invisible BAR2 if this is the last channel in TSG + // Done for GSP only + // + if (pKernelChannelGroup->pMthdBuffers != NULL && RMCFG_FEATURE_PLATFORM_GSP) + { + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 runQueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + NvU32 index; + + for (index = 0; index < runQueues; index++) + { + kchangrpUnmapFaultMethodBuffers_HAL(pGpu, + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup, + index); + } + } + } + + return NV_OK; +} + + +/** + * @brief Set interleave level for channel group + * + * This function sets interleave level for channel group + * and also updates interleave level for all channels + * + * @param pGpu + * @param pKernelChannelGroup + * @param value + * + * @returns NV_OK on success + */ +NV_STATUS +kchangrpSetInterleaveLevel_IMPL +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup, + NvU32 value +) +{ + switch (value) + { + case NVA06C_CTRL_INTERLEAVE_LEVEL_LOW: + case NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM: + case NVA06C_CTRL_INTERLEAVE_LEVEL_HIGH: + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pKernelChannelGroup->pInterleaveLevel[subdevInst] = value; + } + SLI_LOOP_END + + NV_ASSERT_OK_OR_RETURN(kchangrpSetInterleaveLevelSched(pGpu, + pKernelChannelGroup, value)); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +// Helper function to clean up all EngCtxDescs as well as the pointer storage +static void +_kchangrpFreeAllEngCtxDescs +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup +) +{ + ENGINE_CTX_DESCRIPTOR *pEngCtxDescriptor; + + // Nothing to do + if (pKernelChannelGroup->ppEngCtxDesc == NULL) + return; + + // Destroy each of the EngCtxDescriptors + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + pEngCtxDescriptor = pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + if (pEngCtxDescriptor != NULL) + { + vaListDestroy(&pEngCtxDescriptor->vaList); + } + portMemFree(pEngCtxDescriptor); + pKernelChannelGroup->ppEngCtxDesc[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = NULL; + + SLI_LOOP_END + + // Destroy the array of pointers + portMemFree(pKernelChannelGroup->ppEngCtxDesc); + pKernelChannelGroup->ppEngCtxDesc = NULL; + + return; +} + +NV_STATUS +kchangrpAllocEngineContextDescriptor_IMPL +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup +) +{ + NvU32 subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + NV_STATUS status; + + pKernelChannelGroup->ppEngCtxDesc[subdeviceInstance] = portMemAllocNonPaged(sizeof(ENGINE_CTX_DESCRIPTOR)); + NV_ASSERT_OR_RETURN(pKernelChannelGroup->ppEngCtxDesc[subdeviceInstance] != NULL, NV_ERR_NO_MEMORY); + portMemSet(pKernelChannelGroup->ppEngCtxDesc[subdeviceInstance], 0, sizeof(ENGINE_CTX_DESCRIPTOR)); + + 
NV_ASSERT_OK_OR_GOTO(status, + vaListInit(&pKernelChannelGroup->ppEngCtxDesc[subdeviceInstance]->vaList), + failed); + return NV_OK; + +failed: + portMemFree(pKernelChannelGroup->ppEngCtxDesc[subdeviceInstance]); + pKernelChannelGroup->ppEngCtxDesc[subdeviceInstance] = NULL; + return status; +} + +NV_STATUS +kchangrpGetEngineContextMemDesc_IMPL +( + OBJGPU *pGpu, + KernelChannelGroup *pKernelChannelGroup, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + ENGINE_CTX_DESCRIPTOR *pEngCtxDesc; + NvU32 subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[subDevInst]; + + if (NULL != pEngCtxDesc) + *ppMemDesc = pEngCtxDesc->pMemDesc; + else + *ppMemDesc = NULL; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_channel_group_api.c b/src/nvidia/src/kernel/gpu/fifo/kernel_channel_group_api.c new file mode 100644 index 000000000..c1f81836a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_channel_group_api.c @@ -0,0 +1,1373 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/fifo/kernel_channel_group_api.h" + +#include "kernel/core/locks.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/falcon/kernel_falcon.h" + +#include "class/cl0090.h" // KERNEL_GRAPHICS_CONTEXT +#include "class/cl9067.h" // FERMI_CONTEXT_SHARE_A + +#include "libraries/utils/nvprintf.h" +#include "gpu/gpu.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "gpu/mem_mgr/vaspace_api.h" +#include "vgpu/rpc.h" +#include "rmapi/rs_utils.h" + +NV_STATUS +kchangrpapiConstruct_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NvBool bTsgAllocated = NV_FALSE; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NV_STATUS rmStatus; + OBJVASPACE *pVAS = NULL; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi); + KernelMIGManager *pKernelMIGManager = NULL; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvHandle hVASpace = NV01_NULL_OBJECT; + Device *pDevice = NULL; + NvU32 gfid = GPU_GFID_PF; + RsShared *pShared; + RsClient *pClient; + NvBool bLockAcquired = NV_FALSE; + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvBool bMIGInUse = NV_FALSE; + CTX_BUF_INFO *bufInfoList = NULL; + NvU32 bufCount = 0; + NvBool bReserveMem = NV_FALSE; + MIG_INSTANCE_REF ref; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + KernelChannelGroup *pKernelChannelGroup = NULL; + NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS *pAllocParams = NULL; + + NV_PRINTF(LEVEL_INFO, + "hClient: 0x%x, hParent: 0x%x, hObject:0x%x, hClass: 0x%x\n", + pParams->hClient, pParams->hParent, pParams->hResource, + pParams->externalClassId); + + if (RS_IS_COPY_CTOR(pParams)) + { + NV_ASSERT_OK_OR_GOTO(rmStatus, + rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO), + done); + bLockAcquired = NV_TRUE; + rmStatus = kchangrpapiCopyConstruct_IMPL(pKernelChannelGroupApi, + pCallContext, pParams); + goto done; + } + + // + // Make sure this GPU is not already locked by this thread + // Ideally this thread shouldn't have locked any GPU in the system but + // checking this is sufficient as memory allocation from PMA requires + // current GPU's lock not to be held + // + if (rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + NV_PRINTF(LEVEL_ERROR, "TSG alloc should be called without acquiring GPU lock\n"); + LOCK_ASSERT_AND_RETURN(0); + } + + bufInfoList = portMemAllocNonPaged(NV_ENUM_SIZE(GR_CTX_BUFFER) * sizeof(*bufInfoList)); + if (bufInfoList == NULL) + { + return NV_ERR_NO_MEMORY; + } + + // Acquire the lock *only after* PMA is done allocating. 
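+ // (The only allocation made before this point is the non-paged bufInfoList
+ // above; the ctx buf pool reservation that actually touches PMA is likewise
+ // deferred until the lock is released again at the end of this function.)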
+ NV_ASSERT_OK_OR_GOTO(rmStatus, + rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO), + done); + bLockAcquired = NV_TRUE; + + pAllocParams = pParams->pAllocParams; + hVASpace = pAllocParams->hVASpace; + + NV_ASSERT_OK_OR_GOTO(rmStatus, + serverAllocShareWithHalspecParent(&g_resServ, classInfo(KernelChannelGroup), + &pShared, staticCast(pGpu, Object)), + failed); + + pKernelChannelGroup = dynamicCast(pShared, KernelChannelGroup); + pKernelChannelGroupApi->pKernelChannelGroup = pKernelChannelGroup; + + if (!gpuIsClassSupported(pGpu, pResourceRef->externalClassId)) + { + NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", + pResourceRef->externalClassId); + rmStatus = NV_ERR_NOT_SUPPORTED; + goto failed; + } + + + rmStatus = serverGetClientUnderLock(&g_resServ, pParams->hClient, &pClient); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid client handle!\n"); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + rmStatus = deviceGetByHandle(pClient, pParams->hParent, &pDevice); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid parent/device handle!\n"); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + bMIGInUse = IS_MIG_IN_USE(pGpu); + + if (kfifoIsPerRunlistChramSupportedInHw(pKernelFifo)) + { + if (!NV2080_ENGINE_TYPE_IS_VALID(pAllocParams->engineType)) + { + NV_PRINTF(LEVEL_NOTICE, "Valid engine Id must be specified while allocating TSGs or bare channels!\n"); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + // + // If we have a separate channel RAM for each runlist then we need + // to determine runlistId from engineId passed by client. This + // runlistId is used to associate all future channels in this TSG to + // that runlist. Setting the engineType will cause the runlist + // corresponding to that engine to be chosen in + // kchangrpGetDefaultRunlist_HAL. 
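+ // Note that the engineType stored here is still the client-visible
+ // NV2080 engine type; when MIG is in use it is rewritten to the global
+ // engine type just below.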
+ // + pKernelChannelGroup->engineType = pAllocParams->engineType; + } + + // + // If MIG is enabled, client passes a logical engineId w.r.t its own GPU instance + // we need to convert this logical Id to a physical engine Id as we use it + // to set runlistId + // + if (bMIGInUse) + { + NvU32 engineId; + + // Engine type must be valid for MIG + NV_CHECK_OR_ELSE(LEVEL_NOTICE, NV2080_ENGINE_TYPE_IS_VALID(pKernelChannelGroup->engineType), + rmStatus = NV_ERR_INVALID_STATE; goto failed); + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, pParams->hClient, &ref), + failed); + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + pAllocParams->engineType, + &engineId), + failed); + + // Rewrite the engineType with the global engine type + pKernelChannelGroup->engineType = engineId; + pHeap = ref.pKernelMIGGpuInstance->pMemoryPartitionHeap; + } + + if((pDevice->vaMode != NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES) || (hVASpace != 0)) + { + NV_ASSERT_OK_OR_GOTO(rmStatus, + vaspaceGetByHandleOrDeviceDefault(pClient, pParams->hParent, hVASpace, &pVAS), + failed); + + if (pVAS == NULL) + { + rmStatus = NV_ERR_INVALID_STATE; + goto failed; + } + } + + + // vGpu plugin context flag should only be set on host if context is plugin + if (gpuIsSriovEnabled(pGpu)) + pKernelChannelGroup->bIsCallingContextVgpuPlugin = pAllocParams->bIsCallingContextVgpuPlugin; + + if (pKernelChannelGroup->bIsCallingContextVgpuPlugin) + gfid = GPU_GFID_PF; + else + { + NV_ASSERT_OK_OR_GOTO(rmStatus, vgpuGetCallingContextGfid(pGpu, &gfid), failed); + } + + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + NV_ASSERT_OK_OR_GOTO(rmStatus, + ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pCtxBufPool), + failed); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + ctxBufPoolInit(pGpu, pHeap, &pKernelChannelGroup->pChannelBufPool), + failed); + } + + NV_ASSERT_OK_OR_GOTO(rmStatus, + kchangrpInit(pGpu, pKernelChannelGroup, pVAS, gfid), + failed); + bTsgAllocated = NV_TRUE; + + pKernelChannelGroupApi->hLegacykCtxShareSync = 0; + pKernelChannelGroupApi->hLegacykCtxShareAsync = 0; + + if (hVASpace != 0) + { + RsResourceRef *pVASpaceRef; + rmStatus = clientGetResourceRef(pCallContext->pClient, hVASpace, &pVASpaceRef); + NV_ASSERT(rmStatus == NV_OK); + if (rmStatus == NV_OK) + refAddDependant(pVASpaceRef, pResourceRef); + } + + pKernelChannelGroupApi->hErrorContext = pAllocParams->hObjectError; + pKernelChannelGroupApi->hEccErrorContext = pAllocParams->hObjectEccError; + + // Default interleave level + NV_ASSERT_OK_OR_GOTO( + rmStatus, + kchangrpSetInterleaveLevel(pGpu, pKernelChannelGroup, + NVA06C_CTRL_INTERLEAVE_LEVEL_MEDIUM), + failed); + + // + // If ctx buf pools are enabled, filter out partitionable engines + // that aren't part of our instance. + // + // Memory needs to be reserved in the pool only for buffers for + // engines in instance. 
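+ // Only the buffer sizes are recorded in bufInfoList at this point; the
+ // actual ctxBufPoolReserve() call is deferred to the 'done' path, after
+ // the GPU lock has been released.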
+ // + if (pKernelChannelGroup->pCtxBufPool != NULL && + kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, pKernelChannelGroup->engineType, ref)) + { + // GR Buffers + if (NV2080_ENGINE_TYPE_IS_GR(pKernelChannelGroup->engineType)) + { + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, NV2080_ENGINE_TYPE_GR_IDX(pKernelChannelGroup->engineType)); + NvU32 bufId; + portMemSet(&bufInfoList[0], 0, sizeof(CTX_BUF_INFO) * NV_ENUM_SIZE(GR_CTX_BUFFER)); + bufCount = 0; + FOR_EACH_IN_ENUM(GR_CTX_BUFFER, bufId) + { + // TODO expose engine class capabilities to kernel RM + if (kgrmgrIsCtxBufSupported(bufId, !IS_MIG_ENABLED(pGpu))) + { + const CTX_BUF_INFO *pBufInfo = kgraphicsGetCtxBufferInfo(pGpu, pKernelGraphics, bufId); + bufInfoList[bufCount] = *pBufInfo; + NV_PRINTF(LEVEL_INFO, "Reserving 0x%llx bytes for GR ctx bufId = %d\n", + bufInfoList[bufCount].size, bufId); + bufCount++; + } + } + FOR_EACH_IN_ENUM_END; + bReserveMem = NV_TRUE; + } + else + { + // Allocate falcon context buffers if engine has (Kernel) Falcon object + NvU32 ctxBufferSize; + if (IS_GSP_CLIENT(pGpu)) + { + ENGDESCRIPTOR engDesc; + KernelFalcon *pKernelFalcon = NULL; + + NV_ASSERT_OK_OR_GOTO(rmStatus, + gpuXlateClientEngineIdToEngDesc(pGpu, + pKernelChannelGroup->engineType, + &engDesc), + failed); + + pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, engDesc); + if (pKernelFalcon != NULL) + { + ctxBufferSize = pKernelFalcon->ctxBufferSize; + bReserveMem = NV_TRUE; + } + } + + if (bReserveMem) + { + bufInfoList[0].size = ctxBufferSize; + bufInfoList[0].align = RM_PAGE_SIZE; + bufInfoList[0].attr = RM_ATTR_PAGE_SIZE_4KB; + bufInfoList[0].bContig = NV_TRUE; + NV_PRINTF(LEVEL_INFO, "Reserving 0x%llx bytes for engineType %u flcn ctx buffer\n", + bufInfoList[0].size, pKernelChannelGroup->engineType); + bufCount++; + } + else + { + NV_PRINTF(LEVEL_INFO, "No buffer reserved for engineType %u in ctx_buf_pool\n", + pKernelChannelGroup->engineType); + } + } + } + + if ((!bMIGInUse || NV2080_ENGINE_TYPE_IS_GR(pKernelChannelGroup->engineType)) + && !IsT234D(pGpu)) + { + NV_ASSERT_OK_OR_GOTO(rmStatus, + pRmApi->AllocWithSecInfo(pRmApi, + pParams->hClient, + RES_GET_HANDLE(pKernelChannelGroupApi), + &pKernelChannelGroupApi->hKernelGraphicsContext, + KERNEL_GRAPHICS_CONTEXT, + NvP64_NULL, + RMAPI_ALLOC_FLAGS_SKIP_RPC, + NvP64_NULL, + &pRmApi->defaultSecInfo), + failed); + } + + NV_PRINTF(LEVEL_INFO, "Adding group Id: %d hClient:0x%x\n", + pKernelChannelGroup->grpID, pParams->hClient); + + if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && + !(pParams->allocFlags & RMAPI_ALLOC_FLAGS_SKIP_RPC)) + { + NV_RM_RPC_ALLOC_OBJECT(pGpu, + pParams->hClient, + pParams->hParent, + pParams->hResource, + pParams->externalClassId, + pAllocParams, + rmStatus); + // + // Make sure that corresponding RPC occurs when freeing + // KernelChannelGroupApi. 
Resource server checks this variable during + // free and ignores any RPC flags set in resource_list.h + // + staticCast(pKernelChannelGroupApi, RmResource)->bRpcFree = NV_TRUE; + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "KernelChannelGroupApi alloc RPC to vGpu Host failed\n"); + goto failed; + } + + if (IS_VIRTUAL_WITH_FULL_SRIOV(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS params = { + 0}; + NvU32 runqueueIdx; + NvU32 maxRunqueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + + for (runqueueIdx = 0; runqueueIdx < maxRunqueues; ++runqueueIdx) + { + MEMORY_DESCRIPTOR *pSrcMemDesc; + HW_ENG_FAULT_METHOD_BUFFER *pMthdBuffer; + pMthdBuffer = &pKernelChannelGroup->pMthdBuffers[runqueueIdx]; + pSrcMemDesc = pMthdBuffer->pMemDesc; + + params.methodBufferMemdesc[runqueueIdx].size = ( + pSrcMemDesc->Size); + params.methodBufferMemdesc[runqueueIdx].addressSpace = ( + memdescGetAddressSpace(pSrcMemDesc)); + params.methodBufferMemdesc[runqueueIdx].cpuCacheAttrib = ( + memdescGetCpuCacheAttrib(pSrcMemDesc)); + params.methodBufferMemdesc[runqueueIdx].alignment = 1; + + if (IS_VIRTUAL_WITH_FULL_SRIOV(pGpu)) + { + params.bar2Addr[runqueueIdx] = pMthdBuffer->bar2Addr; + params.methodBufferMemdesc[runqueueIdx].base = ( + memdescGetPhysAddr(pSrcMemDesc, AT_CPU, 0)); + } + else + { + // + // The case of both vGpu full SRIOV + GSP_CLIENT host is not + // supported. This else branch considers the case of + // GSP_CLIENT only without vGpu. + // + params.methodBufferMemdesc[runqueueIdx].base = ( + memdescGetPhysAddr(pSrcMemDesc, AT_GPU, 0)); + } + } + params.numValidEntries = runqueueIdx; + + rmStatus = pRmApi->Control(pRmApi, + pParams->hClient, + RES_GET_HANDLE(pKernelChannelGroupApi), + NVA06C_CTRL_CMD_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS, + ¶ms, + sizeof params); + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Control call to update method buffer memdesc failed\n"); + goto failed; + } + } + } + + if (kfifoIsZombieSubctxWarEnabled(pKernelFifo)) + { + kchangrpSetSubcontextZombieState_HAL(pGpu, pKernelChannelGroup, 0, NV_TRUE); + kchangrpUpdateSubcontextMask_HAL(pGpu, pKernelChannelGroup, 0, NV_TRUE); + } + + // initialize apiObjList with original client's KernelChannelGroupApi object + listInit(&pKernelChannelGroup->apiObjList, portMemAllocatorGetGlobalNonPaged()); + + if (listAppendValue(&pKernelChannelGroup->apiObjList, &pKernelChannelGroupApi) == NULL) + { + rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; + listClear(&pKernelChannelGroup->apiObjList); + goto failed; + } + +failed: + if (rmStatus != NV_OK) + { + if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, pParams->hClient, + pKernelChannelGroupApi->hKernelGraphicsContext); + } + + if (pKernelChannelGroup != NULL) + { + if (bTsgAllocated) + kchangrpDestroy(pGpu, pKernelChannelGroup); + + if (pKernelChannelGroup->pCtxBufPool != NULL) + ctxBufPoolDestroy(&pKernelChannelGroup->pCtxBufPool); + + if (pKernelChannelGroup->pChannelBufPool != NULL) + ctxBufPoolDestroy(&pKernelChannelGroup->pChannelBufPool); + + } + + if (pShared) + serverFreeShare(&g_resServ, pShared); + } + +done: + + if (bLockAcquired) + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + if (bReserveMem) + { + // GPU lock should not be held when reserving memory for ctxBufPool + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + ctxBufPoolReserve(pGpu, pKernelChannelGroup->pCtxBufPool, bufInfoList, bufCount)); + } + + portMemFree(bufInfoList); + + return rmStatus; +} + 
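+/*
+ * Summary of the constructor above (descriptive, not normative): the API
+ * object is backed by a shared KernelChannelGroup allocated via
+ * serverAllocShareWithHalspecParent(); kchangrpInit() then claims a HW
+ * channel-group ID, creates the channel list and allocates the per-runqueue
+ * fault method buffers. On vGPU/GSP-client configurations the allocation is
+ * mirrored on the host with NV_RM_RPC_ALLOC_OBJECT, and, for full-SRIOV or
+ * GSP-client cases, the method buffer memdescs are promoted to the host via
+ * NVA06C_CTRL_CMD_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS. The ctx buf pool
+ * reservation (ctxBufPoolReserve) happens only after the GPU lock is dropped.
+ */
+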
+NV_STATUS +kchangrpapiControl_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pResourceRef = RES_GET_REF(pKernelChannelGroupApi); + + (void)pResourceRef; + NV_PRINTF(LEVEL_INFO, "grpID 0x%x handle 0x%x cmd 0x%x\n", + pKernelChannelGroupApi->pKernelChannelGroup->grpID, + pResourceRef->hResource, pParams->pLegacyParams->cmd); + + return gpuresControl_IMPL(staticCast(pKernelChannelGroupApi, GpuResource), + pCallContext, pParams); +} + +void +kchangrpapiDestruct_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + RsResourceRef *pResourceRef; + RsClient *pClient; + KernelChannelGroup *pKernelChannelGroup = + pKernelChannelGroupApi->pKernelChannelGroup; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi); + NV_STATUS rmStatus = NV_OK; + RS_ORDERED_ITERATOR it; + RsShared *pShared = staticCast(pKernelChannelGroup, RsShared); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + resGetFreeParams(staticCast(pKernelChannelGroupApi, RsResource), + &pCallContext, &pParams); + pResourceRef = pCallContext->pResourceRef; + pClient = pCallContext->pClient; + + NV_PRINTF(LEVEL_INFO, "\n"); + + // RS-TODO should still free channels? + if (serverGetShareRefCount(&g_resServ, pShared) > 1) + { + // Remove this kchangrpapi object from the list of owners in the shared object + listRemoveFirstByValue(&pKernelChannelGroupApi->pKernelChannelGroup->apiObjList, &pKernelChannelGroupApi); + goto done; + } + + kchangrpSetRealtime_HAL(pGpu, pKernelChannelGroup, NV_FALSE); + + // If channels still exist in this group, free them + // RS-TODO this can be removed after re-parenting support is added + it = kchannelGetIter(pClient, pResourceRef); + while (clientRefOrderedIterNext(pClient, &it)) + { + NV_STATUS tmpStatus; + + tmpStatus = pRmApi->Free(pRmApi, pClient->hClient, it.pResourceRef->hResource); + if ((tmpStatus != NV_OK) && (rmStatus == NV_OK)) + rmStatus = tmpStatus; + } + + NV_ASSERT(rmStatus == NV_OK); + + if (pKernelChannelGroup != NULL) + { + kchangrpDestroy(pGpu, pKernelChannelGroup); + + if (pKernelChannelGroup->pCtxBufPool != NULL) + { + ctxBufPoolRelease(pKernelChannelGroup->pCtxBufPool); + ctxBufPoolDestroy(&pKernelChannelGroup->pCtxBufPool); + } + + if (pKernelChannelGroup->pChannelBufPool != NULL) + { + ctxBufPoolRelease(pKernelChannelGroup->pChannelBufPool); + ctxBufPoolDestroy(&pKernelChannelGroup->pChannelBufPool); + } + } + + listClear(&pKernelChannelGroup->apiObjList); + +done: + serverFreeShare(&g_resServ, pShared); + + pParams->status = rmStatus; +} + +NV_STATUS +kchangrpapiCopyConstruct_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + RsClient *pDstClient = pCallContext->pClient; + RsResourceRef *pDstRef = pCallContext->pResourceRef; + RsResourceRef *pSrcRef = pParams->pSrcRef; + KernelChannelGroupApi *pChanGrpSrc = dynamicCast(pSrcRef->pResource, + KernelChannelGroupApi); + RS_ITERATOR iter; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi); + NV_STATUS status = NV_OK; + RsResourceRef *pVaspaceRef = NULL; + VaSpaceApi *pVaspaceApi = NULL; + + pKernelChannelGroupApi->hKernelGraphicsContext = NV01_NULL_OBJECT; + pKernelChannelGroupApi->hLegacykCtxShareSync = NV01_NULL_OBJECT; + pKernelChannelGroupApi->hLegacykCtxShareAsync = NV01_NULL_OBJECT; + 
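+ //
+ // The duplicated API object shares the source object's KernelChannelGroup;
+ // take an additional reference on the shared object so that either copy can
+ // be freed first (the destructor and the fail path drop it again via
+ // serverFreeShare).
+ //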
+ pKernelChannelGroupApi->pKernelChannelGroup = + pChanGrpSrc->pKernelChannelGroup; + serverRefShare(&g_resServ, + staticCast(pKernelChannelGroupApi->pKernelChannelGroup, RsShared)); + + iter = serverutilRefIter(pDstClient->hClient, pDstRef->pParentRef->hResource, classId(VaSpaceApi), RS_ITERATE_DESCENDANTS, NV_TRUE); + while (clientRefIterNext(iter.pClient, &iter)) + { + pVaspaceRef = iter.pResourceRef; + pVaspaceApi = dynamicCast(pVaspaceRef->pResource, VaSpaceApi); + NV_ASSERT_OR_RETURN(pVaspaceApi != NULL, NV_ERR_INVALID_STATE); + + if (pVaspaceApi->pVASpace == + pKernelChannelGroupApi->pKernelChannelGroup->pVAS) + { + refAddDependant(pVaspaceRef, pDstRef); + break; + } + } + + if (pChanGrpSrc->hKernelGraphicsContext != NV01_NULL_OBJECT) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->DupObject(pRmApi, + pDstClient->hClient, + pDstRef->hResource, + &pKernelChannelGroupApi->hKernelGraphicsContext, + pParams->pSrcClient->hClient, + pChanGrpSrc->hKernelGraphicsContext, + 0), + fail); + } + + // + // If this channel group is in legacy mode, new client needs its own handles to the + // sync and async internally allocated kctxshares + // + if (pChanGrpSrc->pKernelChannelGroup->bLegacyMode) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->DupObject(pRmApi, + pDstClient->hClient, + pDstRef->hResource, + &pKernelChannelGroupApi->hLegacykCtxShareSync, + pParams->pSrcClient->hClient, + pChanGrpSrc->hLegacykCtxShareSync, + 0), + fail); + + // All chips have SYNC, Some chips won't have an ASYNC kctxshare + if (pChanGrpSrc->hLegacykCtxShareAsync != 0) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->DupObject(pRmApi, + pDstClient->hClient, + pDstRef->hResource, + &pKernelChannelGroupApi->hLegacykCtxShareAsync, + pParams->pSrcClient->hClient, + pChanGrpSrc->hLegacykCtxShareAsync, + 0), + fail); + } + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_DUP_OBJECT(pGpu, pDstClient->hClient, pDstRef->pParentRef->hResource, pDstRef->hResource, + pParams->pSrcClient->hClient, pSrcRef->hResource, 0, + NV_TRUE, // automatically issue RPC_FREE on object free + pDstRef, status); + + if (status != NV_OK) + goto fail; + } + + if (listAppendValue(&pKernelChannelGroupApi->pKernelChannelGroup->apiObjList, &pKernelChannelGroupApi) == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto fail; + } + + return status; + +fail: + if (pKernelChannelGroupApi->hLegacykCtxShareAsync != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, pDstClient->hClient, + pKernelChannelGroupApi->hLegacykCtxShareAsync); + } + if (pKernelChannelGroupApi->hLegacykCtxShareSync != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, pDstClient->hClient, + pKernelChannelGroupApi->hLegacykCtxShareSync); + } + if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, pDstClient->hClient, + pKernelChannelGroupApi->hKernelGraphicsContext); + } + + serverFreeShare(&g_resServ, + staticCast(pKernelChannelGroupApi->pKernelChannelGroup, RsShared)); + + return status; +} + +NvBool +kchangrpapiCanCopy_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi +) +{ + return NV_TRUE; +} + +NV_STATUS +CliGetChannelGroup +( + NvHandle hClient, + NvHandle hChanGrp, + RsResourceRef **ppChanGrpRef, + NvHandle *phDevice +) +{ + NV_STATUS status; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + RsResourceRef *pParentRef; + + if (!ppChanGrpRef) + { + return NV_ERR_INVALID_ARGUMENT; + } + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + NV_ASSERT(status == NV_OK); + 
if (status != NV_OK) + return status; + + status = clientGetResourceRefByType(pRsClient, hChanGrp, + classId(KernelChannelGroupApi), + &pResourceRef); + if (status != NV_OK) + return status; + + *ppChanGrpRef = pResourceRef; + + if (phDevice) + { + pParentRef = pResourceRef->pParentRef; + *phDevice = pParentRef->hResource; + } + + return NV_OK; +} + +/*! + * @brief Use TSG in legacy mode + * + * In legacy mode, RM pre-allocates the subcontexts in a TSG. + * This is needed for the following reasons: + * + * 1. We are also using subcontext to represent TSG contexts in pre-VOLTA chips (see below). + * But RM clients haven't yet moved to the subcontext model in production code. + * So RM implicitly creates it for them, until they make the switch. + * + * 2. Pre-VOLTA, we only support one address space in a TSG. + * Preallocating the subcontext prevents accidental use of multiple address spaces within a TSG. + * So we use the vaspace specified/implied at TSG creation to create the subcontexts. + * + * 3. Tests and clients on VOLTA that don't explicitly specify subcontexts need to behave similar + * to previous chips until they allocate the kctxshares themselves. + * + * Legacy subcontexts are interpreted in the following ways: + * + * VOLTA+ : subcontext 0 is VEID 0, subcontext 1 is VEID 1 + * GM20X thru PASCAL : subcontext 0 is SCG type 0, subcontext 1 is SCG type 1 + * pre-GM20X : just a single subcontext 0; no SCG or VEIDs attached to it. + * + * @param[in] pKernelChannelGroupApi Channel group pointer + * @param[in] pGpu GPU object pointer + * @param[in] pKernelFifo FIFO object pointer + * @param[in] hClient Client handle + * + */ +NV_STATUS +kchangrpapiSetLegacyMode_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvHandle hClient +) +{ + KernelChannelGroup *pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + NvHandle hTsg = RES_GET_HANDLE(pKernelChannelGroupApi); + NvHandle hkCtxShare = 0; + NV_STATUS status = NV_OK; + NvU32 maxSubctx = 0; + NvU64 numMax = 0; + NvU64 numFree = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + KernelChannelGroupApiListIter it; + + NV_CTXSHARE_ALLOCATION_PARAMETERS kctxshareParams = { 0 }; + + ct_assert(NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC == 0); + ct_assert(NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC == 1); + + NV_ASSERT_OK(pKernelChannelGroup->pSubctxIdHeap->eheapGetSize( + pKernelChannelGroup->pSubctxIdHeap, + &numMax)); + + NV_ASSERT_OK(pKernelChannelGroup->pSubctxIdHeap->eheapGetFree( + pKernelChannelGroup->pSubctxIdHeap, + &numFree)); + + NV_ASSERT(numMax == + kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo, + pKernelChannelGroup, + NV_FALSE)); + + NV_ASSERT_OR_RETURN(numMax == numFree && numMax != 0, NV_ERR_INVALID_STATE); + + pKernelChannelGroup->pSubctxIdHeap->eheapDestruct( + pKernelChannelGroup->pSubctxIdHeap); + // + // There should only be 1 (SYNC) or 2 legacy kctxshares (SYNC + ASYNC), + // depending on chip + // + maxSubctx = kfifoChannelGroupGetLocalMaxSubcontext_HAL(pGpu, pKernelFifo, + pKernelChannelGroup, + NV_TRUE); + NV_ASSERT_OR_RETURN(numMax == numFree, NV_ERR_INVALID_STATE); + NV_ASSERT(maxSubctx == 1 || maxSubctx == 2); + + constructObjEHeap(pKernelChannelGroup->pSubctxIdHeap, + 0, maxSubctx, sizeof(KernelCtxShare *), 0); + + pKernelChannelGroup->bLegacyMode = NV_TRUE; + + // Allocate SYNC + hkCtxShare = 0; + kctxshareParams.hVASpace = 0; + kctxshareParams.flags = NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC; + 
kctxshareParams.subctxId = 0xFFFFFFFF; + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithSecInfo(pRmApi, + hClient, + hTsg, + &hkCtxShare, + FERMI_CONTEXT_SHARE_A, + NV_PTR_TO_NvP64(&kctxshareParams), + RMAPI_ALLOC_FLAGS_SKIP_RPC, + NvP64_NULL, + &pRmApi->defaultSecInfo), + fail); + + NV_ASSERT(kctxshareParams.subctxId == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC); + + pKernelChannelGroupApi->hLegacykCtxShareSync = hkCtxShare; + + if(maxSubctx == 2) + { + // Allocate ASYNC + hkCtxShare = 0; + kctxshareParams.hVASpace = 0; + kctxshareParams.flags = NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC; + kctxshareParams.subctxId = 0xFFFFFFFF; + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithSecInfo(pRmApi, + hClient, + hTsg, + &hkCtxShare, + FERMI_CONTEXT_SHARE_A, + NV_PTR_TO_NvP64(&kctxshareParams), + RMAPI_ALLOC_FLAGS_SKIP_RPC, + NvP64_NULL, + &pRmApi->defaultSecInfo), + fail); + + NV_ASSERT(kctxshareParams.subctxId == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC); + + pKernelChannelGroupApi->hLegacykCtxShareAsync = hkCtxShare; + } + + NV_ASSERT_OK_OR_GOTO(status, + pKernelChannelGroup->pSubctxIdHeap->eheapGetFree( + pKernelChannelGroup->pSubctxIdHeap, + &numFree), + fail); + + NV_ASSERT_OR_GOTO(numFree == 0, fail); + + // + // If this channel group has been duped, we need to provide kctxshareApi handles to the + // other channelGroupApi objects that share this channel group since the handles will + // only work for a single client. + // + it = listIterAll(&pKernelChannelGroup->apiObjList); + while (listIterNext(&it)) + { + KernelChannelGroupApi *pChanGrpDest = *it.pValue; + + if(pChanGrpDest == pKernelChannelGroupApi) + continue; + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->DupObject(pRmApi, + RES_GET_CLIENT_HANDLE(pChanGrpDest), + RES_GET_HANDLE(pChanGrpDest), + &pChanGrpDest->hLegacykCtxShareSync, + RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi), + pKernelChannelGroupApi->hLegacykCtxShareSync, + 0), + fail); + + if (maxSubctx == 2) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->DupObject(pRmApi, + RES_GET_CLIENT_HANDLE(pChanGrpDest), + RES_GET_HANDLE(pChanGrpDest), + &pChanGrpDest->hLegacykCtxShareAsync, + RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi), + pKernelChannelGroupApi->hLegacykCtxShareAsync, + 0), + fail); + } + } + + return status; + +fail: + NV_PRINTF(LEVEL_ERROR, "Failed to set channel group in legacy mode.\n"); + + pKernelChannelGroup->bLegacyMode = NV_FALSE; + + it = listIterAll(&pKernelChannelGroup->apiObjList); + + while (listIterNext(&it)) + { + KernelChannelGroupApi *pChanGrpIt = *it.pValue; + + if (pChanGrpIt->hLegacykCtxShareSync != 0) + { + pRmApi->Free(pRmApi, RES_GET_CLIENT_HANDLE(pChanGrpIt), pChanGrpIt->hLegacykCtxShareSync); + pChanGrpIt->hLegacykCtxShareSync = 0; + } + + if (pChanGrpIt->hLegacykCtxShareAsync != 0) + { + pRmApi->Free(pRmApi, RES_GET_CLIENT_HANDLE(pChanGrpIt), pChanGrpIt->hLegacykCtxShareAsync); + pChanGrpIt->hLegacykCtxShareAsync = 0; + } + } + + if(status == NV_OK) + { + status = NV_ERR_INVALID_STATE; + } + + return status; +} + +NV_STATUS +kchangrpapiCtrlCmdGpFifoSchedule_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS *pSchedParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi); + RsResourceRef *pResourceRef = RES_GET_REF(pKernelChannelGroupApi); + KernelChannelGroup *pKernelChannelGroup = NULL; + NV_STATUS status = NV_OK; + KernelFifo *pKernelFifo; + CLASSDESCRIPTOR *pClass = NULL; + CHANNEL_NODE *pChanNode = NULL; + CHANNEL_LIST *pChanList = NULL; + NvU32 
runlistId = INVALID_RUNLIST_ID; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + if (pKernelChannelGroupApi->pKernelChannelGroup == NULL) + return NV_ERR_INVALID_OBJECT; + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + + if (gpuGetClassByClassId(pGpu, pResourceRef->externalClassId, &pClass) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", + pResourceRef->externalClassId); + } + NV_ASSERT_OR_RETURN((pClass != NULL), NV_ERR_NOT_SUPPORTED); + + // + // Bug 1737765: Prevent Externally Owned Channels from running unless bound + // It is possible for clients to allocate and schedule channels while + // skipping the UVM registration step which binds the appropriate + // allocations in RM. We need to fail channel scheduling if the channels + // have not been registered with UVM. + // We include this check for every channel in the group because it is + // expected that Volta+ may use a separate VAS for each channel. + // + + pChanList = pKernelChannelGroup->pChanList; + + for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext) + { + NV_CHECK_OR_RETURN(LEVEL_NOTICE, kchannelIsSchedulable_HAL(pGpu, pChanNode->pKernelChannel), + NV_ERR_INVALID_STATE); + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + pChanList = pKernelChannelGroup->pChanList; + + // + // Some channels may not have objects allocated on them, so they won't have + // a runlist committed yet. Force them all onto the same runlist so the + // low level code knows what do to with them. + // + // First we walk through the channels to see if there is a runlist assigned + // already and if so are the channels consistent. + // + runlistId = pKernelChannelGroup->runlistId; // Start with TSG runlistId + for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext) + { + KernelChannel *pKernelChannel = pChanNode->pKernelChannel; + + NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue); + + if (kchannelIsRunlistSet(pGpu, pKernelChannel)) + { + if (runlistId == INVALID_RUNLIST_ID) + { + runlistId = kchannelGetRunlistId(pKernelChannel); + } + else // Catch if 2 channels in the same TSG have different runlistId + { + if (runlistId != kchannelGetRunlistId(pKernelChannel)) + { + NV_PRINTF(LEVEL_ERROR, + "Channels in TSG %d have different runlist IDs this should never happen!\n", + pKernelChannelGroup->grpID); + DBG_BREAKPOINT(); + } + } + } + } + + // If no channels have a runlist set, get the default and use it. + if (runlistId == INVALID_RUNLIST_ID) + { + runlistId = kchangrpGetDefaultRunlist_HAL(pGpu, pKernelChannelGroup); + } + + // We can rewrite TSG runlist id just as we will do that for all TSG channels below + pKernelChannelGroup->runlistId = runlistId; + + // + // Now go through and force any channels w/o the runlist set to use either + // the default or whatever we found other channels to be allocated on. 
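+ // Channels that already committed a runlist are left untouched; only the
+ // ones without a runlist are stamped with kfifoRunlistSetId_HAL below.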
+ // + for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext) + { + KernelChannel *pKernelChannel = pChanNode->pKernelChannel; + + NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue); + + if (!kchannelIsRunlistSet(pGpu, pKernelChannel)) + { + kfifoRunlistSetId_HAL(pGpu, pKernelFifo, pKernelChannel, runlistId); + } + } + SLI_LOOP_END + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi); + NvHandle hObject = RES_GET_HANDLE(pKernelChannelGroupApi); + + NV_RM_RPC_CONTROL(pGpu, + hClient, + hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + + // + // Do an internal control call to do channel reset + // on Host (Physical) RM + // + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi), + RES_GET_HANDLE(pKernelChannelGroupApi), + NVA06C_CTRL_CMD_INTERNAL_GPFIFO_SCHEDULE, + pSchedParams, + sizeof(NVA06C_CTRL_GPFIFO_SCHEDULE_PARAMS)); + + return status; +} + +NV_STATUS +kchangrpapiCtrlCmdBind_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + NVA06C_CTRL_BIND_PARAMS *pParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi); + CHANNEL_NODE *pChanNode; + NvU32 localEngineType; + NvU32 globalEngineType; + ENGDESCRIPTOR engineDesc; + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_INVALID_ARGUMENT); + + localEngineType = globalEngineType = pParams->engineType; + + if (bMIGInUse) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + localEngineType, + &globalEngineType)); + } + + NV_PRINTF(LEVEL_INFO, + "Binding TSG %d to Engine %d\n", + pKernelChannelGroupApi->pKernelChannelGroup->grpID, + globalEngineType); + + // Translate globalEnginetype -> enginedesc + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + gpuXlateClientEngineIdToEngDesc(pGpu, globalEngineType, &engineDesc)); + + // Translate engineDesc -> runlistId for TSG + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kfifoEngineInfoXlate_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + ENGINE_INFO_TYPE_ENG_DESC, + engineDesc, + ENGINE_INFO_TYPE_RUNLIST, + &pKernelChannelGroupApi->pKernelChannelGroup->runlistId)); + + for (pChanNode = + pKernelChannelGroupApi->pKernelChannelGroup->pChanList->pHead; + pChanNode != NULL; + pChanNode = pChanNode->pNext) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kchannelBindToRunlist(pChanNode->pKernelChannel, + localEngineType, + engineDesc)); + if (rmStatus != NV_OK) + { + break; + } + } + + return rmStatus; +} + +NV_STATUS +kchangrpapiCtrlCmdGetTimeslice_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams +) +{ + KernelChannelGroup *pKernelChannelGroup = NULL; + + if (pKernelChannelGroupApi->pKernelChannelGroup == NULL) + return NV_ERR_INVALID_OBJECT; + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + + pTsParams->timesliceUs = pKernelChannelGroup->timesliceUs; + + return NV_OK; +} + +NV_STATUS 
+kchangrpapiCtrlCmdSetTimeslice_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + NVA06C_CTRL_TIMESLICE_PARAMS *pTsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi); + RsResourceRef *pResourceRef = RES_GET_REF(pKernelChannelGroupApi); + KernelChannelGroup *pKernelChannelGroup = NULL; + NV_STATUS status = NV_OK; + CLASSDESCRIPTOR *pClass = NULL; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + if (pKernelChannelGroupApi->pKernelChannelGroup == NULL) + return NV_ERR_INVALID_OBJECT; + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + + if (gpuGetClassByClassId(pGpu, pResourceRef->externalClassId, &pClass) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", + pResourceRef->externalClassId); + } + NV_ASSERT_OR_RETURN((pClass != NULL), NV_ERR_NOT_SUPPORTED); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi); + NvHandle hObject = RES_GET_HANDLE(pKernelChannelGroupApi); + NVA06C_CTRL_TIMESLICE_PARAMS *pParams = (NVA06C_CTRL_TIMESLICE_PARAMS *)(pRmCtrlParams->pParams); + + NV_RM_RPC_CONTROL(pGpu, + hClient, + hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + // Update guest RM's internal bookkeeping with the timeslice. + if (status == NV_OK) + { + pKernelChannelGroup->timesliceUs = pParams->timesliceUs; + } + + return status; + } + + // + // Do an internal control call to do channel reset + // on Host (Physical) RM + // + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi), + RES_GET_HANDLE(pKernelChannelGroupApi), + NVA06C_CTRL_CMD_INTERNAL_SET_TIMESLICE, + pTsParams, + sizeof(NVA06C_CTRL_TIMESLICE_PARAMS)); + + return status; +} + +NV_STATUS +kchangrpapiCtrlCmdGetInfo_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + NVA06C_CTRL_GET_INFO_PARAMS *pParams +) +{ + KernelChannelGroup *pKernelChannelGroup = NULL; + + if (pKernelChannelGroupApi->pKernelChannelGroup == NULL) + return NV_ERR_INVALID_OBJECT; + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + + pParams->tsgID = pKernelChannelGroup->grpID; + + return NV_OK; +} + +NV_STATUS +kchangrpapiCtrlCmdSetInterleaveLevel_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi); + RsResourceRef *pResourceRef = RES_GET_REF(pKernelChannelGroupApi); + KernelChannelGroup *pKernelChannelGroup = + pKernelChannelGroupApi->pKernelChannelGroup; + PCLASSDESCRIPTOR pClass = NULL; + NV_STATUS status = NV_OK; + + if (gpuGetClassByClassId(pGpu, pResourceRef->externalClassId, &pClass) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", + pResourceRef->externalClassId); + } + NV_ASSERT_OR_RETURN((pClass != NULL), NV_ERR_NOT_SUPPORTED); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannelGroupApi); + NvHandle hObject = RES_GET_HANDLE(pKernelChannelGroupApi); + + NV_RM_RPC_CONTROL(pGpu, + hClient, + hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, NV_ERR_NOT_SUPPORTED); + } + + status = 
kchangrpSetInterleaveLevel(pGpu, pKernelChannelGroup, pParams->tsgInterleaveLevel); + + return status; +} + +NV_STATUS +kchangrpapiCtrlCmdGetInterleaveLevel_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + NVA06C_CTRL_INTERLEAVE_LEVEL_PARAMS *pParams +) +{ + KernelChannelGroup *pKernelChannelGroup = NULL; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannelGroupApi); + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + if (pKernelChannelGroupApi->pKernelChannelGroup == NULL) + return NV_ERR_INVALID_OBJECT; + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + + pParams->tsgInterleaveLevel = pKernelChannelGroup->pInterleaveLevel[subdevInst]; + + return NV_OK; +} + +/*! + * @brief Handler for NVA06C_CTRL_CMD_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS + * + * This is currently un-implemented as split change for bug 200691429 + */ +NV_STATUS +kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + NVA06C_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS_PARAMS *pParams +) +{ + NV_PRINTF(LEVEL_INFO, + "bug 200691429: kchangrpapiCtrlCmdInternalPromoteFaultMethodBuffers_IMPL received\n"); + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_ctxshare.c b/src/nvidia/src/kernel/gpu/fifo/kernel_ctxshare.c new file mode 100644 index 000000000..65909c657 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_ctxshare.c @@ -0,0 +1,656 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "mem_mgr/gpu_vaspace.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "kernel/gpu/fifo/kernel_ctxshare.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "vgpu/rpc.h" +#include "gpu/device/device.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/gr/kernel_graphics_manager.h" +#include "core/locks.h" +#include "gpu/mem_mgr/vaspace_api.h" +#include "rmapi/rs_utils.h" + +#define SUBCTXID_EHEAP_OWNER NvU32_BUILD('n','v','r','m') + +NV_STATUS +kctxshareapiConstruct_IMPL +( + KernelCtxShareApi *pKernelCtxShareApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJVASPACE *pVAS; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelCtxShareApi); + KernelChannelGroupApi *pKernelChannelGroupApi; + KernelChannelGroup *pKernelChannelGroup; + Device *pDevice = NULL; + RsResourceRef *pChanGrpRef; + RsClient *pClient; + NvHandle hDevice; + NvHandle hClient = pParams->hClient; + NvHandle hVASpace = 0; + NV_CTXSHARE_ALLOCATION_PARAMETERS *pUserParams = pParams->pAllocParams; + RsShared *pShared = NULL; + + // To make context share a child of a TSG, a TSG must exist. + if (CliGetChannelGroup(pParams->hClient, pParams->hParent, + &pChanGrpRef, &hDevice) == NV_OK) + { + pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource, + KernelChannelGroupApi); + NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, + NV_ERR_INVALID_STATE); + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + } + else + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + // Copy Constructor path + if (RS_IS_COPY_CTOR(pParams)) + { + rmStatus = kctxshareapiCopyConstruct_IMPL(pKernelCtxShareApi, pCallContext, pParams); + return rmStatus; + } + + rmStatus = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid client handle!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Depending on the va mode, allocating a context share might require allocation + // parameters that has a va space handle. If multiple vaspace mode is + // enabled (no va space under a device), a va handle is required! + // + // OPTIONAL_MULTIVA or SINGLE_VA MODES: Use the device va space. 
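+ // Legacy-mode TSGs are the exception: the context share reuses the TSG's
+ // cached pVAS and hVASpace is expected to be NV01_NULL_OBJECT (see the
+ // bLegacyMode branch below).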
+ // + + rmStatus = deviceGetByHandle(pClient, hDevice, &pDevice); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid parent/device handle!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + hVASpace = pUserParams->hVASpace; + NV_ASSERT((hVASpace == NV01_NULL_OBJECT) || (pDevice->vaMode != NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE)); + + if (pKernelChannelGroup->bLegacyMode) + { + // + // RM is trying to pre-allocate the kctxshares to for legacy mode + // In this case, we use the the parent TSG's pVAS rather than + // the hVASpace param + // + NV_PRINTF(LEVEL_INFO, "Constructing Legacy Context Share\n"); + NV_ASSERT(hVASpace == NV01_NULL_OBJECT); + pVAS = pKernelChannelGroup->pVAS; + } + else + { + NV_PRINTF(LEVEL_INFO, "Constructing Client Allocated Context Share\n"); + rmStatus = vaspaceGetByHandleOrDeviceDefault(pClient, hDevice, hVASpace, &pVAS); + } + + NV_ASSERT_OR_RETURN((rmStatus == NV_OK), rmStatus); + NV_ASSERT_OR_RETURN((pVAS != NULL), NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + serverAllocShareWithHalspecParent(&g_resServ, classInfo(KernelCtxShare), &pShared, staticCast(pGpu, Object)), + failed); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + kctxshareInitCommon(dynamicCast(pShared, KernelCtxShare), + pKernelCtxShareApi, + pGpu, + pVAS, + pUserParams->flags, + &pUserParams->subctxId, + pKernelChannelGroupApi), + failed); + + pKernelCtxShareApi->pShareData = dynamicCast(pShared, KernelCtxShare); + + if (hVASpace != NV01_NULL_OBJECT) + { + RsResourceRef *pVASpaceRef; + rmStatus = clientGetResourceRef(pCallContext->pClient, hVASpace, &pVASpaceRef); + if (rmStatus != NV_OK) + goto failed; + + refAddDependant(pVASpaceRef, pCallContext->pResourceRef); + } + + if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT) + { + RsResourceRef *pKernelGraphicsContextRef; + rmStatus = clientGetResourceRef(pCallContext->pClient, pKernelChannelGroupApi->hKernelGraphicsContext, &pKernelGraphicsContextRef); + if (rmStatus != NV_OK) + goto failed; + + refAddDependant(pKernelGraphicsContextRef, pCallContext->pResourceRef); + } + +failed: + if (rmStatus != NV_OK) + { + if (pShared) + { + serverFreeShare(&g_resServ, pShared); + } + } + + return rmStatus; +} + +void +kctxshareapiDestruct_IMPL +( + KernelCtxShareApi *pKernelCtxShareApi +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelCtxShareApi); + KernelChannelGroupApi *pKernelChannelGroupApi = NULL; + KernelChannelGroup *pKernelChannelGroup = NULL; + RsResourceRef *pChanGrpRef; + RsShared *pShared = NULL; + NvS32 refcnt = 0; + + resGetFreeParams(staticCast(pKernelCtxShareApi, RsResource), &pCallContext, &pParams); + pChanGrpRef = pCallContext->pResourceRef->pParentRef; + if (pChanGrpRef != NULL) + { + pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource, + KernelChannelGroupApi); + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + } + + NV_ASSERT(pKernelChannelGroup); + + if (pKernelCtxShareApi->pShareData != NULL) + { + NV_ASSERT(pKernelCtxShareApi->pShareData->pKernelChannelGroup == + pKernelChannelGroup); + + + NV_PRINTF(LEVEL_INFO, "KernelCtxShareApi Ptr: %p ChanGrp: %p !\n", + pKernelCtxShareApi, pKernelCtxShareApi->pShareData->pKernelChannelGroup); + + pShared = staticCast(pKernelCtxShareApi->pShareData, RsShared); + refcnt = serverGetShareRefCount(&g_resServ, pShared); + + NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL called on KernelCtxShare %p with refcnt %d\n", + pShared, refcnt); + + NV_ASSERT_OR_RETURN_VOID(refcnt 
>= 1); + + if (refcnt > 1) + { + // + // serverFreeShare will delete the object automatically if the count hits 0; + // we'd still need it to free all underlying resourcees, however. + // For this reason we only decrement here if no free is needed + // + serverFreeShare(&g_resServ, pShared); + + NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL: KernelCtxShare %p has %d references left\n", + pShared, refcnt-1); + } + else + { + NV_PRINTF(LEVEL_INFO, "kctxshareapiDestruct_IMPL: KernelCtxShare %p has no more references, destroying...\n", + pShared); + + pParams->status = kctxshareDestroyCommon(pKernelCtxShareApi->pShareData, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi); + NV_ASSERT(pParams->status == NV_OK); + + serverFreeShare(&g_resServ, pShared); + } + } +} + +NV_STATUS +kctxshareapiCopyConstruct_IMPL +( + KernelCtxShareApi *pKernelCtxShareApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelCtxShareApi); + RsClient *pDstClient = pCallContext->pClient; + RsResourceRef *pDstRef = pCallContext->pResourceRef; + RsResourceRef *pSrcRef = pParams->pSrcRef; + KernelCtxShareApi *pKernelCtxShareSrc = dynamicCast(pSrcRef->pResource, KernelCtxShareApi); + KernelChannelGroupApi *pKernelChannelGroupApi; + RS_ITERATOR iter; + RsResourceRef *pVaspaceRef = NULL; + VaSpaceApi *pVaspaceApi = NULL; + RsResourceRef *pChanGrpRef = pDstRef->pParentRef; + + pKernelCtxShareApi->pShareData = pKernelCtxShareSrc->pShareData; + + RsShared *pShared = staticCast(pKernelCtxShareApi->pShareData, RsShared); + serverRefShare(&g_resServ, pShared); + + iter = serverutilRefIter(pDstClient->hClient, pDstRef->pParentRef->pParentRef->hResource, classId(VaSpaceApi), RS_ITERATE_DESCENDANTS, NV_TRUE); + while (clientRefIterNext(iter.pClient, &iter)) + { + pVaspaceRef = iter.pResourceRef; + pVaspaceApi = dynamicCast(pVaspaceRef->pResource, VaSpaceApi); + NV_ASSERT_OR_ELSE(pVaspaceApi != NULL, rmStatus = NV_ERR_INVALID_STATE; goto done); + + if (pVaspaceApi->pVASpace == pKernelCtxShareApi->pShareData->pVAS) + { + refAddDependant(pVaspaceRef, pDstRef); + break; + } + } + + pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource, + KernelChannelGroupApi); + NV_ASSERT_OR_ELSE(pKernelChannelGroupApi != NULL, + rmStatus = NV_ERR_INVALID_STATE; goto done); + + if (pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT) + { + RsResourceRef *pKernelGraphicsContextRef; + NV_ASSERT_OK_OR_ELSE(rmStatus, + clientGetResourceRef(pCallContext->pClient, pKernelChannelGroupApi->hKernelGraphicsContext, &pKernelGraphicsContextRef), + goto done); + + refAddDependant(pKernelGraphicsContextRef, pDstRef); + } + + // + // For legacy internal kctxshares, RPC is handled by the channelgroup object's copy ctor, + // so we skip the automatic RPC here + // + if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && !pKernelCtxShareApi->pShareData->pKernelChannelGroup->bLegacyMode) + { + NV_RM_RPC_DUP_OBJECT(pGpu, pDstClient->hClient, pDstRef->pParentRef->hResource, pDstRef->hResource, + pParams->pSrcClient->hClient, pSrcRef->hResource, 0, + NV_TRUE, // automatically issue RPC_FREE on object free + pDstRef, rmStatus); + } + +done: + if (rmStatus != NV_OK) + { + serverFreeShare(&g_resServ, pShared); + } + + return rmStatus; +} + +NvBool +kctxshareapiCanCopy_IMPL +( + KernelCtxShareApi *pKernelCtxShareApi +) +{ + return NV_TRUE; +} + +NV_STATUS +kctxshareConstruct_IMPL +( + KernelCtxShare *pKernelCtxShare +) +{ + return NV_OK; +} + +/** + * @brief 
Initializes a new context share tracking structure. + * + * To be called immediately after allocation, initializes a broadcast context share + * object to what the client specified. Afterwards, include the context share object + * inside of a ChannelGroup's heap object based on the flag provided. + * + * @param pKernelCtxShare + * @param pKernelCtxShareApi + * @param pGpu + * @param[in] pVAS + * @param[in] Flags + * @param[in,out] subctxId + * @param[in] pKernelChannelGroupApi + */ +NV_STATUS +kctxshareInitCommon_IMPL +( + KernelCtxShare *pKernelCtxShare, + KernelCtxShareApi *pKernelCtxShareApi, + OBJGPU *pGpu, + OBJVASPACE *pVAS, + NvU32 Flags, + NvU32 *pSubctxId, + KernelChannelGroupApi *pKernelChannelGroupApi +) +{ + NV_STATUS status = NV_OK; + NvU32 heapFlag = 0; + NvU64 offset = 0; + NvU64 size = 1; + PEMEMBLOCK pBlock; + KernelChannelGroup *pKernelChannelGroup; + + NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, NV_ERR_INVALID_STATE); + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + NV_ASSERT(pKernelChannelGroup != NULL); + NV_ASSERT(pVAS != NULL); + + // GPU lock must be held before calling this function + LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + // + // For external VAS, create subcontext only after SetPageDirectory() call is made. + // This will ensure that new PDB will be updated in all channels subcontext array. + // See Bug 1805222 comment #11 for more details + // + if (!IsAMODEL(pGpu) && vaspaceIsExternallyOwned(pVAS)) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + if (vaspaceGetPageDirBase(pVAS, pGpu) == NULL) + { + NV_ASSERT(0); + SLI_LOOP_RETURN(NV_ERR_INVALID_STATE); + } + SLI_LOOP_END + } + + // If flag is equal to SYNC, allocate context share from veId 0. + if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SYNC) + { + heapFlag = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + offset = 0; + } + // + // If the flag is Async, we want to allocate a free block in reverse order. + // This allocates a block between veId 1 and veId 63. + // If no blocks are available between veId 1 and veId 63, use veId 0. 
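+    // The reverse-order search is implemented by passing
+    // NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN to the subctxId heap allocation
+    // below, so the highest free veId is handed out first.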
+ // + else if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_ASYNC) + { + heapFlag = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN; + } + else if (Flags == NV_CTXSHARE_ALLOCATION_FLAGS_SUBCONTEXT_SPECIFIED) + { + heapFlag = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + offset = *pSubctxId; + } + else + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + status = pKernelChannelGroup->pSubctxIdHeap->eheapAlloc( + pKernelChannelGroup->pSubctxIdHeap, + SUBCTXID_EHEAP_OWNER, + &heapFlag, + &offset, + &size, + 1, + 1, + &pBlock, + NULL, + NULL); + if (status != NV_OK) + { + return status; + } + + pKernelCtxShare->pVAS = pVAS; + pKernelCtxShare->subctxId = NvU64_LO32(offset); + pKernelCtxShare->pKernelChannelGroup = pKernelChannelGroup; + pKernelCtxShare->flags = Flags; + + pBlock->pData = (void *)pKernelCtxShare; + + status = kctxshareInit_HAL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pVAS, + pKernelChannelGroupApi, offset, pBlock); + + if(status != NV_OK) + { + goto done; + } + +// @todo Code under label "fail" should handle failure case +done: + if (status == NV_OK) + { + *pSubctxId = NvU64_LO32(offset); + + NV_PRINTF(LEVEL_INFO, + "New Context Share 0x%p allocated with id 0x%x\n", + pKernelCtxShare, NvU64_LO32(offset)); + } + else + { + NV_STATUS tmpStatus; + + tmpStatus = pKernelChannelGroup->pSubctxIdHeap->eheapFree( + pKernelChannelGroup->pSubctxIdHeap, + offset); + NV_ASSERT(tmpStatus == NV_OK); + + NV_PRINTF(LEVEL_INFO, + "Context Share 0x%p allocation with id 0x%x failed, status is %x\n", + pKernelCtxShare, NvU64_LO32(offset), status); + } + + return status; +} + +/** + * @brief Frees a context share tracking structure if no references remain. + * + * This funtion should be used to free kctxshare rather than freeing object + * directly using serverFreeShare or objDelete. + * Frees child ENGINE_CTX_DESCRIPTORs but does not free any memory pointed at + * by pPrivCtxData. It is the responsiblity of the user of that memory to + * ensure it is freed before this function is called (or that another pointer + * exists). + * + * @param[in] pKernelCtxShare + * @param[in] pKernelCtxShareApi + * @param pGpu + * @param pKernelChannelGroupApi + */ +NV_STATUS +kctxshareDestroyCommon_IMPL +( + KernelCtxShare *pKernelCtxShare, + KernelCtxShareApi *pKernelCtxShareApi, + OBJGPU *pGpu, + KernelChannelGroupApi *pKernelChannelGroupApi +) +{ + NV_STATUS status = NV_OK; + NvU32 subctxId, i; + KernelChannelGroup *pKernelChannelGroup; + NvU32 subDevInst; + ENGINE_CTX_DESCRIPTOR *pEngCtxDesc = NULL; + NvU64 numMax = 0; + NvBool bRelease = NV_TRUE; + NvU64 vaddr = 0; + RsShared *pShared = NULL; + NvS32 refcnt = 0; + + NV_ASSERT_OR_RETURN(pKernelCtxShare != NULL, NV_ERR_INVALID_STATE); + + // This function should only be called on the last free of the object + pShared = staticCast(pKernelCtxShare, RsShared); + refcnt = serverGetShareRefCount(&g_resServ, pShared); + NV_ASSERT_OR_RETURN(refcnt == 1, NV_ERR_INVALID_STATE); + + // GPU lock must be held before calling this function + LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + pKernelChannelGroup = pKernelCtxShare->pKernelChannelGroup; + NV_ASSERT(pKernelChannelGroup == pKernelChannelGroupApi->pKernelChannelGroup); + subctxId = pKernelCtxShare->subctxId; + + // + // Handle the case when VAS is shared by subcontexts. + // Release the shared resources only when the last subcontext using this VAS is freed. 
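+    // The loop below walks all other allocated subctxId blocks in the TSG's
+    // heap; if any remaining subcontext still uses this pVAS, bRelease is
+    // cleared so the shared state is kept alive.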
+ // + status = pKernelChannelGroup->pSubctxIdHeap->eheapGetSize( + pKernelChannelGroup->pSubctxIdHeap, + &numMax); + NV_ASSERT(status == NV_OK); + + for (i = 0; i < numMax; i++) + { + if (i == pKernelCtxShare->subctxId) + { + continue; + } + + PEMEMBLOCK pBlock = pKernelChannelGroup->pSubctxIdHeap->eheapGetBlock( + pKernelChannelGroup->pSubctxIdHeap, + i, + NV_FALSE); + if (pBlock) + { + OBJVASPACE *pSubctxVAS = ((KernelCtxShare *)pBlock->pData)->pVAS; + if (pSubctxVAS == pKernelCtxShare->pVAS) + { + bRelease = NV_FALSE; + break; + } + } + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + RsResourceRef *pParentRef = RES_GET_REF(pKernelCtxShareApi)->pParentRef; + KernelChannelGroupApi *pKernelChannelGroupApi = dynamicCast(pParentRef->pResource, KernelChannelGroupApi); + KernelGraphicsContext *pKernelGraphicsContext; + + subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + if ((pKernelChannelGroup->ppEngCtxDesc != NULL) && + kgrctxFromKernelChannelGroupApi(pKernelChannelGroupApi, &pKernelGraphicsContext) == NV_OK) + { + if (bRelease) + { + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelGraphicsContext); + NvHandle hParent = RES_GET_PARENT_HANDLE(pKernelGraphicsContext); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelGraphics *pKernelGraphics; + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + kgrmgrCtrlSetChannelHandle(hParent, &grRouteInfo); + + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics); + + kgrctxReleaseSubctxResources_HAL(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelCtxShare->pVAS, pKernelCtxShare->subctxId); + + pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[subDevInst]; + if (pEngCtxDesc != NULL) + { + vaddr = 0; + if (vaListFindVa(&pEngCtxDesc->vaList, pKernelCtxShare->pVAS, &vaddr) == NV_OK) + { + NvU64 tmpVaddr = 0; + while (vaListFindVa(&pEngCtxDesc->vaList, pKernelCtxShare->pVAS, &tmpVaddr) == NV_OK) + { + status = vaListRemoveVa(&pEngCtxDesc->vaList, pKernelCtxShare->pVAS); + NV_ASSERT(status == NV_OK); + } + + if (vaListGetManaged(&pEngCtxDesc->vaList)) + { + dmaUnmapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pKernelCtxShare->pVAS, vaddr); + } + } + } + } + } + SLI_LOOP_END + + + subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + status = kctxshareDestroy_HAL(pKernelCtxShare, pKernelCtxShareApi, pGpu, pKernelChannelGroupApi, bRelease); + if (status != NV_OK) + { + goto fail; + } + + status = pKernelChannelGroup->pSubctxIdHeap->eheapFree( + pKernelChannelGroup->pSubctxIdHeap, + subctxId); + NV_ASSERT_OR_GOTO(status == NV_OK, fail); + +fail: + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Freed Context Share 0x%p with id 0x%x\n", + pKernelCtxShare, subctxId); + } + else + { + NV_PRINTF(LEVEL_INFO, "Failed to free Context Share 0x%p with id 0x%x\n", + pKernelCtxShare, subctxId); + } + + return status; +} + +void +kctxshareDestruct_IMPL +( + KernelCtxShare *pKernelCtxShare +) +{ + // + // Assert that kctxshareDestroyCommon was called to free kctxshare resources before + // getting here by checking if subctxId has been freed from heap. + // pKernelChannelGroup may not be set if kctxshare failed initialization. 
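+    // When the destroy path ran, the subctxId block was freed, so
+    // eheapGetBlock() below is expected to return NULL.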
+ // + if(pKernelCtxShare->pKernelChannelGroup != NULL) + { + PEMEMBLOCK pBlock = + pKernelCtxShare->pKernelChannelGroup->pSubctxIdHeap->eheapGetBlock( + pKernelCtxShare->pKernelChannelGroup->pSubctxIdHeap, + pKernelCtxShare->subctxId, + NV_FALSE); + + NV_ASSERT(pBlock == NULL); + } +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_fifo.c b/src/nvidia/src/kernel/gpu/fifo/kernel_fifo.c new file mode 100644 index 000000000..9d33143db --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_fifo.c @@ -0,0 +1,2761 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/gpu/fifo/kernel_sched_mgr.h" +#include "rmapi/rs_utils.h" +#include "rmapi/client.h" + +#include "kernel/core/locks.h" +#include "lib/base_utils.h" + +#include "gpu/mmu/kern_gmmu.h" +#include "vgpu/rpc.h" +#include "vgpu/vgpu_events.h" +#include "nvRmReg.h" + +#include "class/cl0080.h" +#include "class/cl2080.h" +#include "class/cl208f.h" +#include "class/clc572.h" + +#include "ctrl/ctrl0080/ctrl0080fifo.h" + +#define KFIFO_EHEAP_OWNER NvU32_BUILD('f','i','f','o') + +static EHeapOwnershipComparator _kfifoUserdOwnerComparator; + +static NV_STATUS _kfifoChidMgrAllocChidHeaps(OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr); + +static NV_STATUS _kfifoChidMgrAllocVChidHeapPointers(OBJGPU *pGpu, CHID_MGR *pChidMgr); + +static NV_STATUS _kfifoChidMgrInitChannelGroupMgr(OBJGPU *pGpu, CHID_MGR *pChidMgr); + +static void _kfifoChidMgrDestroyChidHeaps(CHID_MGR *pChidMgr); + +static void _kfifoChidMgrDestroyChannelGroupMgr(CHID_MGR *pChidMgr); + +static NV_STATUS _kfifoChidMgrFreeIsolationId(CHID_MGR *pChidMgr, NvU32 ChID); + +NV_STATUS +kfifoChidMgrConstruct_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + NvU32 numEngines; + + // + // Allocate memory for the array of CHID_MGR pointers. Since this is an + // array, we allocate memory for pointers unto maxNumRunlists. We will only + // allocate objects for the valid ones. 
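+    // With per-runlist channel RAM enabled there is one CHID_MGR per runlist
+    // (numChidMgrs == maxNumRunlists); otherwise a single CHID_MGR at index 0
+    // manages the one global channel ID space.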
+ // + if (kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + // + // Construct the engine list if it isn't already constructed (internally + // checks if it was already constructed) + // + NV_ASSERT_OK_OR_RETURN(kfifoConstructEngineList_HAL(pGpu, pKernelFifo)); + pKernelFifo->numChidMgrs = kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo); + } + else + pKernelFifo->numChidMgrs = 1; + + if (pKernelFifo->numChidMgrs > MAX_NUM_RUNLISTS) + { + // + // This only currently defines the size of our bitvector + // pKernelFifo->chidMgrValid. Catch this case if HW expands beyond this so we + // can increase the size allocated to the bitvector + // + NV_PRINTF(LEVEL_ERROR, "numChidMgrs 0x%x exceeds MAX_NUM_RUNLISTS\n", + pKernelFifo->numChidMgrs); + DBG_BREAKPOINT(); + return NV_ERR_BUFFER_TOO_SMALL; + } + + pKernelFifo->ppChidMgr = portMemAllocNonPaged(sizeof(CHID_MGR *) * pKernelFifo->numChidMgrs); + if (pKernelFifo->ppChidMgr == NULL) + { + status = NV_ERR_NO_MEMORY; + pKernelFifo->ppChidMgr = NULL; + NV_PRINTF(LEVEL_ERROR, "Failed to allocate pFifo->pChidMgr\n"); + DBG_BREAKPOINT(); + return status; + } + portMemSet(pKernelFifo->ppChidMgr, 0, sizeof(CHID_MGR *) * pKernelFifo->numChidMgrs); + + // Initialize the valid mask + if (kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + numEngines = kfifoGetNumEngines_HAL(pGpu, pKernelFifo); + for (i = 0; i < numEngines; i++) + { + NvU32 runlistId; + status = kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_INVALID, i, + ENGINE_INFO_TYPE_RUNLIST, &runlistId); + if (status == NV_OK) + bitVectorSet(&pKernelFifo->chidMgrValid, runlistId); + else + { + NV_PRINTF(LEVEL_ERROR, "Translation to runlistId failed for engine %d\n", i); + DBG_BREAKPOINT(); + goto fail; + } + } + } + else + { + bitVectorSet(&pKernelFifo->chidMgrValid, 0); // We only have 1 chidmgr + } + + // Allocate memory for each CHID_MGR and its members (only the valid ones) + for (i = 0; i < pKernelFifo->numChidMgrs; i++) + { + if (!bitVectorTest(&pKernelFifo->chidMgrValid, i)) + continue; + + pKernelFifo->ppChidMgr[i] = portMemAllocNonPaged(sizeof(CHID_MGR)); + if (pKernelFifo->ppChidMgr[i] == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, "Failed to allocate pFifo->pChidMgr[%d]\n", i); + DBG_BREAKPOINT(); + goto fail; + } + portMemSet(pKernelFifo->ppChidMgr[i], 0, sizeof(CHID_MGR)); + + pKernelFifo->ppChidMgr[i]->runlistId = i; + + pKernelFifo->ppChidMgr[i]->pChanGrpTree = portMemAllocNonPaged(sizeof(KernelChannelGroupMap)); + mapInitIntrusive(pKernelFifo->ppChidMgr[i]->pChanGrpTree); + + status = _kfifoChidMgrAllocChidHeaps(pGpu, pKernelFifo, pKernelFifo->ppChidMgr[i]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error allocating FifoDataHeap in " + "pChidMgr. 
Status = %s (0x%x)\n", + nvstatusToString(status), status); + DBG_BREAKPOINT(); + goto fail; + } + + status = _kfifoChidMgrInitChannelGroupMgr(pGpu, pKernelFifo->ppChidMgr[i]); + if (status != NV_OK) + goto fail; + } + + + return status; + +fail: + kfifoChidMgrDestruct(pKernelFifo); + return status; +} + +void +kfifoChidMgrDestruct_IMPL +( + KernelFifo *pKernelFifo +) +{ + NvU32 i; + + for (i = 0; i < pKernelFifo->numChidMgrs; i++) + { + if (pKernelFifo->ppChidMgr[i] != NULL) + { + mapDestroy(pKernelFifo->ppChidMgr[i]->pChanGrpTree); + portMemFree(pKernelFifo->ppChidMgr[i]->pChanGrpTree); + _kfifoChidMgrDestroyChidHeaps(pKernelFifo->ppChidMgr[i]); + _kfifoChidMgrDestroyChannelGroupMgr(pKernelFifo->ppChidMgr[i]); + portMemFree(pKernelFifo->ppChidMgr[i]); + pKernelFifo->ppChidMgr[i] = NULL; + } + } + + portMemFree(pKernelFifo->ppChidMgr); + pKernelFifo->ppChidMgr = NULL; + bitVectorClrAll(&pKernelFifo->chidMgrValid); + pKernelFifo->numChidMgrs = 0; +} + +/* + * @brief Allocate and initialize the virtual ChId heap pointers + */ +static NV_STATUS +_kfifoChidMgrAllocVChidHeapPointers +( + OBJGPU *pGpu, + CHID_MGR *pChidMgr +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + if (IS_VIRTUAL(pGpu)) + { + return NV_OK; + } + + if (IS_GSP_CLIENT(pGpu)) + { + return NV_OK; + } + + if (gpuIsSriovEnabled(pGpu)) + { + // + // For Virtual Channel Heap + // Allocate Memory for Heap Object pointers + // + pChidMgr->ppVirtualChIDHeap = portMemAllocNonPaged(sizeof(OBJEHEAP *) * (VMMU_MAX_GFID)); + if (pChidMgr->ppVirtualChIDHeap == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Error allocating memory for virtual channel heap pointers\n"); + return NV_ERR_NO_MEMORY; + } + + // initialize + for (i = 0; i < VMMU_MAX_GFID; i++) + { + pChidMgr->ppVirtualChIDHeap[i] = NULL; + } + } + return status; +} + + +/* + * @brief Allocates & initializes ChID heaps + */ +static NV_STATUS +_kfifoChidMgrAllocChidHeaps +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr +) +{ + NV_STATUS status = NV_OK; + + if (pChidMgr->numChannels == 0) + { + if (kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr) == 0) + { + NV_PRINTF(LEVEL_ERROR, "pChidMgr->numChannels is 0\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + } + + pChidMgr->pFifoDataHeap = portMemAllocNonPaged(sizeof(*pChidMgr->pFifoDataHeap)); + if (pChidMgr->pFifoDataHeap == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, + "Error in Allocating memory for pFifoDataHeap! Status = %s (0x%x)\n", + nvstatusToString(status), status); + return status; + } + constructObjEHeap(pChidMgr->pFifoDataHeap, 0, pChidMgr->numChannels, + sizeof(KernelChannel *), 0); + + if (kfifoIsChidHeapEnabled(pKernelFifo)) + { + NvU32 userdBar1Size; + NvU32 numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + NvU32 subProcessIsolation = 1; + + pChidMgr->pGlobalChIDHeap = portMemAllocNonPaged(sizeof(OBJEHEAP)); + if (pChidMgr->pGlobalChIDHeap == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Error in Allocating memory for global ChID heap!\n"); + return NV_ERR_NO_MEMORY; + } + constructObjEHeap(pChidMgr->pGlobalChIDHeap, 0, numChannels, + sizeof(PFIFO_ISOLATIONID), 0); + + // + // Enable USERD allocation isolation. 
USERD allocated by different clients + // should not be in the same page + // + kfifoGetUserdSizeAlign_HAL(pKernelFifo, &userdBar1Size, NULL); + pChidMgr->pGlobalChIDHeap->eheapSetOwnerIsolation(pChidMgr->pGlobalChIDHeap, + NV_TRUE, + RM_PAGE_SIZE / userdBar1Size); + + // Disable USERD allocation isolation for guest if disabled from vmioplugin + { + // In this case subProcessIsolation is always 0 + if (IS_GSP_CLIENT(pGpu)) + { + subProcessIsolation = 0; + } + } + if (!subProcessIsolation) + { + pChidMgr->pGlobalChIDHeap->eheapSetOwnerIsolation( + pChidMgr->pGlobalChIDHeap, + NV_FALSE, + RM_PAGE_SIZE / userdBar1Size); + #if (defined(_WIN32) || defined(_WIN64) || defined(NV_UNIX)) && !defined(NV_MODS) + NV_PRINTF(LEVEL_INFO, + "Sub Process channel isolation disabled by vGPU plugin\n"); + #endif + } + + status = _kfifoChidMgrAllocVChidHeapPointers(pGpu, pChidMgr); + } + + return status; +} + +static void +_kfifoChidMgrDestroyChidHeaps +( + CHID_MGR *pChidMgr +) +{ + if (pChidMgr->pFifoDataHeap != NULL) + { + pChidMgr->pFifoDataHeap->eheapDestruct(pChidMgr->pFifoDataHeap); + portMemFree(pChidMgr->pFifoDataHeap); + pChidMgr->pFifoDataHeap = NULL; + } + if (pChidMgr->pGlobalChIDHeap != NULL) + { + pChidMgr->pGlobalChIDHeap->eheapDestruct(pChidMgr->pGlobalChIDHeap); + portMemFree(pChidMgr->pGlobalChIDHeap); + pChidMgr->pGlobalChIDHeap = NULL; + } + + portMemFree(pChidMgr->ppVirtualChIDHeap); + pChidMgr->ppVirtualChIDHeap = NULL; +} + + +static NV_STATUS +_kfifoChidMgrInitChannelGroupMgr +( + OBJGPU *pGpu, + CHID_MGR *pChidMgr +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + FIFO_HW_ID *pFifoHwID = &pChidMgr->channelGrpMgr; + NvU32 allocSize; + NvU32 numChannelGroups = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + + if (numChannelGroups == 0) + { + return NV_OK; + } + + // Rounds up to dword alignemnt, then converts bits to bytes. + allocSize = RM_ALIGN_UP(numChannelGroups, 32)/8; + + pFifoHwID->pHwIdInUse = portMemAllocNonPaged(allocSize); + if (pFifoHwID->pHwIdInUse == NULL) + return NV_ERR_NO_MEMORY; + + // bytes to NvU32[] elements + pFifoHwID->hwIdInUseSz = allocSize/4; + + portMemSet(pFifoHwID->pHwIdInUse, 0, allocSize); + + // + // If numChannelGroups isn't a multiple of 32 we need to set the bits > numChannelGroups to + // 1. Otherwise when we allocate IDs starting at the top we'll allocate + // ids >numChannelGroups. + // + if (numChannelGroups % 32 != 0) + { + pFifoHwID->pHwIdInUse[numChannelGroups/32] |= ~ ((1<<(numChannelGroups%32))-1); + } + + return NV_OK; +} + +static void +_kfifoChidMgrDestroyChannelGroupMgr +( + CHID_MGR *pChidMgr +) +{ + if (pChidMgr->channelGrpMgr.pHwIdInUse) + { + portMemFree(pChidMgr->channelGrpMgr.pHwIdInUse); + pChidMgr->channelGrpMgr.pHwIdInUse = NULL; + pChidMgr->channelGrpMgr.hwIdInUseSz = 0; + } +} + +static NV_STATUS +_kfifoChidMgrFreeIsolationId +( + CHID_MGR *pChidMgr, + NvU32 ChID +) +{ + EMEMBLOCK *pIsolationIdBlock = pChidMgr->pGlobalChIDHeap->eheapGetBlock( + pChidMgr->pGlobalChIDHeap, + ChID, + NV_FALSE); + + NV_ASSERT_OR_RETURN(pIsolationIdBlock, NV_ERR_OBJECT_NOT_FOUND); + NV_ASSERT(pIsolationIdBlock->refCount > 0); + NV_ASSERT(pIsolationIdBlock->pData != NULL); + portMemFree(pIsolationIdBlock->pData); + + pIsolationIdBlock->pData = NULL; + + return NV_OK; +} + +/*! 
+ * @breif Fifo defined call back comparator to compare eheap block ownership ID + * + * @param[in] pRequesterID Ownership ID constructed by caller + * @param[in] pIsolationID + * + * @return NV_TRUE if two ownership IDs belong to the same owner + */ +static NvBool +_kfifoUserdOwnerComparator +( + void *pRequesterID, + void *pIsolationID +) +{ + PFIFO_ISOLATIONID pAllocID = (PFIFO_ISOLATIONID)pRequesterID; + PFIFO_ISOLATIONID pBlockID = (PFIFO_ISOLATIONID)pIsolationID; + + // + // The block's data will be NULL if the channel has been destroyed but there + // is still a refcount on the channel ID. In that case no work can be issued + // to that channel ID now or in the future, so we can act as though the + // channel does not exist. + // + if (!pBlockID) + return NV_TRUE; + + if ((pAllocID->domain != pBlockID->domain) || + (pAllocID->processID != pBlockID->processID) || + (pAllocID->subProcessID != pBlockID->subProcessID)) + { + return NV_FALSE; + } + else + { + return NV_TRUE; + } +} + +/*! + * @brief Allocates one Channel ID on heap + * + * @param[in] OBJGPU GPU Object + * @param[in] KernelFifo KernelFifo Object + * @param[in] CHID_MGR Channel ID manager + * @param[in] chFlag NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN + * NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE + * NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP + * @param[in] bForceInternalIdx true if requesting specific index within USERD + * page + * @param[in] internalIdx requested index within USERD page when + * bForceInternalIdx true + * @param[in] ChID ChID to assign in case of ADDRESS_ALLOCATE + * @param[in,out] pKernelChannel The previously allocated KernelChannel structure + * + * @return NV_OK if allocation is successful + * NV_ERR_NO_FREE_FIFOS: allocated channel ID exceeds MAX channels. + */ +NV_STATUS +kfifoChidMgrAllocChid_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr, + NvHandle hClient, + CHANNEL_HW_ID_ALLOC_MODE chIdFlag, + NvBool bForceInternalIdx, + NvU32 internalIdx, + NvBool bForceUserdPage, + NvU32 userdPageIdx, + NvU32 ChID, + KernelChannel *pKernelChannel +) +{ + NvU64 chSize; + NvU32 chFlag = chIdFlag; + NvU64 ChID64 = 0; + NvU64 subProcessID = 0; + NvU64 processID = 0; + NvBool bIsSubProcessDisabled = NV_FALSE; + RmClient *pClient; + NvU32 offsetAlign = 1; + NvU32 gfid; + PFIFO_ISOLATIONID pIsolationID = NULL; + NV_STATUS status; + NvU32 numChannels; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + + switch (chIdFlag) + { + case CHANNEL_HW_ID_ALLOC_MODE_GROW_DOWN: + chFlag = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN; + break; + case CHANNEL_HW_ID_ALLOC_MODE_GROW_UP: + chFlag = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP; + break; + case CHANNEL_HW_ID_ALLOC_MODE_PROVIDED: + ChID64 = ChID; + chFlag = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid channel ID alloc mode %d\n", chFlag); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + // we are allocating only one Channel at a time + chSize = 1; + + // Create unique isolation ID for each process + if (serverutilGetClientUnderLock(hClient, &pClient) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid client handle %ux\n", hClient); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_CLIENT; + } + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + { + // + // Legacy / SRIOV vGPU Host, SRIOV guest, baremetal CPU RM, GSP FW, GSP + // client allocate from global heap + // + pIsolationID = 
portMemAllocNonPaged(sizeof(FIFO_ISOLATIONID)); + NV_ASSERT_OR_RETURN((pIsolationID != NULL), NV_ERR_NO_MEMORY); + portMemSet(pIsolationID, 0, sizeof(FIFO_ISOLATIONID)); + + // + // Check if the allocation request is from the guest RM or host RM + // + processID = pClient->ProcID; + subProcessID = pClient->SubProcessID; + bIsSubProcessDisabled = pClient->bIsSubProcessDisabled; + + if (RMCFG_FEATURE_PLATFORM_GSP || kchannelCheckIsKernel(pKernelChannel)) + { + // + // If not GSPFW: Allocation request is from host RM kernel + // If GSPFW: ChID has already been chosen by CPU-RM, but pClient + // doesn't have the true processID, so just allow the whole pool. + // + pIsolationID->domain = HOST_KERNEL; + processID = KERNEL_PID; + } + else + { + if (0x0 != subProcessID) + { + // + // Allocation request is from the guest RM + // + if (KERNEL_PID == subProcessID) + { + pIsolationID->domain = GUEST_KERNEL; + } + else + { + pIsolationID->domain = GUEST_USER; + } + } + else + { + pIsolationID->domain = HOST_USER; + } + } + + pIsolationID->processID = processID; + pIsolationID->subProcessID = subProcessID; + + // + // Overwrite isolation ID if guest USERD isolation is disabled + // + if ((subProcessID != 0x0) && (bIsSubProcessDisabled)) + { + pIsolationID->domain = GUEST_INSECURE; + pIsolationID->subProcessID = KERNEL_PID; + } + + /* Channel USERD manipuliation only supported without GFID */ + if (bForceUserdPage) + { + NV_ASSERT_OR_RETURN(!bForceInternalIdx, NV_ERR_INVALID_STATE); + ChID64 = ((NvU64)userdPageIdx) * + pChidMgr->pGlobalChIDHeap->ownerGranularity + + internalIdx; + chFlag |= NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + } + else if (bForceInternalIdx) + { + chFlag |= NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX; + offsetAlign = internalIdx; + } + + status = pChidMgr->pGlobalChIDHeap->eheapAlloc( + pChidMgr->pGlobalChIDHeap, // This Heap + KFIFO_EHEAP_OWNER, // owner + &chFlag, // Alloc Flags + &ChID64, // Alloc Offset + &chSize, // Size + offsetAlign, // offsetAlign + 1, // sizeAlign + NULL, // Allocated mem block + pIsolationID, // Isolation ID + _kfifoUserdOwnerComparator // Fifo defined ownership comparator + ); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate Channel ID on heap\n"); + DBG_BREAKPOINT(); + goto fail; + } + } + + // + // Now allocate at a fixed offset from the pFifoDataHeap once the previous + // ID allocation told us which ID to use. + // + chFlag = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + status = pChidMgr->pFifoDataHeap->eheapAlloc( + pChidMgr->pFifoDataHeap, // This Heap + KFIFO_EHEAP_OWNER, // owner + &chFlag, // Alloc Flags + &ChID64, // Alloc Offset + &chSize, // Size + 1, // offsetAlign + 1, // sizeAlign + NULL, // Allocated mem block + NULL, // Isolation ID + NULL // ownership comparator + ); + + if (status != NV_OK) + { + // + // Should never happen since we're mirroring the global chid heap, or + // pre-reserving space on the global chid heap for SR-IOV capable + // systems. 
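+    // A failure here therefore indicates the two heaps have gone out of sync.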
+ // + NV_PRINTF(LEVEL_ERROR, "Failed to allocate Channel on fifo data heap\n"); + goto fail; + } + + ChID = NvU64_LO32(ChID64); + + if (ChID < numChannels) + { + PEMEMBLOCK pFifoDataBlock = pChidMgr->pFifoDataHeap->eheapGetBlock( + pChidMgr->pFifoDataHeap, + ChID, + NV_FALSE); + PEMEMBLOCK pIsolationIdBlock = pChidMgr->pGlobalChIDHeap->eheapGetBlock( + pChidMgr->pGlobalChIDHeap, + ChID, + NV_FALSE); + + if (IS_GFID_PF(gfid)) + pIsolationIdBlock->pData = pIsolationID; + + pFifoDataBlock->pData = pKernelChannel; + pKernelChannel->ChID = ChID; + } + else + { + NV_PRINTF(LEVEL_WARNING, "No allocatable FIFO available.\n"); + status = NV_ERR_NO_FREE_FIFOS; + goto fail; + } + return NV_OK; + +fail: + // We already know that pIsolationID is non-NULL here. + portMemFree(pIsolationID); + return status; +} + +/* + * Retain a channel ID which has already been allocated by + * kfifoChidMgrAllocChid. Until released, the HW channel ID will not be + * allocated by any new channels even after kfifoChidMgrFreeChid has been + * called. + */ +NV_STATUS +kfifoChidMgrRetainChid_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr, + NvU32 ChID +) +{ + NvU32 gfid; + PEMEMBLOCK pFifoDataBlock = NULL; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + if (IS_GFID_VF(gfid)) + { + NV_ASSERT_OR_RETURN(pChidMgr->ppVirtualChIDHeap[gfid] != NULL, + NV_ERR_INVALID_STATE); + PEMEMBLOCK pVirtChIdBlock = pChidMgr->ppVirtualChIDHeap[gfid]->eheapGetBlock( + pChidMgr->ppVirtualChIDHeap[gfid], + ChID, + NV_FALSE); + NV_ASSERT_OR_RETURN(pVirtChIdBlock != NULL, NV_ERR_OBJECT_NOT_FOUND); + NV_ASSERT(pVirtChIdBlock->refCount > 0); + ++pVirtChIdBlock->refCount; + } + else + { + NV_ASSERT_OR_RETURN(pChidMgr->pGlobalChIDHeap != NULL, NV_ERR_INVALID_STATE); + PEMEMBLOCK pChIdBlock = pChidMgr->pGlobalChIDHeap->eheapGetBlock( + pChidMgr->pGlobalChIDHeap, + ChID, + NV_FALSE); + NV_ASSERT_OR_RETURN(pChIdBlock != NULL, NV_ERR_OBJECT_NOT_FOUND); + NV_ASSERT(pChIdBlock->refCount > 0); + ++pChIdBlock->refCount; + } + + NV_ASSERT_OR_RETURN(pChidMgr->pFifoDataHeap != NULL, NV_ERR_INVALID_STATE); + pFifoDataBlock = pChidMgr->pFifoDataHeap->eheapGetBlock( + pChidMgr->pFifoDataHeap, + ChID, + NV_FALSE); + NV_ASSERT_OR_RETURN(pFifoDataBlock != NULL, NV_ERR_OBJECT_NOT_FOUND); + NV_ASSERT(pFifoDataBlock->refCount > 0); + ++pFifoDataBlock->refCount; + + return NV_OK; +} + +/* + * Drop the refcount on the given channel (ID), removing it from pFifo's heap if + * its refcount reaches 0. + */ +NV_STATUS +kfifoChidMgrReleaseChid_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr, + NvU32 ChID +) +{ + NvU32 gfid; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + if (IS_GFID_VF(gfid)) + { + NV_ASSERT_OR_RETURN(pChidMgr->ppVirtualChIDHeap[gfid] != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OK(pChidMgr->ppVirtualChIDHeap[gfid]->eheapFree(pChidMgr->ppVirtualChIDHeap[gfid], ChID)); + } + else + { + NV_ASSERT_OR_RETURN(pChidMgr->pGlobalChIDHeap != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OK(pChidMgr->pGlobalChIDHeap->eheapFree(pChidMgr->pGlobalChIDHeap, ChID)); + } + + NV_ASSERT_OR_RETURN(pChidMgr->pFifoDataHeap != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OK_OR_RETURN(pChidMgr->pFifoDataHeap->eheapFree(pChidMgr->pFifoDataHeap, ChID)); + + return NV_OK; +} + +/* + * Removes the association between pKernelChannel and its channel ID. Note that this + * will not remove the channel ID itself from pFifo's heap if + * fifoHeapRetainChannelId has been called. 
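+ * (The retain/release pair implementing that refcounting in this file is
+ * kfifoChidMgrRetainChid / kfifoChidMgrReleaseChid.)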
+ */ +NV_STATUS +kfifoChidMgrFreeChid_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr, + NvU32 ChID +) +{ + EMEMBLOCK *pFifoDataBlock; + NV_STATUS status; + NvU32 gfid; + + // + // This channel is going away, so clear its pointer from the channel ID's heap + // block. + // + pFifoDataBlock = pChidMgr->pFifoDataHeap->eheapGetBlock( + pChidMgr->pFifoDataHeap, + ChID, + NV_FALSE); + NV_ASSERT(pFifoDataBlock->refCount > 0); + pFifoDataBlock->pData = NULL; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + if (IS_GFID_PF(gfid)) + { + // + // This marks the channel ID as orphaned and causes it to be ignored for + // isolation purposes. This only matters if there will still be a reference + // on the ID after we release ours below. + // + status = _kfifoChidMgrFreeIsolationId(pChidMgr, ChID); + if(status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to free IsolationId. Status = 0x%x\n", status); + DBG_BREAKPOINT(); + return status; + } + } + + return kfifoChidMgrReleaseChid(pGpu, pKernelFifo, pChidMgr, ChID); +} + +NvU32 +kfifoChidMgrGetNumChannels_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr +) +{ + // Cache ChidMgr's numChannels if not set + if (pChidMgr->numChannels == 0) + { + NvU32 numChannels = kfifoRunlistQueryNumChannels_HAL(pGpu, pKernelFifo, + pChidMgr->runlistId); + + if (pKernelFifo->bNumChannelsOverride) + { + pChidMgr->numChannels = NV_MIN(pKernelFifo->numChannelsOverride, numChannels); + } + else + { + pChidMgr->numChannels = numChannels; + } + + // Once we have set calculated value disable any overrides. + pKernelFifo->bNumChannelsOverride = 0; + } + + return pChidMgr->numChannels; +} + +NvU32 +kfifoRunlistQueryNumChannels_KERNEL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 runlistId +) +{ + NvU32 numChannels = 0; + NvU32 status; + + // Do internal control call and set numChannels + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS numChannelsParams = {0}; + + numChannelsParams.runlistId = runlistId; + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_FIFO_GET_NUM_CHANNELS, + &numChannelsParams, + sizeof(NV2080_CTRL_INTERNAL_FIFO_GET_NUM_CHANNELS_PARAMS)); + if (status != NV_OK) + { + DBG_BREAKPOINT(); + return 0; + } + + numChannels = numChannelsParams.numChannels; + } + + NV_ASSERT(numChannels > 0); + + return numChannels; +} + +/** + * @brief reserves a hardware channel slot for a channel group + * + * Only responsible for indicating a hardware channel is in use, does not set + * any other software state. 
+ * + * This function is not called in broadcast mode + * + * @param pGpu + * @param pKernelFifo + * @param pChidMgr + * @param[out] grpID + */ +NV_STATUS +kfifoChidMgrAllocChannelGroupHwID_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr, + NvU32 *pChGrpID +) +{ + NvU32 maxChannelGroups; + + if (pChGrpID == NULL) + return NV_ERR_INVALID_ARGUMENT; + + maxChannelGroups = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + if (maxChannelGroups == 0) + { + NV_PRINTF(LEVEL_ERROR, "Zero max channel groups!!!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // Find the least unused grpID + *pChGrpID = nvBitFieldLSZero(pChidMgr->channelGrpMgr.pHwIdInUse, + pChidMgr->channelGrpMgr.hwIdInUseSz); + + if (*pChGrpID < maxChannelGroups) + { + nvBitFieldSet(pChidMgr->channelGrpMgr.pHwIdInUse, + pChidMgr->channelGrpMgr.hwIdInUseSz, *pChGrpID, NV_TRUE); + } + else + { + *pChGrpID = maxChannelGroups; + NV_PRINTF(LEVEL_ERROR, "No allocatable FIFO available.\n"); + return NV_ERR_NO_FREE_FIFOS; + } + return NV_OK; +} + + +/** + * @brief Releases a hardware channel group ID. + * + * Not responsible for freeing any software state beyond that which indicates a + * hardware channel is in use. + * + * This function is not called in broadcast mode + * + * @param pGpu + * @param pFifo + * @param pChidMgr + * @param chGrpID + */ +NV_STATUS +kfifoChidMgrFreeChannelGroupHwID_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr, + NvU32 chGrpID +) +{ + NvU32 maxChannelGroups; + + maxChannelGroups = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + if (maxChannelGroups == 0) + { + NV_PRINTF(LEVEL_ERROR, "Zero max channel groups!!!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + NV_ASSERT_OR_RETURN(chGrpID < maxChannelGroups, NV_ERR_INVALID_ARGUMENT); + + // + // Look for the channel group, check to make sure it's InUse bit is set + // and then clear it to indicate the grpID is no longer in use + // + NV_ASSERT(nvBitFieldTest(pChidMgr->channelGrpMgr.pHwIdInUse, + pChidMgr->channelGrpMgr.hwIdInUseSz, chGrpID)); + nvBitFieldSet(pChidMgr->channelGrpMgr.pHwIdInUse, + pChidMgr->channelGrpMgr.hwIdInUseSz, chGrpID, NV_FALSE); + + return NV_OK; +} + +CHID_MGR * +kfifoGetChidMgr_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 runlistId +) +{ + if (!kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + // We only have 1 chidmgr when we don't have a per-runlist channel RAM + if ((pKernelFifo->numChidMgrs != 1) || + (pKernelFifo->ppChidMgr == NULL) || + !bitVectorTest(&pKernelFifo->chidMgrValid, 0)) + { + return NULL; + } + return pKernelFifo->ppChidMgr[0]; + } + else + { + if (runlistId >= pKernelFifo->numChidMgrs) + { + return NULL; + } + // + // It is valid to return a NULL value as long as runlistId is less than + // maxNumRunlists since it is possible that not everything in the range + // [0, numChidMgrs) represents a valid runlistId. The onus is on the + // caller to check for NULL and only then use the CHIDMGR pointer + // + return pKernelFifo->ppChidMgr[runlistId]; + } +} + +/*! 
Gets associated CHIDMGR object for given FIFO_ENGINE_INFO_TYPE and value */ +NV_STATUS +kfifoGetChidMgrFromType_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 engineType, + NvU32 val, + CHID_MGR **ppChidMgr +) +{ + NV_STATUS status = NV_OK; + NvU32 runlistId; + + NV_CHECK_OR_RETURN(LEVEL_INFO, ppChidMgr != NULL, NV_ERR_INVALID_ARGUMENT); + + // Initialize the pointer to NULL, in case we fail and return early + *ppChidMgr = NULL; + + status = kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + engineType, val, + ENGINE_INFO_TYPE_RUNLIST, &runlistId); + NV_CHECK_OR_RETURN(LEVEL_INFO, NV_OK == status, status); + + *ppChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistId); + + return NV_OK; +} + +/*! + * @brief Fetch pKernelChannel based on chidmgr and chid. + * + * This look-up uses the chid heap. It should find the first allocation of the channel, + * which is useful if the handle is duped to another client. + * + * @param[in] pGpu + * @param[in] pKernelFifo + * @param[in] pChidMgr the ChIDMgr (per-runlist) + * @param[in] ChID the ChID + * + * @return the KernelChannel * or NULL + */ +KernelChannel * +kfifoChidMgrGetKernelChannel_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr, + NvU32 ChID +) +{ + EMEMBLOCK *pFifoDataBlock; + NvU32 numChannels; + + NV_ASSERT_OR_RETURN(pChidMgr != NULL, NULL); + // Lite mode channels don't have KernelChannel yet + NV_ASSERT_OR_RETURN(!kfifoIsLiteModeEnabled_HAL(pGpu, pKernelFifo), NULL); + + numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + if (ChID >= numChannels) + { + return NULL; + } + + pFifoDataBlock = pChidMgr->pFifoDataHeap->eheapGetBlock( + pChidMgr->pFifoDataHeap, + ChID, + NV_FALSE); + if (pFifoDataBlock != NULL) + { + return (KernelChannel *)pFifoDataBlock->pData; + } + + return NULL; +} + +/*! Gets channel group data corresponding to grpID */ +KernelChannelGroup * +kfifoChidMgrGetKernelChannelGroup_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHID_MGR *pChidMgr, + NvU32 grpID +) +{ + KernelChannelGroup *pKernelChannelGroup = NULL; + + pKernelChannelGroup = mapFind(pChidMgr->pChanGrpTree, grpID); + if (pKernelChannelGroup == NULL) + { + NV_PRINTF(LEVEL_INFO, "Can't find channel group %d\n", grpID); + } + + return pKernelChannelGroup; +} + +/*! + * @brief Gets channel group data corresponding to grpID + * + * This function is not called in broadcast mode + * + * @param pGpu + * @param pFifo + * @param[in] grpID + * @param[in] runlistID pass CHIDMGR_RUNLIST_ID_LEGACY if not known + * + * @returns KernelChannelGroup * on success + * NULL if channel group is not found + */ +KernelChannelGroup * +kfifoGetChannelGroup_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 grpID, + NvU32 runlistID +) +{ + CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistID); + + return kfifoChidMgrGetKernelChannelGroup(pGpu, pKernelFifo, pChidMgr, grpID); +} + +/*! 
Gets total number of channel groups in use */ +NvU32 +kfifoGetChannelGroupsInUse_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NvU32 numChannelGroups = 0; + NvU32 numChannelGroupsInUse = 0; + NvU32 chGrpID, i; + + for (i = 0; i < pKernelFifo->numChidMgrs; i++) + { + if (pKernelFifo->ppChidMgr[i] != NULL) + { + numChannelGroups = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, + pKernelFifo->ppChidMgr[i]); + + for (chGrpID = 0; chGrpID < numChannelGroups; chGrpID++) + { + if (nvBitFieldTest(pKernelFifo->ppChidMgr[i]->channelGrpMgr.pHwIdInUse, + pKernelFifo->ppChidMgr[i]->channelGrpMgr.hwIdInUseSz, + chGrpID)) + { + numChannelGroupsInUse++; + } + } + } + } + return numChannelGroupsInUse; +} + +/*! Gets total number of channel groups in use per engine */ +NvU32 +kfifoGetRunlistChannelGroupsInUse_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 runlistId +) +{ + NvU32 numChannelGroups = 0; + NvU32 numChannelGroupsInUse = 0; + NvU32 chGrpID; + CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistId); + + numChannelGroups = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + for (chGrpID = 0; chGrpID < numChannelGroups; chGrpID++) + { + if (nvBitFieldTest(pChidMgr->channelGrpMgr.pHwIdInUse, + pChidMgr->channelGrpMgr.hwIdInUseSz, + chGrpID)) + { + numChannelGroupsInUse++; + } + } + return numChannelGroupsInUse; +} + +/** + * @brief Sets the timeslice for the specified channel group. + * + * @returns NV_OK if success, appropriate error otherwise + */ +NV_STATUS +kfifoChannelGroupSetTimeslice_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + KernelChannelGroup *pKernelChannelGroup, + NvU64 timesliceUs, + NvBool bSkipSubmit +) +{ + NV_STATUS status = NV_OK; + + NV_PRINTF(LEVEL_INFO, "Setting TSG %d Timeslice to %lldus\n", + pKernelChannelGroup->grpID, timesliceUs); + + if (!RMCFG_FEATURE_PLATFORM_MODS && + (timesliceUs < kfifoRunlistGetMinTimeSlice_HAL(pKernelFifo))) + { + NV_PRINTF(LEVEL_ERROR, + "Setting Timeslice to %lldus not allowed. 
Min value is %lldus\n", + timesliceUs, kfifoRunlistGetMinTimeSlice_HAL(pKernelFifo)); + return NV_ERR_NOT_SUPPORTED; + } + + pKernelChannelGroup->timesliceUs = timesliceUs; + + NV_ASSERT_OK_OR_RETURN(kfifoChannelGroupSetTimesliceSched(pGpu, + pKernelFifo, + pKernelChannelGroup, + timesliceUs, + bSkipSubmit)); + + return status; +} + +void +kfifoFillMemInfo_IMPL +( + KernelFifo *pKernelFifo, + MEMORY_DESCRIPTOR *pMemDesc, + NV2080_CTRL_FIFO_MEM_INFO *pMemory +) +{ + if (pMemDesc == NULL) + { + pMemory->aperture = NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_INVALID; + NV_PRINTF(LEVEL_ERROR, "kfifoFillMemInfo: pMemDesc = NULL\n"); + } + else + { + if (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) + { + pMemory->aperture = NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM; + } + else if (memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) + { + if (memdescGetCpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + pMemory->aperture = NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH; + } + else if (memdescGetCpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) + { + pMemory->aperture = NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "kfifoFillMemInfo: Unknown cache attribute for sysmem aperture\n"); + NV_ASSERT(NV_FALSE); + } + } + pMemory->base = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + pMemory->size = pMemDesc->Size; + } +} + +void +kfifoGetChannelIterator_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHANNEL_ITERATOR *pIt +) +{ + portMemSet(pIt, 0, sizeof(*pIt)); + pIt->physicalChannelID = 0; + pIt->runlistId = 0; + pIt->numRunlists = 1; + if (kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + pIt->numRunlists = kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo); + } +} + +/** + * @brief Returns the next KernelChannel from the iterator. + * + * Iterates over runlist IDs and ChIDs and returns the next KernelChannel found + * on the heap, if any. + * + * (error guaranteed if pointer is NULL; non-NULL pointer guaranteed if NV_OK) + * + * @param[in] pGpu + * @param[in] pKernelFifo + * @param[in] pIt the channel iterator + * @param[out] ppKernelChannel returns a KernelChannel * + * + * @return NV_OK if the returned pointer is valid or error + */ +NV_STATUS kfifoGetNextKernelChannel_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHANNEL_ITERATOR *pIt, + KernelChannel **ppKernelChannel +) +{ + KernelChannel *pKernelChannel; + + if (ppKernelChannel == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *ppKernelChannel = NULL; + + while (pIt->runlistId < pIt->numRunlists) + { + CHID_MGR *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, pIt->runlistId); + + if (pChidMgr == NULL) + { + pIt->runlistId++; + continue; + } + + pIt->numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + while (pIt->physicalChannelID < pIt->numChannels) + { + pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, + pChidMgr, + pIt->physicalChannelID); + pIt->physicalChannelID++; + + // + // This iterator can be used during an interrupt, when a KernelChannel may + // be in the process of being destroyed. If a KernelChannel expects a pChannel + // but does not have one, it means it's being destroyed and we don't want to + // return it. + // + if (pKernelChannel == NULL) + continue; + if (!kchannelIsValid_HAL(pKernelChannel)) + continue; + *ppKernelChannel = pKernelChannel; + return NV_OK; + } + + pIt->runlistId++; + // Reset channel index to 0 for next runlist + pIt->physicalChannelID = 0; + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +/*! 
+ * @brief Performs an RPC into Host RM to read its device info table. + * + * This is necessary because in virtual environments, we cannot directly read + * the device info table, and do not have the physical GPU partitioning + * information to determine which engines belong to this guest, so we have Host + * RM do the filtering and send us the filtered table. + * + * @param[in] pGpu + * @param[in] pKernelFifo + * + * @return NV_OK if succcessful, + * NV_ERR_NOT_SUPPORTED if Host RM calls this interface + * NV_ERR_INVALID_STATE if host supplied invalid data + * NV_STATUS supplied by RPC response from Host + */ + +NV_STATUS +kfifoGetHostDeviceInfoTable_KERNEL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + ENGINE_INFO *pEngineInfo +) +{ + NV_STATUS status = NV_OK; + NvHandle hClient = NV01_NULL_OBJECT; + NvHandle hObject = NV01_NULL_OBJECT; + NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *pParams; + NV2080_CTRL_FIFO_DEVICE_ENTRY *pHostEntries; + NvU32 numEntries; + NvU32 device; + NvU32 entry; + NvU32 numRunlists; + NvU32 maxRunlistId; + NvU32 maxPbdmaId; + NvU32 minPbdmaFaultId; + NvU32 i; + struct { + NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS params; + NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES]; + } *pLocals; + + + NV_ASSERT_OR_RETURN(IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu), + NV_ERR_NOT_SUPPORTED); + + // RPC call for GSP will throw INVALID_CLIENT error with NULL handles + if (IS_GSP_CLIENT(pGpu)) + { + hClient = pGpu->hInternalClient; + hObject = pGpu->hInternalSubdevice; + } + + // Allocate pHostEntries and params on the heap to avoid stack overflow + pLocals = portMemAllocNonPaged(sizeof(*pLocals)); + NV_ASSERT_OR_RETURN((pLocals != NULL), NV_ERR_NO_MEMORY); + + pParams = &pLocals->params; + pHostEntries = pLocals->entries; + + // + // Read device info table entries from Host RM until Host indicates that + // there are no more valid entries in the table (by setting bMore flag) + // + numEntries = 0; + for (device = 0; + device < NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_DEVICES; + device += NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES) + { + portMemSet(pParams, 0x0, sizeof(*pParams)); + pParams->baseIndex = device; + + NV_RM_RPC_CONTROL(pGpu, + hClient, + hObject, + NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, + pParams, + sizeof(*pParams), + status); + + if (status != NV_OK) + goto cleanup; + + // Assert that host RM didn't tell us an invalid number of entries + if (pParams->numEntries > + NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES) + { + DBG_BREAKPOINT(); + status = NV_ERR_INVALID_STATE; + goto cleanup; + } + + portMemCopy(&pHostEntries[device], pParams->numEntries * + sizeof(NV2080_CTRL_FIFO_DEVICE_ENTRY), pParams->entries, pParams->numEntries * + sizeof(NV2080_CTRL_FIFO_DEVICE_ENTRY)); + + numEntries += pParams->numEntries; + + if (!pParams->bMore) + { + break; + } + } + + pEngineInfo->engineInfoListSize = numEntries; + pEngineInfo->engineInfoList = portMemAllocNonPaged(sizeof(*pEngineInfo->engineInfoList) * + pEngineInfo->engineInfoListSize); + if (pEngineInfo->engineInfoList == NULL) + { + NV_CHECK(LEVEL_ERROR, pEngineInfo->engineInfoList != NULL); + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + // Copy each entry from the table + numRunlists = 0; + maxRunlistId = 0; + maxPbdmaId = 0; + minPbdmaFaultId = NV_U32_MAX; + for (entry = 0; entry < numEntries; ++entry) + { + portMemCopy(pEngineInfo->engineInfoList[entry].engineData, + ENGINE_INFO_TYPE_INVALID * 
sizeof(*(pEngineInfo->engineInfoList[entry].engineData)), + pHostEntries[entry].engineData, + ENGINE_INFO_TYPE_INVALID * sizeof(*(pEngineInfo->engineInfoList[entry].engineData))); + + pEngineInfo->engineInfoList[entry].numPbdmas = pHostEntries[entry].numPbdmas; + portMemCopy(pEngineInfo->engineInfoList[entry].pbdmaIds, + FIFO_ENGINE_MAX_NUM_PBDMA * sizeof(*(pEngineInfo->engineInfoList[entry].pbdmaIds)), + pHostEntries[entry].pbdmaIds, + FIFO_ENGINE_MAX_NUM_PBDMA * sizeof(*(pEngineInfo->engineInfoList[entry].pbdmaIds))); + + portMemCopy(pEngineInfo->engineInfoList[entry].pbdmaFaultIds, + FIFO_ENGINE_MAX_NUM_PBDMA * sizeof(*(pEngineInfo->engineInfoList[entry].pbdmaFaultIds)), + pHostEntries[entry].pbdmaFaultIds, + FIFO_ENGINE_MAX_NUM_PBDMA * sizeof(*(pEngineInfo->engineInfoList[entry].pbdmaFaultIds))); + + portStringCopy((char *)pEngineInfo->engineInfoList[entry].engineName, + sizeof(pEngineInfo->engineInfoList[entry].engineName), + (char *)pHostEntries[entry].engineName, + FIFO_ENGINE_NAME_MAX_SIZE); + + if (0 != pEngineInfo->engineInfoList[entry].engineData[ENGINE_INFO_TYPE_IS_ENGINE]) + { + numRunlists++; + } + maxRunlistId = NV_MAX(maxRunlistId, + pHostEntries[entry].engineData[ENGINE_INFO_TYPE_RUNLIST]); + + for (i = 0; i < pEngineInfo->engineInfoList[entry].numPbdmas; i++) + { + maxPbdmaId = NV_MAX(maxPbdmaId, pEngineInfo->engineInfoList[entry].pbdmaIds[i]); + + // + // SW engine while being constructed does not populate any PBDMA Fault IDs. + // Hence, skipping it. + // + if (pEngineInfo->engineInfoList[entry].engineData[ENGINE_INFO_TYPE_ENG_DESC] != ENG_SW) + { + minPbdmaFaultId = NV_MIN(minPbdmaFaultId, pEngineInfo->engineInfoList[entry].pbdmaFaultIds[i]); + } + } + } + + // + // Host RM sends back a copy of their devinfo table, which includes the SW + // engine. This engine has no runlist, so decrement the runlist count. + // + if (numRunlists > 0) + { + --numRunlists; + } + + pEngineInfo->numRunlists = numRunlists; + pEngineInfo->maxNumRunlists = maxRunlistId + 1; + pEngineInfo->maxNumPbdmas = maxPbdmaId + 1; + pEngineInfo->basePbdmaFaultId = minPbdmaFaultId; + +cleanup: + portMemFree(pLocals); + + return status; +} + +/*! + * @brief Constructs EngineInfo List + * + * @param[in] pGpu + * @param[in] pKernelFifo + * + * @return NV_OK if succcessful, + * NV_STATUS supplied by HALs called + */ +NV_STATUS +kfifoConstructEngineList_KERNEL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + ENGINE_INFO *pEngineInfo = &pKernelFifo->engineInfo; + + // Return early if EngineList is already constructed + if (pEngineInfo->engineInfoList != NULL) + return NV_OK; + + if (IS_GSP_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN(gpuConstructDeviceInfoTable_HAL(pGpu)); + } + + NV_ASSERT_OK_OR_RETURN(kfifoGetHostDeviceInfoTable_HAL(pGpu, pKernelFifo, pEngineInfo)); + + return NV_OK; +} + +/** + * @brief Create a list of channels. + * + * @param pGpu + * @param pKernelFifo + * @param pList + */ +NV_STATUS +kfifoChannelListCreate_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + CHANNEL_LIST **ppList +) +{ + if (!ppList) + return NV_ERR_INVALID_ARGUMENT; + + *ppList = portMemAllocNonPaged(sizeof(CHANNEL_LIST)); + NV_ASSERT_OR_RETURN((*ppList != NULL), NV_ERR_NO_MEMORY); + + (*ppList)->pHead = NULL; + (*ppList)->pTail = NULL; + + return NV_OK; +} + +/** + * @brief Append a channel to a channel list. 
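+ *
+ * Links the new node at the tail of the list and increments the channel's
+ * refCount; kfifoChannelListRemove / kfifoChannelListDestroy drop that
+ * reference again.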
+ *
+ * @param pGpu
+ * @param pKernelFifo
+ * @param pKernelChannel
+ * @param pList
+ */
+
+NV_STATUS
+kfifoChannelListAppend_IMPL
+(
+    OBJGPU *pGpu,
+    KernelFifo *pKernel,
+    KernelChannel *pKernelChannel,
+    CHANNEL_LIST *pList
+)
+{
+    PCHANNEL_NODE pNewNode = NULL;
+
+    if (!pKernelChannel || !pList)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    pNewNode = portMemAllocNonPaged(sizeof(CHANNEL_NODE));
+    NV_ASSERT_OR_RETURN((pNewNode != NULL), NV_ERR_NO_MEMORY);
+
+    pNewNode->pKernelChannel = pKernelChannel;
+    pKernelChannel->refCount++;
+
+    pNewNode->pNext = NULL;
+
+    // Append the new node at the tail of the list
+    if (pList->pTail)
+    {
+        pList->pTail->pNext = pNewNode;
+        pList->pTail = pNewNode;
+    }
+    else
+    {
+        pList->pHead = pNewNode;
+        pList->pTail = pNewNode;
+    }
+
+    return NV_OK;
+}
+
+/**
+ * @brief Remove a channel from the given channel list.
+ *        Duplicate entries for the channel are removed as well.
+ *
+ * @param pGpu
+ * @param pKernelFifo
+ * @param pKernelChannel
+ * @param pList
+ */
+NV_STATUS
+kfifoChannelListRemove_IMPL
+(
+    OBJGPU *pGpu,
+    KernelFifo *pKernelFifo,
+    KernelChannel *pKernelChannel,
+    CHANNEL_LIST *pList
+)
+{
+    PCHANNEL_NODE pNewNode = NULL;
+    PCHANNEL_NODE pPrevNode = NULL;
+    PCHANNEL_NODE pTempNode = NULL;
+    NvBool bFoundOnce = NV_FALSE;
+    NV_STATUS status = NV_OK;
+
+    if (!pKernelChannel)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    if (!pList)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    pNewNode = pList->pHead;
+    pPrevNode = NULL;
+
+    while (pNewNode)
+    {
+
+        if (pKernelChannel != pNewNode->pKernelChannel)
+        {
+            pPrevNode = pNewNode;
+            pNewNode = pNewNode->pNext;
+            continue;
+        }
+
+        // Deleting first node
+        if (pList->pHead == pNewNode)
+        {
+            pList->pHead = pNewNode->pNext;
+        }
+
+        // Deleting tail node
+        if (pList->pTail == pNewNode)
+        {
+            pList->pTail = pPrevNode;
+        }
+
+        // First node does not have previous node.
+        if (pPrevNode)
+        {
+            pPrevNode->pNext = pNewNode->pNext;
+        }
+
+        pTempNode = pNewNode;
+        pNewNode = pNewNode->pNext;
+        portMemFree(pTempNode);
+
+        bFoundOnce = NV_TRUE;
+
+        if (0 == pKernelChannel->refCount)
+        {
+            NV_PRINTF(LEVEL_ERROR, "RefCount for channel is not right!!!\n");
+            DBG_BREAKPOINT();
+            status = NV_ERR_GENERIC;
+            break;
+        }
+
+        pKernelChannel->refCount--;
+    }
+
+
+    if (!bFoundOnce)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "Can't find channel in channelGroupList (Normal during RC Recovery on "
+                  "GK110+ or if software scheduling is enabled).\n");
+
+        status = NV_ERR_INVALID_CHANNEL;
+    }
+
+    return status;
+}
+
+/**
+ * @brief Destroy channel list.
+ *
+ * @param pGpu
+ * @param pKernelFifo
+ * @param pList
+ */
+NV_STATUS
+kfifoChannelListDestroy_IMPL
+(
+    OBJGPU *pGpu,
+    KernelFifo *pKernel,
+    CHANNEL_LIST *pList
+)
+{
+    PCHANNEL_NODE pTempNode;
+
+    if (!pList)
+        return NV_OK;
+
+    while (pList->pHead)
+    {
+        pTempNode = pList->pHead;
+
+        NV_ASSERT_OR_RETURN(pTempNode->pKernelChannel && pTempNode->pKernelChannel->refCount, NV_ERR_INVALID_STATE);
+
+        pTempNode->pKernelChannel->refCount--;
+
+        pList->pHead = pTempNode->pNext;
+        portMemFree(pTempNode);
+    }
+
+    portMemFree(pList);
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Determines whether provided engines have any channels/contexts assigned
+ *
+ * @param[IN]   pGpu           OBJGPU
+ * @param[IN]   pKernelFifo    KernelFifo
+ * @param[IN]   pEngines       Which engines to check (NV2080_ENGINE_TYPE_***)
+ * @param[IN]   engineCount    Number of engines to check
+ *
+ * @return Returns NV_TRUE if any provided engines are active
+ */
+NvBool
+kfifoEngineListHasChannel_IMPL
+(
+    OBJGPU     *pGpu,
+    KernelFifo *pKernelFifo,
+    NvU32      *pEngines,
+    NvU32       engineCount
+)
+{
+    KernelChannel *pKernelChannel;
+    CHANNEL_ITERATOR it;
+    NvU32 i;
+
+    NV_ASSERT_OR_RETURN((pEngines != NULL) && (engineCount > 0), NV_TRUE);
+
+    // Find any channels or contexts on passed engines
+    kfifoGetChannelIterator(pGpu, pKernelFifo, &it);
+    while (kchannelGetNextKernelChannel(pGpu, &it, &pKernelChannel) == NV_OK)
+    {
+        NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue);
+
+        // If the client supplied the engine type, directly check it
+        if (NV2080_ENGINE_TYPE_IS_VALID(kchannelGetEngineType(pKernelChannel)))
+        {
+            for (i = 0; i < engineCount; ++i)
+            {
+                if (kchannelGetEngineType(pKernelChannel) == pEngines[i])
+                {
+                    NV_PRINTF(LEVEL_ERROR,
+                              "Found channel on engine 0x%x owned by 0x%x\n",
+                              kchannelGetEngineType(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel));
+
+                    return NV_TRUE;
+                }
+            }
+        }
+        else
+        {
+            NvU32 runlistId;
+
+            // Ideally valid engine Id should always be set in channel if this property is enabled
+            NV_ASSERT_OR_RETURN(!kfifoIsPerRunlistChramEnabled(pKernelFifo), NV_TRUE);
+
+            //
+            // If runlist Id for channel is set then check if it matches with any of the engines
+            // If channel is not associated with any engine then there is a chance
+            // it can be created on one of the engines we care about.
+            //
+            if (kchannelIsRunlistSet(pGpu, pKernelChannel))
+            {
+                for (i = 0; i < engineCount; ++i)
+                {
+                    NV_ASSERT_OR_RETURN((kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo,
+                                             ENGINE_INFO_TYPE_NV2080, pEngines[i],
+                                             ENGINE_INFO_TYPE_RUNLIST, &runlistId) == NV_OK), NV_TRUE);
+                    if (kchannelGetRunlistId(pKernelChannel) == runlistId)
+                    {
+                        NV_PRINTF(LEVEL_ERROR,
+                                  "Found channel on runlistId 0x%x owned by 0x%x\n",
+                                  kchannelGetRunlistId(pKernelChannel), RES_GET_CLIENT_HANDLE(pKernelChannel));
+
+                        return NV_TRUE;
+                    }
+                }
+            }
+            else
+            {
+                NV_PRINTF(LEVEL_ERROR,
+                          "Found channel owned by 0x%x that can be associated to any engine\n",
+                          RES_GET_CLIENT_HANDLE(pKernelChannel));
+
+                return NV_TRUE;
+            }
+        }
+    }
+
+    return NV_FALSE;
+}
+
+/**
+ * @brief Get the runlist buffer memory pool for the given engine type
+ */
+CTX_BUF_POOL_INFO *
+kfifoGetRunlistBufPool_IMPL
+(
+    OBJGPU *pGpu,
+    KernelFifo *pKernelFifo,
+    NvU32 engineType
+)
+{
+    return pKernelFifo->pRunlistBufPool[engineType];
+}
+
+/**
+ * @brief Get size and alignment requirements for runlist buffers
+ *
+ * @param[in]  pGpu                Pointer to OBJGPU
+ * @param[in]  pKernelFifo         Pointer to KernelFifo
+ * @param[in]  runlistId           Runlist ID
+ * @param[in]  bTsgSupported       Is TSG supported
+ * @param[in]  maxRunlistEntries   Max entries to be supported in a runlist
+ * @param[out] pSize               Size of runlist buffer
+ * @param[out] pAlignment          Alignment for runlist buffer
+ */
+NV_STATUS
+kfifoGetRunlistBufInfo_IMPL
+(
+    OBJGPU       *pGpu,
+    KernelFifo   *pKernelFifo,
+    NvU32         runlistId,
+    NvBool        bTsgSupported,
+    NvU32         maxRunlistEntries,
+    NvU64        *pSize,
+    NvU64        *pAlignment
+)
+{
+    NvU32         runlistEntrySize = 0;
+    NvU32         maxRunlistEntriesSupported = 0;
+    CHID_MGR     *pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistId);
+
+    NV_ASSERT_OR_RETURN(pSize != NULL, NV_ERR_INVALID_ARGUMENT);
+
NV_ASSERT_OR_RETURN(pAlignment != NULL, NV_ERR_INVALID_ARGUMENT); + + if (kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + NV_ASSERT_OR_RETURN(pChidMgr != NULL, NV_ERR_INVALID_ARGUMENT); + // + // We assume worst case of one TSG wrapper per channel, and + // the number of TSGs + number of channels is how we get + // the 2 x number of fifos. + // + maxRunlistEntriesSupported = 2 * kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + } + else + { + maxRunlistEntriesSupported = kfifoGetMaxChannelsInSystem(pGpu, pKernelFifo); + maxRunlistEntriesSupported += (bTsgSupported ? + kfifoGetMaxChannelGroupsInSystem(pGpu, pKernelFifo) + : 0); + } + + NV_ASSERT_OR_RETURN(maxRunlistEntries <= maxRunlistEntriesSupported, NV_ERR_INVALID_ARGUMENT); + + if (maxRunlistEntries == 0) + { + maxRunlistEntries = maxRunlistEntriesSupported; + } + + runlistEntrySize = kfifoRunlistGetEntrySize_HAL(pKernelFifo); + *pSize = (NvU64)runlistEntrySize * maxRunlistEntries; + + *pAlignment = NVBIT64(kfifoRunlistGetBaseShift_HAL(pKernelFifo)); + return NV_OK; +} + +/*! + * @brief Gets total number of channels supported by the system + */ +NvU32 +kfifoGetMaxChannelsInSystem_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NvU32 numChannels = 0; + NvU32 i; + + for (i = 0; i < pKernelFifo->numChidMgrs; i++) + { + if (pKernelFifo->ppChidMgr[i] != NULL) + { + numChannels += kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pKernelFifo->ppChidMgr[i]); + } + } + return numChannels; +} + +/*! + * @brief Gets total number of channel groups supported by the system + */ +NvU32 +kfifoGetMaxChannelGroupsInSystem_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + // Max channel groups is the same as max channels + return kfifoGetMaxChannelsInSystem(pGpu, pKernelFifo); +} + +/*! + * @brief Get runlist buffer allocation params + * + * @param[in] pGpu + * @param[out] *pAperture Aperture to use for runlist buffer allocation + * @param[out] *pAttr Attributes to use for runlits buffer allocation + * @param[out] *pAllocFlags Allocation flags to use for runlist buffer allocation + */ +void +kfifoRunlistGetBufAllocParams_IMPL +( + OBJGPU *pGpu, + NV_ADDRESS_SPACE *pAperture, + NvU32 *pAttr, + NvU64 *pAllocFlags +) +{ + *pAperture = ADDR_FBMEM; + *pAttr = NV_MEMORY_WRITECOMBINED; + + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC, _RUNLIST, pGpu->instLocOverrides), + "runlist", pAperture, pAttr); + + *pAllocFlags = FLD_TEST_DRF(_REG_STR_RM, _INST_VPR, _RUNLIST, _TRUE, pGpu->instVprOverrides) + ? MEMDESC_ALLOC_FLAGS_PROTECTED : MEMDESC_FLAGS_NONE; +} + +/*! + * @brief Allocate Runlist buffers for a single runlistId + * + * @param[in] pGpu + * @param[in] pKernelFifo + * @param[in] bSupportTsg Will this runlist support TSGs? + * @param[in] aperture NV_ADDRESS_SPACE requested + * @param[in] runlistId runlistId to allocate buffer for + * @param[in] attr CPU cacheability requested + * @param[in] allocFlags MEMDESC_FLAGS_* + * @param[in] maxRunlistEntries Can pass zero to determine in function + * @param[in] bHWRL Is this runlist a HW runlist? 
(verif feature specific) + * @param[out] ppMemDesc memdesc created/allocated by function + */ +NV_STATUS +kfifoRunlistAllocBuffers_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvBool bSupportTsg, + NV_ADDRESS_SPACE aperture, + NvU32 runlistId, + NvU32 attr, + NvU64 allocFlags, + NvU64 maxRunlistEntries, + NvBool bHWRL, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + NV_STATUS status = NV_OK; + NvU64 runlistSz = 0; + NvU64 runlistAlign = 0; + NvU32 counter; + + status = kfifoGetRunlistBufInfo(pGpu, pKernelFifo, runlistId, bSupportTsg, + maxRunlistEntries, &runlistSz, &runlistAlign); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to get runlist buffer info 0x%08x\n", + status); + DBG_BREAKPOINT(); + goto failed; + } + + for (counter = 0; counter < NUM_BUFFERS_PER_RUNLIST; ++counter) + { + ppMemDesc[counter] = NULL; + + status = memdescCreate(&ppMemDesc[counter], pGpu, runlistSz, runlistAlign, + NV_TRUE, aperture, attr, allocFlags); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Runlist buffer memdesc create failed 0x%08x\n", status); + DBG_BREAKPOINT(); + goto failed; + } + + // If flag is set then allocate runlist from ctx buf pool + if (allocFlags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL) + { + NvU32 engineType; + CTX_BUF_POOL_INFO *pCtxBufPool = NULL; + status = kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, ENGINE_INFO_TYPE_RUNLIST, + runlistId, ENGINE_INFO_TYPE_NV2080, &engineType); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to translate runlistId 0x%x to NV2080 engine type\n", runlistId); + DBG_BREAKPOINT(); + goto failed; + } + status = ctxBufPoolGetGlobalPool(pGpu, CTX_BUF_ID_RUNLIST, engineType, &pCtxBufPool); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to get ctx buf pool for engine type 0x%x\n", engineType); + DBG_BREAKPOINT(); + goto failed; + } + status = memdescSetCtxBufPool(ppMemDesc[counter], pCtxBufPool); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to set ctx buf pool for runlistId 0x%x\n", runlistId); + DBG_BREAKPOINT(); + goto failed; + } + } + + status = memdescAlloc(ppMemDesc[counter]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Runlist buffer mem alloc failed 0x%08x\n", + status); + DBG_BREAKPOINT(); + goto failed; + } + } + + return NV_OK; + +failed: + for (counter = 0; counter < NUM_BUFFERS_PER_RUNLIST; counter++) + { + if (ppMemDesc[counter]) + { + memdescFree(ppMemDesc[counter]); + memdescDestroy(ppMemDesc[counter]); + ppMemDesc[counter] = NULL; + } + } + return status; +} + +NvU32 +kfifoGetMaxSubcontextFromGr_KERNEL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + NV_ASSERT_OR_RETURN(pKernelGraphicsManager != NULL, 0); + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized, 0); + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo != NULL, 0); + + return pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo->infoList[NV0080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT].data; +} + +NvU32 +kfifoReturnPushbufferCaps_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NvU32 kfifoBitMask = 0; + + // PCI is always supported + kfifoBitMask = PCI_PB_ALLOWED; + + if (!gpuIsUnifiedMemorySpaceEnabled(pGpu)) + { + kfifoBitMask |= VID_PB_ALLOWED; + } + + return kfifoBitMask; +} + +/*! + * @brief kfifoGetDeviceCaps + * + * This routine gets cap bits in unicast. 
If bCapsInitialized is passed as
+ * NV_FALSE, the caps will be copied into pKfifoCaps without OR/ANDING.
+ * Otherwise, the caps bits for the current GPU will be ORed/ANDed together with
+ * pKfifoCaps to create a single set of caps that accurately represents the
+ * functionality of the device.
+ */
+void kfifoGetDeviceCaps_IMPL
+(
+    OBJGPU     *pGpu,
+    KernelFifo *pKernelFifo,
+    NvU8       *pKfifoCaps,
+    NvBool      bCapsInitialized
+)
+{
+    NvU8        tempCaps[NV0080_CTRL_FIFO_CAPS_TBL_SIZE];
+    NvU8        temp;
+    NvU32       kfifoBitMask;
+
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    portMemSet(tempCaps, 0, NV0080_CTRL_FIFO_CAPS_TBL_SIZE);
+
+    kfifoBitMask = kfifoReturnPushbufferCaps(pGpu, pKernelFifo);
+
+    if (kfifoBitMask & PCI_PB_ALLOWED)
+        RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FIFO_CAPS, _SUPPORT_PCI_PB);
+    if (kfifoBitMask & VID_PB_ALLOWED)
+        RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FIFO_CAPS, _SUPPORT_VID_PB);
+
+    if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) &&
+        !gpuIsPipelinedPteMemEnabled(pGpu))
+        RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FIFO_CAPS, _NO_PIPELINED_PTE_BLIT);
+
+    if (kfifoIsUserdInSystemMemory(pKernelFifo))
+        RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FIFO_CAPS, _USERD_IN_SYSMEM);
+
+    if (kfifoIsUserdMapDmaSupported(pKernelFifo))
+        RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FIFO_CAPS, _GPU_MAP_CHANNEL);
+
+    if (kfifoHostHasLbOverflow(pKernelFifo))
+        RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FIFO_CAPS, _HAS_HOST_LB_OVERFLOW_BUG_1667921);
+
+    if (kfifoIsSubcontextSupported(pKernelFifo))
+        RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FIFO_CAPS, _MULTI_VAS_PER_CHANGRP);
+
+    if (kfifoIsWddmInterleavingPolicyEnabled(pKernelFifo))
+        RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FIFO_CAPS, _SUPPORT_WDDM_INTERLEAVING);
+
+    // if this is the first GPU in the device, then start with its caps
+    if (bCapsInitialized == NV_FALSE)
+    {
+        portMemCopy(pKfifoCaps, NV0080_CTRL_FIFO_CAPS_TBL_SIZE,
+                    tempCaps, NV0080_CTRL_FIFO_CAPS_TBL_SIZE);
+        return;
+    }
+
+    RMCTRL_AND_CAP(pKfifoCaps, tempCaps, temp,
+                   NV0080_CTRL_FIFO_CAPS, _SUPPORT_PCI_PB);
+    RMCTRL_AND_CAP(pKfifoCaps, tempCaps, temp,
+                   NV0080_CTRL_FIFO_CAPS, _SUPPORT_VID_PB);
+    RMCTRL_AND_CAP(pKfifoCaps, tempCaps, temp,
+                   NV0080_CTRL_FIFO_CAPS, _GPU_MAP_CHANNEL);
+
+    RMCTRL_OR_CAP(pKfifoCaps, tempCaps, temp,
+                  NV0080_CTRL_FIFO_CAPS, _MULTI_VAS_PER_CHANGRP);
+
+    RMCTRL_OR_CAP(pKfifoCaps, tempCaps, temp,
+                  NV0080_CTRL_FIFO_CAPS, _HAS_HOST_LB_OVERFLOW_BUG_1667921);
+
+    RMCTRL_OR_CAP(pKfifoCaps, tempCaps, temp,
+                  NV0080_CTRL_FIFO_CAPS, _SUPPORT_WDDM_INTERLEAVING);
+    return;
+}
+
+/*!
+ * @brief Add handlers for scheduling enable and/or disable.
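+ *
+ * A minimal usage sketch (the handler names below are illustrative only; the
+ * PFifoSchedulingHandler signature of NV_STATUS (*)(OBJGPU *, void *) is
+ * inferred from how the trigger paths below invoke pCallback):
+ *
+ *     static NV_STATUS _myPostEnable(OBJGPU *pGpu, void *pData) { return NV_OK; }
+ *     static NV_STATUS _myPreDisable(OBJGPU *pGpu, void *pData) { return NV_OK; }
+ *
+ *     NV_ASSERT_OK(kfifoAddSchedulingHandler(pGpu, pKernelFifo,
+ *                                            _myPostEnable, NULL,
+ *                                            _myPreDisable, NULL));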
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFifo KernelFifo pointer + * @param[in] pPostSchedulingEnableHandler No action if NULL + * @param[in] pPostSchedulingEnableHandlerData Data to pass to + * @p pPostSchedulingEnableHandler + * @param[in] pPreSchedulingDisableHandler No action if NULL + * @param[in] pPreSchedulingDisableHandlerData Data to pass to + * @p pPreSchedulingDisableHandler + * + * @returns NV_OK if successfully processed both handlers + * NV_WARN_NOTHING_TO_DO if: - Both handlers are NULL + * - Both handlers are already installed + * NV_ERR_INVALID_STATE if one handler is already installed, but not both + */ +NV_STATUS +kfifoAddSchedulingHandler_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + PFifoSchedulingHandler pPostSchedulingEnableHandler, + void *pPostSchedulingEnableHandlerData, + PFifoSchedulingHandler pPreSchedulingDisableHandler, + void *pPreSchedulingDisableHandlerData +) +{ + FifoSchedulingHandlerEntry *pEntry; + NvBool bPostHandlerAlreadyPresent = NV_FALSE; + NvBool bPreHandlerAlreadyPresent = NV_FALSE; + FifoSchedulingHandlerEntry postEntry; + FifoSchedulingHandlerEntry preEntry; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, + (pPostSchedulingEnableHandler != NULL) || + (pPreSchedulingDisableHandler != NULL), + NV_WARN_NOTHING_TO_DO); + + // Check for already installed handler if non-NULL + if (pPostSchedulingEnableHandler != NULL) + { + for (pEntry = listHead(&pKernelFifo->postSchedulingEnableHandlerList); + pEntry != NULL; + pEntry = listNext(&pKernelFifo->postSchedulingEnableHandlerList, pEntry)) + { + if (pEntry->pCallback == pPostSchedulingEnableHandler && + pEntry->pCallbackParam == pPostSchedulingEnableHandlerData) + { + bPostHandlerAlreadyPresent = NV_TRUE; + break; + } + } + } + + // Check for already installed handler if non-NULL + if (pPreSchedulingDisableHandler != NULL) + { + for (pEntry = listHead(&pKernelFifo->preSchedulingDisableHandlerList); + pEntry != NULL; + pEntry = listNext(&pKernelFifo->preSchedulingDisableHandlerList, pEntry)) + { + if (pEntry->pCallback == pPreSchedulingDisableHandler && + pEntry->pCallbackParam == pPreSchedulingDisableHandlerData) + { + bPreHandlerAlreadyPresent = NV_TRUE; + break; + } + } + } + + // + // If we are installing both handlers, and one is already present, but not + // the other, we will do nothing, so assert loudly in that case + // + if ((pPostSchedulingEnableHandler != NULL) && (pPreSchedulingDisableHandler != NULL)) + { + NV_ASSERT_OR_RETURN(!(bPostHandlerAlreadyPresent ^ bPreHandlerAlreadyPresent), + NV_ERR_INVALID_STATE); + } + + // Return early unless all non-null handlers are not already installed + NV_CHECK_OR_RETURN(LEVEL_SILENT, + !bPostHandlerAlreadyPresent && !bPreHandlerAlreadyPresent, + NV_WARN_NOTHING_TO_DO); + + // Add handler entry to list unless NULL + if (pPostSchedulingEnableHandler != NULL) + { + postEntry.pCallback = pPostSchedulingEnableHandler; + postEntry.pCallbackParam = pPostSchedulingEnableHandlerData; + postEntry.bHandled = NV_FALSE; + NV_ASSERT_OR_RETURN(listPrependValue(&pKernelFifo->postSchedulingEnableHandlerList, &postEntry), + NV_ERR_NO_MEMORY); + } + + // Add handler entry to list unless NULL + if (pPreSchedulingDisableHandler != NULL) + { + preEntry.pCallback = pPreSchedulingDisableHandler; + preEntry.pCallbackParam = pPreSchedulingDisableHandlerData; + preEntry.bHandled = NV_FALSE; + NV_ASSERT_OR_RETURN(listPrependValue(&pKernelFifo->preSchedulingDisableHandlerList, &preEntry), + NV_ERR_NO_MEMORY); + } + + return NV_OK; +} + +/*! 
+ * @brief Remove handlers for scheduling enable and/or disable. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFifo KernelFifo pointer + * @param[in] pPostSchedulingEnableHandler No action if NULL + * @param[in] pPostSchedulingEnableHandlerData Data argument set for the + * handler. + * @param[in] pPreSchedulingDisableHandler No action if NULL + * @param[in] pPreSchedulingDisableHandlerData Data argument set for the + * handler. + */ +void +kfifoRemoveSchedulingHandler_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + PFifoSchedulingHandler pPostSchedulingEnableHandler, + void *pPostSchedulingEnableHandlerData, + PFifoSchedulingHandler pPreSchedulingDisableHandler, + void *pPreSchedulingDisableHandlerData +) +{ + FifoSchedulingHandlerEntry *pEntry; + FifoSchedulingHandlerEntry *pTemp; + + // Search for the post handler in the post handler list and remove it if present + pEntry = listHead(&pKernelFifo->postSchedulingEnableHandlerList); + while (pEntry != NULL) + { + pTemp = listNext(&pKernelFifo->postSchedulingEnableHandlerList, pEntry); + + if (pEntry->pCallback == pPostSchedulingEnableHandler && + pEntry->pCallbackParam == pPostSchedulingEnableHandlerData) + { + listRemove(&pKernelFifo->postSchedulingEnableHandlerList, pEntry); + } + + pEntry = pTemp; + } + + // Search for the pre handler in the pre handler list and remove it if present + pEntry = listHead(&pKernelFifo->preSchedulingDisableHandlerList); + while (pEntry != NULL) + { + pTemp = listNext(&pKernelFifo->preSchedulingDisableHandlerList, pEntry); + + if (pEntry->pCallback == pPreSchedulingDisableHandler && + pEntry->pCallbackParam == pPreSchedulingDisableHandlerData) + { + listRemove(&pKernelFifo->preSchedulingDisableHandlerList, pEntry); + } + + pEntry = pTemp; + } +} + + +/*! + * @brief Notify handlers that scheduling has been enabled. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFifo KernelFifo pointer + * + * @returns NV_STATUS + */ +NV_STATUS +kfifoTriggerPostSchedulingEnableCallback_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NV_STATUS status = NV_OK; + FifoSchedulingHandlerEntry *pEntry; + NvBool bRetry = NV_FALSE; + + for (pEntry = listHead(&pKernelFifo->postSchedulingEnableHandlerList); + pEntry != NULL; + pEntry = listNext(&pKernelFifo->postSchedulingEnableHandlerList, pEntry)) + { + NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL, + status = NV_ERR_INVALID_STATE; break;); + + pEntry->bHandled = NV_FALSE; + status = pEntry->pCallback(pGpu, pEntry->pCallbackParam); + + // Retry mechanism: Some callbacks depend on other callbacks in this list. + bRetry = bRetry || (status == NV_WARN_MORE_PROCESSING_REQUIRED); + + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + // Quash retry status + status = NV_OK; + else if (status == NV_OK) + // Successfully handled, no need to retry + pEntry->bHandled = NV_TRUE; + else + // Actual error, abort + break; + } + + // If we hit an actual error or completed everything successfully, return early. 
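+    //
+    // Only a single retry pass follows; a handler that still does not return
+    // NV_OK in the second pass stops the loop and its status is returned.
+    //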
+ if ((status != NV_OK) || !bRetry) + return status; + + // Second pass, retry anything that asked nicely to be deferred + for (pEntry = listHead(&pKernelFifo->postSchedulingEnableHandlerList); + pEntry != NULL; + pEntry = listNext(&pKernelFifo->postSchedulingEnableHandlerList, pEntry)) + { + NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL, + status = NV_ERR_INVALID_STATE; break;); + + // Skip anything that was completed successfully + if (pEntry->bHandled) + continue; + + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + pEntry->pCallback(pGpu, pEntry->pCallbackParam), + break; ); + } + + return status; +} + + +/*! + * @brief Notify handlers that scheduling will soon be disabled. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelFifo KernelFifo pointer + * + * @returns NV_STATUS + */ +NV_STATUS +kfifoTriggerPreSchedulingDisableCallback_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NV_STATUS status = NV_OK; + FifoSchedulingHandlerEntry *pEntry; + NvBool bRetry = NV_FALSE; + + // First pass + for (pEntry = listHead(&pKernelFifo->preSchedulingDisableHandlerList); + pEntry != NULL; + pEntry = listNext(&pKernelFifo->preSchedulingDisableHandlerList, pEntry)) + { + NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL, + status = NV_ERR_INVALID_STATE; break;); + + pEntry->bHandled = NV_FALSE; + status = pEntry->pCallback(pGpu, pEntry->pCallbackParam); + + // Retry mechanism: Some callbacks depend on other callbacks in this list. + bRetry = bRetry || (status == NV_WARN_MORE_PROCESSING_REQUIRED); + + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + // Quash retry status + status = NV_OK; + else if (status == NV_OK) + // Successfully handled, no need to retry + pEntry->bHandled = NV_TRUE; + else + // Actual error, abort + break; + } + + // If we hit an actual error or completed everything successfully, return early. 
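+    //
+    // As in the post-scheduling-enable path above, only one retry pass is made
+    // for the pre-scheduling-disable handlers.
+    //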
+ if ((status != NV_OK) || !bRetry) + return status; + + // Second pass, retry anything that asked nicely to be deferred + for (pEntry = listHead(&pKernelFifo->preSchedulingDisableHandlerList); + pEntry != NULL; + pEntry = listNext(&pKernelFifo->preSchedulingDisableHandlerList, pEntry)) + { + NV_ASSERT_OR_ELSE(pEntry->pCallback != NULL, + status = NV_ERR_INVALID_STATE; break;); + + // Skip anything that was completed successfully + if (pEntry->bHandled) + continue; + + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + pEntry->pCallback(pGpu, pEntry->pCallbackParam), + break; ); + } + + return status; +} + +/** + * @brief Gets vChid corresponding to a sChid + */ +NV_STATUS +kfifoGetVChIdForSChId_FWCLIENT +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 sChId, + NvU32 gfid, + NvU32 engineId, + NvU32 *pVChid +) +{ + NV_ASSERT_OR_RETURN(pVChid != NULL, NV_ERR_INVALID_ARGUMENT); + *pVChid = sChId; + + return NV_OK; +} + +/* + * @brief Gets a list of engine ids that use this runlist + * + * @param[in] runlistId Runlist id + * @param[out] pOutEngineIds List of engineids + * @param[out] pNumEngines # of entries in pOutEngines + */ +NV_STATUS +kfifoGetEngineListForRunlist_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvU32 runlistId, + NvU32 *pOutEngineIds, + NvU32 *pNumEngines +) +{ + NV_STATUS status = NV_OK; + NvU32 numEngines = kfifoGetNumEngines_HAL(pGpu, pKernelFifo); + NvU32 i; + + // Sanity check the input + NV_CHECK_OR_RETURN(LEVEL_ERROR, pOutEngineIds != NULL, NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pNumEngines != NULL, NV_ERR_INVALID_ARGUMENT); + + *pNumEngines = 0; + NV_PRINTF(LEVEL_INFO, "Engine list for runlistId 0x%x:\n", runlistId); + + for (i = 0; i < numEngines; i++) + { + NvU32 engineType; + NvU32 thisRunlistId; + + NV_ASSERT_OK_OR_GOTO(status, + kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_INVALID, + i, + ENGINE_INFO_TYPE_RUNLIST, + &thisRunlistId), done); + if (runlistId == thisRunlistId) + { + NV_ASSERT_OK_OR_GOTO(status, + kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_INVALID, + i, + ENGINE_INFO_TYPE_NV2080, + &engineType), done); + pOutEngineIds[(*pNumEngines)++] = engineType; + + NV_PRINTF(LEVEL_INFO, "Engine name: %s\n", + kfifoGetEngineName_HAL(pKernelFifo, ENGINE_INFO_TYPE_NV2080, + engineType)); + } + } +done: + if ((status != NV_OK) && (*pNumEngines != 0)) + { + portMemSet(pOutEngineIds, 0, sizeof(NvU32) * (*pNumEngines)); + *pNumEngines = 0; + } + return status; +} + +/** + * @brief Return bitmask of currently allocated channels. + * + * TODO: Deprecate this later + */ +NvU32 +kfifoGetAllocatedChannelMask_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + CHID_MGR *pChidMgr = NULL; + KernelChannel *pKernelChannel = NULL; + NvU32 i; + NvU32 val; + NvU32 numChannels; + + // Return 0 if using the new channel structure + if (kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + NV_PRINTF(LEVEL_ERROR, "not supported with per-runlist channel RAM\n"); + return 0; + } + + pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, CHIDMGR_RUNLIST_ID_LEGACY); + numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + + // Build bitmask of allocated channels + for (i = 0, val = 0; i < numChannels; i++) + { + pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, pChidMgr, i); + val |= ((pKernelChannel != NULL) ? 1 << i : 0); + } + + return val; +} + +/*! 
+ * Get host channel class + */ +NvU32 +kfifoGetChannelClassId_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NvU32 numClasses; + NvU32 *pClassList = NULL; + CLI_CHANNEL_CLASS_INFO classInfo; + NvU32 i; + NvU32 class = 0; + + NV_ASSERT_OR_RETURN(NV_OK == gpuGetClassList(pGpu, &numClasses, NULL, ENG_KERNEL_FIFO), 0); + NV_ASSERT_OR_RETURN(numClasses > 0, 0); + pClassList = portMemAllocNonPaged(sizeof(NvU32) * numClasses); + NV_ASSERT_OR_RETURN((pClassList != NULL), 0); + + if (NV_OK == gpuGetClassList(pGpu, &numClasses, pClassList, ENG_KERNEL_FIFO)) + { + for (i = 0; i < numClasses; i++) + { + if (pClassList[i] == PHYSICAL_CHANNEL_GPFIFO) + { + // Skip the physical channel class + continue; + } + CliGetChannelClassInfo(pClassList[i], &classInfo); + if (classInfo.classType == CHANNEL_CLASS_TYPE_GPFIFO) + class = NV_MAX(class, pClassList[i]); + } + } + + NV_ASSERT(class); + portMemFree(pClassList); + return class; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_fifo_ctrl.c b/src/nvidia/src/kernel/gpu/fifo/kernel_fifo_ctrl.c new file mode 100644 index 000000000..349bdb415 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_fifo_ctrl.c @@ -0,0 +1,712 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/device/device.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "kernel/gpu/subdevice/subdevice_diag.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/core/locks.h" +#include "lib/base_utils.h" + +#include "vgpu/rpc.h" +#include "vgpu/vgpu_events.h" + +#include "class/cl0080.h" +#include "class/cl2080.h" +#include "class/cl208f.h" + +#include "ctrl/ctrl0080/ctrl0080fifo.h" + +static NV_STATUS _kfifoGetCaps(OBJGPU *pGpu, NvU8 *pKfifoCaps); + +/*! 
+ * + * @brief deviceCtrlCmdFifoGetChannelList + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +deviceCtrlCmdFifoGetChannelList_IMPL +( + Device *pDevice, + NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS *pChannelParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU32 *pChannelHandleList = NvP64_VALUE(pChannelParams->pChannelHandleList); + NvU32 *pChannelList = NvP64_VALUE(pChannelParams->pChannelList); + NvU32 counter; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // Validate input / Size / Args / Copy args + if (pChannelParams->numChannels == 0) + { + NV_PRINTF(LEVEL_ERROR, + "Invalid Params for command NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + for (counter = 0; counter < pChannelParams->numChannels; counter++) + { + KernelChannel *pKernelChannel; + NvU32 chid = NV0080_CTRL_FIFO_GET_CHANNELLIST_INVALID_CHANNEL; + NV_STATUS status; + + // Searching through the rm client db. + status = CliGetKernelChannel(RES_GET_CLIENT_HANDLE(pDevice), pChannelHandleList[counter], &pKernelChannel); + + if (status == NV_OK) + { + chid = pKernelChannel->ChID; + + // Amodel-specific : Encode runlist ID + if (pGpu && (IS_MODS_AMODEL(pGpu))) + { + chid |= ((kchannelGetRunlistId(pKernelChannel) & 0xffff) << 16); + } + } + + pChannelList[counter] = chid; + } + + return NV_OK; +} + +NV_STATUS +deviceCtrlCmdFifoIdleChannels_IMPL +( + Device *pDevice, + NV0080_CTRL_FIFO_IDLE_CHANNELS_PARAMS *pParams +) +{ + NvBool isGpuLockAcquired = NV_FALSE; + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + // Check buffer size against maximum + if (pParams->numChannels > NV0080_CTRL_CMD_FIFO_IDLE_CHANNELS_MAX_CHANNELS) + return NV_ERR_INVALID_ARGUMENT; + + // + // Acquire GPU lock manually in control call body instead of letting Resource + // Server do it to ensure that RM_LOCK_MODULES_FIFO is used. + // + if (!rmGpuLockIsOwner()) + { + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_FIFO); + + if (status != NV_OK) + goto done; + + isGpuLockAcquired = NV_TRUE; + } + + // + // Send RPC if running in Guest/CPU-RM. Do this manually instead of ROUTE_TO_PHYSICAL + // so that we can acquire the GPU lock in CPU-RM first. 
+ // + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + +done: + + if (isGpuLockAcquired) + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + return status; +} + +NV_STATUS +subdeviceCtrlCmdGetPhysicalChannelCount_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FIFO_GET_PHYSICAL_CHANNEL_COUNT_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 numChannelsInUse = 0; + NvU32 numChannels; + NvU32 i; + NvU32 chGrpID; + + pParams->physChannelCount = NV_U32_MAX; + pParams->physChannelCountInUse = 0; + + // TODO: Follow up with clients before turning on per esched chidmgr + for (i = 0; i < pKernelFifo->numChidMgrs; i++) + { + if (pKernelFifo->ppChidMgr[i] != NULL) + { + // Get the max number of HW channels on the runlist + numChannels = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pKernelFifo->ppChidMgr[i]); + + // Get the number of channels already in use + for (chGrpID = 0; chGrpID < numChannels; chGrpID++) + { + if (nvBitFieldTest(pKernelFifo->ppChidMgr[i]->channelGrpMgr.pHwIdInUse, + pKernelFifo->ppChidMgr[i]->channelGrpMgr.hwIdInUseSz, + chGrpID)) + { + numChannelsInUse++; + } + } + + pParams->physChannelCount = NV_MIN(pParams->physChannelCount, numChannels); + pParams->physChannelCountInUse = NV_MAX(pParams->physChannelCountInUse, numChannelsInUse); + } + } + return NV_OK; +} + +/*! + * @brief subdeviceCtrlCmdFifoGetInfo + * + * Lock Requirements: + * Assert that both the GPUs lock and API lock are held on entry. + */ +NV_STATUS +subdeviceCtrlCmdFifoGetInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FIFO_GET_INFO_PARAMS *pFifoInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU32 runlistId; + CHID_MGR *pChidMgr; + NvU32 i; + NvU32 data; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // error checck + if (pFifoInfoParams->fifoInfoTblSize > NV2080_CTRL_FIFO_GET_INFO_MAX_ENTRIES) + return NV_ERR_INVALID_PARAM_STRUCT; + + // step thru list + for (i = 0; i < pFifoInfoParams->fifoInfoTblSize; i++) + { + switch (pFifoInfoParams->fifoInfoTbl[i].index) + { + case NV2080_CTRL_FIFO_INFO_INDEX_INSTANCE_TOTAL: + data = memmgrGetRsvdMemorySize(pMemoryManager); + break; + case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS: + // + // TODO: Follow up with clients using this control call before + // turning on per esched chidmgr + // + data = kfifoGetMaxChannelGroupsInSystem(pGpu, pKernelFifo); + break; + case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNELS_PER_GROUP: + data = kfifoGetMaxChannelGroupSize_HAL(pKernelFifo); + break; + case NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE: + // + // TODO: Follow up with clients using this control call before + // turning on per esched chidmgr + // + data = kfifoGetChannelGroupsInUse(pGpu, pKernelFifo); + break; + case NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP: + // + // RM-SMC AMPERE-TODO This data is incompatible with SMC, where + // different engines can have different max VEID counts + // + data = kfifoGetMaxSubcontext_HAL(pGpu, pKernelFifo, NV_FALSE); + break; + case NV2080_CTRL_FIFO_INFO_INDEX_DEFAULT_CHANNEL_TIMESLICE: + { + NvU64 timeslice = 
kfifoChannelGroupGetDefaultTimeslice_HAL(pKernelFifo); + data = NvU64_LO32(timeslice); + NV_ASSERT_OR_RETURN((NvU64_HI32(timeslice) == 0), NV_ERR_INVALID_PARAM_STRUCT); + } + break; + case NV2080_CTRL_FIFO_INFO_INDEX_IS_PER_RUNLIST_CHANNEL_RAM_SUPPORTED: + data = (NvU32) kfifoIsPerRunlistChramEnabled(pKernelFifo); + break; + case NV2080_CTRL_FIFO_INFO_INDEX_MAX_CHANNEL_GROUPS_PER_ENGINE: + // Get runlist ID for Engine type. + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, pFifoInfoParams->engineType, + ENGINE_INFO_TYPE_RUNLIST, &runlistId)); + pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, runlistId); + data = kfifoChidMgrGetNumChannels(pGpu, pKernelFifo, pChidMgr); + break; + case NV2080_CTRL_FIFO_INFO_INDEX_CHANNEL_GROUPS_IN_USE_PER_ENGINE: + // Get runlist ID for Engine type. + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, pFifoInfoParams->engineType, + ENGINE_INFO_TYPE_RUNLIST, &runlistId)); + data = kfifoGetRunlistChannelGroupsInUse(pGpu, pKernelFifo, runlistId); + break; + default: + data = 0; + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + if (status != NV_OK) + break; + + // save off data value + pFifoInfoParams->fifoInfoTbl[i].data = data; + } + + return status; +} + +/*! + * @brief subdeviceCtrlCmdFifoGetUserdLocation + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdFifoGetUserdLocation_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_PARAMS *pUserdLocationParams +) +{ + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + Device *pDevice; + NvU32 userdAperture; + NvU32 userdAttribute; + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + if (rmStatus != NV_OK) + return NV_ERR_INVALID_DEVICE; + + rmStatus = kfifoGetUserdLocation_HAL(pKernelFifo, + &userdAperture, + &userdAttribute); + + if (rmStatus != NV_OK) + return rmStatus; + + // Support for NVLINK coherent memory is not yet available in RM + + if (userdAperture == ADDR_FBMEM) + { + pUserdLocationParams->aperture = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_VIDMEM; + } + else if (userdAperture == ADDR_SYSMEM) + { + pUserdLocationParams->aperture = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_APERTURE_SYSMEM; + } + else + { + NV_PRINTF(LEVEL_ERROR, "Invalid userdAperture value = 0x%08x\n", + userdAperture); + return NV_ERR_INVALID_STATE; + } + + if (userdAttribute == NV_MEMORY_CACHED) + { + pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_CACHED; + } + else if (userdAttribute == NV_MEMORY_UNCACHED) + { + pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_UNCACHED; + } + else if (userdAttribute == NV_MEMORY_WRITECOMBINED) + { + pUserdLocationParams->attribute = NV2080_CTRL_CMD_FIFO_GET_USERD_LOCATION_ATTRIBUTE_WRITECOMBINED; + } + else + { + NV_PRINTF(LEVEL_ERROR, "Invalid userdAttribute value = 0x%08x\n", + userdAttribute); + return NV_ERR_INVALID_STATE; + } + + return rmStatus; +} + +/*! 
+ * @brief subdeviceCtrlCmdFifoGetChannelMemInfo + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdFifoGetChannelMemInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_INFO_PARAMS *pChannelMemParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + Device *pDevice; + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + NV_STATUS rmStatus = NV_OK; + NvU32 index; + NvU32 runqueues; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + KernelChannel *pKernelChannel; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NV2080_CTRL_FIFO_CHANNEL_MEM_INFO chMemInfo; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + if (rmStatus != NV_OK) + return NV_ERR_INVALID_DEVICE; + + rmStatus = CliGetKernelChannelWithDevice(pClient->hClient, + RES_GET_HANDLE(pDevice), + pChannelMemParams->hChannel, + &pKernelChannel); + if (rmStatus != NV_OK) + { + return NV_ERR_INVALID_CHANNEL; + } + + portMemSet((void *)&chMemInfo, 0, sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO)); + + // Get Inst Block Mem Info + rmStatus = kfifoChannelGetFifoContextMemDesc_HAL(pGpu, + pKernelFifo, + pKernelChannel, + FIFO_CTX_INST_BLOCK, + &pMemDesc); + if (rmStatus != NV_OK) + return rmStatus; + + kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.inst); + + // Get RAMFC mem Info + pMemDesc = NULL; + kfifoChannelGetFifoContextMemDesc_HAL(pGpu, + pKernelFifo, + pKernelChannel, + FIFO_CTX_RAMFC, + &pMemDesc); + kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.ramfc); + + // Get Method buffer mem info + runqueues = kfifoGetNumRunqueues_HAL(pGpu, pKernelFifo); + NV_ASSERT((runqueues <= NV2080_CTRL_FIFO_GET_CHANNEL_MEM_INFO_MAX_COUNT)); + for (index = 0; index < runqueues; index++) + { + pMemDesc = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pMthdBuffers[index].pMemDesc; + if (pMemDesc != NULL) + { + kfifoFillMemInfo(pKernelFifo, pMemDesc, &chMemInfo.methodBuf[index]); + chMemInfo.methodBufCount++; + } + } + + // copy into the kernel structure, there is no userland pointer + // maybe later structure is copied out to userland + portMemCopy(&pChannelMemParams->chMemInfo, + sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO), + &chMemInfo, + sizeof(NV2080_CTRL_FIFO_CHANNEL_MEM_INFO)); + + return rmStatus; +} + +NV_STATUS +diagapiCtrlCmdFifoEnableVirtualContext_IMPL +( + DiagApi *pDiagApi, + NV208F_CTRL_FIFO_ENABLE_VIRTUAL_CONTEXT_PARAMS *pEnableVCParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi); + Device *pDevice; + NV_STATUS rmStatus = NV_OK; + KernelChannel *pKernelChannel = NULL; + RsClient *pClient = RES_GET_CLIENT(pDiagApi); + + rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + if (rmStatus != NV_OK) + return NV_ERR_INVALID_DEVICE; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + CliGetKernelChannelWithDevice(pClient->hClient, + RES_GET_HANDLE(pDevice), + pEnableVCParams->hChannel, + &pKernelChannel)); + + rmStatus = kchannelEnableVirtualContext_HAL(pKernelChannel); + return rmStatus; +} + +/*! + * @brief subdeviceCtrlCmdFifoUpdateChannelInfo + * + * This function is broken for SLI. + * Will be fixed after instance block and userd + * is made unicast. 
+ * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdFifoUpdateChannelInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FIFO_UPDATE_CHANNEL_INFO_PARAMS *pChannelInfo +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelChannel *pKernelChannel = NULL; + NV_STATUS status = NV_OK; + NvU64 userdAddr = 0; + NvU32 userdAper = 0; + + // Bug 724186 -- Skip this check for deferred API + LOCK_ASSERT_AND_RETURN(pRmCtrlParams->bDeferredApi || rmGpuLockIsOwner()); + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + CliGetKernelChannel(pChannelInfo->hClient, + pChannelInfo->hChannel, + &pKernelChannel)); + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + + if (!pChannelInfo->hUserdMemory) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (!pKernelChannel->bClientAllocatedUserD) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + if (status != NV_OK) + return status; + + // Destroy the submemdescriptor of the previous USERD + kchannelDestroyUserdMemDesc_HAL(pGpu, pKernelChannel); + + // Get the userd hMemory and create a submemdescriptor + // Store it in pKernelChannel + status = kchannelCreateUserdMemDesc_HAL(pGpu, pKernelChannel, hClient, + pChannelInfo->hUserdMemory, + pChannelInfo->userdOffset, + &userdAddr, &userdAper); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "kchannelCreateUserdMemDesc_HAL" + "failed for hClient 0x%x and channel 0x%x status 0x%x\n", + hClient, kchannelGetDebugTag(pKernelChannel), status); + } + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + + return status; +} + +NV_STATUS +diagapiCtrlCmdFifoGetChannelState_IMPL +( + DiagApi *pDiagApi, + NV208F_CTRL_FIFO_GET_CHANNEL_STATE_PARAMS *pChannelStateParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi); + KernelChannel *pKernelChannel; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + CliGetKernelChannel(pChannelStateParams->hClient, pChannelStateParams->hChannel, &pKernelChannel)); + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + kchannelGetChannelPhysicalState(pGpu, pKernelChannel, pChannelStateParams)); + + // Fill out kernel state here + pChannelStateParams->bCpuMap = kchannelIsCpuMapped(pGpu, pKernelChannel); + pChannelStateParams->bRunlistSet = kchannelIsRunlistSet(pGpu, pKernelChannel); + + return NV_OK; +} + +static NV_STATUS +_kfifoGetCaps +( + OBJGPU *pGpu, + NvU8 *pKfifoCaps +) +{ + NV_STATUS rmStatus = NV_OK; + NvBool bCapsInitialized = NV_FALSE; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + VERIFY_OBJ_PTR(pKernelFifo); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + if (pKernelFifo == NULL) + { + rmStatus = NV_ERR_INVALID_POINTER; + SLI_LOOP_BREAK; + } + kfifoGetDeviceCaps(pGpu, pKernelFifo, pKfifoCaps, bCapsInitialized); + bCapsInitialized = NV_TRUE; + } + SLI_LOOP_END + + return rmStatus; +} + +/*! 
+ * @brief deviceCtrlCmdFifoGetCaps + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +deviceCtrlCmdFifoGetCaps_IMPL +( + Device *pDevice, + NV0080_CTRL_FIFO_GET_CAPS_PARAMS *pKfifoCapsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU8 *pKfifoCaps = NvP64_VALUE(pKfifoCapsParams->capsTbl); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // sanity check array size + if (pKfifoCapsParams->capsTblSize != NV0080_CTRL_FIFO_CAPS_TBL_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n", + pKfifoCapsParams->capsTblSize, + NV0080_CTRL_FIFO_CAPS_TBL_SIZE); + return NV_ERR_INVALID_ARGUMENT; + } + + // now accumulate caps for entire device + return _kfifoGetCaps(pGpu, pKfifoCaps); +} + +/*! + * @brief deviceCtrlCmdFifoGetCapsV2 + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +deviceCtrlCmdFifoGetCapsV2_IMPL +( + Device *pDevice, + NV0080_CTRL_FIFO_GET_CAPS_V2_PARAMS *pKfifoCapsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU8 *pKfifoCaps = pKfifoCapsParams->capsTbl; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // now accumulate caps for entire device + return _kfifoGetCaps(pGpu, pKfifoCaps); +} + +/** + * @brief Disables or enables the given channels. + */ +NV_STATUS +subdeviceCtrlCmdFifoDisableChannels_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS *pDisableChannelParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + // Validate use of pRunlistPreemptEvent to allow use by Kernel clients only + if ((pDisableChannelParams->pRunlistPreemptEvent != NULL) && + (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Send RPC to handle message on Host-RM + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + } + // Send internal control call to actually disable channels + else + { + status = NV_ERR_NOT_SUPPORTED; + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_fifo_init.c b/src/nvidia/src/kernel/gpu/fifo/kernel_fifo_init.c new file mode 100644 index 000000000..262ce3e59 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_fifo_init.c @@ -0,0 +1,216 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_sched_mgr.h" +#include "kernel/gpu/rc/kernel_rc.h" + +#include "vgpu/vgpu_events.h" + +#include "nvRmReg.h" + +#include "class/cl2080.h" + +static void _kfifoPreConstructRegistryOverrides(OBJGPU *pGpu, KernelFifo *pKernelFifo); + +NV_STATUS +kfifoConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + ENGDESCRIPTOR engDesc +) +{ + NvU32 i; + + _kfifoPreConstructRegistryOverrides(pGpu, pKernelFifo); + + portMemSet((void *)&pKernelFifo->engineInfo, 0, sizeof(ENGINE_INFO)); + + portMemSet((void *)&pKernelFifo->userdInfo, 0, sizeof(PREALLOCATED_USERD_INFO)); + + for (i = 0; i < NV2080_ENGINE_TYPE_LAST; i++) + { + pKernelFifo->pRunlistBufPool[i] = NULL; + } + + pKernelFifo->pDummyPageMemDesc = NULL; + pKernelFifo->pppRunlistBufMemDesc = NULL; + + NV_ASSERT_OK_OR_RETURN(kfifoConstructHal_HAL(pGpu, pKernelFifo)); + + listInit(&pKernelFifo->postSchedulingEnableHandlerList, + portMemAllocatorGetGlobalNonPaged()); + listInit(&pKernelFifo->preSchedulingDisableHandlerList, + portMemAllocatorGetGlobalNonPaged()); + + return NV_OK; +} + +static void +_kfifoPreConstructRegistryOverrides +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + NvU32 data32; + + pKernelFifo->bNumChannelsOverride = 0; + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_NUM_FIFOS, + &pKernelFifo->numChannelsOverride) == NV_OK) + { + pKernelFifo->bNumChannelsOverride = 1; + } + + pKernelFifo->bPerRunlistChramOverride = NV_FALSE; + + if ((osReadRegistryDword(pGpu, NV_REG_STR_RM_SUPPORT_USERD_MAP_DMA, + &data32) == NV_OK) && data32) + { + NV_PRINTF(LEVEL_ERROR, "Enabling MapMemoryDma of USERD\n"); + pKernelFifo->bUserdMapDmaSupported = NV_TRUE; + } + + return; +} + +void +kfifoDestruct_IMPL +( + KernelFifo *pKernelFifo +) +{ + ENGINE_INFO *pEngineInfo = &pKernelFifo->engineInfo; + + // Free all outstanding callback entries + listDestroy(&pKernelFifo->postSchedulingEnableHandlerList); + listDestroy(&pKernelFifo->preSchedulingDisableHandlerList); + + if (pKernelFifo->pKernelSchedMgr != NULL) + { + objDelete(pKernelFifo->pKernelSchedMgr); + pKernelFifo->pKernelSchedMgr = NULL; + } + + portMemFree(pEngineInfo->engineInfoList); + pEngineInfo->engineInfoList = NULL; + + return; +} + +NV_STATUS +kfifoStateInitLocked_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + + // Check if per runlist channel ram should be enabled + if (kfifoIsPerRunlistChramSupportedInHw(pKernelFifo) && + !pKernelFifo->bPerRunlistChramOverride) + { + // + // On production platforms. SRIOV gets enabled + // only on host RM for SR-IOV capable SKUs (See gpuInitRegistryOverrides). 
+ // On MODS, the tests use the regkey to turn on SR-IOV + // + { + if (gpuIsSriovEnabled(pGpu)) + { + NV_PRINTF(LEVEL_INFO, "Enabling per runlist channel RAM on host RM\n"); + pKernelFifo->bUsePerRunlistChram = NV_TRUE; + } + } + } + + NV_ASSERT_OK_OR_RETURN(kfifoChidMgrConstruct(pGpu, pKernelFifo)); + + if (pKernelRc != NULL) + { + krcInitRegistryOverridesDelayed(pGpu, pKernelRc); + } + + return NV_OK; +} + +void +kfifoStateDestroy_IMPL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo +) +{ + KernelChannel *pKernelChannel; + CHANNEL_ITERATOR chanIt; + + // On LDDM, we don't free these during freechannel because it's possible + // we wouldn't be able to reallocate them (we want to keep them preallocated + // from boot time). But we need to free before shutdown, so do that here. + kfifoGetChannelIterator(pGpu, pKernelFifo, &chanIt); + while ((kfifoGetNextKernelChannel(pGpu, pKernelFifo, &chanIt, &pKernelChannel) == NV_OK)) + { + NvU32 engineType; + + engineType = kchannelGetEngineType(pKernelChannel); + + if (NV2080_ENGINE_TYPE_IS_GR(engineType)) + { + MEMORY_DESCRIPTOR *grCtxBufferMemDesc = NULL; + NvU32 grIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + + NV_ASSERT_OK( + kchangrpGetEngineContextMemDesc(pGpu, + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup, + &grCtxBufferMemDesc)); + + if (grCtxBufferMemDesc != NULL) + { + memdescFree(grCtxBufferMemDesc); + memdescDestroy(grCtxBufferMemDesc); + + // Now clear context buffer pointer. + NV_ASSERT_OK(kchannelUnmapEngineCtxBuf(pGpu, pKernelChannel, ENG_GR(grIdx))); + + NV_ASSERT_OK( + kchannelSetEngineContextMemDesc(pGpu, + pKernelChannel, + ENG_GR(grIdx), + NULL)); + } + } + } + + // Notify the handlers that the channel will soon be disabled + NV_ASSERT_OK(kfifoTriggerPreSchedulingDisableCallback(pGpu, pKernelFifo)); + + // + // Free up allocated memory. + // + kfifoChidMgrDestruct(pKernelFifo); + + return; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_idle_channels.c b/src/nvidia/src/kernel/gpu/fifo/kernel_idle_channels.c new file mode 100644 index 000000000..bdd41cce3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_idle_channels.c @@ -0,0 +1,292 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "rmapi/rmapi.h" +#include "core/locks.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "vgpu/rpc.h" +#include "kernel/gpu/fifo/kernel_fifo.h" + +NV_STATUS +kfifoIdleChannelsPerDevice_KERNEL +( + OBJGPU *pGpu, + KernelFifo *pKernelFifo, + NvHandle *phClients, + NvHandle *phDevices, + NvHandle *phChannels, + NvU32 numChannels, + NvU32 flags, + NvU32 timeout +) +{ + NV_STATUS rmStatus = NV_OK; + NV_RM_RPC_IDLE_CHANNELS(pGpu, phClients, phDevices, phChannels, + numChannels, flags, timeout, rmStatus); + return rmStatus; +} + + +NV_STATUS +RmIdleChannels +( + NvHandle hClient, + NvHandle hDevice, + NvHandle hChannel, + NvU32 numChannels, + NvP64 clients, + NvP64 devices, + NvP64 channels, + NvU32 flags, + NvU32 timeout, + NvBool bUserModeArgs +) +{ + OBJGPU *pGpu; + KernelFifo *pKernelFifo; + NV_STATUS rmStatus = NV_OK; + RMAPI_PARAM_COPY paramCopyClients; + RMAPI_PARAM_COPY paramCopyDevices; + RMAPI_PARAM_COPY paramCopyChannels; + NvU32 gpuIdx, chanIdx; + NvHandle *phClients = NULL; + NvHandle *phDevices = NULL; + NvHandle *phChannels = NULL; + NvU32 numChannelsPerGpu[NV_MAX_DEVICES] = {0}; + NvBool isGpuGrpLockAcquired = NV_FALSE; + NvU32 gpuLockMask = 0; + + NV_PRINTF(LEVEL_INFO, "hChannel: 0x%x, numChannels: %u\n", hChannel, + numChannels); + + LOCK_METER_DATA(IDLE_CHANNELS, flags, numChannels, 0); + + switch(DRF_VAL(OS30, _FLAGS, _CHANNEL, flags)) + { + case NVOS30_FLAGS_CHANNEL_SINGLE: + numChannels = 1; + phClients = &hClient; + phDevices = &hDevice; + phChannels = &hChannel; + break; + + case NVOS30_FLAGS_CHANNEL_LIST: + + if (numChannels == 0) + { + return NV_OK; + } + + // setup for access to client's parameters + RMAPI_PARAM_COPY_INIT(paramCopyClients, + phClients, + clients, + numChannels, sizeof(NvU32)); + + rmStatus = rmapiParamsAcquire(¶mCopyClients, bUserModeArgs); + if (rmStatus != NV_OK) + goto done; + + RMAPI_PARAM_COPY_INIT(paramCopyDevices, + phDevices, + devices, + numChannels, sizeof(NvU32)); + + rmStatus = rmapiParamsAcquire(¶mCopyDevices, bUserModeArgs); + if (rmStatus != NV_OK) + goto done; + + RMAPI_PARAM_COPY_INIT(paramCopyChannels, + phChannels, + channels, + numChannels, sizeof(NvU32)); + + rmStatus = rmapiParamsAcquire(¶mCopyChannels, bUserModeArgs); + if (rmStatus != NV_OK) + goto done; + + break; + default: + return NV_ERR_INVALID_FLAGS; + } + + // This loop converts subdevice handles to device handles (IMO it's hopeless + // to try to idle a channel on just one subdevice, if it's in use on + // both). + for (chanIdx = 0; chanIdx < numChannels; chanIdx++) + { + NvU32 gpuInst; + RsClient *pClient; + GpuResource *pGpuResource; + + // + // Don't allow other clients' resources to be accessed/modified by this + // control call. 
+ // + if (hClient != phClients[chanIdx]) + { + rmStatus = NV_ERR_INSUFFICIENT_PERMISSIONS; + goto done; + } + + rmStatus = serverGetClientUnderLock(&g_resServ, phClients[chanIdx], + &pClient); + if (rmStatus != NV_OK) + goto done; + + rmStatus = gpuresGetByDeviceOrSubdeviceHandle(pClient, + phDevices[chanIdx], + &pGpuResource); + if (rmStatus != NV_OK) + goto done; + + pGpu = GPU_RES_GET_GPU(pGpuResource); + + GPU_RES_SET_THREAD_BC_STATE(pGpuResource); + + // Update hDevice if it was originally a hSubdevice + phDevices[chanIdx] = RES_GET_HANDLE(GPU_RES_GET_DEVICE(pGpuResource)); + + // Update lock mask + gpuInst = gpuGetInstance(pGpu); + numChannelsPerGpu[gpuInst] += 1; + gpuLockMask |= gpumgrGetGrpMaskFromGpuInst(gpuInst); + } + + // This acquire locks(If not already acquired) in ascending order of + // gpus we're trying to idle. + if (!rmGpuGroupLockIsOwner(0, GPU_LOCK_GRP_MASK, &gpuLockMask)) + { + // LOCK: acquire Device lock + rmStatus = rmGpuGroupLockAcquire(0, GPU_LOCK_GRP_MASK, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_FIFO, &gpuLockMask); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to acquire Device lock, error 0x%x\n", rmStatus); + goto done; + } + isGpuGrpLockAcquired = NV_TRUE; + } + + // Loop over all devices, idling those necessary given this channel list + for (gpuIdx = 0; gpuIdx < NV_MAX_DEVICES; ++gpuIdx) + { + NvHandle *pPerGpuClients; + NvHandle *pPerGpuDevices; + NvHandle *pPerGpuChannels; + NvU32 perGpuIdx = 0; + + if (numChannelsPerGpu[gpuIdx] == 0) + { + continue; + } + + pGpu = gpumgrGetGpu(gpuIdx); + pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + pPerGpuClients = portMemAllocNonPaged((sizeof *pPerGpuClients) * + numChannelsPerGpu[gpuIdx]); + pPerGpuDevices = portMemAllocNonPaged((sizeof *pPerGpuDevices) * + numChannelsPerGpu[gpuIdx]); + pPerGpuChannels = portMemAllocNonPaged((sizeof *pPerGpuChannels) * + numChannelsPerGpu[gpuIdx]); + + for (chanIdx = 0; + chanIdx < numChannels && perGpuIdx < numChannelsPerGpu[gpuIdx]; + chanIdx++) + { + RsClient *pClient; + GpuResource *pGpuResource; + + // + // This does occasionally fail when the client was asynchronously + // killed + // NOTE: We may not hold the GPU lock here. 
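+            // Entries whose client or device lookup fails are simply skipped: they
+            // are never copied into the per-GPU arrays and do not fail the call.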
+ // + if (serverGetClientUnderLock(&g_resServ, + phClients[chanIdx], + &pClient) == NV_OK && + gpuresGetByDeviceOrSubdeviceHandle(pClient, + phDevices[chanIdx], + &pGpuResource) == NV_OK && + gpuGetInstance(GPU_RES_GET_GPU(pGpuResource)) == gpuIdx) + { + pPerGpuClients[perGpuIdx] = phClients[chanIdx]; + pPerGpuDevices[perGpuIdx] = phDevices[chanIdx]; + pPerGpuChannels[perGpuIdx] = phChannels[chanIdx]; + perGpuIdx++; + } + } + + rmStatus = kfifoIdleChannelsPerDevice_HAL(pGpu, pKernelFifo, pPerGpuClients, pPerGpuDevices, pPerGpuChannels, + numChannelsPerGpu[gpuIdx], flags, timeout); + portMemFree(pPerGpuClients); + portMemFree(pPerGpuDevices); + portMemFree(pPerGpuChannels); + + if (rmStatus != NV_OK) + { + goto done; + } + } + +done: + + if (isGpuGrpLockAcquired) + { + //UNLOCK: release Device lock + rmGpuGroupLockRelease(gpuLockMask, GPUS_LOCK_FLAGS_NONE); + } + + // paramCopy structs not initialized for CHANNEL_SINGLE + if (DRF_VAL(OS30, _FLAGS, _CHANNEL, flags) == NVOS30_FLAGS_CHANNEL_LIST) + { + // No need to copy these back out + if (phClients != NULL) + { + paramCopyClients.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + (void) rmapiParamsRelease(¶mCopyClients); + } + + if (phDevices != NULL) + { + paramCopyDevices.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + (void) rmapiParamsRelease(¶mCopyDevices); + } + + if (phChannels != NULL) + { + paramCopyChannels.flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + (void) rmapiParamsRelease(¶mCopyChannels); + } + } + + NV_PRINTF(LEVEL_INFO, + "DONE. hChannel: 0x%x, numChannels: %u, rmStatus: 0x%x\n", + hChannel, numChannels, rmStatus); + + return rmStatus; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/kernel_sched_mgr.c b/src/nvidia/src/kernel/gpu/fifo/kernel_sched_mgr.c new file mode 100644 index 000000000..ae2e0b5b7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/kernel_sched_mgr.c @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @file + * @brief KernelSchedMgr object module + * + * This module is for managing and manipulating state related to KernelSchedMgr + */ + +/* -------------------------------- Includes -------------------------------- */ + +#include "os/os.h" +#include "nvRmReg.h" + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/fifo/kernel_sched_mgr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +#include "virtualization/hypervisor/hypervisor.h" + +/* -------------------------------- Functions ------------------------------- */ + +/*! + * Obtains the valid scheduling policy for the current platform. + * Use: Determine whether software scheduling is required. + */ +static const char * +_kschedmgrGetSchedulerPolicy +( + KernelSchedMgr *pKernelSchedMgr, + OBJGPU *pGpu, + NvU32 *pSchedPolicy +) +{ + NvBool bSupportSwScheduler = NV_FALSE; + NvU32 schedPolicy = SCHED_POLICY_DEFAULT; + + // + // Disable OBJSCHED_SW_ENABLE when GPU is older than Pascal. + // This is true for WDDM and vGPU scheduling + if (!IsPASCALorBetter(pGpu)) + { + bSupportSwScheduler = NV_FALSE; + } + + // Disable OBJSCHED_SW_ENABLE when mig is enabled. + if (bSupportSwScheduler && IS_MIG_ENABLED(pGpu)) + { + bSupportSwScheduler = NV_FALSE; + portDbgPrintf("NVRM: Software Scheduler is not supported in MIG mode\n"); + } + + *pSchedPolicy = schedPolicy; + + switch (schedPolicy) + { + case SCHED_POLICY_VGPU_RELATIVE: + return "EQUAL_SHARE"; + case SCHED_POLICY_PGPU_SHARE: + return "FIXED_SHARE"; + case SCHED_POLICY_GFN_LSTT: + return "GFN_LSTT"; + default: + if (hypervisorIsVgxHyper()) + return "BEST_EFFORT"; + else // For baremetal and PT + return "NONE"; + } +} + +/*! + * Configure PDBs related to scheduler policy per RM configuration. + */ +void +kschedmgrConstructPolicy_IMPL +( + KernelSchedMgr *pKernelSchedMgr, + OBJGPU *pGpu +) +{ + const char *schedPolicyName; + NvU32 domain = gpuGetDomain(pGpu); + NvU32 bus = gpuGetBus(pGpu); + NvU32 device = gpuGetDevice(pGpu); + + schedPolicyName = _kschedmgrGetSchedulerPolicy(pKernelSchedMgr, pGpu, &pKernelSchedMgr->configSchedPolicy); + + // PVMRL is disabled when GPU is older than Pascal + if (hypervisorIsVgxHyper() && IsPASCALorBetter(pGpu)) + { + pKernelSchedMgr->bIsSchedSwEnabled = (pKernelSchedMgr->configSchedPolicy != SCHED_POLICY_DEFAULT); + + portDbgPrintf("NVRM: GPU at %04x:%02x:%02x.0 has software scheduler %s with policy %s.\n", + domain, bus, device, + pKernelSchedMgr->bIsSchedSwEnabled ? "ENABLED" : "DISABLED", + schedPolicyName); + } + else + { + // RM is not yet ready to print this message in release builds on baremetal. + NV_PRINTF(LEVEL_INFO, + "GPU at %04x:%02x:%02x.0 has software scheduler %s with policy %s.\n", + domain, bus, device, + pKernelSchedMgr->bIsSchedSwEnabled ? "ENABLED" : "DISABLED", + schedPolicyName); + } + + // Enabled SWRL Granular locking only if SWRL is enabled on hypervisor. + if (hypervisorIsVgxHyper() && pKernelSchedMgr->bIsSchedSwEnabled) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_SWRL_GRANULAR_LOCKING, NV_TRUE); + } +} diff --git a/src/nvidia/src/kernel/gpu/fifo/usermode_api.c b/src/nvidia/src/kernel/gpu/fifo/usermode_api.c new file mode 100644 index 000000000..5819530b4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/usermode_api.c @@ -0,0 +1,40 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/fifo/usermode_api.h" + +NV_STATUS +usrmodeConstruct_IMPL +( + UserModeApi *pUserModeApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return usrmodeConstructHal_HAL(pUserModeApi, pCallContext, pParams); +} + +NvBool +usrmodeCanCopy_IMPL(UserModeApi *pUserModeApi){ + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/gpu/fifo/uvm_channel_retainer.c b/src/nvidia/src/kernel/gpu/fifo/uvm_channel_retainer.c new file mode 100644 index 000000000..093256b65 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/fifo/uvm_channel_retainer.c @@ -0,0 +1,158 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvtypes.h" +#include "Nvcm.h" +#include "kernel/gpu/fifo/uvm_channel_retainer.h" +#include "class/clc574.h" // UVM_CHANNEL_RETAINER +#include "virtualization/hypervisor/hypervisor.h" +#include "vgpu/vgpu_events.h" +#include "rmapi/client.h" + +/*! + * Construct a new UvmChannelRetainer, which refcounts chId and instance memory. + * + * @param[in,out] pCallContext The call context + * @param[in,out] pParams UVM_CHANNEL_RETAINER alloc params + * + * @returns NV_OK on success, specific error code on failure. 
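+ *
+ * Note: the allocation is refused with NV_ERR_INSUFFICIENT_PERMISSIONS unless
+ * uvmchanrtnrIsAllocationAllowed() approves the calling client.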
+ */ +NV_STATUS +uvmchanrtnrConstruct_IMPL +( + UvmChannelRetainer *pUvmChannelRetainer, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pUvmChannelRetainer); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NV_STATUS rmStatus = NV_OK; + KernelChannel *pKernelChannel = NULL; + NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS *pUvmChannelRetainerParams = pParams->pAllocParams; + CHID_MGR *pChidMgr = NULL; + + NV_ASSERT_OK_OR_RETURN(CliGetKernelChannel(pUvmChannelRetainerParams->hClient, pUvmChannelRetainerParams->hChannel, &pKernelChannel)); + + if (!uvmchanrtnrIsAllocationAllowed(pUvmChannelRetainer, pCallContext, pKernelChannel)) + { + NV_PRINTF(LEVEL_ERROR, "class Id %d can only be allocated by internal kernel clients\n", + pCallContext->pResourceRef->externalClassId); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, kchannelGetRunlistId(pKernelChannel)); + + // Take a reference on channel Id + NV_ASSERT_OK_OR_RETURN(kfifoChidMgrRetainChid(pGpu, pKernelFifo, pChidMgr, pKernelChannel->ChID)); + pUvmChannelRetainer->chId = pKernelChannel->ChID; + pUvmChannelRetainer->runlistId = kchannelGetRunlistId(pKernelChannel); + + // Take a reference on the instance pointer memory + NV_ASSERT_OK_OR_GOTO(rmStatus, + kfifoChannelGetFifoContextMemDesc_HAL(pGpu, + pKernelFifo, + pKernelChannel, + FIFO_CTX_INST_BLOCK, + &pUvmChannelRetainer->pInstMemDesc), + fail); + if (pUvmChannelRetainer->pInstMemDesc->Allocated > 0) + pUvmChannelRetainer->pInstMemDesc->Allocated++; + + memdescAddRef(pUvmChannelRetainer->pInstMemDesc); + +fail: + if (rmStatus != NV_OK) + { + NV_ASSERT_OK(kfifoChidMgrReleaseChid(pGpu, pKernelFifo, pChidMgr, pKernelChannel->ChID)); + } + + return rmStatus; +} + +void +uvmchanrtnrDestruct_IMPL +( + UvmChannelRetainer *pUvmChannelRetainer +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pUvmChannelRetainer); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CHID_MGR *pChidMgr = NULL; + + if (pUvmChannelRetainer->pInstMemDesc->Allocated > 0) + memdescFree(pUvmChannelRetainer->pInstMemDesc); + + memdescDestroy(pUvmChannelRetainer->pInstMemDesc); + + pChidMgr = kfifoGetChidMgr(pGpu, pKernelFifo, pUvmChannelRetainer->runlistId); + NV_ASSERT_OK(kfifoChidMgrReleaseChid(pGpu, pKernelFifo, pChidMgr, pUvmChannelRetainer->chId)); +} + +/*! + * @brief Check if the client allocating this is an internal kernel client on + * Baremetal, Guest RM and admin client on Host RM. + * + * @param[in] pRmApi + * @param[in] hClient + * @param[in] pKernelChannel + * + * @returns NV_TRUE if allowed. 
+ */ +NvBool +uvmchanrtnrIsAllocationAllowed_IMPL +( + UvmChannelRetainer *pUvmChannelRetainer, + CALL_CONTEXT *pCallContext, + KernelChannel *pKernelChannel +) + +{ + NvBool bIsAllowed = NV_FALSE; + RS_PRIV_LEVEL privLevel = pCallContext->secInfo.privLevel; + OBJGPU *pGpu = GPU_RES_GET_GPU(pUvmChannelRetainer); + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_FALSE); + + if (gpuIsSriovEnabled(pGpu)) + { + NvU32 gfid; + NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, NV_FALSE); + if (IS_GFID_VF(gfid)) + { + bIsAllowed = (gfid == kchannelGetGfid(pKernelChannel)); + } + else + { + NvHandle hClient = pCallContext->pClient->hClient; + bIsAllowed = rmclientIsAdminByHandle(hClient, privLevel) || hypervisorCheckForObjectAccess(hClient); + } + } + else + { + bIsAllowed = ((privLevel >= RS_PRIV_LEVEL_KERNEL) && + (pCallContext->secInfo.paramLocation == PARAM_LOCATION_KERNEL)); + } + return bIsAllowed; +} diff --git a/src/nvidia/src/kernel/gpu/gpu.c b/src/nvidia/src/kernel/gpu/gpu.c new file mode 100644 index 000000000..2ef054dfa --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu.c @@ -0,0 +1,4168 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief HW State Routines: System Object Function Definitions. 
+ */ + + +#include "lib/base_utils.h" +#include "gpu/gpu.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/eng_desc.h" +#include "nv_ref.h" +#include "os/os.h" +#include "nvrm_registry.h" +#include "gpu_mgr/gpu_mgr.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "diagnostics/tracer.h" +#include "rmapi/client_resource.h" +#include "diagnostics/journal.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "core/hal_mgr.h" +#include "vgpu/rpc.h" + +#include + +#include "gpu/nvdec/kernel_nvdec.h" +#include "gpu/sec2/kernel_sec2.h" +#include "gpu/gsp/kernel_gsp.h" +#include "platform/platform.h" +#include "platform/chipset/chipset.h" +#include "kernel/gpu/host_eng/host_eng.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/intr/intr.h" + +#include "diagnostics/gpu_acct.h" + +#include "g_odb.h" + +typedef struct GPUCHILDINFO *PGPUCHILDINFO; +typedef struct GPUCHILDINFO GPUCHILDINFO; + +typedef struct GPUCHILDTYPE *PGPUCHILDTYPE; +typedef struct GPUCHILDTYPE GPUCHILDTYPE; + +#define RMTRACE_ENGINE_PROFILE_EVENT(EventName, EngineId, ReadCount, WriteCount) \ +{ \ + RMTRACE_PROBE4(generic, marker, \ + NvU32, EngineId, sizeof(NvU32), \ + char*, EventName, sizeof(EventName), \ + NvU32, ReadCount, sizeof(NvU32), \ + NvU32, WriteCount, sizeof(NvU32)); \ + } + +// Public interface functions + +static NV_STATUS gpuRemoveMissingEngines(OBJGPU *); + +// local static function +static NV_STATUS gpuCreateChildObjects(OBJGPU *, NvBool); +static NV_STATUS gpuStatePreLoad(OBJGPU *, NvU32); +static NV_STATUS gpuStatePostLoad(OBJGPU *, NvU32); +static NV_STATUS gpuStatePreUnload(OBJGPU *, NvU32); +static NV_STATUS gpuStatePostUnload(OBJGPU *, NvU32); +static void gpuXlateHalImplToArchImpl(OBJGPU *, HAL_IMPLEMENTATION, NvU32 *, NvU32 *); +static NvBool gpuSatisfiesTemporalOrder(OBJGPU *, HAL_IMPLEMENTATION, NvU32, NvU32); +static NvBool gpuSatisfiesTemporalOrderMaskRev(OBJGPU *, HAL_IMPLEMENTATION, NvU32, NvU32, NvU32); +static NvBool gpuIsT124ImplementationOrBetter(OBJGPU *); +static NvBool gpuShouldCreateObject(PGPUCHILDINFO, PENGDESCRIPTOR, NvU32); + +static void gpuDestroyMissingEngine(OBJGPU *, OBJENGSTATE *); +static void gpuRemoveMissingEngineClasses(OBJGPU *, NvU32); + +static NV_STATUS _gpuCreateEngineOrderList(OBJGPU *pGpu); +static void _gpuFreeEngineOrderList(OBJGPU *pGpu); + + +static void _gpuInitPciHandle(OBJGPU *pGpu); +static void _gpuInitPhysicalRmApi(OBJGPU *pGpu); +static NV_STATUS _gpuAllocateInternalObjects(OBJGPU *pGpu); +static void _gpuFreeInternalObjects(OBJGPU *pGpu); + +typedef struct +{ + NvS32 childOrderIndex; + NvS32 instanceID; + NvU32 flags; + NvBool bStarted; +} ENGLIST_ITER, *PENGLIST_ITER; + +static ENGLIST_ITER gpuGetEngineOrderListIter(OBJGPU *pGpu, NvU32 flags); +static NvBool gpuGetNextInEngineOrderList(OBJGPU *pGpu, ENGLIST_ITER *pIt, PENGDESCRIPTOR pEngDesc); + +static inline void _setPlatformNoHostbridgeDetect(NvBool bValue) +{ + OBJPFM *pPfm = SYS_GET_PFM(SYS_GET_INSTANCE()); + pPfm->setProperty(pPfm, PDB_PROP_PFM_NO_HOSTBRIDGE_DETECT, bValue); +} + +// Forward declare all the class definitions so that we don't need to pull in all the headers +#define GPU_CHILD(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + extern const struct NVOC_CLASS_DEF NV_CONCATENATE(__nvoc_class_def_, className); + +#include "gpu/gpu_child_list.h" + + +// Describes a child type (e.g.: 
classId(OBJCE)) +struct GPUCHILDTYPE +{ + NvBool bConstructEarly; // bConstructEarly objects are created in a separate step. FUSE must be created + // before BIF since we need to know the OPSB fuse value for enabling/disabling + // certain features in bifInitRegistryOverrides + NvBool bAlwaysCreate; + NvU32 instances; + NvU32 gpuChildPtrOffset; + const NVOC_CLASS_INFO *pClassInfo; // NULL if engine is disabled by chip-config +}; + +// List of all possible GPU offspring +static GPUCHILDTYPE gpuChildTypeList[] = +{ + #define GPU_CHILD(className, accessorName, numInstances, bConstructEarly, bAlwaysCreate, gpuField) \ + { bConstructEarly, bAlwaysCreate, numInstances, NV_OFFSETOF(OBJGPU, gpuField), classInfo(className) }, + + #include "gpu/gpu_child_list.h" +}; + +// Describes a child instance (e.g.: classId(OBJCE) instanceID #1) +struct GPUCHILDINFO +{ + NvBool bAlwaysCreate; + NvBool bConstructEarly; + ENGDESCRIPTOR engDesc; + NvU32 gpuChildPtrOffset; + const NVOC_CLASS_INFO *pClassInfo; + GPUCHILDTYPE *pChildType; +}; + +static PGPUCHILDTYPE gpuGetChildType(NVOC_CLASS_ID classId); +static NV_STATUS gpuGetChildInfo(NVOC_CLASS_ID classId, NvU32 instanceID, PGPUCHILDINFO pChildInfoOut); +static Dynamic **gpuGetChildPtr(OBJGPU *pGpu, NvU32 gpuChildPtrOffset); + +#define GPU_NUM_CHILD_TYPES \ + ((sizeof(gpuChildTypeList) / sizeof(GPUCHILDTYPE))) + +/*! + * GFID allocation state + */ +typedef enum _gfid_alloc_state +{ + GFID_FREE = 0, + GFID_ALLOCATED = 1 +} GFID_ALLOC_STATUS; + +// +// Generate a 32-bit id from domain, bus and device tuple. +// +// This is a one way function that is not guaranteed to generate a unique id for +// each domain, bus, device tuple as domain alone can be 32-bit. Historically, +// we have been assuming that the domain can only be 16-bit, but that has never +// been true on Linux and Hyper-V virtualization has exposed that by using +// arbitrary 32-bit domains for passthrough GPUs. This is the only known case +// today that requires immediate support. The domains on Hyper-V come from +// hashing some system and GPU information and are claimed to be unique even if +// we consider the lower 16-bits only. Hence, as a temporary solution, only the +// lower 16-bits are used and it's asserted that top 16-bits are only non-0 on +// Hyper-V. +// +// Long term the 32-bit ids should be changed to 64-bit or the generation scheme +// should be changed to guarantee uniqueness. Both of these are impactful as the +// biggest user of this is the commonly used 32-bit OBJGPU::gpuId. 
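+//
+// Concretely, the id below starts from the gpuEncodeBusDevice(bus, device)
+// encoding and then ORs the low 16 bits of the domain into bits 31:16.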
+// +NvU32 gpuGenerate32BitId(NvU32 domain, NvU8 bus, NvU8 device) +{ + NvU32 id = gpuEncodeBusDevice(bus, device); + + // Include only the lower 16-bits to match the old gpuId scheme + id |= (domain & 0xffff) << 16; + + return id; +} + +void gpuChangeComputeModeRefCount_IMPL(OBJGPU *pGpu, NvU32 command) +{ + switch(command) + { + case NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT: + NV_ASSERT(pGpu->computeModeRefCount >= 0); + ++pGpu->computeModeRefCount; + + if (1 == pGpu->computeModeRefCount) + { + NV_PRINTF(LEVEL_INFO, "GPU (ID: 0x%x): new mode: COMPUTE\n", + pGpu->gpuId); + + timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu); + } + break; + + case NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT: + --pGpu->computeModeRefCount; + NV_ASSERT(pGpu->computeModeRefCount >= 0); + + if (pGpu->computeModeRefCount < 0) + { + pGpu->computeModeRefCount = 0; + } + + if (0 == pGpu->computeModeRefCount) + { + NV_PRINTF(LEVEL_INFO, "GPU (ID: 0x%x): new mode: GRAPHICS\n", + pGpu->gpuId); + + timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu); + } + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Bad command: 0x%x\n", command); + NV_ASSERT(0); + break; + } +} + +// +// gpuPostConstruct +// +// Called by the gpu manager to finish OBJGPU construction phase. +// Tasks handled here include binding a HAL module to the gpu +// and the construction of engine object offspring. +// +NV_STATUS +gpuPostConstruct_IMPL +( + OBJGPU *pGpu, + GPUATTACHARG *pAttachArg +) +{ + NV_STATUS rmStatus; + + gpumgrAddDeviceInstanceToGpus(NVBIT(pGpu->gpuInstance)); + + rmStatus = regAccessConstruct(&pGpu->registerAccess, pGpu); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to construct IO Apertures for attached devices \n"); + return rmStatus; + } + + gpuInitChipInfo(pGpu); + + // + // gpuInitRegistryOverrides() must be called before the child engines + // are being created. This would ensure that the child engines + // pick up any overrides from the GPU. We need to keep this here since the + // gpuDetermineVirtualMode will be using these overrides + // + gpuInitRegistryOverrides_HAL(pGpu); + gpuInitInstLocOverrides_HAL(pGpu); + + NV_ASSERT_OK_OR_RETURN(gpuPrivSecInitRegistryOverrides(pGpu)); + + // + // gpuDetermineVirtualMode inits hPci but only for virtualization case. So if + // it does not init it, do here for using it for non-virtualization as well + // + if (pGpu->hPci == NULL) + { + // + // We don't check the return status. Even if PCI handle is not obtained + // it should not block rest of the gpu init sequence. + // + _gpuInitPciHandle(pGpu); + } + + // + // Initialize the base offset for the virtual registers for physical function + // or baremetal + // + pGpu->sriovState.virtualRegPhysOffset = gpuGetVirtRegPhysOffset_HAL(pGpu); + + // + // Check if FBHUB Poison interrupt is triggered before RM Init due + // to VBIOS IFR on GA100. If yes, clear the FBHUB Interrupt. This WAR is + // required for Bug 2924523 where VBIOS IFR causes FBHUB Poison intr. + // We need to clear this before RM Init begins, as an FBHUB Poison as part of + // RM Init is a valid interrupt + // + // Additional details which might be of interest exist in bug 200620015 + // comments 43-45 pertaining to the necessity of the WAR so close to the + // register write enablement. 
+ // + gpuClearFbhubPoisonIntrForBug2924523_HAL(pGpu); + + // + // Initialize engine order before engine init/load/etc + // + rmStatus = _gpuCreateEngineOrderList(pGpu); + if ( rmStatus != NV_OK ) + return rmStatus; + + gpuBuildClassDB(pGpu); + + // The first time the emulation setting is checked is in timeoutInitializeGpuDefault. + pGpu->computeModeRefCount = 0; + pGpu->hComputeModeReservation = NV01_NULL_OBJECT; + + // Setting default timeout values + timeoutInitializeGpuDefault(&pGpu->timeoutData, pGpu); + + // Set 2 stage error recovery if Vista or Unix or GSP-RM. + if (!IsAMODEL(pGpu)) + { + pGpu->bTwoStageRcRecoveryEnabled = NV_TRUE; + } + + // create core objects (i.e. bif) + rmStatus = gpuCreateChildObjects(pGpu, /* bConstructEarly */ NV_TRUE); + if (rmStatus != NV_OK) + return rmStatus; + + gpuGetIdInfo_HAL(pGpu); + gpuUpdateIdInfo_HAL(pGpu); + + _gpuInitPhysicalRmApi(pGpu); + + // need to get illumination values after the GPU Id + // has been setup to allow for GPU specific settings + gpuDeterminePersistantIllumSettings(pGpu); + + // Construct and update the engine database + rmStatus = gpuConstructEngineTable(pGpu); + if (rmStatus != NV_OK) + return rmStatus; + rmStatus = gpuUpdateEngineTable(pGpu); + if (rmStatus != NV_OK) + return rmStatus; + + // create remaining gpu offspring + rmStatus = gpuCreateChildObjects(pGpu, /* bConstructEarly */ NV_FALSE); + if (rmStatus != NV_OK) + return rmStatus; + + gpuGetHwDefaults(pGpu); + + // Handle per-device core logic registry settings + OBJCL *pCl = SYS_GET_CL(SYS_GET_INSTANCE()); + if (pCl != NULL) + { + clInitPropertiesFromRegistry(pGpu, pCl); + } + + // Set any state overrides required for L2 cache only mode + if (gpuIsCacheOnlyModeEnabled(pGpu)) + { + gpuSetCacheOnlyModeOverrides_HAL(pGpu); + } + + // Register the OCA dump callback function. + gpuDumpCallbackRegister(pGpu); + + // Initialize reference count for external kernel clients + pGpu->externalKernelClientCount = 0; + + return NV_OK; +} + +NV_STATUS gpuConstruct_IMPL +( + OBJGPU *pGpu, + NvU32 gpuInstance +) +{ + + pGpu->gpuInstance = gpuInstance; + + // allocate OS-specific GPU extension area + osInitOSHwInfo(pGpu); + + return gpuConstructPhysical(pGpu); +} + +// NVOC-TODO : delete this after all Rmconfig modules migrated to NVOC +NV_STATUS +gpuBindHalLegacy_IMPL +( + OBJGPU *pGpu, + NvU32 chipId0, + NvU32 chipId1 +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHALMGR *pHalMgr = SYS_GET_HALMGR(pSys); + NV_STATUS status; + + // chipId0 and chipId1 needs to be function parameter since GPU Reg read + // is not ready at this point. 
+ pGpu->chipId0 = chipId0; + pGpu->chipId1 = chipId1; + + // + // The system object will pass PMC_BOOT_0 and PMC_BOOT_42 to all the HAL's and return the + // one that claims it supports this chip arch/implementation + // + status = halmgrGetHalForGpu(pHalMgr, pGpu->chipId0, pGpu->chipId1, &pGpu->halImpl); + if (status != NV_OK) + return status; + + pGpu->pHal = halmgrGetHal(pHalMgr, pGpu->halImpl); + + return status; +} + +static void +_gpuInitPciHandle +( + OBJGPU *pGpu +) +{ + NvU32 domain = gpuGetDomain(pGpu); + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU8 function = 0; + + pGpu->hPci = osPciInitHandle(domain, bus, device, function, NULL, NULL); +} + +static NV_STATUS _gpuRmApiControl +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize +) +{ + RmCtrlParams rmCtrlParams; + CALL_CONTEXT callCtx, *oldCtx = NULL; + RS_LOCK_INFO lockInfo = {0}; + NV_STATUS status = NV_OK; + + OBJGPU *pGpu = (OBJGPU*)pRmApi->pPrivateContext; + + // This API is only used to route locally on monolithic or UCODE + NV_ASSERT_OR_RETURN(!IS_GSP_CLIENT(pGpu), NV_ERR_INVALID_STATE); + + // + // The physical API can be used on any controls and any handles and it is + // expected to be routed correctly. However, if the caller is using the GPU + // internal handles, we can skip the resource server overhead and make a + // direct function call instead. + // + if (hClient == pGpu->hInternalClient && hObject == pGpu->hInternalSubdevice) + { + NV_ASSERT_OR_RETURN(pGpu->pCachedSubdevice && pGpu->pCachedRsClient, NV_ERR_INVALID_STATE); + + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + pEntry = objGetExportedMethodDef((void*)pGpu->pCachedSubdevice, cmd); + + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pEntry->paramSize == paramsSize, NV_ERR_INVALID_PARAM_STRUCT); + NV_PRINTF(LEVEL_INFO, "GPU Internal RM control 0x%08x on gpuInst:%x hClient:0x%08x hSubdevice:0x%08x\n", + cmd, pGpu->gpuInstance, hClient, hObject); + + portMemSet(&rmCtrlParams, 0, sizeof(rmCtrlParams)); + rmCtrlParams.hClient = hClient; + rmCtrlParams.hObject = hObject; + rmCtrlParams.pGpu = pGpu; + rmCtrlParams.cmd = cmd; + rmCtrlParams.flags = NVOS54_FLAGS_LOCK_BYPASS; + rmCtrlParams.pParams = pParams; + rmCtrlParams.paramsSize = paramsSize; + rmCtrlParams.secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + rmCtrlParams.secInfo.paramLocation = PARAM_LOCATION_KERNEL; + rmCtrlParams.bInternal = NV_TRUE; + + lockInfo.flags = RM_LOCK_FLAGS_NO_GPUS_LOCK | RM_LOCK_FLAGS_NO_CLIENT_LOCK; + rmCtrlParams.pLockInfo = &lockInfo; + + portMemSet(&callCtx, 0, sizeof(callCtx)); + callCtx.pResourceRef = RES_GET_REF(pGpu->pCachedSubdevice); + callCtx.pClient = pGpu->pCachedRsClient; + callCtx.secInfo = rmCtrlParams.secInfo; + callCtx.pServer = &g_resServ; + callCtx.pControlParams = &rmCtrlParams; + callCtx.pLockInfo = rmCtrlParams.pLockInfo; + + resservSwapTlsCallContext(&oldCtx, &callCtx); + + if (pEntry->paramSize == 0) + { + status = ((NV_STATUS(*)(void*))pEntry->pFunc)(pGpu->pCachedSubdevice); + } + else + { + status = ((NV_STATUS(*)(void*,void*))pEntry->pFunc)(pGpu->pCachedSubdevice, pParams); + } + + resservRestoreTlsCallContext(oldCtx); + } + else + { + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_ASSERT_OR_RETURN(rmDeviceGpuLockIsOwner(pGpu->gpuInstance), NV_ERR_INVALID_LOCK_STATE); + + status = pInternalRmApi->Control(pInternalRmApi, hClient, hObject, cmd, pParams, paramsSize); + } + + return status; +} + +static NV_STATUS 
_gpuRmApiAllocWithHandle +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams +) +{ + // Simple forwarder for now + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + return pInternalRmApi->AllocWithHandle(pInternalRmApi, hClient, hParent, hObject, hClass, pAllocParams); +} +static NV_STATUS _gpuRmApiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +) +{ + // Simple forwarder for now + RM_API *pInternalRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + return pInternalRmApi->Free(pInternalRmApi, hClient, hObject); +} + +static void +_gpuInitPhysicalRmApi +( + OBJGPU *pGpu +) +{ + // Populate all unused APIs with stubs + pGpu->physicalRmApi = *rmapiGetInterface(RMAPI_STUBS); + pGpu->physicalRmApi.pPrivateContext = pGpu; + + portMemSet(&pGpu->physicalRmApi.defaultSecInfo, 0, + sizeof(pGpu->physicalRmApi.defaultSecInfo)); + pGpu->physicalRmApi.defaultSecInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + pGpu->physicalRmApi.defaultSecInfo.paramLocation = PARAM_LOCATION_KERNEL; + pGpu->physicalRmApi.bHasDefaultSecInfo = NV_TRUE; + pGpu->physicalRmApi.bTlsInternal = NV_TRUE; + pGpu->physicalRmApi.bApiLockInternal = NV_TRUE; + pGpu->physicalRmApi.bRmSemaInternal = NV_TRUE; + pGpu->physicalRmApi.bGpuLockInternal = NV_TRUE; + + // Only initialize the methods that exist on GSP/DCE as well + pGpu->physicalRmApi.Control = _gpuRmApiControl; + pGpu->physicalRmApi.AllocWithHandle = _gpuRmApiAllocWithHandle; + pGpu->physicalRmApi.Free = _gpuRmApiFree; +} + +static NV_STATUS +_gpuInitChipInfo +( + OBJGPU *pGpu +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + const NvU32 paramSize = sizeof(NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS); + NV_STATUS status; + + pGpu->pChipInfo = portMemAllocNonPaged(paramSize); + NV_ASSERT_OR_RETURN(pGpu->pChipInfo != NULL, NV_ERR_NO_MEMORY); + + portMemSet(pGpu->pChipInfo, 0, paramSize); + + NV_ASSERT_OK_OR_GOTO(status, pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GPU_GET_CHIP_INFO, + pGpu->pChipInfo, paramSize), done); + + pGpu->chipInfo.subRevision = pGpu->pChipInfo->chipSubRev; + pGpu->idInfo.PCIDeviceID = pGpu->pChipInfo->pciDeviceId; + pGpu->idInfo.PCISubDeviceID = pGpu->pChipInfo->pciSubDeviceId; + pGpu->idInfo.PCIRevisionID = pGpu->pChipInfo->pciRevisionId; + +done: + if (status != NV_OK) + { + portMemFree(pGpu->pChipInfo); + pGpu->pChipInfo = NULL; + } + + return status; +} + +static NV_STATUS +gpuInitVmmuInfo +( + OBJGPU *pGpu +) +{ + NV_STATUS status; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV2080_CTRL_GPU_GET_VMMU_SEGMENT_SIZE_PARAMS params; + + pGpu->vmmuSegmentSize = 0; + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_GET_VMMU_SEGMENT_SIZE, + ¶ms, sizeof(params)); + + if (status == NV_ERR_NOT_SUPPORTED) + { + // Leave segment size initialized to zero to signal no VMMU present on physical + return NV_OK; + } + else if (status != NV_OK) + { + return status; + } + + pGpu->vmmuSegmentSize = params.vmmuSegmentSize; + + return status; +} + +static NV_STATUS _gpuAllocateInternalObjects +( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + + if (IS_GSP_CLIENT(pGpu)) + { + if (IsT234D(pGpu)) + { + // + // NOTE: We add +1 to the client base because DCE-RM will also + // allocate internal objects, taking the !IS_GSP_CLIENT path below. 
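+            // The +1 keeps this internal client handle from colliding with the
+            // one DCE-RM allocates for itself.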
+ // + pGpu->hInternalClient = RS_CLIENT_INTERNAL_HANDLE_BASE + 1; + pGpu->hInternalDevice = NV_GPU_INTERNAL_DEVICE_HANDLE; + pGpu->hInternalSubdevice = NV_GPU_INTERNAL_SUBDEVICE_HANDLE; + } + else + { + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + NV_ASSERT_OR_RETURN(pGSCI != NULL, NV_ERR_INVALID_STATE); + + pGpu->hInternalClient = pGSCI->hInternalClient; + pGpu->hInternalDevice = pGSCI->hInternalDevice; + pGpu->hInternalSubdevice = pGSCI->hInternalSubdevice; + } + } + else + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_ASSERT_OK_OR_RETURN(rmapiutilAllocClientAndDeviceHandles( + pRmApi, pGpu, &pGpu->hInternalClient, &pGpu->hInternalDevice, &pGpu->hInternalSubdevice)); + + NV_ASSERT_OK_OR_GOTO(status, serverGetClientUnderLock(&g_resServ, pGpu->hInternalClient, + &pGpu->pCachedRsClient), done); + NV_ASSERT_OK_OR_GOTO(status, subdeviceGetByGpu(pGpu->pCachedRsClient, pGpu, + &pGpu->pCachedSubdevice), done); + } + +done: + if (status != NV_OK) + { + _gpuFreeInternalObjects(pGpu); + } + + return status; +} + +static void _gpuFreeInternalObjects +( + OBJGPU *pGpu +) +{ + if (!IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + rmapiutilFreeClientAndDeviceHandles(pRmApi, + &pGpu->hInternalClient, &pGpu->hInternalDevice, &pGpu->hInternalSubdevice); + } +} + +static NV_STATUS +_gpuCreateEngineOrderList +( + OBJGPU *pGpu +) +{ + NvU32 i; + NvU32 numClassDesc; + NvU32 numLists; + NV_STATUS status = NV_OK; + PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder; + NvU32 numEngineDesc, curEngineDesc; + NvU32 listTypes[] = {GCO_LIST_INIT, GCO_LIST_LOAD, GCO_LIST_UNLOAD, GCO_LIST_DESTROY}; + PENGDESCRIPTOR *ppEngDescriptors[4]; + ENGLIST_ITER it; + ENGDESCRIPTOR engDesc; + + ct_assert(NV_ARRAY_ELEMENTS32(ppEngDescriptors) == NV_ARRAY_ELEMENTS32(listTypes)); + +#define GPU_CHILD(a, b, numInstances, c, d, e) +numInstances + + struct ChildList { + char children[ 0 + + #include "gpu/gpu_child_list.h" + ]; + }; + + // + // The maximum number of engines known to RM controls + // must be at least the number of actual OBJGPU children. + // + ct_assert(NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS >= + sizeof(((struct ChildList*)(NULL))->children) /* sizeof(ChildList::children) */); + + numLists = NV_ARRAY_ELEMENTS32(listTypes); + + ppEngDescriptors[0] = &pEngineOrder->pEngineInitDescriptors; + ppEngDescriptors[1] = &pEngineOrder->pEngineLoadDescriptors; + ppEngDescriptors[2] = &pEngineOrder->pEngineUnloadDescriptors; + ppEngDescriptors[3] = &pEngineOrder->pEngineDestroyDescriptors; + + // + // Find the size of the engine descriptor list. The sizes of all lists + // are checked for consistency to catch mistakes. + // + // The list is copied into OBJGPU storage as it's modified during + // dynamic engine removal (e.g.: gpuMissingEngDescriptor). 
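+    //
+    // Two passes: the loop below only counts entries per list and checks that
+    // all four lists are the same length; the second loop then allocates the
+    // per-list arrays and copies the descriptors in.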
+ // + numEngineDesc = 0; + + for (i = 0; i < numLists; i++) + { + curEngineDesc = 0; + + it = gpuGetEngineOrderListIter(pGpu, listTypes[i]); + + while (gpuGetNextInEngineOrderList(pGpu, &it, &engDesc)) + { + curEngineDesc++; + } + + if ((numEngineDesc != 0) && (numEngineDesc != curEngineDesc)) + { + NV_PRINTF(LEVEL_ERROR, + "Sizes of all engine order lists do not match!\n"); + NV_ASSERT(0); + status = NV_ERR_INVALID_STATE; + goto done; + } + + numEngineDesc = curEngineDesc; + } + + pEngineOrder->numEngineDescriptors = numEngineDesc; + + + for (i = 0; i < numLists; i++) + { + curEngineDesc = 0; + + *ppEngDescriptors[i] = portMemAllocNonPaged(sizeof(ENGDESCRIPTOR) * numEngineDesc); + if ( NULL == *ppEngDescriptors[i]) + { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto done; + } + + it = gpuGetEngineOrderListIter(pGpu, listTypes[i]); + + while (gpuGetNextInEngineOrderList(pGpu, &it, &engDesc)) + { + (*ppEngDescriptors[i])[curEngineDesc] = engDesc; + curEngineDesc++; + } + } + + pEngineOrder->pClassDescriptors = gpuGetClassDescriptorList_HAL(pGpu, &numClassDesc); + pEngineOrder->numClassDescriptors = numClassDesc; + + return NV_OK; + +done: + portMemFree(pEngineOrder->pEngineInitDescriptors); + pEngineOrder->pEngineInitDescriptors = NULL; + + portMemFree(pEngineOrder->pEngineDestroyDescriptors); + pEngineOrder->pEngineDestroyDescriptors = NULL; + + portMemFree(pEngineOrder->pEngineLoadDescriptors); + pEngineOrder->pEngineLoadDescriptors = NULL; + + portMemFree(pEngineOrder->pEngineUnloadDescriptors); + pEngineOrder->pEngineUnloadDescriptors = NULL; + + return status; +} + +static void +_gpuFreeEngineOrderList +( + OBJGPU *pGpu +) +{ + PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder; + + if (!pEngineOrder->pEngineInitDescriptors) + return; + + portMemFree(pEngineOrder->pEngineInitDescriptors); + portMemFree(pEngineOrder->pEngineDestroyDescriptors); + portMemFree(pEngineOrder->pEngineLoadDescriptors); + portMemFree(pEngineOrder->pEngineUnloadDescriptors); + + pEngineOrder->pEngineInitDescriptors = NULL; + pEngineOrder->pEngineDestroyDescriptors = NULL; + pEngineOrder->pEngineLoadDescriptors = NULL; + pEngineOrder->pEngineUnloadDescriptors = NULL; + pEngineOrder->pClassDescriptors = NULL; +} + +/*! + * @brief Returns a pointer to the GPU's pointer to a child specified by its childInfo + * + * @param[in] pGpu OBJPGU pointer + * @param[in] pChildInfo Pointer to table entry + */ +static Dynamic** +gpuGetChildPtr(OBJGPU *pGpu, NvU32 gpuChildPtrOffset) +{ + return (Dynamic**)((NvU8*)pGpu + gpuChildPtrOffset); +} + +/*! + * @brief Looks up for an instance of engine + * + * @param[in] classId + * @param[in] instanceID + * @param[out] pChildInfoOut + */ +static NV_STATUS +gpuGetChildInfo(NVOC_CLASS_ID classId, NvU32 instanceID, PGPUCHILDINFO pChildInfoOut) +{ + PGPUCHILDTYPE pChildType; + + NV_ASSERT_OR_RETURN(pChildInfoOut, NV_ERR_INVALID_STATE); + + pChildType = gpuGetChildType(classId); + + NV_ASSERT_OR_RETURN(pChildType && (instanceID < pChildType->instances), NV_ERR_INVALID_OBJECT); + + pChildInfoOut->engDesc = MKENGDESC(classId, instanceID); + pChildInfoOut->bAlwaysCreate = pChildType->bAlwaysCreate; + pChildInfoOut->bConstructEarly = pChildType->bConstructEarly; + pChildInfoOut->pClassInfo = pChildType->pClassInfo; + pChildInfoOut->pChildType = pChildType; + + pChildInfoOut->gpuChildPtrOffset = pChildType->gpuChildPtrOffset + sizeof(void *) * instanceID; + + return NV_OK; +} + +/*! 
+ * @brief Looks up for a class in the table based on class id + * + * All classes are uniquely identified by their classId. + * + * @param[in] classId NVOC_CLASS_ID + */ +static PGPUCHILDTYPE +gpuGetChildType(NVOC_CLASS_ID classId) +{ + NvU32 i; + + for (i = 0; i < GPU_NUM_CHILD_TYPES; i++) + { + if (gpuChildTypeList[i].pClassInfo && gpuChildTypeList[i].pClassInfo->classId == classId) + { + return &gpuChildTypeList[i]; + } + } + + return NULL; +} + +/** + * @brief Initializes iterator for all possible ENGDESCRIPTORs that could be GPU + * children. + * + * @return GPU_CHILD_ITER + */ +GPU_CHILD_ITER +gpuGetPossibleEngDescriptorIter(void) +{ + GPU_CHILD_ITER it = {0}; + return it; +} + +/** + * @brief Iterator over all possible ENGDESCRIPTORs that could be GPU children. + * + * @param[in,out] pIt Iterator + * @param[out] pEngDesc The next engine descriptor + * + * @return NV_TRUE if *pEngDesc is valid, NV_FALSE if there are no more engines + */ +NvBool +gpuGetNextPossibleEngDescriptor(GPU_CHILD_ITER *pIt, ENGDESCRIPTOR *pEngDesc) +{ + PGPUCHILDTYPE pChildType; + GPUCHILDINFO childInfo; + + if (pIt->childTypeIdx >= GPU_NUM_CHILD_TYPES) + return NV_FALSE; + + pChildType = &gpuChildTypeList[pIt->childTypeIdx]; + + // Advance instance # + if (pIt->childInst < pChildType->instances && pChildType->pClassInfo) + { + NV_STATUS status = gpuGetChildInfo(pChildType->pClassInfo->classId, pIt->childInst, &childInfo); + + NV_ASSERT(status == NV_OK); + + pIt->gpuChildPtrOffset = childInfo.gpuChildPtrOffset; + pIt->childInst++; + + *pEngDesc = childInfo.engDesc; + return NV_TRUE; + } + + pIt->childTypeIdx++; + pIt->childInst = 0; + + // Recurse (max depth is 1) + return gpuGetNextPossibleEngDescriptor(pIt, pEngDesc); +} + +/*! + * @brief Returns the unshared engstate for the child object with the given engine + * descriptor (i.e.: the ENGSTATE without any of the SLI sharing hacks). + * + * All engines are uniquely identified by their engine descriptor. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc ENGDESCRIPTOR + */ +POBJENGSTATE +gpuGetEngstateNoShare_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + ENGSTATE_ITER it = gpuGetEngstateIter(pGpu); + OBJENGSTATE *pEngstate; + + while (gpuGetNextEngstate(pGpu, &it, &pEngstate)) + { + if (engstateGetDescriptor(pEngstate) == engDesc) + { + return pEngstate; + } + } + + return NULL; +} + +/*! + * @brief Returns the engstate for the child object with the given engine descriptor + * + * All engines are uniquely identified by their engine descriptor. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc ENGDESCRIPTOR + */ +POBJENGSTATE +gpuGetEngstate_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + if (ENGDESC_FIELD(engDesc, _CLASS) == classId(KernelFifo)) + { + return staticCast(gpuGetKernelFifoShared(pGpu), OBJENGSTATE); + } + + // Everything else is unshared + return gpuGetEngstateNoShare(pGpu, engDesc); +} + + +/*! + * @brief Gets the shared, aka the dominate KernelFifo, for a linked group of GPUs. + * + * Much of the SW state for KernelFifo is stored in the pKernelFifo hanging off + * the parent GPU when in SLI. + * + * @param[in] pGpu OBJGPU pointer + */ +KernelFifo* +gpuGetKernelFifoShared_IMPL(OBJGPU *pGpu) +{ + // If SLI is not active use parent GPU + pGpu = (!gpumgrIsParentGPU(pGpu) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_SLI_LINK_ACTIVE)) ? + gpumgrGetParentGPU(pGpu) : pGpu; + + return pGpu->pKernelFifo; +} + +/*! + * @brief Iterates over pGpu's child engstates. Returns NV_FALSE when there are + * no more. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in,out] pIt Iterator + * @param[out] ppEngState The next engstate + * + * @return NV_TRUE if ppEngstate is valid, NV_FALSE if no more found + */ +NvBool +gpuGetNextEngstate_IMPL(OBJGPU *pGpu, ENGSTATE_ITER *pIt, OBJENGSTATE **ppEngstate) +{ + ENGDESCRIPTOR engDesc; + OBJENGSTATE *pEngstate; + Dynamic **ppChild; + + while (gpuGetNextPossibleEngDescriptor(pIt, &engDesc)) + { + ppChild = gpuGetChildPtr(pGpu, pIt->gpuChildPtrOffset); + if (*ppChild != NULL) + { + pEngstate = dynamicCast(*ppChild, OBJENGSTATE); + if (pEngstate != NULL) + { + *ppEngstate = pEngstate; + return NV_TRUE; + } + } + } + + return NV_FALSE; +} + +/*! + * @brief Iterates over pGpu's child engstates that implement INTRABLE. + * Returns NV_FALSE when there are no more. + * + * @param[in] pGpu OBJGPU pointer + * @param[in,out] pIt Iterator + * @param[out] ppPmuclient The next PMU client + * + * @return NV_TRUE if ppPmuclient is valid, NV_FALSE if no more found + */ +NvBool +gpuGetNextStaticIntrable_IMPL(OBJGPU *pGpu, GPU_CHILD_ITER *pIt, OBJINTRABLE **ppIntrable) +{ + ENGDESCRIPTOR engDesc; + OBJHOSTENG *pHostEng; + OBJINTRABLE *pIntrable; + Dynamic **ppChild; + + while (gpuGetNextPossibleEngDescriptor(pIt, &engDesc)) + { + ppChild = gpuGetChildPtr(pGpu, pIt->gpuChildPtrOffset); + if (*ppChild != NULL) + { + pHostEng = dynamicCast(*ppChild, OBJHOSTENG); + + // Exclude host engines for now, as we only want static units + if (pHostEng == NULL) + { + pIntrable = dynamicCast(*ppChild, OBJINTRABLE); + if (pIntrable != NULL) + { + *ppIntrable = pIntrable; + return NV_TRUE; + } + } + } + } + + return NV_FALSE; +} + +/*! + * @brief Returns the hosteng for the child object with the given engine descriptor + * + * All engines are uniquely identified by their engine descriptor. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc ENGDESCRIPTOR + */ +POBJHOSTENG +gpuGetHosteng_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, engDesc); + OBJHOSTENG *pHosteng; + + if (pEngstate) + pHosteng = dynamicCast(pEngstate, OBJHOSTENG); + else + { + NV_PRINTF(LEVEL_ERROR, "Failed to get hosteng.\n"); + return NULL; + } + + return pHosteng; +} + +/*! + * @brief The generic object constructor + * + * @param[in] pGpu POBJGPU + * @param[in] classId NVOC_CLASS_ID + * @param[in] instanceID NvU32 + * + */ +NV_STATUS +gpuCreateObject_IMPL +( + OBJGPU *pGpu, + NVOC_CLASS_ID classId, + NvU32 instanceID +) +{ + NV_STATUS status; + OBJENGSTATE *pEngstate; + GPUCHILDINFO childInfo; + Dynamic **ppChildPtr; + ENGSTATE_TRANSITION_DATA engTransitionData; + + status = gpuGetChildInfo(classId, instanceID, &childInfo); + + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + ppChildPtr = gpuGetChildPtr(pGpu, childInfo.gpuChildPtrOffset); + + // Ask the object database utility to create a child object. 
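+    // objCreateDynamic() instantiates the NVOC class described by pClassInfo and
+    // stores the new object's pointer in the OBJGPU child slot (*ppChildPtr).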
+ status = objCreateDynamic(ppChildPtr, pGpu, childInfo.pClassInfo); + + if (status != NV_OK) + { + return status; + } + NV_ASSERT_OR_RETURN(*ppChildPtr, NV_ERR_INVALID_STATE); + + pEngstate = dynamicCast(*ppChildPtr, OBJENGSTATE); + + if (pEngstate == NULL) + { + status = NV_ERR_INVALID_STATE; + goto gpuCreateObject_exit; + } + + status = engstateConstructBase(pEngstate, pGpu, childInfo.engDesc); + NV_ASSERT_OR_GOTO(status == NV_OK, gpuCreateObject_exit); + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_CONSTRUCT, &engTransitionData); + status = engstateConstructEngine(pGpu, pEngstate, childInfo.engDesc); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_CONSTRUCT, &engTransitionData); + + // If engine is missing, free it immediately + if (pEngstate->getProperty(pEngstate, PDB_PROP_ENGSTATE_IS_MISSING)) + { + status = NV_ERR_NOT_SUPPORTED; + } + +gpuCreateObject_exit: + if (status != NV_OK) + { + objDelete(*ppChildPtr); + *ppChildPtr = NULL; + } + + return status; +} + + +void +gpuDestruct_IMPL +( + OBJGPU *pGpu +) +{ + HWBC_LIST *pGpuHWBCList = NULL; + int typeNum; + int instNum; + GPUCHILDTYPE *pChildTypeCur; + GPUCHILDINFO childInfoCur; + Dynamic **pChildPtr; + + // Call gpuacctDisableAccounting if accounting is enabled since it does some memory deallocation + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON)) + { + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(SYS_GET_INSTANCE()); + NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS params; + NV_STATUS status; + + /* + * On VGX host, users are not allowed to disable accounting. But we still + * need to do that while cleaning up (destroy timer part of this cleanup) + * in gpuDestruct_IMPL() path. If PDB_PROP_GPU_ACCOUNTING_ON is NV_TRUE and + * we call gpuacctDisableAccounting_IMPL() in gpuDestruct_IMPL() path, + * it throws not supported error. To bypass the not supported case in + * the gpuacctDisableAccounting_IMPL(), we are setting + * PDB_PROP_GPU_ACCOUNTING_ON to NV_FALSE here in gpuDestruct_IMPL(), so + * that execution goes forward in gpuacctDisableAccounting_IMPL() and + * timer gets destroyed properly. 
+ */ + pGpu->setProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON, NV_FALSE); + + params.gpuId = pGpu->gpuId; + params.pid = 0; + params.newState = NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED; + + status = gpuacctDisableAccounting(pGpuAcct, pGpu->gpuInstance, ¶ms); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "gpuacctDisableAccounting failed with error %d on GPU ID %d\n", + status, pGpu->gpuId); + } + } + + // Free children in reverse order from construction + for (typeNum = GPU_NUM_CHILD_TYPES - 1; typeNum >= 0; typeNum--) + { + pChildTypeCur = &gpuChildTypeList[typeNum]; + + if (!pChildTypeCur->pClassInfo) + { + continue; + } + + for (instNum = pChildTypeCur->instances - 1; instNum >= 0; instNum--) + { + NV_STATUS status; + + status = gpuGetChildInfo(pChildTypeCur->pClassInfo->classId, instNum, &childInfoCur); + + NV_ASSERT(status == NV_OK); + + pChildPtr = gpuGetChildPtr(pGpu, childInfoCur.gpuChildPtrOffset); + + if (*pChildPtr) + { + objDelete(*pChildPtr); + *pChildPtr = NULL; + } + } + } + + // + // If device instance is unassigned, we haven't initialized far enough to + // do any accounting with it + // + if (gpuGetDeviceInstance(pGpu) != NV_MAX_DEVICES) + { + rmapiReportLeakedDevices(gpuGetGpuMask(pGpu)); + } + + _gpuFreeEngineOrderList(pGpu); + + portMemFree(pGpu->pUserRegisterAccessMap); + pGpu->pUserRegisterAccessMap = NULL; + + portMemFree(pGpu->pUnrestrictedRegisterAccessMap); + pGpu->pUnrestrictedRegisterAccessMap = NULL; + + portMemFree(pGpu->pDeviceInfoTable); + pGpu->pDeviceInfoTable = NULL; + pGpu->numDeviceInfoEntries = 0; + + pGpu->userRegisterAccessMapSize = 0; + + gpuDestroyEngineTable(pGpu); + gpuDestroyClassDB(pGpu); + osDestroyOSHwInfo(pGpu); + + while(pGpu->pHWBCList) + { + pGpuHWBCList = pGpu->pHWBCList; + pGpu->pHWBCList = pGpuHWBCList->pNext; + portMemFree(pGpuHWBCList); + } + + // + // Destroy and free the RegisterAccess object linked to this GPU + // This should be moved out to gpu_mgr in the future to line up with + // the construction, but currently depends on pGpu still existing + // + regAccessDestruct(&pGpu->registerAccess); + + NV_ASSERT(pGpu->numConstructedFalcons == 0); + + portMemFree(pGpu->pRegopOffsetScratchBuffer); + pGpu->pRegopOffsetScratchBuffer = NULL; + + portMemFree(pGpu->pRegopOffsetAddrScratchBuffer); + pGpu->pRegopOffsetAddrScratchBuffer = NULL; + + pGpu->regopScratchBufferMaxOffsets = 0; + + gpuDestructPhysical(pGpu); +} + +static NV_STATUS +gpuCreateChildObjects +( + OBJGPU *pGpu, + NvBool bConstructEarly +) +{ + PENGDESCRIPTOR pEngDescriptors; + NvU32 numEngDescriptors; + PGPUCHILDTYPE pChildTypeCur; + GPUCHILDINFO childInfoCur; + NvU32 t, i; + NV_STATUS rmStatus = NV_OK; + + pEngDescriptors = gpuGetInitEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + for (t = 0; t < GPU_NUM_CHILD_TYPES; t++) + { + pChildTypeCur = &gpuChildTypeList[t]; + + if (!pChildTypeCur->pClassInfo) + { + continue; + } + + for (i = 0; i < pChildTypeCur->instances; i++) + { + NVOC_CLASS_ID classId = pChildTypeCur->pClassInfo->classId; + + rmStatus = gpuGetChildInfo(classId, i, &childInfoCur); + + NV_ASSERT(rmStatus == NV_OK); + + if ((bConstructEarly == childInfoCur.bConstructEarly) && + gpuShouldCreateObject(&childInfoCur, + pEngDescriptors, + numEngDescriptors)) + { + rmStatus = gpuCreateObject(pGpu, classId, i); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (rmStatus == NV_ERR_NOT_SUPPORTED) + { + rmStatus = NV_OK; + } + else if (rmStatus != NV_OK) + { + return rmStatus; + 
} + } + } + + // Bail out of both loops. + if (rmStatus != NV_OK) + { + break; + } + } + + return rmStatus; +} + +static NvBool +gpuShouldCreateObject +( + PGPUCHILDINFO pChildInfo, + PENGDESCRIPTOR pEngDescriptors, + NvU32 numEngDescriptors +) +{ + NvBool retVal = NV_FALSE; + NvU32 curEngDescIdx; + + if (pChildInfo->bAlwaysCreate) + { + // For now all SW engines get created + retVal = NV_TRUE; + } + else + { + // Let the HAL confirm that we should create an object for this engine. + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + if (pChildInfo->engDesc == pEngDescriptors[curEngDescIdx]) + { + retVal = NV_TRUE; + break; + } + } + } + + return retVal; +} + +NvU32 +gpuGetGpuMask_IMPL +( + OBJGPU *pGpu +) +{ + if (IsSLIEnabled(pGpu)) + { + return 1 << (gpumgrGetSubDeviceInstanceFromGpu(pGpu)); + } + else + { + return 1 << (pGpu->gpuInstance); + } +} + +static NV_STATUS gspSupportsEngine(OBJGPU *pGpu, ENGDESCRIPTOR engdesc, NvBool *supports) +{ + if (!IS_GSP_CLIENT(pGpu)) + return NV_WARN_NOTHING_TO_DO; + + NvU32 clientEngineId = 0; + + if (gpuXlateEngDescToClientEngineId(pGpu, engdesc, &clientEngineId) != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Failed to xlate engdesc 0x%x\n", engdesc); + return NV_WARN_NOTHING_TO_DO; + } + + if (pGpu->gspSupportedEngines == NULL) + { + pGpu->gspSupportedEngines = portMemAllocNonPaged(sizeof(*pGpu->gspSupportedEngines)); + NV_ASSERT_OR_RETURN(pGpu->gspSupportedEngines != NULL, NV_ERR_NO_MEMORY); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV_STATUS status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_GET_ENGINES_V2, + pGpu->gspSupportedEngines, + sizeof(*pGpu->gspSupportedEngines)); + + if (status != NV_OK) + { + portMemFree(pGpu->gspSupportedEngines); + return status; + } + } + + NvU32 i; + for (i = 0; i < pGpu->gspSupportedEngines->engineCount; i++) + { + if (pGpu->gspSupportedEngines->engineList[i] == clientEngineId) + { + *supports = NV_TRUE; + return NV_OK; + } + } + + *supports = NV_FALSE; + return NV_OK; +} + +/* + * The engine removal protocol is as follows: + * - engines returning an error code from ConstructEngine will be immediately + * removed (this happens in gpuCreateObject) + * - engines may set ENGSTATE_IS_MISSING at any time before gpuStatePreInit + * - engines with ENGSTATE_IS_MISSING set at gpuStatePreInit will be removed + * - engines that return NV_FALSE from engstateIsPresent at gpuStatePreInit + * will be removed + * + * gpuRemoveMissingEngines takes place before the main loop in gpuStatePreInit + * and is responsible for removing engines satisfying the last two bullets + * above. 
+ */ +static NV_STATUS +gpuRemoveMissingEngines +( + OBJGPU *pGpu +) +{ + NvU32 curEngDescIdx; + PENGDESCRIPTOR engDescriptorList = gpuGetInitEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NV_STATUS rmStatus = NV_OK; + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + OBJENGSTATE *pEngstate; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + NVOC_CLASS_ID curClassId = ENGDESC_FIELD(curEngDescriptor, _CLASS); + + if (curClassId == classId(OBJINVALID)) + { + continue; + } + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate != NULL) + { + if (!pEngstate->getProperty(pEngstate, PDB_PROP_ENGSTATE_IS_MISSING) && + engstateIsPresent(pGpu, pEngstate)) + { + continue; + } + + gpuDestroyMissingEngine(pGpu, pEngstate); + pEngstate = NULL; + } + + // + // pEngstate is NULL or missing, so we must be sure to unregister + // all associated API classes and remove the stale engine descriptors + // from the GPU HAL engine lists. + // + NV_PRINTF(LEVEL_INFO, "engine %d:%d is missing, removing\n", + ENGDESC_FIELD(curEngDescriptor, _CLASS), + ENGDESC_FIELD(curEngDescriptor, _INST)); + + rmStatus = gpuDeleteEngineOnPreInit(pGpu, curEngDescriptor); + NV_ASSERT(rmStatus == NV_OK || !"Error while trying to remove missing engine"); + } + + return rmStatus; +} + +/* + * Removing classes from classDB of a missing engine + */ +static void +gpuRemoveMissingEngineClasses +( + OBJGPU *pGpu, + NvU32 missingEngDescriptor +) +{ + NvU32 numClasses, i; + NvU32 *pClassList = NULL; + if (gpuGetClassList(pGpu, &numClasses, NULL, missingEngDescriptor) == NV_OK) + { + pClassList = portMemAllocNonPaged(sizeof(NvU32) * numClasses); + if (NV_OK == gpuGetClassList(pGpu, &numClasses, pClassList, missingEngDescriptor)) + { + for (i = 0; i < numClasses; i++) + { + gpuDeleteClassFromClassDBByClassId(pGpu, pClassList[i]); + } + } + portMemFree(pClassList); + pClassList = NULL; + } +} + +/* + * Destroy and unregister engine object of a missing engine + */ +static void +gpuDestroyMissingEngine +( + OBJGPU *pGpu, + OBJENGSTATE *pEngstate +) +{ + GPUCHILDINFO childInfo; + Dynamic **pChildPtr; + NV_STATUS status; + ENGDESCRIPTOR engDesc; + + engstateInitMissing(pGpu, pEngstate); + + engDesc = engstateGetDescriptor(pEngstate); + + status = gpuGetChildInfo(ENGDESC_FIELD(engDesc, _CLASS), ENGDESC_FIELD(engDesc, _INST), &childInfo); + + NV_ASSERT_OR_RETURN_VOID(status == NV_OK); + + pChildPtr = gpuGetChildPtr(pGpu, childInfo.gpuChildPtrOffset); + + objDelete(*pChildPtr); + *pChildPtr = NULL; +} + +/* + * @brief Find if given engine descriptor is supported by GPU + * + * @param[in] pGpu OBJGPU pointer + * @param[in] descriptor engine descriptor to search for + * + * @returns NV_TRUE if given engine descriptor was found in a + * given engine descriptor list, NV_FALSE otherwise. + * + */ +NvBool +gpuIsEngDescSupported_IMPL +( + OBJGPU *pGpu, + NvU32 descriptor +) +{ + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + PENGDESCRIPTOR pEngDescriptor = gpuGetInitEngineDescriptors(pGpu); + NvU32 counter = 0; + NvBool engDescriptorFound = NV_FALSE; + + for (counter = 0; counter < numEngDescriptors; counter++) + { + if (pEngDescriptor[counter] == descriptor) + { + engDescriptorFound = NV_TRUE; + break; + } + } + + return engDescriptorFound; +} +/*! + * @brief Mark given Engine Descriptor with ENG_INVALID engine descriptor. 
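+ *        Every slot that matches the given descriptor is overwritten, which
+ *        is why gpuDeleteEngineOnPreInit can call this helper once per HAL
+ *        list (load/unload/init/destroy).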
+ * + * Note: It is legal to have more than one entry with equal Descriptor + * in the Engine Descriptor list. + * + * @param[in] pEngDescriptor Pointer to array of engine descriptors + * @param[in] maxDescriptors Size of engine descriptor array + * @param[in] descriptor Engine descriptor to be changed to ENG_INVALID engine descriptor + * + * @returns void + */ +static void +gpuMissingEngDescriptor(PENGDESCRIPTOR pEngDescriptor, NvU32 maxDescriptors, + ENGDESCRIPTOR descriptor) +{ + NvU32 counter; + + for (counter = 0; counter < maxDescriptors; counter++) + { + if (pEngDescriptor[counter] == descriptor) + { + pEngDescriptor[counter] = ENG_INVALID; + } + } +} + + +/*! + * @brief Delete an engine from class DB. + * + * WARNING! Function doesn't remove INIT/DESTROY engines from HAL lists. + * gpuInitEng and gpuDestroyEng won't be no-ops for relevant engine. + * + * Use case: + * If an engine needs to be removed, but StateInit/Destroy are required. + * It's better to use gpuDeleteEngineOnPreInit instead. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID to search and remove + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteEngineFromClassDB_IMPL(OBJGPU *pGpu, NvU32 engDesc) +{ + PENGDESCRIPTOR pEngDesc = NULL; + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 engDescriptor = engDesc; + + // remove Class tagged with engDesc from Class Database + gpuDeleteClassFromClassDBByEngTag(pGpu, engDesc); + + // + // Bug 370327 + // Q: Why remove load/unload? + // A: Since this engine does not exist, we should prevent hw accesses to it + // which should ideally only take place in load/unload ( not init/destroy ) + // + // Q: Why not remove init/destroy, the engines gone right? + // A: If init does some alloc and loadhw does the probe then removing destroy + // will leak. + // + + // Remove load + pEngDesc = gpuGetLoadEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + // Remove unload + pEngDesc = gpuGetUnloadEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief Delete an engine from class DB only prior or on gpuPreInit stage. + * + * WARNING! Function must be used only before INIT stage, to avoid leaks. + * See gpuDeleteEngineFromClassDB for more information. + * + * Function removes Classes with given Engine Tag from class DB + * and removes Engines from HAL lists with equal Engine Tags. + * Function doesn't remove Engines from HAL Sync list, + * see gpuDeleteEngineFromClassDB for more information. + * + * Use case: + * Any platform where an engine is absent and it is required to + * prevent engine's load/unload and init/destroy calls from getting executed. + * In other words, this function is used when it is OK to remove/STUB all + * of the HALs of an engine without jeopardizing the initialization and + * operation of other engines. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID to search and remove + * + * @returns NV_STATUS - NV_OK on success, error otherwise. 
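+ *
+ * Unlike gpuDeleteEngineFromClassDB, this variant also removes the INIT and
+ * DESTROY descriptors, so neither StateInit nor StateDestroy runs for the
+ * removed engine. On GSP clients the class DB entries are left in place when
+ * GSP-RM still exposes the engine.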
+ * + */ +NV_STATUS +gpuDeleteEngineOnPreInit_IMPL(OBJGPU *pGpu, NvU32 engDesc) +{ + PENGDESCRIPTOR pEngDesc = NULL; + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + ENGDESCRIPTOR engDescriptor = engDesc; + NV_STATUS rmStatus = NV_OK; + NvBool bGspSupported = NV_FALSE; + + rmStatus = gspSupportsEngine(pGpu, engDesc, &bGspSupported); + if (rmStatus == NV_WARN_NOTHING_TO_DO) + rmStatus = NV_OK; + + NV_ASSERT_OK_OR_RETURN(rmStatus); + + // remove Class tagged with engDesc from Class Database. + if (!bGspSupported) + gpuDeleteClassFromClassDBByEngTag(pGpu, engDesc); + + // Remove Load Engine Descriptors + pEngDesc = gpuGetLoadEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + // Remove Unload Engine Descriptors + pEngDesc = gpuGetUnloadEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + // Remove Init Engine Descriptors + pEngDesc = gpuGetInitEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + // Remove Destroy Engine Descriptors + pEngDesc = gpuGetDestroyEngineDescriptors(pGpu); + gpuMissingEngDescriptor(pEngDesc, numEngDescriptors, + engDescriptor); + + if (!bGspSupported) + { + rmStatus = gpuUpdateEngineTable(pGpu); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Update engine table operation failed!\n"); + DBG_BREAKPOINT(); + } + } + + return rmStatus; +} + +/*! + * @brief Perform GPU pre init tasks + * + * Function tries to pre-init all engines from HAL Init Engine Descriptor list. + * If engine is not present, or its engine pre-init function reports it is unsupported + * then engine will be deleted from Class DB and HAL lists. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. + * TODO: Fix "init missing" concept to not create unsupported objects at all. 
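+ *
+ * Simplified call sequence (derived from the implementation below):
+ *
+ *     gpuInitBranding(pGpu);
+ *     _gpuAllocateInternalObjects(pGpu);
+ *     _gpuInitChipInfo(pGpu);
+ *     gpuConstructUserRegisterAccessMap(pGpu);
+ *     gpuBuildGenericKernelFalconList(pGpu);
+ *     gpuRemoveMissingEngines(pGpu);
+ *     // then engstateStatePreInit() for each engine in HAL init order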
+ * + * @param[in] pGpu OBJGPU pointer + * + * @returns NV_OK upon successful pre-initialization + */ +NV_STATUS +gpuStatePreInit_IMPL +( + OBJGPU *pGpu +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + // Quadro, Geforce SMB, Tesla, VGX, Titan GPU detection + gpuInitBranding(pGpu); + + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + NV_ASSERT_OK_OR_RETURN(_gpuAllocateInternalObjects(pGpu)); + NV_ASSERT_OK_OR_RETURN(_gpuInitChipInfo(pGpu)); + NV_ASSERT_OK_OR_RETURN(gpuConstructUserRegisterAccessMap(pGpu)); + NV_ASSERT_OK_OR_RETURN(gpuBuildGenericKernelFalconList(pGpu)); + + rmStatus = gpuRemoveMissingEngines(pGpu); + NV_ASSERT(rmStatus == NV_OK); + + pGpu->bFullyConstructed = NV_TRUE; + + engDescriptorList = gpuGetInitEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_INIT, &engTransitionData); + rmStatus = engstateStatePreInit(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_INIT, &engTransitionData); + + if (rmStatus == NV_ERR_NOT_SUPPORTED) + { + switch (curEngDescriptor) + { + // + // Allow removing kernel engines in StatePreInit if their + // physical counterpart is absent. + // + case ENG_KERNEL_DISPLAY: + // On Displayless GPU's, Display Engine is not present. So, RM should not keep the display + // classes in GET_CLASSLIST. Hence removing the Display classes from the ClassDB + gpuRemoveMissingEngineClasses(pGpu, ENG_KERNEL_DISPLAY); + break; + // + // Explicitly track engines that trigger this block + // so that we can verify they function properly + // after they are no longer removed here. + // + case ENG_INFOROM: + // TODO: try to remove this special case + NV_PRINTF(LEVEL_WARNING, + "engine removal in PreInit with NV_ERR_NOT_SUPPORTED is deprecated (%s)\n", + engstateGetName(pEngstate)); + break; + default: + NV_PRINTF(LEVEL_ERROR, + "disallowing NV_ERR_NOT_SUPPORTED PreInit removal of untracked engine (%s)\n", + engstateGetName(pEngstate)); + DBG_BREAKPOINT(); + NV_ASSERT(0); + break; + } + + gpuDestroyMissingEngine(pGpu, pEngstate); + pEngstate = NULL; + + rmStatus = gpuDeleteEngineOnPreInit(pGpu, curEngDescriptor); + // TODO: destruct engine here after MISSING support is removed + NV_ASSERT(rmStatus == NV_OK || !"Error while trying to remove missing engine"); + } + else if (rmStatus != NV_OK) + { + break; + } + } + + pGpu->boardInfo = portMemAllocNonPaged(sizeof(*pGpu->boardInfo)); + if (pGpu->boardInfo) + { + // To avoid potential race of xid reporting with the control, zero it out + portMemSet(pGpu->boardInfo, '\0', sizeof(*pGpu->boardInfo)); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + if (pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_GET_OEM_BOARD_INFO, + pGpu->boardInfo, + sizeof(*pGpu->boardInfo)) != NV_OK) + { + portMemFree(pGpu->boardInfo); + pGpu->boardInfo = NULL; + } + } + + return rmStatus; +} + +// TODO: Merge structurally equivalent code with other gpuState* functions. 
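+/*!
+ * @brief Run StateInit on every constructed engine, in HAL init order.
+ *
+ * NV_ERR_NOT_SUPPORTED from an individual engine is treated as success; any
+ * other failure aborts the loop and is returned to the caller. On success,
+ * PDB_PROP_GPU_STATE_INITIALIZED is set on the GPU.
+ */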
+NV_STATUS +gpuStateInit_IMPL +( + OBJGPU *pGpu +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + // Initialize numaNodeId to invalid node ID as "0" can be considered valid node + pGpu->numaNodeId = NV0000_CTRL_NO_NUMA_NODE; + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the below code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + engDescriptorList = gpuGetInitEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + // Do this before calling stateInit() of child engines. + objCreate(&pGpu->pPrereqTracker, pGpu, PrereqTracker, pGpu); + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the above code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_INIT, &engTransitionData); + rmStatus = engstateStateInit(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_INIT, &engTransitionData); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + + if (rmStatus == NV_ERR_NOT_SUPPORTED) + rmStatus = NV_OK; + if (rmStatus != NV_OK) + goto gpuStateInit_exit; + } + + // Set a property indicating that the state initialization has been done + pGpu->setProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED, NV_TRUE); + +gpuStateInit_exit: + return rmStatus; +} + +/*! + * @brief Top level pre-load routine + * + * Provides a mechanism to resolve cyclic dependencies between engines. + * + * StatePreLoad() is called before StateLoad() likewise StatePostUnload() is + * called after StateUnload(). + * + * Dependencies which are DAGs should continue to be resolved by reordering the + * engine descriptor lists. Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. 
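+ *
+ * The GPU lock is released and re-acquired between engines so that pending
+ * interrupts can be serviced, and broadcast mode is asserted to be disabled
+ * after each engine returns.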
+ * + * @param[in] pGpu OBJPGU pointer + * @param[in] flags Type of transition + */ +static NV_STATUS +gpuStatePreLoad +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetLoadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePreLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_LOAD, &engTransitionData); + rmStatus = engstateStatePreLoad(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_LOAD, &engTransitionData); + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePreLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + // + // An engine load leaving the broadcast status to NV_TRUE + // will most likely mess up the pre-load of the next engines + // + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (rmStatus == NV_ERR_NOT_SUPPORTED) + rmStatus = NV_OK; + if (rmStatus != NV_OK) + break; + + // + // Release and re-acquire the lock to allow interrupts + // + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_STATE_LOAD); + if (rmStatus != NV_OK) + break; + } + + return rmStatus; +} + +// TODO: Merge structurally equivalent code with other gpuState* functions. +NV_STATUS +gpuStateLoad_IMPL +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + NvU32 status = NV_OK; + + pGpu->registerAccess.regReadCount = pGpu->registerAccess.regWriteCount = 0; + RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadStart", pGpu->gpuId, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + // Initialize SRIOV specific members of OBJGPU + status = gpuInitSriov_HAL(pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error initializing SRIOV: 0x%0x\n", status); + return status; + } + + // It is a no-op on baremetal and inside non SRIOV guest. 
+ rmStatus = gpuCreateDefaultClientShare_HAL(pGpu); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + rmStatus = gpuStatePreLoad(pGpu, flags); + if (rmStatus != NV_OK) + { + // + // return early if we broke out of the preLoad sequence with + // rmStatus != NV_OK + // + return rmStatus; + } + + engDescriptorList = gpuGetLoadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Set indicator that we are running state load + pGpu->bStateLoading = NV_TRUE; + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_LOAD, &engTransitionData); + rmStatus = engstateStateLoad(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_LOAD, &engTransitionData); + + + // TODO: This is temporary and may be dead with TESLA + if (rmStatus == NV_ERR_INVALID_ADDRESS) + { + NV_PRINTF(LEVEL_ERROR, "NV_ERR_INVALID_ADDRESS is no longer supported in StateLoad (%s)\n", + engstateGetName(pEngstate)); + DBG_BREAKPOINT(); + } + + // + // An engine load leaving the broadcast status to NV_TRUE + // will most likely mess up the load of the next engines + // + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (rmStatus == NV_ERR_NOT_SUPPORTED) + rmStatus = NV_OK; + if (rmStatus != NV_OK) + goto gpuStateLoad_exit; + + // + // Release and re-acquire the lock to allow interrupts + // + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_STATE_LOAD); + if (rmStatus != NV_OK) + goto gpuStateLoad_exit; + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + } + + rmStatus = gpuInitVmmuInfo(pGpu); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error initializing VMMU info: 0x%0x\n", status); + goto gpuStateLoad_exit; + } + + { + // Perform post load operations + rmStatus = gpuStatePostLoad(pGpu, flags); + if (rmStatus != NV_OK) + goto gpuStateLoad_exit; + + } + + // Clear indicator that we are running state load + pGpu->bStateLoading = NV_FALSE; + + // Set a property indicating that the state load has been done + pGpu->bStateLoaded = NV_TRUE; + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStateLoadEnd", pGpu->gpuId, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + +gpuStateLoad_exit: + return rmStatus; +} + +/*! + * @brief Top level post-load routine + * + * Provides a mechanism to resolve cyclic dependencies between engines. For + * example, OBJFB depends on OBJCE on Fermi (for memory scrubbing), likewise + * OBJCE also depends on OBJFB (for instance memory). + * + * StatePostLoad() is called after StateLoad() likewise StatePreUnload() is + * called prior to StateUnload(). + * + * Dependencies which are DAGs should continue to be resolved by reordering the + * engine descriptor lists. 
Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. + * + * @param[in] pGpu OBJPGU pointer + * @param[in] flags Type of transition + */ +static NV_STATUS +gpuStatePostLoad +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetLoadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePostLoadEngStart", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_POST_LOAD, &engTransitionData); + rmStatus = engstateStatePostLoad(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_POST_LOAD, &engTransitionData); + RMTRACE_ENGINE_PROFILE_EVENT("gpuStatePostLoadEngEnd", curEngDescriptor, pGpu->registerAccess.regReadCount, pGpu->registerAccess.regWriteCount); + + // RMCONFIG: Bail on errors unless the feature/object/engine/class + // is simply unsupported + if (rmStatus == NV_ERR_NOT_SUPPORTED) + rmStatus = NV_OK; + if (rmStatus != NV_OK) + goto gpuStatePostLoad_exit; + + // + // Release and re-acquire the lock to allow interrupts + // + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + rmStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_STATE_LOAD); + + if (rmStatus != NV_OK) + goto gpuStatePostLoad_exit; + } + +gpuStatePostLoad_exit: + return rmStatus; +} + +/*! + * @brief Top level pre-unload routine + * + * Provides a mechanism to resolve cyclic dependencies between engines. For + * example, OBJFB depends on OBJCE on Fermi (for memory scrubbing), likewise + * OBJCE also depends on OBJFB (for instance memory). + * + * StatePostLoad() is called after StateLoad() likewise StatePreUnload() is + * called prior to StateUnload(). + * + * Dependencies which are DAGs should continue to be resolved by reordering the + * engine descriptor lists. Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. 
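+ *
+ * Unlike the load-time routines, a failure here is not fatal: anything other
+ * than NV_ERR_NOT_SUPPORTED is asserted on (except on fmodel) and then
+ * ignored so that the remaining engines still get a chance to unload.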
+ * + * @param[in] pGpu OBJPGU pointer + * @param[in] flags Type of transition + */ +static NV_STATUS +gpuStatePreUnload +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetUnloadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_PRE_UNLOAD, &engTransitionData); + rmStatus = engstateStatePreUnload(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_PRE_UNLOAD, &engTransitionData); + + // + // During unload, failure of a single engine may not be fatal. + // ASSERT if there is a failure, but ignore the status and continue + // unloading other engines to prevent (worse) memory leaks. + // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to pre unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + } + } + rmStatus = NV_OK; + } + + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + return rmStatus; +} + +NV_STATUS +gpuEnterShutdown_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus = gpuStateUnload(pGpu, GPU_STATE_DEFAULT); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to unload the device with error 0x%x\n", rmStatus); + } + + return rmStatus; +} + +// TODO: Merge structurally equivalent code with other gpuState* functions. +NV_STATUS +gpuStateUnload_IMPL +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + OBJENGSTATE *pEngstate; + NV_STATUS rmStatus = NV_OK; + NV_STATUS fatalErrorStatus = NV_OK; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // Set indicator that state is currently unloading. + pGpu->bStateUnloading = NV_TRUE; + + { + rmStatus = gpuStatePreUnload(pGpu, flags); + } + + if (rmStatus != NV_OK) + return rmStatus; + + engDescriptorList = gpuGetUnloadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_UNLOAD, &engTransitionData); + rmStatus = engstateStateUnload(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_UNLOAD, &engTransitionData); + + // + // An engine unload leaving the broadcast status to NV_TRUE + // will most likely mess up the unload of the next engines + // + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // + // During unload, failure of a single engine may not be fatal. 
+ // ASSERT if there is a failure, but ignore the status and continue + // unloading other engines to prevent (worse) memory leaks. + // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + + if (flags & GPU_STATE_FLAGS_PRESERVING) + { + // + // FBSR can fail due to low sysmem. + // So return error. + // See bugs 2051056, 2049141 + // + if (objDynamicCastById(pEngstate, classId(MemorySystem))) + { + fatalErrorStatus = rmStatus; + } + } + } + } + rmStatus = NV_OK; + } + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + // Call the gpuStatePostUnload routine + rmStatus = gpuStatePostUnload(pGpu, flags); + NV_ASSERT_OK(rmStatus); + + gpuDestroyDefaultClientShare_HAL(pGpu); + + // De-init SRIOV + gpuDeinitSriov_HAL(pGpu); + + // Set indicator that state unload finished. + pGpu->bStateUnloading = NV_FALSE; + + // Set a property indicating that the state unload has been done + if (rmStatus == NV_OK) + { + pGpu->bStateLoaded = NV_FALSE; + } + + if (fatalErrorStatus != NV_OK) + { + rmStatus = fatalErrorStatus; + } + + return rmStatus; +} + +/*! + * @brief Top level post-unload routine + * + * Provides a mechanism to resolve cyclic dependencies between engines. + * + * + * Dependencies which are DAGs should continue to be resolved by reordering the + * engine descriptor lists. Reordering the descriptor lists won't solve cyclic + * dependencies as at least one constraint would always be violated. + * + * TODO: Merge structurally equivalent code with other gpuState* functions. + * + * @param[in] pGpu OBJPGU pointer + * @param[in] flags Type of transition + */ +static NV_STATUS +gpuStatePostUnload +( + OBJGPU *pGpu, + NvU32 flags +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + OBJENGSTATE *pEngstate; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetUnloadEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_POST_UNLOAD, &engTransitionData); + rmStatus = engstateStatePostUnload(pGpu, pEngstate, flags); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_POST_UNLOAD, &engTransitionData); + + // + // An engine post-unload leaving the broadcast status to NV_TRUE + // will most likely mess up the post-unload of the next engines + // + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // + // During unload, failure of a single engine may not be fatal. + // ASSERT if there is a failure, but ignore the status and continue + // unloading other engines to prevent (worse) memory leaks. 
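+        // This mirrors the policy used in gpuStatePreUnload and
+        // gpuStateUnload above.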
+ // + if (rmStatus != NV_OK) + { + if (rmStatus != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to post unload engine with descriptor index: 0x%x and descriptor: 0x%x\n", + curEngDescIdx, curEngDescriptor); + if (!IS_FMODEL(pGpu)) + { + NV_ASSERT(0); + } + } + rmStatus = NV_OK; + } + + // Ensure that intr on other GPUs are serviced + gpuServiceInterruptsAllGpus(pGpu); + } + + return rmStatus; +} + +NV_STATUS +gpuStateDestroy_IMPL +( + OBJGPU *pGpu +) +{ + PENGDESCRIPTOR engDescriptorList; + NvU32 numEngDescriptors; + NvU32 curEngDescIdx; + OBJENGSTATE *pEngstate; + NV_STATUS rmStatus = NV_OK; + + engDescriptorList = gpuGetDestroyEngineDescriptors(pGpu); + numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + + NV_RM_RPC_SIM_FREE_INFRA(pGpu, rmStatus); + + // Order is determined by gpuGetChildrenOrder_HAL pulling gpuChildOrderList array + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGSTATE_TRANSITION_DATA engTransitionData; + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + + pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + continue; + } + + engstateLogStateTransitionPre(pEngstate, ENGSTATE_STATE_DESTROY, &engTransitionData); + engstateStateDestroy(pGpu, pEngstate); + engstateLogStateTransitionPost(pEngstate, ENGSTATE_STATE_DESTROY, &engTransitionData); + } + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the below code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + // Do this after calling stateDestroy() of child engines. + objDelete(pGpu->pPrereqTracker); + pGpu->pPrereqTracker = NULL; + + // Clear the property indicating that the state initialization has been done + if (rmStatus == NV_OK) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_STATE_INITIALIZED, NV_FALSE); + } + + if (IS_GSP_CLIENT(pGpu)) + { + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + rmStatus = kgspUnloadRm(pGpu, pKernelGsp); + } + + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + // TODO: Move the above code into appropriate ENGSTATE objects. + // DO NOT ADD MORE SPECIAL CASES HERE! + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + _gpuFreeInternalObjects(pGpu); + gpuDestroyGenericKernelFalconList(pGpu); + + portMemFree(pGpu->boardInfo); + pGpu->boardInfo = NULL; + + portMemFree(pGpu->gspSupportedEngines); + pGpu->gspSupportedEngines = NULL; + + portMemFree(pGpu->pChipInfo); + pGpu->pChipInfo = NULL; + + pGpu->bFullyConstructed = NV_FALSE; + + return rmStatus; +} + +// +// Logic: If arch = requested AND impl = requested --> NV_TRUE +// OR If arch = requested AND impl = requested AND maskRev = requested --> NV_TRUE +// OR If arch = requested AND impl = requested AND rev = requested --> NV_TRUE +// +NvBool +gpuIsImplementation_IMPL +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl, + NvU32 maskRevision, + NvU32 revision +) +{ + NvU32 gpuArch, gpuImpl; + NvBool result = NV_FALSE; + + NV_ASSERT(revision == GPU_NO_REVISION); + + gpuXlateHalImplToArchImpl(pGpu, halImpl, &gpuArch, &gpuImpl); + + result = ((gpuGetChipArch(pGpu) == gpuArch) && + (gpuGetChipImpl(pGpu) == gpuImpl)); + + if (maskRevision != GPU_NO_MASK_REVISION) + { + result = result && (GPU_GET_MASKREVISION(pGpu) == maskRevision); + } + + return result; +} + +// Check the software state to decide if we are in full power mode or not. 
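+// Returns NV_FALSE while PDB_PROP_GPU_IN_STANDBY or PDB_PROP_GPU_IN_HIBERNATE
+// is set; gpuIsGpuFullPowerForPmResume_IMPL below additionally treats the PM
+// resume codepath as full power.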
+NvBool +gpuIsGpuFullPower_IMPL +( + OBJGPU *pGpu +) +{ + NvBool retVal = NV_TRUE; + + // + // SW may have indicated that the GPU ins in standby, hibernate, or powered off, + // indicating a logical power state. + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_STANDBY) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE)) + { + retVal = NV_FALSE; + } + + return retVal; +} + +// Check the software state to decide if we are in full power mode or not. +NvBool +gpuIsGpuFullPowerForPmResume_IMPL +( + OBJGPU *pGpu +) +{ + NvBool retVal = NV_TRUE; + // + // SW may have indicated that the GPU ins in standby, resume, hibernate, or powered off, + // indicating a logical power state. + // + if ((!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) && + (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_STANDBY) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE))) + { + retVal = NV_FALSE; + } + return retVal; +} + +NvBool +gpuIsImplementationOrBetter_IMPL +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl, + NvU32 maskRevision, + NvU32 revision +) +{ + NvU32 gpuArch, gpuImpl; + NvU32 chipArch; + NvBool result = NV_FALSE; + + NV_ASSERT(revision == GPU_NO_REVISION); + + gpuXlateHalImplToArchImpl(pGpu, halImpl, &gpuArch, &gpuImpl); + + // "is implementation or better" is only defined between 2 gpus within + // the same "gpu series" as defined in config/Gpus.pm and gpuarch.h + chipArch = gpuGetChipArch(pGpu); + + if (DRF_VAL(GPU, _ARCHITECTURE, _SERIES, chipArch) == DRF_VAL(GPU, _ARCHITECTURE, _SERIES, gpuArch)) + { + if (maskRevision != GPU_NO_MASK_REVISION) + { + result = gpuSatisfiesTemporalOrderMaskRev(pGpu, halImpl, gpuArch, + gpuImpl, maskRevision); + } + else + { + // In case there is a temporal ordering we need to account for + result = gpuSatisfiesTemporalOrder(pGpu, halImpl, gpuArch, gpuImpl); + } + } + + return result; +} + +static void +gpuXlateHalImplToArchImpl +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl, + NvU32 *gpuArch, + NvU32 *gpuImpl +) +{ + switch (halImpl) + { + case HAL_IMPL_GM107: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL; + *gpuImpl = GPU_IMPLEMENTATION_GM107; + break; + } + + case HAL_IMPL_GM108: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL; + *gpuImpl = GPU_IMPLEMENTATION_GM108; + break; + } + + case HAL_IMPL_GM200: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL2; + *gpuImpl = GPU_IMPLEMENTATION_GM200; + break; + } + + case HAL_IMPL_GM204: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL2; + *gpuImpl = GPU_IMPLEMENTATION_GM204; + break; + } + + case HAL_IMPL_GM206: + { + *gpuArch = GPU_ARCHITECTURE_MAXWELL2; + *gpuImpl = GPU_IMPLEMENTATION_GM206; + break; + } + + case HAL_IMPL_GP100: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP100; + break; + } + + case HAL_IMPL_GP102: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP102; + break; + } + + case HAL_IMPL_GP104: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP104; + break; + } + + case HAL_IMPL_GP106: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP106; + break; + } + + case HAL_IMPL_GP107: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP107; + break; + } + + case HAL_IMPL_GP108: + { + *gpuArch = GPU_ARCHITECTURE_PASCAL; + *gpuImpl = GPU_IMPLEMENTATION_GP108; + break; + } + + case HAL_IMPL_GV100: + { + *gpuArch = GPU_ARCHITECTURE_VOLTA; + *gpuImpl = GPU_IMPLEMENTATION_GV100; + break; + } + + case HAL_IMPL_GV11B: + { + *gpuArch = GPU_ARCHITECTURE_VOLTA2; + *gpuImpl = GPU_IMPLEMENTATION_GV11B; + 
break; + } + + case HAL_IMPL_TU102: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU102; + break; + } + + case HAL_IMPL_TU104: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU104; + break; + } + + case HAL_IMPL_TU106: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU106; + break; + } + + case HAL_IMPL_TU116: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU116; + break; + } + + case HAL_IMPL_TU117: + { + *gpuArch = GPU_ARCHITECTURE_TURING; + *gpuImpl = GPU_IMPLEMENTATION_TU117; + break; + } + + case HAL_IMPL_AMODEL: + { + *gpuArch = GPU_ARCHITECTURE_SIMS; + *gpuImpl = GPU_IMPLEMENTATION_AMODEL; + break; + } + + case HAL_IMPL_T124: + { + *gpuArch = GPU_ARCHITECTURE_T12X; + *gpuImpl = GPU_IMPLEMENTATION_T124; + break; + } + + case HAL_IMPL_T132: + { + *gpuArch = GPU_ARCHITECTURE_T13X; + *gpuImpl = GPU_IMPLEMENTATION_T132; + break; + } + + case HAL_IMPL_T210: + { + *gpuArch = GPU_ARCHITECTURE_T21X; + *gpuImpl = GPU_IMPLEMENTATION_T210; + break; + } + + case HAL_IMPL_T186: + { + *gpuArch = GPU_ARCHITECTURE_T18X; + *gpuImpl = GPU_IMPLEMENTATION_T186; + break; + } + + case HAL_IMPL_T194: + { + *gpuArch = GPU_ARCHITECTURE_T19X; + *gpuImpl = GPU_IMPLEMENTATION_T194; + break; + } + + case HAL_IMPL_GA100: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA100; + break; + } + + case HAL_IMPL_GA102: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA102; + break; + } + + case HAL_IMPL_GA102F: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA102F; + break; + } + + case HAL_IMPL_GA103: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA103; + break; + } + + case HAL_IMPL_GA104: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA104; + break; + } + + case HAL_IMPL_GA106: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA106; + break; + } + + case HAL_IMPL_GA107: + { + *gpuArch = GPU_ARCHITECTURE_AMPERE; + *gpuImpl = GPU_IMPLEMENTATION_GA107; + break; + } + + + default: + { + *gpuArch = 0; + *gpuImpl = 0; + NV_PRINTF(LEVEL_ERROR, "Invalid halimpl\n"); + DBG_BREAKPOINT(); + break; + } + } +} + +// +// default Logic: If arch is greater than requested --> NV_TRUE +// OR If arch is = requested AND impl is >= requested --> NV_TRUE +// +// NOTE: only defined for gpus within same gpu series +// +static NvBool +gpuSatisfiesTemporalOrder +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl, + NvU32 gpuArch, + NvU32 gpuImpl +) +{ + NvBool result = NV_FALSE; + + switch (halImpl) + { + // + // Comparison of Tegra series isn't straightforward with the chip ids + // following different formats and so we can't use them + // to figure out the relative ordering of chips. + // T12X, T13X use 0x40, 0x13. + // + case HAL_IMPL_T124: + { + result = gpuIsT124ImplementationOrBetter(pGpu); + break; + } + default: + { + NvU32 chipArch = gpuGetChipArch(pGpu); + NvU32 chipImpl = gpuGetChipImpl(pGpu); + + result = ((chipArch > gpuArch) || + ((chipArch == gpuArch) && + (chipImpl >= gpuImpl))); + break; + } + } + + return result; +} + +/*! + * @brief Checks if current GPU is T124OrBetter + * + * T124+ corresponds to BIG-GPU tegra chips that + * are either T124 or beyond. + * ChipArch which the generic implementation relies + * on doesn't give the hierarchy of chips + * accurately. Hence the explicit check for chips + * below. 
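+ *
+ * Called from gpuSatisfiesTemporalOrder when the requested implementation is
+ * HAL_IMPL_T124, since Tegra chip ids use different formats across
+ * generations and a plain numeric comparison is not meaningful there.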
+ * + * @param[in] pGpu GPU object pointer + * + * @returns NV_TRUE if T124 or any later big-gpu tegra chip, + * NV_FALSE otherwise + */ +static NvBool +gpuIsT124ImplementationOrBetter +( + OBJGPU *pGpu +) +{ + NvU32 chipArch = gpuGetChipArch(pGpu); + NvU32 chipImpl = gpuGetChipImpl(pGpu); + + // + // All Big-gpu chips like T124, T132 or later satisy the condition. + // This makes the assumption that starting from T186, there are no + // AURORA chips. + // + return (((chipArch == GPU_ARCHITECTURE_T12X) && (chipImpl == GPU_IMPLEMENTATION_T124)) || + ((chipArch == GPU_ARCHITECTURE_T13X) && (chipImpl == GPU_IMPLEMENTATION_T132)) || + ((chipArch == GPU_ARCHITECTURE_T21X) && (chipImpl == GPU_IMPLEMENTATION_T210)) || + ((chipArch == GPU_ARCHITECTURE_T19X) && (chipImpl == GPU_IMPLEMENTATION_T194)) || + ((chipArch == GPU_ARCHITECTURE_T23X) && (chipImpl == GPU_IMPLEMENTATION_T234D)) || + ((chipArch == GPU_ARCHITECTURE_T23X) && (chipImpl == GPU_IMPLEMENTATION_T234)) || + ((chipArch >= GPU_ARCHITECTURE_T18X) && (chipImpl == GPU_IMPLEMENTATION_T186))); +} + + +// +// default Logic: If arch = requested AND impl = requested AND +// maskRev is >= requested --> NV_TRUE +// +static NvBool +gpuSatisfiesTemporalOrderMaskRev +( + OBJGPU *pGpu, + HAL_IMPLEMENTATION halImpl, + NvU32 gpuArch, + NvU32 gpuImpl, + NvU32 maskRevision +) +{ + NvBool result = NV_FALSE; + + result = ((gpuGetChipArch(pGpu)== gpuArch) && + (gpuGetChipImpl(pGpu) == gpuImpl) && + (GPU_GET_MASKREVISION(pGpu) >= maskRevision)); + + return result; +} + +// =============== Engine Database ============================== + +typedef struct { + NvU32 clientEngineId; + NVOC_CLASS_ID class; + NvU32 instance; + NvBool bHostEngine; +} EXTERN_TO_INTERNAL_ENGINE_ID; + +static const EXTERN_TO_INTERNAL_ENGINE_ID rmClientEngineTable[] = +{ + { NV2080_ENGINE_TYPE_GR0, classId(Graphics) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_GR1, classId(Graphics) , 1, NV_TRUE }, + { NV2080_ENGINE_TYPE_GR2, classId(Graphics) , 2, NV_TRUE }, + { NV2080_ENGINE_TYPE_GR3, classId(Graphics) , 3, NV_TRUE }, + { NV2080_ENGINE_TYPE_GR4, classId(Graphics) , 4, NV_TRUE }, + { NV2080_ENGINE_TYPE_GR5, classId(Graphics) , 5, NV_TRUE }, + { NV2080_ENGINE_TYPE_GR6, classId(Graphics) , 6, NV_TRUE }, + { NV2080_ENGINE_TYPE_GR7, classId(Graphics) , 7, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY0, classId(OBJCE) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY1, classId(OBJCE) , 1, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY2, classId(OBJCE) , 2, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY3, classId(OBJCE) , 3, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY4, classId(OBJCE) , 4, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY5, classId(OBJCE) , 5, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY6, classId(OBJCE) , 6, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY7, classId(OBJCE) , 7, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY8, classId(OBJCE) , 8, NV_TRUE }, + { NV2080_ENGINE_TYPE_COPY9, classId(OBJCE) , 9, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVDEC0, classId(OBJBSP) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVDEC1, classId(OBJBSP) , 1, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVDEC2, classId(OBJBSP) , 2, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVDEC3, classId(OBJBSP) , 3, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVDEC4, classId(OBJBSP) , 4, NV_TRUE }, + { NV2080_ENGINE_TYPE_CIPHER, classId(OBJCIPHER) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVENC0, classId(OBJMSENC) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVENC1, classId(OBJMSENC) , 1, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVENC2, classId(OBJMSENC) , 2, NV_TRUE }, + { NV2080_ENGINE_TYPE_SW, classId(OBJSWENG) , 0, NV_TRUE }, + { 
NV2080_ENGINE_TYPE_SEC2, classId(OBJSEC2) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_NVJPEG0, classId(OBJNVJPG) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_OFA, classId(OBJOFA) , 0, NV_TRUE }, + { NV2080_ENGINE_TYPE_DPU, classId(OBJDPU) , 0, NV_FALSE }, + { NV2080_ENGINE_TYPE_PMU, classId(Pmu) , 0, NV_FALSE }, + { NV2080_ENGINE_TYPE_FBFLCN, classId(OBJFBFLCN) , 0, NV_FALSE }, + { NV2080_ENGINE_TYPE_HOST, classId(KernelFifo) , 0, NV_FALSE }, +}; + +NV_STATUS gpuConstructEngineTable_IMPL +( + OBJGPU *pGpu +) +{ + NvU32 engineId = 0; + + // Alloc engine DB + pGpu->engineDB.bValid = NV_FALSE; + pGpu->engineDB.pType = portMemAllocNonPaged( + NV_ARRAY_ELEMENTS(rmClientEngineTable) * sizeof(*pGpu->engineDB.pType)); + if (pGpu->engineDB.pType == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "gpuConstructEngineTable: Could not allocate engine DB\n"); + DBG_BREAKPOINT(); + return NV_ERR_NO_MEMORY; + } + pGpu->engineDB.size = 0; // That's right, its the size not the capacity + // of the engineDB + + // Initialize per-GPU per-engine list of non-stall interrupt event nodes. + for (engineId = 0; engineId < NV2080_ENGINE_TYPE_LAST; engineId++) + { + pGpu->engineNonstallIntr[engineId].pEventNode = NULL; + pGpu->engineNonstallIntr[engineId].pSpinlock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (pGpu->engineNonstallIntr[engineId].pSpinlock == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +NV_STATUS gpuUpdateEngineTable_IMPL +( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + NvU32 counter = 0; + NvU32 numClasses = 0; + + if (pGpu->engineDB.pType == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "gpuUpdateEngineTable: EngineDB has not been created yet\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + if (pGpu->engineDB.bValid) + { + return NV_OK; + } + + // Read through the classDB and populate engineDB + pGpu->engineDB.size = 0; + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + // There are tests such as ClassA06fTest that attempt to bind all engines reported + if (!rmClientEngineTable[counter].bHostEngine) + { + continue; + } + + status = gpuGetClassList(pGpu, &numClasses, NULL, + MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance)); + if ((status != NV_OK) || ( numClasses == 0)) + { + continue; + } + pGpu->engineDB.pType[pGpu->engineDB.size++] = + rmClientEngineTable[counter].clientEngineId; + } + + pGpu->engineDB.bValid = NV_TRUE; + + return NV_OK; +} +void gpuDestroyEngineTable_IMPL(OBJGPU *pGpu) +{ + NvU32 engineId = 0; + + if (pGpu->engineDB.pType) + { + pGpu->engineDB.size = 0; + portMemFree(pGpu->engineDB.pType); + pGpu->engineDB.pType = NULL; + pGpu->engineDB.bValid = NV_FALSE; + } + + for (engineId = 0; engineId < NV2080_ENGINE_TYPE_LAST; engineId++) + { + NV_ASSERT(pGpu->engineNonstallIntr[engineId].pEventNode == NULL); + + if (pGpu->engineNonstallIntr[engineId].pSpinlock != NULL) + { + portSyncSpinlockDestroy(pGpu->engineNonstallIntr[engineId].pSpinlock); + } + } +} + +NvBool gpuCheckEngineTable_IMPL +( + OBJGPU *pGpu, + NvU32 engType +) +{ + NvU32 engineId; + + if (!IS_MODS_AMODEL(pGpu)) + { + NV_ASSERT_OR_RETURN(pGpu->engineDB.bValid, NV_FALSE); + } + + NV_ASSERT_OR_RETURN(engType < NV2080_ENGINE_TYPE_LAST, NV_FALSE); + + for (engineId = 0; engineId < pGpu->engineDB.size; engineId++) + { + if (engType == pGpu->engineDB.pType[engineId]) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} + +NV_STATUS +gpuXlateClientEngineIdToEngDesc_IMPL +( + OBJGPU *pGpu, + NvU32 clientEngineID, + 
ENGDESCRIPTOR *pEngDesc + +) +{ + NvU32 counter; + + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + if (rmClientEngineTable[counter].clientEngineId == clientEngineID) + { + *pEngDesc = MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance); + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuXlateEngDescToClientEngineId_IMPL +( + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc, + NvU32 *pClientEngineID +) +{ + NvU32 counter; + + for (counter = 0; counter < NV_ARRAY_ELEMENTS(rmClientEngineTable); counter++) + { + if (MKENGDESC(rmClientEngineTable[counter].class, rmClientEngineTable[counter].instance) == engDesc) + { + *pClientEngineID = rmClientEngineTable[counter].clientEngineId; + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuGetFlcnFromClientEngineId_IMPL +( + OBJGPU *pGpu, + NvU32 clientEngineId, + Falcon **ppFlcn +) +{ + *ppFlcn = NULL; + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +gpuGetGidInfo_IMPL +( + OBJGPU *pGpu, + NvU8 **ppGidString, + NvU32 *pGidStrlen, + NvU32 gidFlags +) +{ + NV_STATUS rmStatus = NV_OK; + NvU8 gidData[RM_SHA1_GID_SIZE]; + NvU32 gidSize = RM_SHA1_GID_SIZE; + + if (!FLD_TEST_DRF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1,gidFlags)) + { + return NV_ERR_INVALID_FLAGS; + } + + if (pGpu->gpuUuid.isInitialized) + { + portMemCopy(gidData, gidSize, &pGpu->gpuUuid.uuid[0], gidSize); + goto fillGidData; + } + + rmStatus = gpuGenGidData_HAL(pGpu, gidData, gidSize, gidFlags); + + if (rmStatus != NV_OK) + { + return rmStatus; + } + + // if not cached, cache it here + portMemCopy(&pGpu->gpuUuid.uuid[0], gidSize, gidData, gidSize); + pGpu->gpuUuid.isInitialized = NV_TRUE; + +fillGidData: + if (ppGidString != NULL) + { + if (FLD_TEST_DRF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY, + gidFlags)) + { + // + // Instead of transforming the Gid into a string, just use it in its + // original binary form. The allocation rules are the same as those + // followed by the transformGidToUserFriendlyString routine: we + // allocate ppGidString here, and the caller frees ppGidString. + // + *ppGidString = portMemAllocNonPaged(gidSize); + if (*ppGidString == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemCopy(*ppGidString, gidSize, gidData, gidSize); + *pGidStrlen = gidSize; + } + else + { + NV_ASSERT_OR_RETURN(pGidStrlen != NULL, NV_ERR_INVALID_ARGUMENT); + rmStatus = transformGidToUserFriendlyString(gidData, gidSize, + ppGidString, pGidStrlen, gidFlags); + } + } + + return rmStatus; +} + +void +gpuSetDisconnectedProperties_IMPL +( + OBJGPU *pGpu +) +{ + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_LOST, NV_TRUE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_CONNECTED, NV_FALSE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH, NV_FALSE); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_STANDBY, NV_FALSE); + pGpu->bInD3Cold = NV_FALSE; + pGpu->setProperty(pGpu, PDB_PROP_GPU_IN_HIBERNATE, NV_FALSE); + +} + +/*! + * @brief: Get the GPU's sparse texture compute mode setting information. + * + * @param[in] pGpu GPU object pointer + * @param[out] pDefault The default sparse texture compute mode setting + * for this GPU on this platform. + * @param[out] pCurrent The current sparse texture compute mode setting + * that was applied on the last driver load. + * @param[out] pPending The sparse texture compute mode setting that will + * be applied on the next driver load. 
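+ *
+ * The pending value reflects the NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX
+ * registry key when it is present and valid; otherwise it falls back to the
+ * platform default for this GPU.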
+ * + * @returns NV_OK if the setting information is available + * NV_ERR_INVALID_ARGUMENT if any of the pointers are invalid + * NV_ERR_NOT_SUPPORTED if the sparse texture RMCFG is not enabled, + * or the GPU cannot support optimizing sparse texture vs compute. + */ +NV_STATUS +gpuGetSparseTextureComputeMode_IMPL +( + OBJGPU *pGpu, + NvU32 *pDefault, + NvU32 *pCurrent, + NvU32 *pPending +) +{ + NV_STATUS status; + NvU32 data; + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K)) + { + return NV_ERR_NOT_SUPPORTED; + } + + if ((pDefault == NULL) || (pCurrent == NULL) || (pPending == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE)) + { + return NV_ERR_NOT_SUPPORTED; + } + + *pDefault = pGpu->getProperty(pGpu, PDB_PROP_GPU_OPTIMIZE_SPARSE_TEXTURE_BY_DEFAULT) ? + NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE : + NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE; + + *pCurrent = *pDefault; + if (pGpu->optimizeUseCaseOverride != NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_DEFAULT) + { + switch (pGpu->optimizeUseCaseOverride) + { + case NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_COMPUTE: + *pCurrent = NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE; + break; + case NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_SPARSE_TEX: + *pCurrent = NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE; + break; + default: + break; + } + } + + // + // The pending starts out as the default value; we will only attempt an + // override if the regkey is set and has a valid value. + // + *pPending = *pDefault; + status = osReadRegistryDword(pGpu, + NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX, + &data); + if (status == NV_OK) + { + switch (data) + { + case NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_COMPUTE: + *pPending = NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE; + break; + case NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_SPARSE_TEX: + *pPending = NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE; + break; + default: + break; + } + } + + return NV_OK; +} + +/*! + * @brief: Set the GPU's sparse texture compute mode setting to apply on the + * next driver load. + * + * @param[in] pGpu GPU object pointer + * @param[out] setting The sparse texture compute mode setting that should + * be applied on the next driver load. + * + * @returns NV_OK if the setting is saved in the registry + * NV_ERR_INVALID_ARGUMENT if pGpu or the setting is invalid + * NV_ERR_NOT_SUPPORTED if the sparse texture RMCFG is not enabled, + * or the GPU cannot support optimizing sparse texture vs compute. 
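+ *
+ * Hedged caller-side sketch (not code from this file); a typical sequence
+ * persists the new mode and then reads it back as the pending setting:
+ *
+ *     NV_ASSERT_OK(gpuSetSparseTextureComputeMode(pGpu, setting));
+ *     NV_ASSERT_OK(gpuGetSparseTextureComputeMode(pGpu, &def, &cur, &pending));
+ *     // 'pending' now reflects 'setting' and takes effect on next driver load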
+ */ +NV_STATUS +gpuSetSparseTextureComputeMode_IMPL +( + OBJGPU *pGpu, + NvU32 setting +) +{ + NV_STATUS status; + NvU32 data = NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_DEFAULT; + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_VGPU_BIG_PAGE_SIZE_64K)) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE)) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (setting == NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_COMPUTE) + { + data = NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_COMPUTE; + } + else if (setting == NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_OPTIMIZE_SPARSE_TEXTURE) + { + data = NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_SPARSE_TEX; + } + else if (setting != NV0080_CTRL_GPU_SPARSE_TEXTURE_COMPUTE_MODE_DEFAULT) + { + return NV_ERR_INVALID_ARGUMENT; + } + + status = osWriteRegistryDword(pGpu, + NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX, + data); + return status; +} + +NV_STATUS +gpuAddConstructedFalcon_IMPL +( + OBJGPU *pGpu, + Falcon *pFlcn +) +{ + NV_ASSERT_OR_RETURN(pFlcn, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN( + pGpu->numConstructedFalcons < NV_ARRAY_ELEMENTS(pGpu->constructedFalcons), + NV_ERR_BUFFER_TOO_SMALL); + + pGpu->constructedFalcons[pGpu->numConstructedFalcons++] = pFlcn; + return NV_OK; +} + +NV_STATUS +gpuRemoveConstructedFalcon_IMPL +( + OBJGPU *pGpu, + Falcon *pFlcn +) +{ + NvU32 i, j; + for (i = 0; i < pGpu->numConstructedFalcons; i++) + { + if (pGpu->constructedFalcons[i] == pFlcn) + { + for (j = i+1; j < pGpu->numConstructedFalcons; j++) + { + pGpu->constructedFalcons[j-1] = pGpu->constructedFalcons[j]; + } + pGpu->numConstructedFalcons--; + pGpu->constructedFalcons[pGpu->numConstructedFalcons] = NULL; + return NV_OK; + } + } + NV_ASSERT_FAILED("Attempted to remove a non-existent initialized Falcon!"); + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +gpuGetConstructedFalcon_IMPL +( + OBJGPU *pGpu, + NvU32 index, + Falcon **ppFlcn +) +{ + if (index >= pGpu->numConstructedFalcons) + return NV_ERR_OUT_OF_RANGE; + + *ppFlcn = pGpu->constructedFalcons[index]; + NV_ASSERT(*ppFlcn != NULL); + return NV_OK; +} + +NV_STATUS gpuBuildGenericKernelFalconList_IMPL(OBJGPU *pGpu) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status; + NvU32 i; + + NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *pParams; + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_NO_MEMORY); + + portMemSet(pParams, 0, sizeof(*pParams)); + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO, + pParams, sizeof(*pParams)), + done); + + NV_ASSERT_TRUE_OR_GOTO(status, + pParams->numConstructedFalcons <= NV_ARRAY_ELEMENTS(pGpu->genericKernelFalcons), + NV_ERR_BUFFER_TOO_SMALL, done); + + for (i = 0; i < pParams->numConstructedFalcons; i++) + { + KernelFalconEngineConfig config = {0}; + + config.physEngDesc = pParams->constructedFalconsTable[i].engDesc; + config.ctxAttr = pParams->constructedFalconsTable[i].ctxAttr; + config.ctxBufferSize = pParams->constructedFalconsTable[i].ctxBufferSize; + config.addrSpaceList = pParams->constructedFalconsTable[i].addrSpaceList; + config.registerBase = pParams->constructedFalconsTable[i].registerBase; + + status = objCreate(&pGpu->genericKernelFalcons[i], pGpu, GenericKernelFalcon, pGpu, &config); 
+ if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create a GenericKernelFalcon object %d\n", i); + goto done; + } + } + + pGpu->numGenericKernelFalcons = pParams->numConstructedFalcons; + +done: + portMemFree(pParams); + if (status != NV_OK) + { + gpuDestroyGenericKernelFalconList(pGpu); + } + return status; +} + +void gpuDestroyGenericKernelFalconList_IMPL(OBJGPU *pGpu) +{ + NvU32 i; + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpu->genericKernelFalcons); i++) + { + if (pGpu->genericKernelFalcons[i] != NULL) + { + objDelete(pGpu->genericKernelFalcons[i]); + pGpu->genericKernelFalcons[i] = NULL; + } + } + pGpu->numGenericKernelFalcons = 0; +} + + +GenericKernelFalcon * +gpuGetGenericKernelFalconForEngine_IMPL +( + OBJGPU *pGpu, + ENGDESCRIPTOR engDesc +) +{ + NvU32 i; + for (i = 0; i < pGpu->numGenericKernelFalcons; i++) + { + KernelFalcon *pKernelFalcon = staticCast(pGpu->genericKernelFalcons[i], KernelFalcon); + if (pKernelFalcon->physEngDesc == engDesc) + return pGpu->genericKernelFalcons[i]; + } + return NULL; +} + +void gpuRegisterGenericKernelFalconIntrService_IMPL(OBJGPU *pGpu, void *pRecords) +{ + NvU32 i; + for (i = 0; i < pGpu->numGenericKernelFalcons; i++) + { + IntrService *pIntrService = staticCast(pGpu->genericKernelFalcons[i], IntrService); + if (pIntrService != NULL) + intrservRegisterIntrService(pGpu, pIntrService, pRecords); + } +} + +/** + * @brief Initializes iterator for ENGDESCRIPTOR load order + * + * @return GPU_CHILD_ITER + */ +static ENGLIST_ITER +gpuGetEngineOrderListIter(OBJGPU *pGpu, NvU32 flags) +{ + ENGLIST_ITER it = { 0 }; + it.flags = flags; + return it; +} + + +static const GPUCHILDPRESENT * +gpuFindChildPresent(const GPUCHILDPRESENT *pChildPresentList, NvU32 numChildPresent, NvU32 classId) +{ + NvU32 i; + + for (i = 0; i < numChildPresent; i++) + { + if (pChildPresentList[i].classId == classId) + return &pChildPresentList[i]; + } + + return NULL; +} + +/*! + * @brief Sanity checks on given gfid + * + * @param[in] pGpu OBJGPU pointer + * @param[in] gfid GFID to be validated + * @param[in] bInUse NV_TRUE if GFID is being set for use + */ +NV_STATUS +gpuSanityCheckGfid_IMPL(OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) +{ + // Error if pAllocatedGfids + if (pGpu->sriovState.pAllocatedGfids == NULL) + { + return NV_ERR_INVALID_ADDRESS; + } + + // Sanity check on GFID + if (gfid > pGpu->sriovState.maxGfid) + { + return NV_ERR_OUT_OF_RANGE; + } + else if((bInUse == NV_TRUE) && (pGpu->sriovState.pAllocatedGfids[gfid] == GFID_ALLOCATED)) + { + return NV_ERR_IN_USE; + } + else if((bInUse == NV_FALSE) && (pGpu->sriovState.pAllocatedGfids[gfid] == GFID_FREE)) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +/*! 
+ * @brief Set/Unset bit in pAllocatedGfids + * + * @param[in] pGpu OBJGPU pointer + * @param[in] gfid GFID to be set/unset (Assumes GFID is sanity checked before calling this function) + * @param[in] bInUse NV_TRUE if GFID in use + */ +void +gpuSetGfidUsage_IMPL(OBJGPU *pGpu, NvU32 gfid, NvBool bInUse) +{ + NV_ASSERT_OR_RETURN_VOID(pGpu->sriovState.pAllocatedGfids != NULL); + + if (bInUse == NV_TRUE) + pGpu->sriovState.pAllocatedGfids[gfid] = GFID_ALLOCATED; + else + pGpu->sriovState.pAllocatedGfids[gfid] = GFID_FREE; +} + +/** + * @brief Iterates over the engine ordering list + * + * @param[in,out] pIt Iterator + * @param[out] pEngDesc The next engine descriptor + * + * @return NV_TRUE if *pEngDesc is valid, NV_FALSE if there are no more engines + */ +NvBool +gpuGetNextInEngineOrderList(OBJGPU *pGpu, ENGLIST_ITER *pIt, PENGDESCRIPTOR pEngDesc) +{ + NvBool bReverse = !!(pIt->flags & (GCO_LIST_UNLOAD | GCO_LIST_DESTROY)); + const GPUCHILDORDER *pChildOrderList; + NvU32 numChildOrder; + const GPUCHILDPRESENT *pChildPresentList; + NvU32 numChildPresent; + const GPUCHILDPRESENT *pCurChildPresent; + const GPUCHILDORDER *pCurChildOrder; + NvBool bAdvance = NV_FALSE; + + pChildOrderList = gpuGetChildrenOrder_HAL(pGpu, &numChildOrder); + pChildPresentList = gpuGetChildrenPresent_HAL(pGpu, &numChildPresent); + + if (!pIt->bStarted) + { + pIt->bStarted = NV_TRUE; + pIt->childOrderIndex = bReverse ? (NvS32)numChildOrder - 1 : 0; + } + + while (1) + { + if (bAdvance) + pIt->childOrderIndex += bReverse ? -1 : 1; + + if ((pIt->childOrderIndex >= (NvS32)numChildOrder) || (pIt->childOrderIndex < 0)) + return NV_FALSE; + + pCurChildOrder = &pChildOrderList[pIt->childOrderIndex]; + + if ((pCurChildOrder->flags & pIt->flags) != pIt->flags) + { + bAdvance = NV_TRUE; + continue; + } + + pCurChildPresent = gpuFindChildPresent(pChildPresentList, numChildPresent, pCurChildOrder->classId); + + if (!pCurChildPresent) + { + bAdvance = NV_TRUE; + continue; + } + + if (bAdvance) + { + pIt->instanceID = bReverse ? pCurChildPresent->instances - 1 : 0; + } + + if ((pIt->instanceID < (NvS32)pCurChildPresent->instances) && (pIt->instanceID >= 0)) + { + *pEngDesc = MKENGDESC(pCurChildOrder->classId, pIt->instanceID); + + pIt->instanceID += bReverse ? -1 : 1; + + return NV_TRUE; + } + + bAdvance = NV_TRUE; + } + + return NV_FALSE; +} + +/** + * Set SLI broadcast state in threadstate if SLI is enabled for the GPU + */ +void +gpuSetThreadBcState_IMPL(OBJGPU *pGpu, NvBool bcState) +{ + { + gpumgrSetBcEnabledStatus(pGpu, bcState); + } +} + + +NV_STATUS +gpuInitDispIpHal_IMPL +( + OBJGPU *pGpu, + NvU32 ipver +) +{ + RmHalspecOwner *pRmHalspecOwner = staticCast(pGpu, RmHalspecOwner); + DispIpHal *pDispIpHal = &pRmHalspecOwner->dispIpHal; + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + // + // 0xFFFFFFFF ipver value happens when Display engines is disabled. NVOC Disp IP + // halspec doesn't have a hal variant maps to this value. Convert it to DISPv0000. + // + if (ipver == 0xFFFFFFFF) + { + ipver = 0; + } + else if (ipver == 0x03010000) + { + // + // Display on GV100 has 0x0301 IP ver while it uses v0300 manuals. It is listed + // in disp.def IP_VERSIONS table as v03_00 since we added the chip. This wasn't a + // problem in chip-config as there it maps a range of IP ver to an implementation. + // Versions in "v0300 <= ipver < 0400" map to _v03_00 or lower IP version function. + // NVOC maps exact number but not range, thus we need to override the value when + // initializing halspec. 
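Returning briefly to the engine-ordering iterator defined earlier in this file (gpuGetEngineOrderListIter() and gpuGetNextInEngineOrderList()): a pass over the ordered, present engines is an init call followed by a while loop that stops when the iterator reports NV_FALSE. A minimal sketch, kept in this translation unit because the iterator constructor is static; the helper and visitor names are hypothetical.

// Hypothetical walk over the engine ordering list; "flags" selects the list
// variant (load vs. unload/destroy ordering) exactly as in the iterator above.
static void
sketchForEachEngineInOrder(OBJGPU *pGpu, NvU32 flags,
                           void (*pVisit)(OBJGPU *, ENGDESCRIPTOR))
{
    ENGLIST_ITER  it = gpuGetEngineOrderListIter(pGpu, flags);
    ENGDESCRIPTOR engDesc;

    // gpuGetNextInEngineOrderList() returns NV_FALSE once every present
    // instance of every ordered child has been produced.
    while (gpuGetNextInEngineOrderList(pGpu, &it, &engDesc))
    {
        pVisit(pGpu, engDesc);
    }
}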
+ // + ipver = 0x03000000; + } + + __nvoc_init_halspec_DispIpHal(pDispIpHal, ipver & 0xFFFF0000); + + if ((ipver & 0xFFFF0000) != 0) + { + DispIpHal dispIpHalv00; + __nvoc_init_halspec_DispIpHal(&dispIpHalv00, 0); + + // + // At GPU creation time, dispIpHal.__nvoc_HalVarIdx is initialized with DISPv0000. + // Any valid non-zero IP version listed in halspec DispIpHal assigns __nvoc_HalVarIdx + // to different value. + // + // If dispIpHal.__nvoc_HalVarIdx keeps same idx of DISPv0000 for a non-zero ipver, + // this means the IP ver is not listed in halspec DispIpHal and should be fixed. + // + // NVOC-TODO : make __nvoc_init_halspec_DispIpHal return error code and remove the check + if (pDispIpHal->__nvoc_HalVarIdx == dispIpHalv00.__nvoc_HalVarIdx) + { + NV_PRINTF(LEVEL_ERROR, "Invalid dispIpHal.__nvoc_HalVarIdx %d for Disp IP Vertion 0x%08x\n", + pDispIpHal->__nvoc_HalVarIdx, ipver); + + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + } + + void __nvoc_init_funcTable_KernelDisplay(KernelDisplay *, RmHalspecOwner *); + __nvoc_init_funcTable_KernelDisplay(pKernelDisplay, pRmHalspecOwner); + + void __nvoc_init_funcTable_DisplayInstanceMemory(DisplayInstanceMemory *, RmHalspecOwner *); + __nvoc_init_funcTable_DisplayInstanceMemory(KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay), + pRmHalspecOwner); + + return NV_OK; +} + +/*! + * @brief: Initialize chip related info + * This function fills up the chip info structure of OBJGPU. + * + * @param[in] pGpu OBJGPU pointer + * + * @returns void + */ + +void +gpuInitChipInfo_IMPL +( + OBJGPU *pGpu +) +{ + // + // NOTE: Register access and DRF field splitting should generally always + // go in HAL functions, but PMC_BOOT_0 and PMC_BOOT_42 are an exception + // as these are guaranteed to remain the same across chips, since we use + // them to figure out which chip it is and how to wire up the HALs. + // + pGpu->chipInfo.pmcBoot0.impl = DRF_VAL(_PMC, _BOOT_0, _IMPLEMENTATION, pGpu->chipId0); + pGpu->chipInfo.pmcBoot0.arch = DRF_VAL(_PMC, _BOOT_0, _ARCHITECTURE, pGpu->chipId0) << GPU_ARCH_SHIFT; + pGpu->chipInfo.pmcBoot0.majorRev = DRF_VAL(_PMC, _BOOT_0, _MAJOR_REVISION, pGpu->chipId0); + pGpu->chipInfo.pmcBoot0.minorRev = DRF_VAL(_PMC, _BOOT_0, _MINOR_REVISION, pGpu->chipId0); + pGpu->chipInfo.pmcBoot0.minorExtRev = NV2080_CTRL_GPU_INFO_MINOR_REVISION_EXT_NONE; + pGpu->chipInfo.pmcBoot42.impl = DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, pGpu->chipId1); + pGpu->chipInfo.pmcBoot42.arch = DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pGpu->chipId1) << GPU_ARCH_SHIFT; + pGpu->chipInfo.pmcBoot42.majorRev = DRF_VAL(_PMC, _BOOT_42, _MAJOR_REVISION, pGpu->chipId1); + pGpu->chipInfo.pmcBoot42.minorRev = DRF_VAL(_PMC, _BOOT_42, _MINOR_REVISION, pGpu->chipId1); + pGpu->chipInfo.pmcBoot42.minorExtRev = DRF_VAL(_PMC, _BOOT_42, _MINOR_EXTENDED_REVISION, pGpu->chipId1); + + // + // SOC do not use pmcBoot0/pmcBoot42 and instead write the impl details to + // these top level chipInfo fields, which is what the getters return. + // + pGpu->chipInfo.implementationId = pGpu->chipInfo.pmcBoot42.impl; + pGpu->chipInfo.platformId = pGpu->chipInfo.pmcBoot42.arch; +} + +/*! + * @brief: Returns physical address of end of DMA accessible range. 
+ * + * @param[in] pGpu GPU object pointer + * + * @returns physical address of end of DMA accessible range + */ +RmPhysAddr +gpuGetDmaEndAddress_IMPL(OBJGPU *pGpu) +{ + NvU32 numPhysAddrBits = gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM); + RmPhysAddr dmaWindowStartAddr = gpuGetDmaStartAddress(pGpu); + + return dmaWindowStartAddr + (1ULL << numPhysAddrBits) - 1; +} + +void *gpuGetStaticInfo(OBJGPU *pGpu) +{ + + return NULL; +} + +void *gpuGetGspStaticInfo(OBJGPU *pGpu) +{ + return &(GPU_GET_KERNEL_GSP(pGpu)->gspStaticInfo); +} + +OBJRPC *gpuGetGspClientRpc(OBJGPU *pGpu) +{ + if (IS_GSP_CLIENT(pGpu)) + { + return GPU_GET_KERNEL_GSP(pGpu)->pRpc; + } + return NULL; +} + +OBJRPC *gpuGetVgpuRpc(OBJGPU *pGpu) +{ + return NULL; +} + +OBJRPC *gpuGetRpc(OBJGPU *pGpu) +{ + if (IS_VIRTUAL(pGpu)) + return gpuGetVgpuRpc(pGpu); + + if (IS_GSP_CLIENT(pGpu)) + return gpuGetGspClientRpc(pGpu); + + return NULL; +} + +/*! + * @brief: Check if system memory is accessible by GPU + * Dependent on NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS only exercised on Windows. + * + * @param[in] pGpu OBJGPU pointer + * + * @returns NvBool NV_TRUE is system memory is accessible, + * NV_FALSE otherwise + */ +NvBool +gpuCheckSysmemAccess_IMPL(OBJGPU* pGpu) +{ + return NV_TRUE; +} + +/*! + * @brief Read the pcie spec registers using config cycles + * + * @param[in] pGpu GPU object pointer + * @param[in] index Register offset in PCIe config space + * @param[out] pData Value of the register + * + * @returns NV_OK on success + */ +NV_STATUS +gpuReadBusConfigCycle_IMPL +( + OBJGPU *pGpu, + NvU32 index, + NvU32 *pData +) +{ + NvU32 domain = gpuGetDomain(pGpu); + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU8 function = 0; + + if (pGpu->hPci == NULL) + { + pGpu->hPci = osPciInitHandle(domain, bus, device, function, NULL, NULL); + } + + *pData = osPciReadDword(pGpu->hPci, index); + + return NV_OK; +} + +/*! + * @brief Write to pcie spec registers using config cycles + * + * @param[in] pGpu GPU object pointer + * @param[in] index Register offset in PCIe config space + * @param[in] value Write this value to the register + * + * @returns NV_OK on success + */ +NV_STATUS +gpuWriteBusConfigCycle_IMPL +( + OBJGPU *pGpu, + NvU32 index, + NvU32 value +) +{ + NvU32 domain = gpuGetDomain(pGpu); + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU8 function = 0; + + if (pGpu->hPci == NULL) + { + pGpu->hPci = osPciInitHandle(domain, bus, device, function, NULL, NULL); + } + + osPciWriteDword(pGpu->hPci, index, value); + + return NV_OK; +} + +NV_STATUS gpuGetCeFaultMethodBufferSize_KERNEL(OBJGPU *pGpu, NvU32 *size) +{ + NvU32 sz = pGpu->ceFaultMethodBufferSize; + NV_STATUS status = NV_OK; + + if (sz == 0) + status = kceGetFaultMethodBufferSize(pGpu, &sz); + + if (status == NV_OK) + *size = sz; + + return NV_OK; +} + +void gpuServiceInterruptsAllGpus_IMPL +( + OBJGPU *pGpu +) +{ + Intr *pIntr = GPU_GET_INTR(pGpu); + MC_ENGINE_BITVECTOR engines; + if (pIntr != NULL) + { + bitVectorSetAll(&engines); + intrServiceStallListAllGpusCond(pGpu, pIntr, &engines, NV_TRUE); + } +} diff --git a/src/nvidia/src/kernel/gpu/gpu_access.c b/src/nvidia/src/kernel/gpu/gpu_access.c new file mode 100644 index 000000000..b5c712c48 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_access.c @@ -0,0 +1,1779 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/gpu.h" +#include "kernel/diagnostics/journal.h" + +#include "core/thread_state.h" +#include "nv_ref.h" + +// Following enums are duplicated in 'apps/nvbucket/oca/ocarm.h'. +typedef enum { + BAD_READ_GPU_OFF_BUS = 1, + BAD_READ_LOW_POWER, + BAD_READ_PCI_DEVICE_DISABLED, + BAD_READ_GPU_RESET, + BAD_READ_DWORD_SHIFT, + BAD_READ_UNKNOWN, +} RMCD_BAD_READ_REASON; + +static NV_STATUS _allocGpuIODevice(GPU_IO_DEVICE **ppIODevice); +static void _gpuCleanRegisterFilterList(DEVICE_REGFILTER_INFO *); +static NvU32 _gpuHandleReadRegisterFilter(OBJGPU *, DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, NvU32 accessSize, NvU32 *pFlags, THREAD_STATE_NODE *pThreadState); +static void _gpuHandleWriteRegisterFilter(OBJGPU *, DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, NvU32 val, NvU32 accessSize, NvU32 *pFlags, THREAD_STATE_NODE *pThreadState); + +static void _gpuApertureWriteRegUnicast(OBJGPU *, IO_APERTURE *pAperture, NvU32 addr, NvV32 val, NvU32 size); +static NvU32 _gpuApertureReadReg(IO_APERTURE *pAperture, NvU32 addr, NvU32 size); + +static NvU8 _gpuApertureReadReg008(IO_APERTURE *a, NvU32 addr); +static NvU16 _gpuApertureReadReg016(IO_APERTURE *a, NvU32 addr); +static NvU32 _gpuApertureReadReg032(IO_APERTURE *a, NvU32 addr); +static void _gpuApertureWriteReg008(IO_APERTURE *a, NvU32 addr, NvV8 value); +static void _gpuApertureWriteReg016(IO_APERTURE *a, NvU32 addr, NvV16 value); +static void _gpuApertureWriteReg032(IO_APERTURE *a, NvU32 addr, NvV32 value); +static void _gpuApertureWriteReg032Unicast(IO_APERTURE *a, NvU32 addr, NvV32 value); +static NvBool _gpuApertureValidReg(IO_APERTURE *a, NvU32 addr); + +static REGISTER_FILTER * _findGpuRegisterFilter(DEVICE_INDEX devIndex, NvU32 devInstance, NvU32 addr, REGISTER_FILTER *); +static NV_STATUS _gpuInitIODeviceAndAperture(OBJGPU *, NvU32, NvU32, RmPhysAddr, NvU32); + +NV_STATUS +regAccessConstruct +( + RegisterAccess *pRegisterAccess, + OBJGPU *pGpu +) +{ + NV_STATUS rmStatus = NV_OK; + DEVICE_INDEX deviceIndex, minDeviceIndex, maxDeviceIndex; + + pRegisterAccess->pGpu = pGpu; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // DEVICE_INDEX_GPU aperture is of GPU, as Tegra SOC NvDisplay constructs + // display device IO aperture as part of objdisp construction so its safe to + // skip this function. 
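As context for the per-device setup that regAccessConstruct() performs below, each DEVICE_INDEX_* that has a DEVICE_MAPPING gets its own GPU_IO_DEVICE and IO_APERTURE. A minimal sketch of a debug-style walk over those mappings, using the same gpuGetDeviceMapping() lookup the constructor relies on; the helper name is hypothetical and the print level is only a suggestion.

// Hypothetical debug helper: list every device mapping the register-access
// constructor below will wrap in an IO aperture.
static void
sketchDumpDeviceMappings(OBJGPU *pGpu)
{
    DEVICE_INDEX idx;

    for (idx = DEVICE_INDEX_GPU; idx < DEVICE_INDEX_MAX; idx++)
    {
        DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, idx, 0 /* instance */);

        if (pMapping != NULL)
        {
            NV_PRINTF(LEVEL_NOTICE, "deviceIndex=%u length=0x%x\n",
                      (NvU32)idx, pMapping->gpuNvLength);
        }
    }
}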
+ return NV_OK; + } + + // Check that GPU is the first device + ct_assert(DEVICE_INDEX_GPU == 0); + + minDeviceIndex = DEVICE_INDEX_GPU; + maxDeviceIndex = pGpu->bIsSOC ? (DEVICE_INDEX_MAX - 1) : (DEVICE_INDEX_GPU); + + for (deviceIndex = minDeviceIndex; deviceIndex <= maxDeviceIndex; deviceIndex++) + { + // Initialize IO Device and Aperture + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, 0); + if (pMapping != NULL) + { + rmStatus = _gpuInitIODeviceAndAperture(pGpu, deviceIndex, + pMapping->gpuDeviceEnum, + pMapping->gpuNvPAddr, + pMapping->gpuNvLength); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize pGpu IO device/aperture for deviceIndex=%d.\n", deviceIndex); + return rmStatus; + } + } + } + + return rmStatus; +} + +void +regAccessDestruct +( + RegisterAccess *pRegisterAccess +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + DEVICE_INDEX deviceIndex; + NvU32 mappingNum; + IO_APERTURE *pIOAperture; + REGISTER_FILTER *pNode; + + // Ignore attempt to destruct a not-fully-constructed RegisterAccess + if (pGpu == NULL) + { + return; + } + + for (deviceIndex = 0; deviceIndex < DEVICE_INDEX_MAX; deviceIndex++) + { + pIOAperture = pGpu->pIOApertures[deviceIndex]; + if (pIOAperture != NULL) + { + portMemFree(pIOAperture->pDevice); + ioaccessDestroyIOAperture(pIOAperture); + } + } + + for (mappingNum = 0; mappingNum < pGpu->gpuDeviceMapCount; mappingNum++) + { + // Device-specific register filter list + NV_ASSERT(!pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterList); + if (NULL != pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock) + { + portSyncSpinlockDestroy(pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock); + pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterLock = NULL; + } + + while (pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList) + { + pNode = pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList; + + pGpu->deviceMappings[mappingNum].devRegFilterInfo.pRegFilterRecycleList = pNode->pNext; + portMemFree(pNode); + } + } +} + +/*! + * @brief Allocates GPU_IO_DEVICE object + * + * @param[in] ppIODevice Pointer to uninitialized GPU_IO_DEVICE + */ +static NV_STATUS +_allocGpuIODevice +( + GPU_IO_DEVICE **ppIODevice +) +{ + GPU_IO_DEVICE *pDevice; + + pDevice = portMemAllocNonPaged(sizeof(GPU_IO_DEVICE)); + if (pDevice == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "memory allocation failed for GPU IO Device\n"); + DBG_BREAKPOINT(); + return NV_ERR_NO_MEMORY; + } + + portMemSet(pDevice, 0, sizeof(GPU_IO_DEVICE)); + + *ppIODevice = pDevice; + + return NV_OK; +} + + +// +// The following register I/O functions are organized into two groups; +// a multi-chip unaware group and a multi-chip aware group. +// The multi-chip aware group of register I/O functions is also split +// into two groups; one that really does multi-chip logic and another +// that has the same interface but doesn't do any of the multi-chip +// logic. +// +// In the interests of performance, the determination as to whether +// multi-chip logic is necessary is done at two levels; the upper-level +// functions use 'MC' register I/O macros where multi-chip considerations +// are required, and when the 'MC' register I/O macros are used they +// call through GPU object pointers that are polymorphic - they contain +// pointers to one of the two groups of multi-chip aware functions +// depending on whether the multi-chip condition actually exists. +// This avoids a run-time SLI LOOP call. 
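To make the organization described above concrete: the multi-chip-aware entry points below are just the unicast path wrapped in an SLI loop, so a broadcast write fans out to every GPU in the broadcast set while a unicast write touches only the current pGpu. A minimal sketch of that shape; the helper name is hypothetical, and regWrite032() further below is the real implementation of this pattern.

// Hypothetical broadcast write in the style of regWrite032(): each SLI loop
// iteration rebinds pGpu, so the per-GPU RegisterAccess object is re-fetched
// before the unicast write is issued.
static void
sketchBroadcastWrite32(OBJGPU *pGpu, DEVICE_INDEX deviceIndex, NvU32 addr, NvV32 val)
{
    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY);
    regWrite032Unicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, 0 /* instance */,
                       addr, val, NULL /* thread state */);
    SLI_LOOP_END;
}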
+// +static void +_regWriteUnicast +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvU32 val, + NvU32 size, + THREAD_STATE_NODE *pThreadState +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + NvU32 flags = 0; + NV_STATUS status; + DEVICE_MAPPING *pMapping; + + pRegisterAccess->regWriteCount++; + + pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + if (pMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + addr, deviceIndex, instance); + NV_ASSERT(0); + return; + } + + status = gpuSanityCheckRegisterAccess(pGpu, addr, NULL); + if (status != NV_OK) + { + return; + } + + _gpuHandleWriteRegisterFilter(pGpu, deviceIndex, instance, addr, val, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_WRITE)) + { + switch (size) + { + case 8: + osDevWriteReg008(pGpu, pMapping, addr, 0xFFU & (val)); + break; + case 16: + osDevWriteReg016(pGpu, pMapping, addr, 0xFFFFU & (val)); + break; + case 32: + osDevWriteReg032(pGpu, pMapping, addr, val); + break; + } + } +} + +static void +_gpuApertureWriteRegUnicast +( + OBJGPU *pGpu, + IO_APERTURE *pAperture, + NvU32 addr, + NvV32 val, + NvU32 size +) +{ + NV_ASSERT_OR_RETURN_VOID(pAperture); + NV_ASSERT_OR_RETURN_VOID(pAperture->pDevice); + + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*) pAperture->pDevice; + NvU32 deviceIndex = pDevice->deviceIndex; + NvU32 instance = pDevice->instance; + NvU32 regAddr = pAperture->baseAddress + addr; + NvU32 flags = 0; + NV_STATUS status; + THREAD_STATE_NODE *pThreadState; + DEVICE_MAPPING *pMapping; + + pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + + if (pMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + regAddr, deviceIndex, instance); + NV_ASSERT(0); + return; + } + + status = gpuSanityCheckRegisterAccess(pGpu, regAddr, NULL); + if (status != NV_OK) + { + return; + } + + threadStateGetCurrentUnchecked(&pThreadState, pGpu); + + _gpuHandleWriteRegisterFilter(pGpu, deviceIndex, instance, regAddr, + val, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_WRITE)) + { + switch (size) + { + case 8: + osDevWriteReg008(pGpu, pMapping, regAddr, 0xFFU & (val)); + break; + case 16: + osDevWriteReg016(pGpu, pMapping, regAddr, 0xFFFFU & (val)); + break; + case 32: + osDevWriteReg032(pGpu, pMapping, regAddr, val); + break; + } + } +} + +void +_gpuApertureWriteReg008 +( + IO_APERTURE *pAperture, + NvU32 addr, + NvV8 val +) +{ + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*)pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + + // + // NOTE: The SLI loop below reuses pAperture's values across all iterations + // OBJGPU's apertures are initialized to have the same baseAddress and length + // on all GPU device instances, so reusing the aperture here is fine. + // Device-specific instances are obtained via gpuGetDeviceMapping in the SLI loop. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _gpuApertureWriteRegUnicast(pGpu, pAperture, addr, val, 8 /* size */); + SLI_LOOP_END; +} + +void +_gpuApertureWriteReg016 +( + IO_APERTURE *pAperture, + NvU32 addr, + NvV16 val +) +{ + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*)pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + + // + // NOTE: The SLI loop below reuses pAperture's values across all iterations + // OBJGPU's apertures are initialized to have the same baseAddress and length + // on all GPU device instances, so reusing the aperture here is fine. 
+ // Device-specific instances are obtained via gpuGetDeviceMapping in the SLI loop. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _gpuApertureWriteRegUnicast(pGpu, pAperture, addr, val, 16 /* size */); + SLI_LOOP_END; +} + +void +_gpuApertureWriteReg032 +( + IO_APERTURE *pAperture, + NvU32 addr, + NvV32 val +) +{ + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*)pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + + // + // NOTE: The SLI loop below reuses pAperture's values across all iterations + // OBJGPU's apertures are initialized to have the same baseAddress and length + // on all GPU device instances, so reusing the aperture here is fine. + // Device-specific instances are obtained via gpuGetDeviceMapping in the SLI loop. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _gpuApertureWriteRegUnicast(pGpu, pAperture, addr, val, 32 /* size */); + SLI_LOOP_END; +} + +void +_gpuApertureWriteReg032Unicast +( + IO_APERTURE *pAperture, + NvU32 addr, + NvV32 val +) +{ + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*)pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + + _gpuApertureWriteRegUnicast(pGpu, pAperture, addr, val, 32 /* size */); +} + +void +regWrite008 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV8 val +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _regWriteUnicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, 8, NULL); + SLI_LOOP_END; +} +void +regWrite016 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV16 val +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + _regWriteUnicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, 16, NULL); + SLI_LOOP_END; +} + +void +regWrite032 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV32 val, + THREAD_STATE_NODE *pThreadState +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + regWrite032Unicast(GPU_GET_REGISTER_ACCESS(pGpu), deviceIndex, instance, addr, val, pThreadState); + SLI_LOOP_END +} + +void +regWrite032Unicast +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvV32 val, + THREAD_STATE_NODE *pThreadState +) +{ + + _regWriteUnicast(pRegisterAccess, deviceIndex, instance, addr, val, 32, pThreadState); +} + +static NvU32 +_gpuApertureReadReg +( + IO_APERTURE *pAperture, + NvU32 addr, + NvU32 size +) +{ + NV_ASSERT_OR_RETURN(pAperture, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pAperture->pDevice, NV_ERR_INVALID_ARGUMENT); + + NvU32 flags = 0; + NvU32 returnValue = 0; + GPU_IO_DEVICE *pDevice = (GPU_IO_DEVICE*) pAperture->pDevice; + OBJGPU *pGpu = pDevice->pGpu; + NV_STATUS status = NV_OK; + NvU32 regAddr = pAperture->baseAddress + addr; + NvU32 deviceIndex = pDevice->deviceIndex; + NvU32 instance = pDevice->instance; + THREAD_STATE_NODE *pThreadState; + + pGpu->registerAccess.regReadCount++; + + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + if (!pMapping) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + regAddr, deviceIndex, instance); + NV_ASSERT(0); + return 0xd0d0d0d0U; + } + + status = gpuSanityCheckRegisterAccess(pGpu, regAddr, NULL); + if (status != NV_OK) + { + return (~0); + } + + threadStateGetCurrentUnchecked(&pThreadState, pGpu); + + returnValue = 
_gpuHandleReadRegisterFilter(pGpu, deviceIndex, instance, + regAddr, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_READ)) + { + switch (size) + { + case 8: + returnValue = osDevReadReg008(pGpu, pMapping, regAddr); + break; + case 16: + returnValue = osDevReadReg016(pGpu, pMapping, regAddr); + break; + case 32: + returnValue = osDevReadReg032(pGpu, pMapping, regAddr); + break; + } + } + + // Make sure the value read is sane before we party on it. + gpuSanityCheckRegRead(pGpu, regAddr, size, &returnValue); + + return returnValue; +} + +NvU8 +_gpuApertureReadReg008 +( + IO_APERTURE *pAperture, + NvU32 addr +) +{ + return (NvU8) _gpuApertureReadReg(pAperture, addr, 8 /* size */); +} + +static NvU16 +_gpuApertureReadReg016 +( + IO_APERTURE *pAperture, + NvU32 addr +) +{ + return (NvU16) _gpuApertureReadReg(pAperture, addr, 16 /* size */); +} + +static NvU32 +_gpuApertureReadReg032 +( + IO_APERTURE *pAperture, + NvU32 addr + +) +{ + return _gpuApertureReadReg(pAperture, addr, 32 /* size */); +} + +/*! + * Checks if the register address is valid for a particular aperture + * + * @param[in] pAperture IO_APERTURE pointer + * @param[in] addr register address + * + * @returns NV_TRUE Register offset is valid + */ +static NvBool +_gpuApertureValidReg +( + IO_APERTURE *pAperture, + NvU32 addr +) +{ + NV_ASSERT_OR_RETURN(pAperture != NULL, NV_FALSE); + + return addr < pAperture->length; +} + +static NvU32 +_regRead +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + NvU32 size, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 flags = 0; + NvU32 returnValue = 0; + OBJGPU *pGpu = pRegisterAccess->pGpu; + DEVICE_MAPPING *pMapping; + NV_STATUS status = NV_OK; + + pRegisterAccess->regReadCount++; + + pMapping = gpuGetDeviceMapping(pGpu, deviceIndex, instance); + if (pMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for reg %x, deviceIndex=0x%x instance=%d\n", + addr, deviceIndex, instance); + NV_ASSERT(0); + return 0xd0d0d0d0; + } + + if ((size == 32) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_BUGCHECK_CALLBACK_ROUTINE)) + { + return osDevReadReg032(pGpu, pMapping, addr); + } + + status = gpuSanityCheckRegisterAccess(pGpu, addr, &returnValue); + if (status != NV_OK) + return returnValue; + + returnValue = _gpuHandleReadRegisterFilter(pGpu, deviceIndex, instance, + addr, size, &flags, pThreadState); + + if (!(flags & REGISTER_FILTER_FLAGS_READ)) + { + switch (size) + { + case 8: + returnValue = osDevReadReg008(pGpu, pMapping, addr); + break; + case 16: + returnValue = osDevReadReg016(pGpu, pMapping, addr); + break; + case 32: + returnValue = osDevReadReg032(pGpu, pMapping, addr); + break; + } + } + + // Make sure the value read is sane before we party on it. + gpuSanityCheckRegRead(pGpu, addr, size, &returnValue); + + return returnValue; +} + +NvU8 +regRead008 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr +) +{ + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 8, NULL); +} + +NvU16 +regRead016 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr +) +{ + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 16, NULL); +} + +/*! + * This function is used for converting do-while read register constructs in RM to + * equivalent PMU sequencer handling. The idea is to construct seq instruction + * which polls on a field in the given register. 
+ * + * @param[in] pRegisterAccess RegisterAccess object pointer + * @param[in] deviceIndex deviceIndex + * @param[in] addr register address + * @param[in] mask required mask for the field + * @param[in] val value to poll for + * + * @returns NV_OK if val is found + * NV_ERR_TIMEOUT if val is not found within timeout limit + */ +NV_STATUS +regRead032_AndPoll +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 addr, + NvU32 mask, + NvU32 val +) +{ + RMTIMEOUT timeout; + OBJGPU *pGpu = pRegisterAccess->pGpu; + NvU32 data = 0; + NV_STATUS status = NV_OK; + + { + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + + do + { + data = GPU_REG_RD32(pGpu, addr); + + if ((data & mask) == val) + { + status = NV_OK; + break; + } + + // Loosen this loop + osSpinLoop(); + + status = gpuCheckTimeout(pGpu, &timeout); + } while (status != NV_ERR_TIMEOUT); + } + + return status; +} + +NvU32 +regRead032 +( + RegisterAccess *pRegisterAccess, + DEVICE_INDEX deviceIndex, + NvU32 instance, + NvU32 addr, + THREAD_STATE_NODE *pThreadState +) +{ + if (pRegisterAccess == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + return _regRead(pRegisterAccess, deviceIndex, instance, addr, 32, pThreadState); +} + +/*! + * @brief Allocates and initializes GPU_IO_DEVICE and IO Aperture. + * + * @param pGpu + * @param[in] deviceIndex DEVICE_INDEX enum value for identifying device type + * @param[in] gpuDeviceEnum Device ID NV_DEVID_* + * @param[in] gpuNvPAddr Physical Base Address + * @param[in] gpuNvLength Length of Aperture + * + * @return NV_OK if IO Aperture is successfully initialized, error otherwise. + */ +static NV_STATUS +_gpuInitIODeviceAndAperture +( + OBJGPU *pGpu, + NvU32 deviceIndex, + NvU32 gpuDeviceEnum, + RmPhysAddr gpuNvPAddr, + NvU32 gpuNvLength +) +{ + NV_STATUS rmStatus; + GPU_IO_DEVICE *pIODevice = NULL; + + // Initialize GPU IO Device + rmStatus = _allocGpuIODevice(&pIODevice); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize pGpu IO device for devIdx %d.\n", + deviceIndex); + + return rmStatus; + } + + pIODevice->pGpu = pGpu; + pIODevice->deviceIndex = deviceIndex; + pIODevice->gpuDeviceEnum = gpuDeviceEnum; + pIODevice->gpuNvPAddr = gpuNvPAddr; + pIODevice->gpuNvLength = gpuNvLength; + pIODevice->refCount = 0; + + // GPU register operations are always on instance 0 + pIODevice->instance = 0; + + // Initialize register functions in IO_DEVICE + pIODevice->parent.pReadReg008Fn = (ReadReg008Fn*) &_gpuApertureReadReg008; + pIODevice->parent.pReadReg016Fn = (ReadReg016Fn*) &_gpuApertureReadReg016; + pIODevice->parent.pReadReg032Fn = (ReadReg032Fn*) &_gpuApertureReadReg032; + pIODevice->parent.pWriteReg008Fn = (WriteReg008Fn*) &_gpuApertureWriteReg008; + pIODevice->parent.pWriteReg016Fn = (WriteReg016Fn*) &_gpuApertureWriteReg016; + pIODevice->parent.pWriteReg032Fn = (WriteReg032Fn*) &_gpuApertureWriteReg032; + pIODevice->parent.pWriteReg032UcFn = (WriteReg032Fn*) &_gpuApertureWriteReg032Unicast; + pIODevice->parent.pValidRegFn = (ValidRegFn*) &_gpuApertureValidReg; + + rmStatus = ioaccessCreateIOAperture(&pGpu->pIOApertures[deviceIndex], + NULL, // no parent aperture + (IO_DEVICE*) pIODevice, + 0, gpuNvLength); // offset, length + if (rmStatus != NV_OK) + { + portMemFree(pIODevice); + + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize pGpu IO aperture for devIdx %d.\n", + deviceIndex); + + return rmStatus; + } + + return NV_OK; +} + + +NV_STATUS +regAddRegisterFilter +( + RegisterAccess *pRegisterAccess, + NvU32 flags, + DEVICE_INDEX devIndex, NvU32 
devInstance, + NvU32 rangeStart, NvU32 rangeEnd, + GpuWriteRegCallback pWriteCallback, + GpuReadRegCallback pReadCallback, + void *pParam, + REGISTER_FILTER **ppFilter +) +{ + DEVICE_REGFILTER_INFO *pRegFilter; + REGISTER_FILTER *pNode; + REGISTER_FILTER *pTmpNode; + DEVICE_MAPPING *pMapping; + + NV_ASSERT_OR_RETURN(devIndex < DEVICE_INDEX_MAX, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pRegisterAccess != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(ppFilter != NULL, NV_ERR_INVALID_ARGUMENT); + + // Get the device filter + pMapping = gpuGetDeviceMapping(pRegisterAccess->pGpu, devIndex, devInstance); + NV_ASSERT_OR_RETURN(pMapping != NULL, NV_ERR_INVALID_ARGUMENT); + + pRegFilter = &pMapping->devRegFilterInfo; + + if (!pWriteCallback && !pReadCallback) + { + // At least one register callback needs to be passed. + NV_PRINTF(LEVEL_ERROR, + "Need to specify at least one callback function.\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + NV_ASSERT(!(flags & REGISTER_FILTER_FLAGS_INVALID)); + + if ((flags & REGISTER_FILTER_FLAGS_READ) && !pReadCallback) + { + // If REGISTER_FILTER_FLAGS_READ is specified, then a read + // callback must also be specified. + NV_PRINTF(LEVEL_ERROR, + "REGISTER_FILTER_FLAGS_READ requires a read callback function.\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + if ((flags & REGISTER_FILTER_FLAGS_WRITE) && !pWriteCallback) + { + // If REGISTER_FILTER_FLAGS_WRITE is specified, then a write + // callback must also be specified. + NV_PRINTF(LEVEL_ERROR, + "REGISTER_FILTER_FLAGS_WRITE requires a write callback function.\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + // If the regfilter hasn't been used yet, then allocate a lock + if (NULL == pRegFilter->pRegFilterLock) + { + // Allocate spinlock for reg filter access + pRegFilter->pRegFilterLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + NV_ASSERT_OR_RETURN(pRegFilter->pRegFilterLock != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + } + + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + + if (NULL != pRegFilter->pRegFilterRecycleList) + { + pNode = pRegFilter->pRegFilterRecycleList; + pRegFilter->pRegFilterRecycleList = pNode->pNext; + } + else + { + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + pNode = portMemAllocNonPaged(sizeof(REGISTER_FILTER)); + if (NULL == pNode) + { + return NV_ERR_NO_MEMORY; + } + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + } + + // Print a warning if there's another register filter already registered. + if (((pTmpNode = _findGpuRegisterFilter(devIndex, devInstance, rangeStart, pRegFilter->pRegFilterList)) != NULL) || + ((pTmpNode = _findGpuRegisterFilter(devIndex, devInstance, rangeEnd, pRegFilter->pRegFilterList)) != NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "WARNING!! Previously registered reg filter found. 
Handle: %p, dev: " + "%d(%d) Range : 0x%x - 0x%x, WR/RD Callback: %p/%p, flags : %x\n", + pTmpNode, pTmpNode->devIndex, pTmpNode->devInstance, + pTmpNode->rangeStart, pTmpNode->rangeEnd, + pTmpNode->pWriteCallback, pTmpNode->pReadCallback, + pTmpNode->flags); + } + + // Populate structure + pNode->flags = flags; + pNode->devIndex = devIndex; + pNode->devInstance = devInstance; + pNode->rangeStart = rangeStart; + pNode->rangeEnd = rangeEnd; + pNode->pWriteCallback = pWriteCallback; + pNode->pReadCallback = pReadCallback; + pNode->pParam = pParam; + + // Link in + pNode->pNext = pRegFilter->pRegFilterList; + pRegFilter->pRegFilterList = pNode; + + // return pNode + *ppFilter = pNode; + + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + return NV_OK; +} + +void +regRemoveRegisterFilter +( + RegisterAccess *pRegisterAccess, + REGISTER_FILTER *pFilter +) +{ + REGISTER_FILTER *pNode; + REGISTER_FILTER *pPrev = NULL; + REGISTER_FILTER *pNext = NULL; + DEVICE_REGFILTER_INFO *pRegFilter; + DEVICE_MAPPING *pMapping; + + // Get the device filter + pMapping = gpuGetDeviceMapping(pRegisterAccess->pGpu, pFilter->devIndex, pFilter->devInstance); + NV_ASSERT_OR_RETURN_VOID(pMapping != NULL); + + pRegFilter = &pMapping->devRegFilterInfo; + + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + pNode = pRegFilter->pRegFilterList; + while (pNode) + { + // + // we could have used a doubly linked list to do a quick removal, but + // iterating the list to find the match serves as sanity test, so let's + // stick with a singly linked list. + // + if (pNode == pFilter) + { + if (pRegFilter->regFilterRefCnt > 0) + { + // defer removal if another thread is working on the list + pNode->flags |= REGISTER_FILTER_FLAGS_INVALID; + pRegFilter->bRegFilterNeedRemove = NV_TRUE; + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + return; + } + + // Unlink + pNext = pNode->pNext; + + // place on recycle list + pNode->pNext = pRegFilter->pRegFilterRecycleList; + pRegFilter->pRegFilterRecycleList = pNode; + + if (pPrev) + { + pPrev->pNext = pNext; + } + else + { + pRegFilter->pRegFilterList = pNext; + } + + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + return; + } + + pPrev = pNode; + pNode = pNode->pNext; + } + NV_ASSERT_FAILED("Attempted to remove a nonexistent filter"); + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); +} + +// called with lock held +static void +_gpuCleanRegisterFilterList +( + DEVICE_REGFILTER_INFO *pRegFilter +) +{ + REGISTER_FILTER *pNode = pRegFilter->pRegFilterList; + REGISTER_FILTER *pPrev = NULL; + REGISTER_FILTER *pNext = NULL; + + while (pNode) + { + if (pNode->flags & REGISTER_FILTER_FLAGS_INVALID) + { + // Unlink + pNext = pNode->pNext; + + // place on recycle list + pNode->pNext = pRegFilter->pRegFilterRecycleList; + pRegFilter->pRegFilterRecycleList = pNode; + + if (pPrev) + { + pPrev->pNext = pNext; + } + else + { + pRegFilter->pRegFilterList = pNext; + } + + pNode = pNext; + continue; + } + + pPrev = pNode; + pNode = pNode->pNext; + } +} + +static NvU32 +_gpuHandleReadRegisterFilter +( + OBJGPU *pGpu, + DEVICE_INDEX devIndex, + NvU32 devInstance, + NvU32 addr, + NvU32 accessSize, + NvU32 *pFlags, + THREAD_STATE_NODE *pThreadState +) +{ + REGISTER_FILTER *pFilter; + NvU32 returnValue = 0; + NvU32 tempVal = 0; + DEVICE_REGFILTER_INFO *pRegFilter; + DEVICE_MAPPING *pMapping; + + // Get the device filter + pMapping = gpuGetDeviceMapping(pGpu, devIndex, devInstance); + NV_ASSERT_OR_RETURN(pMapping != NULL, returnValue); + + pRegFilter = &pMapping->devRegFilterInfo; + + 
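Since regAddRegisterFilter() and regRemoveRegisterFilter() above are the public surface of this filter machinery, a short usage sketch may help. Everything named sketch* is hypothetical, and the read-callback prototype is inferred from the call site in _gpuHandleReadRegisterFilter() below, so it may need adjusting to the exact GpuReadRegCallback typedef.

// Hypothetical filter that satisfies reads of one register range from software
// instead of touching the hardware register.
static NvU32
sketchReadFilterCb(OBJGPU *pGpu, void *pParam, NvU32 addr, NvU32 accessSize, NvU32 flags)
{
    return *(NvU32 *)pParam;   // canned value supplied at registration time
}

static NV_STATUS
sketchInstallReadFilter(OBJGPU *pGpu, NvU32 rangeStart, NvU32 rangeEnd,
                        NvU32 *pCannedValue, REGISTER_FILTER **ppFilter)
{
    // REGISTER_FILTER_FLAGS_READ makes the access path use the callback's
    // return value rather than reading the register.
    return regAddRegisterFilter(GPU_GET_REGISTER_ACCESS(pGpu),
                                REGISTER_FILTER_FLAGS_READ,
                                DEVICE_INDEX_GPU, 0 /* devInstance */,
                                rangeStart, rangeEnd,
                                NULL /* no write callback */,
                                sketchReadFilterCb,
                                pCannedValue, ppFilter);
}

The filter would later be torn down with regRemoveRegisterFilter(GPU_GET_REGISTER_ACCESS(pGpu), *ppFilter), which defers the unlink if another thread is still iterating the list.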
// if there is no filter, do nothing. just bail out. + if (pRegFilter->pRegFilterList == NULL) + { + return returnValue; + } + + if (pThreadState != NULL) + { + // Filters should be only used with GPU lock is held. + if (pThreadState->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS) + { + return returnValue; + } + } +#ifdef DEBUG + else + { + THREAD_STATE_NODE *pCurThread; + + if (NV_OK == threadStateGetCurrentUnchecked(&pCurThread, pGpu)) + { + // Filters should be only used with GPU lock is held. + // Assert because ISRs are expected to pass threadstate down the stack. + // Don't bale out to keep release and debug path behavior identical. + if (pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS) + { + NV_ASSERT(0); + } + } + } +#endif + + // + // NOTE: we can't simply grab the lock and release it after + // the search since it is not safe to assume that + // callbacks can be called with spinlock held + // + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + pRegFilter->regFilterRefCnt++; + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + + // + // Note there is potential thread race condition where a filter may be + // being added or removed in one thread (dispatch) while another thread + // is searching the list. This search should have a lock in place. + // + pFilter = pRegFilter->pRegFilterList; + while ((pFilter) && (pFilter = _findGpuRegisterFilter(devIndex, devInstance, addr, pFilter))) + { + if (pFilter->pReadCallback) + { + tempVal = pFilter->pReadCallback(pGpu, pFilter->pParam, addr, + accessSize, *pFlags); + // + // if there are multiple filters, we use the last filter found to + // save returnValue + // + if (pFilter->flags & REGISTER_FILTER_FLAGS_READ) + { + returnValue = tempVal; + } + } + *pFlags |= pFilter->flags; + pFilter = pFilter->pNext; + } + + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + pRegFilter->regFilterRefCnt--; + if (pRegFilter->regFilterRefCnt == 0 && pRegFilter->bRegFilterNeedRemove) + { + // no other thread can be touching the list. remove invalid entries + _gpuCleanRegisterFilterList(pRegFilter); + pRegFilter->bRegFilterNeedRemove = NV_FALSE; + } + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + return returnValue; +} + +static void +_gpuHandleWriteRegisterFilter +( + OBJGPU *pGpu, + DEVICE_INDEX devIndex, + NvU32 devInstance, + NvU32 addr, + NvU32 val, + NvU32 accessSize, + NvU32 *pFlags, + THREAD_STATE_NODE *pThreadState +) +{ + REGISTER_FILTER *pFilter; + DEVICE_REGFILTER_INFO *pRegFilter; + DEVICE_MAPPING *pMapping; + + // Get the device filter + pMapping = gpuGetDeviceMapping(pGpu, devIndex, devInstance); + NV_ASSERT_OR_RETURN_VOID(pMapping != NULL); + + pRegFilter = &pMapping->devRegFilterInfo; + + // if there is no filter, do nothing. just bail out. + if (pRegFilter->pRegFilterList == NULL) + { + return; + } + + if (pThreadState != NULL) + { + // Filters should be only used with GPU lock is held. + if (pThreadState->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS) + { + return; + } + } +#ifdef DEBUG + else + { + THREAD_STATE_NODE *pCurThread; + + if (NV_OK == threadStateGetCurrentUnchecked(&pCurThread, pGpu)) + { + // Filters should be only used with GPU lock is held. + // Assert because ISRs are expected to pass threadstate down the stack. + // Don't bale out to keep release and debug path behavior identical. 
+ if (pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS) + { + NV_ASSERT(0); + } + } + } +#endif + + // + // NOTE: we can't simply grab the lock and release it after + // the search since it is not safe to assume that + // callbacks can be called with spinlock held + // + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + pRegFilter->regFilterRefCnt++; + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); + + // + // Note there is potential thread race condition where a filter may be + // being added or removed in one thread (dispatch) while another thread + // is searching the list. This search should have a lock in place. + // + pFilter = pRegFilter->pRegFilterList; + while ((pFilter) && (pFilter = _findGpuRegisterFilter(devIndex, devInstance, addr, pFilter))) + { + if (pFilter->pWriteCallback) + { + pFilter->pWriteCallback(pGpu, pFilter->pParam, addr, val, + accessSize, *pFlags); + } + *pFlags |= pFilter->flags; + pFilter = pFilter->pNext; + } + + portSyncSpinlockAcquire(pRegFilter->pRegFilterLock); + pRegFilter->regFilterRefCnt--; + if (pRegFilter->regFilterRefCnt == 0 && pRegFilter->bRegFilterNeedRemove) + { + // no other thread can be touching the list. remove invalid entries + _gpuCleanRegisterFilterList(pRegFilter); + pRegFilter->bRegFilterNeedRemove = NV_FALSE; + } + portSyncSpinlockRelease(pRegFilter->pRegFilterLock); +} + +static REGISTER_FILTER * +_findGpuRegisterFilter +( + DEVICE_INDEX devIndex, + NvU32 devInstance, + NvU32 addr, + REGISTER_FILTER *pFilter +) +{ + while (pFilter != NULL) + { + if (!(pFilter->flags & REGISTER_FILTER_FLAGS_INVALID) && + (devIndex == pFilter->devIndex) && + (devInstance == pFilter->devInstance) && + (addr >= pFilter->rangeStart) && (addr <= pFilter->rangeEnd)) + { + break; + } + + pFilter = pFilter->pNext; + } + + return pFilter; +} + +static NvBool +_gpuEnablePciMemSpaceAndCheckPmcBoot0Match +( + OBJGPU *pGpu +) +{ + NvU16 VendorId; + NvU16 DeviceId; + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU32 domain = gpuGetDomain(pGpu); + void *Handle = osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId); + NvU32 Enabled = osPciReadDword(Handle, NV_CONFIG_PCI_NV_1); + NvU32 pmcBoot0; + + // If Memory Spaced is not enabled, enable it + if (DRF_VAL(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, Enabled) != NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED) + { + osPciWriteDword(Handle, NV_CONFIG_PCI_NV_1, + Enabled | + (DRF_DEF(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, _ENABLED) | + DRF_DEF(_CONFIG, _PCI_NV_1, _BUS_MASTER, _ENABLED))); + } + + // Check PMC_ENABLE to make sure that it matches + pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + if (pmcBoot0 == pGpu->chipId0) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +static NvU32 +_regCheckReadFailure +( + OBJGPU *pGpu, + NvU32 value +) +{ + NvU32 flagsFailed; + NvU32 reason = BAD_READ_UNKNOWN; + + if ((!pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH)) && + (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST))) + { + gpuSanityCheck(pGpu, GPU_SANITY_CHECK_FLAGS_ALL, &flagsFailed); + + // This is where we need to determine why we might be seeing this failure + if (value == GPU_REG_VALUE_INVALID) + { + // Does PCI Space Match + if (flagsFailed & GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH) + { + reason = BAD_READ_GPU_OFF_BUS; + goto exit; + } + + // Is Memory Spaced Enabled + if (flagsFailed & GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED) + { + reason = BAD_READ_PCI_DEVICE_DISABLED; + + if (!_gpuEnablePciMemSpaceAndCheckPmcBoot0Match(pGpu)) + { + // We have been reset! 
+ reason = BAD_READ_GPU_RESET; + goto exit; + } + } + } + + // Are we off by N + if (flagsFailed & GPU_SANITY_CHECK_FLAGS_OFF_BY_N) + { + reason = BAD_READ_DWORD_SHIFT; + } + } + else + { + reason = BAD_READ_LOW_POWER; + } + +exit: + return reason; +} + +void +regCheckAndLogReadFailure +( + RegisterAccess *pRegisterAccess, + NvU32 addr, + NvU32 mask, + NvU32 value +) +{ + OBJGPU *pGpu = pRegisterAccess->pGpu; + const NvU32 failureReason = _regCheckReadFailure(pGpu, value); + PRmRC2BadRead2_RECORD pBadRead = NULL; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Record a Journal Entry about this failure + if (rcdbAllocNextJournalRec(SYS_GET_RCDB(pSys), + (NVCD_RECORD **)&pBadRead, + RmGroup, + RmBadRead_V2, + sizeof *pBadRead) == NV_OK) + { + rcdbSetCommonJournalRecord(pGpu, &pBadRead->common); + pBadRead->MemorySpace = MEMORY_BAR0; + pBadRead->Offset = addr; + pBadRead->Mask = mask; + pBadRead->Value = value; + pBadRead->Reason = failureReason; + + // We are seeing some misreads in DVS runs. Adding this so that we can get + // stack traces of why this is happening + if ((NV_DEBUG_BREAK_ATTRIBUTES_CRASH) & + DRF_VAL(_DEBUG, _BREAK, _ATTRIBUTES, pSys->debugFlags)) + { + osBugCheck(OS_BUG_CHECK_BUGCODE_INTERNAL_TEST); + } + } + + PORT_UNREFERENCED_VARIABLE(failureReason); +} + +NvU32 +regCheckRead032 +( + RegisterAccess *pRegisterAccess, + NvU32 addr, + NvU32 mask, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 returnValue; + OBJGPU *pGpu = pRegisterAccess->pGpu; + + returnValue = GPU_REG_RD32_EX(pGpu, addr, pThreadState); + if (returnValue & mask) + { + if (!API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + regCheckAndLogReadFailure(pRegisterAccess, addr, mask, returnValue); + returnValue = 0; + } + + return returnValue; +} + +#if GPU_REGISTER_ACCESS_DUMP + +NvU8 +gpuRegRd08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU8 val = REG_INST_RD08(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU8 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ func: %s, reg name: %s, addr: %08x, val: %02x\n", + func, addrStr, addr, val); + } + prev_addr = addr; + prev_val = val; + } + return val; +} + +NvU16 +gpuRegRd16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU16 val = REG_INST_RD16(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU16 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ func: %s, reg name: %s, addr: %08x, val: %04x\n", + func, addrStr, addr, val); + } + prev_addr = addr; + prev_val = val; + } + return val; +} + +NvU32 +gpuRegRd32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr) +{ + NvU32 val = REG_INST_RD32(pGpu, GPU, 0, addr); + // filter out duplicate read + static NvU32 prev_addr = 0; + static NvU32 prev_val = 0; + if (addr != prev_addr || val != prev_val) + { + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "READ %s func: %s, reg name: %s, addr: %08x, val: %08x\n", + vreg, func, addrStr, addr, val); + } + prev_addr = addr; 
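One detail of the *_dumpinfo logging in this block is the address mask used to skip the BAR0 window: (addr & 0xFFF00000) != 0x00700000 excludes exactly the NV_PRAMIN range 0x00700000 through 0x007FFFFF so window traffic does not flood the log. A minimal sketch of that check as a named helper; the helper is hypothetical, since the logging functions open-code the test.

// Hypothetical helper equivalent to the open-coded BAR0-window check nearby:
// NV_PRAMIN occupies 0x00700000..0x007FFFFF, i.e. the 1 MB region selected by
// the top twelve address bits being 0x007.
static NvBool
sketchIsPraminWindow(NvU32 addr)
{
    return ((addr & 0xFFF00000) == 0x00700000) ? NV_TRUE : NV_FALSE;
}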
+ prev_val = val; + } + return val; +} + +void +gpuRegWr08_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV8 val) +{ + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "WRITE func: %s, reg name: %s, addr: %08x, val: %02x\n", + func, addrStr, addr, val); + } + REG_INST_WR08(pGpu, GPU, 0, addr, val); +} + +void +gpuRegWr16_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV16 val) +{ + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "WRITE func: %s, reg name: %s, addr: %08x, val: %04x\n", + func, addrStr, addr, val); + } + REG_INST_WR16(pGpu, GPU, 0, addr, val); +} + +void +gpuRegWr32_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val) +{ + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "WRITE %s func: %s, reg name: %s, addr: %08x, val: %08x\n", + vreg, func, addrStr, addr, val); + } + REG_INST_WR32(pGpu, GPU, 0, addr, val); +} + +void +gpuRegWr32Uc_dumpinfo(const char *func, const char *addrStr, const char *vreg, OBJGPU *pGpu, NvU32 addr, NvV32 val) +{ + // filter out bar0 windows registers (NV_PRAMIN – range 0x007FFFFF:0x00700000 ) + if ((addr & 0xFFF00000) != 0x00700000) + { + NV_PRINTF(LEVEL_NOTICE, + "WRITE func: %s, reg name: %s, addr: %08x, val: %08x\n", + func, addrStr, addr, val); + } + REG_INST_WR32_UC(pGpu, GPU, 0, addr, val); +} + +#endif // GPU_REGISTER_ACCESS_DUMP + +/*! + * @brief Do any sanity checks for the GPU's state before actually reading/writing to the chip. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] addr Address of the register to be sanity checked + * @param[out] pRetVal Default return value for read accesses incase of sanity check failure. Only for U032 hals. + * + * @returns NV_ERR_GPU_IN_FULLCHIP_RESET if GPU is in reset + * NV_ERR_GPU_IS_LOST if GPU is inaccessible + * NV_ERR_GPU_NOT_FULL_POWER if GPU is not at full power AND + * GPU is not in resume codepath + * sim low power reg access is disabled + * NV_OK Otherwise + */ +NV_STATUS +gpuSanityCheckRegisterAccess_IMPL +( + OBJGPU *pGpu, + NvU32 addr, + NvU32 *pRetVal +) +{ + NV_STATUS status = NV_OK; + NvU32 retVal = ~0; + + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + status = NV_ERR_GPU_IN_FULLCHIP_RESET; + goto done; + } + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + status = NV_ERR_GPU_IS_LOST; + goto done; + } + + if ((status = gpuSanityCheckVirtRegAccess_HAL(pGpu, addr)) != NV_OK) + { + // Return 0 to match with HW behavior + retVal = 0; + goto done; + } + + // + // Make sure the GPU is in full power or resuming. When the OS has put the + // GPU in suspend (i.e. any of the D3 variants) there's no guarantee the GPU is + // accessible over PCI-E: the GPU may be completely powered off, the + // upstream bridges may not be properly configured, etc. Attempts to access + // the GPU may then result in PCI-E errors and/or bugchecks. For examples, + // see Bugs 440565 and 479003. + // On Mshybrid, the OS will make sure we are up and alive before calling + // into the driver. So we can skip this check on MsHybrid. + // + // DO NOT IGNORE OR REMOVE THIS ASSERT. 
It is a warning that improperly + // written RM code further up the stack is trying to access a GPU which is + // in suspend (i.e. low power). Any entry points into the RM (especially + // those between GPUs or for asynchronous callbacks) should always check + // that the GPU is in full power via gpuIsGpuFullPower(), bailing out in the + // appropriate manner when it returns NV_FALSE. + // + // If you are not an RM engineer and are encountering this assert, please + // file a bug against the RM. + // + if ((gpuIsGpuFullPower(pGpu) == NV_FALSE) && + !IS_GPU_GC6_STATE_ENTERING(pGpu) && + !(IS_GPU_GC6_STATE_ENTERED(pGpu) + ) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_MSHYBRID_GC6_ACTIVE) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_ENABLE_REG_ACCESS_IN_LOW_POWER_FOR_SIM_SRTEST) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + DBG_BREAKPOINT(); + status = NV_ERR_GPU_NOT_FULL_POWER; + goto done; + } + + // TODO: More complete sanity checking + +done: + // Assign the return value + if ((status != NV_OK) && (pRetVal != NULL)) + { + *pRetVal = retVal; + } + return status; +} + +/** + * @brief checks if the register offset is valid + * + * @param[in] pGpu + * @param[in] offset + * + * @returns NV_OK if valid + * @returns NV_ERR_INVALID_ARGUMENT if offset is too large for bar + * @returns NV_ERR_INSUFFICIENT_PERMISSIONS if user is not authorized to access register + */ +NV_STATUS +gpuValidateRegOffset_IMPL +( + OBJGPU *pGpu, + NvU32 offset +) +{ + NvU64 maxBar0Size = pGpu->deviceMappings[0].gpuNvLength; + + // The register offset should be 4 bytes smaller than the max bar size + if (offset > (maxBar0Size - 4)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (!osIsAdministrator() && + !gpuGetUserRegisterAccessPermissions(pGpu, offset)) + { + NV_PRINTF(LEVEL_ERROR, + "User does not have permission to access register offset 0x%x\n", + offset); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} + +/*! + * @brief Verify existence function. + * + * @param[in] pGpu + * + * @returns NV_OK if GPU is still accessible + * NV_ERR_INVALID_STATE if GPU is inaccessible + */ +NV_STATUS +gpuVerifyExistence_IMPL +( + OBJGPU *pGpu +) +{ + NvU32 regVal = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + + if (regVal != pGpu->chipId0) + { + osHandleGpuLost(pGpu); + regVal = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + if (regVal != pGpu->chipId0) + { + return NV_ERR_GPU_IS_LOST; + } + } + + return NV_OK; +} + +/*! + * @brief Perform a sanity check on a register read value + * Starts with gpu-independent check, then calls into HAL for specific cases + * + * @param[in] pGpu GPU object pointer + * @param[in] addr Value address + * @param[in] size Access size + * @param[in/out] pValue Value to sanity check + */ +NV_STATUS +gpuSanityCheckRegRead_IMPL +( + OBJGPU *pGpu, + NvU32 addr, + NvU32 size, + void *pValue +) +{ + NvU8 *pValue8; + NvU16 *pValue16; + NvU32 *pValue32; + NvU32 value; + + switch (size) + { + case 8: + { + pValue8 = ((NvU8 *) pValue); + if (*pValue8 == (NvU8) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. + // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue8 = osGpuReadReg008(pGpu, addr); + } + } + break; + } + case 16: + { + pValue16 = ((NvU16 *) pValue); + if (*pValue16 == (NvU16) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. 
+ // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue16 = osGpuReadReg016(pGpu, addr); + } + } + break; + } + case 32: + { + pValue32 = ((NvU32 *) pValue); + if (*pValue32 == (NvU32) (~0)) + { + // + // The result looks suspicious, let's check if the GPU is still attached. + // + NvU32 testValue = osGpuReadReg032(pGpu, NV_PMC_BOOT_0); + if (testValue == GPU_REG_VALUE_INVALID) + { + osHandleGpuLost(pGpu); + *pValue32 = osGpuReadReg032(pGpu, addr); + } + } + + value = *((NvU32 *)pValue); + + // + // HW will return 0xbad in the upper 3 nibbles + // when there is a possible issue. + // + if ((value & GPU_READ_PRI_ERROR_MASK) == GPU_READ_PRI_ERROR_CODE) + { + gpuHandleSanityCheckRegReadError_HAL(pGpu, addr, value); + } + break; + } + default: + { + NV_ASSERT_FAILED("Invalid access size"); + break; + } + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_device_mapping.c b/src/nvidia/src/kernel/gpu/gpu_device_mapping.c new file mode 100644 index 000000000..7adaa44b7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_device_mapping.c @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "gpu/gpu_device_mapping.h" +#include "core/thread_state.h" +#include "nv_ref.h" + +/** + * @brief Finds the device mapping matching the specified address and device index + * + * @param[in] pGpu + * @param[in] deviceIndex device specific device enum (DEVICE_INDEX_*) + * @param[in] addr device register address + * + * @returns matching mapping, or NULL if not found. 
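+ *
+ * Minimal usage sketch of the public wrapper below (illustrative only; the
+ * device index and instance are example choices, and the _IMPL suffix is
+ * dropped at the call site under the usual NVOC convention):
+ *
+ *   DEVICE_MAPPING *pMap = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0);
+ *   if (pMap != NULL)
+ *   {
+ *       // pMap->gpuNvLength is the size of the register aperture, as used
+ *       // by gpuValidateRegOffset() above.
+ *   }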
+ */ +static DEVICE_MAPPING * +_gpuFindDeviceMapping +( + OBJGPU *pGpu, + DEVICE_INDEX deviceIndex, + NvU32 instance +) +{ + NvU32 i; + NvU32 devId = 0; + DEVICE_ID_MAPPING *deviceIdMapping; + NvU32 numDeviceIDs; + + numDeviceIDs = gpuGetDeviceIDList_HAL(pGpu, &deviceIdMapping); + + // Find the devID that matches the requested device index + for (i = 0; i < numDeviceIDs; i++) + { + if (deviceIdMapping[i].deviceIndex == deviceIndex) + { + devId = deviceIdMapping[i].devId; + break; + } + } + + if (devId == 0) + { + // For discrete GPU, just return BAR0 mapping + if (deviceIndex == DEVICE_INDEX_GPU) + { + return &pGpu->deviceMappings[0]; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Could not find mapping for deviceIndex=%d\n", + deviceIndex); + return NULL; + } + } + return gpuGetDeviceMappingFromDeviceID(pGpu, devId, instance); +} + +DEVICE_MAPPING * +gpuGetDeviceMapping_IMPL +( + OBJGPU *pGpu, + DEVICE_INDEX deviceIndex, + NvU32 instance +) +{ + // Fast lookup path for first instance of a device + if ((deviceIndex < DEVICE_INDEX_MAX) && (instance == 0)) + { + if (!pGpu->pDeviceMappingsByDeviceInstance[deviceIndex]) + { + pGpu->pDeviceMappingsByDeviceInstance[deviceIndex] = _gpuFindDeviceMapping(pGpu, deviceIndex, instance); + } + return pGpu->pDeviceMappingsByDeviceInstance[deviceIndex]; + } + + return _gpuFindDeviceMapping(pGpu, deviceIndex, instance); +} + +/** + * @brief Returns the device mapping matching the specified device ID from + * project relocation table + * + * @param[in] pGpu OBJGPU pointer + * @param[in] deviceId device ID from project relocation table + * @param[in] instance instance of the particular device ID + * + * @returns matching mapping, or NULL if not found. + */ + +DEVICE_MAPPING * +gpuGetDeviceMappingFromDeviceID_IMPL +( + OBJGPU *pGpu, + NvU32 deviceId, + NvU32 instance +) +{ + NvU32 i; + + // + // For SOC, walk the list of devices to find the device/instance requested. + // For GPU (legacy), only NV_DEVID_GPU(0) is expected & allowed + // + if (pGpu->bIsSOC) + { + for (i = 0; i < pGpu->gpuDeviceMapCount; i++) + { + if (pGpu->deviceMappings[i].gpuDeviceEnum == deviceId) + { + // Find the Nth instance of the requested device + if (instance) + instance--; + else + return &pGpu->deviceMappings[i]; + } + } + + NV_PRINTF(LEVEL_ERROR, "Could not find mapping for deviceId=%d\n", + deviceId); + } + else + { + // For GPU, always assume NV_DEVID_GPU instance 0. 
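+        // (A discrete GPU exposes a single BAR0 register mapping, so the
+        // lookup collapses to deviceMappings[0] regardless of deviceId.)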
+ NV_ASSERT(instance == 0); + NV_ASSERT(pGpu->gpuDeviceMapCount == 1); + + return &pGpu->deviceMappings[0]; + } + + return NULL; +} + +static NvBool _gpuCheckIsBar0OffByN(OBJGPU *pGpu) +{ + NvU32 i, pmcBoot0; + + // Check to see if we can find PMC_BOOT_0 + for (i = 0; i < 20; i++) + { + pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0 + (i * 4)); + if (pmcBoot0 == pGpu->chipId0) + { + break; + } + } + + if ((i != 0) && (i != 20)) + { + // We are off by N + return NV_TRUE; + } + + // Everything looks ok + return NV_FALSE; +} + +static NvBool _gpuCheckDoesPciSpaceMatch(OBJGPU *pGpu) +{ + NvU16 VendorId; + NvU16 DeviceId; + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU32 domain = gpuGetDomain(pGpu); + + osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId); + if ((DeviceId == 0xFFFF) || + (VendorId != 0x10DE)) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +static NvBool _gpuCheckIsPciMemSpaceEnabled(OBJGPU *pGpu) +{ + NvU16 VendorId; + NvU16 DeviceId; + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU32 domain = gpuGetDomain(pGpu); + void *Handle = osPciInitHandle(domain, bus, device, 0, &VendorId, &DeviceId); + NvU32 Enabled = osPciReadDword(Handle, NV_CONFIG_PCI_NV_1); + + // Is Memory Spaced Enabled + if (DRF_VAL(_CONFIG, _PCI_NV_1, _MEMORY_SPACE, Enabled) != NV_CONFIG_PCI_NV_1_MEMORY_SPACE_ENABLED) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +NV_STATUS gpuSanityCheck_IMPL +( + OBJGPU *pGpu, + NvU32 flags, + NvU32 *pFlagsFailed +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 flagsFailed = GPU_SANITY_CHECK_FLAGS_NONE; + THREAD_STATE_NODE *pThreadNode = NULL; + + if (pFlagsFailed != NULL) + { + *pFlagsFailed = GPU_SANITY_CHECK_FLAGS_NONE; + } + + if (pGpu->bIsSOC) + { + flags &= ~( + GPU_SANITY_CHECK_FLAGS_BOOT_0 | + GPU_SANITY_CHECK_FLAGS_OFF_BY_N | + GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH | + GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED | + GPU_SANITY_CHECK_FLAGS_FB); + + } + + // + // Check to make sure the lock is held for this thread as the underlying + // functions can touch state and lists that expect exclusive access. + // + rmStatus = threadStateGetCurrent(&pThreadNode, pGpu); + if (rmStatus != NV_OK) + { + return rmStatus; + } + if (pThreadNode->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS) + { + return NV_ERR_NOT_SUPPORTED; + } + + // Check to make sure we are powered on first + if (gpuIsGpuFullPower(pGpu) == NV_FALSE) + { + NV_ASSERT(0); + return NV_ERR_GPU_NOT_FULL_POWER; + } + + if (flags & GPU_SANITY_CHECK_FLAGS_BOOT_0) + { + // + // When GPU is in reset reg reads will return 0xFFFFFFFF. + // Without this check RM would keep hitting assert during TDR recovery. 
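+        // (pGpu->chipId0 caches the expected NV_PMC_BOOT_0 value; the same
+        // comparison is used by gpuVerifyExistence() above to decide whether
+        // the GPU is still present.)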
+ // + if (!API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + NvU32 pmcBoot0 = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + if (pmcBoot0 != pGpu->chipId0) + { + flagsFailed |= GPU_SANITY_CHECK_FLAGS_BOOT_0; + NV_ASSERT(0); + } + } + } + + if (flags & GPU_SANITY_CHECK_FLAGS_OFF_BY_N) + { + if (_gpuCheckIsBar0OffByN(pGpu)) + { + flagsFailed |= GPU_SANITY_CHECK_FLAGS_OFF_BY_N; + NV_ASSERT(0); + } + } + + if (flags & GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH) + { + if (!_gpuCheckDoesPciSpaceMatch(pGpu)) + { + flagsFailed |= GPU_SANITY_CHECK_FLAGS_PCI_SPACE_MATCH; + NV_ASSERT(0); + } + } + + if (flags & GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED) + { + if (!_gpuCheckIsPciMemSpaceEnabled(pGpu)) + { + flagsFailed |= GPU_SANITY_CHECK_FLAGS_PCI_MEM_SPACE_ENABLED; + NV_ASSERT(0); + } + } + + if (flags & GPU_SANITY_CHECK_FLAGS_FB) + { + if (!gpuIsGpuFullPower(pGpu)) + { + NV_ASSERT(0); + } + } + + if (flagsFailed != GPU_SANITY_CHECK_FLAGS_NONE) + { + rmStatus = NV_ERR_GENERIC; + NV_PRINTF(LEVEL_ERROR, "Failed test flags: 0x%x\n", flagsFailed); + } + + if (pFlagsFailed != NULL) + { + *pFlagsFailed = flagsFailed; + } + + return rmStatus; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_gspclient.c b/src/nvidia/src/kernel/gpu/gpu_gspclient.c new file mode 100644 index 000000000..d757b723b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_gspclient.c @@ -0,0 +1,333 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief GSP Client (CPU RM) specific GPU routines reside in this file. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "ctrl/ctrl2080.h" + +#include "gpu/gr/kernel_graphics_manager.h" + +#include "gpu/gsp/kernel_gsp.h" + +/*! + * @brief Determines if the GPU has INTERNAL SKU FUSE parts by checking the GSP + * static info + * + * @param[in] pGpu OBJGPU pointer + * + * @returns NV_TRUE if the GPU has INTERNAL SKU FUSE parts, NV_FALSE otherwise. 
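+ *
+ * The _FWCLIENT variants in this file run in the CPU-resident kernel RM when
+ * the GPU is driven by GSP firmware: rather than reading the hardware
+ * directly, they consult the static configuration blob fetched from GSP.
+ * A minimal sketch of that pattern (both names appear in the code below):
+ *
+ *   GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu);
+ *   NvBool bInternalSku = pGSCI->bGpuInternalSku;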
+ */ +NvBool +gpuIsInternalSku_FWCLIENT +( + OBJGPU *pGpu +) +{ + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + + return pGSCI->bGpuInternalSku; +} + +NV_STATUS +gpuInitSriov_FWCLIENT +( + OBJGPU *pGpu +) +{ + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + NvU32 totalPcieFns = 0; + + pGpu->sriovState.totalVFs = pGSCI->sriovCaps.totalVFs; + pGpu->sriovState.firstVFOffset = pGSCI->sriovCaps.firstVfOffset; + pGpu->sriovState.firstVFBarAddress[0] = pGSCI->sriovCaps.FirstVFBar0Address; + pGpu->sriovState.firstVFBarAddress[1] = pGSCI->sriovCaps.FirstVFBar1Address; + pGpu->sriovState.firstVFBarAddress[2] = pGSCI->sriovCaps.FirstVFBar2Address; + pGpu->sriovState.vfBarSize[0] = pGSCI->sriovCaps.bar0Size; + pGpu->sriovState.vfBarSize[1] = pGSCI->sriovCaps.bar1Size; + pGpu->sriovState.vfBarSize[2] = pGSCI->sriovCaps.bar2Size; + pGpu->sriovState.b64bitVFBar0 = pGSCI->sriovCaps.b64bitBar0; + pGpu->sriovState.b64bitVFBar1 = pGSCI->sriovCaps.b64bitBar1; + pGpu->sriovState.b64bitVFBar2 = pGSCI->sriovCaps.b64bitBar2; + + pGpu->sriovState.maxGfid = pGSCI->sriovMaxGfid; + + // note: pGpu->sriovState.virtualRegPhysOffset is initialized separately + + // owned by physical RM, so leave uninitialized + pGpu->sriovState.pP2PInfo = NULL; + pGpu->sriovState.bP2PAllocated = NV_FALSE; + pGpu->sriovState.maxP2pGfid = 0; + + // Include Physical function that occupies GFID 0 + totalPcieFns = pGpu->sriovState.totalVFs + 1; + + pGpu->sriovState.pAllocatedGfids = portMemAllocNonPaged(totalPcieFns); + + if (pGpu->sriovState.pAllocatedGfids == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Memory allocation failed for GFID tracking\n"); + DBG_BREAKPOINT(); + return NV_ERR_NO_MEMORY; + } + + portMemSet(pGpu->sriovState.pAllocatedGfids, 0, totalPcieFns); + pGpu->sriovState.maxGfid = pGpu->sriovState.totalVFs; + + // Set GFID 0 in use. + gpuSetGfidUsage(pGpu, GPU_GFID_PF, NV_TRUE); + + return NV_OK; +} + +NV_STATUS +gpuDeinitSriov_FWCLIENT +( + OBJGPU *pGpu +) +{ + if (pGpu->sriovState.pAllocatedGfids != NULL) + { + // Clear GFID 0 usage. + gpuSetGfidUsage(pGpu, GPU_GFID_PF, NV_FALSE); + + portMemFree(pGpu->sriovState.pAllocatedGfids); + pGpu->sriovState.pAllocatedGfids = NULL; + } + + return NV_OK; +} + +NvBool +gpuCheckPageRetirementSupport_GSPCLIENT +( + OBJGPU *pGpu +) +{ + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + return pGSCI->bPageRetirementSupported; +} + +void gpuInitBranding_FWCLIENT(OBJGPU *pGpu) +{ + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + + pGpu->bIsQuadro = pGSCI->bIsQuadroAd || pGSCI->bIsQuadroGeneric; + pGpu->bIsNvidiaNvs = pGSCI->bIsNvidiaNvs; + pGpu->bIsVgx = pGSCI->bIsVgx; + pGpu->bGeforceSmb = pGSCI->bGeforceSmb; + pGpu->bIsTitan = pGSCI->bIsTitan; + pGpu->bIsTesla = pGSCI->bIsTesla; + + pGpu->bIsGeforce = !(pGpu->bIsQuadro || pGpu->bIsTesla || pGpu->bIsNvidiaNvs); +} + +BRANDING_TYPE gpuDetectBranding_FWCLIENT(OBJGPU *pGpu) +{ + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + + if (pGSCI->bIsQuadroGeneric) + return BRANDING_TYPE_QUADRO_GENERIC; + if (pGSCI->bIsQuadroAd) + return BRANDING_TYPE_QUADRO_AD; + if (pGSCI->bIsNvidiaNvs) + return BRANDING_TYPE_NVS_NVIDIA; + + return BRANDING_TYPE_NONE; +} + +COMPUTE_BRANDING_TYPE +gpuDetectComputeBranding_FWCLIENT(OBJGPU *pGpu) +{ + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + return pGSCI->computeBranding; +} + +BRANDING_TYPE +gpuDetectVgxBranding_FWCLIENT(OBJGPU *pGpu) +{ + return pGpu->bIsVgx ? 
BRANDING_TYPE_VGX : BRANDING_TYPE_NONE; +} + +NV_STATUS +gpuGenGidData_FWCLIENT +( + OBJGPU *pGpu, + NvU8 *pGidData, + NvU32 gidSize, + NvU32 gidFlags +) +{ + if (FLD_TEST_DRF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1, gidFlags)) + { + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + portMemCopy(pGidData, RM_SHA1_GID_SIZE, pGSCI->gidInfo.data, RM_SHA1_GID_SIZE); + return NV_OK; + } + return NV_ERR_NOT_SUPPORTED; +} + +NvU32 gpuGetActiveFBIOs_FWCLIENT(OBJGPU *pGpu) +{ + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + return pGSCI->fbio_mask; +} + +NvBool gpuIsGlobalPoisonFuseEnabled_FWCLIENT(OBJGPU *pGpu) +{ + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + return pGSCI->poisonFuseEnabled; +} + +/*! + * @brief These functions are used on CPU RM when pGpu is a GSP client. + * Data is fetched from GSP using subdeviceCtrlCmdInternalGetChipInfo and cached, + * then retrieved through the internal gpuGetChipInfo. + * + * Functions either return value directly, or through a second [out] param, depending + * on the underlying function. + * + * @param[in] pGpu + */ +NvU8 +gpuGetChipSubRev_FWCLIENT +( + OBJGPU *pGpu +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, 0); + + return pChipInfo->chipSubRev; +} + +NvU32 +gpuGetEmulationRev1_FWCLIENT +( + OBJGPU *pGpu +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, 0); + + return pChipInfo->emulationRev1; +} + +NV_STATUS +gpuConstructDeviceInfoTable_FWCLIENT +( + OBJGPU *pGpu +) +{ + NV_STATUS status; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV2080_CTRL_INTERNAL_GET_DEVICE_INFO_TABLE_PARAMS *pParams; + const NvU32 cmd = NV2080_CTRL_CMD_INTERNAL_GET_DEVICE_INFO_TABLE; + + if (pGpu->pDeviceInfoTable) // already initialized + return NV_OK; + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_NO_MEMORY); + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + cmd, pParams, sizeof(*pParams)); + if (status != NV_OK) + goto done; + + if (pParams->numEntries == 0) + goto done; + + pGpu->pDeviceInfoTable = portMemAllocNonPaged(pParams->numEntries * sizeof(DEVICE_INFO2_TABLE)); + NV_ASSERT_TRUE_OR_GOTO(status, pGpu->pDeviceInfoTable != NULL, NV_ERR_NO_MEMORY, done); + + pGpu->numDeviceInfoEntries = pParams->numEntries; + portMemCopy(pGpu->pDeviceInfoTable, pGpu->numDeviceInfoEntries * sizeof(DEVICE_INFO2_TABLE), + pParams->deviceInfoTable, pParams->numEntries * sizeof(DEVICE_INFO2_TABLE)); + +done: + portMemFree(pParams); + return status; +} + +NvU32 +gpuGetLitterValues_FWCLIENT +( + OBJGPU *pGpu, + NvU32 index +) +{ + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + const NV2080_CTRL_INTERNAL_STATIC_GR_INFO *pGrInfo; + NvU32 i; + + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized, 0); + pGrInfo = pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo; + NV_ASSERT_OR_RETURN(pGrInfo != NULL, 0); + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pGrInfo->infoList); i++) + { + if (pGrInfo->infoList[i].index == index) + return pGrInfo->infoList[i].data; + } + return 0; +} + +NV_STATUS +gpuGetRegBaseOffset_FWCLIENT +( + OBJGPU *pGpu, + NvU32 regBase, + NvU32 *pOffset +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, 
NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(regBase < NV_ARRAY_ELEMENTS(pChipInfo->regBases), NV_ERR_NOT_SUPPORTED); + + if (pChipInfo->regBases[regBase] != 0xFFFFFFFF) + { + *pOffset = pChipInfo->regBases[regBase]; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NvU32 +gpuReadBAR1Size_FWCLIENT +( + OBJGPU *pGpu +) +{ + const NV2080_CTRL_INTERNAL_GPU_GET_CHIP_INFO_PARAMS *pChipInfo = gpuGetChipInfo(pGpu); + NV_ASSERT_OR_RETURN(pChipInfo != NULL, 0); + + return pChipInfo->bar1Size; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_name_kernel.c b/src/nvidia/src/kernel/gpu/gpu_name_kernel.c new file mode 100644 index 000000000..55ed9f0d2 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_name_kernel.c @@ -0,0 +1,76 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" + +NV_STATUS +gpuGetNameString_KERNEL +( + OBJGPU *pGpu, + NvU32 type, + void *nameStringBuffer +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_GET_NAME_STRING, ¶ms, sizeof(params))); + + if (type == NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII) + { + portMemCopy(nameStringBuffer, sizeof(params.gpuNameString.ascii), + params.gpuNameString.ascii, sizeof(params.gpuNameString.ascii)); + } + else + { + portMemCopy(nameStringBuffer, sizeof(params.gpuNameString.unicode), + params.gpuNameString.unicode, sizeof(params.gpuNameString.unicode)); + } + + return NV_OK; +} + +NV_STATUS +gpuGetShortNameString_KERNEL +( + OBJGPU *pGpu, + NvU8 *nameStringBuffer +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_GPU_GET_SHORT_NAME_STRING_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_GET_SHORT_NAME_STRING, ¶ms, sizeof(params))); + + portMemCopy(nameStringBuffer, sizeof(params.gpuShortNameString), + params.gpuShortNameString, sizeof(params.gpuShortNameString)); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_protobuf.c b/src/nvidia/src/kernel/gpu/gpu_protobuf.c new file mode 100644 index 000000000..0ef27fa11 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_protobuf.c @@ -0,0 +1,142 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" + +#include "diagnostics/nv_debug_dump.h" + +#include "lib/protobuf/prb_util.h" +#include "g_nvdebug_pb.h" + +// +// Routine to dump gpu engine common fields +// +static +NV_STATUS +_gpuDumpEngine_CommonFields +( + OBJGPU *pGpu, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState +) +{ + NV_STATUS rmStatus = NV_OK; + + prbEncAddUInt32(pPrbEnc, + NVDEBUG_ENG_GPU_GPU_ID, + pGpu->gpuId); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_SLI, + IsSLIEnabled(pGpu)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_NOTEBOOK, + IsMobile(pGpu)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_VIRTUAL, + IS_VIRTUAL(pGpu)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_FULL_POWER, + gpuIsGpuFullPower(pGpu)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_IN_FULLCHIP_RESET, + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_IN_SEC_BUS_RESET, + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_SECONDARY_BUS_RESET)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_IN_GC6_RESET, + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_GC6_RESET)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_SUSPENDED, + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_LOST, + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST)); + + prbEncAddBool(pPrbEnc, + NVDEBUG_ENG_GPU_IS_ACCESSIBLE, + pNvDumpState->bGpuAccessible); + + return rmStatus; +} + +// +// Routine to dump gpu engine debug info +// +static +NV_STATUS +_gpuDumpEngineFunc +( + OBJGPU *pGpu, + PRB_ENCODER *pPrbEnc, + NVD_STATE *pNvDumpState, + void *pvData +) +{ + NV_STATUS nvStatus = NV_OK; + NvU8 startingDepth = prbEncNestingLevel(pPrbEnc); + + // Dump basic GPU info for all error types. + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_GPUINFO_ENG_GPU)); + + // Dump common fields. + NV_CHECK_OK(nvStatus, LEVEL_ERROR, + _gpuDumpEngine_CommonFields(pGpu, pPrbEnc, pNvDumpState)); + + // Unwind the protobuf to the correct depth. + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(nvStatus, LEVEL_ERROR, + prbEncUnwindNesting(pPrbEnc, startingDepth)); + + return nvStatus; +} + +void +gpuDumpCallbackRegister_IMPL +( + OBJGPU *pGpu +) +{ + NvDebugDump *pNvd = GPU_GET_NVD(pGpu); + if (pNvd != NULL) + { + nvdEngineSignUp(pGpu, + pNvd, + _gpuDumpEngineFunc, + NVDUMP_COMPONENT_ENG_GPU, + REF_DEF(NVD_ENGINE_FLAGS_PRIORITY, _MED) | + REF_DEF(NVD_ENGINE_FLAGS_SOURCE, _GSP), + (void *)pGpu); + } +} + diff --git a/src/nvidia/src/kernel/gpu/gpu_register_access_map.c b/src/nvidia/src/kernel/gpu/gpu_register_access_map.c new file mode 100644 index 000000000..2d8474597 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_register_access_map.c @@ -0,0 +1,353 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "lib/base_utils.h" +#include "lib/zlib/inflate.h" +#include "nvRmReg.h" + +/** + * @brief Changes the user-space permissions for a given register address range + * + * @param pGpu + * @param[in] offset byte address of register address range start + * @param[in] size size in bytes of register address range + * @param[in] bAllow whether or not to allow register access from user space + * + * @return NV_OK if success, error otherwise + */ +NV_STATUS +gpuSetUserRegisterAccessPermissions_IMPL(OBJGPU *pGpu, NvU32 offset, NvU32 size, NvBool bAllow) +{ + NvU32 mapSize = pGpu->userRegisterAccessMapSize * 8; // total number of bits + NvU32 bitOffset; + NvU32 bitSize; + + NV_ASSERT_OR_RETURN(pGpu->pUserRegisterAccessMap != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN((offset & 3) == 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((size & 3) == 0, NV_ERR_INVALID_ARGUMENT); + + NV_PRINTF(LEVEL_INFO, "%sllowing access to 0x%x-0x%x\n", + (bAllow ? "A" : "Disa"), offset, (offset + size - 1)); + + NV_PRINTF(LEVEL_INFO, "Byte 0x%x Bit 0x%x through Byte 0x%x Bit 0x%x\n", + offset / 4 / 8, offset / 4 % 8, (offset + size) / 4 / 8, + (offset + size - 1) / 4 % 8); + + bitOffset = offset/sizeof(NvU32); + bitSize = size/sizeof(NvU32); + + NV_ASSERT_OR_RETURN(bitOffset < mapSize, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((bitOffset+bitSize) <= mapSize, NV_ERR_INVALID_ARGUMENT); + + // Deal with bits up to first byte. + for (; bitOffset%8 != 0 && bitSize; bitOffset++, bitSize--) + { + nvBitFieldSet((NvU32*) pGpu->pUserRegisterAccessMap, + pGpu->userRegisterAccessMapSize / sizeof(NvU32), + bitOffset, bAllow); + } + + if (!bitSize) + return NV_OK; + + // Deal with any full bytes. + portMemSet(pGpu->pUserRegisterAccessMap + bitOffset/8, bAllow ? 
0xff : 0x0, NV_ALIGN_DOWN(bitSize, 8)/8); + + bitOffset += NV_ALIGN_DOWN(bitSize, 8); + bitSize -= NV_ALIGN_DOWN(bitSize, 8); + + // Any remaining bits + for (; bitSize; bitOffset++, bitSize--) + { + nvBitFieldSet((NvU32*) pGpu->pUserRegisterAccessMap, + pGpu->userRegisterAccessMapSize / sizeof(NvU32), + bitOffset, bAllow); + } + + return NV_OK; +} + +/** + * @brief Changes the user-space permissions for the given (in bulk) register address ranges + * + * @param pGpu + * @param[in] pOffsetsSizesArr flat array of (register offset, register size in bytes) pairs + * @param[in] arrSizeBytes size in bytes of the pOffsetsSizesArr array + * @param[in] bAllow whether or not to allow register access from user space + * + * @return NV_OK if success, error otherwise + */ +NV_STATUS +gpuSetUserRegisterAccessPermissionsInBulk_IMPL(OBJGPU *pGpu, const NvU32 *pOffsetsSizesArr, + NvU32 arrSizeBytes, NvBool bAllow) +{ + NV_ASSERT_OR_RETURN((arrSizeBytes & (2 * sizeof(NvU32) - 1)) == 0, NV_ERR_INVALID_ARGUMENT); + NvU32 numElements = arrSizeBytes / sizeof(NvU32); + + NvU32 i; + NV_STATUS status; + for (i = 0; i < numElements; i += 2) + { + status = gpuSetUserRegisterAccessPermissions(pGpu, + pOffsetsSizesArr[i], pOffsetsSizesArr[i + 1], bAllow); + + if (status != NV_OK) + { + return status; + } + } + + return NV_OK; +} + +/** + * @brief returns if a given register address can be accessed from userspace. + * + * @param pGpu + * @param[in] offset Register offset to test, must be dword aligned. + * + * @return NV_TRUE if register is accessible, NV_FALSE if not. + */ +NvBool +gpuGetUserRegisterAccessPermissions_IMPL(OBJGPU *pGpu, NvU32 offset) +{ + NvU32 bitOffset = offset / sizeof(NvU32); + + if (!pGpu->pUserRegisterAccessMap) + { + // + // If very early in the init sequence, everything is accessible, since + // we can't have gotten any user originating accesses yet. + // + if (!gpuIsFullyConstructed(pGpu)) + return NV_TRUE; + + NV_ASSERT_FAILED("No user register access map available to read"); + return NV_FALSE; + } + + if (bitOffset >= (pGpu->userRegisterAccessMapSize * 8)) + { + NV_PRINTF(LEVEL_ERROR, "Parameter `offset` = %u is out of bounds.\n", + offset); + return NV_FALSE; + } + + if ((offset % 4) != 0) + { + NV_PRINTF(LEVEL_ERROR, + "Parameter `offset` = %u must be 4-byte aligned.\n", offset); + return NV_FALSE; + } + + // pGpu->pUserRegisterAccessMap is pageable, must not be at raised IRQ + NV_ASSERT_OR_RETURN(!osIsRaisedIRQL(), NV_ERR_INVALID_IRQ_LEVEL); + + return nvBitFieldTest((NvU32*) pGpu->pUserRegisterAccessMap, pGpu->userRegisterAccessMapSize / sizeof(NvU32), bitOffset); +} + + +static NvBool _getIsProfilingPrivileged(OBJGPU *pGpu) +{ +#if defined(DEBUG) || defined(DEVELOP) + return NV_FALSE; +#else + NvU32 data32; + if (NV_OK == osReadRegistryDword(pGpu, NV_REG_STR_RM_PROFILING_ADMIN_ONLY, &data32)) + { + return (data32 == NV_REG_STR_RM_PROFILING_ADMIN_ONLY_TRUE); + } + + return NV_TRUE; +#endif +} +/** + * @brief Constructs the bitmap used to control whether a register can be accessed by user space. + * + * Bitmap contains a single bit per 32b register. 
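+ *
+ * For example, register byte offset 0x1000 corresponds to bit 0x1000 / 4 =
+ * 0x400 of the map (byte 0x80, bit 0), so a map of N bytes covers the first
+ * N * 32 bytes of register space; this mirrors the
+ * bitOffset = offset / sizeof(NvU32) computation in
+ * gpuSetUserRegisterAccessPermissions() above.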
+ * + * @param pGpu + * + * @return NV_OK if success, error otherwise + */ +NV_STATUS +gpuConstructUserRegisterAccessMap_IMPL(OBJGPU *pGpu) +{ + NV2080_CTRL_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP_PARAMS *pParams = NULL; + NV_STATUS status = NV_OK; + NvU32 compressedSize = 0; + NvU32 profilingRangesSize = 0; + const NvU8 *compressedData = NULL; + const NvU32 *profilingRangesArr = NULL; + + NV_ASSERT(pGpu->userRegisterAccessMapSize == 0); + + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + pParams = portMemAllocPaged(sizeof(*pParams)); + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_NO_MEMORY); + portMemSet(pParams, 0, sizeof(*pParams)); + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GPU_GET_USER_REGISTER_ACCESS_MAP, + pParams, sizeof(*pParams)), done); + + pGpu->userRegisterAccessMapSize = pParams->userRegisterAccessMapSize; + compressedSize = pParams->compressedSize; + profilingRangesSize = pParams->profilingRangesSize; + compressedData = (const NvU8*)pParams->compressedData; + profilingRangesArr = (const NvU32*)pParams->profilingRanges; + } + + // + // We round up to a 32b multiple to be used with bitfield helpers. + // (Of course it should already be a 32b multiple, but just to be sure.) + // + pGpu->userRegisterAccessMapSize = NV_ALIGN_UP(pGpu->userRegisterAccessMapSize, sizeof(NvU32)); + if (pGpu->userRegisterAccessMapSize == 0) + { + NV_PRINTF(LEVEL_INFO, + "User Register Access Map unsupported for this chip.\n"); + status = NV_OK; + goto done; + } + + pGpu->pUserRegisterAccessMap = portMemAllocPaged(pGpu->userRegisterAccessMapSize); + if (pGpu->pUserRegisterAccessMap == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + pGpu->pUnrestrictedRegisterAccessMap = portMemAllocPaged(pGpu->userRegisterAccessMapSize); + if (pGpu->pUnrestrictedRegisterAccessMap == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + NV_PRINTF(LEVEL_INFO, "Allocated User Register Access Map of 0x%xB @%p\n", + pGpu->userRegisterAccessMapSize, pGpu->pUserRegisterAccessMap); + + if (!pGpu->bUseRegisterAccessMap || compressedSize == 0) + { + NV_PRINTF(LEVEL_INFO, + "GPU/Platform does not have restricted user register access! 
Allowing all registers.\n"); + portMemSet(pGpu->pUserRegisterAccessMap, 0xFF, pGpu->userRegisterAccessMapSize); + } + else + { + NV_ASSERT_OK_OR_GOTO(status, + gpuInitRegisterAccessMap(pGpu, pGpu->pUserRegisterAccessMap, + pGpu->userRegisterAccessMapSize, compressedData, compressedSize), done); + } + + // copy permissions from user access map + if (portMemCopy(pGpu->pUnrestrictedRegisterAccessMap, pGpu->userRegisterAccessMapSize, + pGpu->pUserRegisterAccessMap, pGpu->userRegisterAccessMapSize) == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize unrestricted register access map\n"); + status = NV_ERR_INVALID_ADDRESS; + goto done; + } + + pGpu->bRmProfilingPrivileged = _getIsProfilingPrivileged(pGpu); + if (pGpu->bRmProfilingPrivileged && profilingRangesSize > 0) + { + // remove profiling registers from user map + status = gpuSetUserRegisterAccessPermissionsInBulk( + pGpu, profilingRangesArr, profilingRangesSize, NV_FALSE); + if (status != NV_OK) + { + pGpu->bRmProfilingPrivileged = NV_FALSE; + goto done; + } + } + +done: + if (status != NV_OK) + { + portMemFree(pGpu->pUserRegisterAccessMap); + pGpu->pUserRegisterAccessMap = NULL; + + portMemFree(pGpu->pUnrestrictedRegisterAccessMap); + pGpu->pUnrestrictedRegisterAccessMap = NULL; + + pGpu->userRegisterAccessMapSize = 0; + } + portMemFree(pParams); + + return status; +} + + +/** + * @brief Initializes the register access map + * + * Extracts compressed data representing access map. + * + * @param pGpu + * + * @return NV_OK if success, error otherwise + */ +NV_STATUS +gpuInitRegisterAccessMap_IMPL(OBJGPU *pGpu, NvU8 *pAccessMap, NvU32 accessMapSize, const NvU8 *pComprData, const NvU32 comprDataSize) +{ + PGZ_INFLATE_STATE pGzState = NULL; + NvU32 inflatedBytes = 0; + + NV_ASSERT_OR_RETURN(pAccessMap != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(accessMapSize != 0, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pComprData != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(comprDataSize != 0, NV_ERR_INVALID_STATE); + + // + // Strip off gzlib 10-byte header + // XXX this really belongs in the RM GZ library + // + pComprData += 10; + + NV_ASSERT_OK_OR_RETURN(utilGzAllocate((NvU8*)pComprData, accessMapSize, &pGzState)); + + NV_ASSERT(pGzState); + + inflatedBytes = utilGzGetData(pGzState, 0, accessMapSize, pAccessMap); + + utilGzDestroy(pGzState); + + if (inflatedBytes != accessMapSize) + { + NV_PRINTF(LEVEL_ERROR, + "failed to get inflated data, got %u bytes, expecting %u\n", + inflatedBytes, pGpu->userRegisterAccessMapSize); + DBG_BREAKPOINT(); + return NV_ERR_INFLATE_COMPRESSED_DATA_FAILED; + } + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_registry.c b/src/nvidia/src/kernel/gpu/gpu_registry.c new file mode 100644 index 000000000..26a42d825 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_registry.c @@ -0,0 +1,343 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "gpu/gpu_timeout.h" +#include "gpu/gpu_access.h" +#include "core/thread_state.h" + +#include "nvrm_registry.h" + +static void _gpuInitGlobalSurfaceOverride(OBJGPU *pGpu); + +/*! + * @brief Read out any overrides and settings from the registry + */ +NV_STATUS +gpuInitRegistryOverrides_KERNEL +( + OBJGPU *pGpu +) +{ + NvU32 data32 = 0; + + // Override timeout settings + timeoutRegistryOverride(&pGpu->timeoutData, pGpu); + + // Check the registry for an override of the "broken FB" property. + if (osReadRegistryDword(pGpu, NV_REG_STR_GPU_BROKEN_FB, + &data32) != NV_OK) + { + // Apply defaults based on the chip and mask. + data32 = NV_REG_STR_GPU_BROKEN_FB_DEFAULT; + } + + // for 0FB set as though FB memory is broken to cover all the tests + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)) + { + data32 = FLD_SET_DRF(_REG_STR_GPU, _BROKEN_FB, _MEMORY, _BROKEN, data32); + } + + // Set the property for the broken memory access. + if (FLD_TEST_DRF(_REG_STR_GPU, _BROKEN_FB, _MEMORY, _BROKEN, data32)) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BROKEN_FB, NV_TRUE); + } + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_INST_VPR, &pGpu->instVprOverrides) != NV_OK) + { + pGpu->instVprOverrides = 0; + } + + // + // Persistent compute mode rules (fix for bug 544798): If a + // client had enabled compute mode earlier, we would have + // recorded this in the registry. 
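+    // The recorded value is replayed from NV_REG_STR_RM_COMPUTE_MODE_RULES
+    // into pGpu->computeModeRules below, so the client-selected compute mode
+    // persists across driver loads.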
+ // + pGpu->computeModeRules = NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE; + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_COMPUTE_MODE_RULES, &data32)) + { + pGpu->computeModeRules = data32; + } + + // Check to see if we have any ThreadState registry overrides + threadStateInitRegistryOverrides(pGpu); + + // Check to see if we enable surprise removal support + // Enable SR support by default, disable if the regkey is set to 0 + pGpu->bSurpriseRemovalSupported = NV_TRUE; + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_GPU_SURPRISE_REMOVAL, &data32) == NV_OK) + { + if (data32 == 0) + { + pGpu->bSurpriseRemovalSupported = NV_FALSE; + } + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER, &data32) == NV_OK) + { + pGpu->bClientRmAllocatedCtxBuffer = (data32 == NV_REG_STR_RM_CLIENT_RM_ALLOCATED_CTX_BUFFER_ENABLED); + + NV_PRINTF(LEVEL_INFO, "Setting Client RM managed context buffer to %u\n", + pGpu->bClientRmAllocatedCtxBuffer); + } + else if (IS_GSP_CLIENT(pGpu) || RMCFG_FEATURE_PLATFORM_GSP) + { + pGpu->bClientRmAllocatedCtxBuffer = NV_TRUE; + } + else if ( NV_IS_MODS || !(pGpu->bSriovEnabled || IS_VIRTUAL(pGpu)) ) + { + // TODO : enable this feature on mods + pGpu->bClientRmAllocatedCtxBuffer = NV_FALSE; + } + + if ((pGpu->bSriovEnabled && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) || + RMCFG_FEATURE_PLATFORM_GSP || IS_GSP_CLIENT(pGpu)) + { + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM, &data32) == NV_OK) + { + pGpu->bSplitVasManagementServerClientRm = + (data32 == NV_REG_STR_RM_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_ENABLED); + } + else + { + pGpu->bSplitVasManagementServerClientRm = NV_TRUE; + } + + NV_PRINTF(LEVEL_INFO, "Split VAS mgmt between Server/Client RM %u\n", + pGpu->bSplitVasManagementServerClientRm); + } + + return NV_OK; +} + +/*! + * @brief Initialize gpu instLocOverrides, called after registry overrides to avoid ordering issues + */ +NV_STATUS +gpuInitInstLocOverrides_IMPL +( + OBJGPU *pGpu +) +{ + { + // + // The pGpu fields are initialized to zero. Try to fill them from the + // registry; if the reads fail, the values will remain zero. + // + osReadRegistryDword(pGpu, NV_REG_STR_RM_INST_LOC, &pGpu->instLocOverrides); + osReadRegistryDword(pGpu, NV_REG_STR_RM_INST_LOC_2, &pGpu->instLocOverrides2); + osReadRegistryDword(pGpu, NV_REG_STR_RM_INST_LOC_3, &pGpu->instLocOverrides3); + osReadRegistryDword(pGpu, NV_REG_STR_RM_INST_LOC_4, &pGpu->instLocOverrides4); + + // + // Currently only InstLoc uses the global registry override + // If no global override, leave the regkey hierarchy + // + _gpuInitGlobalSurfaceOverride(pGpu); + } + + // + // If instloc overrides were not provided, then default everything to + // sysmem for ZeroFB (except for a few things if L2 cache is available). + // Ampere onwards, default value of RMInstLoc will change to 0x10000000 + // since MODS will append pmu_instloc_coh coh to commandlines when SMC + // args are present. 
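+    // (0x10000000 is the RMInstLoc encoding produced by that lone
+    // pmu_instloc_coh argument on an otherwise default command line.)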
Until MODS is fixed to not modify RMInstLoc when there + // is no commandline arg doing so, change RM to recognize 0x10000000 as the + // new default value; else we fail on 0 FB chips that do not use -fb_broken + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + ((pGpu->instLocOverrides == 0) || (pGpu->instLocOverrides == 0x10000000)) && + pGpu->instLocOverrides2 == 0 && + pGpu->instLocOverrides3 == 0 && + pGpu->instLocOverrides4 == 0) + { + pGpu->instLocOverrides = NV_REG_STR_RM_INST_LOC_ALL_COH; + pGpu->instLocOverrides2 = NV_REG_STR_RM_INST_LOC_ALL_COH; + pGpu->instLocOverrides3 = NV_REG_STR_RM_INST_LOC_ALL_COH; + // Leave instLocOverrides4 as _DEFAULT until all flavors are tested. + + if (gpuIsCacheOnlyModeEnabled(pGpu)) + { + // + // If cache only mode is enabled then we will override + // userD and bar page tables to vidmem(l2 cache). + // This is to avoid deadlocks on platforms + // that don't support reflected accesses. + // Such platforms will need to enable cache only mode to + // run test zeroFb + // NOTE: Since this puts USERD in vidmem, you probably also want to + // reduce the number of channels to allocate, or else + // fifoPreAllocUserD_GF100 will fail due to the limited amount of + // L2 available as "vidmem". (Use the RmNumFifos regkey.) + // + pGpu->instLocOverrides = FLD_SET_DRF(_REG, _STR_RM_INST_LOC, _BAR_PTE, _VID, pGpu->instLocOverrides); + pGpu->instLocOverrides = FLD_SET_DRF(_REG, _STR_RM_INST_LOC, _USERD, _VID, pGpu->instLocOverrides); + pGpu->instLocOverrides = FLD_SET_DRF(_REG, _STR_RM_INST_LOC, _BAR_PDE, _VID, pGpu->instLocOverrides); + } + } + + // + // Move all checks of instLocOverrides to the end of the function to avoid + // ordering issues. + // + + // If All inst is in sysmem, set the property as well as caching attribute + if (((pGpu->instLocOverrides == NV_REG_STR_RM_INST_LOC_ALL_NCOH) || + (pGpu->instLocOverrides == 0x9aaaaaaa)) && + (pGpu->instLocOverrides2 == NV_REG_STR_RM_INST_LOC_ALL_NCOH) && + (pGpu->instLocOverrides3 == NV_REG_STR_RM_INST_LOC_ALL_NCOH)) + // TODO: Check instLocOverrides4 after MODS is updated. + { + // Force to _DEFAULT until all flavors are tested and MODS is updated. + pGpu->instLocOverrides4 = NV_REG_STR_RM_INST_LOC_ALL_DEFAULT; + pGpu->instLocOverrides4 = FLD_SET_DRF(_REG_STR, _RM_INST_LOC_4, _BAR, _NCOH, pGpu->instLocOverrides4); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM, NV_TRUE); + pGpu->instCacheOverride = NV_MEMORY_UNCACHED; + } + else if (((pGpu->instLocOverrides == NV_REG_STR_RM_INST_LOC_ALL_COH) || + (pGpu->instLocOverrides == 0x95555555)) && + (pGpu->instLocOverrides2 == NV_REG_STR_RM_INST_LOC_ALL_COH) && + (pGpu->instLocOverrides3 == NV_REG_STR_RM_INST_LOC_ALL_COH)) + // TODO: Check instLocOverrides4 after MODS is updated. + { + pGpu->instLocOverrides4 = NV_REG_STR_RM_INST_LOC_ALL_DEFAULT; // Force to _DEFAULT until all flavors are tested and MODS is updated. + pGpu->instLocOverrides4 = FLD_SET_DRF(_REG_STR, _RM_INST_LOC_4, _BAR, _COH, pGpu->instLocOverrides4); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM, NV_TRUE); + pGpu->instCacheOverride = NV_MEMORY_CACHED; + } + + // + // If all-inst-in-sysmem is specified, we must leave all bits in instLocOverrides, + // instLocOverrides2, and instLocOverrides3 alone, because they are checked bitwise + // all over RM. all-inst-in-sysmem is specified with bit patterns 0x55555555 + // (coh) or 0xaaaaaaaa (non-coh) for instLocOverrides/2/3. If it is not + // all-inst-in-sysmem (i.e. 
a la carte selection of bits about what goes in sysmem + // and what goes in FB), we are free to clear BAR PTEs/PDEs in sysmem bits and + // we must on Volta+, because on Volta and Turing, reflected BAR mappings (e.g. + // accessing a BAR PTE on the CPU as CPU->XVE/XTL->HOST/XAL->FBHUB->HOST/XAL->XVE/XTL->SYSMEM) + // may cause a deadlock in the GPU. On Ampere+, it will surely cause a deadlock. + // Note that the BAR PTE accesses to SYSMEM originating inside the GPU (from CEs and + // SMs) are fine. + // + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM) && + !(FLD_TEST_DRF(_REG_STR_RM, _INST_LOC, _BAR_PTE, _DEFAULT, pGpu->instLocOverrides) && + FLD_TEST_DRF(_REG_STR_RM, _INST_LOC, _BAR_PDE, _DEFAULT, pGpu->instLocOverrides))) + { + pGpu->instLocOverrides = FLD_SET_DRF(_REG, _STR_RM_INST_LOC, _BAR_PTE, _DEFAULT, pGpu->instLocOverrides); + pGpu->instLocOverrides = FLD_SET_DRF(_REG, _STR_RM_INST_LOC, _BAR_PDE, _DEFAULT, pGpu->instLocOverrides); + NV_PRINTF(LEVEL_WARNING, "Ignoring regkeys to place BAR PTE/PDE in SYSMEM\n"); + } + + return NV_OK; +} + +#define GP100_BYPASS_47BIT_PA_WAR 4 + +/*! + * @brief This function sets the global surface location override value based + * on a regkey or a PDB property. The PDB property can be auto-set or + * can be set (elsewhere) based on the platform and chip. + */ +static void +_gpuInitGlobalSurfaceOverride +( + OBJGPU *pGpu +) +{ + NvU32 globalOverride; + + // + // Precedence of global overrides. + // 1. HAL layer forces an override + // 2. Regkey override. + // + if (pGpu->bInstLoc47bitPaWar) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) || + (pGpu->instLocOverrides != 0) || + (pGpu->instLocOverrides2 != 0) || + (pGpu->instLocOverrides3 != 0) || + (pGpu->instLocOverrides4 != 0)) + { + NV_PRINTF(LEVEL_ERROR, + "INSTLOC overrides may not work with large mem systems on GP100+\n"); + } + else + { + globalOverride = GP100_BYPASS_47BIT_PA_WAR; + pGpu->bRegUsesGlobalSurfaceOverrides = NV_TRUE; + } + } + + if (!pGpu->bRegUsesGlobalSurfaceOverrides) + { + NvU32 data32; + if (osReadRegistryDword(pGpu, NV_REG_STR_GLOBAL_SURFACE_OVERRIDE, &data32) == NV_OK) + { + if (DRF_VAL(_REG_STR, _GLOBAL_SURFACE_OVERRIDE_RM, _ENABLE, data32) == NV_REG_STR_GLOBAL_SURFACE_OVERRIDE_ENABLE) + { + globalOverride = DRF_VAL(_REG_STR, _GLOBAL_SURFACE_OVERRIDE_RM, _VALUE, data32); + pGpu->bRegUsesGlobalSurfaceOverrides = NV_TRUE; + } + } + } + + // Apply global overrides, if any + if (pGpu->bRegUsesGlobalSurfaceOverrides) + { + if (globalOverride == GP100_BYPASS_47BIT_PA_WAR) + { + // force units not supporting 47 bit PAs to vidmem for GP100 + pGpu->instLocOverrides = FLD_SET_DRF(_REG_STR_RM, _INST_LOC, _INSTBLK, _VID, pGpu->instLocOverrides); + pGpu->instLocOverrides = FLD_SET_DRF(_REG_STR_RM, _INST_LOC, _RUNLIST, _VID, pGpu->instLocOverrides); + pGpu->instLocOverrides = FLD_SET_DRF(_REG_STR_RM, _INST_LOC, _USERD, _VID, pGpu->instLocOverrides); + pGpu->instLocOverrides = FLD_SET_DRF(_REG_STR_RM, _INST_LOC, _PMUINST, _VID, pGpu->instLocOverrides); + pGpu->instLocOverrides2 = 0; + pGpu->instLocOverrides3 = FLD_SET_DRF(_REG_STR_RM, _INST_LOC_3, _FLCNINST, _VID, pGpu->instLocOverrides3); + pGpu->instLocOverrides4 = 0; + } + else + { + NvU32 i; + NvU32 ovBits; + + ovBits = DRF_VAL(_REG_STR, _GLOBAL_SURFACE_OVERRIDE_RM, _VALUE, globalOverride); + pGpu->instLocOverrides = 0; + + // Propagate two-bit global override value to 32-bit RM override value + for (i = 0; i < 32; i += 2) + { + pGpu->instLocOverrides |= (ovBits << i); + } + pGpu->instLocOverrides2 = 
pGpu->instLocOverrides; + pGpu->instLocOverrides3 = pGpu->instLocOverrides; + pGpu->instLocOverrides4 = pGpu->instLocOverrides; + } + } +} diff --git a/src/nvidia/src/kernel/gpu/gpu_resource.c b/src/nvidia/src/kernel/gpu/gpu_resource.c new file mode 100644 index 000000000..577d4ba2c --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_resource.c @@ -0,0 +1,461 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This implements functions of the base class for gpu resources. +* +******************************************************************************/ + +#include "core/core.h" +#include "os/os.h" +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "rmapi/client.h" +#include "rmapi/resource.h" +#include "gpu/gpu.h" +#include "gpu/gpu_resource.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +#include "g_allclasses.h" + +NV_STATUS +gpuresConstruct_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pDeviceRef; + RsResourceRef *pSubdeviceRef; + OBJGPU *pGpu = NULL; + NvBool bBcResource = NV_TRUE; + NV_STATUS status; + + // Check if instance is a subdevice + pGpuResource->pSubdevice = dynamicCast(pGpuResource, Subdevice); + + // Else check for ancestor + if (!pGpuResource->pSubdevice) + { + status = refFindAncestorOfType(pResourceRef, classId(Subdevice), &pSubdeviceRef); + if (status == NV_OK) + pGpuResource->pSubdevice = dynamicCast(pSubdeviceRef->pResource, Subdevice); + } + + // Check if instance is a device + pGpuResource->pDevice = dynamicCast(pGpuResource, Device); + + // Else check for ancestor + if (!pGpuResource->pDevice) + { + status = refFindAncestorOfType(pResourceRef, classId(Device), &pDeviceRef); + if (status == NV_OK) + pGpuResource->pDevice = dynamicCast(pDeviceRef->pResource, Device); + } + + if (RS_IS_COPY_CTOR(pParams)) + return gpuresCopyConstruct(pGpuResource, pCallContext, pParams); + + // Fails during device/subdevice ctor. 
Subclass ctor calls gpuresSetGpu + status = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if (status == NV_OK) + gpuresSetGpu(pGpuResource, pGpu, bBcResource); + + return NV_OK; +} + +NV_STATUS +gpuresCopyConstruct_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + GpuResource *pGpuResourceSrc = dynamicCast(pParams->pSrcRef->pResource, GpuResource); + + if (pGpuResourceSrc == NULL) + return NV_ERR_INVALID_OBJECT; + + gpuresSetGpu(pGpuResource, pGpuResourceSrc->pGpu, pGpuResourceSrc->bBcResource); + + return NV_OK; +} + +NV_STATUS +gpuresMap_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu; + NvU32 offset, size; + NV_STATUS rmStatus; + NvBool bBroadcast; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + rmStatus = gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, &offset, &size); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = rmapiMapGpuCommon(staticCast(pGpuResource, RsResource), + pCallContext, + pCpuMapping, + pGpu, + offset, + size); + pCpuMapping->processId = osGetCurrentProcess(); + + if (pParams->ppCpuVirtAddr) + *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress; + + return rmStatus; +} + +NV_STATUS +gpuresUnmap_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu; + NvBool bBroadcast; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + osUnmapGPU(pGpu->pOsGpuInfo, + rmclientGetCachedPrivilege(pClient), + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->pPriv); + + return NV_OK; +} + +NvBool +gpuresShareCallback_IMPL +( + GpuResource *pGpuResource, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pGpuResource); + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + NvU16 shareType = pSharePolicy->type; + + if ((shareType == RS_SHARE_TYPE_SMC_PARTITION) && !bMIGInUse) + { + // When MIG is not enabled, ignore Require restrictions + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + return NV_TRUE; + + // Fallback if feature is not available + shareType = RS_SHARE_TYPE_GPU; + } + + switch (shareType) + { + case RS_SHARE_TYPE_SMC_PARTITION: + { + if (RS_ACCESS_MASK_TEST(&pSharePolicy->accessMask, RS_ACCESS_DUP_OBJECT)) + { + // Special exceptions only for Dup + RsResourceRef *pSrcRef = RES_GET_REF(pGpuResource); + + switch (pSrcRef->externalClassId) + { + // + // XXX Bug 2815350: We exempt NV01_MEMORY_LOCAL_USER and NV01_MEMORY_SYSTEM because there is a WAR + // in place for these classes in memCopyConstruct. The WAR can only be removed after bug 2815350 is + // resolved. Once bug 2815350 is resolved and WAR removed, we can apply the partition check to + // NV01_MEMORY_LOCAL_USER and NV01_MEMORY_SYSTEM as well. + // + case NV01_MEMORY_LOCAL_USER: + case NV01_MEMORY_SYSTEM: + return NV_TRUE; + // + // We exempt this check for cases when a kernel client is trying to dup AMPERE_SMC_PARTITION_REF object. + // UVM dups AMPERE_SMC_PARTITION_REF from its user's client. 
see Bug 2826455 for details + // + case AMPERE_SMC_PARTITION_REF: + { + RmClient *pRmClient = dynamicCast(pInvokingClient, RmClient); + RS_PRIV_LEVEL privLevel = RS_PRIV_LEVEL_USER; + + if (pRmClient != NULL) + { + privLevel = rmclientGetCachedPrivilege(pRmClient); + } + + if ((privLevel >= RS_PRIV_LEVEL_KERNEL) + ) + return NV_TRUE; + + break; + } + } + + } + + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF refClient; + MIG_INSTANCE_REF refResource; + + if (bMIGInUse && + (kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, pInvokingClient->hClient, + &refClient) == NV_OK) && + (kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, RES_GET_CLIENT_HANDLE(pGpuResource), + &refResource) == NV_OK)) + { + // Ignore execution partition differences when sharing + refClient = kmigmgrMakeGIReference(refClient.pKernelMIGGpuInstance); + refResource = kmigmgrMakeGIReference(refResource.pKernelMIGGpuInstance); + if (kmigmgrAreMIGReferencesSame(&refClient, &refResource)) + return NV_TRUE; + } + } + break; + } + case RS_SHARE_TYPE_GPU: + { + RsResourceRef *pDeviceAncestorRef; + RsResourceRef *pParentDeviceAncestorRef; + + // This share type only works when called from dup + if (pParentRef == NULL) + break; + + if (pParentRef->internalClassId == classId(Device)) + { + // pParentRef is allowed to itself be the Device ancestor + pParentDeviceAncestorRef = pParentRef; + } + else + { + // If pParentRef is not itself the device, try to find a Device ancestor. If none exist, fail. + if (refFindAncestorOfType(pParentRef, classId(Device), &pParentDeviceAncestorRef) != NV_OK) + break; + } + // Check that the source resource's ancestor device instance matches the destination parent's device instance + if (refFindAncestorOfType(RES_GET_REF(pGpuResource), classId(Device), &pDeviceAncestorRef) == NV_OK) + { + Device *pDevice = dynamicCast(pDeviceAncestorRef->pResource, Device); + Device *pParentDevice = dynamicCast(pParentDeviceAncestorRef->pResource, Device); + + if ((pDevice != NULL) && (pParentDevice != NULL) && + (pDevice->deviceInst == pParentDevice->deviceInst)) + { + return NV_TRUE; + } + } + } + } + + // Delegate to superclass + return rmresShareCallback_IMPL(staticCast(pGpuResource, RmResource), pInvokingClient, pParentRef, pSharePolicy); +} + +NV_STATUS +gpuresGetRegBaseOffsetAndSize_IMPL +( + GpuResource *pGpuResource, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +gpuresGetMapAddrSpace_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pGpuResource); + NV_STATUS status; + NvU32 offset; + NvU32 size; + + // Default to REGMEM if the GPU resource has a register base and offset defined + status = gpuresGetRegBaseOffsetAndSize(pGpuResource, pGpu, &offset, &size); + if (status != NV_OK) + return status; + + if (pAddrSpace) + *pAddrSpace = ADDR_REGMEM; + + return NV_OK; +} + +/*! + * @brief Forward a control call to the Physical RM portion of this API. + */ +NV_STATUS +gpuresInternalControlForward_IMPL +( + GpuResource *pGpuResource, + NvU32 command, + void *pParams, + NvU32 size +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(GPU_RES_GET_GPU(pGpuResource)); + return pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pGpuResource), + gpuresGetInternalObjectHandle(pGpuResource), + command, + pParams, + size); +} + +/*! + * @brief Retrieve the handle associated with the Physical RM portion of the API. 
+ * For non-split object, this is the same as the handle of the object. + */ +NvHandle +gpuresGetInternalObjectHandle_IMPL(GpuResource *pGpuResource) +{ + return RES_GET_HANDLE(pGpuResource); +} + +NV_STATUS +gpuresControl_IMPL +( + GpuResource *pGpuResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + gpuresControlSetup(pParams, pGpuResource); + + return resControl_IMPL(staticCast(pGpuResource, RsResource), + pCallContext, pParams); +} + +void +gpuresControlSetup_IMPL +( + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + GpuResource *pGpuResource +) +{ + RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams; + pRmCtrlParams->pGpu = pGpuResource->pGpu; + + GPU_RES_SET_THREAD_BC_STATE(pGpuResource); +} + +void +gpuresSetGpu_IMPL +( + GpuResource *pGpuResource, + OBJGPU *pGpu, + NvBool bBcResource +) +{ + if (pGpu != NULL) + { + RmResource *pResource = staticCast(pGpuResource, RmResource); + pResource->rpcGpuInstance = gpuGetInstance(pGpu); + pGpuResource->pGpu = pGpu; + pGpuResource->pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpuResource->pGpu); + pGpuResource->bBcResource = bBcResource; + gpuSetThreadBcState(pGpu, bBcResource); + } +} + +NV_STATUS +gpuresGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hResource, + GpuResource **ppGpuResource +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppGpuResource = NULL; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + *ppGpuResource = dynamicCast(pResourceRef->pResource, GpuResource); + + return (*ppGpuResource) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +gpuresGetByDeviceOrSubdeviceHandle +( + RsClient *pClient, + NvHandle hResource, + GpuResource **ppGpuResource +) +{ + NV_STATUS status; + + status = gpuresGetByHandle(pClient, hResource, ppGpuResource); + + if (status != NV_OK) + return status; + + // Must be device or subdevice + if (!dynamicCast(*ppGpuResource, Device) && + !dynamicCast(*ppGpuResource, Subdevice)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_resource_desc.c b/src/nvidia/src/kernel/gpu/gpu_resource_desc.c new file mode 100644 index 000000000..959bd8965 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_resource_desc.c @@ -0,0 +1,512 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/*! + * @file + * @brief Object Manager: Object Classes are defined in this module. + */ + +#include "gpu/gpu.h" +#include "os/os.h" +#include "core/locks.h" +#include "nvrm_registry.h" +#include "lib/base_utils.h" + +ct_assert(NVOC_CLASS_ID_MAX_WIDTH <= SF_WIDTH(ENGDESC_CLASS)); + +NV_STATUS +gpuBuildClassDB_IMPL(OBJGPU *pGpu) +{ + PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder; + PCLASSDESCRIPTOR pClassDynamic; + const CLASSDESCRIPTOR *pClassStatic; + NvU32 numClasses; + NvU32 i, j; + NV_STATUS status; + PGPUCLASSDB pClassDB = &pGpu->classDB; + + // + // Calculate number of classes supported by this device. + // + // Loop through the list of GPU-specific classes throwing out any the + // rmconfig has marked not supported. + // + numClasses = 0; + + pClassStatic = &pEngineOrder->pClassDescriptors[0]; + for (i = 0; i < pEngineOrder->numClassDescriptors; i++) + { + // RMCONFIG: throw out any that are not supported + if (pClassStatic[i].externalClassId == (NvU32)~0) + continue; + + numClasses++; + } + + NV_PRINTF(LEVEL_INFO, "num class descriptors: 0x%x\n", numClasses); + + // + // Allocate space for correct number of entries. + // + pClassDynamic = portMemAllocNonPaged(sizeof(CLASSDESCRIPTOR) * numClasses); + if (pClassDynamic == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, "alloc failed: 0x%x\n", status); + DBG_BREAKPOINT(); + return status; + } + portMemSet((void *)pClassDynamic, 0, sizeof(CLASSDESCRIPTOR) * numClasses); + + // + // Now load up chip-dependent classes into pClass table. + // + pClassStatic = &pEngineOrder->pClassDescriptors[0]; + i = 0; + for (j = 0; j < pEngineOrder->numClassDescriptors; j++) + { + // RMCONFIG: skip over any that are not supported + if (pClassStatic[j].externalClassId == (NvU32)~0) + continue; + + // store info for class in class DB entry + pClassDynamic[i] = pClassStatic[j]; + + // move to next slot in class DB + i++; + } + + pClassDB->pClasses = pClassDynamic; + pClassDB->numClasses = numClasses; + pClassDB->pSuppressClasses = NULL; + pClassDB->bSuppressRead = NV_FALSE; + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +NV_STATUS +gpuDestroyClassDB_IMPL(OBJGPU *pGpu) +{ + portMemFree(pGpu->classDB.pClasses); + portMemFree(pGpu->classDB.pSuppressClasses); + + pGpu->engineDB.bValid = NV_FALSE; + return NV_OK; +} + +NvBool +gpuIsClassSupported_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + PCLASSDESCRIPTOR pClassDesc; + NV_STATUS status; + + status = gpuGetClassByClassId(pGpu, externalClassId, &pClassDesc); + + return (status == NV_OK) && (pClassDesc); +} + +NV_STATUS +gpuGetClassByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId, PCLASSDESCRIPTOR *ppClassDesc) +{ + PGPUCLASSDB pClassDB = &pGpu->classDB; + NvU32 i; + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (pClassDB->pClasses[i].externalClassId == externalClassId) + { + if (ppClassDesc != NULL) + { + *ppClassDesc = &pClassDB->pClasses[i]; + } + return NV_OK; + } + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +gpuGetClassByEngineAndClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId, NvU32 engDesc, PCLASSDESCRIPTOR *ppClassDesc) +{ + PGPUCLASSDB pClassDB = &pGpu->classDB; + NvU32 i; + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (pClassDB->pClasses[i].externalClassId == externalClassId && pClassDB->pClasses[i].engDesc == engDesc) + { + *ppClassDesc = &pClassDB->pClasses[i]; + return NV_OK; + } + } + + return NV_ERR_GENERIC; +} + +static NvU32 * +gpuGetSuppressedClassList +( + OBJGPU *pGpu +) +{ + NvU8 *pStr; + NvU8 *pEndStr; + NvU8 
*pSaveStr; + NvU32 strLength; + NvU32 nIndex; + NvU32 nCount = 0; + NvU32 *pData = NULL; + NvU32 numAModelClassesInChip = 0; + NvBool bSuppressClassList = NV_FALSE; + NvU32 numFound; + + // alloc regkey buffer + strLength = 256; + pStr = portMemAllocNonPaged(strLength); + if (pStr == NULL) + { + NV_PRINTF(LEVEL_ERROR, "portMemAllocNonPaged failed\n"); + return NULL; + } + + pSaveStr = pStr; + + if (osReadRegistryString(pGpu, NV_REG_STR_SUPPRESS_CLASS_LIST, pStr, &strLength) == NV_OK) + { + bSuppressClassList = NV_TRUE; + } + + if (bSuppressClassList) + { + // count number of classes + for (; *pStr; pStr = pEndStr, nCount++) + { + nvStrToL(pStr, &pEndStr, BASE16, 0, &numFound); + } + } + + // allocate memory only if there is something to suppress. + if ( ! ( nCount + numAModelClassesInChip ) ) + { + portMemFree(pSaveStr); + return NULL; + } + + // + // add one dword to store the count of classes here. + // This fixes a memory leak caused by changelist 1620538 + // + nCount++; + + pData = portMemAllocNonPaged(sizeof(NvU32)*(nCount + numAModelClassesInChip)); + if (pData == NULL) + { + NV_PRINTF(LEVEL_ERROR, "portMemAllocNonPaged failed\n"); + portMemFree(pSaveStr); + return NULL; + } + + // fill array -- first is number of classes + pData[0]=nCount; + + if (bSuppressClassList) + { + pStr = pSaveStr; + for (nIndex = 1; *pStr; pStr = pEndStr, nIndex++) + { + pData[nIndex] = nvStrToL(pStr, &pEndStr, BASE16, 0, &numFound); + } + } + + portMemFree(pSaveStr); + + return pData; +} + +/** + * @brief Returns list of classes supported by engDesc. + * If ( engDesc == ENG_INVALID ) returns classes + * supported by all engines. + * @param[in] pGpu OBJGPU pointer + * @param[in/out] pNumClasses in - denotes the size of pClassList when pClassList != NULL + out - when pClassList is NULL, denotes the number of matching + classes found + * @param[out] pClassList Returns matching class(s) when pNumClasses in not 0 + * @param[out] engDesc Engine ID + * + * @return NV_OK if class match found + */ +NV_STATUS +gpuGetClassList_IMPL(OBJGPU *pGpu, NvU32 *pNumClasses, NvU32 *pClassList, NvU32 engDesc) +{ + NvU32 *pSuppressClasses = NULL; + NvU32 numClasses; + NV_STATUS status = NV_OK; + NvU32 i, k; + NvBool bCount; + PCLASSDESCRIPTOR classDB = pGpu->classDB.pClasses; + + // Read the registry one time to get the list + if (NV_FALSE == pGpu->classDB.bSuppressRead) + { + pGpu->classDB.pSuppressClasses = gpuGetSuppressedClassList(pGpu); + pGpu->classDB.bSuppressRead = NV_TRUE; + } + + pSuppressClasses = pGpu->classDB.pSuppressClasses; + + numClasses = 0; + + for (i = 0; i < pGpu->classDB.numClasses; i++) + { + if ((engDesc != ENG_INVALID) && (classDB[i].engDesc != engDesc)) + continue; + + bCount = NV_TRUE; + + if (pSuppressClasses != NULL) + { + for (k=1; k < pSuppressClasses[0]; k++) + { + if (pSuppressClasses[k] == classDB[i].externalClassId) + { + bCount = NV_FALSE; + break; + } + } + } + + if (bCount) + { + // save the class in caller's buffer, if provided + if (pClassList) + { + if (numClasses < *pNumClasses) + pClassList[numClasses] = classDB[i].externalClassId; + else + status = NV_ERR_INVALID_PARAM_STRUCT; + } + numClasses++; + } + } + + // and return number of classes + if (status == NV_OK) + *pNumClasses = numClasses; + + return status; +} + +/*! + * @brief Add a class to class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. 
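+ *
+ * @note If a matching class/engine entry is already present in the class DB,
+ *       the call returns NV_OK without adding a duplicate entry.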
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pEngDesc EngDesc of Classes to be added to Class DB + * (NULL = don't care) + * @param[in] pExternalClassId Class to add to DB (NULL = don't care) + * + * @returns NV_STATUS - + * NV_ERR_INVALID_ARGUMENT if both pEngineTag and pClass are NULL. + * NV_OK otherwise + */ +static NV_STATUS +_gpuAddClassToClassDBByEngTagClassId(OBJGPU *pGpu, ENGDESCRIPTOR *pEngDesc, NvU32 *pExternalClassId) +{ + PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder; + const CLASSDESCRIPTOR *pClassDesc = &pEngineOrder->pClassDescriptors[0]; + PGPUCLASSDB pClassDB = &pGpu->classDB; + NvU32 numClasses = pClassDB->numClasses; + NvU32 i; + + NV_CHECK_OR_RETURN(LEVEL_INFO, (NULL != pEngDesc) || (NULL != pExternalClassId), NV_ERR_INVALID_ARGUMENT); + + // Return early if requested class/engine is already in classdb + for (i = 0; i < pClassDB->numClasses; i++) + { + if (((NULL == pEngDesc) || (pClassDB->pClasses[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDB->pClasses[i].externalClassId == *pExternalClassId))) + { + return NV_OK; + } + } + + // Populate the ClassDB with information from PMODULEDESCRIPTOR (R/O classhal.h data) + for (i = 0; i < pEngineOrder->numClassDescriptors; i++) + { + // RMCONFIG: skip over any that are not supported + if (pClassDesc[i].externalClassId == (NvU32)~0) + continue; + + if (((NULL == pEngDesc) || (pClassDesc[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDesc[i].externalClassId == *pExternalClassId))) + { + // store info for class in class DB entry + pClassDB->pClasses[numClasses] = pClassDesc[i]; + pClassDB->numClasses++; + break; + } + } + + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief Add a class to class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID of Classes to be added to Class DB + * @param[in] class Class to add to DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuAddClassToClassDBByEngTagClassId_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc, NvU32 externalClassId) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, &engDesc, &externalClassId); +} + +/*! + * @brief Add a class to class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine ID of Class to be added to Class DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS gpuAddClassToClassDBByEngTag_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, &engDesc, NULL); +} + +/*! + * @brief Add a class to class DB with given Class ID. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] class Class ID + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS gpuAddClassToClassDBByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + return _gpuAddClassToClassDBByEngTagClassId(pGpu, NULL, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pEngDesc Engine Tag of Classes to be removed from Class DB + * (NULL = don't care) + * @param[in] pExternalClassId Class to remove from DB (NULL = don't care) + * + * @returns NV_STATUS - NV_OK always. 
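+ *
+ * @note Matching entries are removed by compacting the class array in place;
+ *       the storage allocated by gpuBuildClassDB() is not resized.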
+ */ +static NV_STATUS +_gpuDeleteClassFromClassDBByEngTagClassId(OBJGPU *pGpu, ENGDESCRIPTOR *pEngDesc, NvU32 *pExternalClassId) +{ + PGPUCLASSDB pClassDB = &pGpu->classDB; + NvU32 i, j; + + NV_CHECK_OR_RETURN(LEVEL_INFO, (NULL != pEngDesc) || (NULL != pExternalClassId), NV_ERR_INVALID_ARGUMENT); + + for (i = 0; i < pClassDB->numClasses; i++) + { + if (((NULL == pEngDesc) || (pClassDB->pClasses[i].engDesc == *pEngDesc)) && + ((NULL == pExternalClassId) || (pClassDB->pClasses[i].externalClassId == *pExternalClassId))) + { + for (j = i; j < pClassDB->numClasses - 1; j++) + { + pClassDB->pClasses[j] = pClassDB->pClasses[j + 1]; + } + pClassDB->numClasses--; + i--; // Be sure to check the new entry at index i on the next loop. + } + } + + pGpu->engineDB.bValid = NV_FALSE; + + return NV_OK; +} + +/*! + * @brief Delete a class from class DB with given Engine Tag and Class Id. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc EngDesc of Classes to be removed from Class DB + * @param[in] externalClassId Class to remove from DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByEngTagClassId_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc, NvU32 externalClassId) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, &engDesc, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] externalClassId Class to remove from DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByClassId_IMPL(OBJGPU *pGpu, NvU32 externalClassId) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, NULL, &externalClassId); +} + +/*! + * @brief Delete a class from class DB with given Engine Tag. + * + * @side Sets engineDB.bValid to NV_FALSE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] engDesc Engine Descriptor of Classes to be removed from Class DB + * + * @returns NV_STATUS - NV_OK always. + */ +NV_STATUS +gpuDeleteClassFromClassDBByEngTag_IMPL(OBJGPU *pGpu, ENGDESCRIPTOR engDesc) +{ + return _gpuDeleteClassFromClassDBByEngTagClassId(pGpu, &engDesc, NULL); +} diff --git a/src/nvidia/src/kernel/gpu/gpu_rmapi.c b/src/nvidia/src/kernel/gpu/gpu_rmapi.c new file mode 100644 index 000000000..fb4a6b10a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_rmapi.c @@ -0,0 +1,877 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "class/cl0040.h" /* NV01_MEMORY_LOCAL_USER */ +#include "class/cl84a0.h" /* NV01_MEMORY_LIST_XXX */ +#include "class/cl00b1.h" /* NV01_MEMORY_HW_RESOURCES */ + +#include "nverror.h" + +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" +#include "rmapi/client.h" +#include "rmapi/resource_fwd_decls.h" +#include "core/thread_state.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/fifo/kernel_channel.h" + +NV_STATUS +gpuSetExternalKernelClientCount_IMPL(OBJGPU *pGpu, NvBool bIncr) +{ + if (bIncr) + { + pGpu->externalKernelClientCount++; + } + else + { + NV_ASSERT_OR_RETURN(pGpu->externalKernelClientCount > 0, NV_ERR_INVALID_OPERATION); + pGpu->externalKernelClientCount--; + } + + return NV_OK; +} + +// Get the count of user clients that are using given gpu +static NvU32 +_gpuGetUserClientCount +( + OBJGPU *pGpu, + NvBool bCount +) +{ + NvU32 count = 0; + Device *pDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + NV_STATUS status; + + // Search list of clients for any that have an InUse ref to the gpu + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + // Skip internal client + if (pRsClient->type == CLIENT_TYPE_KERNEL) + continue; + + status = deviceGetByGpu(pRsClient, pGpu, NV_TRUE /* bAnyInGroup */, &pDevice); + + if (status != NV_OK) + continue; + + count++; + + if (!bCount) + break; + } + + return count; +} + +NvBool +gpuIsInUse_IMPL +( + OBJGPU *pGpu +) +{ + return !!_gpuGetUserClientCount(pGpu, NV_FALSE) || + (pGpu->externalKernelClientCount > 0); +} + +// Get the count of user clients that are using given gpu +NvU32 +gpuGetUserClientCount_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuGetUserClientCount(pGpu, NV_TRUE); +} + +// Get the count of external clients (User+External modules) that are using given gpu +NvU32 +gpuGetExternalClientCount_IMPL +( + OBJGPU *pGpu +) +{ + return _gpuGetUserClientCount(pGpu, NV_TRUE) + pGpu->externalKernelClientCount; +} + +/** + * Find the GPU associated with a resource reference in this order: + * + * 1. Directly from the RsResource if the resource is a Device or Subdevice + * 2. From an ancestor subdevice (if any) + * 3. 
From an ancestor device (if any) + * + * If the resource your querying is guaranteed to be a GpuResource you should + * directly call GPU_RES_GET_GPU() + * + * @param[out] pbBroadcast True if the found GPU corresponds to a device + * [optional] + */ +NV_STATUS +gpuGetByRef +( + RsResourceRef *pContextRef, + NvBool *pbBroadcast, + OBJGPU **ppGpu +) +{ + NV_STATUS status = NV_OK; + RsResourceRef *pDeviceRef; + RsResourceRef *pSubdeviceRef; + GpuResource *pGpuResource; + + if (ppGpu != NULL) + *ppGpu = NULL; + + if (pContextRef == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pGpuResource = dynamicCast(pContextRef->pResource, GpuResource); + + // + // NULL check on GpuResource::pGpu as this routine is used from within + // GpuResource::Construct to initialize GpuResource::pGpu + // + if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL)) + { + status = refFindAncestorOfType(pContextRef, classId(Subdevice), &pSubdeviceRef); + if (status == NV_OK) + { + pGpuResource = dynamicCast(pSubdeviceRef->pResource, GpuResource); + if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL)) + status = NV_ERR_OBJECT_NOT_FOUND; + } + + if (status != NV_OK) + { + status = refFindAncestorOfType(pContextRef, classId(Device), &pDeviceRef); + if (status == NV_OK) + { + pGpuResource = dynamicCast(pDeviceRef->pResource, GpuResource); + if ((pGpuResource == NULL) || (pGpuResource->pGpu == NULL)) + status = NV_ERR_OBJECT_NOT_FOUND; + } + } + } + + if (status == NV_OK) + { + if (pbBroadcast != NULL) + *pbBroadcast = pGpuResource->bBcResource; + + if (ppGpu != NULL) + *ppGpu = pGpuResource->pGpu; + } + + return status; +} + +/** + * Wrapper for gpuGetByRef that takes a pClient + hResource instead of a + * pResourceRef. + * + * Find the GPU associated with a resource; + */ +NV_STATUS +gpuGetByHandle +( + RsClient *pClient, + NvHandle hResource, + NvBool *pbBroadcast, + OBJGPU **ppGpu +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + if (ppGpu != NULL) + *ppGpu = NULL; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + return gpuGetByRef(pResourceRef, pbBroadcast, ppGpu); +} + +/*! + * @brief Determine whether the given event should be triggered on the given + * subdevice based upon MIG attribution, and translate encoded global IDs into + * instance-local IDs if necessary + * + * @param[in] pSubdevice Subdevice under consideration for notification + * @param[in,out] pNotifyType Current notify type which may be overwritten + * @param[in,out] pInfo32 Current generic 32 bit info which may be + * overwritten + * @param[in] rcInstanceAttributionId MIG instance attribution valid only if + * *pNotifyType == NV2080_NOTIFIERS_RC_ERROR + * + * @return NV_OK if subdevice should be notified. + * pNotifyType and pInfo32 may be + * overwritten with new values + * NV_ERR_INSUFFICIENT_PERMISSIONS if client is not subscribed and + * unprivileged, and therefore should + * not be notified + * NV_ERR_OBJECT_NOT_FOUND if client is subscribed, but the + * notification references an object + * which is not part of their instance + * and therefore should not be notified. 
+ * NV_ERR_INVALID_ARGUMENT if NULL is provided for parameters + */ +static NV_STATUS +_gpuFilterSubDeviceEventInfo +( + OBJGPU *pGpu, + Subdevice *pSubdevice, + NvU32 *pNotifyType, + NvU32 *pInfo32, + NvU16 rcInstanceAttributionId +) +{ + MIG_INSTANCE_REF ref; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice->pDevice); + NvU32 engineIdx; + NvU32 engineType; + NvU32 localType; + NvU32 localIdx; + NV_STATUS status = NV_OK; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + NV_ASSERT_OR_RETURN(pNotifyType != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pInfo32 != NULL, NV_ERR_INVALID_ARGUMENT); + + // No filtering needed if no instances + if (!IS_MIG_IN_USE(pGpu)) + return NV_OK; + + // Retrieve instance reference for this client + status = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref); + if (status != NV_OK) + { + // + // Privileged or has the mig monitor capability, unsubscribed clients + // may be notified with no filtering, but other unsubscribed clients + // must not be notified + // + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilegeByHandle(hClient); + NV_CHECK_OR_RETURN(LEVEL_INFO, + rmclientIsCapableOrAdminByHandle(hClient, NV_RM_CAP_SYS_SMC_MONITOR, privLevel), + NV_ERR_INSUFFICIENT_PERMISSIONS); + return NV_OK; + } + + // Determine whether we need to convert global IDs to local + if (*pNotifyType == NV2080_NOTIFIERS_RC_ERROR) + { + // + // If this error was attributed, don't notify unless this client is + // subscribed to the attributed instance + // + if (kmigmgrIsInstanceAttributionIdValid(rcInstanceAttributionId)) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, + kmigmgrGetAttributionIdFromMIGReference(ref) == + rcInstanceAttributionId, + NV_ERR_OBJECT_NOT_FOUND); + } + + // + // RC errors encode the global error type in the *pInfo32, + // these need to be converted to local IDs + // + if (ROBUST_CHANNEL_IS_CE_ERROR(*pInfo32)) + { + engineIdx = ROBUST_CHANNEL_CE_ERROR_IDX(*pInfo32); + engineType = NV2080_ENGINE_TYPE_COPY(engineIdx); + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + return NV_ERR_OBJECT_NOT_FOUND; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + engineType, + &localType)); + localIdx = NV2080_ENGINE_TYPE_COPY_IDX(localType); + *pInfo32 = ROBUST_CHANNEL_CE_ERROR(localIdx); + } + else if (ROBUST_CHANNEL_IS_NVDEC_ERROR(*pInfo32)) + { + engineIdx = ROBUST_CHANNEL_NVDEC_ERROR_IDX(*pInfo32); + engineType = NV2080_ENGINE_TYPE_NVDEC(engineIdx); + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + return NV_ERR_OBJECT_NOT_FOUND; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + engineType, + &localType)); + localIdx = NV2080_ENGINE_TYPE_NVDEC_IDX(localType); + *pInfo32 = ROBUST_CHANNEL_NVDEC_ERROR(localIdx); + } + else if (ROBUST_CHANNEL_IS_NVENC_ERROR(*pInfo32)) + { + engineIdx = ROBUST_CHANNEL_NVENC_ERROR_IDX(*pInfo32); + engineType = NV2080_ENGINE_TYPE_NVENC(engineIdx); + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + return NV_ERR_OBJECT_NOT_FOUND; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + engineType, + &localType)); + localIdx = NV2080_ENGINE_TYPE_NVENC_IDX(localType); + *pInfo32 = ROBUST_CHANNEL_NVENC_ERROR(localIdx); + } + } + else if (NV2080_NOTIFIER_TYPE_IS_GR(*pNotifyType)) + { + engineIdx = NV2080_NOTIFIERS_GR_IDX(*pNotifyType); + engineType = 
NV2080_ENGINE_TYPE_GR(engineIdx); + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + return NV_ERR_OBJECT_NOT_FOUND; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + engineType, + &localType)); + localIdx = NV2080_ENGINE_TYPE_GR_IDX(localType); + *pNotifyType = NV2080_NOTIFIERS_GR(localIdx); + } + else if (NV2080_NOTIFIER_TYPE_IS_CE(*pNotifyType)) + { + engineIdx = NV2080_NOTIFIERS_CE_IDX(*pNotifyType); + engineType = NV2080_ENGINE_TYPE_COPY(engineIdx); + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + return NV_ERR_OBJECT_NOT_FOUND; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + engineType, + &localType)); + localIdx = NV2080_ENGINE_TYPE_COPY_IDX(localType); + *pNotifyType = NV2080_NOTIFIERS_CE(localIdx); + } + else if (NV2080_NOTIFIER_TYPE_IS_NVDEC(*pNotifyType)) + { + engineIdx = NV2080_NOTIFIERS_NVDEC_IDX(*pNotifyType); + engineType = NV2080_ENGINE_TYPE_NVDEC(engineIdx); + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + return NV_ERR_OBJECT_NOT_FOUND; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + engineType, + &localType)); + localIdx = NV2080_ENGINE_TYPE_NVDEC_IDX(localType); + *pNotifyType = NV2080_NOTIFIERS_NVDEC(localIdx); + } + else if (NV2080_NOTIFIER_TYPE_IS_NVENC(*pNotifyType)) + { + engineIdx = NV2080_NOTIFIERS_NVENC_IDX(*pNotifyType); + engineType = NV2080_ENGINE_TYPE_NVENC(engineIdx); + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + return NV_ERR_OBJECT_NOT_FOUND; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + engineType, + &localType)); + localIdx = NV2080_ENGINE_TYPE_NVENC_IDX(localType); + *pNotifyType = NV2080_NOTIFIERS_NVENC(localIdx); + } + return NV_OK; +} + +// +// For a particular gpu, find all the clients waiting for a particular event, +// fill in the notifier if allocated, and raise an event to the client if registered. +// +void +gpuNotifySubDeviceEvent_IMPL +( + OBJGPU *pGpu, + NvU32 notifyIndex, + void *pNotifyParams, + NvU32 notifyParamsSize, + NvV32 info32, + NvV16 info16 +) +{ + PEVENTNOTIFICATION pEventNotification; + THREAD_STATE_NODE *pCurThread; + RS_SHARE_ITERATOR it = serverutilShareIter(classId(NotifShare)); + NvU32 localNotifyType; + NvU32 localInfo32; + + if (NV_OK == threadStateGetCurrent(&pCurThread, pGpu)) + { + // This function shouldn't be used from lockless ISR. + // Use engineNonStallIntrNotify() to notify event from lockless ISR. + NV_ASSERT_OR_RETURN_VOID(!(pCurThread->flags & THREAD_STATE_FLAGS_IS_ISR_LOCKLESS)); + } + + NV_ASSERT(notifyIndex < NV2080_NOTIFIERS_MAXCOUNT); + + // search notifiers with events hooked up for this gpu + while (serverutilShareIterNext(&it)) + { + RsShared *pShared = it.pShared; + Subdevice *pSubdevice; + INotifier *pNotifier; + NotifShare *pNotifierShare = dynamicCast(pShared, NotifShare); + + if ((pNotifierShare == NULL) || (pNotifierShare->pNotifier == NULL)) + continue; + + pNotifier = pNotifierShare->pNotifier; + pSubdevice = dynamicCast(pNotifier, Subdevice); + + // Only notify matching GPUs + if ((pSubdevice == NULL) || (GPU_RES_GET_GPU(pSubdevice) != pGpu)) + continue; + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + // + // For SMC, partitioned engines have partition local IDs and events are + // registered using partition localId while RM deals with global Ids. 
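+        // (Illustrative example: for a client whose GPU instance contains only
+        // physical GR3, a global NV2080_NOTIFIERS_GR(3) event is remapped to
+        // NV2080_NOTIFIERS_GR(0) before it is delivered to that client.)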
+ // Convert global to partition local if necessary + // + localNotifyType = notifyIndex; + localInfo32 = info32; + + { + NvU16 partitionAttributionId = info16; + if (_gpuFilterSubDeviceEventInfo(pGpu, pSubdevice, + &localNotifyType, + &localInfo32, + partitionAttributionId) != NV_OK) + { + continue; + } + } + + if (pSubdevice->notifyActions[localNotifyType] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + continue; + } + + if (pSubdevice->hNotifierMemory != NV01_NULL_OBJECT && + pSubdevice->pNotifierMemory != NULL) + { + notifyFillNotifierMemory(pGpu, pSubdevice->pNotifierMemory, localInfo32, info16, + NV2080_SUBDEVICE_NOTIFICATION_STATUS_DONE_SUCCESS, localNotifyType); + } + + pEventNotification = inotifyGetNotificationList(pNotifier); + if (pEventNotification != NULL) + { + // ping any events on the list of type notifyIndex + osEventNotificationWithInfo(pGpu, pEventNotification, localNotifyType, localInfo32, info16, + pNotifyParams, notifyParamsSize); + } + + // reset if single shot notify action + if (pSubdevice->notifyActions[localNotifyType] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE) + { + if (notifyIndex == NV2080_NOTIFIERS_FIFO_EVENT_MTHD) + { + NV_ASSERT(pGpu->activeFifoEventMthdNotifiers); + pGpu->activeFifoEventMthdNotifiers--; + } + + pSubdevice->notifyActions[localNotifyType] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + } +} + + +// +// Searches the Pid Array to see if the process this client belongs to is already +// in the list. +// +static NvBool +_gpuiIsPidSavedAlready +( + NvU32 pid, + NvU32 *pPidArray, + NvU32 pidCount +) +{ + NvU32 j; + + for (j = 0; j < pidCount; j++) + { + if (pid == pPidArray[j]) + return NV_TRUE; + } + return NV_FALSE; +} + +// +// Searches through clients to find processes with clients that have +// allocated an ElementType of class, defined by elementID. The return values +// are the array containing the PIDs for the processes and the count for the +// array. 
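+// For example, a caller might pass internalClassId = classId(ChannelDescendant)
+// with elementID set to an external channel-object class to collect the PIDs of
+// the processes that have allocated that class on this GPU (illustrative usage).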
+// If a valid partitionRef is provided, the scope of search gets limited to a +// partition +// +NV_STATUS +gpuGetProcWithObject_IMPL +( + OBJGPU *pGpu, + NvU32 elementID, + NvU32 internalClassId, + NvU32 *pPidArray, + NvU32 *pPidArrayCount, + MIG_INSTANCE_REF *pRef +) +{ + NvU32 pidcount = 0; + NvHandle hClient; + Device *pDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN((pPidArray != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pPidArrayCount != NULL), NV_ERR_INVALID_ARGUMENT); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + NvBool elementInClient = NV_FALSE; + RS_ITERATOR iter; + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(*ppClient); + + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + hClient = pRsClient->hClient; + + // Skip reporting of kernel mode and internal RM clients + if ((privLevel >= RS_PRIV_LEVEL_KERNEL) && rmclientIsAdmin(pClient, privLevel)) + continue; + + if (_gpuiIsPidSavedAlready(pClient->ProcID, pPidArray, pidcount)) + continue; + + if (deviceGetByGpu(pRsClient, pGpu, NV_TRUE /* bAnyInGroup */, &pDevice) != NV_OK) + continue; + + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + if ((pKernelMIGManager != NULL) && (pRef->pKernelMIGGpuInstance != NULL)) + { + MIG_INSTANCE_REF clientRef = kmigmgrMakeNoMIGReference(); + if ((kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, + &clientRef) != NV_OK)) + continue; + + // Check partition subscription against requested partition + if ((pRef->pKernelMIGGpuInstance != clientRef.pKernelMIGGpuInstance) || + ((pRef->pMIGComputeInstance != NULL) && + (pRef->pMIGComputeInstance != clientRef.pMIGComputeInstance))) + { + continue; + } + } + } + + iter = serverutilRefIter(hClient, NV01_NULL_OBJECT, 0, RS_ITERATE_DESCENDANTS, NV_TRUE); + + // + // At this point it has been determined that the client's subdevice + // is associated with the Gpu of interest, and it is not already + // included in the pidArray. In the call, objects belonging to the + // client are returned. If any object in the client belongs to + // the class being queried, then that process is added to the array. + // + while (clientRefIterNext(iter.pClient, &iter)) + { + pResourceRef = iter.pResourceRef; + + if (!objDynamicCastById(pResourceRef->pResource, internalClassId)) + continue; + + switch (internalClassId) + { + case (classId(ChannelDescendant)): + { + ChannelDescendant *Object = dynamicCast(pResourceRef->pResource, ChannelDescendant); + if (RES_GET_EXT_CLASS_ID(Object) == elementID) + { + GpuResource *pGpuResource = (Object->pKernelChannel != NULL) + ? dynamicCast(Object->pKernelChannel, GpuResource) : NULL; + + if ((pGpuResource != NULL) && (GPU_RES_GET_DEVICE(pGpuResource) == pDevice)) + { + elementInClient = NV_TRUE; + } + } + break; + } + + case (classId(Device)): + case (classId(Subdevice)): + { + // + // It has been already verified that the client's subdevice + // or device is associated with the GPU of interest. + // Hence, Just add the client->pid into the list. 
+ // + elementInClient = NV_TRUE; + break; + } + case (classId(MpsApi)): + { + elementInClient = NV_TRUE; + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + } + if (elementInClient) + { + pPidArray[pidcount] = pClient->ProcID; + pidcount++; + + if (pidcount == NV2080_CTRL_GPU_GET_PIDS_MAX_COUNT) + { + NV_PRINTF(LEVEL_ERROR, + "Maximum PIDs reached. Returning.\n"); + + goto done; + } + + break; + } + } + } +done: + *pPidArrayCount = pidcount; + + return NV_OK; +} + +// +// _gpuCollectMemInfo +// +// Retrieves all the FB memory allocated for that client and returned as *pData. +// If the input parameter bIsGuestProcess is true, that means we are on VGX host +// and the caller is trying to find FB memory usage of a process which is +// running inside a VM. +// +static void +_gpuCollectMemInfo +( + NvHandle hClient, + NvHandle hDevice, + Heap *pTargetedHeap, + NV2080_CTRL_GPU_PID_INFO_VIDEO_MEMORY_USAGE_DATA *pData, + NvBool bIsGuestProcess, + NvBool bGlobalInfo +) +{ + RS_ITERATOR iter; + Memory *pMemory = NULL; + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN_VOID(pData != NULL); + + iter = serverutilRefIter(hClient, NV01_NULL_OBJECT, 0, RS_ITERATE_DESCENDANTS, NV_TRUE); + + while (clientRefIterNext(iter.pClient, &iter)) + { + pResourceRef = iter.pResourceRef; + pMemory = dynamicCast(pResourceRef->pResource, Memory); + + if (!pMemory) + continue; + + // In case we are trying to find memory allocated by a process running + // on a VM - the case where isGuestProcess is true, only consider the + // memory : + // 1. which is allocated by the guest VM or by a process running in it. + // 2. if the memory is not tagged with NVOS32_TYPE_UNUSED type. + // Windows KMD and Linux X driver makes dummy allocations which is + // done using NV01_MEMORY_LOCAL_USER class with rmAllocMemory() + // function. + // On VGX, while passing this allocation in RPC, we use the memory + // type NVOS32_TYPE_UNUSED. So while calculating the per process FB + // usage, only consider the allocation if memory type is not + // NVOS32_TYPE_UNUSED. + if ((pResourceRef->externalClassId == NV01_MEMORY_LOCAL_USER || + pResourceRef->externalClassId == NV01_MEMORY_LIST_FBMEM || + pResourceRef->externalClassId == NV01_MEMORY_LIST_OBJECT || + pResourceRef->externalClassId == NV01_MEMORY_HW_RESOURCES) && + (pMemory->categoryClassId == NV01_MEMORY_LOCAL_USER) && + (bGlobalInfo || (pMemory->pHeap == pTargetedHeap)) && + (RES_GET_HANDLE(pMemory->pDevice) == hDevice) && + (pMemory->pMemDesc != NULL) && + ((!bIsGuestProcess && (!memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_LIST_MEMORY))) || + (bIsGuestProcess && (memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) && (pMemory->Type != NVOS32_TYPE_UNUSED)))) + { + + if (pMemory->pMemDesc->DupCount == 1) + { + pData->memPrivate += pMemory->Length; + } + else if (pMemory->isMemDescOwner) + { + pData->memSharedOwned += pMemory->Length; + } + else + { + pData->memSharedDuped += pMemory->Length; + } + } + } +} + +// +// This function takes in the PID for the process of interest, and queries all +// clients for elementType. The 64-bit Data is updated by specific functions +// which handle queries for different elementTypes. 
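+// For example, a query with internalClassId = classId(Memory) walks the matching
+// client's Memory objects via _gpuCollectMemInfo() and accumulates the per-process
+// FB usage into pData->vidMemUsage.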
+// +NV_STATUS +gpuFindClientInfoWithPidIterator_IMPL +( + OBJGPU *pGpu, + NvU32 pid, + NvU32 subPid, + NvU32 internalClassId, + NV2080_CTRL_GPU_PID_INFO_DATA *pData, + NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo, + MIG_INSTANCE_REF *pRef, + NvBool bGlobalInfo +) +{ + NvHandle hClient; + Device *pDevice; + NvHandle hDevice; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU32 computeInstanceId = PARTITIONID_INVALID; + NvU32 gpuInstanceId = PARTITIONID_INVALID; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN((pid != 0), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pData != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pSmcInfo != NULL), NV_ERR_INVALID_ARGUMENT); + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (((subPid == 0) && (pClient->ProcID == pid)) || + ((subPid != 0) && (pClient->ProcID == pid) && (pClient->SubProcessID == subPid))) + { + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pClient); + hClient = pRsClient->hClient; + + // Skip reporting of kernel mode and internal RM clients + if ((privLevel >= RS_PRIV_LEVEL_KERNEL) && rmclientIsAdmin(pClient, privLevel)) + continue; + + if (deviceGetByGpu(pRsClient, pGpu, NV_TRUE, &pDevice) != NV_OK) + continue; + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF clientRef = kmigmgrMakeNoMIGReference(); + + if (kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, + &clientRef) != NV_OK) + { + continue; + } + + gpuInstanceId = clientRef.pKernelMIGGpuInstance->swizzId; + if (clientRef.pMIGComputeInstance != NULL) + { + computeInstanceId = clientRef.pMIGComputeInstance->id; + } + + // Check instance subscription against requested instance reference + if (!bGlobalInfo && + !kmigmgrAreMIGReferencesSame(pRef, &clientRef)) + { + continue; + } + + if (kmigmgrIsMIGReferenceValid(pRef)) + { + pHeap = pRef->pKernelMIGGpuInstance->pMemoryPartitionHeap; + } + } + + hDevice = RES_GET_HANDLE(pDevice); + + switch (internalClassId) + { + case (classId(Memory)): + { + // TODO - + // When single process spanning across multiple GI or CI by creating multiple + // clients, RM needs to provide the unique list being used by the client + _gpuCollectMemInfo(hClient, hDevice, pHeap, + &pData->vidMemUsage, ((subPid != 0) ? NV_TRUE : NV_FALSE), + bGlobalInfo); + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + } + } + } + + pSmcInfo->computeInstanceId = computeInstanceId; + pSmcInfo->gpuInstanceId = gpuInstanceId; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_timeout.c b/src/nvidia/src/kernel/gpu/gpu_timeout.c new file mode 100644 index 000000000..4a0d26e55 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_timeout.c @@ -0,0 +1,541 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief GPU Timeout related routines. + */ + +/* ------------------------ Includes ---------------------------------------- */ + +#include "lib/base_utils.h" +#include "gpu/gpu.h" +#include "objtmr.h" +#include "nvrm_registry.h" +#include "core/thread_state.h" +#include "core/locks.h" +#include "gpu_mgr/gpu_mgr.h" + +/* ------------------------ Public Functions ------------------------------- */ + +/*! + * @brief Initializes default timeout values from a provided GPU. + */ +void +timeoutInitializeGpuDefault +( + TIMEOUT_DATA *pTD, + OBJGPU *pGpu +) +{ + NvU32 timeoutDefault; + + pTD->pGpu = pGpu; + + // Set default timeout mode before loading HAL state + osGetTimeoutParams(pGpu, &timeoutDefault, &(pTD->scale), &(pTD->defaultFlags)); + if (!pTD->bDefaultOverridden) + { + pTD->defaultResetus = timeoutDefault; + pTD->defaultus = timeoutDefault; + pTD->bScaled = NV_FALSE; + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the host, use the OS + // timer by default in the guest OS (where IS_VIRTUAL(pGpu) is true), + // as it (hopefully) tracks a VM's actual time executing + // (vs. reading the HW PTIMER which'll be too fast). + // SOC NvDisplay: + // SOC NvDisplay doesn't have HW timer so use OSTIMER as default + // + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + } + + // Using this boolean to ensure defaultus isn't scaled more than once. + if (!pTD->bScaled) + { + pTD->defaultus = gpuScaleTimeout(pGpu, pTD->defaultus); + pTD->bScaled = NV_TRUE; + } + + // + // Note we need to call threadStateResetTimeout() now that the timeout + // mechanism and values are known to allow threadStateCheckTimeout() + // to work after this point during init. + // + threadStateInitTimeout(pGpu, pTD->defaultus, pTD->defaultFlags); + threadStateResetTimeout(pGpu); +} + +/*! + * @brief Applies external timeout override based on registry values. + */ +void +timeoutRegistryOverride +( + TIMEOUT_DATA *pTD, + OBJGPU *pGpu +) +{ + NvU32 data32 = 0; + + // Override timeout value + if ((osReadRegistryDword(pGpu, + NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT, + &data32) == NV_OK) && (data32 != 0)) + { + // Handle 32-bit overflow. 
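+        // The registry value is in milliseconds; the conversion to microseconds
+        // below multiplies by 1000, so any value above NV_U32_MAX / 1000 is
+        // clamped to NV_U32_MAX instead of being allowed to wrap.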
+ if (data32 > (NV_U32_MAX / 1000)) + { + pTD->defaultus = NV_U32_MAX; + pTD->defaultResetus = NV_U32_MAX; + } + else + { + // Convert to [us] + pTD->defaultus = data32 * 1000; + pTD->defaultResetus = data32 * 1000; + } + pTD->bDefaultOverridden = NV_TRUE; + NV_PRINTF(LEVEL_ERROR, "Overriding default timeout to 0x%08x\n", + pTD->defaultus); + } + + // Override timeout flag values + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS, + &data32) == NV_OK) + { + switch (data32) + { + case NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSDELAY: + { + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSDELAY; + break; + } + + case NV_REG_STR_RM_OVERRIDE_DEFAULT_TIMEOUT_FLAGS_OSTIMER: + { + pTD->defaultFlags = GPU_TIMEOUT_FLAGS_OSTIMER; + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "Unknown TIMEOUT_FLAGS value: 0x%08x\n", + data32); + NV_ASSERT(0); + } + } + + NV_PRINTF(LEVEL_ERROR, "Overriding default flags to 0x%08x\n", + pTD->defaultFlags); + } +} + +/*! + * @brief Applies external timeout override. + */ +void +timeoutOverride +( + TIMEOUT_DATA *pTD, + NvBool bOverride, + NvU32 timeoutMs +) +{ + pTD->bDefaultOverridden = bOverride; + + pTD->defaultus = bOverride ? (timeoutMs * 1000) : pTD->defaultResetus; +} + +/*! + * @brief Initialize the RMTIMEOUT structure with the selected timeout scheme. + */ +void +timeoutSet +( + TIMEOUT_DATA *pTD, + RMTIMEOUT *pTimeout, + NvU32 timeoutUs, + NvU32 flags +) +{ + OBJTMR *pTmr; + NvU64 timeInNs; + NvU64 timeoutNs; + + portMemSet(pTimeout, 0, sizeof(*pTimeout)); + + // + // Note that if GPU_TIMEOUT_DEFAULT is used we will go through + // threadStateCheckTimeout rather than timeoutCheck as we do + // not want to have "stacked" gpuSetTimeouts. The intent of + // GPU_TIMEOUT_DEFAULT was to cover the *entire* RM API stack. + // If GPU_TIMEOUT_DEFAULT was specified, this is essentially a + // NULL operation other than setting the flags to route us to + // threadStateCheckTimeout. This can be overridden by + // setting GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE. + // + pTimeout->flags = flags; + if ((flags == 0) || (flags & GPU_TIMEOUT_FLAGS_DEFAULT) || + !(flags & (GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_OSDELAY | + GPU_TIMEOUT_FLAGS_TMR | GPU_TIMEOUT_FLAGS_TMRDELAY))) + { + pTimeout->flags |= pTD->defaultFlags; + } + + if (timeoutUs == GPU_TIMEOUT_DEFAULT) + { + timeoutUs = pTD->defaultus; + + // + // Use the ThreadState by default if GPU_TIMEOUT_DEFAULT was specified + // unless we were told explicitly not to. + // ThreadState only supports OSTIMER and OSDELAY + // + if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE) && + (pTimeout->flags & (GPU_TIMEOUT_FLAGS_OSTIMER | GPU_TIMEOUT_FLAGS_OSDELAY))) + { + pTimeout->flags |= GPU_TIMEOUT_FLAGS_USE_THREAD_STATE; + } + } + + // Set end time for elapsed time methods + timeoutNs = (NvU64)timeoutUs * 1000; + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + // + // For small timeouts (timeout durations on the order of magnitude of + // the OS tick resolution), starting the timeout near the end of a tick + // could cause a premature timeout since the start time is determined + // by the start of the tick. Mitigate this by always padding the + // timeout using the OS tick resolution, to bump us to the next tick. 
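+        // (For example, assuming a hypothetical 10 ms tick resolution, a 1 ms
+        // timeout is padded to 11 ms measured from the start of the current
+        // tick, so it cannot expire before roughly 1 ms of real time elapses.)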
+ // + timeoutNs += osGetTickResolution(); + + osGetCurrentTick(&timeInNs); + + pTimeout->pTmrGpu = NULL; + pTimeout->timeout = timeInNs + timeoutNs; + } + else if ((pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR) || + (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMRDELAY)) + { + OBJGPU *pGpu = pTD->pGpu; + NV_ASSERT_OR_RETURN_VOID(pGpu != NULL); + + OBJGPU *pParentGpu = gpumgrGetParentGPU(pGpu); + + // + // Set timer GPU to primary GPU for accurate timeout with SLI loop. But only + // use the primary GPU if it is in full power mode or in the process of resuming. + // Also don't use the primary if it is in full chip reset. + // + if (gpumgrIsParentGPU(pGpu) || + ((gpuIsGpuFullPower(pParentGpu) == NV_FALSE) && + !pParentGpu->getProperty(pParentGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) || + pParentGpu->getProperty(pParentGpu, PDB_PROP_GPU_IN_FULLCHIP_RESET)) + { + pTimeout->pTmrGpu = pGpu; + } + else + { + pTimeout->pTmrGpu = pParentGpu; + } + + pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu); + NV_ASSERT_OR_RETURN_VOID(pTmr != NULL); + + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR) + { + + // nanoseconds + tmrGetCurrentTime(pTmr, &pTimeout->timeout); + pTimeout->timeout += timeoutNs; + } + else // GPU_TIMEOUT_FLAGS_TMRDELAY + { + pTimeout->timeout = timeoutUs; + } + } + else + { + pTimeout->pTmrGpu = NULL; + pTimeout->timeout = timeoutUs; + } +} + +/*! + * We typically only use this code if a time other than GPU_TIMEOUT_DEFAULT + * was specified. For GPU_TIMEOUT_DEFAULT we use threadStateCheckTimeout. + * The logic in the _threadNodeCheckTimeout() should closely resemble that + * of the _checkTimeout(). + */ +static NV_STATUS +_checkTimeout +( + RMTIMEOUT *pTimeout +) +{ + NV_STATUS status = NV_OK; + OBJTMR *pTmr; + NvU64 current; + NvU64 timeInNs; + + if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSTIMER) + { + osGetCurrentTick(&timeInNs); + if (timeInNs >= pTimeout->timeout) + { + NV_PRINTF(LEVEL_INFO, "OS elapsed %llx >= %llx\n", + timeInNs, pTimeout->timeout); + status = NV_ERR_TIMEOUT; + } + } + else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_OSDELAY) + { + osDelayUs(100); + + // + // TODO: Bug: 3312158 - Isolate the fix timeout logic to emulation. + // This is because of the numerous timeout issues exposed in DVS + // Emulation requires this to make sure we are not wasting emulation resources + // by waiting for timeouts too long. + // Once DVS issues are fixed, this fix will be enabled for all platforms. + // + if ((pTimeout->pTmrGpu != NULL) && (IS_EMULATION(pTimeout->pTmrGpu))) + { + // + // Adjust the remaining time. 
Note that the remaining time is in nanoseconds unit + // for GPU_TIMEOUT_FLAGS_OSDELAY + // + pTimeout->timeout -= NV_MIN(100ULL * 1000ULL, pTimeout->timeout); + } + else + { + pTimeout->timeout -= NV_MIN(100ULL , pTimeout->timeout); + } + + if (pTimeout->timeout == 0) + { + NV_PRINTF(LEVEL_INFO, "OS timeout == 0\n"); + status = NV_ERR_TIMEOUT; + } + } + else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMR) + { + NV_ASSERT_OR_RETURN(pTimeout->pTmrGpu != NULL, NV_ERR_INVALID_STATE); + if (!API_GPU_ATTACHED_SANITY_CHECK(pTimeout->pTmrGpu)) + return NV_ERR_TIMEOUT; + + pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu); + NV_ASSERT_OR_RETURN(pTmr != NULL, NV_ERR_INVALID_STATE); + + tmrDelay(pTmr, 5ULL * 1000ULL); + tmrGetCurrentTime(pTmr, ¤t); + + if (current >= pTimeout->timeout) + { + NV_PRINTF(LEVEL_ERROR, "ptmr elapsed %llx >= %llx\n", + current, pTimeout->timeout); + status = NV_ERR_TIMEOUT; + } + } + else if (pTimeout->flags & GPU_TIMEOUT_FLAGS_TMRDELAY) + { + NV_ASSERT_OR_RETURN(pTimeout->pTmrGpu != NULL, NV_ERR_INVALID_STATE); + if (!API_GPU_ATTACHED_SANITY_CHECK(pTimeout->pTmrGpu)) + return NV_ERR_TIMEOUT; + + pTmr = GPU_GET_TIMER(pTimeout->pTmrGpu); + NV_ASSERT_OR_RETURN(pTmr != NULL, NV_ERR_INVALID_STATE); + + tmrDelay(pTmr, 5ULL * 1000ULL); + pTimeout->timeout -= NV_MIN(5, pTimeout->timeout); + + if (pTimeout->timeout == 0) + { + NV_PRINTF(LEVEL_INFO, "ptmr timeout == 0\n"); + status = NV_ERR_TIMEOUT; + } + } + else + { + NV_PRINTF(LEVEL_ERROR, "Invalid timeout flags 0x%08x\n", + pTimeout->flags); + DBG_BREAKPOINT(); + status = NV_ERR_INVALID_STATE; + } + + return status; +} + +/*! + * @brief Check if the passed in RMTIMEOUT struct has expired. + */ +NV_STATUS +timeoutCheck +( + TIMEOUT_DATA *pTD, + RMTIMEOUT *pTimeout, + NvU32 lineNum +) +{ + OBJGPU *pGpu = pTD->pGpu; + NV_STATUS status = NV_OK; + + NV_ASSERT(pTimeout != NULL); + + if ((pGpu != NULL) && API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + return NV_ERR_TIMEOUT; + + if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_CPU_YIELD)) + { + threadStateYieldCpuIfNecessary(pGpu); + } + + // + // Note that if GPU_TIMEOUT_DEFAULT is used we will go through + // threadStateCheckTimeout rather than timeoutCheck as we do + // not want to have "stacked" gpuSetTimeouts. The intent of + // GPU_TIMEOUT_DEFAULT is to cover the *entire* RM API stack. + // If we are going through the case below, we should have just + // called threadStateCheckTimeout directly rather than + // timeoutCheck. + // + + // If local timeout check was intended, check that first. + if (!(pTimeout->flags & GPU_TIMEOUT_FLAGS_USE_THREAD_STATE)) + { + status = _checkTimeout(pTimeout); + if (status == NV_ERR_TIMEOUT) + { + // Mark that this Timeout is the result of a local timeout + pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT; + } + } + + // + // Always check for the thread timeout in addition to any local timeout + // unless we have EXPLICITLY been instructed not to by a timeout flag. + // + if ((status != NV_ERR_TIMEOUT) && !(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE)) + { + status = threadStateCheckTimeout(pGpu, NULL /*pElapsedTime*/); + + if (status == NV_ERR_TIMEOUT) + { + // Mark that this Timeout is the result of ThreadState + pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_THREAD_STATE_TIMEOUT; + } + else if (status != NV_OK) + { + // Try the local timeout as fallback, unless it was already checked. 
+ if (pTimeout->flags & GPU_TIMEOUT_FLAGS_USE_THREAD_STATE) + { + status = _checkTimeout(pTimeout); + if (status == NV_ERR_TIMEOUT) + { + // Mark that this Timeout is the result of a local timeout + pTimeout->flags |= GPU_TIMEOUT_FLAGS_STATUS_LOCAL_TIMEOUT; + } + } + } + } + + // Throttle priority of boosted threads if necessary + threadPriorityThrottle(); + + // Log the Timeout in the RM Journal + if ( (status == NV_ERR_TIMEOUT) && + !(pTimeout->flags & GPU_TIMEOUT_FLAGS_BYPASS_JOURNAL_LOG)) + { + NvU64 funcAddr = (NvU64) (NV_RETURN_ADDRESS()); + threadStateLogTimeout(pGpu, funcAddr, lineNum); + } + + return status; +} + +/*! + * @brief Wait for a condition function to return NV_TRUE or timeout. + * + * @param[in] pTD Timeout data + * @param[in] pTimeout RM timeout structure to be used, or NULL to use default timeout + * @param[in] pCondFunc Function implementing condition check to wait for + * @param[in] pCondData An optional param to @ref pCondFunc (NULL if unused) + * + * @return NV_OK Condition met within the provided timeout period. + * @return NV_ERR_TIMEOUT Timed out while waiting for the condition. + * + * @note This interface addresses the recurring problem of reporting time-out + * when condition is actually met. That can happen since RM can get + * preempted by the OS any time during the execution. It is achieved by + * one additional condition check before the exit in case when timeout + * has been detected. + */ +NV_STATUS +timeoutCondWait +( + TIMEOUT_DATA *pTD, + RMTIMEOUT *pTimeout, + GpuWaitConditionFunc *pCondFunc, + void *pCondData, + NvU32 lineNum +) +{ + OBJGPU *pGpu = pTD->pGpu; + NV_STATUS status = NV_OK; + RMTIMEOUT timeout; + + if (pTimeout == NULL) + { + timeoutSet(pTD, &timeout, GPU_TIMEOUT_DEFAULT, 0); + pTimeout = &timeout; + } + + while (!pCondFunc(pGpu, pCondData)) + { + osSpinLoop(); + + status = timeoutCheck(pTD, pTimeout, lineNum); + if (status != NV_OK) + { + if ((status == NV_ERR_TIMEOUT) && + pCondFunc(pGpu, pCondData)) + { + status = NV_OK; + } + break; + } + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gpu_uuid.c b/src/nvidia/src/kernel/gpu/gpu_uuid.c new file mode 100644 index 000000000..c259e1373 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gpu_uuid.c @@ -0,0 +1,317 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu_uuid.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "os/os.h" +#include "nvSha1.h" + +/** + * @brief Transforms a raw GPU ID into an ASCII string of the form + * "GPU-%08x-%04x-%04x-%04x-%012x" (SHA-1) + * + * @param[in] pGidData Raw GID from OBJPMU/OBJBIF + * @param[in] gidSize Size of the raw ID + * @param[out] ppGidString Return pointer for the GID string + * @param[out] pGidStrlen Return pointer for the GID string length + * @param[in] gidFlags NV2080_GPU_CMD_GPU_GET_GID_FLAGS values: selects + * SHA-1 only + * + * @returns matching mapping, or NULL if not found. + */ +NV_STATUS +transformGidToUserFriendlyString +( + const NvU8 *pGidData, + NvU32 gidSize, + NvU8 **ppGidString, + NvU32 *pGidStrlen, + NvU32 gidFlags +) +{ + NvUuid uuid; + + if (!FLD_TEST_DRF(2080_GPU_CMD,_GPU_GET_GID_FLAGS,_TYPE,_SHA1,gidFlags)) + { + return NV_ERR_INVALID_FLAGS; + } + + NV_ASSERT(NV_UUID_LEN == gidSize); + + portMemCopy(uuid.uuid, NV_UUID_LEN, pGidData, gidSize); + + *ppGidString = portMemAllocNonPaged(NV_UUID_STR_LEN); + if (*ppGidString == NULL) + { + return NV_ERR_NO_MEMORY; + } + + nvGetGpuUuidString(&uuid, (char*)*ppGidString); + *pGidStrlen = NV_UUID_STR_LEN; + + return NV_OK; +} + +static NvU32 +_nvCopyUuid +( + NvU8 *pBuff, + NvU32 index, + NvU32 size, + void *pInfo +) +{ + NvU8 *pBytes = pInfo; + portMemCopy(pBuff, size, pBytes + index, size); + return size; +} + +/** + * @brief Generates SHA1 UUID for a GPU or a MIG instance. + * + * The UUID will be computed as SHA1(message) where the message is as follows: + * + * offset 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 + * value c8 16 c9 a3 52 24 56 bf 9d 9a ac 7e a7 03 fb 5b + * + * offset 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + * value N V I D I A '' G P U 02 x x 08 y y + * + * offset 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 + * value y y y y y y 0b S M C z z z z p p + * + * offset 48 49 + * value p p + */ +/** + * where, + * Char is the byte value in ASCII encoding ('' is space = 0x20) + * Number is the numeric byte value in hex (0x02) + * xx is the chip id in little endian format. + * The chip ID ARCH+IMPL. For example: 0x017B for GA10B + * yyyyyyyy is the 64-bit PDI in little endian. PDI = (PDI_1 << 32) OR PDI_0. + * + * Additionally, when fractional GPU with MIG is used, and the MIG + * configurations are exposed as separate logical devices, the following bytes + * are appended in the message: + * + * zzzz is the numeric value of the swizzle id (32-bit little-endian) + * pppp is the numeric value of the graphics engine physical + * sys pipe ID (32-bit little-endian) + * + * See bug 3028068 for more details. 
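+ *
+ * As a worked example of the little-endian encoding above: for a GA10B
+ * (chip ID 0x017B) with a hypothetical PDI of 0x1122334455667788, the
+ * message holds 0x7B 0x01 at offsets 27..28 and
+ * 0x88 0x77 0x66 0x55 0x44 0x33 0x22 0x11 at offsets 30..37. The GPU-only
+ * message ends after offset 37 (GPU_UUID_MESSAGE_SIZE = 38), while the MIG
+ * form appends the swizzle and syspipe IDs through offset 49
+ * (SMC_UUID_MESSAGE_SIZE = 50).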
+ * + * @param[in] bMIG "MIG" or "GPU" UUID prefix + * @param[in] chipId GPU chip ID + * @param[in] pdi GPU PDI + * @param[in] swizzId MIG GPU instance swizz ID (only needed for MIG) + * @param[in] syspipeId MIG GPU instance syspipe ID (only needed for MIG) + * @param[out] pUuid UUID + * + * @returns NV_OK upon success, otherwise returns NV_ERR_* + */ + +#define UUID_MESSAGE_SIZE 50 +#define GPU_UUID_MESSAGE_SIZE 38 +#define SMC_UUID_MESSAGE_SIZE UUID_MESSAGE_SIZE + +static const NvU8 uuidMessage[UUID_MESSAGE_SIZE] = +{ + 0xc8, 0x16, 0xc9, 0xa3, 0x52, 0x24, 0x56, 0xbf, 0x9d, 0x9a, 0xac, + 0x7e, 0xa7, 0x03, 0xfb, 0x5b, 'N', 'V', 'I', 'D', 'I', 'A', + ' ', 'G', 'P', 'U', 0x02, 'x', 'x', 0x08, 'y', 'y', 'y', + 'y', 'y', 'y', 'y', 'y', 0x0b, 'S', 'M', 'C', 'z', 'z', + 'z', 'z', 'p', 'p', 'p', 'p' +}; + +static NV_STATUS +_nvGenerateUuid +( + NvBool bMIG, + NvU16 chipId, + NvU64 pdi, + NvU32 swizzId, + NvU32 syspipeId, + NvUuid *pUuid +) +{ + NvU8 *pSha1Digest; + NvU8 *pMessage; + NvU32 messageSize = GPU_UUID_MESSAGE_SIZE; + + pSha1Digest = portMemAllocNonPaged(NV_SHA1_DIGEST_LENGTH + + UUID_MESSAGE_SIZE); + if (pSha1Digest == NULL) + { + return NV_ERR_NO_MEMORY; + } + + pMessage = pSha1Digest + NV_SHA1_DIGEST_LENGTH; + + portMemCopy(pMessage, UUID_MESSAGE_SIZE, uuidMessage, UUID_MESSAGE_SIZE); + + portUtilWriteLittleEndian16(&pMessage[27], chipId); + portUtilWriteLittleEndian64(&pMessage[30], pdi); + + if (bMIG) + { + portUtilWriteLittleEndian32(&pMessage[42], swizzId); + portUtilWriteLittleEndian32(&pMessage[46], syspipeId); + + messageSize = SMC_UUID_MESSAGE_SIZE; + } + + // UUID strings only use the first 16 bytes of the 20-byte SHA-1 digest. + sha1Generate(pSha1Digest, pMessage, messageSize, _nvCopyUuid); + portMemCopy(pUuid->uuid, NV_UUID_LEN, pSha1Digest, NV_UUID_LEN); + + // version 5 - SHA1-based + pUuid->uuid[6] = (pUuid->uuid[6] & 0x0f) | 0x50; + // variant 1 - network byte ordering + pUuid->uuid[8] = (pUuid->uuid[8] & 0x3f) | 0x80; + + portMemFree(pSha1Digest); + + return NV_OK; +} + +/** + * @brief Generates SHA1 UUID for GPU. + * + * @param[in] chipId GPU chip ID + * @param[in] pdi GPU PDI + * @param[out] pUuid UUID + * + * @returns NV_OK upon success, otherwise returns NV_ERR_* + */ +NV_STATUS +nvGenerateGpuUuid +( + NvU16 chipId, + NvU64 pdi, + NvUuid *pUuid +) +{ + return _nvGenerateUuid(NV_FALSE, chipId, pdi, 0, 0, pUuid); +} + +/** + * @brief Generates SHA1 UUID for MIG instance. 
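+ *        This is the same construction as nvGenerateGpuUuid(), with the
+ *        SMC suffix bytes (swizz/syspipe IDs) appended to the hashed
+ *        message (bMIG = NV_TRUE in _nvGenerateUuid()).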
+ * + * @param[in] chipId GPU chip ID + * @param[in] pdi GPU PDI + * @param[in] swizzId MIG GPU instance swizz ID (only needed for _TYPE_SMC) + * @param[in] syspipeId MIG GPU instance syspipe ID (only needed for _TYPE_SMC) + * @param[out] pUuid UUID + * + * @returns NV_OK upon success, otherwise returns NV_ERR_* + */ +NV_STATUS +nvGenerateSmcUuid +( + NvU16 chipId, + NvU64 pdi, + NvU32 swizzId, + NvU32 syspipeId, + NvUuid *pUuid +) +{ + return _nvGenerateUuid(NV_TRUE, chipId, pdi, swizzId, syspipeId, pUuid); +} + +/** + * @brief Gets UUID ASCII string, "GPU-%08x-%04x-%04x-%04x-%012x" + * (SHA-1) or "MIG-%08x-%04x-%04x-%04x-%012x" (SHA-1) + * + * @param[in] bMIG "MIG" or "GPU" UUID prefix + * @param[in] pUuid UUID + * @param[out] pUuidStr Returns UUID string + * + * @returns void + */ +static void +_nvGetUuidString +( + NvBool bMIG, + const NvUuid *pUuid, + char *pUuidStr +) +{ + const NvU32 sha1GroupEntryNum[] = { 8, 4, 4, 4, 12 }; + const NvU32 *pGroupEntryNum; + const NvU32 extraSymbolLen = 9; // 'G' 'P' 'U' '-'(x5), '\0x0', total = 9 + const NvU8 prefixLen = 4; + const char *pPrefix; + NvU32 groupCount; + NvU32 expectedStringLength = (NV_UUID_LEN << 1) + extraSymbolLen; + + pGroupEntryNum = sha1GroupEntryNum; + groupCount = NV_ARRAY_ELEMENTS(sha1GroupEntryNum); + + pPrefix = bMIG ? "MIG-" : "GPU-"; + portMemCopy(pUuidStr, prefixLen, pPrefix, prefixLen); + pUuidStr += prefixLen; + + portStringBufferToHexGroups(pUuidStr, (expectedStringLength - prefixLen), + pUuid->uuid, NV_UUID_LEN, + groupCount, pGroupEntryNum, "-"); +} + +/** + * @brief Gets UUID ASCII string, "GPU-%08x-%04x-%04x-%04x-%012x" + * (SHA-1) + * + * @param[in] pUuid UUID + * @param[out] pUuidStr Returns UUID string + * + * @returns void + */ +void +nvGetGpuUuidString +( + const NvUuid *pUuid, + char *pUuidStr +) +{ + _nvGetUuidString(NV_FALSE, pUuid, pUuidStr); +} + +/** + * @brief Gets UUID ASCII string, "MIG-%08x-%04x-%04x-%04x-%012x" + * (SHA-1) + * + * @param[in] pUuid UUID + * @param[out] pUuidStr Returns UUID string + * + * @returns void + */ +void +nvGetSmcUuidString +( + const NvUuid *pUuid, + char *pUuidStr +) +{ + _nvGetUuidString(NV_TRUE, pUuid, pUuidStr); +} diff --git a/src/nvidia/src/kernel/gpu/gr/arch/maxwell/kgraphics_gm200.c b/src/nvidia/src/kernel/gpu/gr/arch/maxwell/kgraphics_gm200.c new file mode 100644 index 000000000..70943a441 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/arch/maxwell/kgraphics_gm200.c @@ -0,0 +1,278 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" + +#include "ctrl/ctrl0080/ctrl0080fifo.h" + +/*! + * @brief Allocate common buffers that are required by the graphics context + */ +NV_STATUS +kgraphicsAllocGrGlobalCtxBuffers_GM200 +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 gfid, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + MEMORY_DESCRIPTOR **ppMemDesc; + GR_GLOBALCTX_BUFFERS *pCtxBuffers; + GR_BUFFER_ATTR *pCtxAttr; + NvU64 cbAllocFlags; + NvBool bPhysicallyContiguous; + NvU64 flags = MEMDESC_FLAGS_NONE; + NvU32 circularBufferSize; + NvU32 circularBufferAlign; + NvU32 pagepoolBufferSize; + NvU32 pagepoolBufferAlign; + NvU32 attribBufferSize; + NvU32 attribBufferAlign; + NvU32 privMapBufferSize; + NvU32 privMapBufferAlign; + NvU32 unresPrivMapBufferSize; + NvU32 unresPrivMapBufferAlign; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + CTX_BUF_POOL_INFO *pCtxBufPool; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + + circularBufferSize = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB].size; + circularBufferAlign = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB].alignment; + + pagepoolBufferSize = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL].size; + pagepoolBufferAlign = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL].alignment; + + attribBufferSize = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB].size; + attribBufferAlign = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB].alignment; + + privMapBufferSize = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP].size; + privMapBufferAlign = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP].alignment; + + unresPrivMapBufferSize = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP].size; + unresPrivMapBufferAlign = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP].alignment; + + // Setup the Circular Buffer DB + cbAllocFlags = MEMDESC_FLAGS_LOST_ON_SUSPEND; + + if (kgraphicsShouldSetContextBuffersGPUPrivileged(pGpu, pKernelGraphics)) + { + cbAllocFlags |= 
MEMDESC_FLAGS_GPU_PRIVILEGED; + } + + pCtxBufPool = NULL; + if (pKernelGraphicsContext != NULL) + { + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + pCtxBuffers = &pKernelGraphicsContextUnicast->localCtxBuffer; + pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.localCtxAttr; + + // + // if we already have local buffers allocated, return as we may + // get called multiple times per-channel + // + if (pCtxBuffers->bAllocated) + return NV_OK; + + // check for allocating local buffers in VPR memory (don't want for global memory) + if ( + pKernelGraphicsContextUnicast->bVprChannel) + cbAllocFlags |= MEMDESC_ALLOC_FLAGS_PROTECTED; + + // If allocated per channel, ensure allocations goes into Suballocator if available + cbAllocFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + else + { + pCtxBuffers = &pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid]; + pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr; + NV_ASSERT_OK_OR_RETURN( + ctxBufPoolGetGlobalPool(pGpu, CTX_BUF_ID_GR_GLOBAL, + NV2080_ENGINE_TYPE_GR(pKernelGraphics->instance), + &pCtxBufPool)); + } + + // Handle VF - must use VF attributes and flags for both global and local buffers + if (IS_GFID_VF(gfid)) + { + pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.vfGlobalCtxAttr; + + cbAllocFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + flags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + + if (pCtxBufPool != NULL) + { + cbAllocFlags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + flags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + } + + // Circular Buffer + if (circularBufferSize > 0) + { + ppMemDesc = &pCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_BUNDLE_CB]; + bPhysicallyContiguous = pCtxAttr[GR_GLOBALCTX_BUFFER_BUNDLE_CB].pAllocList == ADDRLIST_FBMEM_ONLY; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, + circularBufferSize, + circularBufferAlign, + bPhysicallyContiguous, + ADDR_UNKNOWN, + pCtxAttr[GR_GLOBALCTX_BUFFER_BUNDLE_CB].cpuAttr, + cbAllocFlags | MEMDESC_FLAGS_GPU_PRIVILEGED | MEMDESC_FLAGS_HIGH_PRIORITY)); + + memdescSetGpuCacheAttrib(*ppMemDesc, NV_MEMORY_CACHED); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescAllocList(*ppMemDesc, pCtxAttr[GR_GLOBALCTX_BUFFER_BUNDLE_CB].pAllocList)); + } + + // Page Pool + if (pagepoolBufferSize > 0) + { + ppMemDesc = &pCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_PAGEPOOL]; + bPhysicallyContiguous = pCtxAttr[GR_GLOBALCTX_BUFFER_PAGEPOOL].pAllocList == ADDRLIST_FBMEM_ONLY; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, + pagepoolBufferSize, + pagepoolBufferAlign, + bPhysicallyContiguous, + ADDR_UNKNOWN, + pCtxAttr[GR_GLOBALCTX_BUFFER_PAGEPOOL].cpuAttr, + cbAllocFlags | MEMDESC_FLAGS_GPU_PRIVILEGED)); + + memdescSetGpuCacheAttrib(*ppMemDesc, NV_MEMORY_CACHED); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescAllocList(*ppMemDesc, pCtxAttr[GR_GLOBALCTX_BUFFER_PAGEPOOL].pAllocList)); + } + + // Attribute Buffer + if (attribBufferSize > 0) + { + ppMemDesc = &pCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB]; + bPhysicallyContiguous = pCtxAttr[GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB].pAllocList == ADDRLIST_FBMEM_ONLY; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, + attribBufferSize, + attribBufferAlign, + bPhysicallyContiguous, + ADDR_UNKNOWN, + pCtxAttr[GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB].cpuAttr, + cbAllocFlags | MEMDESC_FLAGS_HIGH_PRIORITY)); + + 
memdescSetGpuCacheAttrib(*ppMemDesc, NV_MEMORY_CACHED); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescAllocList(*ppMemDesc, pCtxAttr[GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB].pAllocList)); + } + + // we do not want/need a priv access map allocated per-channel, so skip allocating + if (pKernelGraphicsContext == NULL) + { + if (kgraphicsDoesUcodeSupportPrivAccessMap(pGpu, pKernelGraphics)) + { + NvBool bIsContiguous = kgraphicsShouldForceMainCtxContiguity_HAL(pGpu, pKernelGraphics) && gpuIsClientRmAllocatedCtxBufferEnabled(pGpu); + + // PRIV access map + if (privMapBufferSize > 0) + { + ppMemDesc = &pCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP]; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, + privMapBufferSize, + privMapBufferAlign, + bIsContiguous, + ADDR_UNKNOWN, + pCtxAttr[GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP].cpuAttr, + flags)); + + // + // Force page size to 4KB, we can change this later when RM + // access method support 64k pages + // + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, *ppMemDesc, AT_GPU, RM_ATTR_PAGE_SIZE_4KB); + NV_ASSERT_OK_OR_RETURN(memdescSetCtxBufPool(*ppMemDesc, pCtxBufPool)); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescAllocList(*ppMemDesc, pCtxAttr[GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP].pAllocList)); + } + + // + // vGPU does not support unrestricted priv access map buffer. Hence, avoid + // allocating it on vGPU configuration. + // + if ((unresPrivMapBufferSize > 0) && kgraphicsIsUnrestrictedAccessMapSupported_HAL(pGpu, pKernelGraphics)) + { + // Unrestricted PRIV access map + ppMemDesc = &pCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP]; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, + unresPrivMapBufferSize, + unresPrivMapBufferAlign, + bIsContiguous, + ADDR_UNKNOWN, + pCtxAttr[GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP].cpuAttr, + flags)); + + // + // Force page size to 4KB, we can change this later when RM + // access method support 64k pages + // + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, *ppMemDesc, AT_GPU, RM_ATTR_PAGE_SIZE_4KB); + NV_ASSERT_OK_OR_RETURN(memdescSetCtxBufPool(*ppMemDesc, pCtxBufPool)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescAllocList(*ppMemDesc, + pCtxAttr[GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP].pAllocList)); + } + } + } + + pCtxBuffers->bAllocated = NV_TRUE; + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/gr/arch/pascal/kgraphics_gp100.c b/src/nvidia/src/kernel/gpu/gr/arch/pascal/kgraphics_gp100.c new file mode 100644 index 000000000..b162fee75 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/arch/pascal/kgraphics_gp100.c @@ -0,0 +1,290 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "nvRmReg.h" + +#include "ctrl/ctrl0080/ctrl0080fifo.h" + +void +kgraphicsInitFecsRegistryOverrides_GP100 +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + NvU32 data; + + // init the FECS buffer attributes before allocating buffer + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CTXSW_LOG, &data) == NV_OK) + { + NvBool bIntrFallback = NV_FALSE; + NvBool bIntr = NV_FALSE; + NvBool bLog = NV_FALSE; + + switch (data) + { + case NV_REG_STR_RM_CTXSW_LOG_ENABLE_INTR_APC: + bIntrFallback = NV_TRUE; + // Intentional fall-through + case NV_REG_STR_RM_CTXSW_LOG_ENABLE_INTR: + bIntr = NV_TRUE; + // Intentional fall-through + case NV_REG_STR_RM_CTXSW_LOG_ENABLE: + bLog = NV_TRUE; + break; + default: + break; + } + + kgraphicsSetBottomHalfCtxswLoggingEnabled(pGpu, pKernelGraphics, bIntrFallback); + kgraphicsSetIntrDrivenCtxswLoggingEnabled(pGpu, pKernelGraphics, bIntr); + kgraphicsSetCtxswLoggingSupported(pGpu, pKernelGraphics, bLog); + } + + fecsSetRecordsPerIntr(pGpu, pKernelGraphics, NV_REG_STR_RM_CTXSW_LOG_RECORDS_PER_INTR_DEFAULT); + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_CTXSW_LOG_RECORDS_PER_INTR, &data) == NV_OK) + { + fecsSetRecordsPerIntr(pGpu, pKernelGraphics, data); + } +} + +/*! 
+ * @brief Allocate common local/global buffers that are required by the graphics context for GfxP Pool + * + * @param[in] pGpu + * @param[in] pKernelGraphics + * @param[in] gfid host or guest gfid + * @param[in] pKernelGraphicsContext graphics context - if valid allocate local + */ +NV_STATUS +kgraphicsAllocGrGlobalCtxBuffers_GP100 +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 gfid, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + extern NV_STATUS kgraphicsAllocGrGlobalCtxBuffers_GM200(OBJGPU *pGpu, KernelGraphics *pKernelGraphics, NvU32 gfid, KernelGraphicsContext *pKernelGraphicsContext); + GR_GLOBALCTX_BUFFERS *pCtxBuffers; + NvU64 allocFlags = MEMDESC_FLAGS_NONE; + NV_STATUS status; + CTX_BUF_POOL_INFO *pCtxBufPool; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + pCtxBufPool = NULL; + if (pKernelGraphicsContext != NULL) + { + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + pCtxBuffers = &pKernelGraphicsContextUnicast->localCtxBuffer; + + // + // if we already have local buffers allocated, return as we may get + // called multiple times per-channel + // + if (pCtxBuffers->bAllocated) + return NV_OK; + + // check for allocating local buffers in VPR memory (don't want for global memory) + if ( + pKernelGraphicsContextUnicast->bVprChannel) + allocFlags |= MEMDESC_ALLOC_FLAGS_PROTECTED; + + // If allocated per channel, ensure allocations goes into Suballocator if available + allocFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + else + { + pCtxBuffers = &pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid]; + NV_ASSERT_OK_OR_RETURN(ctxBufPoolGetGlobalPool(pGpu, CTX_BUF_ID_GR_GLOBAL, + NV2080_ENGINE_TYPE_GR(pKernelGraphics->instance), &pCtxBufPool)); + } + + if (pCtxBufPool != NULL) + { + allocFlags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + } + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + + status = kgraphicsAllocGrGlobalCtxBuffers_GM200(pGpu, pKernelGraphics, gfid, pKernelGraphicsContext); + + return status; +} + +NV_STATUS +kgraphicsAllocGlobalCtxBuffers_GP100 +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 gfid +) +{ + CTX_BUF_POOL_INFO *pCtxBufPool = NULL; + NvU64 allocFlags = 0; + NvU32 fecsBufferSize = 0; + NvU32 fecsBufferAlign = 0x0; + GR_GLOBALCTX_BUFFERS *pCtxBuffers; + GR_BUFFER_ATTR *pCtxAttr; + + // SKIP FECS buffer allocation for Virtual context + if (IS_GFID_VF(gfid)) + { + return NV_OK; + } + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + pCtxBuffers = &pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid]; + pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr; + NV_ASSERT_OK_OR_RETURN( + ctxBufPoolGetGlobalPool(pGpu, + CTX_BUF_ID_GR_GLOBAL, + NV2080_ENGINE_TYPE_GR(pKernelGraphics->instance), + &pCtxBufPool)); + + if (pCtxBufPool != NULL) + { + allocFlags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + } + + // TODO: Need this check for vGPU configs without SRIOV support? 
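+    // The FECS event buffer size/alignment are taken from the static info
+    // except on legacy (non-SRIOV) vGPU guests, where fecsBufferSize stays 0
+    // and the allocation below is skipped.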
+ if (!IS_VIRTUAL(pGpu) || IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + NvU32 engineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + fecsBufferSize = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[engineId].size; + fecsBufferAlign = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[engineId].alignment; + } + + // Allocate the FECS buffer if the right regkey (RmCtxswLog) is enabled + if ((fecsBufferSize > 0) && + kgraphicsIsCtxswLoggingSupported(pGpu, pKernelGraphics)) + { + NvBool bIsFbBroken; + MEMORY_DESCRIPTOR **ppMemDesc = &pCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_FECS_EVENT]; + + bIsFbBroken = pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM); + + if (bIsFbBroken) + { + pCtxAttr[GR_GLOBALCTX_BUFFER_FECS_EVENT].pAllocList = ADDRLIST_SYSMEM_ONLY; + pCtxAttr[GR_GLOBALCTX_BUFFER_FECS_EVENT].cpuAttr = NV_MEMORY_UNCACHED; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, + fecsBufferSize, // size + fecsBufferAlign, // alignment + NV_TRUE, // physically contiguous + ADDR_UNKNOWN, + pCtxAttr[GR_GLOBALCTX_BUFFER_FECS_EVENT].cpuAttr, + allocFlags | MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS | MEMDESC_FLAGS_GPU_PRIVILEGED)); + + if ((*ppMemDesc)->_addressSpace == ADDR_FBMEM) + memdescSetGpuCacheAttrib(*ppMemDesc, NV_MEMORY_CACHED); + + NV_ASSERT_OK_OR_RETURN(memdescSetCtxBufPool(*ppMemDesc, pCtxBufPool)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescAllocList(*ppMemDesc, pCtxAttr[GR_GLOBALCTX_BUFFER_FECS_EVENT].pAllocList)); + } + + pCtxBuffers->bFecsBufferAllocated = NV_TRUE; + + return NV_OK; +} + +/** + * @brief Services the GRn_FECS_LOG interrupts. + * + * @returns Zero, because KernelGraphics opts out of stuck interrupt detection. + */ +NvU32 +kgraphicsServiceInterrupt_GP100 +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + IntrServiceServiceInterruptArguments *pParams +) +{ + NvU32 grIdx = pKernelGraphics->instance; + + NV_ASSERT_OR_RETURN(pParams != NULL, 0); + NV_ASSERT_OR_RETURN(pParams->engineIdx == MC_ENGINE_IDX_GRn_FECS_LOG(grIdx), 0); + + // if MIG is disabled and an access has been made to any GR engine ID > 0, fail + if (!IS_MIG_IN_USE(pGpu) && (grIdx != 0)) + { + NV_ASSERT_FAILED("GR[1-7]_FECS_LOG is not supported if MIG is disabled!"); + return 0; + } + + if ((pGpu->fecsCtxswLogConsumerCount > 0) && + (kgraphicsIsIntrDrivenCtxswLoggingEnabled(pGpu, pKernelGraphics))) + { + if (fecsClearIntrPendingIfPending(pGpu, pKernelGraphics)) + { + nvEventBufferFecsCallback(pGpu, (void*)pKernelGraphics); + } + } + return 0; +} + +/** + * @brief Clears the stall interrupt leaf vector and return whether to call ServiceStall. + * @details Normally there's no need to override this function; however, + * the FECS_LOG engine idxs do not have real interrupt vectors to clear. + * This implementation just tells INTR to continue with servicing. + * + * @returns NV_TRUE indicating the interrupt should be serviced. 
+ */ +NvBool +kgraphicsClearInterrupt_GP100 +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + IntrServiceClearInterruptArguments *pParams +) +{ + NvU32 grIdx = pKernelGraphics->instance; + + NV_ASSERT_OR_RETURN(pParams != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(pParams->engineIdx == MC_ENGINE_IDX_GRn_FECS_LOG(grIdx), 0); + + return NV_TRUE; +} + diff --git a/src/nvidia/src/kernel/gpu/gr/arch/turing/kgraphics_tu102.c b/src/nvidia/src/kernel/gpu/gr/arch/turing/kgraphics_tu102.c new file mode 100644 index 000000000..f64d9ee72 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/arch/turing/kgraphics_tu102.c @@ -0,0 +1,136 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/gr/kernel_graphics.h" + +#include "ctrl/ctrl0080/ctrl0080fifo.h" + +/*! 
+ * @brief Allocate common local/global buffers that are required by the graphics context + * + * @param[in] pGpu + * @param[in] pKernelGraphics + * @param[in] gfid host or guest gfid + * @param[in] pKernelGraphicsContext context pointer - if valid allocate local + */ +NV_STATUS +kgraphicsAllocGrGlobalCtxBuffers_TU102 +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 gfid, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + extern NV_STATUS kgraphicsAllocGrGlobalCtxBuffers_GP100(OBJGPU *pGpu, KernelGraphics *pKernelGraphics, NvU32 gfid, KernelGraphicsContext *pKernelGraphicsContext); + MEMORY_DESCRIPTOR **ppMemDesc; + GR_GLOBALCTX_BUFFERS *pCtxBuffers; + GR_BUFFER_ATTR *pCtxAttr; + NvU64 allocFlags = MEMDESC_FLAGS_NONE; + NvBool bIsFbBroken = NV_FALSE; + NvU32 rtvcbBufferSize; + NvU32 rtvcbBufferAlign; + NV_STATUS status; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) || + (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM))) + { + bIsFbBroken = NV_TRUE; + } + + // Setup the Circular Buffer DB + allocFlags = MEMDESC_FLAGS_LOST_ON_SUSPEND; + + if (kgraphicsShouldSetContextBuffersGPUPrivileged(pGpu, pKernelGraphics)) + { + allocFlags |= MEMDESC_FLAGS_GPU_PRIVILEGED; + } + + if (pKernelGraphicsContext != NULL) + { + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + pCtxBuffers = &pKernelGraphicsContextUnicast->localCtxBuffer; + pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.localCtxAttr; + + // + // if we already have local buffers allocated, return as we may get + // called multiple times per-channel + // + if (pCtxBuffers->bAllocated) + return NV_OK; + + // check for allocating local buffers in VPR memory (don't want for global memory) + if ( + pKernelGraphicsContextUnicast->bVprChannel) + allocFlags |= MEMDESC_ALLOC_FLAGS_PROTECTED; + + // If allocated per channel, ensure allocations goes into Suballocator if available + allocFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + else + { + pCtxBuffers = &pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid]; + pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr; + } + + if (IS_GFID_VF(gfid)) + { + pCtxAttr = pKernelGraphics->globalCtxBuffersInfo.vfGlobalCtxAttr; + allocFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + + rtvcbBufferSize = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL].size; + rtvcbBufferAlign = + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL].alignment; + + if (rtvcbBufferSize > 0) + { + ppMemDesc = &pCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_RTV_CB]; + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, + rtvcbBufferSize, + rtvcbBufferAlign, + !bIsFbBroken, + ADDR_UNKNOWN, + pCtxAttr[GR_GLOBALCTX_BUFFER_RTV_CB].cpuAttr, + allocFlags)); + + memdescSetGpuCacheAttrib(*ppMemDesc, NV_MEMORY_CACHED); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + 
memdescAllocList(*ppMemDesc, pCtxAttr[GR_GLOBALCTX_BUFFER_RTV_CB].pAllocList)); + } + status = kgraphicsAllocGrGlobalCtxBuffers_GP100(pGpu, pKernelGraphics, gfid, pKernelGraphicsContext); + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gr/fecs_event_list.c b/src/nvidia/src/kernel/gpu/gr/fecs_event_list.c new file mode 100644 index 000000000..e044b23f4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/fecs_event_list.c @@ -0,0 +1,1468 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************************************************************\ +* * +* Module: fecs_event_list.c * +* Description: * +* This module contains an implementation of the Event Buffer * +* callback for FECS events * +* * +\***************************************************************************/ + +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/rmapi/event.h" +#include "kernel/rmapi/event_buffer.h" +#include "libraries/resserv/rs_server.h" +#include "kernel/core/locks.h" +#include "kernel/os/os.h" +#include "kernel/gpu/gr/fecs_event_list.h" +#include "kernel/gpu/mig_mgr/gpu_instance_subscription.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/bus/kern_bus.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "rmapi/client.h" + +#include "class/cl90cdtypes.h" +#include "ctrl/ctrl90cd.h" + +typedef struct +{ + NvU32 magic_lo; + NvU32 magic_hi; + NvU32 context_id; + NvU32 context_ptr; + NvU32 new_context_id; + NvU32 new_context_ptr; + NvU64 ts[]; +} FECS_EVENT_RECORD; + +#define NV_FECS_TRACE_MAX_TIMESTAMPS 5 +#define NV_FECS_TRACE_MAGIC_INVALIDATED 0xdededede // magic number for entries that have been read +#define NV_FECS_TRACE_MAGIC_PENDING 0xfefefefe // magic number for new entries that have been detected + +/*! Opaque pointer to private data */ +typedef struct VGPU_FECS_TRACE_STAGING_BUFFER VGPU_FECS_TRACE_STAGING_BUFFER; + +/*! 
Private FECS event buffer data stored per-KGR */ +struct KGRAPHICS_FECS_TRACE_INFO +{ + NvU8 *pFecsBufferMapping; + NvU16 fecsCtxswLogRecordsPerIntr; + NvU16 fecsTraceRdOffset; + NvU16 fecsTraceCounter; + NvU32 fecsCtxswLogIntrPending; + +#if PORT_IS_MODULE_SUPPORTED(crypto) + PORT_CRYPTO_PRNG *pFecsLogPrng; +#endif + + // + // GR Routing information for GPU instance to which this engine is assigned if MIG is enabled. + // Will be 0/NULL for unassigned GR engines or if MIG is disabled + // + NvHandle hClient; + NvHandle hSubdevice; + NvU32 localGrEngineIdx; + + // vGPU FECS staging eventbuffer (guest only) + VGPU_FECS_TRACE_STAGING_BUFFER *pVgpuStaging; +}; + +/*! + * @brief Function to populate client/subdevice/grRouteInfo from cached + * information in order to make calls into the specific MIG GPU instance + * to which a GR engine is assigned. If MIG is not enabled, GPU + * internal client/subdevice handles will be used and grRouteInfo is + * cleared + * + * @param[in] pGpu + * @param[in] pKernelGraphics + * @param[out] phClient Client handle to populate + * @param[out] phSubdevice Subdevice handle to populate + * @param[out] pGrRouteInfo Internal GR Routing info to populate + */ +static NV_STATUS +_fecsLoadInternalRoutingInfo +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvHandle *phClient, + NvHandle *phSubdevice, + NV2080_CTRL_GR_ROUTE_INFO *pGrRouteInfo +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN(pFecsTraceInfo != NULL, NV_ERR_INVALID_STATE); + + portMemSet(pGrRouteInfo, 0, sizeof(*pGrRouteInfo)); + + if (!IS_MIG_IN_USE(pGpu)) + { + *phClient = pGpu->hInternalClient; + *phSubdevice = pGpu->hInternalSubdevice; + return NV_OK; + } + + // GR Engines not allocated to any GPU instance will have null handles + NV_CHECK_OR_RETURN(LEVEL_INFO, pFecsTraceInfo->hClient != NV01_NULL_OBJECT, NV_ERR_INVALID_ARGUMENT); + + kgrmgrCtrlSetEngineID(pFecsTraceInfo->localGrEngineIdx, pGrRouteInfo); + *phClient = pFecsTraceInfo->hClient; + *phSubdevice = pFecsTraceInfo->hSubdevice; + + return NV_OK; +} + +static NV_STATUS +fecsExtractTagAndTimestamp +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU64 rawTimestamp, + NvU64 *pTimestampVal, + NvU8 *pTag +) +{ + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pFecsTraceDefines != NULL, NV_ERR_INVALID_STATE); + + *pTag = ((NvU64_HI32(rawTimestamp)) >> pKernelGraphicsStaticInfo->pFecsTraceDefines->timestampHiTagShift) & pKernelGraphicsStaticInfo->pFecsTraceDefines->timestampHiTagMask; + *pTimestampVal = rawTimestamp & pKernelGraphicsStaticInfo->pFecsTraceDefines->timestampVMask; + + // timestamp encoded as right shifted N bits, since they hold zeros. RM needs to reverse that here. + *pTimestampVal <<= pKernelGraphicsStaticInfo->pFecsTraceDefines->numLowerBitsZeroShift; + return NV_OK; +} + +// +// The function formats the information from the FECS Buffer into a format +// suitable for EventBuffer, and then checks to see whether the subscriber +// needs to be notified. If so, the subscriber is notified. +// +// pGpu is used to retrieve data on the pid, and +// seqno provides the sequence number for the user to keep track of +// whether any entry has been dropped. +// pRecord is the current FECS entry. 
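+//
+// In outline: the record's old/new context instance pointers are resolved to
+// kernel channels, each of the record's timestamp words is split into a
+// (tag, timestamp) pair, and the resulting notification is fanned out to the
+// event-buffer bindings registered for the owning UID as well as to the
+// bindings registered for all UIDs.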
+// +static void +formatAndNotifyFecsRecord +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + FECS_EVENT_RECORD *pRecord +) +{ + FECS_EVENT_NOTIFICATION_DATA notifRecord; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + KernelChannel *pKernelChannel = NULL; + KernelChannel *pKernelChannelNew = NULL; + MIG_INSTANCE_REF *pChannelRef; + MIG_INSTANCE_REF *pNewChannelRef; + INST_BLOCK_DESC inst; + NvU32 timestampId; + NvU64 noisyTimestampStart = 0; + NvU64 noisyTimestampRange = 0; + NvU32 instSize; + NvU32 instShift; + NV_STATUS status; + + if (pRecord == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Invalid FECS record!\n"); + DBG_BREAKPOINT(); + return; + } + + kfifoGetInstBlkSizeAlign_HAL(pKernelFifo, &instSize, &instShift); + + portMemSet(¬ifRecord, 0, sizeof(notifRecord)); + + inst.address = ((NvU64)pRecord->context_ptr) << instShift; + inst.aperture = INST_BLOCK_APERTURE_VIDEO_MEMORY; + inst.gfid = GPU_GFID_PF; + if (pRecord->context_ptr && + (kfifoConvertInstToKernelChannel_HAL(pGpu, pKernelFifo, &inst, &pKernelChannel) != NV_OK)) + { + NV_PRINTF(LEVEL_INFO, "Error getting channel!\n"); + pKernelChannel = NULL; + } + + inst.address = ((NvU64)pRecord->new_context_ptr) << instShift; + inst.aperture = INST_BLOCK_APERTURE_VIDEO_MEMORY; + inst.gfid = GPU_GFID_PF; + if (pRecord->new_context_ptr && + (kfifoConvertInstToKernelChannel_HAL(pGpu, pKernelFifo, &inst, &pKernelChannelNew) != NV_OK)) + { + NV_PRINTF(LEVEL_INFO, "Error getting new channel!\n"); + pKernelChannelNew = NULL; + } + + pChannelRef = (pKernelChannel != NULL) ? kchannelGetMIGReference(pKernelChannel) : NULL; + pNewChannelRef = (pKernelChannelNew != NULL) ? kchannelGetMIGReference(pKernelChannelNew) : NULL; + + for (timestampId = 0; timestampId < NV_FECS_TRACE_MAX_TIMESTAMPS; timestampId++) + { + NV_ASSERT_OK_OR_ELSE(status, + fecsExtractTagAndTimestamp(pGpu, pKernelGraphics, + pRecord->ts[timestampId], + ¬ifRecord.timestamp, + ¬ifRecord.tag), + return); + + // + // determine a few more fields of the current record by subevent type, + // before we notify the subscriber + // + switch (notifRecord.tag) + { + case NV_EVENT_BUFFER_FECS_CTXSWTAG_RESTORE_START: + case NV_EVENT_BUFFER_FECS_CTXSWTAG_CONTEXT_START: + if (pKernelChannelNew != NULL) + { + notifRecord.pid = pKernelChannelNew->ProcessID; + notifRecord.subpid = pKernelChannelNew->SubProcessID; + notifRecord.userInfo = (NvU64)(NvUPtr)pKernelChannelNew->pUserInfo; + notifRecord.context_id = kchannelGetCid(pKernelChannelNew); + + if (kmigmgrIsMIGReferenceValid(pNewChannelRef)) + { + notifRecord.swizzId = pNewChannelRef->pKernelMIGGpuInstance->swizzId; + if (pNewChannelRef->pMIGComputeInstance) + notifRecord.computeInstanceId = pNewChannelRef->pMIGComputeInstance->id; + else + notifRecord.computeInstanceId = NV_EVENT_BUFFER_KERNEL_MIG_CI; + } + + if (notifRecord.tag == NV_EVENT_BUFFER_FECS_CTXSWTAG_RESTORE_START) + { + noisyTimestampStart = notifRecord.timestamp; + } + else + { + noisyTimestampRange = notifRecord.timestamp - noisyTimestampStart; + } + } + break; + + case NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_WFI: + case NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_GFXP: + case NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_CTAP: + case NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_CILP: + if (pKernelChannel != NULL) + { + notifRecord.pid = pKernelChannel->ProcessID; + notifRecord.subpid = pKernelChannel->SubProcessID; + notifRecord.userInfo = (NvU64)(NvUPtr)pKernelChannel->pUserInfo; + notifRecord.context_id = kchannelGetCid(pKernelChannel); + + if (kmigmgrIsMIGReferenceValid(pChannelRef)) + { + 
notifRecord.swizzId = pChannelRef->pKernelMIGGpuInstance->swizzId; + if (pChannelRef->pMIGComputeInstance) + notifRecord.computeInstanceId = pChannelRef->pMIGComputeInstance->id; + else + notifRecord.computeInstanceId = NV_EVENT_BUFFER_KERNEL_MIG_CI; + } + } + break; + + case NV_EVENT_BUFFER_FECS_CTXSWTAG_CTXSW_REQ_BY_HOST: + case NV_EVENT_BUFFER_FECS_CTXSWTAG_SAVE_END: + if (pKernelChannel != NULL) + { + notifRecord.pid = pKernelChannel->ProcessID; + notifRecord.subpid = pKernelChannel->SubProcessID; + notifRecord.userInfo = (NvU64)(NvUPtr)pKernelChannel->pUserInfo; + notifRecord.context_id = kchannelGetCid(pKernelChannel); + + if (kmigmgrIsMIGReferenceValid(pChannelRef)) + { + notifRecord.swizzId = pChannelRef->pKernelMIGGpuInstance->swizzId; + if (pChannelRef->pMIGComputeInstance != NULL) + notifRecord.computeInstanceId = pChannelRef->pMIGComputeInstance->id; + else + notifRecord.computeInstanceId = NV_EVENT_BUFFER_KERNEL_MIG_CI; + } + + if (notifRecord.tag == NV_EVENT_BUFFER_FECS_CTXSWTAG_CTXSW_REQ_BY_HOST) + { + noisyTimestampStart = notifRecord.timestamp; + } + else + { + noisyTimestampRange = notifRecord.timestamp - noisyTimestampStart; + } + } + break; + + default: + continue; + } + + if ((pKernelChannel != NULL) || (pKernelChannelNew != NULL)) + { + FecsEventBufferBindMultiMapSubmap *pSubmap; + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + + notifRecord.noisyTimestamp = 0; + if ((noisyTimestampRange > 0) && (pFecsTraceInfo->pFecsLogPrng != NULL)) + notifRecord.noisyTimestamp = noisyTimestampStart + portCryptoPseudoRandomGeneratorGetU32(pFecsTraceInfo->pFecsLogPrng) % noisyTimestampRange; + + if (notifRecord.userInfo != 0) + { + // Notify event buffers listening for the current UID + pSubmap = multimapFindSubmap(&pGpu->fecsEventBufferBindingsUid, notifRecord.userInfo); + notifyEventBuffers(pGpu, pSubmap, ¬ifRecord); + } + + // Notify event buffers listening for all UIDs + pSubmap = multimapFindSubmap(&pGpu->fecsEventBufferBindingsUid, 0); + notifyEventBuffers(pGpu, pSubmap, ¬ifRecord); + + } + } +} + +static NV_STATUS +_fecsEventBufferAdd +( + OBJGPU *pGpu, + NV_EVENT_BUFFER_BIND_POINT_FECS *pBind, + NvU8 tag, + NvU32 pid, + NvU8 swizzId, + NvU8 computeInstanceId, + NvU32 context_id, + NvU64 timestamp +) +{ + NV_STATUS status; + NvBool bNotify; + NvP64 notificationHandle; + EVENT_BUFFER_PRODUCER_DATA notifyEvent; + NvU32 notifyIndex; + + switch (pBind->version) + { + case 2: + notifyIndex = NV_EVENT_BUFFER_RECORD_TYPE_FECS_CTX_SWITCH_V2; + break; + case 1: + notifyIndex = NV_EVENT_BUFFER_RECORD_TYPE_FECS_CTX_SWITCH; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + portMemSet(¬ifyEvent, 0, sizeof(notifyEvent)); + notifyEvent.pVardata = NV_PTR_TO_NvP64(NULL); + notifyEvent.vardataSize = 0; + + NV_EVENT_BUFFER_FECS_RECORD_V2 fecsRecord; + portMemSet(&fecsRecord, 0, sizeof(fecsRecord)); + fecsRecord.tag = tag; + fecsRecord.pid = pid; + if (pBind->version >= 2) + { + fecsRecord.migGpuInstanceId = swizzId; + fecsRecord.migComputeInstanceId = computeInstanceId; + } + fecsRecord.context_id = context_id; + fecsRecord.timestamp = timestamp; + fecsRecord.seqno = pBind->pEventBuffer->seqNo++; + + notifyEvent.pPayload = NV_PTR_TO_NvP64(&fecsRecord); + notifyEvent.payloadSize = sizeof(fecsRecord); + + status = eventBufferAdd(pBind->pEventBuffer, ¬ifyEvent, notifyIndex, &bNotify, ¬ificationHandle); + + if ((status == NV_OK) && bNotify && notificationHandle) + { + 
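+        // eventBufferAdd() requested a notification, so signal the
+        // registered listeners for this record type.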
osEventNotification(pGpu, + pBind->pEventBuffer->pListeners, + notifyIndex, + ¬ifyEvent, + 0); // Do not copy structure -- embedded pointers. + pBind->pEventBuffer->bNotifyPending = NV_TRUE; + } + + return status; +} + +void +notifyEventBuffers +( + OBJGPU *pGpu, + FecsEventBufferBindMultiMapSubmap *pSubmap, + FECS_EVENT_NOTIFICATION_DATA const *pRecord +) +{ + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + + if (pSubmap != NULL) + { + FecsEventBufferBindMultiMapIter iter = multimapSubmapIterItems(&pGpu->fecsEventBufferBindingsUid, pSubmap); + + while (multimapItemIterNext(&iter)) + { + NV_EVENT_BUFFER_BIND_POINT_FECS *pBind = iter.pValue; + NvBool bSanitizeKernel = (!pBind->bKernel) && (pRecord->userInfo == 0); + NvBool bSanitizeUser = (!pBind->bAdmin) && (pBind->pUserInfo != pRecord->userInfo); + NvBool bSanitize = bSanitizeKernel || bSanitizeUser; + NvU32 pid; + NvU32 context_id; + NvU64 timestamp; + NvU32 tag = pRecord->tag; + NvU8 swizzId = pRecord->swizzId; + NvU8 computeInstanceId = pRecord->computeInstanceId; + + pBind->pEventBuffer->seqNo += pRecord->dropCount; + + if (bSanitize || !(NVBIT(pRecord->tag) & pBind->eventMask)) + { + // + // Re-map CONTEXT_START as SIMPLE_START and SAVE_END as SIMPLE_END if + // the binding has simple level-of-detail or is being sanitized + // + if ((bSanitize || (pBind->eventMask & NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SIMPLE_START)) && + (tag == NV_EVENT_BUFFER_FECS_CTXSWTAG_CONTEXT_START)) + { + tag = NV_EVENT_BUFFER_FECS_CTXSWTAG_SIMPLE_START; + } + else if ((bSanitize || (pBind->eventMask & NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SIMPLE_END)) && + (tag == NV_EVENT_BUFFER_FECS_CTXSWTAG_SAVE_END)) + { + tag = NV_EVENT_BUFFER_FECS_CTXSWTAG_SIMPLE_END; + } + else if ((tag != NV_EVENT_BUFFER_FECS_CTXSWTAG_SIMPLE_START) && + (tag != NV_EVENT_BUFFER_FECS_CTXSWTAG_SIMPLE_END)) + { + continue; + } + } + + // + // While MIG is enabled, if the bindpoint is registered for a specific MIG GPU instance + // then filter out records from other GPU instances + // + if (bMIGInUse && + ((pBind->swizzId != NV2080_CTRL_GPU_PARTITION_ID_INVALID) && + (pRecord->swizzId != pBind->swizzId))) + continue; + + // While MIG is enabled, pause tracing of V1 bindpoints + if (bMIGInUse && (pBind->version < 2)) + continue; + + if (bSanitizeKernel) + { + timestamp = pRecord->noisyTimestamp; + pid = NV_EVENT_BUFFER_KERNEL_PID; + context_id = NV_EVENT_BUFFER_KERNEL_CONTEXT; + swizzId = NV_EVENT_BUFFER_KERNEL_MIG_GI; + computeInstanceId = NV_EVENT_BUFFER_KERNEL_MIG_CI; + } + else if (bSanitizeUser) + { + timestamp = pRecord->noisyTimestamp; + pid = NV_EVENT_BUFFER_HIDDEN_PID; + context_id = NV_EVENT_BUFFER_HIDDEN_CONTEXT; + swizzId = NV_EVENT_BUFFER_HIDDEN_MIG_GI; + computeInstanceId = NV_EVENT_BUFFER_HIDDEN_MIG_CI; + } + else + { + timestamp = pRecord->timestamp; + pid = pRecord->pid; + context_id = pRecord->context_id; + } + + _fecsEventBufferAdd(pGpu, pBind, tag, + pid, + bMIGInUse ? swizzId : NV_EVENT_BUFFER_INVALID_MIG_GI, + bMIGInUse ? 
computeInstanceId : NV_EVENT_BUFFER_INVALID_MIG_CI, + context_id, timestamp); + } + } +} + +static NV_STATUS +_getFecsMemDesc +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + MEMORY_DESCRIPTOR **ppFecsMemDesc +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, GPU_GFID_PF); + + *ppFecsMemDesc = NULL; + NV_CHECK_OR_RETURN(LEVEL_SILENT, pGlobalCtxBuffers != NULL, NV_ERR_INVALID_STATE); + pMemDesc = pGlobalCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_FECS_EVENT]; + if (pMemDesc != NULL) + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + + *ppFecsMemDesc = pMemDesc; + + return NV_OK; +} + +static NV_STATUS +_getFecsEventListParameters +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + MEMORY_DESCRIPTOR **ppFecsMemDesc, + NvU32 *pFecsRecordSize +) +{ + const KGRAPHICS_STATIC_INFO *pStaticInfo; + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _getFecsMemDesc(pGpu, pKernelGraphics, ppFecsMemDesc)); + + pStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pStaticInfo->pFecsTraceDefines != NULL, NV_ERR_INVALID_STATE); + *pFecsRecordSize = pStaticInfo->pFecsTraceDefines->fecsRecordSize; + + return NV_OK; +} + +/*! + * @brief Set cached routing info for this KernelGraphics engine to make RPC calls + * into the specific MIG GPU instance to which this engine is assigned + * + * @param[in] pGpu + * @param[in] pKernelGraphics + * @param[in] hClient Client handle to make calls into MIG GPU instance + * @param[in] hSubdevice Subdevice handle to make calls into MIG GPU instance + * @param[in] localGrEngineIdx Local GR index for this GR engine in MIG GPU instance + */ +void fecsSetRoutingInfo +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvHandle hClient, + NvHandle hSubdevice, + NvU32 localGrEngineIdx +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + + pFecsTraceInfo->hClient = hClient; + pFecsTraceInfo->hSubdevice = hSubdevice; + pFecsTraceInfo->localGrEngineIdx = localGrEngineIdx; +} + +// Clear cached routing info used to make GR calls into specific MIG GPU instance +void fecsClearRoutingInfo +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + + pFecsTraceInfo->hClient = NV01_NULL_OBJECT; + pFecsTraceInfo->hSubdevice = NV01_NULL_OBJECT; + pFecsTraceInfo->localGrEngineIdx = 0; +} + +NV_STATUS +fecsCtxswLoggingInit +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + KGRAPHICS_FECS_TRACE_INFO **ppFecsTraceInfo +) +{ + NvU64 seed; + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo; + + NV_ASSERT_OR_RETURN(ppFecsTraceInfo != NULL, NV_ERR_NOT_SUPPORTED); + pFecsTraceInfo = portMemAllocNonPaged(sizeof(*pFecsTraceInfo)); + if (pFecsTraceInfo == NULL) + return NV_ERR_NO_MEMORY; + portMemSet(pFecsTraceInfo, 0, sizeof(*pFecsTraceInfo)); + + *ppFecsTraceInfo = pFecsTraceInfo; + + osGetCurrentTick(&seed); + pFecsTraceInfo->pFecsLogPrng = portCryptoPseudoRandomGeneratorCreate(seed); + multimapInit(&pGpu->fecsEventBufferBindingsUid, portMemAllocatorGetGlobalNonPaged()); + + kgraphicsInitFecsRegistryOverrides_HAL(pGpu, pKernelGraphics); + + return NV_OK; +} + +void +fecsCtxswLoggingTeardown +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + 
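+    //
+    // Undo fecsCtxswLoggingInit(): destroy the per-GPU UID binding multimap
+    // and the timestamp-noise PRNG, then free the per-engine trace info.
+    //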
KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + + multimapDestroy(&pGpu->fecsEventBufferBindingsUid); + + portCryptoPseudoRandomGeneratorDestroy(pFecsTraceInfo->pFecsLogPrng); + pFecsTraceInfo->pFecsLogPrng = NULL; + portMemFree(pFecsTraceInfo); +} + +/*! set num records to process per intr */ +void fecsSetRecordsPerIntr +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 recordsPerIntr +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + pFecsTraceInfo->fecsCtxswLogRecordsPerIntr = recordsPerIntr; +} + +NvBool +fecsBufferChanged +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + NvU8 *pFecsBufferMapping; + MEMORY_DESCRIPTOR *pFecsMemDesc = NULL; + NvU32 fecsRecordSize; + FECS_EVENT_RECORD *pPeekRecord; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pFecsTraceInfo != NULL, NV_FALSE); + pFecsBufferMapping = pFecsTraceInfo->pFecsBufferMapping; + + status = _getFecsEventListParameters(pGpu, pKernelGraphics, &pFecsMemDesc, &fecsRecordSize); + if ((status != NV_OK) || (pFecsMemDesc == NULL) || (pFecsBufferMapping == NULL)) + return NV_FALSE; + + pPeekRecord = (FECS_EVENT_RECORD*)(pFecsBufferMapping + + (pFecsTraceInfo->fecsTraceRdOffset * fecsRecordSize)); + + if ((pPeekRecord->magic_lo != NV_FECS_TRACE_MAGIC_INVALIDATED) && + (pPeekRecord->magic_lo != NV_FECS_TRACE_MAGIC_PENDING)) + { + pPeekRecord->magic_lo = NV_FECS_TRACE_MAGIC_PENDING; + return NV_TRUE; + } + + return NV_FALSE; +} + +/** + * @brief The callback function that transfers FECS Buffer entries to an EventBuffer + */ +void +nvEventBufferFecsCallback +( + OBJGPU *pGpu, + void *pArgs +) +{ + KernelGraphics *pKernelGraphics = (KernelGraphics*)pArgs; + NvU32 fecsReadOffset; + NvU32 fecsReadOffsetPrev; + NvU64 fecsBufferSize; + NvU32 fecsRecordSize; + NvU64 watermark; + NvU32 i, j; + NvU8 *pFecsBufferMapping; + MEMORY_DESCRIPTOR *pFecsMemDesc = NULL; + FECS_EVENT_RECORD *pPeekRecord; + NvU16 maxFecsRecordsPerIntr; + NV_STATUS status; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU8 numIterations = (pArgs == NULL) + ? 
KGRMGR_MAX_GR + : 1; + + NV_ASSERT_OR_RETURN_VOID(rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + NV_ASSERT_OR_RETURN_VOID(pGpu->fecsCtxswLogConsumerCount >= 0); + if (pGpu->fecsCtxswLogConsumerCount <= 0) + return; + + for (j = 0; j < numIterations; j++) + { + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo; + + if (pArgs == NULL) + { + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, j); + if (pKernelGraphics == NULL) + continue; + } + + pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + + pFecsBufferMapping = pFecsTraceInfo->pFecsBufferMapping; + maxFecsRecordsPerIntr = pFecsTraceInfo->fecsCtxswLogRecordsPerIntr; + + if (pFecsBufferMapping == NULL) + continue; + + status = _getFecsEventListParameters(pGpu, pKernelGraphics, &pFecsMemDesc, &fecsRecordSize); + if ((status != NV_OK) || (pFecsMemDesc == NULL)) + continue; + + fecsBufferSize = memdescGetSize(pFecsMemDesc) / fecsRecordSize; + NV_ASSERT_OR_RETURN_VOID(fecsBufferSize > 0); + fecsReadOffset = pFecsTraceInfo->fecsTraceRdOffset; + + if (!osIsRaisedIRQL()) + maxFecsRecordsPerIntr = fecsBufferSize; + + // Bail out if the buffer has not changed + pPeekRecord = (FECS_EVENT_RECORD*)(pFecsBufferMapping + + (fecsReadOffset * fecsRecordSize)); + + if (pPeekRecord->magic_lo == NV_FECS_TRACE_MAGIC_INVALIDATED) + continue; + + // Get the read offset from hw if the buffer wrapped around + fecsReadOffsetPrev = (fecsReadOffset - 1) % fecsBufferSize; + pPeekRecord = (FECS_EVENT_RECORD*)(pFecsBufferMapping + + (fecsReadOffsetPrev * fecsRecordSize)); + + if (pPeekRecord->magic_lo != NV_FECS_TRACE_MAGIC_INVALIDATED) + { + NvHandle hClient; + NvHandle hSubdevice; + NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET_PARAMS params; + + NV_PRINTF(LEVEL_ERROR, "FECS buffer overflow detected\n"); + + NV_ASSERT_OK_OR_ELSE( + status, + _fecsLoadInternalRoutingInfo(pGpu, + pKernelGraphics, + &hClient, + &hSubdevice, + ¶ms.grRouteInfo), + return); + + NV_ASSERT_OK_OR_ELSE( + status, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_RD_OFFSET, + ¶ms, + sizeof(params)), + return); + fecsReadOffset = params.offset; + pFecsTraceInfo->fecsTraceCounter = 0; + } + + // + // Over here we want to go through all EVENTNOTIFICATION nodes and + // loop through them in lockstep with the FECS_EVENT_RECORD records + // + for (i = 0; i < maxFecsRecordsPerIntr; ++i) + { + FECS_EVENT_RECORD *pCurrRecord = (FECS_EVENT_RECORD *)(pFecsBufferMapping + + (fecsReadOffset * fecsRecordSize)); + + if (pCurrRecord->magic_lo == NV_FECS_TRACE_MAGIC_INVALIDATED) + break; + + pCurrRecord->magic_lo = NV_FECS_TRACE_MAGIC_INVALIDATED; + osFlushCpuWriteCombineBuffer(); + + // Loop through all bound event buffers and copy filtered data to user buffers + formatAndNotifyFecsRecord(pGpu, pKernelGraphics, pCurrRecord); + + // Update read reg + pFecsTraceInfo->fecsTraceCounter++; + fecsReadOffset++; + if (fecsReadOffset >= fecsBufferSize) + { + fecsReadOffset = 0; + } + } + + // + // In order to avoid register accesses, only synchronize the position + // with hardware when the buffer exceeds a watermark level + // + watermark = (3 * fecsBufferSize) / 4; + if (pFecsTraceInfo->fecsTraceCounter > watermark) + { + NvHandle hClient; + NvHandle hSubdevice; + NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS params; + + params.offset = fecsReadOffset; + NV_ASSERT_OK_OR_ELSE( + status, + _fecsLoadInternalRoutingInfo(pGpu, + pKernelGraphics, + &hClient, + &hSubdevice, + ¶ms.grRouteInfo), + return); 
+ + NV_ASSERT_OK_OR_ELSE( + status, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET, + ¶ms, + sizeof(params)), + return); + pFecsTraceInfo->fecsTraceCounter = 0; + } + pFecsTraceInfo->fecsTraceRdOffset = fecsReadOffset; + + // Re-arm interrupt if there may be more records + if (i == maxFecsRecordsPerIntr) + fecsSignalIntrPendingIfNotPending(pGpu, pKernelGraphics); + } +} + +NV_STATUS +fecsAddBindpoint +( + OBJGPU *pGpu, + RsClient *pClient, + RsResourceRef *pEventBufferRef, + NvHandle hNotifier, + NvBool bAllUsers, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD levelOfDetail, + NvU32 eventFilter, + NvU8 version, + NvU32 *pReasonCode +) +{ + NV_STATUS status; + NvHandle hClient = pClient->hClient; + RmClient *pRmClient = dynamicCast(pClient, RmClient); + NvHandle hEventBuffer = pEventBufferRef->hResource; + EventBuffer *pEventBuffer; + NvBool bAdmin = osIsAdministrator(); + NvU32 eventMask = 0; + NvU64 targetUser; + NvS32 gpuConsumerCount = pGpu->fecsCtxswLogConsumerCount; + NvBool bFecsBindingActive = (pGpu->fecsCtxswLogConsumerCount > 0); + NvBool bScheduled = NV_FALSE; + NvBool bIntrDriven = NV_FALSE; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + NvU8 numIterations; + NvU8 grIdx; + NvBool bKernel; + NvBool bSelectLOD; + NV_EVENT_BUFFER_BIND_POINT_FECS *pBind; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + bKernel = pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL; + + bSelectLOD = bKernel; + +#if defined(DEBUG) || defined(DEVELOP) || defined(NV_VERIF_FEATURES) + bSelectLOD = NV_TRUE; +#endif + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if (bSelectLOD) + { + switch (levelOfDetail) + { + case NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_FULL: + eventMask = ~0; + break; + case NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_COMPAT: + eventMask |= NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_RESTORE_START | + NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_CONTEXT_START | + NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_CTXSW_REQ_BY_HOST; + break; + case NV2080_CTRL_GR_FECS_BIND_EVTBUF_LOD_CUSTOM: + eventMask = eventFilter; + break; + default: + // Default to SIMPLIFIED level-of-detail + eventMask |= NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SIMPLE_START | + NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SIMPLE_END; + } + } + else + { + // Default to SIMPLIFIED level-of-detail + eventMask |= NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SIMPLE_START | + NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_SIMPLE_END; + } + + if (eventMask & NV_EVENT_BUFFER_FECS_BITMASK_CTXSWTAG_FE_ACK) + { + eventMask |= NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_WFI | + NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_GFXP | + NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_CTAP | + NV_EVENT_BUFFER_FECS_CTXSWTAG_FE_ACK_CILP; + } + + if (bAllUsers) + { + targetUser = 0; + } + else + { + targetUser = (NvU64)(NvUPtr)pRmClient->pUserInfo; + + // Filtering UIDs is not yet implemented in legacy vGPU + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + if (pReasonCode != NULL) + *pReasonCode = NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED; + + return NV_ERR_NOT_SUPPORTED; + } + } + + pEventBuffer = dynamicCast(pEventBufferRef->pResource, EventBuffer); + if (NULL == pEventBuffer) + return NV_ERR_INVALID_ARGUMENT; + + if (NULL == multimapFindSubmap(&pGpu->fecsEventBufferBindingsUid, targetUser)) + { + if (NULL == multimapInsertSubmap(&pGpu->fecsEventBufferBindingsUid, 
targetUser)) + { + NV_PRINTF(LEVEL_ERROR, "failed to add UID binding!\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + + // If the binding exists already, we're done + if (NULL != multimapFindItem(&pGpu->fecsEventBufferBindingsUid, targetUser, (NvU64)(NvUPtr)pEventBuffer)) + return NV_OK; + + pBind = multimapInsertItemNew(&pGpu->fecsEventBufferBindingsUid, targetUser, (NvU64)(NvUPtr)pEventBuffer); + if (pBind == NULL) + return NV_ERR_INVALID_ARGUMENT; + ++pGpu->fecsCtxswLogConsumerCount; + + pBind->hClient = hClient; + pBind->hNotifier = hNotifier; + pBind->hEventBuffer = hEventBuffer; + pBind->pEventBuffer = pEventBuffer; + pBind->pUserInfo = (NvU64)(NvUPtr)pRmClient->pUserInfo; + pBind->bAdmin = bAdmin; + pBind->eventMask = eventMask; + pBind->bKernel = bKernel; + pBind->version = version; + + status = registerEventNotification(&pEventBuffer->pListeners, + hClient, + hNotifier, + hEventBuffer, + (version == 2 ? + NV_EVENT_BUFFER_RECORD_TYPE_FECS_CTX_SWITCH_V2 : + NV_EVENT_BUFFER_RECORD_TYPE_FECS_CTX_SWITCH) | NV01_EVENT_WITHOUT_EVENT_DATA, + NV_EVENT_BUFFER_BIND, + pEventBuffer->producerInfo.notificationHandle, + NV_FALSE); + if (status != NV_OK) + goto done; + + if (bMIGInUse) + { + if (kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + pBind->swizzId = NV2080_CTRL_GPU_PARTITION_ID_INVALID; + } + else + { + GPUInstanceSubscription *pGPUInstanceSubscription; + status = gisubscriptionGetGPUInstanceSubscription(pClient, hNotifier, &pGPUInstanceSubscription); + if (status != NV_OK) + goto done; + + if (pGPUInstanceSubscription->pKernelMIGGpuInstance == NULL) + { + if (pReasonCode != NULL) + *pReasonCode = NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED; + + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + pBind->swizzId = pGPUInstanceSubscription->pKernelMIGGpuInstance->swizzId; + } + } + + numIterations = bMIGInUse ? 
GPU_MAX_GRS: 1; + for (grIdx = 0; grIdx < numIterations; grIdx++) + { + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, grIdx); + if (pKernelGraphics == NULL) + continue; + + if (!kgraphicsIsCtxswLoggingSupported(pGpu, pKernelGraphics)) + { + if (pReasonCode) + *pReasonCode = NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NOT_ENABLED_GPU; + + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + if (!bFecsBindingActive) + { + + fecsBufferReset(pGpu, pKernelGraphics); + } + + bIntrDriven |= kgraphicsIsIntrDrivenCtxswLoggingEnabled(pGpu, pKernelGraphics); + } + + if (!bFecsBindingActive && !bIntrDriven) + { + status = osSchedule1SecondCallback(pGpu, + nvEventBufferFecsCallback, + NULL, + NV_OS_1HZ_REPEAT); + + if (status != NV_OK) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + bScheduled = NV_TRUE; + } + +done: + if (status != NV_OK) + { + if (gpuConsumerCount != pGpu->fecsCtxswLogConsumerCount) + fecsRemoveBindpoint(pGpu, targetUser, pBind); + + if (bScheduled) + osRemove1SecondRepeatingCallback(pGpu, nvEventBufferFecsCallback, NULL); + } + + return status; +} + +void +fecsRemoveBindpoint +( + OBJGPU *pGpu, + NvU64 uid, + NV_EVENT_BUFFER_BIND_POINT_FECS *pBind +) +{ + EventBuffer *pEventBuffer = pBind->pEventBuffer; + + --pGpu->fecsCtxswLogConsumerCount; + + unregisterEventNotificationWithData(&pEventBuffer->pListeners, + pBind->hClient, + pBind->hNotifier, + pBind->hEventBuffer, + NV_TRUE, + pEventBuffer->producerInfo.notificationHandle); + + multimapRemoveItemByKey(&pGpu->fecsEventBufferBindingsUid, + uid, + (NvU64)(NvUPtr)pEventBuffer); + + if (pGpu->fecsCtxswLogConsumerCount == 0) + { + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + NvU8 grIdx; + NvBool bIntrDriven = NV_FALSE; + + NvU8 numIterations = bMIGInUse ? GPU_MAX_GRS : 1; + for (grIdx = 0; grIdx < numIterations; grIdx++) + { + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, grIdx); + if (pKernelGraphics == NULL) + continue; + + // + // Disable HW without unmapping buffer so that new event buffers still work properly + // HW enable will happen on bindpoint creation. + // Mapping only occurs on Graphics load/alloc, so unmapping should only occur when Graphics is destroyed. 
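+            // (When a new consumer binds later, fecsAddBindpoint calls
+            // fecsBufferReset, which re-enables tracing via
+            // NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE.)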
+ // + fecsBufferDisableHw(pGpu, pKernelGraphics); + bIntrDriven |= kgraphicsIsIntrDrivenCtxswLoggingEnabled(pGpu, pKernelGraphics); + + } + + if (!bIntrDriven) + { + osRemove1SecondRepeatingCallback(pGpu, nvEventBufferFecsCallback, NULL); + } + } +} + +void +fecsRemoveAllBindpoints +( + EventBuffer *pEventBuffer +) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuMask = 0; + NvU32 gpuIndex = 0; + FecsEventBufferBindMultiMapSupermapIter uidBindIter; + + eventBufferSetEnable(&pEventBuffer->producerInfo, NV_FALSE); + + gpumgrGetGpuAttachInfo(NULL, &gpuMask); + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL) + { + uidBindIter = multimapSubmapIterAll(&pGpu->fecsEventBufferBindingsUid); + while (multimapSubmapIterNext(&uidBindIter)) + { + FecsEventBufferBindMultiMapSubmap *pSubmap = uidBindIter.pValue; + NV_EVENT_BUFFER_BIND_POINT_FECS *pBind = NULL; + NvU64 uid = mapKey_IMPL(uidBindIter.iter.pMap, pSubmap); + + while ((pBind = multimapFindItem(&pGpu->fecsEventBufferBindingsUid, + uid, + (NvU64)(NvUPtr)pEventBuffer)) != NULL) + { + fecsRemoveBindpoint(pGpu, uid, pBind); + } + } + + } +} + +void +fecsBufferReset +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + MEMORY_DESCRIPTOR *pFecsMemDesc = NULL; + NV_STATUS status; + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + + if (pFecsTraceInfo->pFecsBufferMapping == NULL) + return; + + status = _getFecsMemDesc(pGpu, pKernelGraphics, &pFecsMemDesc); + + if ((status == NV_OK) && (pFecsMemDesc != NULL)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvHandle hClient; + NvHandle hSubdevice; + NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET_PARAMS traceWrOffsetParams; + NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET_PARAMS traceRdOffsetParams; + NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS hwEnableParams; + + portMemSet(pFecsTraceInfo->pFecsBufferMapping, (NvU8)(NV_FECS_TRACE_MAGIC_INVALIDATED & 0xff), memdescGetSize(pFecsMemDesc)); + + traceWrOffsetParams.offset = 0; + NV_ASSERT_OK_OR_ELSE( + status, + _fecsLoadInternalRoutingInfo(pGpu, + pKernelGraphics, + &hClient, + &hSubdevice, + &traceWrOffsetParams.grRouteInfo), + return); + + // Routing info is the same for all future calls in this series + traceRdOffsetParams.grRouteInfo = traceWrOffsetParams.grRouteInfo; + hwEnableParams.grRouteInfo = traceWrOffsetParams.grRouteInfo; + + NV_ASSERT_OK_OR_ELSE( + status, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_WR_OFFSET, + &traceWrOffsetParams, + sizeof(traceWrOffsetParams)), + return); + + traceRdOffsetParams.offset = 0; + NV_ASSERT_OK_OR_ELSE( + status, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_RD_OFFSET, + &traceRdOffsetParams, + sizeof(traceRdOffsetParams)), + return); + pFecsTraceInfo->fecsTraceRdOffset = 0; + + hwEnableParams.bEnable = NV_TRUE; + NV_ASSERT_OK_OR_ELSE( + status, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE, + &hwEnableParams, + sizeof(hwEnableParams)), + return); + } +} + +void +fecsBufferDisableHw +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvHandle hClient; + NvHandle hSubdevice; + NV2080_CTRL_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE_PARAMS getHwEnableParams; + NV2080_CTRL_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE_PARAMS setHwEnableParams; + NV_STATUS status; + + // This function 
may be called with unused GR Engines + NV_CHECK_OK_OR_ELSE( + status, + LEVEL_INFO, + _fecsLoadInternalRoutingInfo(pGpu, + pKernelGraphics, + &hClient, + &hSubdevice, + &getHwEnableParams.grRouteInfo), + return); + + NV_ASSERT_OK_OR_ELSE( + status, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_GR_GET_FECS_TRACE_HW_ENABLE, + &getHwEnableParams, + sizeof(getHwEnableParams)), + return); + + if (getHwEnableParams.bEnable) + { + // Copy previously loaded routing info + setHwEnableParams.grRouteInfo = getHwEnableParams.grRouteInfo; + setHwEnableParams.bEnable = NV_FALSE; + + NV_ASSERT_OK_OR_ELSE( + status, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_GR_SET_FECS_TRACE_HW_ENABLE, + &setHwEnableParams, + sizeof(setHwEnableParams)), + return); + } +} + +void +fecsBufferTeardown +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + fecsBufferDisableHw(pGpu, pKernelGraphics); + fecsBufferUnmap(pGpu, pKernelGraphics); +} + +/*! Is the FECS trace buffer mapped? */ +NvBool +fecsBufferIsMapped +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN(pFecsTraceInfo != NULL, NV_FALSE); + return pFecsTraceInfo->pFecsBufferMapping != NULL; +} + +NV_STATUS +fecsBufferMap +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + MEMORY_DESCRIPTOR *pFecsMemDesc = NULL; + NvU8 *pFecsBufferMapping = NULL; + NV_STATUS status; + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN(pFecsTraceInfo != NULL, NV_ERR_INVALID_STATE); + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + return NV_OK; + + if (pFecsTraceInfo->pFecsBufferMapping != NULL) + return NV_OK; + + status = _getFecsMemDesc(pGpu, pKernelGraphics, &pFecsMemDesc); + if ((status != NV_OK) || (pFecsMemDesc == NULL)) + return NV_ERR_INVALID_STATE; + + pFecsBufferMapping = kbusMapRmAperture_HAL(pGpu, pFecsMemDesc); + if (pFecsBufferMapping == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pFecsTraceInfo->pFecsBufferMapping = pFecsBufferMapping; + + return NV_OK; +} + +void +fecsBufferUnmap +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + MEMORY_DESCRIPTOR *pFecsMemDesc = NULL; + NV_STATUS status; + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + return; + + status = _getFecsMemDesc(pGpu, pKernelGraphics, &pFecsMemDesc); + if ((status == NV_OK) && (pFecsMemDesc != NULL) && (pFecsTraceInfo->pFecsBufferMapping != NULL)) + kbusUnmapRmAperture_HAL(pGpu, pFecsMemDesc, + &pFecsTraceInfo->pFecsBufferMapping, + NV_TRUE); +} + +/*! Atomically set intr callback pending, return NV_TRUE if wasn't pending prior */ +NvBool +fecsSignalIntrPendingIfNotPending +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN(pFecsTraceInfo != NULL, NV_FALSE); + + return portAtomicCompareAndSwapU32(&pFecsTraceInfo->fecsCtxswLogIntrPending, 1, 0); +} + +/*! 
Atomically clear intr callback pending, return NV_TRUE if was pending */ +NvBool +fecsClearIntrPendingIfPending +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN(pFecsTraceInfo != NULL, NV_FALSE); + + return portAtomicCompareAndSwapU32(&pFecsTraceInfo->fecsCtxswLogIntrPending, 0, 1); +} + +/*! Atomically check if intr callback pending */ +NvBool fecsIsIntrPending +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN(pFecsTraceInfo != NULL, NV_FALSE); + + return portAtomicOrU32(&pFecsTraceInfo->fecsCtxswLogIntrPending, 0) != 0; +} + +/*! Retrieve the current VGPU staging buffer */ +VGPU_FECS_TRACE_STAGING_BUFFER * +fecsGetVgpuStagingBuffer +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN(pFecsTraceInfo != NULL, NULL); + + return pFecsTraceInfo->pVgpuStaging; +} + +/*! Store the given VGPU staging buffer */ +void +fecsSetVgpuStagingBuffer +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + VGPU_FECS_TRACE_STAGING_BUFFER *pStagingBuffer +) +{ + KGRAPHICS_FECS_TRACE_INFO *pFecsTraceInfo = kgraphicsGetFecsTraceInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pFecsTraceInfo != NULL); + + pFecsTraceInfo->pVgpuStaging = pStagingBuffer; +} + diff --git a/src/nvidia/src/kernel/gpu/gr/kernel_graphics.c b/src/nvidia/src/kernel/gpu/gr/kernel_graphics.c new file mode 100644 index 000000000..cde65f1e7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/kernel_graphics.c @@ -0,0 +1,3479 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + + +#include "kernel/gpu/gr/kernel_graphics_manager.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/device/device.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "kernel/rmapi/rmapi_utils.h" +#include "kernel/core/locks.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "kernel/mem_mgr/gpu_vaspace.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "nvRmReg.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mem_mgr/heap.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "gpu/mmu/kern_gmmu.h" +#include "rmapi/rs_utils.h" +#include "rmapi/client.h" + +#include "vgpu/vgpu_events.h" +#include "vgpu/rpc.h" + +#include "class/clb0c0.h" +#include "class/clb1c0.h" +#include "class/clc0c0.h" +#include "class/clc1c0.h" +#include "class/clc3c0.h" +#include "class/clc5c0.h" +#include "class/clc6c0.h" +#include "class/clc7c0.h" + +#include "class/cl0080.h" +#include "class/cl2080.h" +#include "class/cla06f.h" +#include "class/cla06fsubch.h" +#include "class/cl90f1.h" // FERMI_VASPACE_A +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl50a0.h" // NV50_MEMORY_VIRTUAL +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "class/clc36f.h" // VOLTA_CHANNEL_GPFIFO_A +#include "class/clc46f.h" // TURING_CHANNEL_GPFIFO_A +#include "class/clc56f.h" // AMPERE_CHANNEL_GPFIFO_A +#include "class/clc637.h" +#include "class/clc638.h" + +// +// We use NV2080_CTRL_INTERNAL_GR_MAX_GPC to statically allocate certain +// GPC related array in ctrl call header file. We will need to adjust +// NV2080_CTRL_INTERNAL_GR_MAX_GPC if some day KGRMGR_MAX_GPC gets changed +// +ct_assert(NV2080_CTRL_INTERNAL_GR_MAX_GPC == KGRMGR_MAX_GPC); + +// +// Ensure the external and internal ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT +// will always in sync +// +ct_assert(NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT == + NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT); + +typedef struct KGRAPHICS_PRIVATE_DATA +{ + NvBool bInitialized; + KGRAPHICS_STATIC_INFO staticInfo; +} KGRAPHICS_PRIVATE_DATA; +static NV_STATUS _kgraphicsMapGlobalCtxBuffer(OBJGPU *pGpu, KernelGraphics *pKernelGraphics, NvU32 gfid, OBJVASPACE *, + KernelGraphicsContext *, GR_GLOBALCTX_BUFFER, NvBool bIsReadOnly); +static NV_STATUS _kgraphicsPostSchedulingEnableHandler(OBJGPU *, void *); + +NV_STATUS +kgraphicsConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + ENGDESCRIPTOR engDesc +) +{ + KGRAPHICS_PRIVATE_DATA *pPrivate; + NvU32 idx; + GR_GLOBALCTX_BUFFER buf; + GR_CTX_BUFFER localBuf; + + pKernelGraphics->instance = ENGDESC_FIELD(engDesc, _INST); + + pPrivate = portMemAllocNonPaged(sizeof(*pPrivate)); + if (pPrivate == NULL) + return NV_ERR_NO_MEMORY; + portMemSet(pPrivate, 0, sizeof(*pPrivate)); + pKernelGraphics->pPrivate = pPrivate; + + // All local ctx buffers default to uncached FB preferred + FOR_EACH_IN_ENUM(GR_CTX_BUFFER, localBuf) + { + pKernelGraphics->ctxAttr[localBuf].pAllocList = ADDRLIST_FBMEM_PREFERRED; + pKernelGraphics->ctxAttr[localBuf].cpuAttr = NV_MEMORY_UNCACHED; + } + FOR_EACH_IN_ENUM_END; + + // Process instloc overrides + { + struct + { + GR_CTX_BUFFER buf; + NvU32 override; + } instlocOverrides[] = + { + { GR_CTX_BUFFER_MAIN, DRF_VAL(_REG_STR_RM, _INST_LOC, _GRCTX, pGpu->instLocOverrides) }, + { GR_CTX_BUFFER_PATCH, DRF_VAL(_REG_STR_RM, _INST_LOC_2, _CTX_PATCH, pGpu->instLocOverrides2) }, + { 
GR_CTX_BUFFER_ZCULL, DRF_VAL(_REG_STR_RM, _INST_LOC_2, _ZCULLCTX, pGpu->instLocOverrides2) }, + { GR_CTX_BUFFER_PM, DRF_VAL(_REG_STR_RM, _INST_LOC_2, _PMCTX, pGpu->instLocOverrides2) }, + { GR_CTX_BUFFER_PREEMPT, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _PREEMPT_BUFFER, pGpu->instLocOverrides3) }, + { GR_CTX_BUFFER_BETA_CB, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _GFXP_BETACB_BUFFER, pGpu->instLocOverrides3) }, + { GR_CTX_BUFFER_PAGEPOOL, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _GFXP_PAGEPOOL_BUFFER, pGpu->instLocOverrides3) }, + { GR_CTX_BUFFER_SPILL, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _GFXP_SPILL_BUFFER, pGpu->instLocOverrides3) }, + { GR_CTX_BUFFER_RTV_CB, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _GFXP_RTVCB_BUFFER, pGpu->instLocOverrides3) } + }; + + for (idx = 0; idx < NV_ARRAY_ELEMENTS(instlocOverrides); ++idx) + { + memdescOverrideInstLocList(instlocOverrides[idx].override, + NV_ENUM_TO_STRING(GR_CTX_BUFFER, instlocOverrides[idx].buf), + &pKernelGraphics->ctxAttr[instlocOverrides[idx].buf].pAllocList, + &pKernelGraphics->ctxAttr[instlocOverrides[idx].buf].cpuAttr); + } + } + + // Most global ctx buffers default to uncached FB preferred + FOR_EACH_IN_ENUM(GR_GLOBALCTX_BUFFER, buf) + { + pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr[buf].pAllocList = ADDRLIST_FBMEM_PREFERRED; + pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr[buf].cpuAttr = NV_MEMORY_UNCACHED; + } + FOR_EACH_IN_ENUM_END; + + // FECS event buffer defaults to cached SYSMEM + pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr[GR_GLOBALCTX_BUFFER_FECS_EVENT].pAllocList = ADDRLIST_SYSMEM_ONLY; + pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr[GR_GLOBALCTX_BUFFER_FECS_EVENT].cpuAttr = NV_MEMORY_CACHED; + + // Process instloc overrides + { + struct + { + GR_GLOBALCTX_BUFFER buf; + NvU32 override; + } instlocOverrides[] = + { + { GR_GLOBALCTX_BUFFER_FECS_EVENT, DRF_VAL(_REG_STR_RM, _INST_LOC_4, _FECS_EVENT_BUF, pGpu->instLocOverrides4) }, + { GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB, DRF_VAL(_REG_STR_RM, _INST_LOC_2, _ATTR_CB, pGpu->instLocOverrides2) }, + { GR_GLOBALCTX_BUFFER_BUNDLE_CB, DRF_VAL(_REG_STR_RM, _INST_LOC_2, _BUNDLE_CB, pGpu->instLocOverrides2) }, + { GR_GLOBALCTX_BUFFER_PAGEPOOL, DRF_VAL(_REG_STR_RM, _INST_LOC_2, _PAGEPOOL, pGpu->instLocOverrides2) }, + { GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _PRIV_ACCESS_MAP, pGpu->instLocOverrides3) }, + { GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _PRIV_ACCESS_MAP, pGpu->instLocOverrides3) }, + { GR_GLOBAL_BUFFER_GLOBAL_PRIV_ACCESS_MAP, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _PRIV_ACCESS_MAP, pGpu->instLocOverrides3) }, + { GR_GLOBALCTX_BUFFER_RTV_CB, DRF_VAL(_REG_STR_RM, _INST_LOC_3, _RTVCB_BUFFER, pGpu->instLocOverrides3) } + }; + + for (idx = 0; idx < NV_ARRAY_ELEMENTS(instlocOverrides); ++idx) + { + memdescOverrideInstLocList(instlocOverrides[idx].override, + NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, instlocOverrides[idx].buf), + &pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr[instlocOverrides[idx].buf].pAllocList, + &pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr[instlocOverrides[idx].buf].cpuAttr); + } + } + + // Copy final global buffer attributes for local versions + FOR_EACH_IN_ENUM(GR_GLOBALCTX_BUFFER, buf) + { + // Host RM cannot allocate system memory on behalf of the VF RM, so force FB. 
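+        // vfGlobalCtxAttr is kept separate from globalCtxAttr so the VF case can
+        // force FB-only placement without touching the defaults set above;
+        // localCtxAttr below starts out as a copy of the global settings.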
+ pKernelGraphics->globalCtxBuffersInfo.vfGlobalCtxAttr[buf].pAllocList = ADDRLIST_FBMEM_ONLY; + pKernelGraphics->globalCtxBuffersInfo.vfGlobalCtxAttr[buf].cpuAttr = NV_MEMORY_UNCACHED; + + // Local context allocation + pKernelGraphics->globalCtxBuffersInfo.localCtxAttr[buf] = pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr[buf]; + } + FOR_EACH_IN_ENUM_END; + + // + // Default context buffers to non size aligned. The attribute buffer is + // required to be mapped with an offset naturally aligned to the size. + // + for (idx = 0; idx < GR_GLOBALCTX_BUFFER_COUNT; idx++) + pKernelGraphics->globalCtxBuffersInfo.bSizeAligned[idx] = NV_FALSE; + pKernelGraphics->globalCtxBuffersInfo.bSizeAligned[GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB] = NV_TRUE; + + NV_ASSERT_OK_OR_RETURN(fecsCtxswLoggingInit(pGpu, pKernelGraphics, &pKernelGraphics->pFecsTraceInfo)); + + return NV_OK; +} + +void +kgraphicsDestruct_IMPL +( + KernelGraphics *pKernelGraphics +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelGraphics); + + fecsCtxswLoggingTeardown(pGpu, pKernelGraphics); + pKernelGraphics->pFecsTraceInfo = NULL; + kgraphicsInvalidateStaticInfo(pGpu, pKernelGraphics); + + portMemFree(pKernelGraphics->pPrivate); + pKernelGraphics->pPrivate = NULL; +} + +NV_STATUS +kgraphicsStateInitLocked_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + NvU32 nGlobalCtx = 1; + NvU32 numClasses; + + NV_ASSERT_OK_OR_RETURN(gpuGetClassList(pGpu, &numClasses, NULL, ENG_GR(pKernelGraphics->instance))); + + // + // Number of supported class can be zero when Graphics engine is disabled, but we still + // need those classes in ClassDB for KernelGraphics engine operation, thus here we are adding + // the ENG_GR(X) supported classes back to ClassDB + // + if (numClasses == 0) + { + PGPU_ENGINE_ORDER pEngineOrder = &pGpu->engineOrder; + const CLASSDESCRIPTOR *pClassDesc = &pEngineOrder->pClassDescriptors[0]; + NvU32 i; + NvU32 classNum; + + for (i = 0; i < pEngineOrder->numClassDescriptors; i++) + { + classNum = pClassDesc[i].externalClassId; + if (classNum == (NvU32)~0) + continue; + + if (ENG_GR(pKernelGraphics->instance) == pClassDesc[i].engDesc) + { + NV_PRINTF(LEVEL_INFO, "Adding class ID 0x%x to ClassDB\n", classNum); + NV_ASSERT_OK_OR_RETURN( + gpuAddClassToClassDBByEngTagClassId(pGpu, ENG_GR(pKernelGraphics->instance), classNum)); + } + } + } + + // + // Allocate guest context db array + // + if (gpuIsSriovEnabled(pGpu)) + { + nGlobalCtx = VMMU_MAX_GFID; + } + + pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers = portMemAllocNonPaged(sizeof(*pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers) * nGlobalCtx); + if (pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers, 0, + sizeof(*pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers) * nGlobalCtx); + + if (pKernelGraphics->instance == 0) + { + // + // GSP_CLIENT creates the golden context channel GR post load. However, + // if PMA scrubber is enabled, a scrubber channel must be constructed + // first as a part of Fifo post load. Hence, add the golden channel + // creation as a fifo post-scheduling-enablement callback. 
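+        // _kgraphicsPostSchedulingEnableHandler returns
+        // NV_WARN_MORE_PROCESSING_REQUIRED while the scrubber is enabled but not
+        // yet valid, so golden channel creation happens only after the scrubber
+        // has been constructed.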
+ // + kfifoAddSchedulingHandler(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + _kgraphicsPostSchedulingEnableHandler, + (void *)((NvUPtr)(pKernelGraphics->instance)), + NULL, NULL); + } + + return NV_OK; +} + +NV_STATUS +kgraphicsStateUnload_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 flags + +) +{ + if (pKernelGraphics->instance != 0) + return NV_OK; + + kfifoRemoveSchedulingHandler(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + _kgraphicsPostSchedulingEnableHandler, + (void *)((NvUPtr)(pKernelGraphics->instance)), + NULL, NULL); + + return NV_OK; +} + +NV_STATUS +kgraphicsStateLoad_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 flags +) +{ + if (pGpu->fecsCtxswLogConsumerCount > 0) + { + fecsBufferMap(pGpu, pKernelGraphics); + fecsBufferReset(pGpu, pKernelGraphics); + } + + return NV_OK; +} + +NV_STATUS +kgraphicsStatePreUnload_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 flags +) +{ + fecsBufferUnmap(pGpu, pKernelGraphics); + + // Release global buffers used as part of the gr context, when not in S/R + if (!(flags & GPU_STATE_FLAGS_PRESERVING)) + kgraphicsFreeGlobalCtxBuffers(pGpu, pKernelGraphics, GPU_GFID_PF); + + return NV_OK; +} + +void +kgraphicsStateDestroy_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + fecsBufferTeardown(pGpu, pKernelGraphics); + + portMemFree(pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers); + pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers = NULL; +} + +NvBool kgraphicsIsPresent_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 unused; + + if (IsDFPGA(pGpu)) + return NV_FALSE; + + if (IS_MODS_AMODEL(pGpu)) + return NV_TRUE; + + return kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, NV2080_ENGINE_TYPE_GR(pKernelGraphics->instance), + ENGINE_INFO_TYPE_INVALID, &unused) == NV_OK; +} + +NV_STATUS +kgraphicsStatePostLoad_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 flags +) +{ + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kgraphicsLoadStaticInfo(pGpu, pKernelGraphics, KMIGMGR_SWIZZID_INVALID)); + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + + if ((!IS_VIRTUAL(pGpu)) && + (pKernelGraphicsStaticInfo != NULL) && + (pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL) && + (!pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[GPU_GFID_PF].bFecsBufferAllocated)) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsAllocGlobalCtxBuffers_HAL(pGpu, pKernelGraphics, GPU_GFID_PF)); + } + + return NV_OK; +} + +/*! + * @brief Create a golden image channel after Fifo post load + * Instead of lazily waiting until first client request, we proactively create a + * golden channel here. 
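+ *
+ * @param[in] pGpu      OBJGPU pointer
+ * @param[in] pGrIndex  GR engine index for this handler, passed as a
+ *                      pointer-sized value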
+ */ +static NV_STATUS +_kgraphicsPostSchedulingEnableHandler +( + OBJGPU *pGpu, + void *pGrIndex +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, ((NvU32)(NvUPtr)pGrIndex)); + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + + // Nothing to do for non-GSPCLIENT + if (!IS_GSP_CLIENT(pGpu)) + return NV_OK; + + // Defer golden context channel creation to GPU instance configuration + if (IS_MIG_ENABLED(pGpu)) + return NV_OK; + + // Skip for MIG engines with 0 GPCs + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + if (pKernelGraphicsStaticInfo->floorsweepingMasks.gpcMask == 0x0) + return NV_OK; + + if (memmgrIsPmaInitialized(pMemoryManager)) + { + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU32 pmaConfig = PMA_QUERY_SCRUB_ENABLED | PMA_QUERY_SCRUB_VALID; + + NV_ASSERT_OK_OR_RETURN(pmaQueryConfigs(&pHeap->pmaObject, &pmaConfig)); + + // + // Scrubber is also constructed from the same Fifo post scheduling + // enable callback queue. This check enforces the dependency that + // scrubber must be initialized first + // + if ((pmaConfig & PMA_QUERY_SCRUB_ENABLED) && + !(pmaConfig & PMA_QUERY_SCRUB_VALID)) + { + return NV_WARN_MORE_PROCESSING_REQUIRED; + } + } + + return kgraphicsCreateGoldenImageChannel(pGpu, pKernelGraphics); +} + +void +kgraphicsInvalidateStaticInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + portMemFree(pKernelGraphics->pPrivate->staticInfo.pGrInfo); + pKernelGraphics->pPrivate->staticInfo.pGrInfo = NULL; + + portMemFree(pKernelGraphics->pPrivate->staticInfo.pPpcMasks); + pKernelGraphics->pPrivate->staticInfo.pPpcMasks = NULL; + + portMemFree(pKernelGraphics->pPrivate->staticInfo.pZcullInfo); + pKernelGraphics->pPrivate->staticInfo.pZcullInfo = NULL; + + portMemFree(pKernelGraphics->pPrivate->staticInfo.pRopInfo); + pKernelGraphics->pPrivate->staticInfo.pRopInfo = NULL; + + portMemFree(pKernelGraphics->pPrivate->staticInfo.pContextBuffersInfo); + pKernelGraphics->pPrivate->staticInfo.pContextBuffersInfo = NULL; + + portMemFree(pKernelGraphics->pPrivate->staticInfo.pSmIssueRateModifier); + pKernelGraphics->pPrivate->staticInfo.pSmIssueRateModifier = NULL; + + portMemFree(pKernelGraphics->pPrivate->staticInfo.pFecsTraceDefines); + pKernelGraphics->pPrivate->staticInfo.pFecsTraceDefines = NULL; + + portMemSet(&pKernelGraphics->pPrivate->staticInfo, 0, sizeof(pKernelGraphics->pPrivate->staticInfo)); + pKernelGraphics->pPrivate->bInitialized = NV_FALSE; +} + +const KGRAPHICS_STATIC_INFO * +kgraphicsGetStaticInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + KGRAPHICS_PRIVATE_DATA *pPrivate = pKernelGraphics->pPrivate; + return ((pPrivate != NULL) && pPrivate->bInitialized) ? 
&pPrivate->staticInfo : NULL; +} + +static NV_STATUS +_kgraphicsInternalClientAlloc +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 swizzId, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubdevice +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); //FIXME = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 grIdx = pKernelGraphics->instance; + + NV_ASSERT_OR_RETURN(phClient != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(phDevice != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(phSubdevice != NULL, NV_ERR_INVALID_ARGUMENT); + + if (IS_MIG_IN_USE(pGpu)) + { + NvHandle hSubscription; + + // Delay initialization to GPU instance configuration + if (swizzId == KMIGMGR_SWIZZID_INVALID) + return NV_ERR_NOT_READY; + + // With MIG enabled, we need to use a client subscribed to the correct GPU instance. + NV_ASSERT_OK_OR_RETURN( + rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, phClient, phDevice, phSubdevice)); + + { + NVC637_ALLOCATION_PARAMETERS params; + NV_ASSERT_OK( + serverutilGenResourceHandle(*phClient, &hSubscription)); + portMemSet(¶ms, 0, sizeof(params)); + params.swizzId = swizzId; + NV_ASSERT_OK( + pRmApi->AllocWithHandle(pRmApi, *phClient, *phSubdevice, hSubscription, AMPERE_SMC_PARTITION_REF, ¶ms)); + } + + } + else if (grIdx != 0) + { + // Static data is only defined for GR0 in legacy mode + return NV_ERR_NOT_READY; + } + else + { + NV_ASSERT_OK_OR_RETURN( + rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, phClient, phDevice, phSubdevice)); + } + + return NV_OK; +} + +/*! + * @brief Initialize static data that isn't collected right away + */ +NV_STATUS +kgraphicsInitializeDeferredStaticData_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvHandle hClient, + NvHandle hSubdevice +) +{ + NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *pParams; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + KGRAPHICS_PRIVATE_DATA *pPrivate = pKernelGraphics->pPrivate; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvU32 grIdx = pKernelGraphics->instance; + NV_STATUS status = NV_OK; + NvBool bInternalClientAllocated = NV_FALSE; + NvU32 gfid; + NvBool bCallingContextPlugin; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + NV_ASSERT_OK_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin)); + + if (bCallingContextPlugin) + { + gfid = GPU_GFID_PF; + } + + // Not ready + if (!pPrivate->bInitialized) + return NV_OK; + + // Already done + if (pPrivate->staticInfo.pContextBuffersInfo != NULL) + return NV_OK; + + // In progress + if (pKernelGraphics->bCollectingDeferredStaticData) + return NV_OK; + + if (hClient == NV01_NULL_OBJECT) + { + NvHandle hDevice = NV01_NULL_OBJECT; + NvU32 swizzId = KMIGMGR_SWIZZID_INVALID; + + if (IS_MIG_IN_USE(pGpu)) + { + MIG_INSTANCE_REF ref; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetMIGReferenceFromEngineType(pGpu, pKernelMIGManager, + NV2080_ENGINE_TYPE_GR(pKernelGraphics->instance), &ref)); + + swizzId = ref.pKernelMIGGpuInstance->swizzId; + } + + status = _kgraphicsInternalClientAlloc(pGpu, pKernelGraphics, swizzId, &hClient, &hDevice, &hSubdevice); + if (status == NV_ERR_NOT_READY) + { + return NV_OK; + } + NV_ASSERT_OR_RETURN(status == NV_OK, status); + NV_ASSERT_OR_RETURN(hClient != NV01_NULL_OBJECT, NV_ERR_INVALID_STATE); + bInternalClientAllocated = NV_TRUE; + } + + // Prevent recursion when deferred static data collection is ON + pKernelGraphics->bCollectingDeferredStaticData = NV_TRUE; + + if 
(IS_MIG_IN_USE(pGpu)) + { + MIG_INSTANCE_REF ref; + NvU32 localEngineType; + + // Physical RM will fill with local indices, so localize the index + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref), + cleanup); + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_GR(grIdx), + &localEngineType), + cleanup); + grIdx = NV2080_ENGINE_TYPE_GR_IDX(localEngineType); + } + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(pParams, 0, sizeof(*pParams)); + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO, + pParams, + sizeof(*pParams)), + cleanup_context_buffers_info); + + pPrivate->staticInfo.pContextBuffersInfo = + portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pContextBuffersInfo)); + + if (pPrivate->staticInfo.pContextBuffersInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup_context_buffers_info; + } + + portMemCopy(pPrivate->staticInfo.pContextBuffersInfo, + sizeof(*pPrivate->staticInfo.pContextBuffersInfo), + &pParams->engineContextBuffersInfo[grIdx], + sizeof(pParams->engineContextBuffersInfo[grIdx])); + +cleanup_context_buffers_info: + portMemFree(pParams); + + // + // We are not cleaning pContextBuffersInfo here since it's used after this + // function so has to be cleaned after used. + // + +cleanup: + if (bInternalClientAllocated) + { + pRmApi->Free(pRmApi, hClient, hClient); + } + + pKernelGraphics->bCollectingDeferredStaticData = NV_FALSE; + + if (status == NV_OK) + { + // + // Allocate Ctx Buffers that are global to all channels if they have yet + // to be allocated. We delay them until now to save memory when runs + // are done without using graphics contexts! 
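+        // This allocation only runs when client-RM-allocated context buffers
+        // are disabled, or for the PF GFID with SR-IOV enabled (see the
+        // condition below).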
+ // + if (!pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid].bAllocated && + (!gpuIsClientRmAllocatedCtxBufferEnabled(pGpu) || + (gpuIsSriovEnabled(pGpu) && IS_GFID_PF(gfid)))) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsAllocGrGlobalCtxBuffers_HAL(pGpu, pKernelGraphics, gfid, NULL)); + } + } + + return status; +} + +NV_STATUS +kgraphicsLoadStaticInfo_KERNEL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 swizzId +) +{ + KGRAPHICS_PRIVATE_DATA *pPrivate = pKernelGraphics->pPrivate; + NvHandle hClient = NV01_NULL_OBJECT; + NvHandle hDevice; + NvHandle hSubdevice; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvU32 grIdx = pKernelGraphics->instance; + NV_STATUS status = NV_OK; + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + union + { + NV2080_CTRL_INTERNAL_STATIC_GR_GET_CAPS_PARAMS caps; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_INFO_PARAMS info; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_GLOBAL_SM_ORDER_PARAMS globalSmOrder; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_FLOORSWEEPING_MASKS_PARAMS floorsweepingMasks; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_PPC_MASKS_PARAMS ppcMasks; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_ZCULL_INFO_PARAMS zcullInfo; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_ROP_INFO_PARAMS ropInfo; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS smIssueRateModifier; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_RECORD_SIZE_PARAMS fecsRecordSize; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_FECS_TRACE_DEFINES_PARAMS fecsTraceDefines; + NV2080_CTRL_INTERNAL_STATIC_GR_GET_PDB_PROPERTIES_PARAMS pdbProperties; + } *pParams = NULL; + + NV_ASSERT_OR_RETURN(pPrivate != NULL, NV_ERR_INVALID_STATE); + + if (pPrivate->bInitialized) + return NV_OK; + + status = _kgraphicsInternalClientAlloc(pGpu, pKernelGraphics, swizzId, &hClient, &hDevice, &hSubdevice); + + if (status == NV_ERR_NOT_READY) + { + return NV_OK; + } + NV_ASSERT_OR_RETURN(status == NV_OK, status); + NV_ASSERT_OR_RETURN(hClient != NV01_NULL_OBJECT, NV_ERR_INVALID_STATE); + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + NvU32 localEngineType; + + // Physical RM will fill with local indices, so localize the index + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref), + cleanup); + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_GR(grIdx), + &localEngineType), + cleanup); + grIdx = NV2080_ENGINE_TYPE_GR_IDX(localEngineType); + } + + // GR Caps + portMemSet(pParams, 0, sizeof(*pParams)); + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CAPS, + pParams, + sizeof(pParams->caps)), + cleanup); + + portMemCopy(&pPrivate->staticInfo.grCaps, sizeof(pPrivate->staticInfo.grCaps), + &pParams->caps.engineCaps[grIdx], sizeof(pParams->caps.engineCaps[grIdx])); + + // GR Info + portMemSet(pParams, 0, sizeof(*pParams)); + status = pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_INFO, + pParams, + sizeof(pParams->info)); + + if (status == NV_OK) + { + pPrivate->staticInfo.pGrInfo = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pGrInfo)); + if (pPrivate->staticInfo.pGrInfo == NULL) + { + status = 
NV_ERR_NO_MEMORY; + goto cleanup; + } + + portMemCopy(pPrivate->staticInfo.pGrInfo->infoList, + NV0080_CTRL_GR_INFO_MAX_SIZE * sizeof(*pPrivate->staticInfo.pGrInfo->infoList), + pParams->info.engineInfo[grIdx].infoList, + NV0080_CTRL_GR_INFO_MAX_SIZE * sizeof(*pParams->info.engineInfo[grIdx].infoList)); + + } + + // Floorsweeping masks + portMemSet(pParams, 0, sizeof(*pParams)); + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FLOORSWEEPING_MASKS, + pParams, + sizeof(pParams->floorsweepingMasks)), + cleanup); + + portMemCopy(&pPrivate->staticInfo.floorsweepingMasks, sizeof(pPrivate->staticInfo.floorsweepingMasks), + &pParams->floorsweepingMasks.floorsweepingMasks[grIdx], sizeof(pParams->floorsweepingMasks.floorsweepingMasks[grIdx])); + + // + // Most of GR is stub'd in AMODEL. However, some tests still need the CAPS/INFO data, + // so we still need to generate CAPS/INFO data for AMODEL + // + if (IS_MODS_AMODEL(pGpu)) + { + pPrivate->bInitialized = NV_TRUE; + if (!IS_MIG_IN_USE(pGpu) && (grIdx == 0)) + { + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + // Cache legacy GR mask info (i.e. GR0 with MIG disabled) to pKernelGraphicsManager->legacyFsMaskState + kgrmgrSetLegacyKgraphicsStaticInfo(pGpu, pKernelGraphicsManager, pKernelGraphics); + } + status = NV_OK; + goto cleanup; + } + + // GR Global SM Order + portMemSet(pParams, 0, sizeof(*pParams)); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_GLOBAL_SM_ORDER, + pParams, + sizeof(pParams->globalSmOrder)), + cleanup); + + portMemCopy(&pPrivate->staticInfo.globalSmOrder, sizeof(pPrivate->staticInfo.globalSmOrder), + &pParams->globalSmOrder.globalSmOrder[grIdx], sizeof(pParams->globalSmOrder.globalSmOrder[grIdx])); + + // PPC Mask + portMemSet(pParams, 0, sizeof(*pParams)); + status = pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_PPC_MASKS, + pParams, + sizeof(pParams->ppcMasks)); + + if (status == NV_OK) + { + pPrivate->staticInfo.pPpcMasks = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pPpcMasks)); + if (pPrivate->staticInfo.pPpcMasks == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + portMemCopy(pPrivate->staticInfo.pPpcMasks, sizeof(*pPrivate->staticInfo.pPpcMasks), + &pParams->ppcMasks.enginePpcMasks[grIdx], sizeof(pParams->ppcMasks.enginePpcMasks[grIdx])); + } + else if (status == NV_ERR_NOT_SUPPORTED) + { + // + // Some chips don't support this call, so just keep the pPpcMasks + // pointer as NULL, but don't return error + // + status = NV_OK; + } + + portMemSet(pParams, 0, sizeof(*pParams)); + status = pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_ZCULL_INFO, + pParams, + sizeof(pParams->zcullInfo)); + + if (status == NV_OK) + { + pPrivate->staticInfo.pZcullInfo = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pZcullInfo)); + if (pPrivate->staticInfo.pZcullInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + portMemCopy(pPrivate->staticInfo.pZcullInfo, sizeof(*pPrivate->staticInfo.pZcullInfo), + &pParams->zcullInfo.engineZcullInfo[grIdx], sizeof(pParams->zcullInfo.engineZcullInfo[grIdx])); + } + else if (status == NV_ERR_NOT_SUPPORTED) + { + // It's expected to get this error when MIG is enabled, thus don't return error + if (IS_MIG_ENABLED(pGpu)) + { + status = NV_OK; + } + } + + // 
ROP Info + portMemSet(pParams, 0, sizeof(*pParams)); + status = pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_ROP_INFO, + pParams, + sizeof(pParams->ropInfo)); + + if (status == NV_OK) + { + pPrivate->staticInfo.pRopInfo = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pRopInfo)); + if (pPrivate->staticInfo.pRopInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + portMemCopy(pPrivate->staticInfo.pRopInfo, sizeof(*pPrivate->staticInfo.pRopInfo), + &pParams->ropInfo.engineRopInfo[grIdx], sizeof(pParams->ropInfo.engineRopInfo[grIdx])); + } + else if (status == NV_ERR_NOT_SUPPORTED) + { + // It's expected to get this error when MIG is enabled, thus don't return error + if (IS_MIG_ENABLED(pGpu)) + { + status = NV_OK; + } + } + + // SM Issue Rate Modifier + portMemSet(pParams, 0, sizeof(*pParams)); + status = pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_SM_ISSUE_RATE_MODIFIER, + pParams, + sizeof(pParams->smIssueRateModifier)); + + if (status == NV_OK) + { + pPrivate->staticInfo.pSmIssueRateModifier = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pSmIssueRateModifier)); + if (pPrivate->staticInfo.pSmIssueRateModifier == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + portMemCopy(pPrivate->staticInfo.pSmIssueRateModifier, sizeof(*pPrivate->staticInfo.pSmIssueRateModifier), + &pParams->smIssueRateModifier.smIssueRateModifier[grIdx], sizeof(pParams->smIssueRateModifier.smIssueRateModifier[grIdx])); + } + else if (status == NV_ERR_NOT_SUPPORTED) + { + status = NV_OK; + } + + // FECS Record Size + portMemSet(pParams, 0, sizeof(*pParams)); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_RECORD_SIZE, + pParams, + sizeof(pParams->fecsRecordSize)), + cleanup); + + pPrivate->staticInfo.fecsRecordSize.fecsRecordSize = pParams->fecsRecordSize.fecsRecordSize[grIdx].fecsRecordSize; + + // FECS Trace Defines + portMemSet(pParams, 0, sizeof(*pParams)); + status = pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_FECS_TRACE_DEFINES, + pParams, + sizeof(pParams->fecsTraceDefines)); + if (status == NV_OK) + { + pPrivate->staticInfo.pFecsTraceDefines = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pFecsTraceDefines)); + if (pPrivate->staticInfo.pFecsTraceDefines == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + portMemCopy(pPrivate->staticInfo.pFecsTraceDefines, sizeof(*pPrivate->staticInfo.pFecsTraceDefines), + &pParams->fecsTraceDefines.fecsTraceDefines[grIdx], sizeof(pParams->fecsTraceDefines.fecsTraceDefines[grIdx])); + } + else if (status == NV_ERR_NOT_SUPPORTED) + { + status = NV_OK; + } + + // PDB Properties + portMemSet(pParams, 0, sizeof(*pParams)); + NV_CHECK_OK_OR_GOTO( + status, + LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_PDB_PROPERTIES, + pParams, + sizeof(pParams->pdbProperties)), + cleanup); + + portMemCopy(&pPrivate->staticInfo.pdbTable, sizeof(pPrivate->staticInfo.pdbTable), + &pParams->pdbProperties.pdbTable[grIdx], sizeof(pParams->pdbProperties.pdbTable[grIdx])); + kgraphicsSetPerSubcontextContextHeaderSupported(pGpu, pKernelGraphics, pPrivate->staticInfo.pdbTable.bPerSubCtxheaderSupported); + + // Publish static configuration + pPrivate->bInitialized = NV_TRUE; + + // The deferred data is ready after MIG is enabled, so no need to defer the initialization 
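+    // (i.e. collect the context buffer info immediately when MIG is in use,
+    // and otherwise only if context init is not being deferred.)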
+ if (IS_MIG_IN_USE(pGpu) || + !kgraphicsShouldDeferContextInit(pGpu, pKernelGraphics)) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kgraphicsInitializeDeferredStaticData(pGpu, pKernelGraphics, hClient, hSubdevice), cleanup); + } + + if (!IS_MIG_IN_USE(pGpu) && (grIdx == 0)) + { + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + // Cache legacy GR mask info (i.e. GR0 with MIG disabled) to pKernelGraphicsManager->legacyFsMaskState + kgrmgrSetLegacyKgraphicsStaticInfo(pGpu, pKernelGraphicsManager, pKernelGraphics); + } + +cleanup: + if (status != NV_OK) + { + // Redact static configuration + pPrivate->bInitialized = NV_FALSE; + + portMemFree(pPrivate->staticInfo.pGrInfo); + pPrivate->staticInfo.pGrInfo = NULL; + + portMemFree(pPrivate->staticInfo.pPpcMasks); + pPrivate->staticInfo.pPpcMasks = NULL; + + portMemFree(pPrivate->staticInfo.pZcullInfo); + pPrivate->staticInfo.pZcullInfo = NULL; + + portMemFree(pPrivate->staticInfo.pRopInfo); + pPrivate->staticInfo.pRopInfo = NULL; + + portMemFree(pPrivate->staticInfo.pContextBuffersInfo); + pPrivate->staticInfo.pContextBuffersInfo = NULL; + + portMemFree(pPrivate->staticInfo.pSmIssueRateModifier); + pPrivate->staticInfo.pSmIssueRateModifier = NULL; + + portMemFree(pPrivate->staticInfo.pFecsTraceDefines); + pPrivate->staticInfo.pFecsTraceDefines = NULL; + } + + // If we had to subscribe specifically, free the hclient we allocated + if (hClient != NV01_NULL_OBJECT) + pRmApi->Free(pRmApi, hClient, hClient); + + if (gpumgrGetBcEnabledStatus(pGpu) != bBcState) + { + // Corrupted broadcast state! + NV_ASSERT(gpumgrGetBcEnabledStatus(pGpu) != bBcState); + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + + portMemFree(pParams); + + return status; +} + +/*! Retrieve ctxbufpool parameters for given local ctx buffer */ +const CTX_BUF_INFO * +kgraphicsGetCtxBufferInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + GR_CTX_BUFFER buf +) +{ + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_CTX_BUFFER, buf), NULL); + return &pKernelGraphics->maxCtxBufSize[buf]; +} + +/*! Set ctxbufpool parameters for given local ctx buffer */ +void +kgraphicsSetCtxBufferInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + GR_CTX_BUFFER buf, + NvU64 size, + NvU64 align, + RM_ATTR_PAGE_SIZE attr, + NvBool bContiguous +) +{ + CTX_BUF_INFO *pInfo; + NV_ASSERT_OR_RETURN_VOID(NV_ENUM_IS(GR_CTX_BUFFER, buf)); + + pInfo = &pKernelGraphics->maxCtxBufSize[buf]; + pInfo->size = size; + pInfo->align = align; + pInfo->attr = attr; + pInfo->bContig = bContiguous; +} + +/*! Clear ctxbufpool parameters for all local ctx buffers */ +void +kgraphicsClearCtxBufferInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + portMemSet(pKernelGraphics->maxCtxBufSize, 0, sizeof(pKernelGraphics->maxCtxBufSize)); +} + +/*! Initialize ctxbufpool for this engine */ +NV_STATUS +kgraphicsInitCtxBufPool_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + Heap *pHeap +) +{ + return ctxBufPoolInit(pGpu, pHeap, &pKernelGraphics->pCtxBufPool); +} + +/*! Retrieve ctxbufpool for this engine */ +CTX_BUF_POOL_INFO * +kgraphicsGetCtxBufPool_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + return pKernelGraphics->pCtxBufPool; +} + +/*! 
destroy ctxbufpool for this engine */ +void +kgraphicsDestroyCtxBufPool_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + if (pKernelGraphics->pCtxBufPool == NULL) + return; + + ctxBufPoolRelease(pKernelGraphics->pCtxBufPool); + ctxBufPoolDestroy(&pKernelGraphics->pCtxBufPool); + pKernelGraphics->pCtxBufPool = NULL; +} + +/*! Get the global ctx buffers for the given GFID */ +GR_GLOBALCTX_BUFFERS * +kgraphicsGetGlobalCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 gfid +) +{ + if (pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers == NULL) + return NULL; + return &pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid]; +} + +/*! Should this global ctx buffer be mapped as size aligned? */ +NvBool +kgraphicsIsGlobalCtxBufferSizeAligned_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + GR_GLOBALCTX_BUFFER buf +) +{ + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_GLOBALCTX_BUFFER, buf), NV_FALSE); + return pKernelGraphics->globalCtxBuffersInfo.bSizeAligned[buf]; +} + +/*! Get ctx buf attr for global priv access map */ +const GR_BUFFER_ATTR * +kgraphicsGetGlobalPrivAccessMapAttr_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics +) +{ + return &pKernelGraphics->globalCtxBuffersInfo.globalCtxAttr[GR_GLOBAL_BUFFER_GLOBAL_PRIV_ACCESS_MAP]; +} + +/* + * @brief Get Main context buffer size + * + * @param[in] pGpu + * @param[in] pKernelGraphics + * @param[in] bIncludeSubctxHdrs If subctx headers should be included in size calculation + * @param[out] pSize Main Context buffer size + */ +NV_STATUS +kgraphicsGetMainCtxBufferSize_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvBool bIncludeSubctxHdrs, + NvU32 *pSize +) +{ + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NvU32 size; + + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pGrInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + + size = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS].size; + + // Allocate space for per VEID headers in the golden context buffer. + if (bIncludeSubctxHdrs && + kgraphicsIsPerSubcontextContextHeaderSupported(pGpu, pKernelGraphics)) + { + // TODO size this down to max per-engine subcontexts + NvU32 maxSubctx = pKernelGraphicsStaticInfo->pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT].data; + + // The header needs to start at a 4 KB aligned address + size = RM_ALIGN_UP(size, RM_PAGE_SIZE); + + // The header is only 256 bytes; but needs to be 4KB aligned. + size += (RM_PAGE_SIZE * maxSubctx); + } + + *pSize = size; + return NV_OK; +} + +NV_STATUS +kgraphicsAllocKgraphicsBuffers_KERNEL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + KernelGraphicsContext *pKernelGraphicsContext, + KernelChannel *pKernelChannel +) +{ + NvU32 gfid; + OBJGVASPACE *pGVAS; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE); + gfid = kchannelGetGfid(pKernelChannel); + + // Deferred static info is necessary at this point for FECS buffer allocation. 
Skip for guest RM + if (!IS_VIRTUAL(pGpu)) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsInitializeDeferredStaticData(pGpu, pKernelGraphics, NV01_NULL_OBJECT, NV01_NULL_OBJECT)); + } + + // + // Allocate global context buffers for this gfid, if they haven't been + // already + // + if (!pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid].bFecsBufferAllocated) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsAllocGlobalCtxBuffers_HAL(pGpu, pKernelGraphics, gfid)); + } + + if (kgraphicsIsCtxswLoggingSupported(pGpu, pKernelGraphics) && + !pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid].bFecsTraceUnsupportedInGuest && + pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid].bFecsBufferAllocated) + { + if (!gvaspaceIsExternallyOwned(pGVAS) && !IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + // + // We map CTXSW buffer on each object allocation including compute object + // Other global context buffers are not mapped during compute object alloc with subcontexts since + // those are GR-only buffers and not needed for compute-only contexts + // + _kgraphicsMapGlobalCtxBuffer(pGpu, pKernelGraphics, gfid, pKernelChannel->pVAS, pKernelGraphicsContext, + GR_GLOBALCTX_BUFFER_FECS_EVENT, NV_FALSE); + } + + if (!fecsBufferIsMapped(pGpu, pKernelGraphics)) + { + fecsBufferMap(pGpu, pKernelGraphics); + } + + if (pGpu->fecsCtxswLogConsumerCount > 0) + fecsBufferReset(pGpu, pKernelGraphics); + } + + return NV_OK; +} + +static NV_STATUS +_kgraphicsMapGlobalCtxBuffer +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 gfid, + OBJVASPACE *pVAS, + KernelGraphicsContext *pKernelGraphicsContext, + GR_GLOBALCTX_BUFFER buffId, + NvBool bIsReadOnly +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status = NV_OK; + NvU64 vaddr = 0; + MEMORY_DESCRIPTOR *pMemDesc; + NvBool bSizeAligned; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + bSizeAligned = pKernelGraphics->globalCtxBuffersInfo.bSizeAligned[buffId]; + pMemDesc = pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid].memDesc[buffId]; + + if (pMemDesc == NULL) + { + NvU32 buffSize; + NvU32 fifoEngineId; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN(kgrctxGlobalCtxBufferToFifoEngineId(buffId, &fifoEngineId)); + buffSize = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[fifoEngineId].size; + if (buffSize == 0) + { + NV_PRINTF(LEVEL_INFO, + "Could not map %s Buffer as buffer is not supported\n", + NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId)); + return NV_OK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Could not map %s Buffer, no memory allocated for it!\n", + NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId)); + return NV_ERR_INVALID_ARGUMENT; + } + } + + // Unconditionally call map for refcounting + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsMapCtxBuffer(pGpu, pKernelGraphics, pMemDesc, pVAS, + &pKernelGraphicsContextUnicast->globalCtxBufferVaList[buffId], + bSizeAligned, + bIsReadOnly)); + + NV_ASSERT_OK(vaListFindVa(&pKernelGraphicsContextUnicast->globalCtxBufferVaList[buffId], pVAS, &vaddr)); + + NV_PRINTF(LEVEL_INFO, + "GPU:%d %s Buffer PA @ 0x%llx VA @ 0x%llx of Size 0x%llx\n", + pGpu->gpuInstance, 
NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId), + memdescGetPhysAddr(memdescGetMemDescFromGpu(pMemDesc, pGpu), AT_GPU, 0), + vaddr, pMemDesc->Size); + + return status; +} + +/*! + * @brief Map a GR ctx buffer + */ +NV_STATUS +kgraphicsMapCtxBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + MEMORY_DESCRIPTOR *pMemDesc, + OBJVASPACE *pVAS, + VA_LIST *pVaList, + NvBool bAlignSize, + NvBool bIsReadOnly +) +{ + NV_STATUS status = NV_OK; + NvU64 vaddr = 0; + OBJGVASPACE *pGVAS; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_PRINTF(LEVEL_INFO, "gpu:%d isBC=%d\n", pGpu->gpuInstance, + gpumgrGetBcEnabledStatus(pGpu)); + + pGVAS = dynamicCast(pVAS, OBJGVASPACE); + NV_ASSERT_OR_RETURN(!gvaspaceIsExternallyOwned(pGVAS), NV_ERR_INVALID_OPERATION); + + status = vaListFindVa(pVaList, pVAS, &vaddr); + if (status == NV_ERR_OBJECT_NOT_FOUND) + { + // create a new subcontext mapping + NvU32 allocFlags = bAlignSize ? DMA_ALLOC_VASPACE_SIZE_ALIGNED : DMA_ALLOC_VASPACE_NONE; + NvU32 updateFlags = bIsReadOnly ? (DMA_UPDATE_VASPACE_FLAGS_READ_ONLY | + DMA_UPDATE_VASPACE_FLAGS_SHADER_READ_ONLY) : DMA_UPDATE_VASPACE_FLAGS_NONE; + + if (kgraphicsIsPerSubcontextContextHeaderSupported(pGpu, pKernelGraphics)) + { + status = dmaMapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, pMemDesc, &vaddr, + allocFlags, updateFlags); + } + else + { + // + // Per subcontext headers not enabled. + // If subcontext is not supported, create a new mapping. + // If subcontext is supported, create an identity mapping to the existing one. + // + + // Get the first node after the dummy node + VA_INFO *pVaInfo = mapFind(pVaList, 0); + NV_ASSERT_OR_RETURN(pVaInfo != NULL, NV_ERR_INVALID_STATE); + pVaInfo = mapNext(pVaList, pVaInfo); + if (pVaInfo == NULL) + { + status = dmaMapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, + pMemDesc, + &vaddr, + allocFlags, + updateFlags); + } + else + { + NvU32 mapFlags = 0x0; + + if (bIsReadOnly) + { + mapFlags = FLD_SET_DRF(OS46, _FLAGS, _ACCESS, _READ_ONLY, mapFlags); + mapFlags = FLD_SET_DRF(OS46, _FLAGS, _SHADER_ACCESS, _READ_ONLY, mapFlags); + } + mapFlags = FLD_SET_DRF(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, _FALSE, mapFlags); + mapFlags = FLD_SET_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, mapFlags); + + NV_ASSERT(!bAlignSize); // TODO: Add support for size align + vaddr = pVaInfo->vAddr; + NV_ASSERT_OK_OR_ELSE(status, + dmaAllocMapping_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, pMemDesc, + &vaddr, + mapFlags, + NULL, + KMIGMGR_SWIZZID_INVALID), + /* do nothing on error, but make sure we overwrite status */;); + NV_ASSERT(vaddr == pVaInfo->vAddr); + } + } + + NV_PRINTF(LEVEL_INFO, "New ctx buffer mapping at VA 0x%llx\n", vaddr); + } + + if (status == NV_OK) + NV_ASSERT_OK_OR_RETURN(vaListAddVa(pVaList, pVAS, vaddr)); + + return status; +} + +/*! 
+ * @brief Unmap a GR ctx buffer + */ +void +kgraphicsUnmapCtxBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + OBJVASPACE *pVAS, + VA_LIST *pVaList +) +{ + NV_STATUS status = NV_OK; + NvU64 vaddr = 0; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_PRINTF(LEVEL_INFO, "gpu:%d isBC=%d\n", pGpu->gpuInstance, + gpumgrGetBcEnabledStatus(pGpu)); + + status = vaListFindVa(pVaList, pVAS, &vaddr); + if (status == NV_OK) + { + NV_ASSERT_OK(vaListRemoveVa(pVaList, pVAS)); + + status = vaListFindVa(pVaList, pVAS, &vaddr); + + NV_ASSERT((NV_OK == status) || (NV_ERR_OBJECT_NOT_FOUND == status)); + if (NV_ERR_OBJECT_NOT_FOUND == status) + { + if (vaListGetManaged(pVaList)) + { + dmaUnmapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, vaddr); + } + + NV_PRINTF(LEVEL_INFO, "Freed ctx buffer mapping at VA 0x%llx\n", + vaddr); + } + } +} + +/*! + * @brief Get the Class number for a given gr object type + * + * @param[in] pGpu + * @param[in] pKernelGraphics + * @param[in] wantObjectType GR object type to lookup + * @param[out] pClass class number + */ +NV_STATUS +kgraphicsGetClassByType_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + NvU32 wantObjectType, + NvU32 *pClass +) +{ + NV_STATUS status = NV_OK; + NvU32 objectType; + NvU32 i; + NvU32 numClasses; + NvU32 *pClassesSupported; + + *pClass = 0; + + if (wantObjectType >= GR_OBJECT_TYPE_INVALID) + { + NV_PRINTF(LEVEL_ERROR, "bad requested object type : %d\n", + wantObjectType); + return NV_ERR_INVALID_ARGUMENT; + } + + // find out how many classes of type ENG_GR(0) we have + NV_ASSERT_OK_OR_RETURN( + gpuGetClassList(pGpu, &numClasses, NULL, ENG_GR(pKernelGraphics->instance))); + + pClassesSupported = portMemAllocNonPaged(sizeof(NvU32) * numClasses); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pClassesSupported != NULL, NV_ERR_NO_MEMORY); + + status = gpuGetClassList(pGpu, &numClasses, pClassesSupported, ENG_GR(pKernelGraphics->instance)); + + if (status == NV_OK) + { + for (i = 0; i < numClasses; i++) + { + kgrmgrGetGrObjectType(pClassesSupported[i], &objectType); + + NV_PRINTF(LEVEL_INFO, "classNum=0x%08x, type=%d\n", + pClassesSupported[i], objectType); + + if (objectType == wantObjectType) + *pClass = pClassesSupported[i]; + } + } + + portMemFree(pClassesSupported); + + return (*pClass != 0) ? NV_OK : NV_ERR_INVALID_CLASS; +} + +/*! + * @brief retrieve the ctx attributes for the given buffer + */ +const GR_BUFFER_ATTR * +kgraphicsGetContextBufferAttr_IMPL +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + GR_CTX_BUFFER buf +) +{ + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_CTX_BUFFER, buf), NULL); + return &pKernelGraphics->ctxAttr[buf]; +} + +/*! 
+ * @brief Creates a VEID0 channel for Golden Image creation
+ *
+ * @return NV_OK if channel and golden image created successfully
+ */
+NV_STATUS
+kgraphicsCreateGoldenImageChannel_IMPL
+(
+ OBJGPU *pGpu,
+ KernelGraphics *pKernelGraphics
+)
+{
+ NV_STATUS status = NV_OK;
+ NvHandle hClientId = NV01_NULL_OBJECT;
+ NvHandle hDeviceId;
+ NvHandle hSubdeviceId;
+ NvHandle hVASpace = 0xbaba0042;
+ NvHandle hPBVirtMemId = 0xbaba0043;
+ NvHandle hPBPhysMemId = 0xbaba0044;
+ NvHandle hChannelId = 0xbaba0045;
+ NvHandle hObj3D = 0xbaba0046;
+ NvHandle hUserdId = 0xbaba0049;
+ NvU32 gpFifoEntries = 32; // power-of-2 random choice
+ NvU64 gpFifoSize = NVA06F_GP_ENTRY__SIZE * gpFifoEntries;
+ NvU64 chSize = gpFifoSize;
+ RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+ KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
+ NvBool bNeedMIGWar;
+ NvBool bBcStatus;
+ NvBool bClientUserd = IsVOLTAorBetter(pGpu);
+ NvBool bAcquireLock = NV_FALSE;
+ NvU32 sliLoopReentrancy;
+ NV_VASPACE_ALLOCATION_PARAMETERS vaParams;
+ NV_MEMORY_ALLOCATION_PARAMS memAllocParams;
+ NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS channelGPFIFOAllocParams;
+ NvU32 classNum;
+
+ // XXX This should be removed when broadcast SLI support is deprecated
+ if (!gpumgrIsParentGPU(pGpu))
+ {
+ return NV_OK;
+ }
+
+ bBcStatus = gpumgrGetBcEnabledStatus(pGpu);
+
+ // FIXME these allocations corrupt BC state
+ NV_ASSERT_OK_OR_RETURN(
+ rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, &hClientId, &hDeviceId, &hSubdeviceId));
+
+ gpumgrSetBcEnabledStatus(pGpu, NV_TRUE);
+
+ // As we have forced SLI broadcast mode here, temporarily reset the reentrancy count
+ sliLoopReentrancy = gpumgrSLILoopReentrancyPop(pGpu);
+
+ bNeedMIGWar = IS_MIG_IN_USE(pGpu);
+
+ // Allocate subdevices for secondary GPUs
+ SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+ {
+ NvHandle hSecondary;
+ NV2080_ALLOC_PARAMETERS nv2080AllocParams;
+
+ // Allocate a subDevice
+ NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR,
+ serverutilGenResourceHandle(hClientId, &hSecondary),
+ cleanup);
+
+ portMemSet(&nv2080AllocParams, 0, sizeof(nv2080AllocParams));
+ nv2080AllocParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu);
+
+ NV_CHECK_OK(status, LEVEL_WARNING,
+ pRmApi->AllocWithHandle(pRmApi,
+ hClientId,
+ hDeviceId,
+ hSecondary,
+ NV20_SUBDEVICE_0,
+ &nv2080AllocParams));
+ }
+ SLI_LOOP_END;
+
+ if (bNeedMIGWar)
+ {
+ KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
+ NvHandle hPartitionRef = 0xbaba0048;
+ NvHandle hExecPartitionRef = 0xbaba004a;
+ NVC637_ALLOCATION_PARAMETERS nvC637AllocParams = {0};
+ MIG_INSTANCE_REF ref;
+
+ // Get swizzId for this GR
+ NV_ASSERT_OK_OR_GOTO(status,
+ kmigmgrGetMIGReferenceFromEngineType(pGpu, pKernelMIGManager,
+ NV2080_ENGINE_TYPE_GR(pKernelGraphics->instance), &ref),
+ cleanup);
+
+ portMemSet(&nvC637AllocParams, 0, sizeof(NVC637_ALLOCATION_PARAMETERS));
+ nvC637AllocParams.swizzId = ref.pKernelMIGGpuInstance->swizzId;
+
+ // allocate partition reference
+ NV_ASSERT_OK_OR_GOTO(status,
+ pRmApi->AllocWithHandle(pRmApi,
+ hClientId,
+ hSubdeviceId,
+ hPartitionRef,
+ AMPERE_SMC_PARTITION_REF,
+ &nvC637AllocParams),
+ cleanup);
+
+ if (ref.pMIGComputeInstance != NULL)
+ {
+ NVC638_ALLOCATION_PARAMETERS nvC638AllocParams = {0};
+ nvC638AllocParams.execPartitionId = ref.pMIGComputeInstance->id;
+ NV_ASSERT_OK_OR_GOTO(status,
+ pRmApi->AllocWithHandle(pRmApi,
+ hClientId,
+ hPartitionRef,
+ hExecPartitionRef,
+ AMPERE_SMC_EXEC_PARTITION_REF,
+ &nvC638AllocParams),
+ cleanup);
+ }
+ }
+
+ //
+ 
// VidHeapControl and vaspace creation calls should happen outside GPU locks + // UVM/CUDA may be holding the GPU locks here and the allocation may subsequently fail + // So explicitly release GPU locks before RmVidHeapControl + // See Bug 1735851-#24 + // + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + bAcquireLock = NV_TRUE; + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + + // Create a new VAspace for channel + portMemSet(&vaParams, 0, sizeof(NV_VASPACE_ALLOCATION_PARAMETERS)); + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithHandle(pRmApi, hClientId, hDeviceId, hVASpace, FERMI_VASPACE_A, &vaParams), + cleanup); + + // Allocate gpfifo entries + portMemSet(&memAllocParams, 0, sizeof(NV_MEMORY_ALLOCATION_PARAMS)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = chSize; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + memAllocParams.hVASpace = 0; // Physical allocations don't expect vaSpace handles + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithHandle(pRmApi, hClientId, hDeviceId, hPBPhysMemId, NV01_MEMORY_SYSTEM, &memAllocParams), + cleanup); + + portMemSet(&memAllocParams, 0, sizeof(NV_MEMORY_ALLOCATION_PARAMS)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = chSize; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + memAllocParams.flags = NVOS32_ALLOC_FLAGS_VIRTUAL; + memAllocParams.hVASpace = hVASpace; // Virtual allocation expect vaSpace handles + // 0 handle = allocations on gpu default vaSpace + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithHandle(pRmApi, hClientId, hDeviceId, hPBVirtMemId, NV50_MEMORY_VIRTUAL, &memAllocParams), + cleanup); + + // Allocate Userd + if (bClientUserd) + { + NvU32 userdMemClass = NV01_MEMORY_LOCAL_USER; + NvU32 ctrlSize; + + if (gpuIsClassSupported(pGpu, VOLTA_CHANNEL_GPFIFO_A)) + { + ctrlSize = sizeof(Nvc36fControl); + } + else if (gpuIsClassSupported(pGpu, TURING_CHANNEL_GPFIFO_A)) + { + ctrlSize = sizeof(Nvc46fControl); + } + else if (gpuIsClassSupported(pGpu, AMPERE_CHANNEL_GPFIFO_A)) + { + ctrlSize = sizeof(Nvc56fControl); + } + else + { + status = NV_ERR_NOT_SUPPORTED; + goto cleanup; + } + + portMemSet(&memAllocParams, 0, sizeof(NV_MEMORY_ALLOCATION_PARAMS)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.size = ctrlSize; + memAllocParams.type = NVOS32_TYPE_IMAGE; + + // Apply registry overrides to USERD. 
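+ // Note that both the _COH and _NCOH overrides place USERD in system memory
+ // (NV01_MEMORY_SYSTEM with a _PCI location attribute), while _VID and
+ // _DEFAULT keep the default NV01_MEMORY_LOCAL_USER class with a _VIDMEM
+ // location.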
+ switch (DRF_VAL(_REG_STR_RM, _INST_LOC, _USERD, pGpu->instLocOverrides)) + { + case NV_REG_STR_RM_INST_LOC_USERD_NCOH: + case NV_REG_STR_RM_INST_LOC_USERD_COH: + userdMemClass = NV01_MEMORY_SYSTEM; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + break; + + case NV_REG_STR_RM_INST_LOC_USERD_VID: + case NV_REG_STR_RM_INST_LOC_USERD_DEFAULT: + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + break; + } + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithHandle(pRmApi, hClientId, hDeviceId, hUserdId, + userdMemClass, &memAllocParams), + cleanup); + } + + // Get fifo channel class Id + classNum = kfifoGetChannelClassId(pGpu, GPU_GET_KERNEL_FIFO(pGpu)); + NV_ASSERT_OR_GOTO(classNum != 0, cleanup); + + // Allocate a bare channel + portMemSet(&channelGPFIFOAllocParams, 0, sizeof(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS)); + channelGPFIFOAllocParams.hVASpace = hVASpace; + channelGPFIFOAllocParams.hObjectBuffer = hPBVirtMemId; + channelGPFIFOAllocParams.gpFifoEntries = gpFifoEntries; + // + // Set the gpFifoOffset to zero intentionally since we only need this channel + // to be created, but will not submit any work to it. So it's fine not to + // provide a valid offset here. + // + channelGPFIFOAllocParams.gpFifoOffset = 0; + if (bClientUserd) + { + channelGPFIFOAllocParams.hUserdMemory[0] = hUserdId; + } + + if (bNeedMIGWar) + { + MIG_INSTANCE_REF ref; + NvU32 localEngineType; + + NV_ASSERT_OK_OR_GOTO(status, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClientId, &ref), + cleanup); + + NV_ASSERT_OK_OR_GOTO(status, + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, NV2080_ENGINE_TYPE_GR(pKernelGraphics->instance), &localEngineType), + cleanup); + + channelGPFIFOAllocParams.engineType = localEngineType; + } + else + { + channelGPFIFOAllocParams.engineType = NV2080_ENGINE_TYPE_GR0; + } + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithHandle(pRmApi, hClientId, hDeviceId, hChannelId, + classNum, &channelGPFIFOAllocParams), + cleanup); + + // + // When using split VAS, we need to reserve enough pagepool memory to + // sustain large context buffer mappings. For GSPCLIENT where the golden + // context buffer channel is initialized on boot, the pagepool does not have + // enough reserved memory to accommodate these buffers, so we need to + // reserve extra here. 
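+ // The reservation below sums the context buffer sizes reported in
+ // pContextBuffersInfo, skipping entries reported as NV_U32_MAX
+ // (i.e. buffers not supported on this configuration).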
+ //
+ if (IS_GSP_CLIENT(pGpu))
+ {
+ KernelChannel *pKernelChannel;
+ NvU64 reserveSize;
+ const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics);
+ NvU32 i;
+
+ NV_ASSERT_OK(CliGetKernelChannel(hClientId, hChannelId, &pKernelChannel));
+
+ NV_ASSERT_OR_ELSE(pKernelGraphicsStaticInfo != NULL,
+ status = NV_ERR_INVALID_STATE;
+ goto cleanup;);
+ NV_ASSERT_OR_ELSE(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL,
+ status = NV_ERR_INVALID_STATE;
+ goto cleanup;);
+
+ reserveSize = 0;
+ for (i = 0; i < NV_ARRAY_ELEMENTS(pKernelGraphicsStaticInfo->pContextBuffersInfo->engine); ++i)
+ {
+ if (pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[i].size != NV_U32_MAX)
+ reserveSize += pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[i].size;
+ }
+
+ NV_ASSERT_OK(
+ vaspaceReserveMempool(pKernelChannel->pVAS, pGpu, hClientId,
+ reserveSize, RM_PAGE_SIZE,
+ VASPACE_RESERVE_FLAGS_NONE));
+ }
+
+ // Reacquire the GPU locks
+ NV_ASSERT_OK_OR_GOTO(status,
+ rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GR),
+ cleanup);
+ bAcquireLock = NV_FALSE;
+ pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+ // Get KernelGraphicsObject class Id
+ if (!bNeedMIGWar)
+ {
+ NV_ASSERT_OK_OR_GOTO(status,
+ kgraphicsGetClassByType(pGpu, pKernelGraphics, GR_OBJECT_TYPE_3D, &classNum),
+ cleanup);
+ }
+ else
+ {
+ NV_ASSERT_OK_OR_GOTO(status,
+ kgraphicsGetClassByType(pGpu, pKernelGraphics, GR_OBJECT_TYPE_COMPUTE, &classNum),
+ cleanup);
+ }
+ NV_ASSERT_OR_GOTO(classNum != 0, cleanup);
+
+ // Allocate a GR object on the channel
+ NV_ASSERT_OK_OR_GOTO(status,
+ pRmApi->AllocWithHandle(pRmApi, hClientId, hChannelId, hObj3D, classNum, NULL),
+ cleanup);
+
+cleanup:
+
+ if (bAcquireLock)
+ {
+ NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status,
+ rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GR));
+ }
+
+ // Free all handles
+ NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status,
+ pRmApi->Free(pRmApi, hClientId, hClientId));
+
+ // Restore the reentrancy count
+ gpumgrSLILoopReentrancyPush(pGpu, sliLoopReentrancy);
+
+ gpumgrSetBcEnabledStatus(pGpu, bBcStatus);
+
+ return status;
+}
+
+/*! 
+ * @brief Free context buffers shared by all/most graphics contexts
+ */
+void kgraphicsFreeGlobalCtxBuffers_IMPL
+(
+ OBJGPU *pGpu,
+ KernelGraphics *pKernelGraphics,
+ NvU32 gfid
+)
+{
+ KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
+ GR_GLOBALCTX_BUFFERS *pCtxBuffers;
+ GR_GLOBALCTX_BUFFER buff;
+
+ NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+ if (pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers == NULL)
+ return;
+
+ pCtxBuffers = &pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid];
+
+ // no ctx buffers allocated, so get out early
+ if (!pCtxBuffers->bAllocated && !pCtxBuffers->bFecsBufferAllocated)
+ return;
+
+ FOR_EACH_IN_ENUM(GR_GLOBALCTX_BUFFER, buff)
+ {
+ memdescFree(pCtxBuffers->memDesc[buff]);
+ memdescDestroy(pCtxBuffers->memDesc[buff]);
+ pCtxBuffers->memDesc[buff] = NULL;
+ pCtxBuffers->bInitialized[buff] = NV_FALSE;
+ }
+ FOR_EACH_IN_ENUM_END;
+
+ pCtxBuffers->bAllocated = NV_FALSE;
+ pCtxBuffers->bFecsBufferAllocated = NV_FALSE;
+
+ // make sure all L2 cache lines using CB buffers are clear after we free them
+ NV_ASSERT_OK(kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, NULL, FB_CACHE_VIDEO_MEMORY, FB_CACHE_EVICT));
+}
+
+NV_STATUS
+kgraphicsGetCaps_IMPL
+(
+ OBJGPU *pGpu,
+ KernelGraphics *pKernelGraphics,
+ NvU8 *pGrCaps
+)
+{
+ const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics);
+
+ NV_ASSERT_OR_RETURN(pGrCaps != NULL, NV_ERR_INVALID_ARGUMENT);
+ NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE);
+
+ portMemCopy(pGrCaps,
+ NV0080_CTRL_GR_CAPS_TBL_SIZE * sizeof(*pGrCaps),
+ pKernelGraphicsStaticInfo->grCaps.capsTbl,
+ NV0080_CTRL_GR_CAPS_TBL_SIZE * sizeof(*pGrCaps));
+
+ return NV_OK;
+}
+
+/*!
+ * @brief Return whether unrestricted register access buffer is supported or not.
+ */
+NvBool
+kgraphicsIsUnrestrictedAccessMapSupported_PF
+(
+ OBJGPU *pGpu,
+ KernelGraphics *pKernelGraphics
+)
+{
+ return !hypervisorIsVgxHyper();
+}
+
+/*!
+ * @brief Provides an opportunity to register some IntrService during intrStateInit.
+ */
+void
+kgraphicsRegisterIntrService_IMPL
+(
+ OBJGPU *pGpu,
+ KernelGraphics *pKernelGraphics,
+ IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX]
+)
+{
+ NvU32 engineIdx = MC_ENGINE_IDX_GRn_FECS_LOG(pKernelGraphics->instance);
+
+ NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL);
+ pRecords[engineIdx].pInterruptService = staticCast(pKernelGraphics, IntrService);
+
+ engineIdx = MC_ENGINE_IDX_GRn(pKernelGraphics->instance);
+
+ NV_ASSERT(pRecords[engineIdx].pNotificationService == NULL);
+ pRecords[engineIdx].bFifoWaiveNotify = NV_FALSE;
+ pRecords[engineIdx].pNotificationService = staticCast(pKernelGraphics, IntrService);
+}
+
+/*!
+ * @brief Services the nonstall interrupt.
+ */
+NvU32
+kgraphicsServiceNotificationInterrupt_IMPL
+(
+ OBJGPU *pGpu,
+ KernelGraphics *pKernelGraphics,
+ IntrServiceServiceNotificationInterruptArguments *pParams
+)
+{
+ NvU32 grIdx = pKernelGraphics->instance;
+
+ NV_ASSERT_OR_RETURN(pParams != NULL, 0);
+ NV_ASSERT_OR_RETURN(pParams->engineIdx == MC_ENGINE_IDX_GRn(grIdx), 0);
+
+ MODS_ARCH_REPORT(NV_ARCH_EVENT_NONSTALL_GR, "%s", "processing GR nonstall interrupt\n");
+
+ kgraphicsNonstallIntrCheckAndClear_HAL(pGpu, pKernelGraphics, pParams->pThreadState);
+ engineNonStallIntrNotify(pGpu, NV2080_ENGINE_TYPE_GR(pKernelGraphics->instance));
+ return NV_OK;
+}
+
+/*!
+ * KernelGraphics RM Device Controls
+ */
+
+/*! 
+ * deviceCtrlCmdKGrGetCaps_IMPL + * + * Lock Requirements: + * Assert that API lock held on entry + * + * TODO: remove once all uses have been migrated to V2 + */ +NV_STATUS +deviceCtrlCmdKGrGetCaps_IMPL +( + Device *pDevice, + NV0080_CTRL_GR_GET_CAPS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvBool bCapsPopulated = NV_FALSE; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (IsDFPGA(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + NV_STATUS status; + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + kgrmgrCtrlSetEngineID(0, &grRouteInfo); + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, RES_GET_CLIENT_HANDLE(pDevice), &grRouteInfo, &pKernelGraphics), + SLI_LOOP_RETURN(status);); + + if (!bCapsPopulated) + { + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + kgraphicsGetCaps(pGpu, pKernelGraphics, pParams->capsTbl), + SLI_LOOP_RETURN(status);); + + bCapsPopulated = NV_TRUE; + } + } + SLI_LOOP_END + + return NV_OK; +} + +/*! + * deviceCtrlCmdKGrGetCapsV2_IMPL + * + * Lock Requirements: + * Assert that API lock held on entry + */ +NV_STATUS +deviceCtrlCmdKGrGetCapsV2_IMPL +( + Device *pDevice, + NV0080_CTRL_GR_GET_CAPS_V2_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (IsDFPGA(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = pParams->grRouteInfo; + NV_STATUS status; + + kgrmgrCtrlSetEngineID(0, &grRouteInfo); + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, RES_GET_CLIENT_HANDLE(pDevice), &grRouteInfo, &pKernelGraphics), + SLI_LOOP_RETURN(status);); + + if (!pParams->bCapsPopulated) + { + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + kgraphicsGetCaps(pGpu, pKernelGraphics, pParams->capsTbl), + SLI_LOOP_RETURN(status);); + + pParams->bCapsPopulated = NV_TRUE; + } + } + SLI_LOOP_END + + return NV_OK; +} + +static NV_STATUS +_kgraphicsCtrlCmdGrGetInfoV2 +( + OBJGPU *pGpu, + NvHandle hClient, + NV2080_CTRL_GR_GET_INFO_V2_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NvU32 grInfoListSize = pParams->grInfoListSize; + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvU32 i; + + if (pKernelGraphicsManager == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + if ((0 == grInfoListSize) || + (grInfoListSize > NV2080_CTRL_GR_INFO_MAX_SIZE)) + { + NV_PRINTF(LEVEL_ERROR, "Invalid grInfoList size: 0x%x\n", grInfoListSize); + return NV_ERR_INVALID_ARGUMENT; + } + + if (kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + NvU32 grIdx; + for (grIdx = 0; grIdx < GPU_MAX_GRS; grIdx++) + { + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, grIdx); + if (pKernelGraphics != NULL) + break; + } + if (pKernelGraphics == NULL) + return NV_ERR_INVALID_STATE; + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &pParams->grRouteInfo, 
&pKernelGraphics)); + } + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pGrInfo != NULL, NV_ERR_NOT_SUPPORTED); + + for (i = 0; i < grInfoListSize; i++) + { + NV_CHECK_OR_RETURN(LEVEL_ERROR, pParams->grInfoList[i].index < NV2080_CTRL_GR_INFO_MAX_SIZE, NV_ERR_INVALID_ARGUMENT); + pParams->grInfoList[i].data = + pKernelGraphicsStaticInfo->pGrInfo->infoList[pParams->grInfoList[i].index].data; + } + + return status; +} + +/*! + * deviceCtrlCmdKGrGetInfo + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + * + * TODO: remove once all uses have been migrated to V2 + */ +NV_STATUS +deviceCtrlCmdKGrGetInfo_IMPL +( + Device *pDevice, + NV0080_CTRL_GR_GET_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NV0080_CTRL_GR_GET_INFO_V2_PARAMS grInfoParamsV2; + NV0080_CTRL_GR_INFO *pGrInfos = NvP64_VALUE(pParams->grInfoList); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pDevice); + NvU32 grInfoListSize = NV_MIN(pParams->grInfoListSize, + NV0080_CTRL_GR_INFO_MAX_SIZE); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pGrInfos != NULL, NV_ERR_INVALID_ARGUMENT); + + portMemSet(&grInfoParamsV2, 0, sizeof(grInfoParamsV2)); + portMemCopy(grInfoParamsV2.grInfoList, grInfoListSize * sizeof(*pGrInfos), + pGrInfos, grInfoListSize * sizeof(*pGrInfos)); + grInfoParamsV2.grInfoListSize = grInfoListSize; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _kgraphicsCtrlCmdGrGetInfoV2(pGpu, hClient, &grInfoParamsV2)); + + portMemCopy(pGrInfos, grInfoListSize * sizeof(*pGrInfos), + grInfoParamsV2.grInfoList, grInfoListSize * sizeof(*pGrInfos)); + return NV_OK; +} + +/*! + * deviceCtrlCmdKGrGetInfoV2 + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +deviceCtrlCmdKGrGetInfoV2_IMPL +( + Device *pDevice, + NV0080_CTRL_GR_GET_INFO_V2_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pDevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _kgraphicsCtrlCmdGrGetInfoV2(pGpu, hClient, pParams)); + + return NV_OK; +} + +/*! + * KernelGraphics RM SubDevice Controls + */ + +/*! + * subdeviceCtrlCmdKGrGetCapsV2 + * + * Lock Requirements: + * Assert that API lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetCapsV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_CAPS_V2_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = pParams->grRouteInfo; + + kgrmgrCtrlSetEngineID(0, &grRouteInfo); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, RES_GET_CLIENT_HANDLE(pSubdevice), &grRouteInfo, &pKernelGraphics)); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + if (pKernelGraphicsStaticInfo == NULL) + { + return NV_ERR_INVALID_STATE; + } + + if (!pParams->bCapsPopulated) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsGetCaps(pGpu, pKernelGraphics, pParams->capsTbl)); + + pParams->bCapsPopulated = NV_TRUE; + } + + return NV_OK; +} + +/*! 
+ * subdeviceCtrlCmdKGrGetInfo + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + * + * TODO: remove once all uses have been migrated to V2 + */ +NV_STATUS +subdeviceCtrlCmdKGrGetInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_INFO_PARAMS *pParams +) +{ + NV2080_CTRL_GR_GET_INFO_V2_PARAMS grInfoParamsV2; + NV2080_CTRL_GR_INFO *pGrInfos = NvP64_VALUE(pParams->grInfoList); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = pParams->grRouteInfo; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvU32 grInfoListSize = NV_MIN(pParams->grInfoListSize, + NV2080_CTRL_GR_INFO_MAX_SIZE); + + // + // Adding the null check as engine GRMGR is missing for DFPGA. + // + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelGraphicsManager != NULL, NV_ERR_NOT_SUPPORTED); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pGrInfos != NULL, NV_ERR_INVALID_ARGUMENT); + + portMemSet(&grInfoParamsV2, 0, sizeof(grInfoParamsV2)); + grInfoParamsV2.grInfoListSize = grInfoListSize; + portMemCopy(grInfoParamsV2.grInfoList, grInfoListSize * sizeof(*pGrInfos), + pGrInfos, grInfoListSize * sizeof(*pGrInfos)); + grInfoParamsV2.grRouteInfo = grRouteInfo; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _kgraphicsCtrlCmdGrGetInfoV2(pGpu, hClient, &grInfoParamsV2)); + + portMemCopy(pGrInfos, grInfoListSize * sizeof(*pGrInfos), + grInfoParamsV2.grInfoList, grInfoListSize * sizeof(*pGrInfos)); + return NV_OK; +} + +/*! + * subdeviceCtrlCmdKGrGetInfoV2 + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetInfoV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_INFO_V2_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _kgraphicsCtrlCmdGrGetInfoV2(pGpu, hClient, pParams)); + + return NV_OK; +} + +/*! 
+ * subdeviceCtrlCmdKGrGetSmToGpcTpcMappings + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetSmToGpcTpcMappings_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphics *pKernelGraphics; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + const KGRAPHICS_STATIC_INFO *pStaticInfo; + NvU32 i; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (pKernelGraphicsManager == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &pParams->grRouteInfo, &pKernelGraphics)); + + // Verify static info is available + pStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE); + + // Verify limits are within bounds + NV_ASSERT_OR_RETURN(pStaticInfo->globalSmOrder.numSm <= NV2080_CTRL_GR_GET_SM_TO_GPC_TPC_MAPPINGS_MAX_SM_COUNT, + NV_ERR_INVALID_LIMIT); + + // Populate output data + pParams->smCount = pStaticInfo->globalSmOrder.numSm; + for (i = 0; i < pStaticInfo->globalSmOrder.numSm; ++i) + { + pParams->smId[i].gpcId = pStaticInfo->globalSmOrder.globalSmId[i].gpcId; + pParams->smId[i].tpcId = pStaticInfo->globalSmOrder.globalSmId[i].localTpcId; + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdKGrGetGlobalSmOrder_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_GLOBAL_SM_ORDER_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphics *pKernelGraphics; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + const KGRAPHICS_STATIC_INFO *pStaticInfo; + NvU32 i; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &pParams->grRouteInfo, &pKernelGraphics)); + + // Verify static info is available + pStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE); + + // Verify limits are within bounds + NV_ASSERT_OR_RETURN(pStaticInfo->globalSmOrder.numSm <= NV2080_CTRL_CMD_GR_GET_GLOBAL_SM_ORDER_MAX_SM_COUNT, + NV_ERR_INVALID_LIMIT); + + // Populate output data + pParams->numSm = pStaticInfo->globalSmOrder.numSm; + pParams->numTpc = pStaticInfo->globalSmOrder.numTpc; + for (i = 0; i < pStaticInfo->globalSmOrder.numSm; ++i) + { + pParams->globalSmId[i].gpcId = pStaticInfo->globalSmOrder.globalSmId[i].gpcId; + pParams->globalSmId[i].localTpcId = pStaticInfo->globalSmOrder.globalSmId[i].localTpcId; + pParams->globalSmId[i].localSmId = pStaticInfo->globalSmOrder.globalSmId[i].localSmId; + pParams->globalSmId[i].globalTpcId = pStaticInfo->globalSmOrder.globalSmId[i].globalTpcId; + pParams->globalSmId[i].virtualGpcId = pStaticInfo->globalSmOrder.globalSmId[i].virtualGpcId; + pParams->globalSmId[i].migratableTpcId = pStaticInfo->globalSmOrder.globalSmId[i].migratableTpcId; + } + + return status; +} + +/*! 
+ * subdeviceCtrlCmdKGrGetSmIssueRateModifier + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetSmIssueRateModifier_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_SM_ISSUE_RATE_MODIFIER_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphics *pKernelGraphics; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + const KGRAPHICS_STATIC_INFO *pStaticInfo; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + NvU32 grIdx; + for (grIdx = 0; grIdx < GPU_MAX_GRS; grIdx++) + { + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, grIdx); + if (pKernelGraphics != NULL) + break; + } + if (pKernelGraphics == NULL) + return NV_ERR_INVALID_STATE; + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &pParams->grRouteInfo, &pKernelGraphics)); + } + + // Verify static info is available + pStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pStaticInfo->pSmIssueRateModifier != NULL, NV_ERR_NOT_SUPPORTED); + + pParams->imla0 = pStaticInfo->pSmIssueRateModifier->imla0; + pParams->fmla16 = pStaticInfo->pSmIssueRateModifier->fmla16; + pParams->dp = pStaticInfo->pSmIssueRateModifier->dp; + pParams->fmla32 = pStaticInfo->pSmIssueRateModifier->fmla32; + pParams->ffma = pStaticInfo->pSmIssueRateModifier->ffma; + pParams->imla1 = pStaticInfo->pSmIssueRateModifier->imla1; + pParams->imla2 = pStaticInfo->pSmIssueRateModifier->imla2; + pParams->imla3 = pStaticInfo->pSmIssueRateModifier->imla3; + pParams->imla4 = pStaticInfo->pSmIssueRateModifier->imla4; + + return NV_OK; +} + +/*! + * subdeviceCtrlCmdKGrGetGpcMask + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetGpcMask_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_GPC_MASK_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if (!IS_MIG_IN_USE(pGpu) || + kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + pParams->gpcMask = kgrmgrGetLegacyGpcMask(pGpu, pKernelGraphicsManager); + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &pParams->grRouteInfo, &pKernelGraphics)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + pParams->gpcMask = pKernelGraphicsStaticInfo->floorsweepingMasks.gpcMask; + } + + return status; +} + +/*! + * subdeviceCtrlCmdKGrGetTpcMask + * + * Note: + * pParams->gpcId is physical GPC id for non-MIG case, but logical GPC id for + * MIG case. 
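+ * In the MIG case the id is an index into the routed GR engine instance's
+ * GPC floorsweeping mask and is validated against the number of GPCs in
+ * that mask.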
+ * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetTpcMask_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_TPC_MASK_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvU32 gpcCount; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if (!IS_MIG_IN_USE(pGpu) || + kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + pParams->tpcMask = kgrmgrGetLegacyTpcMask(pGpu, pKernelGraphicsManager, pParams->gpcId); + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &pParams->grRouteInfo, &pKernelGraphics)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + gpcCount = nvPopCount32(pKernelGraphicsStaticInfo->floorsweepingMasks.gpcMask); + if (pParams->gpcId >= gpcCount) + { + NV_PRINTF(LEVEL_ERROR, "Incorrect GPC-Idx provided = %d\n", pParams->gpcId); + return NV_ERR_INVALID_ARGUMENT; + } + + pParams->tpcMask = pKernelGraphicsStaticInfo->floorsweepingMasks.tpcMask[pParams->gpcId]; + } + + return status; +} + +NV_STATUS +subdeviceCtrlCmdKGrGetNumTpcsForGpc_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_NUM_TPCS_FOR_GPC_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + NvU32 gpcCount; + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + gpcCount = nvPopCount32(pKernelGraphicsStaticInfo->floorsweepingMasks.gpcMask); + if (pParams->gpcId >= gpcCount) + { + NV_PRINTF(LEVEL_ERROR, "Incorrect GPC-Idx provided = %d\n", pParams->gpcId); + return NV_ERR_INVALID_ARGUMENT; + } + + pParams->numTpcs = pKernelGraphicsStaticInfo->floorsweepingMasks.tpcCount[pParams->gpcId]; + + return NV_OK; +} + +/*! 
+ * subdeviceCtrlCmdKGrGetPpcMask + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetPpcMask_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_PPC_MASK_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if (kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, kgrmgrGetLegacyPpcMask(pGpu, pKernelGraphicsManager, pParams->gpcId, &pParams->ppcMask)); + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &pParams->grRouteInfo, &pKernelGraphics)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pPpcMasks != NULL, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pGrInfo != NULL, NV_ERR_NOT_SUPPORTED); + + if (pParams->gpcId >= + pKernelGraphicsStaticInfo->pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS].data) + { + NV_PRINTF(LEVEL_ERROR, "Incorrect GPC-Idx provided = %d\n", pParams->gpcId); + return NV_ERR_INVALID_ARGUMENT; + } + + pParams->ppcMask = pKernelGraphicsStaticInfo->pPpcMasks->mask[pParams->gpcId]; + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdKGrFecsBindEvtbufForUid +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdKGrFecsBindEvtbufForUid_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_PARAMS *pParams +) +{ + NV_STATUS status; + RmClient *pClient; + RsResourceRef *pEventBufferRef = NULL; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hNotifier = RES_GET_HANDLE(pSubdevice); + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OK_OR_RETURN( + serverutilGetResourceRefWithType(hClient, pParams->hEventBuffer, classId(EventBuffer), &pEventBufferRef)); + + NV_ASSERT_OK_OR_RETURN(serverutilGetClientUnderLock(hClient, &pClient)); + + if (bMIGInUse) + return NV_ERR_NOT_SUPPORTED; + + status = fecsAddBindpoint(pGpu, + staticCast(pClient, RsClient), + pEventBufferRef, + hNotifier, + pParams->bAllUsers, + pParams->levelOfDetail, + pParams->eventFilter, + 1, + NULL); + + return status; +} + +// +// subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2 +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdKGrFecsBindEvtbufForUidV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_FECS_BIND_EVTBUF_FOR_UID_V2_PARAMS *pParams +) +{ + NV_STATUS status; + RmClient *pClient; + RsResourceRef *pEventBufferRef = NULL; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hNotifier = RES_GET_HANDLE(pSubdevice); + pParams->reasonCode = NV2080_CTRL_GR_FECS_BIND_REASON_CODE_NONE; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OK_OR_RETURN( + 
serverutilGetResourceRefWithType(hClient, pParams->hEventBuffer, classId(EventBuffer), &pEventBufferRef)); + + NV_ASSERT_OK_OR_RETURN(serverutilGetClientUnderLock(hClient, &pClient)); + + status = fecsAddBindpoint(pGpu, + staticCast(pClient, RsClient), + pEventBufferRef, + hNotifier, + pParams->bAllUsers, + pParams->levelOfDetail, + pParams->eventFilter, + 2, + &pParams->reasonCode); + return status; +} + +/*! + * subdeviceCtrlCmdKGrGetPhysGpcMask + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetPhysGpcMask_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_PHYS_GPC_MASK_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvU32 grIdx = 0; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if (!IS_MIG_ENABLED(pGpu)) + { + grIdx = 0; + } + // + // if MIG is enabled we follow below policies: + // For device level monitoring with no subscription - Return GPC mask for + // requested syspipe + // For valid subscription - Return physical GPC mask after validating that + // a physical syspipe exist in given GPU instance + // + else if (kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + NV_ASSERT_OR_RETURN(pParams->physSyspipeId < GPU_MAX_GRS, NV_ERR_INVALID_ARGUMENT); + grIdx = pParams->physSyspipeId; + } + else + { + MIG_INSTANCE_REF ref; + NvU32 localEngineType; + + // + // Get the relevant subscription and see if provided physicalId is + // valid in defined GPU instance + // + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, + &ref)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_GR(pParams->physSyspipeId), + &localEngineType)); + // Not failing above means physSyspipeId is valid in GPU instance + grIdx = pParams->physSyspipeId; + } + + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, grIdx); + NV_ASSERT_OR_RETURN(pKernelGraphics != NULL, NV_ERR_INVALID_STATE); + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + pParams->gpcMask = pKernelGraphicsStaticInfo->floorsweepingMasks.physGpcMask; + + return NV_OK; +} + +/*! 
+ * subdeviceCtrlCmdKGrGetZcullMask_IMPL + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetZcullMask_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if (kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + pParams->zcullMask = kgrmgrGetLegacyZcullMask(pGpu, pKernelGraphicsManager, pParams->gpcId); + } + else + { + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + if (pParams->gpcId >= + pKernelGraphicsStaticInfo->pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS].data) + { + NV_PRINTF(LEVEL_ERROR, "Incorrect GPC-Idx provided = %d\n", pParams->gpcId); + return NV_ERR_INVALID_ARGUMENT; + } + + if (pKernelGraphicsStaticInfo->floorsweepingMasks.zcullMask[pParams->gpcId] == NV_U32_MAX) + { + return NV_ERR_NOT_SUPPORTED; + } + else + { + pParams->zcullMask = pKernelGraphicsStaticInfo->floorsweepingMasks.zcullMask[pParams->gpcId]; + } + } + + return NV_OK; +} + +/*! 
+ * subdeviceCtrlCmdKGrGetZcullInfo + * + * Lock Requirements: + * Assert that API lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetZcullInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_ZCULL_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pKernelGraphicsManager == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pZcullInfo != NULL, NV_ERR_NOT_SUPPORTED); + + portMemCopy(pParams, + sizeof(*pParams), + pKernelGraphicsStaticInfo->pZcullInfo, + sizeof(*pKernelGraphicsStaticInfo->pZcullInfo)); + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdKGrCtxswPmMode_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_CTXSW_PM_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + if (IS_GSP_CLIENT(pGpu)) + { + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = pParams->grRouteInfo; + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + KernelChannel *pKernelChannel; + KernelGraphicsContext *pKernelGraphicsContext; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (pParams->pmMode != NV2080_CTRL_CTXSW_PM_MODE_NO_CTXSW) + { + kgrmgrCtrlSetChannelHandle(pParams->hChannel, &grRouteInfo); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, + RES_GET_CLIENT_HANDLE(pSubdevice), + &grRouteInfo, + &pKernelGraphics)); + + // Retrieve channel from either bare channel or TSG handle + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kchannelGetFromDualHandleRestricted(RES_GET_CLIENT_HANDLE(pSubdevice), pParams->hChannel, &pKernelChannel)); + + NV_ASSERT_OK_OR_RETURN( + kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext)); + + // Setup / promote the PM ctx buffer if required + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxSetupDeferredPmBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel)); + } + + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_GR_CTXSW_PM_MODE, + pParams, + sizeof(*pParams)); + } + + return status; +} + +/*! + * @brief Gets information about ROPs. + * + * Lock Requirements: + * Assert that API and Gpus lock held on entry + * + * @return NV_OK if success. Error otherwise. 
+ */ +NV_STATUS +subdeviceCtrlCmdKGrGetROPInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_ROP_INFO_PARAMS *pRopInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pRopInfo != NULL, NV_ERR_NOT_SUPPORTED); + + portMemCopy(pRopInfoParams, + sizeof(*pRopInfoParams), + pKernelGraphicsStaticInfo->pRopInfo, + sizeof(*pKernelGraphicsStaticInfo->pRopInfo)); + + return NV_OK; +} + +/*! + * @brief Gets the current attribute buffer size. + * + * Lock Requirements: + * Assert that API lock held on entry + * + * @return NV_OK if success. Error otherwise. + */ +NV_STATUS +subdeviceCtrlCmdKGrGetAttributeBufferSize_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_ATTRIBUTE_BUFFER_SIZE_PARAMS *pAttribBufferSizeParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvU32 engineId; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + + // Verify static info is available + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + if (pKernelGraphicsStaticInfo->pContextBuffersInfo == NULL) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsInitializeDeferredStaticData(pGpu, pKernelGraphics, NV01_NULL_OBJECT, NV01_NULL_OBJECT)); + + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + } + + engineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB; + pAttribBufferSizeParams->attribBufferSize = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[engineId].size; + + return NV_OK; +} + +/*! 
+ * subdeviceCtrlCmdKGrGetEngineContextProperties + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetEngineContextProperties_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_ENGINE_CONTEXT_PROPERTIES_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvU32 size = 0; + NvU32 alignment = RM_PAGE_SIZE; + NvU32 engineId; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + engineId = DRF_VAL(0080_CTRL_FIFO, _GET_ENGINE_CONTEXT_PROPERTIES, _ENGINE_ID, pParams->engineId); + + if (engineId >= NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &pParams->grRouteInfo, &pKernelGraphics)); + + // Verify static info is available + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + if (pKernelGraphicsStaticInfo->pContextBuffersInfo == NULL) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsInitializeDeferredStaticData(pGpu, pKernelGraphics, NV01_NULL_OBJECT, NV01_NULL_OBJECT)); + + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + } + + size = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[engineId].size; + alignment = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[engineId].alignment; + + if (size == NV_U32_MAX) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (pParams->bInfoPopulated) + { + size = NV_MAX(size, pParams->size); + alignment = NV_MAX(alignment, pParams->alignment); + } + + pParams->size = size; + pParams->alignment = alignment; + pParams->bInfoPopulated = NV_TRUE; + + return NV_OK; +} + +/*! + * @brief Gets the Graphics Context buffer size and alignment + * + * Lock Requirements: + * Assert that API and Gpus lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetCtxBufferSize_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_CTX_BUFFER_SIZE_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphics *pKernelGraphics; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NV2080_CTRL_GR_CTX_BUFFER_INFO *pCtxBufferInfo; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelChannel *pKernelChannel; + KernelGraphicsContext *pKernelGraphicsContext; + NvU32 bufferCount; + NvU64 totalBufferSize; + NvU64 prevAlignment; + NvU32 i; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to fetch the total GR Context Buffer Size. 
+ // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + kgrmgrCtrlSetChannelHandle(pParams->hChannel, &grRouteInfo); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + + // Get channel from provided handle and owner client + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + CliGetKernelChannel(hClient, pParams->hChannel, &pKernelChannel)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext)); + + // Get the total buffer count + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetBufferCount(pGpu, pKernelGraphicsContext, pKernelGraphics, &bufferCount)); + + pCtxBufferInfo = portMemAllocNonPaged(bufferCount * sizeof(NV2080_CTRL_GR_CTX_BUFFER_INFO)); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + pCtxBufferInfo != NULL, + status = NV_ERR_NO_MEMORY; + goto done;); + portMemSet(pCtxBufferInfo, 0, bufferCount * sizeof(NV2080_CTRL_GR_CTX_BUFFER_INFO)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kgrctxGetCtxBufferInfo(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + kchannelGetGfid(pKernelChannel), + bufferCount, + &bufferCount, + pCtxBufferInfo), + done); + + // + // Calculate total size by walking thru all buffers & alignments. Adjust the total size + // by adding the respective alignment so that the mapping VA can be adjusted. + // + totalBufferSize = 0; + prevAlignment = 0x0; + for (i = 0; i < bufferCount; i++) + { + if (prevAlignment != pCtxBufferInfo[i].alignment) + { + totalBufferSize += pCtxBufferInfo[i].alignment; + prevAlignment = pCtxBufferInfo[i].alignment; + } + + totalBufferSize += (pCtxBufferInfo[i].alignment != 0x0) ? + NV_ALIGN_UP(pCtxBufferInfo[i].size, pCtxBufferInfo[i].alignment) : pCtxBufferInfo[i].size; + } + + pParams->totalBufferSize = totalBufferSize; + +done: + portMemFree(pCtxBufferInfo); + return status; +} + +/*! + * @brief Gets the Graphics Context buffer info like opaque buffer pointer + * size, alignment, aperture, allocation contiguity etc. + * + * Lock Requirements: + * Assert that API and Gpus lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetCtxBufferInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelGraphics *pKernelGraphics; + KernelChannel *pKernelChannel; + KernelGraphicsContext *pKernelGraphicsContext; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to get Graphics context buffers information. 
+ // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + kgrmgrCtrlSetChannelHandle(pParams->hChannel, &grRouteInfo); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, pParams->hUserClient, &grRouteInfo, &pKernelGraphics)); + + // Get channel from provided handle and owner client + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + CliGetKernelChannel(pParams->hUserClient, pParams->hChannel, &pKernelChannel)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetCtxBufferInfo(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + kchannelGetGfid(pKernelChannel), + NV_ARRAY_ELEMENTS(pParams->ctxBufferInfo), + &pParams->bufferCount, + pParams->ctxBufferInfo)); + + return status; +} + +/*! + * subdeviceCtrlCmdKGrInternalGetCtxBufferPtes + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + */ +NV_STATUS +subdeviceCtrlCmdKGrGetCtxBufferPtes_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelGraphics *pKernelGraphics; + KernelChannel *pKernelChannel; + KernelGraphicsContext *pKernelGraphicsContext; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // + // Currently, ROUTE_TO_VGPU_HOST instructs resource server to call the RPC + // on all vGPU configurations including SRIOV Standard which is not required. + // Hence, manually dispatching the RPC for required vGPU configs. + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to get Graphics context buffers PTEs information. 
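+    //
+    // Unlike the two controls above, the RPC below is issued with the
+    // subdevice's own client and object handles rather than those recorded in
+    // pRmCtrlParams.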
+ // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hObject = RES_GET_HANDLE(pSubdevice); + + NV_RM_RPC_CONTROL(pGpu, + hClient, + hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + + kgrmgrCtrlSetChannelHandle(pParams->hChannel, &grRouteInfo); + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, pParams->hUserClient, &grRouteInfo, &pKernelGraphics)); + + // Get channel from provided handle and owner client + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + CliGetKernelChannel(pParams->hUserClient, pParams->hChannel, &pKernelChannel)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxFromKernelChannel(pKernelChannel, &pKernelGraphicsContext)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetCtxBufferPtes(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + kchannelGetGfid(pKernelChannel), + pParams->bufferType, + pParams->firstPage, + pParams->physAddrs, + NV_ARRAY_ELEMENTS(pParams->physAddrs), + &pParams->numPages, + &pParams->bNoMorePages)); + + return status; +} + diff --git a/src/nvidia/src/kernel/gpu/gr/kernel_graphics_context.c b/src/nvidia/src/kernel/gpu/gr/kernel_graphics_context.c new file mode 100644 index 000000000..df36c5104 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/kernel_graphics_context.c @@ -0,0 +1,3441 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/gr/kernel_graphics_context.h" +#include "kernel/gpu/gr/kernel_graphics_manager.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/mem_mgr/gpu_vaspace.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "kernel/core/locks.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "vgpu/rpc.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "kernel/virtualization/hypervisor/hypervisor.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "gpu/mmu/kern_gmmu.h" +#include "rmapi/client.h" + +/*! + * @brief Retrieve the context object from a KernelChannel. + * + * @return NV_ERR_OBJECT_NOT_FOUND if context object is missing + */ +NV_STATUS +kgrctxFromKernelChannel_IMPL +( + KernelChannel *pKernelChannel, + KernelGraphicsContext **ppKernelGraphicsContext +) +{ + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + if (pKernelChannel->hKernelGraphicsContext != NV01_NULL_OBJECT) + { + NV_ASSERT_OK_OR_RETURN( + clientGetResourceRefByType(RES_GET_CLIENT(pKernelChannel), + pKernelChannel->hKernelGraphicsContext, + classId(KernelGraphicsContext), + &pResourceRef)); + } + else if ((pKernelChannel->pKernelChannelGroupApi != NULL) && + (pKernelChannel->pKernelChannelGroupApi->hKernelGraphicsContext != NV01_NULL_OBJECT)) + { + NV_ASSERT_OK_OR_RETURN( + clientGetResourceRefByType(RES_GET_CLIENT(pKernelChannel), + pKernelChannel->pKernelChannelGroupApi->hKernelGraphicsContext, + classId(KernelGraphicsContext), + &pResourceRef)); + } + else + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + *ppKernelGraphicsContext = dynamicCast(pResourceRef->pResource, KernelGraphicsContext); + NV_ASSERT_OR_RETURN(*ppKernelGraphicsContext != NULL, NV_ERR_INVALID_STATE); + + return NV_OK; +} + +/*! + * @brief Retrieve the context object from a ChannelGroup. + * + * @return NV_ERR_OBJECT_NOT_FOUND if context object is missing + */ +NV_STATUS +kgrctxFromKernelChannelGroupApi_IMPL +( + KernelChannelGroupApi *pKernelChannelGroupApi, + KernelGraphicsContext **ppKernelGraphicsContext +) +{ + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN(pKernelChannelGroupApi != NULL, NV_ERR_INVALID_ARGUMENT); + + if (pKernelChannelGroupApi->hKernelGraphicsContext == NV01_NULL_OBJECT) + return NV_ERR_OBJECT_NOT_FOUND; + + NV_ASSERT_OK_OR_RETURN( + clientGetResourceRefByType(RES_GET_CLIENT(pKernelChannelGroupApi), + pKernelChannelGroupApi->hKernelGraphicsContext, + classId(KernelGraphicsContext), + &pResourceRef)); + + *ppKernelGraphicsContext = dynamicCast(pResourceRef->pResource, KernelGraphicsContext); + NV_ASSERT_OR_RETURN(*ppKernelGraphicsContext != NULL, NV_ERR_INVALID_STATE); + + return NV_OK; +} + +/** + * @brief Handle NV0090 ctrl call forwarding. The current control call is + * dispatched to the KernelGraphicsContext object provided. 
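+ *
+ *        The handle is looked up on the caller's client and resControl() is
+ *        invoked on the resolved KernelGraphicsContext resource with the
+ *        original control parameters.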
+ */ +NV_STATUS kgrctxCtrlHandle +( + CALL_CONTEXT *pCallContext, + NvHandle hKernelGraphicsContext +) +{ + RsResourceRef *pResourceRef; + + NV_ASSERT_OK_OR_RETURN( + clientGetResourceRefByType(pCallContext->pClient, + hKernelGraphicsContext, + classId(KernelGraphicsContext), + &pResourceRef)); + + return resControl(pResourceRef->pResource, pCallContext, pCallContext->pControlParams); +} + +/** + * @brief Translate global ctx buffer enum to external NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID + */ +NV_STATUS +kgrctxGetGlobalContextBufferExternalId_IMPL +( + GR_GLOBALCTX_BUFFER id, + NvU32 *pExternalId +) +{ + NV_ASSERT_OR_RETURN(pExternalId != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_GLOBALCTX_BUFFER, id), NV_ERR_INVALID_ARGUMENT); + + switch (id) + { + case GR_GLOBALCTX_BUFFER_BUNDLE_CB: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB; + break; + case GR_GLOBALCTX_BUFFER_PAGEPOOL: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL; + break; + case GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB; + break; + case GR_GLOBALCTX_BUFFER_RTV_CB: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL; + break; + case GR_GLOBALCTX_BUFFER_GFXP_POOL: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL; + break; + case GR_GLOBALCTX_BUFFER_GFXP_CTRL_BLK: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK; + break; + case GR_GLOBALCTX_BUFFER_FECS_EVENT: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT; + break; + case GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP; + break; + case GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP: + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP; + break; + case GR_GLOBAL_BUFFER_GLOBAL_PRIV_ACCESS_MAP: + // TODO this is not valid, this is not a context buffer + *pExternalId = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP; + break; + // + // No default case: Compiler will enforce that this switch is updated if + // new global ctx buffers are added + // + } + + return NV_OK; +} + +/** + * @brief Translate NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID to global ctx buffer enum + */ +NV_STATUS +kgrctxGetGlobalContextBufferInternalId_IMPL +( + NvU32 externalId, + GR_GLOBALCTX_BUFFER *pInternalId +) +{ + NV_ASSERT_OR_RETURN(pInternalId != NULL, NV_ERR_INVALID_ARGUMENT); + + switch (externalId) + { + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN: + // fall through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM: + // fall through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH: + return NV_ERR_INVALID_ARGUMENT; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB: + *pInternalId = GR_GLOBALCTX_BUFFER_BUNDLE_CB; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL: + *pInternalId = GR_GLOBALCTX_BUFFER_PAGEPOOL; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB: + *pInternalId = GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL: + *pInternalId = GR_GLOBALCTX_BUFFER_RTV_CB; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL: + *pInternalId = GR_GLOBALCTX_BUFFER_GFXP_POOL; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK: + *pInternalId = GR_GLOBALCTX_BUFFER_GFXP_CTRL_BLK; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT: + *pInternalId = GR_GLOBALCTX_BUFFER_FECS_EVENT; 
+ break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP: + *pInternalId = GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP: + *pInternalId = GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP: + *pInternalId = GR_GLOBAL_BUFFER_GLOBAL_PRIV_ACCESS_MAP; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +/*! Translate GR_CTX_BUFFER to NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */ +NV_STATUS +kgrctxCtxBufferToFifoEngineId_IMPL +( + GR_CTX_BUFFER buffer, + NvU32 *pFifoEngineId +) +{ + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_CTX_BUFFER, buffer), NV_ERR_INVALID_ARGUMENT); + + switch (buffer) + { + case GR_CTX_BUFFER_MAIN: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS; + break; + case GR_CTX_BUFFER_ZCULL: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL; + break; + case GR_CTX_BUFFER_PM: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM; + break; + case GR_CTX_BUFFER_PREEMPT: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT; + break; + case GR_CTX_BUFFER_SPILL: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL; + break; + case GR_CTX_BUFFER_BETA_CB: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB; + break; + case GR_CTX_BUFFER_PAGEPOOL: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL; + break; + case GR_CTX_BUFFER_RTV_CB: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV; + break; + case GR_CTX_BUFFER_PATCH: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH; + break; + // No default case - Compiler enforces switch update if enum is changed + } + + return NV_OK; +} + +/** + * @brief Translate global ctx buffer enum to NV0080 FIFO engine context properties index + */ +NV_STATUS +kgrctxGlobalCtxBufferToFifoEngineId_IMPL +( + GR_GLOBALCTX_BUFFER buffId, + NvU32 *pFifoEngineId +) +{ + NV_ASSERT_OR_RETURN(pFifoEngineId != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_GLOBALCTX_BUFFER, buffId), NV_ERR_INVALID_ARGUMENT); + + switch (buffId) + { + case GR_GLOBALCTX_BUFFER_BUNDLE_CB: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB; + break; + case GR_GLOBALCTX_BUFFER_PAGEPOOL: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL; + break; + case GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB; + break; + case GR_GLOBALCTX_BUFFER_RTV_CB: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL; + break; + case GR_GLOBALCTX_BUFFER_GFXP_POOL: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL; + break; + case GR_GLOBALCTX_BUFFER_GFXP_CTRL_BLK: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK; + break; + case GR_GLOBALCTX_BUFFER_FECS_EVENT: + *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT; + break; + case GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP: + // fall-through + case 
GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP:
+            // fall-through
+        case GR_GLOBAL_BUFFER_GLOBAL_PRIV_ACCESS_MAP:
+            *pFifoEngineId = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP;
+            break;
+        // No default case - compiler enforces update if enum changes
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+kgrctxGetGidInfoInPlace_IMPL
+(
+    OBJGPU *pGpu,
+    NvU8 *pUuidBuffer,
+    NvU32 uuidBufferSize,
+    NvU32 flags
+)
+{
+    NvU8 *pUuid;
+    NvU32 uuidLength;
+    NV_STATUS status;
+
+    // on success, allocates memory for uuid
+    status = gpuGetGidInfo(pGpu, &pUuid, &uuidLength, flags);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    if (uuidLength == uuidBufferSize)
+    {
+        portMemCopy(pUuidBuffer, uuidBufferSize, pUuid, uuidLength);
+    }
+
+    portMemFree(pUuid);
+
+    if (uuidLength != uuidBufferSize)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+kgrctxFillCtxBufferInfo_IMPL
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU32 externalId,
+    NvBool bBufferGlobal,
+    NV2080_CTRL_GR_CTX_BUFFER_INFO *pCtxBufferInfo
+)
+{
+    NvU32 pageSize;
+    NV_STATUS status;
+
+    MEMORY_DESCRIPTOR *pRootMemDesc = memdescGetRootMemDesc(pMemDesc, NULL);
+
+    pCtxBufferInfo->bufferHandle = NV_PTR_TO_NvP64(pMemDesc);
+    pCtxBufferInfo->bufferType = externalId;
+    pCtxBufferInfo->bIsContigous = memdescGetContiguity(pMemDesc, AT_GPU);
+    pCtxBufferInfo->aperture = memdescGetAddressSpace(pMemDesc);
+    pCtxBufferInfo->pageCount = pMemDesc->PageCount;
+    pCtxBufferInfo->kind = memdescGetPteKindForGpu(pMemDesc, pMemDesc->pGpu);
+
+    {
+        NvU64 physAddr;
+        GMMU_APERTURE aperture = kgmmuGetExternalAllocAperture(pCtxBufferInfo->aperture);
+
+        memdescGetPhysAddrsForGpu(pMemDesc, pMemDesc->pGpu,
+                                  AT_GPU, 0, 0, 1,
+                                  &physAddr);
+
+        pCtxBufferInfo->physAddr =
+            kgmmuEncodePhysAddr(GPU_GET_KERNEL_GMMU(pMemDesc->pGpu), aperture, physAddr,
+                                NVLINK_INVALID_FABRIC_ADDR);
+    }
+
+    pageSize = memdescGetPageSize(pMemDesc, AT_GPU);
+    if (pageSize == 0)
+    {
+        status = memmgrSetMemDescPageSize_HAL(pMemDesc->pGpu,
+                                              GPU_GET_MEMORY_MANAGER(pMemDesc->pGpu),
+                                              pMemDesc,
+                                              AT_GPU,
+                                              RM_ATTR_PAGE_SIZE_DEFAULT);
+        if (status != NV_OK)
+            return status;
+
+        pageSize = memdescGetPageSize(pMemDesc, AT_GPU);
+        NV_ASSERT(pageSize != 0);
+    }
+
+    //
+    // Alignment is used to adjust the mapping VA. Hence, we need to make sure
+    // that it is at least pageSize so that the mapping calculation works
+    // correctly.
+    //
+    pCtxBufferInfo->alignment = (pMemDesc->Alignment != 0) ?
+        NV_ALIGN_UP(pMemDesc->Alignment, pageSize) : pageSize;
+
+    pCtxBufferInfo->size = pMemDesc->ActualSize;
+    pCtxBufferInfo->pageSize = pageSize;
+
+    pCtxBufferInfo->bGlobalBuffer = bBufferGlobal;
+    pCtxBufferInfo->bLocalBuffer = !bBufferGlobal;
+    pCtxBufferInfo->bDeviceDescendant = pRootMemDesc->pGpu != NULL;
+
+    if (pCtxBufferInfo->bDeviceDescendant)
+    {
+        status = kgrctxGetGidInfoInPlace(pMemDesc->pGpu, pCtxBufferInfo->uuid, sizeof(pCtxBufferInfo->uuid),
+                                         DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) |
+                                         DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY));
+        if (status != NV_OK)
+            return status;
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Construct dupable kernel graphics context.
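+ *
+ *        All context state lives in a ref-counted KernelGraphicsContextShared
+ *        object, so duped KernelGraphicsContext handles share one underlying
+ *        context; the copy constructor below only takes a reference on the
+ *        shared object.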
+ */ +NV_STATUS +kgrctxConstruct_IMPL +( + KernelGraphicsContext *pKernelGraphicsContext, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + RsShared *pShared; + + if (RS_IS_COPY_CTOR(pParams)) + return kgrctxCopyConstruct_IMPL(pKernelGraphicsContext, pCallContext, pParams); + + NV_ASSERT_OK_OR_GOTO(status, + serverAllocShare(&g_resServ, classInfo(KernelGraphicsContextShared), &pShared), + cleanup); + + pKernelGraphicsContext->pShared = dynamicCast(pShared, KernelGraphicsContextShared); + NV_ASSERT_OK_OR_GOTO(status, + shrkgrctxInit(GPU_RES_GET_GPU(pKernelGraphicsContext), + pKernelGraphicsContext->pShared, + pKernelGraphicsContext), + cleanup); + +cleanup: + if (status != NV_OK) + { + if (pKernelGraphicsContext->pShared != NULL) + serverFreeShare(&g_resServ, pShared); + + } + + return status; +} + +/*! + * @brief Copy Construct dupable kernel graphics context. + */ +NV_STATUS +kgrctxCopyConstruct_IMPL +( + KernelGraphicsContext *pKernelGraphicsContextDst, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pSrcRef = pParams->pSrcRef; + KernelGraphicsContext *pKernelGraphicsContextSrc = + dynamicCast(pSrcRef->pResource, KernelGraphicsContext); + + pKernelGraphicsContextDst->pShared = pKernelGraphicsContextSrc->pShared; + serverRefShare(&g_resServ, staticCast(pKernelGraphicsContextDst->pShared, RsShared)); + + return NV_OK; +} + +NvHandle +kgrctxGetInternalObjectHandle_IMPL(KernelGraphicsContext *pKernelGraphicsContext) +{ + return NV01_NULL_OBJECT; +} + +/*! + * @brief Destruct dupable kernel graphics context. + */ +void +kgrctxDestruct_IMPL +( + KernelGraphicsContext *pKernelGraphicsContext +) +{ + RsShared *pShared = staticCast(pKernelGraphicsContext->pShared, RsShared); + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelGraphicsContext); + + if (serverGetShareRefCount(&g_resServ, pShared) == 1) + { + shrkgrctxTeardown_IMPL(pGpu, pKernelGraphicsContext->pShared, pKernelGraphicsContext); + } + + serverFreeShare(&g_resServ, pShared); +} + +/*! 
+ * @brief Retrieve unicast context state + */ +NV_STATUS kgrctxGetUnicast_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphicsContextUnicast **ppKernelGraphicsContextUnicast +) +{ + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + NV_ASSERT_OR_RETURN(pKernelGraphicsContext->pShared != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsContext->pShared->pKernelGraphicsContextUnicast != NULL, + NV_ERR_INVALID_STATE); + + *ppKernelGraphicsContextUnicast = + &pKernelGraphicsContext->pShared->pKernelGraphicsContextUnicast[subdevInst]; + return NV_OK; +} + +NV_STATUS kgrctxLookupMmuFault_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + NV83DE_MMU_FAULT_INFO *pMmuFaultInfo +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + *pMmuFaultInfo = pKernelGraphicsContextUnicast->mmuFaultInfo; + return NV_OK; +} + +NV_STATUS kgrctxClearMmuFault_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + pKernelGraphicsContextUnicast->mmuFaultInfo.valid = NV_FALSE; + pKernelGraphicsContextUnicast->mmuFaultInfo.faultInfo = 0; + return NV_OK; +} + +void kgrctxRecordMmuFault_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + NvU32 mmuFaultInfo +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status; + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast), + return;); + + pKernelGraphicsContextUnicast->mmuFaultInfo.valid = NV_TRUE; + pKernelGraphicsContextUnicast->mmuFaultInfo.faultInfo = mmuFaultInfo; +} + +/*! + * @brief returns if the main context buffer has been allocated + * + * @return NV_TRUE if allocated, NV_FALSE otherwise + */ +NvBool +kgrctxIsMainContextAllocated_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + return (kgrctxGetMainContextBuffer(pGpu, pKernelGraphicsContext, &pMemDesc) == NV_OK) && + (pMemDesc != NULL); +} + +/*! Retrieve the memdesc containing the main ctx buffer */ +NV_STATUS kgrctxGetMainContextBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + ENGINE_CTX_DESCRIPTOR *pEngCtxDesc; + NvU32 subDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + RsResourceRef *pParentRef = RES_GET_REF(pKernelGraphicsContext)->pParentRef; + KernelChannelGroupApi *pKernelChannelGroupApi; + KernelChannelGroup *pKernelChannelGroup; + + *ppMemDesc = NULL; + pKernelChannelGroupApi = dynamicCast(pParentRef->pResource, KernelChannelGroupApi); + pKernelChannelGroup = pKernelChannelGroupApi->pKernelChannelGroup; + pEngCtxDesc = pKernelChannelGroup->ppEngCtxDesc[subDevInst]; + + if (pEngCtxDesc != NULL) + *ppMemDesc = pEngCtxDesc->pMemDesc; + + return NV_OK; +} + +/*! 
+ * @brief Retrieve information about the context buffers + */ +NV_STATUS +kgrctxGetCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + NvU32 gfid, + NvU32 bufferCount, + MEMORY_DESCRIPTOR **ppBuffers, + NvU32 *pCtxBufferType, + NvU32 *pBufferCountOut, + NvU32 *pFirstGlobalBuffer +) +{ + GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + MEMORY_DESCRIPTOR *pGrCtxBufferMemDesc; + NvU32 bufferCountOut = 0; + NvU32 i; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + // Get local context buffer memdesc. + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetMainContextBuffer(pGpu, pKernelGraphicsContext, &pGrCtxBufferMemDesc)); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, + pGrCtxBufferMemDesc != NULL, + NV_ERR_INVALID_OBJECT); + + NV_CHECK_OR_RETURN(LEVEL_INFO, bufferCountOut < bufferCount, NV_ERR_INVALID_ARGUMENT); + pCtxBufferType[bufferCountOut] = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN; + ppBuffers[bufferCountOut++] = pGrCtxBufferMemDesc; + + // Get context patch buffer memdesc. + NV_CHECK_OR_RETURN(LEVEL_SILENT, + pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc != NULL, + NV_ERR_INVALID_STATE); + + NV_CHECK_OR_RETURN(LEVEL_INFO, bufferCountOut < bufferCount, NV_ERR_INVALID_ARGUMENT); + pCtxBufferType[bufferCountOut] = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH; + ppBuffers[bufferCountOut++] = pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc; + + // Add PM ctxsw buffer if it's allocated. + if (pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc != NULL) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, bufferCountOut < bufferCount, NV_ERR_INVALID_ARGUMENT); + pCtxBufferType[bufferCountOut] = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM; + ppBuffers[bufferCountOut++] = pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc; + } + + if (pFirstGlobalBuffer != NULL) + { + *pFirstGlobalBuffer = bufferCountOut; + } + + // Add global buffers. 
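+    // Only the global buffers that are actually allocated for this GFID are
+    // reported; each one is tagged with its external promote-ctx buffer ID.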
+ for (i = 0; i < GR_GLOBALCTX_BUFFER_COUNT; i++) + { + MEMORY_DESCRIPTOR *pMemDesc = pGlobalCtxBuffers->memDesc[i]; + if (pMemDesc != NULL) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, bufferCountOut < bufferCount, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OK( + kgrctxGetGlobalContextBufferExternalId(i, &pCtxBufferType[bufferCountOut])); + ppBuffers[bufferCountOut++] = pMemDesc; + } + } + + *pBufferCountOut = bufferCountOut; + + return NV_OK; +} + +/* + * @brief Get maximum context buffer count including global and local buffers + * + * @param[in] pGpu + * @param[in] pKernelGraphicsContext + * @param[in] pKernelGraphics + * @param[out] pBufferCount Max context buffer count + */ +NV_STATUS +kgrctxGetBufferCount_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + NvU32 *pBufferCount +) +{ + NvU32 gfid; + NvBool bCallingContextPlugin; + GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers; + NvU32 i; + + *pBufferCount = 0; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + NV_ASSERT_OK_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin)); + + pGlobalCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + + if (bCallingContextPlugin) + { + gfid = GPU_GFID_PF; + } + + for (i = 0; i < GR_GLOBALCTX_BUFFER_COUNT; i++) + { + if (pGlobalCtxBuffers->memDesc[i] != NULL) + (*pBufferCount)++; + } + + // Increment by 3 to include local ctx buffer, patch context buffer and PM Ctxsw buffer + *pBufferCount += 3; + + return NV_OK; +} + +/* + * @brief Get context buffer info like size, alignment for global and + * local buffers + * + * @param[in] pGpu + * @param[in] pKernelGraphicsContext + * @param[in] pKernelGraphics + * @param[in] gfid + * @param[in] bufferMaxCount Amount of space provided in pCtxBufferInfo + * @param[out] bufferCount Number of buffers described + * @param[out] pCtxBufferInfo Structure to fill buffer information + * + * @return NV_OK successfully probed all buffers + * @return NV_ERR_INVALID_ARGUMENT not enough space provided for buffers queried + */ +NV_STATUS +kgrctxGetCtxBufferInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + NvU32 gfid, + NvU32 bufferMaxCount, + NvU32 *pBufferCount, + NV2080_CTRL_GR_CTX_BUFFER_INFO *pCtxBufferInfo +) +{ + MEMORY_DESCRIPTOR *pMemDescArray[3 + GR_GLOBALCTX_BUFFER_COUNT]; + NvU32 bufferExternalId[3 + GR_GLOBALCTX_BUFFER_COUNT]; + NvU32 memdescCount; + NvU32 firstGlobalBuffer; + NvU32 i; + NvU32 j; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetCtxBuffers(pGpu, pKernelGraphicsContext, pKernelGraphics, + gfid, + NV_ARRAY_ELEMENTS(pMemDescArray), + pMemDescArray, + bufferExternalId, + &memdescCount, + &firstGlobalBuffer)); + + if (bufferMaxCount < memdescCount) + return NV_ERR_INVALID_ARGUMENT; + + for (i = 0; i < memdescCount; i++) + { + NvBool bGlobalBuffer = (i >= firstGlobalBuffer); + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + kgrctxFillCtxBufferInfo(pMemDescArray[i], + bufferExternalId[i], + bGlobalBuffer, + &pCtxBufferInfo[i])); + } + + // + // Sort the buffer info in descending order using alignment so that the + // VA range calculation can figure out the optimal VA range size. 
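+    //
+    // The array is small (at most 3 local buffers plus the global buffers),
+    // so a simple in-place bubble sort is sufficient here.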
+    //
+    for (i = 0; i < memdescCount; i++)
+    {
+        for (j = 0; j < memdescCount - 1; j++)
+        {
+            if (pCtxBufferInfo[j].alignment < pCtxBufferInfo[j + 1].alignment)
+            {
+                NV2080_CTRL_GR_CTX_BUFFER_INFO tmp;
+
+                portMemCopy(&tmp, sizeof(tmp), &pCtxBufferInfo[j], sizeof(tmp));
+                portMemCopy(&pCtxBufferInfo[j], sizeof(tmp), &pCtxBufferInfo[j + 1], sizeof(tmp));
+                portMemCopy(&pCtxBufferInfo[j + 1], sizeof(tmp), &tmp, sizeof(tmp));
+            }
+        }
+    }
+
+    *pBufferCount = memdescCount;
+
+    return NV_OK;
+}
+
+/*
+ * @brief Return physical addresses of a context buffer starting from its 'firstPage' page.
+ *        The function is intended to be called repeatedly while advancing
+ *        the 'firstPage' parameter until all pages of the buffer are queried.
+ *        This condition is indicated by *pbNoMorePages == NV_TRUE.
+ *        If the requested buffer is contiguous, the address of its first page
+ *        is always returned.
+ *
+ * @param[in]  pGpu
+ * @param[in]  pKernelGraphicsContext
+ * @param[in]  pKernelGraphics
+ * @param[in]  gfid
+ * @param[in]  bufferType Requested ctx buffer type
+ * @param[in]  firstPage  First page of the buffer to be queried
+ * @param[out] pPhysAddrs Array to be filled with page addresses
+ * @param[in]  addrsSize  Number of elements of pPhysAddrs
+ * @param[out] pNumPages  Number of page addresses returned
+ * @param[out] pbNoMorePages End of buffer reached
+ *
+ * @return NV_OK successfully obtained requested addresses
+ * @return NV_ERR_INVALID_ARGUMENT bufferType does not specify a context buffer
+ */
+NV_STATUS
+kgrctxGetCtxBufferPtes_IMPL
+(
+    OBJGPU *pGpu,
+    KernelGraphicsContext *pKernelGraphicsContext,
+    KernelGraphics *pKernelGraphics,
+    NvU32 gfid,
+    NvU32 bufferType,
+    NvU32 firstPage,
+    NvU64 *pPhysAddrs,
+    NvU32 addrsSize,
+    NvU32 *pNumPages,
+    NvBool *pbNoMorePages
+)
+{
+    NvU64 bufferSize;
+    NvU64 pageSize;
+    NvU32 numPages;
+    NvU32 bufferCount;
+    NvU32 i;
+    MEMORY_DESCRIPTOR *pMemDescArray[3 + GR_GLOBALCTX_BUFFER_COUNT];
+    NvU32 bufferExternalId[3 + GR_GLOBALCTX_BUFFER_COUNT];
+
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+        kgrctxGetCtxBuffers(pGpu, pKernelGraphicsContext, pKernelGraphics,
+                            gfid,
+                            NV_ARRAY_ELEMENTS(pMemDescArray),
+                            pMemDescArray,
+                            bufferExternalId,
+                            &bufferCount,
+                            NULL));
+
+    for (i = 0; i < bufferCount; i++)
+    {
+        if (bufferExternalId[i] == bufferType)
+            break;
+    }
+
+    if (addrsSize == 0)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, i != bufferCount, NV_ERR_INVALID_ARGUMENT);
+
+    bufferSize = memdescGetSize(pMemDescArray[i]);
+    pageSize = memdescGetPageSize(pMemDescArray[i], AT_GPU);
+
+    if (pageSize == 0)
+    {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    numPages = NV_ROUNDUP(bufferSize, pageSize) / pageSize;
+
+    if (firstPage >= numPages)
+    {
+        numPages = 0;
+        *pbNoMorePages = NV_TRUE;
+        goto done;
+    }
+
+    if (memdescGetContiguity(pMemDescArray[i], AT_GPU))
+    {
+        firstPage = 0;
+        numPages = 1;
+        *pbNoMorePages = NV_TRUE;
+    }
+    else
+    {
+        numPages -= firstPage;
+        *pbNoMorePages = (numPages <= addrsSize);
+        numPages = NV_MIN(numPages, addrsSize);
+    }
+
+    if (numPages > 0)
+    {
+        memdescGetPhysAddrs(pMemDescArray[i],
+                            AT_GPU,
+                            firstPage * pageSize,
+                            pageSize,
+                            numPages,
+                            pPhysAddrs);
+    }
+
+done:
+    *pNumPages = numPages;
+
+    return NV_OK;
+}
+
+/*!
+ * This function does the following:
+ * 1. Allocates the main GR context buffer
+ * 2. 
Sets up the GR context memory descriptor by calling kchannelSetEngineContextMemDesc
+ */
+NV_STATUS
+kgrctxAllocMainCtxBuffer_IMPL
+(
+    OBJGPU *pGpu,
+    KernelGraphicsContext *pKernelGraphicsContext,
+    KernelGraphics *pKernelGraphics,
+    KernelChannel *pKernelChannel
+)
+{
+    MEMORY_DESCRIPTOR *pGrCtxBufferMemDesc = NULL;
+    NvU32 ctxSize;
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast;
+    NvU64 allocFlags = MEMDESC_FLAGS_GPU_PRIVILEGED;
+    CTX_BUF_POOL_INFO *pCtxBufPool = NULL;
+    NvBool bIsContiguous = kgraphicsShouldForceMainCtxContiguity_HAL(pGpu, pKernelGraphics);
+    const GR_BUFFER_ATTR *pAttr = kgraphicsGetContextBufferAttr(pGpu, pKernelGraphics, GR_CTX_BUFFER_MAIN);
+
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    NV_ASSERT_OK_OR_RETURN(
+        kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast));
+
+    //
+    // Allocate space for per-subcontext headers in the context buffer.
+    // With subcontext, the per-subcontext context header is programmed in the channel instance block.
+    // Per-subcontext headers can be separate from the context buffer.
+    // For the initial phase, we allocate them at the end of the context buffers for easier tracking.
+    // This will waste some memory (256 KB) if the subcontexts are sparsely used.
+    // We will clean this up later to make it on-demand.
+    //
+    // We also need to report the updated context size to KMD for virtual context.
+    // Bug 1764102 tracks the VC support.
+    //
+    NV_ASSERT_OK_OR_RETURN(
+        kgraphicsGetMainCtxBufferSize(pGpu, pKernelGraphics, NV_TRUE, &ctxSize));
+
+    if (ctxBufPoolIsSupported(pGpu) &&
+        pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pCtxBufPool != NULL)
+    {
+        allocFlags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL;
+        pCtxBufPool = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pCtxBufPool;
+    }
+
+    NV_CHECK_OK_OR_RETURN(LEVEL_ERROR,
+        memdescCreate(&pGrCtxBufferMemDesc, pGpu, ctxSize,
+                      RM_PAGE_SIZE, bIsContiguous, ADDR_UNKNOWN,
+                      pAttr->cpuAttr,
+                      allocFlags | MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE));
+
+    //
+    // Force page size to 4KB; we can change this later when RM access methods
+    // support 64K pages
+    //
+    NV_ASSERT_OK_OR_RETURN(
+        memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, pGrCtxBufferMemDesc, AT_GPU, RM_ATTR_PAGE_SIZE_4KB));
+
+    NV_ASSERT_OK_OR_RETURN(memdescSetCtxBufPool(pGrCtxBufferMemDesc, pCtxBufPool));
+    NV_ASSERT_OK_OR_RETURN(
+        memdescAllocList(pGrCtxBufferMemDesc, pAttr->pAllocList));
+
+    NV_ASSERT_OK_OR_RETURN(
+        kchannelSetEngineContextMemDesc(pGpu, pKernelChannel,
+                                        ENG_GR(kgraphicsGetInstance(pGpu, pKernelGraphics)),
+                                        pGrCtxBufferMemDesc));
+    pKernelGraphicsContextUnicast->pMainCtxBuffer = pGrCtxBufferMemDesc;
+    return NV_OK;
+}
+
+/*!
+ * @brief Allocate and setup the GR ctx patch buffer + */ +NV_STATUS +kgrctxAllocPatchBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelChannel *pKernelChannel +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + const KGRAPHICS_STATIC_INFO *pStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NvU32 status = NV_OK; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + MEMORY_DESCRIPTOR **ppMemDesc; + NvU64 size; + NvU64 flags = MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + CTX_BUF_POOL_INFO *pCtxBufPool; + const GR_BUFFER_ATTR *pAttr = kgraphicsGetContextBufferAttr(pGpu, pKernelGraphics, GR_CTX_BUFFER_PATCH); + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + pCtxBufPool = NULL; + if (ctxBufPoolIsSupported(pGpu) && + (pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pCtxBufPool) != NULL) + { + flags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + pCtxBufPool = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pCtxBufPool; + } + + ppMemDesc = &pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc; + size = pStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH].size; + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, size, RM_PAGE_SIZE, NV_TRUE, + ADDR_UNKNOWN, + pAttr->cpuAttr, + flags)); + + // + // Force page size to 4KB we can change this later when RM access method + // support 64k pages + // + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, *ppMemDesc, AT_GPU, RM_ATTR_PAGE_SIZE_4KB); + NV_ASSERT_OK_OR_GOTO(status, + memdescSetCtxBufPool(*ppMemDesc, pCtxBufPool), + failed); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescAllocList(*ppMemDesc, pAttr->pAllocList), + failed); + + return NV_OK; + +failed: + memdescFree(*ppMemDesc); + memdescDestroy(*ppMemDesc); + *ppMemDesc = NULL; + return status; +} + +/*! 
+ * @brief Allocate the local ctx PM buffer + */ +NV_STATUS +kgrctxAllocPmBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelChannel *pKernelChannel +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NvU32 size; + NV_STATUS status = NV_OK; + NvU64 flags = MEMDESC_FLAGS_GPU_PRIVILEGED; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + CTX_BUF_POOL_INFO *pCtxBufPool; + MEMORY_DESCRIPTOR **ppMemDesc; + const GR_BUFFER_ATTR *pAttr = kgraphicsGetContextBufferAttr(pGpu, pKernelGraphics, GR_CTX_BUFFER_PM); + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + size = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM].size; + if (size == 0) + return NV_ERR_INVALID_STATE; + + pCtxBufPool = NULL; + if (ctxBufPoolIsSupported(pGpu) && + pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pCtxBufPool != NULL) + { + flags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + pCtxBufPool = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->pCtxBufPool; + } + + // + // For SRIOV Heavy, the PM ctxsw buffer allocation will be redirected to + // host RM subheap. Subheap is used by host RM to allocate memory + // on behalf of the guest(VF) context. + // + if (gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + flags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + + ppMemDesc = &pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc; + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(ppMemDesc, pGpu, + size, + RM_PAGE_SIZE, + NV_TRUE, + ADDR_UNKNOWN, + pAttr->cpuAttr, + flags)); + + // + // Force page size to 4KB we can change this later when RM access method + // support 64k pages + // + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, *ppMemDesc, AT_GPU, RM_ATTR_PAGE_SIZE_4KB), + error); + + NV_ASSERT_OK_OR_GOTO(status, memdescSetCtxBufPool(*ppMemDesc, pCtxBufPool), error); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memdescAllocList(*ppMemDesc, pAttr->pAllocList), + error); + + return NV_OK; + +error: + memdescFree(*ppMemDesc); + memdescDestroy(*ppMemDesc); + *ppMemDesc = NULL; + + return status; +} + +/*! + * This function allocates and maps various GR buffers. 
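+ *
+ * Depending on what is already in place, this covers the main GR context
+ * buffer, the per-channel context patch buffer, the PM ctxsw buffer (when
+ * pre-allocation is required), and the global context buffers for the
+ * channel's GFID.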
+ */ +NV_STATUS +kgrctxAllocCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelGraphicsObject *pKernelGraphicsObject +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + NV_STATUS status = NV_OK; + NvU32 classNum = pChannelDescendant->resourceDesc.externalClassId; + NvU32 objType; + NvU32 gfid = kchannelGetGfid(pChannelDescendant->pKernelChannel); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OR_RETURN(pKernelGraphicsObject->pKernelGraphicsContext != NULL, NV_ERR_INVALID_STATE); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetUnicast(pGpu, + pKernelGraphicsObject->pKernelGraphicsContext, + &pKernelGraphicsContextUnicast)); + + // Allocate the GR ctx buffer if required + if (!kgrctxIsMainContextAllocated(pGpu, pKernelGraphicsContext) && + !kchannelIsCtxBufferAllocSkipped(pChannelDescendant->pKernelChannel)) + { + if (pKernelGraphicsContextUnicast->pMainCtxBuffer == NULL) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxAllocMainCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel)); + } + else + { + // Ctx buffer was uninstalled in FIFO but not freed. Reinstall it + NV_ASSERT_OK_OR_RETURN( + kchannelSetEngineContextMemDesc(pGpu, pKernelChannel, + ENG_GR(kgraphicsGetInstance(pGpu, pKernelGraphics)), + pKernelGraphicsContextUnicast->pMainCtxBuffer)); + } + } + + kgrmgrGetGrObjectType(classNum, &objType); + + // + // for each channel, we need to allocate the context patch buffer, this memory region + // will be used to override settings in the context after it is restored, things + // like the global ctx buffer addresses, etc. + // + if (pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc == NULL) + { + NV_ASSERT_OK_OR_RETURN( + kgrctxAllocPatchBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel)); + } + + // Pre-allocate the PM ctxsw buffer, if required + if (kgrctxShouldPreAllocPmBuffer_HAL(pGpu, pKernelGraphicsContext, pChannelDescendant->pKernelChannel)) + { + NV_ASSERT_OK_OR_RETURN( + kgrctxAllocPmBuffer(pGpu, pKernelGraphicsObject->pKernelGraphicsContext, + pKernelGraphics, + pChannelDescendant->pKernelChannel)); + } + + // + // Allocate Ctx Buffers that are local to this channel if required + // and they have yet to be allocated. 
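+    //
+    // Local (per-context) copies of the global context buffers are currently
+    // only needed for VPR channels.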
+ // + if ((pKernelGraphicsContextUnicast->bVprChannel + ) && !pKernelGraphicsContextUnicast->localCtxBuffer.bAllocated) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsAllocGrGlobalCtxBuffers_HAL(pGpu, pKernelGraphics, gfid, + pKernelGraphicsObject->pKernelGraphicsContext)); + } + + // Allocate global context for this gfid if they haven't been already + if (!pGlobalCtxBuffers->bAllocated) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsAllocGrGlobalCtxBuffers_HAL(pGpu, pKernelGraphics, gfid, NULL)); + } + + return status; +} + +// Map common buffer to a channel's context helper function +NV_STATUS +kgrctxMapGlobalCtxBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + NvU32 gfid, + OBJVASPACE *pVAS, + GR_GLOBALCTX_BUFFER buffId, + NvBool bIsReadOnly +) +{ + GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status = NV_OK; + NvU64 vaddr; + MEMORY_DESCRIPTOR *pMemDesc; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + // if we have local buffers per-channel allocated, use them, otherwise use the global buffers + pMemDesc = pKernelGraphicsContextUnicast->localCtxBuffer.memDesc[buffId]; + if (pMemDesc == NULL) + { + pMemDesc = pGlobalCtxBuffers->memDesc[buffId]; + } + + if (pMemDesc == NULL) + { + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + NvU32 fifoEngineId; + NvU32 buffSize; + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OK_OR_RETURN(kgrctxGlobalCtxBufferToFifoEngineId(buffId, &fifoEngineId)); + buffSize = pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[fifoEngineId].size; + + if (buffSize == 0) + { + NV_PRINTF(LEVEL_INFO, + "Could not map %s Buffer as buffer is not supported\n", + NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId)); + return NV_OK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Could not map %s Buffer, no memory allocated for it!\n", + NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId)); + return NV_ERR_INVALID_ARGUMENT; + } + } + + // Unconditionally call map for refcounting + NV_ASSERT_OK_OR_ELSE(status, + kgraphicsMapCtxBuffer(pGpu, pKernelGraphics, pMemDesc, pVAS, + &pKernelGraphicsContextUnicast->globalCtxBufferVaList[buffId], + kgraphicsIsGlobalCtxBufferSizeAligned(pGpu, pKernelGraphics, buffId), + bIsReadOnly), + NV_PRINTF(LEVEL_ERROR, "%s Buffer could not be mapped\n", + NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId)); + return status; ); + + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, + vaListFindVa(&pKernelGraphicsContextUnicast->globalCtxBufferVaList[buffId], pVAS, &vaddr)); + + NV_PRINTF(LEVEL_INFO, + "GPU:%d %s Buffer PA @ 0x%llx VA @ 0x%llx of Size 0x%llx\n", + pGpu->gpuInstance, NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId), + memdescGetPhysAddr(memdescGetMemDescFromGpu(pMemDesc, pGpu), AT_GPU, 0), + vaddr, pMemDesc->Size); + + return status; +} + +// +// Map common buffers to a channel's context +// +NV_STATUS +kgrctxMapGlobalCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + NvU32 gfid, + KernelChannel *pKernelChannel +) +{ + GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers = 
kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + // if global ctx buffers were never allocated then bail out early + if (!pKernelGraphicsContextUnicast->localCtxBuffer.bAllocated && + !pGlobalCtxBuffers->bAllocated) + { + return NV_ERR_INVALID_STATE; + } + + kgrctxMapGlobalCtxBuffer(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + gfid, + pKernelChannel->pVAS, + GR_GLOBALCTX_BUFFER_BUNDLE_CB, + NV_FALSE); + kgrctxMapGlobalCtxBuffer(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + gfid, + pKernelChannel->pVAS, + GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB, + NV_FALSE); + kgrctxMapGlobalCtxBuffer(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + gfid, + pKernelChannel->pVAS, + GR_GLOBALCTX_BUFFER_PAGEPOOL, + NV_FALSE); + + if (kgraphicsIsRtvCbSupported(pGpu, pKernelGraphics)) + { + kgrctxMapGlobalCtxBuffer(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + gfid, + pKernelChannel->pVAS, + GR_GLOBALCTX_BUFFER_RTV_CB, + NV_FALSE); + } + + return NV_OK; +} + +/*! + * @brief This function allocates and maps various GR buffers. + */ +NV_STATUS +kgrctxMapCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelGraphicsObject *pKernelGraphicsObject +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + NvU32 classNum = pChannelDescendant->resourceDesc.externalClassId; + OBJGVASPACE *pGVAS; + NvU32 objType; + NvU32 gfid = kchannelGetGfid(pKernelChannel); + NvBool bAcquire3d = NV_FALSE; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsObject->pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE); + if (gvaspaceIsExternallyOwned(pGVAS)) + return NV_OK; + + // + // in kernel RM we skip the context buffer mapping as part of + // fifoSetUpChannelDma. fifoSetUpChannelDma is not enabled in kernel RM. + // So it's possible for the main ctx buffer to be unmapped by this point. 
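+    //
+    // Map it here from the engine context descriptor stored on the channel
+    // group, unless buffer allocation itself was skipped (KMD-managed virtual
+    // context).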
+ // + if (!kchannelIsCtxBufferAllocSkipped(pKernelChannel)) + { + ENGINE_CTX_DESCRIPTOR *pEngCtx; + MEMORY_DESCRIPTOR *pMemDesc; + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + pEngCtx = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[subdevInst]; + + NV_ASSERT_OR_RETURN(pEngCtx != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pEngCtx->pMemDesc != NULL, NV_ERR_INVALID_STATE); + pMemDesc = pEngCtx->pMemDesc; + + NV_ASSERT_OK_OR_RETURN( + kgraphicsMapCtxBuffer(pGpu, pKernelGraphics, pMemDesc, pKernelChannel->pVAS, + &pEngCtx->vaList, NV_FALSE, NV_FALSE)); + } + + kgrmgrGetGrObjectType(classNum, &objType); + + NV_ASSERT_OK_OR_RETURN( + kgraphicsMapCtxBuffer(pGpu, pKernelGraphics, pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc, pKernelChannel->pVAS, + &pKernelGraphicsContextUnicast->ctxPatchBuffer.vAddrList, NV_FALSE, NV_FALSE)); + + // TODO: Check if this can be moved to subcontext allocation time + if (kgraphicsIsPerSubcontextContextHeaderSupported(pGpu, pKernelGraphics) && + (pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc != NULL)) + { + NvU64 vaddr = 0; + + if (vaListFindVa(&pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList, pKernelChannel->pVAS, &vaddr) != NV_OK) + { + if (vaListGetManaged(&pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList)) + { + NV_ASSERT_OK_OR_RETURN( + kgraphicsMapCtxBuffer(pGpu, pKernelGraphics, pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc, pKernelChannel->pVAS, + &pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList, NV_FALSE, NV_FALSE)); + } + } + } + + if (kgraphicsDoesUcodeSupportPrivAccessMap(pGpu, pKernelGraphics)) + { + kgrctxMapGlobalCtxBuffer(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + gfid, + pKernelChannel->pVAS, + kgrctxGetRegisterAccessMapId_HAL(pGpu, pKernelGraphicsContext, pChannelDescendant->pKernelChannel), + NV_TRUE); + } + + // + // Condition for acquiring 3d context buffer mappings. + // For non-TSG & legacy TSG mode, always map + // For subcontext TSG case, only map on 2D/3D allocations + // + if (pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bLegacyMode) + { + bAcquire3d = NV_TRUE; + } + else + { + bAcquire3d = ((objType == GR_OBJECT_TYPE_2D) || (objType == GR_OBJECT_TYPE_3D)); + } + + if (bAcquire3d) + { + kgrctxMapGlobalCtxBuffers(pGpu, pKernelGraphicsContext, pKernelGraphics, gfid, pKernelChannel); + } + + return NV_OK; +} + +/*! + * @brief Set parameters for promoting the PA of a ctx buffer to physical RM. 
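+ *
+ *        On success, *pbAddEntry reports whether pEntry was filled in and
+ *        should be included in the NV2080_CTRL_GPU_PROMOTE_CTX request;
+ *        buffers that are already initialized, not allocated, or not
+ *        initialized by kernel RM are skipped.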
+ */ +NV_STATUS +kgrctxPrepareInitializeCtxBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelChannel *pKernelChannel, + NvU32 externalId, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *pEntry, + NvBool *pbAddEntry +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NvU32 physAttr; + + *pbAddEntry = NV_FALSE; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + switch (externalId) + { + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN: + { + ENGINE_CTX_DESCRIPTOR *pEngCtx; + NvU32 subdevInst; + if (pKernelGraphicsContextUnicast->bKGrMainCtxBufferInitialized) + return NV_OK; + + // Do not try to init a KMD virtual context buffer + if (kchannelIsCtxBufferAllocSkipped(pKernelChannel)) + return NV_OK; + + subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pEngCtx = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[subdevInst]; + + NV_ASSERT_OR_RETURN(pEngCtx != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pEngCtx->pMemDesc != NULL, NV_ERR_INVALID_STATE); + pMemDesc = pEngCtx->pMemDesc; + break; + } + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM: + if (pKernelGraphicsContextUnicast->bKGrPmCtxBufferInitialized) + return NV_OK; + + if (pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc == NULL) + return NV_OK; + + pMemDesc = pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH: + if (pKernelGraphicsContextUnicast->bKGrPatchCtxBufferInitialized) + return NV_OK; + + NV_ASSERT(pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc != NULL); + pMemDesc = pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL: + // No initialization from kernel RM + return NV_OK; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK: + { + GR_GLOBALCTX_BUFFER internalId; + NvU32 gfid = kchannelGetGfid(pKernelChannel); + GR_GLOBALCTX_BUFFERS *pCtxBuffers; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetGlobalContextBufferInternalId(externalId, &internalId)); + + if (pKernelGraphicsContextUnicast->localCtxBuffer.bAllocated) + { + pCtxBuffers = &pKernelGraphicsContextUnicast->localCtxBuffer; + } + else + { + pCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + } + + if (pCtxBuffers->bInitialized[internalId]) + return NV_OK; + + pMemDesc = pCtxBuffers->memDesc[internalId]; + break; + } + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT: + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP: + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP: + { + GR_GLOBALCTX_BUFFER internalId; + NvU32 gfid = kchannelGetGfid(pKernelChannel); + GR_GLOBALCTX_BUFFERS *pCtxBuffers; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetGlobalContextBufferInternalId(externalId, &internalId)); + + pCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + + if (pCtxBuffers->bInitialized[internalId]) + return NV_OK; + + pMemDesc = pCtxBuffers->memDesc[internalId]; + + break; + } + 
case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP: + // No initialization from kernel RM + return NV_OK; + default: + NV_ASSERT_OR_RETURN(!"Unrecognized promote ctx enum", NV_ERR_INVALID_ARGUMENT); + } + + // If these buffers are not supported or not allocated, no need to init + if (pMemDesc == NULL) + return NV_OK; + + physAttr = 0x0; + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_FBMEM: + physAttr = FLD_SET_DRF(2080, _CTRL_GPU_INITIALIZE_CTX, + _APERTURE, _VIDMEM, physAttr); + break; + + case ADDR_SYSMEM: + if (memdescGetCpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + physAttr = FLD_SET_DRF(2080, _CTRL_GPU_INITIALIZE_CTX, + _APERTURE, _COH_SYS, physAttr); + } + else if (memdescGetCpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) + { + physAttr = FLD_SET_DRF(2080, _CTRL_GPU_INITIALIZE_CTX, + _APERTURE, _NCOH_SYS, physAttr); + } + else + { + return NV_ERR_INVALID_STATE; + } + break; + + default: + return NV_ERR_INVALID_STATE; + } + + physAttr = FLD_SET_DRF(2080, _CTRL_GPU_INITIALIZE_CTX, _GPU_CACHEABLE, _NO, physAttr); + + pEntry->gpuPhysAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + pEntry->size = pMemDesc->Size; + pEntry->physAttr = physAttr; + pEntry->bufferId = externalId; + pEntry->bInitialize = NV_TRUE; + pEntry->bNonmapped = NV_TRUE; + + *pbAddEntry = NV_TRUE; + + return NV_OK; +} + +/*! + * @brief Set parameters for promoting the VA of a ctx buffer to physical RM. + */ +NV_STATUS +kgrctxPreparePromoteCtxBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelChannel *pKernelChannel, + NvU32 externalId, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *pEntry, + NvBool *pbAddEntry +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + VA_LIST *pVaList; + NvU64 vaddr; + NvU64 refCount; + OBJGVASPACE *pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE); + NV_STATUS status; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + *pbAddEntry = NV_FALSE; + + // RM is not responsible for promoting the buffers when UVM is enabled + if (gvaspaceIsExternallyOwned(pGVAS)) + return NV_OK; + + switch (externalId) + { + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN: + { + ENGINE_CTX_DESCRIPTOR *pEngCtx; + NvU32 subdevInst; + + // Do not try to promote a KMD virtual context buffer + if (kchannelIsCtxBufferAllocSkipped(pKernelChannel)) + return NV_OK; + + subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pEngCtx = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->ppEngCtxDesc[subdevInst]; + + NV_ASSERT_OR_RETURN(pEngCtx != NULL, NV_ERR_INVALID_STATE); + pVaList = &pEngCtx->vaList; + break; + } + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM: + pVaList = &pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH: + pVaList = &pKernelGraphicsContextUnicast->ctxPatchBuffer.vAddrList; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP: + // 
fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP: + { + GR_GLOBALCTX_BUFFER internalId; + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetGlobalContextBufferInternalId(externalId, &internalId)); + + pVaList = &pKernelGraphicsContextUnicast->globalCtxBufferVaList[internalId]; + break; + } + default: + NV_ASSERT_OR_RETURN(!"Unrecognized promote ctx enum", NV_ERR_INVALID_ARGUMENT); + } + + // If the buffer isn't currently mapped or was already promoted, nothing to do + status = vaListGetRefCount(pVaList, pKernelChannel->pVAS, &refCount); + if ((status != NV_OK) || (refCount > 1)) + return NV_OK; + + NV_ASSERT_OK_OR_RETURN(vaListFindVa(pVaList, pKernelChannel->pVAS, &vaddr)); + + pEntry->bufferId = externalId; + pEntry->gpuVirtAddr = vaddr; + pEntry->bNonmapped = NV_FALSE; + + *pbAddEntry = NV_TRUE; + return NV_OK; +} + +/*! Mark the context buffer as initialized, prevent further calls to init */ +void +kgrctxMarkCtxBufferInitialized_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelChannel *pKernelChannel, + NvU32 externalId +) +{ + NV_STATUS status; + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, + pKernelGraphicsContext, + &pKernelGraphicsContextUnicast), + return;); + + switch (externalId) + { + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN: + pKernelGraphicsContextUnicast->bKGrMainCtxBufferInitialized = NV_TRUE; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM: + pKernelGraphicsContextUnicast->bKGrPmCtxBufferInitialized = NV_TRUE; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH: + pKernelGraphicsContextUnicast->bKGrPatchCtxBufferInitialized = NV_TRUE; + break; + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK: + { + // If "local" global ctx buffers are allocated, check those first + if (pKernelGraphicsContextUnicast->localCtxBuffer.bAllocated) + { + GR_GLOBALCTX_BUFFER internalId; + GR_GLOBALCTX_BUFFERS *pKCtxBuffers; + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetGlobalContextBufferInternalId(externalId, &internalId), + return;); + + pKCtxBuffers = &pKernelGraphicsContextUnicast->localCtxBuffer; + pKCtxBuffers->bInitialized[internalId] = NV_TRUE; + break; + } + // fall-through + } + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP: + // fall-through + case NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP: + { + GR_GLOBALCTX_BUFFER internalId; + NvU32 gfid = kchannelGetGfid(pKernelChannel); + GR_GLOBALCTX_BUFFERS *pKCtxBuffers; + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetGlobalContextBufferInternalId(externalId, &internalId), + return;); + + // TODO XXX Make context buffers available from KGRCTX alone + pKCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + + pKCtxBuffers->bInitialized[internalId] = NV_TRUE; + break; + } + 
default: + NV_ASSERT(!"Unrecognized promote ctx enum"); + } +} + +/*! Non-UVM late bind PM ctx buffer */ +NV_STATUS +kgrctxSetupDeferredPmBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelChannel *pKernelChannel +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + OBJGVASPACE *pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE); + NV_STATUS status = NV_OK; + Subdevice *pSubdevice; + + pSubdevice = CliGetSubDeviceInfoFromGpu(RES_GET_CLIENT_HANDLE(pKernelChannel), pGpu); + NV_ASSERT_OR_RETURN(pSubdevice != NULL, NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast)); + + if (gvaspaceIsExternallyOwned(pGVAS)) + { + // + // The PM Ctxsw buffer is now pre-allocated along with the other context buffers + // This is done as a WAR for the issue tracked under bug 1760699 + // Reason: With the enablement of UVM8 by default, the UVM channel registration relies on the + // buffers being allocated at channel allocation time and are not going to work for a buffer + // created later with an rm ctrl + // + NV_ASSERT_OR_RETURN(pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc != NULL, NV_ERR_INVALID_STATE); + return NV_OK; + } + else if (pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc != NULL) + { + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxAllocPmBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel)); + + // + // !!! + // From this point on, use `goto failed` to exit + // !!! + // + + if (kgraphicsIsPerSubcontextContextHeaderSupported(pGpu, pKernelGraphics)) + { + RS_ORDERED_ITERATOR it; + RsResourceRef *pScopeRef = RES_GET_REF(pKernelChannel); + + // Iterate over all channels in this TSG and map the new buffer + if (!pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm) + pScopeRef = RES_GET_REF(pKernelChannel->pKernelChannelGroupApi); + + it = kchannelGetIter(RES_GET_CLIENT(pKernelChannel), pScopeRef); + while (clientRefOrderedIterNext(it.pClient, &it)) + { + pKernelChannel = dynamicCast(it.pResourceRef->pResource, KernelChannel); + NV_ASSERT_OR_ELSE(pKernelChannel != NULL, + status = NV_ERR_INVALID_STATE; + goto failed;); + + NV_ASSERT_OK_OR_GOTO(status, + kgraphicsMapCtxBuffer(pGpu, pKernelGraphics, + pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc, + pKernelChannel->pVAS, + &pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList, + NV_FALSE, + NV_FALSE), + failed); + } + } + + { + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS params; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvBool bInitialize; + NvBool bPromote; + + portMemSet(¶ms, 0, sizeof(params)); + + // Setup parameters to initialize the PA if necessary + NV_ASSERT_OK_OR_GOTO(status, + kgrctxPrepareInitializeCtxBuffer(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + pKernelChannel, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM, + ¶ms.promoteEntry[0], + &bInitialize), + failed); + + // Setup parameters to promote the VA if necessary + NV_ASSERT_OK_OR_GOTO(status, + kgrctxPreparePromoteCtxBuffer(pGpu, + pKernelGraphicsContext, + pKernelChannel, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM, + ¶ms.promoteEntry[0], + &bPromote), + failed); + + NV_ASSERT_OR_ELSE(bInitialize && bPromote, + status = NV_ERR_INVALID_STATE; + goto failed;); + + params.engineType = NV2080_ENGINE_TYPE_GR(kgraphicsGetInstance(pGpu, pKernelGraphics)); + params.hChanClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + 
params.hObject = RES_GET_HANDLE(pKernelChannel); + params.entryCount = 1; + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, + ¶ms, + sizeof(params)), + failed); + + // + // If we successfully promoted the PA, flip a flag to ensure we don't + // try to promote it again. The VA_LIST should already track this for + // VA, but we can't rely on it for PA due to UVM. + // + if (params.promoteEntry[0].bInitialize) + { + kgrctxMarkCtxBufferInitialized(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + pKernelChannel, + params.promoteEntry[0].bufferId); + } + } + + return NV_OK; + +failed: + + if (kgraphicsIsPerSubcontextContextHeaderSupported(pGpu, pKernelGraphics)) + { + RS_ORDERED_ITERATOR it; + RsResourceRef *pScopeRef = RES_GET_REF(pKernelChannel); + + // Iterate over all channels in this TSG and try to unmap the PM ctx buffer + if (!pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bAllocatedByRm) + pScopeRef = RES_GET_REF(pKernelChannel->pKernelChannelGroupApi); + + it = kchannelGetIter(RES_GET_CLIENT(pKernelChannel), pScopeRef); + while (clientRefOrderedIterNext(it.pClient, &it)) + { + pKernelChannel = dynamicCast(it.pResourceRef->pResource, KernelChannel); + if (pKernelChannel == NULL) + continue; + + kgraphicsUnmapCtxBuffer(pGpu, pKernelGraphics, + pKernelChannel->pVAS, + &pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList); + } + } + + kgrctxFreePmBuffer(pGpu, pKernelGraphicsContext); + + return status; +} + +/** + * @brief unmap the memory for the pm context buffer associated with a channel + */ +void +kgrctxUnmapCtxPmBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + OBJVASPACE *pVAS +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast), + return;); + + pMemDesc = pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc; + if (pMemDesc != NULL) + { + // + // This func assumes that the buffer was not allocated per subdevice, + // and will leak any mappings performed by the secondary. + // + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + kgraphicsUnmapCtxBuffer(pGpu, pKernelGraphics, pVAS, &pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList); + } +} + +/** + * @brief Unmap associated ctx buffers (main, patch, global buffers etc). + * + * This function is called on every channel free and therefore can't assume any + * graphics objects actually exist on the given channel. + * TODO: Bug 3164256 + * This function also unmaps and frees zcull and preempt buffers. 
Ideally we want to decouple + * unmap and free for all buffers and move free of buffers to kgrctxFreeAssociatedCtxBuffers + */ +void +kgrctxUnmapAssociatedCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelChannel *pKernelChannel +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NvBool bRelease3d = NV_FALSE; + NvU32 gfid = kchannelGetGfid(pKernelChannel); + NvU32 status; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, + pKernelGraphicsContext, + &pKernelGraphicsContextUnicast), + return;); + + // + // Unmap 3D context buffers on the last 3D channel free + // + // Condition for unmapping 3d context buffer mappings: + // For non-TSG & legacy TSG mode, always unmap 3d context buffers. + // For subcontext TSG case, unmap on the last graphics channel free. + // + if (pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bLegacyMode) + { + bRelease3d = pKernelGraphicsContextUnicast->channelObjects == 0; + } + else + { + bRelease3d = ((pKernelGraphicsContextUnicast->objectCounts[GR_OBJECT_TYPE_3D] == 0) && + (pKernelGraphicsContextUnicast->objectCounts[GR_OBJECT_TYPE_2D] == 0)); + } + + if (bRelease3d) + { + // Unmap Circular buffer from the current channel's address space + kgrctxUnmapGlobalCtxBuffers(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel->pVAS, gfid); + } + + // + // When sharing contexts across channels we need to defer this until all + // objects have been freed. + // + NV_CHECK_OR_RETURN_VOID(LEVEL_SILENT, + pKernelGraphicsContextUnicast->channelObjects == 0); + + if (pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc != NULL) + { + // + // Delay freeing the patch buffer until the last channel free. + // these buffers are accessed even after the last GR object is freed. + // + kgraphicsUnmapCtxBuffer(pGpu, pKernelGraphics, pKernelChannel->pVAS, &pKernelGraphicsContextUnicast->ctxPatchBuffer.vAddrList); + } + + kgrctxUnmapCtxPmBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel->pVAS); + + // if we have a zcull ctxsw buffer, this will free it up + kgrctxFreeZcullBuffer(pGpu, pKernelGraphicsContext, pKernelChannel->pVAS); + + // Release all preemption buffers if they were allocated + kgrctxFreeCtxPreemptionBuffers(pGpu, pKernelGraphicsContext, pKernelChannel->pVAS); + + kgrctxUnmapMainCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel); +} + +/*! + * @brief is this object responsible for any cleanup tasks i.e. buffer unmapping? + */ +NvBool kgrctxShouldCleanup_KERNEL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + return gpuIsClientRmAllocatedCtxBufferEnabled(pGpu); +}; + +/*! + * This function returns whether PM ctxsw buffer should be pre-allocated or not. 
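+ * Pre-allocation is required only when the channel's VA space is externally
+ * owned (e.g. managed by UVM); otherwise the buffer is allocated lazily via
+ * kgrctxSetupDeferredPmBuffer.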
+ */ +NvBool kgrctxShouldPreAllocPmBuffer_PF +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelChannel *pKernelChannel +) +{ + OBJGVASPACE *pGVAS = NULL; + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + NV_ASSERT_OR_RETURN(NULL != pKernelChannel, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsContext != NULL, NV_ERR_INVALID_STATE); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetUnicast(pGpu, + pKernelGraphicsContext, + &pKernelGraphicsContextUnicast)); + + pGVAS = dynamicCast(pKernelChannel->pVAS, OBJGVASPACE); + + // Do not allocate the buffer, if already allocated + if (pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc != NULL) + { + return NV_FALSE; + } + + // + // The PM Ctxsw buffer is now pre-allocated along with the other context buffers + // This is done as a WAR for the issue tracked under bug 1760699 + // Reason: With the enablement of UVM8 by default, the UVM channel registration relies on the + // buffers being allocated at channel allocation time and are not going to work for a buffer + // created later with an rm ctrl + // + return gvaspaceIsExternallyOwned(pGVAS); +} + +/*! + * This function returns whether PM ctxsw buffer should be pre-allocated + * or not. + */ +NvBool +kgrctxShouldPreAllocPmBuffer_VF +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelChannel *pKernelChannel +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast = NULL; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + NV_ASSERT_OR_RETURN(NULL != pKernelChannel, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsContext != NULL, NV_ERR_INVALID_STATE); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxGetUnicast(pGpu, + pKernelGraphicsContext, + &pKernelGraphicsContextUnicast)); + + // Do not allocate the buffer, if already allocated + if (pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc != NULL) + { + return NV_FALSE; + } + + // + // Pre-allocate the PM Ctxsw buffer along with the other context buffers + // in below scenarios: + // 1. For externally owned VA spaces. This is done as a WAR for the issue + // tracked under bug 1760699. + // Reason: With the enablement of UVM8 by default, the UVM channel registration relies on the + // buffers being allocated at channel allocation time and are not going to work for a buffer + // created later with an rm ctrl. + // 2. For full SRIOV vGPU guests with Profiling capability enabled + // + + return kgrctxShouldPreAllocPmBuffer_PF(pGpu, pKernelGraphicsContext, pKernelChannel); +} + +/*! + * @brief should this layer manage the ctx buffers? + */ +NvBool +kgrctxShouldManageCtxBuffers_KERNEL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + NvU32 gfid +) +{ + return gpuIsClientRmAllocatedCtxBufferEnabled(pGpu) && !IS_GFID_VF(gfid); +} + +/*! + * @brief should this layer manage the ctx buffers? + * If client RM is managing the ctx buffers but the channel is a plugin + * channel, we should still manage them. + */ +NvBool +kgrctxShouldManageCtxBuffers_PHYSICAL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + NvU32 gfid +) +{ + return !gpuIsClientRmAllocatedCtxBufferEnabled(pGpu) || (gpuIsSriovEnabled(pGpu) && IS_GFID_PF(gfid)); +} + +/** + * @brief Unmaps buffers associated with this context. 
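+ *
+ * Routes to the KernelGraphics engine owning this context (via the parent
+ * channel handle) and then unmaps the context buffers associated with the
+ * given channel.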
+ */ +void kgrctxUnmapBuffers_KERNEL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast, + KernelChannel *pKernelChannel +) +{ + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NV_STATUS status; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + KernelGraphics *pKernelGraphics; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelGraphicsContext); + NvHandle hParent = RES_GET_PARENT_HANDLE(pKernelGraphicsContext); + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + kgrmgrCtrlSetChannelHandle(hParent, &grRouteInfo); + NV_ASSERT_OK_OR_ELSE(status, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics), + return; ); + + kgrctxUnmapAssociatedCtxBuffers(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel); +} + +/*! + * @brief Unmap main GR ctx buffer + * + * @param[in] pGpu + * @param[in] pKernelGraphicsContext + * @param[in] pKernelGraphics + * @param[in] pKernelChannel + */ +void +kgrctxUnmapMainCtxBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelChannel *pKernelChannel +) +{ + MEMORY_DESCRIPTOR *pCtxBufferMemDesc; + NV_STATUS status; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + status = kchangrpGetEngineContextMemDesc(pGpu, pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup, &pCtxBufferMemDesc); + if ((status == NV_OK) && (pCtxBufferMemDesc != NULL)) + { + // TODO remove Channel, ENG_GR dependencies + kchannelUnmapEngineCtxBuf(pGpu, pKernelChannel, ENG_GR(kgraphicsGetInstance(pGpu, pKernelGraphics))); + kchannelSetEngineContextMemDesc(pGpu, pKernelChannel, ENG_GR(kgraphicsGetInstance(pGpu, pKernelGraphics)), NULL); + } +} + +/*! + * @brief Unmap all global ctx buffers from this context + */ +void +kgrctxUnmapGlobalCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + OBJVASPACE *pVAS, + NvU32 gfid +) +{ + GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast), + return;); + + // if global ctx buffers were never allocated then bail out early + if (!pKernelGraphicsContextUnicast->localCtxBuffer.bAllocated && + !pGlobalCtxBuffers->bAllocated) + { + return; + } + + kgrctxUnmapGlobalCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pVAS, GR_GLOBALCTX_BUFFER_BUNDLE_CB); + kgrctxUnmapGlobalCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pVAS, GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB); + kgrctxUnmapGlobalCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pVAS, GR_GLOBALCTX_BUFFER_PAGEPOOL); + + if (kgraphicsIsRtvCbSupported(pGpu, pKernelGraphics)) + { + kgrctxUnmapGlobalCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pVAS, GR_GLOBALCTX_BUFFER_RTV_CB); + } + +} + +/*! 
+ * @brief Unmap specified global ctx buffer from the given VAS + */ +void +kgrctxUnmapGlobalCtxBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + OBJVASPACE *pVAS, + GR_GLOBALCTX_BUFFER buffId +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status; + NvU64 vaddr; + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast), + return;); + + status = vaListFindVa(&pKernelGraphicsContextUnicast->globalCtxBufferVaList[buffId], pVAS, &vaddr); + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Unmapping %s from VA @ 0x%llx\n", + NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId), vaddr); + + kgraphicsUnmapCtxBuffer(pGpu, pKernelGraphics, pVAS, &pKernelGraphicsContextUnicast->globalCtxBufferVaList[buffId]); + } + else + { + NV_PRINTF(LEVEL_INFO, "Buffer for %s already unmapped\n", + NV_ENUM_TO_STRING(GR_GLOBALCTX_BUFFER, buffId)); + } +} + +/*! + * @brief Free main GR ctx buffer + */ +void +kgrctxFreeMainCtxBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status; + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast), + return;); + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + if (pKernelGraphicsContextUnicast->pMainCtxBuffer != NULL) + { + memdescFree(pKernelGraphicsContextUnicast->pMainCtxBuffer); + memdescDestroy(pKernelGraphicsContextUnicast->pMainCtxBuffer); + pKernelGraphicsContextUnicast->pMainCtxBuffer = NULL; + pKernelGraphicsContextUnicast->bKGrMainCtxBufferInitialized = NV_FALSE; + } +} + +/** + * @brief unmap and free the memory for the zcull context buffer + */ +void +kgrctxFreeZcullBuffer_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + OBJVASPACE *pVAS +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + MEMORY_DESCRIPTOR *pMemDesc; + VA_LIST *pVaList; + NvU64 vaddr; + NV_STATUS status; + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast), + return;); + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + pMemDesc = pKernelGraphicsContextUnicast->zcullCtxswBuffer.memDesc; + pVaList = &pKernelGraphicsContextUnicast->zcullCtxswBuffer.vAddrList; + if (pMemDesc != NULL) + { + // + // This func assumes that the buffer was not allocated per subdevice, + // and will leak any mappings performed by the secondaries. + // + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + // TODO separate unmapping from free + NV_ASSERT_OK(vaListFindVa(pVaList, pVAS, &vaddr)); + dmaUnmapBuffer_HAL(pGpu, GPU_GET_DMA(pGpu), pVAS, vaddr); + vaListRemoveVa(pVaList, pVAS); + + // buffer can be shared and refcounted -- released on final free + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + else if (vaListFindVa(pVaList, pVAS, &vaddr) == NV_OK) + { + // Zcull buffer mapped by client. 
Remove the VA here + vaListRemoveVa(pVaList, pVAS); + } + else + { + NV_PRINTF(LEVEL_INFO, + "call to free zcull ctx buffer not RM managed, skipped!\n"); + } + + pKernelGraphicsContextUnicast->zcullCtxswBuffer.memDesc = NULL; +} + +/** + * @brief unmap and free the memory for the preemption context buffers + */ +void +kgrctxFreeCtxPreemptionBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + OBJVASPACE *pVAS +) +{ + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast), + return;); + + if (pKernelGraphicsContextUnicast->preemptCtxswBuffer.pMemDesc != NULL) + { + dmaUnmapBuffer_HAL(pGpu, pDma, pVAS, + pKernelGraphicsContextUnicast->preemptCtxswBuffer.virtualAddr); + + memdescFree(pKernelGraphicsContextUnicast->preemptCtxswBuffer.pMemDesc); + memdescDestroy(pKernelGraphicsContextUnicast->preemptCtxswBuffer.pMemDesc); + } + + if (pKernelGraphicsContextUnicast->spillCtxswBuffer.pMemDesc != NULL) + { + dmaUnmapBuffer_HAL(pGpu, pDma, pVAS, + pKernelGraphicsContextUnicast->spillCtxswBuffer.virtualAddr); + + memdescFree(pKernelGraphicsContextUnicast->spillCtxswBuffer.pMemDesc); + memdescDestroy(pKernelGraphicsContextUnicast->spillCtxswBuffer.pMemDesc); + } + + if (pKernelGraphicsContextUnicast->betaCBCtxswBuffer.pMemDesc != NULL) + { + dmaUnmapBuffer_HAL(pGpu, pDma, pVAS, + pKernelGraphicsContextUnicast->betaCBCtxswBuffer.virtualAddr); + + memdescFree(pKernelGraphicsContextUnicast->betaCBCtxswBuffer.pMemDesc); + memdescDestroy(pKernelGraphicsContextUnicast->betaCBCtxswBuffer.pMemDesc); + } + + if (pKernelGraphicsContextUnicast->pagepoolCtxswBuffer.pMemDesc != NULL) + { + dmaUnmapBuffer_HAL(pGpu, pDma, pVAS, + pKernelGraphicsContextUnicast->pagepoolCtxswBuffer.virtualAddr); + + memdescFree(pKernelGraphicsContextUnicast->pagepoolCtxswBuffer.pMemDesc); + memdescDestroy(pKernelGraphicsContextUnicast->pagepoolCtxswBuffer.pMemDesc); + } + + if (pKernelGraphicsContextUnicast->rtvCbCtxswBuffer.pMemDesc != NULL) + { + dmaUnmapBuffer_HAL(pGpu, pDma, pVAS, + pKernelGraphicsContextUnicast->rtvCbCtxswBuffer.virtualAddr); + + memdescFree(pKernelGraphicsContextUnicast->rtvCbCtxswBuffer.pMemDesc); + memdescDestroy(pKernelGraphicsContextUnicast->rtvCbCtxswBuffer.pMemDesc); + } + + pKernelGraphicsContextUnicast->preemptCtxswBuffer.pMemDesc = NULL; + pKernelGraphicsContextUnicast->preemptCtxswBuffer.virtualAddr = 0; + + pKernelGraphicsContextUnicast->spillCtxswBuffer.pMemDesc = NULL; + pKernelGraphicsContextUnicast->spillCtxswBuffer.virtualAddr = 0; + + pKernelGraphicsContextUnicast->betaCBCtxswBuffer.pMemDesc = NULL; + pKernelGraphicsContextUnicast->betaCBCtxswBuffer.virtualAddr = 0; + + pKernelGraphicsContextUnicast->pagepoolCtxswBuffer.pMemDesc = NULL; + pKernelGraphicsContextUnicast->pagepoolCtxswBuffer.virtualAddr = 0; + + pKernelGraphicsContextUnicast->rtvCbCtxswBuffer.pMemDesc = NULL; + pKernelGraphicsContextUnicast->rtvCbCtxswBuffer.virtualAddr = 0; +} + +/*! 
+ * @brief Free patch GR ctx buffer
+ */
+void
+kgrctxFreePatchBuffer_IMPL
+(
+    OBJGPU                *pGpu,
+    KernelGraphicsContext *pKernelGraphicsContext
+)
+{
+    KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast;
+    NV_STATUS status;
+
+    NV_ASSERT_OK_OR_ELSE(status,
+        kgrctxGetUnicast(pGpu,
+                         pKernelGraphicsContext,
+                         &pKernelGraphicsContextUnicast),
+        return;);
+
+    if (pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc == NULL)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "Attempt to free null ctx patch buffer pointer, skipped!\n");
+        return;
+    }
+
+    memdescFree(pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc);
+    memdescDestroy(pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc);
+
+    pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc = NULL;
+    pKernelGraphicsContextUnicast->bKGrPatchCtxBufferInitialized = NV_FALSE;
+}
+
+/**
+ * @brief Free the memory for the pm context buffer associated with a channel
+ */
+void
+kgrctxFreePmBuffer_IMPL
+(
+    OBJGPU                *pGpu,
+    KernelGraphicsContext *pKernelGraphicsContext
+)
+{
+    KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast;
+    MEMORY_DESCRIPTOR *pMemDesc;
+    NV_STATUS status;
+
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    NV_ASSERT_OK_OR_ELSE(status,
+        kgrctxGetUnicast(pGpu,
+                         pKernelGraphicsContext,
+                         &pKernelGraphicsContextUnicast),
+        return;);
+
+    pMemDesc = pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc;
+    if (pMemDesc != NULL)
+    {
+        //
+        // This func assumes that the buffer was not allocated per subdevice,
+        // and will leak any mappings performed by the secondaries.
+        //
+        NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+
+        memdescFree(pMemDesc);
+        memdescDestroy(pMemDesc);
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "Attempt to free null pm ctx buffer pointer??\n");
+    }
+
+    pKernelGraphicsContextUnicast->pmCtxswBuffer.memDesc = NULL;
+    pKernelGraphicsContextUnicast->bKGrPmCtxBufferInitialized = NV_FALSE;
+}
+
+/*!
+ * @brief Free "local" global context buffers
+ */
+void kgrctxFreeLocalGlobalCtxBuffers_IMPL
+(
+    OBJGPU                *pGpu,
+    KernelGraphicsContext *pKernelGraphicsContext
+)
+{
+    KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast;
+    KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
+    MEMORY_DESCRIPTOR *pMemDesc;
+    GR_GLOBALCTX_BUFFERS *pCtxBuffers;
+    GR_GLOBALCTX_BUFFER buff;
+    NV_STATUS status;
+
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    NV_ASSERT_OK_OR_ELSE(status,
+        kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast),
+        return;);
+
+    pCtxBuffers = &pKernelGraphicsContextUnicast->localCtxBuffer;
+
+    // no ctx buffers allocated, so get out early
+    if (!pCtxBuffers->bAllocated)
+        return;
+
+    FOR_EACH_IN_ENUM(GR_GLOBALCTX_BUFFER, buff)
+    {
+        pMemDesc = pCtxBuffers->memDesc[buff];
+        memdescFree(pMemDesc);
+        memdescDestroy(pMemDesc);
+        pCtxBuffers->memDesc[buff] = NULL;
+    }
+    FOR_EACH_IN_ENUM_END;
+
+    pCtxBuffers->bAllocated = NV_FALSE;
+
+    // make sure all L2 cache lines using CB buffers are clear after we free them
+    NV_ASSERT_OK(kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, NULL, FB_CACHE_VIDEO_MEMORY, FB_CACHE_EVICT));
+}
+
+/**
+ * @brief Free all associated ctx buffers (main, patch, PM and private global buffers for cases like VPR).
+ *
+ * This function is called on every channel free and therefore can't assume any
+ * graphics objects actually exist on the given channel.
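+ *
+ * The buffers are freed only once the last GR object on this context has been
+ * freed (i.e. the per-context object count has dropped to zero).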
+ */ +void kgrctxFreeAssociatedCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + NV_STATUS status; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + NV_ASSERT_OK_OR_ELSE(status, + kgrctxGetUnicast(pGpu, + pKernelGraphicsContext, + &pKernelGraphicsContextUnicast), + return;); + + // + // When sharing contexts across channels we need to defer this until all + // objects have been freed. + // + NV_CHECK_OR_RETURN_VOID(LEVEL_SILENT, pKernelGraphicsContextUnicast->channelObjects == 0); + + // if we have a context patch buffer, this will free it up + kgrctxFreePatchBuffer(pGpu, pKernelGraphicsContext); + + // if we have a PM ctxsw buffer, this will free it up + kgrctxFreePmBuffer(pGpu, pKernelGraphicsContext); + + // Release all common buffers used as part of the gr context. + kgrctxFreeLocalGlobalCtxBuffers(pGpu, pKernelGraphicsContext); + + kgrctxFreeMainCtxBuffer(pGpu, pKernelGraphicsContext); +} + +/*! + * This function unmaps various GR buffers. + */ +NV_STATUS +kgrctxUnmapCtxBuffers_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphicsObject *pKernelGraphicsObject, + KernelGraphics *pKernelGraphics, + NvBool bDestructor +) +{ + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast; + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + NvU32 classNum = pChannelDescendant->resourceDesc.externalClassId; + NvBool bRelease3d = NV_FALSE; + NvU32 objType; + NvU32 gfid; + + NV_PRINTF(LEVEL_INFO, "gpu:%d isBC=%d\n", pGpu->gpuInstance, + gpumgrGetBcEnabledStatus(pGpu)); + + gfid = kchannelGetGfid(pKernelChannel); + + NV_ASSERT_OK_OR_RETURN( + kgrctxGetUnicast(pGpu, + pKernelGraphicsObject->pKernelGraphicsContext, + &pKernelGraphicsContextUnicast)); + + kgrmgrGetGrObjectType(classNum, &objType); + + if (!bDestructor) + { + // + // If we are cleaning up from the constructor, then we know the main ctx + // buffer is not being used yet, and no other context buffers need to be + // cleaned up, so we can return early here. If we are coming from the + // destructor, the context buffers are not freed until channel + // destruction. + // TODO move this up one stack frame + // + kgrctxUnmapMainCtxBuffer(pGpu, pKernelGraphicsContext, pKernelGraphics, pChannelDescendant->pKernelChannel); + kgrctxFreeMainCtxBuffer(pGpu, pKernelGraphicsContext); + return NV_OK; + } + + if ((pKernelGraphicsContextUnicast->channelObjects != 0) && + (pKernelGraphicsContextUnicast->ctxPatchBuffer.memDesc != NULL)) + { + // + // Delay freeing the patch buffer until the last channel free. + // these buffers are accessed even after the last GR object is freed. + // + kgraphicsUnmapCtxBuffer(pGpu, pKernelGraphics, pKernelChannel->pVAS, &pKernelGraphicsContextUnicast->ctxPatchBuffer.vAddrList); + } + + if (kgraphicsDoesUcodeSupportPrivAccessMap(pGpu, pKernelGraphics)) + { + kgrctxUnmapGlobalCtxBuffer(pGpu, + pKernelGraphicsContext, + pKernelGraphics, + pKernelChannel->pVAS, + kgrctxGetRegisterAccessMapId_HAL(pGpu, pKernelGraphicsContext, pChannelDescendant->pKernelChannel)); + } + + // + // Condition for releasing 3d context buffer mappings: + // For non-TSG & legacy TSG mode, always unmap as long as its not the last + // object in TSG + // If it is the last object, then unmap during channel free as these buffers + // are accessed even after the last 3D object is freed. 
+    // For subcontext TSG case, only unmap on 2D/3D object free as long as it's not
+    // the last 2D/3D object
+    // For compute object with subcontext we never map so we don't need to call
+    // unmap.
+    // If we free on last object then buffer gets actually unmapped (as opposed
+    // to decrement in refcount) and we start hitting page faults
+    //
+    if (pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bLegacyMode)
+    {
+        bRelease3d = (pKernelGraphicsContextUnicast->channelObjects > 0);
+    }
+    else
+    {
+        NvBool bIs3dBuffer = ((objType == GR_OBJECT_TYPE_2D) || (objType == GR_OBJECT_TYPE_3D));
+        bRelease3d = (bIs3dBuffer && ((pKernelGraphicsContextUnicast->objectCounts[GR_OBJECT_TYPE_2D] > 0) ||
+            (pKernelGraphicsContextUnicast->objectCounts[GR_OBJECT_TYPE_3D] > 0)));
+    }
+
+    if (bRelease3d)
+    {
+        kgrctxUnmapGlobalCtxBuffers(pGpu, pKernelGraphicsContext, pKernelGraphics, pKernelChannel->pVAS, gfid);
+    }
+
+    return NV_OK;
+}
+
+/**
+ * @brief Release subcontext GR resources.
+ *
+ * To free GR resources (per-subcontext mappings) where refcounting doesn't work.
+ * For example, the PM buffer can be enabled at any time.
+ * We also free the CTXSW logging buffer here since it is mapped unconditionally for
+ * all objects but can't be unmapped unconditionally as FECS may try to access this buffer
+ * even after all objects are freed and may hit page faults.
+ * We free them when the subcontext is freed by calling this function.
+ */
+NV_STATUS
+kgrctxReleaseSubctxResources_IMPL
+(
+    OBJGPU                *pGpu,
+    KernelGraphicsContext *pKernelGraphicsContext,
+    KernelGraphics        *pKernelGraphics,
+    OBJVASPACE            *pVAS,
+    NvU32                  veid
+)
+{
+    KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast;
+    NvU64 vaddr;
+    NvU32 gfid;
+    NvBool bCallingContextPlugin;
+    GR_GLOBALCTX_BUFFERS *pGlobalCtxBuffers;
+
+    NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid));
+    NV_ASSERT_OK_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin));
+
+    pGlobalCtxBuffers = kgraphicsGetGlobalCtxBuffers(pGpu, pKernelGraphics, gfid);
+
+    if (bCallingContextPlugin)
+    {
+        gfid = GPU_GFID_PF;
+    }
+
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    NV_ASSERT_OK_OR_RETURN(
+        kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast));
+
+    while (vaListFindVa(&pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList, pVAS, &vaddr) == NV_OK)
+    {
+        kgraphicsUnmapCtxBuffer(pGpu, pKernelGraphics, pVAS, &pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList);
+    }
+
+    if (kgraphicsIsCtxswLoggingSupported(pGpu, pKernelGraphics))
+    {
+        MEMORY_DESCRIPTOR *pMemDesc = pGlobalCtxBuffers->memDesc[GR_GLOBALCTX_BUFFER_FECS_EVENT];
+        if (pMemDesc == NULL)
+            goto done;
+
+        while (vaListFindVa(&pKernelGraphicsContextUnicast->globalCtxBufferVaList[GR_GLOBALCTX_BUFFER_FECS_EVENT], pVAS, &vaddr) == NV_OK)
+        {
+            NV_PRINTF(LEVEL_INFO,
+                      "Unmapping CTXSW Buffer PA @ 0x%llx VA @ 0x%llx of Size 0x%llx for VEID = %d\n",
+                      memdescGetPhysAddr(memdescGetMemDescFromGpu(pMemDesc, pGpu), AT_GPU, 0),
+                      vaddr, pMemDesc->Size, veid);
+
+            kgraphicsUnmapCtxBuffer(pGpu, pKernelGraphics, pVAS, &pKernelGraphicsContextUnicast->globalCtxBufferVaList[GR_GLOBALCTX_BUFFER_FECS_EVENT]);
+        }
+    }
+
+done:
+    return NV_OK;
+}
+
+/*!
+ * Function to increment the GR channel object count
+ */
+void
+kgrctxIncObjectCount_IMPL
+(
+    OBJGPU                *pGpu,
+    KernelGraphicsContext *pKernelGraphicsContext,
+    NvU32                  classNum
+)
+{
+    KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast;
+    NvU32 objType;
+    NV_STATUS status;
+
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    kgrmgrGetGrObjectType(classNum, &objType);
+
+    NV_ASSERT_OK_OR_ELSE(status,
+        kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast),
+        return;);
+
+    switch (objType)
+    {
+        case GR_OBJECT_TYPE_COMPUTE:
+            pKernelGraphicsContextUnicast->objectCounts[GR_OBJECT_TYPE_COMPUTE]++;
+            gpuChangeComputeModeRefCount(pGpu, NV_GPU_COMPUTE_REFCOUNT_COMMAND_INCREMENT);
+            break;
+
+        case GR_OBJECT_TYPE_3D:
+            pKernelGraphicsContextUnicast->objectCounts[GR_OBJECT_TYPE_3D]++;
+            break;
+
+        case GR_OBJECT_TYPE_2D:
+            pKernelGraphicsContextUnicast->objectCounts[GR_OBJECT_TYPE_2D]++;
+            break;
+
+        case GR_OBJECT_TYPE_MEM:
+            pKernelGraphicsContextUnicast->objectCounts[GR_OBJECT_TYPE_MEM]++;
+            break;
+
+        default:
+            NV_PRINTF(LEVEL_ERROR, "Unrecognized graphics class 0x%x\n",
+                      classNum);
+            DBG_BREAKPOINT();
+            return;
+    }
+
+    pKernelGraphicsContextUnicast->channelObjects++;
+    NV_PRINTF(LEVEL_INFO,
+              "Class 0x%x allocated. %d objects allocated\n",
+              classNum,
+              pKernelGraphicsContextUnicast->channelObjects);
+}
+
+/*!
+ * Function to decrement the GR channel object count of the given class
+ */
+void
+kgrctxDecObjectCount_IMPL
+(
+    OBJGPU                *pGpu,
+    KernelGraphicsContext *pKernelGraphicsContext,
+    NvU32                  classNum
+)
+{
+    KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast;
+    NvU32 objType;
+    NvU32 countIdx;
+    NV_STATUS status;
+
+    NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu));
+
+    kgrmgrGetGrObjectType(classNum, &objType);
+
+    NV_ASSERT_OK_OR_ELSE(status,
+        kgrctxGetUnicast(pGpu, pKernelGraphicsContext, &pKernelGraphicsContextUnicast),
+        return;);
+
+    NV_ASSERT_OR_ELSE(pKernelGraphicsContextUnicast->channelObjects != 0,
+        NV_PRINTF(LEVEL_ERROR, "No active GR objects to free for Class 0x%x\n", classNum));
+
+    //
+    // Handle 2D and Compute class reference counting
+    //
+    switch (objType)
+    {
+        case GR_OBJECT_TYPE_COMPUTE:
+            gpuChangeComputeModeRefCount(pGpu, NV_GPU_COMPUTE_REFCOUNT_COMMAND_DECREMENT);
+            countIdx = GR_OBJECT_TYPE_COMPUTE;
+            break;
+
+        case GR_OBJECT_TYPE_3D:
+            countIdx = GR_OBJECT_TYPE_3D;
+            break;
+
+        case GR_OBJECT_TYPE_2D:
+            countIdx = GR_OBJECT_TYPE_2D;
+            break;
+
+        case GR_OBJECT_TYPE_MEM:
+            countIdx = GR_OBJECT_TYPE_MEM;
+            break;
+
+        default:
+            NV_PRINTF(LEVEL_ERROR, "Unrecognized graphics class 0x%x\n",
+                      classNum);
+            DBG_BREAKPOINT();
+            return;
+    }
+
+    NV_ASSERT_OR_RETURN_VOID(pKernelGraphicsContextUnicast->objectCounts[countIdx] > 0);
+    pKernelGraphicsContextUnicast->objectCounts[countIdx]--;
+    pKernelGraphicsContextUnicast->channelObjects--;
+}
+
+/*!
+ * @brief Return the register access buffer used for a given Channel's permission
+ *
+ * The base register access map comes from the architecture team. The user version
+ * of the buffer removes access to PM registers.
+ *
+ * The policy currently depends on administrator access on the system, except on
+ * one VGPU configuration.
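+ *
+ * Clients with at least USER_ROOT (administrator) privilege are given the
+ * unrestricted map; all other clients receive the default map with PM
+ * registers filtered out.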
+ */ +GR_GLOBALCTX_BUFFER +kgrctxGetRegisterAccessMapId_PF +( + OBJGPU *pGpu, + KernelGraphicsContext *pKernelGraphicsContext, + KernelChannel *pKernelChannel +) +{ + RmClient *pRmClient = dynamicCast(RES_GET_CLIENT(pKernelChannel), RmClient); + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pRmClient); + + // Using cached privilege because this function is called at a raised IRQL. + if ((privLevel >= RS_PRIV_LEVEL_USER_ROOT) + ) + { + return GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP; + } + + return GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP; +} + +NV_STATUS +kgrctxCtrlGetTpcPartitionMode_IMPL +( + KernelGraphicsContext *pKernelGraphicsContext, + NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelGraphicsContext); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + return status; + } + + return gpuresInternalControlForward_IMPL(staticCast(pKernelGraphicsContext, GpuResource), + NV0090_CTRL_CMD_INTERNAL_GET_TPC_PARTITION_MODE, + pParams, + sizeof(*pParams)); +} + +NV_STATUS +kgrctxCtrlSetTpcPartitionMode_IMPL +( + KernelGraphicsContext *pKernelGraphicsContext, + NV0090_CTRL_TPC_PARTITION_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelGraphicsContext); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + return status; + } + + return gpuresInternalControlForward_IMPL(staticCast(pKernelGraphicsContext, GpuResource), + NV0090_CTRL_CMD_INTERNAL_SET_TPC_PARTITION_MODE, + pParams, + sizeof(*pParams)); +} + +NV_STATUS +kgrctxCtrlGetMMUDebugMode_IMPL +( + KernelGraphicsContext *pKernelGraphicsContext, + NV0090_CTRL_GET_MMU_DEBUG_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelGraphicsContext); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + return status; + } + + return gpuresInternalControlForward_IMPL(staticCast(pKernelGraphicsContext, GpuResource), + NV0090_CTRL_CMD_INTERNAL_GET_MMU_DEBUG_MODE, + pParams, + sizeof(*pParams)); +} + +NV_STATUS +kgrctxCtrlProgramVidmemPromote_IMPL +( + KernelGraphicsContext *pKernelGraphicsContext, + NV0090_CTRL_PROGRAM_VIDMEM_PROMOTE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelGraphicsContext); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = 
pCallContext->pControlParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + return status; + } + + return gpuresInternalControlForward_IMPL(staticCast(pKernelGraphicsContext, GpuResource), + NV0090_CTRL_CMD_INTERNAL_PROGRAM_VIDMEM_PROMOTE, + pParams, + sizeof(*pParams)); +} + +/*! + * @brief Construct shared kernel graphics context. (Does nothing) + */ +NV_STATUS +shrkgrctxConstruct_IMPL(KernelGraphicsContextShared *pKernelGraphicsContextShared) +{ + return NV_OK; +} + +/*! + * @brief Construct shared kernel graphics context + * + * @param[in] pGpu + * @param[in] pKernelGraphicsContextShared + * @param[in] pKernelGraphicsContext + */ +NV_STATUS +shrkgrctxInit_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContextShared *pKernelGraphicsContextShared, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + NV_STATUS status = NV_OK; + NvU32 subdevCount = gpumgrGetSubDeviceMaxValuePlus1(pGpu); + + pKernelGraphicsContextShared->pKernelGraphicsContextUnicast = + portMemAllocNonPaged(subdevCount * sizeof(*pKernelGraphicsContextShared->pKernelGraphicsContextUnicast)); + if (pKernelGraphicsContextShared->pKernelGraphicsContextUnicast == NULL) + return NV_ERR_NO_MEMORY; + portMemSet(pKernelGraphicsContextShared->pKernelGraphicsContextUnicast, 0, + subdevCount * sizeof(*pKernelGraphicsContextShared->pKernelGraphicsContextUnicast)); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo; + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelGraphicsContext); + NvHandle hParent = RES_GET_PARENT_HANDLE(pKernelGraphicsContext); + + portMemSet(&grRouteInfo, 0, sizeof(grRouteInfo)); + kgrmgrCtrlSetChannelHandle(hParent, &grRouteInfo); + + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + + if (status != NV_OK) + SLI_LOOP_BREAK; + + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + shrkgrctxConstructUnicast(pGpu, pKernelGraphicsContextShared, + pKernelGraphicsContext, + pKernelGraphics, + &pKernelGraphicsContextShared->pKernelGraphicsContextUnicast[subdevInst])); + + if (status != NV_OK) + SLI_LOOP_BREAK; + } + SLI_LOOP_END; + + if (status != NV_OK) + shrkgrctxTeardown_IMPL(pGpu, pKernelGraphicsContextShared, pKernelGraphicsContext); + + return status; +} + +/*! 
+ * @brief Construct unicast state for this context + * + * @param[in] pGpu + * @param[in] pKernelGraphicsContextShared + * @param[in] pKernelGraphicsContext + * @param[in] pKernelGraphics + * @param[in] pKernelGraphicsContextUnicast + */ +NV_STATUS +shrkgrctxConstructUnicast_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContextShared *pKernelGraphicsContextShared, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphics *pKernelGraphics, + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast +) +{ + NvU32 i; + + for (i = 0; i < GR_GLOBALCTX_BUFFER_COUNT; i++) + NV_ASSERT_OK_OR_RETURN(vaListInit(&pKernelGraphicsContextUnicast->globalCtxBufferVaList[i])); + + NV_ASSERT_OK_OR_RETURN(vaListInit(&pKernelGraphicsContextUnicast->ctxPatchBuffer.vAddrList)); + NV_ASSERT_OK_OR_RETURN(vaListInit(&pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList)); + NV_ASSERT_OK_OR_RETURN(vaListInit(&pKernelGraphicsContextUnicast->zcullCtxswBuffer.vAddrList)); + + pKernelGraphicsContextUnicast->bSupportsPerSubctxHeader = + kgraphicsIsPerSubcontextContextHeaderSupported(pGpu, pKernelGraphics); + + return NV_OK; +} + +/*! + * @brief Destruct shared kernel graphics context. (Does nothing) + */ +void +shrkgrctxDestruct_IMPL(KernelGraphicsContextShared *pKernelGraphicsContextShared) +{ + return; +} + +/*! + * @brief Destroy the shared context state + */ +void +shrkgrctxTeardown_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContextShared *pKernelGraphicsContextShared, + KernelGraphicsContext *pKernelGraphicsContext +) +{ + if (pKernelGraphicsContextShared->pKernelGraphicsContextUnicast != NULL) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + shrkgrctxDestructUnicast(pGpu, pKernelGraphicsContextShared, + pKernelGraphicsContext, + &pKernelGraphicsContextShared->pKernelGraphicsContextUnicast[subdevInst]); + } + SLI_LOOP_END; + } + + portMemFree(pKernelGraphicsContextShared->pKernelGraphicsContextUnicast); + pKernelGraphicsContextShared->pKernelGraphicsContextUnicast = NULL; +} + +/*! + * Destroy the unicast context state + */ +void +shrkgrctxDestructUnicast_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContextShared *pKernelGraphicsContextShared, + KernelGraphicsContext *pKernelGraphicsContext, + KernelGraphicsContextUnicast *pKernelGraphicsContextUnicast +) +{ + GR_GLOBALCTX_BUFFER buff; + + if (kgrctxShouldCleanup_HAL(pGpu, pKernelGraphicsContext)) + kgrctxFreeAssociatedCtxBuffers(pGpu, pKernelGraphicsContext); + + FOR_EACH_IN_ENUM(GR_GLOBALCTX_BUFFER, buff) + { + vaListDestroy(&pKernelGraphicsContextUnicast->globalCtxBufferVaList[buff]); + } + FOR_EACH_IN_ENUM_END; + vaListDestroy(&pKernelGraphicsContextUnicast->ctxPatchBuffer.vAddrList); + vaListDestroy(&pKernelGraphicsContextUnicast->pmCtxswBuffer.vAddrList); + vaListDestroy(&pKernelGraphicsContextUnicast->zcullCtxswBuffer.vAddrList); +} + +/*! 
+ * @brief Perform cleanup tasks run on channel removal from context + */ +void shrkgrctxDetach_IMPL +( + OBJGPU *pGpu, + KernelGraphicsContextShared *pKernelGraphicsContextShared, + KernelGraphicsContext *pKernelGraphicsContext, + KernelChannel *pKernelChannel +) +{ + + if (NV2080_ENGINE_TYPE_IS_GR(kchannelGetEngineType(pKernelChannel))) + { + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelChannel); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = {0}; + KernelGraphics *pKernelGraphics; + NV_STATUS status; + + kgrmgrCtrlSetChannelHandle(RES_GET_HANDLE(pKernelChannel), &grRouteInfo); + NV_ASSERT_OK_OR_ELSE(status, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics), + return;); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + // Last chance to process FECS ctxsw log before channel goes away + nvEventBufferFecsCallback(pGpu, pKernelGraphics); + } + SLI_LOOP_END + } + + // + // If PDB_PROP_GPU_CLIENT_RM_ALLOCATED_CTX_BUFFER is set, then kernel RM + // is responsible for unmapping the context buffers, otherwise it is left to + // physical RM. + // + if (!kgrctxShouldCleanup(pGpu, pKernelGraphicsContext)) + return; + + + if (pKernelGraphicsContextShared->pKernelGraphicsContextUnicast != NULL) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + kgrctxUnmapBuffers_HAL(pGpu, pKernelGraphicsContext, + &pKernelGraphicsContextShared->pKernelGraphicsContextUnicast[subdevInst], + pKernelChannel); + } + SLI_LOOP_END; + } +} + diff --git a/src/nvidia/src/kernel/gpu/gr/kernel_graphics_manager.c b/src/nvidia/src/kernel/gpu/gr/kernel_graphics_manager.c new file mode 100644 index 000000000..751375752 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/kernel_graphics_manager.c @@ -0,0 +1,963 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/gr/kernel_graphics_manager.h" +#include "kernel/gpu/gr/kernel_graphics.h" + +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" + +#include "kernel/rmapi/client.h" +#include "kernel/rmapi/client_resource.h" + +// COMPUTE +#include "class/clb0c0.h" +#include "class/clb1c0.h" +#include "class/clc0c0.h" +#include "class/clc1c0.h" +#include "class/clc3c0.h" +#include "class/clc5c0.h" +#include "class/clc6c0.h" +#include "class/clc7c0.h" +// GFX +#include "class/clb097.h" +#include "class/clb197.h" +#include "class/clc097.h" +#include "class/clc197.h" +#include "class/clc397.h" +#include "class/clc597.h" +#include "class/clc697.h" +#include "class/clc797.h" +// TWOD +#include "class/cl902d.h" + +// MEM2MEM +#include "class/cla140.h" + +static NvBool +_kgrmgrGPUInstanceHasComputeInstances +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance != NULL, NV_FALSE); + NvU32 computeInstanceIdx; + + for (computeInstanceIdx = 0; + computeInstanceIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++computeInstanceIdx) + { + if (pKernelMIGGpuInstance->MIGComputeInstance[computeInstanceIdx].bValid) + return NV_TRUE; + } + + return NV_FALSE; +} + +/*! + * @brief Get GR object type from the class number + * + * @param[IN] classNum external class number + * @param[OUT] pObjectType GR class subtype + */ +void +kgrmgrGetGrObjectType_IMPL +( + NvU32 classNum, + NvU32 *pObjectType +) +{ + switch (classNum) + { + case MAXWELL_COMPUTE_A: + case MAXWELL_COMPUTE_B: + case PASCAL_COMPUTE_A: + case PASCAL_COMPUTE_B: + case VOLTA_COMPUTE_A: + case TURING_COMPUTE_A: + case AMPERE_COMPUTE_A: + case AMPERE_COMPUTE_B: + *pObjectType = GR_OBJECT_TYPE_COMPUTE; + break; + case MAXWELL_A: + case MAXWELL_B: + case PASCAL_A: + case PASCAL_B: + case VOLTA_A: + case TURING_A: + case AMPERE_A: + case AMPERE_B: + *pObjectType = GR_OBJECT_TYPE_3D; + break; + case FERMI_TWOD_A: + *pObjectType = GR_OBJECT_TYPE_2D; + break; + case KEPLER_INLINE_TO_MEMORY_B: + *pObjectType = GR_OBJECT_TYPE_MEM; + break; + default: + *pObjectType = GR_OBJECT_TYPE_INVALID; + break; + } +} + +/*! + * @brief Is local ctx buffer supported + * + * @param[IN] bufId buffer Id + * @param[IN] bClassSupported2D Is 2D class supported + */ +NvBool +kgrmgrIsCtxBufSupported_IMPL +( + GR_CTX_BUFFER bufId, + NvBool bClassSupported2D +) +{ + NvBool bSupported = NV_FALSE; + + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_CTX_BUFFER, bufId), NV_FALSE); + + // All buffers are supported when 2D class is supported + if (bClassSupported2D) + { + return NV_TRUE; + } + + switch (bufId) + { + case GR_CTX_BUFFER_ZCULL: + // fall-through + case GR_CTX_BUFFER_PREEMPT: + // fall-through + case GR_CTX_BUFFER_SPILL: + // fall-through + case GR_CTX_BUFFER_BETA_CB: + // fall-through + case GR_CTX_BUFFER_PAGEPOOL: + // fall-through + case GR_CTX_BUFFER_RTV_CB: + bSupported = NV_FALSE; + break; + + case GR_CTX_BUFFER_PM: + // fall-through + case GR_CTX_BUFFER_MAIN: + // fall-through + case GR_CTX_BUFFER_PATCH: + bSupported = NV_TRUE; + break; + + // No default case - compiler enforces update if enum changes + } + return bSupported; +} + +/*! 
+ * @brief Is globalctx buffer supported + * + * @param[IN] bufId buffer Id + * @param[IN] bClassSupported2D Is 2D class supported + */ +NvBool +kgrmgrIsGlobalCtxBufSupported_IMPL +( + GR_GLOBALCTX_BUFFER bufId, + NvBool bClassSupported2D +) +{ + NvBool bSupported = NV_FALSE; + + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_GLOBALCTX_BUFFER, bufId), NV_FALSE); + + // All buffers are supported when 2D class is supported + if (bClassSupported2D) + { + return NV_TRUE; + } + + switch (bufId) + { + case GR_GLOBALCTX_BUFFER_BUNDLE_CB: + // fall-through + case GR_GLOBALCTX_BUFFER_PAGEPOOL: + // fall-through + case GR_GLOBALCTX_BUFFER_ATTRIBUTE_CB: + // fall-through + case GR_GLOBALCTX_BUFFER_RTV_CB: + // fall-through + case GR_GLOBALCTX_BUFFER_GFXP_POOL: + // fall-through + case GR_GLOBALCTX_BUFFER_GFXP_CTRL_BLK: + bSupported = NV_FALSE; + break; + + case GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP: + // fall-through + case GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP: + // fall-through + case GR_GLOBALCTX_BUFFER_FECS_EVENT: + // fall-through + case GR_GLOBAL_BUFFER_GLOBAL_PRIV_ACCESS_MAP: + bSupported = NV_TRUE; + break; + + // No default case - compiler enforces update if enum changes + } + + return bSupported; +} + +/*! + * @brief Sets swizzID and engineID on routing info if they don't already exist. + * + * @param[in] engID GR engine ID + * @param[in, out] pRouteInfo Client provided routing info + */ +void +kgrmgrCtrlSetEngineID_IMPL +( + NvU32 engID, + NV2080_CTRL_GR_ROUTE_INFO *pRouteInfo +) +{ + if (NULL == pRouteInfo) + { + return; + } + else if (NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_NONE == + DRF_VAL(2080_CTRL_GR, _ROUTE_INFO_FLAGS, _TYPE, pRouteInfo->flags)) + { + pRouteInfo->flags = DRF_DEF(2080_CTRL_GR, _ROUTE_INFO_FLAGS, _TYPE, _ENGID); + pRouteInfo->route = DRF_NUM64(2080_CTRL_GR, _ROUTE_INFO_DATA, _ENGID, engID); + } +} + +/*! + * @brief Sets channel handle on routing info if it doesn't already exist. + * + * @param[in] hChannel Channel handle + * @param[in, out] pRouteInfo Client provided routing info + */ +void +kgrmgrCtrlSetChannelHandle_IMPL +( + NvHandle hChannel, + NV2080_CTRL_GR_ROUTE_INFO *pRouteInfo +) +{ + if (NULL == pRouteInfo) + { + return; + } + else if (NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_NONE == + DRF_VAL(2080_CTRL_GR, _ROUTE_INFO_FLAGS, _TYPE, pRouteInfo->flags)) + { + pRouteInfo->flags = DRF_DEF(2080_CTRL_GR, _ROUTE_INFO_FLAGS, _TYPE, _CHANNEL); + pRouteInfo->route = DRF_NUM64(2080_CTRL_GR, _ROUTE_INFO_DATA, _CHANNEL_HANDLE, hChannel); + } +} + +NV_STATUS +kgrmgrConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + ENGDESCRIPTOR engDesc +) +{ + return NV_OK; +} + +void +kgrmgrDestruct_IMPL +( + KernelGraphicsManager *pKernelGraphicsManager +) +{ + portMemSet(&pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks, 0, + sizeof(pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks)); + portMemFree(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks); + pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks = NULL; + portMemFree(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo); + pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo = NULL; + pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized = NV_FALSE; +} + +/*! + * @brief Set legacy Kgraphics Static Info (i.e. 
state of GR0) + */ +void +kgrmgrSetLegacyKgraphicsStaticInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + KernelGraphics *pKernelGraphics +) +{ + NV_CHECK_OR_RETURN_VOID(LEVEL_INFO, !pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized); + NV_ASSERT_OR_RETURN_VOID((pKernelGraphics != NULL) && (kgraphicsGetInstance(pGpu, pKernelGraphics) == 0)); + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN_VOID(pKernelGraphicsStaticInfo != NULL); + + portMemCopy(&pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks, sizeof(pKernelGraphicsStaticInfo->floorsweepingMasks), + &pKernelGraphicsStaticInfo->floorsweepingMasks, sizeof(pKernelGraphicsStaticInfo->floorsweepingMasks)); + + if (pKernelGraphicsStaticInfo->pPpcMasks != NULL) + { + pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks = portMemAllocNonPaged(sizeof(*pKernelGraphicsStaticInfo->pPpcMasks)); + NV_ASSERT_OR_GOTO(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks != NULL, cleanup); + + portMemCopy(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks, sizeof(*pKernelGraphicsStaticInfo->pPpcMasks), + pKernelGraphicsStaticInfo->pPpcMasks, sizeof(*pKernelGraphicsStaticInfo->pPpcMasks)); + } + + if (pKernelGraphicsStaticInfo->pGrInfo != NULL) + { + pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo = portMemAllocNonPaged(sizeof(*pKernelGraphicsStaticInfo->pGrInfo)); + NV_ASSERT_OR_GOTO(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo != NULL, cleanup); + + portMemCopy(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo, sizeof(*pKernelGraphicsStaticInfo->pGrInfo), + pKernelGraphicsStaticInfo->pGrInfo, sizeof(*pKernelGraphicsStaticInfo->pGrInfo)); + } + + pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized = NV_TRUE; + return; + +cleanup: + portMemFree(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks); + pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks = NULL; + portMemFree(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo); + pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo = NULL; +} + +/*! + * @brief Retrieves associated KernelGraphics engine for given client / route info + * + * @param[in] pGpu + * @param[in] pKernelGraphicsManager + * @param[in] hClient Client handle + * @param[in] grRouteInfo Client-provided info to direct GR accesses + * @param[out] ppKernelGraphics (Optional) Ptr to store appropriate KernelGraphics *, if desired. 
+ */ +NV_STATUS +kgrmgrCtrlRouteKGR_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvHandle hClient, + const NV2080_CTRL_GR_ROUTE_INFO *pGrRouteInfo, + KernelGraphics **ppKernelGraphics +) +{ + MIG_INSTANCE_REF ref; + KernelGraphics *pKernelGraphics; + NvU32 type; + NV_STATUS status = NV_OK; + NvU32 grIdx; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = *pGrRouteInfo; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + if (!IS_MIG_IN_USE(pGpu)) + { + grIdx = 0; + goto done; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + + // + // Compute instances always have 1 GR engine, so automatically fill in + // the route info when subscribed to a compute instance + // + if (ref.pMIGComputeInstance != NULL) + { + portMemSet(&grRouteInfo, 0, sizeof(NV2080_CTRL_GR_ROUTE_INFO)); + kgrmgrCtrlSetEngineID(0, &grRouteInfo); + } + else + { + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilegeByHandle(hClient); + if (!rmclientIsAdminByHandle(hClient, privLevel) && + _kgrmgrGPUInstanceHasComputeInstances(pGpu, pKernelGraphicsManager, ref.pKernelMIGGpuInstance)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + type = DRF_VAL(2080_CTRL_GR, _ROUTE_INFO_FLAGS, _TYPE, grRouteInfo.flags); + switch (type) + { + case NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_NONE: + NV_PRINTF(LEVEL_ERROR, + "Cannot give GR Route flag of TYPE_NONE with MIG enabled!\n"); + return NV_ERR_INVALID_ARGUMENT; + + case NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_ENGID: + { + NvU32 localGrIdx = DRF_VAL64(2080_CTRL_GR, _ROUTE_INFO_DATA, + _ENGID, grRouteInfo.route); + NvU32 globalEngType; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_GR(localGrIdx), + &globalEngType)); + NV_ASSERT_OR_RETURN(NV2080_ENGINE_TYPE_IS_GR(globalEngType), NV_ERR_INVALID_STATE); + grIdx = NV2080_ENGINE_TYPE_GR_IDX(globalEngType); + + break; + } + + case NV2080_CTRL_GR_ROUTE_INFO_FLAGS_TYPE_CHANNEL: + { + KernelChannel *pKernelChannel; + NvU32 engineType; + NvHandle hChannel = DRF_VAL64(2080_CTRL_GR, _ROUTE_INFO_DATA, + _CHANNEL_HANDLE, grRouteInfo.route); + + status = CliGetKernelChannel(hClient, hChannel, &pKernelChannel); + if (status != NV_OK) + { + RsResourceRef *pChanGrpRef; + KernelChannelGroupApi *pKernelChannelGroupApi = NULL; + KernelChannelGroup *pKernelChannelGroup = NULL; + + // + // If retrieving a channel with the given hChannel doesn't work, + // try interpreting it as a handle to a channel group instead. 
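+            // A TSG carries a single engineType for the whole group, which is
+            // all the routing below needs.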
+ // + status = CliGetChannelGroup(hClient, hChannel, &pChanGrpRef, NULL); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to find a channel or TSG with given handle 0x%08x associated with hClient=0x%08x\n", + hChannel, hClient); + return NV_ERR_INVALID_ARGUMENT; + } + + pKernelChannelGroupApi = dynamicCast(pChanGrpRef->pResource, + KernelChannelGroupApi); + NV_ASSERT_OR_RETURN( + (pKernelChannelGroupApi != NULL && + pKernelChannelGroupApi->pKernelChannelGroup != NULL), + NV_ERR_INVALID_STATE); + pKernelChannelGroup = + pKernelChannelGroupApi->pKernelChannelGroup; + + NV_PRINTF(LEVEL_INFO, + "Found TSG with given handle 0x%08x, using this to determine GR engine ID\n", + hChannel); + engineType = pKernelChannelGroup->engineType; + } + else + { + NV_PRINTF(LEVEL_INFO, + "Found channel with given handle 0x%08x, using this to determine GR engine ID\n", + hChannel); + engineType = kchannelGetEngineType(pKernelChannel); + } + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to route GR using non-GR engine type 0x%x\n", + engineType); + return NV_ERR_INVALID_ARGUMENT; + } + + grIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + break; + } + default: + NV_PRINTF(LEVEL_ERROR, + "Unrecognized GR Route flag type 0x%x!\n", type); + return NV_ERR_INVALID_ARGUMENT; + } + +done: + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, grIdx); + NV_ASSERT_OR_RETURN(pKernelGraphics != NULL, NV_ERR_INVALID_STATE); + + if (ppKernelGraphics != NULL) + *ppKernelGraphics = pKernelGraphics; + + return status; +} + +/*! + * @return legacy GPC mask enumerated by this chip + */ +NvU32 +kgrmgrGetLegacyGpcMask_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager +) +{ + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized, 0); + + return pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks.gpcMask; +} + +/*! + * @return legacy TPC mask for certain GPC + * + * @param[in] pGpu + * @param[in] KernelGraphicsManager + * @param[in] gpcId Indicates logical GPC ID when MIG enabled or physical + * GPC ID when MIG disabled + */ +NvU32 +kgrmgrGetLegacyTpcMask_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 gpcId +) +{ + NvU32 maxNumGpcs; + + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized, 0); + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo != NULL, 0); + + maxNumGpcs = pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS].data; + NV_CHECK_OR_RETURN(LEVEL_ERROR, (gpcId < maxNumGpcs), 0); + + return pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks.tpcMask[gpcId]; +} + +/*! 
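The same channel-handle routing pattern is used by kgrobjConstruct and _kgrobjDestruct below; a condensed sketch of the caller side (hypothetical helper, hClient/hChannel are placeholder handles):

static NV_STATUS
_exampleRouteToKernelGraphics
(
    OBJGPU *pGpu,
    NvHandle hClient,
    NvHandle hChannel,
    KernelGraphics **ppKernelGraphics
)
{
    KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu);
    NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = {0};

    // Route by channel handle; when MIG is not in use this resolves to GR0.
    kgrmgrCtrlSetChannelHandle(hChannel, &grRouteInfo);
    return kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient,
                              &grRouteInfo, ppKernelGraphics);
}
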
+ * @brief Get legacy PPC mask for certain GPC + */ +NV_STATUS +kgrmgrGetLegacyPpcMask_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 physGpcId, + NvU32 *pPpcMask +) +{ + NvU32 maxNumGpcs; + + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo != NULL, NV_ERR_INVALID_STATE); + maxNumGpcs = pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS].data; + NV_CHECK_OR_RETURN(LEVEL_ERROR, (physGpcId < maxNumGpcs), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pPpcMask != NULL), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks != NULL), NV_ERR_NOT_SUPPORTED); + + *pPpcMask = pKernelGraphicsManager->legacyKgraphicsStaticInfo.pPpcMasks->mask[physGpcId]; + + return NV_OK; +} + +/*! + * @brief Returns legacy zcull mask for specific gpc + */ +NvU32 +kgrmgrGetLegacyZcullMask_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 physGpcId +) +{ + NvU32 maxNumGpcs; + + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized, 0); + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo != NULL, NV_ERR_INVALID_STATE); + maxNumGpcs = pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS].data; + NV_CHECK_OR_RETURN(LEVEL_ERROR, (physGpcId < maxNumGpcs), NV_ERR_INVALID_ARGUMENT); + + return pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks.zcullMask[physGpcId]; +} + +/*! + * @brief Function to Alloc VEIDs for a GR engine + * + * @param[IN] pGpu + * @param[IN] pKernelGraphicsManager + * @param[IN] grIdx phys gr idx + * @param[IN] gpcCount Total GPCs connected to this GR engine + * @param[IN] pKernelMIGGPUInstance + */ +NV_STATUS +kgrmgrAllocVeidsForGrIdx_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 grIdx, + NvU32 gpcCount, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance +) +{ + NvU32 maxVeidsPerGpc; + NvU32 maxVeidsForThisGr; + NvU32 veidStart = 0; + NvU32 veidEnd = 0; + NvU32 GPUInstanceVeidEnd; + NvU64 GPUInstanceVeidMask; + NvU64 GPUInstanceFreeVeidMask; + NvU64 grVeidMask; + NvU64 reqVeidMask; + NvU32 i; + + // This GR should not be already configured to use any VEIDs + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->grIdxVeidMask[grIdx] == 0, NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN( + kgrmgrGetMaxVeidsPerGpc(pGpu, pKernelGraphicsManager, &maxVeidsPerGpc)); + + // We statically assign VEIDs to a GR based on the number of GPCs connected to it + maxVeidsForThisGr = maxVeidsPerGpc * gpcCount; + reqVeidMask = NVBIT64(maxVeidsForThisGr) - 1; + + // Create a mask for VEIDs associated with this GPU instance + GPUInstanceVeidEnd = pKernelMIGGPUInstance->resourceAllocation.veidOffset + pKernelMIGGPUInstance->resourceAllocation.veidCount - 1; + GPUInstanceVeidMask = DRF_SHIFTMASK64(GPUInstanceVeidEnd:pKernelMIGGPUInstance->resourceAllocation.veidOffset); + + NV_ASSERT_OR_RETURN(GPUInstanceVeidMask != 0x0, NV_ERR_INVALID_STATE); + + GPUInstanceFreeVeidMask = ~pKernelGraphicsManager->veidInUseMask & GPUInstanceVeidMask; + + for (i = pKernelMIGGPUInstance->resourceAllocation.veidOffset; i <= GPUInstanceVeidEnd; i += maxVeidsPerGpc) + { + // See if requested slots are available within this range + if (((GPUInstanceFreeVeidMask >> i) & 
reqVeidMask) == reqVeidMask) + { + veidStart = i; + veidEnd = veidStart + maxVeidsForThisGr - 1; + break; + } + } + + NV_CHECK_OR_RETURN(LEVEL_SILENT, i <= GPUInstanceVeidEnd, + NV_ERR_INSUFFICIENT_RESOURCES); + + grVeidMask = DRF_SHIFTMASK64(veidEnd:veidStart); + NV_ASSERT_OR_RETURN(grVeidMask != 0x0, NV_ERR_INVALID_STATE); + + // VEID range should not overlap with existing VEIDs in use + NV_ASSERT_OR_RETURN((pKernelGraphicsManager->veidInUseMask & grVeidMask) == 0, NV_ERR_STATE_IN_USE); + + // mark each VEID in the range as "in use" + pKernelGraphicsManager->veidInUseMask |= grVeidMask; + pKernelGraphicsManager->grIdxVeidMask[grIdx] |= grVeidMask; + + return NV_OK; +} + +/*! + * @brief Function to Clear Gr Engine to VEIDs mapping + */ +void +kgrmgrClearVeidsForGrIdx_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 grIdx +) +{ + NvU64 veidMask = pKernelGraphicsManager->grIdxVeidMask[grIdx]; + + // mark all VEIDs of this GR engine as "not in use" + NV_ASSERT((pKernelGraphicsManager->veidInUseMask & veidMask) == veidMask); + pKernelGraphicsManager->veidInUseMask &= ~veidMask; + pKernelGraphicsManager->grIdxVeidMask[grIdx] = 0; +} + +/*! + * @brief Function to get max VEID count per GPC + */ +NV_STATUS +kgrmgrGetMaxVeidsPerGpc_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 *pMaxVeidsPerGpc +) +{ + NvU32 maxVeids; + NvU32 maxGpcCount; + + NV_ASSERT_OR_RETURN(pMaxVeidsPerGpc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo != NULL, NV_ERR_INVALID_STATE); + + maxVeids = pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo->infoList[NV0080_CTRL_GR_INFO_INDEX_MAX_SUBCONTEXT_COUNT].data; + + maxGpcCount = gpuGetLitterValues_HAL(pGpu, NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS); + NV_ASSERT_OR_RETURN(maxGpcCount != 0, NV_ERR_INSUFFICIENT_RESOURCES); + + *pMaxVeidsPerGpc = (maxVeids / maxGpcCount); + + return NV_OK; +} + +/*! + * @brief Function to get starting VEID for a Gr Engine + */ +NV_STATUS +kgrmgrGetVeidBaseForGrIdx_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 grIdx, + NvU32 *pVeidStart +) +{ + NvU64 veidMask; + NV_ASSERT_OR_RETURN(pVeidStart != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(grIdx != KGRMGR_MAX_GR, NV_ERR_INVALID_ARGUMENT); + + *pVeidStart = 0; + + veidMask = pKernelGraphicsManager->grIdxVeidMask[grIdx]; + + // + // If a GR is not configured, VEID mask for it will be "0" and counting + // "0" in a zero-based mask will result in max bit-width size. + // + if (veidMask != 0x0) + *pVeidStart = portUtilCountTrailingZeros64(veidMask); + + return NV_OK; +} + +/*! + * @brief Function to get GR index for a VEID + */ +NV_STATUS +kgrmgrGetGrIdxForVeid_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 veid, + NvU32 *pGrIdx +) +{ + NvU32 i; + NvU64 veidMask = NVBIT64(veid); + + NV_ASSERT_OR_RETURN(pGrIdx != NULL, NV_ERR_INVALID_ARGUMENT); + for (i = 0; i < KGRMGR_MAX_GR; ++i) + { + if ((pKernelGraphicsManager->grIdxVeidMask[i] & veidMask) != 0) + break; + } + NV_ASSERT_OR_RETURN(i != KGRMGR_MAX_GR, NV_ERR_OBJECT_NOT_FOUND); + *pGrIdx = i; + return NV_OK; +} + +/*! + * @brief discovers maximum size of local ctx buffers. 
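A worked illustration of the VEID mask arithmetic in kgrmgrAllocVeidsForGrIdx above; the numbers are hypothetical:

// Suppose maxVeidsPerGpc = 8, gpcCount = 2, and the GPU instance owns
// VEIDs 16..31 (veidOffset = 16, veidCount = 16), none currently in use:
//
//   reqVeidMask         = NVBIT64(8 * 2) - 1      = 0x000000000000FFFF
//   GPUInstanceVeidMask = DRF_SHIFTMASK64(31:16)  = 0x00000000FFFF0000
//
// The search loop tests i = 16, 24, ...; at i = 16 the whole request fits
// ((freeMask >> 16) & reqVeidMask == reqVeidMask), so veidStart = 16,
// veidEnd = 31, and grVeidMask = DRF_SHIFTMASK64(31:16) is marked in use.
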
+ */ +NV_STATUS +kgrmgrDiscoverMaxLocalCtxBufInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + KernelGraphics *pKernelGraphics, + NvU32 swizzId +) +{ + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NvU32 bufId; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, + kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, swizzId), NV_OK); + + // Make sure sizes of all buffers are setup + NV_ASSERT_OK_OR_RETURN( + kgraphicsInitializeDeferredStaticData(pGpu, pKernelGraphics, NV01_NULL_OBJECT, NV01_NULL_OBJECT)); + + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + + // Get sizes of local ctx buffers + FOR_EACH_IN_ENUM(GR_CTX_BUFFER, bufId) + { + if (bufId == GR_CTX_BUFFER_MAIN) + { + NvU32 size; + + // Get size of main buffer including subctx headers + NV_ASSERT_OK_OR_RETURN(kgraphicsGetMainCtxBufferSize(pGpu, pKernelGraphics, NV_TRUE, &size)); + kgraphicsSetCtxBufferInfo(pGpu, pKernelGraphics, bufId, + size, + RM_PAGE_SIZE, + RM_ATTR_PAGE_SIZE_4KB, + kgraphicsShouldForceMainCtxContiguity_HAL(pGpu, pKernelGraphics)); + } + else + { + NvU32 fifoEngineId; + + NV_ASSERT_OK_OR_RETURN( + kgrctxCtxBufferToFifoEngineId(bufId, &fifoEngineId)); + + kgraphicsSetCtxBufferInfo(pGpu, pKernelGraphics, bufId, + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[fifoEngineId].size, + RM_PAGE_SIZE, + RM_ATTR_PAGE_SIZE_4KB, + ((bufId == GR_CTX_BUFFER_PATCH) || (bufId == GR_CTX_BUFFER_PM))); + } + } + FOR_EACH_IN_ENUM_END; + return NV_OK; +} + +/*! + * @brief Get ctxbufpool info for the global ctx buffer + */ +const CTX_BUF_INFO * +kgrmgrGetGlobalCtxBufInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + GR_GLOBALCTX_BUFFER buf +) +{ + NV_ASSERT_OR_RETURN(NV_ENUM_IS(GR_GLOBALCTX_BUFFER, buf), NULL); + return &pKernelGraphicsManager->globalCtxBufInfo[buf]; +} + +/*! + * @brief Set ctxbufpool parameters for the global ctx buffer + */ +void +kgrmgrSetGlobalCtxBufInfo_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + GR_GLOBALCTX_BUFFER buf, + NvU64 size, + NvU64 align, + RM_ATTR_PAGE_SIZE attr, + NvBool bContiguous +) +{ + CTX_BUF_INFO *pInfo; + NV_ASSERT_OR_RETURN_VOID(NV_ENUM_IS(GR_GLOBALCTX_BUFFER, buf)); + + pInfo = &pKernelGraphicsManager->globalCtxBufInfo[buf]; + pInfo->size = size; + pInfo->align = align; + pInfo->attr = attr; + pInfo->bContig = bContiguous; +} + +/*! + * @brief Gets maximum size of GR global ctx buffers. + * These are sizes of buffer for GR0 in legacy mode with all GPCs + * connected to GR0. + */ +NV_STATUS +kgrmgrDiscoverMaxGlobalCtxBufSizes_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + KernelGraphics *pKernelGraphics, + NvBool bMemoryPartitioningNeeded +) +{ + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + GR_GLOBALCTX_BUFFER bufId; + + NV_ASSERT_OR_RETURN(!IS_MIG_IN_USE(pGpu), NV_ERR_INVALID_STATE); + + // + // Bug 2915422: Eventually we expect this check to be replaced by ctxBufPoolIsSupported + // we can't use that check today because PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA is not enabled + // when this function is called because + // kgraphicsInitializeDeferredStaticData below will eventually lead to + // global ctx buffer allocation from ctxBufPools even before these pools are + // populated which happens later during GPU instance creation. 
Once we are + // able to rip out global buffer alloc from + // kgraphicsInitializeDeferredStaticData, we can enable the above property + // early. + // + NV_CHECK_OR_RETURN(LEVEL_SILENT, bMemoryPartitioningNeeded, NV_OK); + + // Make sure sizes of all buffers are setup + NV_ASSERT_OK_OR_RETURN( + kgraphicsInitializeDeferredStaticData(pGpu, pKernelGraphics, NV01_NULL_OBJECT, NV01_NULL_OBJECT)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo->pContextBuffersInfo != NULL, NV_ERR_INVALID_STATE); + + // Get sizes of global ctx buffers + FOR_EACH_IN_ENUM(GR_GLOBALCTX_BUFFER, bufId) + { + NvU32 fifoEngineId; + NV_ASSERT_OK_OR_RETURN( + kgrctxGlobalCtxBufferToFifoEngineId(bufId, &fifoEngineId)); + + // + // contiguity is determined later before reservation as it depends on settings + // that take effect after this point. + // + kgrmgrSetGlobalCtxBufInfo(pGpu, pKernelGraphicsManager, bufId, + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[fifoEngineId].size, + pKernelGraphicsStaticInfo->pContextBuffersInfo->engine[fifoEngineId].alignment, + RM_ATTR_PAGE_SIZE_4KB, NV_FALSE); + } + FOR_EACH_IN_ENUM_END; + + return NV_OK; +} + +/*! + * @return legacy TPC count for certain GPC + * + * @param[in] pGpu + * @param[in] KernelGraphicsManager + * @param[in] gpcId Indicates logical GPC ID + */ +NvU32 +kgrmgrGetLegacyGpcTpcCount_IMPL +( + OBJGPU *pGpu, + KernelGraphicsManager *pKernelGraphicsManager, + NvU32 gpcId +) +{ + NvU32 maxNumGpcs; + + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized, 0); + NV_ASSERT_OR_RETURN(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo != NULL, 0); + + maxNumGpcs = nvPopCount32(pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks.gpcMask); + NV_CHECK_OR_RETURN(LEVEL_ERROR, (gpcId < maxNumGpcs), 0); + + return pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks.tpcCount[gpcId]; +} + diff --git a/src/nvidia/src/kernel/gpu/gr/kernel_graphics_object.c b/src/nvidia/src/kernel/gpu/gr/kernel_graphics_object.c new file mode 100644 index 000000000..80ce778c6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/kernel_graphics_object.c @@ -0,0 +1,692 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/gr/kernel_graphics_manager.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/core/locks.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "vgpu/rpc.h" +#include "kernel/mem_mgr/gpu_vaspace.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" + +#include "class/cl0020.h" + +#include "ctrl/ctrl2080/ctrl2080gr.h" // NV2080_CTRL_GR_ROUTE_INFO + +/*! + * This function calls promote context RPC to promote GR buffers. + * Each ctx buffer may or may not may not have its PA updated (initialized) in + * physical RM, and each ctx buffer should have its VA updated (promoted) in + * physical RM at least once per VAS, of which there may be multiple in a TSG. + * When UVM is enabled, this function is responsible only for updating the PA of + * a given context buffer, and not the VA. + */ +NV_STATUS +kgrobjPromoteContext_IMPL +( + OBJGPU *pGpu, + KernelGraphicsObject *pKernelGraphicsObject, + KernelGraphics *pKernelGraphics +) +{ + NvU32 promoteIds[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES]; + NvU32 promoteIdsSize; + NvBool bAttemptPromote; + NvU32 i; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS params; + NvU32 entryCount; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS status; + Subdevice *pSubdevice; + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + + if (IS_MODS_AMODEL(pGpu)) + return NV_OK; + + pSubdevice = CliGetSubDeviceInfoFromGpu(RES_GET_CLIENT_HANDLE(pKernelGraphicsObject), pGpu); + NV_ASSERT_OR_RETURN(pSubdevice != NULL, NV_ERR_INVALID_STATE); + + kgrobjGetPromoteIds_HAL(pGpu, pKernelGraphicsObject, + NV_ARRAY_ELEMENTS(promoteIds), + promoteIds, + &promoteIdsSize, + &bAttemptPromote); + + if (promoteIdsSize == 0) + return NV_OK; + + portMemSet(¶ms, 0, sizeof(params)); + + entryCount = 0; + for (i = 0; i < promoteIdsSize; ++i) + { + NvBool bInitialize; + NvBool bPromote = NV_FALSE; + + // Setup parameters to initialize the PA if necessary + NV_ASSERT_OK_OR_RETURN( + kgrctxPrepareInitializeCtxBuffer(pGpu, + pKernelGraphicsObject->pKernelGraphicsContext, + pKernelGraphics, + pChannelDescendant->pKernelChannel, + promoteIds[i], + ¶ms.promoteEntry[entryCount], + &bInitialize)); + + if (bAttemptPromote) + { + // Setup parameters to promote the VA if necessary + NV_ASSERT_OK_OR_RETURN( + kgrctxPreparePromoteCtxBuffer(pGpu, + pKernelGraphicsObject->pKernelGraphicsContext, + pChannelDescendant->pKernelChannel, + promoteIds[i], + ¶ms.promoteEntry[entryCount], + &bPromote)); + } + + // If initialization / promotion was necessary, then move to next index + if (bInitialize || bPromote) + entryCount++; + } + + // Nothing to promote, so return early + if (entryCount == 0) + return NV_OK; + + params.engineType = NV2080_ENGINE_TYPE_GR(kgraphicsGetInstance(pGpu, pKernelGraphics)); + params.hChanClient = RES_GET_CLIENT_HANDLE(pChannelDescendant); + params.hObject = RES_GET_PARENT_HANDLE(pChannelDescendant); + params.entryCount = entryCount; + + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, + ¶ms, + sizeof(params)); + + if (status == NV_OK) + { 
+ // + // If we successfully promoted the PA, flip a flag to ensure we don't + // try to promote it again. The VA_LIST should already track this for + // VA, but we can't rely on it for PA due to UVM. + // + for (i = 0; i < entryCount; ++i) + { + if (!params.promoteEntry[i].bInitialize) + continue; + + kgrctxMarkCtxBufferInitialized(pGpu, + pKernelGraphicsObject->pKernelGraphicsContext, + pKernelGraphics, + pChannelDescendant->pKernelChannel, + params.promoteEntry[i].bufferId); + } + } + + return status; +} + +/*! + * @brief Construct the Kernel GR object for the given GPU + * + * @param[in] pGpu + * @param[in] pKernelGraphics + * @param[in] pKernelGraphicsObject + */ +static NV_STATUS +_kgrAlloc +( + OBJGPU *pGpu, + KernelGraphics *pKernelGraphics, + KernelGraphicsObject *pKernelGraphicsObject +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + NvU32 numGpcs; + NvU32 classNum = pChannelDescendant->resourceDesc.externalClassId; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NvU32 gfid = kchannelGetGfid(pChannelDescendant->pKernelChannel); + + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + // Prevent ctx creation on sys pipes with 0 GPCs attached + if (!IS_MODS_AMODEL(pGpu)) + { + numGpcs = nvPopCount32(pKernelGraphicsStaticInfo->floorsweepingMasks.gpcMask); + NV_CHECK_OR_RETURN(LEVEL_ERROR, numGpcs > 0, NV_ERR_INSUFFICIENT_RESOURCES); + } + + // Each object may need to create an Mmio mapping + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrobjSetComputeMmio_HAL(pGpu, pKernelGraphicsObject)); + + listInit(&pKernelGraphicsObject->activeDebuggers, portMemAllocatorGetGlobalNonPaged()); + + // Ensure that ctx buffer information is initialized + if (!IS_MODS_AMODEL(pGpu)) + { + NV_ASSERT_OK_OR_RETURN( + kgraphicsInitializeDeferredStaticData(pGpu, pKernelGraphics, NV01_NULL_OBJECT, NV01_NULL_OBJECT)); + } + + pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid].bFecsTraceUnsupportedInGuest = NV_FALSE; + + // Allocate FECS buffer in Guest for SRIOV configs. + if (kgrctxShouldManageCtxBuffers_HAL(pGpu, pKernelGraphicsObject->pKernelGraphicsContext, gfid) || IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgraphicsAllocKgraphicsBuffers_HAL(pGpu, pKernelGraphics, pKernelGraphicsObject->pKernelGraphicsContext, pChannelDescendant->pKernelChannel)); + } + + if (kgrctxShouldManageCtxBuffers_HAL(pGpu, pKernelGraphicsObject->pKernelGraphicsContext, gfid)) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxAllocCtxBuffers(pGpu, pKernelGraphicsObject->pKernelGraphicsContext, pKernelGraphics, pKernelGraphicsObject)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrctxMapCtxBuffers(pGpu, pKernelGraphicsObject->pKernelGraphicsContext, pKernelGraphics, pKernelGraphicsObject)); + } + + kgrctxIncObjectCount_HAL(pGpu, pKernelGraphicsObject->pKernelGraphicsContext, classNum); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrobjPromoteContext(pGpu, pKernelGraphicsObject, pKernelGraphics)); + + return NV_OK; +} + +/*! 
+ * @brief Destruct the Kernel GR object + * + * @param[in] pKernelGraphicsObject + * @param[in] bDestructor NV_TRUE if called from destructor, used to share + * cleanup code with constructor + */ +static void _kgrobjDestruct +( + KernelGraphicsObject *pKernelGraphicsObject, + NvBool bDestructor +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelGraphicsObject); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + NV_STATUS status = NV_OK; + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = {0}; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pChannelDescendant); + NvHandle hParent = RES_GET_PARENT_HANDLE(pChannelDescendant); + NvU32 classNum = pChannelDescendant->resourceDesc.externalClassId; + NvU32 gfid = kchannelGetGfid(pChannelDescendant->pKernelChannel); + + // If MIG is enabled, perform GR instance routing based upon parent channel handle + kgrmgrCtrlSetChannelHandle(hParent, &grRouteInfo); + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + + if (status != NV_OK) + SLI_LOOP_CONTINUE; + + pKernelGraphics->globalCtxBuffersInfo.pGlobalCtxBuffers[gfid].bFecsTraceUnsupportedInGuest = NV_FALSE; + + // Free Compute Mmio mapping + kgrobjFreeComputeMmio_HAL(pGpu, pKernelGraphicsObject); + + NV_ASSERT(listCount(&pKernelGraphicsObject->activeDebuggers) == 0); + listDestroy(&pKernelGraphicsObject->activeDebuggers); + + if (bDestructor) + kgrctxDecObjectCount_HAL(pGpu, pKernelGraphicsObject->pKernelGraphicsContext, classNum); + + if (kgrobjShouldCleanup_HAL(pGpu, pKernelGraphicsObject)) + { + kgrctxUnmapCtxBuffers(pGpu, + pKernelGraphicsObject->pKernelGraphicsContext, + pKernelGraphicsObject, + pKernelGraphics, + bDestructor); + } + } + SLI_LOOP_END; +} + +// +// Graphics engine object creation routine. +// +NV_STATUS +kgrobjConstruct_IMPL +( + KernelGraphicsObject *pKernelGraphicsObject, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + NvHandle hClient = pCallContext->pClient->hClient; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_STATE); + + NV_PRINTF(LEVEL_INFO, "class: 0x%x on channel: 0x%08x\n", + pChannelDescendant->resourceDesc.externalClassId, kchannelGetDebugTag(pKernelChannel)); + + // + // Legacy code believed this to be possible, but Resource Server should + // prevent NV01_NULL_OBJECT from being chosen as a handle. 
+ // + NV_ASSERT_OR_RETURN(pParams->hResource != NV01_NULL_OBJECT, + NV_ERR_INVALID_STATE); + + { + KernelChannel *pkChannel = pChannelDescendant->pKernelChannel; + + NV_ASSERT_OR_RETURN(pkChannel != NULL, NV_ERR_INVALID_STATE); + + if (kgrctxFromKernelChannel(pkChannel, &pKernelGraphicsObject->pKernelGraphicsContext) == NV_OK) + { + KernelGraphicsContext *pKernelGraphicsContext = pKernelGraphicsObject->pKernelGraphicsContext; + + // + // Add each gr object as a dependant of the context such that all + // objects are guaranteed to be torn down before the context + // + refAddDependant(RES_GET_REF(pKernelGraphicsContext), + RES_GET_REF(pKernelGraphicsObject)); + + } + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo = {0}; + + // If MIG is enabled, perform GR instance routing based upon parent channel handle + kgrmgrCtrlSetChannelHandle(pParams->hParent, &grRouteInfo); + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, hClient, &grRouteInfo, &pKernelGraphics)); + if (status != NV_OK) + SLI_LOOP_BREAK; + + // Construct the Kernel Object + status = _kgrAlloc(pGpu, pKernelGraphics, pKernelGraphicsObject); + + if (status != NV_OK) + SLI_LOOP_BREAK; + } + SLI_LOOP_END; + + // failed + if (status != NV_OK) + { + // Destroy the kernel object from the constructor + _kgrobjDestruct(pKernelGraphicsObject, NV_FALSE); + } + + return status; +} + +/*! + * @brief GR object Destructor + * + * @param[in] pKernelGraphicsObject + */ +void +kgrobjDestruct_IMPL +( + KernelGraphicsObject *pKernelGraphicsObject +) +{ + // Destroy the kernel object from the destructor + _kgrobjDestruct(pKernelGraphicsObject, NV_TRUE); +} + +NV_STATUS +kgrobjGetMemInterMapParams_IMPL +( + KernelGraphicsObject *pKernelGraphicsObject, + RMRES_MEM_INTER_MAP_PARAMS *pParams +) +{ + MEMORY_DESCRIPTOR *pSrcMemDesc = pKernelGraphicsObject->pMmioMemDesc; + + if (pParams->bSubdeviceHandleProvided) + { + NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings of non-memory objects not supported.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (pSrcMemDesc == NULL) + return NV_ERR_INVALID_OBJECT_HANDLE; + + pParams->pSrcMemDesc = pSrcMemDesc; + pParams->pSrcGpu = pSrcMemDesc->pGpu; + + return NV_OK; +} + +/*! + * @brief Set up MMIO memDesc for Compute Object + */ +NV_STATUS +kgrobjSetComputeMmio_IMPL +( + OBJGPU *pGpu, + KernelGraphicsObject *pKernelGraphicsObject +) +{ + ChannelDescendant *pChanDes = staticCast(pKernelGraphicsObject, ChannelDescendant); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 classNum = pChanDes->resourceDesc.externalClassId; + NvU32 objType; + + kgrmgrGetGrObjectType(classNum, &objType); + + // Nothing to do for non-compute + if (objType != GR_OBJECT_TYPE_COMPUTE) + return NV_OK; + + // This can be called multiple times in SLI, so just skip it if present + if (pKernelGraphicsObject->pMmioMemDesc != NULL) + return NV_OK; + + // Set up MMIO memDesc to allow GPU mappings of compute object + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memdescCreate(&pKernelGraphicsObject->pMmioMemDesc, pGpu, RM_PAGE_SIZE, 0, + NV_TRUE, ADDR_FBMEM, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE)); + + // + // The address field is completely ignored for these mappings. We use the + // chid as the address strictly for debug purposes. 
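+    // (Concretely, memdescDescribe below is handed RM_PAGE_SIZE * ChID as the
+    // base address, i.e. one page per channel ID, which only serves to make
+    // the mappings distinguishable when debugging.)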
+ // + memdescDescribe(pKernelGraphicsObject->pMmioMemDesc, ADDR_FBMEM /* Ignored */, + RM_PAGE_SIZE * pChanDes->pKernelChannel->ChID, + RM_PAGE_SIZE); + memdescSetPteKind(pKernelGraphicsObject->pMmioMemDesc, + memmgrGetMessageKind_HAL(pGpu, pMemoryManager)); + + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, pKernelGraphicsObject->pMmioMemDesc, AT_GPU, + RM_ATTR_PAGE_SIZE_4KB); + + return NV_OK; +} + +/*! + * @brief Free up MMIO memDesc for Compute Object + */ +void +kgrobjFreeComputeMmio_IMPL +( + OBJGPU *pGpu, + KernelGraphicsObject *pKernelGraphicsObject +) +{ + memdescDestroy(pKernelGraphicsObject->pMmioMemDesc); + pKernelGraphicsObject->pMmioMemDesc = NULL; +} + +/*! + * @brief Should we perform grobj cleanup? + * If client RM is not managing the ctx buffers, kernel RM should not unmap ctx buffers + */ +NvBool +kgrobjShouldCleanup_KERNEL +( + OBJGPU *pGpu, + KernelGraphicsObject *pKernelGraphicsObject +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + NvU32 gfid = kchannelGetGfid(pChannelDescendant->pKernelChannel); + + return gpuIsClientRmAllocatedCtxBufferEnabled(pGpu) && !IS_GFID_VF(gfid); +} + +/*! + * @brief Should we perform grobj cleanup? + * If client RM is managing the ctx buffers, physical RM should not unmap ctx buffers + */ +NvBool +kgrobjShouldCleanup_PHYSICAL +( + OBJGPU *pGpu, + KernelGraphicsObject *pKernelGraphicsObject +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + NvU32 gfid = kchannelGetGfid(pChannelDescendant->pKernelChannel); + + return !gpuIsClientRmAllocatedCtxBufferEnabled(pGpu) || (gpuIsSriovEnabled(pGpu) && IS_GFID_PF(gfid)); +} + +/*! + * @brief Retrieve the IDs of ctx buffers which need to be promoted for this ctx + * This version only serves SRIOV-Heavy, which allocates just the FECS + * event buffer in guest RM and promotes its PA to the host, while the + * rest of the buffers are managed by the host. + */ +void +kgrobjGetPromoteIds_VF +( + OBJGPU *pGpu, + KernelGraphicsObject *pKernelGraphicsObject, + NvU32 maxPromoteIds, + NvU32 *pPromoteIds, + NvU32 *pNumEntries, + NvBool *pbPromote +) +{ + void kgrobjGetPromoteIds_FWCLIENT(OBJGPU *, KernelGraphicsObject *, NvU32, NvU32 *, NvU32 *, NvBool *); + NvU32 promoteSriovHeavy[] = + { + // + // For SRIOV Heavy, guest allocates FECS event buffer and informs host + // of its address, other buffers are managed by host + // + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT + }; + NvBool bSriovHeavyEnabled = gpuIsWarBug200577889SriovHeavyEnabled(pGpu); + + if (!bSriovHeavyEnabled) + { + // Use the same implementation as FWCLIENT + kgrobjGetPromoteIds_FWCLIENT(pGpu, pKernelGraphicsObject, maxPromoteIds, pPromoteIds, pNumEntries, pbPromote); + return; + } + + *pNumEntries = 0; + // SRIOV Heavy only initializes and does not promote the FECS buffer + *pbPromote = NV_FALSE; + + NV_ASSERT_OR_RETURN_VOID(NV_ARRAY_ELEMENTS(promoteSriovHeavy) <= maxPromoteIds); + *pNumEntries = NV_ARRAY_ELEMENTS(promoteSriovHeavy); + portMemCopy(pPromoteIds, sizeof(promoteSriovHeavy), promoteSriovHeavy, sizeof(promoteSriovHeavy)); +} + +/*! + * @brief Retrieve the IDs of ctx buffers which need to be promoted for this ctx + * This version serves SRIOV and FWCLIENT, which allocate and map the ctx + * buffers themselves and promote the PA/VA to host RM. 
+ */ +void +kgrobjGetPromoteIds_FWCLIENT +( + OBJGPU *pGpu, + KernelGraphicsObject *pKernelGraphicsObject, + NvU32 maxPromoteIds, + NvU32 *pPromoteIds, + NvU32 *pNumEntries, + NvBool *pbPromote +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + NvBool bAcquire3d; + + // Buffers which need to be promoted if we are not allocating a 3d context + NvU32 promoteNon3d[] = + { + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP + }; + // Buffers which need to be promoted if we are allocating a 3d context + NvU32 promote3d[] = + { + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP + }; + + *pNumEntries = 0; + *pbPromote = NV_TRUE; + + if (!gpuIsClientRmAllocatedCtxBufferEnabled(pGpu)) + return; + + // Do we need to promote any 3D-specific context buffers? + if (pChannelDescendant->pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bLegacyMode) + { + bAcquire3d = NV_TRUE; + } + else + { + NvU32 classNum = pChannelDescendant->resourceDesc.externalClassId; + NvU32 objType; + kgrmgrGetGrObjectType(classNum, &objType); + bAcquire3d = ((objType == GR_OBJECT_TYPE_2D) || (objType == GR_OBJECT_TYPE_3D)); + } + + // Determine which set of buffers we need to try to init/promote + if (bAcquire3d) + { + NV_ASSERT_OR_RETURN_VOID(NV_ARRAY_ELEMENTS(promote3d) <= maxPromoteIds); + *pNumEntries = NV_ARRAY_ELEMENTS(promote3d); + portMemCopy(pPromoteIds, sizeof(promote3d), promote3d, sizeof(promote3d)); + } + else + { + NV_ASSERT_OR_RETURN_VOID(NV_ARRAY_ELEMENTS(promoteNon3d) <= maxPromoteIds); + *pNumEntries = NV_ARRAY_ELEMENTS(promoteNon3d); + portMemCopy(pPromoteIds, sizeof(promoteNon3d), promoteNon3d, sizeof(promoteNon3d)); + } +} + +/*! + * @brief Retrieve the IDs of ctx buffers which need to be promoted for this ctx + * This version serves bare metal and GSP when these environments are + * managing the ctx buffers. There is additional physical-only + * initialization required for these buffers on allocation which would + * otherwise be handled when these buffers are promoted from client RM, + * but in absence of above should be called manually by physical RM to + * keep the flow / initialization consistent. 
+ */ +void +kgrobjGetPromoteIds_PHYSICAL +( + OBJGPU *pGpu, + KernelGraphicsObject *pKernelGraphicsObject, + NvU32 maxPromoteIds, + NvU32 *pPromoteIds, + NvU32 *pNumEntries, + NvBool *pbPromote +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pKernelGraphicsObject, ChannelDescendant); + NvU32 gfid = kchannelGetGfid(pChannelDescendant->pKernelChannel); + NvBool bAcquire3d; + + // Buffers which need to be promoted if we are not allocating a 3d context + NvU32 promoteNon3d[] = + { + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP + }; + // Buffers which need to be promoted if we are allocating a 3d context + NvU32 promote3d[] = + { + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP, + NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP + }; + + *pNumEntries = 0; + *pbPromote = NV_FALSE; + + // If client is managing ctx buffers, we expect client to initialize these buffers + if (gpuIsClientRmAllocatedCtxBufferEnabled(pGpu) && !(gpuIsSriovEnabled(pGpu) && IS_GFID_PF(gfid))) + return; + + // Do we need to promote any 3D-specific context buffers? + if (pChannelDescendant->pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bLegacyMode) + { + bAcquire3d = NV_TRUE; + } + else + { + NvU32 classNum = pChannelDescendant->resourceDesc.externalClassId; + NvU32 objType; + kgrmgrGetGrObjectType(classNum, &objType); + bAcquire3d = ((objType == GR_OBJECT_TYPE_2D) || (objType == GR_OBJECT_TYPE_3D)); + } + + // Determine which set of buffers we need to try to init/promote + if (bAcquire3d) + { + NV_ASSERT_OR_RETURN_VOID(NV_ARRAY_ELEMENTS(promote3d) <= maxPromoteIds); + *pNumEntries = NV_ARRAY_ELEMENTS(promote3d); + portMemCopy(pPromoteIds, sizeof(promote3d), promote3d, sizeof(promote3d)); + } + else + { + NV_ASSERT_OR_RETURN_VOID(NV_ARRAY_ELEMENTS(promoteNon3d) <= maxPromoteIds); + *pNumEntries = NV_ARRAY_ELEMENTS(promoteNon3d); + portMemCopy(pPromoteIds, sizeof(promoteNon3d), promoteNon3d, sizeof(promoteNon3d)); + } +} + diff --git a/src/nvidia/src/kernel/gpu/gr/kernel_sm_debugger_session.c b/src/nvidia/src/kernel/gpu/gr/kernel_sm_debugger_session.c new file mode 100644 index 000000000..f45d40d4a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/kernel_sm_debugger_session.c @@ -0,0 +1,449 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/os/os.h" +#include "kernel/core/locks.h" +#include "kernel/gpu/gr/kernel_sm_debugger_session.h" +#include "kernel/gpu/gr/kernel_graphics_object.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "kernel/gpu/device/device.h" +#include "libraries/resserv/rs_client.h" +#include "kernel/rmapi/rs_utils.h" +#include "virtualization/hypervisor/hypervisor.h" + +#include "ctrl/ctrl83de/ctrl83dedebug.h" + +#include "class/cl0080.h" +#include "class/clc637.h" +#include "class/cl2080.h" +#include "class/cl83de.h" + +// Macro to validate two clients having the same security tokens +#define VALIDATE_MATCHING_SEC_TOKENS(handle1, handle2, secInfo, status) \ + do { \ + OBJSYS *pSys = SYS_GET_INSTANCE(); \ + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE) && \ + ((secInfo).privLevel < RS_PRIV_LEVEL_USER_ROOT)) \ + { \ + status = osValidateClientTokens((void*)rmclientGetSecurityTokenByHandle(handle1), \ + (void*)rmclientGetSecurityTokenByHandle(handle2)); \ + NV_ASSERT_OR_RETURN(status == NV_OK, status); \ + } \ + } while (0); + +static NV_STATUS _ShareDebugger(KernelSMDebuggerSession *, RsResourceRef *, RsResourceRef *); + +void +dbgSessionRemoveDependant_IMPL +( + RmDebuggerSession *pDbgSession, + RsResourceRef *pResourceRef +) +{ + RsSession *pSession = dynamicCast(pDbgSession, RsSession); + + // Freeing a KernelSMDebuggerSession dependant should just call the destructor normally + if (pSession->bValid && (pResourceRef->externalClassId == GT200_DEBUGGER)) + ksmdbgssnFreeCallback(dynamicCast(pResourceRef->pResource, KernelSMDebuggerSession)); + + sessionRemoveDependant_IMPL(staticCast(pDbgSession, RsSession), pResourceRef); +} + +void +dbgSessionRemoveDependency_IMPL +( + RmDebuggerSession *pDbgSession, + RsResourceRef *pResourceRef +) +{ + RsSession *pSession = dynamicCast(pDbgSession, RsSession); + + // + // Call all registered KernelSMDebuggerSessions' free callbacks (destructor basically) + // when the underlying KernelGraphicsObject goes away. This invalidates the KernelSMDebuggerSession + // and causes all control calls on it to fail since the KernelGraphicsObject dependancy has disappeared. + // + if (pSession->bValid) + { + RsResourceRefListIter it; + + it = listIterAll(&pSession->dependants); + + while (listIterNext(&it)) + { + RsResourceRef *pDependency = *(it.pValue); + + if (pDependency->externalClassId == GT200_DEBUGGER) + ksmdbgssnFreeCallback(dynamicCast(pDependency->pResource, KernelSMDebuggerSession)); + } + } + + // This call will invalidate the RmDebuggerSession + sessionRemoveDependency_IMPL(staticCast(pDbgSession, RsSession), pResourceRef); +} + +static NV_STATUS +_ksmdbgssnInitClient +( + OBJGPU *pGpu, + KernelSMDebuggerSession *pKernelSMDebuggerSession +) +{ + NV0080_ALLOC_PARAMETERS nv0080AllocParams; + NV2080_ALLOC_PARAMETERS nv2080AllocParams; + NV_STATUS status = NV_OK; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + + pKernelSMDebuggerSession->hInternalClient = NV01_NULL_OBJECT; + + // Allocate a (kernel-space) client. 
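+    // (Allocating NV01_ROOT with all handles set to NV01_NULL_OBJECT asks RM
+    // to generate the client handle itself; it is returned through the params
+    // pointer, here the address of hInternalClient.)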
+ NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &pKernelSMDebuggerSession->hInternalClient), + failed); + + // Allocate a device. + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pKernelSMDebuggerSession->hInternalClient, + &pKernelSMDebuggerSession->hInternalDevice), + failed); + portMemSet(&nv0080AllocParams, 0, sizeof(nv0080AllocParams)); + nv0080AllocParams.deviceId = gpuGetDeviceInstance(pGpu); + nv0080AllocParams.hClientShare = pKernelSMDebuggerSession->hInternalClient; + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalDevice, + NV01_DEVICE_0, + &nv0080AllocParams), + failed); + + // Allocate a subdevice. + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pKernelSMDebuggerSession->hInternalClient, + &pKernelSMDebuggerSession->hInternalSubdevice), + failed); + portMemSet(&nv2080AllocParams, 0, sizeof(nv2080AllocParams)); + nv2080AllocParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalDevice, + pKernelSMDebuggerSession->hInternalSubdevice, + NV20_SUBDEVICE_0, + &nv2080AllocParams), + failed); + + if (bMIGInUse) + { + NVC637_ALLOCATION_PARAMETERS nvC637AllocParams; + MIG_INSTANCE_REF ref; + + portMemSet(&nvC637AllocParams, 0, sizeof(nvC637AllocParams)); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, pKernelSMDebuggerSession->hDebuggerClient, &ref), + failed); + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pKernelSMDebuggerSession->hInternalClient, + &pKernelSMDebuggerSession->hInternalSubscription), + failed); + nvC637AllocParams.swizzId = ref.pKernelMIGGpuInstance->swizzId; + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalSubdevice, + pKernelSMDebuggerSession->hInternalSubscription, + AMPERE_SMC_PARTITION_REF, + &nvC637AllocParams), + failed); + } + + return NV_OK; +failed: + if (pKernelSMDebuggerSession->hInternalClient != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, pKernelSMDebuggerSession->hInternalClient, pKernelSMDebuggerSession->hInternalClient); + pKernelSMDebuggerSession->hInternalClient = NV01_NULL_OBJECT; + pKernelSMDebuggerSession->hInternalDevice = NV01_NULL_OBJECT; + pKernelSMDebuggerSession->hInternalSubdevice = NV01_NULL_OBJECT; + pKernelSMDebuggerSession->hInternalSubscription = NV01_NULL_OBJECT; + pKernelSMDebuggerSession->hInternalMemMapping = NV01_NULL_OBJECT; + } + + return status; +} + +NV_STATUS +ksmdbgssnConstruct_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV83DE_ALLOC_PARAMETERS *pNv83deAllocParams = pParams->pAllocParams; + OBJGPU *pGpu; + NvHandle hAppChannel; + NvHandle hAppClient; + NvHandle hClass3dObject; + NvHandle hKernelSMDebuggerSession; + NvHandle hSubdevice; + NV_STATUS status = NV_OK; + RsClient *pAppClient; + RsResourceRef *pGrResourceRef; + RsResourceRef *pParentRef; + + // The app using the new interface should initialize this to 0. 
+ if (pNv83deAllocParams->hDebuggerClient_Obsolete) + { + NV_ASSERT_FAILED("Old Nv83deAllocParams interface not supported"); + return NV_ERR_INVALID_ARGUMENT; + } + + hAppClient = pNv83deAllocParams->hAppClient; + hClass3dObject = pNv83deAllocParams->hClass3dObject; + hKernelSMDebuggerSession = pParams->hResource; + + // Validate + lookup the application client + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + serverGetClientUnderLock(&g_resServ, hAppClient, &pAppClient)); + + status = clientGetResourceRef(pAppClient, hClass3dObject, &pGrResourceRef); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "hObject 0x%x not found for client 0x%x\n", + pParams->hParent, pParams->hClient); + return NV_ERR_INVALID_OBJECT_PARENT; + } + + // + // On GSP, the security token is either the GFID or NULL. When the security token is + // set to the GFID this will properly constrain debugger access to wtihin a single + // Guest on GHV environments. When it is NULL, this allows access to any client in + // the system but in order to take advantage of this CPU-RM would already have + // to have been compromised anyway. + // + // On legacy vGPU systems, the security token will match the Guest's vGPU plugin. So you'd + // only be able to access other resources allocated by the same Guest. + // + if (RMCFG_FEATURE_PLATFORM_GSP || hypervisorIsVgxHyper()) + { + API_SECURITY_INFO *pSecInfo = pParams->pSecInfo; + + VALIDATE_MATCHING_SEC_TOKENS((pCallContext->pClient->hClient), hAppClient, + *pSecInfo, status); + } + else + { + RS_ACCESS_MASK debugAccessMask; + + // + // On CPU-RM and Guest RM systems check that debugging rights were shared. + // + // Check that the application client allowed debugging rights for the debugger + // client on the compute object (i.e. the current client allocating this object). + // + // + RS_ACCESS_MASK_CLEAR(&debugAccessMask); + RS_ACCESS_MASK_ADD(&debugAccessMask, RS_ACCESS_DEBUG); + + status = rsAccessCheckRights(pGrResourceRef, pCallContext->pClient, + &debugAccessMask); + + NV_CHECK_OR_ELSE(LEVEL_ERROR, + status == NV_OK, + NV_PRINTF(LEVEL_ERROR, "Current user does not have debugging rights on the compute object. 
Status = 0x%x\n", status); + return NV_ERR_INSUFFICIENT_PERMISSIONS;); + } + + pKernelSMDebuggerSession->pObject = dynamicCast(pGrResourceRef->pResource, KernelGraphicsObject); + if (pKernelSMDebuggerSession->pObject == NULL) + { + return NV_ERR_INVALID_OBJECT; + } + + pParentRef = pGrResourceRef->pParentRef; + hAppChannel = pParentRef->hResource; + + // Ensure that debugger session is created under same device as the object under debug + pGpu = GPU_RES_GET_GPU(pKernelSMDebuggerSession); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pGpu == GPU_RES_GET_GPU(pKernelSMDebuggerSession->pObject), + NV_ERR_INVALID_ARGUMENT); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + CliGetSubDeviceHandleFromGpu(pAppClient->hClient, pGpu, &hSubdevice)); + + // Initialize the object info + pKernelSMDebuggerSession->hChannelClient = pAppClient->hClient; + pKernelSMDebuggerSession->hDebugger = hKernelSMDebuggerSession; + pKernelSMDebuggerSession->hDebuggerClient = pCallContext->pClient->hClient; + pKernelSMDebuggerSession->hChannel = hAppChannel; + pKernelSMDebuggerSession->hSubdevice = hSubdevice; + + // Insert it into this Object's debugger list + if (listAppendValue(&pKernelSMDebuggerSession->pObject->activeDebuggers, &pKernelSMDebuggerSession) == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to insert Debugger into channel list, handle = 0x%x\n", + pKernelSMDebuggerSession->hDebugger); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _ksmdbgssnInitClient(pGpu, pKernelSMDebuggerSession)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _ShareDebugger(pKernelSMDebuggerSession, pCallContext->pResourceRef, pGrResourceRef)); + + return status; +} + +/** + * @brief Binds a debugger object to the given KernelGraphicsObject's RsSession object + * or allocates a new one if it's not currently referencing one. + * + * @param[in] pKernelSMDebuggerSession Underlying debugger object + * @param[in,out] pGrResourceRef RsResourceRef for the channel that will be + * bound to an RsSession if one isn't already + * there. + * @param[in,out] pDebuggerRef RsResourceRef for the debugger object that will + * be bound to a new RsSession or the channel's + * existing one. 
+ * + * @return NV_OK on success, error code on failure + */ +static NV_STATUS +_ShareDebugger +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + RsResourceRef *pDebuggerRef, + RsResourceRef *pGrResourceRef +) +{ + NV_STATUS status = NV_OK; + RsSession *pRsSession; + + // Allocate a new RsSession if the KernelGraphicsObject doesn't reference one already + if (pGrResourceRef->pDependantSession == NULL) + { + RsShared *pShared = NULL; + + status = serverAllocShare(&g_resServ, classInfo(RmDebuggerSession), &pShared); + if (status != NV_OK) + return status; + + pKernelSMDebuggerSession->pDebugSession = dynamicCast(pShared, RmDebuggerSession); + pRsSession = staticCast(pKernelSMDebuggerSession->pDebugSession, RsSession); + + // Add KernelGraphicsObject as a dependency + sessionAddDependency(pRsSession, pGrResourceRef); + + // Add debugger object as a dependant of the new RsSession object + sessionAddDependant(pRsSession, pDebuggerRef); + + // + // Decrease ref count if newly allocated, we only want the ref + // count for the RsSession object to be 2 in this case + // + serverFreeShare(&g_resServ, pShared); + } + else + { + pKernelSMDebuggerSession->pDebugSession = dynamicCast(pGrResourceRef->pDependantSession, + RmDebuggerSession); + + if (pKernelSMDebuggerSession->pDebugSession == NULL) + { + NV_PRINTF(LEVEL_ERROR, "KernelGraphicsObject already a dependent of a non-debugger session\n"); + return NV_ERR_INVALID_STATE; + } + + // Add debugger object as a dependant of the existing RsSession object + pRsSession = staticCast(pKernelSMDebuggerSession->pDebugSession, RsSession); + sessionAddDependant(pRsSession, pDebuggerRef); + } + + return NV_OK; +} + +// +// Empty destructor since the destruction is done in the free callback which is invoked +// by Resource Server when the RmDebuggerSession shared object is invalidated due to either +// the KernelSMDebuggerSession being freed or the underlying KernelGraphicsObject dependancy being freed. +// +void +ksmdbgssnDestruct_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession +) +{} + +// +// The free callback will always be invoked before the destructor for either the KernelSMDebuggerSession +// (empty since we clean up here) and before the KernelGraphicsObject dependancy's destructor. This is a bit +// different from how other Resource Server classes clean up since there is a dependancy owned +// by a different RM client. +// +void +ksmdbgssnFreeCallback_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // This should free the entire hierarchy of objects. 
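+    // (The device, subdevice and any MIG subscription allocated in
+    // _ksmdbgssnInitClient are all descendants of hInternalClient, so freeing
+    // the client frees them as well.)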
+ pRmApi->Free(pRmApi, pKernelSMDebuggerSession->hInternalClient, pKernelSMDebuggerSession->hInternalClient); + + // Remove it from the pObject debugger list + listRemoveFirstByValue(&pKernelSMDebuggerSession->pObject->activeDebuggers, &pKernelSMDebuggerSession); + +} + +NV_STATUS +ksmdbgssnInternalControlForward_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NvU32 command, + void *pParams, + NvU32 size +) +{ + return gpuresInternalControlForward_IMPL(staticCast(pKernelSMDebuggerSession, GpuResource), command, pParams, size); +} + +NvHandle +ksmdbgssnGetInternalObjectHandle_IMPL(KernelSMDebuggerSession *pKernelSMDebuggerSession) +{ + return NV01_NULL_OBJECT; +} diff --git a/src/nvidia/src/kernel/gpu/gr/kernel_sm_debugger_session_ctrl.c b/src/nvidia/src/kernel/gpu/gr/kernel_sm_debugger_session_ctrl.c new file mode 100644 index 000000000..e31aee377 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gr/kernel_sm_debugger_session_ctrl.c @@ -0,0 +1,883 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************************************************************\ +* * +* Description: * +* This module contains Nv04Control support for Debugger object * +* represented by GT200_DEBUGGER class instantiations. 
* +* * +\***************************************************************************/ + +#include "kernel/rmapi/control.h" +#include "kernel/rmapi/rmapi.h" +#include "kernel/os/os.h" +#include "kernel/core/locks.h" +#include "vgpu/rpc.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "kernel/rmapi/rs_utils.h" +#include "gpu/mmu/mmu_trace.h" +#include "kernel/gpu/gr/kernel_sm_debugger_session.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/mem_mgr/virt_mem_allocator_common.h" +#include "kernel/gpu/gr/kernel_graphics_context.h" +#include "kernel/gpu/gr/kernel_graphics_object.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/bus/kern_bus.h" + +#include "class/cl83de.h" +#include "class/clc637.h" +#include "ctrl/ctrl83de.h" + +// +// _nv83deCtrlCmdFetchVAS +// +// Helper to fetch the OBJVASPACE * from the hChannel without requiring +// the caller to explicitly pass in the handle corresponding to the VaSpaceApi: +// +static NV_STATUS +_nv83deCtrlCmdFetchVAS(NvU32 hClient, NvU32 hChannel, OBJVASPACE **ppVASpace) +{ + KernelChannel *pKernelChannel = NULL; + + NV_ASSERT_OR_RETURN(ppVASpace != NULL, NV_ERR_INVALID_ARGUMENT); + + // Fetch the corresponding Channel object from our handle + NV_ASSERT_OK_OR_RETURN(CliGetKernelChannel(hClient, hChannel, &pKernelChannel)); + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + *ppVASpace = pKernelChannel->pVAS; + + return NV_OK; +} + +// +// _nv83deCtrlCmdValidateRange +// +// Helper to traverse the virtual memory page hierarchy and +// determine whether or not the virtual address (VA) range provided as +// input has valid and allocated pages mapped to it in its entirety. +// +// This command's input is NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS, which +// contains a buffer of NV83DE_CTRL_DEBUG_ACCESS_OPs +// +// Possible return values: +// NV_OK +// NV_ERR_INVALID_ARGUMENT +// NV_ERR_INVALID_XLATE +// +static NV_STATUS +_nv83deCtrlCmdValidateRange +( + OBJGPU *pGpu, + OBJVASPACE *pVASpace, + NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *pParams +) +{ + NvU32 i; + NvU64 totalLength; + NV_STATUS status = NV_OK; + + // Loop through to validate range for all provided ops + for (i = 0; i < pParams->count; i++) + { + MMU_TRACE_PARAM mmuParams; + MMU_TRACE_ARG traceArg = {0}; + + // Ensure that input gpuVA is 4-byte aligned. cpuVA is handled directly by portmemCopy. + NV_ASSERT_OR_RETURN((pParams->opsBuffer[i].gpuVA & 3) == 0, NV_ERR_INVALID_ARGUMENT); + + // Sanity-check the requested size + if (pParams->opsBuffer[i].size == 0 || !portSafeAddU64(pParams->opsBuffer[i].gpuVA, pParams->opsBuffer[i].size, &totalLength)) + return NV_ERR_INVALID_ARGUMENT; + + mmuParams.mode = MMU_TRACE_MODE_VALIDATE; + mmuParams.va = pParams->opsBuffer[i].gpuVA; + mmuParams.vaLimit = pParams->opsBuffer[i].gpuVA + pParams->opsBuffer[i].size - 1; + mmuParams.pArg = &traceArg; + + NV_ASSERT_OK_OR_RETURN(mmuTrace(pGpu, pVASpace, &mmuParams)); + + // + // mmuTrace may return NV_OK if the range is invalid but the translation did + // not otherwise cause errors. Use traceArg.valid to satisfy the output + // status needed for _nv83deCtrlCmdValidateRange.
+ // + if (traceArg.valid) + { + pParams->opsBuffer[i].valid = 1; + } + else + { + status = NV_ERR_INVALID_XLATE; + pParams->opsBuffer[i].valid = 0; + } + } + + return status; +} + +static NV_STATUS +_nv8deCtrlCmdReadWriteSurface +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *pParams, + NvBool bWrite +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelSMDebuggerSession); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelSMDebuggerSession); + OBJVASPACE *pVASpace = NULL; + NvU32 count = pParams->count; + NvU32 i; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (count > MAX_ACCESS_OPS) + return NV_ERR_INVALID_ARGUMENT; + + // Attempt to retrieve the VAS pointer + NV_ASSERT_OK_OR_RETURN( + _nv83deCtrlCmdFetchVAS(hClient, pKernelSMDebuggerSession->hChannel, &pVASpace)); + + // Validate VA range and fail if invalid + NV_ASSERT_OK_OR_RETURN( + _nv83deCtrlCmdValidateRange(pGpu, pVASpace, pParams)); + + for (i = 0; i < count; i++) + { + NvU8 *pBase; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvU64 virtAddr = pParams->opsBuffer[i].gpuVA; + NvP64 bufPtr = pParams->opsBuffer[i].pCpuVA; + NvU32 bufSize = pParams->opsBuffer[i].size; + NvU32 pageStartOffset; + NvU32 start4kPage; + NvU32 end4kPage; + NvU32 curSize; + NvU32 cur4kPage; + status = NV_OK; + + // Break it up by 4K pages for now + pageStartOffset = NvOffset_LO32(virtAddr) & RM_PAGE_MASK; + start4kPage = (NvOffset_LO32(virtAddr) >> 12) & 0x1FFFF; + end4kPage = (NvOffset_LO32(virtAddr + bufSize - 1) >> 12) & 0x1FFFF; + + curSize = RM_PAGE_SIZE - pageStartOffset; + virtAddr &= ~RM_PAGE_MASK; + + for (cur4kPage = start4kPage; cur4kPage <= end4kPage; ++cur4kPage) + { + MMU_TRACE_PARAM mmuParams = {0}; + MMU_TRACE_ARG traceArg = {0}; + + mmuParams.mode = MMU_TRACE_MODE_TRANSLATE; + mmuParams.va = virtAddr; + mmuParams.vaLimit = virtAddr; + mmuParams.pArg = &traceArg; + + NV_ASSERT_OK_OR_RETURN( + mmuTrace(pGpu, pVASpace, &mmuParams)); + + if (curSize > bufSize) + { + curSize = bufSize; + } + + if (traceArg.aperture == ADDR_SYSMEM) + { + NvP64 physAddr = NV_PTR_TO_NvP64(traceArg.pa); + NvU64 limit = (NvU64)(curSize - 1); + + NvU32 os02Flags = DRF_DEF(OS02, _FLAGS, _LOCATION, _PCI) | + DRF_DEF(OS02, _FLAGS, _MAPPING, _NO_MAP) | + DRF_DEF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS) | + DRF_DEF(OS02, _FLAGS, _COHERENCY, _CACHED); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + osCreateMemFromOsDescriptor(pGpu, + physAddr, + pKernelSMDebuggerSession->hInternalClient, + os02Flags, + &limit, + &pMemDesc, + NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR, + RS_PRIV_LEVEL_KERNEL)); + } + else if (traceArg.aperture == ADDR_FBMEM) + { + memdescCreate(&pMemDesc, pGpu, curSize, 0, NV_TRUE, traceArg.aperture, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE); + memdescDescribe(pMemDesc, traceArg.aperture, traceArg.pa, curSize); + } + + pBase = kbusMapRmAperture_HAL(pGpu, pMemDesc); + NV_ASSERT_OR_ELSE( + pBase != NULL, + memdescDestroy(pMemDesc); + return NV_ERR_INVALID_ARGUMENT; ); + + if (bWrite) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, + portMemExCopyFromUser(bufPtr, pBase + pageStartOffset, curSize)); + } + else + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, + portMemExCopyToUser(pBase + pageStartOffset, bufPtr, curSize)); + } + + kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pBase, NV_FALSE); + memdescDestroy(pMemDesc); + + if (status != NV_OK) + return status; + + pBase = NULL; + pageStartOffset = 0; + bufPtr = NvP64_PLUS_OFFSET(bufPtr,curSize); + bufSize -= curSize; + curSize = 
RM_PAGE_SIZE; + virtAddr += RM_PAGE_SIZE; + } + } + + return status; +} + +NV_STATUS +ksmdbgssnCtrlCmdReadSurface_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *pParams +) +{ + return _nv8deCtrlCmdReadWriteSurface(pKernelSMDebuggerSession, pParams, NV_FALSE); +} + +NV_STATUS +ksmdbgssnCtrlCmdWriteSurface_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_ACCESS_SURFACE_PARAMETERS *pParams +) +{ + return _nv8deCtrlCmdReadWriteSurface(pKernelSMDebuggerSession, pParams, NV_TRUE); +} + +NV_STATUS +ksmdbgssnCtrlCmdGetMappings_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_GET_MAPPINGS_PARAMETERS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelSMDebuggerSession); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pKernelSMDebuggerSession); + OBJVASPACE *pVASpace = NULL; + MMU_TRACE_ARG traceArg = {0}; + MMU_TRACE_PARAM mmuParams = {0}; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // Attempt to retrieve the VAS pointer + NV_ASSERT_OK_OR_RETURN( + _nv83deCtrlCmdFetchVAS(hClient, pKernelSMDebuggerSession->hChannel, &pVASpace)); + + traceArg.pMapParams = pParams; + + mmuParams.mode = MMU_TRACE_MODE_DUMP_RANGE; + mmuParams.va = pParams->vaLo; + mmuParams.vaLimit = pParams->vaHi; + mmuParams.pArg = &traceArg; + + return mmuTrace(pGpu, pVASpace, &mmuParams); +} + +typedef enum { + GRDBG_MEM_ACCESS_TYPE_INVALID, + GRDBG_MEM_ACCESS_TYPE_READ, + GRDBG_MEM_ACCESS_TYPE_WRITE, +} GrdbgMemoryAccessType; + +static NV_STATUS +_nv83deFlushAllGpusL2Cache(MEMORY_DESCRIPTOR *pMemDesc) +{ + NvU32 gpuCount; + NvU32 gpuMask; + NvU32 gpuInstance = 0; + OBJGPU *pTempGpu; + NV_STATUS rmStatus = NV_OK; + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + + while ((pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + // + // On GPUs with write-back caches, FB_CACHE_INVALIDATE is reduced + // to FB_CACHE_EVICT, which first writes back dirty lines and then + // invalidates clean ones, exactly what we want. On write-through + // caches, it will invalidate clean lines as expected. + // + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmemsysCacheOp_HAL(pTempGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pTempGpu), pMemDesc, + FB_CACHE_SYSTEM_MEMORY, FB_CACHE_INVALIDATE)); + } + + return rmStatus; +} + +static NV_STATUS +_nv83deUnmapMemoryFromGrdbgClient +( + OBJGPU *pTargetGpu, + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NvP64 pCpuVirtAddr, + NvU32 flags +) +{ + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // Unmap memory. 
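+ // The duped hInternalMemMapping handle is freed right after the unmap below, regardless of the + // unmap status, so a subsequent access can dup a fresh handle into the internal client.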
+ status = pRmApi->UnmapFromCpu(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalDevice, + pKernelSMDebuggerSession->hInternalMemMapping, + NvP64_VALUE(pCpuVirtAddr), + flags, + osGetCurrentProcess()); + + // Free the memory handle + pRmApi->Free(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalMemMapping); + pKernelSMDebuggerSession->hInternalMemMapping = NV01_NULL_OBJECT; + + return status; +} + +static NV_STATUS +_nv83deMapMemoryIntoGrdbgClient +( + OBJGPU *pTargetGpu, + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NvHandle hClient, + NvHandle hMemory, + NvU64 offset, + NvU32 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags +) +{ + NV_STATUS rmStatus = NV_OK; + void *pCpuVirtAddr = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // + // We use our own internal client, dup the memory object into our client + // and then call RmMapMemory on it, so that the memory is never mapped + // into the client's address space. + // hInternalMemMappping must be freed after use, since we don't have space to + // store multiple memory handles. + // + NV_ASSERT_OR_RETURN(pKernelSMDebuggerSession->hInternalMemMapping == NV01_NULL_OBJECT, + NV_ERR_INVALID_STATE); + + rmStatus = pRmApi->DupObject(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalDevice, + &pKernelSMDebuggerSession->hInternalMemMapping, + hClient, + hMemory, + 0); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to dup source memory (0x%x,0x%x) under device (status = 0x%x). Attempting subdevice dup.\n", + hClient, hMemory, rmStatus); + + rmStatus = pRmApi->DupObject(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalSubdevice, + &pKernelSMDebuggerSession->hInternalMemMapping, + hClient, + hMemory, + 0); + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to dup source memory (0x%x,0x%x) under subdevice (status = 0x%x). Aborting.\n", + hClient, hMemory, rmStatus); + return rmStatus; + } + } + + // Map memory + rmStatus = pRmApi->MapToCpu(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalSubdevice, + pKernelSMDebuggerSession->hInternalMemMapping, + offset, + length, + &pCpuVirtAddr, + flags); + if (NV_OK != rmStatus) + { + NV_PRINTF(LEVEL_WARNING, "RmMapMemory failed 0x%x\n", rmStatus); + + // Free the memory handle + pRmApi->Free(pRmApi, + pKernelSMDebuggerSession->hInternalClient, + pKernelSMDebuggerSession->hInternalMemMapping); + pKernelSMDebuggerSession->hInternalMemMapping = NV01_NULL_OBJECT; + + return rmStatus; + } + + *ppCpuVirtAddr = NV_PTR_TO_NvP64(pCpuVirtAddr); + + return rmStatus; +} + +static NV_STATUS +_nv83deCtrlCmdDebugAccessMemory +( + OBJGPU *pTargetGpu, + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NvHandle hClient, + NvHandle hMemory, + NvU64 offset, + NvU32 length, + NvP64 buffer, + GrdbgMemoryAccessType accessType +) +{ + RsResourceRef *pResourceRef; + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 totalLength; + NvP64 pCpuVirtAddr = NvP64_NULL; + NV_STATUS rmStatus = NV_OK; + NV_STATUS rmUnmapStatus = NV_OK; + NvU32 flags = 0; + NvBool bGpuCached; + NvBool bCpuMemory; + + // + // SECURITY: Find the hMemory object in the RmCtrl caller's database. + // This ensures the RmCtrl caller has rights to access hMemory. 
+ // + if (serverutilGetResourceRef(hClient, hMemory, &pResourceRef) != NV_OK) + return NV_ERR_INSUFFICIENT_PERMISSIONS; + + // Get a memdesc for this object to determine its attributes + if (!dynamicCast(pResourceRef->pResource, Memory)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_WARNING, + "Invalid handle: hMemory %x is not of type classId(Memory): (GPU 0x%llx, hClient 0x%x, hMemory %x, offset 0x%llx, length 0x%x, flags 0x%x)\n", + hMemory, + pTargetGpu->busInfo.gpuPhysAddr, + hClient, + hMemory, + offset, + length, + flags); + return rmStatus; + } + + pMemDesc = dynamicCast(pResourceRef->pResource, Memory)->pMemDesc; + if (pMemDesc == NULL) + return NV_ERR_INVALID_STATE; + + // Sanity-check the requested size + if ((length == 0) || (!portSafeAddU64(offset, length, &totalLength))) + return NV_ERR_INVALID_ARGUMENT; + if (totalLength > pMemDesc->Size) + return NV_ERR_INVALID_ARGUMENT; + + // Setup mapping flags based on the kind of memory, access type etc. + if (accessType == GRDBG_MEM_ACCESS_TYPE_READ) + { + flags = FLD_SET_DRF(OS33, _FLAGS, _ACCESS, _READ_ONLY, flags); + } + else + { + flags = FLD_SET_DRF(OS33, _FLAGS, _ACCESS, _WRITE_ONLY, flags); + } + + bCpuMemory = (memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM); + bGpuCached = (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED); + + // + // Ask for a direct mapping to this memory, to avoid getting a reflected + // mapping. We'll do explicit cache management to ensure coherence. + // + if (bCpuMemory) + { + flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, flags); + } + + // Map memory into the internal smdbg client + rmStatus = _nv83deMapMemoryIntoGrdbgClient(pTargetGpu, + pKernelSMDebuggerSession, + hClient, + hMemory, + offset, + length, + &pCpuVirtAddr, + flags); + if (NV_OK != rmStatus) + { + NV_PRINTF(LEVEL_WARNING, + "Failed to map memory into internal smdbg client (GPU 0x%llx, hClient 0x%x, hMemory %x, offset 0x%llx, length 0x%x, flags 0x%x): (rmStatus = %x)\n", + pTargetGpu->busInfo.gpuPhysAddr, + hClient, + hMemory, + offset, + length, + flags, + rmStatus); + return rmStatus; + } + + // Fence to ensure previous in-flight accesses are complete + osFlushCpuWriteCombineBuffer(); + + // + // Flush and invalidate SYSMEM lines from L2s of all GPUs. + // Some GPUs have write-back caches, so this must be done both for + // accessType == READ and accessType == WRITE. 
+ // + if (bCpuMemory && bGpuCached) + { + rmStatus = _nv83deFlushAllGpusL2Cache(pMemDesc); + if (NV_OK != rmStatus) + { + NV_PRINTF(LEVEL_WARNING, + "Failed to flush GPU L2 (GPU 0x%llx): (rmStatus = %x)\n", + pTargetGpu->busInfo.gpuPhysAddr, rmStatus); + goto cleanup_mapping; + } + } + + // Perform the requested accessType operation + if (accessType == GRDBG_MEM_ACCESS_TYPE_READ) + { + if (!portMemCopy(NvP64_VALUE(buffer), length, NvP64_VALUE(pCpuVirtAddr), length)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_WARNING, + "portMemCopy failed (from VA 0x" NvP64_fmt " to 0x" NvP64_fmt ", length 0x%x)\n", + pCpuVirtAddr, buffer, length); + goto cleanup_mapping; + } + NV_PRINTF(LEVEL_INFO, "Reading %d bytes of memory from 0x%x\n", + length, hMemory); + } + else + { + if (!portMemCopy(NvP64_VALUE(pCpuVirtAddr), length, NvP64_VALUE(buffer), length)) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_WARNING, + "portMemCopy failed (from VA 0x" NvP64_fmt " to 0x" NvP64_fmt ", length 0x%x)\n", + buffer, pCpuVirtAddr, length); + goto cleanup_mapping; + } + + NV_PRINTF(LEVEL_INFO, "Writing %d bytes of memory to 0x%x\n", length, + hMemory); + } + + // Another fence to ensure our own new accesses are complete + osFlushCpuWriteCombineBuffer(); + +cleanup_mapping: + // Unmap memory. + rmUnmapStatus = _nv83deUnmapMemoryFromGrdbgClient(pTargetGpu, + pKernelSMDebuggerSession, + pCpuVirtAddr, + flags); + // Return the first failure + return (rmStatus != NV_OK ? rmStatus: rmUnmapStatus); +} + +NV_STATUS +ksmdbgssnCtrlCmdDebugReadMemory_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS *pParams +) +{ + return _nv83deCtrlCmdDebugAccessMemory(GPU_RES_GET_GPU(pKernelSMDebuggerSession), + pKernelSMDebuggerSession, + RES_GET_CLIENT_HANDLE(pKernelSMDebuggerSession), + pParams->hMemory, + pParams->offset, + pParams->length, + pParams->buffer, + GRDBG_MEM_ACCESS_TYPE_READ); +} + +NV_STATUS +ksmdbgssnCtrlCmdDebugWriteMemory_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS *pParams +) +{ + return _nv83deCtrlCmdDebugAccessMemory(GPU_RES_GET_GPU(pKernelSMDebuggerSession), + pKernelSMDebuggerSession, + RES_GET_CLIENT_HANDLE(pKernelSMDebuggerSession), + pParams->hMemory, + pParams->offset, + pParams->length, + pParams->buffer, + GRDBG_MEM_ACCESS_TYPE_WRITE); +} + +NV_STATUS +ksmdbgssnCtrlCmdDebugGetHandles_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_GET_HANDLES_PARAMS *pParams +) +{ + pParams->hChannel = pKernelSMDebuggerSession->hChannel; + pParams->hSubdevice = pKernelSMDebuggerSession->hSubdevice; + + return NV_OK; +} + +NV_STATUS ksmdbgssnCtrlCmdDebugExecRegOps_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_EXEC_REG_OPS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelSMDebuggerSession); + NV_STATUS status = NV_OK; + NvBool isClientGspPlugin = NV_FALSE; + + // Check if the user has permission to access the register offset + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, + gpuValidateRegOps(pGpu, pParams->regOps, pParams->regOpCount, + pParams->bNonTransactional, isClientGspPlugin)); + + if (IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + return 
NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +ksmdbgssnCtrlCmdDebugReadBatchMemory_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelSMDebuggerSession); + NV_STATUS status = NV_OK; + NvU32 i; + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pParams->count <= MAX_ACCESS_MEMORY_OPS, + NV_ERR_INVALID_ARGUMENT); + + for (i = 0; i < pParams->count; ++i) + { + NV_STATUS localStatus = NV_OK; + NvP64 pData = (NvP64)(((NvU8 *)pParams->pData) + pParams->entries[i].dataOffset); + + NV_CHECK_OR_ELSE(LEVEL_ERROR, + pParams->entries[i].dataOffset < pParams->dataLength, + localStatus = NV_ERR_INVALID_OFFSET; + goto updateStatus; ); + + NV_CHECK_OK_OR_GOTO(localStatus, LEVEL_ERROR, + _nv83deCtrlCmdDebugAccessMemory(pGpu, + pKernelSMDebuggerSession, + RES_GET_CLIENT_HANDLE(pKernelSMDebuggerSession), + pParams->entries[i].hMemory, + pParams->entries[i].memOffset, + pParams->entries[i].length, + pData, + GRDBG_MEM_ACCESS_TYPE_READ), + updateStatus); + +updateStatus: + pParams->entries[i].status = localStatus; + if ((status == NV_OK) && (localStatus != NV_OK)) + status = localStatus; + } + + return status; +} + +NV_STATUS +ksmdbgssnCtrlCmdDebugWriteBatchMemory_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelSMDebuggerSession); + NV_STATUS status = NV_OK; + NvU32 i; + + for (i = 0; i < pParams->count; ++i) + { + NV_STATUS localStatus = NV_OK; + NvP64 pData = (NvP64)(((NvU8 *)pParams->pData) + pParams->entries[i].dataOffset); + + NV_CHECK_OR_ELSE(LEVEL_ERROR, + (pParams->entries[i].dataOffset + pParams->entries[i].length) <= pParams->dataLength, + localStatus = NV_ERR_INVALID_OFFSET; + goto updateStatus; ); + + NV_CHECK_OK_OR_GOTO(localStatus, LEVEL_ERROR, + _nv83deCtrlCmdDebugAccessMemory(pGpu, + pKernelSMDebuggerSession, + RES_GET_CLIENT_HANDLE(pKernelSMDebuggerSession), + pParams->entries[i].hMemory, + pParams->entries[i].memOffset, + pParams->entries[i].length, + pData, + GRDBG_MEM_ACCESS_TYPE_WRITE), + updateStatus); + +updateStatus: + pParams->entries[i].status = localStatus; + if ((status == NV_OK) && (localStatus != NV_OK)) + status = localStatus; + } + + return status; +} + +NV_STATUS +ksmdbgssnCtrlCmdDebugReadAllSmErrorStates_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_READ_ALL_SM_ERROR_STATES_PARAMS *pParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelSMDebuggerSession); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + NV_RM_RPC_CONTROL(pRmCtrlParams->pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + rmStatus); + + // + // SR-IOV vGPU + // + // MMU fault info is to be managed from within the guest, since host is + // not aware of MMU fault info about the VF. + // SM exception info is still fetched from host via the RPC above. 
+ // + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + NV_ASSERT_OK( + kgrctxLookupMmuFault(pGpu, + pKernelSMDebuggerSession->pObject->pKernelGraphicsContext, + &pParams->mmuFault)); + } + + return rmStatus; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +ksmdbgssnCtrlCmdDebugClearAllSmErrorStates_IMPL +( + KernelSMDebuggerSession *pKernelSMDebuggerSession, + NV83DE_CTRL_DEBUG_CLEAR_ALL_SM_ERROR_STATES_PARAMS *pParams +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pKernelSMDebuggerSession); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + NV_RM_RPC_CONTROL(pRmCtrlParams->pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + rmStatus); + + // + // SR-IOV vGPU + // + // MMU fault info is to be managed from within the guest, since host is + // not aware of MMU fault info about the VF. + // SM exception info is still fetched from host via the RPC above. + // + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + NV_ASSERT_OK( + kgrctxClearMmuFault(pGpu, pKernelSMDebuggerSession->pObject->pKernelGraphicsContext)); + } + + return rmStatus; + } + + return NV_ERR_NOT_SUPPORTED; +} + diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_falcon_ga102.c b/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_falcon_ga102.c new file mode 100644 index 000000000..bcc5b4d52 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_falcon_ga102.c @@ -0,0 +1,236 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides GA102+ specific KernelGsp HAL implementations related to + * execution of Falcon cores. 
+ */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/gpu.h" +#include "gpu/falcon/kernel_falcon.h" + +#include "published/ampere/ga102/dev_falcon_v4.h" +#include "published/ampere/ga102/dev_falcon_second_pri.h" +#include "published/ampere/ga102/dev_fbif_v4.h" + +static NV_STATUS +s_dmaTransfer_GA102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 dest, + NvU32 memOff, + RmPhysAddr srcPhysAddr, + NvU32 sizeInBytes, + NvU32 dmaCmd +) +{ + NV_STATUS status = NV_OK; + RMTIMEOUT timeout; + NvU32 data; + NvU32 bytesXfered = 0; + + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFBASE, NvU64_LO32(srcPhysAddr >> 8)); + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFBASE1, NvU64_HI32(srcPhysAddr >> 8) & 0x1FF); + + while (bytesXfered < sizeInBytes) + { + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMATRFMOFFS, _OFFS, dest, 0); + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFMOFFS, data); + + data = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMATRFFBOFFS, _OFFS, memOff, 0); + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFFBOFFS, data); + + // Write the command + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFCMD, dmaCmd); + + // Poll for completion + data = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFCMD); + + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + while(FLD_TEST_DRF(_PFALCON_FALCON, _DMATRFCMD, _IDLE, _FALSE, data)) + { + status = gpuCheckTimeout(pGpu, &timeout); + if (status == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_ERROR, "Timeout waiting for Falcon DMA to finish\n"); + DBG_BREAKPOINT(); + return status; + } + osSpinLoop(); + data = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMATRFCMD); + } + + bytesXfered += FLCN_BLK_ALIGNMENT; + dest += FLCN_BLK_ALIGNMENT; + memOff += FLCN_BLK_ALIGNMENT; + } + + return status; +} + +/*! + * Execute the HS falcon ucode provided in pFlcnUcode on the falcon engine + * represented by pKernelFlcn and wait for its completion. + * + * For _GA102, pFlcnUcode must be of the BOOT_FROM_HS variant. + * + * Note: callers are expected to reset pKernelFlcn before calling this + * function. 
+ + * @param[in] pGpu GPU object pointer + * @param[in] pKernelGsp KernelGsp object pointer + * @param[in] pFlcnUcode Falcon ucode to execute + * @param[in] pKernelFlcn KernelFalcon engine to execute on + * @param[inout] pMailbox0 Pointer to value of MAILBOX0 to provide/read (or NULL) + * @param[inout] pMailbox1 Pointer to value of MAILBOX1 to provide/read (or NULL) + */ +NV_STATUS +kgspExecuteHsFalcon_GA102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + KernelGspFlcnUcode *pFlcnUcode, + KernelFalcon *pKernelFlcn, + NvU32 *pMailbox0, + NvU32 *pMailbox1 +) +{ + NV_STATUS status; + KernelGspFlcnUcodeBootFromHs *pUcode; + + NvU32 data = 0; + NvU32 dmaCmd; + + NV_ASSERT_OR_RETURN(pFlcnUcode != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pKernelFlcn != NULL, NV_ERR_INVALID_STATE); + + NV_ASSERT_OR_RETURN(pKernelFlcn->bBootFromHs, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pFlcnUcode->bootType == KGSP_FLCN_UCODE_BOOT_FROM_HS, NV_ERR_NOT_SUPPORTED); + + pUcode = &pFlcnUcode->ucodeBootFromHs; + + NV_ASSERT_OR_RETURN(pUcode->pUcodeMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OR_RETURN(memdescGetAddressSpace(pUcode->pUcodeMemDesc) == ADDR_SYSMEM, + NV_ERR_INVALID_ARGUMENT); + + kflcnDisableCtxReq_HAL(pGpu, pKernelFlcn); + + // Program TRANSCFG to fetch the DMA data + data = GPU_REG_RD32(pGpu, pKernelFlcn->fbifBase + NV_PFALCON_FBIF_TRANSCFG(0 /* ctxDma */)); + data = FLD_SET_DRF(_PFALCON, _FBIF_TRANSCFG, _TARGET, _COHERENT_SYSMEM, data); + data = FLD_SET_DRF(_PFALCON, _FBIF_TRANSCFG, _MEM_TYPE, _PHYSICAL, data); + GPU_REG_WR32(pGpu, pKernelFlcn->fbifBase + NV_PFALCON_FBIF_TRANSCFG(0 /* ctxDma */), data); + + // Prepare DMA command + dmaCmd = 0; + dmaCmd = FLD_SET_DRF(_PFALCON, _FALCON_DMATRFCMD, _WRITE, _FALSE, dmaCmd); + dmaCmd = FLD_SET_DRF(_PFALCON, _FALCON_DMATRFCMD, _SIZE, _256B, dmaCmd); + dmaCmd = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMATRFCMD, _CTXDMA, 0, dmaCmd); + + // Prepare DMA command for IMEM + dmaCmd = FLD_SET_DRF(_PFALCON, _FALCON_DMATRFCMD, _IMEM, _TRUE, dmaCmd); + dmaCmd = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMATRFCMD, _SEC, 0x1, dmaCmd); + + // Perform DMA for IMEM + { + RmPhysAddr srcPhysAddr = memdescGetPhysAddr(pUcode->pUcodeMemDesc, AT_GPU, 0); + srcPhysAddr = srcPhysAddr + pUcode->codeOffset - pUcode->imemVa; + + NV_ASSERT_OK_OR_RETURN( + s_dmaTransfer_GA102(pGpu, pKernelFlcn, + pUcode->imemPa, // dest + pUcode->imemVa, // memOff + srcPhysAddr, // srcPhysAddr + pUcode->imemSize, // sizeInBytes + dmaCmd)); + } + + // Prepare DMA command for DMEM + dmaCmd = FLD_SET_DRF(_PFALCON, _FALCON_DMATRFCMD, _IMEM, _FALSE, dmaCmd); + dmaCmd = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMATRFCMD, _SEC, 0x0, dmaCmd); + if (pUcode->dmemVa != FLCN_DMEM_VA_INVALID) + { + dmaCmd = FLD_SET_DRF(_PFALCON, _FALCON_DMATRFCMD, _SET_DMTAG, _TRUE, dmaCmd); + } + + // Perform DMA for DMEM + { + NvU32 memOff = 0; + + RmPhysAddr srcPhysAddr = memdescGetPhysAddr(pUcode->pUcodeMemDesc, AT_GPU, 0); + srcPhysAddr += pUcode->dataOffset; + if (pUcode->dmemVa != FLCN_DMEM_VA_INVALID) + { + srcPhysAddr -= pUcode->dmemVa; + memOff = pUcode->dmemVa; + } + + NV_ASSERT_OK_OR_RETURN( + s_dmaTransfer_GA102(pGpu, pKernelFlcn, + pUcode->dmemPa, // dest + memOff, // memOff + srcPhysAddr, // srcPhysAddr + pUcode->dmemSize, // sizeInBytes + dmaCmd)); + } + + // Program BROM registers for PKC signature validation.
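+ // The writes below hand the boot ROM the DMEM offset of the HS signature, the engine ID mask, + // the ucode ID, and the signature algorithm (RSA3K) to use for that validation.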
+ { + kflcnRiscvRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON2_FALCON_BROM_PARAADDR(0), pUcode->hsSigDmemAddr); + + kflcnRiscvRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON2_FALCON_BROM_ENGIDMASK, pUcode->engineIdMask); + + kflcnRiscvRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID, + DRF_NUM(_PFALCON2_FALCON, _BROM_CURR_UCODE_ID, _VAL, pUcode->ucodeId)); + + kflcnRiscvRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON2_FALCON_MOD_SEL, + DRF_NUM(_PFALCON2_FALCON, _MOD_SEL, _ALGO, NV_PFALCON2_FALCON_MOD_SEL_ALGO_RSA3K)); + } + + // Set BOOTVEC to start of secure code. + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_BOOTVEC, pUcode->imemVa); + + // Write mailboxes if requested. + if (pMailbox0 != NULL) + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX0, *pMailbox0); + if (pMailbox1 != NULL) + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX1, *pMailbox1); + + // Start CPU now. + kflcnStartCpu_HAL(pGpu, pKernelFlcn); + + // Wait for completion. + status = kflcnWaitForHalt_HAL(pGpu, pKernelFlcn, GPU_TIMEOUT_DEFAULT, 0); + + // Read mailboxes if requested. + if (pMailbox0 != NULL) + *pMailbox0 = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX0); + if (pMailbox1 != NULL) + *pMailbox1 = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX1); + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_ga100.c b/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_ga100.c new file mode 100644 index 000000000..6c7062e24 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_ga100.c @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides GA100+ specific KernelGsp HAL implementations. + */ + +#include "gpu/gsp/kernel_gsp.h" +#include "published/ampere/ga100/dev_fuse.h" + + +/*! + * Check if the GSP is in debug mode + * + * @return whether the GSP is in debug mode or not + */ +NvBool +kgspIsDebugModeEnabled_GA100 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU32 data; + + data = GPU_REG_RD32(pGpu, NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS); + + return FLD_TEST_DRF(_FUSE, _OPT_SECURE_GSP_DEBUG_DIS, _DATA, _NO, data); +} + +/*! 
+ * Returns the GSP fuse version of the provided ucode id (1-indexed) + * + * @param pGpu OBJGPU pointer + * @param pKernelGsp KernelGsp pointer + * @param[in] ucodeId Ucode Id (1-indexed) to read fuse for + */ +NvU32 +kgspReadUcodeFuseVersion_GA100 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + const NvU32 ucodeId +) +{ + NvU32 fuseVal = 0; + NvU32 index = ucodeId - 1; // adjust to 0-indexed + + // TODO: Bug 3519329: switch to indexed register once available + // if (index < NV_FUSE_OPT_FPF_GSP_UCODE_VERSION__SIZE_1) + if (index < 16) + { + // fuseVal = GPU_REG_IDX_RD_DRF(pGpu, _FUSE, _OPT_FPF_GSP_UCODE_VERSION, index, _DATA); + fuseVal = GPU_REG_RD32(pGpu, NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION + (4 * index)); + + if (fuseVal) + { + HIGHESTBITIDX_32(fuseVal); + fuseVal = fuseVal + 1; + } + } + + return fuseVal; +} diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_ga102.c b/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_ga102.c new file mode 100644 index 000000000..7327415c6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/arch/ampere/kernel_gsp_ga102.c @@ -0,0 +1,338 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides GA102+ specific KernelGsp HAL implementations. + */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/bus/kern_bus.h" +#include "rmgspseq.h" +#include "vgpu/rpc.h" + +#include "published/ampere/ga102/dev_falcon_v4.h" +#include "published/ampere/ga102/dev_riscv_pri.h" +#include "published/ampere/ga102/dev_falcon_second_pri.h" +#include "published/ampere/ga102/dev_gsp.h" +#include "published/ampere/ga102/dev_gsp_addendum.h" + + +#define RISCV_BR_ADDR_ALIGNMENT (8) + +void +kgspConfigureFalcon_GA102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + KernelFalconEngineConfig falconConfig; + + portMemSet(&falconConfig, 0, sizeof(falconConfig)); + + falconConfig.registerBase = DRF_BASE(NV_PGSP); + falconConfig.riscvRegisterBase = NV_FALCON2_GSP_BASE; + falconConfig.fbifBase = NV_PGSP_FBIF_BASE; + falconConfig.bBootFromHs = NV_TRUE; + falconConfig.pmcEnableMask = 0; + falconConfig.bIsPmcDeviceEngine = NV_FALSE; + falconConfig.physEngDesc = ENG_GSP; + + kflcnConfigureEngine(pGpu, staticCast(pKernelGsp, KernelFalcon), &falconConfig); +} + +/*! + * Reset RISCV using secure reset + * + * @return NV_OK if the RISCV reset was successful. 
+ * Appropriate NV_ERR_xxx value otherwise. + */ +static NV_STATUS +_kgspResetIntoRiscv +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + KernelFalcon *pKernelFlcn = staticCast(pKernelGsp, KernelFalcon); + NV_ASSERT_OK_OR_RETURN(kflcnPreResetWait(pGpu, pKernelFlcn)); + GPU_FLD_WR_DRF_DEF(pGpu, _PGSP, _FALCON_ENGINE, _RESET, _TRUE); + GPU_FLD_WR_DRF_DEF(pGpu, _PGSP, _FALCON_ENGINE, _RESET, _FALSE); + + NV_ASSERT_OK_OR_RETURN(kflcnWaitForResetToFinish_HAL(pGpu, pKernelFlcn)); + + kflcnRiscvProgramBcr_HAL(pGpu, pKernelFlcn, NV_TRUE); + + return NV_OK; +} + +/*! + * Boot GSP-RM. + * + * This routine handles the following: + * - prepares boot binary image + * - prepares RISCV core to run GSP-RM + * - prepares libos initialization args + * - prepares GSP-RM initialization message + * - starts the RISCV core and passes control to boot binary image + * - waits for GSP-RM to complete initialization + * + * Note that boot binary and GSP-RM images have already been placed + * in fbmem by kgspCalculateFbLayout_HAL(). + * + * Note that this routine is based on flcnBootstrapRiscvOS_GA102(). + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelGsp GSP object pointer + * @param[in] pGspFw GSP_FIRMWARE image pointer + * + * @return NV_OK if GSP-RM RISCV boot was successful. + * Appropriate NV_ERR_xxx value otherwise. + */ +NV_STATUS +kgspBootstrapRiscvOSEarly_GA102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + GSP_FIRMWARE *pGspFw +) +{ + KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon); + NV_STATUS status = NV_OK; + + // Only for GSP client builds + if (!IS_GSP_CLIENT(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "IS_GSP_CLIENT is not set.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (!kflcnIsRiscvCpuEnabled_HAL(pGpu, pKernelFalcon)) + { + NV_PRINTF(LEVEL_ERROR, "RISC-V core is not enabled.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + kgspPopulateGspRmInitArgs(pGpu, pKernelGsp, NULL); + + { + // Execute FWSEC to setup FRTS if we have a FRTS region + if (kgspGetFrtsSize_HAL(pGpu, pKernelGsp) > 0) + { + kflcnReset_HAL(pGpu, pKernelFalcon); + + NV_ASSERT_OK_OR_GOTO(status, + kgspExecuteFwsecFrts_HAL(pGpu, pKernelGsp, pKernelGsp->pFwsecUcode, + pKernelGsp->pWprMeta->frtsOffset), exit); + } + } + + NV_ASSERT_OK_OR_RETURN(_kgspResetIntoRiscv(pGpu, pKernelGsp)); + + // + // Stuff the message queue with async init messages that will be run + // before OBJGPU is created. 
+ // + NV_RM_RPC_GSP_SET_SYSTEM_INFO(pGpu, status); + if (status != NV_OK) + { + NV_ASSERT_OK_FAILED("NV_RM_RPC_GSP_SET_SYSTEM_INFO", status); + goto exit; + } + + NV_RM_RPC_SET_REGISTRY(pGpu, status); + if (status != NV_OK) + { + NV_ASSERT_OK_FAILED("NV_RM_RPC_SET_REGISTRY", status); + goto exit; + } + + // First times setup of libos init args + kgspSetupLibosInitArgs(pGpu, pKernelGsp); + + // Fb configuration is done so setup libos arg list + kgspProgramLibosBootArgsAddr_HAL(pGpu, pKernelGsp); + + RM_RISCV_UCODE_DESC *pRiscvDesc = pKernelGsp->pGspRmBootUcodeDesc; + + { + status = kgspExecuteBooterLoad_HAL(pGpu, pKernelGsp, + memdescGetPhysAddr(pKernelGsp->pWprMetaDescriptor, AT_GPU, 0)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter Load (ucode for initial boot): 0x%x\n", status); + goto exit; + } + } + + // Program FALCON_OS + kflcnRegWrite_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_OS, pRiscvDesc->appVersion); + + // Ensure the CPU is started + if (kflcnIsRiscvActive_HAL(pGpu, pKernelFalcon)) + { + NV_PRINTF(LEVEL_INFO, "GSP ucode loaded and RISCV started.\n"); + } + else + { + NV_ASSERT_FAILED("Failed to boot GSP"); + status = NV_ERR_NOT_READY; + goto exit; + } + + NV_PRINTF(LEVEL_INFO, "Waiting for GSP fw RM to be ready...\n"); + + // Link the status queue. + NV_ASSERT_OK_OR_GOTO(status, GspStatusQueueInit(pGpu, &pKernelGsp->pRpc->pMessageQueueInfo), + exit); + + NV_ASSERT_OK_OR_GOTO(status, kgspWaitForRmInitDone(pGpu, pKernelGsp), + exit); + + NV_PRINTF(LEVEL_INFO, "GSP FW RM ready.\n"); + +exit: + NV_ASSERT(status == NV_OK); + + return status; +} + +void +kgspGetGspRmBootUcodeStorage_GA102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + BINDATA_STORAGE **ppBinStorageImage, + BINDATA_STORAGE **ppBinStorageDesc +) +{ + const BINDATA_ARCHIVE *pBinArchive = kgspGetBinArchiveGspRmBoot_HAL(pKernelGsp); + + if (kgspIsDebugModeEnabled(pGpu, pKernelGsp)) + { + *ppBinStorageImage = (BINDATA_STORAGE *)bindataArchiveGetStorage(pBinArchive, "ucode_image_dbg"); + *ppBinStorageDesc = (BINDATA_STORAGE *)bindataArchiveGetStorage(pBinArchive, "ucode_desc_dbg"); + } + else + { + *ppBinStorageImage = (BINDATA_STORAGE *)bindataArchiveGetStorage(pBinArchive, "ucode_image_prod"); + *ppBinStorageDesc = (BINDATA_STORAGE *)bindataArchiveGetStorage(pBinArchive, "ucode_desc_prod"); + } +} + +/*! + * Execute GSP sequencer operation + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelGsp KernelGsp object pointer + * @param[in] opCode Sequencer opcode + * @param[in] pPayload Pointer to payload + * @param[in] payloadSize Size of payload in bytes + * + * @return NV_OK if the sequencer operation was successful. + * Appropriate NV_ERR_xxx value otherwise. 
+ */ +NV_STATUS +kgspExecuteSequencerCommand_GA102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + NvU32 opCode, + NvU32 *pPayload, + NvU32 payloadSize +) +{ + NV_STATUS status = NV_OK; + KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon); + + switch (opCode) + { + case GSP_SEQ_BUF_OPCODE_CORE_RESET: + { + NV_ASSERT_OR_RETURN(payloadSize == 0, NV_ERR_INVALID_ARGUMENT); + + // Reset falcon + kflcnEnable_HAL(pGpu, pKernelFalcon, NV_FALSE); + kflcnEnable_HAL(pGpu, pKernelFalcon, NV_TRUE); + + kflcnDisableCtxReq_HAL(pGpu, pKernelFalcon); + break; + } + case GSP_SEQ_BUF_OPCODE_CORE_START: + { + NV_ASSERT_OR_RETURN(payloadSize == 0, NV_ERR_INVALID_ARGUMENT); + + kflcnStartCpu_HAL(pGpu, pKernelFalcon); + break; + } + case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: + { + NV_ASSERT_OR_RETURN(payloadSize == 0, NV_ERR_INVALID_ARGUMENT); + + // Wait for the bootloader to complete execution. + status = kflcnWaitForHalt_HAL(pGpu, pKernelFalcon, GPU_TIMEOUT_DEFAULT, 0); + break; + } + case GSP_SEQ_BUF_OPCODE_CORE_RESUME: + { + RM_RISCV_UCODE_DESC *pRiscvDesc = pKernelGsp->pGspRmBootUcodeDesc; + + NV_ASSERT_OR_RETURN(payloadSize == 0, NV_ERR_INVALID_ARGUMENT); + + { + NV_ASSERT_OK_OR_RETURN(_kgspResetIntoRiscv(pGpu, pKernelGsp)); + kgspProgramLibosBootArgsAddr_HAL(pGpu, pKernelGsp); + + status = kgspExecuteBooterReload_HAL(pGpu, pKernelGsp); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter Reload (ucode for resume from sequencer): 0x%x\n", status); + break; + } + } + + // Program FALCON_OS + kflcnRegWrite_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_OS, pRiscvDesc->appVersion); + + // Ensure the CPU is started + if (kflcnIsRiscvActive_HAL(pGpu, pKernelFalcon)) + { + NV_PRINTF(LEVEL_INFO, "GSP ucode loaded and RISCV started.\n"); + } + else + { + NV_ASSERT_FAILED("Failed to boot GSP"); + status = NV_ERR_NOT_READY; + } + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_booter_tu102.c b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_booter_tu102.c new file mode 100644 index 000000000..65e1df370 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_booter_tu102.c @@ -0,0 +1,190 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/gpu.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/nvdec/kernel_nvdec.h" +#include "gpu/sec2/kernel_sec2.h" + +#include "published/turing/tu102/dev_fb.h" // for NV_PFB_PRI_MMU_WPR2_ADDR_HI +#include "published/turing/tu102/dev_falcon_v4.h" + + +static NV_STATUS +s_executeBooterUcode_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + KernelGspFlcnUcode *pBooterUcode, + KernelFalcon *pKernelFlcn, + const NvU32 mailbox0Arg, + const NvU32 mailbox1Arg +) +{ + NV_STATUS status; + NvU32 mailbox0, mailbox1; + + NV_ASSERT_OR_RETURN(pBooterUcode != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pKernelFlcn != NULL, NV_ERR_INVALID_STATE); + + mailbox0 = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX0); + mailbox1 = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX1); + + NV_PRINTF(LEVEL_INFO, "before Booter mailbox0 0x%08x, mailbox1 0x%08x\n", mailbox0, mailbox1); + + mailbox0 = mailbox0Arg; + mailbox1 = mailbox1Arg; + + NV_PRINTF(LEVEL_INFO, "starting Booter with mailbox0 0x%08x, mailbox1 0x%08x\n", mailbox0, mailbox1); + + status = kgspExecuteHsFalcon_HAL(pGpu, pKernelGsp, + pBooterUcode, pKernelFlcn, + &mailbox0, &mailbox1); + + NV_PRINTF(LEVEL_INFO, "after Booter mailbox0 0x%08x, mailbox1 0x%08x\n", mailbox0, mailbox1); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter: status 0x%x, mailbox 0x%x\n", status, mailbox0); + return status; + } + + if (mailbox0 != 0) + { + NV_PRINTF(LEVEL_ERROR, "Booter failed with non-zero error code: 0x%x\n", mailbox0); + return NV_ERR_GENERIC; + } + + return status; +} + +NV_STATUS +kgspExecuteBooterLoad_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + const NvU64 gspFwWprMetaOffset +) +{ + NV_STATUS status; + NvU32 mailbox0, mailbox1; + + KernelSec2 *pKernelSec2 = GPU_GET_KERNEL_SEC2(pGpu); + + NV_ASSERT_OR_RETURN(pKernelGsp->pBooterLoadUcode != NULL, NV_ERR_INVALID_STATE); + + // Provide gspFwWprMetaOffset in falcon SEC mailboxes 0 (low 32 bits) and 1 (high 32 bits) + mailbox0 = NvU64_LO32(gspFwWprMetaOffset); + mailbox1 = NvU64_HI32(gspFwWprMetaOffset); + + NV_PRINTF(LEVEL_INFO, "executing Booter Load, gspFwWprMetaOffset 0x%llx\n", + gspFwWprMetaOffset); + + kflcnReset_HAL(pGpu, staticCast(pKernelSec2, KernelFalcon)); + + status = s_executeBooterUcode_TU102(pGpu, pKernelGsp, + pKernelGsp->pBooterLoadUcode, + staticCast(pKernelSec2, KernelFalcon), + mailbox0, mailbox1); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter Load: 0x%x\n", status); + return status; + } + + return status; +} + +NV_STATUS +kgspExecuteBooterReload_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NV_STATUS status; + + KernelNvdec *pKernelNvdec = GPU_GET_KERNEL_NVDEC(pGpu); + + NV_PRINTF(LEVEL_INFO, "executing Booter Reload\n"); + NV_ASSERT_OR_RETURN(pKernelGsp->pBooterReloadUcode != NULL, NV_ERR_INVALID_STATE); + + kflcnReset_HAL(pGpu, staticCast(pKernelNvdec, KernelFalcon)); + status = s_executeBooterUcode_TU102(pGpu, pKernelGsp, + pKernelGsp->pBooterReloadUcode, + staticCast(pKernelNvdec, KernelFalcon), + 0xFF, 0xFF); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter Reload: 0x%x\n", status); + return status; + } + + return status; +} + +NV_STATUS +kgspExecuteBooterUnloadIfNeeded_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NV_STATUS status; + KernelSec2 *pKernelSec2 = GPU_GET_KERNEL_SEC2(pGpu); + + // skip actually executing Booter Unload if WPR2 is not up + NvU32 data = 
GPU_REG_RD32(pGpu, NV_PFB_PRI_MMU_WPR2_ADDR_HI); + NvU32 wpr2HiVal = DRF_VAL(_PFB, _PRI_MMU_WPR2_ADDR_HI, _VAL, data); + if (wpr2HiVal == 0) + { + NV_PRINTF(LEVEL_INFO, "skipping executing Booter Unload as WPR is not up\n"); + return NV_OK; + } + + NV_PRINTF(LEVEL_INFO, "executing Booter Unload\n"); + NV_ASSERT_OR_RETURN(pKernelGsp->pBooterUnloadUcode != NULL, NV_ERR_INVALID_STATE); + + kflcnReset_HAL(pGpu, staticCast(pKernelSec2, KernelFalcon)); + status = s_executeBooterUcode_TU102(pGpu, pKernelGsp, + pKernelGsp->pBooterUnloadUcode, + staticCast(pKernelSec2, KernelFalcon), + 0xFF, 0xFF); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter Unload: 0x%x\n", status); + return status; + } + + data = GPU_REG_RD32(pGpu, NV_PFB_PRI_MMU_WPR2_ADDR_HI); + wpr2HiVal = DRF_VAL(_PFB, _PRI_MMU_WPR2_ADDR_HI, _VAL, data); + if (wpr2HiVal > 0) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter Unload: WPR2 is still up\n"); + return NV_ERR_GENERIC; + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_falcon_tu102.c b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_falcon_tu102.c new file mode 100644 index 000000000..14a27e9f4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_falcon_tu102.c @@ -0,0 +1,365 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides TU102+ specific KernelGsp HAL implementations related to + * execution of Falcon cores. + */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/gpu.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/sec2/kernel_sec2.h" +#include "gpu/nvdec/kernel_nvdec.h" +#include "rmflcnbl.h" + +#include "published/turing/tu102/dev_falcon_v4.h" +#include "published/turing/tu102/dev_fbif_v4.h" + +/*! + * Copy sizeBytes from pSrc to DMEM offset dmemDest using DMEM access port 0. 
+ * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelFlcn KernelFalcon pointer + * @param[in] dmemDest Destination in DMEM + * @param[in] pSrc Desired DMEM contents + * @param[in] sizeInBytes Number of bytes to copy + */ +static NV_STATUS +s_dmemCopyTo_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 dmemDest, + const NvU8 *pSrc, + NvU32 sizeBytes +) +{ + NvU32 numWords; + NvU32 wordIdx; + NvU32 *pSrcWords; + NvU32 reg32; + + if (sizeBytes == 0) + { + return NV_OK; + } + + NV_ASSERT_OR_RETURN(RM_IS_ALIGNED(dmemDest, FLCN_DMEM_ACCESS_ALIGNMENT), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(RM_IS_ALIGNED(sizeBytes, FLCN_DMEM_ACCESS_ALIGNMENT), NV_ERR_INVALID_ARGUMENT); + + numWords = sizeBytes >> 2; + pSrcWords = (NvU32 *) pSrc; + + // Prepare DMEMC register + reg32 = kflcnMaskDmemAddr_HAL(pGpu, pKernelFlcn, dmemDest); + reg32 = FLD_SET_DRF_NUM(_PFALCON, _FALCON_DMEMC, _AINCW, 0x1, reg32); + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMEMC(0), reg32); + + for (wordIdx = 0; wordIdx < numWords; wordIdx++) + { + // Write DMEM data + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_DMEMD(0), pSrcWords[wordIdx]); + } + + return NV_OK; +} + +/*! + * Copy sizeBytes from pSrc to IMEM offset imemDest using IMEM access port 0. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelFlcn KernelFalcon pointer + * @param[in] imemDest Destination in IMEM + * @param[in] pSrc Desired IMEM contents + * @param[in] sizeInBytes Number of bytes to copy + * @param[in] bSecure Whether IMEM copied should be marked secure + * @param[in] tag Desired IMEM tag + */ +static NV_STATUS +s_imemCopyTo_TU102 +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + NvU32 imemDest, + const NvU8 *pSrc, + NvU32 sizeBytes, + NvBool bSecure, + NvU32 tag +) +{ + NvU32 numWords; + NvU32 wordIdx; + NvU32 *pSrcWords; + NvU32 reg32; + + if (sizeBytes == 0) + { + return NV_OK; + } + + // Require block alignment on IMEM addr (due to tagging at block granularity) + NV_ASSERT_OR_RETURN(RM_IS_ALIGNED(imemDest, FLCN_BLK_ALIGNMENT), NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OR_RETURN(pSrc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(RM_IS_ALIGNED(sizeBytes, FLCN_IMEM_ACCESS_ALIGNMENT), NV_ERR_INVALID_ARGUMENT); + + numWords = sizeBytes >> 2; + pSrcWords = (NvU32 *) pSrc; + + // Prepare IMEMC register + reg32 = kflcnMaskImemAddr_HAL(pGpu, pKernelFlcn, imemDest); + reg32 = FLD_SET_DRF_NUM(_PFALCON_FALCON, _IMEMC, _AINCW, 0x1, reg32); + reg32 = FLD_SET_DRF_NUM(_PFALCON_FALCON, _IMEMC, _SECURE, bSecure, reg32); + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IMEMC(0), reg32); + + tag = tag >> 8; + for (wordIdx = 0; wordIdx < numWords; wordIdx++) + { + // Tag blocks (at block granularity) + if ((wordIdx & ((1u << (FALCON_IMEM_BLKSIZE2 - 2)) - 1)) == 0) + { + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IMEMT(0), + DRF_NUM(_PFALCON_FALCON, _IMEMT, _TAG, tag)); + tag++; + } + + // Write IMEM data + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_IMEMD(0), + DRF_NUM(_PFALCON_FALCON, _IMEMD, _DATA, pSrcWords[wordIdx])); + } + + return NV_OK; +} + +static NV_STATUS +s_prepareHsFalconDirect +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + KernelGspFlcnUcodeBootDirect *pUcode +) +{ + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pUcode->pImage != NULL, NV_ERR_INVALID_ARGUMENT); + + kflcnDisableCtxReq_HAL(pGpu, pKernelFlcn); + + // Copy non-secure IMEM code + NV_ASSERT_OK_OR_RETURN( + 
s_imemCopyTo_TU102(pGpu, pKernelFlcn, + 0, + pUcode->pImage + pUcode->imemNsPa, + pUcode->imemNsSize, + NV_FALSE, + pUcode->imemNsPa)); + + // Copy secure IMEM code after non-secure block + NV_ASSERT_OK_OR_RETURN( + s_imemCopyTo_TU102(pGpu, pKernelFlcn, + NV_ALIGN_UP(pUcode->imemNsSize, FLCN_BLK_ALIGNMENT), + pUcode->pImage + pUcode->imemSecPa, + pUcode->imemSecSize, + NV_TRUE, + pUcode->imemSecPa)); + + // Load DMEM (note: signatures must already be patched) + NV_ASSERT_OK_OR_RETURN( + s_dmemCopyTo_TU102(pGpu, pKernelFlcn, + pUcode->dmemPa, + pUcode->pImage + pUcode->dataOffset, + pUcode->dmemSize)); + + // Set BOOTVEC to start of non-secure code + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_BOOTVEC, 0); + + return status; +} + +static NV_STATUS +s_prepareHsFalconWithLoader +( + OBJGPU *pGpu, + KernelFalcon *pKernelFlcn, + KernelGspFlcnUcodeBootWithLoader *pUcode +) +{ + NV_STATUS status = NV_OK; + RM_FLCN_BL_DMEM_DESC blDmemDesc; + NvU64 ucodePACode; + NvU64 ucodePAData; + + NvU32 data; + NvU32 imemDstBlk; + NvU32 virtAddr; + NvU32 blSize; + + KernelSec2 *pKernelSec2 = GPU_GET_KERNEL_SEC2(pGpu); + const RM_FLCN_BL_DESC *pBlUcDesc = NULL; + const NvU8 *pBlImg = NULL; + + NV_ASSERT_OR_RETURN(pUcode->pCodeMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pUcode->pDataMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OR_RETURN(pKernelSec2 != NULL, NV_ERR_INVALID_STATE); + + // Note: adapted from _vbiosFwseclicCmdOffloadToFlcn + ucodePACode = memdescGetPhysAddr(pUcode->pCodeMemDesc, AT_GPU, 0); + ucodePAData = memdescGetPhysAddr(pUcode->pDataMemDesc, AT_GPU, 0); + + blDmemDesc.signature[0] = 0; + blDmemDesc.signature[1] = 0; + blDmemDesc.signature[2] = 0; + blDmemDesc.signature[3] = 0; + blDmemDesc.ctxDma = 4; // dmaIdx for PHYS_SYS_NCOH, consumed by the generic falcon boot loader + RM_FLCN_U64_PACK(&blDmemDesc.codeDmaBase, &ucodePACode); + + blDmemDesc.nonSecureCodeOff = pUcode->imemNsPa; + blDmemDesc.nonSecureCodeSize = pUcode->imemNsSize; + + blDmemDesc.secureCodeOff = pUcode->imemSecPa; + blDmemDesc.secureCodeSize = pUcode->imemSecSize; + + blDmemDesc.codeEntryPoint = 0; + + RM_FLCN_U64_PACK(&blDmemDesc.dataDmaBase, &ucodePAData); + blDmemDesc.dataSize = pUcode->dmemSize; + + // Get the generic BL image and descriptor from SEC2 + NV_ASSERT_OK_OR_RETURN(ksec2GetGenericBlUcode_HAL(pGpu, pKernelSec2, &pBlUcDesc, &pBlImg)); + blSize = NV_ALIGN_UP(pBlUcDesc->blImgHeader.blCodeSize, FLCN_BLK_ALIGNMENT); + + kflcnDisableCtxReq_HAL(pGpu, pKernelFlcn); + + // Program TRANSCFG to fetch the DMA data + data = GPU_REG_RD32(pGpu, pKernelFlcn->fbifBase + NV_PFALCON_FBIF_TRANSCFG(blDmemDesc.ctxDma)); + data = FLD_SET_DRF(_PFALCON, _FBIF_TRANSCFG, _TARGET, _COHERENT_SYSMEM, data); + data = FLD_SET_DRF(_PFALCON, _FBIF_TRANSCFG, _MEM_TYPE, _PHYSICAL, data); + GPU_REG_WR32(pGpu, pKernelFlcn->fbifBase + NV_PFALCON_FBIF_TRANSCFG(blDmemDesc.ctxDma), data); + + // Copy dmem desc to DMEM offset 0 + NV_ASSERT_OK_OR_RETURN( + s_dmemCopyTo_TU102(pGpu, pKernelFlcn, + 0, + (NvU8 *) &blDmemDesc, + sizeof(RM_FLCN_BL_DMEM_DESC))); + + // Compute location of bootloader at top of IMEM + { + NvU32 imemSizeBlk = DRF_VAL(_PFALCON, _FALCON_HWCFG, _IMEM_SIZE, + kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_HWCFG)); + imemDstBlk = imemSizeBlk - blSize / (1u << FALCON_IMEM_BLKSIZE2); + } + + // Copy bootloader to top of IMEM + virtAddr = pBlUcDesc->blStartTag << 8; + NV_ASSERT_OK_OR_RETURN( + s_imemCopyTo_TU102(pGpu, pKernelFlcn, + imemDstBlk << FALCON_IMEM_BLKSIZE2, + pBlImg, + blSize, + 
NV_FALSE, + virtAddr)); + + // Set BOOTVEC to location of bootloader + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_BOOTVEC, virtAddr); + + return status; +} + +/*! + * Execute the HS falcon ucode provided in pFlcnUcode on the falcon engine + * represented by pKernelFlcn and wait for its completion. + * + * For _TU102, pFlcnUcode must be of the DIRECT or WITH_LOADER variant. + * + * Note: callers are expected to reset pKernelFlcn before calling this + * function. + + * @param[in] pGpu GPU object pointer + * @param[in] pKernelGsp KernelGsp object pointer + * @param[in] pFlcnUcode Falcon ucode to execute + * @param[in] pKernelFlcn KernelFalcon engine to execute on + * @param[inout] pMailbox0 Pointer to value of MAILBOX0 to provide/read (or NULL) + * @param[inout] pMailbox0 Pointer to value of MAILBOX1 to provide/read (or NULL) + */ +NV_STATUS +kgspExecuteHsFalcon_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + KernelGspFlcnUcode *pFlcnUcode, + KernelFalcon *pKernelFlcn, + NvU32 *pMailbox0, + NvU32 *pMailbox1 +) +{ + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pFlcnUcode != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pKernelFlcn != NULL, NV_ERR_INVALID_STATE); + + NV_ASSERT_OR_RETURN(!pKernelFlcn->bBootFromHs, NV_ERR_NOT_SUPPORTED); + + // Prepare IMEM, DMEM, BOOTVEC, etc. as appropriate for boot type + if (pFlcnUcode->bootType == KGSP_FLCN_UCODE_BOOT_WITH_LOADER) + { + status = s_prepareHsFalconWithLoader(pGpu, pKernelFlcn, &pFlcnUcode->ucodeBootWithLoader); + } + else if (pFlcnUcode->bootType == KGSP_FLCN_UCODE_BOOT_DIRECT) + { + status = s_prepareHsFalconDirect(pGpu, pKernelFlcn, &pFlcnUcode->ucodeBootDirect); + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + + if (status != NV_OK) + { + return status; + } + + // Write mailboxes if requested + if (pMailbox0 != NULL) + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX0, *pMailbox0); + if (pMailbox1 != NULL) + kflcnRegWrite_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX1, *pMailbox1); + + // Start CPU now + kflcnStartCpu_HAL(pGpu, pKernelFlcn); + + // Wait for completion + status = kflcnWaitForHalt_HAL(pGpu, pKernelFlcn, GPU_TIMEOUT_DEFAULT, 0); + + // Read mailboxes if requested + if (pMailbox0 != NULL) + *pMailbox0 = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX0); + if (pMailbox1 != NULL) + *pMailbox1 = kflcnRegRead_HAL(pGpu, pKernelFlcn, NV_PFALCON_FALCON_MAILBOX1); + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_frts_tu102.c b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_frts_tu102.c new file mode 100644 index 000000000..fb76d5bb8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_frts_tu102.c @@ -0,0 +1,442 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * KernelGsp functions and helpers for executing FWSEC ucode for FRTS. + * + * Note: Other than those suffixed by a chip name, functions here + * do not actually need to be HAL'd; we are simply keeping them all in + * one file to try to keep it self-contained. + */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "published/turing/tu102/dev_bus.h" // for NV_PBUS_VBIOS_SCRATCH +#include "published/turing/tu102/dev_fb.h" // for NV_PFB_PRI_MMU_WPR2_ADDR_HI +#include "published/turing/tu102/dev_gc6_island_addendum.h" // for NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K + +/*! + * Get size of FRTS data. + * + * Currently, FRTS data size is hard-coded to be 1MB + * (if FRTS exists for the chip). + */ +NvU32 +kgspGetFrtsSize_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU32 sizeIn4k = NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K; + return sizeIn4k * 0x1000; +} + + +// --------------------------------------------------------------------------- +// Structures and defines for FWSEC commands +// --------------------------------------------------------------------------- + +typedef struct +{ + NvU8 version; + NvU8 headerSize; + NvU8 entrySize; + NvU8 entryCount; +} FALCON_APPLICATION_INTERFACE_HEADER_V1; + +typedef struct +{ + NvU32 id; + NvU32 dmemOffset; +} FALCON_APPLICATION_INTERFACE_ENTRY_V1; + +#define FALCON_APPLICATION_INTERFACE_ENTRY_ID_DMEMMAPPER (0x4) + +typedef struct +{ + NvU32 signature; + NvU16 version; + NvU16 size; + NvU32 cmd_in_buffer_offset; + NvU32 cmd_in_buffer_size; + NvU32 cmd_out_buffer_offset; + NvU32 cmd_out_buffer_size; + NvU32 nvf_img_data_buffer_offset; + NvU32 nvf_img_data_buffer_size; + NvU32 printfBufferHdr; + NvU32 ucode_build_time_stamp; + NvU32 ucode_signature; + NvU32 init_cmd; + NvU32 ucode_feature; + NvU32 ucode_cmd_mask0; + NvU32 ucode_cmd_mask1; + NvU32 multiTgtTbl; +} FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3; + +#define FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3_CMD_FRTS (0x15) + +typedef struct +{ + NvU32 version; + NvU32 size; + NvU64 gfwImageOffset; + NvU32 gfwImageSize; + NvU32 flags; +} FWSECLIC_READ_VBIOS_DESC; + +#define FWSECLIC_READ_VBIOS_STRUCT_FLAGS (2) + +typedef struct +{ + NvU32 version; + NvU32 size; + NvU32 frtsRegionOffset4K; + NvU32 frtsRegionSize; + NvU32 frtsRegionMediaType; +} FWSECLIC_FRTS_REGION_DESC; + +#define FWSECLIC_FRTS_REGION_MEDIA_FB (2) +#define FWSECLIC_FRTS_REGION_SIZE_1MB_IN_4K (0x100) + +typedef struct +{ + FWSECLIC_READ_VBIOS_DESC readVbiosDesc; + FWSECLIC_FRTS_REGION_DESC frtsRegionDesc; +} FWSECLIC_FRTS_CMD; + +#define NV_VBIOS_FWSECLIC_SCRATCH_INDEX_0E 0x0E +#define NV_VBIOS_FWSECLIC_FRTS_ERR_CODE 31:16 +#define NV_VBIOS_FWSECLIC_FRTS_ERR_CODE_NONE 0x00000000 + + +// --------------------------------------------------------------------------- +// Functions for preparing and executing FWSEC commands +// --------------------------------------------------------------------------- + +/*! 
+ * Patch DMEM of FWSEC for FRTS command + * + * @param[inout] pMappedData Pointer to mapped DMEM of FWSEC + * @param[in] mappedDataSize Number of bytes valid under pMappedData + * @param[in] pFrtsCmd FRTS command to patch in + * @param[in] interfaceOffset Interface offset given by VBIOS for FWSEC + */ +NV_STATUS +s_vbiosPatchFrtsInterfaceData +( + NvU8 *pMappedData, // inout + const NvU32 mappedDataSize, + const FWSECLIC_FRTS_CMD *pFrtsCmd, + const NvU32 interfaceOffset +) +{ + FALCON_APPLICATION_INTERFACE_HEADER_V1 *pIntFaceHdr = NULL; + FALCON_APPLICATION_INTERFACE_ENTRY_V1 *pIntFaceEntry = NULL; + FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3 *pDmemMapper = NULL; + + NvBool bSafe; + NvU32 index; + + NvU32 curOffset; + NvU32 nextOffset; + + if (interfaceOffset >= mappedDataSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(interfaceOffset, sizeof(*pIntFaceHdr), &nextOffset); + if (!bSafe || nextOffset > mappedDataSize) + { + return NV_ERR_INVALID_OFFSET; + } + + pIntFaceHdr = (FALCON_APPLICATION_INTERFACE_HEADER_V1 *) (pMappedData + interfaceOffset); + if (pIntFaceHdr->entryCount < 2) + { + NV_PRINTF(LEVEL_ERROR, "too few interface entires found for FRTS\n"); + return NV_ERR_INVALID_DATA; + } + + curOffset = nextOffset; + for (index = 0; index < pIntFaceHdr->entryCount; index++) + { + if (curOffset >= mappedDataSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(curOffset, sizeof(*pIntFaceEntry), &nextOffset); + if (!bSafe || nextOffset > mappedDataSize) + { + return NV_ERR_INVALID_OFFSET; + } + + pIntFaceEntry = (FALCON_APPLICATION_INTERFACE_ENTRY_V1 *) (pMappedData + curOffset); + curOffset = nextOffset; + + if (pIntFaceEntry->id == FALCON_APPLICATION_INTERFACE_ENTRY_ID_DMEMMAPPER) + { + NvU32 dmemMapperMaxOffset; + + if (pIntFaceEntry->dmemOffset >= mappedDataSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(pIntFaceEntry->dmemOffset, sizeof(*pDmemMapper), + &dmemMapperMaxOffset); + if (!bSafe || dmemMapperMaxOffset > mappedDataSize) + { + return NV_ERR_INVALID_OFFSET; + } + + pDmemMapper = (FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3 *) + (pMappedData + pIntFaceEntry->dmemOffset); + } + } + + if (!pDmemMapper) + { + NV_PRINTF(LEVEL_ERROR, "failed to find required interface entry for FRTS\n"); + return NV_ERR_INVALID_DATA; + } + + pDmemMapper->init_cmd = FALCON_APPLICATION_INTERFACE_DMEM_MAPPER_V3_CMD_FRTS; + + if (pDmemMapper->cmd_in_buffer_size < sizeof(FWSECLIC_FRTS_CMD)) + { + NV_PRINTF(LEVEL_ERROR, "insufficient cmd buffer for FRTS interface\n"); + } + + if (pDmemMapper->cmd_in_buffer_offset >= mappedDataSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(pIntFaceEntry->dmemOffset, sizeof(*pFrtsCmd), &nextOffset); + if (!bSafe || nextOffset > mappedDataSize) + { + return NV_ERR_INVALID_OFFSET; + } + + portMemCopy(pMappedData + pDmemMapper->cmd_in_buffer_offset, sizeof(*pFrtsCmd), + pFrtsCmd, sizeof(*pFrtsCmd)); + + return NV_OK; +} + +/*! + * Excecute FWSEC for FRTS and wait for completion. 
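s_vbiosPatchFrtsInterfaceData above walks the falcon application interface table entry by entry and validates every offset with portSafeAddU32 before dereferencing into the mapped DMEM, so a corrupt table cannot push an access past the mapping. A standalone sketch of that overflow-safe bounds check, using a compiler builtin in place of the portSafe helper (range_ok is a hypothetical name, and __builtin_add_overflow assumes GCC or Clang):

    #include <stdbool.h>
    #include <stdint.h>

    /* True only if [offset, offset + size) fits inside a buffer of bufSize
     * bytes, rejecting 32-bit wraparound -- the same shape as the
     * portSafeAddU32() checks above. */
    static bool range_ok(uint32_t offset, uint32_t size, uint32_t bufSize)
    {
        uint32_t end;
        if (offset >= bufSize)
            return false;
        if (__builtin_add_overflow(offset, size, &end))
            return false;
        return end <= bufSize;
    }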
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelGsp KernelGsp pointer + * @param[in] pFwsecUcode KernelGspFlcnUcode structure of FWSEC ucode + * @param[in] frtsOffset Desired offset in FB of FRTS data and WPR2 + */ +NV_STATUS +kgspExecuteFwsecFrts_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + KernelGspFlcnUcode *pFwsecUcode, + const NvU64 frtsOffset +) +{ + NV_STATUS status; + + NvU32 blockSizeIn4K; + FWSECLIC_FRTS_CMD frtsCmd; + + NV_ASSERT_OR_RETURN(!IS_VIRTUAL(pGpu), NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(IS_GSP_CLIENT(pGpu), NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pFwsecUcode != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(frtsOffset > 0, NV_ERR_INVALID_ARGUMENT); + + // Build up FRTS args + blockSizeIn4K = NV_PGC6_AON_FRTS_INPUT_WPR_SIZE_SECURE_SCRATCH_GROUP_03_0_WPR_SIZE_1MB_IN_4K; + + frtsCmd.frtsRegionDesc.version = 1; + frtsCmd.frtsRegionDesc.size = sizeof(frtsCmd.frtsRegionDesc); + frtsCmd.frtsRegionDesc.frtsRegionOffset4K = (NvU32) (frtsOffset >> 12); + frtsCmd.frtsRegionDesc.frtsRegionSize = blockSizeIn4K; + frtsCmd.frtsRegionDesc.frtsRegionMediaType = FWSECLIC_FRTS_REGION_MEDIA_FB; + + frtsCmd.readVbiosDesc.version = 1; + frtsCmd.readVbiosDesc.size = sizeof(frtsCmd.readVbiosDesc); + frtsCmd.readVbiosDesc.gfwImageOffset = 0; + frtsCmd.readVbiosDesc.gfwImageSize = 0; + frtsCmd.readVbiosDesc.flags = FWSECLIC_READ_VBIOS_STRUCT_FLAGS; + + if (pFwsecUcode->bootType == KGSP_FLCN_UCODE_BOOT_FROM_HS) + { + KernelGspFlcnUcodeBootFromHs *pUcode = &pFwsecUcode->ucodeBootFromHs; + NvU8 *pMappedImage; + NvU8 *pMappedData; + + NvBool bSafe; + + NvU32 ucodeVersionVal; + NvU32 hsSigVersions; + NvU32 sigOffset; + NvU32 sigMaxOffset; + + NV_ASSERT_OR_RETURN(pUcode->pUcodeMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pUcode->pSignatures != NULL, NV_ERR_INVALID_ARGUMENT); + + ucodeVersionVal = kgspReadUcodeFuseVersion_HAL(pGpu, pKernelGsp, pUcode->ucodeId); + + ucodeVersionVal = 1 << ucodeVersionVal; + hsSigVersions = pUcode->vbiosSigVersions; + + if ((ucodeVersionVal & hsSigVersions) == 0) + { + return NV_ERR_NOT_SUPPORTED;; + } + + sigOffset = 0; + while ((ucodeVersionVal & hsSigVersions & 1) == 0) + { + sigOffset += (hsSigVersions & 1) * pUcode->sigSize; + hsSigVersions >>= 1; + ucodeVersionVal >>= 1; + } + + if (sigOffset >= pUcode->signaturesTotalSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(sigOffset, pUcode->sigSize, &sigMaxOffset); + if (!bSafe || sigMaxOffset > pUcode->signaturesTotalSize) + { + return NV_ERR_INVALID_OFFSET; + } + + pMappedImage = memdescMapInternal(pGpu, pUcode->pUcodeMemDesc, TRANSFER_FLAGS_NONE); + if (pMappedImage == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + pMappedData = pMappedImage + pUcode->dataOffset; + + status = s_vbiosPatchFrtsInterfaceData(pMappedData, pUcode->dmemSize, + &frtsCmd, pUcode->interfaceOffset); + + portMemCopy(pMappedData + pUcode->hsSigDmemAddr, pUcode->sigSize, + ((NvU8 *) pUcode->pSignatures) + sigOffset, pUcode->sigSize); + + memdescUnmapInternal(pGpu, pUcode->pUcodeMemDesc, + TRANSFER_FLAGS_DESTROY_MAPPING); + pMappedImage = NULL; + pMappedData = NULL; + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to prepare interface data for FRTS: 0x%x\n", status); + return status; + } + } + else if (pFwsecUcode->bootType == KGSP_FLCN_UCODE_BOOT_WITH_LOADER) + { + KernelGspFlcnUcodeBootWithLoader *pUcode = &pFwsecUcode->ucodeBootWithLoader; + NvU8 *pMappedData; + + NV_ASSERT_OR_RETURN(pUcode->pCodeMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + 
NV_ASSERT_OR_RETURN(pUcode->pDataMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + + pMappedData = memdescMapInternal(pGpu, pUcode->pDataMemDesc, TRANSFER_FLAGS_NONE); + if (pMappedData == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + status = s_vbiosPatchFrtsInterfaceData(pMappedData, pUcode->dmemSize, + &frtsCmd, pUcode->interfaceOffset); + + memdescUnmapInternal(pGpu, pUcode->pDataMemDesc, + TRANSFER_FLAGS_DESTROY_MAPPING); + pMappedData = NULL; + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to prepare interface data for FRTS: 0x%x\n", status); + return status; + } + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + + status = kgspExecuteHsFalcon_HAL(pGpu, pKernelGsp, pFwsecUcode, + staticCast(pKernelGsp, KernelFalcon), NULL, NULL); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute FWSEC for FRTS: status 0x%x\n", status); + return status; + } + + { + NvU32 data; + NvU32 frtsErrCode; + NvU32 wpr2HiVal; + NvU32 wpr2LoVal; + NvU32 expectedLoVal; + + data = GPU_REG_RD32(pGpu, NV_PBUS_VBIOS_SCRATCH(NV_VBIOS_FWSECLIC_SCRATCH_INDEX_0E)); + frtsErrCode = DRF_VAL(_VBIOS, _FWSECLIC, _FRTS_ERR_CODE, data); + if (frtsErrCode != NV_VBIOS_FWSECLIC_FRTS_ERR_CODE_NONE) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute FWSEC for FRTS: FRTS error code 0x%x\n", frtsErrCode); + return NV_ERR_GENERIC; + } + + data = GPU_REG_RD32(pGpu, NV_PFB_PRI_MMU_WPR2_ADDR_HI); + wpr2HiVal = DRF_VAL(_PFB, _PRI_MMU_WPR2_ADDR_HI, _VAL, data); + if (wpr2HiVal == 0) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute FWSEC for FRTS: no initialized WPR2 found\n"); + return NV_ERR_GENERIC; + } + + data = GPU_REG_RD32(pGpu, NV_PFB_PRI_MMU_WPR2_ADDR_LO); + wpr2LoVal = DRF_VAL(_PFB, _PRI_MMU_WPR2_ADDR_LO, _VAL, data); + expectedLoVal = (NvU32) (frtsOffset >> NV_PFB_PRI_MMU_WPR2_ADDR_LO_ALIGNMENT); + if (wpr2LoVal != expectedLoVal) + { + NV_PRINTF(LEVEL_ERROR, + "failed to execute FWSEC for FRTS: WPR2 initialized at an unexpected location: 0x%08x (expected 0x%08x)\n", + wpr2LoVal, expectedLoVal); + return NV_ERR_GENERIC; + } + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_tu102.c b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_tu102.c new file mode 100644 index 000000000..fff6e1b15 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_tu102.c @@ -0,0 +1,840 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * Provides TU102+ specific KernelGsp HAL implementations. + */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/bus/kern_bus.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "vgpu/rpc.h" +#include "rmgspseq.h" +#include "core/thread_state.h" +#include "os/os.h" +#include "nverror.h" +#include "gsp/gsp_error.h" + +#include "published/turing/tu102/dev_gsp.h" +#include "published/turing/tu102/dev_gsp_addendum.h" +#include "published/turing/tu102/dev_riscv_pri.h" +#include "published/turing/tu102/dev_fbif_v4.h" +#include "published/turing/tu102/dev_falcon_v4.h" +#include "published/turing/tu102/dev_fuse.h" +#include "published/turing/tu102/dev_ram.h" +#include "published/turing/tu102/dev_gc6_island.h" +#include "published/turing/tu102/dev_gc6_island_addendum.h" + +#define RPC_STRUCTURES +#define RPC_GENERIC_UNION +#include "g_rpc-structures.h" +#undef RPC_STRUCTURES +#undef RPC_GENERIC_UNION + + +void +kgspConfigureFalcon_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + KernelFalconEngineConfig falconConfig; + + portMemSet(&falconConfig, 0, sizeof(falconConfig)); + + falconConfig.registerBase = DRF_BASE(NV_PGSP); + falconConfig.riscvRegisterBase = NV_FALCON2_GSP_BASE; + falconConfig.fbifBase = NV_PGSP_FBIF_BASE; + falconConfig.bBootFromHs = NV_FALSE; + falconConfig.pmcEnableMask = 0; + falconConfig.bIsPmcDeviceEngine = NV_FALSE; + falconConfig.physEngDesc = ENG_GSP; + + kflcnConfigureEngine(pGpu, staticCast(pKernelGsp, KernelFalcon), &falconConfig); +} + +/*! 
+ * Check if the GSP is in debug mode + * + * @return whether the GSP is in debug mode or not + */ +NvBool +kgspIsDebugModeEnabled_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU32 data; + + data = GPU_REG_RD32(pGpu, NV_FUSE_OPT_SECURE_GSP_DEBUG_DIS); + + return FLD_TEST_DRF(_FUSE, _OPT_SECURE_GSP_DEBUG_DIS, _DATA, _NO, data); +} + +NV_STATUS +kgspAllocBootArgs_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvP64 pVa = NvP64_NULL; + NvP64 pPriv = NvP64_NULL; + NV_STATUS nvStatus = NV_OK; + + // Allocate WPR meta data + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescCreate(&pKernelGsp->pWprMetaDescriptor, + pGpu, 0x1000, 0x1000, + NV_TRUE, ADDR_SYSMEM, NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE), + _kgspAllocBootArgs_exit_cleanup); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescAlloc(pKernelGsp->pWprMetaDescriptor), + _kgspAllocBootArgs_exit_cleanup); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescMap(pKernelGsp->pWprMetaDescriptor, 0, + memdescGetSize(pKernelGsp->pWprMetaDescriptor), + NV_TRUE, NV_PROTECT_READ_WRITE, + &pVa, &pPriv), + _kgspAllocBootArgs_exit_cleanup); + + pKernelGsp->pWprMeta = (GspFwWprMeta *)NvP64_VALUE(pVa); + pKernelGsp->pWprMetaMappingPriv = pPriv; + + // + // Setup libos arguments memory + // + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescCreate(&pKernelGsp->pLibosInitArgumentsDescriptor, + pGpu, + LIBOS_INIT_ARGUMENTS_SIZE, + LIBOS_INIT_ARGUMENTS_SIZE, + NV_TRUE, ADDR_SYSMEM, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE), + _kgspAllocBootArgs_exit_cleanup); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescAlloc(pKernelGsp->pLibosInitArgumentsDescriptor), + _kgspAllocBootArgs_exit_cleanup); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescMap(pKernelGsp->pLibosInitArgumentsDescriptor, 0, + memdescGetSize(pKernelGsp->pLibosInitArgumentsDescriptor), + NV_TRUE, NV_PROTECT_READ_WRITE, + &pVa, &pPriv), + _kgspAllocBootArgs_exit_cleanup); + + pKernelGsp->pLibosInitArgumentsCached = (LibosMemoryRegionInitArgument *)NvP64_VALUE(pVa); + pKernelGsp->pLibosInitArgumentsMappingPriv = pPriv; + + // Setup bootloader arguments memory. 
+ NV_ASSERT(sizeof(GSP_ARGUMENTS_CACHED) <= 0x1000); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescCreate(&pKernelGsp->pGspArgumentsDescriptor, + pGpu, 0x1000, 0x1000, + NV_TRUE, ADDR_SYSMEM, NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE), + _kgspAllocBootArgs_exit_cleanup); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescAlloc(pKernelGsp->pGspArgumentsDescriptor), + _kgspAllocBootArgs_exit_cleanup); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescMap(pKernelGsp->pGspArgumentsDescriptor, 0, + memdescGetSize(pKernelGsp->pGspArgumentsDescriptor), + NV_TRUE, NV_PROTECT_READ_WRITE, + &pVa, &pPriv), + _kgspAllocBootArgs_exit_cleanup); + + pKernelGsp->pGspArgumentsCached = (GSP_ARGUMENTS_CACHED *)NvP64_VALUE(pVa); + pKernelGsp->pGspArgumentsMappingPriv = pPriv; + + return nvStatus; + +_kgspAllocBootArgs_exit_cleanup: + kgspFreeBootArgs_HAL(pGpu, pKernelGsp); + return nvStatus; +} + +void +kgspFreeBootArgs_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + // release wpr meta data resources + if (pKernelGsp->pWprMeta != NULL) + { + memdescUnmap(pKernelGsp->pWprMetaDescriptor, + NV_TRUE, osGetCurrentProcess(), + (void *)pKernelGsp->pWprMeta, + pKernelGsp->pWprMetaMappingPriv); + pKernelGsp->pWprMeta = NULL; + pKernelGsp->pWprMetaMappingPriv = NULL; + } + if (pKernelGsp->pWprMetaDescriptor != NULL) + { + memdescFree(pKernelGsp->pWprMetaDescriptor); + memdescDestroy(pKernelGsp->pWprMetaDescriptor); + pKernelGsp->pWprMetaDescriptor = NULL; + } + + // release libos init argument resources + if (pKernelGsp->pLibosInitArgumentsCached != NULL) + { + memdescUnmap(pKernelGsp->pLibosInitArgumentsDescriptor, + NV_TRUE, osGetCurrentProcess(), + (void *)pKernelGsp->pLibosInitArgumentsCached, + pKernelGsp->pLibosInitArgumentsMappingPriv); + pKernelGsp->pLibosInitArgumentsCached = NULL; + pKernelGsp->pLibosInitArgumentsMappingPriv = NULL; + } + if (pKernelGsp->pLibosInitArgumentsDescriptor != NULL) + { + memdescFree(pKernelGsp->pLibosInitArgumentsDescriptor); + memdescDestroy(pKernelGsp->pLibosInitArgumentsDescriptor); + pKernelGsp->pLibosInitArgumentsDescriptor = NULL; + } + + // release init argument page resources + if (pKernelGsp->pGspArgumentsCached != NULL) + { + memdescUnmap(pKernelGsp->pGspArgumentsDescriptor, + NV_TRUE, osGetCurrentProcess(), + (void *)pKernelGsp->pGspArgumentsCached, + pKernelGsp->pGspArgumentsMappingPriv); + pKernelGsp->pGspArgumentsCached = NULL; + pKernelGsp->pGspArgumentsMappingPriv = NULL; + } + if (pKernelGsp->pGspArgumentsDescriptor != NULL) + { + memdescFree(pKernelGsp->pGspArgumentsDescriptor); + memdescDestroy(pKernelGsp->pGspArgumentsDescriptor); + pKernelGsp->pGspArgumentsDescriptor = NULL; + } + + // Release radix3 version of GSP-RM ucode + if (pKernelGsp->pGspUCodeRadix3Descriptor != NULL) + { + memdescFree(pKernelGsp->pGspUCodeRadix3Descriptor); + memdescDestroy(pKernelGsp->pGspUCodeRadix3Descriptor); + pKernelGsp->pGspUCodeRadix3Descriptor = NULL; + } + + // Release signature memory + if (pKernelGsp->pSignatureMemdesc != NULL) + { + memdescFree(pKernelGsp->pSignatureMemdesc); + memdescDestroy(pKernelGsp->pSignatureMemdesc); + pKernelGsp->pSignatureMemdesc = NULL; + } +} + +/*! + * Set command queue head for CPU to GSP message queue + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelGsp KernelGsp object pointer (not used) + * @param[in] queueIdx index + * @param[in] value value to set command queue head to. + * + * @return NV_OK if the operation was successful. 
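kgspAllocBootArgs_TU102 above allocates each buffer with the memdescCreate / memdescAlloc / memdescMap triple and routes every failure to a single cleanup label that calls kgspFreeBootArgs_HAL, which tolerates partially initialized state. A minimal standalone sketch of that goto-cleanup idiom (the names and sizes here are made up):

    #include <stdlib.h>

    typedef struct { void *wprMeta, *libosArgs; } BootArgs;

    static void boot_args_free(BootArgs *p)        /* safe on partially built state */
    {
        free(p->wprMeta);   p->wprMeta   = NULL;
        free(p->libosArgs); p->libosArgs = NULL;
    }

    static int boot_args_alloc(BootArgs *p)
    {
        if ((p->wprMeta   = calloc(1, 0x1000)) == NULL) goto fail;
        if ((p->libosArgs = calloc(1, 0x1000)) == NULL) goto fail;
        return 0;
    fail:
        boot_args_free(p);  /* single exit path, like _kgspAllocBootArgs_exit_cleanup */
        return -1;
    }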
+ */ +NV_STATUS +kgspSetCmdQueueHead_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + NvU32 queueIdx, + NvU32 value +) +{ + NV_ASSERT_OR_RETURN(queueIdx < NV_PGSP_QUEUE_HEAD__SIZE_1, NV_ERR_INVALID_ARGUMENT); + + // Write the value to the correct queue head. + GPU_REG_WR32(pGpu, NV_PGSP_QUEUE_HEAD(queueIdx), value); + + return NV_OK; +} + +/*! + * Load entrypoint address of boot binary into mailbox regs. + */ +void +kgspProgramLibosBootArgsAddr_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU64 addr = + memdescGetPhysAddr(pKernelGsp->pLibosInitArgumentsDescriptor, AT_GPU, 0); + + GPU_REG_WR32(pGpu, NV_PGSP_FALCON_MAILBOX0, NvU64_LO32(addr)); + GPU_REG_WR32(pGpu, NV_PGSP_FALCON_MAILBOX1, NvU64_HI32(addr)); +} + +NV_STATUS +kgspBootstrapRiscvOSEarly_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + GSP_FIRMWARE *pGspFw +) +{ + NV_STATUS status = NV_OK; + KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon); + + // Only for GSP client builds + if (!IS_GSP_CLIENT(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "IS_GSP_CLIENT is not set.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (!kflcnIsRiscvCpuEnabled_HAL(pGpu, pKernelFalcon)) + { + NV_PRINTF(LEVEL_ERROR, "RISC-V core is not enabled.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // + // Setup for libos bootloader execution including reserving space in the + // fb for placement and bootloader args initialization. + // + kgspPopulateGspRmInitArgs(pGpu, pKernelGsp, NULL); + + { + // Execute FWSEC to setup FRTS if we have a FRTS region + if (kgspGetFrtsSize_HAL(pGpu, pKernelGsp) > 0) + { + kflcnReset_HAL(pGpu, pKernelFalcon); + + NV_ASSERT_OK_OR_GOTO(status, + kgspExecuteFwsecFrts_HAL(pGpu, pKernelGsp, pKernelGsp->pFwsecUcode, + pKernelGsp->pWprMeta->frtsOffset), exit); + } + } + + kflcnReset_HAL(pGpu, pKernelFalcon); + + // + // Stuff the message queue with async init messages that will be run + // before OBJGPU is created. + // + NV_RM_RPC_GSP_SET_SYSTEM_INFO(pGpu, status); + if (status != NV_OK) + { + NV_ASSERT_OK_FAILED("NV_RM_RPC_GSP_SET_SYSTEM_INFO", status); + goto exit; + } + + NV_RM_RPC_SET_REGISTRY(pGpu, status); + if (status != NV_OK) + { + NV_ASSERT_OK_FAILED("NV_RM_RPC_SET_REGISTRY", status); + goto exit; + } + + // Initialize libos init args list + kgspSetupLibosInitArgs(pGpu, pKernelGsp); + + // Load init args into mailbox regs + kgspProgramLibosBootArgsAddr_HAL(pGpu, pKernelGsp); + + { + status = kgspExecuteBooterLoad_HAL(pGpu, pKernelGsp, + memdescGetPhysAddr(pKernelGsp->pWprMetaDescriptor, AT_GPU, 0)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter Load (ucode for initial boot): 0x%x\n", status); + goto exit; + } + } + + // Ensure the CPU is started + if (kflcnIsRiscvActive_HAL(pGpu, pKernelFalcon)) + { + NV_PRINTF(LEVEL_INFO, "GSP ucode loaded and RISCV started.\n"); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Failed to boot GSP.\n"); + + status = NV_ERR_NOT_READY; + goto exit; + } + + NV_PRINTF(LEVEL_INFO, "Waiting for GSP fw RM to be ready...\n"); + + // Link the status queue. 
+ NV_ASSERT_OK_OR_GOTO(status, GspStatusQueueInit(pGpu, &pKernelGsp->pRpc->pMessageQueueInfo), + exit); + + NV_ASSERT_OK_OR_GOTO(status, kgspWaitForRmInitDone(pGpu, pKernelGsp), + exit); + + NV_PRINTF(LEVEL_INFO, "GSP FW RM ready.\n"); + +exit: + return status; +} + +void +kgspGetGspRmBootUcodeStorage_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + BINDATA_STORAGE **ppBinStorageImage, + BINDATA_STORAGE **ppBinStorageDesc +) +{ + const BINDATA_ARCHIVE *pBinArchive = kgspGetBinArchiveGspRmBoot_HAL(pKernelGsp); + + *ppBinStorageImage = (BINDATA_STORAGE *) bindataArchiveGetStorage(pBinArchive, "ucode_image"); + *ppBinStorageDesc = (BINDATA_STORAGE *) bindataArchiveGetStorage(pBinArchive, "ucode_desc"); +} + +/*! + * Calculate the FB layout. Also, copy GSP FW booter image to FB. + * + * Firmware scrubs the last 256mb of FB, no memory outside of this region + * may be used until the FW RM has scrubbed the remainder of memory. + * + * ---------------------------- <- fbSize (end of FB, 1M aligned) + * | VGA WORKSPACE | + * ---------------------------- <- vbiosReservedOffset (64K? aligned) + * | (potential align. gap) | + * ---------------------------- <- gspFwWprEnd (128K aligned) + * | FRTS data | (frtsSize is 0 on GA100) + * | ------------------------ | <- frtsOffset + * | BOOT BIN (e.g. SK + BL) | + * ---------------------------- <- bootBinOffset + * | GSP FW ELF | + * ---------------------------- <- gspFwOffset + * | GSP FW (WPR) HEAP | + * ---------------------------- <- gspFwHeapOffset + * | Booter-placed metadata | + * | (struct GspFwWprMeta) | + * ---------------------------- <- gspFwWprStart (128K aligned) + * | GSP FW (non-WPR) HEAP | + * ---------------------------- <- nonWprHeapOffset, gspFwRsvdStart + * + * @param pGpu GPU object pointer + * @param pKernelGsp KernelGsp object pointer + * @param pGspFw Pointer to GSP-RM fw image. + */ +NV_STATUS +kgspCalculateFbLayout_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + GSP_FIRMWARE *pGspFw +) +{ + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + GspFwWprMeta *pWprMeta = pKernelGsp->pWprMeta; + RM_RISCV_UCODE_DESC *pRiscvDesc = pKernelGsp->pGspRmBootUcodeDesc; + NvU64 vbiosReservedOffset; + NvU64 mmuLockLo, mmuLockHi; + NvBool bIsMmuLockValid; + + ct_assert(sizeof(*pWprMeta) == 256); + + NV_ASSERT_OR_RETURN(IS_GSP_CLIENT(pGpu), NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pKernelGsp->pGspRmBootUcodeImage != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGsp->gspRmBootUcodeSize != 0, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pRiscvDesc != NULL, NV_ERR_INVALID_STATE); + + portMemSet(pWprMeta, 0, sizeof *pWprMeta); + + NV_ASSERT_OK_OR_RETURN(kmemsysGetUsableFbSize_HAL(pGpu, pKernelMemorySystem, &pWprMeta->fbSize)); + + // + // Start layout calculations at the top and work down. + // Figure out where VGA workspace is located. We do not have to adjust + // it ourselves (see vgaRelocateWorkspaceBase_HAL()). 
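kgspCalculateFbLayout_TU102 here works strictly top-down from the end of FB, as laid out in the diagram above: each region's start is the region above it minus its size, aligned down to that region's granularity. A standalone sketch of the alignment helpers and one such step (the sizes are placeholders, not the real carve-outs):

    #include <stdint.h>

    #define ALIGN_DOWN64(v, a)  ((v) & ~((uint64_t)(a) - 1))               /* 'a' must be a power of two */
    #define ALIGN_UP64(v, a)    ALIGN_DOWN64((v) + (uint64_t)(a) - 1, (a))

    /* Derive an FRTS-like offset from the top of FB, the way each region in the
     * real layout is derived from the one above it (placeholder sizes only). */
    static uint64_t frts_offset_from_top(uint64_t fbSize)
    {
        uint64_t vbiosReserved = fbSize - (128u << 10);                    /* placeholder VGA/VBIOS carve-out */
        uint64_t gspFwWprEnd   = ALIGN_DOWN64(vbiosReserved, 0x20000);     /* 128 KB aligned, as above        */
        return gspFwWprEnd - (1u << 20);                                   /* minus a 1 MB FRTS region        */
    }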
+ // + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + + if (kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, &pWprMeta->vgaWorkspaceOffset)) + { + if (pWprMeta->vgaWorkspaceOffset < (pWprMeta->fbSize - DRF_SIZE(NV_PRAMIN))) + { + const NvU32 VBIOS_WORKSPACE_SIZE = 0x20000; + + // Point NV_PDISP_VGA_WORKSPACE_BASE to end-of-FB + pWprMeta->vgaWorkspaceOffset = (pWprMeta->fbSize - VBIOS_WORKSPACE_SIZE); + } + } + else + { + pWprMeta->vgaWorkspaceOffset = (pWprMeta->fbSize - DRF_SIZE(NV_PRAMIN)); + } + pWprMeta->vgaWorkspaceSize = pWprMeta->fbSize - pWprMeta->vgaWorkspaceOffset; + + // Check for MMU locked region (locked by VBIOS) + NV_ASSERT_OK_OR_RETURN( + memmgrReadMmuLock_HAL(pGpu, pMemoryManager, &bIsMmuLockValid, &mmuLockLo, &mmuLockHi)); + + if (bIsMmuLockValid) + vbiosReservedOffset = NV_MIN(mmuLockLo, pWprMeta->vgaWorkspaceOffset); + else + vbiosReservedOffset = pWprMeta->vgaWorkspaceOffset; + + // End of WPR region (128KB aligned) + pWprMeta->gspFwWprEnd = NV_ALIGN_DOWN64(vbiosReservedOffset, 0x20000); + + pWprMeta->frtsSize = kgspGetFrtsSize(pGpu, pKernelGsp); + pWprMeta->frtsOffset = pWprMeta->gspFwWprEnd - pWprMeta->frtsSize; + + // Offset of boot binary image (4K aligned) + pWprMeta->sizeOfBootloader = pKernelGsp->gspRmBootUcodeSize; + pWprMeta->bootBinOffset = NV_ALIGN_DOWN64(pWprMeta->frtsOffset - pWprMeta->sizeOfBootloader, 0x1000); + + // Compute GSP firmware image size + pWprMeta->sizeOfRadix3Elf = pGspFw->size; + + // + // Compute the start of the ELF. Align to 64K to avoid issues with + // inherent alignment constraints (e.g. GC6 buffers which are allocated + // just below this offset). + // + pWprMeta->gspFwOffset = NV_ALIGN_DOWN64(pWprMeta->bootBinOffset - pWprMeta->sizeOfRadix3Elf, 0x10000); + +#define GSP_HEAP_SIZE (64 * 1024 * 1024) + + // Start of WPR region (128KB aligned) + pWprMeta->gspFwWprStart = + NV_ALIGN_UP64(pWprMeta->gspFwOffset - GSP_HEAP_SIZE, 0x20000); + + // GSP-RM heap in WPR + pWprMeta->gspFwHeapOffset = NV_ALIGN_UP64(pWprMeta->gspFwWprStart + sizeof *pWprMeta, 0x1000); + pWprMeta->gspFwHeapSize = pWprMeta->gspFwOffset - pWprMeta->gspFwHeapOffset; + + // Non WPR heap + pWprMeta->nonWprHeapSize = kgspGetNonWprHeapSize(pGpu, pKernelGsp); + pWprMeta->nonWprHeapOffset = pWprMeta->gspFwWprStart - pWprMeta->nonWprHeapSize; + + pWprMeta->gspFwRsvdStart = pWprMeta->nonWprHeapOffset; + + // Make sure carveout size is less than 256MB + NV_ASSERT_OR_RETURN((pWprMeta->fbSize - pWprMeta->gspFwRsvdStart) < (256 * 1024 * 1024), + NV_ERR_OUT_OF_RANGE); + + // Physical address of GSP-RM firmware in system memory. + pWprMeta->sysmemAddrOfRadix3Elf = + memdescGetPhysAddr(pKernelGsp->pGspUCodeRadix3Descriptor, AT_GPU, 0); + + // Physical address of boot loader firmware in system memory. 
+ pWprMeta->sysmemAddrOfBootloader = + memdescGetPhysAddr(pKernelGsp->pGspRmBootUcodeMemdesc, AT_GPU, 0); + + // Set necessary info from bootloader desc + pWprMeta->bootloaderCodeOffset = pRiscvDesc->monitorCodeOffset; + pWprMeta->bootloaderDataOffset = pRiscvDesc->monitorDataOffset; + pWprMeta->bootloaderManifestOffset = pRiscvDesc->manifestOffset; + + if (pKernelGsp->pSignatureMemdesc != NULL) + { + pWprMeta->sysmemAddrOfSignature = memdescGetPhysAddr(pKernelGsp->pSignatureMemdesc, AT_GPU, 0); + pWprMeta->sizeOfSignature = memdescGetSize(pKernelGsp->pSignatureMemdesc); + } + pWprMeta->bootCount = 0; + pWprMeta->verified = 0; + pWprMeta->revision = GSP_FW_WPR_META_REVISION; + pWprMeta->magic = GSP_FW_WPR_META_MAGIC; + +#if 0 + NV_PRINTF(LEVEL_ERROR, "WPR meta data offset: 0x%016llx\n", pWprMeta->gspFwWprStart); + NV_PRINTF(LEVEL_ERROR, " magic: 0x%016llx\n", pWprMeta->magic); + NV_PRINTF(LEVEL_ERROR, " revision: 0x%016llx\n", pWprMeta->revision); + NV_PRINTF(LEVEL_ERROR, " sysmemAddrOfRadix3Elf: 0x%016llx\n", pWprMeta->sysmemAddrOfRadix3Elf); + NV_PRINTF(LEVEL_ERROR, " sizeOfRadix3Elf: 0x%016llx\n", pWprMeta->sizeOfRadix3Elf); + NV_PRINTF(LEVEL_ERROR, " sysmemAddrOfBootloader: 0x%016llx\n", pWprMeta->sysmemAddrOfBootloader); + NV_PRINTF(LEVEL_ERROR, " sizeOfBootloader: 0x%016llx\n", pWprMeta->sizeOfBootloader); + NV_PRINTF(LEVEL_ERROR, " sysmemAddrOfSignature: 0x%016llx\n", pWprMeta->sysmemAddrOfSignature); + NV_PRINTF(LEVEL_ERROR, " sizeOfSignature: 0x%016llx\n", pWprMeta->sizeOfSignature); + NV_PRINTF(LEVEL_ERROR, " gspFwRsvdStart: 0x%016llx\n", pWprMeta->gspFwRsvdStart); + NV_PRINTF(LEVEL_ERROR, " nonWprHeap: 0x%016llx - 0x%016llx (0x%016llx)\n", pWprMeta->nonWprHeapOffset, pWprMeta->nonWprHeapOffset + pWprMeta->nonWprHeapSize - 1, pWprMeta->nonWprHeapSize); + NV_PRINTF(LEVEL_ERROR, " gspFwWprStart: 0x%016llx\n", pWprMeta->gspFwWprStart); + NV_PRINTF(LEVEL_ERROR, " gspFwHeap: 0x%016llx - 0x%016llx (0x%016llx)\n", pWprMeta->gspFwHeapOffset, pWprMeta->gspFwHeapOffset + pWprMeta->gspFwHeapSize - 1, pWprMeta->gspFwHeapSize); + NV_PRINTF(LEVEL_ERROR, " gspFwOffset: 0x%016llx - 0x%016llx (0x%016llx)\n", pWprMeta->gspFwOffset, pWprMeta->gspFwOffset + pWprMeta->sizeOfRadix3Elf - 1, pWprMeta->sizeOfRadix3Elf); + NV_PRINTF(LEVEL_ERROR, " bootBinOffset: 0x%016llx - 0x%016llx (0x%016llx)\n", pWprMeta->bootBinOffset, pWprMeta->bootBinOffset + pWprMeta->sizeOfBootloader - 1, pWprMeta->sizeOfBootloader); + NV_PRINTF(LEVEL_ERROR, " frtsOffset: 0x%016llx - 0x%016llx (0x%016llx)\n", pWprMeta->frtsOffset, pWprMeta->frtsOffset + pWprMeta->frtsSize - 1, pWprMeta->frtsSize); + NV_PRINTF(LEVEL_ERROR, " gspFwWprEnd: 0x%016llx\n", pWprMeta->gspFwWprEnd); + NV_PRINTF(LEVEL_ERROR, " fbSize: 0x%016llx\n", pWprMeta->fbSize); + NV_PRINTF(LEVEL_ERROR, " vgaWorkspaceOffset: 0x%016llx - 0x%016llx (0x%016llx)\n", pWprMeta->vgaWorkspaceOffset, pWprMeta->vgaWorkspaceOffset + pWprMeta->vgaWorkspaceSize - 1, pWprMeta->vgaWorkspaceSize); + NV_PRINTF(LEVEL_ERROR, " bootCount: 0x%016llx\n", pWprMeta->bootCount); + NV_PRINTF(LEVEL_ERROR, " verified: 0x%016llx\n", pWprMeta->verified); +#endif + + return NV_OK; +} + +/*! + * Execute GSP sequencer operation + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelGsp KernelGsp object pointer + * @param[in] opCode Sequencer opcode + * @param[in] pPayload Pointer to payload + * @param[in] payloadSize Size of payload in bytes + * + * @return NV_OK if the sequencer operation was successful. + * Appropriate NV_ERR_xxx value otherwise. 
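The WPR metadata population above ends by recording a revision and magic value, which lets the consumer on the other side reject a structure whose layout or population it does not recognize. A tiny sketch of that stamp-then-verify handshake (the struct and constants are illustrative, not the real GspFwWprMeta):

    #include <stdint.h>
    #include <stdbool.h>

    #define META_MAGIC    0x1234abcdULL   /* illustrative values only */
    #define META_REVISION 1ULL

    typedef struct { uint64_t magic, revision, payload; } Meta;

    static void producer_fill(Meta *m, uint64_t payload)
    {
        m->payload  = payload;
        m->revision = META_REVISION;      /* stamped after the payload ... */
        m->magic    = META_MAGIC;         /* ... magic goes last           */
    }

    static bool consumer_accepts(const Meta *m)
    {
        return m->magic == META_MAGIC && m->revision == META_REVISION;
    }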
+ */ +NV_STATUS +kgspExecuteSequencerCommand_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + NvU32 opCode, + NvU32 *pPayload, + NvU32 payloadSize +) +{ + NV_STATUS status = NV_OK; + KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon); + switch (opCode) + { + case GSP_SEQ_BUF_OPCODE_CORE_RESUME: + { + { + kflcnSecureReset_HAL(pGpu, pKernelFalcon); + kgspProgramLibosBootArgsAddr_HAL(pGpu, pKernelGsp); + + status = kgspExecuteBooterReload_HAL(pGpu, pKernelGsp); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to execute Booter Reload (ucode for resume from sequencer): 0x%x\n", status); + break; + } + } + + break; + } + + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} + +/*! + * Reset the GSP HW + * + * @return NV_OK if the GSP HW was properly reset + */ +NV_STATUS +kgspResetHw_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + GPU_FLD_WR_DRF_DEF(pGpu, _PGSP, _FALCON_ENGINE, _RESET, _TRUE); + GPU_FLD_WR_DRF_DEF(pGpu, _PGSP, _FALCON_ENGINE, _RESET, _FALSE); + + return NV_OK; +} + +/*! + * Return NV_OK along with the Engine Reset state. + */ +NvBool +kgspIsEngineInReset_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU32 val = GPU_REG_RD32(pGpu, NV_PGSP_FALCON_ENGINE); + + return FLD_TEST_DRF(_PGSP_FALCON, _ENGINE, _RESET, _TRUE, val); +} + +void +kgspHealthCheck_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU32 mb0 = GPU_REG_RD32(pGpu, NV_PGSP_MAILBOX(0)); + + // + // Check for an error message in the GSP mailbox. Any error here is severe + // enough that it should be reported as an Xid. Clear the error so more can + // potentially be reported by GSP, if it was able to recover. In that case, + // it's possible that GSP will skip reporting some more errors that happened + // before the clear, and it will just update the "skipped" count. + // + if (FLD_TEST_DRF(_GSP, _ERROR, _TAG, _VAL, mb0)) + { + NvU32 mb1 = GPU_REG_RD32(pGpu, NV_PGSP_MAILBOX(1)); + + GPU_REG_WR32(pGpu, NV_PGSP_MAILBOX(0), 0); + + nvErrorLog_va((void*)pGpu, GSP_ERROR, + "GSP Error: Task %d raised error code 0x%x for reason 0x%x at 0x%x (%d more errors skipped)", + DRF_VAL(_GSP, _ERROR, _TASK, mb0), + DRF_VAL(_GSP, _ERROR, _CODE, mb0), + DRF_VAL(_GSP, _ERROR, _REASON, mb0), + mb1, + DRF_VAL(_GSP, _ERROR, _SKIPPED, mb0)); + } +} + +/*! + * GSP Interrupt Service Routine + * + * @return 32-bit interrupt status AFTER all known interrupt-sources were + * serviced. + */ +NvU32 +kgspService_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU32 clearBits = 0; + NvU32 intrStatus; + KernelFalcon *pKernelFalcon = staticCast(pKernelGsp, KernelFalcon); + + // Get the IRQ status and mask the sources not directed to host. + intrStatus = kflcnReadIntrStatus_HAL(pGpu, pKernelFalcon); + + // Exit immediately if there is nothing to do + if (intrStatus == 0) + { + return 0; + } + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "GPU is detached, bailing!\n"); + return 0; + } + + if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _HALT, _TRUE)) + { + clearBits |= DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _HALT, _SET); + + // + // Currently, GSP-RISCV triggers _HALT interrupt to RM when it finds + // itself running into a bad state. Triggering _HALT interrupt to RM + // provides RM a chance to handle it so we have better debugability + // into GSP-RISCV issues. 
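kgspHealthCheck_TU102 above unpacks the task, error code, reason, and skipped count from a single 32-bit mailbox value; the DRF_VAL macros reduce to shift-and-mask field extraction driven by the hardware headers. A standalone sketch of the same decode with made-up field positions (not the real NV_GSP_ERROR layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract bits [hi:lo] of a 32-bit word -- what DRF_VAL() expands to for a
     * field declared as "hi:lo" in the hardware headers (field must be narrower
     * than 32 bits). */
    static uint32_t field(uint32_t value, unsigned hi, unsigned lo)
    {
        return (value >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main(void)
    {
        uint32_t mb0 = 0x8012A034;            /* made-up mailbox contents   */
        printf("task=%u code=0x%x\n",
               field(mb0, 7, 0),              /* hypothetical TASK at 7:0   */
               field(mb0, 23, 8));            /* hypothetical CODE at 23:8  */
        return 0;
    }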
+ // + kgspDumpGspLogs(pGpu, pKernelGsp, NV_FALSE); + kgspHealthCheck_HAL(pGpu, pKernelGsp); + } + if (intrStatus & DRF_DEF(_PFALCON, _FALCON_IRQSTAT, _SWGEN0, _TRUE)) + { + // + // Clear edge triggered interupt BEFORE (and never after) + // servicing it to avoid race conditions. + // + kflcnRegWrite_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_IRQSCLR, + DRF_DEF(_PFALCON, _FALCON_IRQSCLR, _SWGEN0, _SET)); + + kgspRpcRecvEvents(pGpu, pKernelGsp); + } + + // Clear any sources that were serviced and get the new status + kflcnRegWrite_HAL(pGpu, pKernelFalcon, NV_PFALCON_FALCON_IRQSCLR, clearBits); + + kflcnIntrRetrigger_HAL(pGpu, pKernelFalcon); + + intrStatus = kflcnReadIntrStatus_HAL(pGpu, pKernelFalcon); + + return intrStatus; +} + +#define FWSECLIC_PROG_START_TIMEOUT 50000 // 50ms +#define FWSECLIC_PROG_COMPLETE_TIMEOUT 2000000 // 2s + +NV_STATUS +kgspWaitForGfwBootOk_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU32 elapsed = 0; + NvU32 timeoutMs = FWSECLIC_PROG_START_TIMEOUT + FWSECLIC_PROG_COMPLETE_TIMEOUT; + + while (1) + { + // + // Before reading the actual GFW_BOOT status register, + // we want to check that FWSEC has lowered its PLM first. + // If not then obviously it has not completed. + // + if (GPU_FLD_TEST_DRF_DEF(pGpu, + _PGC6, + _AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK, + _READ_PROTECTION_LEVEL0, + _ENABLE) + ) + { + if (GPU_FLD_TEST_DRF_DEF(pGpu, + _PGC6, + _AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT, + _PROGRESS, + _COMPLETED) + ) + { + return NV_OK; + } + } + if (elapsed < timeoutMs) + { + osDelay(100); + elapsed += 100; + } + else + { + return NV_ERR_TIMEOUT; + } + } +} diff --git a/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_vbios_tu102.c b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_vbios_tu102.c new file mode 100644 index 000000000..9f05423e2 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/arch/turing/kernel_gsp_vbios_tu102.c @@ -0,0 +1,529 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * KernelGsp functions and helpers for extracting a VBIOS image from + * ROM. + * + * TODO: JIRA CORERM-4685: Consider moving stuff in here to, e.g. KernelVbios + * + * Note: Most functions here (other than those suffixed by a chip name) + * do not actually need to be HAL'd; we are simply keeping them all in + * one file to try to keep it self-contained. 
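kgspWaitForGfwBootOk_TU102 above is a bounded poll: it re-checks the GFW boot scratch register every 100 ms and gives up once the combined start-plus-completion budget is exhausted, returning NV_ERR_TIMEOUT. A standalone sketch of that poll-until-predicate-or-timeout shape (pred and sleep_ms are hypothetical callbacks standing in for the register read and osDelay):

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true if pred() became true within timeout_ms, polling every poll_ms. */
    static bool wait_for(bool (*pred)(void *), void *ctx,
                         void (*sleep_ms)(uint32_t),
                         uint32_t poll_ms, uint32_t timeout_ms)
    {
        for (uint32_t elapsed = 0; ; elapsed += poll_ms)
        {
            if (pred(ctx))
                return true;          /* e.g. GFW_BOOT _PROGRESS == _COMPLETED */
            if (elapsed >= timeout_ms)
                return false;         /* maps to NV_ERR_TIMEOUT above          */
            sleep_ms(poll_ms);
        }
    }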
+ */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "platform/pci_exp_table.h" // PCI_EXP_ROM_* +#include "gpu/gpu.h" + +#include "published/turing/tu102/dev_bus.h" // for NV_PBUS_IFR_FMT_FIXED* +#include "published/turing/tu102/dev_ext_devices.h" // for NV_PROM_DATA + +#define NV_ROM_DIRECTORY_IDENTIFIER 0x44524652 // "RFRD" + +#define NV_BCRT_HASH_INFO_BASE_CODE_TYPE_VBIOS_BASE 0x00 +#define NV_BCRT_HASH_INFO_BASE_CODE_TYPE_VBIOS_EXT 0xE0 + +typedef struct RomImgSrc +{ + + NvU32 baseOffset; + NvU32 maxOffset; + + OBJGPU *pGpu; +} RomImgSrc; + +/*! + * Read unaligned data from PROM (e.g. VBIOS ROM image) using 32-bit accesses. + * + * @param[in] pSrc RomImgSrc pointer + * @param[in] offset NV_PROM offset to read (note: added to baseOffset) + * @param[in] sizeBytes byte count to read (1, 2, or 4) + * @param[out] pStatus status of attempted read (NV_OK on success) + * + * @return value read + */ +static NvU32 +s_romImgReadGeneric +( + const RomImgSrc * const pSrc, + NvU32 offset, + NvU32 sizeBytes, + NV_STATUS *pStatus +) +{ + + union + { + NvU32 word[2]; + NvU8 byte[2 * sizeof(NvU32)]; + } buf; + + NvU32 retValue = 0; + NvU32 byteIndex; + NvBool bSafe; + NvBool bReadWord1; + + NV_ASSERT(pSrc != NULL); + NV_ASSERT(pStatus != NULL); + NV_ASSERT(sizeBytes <= 4); + + if (NV_UNLIKELY(*pStatus != NV_OK)) + { + // Do not attempt read if previous status was not NV_OK + return 0; + } + + bSafe = portSafeAddU32(offset, pSrc->baseOffset, &offset); + if (NV_UNLIKELY(!bSafe)) + { + *pStatus = NV_ERR_INVALID_OFFSET; + return 0; + } + + if (pSrc->maxOffset > 0) + { + NvU32 tmp; + bSafe = portSafeAddU32(offset, sizeBytes, &tmp); + if (NV_UNLIKELY(!bSafe || tmp > pSrc->maxOffset)) + { + *pStatus = NV_ERR_INVALID_OFFSET; + return 0; + } + } + + byteIndex = offset & (sizeof(NvU32) - 1); // buf.byte[] index for first byte to read. + offset -= byteIndex; // Align offset. + byteIndex += sizeBytes; // Index of last byte to read + 1. + bReadWord1 = (byteIndex > sizeof(NvU32)); // Last byte past the first 32-bit word? + + NV_ASSERT(pSrc->pGpu != NULL); + + // Offset is in NV_PROM. + offset = NV_PROM_DATA(offset); + + // Read bios image as aligned 32-bit word(s). + buf.word[0] = GPU_REG_RD32(pSrc->pGpu, offset); + if (bReadWord1) + { + buf.word[1] = GPU_REG_RD32(pSrc->pGpu, offset + sizeof(NvU32)); + } + + // Combine bytes into number. + for (; sizeBytes > 0; sizeBytes--) + { + retValue = (retValue << 8) + buf.byte[--byteIndex]; + } + + *pStatus = NV_OK; + return retValue; +} + +/*! + * Read a byte from PROM + */ +static NvU8 s_romImgRead8(const RomImgSrc *pSrc, NvU32 offset, NV_STATUS *pStatus) +{ + return (NvU8) s_romImgReadGeneric(pSrc, offset, sizeof(NvU8), pStatus); +} + +/*! + * Read a word in lsb,msb format from PROM + */ +static NvU16 s_romImgRead16(const RomImgSrc *pSrc, NvU32 offset, NV_STATUS *pStatus) +{ + return (NvU16) s_romImgReadGeneric(pSrc, offset, sizeof(NvU16), pStatus); +} + +/*! + * Read a dword in lsb,msb format from PROM + */ +static NvU32 s_romImgRead32(const RomImgSrc *pSrc, NvU32 offset, NV_STATUS *pStatus) +{ + return (NvU32) s_romImgReadGeneric(pSrc, offset, sizeof(NvU32), pStatus); +} + +/*! + * Determine the size of the IFR section from the beginning of a VBIOS image. 
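s_romImgReadGeneric above services 1-, 2- and 4-byte reads at arbitrary offsets using only aligned 32-bit PROM accesses: it reads the one or two aligned words covering the span and assembles the value byte by byte. A standalone sketch of the same technique against a caller-supplied aligned reader (read32_aligned is a hypothetical stand-in for GPU_REG_RD32 of NV_PROM_DATA; like the original, it assumes a little-endian host):

    #include <stdint.h>

    /* Assemble an unaligned little-endian value of sizeBytes (1, 2 or 4) at
     * 'offset', using only 32-bit reads at 4-byte-aligned addresses. */
    static uint32_t read_unaligned(uint32_t (*read32_aligned)(uint32_t alignedOff),
                                   uint32_t offset, uint32_t sizeBytes)
    {
        union { uint32_t word[2]; uint8_t byte[8]; } buf;
        uint32_t byteIndex = offset & 3;             /* first byte within word[0] */
        uint32_t aligned   = offset - byteIndex;
        uint32_t last      = byteIndex + sizeBytes;  /* one past the last byte    */
        uint32_t value     = 0;

        buf.word[0] = read32_aligned(aligned);
        if (last > 4)                                /* value straddles two words */
            buf.word[1] = read32_aligned(aligned + 4);

        while (sizeBytes-- > 0)                      /* most significant byte first */
            value = (value << 8) + buf.byte[--last];
        return value;
    }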
+ * + * @param[in] pGpu OBJGPU pointer + * @param[out] pIfrSize size of the IFR section + */ +static NV_STATUS +s_romImgFindPciHeader_TU102 +( + const RomImgSrc * const pSrc, + NvU32 *pIfrSize +) +{ + + NV_STATUS status = NV_OK; + + NvU32 fixed0; + NvU32 fixed1; + NvU32 fixed2; + NvU32 extendedOffset; + NvU32 imageOffset = 0; + NvU32 ifrVersion = 0; + NvU32 ifrTotalDataSize; + NvU32 flashStatusOffset; + NvU32 romDirectoryOffset; + NvU32 romDirectorySig; + + NV_ASSERT_OR_RETURN(pIfrSize != NULL, NV_ERR_INVALID_ARGUMENT); + + fixed0 = s_romImgRead32(pSrc, NV_PBUS_IFR_FMT_FIXED0, &status); + fixed1 = s_romImgRead32(pSrc, NV_PBUS_IFR_FMT_FIXED1, &status); + fixed2 = s_romImgRead32(pSrc, NV_PBUS_IFR_FMT_FIXED2, &status); + NV_ASSERT_OK_OR_RETURN(status); + + // Check for IFR signature. + if (REF_VAL(NV_PBUS_IFR_FMT_FIXED0_SIGNATURE, fixed0) == + NV_PBUS_IFR_FMT_FIXED0_SIGNATURE_VALUE) + { + ifrVersion = REF_VAL(NV_PBUS_IFR_FMT_FIXED1_VERSIONSW, fixed1); + switch (ifrVersion) + { + case 0x01: + case 0x02: + extendedOffset = REF_VAL(NV_PBUS_IFR_FMT_FIXED1_FIXED_DATA_SIZE, fixed1); + imageOffset = s_romImgRead32(pSrc, extendedOffset + 4, &status); + break; + + case 0x03: + ifrTotalDataSize = REF_VAL(NV_PBUS_IFR_FMT_FIXED2_TOTAL_DATA_SIZE, fixed2); + flashStatusOffset = s_romImgRead32(pSrc, ifrTotalDataSize, &status); + romDirectoryOffset = flashStatusOffset + 4096; + romDirectorySig = s_romImgRead32(pSrc, romDirectoryOffset, &status); + NV_ASSERT_OK_OR_RETURN(status); + + if (romDirectorySig == NV_ROM_DIRECTORY_IDENTIFIER) + { + imageOffset = s_romImgRead32(pSrc, romDirectoryOffset + 8, &status); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Error: ROM Directory not found = 0x%08x.\n", + romDirectorySig); + return NV_ERR_INVALID_DATA; + } + break; + + default: + NV_PRINTF(LEVEL_ERROR, "Error: IFR version not supported = 0x%08x.\n", + ifrVersion); + return NV_ERR_NOT_SUPPORTED; + } + } + + NV_ASSERT_OK_OR_RETURN(status); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED(imageOffset, 4), NV_ERR_INVALID_ADDRESS); + *pIfrSize = imageOffset; + + return NV_OK; +} + +static NV_STATUS +s_locateExpansionRoms +( + const RomImgSrc * const pSrc, + const NvU32 pciOffset, + NvU32 *pBiosSize, + NvU32 *pExpansionRomOffset +) +{ + NV_STATUS status = NV_OK; + NvU32 currBlock = pciOffset; + + // Note: used to compute output for pExpanionRomOffset + NvU32 extRomOffset = 0; + NvU32 baseRomSize = 0; + + NvU8 type; + NvU32 blockOffset = 0; + NvU32 blockSize = 0; + + // Find all ROMs + for (;;) { + RomImgSrc currSrc; + NvU32 pciBlck; + NvU32 pciDataSig; + + NvBool bIsLastImage; + NvU32 imgLen; + NvU32 subImgLen; + + currSrc = *pSrc; + currSrc.baseOffset = currBlock; + + pciBlck = s_romImgRead16(&currSrc, OFFSETOF_PCI_EXP_ROM_PCI_DATA_STRUCT_PTR, &status); + NV_ASSERT_OK_OR_RETURN(status); + + currSrc.baseOffset = currBlock + pciBlck; + + pciDataSig = s_romImgRead32(&currSrc, OFFSETOF_PCI_EXP_ROM_SIG, &status); + NV_ASSERT_OK_OR_RETURN(status); + + if (!IS_VALID_PCI_DATA_SIG(pciDataSig)) + { + return NV_ERR_INVALID_DATA; + } + + bIsLastImage = \ + ((s_romImgRead8(&currSrc, OFFSETOF_PCI_DATA_STRUCT_LAST_IMAGE, &status) & PCI_LAST_IMAGE) != 0); + imgLen = s_romImgRead16(&currSrc, OFFSETOF_PCI_DATA_STRUCT_IMAGE_LEN, &status); + subImgLen = imgLen; + NV_ASSERT_OK_OR_RETURN(status); + + // Look for PCI Data Extension + { + RomImgSrc extSrc; + NvU16 pciDataStructLen = s_romImgRead16(&currSrc, OFFSETOF_PCI_DATA_STRUCT_LEN, &status); + NvU32 nvPciDataExtAt = (currSrc.baseOffset + pciDataStructLen + 0xF) & ~0xF; + NvU32 nvPciDataExtSig; + 
NV_ASSERT_OK_OR_RETURN(status); + + extSrc = currSrc; + extSrc.baseOffset = nvPciDataExtAt; + + nvPciDataExtSig = s_romImgRead32(&extSrc, OFFSETOF_PCI_DATA_EXT_STRUCT_SIG, &status); + NV_ASSERT_OK_OR_RETURN(status); + + if (nvPciDataExtSig == NV_PCI_DATA_EXT_SIG) + { + NvU16 nvPciDataExtRev = s_romImgRead16(&extSrc, OFFSETOF_PCI_DATA_EXT_STRUCT_REV, &status); + NV_ASSERT_OK_OR_RETURN(status); + + if ((nvPciDataExtRev == NV_PCI_DATA_EXT_REV_10) || (nvPciDataExtRev == NV_PCI_DATA_EXT_REV_11)) + { + NvU16 nvPciDataExtLen = s_romImgRead16(&extSrc, OFFSETOF_PCI_DATA_EXT_STRUCT_LEN, &status); + + // use the image length from PCI Data Extension + subImgLen = s_romImgRead16(&extSrc, OFFSETOF_PCI_DATA_EXT_STRUCT_SUBIMAGE_LEN, &status); + NV_ASSERT_OK_OR_RETURN(status); + + // use the last image from PCI Data Extension if it is present + if (OFFSETOF_PCI_DATA_EXT_STRUCT_LAST_IMAGE + sizeof(NvU8) <= nvPciDataExtLen) + { + bIsLastImage = \ + ((s_romImgRead8(&extSrc, OFFSETOF_PCI_DATA_EXT_STRUCT_LAST_IMAGE, &status) & PCI_LAST_IMAGE) != 0); + } + else if (subImgLen < imgLen) + { + bIsLastImage = NV_FALSE; + } + + NV_ASSERT_OK_OR_RETURN(status); + } + } + } + + // Determine size and offset for this expansion ROM + type = s_romImgRead8(&currSrc, OFFSETOF_PCI_DATA_STRUCT_CODE_TYPE, &status); + NV_ASSERT_OK_OR_RETURN(status); + + blockOffset = currBlock - pciOffset; + blockSize = subImgLen * PCI_ROM_IMAGE_BLOCK_SIZE; + + if (extRomOffset == 0 && type == NV_BCRT_HASH_INFO_BASE_CODE_TYPE_VBIOS_EXT) + { + extRomOffset = blockOffset; + } + else if (baseRomSize == 0 && type == NV_BCRT_HASH_INFO_BASE_CODE_TYPE_VBIOS_BASE) + { + baseRomSize = blockSize; + } + + // Advance to next ROM + if (bIsLastImage) + { + break; + } + else + { + currBlock = currBlock + subImgLen * PCI_ROM_IMAGE_BLOCK_SIZE; + } + } + + if (pBiosSize != NULL) + { + // Pick up last ROM found for total size + *pBiosSize = blockOffset + blockSize; + } + + if (pExpansionRomOffset != NULL) + { + if (extRomOffset > 0 && baseRomSize > 0) + { + *pExpansionRomOffset = extRomOffset - baseRomSize; + } + else + { + *pExpansionRomOffset = 0; + } + } + + return status; +} + +/*! + * Returns max size of VBIOS image in ROM + * (including expansion ROMs). + */ +static NvU32 +s_getBaseBiosMaxSize_TU102 +( + OBJGPU *pGpu +) +{ + return 0x100000; // 1 MB +} + +/*! + * Extract VBIOS image from ROM. + * + * The resulting KernelGspVbiosImg should be freed with kgspFreeVbiosImg + * after use. 
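s_locateExpansionRoms above follows the standard PCI option-ROM chain: each image's PCI data structure records its length in 512-byte blocks and a last-image indicator, and the walk advances image by image until the indicator is set (with the NVIDIA PCI data extension able to override both). A simplified standalone sketch of the basic walk over an in-memory ROM copy, without the extension handling or signature checks (the raw offsets stand in for the OFFSETOF_PCI_* constants):

    #include <stdint.h>
    #include <stdbool.h>

    #define PCI_ROM_BLOCK_SIZE 512u
    #define LAST_IMAGE_FLAG    0x80u   /* bit 7 of the indicator byte */

    /* Walk a chain of PCI option-ROM images starting at 'base' within 'rom';
     * return the total size of the chain in bytes (illustrative sketch). */
    static uint32_t walk_rom_images(const uint8_t *rom, uint32_t base)
    {
        uint32_t cur = base;
        for (;;)
        {
            /* 0x18: 16-bit pointer to the PCI data structure within the image */
            uint32_t pcir = cur + (rom[cur + 0x18] | (rom[cur + 0x19] << 8));
            /* 0x10: image length in 512-byte blocks; 0x15: indicator byte */
            uint32_t len  = rom[pcir + 0x10] | (rom[pcir + 0x11] << 8);
            bool     last = (rom[pcir + 0x15] & LAST_IMAGE_FLAG) != 0;

            if (len == 0)                       /* malformed image: stop */
                return cur - base;
            cur += len * PCI_ROM_BLOCK_SIZE;
            if (last)
                return cur - base;
        }
    }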
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pGpu KernelGsp pointer + * @param[out] ppVbiosImg Pointer to resulting KernelGspVbiosImg + */ +NV_STATUS +kgspExtractVbiosFromRom_TU102 +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + KernelGspVbiosImg **ppVbiosImg +) +{ + + NV_STATUS status = NV_OK; + + KernelGspVbiosImg *pVbiosImg = NULL; + + RomImgSrc src; + NvU32 romSig; + NvU32 pciOffset = 0; + + NvU32 biosSize = s_getBaseBiosMaxSize_TU102(pGpu); + NvU32 biosSizeFromRom; + NvU32 expansionRomOffset; + + NV_ASSERT_OR_RETURN(!IS_VIRTUAL(pGpu), NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(IS_GSP_CLIENT(pGpu), NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(ppVbiosImg != NULL, NV_ERR_INVALID_ARGUMENT); + + pVbiosImg = portMemAllocNonPaged(sizeof(*pVbiosImg)); + if (pVbiosImg == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(pVbiosImg, 0, sizeof(*pVbiosImg)); + + portMemSet(&src, 0, sizeof(src)); + src.baseOffset = 0; + src.maxOffset = biosSize; + src.pGpu = pGpu; + + // Find ROM start + romSig = s_romImgRead16(&src, OFFSETOF_PCI_EXP_ROM_SIG, &status); + NV_ASSERT_OK_OR_GOTO(status, status, out); + if (!IS_VALID_PCI_ROM_SIG(romSig)) + { + NV_ASSERT_OK_OR_GOTO(status, s_romImgFindPciHeader_TU102(&src, &pciOffset), out); + + // Adjust base offset for PCI header + src.baseOffset = pciOffset; + + romSig = s_romImgRead16(&src, OFFSETOF_PCI_EXP_ROM_SIG, &status); + NV_ASSERT_OK_OR_GOTO(status, status, out); + } + + if (!IS_VALID_PCI_ROM_SIG(romSig)) + { + NV_PRINTF(LEVEL_ERROR, "did not find valid ROM signature\n"); + status = NV_ERR_INVALID_DATA; + goto out; + } + + status = s_locateExpansionRoms(&src, pciOffset, &biosSizeFromRom, &expansionRomOffset); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to locate expansion ROMs: 0x%x\n", status); + goto out; + } + + if (biosSizeFromRom > biosSize) + { + NV_PRINTF(LEVEL_ERROR, "expansion ROM has exceedingly large size: 0x%x\n", biosSizeFromRom); + status = NV_ERR_INVALID_DATA; + goto out; + } + + biosSize = biosSizeFromRom; + + // Copy to system memory and populate pVbiosImg + { + NvU32 i; + NvU32 biosSizeAligned; + + NvU32 *pImageDwords = portMemAllocNonPaged(biosSize); + if (pImageDwords == NULL) + { + status = NV_ERR_NO_MEMORY; + goto out; + } + + biosSizeAligned = biosSize & (~0x3); + for (i = 0; i < biosSizeAligned; i += 4) + { + pImageDwords[i >> 2] = GPU_REG_RD32(pGpu, pciOffset + NV_PROM_DATA(i)); + } + + for (; i < biosSize; i++) + { + // Finish for non-32-bit-aligned biosSize + ((NvU8 *) pImageDwords)[i] = GPU_REG_RD08(pGpu, pciOffset + NV_PROM_DATA(i)); + } + + pVbiosImg->pImage = (NvU8 *) pImageDwords; + pVbiosImg->biosSize = biosSize; + pVbiosImg->expansionRomOffset = expansionRomOffset; + } + +out: + if (status == NV_OK) + { + *ppVbiosImg = pVbiosImg; + } + else + { + kgspFreeVbiosImg(pVbiosImg); + pVbiosImg = NULL; + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gsp/kernel_gsp.c b/src/nvidia/src/kernel/gpu/gsp/kernel_gsp.c new file mode 100644 index 000000000..e823882be --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/kernel_gsp.c @@ -0,0 +1,2296 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gpu/gsp/kernel_gsp.h"
+
+#include "kernel/core/thread_state.h"
+#include "kernel/core/locks.h"
+#include "kernel/diagnostics/gpu_acct.h"
+#include "kernel/gpu/fifo/kernel_channel.h"
+#include "kernel/gpu/intr/engine_idx.h"
+#include "kernel/gpu/mem_mgr/heap.h"
+#include "kernel/gpu/mem_mgr/mem_mgr.h"
+#include "kernel/gpu/rc/kernel_rc.h"
+#include "kernel/gpu/disp/kern_disp.h"
+
+#include "class/cl2080.h" // NV20_SUBDEVICE_0
+
+#include "logdecode.h"
+#include "nverror.h"
+#include "nvtypes.h"
+#include "objrpc.h"
+#include "objtmr.h"
+#include "os/os.h"
+#include "rmgspseq.h"
+#include "sweng/dispsw.h"
+#include "vgpu/rpc.h"
+#include "kernel/gpu/pmu/kern_pmu.h"
+#include "gpu/perf/kern_perf.h"
+#include "core/locks.h"
+
+#define RPC_STRUCTURES
+#define RPC_GENERIC_UNION
+#include "g_rpc-structures.h"
+#undef RPC_STRUCTURES
+#undef RPC_GENERIC_UNION
+
+#define RPC_MESSAGE_STRUCTURES
+#define RPC_MESSAGE_GENERIC_UNION
+#include "g_rpc-message-header.h"
+#undef RPC_MESSAGE_STRUCTURES
+#undef RPC_MESSAGE_GENERIC_UNION
+
+#include "gpu/gsp/message_queue_priv.h"
+#include "elf.h"
+
+
+#define RPC_HDR ((rpc_message_header_v*)(pRpc->message_buffer))
+
+//
+// RPC_PARAMS defines the rpc_params pointer and initializes it to the correct
+// sub-structure.
+//
+// RPC_PARAMS intentionally assigns the latest version structure to the
+// versioned rpc_params pointer. With the -Werror=incompatible-pointer-types
+// compiler flag, this checks for mismatched structure versions at compile time.
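+//
+// (For instance, if the generated headers moved an RPC to a newer _v04_00
+// structure while a handler still requested _v03_00, the assignment below
+// would be between incompatible pointer types and the build would fail.)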
+// +// For example: +// RPC_PARAMS(free, _v03_00); +// expands to +// rpc_free_v03_00 *rpc_params = &RPC_HDR->rpc_message_data->free_v; +// +#define RPC_PARAMS(r, v) rpc_##r##v *rpc_params = &RPC_HDR->rpc_message_data->r##_v + +static NV_STATUS _kgspInitRpcInfrastructure(OBJGPU *, KernelGsp *); +static void _kgspFreeRpcInfrastructure(OBJGPU *, KernelGsp *); + +static NV_STATUS _kgspRpcSendMessage(OBJGPU *, OBJRPC *); +static NV_STATUS _kgspRpcRecvPoll(OBJGPU *, OBJRPC *, NvU32); +static NV_STATUS _kgspRpcDrainEvents(OBJGPU *, KernelGsp *, NvU32); + +static NV_STATUS _kgspAllocSimAccessBuffer(OBJGPU *pGpu, KernelGsp *pKernelGsp); +static void _kgspFreeSimAccessBuffer(OBJGPU *pGpu, KernelGsp *pKernelGsp); + +static void _kgspStopLogPolling(OBJGPU *pGpu, KernelGsp *pKernelGsp); + +static void _kgspFreeBootBinaryImage(OBJGPU *pGpu, KernelGsp *pKernelGsp); + +static NV_STATUS _kgspPrepareGspRmBinaryImage(OBJGPU *pGpu, KernelGsp *pKernelGsp, GSP_FIRMWARE *pGspFw); + +static NV_STATUS _kgspGetAndClearSignatureFromBinary(OBJGPU *pGpu, KernelGsp *pKernelGsp, + GSP_FIRMWARE *pGspFw, MEMORY_DESCRIPTOR **ppSignatureMemdesc); + +static NV_STATUS _kgspCreateRadix3(OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemdescRadix3, + MEMORY_DESCRIPTOR *pMemdescData, const void *pData, NvU64 size); + +/*! + * GSP client RM RPC send routine + */ +static NV_STATUS +_kgspRpcSendMessage +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + NV_STATUS nvStatus; + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + + NV_ASSERT(rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + nvStatus = GspMsgQueueSendCommand(pRpc->pMessageQueueInfo, pGpu); + + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "GspMsgQueueSendCommand failed: 0x%x\n", nvStatus); + return nvStatus; + } + + // GSPRM TODO: Use this call to pass the actual index. + kgspSetCmdQueueHead_HAL(pGpu, pKernelGsp, 0, 0); + + return NV_OK; +} + +static NV_STATUS +_kgspRpcRunCpuSequencer +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(run_cpu_sequencer, _v17_00); + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + + return kgspExecuteSequencerBuffer(pGpu, pKernelGsp, rpc_params); +} + +static void +_kgspProcessEccNotifier +( + OBJGPU *pGpu, + void *eventData +) +{ + NV_STATUS nvStatus = NV_OK; + MemoryManager *pMemoryMgr = GPU_GET_MEMORY_MANAGER(pGpu); + + if (pMemoryMgr->bEnableDynamicPageOfflining) + { + Nv2080EccDbeNotification *pParams = (Nv2080EccDbeNotification*)eventData; + if ((nvStatus = heapStorePendingBlackList(pGpu, GPU_GET_HEAP(pGpu), pParams->physAddress , + pParams->physAddress)) != NV_OK) + { + if (nvStatus == NV_ERR_RESET_REQUIRED) + { + NV_PRINTF(LEVEL_INFO, "Since we hit the DED on the reserved region, nothing to handle in this code path... \n"); + NV_PRINTF(LEVEL_INFO, "Relying on FBHUB interrupt to kill all the channels and force reset the GPU..\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, "Dynamically blacklisting the DED page offset failed with, status: %x\n", nvStatus); + DBG_BREAKPOINT(); + } + } + + } +} + +/*! + * Receive an event notification from GSP-RM. + * + * When an event fires in GSP-RM, osNotifyEvent and osEventNotification check + * whether the event was originally allocated from client-RM. If so, they post + * it to the event queue and take no further action. Client RM picks up the + * event here and handles it. 
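+ *
+ * Depending on rpc_params->bNotifyList, the event is either broadcast to every
+ * matching entry on the notification list (osEventNotification) or delivered
+ * only to the specific hEvent named in the RPC (osNotifyEvent).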
+ */ +static NV_STATUS +_kgspRpcPostEvent +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(post_event, _v17_00); + PEVENTNOTIFICATION pNotifyList = NULL; + PEVENTNOTIFICATION pNotifyEvent = NULL; + Event *pEvent = NULL; + NV_STATUS nvStatus = NV_OK; + + // Get the notification list that contains this event. + NV_ASSERT_OR_RETURN(CliGetEventInfo(rpc_params->hClient, + rpc_params->hEvent, &pEvent), NV_ERR_OBJECT_NOT_FOUND); + + if (pEvent->pNotifierShare != NULL) + pNotifyList = pEvent->pNotifierShare->pEventList; + + NV_ASSERT_OR_RETURN(pNotifyList != NULL, NV_ERR_INVALID_POINTER); + + switch (rpc_params->notifyIndex) + { + case NV2080_NOTIFIERS_ECC_DBE: + _kgspProcessEccNotifier(pGpu, rpc_params->eventData); + break; + } + + // Send the event. + if (rpc_params->bNotifyList) + { + // Send notification to all matching events on the list. + nvStatus = osEventNotification(pGpu, pNotifyList, rpc_params->notifyIndex, + rpc_params->eventData, rpc_params->eventDataSize); + } + else + { + // Send event to a specific hEvent. Find hEvent in the notification list. + for (pNotifyEvent = pNotifyList; pNotifyEvent; pNotifyEvent = pNotifyEvent->Next) + { + if (pNotifyEvent->hEvent == rpc_params->hEvent) + { + nvStatus = osNotifyEvent(pGpu, pNotifyEvent, 0, + rpc_params->data, rpc_params->status); + break; + } + } + NV_ASSERT_OR_RETURN(pNotifyEvent != NULL, NV_ERR_OBJECT_NOT_FOUND); + } + + return nvStatus; +} + +/*! + * Receive RC notification from GSP-RM. + * + * RC error handling ("Channel Teardown sequence") is executed in GSP-RM. + * Client notifications, OS interaction etc happen in CPU-RM (Kernel RM). + */ +static NV_STATUS +_kgspRpcRCTriggered +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(rc_triggered, _v17_02); + + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + KernelChannel *pKernelChannel; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CHID_MGR *pChidMgr; + NvU32 status = NV_OK; + + // check if there's a PCI-E error pending either in device status or in AER + krcCheckBusError_HAL(pGpu, pKernelRc); + + status = kfifoGetChidMgrFromType(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, + rpc_params->nv2080EngineType, + &pChidMgr); + if (status != NV_OK) + return status; + + pKernelChannel = kfifoChidMgrGetKernelChannel(pGpu, pKernelFifo, + pChidMgr, + rpc_params->chid); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + pKernelChannel != NULL, + NV_ERR_INVALID_CHANNEL); + + return krcErrorSendEventNotifications_HAL(pGpu, pKernelRc, + pKernelChannel, + rpc_params->nv2080EngineType, // unused on kernel side + rpc_params->exceptType, + rpc_params->scope, + rpc_params->partitionAttributionId); +} + +/*! + * Receive Xid notification from GSP-RM + * + * Passes Xid errors that are triggered on GSP-RM to nvErrorLog for OS interactions + * (logging and OS notifications). + */ +static void +_kgspRpcOsErrorLog +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(os_error_log, _v17_00); + + nvErrorLog_va(pGpu, rpc_params->exceptType, "%s", rpc_params->errString); +} + +/*! + * Receives RPC events containing periodic perfmon utilization samples, passing them + * to GPUACCT for processing. 
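+ *
+ * The versioned wire format (_v17_00) is copied into the NV2080_CTRL params
+ * structure field by field rather than with a single portMemCopy, since the
+ * two structure layouts are not guaranteed to match.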
+ */
+static void
+_kgspRpcGpuacctPerfmonUtilSamples
+(
+    OBJGPU *pGpu,
+    OBJRPC *pRpc
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys);
+    GPUACCT_GPU_INSTANCE_INFO *pGpuInstanceInfo = &pGpuAcct->gpuInstanceInfo[pGpu->gpuInstance];
+    RPC_PARAMS(gpuacct_perfmon_util_samples, _v17_00);
+
+    NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS_v17_00 *src = &rpc_params->params;
+    NV2080_CTRL_PERF_GET_GPUMON_PERFMON_UTIL_SAMPLES_V2_PARAMS *dest;
+    NvU32 i;
+
+    dest = pGpuInstanceInfo->pSamplesParams;
+    portMemSet(dest, 0, sizeof(*dest));
+
+    dest->type    = src->type;
+    dest->bufSize = src->bufSize;
+    dest->count   = src->count;
+    dest->tracker = src->tracker;
+
+    for (i = 0; i < NV2080_CTRL_PERF_GPUMON_SAMPLE_COUNT_PERFMON_UTIL; i++)
+    {
+        dest->samples[i].base.timeStamp = src->samples[i].timeStamp;
+
+        dest->samples[i].fb.util         = src->samples[i].fb.util;
+        dest->samples[i].fb.procId       = src->samples[i].fb.procId;
+        dest->samples[i].fb.subProcessID = src->samples[i].fb.subProcessID;
+
+        dest->samples[i].gr.util         = src->samples[i].gr.util;
+        dest->samples[i].gr.procId       = src->samples[i].gr.procId;
+        dest->samples[i].gr.subProcessID = src->samples[i].gr.subProcessID;
+
+        dest->samples[i].nvenc.util         = src->samples[i].nvenc.util;
+        dest->samples[i].nvenc.procId       = src->samples[i].nvenc.procId;
+        dest->samples[i].nvenc.subProcessID = src->samples[i].nvenc.subProcessID;
+
+        dest->samples[i].nvdec.util         = src->samples[i].nvdec.util;
+        dest->samples[i].nvdec.procId       = src->samples[i].nvdec.procId;
+        dest->samples[i].nvdec.subProcessID = src->samples[i].nvdec.subProcessID;
+    }
+
+    gpuacctProcessGpuUtil(pGpuInstanceInfo, &dest->samples[0]);
+}
+
+/*!
+ * Receives RPC events containing current GPU Boost synchronization limits
+ * that should be cached and considered in the GPU Boost algorithm and runs
+ * the algorithm.
+ */
+static void
+_kgspRpcPerfGpuBoostSyncLimitsCallback
+(
+    OBJGPU *pGpu,
+    OBJRPC *pRpc
+)
+{
+    KernelPerf *pKernelPerf = GPU_GET_KERNEL_PERF(pGpu);
+
+    RPC_PARAMS(perf_gpu_boost_sync_limits_callback, _v17_00);
+
+    NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS_v17_00 *src = &rpc_params->params;
+    NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS        dest;
+    NvU32 i;
+
+    dest.flags       = src->flags;
+    dest.bBridgeless = src->bBridgeless;
+
+    for (i = 0; i < NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM; i++)
+    {
+        dest.currLimits[i] = src->currLimits[i];
+    }
+
+    kperfDoSyncGpuBoostLimits(pGpu, pKernelPerf, &dest);
+
+}
+
+/*!
+ * Receives RPC events containing the latest change of bridgeless information
+ */
+static void
+_kgspRpcPerfBridgelessInfoUpdate
+(
+    OBJGPU *pGpu,
+    OBJRPC *pRpc
+)
+{
+    RPC_PARAMS(perf_bridgeless_info_update, _v17_00);
+
+    kPerfGpuBoostSyncBridgelessUpdateInfo(pGpu, rpc_params->bBridgeless);
+}
+
+/*!
+ * Receive MMU fault queue notification from GSP-RM.
+ *
+ * Non-replayable fault handling is split between GSP-RM and the UVM driver.
+ * GSP-RM copies designated faults to the UVM driver's shadow buffer,
+ * and sends a notification. CPU-RM, in turn, needs to notify the UVM
+ * driver (schedule the UVM ISR to be run).
+ */ +static NV_STATUS +_kgspRpcMMUFaultQueued( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + osQueueMMUFaultHandler(pGpu); + + return NV_OK; +} + +static NV_STATUS +_kgspRpcSimRead +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(sim_read, _v1E_01); + if (IS_SIMULATION(pGpu)) + { + const NvU32 count = rpc_params->index + (rpc_params->count / sizeof(NvU32)); + NvU32 i; + + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + NV_ASSERT_OR_RETURN(rpc_params->count <= sizeof(pKernelGsp->pSimAccessBuf->data), NV_ERR_BUFFER_TOO_SMALL); + + for (i = rpc_params->index; i < count; i++) + { + NvU32 data; + pOS->osSimEscapeRead(pGpu, rpc_params->path, i, 4, &data); + pKernelGsp->pSimAccessBuf->data[i] = data; + } + + pKernelGsp->pSimAccessBuf->seq++; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS +_kgspRpcSimWrite +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(sim_write, _v1E_01); + if (IS_SIMULATION(pGpu)) + { + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + pOS->osSimEscapeWrite(pGpu, rpc_params->path, rpc_params->index, rpc_params->count, rpc_params->data); + pKernelGsp->pSimAccessBuf->seq++; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS +_kgspRpcSemaphoreScheduleCallback( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(semaphore_schedule_callback, _v17_00); + + return dispswReleaseSemaphoreAndNotifierFill(pGpu, + rpc_params->GPUVA, + rpc_params->hVASpace, + rpc_params->ReleaseValue, + rpc_params->Flags, + rpc_params->completionStatus, + rpc_params->hClient, + rpc_params->hEvent); +} + +static NV_STATUS +_kgspRpcUcodeLibosPrint +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(ucode_libos_print, _v1E_08); + + // Check ucodes registered with the libos print mechanism + switch (rpc_params->ucodeEngDesc) + { + case ENG_PMU: + { + KernelPmu *pKernelPmu = GPU_GET_KERNEL_PMU(pGpu); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelPmu != NULL, NV_ERR_OBJECT_NOT_FOUND); + + kpmuLogBuf(pGpu, pKernelPmu, + rpc_params->libosPrintBuf, rpc_params->libosPrintBufSize); + + return NV_OK; + } + default: + NV_ASSERT_FAILED("Attempting to use libos prints with an unsupported ucode!\n"); + return NV_ERR_NOT_SUPPORTED; + } +} + +static NV_STATUS +_kgspRpcGspVgpuConfig +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS +_kgspRpcRgLineIntr +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + RPC_PARAMS(rg_line_intr, _v17_00); + + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelDisplay != NULL, NV_ERR_OBJECT_NOT_FOUND); + + kdispInvokeRgLineCallback(pKernelDisplay, rpc_params->head, rpc_params->rgIntr, NV_FALSE); + + return NV_OK; +} + +/*! 
+ * GSP client process RPC events + */ +static NV_STATUS +_kgspProcessRpcEvent +( + OBJGPU *pGpu, + OBJRPC *pRpc +) +{ + rpc_message_header_v *pMsgHdr = RPC_HDR; + NV_STATUS nvStatus = NV_OK; + + NV_PRINTF(LEVEL_INFO, "received event %d: status: %d size: %d\n", + pMsgHdr->function, pMsgHdr->rpc_result, pMsgHdr->length); + + switch(pMsgHdr->function) + { + case NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER: + nvStatus = _kgspRpcRunCpuSequencer(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_POST_EVENT: + nvStatus = _kgspRpcPostEvent(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_RC_TRIGGERED: + nvStatus = _kgspRpcRCTriggered(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED: + nvStatus = _kgspRpcMMUFaultQueued(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_SIM_READ: + nvStatus = _kgspRpcSimRead(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_SIM_WRITE: + nvStatus = _kgspRpcSimWrite(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_OS_ERROR_LOG: + _kgspRpcOsErrorLog(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_GPUACCT_PERFMON_UTIL_SAMPLES: + _kgspRpcGpuacctPerfmonUtilSamples(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK: + _kgspRpcPerfGpuBoostSyncLimitsCallback(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE: + _kgspRpcPerfBridgelessInfoUpdate(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_SEMAPHORE_SCHEDULE_CALLBACK: + _kgspRpcSemaphoreScheduleCallback(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_RG_LINE_INTR: + _kgspRpcRgLineIntr(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT: + nvStatus = _kgspRpcUcodeLibosPrint(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_VGPU_CONFIG: + nvStatus = _kgspRpcGspVgpuConfig(pGpu, pRpc); + break; + + case NV_VGPU_MSG_EVENT_GSP_INIT_DONE: // Handled by _kgspRpcRecvPoll. + default: + // + // We will get here if the previous RPC timed out. The response + // eventually comes in as an unexpected event. The error handling + // for the timeout has already happened, and returning an error here + // causes subsequent messages to fail. So return NV_OK. + // + NV_PRINTF(LEVEL_ERROR, "Unexpected RPC function 0x%x\n", pMsgHdr->function); + break; + } + + return nvStatus; +} + +/*! + * Handle a single RPC event from GSP unless the event is [an RPC return for] expectedFunc, + * or there are no events available in the buffer. + * + * @return + * NV_OK if the event is successfully handled. + * NV_WARN_NOTHING_TO_DO if there are no events available. + * NV_WARN_MORE_PROCESSING_REQUIRED if the event is expectedFunc: it is unhandled and in the staging area. + * (Another status) if event reading or processing fails. + */ +static NV_STATUS +_kgspRpcDrainOneEvent +( + OBJGPU *pGpu, + OBJRPC *pRpc, + NvU32 expectedFunc +) +{ + NV_STATUS nvStatus; + + // Issue a memory barrier to ensure we see any queue updates. + // Note: Without the fence, the CPU may get stuck in an infinite loop + // waiting for a message that has already arrived. 
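+    //
+    // (Presumably the usual producer/consumer ordering concern: GSP-RM fills
+    // in the message payload before publishing the queue state, so the CPU
+    // issues a full fence here before sampling that state below.)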
+ portAtomicMemoryFenceFull(); + + nvStatus = GspMsgQueueReceiveStatus(pRpc->pMessageQueueInfo); + + if (nvStatus == NV_OK) + { + rpc_message_header_v *pMsgHdr = RPC_HDR; + if (pMsgHdr->function == expectedFunc) + return NV_WARN_MORE_PROCESSING_REQUIRED; + + nvStatus = _kgspProcessRpcEvent(pGpu, pRpc); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to process received event %d: status=0x%x\n", + pMsgHdr->function, nvStatus); + } + } + + // + // We don't expect the NV_WARN_MORE_PROCESSING_REQUIRED from either called function. + // If we get it we need to suppress it to avoid confusing our caller, for whom it has special meaning. + // + NV_ASSERT_OR_ELSE(nvStatus != NV_WARN_MORE_PROCESSING_REQUIRED, + nvStatus = NV_ERR_GENERIC); + + return nvStatus; +} + +/*! + * Handle RPC events from GSP until the event is [an RPC return for] expectedFunc, + * or there are no events available in the buffer. + * + * Also dump GSP logs, and check for severe errors coming from GSP. + * + * @return + * NV_OK if one or more events are handled and there are none left. + * NV_WARN_MORE_PROCESSING_REQUIRED if an expectedFunc event is found: it is unhandled and in the staging area. + * (Zero or more preceding events were successfully handled.) + * (Another status) if event reading or processing fails. + */ +static NV_STATUS +_kgspRpcDrainEvents +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + NvU32 expectedFunc +) +{ + NV_STATUS nvStatus = NV_OK; + OBJRPC *pRpc = GPU_GET_RPC(pGpu); + + while (nvStatus == NV_OK) + { + nvStatus = _kgspRpcDrainOneEvent(pGpu, pRpc, expectedFunc); + kgspDumpGspLogs(pGpu, pKernelGsp, NV_FALSE); + } + + kgspHealthCheck_HAL(pGpu, pKernelGsp); + + if (nvStatus == NV_WARN_NOTHING_TO_DO) + nvStatus = NV_OK; + + return nvStatus; +} + +static +const char *_getRpcName +( + NvU32 func +) +{ + static const char *rpcName[] = + { + #define X(UNIT, a) #a, + #define E(a) #a, + #undef _RPC_GLOBAL_ENUMS_H_ + #include "vgpu/rpc_global_enums.h" + #undef X + #undef E + }; + + NV_ASSERT_OR_RETURN(func < NV_VGPU_MSG_FUNCTION_NUM_FUNCTIONS, ""); + + return rpcName[func]; +} + +/*! + * Log Xid 119 - GSP RPC Timeout + */ +static void +_kgspLogXid119 +( + OBJGPU *pGpu, + OBJRPC *pRpc, + NvU32 expectedFunc +) +{ + NvU32 data[2] = {0}; + + NV_ASSERT(expectedFunc == vgpu_rpc_message_header_v->function); + + if (expectedFunc == NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL) + { + rpc_gsp_rm_control_v03_00 *rpc_params = &rpc_message->gsp_rm_control_v03_00; + data[0] = rpc_params->cmd; + data[1] = rpc_params->paramsSize; + } + else + if (expectedFunc == NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC) + { + rpc_gsp_rm_alloc_v03_00 *rpc_params = &rpc_message->gsp_rm_alloc_v03_00; + data[0] = rpc_params->hClass; + data[1] = rpc_params->paramsSize; + } + + nvErrorLog_va((void*)pGpu, GSP_RPC_TIMEOUT, + "Timeout waiting for RPC from GSP! Expected function %s (0x%x 0x%x).", + _getRpcName(expectedFunc), + data[0], data[1]); +#if defined(DEVELOP) || defined(DEBUG) + // dump the stack + osAssertFailed(); +#endif +} + +/*! + * GSP client RM RPC poll routine + */ +static NV_STATUS +_kgspRpcRecvPoll +( + OBJGPU *pGpu, + OBJRPC *pRpc, + NvU32 expectedFunc +) +{ + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + NV_STATUS nvStatus; + RMTIMEOUT timeout; + NvU32 timeoutUs = GPU_TIMEOUT_DEFAULT; + NvBool bSlowGspRpc = IS_EMULATION(pGpu) || IS_SIMULATION(pGpu); + + // + // GSP-RM init in emulation/simulation environment is extremely slow, + // so need to increment timeout. 
+ // Apply the timeout extension to other RPCs as well, mostly so that + // we'll reset the thread state after each RPC, not just while waiting + // for the INIT_DONE event. + // + if (bSlowGspRpc) + { + NvU32 timeoutResult; + + // On slow Apollo emulators, GSP-RM init could take more than an hour + NV_ASSERT(portSafeMulU32(GSP_SCALE_TIMEOUT_EMU_SIM, 1500000, &timeoutResult)); + timeoutUs = timeoutResult; + } + + NV_ASSERT(rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + gpuSetTimeout(pGpu, timeoutUs, &timeout, 0); + + for (;;) + { + nvStatus = _kgspRpcDrainEvents(pGpu, pKernelGsp, expectedFunc); + + switch (nvStatus) { + case NV_WARN_MORE_PROCESSING_REQUIRED: + return NV_OK; + case NV_OK: + // Check timeout and continue outer loop. + break; + default: + return nvStatus; + } + + osSpinLoop(); + + nvStatus = gpuCheckTimeout(pGpu, &timeout); + if (nvStatus == NV_ERR_TIMEOUT) + { + _kgspLogXid119(pGpu, pRpc, expectedFunc); + return nvStatus; + } + } + + if (bSlowGspRpc) + { + // Avoid cumulative timeout due to slow RPC + threadStateResetTimeout(pGpu); + } + + return nvStatus; +} + +/*! + * Initialize stripped down version of RPC infra init for GSP clients. + */ +static NV_STATUS +_kgspInitRpcInfrastructure +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NV_STATUS nvStatus = NV_OK; + + pKernelGsp->pRpc = initRpcObject(pGpu); + if (pKernelGsp->pRpc == NULL) + { + NV_PRINTF(LEVEL_ERROR, "initRpcObject failed\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + OBJRPC *pRpc = pKernelGsp->pRpc; + + pRpc->pMessageQueueInfo = NULL; + + nvStatus = GspMsgQueueInit(pGpu, &pRpc->pMessageQueueInfo); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "GspMsgQueueInit failed\n"); + _kgspFreeRpcInfrastructure(pGpu, pKernelGsp); + return nvStatus; + } + + pRpc->messageQueuePhysMem = pRpc->pMessageQueueInfo->sharedMemPA; + pRpc->message_buffer = (NvU32 *)pRpc->pMessageQueueInfo->pRpcMsgBuf; + pRpc->maxRpcSize = GSP_MSG_QUEUE_RPC_SIZE_MAX; + pRpc->init_msg_buf = (NvU32 *)pRpc->pMessageQueueInfo->pInitMsgBuf; + pRpc->init_msg_buf_pa = pRpc->pMessageQueueInfo->initMsgBufPA; + + portMemSet(&pKernelGsp->gspStaticInfo, 0, + sizeof(pKernelGsp->gspStaticInfo)); + + rpcSendMessage_FNPTR(pKernelGsp->pRpc) = _kgspRpcSendMessage; + rpcRecvPoll_FNPTR(pKernelGsp->pRpc) = _kgspRpcRecvPoll; + + return NV_OK; +} + +static void +_kgspFreeRpcInfrastructure +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + if (pKernelGsp->pRpc != NULL) + { + GspMsgQueueCleanup(&pKernelGsp->pRpc->pMessageQueueInfo); + rpcDestroy(pGpu, pKernelGsp->pRpc); + portMemFree(pKernelGsp->pRpc); + pKernelGsp->pRpc = NULL; + } +} + +/*! + * Free LIBOS task logging structures + */ +static void +_kgspFreeLibosLoggingStructures +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NvU8 idx; + + _kgspStopLogPolling(pGpu, pKernelGsp); + + // Make sure there is no lingering debug output. + kgspDumpGspLogs(pGpu, pKernelGsp, NV_FALSE); + + libosLogDestroy(&pKernelGsp->logDecode); + + for (idx = 0; idx < LOGIDX_SIZE; idx++) + { + RM_LIBOS_LOG_MEM *pLog = &pKernelGsp->rmLibosLogMem[idx]; + + // release log memory for each task. 
+        if (pLog->pTaskLogBuffer != NULL)
+        {
+            memdescUnmap(pLog->pTaskLogDescriptor,
+                         NV_TRUE, osGetCurrentProcess(),
+                         (void *)pLog->pTaskLogBuffer,
+                         pLog->pTaskLogMappingPriv);
+            pLog->pTaskLogBuffer = NULL;
+            pLog->pTaskLogMappingPriv = NULL;
+        }
+
+        if (pLog->pTaskLogDescriptor != NULL)
+        {
+            memdescFree(pLog->pTaskLogDescriptor);
+            memdescDestroy(pLog->pTaskLogDescriptor);
+            pLog->pTaskLogDescriptor = NULL;
+        }
+    }
+
+    portMemFree(pKernelGsp->pLogElf);
+    pKernelGsp->pLogElf = NULL;
+}
+
+/*!
+ * Convert init arg name to 64-bit id value.
+ *
+ * @param[in]   name   String representing name of init arg
+ */
+static NvU64
+_kgspGenerateInitArgId(const char *name)
+{
+    NvU64 id = 0;
+    NvU8 c;
+    NvU32 i;
+
+    // Convert at most 8 characters from name into id.
+    for (i = 0; i < (sizeof(NvU64) / sizeof(NvU8)); ++i)
+    {
+        c = (NvU8)*name++;
+        if (c == '\0')
+        {
+            break;
+        }
+        id = (id << 8) | c;
+    }
+
+    return id;
+}
+
+/*!
+ * Initialize LIBOS task logging structures
+ */
+static NV_STATUS
+_kgspInitLibosLoggingStructures
+(
+    OBJGPU *pGpu,
+    KernelGsp *pKernelGsp,
+    GSP_FIRMWARE *pGspFw
+)
+{
+    static const struct
+    {
+        const char *szMemoryId;
+        const char *szPrefix;
+        NvU32       size;
+    } logInitValues[] =
+    {
+        {"LOGINIT", "INIT", 0x10000},    // 64KB for stack traces
+#if defined(DEVELOP) || defined(DEBUG)
+        {"LOGRM",   "RM",   0x40000}     // 256KB RM debug log on develop/debug builds
+#else
+        {"LOGRM",   "RM",   0x10000}     // 64KB RM debug log on release builds
+#endif
+    };
+    ct_assert(NV_ARRAY_ELEMENTS(logInitValues) <= LIBOS_LOG_MAX_LOGS);
+    ct_assert(NV_ARRAY_ELEMENTS(logInitValues) == LOGIDX_SIZE);
+
+    NV_STATUS nvStatus = NV_OK;
+    NvU8      idx;
+
+    libosLogCreate(&pKernelGsp->logDecode);
+
+    for (idx = 0; idx < LOGIDX_SIZE; idx++)
+    {
+        RM_LIBOS_LOG_MEM *pLog = &pKernelGsp->rmLibosLogMem[idx];
+        NvP64 pVa = NvP64_NULL;
+        NvP64 pPriv = NvP64_NULL;
+
+        //
+        // Setup logging memory for each task.
+        // Use MEMDESC_FLAGS_CPU_ONLY -- too early to call memdescMapIommu.
+        //
+        NV_ASSERT_OK_OR_GOTO(nvStatus,
+            memdescCreate(&pLog->pTaskLogDescriptor,
+                          pGpu,
+                          logInitValues[idx].size,
+                          RM_PAGE_SIZE,
+                          NV_TRUE, ADDR_SYSMEM, NV_MEMORY_CACHED,
+                          MEMDESC_FLAGS_NONE),
+            error_cleanup);
+
+        NV_ASSERT_OK_OR_GOTO(nvStatus,
+            memdescAlloc(pLog->pTaskLogDescriptor),
+            error_cleanup);
+
+        NV_ASSERT_OK_OR_GOTO(nvStatus,
+            memdescMap(pLog->pTaskLogDescriptor, 0,
+                       memdescGetSize(pLog->pTaskLogDescriptor),
+                       NV_TRUE, NV_PROTECT_READ_WRITE,
+                       &pVa, &pPriv),
+            error_cleanup);
+
+        pLog->pTaskLogBuffer = pVa;
+        pLog->pTaskLogMappingPriv = pPriv;
+        portMemSet(pLog->pTaskLogBuffer, 0, memdescGetSize(pLog->pTaskLogDescriptor));
+
+        // Pass the PTE table for the log buffer in the log buffer, after the put pointer.
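+        //
+        // Resulting buffer layout (consumed later via pTaskLogBuffer[1] in
+        // kgspSetupLibosInitArgs): entry [0] is the put pointer, and entries
+        // [1..nPages] hold the physical addresses of the buffer's own pages.
+        //
+        // The id8 assigned below packs up to 8 ASCII characters big-endian,
+        // e.g. _kgspGenerateInitArgId("LOGINIT") == 0x4C4F47494E4954.
+        //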
+ memdescGetPhysAddrs(pLog->pTaskLogDescriptor, + AT_GPU, + 0, + RM_PAGE_SIZE, + NV_CEIL(memdescGetSize(pLog->pTaskLogDescriptor), RM_PAGE_SIZE), + &pLog->pTaskLogBuffer[1]); + + pLog->id8 = _kgspGenerateInitArgId(logInitValues[idx].szMemoryId); + + libosLogAddLog(&pKernelGsp->logDecode, + pLog->pTaskLogBuffer, + memdescGetSize(pLog->pTaskLogDescriptor), + pGpu->gpuInstance, + logInitValues[idx].szPrefix); + } + + // Setup symbol decoder + if (pGspFw->pLogElf) + { + pKernelGsp->pLogElf = portMemAllocNonPaged(pGspFw->logElfSize); + if (pKernelGsp->pLogElf == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate memory for log elf"); + nvStatus = NV_ERR_NO_MEMORY; + goto error_cleanup; + } + portMemCopy(pKernelGsp->pLogElf, pGspFw->logElfSize, pGspFw->pLogElf, pGspFw->logElfSize); + + if (pKernelGsp->pLogElf) + libosLogInit(&pKernelGsp->logDecode, pKernelGsp->pLogElf); + } + +error_cleanup: + if (nvStatus != NV_OK) + _kgspFreeLibosLoggingStructures(pGpu, pKernelGsp); + + return nvStatus; +} + +static NV_STATUS +_kgspAllocSimAccessBuffer(OBJGPU *pGpu, KernelGsp *pKernelGsp) +{ + NvP64 pVa = NvP64_NULL; + NvP64 pPriv = NvP64_NULL; + NV_STATUS nvStatus; + + if (!IS_SIMULATION(pGpu)) + { + pKernelGsp->pMemDesc_simAccessBuf = NULL; + pKernelGsp->pSimAccessBuf = NULL; + pKernelGsp->pSimAccessBufPriv = NULL; + return NV_ERR_NOT_SUPPORTED; + } + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescCreate(&pKernelGsp->pMemDesc_simAccessBuf, + pGpu, + sizeof(SimAccessBuffer), + RM_PAGE_SIZE, + NV_TRUE, ADDR_SYSMEM, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE), + error_cleanup); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescAlloc(pKernelGsp->pMemDesc_simAccessBuf), + error_cleanup); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescMap(pKernelGsp->pMemDesc_simAccessBuf, 0, + memdescGetSize(pKernelGsp->pMemDesc_simAccessBuf), + NV_TRUE, NV_PROTECT_READ_WRITE, + &pVa, &pPriv), + error_cleanup); + + pKernelGsp->pSimAccessBuf = (SimAccessBuffer*)pVa; + pKernelGsp->pSimAccessBufPriv = pPriv; + + portMemSet(pKernelGsp->pSimAccessBuf, 0, memdescGetSize(pKernelGsp->pMemDesc_simAccessBuf)); + +error_cleanup: + if (nvStatus != NV_OK) + _kgspFreeSimAccessBuffer(pGpu, pKernelGsp); + + return nvStatus; +} + +static void +_kgspFreeSimAccessBuffer(OBJGPU *pGpu, KernelGsp *pKernelGsp) +{ + if (!IS_SIMULATION(pGpu)) + { + return; + } + + if (pKernelGsp->pMemDesc_simAccessBuf != NULL) + { + memdescFree(pKernelGsp->pMemDesc_simAccessBuf); + memdescDestroy(pKernelGsp->pMemDesc_simAccessBuf); + } + + pKernelGsp->pMemDesc_simAccessBuf = NULL; + pKernelGsp->pSimAccessBuf = NULL; + pKernelGsp->pSimAccessBufPriv = NULL; +} + +/*! 
+ * Create KernelGsp object and initialize RPC infrastructure
+ */
+NV_STATUS
+kgspConstructEngine_IMPL
+(
+    OBJGPU *pGpu,
+    KernelGsp *pKernelGsp,
+    ENGDESCRIPTOR engDesc
+)
+{
+    NV_STATUS nvStatus = NV_OK;
+
+    if (!IS_GSP_CLIENT(pGpu))
+        return NV_ERR_NOT_SUPPORTED;
+
+    kgspConfigureFalcon_HAL(pGpu, pKernelGsp);
+
+    nvStatus = _kgspInitRpcInfrastructure(pGpu, pKernelGsp);
+    if (nvStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "init RPC infrastructure failed\n");
+        return nvStatus;
+    }
+
+    nvStatus = kgspAllocBootArgs_HAL(pGpu, pKernelGsp);
+    if (nvStatus != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "boot arg alloc failed: 0x%x\n", nvStatus);
+        _kgspFreeRpcInfrastructure(pGpu, pKernelGsp);
+        return nvStatus;
+    }
+
+    if (IS_SIMULATION(pGpu))
+    {
+        nvStatus = _kgspAllocSimAccessBuffer(pGpu, pKernelGsp);
+        if (nvStatus != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "sim access buffer alloc failed: 0x%x\n", nvStatus);
+            kgspFreeBootArgs_HAL(pGpu, pKernelGsp);
+            _kgspFreeRpcInfrastructure(pGpu, pKernelGsp);
+            return nvStatus;
+        }
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * Initialize GSP-RM
+ *
+ * @param[in]   pGpu          GPU object pointer
+ * @param[in]   pKernelGsp    KernelGsp object pointer
+ * @param[in]   pGspFw        GSP firmware structure pointer
+ *
+ * @return NV_OK if GSP fw RM offload successfully initialized.
+ *         Appropriate NV_ERR_xxx value otherwise.
+ */
+NV_STATUS
+kgspInitRm_IMPL
+(
+    OBJGPU       *pGpu,
+    KernelGsp    *pKernelGsp,
+    GSP_FIRMWARE *pGspFw
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJTMR   *pTmr = GPU_GET_TIMER(pGpu);
+    GPU_MASK  gpusLockedMask = 0;
+
+    if (!IS_GSP_CLIENT(pGpu))
+        return NV_OK;
+
+    if ((pGspFw == NULL) || (pGspFw->pBuf == NULL) || (pGspFw->size == 0))
+    {
+        NV_PRINTF(LEVEL_ERROR, "need firmware to initialize GSP\n");
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    // Need to hold the GPU instance lock in order to write to the RPC queue
+    NV_ASSERT_OK_OR_GOTO(status,
+        rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE,
+                              GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT, &gpusLockedMask),
+        done);
+
+    // Set the GPU time to the wall-clock time before loading GSP ucode.
+    tmrSetCurrentTime_HAL(pGpu, pTmr);
+
+    /*
+     * For GSP-RM boot, we must trigger FRTS (if it exists for the chip)
+     * before loading GSP-RM so that FRTS data and GSP-RM code/data/heap can coexist
+     * in WPR2. FRTS is triggered by running a VBIOS-provided ucode called FWSEC.
+     *
+     * Here, we extract a VBIOS image from ROM, and parse it for FWSEC.
+     */
+    if (kgspGetFrtsSize_HAL(pGpu, pKernelGsp) > 0)
+    {
+        if (pKernelGsp->pFwsecUcode == NULL)
+        {
+            KernelGspVbiosImg *pVbiosImg = NULL;
+
+            status = kgspExtractVbiosFromRom_HAL(pGpu, pKernelGsp, &pVbiosImg);
+            if (status != NV_OK)
+            {
+                NV_PRINTF(LEVEL_ERROR, "failed to extract VBIOS image from ROM: 0x%x\n",
+                          status);
+                goto done;
+            }
+
+            status = kgspParseFwsecUcodeFromVbiosImg(pGpu, pKernelGsp, pVbiosImg,
+                                                     &pKernelGsp->pFwsecUcode);
+            kgspFreeVbiosImg(pVbiosImg);
+            if (status != NV_OK)
+            {
+                NV_PRINTF(LEVEL_ERROR, "failed to parse FWSEC ucode from VBIOS image: 0x%x\n",
+                          status);
+                goto done;
+            }
+        }
+    }
+
+    /*
+     * We use a set of Booter ucodes to boot GSP-RM as well as manage its lifecycle.
+     *
+     * Booter Load loads, verifies, and boots GSP-RM in WPR2.
+     * Booter Reload resumes GSP-RM after it has suspended for running GSP sequencer.
+     * Booter Unload tears down WPR2 for driver unload.
+     *
+     * Here we prepare the Booter ucode images in SYSMEM so they may be loaded onto
+     * SEC2 (Load / Unload) and NVDEC0 (Reload).
+ * + * GSPRM-TODO: remove Reload (and Reload comment) once reload is handled by SEC2-RTOS + */ + { + if (pKernelGsp->pBooterLoadUcode == NULL) + { + status = kgspAllocateBooterLoadUcodeImage(pGpu, pKernelGsp, + &pKernelGsp->pBooterLoadUcode); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate Booter Load ucode: 0x%x\n", status); + goto done; + } + } + + if (pKernelGsp->pBooterReloadUcode == NULL) + { + KernelNvdec *pKernelNvdec = GPU_GET_KERNEL_NVDEC(pGpu); + if (pKernelNvdec == NULL) + { + NV_PRINTF(LEVEL_ERROR, "missing NVDEC0 engine, cannot initialize GSP-RM\n"); + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = kgspAllocateBooterReloadUcodeImage(pGpu, pKernelGsp, + &pKernelGsp->pBooterReloadUcode); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate Booter Reload ucode: 0x%x\n", status); + goto done; + } + } + + if (pKernelGsp->pBooterUnloadUcode == NULL) + { + status = kgspAllocateBooterUnloadUcodeImage(pGpu, pKernelGsp, + &pKernelGsp->pBooterUnloadUcode); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate Booter Unload ucode: 0x%x\n", status); + goto done; + } + } + + // execute Booter Unload if needed to reset from unclean shutdown + kgspExecuteBooterUnloadIfNeeded_HAL(pGpu, pKernelGsp); + } + + // Prepare boot binary image. + status = kgspPrepareBootBinaryImage(pGpu, pKernelGsp); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error preparing boot binary image\n"); + goto done; + } + + // Prepare GSP-RM image. + status = _kgspPrepareGspRmBinaryImage(pGpu, pKernelGsp, pGspFw); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error preparing GSP-RM image\n"); + goto done; + } + + status = kgspCalculateFbLayout(pGpu, pKernelGsp, pGspFw); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error calculating FB layout\n"); + goto done; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, kgspInitLogging(pGpu, pKernelGsp, pGspFw), done); + + // Wait for GFW_BOOT OK status + kgspWaitForGfwBootOk_HAL(pGpu, pKernelGsp); + + // bring up ucode with RM offload task + status = kgspBootstrapRiscvOSEarly_HAL(pGpu, pKernelGsp, pGspFw); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "cannot bootstrap riscv/gsp: 0x%x\n", status); + kgspHealthCheck_HAL(pGpu, pKernelGsp); + goto done; + } + + // at this point we should be able to exchange RPCs with RM offload task + NV_RM_RPC_SET_GUEST_SYSTEM_INFO(pGpu, status); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "SET_GUEST_SYSTEM_INFO failed: 0x%x\n", status); + goto done; + } + + NV_RM_RPC_GET_GSP_STATIC_INFO(pGpu, status); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "GET_GSP_STATIC_INFO failed: 0x%x\n", status); + goto done; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, kgspStartLogPolling(pGpu, pKernelGsp), done); + +done: + if (gpusLockedMask != 0) + { + rmGpuGroupLockRelease(gpusLockedMask, GPUS_LOCK_FLAGS_NONE); + } + return status; +} + +NV_STATUS +kgspInitLogging_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + GSP_FIRMWARE *pGspFw +) +{ + NV_STATUS nvStatus; + + nvStatus = _kgspInitLibosLoggingStructures(pGpu, pKernelGsp, pGspFw); + if (nvStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "init LIBOS logging structures failed: 0x%x\n", nvStatus); + return nvStatus; + } + + return nvStatus; +} + +/*! 
+ * Unload GSP-RM + */ +NV_STATUS +kgspUnloadRm_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NV_STATUS rpcStatus = NV_OK; + NV_STATUS status; + + NV_PRINTF(LEVEL_INFO, "unloading GSP-RM\n"); + NV_RM_RPC_UNLOADING_GUEST_DRIVER(pGpu, rpcStatus, NV_FALSE, NV_FALSE, 0); + + { + // After instructing GSP-RM to unload itself, run Booter Unload to teardown WPR2 + status = kgspExecuteBooterUnloadIfNeeded_HAL(pGpu, pKernelGsp); + } + + if (rpcStatus != NV_OK) + { + return rpcStatus; + } + + return status; +} + +/*! + * Free RPC infrastructure and KernelGsp object + */ +void +kgspDestruct_IMPL +( + KernelGsp *pKernelGsp +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelGsp); + + if (!IS_GSP_CLIENT(pGpu)) + return; + + kgspFreeFlcnUcode(pKernelGsp->pFwsecUcode); + pKernelGsp->pFwsecUcode = NULL; + + kgspFreeFlcnUcode(pKernelGsp->pBooterLoadUcode); + pKernelGsp->pBooterLoadUcode = NULL; + + kgspFreeFlcnUcode(pKernelGsp->pBooterReloadUcode); + pKernelGsp->pBooterReloadUcode = NULL; + + kgspFreeFlcnUcode(pKernelGsp->pBooterUnloadUcode); + pKernelGsp->pBooterUnloadUcode = NULL; + + kgspFreeBootArgs_HAL(pGpu, pKernelGsp); + _kgspFreeLibosLoggingStructures(pGpu, pKernelGsp); + _kgspFreeRpcInfrastructure(pGpu, pKernelGsp); + _kgspFreeBootBinaryImage(pGpu, pKernelGsp); + _kgspFreeSimAccessBuffer(pGpu, pKernelGsp); +} + +/*! + * Dump logs coming from GSP-RM + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelGsp KernelGsp pointer + * @param[in] bSyncNvLog NV_TRUE: Copy a snapshot of the libos logs + * into the nvLog wrap buffers. + */ +void +kgspDumpGspLogs_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + NvBool bSyncNvLog +) +{ + if (pKernelGsp->pLogElf || bSyncNvLog) + libosExtractLogs(&pKernelGsp->logDecode, bSyncNvLog); +} + +/*! + * Populate GSP-RM init arguments. + */ +void +kgspPopulateGspRmInitArgs_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + GSP_SR_INIT_ARGUMENTS *pGspInitArgs +) +{ + GSP_ARGUMENTS_CACHED *pGspArgs = pKernelGsp->pGspArgumentsCached; + MESSAGE_QUEUE_INIT_ARGUMENTS *pMQInitArgs = &pGspArgs->messageQueueInitArguments; + MESSAGE_QUEUE_INFO *pMQI = pKernelGsp->pRpc->pMessageQueueInfo; + GSP_SR_INIT_ARGUMENTS *pSrInitArgs = &pGspArgs->srInitArguments; + + // Setup the message queue arguments + pMQInitArgs->sharedMemPhysAddr = pKernelGsp->pRpc->messageQueuePhysMem; + pMQInitArgs->pageTableEntryCount = pMQI->pageTableEntryCount; + pMQInitArgs->cmdQueueOffset = pMQI->pageTableSize; + pMQInitArgs->statQueueOffset = pMQInitArgs->cmdQueueOffset + pMQI->commandQueueSize; + + if (pGspInitArgs == NULL) + { + pSrInitArgs->bInPMTransition = NV_FALSE; + pSrInitArgs->oldLevel = 0; + pSrInitArgs->flags = 0; + } + else + { + pSrInitArgs->bInPMTransition = NV_TRUE; + pSrInitArgs->oldLevel = pGspInitArgs->oldLevel; + pSrInitArgs->flags = pGspInitArgs->flags; + } +} + +/*! + * Prepare boot binary image for GSP-RM boot. + * + * @return NV_OK if boot binary image prepared successfully. + * Appropriate NV_ERR_xxx value otherwise. 
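+ *
+ * Note the asymmetry below: the boot image is copied into a page-aligned
+ * SYSMEM memory descriptor (so it can be referenced by physical address during
+ * GSP bootstrap), while the RM_RISCV_UCODE_DESC descriptor is read only by the
+ * CPU and is kept in a plain portMemAllocNonPaged buffer.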
+ */ +NV_STATUS +kgspPrepareBootBinaryImage_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NV_STATUS status; + BINDATA_STORAGE *pBinStorageImage; + BINDATA_STORAGE *pBinStorageDesc; + NvU32 bufSize; + NvU32 bufSizeAligned; + NvU8 *pDesc = NULL; + NvP64 pVa = NvP64_NULL; + NvP64 pPriv = NvP64_NULL; + + NV_ASSERT_OR_RETURN(pKernelGsp->pGspRmBootUcodeImage == NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelGsp->pGspRmBootUcodeDesc == NULL, NV_ERR_INVALID_STATE); + + // get the bindata storage for the image/descriptor + kgspGetGspRmBootUcodeStorage_HAL(pGpu, pKernelGsp, &pBinStorageImage, &pBinStorageDesc); + + // copy the image to sysmem + bufSize = bindataGetBufferSize(pBinStorageImage); + bufSizeAligned = NV_ALIGN_UP(bufSize, 0x1000); + + NV_ASSERT_OK_OR_GOTO(status, + memdescCreate(&pKernelGsp->pGspRmBootUcodeMemdesc, + pGpu, + bufSizeAligned, + RM_PAGE_SIZE, + NV_TRUE, ADDR_SYSMEM, NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE), + fail); + + NV_ASSERT_OK_OR_GOTO(status, + memdescAlloc(pKernelGsp->pGspRmBootUcodeMemdesc), + fail); + + NV_ASSERT_OK_OR_GOTO(status, + memdescMap(pKernelGsp->pGspRmBootUcodeMemdesc, 0, + memdescGetSize(pKernelGsp->pGspRmBootUcodeMemdesc), + NV_TRUE, NV_PROTECT_READ_WRITE, + &pVa, &pPriv), + fail); + + pKernelGsp->gspRmBootUcodeSize = bufSize; + pKernelGsp->pGspRmBootUcodeImage = (NvU8 *)NvP64_VALUE(pVa);; + pKernelGsp->pGspRmBootUcodeMemdescPriv = pPriv; + + NV_ASSERT_OK_OR_GOTO(status, + bindataWriteToBuffer(pBinStorageImage, + pKernelGsp->pGspRmBootUcodeImage, + bufSize), + fail); + + // copy the image descriptor + bufSize = bindataGetBufferSize(pBinStorageDesc); + pDesc = portMemAllocNonPaged(bufSize); + if (pDesc == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate ucode desc buffer\n"); + status = NV_ERR_NO_MEMORY; + goto fail; + } + + pKernelGsp->pGspRmBootUcodeDesc = (RM_RISCV_UCODE_DESC*)pDesc; + + NV_ASSERT_OK_OR_GOTO(status, + bindataWriteToBuffer(pBinStorageDesc, pDesc, bufSize), + fail); + + return status; + +fail: + _kgspFreeBootBinaryImage(pGpu, pKernelGsp); + return status; +} + +static void +_kgspFreeBootBinaryImage +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + portMemFree(pKernelGsp->pGspRmBootUcodeDesc); + pKernelGsp->pGspRmBootUcodeDesc = NULL; + + if (pKernelGsp->pGspRmBootUcodeImage != NULL) + { + memdescUnmap(pKernelGsp->pGspRmBootUcodeMemdesc, + NV_TRUE, osGetCurrentProcess(), + (void *)pKernelGsp->pGspRmBootUcodeImage, + pKernelGsp->pGspRmBootUcodeMemdescPriv); + pKernelGsp->pGspRmBootUcodeImage = NULL; + pKernelGsp->pGspRmBootUcodeMemdescPriv = NULL; + } + if (pKernelGsp->pGspRmBootUcodeMemdesc != NULL) + { + memdescFree(pKernelGsp->pGspRmBootUcodeMemdesc); + memdescDestroy(pKernelGsp->pGspRmBootUcodeMemdesc); + pKernelGsp->pGspRmBootUcodeMemdesc = NULL; + } + + pKernelGsp->gspRmBootUcodeSize = 0; +} + +static NV_STATUS +_kgspPrepareGspRmBinaryImage +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + GSP_FIRMWARE *pGspFw +) +{ + NV_STATUS status = NV_OK; + + // Get signature from gsp.bin and zero out the signature sections + status = + _kgspGetAndClearSignatureFromBinary(pGpu, pKernelGsp, + pGspFw, &pKernelGsp->pSignatureMemdesc); + + if ((status != NV_OK) && (status != NV_ERR_NOT_SUPPORTED)) + { + return status; + } + + NV_ASSERT_OK_OR_RETURN( + _kgspCreateRadix3(pGpu, &pKernelGsp->pGspUCodeRadix3Descriptor, + NULL, pGspFw->pBuf, pGspFw->size)); + + return NV_OK; +} + +static NV_STATUS +_kgspCreateRadix3 +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR **ppMemdescRadix3, + MEMORY_DESCRIPTOR *pMemdescData, + const void *pData, + 
NvU64 size +) +{ + const NvU64 entriesLog2 = LIBOS_MEMORY_REGION_RADIX_PAGE_LOG2 - 3; + NvU8 *pRadix3Buf; + NvP64 pVaKernel; + NvP64 pPrivKernel; + NvU64 ptSize; + NvU64 allocSize; + NvU64 nPages = 0; + NvU64 dataOffset = 0; + NvU32 i; + NV_STATUS status = NV_OK; + + // radix3 working array. + struct + { + NvU64 nPages; + NvU64 offset; + } radix3[4]; + + NV_ASSERT_OR_RETURN(ppMemdescRadix3 != NULL, NV_ERR_INVALID_PARAMETER); + NV_ASSERT_OR_ELSE_STR((pMemdescData != NULL) != (pData != NULL), + "Specify pMemdescData or pData, but not both", + return NV_ERR_INVALID_PARAMETER); + + // If the size is not specified, get it from the memory descriptor. + if ((size == 0) && (pMemdescData != NULL)) + size = memdescGetSize(pMemdescData); + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_OUT_OF_RANGE); + + // Clear working structure. + portMemSet(radix3, 0, sizeof radix3); + + // Populate npages, high to low. + i = NV_ARRAY_ELEMENTS(radix3) - 1; + radix3[i].nPages = (size + LIBOS_MEMORY_REGION_RADIX_PAGE_SIZE - 1) >> + LIBOS_MEMORY_REGION_RADIX_PAGE_LOG2; + for (; i > 0; i--) + radix3[i - 1].nPages = ((radix3[i].nPages - 1) >> entriesLog2) + 1; + + // Populate offset, low to high. + for (i = 1; i < NV_ARRAY_ELEMENTS(radix3); i++) + { + nPages += radix3[i - 1].nPages; + radix3[i].offset = nPages << LIBOS_MEMORY_REGION_RADIX_PAGE_LOG2; + } + + NV_ASSERT_OR_RETURN(radix3[0].nPages == 1, NV_ERR_OUT_OF_RANGE); + + // Allocate space for PTEs and PDEs. + ptSize = nPages << LIBOS_MEMORY_REGION_RADIX_PAGE_LOG2; + allocSize = ptSize; + + if (pData != NULL) + { + // We don't have a separate descriptor for the data. We need PTEs, + // so include space for data in the new descriptor. + allocSize += radix3[3].nPages << LIBOS_MEMORY_REGION_RADIX_PAGE_LOG2; + } + + NV_ASSERT_OK_OR_GOTO(status, + memdescCreate(ppMemdescRadix3, pGpu, allocSize, + LIBOS_MEMORY_REGION_RADIX_PAGE_SIZE, + NV_MEMORY_NONCONTIGUOUS, + ADDR_SYSMEM, + NV_MEMORY_CACHED, + MEMDESC_FLAGS_KERNEL_MODE), + done); + + NV_ASSERT_OK_OR_GOTO(status, memdescAlloc(*ppMemdescRadix3), error_ret); + + // Create kernel mapping. + NV_ASSERT_OK_OR_GOTO(status, + memdescMap(*ppMemdescRadix3, 0, allocSize, NV_TRUE, NV_PROTECT_WRITEABLE, + &pVaKernel, &pPrivKernel), + error_ret); + + if (pVaKernel == NvP64_NULL) + { + NV_PRINTF(LEVEL_ERROR, "VA error for radix3 shared buffer\n"); + status = NV_ERR_NO_MEMORY; + goto error_ret; + } + + pRadix3Buf = KERNEL_POINTER_FROM_NvP64(NvU8 *, pVaKernel); + + // Zap out page table. + portMemSet(pRadix3Buf, 0, ptSize); + + // Fill in PDEs. + for (i = 0; i < NV_ARRAY_ELEMENTS(radix3) - 2; i++) + { + memdescGetPhysAddrs(*ppMemdescRadix3, + AT_GPU, // addressTranslation + radix3[i + 1].offset, // offset + RM_PAGE_SIZE, // stride + radix3[i + 1].nPages, // count + (RmPhysAddr *)(pRadix3Buf + radix3[i].offset)); // physical address table + } + + if (pData != NULL) + { + dataOffset = radix3[3].offset; + + // Optionally copy data into the radix3 buffer. + portMemCopy(pRadix3Buf + dataOffset, size, pData, size); + + // If we only have part of the last page, clear the rest. + NvU32 clearSize = allocSize - dataOffset - size; + if (clearSize != 0) + portMemSet(pRadix3Buf + dataOffset + size, 0, clearSize); + + pMemdescData = *ppMemdescRadix3; + } + + memdescGetPhysAddrs(*ppMemdescRadix3, + AT_GPU, // addressTranslation + dataOffset, // offset + RM_PAGE_SIZE, // stride + radix3[3].nPages, // count + (RmPhysAddr *)(pRadix3Buf + radix3[2].offset)); // physical address table + + // + // No reason to keep this memory mapped on the CPU side. 
Only GSP will + // access it after this point. + // + memdescUnmap(*ppMemdescRadix3, NV_TRUE, osGetCurrentProcess(), + pVaKernel, pPrivKernel); +done: + return status; + +error_ret: + if (*ppMemdescRadix3 != NULL) + { + memdescFree(*ppMemdescRadix3); + memdescDestroy(*ppMemdescRadix3); + *ppMemdescRadix3 = NULL; + } + + return status; +} + +/*! + * Process gsp.bin elf buffer and extract the corresponding signature. + * + * All signatures will also be cleared (set to 0) because the binary was signed + * before the signatures were inserted. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelGsp KernelGsp object pointer + * @param[inout] pGspFw GSP firmware structure pointer, sections + * whose names start with the signature + * section name prefix will be cleared + * @param[out] ppSignatureMemdesc Memdesc to store the signature. If + * return code is NV_OK, the memdesc must + * be freed by caller + */ +static NV_STATUS +_kgspGetAndClearSignatureFromBinary +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + GSP_FIRMWARE *pGspFw, + MEMORY_DESCRIPTOR **ppSignatureMemdesc +) +{ + NV_STATUS status = NV_OK; + NvU8 *pGspBuf = (NvU8*) pGspFw->pBuf; + const elf64_header *pElfHeader; + const elf64_shdr *pElfSectionHeader; + NvU64 elfSectionHeaderTableLength; + NvU64 elfSectionHeaderMaxIdx; + NvU64 elfSectionNamesTableOffset; + NvU64 elfSectionNamesTableSize; + NvU64 elfSectionNamesTableMaxIdx; + NvU64 elfSectionMaxIdx; + static const NvU32 elfMagicNumber = 0x464C457F; + static const NvU8 elfClass64 = 0x2; + static const NvU8 elfLittleEndian = 0x1; + const char *pSignatureSectionName = kgspGetSignatureSectionName_HAL(pGpu, pKernelGsp); + NvLength signatureSectionNameLength; + NvLength signaturePrefixLength; + NvU8 *pSignatureVa = NULL; + NvS16 idx; + NvBool signatureSectionFound = NV_FALSE; + + NV_ASSERT_OR_RETURN(ppSignatureMemdesc != NULL, NV_ERR_INVALID_PARAMETER); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pSignatureSectionName != NULL, NV_ERR_NOT_SUPPORTED); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pGspFw->size >= sizeof(elf64_header), NV_ERR_INVALID_DATA); + + signatureSectionNameLength = portStringLength(pSignatureSectionName); + signaturePrefixLength = portStringLength(SIGNATURE_SECTION_NAME_PREFIX); + + *ppSignatureMemdesc = NULL; + + pElfHeader = (const elf64_header*) pGspBuf; + + // Check for the elf identifier at the beginning of the file + NV_CHECK_OR_RETURN(LEVEL_ERROR, *(NvU32*)&pElfHeader->ident == elfMagicNumber, NV_ERR_INVALID_DATA); + // Make sure the data is formatted as little endian + NV_CHECK_OR_RETURN(LEVEL_ERROR, pElfHeader->ident[5] == elfLittleEndian, NV_ERR_INVALID_DATA); + // Check the class type, only ELFCLASS64 is supported + NV_CHECK_OR_RETURN(LEVEL_ERROR, pElfHeader->ident[4] == elfClass64, NV_ERR_INVALID_DATA); + + // Make sure that the elf section header table is valid + NV_CHECK_OR_RETURN(LEVEL_ERROR, pElfHeader->shentsize == sizeof(elf64_shdr), NV_ERR_INVALID_DATA); + NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeMulU64(pElfHeader->shentsize, pElfHeader->shnum, &elfSectionHeaderTableLength), NV_ERR_INVALID_DATA); + NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeAddU64(pElfHeader->shoff, elfSectionHeaderTableLength - 1, &elfSectionHeaderMaxIdx), NV_ERR_INVALID_DATA); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pGspFw->size >= elfSectionHeaderMaxIdx, NV_ERR_INVALID_DATA); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pElfHeader->shstrndx <= pElfHeader->shnum, NV_ERR_INVALID_DATA); + + // Get the offset and size of the table that holds the section names and make sure they are valid + pElfSectionHeader = (const 
elf64_shdr*) &pGspBuf[pElfHeader->shoff + (pElfHeader->shstrndx * pElfHeader->shentsize)]; + elfSectionNamesTableOffset = pElfSectionHeader->offset; + elfSectionNamesTableSize = pElfSectionHeader->size; + NV_CHECK_OR_RETURN(LEVEL_ERROR, portSafeAddU64(elfSectionNamesTableOffset, elfSectionNamesTableSize - 1, &elfSectionNamesTableMaxIdx), NV_ERR_INVALID_DATA); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pGspFw->size >= elfSectionNamesTableMaxIdx, NV_ERR_INVALID_DATA); + + // Iterate through all of the section headers to find the signatures + pElfSectionHeader = (const elf64_shdr*) &pGspBuf[elfSectionHeaderMaxIdx + 1 - sizeof(*pElfSectionHeader)]; + for (idx = pElfHeader->shnum - 1; idx >= 0; idx--, pElfSectionHeader--) + { + // Make sure the header name index fits within the section names table + NV_CHECK_OR_GOTO(LEVEL_ERROR, elfSectionNamesTableSize - 1 >= pElfSectionHeader->name, fail_invalid_data); + + // Check whether the section name matches the signature prefix. All signature binaries need to be + // cleared from the elf because the gsp binary was signed with them empty + if (portStringCompare((const char *)&pGspBuf[elfSectionNamesTableOffset + pElfSectionHeader->name], + SIGNATURE_SECTION_NAME_PREFIX, + signaturePrefixLength) == 0) + { + signatureSectionFound = NV_TRUE; + + // Make sure the elf section size and offset are valid + NV_CHECK_OR_GOTO(LEVEL_ERROR, portSafeAddU64(pElfSectionHeader->offset, pElfSectionHeader->size - 1, &elfSectionMaxIdx), fail_invalid_data); + NV_CHECK_OR_GOTO(LEVEL_ERROR, pGspFw->size >= elfSectionMaxIdx, fail_invalid_data); + + // Check whether the section name matches the current chip signature + if (portStringCompare((const char *)&pGspBuf[elfSectionNamesTableOffset + pElfSectionHeader->name + signaturePrefixLength], + pSignatureSectionName + signaturePrefixLength, + signatureSectionNameLength - signaturePrefixLength + 1) == 0) + { + // NOTE: align to 256 because that's the alignment needed for Booter DMA + NV_ASSERT_OK_OR_GOTO(status, + memdescCreate(ppSignatureMemdesc, pGpu, + NV_ALIGN_UP(pElfSectionHeader->size, 256), 256, + NV_TRUE, ADDR_SYSMEM, NV_MEMORY_CACHED, MEMDESC_FLAGS_NONE), + fail); + NV_ASSERT_OK_OR_GOTO(status, memdescAlloc(*ppSignatureMemdesc), fail); + pSignatureVa = memdescMapInternal(pGpu, *ppSignatureMemdesc, TRANSFER_FLAGS_NONE); + if (pSignatureVa == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto fail; + } + portMemCopy(pSignatureVa, memdescGetSize(*ppSignatureMemdesc), + &pGspBuf[pElfSectionHeader->offset], pElfSectionHeader->size); + memdescUnmapInternal(pGpu, *ppSignatureMemdesc, 0); + pSignatureVa = NULL; + } + + // Clear the signature binary + portMemSet(&pGspBuf[pElfSectionHeader->offset], 0, pElfSectionHeader->size); + } + // We assume that all signature sections are grouped together sequentially + else if (signatureSectionFound == NV_TRUE) + { + break; + } + } + + return status; + +fail_invalid_data: + status = NV_ERR_INVALID_DATA; +fail: + if (pSignatureVa != NULL) + memdescUnmapInternal(pGpu, *ppSignatureMemdesc, 0); + memdescFree(*ppSignatureMemdesc); + memdescDestroy(*ppSignatureMemdesc); + *ppSignatureMemdesc = NULL; + return status; +} + +/*! + * Setup libos init arguments. + */ +void +kgspSetupLibosInitArgs_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + LibosMemoryRegionInitArgument *pLibosInitArgs = pKernelGsp->pLibosInitArgumentsCached; + NvU8 idx; + + portMemSet(pLibosInitArgs, 0, LIBOS_INIT_ARGUMENTS_SIZE); + + // Add memory areas for logging each LIBOS task. 
+ // @note LOGINIT must be first for early init logging to work. + // @note: These should be switched to radix regions to remove the need + // for large apertures in the RM task for logging. + for (idx = 0; idx < LOGIDX_SIZE; idx++) + { + pLibosInitArgs[idx].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + pLibosInitArgs[idx].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + pLibosInitArgs[idx].id8 = pKernelGsp->rmLibosLogMem[idx].id8; + pLibosInitArgs[idx].pa = pKernelGsp->rmLibosLogMem[idx].pTaskLogBuffer[1]; + pLibosInitArgs[idx].size = memdescGetSize(pKernelGsp->rmLibosLogMem[idx].pTaskLogDescriptor); + } + + // insert GSP-RM ELF args address; id must match libos-config.py entry + pLibosInitArgs[idx].kind = LIBOS_MEMORY_REGION_CONTIGUOUS; + pLibosInitArgs[idx].loc = LIBOS_MEMORY_REGION_LOC_SYSMEM; + pLibosInitArgs[idx].id8 = _kgspGenerateInitArgId("RMARGS"); + pLibosInitArgs[idx].pa = memdescGetPhysAddr(pKernelGsp->pGspArgumentsDescriptor, AT_GPU, 0); + pLibosInitArgs[idx].size = memdescGetSize(pKernelGsp->pGspArgumentsDescriptor); + + portAtomicMemoryFenceFull(); +} + +/*! + * Receive and process RPC event from GSP-RM. + * + * This function is called from interrupt bottom-half handler (DPC) and + * would race with normal RPC flow, _kgspRpcRecvPoll(). + * This race is currently avoided only because DPC is executed under + * gpus lock, so RPC and Bottom-half handler are mutually exclusive + * control flows. + */ +void +kgspRpcRecvEvents_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NV_ASSERT(rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + // + // We should never have an event with code NV_VGPU_MSG_FUNCTION_NUM_FUNCTIONS. + // If we do the assert will fail on NV_WARN_MORE_PROCESSING_REQUIRED, + // in addition to general error codes. + // + NV_ASSERT_OK(_kgspRpcDrainEvents(pGpu, pKernelGsp, NV_VGPU_MSG_FUNCTION_NUM_FUNCTIONS)); +} + +/*! + * Wait for GSP-RM initialization to complete. + */ +NV_STATUS +kgspWaitForRmInitDone_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NV_STATUS nvStatus = NV_OK; + + // + // Kernel RM can timeout when GSP-RM has an error condition. Give GSP-RM + // a chance to report the error before we pull the rug out from under it. + // + threadStateResetTimeout(pGpu); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + rpcRecvPoll(pGpu, pKernelGsp->pRpc, NV_VGPU_MSG_EVENT_GSP_INIT_DONE)); + + // Now check if RPC really succeeded + NV_ASSERT_OK_OR_RETURN(((rpc_message_header_v *)pKernelGsp->pRpc->message_buffer)->rpc_result); + if (nvStatus != NV_VGPU_MSG_RESULT_SUCCESS) + { + NV_ASSERT_OK_FAILED("nvStatus", nvStatus); + } + + return nvStatus; +} + +/*! 
+ * Execute a sequencer buffer coming from GSP
+ *
+ * @param[in]   pGpu             GPU object pointer
+ * @param[in]   pKernelGsp       KernelGsp object pointer
+ * @param[in]   pRunCpuSeqParams Sequence buffer RPC parameters
+ *
+ * @return NV_OK if the GSP sequencer buffer has been executed successfully
+ *         NV_ERR_INVALID_STATE if the sequencer buffer is not allocated
+ *         NV_ERR_INVALID_DATA if the sequencer buffer is malformed
+ */
+NV_STATUS
+kgspExecuteSequencerBuffer_IMPL
+(
+    OBJGPU    *pGpu,
+    KernelGsp *pKernelGsp,
+    void      *pRunCpuSeqParams
+)
+{
+    rpc_run_cpu_sequencer_v17_00 *pParams = (rpc_run_cpu_sequencer_v17_00 *)pRunCpuSeqParams;
+    NvU32 *pCmd = pParams->commandBuffer;
+    NvU32 buffer_end = pParams->cmdIndex;
+    NvU32 current_cmd_index = 0;
+    NV_STATUS nvStatus = NV_OK;
+    NvU32 payloadSize;
+
+    NV_ASSERT_OR_RETURN(IS_GSP_CLIENT(pGpu), NV_ERR_NOT_SUPPORTED);
+    NV_ASSERT_OR_RETURN((pParams->bufferSizeDWord != 0), NV_ERR_INVALID_STATE);
+    NV_ASSERT_OR_RETURN(buffer_end < pParams->bufferSizeDWord, NV_ERR_INVALID_DATA);
+
+    while (current_cmd_index < buffer_end)
+    {
+        NvU32 opCode = pCmd[current_cmd_index++];
+        payloadSize = GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opCode);
+
+        NV_ASSERT_OR_RETURN(current_cmd_index + payloadSize <= buffer_end, NV_ERR_INVALID_DATA);
+
+        //
+        // Handling of sequencer commands is split between those commands
+        // that are common to all architectures (handled directly here) and
+        // those commands that are arch-specific and are handled via the
+        // kgspExecuteSequencerCommand_HAL() call below.
+        //
+        switch (opCode)
+        {
+            // 2 arguments
+            case GSP_SEQ_BUF_OPCODE_REG_WRITE:
+            {
+                GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+                portMemCopy(&regWrite, sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE), &pCmd[current_cmd_index], sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE));
+
+                GPU_REG_WR32(pGpu, regWrite.addr, regWrite.val);
+                break;
+            }
+
+            // 3 arguments
+            case GSP_SEQ_BUF_OPCODE_REG_MODIFY:
+            {
+                GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+                NvU32 regVal;
+
+                portMemCopy(&regModify, sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY), &pCmd[current_cmd_index], sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY));
+
+                regVal = GPU_REG_RD32(pGpu, regModify.addr);
+                regVal = regVal & ~regModify.mask;
+                regVal = regVal | regModify.val;
+                GPU_REG_WR32(pGpu, regModify.addr, regVal);
+                break;
+            }
+
+            // 5 arguments
+            case GSP_SEQ_BUF_OPCODE_REG_POLL:
+            {
+                GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+                NvU32 regval;
+                RMTIMEOUT timeout;
+
+                portMemCopy(&regPoll, sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL), &pCmd[current_cmd_index], sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL));
+
+                regval = GPU_REG_RD32(pGpu, regPoll.addr);
+
+                gpuSetTimeout(pGpu, regPoll.timeout, &timeout, 0);
+                while ((regval & regPoll.mask) != regPoll.val)
+                {
+                    nvStatus = gpuCheckTimeout(pGpu, &timeout);
+                    if (nvStatus == NV_ERR_TIMEOUT)
+                    {
+                        NV_PRINTF(LEVEL_ERROR, "Timeout waiting for register to settle, value = 0x%x, err_code = 0x%x\n",
+                                  regval, regPoll.error);
+                        DBG_BREAKPOINT();
+                        return nvStatus;
+                    }
+                    osSpinLoop();
+                    regval = GPU_REG_RD32(pGpu, regPoll.addr);
+                }
+                break;
+            }
+
+            case GSP_SEQ_BUF_OPCODE_DELAY_US:
+            {
+                GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+                portMemCopy(&delayUs, sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US), &pCmd[current_cmd_index], sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US));
+
+                osDelayUs(delayUs.val);
+                break;
+            }
+
+            case GSP_SEQ_BUF_OPCODE_REG_STORE:
+            {
+                GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+                portMemCopy(&regStore, sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE), &pCmd[current_cmd_index], sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE));
+
+                NV_ASSERT_OR_RETURN(regStore.index <
GSP_SEQ_BUF_REG_SAVE_SIZE, NV_ERR_INVALID_ARGUMENT); + + pParams->regSaveArea[regStore.index] = GPU_REG_RD32(pGpu, regStore.addr); + break; + } + + default: + // + // Route this command to the arch-specific handler. + // + NV_ASSERT_OK_OR_RETURN(kgspExecuteSequencerCommand_HAL(pGpu, pKernelGsp, opCode, &pCmd[current_cmd_index], payloadSize * sizeof (*pCmd))); + break; + } + current_cmd_index += payloadSize; + } + + return NV_OK; +} + +#if LIBOS_LOG_DECODE_ENABLE +static void +_kgspLogPollingCallback +( + OBJGPU *pGpu, + void *data +) +{ + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + kgspDumpGspLogs(pGpu, pKernelGsp, NV_FALSE); +} + +NV_STATUS +kgspStartLogPolling_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + NV_STATUS status; + status = osSchedule1SecondCallback(pGpu, _kgspLogPollingCallback, NULL, NV_OS_1HZ_REPEAT); + return status; +} + +static void +_kgspStopLogPolling +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + osRemove1SecondRepeatingCallback(pGpu, _kgspLogPollingCallback, NULL); +} + +#else // LIBOS_LOG_DECODE_ENABLE + +NV_STATUS +kgspStartLogPolling_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + return NV_OK; +} + +static void +_kgspStopLogPolling +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp +) +{ + return; +} +#endif // LIBOS_LOG_DECODE_ENABLE + +/*! + * Provides an opportunity to register some IntrService during intrStateInit. + */ +void +kgspRegisterIntrService_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX] +) +{ + NvU32 engineIdx = MC_ENGINE_IDX_GSP; + + if (!IS_GSP_CLIENT(pGpu)) + return; + + NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL); + pRecords[engineIdx].pInterruptService = staticCast(pKernelGsp, IntrService); +} + +/*! + * Service GSP interrupts. + * + * @returns Zero, or any implementation-chosen nonzero value. If the same nonzero value is returned enough + * times the interrupt is considered stuck. + */ +NvU32 +kgspServiceInterrupt_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + IntrServiceServiceInterruptArguments *pParams +) +{ + NV_ASSERT_OR_RETURN(pParams != NULL, 0); + NV_ASSERT_OR_RETURN(pParams->engineIdx == MC_ENGINE_IDX_GSP, 0); + + return kgspService_HAL(pGpu, pKernelGsp); +} diff --git a/src/nvidia/src/kernel/gpu/gsp/kernel_gsp_booter.c b/src/nvidia/src/kernel/gpu/gsp/kernel_gsp_booter.c new file mode 100644 index 000000000..85eadc924 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/kernel_gsp_booter.c @@ -0,0 +1,502 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/nvdec/kernel_nvdec.h" +#include "gpu/sec2/kernel_sec2.h" +#include "core/bin_data.h" + +/*! + * Free a KernelGspVbiosImg structure. + * + * @param[in] pVbiosImg structure to free + */ +void +kgspFreeVbiosImg +( + KernelGspVbiosImg *pVbiosImg +) +{ + if (pVbiosImg == NULL) + { + return; + } + + portMemFree(pVbiosImg->pImage); + pVbiosImg->pImage = NULL; + + portMemFree(pVbiosImg); +} + +/*! + * Free a KernelGspFlcnUcode structure. + * + * @param[in] pFlcnUcode structure to free + */ +void +kgspFreeFlcnUcode +( + KernelGspFlcnUcode *pFlcnUcode +) +{ + if (pFlcnUcode == NULL) + { + return; + } + + if (pFlcnUcode->bootType == KGSP_FLCN_UCODE_BOOT_FROM_HS) + { + KernelGspFlcnUcodeBootFromHs *pUcode = &pFlcnUcode->ucodeBootFromHs; + if (pUcode->pUcodeMemDesc != NULL) + { + memdescFree(pUcode->pUcodeMemDesc); + memdescDestroy(pUcode->pUcodeMemDesc); + pUcode->pUcodeMemDesc = NULL; + } + portMemFree(pUcode->pSignatures); + pUcode->pSignatures = NULL; + } + else if (pFlcnUcode->bootType == KGSP_FLCN_UCODE_BOOT_WITH_LOADER) + { + KernelGspFlcnUcodeBootWithLoader *pUcode = &pFlcnUcode->ucodeBootWithLoader; + if (pUcode->pCodeMemDesc != NULL) + { + memdescFree(pUcode->pCodeMemDesc); + memdescDestroy(pUcode->pCodeMemDesc); + pUcode->pCodeMemDesc = NULL; + } + if (pUcode->pDataMemDesc != NULL) + { + memdescFree(pUcode->pDataMemDesc); + memdescDestroy(pUcode->pDataMemDesc); + pUcode->pDataMemDesc = NULL; + } + } + else if (pFlcnUcode->bootType == KGSP_FLCN_UCODE_BOOT_DIRECT) + { + KernelGspFlcnUcodeBootDirect *pUcode = &pFlcnUcode->ucodeBootDirect; + portMemFree(pUcode->pImage); + pUcode->pImage = NULL; + } + + portMemFree(pFlcnUcode); +} + +static NV_STATUS +s_bindataWriteToFixedSizeBuffer +( + const BINDATA_STORAGE *pBinStorage, + void *pBuf, // out + NvU32 bufSize +) +{ + NV_STATUS status = NV_OK; + + if (bindataGetBufferSize(pBinStorage) != bufSize) + { + status = NV_ERR_INVALID_DATA; + return status; + } + + status = bindataWriteToBuffer(pBinStorage, (NvU8 *) pBuf, bufSize); + if (status != NV_OK) + { + return status; + } + + return status; +} + +static NV_STATUS +s_patchBooterUcodeSignature +( + OBJGPU *pGpu, + NvBool bIsForNvdec, + NvU32 ucodeId, + NvU8 *pImage, + NvU32 sigDestOffset, + NvU32 imageSize, + NvU32 *pSignatures, + NvU32 signaturesTotalSize, + NvU32 numSigs +) +{ + NvU32 sigIndex = 0; + NvU32 sigSize = signaturesTotalSize / numSigs; + NvU32 fuseVer; + + NV_ASSERT_OR_RETURN(pImage != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(imageSize > sigDestOffset, NV_ERR_INVALID_DATA); + NV_ASSERT_OR_RETURN(imageSize - sigDestOffset > sigSize, NV_ERR_INVALID_DATA); + NV_ASSERT_OR_RETURN(pSignatures != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(numSigs > 0, NV_ERR_INVALID_DATA); + + // Booter Reload is on NVDEC0, all other Booters are on SEC2 + if (bIsForNvdec) + { + KernelNvdec *pKernelNvdec = GPU_GET_KERNEL_NVDEC(pGpu); + NV_ASSERT_OR_RETURN(pKernelNvdec != NULL, NV_ERR_INVALID_STATE); + fuseVer = knvdecReadUcodeFuseVersion_HAL(pGpu, pKernelNvdec, ucodeId); + } + else + { + KernelSec2 *pKernelSec2 = GPU_GET_KERNEL_SEC2(pGpu); + NV_ASSERT_OR_RETURN(pKernelSec2 != NULL, NV_ERR_INVALID_STATE); + fuseVer = 
ksec2ReadUcodeFuseVersion_HAL(pGpu, pKernelSec2, ucodeId); + } + + if (numSigs > 1) + { + if (fuseVer > numSigs - 1) + { + NV_PRINTF(LEVEL_ERROR, "signature for fuse version %u not present\n", fuseVer); + return NV_ERR_OUT_OF_RANGE; + } + sigIndex = numSigs - 1 - fuseVer; + } + + portMemCopy(pImage + sigDestOffset, sigSize, ((NvU8 *) pSignatures) + sigIndex * sigSize, sigSize); + return NV_OK; +} + +static NV_STATUS +s_allocateUcodeFromBinArchive +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + const BINDATA_ARCHIVE *pBinArchive, + const NvBool bIsForNvdec, + KernelGspFlcnUcode **ppFlcnUcode // out +) +{ + NV_STATUS status; + KernelGspFlcnUcode *pFlcnUcode; + + struct { + NvU32 osCodeOffset; + NvU32 osCodeSize; + NvU32 osDataOffset; + NvU32 osDataSize; + NvU32 numApps; + NvU32 appCodeOffset; + NvU32 appCodeSize; + NvU32 appDataOffset; + NvU32 appDataSize; + } header; + + struct { + NvU32 fuseVer; + NvU32 engineId; + NvU32 ucodeId; + } patchMeta; + + NvU32 patchLoc; + NvU32 patchSig; + NvU32 numSigs; + NvU32 signaturesTotalSize; + NvU32 *pSignatures = NULL; + + const BINDATA_STORAGE *pBinImage; + const BINDATA_STORAGE *pBinHeader; + const BINDATA_STORAGE *pBinSig; + const BINDATA_STORAGE *pBinPatchSig; + const BINDATA_STORAGE *pBinPatchLoc; + const BINDATA_STORAGE *pBinPatchMeta; + const BINDATA_STORAGE *pBinNumSigs; + + if (kgspIsDebugModeEnabled_HAL(pGpu, pKernelGsp)) + { + pBinImage = bindataArchiveGetStorage(pBinArchive, "image_dbg"); + pBinHeader = bindataArchiveGetStorage(pBinArchive, "header_dbg"); + pBinSig = bindataArchiveGetStorage(pBinArchive, "sig_dbg"); + } + else + { + pBinImage = bindataArchiveGetStorage(pBinArchive, "image_prod"); + pBinHeader = bindataArchiveGetStorage(pBinArchive, "header_prod"); + pBinSig = bindataArchiveGetStorage(pBinArchive, "sig_prod"); + } + + NV_ASSERT_OR_RETURN(pBinImage != NULL, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pBinHeader != NULL, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pBinSig != NULL, NV_ERR_NOT_SUPPORTED); + + pBinPatchSig = bindataArchiveGetStorage(pBinArchive, "patch_sig"); + pBinPatchLoc = bindataArchiveGetStorage(pBinArchive, "patch_loc"); + pBinPatchMeta = bindataArchiveGetStorage(pBinArchive, "patch_meta"); + pBinNumSigs = bindataArchiveGetStorage(pBinArchive, "num_sigs"); + + NV_ASSERT_OR_RETURN(pBinPatchSig != NULL, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pBinPatchLoc != NULL, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pBinPatchMeta != NULL, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pBinNumSigs != NULL, NV_ERR_NOT_SUPPORTED); + + pFlcnUcode = portMemAllocNonPaged(sizeof(*pFlcnUcode)); + if (pFlcnUcode == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(pFlcnUcode, 0, sizeof(*pFlcnUcode)); + + // Retrieve header + NV_ASSERT_OK_OR_GOTO(status, + s_bindataWriteToFixedSizeBuffer(pBinHeader, &header, sizeof(header)), + out); + + if (header.numApps != 1) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_DATA; + goto out; + } + + // Retrieve signature patch location + NV_ASSERT_OK_OR_GOTO(status, + s_bindataWriteToFixedSizeBuffer(pBinPatchLoc, &patchLoc, sizeof(patchLoc)), + out); + + // Retrieve signature patch index + NV_ASSERT_OK_OR_GOTO(status, + s_bindataWriteToFixedSizeBuffer(pBinPatchSig, &patchSig, sizeof(patchSig)), + out); + + if (patchSig != 0) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_DATA; + goto out; + } + + // Retrieve signature patch metadata + NV_ASSERT_OK_OR_GOTO(status, + s_bindataWriteToFixedSizeBuffer(pBinPatchMeta, &patchMeta, sizeof(patchMeta)), + out); + + // Retrieve signatures + 
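+ // numSigs is read first; the signature blob itself is copied further below
+ // once signaturesTotalSize is known, and each signature within the blob is
+ // signaturesTotalSize / numSigs bytes (see s_patchBooterUcodeSignature).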
NV_ASSERT_OK_OR_GOTO(status, + s_bindataWriteToFixedSizeBuffer(pBinNumSigs, &numSigs, sizeof(numSigs)), + out); + + if (numSigs == 0) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_DATA; + goto out; + } + + signaturesTotalSize = bindataGetBufferSize(pBinSig); + + if ((signaturesTotalSize == 0) || ((signaturesTotalSize % numSigs) != 0)) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_DATA; + goto out; + } + + pSignatures = portMemAllocNonPaged(signaturesTotalSize); + if (pSignatures == NULL) + { + status = NV_ERR_NO_MEMORY; + goto out; + } + + NV_ASSERT_OK_OR_GOTO(status, + bindataWriteToBuffer(pBinSig, (NvU8 *) pSignatures, signaturesTotalSize), + out); + + // Populate KernelGspFlcnUcode structure + if (staticCast(pKernelGsp, KernelFalcon)->bBootFromHs) + { + KernelGspFlcnUcodeBootFromHs *pUcode = &pFlcnUcode->ucodeBootFromHs; + NvU8 *pMappedUcodeMem; + + pFlcnUcode->bootType = KGSP_FLCN_UCODE_BOOT_FROM_HS; + + pUcode->size = bindataGetBufferSize(pBinImage); + + pUcode->codeOffset = header.appCodeOffset; + pUcode->imemSize = header.appCodeSize; + pUcode->imemPa = 0; + pUcode->imemVa = header.appCodeOffset; + + pUcode->dataOffset = header.osDataOffset; + pUcode->dmemSize = header.osDataSize; + pUcode->dmemPa = 0; + pUcode->dmemVa = FLCN_DMEM_VA_INVALID; + + pUcode->hsSigDmemAddr = patchLoc - pUcode->dataOffset; + pUcode->ucodeId = patchMeta.ucodeId; + pUcode->engineIdMask = patchMeta.engineId; + + NV_ASSERT_OK_OR_GOTO(status, + memdescCreate(&pUcode->pUcodeMemDesc, pGpu, pUcode->size, + 16, NV_TRUE, ADDR_SYSMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE), out); + + status = memdescAlloc(pUcode->pUcodeMemDesc); + if (status != NV_OK) + { + goto out; + } + + pMappedUcodeMem = memdescMapInternal(pGpu, pUcode->pUcodeMemDesc, TRANSFER_FLAGS_NONE); + if (pMappedUcodeMem == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto out; + } + + // Copy in the whole image + status = bindataWriteToBuffer(pBinImage, pMappedUcodeMem, pUcode->size); + NV_ASSERT(status == NV_OK); + + // Patch signatures (only if image copy above succeeded) + if (status == NV_OK) + { + status = s_patchBooterUcodeSignature(pGpu, + bIsForNvdec, patchMeta.ucodeId, + pMappedUcodeMem, patchLoc, pUcode->size, + pSignatures, signaturesTotalSize, numSigs); + NV_ASSERT(status == NV_OK); + } + + memdescUnmapInternal(pGpu, pUcode->pUcodeMemDesc, TRANSFER_FLAGS_DESTROY_MAPPING); + pMappedUcodeMem = NULL; + + if (status != NV_OK) + { + goto out; + } + } + else + { + KernelGspFlcnUcodeBootDirect *pUcode = &pFlcnUcode->ucodeBootDirect; + + pFlcnUcode->bootType = KGSP_FLCN_UCODE_BOOT_DIRECT; + + pUcode->size = bindataGetBufferSize(pBinImage); + + pUcode->imemNsPa = header.osCodeOffset; + pUcode->imemNsSize = header.osCodeSize; + pUcode->imemSecPa = header.appCodeOffset; + pUcode->imemSecSize = header.appCodeSize; + + pUcode->dataOffset = header.osDataOffset; + pUcode->dmemPa = 0; + pUcode->dmemSize = header.osDataSize; + + pUcode->pImage = portMemAllocNonPaged(pUcode->size); + if (pUcode->pImage == NULL) + { + status = NV_ERR_NO_MEMORY; + goto out; + } + + // Copy in the whole image + NV_ASSERT_OK_OR_GOTO(status, + bindataWriteToBuffer(pBinImage, pUcode->pImage, pUcode->size), + out); + + // Patch signatures + NV_ASSERT_OK_OR_GOTO(status, + s_patchBooterUcodeSignature(pGpu, + bIsForNvdec, patchMeta.ucodeId, + pUcode->pImage, patchLoc, pUcode->size, + pSignatures, signaturesTotalSize, numSigs), + out); + } + +out: + portMemFree(pSignatures); + pSignatures = NULL; + + if (status == NV_OK) + { + *ppFlcnUcode = pFlcnUcode; + } + else + { + 
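+ // On any failure above, release the partially constructed ucode so the
+ // caller never sees a half-initialized KernelGspFlcnUcode.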
kgspFreeFlcnUcode(pFlcnUcode); + pFlcnUcode = NULL; + } + + return status; +} + +NV_STATUS +kgspAllocateBooterLoadUcodeImage_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + KernelGspFlcnUcode **ppBooterLoadUcode // out +) +{ + const BINDATA_ARCHIVE *pBinArchive; + + NV_ASSERT_OR_RETURN(ppBooterLoadUcode != NULL, NV_ERR_INVALID_ARGUMENT); + + pBinArchive = kgspGetBinArchiveBooterLoadUcode_HAL(pKernelGsp); + NV_ASSERT_OR_RETURN(pBinArchive != NULL, NV_ERR_NOT_SUPPORTED); + + return s_allocateUcodeFromBinArchive(pGpu, pKernelGsp, pBinArchive, + NV_FALSE /* i.e. not NVDEC */, ppBooterLoadUcode); +} + +NV_STATUS +kgspAllocateBooterReloadUcodeImage_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + KernelGspFlcnUcode **ppBooterReloadUcode // out +) +{ + const BINDATA_ARCHIVE *pBinArchive; + + NV_ASSERT_OR_RETURN(ppBooterReloadUcode != NULL, NV_ERR_INVALID_ARGUMENT); + + pBinArchive = kgspGetBinArchiveBooterReloadUcode_HAL(pKernelGsp); + NV_ASSERT_OR_RETURN(pBinArchive != NULL, NV_ERR_NOT_SUPPORTED); + + return s_allocateUcodeFromBinArchive(pGpu, pKernelGsp, pBinArchive, + NV_TRUE /* i.e. NVDEC */, ppBooterReloadUcode); +} + +NV_STATUS +kgspAllocateBooterUnloadUcodeImage_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + KernelGspFlcnUcode **ppBooterUnloadUcode // out +) +{ + const BINDATA_ARCHIVE *pBinArchive; + + NV_ASSERT_OR_RETURN(ppBooterUnloadUcode != NULL, NV_ERR_INVALID_ARGUMENT); + + pBinArchive = kgspGetBinArchiveBooterUnloadUcode_HAL(pKernelGsp); + NV_ASSERT_OR_RETURN(pBinArchive != NULL, NV_ERR_NOT_SUPPORTED); + + return s_allocateUcodeFromBinArchive(pGpu, pKernelGsp, pBinArchive, + NV_FALSE /* i.e. not NVDEC */, ppBooterUnloadUcode); +} diff --git a/src/nvidia/src/kernel/gpu/gsp/kernel_gsp_fwsec.c b/src/nvidia/src/kernel/gpu/gsp/kernel_gsp_fwsec.c new file mode 100644 index 000000000..e712d8e5c --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/kernel_gsp_fwsec.c @@ -0,0 +1,1092 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * KernelGsp functions and helpers for parsing FWSEC ucode from a + * VBIOS image. + * + * TODO: JIRA CORERM-4685: Consider moving stuff in here to, e.g. 
KernelVbios + */ + +#include "gpu/gsp/kernel_gsp.h" + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/gpu.h" +#include "gpu/vbios/bios_types.h" +#include "gpu/mem_mgr/mem_desc.h" + +// --------------------------------------------------------------------------- +// BIOS Information Table (BIT) structures and defines +// (header, tokens, falcon ucodes) +// --------------------------------------------------------------------------- + +#define BIT_HEADER_ID 0xB8FF +#define BIT_HEADER_SIGNATURE 0x00544942 // "BIT\0" +#define BIT_HEADER_SIZE_OFFSET 8 + +struct BIT_HEADER_V1_00 +{ + bios_U016 Id; + bios_U032 Signature; + bios_U016 BCD_Version; + bios_U008 HeaderSize; + bios_U008 TokenSize; + bios_U008 TokenEntries; + bios_U008 HeaderChksum; +}; +#define BIT_HEADER_V1_00_FMT "1w1d1w4b" +typedef struct BIT_HEADER_V1_00 BIT_HEADER_V1_00; + +struct BIT_TOKEN_V1_00 +{ + bios_U008 TokenId; + bios_U008 DataVersion; + bios_U016 DataSize; + bios_U016 DataPtr; +}; +#define BIT_TOKEN_V1_00_FMT "2b2w" +typedef struct BIT_TOKEN_V1_00 BIT_TOKEN_V1_00; + +#define BIT_TOKEN_FALCON_DATA 0x70 + +typedef struct +{ + bios_U032 FalconUcodeTablePtr; +} BIT_DATA_FALCON_DATA_V2; + +#define BIT_DATA_FALCON_DATA_V2_4_FMT "1d" +#define BIT_DATA_FALCON_DATA_V2_SIZE_4 4 + +typedef struct +{ + bios_U008 Version; + bios_U008 HeaderSize; + bios_U008 EntrySize; + bios_U008 EntryCount; + bios_U008 DescVersion; + bios_U008 DescSize; +} FALCON_UCODE_TABLE_HDR_V1; + +#define FALCON_UCODE_TABLE_HDR_V1_VERSION 1 +#define FALCON_UCODE_TABLE_HDR_V1_SIZE_6 6 +#define FALCON_UCODE_TABLE_HDR_V1_6_FMT "6b" + +typedef struct +{ + bios_U008 ApplicationID; + bios_U008 TargetID; + bios_U032 DescPtr; +} FALCON_UCODE_TABLE_ENTRY_V1; + +#define FALCON_UCODE_TABLE_ENTRY_V1_VERSION 1 +#define FALCON_UCODE_TABLE_ENTRY_V1_SIZE_6 6 +#define FALCON_UCODE_TABLE_ENTRY_V1_6_FMT "2b1d" + +#define FALCON_UCODE_ENTRY_APPID_FIRMWARE_SEC_LIC 0x05 +#define FALCON_UCODE_ENTRY_APPID_FWSEC_DBG 0x45 +#define FALCON_UCODE_ENTRY_APPID_FWSEC_PROD 0x85 + +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_FLAGS_VERSION 0:0 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_FLAGS_VERSION_UNAVAILABLE 0x00 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_FLAGS_VERSION_AVAILABLE 0x01 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_FLAGS_RESERVED 1:1 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_FLAGS_ENCRYPTED 2:2 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_RESERVED 7:3 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION 15:8 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V1 0x01 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V2 0x02 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V3 0x03 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V4 0x04 +#define NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_SIZE 31:16 + +typedef struct +{ + bios_U032 vDesc; +} FALCON_UCODE_DESC_HEADER; +#define FALCON_UCODE_DESC_HEADER_FORMAT "1d" + +typedef struct +{ + FALCON_UCODE_DESC_HEADER Hdr; + bios_U032 StoredSize; + bios_U032 UncompressedSize; + bios_U032 VirtualEntry; + bios_U032 InterfaceOffset; + bios_U032 IMEMPhysBase; + bios_U032 IMEMLoadSize; + bios_U032 IMEMVirtBase; + bios_U032 IMEMSecBase; + bios_U032 IMEMSecSize; + bios_U032 DMEMOffset; + bios_U032 DMEMPhysBase; + bios_U032 DMEMLoadSize; + bios_U032 altIMEMLoadSize; + bios_U032 altDMEMLoadSize; +} FALCON_UCODE_DESC_V2; + +#define FALCON_UCODE_DESC_V2_SIZE_60 60 +#define FALCON_UCODE_DESC_V2_60_FMT "15d" + +typedef struct { + FALCON_UCODE_DESC_HEADER Hdr; + bios_U032 StoredSize; + bios_U032 PKCDataOffset; + 
bios_U032 InterfaceOffset; + bios_U032 IMEMPhysBase; + bios_U032 IMEMLoadSize; + bios_U032 IMEMVirtBase; + bios_U032 DMEMPhysBase; + bios_U032 DMEMLoadSize; + bios_U016 EngineIdMask; + bios_U008 UcodeId; + bios_U008 SignatureCount; + bios_U016 SignatureVersions; + bios_U016 Reserved; +} FALCON_UCODE_DESC_V3; + +#define FALCON_UCODE_DESC_V3_SIZE_44 44 +#define FALCON_UCODE_DESC_V3_44_FMT "9d1w2b2w" +#define BCRT30_RSA3K_SIG_SIZE 384 + +typedef union +{ + // v1 is unused on platforms supported by GSP-RM + FALCON_UCODE_DESC_V2 v2; + FALCON_UCODE_DESC_V3 v3; +} FALCON_UCODE_DESC_UNION; + +typedef struct FlcnUcodeDescFromBit +{ + NvU32 descVersion; + NvU32 descOffset; + NvU32 descSize; + FALCON_UCODE_DESC_UNION descUnion; +} FlcnUcodeDescFromBit; + + +// --------------------------------------------------------------------------- +// Functions for parsing FWSEC falcon ucode from VBIOS image +// --------------------------------------------------------------------------- + +/*! + * Calculate packed data size based on given data format + * + * @param[in] format Data format + */ +static NvU32 +s_biosStructCalculatePackedSize +( + const char *format +) +{ + + NvU32 packedSize = 0; + NvU32 count; + char fmt; + + while ((fmt = *format++)) + { + count = 0; + while ((fmt >= '0') && (fmt <= '9')) + { + count *= 10; + count += fmt - '0'; + fmt = *format++; + } + if (count == 0) + count = 1; + + switch (fmt) + { + case 'b': + packedSize += count * 1; + break; + + case 's': // signed byte + packedSize += count * 1; + break; + + case 'w': + packedSize += count * 2; + break; + + case 'd': + packedSize += count * 4; + break; + } + } + + return packedSize; +} + +/*! + * Parse packed little endian data and unpack into padded structure. + * + * @param[in] packedData Packed little endien data + * @param[out] unpackedData Unpacked padded structure + * @param[in] format Data format + */ +static NV_STATUS +s_biosUnpackStructure +( + const NvU8 *packedData, + NvU32 *unpackedData, // out + const char *format +) +{ + + NvU32 count; + NvU32 data; + char fmt; + + while ((fmt = *format++)) + { + count = 0; + while ((fmt >= '0') && (fmt <= '9')) + { + count *= 10; + count += fmt - '0'; + fmt = *format++; + } + if (count == 0) + count = 1; + + while (count--) + { + switch (fmt) + { + case 'b': + data = *packedData++; + break; + + case 's': // signed byte + data = *packedData++; + if (data & 0x80) + data |= ~0xff; + break; + + case 'w': + data = *packedData++; + data |= *packedData++ << 8; + break; + + case 'd': + data = *packedData++; + data |= *packedData++ << 8; + data |= *packedData++ << 16; + data |= *packedData++ << 24; + break; + + default: + return NV_ERR_GENERIC; + } + *unpackedData++ = data; + } + } + + return NV_OK; +} + +/*! + * Read packed little endian data and unpack it to padded structure. 
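+ *
+ * For example, the format string "1w1d1w4b" used for BIT_HEADER_V1_00 above
+ * describes one 16-bit word, one 32-bit dword, another word and four bytes
+ * ('b' byte, 's' signed byte, 'w' word, 'd' dword), i.e. a packed size of
+ * 12 bytes.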
+ * + * @param[in] pVbiosImg VBIOS image containing packed little endien data + * @param[out] pStructure Unpacked padded structure + * @param[in] offset Offset within packed data + * @param[in] format Data format + */ +static NV_STATUS +s_vbiosReadStructure +( + const KernelGspVbiosImg * const pVbiosImg, + void *pStructure, // out + const NvU32 offset, + const char *format +) +{ + + NvU32 packedSize; + NvU32 maxOffset; + NvBool bSafe; + + // check for overflow in offset + packedSize + packedSize = s_biosStructCalculatePackedSize(format); + + bSafe = portSafeAddU32(offset, packedSize, &maxOffset); + if (NV_UNLIKELY(!bSafe || maxOffset > pVbiosImg->biosSize)) + { + return NV_ERR_INVALID_OFFSET; + } + + return s_biosUnpackStructure(pVbiosImg->pImage + offset, pStructure, format); +} + +static NvU8 s_vbiosRead8(const KernelGspVbiosImg *pVbiosImg, NvU32 offset, NV_STATUS *pStatus) +{ + bios_U008 data; // ReadStructure expects 'bios' types + if (NV_UNLIKELY(*pStatus != NV_OK)) + { + return 0; + } + *pStatus = s_vbiosReadStructure(pVbiosImg, &data, offset, "b"); + return (NvU8) data; +} + +static NvU16 s_vbiosRead16(const KernelGspVbiosImg *pVbiosImg, NvU32 offset, NV_STATUS *pStatus) +{ + bios_U016 data; // ReadStructure expects 'bios' types + if (NV_UNLIKELY(*pStatus != NV_OK)) + { + return 0; + } + *pStatus = s_vbiosReadStructure(pVbiosImg, &data, offset, "w"); + return (NvU16) data; +} + +static NvU32 s_vbiosRead32(const KernelGspVbiosImg *pVbiosImg, NvU32 offset, NV_STATUS *pStatus) +{ + bios_U032 data; // ReadStructure expects 'bios' types + if (NV_UNLIKELY(*pStatus != NV_OK)) + { + return 0; + } + *pStatus = s_vbiosReadStructure(pVbiosImg, &data, offset, "d"); + return (NvU32) data; +} + +/*! + * Find offset of BIT header (BIOS Information Table header) within VBIOS image. + * + * @param[in] pVbiosImg VBIOS image + * @param[out] pBitAddr Offset of BIT header (if found) + */ +static NV_STATUS +s_vbiosFindBitHeader +( + const KernelGspVbiosImg * const pVbiosImg, + NvU32 *pBitAddr // out +) +{ + + NV_STATUS status = NV_OK; + NvU32 addr; + + NV_ASSERT_OR_RETURN(pVbiosImg != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVbiosImg->pImage != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVbiosImg->biosSize > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBitAddr != NULL, NV_ERR_INVALID_ARGUMENT); + + for (addr = 0; addr < pVbiosImg->biosSize - 3; addr++) + { + if ((s_vbiosRead16(pVbiosImg, addr, &status) == BIT_HEADER_ID) && + (s_vbiosRead32(pVbiosImg, addr + 2, &status) == BIT_HEADER_SIGNATURE)) + { + // found candidate BIT header + NvU32 candidateBitAddr = addr; + + // verify BIT header checksum + NvU32 headerSize = s_vbiosRead8(pVbiosImg, + candidateBitAddr + BIT_HEADER_SIZE_OFFSET, &status); + + NvU32 checksum = 0; + NvU32 j; + + for (j = 0; j < headerSize; j++) + { + checksum += (NvU32) s_vbiosRead8(pVbiosImg, candidateBitAddr + j, &status); + } + + NV_ASSERT_OK_OR_RETURN(status); + + if ((checksum & 0xFF) == 0x0) + { + // found! + // candidate BIT header passes checksum, lets use it + *pBitAddr = candidateBitAddr; + return status; + } + } + + NV_ASSERT_OK_OR_RETURN(status); + } + + // not found + return NV_ERR_GENERIC; +} + +/*! + * Find and parse a ucode desc (from BIT) for FWSEC from VBIOS image. 
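+ *
+ * Roughly, the chain walked here is: BIT header -> BIT_TOKEN_FALCON_DATA
+ * token -> falcon ucode table in the expansion ROM -> table entry whose
+ * ApplicationID matches FWSEC (debug or prod) -> that entry's V2 or V3
+ * ucode descriptor, which is unpacked into pFwsecUcodeDescFromBit.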
+ * + * @param[in] pVbiosImg VBIOS image + * @param[in] bitAddr Offset of BIT header within VBIOS image + * @param[in] bUseDebugFwsec Whether to look for debug or prod FWSEC + * @param[out] pFwsecUcodeDescFromBit Resulting ucode desc + */ +static NV_STATUS +s_vbiosParseFwsecUcodeDescFromBit +( + const KernelGspVbiosImg * const pVbiosImg, + const NvU32 bitAddr, + const NvBool bUseDebugFwsec, + FlcnUcodeDescFromBit *pFwsecUcodeDescFromBit // out +) +{ + + NV_STATUS status; + BIT_HEADER_V1_00 bitHeader; + NvU32 tokIdx; + + NV_ASSERT_OR_RETURN(pVbiosImg != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVbiosImg->pImage != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVbiosImg->biosSize > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pFwsecUcodeDescFromBit != NULL, NV_ERR_INVALID_ARGUMENT); + + // read BIT header + status = s_vbiosReadStructure(pVbiosImg, &bitHeader, bitAddr, BIT_HEADER_V1_00_FMT); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to read BIT table structure: 0x%x\n", status); + return status; + } + + // loop through all BIT tokens + for (tokIdx = 0; tokIdx < bitHeader.TokenEntries; tokIdx++) + { + BIT_TOKEN_V1_00 bitToken; + + BIT_DATA_FALCON_DATA_V2 falconData; + FALCON_UCODE_TABLE_HDR_V1 ucodeHeader; + NvU32 entryIdx; + + // read BIT token + status = s_vbiosReadStructure(pVbiosImg, &bitToken, + bitAddr + bitHeader.HeaderSize + + tokIdx * bitHeader.TokenSize, + BIT_TOKEN_V1_00_FMT); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to read BIT token %u, skipping: 0x%x\n", + tokIdx, status); + continue; + } + + // skip tokens that are not for falcon ucode data v2 + if (bitToken.TokenId != BIT_TOKEN_FALCON_DATA || + bitToken.DataVersion != 2 || + bitToken.DataSize < BIT_DATA_FALCON_DATA_V2_SIZE_4) + { + continue; + } + + // read falcon ucode data + status = s_vbiosReadStructure(pVbiosImg, &falconData, + bitToken.DataPtr, BIT_DATA_FALCON_DATA_V2_4_FMT); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to read Falcon ucode data (BIT token %u), skipping: 0x%x\n", + tokIdx, status); + continue; + } + + // read falcon ucode header + status = s_vbiosReadStructure(pVbiosImg, &ucodeHeader, + pVbiosImg->expansionRomOffset + + falconData.FalconUcodeTablePtr, + FALCON_UCODE_TABLE_HDR_V1_6_FMT); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to read Falcon ucode header (BIT token %u), skipping: 0x%x\n", + tokIdx, status); + continue; + } + + // skip headers with undesired version + if ((ucodeHeader.Version != FALCON_UCODE_TABLE_HDR_V1_VERSION) || + (ucodeHeader.HeaderSize < FALCON_UCODE_TABLE_HDR_V1_SIZE_6) || + (ucodeHeader.EntrySize < FALCON_UCODE_TABLE_ENTRY_V1_SIZE_6)) + { + continue; + } + + // loop through falcon ucode entries + for (entryIdx = 0; entryIdx < ucodeHeader.EntryCount; entryIdx++) + { + FALCON_UCODE_TABLE_ENTRY_V1 ucodeEntry; + FALCON_UCODE_DESC_HEADER ucodeDescHdr; + + NvU8 ucodeDescVersion; + NvU32 ucodeDescSize; + NvU32 ucodeDescOffset; + const char *ucodeDescFmt; + + status = s_vbiosReadStructure(pVbiosImg, &ucodeEntry, + pVbiosImg->expansionRomOffset + + falconData.FalconUcodeTablePtr + + ucodeHeader.HeaderSize + + entryIdx * ucodeHeader.EntrySize, + FALCON_UCODE_TABLE_ENTRY_V1_6_FMT); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to read Falcon ucode entry %u (BIT token %u), skipping: 0x%x\n", + entryIdx, tokIdx, status); + continue; + } + + // skip entries that are not FWSEC + if (ucodeEntry.ApplicationID != FALCON_UCODE_ENTRY_APPID_FIRMWARE_SEC_LIC && + ((bUseDebugFwsec && 
(ucodeEntry.ApplicationID != FALCON_UCODE_ENTRY_APPID_FWSEC_DBG)) || + (!bUseDebugFwsec && (ucodeEntry.ApplicationID != FALCON_UCODE_ENTRY_APPID_FWSEC_PROD)))) + { + continue; + } + + // determine desc version, format, and size + status = s_vbiosReadStructure(pVbiosImg, &ucodeDescHdr, + pVbiosImg->expansionRomOffset + ucodeEntry.DescPtr, + FALCON_UCODE_DESC_HEADER_FORMAT); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to read Falcon ucode desc header for entry %u (BIT token %u), skipping: 0x%x\n", + entryIdx, tokIdx, status); + continue; + } + + // skip entries with desc version not V2 and V3 (FWSEC should be either V2 or V3) + if (FLD_TEST_DRF(_BIT, _FALCON_UCODE_DESC_HEADER_VDESC_FLAGS, _VERSION, _UNAVAILABLE, ucodeDescHdr.vDesc)) + { + NV_PRINTF(LEVEL_ERROR, + "unexpected ucode desc version missing for entry %u (BIT token %u), skipping\n", + entryIdx, tokIdx); + continue; + } + else + { + ucodeDescVersion = (NvU8) DRF_VAL(_BIT, _FALCON_UCODE_DESC_HEADER_VDESC, _VERSION, ucodeDescHdr.vDesc); + ucodeDescSize = DRF_VAL(_BIT, _FALCON_UCODE_DESC_HEADER_VDESC, _SIZE, ucodeDescHdr.vDesc); + } + + if (ucodeDescVersion == NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V2 && + ucodeDescSize >= FALCON_UCODE_DESC_V2_SIZE_60) + { + ucodeDescFmt = FALCON_UCODE_DESC_V2_60_FMT; + } + else if (ucodeDescVersion == NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V3 && + ucodeDescSize >= FALCON_UCODE_DESC_V3_SIZE_44) + { + ucodeDescFmt = FALCON_UCODE_DESC_V3_44_FMT; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "unexpected ucode desc version 0x%x or size 0x%x for entry %u (BIT token %u), skipping\n", + ucodeDescVersion, ucodeDescSize, entryIdx, tokIdx); + continue; + } + + ucodeDescOffset = ucodeEntry.DescPtr + pVbiosImg->expansionRomOffset; + + status = s_vbiosReadStructure(pVbiosImg, &pFwsecUcodeDescFromBit->descUnion, + ucodeDescOffset, ucodeDescFmt); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to read Falcon ucode desc (desc version 0x%x) for entry %u (BIT token %u), skipping: 0x%x\n", + ucodeDescVersion, entryIdx, tokIdx, status); + continue; + } + + pFwsecUcodeDescFromBit->descVersion = ucodeDescVersion; + pFwsecUcodeDescFromBit->descOffset = ucodeDescOffset; + pFwsecUcodeDescFromBit->descSize = ucodeDescSize; + + return NV_OK; + } + } + + // not found + return NV_ERR_INVALID_DATA; +} + +/*! + * Fill a KernelGspFlcnUcode structure from a V2 ucode desc (from BIT). 
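+ *
+ * A V2 desc uses the boot-with-loader path: code and data are copied into
+ * two separate 256-byte-aligned sysmem buffers, and the secure IMEM base is
+ * translated from virtual to physical space as
+ * IMEMSecBase - IMEMVirtBase + IMEMPhysBase (imemSecPa below).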
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pVbiosImg VBIOS image + * @param[in] pDescV2 V2 ucode desc (from BIT) + * @param[in] descOffset Offset of ucode desc (from BIT) in VBIOS image + * @param[in] descSize Size of ucode desc (from BIT) + * @param[out] pFlcnUcode KernelGspFlcnUcode structure to fill + */ +static NV_STATUS +s_vbiosFillFlcnUcodeFromDescV2 +( + OBJGPU *pGpu, // for memdesc + const KernelGspVbiosImg * const pVbiosImg, + const FALCON_UCODE_DESC_V2 * const pDescV2, + const NvU32 descOffset, + const NvU32 descSize, + KernelGspFlcnUcode *pFlcnUcode // out +) +{ + NV_STATUS status; + KernelGspFlcnUcodeBootWithLoader *pUcode = NULL; + + NvU8 *pMappedCodeMem = NULL; + NvU8 *pMappedDataMem = NULL; + + NvBool bSafe; + NvU32 codeSizeAligned; + NvU32 dataSizeAligned; + + // offsets within pVbiosImg->pImage + NvU32 imageCodeOffset; + NvU32 imageCodeMaxOffset; + NvU32 imageDataOffset; + NvU32 imageDataMaxOffset; + + // offsets within mapped mem + NvU32 mappedCodeMaxOffset; + NvU32 mappedDataMaxOffset; + + NV_ASSERT(pVbiosImg != NULL); + NV_ASSERT(pVbiosImg->pImage != NULL); + NV_ASSERT(pDescV2 != NULL); + NV_ASSERT(pFlcnUcode != NULL); + + pFlcnUcode->bootType = KGSP_FLCN_UCODE_BOOT_WITH_LOADER; + pUcode = &pFlcnUcode->ucodeBootWithLoader; + + pUcode->pCodeMemDesc = NULL; + pUcode->pDataMemDesc = NULL; + + pUcode->codeOffset = 0; + pUcode->imemSize = pDescV2->IMEMLoadSize; + pUcode->imemNsSize = pDescV2->IMEMLoadSize - pDescV2->IMEMSecSize; + pUcode->imemNsPa = pDescV2->IMEMPhysBase; + pUcode->imemSecSize = NV_ALIGN_UP(pDescV2->IMEMSecSize, 256); + pUcode->imemSecPa = pDescV2->IMEMSecBase - pDescV2->IMEMVirtBase + pDescV2->IMEMPhysBase; + pUcode->codeEntry = pDescV2->VirtualEntry; // 0? + + pUcode->dataOffset = pDescV2->DMEMOffset; + pUcode->dmemSize = pDescV2->DMEMLoadSize; + pUcode->dmemPa = pDescV2->DMEMPhysBase; + + pUcode->interfaceOffset = pDescV2->InterfaceOffset; + + codeSizeAligned = NV_ALIGN_UP(pUcode->imemSize, 256); + dataSizeAligned = NV_ALIGN_UP(pUcode->dmemSize, 256); + + // verify offsets within pVbiosImg->pImage + bSafe = portSafeAddU32(descOffset, descSize, &imageCodeOffset); + if (!bSafe || imageCodeOffset >= pVbiosImg->biosSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(imageCodeOffset, pUcode->imemSize, &imageCodeMaxOffset); + if (!bSafe || imageCodeMaxOffset > pVbiosImg->biosSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(imageCodeOffset, pUcode->dataOffset, &imageDataOffset); + if (!bSafe || imageDataOffset >= pVbiosImg->biosSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(imageDataOffset, pUcode->dmemSize, &imageDataMaxOffset); + if (!bSafe || imageDataMaxOffset > pVbiosImg->biosSize) + { + return NV_ERR_INVALID_OFFSET; + } + + // verify offsets within mapped mem + if (pUcode->imemNsPa >= codeSizeAligned) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(pUcode->imemNsPa, pUcode->imemSize, &mappedCodeMaxOffset); + if (!bSafe || mappedCodeMaxOffset > codeSizeAligned) + { + return NV_ERR_INVALID_OFFSET; + } + + if (pUcode->dmemPa >= dataSizeAligned) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(pUcode->dmemPa, pUcode->dmemSize, &mappedDataMaxOffset); + if (!bSafe || mappedDataMaxOffset > dataSizeAligned) + { + return NV_ERR_INVALID_OFFSET; + } + + NV_ASSERT_OK_OR_RETURN( + memdescCreate(&pUcode->pCodeMemDesc, pGpu, codeSizeAligned, + 256, NV_TRUE, ADDR_SYSMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)); + + 
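+ // The DMEM payload gets its own 256-byte-aligned, physically contiguous
+ // sysmem descriptor, created the same way as the code descriptor above.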
NV_ASSERT_OK_OR_RETURN( + memdescCreate(&pUcode->pDataMemDesc, pGpu, dataSizeAligned, + 256, NV_TRUE, ADDR_SYSMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)); + + status = memdescAlloc(pUcode->pCodeMemDesc); + if (status != NV_OK) + { + return status; + } + + status = memdescAlloc(pUcode->pDataMemDesc); + if (status != NV_OK) + { + return status; + } + + pMappedCodeMem = memdescMapInternal(pGpu, pUcode->pCodeMemDesc, TRANSFER_FLAGS_NONE); + if (pMappedCodeMem == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pMappedDataMem = memdescMapInternal(pGpu, pUcode->pDataMemDesc, TRANSFER_FLAGS_NONE); + if (pMappedDataMem == NULL) + { + memdescUnmapInternal(pGpu, pUcode->pCodeMemDesc, + TRANSFER_FLAGS_DESTROY_MAPPING); + pMappedCodeMem = NULL; + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + portMemSet(pMappedCodeMem, 0, codeSizeAligned); + portMemCopy(pMappedCodeMem + pUcode->imemNsPa, pUcode->imemSize, + pVbiosImg->pImage + imageCodeOffset, pUcode->imemSize); + + portMemSet(pMappedDataMem, 0, dataSizeAligned); + portMemCopy(pMappedDataMem + pUcode->dmemPa, pUcode->dmemSize, + pVbiosImg->pImage + imageDataOffset, pUcode->dmemSize); + + memdescUnmapInternal(pGpu, pUcode->pCodeMemDesc, + TRANSFER_FLAGS_DESTROY_MAPPING); + pMappedCodeMem = NULL; + memdescUnmapInternal(pGpu, pUcode->pDataMemDesc, + TRANSFER_FLAGS_DESTROY_MAPPING); + pMappedDataMem = NULL; + + return status; +} + +/*! + * Fill a KernelGspFlcnUcode structure from a V3 ucode desc (from BIT). + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pVbiosImg VBIOS image + * @param[in] pDescV3 V3 ucode desc (from BIT) + * @param[in] descOffset Offset of ucode desc (from BIT) in VBIOS image + * @param[in] descSize Size of ucode desc (from BIT) + * @param[out] pFlcnUcode KernelGspFlcnUcode structure to fill + */ +static NV_STATUS +s_vbiosFillFlcnUcodeFromDescV3 +( + OBJGPU *pGpu, // for memdesc + const KernelGspVbiosImg * const pVbiosImg, + const FALCON_UCODE_DESC_V3 * const pDescV3, + const NvU32 descOffset, + const NvU32 descSize, + KernelGspFlcnUcode *pFlcnUcode // out +) +{ + NV_STATUS status; + KernelGspFlcnUcodeBootFromHs *pUcode = NULL; + + NvU8 *pMappedUcodeMem = NULL; + NvBool bSafe; + + // offsets within pVbiosImg->pImage + NvU32 imageOffset; + NvU32 imageMaxOffset; + + // offsets with ucode image + NvU32 dataMaxOffset; + NvU32 sigDataOffset; + NvU32 sigDataMaxOffset; + + NV_ASSERT(pVbiosImg != NULL); + NV_ASSERT(pVbiosImg->pImage != NULL); + NV_ASSERT(pDescV3 != NULL); + NV_ASSERT(pFlcnUcode != NULL); + + pFlcnUcode->bootType = KGSP_FLCN_UCODE_BOOT_FROM_HS; + pUcode = &pFlcnUcode->ucodeBootFromHs; + + pUcode->pUcodeMemDesc = NULL; + pUcode->size = RM_ALIGN_UP(pDescV3->StoredSize, 256); + + pUcode->codeOffset = 0; + pUcode->imemSize = pDescV3->IMEMLoadSize; + pUcode->imemPa = pDescV3->IMEMPhysBase; + pUcode->imemVa = pDescV3->IMEMVirtBase; + + pUcode->dataOffset = pUcode->imemSize; + pUcode->dmemSize = pDescV3->DMEMLoadSize; + pUcode->dmemPa = pDescV3->DMEMPhysBase; + pUcode->dmemVa = FLCN_DMEM_VA_INVALID; + + pUcode->hsSigDmemAddr = pDescV3->PKCDataOffset; + pUcode->ucodeId = pDescV3->UcodeId; + pUcode->engineIdMask = pDescV3->EngineIdMask; + + pUcode->pSignatures = NULL; + pUcode->signaturesTotalSize = 0; + pUcode->sigSize = BCRT30_RSA3K_SIG_SIZE; + pUcode->sigCount = pDescV3->SignatureCount; + + pUcode->vbiosSigVersions = pDescV3->SignatureVersions; + pUcode->interfaceOffset = pDescV3->InterfaceOffset; + + // compute imageOffset and sanity check size + bSafe = portSafeAddU32(descOffset, descSize, 
&imageOffset); + if (!bSafe || imageOffset >= pVbiosImg->biosSize) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(imageOffset, pUcode->size, &imageMaxOffset); + if (!bSafe || imageMaxOffset > pVbiosImg->biosSize) + { + return NV_ERR_INVALID_OFFSET; + } + + // sanity check imemSize + if (pUcode->imemSize > pUcode->size) + { + return NV_ERR_INVALID_OFFSET; + } + + // sanity check dataOffset and dataSize + if (pUcode->dataOffset >= pUcode->size) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(pUcode->dataOffset, pUcode->dmemSize, &dataMaxOffset); + if (!bSafe || dataMaxOffset > pUcode->size) + { + return NV_ERR_INVALID_OFFSET; + } + + // sanity check hsSigDmemAddr + bSafe = portSafeAddU32(pUcode->dataOffset, pUcode->hsSigDmemAddr, &sigDataOffset); + if (!bSafe || sigDataOffset >= pUcode->size) + { + return NV_ERR_INVALID_OFFSET; + } + + bSafe = portSafeAddU32(sigDataOffset, pUcode->sigSize, &sigDataMaxOffset); + if (!bSafe || sigDataMaxOffset > pUcode->size) + { + return NV_ERR_INVALID_OFFSET; + } + + // compute signaturesTotalSize and populate pSignatures + if (descSize < FALCON_UCODE_DESC_V3_SIZE_44) + { + return NV_ERR_INVALID_STATE; + } + pUcode->signaturesTotalSize = descSize - FALCON_UCODE_DESC_V3_SIZE_44; + + pUcode->pSignatures = portMemAllocNonPaged(pUcode->signaturesTotalSize); + if (pUcode->pSignatures == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemCopy(pUcode->pSignatures, pUcode->signaturesTotalSize, + pVbiosImg->pImage + descOffset + FALCON_UCODE_DESC_V3_SIZE_44, pUcode->signaturesTotalSize); + + NV_ASSERT_OK_OR_RETURN( + memdescCreate(&pUcode->pUcodeMemDesc, pGpu, pUcode->size, + 256, NV_TRUE, ADDR_SYSMEM, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE)); + + status = memdescAlloc(pUcode->pUcodeMemDesc); + if (status != NV_OK) + { + return status; + } + + pMappedUcodeMem = memdescMapInternal(pGpu, pUcode->pUcodeMemDesc, TRANSFER_FLAGS_NONE); + if (pMappedUcodeMem == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + portMemCopy(pMappedUcodeMem, pUcode->size, + pVbiosImg->pImage + imageOffset, pUcode->size); + + memdescUnmapInternal(pGpu, pUcode->pUcodeMemDesc, + TRANSFER_FLAGS_DESTROY_MAPPING); + pMappedUcodeMem = NULL; + + return status; +} + +/*! + * Create a new KernelGspFlcnUcode structure from a ucode desc (from BIT). + * + * The resulting KernelGspFlcnUcode should be freed with kgspFreeFlcnUcode + * after use. 
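+ *
+ * Typical call sequence (as used by kgspParseFwsecUcodeFromVbiosImg_IMPL
+ * below): s_vbiosFindBitHeader() -> s_vbiosParseFwsecUcodeDescFromBit() ->
+ * s_vbiosNewFlcnUcodeFromDesc().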
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pVbiosImg VBIOS image + * @param[in] pFlcnUcodeDescFromBit V2 ucode desc (from BIT) + * @param[out] ppFlcnUcode Pointer to resulting KernelGspFlcnUcode + */ +static NV_STATUS +s_vbiosNewFlcnUcodeFromDesc +( + OBJGPU *pGpu, // for memdesc + const KernelGspVbiosImg * const pVbiosImg, + const FlcnUcodeDescFromBit * const pFlcnUcodeDescFromBit, + KernelGspFlcnUcode **ppFlcnUcode // out +) +{ + NV_STATUS status; + KernelGspFlcnUcode *pFlcnUcode = NULL; + + NV_ASSERT(pGpu != NULL); + NV_ASSERT(pVbiosImg != NULL); + NV_ASSERT(pFlcnUcodeDescFromBit != NULL); + NV_ASSERT(ppFlcnUcode != NULL); + + pFlcnUcode = portMemAllocNonPaged(sizeof(*pFlcnUcode)); + if (pFlcnUcode == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(pFlcnUcode, 0, sizeof(*pFlcnUcode)); + + if (pFlcnUcodeDescFromBit->descVersion == NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V2) + { + status = s_vbiosFillFlcnUcodeFromDescV2(pGpu, pVbiosImg, + &pFlcnUcodeDescFromBit->descUnion.v2, + pFlcnUcodeDescFromBit->descOffset, + pFlcnUcodeDescFromBit->descSize, + pFlcnUcode); + } + else if (pFlcnUcodeDescFromBit->descVersion == NV_BIT_FALCON_UCODE_DESC_HEADER_VDESC_VERSION_V3) + { + status = s_vbiosFillFlcnUcodeFromDescV3(pGpu, pVbiosImg, + &pFlcnUcodeDescFromBit->descUnion.v3, + pFlcnUcodeDescFromBit->descOffset, + pFlcnUcodeDescFromBit->descSize, + pFlcnUcode); + } + else + { + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to parse/prepare Falcon ucode (desc: version 0x%x, offset 0x%x, size 0x%x): 0x%x\n", + pFlcnUcodeDescFromBit->descVersion, + pFlcnUcodeDescFromBit->descOffset, + pFlcnUcodeDescFromBit->descSize, + status); + + kgspFreeFlcnUcode(pFlcnUcode); + pFlcnUcode = NULL; + } + + *ppFlcnUcode = pFlcnUcode; + return NV_OK; +} + +/*! + * Parse FWSEC ucode from VBIOS image. + * + * The resulting KernelGspFlcnUcode should be freed with kgspFlcnUcodeFree + * after use. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelGsp KernelGsp pointer + * @param[in] pVbiosImg VBIOS image + * @param[out] ppFwsecUcode Pointer to resulting KernelGspFlcnUcode + */ +NV_STATUS +kgspParseFwsecUcodeFromVbiosImg_IMPL +( + OBJGPU *pGpu, + KernelGsp *pKernelGsp, + const KernelGspVbiosImg * const pVbiosImg, + KernelGspFlcnUcode **ppFwsecUcode // out +) +{ + NV_STATUS status; + + FlcnUcodeDescFromBit fwsecUcodeDescFromBit; + NvU32 bitAddr; + NvBool bUseDebugFwsec = NV_FALSE; + + NV_ASSERT_OR_RETURN(!IS_VIRTUAL(pGpu), NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(IS_GSP_CLIENT(pGpu), NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pVbiosImg != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVbiosImg->pImage != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(ppFwsecUcode, NV_ERR_INVALID_ARGUMENT); + + status = s_vbiosFindBitHeader(pVbiosImg, &bitAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to find BIT header in VBIOS image: 0x%x\n", status); + return status; + } + + bUseDebugFwsec = kgspIsDebugModeEnabled_HAL(pGpu, pKernelGsp); + status = s_vbiosParseFwsecUcodeDescFromBit(pVbiosImg, bitAddr, bUseDebugFwsec, &fwsecUcodeDescFromBit); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to parse FWSEC ucode desc from VBIOS image: 0x%x\n", status); + return status; + } + + status = s_vbiosNewFlcnUcodeFromDesc(pGpu, pVbiosImg, &fwsecUcodeDescFromBit, ppFwsecUcode); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to prepare new flcn ucode for FWSEC: 0x%x\n", + status); + return status; + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/gsp/message_queue_cpu.c b/src/nvidia/src/kernel/gpu/gsp/message_queue_cpu.c new file mode 100644 index 000000000..1a3b74a77 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/gsp/message_queue_cpu.c @@ -0,0 +1,628 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * GSP MESSAGE QUEUE - CPU-SIDE CODE + */ + +#include "core/core.h" +#include "core/thread_state.h" + + +#include "os/os.h" + +#include "vgpu/rpc_headers.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + +#define RPC_STRUCTURES +#define RPC_GENERIC_UNION +#include "g_rpc-structures.h" +#undef RPC_STRUCTURES +#undef RPC_GENERIC_UNION + +#define RPC_MESSAGE_STRUCTURES +#define RPC_MESSAGE_GENERIC_UNION +#include "g_rpc-message-header.h" +#undef RPC_MESSAGE_STRUCTURES +#undef RPC_MESSAGE_GENERIC_UNION + +#include "gpu/gsp/message_queue.h" +#include "gpu/gsp/message_queue_priv.h" +#include "msgq/msgq_priv.h" +#include "gpu/gsp/kernel_gsp.h" + +ct_assert(GSP_MSG_QUEUE_HEADER_SIZE > sizeof(msgqTxHeader) + sizeof(msgqRxHeader)); + +static void +_getMsgQueueParams +( + OBJGPU *pGpu, + MESSAGE_QUEUE_INFO *pMQI +) +{ + NvLength queueSize; + NvU32 numPtes; + const NvLength defaultCommandQueueSize = 0x40000; // 256 KB + const NvLength defaultStatusQueueSize = 0x40000; // 256 KB + + if (IS_SILICON(pGpu)) + { + pMQI->commandQueueSize = defaultCommandQueueSize; + pMQI->statusQueueSize = defaultStatusQueueSize; + } + else + { + // + // Pre-silicon platforms need a large command queue in order to send + // the VBIOS image via RPC. + // + pMQI->commandQueueSize = defaultCommandQueueSize * 6; + pMQI->statusQueueSize = defaultStatusQueueSize; + } + + // + // Calculate the number of entries required to map both queues in addition + // to the page table itself. + // + queueSize = pMQI->commandQueueSize + pMQI->statusQueueSize; + NV_ASSERT((queueSize & RM_PAGE_MASK) == 0); + numPtes = (queueSize >> RM_PAGE_SHIFT); + + // Account for the pages needed to store the PTEs + numPtes += NV_DIV_AND_CEIL(numPtes * sizeof(RmPhysAddr), RM_PAGE_SIZE); + + // + // Align the page table size to RM_PAGE_SIZE, so that the command queue is + // aligned. + // + pMQI->pageTableSize = RM_PAGE_ALIGN_UP(numPtes * sizeof(RmPhysAddr)); + pMQI->pageTableEntryCount = numPtes; +} + +/*! + * GspMsgQueueInit + * + * Initialize the command queue for CPU side. + * Must not be called before portInitialize. + */ +NV_STATUS +GspMsgQueueInit +( + OBJGPU *pGpu, + MESSAGE_QUEUE_INFO **ppMQI +) +{ + MESSAGE_QUEUE_INFO *pMQI = NULL; + RmPhysAddr *pPageTbl; + int nRet; + NvP64 pVaKernel; + NvP64 pPrivKernel; + NV_STATUS nvStatus = NV_OK; + NvLength sharedBufSize; + NvLength firstCmdOffset; + NvU32 workAreaSize; + + if (*ppMQI != NULL) + { + NV_PRINTF(LEVEL_ERROR, "GSP message queue was already initialized.\n"); + return NV_ERR_INVALID_STATE; + } + + pMQI = portMemAllocNonPaged(sizeof *pMQI); + if (pMQI == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error allocating queue info area.\n"); + nvStatus = NV_ERR_NO_MEMORY; + goto done; + } + portMemSet(pMQI, 0, sizeof *pMQI); + + _getMsgQueueParams(pGpu, pMQI); + + sharedBufSize = pMQI->pageTableSize + + pMQI->commandQueueSize + + pMQI->statusQueueSize; + + // + // For now, put all shared queue memory in one block. + // + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescCreate(&pMQI->pSharedMemDesc, pGpu, sharedBufSize, + RM_PAGE_SIZE, NV_MEMORY_NONCONTIGUOUS, ADDR_SYSMEM, NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE), + done); + + memdescSetFlag(pMQI->pSharedMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_TRUE); + + NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescAlloc(pMQI->pSharedMemDesc), + error_ret); + + // Create kernel mapping for command queue. 
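+ // A single kernel mapping below covers the page table, the command queue
+ // and the status queue, so all three can be initialized through pVaKernel.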
+ NV_ASSERT_OK_OR_GOTO(nvStatus, + memdescMap(pMQI->pSharedMemDesc, 0, sharedBufSize, + NV_TRUE, NV_PROTECT_WRITEABLE, + &pVaKernel, &pPrivKernel), + error_ret); + + memdescSetKernelMapping(pMQI->pSharedMemDesc, pVaKernel); + memdescSetKernelMappingPriv(pMQI->pSharedMemDesc, pPrivKernel); + + if (pVaKernel == NvP64_NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error allocating message queue shared buffer\n"); + nvStatus = NV_ERR_NO_MEMORY; + goto error_ret; + } + + portMemSet((void *)pVaKernel, 0, sharedBufSize); + + pPageTbl = pVaKernel; + + // Shared memory layout. + // + // Each of the following are page aligned: + // Shared memory layout header (includes page table) + // Command queue header + // Command queue entries + // Status queue header + // Status queue entries + memdescGetPhysAddrs(pMQI->pSharedMemDesc, + AT_GPU, // addressTranslation + 0, // offset + RM_PAGE_SIZE, // stride + pMQI->pageTableEntryCount, // count + pPageTbl); // physical address table + + pMQI->pCommandQueue = NvP64_VALUE( + NvP64_PLUS_OFFSET(pVaKernel, pMQI->pageTableSize)); + + pMQI->pStatusQueue = NvP64_VALUE( + NvP64_PLUS_OFFSET(NV_PTR_TO_NvP64(pMQI->pCommandQueue), pMQI->commandQueueSize)); + + NV_ASSERT(NvP64_PLUS_OFFSET(pVaKernel, sharedBufSize) == + NvP64_PLUS_OFFSET(NV_PTR_TO_NvP64(pMQI->pStatusQueue), pMQI->statusQueueSize)); + + // Allocate work area. + workAreaSize = (1 << GSP_MSG_QUEUE_ELEMENT_ALIGN) + + GSP_MSG_QUEUE_ELEMENT_SIZE_MAX + msgqGetMetaSize(); + pMQI->pWorkArea = portMemAllocNonPaged(workAreaSize); + + if (pMQI->pWorkArea == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error allocating pWorkArea.\n"); + nvStatus = NV_ERR_NO_MEMORY; + goto error_ret; + } + + portMemSet(pMQI->pWorkArea, 0, workAreaSize); + + pMQI->pCmdQueueElement = (GSP_MSG_QUEUE_ELEMENT *) + NV_ALIGN_UP((NvUPtr)pMQI->pWorkArea, 1 << GSP_MSG_QUEUE_ELEMENT_ALIGN); + pMQI->pMetaData = (void *)((NvUPtr)pMQI->pCmdQueueElement + GSP_MSG_QUEUE_ELEMENT_SIZE_MAX); + + nRet = msgqInit(&pMQI->hQueue, pMQI->pMetaData); + if (nRet < 0) + { + NV_PRINTF(LEVEL_ERROR, "msgqInit failed: %d\n", nRet); + nvStatus = NV_ERR_GENERIC; + goto error_ret; + } + + nRet = msgqTxCreate(pMQI->hQueue, + pMQI->pCommandQueue, + pMQI->commandQueueSize, + GSP_MSG_QUEUE_ELEMENT_SIZE_MIN, + GSP_MSG_QUEUE_HEADER_ALIGN, + GSP_MSG_QUEUE_ELEMENT_ALIGN, + MSGQ_FLAGS_SWAP_RX); + if (nRet < 0) + { + NV_PRINTF(LEVEL_ERROR, "msgqTxCreate failed: %d\n", nRet); + nvStatus = NV_ERR_GENERIC; + goto error_ret; + } + + NV_PRINTF(LEVEL_INFO, "Created command queue.\n"); + + *ppMQI = pMQI; + pMQI->sharedMemPA = pPageTbl[0]; + pMQI->pRpcMsgBuf = &pMQI->pCmdQueueElement->rpc; + + firstCmdOffset = pMQI->pageTableSize + GSP_MSG_QUEUE_HEADER_SIZE; + pMQI->pInitMsgBuf = NvP64_PLUS_OFFSET(pVaKernel, firstCmdOffset); + pMQI->initMsgBufPA = pPageTbl[firstCmdOffset >> RM_PAGE_SHIFT] + + (firstCmdOffset & RM_PAGE_MASK); +done: + return nvStatus; + +error_ret: + GspMsgQueueCleanup(&pMQI); + return nvStatus; +} + +NV_STATUS GspStatusQueueInit(OBJGPU *pGpu, MESSAGE_QUEUE_INFO **ppMQI) +{ + NV_STATUS nvStatus = NV_ERR_GENERIC; + int nRet = 0; + int nRetries; + RMTIMEOUT timeout; + NvU32 timeoutUs = 2000000; + NvU32 timeoutFlags = GPU_TIMEOUT_FLAGS_DEFAULT; + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + + // GSP-RM in emulation/simulation environment is extremely slow + if (IS_EMULATION(pGpu) || IS_SIMULATION(pGpu)) + { + // + // Scaling timeoutUs by GSP_SCALE_TIMEOUT_EMU_SIM overflows 32 bits, + // so just max it out instead. 
+ // + timeoutUs = NV_U32_MAX; + + // + // On slower emulators and simulation, the time it takes to link the + // status queue is longer than the thread state timeout, so bypass + // the thread state so our longer timeout applies. + // + timeoutFlags |= GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE; + } + + gpuSetTimeout(pGpu, timeoutUs, &timeout, timeoutFlags); + + // Wait other end of the queue to run msgqInit. Retry for up to 10 ms. + for (nRetries = 0; ; nRetries++) + { + // Link in status queue + portAtomicMemoryFenceFull(); + + nRet = msgqRxLink((*ppMQI)->hQueue, (*ppMQI)->pStatusQueue, + (*ppMQI)->statusQueueSize, GSP_MSG_QUEUE_ELEMENT_SIZE_MIN); + + if (nRet == 0) + { + NV_PRINTF(LEVEL_INFO, "Status queue linked to command queue.\n"); + + // + // If we've bypassed the thread state timeout check for slower + // environments, it will have lapsed by now, so reset it so that + // the next timeout check doesn't fail immediately. + // + if (timeoutFlags & GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE) + threadStateResetTimeout(pGpu); + + return NV_OK; + } + + osSpinLoop(); + + nvStatus = gpuCheckTimeout(pGpu, &timeout); + if (nvStatus != NV_OK) + break; + + kgspDumpGspLogs(pGpu, pKernelGsp, NV_FALSE); + } + + if (nRet < 0) + { + NV_PRINTF(LEVEL_ERROR, + "msgqRxLink failed: %d, nvStatus 0x%08x, retries: %d\n", + nRet, nvStatus, nRetries); + GspMsgQueueCleanup(ppMQI); + } + + return nvStatus; +} + +void GspMsgQueueCleanup(MESSAGE_QUEUE_INFO **ppMQI) +{ + MESSAGE_QUEUE_INFO *pMQI = NULL; + + if ((ppMQI == NULL) || (*ppMQI == NULL)) + return; + + pMQI = *ppMQI; + pMQI->hQueue = NULL; + + if (pMQI->pWorkArea != NULL) + { + portMemFree(pMQI->pWorkArea); + pMQI->pWorkArea = NULL; + pMQI->pCmdQueueElement = NULL; + pMQI->pMetaData = NULL; + } + + if (pMQI->pSharedMemDesc != NULL) + { + NvP64 pVaKernel = memdescGetKernelMapping(pMQI->pSharedMemDesc); + NvP64 pPrivKernel = memdescGetKernelMappingPriv(pMQI->pSharedMemDesc); + + // Destroy kernel mapping for command queue. + if (pVaKernel != 0) + { + memdescUnmap(pMQI->pSharedMemDesc, NV_TRUE, osGetCurrentProcess(), + pVaKernel, pPrivKernel); + } + + // Free command queue memory. + memdescFree(pMQI->pSharedMemDesc); + memdescDestroy(pMQI->pSharedMemDesc); + pMQI->pSharedMemDesc = NULL; + } + + portMemFree(pMQI); + *ppMQI = NULL; +} + +/*! + * Calculate 32-bit checksum + * + * This routine assumes that the data is padded out with zeros to the next + * 8-byte alignment, and it is OK to read past the end to the 8-byte alignment. + */ +static NV_INLINE NvU32 _checkSum32(void *pData, NvU32 uLen) +{ + NvU64 *p = (NvU64 *)pData; + NvU64 *pEnd = (NvU64 *)((NvUPtr)pData + uLen); + NvU64 checkSum = 0; + + while (p < pEnd) + checkSum ^= *p++; + + return NvU64_HI32(checkSum) ^ NvU64_LO32(checkSum); +} + +/*! + * GspMsgQueueSendCommand + * + * Move a command record from our staging area to the command queue. + * + * Returns + * NV_OK - Record sucessfully sent. + * NV_ERR_INVALID_PARAM_STRUCT - Bad record length. + * NV_ERR_BUSY_RETRY - No space in the queue. + * NV_ERR_INVALID_STATE - Something really bad happenned. 
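+ *
+ * A record spans one or more 256-byte queue elements; it is zero-padded to
+ * an 8-byte boundary, stamped with a sequence number and a 32-bit XOR-fold
+ * checksum, and then copied into the queue element by element.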
+ */
+NV_STATUS GspMsgQueueSendCommand(MESSAGE_QUEUE_INFO *pMQI, OBJGPU *pGpu)
+{
+    GSP_MSG_QUEUE_ELEMENT *pCQE = pMQI->pCmdQueueElement;
+    NvU8      *pSrc             = (NvU8 *)pCQE;
+    NvU8      *pNextElement     = NULL;
+    int        nRet;
+    int        i;
+    int        nRetries;
+    int        nElements;
+    RMTIMEOUT  timeout;
+    NV_STATUS  nvStatus         = NV_OK;
+    NvU32      uElementSize     = GSP_MSG_QUEUE_ELEMENT_HDR_SIZE +
+                                  pMQI->pCmdQueueElement->rpc.length;
+
+    if ((uElementSize < sizeof(GSP_MSG_QUEUE_ELEMENT)) ||
+        (uElementSize > GSP_MSG_QUEUE_ELEMENT_SIZE_MAX))
+    {
+        NV_PRINTF(LEVEL_ERROR, "Incorrect length %u\n",
+                  pMQI->pCmdQueueElement->rpc.length);
+        nvStatus = NV_ERR_INVALID_PARAM_STRUCT;
+        goto done;
+    }
+
+    // Make sure the queue element in our working space is zero padded for checksum.
+    if ((uElementSize & 7) != 0)
+        portMemSet(pSrc + uElementSize, 0, 8 - (uElementSize & 7));
+
+    pCQE->seqNum   = pMQI->txSeqNum++;
+    pCQE->checkSum = 0;
+    pCQE->checkSum = _checkSum32(pSrc, uElementSize);
+
+    nElements = GSP_MSG_QUEUE_BYTES_TO_ELEMENTS(uElementSize);
+
+    for (i = 0; i < nElements; i++)
+    {
+        // Set a timeout of 1 sec
+        gpuSetTimeout(pGpu, 1000000, &timeout, 0);
+
+        // Wait for space to put the next element. Retry until the timeout expires.
+        for (nRetries = 0; ; nRetries++)
+        {
+            // Must get the buffers one at a time, since they could wrap.
+            pNextElement = (NvU8 *)msgqTxGetWriteBuffer(pMQI->hQueue, i);
+
+            if (pNextElement != NULL)
+                break;
+
+            if (gpuCheckTimeout(pGpu, &timeout) != NV_OK)
+                break;
+
+            portAtomicMemoryFenceFull();
+
+            osSpinLoop();
+        }
+
+        if (pNextElement == NULL)
+        {
+            NV_PRINTF(LEVEL_ERROR, "buffer is full\n");
+            nvStatus = NV_ERR_BUSY_RETRY;
+            goto done;
+        }
+
+        portMemCopy(pNextElement, GSP_MSG_QUEUE_ELEMENT_SIZE_MIN,
+                    pSrc,         GSP_MSG_QUEUE_ELEMENT_SIZE_MIN);
+        pSrc += GSP_MSG_QUEUE_ELEMENT_SIZE_MIN;
+    }
+
+    //
+    // If write-after-write (WAW) memory ordering is relaxed on the CPU, the
+    // msgq update below could reach memory before the portMemCopy data above.
+    // GSP-RM would then read incorrect data because the msgq was updated
+    // first; this is a typical producer/consumer memory-ordering hazard.
+    // Hence, a store fence is needed here.
+    //
+    portAtomicMemoryFenceStore();
+
+    nRet = msgqTxSubmitBuffers(pMQI->hQueue, nElements);
+
+    if (nRet != 0)
+    {
+        NV_PRINTF(LEVEL_ERROR, "msgqTxSubmitBuffers failed: %d\n", nRet);
+        nvStatus = NV_ERR_INVALID_STATE;
+        goto done;
+    }
+
+    nvStatus = NV_OK;
+
+done:
+    return nvStatus;
+}
+
+/*!
+ * GspMsgQueueReceiveStatus
+ *
+ * Get a status record from the GSP and move it from the rx queue to our
+ * staging area.
+ *
+ * Returns
+ *  NV_OK                       - Record successfully read.
+ *  NV_WARN_NOTHING_TO_DO       - No record available to read.
+ *  NV_ERR_INVALID_PARAM_STRUCT - Bad record length.
+ *  NV_ERR_NOT_READY            - Partial read.
+ *  NV_ERR_INVALID_DATA         - Bad checksum or sequence number.
+ *  NV_ERR_INVALID_STATE        - Something really bad happened.
+ */
+NV_STATUS GspMsgQueueReceiveStatus(MESSAGE_QUEUE_INFO *pMQI)
+{
+    const NvU8 *pNextElement = NULL;
+    NvU8       *pTgt         = (NvU8 *)pMQI->pCmdQueueElement;
+    int         nRet;
+    int         i;
+    int         nRetries;
+    int         nElements    = 1; // Assume record fits in one 256-byte queue element for now.
+    NvU32       uElementSize = 0;
+    NV_STATUS   nvStatus     = NV_OK;
+
+    for (nRetries = 0; nRetries < 3; nRetries++)
+    {
+        pTgt      = (NvU8 *)pMQI->pCmdQueueElement;
+        nvStatus  = NV_OK;
+        nElements = 1; // Assume record fits in one 256-byte queue element for now.
+
+        for (i = 0; i < nElements; i++)
+        {
+            // Get the pointer to the next queue element.
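+            //
+            // msgqRxGetReadBuffer(hQueue, i) is expected to return a pointer to
+            // the i'th pending element, or NULL if the GSP has not yet made
+            // that many elements visible, so a NULL here is not always an error.
+            //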
+ pNextElement = msgqRxGetReadBuffer(pMQI->hQueue, i); + if (pNextElement == NULL) + { + // Early exit if this is the first read and there is no data. + if (i == 0) + return NV_WARN_NOTHING_TO_DO; + + // + // We already successfully read part of the record, so we are here + // because the data is in flight (no fence) or the length was wrong. + // + NV_PRINTF(LEVEL_ERROR, "Incomplete read.\n"); + nvStatus = NV_ERR_NOT_READY; + break; + } + + // Copy the next element to our staging area. + portMemCopy(pTgt, GSP_MSG_QUEUE_ELEMENT_SIZE_MIN, + pNextElement, GSP_MSG_QUEUE_ELEMENT_SIZE_MIN); + pTgt += GSP_MSG_QUEUE_ELEMENT_SIZE_MIN; + + if (i != 0) + continue; + + // + // Special processing for first element of the record. + // Pull out the length and make sure it is valid. + // + uElementSize = GSP_MSG_QUEUE_ELEMENT_HDR_SIZE + + pMQI->pCmdQueueElement->rpc.length; + + if ((uElementSize < sizeof(GSP_MSG_QUEUE_ELEMENT)) || + (uElementSize > GSP_MSG_QUEUE_ELEMENT_SIZE_MAX)) + { + // The length is not valid. If we are running without a fence, + // this could mean that the data is still in flight from the CPU. + NV_PRINTF(LEVEL_ERROR, "Incorrect length %u\n", + pMQI->pCmdQueueElement->rpc.length); + nvStatus = NV_ERR_INVALID_PARAM_STRUCT; + break; + } + + // This adjusts the loop condition. + nElements = GSP_MSG_QUEUE_BYTES_TO_ELEMENTS(uElementSize); + } + + // Retry if there was an error. + if (nvStatus != NV_OK) + continue; + + // Retry if checksum fails. + if (_checkSum32(pMQI->pCmdQueueElement, uElementSize) != 0) + { + NV_PRINTF(LEVEL_ERROR, "Bad checksum.\n"); + nvStatus = NV_ERR_INVALID_DATA; + continue; + } + + // Retry if sequence number is wrong. + if (pMQI->pCmdQueueElement->seqNum != pMQI->rxSeqNum) + + { + NV_PRINTF(LEVEL_ERROR, "Bad sequence number. Expected %u got %u.\n", + pMQI->rxSeqNum, pMQI->pCmdQueueElement->seqNum); + nvStatus = NV_ERR_INVALID_DATA; + continue; + } + + // We have the whole record, so break out of the retry loop. + break; + } + + if (nvStatus == NV_OK) + { + pMQI->rxSeqNum++; + + nRet = msgqRxMarkConsumed(pMQI->hQueue, nElements); + if (nRet < 0) + { + NV_PRINTF(LEVEL_ERROR, "msgqRxMarkConsumed failed: %d\n", nRet); + nvStatus = NV_ERR_GENERIC; + } + } + + if (nRetries > 0) + { + if (nvStatus == NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Read succeeded with %d retries.\n", nRetries); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Read failed after %d retries.\n", nRetries); + } + } + + return nvStatus; +} + diff --git a/src/nvidia/src/kernel/gpu/host_eng/host_eng.c b/src/nvidia/src/kernel/gpu/host_eng/host_eng.c new file mode 100644 index 000000000..6a0fe42d4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/host_eng/host_eng.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/prelude.h" +#include "kernel/gpu/host_eng/host_eng.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "os/os.h" + +/*! + * @brief generic host engine halt + */ +NV_STATUS +hostengHaltEngine_IMPL +( + OBJGPU *pGpu, + OBJHOSTENG *pHosteng +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief generic host engine error reset initialization + */ +NV_STATUS +hostengHaltAndReset_IMPL +( + OBJGPU *pGpu, + OBJHOSTENG *pHosteng, + RMTIMEOUT *pTimeout +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief generic host engine reset + */ +NV_STATUS +hostengReset_IMPL +( + OBJGPU *pGpu, + OBJHOSTENG *pHosteng, + NvBool bReload, + KernelChannel *pKernelChannel, + KernelChannel **ppCurrentKernelChannel +) +{ + return NV_ERR_NOT_SUPPORTED; +} diff --git a/src/nvidia/src/kernel/gpu/hwpm/profiler_v1/kern_profiler_v1.c b/src/nvidia/src/kernel/gpu/hwpm/profiler_v1/kern_profiler_v1.c new file mode 100644 index 000000000..13fcab7ec --- /dev/null +++ b/src/nvidia/src/kernel/gpu/hwpm/profiler_v1/kern_profiler_v1.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/hwpm/profiler_v1.h" + +NV_STATUS +profilerConstruct_IMPL +( + Profiler *pProfiler, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + if (profilerIsProfilingPermitted_HAL(pProfiler)) + { + return profilerConstructState_HAL(pProfiler, pCallContext, pParams); + } + + return NV_ERR_INSUFFICIENT_PERMISSIONS; +} + +NvBool +profilerIsProfilingPermitted_IMPL +( + Profiler *pProfiler +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfiler); + + if (gpuIsRmProfilingPrivileged(pGpu) && !osIsAdministrator()) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +NV_STATUS profilerControl_IMPL +( + Profiler *pProfiler, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, profilerControlHwpmSupported_HAL(pProfiler, pParams)); + + return gpuresControl_IMPL(staticCast(pProfiler, GpuResource), + pCallContext, pParams); +} diff --git a/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2.c b/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2.c new file mode 100644 index 000000000..0bf852603 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2.c @@ -0,0 +1,238 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/hwpm/profiler_v2.h" +#include "vgpu/rpc.h" + +static NV_INLINE NvBool +_isDeviceProfilingPermitted(OBJGPU *pGpu, ProfilerBase *pProf, API_SECURITY_INFO *pSecInfo) +{ + if (pSecInfo->privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + return NV_TRUE; + } + + if (!gpuIsRmProfilingPrivileged(pGpu)) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +static NvBool +_isMemoryProfilingPermitted(OBJGPU *pGpu, ProfilerBase *pProf, NvHandle hClient) +{ + NvBool bSmcGpuPartitioningEnabled = IS_MIG_IN_USE(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + if (bSmcGpuPartitioningEnabled && !kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient)) + { + MIG_INSTANCE_REF ref; + + if (kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref) != NV_OK) + return NV_FALSE; + + if (!kmigmgrIsMIGReferenceValid(&ref)) + return NV_FALSE; + + NV_ASSERT_OR_RETURN((ref.pKernelMIGGpuInstance != NULL) && (ref.pMIGComputeInstance != NULL), NV_FALSE); + return (ref.pKernelMIGGpuInstance->resourceAllocation.gpcCount == + ref.pMIGComputeInstance->resourceAllocation.gpcCount); + + } + + return NV_TRUE; +} + +NV_STATUS +profilerBaseConstruct_IMPL +( + ProfilerBase *pProf, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return profilerBaseConstructState_HAL(pProf, pCallContext, pParams); +} + +void +profilerBaseDestruct_IMPL +( + ProfilerBase *pProf +) +{ + profilerBaseDestructState_HAL(pProf); +} + +NV_STATUS +profilerDevConstruct_IMPL +( + ProfilerDev *pProfDev, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + PROFILER_CLIENT_PERMISSIONS clientPermissions; + + if (!profilerDevQueryCapabilities_HAL(pProfDev, pCallContext, pParams, + &clientPermissions)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return profilerDevConstructState_HAL(pProfDev, pCallContext, pParams, clientPermissions); +} + +NvBool +profilerDevQueryCapabilities_IMPL +( + ProfilerDev *pProfDev, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + PROFILER_CLIENT_PERMISSIONS *pClientPermissions +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfDev); + ProfilerBase *pProfBase = staticCast(pProfDev, ProfilerBase); + API_SECURITY_INFO *pSecInfo = pParams->pSecInfo; + NvHandle hClient = pCallContext->pClient->hClient; + NvBool bAnyProfilingPermitted = NV_FALSE; + + pClientPermissions->bMemoryProfilingPermitted = + _isMemoryProfilingPermitted(pGpu, pProfBase, hClient); + + if (pSecInfo->privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + bAnyProfilingPermitted = NV_TRUE; + pClientPermissions->bAdminProfilingPermitted = NV_TRUE; + } + + pClientPermissions->bDevProfilingPermitted = + _isDeviceProfilingPermitted(pGpu, pProfBase, pSecInfo); + + if (pClientPermissions->bDevProfilingPermitted) + bAnyProfilingPermitted = NV_TRUE; + + return bAnyProfilingPermitted; +} + +NV_STATUS +profilerDevConstructState_IMPL +( + ProfilerDev *pProfDev, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, + PROFILER_CLIENT_PERMISSIONS clientPermissions +) +{ + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, profilerDevConstructStatePrologue_HAL(pProfDev, + pCallContext, pAllocParams)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, profilerDevConstructStateInterlude_HAL(pProfDev, + pCallContext, pAllocParams, clientPermissions)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, profilerDevConstructStateEpilogue_HAL(pProfDev, + pCallContext, pAllocParams)); + + return NV_OK; +} + +NV_STATUS +profilerDevConstructStatePrologue_FWCLIENT +( + 
ProfilerDev *pProfDev, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfDev); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfDev); + NvHandle hParent = RES_GET_PARENT_HANDLE(pProfDev); + NvHandle hObject = RES_GET_HANDLE(pProfDev); + NvU32 class = RES_GET_EXT_CLASS_ID(pProfDev); + NV_STATUS status = NV_OK; + + NV_RM_RPC_ALLOC_OBJECT(pGpu, hClient, hParent, hObject, class, + pAllocParams->pAllocParams, status); + + return status; +} + +NV_STATUS +profilerDevConstructStateInterlude_IMPL +( + ProfilerDev *pProfDev, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, + PROFILER_CLIENT_PERMISSIONS clientPermissions +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfDev); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfDev); + NvHandle hObject = RES_GET_HANDLE(pProfDev); + + NVB0CC_CTRL_INTERNAL_PERMISSIONS_INIT_PARAMS params = {0}; + + params.bDevProfilingPermitted = clientPermissions.bDevProfilingPermitted; + params.bAdminProfilingPermitted = clientPermissions.bAdminProfilingPermitted; + params.bMemoryProfilingPermitted = clientPermissions.bMemoryProfilingPermitted; + + return pRmApi->Control(pRmApi, + hClient, + hObject, + NVB0CC_CTRL_CMD_INTERNAL_PERMISSIONS_INIT, + ¶ms, sizeof(params)); +} + +void +profilerDevDestruct_IMPL +( + ProfilerDev *pProfDev +) +{ + profilerDevDestructState_HAL(pProfDev); +} + +void +profilerDevDestructState_FWCLIENT +( + ProfilerDev *pProfDev +) +{ + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + CALL_CONTEXT *pCallContext; + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfDev); + NV_STATUS status = NV_OK; + + resGetFreeParams(staticCast(pProfDev, RsResource), &pCallContext, &pParams); + hClient = pCallContext->pClient->hClient; + hParent = pCallContext->pResourceRef->pParentRef->hResource; + hObject = pCallContext->pResourceRef->hResource; + + NV_RM_RPC_FREE(pGpu, hClient, hParent, hObject, status); +} diff --git a/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2_ctrl.c b/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2_ctrl.c new file mode 100644 index 000000000..712d9d6f9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2_ctrl.c @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "rmapi/rs_utils.h" +#include "gpu/hwpm/profiler_v2.h" +#include "ctrl/ctrlb0cc/ctrlb0ccinternal.h" + +NV_STATUS +profilerBaseCtrlCmdAllocPmaStream_IMPL +( + ProfilerBase *pProfiler, + NVB0CC_CTRL_ALLOC_PMA_STREAM_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pProfiler); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pProfiler); + NvHandle hParent = RES_GET_PARENT_HANDLE(pProfiler); + NvHandle hObject = RES_GET_HANDLE(pProfiler); + NvBool bMemPmaBufferRegistered = NV_FALSE; + NvBool bMemPmaBytesAvailableRegistered = NV_FALSE; + + // + // REGISTER MEMDESCs TO GSP + // These are no-op with BareMetal/No GSP + // + status = memdescRegisterToGSP(pGpu, hClient, hParent, pParams->hMemPmaBuffer); + if (status != NV_OK) + { + goto fail; + } + bMemPmaBufferRegistered = NV_TRUE; + + status = memdescRegisterToGSP(pGpu, hClient, hParent, pParams->hMemPmaBytesAvailable); + if (status != NV_OK) + { + goto fail; + } + bMemPmaBytesAvailableRegistered = NV_TRUE; + + // + // With BareMetal/No GSP: this control is a direct call to + // profilerBaseCtrlCmdInternalReleaseHwpmLegacy_IMPL + // + status = pRmApi->Control(pRmApi, + hClient, + hObject, + NVB0CC_CTRL_CMD_INTERNAL_ALLOC_PMA_STREAM, + pParams, sizeof(*pParams)); + if (status != NV_OK) + { + goto fail; + } + + return status; + +fail: + if (bMemPmaBufferRegistered) + { + // These are no-op with BareMetal/No GSP + NV_ASSERT_OK(memdescDeregisterFromGSP(pGpu, hClient, hParent, pParams->hMemPmaBuffer)); + } + + if (bMemPmaBytesAvailableRegistered) + { + // These are no-op with BareMetal/No GSP + NV_ASSERT_OK(memdescDeregisterFromGSP(pGpu, hClient, hParent, pParams->hMemPmaBytesAvailable)); + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/i2c/i2c_api.c b/src/nvidia/src/kernel/gpu/i2c/i2c_api.c new file mode 100644 index 000000000..e4b587620 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/i2c/i2c_api.c @@ -0,0 +1,43 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/i2c/i2c_api.h" + +NV_STATUS +i2capiConstruct_IMPL +( + I2cApi *pI2cApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +i2capiDestruct_IMPL +( + I2cApi *pI2cApi +) +{ +} diff --git a/src/nvidia/src/kernel/gpu/intr/arch/ampere/intr_cpu_ga102.c b/src/nvidia/src/kernel/gpu/intr/arch/ampere/intr_cpu_ga102.c new file mode 100644 index 000000000..4e2ad3fc4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/arch/ampere/intr_cpu_ga102.c @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/gpu.h" + +#include "published/ampere/ga102/dev_vm.h" + +// +// Ampere HAL routines that access NV_VIRTUAL_FUNCTION_* registers. +// Every function in this file needs to have a GSP equivalent +// accessing NV_GSP_INTR_* registers. +// + +NvU32 +intrReadRegTopEnSet_GA102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + THREAD_STATE_NODE *pThreadState +) +{ + return GPU_VREG_RD32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(regIndex), + pThreadState); +} + +void +intrWriteRegTopEnSet_GA102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + NvU32 value, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_VREG_WR32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(regIndex), + value, + pThreadState); +} + +void +intrWriteRegTopEnClear_GA102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + NvU32 value, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_VREG_WR32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR(regIndex), + value, + pThreadState); +} diff --git a/src/nvidia/src/kernel/gpu/intr/arch/ampere/intr_ga100.c b/src/nvidia/src/kernel/gpu/intr/arch/ampere/intr_ga100.c new file mode 100644 index 000000000..d26fd4bc3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/arch/ampere/intr_ga100.c @@ -0,0 +1,168 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/gpu.h" +#include "objtmr.h" +#include "os/os.h" + +#include "published/ampere/ga100/dev_vm.h" +#include "published/ampere/ga100/dev_vm_addendum.h" + +// +// intrga100.c - HAL routines for Ampere interrupt object +// + +/*! + * @brief Returns a 64 bit mask, where all the bits set to 0 are the ones we + * intend to leave enabled in the client shared subtree even when we disable + * interrupts (for example, when we take the GPU lock). + * + * The non-replayable fault interrupt is shared with the client, and in the + * top half of the interrupt handler, as such, we only copy fault packets from + * the HW buffer to the appropriate SW buffers. + * + * The timer interrupt also does not need to be blocked by the GPU lock + * since SWRL granular locking requires the timer interrupt to be serviced + * outside the GPU lock. + * Note - While we keep the timer interrupt enabled during the GPU lock, + * we don't enable it in the PTIMER level when SWRL granular locking is disabled. + */ +NvU64 +intrGetUvmSharedLeafEnDisableMask_GA100 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 intrVectorNonReplayableFault; + NvU32 intrVectorTimerSwrl = NV_INTR_VECTOR_INVALID; + NvU64 mask = 0; + + // GSP RM services both MMU non-replayable fault and FIFO interrupts + if (IS_GSP_CLIENT(pGpu)) + { + return ~mask; + } + + intrVectorNonReplayableFault = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_NON_REPLAYABLE_FAULT, NV_FALSE); + + if (!IS_VIRTUAL(pGpu)) + { + intrVectorTimerSwrl = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_TMR_SWRL, NV_FALSE); + } + + if (intrVectorTimerSwrl != NV_INTR_VECTOR_INVALID) + { + // Ascertain that they're in the same subtree and same leaf + NV_ASSERT(NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(intrVectorNonReplayableFault) == + NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(intrVectorTimerSwrl)); + NV_ASSERT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVectorNonReplayableFault) == + NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVectorTimerSwrl)); + } + + // Ascertain that they're in the first leaf + NV_ASSERT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVectorNonReplayableFault) == + NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(NV_CPU_INTR_UVM_SHARED_SUBTREE_START)); + + // + // Compile-time ascertain that we only have 1 client subtree (we assume + // this since we cache only 64 bits). 
+ // + ct_assert(NV_CPU_INTR_UVM_SHARED_SUBTREE_START == NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST); + + // + // Compile-time ascertain that we only have 2 subtrees as this is what we currently support + // by only caching 64 bits + // + ct_assert((NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST) - 1) == + NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(NV_CPU_INTR_UVM_SHARED_SUBTREE_START)); + + mask = NVBIT32(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVectorNonReplayableFault)); + + if (intrVectorTimerSwrl != NV_INTR_VECTOR_INVALID) + { + mask |= NVBIT32(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVectorTimerSwrl)); + } + + mask <<= 32; + + return ~mask; +} + +/*! + * @brief Sanity check that the given stall engine interrupt vector is in the right tree + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] vector stall interrupt vector + */ +void +intrSanityCheckEngineIntrStallVector_GA100 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 vector, + NvU16 mcEngine +) +{ + // + // The leafIndex must be within the engine stall tree: leaf 6 on Ampere. + // Try to catch this on pre-release drivers. Don't need this on release drivers since this + // is only to catch issues during code development. Should never happen in practice. + // + if (NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(vector) != 6) + { + NV_PRINTF(LEVEL_ERROR, "MC_ENGINE_IDX %u has invalid stall intr vector %u\n", mcEngine, vector); + DBG_BREAKPOINT(); + } +} + +/*! + * @brief Sanity check that the given notification engine interrupt vector is in the right tree + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] vector stall interrupt vector + */ +void +intrSanityCheckEngineIntrNotificationVector_GA100 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 vector, + NvU16 mcEngine +) +{ + // + // The leafIndex must be within the engine notification tree: leaf 0 on Ampere. + // Try to catch this on pre-release drivers. Don't need this on release drivers since this + // is only to catch issues during code development. Should never happen in practice. + // + if(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(vector) != 0) + { + NV_PRINTF(LEVEL_ERROR, "MC_ENGINE_IDX %u has invalid notification intr vector %u\n", mcEngine, vector); + DBG_BREAKPOINT(); + } +} diff --git a/src/nvidia/src/kernel/gpu/intr/arch/maxwell/intr_gm107.c b/src/nvidia/src/kernel/gpu/intr/arch/maxwell/intr_gm107.c new file mode 100644 index 000000000..4cc40bd71 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/arch/maxwell/intr_gm107.c @@ -0,0 +1,99 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/gpu.h" +#include "core/locks.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "gpu/gsp/kernel_gsp.h" +#include "resserv/rs_server.h" +#include "vgpu/vgpu_events.h" +#include "gpu/disp/kern_disp.h" + +#include "published/maxwell/gm107/dev_boot.h" + +/** + * @brief Returns a bitfield with the MC_ENGINES that have pending interrupts + */ +NV_STATUS +intrGetPendingStall_GM107 +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines, + THREAD_STATE_NODE *pThreadState +) +{ + NvU8 i; + + NV_ASSERT_OR_RETURN(pEngines != NULL, NV_ERR_INVALID_ARGUMENT); + + bitVectorClrAll(pEngines); + + // + // If the GPU is in GC6 (aka powered down or rail-gated state), return + // early from this routine. We don't want to touch any GPU registers while + // its powered off as it will hang the system. + // + // If the GPU has fallen off the bus, there are obviously no interrupts + // pending that we can do anything about, but attempting to read a status + // register might indicate otherwise (returning 0xffffffff). Its better to do a GPU + // sanity check after we read the status register and bail out if necessary. + // + if (IS_GPU_GC6_STATE_ENTERED(pGpu)) + { + return NV_ERR_GPU_NOT_FULL_POWER; + } + + NV_ASSERT_OK_OR_RETURN(intrGetPendingStallEngines_HAL(pGpu, pIntr, pEngines, pThreadState)); + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + return NV_ERR_GPU_IS_LOST; + } + + if (IS_VIRTUAL(pGpu) && vgpuGetPendingEvent(pGpu, pThreadState)) + bitVectorSet(pEngines, MC_ENGINE_IDX_VGPU); + + if (pGpu->fecsCtxswLogConsumerCount > 0) + { + // + // WARNING: This loop must not call any GR HALs or + // access any PGRAPH registers + // + for (i = 0; i < GPU_MAX_GRS; i++) + { + KernelGraphics *pKernelGraphics = pGpu->pKernelGraphics[i]; + if ((pKernelGraphics != NULL) && + kgraphicsIsIntrDrivenCtxswLoggingEnabled(pGpu, pKernelGraphics) && + fecsIsIntrPending(pGpu, pKernelGraphics)) + { + bitVectorSet(pEngines, MC_ENGINE_IDX_GRn_FECS_LOG(i)); + } + } + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/intr/arch/pascal/intr_gp100.c b/src/nvidia/src/kernel/gpu/intr/arch/pascal/intr_gp100.c new file mode 100644 index 000000000..e9bf9da3f --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/arch/pascal/intr_gp100.c @@ -0,0 +1,264 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/gpu.h" +#include "core/thread_state.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "objtmr.h" +#include "vgpu/vgpu_events.h" + +#include "published/pascal/gp100/dev_boot.h" + +static void +_intrSetIntrEnInHw_GP100 +( + OBJGPU *pGpu, + Intr *pIntr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 pmcIntrEnSet; + NvU32 pmcIntrEnClear; + // + // If the GPU is in GC6 (aka powered down or rail-gated state), return + // early from this routine. We don't want to touch any GPU registers while + // its powered off as it will hang the system. + // + if (IS_GPU_GC6_STATE_ENTERED(pGpu)) + { + return; + } + + // Only set the mask interrupt line and clear the rest all interrupt lines. + pmcIntrEnSet = pIntr->intrCachedEnSet & pIntr->intrMask.cached; + pmcIntrEnClear = pIntr->intrCachedEnClear | (~pIntr->intrMask.cached); + + // Mask the leaf level interrupts for cases where top PMC intr is not toggeled + intrSetHubLeafIntr_HAL(pGpu, pIntr, pIntr->intrCachedEn0, &pmcIntrEnClear, &pmcIntrEnSet, pThreadState); + + GPU_REG_WR32_EX(pGpu, NV_PMC_INTR_EN_CLEAR(0), pmcIntrEnClear, pThreadState); + GPU_REG_WR32_EX(pGpu, NV_PMC_INTR_EN_SET(0), pmcIntrEnSet, pThreadState); +} + +/** + * @brief Write the top level intr enable 0 state to hardware + * Also Set HUB leaf interrupts if supported according to + * RM owned mask. + * @param[in] pGpu + * @param[in] pIntr + * @param[in] flags Select BROADCAST (default) or UNICAST + * @param[in] intrEn0 The value to write + * @param[in] pThreadState thread state node pointer + */ +void +intrSetIntrEnInHw_GP100 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrEn0, + THREAD_STATE_NODE *pThreadState +) +{ + NV_ASSERT(intrEn0 <= INTERRUPT_TYPE_MAX); + + pIntr->intrCachedEn0 = intrEn0; + + intrEncodeStallIntrEn_HAL(pGpu, pIntr, intrEn0, &pIntr->intrCachedEnSet, &pIntr->intrCachedEnClear); + + _intrSetIntrEnInHw_GP100(pGpu, pIntr, pThreadState); +} + +NV_STATUS +intrSetIntrMask_GP100 +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngMask, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 intrMask = 0; + + if (bitVectorTestAllSet(pEngMask)) + { + intrMask = INTERRUPT_MASK_ENABLED; + } + else if (bitVectorTestAllCleared(pEngMask)) + { + intrMask = INTERRUPT_MASK_DISABLED; + } + else + { + intrMask = intrConvertEngineMaskToPmcIntrMask(pGpu, pIntr, pEngMask); + } + + pIntr->intrMask.cached = intrMask; + + _intrSetIntrEnInHw_GP100(pGpu, pIntr, pThreadState); + return NV_OK; +} + +/** + * @brief Get the top level intr enable 0 state from hardware + * @param[in] pGpu + * @param[in] pIntr + */ +NvU32 +intrGetIntrEnFromHw_GP100 +( + OBJGPU *pGpu, + Intr *pIntr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 intrEn; + // + // If the GPU is in GC6 (aka powered down or rail-gated state), return + // early from this routine. We don't want to touch any GPU registers while + // its powered off as it will hang the system. 
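+    // In that case this routine reports INTERRUPT_TYPE_DISABLED instead of
+    // reading NV_PMC_INTR_EN(0).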
+ // + if (IS_GPU_GC6_STATE_ENTERED(pGpu)) + { + return INTERRUPT_TYPE_DISABLED; + } + intrEn = GPU_REG_RD32_EX(pGpu, NV_PMC_INTR_EN(0), pThreadState); + return intrDecodeStallIntrEn_HAL(pGpu, pIntr, intrEn); +} + +NV_STATUS +intrGetIntrMask_GP100 +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngMask, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 intrMask = pIntr->intrMask.cached; + + if (intrMask == 0x0) + { + bitVectorClrAll(pEngMask); + } + else if (intrMask == INTERRUPT_MASK_ENABLED) + { + bitVectorSetAll(pEngMask); + } + else + { + intrConvertPmcIntrMaskToEngineMask(pGpu, pIntr, intrMask, pEngMask); + } + + return NV_OK; +} + +/** + * @brief Returns a bitfield with the MC_ENGINES that have pending interrupts + */ +NV_STATUS +intrGetPendingStall_GP100 +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines, + THREAD_STATE_NODE *pThreadState +) +{ + extern NV_STATUS intrGetPendingStall_GM107(OBJGPU *pGpu, Intr *pIntr, MC_ENGINE_BITVECTOR *pEngines, THREAD_STATE_NODE *pThreadState); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + MC_ENGINE_BITVECTOR pendingEngines; + + NV_ASSERT_OK_OR_RETURN(intrGetPendingStall_GM107(pGpu, pIntr, pEngines, pThreadState)); + + if (pTmr) + { + NvU32 retVal; + + tmrGetIntrStatus_HAL(pGpu, pTmr, &retVal, pThreadState); + if (retVal != 0) + { + bitVectorSet(pEngines, MC_ENGINE_IDX_TMR); + } + } + + intrGetGmmuInterrupts(pGpu, pIntr, &pendingEngines, pThreadState); + bitVectorOr(pEngines, pEngines, &pendingEngines); + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + return NV_ERR_GPU_IS_LOST; + } + + return NV_OK; +} + +/*! + * @brief Encode the interrupt mode to be able to write it out to HW + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] intrEn the enable value to encode + * @param[out] pIntrEnSet the value to write to the set register + * @param[out] pIntrEnClear the value to write to the clear register + * + */ +void +intrEncodeStallIntrEn_GP100 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrEn, + NvU32 *pIntrEnSet, + NvU32 *pIntrEnClear +) +{ + if (intrEn == INTERRUPT_TYPE_SOFTWARE) + { + *pIntrEnSet = INTERRUPT_MASK_SOFTWARE; + *pIntrEnClear = INTERRUPT_MASK_HARDWARE; + } + else if (intrEn == INTERRUPT_TYPE_HARDWARE) + { + *pIntrEnSet = INTERRUPT_MASK_HARDWARE; + *pIntrEnClear = INTERRUPT_MASK_SOFTWARE; + } + else if (intrEn == INTERRUPT_TYPE_DISABLED) + { + *pIntrEnSet = INTERRUPT_MASK_DISABLED; + *pIntrEnClear = INTERRUPT_MASK_ENABLED; + } + else + { + *pIntrEnSet = INTERRUPT_MASK_ENABLED; + *pIntrEnClear = INTERRUPT_MASK_DISABLED; + } + + // Only toggle those interrupts that RM owns. + if (pGpu->pmcRmOwnsIntrMask != 0) + { + *pIntrEnSet &= pGpu->pmcRmOwnsIntrMask; + *pIntrEnClear &= pGpu->pmcRmOwnsIntrMask; + } +} diff --git a/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_cpu_tu102.c b/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_cpu_tu102.c new file mode 100644 index 000000000..40fabfec1 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_cpu_tu102.c @@ -0,0 +1,170 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/gpu.h" + +#include "published/turing/tu102/dev_vm.h" + +// +// Turing HAL routines that access NV_VIRTUAL_FUNCTION_* registers. +// Every function in this file needs to have a GSP equivalent +// accessing NV_GSP_INTR_* registers. +// + +NvU32 +intrReadRegLeafEnSet_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + THREAD_STATE_NODE *pThreadState +) +{ + return GPU_VREG_RD32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(regIndex), + pThreadState); +} + +NvU32 +intrReadRegLeaf_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + THREAD_STATE_NODE *pThreadState +) +{ + return GPU_VREG_RD32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(regIndex), + pThreadState); +} + +NvU32 +intrReadRegTopEnSet_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + THREAD_STATE_NODE *pThreadState +) +{ + return GPU_VREG_RD32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(regIndex), + pThreadState); +} + +NvU32 +intrReadRegTop_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + THREAD_STATE_NODE *pThreadState +) +{ + return GPU_VREG_RD32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP(regIndex), + pThreadState); +} + +void +intrWriteRegLeafEnSet_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + NvU32 value, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_VREG_WR32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(regIndex), + value, + pThreadState); +} + +void +intrWriteRegLeafEnClear_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + NvU32 value, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_VREG_WR32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR(regIndex), + value, + pThreadState); +} + +void +intrWriteRegLeaf_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + NvU32 value, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_VREG_WR32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(regIndex), + value, + pThreadState); +} + +void +intrWriteRegTopEnSet_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + NvU32 value, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_VREG_WR32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(regIndex), + value, + pThreadState); +} + +void +intrWriteRegTopEnClear_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 regIndex, + NvU32 value, + THREAD_STATE_NODE *pThreadState +) +{ + 
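+    //
+    // TOP_EN_SET / TOP_EN_CLEAR are assumed to form a write-1-to-set /
+    // write-1-to-clear register pair, so only the bits set in 'value'
+    // change state here.
+    //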
GPU_VREG_WR32_EX(pGpu, + NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR(regIndex), + value, + pThreadState); +} diff --git a/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_nonstall_tu102.c b/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_nonstall_tu102.c new file mode 100644 index 000000000..379866364 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_nonstall_tu102.c @@ -0,0 +1,519 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/gpu.h" +#include "gpu/ce/kernel_ce.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "os/os.h" +#include "rmapi/event.h" +#include "vgpu/rpc.h" +#include "vgpu/vgpu_events.h" + +#include "published/turing/tu102/dev_vm.h" + +/*! + * @brief Get the current non-stall interrupt enable status + */ +NvU32 +intrGetNonStallEnable_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 i; + NvU32 val; + NvU32 nonStallMask; + + NvU32 isNonStallEnabled = 0; + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + if (vgpuShmIsNonStallEnabled(pGpu, &isNonStallEnabled) == NV_OK) + { + if (isNonStallEnabled) + return INTERRUPT_TYPE_MULTI; + } + return INTERRUPT_TYPE_DISABLED; + } + + // + // We're doing an optimization below to read the TOP_EN_CLEAR register once + // after the for() loop that loops over the subtrees. It assumes that we + // only have one single top level enable register. HW has support for 2 top + // level registers, but until we get to using the second top level register, + // there's no need for us to write code that uses it (it should be a long + // time away, anyway). Use compile-time asserts to catch that this + // assumption has not changed. + // + ct_assert(sizeof(nonStallMask) == sizeof(NvU32)); + + val = intrReadRegTopEnSet_HAL(pGpu, pIntr, 0, pThreadState); + + nonStallMask = intrGetIntrTopNonStallMask_HAL(pGpu, pIntr); + FOR_EACH_INDEX_IN_MASK(32, i, nonStallMask) + { + if (val & NVBIT(NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i))) + { + // + // If any top-level subtree corresponding to non-stall interrupts + // is enabled, return that non-stall interrupts are enabled. We + // only support enabling or disabling all non-stall interrupts at + // once, not a subset. 
+ // Note that INTERRUPT_TYPE_MULTI basically means that all kinds + // of non-stall interrupts are enabled. The legacy pre-Pascal code + // had support to only enable software-triggerable interrupts or + // hardware-triggerable interrupts or both. We're just continuing + // to the same enum, but the naming of MULTI in the new interrupt + // tree warrants some explanation, hence the detailed comment. + // + return INTERRUPT_TYPE_MULTI; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return INTERRUPT_TYPE_DISABLED; +} + +/*! + * @brief Enable all nonstall interrupts in dev_ctrl at top level + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] pThreadState thread state node pointer + */ +void +intrEnableTopNonstall_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 i; + NvU32 val = 0; + NvU32 nonStallMask; + + // + // We're doing an optimization below to write the TOP_EN_CLEAR register once + // after the for() loop that loops over the subtrees. It assumes that we + // only have one single top level enable register. HW has support for 2 top + // level registers, but until we get to using the second top level register, + // there's no need for us to write code that uses it (it should be a long + // time away, anyway). Use compile-time asserts to catch that this + // assumption has not changed. + // + ct_assert(sizeof(nonStallMask) == sizeof(NvU32)); + + nonStallMask = intrGetIntrTopNonStallMask_HAL(pGpu, pIntr); + FOR_EACH_INDEX_IN_MASK(32, i, nonStallMask) + { + val |= (NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_ENABLE << (NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i))); + } + FOR_EACH_INDEX_IN_MASK_END; + + // This optimization of one single register write + intrWriteRegTopEnSet_HAL(pGpu, pIntr, NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(0), val, pThreadState); +} + +/*! + * @brief Disable all nonstall interrupts in dev_ctrl at top level + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] pThreadState thread state node pointer + */ +void +intrDisableTopNonstall_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 i; + NvU32 val = 0; + NvU32 nonStallMask; + + // + // We're doing an optimization below to write the TOP_EN_CLEAR register once + // after the for() loop that loops over the subtrees. It assumes that we + // only have one single top level enable register. HW has support for 2 top + // level registers, but until we get to using the second top level register, + // there's no need for us to write code that uses it (it should be a long + // time away, anyway). Use compile-time asserts to catch that this + // assumption has not changed. + // + ct_assert(sizeof(nonStallMask) == sizeof(NvU32)); + + nonStallMask = intrGetIntrTopNonStallMask_HAL(pGpu, pIntr); + FOR_EACH_INDEX_IN_MASK(32, i, nonStallMask) + { + val |= (NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_DISABLE << (NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i))); + } + FOR_EACH_INDEX_IN_MASK_END; + + intrWriteRegTopEnClear_HAL(pGpu, pIntr, NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(0), val, pThreadState); +} + +void +intrRestoreNonStall_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrEn1, + THREAD_STATE_NODE *pThreadState +) +{ + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED)) + { + return; + } + + // + // We only support enabling all non-stall interrupts or disabling all + // non-stall interrupts in one go. We don't have a usecase to enable/disable + // some of them selectively. 
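+    // Any nonzero intrEn1 therefore enables the entire non-stall tree.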
+ // + if (intrEn1 == 0) + { + intrDisableTopNonstall_HAL(pGpu, pIntr, pThreadState); + } + else + { + intrEnableTopNonstall_HAL(pGpu, pIntr, pThreadState); + } +} + +/*! + * @brief get a bitvector of engines with pending nonstall interrupts + * + * @param[in] pGpu OBJGPU pointer + * @param[out] pEngines bitvector of engines that have pending interrupts + * @param[in] pThreadState thread state node pointer + */ +NV_STATUS +intrGetPendingNonStall_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + PMC_ENGINE_BITVECTOR pEngines, + THREAD_STATE_NODE *pThreadState +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz; + NvU32 i, j, k; + NvU32 pending; + NvU32 intrVector; + + NV_ASSERT_OR_RETURN(pEngines != NULL, NV_ERR_INVALID_ARGUMENT); + + bitVectorClrAll(pEngines); + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // If the GPU is in GC6 (aka powered down or rail-gated state), return + // early from this routine. We don't want to touch any GPU registers while + // its powered off as it will hang the system + // + if (IS_GPU_GC6_STATE_ENTERED(pGpu)) + { + return NV_ERR_GPU_NOT_FULL_POWER; + } + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return vgpuIsNonStallPending(pGpu, pEngines); + } + + NV_ASSERT_OK_OR_RETURN(intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz)); + + FOR_EACH_INDEX_IN_MASK(32, i, intrGetIntrTopNonStallMask_HAL(pGpu, pIntr)) + { + j = NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(i); + pending = intrReadRegTop_HAL(pGpu, pIntr, j, pThreadState); + + if ((pending & (NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE_INTR_PENDING << NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i))) == 0) + { + continue; + } + + for (j = NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(i); j <= NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(i); j++) + { + NvU32 intr = intrReadRegLeaf_HAL(pGpu, pIntr, j, pThreadState) & + intrReadRegLeafEnSet_HAL(pGpu, pIntr, j, pThreadState); + if (intr == 0) + { + continue; + } + + for (k = 0; k < intrTableSz; k++) + { + NvU32 intrVector = pIntrTable[k].intrVectorNonStall; + + if (intrVector == NV_INTR_VECTOR_INVALID) + { + // This engine does not have a valid nonstall interrupt vector + continue; + } + + if (intrVector < NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_START(j) || + intrVector > NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_END(j)) + { + // This engine's interrupt vector isn't part of this leaf anyway + continue; + } + if (intr & NVBIT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector))) + { + bitVectorSet(pEngines, pIntrTable[k].mcEngine); + } + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // check for FIFO non stall interrupt + if (!kfifoIsHostEngineExpansionSupported(pKernelFifo) && + !IS_GSP_CLIENT(pGpu) && !IS_VIRTUAL(pGpu)) + { + intrVector = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_FIFO, NV_TRUE); + if (intrVector != NV_INTR_VECTOR_INVALID) + { + NvU32 intr; + NvU32 leafIdx = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + + intr = intrReadRegLeaf_HAL(pGpu, pIntr, leafIdx, pThreadState) & + intrReadRegLeafEnSet(pGpu, pIntr, leafIdx, pThreadState) & + NVBIT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector)); + if (intr != 0) + { + bitVectorSet(pEngines, MC_ENGINE_IDX_FIFO); + } + } + } + + return NV_OK; +} + +/*! 
+ * @brief Given an interrupt status from a dev_ctrl LEAF register and the LEAF + * index, service the pending interrupts + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] intr intr status from the intrLeafIdx'th leaf register + * @param[in] intrLeafIdx index of the leaf register + * @param[in] pThreadState thread state node pointer + */ +static NV_STATUS +_intrServiceNonStallLeaf_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intr, + NvU32 intrLeafIdx, + PMC_ENGINE_BITVECTOR pEngines, + THREAD_STATE_NODE *pThreadState +) +{ + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz; + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus; + NvU32 i; + NvU16 mcEngineIdx; + + // Don't clear the bitvector pEngines since caller accumulates + + NV_ASSERT_OK_OR_RETURN(intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz)); + + for (i = 0; i < intrTableSz; i++) + { + NvU32 intrVector; + NvU32 intrPending; + + intrVector = pIntrTable[i].intrVectorNonStall; + if (intrVector == NV_INTR_VECTOR_INVALID) + { + // This engine does not have a valid nonstall interrupt vector + continue; + } + + if (intrVector < NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_START(intrLeafIdx) || + intrVector > NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_END(intrLeafIdx)) + { + // This engine's interrupt vector isn't part of this leaf anyway + continue; + } + + intrPending = intr & NVBIT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector)); + if (!intrPending) + { + continue; + } + + mcEngineIdx = pIntrTable[i].mcEngine; + bitVectorSet(pEngines, mcEngineIdx); + + // + // clear the latched state in dev_ctrl and then call the servicing + // routine. If the servicing routine is unable to serve this interrupt, + // then we'll throw a breakpoint but march on (interrupt would be + // cleared so we wouldn't get ourselves into an interrupt storm unless + // it fires again). + // + intrClearLeafVector_HAL(pGpu, pIntr, intrVector, pThreadState); + if (NV_OK != (tmpStatus = intrServiceNotificationRecords(pGpu, pIntr, + mcEngineIdx, + pThreadState))) + { + NV_PRINTF(LEVEL_ERROR, "Could not service nonstall interrupt from " + "mcEngineIdx %d. 
NV_STATUS = 0x%x\n", mcEngineIdx, tmpStatus);
+            DBG_BREAKPOINT();
+            status = tmpStatus;
+        }
+    }
+    return status;
+}
+
+void
+intrDisableNonStall_TU102
+(
+    OBJGPU            *pGpu,
+    Intr              *pIntr,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED))
+    {
+        intrDisableTopNonstall_HAL(pGpu, pIntr, pThreadState);
+    }
+}
+
+NV_STATUS
+intrServiceNonStall_TU102
+(
+    OBJGPU              *pGpu,
+    Intr                *pIntr,
+    PMC_ENGINE_BITVECTOR pEngines,
+    THREAD_STATE_NODE   *pThreadState
+)
+{
+    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu);
+    NV_STATUS   status      = NV_OK;
+    NV_STATUS   tmpStatus   = NV_OK;
+    NvU32       i, pending;
+    NvU32       intr, intrVector;
+
+    bitVectorClrAll(pEngines);
+
+    if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu))
+    {
+        return NV_ERR_GPU_IS_LOST;
+    }
+
+    if (API_GPU_IN_RESET_SANITY_CHECK(pGpu))
+    {
+        return NV_ERR_GPU_IN_FULLCHIP_RESET;
+    }
+
+    if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu))
+    {
+        return vgpuServiceNonStall(pGpu, pEngines);
+    }
+
+    FOR_EACH_INDEX_IN_MASK(32, i, intrGetIntrTopNonStallMask_HAL(pGpu, pIntr))
+    {
+        NvU32 j = NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(i);
+        pending = intrReadRegTop_HAL(pGpu, pIntr, j, pThreadState);
+
+        if ((pending & (NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE_INTR_PENDING << NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(i))) == 0)
+        {
+            continue;
+        }
+
+        for (j = NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(i); j <= NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(i); j++)
+        {
+            intr = intrReadRegLeaf_HAL(pGpu, pIntr, j, pThreadState) &
+                   intrReadRegLeafEnSet_HAL(pGpu, pIntr, j, pThreadState);
+            if (intr == 0)
+            {
+                continue;
+            }
+
+            if ((tmpStatus = _intrServiceNonStallLeaf_TU102(pGpu, pIntr, intr, j, pEngines, pThreadState)) != NV_OK)
+            {
+                NV_ASSERT_OK_FAILED("Could not service nonstall interrupt leafs", tmpStatus);
+                status = tmpStatus;
+            }
+        }
+    }
+    FOR_EACH_INDEX_IN_MASK_END;
+
+    //
+    // Special handling for the FIFO "non-stall" interrupt that does not report
+    // into the non-stall subtree (unfortunately, by HW design) and hence, is
+    // not handled above.
+    //
+    // This is a physical-only interrupt
+    //
+    if (!kfifoIsHostEngineExpansionSupported(pKernelFifo) &&
+        !IS_GSP_CLIENT(pGpu) && !IS_VIRTUAL(pGpu))
+    {
+        intrVector = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_FIFO, NV_TRUE);
+        if (intrVector != NV_INTR_VECTOR_INVALID)
+        {
+            NvU32 leafIdx = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector);
+
+            intr = intrReadRegLeaf_HAL(pGpu, pIntr, leafIdx, pThreadState) &
+                   intrReadRegLeafEnSet_HAL(pGpu, pIntr, leafIdx, pThreadState) &
+                   NVBIT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector));
+            if (intr != 0)
+            {
+                if ((tmpStatus = _intrServiceNonStallLeaf_TU102(pGpu, pIntr, intr, leafIdx, pEngines, pThreadState)) != NV_OK)
+                {
+                    NV_ASSERT_OK_FAILED("Could not service FIFO 'non-stall' intr", tmpStatus);
+                    status = tmpStatus;
+                }
+            }
+        }
+    }
+
+    //
+    if (status != NV_OK)
+    {
+        DBG_BREAKPOINT();
+    }
+    return status;
+}
diff --git a/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_swintr_tu102.c b/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_swintr_tu102.c
new file mode 100644
index 000000000..657accbb5
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_swintr_tu102.c
@@ -0,0 +1,150 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/gpu.h" +#include "vgpu/vgpu_events.h" + +#include "published/turing/tu102/dev_ctrl.h" +#include "published/turing/tu102/dev_vm.h" + +// +// HAL routines for Turing software interrupts +// + +/*! + * @brief Set stalling software interrupt + */ +NV_STATUS +intrSetStallSWIntr_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_TRIGGER, + NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + return NV_OK; +} + +/*! + * @brief Clear stalling software interrupt + */ +NV_STATUS +intrClearStallSWIntr_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 leafReg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 clearSwIntr = NVBIT(leafBit); + + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(leafReg), + clearSwIntr); + return NV_OK; +} + +/*! + * @brief Enable stalling software interrupt + */ +void +intrEnableStallSWIntr_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 leafReg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 subtree = NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 topIdx = NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(subtree); + NvU32 topBit = NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(subtree); + + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(leafReg), NVBIT(leafBit)); + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(topIdx), NVBIT(topBit)); +} + +/*! + * @brief Disable stalling software interrupt + * Note: This disables the entire subtree for the stalling SW interrupt and as + * such, should only be used in _osVerifyInterrupts, where it is guaranteed that + * interrupts will be re-ennabled at a later time. 
+ */ +void +intrDisableStallSWIntr_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 leafReg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 subtree = NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 topIdx = NV_CTRL_INTR_SUBTREE_TO_TOP_IDX(subtree); + NvU32 topBit = NV_CTRL_INTR_SUBTREE_TO_TOP_BIT(subtree); + + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR(leafReg), NVBIT(leafBit)); + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR(topIdx), NVBIT(topBit)); +} + +/*! + * @brief Get interrupt mode (mode & pending) + * + * @param[in] pGpu GPU Object + * @param[in] pIntr Intr Object + * @param[out] intrmode Interrupt mode + * 0 = disabled + * 1 = hardware + * 2 = software + * @param[out] pending Interrupt pending? + */ +void +intrGetStallInterruptMode_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 *pIntrmode, + NvBool *pPending +) +{ + NvU32 reg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 bit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(NV_CTRL_CPU_DOORBELL_VECTORID_VALUE_CONSTANT); + NvU32 swPending = 0; + + *pIntrmode = INTERRUPT_TYPE_SOFTWARE; // value put in to match with legacy + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + if (vgpuShmIsSwPending(pGpu, &swPending) == NV_OK) + *pPending = swPending; + else + *pPending = NV_FALSE; + return; + } + + swPending = intrReadRegLeaf_HAL(pGpu, pIntr, reg, NULL); + *pPending = (swPending & NVBIT(bit)) ? NV_TRUE : NV_FALSE; +} diff --git a/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_tu102.c b/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_tu102.c new file mode 100644 index 000000000..5e8d0bd9c --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/arch/turing/intr_tu102.c @@ -0,0 +1,1198 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr.h" +#include "gpu/gpu.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "gpu/bif/kernel_bif.h" +#include "objtmr.h" +#include "gpu/uvm/uvm.h" +#include "os/os.h" +#include "vgpu/vgpu_events.h" +#include "vgpu/rpc.h" +#include "gpu/mmu/kern_gmmu.h" + +#include "published/turing/tu102/dev_ctrl.h" +#include "published/turing/tu102/dev_vm.h" +#include "published/turing/tu102/dev_vm_addendum.h" +/*! + * @brief Get the base interrupt vector to use when indexing engine nonstall + * interrupts + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * + * @returns the base interrupt vector for engine nonstall interrupts + */ +NvU32 +intrGetNonStallBaseVector_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 base = 0; + + if (!IS_VIRTUAL(pGpu)) + { + base = GPU_REG_RD32(pGpu, NV_CTRL_LEGACY_ENGINE_NONSTALL_INTR_BASE_VECTORID); + } + else + { + NV_STATUS status = NV_OK; + NV_RM_RPC_VGPU_PF_REG_READ32(pGpu, NV_CTRL_LEGACY_ENGINE_NONSTALL_INTR_BASE_VECTORID, &base, status); + } + return base; +} + +// +// Static interface functions +// +static NvU32 _intrGetUvmLeafMask_TU102(OBJGPU *, Intr *); +static void _intrEnableStall_TU102(OBJGPU *, Intr *, THREAD_STATE_NODE *pThreadState); +static void _intrDisableStall_TU102(OBJGPU *, Intr *, THREAD_STATE_NODE *pThreadState); +static void _intrClearLeafEnables_TU102(OBJGPU *pGpu, Intr *pIntr); + +// Compile time asserts to make sure we don't write beyond the leaf register array + +ct_assert(NV_CPU_INTR_STALL_SUBTREE_START < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1); +ct_assert(NV_CPU_INTR_STALL_SUBTREE_LAST < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1); +ct_assert(NV_CPU_INTR_STALL_SUBTREE_START < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET__SIZE_1); +ct_assert(NV_CPU_INTR_STALL_SUBTREE_LAST < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET__SIZE_1); +ct_assert(NV_CPU_INTR_STALL_SUBTREE_START < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR__SIZE_1); +ct_assert(NV_CPU_INTR_STALL_SUBTREE_LAST < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR__SIZE_1); + +ct_assert(NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1 == NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET__SIZE_1); +ct_assert(NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1 == NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR__SIZE_1); + +// +// Few design issues and intentions stated upfront: +// Q: Why are interrupts being enabled/ disabled here instead of their respective HALs? +// A: The intent is to keep the "alternate tree" (nonstall tree) invisible from the rest of RM. +// +// Q: Then how does a HAL register its interrupts on this alternate tree? +// A: It does not. The alternate tree is an aberration of nature meant to service **non stall interrupts** +// without using locking. +// +// Q: If the alternate tree does not respect locks taken by osAcquireRmSema then how do we prevent +// race conditions? +// A: We dont!! The plan here is to *manually* inspect every piece of code that gets executed on the ISR/DPC +// for this tree and make sure concurrent actions from elsewhere do not lead us in an inconsistent state. +// In future before adding code to this tree, **carefully inspect it yourself**. +// +// A final note, if and when RM gets fine grained locks in the main interrupt tree, it might be worthwhile +// getting rid of this. More code is more complexity!! 
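+//
+// Orientation note (illustrative only; the exact arithmetic lives in the
+// NV_CTRL_INTR_* macro definitions, not here): a GPU interrupt vector V is
+// decoded roughly as
+//     leafReg = V / 32;   leafBit = V % 32;   subtree = leafReg / 2;
+// i.e. each LEAF register tracks 32 vectors, each subtree owns a pair of
+// LEAF registers, and each subtree maps to a single bit of a CPU_INTR_TOP
+// register. The helpers used throughout this file follow that layout.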
+// +NV_STATUS +intrStateLoad_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz, i; + + NV_ASSERT_OK_OR_RETURN(intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz)); + + // + // Make sure all leaf nodes are disabled before we enable them. Older drivers + // and versions of mods leave them enabled. Bug 3299004. + // + _intrClearLeafEnables_TU102(pGpu, pIntr); + + // + // Enable interrupts either in legacy NV_PMC_INTR tree or new NV_CTRL tree + // as per the MC interrupt vector table. + // + // We have to make an exception for the TMR engine though, since for now, + // it reports into both PMC and dev_ctrl. We need the PTIMER alarm in + // PMC, which is the only place where it reports, and we need it in + // dev_ctrl for the countdown/callback timer, which we use in the PF + // and all the VFs + // + pGpu->pmcRmOwnsIntrMask = INTERRUPT_MASK_DISABLED; + for (i = 0; i < intrTableSz; i++) + { + if (pIntrTable[i].pmcIntrMask != NV_PMC_INTR_INVALID_MASK) + { + pGpu->pmcRmOwnsIntrMask |= pIntrTable[i].pmcIntrMask; + + if (pIntrTable[i].mcEngine != MC_ENGINE_IDX_TMR) + continue; + } + + if (pIntrTable[i].intrVector != NV_INTR_VECTOR_INVALID) + { + intrEnableLeaf_HAL(pGpu, pIntr, pIntrTable[i].intrVector); + } + + if ((pIntrTable[i].intrVectorNonStall != NV_INTR_VECTOR_INVALID) && + !pIntrTable[i].bDisableNonStall) + { + intrEnableLeaf_HAL(pGpu, pIntr, pIntrTable[i].intrVectorNonStall); + } + } + + status = intrCacheIntrFields_HAL(pGpu, pIntr); + if (status != NV_OK) + { + goto exit; + } + +exit: + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_ENABLE_DETAILED_LOGS)) + { + intrDumpState_HAL(pGpu, pIntr); + } + + return status; +} + +NV_STATUS +intrStateUnload_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 flags +) +{ + // Disable all interrupts since we're unloading + + intrWriteRegTopEnClear_HAL(pGpu, pIntr, 0, 0xFFFFFFFF, NULL); + + _intrClearLeafEnables_TU102(pGpu, pIntr); + + return NV_OK; +} + +/*! + * @brief Cache few Intr fields for ease of use in interrupt or RPC context. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + */ +NV_STATUS +intrCacheIntrFields_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NV_STATUS status = NV_OK; + OBJDISP *pDisp = GPU_GET_DISP(pGpu); + NvU32 leafEnHi, leafEnLo; + NvU32 uvmSharedLeafIdxStart = NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(NV_CPU_INTR_UVM_SHARED_SUBTREE_START); + NvU32 uvmSharedLeafIdxEnd = NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST); + NvU32 stallSubtreeLast = intrGetStallSubtreeLast_HAL(pGpu, pIntr); + NvU32 i; + + // + // Compile time assert to make sure we have only one client shared subtree. + // The below code assumes that. 
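+    // As a sketch of the layout assumed here: the 64-bit uvmSharedCpuLeafEn
+    // cache packs both leaves of that single shared subtree, with bits 63..32
+    // holding the first (IDX_START) leaf and bits 31..0 the second (IDX_END)
+    // leaf; _intrEnableStall_TU102 and _intrDisableStall_TU102 unpack it the
+    // same way.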
+ // + ct_assert(NV_CPU_INTR_UVM_SHARED_SUBTREE_START == NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST); + + // Now cache the leaf enable mask for the subtree shared with the client + leafEnHi = intrReadRegLeafEnSet_HAL(pGpu, pIntr, uvmSharedLeafIdxStart, NULL); + leafEnLo = intrReadRegLeafEnSet_HAL(pGpu, pIntr, uvmSharedLeafIdxEnd, NULL); + + pIntr->uvmSharedCpuLeafEn = ((NvU64)(leafEnHi) << 32) | leafEnLo; + pIntr->uvmSharedCpuLeafEnDisableMask = intrGetUvmSharedLeafEnDisableMask_HAL(pGpu, pIntr); + + // + // Cache the CPU_INTR_TOP_EN mask to clear when disabling stall interrupts + // (other interrupts are either not disabled or disabled selectively at leaf level) + // + for (i = NV_CPU_INTR_STALL_SUBTREE_START; i <= stallSubtreeLast; i++) + { + pIntr->cpuTopEnMask |= NVBIT(i); + } + + // Cache client owned, shared interrupt, and display vectors for ease of use later + pIntr->replayableFaultIntrVector = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_REPLAYABLE_FAULT, NV_FALSE); + pIntr->accessCntrIntrVector = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_ACCESS_CNTR, NV_FALSE); + if (pDisp != NULL) + { + pIntr->displayIntrVector = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_DISP, NV_FALSE); + } + else + { + pIntr->displayIntrVector = NV_INTR_VECTOR_INVALID; + } + + // + // Ensure that both UVM vectors are in the same leaf register (check right + // now so we don't have to check later in latency critical paths where this + // is assumed to be true) + // + if (NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(pIntr->replayableFaultIntrVector) != NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(pIntr->accessCntrIntrVector)) + { + NV_PRINTF(LEVEL_ERROR, "UVM interrupt vectors for replayable fault 0x%x " + "and access counter 0x%x are in different CPU_INTR_LEAF registers\n", + pIntr->replayableFaultIntrVector, pIntr->accessCntrIntrVector); + DBG_BREAKPOINT(); + status = NV_ERR_GENERIC; + goto exit; + } + + // + // Now ensure that they're in the expected subtree (check right now so we + // don't have to check later in latency critical paths where this is assumed + // to be true) + // + if (NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(pIntr->replayableFaultIntrVector) != NV_CPU_INTR_UVM_SUBTREE_START) + { + NV_PRINTF(LEVEL_ERROR, "UVM interrupt vectors for replayable fault and " + "access counter are in an unexpected subtree. Expected = 0x%x, actual = 0x%x\n", + NV_CPU_INTR_UVM_SUBTREE_START, + NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(pIntr->replayableFaultIntrVector)); + DBG_BREAKPOINT(); + status = NV_ERR_GENERIC; + goto exit; + } + +exit: + return status; +} + +/*! + * @brief Get the base interrupt vector to use when indexing engine stall + * interrupts + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * + * @returns the base interrupt vector for engine stall interrupts + */ +NvU32 +intrGetStallBaseVector_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 base = GPU_REG_RD32(pGpu, NV_CTRL_LEGACY_ENGINE_STALL_INTR_BASE_VECTORID); + return base; +} + +/*! 
+ * @brief Enable a given interrupt vector in dev_ctrl at leaf level + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] intrVector nonstall interrupt vector to enable + */ +void +intrEnableLeaf_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrVector +) +{ + NvU32 reg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + NvU32 leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector); + NvU32 intrLeafEnSetSize = intrGetLeafSize_HAL(pGpu, pIntr); + + if (reg >= intrLeafEnSetSize) + { + NV_PRINTF(LEVEL_ERROR, "Exceeding the range of INTR leaf registers. " + "intrVector = 0x%x, Reg = 0x%x\n", intrVector, reg); + NV_ASSERT(0); + return; + } + + intrWriteRegLeafEnSet_HAL(pGpu, pIntr, reg, NVBIT(leafBit), NULL); +} + +/*! + * @brief Disable a given interrupt vector in dev_ctrl at leaf level + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] intrVector nonstall interrupt vector to enable + */ +void +intrDisableLeaf_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrVector +) +{ + NvU32 reg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + NvU32 leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector); + NvU32 intrLeafEnClearSize = intrGetLeafSize_HAL(pGpu, pIntr); + + if (reg >= intrLeafEnClearSize) + { + NV_PRINTF(LEVEL_ERROR, "Exceeding the range of INTR leaf registers. " + "intrVector = 0x%x, Reg = 0x%x\n", intrVector, reg); + NV_ASSERT(0); + return; + } + + intrWriteRegLeafEnClear_HAL(pGpu, pIntr, reg, NVBIT(leafBit), NULL); +} + +/*! + * @brief Disable/Enable stall interrupts in dev_ctrl + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] pThreadState thread state node pointer + */ +void +intrSetStall_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrType, + THREAD_STATE_NODE *pThreadState +) +{ + // dev_ctrl tree is not used for legacy-vGPU + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return; + } + + if (intrType == INTERRUPT_TYPE_DISABLED) + { + _intrDisableStall_TU102(pGpu, pIntr, pThreadState); + } + else + { + _intrEnableStall_TU102(pGpu, pIntr, pThreadState); + } +} + +/*! + * @brief Clear all interrupt leaf nodes + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + */ +static void _intrClearLeafEnables_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 i; + NvU32 intrLeafSize = intrGetLeafSize_HAL(pGpu, pIntr); + + for (i = 0; i < intrLeafSize; i++) + { + intrWriteRegLeafEnClear_HAL(pGpu, pIntr, i, 0xFFFFFFFF, NULL); + } +} + +/*! + * @brief Enable all stall interrupts in dev_ctrl + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] pThreadState thread state node pointer + */ +static void +_intrEnableStall_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 val, idx; + + // + // 1. Enable the UVM interrupts that RM currently owns at INTR_LEAF + // level. + // + val = _intrGetUvmLeafMask_TU102(pGpu, pIntr); + idx = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(pIntr->replayableFaultIntrVector); + if (val != 0) + { + intrWriteRegLeafEnSet_HAL(pGpu, pIntr, idx, val, pThreadState); + } + + // + // 2. Enable all interrupts in the client shared subtree at INTR_LEAF + // level, based on the cached value. + // + + // + // Compile time assert to make sure we have only one client shared subtree. + // The below code assumes that. 
+    //
+    ct_assert(NV_CPU_INTR_UVM_SHARED_SUBTREE_START == NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST);
+    idx = NV_CPU_INTR_UVM_SHARED_SUBTREE_START;
+
+    if (NvU64_HI32(pIntr->uvmSharedCpuLeafEn) != 0)
+    {
+        intrWriteRegLeafEnSet_HAL(pGpu, pIntr,
+                                  NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(idx),
+                                  NvU64_HI32(pIntr->uvmSharedCpuLeafEn),
+                                  pThreadState);
+    }
+    if (NvU64_LO32(pIntr->uvmSharedCpuLeafEn) != 0)
+    {
+        intrWriteRegLeafEnSet_HAL(pGpu, pIntr,
+                                  NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(idx),
+                                  NvU64_LO32(pIntr->uvmSharedCpuLeafEn),
+                                  pThreadState);
+    }
+
+    // We use the assumption that 1 == ENABLE below
+    ct_assert(NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET_SUBTREE_ENABLE == 1);
+
+    //
+    // 3. Enable all interrupt subtrees (except nonstall) at top level. Nonstall
+    //    enablement is handled by a different function.
+    //
+    val = 0xFFFFFFFF;
+
+    val &= ~intrGetIntrTopNonStallMask_HAL(pGpu, pIntr);
+
+    intrWriteRegTopEnSet_HAL(pGpu, pIntr, 0, val, pThreadState);
+}
+
+/*!
+ * @brief Disable all stall interrupts in dev_ctrl
+ *
+ * @param[in]  pGpu          OBJGPU pointer
+ * @param[in]  pIntr         Intr pointer
+ * @param[in]  pThreadState  thread state node pointer
+ */
+static void
+_intrDisableStall_TU102
+(
+    OBJGPU            *pGpu,
+    Intr              *pIntr,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    NvU32 idx, val;
+
+    // 1. Disable the UVM interrupts that RM currently owns at INTR_LEAF level
+    idx = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(pIntr->replayableFaultIntrVector);
+    val = _intrGetUvmLeafMask_TU102(pGpu, pIntr);
+    if (val != 0)
+    {
+        intrWriteRegLeafEnClear_HAL(pGpu, pIntr, idx, val, pThreadState);
+    }
+
+    //
+    // 2. Disable all interrupts in the client shared subtree at INTR_LEAF
+    //    level, except the ones that can be handled outside the GPU lock.
+    //
+
+    //
+    // Compile time assert to make sure we have only one client shared subtree.
+    // The below code assumes that.
+    //
+    ct_assert(NV_CPU_INTR_UVM_SHARED_SUBTREE_START == NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST);
+    idx = NV_CPU_INTR_UVM_SHARED_SUBTREE_START;
+
+    if (!gpuIsStateLoaded(pGpu))
+    {
+        //
+        // If GPU state load has not finished, there is nothing we can or want to
+        // do here, since our cached state of interrupt vectors isn't valid yet
+        // anyway.
+        //
+        intrWriteRegLeafEnClear_HAL(pGpu, pIntr,
+            NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(idx),
+            0xFFFFFFFF, pThreadState);
+        intrWriteRegLeafEnClear_HAL(pGpu, pIntr,
+            NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(idx),
+            0xFFFFFFFF, pThreadState);
+    }
+    else
+    {
+        if ((NvU64_HI32(pIntr->uvmSharedCpuLeafEnDisableMask) != 0) &&
+            (NvU64_HI32(pIntr->uvmSharedCpuLeafEn) != 0))
+        {
+            //
+            // Only write to the register if both the enable mask and the
+            // disable mask are non-zero. If there are no interrupts we're
+            // interested in handling in one of the leafs, the enable mask will
+            // be zero and the disable mask will be all 0xFs. There's no point
+            // writing the register in that case since interrupts are already
+            // not enabled. Using the cached value helps us avoid a register
+            // read in latency critical paths.
+            //
+            intrWriteRegLeafEnClear_HAL(pGpu, pIntr,
+                NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(idx),
+                NvU64_HI32(pIntr->uvmSharedCpuLeafEnDisableMask),
+                pThreadState);
+        }
+        if ((NvU64_LO32(pIntr->uvmSharedCpuLeafEnDisableMask) != 0) &&
+            (NvU64_LO32(pIntr->uvmSharedCpuLeafEn) != 0))
+        {
+            //
+            // Only write to the register if both the enable mask and the
+            // disable mask are non-zero. 
If there are no interrupts we're + // interested in handling in one of the leafs, the enable mask will + // be zero and the disable mask will be all 0xFs. There's no point + // writing the register in that case since interrupts are already + // not enabled. Using the cached value helps us avoid a register + // read in latency critical paths. + // + intrWriteRegLeafEnClear_HAL(pGpu, pIntr, + NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(idx), + NvU64_LO32(pIntr->uvmSharedCpuLeafEnDisableMask), + pThreadState); + } + } + + // + // 3. Disable some interrupt subtrees at top level (information about which + // ones to disable is cached in pIntr->cpuTopEnMask) + // + intrWriteRegTopEnClear_HAL(pGpu, pIntr, 0, pIntr->cpuTopEnMask, pThreadState); +} + +/*! + * @brief Clears a given interrupt vector at the dev_ctrl LEAF level + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] intrVector interrupt vector to clear + * @param[in] pThreadState thread state node pointer + */ +void +intrClearLeafVector_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrVector, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 reg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + NvU32 bit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector); + + intrWriteRegLeaf_HAL(pGpu, pIntr, reg, NVBIT(bit), pThreadState); +} + +/*! + * @brief Checks if the given interrupt vector is pending at the dev_ctrl LEAF level + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] intrVector interrupt vector to check + * @param[in] pThreadState thread state node pointer + */ +NvBool +intrIsVectorPending_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrVector, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 reg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + NvU32 bit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector); + NvU32 val = intrReadRegLeaf_HAL(pGpu, pIntr, reg, pThreadState); + + if (val & NVBIT(bit)) + return NV_TRUE; + return NV_FALSE; +} + +/*! +* @brief Returns the INTR_LEAF mask for RM owned client interrupts. +* +* NOTE: Must be called after @intrStateLoad_TU102. This code assumes that the +* client owned interrupts are in the same leaf register. We would have checked +* whether that assumption is true in @intrStateLoad_TU102 and if it was +* violated, we'd have failed state load. +*/ +static NvU32 +_intrGetUvmLeafMask_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 val = 0; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + if (pKernelGmmu != NULL) + { + NvBool bRmOwnsReplayableFault = !!(pKernelGmmu->uvmSharedIntrRmOwnsMask & RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY); + NvBool bRmOwnsAccessCntr = !!(pKernelGmmu->uvmSharedIntrRmOwnsMask & RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY); + + if (bRmOwnsReplayableFault) + { + val |= NVBIT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(pIntr->replayableFaultIntrVector)); + } + if (bRmOwnsAccessCntr) + { + val |= NVBIT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(pIntr->accessCntrIntrVector)); + } + } + return val; +} + +/*! +* @brief Returns a 64 bit mask, where all the bits set to 0 are the ones we +* intend to leave enabled in the client shared subtree even when we disable +* interrupts (for example, when we take the GPU lock). +* +* The non-replayable fault interrupt is shared with the client, and in the +* top half of the interrupt handler, as such, we only copy fault packets from +* the HW buffer to the appropriate SW buffers. 
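+* (Illustrative shape of the result, under the leaf packing used elsewhere in
+* this file: if the non-replayable fault vector decodes to bit B of the first
+* shared leaf, the returned mask is roughly ~(NVBIT64(B) << 32), i.e. all bits
+* set except the few we intend to keep enabled.)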
+* The fifo non-stall interrupt is used for runlist events, which also does not +* need to be blocked by the GPU lock (existing codepaths already ascertain that +* this is safe, so we're maintaining that behavior in NV_CTRL). +*/ +NvU64 +intrGetUvmSharedLeafEnDisableMask_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + NvU32 intrVectorNonReplayableFault; + NvU32 intrVectorFifoNonstall = NV_INTR_VECTOR_INVALID; + NvU64 mask = 0; + + // GSP RM services both MMU non-replayable fault and FIFO interrupts + if (IS_GSP_CLIENT(pGpu)) + { + return ~mask; + } + + intrVectorNonReplayableFault = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_NON_REPLAYABLE_FAULT, NV_FALSE); + + if (!IS_VIRTUAL(pGpu)) + { + intrVectorFifoNonstall = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_FIFO, NV_TRUE); + } + + if (intrVectorFifoNonstall != NV_INTR_VECTOR_INVALID) + { + // Ascertain that they're in the same subtree and same leaf + NV_ASSERT(NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(intrVectorNonReplayableFault) == + NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(intrVectorFifoNonstall)); + NV_ASSERT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVectorNonReplayableFault) == + NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVectorFifoNonstall)); + } + + // Ascertain that they're in the first leaf + NV_ASSERT(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVectorNonReplayableFault) == + NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(NV_CPU_INTR_UVM_SHARED_SUBTREE_START)); + + // + // Compile-time ascertain that we only have 1 client subtree (we assume + // this since we cache only 64 bits). + // + ct_assert(NV_CPU_INTR_UVM_SHARED_SUBTREE_START == NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST); + + // + // Compile-time ascertain that we only have 2 subtrees as this is what we currently support + // by only caching 64 bits + // + ct_assert((NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST) - 1) == + NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(NV_CPU_INTR_UVM_SHARED_SUBTREE_START)); + + mask |= NVBIT64(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVectorNonReplayableFault)); + + if (intrVectorFifoNonstall != NV_INTR_VECTOR_INVALID) + { + mask |= NVBIT64(NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVectorFifoNonstall)); + } + + mask <<= 32; + + return ~mask; +} + +/*! 
+ * @brief Gets list of engines with pending stalling interrupts as per the interrupt trees + * + * @param[in] pGpu + * @param[in] pIntr + * @param[out] pEngines List of engines that have pending stall interrupts + * @param[in] pThreadState + * + * @return NV_OK if the list of engines that have pending stall interrupts was retrieved + */ +NV_STATUS +intrGetPendingStallEngines_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines, + THREAD_STATE_NODE *pThreadState +) +{ + INTR_TABLE_ENTRY *pIntrTable; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvU32 intrTableSz, i; + + NvU32 stallSubtreeLast = intrGetStallSubtreeLast_HAL(pGpu, pIntr); + NvU32 numIntrLeaves = intrGetNumLeaves_HAL(pGpu, pIntr); + NV_ASSERT(numIntrLeaves <= NV_MAX_INTR_LEAVES); + NvU32 intrLeafValues[NV_MAX_INTR_LEAVES]; + + portMemSet(intrLeafValues, 0, numIntrLeaves * sizeof(NvU32)); + bitVectorClrAll(pEngines); + + // dev_ctrl tree is not used for legacy-vGPU + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + return NV_OK; + } + + NV_ASSERT_OK_OR_RETURN(intrGetLeafStatus_HAL(pGpu, pIntr, intrLeafValues, pThreadState)); + NV_ASSERT_OK_OR_RETURN(intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz)); + + for (i = 0; i < intrTableSz; i++) + { + NvU32 intrVector; + NvU32 leaf, leafIndex, leafBit; + + intrVector = pIntrTable[i].intrVector; + + // Check if this engine has a valid stalling interrupt vector in the new tree + if (intrVector == NV_INTR_VECTOR_INVALID) + { + continue; + } + + leafIndex = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector); + + // + // The leafIndex must be within the stall tree. Try to catch this on + // pre-release drivers. Don't need this on release drivers since this + // is only to catch issues during code development. Should never happen + // in practice + // + if ((leafIndex < NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(NV_CPU_INTR_UVM_SUBTREE_START)) || + (leafIndex > NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(stallSubtreeLast))) + { + NV_PRINTF(LEVEL_ERROR, "MC_ENGINE_IDX %u has invalid stall intr vector %u\n", pIntrTable[i].mcEngine, intrVector); + DBG_BREAKPOINT(); + continue; + } + // + // Check if interrupt is pending. We skip checking if it is enabled in + // the leaf register since we mess around with the leaf enables in + // the interrupt disable path and will need special casing to handle it + // In the transition period from NV_PMC to NV_CTRL, the interrupt vector + // for engines that haven't yet switched would be INVALID, so we'd never + // get here anyway. + // + leaf = intrLeafValues[leafIndex] & NVBIT(leafBit); + + if (leaf == 0) + { + continue; + } + + // Add engine to bitvector + bitVectorSet(pEngines, pIntrTable[i].mcEngine); + } + + if (pKernelGmmu != NULL) + { + NvBool bRmOwnsReplayableFault = !!(pKernelGmmu->uvmSharedIntrRmOwnsMask & RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY); + NvBool bRmOwnsAccessCntr = !!(pKernelGmmu->uvmSharedIntrRmOwnsMask & RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY); + + // + // Add non replayable fault engine if there is something in the shadow buffer, + // as the interrupt itself is cleared earlier. 
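+        // (portAtomicOrS32(&flag, 0) serves as an atomic read here: OR-ing
+        // with zero leaves the flag unchanged while returning its value.)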
+        //
+        if (portAtomicOrS32(&pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].fatalFaultIntrPending, 0))
+        {
+            bitVectorSet(pEngines, MC_ENGINE_IDX_NON_REPLAYABLE_FAULT);
+        }
+
+        // If RM doesn't own either UVM interrupt, remove it from the pending mask
+        if (!bRmOwnsReplayableFault)
+        {
+            bitVectorClr(pEngines, MC_ENGINE_IDX_REPLAYABLE_FAULT);
+        }
+
+        if (!bRmOwnsAccessCntr)
+        {
+            bitVectorClr(pEngines, MC_ENGINE_IDX_ACCESS_CNTR);
+        }
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Checks and services MMU non-replayable fault interrupts that may not
+ * have been queued as DPC if we didn't get the GPU lock in the top half.
+ *
+ * If the MMU non-replayable fault interrupt was the only interrupt pending and
+ * we were unable to get the GPU lock in the top half, a DPC would not have
+ * been scheduled, but the non-replayable fault interrupt packet(s) would have
+ * been copied into the SW buffers. Try to do the bottom-half servicing of
+ * interrupts that could have been cleared in the top half.
+ *
+ * @param[in] pGpu          OBJGPU pointer
+ * @param[in] pIntr         Intr pointer
+ * @param[in] pThreadState  THREAD_STATE_NODE pointer
+ */
+NV_STATUS
+intrCheckAndServiceNonReplayableFault_TU102
+(
+    OBJGPU            *pGpu,
+    Intr              *pIntr,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    NV_STATUS status = NV_OK;
+    return status;
+}
+
+/*!
+ * @brief Retrigger interrupts by toggling enables of those subtrees not
+ * toggled at top level in GPU lock acquire/release. Subtrees that are toggled
+ * at top level will be implicitly re-armed when the GPU lock is released.
+ *
+ * @param[in] pGpu    GPU Object
+ * @param[in] pIntr   Intr Object
+ */
+void
+intrRetriggerTopLevel_TU102
+(
+    OBJGPU *pGpu,
+    Intr   *pIntr
+)
+{
+    NvU32 val = 0;
+    NvU32 i;
+
+    // We use the assumption that 1 == DISABLE below
+    ct_assert(NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR_SUBTREE_DISABLE == 1);
+
+    //
+    // Toggle the top level interrupt enables for all interrupts whose top
+    // level enables are not toggled during RM lock acquire/release.
+    //
+    if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_HANDLE_LOCKLESS) &&
+        pGpu->getProperty(pGpu, PDB_PROP_GPU_ALTERNATE_TREE_ENABLED))
+    {
+        //
+        // 1. If the alternate tree (nonstall tree) is handled "lockless", it
+        // is not disabled during RM lock acquire, so needs re-arming.
+        //
+        val |= intrGetIntrTopNonStallMask_HAL(pGpu, pIntr);
+    }
+
+    //
+    // 2. UVM-owned interrupt tree (never disabled at top level)
+    // 3. UVM/RM shared interrupt tree (never disabled at top level)
+    //
+    for (i = NV_CPU_INTR_UVM_SUBTREE_START; i <= NV_CPU_INTR_UVM_SHARED_SUBTREE_LAST; i++)
+    {
+        val |= NVBIT(i);
+    }
+
+    //
+    // Bypass GPU_REG_WR32 that requires the GPU lock to be held (for some
+    // register filters) by using the OS interface directly.
+    //
+    osGpuWriteReg032(pGpu,
+        GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_CLEAR(0)),
+        val);
+    osGpuWriteReg032(pGpu,
+        GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_EN_SET(0)),
+        val);
+}
+
+/*!
+ * @brief read all leaf interrupt registers into an array
+ *
+ * @param[in]  pGpu       OBJGPU pointer
+ * @param[in]  pIntr      Intr pointer
+ * @param[out] pLeafVals  array that leaf values will be returned in. 
+ *                        assumes that it is sufficiently large
+ */
+NV_STATUS
+intrGetLeafStatus_TU102
+(
+    OBJGPU            *pGpu,
+    Intr              *pIntr,
+    NvU32             *pLeafVals,
+    THREAD_STATE_NODE *pThreadState
+)
+{
+    NvU32 subtreeIndex;
+    NvU32 leafIndex;
+
+    // Read all the stalling interrupt leaf status
+    NvU32 stallSubtreeLast = intrGetStallSubtreeLast_HAL(pGpu, pIntr);
+    NV_ASSERT_OR_RETURN(NV_CPU_INTR_UVM_SUBTREE_START <= stallSubtreeLast, NV_ERR_INVALID_STATE);
+    subtreeIndex = NV_CPU_INTR_UVM_SUBTREE_START;
+    for (; subtreeIndex <= stallSubtreeLast; subtreeIndex++)
+    {
+        leafIndex = NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(subtreeIndex);
+        if (pIntr->getProperty(pIntr, PDB_PROP_INTR_READ_ONLY_EVEN_NUMBERED_INTR_LEAF_REGS))
+        {
+            //
+            // Since we know that on Turing, only one leaf per subtree has valid
+            // interrupts, optimize to only read those leaf registers.
+            //
+            pLeafVals[leafIndex] = intrReadRegLeaf_HAL(pGpu, pIntr, leafIndex, pThreadState);
+        }
+        else
+        {
+            for (; leafIndex <= NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(subtreeIndex); leafIndex++)
+            {
+                pLeafVals[leafIndex] = intrReadRegLeaf_HAL(pGpu, pIntr, leafIndex, pThreadState);
+            }
+        }
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Returns a bitfield with only MC_ENGINE_IDX_DISP set if it's pending.
+ * On Turing+, there are multiple stall interrupt registers, and reading them
+ * all in the top half would be expensive. To satisfy bug 3220319, only find out
+ * if display interrupt is pending. Fix this in bug 3279300
+ *
+ * @param[in]  pGpu
+ * @param[in]  pIntr
+ * @param[out] pEngines     List of engines that have pending stall interrupts
+ * @param[in]  pThreadState
+ *
+ * @return NV_OK if the list of engines that have pending stall interrupts was retrieved
+ */
+NV_STATUS
+intrGetPendingDisplayIntr_TU102
+(
+    OBJGPU               *pGpu,
+    Intr                 *pIntr,
+    PMC_ENGINE_BITVECTOR  pEngines,
+    THREAD_STATE_NODE    *pThreadState
+)
+{
+    bitVectorClrAll(pEngines);
+
+    if (IS_GPU_GC6_STATE_ENTERED(pGpu))
+    {
+        return NV_ERR_GPU_NOT_FULL_POWER;
+    }
+
+    if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu))
+    {
+        return NV_ERR_GPU_IS_LOST;
+    }
+
+    if (pIntr->displayIntrVector == NV_INTR_VECTOR_INVALID)
+    {
+        return NV_OK;
+    }
+    else if (intrIsVectorPending_TU102(pGpu, pIntr, pIntr->displayIntrVector, pThreadState))
+    {
+        bitVectorSet(pEngines, MC_ENGINE_IDX_DISP);
+    }
+
+    return NV_OK;
+}
+
+
+/**
+ * @brief Enable or disable the display interrupt.
+ * This implements the missing functionality of PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING
+ * for Turing+: The ability to leave display interrupts unmasked while the GPU lock is held
+ * The PMC_INTR_MASK HW registers were deprecated in Pascal, but the Pascal-Volta interrupt
+ * code still emulates them in SW. The Turing+ code did not implement any of the masking code,
+ * but as seen in bug 3152190, the ability to leave the display interrupt unmasked is still
+ * needed. 
The ability to unmask the interrupts to enable them to show up in interrupt registers + * is not needed, so this call is not needed at callsites that just do that + * (_intrEnterCriticalSection / _intrExitCriticalSection) + * This whole interrupts code mess needs refactored - bug 3279300 + * + * @param[in] pGpu + * @param[in] pIntr + * @param[in] bEnable + * @param[in] pThreadState - Needed for bypassing register filters in unlocked top half + * + */ +void +intrSetDisplayInterruptEnable_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvBool bEnable, + THREAD_STATE_NODE *pThreadState +) +{ + if (pIntr->displayIntrVector == NV_INTR_VECTOR_INVALID) + { + return; + } + + NvU32 reg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(pIntr->displayIntrVector); + NvU32 leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(pIntr->displayIntrVector); + + if (bEnable) + { + intrWriteRegLeafEnSet_HAL(pGpu, pIntr, reg, NVBIT(leafBit), pThreadState); + } + else + { + intrWriteRegLeafEnClear_HAL(pGpu, pIntr, reg, NVBIT(leafBit), pThreadState); + } +} + +/*! + * @brief Dumps interrupt state (registers, vector table) for debugging purpose. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + */ +void +intrDumpState_TU102 +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz = 0; + NvU32 i; + NvU32 intrLeafSize = intrGetLeafSize_HAL(pGpu, pIntr); + + NV_PRINTF(LEVEL_INFO, "Interrupt registers:\n"); + for (i = 0; i < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP__SIZE_1; i++) + { + NV_PRINTF(LEVEL_INFO, "INTR_TOP_EN_SET(%u)=0x%x\n", i, + intrReadRegTopEnSet_HAL(pGpu, pIntr, i, NULL)); + } + + for (i = 0; i < intrLeafSize; i++) + { + NV_PRINTF(LEVEL_INFO, "INTR_LEAF_EN_SET(%u)=0x%x\n", i, + intrReadRegLeafEnSet_HAL(pGpu, pIntr, i, NULL)); + } + + NV_PRINTF(LEVEL_INFO, "MC Interrupt table:\n"); + intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz); + + for (i = 0; i < intrTableSz; i++) + { + NV_PRINTF(LEVEL_INFO, + "%2u: mcEngineIdx=%-4u intrVector=%-10u intrVectorNonStall=%-10u bDisableNonStall=%u\n", i, + pIntrTable[i].mcEngine, + pIntrTable[i].intrVector, + pIntrTable[i].intrVectorNonStall, + pIntrTable[i].bDisableNonStall); + } +} + +/*! + * @brief Gets the stall subtree end index + */ +NvU32 +intrGetStallSubtreeLast_TU102(OBJGPU *pGpu, Intr *pIntr) +{ + return NV_CPU_INTR_STALL_SUBTREE_LAST; +} + +/*! + * @brief Gets the number of leaf registers used + */ +NvU32 +intrGetNumLeaves_TU102(OBJGPU *pGpu, Intr *pIntr) +{ + ct_assert((NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(NV_CPU_INTR_STALL_SUBTREE_LAST) + 1) <= NV_MAX_INTR_LEAVES); + return (NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_END(NV_CPU_INTR_STALL_SUBTREE_LAST) + 1); +} + +/*! + * @brief Gets the value of VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1 + */ +NvU32 +intrGetLeafSize_TU102(OBJGPU *pGpu, Intr *pIntr) +{ + return NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1; +} + +/*! 
+ * @brief Gets the mask of INTR_TOP that covers nonstall interrupts + */ +NvU32 +intrGetIntrTopNonStallMask_TU102(OBJGPU *pGpu, Intr *pIntr) +{ + // Compile-time assert against the highest set bit that will be returned + #define NV_CPU_INTR_NOSTALL_SUBTREE_HIGHEST NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(0) + + ct_assert(NV_CPU_INTR_NOSTALL_SUBTREE_HIGHEST < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF__SIZE_1); + ct_assert(NV_CPU_INTR_NOSTALL_SUBTREE_HIGHEST < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET__SIZE_1); + ct_assert(NV_CPU_INTR_NOSTALL_SUBTREE_HIGHEST < NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR__SIZE_1); + + return NVBIT32(NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_TOP_SUBTREE(0)); +} + +/*! + * @brief Decode the interrupt mode for SW to use + * + * @param[in] pIntr Intr Object + * @param[in] intrEn the enable value to decode + * + * @returns the value of the decoded interrupt + * + */ +NvU32 +intrDecodeStallIntrEn_TU102 +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 intrEn +) +{ + // mask with interrupts that RM owns + if (pGpu->pmcRmOwnsIntrMask != 0) + { + intrEn &= pGpu->pmcRmOwnsIntrMask; + } + + switch (intrEn) + { + case INTERRUPT_MASK_DISABLED: + return INTERRUPT_TYPE_DISABLED; + case INTERRUPT_MASK_HARDWARE: + return INTERRUPT_TYPE_HARDWARE; + case INTERRUPT_MASK_SOFTWARE: + return INTERRUPT_TYPE_SOFTWARE; + default: + return INTERRUPT_TYPE_MULTI; + } +} diff --git a/src/nvidia/src/kernel/gpu/intr/intr.c b/src/nvidia/src/kernel/gpu/intr/intr.c new file mode 100644 index 000000000..5bd1c0b4c --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/intr.c @@ -0,0 +1,1650 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "core/core.h"
+#include "kernel/gpu/intr/intr.h"
+#include "gpu/gpu.h"
+#include "core/locks.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "gpu/subdevice/subdevice.h"
+#include "kernel/gpu/gr/fecs_event_list.h"
+#include "kernel/gpu/gr/kernel_graphics.h"
+#include "kernel/gpu/intr/engine_idx.h"
+#include "kernel/gpu/intr/intr_service.h"
+#include "gpu/mmu/kern_gmmu.h"
+#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
+#include "os/os.h"
+#include "resserv/rs_server.h"
+#include "vgpu/rpc.h"
+#include "virtualization/hypervisor/hypervisor.h"
+#include "gpu/gsp/kernel_gsp.h"
+
+#include "nv_ref.h"
+#include "nvRmReg.h"
+
+
+//
+// Used by _intrServiceStallExactList inside a critical section.
+// Declared here as it needs to be cleared only at top of DPC processing.
+//
+static struct
+{
+    NvU32 intrCount;
+    NvU32 intrVal;
+} stuckIntr[MC_ENGINE_IDX_MAX];
+
+static NvBool _intrServiceStallExactList(OBJGPU *pGpu, Intr *pIntr, MC_ENGINE_BITVECTOR *pEngines);
+static void _intrInitServiceTable(OBJGPU *pGpu, Intr *pIntr);
+
+void
+intrServiceStall_IMPL(OBJGPU *pGpu, Intr *pIntr)
+{
+    MC_ENGINE_BITVECTOR pendingEngines;
+    NV_STATUS status;
+    NvBool bPending;
+    NvU16 nextEngine;
+    NvU32 regReadValue;
+
+    NVRM_TRACE('MISR');
+
+    //
+    // If the GPU is off the BUS or surprise removed during servicing DPC for ISRs,
+    // we won't know about GPU state until after we start processing DPCs for every
+    // pending engine. This is because the reg read to determine pending engines
+    // returns 0xFFFFFFFF due to the GPU being off the bus. To prevent further processing,
+    // reading PMC_BOOT_0 register to check if the GPU was surprise removed/ off the bus
+    // and setting PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING to attempt Secondary Bus reset
+    // at lower IRQL later to attempt to recover the GPU and avoid all ISR DPC processing till
+    // GPU is recovered.
+    //
+
+    regReadValue = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0);
+
+    if (regReadValue == GPU_REG_VALUE_INVALID)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Failed GPU reg read : 0x%x. 
Check whether GPU is present on the bus\n", + regReadValue); + } + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + goto exit; + } + + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + goto exit; + } + + portMemSet(stuckIntr, 0, sizeof(stuckIntr)); + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_DISABLE_PER_INTR_DPC_QUEUEING)) + { + do { + NV_ASSERT_OK_OR_ELSE(status, intrGetPendingStall_HAL(pGpu, pIntr, &pendingEngines, NULL /* threadstate */), return); + bPending = _intrServiceStallExactList(pGpu, pIntr, &pendingEngines); + } while (bPending); + } + else if (!pIntr->bDpcStarted) + { + intrGetPendingStall_HAL(pGpu, pIntr, &pendingEngines, NULL /* threadstate */); + if (!bitVectorTestAllCleared(&pendingEngines)) + { + nextEngine = bitVectorCountTrailingZeros(&pendingEngines); + intrQueueInterruptBasedDpc(pGpu, pIntr, nextEngine); + bitVectorClr(&pIntr->pmcIntrPending, nextEngine); + pIntr->bDpcStarted = NV_TRUE; + intrProcessDPCQueue_HAL(pGpu, pIntr); + } + } + else + { + intrProcessDPCQueue_HAL(pGpu, pIntr); + } + +exit: + NVRM_TRACE('misr'); +} + +NV_STATUS +subdeviceCtrlCmdMcServiceInterrupts_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS *pServiceInterruptParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + Intr *pIntr = GPU_GET_INTR(pGpu); + MC_ENGINE_BITVECTOR engines; + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + + bitVectorClrAll(&engines); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, pRmCtrlParams->hClient, pRmCtrlParams->hObject, pRmCtrlParams->cmd, + pRmCtrlParams->pParams, pRmCtrlParams->paramsSize, status); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS failed " + "with error 0x%x\n", status); + return status; + } + } + + // convert RMCTRL engine flags to internal engine flags + if (pServiceInterruptParams->engines == NV2080_CTRL_MC_ENGINE_ID_ALL) + { + bitVectorSetAll(&engines); + } + + // check for individual engines. (currently only GR) + else if (pServiceInterruptParams->engines & NV2080_CTRL_MC_ENGINE_ID_GRAPHICS) + { + // If MIG is enabled, check for all GRs available in that GPU instance + if (bMIGInUse) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + NvU32 grCount = 0; + NvU32 i; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + + // Compute instances always contain 1 GR + grCount = 1; + if (ref.pMIGComputeInstance == NULL) + { + // + // If client is only subscribed to GPU instance, use all + // GPU instance GR engines + // + grCount = kmigmgrCountEnginesOfType(&ref.pKernelMIGGpuInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_GR(0)); + } + + for (i = 0; i < grCount; ++i) + { + NvU32 globalEngineType; + NvU32 grIdx; + + NV_ASSERT_OK( + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_GR(i), + &globalEngineType)); + + grIdx = NV2080_ENGINE_TYPE_GR_IDX(globalEngineType); + bitVectorSet(&engines, MC_ENGINE_IDX_GRn(grIdx)); + } + } + else + { + bitVectorSet(&engines, MC_ENGINE_IDX_GR); + } + } + + intrServiceStallList_HAL(pGpu, pIntr, &engines, NV_TRUE); + + return NV_OK; +} + +/*! 
+ * @brief checks for GMMU interrupts + * + * @param[in] pGpu + * @param[in] pIntr + * @param[out] pEngines + * @param[in] pThreadState + */ +void +intrGetGmmuInterrupts_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines, + THREAD_STATE_NODE *pThreadState +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + bitVectorClrAll(pEngines); + + // Check if we have any Gmmu interrupt pending + if (pKernelGmmu != NULL) + { + // + // Read the hub interrupt register as we apply mask while grabbing RM lock at leaf levels and + // that would disable the top level PMC interrupt. + // + intrGetHubLeafIntrPending_HAL(pGpu, pIntr, pEngines, pThreadState); + + // Check if any fault was copied only if any other interrupt on GMMU is not pending. + if (!bitVectorTest(pEngines, MC_ENGINE_IDX_GMMU)) + { + if (portAtomicOrS32(&pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].fatalFaultIntrPending, 0)) + { + bitVectorSet(pEngines, MC_ENGINE_IDX_GMMU); + } + else + { + bitVectorClrAll(pEngines); + } + } + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS)) + { + bitVectorClr(pEngines, MC_ENGINE_IDX_GMMU); + } +} + +void +intrSetDefaultIntrEn_IMPL(Intr *pIntr, NvU32 intrEn0) +{ + NV_ASSERT(intrEn0 <= INTERRUPT_TYPE_MAX); + pIntr->intrEn0 = pIntr->intrEn0Orig = intrEn0; +} + +NvU32 +intrGetDefaultIntrEn_IMPL(Intr *pIntr) +{ + return (pIntr->intrEn0Orig); +} + +void +intrSetIntrEn_IMPL(Intr *pIntr, NvU32 intrEn0) +{ + if ( pIntr->halIntrEnabled == NV_FALSE ) + { + // Interrupts cannot be enabled as long as the freeze is true + NV_PRINTF(LEVEL_INFO, + "intrSetIntrEn: set interrupt refused since interrupts are disabled in the HAL\n"); + return; + } + + NV_ASSERT(intrEn0 <= INTERRUPT_TYPE_MAX); + pIntr->intrEn0 = intrEn0; +} + +NvU32 +intrGetIntrEn_IMPL(Intr *pIntr) +{ + if ( pIntr->halIntrEnabled == NV_FALSE ) + { + NV_PRINTF(LEVEL_INFO, + "intrGetIntrEn: Returning interrupt disabled. 
Interrupts disabled in the HAL\n"); + return INTERRUPT_TYPE_DISABLED; + } + + return (pIntr->intrEn0); +} + +void +intrSetIntrMaskUnblocked_IMPL(Intr *pIntr, MC_ENGINE_BITVECTOR *pUnblockedEngines) +{ + NV_ASSERT(pUnblockedEngines); + bitVectorCopy(&pIntr->intrMask.engMaskUnblocked, pUnblockedEngines); +} + +void +intrGetIntrMaskUnblocked_IMPL(Intr *pIntr, MC_ENGINE_BITVECTOR *pUnblockedEngines) +{ + NV_ASSERT(pUnblockedEngines); + bitVectorCopy(pUnblockedEngines, &pIntr->intrMask.engMaskUnblocked); +} + +NvU32 +intrGetIntrMaskFlags_IMPL(Intr *pIntr) +{ + return pIntr->intrMask.flags; +} + +void +intrSetIntrMaskFlags_IMPL(Intr *pIntr, NvU32 flags) +{ + pIntr->intrMask.flags = flags; +} + +void +intrQueueDpc_IMPL(OBJGPU *pGpu, Intr *pIntr, DPCQUEUE *pDPCQueue, DPCNODE *pNode) +{ + if (pDPCQueue->pFront == NULL) + { + pDPCQueue->pFront = pDPCQueue->pRear = pNode; + } + else + { + pDPCQueue->pRear->pNext = pNode; + pDPCQueue->pRear = pNode; + } + pDPCQueue->numEntries++; +} + +DPCNODE * +intrDequeueDpc_IMPL(OBJGPU *pGpu, Intr *pIntr, DPCQUEUE *pDPCQueue) +{ + DPCNODE *pNode; + + pNode = pDPCQueue->pFront; + if (pDPCQueue->pFront->pNext == NULL) + { + pDPCQueue->pFront = pDPCQueue->pRear = NULL; + } + else + { + pDPCQueue->pFront = pDPCQueue->pFront->pNext; + } + return pNode; +} + +NvBool +intrIsDpcQueueEmpty_IMPL(OBJGPU *pGpu, Intr *pIntr, DPCQUEUE *pDPCQueue) +{ + return (NULL == pDPCQueue->pFront); +} + +void intrQueueInterruptBasedDpc_IMPL(OBJGPU *pGpu, Intr *pIntr, NvU16 engine) +{ + DPCNODE *pNode; + DPCQUEUE *pDPCQueue = &pIntr->dpcQueue; + + pNode = portMemAllocNonPaged(sizeof(DPCNODE)); + if (NULL != pNode) + { + pNode->pNext = NULL; + pNode->dpctype = INTERRUPT_BASED_DPC; + bitVectorClrAll(&pNode->dpcdata.pendingEngines); + bitVectorSet(&pNode->dpcdata.pendingEngines, engine); + + intrQueueDpc(pGpu, pIntr, pDPCQueue, pNode); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Cannot allocate memory for the DPC queue entry\n"); + DBG_BREAKPOINT(); + } +} + +/*! + * @brief Conditionally service interrupts across all gpus on the provided engine list. + * + * If GPU lock is held for all GPUs, then service interrupts for all GPUs. + * Else, service the interrupts for the device corresponding to the input GPU. + * Operations that use resources across multiple gpus may fail while interrupts on a gpu are pending. + * + * @param[in] pEngines List of engines to be serviced. + */ + +void +intrServiceStallListAllGpusCond_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines, + NvBool checkIntrEnabled +) +{ + NvU32 gpuAttachCnt, gpuAttachMask, gpuInstance; + NvBool bBCState = NV_FALSE; + + if (!rmGpuLockIsOwner()) + { + // + // We shouldn't service other GPU interrupts, if we don't have their lock. + // Reason being that if an intr happens, it would trigger the isr if the locks are not held. 
+ // See bug 1911524 + // + intrServiceStallListDevice(pGpu, pIntr, pEngines, checkIntrEnabled); + return; + } + + gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + gpuInstance = 0; + + while ((pGpu = gpumgrGetNextGpu(gpuAttachMask, &gpuInstance)) != NULL) + { + pIntr = GPU_GET_INTR(pGpu); + + // + // deviceInstance can be invalid when we loop over all attached gpus + // in SLI unlink path: Bug 2462254 + // + if (IsDeviceDestroyed(pGpu)) + continue; + + // Check that the GPU state is neither loaded nor loading + if (!gpuIsStateLoading(pGpu) && !gpuIsStateLoaded(pGpu)) + { + continue; + } + + // + // checkIntrEnabled: Service intr for a GPU only if they are enabled + // eg: In Unload path, the intr for the gpu that is being unloaded + // are explicitly disabled and we do not wish to service those + // But other GPU intr should make forward progress. + // + if (checkIntrEnabled && !intrGetIntrEn(pIntr)) + continue; + + bBCState = gpumgrGetBcEnabledStatus(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + // Dont service interrupts if GPU is not powered up or is Surprise Removed + if (gpuIsGpuFullPower(pGpu) && API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + intrServiceStallList_HAL(pGpu, pIntr, pEngines, NV_FALSE); + } + + gpumgrSetBcEnabledStatus(pGpu, bBCState); + } +} + +/*! + * @brief Service interrupts on given gpu. + * + * Service pending interrupts of given device on the provided engine list. + * + * @param[in] pGpu GPU object pointer + * @param[in] pEngines List of engines to be serviced. + */ +void +intrServiceStallListDevice_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines, + NvBool checkIntrEnabled +) +{ + NvBool bBCState = NV_FALSE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + + if (checkIntrEnabled && !intrGetIntrEn(GPU_GET_INTR(pGpu))) + SLI_LOOP_CONTINUE; + + bBCState = gpumgrGetBcEnabledStatus(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + if (gpuIsGpuFullPower(pGpu)) + { + pIntr = GPU_GET_INTR(pGpu); + intrServiceStallList_HAL(pGpu, pIntr, pEngines, NV_FALSE); + } + + gpumgrSetBcEnabledStatus(pGpu, bBCState); + + SLI_LOOP_END; +} + +/*! + * @brief Get the interrupt vector for the given engine + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] mcEngineId Engine id (i.e. one of MC_ENGINE_IDX_* defines) + * @param[in] bNonStall NV_TRUE if non-stall, else NV_FALSE + * + * @returns the nonstall interrupt vector for that engine + */ +NvU32 +intrGetVectorFromEngineId_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + NvU16 mcEngineId, + NvBool bNonStall +) +{ + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz; + NvU32 i; + NV_STATUS status; + + status = intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz); + if (status != NV_OK) + { + NV_ASSERT_OK_FAILED("Failed to get interrupt table", status); + return NV_INTR_VECTOR_INVALID; + } + + for (i = 0; i < intrTableSz; i++) + { + if (pIntrTable[i].mcEngine == mcEngineId) + { + if (bNonStall) + { + return pIntrTable[i].intrVectorNonStall; + } + else + { + return pIntrTable[i].intrVector; + } + } + } + + NV_PRINTF(LEVEL_ERROR, "Could not find the specified engine Id %u\n", mcEngineId); + DBG_BREAKPOINT(); + return NV_INTR_VECTOR_INVALID; +} + +/*! + * @brief Convert a general MC_ENGINE_BITVECTOR to its corresponding hardware + * interrupt mask in PMC. 
+ * + * @param[in] engineMask Mask of engines to get interrupt mask for + * + * @returns A bitmask of PMC interrupt bits corresponding to the engines + * specified in the engineMask parameter + */ +NvU32 +intrConvertEngineMaskToPmcIntrMask_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + PMC_ENGINE_BITVECTOR engineMask +) +{ + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz; + NvU32 i; + NvU32 pmcIntrMask = 0; + + if (NV_OK != intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz)) + { + NV_ASSERT(pmcIntrMask); + return pmcIntrMask; + } + + for (i = 0; i < intrTableSz; i++) + { + if (bitVectorTest(engineMask, pIntrTable[i].mcEngine)) + { + pmcIntrMask |= pIntrTable[i].pmcIntrMask; + } + } + + return pmcIntrMask; +} + +/*! + * @brief Convert a PMC interrupt mask to a general MC_ENGINE_BITVECTOR. + * + * @param[in] pmcIntrMask Mask of PMC interrupt bits to get engine mask for + * + * @returns A bitmask of engines corresponding to the interrupts specified in + * the intrMask parameter + */ +void +intrConvertPmcIntrMaskToEngineMask_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 pmcIntrMask, + PMC_ENGINE_BITVECTOR pEngines +) +{ + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz; + NvU32 i; + + NV_ASSERT_OR_RETURN_VOID(pEngines != NULL); + NV_ASSERT_OR_RETURN_VOID(intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz) == NV_OK); + + bitVectorClrAll(pEngines); + for (i = 0; i < intrTableSz; i++) + { + if (pIntrTable[i].pmcIntrMask == NV_PMC_INTR_INVALID_MASK) + { + continue; + } + + if (pIntrTable[i].pmcIntrMask & pmcIntrMask) + { + bitVectorSet(pEngines, pIntrTable[i].mcEngine); + } + } +} + +/*! + * @brief Returns the smallest notification interrupt vector, used to find the + * interrupt vector space in which partition assigned interrupt vectors should + * be remapped. + */ +NV_STATUS +intrGetSmallestNotificationVector_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + NvU32 *pSmallestVector +) +{ + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz; + NvU32 i, leafIdx; + NvU32 val = NV_INTR_VECTOR_INVALID; + + *pSmallestVector = val; + + NV_ASSERT_OR_RETURN(pSmallestVector != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OK_OR_RETURN(intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz)); + + for (i = 0; i < intrTableSz; i++) + { + NvU32 curVector = pIntrTable[i].intrVectorNonStall; + if (curVector < val) + { + val = curVector; + } + } + + // + // Return the smallest interrupt vector in the subtree to which val belongs. + // This should be consistent across all chips even if they have different + // floorsweeping configs + // + leafIdx = NV_CTRL_INTR_SUBTREE_TO_LEAF_IDX_START(NV_CTRL_INTR_GPU_VECTOR_TO_SUBTREE(val)); + *pSmallestVector = NV_CTRL_INTR_LEAF_IDX_TO_GPU_VECTOR_START(leafIdx); + return NV_OK; +} + +/*! 
+ * @brief Reads NV_PFB_NISO_INTR register and determine if we have an interrupt pending + * The function returns NVBIT64(MC_ENGINE_IDX_GMMU) if any interrupt is found pending + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[out] pEngines list of pending engines + * @param[in] pThreadState + */ +void +intrGetHubLeafIntrPending_STUB +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines, + THREAD_STATE_NODE *pThreadState +) +{ + bitVectorClrAll(pEngines); +} + +static void _intrInitRegistryOverrides(OBJGPU *, Intr *); + +NV_STATUS +intrConstructEngine_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + ENGDESCRIPTOR engDesc +) +{ + pIntr->dpcQueue.pFront = NULL; + pIntr->dpcQueue.pRear = NULL; + pIntr->bDpcStarted = NV_FALSE; + + if (!RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + pIntr->setProperty(pIntr, PDB_PROP_INTR_DISABLE_PER_INTR_DPC_QUEUEING, NV_TRUE); + } + + return NV_OK; +} + +void +intrDestruct_IMPL +( + Intr *pIntr +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pIntr); + DPCQUEUE *pDPCQueue = &pIntr->dpcQueue; + DPCNODE *pNode = NULL; + + while (!intrIsDpcQueueEmpty(pGpu, pIntr, pDPCQueue)) + { + pNode = intrDequeueDpc(pGpu, pIntr, pDPCQueue); + portMemFree(pNode); + } +} + + +NV_STATUS +intrStateInitUnlocked_IMPL +( + OBJGPU *pGpu, + Intr *pIntr) +{ + NvU32 data = 0; + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_INTR_DETAILED_LOGS, &data) == NV_OK) + { + if (data == NV_REG_STR_RM_INTR_DETAILED_LOGS_ENABLE) + { + pIntr->setProperty(pIntr, PDB_PROP_INTR_ENABLE_DETAILED_LOGS, NV_TRUE); + } + } + + _intrInitRegistryOverrides(pGpu, pIntr); + + return NV_OK; +} + +NV_STATUS +intrStateInitLocked_IMPL +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + // Enable interrupts in the HAL + pIntr->halIntrEnabled = NV_TRUE; + + // Enable the interrupt mapping within the chip + intrSetDefaultIntrEn(pIntr, INTERRUPT_TYPE_HARDWARE); + + // Initially mask will allow all interrupts. + pIntr->intrMask.cached = INTERRUPT_MASK_ENABLED; + + //initialize all GPU interrupts to be serviced by RM + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + pGpu->pmcRmOwnsIntrMask = INTERRUPT_MASK_ENABLED; + if (pKernelGmmu != NULL) + { + pKernelGmmu->uvmSharedIntrRmOwnsMask = RM_UVM_SHARED_INTR_MASK_ALL; + } + + NV_ASSERT_OK_OR_RETURN(intrInitInterruptTable_HAL(pGpu, pIntr)); + _intrInitServiceTable(pGpu, pIntr); + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + intrGetIntrMask_HAL(pGpu, pIntr, &pIntr->intrMask.engMaskOrig, NULL /* threadstate */); + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + MC_ENGINE_BITVECTOR engines; + + bitVectorClrAll(&engines); + bitVectorSet(&engines, MC_ENGINE_IDX_FIFO); + bitVectorSet(&engines, MC_ENGINE_IDX_DISP); + + intrSetIntrMaskUnblocked(pIntr, &engines); + } + // Hypervisor will set the intr unblocked mask later at the time of SWRL init. + } + + return NV_OK; +} + +void +intrStateDestroy_IMPL +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + intrStateDestroyPhysical_HAL(pGpu, pIntr); + + // Disable interrupts in the HAL + pIntr->halIntrEnabled = NV_FALSE; + + // Ensure we don't try to call through any stale IntrService pointers. 
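/*
 * Illustrative sketch (standalone, hypothetical helper and key names; not part
 * of this change): the registry override pattern used by
 * intrStateInitUnlocked_IMPL and _intrInitRegistryOverrides treats an absent
 * key as "no override" -- behavior changes only when the read succeeds and the
 * value matches the documented enable constant.
 */
#include <stdbool.h>

/* stand-in for an osReadRegistryDword-style lookup; stubbed as "key absent" */
static bool sketchReadConfigDword(const char *key, unsigned int *pValue)
{
    (void)key;
    (void)pValue;
    return false;
}

static void sketchApplyOverride(bool *pDetailedLogsEnabled)
{
    unsigned int data = 0;
    if (sketchReadConfigDword("DetailedIntrLogs", &data) && data == 1)
        *pDetailedLogsEnabled = true;   /* flip the default only on an explicit enable */
}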
+ portMemSet(pIntr->intrServiceTable, 0, sizeof(pIntr->intrServiceTable)); + + NV_ASSERT_OK(intrDestroyInterruptTable_HAL(pGpu, pIntr)); +} + +/* + * Service interrupts that may not have been queued due to GPU lock unavailablity + * + * @param[in] pGpu + * @param[in] pIntr + * @param[in] pThreadState + */ +NV_STATUS +intrServiceNonStallBottomHalf_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pIntrPending, + THREAD_STATE_NODE *pThreadState +) +{ + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus; + + tmpStatus = intrCheckAndServiceNonReplayableFault_HAL(pGpu, pIntr, pThreadState); + if (tmpStatus != NV_OK) + status = tmpStatus; + + tmpStatus = intrCheckAndServiceFecsEventbuffer(pGpu, pIntr, pIntrPending, pThreadState); + if (tmpStatus != NV_OK) + status = tmpStatus; + + return status; +} + +static void +_intrInitRegistryOverrides +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + OBJGPU *pGpuLoop; + Intr *pIntrLoop; + NvU32 gpuInstance; + NvU32 gpuMask; + NvU32 data = 0; + + // This code has only the ability to disable the mechanism and will not + // enable it. This property is autoset only on GPUs on which it is + // unsupported. And it is not reset anywhere which will allow the + // mechanism to be enabled on the GPUs which are unsupported. + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_PER_INTR_DPC_QUEUING, &data) == NV_OK) + { + if(data) + { + gpuMask = gpumgrGetGpuMask(pGpu); + gpuInstance = 0; + while ((pGpuLoop = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + pIntrLoop = GPU_GET_INTR(pGpuLoop); + pIntrLoop->setProperty(pIntrLoop, PDB_PROP_INTR_DISABLE_PER_INTR_DPC_QUEUEING, NV_TRUE); + } + } + } + + pIntr->setProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING, NV_FALSE); + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM || hypervisorIsVgxHyper()) + { + // Enable IntrMask Locking by default if supported + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_MASK_SUPPORTED) && + (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu))) + { + pIntr->setProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING, NV_TRUE); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_LOCKING_MODE, &data) == NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV_REG_STR_RM_LOCKING_MODE was set to: 0x%x\n", data); + + switch (data) + { + case NV_REG_STR_RM_LOCKING_MODE_INTR_MASK: + { + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_MASK_SUPPORTED)) + { + pIntr->setProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING, NV_TRUE); + } + } + break; + + case NV_REG_STR_RM_LOCKING_MODE_LAZY_INTR_DISABLE: + break; + + default: + { + pIntr->setProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING, NV_FALSE); + } + } + } + } + + pIntr->intrStuckThreshold = INTR_STUCK_THRESHOLD; + if (osReadRegistryDword(pGpu, NV_REG_STR_INTR_STUCK_THRESHOLD, &data) == NV_OK) + { + pIntr->intrStuckThreshold = data; + } +} + +void +intrSaveIntrEn0FromHw_IMPL +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + pIntr->saveIntrEn0 = intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL /* threadState */); +} + +NV_STATUS +intrInitInterruptTable_KERNEL +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status = NV_OK; + NvU32 i = 0; + INTR_TABLE_ENTRY *pIntrTable = NULL; + NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *pParams; + + NV_ASSERT_OR_RETURN(pIntr->pIntrTable == NULL, NV_ERR_INVALID_STATE); + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate params for kernel intr table control"); + status = NV_ERR_NO_MEMORY; + goto exit; + } + + 
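/*
 * Illustrative sketch (standalone, hypothetical names; not part of this change):
 * the remainder of intrInitInterruptTable_KERNEL below follows a common cleanup
 * idiom -- allocate scratch and result buffers, bail to a single exit label on
 * any failure, and hand the result off by NULL-ing the local pointer so the
 * unconditional free at "exit" only releases what was not transferred.
 */
#include <stdlib.h>
#include <string.h>

typedef struct { int *pTable; unsigned int count; } SketchOwner;

static int sketchBuildTable(SketchOwner *pOwner, unsigned int count)
{
    int  status = -1;
    int *pLocal = NULL;

    pLocal = malloc(count * sizeof(*pLocal));
    if (pLocal == NULL)
        goto exit;

    memset(pLocal, 0, count * sizeof(*pLocal));
    /* ... fill pLocal from the source buffer ... */

    pOwner->pTable = pLocal;    /* transfer ownership to the caller-visible object */
    pOwner->count  = count;
    pLocal = NULL;              /* so the exit path does not free the transferred buffer */
    status = 0;

exit:
    free(pLocal);               /* frees only on the failure paths */
    return status;
}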
NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, + pParams, sizeof(*pParams)), + exit); + + status = NV_ERR_INVALID_PARAMETER; + NV_ASSERT_OR_GOTO(pParams->tableLen <= NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE, exit); + + pIntrTable = portMemAllocNonPaged(sizeof(INTR_TABLE_ENTRY) * pParams->tableLen); + if (pIntrTable == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate kernel interrupt table"); + status = NV_ERR_NO_MEMORY; + goto exit; + } + portMemSet(pIntrTable, 0, sizeof(INTR_TABLE_ENTRY) * pParams->tableLen); + + for (i = 0; i < pParams->tableLen; ++i) + { + pIntrTable[i].mcEngine = pParams->table[i].engineIdx; + pIntrTable[i].pmcIntrMask = pParams->table[i].pmcIntrMask; + pIntrTable[i].intrVector = pParams->table[i].vectorStall; + pIntrTable[i].intrVectorNonStall = pParams->table[i].vectorNonStall; + } + + // Transfer ownership of allocated table to pIntr and clear local to avoid MemFree + pIntr->pIntrTable = pIntrTable; + pIntr->intrTableSz = pParams->tableLen; + pIntrTable = NULL; + status = NV_OK; + +exit: + portMemFree(pParams); + portMemFree(pIntrTable); + + return status; +} + +static void +_intrInitServiceTable +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + ENGSTATE_ITER iter = gpuGetEngstateIter(pGpu); + OBJENGSTATE *pEngstate; + + portMemSet(pIntr->intrServiceTable, 0, sizeof(pIntr->intrServiceTable)); + + while (gpuGetNextEngstate(pGpu, &iter, &pEngstate)) + { + IntrService *pIntrService = dynamicCast(pEngstate, IntrService); + if (pIntrService != NULL) + { + intrservRegisterIntrService(pGpu, pIntrService, pIntr->intrServiceTable); + } + } + + gpuRegisterGenericKernelFalconIntrService(pGpu, pIntr->intrServiceTable); +} + +NvU32 intrServiceInterruptRecords_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + NvU16 engineIdx, + NvBool *pServiced +) +{ + IntrService *pIntrService = pIntr->intrServiceTable[engineIdx].pInterruptService; + NvU32 ret = 0; + NvBool bShouldService; + IntrServiceClearInterruptArguments clearParams = {engineIdx}; + IntrServiceServiceInterruptArguments serviceParams = {engineIdx}; + + if (pIntrService == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Missing interrupt handler for engine idx %d\n", engineIdx); + NV_ASSERT_FAILED("Missing interrupt handler"); + bShouldService = NV_FALSE; + } + else + { + bShouldService = intrservClearInterrupt(pGpu, pIntrService, &clearParams); + } + + if (pServiced != NULL) + { + *pServiced = bShouldService; + } + + if (bShouldService) + { + ret = intrservServiceInterrupt(pGpu, pIntrService, &serviceParams); + } + return ret; +} + +NV_STATUS intrServiceNotificationRecords_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + NvU16 engineIdx, + THREAD_STATE_NODE *pThreadState +) +{ + NV_STATUS status; + IntrService *pIntrService = pIntr->intrServiceTable[engineIdx].pNotificationService; + IntrServiceServiceNotificationInterruptArguments params = {pThreadState, engineIdx}; + + if (pIntrService == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Missing notification interrupt handler for engine idx %d\n", engineIdx); + NV_ASSERT_FAILED("Missing notification interrupt handler"); + return NV_ERR_GENERIC; + } + + status = intrservServiceNotificationInterrupt(pGpu, pIntrService, ¶ms); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not service notification interrupt for engine idx %d; returned NV_STATUS = 0x%x\n", + engineIdx, status); + NV_ASSERT_FAILED("Could not service notification interrupt"); + return NV_ERR_GENERIC; + } + + // + // On Turing onwards, all 
non-stall interrupts, including the ones from + // PBDMA, have moved to reporting on the runlist that is served by the + // PBDMA. There are still some clients that use the PBDMA interrupts + // but currently register for a notifier of type NV2080_ENGINE_TYPE_HOST. + // Until those clients change to using the new notifiers, RM will fire + // the host notifier for all non-stall interrupts from host-driven + // engines. See bug 1866491. + // + if (pIntr->bDefaultNonstallNotify && + pGpu->activeFifoEventMthdNotifiers != 0 && + !pIntr->intrServiceTable[engineIdx].bFifoWaiveNotify) + { + engineNonStallIntrNotify(pGpu, NV2080_ENGINE_TYPE_HOST); + } + + return NV_OK; +} + +NV_STATUS intrCheckFecsEventbufferPending_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pIntrPending, + NvBool *pbCtxswLog +) +{ + NvU8 i; + + NV_ASSERT_OR_RETURN(pbCtxswLog != NULL, NV_ERR_INVALID_ARGUMENT); + + *pbCtxswLog = NV_FALSE; + + if (pGpu->fecsCtxswLogConsumerCount <= 0) + { + NV_ASSERT(pGpu->fecsCtxswLogConsumerCount == 0); + return NV_OK; + } + + for (i = 0; i < GPU_MAX_GRS; i++) + { + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, i); + if ((pKernelGraphics != NULL) && + (kgraphicsIsIntrDrivenCtxswLoggingEnabled(pGpu, pKernelGraphics)) && + bitVectorTest(pIntrPending, MC_ENGINE_IDX_GRn(i))) + { + if (!fecsBufferChanged(pGpu, pKernelGraphics)) + continue; + + if (fecsSignalIntrPendingIfNotPending(pGpu, pKernelGraphics)) + { + *pbCtxswLog = kgraphicsIsBottomHalfCtxswLoggingEnabled(pGpu, pKernelGraphics); + } + } + } + + return NV_OK; +} + +NV_STATUS +intrCheckAndServiceFecsEventbuffer_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pIntrPending, + THREAD_STATE_NODE *pThreadState +) +{ + NvU8 i; + + if (bitVectorTestAllCleared(pIntrPending)) + return NV_OK; + + if (pGpu->fecsCtxswLogConsumerCount <= 0) + { + NV_ASSERT(pGpu->fecsCtxswLogConsumerCount == 0); + return NV_OK; + } + + for (i = 0; i < GPU_MAX_GRS; i++) + { + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, i); + if ((pKernelGraphics != NULL) && + bitVectorTest(pIntrPending, MC_ENGINE_IDX_GRn_FECS_LOG(i)) && + kgraphicsIsIntrDrivenCtxswLoggingEnabled(pGpu, pKernelGraphics) && + fecsClearIntrPendingIfPending(pGpu, pKernelGraphics)) + { + nvEventBufferFecsCallback(pGpu, (void*)pKernelGraphics); + } + } + + return NV_OK; +} + +NV_STATUS +intrGetInterruptTable_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + INTR_TABLE_ENTRY **ppTable, + NvU32 *pTableSz +) +{ + NV_ASSERT_OR_RETURN(ppTable != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pTableSz != NULL, NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OR_RETURN(pIntr->pIntrTable != NULL, NV_ERR_INVALID_STATE); + + *ppTable = pIntr->pIntrTable; + *pTableSz = pIntr->intrTableSz; + + return NV_OK; +} + +/** + * @brief Frees memory associated with interrupt table + * + * @param pGpu + * @param pMc + */ +NV_STATUS +intrDestroyInterruptTable_IMPL +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + if (pIntr->pIntrTable != NULL) + { + portMemFree(pIntr->pIntrTable); + pIntr->pIntrTable = NULL; + pIntr->intrTableSz = 0; + } + + return NV_OK; +} + +void intrProcessDPCQueue_IMPL +( + OBJGPU *pGpu, + Intr *pIntr +) +{ + DPCQUEUE *pDPCQueue = &pIntr->dpcQueue; + MC_ENGINE_BITVECTOR pendingEngines; + NvU16 nextEngine; + + do + { + bitVectorClrAll(&pendingEngines); + if (!intrIsDpcQueueEmpty(pGpu, pIntr, pDPCQueue)) + { + DPCNODE *pNode; + pNode = intrDequeueDpc(pGpu, pIntr, pDPCQueue); + if (pNode->dpctype == INTERRUPT_BASED_DPC) + { + _intrServiceStallExactList(pGpu, pIntr, 
&pNode->dpcdata.pendingEngines); + + portMemFree(pNode); + + if (!bitVectorTestAllCleared(&pIntr->pmcIntrPending)) + { + nextEngine = bitVectorCountTrailingZeros(&pIntr->pmcIntrPending); + intrQueueInterruptBasedDpc(pGpu, pIntr, nextEngine); + bitVectorClr(&pIntr->pmcIntrPending, nextEngine); + } + } + } + + if (intrIsDpcQueueEmpty(pGpu, pIntr, pDPCQueue)) + { + // + // Process all exceptions as required + // + intrGetPendingStall_HAL(pGpu, pIntr, &pendingEngines, NULL /* threadstate */); + if (!bitVectorTestAllCleared(&pendingEngines)) + { + nextEngine = bitVectorCountTrailingZeros(&pendingEngines); + intrQueueInterruptBasedDpc(pGpu, pIntr, nextEngine); + bitVectorCopy(&pIntr->pmcIntrPending, &pendingEngines); + bitVectorClr(&pIntr->pmcIntrPending, nextEngine); + pIntr->bDpcStarted = NV_TRUE; + } + } + } while (!bitVectorTestAllCleared(&pendingEngines)); +} + +/*! + * @brief Prevent the isr from coming in. + * + * Disable intrs to prevent the ISR from coming in and enable all engine intrs + * so that intrs will be reflected in NV_PMC_INTR_0. If the ISR was already + * executing, prevent it from updating engIntrs via setting + * INTR_MASK_FLAGS_ISR_SKIP_MASK_UPDATE. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[out] pIntrMaskCtx Pointer to INTR_MASK_CTX where the current + * interrupt mask related information (intr enable and + * intr mask) is to be stored. The information here + * will be used to restore the original state of + * interrup mask when we're allowing the ISR again. + */ +static void +_intrEnterCriticalSection +( + OBJGPU *pGpu, + Intr *pIntr, + INTR_MASK_CTX *pIntrMaskCtx +) +{ + NvU64 oldIrql; + NvU32 intrMaskFlags; + MC_ENGINE_BITVECTOR engines; + + bitVectorSetAll(&engines); + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + NV_ASSERT(pIntrMaskCtx != NULL); + + // Cannot do this outside of here because of bug 657283. + NV_ASSERT(rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + // + // Disable intrs to prevent the ISR from coming in and enable all engine + // intrs so that intrs will be reflected in NV_PMC_INTR_0. + // If the ISR was already executing, prevent it from updating engIntrs + // via setting MC_INTR_MASK_FLAGS_ISR_SKIP_MASK_UPDATE. + // + oldIrql = rmIntrMaskLockAcquire(pGpu); + + pIntrMaskCtx->intrEnable = intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL /* threadstate */); + intrSetIntrEnInHw_HAL(pGpu, pIntr, INTERRUPT_TYPE_DISABLED, NULL /* threadstate */); + intrSetStall_HAL(pGpu, pIntr, INTERRUPT_TYPE_DISABLED, NULL /* threadstate */); + intrMaskFlags = intrGetIntrMaskFlags(pIntr); + intrMaskFlags |= INTR_MASK_FLAGS_ISR_SKIP_MASK_UPDATE; + intrSetIntrMaskFlags(pIntr, intrMaskFlags); + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + intrGetIntrMask_HAL(pGpu, pIntr, &pIntrMaskCtx->intrMask, NULL /* threadstate */); + intrSetIntrMask_HAL(pGpu, pIntr, &engines, NULL /* threadstate */); + } + + rmIntrMaskLockRelease(pGpu, oldIrql); + } +} + +/*! + * @brief Allow the isr to come in. + * + * Allow the isr to come in if it was already allowed when lazy intr disable + * for locking is in use. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[out] pIntrMaskCtx Pointer to INTR_MASK_CTX where the current + * interrupt mask related information (intr enable and + * intr mask) is to be stored. The information here + * will be used to restore the original state of + * interrup mask when we're allowing the ISR again. 
+ */ +static void +_intrExitCriticalSection +( + OBJGPU *pGpu, + Intr *pIntr, + INTR_MASK_CTX *pIntrMaskCtx +) +{ + NvU64 oldIrql; + NvU32 intrMaskFlags; + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + NV_ASSERT(pIntrMaskCtx != NULL); + + // Restore intrEnable and allow the ISR to come in. + oldIrql = rmIntrMaskLockAcquire(pGpu); + + intrMaskFlags = intrGetIntrMaskFlags(pIntr); + intrMaskFlags &= ~INTR_MASK_FLAGS_ISR_SKIP_MASK_UPDATE; + intrSetIntrMaskFlags(pIntr, intrMaskFlags); + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + intrSetIntrMask_HAL(pGpu, pIntr, &pIntrMaskCtx->intrMask, NULL /* threadstate */); + } + + intrSetIntrEnInHw_HAL(pGpu, pIntr, pIntrMaskCtx->intrEnable, NULL /* threadstate */); + + rmIntrMaskLockRelease(pGpu, oldIrql); + } +} + +static NvBool +_intrServiceStallExactList +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines +) +{ + NV_STATUS status; + + NvU32 engineIdx; + NvU32 intr; + NvU32 i; + NvBool bHandled; + NvBool bIntrStuck = NV_FALSE; + NvBool bPending = NV_FALSE; + NvBool bRequiresPossibleErrorNotifier; + + INTR_TABLE_ENTRY *pIntrTable; + NvU32 intrTableSz; + + if (bitVectorTestAllCleared(pEngines)) + { + return NV_FALSE; + } + + NV_ASSERT_OK_OR_ELSE(status, intrGetInterruptTable_HAL(pGpu, pIntr, &pIntrTable, &intrTableSz), + return NV_FALSE); + + bRequiresPossibleErrorNotifier = intrRequiresPossibleErrorNotifier_HAL(pGpu, pIntr, pEngines); + + if (bRequiresPossibleErrorNotifier) + { + // + // Notify CUDA there may be an error in ERR_CONT that they may miss because we're + // about to clear it out of the NV_CTRL tree backing ERR_CONT before the interrupt + // is serviced. + // + // info32 contains shadowed value of ERR_CONT + // + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POSSIBLE_ERROR, NULL, 0, intrReadErrCont_HAL(pGpu, pIntr), 0); + } + + for (i = 0; i < intrTableSz; i++) + { + // Skip servicing interrupts when GPU is off the bus + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + return NV_FALSE; + } + + // Skip servicing interrupts when GPU is in Reset + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + return NV_FALSE; + } + + engineIdx = pIntrTable[i].mcEngine; + + if (bitVectorTest(pEngines, engineIdx)) + { + bHandled = NV_FALSE; + intr = intrServiceInterruptRecords(pGpu, pIntr, engineIdx, &bHandled); + + if (bHandled) + { + if ((intr != 0) && (intr == stuckIntr[engineIdx].intrVal)) + { + stuckIntr[engineIdx].intrCount++; + if (stuckIntr[engineIdx].intrCount > pIntr->intrStuckThreshold) + { + NV_PRINTF(LEVEL_ERROR, + "Stuck interrupt detected for mcEngine %u\n", + engineIdx); + bIntrStuck = NV_TRUE; + NV_ASSERT(0); + } + } + + stuckIntr[engineIdx].intrVal = intr; + + bPending = bPending || (intr != 0); + } + } + } + + if (IS_VIRTUAL(pGpu) && bitVectorTest(pEngines, MC_ENGINE_IDX_VGPU)) + { + vgpuService(pGpu); + } + + if (bRequiresPossibleErrorNotifier) + { + // + // Notify CUDA there may be an error in ERR_CONT that they may miss because we're + // about to clear it out of the NV_CTRL tree backing ERR_CONT before the interrupt + // is serviced. + // + // info32 contains shadowed value of ERR_CONT + // + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_POSSIBLE_ERROR, NULL, 0, intrReadErrCont_HAL(pGpu, pIntr), 0); + } + + if (bIntrStuck) + { + NV_PRINTF(LEVEL_ERROR, + "Interrupt is stuck. Bailing after %d iterations.\n", + pIntr->intrStuckThreshold); + return NV_FALSE; + } + + return bPending; +} + +/*! + * @brief Perform inline servicing of requested interrupts. 
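/*
 * Illustrative sketch (standalone, hypothetical types; not part of this change):
 * the stuck-interrupt check in _intrServiceStallExactList above keys off the
 * value returned by the per-engine service routine -- seeing the same nonzero
 * value more than the configured threshold number of times is treated as a
 * stuck interrupt.
 */
typedef struct {
    unsigned int lastVal;
    unsigned int repeatCount;
} SketchStuckTracker;

/* returns 1 when the interrupt should be declared stuck */
static int
sketchTrackStuck(SketchStuckTracker *pTracker, unsigned int serviced,
                 unsigned int threshold)
{
    if (serviced != 0 && serviced == pTracker->lastVal)
    {
        if (++pTracker->repeatCount > threshold)
            return 1;               /* same pending value repeated too often: stuck */
    }
    else
    {
        pTracker->repeatCount = 0;  /* progress was made; reset (a simplification) */
    }
    pTracker->lastVal = serviced;
    return 0;
}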
+ * + * Do inline servicing of all requested engines and VGPU. + * If NULL is passed for pEngines, service all engines but not VGPU. + * + * This special casing of the VGPU interrupt servicing is a holdover from + * before refactoring intrServiceStall and intrServiceStallList to use a common + * inner loop; previously, intrServiceStallList simply didn't attempt to + * service VGPU interrupts even if requested. + * + * @param[in] pGpu + * @param[in] pIntr + * @param[in] pEngines (See above for behavior when NULL) + * @param[in] bLoop Continue servicing interrupts in loop until completed or stuck interrupt detected. + * + */ +void +intrServiceStallList_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + MC_ENGINE_BITVECTOR *pEngines, + NvBool bLoop +) +{ + NV_STATUS status; + INTR_MASK_CTX intrMaskCtx; + MC_ENGINE_BITVECTOR exactEngines; + NvBool bPending; + CALL_CONTEXT *pOldContext = NULL; + NvU32 regReadValue; + + if (gpumgrGetBcEnabledStatus(pGpu)) + { + NV_ASSERT_FAILED("intrServiceStallList_IMPL is expected to be unicast! Please post a stacktrace in bug 2003060!"); + } + + if (IS_GSP_CLIENT(pGpu)) + { + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + kgspDumpGspLogs(pGpu, pKernelGsp, NV_FALSE); + } + + // + // If the GPU is off the BUS or surprise removed during servicing DPC for ISRs + // we wont know about GPU state until after we start processing DPCs for every + // pending engine. This is because, the reg read to determine pending engines + // return 0xFFFFFFFF due to GPU being off the bus. To prevent further processing, + // reading PMC_BOOT_0 register to check if the GPU was surprise removed/ off the bus + // and setting PDB_PROP_GPU_SECONDARY_BUS_RESET_PENDING to attempt Secondary Bus reset + // at lower IRQL later to attempt recover the GPU and avoid all ISR DPC processing till + // GPU is recovered. + // + + regReadValue = GPU_REG_RD32(pGpu, NV_PMC_BOOT_0); + + if (regReadValue == GPU_REG_VALUE_INVALID) + { + NV_PRINTF(LEVEL_ERROR, + "Failed GPU reg read : 0x%x. Check whether GPU is present on the bus\n", + regReadValue); + } + + // Dont service interrupts if GPU is surprise removed + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu) || API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + return; + } + + resservSwapTlsCallContext(&pOldContext, NULL); + + // prevent the isr from coming in + _intrEnterCriticalSection(pGpu, pIntr, &intrMaskCtx); + + portMemSet(stuckIntr, 0, sizeof(stuckIntr)); + + do + { + NV_ASSERT_OK_OR_ELSE(status, intrGetPendingStall_HAL(pGpu, pIntr, &exactEngines, NULL /* threadstate */), + goto done); + + if (pEngines == NULL) + { + bitVectorClr(&exactEngines, MC_ENGINE_IDX_VGPU); + } + else + { + bitVectorAnd(&exactEngines, &exactEngines, pEngines); + } + + bPending = _intrServiceStallExactList(pGpu, pIntr, &exactEngines); + } + while (bPending && bLoop); + +done: + // allow the isr to come in. + _intrExitCriticalSection(pGpu, pIntr, &intrMaskCtx); + + resservRestoreTlsCallContext(pOldContext); +} + + +void +intrServiceStallSingle_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + NvU16 engIdx, + NvBool bLoop +) +{ + MC_ENGINE_BITVECTOR engines; + bitVectorClrAll(&engines); + bitVectorSet(&engines, engIdx); + intrServiceStallList_HAL(pGpu, pIntr, &engines, bLoop); +} + +/*! + * @brief Allow the isr to come in. + * + * Checks whether the interrupts are enabled in HW. 
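/*
 * Illustrative sketch (standalone, hypothetical register access; not part of
 * this change): the PMC_BOOT_0 read in intrServiceStallList_IMPL above relies
 * on the comment's observation that reads from a surprise-removed GPU return
 * all 1s, so a register that can never legitimately read back 0xFFFFFFFF
 * doubles as a presence check.
 */
#include <stdint.h>

#define SKETCH_REG_VALUE_INVALID 0xFFFFFFFFu   /* assumed all-ones "device gone" pattern */

static int
sketchGpuLikelyPresent(const volatile uint32_t *pBar0, uint32_t bootRegOffset)
{
    uint32_t val = pBar0[bootRegOffset / sizeof(uint32_t)];
    return val != SKETCH_REG_VALUE_INVALID;    /* all 1s => device fell off the bus */
}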
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntr Intr pointer + * @param[in] pThreadState THREAD_STATE information + * @returns Returns whether interrupts are enabled + */ +NvBool +intrIsIntrEnabled_IMPL +( + OBJGPU *pGpu, + Intr *pIntr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 intrEn; + + intrEn = intrGetIntrEnFromHw_HAL(pGpu, pIntr, pThreadState); + + if (INTERRUPT_TYPE_DISABLED == intrEn) + { + return NV_FALSE; + } + + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/gpu/intr/intr_service.c b/src/nvidia/src/kernel/gpu/intr/intr_service.c new file mode 100644 index 000000000..fa26181ab --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/intr_service.c @@ -0,0 +1,96 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/intr_service.h" +#include "gpu/gpu.h" +#include "kernel/gpu/intr/intr.h" + +/** + * @brief Provides an opportunity to register some IntrService during intrStateInit. + * + * @param[in] pGpu + * @param[in] pIntrService The IntrService object discovered as a GPU child; + * not necessarily the one to be registered. + * @param[in] pRecords + */ +void +intrservRegisterIntrService_IMPL(OBJGPU *pGpu, IntrService *pIntrService, IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX]) +{ + return; +} + +/** + * @brief Clears the stall interrupt leaf vector and return whether to call ServiceStall. + * @details Normally there's no need to override this function as its default is used by almost all handlers. + * + * @param[in] pGpu + * @param[in] pIntrService The IntrService object registered to handle the engineIdx stall interrupt. + * @param[in] pParams + * + * @returns A boolean which is NV_FALSE if the stall interrupt should not actually be handled. + */ +NvBool +intrservClearInterrupt_IMPL(OBJGPU *pGpu, IntrService *pIntrService, IntrServiceClearInterruptArguments *pParams) +{ + NV_ASSERT_OR_RETURN(pParams != NULL, NV_FALSE); + + Intr *pIntr = GPU_GET_INTR(pGpu); + intrClearLeafVector_HAL(pGpu, pIntr, + intrGetVectorFromEngineId(pGpu, pIntr, pParams->engineIdx, NV_FALSE), + NULL); + return NV_TRUE; +} + +/** + * @brief Services the stall interrupt. + * + * @param[in] pGpu + * @param[in] pIntrService The IntrService object registered to handle the engineIdx stall interrupt. 
+ * @param[in] pParams + * + * @returns Zero, or any implementation-chosen nonzero value. If the same nonzero value is returned enough + * times the interrupt is considered stuck. + */ +NvU32 +intrservServiceInterrupt_IMPL(OBJGPU *pGpu, IntrService *pIntrService, IntrServiceServiceInterruptArguments *pParams) +{ + NV_ASSERT_FAILED("intrservServiceInterrupt called but not implemented"); + // Return 0; no need to redundantly report stuck interrupt. + return 0; +} + +/** + * @brief Services the nonstall interrupt. + * + * @param[in] pGpu + * @param[in] pIntrService The IntrService object registered to handle the engineIdx nonstall interrupt. + * @param[in] pParams + * + */ +NV_STATUS +intrservServiceNotificationInterrupt_IMPL(OBJGPU *pGpu, IntrService *pIntrService, IntrServiceServiceNotificationInterruptArguments *pParams) +{ + NV_ASSERT_FAILED("intrservServiceNotificationInterrupt called but not implemented"); + return NV_ERR_INVALID_STATE; +} diff --git a/src/nvidia/src/kernel/gpu/intr/swintr.c b/src/nvidia/src/kernel/gpu/intr/swintr.c new file mode 100644 index 000000000..fca2c8bd6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intr/swintr.c @@ -0,0 +1,92 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/intr/swintr.h" +#include "gpu/gpu.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "kernel/gpu/intr/intr.h" + +/** + * @brief Provides an opportunity to register some IntrService during intrStateInit. + */ +void +swintrRegisterIntrService_IMPL +( + OBJGPU *pGpu, + SwIntr *pSwIntr, + IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX] +) +{ + NvU32 engineIdx = MC_ENGINE_IDX_CPU_DOORBELL; + NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL); + pRecords[engineIdx].pInterruptService = staticCast(pSwIntr, IntrService); + + engineIdx = MC_ENGINE_IDX_PRIV_DOORBELL; + NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL); + pRecords[engineIdx].pInterruptService = staticCast(pSwIntr, IntrService); +} + +/** + * @brief Service stall interrupts. + * + * @returns Zero, or any implementation-chosen nonzero value. If the same nonzero value is returned enough + * times the interrupt is considered stuck. 
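/*
 * Illustrative sketch (standalone, hypothetical types; not part of this change):
 * registration, as in swintrRegisterIntrService_IMPL above, claims one slot per
 * engine index in a fixed-size record table and asserts that no other service
 * already owns that slot.
 */
#include <assert.h>
#include <stddef.h>

#define SKETCH_ENGINE_IDX_MAX 8

typedef struct SketchService SketchService;
typedef struct { SketchService *pInterruptService; } SketchRecord;

static void
sketchRegister(SketchRecord records[SKETCH_ENGINE_IDX_MAX],
               unsigned int engineIdx, SketchService *pService)
{
    assert(engineIdx < SKETCH_ENGINE_IDX_MAX);
    assert(records[engineIdx].pInterruptService == NULL);  /* slot must be unclaimed */
    records[engineIdx].pInterruptService = pService;
}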
+ */ +NvU32 +swintrServiceInterrupt_IMPL +( + OBJGPU *pGpu, + SwIntr *pSwIntr, + IntrServiceServiceInterruptArguments *pParams +) +{ + NV_ASSERT_OR_RETURN(pParams != NULL, 0); + + switch (pParams->engineIdx) + { + case MC_ENGINE_IDX_CPU_DOORBELL: + { + MODS_ARCH_REPORT(NV_ARCH_EVENT_CPU_DOORBELL, "%s", "processing CPU doorbell interrupt\n"); + + // + // CPU_DOORBELL interrupt is used to notify a vGPU guest that there is an event pending. + // The vGPU event itself will be picked up by vgpuGetPendingEvent() + // + return 0; + } + case MC_ENGINE_IDX_PRIV_DOORBELL: + { + MODS_ARCH_REPORT(NV_ARCH_EVENT_PRIV_DOORBELL, "%s", "processing PRIV doorbell interrupt\n"); + + // Then service all virtual interrupts that may be pending + intrServiceVirtual_HAL(pGpu, GPU_GET_INTR(pGpu)); + return 0; + } + default: + { + NV_ASSERT_FAILED("Invalid engineIdx"); + return 0; + } + } +} diff --git a/src/nvidia/src/kernel/gpu/intrable/intrable.c b/src/nvidia/src/kernel/gpu/intrable/intrable.c new file mode 100644 index 000000000..63c27c449 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/intrable/intrable.c @@ -0,0 +1,255 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "kernel/gpu/intrable/intrable.h" +#include "kernel/gpu/intr/intr.h" + +/*! + * @brief Intrable object constructor + * + * @param[in] pIntrable OBJINTRABLE pointer + * + * @return NV_OK + */ +NV_STATUS +intrableConstruct_IMPL +( + OBJINTRABLE *pIntrable +) +{ + pIntrable->partitionAssignedNotificationVector = NV_INTR_VECTOR_INVALID; + pIntrable->originalNotificationIntrVector = NV_INTR_VECTOR_INVALID; + + return NV_OK; +} + +/*! + * @brief Placeholder function to return error + * + * Inheriting classes should override and return the notification interrupt vector + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntrable OBJINTRABLE pointer + * @param[in] maxIntrs max interrupt vectors to return + * @param[out] pIntrVectors table of interrupt vectors + * @param[out] pMcEngineIdxs table of MC_ENGINE_IDXs. 
Only required for non-engines + * @param[out] pCount how many interrupt vectors were found + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +intrableGetPhysicalIntrVectors_IMPL +( + OBJGPU *pGpu, + OBJINTRABLE *pIntrable, + NvU32 maxIntrs, + NvU32 *pIntrVectors, + NvU32 *pMcEngineIdxs, + NvU32 *pCount +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Placeholder function to return error + * + * Inheriting classes should override and return the notification interrupt vector + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntrable OBJINTRABLE pointer + * @param[in] maxIntrs max interrupt vectors to return + * @param[out] pIntrVectors table of interrupt vectors + * @param[out] pMcEngineIdxs table of MC_ENGINE_IDXs. Only required for non-engines + * @param[out] pCount how many interrupt vectors were found + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +intrableGetKernelIntrVectors_IMPL +( + OBJGPU *pGpu, + OBJINTRABLE *pIntrable, + NvU32 maxIntrs, + NvU32 *pIntrVectors, + NvU32 *pMcEngineIdxs, + NvU32 *pCount +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Placeholder function to return error + * + * Inheriting classes should override and return the notification interrupt vector + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntrable OBJINTRABLE pointer + * @param[in] pIntrVector Pointer to store the interrupt vector + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +intrableGetNotificationIntrVector_IMPL +( + OBJGPU *pGpu, + OBJINTRABLE *pIntrable, + NvU32 *pIntrVector +) +{ + // Should never be called + DBG_BREAKPOINT(); + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Placeholder function to return error + * + * Inheriting classes should override and set the notification interrupt vector # here + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntrable OBJINTRABLE pointer + * @param[in] intrVector the interrupt vector + * + * @return NV_ERR_NOT_SUPPORTED + */ +NV_STATUS +intrableSetNotificationIntrVector_IMPL +( + OBJGPU *pGpu, + OBJINTRABLE *pIntrable, + NvU32 intrVector +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Cache (and then write to HW) the partition assigned notification + * intr vector + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntrable OBJINTRABLE pointer + * @param[in] intrVector the interrupt vector + * + * @return NV_STATUS + */ +NV_STATUS +intrableCacheAndSetPartitionNotificationIntrVector_IMPL +( + OBJGPU *pGpu, + OBJINTRABLE *pIntrable, + NvU32 intrVector +) +{ + if (pIntrable->originalNotificationIntrVector == NV_INTR_VECTOR_INVALID) + { + // Remember the initial HW value before we overwrite it + NV_ASSERT_OK_OR_RETURN(intrableGetNotificationIntrVector(pGpu, pIntrable, + &pIntrable->originalNotificationIntrVector)); + } + + pIntrable->partitionAssignedNotificationVector = intrVector; + return intrableSetNotificationIntrVector(pGpu, pIntrable, intrVector); +} + +/*! + * @brief Write the cached partition assigned notification interrupt vector + * to HW again + * + * If the partition value is default, do nothing. + * Else write it to HW again. 
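/*
 * Illustrative sketch (standalone, hypothetical type; not part of this change):
 * the intrable partition vector handling above and below follows a
 * cache/override/revert pattern -- remember the original HW value the first
 * time an override is applied, and restore it (and forget the override) when
 * the partition goes away.
 */
#define SKETCH_VEC_INVALID 0xFFFFFFFFu

typedef struct {
    unsigned int hwVector;         /* stand-in for the value programmed in HW */
    unsigned int originalVector;   /* first-seen HW value, invalid if never overridden */
    unsigned int partitionVector;  /* active override, invalid if none */
} SketchVecState;

static void sketchOverride(SketchVecState *pS, unsigned int vec)
{
    if (pS->originalVector == SKETCH_VEC_INVALID)
        pS->originalVector = pS->hwVector;     /* remember the HW value exactly once */
    pS->partitionVector = vec;
    pS->hwVector        = vec;                 /* program the override */
}

static int sketchRevert(SketchVecState *pS)
{
    if (pS->originalVector == SKETCH_VEC_INVALID)
        return -1;                             /* nothing to revert to */
    pS->partitionVector = SKETCH_VEC_INVALID;  /* forget the override */
    pS->hwVector        = pS->originalVector;
    return 0;
}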
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntrable OBJINTRABLE pointer + * + * @return NV_STATUS + */ +NV_STATUS +intrableSetPartitionNotificationIntrVector_IMPL +( + OBJGPU *pGpu, + OBJINTRABLE *pIntrable +) +{ + if (pIntrable->partitionAssignedNotificationVector == NV_INTR_VECTOR_INVALID) + { + // No-op if this has not been configured + return NV_OK; + } + + return intrableSetNotificationIntrVector( + pGpu, pIntrable, pIntrable->partitionAssignedNotificationVector); +} + +/*! + * @brief Returns the partition assigned notification interrupt vector + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntrable OBJINTRABLE pointer + * + * @return partitionAssignedNotificationVector + */ +NvU32 +intrableGetPartitionNotificationIntrVector_IMPL +( + OBJGPU *pGpu, + OBJINTRABLE *pIntrable +) +{ + return pIntrable->partitionAssignedNotificationVector; +} + +/*! + * @brief Return the notification interrupt vector to HW's original value, if + * is is not at its default value + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pIntrable OBJINTRABLE pointer + * + * @return NV_STATUS + */ +NV_STATUS +intrableRevertNotificationIntrVector_IMPL +( + OBJGPU *pGpu, + OBJINTRABLE *pIntrable +) +{ + if (pIntrable->originalNotificationIntrVector == NV_INTR_VECTOR_INVALID) + { + // Don't expect to revert when there is nothing to revert to + return NV_ERR_INVALID_STATE; + } + + // + // Forget the partition assigned vector so that it does not get written + // again + // + pIntrable->partitionAssignedNotificationVector = NV_INTR_VECTOR_INVALID; + + return intrableSetNotificationIntrVector( + pGpu, pIntrable, pIntrable->originalNotificationIntrVector); +} diff --git a/src/nvidia/src/kernel/gpu/mc/arch/ampere/kernel_mc_ga100.c b/src/nvidia/src/kernel/gpu/mc/arch/ampere/kernel_mc_ga100.c new file mode 100644 index 000000000..40112a590 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mc/arch/ampere/kernel_mc_ga100.c @@ -0,0 +1,129 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/mc/kernel_mc.h" +#include "gpu/gpu.h" + +#include "published/ampere/ga100/dev_boot.h" + + +/*! + * @brief Updates PMC_ENABLE register whose pmcEnableMask is passed. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelMc + * @param[in] pmcEnableMask PMC Mask of engines to be reset. 
+ * @param[in] bEnable If True, Enable the engine, else Reset the engines. + * @param[in] bIsPmcDeviceEngine NV_TRUE if it is PMC_DEVICE_ENABLE register + * else its PMC_ENABLE register. + * + * @return NV_OK + */ +NV_STATUS +kmcWritePmcEnableReg_GA100 +( + OBJGPU *pGpu, + KernelMc *pKernelMc, + NvU32 pmcEnableMask, + NvBool bEnable, + NvBool bIsPmcDeviceEngine +) +{ + NvU32 regAddr; + NvU32 newPmc; + + // + // If hardware increases the size of this register in future chips, we would + // need to catch this and fork another HAL. + // + if ((sizeof(NvU32) * NV_PMC_DEVICE_ENABLE__SIZE_1) > sizeof(NvU32)) + { + NV_ASSERT_FAILED("Assert for Mcheck to catch increase in register size. Fork this HAL"); + } + + if (bIsPmcDeviceEngine) + { + regAddr = NV_PMC_DEVICE_ENABLE(0); + } + else + { + regAddr = NV_PMC_ENABLE; + } + + // + // Reset PMC Engines. + // + newPmc = GPU_REG_RD32(pGpu, regAddr); + + // Enable Engines. + if (bEnable) + newPmc |= pmcEnableMask; + // Reset Engines. + else + newPmc &= ~pmcEnableMask; + + GPU_REG_WR32(pGpu, regAddr, newPmc); + + // + // Read from NV_PMC_ENABLE to create enough delay for engines reset to complete. + // + newPmc = GPU_REG_RD32(pGpu, regAddr); + newPmc = GPU_REG_RD32(pGpu, regAddr); + newPmc = GPU_REG_RD32(pGpu, regAddr); + + return NV_OK; +} + +/*! + * @brief Returns NV_PMC_ENABLE or NV_PMC_DEVICE_ENABLE register based on bIsPmcDeviceEngine. + * If bIsPmcDeviceEngine is NV_TRUE, then return NV_PMC_DEVICE_ENABLE (available from Ampere), + * If bIsPmcDeviceEngine is NV_FALSE, then return NV_PMC_ENABLE. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelMc + * @param[in] bIsPmcDeviceEngine if true return NV_PMC_DEVICE_ENABLE else return NV_PMC_ENABLE register. + * + * @return NvU32 containing register data + */ +NvU32 +kmcReadPmcEnableReg_GA100 +( + OBJGPU *pGpu, + KernelMc *pKernelMc, + NvBool bIsPmcDeviceEngine +) +{ + // + // If hardware increases the size of this register in future chips, we would + // need to catch this and fork another HAL. + // + if ((sizeof(NvU32) * NV_PMC_DEVICE_ENABLE__SIZE_1) > sizeof(NvU32)) + { + NV_ASSERT_FAILED("Assert for Mcheck to catch increase in register size. Fork this HAL"); + } + + return bIsPmcDeviceEngine ? + GPU_REG_RD32(pGpu, NV_PMC_DEVICE_ENABLE(0)) : + GPU_REG_RD32(pGpu, NV_PMC_ENABLE); +} diff --git a/src/nvidia/src/kernel/gpu/mc/arch/maxwell/kernel_mc_gm107.c b/src/nvidia/src/kernel/gpu/mc/arch/maxwell/kernel_mc_gm107.c new file mode 100644 index 000000000..7e11e3f85 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mc/arch/maxwell/kernel_mc_gm107.c @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/mc/kernel_mc.h" +#include "gpu/gpu.h" + +#include "published/maxwell/gm107/dev_boot.h" + +/*! + * @brief Returns the BAR0 offset and size of the PMC range. + * + * @param[in] pGpu + * @param[in] pKernelMc + * @param[out] pBar0MapOffset + * @param[out] pBar0MapSize + * + * @return NV_STATUS + */ +NV_STATUS +kmcGetMcBar0MapInfo_GK104 +( + OBJGPU *pGpu, + KernelMc *pKernelMc, + NvU64 *pBar0MapOffset, + NvU32 *pBar0MapSize +) +{ + *pBar0MapOffset = DRF_BASE(NV_PMC); + *pBar0MapSize = DRF_SIZE(NV_PMC); + return NV_OK; +} + +// +// This function is used to power-up the engines before we do a full-chip +// reset. +// +NV_STATUS +kmcPrepareForXVEReset_GK104 +( + OBJGPU *pGpu, + KernelMc *pKernelMc +) +{ + // FERMI-TODO + return NV_OK; +} + +/*! + * @brief Updates PMC_ENABLE register whose pmcEnableMask is passed. + * If bEnable is NV_TRUE, then update entire PMC register, else + * if bEnable is false, then reset the engines whose mask is passed as input. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelMc + * @param[in] pmcEnableMask PMC Mask of engines to be reset. + * @param[in] bEnable If True, Enable the engine, else Reset the engines. + * @param[in] bIsPmcDeviceEngine NV_TRUE if it is PMC_DEVICE_ENABLE register + * else its PMC_ENABLE register. + * + * @return NV_OK + */ +NV_STATUS +kmcWritePmcEnableReg_GK104 +( + OBJGPU *pGpu, + KernelMc *pKernelMc, + NvU32 pmcEnableMask, + NvBool bEnable, + NvBool bIsPmcDeviceEngine +) +{ + NvU32 newPmc; + + // NV_PMC_DEVICE_ENABLE register is supported on Ampere and later only. + if (bIsPmcDeviceEngine) + return NV_OK; + + newPmc = GPU_REG_RD32(pGpu, NV_PMC_ENABLE); + + // Enable Engines. + if (bEnable) + newPmc |= pmcEnableMask; + // Reset Engines. + else + newPmc &= ~pmcEnableMask; + + GPU_REG_WR32(pGpu, NV_PMC_ENABLE, newPmc); + + // + // Read from NV_PMC_ENABLE to create enough delay for engines reset to complete. + // + GPU_REG_RD32(pGpu, NV_PMC_ENABLE); + GPU_REG_RD32(pGpu, NV_PMC_ENABLE); + GPU_REG_RD32(pGpu, NV_PMC_ENABLE); + + return NV_OK; +} + +/*! + * @brief Returns PMC_ENABLE or PMC_DEVICE_ENABLE register based on bIsPmcDeviceEngine. + * If bIsPmcDeviceEngine is NV_TRUE, then return 0 on pre-Ampere, + * as NV_PMC_DEVICE_ENABLE is available from Ampere, else + * If bIsPmcDeviceEngine is NV_FALSE, then return NV_PMC_ENABLE. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelMc + * @param[in] bIsPmcDeviceEngine if true return 0 else return PMC_ENABLE register. + * + * @return NvU32 containing register data + */ +NvU32 +kmcReadPmcEnableReg_GK104 +( + OBJGPU *pGpu, + KernelMc *pKernelMc, + NvBool bIsPmcDeviceEngine +) +{ + // Ensure that caller never sets bIsPmcDeviceEngine = NV_TRUE for pre-Ampere. + NV_ASSERT(bIsPmcDeviceEngine == NV_FALSE); + + return bIsPmcDeviceEngine ? 
0 : + GPU_REG_RD32(pGpu, NV_PMC_ENABLE); +} diff --git a/src/nvidia/src/kernel/gpu/mc/kernel_mc.c b/src/nvidia/src/kernel/gpu/mc/kernel_mc.c new file mode 100644 index 000000000..dbde26f9b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mc/kernel_mc.c @@ -0,0 +1,215 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "kernel/gpu/mc/kernel_mc.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "gpu/gpu.h" +#include "diagnostics/nv_debug_dump.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mmu/kern_gmmu.h" + +#include "lib/protobuf/prb_util.h" +#include "g_nvdebug_pb.h" + +// +// MC RM SubDevice Controls +// +// This rmctrl MUST NOT touch hw since it's tagged as NO_GPUS_ACCESS in ctrl2080.def +// RM allow this type of rmctrl to go through when GPU is not available. 
+// +NV_STATUS +subdeviceCtrlCmdMcGetArchInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS *pArchInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (IsTEGRA(pGpu)) + { + pArchInfoParams->architecture = pGpu->chipInfo.platformId; + pArchInfoParams->implementation = pGpu->chipInfo.implementationId; + pArchInfoParams->revision = pGpu->chipInfo.revisionId; + } + else + { + if (pGpu->idInfo.ArchitectureExternal && pGpu->idInfo.ImplementationExternal) + { + pArchInfoParams->architecture = pGpu->idInfo.ArchitectureExternal; + pArchInfoParams->implementation = pGpu->idInfo.ImplementationExternal; + } + else + { + pArchInfoParams->architecture = pGpu->chipInfo.pmcBoot0.arch; + pArchInfoParams->implementation = pGpu->chipInfo.pmcBoot0.impl; + } + pArchInfoParams->revision = (pGpu->chipInfo.pmcBoot0.majorRev << 4) | pGpu->chipInfo.pmcBoot0.minorRev; + pArchInfoParams->subRevision = pGpu->chipInfo.subRevision; + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdMcGetManufacturer_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_MC_GET_MANUFACTURER_PARAMS *pManufacturerParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + pManufacturerParams->manufacturer = pGpu->idInfo.Manufacturer; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdMcQueryHostclkSlowdownStatus_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_MC_QUERY_HOSTCLK_SLOWDOWN_STATUS_PARAMS *pGetStatusParams +) +{ + pGetStatusParams->bDisabled = NV_FALSE; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdMcSetHostclkSlowdownStatus_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_MC_SET_HOSTCLK_SLOWDOWN_STATUS_PARAMS *pParams +) +{ + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdMcChangeReplayableFaultOwnership_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS *pReplayableFaultOwnrshpParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + if (pKernelGmmu != NULL) + { + kgmmuChangeReplayableFaultOwnership_HAL(pGpu, pKernelGmmu, pReplayableFaultOwnrshpParams->bOwnedByRm); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + + return NV_OK; +} + +// +// Routine to dump Engine State on Error Conditions +// +static NV_STATUS +_kmcDumpEngineFunc(OBJGPU *pGpu, PRB_ENCODER *pPrbEnc, NVD_STATE *pNvDumpState, void *pvData) +{ + NV_STATUS rmStatus = NV_OK; + NvU8 startingDepth = prbEncNestingLevel(pPrbEnc); + + switch (DRF_VAL(_NVD, _ERROR_CODE, _MAJOR, pNvDumpState->internalCode)) + { + case NVD_GPU_GENERATED: + case NVD_SKIP_ZERO: + // don't report on these internal codes. + return NV_OK; + } + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_GPUINFO_ENG_MC), + External_Cleanup); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_ENG_MC_RM_DATA), + External_Cleanup); + + prbEncAddUInt32(pPrbEnc, NVDEBUG_ENG_MC_RMDATA_PMCBOOT0, pGpu->chipId0); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, // NVDEBUG_ENG_MC_RM_DATA + prbEncNestedEnd(pPrbEnc), + External_Cleanup); + + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 i = 0; + + for (i = 0; i < pKernelBus->totalPciBars; ++i) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + prbEncNestedStart(pPrbEnc, NVDEBUG_ENG_MC_PCI_BARS), + External_Cleanup); + + // Memory variables only. RM lock not needed. 
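/*
 * Illustrative sketch (standalone; not part of this change):
 * subdeviceCtrlCmdMcGetArchInfo_IMPL above packs the reported chip revision as
 * (majorRev << 4) | minorRev, i.e. major revision in the high nibble and minor
 * revision in the low nibble of one byte.
 */
#include <stdint.h>

static uint8_t sketchPackRevision(uint8_t majorRev, uint8_t minorRev)
{
    /* e.g. major 0xA, minor 0x1 -> 0xA1 */
    return (uint8_t)((majorRev << 4) | (minorRev & 0xF));
}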
+ prbEncAddUInt64(pPrbEnc, NVDEBUG_ENG_MC_PCIBARINFO_OFFSET, + kbusGetPciBarOffset(pKernelBus, i)); + + prbEncAddUInt64(pPrbEnc, NVDEBUG_ENG_MC_PCIBARINFO_LENGTH, + kbusGetPciBarSize(pKernelBus, i)); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, // NVDEBUG_ENG_MC_PCI_BARS + prbEncNestedEnd(pPrbEnc), + External_Cleanup); + } + } + +External_Cleanup: + // Unwind the protobuff to inital depth + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, LEVEL_ERROR, + prbEncUnwindNesting(pPrbEnc, startingDepth)); + + return rmStatus; +} + +NV_STATUS +kmcStateInitLocked_IMPL +( + OBJGPU *pGpu, + KernelMc *pKernelMc +) +{ + NvDebugDump *pNvd = GPU_GET_NVD(pGpu); + if(pNvd != NULL) + { + nvdEngineSignUp(pGpu, + pNvd, + _kmcDumpEngineFunc, + NVDUMP_COMPONENT_ENG_MC, + REF_DEF(NVD_ENGINE_FLAGS_PRIORITY, _CRITICAL) | + REF_DEF(NVD_ENGINE_FLAGS_SOURCE, _BOTH), + (void *)pKernelMc); + } + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/fbsr_ga100.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/fbsr_ga100.c new file mode 100644 index 000000000..d5f865730 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/fbsr_ga100.c @@ -0,0 +1,103 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/fbsr.h" + +static NV_STATUS +fbsrSendMemsysProgramRawCompressionMode +( + OBJGPU *pGpu, + NvBool bRawMode +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE_PARAMS params = {0}; + + params.bRawMode = bRawMode; + + return pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMSYS_PROGRAM_RAW_COMPRESSION_MODE, + ¶ms, sizeof(params)); +} + +/*! + * Start save/restore operation + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pFbsr OBJFBSR pointer + * @param[in] op Type of operation + * + * @returns NV_OK on success + */ +NV_STATUS +fbsrBegin_GA100(OBJGPU *pGpu, OBJFBSR *pFbsr, FBSR_OP_TYPE op) +{ + if (op == FBSR_OP_RESTORE) + { + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + /* + * Temporarily disable raw mode to prevent FBSR restore operations + * from corrupting compressed surfaces. 
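/*
 * Illustrative sketch (standalone, hypothetical names; not part of this change):
 * fbsrBegin_GA100 / fbsrEnd_GA100 below bracket the FB restore with a
 * "disable if enabled, remember, and re-enable only what we disabled" pattern.
 */
#include <stdbool.h>

typedef struct {
    bool featureEnabled;    /* stand-in for the raw compression mode state */
    bool wasDisabledByUs;   /* mirrors the bRawModeWasEnabled bookkeeping */
} SketchGuard;

static void sketchGuardBegin(SketchGuard *pG)
{
    if (pG->featureEnabled)
    {
        pG->featureEnabled  = false;   /* turn the feature off for the operation */
        pG->wasDisabledByUs = true;    /* remember that we are the ones who turned it off */
    }
}

static void sketchGuardEnd(SketchGuard *pG)
{
    if (pG->wasDisabledByUs)
    {
        pG->featureEnabled  = true;    /* restore only if we disabled it */
        pG->wasDisabledByUs = false;
    }
}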
+ * + * See bug 3172217 + */ + + if (pMemorySystemConfig->bUseRawModeComptaglineAllocation) + { + NV_ASSERT_OK(fbsrSendMemsysProgramRawCompressionMode(pGpu, NV_FALSE)); + pFbsr->bRawModeWasEnabled = NV_TRUE; + } + } + + return fbsrBegin_GM107(pGpu, pFbsr, op); +} + +/*! + * End save/restore operation + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pFbsr OBJFBSR pointer + * + * @returns NV_OK on success + */ +NV_STATUS +fbsrEnd_GA100(OBJGPU *pGpu, OBJFBSR *pFbsr) +{ + NV_STATUS status = fbsrEnd_GM107(pGpu, pFbsr); + + if (pFbsr->op == FBSR_OP_RESTORE && + pFbsr->bRawModeWasEnabled) + { + /* + * Reenable raw mode if it was disabled by fbsrBegin_GA100. + */ + NV_ASSERT_OK(fbsrSendMemsysProgramRawCompressionMode(pGpu, NV_TRUE)); + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/mem_mgr_ga100.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/mem_mgr_ga100.c new file mode 100644 index 000000000..facef5ff3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/mem_mgr_ga100.c @@ -0,0 +1,489 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_mgr/mem_desc.h" + +#include "nvRmReg.h" + +#include "kernel/gpu/intr/intr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "gpu/subdevice/subdevice.h" +#include "vgpu/vgpu_events.h" + +#include "published/ampere/ga100/dev_mmu.h" +#include "published/ampere/ga100/dev_fb.h" + +#define NV_CBC_MAX_SIZE_BUG_2509894_WAR ((3 * NVBIT64(30)) / 2) // 1.5GBs + +/*! + * @brief This function will return the Kind that should be used by surfaces which + * maps the FLA object + * + * @param[in/out] pPteKind + * + * @returns NV_OK + */ +NV_STATUS +memmgrGetFlaKind_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 *pPteKind +) +{ + *pPteKind = NV_MMU_PTE_KIND_SMSKED_MESSAGE; + return NV_OK; +} + +/*! 
+ * @brief Determine Alignment for a surface, if the surface is compressible with + * the reg key enabled, set the hwAlignment to 256KB + * else fall back to pre-Ampere way + * + * returns NV_STATUS + */ +NV_STATUS +memmgrAllocDetermineAlignment_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *pMemSize, + NvU64 *pAlign, + NvU64 alignPad, + NvU32 allocFlags, + NvU32 retAttr, + NvU32 retAttr2, + NvU64 hwAlignment +) +{ + + return memmgrAllocDetermineAlignment_GM107(pGpu, pMemoryManager, pMemSize, pAlign, alignPad, + allocFlags, retAttr, retAttr2, hwAlignment); +} + +/** + * @brief Override Scrubber related PDB properties based on regkeys and platform configs + */ +void +memmgrScrubRegistryOverrides_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + //Fix me: Fork the same function for GA10x. + + // + // Disabling the SCRUB_ON_FREE property for all the platforms except Windows TCC Mode. + // Disabling in Non-TCC windows because the OS manages FB + // Disabling for Simulation Platforms, since slower in simulation + // Disabling in DFPGA, since they skip the Host Load + // Disabling for vGPU (host), since the plugin has scrubbing support + // Disabling for legacy VGPU (guest), blocked on bug #1929798 + // Disabling for SLI for now, until the bug # 1790190 is fixed. + // Disabling for GSP-RM ucode, since scrubbing is done from CPU-side kernel RM. + // Enabling virtual scrubbing mode for SRIOV-HEAVY mode. + // + // Temporary: Disabling scrub on free if CC is enabled. Once the + // support for secure work launch is in, this temporary change can be + // reverted. Bug: 3334708 + // + + if ((RMCFG_FEATURE_PLATFORM_WINDOWS && !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TCC_MODE)) || + IS_SIMULATION(pGpu) || IsDFPGA(pGpu) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU) || + IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + RMCFG_FEATURE_PLATFORM_GSP || + (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && !gpuIsCacheOnlyModeEnabled(pGpu)) || + gpuIsCCFeatureEnabled(pGpu) || + IsSLIEnabled(pGpu)) + { + pMemoryManager->bScrubOnFreeEnabled = NV_FALSE; + } + + // + // CE virtual writes are used in the following cases + // 1. When SR-IOV heavy is in use on GA100 + // 2. When APM is enabled on GA100. + // + if (pMemoryManager->bScrubOnFreeEnabled && + ((IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) || + gpuIsApmFeatureEnabled(pGpu))) + { + pMemoryManager->bUseVasForCeMemoryOps = NV_TRUE; + } +} + +/*! + * Read and validate MMU Lock registers. 
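+ *
+ * The locked range is read from NV_PFB_PRI_MMU_LOCK_ADDR_LO/_HI and is
+ * reported as valid only when RM has level-0 read permission on the
+ * registers and ADDR_HI is not below ADDR_LO.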
+ */ +NV_STATUS +memmgrReadMmuLock_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvBool *pbIsValid, + NvU64 *pMmuLockLo, + NvU64 *pMmuLockHi +) +{ + NvU32 plm = 0; + NvU32 tmp = 0; + + + *pbIsValid = NV_FALSE; + *pMmuLockLo = 0; + *pMmuLockHi = 0; + + // Ensure RM can read MMU_LOCKED region + plm = GPU_REG_RD32(pGpu, NV_PFB_PRI_MMU_LOCK_ADDR_LO__PRIV_LEVEL_MASK); + + if (!FLD_TEST_DRF(_PFB_PRI, _MMU_LOCK_CFG_PRIV_LEVEL_MASK, _READ_PROTECTION_LEVEL0, _ENABLE, plm)) + { + NV_PRINTF(LEVEL_ERROR, "MMU_LOCK read permission disabled, PLM val 0x%0x\n", + plm); + NV_ASSERT(0); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Read MEM lock values + tmp = DRF_VAL(_PFB, _PRI_MMU_LOCK_ADDR_LO, _VAL, GPU_REG_RD32(pGpu, NV_PFB_PRI_MMU_LOCK_ADDR_LO)); + *pMmuLockLo = ((NvU64)tmp) << NV_PFB_PRI_MMU_LOCK_ADDR_LO_ALIGNMENT; + + tmp = DRF_VAL(_PFB, _PRI_MMU_LOCK_ADDR_HI, _VAL, GPU_REG_RD32(pGpu, NV_PFB_PRI_MMU_LOCK_ADDR_HI)); + *pMmuLockHi = ((NvU64)tmp) << NV_PFB_PRI_MMU_LOCK_ADDR_HI_ALIGNMENT; + + // Check for validity + if (*pMmuLockHi >= *pMmuLockLo) + *pbIsValid = NV_TRUE; + + return NV_OK; +} + +/* + * Bug 2974274 + * As stated in the bug, row remapper takes up few MBs at end of FB but LOCAL_MEMORY_RANGE register + * rounds it down by nearest 1GB boundary. Since RM uses LOCAL_MEMORY_RANGE to figure out the total + * FB size, this results in almost 1GB of loss in usable FB size and this function along with WARs + * in VBIOS & ACR solves the issue. + * VBIOS rounds up the size to nearest 1GB boundary and locks down (MMU_LOCK) the difference between the usable + * and rounded up size. This function ensures the difference is blocked from RM allocation. + * + */ +NV_STATUS +memmgrBlockMemLockedMemory_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + FB_REGION_DESCRIPTOR blockedFbRegion; + NvU64 memLockLo = 0; + NvU64 memLockHi = 0; + NvU64 size = 0; + NvBool bIsMmuLockValid = NV_FALSE; + + NV_ASSERT_OK_OR_RETURN(memmgrReadMmuLock_HAL(pGpu, pMemoryManager, &bIsMmuLockValid, &memLockLo, &memLockHi)); + + // MMU_LOCK is not set in OLD Vbios that programs 1GB less in LOCAL_MEMORY_RANGE + if (!bIsMmuLockValid) + { + return NV_OK; + } + + memLockHi = NV_ALIGN_UP(memLockHi, 0x10000) - 1; // Align up to cover till the last byte + + // Check if memLockHi equals FB_TOP, if not MMU_LOCK is set in unexpected range + if (((memLockHi + 1) >> 20) != pMemoryManager->Ram.fbTotalMemSizeMb) + { + return NV_ERR_INVALID_STATE; + } + + // Add a new region that will be blocked for any usage. + portMemSet(&blockedFbRegion, 0, sizeof(blockedFbRegion)); + size = RM_PAGE_ALIGN_UP(((memLockHi - memLockLo) + 1)); + + blockedFbRegion.base = memLockLo; + blockedFbRegion.limit = memLockHi; + blockedFbRegion.rsvdSize = 0; + blockedFbRegion.bRsvdRegion = NV_TRUE; + blockedFbRegion.performance = 0; + blockedFbRegion.bSupportCompressed = NV_FALSE; + blockedFbRegion.bSupportISO = NV_FALSE; + blockedFbRegion.bProtected = NV_FALSE; + blockedFbRegion.bInternalHeap = NV_FALSE; + blockedFbRegion.bLostOnSuspend = NV_TRUE; + + memmgrInsertFbRegion(pGpu, pMemoryManager, &blockedFbRegion); + + pMemoryManager->Ram.fbUsableMemSize -= size; + + NV_PRINTF(LEVEL_INFO, "Blocked Start: 0x%0llx End: 0x%0llx Size: 0x%0llx\n", + memLockLo, memLockHi, size); + return NV_OK; +} + +/*! 
+ * Returns the max context size + * + * @returns NvU64 + */ +NvU64 +memmgrGetMaxContextSize_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NvU64 size = memmgrGetMaxContextSize_TU102(pGpu, pMemoryManager); + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + if (!gpuIsClientRmAllocatedCtxBufferEnabled(pGpu)) + { + // + // When ctx buffer management is in GSP-RM, GSP-RM needs extra + // 100 MBs to meet max CUDA context allocation requirement + // + size += 100 * 1024 * 1024; + } + } + + // + // See bug 200619860. We are running out of memory during allocation + // of GR buffers. Since GR buffers are not allocated inside guest RM + // we are skipping reservation there + // + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM && + pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TCC_MODE)) + { + size += 32 * 1024 * 1024; + } + + return size; +} + +void +memmgrGetDisablePlcKind_GA100 +( + MemoryManager *pMemoryManager, + NvU32 *pKind +) +{ + if (pKind != NULL) + { + *pKind = NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC; + } +} + +/*! + * @brief This function sets the PDB property to enable/disable dynamic page offlining + */ +void +memmgrEnableDynamicPageOfflining_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + if (gpuIsGlobalPoisonFuseEnabled(pGpu)) + { + pMemoryManager->bEnableDynamicPageOfflining = NV_TRUE; + } + + return; +} + +/*! + * @brief Get blacklist page details. + * + * @param[in] pGpu OBJGPU + * @param[in] pMemoryManager MemoryManager + * @param[out] pBlAddrs BLACKLIST_ADDRESSES where count is taken + * as input and the addressed and count is + * returned. + * @param[in/out] pCount Takes size of pBlAddrs as input and returns + * the number of populated addresses in + * pBlAddrs. + @returns NV_STATUS + * + */ +NV_STATUS +memmgrGetBlackListPages_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + BLACKLIST_ADDRESS *pBlAddrs, + NvU32 *pCount +) +{ + NvU32 baseIndex = 0; + NV_STATUS status = NV_OK; + NvU32 idx = 0; + NvU32 entryIdx = 0; + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT) || + !gpuCheckPageRetirementSupport_HAL(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // Read the inforom for a list of pages to blacklist. + // SLI support requires investigation to ensure + // identical heaps on both devices (bug 756971). + // + if (IsSLIEnabled(pGpu) && !gpuIsEccPageRetirementWithSliAllowed(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + while (baseIndex < NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_PAGES) + { + RM_API *pRmApi = IS_GSP_CLIENT(pGpu) ? 
GPU_GET_PHYSICAL_RMAPI(pGpu) + : rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV2080_CTRL_FB_GET_DYNAMIC_OFFLINED_PAGES_PARAMS blParams = {0}; + + blParams.baseIndex = baseIndex; + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_FB_GET_DYNAMIC_OFFLINED_PAGES, + &blParams, + sizeof(blParams)); + if(NV_OK != status) + { + if (NV_ERR_NOT_SUPPORTED == status || + NV_ERR_OBJECT_NOT_FOUND == status) + { + NV_PRINTF(LEVEL_ERROR, + "No blacklisted pages\n"); + } + else + { + NV_ASSERT(0); + NV_PRINTF(LEVEL_ERROR, + "Failed to read black list addresses\n"); + } + break; + } + + for (idx = 0; idx < blParams.validEntries; idx++) + { + + if (entryIdx >= *pCount) + { + status = NV_ERR_BUFFER_TOO_SMALL; + goto done; + } + pBlAddrs[entryIdx].address = blParams.offlined[idx].pageNumber << RM_PAGE_SHIFT; + pBlAddrs[entryIdx].type = blParams.offlined[idx].source; + entryIdx++; + } + + if (!blParams.bMore) { + break; + } + + baseIndex += NV2080_CTRL_FB_DYNAMIC_BLACKLIST_MAX_ENTRIES; + } + +done: + *pCount = entryIdx; + + return status; +} + +/*! + * @brief Inserts an unprotected segment at the start of FB on GA100 + to prevent VPR from getting allocated here + * + * @returns NV_STATUS + */ +NV_STATUS +memmgrInsertUnprotectedRegionAtBottomOfFb_GA100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *pSize +) +{ + FB_REGION_DESCRIPTOR fbRegion = {0}; + NvU64 memLockLo = 0; + NvU64 memLockHi = 0; + NvBool bIsMmuLockValid = NV_FALSE; + NvU64 size; + + NV_ASSERT_OK_OR_RETURN(memmgrReadMmuLock_HAL(pGpu, pMemoryManager, + &bIsMmuLockValid, &memLockLo, + &memLockHi)); + + // MMU_LOCK is not set in OLD Vbios that programs 1GB less in LOCAL_MEMORY_RANGE + if (!bIsMmuLockValid) + { + *pSize = 0; + return NV_OK; + } + + memLockHi = NV_ALIGN_UP(memLockHi, 0x10000) - 1; // Align up to cover till the last byte + + size = RM_PAGE_ALIGN_UP(memLockHi - memLockLo + 1); + + // + // Bug 2509894: In order to prevent CBC wrap around and clobbering of + // VPR contents, we move VPR out of the bottom 1.5GB of video memory. + // For raw mode, HW assumes a max of 384 MBs (for 96GB FB) of CBC. However, + // on GA100 due to a HW bug this figure comes around ~1GB. By experimentation, + // we found that 1.5GBs works fine for us. + // + size = NV_MAX(size, NV_CBC_MAX_SIZE_BUG_2509894_WAR); + + // SEC2 ucode expects VPR start address to be 1MB aligned + size = NV_ALIGN_UP(size, 1 * 1024 * 1024); + + fbRegion.base = 0; + fbRegion.limit = size - 1; + fbRegion.rsvdSize = 0; + fbRegion.bRsvdRegion = NV_FALSE; + fbRegion.performance = 0; + fbRegion.bSupportCompressed = NV_TRUE; + fbRegion.bSupportISO = NV_FALSE; + fbRegion.bProtected = NV_FALSE; + fbRegion.bInternalHeap = NV_FALSE; + + memmgrInsertFbRegion(pGpu, pMemoryManager, &fbRegion); + + NV_PRINTF(LEVEL_INFO, "Unprotected Block Start: 0x%0llx End: 0x%0llx Size: 0x%0llx\n", + memLockLo, memLockHi, size); + + // Return the size + *pSize = size; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/mem_mgr_ga102.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/mem_mgr_ga102.c new file mode 100644 index 000000000..5090fd0f5 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/ampere/mem_mgr_ga102.c @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "virtualization/hypervisor/hypervisor.h" + + +/*! + * @brief This function sets the PDB property to enable/disable dynamic page offlining + */ +void +memmgrEnableDynamicPageOfflining_GA102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + if (IS_VIRTUAL(pGpu) || hypervisorIsVgxHyper() || + gpuIsGlobalPoisonFuseEnabled(pGpu)) + { + pMemoryManager->bEnableDynamicPageOfflining = NV_TRUE; + } + + return; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/fbsr_gm107.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/fbsr_gm107.c new file mode 100644 index 000000000..d3a487d8d --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/fbsr_gm107.c @@ -0,0 +1,417 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/fbsr.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "published/maxwell/gm107/dev_ram.h" +#include "core/thread_state.h" + +// +// Implementation notes: +// +// This file implements two mechanisms for FB save / restore on Fermi. 
I +// would've liked to split the implementation for Begin()/End()/CopyMemory() +// into seperate subclasses but our current object model makes inheritance a tad +// cumbersome. +// +// Mechanism #1 (TYPE_DMA): The data is DMA'ed (via CE) to a large non-paged +// system memory allocation. This is the preferred / performance path. +// +// Mechanism #2 (TYPE_CPU): The backup buffer is allocated in system memory +// chunks. The sysmem buffers are currently non-paged, long-term the plan is to +// switch the allocations to be paged. For this approach we pre-allocate a page +// size buffer for CE to DMA into. From that buffer we use CPU IO to move the +// data out into the intended storage buffer. The reason for chunks is that it +// has been noticed that large non-paged allocations tend to fail more often +// than multiple smaller non-paged allocations. Once we move over to paged +// allocations here this *might* not be needed. Bug 579780 and 579765 are +// tracking the RFE for paged memory. The reason for using CE here is bar2 isn't +// yet set up and BAR0 would be really really slow (not that we care about +// performance much for this sheme). +// +// Mechanism #3 (TYPE_PERSISTENT): The video Memory[Fb] data is transferred to +// sys_mem by means of DMA[CE engine], sys_mem allocation is pinned across S3 +// transitions. Sys_mem allocations are done at first S3 cycle and release during +// driver unload. this approach reduces system VM fragmentation. Optimus systems, +// keeps GPU in D3 state, as long as there is no work for GPU. Because of frequent +// transitions between D0 & D3, system is running out of *CONTIGOUS* VM, with this +// approach Optimus system could avoid the above problem. +// +// Mechanism #4 (TYPE_PAGED_DMA): It is basically the same with the TYPE_DMA +// method except that we allocate the buffer from pagable memory pool. After the +// buffer is allocated, we need to use memdescLock to lock the buffer in physical +// memory so that CE can access it and then use memdescUnlock to unlock it. +// +// Mechanisms #5 and #6 are targetted for WDDM, a large VA section (Paged region) +// and a small pinned region are committed on boot. +// +// Mechanism #5 (TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED): +// For power save, map and pin the large va region and if the map succeeds, +// ce copy to this large pinned page. At this point it is similar to TYPE_DMA. +// If the map and pin fails, fall back to TYPE_WDDM_SLOW_CPU +// +// Mechanism #6 (TYPE_WDDM_SLOW_CPU_PAGED): +// When TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED fails, use a small 64k pinned page +// that was preallocated and dma to this pinned page from FB. Once the ce completes, +// the chunk is then copied to the paged CPU memory. A 64k chunk size is chosen +// because the Windows ZwMapViewOfSection requires 64K alignment +// +// While technically mechanism #2 can fail (even with paged memory) another +// approach worth considering would be to pre-allocate the save buffer in the +// video memory allocation path (memdescAlloc). However, with this approach we'd +// incur a memory overhead even if S/R was never used. +// + +#ifdef DEBUG +#endif + +#define CPU_PINNED_BUFFER_SIZE RM_PAGE_SIZE +#define CPU_MAX_PINNED_BUFFER_SIZE 0x10000 + +// +// Maximum data copy size in bytes for file operations (read/write) +// which can be transferred with default thread timeout. +// +#define MAX_FILE_COPY_SIZE_WITHIN_DEFAULT_THREAD_TIMEOUT (64 * 1024 * 1024) + +/*! 
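+ * (This routine only allocates the staging buffers required by the
+ *  save/restore scheme already selected in pFbsr->type; see the
+ *  implementation notes above.)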
+ * Init + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pFbsr OBJFBSR pointer + * + * @returns None + */ +NV_STATUS +fbsrInit_GM107(OBJGPU *pGpu, OBJFBSR *pFbsr) +{ + NV_STATUS status; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + portMemSet(&pFbsr->pagedBufferInfo, 0, sizeof(pFbsr->pagedBufferInfo)); + + // Commit an upper bound VA for both slow cpu and fast dma. + if ((pFbsr->type == FBSR_TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED || + pFbsr->type == FBSR_TYPE_WDDM_SLOW_CPU_PAGED)) + { + // We need it only once not per fbsr scheme though + pFbsr->pagedBufferInfo.maxLength = memmgrGetRsvdSizeForSr_HAL(pGpu, pMemoryManager); + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT)) + { + status = osReserveCpuAddressSpaceUpperBound(&pFbsr->pagedBufferInfo.sectionHandle, + pFbsr->pagedBufferInfo.maxLength); + + if (status != NV_OK) + return status; + } + } + + // + // Allocate a 64K/4k sized buffer for forward progress if the WDDM Fast path + // cannot pin the large buffer. + // + if (pFbsr->type == FBSR_TYPE_CPU || + pFbsr->type == FBSR_TYPE_WDDM_SLOW_CPU_PAGED || + pFbsr->type == FBSR_TYPE_FILE) + { + NvU32 memSize = 0; + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + // Can't allocate sysmem from GSP FW. + return NV_ERR_NOT_SUPPORTED; + + // GSP FW TODO: Allocate this memory on the client side. + } + + if (pFbsr->type == FBSR_TYPE_CPU) + { + memSize = CPU_PINNED_BUFFER_SIZE; + } + else + { + memSize = CPU_MAX_PINNED_BUFFER_SIZE; + } + // + // Pre-allocate a page size buffer for CE to DMA into. + // This buffer is accessed with the CPU so it is best + // to to use cached memory. + // + status = memdescCreate(&pFbsr->pSysMemDesc, pGpu, memSize, + 0, NV_TRUE, ADDR_SYSMEM, NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto fail; + } + + status = memdescAlloc(pFbsr->pSysMemDesc); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + memdescDestroy(pFbsr->pSysMemDesc); + pFbsr->pSysMemDesc = NULL; + goto fail; + } + + status = memdescMapOld(pFbsr->pSysMemDesc, 0, memSize, NV_TRUE /*kernel*/ , + NV_PROTECT_READ_WRITE, + (pFbsr->type == FBSR_TYPE_FILE ? (void**)&pFbsr->pDmaBuffer: + (void**)&pFbsr->pPinnedBuffer), + (void **)&pFbsr->pMapCookie); + if (status != NV_OK) + { + NV_ASSERT(0); + memdescFree(pFbsr->pSysMemDesc); + memdescDestroy(pFbsr->pSysMemDesc); + pFbsr->pSysMemDesc = NULL; + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto fail; + } + + NV_ASSERT(!pFbsr->pSysMemDesc->PteAdjust); + } + pFbsr->bInitialized = NV_TRUE; + return NV_OK; + + fail: + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT) && + (pFbsr->type == FBSR_TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED || + pFbsr->type == FBSR_TYPE_WDDM_SLOW_CPU_PAGED)) + { + osReleaseCpuAddressSpaceUpperBound(pFbsr->pagedBufferInfo.sectionHandle); + } + + return status; +} + +/*! + * Destroy + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pFbsr OBJFBSR pointer + * + * @returns None + */ +void +fbsrDestroy_GM107(OBJGPU *pGpu, OBJFBSR *pFbsr) +{ + if (pFbsr->type == FBSR_TYPE_CPU || + pFbsr->type == FBSR_TYPE_WDDM_SLOW_CPU_PAGED || + pFbsr->type == FBSR_TYPE_FILE) + { + if (pFbsr->pSysMemDesc) + { + memdescUnmapOld(pFbsr->pSysMemDesc, 1 /*kernel*/, 0, + (pFbsr->type == FBSR_TYPE_FILE) ? 
(void*)pFbsr->pDmaBuffer : + (void*)pFbsr->pPinnedBuffer, + pFbsr->pMapCookie); + memdescFree(pFbsr->pSysMemDesc); + memdescDestroy(pFbsr->pSysMemDesc); + pFbsr->pSysMemDesc = NULL; + } + } + + if (pFbsr->type == FBSR_TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED || + pFbsr->type == FBSR_TYPE_WDDM_SLOW_CPU_PAGED) + { + osReleaseCpuAddressSpaceUpperBound(pFbsr->pagedBufferInfo.sectionHandle); + } +} + +/*! + * Start save/restore operation + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pFbsr OBJFBSR pointer + * @param[in] op Type of operation + * + * @returns NV_OK on success + */ +NV_STATUS +fbsrBegin_GM107(OBJGPU *pGpu, OBJFBSR *pFbsr, FBSR_OP_TYPE op) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * End save/restore operation + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pFbsr OBJFBSR pointer + * + * @returns NV_OK on success + */ +NV_STATUS +fbsrEnd_GM107(OBJGPU *pGpu, OBJFBSR *pFbsr) +{ + NvBool bIommuEnabled = pGpu->getProperty(pGpu, PDB_PROP_GPU_ENABLE_IOMMU_SUPPORT); + NV_STATUS status = NV_OK; + + if (pFbsr->op != FBSR_OP_SIZE_BUF && pFbsr->op != FBSR_OP_DESTROY) + { + } + + if (pFbsr->op == FBSR_OP_RESTORE || pFbsr->bOperationFailed || pFbsr->op == FBSR_OP_DESTROY) + { + switch (pFbsr->type) + { + case FBSR_TYPE_PAGED_DMA: + case FBSR_TYPE_DMA: + { + if (pFbsr->type == FBSR_TYPE_PAGED_DMA) + { + memdescUnlock(pFbsr->pSysMemDesc); + } + memdescFree(pFbsr->pSysMemDesc); + memdescDestroy(pFbsr->pSysMemDesc); + pFbsr->pSysMemDesc = NULL; + break; + } + case FBSR_TYPE_WDDM_SLOW_CPU_PAGED: + // Flush out writes + osFlushCpuWriteCombineBuffer(); + if (pFbsr->pagedBufferInfo.avblViewSz) + { + if (osUnmapViewFromSection(pGpu->pOsGpuInfo, + NvP64_VALUE(pFbsr->pagedBufferInfo.sysAddr), + bIommuEnabled) != NV_OK) + { + pFbsr->bOperationFailed = NV_TRUE; + } + } + break; + case FBSR_TYPE_FILE: + // Flush out writes + osFlushCpuWriteCombineBuffer(); + + // Close the file + osCloseFile(pFbsr->pagedBufferInfo.sectionHandle); + break; + case FBSR_TYPE_CPU: + { + PFBSR_NODE pNode; + PFBSR_NODE pNext; + + // Flush out writes + osFlushCpuWriteCombineBuffer(); + + // Free up list + pNode = pFbsr->pSysMemNodeHead; + + while (pNode) + { + pNext = pNode->pNext; + portMemFree(pNode); + pNode = pNext; + } + + pFbsr->pSysMemNodeHead = NULL; + + break; + } + + case FBSR_TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED: + memdescDestroy(pFbsr->pSysMemDesc); + pFbsr->pSysMemDesc = NULL; + if(bIommuEnabled) + { + status = osSrUnpinSysmem(pGpu->pOsGpuInfo); + } + else + { + status = osUnmapViewFromSection(pGpu->pOsGpuInfo, + NvP64_VALUE(pFbsr->pagedBufferInfo.sysAddr), + bIommuEnabled); + } + if (status!= NV_OK) + { + pFbsr->bOperationFailed = NV_TRUE; + } + break; + + } + } + else if (pFbsr->op == FBSR_OP_SAVE) + { + switch (pFbsr->type) + { + case FBSR_TYPE_PAGED_DMA: + memdescUnlock(pFbsr->pSysMemDesc); + break; + case FBSR_TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED: + memdescDestroy(pFbsr->pSysMemDesc); + pFbsr->pSysMemDesc = NULL; + if(bIommuEnabled) + { + status = osSrUnpinSysmem(pGpu->pOsGpuInfo); + } + else + { + status = osUnmapViewFromSection(pGpu->pOsGpuInfo, + NvP64_VALUE(pFbsr->pagedBufferInfo.sysAddr), + bIommuEnabled); + } + if (status != NV_OK) + { + pFbsr->bOperationFailed = NV_TRUE; + } + break; + case FBSR_TYPE_WDDM_SLOW_CPU_PAGED: + if (pFbsr->pagedBufferInfo.avblViewSz) + { + if (osUnmapViewFromSection(pGpu->pOsGpuInfo, + NvP64_VALUE(pFbsr->pagedBufferInfo.sysAddr), + bIommuEnabled) != NV_OK) + { + pFbsr->bOperationFailed = NV_TRUE; + } + } + break; + } + } + + return pFbsr->bOperationFailed ? 
NV_ERR_GENERIC : NV_OK; +} + +/*! + * Saves or restores a region of video memory. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pFbsr OBJFBSR pointer + * @param[in] pVidMemDesc Memory descriptor for vidmem region + * + * @returns None + */ +void +fbsrCopyMemoryMemDesc_GM107(OBJGPU *pGpu, OBJFBSR *pFbsr, MEMORY_DESCRIPTOR *pVidMemDesc) +{ +} + +#ifdef DEBUG +#endif diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_mgr_gm107.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_mgr_gm107.c new file mode 100644 index 000000000..178db5206 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_mgr_gm107.c @@ -0,0 +1,1634 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/bus/kern_bus.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +#include "gpu/bif/kernel_bif.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" + +#include "rmifrif.h" + +#include "published/maxwell/gm107/dev_mmu.h" +#include "published/maxwell/gm107/dev_ram.h" + +#include "vgpu/rpc.h" +#include "vgpu/vgpu_events.h" + +// +// statics +// +static NV_STATUS memmgrComputeAndSetVgaDisplayMemoryBase_GM107(OBJGPU *, NvU64); + +NvU32 +memmgrChooseKindCompressC_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat) +{ + NvU32 kind = NV_MMU_PTE_KIND_PITCH; + NvU32 attrdepth = DRF_VAL(OS32, _ATTR, _DEPTH, pFbAllocPageFormat->attr); + NvU32 aasamples = DRF_VAL(OS32, _ATTR, _AA_SAMPLES, pFbAllocPageFormat->attr); + NvBool prefer_zbc = !FLD_TEST_DRF(OS32, _ATTR2, _ZBC, _PREFER_NO_ZBC, pFbAllocPageFormat->attr2); + NvU32 ssampling = 0; // TODO + + switch (attrdepth) + { + case NVOS32_ATTR_DEPTH_UNKNOWN: + case NVOS32_ATTR_DEPTH_8: + case NVOS32_ATTR_DEPTH_16: + kind = NV_MMU_PTE_KIND_GENERIC_16BX2; + break; + case NVOS32_ATTR_DEPTH_32: + switch (aasamples) + { + case NVOS32_ATTR_AA_SAMPLES_1: + kind = NV_MMU_PTE_KIND_C32_2CRA; + break; + case NVOS32_ATTR_AA_SAMPLES_2: + kind = memmgrChooseKindCompressCForMS2_HAL(pGpu, pMemoryManager, attrdepth); + break; + case NVOS32_ATTR_AA_SAMPLES_4: + case NVOS32_ATTR_AA_SAMPLES_4_ROTATED: + case NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_8: + case NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16: + kind = prefer_zbc? (ssampling? NV_MMU_PTE_KIND_C32_MS4_2CBA : NV_MMU_PTE_KIND_C32_MS4_2CBR) : NV_MMU_PTE_KIND_C32_MS4_2BRA; + break; + case NVOS32_ATTR_AA_SAMPLES_8: + case NVOS32_ATTR_AA_SAMPLES_16: + case NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_16: + case NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_32: + kind = NV_MMU_PTE_KIND_C32_MS8_MS16_2CRA; + break; + } + break; + case NVOS32_ATTR_DEPTH_64: + switch (aasamples) + { + case NVOS32_ATTR_AA_SAMPLES_1: + kind = NV_MMU_PTE_KIND_C64_2CRA; + break; + case NVOS32_ATTR_AA_SAMPLES_2: + kind = memmgrChooseKindCompressCForMS2_HAL(pGpu, pMemoryManager, attrdepth); + break; + case NVOS32_ATTR_AA_SAMPLES_4: + case NVOS32_ATTR_AA_SAMPLES_4_ROTATED: + case NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_8: + case NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16: + kind = prefer_zbc? (ssampling? 
NV_MMU_PTE_KIND_C64_MS4_2CBA : NV_MMU_PTE_KIND_C64_MS4_2CBR) : NV_MMU_PTE_KIND_C64_MS4_2BRA; + break; + case NVOS32_ATTR_AA_SAMPLES_8: + case NVOS32_ATTR_AA_SAMPLES_16: + case NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_16: + case NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_32: + kind = NV_MMU_PTE_KIND_C64_MS8_MS16_2CRA; + break; + } + break; + case NVOS32_ATTR_DEPTH_128: + switch (aasamples) + { + case NVOS32_ATTR_AA_SAMPLES_1: + kind = NV_MMU_PTE_KIND_C128_2CR; + break; + case NVOS32_ATTR_AA_SAMPLES_2: + kind = NV_MMU_PTE_KIND_C128_MS2_2CR; + break; + case NVOS32_ATTR_AA_SAMPLES_4: + case NVOS32_ATTR_AA_SAMPLES_4_ROTATED: + case NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_8: + case NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16: + kind = NV_MMU_PTE_KIND_C128_MS4_2CR; + break; + case NVOS32_ATTR_AA_SAMPLES_8: + case NVOS32_ATTR_AA_SAMPLES_16: + case NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_16: + case NVOS32_ATTR_AA_SAMPLES_8_VIRTUAL_32: + kind = NV_MMU_PTE_KIND_C128_MS8_MS16_2CR; + break; + } + break; + } + + return kind; +} + +NV_STATUS +memmgrAllocDetermineAlignment_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *pMemSize, + NvU64 *pAlign, + NvU64 alignPad, + NvU32 allocFlags, + NvU32 retAttr, + NvU32 retAttr2, + NvU64 hwAlignment +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + switch (dmaNvos32ToPageSizeAttr(retAttr, retAttr2)) + { + case RM_ATTR_PAGE_SIZE_4KB: + hwAlignment = NV_MAX(hwAlignment, RM_PAGE_SIZE - 1); + break; + case RM_ATTR_PAGE_SIZE_BIG: + // we will always align to the biggest page size + hwAlignment = NV_MAX(hwAlignment, kgmmuGetMaxBigPageSize_HAL(pKernelGmmu) - 1); + break; + case RM_ATTR_PAGE_SIZE_HUGE: + NV_ASSERT_OR_RETURN(kgmmuIsHugePageSupported(pKernelGmmu), + NV_ERR_INVALID_ARGUMENT); + hwAlignment = NV_MAX(hwAlignment, RM_PAGE_SIZE_HUGE - 1); + break; + case RM_ATTR_PAGE_SIZE_512MB: + NV_ASSERT_OR_RETURN(kgmmuIsPageSize512mbSupported(pKernelGmmu), + NV_ERR_INVALID_ARGUMENT); + hwAlignment = NV_MAX(hwAlignment, RM_PAGE_SIZE_512M - 1); + break; + case RM_ATTR_PAGE_SIZE_DEFAULT: + case RM_ATTR_PAGE_SIZE_INVALID: + NV_PRINTF(LEVEL_ERROR, "- invalid page size specified\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (!FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, retAttr)) + { + if (FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, retAttr) && + !(allocFlags & NVOS32_ALLOC_FLAGS_VIRTUAL) && + pMemoryManager->bSmallPageCompression) + { + // + // No offset alignment requirement for 4KB compression. + // The size should be aligned to compression pagesize. + // + NvU32 comprPageSize = pMemorySystemConfig->comprPageSize; + *pMemSize = ((*pMemSize + alignPad + comprPageSize - 1) / comprPageSize) * comprPageSize; + } + else + { + // Both size and offset should be aligned to compression pagesize. + hwAlignment = NV_MAX(hwAlignment, pMemorySystemConfig->comprPageSize - 1); + + if (FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, retAttr) && + !pMemoryManager->bSmallPageCompression) + { + NV_PRINTF(LEVEL_INFO, + "Compression requested on small page size mappings\n"); + } + } + } + + // a non-zero alignment means it's a requested alignment. 
Ensure the requested + // alignment is still aligned to the hw requirements + if ((*pAlign) && + (((*pAlign > hwAlignment) && !(*pAlign % (hwAlignment+1))) || // align is >=1 multiple of hwAlignment + ((*pAlign <= hwAlignment+1) && !((hwAlignment+1) % *pAlign)))) // hwAlignment is a >= mulitple of align + { + if ( *pAlign <= hwAlignment+1 ) + { + *pAlign = hwAlignment + 1; + } + + (*pAlign)--; // convert to (alignment-1) (not really a "mask") + // calculate the new size based on hw alignment + *pMemSize = ((*pMemSize + alignPad + hwAlignment) / (hwAlignment+1)) * (hwAlignment+1); + hwAlignment = *pAlign; // this aligns the offset to the requested alignment + } + else + { + // if this alignment was a force or fail, fail it here + if (*pAlign != 0 && (allocFlags & NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE)) + { + *pAlign = 0; + return (NV_ERR_INVALID_ARGUMENT); + } + + // + // By default round to the hw alignment. It is important to pad to the page size + // on fermi for all allocations as we cannot mix page size on the same physical + // memory due to page swizzle. + // + *pAlign = hwAlignment; + *pMemSize = ((*pMemSize + alignPad + hwAlignment) / (hwAlignment+1)) * (hwAlignment+1); + } + + return NV_OK; +} + +static void +memmgrSetZbcReferenced +( + OBJGPU *pGpu, + NvBool bZbcSurfacesExist +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS params = {0}; + + // Allocations are RPCed to host, so they are counted there + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + return; + + params.bZbcSurfacesExist = bZbcSurfacesExist; + NV_ASSERT_OK( + pRmApi->Control( + pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_ZBC_REFERENCED, + ¶ms, + sizeof(params))); +} + +// +// Update user alloc request parameter according to memory +// type and (possibly) reserve hw resources. 
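+// In practice this means: reject unsupported attribute combinations, choose a
+// PTE kind via memmgrChooseKind_HAL, allocate compression tag resources when
+// the surface is compressible, and refcount ZBC-capable surfaces.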
+// +NV_STATUS +memmgrAllocHal_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NV_STATUS status = NV_OK; + NvU32 comprAttr, tiledAttr, zcullAttr, type; + NvU32 cacheAttr; + NvU32 format, kind, bAlignPhase; + NvU32 retAttr = pFbAllocInfo->retAttr; + NvU32 retAttr2 = pFbAllocInfo->retAttr2; + NV_ADDRESS_SPACE addrSpace; + NvBool bComprWar = NV_FALSE; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + + // get the specified attribute values + comprAttr = DRF_VAL(OS32, _ATTR, _COMPR, pFbAllocInfo->pageFormat->attr); + tiledAttr = DRF_VAL(OS32, _ATTR, _TILED, pFbAllocInfo->pageFormat->attr); + zcullAttr = DRF_VAL(OS32, _ATTR, _ZCULL, pFbAllocInfo->pageFormat->attr); + format = DRF_VAL(OS32, _ATTR, _FORMAT, pFbAllocInfo->pageFormat->attr); + cacheAttr = DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pFbAllocInfo->pageFormat->attr2); + type = pFbAllocInfo->pageFormat->type; + addrSpace = memmgrAllocGetAddrSpace(pMemoryManager, pFbAllocInfo->pageFormat->flags, retAttr); + + if ( NVOS32_ATTR_LOCATION_AGP == DRF_VAL(OS32, _ATTR, _LOCATION, pFbAllocInfo->pageFormat->attr) ) + return NV_ERR_NOT_SUPPORTED; // only local vid & pci (sysmem) supported + + bAlignPhase = !!(pFbAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC); + + // + // errorcheck specified attributes + // NOTE: With the new macro, the distinction between 32-bit colour + // compression and Z compression is in the value of 'type' - DEPTH or IMAGE + // So the caller is urged to verify integrity. + // + if ( + // Tiling is not supported in nv50+ + (tiledAttr == NVOS32_ATTR_TILED_REQUIRED) || + (tiledAttr == 0x3) || + // check the value of compression attribute + // attributes verification for compressed surfaces + !(memmgrVerifyComprAttrs_HAL(pMemoryManager, type, format, comprAttr)) || + // depth buffer attribute verification + !(memmgrVerifyDepthSurfaceAttrs_HAL(pMemoryManager, type, format)) + || (zcullAttr == NVOS32_ATTR_ZCULL_REQUIRED) || (zcullAttr == NVOS32_ATTR_ZCULL_SHARED) + ) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Fermi does not support tiling + retAttr = FLD_SET_DRF(OS32, _ATTR, _TILED, _NONE, retAttr); + + if (cacheAttr == NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT) + { + // + // The GPU cache is not sysmem coherent. Caching sysmem in GPU cache requires + // clients to issue GPU cache invalidates to maintain coherency. 
+ // + if (addrSpace == ADDR_SYSMEM) + { + retAttr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, retAttr2); + } + else + { + retAttr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _YES, retAttr2); + } + } + + if (!FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, retAttr)) + { + if (pMemorySystemConfig->bDisableCompbitBacking) + { + NV_PRINTF(LEVEL_INFO, "compression disabled due to regkey\n"); + retAttr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _NONE, retAttr); + } + else if (!memmgrComprSupported(pMemoryManager, addrSpace)) + { + if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _REQUIRED, retAttr)) + { + NV_PRINTF(LEVEL_ERROR, + "Compression not supported for this configuration.\n"); + return NV_ERR_NOT_SUPPORTED; + } + else + { + retAttr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _NONE, retAttr); + } + } + } + + { + status = memmgrChooseKind_HAL(pGpu, pMemoryManager, pFbAllocInfo->pageFormat, + DRF_VAL(OS32, _ATTR, _COMPR, retAttr), &kind); + if (status != NV_OK) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + + pFbAllocInfo->pageFormat->kind = kind; + + // + // See Bug 351429: It should not be an error to specify an uncompressible kind + // via -pte_kind[CZ] should be sufficient even if -compress[CZ] not specified + // + if(!memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind)) + { + retAttr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _NONE, retAttr); + } + + // Ideally compression should only be enabled on big/huge page mapping + if (FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, retAttr) && + !FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, retAttr) && + !pMemoryManager->bSmallPageCompression) + { + if (FLD_TEST_DRF(OS32, _ATTR, _COMPR, _REQUIRED, retAttr)) + { + // We allow in MODS due to test requirement. + if (!RMCFG_FEATURE_PLATFORM_MODS) + { + NV_PRINTF(LEVEL_ERROR, + "ERROR: Compression requested for small page allocation.\n"); + return NV_ERR_NOT_SUPPORTED; + } + } + else + { + if (RMCFG_FEATURE_PLATFORM_MODS) + { + retAttr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _NONE, retAttr); + if(memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind)) + { + kind = memmgrGetUncompressedKind_HAL(pGpu, pMemoryManager, kind, NV_FALSE); + pFbAllocInfo->pageFormat->kind = kind; + } + } + else + { + bComprWar = NV_TRUE; + } + } + } + + // Allocate zcull before we save the pitch and size + pFbAllocInfo->hwResId = 0; + + // + // Attempt to allocate ctags to go with this allocation. + // Note: The way things work we're actually allocating ctags for a region + // which hasn't been allocated yet. We only know the size the region will be. + // Later we'll get a call to bind (fbsetallocparams). But, this fbsetallocparams + // call isn't late enough! We need a contextdma to actually bind with! + // So we have to keep track of the allocation by creating a marker for it and using + // the hwresid to invoke it later :( + // + if (!FLD_TEST_DRF(OS32, _ATTR, _COMPR, _NONE, retAttr)) + { + if (!bAlignPhase && !bComprWar && + !(pFbAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_VIRTUAL)) + { + status = kmemsysAllocComprResources_HAL(pGpu, pKernelMemorySystem, pFbAllocInfo, + pFbAllocInfo->origSize, 1, &retAttr, retAttr2); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "memsysAllocComprResources failed\n"); + + return status; + } + } + } + // + // !!WARNING!!! + // + // This flag is introduced as a temporary WAR to enable color compression + // without ZBC. RM will skip refcounting the ZBC table when this flag is set. 
+ // PTE Kind could still support ZBC (there are sometimes no non-zbc equivalent) + // Hence UMD has to disable zbc for the app by masking all the zbc slots. + // It's a temporary WAR until we implement per process zbc slot management. + // + if (FLD_TEST_DRF(OS32, _ATTR2, _ZBC_SKIP_ZBCREFCOUNT, _NO, pFbAllocInfo->pageFormat->attr2)) + { + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_ZBC, pFbAllocInfo->pageFormat->kind) && + !(pFbAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_VIRTUAL) && + !IS_MIG_ENABLED(pGpu)) + { + retAttr2 = FLD_SET_DRF(OS32, _ATTR2, _ZBC, _PREFER_ZBC, retAttr2); + if (!bAlignPhase) + { + pMemoryManager->zbcSurfaces++; + NV_PRINTF(LEVEL_INFO, + "zbcSurfaces = 0x%x, hwResId = 0x%x\n", + pMemoryManager->zbcSurfaces, pFbAllocInfo->hwResId); + + if (pMemoryManager->zbcSurfaces == 1) + memmgrSetZbcReferenced(pGpu, NV_TRUE); + } + } + else + { + retAttr2 = FLD_SET_DRF(OS32, _ATTR2, _ZBC, _PREFER_NO_ZBC, retAttr2); + } + } + else + { + NV_ASSERT(FLD_TEST_DRF(OS32, _ATTR2, _ZBC, _PREFER_NO_ZBC, retAttr2)); + } + + pFbAllocInfo->format = kind; + pFbAllocInfo->retAttr = retAttr; + pFbAllocInfo->retAttr2 = retAttr2; + + return (NV_OK); +} + +NV_STATUS +memmgrSetAllocParameters_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + + return NV_OK; +} + +// +// Release tile back to the free pool. +// +NV_STATUS +memmgrFreeHal_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_INFO *pFbAllocInfo, + PRMTIMEOUT pTimeout +) +{ + NvU32 commitResId = pFbAllocInfo->hwResId; + + if (pFbAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC) + { + // for vGPU, we set this flag in memmgrAllocHwResources + return NV_OK; + } + + // We might want to move this check to higher-level + if (IS_MIG_ENABLED(pGpu)) + { + // In SMC mode, we do not program ZCULL or ZBC + return NV_OK; + } + + kmemsysFreeComprResources_HAL(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu), commitResId); + + if (FLD_TEST_DRF(OS32, _ATTR2, _ZBC_SKIP_ZBCREFCOUNT, _NO, pFbAllocInfo->pageFormat->attr2) && + memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_ZBC, pFbAllocInfo->format)) + { + NV_ASSERT(pMemoryManager->zbcSurfaces !=0 ); + if (pMemoryManager->zbcSurfaces != 0) + { + pMemoryManager->zbcSurfaces--; + + if (pMemoryManager->zbcSurfaces == 0) + memmgrSetZbcReferenced(pGpu, NV_FALSE); + } + + NV_PRINTF(LEVEL_INFO, + "[1] hwResId = 0x%x, offset = 0x%llx, size = 0x%llx\n", + pFbAllocInfo->hwResId, pFbAllocInfo->offset, + pFbAllocInfo->size); + + NV_PRINTF(LEVEL_INFO, "[2] zbcSurfaces = 0x%x\n", + pMemoryManager->zbcSurfaces); + } + + return NV_OK; +} + +NV_STATUS +memmgrGetSurfacePhysAttr_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + Memory *pMemory, + NvU64 *pOffset, + NvU32 *pMemAperture, + NvU32 *pMemKind, + NvU32 *pComprOffset, + NvU32 *pComprKind, + NvU32 *pLineMin, + NvU32 *pLineMax, + NvU32 *pZCullId, + NvU32 *pGpuCacheAttr, + NvU32 *pGpuP2PCacheAttr, + NvU64 *contigSegmentSize +) +{ + NV_STATUS rmStatus; + PMEMORY_DESCRIPTOR pMemDesc = memdescGetMemDescFromGpu(pMemory->pMemDesc, pGpu); + COMPR_INFO comprInfo; + NvU32 unused; + + NV_ASSERT(pMemDesc); + + rmStatus = memmgrFillMemdescForPhysAttr(pGpu, pMemoryManager, pMemDesc, AT_GPU, pOffset, pMemAperture, + pMemKind, pZCullId, pGpuCacheAttr, pGpuP2PCacheAttr, + contigSegmentSize); + if (NV_OK != rmStatus) + { + return rmStatus; + } + + if ((!memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, *pMemKind)) || + !FB_HWRESID_CTAGID_VAL_FERMI(memdescGetHwResId(pMemDesc))) + { + 
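+        // Kind is not compressible or the surface has no ctag coverage:
+        // report an uncompressed kind and stop here.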
*pComprKind = 0; + return NV_OK; + } + // vGPU: pPrivate->pCompTags is not + // currently initialized in the guest RM + // vGPU does not use compression tags yet. + // GSPTODO: sort out ctags + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + *pComprOffset = 0x0; + *pLineMin = 0x0; + *pLineMax = 0x0; + return NV_OK; + } + + rmStatus = memmgrGetKindComprFromMemDesc(pMemoryManager, pMemDesc, 0, &unused, &comprInfo); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "dmaGetKidnCompr failed: %x\n", rmStatus); + return rmStatus; + } + + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, comprInfo.kind)) + { + *pLineMin = comprInfo.compTagLineMin; + *pLineMax = comprInfo.compPageIndexHi - comprInfo.compPageIndexLo + comprInfo.compTagLineMin; + *pComprOffset = comprInfo.compPageIndexLo; + *pComprKind = 1; + } + else + { + // No coverage at all (stripped by release/reacquire or invalid hw res). + *pLineMin = ~0; + *pLineMax = ~0; + *pComprKind = 0; + } + + return NV_OK; +} + +NV_STATUS +memmgrGetBAR1InfoForClient_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvHandle hClient, + PGETBAR1INFO bar1Info +) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU64 largestFreeSize; + NvU64 freeSize; + OBJVASPACE *pBar1VAS; + OBJEHEAP *pVASHeap; + NV_RANGE bar1VARange = NV_RANGE_EMPTY; + + /* + * For legacy vGPU and SRIOV heavy, get BAR1 information from vGPU plugin. + */ + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + NV_STATUS status = NV_OK; + NV2080_CTRL_FB_GET_INFO_V2_PARAMS fbInfoParams = {0}; + RsClient *pRsClient; + Subdevice *pSubdevice; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + NV_ASSERT_OK_OR_RETURN(subdeviceGetByGpu(pRsClient, pGpu, &pSubdevice)); + + fbInfoParams.fbInfoList[0].index = NV2080_CTRL_FB_INFO_INDEX_BAR1_SIZE; + fbInfoParams.fbInfoList[1].index = NV2080_CTRL_FB_INFO_INDEX_BAR1_AVAIL_SIZE; + fbInfoParams.fbInfoList[2].index = NV2080_CTRL_FB_INFO_INDEX_BAR1_MAX_CONTIGUOUS_AVAIL_SIZE; + fbInfoParams.fbInfoList[3].index = NV2080_CTRL_FB_INFO_INDEX_BANK_SWIZZLE_ALIGNMENT; + + fbInfoParams.fbInfoListSize = 4; + + NV_RM_RPC_CONTROL(pGpu, hClient, RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_FB_GET_INFO_V2, + &fbInfoParams, sizeof(fbInfoParams), + status); + if (status == NV_OK) { + bar1Info->bar1Size = fbInfoParams.fbInfoList[0].data; + bar1Info->bar1AvailSize = fbInfoParams.fbInfoList[1].data; + bar1Info->bar1MaxContigAvailSize = fbInfoParams.fbInfoList[2].data; + bar1Info->bankSwizzleAlignment = fbInfoParams.fbInfoList[3].data; + } + return status; + } + + pBar1VAS = kbusGetBar1VASpace_HAL(pGpu, pKernelBus); + pVASHeap = vaspaceGetHeap(pBar1VAS); + + NV_ASSERT_OK_OR_RETURN(kbusGetBar1VARangeForClient(pGpu, pKernelBus, hClient, &bar1VARange)); + bar1Info->bar1Size = (NvU32)(rangeLength(bar1VARange) / 1024); + bar1Info->bankSwizzleAlignment = vaspaceGetBigPageSize(pBar1VAS); + + bar1Info->bar1AvailSize = 0; + + if (pVASHeap != NULL) + { + pVASHeap->eheapInfoForRange(pVASHeap, bar1VARange, NULL, &largestFreeSize, NULL, &freeSize); + bar1Info->bar1AvailSize = (NvU32)(freeSize / 1024); + bar1Info->bar1MaxContigAvailSize = (NvU32)(largestFreeSize / 1024); + } + return NV_OK; +} + +NvU32 +memmgrGetReservedHeapSizeMb_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NvU32 i; + NvU64 rsvdSize = 0; + + // Display and tally the results to make sure the numbers add up. 
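+    // Reservations are accumulated in bytes and converted to MB before returning.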
+ for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + rsvdSize += pMemoryManager->Ram.fbRegion[i].rsvdSize; + + if (pMemoryManager->Ram.fbRegion[i].rsvdSize > 0) + { + NV_PRINTF(LEVEL_INFO, "FB region #%d:rsvdSize=%d\n", i, + NvU64_LO32(pMemoryManager->Ram.fbRegion[i].rsvdSize)); + } + } + + rsvdSize = rsvdSize / (1024 * 1024); // convert byte to MB + + return (NvU64_LO32(rsvdSize)); +} + +/*! + * @brief Set up additional RM reserved memory space for physical carveout. + */ +static void +memmgrStateInitReservedMemory +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + if (IS_GSP_CLIENT(pGpu)) + return; + +} + +/*! + * @brief Correct RM reserved memory addresses by adding region base to them. + * Before this point, all reserved memory addresses started at 0. + */ +static NV_STATUS +memmgrStateInitAdjustReservedMemory +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + + return NV_OK; +} + +/*! + * @brief Checks if the reserved memory size passed fits in the bar0 window + * + * @param[in] rsvdMemorySizeBytes The value to check against the bar0 size + */ +NV_STATUS +memmgrCheckReservedMemorySize_GK104 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV_ASSERT_OR_RETURN(pMemoryManager->rsvdMemorySize < DRF_SIZE(NV_PRAMIN), NV_ERR_INSUFFICIENT_RESOURCES); + + return NV_OK; +} + +/*! + * @brief - This routine initializes the reserved video memory + * regions specific to GPUs using this HAL entry point; size + * arguments are in units of bytes. + * + * @param[in] fbSize The size of video memory + */ +NV_STATUS +memmgrInitReservedMemory_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 fbSize +) +{ + NvU64 tmpAddr = 0; + NvU32 i; + NvBool bRsvdRegionIsValid = NV_FALSE; + NvU32 rsvdRegion = 0; + NvU64 rsvdTopOfMem = 0; + NvU64 rsvdAlignment = 0; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) + { + if (memmgrComputeAndSetVgaDisplayMemoryBase_GM107(pGpu, fbSize) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to compute/set VGA display memory base!\n"); + DBG_BREAKPOINT(); + } + } + + memmgrStateInitReservedMemory(pGpu, pMemoryManager); + + // Align reserved memory to 64K granularity + pMemoryManager->rsvdMemorySize = NV_ALIGN_UP(pMemoryManager->rsvdMemorySize, 0x10000); + + NV_PRINTF(LEVEL_INFO, "Final reserved memory size = 0x%x\n", pMemoryManager->rsvdMemorySize); + + if (!IS_VIRTUAL(pGpu)) + { + // + // Reserved memory must fit in BAR0 window - well compression backing is after this. + // Does not matter for GSP itself as BAR0 is not used. + // + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_PLATFORM_GSP || + memmgrCheckReservedMemorySize_HAL(pGpu, pMemoryManager) == NV_OK, NV_ERR_INSUFFICIENT_RESOURCES); + } + + NV_PRINTF(LEVEL_INFO, "RESERVED Memory size: 0x%x\n", pMemoryManager->rsvdMemorySize); + + // *************************************************************** + // Done sizing reserved memory + // *************************************************************** + + if (pMemoryManager->Ram.numFBRegions > 0) + { + // + // Find the last region in memory which is not already reserved or + // protected. RM's reserved memory will then be carved out of it below + // (once the final size and address are determined). 
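+        // The scan below keeps the last region that is neither reserved nor protected.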
+ // + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + if (pMemoryManager->Ram.fbRegion[i].bRsvdRegion || + pMemoryManager->Ram.fbRegion[i].bProtected) + continue; + + bRsvdRegionIsValid = NV_TRUE; + rsvdRegion = i; + } + } + + + // No need to create a reserved region for vGPU. + // For vGPU, memory required for the host will be reserved separately. + // + if (IS_VIRTUAL(pGpu)) + { + bRsvdRegionIsValid = NV_FALSE; + } + + rsvdAlignment = RM_PAGE_SIZE; + + // + // Generate the FB physical offset of reserved mem. + // + // In L2 cache only mode, base this off the size of L2 cache + // If reserved memory at top of FB, base this off the size of FB + // + if (gpuIsCacheOnlyModeEnabled(pGpu) || !pMemorySystemConfig->bReservedMemAtBottom) + { + { + rsvdTopOfMem = pMemoryManager->Ram.fbAddrSpaceSizeMb << 20; + + // + // We are assuming that subheap is at the end of guest FB. We place + // the guest RM reserved region at the end of the guest client owned + // portion of the guest FB (total guest FB minus the subheap). The + // guest FB is partitioned in the following way (Addresses increasing + // from left to right). + // + // Region 0 Region 1 Region 2 + // [Guest client owned FB] [Guest RM reserved region] [Guest subheap] + // + // Guest heap is created only for Region 0. + // + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + rsvdTopOfMem -= memmgrGetFbTaxSize_HAL(pGpu, pMemoryManager); + + if (bRsvdRegionIsValid) + { + rsvdTopOfMem = NV_MIN(pMemoryManager->Ram.fbRegion[rsvdRegion].limit + 1, rsvdTopOfMem); + } + } + tmpAddr = rsvdTopOfMem - pMemoryManager->rsvdMemorySize; + pMemoryManager->rsvdMemoryBase = RM_ALIGN_DOWN(tmpAddr, rsvdAlignment); + pMemoryManager->rsvdMemorySize = NvU64_LO32(rsvdTopOfMem - pMemoryManager->rsvdMemoryBase); + + // make sure we didn't just blindly truncate that... 
+ NV_ASSERT(0 == NvU64_HI32(rsvdTopOfMem - pMemoryManager->rsvdMemoryBase)); + } + // Reserved memory located at bottom of FB, base this at start of FB + else + { + tmpAddr = pMemoryManager->heapStartOffset; + if (bRsvdRegionIsValid) + { + tmpAddr = NV_MAX(pMemoryManager->Ram.fbRegion[rsvdRegion].base, tmpAddr); + } + pMemoryManager->rsvdMemoryBase = RM_ALIGN_UP(tmpAddr, rsvdAlignment); + pMemoryManager->rsvdMemorySize = RM_PAGE_ALIGN_UP(pMemoryManager->rsvdMemorySize); + } + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB)) + { + NV_ASSERT(pMemoryManager->Ram.fbUsableMemSize >= pMemoryManager->rsvdMemorySize); + pMemoryManager->Ram.fbUsableMemSize -= RM_PAGE_ALIGN_UP(pMemoryManager->rsvdMemorySize); + } + + // Now update the region table to remove rsvd memory + if (bRsvdRegionIsValid && pMemoryManager->rsvdMemorySize) + { + FB_REGION_DESCRIPTOR rsvdFbRegion; + portMemSet(&rsvdFbRegion, 0, sizeof(rsvdFbRegion)); + + // Add a new region that is a hole for reserved memory + rsvdFbRegion.bRsvdRegion = NV_TRUE; + rsvdFbRegion.base = pMemoryManager->rsvdMemoryBase; + rsvdFbRegion.limit = + pMemoryManager->rsvdMemoryBase + pMemoryManager->rsvdMemorySize - 1; + rsvdFbRegion.performance = 0; + rsvdFbRegion.bSupportCompressed = NV_FALSE; + rsvdFbRegion.bSupportISO = NV_FALSE; + rsvdFbRegion.rsvdSize = pMemoryManager->rsvdMemorySize; + rsvdFbRegion.bProtected = NV_FALSE; + rsvdFbRegion.bInternalHeap = NV_TRUE; + + memmgrInsertFbRegion(pGpu, pMemoryManager, &rsvdFbRegion); + } + + // Add above reserved FB region base to reserved memory + NV_ASSERT_OK_OR_RETURN(memmgrStateInitAdjustReservedMemory(pGpu, pMemoryManager)); + + return NV_OK; +} + +/** + * @brief Compares two addresses and apertures and returns if they are equivalent. + * + * @param[in] target0 + * @param[in] address0 + * @param[in] target1 + * @param[in] address1 + * + * @return NV_TRUE if adresses refer to same memory location, NV_FALSE otherwise + */ +NvBool +memmgrComparePhysicalAddresses_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 target0, + NvU64 address0, + NvU32 target1, + NvU64 address1 +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + + // + // Sysmem inst blocks can be flipped: + // * PDB_PROP_FIFO_BUG_442481_NCOH_INST_BLOCK_DEF: ncoh -> coh + // For system memory there is no harm in matching both sysmem + // apertures, it is really only vital that we check between vid + // and system memory. Force both to SYS NCOH if system coherent. + // + if (target0 == NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY) + target0 = NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY; + if (target1 == NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY) + target1 = NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY; + + if (target0 == NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY) + { + // + // One of the addresses may not account for the DMA window while the + // other does. Given the nature of the DMA window (its offset must be + // outside the addressable range of the GPU or 0), there's no danger + // in trying to account for it here; it can't cause any false + // positives. + // + if (address0 < address1) + address0 += pKernelBif->dmaWindowStartAddress; + else if (address1 < address0) + address1 += pKernelBif->dmaWindowStartAddress; + } + + return (target0 == target1) && (address0 == address1); +} + +/*! + * @brief - This routine computes the location in memory to + * relocate VGA display memory to; size arguments are + * expected in units of bytes. 
+ * + * @param[in] pGpu GPU object pointer + * @param[in] pFb FB object pointer + * @param[in] vbiosSpaceSize The current size of the VBIOS space + * @param[in] fbSize The size of video memory + */ +static NV_STATUS +memmgrComputeAndSetVgaDisplayMemoryBase_GM107 +( + OBJGPU *pGpu, + NvU64 fbSize +) +{ + + return NV_OK; +} + +/*! + * @brief: Returns the PTE kind of block linear surfaces + */ +NvU32 +memmgrGetPteKindBl_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return NV_MMU_PTE_KIND_GENERIC_16BX2; +} + +/*! + * @brief: Returns the PTE kind of pitch linear surfaces + */ +NvU32 +memmgrGetPteKindPitch_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return NV_MMU_PTE_KIND_PITCH; +} + +#define PAGE_ALIGN_MATCH(value, pageSize) ((value & (pageSize - 1)) == 0) + +// +// Try to determine the optimial page size. See if both the aligment of the +// physical address and the alignment of the allocation size fit one of the +// larger page sizes. +// +static RM_ATTR_PAGE_SIZE +_memmgrGetOptimalSysmemPageSize +( + RmPhysAddr physAddr, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 bigPageSize, + NvU64 sysmemPageSize +) +{ + NvBool bIsContiguous = memdescGetContiguity(pMemDesc, AT_GPU); + + // + // Optimization currently only applies to contiguous memory. + // + if (bIsContiguous) + { + if (PAGE_ALIGN_MATCH(physAddr, RM_PAGE_SIZE_HUGE) && + PAGE_ALIGN_MATCH(pMemDesc->Size, RM_PAGE_SIZE_HUGE)) + { + return RM_PAGE_SIZE_HUGE; + } + + if (PAGE_ALIGN_MATCH(physAddr, bigPageSize) && + PAGE_ALIGN_MATCH(pMemDesc->Size, bigPageSize)) + { + return bigPageSize; + } + } + + return sysmemPageSize; +} + +#undef PAGE_ALIGN_MATCH + +// +// Set the page size for the memory descriptor. The page size for a piece of memory +// may be set when it is mapped or when it is bound to a display channel. Current +// algorithm is simple. Default is 4KB in system memory (only choice) or large +// pages in video memory if the allocation is larger than the small page size. +// +// Some APIs allow the page size to be specified. Allow this if the page size is unset, +// other wise error check it against the existing page size. +// +// We depend on fbgf100.c rounding up allocations to 4KB or bigPageSize to have coherent +// mapping sizes. This does not show up in pMemDesc->Size at this point, so we have +// to trust that nothing is overlapping and cannot do full error checking. +// +// Big and huge pages are supported only in vidmem by default. In order to support +// big/huge pages in sysmem as is required by ATS (on Volta) and a few arch tests on Pascal +// (better TLB hit), we need to set the regkey RMSysmemPageSize equal to the page size. +// See bugs 1700272 and 1622233. +// +// TODO: Due to the page size swizzling, allocations should not physically overlap +// within their swizzle range. I am not sure the heap enforces this. +// +// NOTE: Scattered vidmem with big pages not checked right as it is not supported yet +// +NV_STATUS +memmgrSetMemDescPageSize_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + ADDRESS_TRANSLATION addressTranslation, + RM_ATTR_PAGE_SIZE pageSizeAttr +) +{ + NvU32 newPageSize = RM_PAGE_SIZE; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NV_ADDRESS_SPACE addrSpace = memdescGetAddressSpace(pMemDesc); + NvU32 oldPageSize; + + // This policy is meaningless for virtual memdescs, so abort early. 
+    if (ADDR_VIRTUAL == addrSpace)
+    {
+        return NV_OK;
+    }
+
+    if (ADDR_SYSMEM == addrSpace)
+    {
+        RmPhysAddr physAddr = memdescGetPte(pMemDesc, addressTranslation, 0);
+        switch (pageSizeAttr)
+        {
+            case RM_ATTR_PAGE_SIZE_INVALID:
+                NV_PRINTF(LEVEL_ERROR, "invalid page size attr\n");
+                return NV_ERR_INVALID_ARGUMENT;
+            case RM_ATTR_PAGE_SIZE_DEFAULT:
+                newPageSize = _memmgrGetOptimalSysmemPageSize(physAddr,
+                        pMemDesc, kgmmuGetBigPageSize_HAL(pKernelGmmu),
+                        pMemoryManager->sysmemPageSize);
+                break;
+            case RM_ATTR_PAGE_SIZE_4KB:
+                newPageSize = RM_PAGE_SIZE;
+                break;
+            case RM_ATTR_PAGE_SIZE_BIG:
+                newPageSize = kgmmuGetBigPageSize_HAL(pKernelGmmu);
+                break;
+            case RM_ATTR_PAGE_SIZE_HUGE:
+                NV_ASSERT_OR_RETURN(kgmmuIsHugePageSupported(pKernelGmmu),
+                                    NV_ERR_NOT_SUPPORTED);
+                // If forcing the huge page size the underlying memory must be aligned
+                NV_ASSERT_OR_RETURN(0 == (physAddr & (RM_PAGE_SIZE_HUGE - 1)), NV_ERR_INVALID_OFFSET);
+                newPageSize = RM_PAGE_SIZE_HUGE;
+                break;
+            case RM_ATTR_PAGE_SIZE_512MB:
+                NV_ASSERT_OR_RETURN(kgmmuIsPageSize512mbSupported(pKernelGmmu),
+                                    NV_ERR_NOT_SUPPORTED);
+                // If forcing the 512MB page size the underlying memory must be aligned
+                NV_ASSERT_OR_RETURN(0 == (physAddr & (RM_PAGE_SIZE_512M - 1)), NV_ERR_INVALID_OFFSET);
+                newPageSize = RM_PAGE_SIZE_512M;
+                break;
+        }
+    }
+    else if (ADDR_FBMEM == addrSpace)
+    {
+        RmPhysAddr physAddr = memdescGetPte(pMemDesc, addressTranslation, 0);
+        switch (pageSizeAttr)
+        {
+            case RM_ATTR_PAGE_SIZE_INVALID:
+                NV_PRINTF(LEVEL_ERROR, "invalid page size attr\n");
+                return NV_ERR_INVALID_ARGUMENT;
+            case RM_ATTR_PAGE_SIZE_DEFAULT:
+            {
+                NvBool bUseDefaultHugePagesize = NV_TRUE;
+                // WDDMv2 (Windows) expects the default page size to be 4KB / 64KB / 128KB
+                // Big enough and aligned for huge pages?
+                if (bUseDefaultHugePagesize &&
+                    kgmmuIsHugePageSupported(pKernelGmmu) &&
+                    (pMemDesc->Size >= RM_PAGE_SIZE_HUGE) &&
+                    (0 == (physAddr & (RM_PAGE_SIZE_HUGE - 1))))
+                {
+                    newPageSize = RM_PAGE_SIZE_HUGE;
+                }
+                // Big enough and aligned for big pages?
+ else if (((pMemDesc->Size >= kgmmuGetMinBigPageSize(pKernelGmmu)) || + (memmgrIsKindCompressible_HAL(pMemoryManager, memdescGetPteKind(pMemDesc)))) && + ((physAddr & (kgmmuGetMaxBigPageSize_HAL(pKernelGmmu) - 1)) == 0)) + { + newPageSize = kgmmuGetBigPageSize_HAL(pKernelGmmu); + } + break; + } + case RM_ATTR_PAGE_SIZE_4KB: + newPageSize = RM_PAGE_SIZE; + break; + case RM_ATTR_PAGE_SIZE_BIG: + newPageSize = kgmmuGetBigPageSize_HAL(pKernelGmmu); + // If forcing the big page size the underlying memory must be aligned + NV_ASSERT_OR_RETURN(0 == (physAddr & (newPageSize - 1)), NV_ERR_INVALID_OFFSET); + break; + case RM_ATTR_PAGE_SIZE_HUGE: + NV_ASSERT_OR_RETURN(kgmmuIsHugePageSupported(pKernelGmmu), + NV_ERR_NOT_SUPPORTED); + // If forcing the huge page size the underlying memory must be aligned + NV_ASSERT_OR_RETURN(0 == (physAddr & (RM_PAGE_SIZE_HUGE - 1)), NV_ERR_INVALID_OFFSET); + newPageSize = RM_PAGE_SIZE_HUGE; + break; + case RM_ATTR_PAGE_SIZE_512MB: + NV_ASSERT_OR_RETURN(kgmmuIsPageSize512mbSupported(pKernelGmmu), + NV_ERR_NOT_SUPPORTED); + // If forcing the 512MB page size the underlying memory must be aligned + NV_ASSERT_OR_RETURN(0 == (physAddr & (RM_PAGE_SIZE_512M - 1)), NV_ERR_INVALID_OFFSET); + newPageSize = RM_PAGE_SIZE_512M; + break; + } + } + + // Only update the memory descriptor if it is unset + oldPageSize = memdescGetPageSize(pMemDesc, addressTranslation); + if (0 == oldPageSize) + { + memdescSetPageSize(pMemDesc, addressTranslation, newPageSize); + } + else if (pageSizeAttr != RM_ATTR_PAGE_SIZE_DEFAULT) + { + // If this memdesc already has a page size, the override must match + NV_ASSERT_OR_RETURN(oldPageSize == newPageSize, NV_ERR_INVALID_ARGUMENT); + } + + return NV_OK; +} + +/*! + * @brief Calculate the Vista reserved memory requirement + * per FB region for UVM to create sysmem mappings for UVM objects. + * + * @param[out] rsvdSlowSize generic reserved RM memory needed in slow region + */ +void +memmgrCalcReservedFbSpaceForUVM_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *rsvdSlowSize +) +{ + // + // For WDDM-UVM, reserve space to create identity mapping (deviceVA = devicePA). (Kepler only) + // + *rsvdSlowSize += + 1 * 1024 * 1024; // 1MB space to map 4K pages of ~512MB sysmem = Pushbuffers(480MB) + SemaphoreVA(8KB) + PDE(512 entries * 8) +} + +/*! + * @brief Calculate the reserved memory requirement for pre-allocated UserD. + * + * @return Size of UserD reserved memory in bytes. + */ +NvU32 +memmgrGetUserdReservedFbSpace_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return 2 * 1024 * 1024; +} + +/*! + * @brief - This function will return the size reserved for WDDM + * S/R buffer. RM is returning a bigger size just so that, our S/R buffer + * consumption will never go beyond that. There is no deterministic way to find this + * value, so returning a constant here. + * Note: OS doesn't really allocate any buffer for the size, we will get the real + * memory only when we try to map during suspend/resume. 
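+ *
+ * @returns Size in bytes to reserve for the WDDM S/R buffer.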
+ */ +NvU64 +memmgrGetRsvdSizeForSr_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + if ((pMemoryManager->Ram.fbTotalMemSizeMb >> 10) > 32) + { + // + // For SKUs with more than 32GB FB, need to reserve for more memory for S/R + // Bug Id:2468357 + // + return 512 * 1024 * 1024; + } + else + { + return 256 * 1024 * 1024; + } +} + +NvU32 +memmgrGetRunlistEntriesReservedFbSpace_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + // Kepler Runlist: 4096 entries * 8B/entry * 7 engines * 2 runlists/engine = 458K + return (4096 * 8 * 7 * 2); +} + + +/*! + * @brief Override Scrubber related PDB properties based on regkeys and platform configs + */ +void +memmgrScrubRegistryOverrides_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + // + // Disabling the SCRUB_ON_FREE property for all the platforms except Windows TCC Mode. + // Disabling in Non-TCC windows because the OS manages FB + // Disabling in RTLSIM and FMODEL because the feature is slower in simulation platforms + // Disabling in DFPGA, since they skip the Host Load + // Disabling in MODS, because scrub on free slows down the MODS run time. + // Disabling for vGPU (host), since the plugin has scrubbing support + // Disabling for VGPU (guest), blocked on bug #1929798 + // Disabling for SLI for now, until the bug # 1790190 is fixed. + // Disabling for GSP-RM ucode, since scrubbing is done from CPU-side kernel RM. + // Disabling when reg key override is used + if ((RMCFG_FEATURE_PLATFORM_WINDOWS && !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TCC_MODE)) || + IS_RTLSIM(pGpu) || IS_FMODEL(pGpu) || IsDFPGA(pGpu) || + (RMCFG_FEATURE_PLATFORM_MODS && !pMemorySystemConfig->bOneToOneComptagLineAllocation) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU) || + IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + RMCFG_FEATURE_PLATFORM_GSP || + IsSLIEnabled(pGpu)) + { + pMemoryManager->bScrubOnFreeEnabled = NV_FALSE; + } + + if (pMemoryManager->bDisableAsyncScrubforMods) + { + // need to force disable the scrub on free in case the wrong set of regkeys are set + pMemoryManager->bScrubOnFreeEnabled = NV_FALSE; + } + + if ((IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) || + pMemorySystemConfig->bOneToOneComptagLineAllocation) + { + pMemoryManager->bUseVasForCeMemoryOps = NV_TRUE; + } +} + +/*! + * @Get the top of memory in MB + * + * Calculate the actual physical address space size of FB, without + * regard for overrides or caps. + * + * @returns the physical address space size of FB, which is greater + * than or equal to the populated FB memory size + */ +NvU32 +memmgrGetAddrSpaceSizeMB_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV_ASSERT(pMemoryManager->Ram.fbAddrSpaceSizeMb != 0); + + return NvU64_LO32(pMemoryManager->Ram.fbAddrSpaceSizeMb); +} + +// +// Get fb ram size (usable and mappable). +// +NvU32 +memmgrGetUsableMemSizeMB_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV_ASSERT(pMemoryManager->Ram.fbAddrSpaceSizeMb != 0); + + // we shouldn't ever need this, but... 
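+    // (usable FB size in MB is returned as a 32-bit value, so the assert catches
+    //  anything that NvU64_LO32() below would silently truncate)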
+ NV_ASSERT(0 == NvU64_HI32(pMemoryManager->Ram.fbUsableMemSize >> 20)); + return NvU64_LO32(pMemoryManager->Ram.fbUsableMemSize >> 20); +} + +#define _MAX_COVG (100*NVOS32_ALLOC_COMPR_COVG_SCALE) + +// +// memmgrGetBankPlacementData +// +NV_STATUS +memmgrGetBankPlacementData_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 *pPlacementStrategy +) +{ + // set up bank placement arrays + pPlacementStrategy[BANK_PLACEMENT_IMAGE] = ((0) + | BANK_MEM_GROW_UP + | MEM_GROW_UP + | 0xFFFFFF00); + pPlacementStrategy[BANK_PLACEMENT_DEPTH] = ((0) + | BANK_MEM_GROW_DOWN + | MEM_GROW_DOWN + | 0xFFFFFF00); + pPlacementStrategy[BANK_PLACEMENT_TEX_OVERLAY_FONT] = ((0) + | BANK_MEM_GROW_DOWN + | MEM_GROW_UP + | 0xFFFFFF00); + pPlacementStrategy[BANK_PLACEMENT_OTHER] = ((0) + | BANK_MEM_GROW_DOWN + | MEM_GROW_DOWN + | 0xFFFFFF00); + return (NV_OK); +} + +// +// memmgrDirtyForPmTest +// Developed soley for testing suspend/resume path. Goal here is before +// resuming the GPU, we want to dirty the entire FB to verify whether +// RM has saved and restored all the critical data structures and states +// during suspend/resume. Called using the RMCTRL NV208F_CTRL_CMD_SUSPEND_RESUME_QUICK. +// WARNING: This function uses BAR0 window (which is always physical) +// to dirty the FB contents. Upon exit of this call FB ram is dirty and +// cannot be used, unless all the data needed is restored during resume. +// +void +memmgrDirtyForPmTest_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvBool partialDirty +) +{ +} + +/*! + * @Return an invalid placeholder FB offset. Should be 128KB aligned for Fermi chips. + * + * @returns offset + */ +RmPhysAddr +memmgrGetInvalidOffset_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return 0xdead000000000000ull; +} + +/*! + * @brief Get blacklist page details. + * + * @param[in] pGpu OBJGPU + * @param[in] pMemoryManager MemoryManager + * @param[out] pBlAddrs BLACKLIST_ADDRESSES where count is taken + * as input and the addressed and count is + * returned. + * @param[in/out] pCount Takes size of pBlAddrs as input and returns + * the number of populated addresses in + * pBlAddrs. + @returns NV_STATUS + * + */ +NV_STATUS +memmgrGetBlackListPages_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + BLACKLIST_ADDRESS *pBlAddrs, + NvU32 *pCount +) +{ + RM_API *pRmApi; + NV_STATUS status = NV_OK; + NvU32 idx; + NvU32 entryIdx = 0; + NV2080_CTRL_FB_GET_OFFLINED_PAGES_PARAMS *pParams; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT) || + !gpuCheckPageRetirementSupport_HAL(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // Read the inforom for a list of pages to blacklist. + // SLI support requires investigation to ensure + // identical heaps on both devices (bug 756971). + // + if (IsSLIEnabled(pGpu) && !gpuIsEccPageRetirementWithSliAllowed(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + pParams = portMemAllocStackOrHeap(sizeof(*pParams)); + if (pParams == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(pParams, 0, sizeof(*pParams)); + + pRmApi = IS_GSP_CLIENT(pGpu) ? 
GPU_GET_PHYSICAL_RMAPI(pGpu) : + rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_FB_GET_OFFLINED_PAGES, + pParams, + sizeof(*pParams)); + + if (status == NV_OK) + { + for (idx = 0; idx < pParams->validEntries; idx++) + { + if (entryIdx >= *pCount) + { + status = NV_ERR_BUFFER_TOO_SMALL; + goto done; + } + if (pMemorySystemConfig->bEnabledEccFBPA) + { + pBlAddrs[entryIdx].address = + pParams->offlined[idx].pageAddressWithEccOn << RM_PAGE_SHIFT; + } + else + { + pBlAddrs[entryIdx].address = + pParams->offlined[idx].pageAddressWithEccOff << RM_PAGE_SHIFT; + } + pBlAddrs[entryIdx].type = pParams->offlined[idx].source; + entryIdx++; + } + } + else if (NV_ERR_NOT_SUPPORTED == status) + { + NV_PRINTF(LEVEL_INFO, + "Offlining pages not supported\n"); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Failed to read offlined addresses\n"); + } + +done: + *pCount = entryIdx; + + portMemFreeStackOrHeap(pParams); + + return status; +} + +// +// Get the blackList pages and notify heap +// +NV_STATUS +memmgrGetBlackListPagesForHeap_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + Heap *pHeap +) +{ + BLACKLIST_ADDRESS *pBlAddrs; + NvU32 idx; + NV_STATUS status; + NvU32 count; + + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + count = pMemorySystemConfig->maximumBlacklistPages; + pBlAddrs = portMemAllocNonPaged(sizeof(BLACKLIST_ADDRESS) * count); + if (pBlAddrs == NULL) + { + return NV_ERR_NO_MEMORY; + } + + status = memmgrGetBlackListPages_HAL(pGpu, pMemoryManager, pBlAddrs, &count); + NV_ASSERT(status != NV_ERR_BUFFER_TOO_SMALL); + + if (status == NV_OK) + { + for (idx = 0; idx < count; idx++) + { + + status = heapAddPageToBlackList(pGpu, pHeap, + pBlAddrs[idx].address >> RM_PAGE_SHIFT, + pBlAddrs[idx].type); + + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "No more space in blacklist, status: %x!\n", status); + NV_ASSERT(0); + break; + } + } + } + + portMemFree(pBlAddrs); + + // Failure to read offlined pages from host is not fatal + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_mgr_gm200.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_mgr_gm200.c new file mode 100644 index 000000000..109c7ac68 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_mgr_gm200.c @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/mem_desc.h" + +/*! + * Returns the max context size + * + * @returns NvU64 + */ +NvU64 +memmgrGetMaxContextSize_GM200 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NvU64 size; + + // + // This function's original purpose was to estimate how much heap memory RM + // needs to keep in reserve from Windows LDDM driver to pass WHQL MaxContexts + // test. This estimation is done after heap init before KMD allocates a + // kernel-managed chunk. + // UVM & PMA similarly require RM to estimate how much heap memory RM needs + // to reserve for page tables, contexts, etc. This estimation is used during + // heap init to divide the FB into internal heap and external PMA managed + // spaces. + // + + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL)) + { + // KMD in WDDM mode + // 640KB per context and WHQL_TEST_MAX_CONTEXTS(100) contexts + size = 640 * 1024 * WHQL_TEST_MAX_CONTEXTS; + // Additional 50MB in case of SLI + if (IsSLIEnabled(pGpu)) + { + size += (50 * 1024 * 1024); + } + } + else + { + // KMD in TCC mode + // + // Reserve enough memory for a moderate number of page tables. + size = 48 * 1024 * 1024; + } + } + else if (RMCFG_FEATURE_PLATFORM_MODS) + { + // TODO: Remove the PMA check after enabling on all chips. + if (memmgrIsPmaInitialized(pMemoryManager) && + !memmgrAreClientPageTablesPmaManaged(pMemoryManager)) + { + // Reserve enough memory for a moderate context size. + size = 32 * 1024 * 1024; + } + else + { + // Reserve 16M -- MODS doesn't need RM to reserve excessive memory + size = 16 * 1024 * 1024; + } + } + else + { + if (memmgrIsPmaInitialized(pMemoryManager)) + { + // + // We need to estimate the reserved memory needs before PMA is initialized + // Reserve enough memory for a moderate number of page tables + // + size = 32 * 1024 * 1024; + } + else + { + // Non-specific platform -- non-specific reserved memory requirements + size = 0; + } + } + return size; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_utils_gm107.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_utils_gm107.c new file mode 100644 index 000000000..339af8cfd --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/mem_utils_gm107.c @@ -0,0 +1,2541 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_mgr/mem_scrub.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/ce/kernel_ce_private.h" +#include "mem_mgr/gpu_vaspace.h" +#include "core/locks.h" +#include "nvRmReg.h" +#include "rmapi/rs_utils.h" +#include "mem_mgr/ctx_buf_pool.h" +#include "gpu/subdevice/subdevice.h" +#include "vgpu/rpc.h" +#include "kernel/gpu/fifo/kernel_channel.h" + +#include "class/clc0b5sw.h" +#include "class/cla06fsubch.h" // NVA06F_SUBCHANNEL_COPY_ENGINE +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "class/cl0080.h" // NV01_DEVICE_0 +#include "class/cl50a0.h" // NV50_MEMORY_VIRTUAL +#include "class/clc637.h" // AMPERE_SMC_PARTITION_REF +#include "class/cl00c2.h" // NV01_MEMORY_LOCAL_PHYSICAL +#include "class/clb0b5.h" // MAXWELL_DMA_COPY_A +#include "class/cl0005.h" // NV01_EVENT +#include "class/cl90f1.h" // FERMI_VASPACE_A + +#define NONSTALL_METHOD_SIZE 8 +#define SEMAPHORE_ONLY_METHOD_SIZE 32 +#define MAX_EXTRA_PAYLOAD (NONSTALL_METHOD_SIZE + SEMAPHORE_ONLY_METHOD_SIZE) + + +static NV_STATUS _memUtilsChannelAllocatePB_GM107(OBJGPU *pGpu, MemoryManager *pMemoryManager, OBJCHANNEL *pChannel); +static NV_STATUS _memUtilsAllocateChannel(OBJGPU *pGpu, MemoryManager *pMemoryManager, NvHandle hClientId, + NvHandle hDeviceId, NvHandle hChannelId, NvHandle hObjectError, + NvHandle hObjectBuffer, OBJCHANNEL *pChannel); +static NV_STATUS _memUtilsAllocCe_GM107(OBJGPU *pGpu, MemoryManager *pMemoryManager, OBJCHANNEL *pChannel, + NvHandle hClientId, NvHandle hChannelId, NvHandle hCopyObjectId); +static NV_STATUS _memUtilsAllocateUserD(OBJGPU *pGpu, MemoryManager *pMemoryManager, NvHandle hClientId, + NvHandle hDeviceId, OBJCHANNEL *pChannel); +static NV_STATUS _memUtilsMapUserd_GM107(OBJGPU *pGpu, MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, NvHandle hClientId, NvHandle hDeviceId, + NvHandle hChannelId); +static NV_STATUS _memUtilsAllocateReductionSema(OBJGPU *pGpu, MemoryManager *pMemoryManager, OBJCHANNEL *pChannel); +static NvU32 _ceChannelScheduleBatchWork_GM107(OBJGPU *pGpu, MemoryManager *pMemoryManager, OBJCHANNEL *pChannel, + RmPhysAddr src, NV_ADDRESS_SPACE srcAddressSpace, NvU32 srcCpuCacheAttrib, + RmPhysAddr dst, NV_ADDRESS_SPACE dstAddressSpace, NvU32 dstCpuCacheAttrib, + NvU64 size, NvBool bMemcopy); +static NvU32 _ceChannelScheduleWork_GM107(OBJGPU *pGpu, MemoryManager *pMemoryManager, OBJCHANNEL *pChannel, + RmPhysAddr src, NV_ADDRESS_SPACE srcAddressSpace, NvU32 srcCpuCacheAttrib, + RmPhysAddr dst, NV_ADDRESS_SPACE dstAddressSpace, NvU32 dstCpuCacheAttrib, + NvU64 size, NvBool blocking, NvBool insertFinishPayload, NvBool bMemcopy); +static void _ceChannelUpdateGpFifo_GM107(OBJGPU *pGpu, MemoryManager *pMemoryManager, OBJCHANNEL *pChannel, + NvU32 gpOffset,NvU32 gpSize); +static 
NvU32 _ceChannelPushMethodsBlock_GM107(OBJGPU *pGpu, MemoryManager *pMemoryManager, OBJCHANNEL *pChannel, + RmPhysAddr src, NV_ADDRESS_SPACE srcAddressSpace, NvU32 srcCpuCacheAttrib, + RmPhysAddr dst, NV_ADDRESS_SPACE dstAddressSpace, NvU32 dstCpuCacheAttrib, + NvU64 size, NvU32 **pPtr, NvBool addPayloadSema, + NvBool addNonStallIntr, NvBool addFinishPayload, NvBool bMemcopy); +static NvU32 _getSpaceInPb(OBJCHANNEL *pChannel); +static NvBool _checkSynchronization(OBJGPU *pGpu, MemoryManager *pMemoryManager, OBJCHANNEL *pChannel, NvU32 block); + +static NV_STATUS +_memUtilsAllocateReductionSema +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel +) +{ + + NV_MEMORY_ALLOCATION_PARAMS memAllocParams; + NV_STATUS rmStatus; + NvU32 i; + NV_STATUS lockStatus; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + // allocate physical memory for a bit map semaphore + portMemSet(&memAllocParams, 0, sizeof(memAllocParams)); + + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = (((pChannel->blockCount + 31)/32)*4); + memAllocParams.attr = 0; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + // memAllocParams.attr |= NVOS32_ATTR_COHERENCY_WRITE_COMBINE; + memAllocParams.attr2 = NVOS32_ATTR2_NONE; + memAllocParams.flags = 0; + + NV_ASSERT_OK_OR_RETURN( + pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->bitMapSemPhysId, + NV01_MEMORY_SYSTEM, + &memAllocParams)); + + // allocate virtual memory for a bit map semaphore + portMemSet(&memAllocParams, 0, sizeof(memAllocParams)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = (((pChannel->blockCount + 31)/32)*4); + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + memAllocParams.attr2 = NVOS32_ATTR2_NONE; + memAllocParams.flags = 0; + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_VIRTUAL; + + NV_ASSERT_OK_OR_RETURN( + pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->bitMapSemVirtId, + NV50_MEMORY_VIRTUAL, + &memAllocParams)); + + lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM); + if(lockStatus != NV_OK) + { + NV_ASSERT_FAILED("Could not get back lock after allocating reduction sema"); + return NV_ERR_GENERIC; + } + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + pRmApi->Map(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->bitMapSemVirtId, + pChannel->bitMapSemPhysId, //hMemory, + 0, + (((pChannel->blockCount + 31)/32)*4), + NV04_MAP_MEMORY_FLAGS_NONE, + &pChannel->pbGpuBitMapVA), + exit_sema_creation); + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + pRmApi->MapToCpu(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->bitMapSemPhysId, + 0, + (((pChannel->blockCount + 31)/32)*4), + (void **)&pChannel->pbBitMapVA, + 0), + exit_sema_creation); + + for(i = 0; i < (((pChannel->blockCount + 31) / 32) * 4);) + { + MEM_WR32((NvU8*)pChannel->pbBitMapVA + (i), 0); + i = i + 4; + } + + return NV_OK; +exit_sema_creation: + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hClient); + NV_PRINTF(LEVEL_INFO, "end NV_STATUS=0x%08x\n", rmStatus); + return rmStatus; +} + +static NV_STATUS +_memUtilsChannelAllocatePB_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel + + // OBJMEMUTILS *to be added here +) +{ + NV_STATUS rmStatus = NV_OK; + 
NV_MEMORY_ALLOCATION_PARAMS memAllocParams; + NvHandle hDevice; + NvHandle hPhysMem; + NvU64 size; + NvHandle hVirtMem; + NvU32 hClass; + NvU32 attr; + NvU32 attrNotifier = NVOS32_ATTR_NONE; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // Apply registry overrides to channel pushbuffer. + switch (DRF_VAL(_REG_STR_RM, _INST_LOC_4, _CHANNEL_PUSHBUFFER, pGpu->instLocOverrides4)) + { + case NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_VID: + hClass = NV01_MEMORY_LOCAL_USER; + attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED); + attrNotifier = attr; + break; + + case NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_COH: + hClass = NV01_MEMORY_SYSTEM; + attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _CACHED); + attrNotifier = attr; + break; + + case NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_NCOH: + hClass = NV01_MEMORY_SYSTEM; + attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED); + attrNotifier = attr; + break; + + case NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_DEFAULT: + default: + hClass = NV01_MEMORY_SYSTEM; + attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED); + + // + // The work submit token is read from notifier memory to support + // VM migration for the memory scrubber. The token is read from + // the notifier memory every time when the scrubber submits the work. + // It will help performance by changing the default setting of + // the notifier memory to be cached. + // + attrNotifier = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _COHERENCY, _CACHED); + break; + } + + hDevice = pChannel->deviceId; + hPhysMem = pChannel->physMemId; + hVirtMem = pChannel->pushBufferId; + size = pChannel->channelSize; + + LOCK_ASSERT_AND_RETURN(!rmGpuLockIsOwner()); + // allocate the physical memory + portMemSet(&memAllocParams, 0, sizeof(memAllocParams)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = size; + memAllocParams.attr = attr; + memAllocParams.attr2 = NVOS32_ATTR2_NONE; + memAllocParams.flags = 0; + memAllocParams.internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_SKIP_SCRUB; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + hDevice, + hPhysMem, + hClass, + &memAllocParams)); + + // allocate the Virtual memory + portMemSet(&memAllocParams, 0, sizeof(memAllocParams)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = size; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + memAllocParams.attr2 = NVOS32_ATTR2_NONE; + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_VIRTUAL; + memAllocParams.hVASpace = pChannel->hVASpaceId; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + hDevice, + hVirtMem, + NV50_MEMORY_VIRTUAL, + &memAllocParams)); + + // allocate the physmem for the notifier + portMemSet(&memAllocParams, 0, sizeof(memAllocParams)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = pChannel->channelNotifierSize; + memAllocParams.attr = attrNotifier; + memAllocParams.attr2 = NVOS32_ATTR2_NONE; + memAllocParams.flags = 0; + memAllocParams.internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_SKIP_SCRUB; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + 
pChannel->hClient, + hDevice, + pChannel->errNotifierIdPhys, + hClass, + &memAllocParams)); + + // allocate Virtual Memory for the notifier + portMemSet(&memAllocParams, 0, sizeof(memAllocParams)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = pChannel->channelNotifierSize; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + memAllocParams.attr2 = NVOS32_ATTR2_NONE; + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_VIRTUAL; + memAllocParams.hVASpace = pChannel->hVASpaceId; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + hDevice, + pChannel->errNotifierIdVirt, + NV50_MEMORY_VIRTUAL, + &memAllocParams)); + + return rmStatus; +} + +NV_STATUS +memmgrMemUtilsChannelInitialize_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel +) +{ + NV_STATUS rmStatus; + NV_STATUS lockStatus; + NvHandle hDevice; // device handle + NvHandle hPhysMem; // memory handle + NvU64 size; + NvHandle hChannel; // channel handle + NvHandle hErrNotifierVirt; + NvHandle hErrNotifierPhys; + NvHandle hPushBuffer; + NvHandle hClient; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + RmClient *pClient; + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + NvU8 *pErrNotifierCpuVA = NULL; + NV_ADDRESS_SPACE userdAddrSpace; + NV_ADDRESS_SPACE pushBuffAddrSpace; + NV_ADDRESS_SPACE gpFifoAddrSpace; + + // + // Heap alloc one chunk of memory to hold all of our alloc parameters to + // reduce stack usage + // + union + { + NV0080_ALLOC_PARAMETERS nv0080; + NV2080_ALLOC_PARAMETERS nv2080; + NVC637_ALLOCATION_PARAMETERS nvC637; + NV_VASPACE_ALLOCATION_PARAMETERS va; + NV_MEMORY_ALLOCATION_PARAMS mem; + } *pParams = NULL; + + size = pChannel->channelSize; + hPhysMem = pChannel->physMemId; + hChannel = pChannel->channelId; + hErrNotifierVirt = pChannel->errNotifierIdVirt; + hErrNotifierPhys = pChannel->errNotifierIdPhys; + hPushBuffer = pChannel->pushBufferId; + + if (!pChannel->bClientAllocated) + { + RsClient *pRsClient; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, NV01_NULL_OBJECT, NV01_NULL_OBJECT, + NV01_NULL_OBJECT, NV01_ROOT, + &pChannel->hClient)); + + NV_ASSERT_OK_OR_GOTO( + rmStatus, + serverutilGetClientUnderLock(pChannel->hClient, &pClient), + exit_free_client); + + NV_ASSERT_OK_OR_GOTO( + rmStatus, + serverGetClientUnderLock(&g_resServ, pChannel->hClient, &pChannel->pRsClient), + exit_free_client); + + pRsClient = staticCast(pClient, RsClient); + + if (IS_VIRTUAL(pGpu)) + { + NV_ASSERT_OK_OR_GOTO( + rmStatus, + clientSetHandleGenerator(pRsClient, RS_UNIQUE_HANDLE_BASE, + RS_UNIQUE_HANDLE_RANGE/2 - VGPU_RESERVED_HANDLE_RANGE), + exit_free_client); + } + else + { + NV_ASSERT_OK_OR_GOTO( + rmStatus, + clientSetHandleGenerator(pRsClient, 1U, ~0U - 1U), + exit_free_client); + } + } + + hClient = pChannel->hClient; + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + rmStatus = NV_ERR_NO_MEMORY; + goto exit_free_client; + } + + NV_ASSERT_OK_OR_GOTO( + rmStatus, + serverutilGenResourceHandle(hClient, &pChannel->deviceId), + exit_free_client); + + hDevice = pChannel->deviceId; + + { + NV0080_ALLOC_PARAMETERS *pNv0080 = &pParams->nv0080; + + portMemSet(pNv0080, 0, sizeof(*pNv0080)); + // Which device are we? 
+ pNv0080->deviceId = gpuGetDeviceInstance(pGpu); + pNv0080->hClientShare = hClient; + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, hClient, hClient, hDevice, + NV01_DEVICE_0, pNv0080), + exit_free_client); + } + + NV_ASSERT_OK_OR_GOTO( + rmStatus, + serverutilGenResourceHandle(hClient, &pChannel->subdeviceId), + exit_free_client); + + // allocate a subdevice + { + NV2080_ALLOC_PARAMETERS *pNv2080 = &pParams->nv2080; + portMemSet(pNv2080, 0, sizeof(*pNv2080)); + pNv2080->subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, hClient, hDevice, + pChannel->subdeviceId, + NV20_SUBDEVICE_0, + pNv2080), + exit_free_client); + } + + // MIG support is only added for PMA scrubber + if (bMIGInUse && (pChannel->pKernelMIGGpuInstance != NULL)) + { + NVC637_ALLOCATION_PARAMETERS *pNvC637 = &pParams->nvC637; + + NV_ASSERT_OK_OR_GOTO( + rmStatus, + serverutilGenResourceHandle(hClient, + &pChannel->hPartitionRef), + exit_free_client); + + portMemSet(pNvC637, 0, sizeof(*pNvC637)); + pNvC637->swizzId = pChannel->pKernelMIGGpuInstance->swizzId; + + NV_ASSERT_OK_OR_GOTO( + rmStatus, + pRmApi->AllocWithHandle(pRmApi, hClient, + pChannel->subdeviceId, + pChannel->hPartitionRef, + AMPERE_SMC_PARTITION_REF, + pNvC637), + exit_free_client); + + pHeap = pChannel->pKernelMIGGpuInstance->pMemoryPartitionHeap; + } + + // + // client allocated userd only supported on volta+ + // TODO: Use property to check if client allocated userd is supported + // + pChannel->bClientUserd = NV_FALSE; + if (IsVOLTAorBetter(pGpu)) + { + NvU32 pmaConfig = 0; + pmaConfig = PMA_QUERY_NUMA_ENABLED | PMA_QUERY_NUMA_ONLINED; + NV_ASSERT_OK_OR_GOTO( + rmStatus, + pmaQueryConfigs(&pHeap->pmaObject, &pmaConfig), + exit_free_client); + if (pmaConfig & PMA_QUERY_NUMA_ENABLED) + { + if (pmaConfig & PMA_QUERY_NUMA_ONLINED) + pChannel->bClientUserd = NV_TRUE; + else + pChannel->bClientUserd = NV_FALSE; + } + else + { + pChannel->bClientUserd = NV_TRUE; + } + } + + // + // We need to allocate a VAS to use for CE copies, but also for + // GSP-RM + MIG, so that it doesn't get the device + // default VAS during channel bind (which is not properly handled + // by split VAS in MIG currently). We only need the identity mapping + // when actually using the VAS for copies. + // + if (pChannel->bUseVasForCeCopy || + (IS_GSP_CLIENT(pGpu) && bMIGInUse)) + { + NvBool bAcquireLock = NV_FALSE; + NvU64 startFbOffset = GPU_GET_HEAP(pGpu)->base; + NvU64 fbSize = GPU_GET_HEAP(pGpu)->total; + NvU64 vaStartOffset = startFbOffset; + + NV_PRINTF(LEVEL_INFO, "Channel VAS heap base: %llx total: %llx \n", GPU_GET_HEAP(pGpu)->base, + GPU_GET_HEAP(pGpu)->total); + + pChannel->startFbOffset = startFbOffset; + pChannel->fbSize = fbSize; + + if (pChannel->bUseVasForCeCopy) + { + NV_ASSERT_OK_OR_GOTO(rmStatus, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->hFbAlias), + exit_free_client); + + rmStatus = memmgrMemUtilsCreateMemoryAlias_HAL(pGpu, pMemoryManager, pChannel); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Setting Identity mapping failed.. status: %x\n", rmStatus); + goto exit_free_client; + } + } + + { + NV_VASPACE_ALLOCATION_PARAMETERS *pVa = &pParams->va; + + portMemSet(pVa, 0, sizeof(*pVa)); + pVa->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; + pVa->vaBase = pChannel->startFbOffset; + // + // how large should we go here ? we definitely need more than heapSize to allocate + // other metadata related to chnanel. 
Also need to account the discontiguous VA Range + // for split VAS, where we allocate 4GB to (4GB + 512MB) for Server VAS (mirrored). + // Rough VASpace Layout will be documented here: + // + // + if (gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + pVa->vaSize += (SPLIT_VAS_SERVER_RM_MANAGED_VA_START + + SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE) ; + } + pVa->vaSize += fbSize + pChannel->channelSize + SCRUBBER_VASPACE_BUFFER_SIZE; + + // + // We definitely need ALLOW_ZERO_ADDRESS, but SKIP_SCRUB_MEMPOOL is a patch + // until we figure out the right place for Scrubber page tables + // + pVa->flags |= NV_VASPACE_ALLOCATION_FLAGS_ALLOW_ZERO_ADDRESS | + NV_VASPACE_ALLOCATION_FLAGS_SKIP_SCRUB_MEMPOOL | + NV_VASPACE_ALLOCATION_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE; + + if (rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + bAcquireLock = NV_TRUE; + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + rmStatus = pRmApi->AllocWithHandle(pRmApi, hClient, pChannel->deviceId, + pChannel->hVASpaceId, FERMI_VASPACE_A, + pVa); + } + if (bAcquireLock) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM)); + bAcquireLock = NV_FALSE; + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + } + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed allocating scrubber vaspace, status=0x%x\n", + rmStatus); + goto exit_free_client; + } + + rmStatus = vaspaceGetByHandleOrDeviceDefault(pChannel->pRsClient, + pChannel->deviceId, + pChannel->hVASpaceId, + &pChannel->pVAS); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed getting the scrubber vaspace from handle, status=0x%x\n", + rmStatus); + goto exit_free_client; + } + + if (pChannel->bUseVasForCeCopy) + { + if (!gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + rmStatus = vaspacePinRootPageDir(pChannel->pVAS, pGpu); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed pinning down Scrubber VAS, status=0x%x\n", + rmStatus); + goto exit_free_client; + } + } + + NV_ASSERT_OK_OR_GOTO(rmStatus, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->hFbAliasVA), exit_free_client); + } + + if (gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + OBJGVASPACE *pGVAS = dynamicCast(pChannel->pVAS, OBJGVASPACE); + vaStartOffset += pGVAS->vaLimitServerRMOwned + 1; + pChannel->vaStartOffset = vaStartOffset; + } + + if (rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + bAcquireLock = NV_TRUE; + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + // Allocate virtual memory for Identity Mapping + if (pChannel->bUseVasForCeCopy) + { + NV_MEMORY_ALLOCATION_PARAMS *pMem = &pParams->mem; + portMemSet(pMem, 0, sizeof(*pMem)); + pMem->owner = NVOS32_TYPE_OWNER_RM; + pMem->type = NVOS32_TYPE_IMAGE; + pMem->size = pChannel->fbSize; + pMem->attr = (DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) | + DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _BIG)); + pMem->attr2 = NVOS32_ATTR2_NONE; + pMem->offset = vaStartOffset; + pMem->flags = 0; + pMem->flags |= NVOS32_ALLOC_FLAGS_VIRTUAL | + NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE | + NVOS32_ALLOC_FLAGS_LAZY; + pMem->hVASpace = pChannel->hVASpaceId; + + rmStatus = pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->hFbAliasVA, + NV50_MEMORY_VIRTUAL, + pMem); + } + + if (bAcquireLock) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM)); + bAcquireLock = 
NV_FALSE; + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + } + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Allocating VASpace for (base, size): (%llx, %llx) failed," + " with status: %x\n", vaStartOffset, pChannel->fbSize, rmStatus); + goto exit_free_client; + } + + // set up mapping of VA -> PA + if (pChannel->bUseVasForCeCopy) + { + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + pRmApi->Map(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->hFbAliasVA, + pChannel->hFbAlias, + 0, + pChannel->fbSize, + DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_WRITE) | + DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _BIG) | + DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE), + &pChannel->fbAliasVA), + exit_free_client); + + NV_PRINTF(LEVEL_INFO, "Scrubber VAS :%x identity mapped with start addr: %llx, size: %llx\n", + pChannel->hFbAliasVA, pChannel->fbAliasVA, pChannel->fbSize); + } + } + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + // + // Fetch the physical location of the push buffer + // + // Bug 3434881 filed to track the following + // a.Implementation of the utility function to parse the + // push buffer and userd regkeys + // b.Replace all instances of regkey pushbuffer/userd regkey + // parsing with the utility function + // + switch (DRF_VAL(_REG_STR_RM, _INST_LOC_4, _CHANNEL_PUSHBUFFER, pGpu->instLocOverrides4)) + { + case NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_VID: + pushBuffAddrSpace = ADDR_FBMEM; + break; + + case NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_COH: + case NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_NCOH: + case NV_REG_STR_RM_INST_LOC_4_CHANNEL_PUSHBUFFER_DEFAULT: + default: + pushBuffAddrSpace = ADDR_SYSMEM; + break; + } + + gpFifoAddrSpace = pushBuffAddrSpace; + + //Fetch the physical location of userD + switch (DRF_VAL(_REG_STR_RM, _INST_LOC, _USERD, pGpu->instLocOverrides)) + { + case NV_REG_STR_RM_INST_LOC_USERD_NCOH: + case NV_REG_STR_RM_INST_LOC_USERD_COH: + userdAddrSpace = ADDR_SYSMEM; + break; + + case NV_REG_STR_RM_INST_LOC_USERD_VID: + case NV_REG_STR_RM_INST_LOC_USERD_DEFAULT: + default: + userdAddrSpace = ADDR_FBMEM; + break; + } + + // RM WAR for Bug 3313719 + // Disallow USERD in sysmem and (GPFIFO or pushbuffer) in vidmem + rmStatus = kfifoCheckChannelAllocAddrSpaces_HAL(GPU_GET_KERNEL_FIFO(pGpu), + userdAddrSpace, + pushBuffAddrSpace, + gpFifoAddrSpace); + if (rmStatus != NV_OK) + { + NV_ASSERT_FAILED("USERD in sysmem and PushBuffer/GPFIFO in vidmem not allowed"); + goto exit_free_client; + } + + _memUtilsChannelAllocatePB_GM107(pGpu, pMemoryManager, pChannel); + lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM); + if(lockStatus != NV_OK) + { + NV_ASSERT_FAILED("Could not get back lock after allocating Push Buffer sema"); + goto exit_free_client; + } + + // map the pushbuffer + rmStatus = pRmApi->Map(pRmApi, hClient, hDevice, + hPushBuffer, + hPhysMem, //hMemory, + 0, + size, + NV04_MAP_MEMORY_FLAGS_NONE, + &pChannel->pbGpuVA); + // map the error notifier + rmStatus = pRmApi->Map(pRmApi, hClient, hDevice, + hErrNotifierVirt, + hErrNotifierPhys, //hMemory, + 0, + pChannel->channelNotifierSize, + DRF_DEF(OS46, _FLAGS, _KERNEL_MAPPING, _ENABLE), + &pChannel->pbGpuNotifierVA); + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + _memUtilsAllocateChannel(pGpu, + pMemoryManager, + hClient, + hDevice, + hChannel, + hErrNotifierVirt, + hPushBuffer, + pChannel), + exit_free_client); + + // _memUtilsMapUserd + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + _memUtilsMapUserd_GM107(pGpu, pMemoryManager, pChannel, + 
hClient, hDevice, hChannel), + exit_free_client); + + // + // map cpu pointer + // Map the pushbuffer memory to CPU viewable region + // + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + pRmApi->MapToCpu(pRmApi, + hClient, + hDevice, + hPhysMem, + 0, + size, + (void **)&pChannel->pbCpuVA, + 0), + exit_free_client); + + portMemSet(pChannel->pbCpuVA, 0, (NvLength)size); + + // Map the notifier memory to CPU viewable region + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + pRmApi->MapToCpu(pRmApi, + hClient, + hDevice, + hErrNotifierPhys, + 0, + pChannel->channelNotifierSize, + (void **)&pErrNotifierCpuVA, + 0), + exit_free_client); + + pChannel->pTokenFromNotifier = + (NvNotification *)(pErrNotifierCpuVA + + (NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN * + sizeof(NvNotification))); + + // + // Allocate and map the doorbell region to use in scrub on free + // Set the doorbellregister to False, since pre-volta chips doesn't support + // + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + memmgrScrubMapDoorbellRegion_HAL(pGpu, pMemoryManager, pChannel), + exit_free_client); + + portMemFree(pParams); + return NV_OK; + +exit_free_client: + if(!pChannel->bClientAllocated) + { + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hClient); + } + portMemFree(pParams); + NV_PRINTF(LEVEL_INFO, "end NV_STATUS=0x%08x\n", rmStatus); + return rmStatus; +} + + +/** memmgrMemUtilsCreateMemoryAlias_GM107 + * + * @brief Creates an alias for the FB region + * This function doesn't allocate any memory but just creates memory handle + * which refers to FB range. This call can support for both baremetal and vGPU. + * @param[in] pChannel CHANNEL Pointer + * + * @returns NV_OK on success + */ +NV_STATUS +memmgrMemUtilsCreateMemoryAlias_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_STATUS status = NV_OK; + + NV_PHYSICAL_MEMORY_ALLOCATION_PARAMS physMemParams = {0}; + + memmgrGetPteKindForScrubber_HAL(pMemoryManager, &physMemParams.format); + + + status = pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->hFbAlias, + NV01_MEMORY_LOCAL_PHYSICAL, + &physMemParams); + if (status != NV_OK) + { + NV_CHECK_OK_FAILED(LEVEL_WARNING, "Aliasing FbListMem", status); + return status; + } + + NV_PRINTF(LEVEL_INFO, "Allocating FbAlias: %x for size: %llx, kind: %x\n", pChannel->hFbAlias, + pChannel->fbSize, physMemParams.format); + + + return NV_OK; +} + +/*! + * Registers the callback specified in clientHeap.callback for the channel + * driven scrub. The callback is triggered by NV906F_NON_STALL_INTERRUPT. 
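+ *
+ * Allocates a subdevice for the scrubber client if one does not already exist,
+ * allocates an NV01_EVENT_KERNEL_CALLBACK_EX object against it, and arms a
+ * repeating NV2080_NOTIFIERS_FIFO_EVENT_MTHD notification.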
+ */ +static NV_STATUS +memmgrMemUtilsScrubInitRegisterCallback +( + OBJGPU *pGpu, + OBJCHANNEL *pChannel +) +{ + NV0005_ALLOC_PARAMETERS nv0005AllocParams; + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS nv2080EventNotificationParams; + NV_STATUS rmStatus; + NvHandle subDeviceHandle = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (NV_OK != CliGetSubDeviceHandleFromGpu(pChannel->hClient, + pGpu, + &subDeviceHandle)) + { + NV_PRINTF(LEVEL_WARNING, "Unable to get subdevice handle.\n"); + //Allocate a sub device if we dont have it created before hand + NV2080_ALLOC_PARAMETERS nv2080AllocParams; + + portMemSet(&nv2080AllocParams, 0, sizeof(NV2080_ALLOC_PARAMETERS)); + nv2080AllocParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + rmStatus = pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->subdeviceId, + NV20_SUBDEVICE_0, + &nv2080AllocParams); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to allocate a subdevice.\n"); + return NV_ERR_GENERIC; + } + } + + // Register callback + portMemSet(&nv0005AllocParams, 0, sizeof(NV0005_ALLOC_PARAMETERS)); + nv0005AllocParams.hParentClient = pChannel->hClient; + nv0005AllocParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + nv0005AllocParams.notifyIndex = NV2080_NOTIFIERS_FIFO_EVENT_MTHD | NV01_EVENT_NONSTALL_INTR ; + nv0005AllocParams.data = NV_PTR_TO_NvP64(&pChannel->callback); + + rmStatus = pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + subDeviceHandle, + pChannel->eventId, + NV01_EVENT_KERNEL_CALLBACK_EX, + &nv0005AllocParams); + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "event allocation failed\n"); + return NV_ERR_GENERIC; + } + + // Setup periodic event notification + portMemSet(&nv2080EventNotificationParams, 0, sizeof(NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS)); + nv2080EventNotificationParams.event = NV2080_NOTIFIERS_FIFO_EVENT_MTHD; + nv2080EventNotificationParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT; + + rmStatus = pRmApi->Control(pRmApi, + pChannel->hClient, + subDeviceHandle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + &nv2080EventNotificationParams, + sizeof(NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS)); + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "event notification control failed\n"); + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +/*! + * Schedules the scrubber channel for execution. 
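+ *
+ * If the channel owns a separate VAS for CE copies, it is first bound to the
+ * selected copy engine via NVA06F_CTRL_CMD_BIND, then GPFIFO scheduling is
+ * enabled with NVA06F_CTRL_CMD_GPFIFO_SCHEDULE.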
+ */ +static NV_STATUS +memmgrMemUtilsScrubInitScheduleChannel +( + OBJGPU *pGpu, + OBJCHANNEL *pChannel +) +{ + NV_STATUS rmStatus; + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS nvA06fScheduleParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (pChannel->bUseVasForCeCopy) + { + NVA06F_CTRL_BIND_PARAMS bindParams; + portMemSet(&bindParams, 0, sizeof(bindParams)); + + bindParams.engineType = pChannel->engineType; + + rmStatus = pRmApi->Control(pRmApi, + pChannel->hClient, + pChannel->channelId, + NVA06F_CTRL_CMD_BIND, + &bindParams, + sizeof(bindParams)); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to bind Channel, status: %x\n", rmStatus); + return rmStatus; + } + } + + portMemSet(&nvA06fScheduleParams, 0, sizeof(NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS)); + nvA06fScheduleParams.bEnable = NV_TRUE; + + rmStatus = pRmApi->Control(pRmApi, + pChannel->hClient, + pChannel->channelId, + NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, + &nvA06fScheduleParams, + sizeof(NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS)); + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to schedule channel, status: %x\n", rmStatus); + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +NV_STATUS +memmgrMemUtilsCopyEngineInitialize_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel +) +{ + NvU32 classID, engineID; + NV_STATUS rmStatus; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + KernelChannel *pFifoKernelChannel = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + //allocce + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + _memUtilsAllocCe_GM107(pGpu, + pMemoryManager, + pChannel, + pChannel->hClient, + pChannel->channelId, + pChannel->copyObjectId), + exit_free); + + // schedulechannel + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_WARNING, + memmgrMemUtilsScrubInitScheduleChannel(pGpu, pChannel), + exit_free); + + // Determine classEngineID for SetObject usage + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + CliGetKernelChannelWithDevice(pChannel->hClient, + pChannel->deviceId, + pChannel->channelId, + &pFifoKernelChannel), + exit_free); + + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + kchannelGetClassEngineID_HAL(pGpu, + pFifoKernelChannel, + pChannel->copyObjectId, + &pChannel->classEngineID, + &classID, + &engineID), + exit_free); + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + memmgrMemUtilsScrubInitRegisterCallback(pGpu, pChannel), + exit_free); + + NV_CHECK_OK_OR_GOTO( + rmStatus, + LEVEL_ERROR, + kfifoRmctrlGetWorkSubmitToken_HAL(pKernelFifo, + pChannel->hClient, + pChannel->channelId, + &pChannel->workSubmitToken), + exit_free); + + // initialize the channel parameters (should be done by the parent object) + pChannel->channelPutOffset = 0; + MEM_WR32(pChannel->pbCpuVA + pChannel->semaOffset, 0); + return NV_OK; + + exit_free: + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hClient); + NV_PRINTF(LEVEL_INFO, "end NV_STATUS=0x%08x\n", rmStatus); + return rmStatus; +} + +static NV_STATUS _memUtilsGetCe_GM107 +( + OBJGPU *pGpu, + NvHandle hClient, + KernelCE **ppKCe +) +{ + KernelCE *pKCe = NULL; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NV_STATUS status = NV_OK; + NvU32 ceInst = 0; + + *ppKCe = NULL; + + if (IS_MIG_IN_USE(pGpu)) + { + status = kmigmgrGetGPUInstanceScrubberCe(pGpu, GPU_GET_KERNEL_MIG_MANAGER(pGpu), hClient, &ceInst); + } + else + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, gpuUpdateEngineTable(pGpu)); + + for (ceInst = 0; ceInst < ENG_CE__SIZE_1; ceInst++) + { + pKCe = GPU_GET_KCE(pGpu, 
ceInst); + if (pKCe == NULL) + { + continue; + } + + if (kbusCheckEngine_HAL(pGpu, pKernelBus, ENG_CE(pKCe->publicID)) && + !ceIsCeGrce(pGpu, NV2080_ENGINE_TYPE_COPY(pKCe->publicID)) && + gpuCheckEngineTable(pGpu, NV2080_ENGINE_TYPE_COPY(pKCe->publicID))) + { + break; + } + } + if (ceInst == ENG_CE__SIZE_1) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + } + } + + NV_ASSERT_OK_OR_RETURN(status); + + *ppKCe = GPU_GET_KCE(pGpu, ceInst); + return status; +} + + +static NV_STATUS _memUtilsAllocCe_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, + NvHandle hClientId, + NvHandle hChannelId, + NvHandle hCopyObjectId + +) +{ + KernelCE *pKCe = NULL; + NVC0B5_ALLOCATION_PARAMETERS createParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + createParams.version = NVC0B5_ALLOCATION_PARAMETERS_VERSION_1; + + NV_ASSERT_OK_OR_RETURN(_memUtilsGetCe_GM107(pGpu, hClientId, &pKCe)); + NV_ASSERT_OR_RETURN((pKCe != NULL), NV_ERR_INVALID_STATE); + + createParams.engineType = NV2080_ENGINE_TYPE_COPY(pKCe->publicID); + memmgrMemUtilsGetCopyEngineClass_HAL(pGpu, pMemoryManager, &pChannel->hTdCopyClass); + pChannel->engineType = createParams.engineType; + + if (!pChannel->hTdCopyClass) + { + NV_PRINTF(LEVEL_ERROR, "Unable to determine CE's engine class.\n"); + return NV_ERR_GENERIC; + } + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + hClientId, + hChannelId, + hCopyObjectId, + pChannel->hTdCopyClass, + &createParams)); + + pChannel->pKCe = pKCe; + return NV_OK; +} + +static NV_STATUS +_memUtilsMapUserd_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, + NvHandle hClientId, + NvHandle hDeviceId, + NvHandle hChannelId +) +{ + NvU32 userdSize; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + kfifoGetUserdSizeAlign_HAL(GPU_GET_KERNEL_FIFO(pGpu), &userdSize, NULL); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->MapToCpu(pRmApi, + hClientId, + hDeviceId, + pChannel->bClientUserd ? pChannel->hUserD : hChannelId, + 0, + userdSize, + (void **)&pChannel->pControlGPFifo, + 0)); + + return NV_OK; +} + +static NV_STATUS +_memUtilsAllocateUserD +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvHandle hClientId, + NvHandle hDeviceId, + OBJCHANNEL *pChannel +) +{ + NV_STATUS rmStatus = NV_OK; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV_MEMORY_ALLOCATION_PARAMS memAllocParams; + NvU32 userdMemClass = NV01_MEMORY_LOCAL_USER; + + // Ensure that call is not made with lock held + LOCK_ASSERT_AND_RETURN(!rmGpuLockIsOwner()); + + portMemSet(&memAllocParams, 0, sizeof(memAllocParams)); + + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + kfifoGetUserdSizeAlign_HAL(pKernelFifo, (NvU32 *)&memAllocParams.size, NULL); + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_SKIP_SCRUB; + + // Apply registry overrides to USERD. 
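+    // (the _INST_LOC _USERD regkey selects coherent/non-coherent sysmem or
+    //  vidmem placement; vidmem is the default and is allocated as persistent vidmem)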
+ switch (DRF_VAL(_REG_STR_RM, _INST_LOC, _USERD, pGpu->instLocOverrides)) + { + case NV_REG_STR_RM_INST_LOC_USERD_NCOH: + case NV_REG_STR_RM_INST_LOC_USERD_COH: + userdMemClass = NV01_MEMORY_SYSTEM; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + break; + + case NV_REG_STR_RM_INST_LOC_USERD_VID: + case NV_REG_STR_RM_INST_LOC_USERD_DEFAULT: + userdMemClass = NV01_MEMORY_LOCAL_USER; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM; + break; + } + + NV_ASSERT_OK_OR_RETURN(pRmApi->AllocWithHandle(pRmApi, hClientId, hDeviceId, + pChannel->hUserD, + userdMemClass, + &memAllocParams)); + + return rmStatus; +} + +static NV_STATUS +_memUtilsAllocateChannel +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvHandle hClientId, + NvHandle hDeviceId, + NvHandle hChannelId, + NvHandle hObjectError, + NvHandle hObjectBuffer, + OBJCHANNEL *pChannel +) +{ + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS channelGPFIFOAllocParams; + NV_STATUS rmStatus = NV_OK; + NvU32 hClass; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + KernelCE *pKCe; + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + + NV_ASSERT_OK_OR_RETURN(_memUtilsGetCe_GM107(pGpu, hClientId, &pKCe)); + NV_ASSERT_OR_RETURN((pKCe != NULL), NV_ERR_INVALID_STATE); + + portMemSet(&channelGPFIFOAllocParams, 0, sizeof(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS)); + channelGPFIFOAllocParams.hObjectError = hObjectError; + channelGPFIFOAllocParams.hObjectBuffer = hObjectBuffer; + channelGPFIFOAllocParams.gpFifoOffset = pChannel->pbGpuVA + pChannel->channelPbSize; + channelGPFIFOAllocParams.gpFifoEntries = pChannel->channelNumGpFifioEntries; + channelGPFIFOAllocParams.hContextShare = NV01_NULL_OBJECT; + channelGPFIFOAllocParams.flags = DRF_DEF(OS04, _FLAGS, _CHANNEL_SKIP_SCRUBBER, _TRUE); + channelGPFIFOAllocParams.hVASpace = pChannel->hVASpaceId; + + // + // Use GPU instance local Id if MIG is enabled + // TODO: Maybe we need a VAS for each GPU instance ? 
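When MIG is in use, the engine type written into the GPFIFO allocation parameters has to be the GPU instance's local CE index rather than the global NV2080_ENGINE_TYPE_COPY id; the block below performs that translation through kmigmgrGetGlobalToLocalEngineType. A minimal sketch of the underlying idea, assuming a simple per-instance engine list (illustrative only, not the RM's actual data structures):

#include <stddef.h>

/* Map a global engine id to an instance-local one by searching the list of
 * engines assigned to that GPU instance. Purely illustrative. */
static int global_to_local_engine(const int *instance_engines, size_t count, int global_id)
{
    for (size_t i = 0; i < count; i++)
    {
        if (instance_engines[i] == global_id)
            return (int)i;   /* local index within the GPU instance */
    }
    return -1;               /* engine not assigned to this instance */
}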
+ // + if (bMIGInUse && (pChannel->pKernelMIGGpuInstance != NULL)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + NvU32 localCe; + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClientId, &ref)); + // Clear the Compute instance portion, if present + ref = kmigmgrMakeGIReference(ref.pKernelMIGGpuInstance); + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_COPY(pKCe->publicID), + &localCe)); + channelGPFIFOAllocParams.engineType = localCe; + } + else + { + channelGPFIFOAllocParams.engineType = NV2080_ENGINE_TYPE_COPY(pKCe->publicID); + } + + hClass = kfifoGetChannelClassId(pGpu, GPU_GET_KERNEL_FIFO(pGpu)); + if (!hClass) + { + NV_PRINTF(LEVEL_ERROR, "Unable to determine CE's channel class.\n"); + return NV_ERR_GENERIC; + } + + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + if (pChannel->bClientUserd) + { + NV_ASSERT_OK_OR_GOTO( + rmStatus, + _memUtilsAllocateUserD(pGpu, + pMemoryManager, + hClientId, + hDeviceId, + pChannel), + cleanup); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + channelGPFIFOAllocParams.hUserdMemory[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = pChannel->hUserD; + channelGPFIFOAllocParams.userdOffset[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = 0; + SLI_LOOP_END + } + + + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR( + rmStatus, + pRmApi->AllocWithHandle(pRmApi, + hClientId, + hDeviceId, + hChannelId, + hClass, + &channelGPFIFOAllocParams)); + +cleanup: + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM)); + + return rmStatus; +} + +/*! + * Do a Non Blocking Memeset + * + * @param[in] pChannel OBJCHANNEL pointer + * @param[in] base Offset in FB + * @param[in] size size to scrub + * @param[in] freeToken will be returned as a semaphore + * @param[in] *pNumBlocks returns the number of blocks that were scrubbed + * @returns NV_STATUS + */ +NV_STATUS +memmgrMemUtilsMemSet_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, + RmPhysAddr base, + NvU64 size, + NvU32 freeToken, + NvU32 *pNumBlocks +) +{ + NvU32 blocksPushed = 0; + + if ((size % pChannel->minBlockSize) != 0) + { + NV_PRINTF(LEVEL_ERROR, "Size should be a multiple of %d\n", + pChannel->minBlockSize); + return NV_ERR_GENERIC; + + } + if (pChannel->isProgressChecked) + { + // if progress is checked insert the semaphore with freeToken as payload + pChannel->finishPayload = freeToken; + _ceChannelScheduleWork_GM107(pGpu, pMemoryManager, pChannel, + 0, 0, 0, /*src parameters*/ + base, ADDR_FBMEM, 0, /*dst parameters*/ + size, + NV_FALSE, + NV_TRUE, + NV_FALSE /*scrubbing*/); + + } + else + { + // issue a standard async scrub + blocksPushed = _ceChannelScheduleWork_GM107(pGpu, pMemoryManager, pChannel, + 0, 0, 0, /*src parameters*/ + base, ADDR_FBMEM, 0, /*dst parameters*/ + size, + NV_FALSE, + NV_FALSE, + NV_FALSE /*scrubbing*/); + } + *pNumBlocks = blocksPushed; + return NV_OK; +} + +/*! 
+ * Do a Blocking Memset
+ *
+ * @param[in]     pChannel   OBJCHANNEL pointer
+ * @param[in]     base       Offset in FB
+ * @param[in]     size       size to scrub
+ * @returns NV_STATUS
+ */
+
+NV_STATUS
+memmgrMemUtilsMemSetBlocking_GM107
+(
+    OBJGPU          *pGpu,
+    MemoryManager   *pMemoryManager,
+    OBJCHANNEL      *pChannel,
+    RmPhysAddr       base,
+    NvU64            size
+)
+{
+    NvU32 blocksPushed = 0;
+
+    if((size % pChannel->minBlockSize) != 0)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Size should be a multiple of %d\n",
+                  pChannel->minBlockSize);
+        DBG_BREAKPOINT();
+        return NV_ERR_GENERIC;
+
+    }
+
+    blocksPushed = _ceChannelScheduleWork_GM107(pGpu, pMemoryManager, pChannel,
+                       0, 0, 0,              /*src parameters*/
+                       base, ADDR_FBMEM, 0,  /*dst parameters*/
+                       size,
+                       NV_TRUE,
+                       NV_FALSE,
+                       NV_FALSE /*scrubbing*/);
+
+    if (blocksPushed > 0)
+    {
+        NvU8     *semAddr       = pChannel->pbCpuVA + pChannel->semaOffset;
+        NV_STATUS timeoutStatus = NV_OK;
+        RMTIMEOUT timeout;
+
+        gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
+
+        while (MEM_RD32(semAddr) != pChannel->lastPayloadPushed)
+        {
+            NV_PRINTF(LEVEL_INFO, "Semaphore Payload is 0x%x last is 0x%x\n",
+                      MEM_RD32(semAddr), pChannel->lastPayloadPushed);
+
+            if (timeoutStatus == NV_ERR_TIMEOUT)
+            {
+                NV_PRINTF(LEVEL_ERROR,
+                          "Timed Out waiting for CE semaphore in blocking scrub!\n");
+
+                NV_PRINTF(LEVEL_ERROR,
+                          "GET=0x%x, PUT=0x%x, GPGET=0x%x, GPPUT=0x%x\n",
+                          pChannel->pControlGPFifo->Get,
+                          pChannel->pControlGPFifo->Put,
+                          pChannel->pControlGPFifo->GPGet,
+                          pChannel->pControlGPFifo->GPPut);
+
+                DBG_BREAKPOINT_REASON(NV_ERR_TIMEOUT);
+                return NV_ERR_GENERIC;
+            }
+
+            timeoutStatus = gpuCheckTimeout(pGpu, &timeout);
+        }
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * Do a Batched Memset
+ *
+ * @param[in]     pChannel   OBJCHANNEL pointer
+ * @param[in]     base       Offset in FB
+ * @param[in]     size       size to scrub
+ * @returns NV_STATUS
+ */
+
+NV_STATUS
+memmgrMemUtilsMemSetBatched_GM107
+(
+    OBJGPU          *pGpu,
+    MemoryManager   *pMemoryManager,
+    OBJCHANNEL      *pChannel,
+    RmPhysAddr       base,
+    NvU64            size
+)
+{
+    NvU32 blocksPushed = 0;
+
+    blocksPushed = _ceChannelScheduleBatchWork_GM107(pGpu, pMemoryManager, pChannel,
+                       0, 0, 0,              /*src parameters*/
+                       base, ADDR_FBMEM, 0,  /*dst parameters*/
+                       size,
+                       NV_FALSE);            /*scrubbing*/
+
+    if (blocksPushed > 0)
+    {
+        NvU8      *semAddr       = pChannel->pbCpuVA + pChannel->finishPayloadOffset;
+        NV_STATUS  timeoutStatus  = NV_OK;
+        RMTIMEOUT  timeout;
+
+        gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
+
+        while(MEM_RD32(semAddr) != pChannel->finishPayload)
+        {
+            NV_PRINTF(LEVEL_INFO, "Semaphore Payload is 0x%x last is 0x%x\n",
+                      MEM_RD32(semAddr), pChannel->finishPayload);
+
+            if (timeoutStatus == NV_ERR_TIMEOUT)
+            {
+                NV_PRINTF(LEVEL_ERROR,
+                          "Timed Out waiting for CE semaphore in blocking scrub!\n");
+
+                NV_PRINTF(LEVEL_ERROR,
+                          "GET=0x%x, PUT=0x%x, GPGET=0x%x, GPPUT=0x%x\n",
+                          MEM_RD32(&pChannel->pControlGPFifo->Get),
+                          MEM_RD32(&pChannel->pControlGPFifo->Put),
+                          MEM_RD32(&pChannel->pControlGPFifo->GPGet),
+                          MEM_RD32(&pChannel->pControlGPFifo->GPPut));
+
+                DBG_BREAKPOINT_REASON(NV_ERR_TIMEOUT);
+                return NV_ERR_GENERIC;
+            }
+
+            //
+            // mcServiceList() can be enabled for debugging purposes
+            // mcServiceListPgSafe(pGpu, GPU_GET_MC(pGpu), MC_ENGINES_ALL, NV_FALSE);
+            //
+
+            osSpinLoop();
+            timeoutStatus = gpuCheckTimeout(pGpu, &timeout);
+        }
+    }
+
+    return NV_OK;
+}
+
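Both memset paths above share one completion pattern: poll the CE semaphore until it carries the payload released by the last pushed block, and bail out once the RM timeout expires. A stand-alone sketch of that polling loop in plain C (illustrative types; the driver itself goes through MEM_RD32, RMTIMEOUT, gpuCheckTimeout and osSpinLoop):

#include <stdbool.h>
#include <stdint.h>

/* Poll a 32-bit semaphore until it reads back 'expected', or give up after
 * 'budget' polls; returning false corresponds to the NV_ERR_TIMEOUT branch. */
static bool wait_for_ce_semaphore(const volatile uint32_t *sema,
                                  uint32_t expected,
                                  unsigned long budget)
{
    while (*sema != expected)
    {
        if (budget-- == 0)
            return false;
        /* the driver spins (osSpinLoop) and re-evaluates gpuCheckTimeout here */
    }
    return true;
}

+/*!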
+ * Do a Blocking Memcopy + * + * @param[in] pChannel OBJCHANNEL pointer + * @param[in] src Offset of src to copy from + * @param[in] srcAddressSpace source surface address space type + * @param[in] srcCpuCacheAttrib source surface address space attributes + * @param[in] dst Offset of dst to scrub/copy to + * @param[in] dstAddressSpace destination surface address space type + * @param[in] dstCpuCacheAttrib destination surface address space attributes + * @param[in] size size to scrub + * @returns NV_STATUS + */ +NV_STATUS +memmgrMemUtilsMemCopyBatched_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, + RmPhysAddr src, + NV_ADDRESS_SPACE srcAddressSpace, + NvU32 srcCpuCacheAttrib, + RmPhysAddr dst, + NV_ADDRESS_SPACE dstAddressSpace, + NvU32 dstCpuCacheAttrib, + NvU64 size +) +{ + NvU32 blocksPushed = _ceChannelScheduleBatchWork_GM107(pGpu, pMemoryManager, pChannel, + src, srcAddressSpace, srcCpuCacheAttrib, /*src parameters*/ + dst, dstAddressSpace, dstCpuCacheAttrib, /*dst parameters*/ + size, NV_TRUE /*memcpy*/); + + if (blocksPushed > 0) + { + NvU8 *semAddr = pChannel->pbCpuVA + pChannel->finishPayloadOffset; + NV_STATUS timeoutStatus = NV_OK; + RMTIMEOUT timeout; + + // + // Originally the flag is 0, but to WAR bug 2441762, add flag + // GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE to bypass using threadStateCheckTimeout + // + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE); + + while (MEM_RD32(semAddr) != pChannel->finishPayload) + { + NV_PRINTF(LEVEL_INFO, "Semaphore Payload is 0x%x last is 0x%x\n", + MEM_RD32(semAddr), pChannel->finishPayload); + + NV_PRINTF(LEVEL_INFO, + "GET=0x%x, PUT=0x%x, GPGET=0x%x, GPPET=0x%x\n", + MEM_RD32(&pChannel->pControlGPFifo->Get), + MEM_RD32(&pChannel->pControlGPFifo->Put), + MEM_RD32(&pChannel->pControlGPFifo->GPGet), + MEM_RD32(&pChannel->pControlGPFifo->GPPut)); + + if (timeoutStatus == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_ERROR, + "Timed Out wating for CE semaphore in blocking scrub!\n"); + + NV_PRINTF(LEVEL_ERROR, + "GET=0x%x, PUT=0x%x, GPGET=0x%x, GPPET=0x%x\n", + MEM_RD32(&pChannel->pControlGPFifo->Get), + MEM_RD32(&pChannel->pControlGPFifo->Put), + MEM_RD32(&pChannel->pControlGPFifo->GPGet), + MEM_RD32(&pChannel->pControlGPFifo->GPPut)); + + DBG_BREAKPOINT_REASON(NV_ERR_TIMEOUT); + return NV_ERR_GENERIC; + } + + // + // mcServiceList() can be enabled for debugging purposes + // mcServiceListPgSafe(pGpu, GPU_GET_MC(pGpu), MC_ENGINES_ALL, NV_FALSE); + // + + osSpinLoop(); + timeoutStatus = gpuCheckTimeout(pGpu, &timeout); + } + } + + return NV_OK; +} + +/*! + * This function allocates the ECC scrubber + * + * @param[in] pChannel OBJCHANNEL pointer + * @returns Bool + */ +NV_STATUS +memmgrMemUtilsAllocateEccScrubber_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel +) +{ + memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, pChannel); + + memmgrMemUtilsCopyEngineInitialize_HAL(pGpu, pMemoryManager, pChannel); + + _memUtilsAllocateReductionSema(pGpu, pMemoryManager, pChannel); + + return NV_OK; +} + +/*! 
+ * This function allocates the ECC allocation scrubber and
+ * dupes the bitmap semaphore which is used for sync
+ *
+ * @param[in]     pChannel   OBJCHANNEL pointer
+ * @returns NV_STATUS
+ */
+NV_STATUS
+memmgrMemUtilsAllocateEccAllocScrubber_GM107
+(
+    OBJGPU        *pGpu,
+    MemoryManager *pMemoryManager,
+    OBJCHANNEL    *pChannel
+)
+{
+    OBJSCRUB                    *pEccTD           = &pMemoryManager->eccScrubberState;
+    OBJCHANNEL                  *pEccSyncChannel  = &pEccTD->allocationScrubberState;
+    OBJCHANNEL                  *pEccAsyncChannel = &pEccTD->tdHeapState;
+    NV_MEMORY_ALLOCATION_PARAMS  memAllocParams;
+    NV_STATUS                    lockStatus;
+    RM_API                      *pRmApi           = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+    memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, pEccSyncChannel);
+    memmgrMemUtilsCopyEngineInitialize_HAL(pGpu, pMemoryManager, pEccSyncChannel);
+
+    rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+
+    // dup the reduction sema bit map object
+    NV_ASSERT_OK(
+        pRmApi->DupObject(pRmApi,
+                          pEccSyncChannel->hClient,
+                          pEccSyncChannel->deviceId,
+                          &pEccSyncChannel->bitMapSemPhysId,
+                          pEccAsyncChannel->hClient,
+                          pEccAsyncChannel->bitMapSemPhysId,
+                          0));
+
+    // allocate virtual memory for a bit map semaphore
+    portMemSet(&memAllocParams, 0, sizeof(memAllocParams));
+    memAllocParams.owner     = HEAP_OWNER_RM_CLIENT_GENERIC;
+    memAllocParams.type      = NVOS32_TYPE_IMAGE;
+    memAllocParams.size      = (((pEccSyncChannel->blockCount + 31)/32)*4);
+    memAllocParams.attr      = NVOS32_ATTR_NONE;
+    memAllocParams.attr2     = NVOS32_ATTR2_NONE;
+    memAllocParams.flags     = 0;
+    memAllocParams.flags    |= NVOS32_ALLOC_FLAGS_VIRTUAL;
+
+    NV_ASSERT_OK(
+        pRmApi->AllocWithHandle(pRmApi,
+                                pEccSyncChannel->hClient,
+                                pEccSyncChannel->deviceId,
+                                pEccSyncChannel->bitMapSemVirtId,
+                                NV50_MEMORY_VIRTUAL,
+                                &memAllocParams));
+
+    lockStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM);
+
+    if (lockStatus != NV_OK)
+    {
+        NV_ASSERT_FAILED("Could not get back lock after allocating reduction sema");
+        return NV_ERR_GENERIC;
+    }
+
+    NV_ASSERT_OK(
+        pRmApi->Map(pRmApi,
+                    pEccSyncChannel->hClient,
+                    pEccSyncChannel->deviceId,
+                    pEccSyncChannel->bitMapSemVirtId,
+                    pEccSyncChannel->bitMapSemPhysId, //hMemory,
+                    0,
+                    (((pEccSyncChannel->blockCount + 31) / 32) * 4),
+                    NV04_MAP_MEMORY_FLAGS_NONE,
+                    &pEccSyncChannel->pbGpuBitMapVA));
+
+    pEccSyncChannel->pbBitMapVA = pEccAsyncChannel->pbBitMapVA;
+
+    return NV_OK;
+}
+
+/*!
+ * Function calculates the available space in the PB.
+ * This is based on reading the semaphore that holds
+ * the previous PUT pointer where methods were
+ * inserted
+ *
+ * @param[in]     pChannel  OBJCHANNEL pointer
+ * @returns Available space in bytes
+ */
+static NvU32
+_getSpaceInPb(OBJCHANNEL *pChannel)
+{
+    NvU32 filledSpace;
+    NvU32 avlblSpace;
+
+    if (pChannel->channelPutOffset >= MEM_RD32((NvU8*)pChannel->pbCpuVA + pChannel->semaOffset))
+    {
+        filledSpace = (pChannel->channelPutOffset - MEM_RD32((NvU8*)pChannel->pbCpuVA + pChannel->semaOffset));
+        avlblSpace = pChannel->channelPbSize - filledSpace;
+
+    }
+    else
+    {
+        avlblSpace = (MEM_RD32((NvU8*)pChannel->pbCpuVA + pChannel->semaOffset) - pChannel->channelPutOffset);
+    }
+
+    NV_PRINTF(LEVEL_INFO, "Space in PB is %d\n", avlblSpace);
+
+    return avlblSpace;
+
+}
+
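The same free-space computation as _getSpaceInPb, restated as a self-contained helper over plain integers (illustrative names; the GET value is the PUT offset most recently written back through the pushbuffer semaphore):

#include <stdint.h>

/* Free bytes in a circular pushbuffer of pb_size bytes, given the CPU-side PUT
 * offset (next write) and the GET offset reported back by the semaphore. */
static uint32_t pb_free_space(uint32_t put, uint32_t get, uint32_t pb_size)
{
    if (put >= get)
        return pb_size - (put - get);   /* filled region is [get, put)         */
    else
        return get - put;               /* filled region wraps past the PB end */
}

+/*!
+ * This function allows batch mode of submitting work.
+ * The work is submitted to Host only when the pushbuffer runs out of space.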
+ * + * @param[in] pChannel OBJCHANNEL pointer + * @param[in] src Offset of src to copy from + * @param[in] srcAddressSpace source surface address space type + * @param[in] srcCpuCacheAttrib source surface address space attributes + * @param[in] dst Offset of dst to scrub/copy to + * @param[in] dstAddressSpace destination surface address space type + * @param[in] dstCpuCacheAttrib destination surface address space attributes + * @param[in] size size to scrub/copy + * @param[in] bMemcopy NV_TRUE for memory copy / NV_FALSE for scrubbing + * @returns Bool + */ +static NvU32 +_ceChannelScheduleBatchWork_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, + RmPhysAddr src, + NV_ADDRESS_SPACE srcAddressSpace, + NvU32 srcCpuCacheAttrib, + RmPhysAddr dst, + NV_ADDRESS_SPACE dstAddressSpace, + NvU32 dstCpuCacheAttrib, + NvU64 size, + NvBool bMemcopy +) +{ + NvU32 spaceInPb; + NvU32 bytesPushed; + NvU32 *ptr; + NvU32 blocksPushed = 0; + + spaceInPb = pChannel->channelPbSize - pChannel->channelPutOffset; + NV_ASSERT_OR_RETURN(spaceInPb >= pChannel->methodSizePerBlock, 0); + + // Support for sending semaphore release only work. + if (size > 0) + { + NV_PRINTF(LEVEL_INFO, "Space in PB is %d and starting fill at 0x%x\n", + spaceInPb, pChannel->channelPutOffset); + + ptr = (NvU32 *)(pChannel->pbCpuVA + pChannel->channelPutOffset); + + bytesPushed = _ceChannelPushMethodsBlock_GM107(pGpu, pMemoryManager, pChannel, + src, srcAddressSpace, srcCpuCacheAttrib, /*src parameters*/ + dst, dstAddressSpace, dstCpuCacheAttrib, /*dst parameters*/ + size, &ptr, NV_FALSE, NV_FALSE, NV_FALSE, bMemcopy); + pChannel->finishPayload += NvU64_LO32(size); + NV_ASSERT(NvU64_HI32(size) == 0); + NV_ASSERT(bytesPushed != 0); + } + + spaceInPb = pChannel->channelPbSize - pChannel->channelPutOffset; + + // + // Submit a semaphore release only work followed by a GPFIFO update. + // We do this in the following cases: + // 1. We run out of pushbuffer space + // 2. Flush remaining/last work + // + if (spaceInPb < pChannel->methodSizePerBlock || (pChannel->channelPutOffset && !size)) + { + ptr = (NvU32 *)(pChannel->pbCpuVA + pChannel->channelPutOffset); + + bytesPushed = _ceChannelPushMethodsBlock_GM107(pGpu, pMemoryManager, pChannel, + 0, 0, 0, /*src parameters*/ + 0, 0, 0, /*dst parameters*/ + 0, &ptr, NV_FALSE, NV_FALSE, NV_TRUE, bMemcopy); + + NV_ASSERT(bytesPushed != 0); + + _ceChannelUpdateGpFifo_GM107(pGpu, pMemoryManager, pChannel, 0, pChannel->channelPutOffset); + blocksPushed = 1; + pChannel->channelPutOffset = 0; + } + + return blocksPushed; +} + +/*! 
+ * This function manages the push buffer.
+ * It will insert methods into the PB, manage wrap-around,
+ * and decide when we need to add NON-STALL interrupts
+ * and extra token semaphores
+ *
+ * @param[in]     pChannel          OBJCHANNEL pointer
+ * @param[in]     src               Offset of src to copy from
+ * @param[in]     srcAddressSpace   source surface address space type
+ * @param[in]     srcCpuCacheAttrib source surface address space attributes
+ * @param[in]     dst               Offset of dst to scrub/copy to
+ * @param[in]     dstAddressSpace   destination surface address space type
+ * @param[in]     dstCpuCacheAttrib destination surface address space attributes
+ * @param[in]     size              size to scrub/copy
+ * @param[in]     blocking          when NV_TRUE, do not insert non-stall interrupts
+ * @param[in]     insertFinishPayload will insert a token for the last block submitted
+ * @param[in]     bMemcopy          NV_TRUE for memory copy / NV_FALSE for scrubbing
+ * @returns Number of blocks pushed
+ */
+static NvU32
+_ceChannelScheduleWork_GM107
+(
+    OBJGPU          *pGpu,
+    MemoryManager   *pMemoryManager,
+    OBJCHANNEL      *pChannel,
+    RmPhysAddr       src,
+    NV_ADDRESS_SPACE srcAddressSpace,
+    NvU32            srcCpuCacheAttrib,
+    RmPhysAddr       dst,
+    NV_ADDRESS_SPACE dstAddressSpace,
+    NvU32            dstCpuCacheAttrib,
+    NvU64            size,
+    NvBool           blocking,
+    NvBool           insertFinishPayload,
+    NvBool           bMemcopy
+)
+{
+    RMTIMEOUT        timeout;
+    NvU32            spaceInPb;
+    NvU32            numBytes;
+    NvU32            bytesPushed;
+    NvU32           *ptr;
+    NvU32            gpBase;
+    NvU32            semaCount = 0;
+    NvBool           addNonStallIntr = NV_FALSE;
+    NvU32            blocksPushed = 0;
+    NvBool           addFinishPayload;
+    NvU32            blockSize = 0;
+
+    gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0);
+
+    spaceInPb = _getSpaceInPb(pChannel);
+
+    NV_PRINTF(LEVEL_INFO, "Space in PB is %d and starting fill at 0x%x\n",
+              spaceInPb, pChannel->channelPutOffset);
+
+    ptr = (NvU32 *)(pChannel->pbCpuVA + pChannel->channelPutOffset);
+    gpBase = pChannel->channelPutOffset;
+    numBytes = 0;
+    do
+    {
+        // while we have space greater than one block
+        while((spaceInPb > (pChannel->methodSizePerBlock+MAX_EXTRA_PAYLOAD)))
+        {
+            // if inserting one more block is greater than PB size then wrap around to the beginning
+            if((pChannel->channelPutOffset + (pChannel->methodSizePerBlock+MAX_EXTRA_PAYLOAD)) > pChannel->channelPbSize)
+            {
+                NV_PRINTF(LEVEL_INFO, "Wrap numBytes %d\n", numBytes);
+                //submit to gpfifo with numBytes and wrap around the PutOffset
+                if(numBytes > 0)
+                {
+                    _ceChannelUpdateGpFifo_GM107(pGpu, pMemoryManager, pChannel, (gpBase), numBytes);
+                }
+                pChannel->channelPutOffset = 0;
+                ptr = (NvU32 *)(pChannel->pbCpuVA + pChannel->channelPutOffset);
+                gpBase = 0;
+                numBytes = 0;
+                bytesPushed = 0;
+                // update the available space
+                spaceInPb = _getSpaceInPb(pChannel);
+                NV_PRINTF(LEVEL_INFO, "Wrapping PB around\n");
+                continue;
+            }
+
+            blockSize = (size > pChannel->maxBlockSize) ?
+ pChannel->maxBlockSize : (NvU32) size; + + // add a non-stall interupt every (8th of the size) or when we insert the last block + if((semaCount > (pChannel->channelPbSize >> 3)) || (size <= pChannel->maxBlockSize)) + { + addNonStallIntr = NV_TRUE; + semaCount = 0; + } + else + { + addNonStallIntr = NV_FALSE; + } + // the finsh payload corresponds to inserting a token for every call to scrub that finishes + if((insertFinishPayload) && (size <= pChannel->maxBlockSize)) + { + addFinishPayload = NV_TRUE; + NV_PRINTF(LEVEL_INFO, "Inserting Finish Payload!!!!!!!!!!\n"); + } + else + { + addFinishPayload = NV_FALSE; + } + if(_checkSynchronization(pGpu, pMemoryManager, pChannel, BLOCK_INDEX_FROM_ADDR(dst, pChannel->blockShift))) + { + bytesPushed = _ceChannelPushMethodsBlock_GM107(pGpu, pMemoryManager, pChannel, + src, srcAddressSpace, srcCpuCacheAttrib, /*src parameters*/ + dst, dstAddressSpace, dstCpuCacheAttrib, /*dst parameters*/ + blockSize, &ptr, NV_TRUE, (addNonStallIntr && !blocking), + addFinishPayload, bMemcopy); + spaceInPb = spaceInPb - bytesPushed; + numBytes = numBytes + bytesPushed; + semaCount = semaCount + bytesPushed; + blocksPushed++; + // we are done pushing all methods + } + + dst += (NvU64) blockSize; + if (bMemcopy) + src += (NvU64) blockSize; + size -= (NvU64) blockSize; + + if(size == 0) + { + _ceChannelUpdateGpFifo_GM107(pGpu, pMemoryManager, pChannel, gpBase, numBytes); + return blocksPushed; + } + } + spaceInPb = _getSpaceInPb(pChannel); + if(spaceInPb <= (pChannel->methodSizePerBlock + MAX_EXTRA_PAYLOAD)) + { + //no space in pb to push all blocks so put what we have and wait for space + if(numBytes > 0) + { + _ceChannelUpdateGpFifo_GM107(pGpu, pMemoryManager, pChannel, gpBase, numBytes); + } + gpBase = pChannel->channelPutOffset; + numBytes = 0; + } + if (gpuCheckTimeout(pGpu, &timeout) == NV_ERR_TIMEOUT) + { + NV_ASSERT_FAILED("Timed out waiting for Space in PB!"); + return NV_ERR_GENERIC; + } + } while(1); +} + + +/*! + * This function checks if the block has already been submitted + * or scrubbed based on 2 bitmaps. One is a pending bitmap + * updated by the CPU and one is a "Finished" bitmap updated by + * the GPU + * + * @param[in] pChannel OBJCHANNEL pointer + * @param[in] block block number + * + * @returns Bool + */ +static NvBool +_checkSynchronization +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, + NvU32 block +) +{ + NvU32 blockSema; + + if (!pChannel->isChannelSynchronized) + { + //synchronization is not required for this channel + return NV_TRUE; + } + + blockSema = MEM_RD32((NvU8*)pChannel->pbBitMapVA + ((block/32)*4)); + + if( ((blockSema) & (1 << (block%32))) == 0 ) + { + if (((pChannel->pBlockPendingState[block / 32] & (1 << (block % 32))) == 0) && + ((pChannel->pBlockDoneState[block / 32] & (1 << (block % 32))) == 0) ) + { + pChannel->pBlockPendingState[block / 32] |= (1 << (block % 32)); + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/*! 
+ * Updates the GPfifo with the methods in the PB for + * the given channel + * @param[in] pChannel OBJCHANNEL pointer + * @param[in] gpOffset Offset in the PB + * @param[in] gpSize Size of segment + * @returns None + */ +static void +_ceChannelUpdateGpFifo_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, + NvU32 gpOffset, + NvU32 gpSize + +) +{ + RMTIMEOUT timeout; + NvU32 GPPut; + NvU32 GPGet; + NvU64 get; + NvU32 length; + NvU32 *pGpEntry; + NvU32 GpEntry0; + NvU32 GpEntry1; + NvU32 GPPutNext; + NvU32 workSubmitToken = 0; + KernelChannel *pFifoKernelChannel; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + GPPut = MEM_RD32(&pChannel->pControlGPFifo->GPPut); + GPGet = MEM_RD32(&pChannel->pControlGPFifo->GPGet); + + GPPutNext = (GPPut + 1) % pChannel->channelNumGpFifioEntries; + + NV_PRINTF(LEVEL_INFO, "Put %d Get %d PutNext%d\n", GPPut, GPGet, + GPPutNext); + + NV_PRINTF(LEVEL_INFO, "gp Base 0x%x, Size %d\n", (NvU32)(gpOffset), + gpSize); + + // if the size passed is zero do not update gpput + if (gpSize == 0) + return; + + if (GPPut >= pChannel->channelNumGpFifioEntries) + { + // if the Put pointer is invalid, the GPU is likely inaccessible + NV_PRINTF(LEVEL_INFO, "invalid Put %u >= %u\n", GPPut, + pChannel->channelNumGpFifioEntries); + return; + } + + while (GPPutNext == GPGet) + { + // need to wait for space + GPGet = MEM_RD32(&pChannel->pControlGPFifo->GPGet); + + if (gpuCheckTimeout(pGpu, &timeout) == NV_ERR_TIMEOUT) + { + NV_ASSERT_FAILED("Timed Out waiting for space in GPFIFIO!"); + return; + } + else if (GPGet >= pChannel->channelNumGpFifioEntries) + { + // if the Get pointer is invalid, the GPU is likely inaccessible + NV_PRINTF(LEVEL_INFO, "invalid Get %u >= %u\n", GPGet, + pChannel->channelNumGpFifioEntries); + return; + } + } + + get = pChannel->pbGpuVA + gpOffset; + length = gpSize; + + GpEntry0 = + DRF_DEF(906F, _GP_ENTRY0, _NO_CONTEXT_SWITCH, _FALSE) | + DRF_NUM(906F, _GP_ENTRY0, _GET, NvU64_LO32(get) >> 2); + GpEntry1 = + DRF_NUM(906F, _GP_ENTRY1, _GET_HI, NvU64_HI32(get)) | + DRF_NUM(906F, _GP_ENTRY1, _LENGTH, length >> 2) | + DRF_DEF(906F, _GP_ENTRY1, _PRIV, _USER) | + DRF_DEF(906F, _GP_ENTRY1, _LEVEL, _MAIN); + + + pGpEntry = (NvU32 *)(((NvU8*)pChannel->pbCpuVA) + pChannel->channelPbSize + + GPPut*NV906F_GP_ENTRY__SIZE); + + MEM_WR32(&pGpEntry[0], GpEntry0); + MEM_WR32(&pGpEntry[1], GpEntry1); + + // need to flush WRC buffer + osFlushCpuWriteCombineBuffer(); + + // write gpput + MEM_WR32(&pChannel->pControlGPFifo->GPPut, GPPutNext); + osFlushCpuWriteCombineBuffer(); + + if (kfifoIsLiteModeEnabled_HAL(pGpu, pKernelFifo)) + { + NV_ASSERT_OR_RETURN_VOID(0); + } + else + { + workSubmitToken = pChannel->workSubmitToken; + NV_ASSERT_OR_RETURN_VOID(CliGetKernelChannelWithDevice(pChannel->hClient, + pChannel->deviceId, pChannel->channelId, + &pFifoKernelChannel) == NV_OK); + } + if (!kchannelIsRunlistSet(pGpu, pFifoKernelChannel)) + { + NV_PRINTF(LEVEL_ERROR, + "FAILED Channel 0x%x is not assigned to runlist yet\n", + kchannelGetDebugTag(pFifoKernelChannel)); + return; + } + // update doorbell register + kfifoUpdateUsermodeDoorbell_HAL(pGpu, pKernelFifo, workSubmitToken, kchannelGetRunlistId(pFifoKernelChannel)); +} + +/*! 
+ * Inserts methods into the push buffer for one block + * + * @param[in] pChannel OBJCHANNEL pointer + * @param[in] src Offset of src to copy from + * @param[in] srcAddressSpace source surface address space type + * @param[in] srcCpuCacheAttrib source surface address space attributes + * @param[in] dst Offset of dst to scrub/copy to + * @param[in] dstAddressSpace destination surface address space type + * @param[in] dstCpuCacheAttrib destination surface address space attributes + * @param[in] pPtr Double pointer to PB offset + * @returns None + */ +static void +_ceChannelPushMethodAperture_GM107 +( + OBJCHANNEL *pChannel, + NV_ADDRESS_SPACE srcAddressSpace, + NvU32 srcCpuCacheAttrib, + NV_ADDRESS_SPACE dstAddressSpace, + NvU32 dstCpuCacheAttrib, + NvU32 **pPtr +) +{ + NvU32 *ptr = *pPtr; + NvU32 data = 0; + + // Set source parameters + data = ((srcAddressSpace == ADDR_FBMEM) ? DRF_DEF(B0B5, _SET_SRC_PHYS_MODE, _TARGET, _LOCAL_FB) : + (srcCpuCacheAttrib == NV_MEMORY_CACHED) ? DRF_DEF(B0B5, _SET_SRC_PHYS_MODE, _TARGET, _COHERENT_SYSMEM) : + DRF_DEF(B0B5, _SET_SRC_PHYS_MODE, _TARGET, _NONCOHERENT_SYSMEM)); + + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_SET_SRC_PHYS_MODE, data); + + // Set destination parameters + data = ((dstAddressSpace == ADDR_FBMEM) ? DRF_DEF(B0B5, _SET_DST_PHYS_MODE, _TARGET, _LOCAL_FB) : + (dstCpuCacheAttrib == NV_MEMORY_CACHED) ? DRF_DEF(B0B5, _SET_DST_PHYS_MODE, _TARGET, _COHERENT_SYSMEM) : + DRF_DEF(B0B5, _SET_DST_PHYS_MODE, _TARGET, _NONCOHERENT_SYSMEM)); + + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_SET_DST_PHYS_MODE, data); + + *pPtr = ptr; +} + +/*! + * Inserts methods into the push buffer for one block + * + * @param[in] pChannel OBJCHANNEL pointer + * @param[in] src Offset of src to copy from + * @param[in] srcAddressSpace source surface address space type + * @param[in] srcCpuCacheAttrib source surface address space attributes + * @param[in] dst Offset of dst to scrub/copy to + * @param[in] dstAddressSpace destination surface address space type + * @param[in] dstCpuCacheAttrib destination surface address space attributes + * @param[in] size size of the region to scrub/copy + * @param[in] pPtr Double pointer to PB offset + * @param[in] addPayloadSema Bool to add default payload + * @param[in] addNonStallInt Bool to add a non stall at the end + * @param[in] addFinishPayload Bool to add an extra sema release for token + * @param[in] bMemcopy NV_TRUE for memcopy / NV_FALSE for scrubbing + * @returns None + */ +static NvU32 +_ceChannelPushMethodsBlock_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *channel, + RmPhysAddr src, + NV_ADDRESS_SPACE srcAddressSpace, + NvU32 srcCpuCacheAttrib, + RmPhysAddr dst, + NV_ADDRESS_SPACE dstAddressSpace, + NvU32 dstCpuCacheAttrib, + NvU64 size, + NvU32 **pPtr, + NvBool addPayloadSema, + NvBool addNonStallIntr, + NvBool addFinishPayload, + NvBool bMemcopy +) +{ + NvU32 launchParams = 0; + NvU32 *ptr = *pPtr; + NvU32 *pStartPtr = ptr; + NvBool addReductionOp = channel->isChannelSynchronized; + NvU32 remapConstB = 0; + NvU32 remapComponentSize = 0; + + NV_PRINTF(LEVEL_INFO, "Base = 0x%llx, Size = 0x%llx, PB location = %p\n", + dst, size, ptr); + + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NV906F_SET_OBJECT, channel->classEngineID); + + if (size > 0) + { + NvU32 payLoad = channel->channelPutOffset + channel->methodSizePerBlock; + + if (addNonStallIntr) payLoad = payLoad + NONSTALL_METHOD_SIZE; + if (addReductionOp) payLoad = payLoad + SEMAPHORE_ONLY_METHOD_SIZE; + if (addFinishPayload) payLoad = 
payLoad + SEMAPHORE_ONLY_METHOD_SIZE; + + if (addPayloadSema) + { + memmgrChannelPushSemaphoreMethodsBlock_HAL(pMemoryManager, + NVA06F_SUBCHANNEL_COPY_ENGINE, + channel->pbGpuVA+channel->semaOffset, payLoad, &ptr); + + NV_PRINTF(LEVEL_INFO, "Pushing Semaphore Payload 0x%x\n", payLoad); + channel->lastPayloadPushed = payLoad; + } + + if (IS_SIMULATION(pGpu)) + { + // + // fmodel CE is slow (compared to emulation) so we don't bother + // scrubbing the whole block. Fmodel already scrubs memory via ramif + // so we'll never get exceptions + // + size = NV_MIN(size, 0x20); + } + + memmgrChannelPushAddressMethodsBlock_HAL(pMemoryManager, NV_FALSE, + NVA06F_SUBCHANNEL_COPY_ENGINE, dst, &ptr); + + if (bMemcopy) + { + memmgrChannelPushAddressMethodsBlock_HAL(pMemoryManager, NV_TRUE, + NVA06F_SUBCHANNEL_COPY_ENGINE, src, &ptr); + + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_LINE_LENGTH_IN, NvU64_LO32(size)); + } + else + { + { + remapComponentSize = DRF_DEF(B0B5, _SET_REMAP_COMPONENTS, _COMPONENT_SIZE, _FOUR); + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_LINE_LENGTH_IN, NvU64_LO32(size >> 2)); + } + + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_SET_REMAP_COMPONENTS, + DRF_DEF(B0B5, _SET_REMAP_COMPONENTS, _DST_X, _CONST_A) | + DRF_DEF(B0B5, _SET_REMAP_COMPONENTS, _NUM_SRC_COMPONENTS, _ONE) | + DRF_DEF(B0B5, _SET_REMAP_COMPONENTS, _NUM_DST_COMPONENTS, _ONE) | + remapComponentSize | + remapConstB); + + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_SET_REMAP_CONST_A, 0x00000000); + + NV_ASSERT(srcAddressSpace == 0); + NV_ASSERT(dstAddressSpace == ADDR_FBMEM); + + srcAddressSpace = ADDR_FBMEM; + } + + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_LINE_COUNT, 1); + + _ceChannelPushMethodAperture_GM107(channel, srcAddressSpace, srcCpuCacheAttrib, dstAddressSpace, dstCpuCacheAttrib, &ptr); + + launchParams = DRF_DEF(B0B5, _LAUNCH_DMA, _INTERRUPT_TYPE, _NONE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _SRC_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(B0B5, _LAUNCH_DMA, _SRC_TYPE, _PHYSICAL) | + DRF_DEF(B0B5, _LAUNCH_DMA, _DST_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(B0B5, _LAUNCH_DMA, _DST_TYPE, _PHYSICAL) | + DRF_DEF(B0B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _PIPELINED); + + if (addPayloadSema) + { + launchParams |= DRF_DEF(B0B5, _LAUNCH_DMA, _SEMAPHORE_TYPE, _RELEASE_ONE_WORD_SEMAPHORE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _FLUSH_ENABLE, _TRUE); + } + else + { + launchParams |= DRF_DEF(B0B5, _LAUNCH_DMA, _SEMAPHORE_TYPE, _NONE); + } + + { + if (!bMemcopy) + { + launchParams |= DRF_DEF(B0B5, _LAUNCH_DMA, _REMAP_ENABLE, _TRUE); + } + + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_LAUNCH_DMA, launchParams); + } + } + + if (addReductionOp) + { + NvU32 currentBlock = BLOCK_INDEX_FROM_ADDR((dst), channel->blockShift); + NvU32 blockOffset; + NvU32 bitFlip; + + blockOffset = (currentBlock / 32) * 4; + bitFlip = ((NvU32)1 << (currentBlock % 32)); + memmgrChannelPushSemaphoreMethodsBlock_HAL(pMemoryManager, + NVA06F_SUBCHANNEL_COPY_ENGINE, + channel->pbGpuBitMapVA+(blockOffset), bitFlip, &ptr); + + launchParams = DRF_DEF(B0B5, _LAUNCH_DMA, _SEMAPHORE_TYPE, _RELEASE_ONE_WORD_SEMAPHORE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _INTERRUPT_TYPE, _NONE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _FLUSH_ENABLE, _TRUE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _REMAP_ENABLE, _TRUE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _SRC_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(B0B5, _LAUNCH_DMA, _DST_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(B0B5, _LAUNCH_DMA, _SEMAPHORE_REDUCTION_ENABLE, _TRUE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _SEMAPHORE_REDUCTION_SIGN, _UNSIGNED) | + DRF_DEF(B0B5, _LAUNCH_DMA, 
_SEMAPHORE_REDUCTION, _IOR) | + DRF_DEF(B0B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _NONE); + // push only the second semaphore release + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_LAUNCH_DMA, launchParams); + } + + if (addFinishPayload) + { + memmgrChannelPushSemaphoreMethodsBlock_HAL(pMemoryManager, + NVA06F_SUBCHANNEL_COPY_ENGINE, + channel->pbGpuVA+channel->finishPayloadOffset, + channel->finishPayload, &ptr); + + launchParams = DRF_DEF(B0B5, _LAUNCH_DMA, _SEMAPHORE_TYPE, _RELEASE_ONE_WORD_SEMAPHORE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _INTERRUPT_TYPE, _NONE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _FLUSH_ENABLE, _TRUE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _REMAP_ENABLE, _TRUE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _SRC_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(B0B5, _LAUNCH_DMA, _DST_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(B0B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _NONE); + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NVB0B5_LAUNCH_DMA, launchParams); + NV_PRINTF(LEVEL_INFO, "Pushing Finishing Semaphore Payload 0x%x\n", + channel->finishPayload); + } + + if (addNonStallIntr) + { + PUSH_PAIR(NVA06F_SUBCHANNEL_COPY_ENGINE, NV906F_NON_STALL_INTERRUPT, 0); + } + + channel->channelPutOffset = (NvU32)((NvU8 *)ptr - (NvU8 *)channel->pbCpuVA); + *pPtr = ptr; + + // return length of methods inserted + return (NvU32)((NvU8*)ptr - (NvU8*)pStartPtr); +} + +/*! + * Getting the Copy Engine Class + * + * @param[in] pGpu OBJGPU pointer + * @param[out] pClass pointer to class + */ +NV_STATUS +memmgrMemUtilsGetCopyEngineClass_GM107 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 *pClass +) +{ + NV_STATUS status; + NvU32 numClasses; + NvU32 *pClassList = NULL; + NvU32 i; + NvU32 class = 0; + NvU32 eng; + + // + // Pascal+ chips will have any combination of the 6 CEs + // available. Loop over all the CEs to get the CE class + // for the first available CE instead of using ENG_CE(0) + // + for (eng = 0; eng < ENG_CE__SIZE_1; eng++) + { + NV_ASSERT_OK_OR_ELSE( + status, + gpuGetClassList(pGpu, &numClasses, NULL, ENG_CE(eng)), + return 0); + + if (numClasses > 0) + { + break; + } + } + + pClassList = portMemAllocNonPaged(sizeof(*pClassList) * numClasses); + NV_ASSERT_OR_RETURN((pClassList != NULL), 0); + + if (NV_OK == gpuGetClassList(pGpu, &numClasses, pClassList, ENG_CE(eng))) + { + for (i = 0; i < numClasses; i++) + { + class = NV_MAX(class, pClassList[i]); + } + } + + NV_ASSERT(class != 0); + portMemFree(pClassList); + *pClass = class; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/virt_mem_allocator_gm107.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/virt_mem_allocator_gm107.c new file mode 100644 index 000000000..809f31514 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/maxwell/virt_mem_allocator_gm107.c @@ -0,0 +1,2363 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/*! + * @file + * @brief The FERMI specific HAL VMA routines reside in this file + * + * =========================================================================== + * GLOSSARY OF INCONSISTENCIES + * =========================================================================== + * + * -------- + * LOW, MIN + * -------- + * (1) Synonyms for the first address or index in a range. + * e.g. In the inclusive range 37 to 509, the "low" or "min" is 37. + * + * --------------------- + * HIGH, MAX, LIMIT, END + * --------------------- + * (1) Synonyms for the last address or index in a range. + * e.g. In the inclusive range 37 to 509, the "limit" is 509. + * (2) Sometimes "high" or "end" are used for the "limit plus one" - e.g. 510. + * Currently this can only be determined by context. + * TODO: Decide on consistent terms and clean this up. + * + * --- + * PDB + * --- + * (1) Page Directory Base + * The base address of a page directory, + * e.g. written to the PD_BASE field of an instance block. + * (2) Page Directory Block + * The entire physical memory block of a page directory, + * e.g. described by a memdesc associated with a VASPACE object. + * (3) Property DataBase - e.g. in PDB_PROP_* + * The common meaning to the rest of RM - boolean values associated + * with an object. Completely unrelated to (1) and (2). + * + * --- + * PDE + * --- + * (1) Page Directory Entry + * An *ENTRY* within a page directory, containing the physical + * addresses and attributes of a single small/big page table pair. + * (2) !!!WRONG!!! The page direcory itself + * Somtimes also used in the plural form "PDEs". + * Use "page directory" or "PD" instead. + * + * -------------------------- + * PDE ENTRY !!!DO NOT USE!!! + * -------------------------- + * (1) !!!WRONG!!! Page Directory Entry Entry(?!) + * This is redundant - just use "PDE". + * (2) Page Dir*E*ctory Entry + * Desperate bacronym to justify current usage. + * + * -------- + * PDE SIZE + * -------- + * (1) Size or index corresponding to the NV_MMU_PDE_SIZE field of a PDE. + * This refers to the size of *page tables* that this + * PDE points to (1/8, 1/4, 1/2, full), not the size of the PDE itself. + * The more accurate term is "PT size" - most code has been cleaned up + * to use this instead, but some API params remain. + * (2) Size of the PDE itself (8 bytes), defined by the constant NV_MMU_PDE__SIZE. + * + * --- + * PTE + * --- + * (1) Page Table Entry + * An *ENTRY* within a page table, containing the physical + * address and attributes of a single page (small or big). + * (2) !!!WRONG!!! The page table itself + * Somtimes also used in the plural form "PTEs". + * Use "page table" or "PT" instead. + * + * -------------------------- + * PTE ENTRY !!!DO NOT USE!!! + * -------------------------- + * (1) !!!WRONG!!! Page Table Entry Entry(?!) + * This is redundant - just use "PTE". + * (2) Page Tabl*E* Entry + * Desperate bacronym to justify current usage. 
+ * + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "lib/base_utils.h" +#include "gpu/mem_mgr/heap.h" +#include "os/os.h" +#include "rmapi/client.h" +#include "nvRmReg.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "gpu/bif/kernel_bif.h" +#include "core/system.h" +#include "core/thread_state.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/fabric_vaspace.h" +#include "mem_mgr/virt_mem_mgr.h" + +#include "mem_mgr/fla_mem.h" + +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu_mgr/gpu_group.h" +#include "mmu/mmu_fmt.h" +#include "gpu/device/device.h" +#include "gpu/nvlink/kernel_nvlink.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "gpu/mem_mgr/fermi_dma.h" + +#include "published/maxwell/gm107/dev_mmu.h" +#include "published/maxwell/gm107/dev_bus.h" + +#include "ctrl/ctrl0002.h" + +#include "vgpu/rpc.h" + +#define _MMUXLATEVADDR_FLAG_SHOW_INVALID NVBIT(0) +#define _MMUXLATEVADDR_FLAG_VALIDATE_ONLY NVBIT(1) // incomplete +#define _MMUXLATEVADDR_FLAG_VALIDATE_TERSELY NVBIT(2) // incomplete +// no trace output +#define _MMUXLATEVADDR_FLAG_XLATE_ONLY _MMUXLATEVADDR_FLAG_VALIDATE_TERSELY + +static NV_STATUS _dmaGetFabricAddress(OBJGPU *pGpu, NvU32 aperture, NvU32 kind, NvU64 *fabricAddr); + +static NV_STATUS +_dmaApplyWarForBug2720120 +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi +); + +// +// Virtual Address Space Block - Data tracked per virtual allocation +// +// only used with NV_REG_STR_RESERVE_PTE_SYSMEM_MB. Protential dead code. +typedef struct VASINFO_MAXWELL +{ + PNODE pMapTree; // Tree of current mappings. + NvU32 pageSizeMask; // Mask of page size indices supported. + // See VAS_PAGESIZE_MASK. + VAS_ALLOC_FLAGS flags; + VA_MANAGEMENT management; // Level of management. +} VASINFO_MAXWELL, *PVASINFO_MAXWELL; + +/*! + * @brief Allocate virtual memory and map it to physical memory. + * + * The virtual memory may already be allocated, in which case it is just + * initialized (backing page table tables allocated). + * + * VMM-TODO: If possible remove overloading - e.g. just map, never allocate. + * Definitely move MMU stuff down. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pDma VirtMemAllocator pointer + * @param[in] pVAS OBJVASPACE pointer + * @param[in] pMemDesc Physical memory descriptor + * @param[in/out] pVaddr Pointer to Virtual memory base address + * @param[in] flags Mapping options + * @param[in] pDmaMappingInfo CLI_DMA_MAPPING_INFO pointer (for RM Client mappings) + * @param[in] swizzId SMC swizzId (Only used for BAR1 mapping) + * + * @returns NV_STATUS status = NV_OK on success, or status upon failure. 
+ */ +NV_STATUS +dmaAllocMapping_GM107 +( + OBJGPU *pGpu, + VirtMemAllocator *pDma, + OBJVASPACE *pVAS, + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 *pVaddr, + NvU32 flags, + CLI_DMA_ALLOC_MAP_INFO *pCliMapInfo, + NvU32 swizzId +) +{ + NV_STATUS status = NV_OK; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + OBJEHEAP *pVASpaceHeap = NULL; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + FABRIC_VASPACE *pFabricVAS = dynamicCast(pGpu->pFabricVAS, FABRIC_VASPACE); + MEMORY_DESCRIPTOR *pAdjustedMemDesc = pMemDesc; + ADDRESS_TRANSLATION addressTranslation; + NvU32 gfid; + NvBool bCallingContextPlugin; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + struct + { + NvU32 pteCount; + NvU32 pageCount4k; + NvU32 overMap; + NvU64 vaLo; + NvU64 vaHi; + NvU64 mapLength; + NvU64 pageOffset; + NvU64 pageSize; + NvU64 vaRangeLo; + NvU64 vaRangeHi; + NvU32 kind; + NvU32 priv; + NvU32 cacheSnoop; + COMPR_INFO comprInfo; + NvU32 aperture; + NvU32 tlbLock; + NvU32 p2p; + NvU32 writeOnly; + NvU32 readOnly; + NvU32 subDevIdSrc; + NvU32 deferInvalidate; + NODE *pMapNode; + NvU32 shaderFlags; + NvU32 disableEncryption; + VASINFO_MAXWELL *pVASInfo; + OBJGPU *pSrcGpu; + NvU32 peerNumber; + NvBool bAllocVASpace; + NvBool bIsBarOrPerf; + NvBool bIsBar1; + NvBool bIsMIGMemPartitioningEnabled; + RmPhysAddr *pPteArray; + DMA_PAGE_ARRAY pageArray; + NvU64 vaspaceBigPageSize; + NvBool bIsMemContiguous; + NvU64 fabricAddr; + NvU32 indirectPeer; + NvBool bFlaImport; + NV_RANGE totalVaRange; + MEMORY_DESCRIPTOR *pRootMemDesc; + MEMORY_DESCRIPTOR *pTempMemDesc; + Memory *pMemory; + } *pLocals = portMemAllocNonPaged(sizeof(*pLocals)); + // Heap Allocate to avoid stack overflow + + if (pLocals == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pLocals, 0, sizeof(*pLocals)); + pLocals->pSrcGpu = pGpu; + pLocals->peerNumber = BUS_INVALID_PEER; + pLocals->totalVaRange = NV_RANGE_EMPTY; + + if (pCliMapInfo != NULL) + pLocals->pMemory = pCliMapInfo->pMemory; + + pLocals->vaspaceBigPageSize = vaspaceGetBigPageSize(pVAS); + pLocals->bIsBarOrPerf = (vaspaceGetFlags(pVAS) & + (VASPACE_FLAGS_BAR|VASPACE_FLAGS_PERFMON|VASPACE_FLAGS_HDA)) != 0; + pLocals->p2p = DRF_VAL(OS46, _FLAGS, _P2P_ENABLE, flags); + pLocals->subDevIdSrc = DRF_VAL(OS46, _FLAGS, _P2P_SUBDEV_ID_SRC, flags); + pLocals->deferInvalidate = FLD_TEST_DRF(OS46, _FLAGS, _DEFER_TLB_INVALIDATION, _TRUE, flags) ? + DMA_DEFER_TLB_INVALIDATE : DMA_TLB_INVALIDATE; + pLocals->bAllocVASpace = FLD_TEST_DRF(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, _FALSE, flags); + pLocals->bIsBar1 = (vaspaceGetFlags(pVAS) & VASPACE_FLAGS_BAR_BAR1) != 0; + pLocals->bIsMIGMemPartitioningEnabled = (pKernelMIGManager != NULL) && kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager); + + pLocals->cacheSnoop = (NVOS46_FLAGS_CACHE_SNOOP_ENABLE == DRF_VAL(OS46, _FLAGS, _CACHE_SNOOP, flags)); + pLocals->writeOnly = (NVOS46_FLAGS_ACCESS_WRITE_ONLY == DRF_VAL(OS46, _FLAGS, _ACCESS, flags)); + pLocals->readOnly = (NVOS46_FLAGS_ACCESS_READ_ONLY == DRF_VAL(OS46, _FLAGS, _ACCESS, flags)) ? + DMA_UPDATE_VASPACE_FLAGS_READ_ONLY : 0; + pLocals->tlbLock = (NVOS46_FLAGS_TLB_LOCK_ENABLE == DRF_VAL(OS46, _FLAGS, _TLB_LOCK, flags)) ? 
+ DMA_UPDATE_VASPACE_FLAGS_TLB_LOCK : 0; + + switch (DRF_VAL(OS46, _FLAGS, _SHADER_ACCESS, flags)) + { + default: + case NVOS46_FLAGS_SHADER_ACCESS_DEFAULT: + // The default (0) case we pick up the SHADER_ACCESS from ACCESS. + pLocals->shaderFlags = 0; + if (pLocals->readOnly) + pLocals->shaderFlags |= DMA_UPDATE_VASPACE_FLAGS_SHADER_READ_ONLY; + if (pLocals->writeOnly) + pLocals->shaderFlags |= DMA_UPDATE_VASPACE_FLAGS_SHADER_WRITE_ONLY; + break; + case NVOS46_FLAGS_SHADER_ACCESS_READ_WRITE: + pLocals->shaderFlags = 0; + break; + case NVOS46_FLAGS_SHADER_ACCESS_READ_ONLY: + pLocals->shaderFlags = DMA_UPDATE_VASPACE_FLAGS_SHADER_READ_ONLY; + break; + case NVOS46_FLAGS_SHADER_ACCESS_WRITE_ONLY: + pLocals->shaderFlags = DMA_UPDATE_VASPACE_FLAGS_SHADER_WRITE_ONLY; + break; + } + + addressTranslation = VAS_ADDRESS_TRANSLATION(pVAS); + // In SRIOV-heavy plugin may map subheap allocations for itself using BAR1 + NV_ASSERT_OK_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin)); + if (bCallingContextPlugin) + addressTranslation = FORCE_VMMU_TRANSLATION(pMemDesc, addressTranslation); + + if (pFabricVAS != NULL) + { + status = fabricvaspaceGetGpaMemdesc(pFabricVAS, pMemDesc, pGpu, &pAdjustedMemDesc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get the adjusted memdesc for the fabric memdesc\n"); + return status; + } + } + + // Get pageSize + pLocals->pTempMemDesc = memdescGetMemDescFromGpu(pAdjustedMemDesc, pGpu); + pLocals->pageSize = memdescGetPageSize(pLocals->pTempMemDesc, addressTranslation); + + if (memdescGetFlag(pLocals->pTempMemDesc, MEMDESC_FLAGS_DEVICE_READ_ONLY)) + { + NV_ASSERT_OR_ELSE((pLocals->readOnly == DMA_UPDATE_VASPACE_FLAGS_READ_ONLY), + status = NV_ERR_INVALID_ARGUMENT; goto cleanup); + } + + // For verify purposes we should allow small page override for mapping. + // This will be used for testing VASpace interop. + if (RMCFG_FEATURE_PLATFORM_MODS && + FLD_TEST_DRF(OS46, _FLAGS, _PAGE_SIZE, _4KB, flags) && + kgmmuIsVaspaceInteropSupported(pKernelGmmu)) + { + pLocals->pageSize = RM_PAGE_SIZE; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescSetPageSize(memdescGetMemDescFromGpu(pAdjustedMemDesc, pGpu), addressTranslation, (NvU32)pLocals->pageSize); + SLI_LOOP_END + } + else if (kgmmuIsPerVaspaceBigPageEn(pKernelGmmu) && + (pLocals->pageSize >= RM_PAGE_SIZE_64K)) + { + NV_ASSERT(pLocals->pageSize != RM_PAGE_SIZE_HUGE); + + // + // This is a temp WAR till the memdesc->_pageSize is cleaned up + // If the memdesc page size is >= the smallest big page size then + // we will correct it to the Big page size of the VASpace + // + pLocals->pageSize = pLocals->vaspaceBigPageSize; + } + + // + // Force BAR1 VA pageSize at bigPageSize only if total BAR1 size is less + // than threshold(default: 256MB) to not waste BAR1. + // For large BAR1 SKUs, avoid forcing 64KB size and use the pagesize of + // the memdesc. 
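The BAR1 branch below only clamps the mapping page size when the requested size exceeds the VA space's big page size and the force-64KB mapping property is set. A compact restatement of that clamp with illustrative names (the real check additionally requires BAR1 and VA-space interop support):

#include <stdint.h>

/* Clamp a requested mapping page size for BAR1 when the force-64KB policy applies. */
static uint64_t bar1_effective_page_size(uint64_t requested,
                                         uint64_t big_page_size,
                                         int force_64k_mappings)
{
    if (force_64k_mappings && requested > big_page_size)
        return big_page_size;   /* typically 64KB */
    return requested;
}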
+ // + if (kgmmuIsVaspaceInteropSupported(pKernelGmmu) && + pLocals->bIsBar1) + { + if ((pLocals->pageSize > pLocals->vaspaceBigPageSize) && + kbusIsBar1Force64KBMappingEnabled(pKernelBus)) + { + pLocals->pageSize = pLocals->vaspaceBigPageSize; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescSetPageSize(memdescGetMemDescFromGpu(pAdjustedMemDesc, pGpu), + addressTranslation, (NvU32)pLocals->pageSize); + SLI_LOOP_END + } + } + + // Get mapping params on current gpu memdesc + pLocals->pageOffset = memdescGetPhysAddr(pLocals->pTempMemDesc, addressTranslation, 0) & (pLocals->pageSize - 1); + pLocals->mapLength = RM_ALIGN_UP(pLocals->pageOffset + pLocals->pTempMemDesc->Size, pLocals->pageSize); + pLocals->pageCount4k = NvU64_LO32(pLocals->mapLength >> RM_PAGE_SHIFT); + pLocals->bIsMemContiguous = memdescGetContiguity(pLocals->pTempMemDesc, addressTranslation); + + pLocals->kind = NV_MMU_PTE_KIND_PITCH; + + // Get compression/pte pLocals->kind on current gpu memdesc + status = memmgrGetKindComprFromMemDesc(pMemoryManager, + pLocals->pTempMemDesc, + 0, + &pLocals->kind, &pLocals->comprInfo); + + if (NV_OK != status) + goto cleanup; + +#ifdef DEBUG + // Check for subdevices consistency if broadcast memdesc is passed in + if (memdescHasSubDeviceMemDescs(pAdjustedMemDesc)) + { + // Check pageOffset, pageSize consistency across subdevices + memdescCheckSubDevicePageSizeConsistency(pGpu, pAdjustedMemDesc, pVAS, pLocals->pageSize, pLocals->pageOffset); + + // Check mem contiguity consistency across subdevices + memdescCheckSubDeviceMemContiguityConsistency(pGpu, pAdjustedMemDesc, pVAS, pLocals->bIsMemContiguous); + + // Check compression/pte pLocals->kind consistency across subdevices + status = memdescCheckSubDeviceKindComprConsistency(pGpu, pAdjustedMemDesc, pVAS, + pLocals->kind, &pLocals->comprInfo); + NV_ASSERT(!status); + } +#endif + + // + // +-- +-- +------------+ --+ + // | | | | |==> pageOffset + // pageSize <==| | | Page 0 | --+ + // | | | | | + // +-- | +------------+ | + // | | | | + // | | Page 1 | | + // | | | | + // mapLength <==| +------------+ |==> pMemDesc->Size + // | | | | + // | | ... | | + // | | | | + // | +------------+ | + // | | | | + // | | Page N-1 | --+ + // | | | + // +-- +------------+ + // + + if (pLocals->bIsMemContiguous) + { + // FIXME: Throwing away physical length information is dangerous. + pLocals->pteCount = 1; + } + else + { + // FIXME: This is broken for page size > 4KB and page offset + // that crosses a page boundary (can overrun pPteArray). + pLocals->pteCount = pLocals->pageCount4k; + } + + // Disable PLC Compression for FLA->PA Mapping because of the HW Bug: 3046774 + if (pMemorySystemConfig->bUseRawModeComptaglineAllocation && + pMemorySystemConfig->bDisablePlcForCertainOffsetsBug3046774) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (((vaspaceGetFlags(pVAS) & VASPACE_FLAGS_FLA) || (dynamicCast(pVAS, FABRIC_VASPACE) != NULL)) && + memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, pLocals->kind) && + !memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_DISALLOW_PLC, pLocals->kind)) + { + memmgrGetDisablePlcKind_HAL(pMemoryManager, &pLocals->kind); + } + } + + if (pLocals->bIsBarOrPerf) + { + pLocals->totalVaRange = rangeMake(vaspaceGetVaStart(pVAS), vaspaceGetVaLimit(pVAS)); + + // !!!! Nasty hack + // + // NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP used to get the encryption info from _busMapAperture_GF100(). + // Since we have no bit fields left in NVOS46_FLAGS_* to specify encryption info. 
+ // This is applicable to FERMI+ chips. + // + // NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP is _NV50 specific, and is not used in FERMI+. + // NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_DEFAULT means use default encryption status + // NVOS46_FLAGS_PTE_COALESCE_LEVEL_CAP_1 means disable encryption + // + // VMM-TODO: Add meaningful alias defines or just expand flag bits? + // + pLocals->disableEncryption = FLD_TEST_DRF(OS46, _FLAGS, _PTE_COALESCE_LEVEL_CAP, _1, flags) ? + DMA_UPDATE_VASPACE_FLAGS_DISABLE_ENCRYPTION : 0; + + if (pLocals->bIsMemContiguous) + { + pLocals->overMap = pLocals->pageCount4k + NvU64_LO32((pLocals->pageOffset + RM_PAGE_MASK) / RM_PAGE_SIZE); + } + else + { + pLocals->overMap = pLocals->pageCount4k; + } + + NV_ASSERT_OK_OR_GOTO(status, vgpuGetCallingContextGfid(pGpu, &gfid), cleanup); + + // BAR1 VA space is split when MIG mem partitioning is enabled + if (pLocals->bIsBar1 && pLocals->bIsMIGMemPartitioningEnabled && IS_GFID_PF(gfid)) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + pLocals->totalVaRange = memmgrGetMIGPartitionableBAR1Range(pGpu, pMemoryManager); + NV_ASSERT_OK_OR_GOTO(status, + kmemsysSwizzIdToMIGMemRange(pGpu, pKernelMemorySystem, swizzId, pLocals->totalVaRange, &pLocals->totalVaRange), + cleanup); + } + + if (!FLD_TEST_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, flags)) + { + pLocals->vaRangeLo = pLocals->totalVaRange.lo; + pLocals->vaRangeHi = pLocals->totalVaRange.hi; + } + } + else + { + + NvU64 targetSpaceLength, targetSpaceBase, targetSpaceLimit; + + NV_ASSERT((pLocals->pageSize == pLocals->vaspaceBigPageSize) || + (pLocals->pageSize == RM_PAGE_SIZE) || + (pLocals->pageSize == RM_PAGE_SIZE_HUGE) || + (pLocals->pageSize == RM_PAGE_SIZE_512M)); + + pLocals->overMap = 0; + + if (pCliMapInfo != NULL) + { + VirtualMemory *pVirtualMemory = pCliMapInfo->pVirtualMemory; + + virtmemGetAddressAndSize(pVirtualMemory, &targetSpaceBase, &targetSpaceLength); + targetSpaceLimit = targetSpaceBase + targetSpaceLength - 1; + } + else + { + // RM internal mappings. Alt to dmaMapBuffer_HAL() + targetSpaceBase = vaspaceGetVaStart(pVAS); + targetSpaceLimit = vaspaceGetVaLimit(pVAS); + targetSpaceLength = targetSpaceLimit - targetSpaceBase + 1; + } + + if (pLocals->pteCount > ((targetSpaceLength + (RM_PAGE_SIZE-1)) / RM_PAGE_SIZE)) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + pVASpaceHeap = vaspaceGetHeap(pVAS); + + if (!FLD_TEST_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, flags)) + { + // offset of the context dma passed in when ctxdma allocated + // Virtual memory don't have any SMMU mapping. It is still OK to use the engine MMU context; it dont have any effect. + + pLocals->vaRangeLo = NV_MAX(targetSpaceBase, vaspaceGetVaStart(pVAS)); + pLocals->vaRangeHi = NV_MIN(targetSpaceLimit, vaspaceGetVaLimit(pVAS)); + + // + // Handle 32bit pointer requests. 32b pointers are forced below 32b + // on all chips. Non-32b requests are only forced on some chips, + // typically kepler, and only if there are no other address hints. + // + if (DRF_VAL(OS46, _FLAGS, _32BIT_POINTER, flags) == + NVOS46_FLAGS_32BIT_POINTER_ENABLE) + { + pLocals->vaRangeHi = NV_MIN(0xffffffff, pLocals->vaRangeHi); + } + else if (pDma->getProperty(pDma, PDB_PROP_DMA_ENFORCE_32BIT_POINTER) && + (pVASpaceHeap->free > NVBIT64(32))) // Pressured address spaces are exempt + { + pLocals->vaRangeLo = NV_MAX(NVBIT64(32), pLocals->vaRangeLo); + } + } + } + + // + // Align the virtual address passed in down to the page size. 
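A worked example of the alignment arithmetic used around here: pageOffset and mapLength were derived from the memdesc further up, and the virtual base is now aligned down to the page size. All numbers below are made up purely for illustration:

#include <assert.h>
#include <stdint.h>

/* Power-of-two alignment helpers, equivalent in spirit to RM_ALIGN_DOWN/UP. */
#define ALIGN_DOWN(v, a)  ((v) & ~((uint64_t)(a) - 1))
#define ALIGN_UP(v, a)    (ALIGN_DOWN((v) + (a) - 1, (a)))

int main(void)
{
    const uint64_t pageSize  = 0x10000;      /* 64KB big page        */
    const uint64_t physAddr  = 0x12345678;   /* arbitrary FB offset  */
    const uint64_t allocSize = 0x2000;       /* 8KB surface          */

    uint64_t pageOffset = physAddr & (pageSize - 1);                  /* 0x5678     */
    uint64_t mapLength  = ALIGN_UP(pageOffset + allocSize, pageSize); /* 0x10000    */
    uint64_t vaLo       = ALIGN_DOWN(0xABCD5678, pageSize);           /* 0xABCD0000 */

    assert(pageOffset == 0x5678);
    assert(mapLength  == 0x10000);
    assert(vaLo       == 0xABCD0000);
    /* the VA returned to the caller is vaLo + pageOffset */
    return 0;
}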
+ // + // There is no requirement that the physical offset of a mapping + // be page-aligned, so we need to map the entire page that contains + // the desired offset. We then add the page offset + // onto the returned virtual address. + // + pLocals->vaLo = RM_ALIGN_DOWN(*pVaddr, pLocals->pageSize); + + if (pLocals->bAllocVASpace) + { + // + // TODO: This flag handling logic should be consolidated with dmaMapBuffer_GM107 + // when old path removed. + // + VAS_ALLOC_FLAGS allocFlags = {0}; + NvU64 compAlign = NVBIT64(pLocals->comprInfo.compPageShift); + NvU64 vaAlign = NV_MAX(pLocals->pageSize, compAlign); + NvU64 vaSize = RM_ALIGN_UP(pLocals->mapLength, vaAlign); + NvU64 pageSizeLockMask = 0; + + if (FLD_TEST_DRF(OS46, _FLAGS, _PAGE_SIZE, _BOTH, flags)) + { + vaAlign = NV_MAX(vaAlign, pLocals->vaspaceBigPageSize); + vaSize = RM_ALIGN_UP(pLocals->mapLength, vaAlign); + } + + // + // Third party code path, nvidia_p2p_get_pages, expects on BAR1 VA to be + // always aligned at 64K. + // + // Also, RmMapMemory on PPC64LE expects BAR1 VA to be aligned at 64K. + // + if (pLocals->bIsBar1) + { + vaAlign = NV_MAX(vaAlign, pLocals->vaspaceBigPageSize); + vaSize = RM_ALIGN_UP(pLocals->mapLength, vaAlign); + } + if (FLD_TEST_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, flags)) + { + pLocals->vaRangeLo = pLocals->vaLo; + pLocals->vaRangeHi = pLocals->vaLo + vaSize - 1; + if (pLocals->bIsBar1) + { + NV_RANGE requestedRange = rangeMake(pLocals->vaRangeLo, pLocals->vaRangeHi); + if (!rangeContains(pLocals->totalVaRange, requestedRange)) + { + NV_PRINTF(LEVEL_ERROR, "Requested BAR1 VA Lo=0x%llx Hi=0x%llx\n" + "total BAR1 VA range Lo=0x%llx Hi=0x%llx\n", + requestedRange.lo, requestedRange.hi, + pLocals->totalVaRange.lo, pLocals->totalVaRange.hi); + status = NV_ERR_INVALID_ARGUMENT; + DBG_BREAKPOINT(); + goto cleanup; + } + } + } + else if (pDma->getProperty(pDma, PDB_PROP_DMA_RESTRICT_VA_RANGE)) + { + // See comments in vaspaceFillAllocParams_IMPL. + pLocals->vaRangeHi = NV_MIN(pLocals->vaRangeHi, NVBIT64(40) - 1); + } + + if (FLD_TEST_DRF(OS46, _FLAGS, _PAGE_SIZE, _BOTH, flags)) + { + NV_ASSERT(pLocals->pageSize <= pLocals->vaspaceBigPageSize); + pageSizeLockMask |= RM_PAGE_SIZE; + pageSizeLockMask |= pLocals->vaspaceBigPageSize; + } + else + { + pageSizeLockMask |= pLocals->pageSize; + } + + allocFlags.bReverse = FLD_TEST_DRF(OS46, _FLAGS, _DMA_OFFSET_GROWS, _DOWN, flags); + + status = vaspaceAlloc(pVAS, vaSize, vaAlign, pLocals->vaRangeLo, pLocals->vaRangeHi, + pageSizeLockMask, allocFlags, &pLocals->vaLo); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "can't alloc VA space for mapping.\n"); + goto cleanup; + } + NV_ASSERT_OR_ELSE(0 == (pLocals->vaLo & (pLocals->pageSize - 1)), + status = NV_ERR_INVALID_STATE; + goto cleanup; ); + NV_ASSERT_OR_ELSE(vaSize >= pLocals->mapLength, + status = NV_ERR_INVALID_STATE; + goto cleanup; ); + + // + // Handle overmapping for BAR1. + // + // BAR1 VA is allocated at big page size granularity + // regardless of the physical memory size being mapped. + // Unmapped regions of BAR1 need to be mapped to dummy + // pages (or sparse) to avoid faults on PCIe prefetch. + // + // Overmap solves this by wrapping around the target physical + // memory for the remainder of the last big page so + // any left over 4K pages are "scratch invalidated." + // + // When this is used, the mapLength must be extended to + // to the entire VA range and dmaUpdateVASpace + // takes care of the overMap modulus. 
+ // + // TODO: With VMM enabled BAR1 scratch invalidate is handled + // transparently with SW (or HW) sparse support. + // Removing this special overmap logic should be + // possible when the old VAS path is fully + // deprecated. + // + // See Bug 200090426. + // + if (pLocals->overMap != 0) + { + pLocals->mapLength = vaSize; + } + } + else + { + // + // We are mapping to an existing virtual memory allocation. + // + // The virtual offset passed in may or may not account for + // the page offset. Check for either the page-aligned case or + // the adjusted case to ensure clients are not requesting + // bogus offsets. + // + if (((*pVaddr - pLocals->vaLo) != 0) && + ((*pVaddr - pLocals->vaLo) != pLocals->pageOffset)) + { + NV_PRINTF(LEVEL_ERROR, + "Virtual address 0x%llX is not compatible with page size 0x%llX or page" + " offset 0x%llX.\n", *pVaddr, pLocals->pageSize, pLocals->pageOffset); + DBG_BREAKPOINT(); + status = NV_ERR_INVALID_OFFSET; + goto cleanup; + } + } + + // + // Calculate mapping virtual address limit based on + // mapping length derived from number of physical pages going to map. + // + pLocals->vaHi = pLocals->vaLo + pLocals->mapLength - 1; + + if (pLocals->p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) + { + NV_ASSERT_OR_GOTO(pLocals->pMemory != NULL, fail_post_register); + + FlaMemory *pFlaMemory = dynamicCast(pLocals->pMemory, FlaMemory); + if (pFlaMemory != NULL) + { + pLocals->pSrcGpu = gpumgrGetGpu(pFlaMemory->peerGpuInst); + pLocals->bFlaImport = NV_TRUE; + + if (!pLocals->pSrcGpu) + { + NV_PRINTF(LEVEL_ERROR, "Cannot map FLA Memory without a valid srcGpu, failing....\n"); + status = NV_ERR_INVALID_ARGUMENT; + DBG_BREAKPOINT(); + goto fail_post_register; + } + } + else + { + pLocals->pSrcGpu = pLocals->pMemory->pGpu; + + // XXX - is this required here if we disable SLI BC below? + GPU_RES_SET_THREAD_BC_STATE(pLocals->pMemory->pDevice); + } + + if (IsSLIEnabled(pLocals->pSrcGpu)) + { + NvU32 deviceInstance = gpuGetDeviceInstance(pLocals->pSrcGpu); + + pLocals->pSrcGpu = gpumgrGetGpuFromSubDeviceInst(deviceInstance, pLocals->subDevIdSrc); + gpumgrSetBcEnabledStatus(pLocals->pSrcGpu, NV_FALSE); + } + + pLocals->peerNumber = kbusGetPeerId_HAL(pGpu, pKernelBus, pLocals->pSrcGpu); + + // only needed pLocals->pSrcGpu for the one line above, swap back now. + if (IsSLIEnabled(pLocals->pSrcGpu)) + { + pLocals->pSrcGpu = gpumgrGetParentGPU(pLocals->pSrcGpu); + gpumgrSetBcEnabledStatus(pLocals->pSrcGpu, NV_TRUE); + } + + NV_PRINTF(LEVEL_INFO, + "P2P LOOPBACK setup with physical vidmem at 0x%llx and virtual address " + "at 0x%llx\n", + memdescGetPhysAddr(pAdjustedMemDesc, addressTranslation, 0), pLocals->vaLo); + } + else if (pLocals->p2p == NVOS46_FLAGS_P2P_ENABLE_SLI) + { + // + // All the peer GPUs will have valid PTEs written as + // P2P mappings. The local GPU will have this region marked as + // invalid. 
+ // + const NvU32 deviceInst = gpuGetDeviceInstance(pGpu); + pLocals->pSrcGpu = gpumgrGetGpuFromSubDeviceInst(deviceInst, pLocals->subDevIdSrc); + } + + pLocals->pRootMemDesc = memdescGetRootMemDesc(pAdjustedMemDesc, NULL); + if (memdescGetAddressSpace(pLocals->pRootMemDesc) == ADDR_FBMEM) + { + if (gpumgrCheckIndirectPeer(pGpu, pLocals->pRootMemDesc->pGpu)) + { + pLocals->indirectPeer = DMA_UPDATE_VASPACE_FLAGS_INDIRECT_PEER; + } + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + if (pLocals->p2p) + { + if (pLocals->bFlaImport) + { + pLocals->pTempMemDesc = memdescGetMemDescFromGpu(pAdjustedMemDesc, pGpu); + } + else + { + pLocals->pTempMemDesc = memdescGetMemDescFromGpu(pAdjustedMemDesc, pLocals->pSrcGpu); + } + } + else + { + pLocals->pTempMemDesc = memdescGetMemDescFromGpu(pAdjustedMemDesc, pGpu); + } + + // Commit the mapping update + pLocals->pPteArray = memdescGetPteArray(pLocals->pTempMemDesc, addressTranslation); + dmaPageArrayInit(&pLocals->pageArray, pLocals->pPteArray, pLocals->pteCount); + + // Get pLocals->aperture + if (memdescGetAddressSpace(pLocals->pTempMemDesc) == ADDR_FBMEM) + { + if (pLocals->p2p) + { + pLocals->aperture = NV_MMU_PTE_APERTURE_PEER_MEMORY; + } + else if (pLocals->indirectPeer) + { + pLocals->aperture = NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY; + } + else + { + pLocals->aperture = NV_MMU_PTE_APERTURE_VIDEO_MEMORY; + } + } + else if ((memdescGetAddressSpace(pLocals->pTempMemDesc) == ADDR_FABRIC) || + (memdescGetAddressSpace(pLocals->pTempMemDesc) == ADDR_FABRIC_V2)) + { + OBJGPU *pMappingGpu = pGpu; + OBJGPU *pPeerGpu; + pLocals->peerNumber = BUS_INVALID_PEER; + + if (pLocals->pMemory == NULL) + { + status = NV_ERR_INVALID_STATE; + DBG_BREAKPOINT(); + SLI_LOOP_BREAK; + } + + pPeerGpu = pLocals->pMemory->pGpu; + + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, pLocals->kind)) + { + NV_PRINTF(LEVEL_ERROR, + "Fabric memory should not be compressible.\n"); + status = NV_ERR_INVALID_STATE; + DBG_BREAKPOINT(); + SLI_LOOP_BREAK; + } + + pLocals->aperture = NV_MMU_PTE_APERTURE_PEER_MEMORY; + + if (pPeerGpu != NULL) + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu); + + if ((pKernelNvlink != NULL) && + knvlinkIsNvlinkP2pSupported(pMappingGpu, pKernelNvlink, pPeerGpu)) + { + pLocals->peerNumber = kbusGetPeerId_HAL(pMappingGpu, GPU_GET_KERNEL_BUS(pMappingGpu), + pPeerGpu); + } + } + + if (pLocals->peerNumber == BUS_INVALID_PEER) + { + status = NV_ERR_INVALID_STATE; + DBG_BREAKPOINT(); + SLI_LOOP_BREAK; + } + } + else + { + // No P2P for system memory + if (pLocals->p2p) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_ERROR, "No P2P for system memory.\n"); + SLI_LOOP_BREAK; + } + + if (pLocals->cacheSnoop || memdescGetFlag(pAdjustedMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1)) + { + pLocals->aperture = NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY; + } + else + { + pLocals->aperture = NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY; + } + } + + if (pLocals->p2p == NVOS46_FLAGS_P2P_ENABLE_SLI) + { + if (pLocals->pSrcGpu == pGpu) + { + // Leave the local GPU VA range unmapped (invalid). 
+ SLI_LOOP_CONTINUE; + } + else + { + pLocals->peerNumber = kbusGetPeerId_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), pLocals->pSrcGpu); + } + } + + if (pLocals->aperture == NV_MMU_PTE_APERTURE_PEER_MEMORY && + pLocals->peerNumber == BUS_INVALID_PEER) + { + status = NV_ERR_INVALID_STATE; + DBG_BREAKPOINT(); + SLI_LOOP_BREAK; + } + + // + // ADDR_FABRIC/ADDR_FABRIC_V2 memory descriptors are pre-encoded with the fabric base address + // use NVLINK_INVALID_FABRIC_ADDR to avoid encoding twice + // + if (pLocals->bFlaImport || (memdescGetAddressSpace(pLocals->pTempMemDesc) == ADDR_FABRIC) || + (memdescGetAddressSpace(pLocals->pTempMemDesc) == ADDR_FABRIC_V2)) + { + pLocals->fabricAddr = NVLINK_INVALID_FABRIC_ADDR; + } + else + { + status = _dmaGetFabricAddress(pLocals->pSrcGpu, pLocals->aperture, pLocals->kind, &pLocals->fabricAddr); + if (status != NV_OK) + { + DBG_BREAKPOINT(); + SLI_LOOP_BREAK; + } + } + + pDma = GPU_GET_DMA(pGpu); + + status = dmaUpdateVASpace_HAL(pGpu, pDma, + pVAS, + pLocals->pTempMemDesc, + NULL, + pLocals->vaLo, pLocals->vaHi, + DMA_UPDATE_VASPACE_FLAGS_UPDATE_ALL | pLocals->readOnly | pLocals->priv | + pLocals->tlbLock | pLocals->shaderFlags | pLocals->disableEncryption | pLocals->indirectPeer, + &pLocals->pageArray, pLocals->overMap, + &pLocals->comprInfo, + 0, + NV_MMU_PTE_VALID_TRUE, + pLocals->aperture, + pLocals->peerNumber, + pLocals->fabricAddr, + pLocals->deferInvalidate, + NV_FALSE); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, + "can't update VA space for mapping @vaddr=0x%llx\n", + pLocals->vaLo); + DBG_BREAKPOINT(); + SLI_LOOP_BREAK; + } + } + SLI_LOOP_END + + if (NV_OK == status) + { + // + // Fill in the final virtual address of this mapping. + // + // This accounts for page offset for all cases, whether or not + // the input *pVaddr accounted for it. + // + *pVaddr = pLocals->vaLo + pLocals->pageOffset; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + // This is needed for cliDB tracking of the map. + memdescSetPageSize(memdescGetMemDescFromGpu(pAdjustedMemDesc, pGpu), addressTranslation, NvU64_LO32(pLocals->pageSize)); + SLI_LOOP_END + } + else + { +fail_post_register: + if (pLocals->pMapNode) + btreeUnlink(pLocals->pMapNode, &pLocals->pVASInfo->pMapTree); + + portMemFree(pLocals->pMapNode); + + // Only free the VA allocation if we allocated here. + if (pLocals->bAllocVASpace) + { + vaspaceFree(pVAS, pLocals->vaLo); + } + } + +cleanup: + + if (pAdjustedMemDesc != pMemDesc) + fabricvaspacePutGpaMemdesc(pFabricVAS, pAdjustedMemDesc); + + portMemFree(pLocals); + + return status; +} + +/*! + * @brief Unmap a virtual allocation. + * + * For client allocations, invalidate the page tables, but don't bother freeing. + * For internal allocations, free the allocation, but don't bother invalidating. + * Wait, what? + * + * VMM-TODO: Split into two APIs - one for clients one for internal? + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pDma VirtMemAllocator pointer + * @param[in] pVAS OBJVASPACE pointer + * @param[in] vAddr Virtual memory base address + * @param[in] pMemDesc Physical memory descriptor + * @param[in] flags Unmap options + * @param[in] pCliMapInfo PCLI_DMA_ALLOC_MAP_INFO pointer (for RM Client mappings) + * + * @returns NV_STATUS status = NV_OK on success, or status upon failure. 
+ */ +NV_STATUS +dmaFreeMapping_GM107 +( + OBJGPU *pGpu, + VirtMemAllocator *pDma, + OBJVASPACE *pVAS, + NvU64 vAddr, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags, + CLI_DMA_ALLOC_MAP_INFO *pCliMapInfo +) +{ + VirtualMemory *pVirtualMemory = NULL; + NvU32 p2p = NVOS46_FLAGS_P2P_ENABLE_NONE; + NvU64 vaLo; + NvU64 vaHi; + NvU64 mapLength; + NvU64 pageOffset; + NvU64 pageSize; + NvU32 deferInvalidate; + NvU32 subDevIdSrc; + OBJGPU *pLocalGpu = NULL; + + NV_STATUS status = NV_OK; + MEMORY_DESCRIPTOR *pTempMemDesc = NULL; + + NV_ASSERT_OR_RETURN(NULL != pMemDesc, NV_ERR_INVALID_ARGUMENT); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + // ensure the page size has been set before continuing + NV_ASSERT(memdescGetPageSize(memdescGetMemDescFromGpu(pMemDesc, pGpu), VAS_ADDRESS_TRANSLATION(pVAS)) != 0); + SLI_LOOP_END + + if (pCliMapInfo) + { + p2p = DRF_VAL(OS46, _FLAGS, _P2P_ENABLE, pCliMapInfo->pDmaMappingInfo->Flags); + subDevIdSrc = DRF_VAL(OS46, _FLAGS, _P2P_SUBDEV_ID_SRC, pCliMapInfo->pDmaMappingInfo->Flags); + pVirtualMemory = pCliMapInfo->pVirtualMemory; + } + + if (p2p == NVOS46_FLAGS_P2P_ENABLE_SLI) + { + const NvU32 deviceInst = gpuGetDeviceInstance(pGpu); + pLocalGpu = gpumgrGetGpuFromSubDeviceInst(deviceInst, subDevIdSrc); + } + + deferInvalidate = DRF_VAL(OS47, _FLAGS, _DEFER_TLB_INVALIDATION, flags) ? DMA_DEFER_TLB_INVALIDATE : DMA_TLB_INVALIDATE; + + // Handle NV50_MEMORY_VIRTUAL use case + if ((pVirtualMemory != NULL) && pVirtualMemory->bReserveVaOnAlloc) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + if (p2p == NVOS46_FLAGS_P2P_ENABLE_SLI) + { + if (pLocalGpu == pGpu) + { + SLI_LOOP_CONTINUE; + } + } + + pTempMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + pageSize = memdescGetPageSize(pTempMemDesc, VAS_ADDRESS_TRANSLATION(pVAS)); + pageOffset = memdescGetPhysAddr(pTempMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), 0) & (pageSize - 1); + mapLength = RM_ALIGN_UP(pageOffset + pTempMemDesc->Size, pageSize); + vaLo = RM_ALIGN_DOWN(vAddr, pageSize); + vaHi = vaLo + mapLength - 1; + + pDma = GPU_GET_DMA(pGpu); + if (vaspaceGetFlags(pVAS) & VASPACE_FLAGS_BAR_BAR1) + { + NV_PRINTF(LEVEL_ERROR, "Using dmaFreeMapping with sparse == False in BAR1 path!\n"); + NV_ASSERT(0); + return status; + } + + dmaUpdateVASpace_HAL(pGpu, pDma, + pVAS, + pTempMemDesc, + NULL, + vaLo, vaHi, + DMA_UPDATE_VASPACE_FLAGS_UPDATE_VALID, // only change validity + NULL, 0, + NULL, 0, + NV_MMU_PTE_VALID_FALSE, + kgmmuGetHwPteApertureFromMemdesc(GPU_GET_KERNEL_GMMU(pGpu), pTempMemDesc), 0, + NVLINK_INVALID_FABRIC_ADDR, + deferInvalidate, + NV_FALSE); + } + SLI_LOOP_END + } + else + { + vaspaceFree(pVAS, vAddr); + } + + // + // invalidate any cached peer data if this memory was mapped p2p cached. + // for SLI case - kmemsysCacheOp would loop through all GPUs + // for non-SLI case pGpu is pointing to the P2P mapped GPU would + // invalidate only on that GPU. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + if ((memdescGetGpuP2PCacheAttrib(memdescGetMemDescFromGpu(pMemDesc, pGpu)) == NV_MEMORY_CACHED) && + (p2p != NVOS46_FLAGS_P2P_ENABLE_NONE)) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, pMemDesc, FB_CACHE_PEER_MEMORY, + FB_CACHE_INVALIDATE); + } + SLI_LOOP_END + + return status; +} + +/*! + * Defines the data needed to iterate over the last level duing map VA op. + * Note: Used only in the new VMM code path. 
+ */ +struct MMU_MAP_ITERATOR +{ + /*! + * @copydoc GMMU_FMT + */ + const GMMU_FMT *pFmt; + + /*! + * Physical aperture of the pages. + */ + GMMU_APERTURE aperture; + + /*! + * Opaque array of physical memory to map. Always points to 4K sized pages. + */ + DMA_PAGE_ARRAY *pPageArray; + + /*! + * Points to the index in pPageArray that needs to be mapped. + */ + NvU32 currIdx; + + /*! + * Base offset in bytes into the logical surface being mapped. + */ + NvU64 surfaceOffset; + + /*! + * Physical address of the last page mapped. + */ + NvU64 physAddr; + + /*! + * NvLink fabric address. Used for NVSwitch systems only! + */ + NvU64 fabricAddr; + + /*! + * @copydoc COMPR_INFO + */ + COMPR_INFO comprInfo; + + + /*! + * Non-compressed kind. + */ + NvU32 kindNoCompr; + + /*! + * Indicates whether compression is enabled. + */ + NvBool bCompr; + + /*! + * Template used to initialize the actual PTEs. Will have values that do not + * change across one map operation. + */ + NvU8 pteTemplate[GMMU_FMT_MAX_ENTRY_SIZE] NV_ALIGN_BYTES(8); + + /*! + * The addr field that needs to be filled out, based on the + * aperture. + */ + const GMMU_FIELD_ADDRESS *pAddrField; + + /*! + * Indicates after how many indexes in pPageArray, should the + * map wrap around to the first mapped page. + */ + NvU32 overMapModulus; + + /*! + * Indicates to read-modify-write each PTE instead of + * using the pteTemplate as the base value. + */ + NvBool bReadPtes; + + /*! + * Indicates to update physical address field of each PTE. + */ + NvBool bUpdatePhysAddr; + + /*! + * Indicates to update comptag line and kind of each PTE + * that points to a compressed page. + */ + NvBool bUpdateCompr; + + /*! + * Indicates that we are writing PDEs for Bug 2720120. + * Applicable only to GA100 + */ + NvBool bApplyWarForBug2720120; + + /*! + * Current page table BAR2 aperture mapping (or user buffer). + */ + NvU8 *pMap; +}; + +static void +_gmmuWalkCBMapNextEntries_Direct +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_MAP_TARGET *pTarget, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + NvU32 *pProgress +) +{ + NvU32 i; + const MMU_FMT_LEVEL *pLevelFmt = pTarget->pLevelFmt; + MMU_MAP_ITERATOR *pIter = pTarget->pIter; + NvU8 *pMap = pIter->pMap; + const NvU64 pageSize = mmuFmtLevelPageSize(pLevelFmt); + NvU32 bufferAdjust = 0; + GMMU_ENTRY_VALUE entry; + + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + + if (NULL == pLevelMem) + { + // + // Calculate buffer adjustment to account for entryIndexLo in + // buffered mode. + // RM writes the user-supplied buffer starting at offset 0 + // even if the it is in middle of the page table. + // + bufferAdjust = entryIndexLo * pLevelFmt->entrySize; + } + + for (i = entryIndexLo; i <= entryIndexHi; ++i) + { + // Copy out current PTE if we are overwriting (Read-Modify-Write) + if (pIter->bReadPtes) + { + portMemCopy(entry.v8, pLevelFmt->entrySize, pMap + (i * pLevelFmt->entrySize), pLevelFmt->entrySize); + } + else + { + // Copy the static fields passed in, if we aren't overwriting a subset of fields. + portMemCopy(entry.v8, pLevelFmt->entrySize, pIter->pteTemplate, pLevelFmt->entrySize); + } + + if (pIter->bApplyWarForBug2720120) + { + // Commit to memory. + portMemCopy(pMap + (i * pLevelFmt->entrySize) - bufferAdjust, pLevelFmt->entrySize, + entry.v8, pLevelFmt->entrySize); + continue; + } + + // Calculate the new physical address for the compression check below. 
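+        //
+        // Illustration (added for clarity, not part of the original change): when
+        // overMapModulus is non-zero, currIdx wraps back onto the real pages in
+        // pPageArray. For example, assuming a 12K surface (three 4K pages)
+        // overmapped into one 64K BAR1 big page, overMapModulus would be 3 and
+        // the 16 PTEs would reference pages 0,1,2,0,1,2,... so no PTE in the
+        // big page is left pointing at unbacked memory.
+        //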
+ if (pIter->bUpdatePhysAddr) + { + NvU32 currIdxMod = pIter->currIdx; + + // Wrap the curr idx to the start offset for BAR1 overmapping. + if (0 != pIter->overMapModulus) + { + currIdxMod %= pIter->overMapModulus; + } + + // Extract the physical address of the page to map. + if (currIdxMod < pIter->pPageArray->count) + { + pIter->physAddr = dmaPageArrayGetPhysAddr(pIter->pPageArray, currIdxMod); + // Hack to WAR submemesc mappings + pIter->physAddr = NV_ALIGN_DOWN64(pIter->physAddr, pageSize); + } + else + { + // + // Physically contiguous just increments physAddr + // Should not be the first page (currIdxMod == 0) being mapped. + // + NV_ASSERT_OR_RETURN_VOID((pIter->pPageArray->count == 1) && + (currIdxMod > 0)); + pIter->physAddr += pageSize; + } + } + + // Init comptag + if (pIter->bUpdateCompr) + { + OBJGPU *pGpu = pUserCtx->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool bCompressible = NV_TRUE; + + // + // Check if the FB physical address lands in a segment that + // supports compression. + // On WDDM neither RM or KMD has the physical address + // information at compression allocation time. + // On non-WDDM platforms, RM allocates compression before the + // actual physical allocation. For non-contig allocations, the + // physical pages can be spread across multiple regions + // Therefore compression tags are always allocated and compression must + // be disabled on a per-PTE basis at map time. + // + if ((pMemoryManager->Ram.numFBRegions > 1) && + (gmmuFieldGetAperture(&pIter->pFmt->pPte->fldAperture, entry.v8) == + GMMU_APERTURE_VIDEO)) + { + NvU32 iRegion; + // Find the region in which the candidate block resides + for (iRegion = 0; iRegion < pMemoryManager->Ram.numFBRegions; iRegion++) + { + // Does the block resides within this region? If so, then we are done searching. + if ((pIter->physAddr >= pMemoryManager->Ram.fbRegion[iRegion].base) && + (pIter->physAddr <= pMemoryManager->Ram.fbRegion[iRegion].limit)) + { + // Check if the region supports compression + bCompressible = pMemoryManager->Ram.fbRegion[iRegion].bSupportCompressed; + break; + } + } + } + + // + // TODO: The flags that enable compression are confusing - + // complicated by memsysReleaseReacquireCompr_GF100 usage. + // Clean this up when removing old path and simplifying + // the primitive "map" interface. + // + if (pIter->bCompr && bCompressible) + { + // + // For VF, HW does 1 to 1 FB-comptag mapping. HW manages comptag + // allocation, hence RM can skip the comptagline assignment to PTE. + // Just updating the compressed kind is sufficient for VF. 
+                //
+                if (!IS_VIRTUAL_WITH_SRIOV(pGpu) && pIter->pFmt->version <= GMMU_FMT_VERSION_2)
+                {
+                    NvBool bIsWarApplied = NV_FALSE;
+                    NvU32  savedKind = pIter->comprInfo.kind;
+                    KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu);
+                    const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig =
+                        kmemsysGetStaticConfig(pGpu, pKernelMemorySystem);
+
+                    if (pMemorySystemConfig->bUseRawModeComptaglineAllocation &&
+                        pMemorySystemConfig->bDisablePlcForCertainOffsetsBug3046774 &&
+                        !memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_DISALLOW_PLC, pIter->comprInfo.kind) &&
+                        !kmemsysIsPagePLCable_HAL(pGpu, pKernelMemorySystem, (pIter->surfaceOffset + pIter->currIdx * RM_PAGE_SIZE), pageSize))
+                    {
+                        bIsWarApplied = NV_TRUE;
+                        memmgrGetDisablePlcKind_HAL(pMemoryManager, &pIter->comprInfo.kind);
+                    }
+                    kgmmuFieldSetKindCompTags(GPU_GET_KERNEL_GMMU(pGpu), pIter->pFmt, pLevelFmt, &pIter->comprInfo, pIter->physAddr,
+                                              pIter->surfaceOffset + pIter->currIdx * RM_PAGE_SIZE,
+                                              i, entry.v8);
+                    //
+                    // Restore the kind to PLC if changed, since kind is associated with the entire surface, and the WAR applies to
+                    // individual pages in the surface.
+                    //
+                    if (bIsWarApplied)
+                        pIter->comprInfo.kind = savedKind;
+                }
+                else
+                {
+                    nvFieldSet32(&pIter->pFmt->pPte->fldKind, pIter->comprInfo.kind, entry.v8);
+                }
+            }
+            else
+            {
+                nvFieldSet32(&pIter->pFmt->pPte->fldKind, pIter->kindNoCompr, entry.v8);
+
+                if (pIter->pFmt->version <= GMMU_FMT_VERSION_2)
+                {
+                    nvFieldSet32(&pIter->pFmt->pPte->fldCompTagLine, 0, entry.v8);
+                    if (nvFieldIsValid32(&pIter->pFmt->pPte->fldCompTagSubIndex))
+                    {
+                        nvFieldSet32(&pIter->pFmt->pPte->fldCompTagSubIndex, 0, entry.v8);
+                    }
+                }
+            }
+        }
+
+        // Fill the physical address field.
+        if (pIter->bUpdatePhysAddr && (pIter->pAddrField != NULL))
+        {
+            // Update the pte with the physical address
+            gmmuFieldSetAddress(pIter->pAddrField,
+                kgmmuEncodePhysAddr(GPU_GET_KERNEL_GMMU(pUserCtx->pGpu), pIter->aperture, pIter->physAddr,
+                    pIter->fabricAddr),
+                entry.v8);
+        }
+
+        // Commit to memory.
+        portMemCopy(pMap + (i * pLevelFmt->entrySize) - bufferAdjust, pLevelFmt->entrySize,
+                    entry.v8, pLevelFmt->entrySize);
+
+        //
+        // pPageArray deals in 4K pages.
+ // So increment by the ratio of mapping page size to 4K + // + pIter->currIdx += (NvU32)(pageSize / RM_PAGE_SIZE); + } + + *pProgress = entryIndexHi - entryIndexLo + 1; +} + +static void +_gmmuWalkCBMapNextEntries_RmAperture +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_MAP_TARGET *pTarget, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + NvU32 *pProgress +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + MMU_MAP_ITERATOR *pIter = pTarget->pIter; + MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR*)pLevelMem; + + NV_PRINTF(LEVEL_INFO, "[GPU%u]: PA 0x%llX, Entries 0x%X-0x%X\n", + pUserCtx->pGpu->gpuInstance, + memdescGetPhysAddr(pMemDesc, AT_GPU, 0), entryIndexLo, + entryIndexHi); + + pIter->pMap = kbusMapRmAperture_HAL(pGpu, pMemDesc); + NV_ASSERT_OR_RETURN_VOID(NULL != pIter->pMap); + + _gmmuWalkCBMapNextEntries_Direct(pUserCtx, pTarget, pLevelMem, + entryIndexLo, entryIndexHi, pProgress); + + kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pIter->pMap, NV_TRUE); +} + +static NV_STATUS _dmaGetFabricAddress +( + OBJGPU *pGpu, + NvU32 aperture, + NvU32 kind, + NvU64 *fabricAddr +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + *fabricAddr = NVLINK_INVALID_FABRIC_ADDR; + + if (pKernelNvlink == NULL) + { + return NV_OK; + } + + if (aperture != NV_MMU_PTE_APERTURE_PEER_MEMORY) + { + return NV_OK; + } + + // + // Fabric address should be available for NVSwitch connected GPUs, + // otherwise it is a NOP. + // + *fabricAddr = knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink); + if (*fabricAddr == NVLINK_INVALID_FABRIC_ADDR) + { + return NV_OK; + } + + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind)) + { + NV_PRINTF(LEVEL_ERROR, + "Nvswitch systems don't support compression.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + return NV_OK; +} + +// VMM-TODO: PL(N) mmuPageLevelUpdate - but major splits +NV_STATUS +dmaUpdateVASpace_GF100 +( + OBJGPU *pGpu, + VirtMemAllocator *pDma, + OBJVASPACE *pVAS, + MEMORY_DESCRIPTOR *pMemDesc, + NvU8 *pTgtPteMem, // CPU pointer to PTE memory for Vista updates + NvU64 vAddr, + NvU64 vAddrLimit, + NvU32 flags, + DMA_PAGE_ARRAY *pPageArray, + NvU32 overmapPteMod, + COMPR_INFO *pComprInfo, + NvU64 surfaceOffset, + NvU32 valid, + NvU32 aperture, + NvU32 peer, + NvU64 fabricAddr, + NvU32 deferInvalidate, + NvBool bSparse +) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool readPte = NV_FALSE; + NV_STATUS status = NV_OK; + NvBool isVolatile = NV_TRUE; + NvU32 encrypted = 0; + NvU32 tlbLock; + NvU32 readOnly; + NvU32 priv; + NvU32 writeDisable; + NvU32 readDisable; + NvU32 pageSize = 0; + NvU32 vaSpaceBigPageSize = 0; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + NvU32 alignSize = pMemorySystemConfig->comprPageSize; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvBool bFillPteMem = !!(flags & DMA_UPDATE_VASPACE_FLAGS_FILL_PTE_MEM); + NvBool bUnmap = !bFillPteMem && + (flags & DMA_UPDATE_VASPACE_FLAGS_UPDATE_VALID) && + (SF_VAL(_MMU, _PTE_VALID, valid) == NV_MMU_PTE_VALID_FALSE); + NvBool bIsIndirectPeer; + VAS_PTE_UPDATE_TYPE update_type; + + priv = (flags & DMA_UPDATE_VASPACE_FLAGS_PRIV) ? NV_MMU_PTE_PRIVILEGE_TRUE : NV_MMU_PTE_PRIVILEGE_FALSE; + tlbLock = (flags & DMA_UPDATE_VASPACE_FLAGS_TLB_LOCK) ? 
NV_MMU_PTE_LOCK_TRUE : NV_MMU_PTE_LOCK_FALSE;
+    readOnly = (flags & DMA_UPDATE_VASPACE_FLAGS_READ_ONLY) ? NV_MMU_PTE_READ_ONLY_TRUE : NV_MMU_PTE_READ_ONLY_FALSE;
+    writeDisable = !!(flags & DMA_UPDATE_VASPACE_FLAGS_SHADER_READ_ONLY);
+    readDisable = !!(flags & DMA_UPDATE_VASPACE_FLAGS_SHADER_WRITE_ONLY);
+    bIsIndirectPeer = !!(flags & DMA_UPDATE_VASPACE_FLAGS_INDIRECT_PEER);
+
+    //
+    // Determine whether we are invalidating or revoking privileges, so we know
+    // whether to flush page accesses or not. ReadDisable and writeDisable have
+    // been deprecated on Pascal+, and we don't have the capability to guarantee
+    // coherency post TLB invalidate on pre-Pascal, so we ignore them here.
+    //
+    update_type = (bUnmap || (NV_MMU_PTE_LOCK_FALSE == tlbLock)
+                   || (NV_MMU_PTE_READ_ONLY_TRUE == readOnly)) ? PTE_DOWNGRADE : PTE_UPGRADE;
+
+    if (pMemDesc == NULL)
+    {
+        NV_ASSERT(pMemDesc);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    switch (aperture)
+    {
+        case NV_MMU_PTE_APERTURE_PEER_MEMORY:
+            isVolatile = (memdescGetGpuP2PCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) ? NV_TRUE : NV_FALSE;
+            break;
+        case NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY:
+        case NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY:
+            if (bIsIndirectPeer)
+                isVolatile = (memdescGetGpuP2PCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) ? NV_TRUE : NV_FALSE;
+            else
+                isVolatile = (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) ? NV_TRUE : NV_FALSE;
+
+            break;
+        default:
+        case NV_MMU_PTE_APERTURE_VIDEO_MEMORY:
+            isVolatile = NV_FALSE;
+            break;
+    }
+
+    encrypted = (flags & DMA_UPDATE_VASPACE_FLAGS_DISABLE_ENCRYPTION) ? 0 :
+        memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED);
+
+    vaSpaceBigPageSize = vaspaceGetBigPageSize(pVAS);
+    pageSize = memdescGetPageSize(pMemDesc, VAS_ADDRESS_TRANSLATION(pVAS));
+    NV_ASSERT_OR_RETURN(pageSize, NV_ERR_INVALID_ARGUMENT);
+
+    if (kgmmuIsPerVaspaceBigPageEn(pKernelGmmu) &&
+        (pageSize >= RM_PAGE_SIZE_64K))
+    {
+        NV_ASSERT(pageSize != RM_PAGE_SIZE_HUGE);
+        NV_ASSERT(vaSpaceBigPageSize);
+
+        //
+        // This is a temp WAR till the memdesc->_pageSize is cleaned up.
+        // If the memdesc page size is >= the smallest big page size then
+        // we will correct it to the big page size of the VASpace.
+        // This will also set it to the correct size for the memDesc.
+        //
+        pageSize = vaSpaceBigPageSize;
+    }
+
+    NV_ASSERT_OR_RETURN(pageSize, NV_ERR_INVALID_ARGUMENT);
+
+    // Check this here so we don't have to in the loop(s) below.
+    if ((flags & DMA_UPDATE_VASPACE_FLAGS_UPDATE_PADDR) && (pPageArray == NULL))
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    //
+    // Must get some attrs from the existing PTE. Only if the PTE is going to
+    // be invalidated is there no need to read it.
+    //
+    if (DMA_UPDATE_VASPACE_FLAGS_UPDATE_ALL != (flags & DMA_UPDATE_VASPACE_FLAGS_UPDATE_ALL))
+    {
+        readPte = !((flags & DMA_UPDATE_VASPACE_FLAGS_UPDATE_VALID) &&
+                    (SF_VAL(_MMU, _PTE_VALID, valid) == NV_MMU_PTE_VALID_FALSE));
+    }
+
+    //
+    // Compressed surfaces must be aligned to the compression page size,
+    // but don't check for LDDM which may pass in unaligned surfaces incrementally.
+    // Chips that support full comp tag lines will support the VA being aligned to
+    // the big page size. This is because the PA alignment chooses between even/odd pages
+    // and SW programs the PA alignment.
+    //
+    if (pDma->getProperty(pDma, PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE))
+    {
+        alignSize = vaSpaceBigPageSize;
+    }
+
+    //
+    // VMM-TODO: Merge into PL1 traversal.
+ // + // If the pageSize of the mapping != 4K then be sure that the 4k pages + // making up the big physical page are contiguous. This is currently + // necessary since pMemDesc->PteArray is always in terms of 4KB pages. + // Different large pages do not have to be contiguous with each other. + // This check isn't needed for contig allocations. + // + if (pPageArray && (pageSize != RM_PAGE_SIZE) && (pPageArray->count > 1) && + !(flags & DMA_UPDATE_VASPACE_FLAGS_SKIP_4K_PTE_CHECK)) + { + NvU32 i, j; + RmPhysAddr pageAddr, pagePrevAddr; + + for (i = 0; i < pPageArray->count; i += j) + { + for (j = i + 1; j < pPageArray->count; j++) + { + pagePrevAddr = dmaPageArrayGetPhysAddr(pPageArray, j - 1); + pageAddr = dmaPageArrayGetPhysAddr(pPageArray, j); + + if ((1 + (pagePrevAddr/(RM_PAGE_SIZE))) != + (pageAddr/(RM_PAGE_SIZE))) + { + NV_PRINTF(LEVEL_ERROR, + "MMU: given non-contig 4KB pages for %dkB mapping\n", + pageSize / 1024); + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + + // Are we at the pageSize boundary yet? + if ((pageAddr + RM_PAGE_SIZE) + % pageSize == 0) + { + j++; + break; + } + } + } + } + + // Zero peer on non-peer requests to simplify pte construction + if (aperture != NV_MMU_PTE_APERTURE_PEER_MEMORY) + { + peer = 0; + } + + MMU_MAP_TARGET mapTarget = {0}; + MMU_MAP_ITERATOR mapIter = {0}; + OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE); + const NvU64 vaLo = NV_ALIGN_DOWN64(vAddr, pageSize); + const NvU64 vaHi = NV_ALIGN_DOWN64(vAddrLimit + pageSize, pageSize) - 1; + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + const GMMU_FMT *pFmt = pGpuState->pFmt; + + // Enforce unicast. + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + if (bUnmap) + { + gvaspaceUnmap(pGVAS, pGpu, vaLo, vaHi); + } + else + { + NvU32 kind = pComprInfo->kind; + NvU32 kindNoCompression; + + // + // If the original kind is compressible we need to know what the non-compresible + // kind is so we can fall back to that if we run out of compression tags. 
+ // + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind)) + { + kindNoCompression = memmgrGetUncompressedKind_HAL(pGpu, pMemoryManager, kind, NV_FALSE); + } + else + { + kindNoCompression = kind; + } + + if (!RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM && + memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, pComprInfo->kind) && + ((vAddr & (alignSize-1)) != 0) && + !(flags & DMA_UPDATE_VASPACE_FLAGS_UNALIGNED_COMP)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // MMU_MAP_CTX + mapTarget.pLevelFmt = mmuFmtFindLevelWithPageShift(pFmt->pRoot, + BIT_IDX_32(pageSize)); + mapTarget.pIter = &mapIter; + mapTarget.MapNextEntries = _gmmuWalkCBMapNextEntries_RmAperture; + + //MMU_MAP_ITER + mapIter.pFmt = pFmt; + mapIter.pPageArray = pPageArray; + mapIter.surfaceOffset = surfaceOffset; + mapIter.comprInfo = *pComprInfo; + mapIter.overMapModulus = overmapPteMod; + mapIter.bReadPtes = readPte; + mapIter.kindNoCompr = kindNoCompression; + mapIter.bCompr = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, pComprInfo->kind); + mapIter.bUpdatePhysAddr = !!(flags & DMA_UPDATE_VASPACE_FLAGS_UPDATE_PADDR); + mapIter.bUpdateCompr = !!(flags & DMA_UPDATE_VASPACE_FLAGS_UPDATE_COMPR); + mapIter.fabricAddr = fabricAddr; + + if ((pageSize == RM_PAGE_SIZE_512M) && kgmmuIsBug2720120WarEnabled(pKernelGmmu)) + { + NV_ASSERT_OK_OR_RETURN(_dmaApplyWarForBug2720120(pGVAS, pGpu, + vaLo, vaHi)); + } + + // Build PTE template + { + if (bSparse) + { + const GMMU_FMT_FAMILY *pFmtFamily = + kgmmuFmtGetFamily(pKernelGmmu, pFmt->version); + NV_ASSERT_OR_RETURN(NULL != pFmtFamily, NV_ERR_INVALID_DATA); + portMemCopy(mapIter.pteTemplate, + mapTarget.pLevelFmt->entrySize, pFmtFamily->sparsePte.v8, + mapTarget.pLevelFmt->entrySize); + } + else + { + nvFieldSetBool(&pFmt->pPte->fldValid, !!valid, + mapIter.pteTemplate); + nvFieldSet32(&pFmt->pPte->fldAperture._enum.desc, + aperture, mapIter.pteTemplate); + nvFieldSet32(&pFmt->pPte->fldPeerIndex, peer, + mapIter.pteTemplate); + + nvFieldSetBool(&pFmt->pPte->fldVolatile, !!isVolatile, + mapIter.pteTemplate); + nvFieldSet32(&pFmt->pPte->fldKind, kindNoCompression, + mapIter.pteTemplate); + nvFieldSetBool(&pFmt->pPte->fldReadOnly, !!readOnly, + mapIter.pteTemplate); + nvFieldSetBool(&pFmt->pPte->fldPrivilege, !!priv, + mapIter.pteTemplate); + nvFieldSetBool(&pFmt->pPte->fldEncrypted, !!encrypted, + mapIter.pteTemplate); + if (nvFieldIsValid32(&pFmt->pPte->fldReadDisable.desc)) + { + nvFieldSetBool(&pFmt->pPte->fldReadDisable, !!readDisable, + mapIter.pteTemplate); + } + if (nvFieldIsValid32(&pFmt->pPte->fldWriteDisable.desc)) + { + nvFieldSetBool(&pFmt->pPte->fldWriteDisable, !!writeDisable, + mapIter.pteTemplate); + } + if (nvFieldIsValid32(&pFmt->pPte->fldLocked.desc)) + { + nvFieldSetBool(&pFmt->pPte->fldLocked, !!tlbLock, + mapIter.pteTemplate); + } + if (nvFieldIsValid32(&pFmt->pPte->fldAtomicDisable.desc)) + { + // tlbLock is overridden by atomic_disable + nvFieldSetBool(&pFmt->pPte->fldAtomicDisable, !!tlbLock, + mapIter.pteTemplate); + } + } + } + + // Extract the physical address field based on aperture. + mapIter.aperture = + gmmuFieldGetAperture(&pFmt->pPte->fldAperture, + mapIter.pteTemplate); + if (mapIter.aperture != GMMU_APERTURE_INVALID) + { + mapIter.pAddrField = + gmmuFmtPtePhysAddrFld(pFmt->pPte, mapIter.aperture); + } + + // + // FillPteMem case must be handled specially as it violates + // internal VAS alignment and constistency checks. 
+ // + if (bFillPteMem) + { + // If caller supplies buffer to write PTEs to, use that + if (NULL != pTgtPteMem) + { + MMU_WALK_USER_CTX userCtx = {0}; + NvU32 progress = 0; + NvU32 entryIndexLo = mmuFmtVirtAddrToEntryIndex(mapTarget.pLevelFmt, vaLo); + // Calculated to allow cross-page-table-boundary updates. + NvU32 entryIndexHi = (NvU32)(vaHi >> mapTarget.pLevelFmt->virtAddrBitLo) - + (NvU32)(vaLo >> mapTarget.pLevelFmt->virtAddrBitLo) + + entryIndexLo; + + // + // Use pTgtPteMem directly as mapping and pass NULL memdesc to + // indicate buffered mode. + // + userCtx.pGpu = pGpu; + mapIter.pMap = pTgtPteMem; + _gmmuWalkCBMapNextEntries_Direct(&userCtx, &mapTarget, NULL, + entryIndexLo, entryIndexHi, &progress); + NV_ASSERT(progress == entryIndexHi - entryIndexLo + 1); + } + // Otherwise use walker directly. + else + { + GVAS_BLOCK *pVASBlock = NULL; + EMEMBLOCK *pMemBlock = NULL; + MMU_WALK_USER_CTX userCtx = {0}; + + pMemBlock = pGVAS->pHeap->eheapGetBlock(pGVAS->pHeap, vaLo, 0); + NV_ASSERT_OR_RETURN(NULL != pMemBlock, NV_ERR_INVALID_ARGUMENT); + pVASBlock = pMemBlock->pData; + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx); + status = mmuWalkMap(userCtx.pGpuState->pWalk, vaLo, vaHi, &mapTarget); + NV_ASSERT(NV_OK == status); + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + } + } + else + { + VAS_MAP_FLAGS mapFlags = {0}; + mapFlags.bRemap = readPte || + (flags & DMA_UPDATE_VASPACE_FLAGS_ALLOW_REMAP); + status = gvaspaceMap(pGVAS, pGpu, vaLo, vaHi, &mapTarget, mapFlags); + NV_ASSERT(NV_OK == status); + } + } + + // Invalidate VAS TLB entries. + if ((NULL == pTgtPteMem) && DMA_TLB_INVALIDATE == deferInvalidate) + { + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | + BUS_FLUSH_SYSTEM_MEMORY | + BUS_FLUSH_USE_PCIE_READ); + gvaspaceInvalidateTlb(pGVAS, pGpu, update_type); + } + +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + if (DBG_RMMSG_CHECK(LEVEL_INFO)) + { + MMU_TRACE_ARG arg = {0}; + MMU_TRACE_PARAM params = {0}; + params.mode = MMU_TRACE_MODE_TRACE_VERBOSE; + params.va = vAddr; + params.vaLimit = vAddrLimit; + params.pArg = &arg; + + mmuTrace(pGpu, pVAS, ¶ms); + } +#endif + return status; +} + +NV_STATUS +dmaInit_GM107(OBJGPU *pGpu, VirtMemAllocator *pDma) +{ + DMAHALINFO_FERMI *pDHPI = NULL; + NvU32 data; + + // Allocate and link in an 'info block' for this engine. + if (NULL == (pDHPI = (PDMAHALINFO_FERMI)addInfoPtr(&pDma->infoList, HAL_IMPL_GF100, + sizeof(DMAHALINFO_FERMI)))) + { + return NV_ERR_NO_MEMORY; + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + pGpu->optimizeUseCaseOverride = + NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_DEFAULT; + } + + pDHPI->vasReverse = !(!pDHPI->vasReverse); + + pDHPI->compTagLineMultiplier = 1; + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_RESTRICT_VA_RANGE, &data) + == NV_OK) + { + if (NV_REG_STR_RM_RESTRICT_VA_RANGE_ON == data) + { + pDma->setProperty(pDma, PDB_PROP_DMA_RESTRICT_VA_RANGE, NV_TRUE); + } + } + + return NV_OK; +} + +void +dmaDestruct_GM107(VirtMemAllocator *pDma) +{ + deleteInfoPtr(&pDma->infoList, HAL_IMPL_GF100); +} + +// Called when IsSLI = NV_TRUE and all linked GPUs are loaded +NV_STATUS +dmaStatePostLoad_GM107(OBJGPU *pGpu, VirtMemAllocator *pDma, NvU32 flags) +{ +#ifdef DEBUG + DMAHALINFO_FERMI *pDHPI = DMA_GET_FERMI_INFOBLK(pDma); + DMAHALINFO_FERMI *pDHPIPeer; + VirtMemAllocator *pPeerDma; + + pPeerDma = GPU_GET_DMA(pGpu); + pDHPIPeer = DMA_GET_FERMI_INFOBLK(pPeerDma); + + // + // Require these attributes to be symmetric for now. 
If we need to support + // heterogeneous SLI across GPUs that don't match here we'll need to implement + // dma[Get|Set]TunableState. + // + NV_ASSERT(pDHPIPeer->vasReverse == pDHPI->vasReverse); + NV_ASSERT(pDHPIPeer->compTagLineMultiplier == pDHPI->compTagLineMultiplier); +#endif + return NV_OK; +} + +// VMM-TODO: Remove or merge with dmaAllocMapping_GF100. +NV_STATUS +dmaMapBuffer_GM107 +( + OBJGPU *pGpu, + VirtMemAllocator *pDma, + OBJVASPACE *pVAS, + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 *pVaddr, + NvU32 flagsForAlloc, + NvU32 flagsForUpdate +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvU32 kind; + COMPR_INFO comprInfo; + NvU32 pteCount, aperture; + NvU64 mapLength; + NvU64 vaddr; + NV_STATUS status = NV_OK; + NvU64 rangeLo = 0; + NvU64 rangeHi = 0; + NvU64 compAlign; + NvU64 vaSize; + NvU64 vaAlign; + OBJEHEAP *pVASpaceHeap = vaspaceGetHeap(pVAS); + NvU64 pageSize = 0; + NvU64 pageSizeSubDev = 0; + NvU64 pageOffs = 0; + NvU64 pageOffsSubDev = 0; + NvU32 flags; + + DMA_PAGE_ARRAY pageArray; + MEMORY_DESCRIPTOR *pSubDevMemDesc = NULL; + VAS_ALLOC_FLAGS allocFlags = {0}; + + NV_ASSERT(pVaddr); + NV_ASSERT(pVAS); + + // + // Sets the page size for all subdevice memdescs when present. Since we don't support + // different page size per subdevice, it asserts when the page size differs. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pSubDevMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + if (memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, pSubDevMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), + RM_ATTR_PAGE_SIZE_DEFAULT) != NV_OK) + { + SLI_LOOP_RETURN(NV_ERR_INVALID_ARGUMENT); + } + pageSizeSubDev = memdescGetPageSize(pSubDevMemDesc, VAS_ADDRESS_TRANSLATION(pVAS)); + pageOffsSubDev = memdescGetPhysAddr(pSubDevMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), 0) & + (pageSizeSubDev - 1); + if (0 == pageSize) + { + pageSize = pageSizeSubDev; + pageOffs = pageOffsSubDev; + } + else + { + NV_ASSERT(pageSize == pageSizeSubDev); + NV_ASSERT(pageOffs == pageOffsSubDev); + } + SLI_LOOP_END + + status = memmgrGetKindComprFromMemDesc(pMemoryManager, pMemDesc, 0, &kind, &comprInfo); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "memmgrGetKindComprFromMemDesc failed\n"); + return NV_ERR_GENERIC; + } + + if (kgmmuIsPerVaspaceBigPageEn(pKernelGmmu) && + (pageSize >= RM_PAGE_SIZE_64K)) + { + NV_ASSERT(pageSize != RM_PAGE_SIZE_HUGE); + pageSize = vaspaceGetBigPageSize(pVAS); + } + + mapLength = RM_ALIGN_UP(pageOffs + memdescGetSize(pMemDesc), pageSize); + + vaddr = 0; + compAlign = NVBIT64(comprInfo.compPageShift); + vaAlign = NV_MAX(pageSize, compAlign); + vaSize = RM_ALIGN_UP(mapLength, vaAlign); + + if (flagsForAlloc & DMA_ALLOC_VASPACE_SIZE_ALIGNED) + { + NvU64 temp = vaSize; + ROUNDUP_POW2_U64(temp); + vaAlign = NV_MAX(vaAlign, temp); + } + + rangeLo = vaspaceGetVaStart(pVAS); + rangeHi = vaspaceGetVaLimit(pVAS); + + // If trying to conserve 32bit address space, map RM buffers at 4GB+ + if (pDma->getProperty(pDma, PDB_PROP_DMA_ENFORCE_32BIT_POINTER) && + (pVASpaceHeap->free > NVBIT64(32))) + { + rangeLo = NV_MAX(NVBIT64(32), rangeLo); + } + + if (flagsForAlloc & DMA_VA_LIMIT_57B) + { + rangeHi = NV_MIN(rangeHi, NVBIT64(57) - 1); + } + else if (flagsForAlloc & DMA_VA_LIMIT_49B) + { + rangeHi = NV_MIN(rangeHi, NVBIT64(49) - 1); + } + else if (pDma->getProperty(pDma, PDB_PROP_DMA_RESTRICT_VA_RANGE)) + { + // See comments in vaspaceFillAllocParams_IMPL. 
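+        // Note (added for clarity, not in the original change): NVBIT64(40) - 1
+        // clamps the upper bound to 1TB - 1, i.e. a 40-bit virtual address range.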
+        rangeHi = NV_MIN(rangeHi, NVBIT64(40) - 1);
+    }
+
+    status = vaspaceAlloc(pVAS, vaSize, vaAlign, rangeLo, rangeHi,
+                          pageSize, allocFlags, &vaddr);
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "vaspaceAlloc failed\n");
+        return status;
+    }
+
+    SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY)
+
+    pSubDevMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu);
+
+    pteCount = memdescGetContiguity(pSubDevMemDesc, VAS_ADDRESS_TRANSLATION(pVAS)) ? 1 :
+               (NvU32)(mapLength >> RM_PAGE_SHIFT);
+
+    dmaPageArrayInit(&pageArray,
+                     memdescGetPteArray(pSubDevMemDesc, VAS_ADDRESS_TRANSLATION(pVAS)),
+                     pteCount);
+    flags = flagsForUpdate;
+    flags |= memdescGetFlag(pSubDevMemDesc, MEMDESC_FLAGS_GPU_PRIVILEGED) ?
+             DMA_UPDATE_VASPACE_FLAGS_PRIV : 0;
+
+    if (memdescGetAddressSpace(pSubDevMemDesc) == ADDR_FBMEM)
+    {
+        aperture = NV_MMU_PTE_APERTURE_VIDEO_MEMORY;
+    }
+    else if (memdescGetCpuCacheAttrib(pSubDevMemDesc) == NV_MEMORY_CACHED)
+    {
+        aperture = NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY;
+    }
+    else
+    {
+        aperture = NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY;
+    }
+
+    status = dmaUpdateVASpace_HAL(pGpu, pDma, pVAS,
+                                  pSubDevMemDesc,
+                                  NULL,
+                                  vaddr, vaddr + mapLength - 1,
+                                  flags | DMA_UPDATE_VASPACE_FLAGS_UPDATE_ALL,
+                                  &pageArray, 0, &comprInfo,
+                                  0,
+                                  NV_MMU_PTE_VALID_TRUE,
+                                  aperture, 0,
+                                  NVLINK_INVALID_FABRIC_ADDR,
+                                  NV_FALSE, NV_FALSE);
+
+    if (status != NV_OK)
+    {
+        SLI_LOOP_BREAK;
+    }
+
+    SLI_LOOP_END
+
+    if (status != NV_OK)
+    {
+        NV_PRINTF(LEVEL_ERROR, "dmaUpdateVASpace_GF100 failed\n");
+        vaspaceFree(pVAS, vaddr);
+        return NV_ERR_GENERIC;
+    }
+
+    if (pVaddr)
+    {
+        *pVaddr = vaddr;
+    }
+
+    return NV_OK;
+}
+
+void dmaUnmapBuffer_GM107(OBJGPU *pGpu, VirtMemAllocator *pDma, OBJVASPACE *pVAS, NvU64 vaddr)
+{
+    NV_ASSERT_OR_RETURN_VOID(NULL != pVAS);
+
+    vaspaceFree(pVAS, vaddr);
+}
+
+#ifdef DEBUG
+/*
+ * These routines are not used by the RM proper. They are meant to be used by
+ * external debuggers. Because of this we do not have a global prototype.
+ */ +NvU32 _mmuReadFb32(OBJGPU *pGpu, RmPhysAddr addr, NvU32 aperture); +void _mmuWriteFb32(OBJGPU *pGpu, RmPhysAddr addr, NvU32 data, NvU32 aperture); + +NvU32 _mmuReadFb32(OBJGPU *pGpu, RmPhysAddr addr, NvU32 aperture) +{ + MEMORY_DESCRIPTOR memDesc = {0}; + NvU8 *pOffset = NULL; + NvU32 data = 0; + + if (aperture == 0) + aperture = ADDR_FBMEM; + memdescCreateExisting(&memDesc, pGpu, 4, aperture, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE); + memdescDescribe(&memDesc, aperture, addr, 4); // Note that this will probably fail with MODS/sysmem + pOffset = kbusMapRmAperture_HAL(pGpu, &memDesc); + if (pOffset == NULL) + { + NV_ASSERT(pOffset != NULL); + goto _mmuReadFb32_failed; + } + + data = MEM_RD32(pOffset); + + kbusUnmapRmAperture_HAL(pGpu, &memDesc, &pOffset, NV_TRUE); +_mmuReadFb32_failed: + memdescDestroy(&memDesc); + + return data; +} + +void _mmuWriteFb32(OBJGPU *pGpu, RmPhysAddr addr, NvU32 data, NvU32 aperture) +{ + MEMORY_DESCRIPTOR memDesc = {0}; + NvU8 *pOffset = NULL; + + if (aperture == 0) + aperture = ADDR_FBMEM; + memdescCreateExisting(&memDesc, pGpu, 4, aperture, NV_MEMORY_UNCACHED, MEMDESC_FLAGS_NONE); + memdescDescribe(&memDesc, aperture, addr, 4); // Note that this will probably fail with MODS/sysmem + pOffset = kbusMapRmAperture_HAL(pGpu, &memDesc); + if (pOffset == NULL) + { + NV_ASSERT(pOffset != NULL); + goto _mmuWriteFb32_failed; + } + + MEM_WR32(pOffset, data); + + kbusUnmapRmAperture_HAL(pGpu, &memDesc, &pOffset, NV_TRUE); +_mmuWriteFb32_failed: + memdescDestroy(&memDesc); +} + +#endif // DEBUG + +//-------------------------------------------------------------------------------- +// dmaXlateVAtoPAforChannel_GM107 - this function translates virtual address +// to physical address through page table walk for a given channel id. +// +// Returns NV_OK if translation was successful, NV_ERR_GENERIC otherwise. 
+// +// Output parameters: +// pAddr - physical address +// memType - memory type where this physical address belongs to +// (ADDR_SYSMEM or ADDR_FBMEM) +// +//-------------------------------------------------------------------------------- +NV_STATUS +dmaXlateVAtoPAforChannel_GM107 +( + OBJGPU *pGpu, + VirtMemAllocator *pDma, + KernelChannel *pKernelChannel, + NvU64 vAddr, + NvU64 *pAddr, + NvU32 *memType +) +{ + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pAddr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(memType != NULL, NV_ERR_INVALID_ARGUMENT); + + MMU_TRACE_ARG arg = {0}; + MMU_TRACE_PARAM params = {0}; + NV_STATUS status; + + params.mode = MMU_TRACE_MODE_TRANSLATE; + params.va = vAddr; + params.vaLimit = vAddr; + params.pArg = &arg; + + status = mmuTrace(pGpu, pKernelChannel->pVAS, ¶ms); + if (status == NV_OK) + { + *memType = arg.aperture; + *pAddr = arg.pa; + } + + return status; +} + +static NV_STATUS +_dmaApplyWarForBug2720120 +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + const GMMU_FMT *pFmt = pGpuState->pFmt; + const GMMU_FMT_FAMILY *pFmtFamily = kgmmuFmtGetFamily(pKernelGmmu, pFmt->version); + GVAS_BLOCK *pVASBlock = NULL; + EMEMBLOCK *pMemBlock = NULL; + MMU_WALK_USER_CTX userCtx = {0}; + MMU_MAP_TARGET mapTarget = {0}; + MMU_MAP_ITERATOR mapIter = {0}; + + // MMU_MAP_CTX + mapTarget.pLevelFmt = mmuFmtFindLevelWithPageShift(pFmt->pRoot, 29); + mapTarget.pIter = &mapIter; + mapTarget.MapNextEntries = _gmmuWalkCBMapNextEntries_RmAperture; + + //MMU_MAP_ITER + mapIter.pFmt = pFmt; + mapIter.bApplyWarForBug2720120 = NV_TRUE; + + // Copy the template + portMemCopy(mapIter.pteTemplate, + mapTarget.pLevelFmt->entrySize, pFmtFamily->bug2720120WarPde1.v8, + mapTarget.pLevelFmt->entrySize); + + pMemBlock = pGVAS->pHeap->eheapGetBlock(pGVAS->pHeap, vaLo, 0); + NV_ASSERT_OR_RETURN(pMemBlock != NULL, NV_ERR_INVALID_ARGUMENT); + pVASBlock = pMemBlock->pData; + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx); + NV_ASSERT_OK_OR_RETURN(mmuWalkMap(userCtx.pGpuState->pWalk, + vaLo, vaHi, &mapTarget)); + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + + // Flush PTE writes to vidmem and issue TLB invalidate + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | + BUS_FLUSH_SYSTEM_MEMORY | + BUS_FLUSH_USE_PCIE_READ); + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_UPGRADE); + + return NV_OK; +} + +NV_STATUS +dmaInitGart_GM107(OBJGPU *pGpu, VirtMemAllocator *pDma) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + pDma->gpuGartCaps = DMA_GPU_GART_CAPS_NOSNOOP; + + if ((pKernelBif != NULL) && FLD_TEST_REF(BIF_DMA_CAPS_SNOOP, _CTXDMA, + kbifGetDmaCaps(pGpu, pKernelBif))) + { + pDma->gpuGartCaps |= DMA_GPU_GART_CAPS_SNOOP; + } + + return NV_OK; +} + +/*! 
+ * @brief This function returns the size of a GPU PTE
+ *
+ * @param[in] pGpu  OBJGPU pointer
+ *
+ * @returns The size of a GPU PTE
+ */
+NvU32
+dmaGetPTESize_GM107(OBJGPU *pGpu, VirtMemAllocator *pDma)
+{
+    return NV_MMU_PTE__SIZE;
+}
diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/pascal/mem_mgr_gp100.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/pascal/mem_mgr_gp100.c
new file mode 100644
index 000000000..a78d83324
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/pascal/mem_mgr_gp100.c
@@ -0,0 +1,291 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "core/core.h"
+#include "gpu/gpu.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+
+#include "published/pascal/gp100/dev_mmu.h"
+#include "class/cl906f.h"   // GF100_CHANNEL_GPFIFO
+#include "class/clc0b5.h"   // PASCAL_DMA_COPY_A
+
+/*!
+ * @brief Determine the kind of compressed PTE for a given allocation for color.
+ *
+ * @param[in] pFbAllocPageFormat  FB_ALLOC_PAGE_FORMAT pointer
+ *
+ * @returns PTE Kind.
+ */ +NvU32 +memmgrChooseKindCompressC_GP100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat +) +{ + extern NvU32 memmgrChooseKindCompressC_GM107(OBJGPU *pGpu, MemoryManager *pMemoryManager, FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat); + NvU32 kind = NV_MMU_PTE_KIND_PITCH; + NvU32 attrdepth = DRF_VAL(OS32, _ATTR, _DEPTH, pFbAllocPageFormat->attr); + NvU32 aasamples = DRF_VAL(OS32, _ATTR, _AA_SAMPLES, pFbAllocPageFormat->attr); + + if ((attrdepth == NVOS32_ATTR_DEPTH_32) && + ((aasamples == NVOS32_ATTR_AA_SAMPLES_4) || + (aasamples == NVOS32_ATTR_AA_SAMPLES_4_ROTATED) || + (aasamples == NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_8) || + (aasamples == NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16))) + { + kind = NV_MMU_PTE_KIND_C32_MS4_4CBRA; + } + else if ((attrdepth == NVOS32_ATTR_DEPTH_64) && + ((aasamples == NVOS32_ATTR_AA_SAMPLES_4) || + (aasamples == NVOS32_ATTR_AA_SAMPLES_4_ROTATED) || + (aasamples == NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_8) || + (aasamples == NVOS32_ATTR_AA_SAMPLES_4_VIRTUAL_16))) + { + kind = NV_MMU_PTE_KIND_C64_MS4_4CBRA; + } + else + { + kind = memmgrChooseKindCompressC_GM107(pGpu, pMemoryManager, pFbAllocPageFormat); + } + + return kind; + +} + +void +memmgrHandleSizeOverrides_GP100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + // If the fbOverrideSizeMb is set, insert a reserved region to "remove" the memory + if (pMemoryManager->Ram.fbTotalMemSizeMb > pMemoryManager->Ram.fbOverrideSizeMb) + { + FB_REGION_DESCRIPTOR newRegion = {0}; + NvU32 newRegionIndex; + NvU64 memDiff = (pMemoryManager->Ram.fbTotalMemSizeMb - pMemoryManager->Ram.fbOverrideSizeMb) << 20; + // + // overrideheapmax till scrub end is marked as reserved and unusable + // + NvU64 regionLimit = pMemoryManager->Ram.fbRegion[0].limit; + NvU64 regionBase; + + // Ensure that regionLimit is 64KB aligned - necessary for PMA + regionLimit = NV_ALIGN_UP(regionLimit, 0x10000) - 1; + + // + // If there is an overridden heap max already, then reserve everything + // above that. 
Otherwise, just go with where it would already land + // + regionBase = NV_MIN(pMemoryManager->overrideHeapMax, regionLimit - memDiff) + 1; + + newRegion.base = regionBase; + newRegion.limit = regionLimit; + newRegion.rsvdSize = 0; + newRegion.bRsvdRegion = NV_TRUE; + newRegion.performance = 0; + newRegion.bSupportCompressed = NV_FALSE; + newRegion.bSupportISO = NV_FALSE; + newRegion.bProtected = NV_FALSE; + newRegion.bInternalHeap = NV_FALSE; + + newRegionIndex = memmgrInsertFbRegion(pGpu, pMemoryManager, &newRegion); + + pMemoryManager->overrideHeapMax = pMemoryManager->Ram.fbRegion[newRegionIndex].base - 1; + } +} + +NV_STATUS +memmgrFinishHandleSizeOverrides_GP100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV_STATUS rmStatus = NV_OK; + + if (pMemoryManager->overrideInitHeapMin > 0) + { + // + // We want all the memory above the overrideHeapMax to be inaccessible, + // so make everything above the MAX now reserved + // + NvU32 i; + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + if (pMemoryManager->Ram.fbRegion[i].limit > pMemoryManager->overrideHeapMax) + { + if (pMemoryManager->Ram.fbRegion[i].base >= pMemoryManager->overrideHeapMax + 1) + { + // If the region is completely above the max, just mark it internal + pMemoryManager->Ram.fbRegion[i].bRsvdRegion = NV_TRUE; + } + else if (!pMemoryManager->Ram.fbRegion[i].bRsvdRegion) + { + // + // Otherwise, if the region is straddling and not already reserved, + // split it into one reserved and one non-reserved region + // + FB_REGION_DESCRIPTOR newRegion = {0}; + newRegion.base = pMemoryManager->overrideHeapMax + 1; + newRegion.limit = pMemoryManager->Ram.fbRegion[i].limit; + newRegion.rsvdSize = 0; + newRegion.bRsvdRegion = NV_TRUE; + newRegion.performance = 0; + newRegion.bSupportCompressed = NV_FALSE; + newRegion.bSupportISO = NV_FALSE; + newRegion.bProtected = NV_FALSE; + newRegion.bInternalHeap = NV_FALSE; + i = memmgrInsertFbRegion(pGpu, pMemoryManager, &newRegion); + } + } + } + + // + // Scrubbing should be finished before the next allocation, so this can + // safely be reset. + // + pMemoryManager->overrideInitHeapMin = 0; + } + + return rmStatus; +} + +/*! + * Inserts semaphore methods into the push buffer for one block" + * + * @param[in] subCh Subchannel Id + * @param[in] data Semaphore address. + * @param[in] payload size of copies. + * @param[in/out] **pPtr Pointer to location in pushbuffer. + */ +void +memmgrChannelPushSemaphoreMethodsBlock_GP100 +( + MemoryManager *pMemoryManager, + NvU32 subCh, + NvU64 data, + NvU32 payload, + NvU32 **pPtr +) +{ + NvU32 *ptr = *pPtr; + PUSH_PAIR(subCh, NVC0B5_SET_SEMAPHORE_A, DRF_NUM(C0B5, _SET_SEMAPHORE_A, + _UPPER, NvU64_HI32(data))); + PUSH_PAIR(subCh, NVC0B5_SET_SEMAPHORE_B, DRF_NUM(C0B5, _SET_SEMAPHORE_B, + _LOWER, NvU64_LO32(data))); + PUSH_PAIR(subCh, NVC0B5_SET_SEMAPHORE_PAYLOAD, payload); + *pPtr = ptr; +} + +/*! + * @brief Inserts address methods into the push buffer for one block + * + * @param[in] bSrc If true the address passed is for source + * @param[in] subCh Subchannel Id + * @param[in] addr Physical address of source/destination + * @param[in/out] **pPtr Pointer to location in pushbuffer. 
+ */ +void +memmgrChannelPushAddressMethodsBlock_GP100 +( + MemoryManager *pMemoryManager, + NvBool bSrc, + NvU32 subCh, + RmPhysAddr addr, + NvU32 **pPtr +) +{ + NvU32 *ptr = *pPtr; + if (bSrc == NV_TRUE) + { + PUSH_PAIR(subCh, NVC0B5_OFFSET_IN_UPPER, DRF_NUM(C0B5, + _OFFSET_IN_UPPER, _UPPER, NvU64_HI32(addr))); + PUSH_PAIR(subCh, NVC0B5_OFFSET_IN_LOWER, DRF_NUM(C0B5, + _OFFSET_IN_LOWER, _VALUE, NvU64_LO32(addr))); + } + else + { + PUSH_PAIR(subCh, NVC0B5_OFFSET_OUT_UPPER, DRF_NUM(C0B5, + _OFFSET_OUT_UPPER, _UPPER, NvU64_HI32(addr))); + PUSH_PAIR(subCh, NVC0B5_OFFSET_OUT_LOWER, DRF_NUM(C0B5, + _OFFSET_OUT_LOWER, _VALUE, NvU64_LO32(addr))); + } + *pPtr = ptr; +} + +/*! + * Returns the max context size + * + * @returns NvU64 + */ +NvU64 +memmgrGetMaxContextSize_GP100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + extern NvU64 memmgrGetMaxContextSize_GM200(OBJGPU *pGpu, MemoryManager *pMemoryManager); + + NvU64 size = memmgrGetMaxContextSize_GM200(pGpu, pMemoryManager); + + // + // This function's original purpose was to estimate how much heap memory RM + // needs to keep in reserve from Windows LDDM driver to pass WHQL MaxContexts + // test. This estimation is done after heap init before KMD allocates a + // kernel-managed chunk. + // UVM & PMA similarly require RM to estimate how much heap memory RM needs + // to reserve for page tables, contexts, etc. This estimation is used during + // heap init to divide the FB into internal heap and external PMA managed + // spaces. + // Update for Pascal+ chips: on WDDMv2 KMD manages the reserve by locking down + // lowest level PDEs at RM device creation time (=process creation) via + // NV90F1_CTRL_CMD_VASPACE_RESERVE_ENTRIES rmControl call. Thus RM has to allocate + // the low level PTs for the entire reserve which is 4Gb (range 4Gb-8Gb). + // When PD0 is locked down and RM PD1 entries are valid, KMD can simply copy them + // at the setRootPageTable ddi call and don't restore at the unsetRootPT time. + // Because of the above reservation RM has to create quite a few 4k page tables and + // this results in extra ~28k consumption per default DX device (with default 2 contexts). + // On Kepler and Maxwell, the up-to-date wddm2 driver supports up to ~400 processes. + // On Pascal, with the same amount of reserve, we can only have ~200 processes. + // Hence we need to increase the RM physical reserve size for MMUv2 enabled chips + // to have supported process count on parity with previous chips. + // If any changes to RM reserve are introduced, for testing it with multi-process scenarios, + // a new kmdtest (CreateNProcesses) should be used. + + + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + // Only needs increase in single GPU case as 400 process requirement is satisfied on SLI with the additional SLI reserve + if (!IsSLIEnabled(pGpu) && pGpu->getProperty(pGpu, PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL)) + { + // KMD in WDDM mode + } + } + + return size; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/pascal/mem_mgr_scrub_gp100.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/pascal/mem_mgr_scrub_gp100.c new file mode 100644 index 000000000..557481147 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/pascal/mem_mgr_scrub_gp100.c @@ -0,0 +1,184 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_scrub.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/fifo/kernel_fifo.h" + +static NV_STATUS _memmgrPostSchedulingEnableHandler_GP100(OBJGPU *, void *); +static NV_STATUS _memmgrPreSchedulingDisableHandler_GP100(OBJGPU *, void *); + +/*! + * Scrub initialization routine + * + * @returns NV_OK on success, NV_ERR_GENERIC on failure + */ +NV_STATUS +memmgrScrubInit_GP100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + if (memmgrIsScrubOnFreeEnabled(pMemoryManager)) + { + NV_ASSERT_OK_OR_RETURN(kfifoAddSchedulingHandler(pGpu, + GPU_GET_KERNEL_FIFO(pGpu), + _memmgrPostSchedulingEnableHandler_GP100, NULL, + _memmgrPreSchedulingDisableHandler_GP100, NULL)); + } + + return NV_OK; +} + +/*! + * Performs initialization that is dependant on work done in gpuStateLoad() such + * as channel initialization. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pUnusedData Callback data which is unused + * + * @returns NV_OK on success, NV_ERR_GENERIC on failure + */ +static NV_STATUS +_memmgrPostSchedulingEnableHandler_GP100 +( + OBJGPU *pGpu, + void *pUnusedData +) +{ + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvBool bIsMIGEnabled = IS_MIG_ENABLED(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bIsVgpuLegacyPolicy = (pKernelMIGManager != NULL) && kmigmgrUseLegacyVgpuPolicy(pGpu, pKernelMIGManager); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + // + // Disabling scrub on free for SLI, waiting on the bug:1915380 + // Bug: 2997744, skipping the top level scrubber since partitions are not created. + // + if (!IsSLIEnabled(pGpu) && + memmgrIsScrubOnFreeEnabled(pMemoryManager) && + memmgrIsPmaInitialized(pMemoryManager) && + !(bIsMIGEnabled && IS_VIRTUAL(pGpu) && !bIsVgpuLegacyPolicy)) + { + NV_ASSERT_OK_OR_RETURN(scrubberConstruct(pGpu, pHeap)); + } + + return NV_OK; +} + +NV_STATUS +memmgrScrubHandlePostSchedulingEnable_GP100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return _memmgrPostSchedulingEnableHandler_GP100(pGpu, NULL); +} + +/*! 
+ * Performs cleanup on resources that need to be freed before StateUnload routes + * are called + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pUnusedData Unused callback data + * + * @returns NV_OK + */ +static NV_STATUS +_memmgrPreSchedulingDisableHandler_GP100 +( + OBJGPU *pGpu, + void *pUnusedData +) +{ + Heap *pHeap = GPU_GET_HEAP(pGpu); + OBJMEMSCRUB *pMemscrub = NULL; + NvBool bIsMIGEnabled = IS_MIG_ENABLED(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool bIsMIGInUse = IS_MIG_IN_USE(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bIsVgpuLegacyPolicy = (pKernelMIGManager != NULL) && kmigmgrUseLegacyVgpuPolicy(pGpu, pKernelMIGManager); + + if (!pHeap) + return NV_ERR_GENERIC; + + // + // Top level scrubber was allocated with MIG disabled, it must be destroyed + // with MIG disabled as well + // + if (bIsMIGInUse && !(IS_VIRTUAL(pGpu) && bIsVgpuLegacyPolicy)) + return NV_WARN_MORE_PROCESSING_REQUIRED; + + pMemscrub = pHeap->pmaObject.pScrubObj; + + // Bug: 2997744, skipping the top level scrubber since GPU instances are not created. + if (!IsSLIEnabled(pGpu) && + memmgrIsScrubOnFreeEnabled(pMemoryManager) && + memmgrIsPmaInitialized(pMemoryManager) && + !(bIsMIGEnabled && IS_VIRTUAL(pGpu) && !bIsVgpuLegacyPolicy)) + { + scrubberDestruct(pGpu, pHeap, pMemscrub); + pHeap->pmaObject.pScrubObj = NULL; + } + + return NV_OK; +} + +NV_STATUS +memmgrScrubHandlePreSchedulingDisable_GP100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return _memmgrPreSchedulingDisableHandler_GP100(pGpu, NULL); +} + +/*! + * FB scrub cleanup routine. Deallocates any dynamic memory used. + * + * @returns NV_OK on success, NV_ERR_GENERIC on failure + */ +void +memmgrScrubDestroy_GP100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + if (memmgrIsScrubOnFreeEnabled(pMemoryManager)) + { + kfifoRemoveSchedulingHandler(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + _memmgrPostSchedulingEnableHandler_GP100, NULL, + _memmgrPreSchedulingDisableHandler_GP100, NULL); + } +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102.c new file mode 100644 index 000000000..6fb047f66 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102.c @@ -0,0 +1,587 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_desc.h" + +#include "virtualization/hypervisor/hypervisor.h" +#include "vgpu/vgpu_events.h" + +#include "published/turing/tu102/dev_mmu.h" +#include "published/turing/tu102/kind_macros.h" +#include "published/turing/tu102/dev_fb_addendum.h" +#include "nvRmReg.h" + +/*! + * @brief Determine the kind of uncompressed PTE for a given allocation. + * + * @param[in] pFbAllocPageFormat FB_ALLOC_PAGE_FORMAT pointer + * + * @returns PTE kind. + */ +NvU32 +memmgrChooseKindZ_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat +) +{ + NvU32 kind = NV_MMU_PTE_KIND_INVALID; + NvU32 zformat = DRF_VAL(OS32, _ATTR, _Z_TYPE, pFbAllocPageFormat->attr); + NvU32 zspacking = DRF_VAL(OS32, _ATTR, _ZS_PACKING, pFbAllocPageFormat->attr); + NvU32 depth = DRF_VAL(OS32, _ATTR, _DEPTH, pFbAllocPageFormat->attr); + + if (zspacking == NVOS32_ATTR_ZS_PACKING_S8 && depth == NVOS32_ATTR_DEPTH_8) + { + kind = NV_MMU_PTE_KIND_S8; + } + else + { + if (zformat == NVOS32_ATTR_Z_TYPE_FIXED) + { + switch (zspacking) + { + case NVOS32_ATTR_ZS_PACKING_Z16: + { + kind = NV_MMU_PTE_KIND_Z16; + break; + } + case NVOS32_ATTR_ZS_PACKING_S8Z24: + case NVOS32_ATTR_ZS_PACKING_X8Z24: + { + kind = NV_MMU_PTE_KIND_S8Z24; + break; + } + case NVOS32_ATTR_ZS_PACKING_Z24S8: + case NVOS32_ATTR_ZS_PACKING_Z24X8: + { + kind = NV_MMU_PTE_KIND_Z24S8; + break; + } + } + } + else if (zformat == NVOS32_ATTR_Z_TYPE_FLOAT) + { + switch (zspacking) + { + case NVOS32_ATTR_ZS_PACKING_Z32: + { + kind = NV_MMU_PTE_KIND_GENERIC_MEMORY; + break; + } + case NVOS32_ATTR_ZS_PACKING_Z32_X24S8: + { + kind = NV_MMU_PTE_KIND_ZF32_X24S8; + break; + } + } + } + } + + return kind; + +} + +/*! + * @brief Determine the kind of compressed PTE with PLC disabled for a given allocation. + * + * @param[in] pFbAllocPageFormat FB_ALLOC_PAGE_FORMAT pointer + * + * @returns PTE kind. 
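+ *          NV_MMU_PTE_KIND_INVALID is returned when no compressible kind
+ *          matches the requested Z format and packing; a fixed-point Z16
+ *          request, for example, maps to NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC.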
+ */ +NvU32 +memmgrChooseKindCompressZ_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat +) +{ + NvU32 kind = NV_MMU_PTE_KIND_INVALID; + NvU32 zformat = DRF_VAL(OS32, _ATTR, _Z_TYPE, pFbAllocPageFormat->attr); + NvU32 zspacking = DRF_VAL(OS32, _ATTR, _ZS_PACKING, pFbAllocPageFormat->attr); + NvU32 depth = DRF_VAL(OS32, _ATTR, _DEPTH, pFbAllocPageFormat->attr); + + kind = NV_MMU_PTE_KIND_INVALID; + + if (zspacking == NVOS32_ATTR_ZS_PACKING_S8 && depth == NVOS32_ATTR_DEPTH_8) + { + kind = NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC; + } + else + { + if (zformat == NVOS32_ATTR_Z_TYPE_FIXED) + { + switch (zspacking) + { + case NVOS32_ATTR_ZS_PACKING_Z16: + { + kind = NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC; + break; + } + case NVOS32_ATTR_ZS_PACKING_S8Z24: + case NVOS32_ATTR_ZS_PACKING_X8Z24: + { + kind = NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC; + break; + } + case NVOS32_ATTR_ZS_PACKING_Z24S8: + case NVOS32_ATTR_ZS_PACKING_Z24X8: + { + kind = NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC; + break; + } + } + } + else if (zformat == NVOS32_ATTR_Z_TYPE_FLOAT) + { + switch (zspacking) + { + case NVOS32_ATTR_ZS_PACKING_Z32: + { + kind = NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC; + break; + } + case NVOS32_ATTR_ZS_PACKING_Z32_X24S8: + { + kind = NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC; + break; + } + } + } + } + + return kind; +} + +/*! + * @brief Choose kind for a surface + * + * @param[in] pFbAllocPageFormat User Alloc Requirements + * @param[in] comprAttr Compression attribute + * + * @returns compression kind + */ +NvU32 +memmgrChooseKind_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat, + NvU32 comprAttr, + NvU32 *pKind +) +{ + NvU32 kind = NV_MMU_PTE_KIND_INVALID; + NvBool bRmToChooseKind = NV_TRUE; + NV_STATUS status = NV_OK; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL) && + !(pFbAllocPageFormat->flags & NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC) && + !hypervisorIsVgxHyper()) + { + bRmToChooseKind = NV_FALSE; + kind = pFbAllocPageFormat->kind; + + if ( comprAttr == NVOS32_ATTR_COMPR_NONE && + memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind)) + { + // + // If client passes a compressible PTE kind but sets + // NVOS32_ATTR_COMPR_NONE, then RM will need to choose + // an uncompressible kind for the client, instead of + // using the kind from the client. + // + NV_PRINTF(LEVEL_ERROR, + "Client sets a compressible PTE kind 0x%x, while sets " + "NVOS32_ATTR_COMPR_NONE. 
RM will ignore the PTE kind from client and " + "choose an uncompressible kind instead.\n", kind); + bRmToChooseKind = NV_TRUE; + } + } + + if (bRmToChooseKind) + { + kind = NV_MMU_PTE_KIND_INVALID; + + { + switch (pFbAllocPageFormat->type) + { + case NVOS32_TYPE_IMAGE: + case NVOS32_TYPE_TEXTURE: + case NVOS32_TYPE_VIDEO: + case NVOS32_TYPE_CURSOR: + case NVOS32_TYPE_DMA: + case NVOS32_TYPE_PRIMARY: + case NVOS32_TYPE_UNUSED: + case NVOS32_TYPE_SHADER_PROGRAM: + case NVOS32_TYPE_OWNER_RM: + case NVOS32_TYPE_NOTIFIER: + case NVOS32_TYPE_RESERVED: + case NVOS32_TYPE_PMA: + { + if (comprAttr == NVOS32_ATTR_COMPR_NONE) + { + kind = NV_MMU_PTE_KIND_GENERIC_MEMORY; + } + else if (comprAttr == NVOS32_ATTR_COMPR_DISABLE_PLC_ANY || + pMemorySystemConfig->bDisablePostL2Compression) + { + kind = NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC; + } + else + { + kind = NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE; + } + break; + } + case NVOS32_TYPE_DEPTH: + case NVOS32_TYPE_STENCIL: + { + if (comprAttr == NVOS32_ATTR_COMPR_NONE) + { + kind = memmgrChooseKindZ_HAL(pGpu, pMemoryManager, pFbAllocPageFormat); + } + else + { + kind = memmgrChooseKindCompressZ_HAL(pGpu, pMemoryManager, pFbAllocPageFormat); + } + break; + } + } + } + } + + *pKind = kind; + + if (kind == NV_MMU_PTE_KIND_INVALID) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to set a kind, dumping attributes:comprAttr = 0x%x, type = " + "0x%x, attr = 0x%x\n", comprAttr, pFbAllocPageFormat->type, + pFbAllocPageFormat->attr); + } + + return status; +} + +/* + * @brief Return an uncompressible kind for the given kind. There are two + * modes of operation, one for ReleaseReacquire and the full fledged + * mode. + * @param[in] NvU32 - PTE kind + * @param[in] NvBool - ReleaseReacquire / full-fledge mode + * + * @returns NvU32 - Uncompressed kind for the compressed PTE kind type, or it will Assert + */ +NvU32 +memmgrGetUncompressedKind_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 kind, + NvBool bReleaseReacquire +) +{ + // + // This check is to return GENERIC_MEMORY if mods want to bypass uncompression + // But it doesn't work for S8 surfaces, so returning the uncompressed kind for + // that only + // + if (bReleaseReacquire && kind != NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC) + { + return NV_MMU_PTE_KIND_GENERIC_MEMORY; + } + + switch (kind) + { + case NV_MMU_PTE_KIND_GENERIC_MEMORY: + case NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE: + case NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC: + return NV_MMU_PTE_KIND_GENERIC_MEMORY; + case NV_MMU_PTE_KIND_S8: + case NV_MMU_PTE_KIND_S8_COMPRESSIBLE_DISABLE_PLC: + return NV_MMU_PTE_KIND_S8; + case NV_MMU_PTE_KIND_Z16: + case NV_MMU_PTE_KIND_Z16_COMPRESSIBLE_DISABLE_PLC: + return NV_MMU_PTE_KIND_Z16; + case NV_MMU_PTE_KIND_S8Z24: + case NV_MMU_PTE_KIND_S8Z24_COMPRESSIBLE_DISABLE_PLC: + return NV_MMU_PTE_KIND_S8Z24; + case NV_MMU_PTE_KIND_Z24S8: + case NV_MMU_PTE_KIND_Z24S8_COMPRESSIBLE_DISABLE_PLC: + return NV_MMU_PTE_KIND_Z24S8; + case NV_MMU_PTE_KIND_ZF32_X24S8: + case NV_MMU_PTE_KIND_ZF32_X24S8_COMPRESSIBLE_DISABLE_PLC: + return NV_MMU_PTE_KIND_ZF32_X24S8; + default: + { + NV_PRINTF(LEVEL_ERROR, "Unknown kind 0x%x.\n", kind); + DBG_BREAKPOINT(); + + return NV_MMU_PTE_KIND_INVALID; + } + } +} + +/*! + * @brief Get the proper NV_MMU_PTE_KIND_SMSKED_MESSAGE kind. + * + * @returns NV_MMU_PTE_KIND_SMSKED_MESSAGE. + */ +NvU32 +memmgrGetMessageKind_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return NV_MMU_PTE_KIND_SMSKED_MESSAGE; +} + +/*! 
+ * @brief Returns default PTE kind for no memory handle. + * + * @return The default PTE kind. + */ +NvU32 +memmgrGetDefaultPteKindForNoHandle_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return NV_MMU_PTE_KIND_GENERIC_MEMORY; +} + +/*! + * @brief Returns whether the kind can be compressible or not. + * + * @param[in] kind NvU32 Kind value + * + * @returns NV_FALSE, if the kind is not compressible + * NV_TRUE, if the kind is compressible + */ +NvBool +memmgrIsKindCompressible_TU102 +( + MemoryManager *pMemoryManager, + NvU32 kind +) +{ + return memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind); +} + +/*! + * @brief Get Ctag offset from FB_ALLOC_INFO + * + * @param[in] pFbAllocInfo pointer to FB_ALLOC_INFO + * + * @returns a valid NvU32 (CTAG OFFSET) stored in the FB_ALLOC_INFO structure by RM + * + */ +NvU32 +memmgrGetCtagOffsetFromParams_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + // Ensure max possible size of ctagOffset fits in comptagline + ct_assert(DRF_VAL(OS32, _ALLOC, _COMPTAG_OFFSET_START, NV_U32_MAX) < + NVBIT32(GPU_DRF_WIDTH(NV_MMU_PTE_COMPTAGLINE))); + + return DRF_VAL(OS32, _ALLOC, _COMPTAG_OFFSET_START, pFbAllocInfo->ctagOffset); +} + +/*! + * @brief Set Ctag offset in FB_ALLOC_INFO structure from the ctag offset input + * + * @param[in/out] pFbAllocInfo pointer to FB_ALLOC_INFO + * @param[in] comptagOffset comptag offset that needs to be stored in FB_ALLOC_INFO + * + * + */ +void +memmgrSetCtagOffsetInParams_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_INFO *pFbAllocInfo, + NvU32 comptagOffset +) +{ + if (comptagOffset < (NVBIT32(GPU_DRF_WIDTH(NV_MMU_PTE_COMPTAGLINE)))) + { + pFbAllocInfo->ctagOffset = FLD_SET_DRF_NUM(OS32, _ALLOC, _COMPTAG_OFFSET_START, comptagOffset, + pFbAllocInfo->ctagOffset); + NV_PRINTF(LEVEL_INFO, "Setting ctag offset before allocating: %x\n", + pFbAllocInfo->ctagOffset); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "- comptagline offset is outside the bounds, offset: %x, limit:%x.\n", + comptagOffset, NVBIT32(GPU_DRF_WIDTH(NV_MMU_PTE_COMPTAGLINE))); + } +} + +/*! + * @brief Determine comptag offset from physical address + * + * @param[out]physAddr physical address for the surface + * + * @returns comptag offset from physical address of the surface + */ +NvU32 +memmgrDetermineComptag_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + RmPhysAddr physAddr +) +{ + return 1 + NvU64_LO32(DRF_VAL64(_MMU, _PTE_COMPTAGLINE, _BITS_FROM_SPA, physAddr)); +} + +/*! + * @brief This function converts RM_DEFAULT_PTE_KIND to the chip specific kind + * + *@param[in] pteKind PTE Kind + * + * @returns the HW PTE Kind + * + */ +NvU32 +memmgrGetHwPteKindFromSwPteKind_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 pteKind +) +{ + if (pteKind == RM_DEFAULT_PTE_KIND) + return NV_MMU_PTE_KIND_GENERIC_MEMORY; + else + return pteKind; +} + +/*! + * @brief This function converts from chip specific kind to RM_DEFAULT_PTE_KIND + * + * @param[in] pteKind PTE Kind + * + * @returns the Chip Specific Kind/SW PTE KIND + * + */ +NvU32 +memmgrGetSwPteKindFromHwPteKind_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 pteKind +) +{ + if (pteKind == NV_MMU_PTE_KIND_GENERIC_MEMORY) + return RM_DEFAULT_PTE_KIND; + else + return pteKind; +} + +/*! 
+ * @brief This function returns Kind to be used for Scrub on free + * + * @param[out] pteKind PTE Kind to be returned + * + * @returns void + * + */ +void +memmgrGetPteKindForScrubber_TU102 +( + MemoryManager *pMemoryManager, + NvU32 *pteKind +) +{ + if (pteKind == NULL) + return; + + *pteKind = NV_MMU_PTE_KIND_GENERIC_MEMORY_COMPRESSIBLE_DISABLE_PLC; +} + +/*! + * Returns the max context size + * + * @returns NvU64 + */ +NvU64 +memmgrGetMaxContextSize_TU102 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + extern NvU64 memmgrGetMaxContextSize_GV100(OBJGPU *pGpu, MemoryManager *pMemoryManager); + + NvU64 size = memmgrGetMaxContextSize_GV100(pGpu, pMemoryManager); + + if (RMCFG_FEATURE_PLATFORM_MODS) + { + if (memmgrIsPmaInitialized(pMemoryManager)) + { + // Double the context size + size *= 2; + } + } + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + if (gpuIsClientRmAllocatedCtxBufferEnabled(pGpu)) + { + // + // When ctx buffer management is in CPU-RM, GSP-RM needs extra + // 32 MBs to meet max CUDA context allocation requirement + // (Including 16MB reservation for possible global ctx buffers + // allocation) + // + size += 32 * 1024 * 1024; + } + else + { + // + // When ctx buffer management is in GSP-RM, GSP-RM needs extra + // 160 MBs to meet max CUDA context allocation requirement + // + size += 160 * 1024 * 1024; + } + } + + return size; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c new file mode 100644 index 000000000..f6845bcdb --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "turing/tu102/dev_mmu.h" +#include "turing/tu102/kind_macros.h" + +/*! + * @brief Returns NV_TRUE if memory kind matches the given op. + * + * @param[in] op Kind-type to check for + * @param[in] kind Value to check + * + * @return NV_TRUE if "kind" matches kind-type specified by op. + * NV_FALSE otherwise. 
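+ *
+ * Note that the FB_IS_KIND_ZBC_ALLOWS_* and FB_IS_KIND_COMPRESSIBLE_1/2/4
+ * ops always report NV_FALSE on this chip family.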
+ */ +NvBool +memmgrIsKind_TU102 +( + MemoryManager *pMemoryManager, + FB_IS_KIND_OP op, + NvU32 kind +) +{ + switch (op) + { + case FB_IS_KIND_Z: + return KIND_Z(kind); + case FB_IS_KIND_ZBC: + return PTEKIND_COMPRESSIBLE(kind); + case FB_IS_KIND_COMPRESSIBLE: + return PTEKIND_COMPRESSIBLE(kind); + case FB_IS_KIND_ZBC_ALLOWS_1: + case FB_IS_KIND_ZBC_ALLOWS_2: + case FB_IS_KIND_COMPRESSIBLE_1: + case FB_IS_KIND_COMPRESSIBLE_2: + case FB_IS_KIND_COMPRESSIBLE_4: + return NV_FALSE; + case FB_IS_KIND_SUPPORTED: + return (PTEKIND_SUPPORTED(kind) && !(KIND_INVALID(kind))); + case FB_IS_KIND_DISALLOW_PLC: + return PTEKIND_DISALLOWS_PLC(kind); + default: + NV_PRINTF(LEVEL_ERROR, "Bad op (%08x) passed in\n", op); + DBG_BREAKPOINT(); + return NV_FALSE; + } +} + +/** + * From Turing, we will not have Pitch Kind, so this function will determine + * type of surface from pMemoryInfo of the allocation. + * return NV_TRUE for BL surfaces and NV_FALSE otherwise. + */ +NvBool +memmgrIsSurfaceBlockLinear_TU102 +( + MemoryManager *pMemoryManager, + Memory *pMemory, + NvU32 kind, + NvU32 dmaFlags +) +{ + if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _BL, dmaFlags)) + { + return NV_TRUE; + } + else if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _PITCH, dmaFlags)) + { + return NV_FALSE; + } + + return FLD_TEST_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, pMemory->Attr); +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/arch/volta/mem_mgr_gv100.c b/src/nvidia/src/kernel/gpu/mem_mgr/arch/volta/mem_mgr_gv100.c new file mode 100644 index 000000000..f96ac8448 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/arch/volta/mem_mgr_gv100.c @@ -0,0 +1,155 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "published/volta/gv100/dev_mmu.h" + +#include "class/clc361.h" + +NV_STATUS +memmgrScrubMapDoorbellRegion_GV100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel +) +{ + NV_STATUS status = NV_OK; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // GSPTODO: disable doorbell region scrubbing for now + if (RMCFG_FEATURE_PLATFORM_GSP) + return NV_OK; + + // allocate object for the class VOLTA_USER_MODE_A + status = pRmApi->AllocWithHandle(pRmApi, + pChannel->hClient, + pChannel->subdeviceId, + pChannel->doorbellRegionHandle, + VOLTA_USERMODE_A, + NULL); + if (status != NV_OK) + goto exit; + + // Map the doorbell region onto CPU to submit work in the future + status = pRmApi->MapToCpu(pRmApi, + pChannel->hClient, + pChannel->deviceId, + pChannel->doorbellRegionHandle, + 0, + NVC361_NV_USERMODE__SIZE, + (void**)(&pChannel->pDoorbellRegion), + DRF_DEF(OS33, _FLAGS, _ACCESS, _WRITE_ONLY)); + if (status != NV_OK) + { + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->doorbellRegionHandle); + goto exit; + } + + pChannel->pDoorbellRegisterOffset = (NvU32*)(pChannel->pDoorbellRegion + + NVC361_NOTIFY_CHANNEL_PENDING); + + // setting the use of doorbell register for this channel + pChannel->bUseDoorbellRegister = NV_TRUE; +exit: + return status; + + +} + +/*! + * Returns the max context size + * + * @returns NvU64 + */ +NvU64 +memmgrGetMaxContextSize_GV100 +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + extern NvU64 memmgrGetMaxContextSize_GP100(OBJGPU *pGpu, MemoryManager *pMemoryManager); + + NvU64 size = memmgrGetMaxContextSize_GP100(pGpu, pMemoryManager); + + // In Volta, the GR context buffer size increased by about 847 KB (doubled from Pascal) + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + // + // We are increasing the reserved mem size by 10 MB. + // This is to account for GR context buffer allocs by KMD's private channels. + // KMD allocates 10 GR channels (not in virtual context mode). + // This is causing an additional 8470 KB allocations from RM reserved heap. + // See bug 1882679 for more details. + // + size += (10 * 1024 *1024); + } + else if (RMCFG_FEATURE_PLATFORM_MODS) + { + // Double the context size + size *= 2; + } + else + { + // TODO: Remove the PMA check after enabling on all chips. + if (memmgrIsPmaInitialized(pMemoryManager)) + { + // + // Increase the context size by 120 MB. + // This is needed to run the same number glxgears instances as in GP102. + // See bug 1885000 comment 7 and bug 1885000 comment 36 + // + size += (120 * 1024 *1024); + } + } + + return size; +} + +/** + * This will return NV_TRUE if surface is BL. otherwise it returns NV_FALSE. + */ +NvBool +memmgrIsSurfaceBlockLinear_GV100 +( + MemoryManager *pMemoryManager, + Memory *pMemory, + NvU32 kind, + NvU32 dmaFlags +) +{ + if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _BL, dmaFlags)) + { + return NV_TRUE; + } + else if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _PITCH, dmaFlags)) + { + return NV_FALSE; + } + + return (kind != NV_MMU_PTE_KIND_PITCH) ? NV_TRUE: NV_FALSE; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c b/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c new file mode 100644 index 000000000..aaf30691b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/context_dma.c @@ -0,0 +1,916 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This module contains contextDma implementation. +* +******************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu/mem_mgr/context_dma.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/disp_channel.h" +#include "gpu/disp/inst_mem/disp_inst_mem.h" +#include "os/os.h" +#include "gpu_mgr/gpu_mgr.h" +#include "vgpu/rpc.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/rs_utils.h" +#include "rmapi/mapping_list.h" + +#include "gpu/bus/kern_bus.h" + +#include "ctrl/ctrl0002.h" + +static NV_STATUS _ctxdmaConstruct(ContextDma *pContextDma, RsClient *, NvHandle, NvU32, NvU32, RsResourceRef *, NvU64, NvU64); +static NV_STATUS _ctxdmaDestruct(ContextDma *pContextDma, NvHandle hClient); +static void _ctxdmaDestroyBindings(RsClient *pClient, ContextDma *pContextDma, OBJGPU *pGpu); + +static void +_ctxdmaDestroyFBMappings +( + ContextDma *pContextDma, + OBJGPU *pGpu +) +{ + NvU32 gpuSubDevInst; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + + gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + if (pContextDma->KernelVAddr[gpuSubDevInst] != NULL) + { + RsCpuMapping *pCpuMapping = NULL; + RsResourceRef *pMemoryRef = RES_GET_REF(pContextDma->pMemory); + refFindCpuMapping(pMemoryRef, + NV_PTR_TO_NvP64(pContextDma->KernelVAddr[gpuSubDevInst]), + &pCpuMapping); + + // + // It is actually expected that in some cases KernelVAddr != NULL but + // that no mappings exist. There are scenarios where the memory + // mapping will be freed before we get here (CliDelDeviceMemory). + // KernelVAddr[i] may be non-null, but the mapping has been + // freed and it is no longer valid. So we just mark it as NULL and + // continue. + // + if (pCpuMapping) + { + osUnmapPciMemoryKernelOld(pGpu, pContextDma->KernelVAddr[gpuSubDevInst]); + refRemoveMapping(pMemoryRef, pCpuMapping); + + /// + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, there is nothing to do beyond this point in the + // guest OS (where IS_VIRTUAL(pGpu) is true). 
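+            // The FB aperture unmap below is therefore skipped for guest
+            // RM and for GSP client configurations.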
+ // + if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu) && + (pContextDma->FbApertureLen[gpuSubDevInst] != 0)) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + kbusUnmapFbAperture_HAL(pGpu, pKernelBus, pContextDma->pMemDesc, + pContextDma->FbAperture[gpuSubDevInst], + pContextDma->FbApertureLen[gpuSubDevInst], + BUS_MAP_FB_FLAGS_MAP_UNICAST); + pContextDma->FbAperture[gpuSubDevInst] = (NvU64)-1; + pContextDma->FbApertureLen[gpuSubDevInst] = 0; + } + } + + pContextDma->KernelVAddr[gpuSubDevInst] = NULL; + } + + SLI_LOOP_END +} + +NV_STATUS +ctxdmaConstruct_IMPL +( + ContextDma *pContextDma, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + NV_CONTEXT_DMA_ALLOCATION_PARAMS *pAllocParams = pParams->pAllocParams; + NvU32 cachesnoop, type, i; + NvBool bReadOnly; + RsResourceRef *pMemoryRef; + NvHandle hParentFromMemory; + RsClient *pClient = pCallContext->pClient; + NvHandle hSubDevice = pAllocParams->hSubDevice; + NvU32 hClass = pParams->externalClassId; + NvU32 flags = pAllocParams->flags; + NvU64 offset = pAllocParams->offset; + NvU64 limit = pAllocParams->limit; + + status = clientGetResourceRef(pClient, pAllocParams->hMemory, &pMemoryRef); + if (status != NV_OK) + return status; + + hParentFromMemory = pMemoryRef->pParentRef ? pMemoryRef->pParentRef->hResource : 0; + + if (RES_GET_PARENT_HANDLE(pContextDma) != hParentFromMemory) + return NV_ERR_INVALID_OBJECT_PARENT; + + // validate the flags + switch (flags >> DRF_SHIFT(NVOS03_FLAGS_ACCESS) & DRF_MASK(NVOS03_FLAGS_ACCESS)) + { + case NVOS03_FLAGS_ACCESS_WRITE_ONLY: + // we don't currently have a need to distinguish write-only + // permissions; fall through to read/write + + case NVOS03_FLAGS_ACCESS_READ_WRITE: + bReadOnly = NV_FALSE; + break; + + case NVOS03_FLAGS_ACCESS_READ_ONLY: + bReadOnly = NV_TRUE; + break; + + default: + return NV_ERR_INVALID_FLAGS; + } + + switch (DRF_VAL(OS03, _FLAGS, _CACHE_SNOOP, flags)) + { + case NVOS03_FLAGS_CACHE_SNOOP_ENABLE: + cachesnoop = NV_TRUE; + break; + + case NVOS03_FLAGS_CACHE_SNOOP_DISABLE: + cachesnoop = NV_FALSE; + break; + + default: + return NV_ERR_INVALID_FLAGS; + } + + /* + * Note that the NV_OS03_FLAGS_MAPPING is an alias to xg + * the LSB of the NV_OS03_FLAGS_TYPE. And in fact if + * type is NV_OS03_FLAGS_TYPE_NOTIFIER (bit 20 set) + * then it implicitly means that NV_OS03_FLAGS_MAPPING + * is _MAPPING_KERNEL. If the client wants to have a + * Kernel Mapping, it should use the _MAPPING_KERNEL + * flag set and the _TYPE_NOTIFIER should be used only + * with NOTIFIERS. + */ + type = DRF_VAL(OS03, _FLAGS, _MAPPING, flags); + + // fill in dmaInfo + pContextDma->Flags = flags; + pContextDma->bReadOnly = bReadOnly; + pContextDma->CacheSnoop = cachesnoop; + pContextDma->Type = type; + pContextDma->Limit = limit; + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pContextDma->KernelVAddr); i++) + pContextDma->KernelVAddr[i] = NULL; + + pContextDma->KernelPriv = NULL; + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pContextDma->FbAperture); i++) + { + pContextDma->FbAperture[i] = (NvU64)-1; + pContextDma->FbApertureLen[i] = 0; + } + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pContextDma->Instance); i++) + { + pContextDma->Instance[i] = 0; + pContextDma->InstRefCount[i] = 0; + } + + pContextDma->pMemDesc = NULL; + pContextDma->AddressSpace = ADDR_UNKNOWN; + + // Display context dmas have always been explicitly bound. 
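+    //
+    // Requests for the legacy NVOS03_FLAGS_HASH_TABLE_ENABLE behavior are
+    // therefore rejected with NV_ERR_INVALID_FLAGS.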
+ if (DRF_VAL(OS03, _FLAGS, _HASH_TABLE, flags) == NVOS03_FLAGS_HASH_TABLE_ENABLE) + { + NV_PRINTF(LEVEL_ERROR, "HASH_TABLE=ENABLE no longer supported!\n"); + return NV_ERR_INVALID_FLAGS; + } + + status = _ctxdmaConstruct(pContextDma, pClient, hSubDevice, hClass, + flags, pMemoryRef, offset, limit); + + if (status == NV_OK) + refAddDependant(pMemoryRef, RES_GET_REF(pContextDma)); + + return status; +} + +void +ctxdmaDestruct_IMPL +( + ContextDma *pContextDma +) +{ + _ctxdmaDestruct(pContextDma, RES_GET_CLIENT_HANDLE(pContextDma)); +} + +/*! + * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM. + */ +NV_STATUS +ctxdmaCtrlCmdUpdateContextdma_IMPL +( + ContextDma *pContextDma, + NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxDmaParams +) +{ + RsClient *pClient = RES_GET_CLIENT(pContextDma); + OBJGPU *pGpu; + KernelDisplay *pKernelDisplay; + DisplayInstanceMemory *pInstMem; + NvU64 *pNewAddress = NULL; + NvU64 *pNewLimit = NULL; + NvHandle hMemory = NV01_NULL_OBJECT; + NvU32 comprInfo; + NV_STATUS status = NV_OK; + + // + // Validate that if hCtxDma is passed in it is the same as the hCtxDma + // used for the top level RmControl hObject + // + if (pUpdateCtxDmaParams->hCtxDma != NV01_NULL_OBJECT) + NV_ASSERT_OR_RETURN(pUpdateCtxDmaParams->hCtxDma == RES_GET_HANDLE(pContextDma), NV_ERR_INVALID_OBJECT); + + if (pUpdateCtxDmaParams->hSubDevice != NV01_NULL_OBJECT) + { + Subdevice *pSubdevice; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + subdeviceGetByHandle(pClient, pUpdateCtxDmaParams->hSubDevice, &pSubdevice)); + + // Ensure requested hSubDevice is valid for the GPU associated with this contextdma + NV_CHECK_OR_RETURN(LEVEL_ERROR, pSubdevice->pDevice == pContextDma->pDevice, NV_ERR_INVALID_OBJECT_HANDLE); + + pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + else + { + pGpu = pContextDma->pGpu; + gpuSetThreadBcState(pGpu, !pContextDma->bUnicast); + } + + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + if (pKernelDisplay == NULL) + return NV_ERR_GENERIC; + + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_BASEADDRESS, _VALID, pUpdateCtxDmaParams->flags)) + pNewAddress = &pUpdateCtxDmaParams->baseAddress; + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_LIMIT, _VALID, pUpdateCtxDmaParams->flags)) + pNewLimit = &pUpdateCtxDmaParams->limit; + if (FLD_TEST_DRF(0002_CTRL_CMD, _UPDATE_CONTEXTDMA, _FLAGS_HINT, _VALID, pUpdateCtxDmaParams->flags)) + hMemory = pUpdateCtxDmaParams->hintHandle; + + comprInfo = DRF_VAL(0002_CTRL_CMD, _UPDATE_CONTEXTDMA_FLAGS, _USE_COMPR_INFO, pUpdateCtxDmaParams->flags); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + pInstMem = KERNEL_DISPLAY_GET_INST_MEM(pKernelDisplay); + + status = instmemUpdateContextDma_HAL(pGpu, pInstMem, pContextDma, + pNewAddress, pNewLimit, hMemory, comprInfo); + NV_ASSERT(status == NV_OK); + + SLI_LOOP_END + + return status; +} + +static NV_STATUS +_ctxdmaDestruct +( + ContextDma *pContextDma, + NvHandle hClient +) +{ + RsClient *pClient = RES_GET_CLIENT(pContextDma); + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = NULL; + + pGpu = pContextDma->pGpu; + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_WARN_NULL_OBJECT); + gpuSetThreadBcState(pGpu, !pContextDma->bUnicast); + + if (pContextDma->bUnicast || RES_GET_PARENT_HANDLE(pContextDma) == RES_GET_HANDLE(pContextDma->pDevice)) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do 
an RPC to the host to do the hardware update. + // + if ((IS_VIRTUAL(pGpu) && + (!(IS_VIRTUAL_WITH_SRIOV(pGpu) && (!gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))))) + { + NV_RM_RPC_FREE(pGpu, hClient, RES_GET_HANDLE(pContextDma->pMemory), RES_GET_HANDLE(pContextDma), rmStatus); + } + } + + // Clean-up the context, first unbind from display + _ctxdmaDestroyBindings(pClient, pContextDma, pGpu); + + // Handle unicast sysmem mapping mapping before _ctxdmaDestroyFBMappings() + if (pContextDma->AddressSpace == ADDR_SYSMEM) + { + NvU32 gpuDevInst = gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu)); + + if (pContextDma->KernelVAddr[gpuDevInst]) + { + memdescUnmapOld(pContextDma->pMemory->pMemDesc, NV_TRUE, 0, + pContextDma->KernelVAddr[gpuDevInst], + pContextDma->KernelPriv); + pContextDma->KernelVAddr[gpuDevInst] = NULL; + pContextDma->KernelPriv = NULL; + } + } + + // release video memory mappings associated with context dma + _ctxdmaDestroyFBMappings(pContextDma, pGpu); + + // Ideally we'd do all of the below in RmFreeDeviceContextDma when + // DeviceRefCount goes to 0 but leaving here because RmFreeDeviceContextDma + // is also called from other places. + memdescFree(pContextDma->pMemDesc); + memdescDestroy(pContextDma->pMemDesc); + pContextDma->pMemDesc = NULL; + + return rmStatus; +} + +static void +_ctxdmaDestroyBindings +( + RsClient *pClient, + ContextDma *pContextDma, + OBJGPU *pGpu +) +{ + NV_STATUS status; + DispObject *pDispObject; + DispChannel *pDispChannel; + RS_ITERATOR channelIt; + RsResourceRef *pResourceRef; + + if (!ctxdmaIsBound(pContextDma)) + return; + + status = dispobjGetByDevice(pClient, pContextDma->pDevice, &pDispObject); + if (status != NV_OK) + return; + + pResourceRef = RES_GET_REF(pDispObject); + + // Unbind the ctx dma from all disp channels + channelIt = clientRefIter(pClient, pResourceRef, classId(DispChannel), RS_ITERATE_CHILDREN, NV_FALSE); + + while (clientRefIterNext(channelIt.pClient, &channelIt)) + { + pDispChannel = dynamicCast(channelIt.pResourceRef->pResource, DispChannel); + + // Make sure we are not bound. Will return an error if not bound. + (void)dispchnUnbindCtx(pDispChannel, pGpu, pContextDma); + } +} + +/*! + * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM. + */ +NV_STATUS +ctxdmaCtrlCmdBindContextdma_IMPL +( + ContextDma *pContextDma, + NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxDmaParams +) +{ + DispChannel *pDispChannel = NULL; + RsClient *pClient = RES_GET_CLIENT(pContextDma); + NvHandle hChannel = pBindCtxDmaParams->hChannel; + + gpuSetThreadBcState(pContextDma->pGpu, !pContextDma->bUnicast); + + if (!osIsRaisedIRQL()) + { + NvHandle hClient = RES_GET_CLIENT_HANDLE(pContextDma); + RsResourceRef *pChannelRef; + + // Silently allow host channel bind API to succeed. There is no HW state to update. + if (serverutilGetResourceRefWithType(hClient, hChannel, classId(KernelChannel), &pChannelRef) == NV_OK) + { + return NV_OK; + } + } + + // Look-up channel given by client + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + dispchnGetByHandle(pClient, hChannel, &pDispChannel)); + + // Ensure ContextDma and DisplayChannel are on the save device + NV_CHECK_OR_RETURN(LEVEL_ERROR, pContextDma->pDevice == GPU_RES_GET_DEVICE(pDispChannel), + NV_ERR_INVALID_DEVICE); + + API_GPU_FULL_POWER_SANITY_CHECK(pContextDma->pGpu, NV_TRUE, NV_FALSE); + + // + // Call the hal to alloc inst mem, write the ctxdma data, and write + // the hash table entry. 
+ // + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, dispchnBindCtx(pDispChannel, pContextDma->pGpu, pContextDma)); + + return NV_OK; +} + +/*! + * NOTE: this control call may be called at high IRQL with LOCK_BYPASS on WDDM. + */ +NV_STATUS +ctxdmaCtrlCmdUnbindContextdma_IMPL +( + ContextDma *pContextDma, + NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxDmaParams +) +{ + DispChannel *pDispChannel = NULL; + RsClient *pClient = RES_GET_CLIENT(pContextDma); + NvHandle hChannel = pUnbindCtxDmaParams->hChannel; + + gpuSetThreadBcState(pContextDma->pGpu, !pContextDma->bUnicast); + + // Look-up channel given by client + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + dispchnGetByHandle(pClient, hChannel, &pDispChannel)); + + // Ensure ContextDma and DisplayChannel are on the save device + NV_CHECK_OR_RETURN(LEVEL_ERROR, pContextDma->pDevice == GPU_RES_GET_DEVICE(pDispChannel), + NV_ERR_INVALID_DEVICE); + + API_GPU_FULL_POWER_SANITY_CHECK(pContextDma->pGpu, NV_TRUE, NV_FALSE); + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + dispchnUnbindCtx(pDispChannel, pContextDma->pGpu, pContextDma)); + + return NV_OK; +} + +static NV_STATUS +_ctxdmaConstruct +( + ContextDma *pContextDma, + RsClient *pClient, + NvHandle hSubDevice, + NvU32 hClass, + NvU32 flags, + RsResourceRef *pMemoryRef, + NvU64 offset, + NvU64 limit +) +{ + NV_STATUS rmStatus = NV_OK; + Memory *pMemory = NULL; + OBJGPU *pGpu = NULL; + MemoryManager *pMemoryManager = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvHandle hDevice = 0; + NvHandle hClient = pClient->hClient; + Device *pDevice = NULL; + + pMemory = dynamicCast(pMemoryRef->pResource, Memory); + if (pMemory == NULL) + return NV_ERR_INVALID_OBJECT; + + if (hSubDevice != 0) + { + pContextDma->bUnicast = NV_TRUE; + rmStatus = gpuGetByHandle(pClient, hSubDevice, NULL, &pGpu); + if (rmStatus != NV_OK) + return rmStatus; + } + else + { + pContextDma->bUnicast = NV_FALSE; + pGpu = pMemory->pGpu; + if (pGpu == NULL) + return NV_ERR_INVALID_OBJECT_PARENT; + } + + gpuSetThreadBcState(pGpu, !pContextDma->bUnicast); + + rmStatus = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + if (rmStatus != NV_OK) + return NV_ERR_INVALID_OBJECT_PARENT; + + pContextDma->pDevice = pDevice; + + hDevice = RES_GET_HANDLE(pDevice); + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + + pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + pMemDesc = pMemory->pMemDesc; + + // + // Validate the offset and limit passed in + // Check end of contextdma is within the memory object which was created (RmAllocMemory) + // Since "limit" is inclusive, it should be strictly less than the length + // + { + NvU64 combinedLimit; + if (!portSafeAddU64(offset, limit, &combinedLimit) || + (combinedLimit >= pMemory->Length)) + { + return NV_ERR_INVALID_LIMIT; + } + } + + // The destructor expects the following fields in pContextDma to be set, + // so do not invoke destructor (goto done) before they are assigned. + pContextDma->pMemory = pMemory; + pContextDma->pGpu = pGpu; + + pContextDma->AddressSpace = memdescGetAddressSpace(memdescGetMemDescFromGpu(pMemDesc, pGpu)); + + // Fail allocation of virtual ContextDmas. These have moved the DynamicMemory. 
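+    // Such requests are expected to use DynamicMemory and fail here with
+    // NV_ERR_OBJECT_TYPE_MISMATCH.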
+ if (pContextDma->AddressSpace == ADDR_VIRTUAL) + { + return NV_ERR_OBJECT_TYPE_MISMATCH; + } + + // + // Create a MEMORY_DESCRIPTOR describing this region of the memory alloc + // in question + // + rmStatus = memdescCreateSubMem( + &pContextDma->pMemDesc, pMemDesc, pGpu, offset, limit+1); + if (rmStatus != NV_OK) + goto done; + + if (pContextDma->AddressSpace == ADDR_SYSMEM) + { + if (pContextDma->Type == NVOS03_FLAGS_MAPPING_KERNEL) + { + rmStatus = memdescMapOld( + pMemDesc, + offset, limit+1, NV_TRUE, NV_PROTECT_READ_WRITE, + &pContextDma->KernelVAddr[gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu))], + &pContextDma->KernelPriv); + if (rmStatus != NV_OK) + goto done; + } + } + + if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _BL, flags)) + { + memdescSetPteKind(pContextDma->pMemDesc, memmgrGetPteKindBl_HAL(pGpu, pMemoryManager)); + } + else if (FLD_TEST_DRF(OS03, _FLAGS, _PTE_KIND, _PITCH, flags)) + { + memdescSetPteKind(pContextDma->pMemDesc, memmgrGetPteKindPitch_HAL(pGpu, pMemoryManager)); + } + + // + // If this ctxdma is a notifier AND it is in vidmem, create a kernel mapping to + // it for use later in case a SW method needs to update it. + // + if ( (DRF_VAL(OS03, _FLAGS, _TYPE, flags) == NVOS03_FLAGS_TYPE_NOTIFIER) && + (pContextDma->AddressSpace == ADDR_FBMEM) ) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + // + // Allocate GPU virtual address space for the video memory region + // for those GPUs that support it. + // + pContextDma->FbApertureLen[gpuSubDevInst] = pContextDma->Limit + 1; + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. 
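+            // When not running as a guest without SR-IOV, the aperture is
+            // instead mapped directly via kbusMapFbAperture_HAL below.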
+ // + NV_RM_RPC_MAP_MEMORY(pGpu, hClient, hDevice, RES_GET_HANDLE(pMemory), offset, pContextDma->FbApertureLen[gpuSubDevInst], + 0, &pContextDma->FbAperture[gpuSubDevInst], rmStatus); + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + if (rmStatus == NV_OK) + { + pContextDma->FbAperture[gpuSubDevInst] = + pContextDma->FbAperture[gpuSubDevInst] - pGpu->busInfo.gpuPhysFbAddr; + } + } + else + { + rmStatus = kbusMapFbAperture_HAL(pGpu, pKernelBus, + pMemDesc, offset, + &pContextDma->FbAperture[gpuSubDevInst], + &pContextDma->FbApertureLen[gpuSubDevInst], + BUS_MAP_FB_FLAGS_MAP_UNICAST, hClient); + } + if (rmStatus != NV_OK) + { + pContextDma->FbApertureLen[gpuSubDevInst] = 0; + SLI_LOOP_GOTO(done); + } + + memdescSetPageSize(pContextDma->pMemDesc, AT_GPU, + memdescGetPageSize(pMemDesc, AT_GPU)); + + rmStatus = osMapPciMemoryKernelOld(pGpu, + gpumgrGetGpuPhysFbAddr(pGpu) + pContextDma->FbAperture[gpuSubDevInst], + pContextDma->Limit+1, + NV_PROTECT_READ_WRITE, + &pContextDma->KernelVAddr[gpuSubDevInst], + NV_MEMORY_WRITECOMBINED); + if (rmStatus != NV_OK) + { + // Force out of the SLI loop + SLI_LOOP_BREAK; + } + + { + RsResourceRef *pResourceRef = RES_GET_REF(pMemory); + RsCpuMapping *pCpuMapping = NULL; + RS_CPU_MAP_PARAMS dummyParams; + portMemSet(&dummyParams, 0, sizeof(dummyParams)); + + refAddMapping(pResourceRef, &dummyParams, pResourceRef->pParentRef, &pCpuMapping); + rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping, + NV_TRUE, + NvP64_NULL, + NV_PTR_TO_NvP64(pContextDma->KernelVAddr[gpuSubDevInst]), + pContextDma->Limit+1, + pContextDma->FbAperture[gpuSubDevInst], + pContextDma->FbApertureLen[gpuSubDevInst], + NVOS33_FLAGS_ACCESS_READ_WRITE); + pCpuMapping->pPrivate->pGpu = pGpu; + } + + } + SLI_LOOP_END + + if (rmStatus != NV_OK) + goto done; + } + +done: + + if (rmStatus == NV_OK) + { + if (IS_VIRTUAL(pGpu)) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + NV_RM_RPC_ALLOC_CONTEXT_DMA(pGpu, hClient, hDevice, RES_GET_HANDLE(pContextDma), hClass, + flags, RES_GET_HANDLE(pMemory), offset, limit, rmStatus); + } + } + + if (rmStatus != NV_OK) + { + memdescDestroy(pContextDma->pMemDesc); + pContextDma->pMemDesc = NULL; + + if (pContextDma->AddressSpace == ADDR_FBMEM) + _ctxdmaDestroyFBMappings(pContextDma, pGpu); + + _ctxdmaDestruct(pContextDma, hClient); + } + + return rmStatus; +} + +// +// Fetch ContextDma from resource server +// +NV_STATUS +ctxdmaGetByHandle +( + RsClient *pClient, + NvHandle hContextDma, + ContextDma **ppContextDma +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppContextDma = NULL; + + status = clientGetResourceRef(pClient, hContextDma, &pResourceRef); + if (status != NV_OK) + { + return status; + } + + *ppContextDma = dynamicCast(pResourceRef->pResource, ContextDma); + + return (*ppContextDma) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +// +// Validate that the range described by Start+Length is within ContextDma +// limits. +// +NV_STATUS +ctxdmaValidate_IMPL +( + ContextDma *pContextDma, + NvU64 Start, + NvU64 Length +) +{ + if (pContextDma == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Invalid DMA context in ctxdmaValidate\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_XLATE; + } + DBG_VAL_PTR(pContextDma); + + if ((Start + Length - 1) > pContextDma->Limit) + return NV_ERR_INVALID_OFFSET; + + return NV_OK; +} + +// +// Return the CPU VA of a DMA buffer. 
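+// The requested range is validated against the ContextDma limit, and
+// NV_ERR_DMA_MEM_NOT_LOCKED is returned if no kernel mapping exists for
+// the given subdevice index.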
+// +NV_STATUS +ctxdmaGetKernelVA_IMPL +( + ContextDma *pContextDma, + NvU64 Start, + NvU64 Length, + void **ppAddress, + NvU32 VA_idx +) +{ + NV_STATUS status; + + if (pContextDma == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Invalid DMA context in ctxdmaGetKernelVA\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_XLATE; + } + DBG_VAL_PTR(pContextDma); + + status = ctxdmaValidate(pContextDma, Start, Length); + if (status != NV_OK) + return status; + + if (pContextDma->KernelVAddr[VA_idx] == NULL) + return NV_ERR_DMA_MEM_NOT_LOCKED; + + *ppAddress = (NvU8*)pContextDma->KernelVAddr[VA_idx] + Start; + + return NV_OK; +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * @warning This function is deprecated! Please use ctxdmaGetByHandle. + */ +NV_STATUS +CliGetContextDma +( + NvHandle hClient, + NvHandle hContextDma, + ContextDma **ppContextDma +) +{ + RsClient *pClient; + NV_STATUS status; + + *ppContextDma = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NV_ERR_INVALID_CLIENT; + + return ctxdmaGetByHandle(pClient, hContextDma, ppContextDma); +} + +NV_STATUS +ctxdmaMapTo_IMPL +( + ContextDma *pContextDma, + RS_RES_MAP_TO_PARAMS *pParams +) +{ + OBJGPU *pGpu = pParams->pGpu; + MEMORY_DESCRIPTOR *pSrcMemDesc = pParams->pSrcMemDesc; + NvU64 offset = pParams->offset; + + // + // For video memory, provide a way to look up the offset of an FB allocation within + // the given context target context dma. still useful for dFPGA. + // It is used by mods. + // + if ((memdescGetAddressSpace(memdescGetMemDescFromGpu(pSrcMemDesc, pGpu)) == ADDR_FBMEM) && + (memdescGetAddressSpace(memdescGetMemDescFromGpu(pContextDma->pMemDesc, pGpu)) == ADDR_FBMEM)) + { + RmPhysAddr physaddr; + if (!memdescGetContiguity(pSrcMemDesc, AT_GPU)) + { + NV_PRINTF(LEVEL_ERROR, "Cannot obtain the video memory offset of a noncontiguous vidmem alloc!\n"); + return NV_ERR_GENERIC; + } + + // Return an Big GPU device physical address, if available + physaddr = memdescGetPhysAddr(pSrcMemDesc, AT_GPU, offset); + *pParams->pDmaOffset = physaddr - memdescGetPhysAddr(pContextDma->pMemDesc, AT_GPU, 0); + return NV_OK; + } + + // We no longer support tracking mappings on ContextDma. Has moved to DynamicMemory. + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +ctxdmaUnmapFrom_IMPL +( + ContextDma *pContextDma, + RS_RES_UNMAP_FROM_PARAMS *pParams +) +{ + // + // With ContextDmas only supporting physical (or IOMMU VA) there is + // nothing to unmap. We silently allow this call for compatibility. + // + return NV_OK; +} + +/*! + * @brief Is the ContextDma bound to a display channel? + * + * This is a fast check to see if a ContextDma is bound to a display channel. + * + * This is called during display channel or ContextDma teardown only, + * which DD cannot do while a using LOCK_BYPASS bind is active with + * these objects. Locking would require per subdevice lock/unlock. 
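+ *
+ * The check simply sums the per-subdevice instance reference counts and
+ * reports the ContextDma as bound if any of them is non-zero.
+ */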
+ */ +NvBool +ctxdmaIsBound_IMPL +( + ContextDma *pContextDma +) +{ + NvU32 refs = 0; + NvU32 i; + + for (i=0; i < NV_MAX_SUBDEVICES; i++) + refs += pContextDma->InstRefCount[i]; + + return refs != 0; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/dma.c b/src/nvidia/src/kernel/gpu/mem_mgr/dma.c new file mode 100644 index 000000000..1f83f3020 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/dma.c @@ -0,0 +1,1232 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/********************************* VMA Manager *****************************\ +* * +* The VirtMemAllocator is managed in this module. All priviledged * +* state and object interaction is handled here. * +* * +****************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "lib/base_utils.h" +#include "rmapi/control.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "os/os.h" +#include "core/system.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "diagnostics/profiler.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/gpu_vaspace.h" +#include "mem_mgr/virtual_mem.h" +#include "class/cl0000.h" +#include "class/cl90f1.h" // FERMI_VASPACE_A +#include "ctrl/ctrl0080/ctrl0080dma.h" +#include "ctrl/ctrl208f/ctrl208fdma.h" +#include "vgpu/rpc.h" +#include "core/locks.h" +#include "gpu/subdevice/subdevice_diag.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/bus/kern_bus.h" + +/*! + * @brief Allocate mapping. + * +* @todo Update function comment. + * Please update function description and argument comments + * if you do understand what function does and arguments mean. + * Below is just a template for you. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pDma VirtMemAllocator pointer + * @param[in] pVirtualMemory VirtualMemory pointer + * @param[in] pMemory Memory object to map + * @param[in] pDmaMappingInfo CLI_DMA_MAPPING_INFO pointer + * + * @returns NV_STATUS status = NV_OK on success, or status upon failure. 
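+ *
+ * When NVOS46_FLAGS_DMA_OFFSET_FIXED is set, DmaOffset is treated as an
+ * absolute virtual address; otherwise it is relative to the base of the
+ * target virtual allocation. On success the resolved address is written
+ * back into pDmaMappingInfo->DmaOffset.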
+ */ +NV_STATUS +dmaAllocMap_IMPL +( + OBJGPU *pGpu, + VirtMemAllocator *pDma, + OBJVASPACE *pVAS, + VirtualMemory *pVirtualMemory, + Memory *pMemory, + CLI_DMA_MAPPING_INFO *pDmaMappingInfo +) +{ + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU32 p2p; + NvU64 vaddr; + NvU32 dmaAllocMapFlag; + NvU64 baseVirtAddr; + NvU64 virtSize; + NvU32 swizzId = KMIGMGR_SWIZZID_INVALID; + + CLI_DMA_ALLOC_MAP_INFO mapInfo; + + NV_ASSERT(pVirtualMemory != NULL); + + p2p = DRF_VAL(OS46, _FLAGS, _P2P_ENABLE, pDmaMappingInfo->Flags); + + // + // By default ADDR_FABRIC_V2 should be mapped as peer memory. So, don't honor + // any P2P flags. + // + if (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_FABRIC_V2) + { + p2p = 0; + } + + if ((p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) && IsSLIEnabled(pGpu)) + { + NvU32 deviceInst = gpuGetDeviceInstance(pGpu); + NvU32 subDevIdTgt = DRF_VAL(OS46, _FLAGS, _P2P_SUBDEV_ID_TGT, pDmaMappingInfo->Flags); + + pGpu = gpumgrGetGpuFromSubDeviceInst(deviceInst, subDevIdTgt); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + pDma = GPU_GET_DMA(pGpu); + } + + // + // Temporarily set _DMA_UNICAST_REUSE_ALLOC for NV50_MEMORY_VIRTUAL since that + // class has already assigned VA space and allocated PTEs. + // + dmaAllocMapFlag = pDmaMappingInfo->Flags; + if (pVirtualMemory->bReserveVaOnAlloc) + dmaAllocMapFlag = FLD_SET_DRF(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, _TRUE, dmaAllocMapFlag); + + // + // Calculate the virtual address of the mapping. + // + virtmemGetAddressAndSize(pVirtualMemory, &baseVirtAddr, &virtSize); + if (FLD_TEST_DRF(OS46, _FLAGS, _DMA_OFFSET_FIXED, _TRUE, pDmaMappingInfo->Flags)) + { + // Fixed offset indicates an absolute virtual address. + vaddr = pDmaMappingInfo->DmaOffset; + } + else + { + // Otherwise the offset is relative to the target virtual allocation. + vaddr = baseVirtAddr + pDmaMappingInfo->DmaOffset; + } + + // + // Check the result is within the bounds of the target virtual allocation. + // + // Only perform this check for mappings to existing virtual memory. + // For CTXDMA case this check is meaningless since the [IN] dmaOffset will be garbage. + // + if (FLD_TEST_DRF(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, _TRUE, dmaAllocMapFlag)) + { + NV_ASSERT_OR_RETURN(vaddr >= baseVirtAddr, NV_ERR_INVALID_OFFSET); + NV_ASSERT_OR_RETURN(vaddr < (baseVirtAddr + virtSize), NV_ERR_INVALID_OFFSET); + } + + mapInfo.pVirtualMemory = pVirtualMemory; + mapInfo.pMemory = pMemory; + mapInfo.pDmaMappingInfo = pDmaMappingInfo; + + if ((pKernelMIGManager != NULL) && kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager)) + { + NvHandle hClient = RES_GET_CLIENT_HANDLE(pVirtualMemory); + MIG_INSTANCE_REF ref; + + NV_ASSERT_OK_OR_RETURN(kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + swizzId = ref.pKernelMIGGpuInstance->swizzId; + } + + status = dmaAllocMapping_HAL(pGpu, + pDma, + pVAS, + pDmaMappingInfo->pMemDesc, + &vaddr, + dmaAllocMapFlag, // Use locally updated dmaAllocMapFlag + &mapInfo, + swizzId); + + if (status == NV_OK) + { + pDmaMappingInfo->DmaOffset = vaddr; + } + + if ((p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) && IsSLIEnabled(pGpu)) + { + pGpu = gpumgrGetParentGPU(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + } + + return status; +} + +/*! + * @brief Free mapping. + * + * @todo Update function comment. + * Please update function description and argument comments + * if you do understand what function does and arguments mean. + * Below is just a template for you. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pDma VirtMemAllocator pointer + * @param[in] pVirtualMemory VirtualMemory pointer + * @param[in] pDmaMappingInfo CLI_DMA_MAPPING_INFO pointer + * + * @returns NV_STATUS status = NV_OK on success, or status upon failure. + */ +NV_STATUS +dmaFreeMap_IMPL +( + OBJGPU *pGpu, + VirtMemAllocator *pDma, + OBJVASPACE *pVAS, + VirtualMemory *pVirtualMemory, + CLI_DMA_MAPPING_INFO *pDmaMappingInfo, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + NvU32 p2p, subDevIdTgt; + NvU32 deviceInst = gpuGetDeviceInstance(pGpu); + CLI_DMA_ALLOC_MAP_INFO mapInfo; + + p2p = DRF_VAL(OS46, _FLAGS, _P2P_ENABLE, pDmaMappingInfo->Flags); + subDevIdTgt = DRF_VAL(OS46, _FLAGS, _P2P_SUBDEV_ID_TGT, pDmaMappingInfo->Flags); + + if ((p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) && IsSLIEnabled(pGpu)) + { + pGpu = gpumgrGetGpuFromSubDeviceInst(deviceInst, subDevIdTgt); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + pDma = GPU_GET_DMA(pGpu); + } + + mapInfo.pVirtualMemory = pVirtualMemory; + mapInfo.pMemory = NULL; + mapInfo.pDmaMappingInfo = pDmaMappingInfo; + + // free mapping in context dma + status = dmaFreeMapping_HAL(pGpu, pDma, pVAS, pDmaMappingInfo->DmaOffset, + pDmaMappingInfo->pMemDesc, flags, &mapInfo); + + if ((p2p == NVOS46_FLAGS_P2P_ENABLE_NOSLI) && IsSLIEnabled(pGpu)) + { + pGpu = gpumgrGetParentGPU(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + } + + return status; +} + +// +// deviceCtrlCmdDmaGetPteInfo_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaGetPteInfo_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJVASPACE *pVAS = NULL; + NV_STATUS status = NV_OK; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), pRmCtrlParams->hObject, + pParams->hVASpace, &pVAS)); + + status = vaspaceGetPteInfo(pVAS, pGpu, pParams, NULL); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "vaspaceGetPteInfo failed\n"); + } + + return status; +} + +// +// deviceCtrlCmdDmaUpdatePde2_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaUpdatePde2_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);; + OBJVASPACE *pVAS = NULL; + NV_STATUS status = NV_OK; + NvBool bBcState = NV_TRUE; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if ( + (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + NV_RM_RPC_UPDATE_PDE_2(pGpu, RES_GET_CLIENT_HANDLE(pDevice), RES_GET_HANDLE(pDevice), pParams, status); + return status; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice), + pParams->hVASpace, &pVAS)); + + // Force to UC if client passed in sub-device handle. 
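+    //
+    // A subDeviceId of 0 means broadcast (the update is applied to every
+    // subdevice); a non-zero value is a 1-based subdevice index, e.g.
+    // subDeviceId = 2 selects subdevice instance 1. In the unicast case the
+    // current BC state is saved, pGpu is retargeted to that subdevice and
+    // broadcast is disabled, then the saved state is restored after the
+    // SLI loop.
+    //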
+ if (0 != pParams->subDeviceId) + { + bBcState = gpumgrGetBcEnabledStatus(pGpu); + + pGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), + pParams->subDeviceId - 1); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE); + if (pGVAS == NULL) + { + status = NV_ERR_NOT_SUPPORTED; + SLI_LOOP_BREAK; + } + status = gvaspaceUpdatePde2(pGVAS, pGpu, pParams); + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + } + SLI_LOOP_END + + // Restore BC if required. + if (0 != pParams->subDeviceId) + { + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + + return status; +} + +// +// deviceCtrlCmdDmaSetVASpaceSize_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaSetVASpaceSize_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJVASPACE *pVAS = NULL; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice), + pParams->hVASpace, &pVAS)); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in non SRIOV (legacy) guest RM, do a + // RPC to the host to do the hardware update. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + if (status != NV_OK) + { + return status; + } + } + + OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE); + NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OK_OR_RETURN(gvaspaceResize(pGVAS, pParams)); + + return NV_OK; +} + +// +// deviceCtrlCmdDmaSetPageDirectory_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaSetPageDirectory_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pDevice); + NvHandle hDevice = RES_GET_HANDLE(pDevice); + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJVASPACE *pVAS; + NV_STATUS status = NV_OK; + NvBool bBcState = NV_FALSE; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_SET_PAGE_DIRECTORY(pGpu, hClient, hDevice, pParams, status); + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || status != NV_OK) + { + return status; + } + } + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), hDevice, + pParams->hVASpace, &pVAS)); + + // Force to UC if client passed in sub-device handle. 
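+    //
+    // Same convention as in deviceCtrlCmdDmaUpdatePde2 above: subDeviceId == 0
+    // keeps broadcast, otherwise (subDeviceId - 1) is the subdevice instance to
+    // target unicast; the broadcast state is restored once the SLI loop is done.
+    //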
+ if (0 != pParams->subDeviceId) + { + bBcState = gpumgrGetBcEnabledStatus(pGpu); + + pGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), + pParams->subDeviceId - 1); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE); + if (pGVAS == NULL) + { + status = NV_ERR_NOT_SUPPORTED; + SLI_LOOP_BREAK; + } + status = gvaspaceExternalRootDirCommit(pGVAS, hClient, pGpu, pParams); + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + } + SLI_LOOP_END + + // Restore BC if required. + if (0 != pParams->subDeviceId) + { + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + + if (status != NV_OK && IS_GSP_CLIENT(pGpu)) + { + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS params = {0}; + + params.hVASpace = pParams->hVASpace; + params.subDeviceId = pParams->subDeviceId; + + NV_RM_RPC_UNSET_PAGE_DIRECTORY(pGpu, hClient, hDevice, ¶ms, status); + } + + if (status != NV_OK && IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS params = {0}; + + params.hVASpace = pParams->hVASpace; + params.subDeviceId = pParams->subDeviceId; + + NV_RM_RPC_CONTROL(pGpu, + hClient, + hDevice, + NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY, + ¶ms, + sizeof(NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS), + status); + + } + + return status; +} + +// +// deviceCtrlCmdDmaUnsetPageDirectory_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaUnsetPageDirectory_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJVASPACE *pVAS = NULL; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + NvBool bBcState = NV_FALSE; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice), + pParams->hVASpace, &pVAS)); + + OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE); + NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_NOT_SUPPORTED); + + // Force to UC if client passed in sub-device handle. + if (pParams->subDeviceId != 0) + { + bBcState = gpumgrGetBcEnabledStatus(pGpu); + + pGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), + pParams->subDeviceId - 1); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + status = gvaspaceExternalRootDirRevoke(pGVAS, pGpu, pParams); + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + } + SLI_LOOP_END + + // + // Updating the instance block of all channels in the TSGs that's using + // the VA space + // + status = gvaspaceUnregisterAllChanGrps(pGVAS, pGpu); + + // Restore BC if required. 
+ if (pParams->subDeviceId != 0) + { + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + + if (IS_GSP_CLIENT(pGpu) || IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + } + + return status; +} + +// +// deviceCtrlCmdDmaSetPteInfo_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaSetPteInfo_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJVASPACE *pVAS = NULL; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice), + pParams->hVASpace, &pVAS)); + + status = vaspaceSetPteInfo(pVAS, pGpu, pParams); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "vaspaceGetPteInfo failed\n"); + NV_ASSERT(0); + } + + return status; +} + +// +// deviceCtrlCmdDmaFlush_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaFlush_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_FLUSH_PARAMS *flushParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + FB_CACHE_MEMTYPE targetMem = FB_CACHE_MEM_UNDEFINED; + FB_CACHE_OP cacheOp = FB_CACHE_OP_UNDEFINED; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_PRINTF(LEVEL_INFO, "Flush op invoked with target Unit 0x%x\n", + flushParams->targetUnit); + + if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _L2_INVALIDATE, + _SYSMEM, flushParams->targetUnit)) + { + targetMem = FB_CACHE_SYSTEM_MEMORY; + cacheOp = FB_CACHE_INVALIDATE; + } + if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _L2_INVALIDATE, + _PEERMEM, flushParams->targetUnit)) + { + targetMem = FB_CACHE_PEER_MEMORY; + cacheOp = FB_CACHE_INVALIDATE; + } + if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _L2, _ENABLE, + flushParams->targetUnit)) + { + targetMem = FB_CACHE_DIRTY; + cacheOp = FB_CACHE_WRITEBACK; + } + if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _COMPTAG, _ENABLE, + flushParams->targetUnit)) + { + targetMem = FB_CACHE_COMPTAG_MEMORY; + cacheOp = FB_CACHE_WRITEBACK; + } + + if ((targetMem != FB_CACHE_MEM_UNDEFINED) && (cacheOp != FB_CACHE_OP_UNDEFINED)) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + status = kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, NULL, targetMem, cacheOp); + if (status != NV_OK) + { + SLI_LOOP_RETURN(status); + } + } + SLI_LOOP_END + } + + if (FLD_TEST_DRF(0080, _CTRL_DMA_FLUSH_TARGET_UNIT, _FB, _ENABLE, + flushParams->targetUnit)) + { + status = kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY); + } + + return status; +} + +// +// deviceCtrlCmdDmaAdvSchedGetVaCaps_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaAdvSchedGetVaCaps_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJVASPACE *pVAS = NULL; + NV_STATUS status = NV_OK; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + 
NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice), + pParams->hVASpace, &pVAS)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + vaspaceGetVasInfo(pVAS, pParams)); + + pParams->compressionPageSize = pMemorySystemConfig->comprPageSize; + pParams->vaSpaceId = pVAS->vaspaceId; + + return status; +} + +// +// deviceCtrlCmdDmaGetPdeInfo_IMPL +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaGetPdeInfo_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = NULL; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in non SRIOV (legacy) guest RM, do a + // RPC to the host to do the hardware update. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + NV_STATUS status = NV_OK; + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice), + pParams->hVASpace, &pVAS)); + + if(vaspaceGetPageTableInfo(pVAS, pParams) != NV_OK) + { + status = NV_ERR_INVALID_EVENT; + NV_PRINTF(LEVEL_ERROR, "vaspaceGetPageTableInfo failed\n"); + NV_ASSERT(0); + } + + return status; +} +NV_STATUS +deviceCtrlCmdDmaSetDefaultVASpace_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_SET_DEFAULT_VASPACE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OK_OR_RETURN( + deviceSetDefaultVASpace( + pDevice, + pParams->hVASpace)); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + NV_RM_RPC_CONTROL(pRmCtrlParams->pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + } + + return status; +} + +// +// subdeviceCtrlCmdDmaInvalidateTLB +// +// Lock Requirements: +// Assert that GPUs lock held on entry +// Called from SW method w/o API lock +// +NV_STATUS +subdeviceCtrlCmdDmaInvalidateTLB_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJVASPACE *pVAS = NULL; + + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pSubdevice), RES_GET_PARENT_HANDLE(pSubdevice), + pParams->hVASpace, &pVAS)); + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_OK; + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + // + // Although this function is 
used following PTE upgrades most of the time, + // we cannot guarantee that, nor can we easily determine the update type. + // + vaspaceInvalidateTlb(pVAS, pGpu, PTE_DOWNGRADE); + + return NV_OK; +} + +/*! + * @brief subdeviceCtrlCmdDmaGetInfo + * + * Lock Requirements: + * Assert that both the GPUs lock and API lock are held on entry. + */ +NV_STATUS +subdeviceCtrlCmdDmaGetInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_DMA_GET_INFO_PARAMS *pDmaInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + NvU32 i; + NvU32 data; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // error checck + if (pDmaInfoParams->dmaInfoTblSize > NV2080_CTRL_DMA_GET_INFO_MAX_ENTRIES) + return NV_ERR_INVALID_PARAM_STRUCT; + + // step thru list + for (i = 0; i < pDmaInfoParams->dmaInfoTblSize; i++) + { + switch (pDmaInfoParams->dmaInfoTbl[i].index) + { + case NV2080_CTRL_DMA_INFO_INDEX_SYSTEM_ADDRESS_SIZE: + data = gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM); + break; + default: + { + data = 0; + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + } + + if (status != NV_OK) + break; + + // save off data value + pDmaInfoParams->dmaInfoTbl[i].data = data; + } + + return status; +} + +/*! + * @brief New TLB interface control call w/o engine masks. + * + * Lock Requirements: + * Assert that API lock and GPUs lock held on entry + * + */ +NV_STATUS +deviceCtrlCmdDmaInvalidateTLB_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_INVALIDATE_TLB_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + OBJVASPACE *pVAS = NULL; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDevice), RES_GET_HANDLE(pDevice), + pParams->hVASpace, &pVAS)); + + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + // + // Although this function is used following PTE upgrades most of the time, + // we cannot guarantee that, nor can we easily determine the update type. 
+ // + vaspaceInvalidateTlb(pVAS, pGpu, PTE_DOWNGRADE); + } + SLI_LOOP_END + + return NV_OK; +} + +// +// deviceCtrlCmdDmaGetCaps_IMPL +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdDmaGetCaps_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_GET_CAPS_PARAMS *pDmaCapsParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // sanity check array size + if (pDmaCapsParams->capsTblSize != NV0080_CTRL_DMA_CAPS_TBL_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n", + pDmaCapsParams->capsTblSize, NV0080_CTRL_DMA_CAPS_TBL_SIZE); + return NV_ERR_INVALID_ARGUMENT; + } + + portMemSet(pDmaCapsParams->capsTbl, 0, NV0080_CTRL_DMA_CAPS_TBL_SIZE); + + // Fill in caps + if (pDma->getProperty(pDma, PDB_PROP_DMA_ENFORCE_32BIT_POINTER)) + RMCTRL_SET_CAP(pDmaCapsParams->capsTbl, NV0080_CTRL_DMA_CAPS, _32BIT_POINTER_ENFORCED); + + if (pDma->getProperty(pDma, PDB_PROP_DMA_SHADER_ACCESS_SUPPORTED)) + RMCTRL_SET_CAP(pDmaCapsParams->capsTbl, NV0080_CTRL_DMA_CAPS, _SHADER_ACCESS_SUPPORTED); + + if (pDma->getProperty(pDma, PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL)) + RMCTRL_SET_CAP(pDmaCapsParams->capsTbl, NV0080_CTRL_DMA_CAPS, _SPARSE_VIRTUAL_SUPPORTED); + + // Supported on all platforms except the Maxwell amodel simulator + if (pDma->getProperty(pDma, PDB_PROP_DMA_MULTIPLE_VASPACES_SUPPORTED)) + RMCTRL_SET_CAP(pDmaCapsParams->capsTbl, NV0080_CTRL_DMA_CAPS, _MULTIPLE_VA_SPACES_SUPPORTED); + + return status; +} + +// +// deviceCtrlCmdDmaEnablePrivilegedRange_IMPL +// +// Lock Requirements: +// Assert that both locks are held on entry +// Enables the privileged range assuming that the vaspace +// has not yet been created. If the vaspace has already been +// created that means we have already made allocations in this +// vaspace(lazy allocation). In this case this ctrl call should fail. 
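+// In short: the call only succeeds when no explicit hVASpace is passed and the
+// device's VA space has not been created yet; it then records
+// NV_DEVICE_INTERNAL_ALLOCATION_FLAGS_ENABLE_PRIVILEGED_VASPACE so the privileged
+// range takes effect when the default VA space is lazily created later.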
+// +NV_STATUS +deviceCtrlCmdDmaEnablePrivilegedRange_IMPL +( + Device *pDevice, + NV0080_CTRL_DMA_ENABLE_PRIVILEGED_RANGE_PARAMS *pParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (pParams->hVASpace != NV01_NULL_OBJECT) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (pDevice->pVASpace == NULL) + { + pDevice->deviceInternalAllocFlags |= + NV_DEVICE_INTERNAL_ALLOCATION_FLAGS_ENABLE_PRIVILEGED_VASPACE; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +diagapiCtrlCmdDmaIsSupportedSparseVirtual_IMPL +( + DiagApi *pDiagApi, + NV208F_CTRL_DMA_IS_SUPPORTED_SPARSE_VIRTUAL_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi); + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + + pParams->bIsSupported = pDma->getProperty(pDma, PDB_PROP_DMA_IS_SUPPORTED_SPARSE_VIRTUAL); + return NV_OK; +} + +NV_STATUS +diagapiCtrlCmdDmaGetVasBlockDetails_IMPL +( + DiagApi *pDiagApi, + NV208F_CTRL_DMA_GET_VAS_BLOCK_DETAILS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi); + RsResourceRef *pSubdevRef; + Subdevice *pGpuSubDevInfo; + OBJVASPACE *pVAS = NULL; + OBJEHEAP *pHeap = NULL; + EMEMBLOCK *pMemBlock = NULL; + + if (NV_OK != refFindAncestorOfType(RES_GET_REF(pDiagApi), classId(Subdevice), &pSubdevRef)) + return NV_ERR_INVALID_OBJECT_PARENT; + + pGpuSubDevInfo = dynamicCast(pSubdevRef->pResource, Subdevice); + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pDiagApi), RES_GET_PARENT_HANDLE(pGpuSubDevInfo), + pParams->hVASpace, &pVAS)); + + pHeap = vaspaceGetHeap(pVAS); + NV_ASSERT_OR_RETURN(NULL != pHeap, NV_ERR_INVALID_ARGUMENT); + pMemBlock = pHeap->eheapGetBlock(pHeap, pParams->virtualAddress, 0); + NV_ASSERT_OR_RETURN(NULL != pMemBlock, NV_ERR_INVALID_ARGUMENT); + + pParams->beginAddress = pMemBlock->begin; + pParams->endAddress = pMemBlock->end; + pParams->alignedAddress = pMemBlock->align; + pParams->pageSize = vaspaceGetMapPageSize(pVAS, pGpu, pMemBlock); + + NV_ASSERT_OR_RETURN(0 != pParams->pageSize, NV_ERR_INVALID_ARGUMENT); + + return NV_OK; +} + +/*! + * Initialize an abstracted page array with opaque page array data. + * + * By default, the page data is treated as an RmPhysAddr array. + * If the data is an OS-specific format, the bOsFormat field must be + * set to NV_TRUE. + */ +void +dmaPageArrayInit +( + DMA_PAGE_ARRAY *pPageArray, //!< [out] Abstracted page array. + void *pPageData, //!< [in] Opaque page array data. + NvU32 pageCount //!< [in] Number of pages represented. +) +{ + portMemSet(pPageArray, 0, sizeof(*pPageArray)); + pPageArray->pData = pPageData; + pPageArray->count = pageCount; +} + +/*! + * Initialize an abstracted page array from a memory descriptor. + */ +void +dmaPageArrayInitFromMemDesc +( + DMA_PAGE_ARRAY *pPageArray, //!< [out] Abstracted page array. + MEMORY_DESCRIPTOR *pMemDesc, //!< [in] Memory descriptor. + ADDRESS_TRANSLATION addressTranslation //!< [in] Address translation for page array. +) +{ + dmaPageArrayInit(pPageArray, + memdescGetPteArray(pMemDesc, addressTranslation), + memdescGetPteArraySize(pMemDesc, addressTranslation)); +} + +/*! + * Extract a physical page address from an abstracted page array. + * + * @returns The physical (byte) address of the requested page. + * @returns ~0 if the index is out of bounds (fatal error). + */ +RmPhysAddr +dmaPageArrayGetPhysAddr +( + DMA_PAGE_ARRAY *pPageArray, //!< [in] Abstracted page array. + NvU32 pageIndex //!< [in] Page index to retrieve. 
+) +{ + RmPhysAddr addr; + + NV_ASSERT_OR_RETURN(pPageArray->pData, ~0ULL); + NV_ASSERT_OR_RETURN(pageIndex < pPageArray->count, ~0ULL); + + if (pPageArray->bDuplicate) + { + pageIndex = 0; + } + + if (pPageArray->bOsFormat) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + addr = pOS->osPageArrayGetPhysAddr(pPageArray->pOsGpuInfo, + pPageArray->pData, pPageArray->startIndex + pageIndex); + } + else + { + RmPhysAddr *pPteArray = pPageArray->pData; + addr = pPteArray[pPageArray->startIndex + pageIndex]; + } + + addr |= pPageArray->orMask; + + return addr; +} + +/*! + * Determine if the upper half for the comptag can be used for this page. + * + * @param[in] pDma VirtMemAllocator object pointer + * @param[in] pteIndex PTE index + * @param[in] pageSize Page size + * + * @returns Whether the upper half can be used or not. + */ +NvBool +dmaUseCompTagLineUpperHalf_IMPL(VirtMemAllocator *pDma, NvU32 pteIndex, NvU32 pageSize) +{ + NvBool bUseUpperHalf = NV_FALSE; + OBJGPU *pGpu = ENG_GET_GPU(pDma); + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (pDma->getProperty(pDma, PDB_PROP_DMA_ENABLE_FULL_COMP_TAG_LINE)) + { + NvU32 subIndexShift; + + if (pMemorySystemConfig->comprPageShift >= NvU64_LO32(nvLogBase2(pageSize))) + { + subIndexShift = pMemorySystemConfig->comprPageShift - NvU64_LO32(nvLogBase2(pageSize)); + } + else + { + NV_ASSERT_OR_RETURN(0, NV_FALSE); + } + + // + // (SubIndexShift - 1) is half the compression page size (64 KB). + // Shifting by that gives the number of PTEs that span a 64 KB page. + // The 0th bit gives comptag subindex. + // + if (subIndexShift) + { + bUseUpperHalf = !!((pteIndex >> (subIndexShift - 1)) & 1); + } + } + + return bUseUpperHalf; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/fbsr.c b/src/nvidia/src/kernel/gpu/mem_mgr/fbsr.c new file mode 100644 index 000000000..cf1c6cffc --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/fbsr.c @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/fbsr.h" + +NV_STATUS +fbsrObjectInit_IMPL(OBJFBSR *pFbsr, NvU32 type) +{ + pFbsr->type = type; + pFbsr->bValid = NV_FALSE; + pFbsr->bInitialized = NV_FALSE; + return NV_OK; +} + +/*! + * @brief Reserves the system memory for DMA type FBSR. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pFbsr OBJFBSR pointer + * @param[in] Size Reserved system memory size + * + * @returns NV_OK on success, error otherwise. + */ +NV_STATUS +fbsrReserveSysMemoryForPowerMgmt_IMPL +( + OBJGPU *pGpu, + OBJFBSR *pFbsr, + NvU64 Size +) +{ + NV_STATUS status; + + if (pFbsr->type != FBSR_TYPE_DMA) + return NV_ERR_GENERIC; + + status = memdescCreate(&pFbsr->pSysReservedMemDesc, pGpu, + Size, 0, NV_FALSE, + ADDR_SYSMEM, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE); + if (status != NV_OK) + return status; + + status = memdescAlloc(pFbsr->pSysReservedMemDesc); + if (status != NV_OK) + { + memdescDestroy(pFbsr->pSysReservedMemDesc); + pFbsr->pSysReservedMemDesc = NULL; + } + + return status; +} + +/*! + * @brief Free the reserved system memory for DMA type FBSR. + * + * @param[in] pFbsr OBJFBSR pointer + * + * @returns None + */ +void +fbsrFreeReservedSysMemoryForPowerMgmt_IMPL(OBJFBSR *pFbsr) +{ + if (pFbsr->pSysReservedMemDesc != NULL) + { + memdescFree(pFbsr->pSysReservedMemDesc); + memdescDestroy(pFbsr->pSysReservedMemDesc); + pFbsr->pSysReservedMemDesc = NULL; + } +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/heap.c b/src/nvidia/src/kernel/gpu/mem_mgr/heap.c new file mode 100644 index 000000000..482fdc025 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/heap.c @@ -0,0 +1,4813 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @file + * @brief Standard local frame buffer allocation and management routines + */ + +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "mem_mgr/video_mem.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/system_mem.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu_mgr/gpu_mgr.h" +#include "core/locks.h" +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "vgpu/rpc.h" +#include "gpu/mmu/kern_gmmu.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "gpu/device/device.h" +#include "kernel/gpu/intr/intr.h" + +typedef enum +{ + BLOCK_ADD, + BLOCK_REMOVE, + BLOCK_SIZE_CHANGED, + BLOCK_FREE_STATE_CHANGED, +} BlockAction; + +// +// Statics +// +static NV_STATUS _heapBlockFree(OBJGPU *, Heap *, MEM_BLOCK *); +static void _heapSetTexturePlacement(Heap *, NvU32, NvU32, NvBool*, + NvU32*, NvU8*); +static NV_STATUS _heapGetMaxFree(Heap *, NvU64 *, NvU64 *); +static NV_STATUS _heapGetBankPlacement(OBJGPU *, Heap *, NvU32, + NvU32 *, NvU32, NvU32, NvU32 *); +static MEM_BLOCK *_heapFindAlignedBlockWithOwner(OBJGPU *, Heap *, NvU32, + NvU64/* aligned*/); +static NV_STATUS _heapProcessFreeBlock(OBJGPU *, MEM_BLOCK *, MEM_BLOCK **, + MEM_BLOCK **, Heap *, + MEMORY_ALLOCATION_REQUEST *, + NvHandle, OBJHEAP_ALLOC_DATA *, + FB_ALLOC_INFO *, NvU64, NvU64 *); +static void _heapAddBlockToNoncontigList(Heap *, MEM_BLOCK *); +static void _heapRemoveBlockFromNoncontigList(Heap *, MEM_BLOCK *); +static NV_STATUS _heapFindBlockByOffset(OBJGPU *, Heap *, NvU32, + MEMORY_DESCRIPTOR *, NvU64, + MEM_BLOCK **); +static NV_STATUS _heapAllocNoncontig(OBJGPU *, NvHandle, Heap *, + MEMORY_ALLOCATION_REQUEST *, NvHandle, + OBJHEAP_ALLOC_DATA *, FB_ALLOC_INFO *, + NvU32, NvU64, NvU64 *, MEMORY_DESCRIPTOR *, + HWRESOURCE_INFO **); +static NV_STATUS _heapUpdate(Heap *, MEM_BLOCK *, BlockAction); +static void _heapAdjustFree(Heap *pHeap, NvS64 blockSize, NvBool internalHeap); +static void _heapBlacklistChunksInFreeBlocks(OBJGPU *, Heap *); + +#ifdef DEBUG + +/****************************************************************************/ +/* */ +/* DEBUG support! */ +/* */ +/****************************************************************************/ + +NvU32 dbgDumpHeap = 0; +NvU32 dbgReverseDumpHeap = 0; + +static void _heapDump(Heap *); +static void _heapValidate(Heap *); + +#define HEAP_VALIDATE(h) {_heapValidate(h);if(dbgDumpHeap)_heapDump(h);} + +static void ConvertOwnerToString(NvU32 owner, char *string) +{ + int i; + string[0] = (unsigned char)((owner >> 24)); + string[1] = (unsigned char)((owner >> 16) & 0xFF); + string[2] = (unsigned char)((owner >> 8) & 0xFF); + string[3] = (unsigned char)((owner ) & 0xFF); + string[4] = 0; + for (i=0 ;i<4; i++) { + // Assuming ASCII these should be "safe" printable characters. + if ((string[i] < ' ') || (string[i] > 0x7E)) { + string[i] = '?'; + } + } +} + +static void _heapDump +( + Heap *pHeap +) +{ + NvU64 free; + MEM_BLOCK *pBlock; + char ownerString[5]; + + if (!pHeap) return; + + NV_PRINTF(LEVEL_INFO, "Heap dump. 
Size = 0x%08llx\n", pHeap->total); + NV_PRINTF(LEVEL_INFO, " Free = 0x%08llx\n", pHeap->free); + NV_PRINTF(LEVEL_INFO, " Reserved = 0x%08llx\n", pHeap->reserved); + NV_PRINTF(LEVEL_INFO, + "=================================================================\n"); + NV_PRINTF(LEVEL_INFO, + "\t\t Begin End Size \t Type ResId Owner" + " \"owns\"\n"); + NV_PRINTF(LEVEL_INFO, "Block List %s\n", + dbgReverseDumpHeap ? "Reverse" : "Forward"); + pBlock = pHeap->pBlockList; + do + { + if ( dbgReverseDumpHeap ) + pBlock = pBlock->prev; + + NV_PRINTF(LEVEL_INFO, "\t\t0x%08llx 0x%08llx 0x%08llx", pBlock->begin, + pBlock->end, 1 + (pBlock->end - pBlock->begin)); + + if (pBlock->owner == NVOS32_BLOCK_TYPE_FREE) { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\tFREE\n"); + } + else + { + ConvertOwnerToString(pBlock->owner, ownerString); + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, + "\t0x%04x 0x%08x \"%s\"\n", pBlock->u0.type, + pBlock->owner, ownerString); + } + + if ( !dbgReverseDumpHeap ) + pBlock = pBlock->next; + } while (pBlock != pHeap->pBlockList); + + NV_PRINTF(LEVEL_INFO, "FREE Block List %s\n", + dbgReverseDumpHeap ? "Reverse" : "Forward"); + free = 0; + pBlock = pHeap->pFreeBlockList; + if (pBlock) + do + { + if ( dbgReverseDumpHeap ) + pBlock = pBlock->u0.prevFree; + + NV_PRINTF(LEVEL_INFO, "\t\t0x%08llx 0x%08llx 0x%08llx\tFREE\n", + pBlock->begin, pBlock->end, + 1 + (pBlock->end - pBlock->begin)); + + free += pBlock->end - pBlock->begin + 1; + + if ( !dbgReverseDumpHeap ) + pBlock = pBlock->u1.nextFree; + } while (pBlock != pHeap->pFreeBlockList); + + NV_PRINTF(LEVEL_INFO, "\tCalculated free count = 0x%08llx\n", free); +} + +static void _heapValidate +( + Heap *pHeap +) +{ + MEM_BLOCK *pBlock, *pBlockFree; + NvU64 free, used; + + if (!pHeap) return; + + /* + * Scan the blocks and check for consistency. 
+ */ + free = 0; + used = 0; + pBlock = pHeap->pBlockList; + pBlockFree = pHeap->pFreeBlockList; + do + { + if (pBlock->owner == NVOS32_BLOCK_TYPE_FREE) + { + if (!pBlockFree) + { + NV_PRINTF(LEVEL_ERROR, + "Invalid free list with free blocks found.\n"); + _heapDump(pHeap); + DBG_BREAKPOINT(); + } + free += pBlock->end - pBlock->begin + 1; + if (pBlock != pBlockFree) + { + NV_PRINTF(LEVEL_ERROR, + "Free list not consistent with block list.\n"); + _heapDump(pHeap); + DBG_BREAKPOINT(); + } + pBlockFree = pBlockFree->u1.nextFree; + } + else + { + used += pBlock->end - pBlock->begin + 1; + } + if (pBlock->next != pHeap->pBlockList) + { + if (pBlock->end != pBlock->next->begin - 1) + { + NV_PRINTF(LEVEL_ERROR, + "Hole between blocks at offset = 0x%llx\n", + pBlock->end); + _heapDump(pHeap); + DBG_BREAKPOINT(); + } + } + else + { + if (pBlock->end != pHeap->base + pHeap->total - 1) + { + NV_PRINTF(LEVEL_ERROR, "Last block doesn't end at top.\n"); + _heapDump(pHeap); + DBG_BREAKPOINT(); + } + if (pBlock->next->begin != pHeap->base) + { + NV_PRINTF(LEVEL_ERROR, + "First block doesn't start at bottom.\n"); + _heapDump(pHeap); + DBG_BREAKPOINT(); + } + } + if (pBlock->end < pBlock->begin) + { + NV_PRINTF(LEVEL_ERROR, + "Validate: Invalid block begin = 0x%08llx\n", + pBlock->begin); + NV_PRINTF(LEVEL_ERROR, + " end = 0x%08llx\n", + pBlock->end); + _heapDump(pHeap); + DBG_BREAKPOINT(); + } + pBlock = pBlock->next; + } while (pBlock != pHeap->pBlockList); + if (free != pHeap->free) + { + NV_PRINTF(LEVEL_ERROR, + "Calculated free count (%llx) not consistent with heap free count (%llx).\n", + free, pHeap->free); + _heapDump(pHeap); + DBG_BREAKPOINT(); + } + if ((used + free) > pHeap->total) + { + NV_PRINTF(LEVEL_ERROR, + "Calculated used count (%llx) not consistent with heap size (%llx).\n", + used + free, pHeap->total); + _heapDump(pHeap); + DBG_BREAKPOINT(); + } +} +#else +#define HEAP_VALIDATE(h) +#endif // DEBUG + + +/****************************************************************************/ +/* */ +/* Heap Manager */ +/* */ +/****************************************************************************/ + +static NV_STATUS heapReserveRegion +( + MemoryManager *pMemoryManager, + Heap *pHeap, + NvU64 offset, + NvU64 size, + MEMORY_DESCRIPTOR **ppMemDesc, + NvBool isRmRsvdRegion +) +{ + NV_STATUS rmStatus = NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU64 heapSize = (pHeap->base + pHeap->total); + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + MEMORY_ALLOCATION_REQUEST allocRequest = {0}; + NV_MEMORY_ALLOCATION_PARAMS allocData = {0}; + + NvU64 align = 0; + NvU32 height = 1; + NvU32 pitch = 1; + NvU32 attr = DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB) | + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS); + NvU32 attr2 = DRF_DEF(OS32, _ATTR2, _INTERNAL, _YES); + + NV_ASSERT_OR_RETURN((offset < heapSize), NV_OK); + + allocRequest.pUserParams = &allocData; + + allocData.owner = ((isRmRsvdRegion) ? HEAP_OWNER_RM_RESERVED_REGION : HEAP_OWNER_PMA_RESERVED_REGION); + allocData.height = height; + allocData.type = ((isRmRsvdRegion) ? 
NVOS32_TYPE_RESERVED : NVOS32_TYPE_PMA); + allocData.flags = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + allocData.attr = attr; + allocData.attr2 = attr2; + allocData.pitch = pitch; + allocData.alignment = align; + allocData.size = NV_MIN(size, (heapSize - offset)); + allocData.offset = offset; + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + NV_ASSERT_TRUE_OR_GOTO(rmStatus, pFbAllocInfo != NULL, NV_ERR_NO_MEMORY, done); + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + NV_ASSERT_TRUE_OR_GOTO(rmStatus, pFbAllocPageFormat != NULL, NV_ERR_NO_MEMORY, done); + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + memUtilsInitFBAllocInfo(&allocData, pFbAllocInfo, 0, 0); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + memmgrAllocResources(pGpu, pMemoryManager, &allocRequest, pFbAllocInfo), + done); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + vidmemAllocResources(pGpu, pMemoryManager, &allocRequest, pFbAllocInfo, pHeap), + done); + + NV_PRINTF(LEVEL_INFO, "Reserved heap for %s %llx..%llx\n", + ((isRmRsvdRegion) ? "RM" : "PMA"), offset, (offset+size-1)); + + *ppMemDesc = allocRequest.pMemDesc; + + // Account for reserved size removed from the total address space size + if (isRmRsvdRegion) + { + pHeap->reserved += allocData.size; + } + +done: + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + return rmStatus; +} + +/*! + * @brief Initializes a heap object + * + * @param[in] pFb FB object ptr + * @param[in/out] pHeap HEAP object ptr + * @param[in] base Base for this heap + * @param[in] size Size of this heap + * @param[in] heapType Heap type (Global or PMSA) + * @param[in] pPtr A generic pointer which will be typecasted based on heapType + */ +NV_STATUS heapInitInternal_IMPL +( + OBJGPU *pGpu, + Heap *pHeap, + NvU64 base, + NvU64 size, + HEAP_TYPE_INTERNAL heapType, + void *pPtr +) +{ + MEM_BLOCK *pBlock; + NvU32 i; + NV_STATUS status; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 typeDataSize = 0; + FB_REGION_DESCRIPTOR *pFbRegion; + MEMORY_DESCRIPTOR *pPmsaMemDesc = NULL; + + // + // Simply create a free heap. + // + NV_PRINTF(LEVEL_INFO, + "Heap Manager: HEAP ABOUT TO BE CREATED. 
(Base: 0x%llx Size: 0x%llx)\n", + base, size); + + pHeap->base = base; + pHeap->total = size; + pHeap->free = size; + pHeap->reserved = 0; + pHeap->heapType = heapType; + + pHeap->peakInternalUsage = 0; + pHeap->peakExternalUsage = 0; + pHeap->currInternalUsage = 0; + pHeap->currExternalUsage = 0; + + + // Set the flags based on HEAP type + switch (heapType) + { + case HEAP_TYPE_RM_GLOBAL: + pHeap->bHasFbRegions = NV_TRUE; + break; + case HEAP_TYPE_PHYS_MEM_SUBALLOCATOR: + NV_ASSERT(pPtr != NULL); + + pHeap->bHasFbRegions = NV_FALSE; + typeDataSize = sizeof(PHYS_MEM_SUBALLOCATOR_DATA); + pPmsaMemDesc = ((PHYS_MEM_SUBALLOCATOR_DATA *)pPtr)->pMemDesc; + break; + case HEAP_TYPE_PARTITION_LOCAL: + pHeap->bHasFbRegions = NV_TRUE; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + pHeap->pHeapTypeSpecificData = NULL; + if ((pPtr != NULL) && (typeDataSize > 0)) + { + pHeap->pHeapTypeSpecificData = portMemAllocNonPaged(typeDataSize); + if (pHeap->pHeapTypeSpecificData == NULL) + { + return NV_ERR_OPERATING_SYSTEM; + } + NV_ASSERT(pHeap->pHeapTypeSpecificData != NULL); + portMemCopy(pHeap->pHeapTypeSpecificData, typeDataSize, pPtr, typeDataSize); + } + + pBlock = portMemAllocNonPaged(sizeof(MEM_BLOCK)); + if (pBlock == NULL) + { + return NV_ERR_OPERATING_SYSTEM; + } + portMemSet(pBlock, 0, sizeof(MEM_BLOCK)); + + pBlock->owner = NVOS32_BLOCK_TYPE_FREE; + pBlock->textureId= 0; + pBlock->begin = base; + pBlock->align = 0; + pBlock->alignPad = 0; + pBlock->end = base + size - 1; + pBlock->u0.prevFree = pBlock; + pBlock->u1.nextFree = pBlock; + pBlock->next = pBlock; + pBlock->prev = pBlock; + pBlock->format = 0; + + pHeap->pBlockList = pBlock; + pHeap->pFreeBlockList = pBlock; + pHeap->memHandle = 0xcafe0000; + pHeap->numBlocks = 1; + pHeap->pBlockTree = NULL; + + // + // Set the client id as invalid since there isn't one that exists + // Initialize the client texture data structure + // + portMemSet(pHeap->textureData, 0, + sizeof(TEX_INFO) * MAX_TEXTURE_CLIENT_IDS); + + // + // Call into the hal to get bank placement policy. Note this will vary chip to chip, but let's allow the HAL to tell us + // the implementation details. + // + status = memmgrGetBankPlacementData_HAL(pGpu, pMemoryManager, pHeap->placementStrategy); + if (status != NV_OK) + { + // + // ooops, can't get HAL version of where to place things - let's default to something + // + NV_PRINTF(LEVEL_ERROR, + "Heap Manager unable to get bank placement policy from HAL.\n"); + NV_PRINTF(LEVEL_ERROR, + "Heap Manager defaulting to BAD placement policy.\n"); + + pHeap->placementStrategy[BANK_PLACEMENT_IMAGE] = ((0) + | BANK_MEM_GROW_UP + | MEM_GROW_UP + | 0xFFFFFF00); + pHeap->placementStrategy[BANK_PLACEMENT_DEPTH] = ((0) + | BANK_MEM_GROW_DOWN + | MEM_GROW_DOWN + | 0xFFFFFF00); + pHeap->placementStrategy[BANK_PLACEMENT_TEX_OVERLAY_FONT] = ((0) + | BANK_MEM_GROW_DOWN + | MEM_GROW_DOWN + | 0xFFFFFF00); + pHeap->placementStrategy[BANK_PLACEMENT_OTHER] = ((0) + | BANK_MEM_GROW_DOWN + | MEM_GROW_DOWN + | 0xFFFFFF00); + status = NV_OK; + } + + // Setup noncontig list + pHeap->pNoncontigFreeBlockList = NULL; + + // insert first block into rb-tree + if (NV_OK != _heapUpdate(pHeap, pBlock, BLOCK_ADD)) + { + return NV_ERR_INVALID_STATE; + } + + // + // If there are FB regions defined, check to see if any of them are + // marked reserved. Tag those regions as reserved in the heap. 
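+    // The flow below is: reserve the console display region in place first,
+    // register the PMA-managed regions, then carve out every region that is
+    // either RM-reserved or PMA-managed (not internal heap) via
+    // heapReserveRegion(); only the RM-reserved carve-outs are counted in
+    // pHeap->reserved.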
+ // + if ((pMemoryManager->Ram.numFBRegions > 0) && (pHeap->bHasFbRegions)) + { + NvBool bConsoleFbRegionContentPreserved; + FB_REGION_DESCRIPTOR consoleFbRegion; + portMemSet(&consoleFbRegion, 0, sizeof(consoleFbRegion)); + + if (heapType != HEAP_TYPE_PARTITION_LOCAL) + { + // + // If a region of FB is actively being used for console display memory + // on this GPU, mark it reserved in-place. + // + memmgrReserveConsoleRegion_HAL(pGpu, pMemoryManager, &consoleFbRegion); + status = memmgrAllocateConsoleRegion_HAL(pGpu, pMemoryManager, &consoleFbRegion); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Squashing the error status after failing to allocate console region, status: %x\n", + status); + status = NV_OK; + } + } + + // + // Define PMA-managed regions + // This will be moved to memmgr once we refactor SMC partitions + // + if (memmgrIsPmaEnabled(pMemoryManager) && + memmgrIsPmaSupportedOnPlatform(pMemoryManager) && + (heapType != HEAP_TYPE_PARTITION_LOCAL)) + { + memmgrSetPmaInitialized(pMemoryManager, NV_TRUE); + memmgrRegionSetupForPma(pGpu, pMemoryManager); + } + + bConsoleFbRegionContentPreserved = NV_FALSE; + + if (heapType != HEAP_TYPE_PARTITION_LOCAL) + { + // For GSP RM, all PMA candidate regions are given to CPU RM for its use + if (RMCFG_FEATURE_PLATFORM_GSP) + { + memmgrRegionSetupForPma(pGpu, pMemoryManager); + } + + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + pFbRegion = &pMemoryManager->Ram.fbRegion[i]; + + // If the region is marked reserved, reserve it in the heap + if (pFbRegion->bRsvdRegion || + ((memmgrIsPmaInitialized(pMemoryManager) || + RMCFG_FEATURE_PLATFORM_GSP) && + !pFbRegion->bInternalHeap)) + { + NvU64 fbRegionBase; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + // Skip regions which are outside the heap boundaries + if (pFbRegion->base < base && pFbRegion->limit < base) + { + continue; + } + + // TODO: Remove SRIOV check and enable on baremetal as well. 
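+                // On SRIOV guests, regions that start at or above the end of this
+                // heap (base + size) are skipped entirely rather than clamped.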
+ if (IS_VIRTUAL_WITH_SRIOV(pGpu) && (pFbRegion->base >= (base + size))) + { + continue; + } + + // Adjust base of reserved region on heap + fbRegionBase = NV_MAX(base, pFbRegion->base); + + NV_PRINTF(LEVEL_INFO, "Reserve at %llx of size %llx\n", + fbRegionBase, (pFbRegion->limit - fbRegionBase + 1)); + + status = heapReserveRegion( + pMemoryManager, + pHeap, + fbRegionBase, + (pFbRegion->limit - fbRegionBase + 1), + &pMemDesc, + pFbRegion->bRsvdRegion); + + if (status != NV_OK || pMemDesc == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to reserve %llx..%llx\n", + pFbRegion->base, pFbRegion->limit); + return status; + } + + if ((pMemoryManager->Ram.ReservedConsoleDispMemSize > 0) && + (pFbRegion->base == consoleFbRegion.base) && (pFbRegion->limit == consoleFbRegion.limit)) + { + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_LOST_ON_SUSPEND, NV_FALSE); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_PRESERVE_CONTENT_ON_SUSPEND, NV_TRUE); + + bConsoleFbRegionContentPreserved = NV_TRUE; + } + } + } + + if ((pMemoryManager->Ram.ReservedConsoleDispMemSize > 0) && + !bConsoleFbRegionContentPreserved) + { + NV_PRINTF(LEVEL_ERROR, + "failed to preserve content of console display memory\n"); + } + } + +#ifdef DEBUG + _heapDump(pHeap); +#endif + } //if ((pMemoryManager->Ram.numFBRegions > 0) && (pHeap->bHasFbRegions)) + + // Hand over all the memory of partition-heap to partition-PMA + if ((heapType == HEAP_TYPE_PARTITION_LOCAL) && + (memmgrIsPmaInitialized(pMemoryManager))) + { + MEMORY_DESCRIPTOR *pMemDesc = NULL; + status = heapReserveRegion( + pMemoryManager, + pHeap, + base, + size, + &pMemDesc, + NV_FALSE); + + if (status != NV_OK || pMemDesc == NULL) + { + NV_PRINTF(LEVEL_ERROR, "failed to reserve %llx..%llx\n", base, + base + size - 1); + + return status; + } + } + + // If PHYS_MEM_SUBALLOCATOR, increase its refCount + if ((status == NV_OK) && (pPmsaMemDesc != NULL)) + { + memdescAddRef(pPmsaMemDesc); + } + + return (status); +} + +void +heapDestruct_IMPL +( + Heap *pHeap +) +{ + MEM_BLOCK *pBlock, *pBlockFirst, *pBlockNext; + OBJGPU *pGpu = ENG_GET_GPU(pHeap); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool headptr_updated; + MEMORY_DESCRIPTOR *pPmsaMemDesc = NULL; + + NV_PRINTF(LEVEL_INFO, "Heap Manager: HEAP ABOUT TO BE DESTROYED.\n"); + +#ifdef DEBUG + _heapDump(pHeap); +#endif + + // Free all blacklisted pages + if (pHeap->blackListAddresses.count != 0) + { + heapFreeBlackListedPages(pGpu, pHeap); + } + + // + // Free all allocated blocks, but preserve primary surfaces. + // If the head of our list changes, restart the search, since our terminating + // block pointer may not be in the list anymore. 
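+    // If freeing a block changes pHeap->pBlockList, headptr_updated is set and
+    // the outer loop below restarts the walk from the new head, since the saved
+    // terminator (pBlockFirst) may no longer be part of the list.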
+ // + do + { + pBlock = pBlockFirst = pHeap->pBlockList; + if (pBlock == NULL) + { + break; + } + + headptr_updated = NV_FALSE; + + do + { + pBlockNext = pBlock->next; + + // If we are freeing the reserved region created at heapInit, free the memory descriptor too + if ((pBlock->allocedMemDesc) && ((pBlock->owner == HEAP_OWNER_RM_RESERVED_REGION) || + (pBlock->owner == HEAP_OWNER_PMA_RESERVED_REGION))) + { + memdescDestroy(pBlock->pMemDesc); + pBlock->pMemDesc = NULL; + pBlock->allocedMemDesc = NV_FALSE; + } + + _heapBlockFree(pGpu, pHeap, pBlock); + + // restart scanning the list, if the heap->pBlockList changed + if (pBlockFirst != pHeap->pBlockList) + { + headptr_updated = NV_TRUE; + break; + } + + pBlock = pBlockNext; + + } while (pBlock != pHeap->pBlockList); + + } while (headptr_updated); + + // + // Now that the console region is no longer reserved, free the console + // memdesc. + // + if (pHeap->heapType != HEAP_TYPE_PARTITION_LOCAL) + memmgrReleaseConsoleRegion(pGpu, pMemoryManager); + + // + // Free the heap structure, if we freed everything + // (the first block represents the entire free space of the heap). + // this is only done if the "internal" interface is used. + // heapDestroy is an exported function now to user/display driver land, + // and we don't want the heap structures being freed unless we've been + // called from RM-land during a STATE_DESTROY + // + if ((pHeap->pBlockList != NULL) && + (pHeap->pBlockList->begin == pHeap->base) && + (pHeap->pBlockList->end == (pHeap->base + pHeap->total - 1))) + { + portMemFree(pHeap->pBlockList); + } + + // Free the type specific data allocated + if (pHeap->pHeapTypeSpecificData != NULL) + { + if (pHeap->heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR) + { + pPmsaMemDesc = ((PHYS_MEM_SUBALLOCATOR_DATA *)(pHeap->pHeapTypeSpecificData))->pMemDesc; + if (pPmsaMemDesc != NULL) + { + memdescDestroy(pPmsaMemDesc); + } + } + portMemFree(pHeap->pHeapTypeSpecificData); + pHeap->pHeapTypeSpecificData = NULL; + } + + if ((pHeap->bHasFbRegions) && (memmgrIsPmaInitialized(pMemoryManager))) + { + if (pHeap->heapType != HEAP_TYPE_PARTITION_LOCAL) + memmgrSetPmaInitialized(pMemoryManager, NV_FALSE); + + pmaDestroy(&pHeap->pmaObject); + portMemSet(&pHeap->pmaObject, 0, sizeof(pHeap->pmaObject)); + } +} + +static NV_STATUS _heapGetBankPlacement +( + OBJGPU *pGpu, + Heap *pHeap, + NvU32 owner, + NvU32 *flags, + NvU32 type, + NvU32 bank, + NvU32 *placement +) +{ + NvU32 bankPlacement, i; + + if (type != NVOS32_TYPE_PRIMARY) + { + NvU32 bankPlacementType; + + // what kind of allocation is it? + switch (type) + { + case NVOS32_TYPE_IMAGE: + case NVOS32_TYPE_NOTIFIER: + bankPlacementType = BANK_PLACEMENT_IMAGE; + break; + case NVOS32_TYPE_DEPTH: + case NVOS32_TYPE_ZCULL: + case NVOS32_TYPE_STENCIL: + bankPlacementType = BANK_PLACEMENT_DEPTH; + break; + case NVOS32_TYPE_TEXTURE: + case NVOS32_TYPE_VIDEO: + case NVOS32_TYPE_FONT: + bankPlacementType = BANK_PLACEMENT_TEX_OVERLAY_FONT; + break; + default: + bankPlacementType = BANK_PLACEMENT_OTHER; + } + + // + // NV50+ doesn't care about bank placement since the fb has bank + // striding and we dont need to care about allocating primary surfaces + // in special areas to avoid bank conflicts. This strategy management + // should be removed in the future. 
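+        // The placement word packs one attempt per MEM_BANK_DATA_SIZE-bit slot:
+        // each of the first MEM_NUM_BANKS_TO_TRY slots carries a bank index plus
+        // a MEM_GROW_UP/MEM_GROW_DOWN bit, and the 0xFFFFFF00 fill used when the
+        // defaults are built appears to mark the remaining slots as unused.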
+ // + bankPlacement = pHeap->placementStrategy[bankPlacementType]; + } + else + { + // + // primary allocation, default grow direction is up, starting at bank 0 + // Can be overridden with NVOS32_ALLOC_FLAGS_FORCE_MEM_* + // + bankPlacement = ((0) + | BANK_MEM_GROW_UP + | MEM_GROW_UP + | 0xFFFFFF00); + } + + // + // check if bank placement force was passed in - hint is handled in the first loop below + // + if (*flags & NVOS32_ALLOC_FLAGS_BANK_FORCE) + { + // replace data in bankplacement + if (*flags & NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN) + bankPlacement = bank | BANK_MEM_GROW_DOWN | 0xFFFFFF00; + else + bankPlacement = bank | BANK_MEM_GROW_UP | 0xFFFFFF00; + *flags &= ~(NVOS32_ALLOC_FLAGS_BANK_HINT); // remove hint flag + } + + // + // Check if FORCE_MEM_GROWS_UP or FORCE_MEM_GROWS_DOWN was passed in + // to override the MEM_GROWS direction for this allocation. Make sure + // to override each of the first MEM_NUM_BANKS_TO_TRY bytes in the NvU32 + // + if (*flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_UP) + { + *flags |= NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT; + for (i = 0; i < MEM_NUM_BANKS_TO_TRY; i++) + { + bankPlacement = (bankPlacement & ~(MEM_GROW_MASK << (i*MEM_BANK_DATA_SIZE))) | + (MEM_GROW_UP << (i*MEM_BANK_DATA_SIZE)); + } + } + if (*flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN) + { + *flags |= NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT; + for (i = 0; i < MEM_NUM_BANKS_TO_TRY; i++) + { + bankPlacement = (bankPlacement & ~(MEM_GROW_MASK << (i*MEM_BANK_DATA_SIZE))) | + (MEM_GROW_DOWN << (i*MEM_BANK_DATA_SIZE)); + } + } + + // return the bank placement to use + *placement = bankPlacement; + return (NV_OK); +} + +// +// Workaround for Bug 67690: +// NV28M-WinXP: (Lindbergh) StencilFloor OpenGL Sample Locks Up when Maximized on Secondary DualView Display +// +// Change heap placement for textures if more than two clients +// are detected. In the case of two or more clients, ignoreBankPlacement, textureClientIndex, +// and currentBankInfo are modified. IgnoreBankPlacement flag is set to true, textureClientIndex +// is returned with the index of the client to be used as heap->textureData[textureClientIndex] +// which pertains to the current client. Lastly, currentBankInfo is modified to grow in the +// opposite direction of the most recently allocated client. 
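+// Example: when a second texture client registers, it is handed the grow
+// direction opposite to the most recently allocated client's placementFlags and
+// bank placement is ignored from then on; a client that is already registered
+// simply gets back the placementFlags recorded for it in pHeap->textureData[].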
+// +static void _heapSetTexturePlacement +( + Heap *pHeap, + NvU32 client, + NvU32 type, + NvBool *ignoreBankPlacement, + NvU32 *textureClientIndex, + NvU8 *currentBankInfo +) +{ + NvU32 index, numClients, clientFound, mostRecentIndex; + mostRecentIndex = 0xFFFFFFFF; + clientFound = NV_FALSE; + numClients = 0; + + // + // let's first check to see if the client is already registered + // We will iterate thru to find number of clients + // + for (index = 0; index < MAX_TEXTURE_CLIENT_IDS; index++) + { + // client already registered + if (pHeap->textureData[index].clientId == client) + { + // give the currentBankInfo the new flags + *currentBankInfo = pHeap->textureData[index].placementFlags; + // + // Set the client as found so that we skip allocation + // of the client in the texture data structure + // + clientFound = NV_TRUE; + *textureClientIndex = index; + } + + // + // We loop through the whole structure to determine the + // number of texture clients currently listed + // + if (pHeap->textureData[index].clientId != 0) + numClients++; + + // + // This is used to assign new textures to the buffer + // A value of 0xFFFFFFFF indicates that this is the first allocation + // + if (pHeap->textureData[index].mostRecentAllocatedFlag == NV_TRUE) + mostRecentIndex = index; + } + + // + // If more than one clinet is detected, ignore bank placement + // otherwise, defaults to bank placement + // + if (numClients > 1) + *ignoreBankPlacement = NV_TRUE; + + // + // We fall into this if statement if no client was listed + // or if we have exceeded the allowable clients available + // + if (clientFound == NV_FALSE) + { + index = 0; + while (clientFound == NV_FALSE) + { + // the case of full texture buffer of clients, greater than 4 clients + if (index == MAX_TEXTURE_CLIENT_IDS) + { + index = (mostRecentIndex + 1) % MAX_TEXTURE_CLIENT_IDS; + + // assign the new client and update the texture data + pHeap->textureData[index].clientId = client; + pHeap->textureData[index].mostRecentAllocatedFlag = NV_TRUE; + pHeap->textureData[mostRecentIndex].mostRecentAllocatedFlag = NV_FALSE; + pHeap->textureData[index].refCount = 0; + + // + // Reverse the placementFlags from the one that was previously allocated + // + if (pHeap->textureData[mostRecentIndex].placementFlags & MEM_GROW_MASK) + *currentBankInfo = MEM_GROW_UP; + else + *currentBankInfo = MEM_GROW_DOWN; + + // Assign the new value to the texture data structure + pHeap->textureData[index].placementFlags = *currentBankInfo; + clientFound = NV_TRUE; + *ignoreBankPlacement = NV_TRUE; + *textureClientIndex = index; + } + + // the case in which there is still room available in the buffer + if (pHeap->textureData[index].clientId == 0) + { + // If we fall in here, it means there is still room available + pHeap->textureData[index].clientId = client; + + // deal with the grow directivity + if (mostRecentIndex == 0xFFFFFFFF) + { + // this is the very first client to be allocated + pHeap->textureData[index].placementFlags = *currentBankInfo; + if (pHeap->textureData[index].placementFlags & MEM_GROW_MASK) + *currentBankInfo = MEM_GROW_DOWN; + else + *currentBankInfo = MEM_GROW_UP; + pHeap->textureData[index].mostRecentAllocatedFlag = NV_TRUE; + } + else + { + if (pHeap->textureData[mostRecentIndex].placementFlags & MEM_GROW_MASK) + *currentBankInfo = MEM_GROW_UP; + else + *currentBankInfo = MEM_GROW_DOWN; + + // Set the last client allocated to the new client allocated + pHeap->textureData[mostRecentIndex].mostRecentAllocatedFlag = NV_FALSE; + 
pHeap->textureData[index].mostRecentAllocatedFlag = NV_TRUE; + + // update the placement flags + pHeap->textureData[index].placementFlags = *currentBankInfo; + + // if this isn't the first client in the heap, then we ignore bank placement + *ignoreBankPlacement = NV_TRUE; + } + + clientFound = NV_TRUE; + *textureClientIndex = index; + } + index++; + } // while (clientFound == NV_FALSE) + } // if (clientFound == NV_FALSE) +} + +// +// If we have two different alignment requirements for a memory +// allocation, this routine calculates the LCM (least common multiple) +// to satisfy both requirements. +// +// An alignment of 0 means "no preferred alignment". The return value +// will not exceed maxAlignment = NV_U64_MAX; it returns maxAlignment if the limit +// is exceeded. +// +// Called by heapAlloc and heapAllocHint. +// + + +/*! + * @Is Alloc Valid For FB Region + * + * Check the prospective allocation to see if the candidate block supports + * the requested surface type. + * + * NOTE: The FB region and FB heap allocation code assume that free blocks + * reside in a single FB region. This is true in current implementations that + * have the regions separated by a reserved block, but may not be true in future + * implementations. + * + * @param[in] pGpu GPU object + * @param[in] pHeap heap object + * @param[in] pFbAllocInfo allocation request information + * @param[in] pAllocData allocation candidate information + * + * @returns NV_TRUE if block can be allocated at the prospective address + * + */ +static NvBool +_isAllocValidForFBRegion +( + OBJGPU *pGpu, + Heap *pHeap, + FB_ALLOC_INFO *pFbAllocInfo, + OBJHEAP_ALLOC_DATA *pAllocData +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool isValid = NV_FALSE; + FB_REGION_DESCRIPTOR *fbRegion; + + // Check if any regions are defined. If not, then we are done. + if (pMemoryManager->Ram.numFBRegions > 0) + { + fbRegion = memmgrLookupFbRegionByOffset(pGpu, pMemoryManager, pAllocData->allocLo, pAllocData->allocHi); + + if (fbRegion != NULL) + { + // Because we heapAlloc the reserved region. + if (pFbAllocInfo->pageFormat->type == NVOS32_TYPE_PMA && + pFbAllocInfo->owner == HEAP_OWNER_PMA_RESERVED_REGION) + { + if (!fbRegion->bInternalHeap && !fbRegion->bRsvdRegion) + { + isValid = NV_TRUE; + } + return isValid; + } + // Check if the region is reserved/not usable + if (fbRegion->bRsvdRegion && + (pFbAllocInfo->pageFormat->type != NVOS32_TYPE_RESERVED)) + { + NV_PRINTF(LEVEL_INFO, + "Reserved region. Rejecting placement\n"); + return NV_FALSE; + } + + // + // Check if the region supports compression and if we need it. + // Surfaces that *require* compression can be allocated *only* in + // regions that support compression. *Optionally* compressed surfaces + // can be allocated anywhere though -- the selection of an uncompressed + // KIND will be handled in dmaUpdateVASpace. + // + if (!fbRegion->bSupportCompressed) + { + if (DRF_VAL(OS32, _ATTR, _COMPR , pFbAllocInfo->pageFormat->attr) == NVOS32_ATTR_COMPR_REQUIRED) + { + NV_PRINTF(LEVEL_INFO, + "Compression not supported. Rejecting placement\n"); + return NV_FALSE; + } + } + + // Check if the allocation type is specifically not allowed + if (pFbAllocInfo->pageFormat->type < NVOS32_NUM_MEM_TYPES) + { + if ((!fbRegion->bSupportISO) && + ((pFbAllocInfo->pageFormat->type == NVOS32_TYPE_PRIMARY) || + (pFbAllocInfo->pageFormat->type == NVOS32_TYPE_CURSOR) || + (pFbAllocInfo->pageFormat->type == NVOS32_TYPE_VIDEO))) + { + NV_PRINTF(LEVEL_INFO, + "ISO surface type #%d not supported. 
Rejecting placement\n", + pFbAllocInfo->pageFormat->type); + return NV_FALSE; + } + } + + if (!!fbRegion->bProtected ^ + !!(pFbAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_PROTECTED)) + { + NV_PRINTF(LEVEL_INFO, + "Protection mismatch. Rejecting placement\n"); + return NV_FALSE; + } + + } + else if (pFbAllocInfo->pageFormat->type != NVOS32_TYPE_RESERVED) + { + // + // Allow reserved allocs outside of valid regions, but everything else + // must be allocated in a region. + // + NV_PRINTF(LEVEL_INFO, + "pFbAllocInfo->type != NVOS32_TYPE_RESERVED\n"); + return NV_FALSE; + } + + } + + return NV_TRUE; +} + +/** + * Blacklists a single page + * This function will allocate the memory descriptor with a fixed memory offset + * and allocate the FB physical offset. Will replace the blacklist allocation + * path in the heapBlackListPages_IMPL. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap pointer + * @param[in] pBlacklistChunk BLACKLIST_CHUNK pointer + * + * @returns NV_OK on success + * NV_ERR_OUT_OF_MEMORY, if the memory is already blacklisted + */ + +static NV_STATUS +_heapBlacklistSingleChunk +( + OBJGPU *pGpu, + Heap *pHeap, + BLACKLIST_CHUNK *pBlacklistChunk +) +{ + NV_STATUS status = NV_OK; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_ASSERT(pBlacklistChunk != NULL); + + status = memdescCreate(&pBlacklistChunk->pMemDesc, + pGpu, pBlacklistChunk->size, RM_PAGE_SIZE, + NV_TRUE, ADDR_FBMEM, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE | + MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_FATAL, + "Error 0x%x creating memdesc for blacklisted chunk for address0x%llx, skipping\n", + status, pBlacklistChunk->physOffset); + NV_ASSERT(NV_FALSE); + return status; + } + + // this is how FIXED_ADDRESS_ALLOCATE works + memdescSetPte(pBlacklistChunk->pMemDesc, AT_GPU, 0, RM_PAGE_ALIGN_DOWN(pBlacklistChunk->physOffset)); + + if (pHeap->heapType != HEAP_TYPE_PHYS_MEM_SUBALLOCATOR) + { + // + // Allocate memory for this page. 
This is marked as an internal RM allocation + // and will be saved/restored during suspend/resume + // + status = memdescAlloc(pBlacklistChunk->pMemDesc); + if (NV_OK != status) + { + // no use for the memdesc if page couldn't be allocated + memdescDestroy(pBlacklistChunk->pMemDesc); + + NV_PRINTF(LEVEL_FATAL, + "Error 0x%x creating page for blacklisting address: 0x%llx, skipping\n", + status, pBlacklistChunk->physOffset); + NV_ASSERT(NV_FALSE); + return status; + } + } + + // set the flags properly + pBlacklistChunk->bIsValid = NV_TRUE; + + // if dynamic blacklisteing is enabled, clear the pending retirement flag + if (pMemoryManager->bEnableDynamicPageOfflining) + { + pBlacklistChunk->bPendingRetirement = NV_FALSE; + } + return status; +} + +/** + * Free-s the blacklisted pages within the range [begin, begin+size-1] + * This function will iterate the blacklisted chunks data structure, + * and free the blacklisted pages within the range [begin, begin+size-1] + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pMemoryManager MemoryManager pointer + * @param[in] pBlackList BLACKLIST pointer + * @param[in] begin starting address of the range + * @param[in] size Size of the region, where blacklisted pages to be free-d + * + * @returns NV_OK on success + */ +static NV_STATUS +_heapFreeBlacklistPages +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + BLACKLIST *pBlackList, + NvU64 begin, + NvU64 size +) +{ + NvU32 chunk = 0; + NvU64 baseChunkAddress = 0; + NvU64 endChunkAddress = 0; + BLACKLIST_CHUNK *pBlacklistChunks = pBlackList->pBlacklistChunks; + + for (chunk = 0; chunk < pBlackList->count; chunk++) + { + baseChunkAddress = 0; + endChunkAddress = 0; + // No need to process the chunk if it's not a valid chunk + if (pBlacklistChunks[chunk].bIsValid != NV_TRUE || + (pMemoryManager->bEnableDynamicPageOfflining && + pBlacklistChunks[chunk].bPendingRetirement)) + continue; + + baseChunkAddress = pBlacklistChunks[chunk].physOffset; + endChunkAddress = baseChunkAddress + pBlacklistChunks[chunk].size - 1; + + if (baseChunkAddress >= begin && endChunkAddress <= (begin + size - 1)) + { + // + // free the mem desc, set the excludeGlobalListFlag + // invalidate the entry + // + NV_PRINTF(LEVEL_FATAL, + "removing from blacklist... 
page start %llx, page end:%llx\n", + baseChunkAddress, endChunkAddress); + + memdescFree(pBlacklistChunks[chunk].pMemDesc); + memdescDestroy(pBlacklistChunks[chunk].pMemDesc); + + pBlacklistChunks[chunk].bIsValid = NV_FALSE; + } + } + return NV_OK; +} + +/** + * Blacklist pages within the range [begin, begin+size-1] + * This function will iterate the blacklisted chunks data structure, + * and blacklist pages within the range [begin, begin+size-1] + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap pointer + * @param[in] pBlackList BLACKLIST pointer + * @param[in] begin starting address of the range + * @param[in] size Size of the region, where pages will be blacklisted + * + * @returns NV_OK on success + * error, if _heapBlacklistSingleChunk fails + */ +static NV_STATUS +_heapBlacklistChunks +( + OBJGPU *pGpu, + Heap *pHeap, + BLACKLIST *pBlackList, + NvU64 begin, + NvU64 size +) +{ + NvU32 chunk = 0; + NvU64 baseAddress = 0; + NvU64 endAddress = 0; + BLACKLIST_CHUNK *pBlacklistChunks = pBlackList->pBlacklistChunks; + NV_STATUS status = NV_OK; + + + for (chunk = 0; chunk < pBlackList->count; chunk++) + { + baseAddress = 0; + endAddress = 0; + + // No need to process the chunk if it's a valid chunk + if (pBlacklistChunks[chunk].bIsValid == NV_TRUE) + continue; + + baseAddress = pBlacklistChunks[chunk].physOffset; + endAddress = baseAddress + pBlacklistChunks[chunk].size - 1; + + //TODO: what if the blacklisted chunk is halfway inside the allocated region?? + if (baseAddress >= begin && endAddress <= (begin + size - 1)) + { + NV_PRINTF(LEVEL_ERROR, + "blacklisting chunk from addr: 0x%llx to 0x%llx, new begin :0x%llx, end:0x%llx\n", + baseAddress, endAddress, begin, begin + size - 1); + status = _heapBlacklistSingleChunk(pGpu, pHeap, &pBlacklistChunks[chunk]); + NV_ASSERT(status == NV_OK); + } + } + return status; +} + +/*! + * @brief allocate memory from heap + * + * Allocates a memory region with requested parameters from heap. + * If requested contiguous allocation is not possible, tries to allocate non-contiguous memory. 
+ * + * @param[in] pGpu GPU object + * @param[in] hClient client handle + * @param[in] pHeap heap object + * @param[in] pAllocRequest allocation request + * @param[in] memHandle memory handle + * @param[in/out] pAllocData heap-specific allocation data + * @param[in/out] pFbAllocInfo allocation data + * @param[out] pHwResource pointer to allocation HW resource info + * @param[in/out] pNoncontigAllocation the requested/provided allocation is noncotig + * @param[in] bNoncontigAllowed allocation can be made noncontig + * @param[in] bAllocedMemdesc memdesc should be freed if a new one is created + */ +NV_STATUS heapAlloc_IMPL +( + OBJGPU *pGpu, + NvHandle hClient, + Heap *pHeap, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + NvHandle memHandle, + OBJHEAP_ALLOC_DATA *pAllocData, + FB_ALLOC_INFO *pFbAllocInfo, + HWRESOURCE_INFO **pHwResource, + NvBool *pNoncontigAllocation, + NvBool bNoncontigAllowed, + NvBool bAllocedMemdesc +) +{ + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + MEMORY_DESCRIPTOR *pMemDesc = pAllocRequest->pMemDesc; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 textureClientIndex = 0xFFFFFFFF; + NvU64 desiredOffset = pFbAllocInfo->offset; + NvU64 adjustedSize = pFbAllocInfo->size - pFbAllocInfo->alignPad; + NvU32 bankPlacement = 0; + NvBool ignoreBankPlacement = NV_FALSE; + NvU8 currentBankInfo; + MEM_BLOCK *pBlockFirstFree; + MEM_BLOCK *pBlockFree; + MEM_BLOCK *pBlockNew = NULL; + MEM_BLOCK *pBlockSplit = NULL; + NvU64 allocatedOffset = 0; + NvBool bTurnBlacklistOff = NV_FALSE; + NvBool bDone = NV_FALSE; + NV_STATUS status = NV_OK; + NvU32 i; + + NV_ASSERT_OR_RETURN( + (memmgrAllocGetAddrSpace(GPU_GET_MEMORY_MANAGER(pGpu), pVidHeapAlloc->flags, pVidHeapAlloc->attr) + == ADDR_FBMEM) && + (pAllocRequest->pPmaAllocInfo[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] == NULL), + NV_ERR_INVALID_ARGUMENT); + + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + desiredOffset -= pFbAllocInfo->alignPad; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT) && + gpuCheckPageRetirementSupport_HAL(pGpu) && + FLD_TEST_DRF(OS32, _ATTR2, _BLACKLIST, _OFF, pVidHeapAlloc->attr2)) + { + NV_PRINTF(LEVEL_INFO, + "Trying to turn blacklisting pages off for this allocation of size: %llx\n", + pVidHeapAlloc->size); + if (!hypervisorIsVgxHyper()) + _heapFreeBlacklistPages(pGpu, pMemoryManager, &pHeap->blackList, desiredOffset, pVidHeapAlloc->size); + else + _heapFreeBlacklistPages(pGpu, pMemoryManager, &pHeap->blackList, pHeap->base, pHeap->total); + bTurnBlacklistOff = NV_TRUE; + // Now continue with the heap allocation. + } + + // + // Check for range-limited request. + // Range of [0,0] is a special case that means to use the entire heap. + // + // A range-limited request allows caller to say: I really want memory + // which only falls completely within a particular range. Returns + // error if can't allocate within that range. + // + // Used on Windows by OpenGL. On Windows during a modeswitch, the + // display driver frees all vidmem surfaces. Unfortunately, OpenGL + // writes to some vidmem surface with the CPU from user mode. If these + // surfaces are freed during the modeswitch, then the user mode OpenGL + // app might scribble on someone else's surface if that video memory is + // reused before OpenGL notices the modeswitch. Because modeswitches + // are asynchronous to the OpenGL client, it does not notice the + // modeswitches right away. 
+ // + // A solution is for OpenGL to restrict vidmem surfaces that have + // this problem to a range of memory where it is safe *not* to free + // the surface during a modeswitch. + // + // virtual allocation are checked in dmaAllocVA() + if (pVidHeapAlloc->rangeLo == 0 && pVidHeapAlloc->rangeHi == 0) + { + pVidHeapAlloc->rangeHi = pHeap->base + pHeap->total - 1; + } + if (pVidHeapAlloc->rangeHi > pHeap->base + pHeap->total - 1) + { + pVidHeapAlloc->rangeHi = pHeap->base + pHeap->total - 1; + } + + if ((pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) == 0) + { + // Only want to override in one direction at a time + if (pMemoryManager->overrideInitHeapMin == 0) + { + pVidHeapAlloc->rangeHi = NV_MIN(pVidHeapAlloc->rangeHi, pMemoryManager->overrideHeapMax); + } + else + { + pVidHeapAlloc->rangeLo = NV_MAX(pVidHeapAlloc->rangeLo, pMemoryManager->overrideInitHeapMin); + } + } + + // + // Check for valid range. + // + if (pVidHeapAlloc->rangeLo > pVidHeapAlloc->rangeHi) + { + status = NV_ERR_INVALID_ARGUMENT; + goto return_early; + } + + // + // The bank placement loop does not know how to limit allocations to be + // within a range. + // + if (((pVidHeapAlloc->rangeLo > 0) || (pVidHeapAlloc->rangeHi < pHeap->base + pHeap->total - 1))) + { + pVidHeapAlloc->flags |= NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT; + } + + // + // Set up bank placement data - should have been preselected in heapCreate + // + status = _heapGetBankPlacement(pGpu, pHeap, pVidHeapAlloc->owner, + &pVidHeapAlloc->flags, + pVidHeapAlloc->type, + 0, + &bankPlacement); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "_heapGetBankPlacement failed for current allocation\n"); + goto return_early; + } + + // + // Find the best bank to start looking in for this pVidHeapAlloc->type, but only if we're + // not ignoring bank placement rules. Save the current bank info. + // + currentBankInfo = (NvU8)bankPlacement; // this is always non zero from above + + // + // Check for fixed address request. + // This allows caller to say: I really want this memory at a particular + // offset. Returns error if can't get that offset. + // Used initially by Mac display driver twinview code. + // On the Mac it is a very bad thing to *ever* move the primary + // during a modeset since a lot of sw caches the value and never + // checks again. + // + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + + // is our desired offset suitably aligned? + if (desiredOffset % pAllocData->alignment) + { + NV_PRINTF(LEVEL_ERROR, + "offset 0x%llx not aligned to 0x%llx\n", + desiredOffset, pAllocData->alignment); + goto failed; + } + + pBlockFree = pHeap->pFreeBlockList; + + if (pBlockFree == NULL) + { + NV_PRINTF(LEVEL_ERROR, "no free blocks\n"); + goto failed; + } + + do { + // + // Allocate from the bottom of the memory block. + // + pBlockFree = pBlockFree->u1.nextFree; + + // Does this block contain our desired range? 
+            if ((desiredOffset >= pBlockFree->begin) &&
+                (desiredOffset + pAllocData->allocSize - 1) <= pBlockFree->end)
+            {
+                // we have a match, now remove it from the pool
+                pAllocData->allocLo = desiredOffset;
+                pAllocData->allocHi = desiredOffset + pAllocData->allocSize - 1;
+                pAllocData->allocAl = pAllocData->allocLo;
+
+                // Check that the candidate block can support the allocation type
+                if (_isAllocValidForFBRegion(pGpu, pHeap, pFbAllocInfo, pAllocData))
+                    goto got_one;
+            }
+
+        } while (pBlockFree != pHeap->pFreeBlockList);
+
+        // return error if can't get that particular address
+        NV_PRINTF(LEVEL_ERROR,
+                  "failed NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE @%llx (%lld bytes)\n",
+                  desiredOffset, pAllocData->allocSize);
+        goto failed;
+    }
+
+    //
+    // Check if NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT was passed in with
+    // the pVidHeapAlloc->type to ignore placing this allocation in a particular bank.
+    // This means we default to the second loop where we choose first fit.
+    //
+    if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT)
+        ignoreBankPlacement = NV_TRUE;
+
+    //
+    // Bug 67690: Treat textures differently for more than one client (eg. opengl),
+    // [IN]: client, pVidHeapAlloc->type, ignoreBankPlacement
+    // [OUT]: heap, ignoreBankPlacement, textureClientIndex
+    //
+    // Bug 69385: Treat textures differently only if pVidHeapAlloc->flags are also set to zero.
+    // NV30GL-WinXP: Unable to run 3DMark2001SE @ 1600x1200x32bpp.
+    //
+    if ((pVidHeapAlloc->type == NVOS32_TYPE_TEXTURE) && (!pVidHeapAlloc->flags))
+        _heapSetTexturePlacement(pHeap, hClient, pVidHeapAlloc->type, &ignoreBankPlacement, &textureClientIndex, &currentBankInfo);
+
+    if (!ignoreBankPlacement)
+    {
+        currentBankInfo = (NvU8)bankPlacement & BANK_MEM_GROW_MASK;
+
+        if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_BANK_HINT)
+        {
+            if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_BANK_GROW_DOWN)
+                currentBankInfo = MEM_GROW_DOWN;
+            else
+                currentBankInfo = MEM_GROW_UP;
+            pVidHeapAlloc->flags &= ~(NVOS32_ALLOC_FLAGS_BANK_HINT); // hint flag only lasts for 1 loop
+        }
+        else
+        {
+            // Convert bank grow up/down to mem grow up/down
+            currentBankInfo = (currentBankInfo & BANK_MEM_GROW_DOWN ? MEM_GROW_DOWN : MEM_GROW_UP);
+        }
+    } // if (!ignoreBankPlacement)
+
+    pBlockFirstFree = pHeap->pFreeBlockList;
+    if (!pBlockFirstFree)
+    {
+        NV_PRINTF(LEVEL_ERROR, "no free blocks\n");
+        goto failed;
+    }
+
+    if (*pNoncontigAllocation)
+    {
+        NV_PRINTF(LEVEL_INFO, "non-contig vidmem requested\n");
+        goto non_contig_alloc;
+    }
+
+    //
+    // Loop through all available regions.
+    // Note we don't check for bRsvdRegion here because when blacklisting
+    // those regions we need them to succeed. 
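Distilled from the loop that follows, this is the condition that decides whether the region scan runs slowest-first or fastest-first; the local variable is illustrative only, not part of the change.

    NvBool bScanSlowestFirst =
        FLD_TEST_DRF(OS32, _ATTR2, _PRIORITY, _LOW, pFbAllocInfo->pageFormat->attr2) ||
        (pMemoryManager->bPreferSlowRegion &&
         !FLD_TEST_DRF(OS32, _ATTR2, _PRIORITY, _HIGH, pFbAllocInfo->pageFormat->attr2));
    // NV_TRUE  -> walk Ram.fbRegionPriority[] from the end (slowest regions first)
    // NV_FALSE -> walk Ram.fbRegionPriority[] from index 0 (fastest regions first)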
+ // + bDone = NV_FALSE; + i = 0; + while (!bDone) + { + NvU64 saveRangeLo = pVidHeapAlloc->rangeLo; + NvU64 saveRangeHi = pVidHeapAlloc->rangeHi; + + if (!memmgrAreFbRegionsSupported(pMemoryManager) || + gpuIsCacheOnlyModeEnabled(pGpu)) + { + bDone = NV_TRUE; + } + else + { + NV_ASSERT( pMemoryManager->Ram.numFBRegionPriority > 0 ); + + if (FLD_TEST_DRF(OS32, _ATTR2, _PRIORITY, _LOW, pFbAllocInfo->pageFormat->attr2) || + (pMemoryManager->bPreferSlowRegion && + !FLD_TEST_DRF(OS32, _ATTR2, _PRIORITY, _HIGH, pFbAllocInfo->pageFormat->attr2))) + { + NV_ASSERT( pMemoryManager->Ram.fbRegionPriority[pMemoryManager->Ram.numFBRegionPriority-1-i] < pMemoryManager->Ram.numFBRegions ); + NV_ASSERT( !pMemoryManager->Ram.fbRegion[pMemoryManager->Ram.fbRegionPriority[pMemoryManager->Ram.numFBRegionPriority-1-i]].bRsvdRegion ); + // + // We prefer slow memory, or we want _LOW priority + // ==>> Try allocations in increasing order of performance, + // slowest first + // + pVidHeapAlloc->rangeLo = NV_MAX(pVidHeapAlloc->rangeLo, pMemoryManager->Ram.fbRegion[pMemoryManager->Ram.fbRegionPriority[pMemoryManager->Ram.numFBRegionPriority-1-i]].base); + pVidHeapAlloc->rangeHi = NV_MIN(pVidHeapAlloc->rangeHi, pMemoryManager->Ram.fbRegion[pMemoryManager->Ram.fbRegionPriority[pMemoryManager->Ram.numFBRegionPriority-1-i]].limit); + } + else + { + NV_ASSERT( pMemoryManager->Ram.fbRegionPriority[i] < pMemoryManager->Ram.numFBRegions ); + NV_ASSERT( !pMemoryManager->Ram.fbRegion[pMemoryManager->Ram.fbRegionPriority[i]].bRsvdRegion ); + // + // We don't explicitly want slow memory or we don't prefer + // allocations in the slow memory + // ==>> Try allocations in decreasing order of performance, + // fastest first + // + pVidHeapAlloc->rangeLo = NV_MAX(pVidHeapAlloc->rangeLo, pMemoryManager->Ram.fbRegion[pMemoryManager->Ram.fbRegionPriority[i]].base); + pVidHeapAlloc->rangeHi = NV_MIN(pVidHeapAlloc->rangeHi, pMemoryManager->Ram.fbRegion[pMemoryManager->Ram.fbRegionPriority[i]].limit); + } + i++; + + bDone = !(i < pMemoryManager->Ram.numFBRegionPriority); + } + + // + // When scanning upwards, start at the bottom - 1 so the following loop looks symetrical. + // + if ( ! (currentBankInfo & MEM_GROW_DOWN)) + pBlockFirstFree = pBlockFirstFree->u0.prevFree; + pBlockFree = pBlockFirstFree; + + do + { + NvU64 blockLo; + NvU64 blockHi; + + if (currentBankInfo & MEM_GROW_DOWN) + pBlockFree = pBlockFree->u0.prevFree; + else + pBlockFree = pBlockFree->u1.nextFree; + + // + // Is this block completely in requested range? + // + // We *should* check that pBlockFree is wholely resident in the range, but the + // old check didn't and checking it causes some tests to fail. + // So check that at least *some* of the block resides within the requested range. + // + if ((pBlockFree->end >= pVidHeapAlloc->rangeLo) && (pBlockFree->begin <= pVidHeapAlloc->rangeHi)) + { + // + // Find the intersection of the free block and the specified range. + // + blockLo = (pVidHeapAlloc->rangeLo > pBlockFree->begin) ? pVidHeapAlloc->rangeLo : pBlockFree->begin; + blockHi = (pVidHeapAlloc->rangeHi < pBlockFree->end) ? pVidHeapAlloc->rangeHi : pBlockFree->end; + + if (currentBankInfo & MEM_GROW_DOWN) + { + // + // Allocate from the top of the memory block. + // + pAllocData->allocLo = (blockHi - pAllocData->allocSize + 1) / pAllocData->alignment * pAllocData->alignment; + pAllocData->allocAl = pAllocData->allocLo; + pAllocData->allocHi = pAllocData->allocAl + pAllocData->allocSize - 1; + } + else + { + // + // Allocate from the bottom of the memory block. 
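A worked instance of the placement arithmetic used here (numbers are illustrative; alignment is assumed to be a power of two, as the surrounding code expects):

    // Grow-down (top of block):  blockHi = 0xFEFF, allocSize = 0x2000, alignment = 0x1000
    //     allocLo = ((0xFEFF - 0x2000 + 1) / 0x1000) * 0x1000 = 0xD000
    //     allocHi = 0xD000 + 0x2000 - 1                       = 0xEFFF  (still within the block)
    // Grow-up (bottom of block): blockLo = 0x1234, allocSize = 0x2000, alignment = 0x1000
    //     allocAl = ((0x1234 + 0xFFF) / 0x1000) * 0x1000      = 0x2000
    //     allocHi = 0x2000 + 0x2000 - 1                       = 0x3FFF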
+ // + pAllocData->allocAl = (blockLo + (pAllocData->alignment - 1)) / pAllocData->alignment * pAllocData->alignment; + pAllocData->allocLo = pAllocData->allocAl; + pAllocData->allocHi = pAllocData->allocAl + pAllocData->allocSize - 1; + } + + // + // Does the desired range fall completely within this block? + // Also make sure it does not wrap-around. + // Also make sure it is within the desired range. + // + if ((pAllocData->allocLo >= pBlockFree->begin) && (pAllocData->allocHi <= pBlockFree->end)) + { + if (pAllocData->allocLo <= pAllocData->allocHi) + { + if ((pAllocData->allocLo >= pVidHeapAlloc->rangeLo) && (pAllocData->allocHi <= pVidHeapAlloc->rangeHi)) + { + // Check that the candidate block can support the allocation type + if (_isAllocValidForFBRegion(pGpu, pHeap, pFbAllocInfo, pAllocData)) + { + pVidHeapAlloc->rangeLo = saveRangeLo; + pVidHeapAlloc->rangeHi = saveRangeHi; + goto got_one; + } + } + } + } + } + + } while (pBlockFree != pBlockFirstFree); + + pVidHeapAlloc->rangeLo = saveRangeLo; + pVidHeapAlloc->rangeHi = saveRangeHi; + } + +non_contig_alloc: + if (!bNoncontigAllowed) + goto failed; + + if (!*pNoncontigAllocation) + { + NV_PRINTF(LEVEL_INFO, + "Contig vidmem allocation failed, running noncontig allocator\n"); + + // Create a new noncontig memdescriptor + memdescDestroy(pAllocRequest->pMemDesc); + + status = memdescCreate(&pAllocRequest->pMemDesc, pGpu, adjustedSize, + 0, NV_FALSE, ADDR_FBMEM, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "cannot alloc memDesc!\n"); + pMemDesc = pAllocRequest->pMemDesc = NULL; + goto failed; + } + + pMemDesc = pAllocRequest->pMemDesc; + pMemDesc->pHeap = pHeap; + + memdescSetPteKind(pMemDesc, pFbAllocInfo->format); + memdescSetHwResId(pMemDesc, pFbAllocInfo->hwResId); + } + + // Try the noncontig allocator + if (NV_OK == _heapAllocNoncontig(pGpu, + hClient, + pHeap, + pAllocRequest, + memHandle, + pAllocData, + pFbAllocInfo, + textureClientIndex, + pFbAllocInfo->alignPad, + &allocatedOffset, + pMemDesc, + pHwResource)) + { + *pNoncontigAllocation = NV_TRUE; + + // + // The noncontig allocator calls _heapProcessFreeBlock() + // by itself, so we goto done: straight + // + status = NV_OK; + goto return_early; + } + + NV_PRINTF(LEVEL_INFO, + "failed to allocate block. Heap total=0x%llx free=0x%llx\n", + pHeap->total, pHeap->free); + // Out of memory. + goto failed; + + // + // We have a match. Now link it in, trimming or splitting + // any slop from the enclosing block as needed. + // + +got_one: + if (NV_OK != _heapProcessFreeBlock(pGpu, pBlockFree, + &pBlockNew, &pBlockSplit, + pHeap, pAllocRequest, + memHandle, + pAllocData, pFbAllocInfo, + pFbAllocInfo->alignPad, + &allocatedOffset) || + NV_OK != _heapUpdate(pHeap, pBlockNew, BLOCK_FREE_STATE_CHANGED)) +failed: + { + + NV_PRINTF(LEVEL_INFO, + "failed to allocate block. Heap total=0x%llx free=0x%llx\n", + pHeap->total, pHeap->free); + + portMemFree(pBlockNew); + pBlockNew = NULL; + portMemFree(pBlockSplit); + status = NV_ERR_NO_MEMORY; + goto return_early; + } + + // + // If a client calls us with pVidHeapAlloc->type == NVOS32_TYPE_TEXTURE, but where flags + // are non-zero, we won't call _heapSetTexturePlacement and initialize + // textureClientIndex to a proper value (default is 0xFFFFFFFF). In that + // case, we won't track this texture allocation. Bug 79586. 
+ // + if (pVidHeapAlloc->type == NVOS32_TYPE_TEXTURE && + textureClientIndex != 0xFFFFFFFF) + { + pBlockNew->textureId = hClient; + pHeap->textureData[textureClientIndex].refCount++; + } + else + { + pBlockNew->textureId = 0; + } + + pFbAllocInfo->offset = allocatedOffset; + + // TODO : This must be inside *all* blocks of a noncontig allocation + if (!*pNoncontigAllocation) + { + pBlockNew->pitch = pFbAllocInfo->pitch; + pBlockNew->height = pFbAllocInfo->height; + pBlockNew->width = pFbAllocInfo->width; + } + + *pHwResource = &pBlockNew->hwResource; + + // Remember memory descriptor + memdescDescribe(pMemDesc, ADDR_FBMEM, allocatedOffset, adjustedSize); + pBlockNew->pMemDesc = pMemDesc; + pBlockNew->allocedMemDesc = bAllocedMemdesc; + + status = NV_OK; + +return_early: + HEAP_VALIDATE(pHeap); + + if (bTurnBlacklistOff) + { + if (!hypervisorIsVgxHyper()) + _heapBlacklistChunks(pGpu, pHeap, &pHeap->blackList, desiredOffset, pVidHeapAlloc->size); + else + _heapBlacklistChunksInFreeBlocks(pGpu, pHeap); + } + + return status; +} + +static void _heapBlacklistChunksInFreeBlocks +( + OBJGPU *pGpu, + Heap *pHeap +) +{ + MEM_BLOCK *pBlockFirstFree, *pBlockFree; + NvU64 blockLo; + NvU64 blockHi; + NvU64 size; + + pBlockFirstFree = pHeap->pFreeBlockList; + + if (pBlockFirstFree) + { + pBlockFirstFree = pBlockFirstFree->u0.prevFree; + pBlockFree = pBlockFirstFree; + do + { + pBlockFree = pBlockFree->u1.nextFree; + blockLo = pBlockFree->begin; + blockHi = pBlockFree->end; + size = blockHi - blockLo + 1; + + _heapBlacklistChunks(pGpu, pHeap, &pHeap->blackList, blockLo, size); + + } while (pBlockFree != pBlockFirstFree); + } +} + +static NV_STATUS _heapBlockFree +( + OBJGPU *pGpu, + Heap *pHeap, + MEM_BLOCK *pBlock +) +{ + MEM_BLOCK *pBlockTmp; + NvU32 i; + OBJOS *pOS = GPU_GET_OS(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool bBlocksMerged = NV_FALSE; + + // + // Check for valid owner. + // + if (pBlock->owner == NVOS32_BLOCK_TYPE_FREE) + return NV_ERR_INVALID_STATE; + + pBlock->owner = NVOS32_BLOCK_TYPE_FREE; + + if (NV_OK != _heapUpdate(pHeap, pBlock, BLOCK_FREE_STATE_CHANGED)) + { + return NV_ERR_INVALID_STATE; + } + + // + // Update free count. 
+ // + _heapAdjustFree(pHeap, pBlock->end - pBlock->begin + 1, + FLD_TEST_DRF(OS32, _ATTR2, _INTERNAL, _YES, pBlock->hwResource.attr2)); + + // + // Release any HW resources that might've been in use + // + { + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pFbAllocInfo == NULL) + { + NV_ASSERT(0); + return NV_ERR_NO_MEMORY; + } + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + if (pFbAllocPageFormat == NULL) { + NV_ASSERT(0); + portMemFree(pFbAllocInfo); + return NV_ERR_NO_MEMORY; + } + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + pFbAllocInfo->pageFormat->type = pBlock->u0.type; + pFbAllocInfo->hwResId = pBlock->hwResource.hwResId; + pFbAllocInfo->height = 0; + pFbAllocInfo->pitch = 0; + pFbAllocInfo->size = pBlock->end - pBlock->begin + 1; + pFbAllocInfo->align = pBlock->align; + pFbAllocInfo->alignPad = pBlock->alignPad; + pFbAllocInfo->offset = pBlock->begin; + pFbAllocInfo->format = pBlock->format; + pFbAllocInfo->comprCovg = pBlock->hwResource.comprCovg; + pFbAllocInfo->zcullCovg = 0; + pFbAllocInfo->pageFormat->attr = pBlock->hwResource.attr; + pFbAllocInfo->pageFormat->attr2 = pBlock->hwResource.attr2; + pFbAllocInfo->ctagOffset = pBlock->hwResource.ctagOffset; + + memmgrFreeHwResources(pGpu, pMemoryManager, pFbAllocInfo); + + if (FLD_TEST_DRF(OS32, _ATTR2, _INTERNAL, _YES, pFbAllocInfo->pageFormat->attr2)) + { + pOS->osInternalReserveFreeCallback(pFbAllocInfo->offset, pGpu->gpuId); + } + + // Clear the HW resource associations since this block can be reused or merged. + portMemSet(&pBlock->hwResource, 0, sizeof(pBlock->hwResource)); + + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + } + + if ((pBlock->u0.type == NVOS32_TYPE_TEXTURE) && (pBlock->textureId != 0)) + { + for (i = 0; i < MAX_TEXTURE_CLIENT_IDS; i++) + { + // + // 1. Find the client within the textureData structure + // 2. Once found, set the value to 0 + // 3. Then decrement its refCount + // 4. If refCount goes to zero, reset the textureData structure + // that pertains to that index. + // + if (pHeap->textureData[i].clientId == pBlock->textureId) + { + pBlock->textureId = 0; + pHeap->textureData[i].refCount--; + if (pHeap->textureData[i].refCount == 0) + portMemSet(&pHeap->textureData[i], 0, + sizeof(TEX_INFO)); + break; + } + } + } + + // Account for freeing any reserved RM region + if ((pBlock->u0.type == NVOS32_TYPE_RESERVED) && (pBlock->owner == HEAP_OWNER_RM_RESERVED_REGION)) + { + NV_ASSERT(pHeap->reserved >= pBlock->end - pBlock->begin + 1); + pHeap->reserved -= pBlock->end - pBlock->begin + 1; + } + + // + // + // Can this merge with any surrounding free blocks? + // + if ((pBlock->prev->owner == NVOS32_BLOCK_TYPE_FREE) && (pBlock != pHeap->pBlockList)) + { + // + // Remove block to be freed and previous one since nodes will be + // combined into single one. + // + if (NV_OK != _heapUpdate(pHeap, pBlock, BLOCK_REMOVE)) + { + return NV_ERR_INVALID_STATE; + } + if (NV_OK != _heapUpdate(pHeap, pBlock->prev, BLOCK_REMOVE)) + { + return NV_ERR_INVALID_STATE; + } + + // + // Merge with previous block. 
+ // + pBlock->prev->next = pBlock->next; + pBlock->next->prev = pBlock->prev; + pBlock->prev->end = pBlock->end; + pBlockTmp = pBlock; + pBlock = pBlock->prev; + pHeap->numBlocks--; + portMemFree(pBlockTmp); + + // re-insert updated free block into rb-tree + if (NV_OK != _heapUpdate(pHeap, pBlock, BLOCK_SIZE_CHANGED)) + { + return NV_ERR_INVALID_STATE; + } + + bBlocksMerged = NV_TRUE; + } + + if ((pBlock->next->owner == NVOS32_BLOCK_TYPE_FREE) && (pBlock->next != pHeap->pBlockList)) + { + // + // Remove block to be freed and next one since nodes will be + // combined into single one. + // + if (NV_OK != _heapUpdate(pHeap, pBlock, BLOCK_REMOVE)) + { + return NV_ERR_INVALID_STATE; + } + if (NV_OK != _heapUpdate(pHeap, pBlock->next, BLOCK_REMOVE)) + { + return NV_ERR_INVALID_STATE; + } + + // + // Merge with next block. + // + pBlock->prev->next = pBlock->next; + pBlock->next->prev = pBlock->prev; + pBlock->next->begin = pBlock->begin; + + if (pHeap->pBlockList == pBlock) + pHeap->pBlockList = pBlock->next; + + if (bBlocksMerged) + { + if (pHeap->pFreeBlockList == pBlock) + pHeap->pFreeBlockList = pBlock->u1.nextFree; + + pBlock->u1.nextFree->u0.prevFree = pBlock->u0.prevFree; + pBlock->u0.prevFree->u1.nextFree = pBlock->u1.nextFree; + } + + pBlockTmp = pBlock; + pBlock = pBlock->next; + pHeap->numBlocks--; + portMemFree(pBlockTmp); + + // re-insert updated free block into rb-tree + if (NV_OK != _heapUpdate(pHeap, pBlock, BLOCK_SIZE_CHANGED)) + { + return NV_ERR_INVALID_STATE; + } + + bBlocksMerged = NV_TRUE; + } + + if (!bBlocksMerged) + { + // + // Nothing was merged. Add to free list. + // + pBlockTmp = pHeap->pFreeBlockList; + if (!pBlockTmp) + { + pHeap->pFreeBlockList = pBlock; + pBlock->u1.nextFree = pBlock; + pBlock->u0.prevFree = pBlock; + } + else + { + if (pBlockTmp->begin > pBlock->begin) + // + // Insert into beginning of free list. + // + pHeap->pFreeBlockList = pBlock; + else if (pBlockTmp->u0.prevFree->begin > pBlock->begin) + // + // Insert into free list. + // + do + { + pBlockTmp = pBlockTmp->u1.nextFree; + } while (pBlockTmp->begin < pBlock->begin); + /* + else + * Insert at end of list. + */ + pBlock->u1.nextFree = pBlockTmp; + pBlock->u0.prevFree = pBlockTmp->u0.prevFree; + pBlock->u0.prevFree->u1.nextFree = pBlock; + pBlockTmp->u0.prevFree = pBlock; + } + } + + pBlock->mhandle = 0x0; + pBlock->align = pBlock->begin; + pBlock->alignPad = 0; + pBlock->format = 0; + + HEAP_VALIDATE(pHeap); + return (NV_OK); +} + +NV_STATUS heapReference_IMPL +( + OBJGPU *pGpu, + Heap *pHeap, + NvU32 owner, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NvU64 offsetAlign = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + MEM_BLOCK *pBlock; + + // Bail out in case allocation is in PMA owned FB region. + if (pMemDesc->pPmaAllocInfo) + { + if (0 != pMemDesc->pPmaAllocInfo->refCount) + { + pMemDesc->pPmaAllocInfo->refCount++; + if (IsSLIEnabled(pGpu) && + (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM)) + { // + memdescAddRef(pMemDesc); // Otherwise we have a fake parent descriptor removed with existing submem descriptors. + // In SLI only (not fully understood yet!). In non SLI, that memAddref() causes a memleak. 
+ // + } + } + return NV_OK; + } + + if (owner == NVOS32_BLOCK_TYPE_FREE) + return NV_ERR_INVALID_STATE; + + pBlock = _heapFindAlignedBlockWithOwner(pGpu, pHeap, owner, offsetAlign); + + if (!pBlock) + return NV_ERR_INVALID_OFFSET; + + if (pBlock->refCount == HEAP_MAX_REF_COUNT) + { + NV_PRINTF(LEVEL_ERROR, + "heapReference: reference count %x will exceed maximum 0x%x:\n", + pBlock->refCount, HEAP_MAX_REF_COUNT); + return NV_ERR_GENERIC; + } + + pBlock->refCount++; + if (IsSLIEnabled(pGpu) && + (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM)) + { // + memdescAddRef(pMemDesc); // Otherwise we have a fake parent descriptor removed with existing submem descriptors. + // In SLI only (not fully understood yet!). In non SLI, that memAddref() causes a memleak. + // + } + return NV_OK; +} + +static NV_STATUS +_heapFindBlockByOffset +( + OBJGPU *pGpu, + Heap *pHeap, + NvU32 owner, + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 offset, + MEM_BLOCK **ppBlock +) +{ + NV_STATUS status; + + // IRQL TEST: must be running at equivalent of passive-level + IRQL_ASSERT_AND_RETURN(!osIsRaisedIRQL()); + + *ppBlock = _heapFindAlignedBlockWithOwner(pGpu, pHeap, owner, + offset); + + if (!*ppBlock) + { + // Try finding block based solely on offset. This is primarily needed + // to successfully locate a block that was allocated multiple times via + // NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE in heapAlloc: there can + // be multiple owners, so that _heapFindAlignedBlockWithOwner may fail. + if ((status = heapGetBlock(pHeap, offset, ppBlock)) != NV_OK + || !*ppBlock) + return NV_ERR_INVALID_OFFSET; + } + + return NV_OK; +} + +NV_STATUS +heapFree_IMPL +( + OBJGPU *pGpu, + Heap *pHeap, + NvU32 owner, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_STATUS status; + MEM_BLOCK *pBlock; + MEM_BLOCK *pNextBlock; + NvU64 offsetAlign = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + NvU64 allocBegin = 0; + NvU64 allocEnd = 0; + NvBool bTurnBlacklistOff = NV_FALSE; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + NV_ASSERT_OR_RETURN(pMemDesc->pHeap == pHeap, NV_ERR_INVALID_ARGUMENT); + + if (memdescGetContiguity(pMemDesc, AT_GPU)) + { + status = _heapFindBlockByOffset(pGpu, pHeap, + owner, pMemDesc, offsetAlign, + &pBlock); + if (NV_OK != status) + { + return status; + } + + if (pBlock->allocedMemDesc) + { + if (pMemDesc != pBlock->pMemDesc) + { + NV_ASSERT(pMemDesc == pBlock->pMemDesc); + return NV_ERR_INVALID_ARGUMENT; + } + + // Clear only if the memdesc is about to be freed by memdescDestroy() + if (pMemDesc->RefCount == 1) + { + pBlock->pMemDesc = NULL; + } + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + + if (--pBlock->refCount != 0) + return NV_OK; + + + if(pGpu->getProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT) && + gpuCheckPageRetirementSupport_HAL(pGpu)) + { + if (FLD_TEST_DRF(OS32, _ATTR2, _BLACKLIST, _OFF, pBlock->hwResource.attr2)) + { + bTurnBlacklistOff = NV_TRUE; + allocBegin = pBlock->begin; + allocEnd = pBlock->end; + } + } + + // + // Since _heapBlockFree() unconditionally releases HW resources + // such as compression tags, some memory descriptor fields + // are rendered stale. These fields need to be reset to safer + // default values (e.g. invalid HW resource ID, pitch PTE + // kind, etc.) - they may be referenced again before the memory + // descriptor itself is freed. + // + if (pBlock->allocedMemDesc && (pBlock->pMemDesc != NULL)) + { + memdescSetHwResId(pMemDesc, 0); + // XXX We cannot reset the PTE kind here since it cause corruption + // in RAGE. 
See bug 949059 + // + // This is an ugly hack to help OGL recover from modeswitch. + // A cleaner fix would be to change the way memory is managed in OGL, + // but it doesn't worth the effort to fix that on XP, since the OS is + // close to end of life. The OGL linux team have plan to change their + // memory management in the future, so later this hack may not be + // required anymore + // pMemDesc->PteKind = 0; + } + + if ((status = _heapBlockFree(pGpu, pHeap, pBlock)) != NV_OK) + { + NV_ASSERT(0); + } + + // + // since the mem desc is freed, now we can reallocate the blacklisted pages + // in the [allocBegin, allocEnd] + // + if (bTurnBlacklistOff) + status = _heapBlacklistChunks(pGpu, pHeap, &pHeap->blackList, allocBegin, allocEnd-allocBegin+1); + + if (pMemoryManager->bEnableDynamicPageOfflining) + { + NvU32 i = 0; + BLACKLIST *pBlacklist = &pHeap->blackList; + BLACKLIST_CHUNK *pBlacklistChunks = pBlacklist->pBlacklistChunks; + + for (i = 0; i < pBlacklist->count; i++) + { + if (pBlacklistChunks[i].bPendingRetirement && + (pBlacklistChunks[i].physOffset >= allocBegin && + pBlacklistChunks[i].physOffset <= allocEnd)) + { + status = _heapBlacklistSingleChunk(pGpu, pHeap, &pBlacklist->pBlacklistChunks[i]); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "heapBlacklistSingleChunk, status: %x!\n", status); + return status; + } + } + } + } + return status; + } + else + { + NvBool bBlacklistFailed = NV_FALSE; + // + // Use the pMemDesc->PteArray[0] to find the first block + // The remaining blocks can be found from each block's + // noncontigAllocListNext pointer + // + status = _heapFindBlockByOffset(pGpu, pHeap, + owner, pMemDesc, + memdescGetPte(pMemDesc, AT_GPU, 0), &pBlock); + + if (NV_OK != status) + { + return status; + } + + while (pBlock != NULL) + { + // _heapBlockFree() clears pBlock, so save the next pointer + pNextBlock = pBlock->noncontigAllocListNext; + + if (--pBlock->refCount != 0) + { + // Remove this block from the noncontig allocation list + pBlock->noncontigAllocListNext = NULL; + pBlock = pNextBlock; + continue; + } + + if (NV_OK != (status = _heapBlockFree(pGpu, pHeap, pBlock))) + return status; + + // check if we need to dynamically blacklist the page + if (pMemoryManager->bEnableDynamicPageOfflining) + { + NvU32 i = 0; + BLACKLIST *pBlacklist = &pHeap->blackList; + BLACKLIST_CHUNK *pBlacklistChunks = pBlacklist->pBlacklistChunks; + for (i = 0; i < pBlacklist->count; i++) + { + if (pBlacklistChunks[i].bPendingRetirement && + (pBlacklistChunks[i].physOffset >= pBlock->begin && + pBlacklistChunks[i].physOffset <= pBlock->end)) + { + status = _heapBlacklistSingleChunk(pGpu, pHeap, &pBlacklist->pBlacklistChunks[i]); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "heapBlacklistSingleChunk, status: %x!\n", status); + bBlacklistFailed = NV_TRUE; + } + } + } + } + pBlock = pNextBlock; + } + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + + if (bBlacklistFailed) + { + return NV_ERR_INVALID_STATE; + } + else + { + return status; + } + } +} + +NV_STATUS heapGetBlock_IMPL +( + Heap *pHeap, + NvU64 offset, + MEM_BLOCK **ppMemBlock +) +{ + NODE *pNode; + + if (btreeSearch(offset, &pNode, pHeap->pBlockTree) != NV_OK) + { + if (ppMemBlock) + { + *ppMemBlock = NULL; + } + return NV_ERR_GENERIC; + } + + if (ppMemBlock) + { + *ppMemBlock = (MEM_BLOCK *)pNode->Data; + } + + return NV_OK; +} + +static MEM_BLOCK *_heapFindAlignedBlockWithOwner +( + OBJGPU *pGpu, + Heap *pHeap, + NvU32 owner, + NvU64 offset // aligned +) +{ + MEM_BLOCK *pBlock; + NODE *pNode; + + 
HEAP_VALIDATE(pHeap); + + if (btreeSearch(offset, &pNode, pHeap->pBlockTree) != NV_OK) + { + return NULL; + } + + pBlock = (MEM_BLOCK *)pNode->Data; + if (pBlock->owner != owner) + { + return NULL; + } + + return pBlock; +} + +NV_STATUS heapGetSize_IMPL +( + Heap *pHeap, + NvU64 *size +) +{ + *size = pHeap->total; + HEAP_VALIDATE(pHeap); + return (NV_OK); +} + +NV_STATUS heapGetUsableSize_IMPL +( + Heap *pHeap, + NvU64 *usableSize +) +{ + *usableSize = pHeap->total - pHeap->reserved; + HEAP_VALIDATE(pHeap); + return (NV_OK); +} + +NV_STATUS heapGetFree_IMPL +( + Heap *pHeap, + NvU64 *free +) +{ + *free = pHeap->free; + HEAP_VALIDATE(pHeap); + return (NV_OK); +} + +NV_STATUS heapGetBase_IMPL +( + Heap *pHeap, + NvU64 *base +) +{ + *base = pHeap->base; + HEAP_VALIDATE(pHeap); + return (NV_OK); +} + +static NV_STATUS _heapGetMaxFree +( + Heap *pHeap, + NvU64 *maxOffset, + NvU64 *maxFree +) +{ + MEM_BLOCK *pBlockFirstFree, *pBlockFree; + NvU64 freeBlockSize; + + *maxFree = 0; + + pBlockFirstFree = pHeap->pFreeBlockList; + if (!pBlockFirstFree) + // There are no free blocks. Max free is already set to 0 + return (NV_OK); + + // Walk the free block list. + pBlockFree = pBlockFirstFree; + do { + freeBlockSize = pBlockFree->end - pBlockFree->begin + 1; + if (freeBlockSize > *maxFree) + { + *maxOffset = pBlockFree->begin; + *maxFree = freeBlockSize; + } + pBlockFree = pBlockFree->u1.nextFree; + } while (pBlockFree != pBlockFirstFree); + + return (NV_OK); +} + +NV_STATUS heapInfo_IMPL +( + Heap *pHeap, + NvU64 *bytesFree, + NvU64 *bytesTotal, + NvU64 *base, + NvU64 *largestOffset, // largest free blocks offset + NvU64 *largestFree // largest free blocks size +) +{ + NV_STATUS status; + + *bytesFree = pHeap->free; + *bytesTotal = pHeap->total - pHeap->reserved; + *base = pHeap->base; + status = _heapGetMaxFree(pHeap, largestOffset, largestFree); + HEAP_VALIDATE(pHeap); + + return status; +} + +NV_STATUS heapInfoTypeAllocBlocks_IMPL +( + Heap *pHeap, + NvU32 type, + NvU64 *bytesTotal +) +{ + MEM_BLOCK *pBlock; + NvU64 total; + + if (type >= NVOS32_NUM_MEM_TYPES) return (NV_ERR_GENERIC); + + pBlock = pHeap->pBlockList; + total = 0; + + if (type == NVOS32_TYPE_OWNER_RM) + { + // + // Scan for all the blocks whose owner is within + // HEAP_OWNER_RM_SCRATCH_BEGIN and HEAP_OWNER_RM_SCRATCH_END + // this is strictly speaking not 'type' search. Also note that this + // includes reserved space in any,.like in case of 3FB mixed density mode. + // + do + { + if ( (pBlock->owner > HEAP_OWNER_RM_SCRATCH_BEGIN) && + (pBlock->owner < HEAP_OWNER_RM_SCRATCH_END) ) + { + total += (pBlock->end - pBlock->begin + 1); + } + pBlock = pBlock->next; + } while (pBlock != pHeap->pBlockList); + } + else + { + // + // Scan for all the blocks belonging to this type. 
+        //
+        do
+        {
+            if (pBlock->u0.type == type)
+                total += (pBlock->end - pBlock->begin + 1);
+            pBlock = pBlock->next;
+        } while (pBlock != pHeap->pBlockList);
+    }
+
+    *bytesTotal = total;
+
+    HEAP_VALIDATE(pHeap);
+    return NV_OK;
+}
+
+NV_STATUS heapGetBlockHandle_IMPL(
+    Heap *pHeap,
+    NvU32 owner,
+    NvU32 type,
+    NvU64 offset,
+    NvBool bSkipCheck, // NV_TRUE if skip alignment/type check
+    NvHandle *puHandle
+)
+{
+    MEM_BLOCK *pBlock;
+    NV_STATUS status;
+
+    if (offset > (pHeap->base + pHeap->total - 1)) return (NV_ERR_GENERIC);
+
+    status = heapGetBlock(pHeap, offset, &pBlock);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    if (!((pBlock->owner == owner) &&
+          (((pBlock->u0.type == type) && (pBlock->align == offset)) || bSkipCheck)))
+    {
+        return NV_ERR_GENERIC;
+    }
+
+    *puHandle = pBlock->mhandle;
+    return NV_OK;
+}
+
+//
+// Returns the number of blocks (free or allocated) currently in the heap
+//
+NvU32 heapGetNumBlocks_IMPL
+(
+    Heap *pHeap
+)
+{
+    return pHeap->numBlocks;
+}
+
+//
+// Copies over block information for each block in the heap into the provided buffer
+//
+NV_STATUS heapGetBlockInfo_IMPL
+(
+    Heap *pHeap,
+    NvU32 size,
+    NVOS32_HEAP_DUMP_BLOCK *pBlockBuffer
+)
+{
+    MEM_BLOCK *pBlock;
+    NvU32 heapSize, i;
+    NV_STATUS rmStatus = NV_OK;
+
+    // ensure buffer is the same size
+    heapSize = heapGetNumBlocks(pHeap);
+    NV_ASSERT_OR_RETURN(heapSize == size, NV_ERR_INVALID_ARGUMENT);
+
+    pBlock = pHeap->pBlockList;
+    for (i = 0; i < heapSize; i++)
+    {
+        pBlockBuffer->begin = pBlock->begin;
+        pBlockBuffer->align = pBlock->align;
+        pBlockBuffer->end = pBlock->end;
+        pBlockBuffer->owner = pBlock->owner;
+        pBlockBuffer->format = pBlock->format;
+        pBlock = pBlock->next;
+        pBlockBuffer++;
+    }
+
+    return rmStatus;
+}
+
+NV_STATUS heapAllocHint_IMPL
+(
+    OBJGPU *pGpu,
+    Heap *pHeap,
+    NvHandle hClient,
+    NvHandle hDevice,
+    HEAP_ALLOC_HINT_PARAMS *pAllocHint
+)
+{
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    NvU64 alignment;
+    NV_STATUS status;
+    NvBool ignoreBankPlacement;
+    NvU32 textureClientIndex = 0xFFFFFFFF;
+    NvU32 bankPlacement = 0;
+    NvU8 currentBankInfo = 0;
+    FB_ALLOC_INFO *pFbAllocInfo = NULL;
+    FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL;
+    NvU32 pageSize = 0;
+    NvU32 flags;
+    NvU32 owner;
+    NvU32 tiledAttr;
+
+    // Check for valid size.
+    NV_ASSERT_OR_RETURN((pAllocHint->pSize != NULL), NV_ERR_INVALID_ARGUMENT);
+
+    // Ensure a valid allocation type was passed in
+    NV_ASSERT_OR_RETURN((pAllocHint->type < NVOS32_NUM_MEM_TYPES), NV_ERR_INVALID_ARGUMENT);
+
+    // As we will dereference these two later, we should not allow NULL value. 
+ NV_ASSERT_OR_RETURN(((pAllocHint->pHeight != NULL) && (pAllocHint->pAttr != NULL)), NV_ERR_INVALID_ARGUMENT); + + owner = 0x0; + status = _heapGetBankPlacement(pGpu, pHeap, owner, + &pAllocHint->flags, pAllocHint->type, + 0x0, &bankPlacement); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "_heapGetBankPlacement failed for current allocation\n"); + goto exit; + } + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pFbAllocInfo == NULL) + { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto exit; + } + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + if (pFbAllocPageFormat == NULL) { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto exit; + } + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + pFbAllocInfo->pageFormat->type = pAllocHint->type; + pFbAllocInfo->hwResId = 0; + pFbAllocInfo->pad = 0; + pFbAllocInfo->height = *pAllocHint->pHeight; + pFbAllocInfo->width = *pAllocHint->pWidth; + pFbAllocInfo->pitch = (pAllocHint->pPitch) ? (*pAllocHint->pPitch) : 0; + pFbAllocInfo->size = *pAllocHint->pSize; + pFbAllocInfo->pageFormat->kind = 0; + pFbAllocInfo->offset = ~0; + pFbAllocInfo->hClient = hClient; + pFbAllocInfo->hDevice = hDevice; + pFbAllocInfo->pageFormat->flags = pAllocHint->flags; + pFbAllocInfo->pageFormat->attr = *pAllocHint->pAttr; + pFbAllocInfo->retAttr = *pAllocHint->pAttr; + pFbAllocInfo->pageFormat->attr2 = *pAllocHint->pAttr2; + pFbAllocInfo->retAttr2 = *pAllocHint->pAttr2; + pFbAllocInfo->format = pAllocHint->format; + + if ((pAllocHint->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT) || + (pAllocHint->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE)) + pFbAllocInfo->align = *pAllocHint->pAlignment; + else + pFbAllocInfo->align = RM_PAGE_SIZE; + + // Fetch RM page size + pageSize = memmgrDeterminePageSize(pMemoryManager, pFbAllocInfo->hClient, pFbAllocInfo->size, + pFbAllocInfo->format, pFbAllocInfo->pageFormat->flags, + &pFbAllocInfo->retAttr, &pFbAllocInfo->retAttr2); + if (pageSize == 0) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, "memmgrDeterminePageSize failed, status: 0x%x\n", status); + goto exit; + } + + // Fetch memory alignment + status = memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, &pFbAllocInfo->size, &pFbAllocInfo->align, + pFbAllocInfo->alignPad, pFbAllocInfo->pageFormat->flags, + pFbAllocInfo->retAttr, pFbAllocInfo->retAttr2, 0); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "memmgrAllocDetermineAlignment failed, status: 0x%x\n", status); + goto exit; + } + + // + // Call into HAL to reserve any hardware resources for + // the specified memory type. 
+ // If the alignment was changed due to a HW limitation, and the + // flag NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE is set, bad_argument + // will be passed back from the HAL + // + flags = pFbAllocInfo->pageFormat->flags; + pFbAllocInfo->pageFormat->flags |= NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC; + status = memmgrAllocHwResources(pGpu, pMemoryManager, pFbAllocInfo); + pFbAllocInfo->pageFormat->flags = flags; + *pAllocHint->pAttr = pFbAllocInfo->retAttr; + *pAllocHint->pAttr2 = pFbAllocInfo->retAttr2; + *pAllocHint->pKind = pFbAllocInfo->pageFormat->kind; + + // Save retAttr as Possible Attributes that have passed error checking and + // clear retAttr because we have not allocated them yet + pFbAllocInfo->possAttr = pFbAllocInfo->retAttr; + // pFbAllocInfo->possAttr2 = pFbAllocInfo->retAttr2; + pFbAllocInfo->retAttr = 0x0; + pFbAllocInfo->retAttr2 = 0x0; + if (status != NV_OK) + { + // + // probably means we passed in a bogus type or no tiling resources available + // when tiled memory attribute was set to REQUIRED + // + NV_PRINTF(LEVEL_ERROR, "memmgrAllocHwResources failed, status: 0x%x\n", + status); + goto exit; + } + + // + // Refresh search parameters. + // + if ((DRF_VAL(OS32, _ATTR, _FORMAT, *pAllocHint->pAttr) != NVOS32_ATTR_FORMAT_BLOCK_LINEAR)) + { + *pAllocHint->pHeight = pFbAllocInfo->height; + if (pAllocHint->pPitch) + *pAllocHint->pPitch = pFbAllocInfo->pitch; + } + + // + // The heap allocator has assumed required alignments are powers of 2 + // (aligning FB offsets has been done using bit masks). + // + // + *pAllocHint->pAlignment = pFbAllocInfo->align + 1; // convert mask to size + alignment = pFbAllocInfo->align + 1; + + // + // Allow caller to request host page alignment to make it easier + // to move things around with host os VM subsystem + // + + if (pAllocHint->flags & NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 hostPageSize = pSys->cpuInfo.hostPageSize; + + // hostPageSize *should* always be set, but.... + if (hostPageSize == 0) + hostPageSize = RM_PAGE_SIZE; + + alignment = memUtilsLeastCommonAlignment(alignment, hostPageSize); + } + + if (memmgrAllocGetAddrSpace(pMemoryManager, pAllocHint->flags, *pAllocHint->pAttr) == ADDR_FBMEM) + { + if (alignment >= pHeap->total) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_ERROR, "heapAllocHint failed due to alignmend >= pHeap->total\n"); + goto exit; + } + } + + // + // Check if NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT was passed in with + // the type to ignore placing this allocation in a particular bank. + // This means we default to the second loop where we choose first fit. 
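To make the mask-versus-size convention used a few lines above concrete (values are illustrative, and memUtilsLeastCommonAlignment is assumed to return the least common multiple of the two alignments, per its use here):

    // At this point pFbAllocInfo->align holds an alignment mask, e.g. 0xFFF for 4 KiB,
    // so "align + 1" recovers the alignment size:
    //     alignment = pFbAllocInfo->align + 1;                         // 0xFFF -> 0x1000
    // With NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE and 64 KiB host pages:
    //     alignment = memUtilsLeastCommonAlignment(0x1000, 0x10000);   // -> 0x10000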
+    //
+    ignoreBankPlacement = NV_FALSE;
+    if (pAllocHint->flags & NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT)
+        ignoreBankPlacement = NV_TRUE;
+
+    if ((pAllocHint->type == NVOS32_TYPE_TEXTURE) && (!pAllocHint->flags))
+        _heapSetTexturePlacement(pHeap, pAllocHint->client, pAllocHint->type, &ignoreBankPlacement, &textureClientIndex, &currentBankInfo);
+
+    pAllocHint->bankPlacement = bankPlacement;
+    pAllocHint->ignoreBankPlacement = ignoreBankPlacement;
+
+    *pAllocHint->pHeight = pFbAllocInfo->height;
+    pAllocHint->pad = pFbAllocInfo->pad;
+
+    // Special Case for Tiled Allocations
+    // Adjust the size so we don't use the partial tile areas
+    tiledAttr = DRF_VAL(OS32, _ATTR, _TILED, *pAllocHint->pAttr);
+    if (tiledAttr)
+    {
+        *pAllocHint->pSize = pFbAllocInfo->pitch * pFbAllocInfo->height;
+        if (pFbAllocInfo->size > *pAllocHint->pSize)
+        {
+            pAllocHint->pad += (pFbAllocInfo->size - *pAllocHint->pSize);
+        }
+        else if (pFbAllocInfo->size != *pAllocHint->pSize)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "nvrm: Hint Allocation Size Error %llx %llx\n",
+                      pFbAllocInfo->size, *pAllocHint->pSize);
+            DBG_BREAKPOINT();
+        }
+    }
+    else
+    {
+        *pAllocHint->pSize = pFbAllocInfo->size; // returned to caller
+    }
+
+    pAllocHint->alignAdjust = 0;
+
+exit:
+    portMemFree(pFbAllocPageFormat);
+    portMemFree(pFbAllocInfo);
+
+    return status;
+}
+
+NV_STATUS heapHwAlloc_IMPL
+(
+    OBJGPU *pGpu,
+    Heap *pHeap,
+    NvHandle hClient,
+    NvHandle hDevice,
+    NvHandle hMemory,
+    MEMORY_HW_RESOURCES_ALLOCATION_REQUEST *pHwAlloc,
+    NvU32 *pAttr,
+    NvU32 *pAttr2
+)
+{
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    NV_STATUS status = NV_OK;
+    FB_ALLOC_INFO *pFbAllocInfo = NULL;
+    FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL;
+    NvU32 tiledAttr;
+    NvU32 pageSize = 0;
+    NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS *pUserParams = pHwAlloc->pUserParams;
+
+    // Ensure a valid allocation type was passed in
+    if (pUserParams->type > NVOS32_NUM_MEM_TYPES - 1)
+        return NV_ERR_GENERIC;
+
+    pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO));
+    if (NULL == pFbAllocInfo)
+    {
+        NV_PRINTF(LEVEL_ERROR, "No memory for Resource %p\n",
+                  pHwAlloc->pHandle);
+        status = NV_ERR_GENERIC;
+        goto failed;
+    }
+    pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT));
+    if (NULL == pFbAllocPageFormat)
+    {
+        NV_PRINTF(LEVEL_ERROR, "No memory for Resource %p\n",
+                  pHwAlloc->pHandle);
+        status = NV_ERR_GENERIC;
+        goto failed;
+    }
+
+    portMemSet(pFbAllocInfo, 0x0, sizeof(FB_ALLOC_INFO));
+    portMemSet(pFbAllocPageFormat, 0x0, sizeof(FB_ALLOC_PAGE_FORMAT));
+    pFbAllocInfo->pageFormat = pFbAllocPageFormat;
+    pFbAllocInfo->pageFormat->type = pUserParams->type;
+    pFbAllocInfo->hwResId = 0;
+    pFbAllocInfo->pad = 0;
+    pFbAllocInfo->height = pUserParams->height;
+    pFbAllocInfo->width = pUserParams->width;
+    pFbAllocInfo->pitch = pUserParams->pitch;
+    pFbAllocInfo->size = pUserParams->size;
+    pFbAllocInfo->origSize = pUserParams->size;
+    pFbAllocInfo->pageFormat->kind = pUserParams->kind;
+    pFbAllocInfo->offset = memmgrGetInvalidOffset_HAL(pGpu, pMemoryManager);
+    pFbAllocInfo->hClient = hClient;
+    pFbAllocInfo->hDevice = hDevice;
+    pFbAllocInfo->pageFormat->flags = pUserParams->flags;
+    pFbAllocInfo->pageFormat->attr = pUserParams->attr;
+    pFbAllocInfo->pageFormat->attr2 = pUserParams->attr2;
+    pFbAllocInfo->retAttr = pUserParams->attr;
+    pFbAllocInfo->retAttr2 = pUserParams->attr2;
+    pFbAllocInfo->comprCovg = pUserParams->comprCovg;
+    pFbAllocInfo->zcullCovg = 0;
+    pFbAllocInfo->internalflags = 0;
+
+    if ((pUserParams->flags & 
NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT) || + (pUserParams->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE)) + pFbAllocInfo->align = pUserParams->alignment; + else + pFbAllocInfo->align = RM_PAGE_SIZE; + + // Fetch RM page size + pageSize = memmgrDeterminePageSize(pMemoryManager, pFbAllocInfo->hClient, pFbAllocInfo->size, + pFbAllocInfo->format, pFbAllocInfo->pageFormat->flags, + &pFbAllocInfo->retAttr, &pFbAllocInfo->retAttr2); + if (pageSize == 0) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, "memmgrDeterminePageSize failed\n"); + } + + // Fetch memory alignment + status = memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, &pFbAllocInfo->size, &pFbAllocInfo->align, + pFbAllocInfo->alignPad, pFbAllocInfo->pageFormat->flags, + pFbAllocInfo->retAttr, pFbAllocInfo->retAttr2, 0); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "memmgrAllocDetermineAlignment failed\n"); + } + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + if ((status == NV_OK) && (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))) + { + { + NV_RM_RPC_MANAGE_HW_RESOURCE_ALLOC(pGpu, + hClient, + hDevice, + hMemory, + pFbAllocInfo, + status); + pHwAlloc->hwResource.isVgpuHostAllocated = NV_TRUE; + } + + pUserParams->uncompressedKind = pFbAllocInfo->uncompressedKind; + pUserParams->compPageShift = pFbAllocInfo->compPageShift; + pUserParams->compressedKind = pFbAllocInfo->compressedKind; + pUserParams->compTagLineMin = pFbAllocInfo->compTagLineMin; + pUserParams->compPageIndexLo = pFbAllocInfo->compPageIndexLo; + pUserParams->compPageIndexHi = pFbAllocInfo->compPageIndexHi; + pUserParams->compTagLineMultiplier = pFbAllocInfo->compTagLineMultiplier; + } + else + { + // + // Call into HAL to reserve any hardware resources for + // the specified memory type. + // If the alignment was changed due to a HW limitation, and the + // flag NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE is set, bad_argument + // will be passed back from the HAL + // + status = memmgrAllocHwResources(pGpu, pMemoryManager, pFbAllocInfo); + } + + // Is status bad or did we request attributes and they failed + if ((status != NV_OK) || ((pUserParams->attr) && (0x0 == pFbAllocInfo->retAttr))) + { + // + // probably means we passed in a bogus type or no tiling resources available + // when tiled memory attribute was set to REQUIRED + // + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "nvHalFbAlloc failure status = 0x%x Requested Attr 0x%x!\n", + status, pUserParams->attr); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "nvHalFbAlloc Out of Resources Requested=%x Returned=%x !\n", + pUserParams->attr, pFbAllocInfo->retAttr); + } + goto failed; + } + + // + // Refresh search parameters. 
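The failure check above treats two situations as fatal: the HAL returned an error, or the caller asked for specific attributes (attr != 0) and the HAL granted none of them (retAttr == 0), which typically means the requested resources were exhausted. A standalone sketch of that decision, with the attribute words treated as opaque bitmasks and 0 standing in for NV_OK; the bit values are invented for the example:

#include <stdio.h>

typedef unsigned int u32;

/* Returns 1 if the HW-resource reservation must be treated as failed. */
static int hwAllocFailed(int status, u32 requestedAttr, u32 grantedAttr)
{
    if (status != 0)
        return 1;                         /* HAL reported an outright error  */
    if (requestedAttr != 0 && grantedAttr == 0)
        return 1;                         /* asked for attributes, got none  */
    return 0;
}

int main(void)
{
    printf("%d\n", hwAllocFailed(0, 0x00000100, 0x00000000)); /* 1: out of resources  */
    printf("%d\n", hwAllocFailed(0, 0x00000100, 0x00000100)); /* 0: attribute granted */
    printf("%d\n", hwAllocFailed(0, 0x00000000, 0x00000000)); /* 0: nothing requested */
    return 0;
}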
+ // + pUserParams->pitch = pFbAllocInfo->pitch; + + pUserParams->height = pFbAllocInfo->height; + pHwAlloc->pad = NvU64_LO32(pFbAllocInfo->pad); + pUserParams->kind = pFbAllocInfo->pageFormat->kind; + pHwAlloc->hwResId = pFbAllocInfo->hwResId; + + // Special Case for Tiled Allocations + // Adjust the size so we don't use the partial tile areas + tiledAttr = DRF_VAL(OS32, _ATTR, _TILED, pFbAllocInfo->pageFormat->attr); + if (tiledAttr) + { + pUserParams->size = pFbAllocInfo->pitch * pFbAllocInfo->height; + if (pFbAllocInfo->size > pUserParams->size) + { + pHwAlloc->pad += (NvU64_LO32(pFbAllocInfo->size) - pUserParams->size); + } + else if (pFbAllocInfo->size != pUserParams->size) + { + NV_PRINTF(LEVEL_ERROR, + "nvrm: Hint Allocation Size Error %llx %llx\n", + pFbAllocInfo->size, pUserParams->size); + DBG_BREAKPOINT(); + } + } + else + { + pUserParams->size = pFbAllocInfo->size; // returned to caller + } + + pHwAlloc->hwResource.attr = pFbAllocInfo->retAttr; + pHwAlloc->hwResource.attr2 = pFbAllocInfo->retAttr2; + pHwAlloc->hwResource.comprCovg = pFbAllocInfo->comprCovg; + pHwAlloc->hwResource.ctagOffset = pFbAllocInfo->ctagOffset; + pHwAlloc->hwResource.hwResId = pFbAllocInfo->hwResId; + + *pAttr = pFbAllocInfo->retAttr; + *pAttr2 = pFbAllocInfo->retAttr2; + +failed: + portMemFree(pFbAllocInfo->pageFormat); + portMemFree(pFbAllocInfo); + + return status; +} + +void heapHwFree_IMPL +( + OBJGPU *pGpu, + Heap *pHeap, + Memory *pMemory, + NvU32 flags +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pFbAllocInfo == NULL) + { + NV_ASSERT(0); + goto exit; + } + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + if (pFbAllocPageFormat == NULL) { + NV_ASSERT(0); + goto exit; + } + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + pFbAllocInfo->pageFormat->type = pMemory->Type; + pFbAllocInfo->pageFormat->attr = pMemory->pHwResource->attr; + pFbAllocInfo->pageFormat->attr2 = pMemory->pHwResource->attr2; + pFbAllocInfo->hwResId = pMemory->pHwResource->hwResId; + pFbAllocInfo->size = pMemory->Length; + pFbAllocInfo->format = memdescGetPteKind(pMemory->pMemDesc); + pFbAllocInfo->offset = ~0; + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. 
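For tiled surfaces the code above reports pitch * height back to the caller as the usable size and books any extra space the HAL reserved beyond that footprint as padding; a reservation smaller than the surface is an accounting error. A standalone illustration of that bookkeeping, with all values invented for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t pitch    = 0x2000;           /* bytes per row (example)            */
    uint64_t height   = 600;              /* rows (example)                     */
    uint64_t hwSize   = 0x500000;         /* size the HAL actually reserved     */
    uint64_t surfSize = pitch * height;   /* footprint of the surface itself    */
    uint64_t pad      = 0;

    if (hwSize > surfSize)
    {
        /* Extra space (partial tiles, alignment) is tracked as padding. */
        pad = hwSize - surfSize;
    }
    else if (hwSize != surfSize)
    {
        /* The reservation should never be smaller than the surface. */
        fprintf(stderr, "size accounting error\n");
        return 1;
    }

    printf("surface 0x%llx, pad 0x%llx\n",
           (unsigned long long)surfSize, (unsigned long long)pad);
    return 0;
}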
+ // + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + { + NV_STATUS rmStatus = NV_OK; + + NV_RM_RPC_MANAGE_HW_RESOURCE_FREE(pGpu, + RES_GET_CLIENT_HANDLE(pMemory), + RES_GET_HANDLE(pMemory->pDevice), + RES_GET_HANDLE(pMemory), + flags, + rmStatus); + } + } + + if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) + { + memmgrFreeHwResources(pGpu, pMemoryManager, pFbAllocInfo); + } + +exit: + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); +} + +NV_STATUS heapFreeBlockCount_IMPL(OBJGPU *pGpu, Heap *pHeap, NvU32 *pCount) +{ + MEM_BLOCK *pMemBlock; + + pMemBlock = pHeap->pFreeBlockList; + *pCount = 0; + + if (pMemBlock == NULL) + { + return NV_OK; + } + + do + { + (*pCount)++; + pMemBlock = pMemBlock->u1.nextFree; + } while (pMemBlock != pHeap->pFreeBlockList); + + return NV_OK; +} + +NV_STATUS heapFreeBlockInfo_IMPL(OBJGPU *pGpu, Heap *pHeap, NvU32 Count, void *pVoidInfo) +{ + NVOS32_BLOCKINFO *pBlockInfo = pVoidInfo; + NvU32 actualCount; + MEM_BLOCK *pMemBlock; + NV_STATUS rmStatus = NV_ERR_GENERIC; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 maxCpuOffset; + + heapFreeBlockCount(pGpu, pHeap, &actualCount); + + if ((actualCount == Count) && (NULL != pBlockInfo)) + { + if (actualCount == 0) + { + return NV_OK; + } + + maxCpuOffset = (pMemoryManager->Ram.mapRamSizeMb*0x100000) - 1; + pMemBlock = pHeap->pFreeBlockList; + actualCount = 0; + do + { + pBlockInfo->startOffset = pMemBlock->begin; + pBlockInfo->size = pMemBlock->end - pMemBlock->begin + 1; + pBlockInfo->flags = 0x0; + if (pBlockInfo->startOffset < maxCpuOffset) + { + pBlockInfo->flags |= NVOS32_FLAGS_BLOCKINFO_VISIBILITY_CPU; + } + pMemBlock = pMemBlock->u1.nextFree; + pBlockInfo++; + actualCount++; + } while ((pMemBlock != pHeap->pFreeBlockList) && (actualCount < Count)); + + rmStatus = NV_OK; + } + + return rmStatus; +} + +/*! + * @brief: Adjust heap free accounting + * + * @param[in] pHeap Heap pointer + * @param[in] blockSize +: Size of block being freed + * -: Size of block being allocated + * @param[in] internalHeap NV_TRUE if the allocation is 'INTERNAL' + * + * @return void + */ + +static void +_heapAdjustFree +( + Heap *pHeap, + NvS64 blockSize, + NvBool internalHeap +) +{ + pHeap->free += blockSize; + + NV_ASSERT(pHeap->free <= pHeap->total); + if(pHeap->free > pHeap->total) + { + DBG_BREAKPOINT(); + } + + // Collect data on internal/external heap usage + if (internalHeap) + { + pHeap->currInternalUsage -= blockSize; + pHeap->peakInternalUsage = NV_MAX(pHeap->peakInternalUsage, pHeap->currInternalUsage); + } + else + { + pHeap->currExternalUsage -= blockSize; + pHeap->peakExternalUsage = NV_MAX(pHeap->peakExternalUsage, pHeap->currExternalUsage); + } +} + +static NV_STATUS +_heapProcessFreeBlock +( + OBJGPU *pGpu, + MEM_BLOCK *pBlockFree, + MEM_BLOCK **ppBlockNew, + MEM_BLOCK **ppBlockSplit, + Heap *pHeap, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + NvHandle memHandle, + OBJHEAP_ALLOC_DATA *pAllocData, + FB_ALLOC_INFO *pFbAllocInfo, + NvU64 alignPad, + NvU64 *offset +) +{ + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + MEM_BLOCK *pBlockNew = NULL, *pBlockSplit = NULL; + OBJOS *pOS = GPU_GET_OS(pGpu); + NV_STATUS status = NV_OK; + + if ((pAllocData->allocLo == pBlockFree->begin) && + (pAllocData->allocHi == pBlockFree->end)) + { + // + // Wow, exact match so replace free block. + // Remove from free list. 
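heapFreeBlockCount() above walks a circular free list: it starts at the head, follows nextFree, and stops when it arrives back at the head, which is why a do/while loop is used (the head itself must be counted exactly once). A minimal standalone sketch of the same traversal over a simplified node type:

#include <stddef.h>
#include <stdio.h>

typedef struct Block
{
    struct Block *nextFree;   /* circular: the last node points back to the head */
} Block;

static unsigned countFreeBlocks(const Block *head)
{
    const Block *cur = head;
    unsigned count = 0;

    if (head == NULL)
        return 0;

    /* do/while so the head node is counted before the termination test. */
    do
    {
        count++;
        cur = cur->nextFree;
    } while (cur != head);

    return count;
}

int main(void)
{
    Block a, b, c;
    a.nextFree = &b; b.nextFree = &c; c.nextFree = &a;   /* three-node ring */
    printf("%u free blocks\n", countFreeBlocks(&a));      /* prints 3        */
    return 0;
}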
+ // + pBlockFree->u1.nextFree->u0.prevFree = pBlockFree->u0.prevFree; + pBlockFree->u0.prevFree->u1.nextFree = pBlockFree->u1.nextFree; + + if (pHeap->pFreeBlockList == pBlockFree) + { + // + // This could be the last free block. + // + if (pBlockFree->u1.nextFree == pBlockFree) + pHeap->pFreeBlockList = NULL; + else + pHeap->pFreeBlockList = pBlockFree->u1.nextFree; + } + + // + // Set pVidHeapAlloc->owner/pVidHeapAlloc->type values here. + // Don't move because some fields are unions. + // + pBlockFree->owner = pVidHeapAlloc->owner; + pBlockFree->mhandle = memHandle; + pBlockFree->refCount = 1; + pBlockFree->u0.type = pVidHeapAlloc->type; + pBlockFree->align = pAllocData->allocAl; + pBlockFree->alignPad = alignPad; + pBlockFree->format = pFbAllocInfo->format; + + // tail end code below assumes 'blockNew' is the new block + pBlockNew = pBlockFree; + } + else if ((pAllocData->allocLo >= pBlockFree->begin) && + (pAllocData->allocHi <= pBlockFree->end)) + { + // + // Found a fit. + // It isn't exact, so we'll have to do a split + // + pBlockNew = portMemAllocNonPaged(sizeof(MEM_BLOCK)); + if (pBlockNew == NULL) + { + // Exit with failure and free any local allocations + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto _heapProcessFreeBlock_error; + } + + portMemSet(pBlockNew, 0, sizeof(MEM_BLOCK)); + + pBlockNew->owner = pVidHeapAlloc->owner; + pBlockNew->mhandle = memHandle; + pBlockNew->refCount = 1; + pBlockNew->u0.type = pVidHeapAlloc->type; + pBlockNew->begin = pAllocData->allocLo; + pBlockNew->align = pAllocData->allocAl; + pBlockNew->alignPad = alignPad; + pBlockNew->end = pAllocData->allocHi; + pBlockNew->format = pFbAllocInfo->format; + + if (gpuIsCacheOnlyModeEnabled(pGpu)) + { + // + // In L2 Cache only mode, set the beginning of the new allocation + // block to aligned (allocAl) offset rather then the start of + // the free block (allocLo). And that the end of the new block is + // is calculated as (allocSize - 1) from the beginning. + // This insures that we don't "over allocate" for the surface in the + // case where start of the free block is not properly aligned for both + // the grow down and grow up cases. + // Only applying this in L2 cache mode for now, as we don't want to "waste" + // L2 cache space, though wonder if there are any implications to doing + // it this way in normal operation. + // + pBlockNew->begin = pAllocData->allocAl; + pBlockNew->end = pBlockNew->begin + pAllocData->allocSize - 1; + } + + if ((pBlockFree->begin < pBlockNew->begin) && + (pBlockFree->end > pBlockNew->end)) + { + // Split free block in two. + pBlockSplit = portMemAllocNonPaged(sizeof(MEM_BLOCK)); + if (pBlockSplit == NULL) + { + // Exit with failure and free any local allocations + status = NV_ERR_NO_MEMORY; + goto _heapProcessFreeBlock_error; + } + + portMemSet(pBlockSplit, 0, sizeof(MEM_BLOCK)); + + // remove free block from rb-tree since node's range will be changed + if (NV_OK != (status = _heapUpdate(pHeap, pBlockFree, BLOCK_REMOVE))) + { + // Exit with failure and free any local allocations + goto _heapProcessFreeBlock_error; + } + + pBlockSplit->owner = NVOS32_BLOCK_TYPE_FREE; + pBlockSplit->format= 0; + pBlockSplit->begin = pBlockNew->end + 1; + pBlockSplit->align = pBlockSplit->begin; + pBlockSplit->alignPad = 0; + pBlockSplit->end = pBlockFree->end; + pBlockFree->end = pBlockNew->begin - 1; + // + // Insert free split block into free list. 
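When the fit is not exact, the free block is carved up: the new allocation takes [allocLo, allocHi], the original free block keeps the space in front of it, and a freshly allocated split block takes the space behind it. A standalone sketch of that range arithmetic, assuming inclusive begin/end bounds as in the code above; the addresses are made up for the example:

#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
    /* Free block and the allocation carved out of its middle (inclusive bounds). */
    u64 freeBegin = 0x100000, freeEnd = 0x4FFFFF;
    u64 allocLo   = 0x200000, allocHi = 0x2FFFFF;

    /* Leading fragment: the original free block is shrunk to end just before
     * the allocation. */
    u64 frontBegin = freeBegin;
    u64 frontEnd   = allocLo - 1;

    /* Trailing fragment: a new free block starting just after the allocation. */
    u64 backBegin  = allocHi + 1;
    u64 backEnd    = freeEnd;

    printf("front free : 0x%llx..0x%llx\n", frontBegin, frontEnd);
    printf("allocated  : 0x%llx..0x%llx\n", allocLo, allocHi);
    printf("back free  : 0x%llx..0x%llx\n", backBegin, backEnd);
    return 0;
}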
+ // + pBlockSplit->u1.nextFree = pBlockFree->u1.nextFree; + pBlockSplit->u0.prevFree = pBlockFree; + pBlockSplit->u1.nextFree->u0.prevFree = pBlockSplit; + pBlockFree->u1.nextFree = pBlockSplit; + // + // Insert new and split blocks into block list. + // + pBlockNew->next = pBlockSplit; + pBlockNew->prev = pBlockFree; + pBlockSplit->next = pBlockFree->next; + pBlockSplit->prev = pBlockNew; + pBlockFree->next = pBlockNew; + pBlockSplit->next->prev = pBlockSplit; + + // update numBlocks count + pHeap->numBlocks++; + + // re-insert updated free block into rb-tree + if (NV_OK != (status = _heapUpdate(pHeap, pBlockFree, BLOCK_SIZE_CHANGED))) + { + // + // Exit and report success. The new block was allocated, but the + // noncontig info is now out-of-sync with reality. + // + NV_PRINTF(LEVEL_ERROR, + "_heapUpdate failed to _SIZE_CHANGE block\n"); + goto _heapProcessFreeBlock_exit; + } + + // insert new and split blocks into rb-tree + if (NV_OK != (status = _heapUpdate(pHeap, pBlockNew, BLOCK_ADD))) + { + // + // Exit and report success. The new block was allocated, but the + // noncontig info is now out-of-sync with reality. + // + NV_PRINTF(LEVEL_ERROR, "_heapUpdate failed to _ADD block\n"); + goto _heapProcessFreeBlock_exit; + } + + if (NV_OK != (status = _heapUpdate(pHeap, pBlockSplit, BLOCK_ADD))) + { + // + // Exit and report success. The new block was allocated, but the + // noncontig info is now out-of-sync with reality. + // + NV_PRINTF(LEVEL_ERROR, "_heapUpdate failed to _ADD block\n"); + goto _heapProcessFreeBlock_exit; + } + } + else if (pBlockFree->end == pBlockNew->end) + { + // remove free block from rb-tree since node's range will be changed + if (NV_OK != (status = _heapUpdate(pHeap, pBlockFree, BLOCK_REMOVE))) + { + // Exit with failure and free any local allocations + goto _heapProcessFreeBlock_error; + } + + // + // New block inserted after free block. + // + pBlockFree->end = pBlockNew->begin - 1; + pBlockNew->next = pBlockFree->next; + pBlockNew->prev = pBlockFree; + pBlockFree->next->prev = pBlockNew; + pBlockFree->next = pBlockNew; + + // re-insert updated free block into rb-tree + if (NV_OK != (status = _heapUpdate(pHeap, pBlockFree, BLOCK_SIZE_CHANGED))) + { + // + // Exit and report success. The new block was allocated, but the + // noncontig info is now out-of-sync with reality. + // + NV_PRINTF(LEVEL_ERROR, + "_heapUpdate failed to _SIZE_CHANGE block\n"); + goto _heapProcessFreeBlock_exit; + } + + // insert new block into rb-tree + if (NV_OK != (status = _heapUpdate(pHeap, pBlockNew, BLOCK_ADD))) + { + // + // Exit and report success. The new block was allocated, but the + // noncontig info is now out-of-sync with reality. + // + NV_PRINTF(LEVEL_ERROR, "_heapUpdate failed to _ADD block\n"); + goto _heapProcessFreeBlock_exit; + } + } + else if (pBlockFree->begin == pBlockNew->begin) + { + // remove free block from rb-tree since node's range will be changed + if (NV_OK != (status = _heapUpdate(pHeap, pBlockFree, BLOCK_REMOVE))) + { + // Exit with failure and free any local allocations + goto _heapProcessFreeBlock_error; + } + + // + // New block inserted before free block. 
+ // + pBlockFree->begin = pBlockNew->end + 1; + pBlockFree->align = pBlockFree->begin; + pBlockNew->next = pBlockFree; + pBlockNew->prev = pBlockFree->prev; + pBlockFree->prev->next = pBlockNew; + pBlockFree->prev = pBlockNew; + if (pHeap->pBlockList == pBlockFree) + pHeap->pBlockList = pBlockNew; + + // re-insert updated free block into rb-tree + if (NV_OK != (status = _heapUpdate(pHeap, pBlockFree, BLOCK_SIZE_CHANGED))) + { + // + // Exit and report success. The new block was allocated, but the + // noncontig info is now out-of-sync with reality. + // + NV_PRINTF(LEVEL_ERROR, + "_heapUpdate failed to _SIZE_CHANGE block\n"); + goto _heapProcessFreeBlock_exit; + } + + // insert new block into rb-tree + if (NV_OK != (status = _heapUpdate(pHeap, pBlockNew, BLOCK_ADD))) + { + // + // Exit and report success. The new block was allocated, but the + // noncontig info is now out-of-sync with reality. + // + NV_PRINTF(LEVEL_ERROR, "_heapUpdate failed to _ADD block\n"); + goto _heapProcessFreeBlock_exit; + } + } + else + { + status = NV_ERR_NO_MEMORY; + // Exit with failure and free any local allocations + goto _heapProcessFreeBlock_error; + } + + pHeap->numBlocks++; + } + + if (NULL == pBlockNew) + status = NV_ERR_NO_MEMORY; + +_heapProcessFreeBlock_error: + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate block\n"); + + portMemFree(pBlockNew); + portMemFree(pBlockSplit); + + *ppBlockNew = NULL; + *ppBlockSplit = NULL; + + return status; + } + +_heapProcessFreeBlock_exit: + *ppBlockNew = pBlockNew; + *ppBlockSplit = pBlockSplit; + + // alignPad == 0 for all but >= NV5x + *offset = pBlockNew->align + pBlockNew->alignPad; + + // Reduce free amount by allocated block size. + _heapAdjustFree(pHeap, -((NvS64) (pBlockNew->end - pBlockNew->begin + 1)), + FLD_TEST_DRF(OS32, _ATTR2, _INTERNAL, _YES, pFbAllocInfo->pageFormat->attr2)); + + if (FLD_TEST_DRF(OS32, _ATTR2, _INTERNAL, _YES, pFbAllocInfo->pageFormat->attr2)) + { + pOS->osInternalReserveAllocCallback(*offset, pFbAllocInfo->size, pGpu->gpuId); + } + + return NV_OK; +} + +static void +_heapAddBlockToNoncontigList +( + Heap *pHeap, + MEM_BLOCK *pBlock +) +{ + if (NULL == pHeap->pNoncontigFreeBlockList) + { + pHeap->pNoncontigFreeBlockList = pBlock; + pBlock->nextFreeNoncontig = NULL; + pBlock->prevFreeNoncontig = NULL; + } + else + { + MEM_BLOCK *pNextBlock = pHeap->pNoncontigFreeBlockList; + NvU64 size, nextSize = 0; + size = pBlock->end - pBlock->begin + 1; + + NV_ASSERT(pBlock->prevFreeNoncontig == NULL && + pBlock->nextFreeNoncontig == NULL); + + // The noncontig block list is arranged in the descending order of size + while (NULL != pNextBlock) + { + nextSize = pNextBlock->end - pNextBlock->begin + 1; + + if (size > nextSize) + { + // Insert pBlock in front of pNextBlock + pBlock->prevFreeNoncontig = pNextBlock->prevFreeNoncontig; + pBlock->nextFreeNoncontig = pNextBlock; + pNextBlock->prevFreeNoncontig = pBlock; + + if (pHeap->pNoncontigFreeBlockList == pNextBlock) + { + // We inserted at the head of the list + pHeap->pNoncontigFreeBlockList = pBlock; + } + else + { + pBlock->prevFreeNoncontig->nextFreeNoncontig = pBlock; + } + + break; + } + + if (NULL == pNextBlock->nextFreeNoncontig) + { + // We reached the end of the list, insert here + pNextBlock->nextFreeNoncontig = pBlock; + pBlock->prevFreeNoncontig = pNextBlock; + pBlock->nextFreeNoncontig = NULL; + + break; + } + + pNextBlock = pNextBlock->nextFreeNoncontig; + } + } +} + +static void +_heapRemoveBlockFromNoncontigList +( + Heap *pHeap, + MEM_BLOCK *pBlock +) +{ + 
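The noncontig free list built above is kept sorted by descending block size so the allocator can consume the largest fragments first. A standalone sketch of the same insertion policy on a simplified, NULL-terminated doubly linked list (the sizes are example values):

#include <stddef.h>
#include <stdio.h>

typedef struct Node
{
    unsigned long long size;
    struct Node *prev, *next;
} Node;

/* Insert pNew so the list stays ordered from largest to smallest size;
 * returns the (possibly new) head of the list. */
static Node *insertDescending(Node *head, Node *pNew)
{
    Node *cur = head;

    pNew->prev = pNew->next = NULL;
    if (head == NULL)
        return pNew;                       /* first element becomes the head */

    for (;;)
    {
        if (pNew->size > cur->size)
        {
            /* Link pNew in front of the first smaller node. */
            pNew->prev = cur->prev;
            pNew->next = cur;
            cur->prev = pNew;
            if (pNew->prev != NULL)
                pNew->prev->next = pNew;
            return (cur == head) ? pNew : head;
        }
        if (cur->next == NULL)
        {
            cur->next = pNew;              /* smallest so far: append at the tail */
            pNew->prev = cur;
            return head;
        }
        cur = cur->next;
    }
}

int main(void)
{
    Node a = { 0x4000, NULL, NULL }, b = { 0x10000, NULL, NULL }, c = { 0x8000, NULL, NULL };
    Node *head = NULL;
    head = insertDescending(head, &a);
    head = insertDescending(head, &b);
    head = insertDescending(head, &c);
    for (Node *n = head; n != NULL; n = n->next)
        printf("0x%llx ", n->size);        /* prints: 0x10000 0x8000 0x4000 */
    printf("\n");
    return 0;
}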
// + // Unless pBlock is at the head of the list (and is the only element in the + // list), both prev and nextFreeNoncontig cannot be NULL at the same time. + // That would imply a bug in the noncontig list building code. + // + NV_ASSERT(pBlock == pHeap->pNoncontigFreeBlockList || + pBlock->prevFreeNoncontig != NULL || + pBlock->nextFreeNoncontig != NULL); + + // Removing first block? + if (pHeap->pNoncontigFreeBlockList == pBlock) + { + pHeap->pNoncontigFreeBlockList = pBlock->nextFreeNoncontig; + } + else + { + if (NULL != pBlock->prevFreeNoncontig) + { + pBlock->prevFreeNoncontig->nextFreeNoncontig + = pBlock->nextFreeNoncontig; + } + } + + // Removing last block? + if (NULL != pBlock->nextFreeNoncontig) + { + pBlock->nextFreeNoncontig->prevFreeNoncontig + = pBlock->prevFreeNoncontig; + } + + pBlock->nextFreeNoncontig = pBlock->prevFreeNoncontig = NULL; +} + +// +// The allocation is done using two loops. The first loop traverses the heap's +// free list to build a list of blocks that can satisfy the allocation. If we +// don't find enough blocks, we can exit quickly without needing to unwind, +// which can happen quite frequently in low memory or heavy fragmentation +// conditions. +// +// The second loop does the actual allocations. It calls _heapProcessFreeBlock() +// to cut down a free block into the required size, which can fail, albeit +// rarely. We need to unwind at that point. The two loops keep the unwinding +// as infrequent as possible. +// +static NV_STATUS +_heapAllocNoncontig +( + OBJGPU *pGpu, + NvHandle hClient, + Heap *pHeap, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + NvHandle memHandle, + OBJHEAP_ALLOC_DATA *pAllocData, + FB_ALLOC_INFO *pFbAllocInfo, + NvU32 textureClientIndex, + NvU64 alignPad, + NvU64 *offset, + MEMORY_DESCRIPTOR *pMemDesc, + HWRESOURCE_INFO **ppHwResource +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + NvBool bFirstBlock = NV_TRUE; + NvU32 pteIndexOffset = 0, i = 0; + NvU32 blockId = 0; + NV_STATUS status = NV_OK; + NvU32 pageSize = 0; + NvS64 numPagesLeft; + MEM_BLOCK *pCurrBlock; + MEM_BLOCK *pNextBlock; + MEM_BLOCK *pSavedAllocList = NULL; + MEM_BLOCK *pLastBlock = NULL; + MEM_BLOCK *pBlockNew, *pBlockSplit; + NvU32 k, shuffleStride = 1; + NvU64 addr, j, numPages; + RM_ATTR_PAGE_SIZE pageSizeAttr = dmaNvos32ToPageSizeAttr(pFbAllocInfo->retAttr, pFbAllocInfo->retAttr2); + + switch (pageSizeAttr) + { + case RM_ATTR_PAGE_SIZE_DEFAULT: + case RM_ATTR_PAGE_SIZE_INVALID: + NV_PRINTF(LEVEL_ERROR, "Invalid page size attribute!\n"); + return NV_ERR_INVALID_ARGUMENT; + case RM_ATTR_PAGE_SIZE_4KB: + pageSize = RM_PAGE_SIZE; + break; + case RM_ATTR_PAGE_SIZE_BIG: + { + pageSize = kgmmuGetMaxBigPageSize_HAL(pKernelGmmu); + break; + } + case RM_ATTR_PAGE_SIZE_HUGE: + { + NV_ASSERT_OR_RETURN(kgmmuIsHugePageSupported(pKernelGmmu), + NV_ERR_INVALID_ARGUMENT); + pageSize = RM_PAGE_SIZE_HUGE; + break; + } + case RM_ATTR_PAGE_SIZE_512MB: + { + NV_ASSERT_OR_RETURN(kgmmuIsPageSize512mbSupported(pKernelGmmu), + NV_ERR_INVALID_ARGUMENT); + pageSize = RM_PAGE_SIZE_512M; + break; + } + } + + // + // pAllocData->allocSize already incorporates pFbAllocInfo->size, + // which in turn is up aligned to pFbAllocInfo->align and alignPad, + // so nothing else needs to be added here. 
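The switch above resolves the requested page-size attribute to a byte count; the computation that follows then rounds the already padded allocation size up to a whole number of pages of that size. A standalone sketch of that arithmetic, assuming the page size is a power of two; the 64 KiB big-page size and the allocation size are example values only:

#include <stdio.h>

typedef unsigned long long u64;

/* Round x up to the next multiple of a power-of-two alignment. */
static u64 alignUp(u64 x, u64 align)
{
    return (x + align - 1) & ~(align - 1);
}

int main(void)
{
    u64 pageSize  = 64 * 1024;   /* example: 64 KiB big pages        */
    u64 allocSize = 0x41000;     /* padded allocation size in bytes  */

    u64 numPages = alignUp(allocSize, pageSize) / pageSize;

    printf("allocSize 0x%llx -> %llu pages of 0x%llx bytes\n",
           allocSize, numPages, pageSize);   /* 5 pages in this example */
    return 0;
}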
+ // + numPagesLeft = RM_ALIGN_UP(pAllocData->allocSize, pageSize) / pageSize; + NV_PRINTF(LEVEL_INFO, + "pageSize: 0x%x, numPagesLeft: 0x%llx, allocSize: 0x%llx\n", + pageSize / 1024, numPagesLeft, pAllocData->allocSize); + + for (pCurrBlock = pHeap->pNoncontigFreeBlockList; + numPagesLeft > 0 && NULL != pCurrBlock; + pCurrBlock = pNextBlock) + { + NvU64 blockBegin = 0; + NvU64 blockEnd = 0; + NvU64 blockAligned; + NvU64 blockSizeInPages, blockSize; + NvU64 alignPad; + NvU64 pteAddress; + NvU64 offset; + + // Get the next free block pointer before lists get re-linked + pNextBlock = pCurrBlock->nextFreeNoncontig; + + // Selecting blocks: Is this block completely out of range? + if ((pCurrBlock->end < pVidHeapAlloc->rangeLo) || + (pCurrBlock->begin > pVidHeapAlloc->rangeHi)) + { + continue; + } + + // Find the intersection of the block and the specified range. + blockBegin = ((pVidHeapAlloc->rangeLo >= pCurrBlock->begin) ? + pVidHeapAlloc->rangeLo : pCurrBlock->begin); + blockEnd = ((pVidHeapAlloc->rangeHi <= pCurrBlock->end) ? + pVidHeapAlloc->rangeHi : pCurrBlock->end); + + // Check if the derived block is usable + if ((blockBegin >= blockEnd) || + (blockEnd-blockBegin+1 < pageSize)) + { + // Skip if the usable size is invalid or insufficient. + continue; + } + + // + // Checks above should protect against underflow, but we might still + // end up with a post-aligned block that is unusable. + // "end" should be RM_PAGE_SIZE-1 aligned. + // + blockBegin = RM_ALIGN_UP(blockBegin, pageSize); + blockEnd = RM_ALIGN_DOWN(blockEnd+1, pageSize)-1; + + if (blockBegin >= blockEnd) + { + // + // When blockSize < page_size and blockBegin and/or blockEnd are + // not page aligned initially, the above alignment can cause + // blockBegin to become > blockEnd. + // + continue; + } + + // The first block has to handle pAllocData->alignment + if (bFirstBlock) + { + // Align the starting address of the block to + // pAllocData->alignment. + blockAligned = (blockBegin + + pAllocData->alignment - 1) / pAllocData->alignment + * pAllocData->alignment; + + // + // Check that we'll still be within this block when + // alignPad is added. + // + if (blockAligned + pFbAllocInfo->alignPad > blockEnd) + { + continue; + } + + // Then make sure this is page aligned. + blockBegin = RM_ALIGN_DOWN(blockAligned, pageSize); + + // + // blockBegin is now the page aligned starting address of a + // block that holds an address aligned to + // pAllocData->alignment, and can take padding from + // alignPad. + // + } + else + { + blockAligned = blockBegin; + } + + blockSizeInPages = (blockEnd - blockBegin + 1) / pageSize; + + // A usable block has to supply at least one page + if (blockSizeInPages < 1) + { + continue; + } + + // blockEnd may need to be corrected for the last page + if (((NvU64)numPagesLeft < blockSizeInPages)) + { + blockEnd = blockBegin + pageSize * numPagesLeft - 1; + blockSizeInPages = numPagesLeft; + } + + blockSize = blockEnd - blockBegin + 1; + + numPagesLeft -= blockSizeInPages; + + NV_PRINTF(LEVEL_INFO, + "\tblockId: %d, blockBegin: 0x%llx, blockEnd: 0x%llx, blockSize: " + "0x%llx, blockSizeInPages: 0x%llx, numPagesLeft: 0x%llx\n", + blockId, blockBegin, blockEnd, blockSize, blockSizeInPages, + numPagesLeft >= 0 ? 
numPagesLeft : 0); + + blockId++; + + // + // Set pAllocData values before the call to + // _heapProcessFreeBlock() + // + pAllocData->allocLo = blockBegin; + pAllocData->allocHi = blockEnd; + pAllocData->allocAl = blockAligned; + pAllocData->allocSize = blockSize; + + if (bFirstBlock) + { + alignPad = pFbAllocInfo->alignPad; + } + else + { + alignPad = 0; + } + + // + // Cut this new block down to size. pBlockNew will be the block to use + // when this returns. + // + if (NV_OK != (status = _heapProcessFreeBlock(pGpu, pCurrBlock, + &pBlockNew, &pBlockSplit, pHeap, pAllocRequest, + memHandle, pAllocData, pFbAllocInfo, + alignPad, &offset))) + { + NV_PRINTF(LEVEL_ERROR, + "ERROR: Could not process free block, error: 0x%x\n", + status); + goto unwind_and_exit; + } + + // Never fails + (void)_heapUpdate(pHeap, pBlockNew, BLOCK_FREE_STATE_CHANGED); + + // + // Save the allocation off in case we need to unwind + // This also ensures that all blocks that make up the noncontig + // allocation are strung together in a list, which is useful when + // freeing them. + // + if (pSavedAllocList == NULL) + { + // First block + pSavedAllocList = pLastBlock = pBlockNew; + pSavedAllocList->noncontigAllocListNext = NULL; + } + else + { + pLastBlock->noncontigAllocListNext = pBlockNew; + pLastBlock = pBlockNew; + pLastBlock->noncontigAllocListNext = NULL; + } + + pteAddress = RM_PAGE_ALIGN_DOWN(pBlockNew->begin); + + numPages = NV_MIN(blockSizeInPages, ((pMemDesc->PageCount - pteIndexOffset) * RM_PAGE_SIZE) / pageSize); + + if (pHeap->getProperty(pHeap, PDB_PROP_HEAP_PAGE_SHUFFLE)) + { + i = pHeap->shuffleStrideIndex; + shuffleStride = pHeap->shuffleStrides[i]; + + // Select a stride greater the the number of pages + while(numPages < shuffleStride && i > 0) + { + i--; + shuffleStride = pHeap->shuffleStrides[i]; + } + + pHeap->shuffleStrideIndex = (pHeap->shuffleStrideIndex + 1) % SHUFFLE_STRIDE_MAX; + } + + // + // Shuffling logic. + // We scatter the contiguous pages at multiple of stride length. + // For 5 pages with stride length 2, we have the following shuffling. + // Before: 0, 1, 2, 3, 4 + // After : 0, 2, 4, 1, 3 + // + for (i = 0; i < shuffleStride; i++) + { + for(j = i; j < numPages; j = j + shuffleStride) + { + addr = pteAddress + j * pageSize; + for (k = 0; k < pageSize/RM_PAGE_SIZE; k++) + { + // + // The memDesc has everything in terms of 4k pages. + // If allocationSize % pageSize != 0, there will not be enough PTEs in + // the memdesc for completely specifying the final block, but that's + // ok. The mapping code will be mapping in the whole pageSize final + // block anyway, and the heapBlockFree() code will free the whole + // block. + // + memdescSetPte(pMemDesc, AT_GPU, pteIndexOffset, addr); + pteIndexOffset++; + addr += RM_PAGE_SIZE; + } + } + } + + // + // If a client calls us with pVidHeapAlloc->type == + // NVOS32_TYPE_TEXTURE, but where flags are non-zero, we won't + // call objHeapSetTexturePlacement and initialize + // textureClientIndex to a proper value (default is 0xFFFFFFFF). + // In that case, we won't track this texture allocation. Bug + // 79586. 
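The shuffling loops above scatter what would otherwise be physically consecutive pages by visiting indices in strides: for stride S they emit indices 0, S, 2S, ... then 1, S+1, ... and so on. A standalone sketch that just prints the visit order, reproducing the 5-page, stride-2 example from the comment (0, 2, 4, 1, 3):

#include <stdio.h>

int main(void)
{
    unsigned numPages = 5;
    unsigned stride   = 2;
    unsigned i, j;

    /* Outer loop picks the starting phase, inner loop jumps by the stride. */
    for (i = 0; i < stride; i++)
    {
        for (j = i; j < numPages; j += stride)
        {
            printf("%u ", j);   /* index of the page placed next */
        }
    }
    printf("\n");               /* prints: 0 2 4 1 3 */
    return 0;
}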
+ // + if (pVidHeapAlloc->type == NVOS32_TYPE_TEXTURE && + textureClientIndex != 0xFFFFFFFF) + { + pBlockNew->textureId = hClient; + if (bFirstBlock) + pHeap->textureData[textureClientIndex].refCount++; + } + else + { + pBlockNew->textureId = 0; + } + + if (bFirstBlock) + { + pFbAllocInfo->offset = offset; + *ppHwResource = &pBlockNew->hwResource; + } + + pBlockNew->pMemDesc = pMemDesc; + pBlockNew->allocedMemDesc = bFirstBlock; // avoid multiple frees + + bFirstBlock = NV_FALSE; + } + + // Did we find enough pages? + if (numPagesLeft > 0) + { + NV_PRINTF(LEVEL_INFO, + "Could not satisfy request: allocSize: 0x%llx\n", + pAllocData->allocSize); + + status = NV_ERR_NO_MEMORY; + +unwind_and_exit: + + while (pSavedAllocList != NULL) + { + NV_STATUS unwindStatus; + + pCurrBlock = pSavedAllocList->noncontigAllocListNext; + + unwindStatus = _heapBlockFree(pGpu, pHeap, pSavedAllocList); + + if (unwindStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "ERROR: Could not free block, error 0x%x!\n", + unwindStatus); + } + + pSavedAllocList = pCurrBlock; + } + } + return status; +} + +// +// Explanation of BlockAction values: +// - BLOCK_ADD, +// A new block is added to the heap +// o The block's node structure needs to be inited. +// o The block is added to the rb-tree. +// o The block is added to the noncontig freelist. +// - BLOCK_REMOVE +// A block is removed from the heap for good +// o The block is removed from the rb-tree. +// o The block is removed from the noncontig freelist. +// - BLOCK_SIZE_CHANGED +// A block's size has changed +// o The rb-tree needs to be updated. +// o The noncontig freelist needs to be updated. +// - BLOCK_FREE_STATE_CHANGED +// if pBlock->owner != NVOS32_BLOCK_TYPE_FREE +// A block is allocated to a client +// o The block is removed from the noncontig freelist. +// else +// A block is freed by the client +// o The block is added to the noncontig freelist. +// +static NV_STATUS +_heapUpdate +( + Heap *pHeap, + MEM_BLOCK *pBlock, + BlockAction action +) +{ + // A new block is to be added, init its node structure. + if (BLOCK_ADD == action) + { + portMemSet((void *)&pBlock->node, 0, sizeof(NODE)); + pBlock->node.Data = (void *)pBlock; + } + + // Both new and updated blocks need to be re-inserted into the rb tree. + if ((BLOCK_SIZE_CHANGED == action) || + (BLOCK_ADD == action)) + { + pBlock->node.keyStart = pBlock->begin; + pBlock->node.keyEnd = pBlock->end; + + if (btreeInsert(&pBlock->node, &pHeap->pBlockTree) != NV_OK) + { + NV_ASSERT_FAILED("btreeInsert failed to ADD/SIZE_CHANGE block"); + return NV_ERR_INVALID_STATE; + } + } + + // + // Updated, new and freed blocks need to be added back to the noncontig + // freelist. + // + if ((BLOCK_SIZE_CHANGED == action) || + (BLOCK_ADD == action) || + (BLOCK_FREE_STATE_CHANGED == action && + pBlock->owner == NVOS32_BLOCK_TYPE_FREE)) + { + _heapAddBlockToNoncontigList(pHeap, pBlock); + } + + // Remove the block from the heap + if (BLOCK_REMOVE == action) + { + if (btreeUnlink(&pBlock->node, &pHeap->pBlockTree) != NV_OK) + { + NV_ASSERT_FAILED("btreeUnlink failed to REMOVE block"); + return NV_ERR_INVALID_STATE; + } + } + + // An allocated block is only removed from the noncontig freelist. + if ((BLOCK_REMOVE == action) || + ((BLOCK_FREE_STATE_CHANGED == action && + pBlock->owner != NVOS32_BLOCK_TYPE_FREE))) + { + _heapRemoveBlockFromNoncontigList(pHeap, pBlock); + } + + return NV_OK; +} + +static NvU32 +_heapGetPageBlackListGranularity(void) +{ + return RM_PAGE_SIZE; +} + +// +// This function blacklists pages from the heap. 
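Putting _heapUpdate() together with the split path shown earlier, the usual ordering is: take the free block out of the tree before its range changes, re-insert it once resized, add any newly created blocks, and finally flip the allocated block's free state so it leaves the noncontig free list. A compressed sketch of that call sequence, where heapUpdate() is a logging stub standing in for _heapUpdate() and the block arguments are reduced to labels:

#include <stdio.h>

typedef enum
{
    BLOCK_ADD,
    BLOCK_REMOVE,
    BLOCK_SIZE_CHANGED,
    BLOCK_FREE_STATE_CHANGED
} BlockAction;

/* Stub standing in for _heapUpdate(); here it only logs the action. */
static void heapUpdate(const char *block, BlockAction action)
{
    static const char *names[] =
    {
        "ADD", "REMOVE", "SIZE_CHANGED", "FREE_STATE_CHANGED"
    };
    printf("%-6s -> %s\n", block, names[action]);
}

int main(void)
{
    /* Order used when an allocation splits one free block in two. */
    heapUpdate("free",  BLOCK_REMOVE);             /* range is about to change    */
    heapUpdate("free",  BLOCK_SIZE_CHANGED);       /* re-insert the shrunk block  */
    heapUpdate("new",   BLOCK_ADD);                /* block handed to the caller  */
    heapUpdate("split", BLOCK_ADD);                /* trailing free fragment      */
    heapUpdate("new",   BLOCK_FREE_STATE_CHANGED); /* now owned, leave free list  */
    return 0;
}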
+// The addresses of the pages to blacklist are available from +// pHeap->blackListAddresses. +// +NV_STATUS +heapBlackListPages_IMPL +( + OBJGPU *pGpu, + Heap *pHeap +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + PMA *pPma = &pHeap->pmaObject; + NvU32 i = 0, j = 0; + NV_STATUS status = NV_OK; + BLACKLIST *pBlackList = &pHeap->blackList; + BLACKLIST_ADDRESSES *pAddresses = &pHeap->blackListAddresses; + NvU32 count = pHeap->blackListAddresses.count; + NvU32 staticBlacklistSize, dynamicBlacklistSize; + NvU32 dynamicRmBlackListedCount; + NvU32 staticRmBlackListedCount; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (NULL == pAddresses) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pBlackList->count != 0) + { + NV_PRINTF(LEVEL_ERROR, "Error: BlackList already exists!\n"); + return NV_ERR_INVALID_STATE; + } + + // + // We may not be able to allocate all pages requested, but alloc enough + // space anyway + // + pBlackList->pBlacklistChunks = portMemAllocNonPaged(sizeof(BLACKLIST_CHUNK) * pMemorySystemConfig->maximumBlacklistPages); + if (NULL == pBlackList->pBlacklistChunks) + { + NV_PRINTF(LEVEL_ERROR, "Could not allocate memory for blackList!\n"); + return NV_ERR_NO_MEMORY; + } + + portMemSet(pBlackList->pBlacklistChunks, 0, sizeof(BLACKLIST_CHUNK) * pMemorySystemConfig->maximumBlacklistPages); + + dynamicRmBlackListedCount = 0; + staticRmBlackListedCount = 0; + for (i = 0, j = 0; i < count; i++) + { + if (NV2080_CTRL_FB_OFFLINED_PAGES_INVALID_ADDRESS == pAddresses->data[i].address) + { + continue; + } + + // + // If PMA is enabled, only blacklist pages in the internal heap. + // PMA blacklisting is handled in pmaRegisterRegion. + // + if (memmgrIsPmaInitialized(pMemoryManager)) + { + if (heapIsPmaManaged(pGpu, pHeap, pAddresses->data[i].address, pAddresses->data[i].address)) + { + // Skipping non-internal address + continue; + } + } + + if ((pAddresses->data[i].type == NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_MULTIPLE_SBE) || + (pAddresses->data[i].type == NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE)) + { + dynamicRmBlackListedCount++; + } + else + { + staticRmBlackListedCount++; + } + + // Create a memdesc + status = memdescCreate(&pBlackList->pBlacklistChunks[j].pMemDesc, + pGpu, + RM_PAGE_SIZE, + RM_PAGE_SIZE, + NV_TRUE, + ADDR_FBMEM, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE | + MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE); + if (NV_OK != status) + { + portMemSet(&pBlackList->pBlacklistChunks[j], 0, sizeof(BLACKLIST_CHUNK)); + NV_PRINTF(LEVEL_ERROR, + "Error 0x%x creating blacklisted page memdesc for address 0x%llx, skipping\n", + status, pAddresses->data[i].address); + continue; + } + + if (pHeap->heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR) + pBlackList->pBlacklistChunks[j].pMemDesc->pHeap = pHeap; + + // This is how _FIXED_ADDRESS_ALLOCATE works + memdescSetPte(pBlackList->pBlacklistChunks[j].pMemDesc, + AT_GPU, 0, RM_PAGE_ALIGN_DOWN(pAddresses->data[i].address)); + + if (pHeap->heapType != HEAP_TYPE_PHYS_MEM_SUBALLOCATOR) + { + // + // Allocate memory for this page. This is marked as an internal RM + // allocation and WILL be saved/restored during suspend/resume. 
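Before anything is reserved, the loop above filters the raw offlined-page list: invalid sentinel addresses are skipped, PMA-managed addresses are left to PMA, and the remaining pages are classified as dynamically retired (DPR SBE/DBE) or statically retired so the two totals can be reported separately. A standalone sketch of that classification pass, using made-up type codes and an invalid-address sentinel as stand-ins for the NV2080 definitions:

#include <stdio.h>

typedef unsigned long long u64;

enum { SRC_STATIC = 0, SRC_DPR_MULTIPLE_SBE = 1, SRC_DPR_DBE = 2 };  /* stand-in codes    */
#define INVALID_ADDRESS (~0ull)                                      /* stand-in sentinel */

typedef struct { u64 address; int type; } OfflinedPage;

int main(void)
{
    OfflinedPage pages[] =
    {
        { 0x12340000ull, SRC_DPR_DBE },
        { INVALID_ADDRESS, SRC_STATIC },          /* skipped: invalid sentinel */
        { 0x56780000ull, SRC_STATIC },
        { 0x9abc0000ull, SRC_DPR_MULTIPLE_SBE },
    };
    unsigned dynamicCount = 0, staticCount = 0;
    unsigned i;

    for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++)
    {
        if (pages[i].address == INVALID_ADDRESS)
            continue;                              /* nothing to retire here        */

        if (pages[i].type == SRC_DPR_MULTIPLE_SBE || pages[i].type == SRC_DPR_DBE)
            dynamicCount++;                        /* retired at runtime by DPR     */
        else
            staticCount++;                         /* retired from the static list  */
    }

    printf("dynamic %u, static %u\n", dynamicCount, staticCount);
    return 0;
}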
+ // + status = memdescAlloc(pBlackList->pBlacklistChunks[j].pMemDesc); + if (NV_OK != status) + { + // No use for the memdesc if the page couldn't be allocated + memdescDestroy(pBlackList->pBlacklistChunks[j].pMemDesc); + + portMemSet(&pBlackList->pBlacklistChunks[j], 0, sizeof(BLACKLIST_CHUNK)); + + NV_PRINTF(LEVEL_ERROR, + "Error 0x%x blacklisting page at address 0x%llx, skipping\n", + status, pAddresses->data[i].address); + continue; + } + } + + // Page blacklisting is successful, add entries to the BLACKLIST + pBlackList->pBlacklistChunks[j].physOffset = pAddresses->data[i].address; + pBlackList->pBlacklistChunks[j].size = RM_PAGE_SIZE; + pBlackList->pBlacklistChunks[j].bIsValid = NV_TRUE; + + // If the page was successfully blacklisted, move to the next entry + j++; + } + + pBlackList->count = j; + + pmaGetBlacklistSize(pPma, &dynamicBlacklistSize, &staticBlacklistSize); + dynamicBlacklistSize = dynamicBlacklistSize >> 10; + staticBlacklistSize = staticBlacklistSize >> 10; + + dynamicBlacklistSize += (dynamicRmBlackListedCount * _heapGetPageBlackListGranularity()) >> 10; + staticBlacklistSize += (staticRmBlackListedCount * _heapGetPageBlackListGranularity()) >> 10; + + pHeap->dynamicBlacklistSize = dynamicBlacklistSize; + pHeap->staticBlacklistSize = staticBlacklistSize; + + if (0 == pBlackList->count) + { + // No address was blacklisted + portMemFree(pBlackList->pBlacklistChunks); + pBlackList->pBlacklistChunks = NULL; + } + + return NV_OK; +} + +// +// This function frees all blacklisted pages. +// The pHeap->blackList structure holds a list of memdescs, one for each +// blacklisted page. +// +NV_STATUS +heapFreeBlackListedPages_IMPL +( + OBJGPU *pGpu, + Heap *pHeap +) +{ + NvU32 i; + BLACKLIST *pBlackList = &pHeap->blackList; + + // Also free the blacklistAddresses data here + if (pHeap->blackListAddresses.data) + { + portMemFree(pHeap->blackListAddresses.data); + pHeap->blackListAddresses.count = 0; + pHeap->blackListAddresses.data = NULL; + } + + if (0 == pBlackList->count) + { + return NV_OK; + } + + if (NULL == pBlackList->pBlacklistChunks) + { + return NV_ERR_INVALID_STATE; + } + + for (i = 0; i < pBlackList->count; i++) + { + if (pBlackList->pBlacklistChunks[i].bIsValid) + { + // Free the blacklisted page + memdescFree(pBlackList->pBlacklistChunks[i].pMemDesc); + + // Free the memdesc + memdescDestroy(pBlackList->pBlacklistChunks[i].pMemDesc); + } + } + + portMemFree(pBlackList->pBlacklistChunks); + + pBlackList->count = 0; + pBlackList->pBlacklistChunks = NULL; + + return NV_OK; +} + +NV_STATUS +heapStorePendingBlackList_IMPL +( + OBJGPU *pGpu, + Heap *pHeap, + NvU64 pageAddressesWithEccOn, + NvU64 pageAddressWithEccOff +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU64 physicalAddress; + NvU64 pageNumber; + BLACKLIST *pBlacklist = &pHeap->blackList; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (pMemorySystemConfig->bEnabledEccFBPA) + { + physicalAddress = pageAddressesWithEccOn; + } + else + { + physicalAddress = pageAddressWithEccOff; + } + + pageNumber = (physicalAddress >> RM_PAGE_SHIFT); + + // This code is called only when DBE happens, so marking it as type DBE + status = heapAddPageToBlackList(pGpu, pHeap, + DRF_VAL64(_HEAP, _PAGE_OFFLINE, _PAGE_NUMBER, pageNumber), + NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE); + if (NV_OK != status) + { + // No more space in the blacklist + NV_PRINTF(LEVEL_ERROR, "No more space in 
blacklist, status: %x!\n", status); + return status; + } + + if (memmgrIsPmaInitialized(pMemoryManager)) + { + if (heapIsPmaManaged(pGpu, pHeap, physicalAddress, physicalAddress)) + { + NV_PRINTF(LEVEL_INFO, "Calling PMA helper function to blacklist page offset: %llx\n", physicalAddress); + status = pmaAddToBlacklistTracking(&pHeap->pmaObject, physicalAddress); + return status; + } + else + { + // blacklisting needs to be done like CBC error recovery + return NV_ERR_RESET_REQUIRED; + } + } + else + { + if (pMemoryManager->bEnableDynamicPageOfflining) + { + // adding a new entry to heap managed blacklist + if (pBlacklist->count == pMemorySystemConfig->maximumBlacklistPages) + { + NV_PRINTF(LEVEL_ERROR, "We have blacklisted maximum number of pages possible. returning error \n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + portMemSet(&pBlacklist->pBlacklistChunks[pBlacklist->count], 0 , sizeof(BLACKLIST_CHUNK)); + pBlacklist->pBlacklistChunks[pBlacklist->count].physOffset = physicalAddress; + pBlacklist->pBlacklistChunks[pBlacklist->count].size = RM_PAGE_SIZE; + pBlacklist->pBlacklistChunks[pBlacklist->count].bPendingRetirement = NV_TRUE; + pBlacklist->count++; + } + } + return status; +} + +// +// This function copies the addresses of pages to be blacklisted from +// pPageNumbers into Heap's internal blackListAddresses structure. +// +NV_STATUS +heapStoreBlackList_IMPL +( + OBJGPU *pGpu, + Heap *pHeap, + NvU64 *pPageNumbersWithEccOn, + NvU64 *pPageNumbersWithECcOff, + NvU32 maxInputPages +) +{ + NvU32 i; + NvU64 *pPageNumbers; + NV_STATUS status = NV_OK; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (pMemorySystemConfig->bEnabledEccFBPA) + { + pPageNumbers = pPageNumbersWithEccOn; + } + else + { + pPageNumbers = pPageNumbersWithECcOff; + } + + for (i = 0; i < maxInputPages; i++) + { + status = heapAddPageToBlackList(pGpu, pHeap, + DRF_VAL64(_HEAP, _PAGE_OFFLINE, _PAGE_NUMBER, pPageNumbers[i]), + (NvU32)DRF_VAL64(_HEAP, _PAGE_OFFLINE, _TYPE, pPageNumbers[i])); + if (NV_OK != status) + { + // No more space in the blacklist + NV_PRINTF(LEVEL_ERROR, "No more space in blacklist!\n"); + return status; + } + } + + return status; +} + +NV_STATUS +heapAddPageToBlackList_IMPL +( + OBJGPU *pGpu, + Heap *pHeap, + NvU64 pageNumber, + NvU32 type +) +{ + NvU32 index = pHeap->blackListAddresses.count; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (index == pMemorySystemConfig->maximumBlacklistPages) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (pHeap->blackListAddresses.data == NULL) + { + NvU64 listSize = sizeof(BLACKLIST_ADDRESS) * pMemorySystemConfig->maximumBlacklistPages; + + pHeap->blackListAddresses.data = portMemAllocNonPaged(listSize); + if (pHeap->blackListAddresses.data == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pHeap->blackListAddresses.data, 0, listSize); + } + + pHeap->blackListAddresses.data[index].address = (pageNumber << RM_PAGE_SHIFT); + pHeap->blackListAddresses.data[index].type = type; + + pHeap->blackListAddresses.count++; + + NV_PRINTF(LEVEL_INFO, "Added 0x%0llx (blacklist count: %u)\n", + pHeap->blackListAddresses.data[index].address, + pHeap->blackListAddresses.count); + + return NV_OK; +} + +/*! 
+ * @brief: Identify if an FB range is PMA-managed + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap pointer + * @param[in] offset FB block offset + * @param[in] limit FB block limit + * + * @return NV_TRUE offset is PMA-managed + * NV_FALSE offset is not managed by PMA + */ +NvBool +heapIsPmaManaged_IMPL +( + OBJGPU *pGpu, + Heap *pHeap, + NvU64 offset, + NvU64 limit +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (memmgrIsPmaInitialized(pMemoryManager)) + { + NvU32 i; + + NV_ASSERT(offset <= limit); + + for (i = 0; i < pHeap->pmaObject.regSize; i++) + { + if ((offset >= pHeap->pmaObject.pRegDescriptors[i]->base) && + (limit <= pHeap->pmaObject.pRegDescriptors[i]->limit)) + { + NV_PRINTF(LEVEL_INFO, + "range %llx..%llx resides in PMA region=%llx..%llx\n", + offset, limit, + pHeap->pmaObject.pRegDescriptors[i]->base, + pHeap->pmaObject.pRegDescriptors[i]->limit); + return NV_TRUE; + } +#if defined(DEBUG) + // Check for straddling + else if ( + (limit >= pHeap->pmaObject.pRegDescriptors[i]->base) && + (offset <= pHeap->pmaObject.pRegDescriptors[i]->limit)) + { + NV_PRINTF(LEVEL_ERROR, + "range %llx..%llx straddles in PMA region=%llx..%llx\n", + offset, limit, + pHeap->pmaObject.pRegDescriptors[i]->base, + pHeap->pmaObject.pRegDescriptors[i]->limit); + } +#endif //defined(DEBUG) + } + } + + return(NV_FALSE); +} + +/*! + * @brief Increase the reference count + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap pointer + * + * @return Current refcount value + */ +NvU32 +heapAddRef_IMPL +( + Heap *pHeap +) +{ + if (pHeap == NULL) + return 0; + + return (NvU32)portAtomicIncrementS32((NvS32*)&(pHeap->refCount)); +} + +/*! + * @brief Increase the reference count + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap pointer + * + * @return Current refcount value + */ +NvU32 +heapRemoveRef_IMPL +( + Heap *pHeap +) +{ + NvU32 refCount = 0; + + if (pHeap == NULL) + return 0; + + refCount = (NvU32)portAtomicDecrementS32((NvS32*)&(pHeap->refCount)); + if (refCount == 0) + { + objDelete(pHeap); + } + + return refCount; +} + +/*! + * @brief Adjust the heap size + * + * @param[in] pHeap Heap pointer + * @param[in] resizeBy NVS64 resizeBy value + */ + +NV_STATUS heapResize_IMPL +( + Heap *pHeap, + NvS64 resizeBy +) +{ + MEM_BLOCK *pBlockLast; + MEM_BLOCK *pBlockNew; + NV_STATUS status = NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pHeap); + + NV_ASSERT_OR_RETURN(pHeap->heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR, NV_ERR_NOT_SUPPORTED); + + // Free all blacklisted pages + if ((pHeap->blackListAddresses.count != 0) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT) && + gpuCheckPageRetirementSupport_HAL(pGpu)) + { + heapFreeBlackListedPages(pGpu, pHeap); + } + + // Go to last block if the heap w.r.t. the start address + pBlockLast = pHeap->pBlockList; + while (pBlockLast->next != pHeap->pBlockList) + pBlockLast = pBlockLast->next; + + if (resizeBy < 0) // Shrink the allocation + { + NV_ASSERT_OR_RETURN(pBlockLast->owner == NVOS32_BLOCK_TYPE_FREE, NV_ERR_NO_MEMORY); + NV_ASSERT_OR_RETURN((pBlockLast->end - pBlockLast->begin + resizeBy > 0), NV_ERR_INVALID_LIMIT); + pBlockLast->end += resizeBy; + } + else // Grow the allocation + { + if (pBlockLast->owner == NVOS32_BLOCK_TYPE_FREE) + { + // Found a free block at the end Just resize it. + pBlockLast->end += resizeBy; + } + else + { + // Could not find a free block at the end. Add a new free block. 
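heapResize() only ever adjusts the tail of the heap: a negative resizeBy shrinks the last block, which must be free and stay non-empty, while a positive value either extends a trailing free block or, as in the path that continues below, appends a brand new free block after the last allocated one. A standalone sketch of just that end-of-heap arithmetic, assuming inclusive block bounds; the addresses and resize amount are example values:

#include <stdio.h>

typedef long long s64;
typedef unsigned long long u64;

int main(void)
{
    /* Last block of the heap, inclusive bounds, currently free. */
    u64 lastBegin  = 0x700000;
    u64 lastEnd    = 0x7FFFFF;
    int lastIsFree = 1;

    s64 resizeBy = -0x40000;       /* negative: shrink the heap by 256 KiB */

    if (resizeBy < 0)
    {
        /* Shrinking is only legal if the tail is free and stays non-empty. */
        if (!lastIsFree || (s64)(lastEnd - lastBegin) + resizeBy <= 0)
        {
            fprintf(stderr, "cannot shrink\n");
            return 1;
        }
        lastEnd += resizeBy;
    }
    else if (lastIsFree)
    {
        lastEnd += resizeBy;       /* growing: just extend the trailing free block */
    }
    /* else: a new free block [lastEnd + 1, lastEnd + resizeBy] would be appended. */

    printf("tail block now 0x%llx..0x%llx\n", lastBegin, lastEnd);
    return 0;
}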
+ pBlockNew = portMemAllocNonPaged(sizeof(MEM_BLOCK)); + if (pBlockNew != NULL) + { + + portMemSet(pBlockNew, 0, sizeof(MEM_BLOCK)); + + pBlockNew->owner = NVOS32_BLOCK_TYPE_FREE; + pBlockNew->refCount = 1; + + // Set block boundaries + pBlockNew->begin = pBlockLast->end + 1; + pBlockNew->end = pBlockLast->end + resizeBy; + + if (pHeap->pFreeBlockList == NULL) + pHeap->pFreeBlockList = pBlockNew; + + // Add the block in the free blocks list + pBlockNew->u1.nextFree = pHeap->pFreeBlockList; + pBlockNew->u0.prevFree = pHeap->pFreeBlockList->u0.prevFree; + pBlockNew->u1.nextFree->u0.prevFree = pBlockNew; + pBlockNew->u0.prevFree->u1.nextFree = pBlockNew; + + // Add the block in the blocks list + pBlockNew->next = pBlockLast->next; + pBlockNew->prev = pBlockLast; + pBlockNew->next->prev = pBlockNew; + pBlockNew->prev->next = pBlockNew; + + if ((status = _heapUpdate(pHeap, pBlockNew, BLOCK_ADD)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "_heapUpdate failed to _ADD block\n"); + + if (pHeap->pFreeBlockList == pBlockNew) // There was no free block in the heap. + pHeap->pFreeBlockList = NULL; // We had added this one. + portMemFree(pBlockNew); + } + else + { + pHeap->numBlocks++; + } + } + } + } + + if (status == NV_OK) + { + pHeap->total += resizeBy; + pHeap->free += resizeBy; + + status = memmgrGetBlackListPagesForHeap_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pHeap); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "Failed to read blackList pages (0x%x).\n", + status); + } + + heapFilterBlackListPages(pHeap, pHeap->base, pHeap->total); + + if (pHeap->blackListAddresses.count != 0) + { + status = heapBlackListPages(pGpu, pHeap); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Error 0x%x creating blacklist\n", + status); + } + } + } + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c new file mode 100644 index 000000000..4f8ef92b6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_ctrl.c @@ -0,0 +1,405 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This module contains Nv04Control support for heap allocations +* represented by NV04_MEMORY class instantiations. 
+* +******************************************************************************/ + +#include "core/core.h" +#include "os/os.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" +#include "platform/platform.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "rmapi/client_resource.h" +#include "rmapi/control.h" +#include "gpu_mgr/gpu_mgr.h" +#include "rmapi/rs_utils.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "vgpu/rpc.h" + +#include "ctrl/ctrl0041.h" + +NV_STATUS +memCtrlCmdGetSurfaceCompressionCoverageLvm_IMPL +( + Memory *pMemory, + NV0041_CTRL_GET_SURFACE_COMPRESSION_COVERAGE_PARAMS *pParams +) +{ + NvU64 _zero = 0, _contigSegmentSize; + NvU32 _memAperture, _memFormat, _comprOffset, _zcullId; + NvU32 _gpuCacheAttr, _gpuP2PCacheAttr; + NV_STATUS status = NV_OK; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + OBJGPU *pGpu = pMemory->pGpu; + + if (IS_VIRTUAL(pGpu)) + { + NV_RM_RPC_CONTROL(pRmCtrlParams->pGpu, pRmCtrlParams->hClient, + pRmCtrlParams->hObject, pRmCtrlParams->cmd, + pRmCtrlParams->pParams, pRmCtrlParams->paramsSize, status); + return status; + } + + if (pParams->hSubDevice) + { + // Alloc operation in unicast mode + NvHandle hDevice; + if ((status = CliSetSubDeviceContext(pRmCtrlParams->hClient, pParams->hSubDevice, &hDevice, &pGpu)) != NV_OK) + { + return status; + } + } + + status = memmgrGetSurfacePhysAttr_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pMemory, + &_zero, &_memAperture, &_memFormat, + &_comprOffset, &pParams->format, &pParams->lineMin, &pParams->lineMax, + &_zcullId, + &_gpuCacheAttr, &_gpuP2PCacheAttr, + &_contigSegmentSize); + return status; +} + +NV_STATUS +memCtrlCmdGetSurfacePartitionStrideLvm_IMPL +( + Memory *pMemory, + NV0041_CTRL_GET_SURFACE_PARTITION_STRIDE_PARAMS *pParams +) +{ + // Only partitionStride == 256B is supported by RM. 
+ pParams->partitionStride = 256; + + return NV_OK; +} + +NV_STATUS +memCtrlCmdGetSurfaceInfoLvm_IMPL +( + Memory *pMemory, + NV0041_CTRL_GET_SURFACE_INFO_PARAMS *pSurfaceInfoParams +) +{ + NV0041_CTRL_SURFACE_INFO *pSurfaceInfos = NvP64_VALUE(pSurfaceInfoParams->surfaceInfoList); + OBJGPU *pGpu = pMemory->pGpu; + NV_STATUS status = NV_OK; + NvU32 i; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvOffset zero, contigSegmentSize; + NvU32 memAperture, comprOffset, comprFormat, lineMin, lineMax, zcullId; + NvU32 memFormat, gpuCacheAttr, gpuP2PCacheAttr; + NvU32 data = 0; + NvU64 size = 0; + + if ((pSurfaceInfoParams->surfaceInfoListSize == 0) || pSurfaceInfos == NULL) + return NV_OK; + + // step thru the list + for (i = 0; i < pSurfaceInfoParams->surfaceInfoListSize; i++) + { + status = NV_OK; + data = 0; + + switch (pSurfaceInfos[i].index) + { + case NV0041_CTRL_SURFACE_INFO_INDEX_ATTRS: + { + if (pMemory->pHwResource->attr & DRF_DEF(OS32, _ATTR, _COMPR, _REQUIRED)) + data |= NV0041_CTRL_SURFACE_INFO_ATTRS_COMPR; + if (pMemory->pHwResource->attr & DRF_DEF(OS32, _ATTR, _ZCULL, _REQUIRED)) + data |= NV0041_CTRL_SURFACE_INFO_ATTRS_ZCULL; + break; + } + case NV0041_CTRL_SURFACE_INFO_INDEX_COMPR_COVERAGE: + { + if (pMemory->pHwResource->attr & DRF_DEF(OS32, _ATTR, _COMPR, _REQUIRED)) + { + zero = 0; + status = memmgrGetSurfacePhysAttr_HAL(pGpu, pMemoryManager, + pMemory, + &zero, + &memAperture, &memFormat, + &comprOffset, &comprFormat, &lineMin, &lineMax, + &zcullId, + &gpuCacheAttr, &gpuP2PCacheAttr, + &contigSegmentSize); + if (status == NV_OK) + { + // report compression coverage in units of 64k + data = NvOffset_LO32(contigSegmentSize / 0x10000); + } + } + break; + } + case NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_SIZE: + { + // Report the size of the physical allocation + size = (memdescGetSize(pMemory->pMemDesc) / NV0041_CTRL_SURFACE_INFO_PHYS_SIZE_SCALE_FACTOR); + data = NvOffset_LO32(size); + NV_ASSERT_OR_RETURN((NvU64)data == size, NV_ERR_OUT_OF_RANGE); + break; + } + case NV0041_CTRL_SURFACE_INFO_INDEX_PHYS_ATTR: + { + data = pMemory->pHwResource->attr & (DRF_SHIFTMASK(NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_PAGE_SIZE) | DRF_SHIFTMASK(NV0041_CTRL_SURFACE_INFO_PHYS_ATTR_CPU_COHERENCY)); + break; + } + case NV0041_CTRL_SURFACE_INFO_INDEX_ADDR_SPACE_TYPE: + { + // This is equivalent to NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE on the surface. 
+ NV_ADDRESS_SPACE addrSpace; + + addrSpace = memdescGetAddressSpace(pMemory->pMemDesc); + if (addrSpace == ADDR_SYSMEM) + { + if (memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_BAR0_REFLECT)) + { + addrSpace = ADDR_REGMEM; + } + else if (memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_BAR1_REFLECT)) + { + addrSpace = ADDR_FBMEM; + } + } + switch (addrSpace) + { + case ADDR_SYSMEM: + { + data = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM; + break; + } + case ADDR_FBMEM: + { + data = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM; + break; + } + case ADDR_REGMEM: + { + data = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_REGMEM; + break; + } + default: + { + data = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + break; + } + } + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + // stop processing list at first failure + if (status != NV_OK) + break; + + pSurfaceInfos[i].data = data; + } + + return status; +} + + +NV_STATUS +memCtrlCmdSurfaceFlushGpuCache_IMPL +( + Memory *pMemory, + NV0041_CTRL_SURFACE_FLUSH_GPU_CACHE_PARAMS *pCacheFlushParams +) +{ + OBJGPU *pGpu = pMemory->pGpu; + NV_STATUS status = NV_OK; + FB_CACHE_OP cacheOp = FB_CACHE_OP_UNDEFINED; + FB_CACHE_MEMTYPE memType = FB_CACHE_MEM_UNDEFINED; + + NV_PRINTF(LEVEL_INFO, "\n"); + + if (FLD_TEST_DRF(0041, _CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS, _WRITE_BACK, + _YES, pCacheFlushParams->flags) && + FLD_TEST_DRF(0041, _CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS, _INVALIDATE, + _YES, pCacheFlushParams->flags)) + { + cacheOp = FB_CACHE_EVICT; + } + else if (FLD_TEST_DRF(0041, _CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS, _WRITE_BACK, + _NO, pCacheFlushParams->flags) && + FLD_TEST_DRF(0041, _CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS, _INVALIDATE, + _YES, pCacheFlushParams->flags)) + { + cacheOp = FB_CACHE_INVALIDATE; + } + else if (FLD_TEST_DRF(0041, _CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS, _WRITE_BACK, + _YES, pCacheFlushParams->flags) && + FLD_TEST_DRF(0041, _CTRL_SURFACE_FLUSH_GPU_CACHE_FLAGS, _INVALIDATE, + _NO, pCacheFlushParams->flags)) + { + cacheOp = FB_CACHE_WRITEBACK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Must specify at least one of WRITE_BACK or INVALIDATE\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + if (!pMemory->pMemDesc) + { + NV_PRINTF(LEVEL_ERROR, + "Memory descriptor not found for hMemory 0x%x, unable to flush!\n", + RES_GET_HANDLE(pMemory)); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + if (memdescGetGpuCacheAttrib(pMemory->pMemDesc) != NV_MEMORY_CACHED) + { + NV_PRINTF(LEVEL_ERROR, "Cannot flush an uncached allocation\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + switch (memdescGetAddressSpace(pMemory->pMemDesc)) + { + case ADDR_FBMEM: + memType = FB_CACHE_VIDEO_MEMORY; + break; + case ADDR_SYSMEM: + memType = FB_CACHE_SYSTEM_MEMORY; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Cannot flush address space 0x%x\n", + memdescGetAddressSpace(pMemory->pMemDesc)); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + break; + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + status = kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, pMemory->pMemDesc, memType, cacheOp); + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + } + SLI_LOOP_END + + return status; +} + +NV_STATUS +memCtrlCmdGetMemPageSize_IMPL +( + Memory *pMemory, + NV0041_CTRL_GET_MEM_PAGE_SIZE_PARAMS *pPageSizeParams +) +{ + OBJGPU *pGpu = pMemory->pGpu; + PMEMORY_DESCRIPTOR pTempMemDesc = NULL; + NvU32 
tempPageSize = 0; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + pTempMemDesc = memdescGetMemDescFromGpu(pMemory->pMemDesc, pGpu); + + // Take the first, assert if inconsistent + if (tempPageSize == 0) + { + tempPageSize = memdescGetPageSize(pTempMemDesc, AT_GPU); + } + else + { + NV_ASSERT(tempPageSize == memdescGetPageSize(pTempMemDesc, AT_GPU)); + } + } + SLI_LOOP_END + + pPageSizeParams->pageSize = tempPageSize; + + return NV_OK; +} + +NV_STATUS +memCtrlCmdUpdateSurfaceCompression_IMPL +( + Memory *pMemory, + NV0041_CTRL_UPDATE_SURFACE_COMPRESSION_PARAMS *pUpdateParams +) +{ + OBJGPU *pGpu = pMemory->pGpu; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pMemory->pHwResource, NV_ERR_INVALID_ARGUMENT); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + NV_STATUS tmpStatus = memmgrUpdateSurfaceCompression_HAL(pGpu, pMemoryManager, pMemory, pUpdateParams->bRelease); + if (tmpStatus != NV_OK) + { + status = tmpStatus; + } + } + SLI_LOOP_END + + return status; +} + +NV_STATUS +memCtrlCmdSetTag_IMPL +( + Memory *pMemory, + NV0041_CTRL_CMD_SET_TAG_PARAMS *pParams +) +{ + pMemory->tag = pParams->tag; + + return NV_OK; +} + +NV_STATUS +memCtrlCmdGetTag_IMPL +( + Memory *pMemory, + NV0041_CTRL_CMD_GET_TAG_PARAMS *pParams +) +{ + pParams->tag = pMemory->tag; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c new file mode 100644 index 000000000..694112de9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_desc.c @@ -0,0 +1,4346 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief Memory descriptor handling utility routines. 
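 *
 * Rough lifecycle sketch (illustrative only; error paths trimmed, and a valid
 * pGpu from the surrounding RM context is assumed):
 *
 * @code
 *   MEMORY_DESCRIPTOR *pMemDesc = NULL;
 *   void *pCpuAddr = NULL;
 *   void *pPriv    = NULL;
 *
 *   // Describe a 64KB, physically contiguous, uncached sysmem buffer
 *   NV_ASSERT_OK(memdescCreate(&pMemDesc, pGpu, 0x10000, 0, NV_TRUE,
 *                              ADDR_SYSMEM, NV_MEMORY_UNCACHED, 0));
 *   NV_ASSERT_OK(memdescAlloc(pMemDesc));                  // back it with pages
 *
 *   NV_ASSERT_OK(memdescMapOld(pMemDesc, 0, 0x10000, NV_TRUE,
 *                              NV_PROTECT_READ_WRITE, &pCpuAddr, &pPriv));
 *   // ... CPU access through pCpuAddr ...
 *   memdescUnmapOld(pMemDesc, NV_TRUE, 0, pCpuAddr, pPriv);
 *
 *   memdescFree(pMemDesc);                                 // release the pages
 *   memdescDestroy(pMemDesc);                              // release the descriptor
 * @endcode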
+ */ + +#include "gpu/mem_mgr/mem_desc.h" + +#include "gpu/bif/kernel_bif.h" + +#include "os/os.h" + +#include "gpu_mgr/gpu_mgr.h" +#include "core/locks.h" +#include "mem_mgr/io_vaspace.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "core/system.h" + +#include "gpu/mem_mgr/virt_mem_allocator.h" + +#include "rmconfig.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/mem_utils.h" + +#include "gpu/mem_mgr/heap.h" + +#include "gpu/mem_sys/kern_mem_sys.h" +#include "mem_mgr/video_mem.h" + +#include "mem_mgr/ctx_buf_pool.h" + +#include "nvrm_registry.h" // For memdescOverrideInstLoc*() + +#include "deprecated/rmapi_deprecated.h" +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR + +#include "gpu/bus/kern_bus.h" + +// Structure for keeping track of BAR1 mappings +typedef struct +{ + NvU64 FbAperture; + NvU64 FbApertureLen; + NvP64 pPriv; +} FB_MAPPING_INFO; + +// +// Common address space lists +// +const NV_ADDRESS_SPACE ADDRLIST_FBMEM_PREFERRED[] = {ADDR_FBMEM, ADDR_SYSMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_PREFERRED[] = {ADDR_SYSMEM, ADDR_FBMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_FBMEM_ONLY[] = {ADDR_FBMEM, ADDR_UNKNOWN}; +const NV_ADDRESS_SPACE ADDRLIST_SYSMEM_ONLY[] = {ADDR_SYSMEM, ADDR_UNKNOWN}; + +// XXX These could probably encode the whole list in the u32 bits. +NvU32 memdescAddrSpaceListToU32(const NV_ADDRESS_SPACE *addrlist) +{ + if (addrlist == ADDRLIST_FBMEM_PREFERRED) + return 1; + else if (addrlist == ADDRLIST_SYSMEM_PREFERRED) + return 2; + else if (addrlist == ADDRLIST_FBMEM_ONLY) + return 3; + else if (addrlist == ADDRLIST_SYSMEM_ONLY) + return 4; + else + return 0; +} + +const NV_ADDRESS_SPACE *memdescU32ToAddrSpaceList(NvU32 index) +{ + switch (index) + { + case 1: return ADDRLIST_FBMEM_PREFERRED; + case 2: return ADDRLIST_SYSMEM_PREFERRED; + case 3: return ADDRLIST_FBMEM_ONLY; + case 4: return ADDRLIST_SYSMEM_ONLY; + default: + return NULL; + } +} + +/* + * @brief Setting a MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE has to initialize + * pHeap and bUsingSubAllocator flags + */ +static NV_STATUS _memdescSetSubAllocatorFlag +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + NvBool bSet +) +{ + if (!bSet) + { + NV_PRINTF(LEVEL_ERROR, + "Unsetting MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE not supported\n"); + NV_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ASSERT(!(pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)); + + // Set flag forcing the allocation to fall into suballocator + pMemDesc->_flags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + + { + Heap *pHeap = pMemDesc->pHeap; + + NV_ASSERT(pHeap == NULL || pHeap->heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR); + if (pHeap == NULL) + pHeap = memmgrGetDeviceSuballocator(GPU_GET_MEMORY_MANAGER(pGpu), NV_FALSE); + + if (pHeap->heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR) + { + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &pMemDesc->gfid)); + pMemDesc->bUsingSuballocator = NV_TRUE; + } + } + + return NV_OK; +} + +/*! + * @brief Initializing GFID for guest allocated memdescs + */ +static NV_STATUS _memdescSetGuestAllocatedFlag +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + NvBool bSet +) +{ + + return NV_OK; +} + +/*! + * @brief Allocate and initialize a new empty memory descriptor + * + * Allocate a new memory descriptor. This allocates the memory descriptor + * only. 
memdescAlloc or memdescDescribe are later used to allocate or associate + * memory to the memory descriptor. + * + * This routine takes size and the physical contiguous of the future allocation + * in order to size the PTE array for non-contiguous requests. + * + * memdescDestroy should be called to free a memory descriptor. + * + * If MEMDESC_FLAGS_PRE_ALLOCATED is specified, use the memory descriptor + * supplied by the client instead of allocating a new one. + * + * @param[out] ppMemDesc Return pointer to new memory descriptor + * @param[in] pGpu + * @param[in] Size Size of memory descriptor in bytes. + * @param[in] PhysicallyContiguous Need physical contig or can it be scattered? + * @param[in] AddressSpace NV_ADDRESS_SPACE requested + * @param[in] CpuCacheAttrib CPU cacheability requested + * @param[in] Flags MEMDESC_FLAGS_* + * + * @returns NV_OK on success + */ +NV_STATUS +memdescCreate +( + MEMORY_DESCRIPTOR **ppMemDesc, + OBJGPU *pGpu, + NvU64 Size, + NvU64 Alignment, + NvBool PhysicallyContiguous, + NV_ADDRESS_SPACE AddressSpace, + NvU32 CpuCacheAttrib, + NvU64 Flags +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + NvU64 allocSize, MdSize, PageCount; + NvU32 gpuCacheAttrib = NV_MEMORY_UNCACHED; + + allocSize = Size; + + // + // this memdesc may have gotten forced to sysmem if no carveout, + // but for VPR it needs to be in vidmem, so check and re-direct here, + // unless running with zero-FB + // + if ((AddressSpace != ADDR_UNKNOWN) && + (Flags & MEMDESC_ALLOC_FLAGS_PROTECTED) && + (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) || + gpuIsCacheOnlyModeEnabled(pGpu))) + { + AddressSpace = ADDR_FBMEM; + } + + if (pGpu != NULL) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (((AddressSpace == ADDR_SYSMEM) || (AddressSpace == ADDR_UNKNOWN)) && + !(Flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)) + { + if (pMemoryManager && pMemoryManager->sysmemPageSize) + { + allocSize = RM_ALIGN_UP(allocSize, pMemoryManager->sysmemPageSize); + } + } + + if (RMCFG_FEATURE_PLATFORM_MODS) + { + if ( (AddressSpace == ADDR_FBMEM) && + !(Flags & MEMDESC_ALLOC_FLAGS_PROTECTED) && + memmgrGetUsableMemSizeMB_HAL(pGpu, pMemoryManager) == 0 && + gpuIsUnifiedMemorySpaceEnabled(pGpu)) + { + // On Tegra, force sysmem if carveout and SMMU are not available + AddressSpace = ADDR_SYSMEM; + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_ALL_INST_IN_SYSMEM)) + { + CpuCacheAttrib = pGpu->instCacheOverride; + } + } + + // + // Support for aligned contiguous SYSMEM allocations. + // + if ((AddressSpace == ADDR_SYSMEM || AddressSpace == ADDR_UNKNOWN) && + PhysicallyContiguous && (Alignment > RM_PAGE_SIZE)) + { + allocSize += (Alignment - RM_PAGE_SIZE); + } + } + } + + // + // Must allocate a larger buffer to store the PTEs for noncontiguous memory + // Note that we allocate one extra PTE, since we don't know what the PteAdjust + // is yet; if the PteAdjust is zero, we simply won't use it. This is in the + // MEMORY_DESCRIPTOR structure definition. + // + // RM_PAGE_SIZE is 4k and RM_PAGE_SHIFT is 12, so shift operation can be + // modified from ((allocSize + RM_PAGE_SIZE-1) >> RM_PAGE_SHIFT) to below as + // (4k >> 12 = 1). This modification helps us to avoid overflow of variable + // allocSize, in case caller of this function passes highest value of NvU64. + // + // If allocSize is passed as 0, PageCount should be returned as 0. 
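    //
    // Worked example (illustrative): with RM_PAGE_SHIFT = 12, the extreme case
    // allocSize = ~0ULL (largest NvU64) gives
    //     PageCount = ((allocSize - 1) >> 12) + 1
    // without wrapping, whereas the naive form
    //     (allocSize + RM_PAGE_SIZE - 1) >> RM_PAGE_SHIFT
    // would overflow the 64-bit sum and compute 0 pages.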
+ // + if (allocSize == 0) + { + PageCount = 0; + } + else + { + PageCount = ((allocSize - 1) >> RM_PAGE_SHIFT) + 1; + } + + if (PhysicallyContiguous) + { + MdSize = sizeof(MEMORY_DESCRIPTOR); + } + else + { + MdSize = sizeof(MEMORY_DESCRIPTOR) + + (sizeof(RmPhysAddr) * PageCount); + NV_ASSERT(MdSize <= 0xffffffffULL); + if (MdSize > 0xffffffffULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (Flags & MEMDESC_FLAGS_PAGED_SYSMEM) + { + // The flag MEMDESC_FLAGS_PAGED_SYSMEM is only for Windows + return NV_ERR_NOT_SUPPORTED; + } + + if (Flags & MEMDESC_FLAGS_PRE_ALLOCATED) + { + // Only fixed sized memDesc can be supported + if (PhysicallyContiguous == NV_FALSE) + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + NV_ASSERT_OR_RETURN(*ppMemDesc, NV_ERR_NOT_SUPPORTED); + + pMemDesc = *ppMemDesc; + } + else + { + pMemDesc = portMemAllocNonPaged((NvU32)MdSize); + if (pMemDesc == NULL) + { + return NV_ERR_NO_MEMORY; + } + } + + portMemSet(pMemDesc, 0, (NvU32)MdSize); + + // Fill in initial non-zero parameters + pMemDesc->pGpu = pGpu; + pMemDesc->Size = Size; + pMemDesc->PageCount = PageCount; + pMemDesc->ActualSize = allocSize; + pMemDesc->_addressSpace = AddressSpace; + pMemDesc->RefCount = 1; + pMemDesc->DupCount = 1; + pMemDesc->_subDeviceAllocCount = 1; + pMemDesc->_flags = Flags; + pMemDesc->_gpuCacheAttrib = gpuCacheAttrib; + pMemDesc->_gpuP2PCacheAttrib = NV_MEMORY_UNCACHED; + pMemDesc->Alignment = Alignment; + pMemDesc->gfid = GPU_GFID_PF; + pMemDesc->bUsingSuballocator = NV_FALSE; + pMemDesc->bDeferredFree = NV_FALSE; + + memdescSetCpuCacheAttrib(pMemDesc, CpuCacheAttrib); + + // Set any additional flags + pMemDesc->_flags |= MEMDESC_FLAGS_KERNEL_MODE; + if (PhysicallyContiguous) + pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; + else + pMemDesc->_flags &= ~MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; + + // OBJHEAP may not be created at this time and pMemDesc->pHeap may be NULL after this if-else + if (Flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL) + { + pMemDesc->_flags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + pMemDesc->_flags &= ~MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + else if (Flags & MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE) + { + NV_ASSERT_OK_OR_RETURN(_memdescSetSubAllocatorFlag(pGpu, pMemDesc, NV_TRUE)); + } + + // In case of guest allocated memory, just initialize GFID + if (Flags & MEMDESC_FLAGS_GUEST_ALLOCATED) + { + NV_ASSERT_OK_OR_RETURN(_memdescSetGuestAllocatedFlag(pGpu, pMemDesc, NV_TRUE)); + } + + *ppMemDesc = pMemDesc; + + return NV_OK; +} + +/*! + * @brief Initialize an caller allocated memory descriptor + * + * Helper to make it easier to get the memDesc **, and typically used + * with memdescDescribe. + * + * Only can be used for physically contiguous regions with a fixed + * size PTE array. + * + * memdescDestroy should be called to free a memory descriptor. + * + * If MEMDESC_FLAGS_PRE_ALLOCATED is specified, use the memory descriptor + * supplied by the client instead of allocating a new one. 
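 *
 * Typical pairing with memdescDescribe(), as a sketch (pGpu, physBase and
 * size are placeholders; nothing is allocated from a heap here):
 *
 * @code
 *   MEMORY_DESCRIPTOR memDesc;           // caller-owned storage, contiguous only
 *
 *   memdescCreateExisting(&memDesc, pGpu, size, ADDR_FBMEM,
 *                         NV_MEMORY_UNCACHED, 0);
 *   memdescDescribe(&memDesc, ADDR_FBMEM, physBase, size);
 *   // ... use the descriptor ...
 *   memdescDestroy(&memDesc);            // no memdescFree(): nothing was allocated
 * @endcode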
+ * + * @param[out] pMemDesc Return pointer to new memory descriptor + * @param[in] pGpu + * @param[in] Size Size of memory descriptor in bytes + * @param[in] AddressSpace NV_ADDRESS_SPACE requested + * @param[in] CpuCacheAttrib CPU cacheability requested + * @param[in] Flags MEMDESC_FLAGS_* + * + * @returns void with no malloc there should be no failure cases + */ +void +memdescCreateExisting +( + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + NvU64 Size, + NV_ADDRESS_SPACE AddressSpace, + NvU32 CpuCacheAttrib, + NvU64 Flags +) +{ + NV_STATUS status; + status = memdescCreate(&pMemDesc, pGpu, Size, 0, NV_TRUE, AddressSpace, + CpuCacheAttrib, + Flags | MEMDESC_FLAGS_PRE_ALLOCATED | MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE); + NV_ASSERT(status == NV_OK); +} + + +/*! + * Increment ref count + */ +void memdescAddRef +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(pMemDesc != NULL); + ++(pMemDesc->RefCount); +} + +/*! + * Decrement ref count + */ +void memdescRemoveRef +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL); + --(pMemDesc->RefCount); +} + +// +// Destroy all IOMMU mappings under this memdesc, including child +// mappings for root memdescs. +// +// TODO: merge the new IOMMU paths with the SMMU paths (see bug 1625121). +// +static void +_memdescFreeIommuMappings(PMEMORY_DESCRIPTOR pMemDesc) +{ +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM + PIOVAMAPPING pIovaMapping = pMemDesc->_pIommuMappings; + + if (!pIovaMapping) + return; + + if (memdescIsSubMemoryMemDesc(pMemDesc)) + { + iovaMappingDestroy(pIovaMapping); + return; + } + + while (pIovaMapping) + { + PIOVAMAPPING pTmpIovaMapping = pIovaMapping->pNext; + iovaMappingDestroy(pIovaMapping); + pIovaMapping = pTmpIovaMapping; + } + + pMemDesc->_pIommuMappings = NULL; +#endif +} + +/*! + * Destroy a memory descriptor if last reference is released + * + * If the memory descriptor is down to one reference, we need + * to check with the bus code check if that reference needs + * to be reclaimed. + * + * @param[in] pMemDesc Memory descriptor to be destroyed + * + * @returns None + */ +void +memdescDestroy +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Allow null frees + if (!pMemDesc) + { + return; + } + + memdescRemoveRef(pMemDesc); + + // if still more references are there for pMemDesc (pMemDesc->RefCount != 0), then bail out. + + if (pMemDesc->RefCount == 0) + { + MEM_DESC_DESTROY_CALLBACK *pCb = memdescGetDestroyCallbackList(pMemDesc); + MEM_DESC_DESTROY_CALLBACK *pNext; + + if (pMemDesc->_flags & MEMDESC_FLAGS_DUMMY_TOPLEVEL) + { + // When called from RmFreeFrameBuffer() and memdescFree could not do it because it is unallocated. + pMemDesc->_pNext = NULL; + pMemDesc->_subDeviceAllocCount = 1; + } + + NV_ASSERT(pMemDesc->childDescriptorCnt == 0); + NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM || pMemDesc->pHeap == NULL); + + // + // If there is private memdata, use the CB to free + // + if (pMemDesc->_pMemData && pMemDesc->_pMemDataReleaseCallback) + { + pMemDesc->_pMemDataReleaseCallback(pMemDesc); + } + + if (pMemDesc->bDeferredFree) + { + memdescFree(pMemDesc); + } + else if (pMemDesc->Allocated != 0) + { + // + // The caller forgot to free the actual memory before destroying the memdesc. + // Please fix this by calling memdescFree(). + // To prevent memory leaks, we explicitly free here until its fixed elsewhere. 
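            //
            // For reference, the supported teardown patterns (sketch):
            //
            //     memdescFree(pMemDesc);
            //     memdescDestroy(pMemDesc);
            //
            // or, when extra references were taken with memdescAddRef(), one
            // memdescDestroy() per reference; the last one performs the actual
            // teardown, including any free deferred by memdescFree() while
            // references were still outstanding (bDeferredFree).
            //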
+ // + NV_PRINTF(LEVEL_ERROR, "Destroying unfreed memory %p\n", pMemDesc); + NV_PRINTF(LEVEL_ERROR, "Please call memdescFree()\n"); + memdescFree(pMemDesc); + NV_ASSERT(!pMemDesc->Allocated); + } + + if (memdescGetStandbyBuffer(pMemDesc)) + { + memdescFree(memdescGetStandbyBuffer(pMemDesc)); + memdescDestroy(memdescGetStandbyBuffer(pMemDesc)); + memdescSetStandbyBuffer(pMemDesc, NULL); + } + + // + // Submemory descriptors will be destroyed without going through a free + // path, so we need to make sure that we remove the IOMMU submapping + // here. For root descriptors, we should already have removed all the + // associated IOVA mappings. + // + // However, for memory descriptors that weren't allocated by the RM, + // (e.g., were created from a user allocation), we won't go through a + // free path at all. In this case, mappings for other GPUs may still be + // attached to this root memory descriptor, so release them now. + // + _memdescFreeIommuMappings(pMemDesc); + + // Notify all interested parties of destruction + while (pCb) + { + pNext = pCb->pNext; + pCb->destroyCallback(pMemDesc->pGpu, pCb->pObject, pMemDesc); + // pCb is now invalid + pCb = pNext; + } + + portMemFree(pMemDesc->pPteSpaMappings); + pMemDesc->pPteSpaMappings = NULL; + portMemFree(pMemDesc->pSubMemDescList); + pMemDesc->pSubMemDescList = NULL; + + if (pMemDesc->_pParentDescriptor) + { + if ((pMemDesc->_flags & MEMDESC_FLAGS_PRE_ALLOCATED) == 0) + pMemDesc->_pParentDescriptor->childDescriptorCnt--; + memdescDestroy(pMemDesc->_pParentDescriptor); + pMemDesc->_pParentDescriptor = NULL; + } + + // Verify memdesc is not top + NV_ASSERT(memdescHasSubDeviceMemDescs(pMemDesc) == NV_FALSE); + + if ((pMemDesc->_flags & MEMDESC_FLAGS_PRE_ALLOCATED) == 0) + { + portMemFree(pMemDesc); + } + } +} + +/*! + * @brief Function that frees subdevice memory descriptors. If there are no + * subdevice memory descriptors function just simply resets memdesc structure. + * Top level memory descriptor is not destroyed. + * + * @param[in,out] pMemDesc Top level memory descriptor. + * + * @returns None + */ +static void +_memSubDeviceFreeAndDestroy +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc->_pNext; + MEMORY_DESCRIPTOR *pNextMemDesc; + OBJGPU *pGpu = pMemDesc->pGpu; + NvBool bBcState; + + // No subdevice memdescs + if (pSubDevMemDesc == NULL || pGpu == NULL) + { + return; + } + + bBcState = gpumgrGetBcEnabledStatus(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + do + { + pNextMemDesc = pSubDevMemDesc->_pNext; + pSubDevMemDesc->_pNext = NULL; + memdescFree(pSubDevMemDesc); + memdescDestroy(pSubDevMemDesc); + pSubDevMemDesc = pNextMemDesc; + } while (pSubDevMemDesc != NULL); + + gpumgrSetBcEnabledStatus(pGpu, bBcState); +} + +/*! + * @brief Lower memdesc allocation layer for the special case of allocation + in the VPR region when MODS is managing it. + * + * @param[in] pMemDesc Memory descriptor to allocate + * + * @returns NV_OK on successful allocation. + * NV_ERR_NOT_SUPPORTED if not supported + */ +static NV_STATUS +_memdescAllocVprRegion +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Lower memdesc allocation layer. Provides underlying allocation + * functionality. + * + * @param[in,out] pMemDesc Memory descriptor to allocate + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. 
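 *
 * Note on the contiguous-sysmem alignment fix-up below (illustrative numbers):
 * if the OS hands back a contiguous range starting at physical 0x10003000 and
 * the request asked for Alignment = 0x10000, then
 *     offset    = addr & (Alignment - 1) = 0x3000
 *     PteAdjust += Alignment - offset    = 0xD000
 * so the first naturally aligned byte inside the over-sized allocation
 * (0x10010000) becomes the effective start of the surface.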
+ */ +static NV_STATUS +_memdescAllocInternal +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS status = NV_OK; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + if (pMemDesc->Allocated) + { + NV_ASSERT(!pMemDesc->Allocated); + return NV_ERR_INVALID_OBJECT_BUFFER; + } + + // Special case of an allocation request in MODS managed VPR region. + status = _memdescAllocVprRegion(pMemDesc); + if (status != NV_ERR_NOT_SUPPORTED) + goto done; + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + // System memory can be obtained from osAllocPages + status = osAllocPages(pMemDesc); + if (status != NV_OK) + { + goto done; + } + + // + // The pages have been allocated, so mark the descriptor as + // allocated. The IOMMU-mapping code needs the memdesc to be + // allocated in order to create the mapping. + // + pMemDesc->Allocated = 1; + + // + // TODO: merge new IOMMU paths with the SMMU paths below (see bug + // 1625121). For now they are parallel, and only one will be + // used. + // + if (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_CPU_ONLY)) + { + status = memdescMapIommu(pMemDesc, pGpu->busInfo.iovaspaceId); + if (status != NV_OK) + { + pMemDesc->Allocated = 0; + osFreePages(pMemDesc); + goto done; + } + } + + if (pMemDesc->_flags & MEMDESC_FLAGS_PROVIDE_IOMMU_MAP) + { + NV_PRINTF(LEVEL_ERROR, "SMMU mapping allocation is not supported for ARMv7.\n"); + NV_ASSERT(0); + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + else if ((pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) && + RMCFG_FEATURE_PLATFORM_MODS) + { + if (pMemDesc->Alignment > RM_PAGE_SIZE) + { + RmPhysAddr addr = memdescGetPhysAddr(pMemDesc, AT_CPU, 0); + NvU64 offset; + + NV_ASSERT((addr & (RM_PAGE_SIZE - 1)) == 0); + + NV_ASSERT((pMemDesc->Alignment & (pMemDesc->Alignment - 1)) == 0); + offset = addr & (pMemDesc->Alignment - 1); + + if (offset) + { + NV_ASSERT((pMemDesc->PageCount * RM_PAGE_SIZE - pMemDesc->Size) >= offset); + NV_ASSERT(pMemDesc->PteAdjust == 0); + pMemDesc->PteAdjust += NvU64_LO32(pMemDesc->Alignment - offset); + } + } + } + + break; + case ADDR_FBMEM: + { + Heap *pHeap = pMemDesc->pHeap; + + if (RMCFG_FEATURE_PMA && + (pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)) + { + CTX_BUF_POOL_INFO *pCtxBufPool = NULL; + pCtxBufPool = memdescGetCtxBufPool(pMemDesc); + NV_ASSERT_TRUE_OR_GOTO(status, pCtxBufPool != NULL, NV_ERR_INVALID_STATE, done); + + // If pool is setup then allocate from pool + NV_ASSERT_OK_OR_GOTO(status, ctxBufPoolAllocate(pCtxBufPool, pMemDesc), done); + } + else + { + // XXX Hack! + MEMORY_ALLOCATION_REQUEST allocRequest = {0}; + NV_MEMORY_ALLOCATION_PARAMS allocData = {0}; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + NvU64 requestedSize = pMemDesc->Size; + + allocRequest.pUserParams = &allocData; + + // Don't allow FB allocations if FB is broken unless running in L2 cache only mode + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + !gpuIsCacheOnlyModeEnabled(pGpu)) + { + DBG_BREAKPOINT(); + status = NV_ERR_BROKEN_FB; + goto done; + } + + allocData.owner = HEAP_OWNER_RM_CHANNEL_CTX_BUFFER; + allocData.type = NVOS32_TYPE_IMAGE; + allocData.flags = NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + + // remove the "grows_down" flag when bReservedMemAtBottom is set so as to move RM memory to the bottom. 
+ if (pMemorySystemConfig != NULL && !pMemorySystemConfig->bReservedMemAtBottom) + { + allocData.flags |= NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN; + } + + // Allocate in high priority memory? + if (pMemDesc->_flags & MEMDESC_FLAGS_HIGH_PRIORITY) + { + allocData.attr2 |= DRF_DEF(OS32, _ATTR2, _PRIORITY, _HIGH); + } + else if (pMemDesc->_flags & MEMDESC_FLAGS_LOW_PRIORITY) + { + allocData.attr2 |= DRF_DEF(OS32, _ATTR2, _PRIORITY, _LOW); + } + + allocData.size = pMemDesc->Size; + allocData.alignment = pMemDesc->Alignment; + allocRequest.pMemDesc = pMemDesc; + + if (pMemDesc->_pageSize == RM_PAGE_SIZE) + { + allocData.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB); + } + else if (pMemDesc->_pageSize == RM_PAGE_SIZE_64K || + pMemDesc->_pageSize == RM_PAGE_SIZE_128K) + { + allocData.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _BIG); + } + + allocData.flags |= pMemDesc->Alignment ? + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE : + NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE; + + if (pMemDesc->_flags & MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + allocData.flags |= NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + allocData.offset = pMemDesc->_pteArray[0]; + } + + if (pMemDesc->_gpuCacheAttrib == NV_MEMORY_CACHED) + { + allocData.attr2 |= DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _YES); + } + else + { + // Force internal allocations to uncached unless explicitly requested. + allocData.attr2 |= DRF_DEF(OS32, _ATTR2, _GPU_CACHEABLE, _NO); + } + + allocData.attr2 = FLD_SET_DRF(OS32, _ATTR2, _INTERNAL, _YES, allocData.attr2); + + if (pMemDesc->_flags & MEMDESC_ALLOC_FLAGS_PROTECTED) + { + allocData.flags |= NVOS32_ALLOC_FLAGS_PROTECTED; + } + + allocData.attr |= DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS); + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + NV_ASSERT_TRUE_OR_GOTO(status, pFbAllocInfo != NULL, NV_ERR_NO_MEMORY, done); + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + NV_ASSERT_TRUE_OR_GOTO(status, pFbAllocPageFormat != NULL, NV_ERR_NO_MEMORY, done); + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + memUtilsInitFBAllocInfo(&allocData, pFbAllocInfo, 0, 0); // Client/device N/A + + status = memmgrAllocResources(pGpu, pMemoryManager, &allocRequest, pFbAllocInfo); + if (status != NV_OK) + goto done; + + status = vidmemAllocResources(pGpu, pMemoryManager, &allocRequest, pFbAllocInfo, pHeap); + if (status != NV_OK) + goto done; + + pMemDesc->Alignment = allocData.alignment; + + // Update MemDesc GPU cacheability with results of allocation + if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, allocData.attr2) == NVOS32_ATTR2_GPU_CACHEABLE_YES) + { + pMemDesc->_gpuCacheAttrib = NV_MEMORY_CACHED; + } + else + { + pMemDesc->_gpuCacheAttrib = NV_MEMORY_UNCACHED; + } + + // + // Adjust size to the requested size, not the heap rounded size. A number of callers + // depend on this. In the future we will have the PageCount be accurate. 
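                //
                // Example (illustrative): a 0x3000-byte request that the heap rounds
                // up to a 0x10000-byte block keeps Size = 0x3000 here, so with
                // PteAdjust = 0 the PageCount computed below is 3 pages rather than
                // the 16 the rounded block could hold.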
+ // + pMemDesc->Size = requestedSize; + pMemDesc->PageCount = ((pMemDesc->Size + pMemDesc->PteAdjust + RM_PAGE_SIZE - 1) >> RM_PAGE_SHIFT); + } + // We now have the memory + pMemDesc->Allocated = 1; + + // If the allocation succeeds and if its PhysMemSubAlloc, increment the refCount + if ((status == NV_OK) && (pHeap != NULL) && + (pHeap->heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR)) + { + heapAddRef(pHeap); + } + break; + } + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + status = NV_ERR_GENERIC; + goto done; + } + + memdescPrintMemdesc(pMemDesc, NV_TRUE, "memdesc allocated"); + +done: + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + return status; +} + +/*! + * @brief Upper memdesc allocation layer. Provides support for per-subdevice + * sysmem buffers and lockless sysmem allocation. + * + * @param[in,out] pMemDesc Memory descriptor to allocate + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. + */ +NV_STATUS +memdescAlloc +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NV_STATUS status = NV_OK; + NvBool bcState = NV_FALSE; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvBool reAcquire; + NvU32 gpuMask = 0; + + NV_ASSERT_OR_RETURN(!pMemDesc->Allocated, NV_ERR_INVALID_OBJECT_BUFFER); + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + // Can't alloc sysmem on GSP firmware. + if (RMCFG_FEATURE_PLATFORM_GSP && !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED)) + { + // + // TO DO: Make this an error once existing allocations are cleaned up. + // After that pHeap selection can be moved to memdescAllocInternal() + // + NV_PRINTF(LEVEL_ERROR, + "WARNING sysmem alloc on GSP firmware\n"); + pMemDesc->_addressSpace = ADDR_FBMEM; + pMemDesc->pHeap = GPU_GET_HEAP(pGpu); + } + + break; + case ADDR_FBMEM: + { + // If FB is broken then don't allow the allocation, unless running in L2 cache only mode + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + !gpuIsCacheOnlyModeEnabled(pGpu)) + { + status = NV_ERR_BROKEN_FB; + DBG_BREAKPOINT(); + } + + NV_ASSERT(pMemDesc->pHeap == NULL); + // Set the pHeap based on who owns this allocation + if (pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL) + { + // + // pHeap is not required in memdesc for ctx buf pools because each ctx buf + // pool is tied to PMA and this pools is cached inside memdesc. + // + CTX_BUF_POOL_INFO *pCtxBufPool = memdescGetCtxBufPool(pMemDesc); + NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_STATE); + } + else if (pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE) + { + pMemDesc->pHeap = memmgrGetDeviceSuballocator(GPU_GET_MEMORY_MANAGER(pGpu), NV_FALSE); + } + else if (GPU_GET_MEMORY_MANAGER(pGpu) != NULL && + RMCFG_MODULE_HEAP && + pMemDesc->_addressSpace == ADDR_FBMEM) + { + pMemDesc->pHeap = GPU_GET_HEAP(pGpu); + } + + break; + } + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + + if (status != NV_OK) + { + return status; + } + + if (gpumgrGetBcEnabledStatus(pGpu)) + { + // Broadcast memdescAlloc call with flag set to allocate per subdevice. 
+ if (pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE) + { + NvU32 i; + MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc; + MEMORY_DESCRIPTOR *pPrev = pMemDesc; + OBJGPU *pGpuChild; + + pMemDesc->_subDeviceAllocCount = NumSubDevices(pGpu); + + for (i = 0; i < pMemDesc->_subDeviceAllocCount; i++) + { + // Get pGpu for this subdeviceinst + pGpuChild = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), i); + if (NULL == pGpuChild) + { + NV_ASSERT(0); + status = NV_ERR_OBJECT_NOT_FOUND; + goto subdeviceAlloc_failed; + } + + // + // We are accessing the fields of the top level desc here directly without using the + // accessor routines on purpose. + // + status = memdescCreate(&pSubDevMemDesc, pGpuChild, pMemDesc->Size, pMemDesc->Alignment, + !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS), + pMemDesc->_addressSpace, + pMemDesc->_cpuCacheAttrib, + pMemDesc->_flags & ~MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE); + + if (status != NV_OK) + { + NV_ASSERT(0); + goto subdeviceAlloc_failed; + } + + pSubDevMemDesc->_gpuCacheAttrib = pMemDesc->_gpuCacheAttrib; + pSubDevMemDesc->_pageSize = pMemDesc->_pageSize; + + // Force broadcast state to false when allocating a subdevice memdesc + gpumgrSetBcEnabledStatus(pGpuChild, NV_FALSE); + + status = memdescAlloc(pSubDevMemDesc); + + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + // + // The top level memdesc could have flags that don't reflect contiguity which + // is set after memdescAlloc. + // + pMemDesc->Alignment = pSubDevMemDesc->Alignment; + pMemDesc->_flags = pSubDevMemDesc->_flags | MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE; + pMemDesc->ActualSize = pSubDevMemDesc->ActualSize; + } + + // Restore broadcast state to true after allocating a subdevice memdesc + gpumgrSetBcEnabledStatus(pGpuChild, NV_TRUE); + + if (status != NV_OK) + { + memdescDestroy(pSubDevMemDesc); + NV_ASSERT(0); + goto subdeviceAlloc_failed; + } + + // Check for similarity in allocations for previous allocated subdev with current allocated subdev. + // If subdev0 ~ subdev1 && subdev1~subdev2 then subdev0 ~ subdev2 and so on...Thus can check symmetry across all subdev allocations + if (i > 0) + { + NV_ASSERT(pPrev->Size == pSubDevMemDesc->Size); + NV_ASSERT(pPrev->PteAdjust == pSubDevMemDesc->PteAdjust); + NV_ASSERT(pPrev->_addressSpace == pSubDevMemDesc->_addressSpace); + NV_ASSERT(pPrev->_flags == pSubDevMemDesc->_flags); + NV_ASSERT(pPrev->_pteKind == pSubDevMemDesc->_pteKind); + NV_ASSERT(pPrev->_pteKindCompressed == pSubDevMemDesc->_pteKindCompressed); + NV_ASSERT(pPrev->pHeap != pSubDevMemDesc->pHeap); + } + + pPrev->_pNext = pSubDevMemDesc; + pPrev = pSubDevMemDesc; + } + pMemDesc->Allocated = 1; + return NV_OK; + } + else if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + // Broadcast memdescAlloc call on vidmem *without* flag set to allocate per subdevice + NV_ASSERT(0); + } + } + + // Unicast memdescAlloc call but with flag set to allocate per subdevice. + NV_ASSERT(!((pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE) && !gpumgrGetBcEnabledStatus(pGpu))); + + reAcquire = NV_FALSE; + bcState = NV_FALSE; + + if ((pMemDesc->_flags & MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC) && (pMemDesc->_addressSpace != ADDR_FBMEM)) + { + bcState = gpumgrGetBcEnabledStatus(pGpu); + if (RMCFG_FEATURE_RM_BASIC_LOCK_MODEL) + { + // + // There is no equivalent routine for osCondReleaseRmSema in + // the new basic lock model. + + // + // However, we can't drop the RM system semaphore in this + // path because on non-windows platforms (i.e. MODS) it + // has undesirable consequences. 
So for now we must + // bracket this section with a reference to the feature + // flag until we can rework this interface. + // + // + // Check to make sure we own the lock and that we are + // not at elevated IRQL; this models the behavior + // of osCondReleaseRmSema. + // + if (!osIsRaisedIRQL() && + (rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuMask) || + rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_SUBDEVICE, &gpuMask))) + { + // + // Release all owned gpu locks rather than just the + // device-related locks because the caller may be holding more + // than the required device locks. All currently owned + // locks will be re-acquired before returning. + // + // This prevents potential GPU locking violations (e.g., if the + // caller is holding all the gpu locks but only releases the + // first of two device locks, then attempting to re-acquire + // the first device lock will be a locking violation with + // respect to the second device lock.) + // + gpuMask = rmGpuLocksGetOwnedMask(); + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + reAcquire = NV_TRUE; + } + } + else + { + reAcquire = osCondReleaseRmSema(pSys->pSema); + } + } + + // Actually allocate the memory + NV_CHECK_OK(status, LEVEL_ERROR, _memdescAllocInternal(pMemDesc)); + + if (status != NV_OK) + { + pMemDesc->pHeap = NULL; + } + + if (reAcquire) + { + if (osAcquireRmSema(pSys->pSema) != NV_OK) + { + DBG_BREAKPOINT(); + + } + + if (rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_MASK, + GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM, + &gpuMask) != NV_OK) + { + DBG_BREAKPOINT(); + } + // Releasing the semaphore allows another thread to enter RM and + // modify broadcast state. We need to set it back (see bug 368643) + gpumgrSetBcEnabledStatus(pGpu, bcState); + } + + return status; + +subdeviceAlloc_failed: + _memSubDeviceFreeAndDestroy(pMemDesc); + pMemDesc->_subDeviceAllocCount = 1; + pMemDesc->_pNext = NULL; + return status; +} + +/*! + * Allocate memory from one of the possible locations specified in pList. + * + * @param[in,out] pMemDesc Memory descriptor to allocate + * @param[in] pList List of NV_ADDRESS_SPACE values. Terminated + * by an ADDR_UNKNOWN entry. + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. + */ +NV_STATUS +memdescAllocList +( + MEMORY_DESCRIPTOR *pMemDesc, + const NV_ADDRESS_SPACE *pList +) +{ + NV_STATUS status = NV_ERR_INVALID_ARGUMENT; + NvU32 i = 0; + + if (!pList) + { + return status; + } + + // + // this memdesc may have gotten forced to sysmem if no carveout, + // but for VPR it needs to be in vidmem, so check and re-direct here + // + if (pMemDesc->_flags & MEMDESC_ALLOC_FLAGS_PROTECTED) + { + OBJGPU *pGpu = pMemDesc->pGpu; + + // Only force to vidmem if not running with zero-FB. + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) || + gpuIsCacheOnlyModeEnabled(pGpu)) + { + pList = ADDRLIST_FBMEM_ONLY; + } + } + + while (pList[i] != ADDR_UNKNOWN) + { + pMemDesc->_addressSpace = pList[i]; + status = memdescAlloc(pMemDesc); + + if (status == NV_OK) + { + return status; + } + + i++; + } + + return status; +} + +/*! + * @brief Lower memdesc free layer. Provides underlying free + * functionality. 
+ * + * @param[in,out] pMemDesc Memory descriptor to free + * + * @returns None + */ +static void +_memdescFreeInternal +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + MEM_DESC_DESTROY_CALLBACK *pCb, *pNext; + NvU64 oldSize; + + // Allow null frees + if (!pMemDesc) + { + return; + } + + pCb = memdescGetDestroyCallbackList(pMemDesc); + + // Notify all interested parties of destruction + while (pCb) + { + pNext = pCb->pNext; + pCb->destroyCallback(pMemDesc->pGpu, pCb->pObject, pMemDesc); + // pCb is now invalid + pCb = pNext; + } + + if (memdescHasSubDeviceMemDescs(pMemDesc)) + return; + + memdescPrintMemdesc(pMemDesc, NV_FALSE, "memdesc being freed"); + + // Bail our early in case this memdesc describes a MODS managed VPR region. + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_VPR_REGION_CLIENT_MANAGED)) + return; + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + // invalidate if memory is cached in FB L2 cache. + if (pMemDesc->_gpuCacheAttrib == NV_MEMORY_CACHED) + { + OBJGPU *pGpu = pMemDesc->pGpu; + + // + // If this memdesc managed to outlive its pGpu getting detached, + // we're plenty off the rails already, but avoid using the pGpu + // and carry on as best we can + // + if (gpumgrGetGpuGrpFromGpu(pGpu) != NULL) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + if (pKernelMemorySystem == NULL) + { + NV_ASSERT_FAILED("Sysmemdesc outlived its attached Gpu Memory System"); + } + else + { + NV_ASSERT_OK(kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, pMemDesc, + FB_CACHE_SYSTEM_MEMORY, + FB_CACHE_INVALIDATE)); + } + } + SLI_LOOP_END + } + else + { + NV_ASSERT_FAILED("Sysmemdesc outlived its attached pGpu"); + } + } + + oldSize = pMemDesc->Size; + pMemDesc->Size = pMemDesc->ActualSize; + pMemDesc->PageCount = ((pMemDesc->ActualSize + RM_PAGE_SIZE-1) >> RM_PAGE_SHIFT); + + osFreePages(pMemDesc); + + pMemDesc->Size = oldSize; + pMemDesc->PageCount = ((oldSize + RM_PAGE_SIZE-1) >> RM_PAGE_SHIFT); + + break; + + case ADDR_FBMEM: + { + Heap *pHeap = pMemDesc->pHeap; + NV_STATUS status = NV_OK; + OBJGPU *pGpu = pMemDesc->pGpu; + + if (RMCFG_FEATURE_PMA && + (pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL)) + { + CTX_BUF_POOL_INFO *pCtxBufPool = memdescGetCtxBufPool(pMemDesc); + if (pCtxBufPool == NULL) + { + DBG_BREAKPOINT(); + NV_PRINTF(LEVEL_ERROR, "ctx buf pool not found\n"); + return; + } + NV_STATUS status = ctxBufPoolFree(pCtxBufPool, pMemDesc); + if (status != NV_OK) + { + DBG_BREAKPOINT(); + NV_PRINTF(LEVEL_ERROR, "Failed to free memdesc from context buffer pool\n"); + } + } + else + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + NV_ASSERT(pHeap != NULL); + + if (!pHeap) + return; + + status = memmgrFree(pGpu, pMemoryManager, pHeap, 0x0, 0x0, 0x0, + HEAP_OWNER_RM_CHANNEL_CTX_BUFFER, + pMemDesc); + NV_ASSERT(status == NV_OK); + } + + // If this heap is being used to manage PMSA memory, reduce the refcount accordingly + if ((status == NV_OK) && (pHeap != NULL) && + (pHeap->heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR)) + { + heapRemoveRef(pHeap); + } + break; + } + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + } +} + +/*! + * @brief Upper memdesc free layer. Provides support for per-subdevice + * sysmem buffers and lockless sysmem allocation. 
Because of SLI and subdevice + * submem allocations (refer to submem chart) support, if memory has never + * been allocated function will just unlink subdevice structure and destroy + * subdevice descriptors. + * + * @param[in,out] pMemDesc Memory descriptor to free + * + * @returns None + */ +void +memdescFree +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Allow null frees + if (!pMemDesc) + { + return; + } + + + if (memdescIsSubMemoryMemDesc(pMemDesc)) + { + NV_ASSERT(!pMemDesc->_pInternalMapping); + + if (pMemDesc->_addressSpace == ADDR_SYSMEM) + { + // The memdesc is being freed so destroy all of its IOMMU mappings. + _memdescFreeIommuMappings(pMemDesc); + } + + if (pMemDesc->_addressSpace != ADDR_FBMEM && + pMemDesc->_addressSpace != ADDR_SYSMEM) + { + return; + } + + _memSubDeviceFreeAndDestroy(pMemDesc); + } + else + { + // + // In case RM attempts to free memory that has more than 1 refcount, the free is deferred until refcount reaches 0 + // + // Bug 3307574 RM crashes when client's specify sysmem UserD location. + // RM attempts to peek at the client allocated UserD when waiting for a channel to go idle. + // + if (pMemDesc->RefCount > 1 && pMemDesc->Allocated == 1) + { + pMemDesc->bDeferredFree = NV_TRUE; + return; + } + + if (!pMemDesc->Allocated) + { + /* + * For sysmem not allocated by RM but only registered to it, we + * would need to update the shared sysmem pfn bitmap here + */ + return; + } + pMemDesc->Allocated--; + if (0 != pMemDesc->Allocated) + { + return; + } + + // If standbyBuffer memory was allocated then free it + if (pMemDesc->_pStandbyBuffer) + { + memdescFree(pMemDesc->_pStandbyBuffer); + memdescDestroy(pMemDesc->_pStandbyBuffer); + pMemDesc->_pStandbyBuffer = NULL; + } + + NV_ASSERT(!pMemDesc->_pInternalMapping); + + if (pMemDesc->_addressSpace == ADDR_SYSMEM) + { + // The memdesc is being freed so destroy all of its IOMMU mappings. + _memdescFreeIommuMappings(pMemDesc); + } + + if (pMemDesc->_addressSpace != ADDR_FBMEM && + pMemDesc->_addressSpace != ADDR_SYSMEM) + { + return; + } + + _memSubDeviceFreeAndDestroy(pMemDesc); + + _memdescFreeInternal(pMemDesc); + } + + // Reset tracking state + pMemDesc->_pNext = NULL; + pMemDesc->_subDeviceAllocCount = 1; + + // + // Reset tracking state of parent + // Why it is needed: + // When a submemory toplevel memdesc with subdevices is freed, + // the subdecice memdescs and their parent are destroyed or their + // refcount decreased. + // When the parent subdevice descriptors are destroyed, their + // top level descriptor is left alone and has a dangling + // _pNext pointer + // + if ((pMemDesc->_pParentDescriptor != NULL) && + (memdescHasSubDeviceMemDescs(pMemDesc->_pParentDescriptor)) && + (pMemDesc->_pParentDescriptor->RefCount == 1)) + { + pMemDesc->_pParentDescriptor->_pNext = NULL; + pMemDesc->_pParentDescriptor->_subDeviceAllocCount = 1; + } +} + +/*! + * @brief Lock the paged virtual memory descripted by the memory descriptor + * + * @param[in] pMemDesc Memory descriptor to lock + * + * @returns NV_OK on successful allocation. Various NV_ERR_GENERIC codes otherwise. + */ +NV_STATUS +memdescLock +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGED_SYSMEM)) + { + return NV_ERR_ILLEGAL_ACTION; + } + + return osLockMem(pMemDesc); +} + +/*! + * @brief Unlock the paged virtual memory descripted by the memory descriptor + * + * @param[in] pMemDesc Memory descriptor to unlock + * + * @returns NV_OK on successful allocation. 
Various NV_ERR_GENERIC codes otherwise. + */ +NV_STATUS +memdescUnlock +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGED_SYSMEM)) + { + return NV_ERR_ILLEGAL_ACTION; + } + + return osUnlockMem(pMemDesc); +} + +/*! + * @brief Get a CPU mapping to the memory described by a memory descriptor + * + * This is for memory descriptors used by RM clients, not by the RM itself. + * For internal mappings the busMapRmAperture() hal routines are used. + * + * @param[in] pMemDesc Memory descriptor to map + * @param[in] Offset Offset into memory descriptor to start map + * @param[in] Size Size of mapping + * @param[in] Kernel Kernel or user address space + * @param[in] Protect NV_PROTECT_* + * @param[out] pAddress Return address + * @param[out] pPriv Return cookie to be passed back to memdescUnmap + * + * @returns NV_STATUS + */ + +NV_STATUS +memdescMapOld +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Size, + NvBool Kernel, + NvU32 Protect, + void **pAddress, + void **pPriv +) +{ + NvP64 pAddressP64 = NV_PTR_TO_NvP64(*pAddress); + NvP64 pPrivP64 = NV_PTR_TO_NvP64(*pPriv); + NV_STATUS status; + +#if !defined(NV_64_BITS) + NV_ASSERT(Kernel); +#endif + + status = memdescMap(pMemDesc, + Offset, + Size, + Kernel, + Protect, + &pAddressP64, + &pPrivP64); + + *pAddress = NvP64_VALUE(pAddressP64); + *pPriv = NvP64_VALUE(pPrivP64); + + return status; +} + +NV_STATUS +memdescMap +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 Offset, + NvU64 Size, + NvBool Kernel, + NvU32 Protect, + NvP64 *pAddress, + NvP64 *pPriv +) +{ + NV_STATUS status = NV_OK; + NvU64 rootOffset = 0; + + NV_ASSERT_OR_RETURN(((Offset + Size) <= memdescGetSize(pMemDesc)), NV_ERR_INVALID_ARGUMENT); + + pMemDesc = memdescGetRootMemDesc(pMemDesc, &rootOffset); + Offset += rootOffset; + + if (pMemDesc->PteAdjust && + (pMemDesc->Alignment > RM_PAGE_SIZE) && + (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) && + RMCFG_FEATURE_PLATFORM_MODS) + { + Offset += pMemDesc->PteAdjust; + } + + // + // Sanity check, the top-level descriptor should be allocated or else + // memDesc must be marked as user allocate memory. This allows mapping of + // memDesc keeping track of PA's for user allocated memory, wherein RM + // marks the corresponding memDesc as not allocated. 
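    /*
     * Illustrative sketch (not part of this change): a user-space mapping uses
     * the NvP64 form directly; processId is a placeholder for the target
     * process. The returned priv cookie must be handed back to memdescUnmap()
     * together with the same Kernel flag.
     *
     *   NvP64 userVa;
     *   NvP64 priv;
     *
     *   NV_ASSERT_OK(memdescMap(pMemDesc, 0, memdescGetSize(pMemDesc),
     *                           NV_FALSE, NV_PROTECT_READ_WRITE,
     *                           &userVa, &priv));
     *   // ... user process accesses the surface ...
     *   memdescUnmap(pMemDesc, NV_FALSE, processId, userVa, priv);
     */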
+ // + NV_ASSERT_OR_RETURN(pMemDesc->Allocated || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM) || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM), + NV_ERR_INVALID_OBJECT_BUFFER); + + NV_ASSERT_OR_RETURN(!memdescHasSubDeviceMemDescs(pMemDesc), NV_ERR_INVALID_OBJECT_BUFFER); + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + { + status = osMapSystemMemory(pMemDesc, Offset, Size, + Kernel, Protect, pAddress, pPriv); + if (status != NV_OK) + { + return status; + } + break; + } + + case ADDR_FBMEM: + { + OBJGPU *pGpu = pMemDesc->pGpu; + NvU32 mode = NV_MEMORY_WRITECOMBINED; + KernelBus *pKernelBus; + FB_MAPPING_INFO *pMapping; + RmPhysAddr bar1PhysAddr; + NvBool bCoherentCpuMapping; + + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + bCoherentCpuMapping = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING); + + // Need struct to keep track of the info for this mapping + pMapping = portMemAllocNonPaged(sizeof(FB_MAPPING_INFO)); + if (pMapping == NULL) + { + return NV_ERR_NO_MEMORY; + } + + if (bCoherentCpuMapping) + { + NV_ASSERT(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); + + if (Kernel) + { + NvP64 tempCpuPtr = kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc); + if (tempCpuPtr == NULL) + { + status = NV_ERR_GENERIC; + } + else + { + status = NV_OK; + tempCpuPtr = NvP64_PLUS_OFFSET(tempCpuPtr, Offset); + } + *pAddress = tempCpuPtr; + } + else + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NvU64 fbOffset = pMemDesc->_pteArray[0] + + pMemDesc->PteAdjust + Offset; + bar1PhysAddr = pKernelMemorySystem->coherentCpuFbBase + fbOffset; + mode = NV_MEMORY_CACHED; + + status = osMapPciMemoryUser(pGpu->pOsGpuInfo, bar1PhysAddr, + Size, Protect, pAddress, + &pMapping->pPriv, + mode); + } + + + if (status != NV_OK) + { + portMemFree(pMapping); + return status; + } + + NV_PRINTF(LEVEL_INFO, "Allocating coherent link mapping. 
VA: %p PA: 0x%llx size: 0x%llx\n", + NvP64_VALUE(*pAddress), + memdescGetPhysAddr(pMemDesc, AT_GPU, Offset), Size); + + *pPriv = NV_PTR_TO_NvP64(pMapping); + break; + } + + // Mapping via PCIe BAR + + NvHandle hClient = NV01_NULL_OBJECT; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + if ((pCallContext != NULL) && (pCallContext->pClient != NULL)) + { + hClient = pCallContext->pClient->hClient; + } + + // Determine where in BAR1 the mapping will go + pMapping->FbApertureLen = Size; + status = kbusMapFbAperture_HAL(pGpu, pKernelBus, + pMemDesc, Offset, + &pMapping->FbAperture, + &pMapping->FbApertureLen, + BUS_MAP_FB_FLAGS_MAP_UNICAST, + hClient); + if (status != NV_OK) + { + portMemFree(pMapping); + return status; + } + + bar1PhysAddr = gpumgrGetGpuPhysFbAddr(pGpu) + pMapping->FbAperture; + mode = NV_MEMORY_WRITECOMBINED; + + // Create the mapping + if (Kernel) + { + status = osMapPciMemoryKernel64(pGpu, bar1PhysAddr, + Size, Protect, pAddress, + mode); + } + else + { + status = osMapPciMemoryUser(pGpu->pOsGpuInfo, bar1PhysAddr, + Size, Protect, pAddress, + &pMapping->pPriv, + mode); + } + + if (status != NV_OK) + { + if (!bCoherentCpuMapping) + { + kbusUnmapFbAperture_HAL(pGpu, pKernelBus, pMemDesc, + pMapping->FbAperture, + pMapping->FbApertureLen, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + } + portMemFree(pMapping); + return status; + } + + *pPriv = NV_PTR_TO_NvP64(pMapping); + break; + } + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } + return NV_OK; +} +void +memdescUnmapOld +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + NvU32 ProcessId, + void *Address, + void *Priv +) +{ + memdescUnmap(pMemDesc, + Kernel, + ProcessId, + NV_PTR_TO_NvP64(Address), + NV_PTR_TO_NvP64(Priv)); +} + +/*! 
+ * @brief Remove a mapping for the memory descriptor, reversing memdescMap + * + * @param[in] pMemDesc Memory descriptor to unmap + * @param[in] Kernel Kernel or user address space + * @param[in] ProcessId Process ID if user space + * @param[in] Address Mapped address + * @param[in] Priv Return priv cookie from memdescMap + * + * @returns None + */ +void +memdescUnmap +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool Kernel, + NvU32 ProcessId, + NvP64 Address, + NvP64 Priv +) +{ + // Allow null unmaps + if (!Address) + return; + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + + // find first allocated parent descriptor + while (!pMemDesc->Allocated && pMemDesc->_pParentDescriptor) + { + pMemDesc = pMemDesc->_pParentDescriptor; + } + + switch (pMemDesc->_addressSpace) + { + case ADDR_SYSMEM: + { + osUnmapSystemMemory(pMemDesc, Kernel, ProcessId, Address, Priv); + break; + } + + case ADDR_FBMEM: + { + OBJGPU *pGpu = pMemDesc->pGpu; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + FB_MAPPING_INFO *pMapping = (FB_MAPPING_INFO *)NvP64_VALUE(Priv); + NvBool bCoherentCpuMapping = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING); + NvU64 Size = pMapping->FbApertureLen; + + NV_ASSERT(!(pMemDesc->_flags & MEMDESC_FLAGS_CPU_ONLY)); + + if (bCoherentCpuMapping) + { + if (Kernel) + { + kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc); + } + else + { + osUnmapPciMemoryUser(pGpu->pOsGpuInfo, Address, Size, + pMapping->pPriv); + } + + portMemFree(pMapping); + break; + } + + kbusUnmapFbAperture_HAL(pGpu, pKernelBus, pMemDesc, + pMapping->FbAperture, + Size, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + if (Kernel) + { + osUnmapPciMemoryKernel64(pGpu, Address); + } + else + { + osUnmapPciMemoryUser(pGpu->pOsGpuInfo, Address, Size, + pMapping->pPriv); + } + + portMemFree(pMapping); + break; + } + + default: + // Don't know how to do any other types of memory yet + DBG_BREAKPOINT(); + } +} + +typedef enum +{ + MEMDESC_MAP_INTERNAL_TYPE_GSP, // On GSP, use a pre-existing mapping + MEMDESC_MAP_INTERNAL_TYPE_COHERENT_FBMEM, // For NVLINK, use a pre-existing mapping for fbmem + MEMDESC_MAP_INTERNAL_TYPE_BAR2, // Use BAR2 (fbmem or reflected sysmem) + MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT, // Use OS to map sysmem +} MEMDESC_MAP_INTERNAL_TYPE; + +static MEMDESC_MAP_INTERNAL_TYPE +memdescGetMapInternalType +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + if (RMCFG_FEATURE_PLATFORM_GSP) + { + return MEMDESC_MAP_INTERNAL_TYPE_GSP; + } + else if (pMemDesc->_addressSpace == ADDR_FBMEM && + pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING)) + { + // Temporary hack to keep the same behavior on GV100F (dVOLTA & DFPGA) + if (IsDFPGA(pGpu)) + return MEMDESC_MAP_INTERNAL_TYPE_BAR2; + + return MEMDESC_MAP_INTERNAL_TYPE_COHERENT_FBMEM; + } + else + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvBool bUseDirectMap = NV_FALSE; + NV_STATUS status; + + status = kbusUseDirectSysmemMap_HAL(pGpu, pKernelBus, pMemDesc, &bUseDirectMap); + NV_ASSERT_OR_RETURN(status == NV_OK, MEMDESC_MAP_INTERNAL_TYPE_BAR2); + + return bUseDirectMap ? 
MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT : MEMDESC_MAP_INTERNAL_TYPE_BAR2; + } + + return MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT; +} + +void +memdescFlushGpuCaches +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + if (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + // + // Only the non-coherent memory path is available, so writeback GPU L2 + // invalidate the GPU L2 + // + kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, pMemDesc, FB_CACHE_MEM_UNDEFINED, FB_CACHE_EVICT); + } +} + +void +memdescFlushCpuCaches +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + // Flush WC to get the data written to this mapping out to memory + osFlushCpuWriteCombineBuffer(); + + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + + // Special care is needed on SOC, where the GPU cannot snoop the CPU L2 + if ((pKernelBif != NULL) && + !kbifIsSnoopDmaCapable(pGpu, pKernelBif) && + (memdescGetCpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED)) + { + // Flush CPU L2 so that the GPU will see any changes the CPU made + osFlushCpuCache(); + } +} + +/* + * @brief map memory descriptor for internal access + * + * flags - subset of TRANSFER_FLAGS_ + */ +void* +memdescMapInternal +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + MEMDESC_MAP_INTERNAL_TYPE mapType; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pMemDesc != NULL, NULL); + + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + } + + mapType = memdescGetMapInternalType(pGpu, pMemDesc); + + // We need to flush & invalidate GPU L2 cache only for directed BAR mappings. + // Reflected BAR mappings will access memory via GPU, and hence go through GPU L2 cache. 
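    /*
     * Typical internal-access pattern around this routine, as a sketch (flags
     * are passed as 0 here for brevity; callers normally use TRANSFER_FLAGS_*
     * values such as TRANSFER_FLAGS_DEFER_FLUSH to batch the post-write flush):
     *
     *   NvU8 *pMap = memdescMapInternal(pGpu, pMemDesc, 0);
     *   if (pMap != NULL)
     *   {
     *       // ... CPU reads/writes through pMap ...
     *       memdescUnmapInternal(pGpu, pMemDesc, 0);
     *   }
     */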
+ if (mapType == MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT) + memdescFlushGpuCaches(pGpu, pMemDesc); + + if (pMemDesc->_pInternalMapping != NULL) + { + NV_ASSERT(pMemDesc->_internalMappingRefCount); + + // Existing BAR2 mapping may be invalid due to GPU reset + if (mapType == MEMDESC_MAP_INTERNAL_TYPE_BAR2) + { + pMemDesc->_pInternalMapping = kbusValidateBar2ApertureMapping_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), pMemDesc, + pMemDesc->_pInternalMapping); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc->_pInternalMapping != NULL, NULL); + } + + pMemDesc->_internalMappingRefCount++; + return pMemDesc->_pInternalMapping; + } + + switch (mapType) + { + case MEMDESC_MAP_INTERNAL_TYPE_GSP: + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc->_pInternalMapping != NULL, NULL); + break; + case MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT: + { + status = memdescMapOld(pMemDesc, 0, pMemDesc->Size, NV_TRUE, NV_PROTECT_READ_WRITE, + &pMemDesc->_pInternalMapping, &pMemDesc->_pInternalMappingPriv); + NV_CHECK_OR_RETURN(LEVEL_ERROR, status == NV_OK, NULL); + break; + } + case MEMDESC_MAP_INTERNAL_TYPE_COHERENT_FBMEM: + { + NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)); + pMemDesc->_pInternalMapping = kbusMapCoherentCpuMapping_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), pMemDesc); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc->_pInternalMapping != NULL, NULL); + break; + } + case MEMDESC_MAP_INTERNAL_TYPE_BAR2: + pMemDesc->_pInternalMapping = kbusMapBar2Aperture_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), pMemDesc, flags); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemDesc->_pInternalMapping != NULL, NULL); + break; + + default: + DBG_BREAKPOINT(); + } + + pMemDesc->_internalMappingRefCount = 1; + return pMemDesc->_pInternalMapping; +} + +void memdescUnmapInternal +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + MEMDESC_MAP_INTERNAL_TYPE mapType; + + NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL); + NV_ASSERT_OR_RETURN_VOID(pMemDesc->_pInternalMapping != NULL && pMemDesc->_internalMappingRefCount != 0); + + if (pMemDesc->_addressSpace == ADDR_FBMEM) + { + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + } + + mapType = memdescGetMapInternalType(pGpu, pMemDesc); + + if (mapType == MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT || mapType == MEMDESC_MAP_INTERNAL_TYPE_BAR2) + { + memdescFlushCpuCaches(pGpu, pMemDesc); + } + + if (--pMemDesc->_internalMappingRefCount == 0) + { + switch (mapType) + { + case MEMDESC_MAP_INTERNAL_TYPE_GSP: + break; + case MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT: + memdescUnmapOld(pMemDesc, NV_TRUE, 0, + pMemDesc->_pInternalMapping, pMemDesc->_pInternalMappingPriv); + break; + + case MEMDESC_MAP_INTERNAL_TYPE_COHERENT_FBMEM: + { + kbusUnmapCoherentCpuMapping_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), pMemDesc); + break; + } + case MEMDESC_MAP_INTERNAL_TYPE_BAR2: + { + NvU8 *p = (NvU8 *)pMemDesc->_pInternalMapping; + + kbusUnmapBar2ApertureWithFlags_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), pMemDesc, &p, flags); + break; + } + + default: + DBG_BREAKPOINT(); + } + + pMemDesc->_pInternalMapping = NULL; + pMemDesc->_pInternalMappingPriv = NULL; + pMemDesc->_internalMappingRefCount = 0; + } + + // Flush for direct mappings too to keep the behavior + if (((flags & TRANSFER_FLAGS_DEFER_FLUSH) == 0) && + (mapType == MEMDESC_MAP_INTERNAL_TYPE_SYSMEM_DIRECT || mapType == MEMDESC_MAP_INTERNAL_TYPE_BAR2)) + { + kbusFlush_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), + kbusGetFlushAperture(GPU_GET_KERNEL_BUS(pGpu), memdescGetAddressSpace(pMemDesc)) | BUS_FLUSH_USE_PCIE_READ); + } +} + +/*! 
+ * Describe an existing region of memory in a memory descriptor + * + * Memory must be physically contiguous. + * + * The memory descriptor must be initialized with + * memdescCreate*(), typically memdescCreateExisting() + * prior to calling memdescDescribe. + * + * memdescDescribe() now only updates the fields needed in the call. + * + * @param[out] pMemDesc Memory descriptor to fill + * @param[in] AddressSpace Address space of memory + * @param[in] Base Physical address of region + * @param[in] Size Size of region + * + * @returns None + */ +void +memdescDescribe +( + MEMORY_DESCRIPTOR *pMemDesc, + NV_ADDRESS_SPACE AddressSpace, + RmPhysAddr Base, + NvU64 Size +) +{ + // Some sanity checks to see if we went through MemCreate*() first + NV_ASSERT((pMemDesc->RefCount == 1) && + (memdescGetDestroyCallbackList(pMemDesc) == NULL) && + (pMemDesc->PteAdjust == 0)); + + NV_ASSERT(pMemDesc->_pIommuMappings == NULL); + NV_ASSERT(pMemDesc->Allocated == 0); + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + // + // Check if the base address accounts for the DMA window start address + // (always in the high, unaddressable bits of the address) and add it + // if necessary. On most platforms, the DMA window start address will + // simply be 0. + // + // This is most likely to happen in cases where the Base address is + // read directly from a register or MMU entry, which does not already + // account for the DMA window. + // + if (pMemDesc->pGpu == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "unable to check Base 0x%016llx for DMA window\n", Base); + } + else if (AddressSpace == ADDR_SYSMEM) + { + OBJGPU *pGpu = pMemDesc->pGpu; + if (pGpu) + { + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + NvU32 physAddrWidth = gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM); + if ((Base & ~(NVBIT64(physAddrWidth) - 1)) == 0) + { + Base += pKernelBif->dmaWindowStartAddress; + } + } + } + + if (pMemDesc->Alignment != 0) + { + NV_ASSERT(NV_FLOOR_TO_QUANTA(Base, pMemDesc->Alignment) == Base); + } + + pMemDesc->Size = Size; + pMemDesc->ActualSize = Size; + pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; + pMemDesc->_addressSpace = AddressSpace; + pMemDesc->_pteArray[0] = Base & ~RM_PAGE_MASK; + pMemDesc->_subDeviceAllocCount = 1; + pMemDesc->PteAdjust = NvU64_LO32(Base) & RM_PAGE_MASK; + pMemDesc->PageCount = ((Size + pMemDesc->PteAdjust + RM_PAGE_SIZE - 1) >> RM_PAGE_SHIFT); + pMemDesc->_pParentDescriptor = NULL; + pMemDesc->childDescriptorCnt = 0; +} + +/*! + * Fill the PTE array of a memory descriptor with an array of addresses + * returned by pmaAllocatePages(). + * + * Memory must be physically discontiguous. For the contiguous case + * memdescDescribe() is more apt. + * + * The memory descriptor must be initialized with memdescCreate*(), + * typically memdescCreateExisting() prior to calling + * memdescFillPages(). 
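+ *
+ * Illustrative usage (names are hypothetical): after pmaAllocatePages() has
+ * returned 'pageCount' page addresses of size 'pageSize' in the array 'pPages',
+ * the caller can populate the descriptor with
+ *   memdescFillPages(pMemDesc, 0, pPages, pageCount, pageSize);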
+ * + * @param[in] pMemDesc Memory descriptor to fill + * @param[in] pageIndex Index into memory descriptor to fill from + * @param[in] pPages Array of physical addresses + * @param[in] pageCount Number of entries in pPages + * @param[in] pageSize Size of each page in pPages + * + * @returns None + */ +void +memdescFillPages +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 pageIndex, + NvU64 *pPages, + NvU32 pageCount, + NvU32 pageSize +) +{ + NvU32 i, j, k; + NvU32 numChunks4k = pageSize / RM_PAGE_SIZE; + NvU32 offset4k = numChunks4k * pageIndex; + NvU32 pageCount4k = numChunks4k * pageCount; + NvU32 result4k, limit4k; + NvU64 addr; + + NV_ASSERT(pMemDesc != NULL); + + NV_ASSERT(offset4k < pMemDesc->PageCount); + NV_ASSERT(portSafeAddU32(offset4k, pageCount4k, &result4k)); + + // + // There is a possibility that the pMemDesc was created using 4K aligned + // allocSize, but the actual memory allocator could align up the allocation + // size based on its supported pageSize, (e.g. PMA supports 64K pages). In + // that case, pageCount4k would be greater than pMemdesc->pageCount. We + // limit pageCount4k to stay within pMemdesc->pageCount in that case. + // + if (result4k > pMemDesc->PageCount) + pageCount4k = pMemDesc->PageCount - offset4k; + + NV_ASSERT(pageSize > 0); + NV_ASSERT(0 == (pageSize & (RM_PAGE_SIZE - 1))); + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + // Fill _pteArray array using numChunks4k as a stride + for (i = 0, j = offset4k; i < pageCount; i++, j += numChunks4k) + { + pMemDesc->_pteArray[j] = addr = pPages[i]; + + // Fill _pteArray at 4K granularity + limit4k = NV_MIN(j + numChunks4k, pageCount4k); + + addr += RM_PAGE_SIZE; + for (k = j + 1; k < limit4k; k++, addr += RM_PAGE_SIZE) + pMemDesc->_pteArray[k] = addr; + } +} + +// +// SubMemory per subdevice chart: (MD - Memory Descriptor, SD - subdevice) +// +// If we try to create submemory of descriptor which has subdevices: +// +// [Top level MD] +// ^ | +// | +--------> [ Subdevice 0 MD ] --------> [Subdevice 1 MD] +// | ^ ^ +// | | | +// [SubMemory top level MD] | | +// | | | +// +--------> [Subdevice 0 SubMemory MD] --------> [Subdevice 1 SubMemory MD] +// +// Top Level MD : parent of SubMemoryTopLevelMD; has subdescriptors +// for two subdevices +// SubMemory top level MD : has pointer to parent memory descriptor; has two +// subdevice MDs +// Subdevice 0 MD : subdevice MD of topLevelMD and parent of SD0 +// submemory descriptor; has pointer to next in the +// list of subdevice MDs +// Subdevice 0 SubMemory MD : submemory of subdevice 0 MD; has pointer to +// parent, subdevice 0 MD and to next in list of +// submemory subdevice memory descriptors +// + + + +/*! + * @brief Create a new memory descriptor that is a subset of pMemDesc. 
If + * pMemDesc has subdevice memory descriptors subMemory will be created for all + * subdevices and new memory descriptor will be top level for them (ASCII art) + * + * @param[out] ppMemDescNew New memory descriptor + * @param[in] pMemDesc Original memory descriptor + * @param[in] pGpu The GPU that this memory will be mapped to + * @param[in] Offset Sub memory descriptor starts at pMemdesc+Offset + * @param[in] Size For Size bytes + * + * @returns None + */ +NV_STATUS +memdescCreateSubMem +( + MEMORY_DESCRIPTOR **ppMemDescNew, + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + NvU64 Offset, + NvU64 Size +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pMemDescNew; + NvU32 subDevInst; + NvU64 tmpSize = Size; + MEMORY_DESCRIPTOR *pLast; + MEMORY_DESCRIPTOR *pNew; + OBJGPU *pGpuChild; + + // Default to the original memdesc's GPU if none is specified + if (pGpu == NULL) + { + pGpu = pMemDesc->pGpu; + } + + // Allocation size should be adjusted for the memory descriptor _pageSize. + // Also note that the first 4k page may not be at _pageSize boundary so at + // the time of the mapping, we maybe overmapping at the beginning or end of + // the descriptor. To fix it in the right way, memory descriptor needs to + // be further cleaned. Do not round to page size if client specifies so. + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE) && + pMemDesc->_pageSize != 0) + { + PMEMORY_DESCRIPTOR pTempMemDesc = pMemDesc; + NvU64 pageOffset; + + if (memdescHasSubDeviceMemDescs(pMemDesc)) + { + NV_ASSERT(pGpu); + pTempMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + } + + pageOffset = memdescGetPhysAddr(pTempMemDesc, AT_CPU, Offset) & + (pTempMemDesc->_pageSize - 1); + tmpSize = RM_ALIGN_UP(pageOffset + Size, pTempMemDesc->_pageSize); + } + + // Allocate the new MEMORY_DESCRIPTOR + status = memdescCreate(&pMemDescNew, pGpu, tmpSize, 0, + !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS), + pMemDesc->_addressSpace, + pMemDesc->_cpuCacheAttrib, + ((pMemDesc->_flags & ~MEMDESC_FLAGS_PRE_ALLOCATED) | MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE)); + + if (status != NV_OK) + { + return status; + } + + // Fill in various fields as best we can; XXX this can get sort of sketchy + // in places, which should be all the more motivation to rip some of these + // fields out of the MEMORY_DESCRIPTOR. 
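+    // Copy the attributes a submemory view shares with its parent (kernel-mode
+    // flag, PTE kind, HW resource id, encryption flag, page size, GPU cache
+    // attributes, GFID/suballocator state) and link the child to its parent.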
+ if (pMemDesc->_flags & MEMDESC_FLAGS_KERNEL_MODE) + pMemDescNew->_flags |= MEMDESC_FLAGS_KERNEL_MODE; + else + pMemDescNew->_flags &= ~MEMDESC_FLAGS_KERNEL_MODE; + + pMemDescNew->Size = Size; + pMemDescNew->_pteKind = pMemDesc->_pteKind; + pMemDescNew->_hwResId = pMemDesc->_hwResId; + if (pMemDesc->_flags & MEMDESC_FLAGS_ENCRYPTED) + pMemDescNew->_flags |= MEMDESC_FLAGS_ENCRYPTED; + else + pMemDescNew->_flags &= ~MEMDESC_FLAGS_ENCRYPTED; + pMemDescNew->_pageSize = pMemDesc->_pageSize; + pMemDescNew->_gpuCacheAttrib = pMemDesc->_gpuCacheAttrib; + pMemDescNew->_gpuP2PCacheAttrib = pMemDesc->_gpuP2PCacheAttrib; + pMemDescNew->gfid = pMemDesc->gfid; + pMemDescNew->bUsingSuballocator = pMemDesc->bUsingSuballocator; + pMemDescNew->_pParentDescriptor = pMemDesc; + pMemDesc->childDescriptorCnt++; + + pMemDescNew->subMemOffset = Offset; + + // increase refCount of parent descriptor + memdescAddRef(pMemDesc); + + // Fill in the PteArray and PteAdjust + if ((pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) || + (pMemDesc->PageCount == 1)) + { + // Compute the base address, then fill it in + RmPhysAddr Base = pMemDesc->_pteArray[0] + pMemDesc->PteAdjust + Offset; + pMemDescNew->_pteArray[0] = Base & ~RM_PAGE_MASK; + pMemDescNew->PteAdjust = NvU64_LO32(Base) & RM_PAGE_MASK; + } + else + { + // More complicated... + RmPhysAddr Adjust; + NvU32 PageIndex, i; + + // We start this many bytes into the memory alloc + Adjust = pMemDesc->PteAdjust + Offset; + + // Break it down into pages (PageIndex) and bytes (PteAdjust) + PageIndex = (NvU32)(Adjust >> RM_PAGE_SHIFT); + pMemDescNew->PteAdjust = NvU64_LO32(Adjust) & RM_PAGE_MASK; + + // Fill in the PTEs; remember to copy the extra PTE, in case we need it + if (pMemDesc->PageCount) + { + for (i = 0; i < pMemDescNew->PageCount+1; i++) + { + NvU32 j = i + PageIndex; + if (j < pMemDesc->PageCount) + { + pMemDescNew->_pteArray[i] = pMemDesc->_pteArray[j]; + } + else + { + // + // This case can happen with page size greater than 4KB. + // Since pages are always tracked at 4KB granularity the + // subset description may overflow the parent memdesc. + // + // In this case the best we can do is describe the contiguous + // memory after the last 4KB page in the sub-memdesc. + // + // TODO: Tracking memdesc pages at native page size would + // remove the need for several hacks, including this one. + // + NV_ASSERT(i > 0); + pMemDescNew->_pteArray[i] = pMemDescNew->_pteArray[i - 1] + RM_PAGE_SIZE; + } + } + } + } + + if ((pMemDesc->_addressSpace == ADDR_SYSMEM) && + !memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_CPU_ONLY) && + !memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1)) + { + // + // For different IOVA spaces, the IOMMU mapping will often not be a + // subrange of the original mapping. + // + // Request the submapping to be associated with the submemdesc. + // + // TODO: merge the new IOMMU paths with the SMMU path above (see bug + // 1625121). 
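+        // memdescMapIommu() below acquires an IOVA mapping for the new submemdesc
+        // in pGpu's IOVA space and validates that the resulting addresses fall
+        // inside the GPU's DMA window.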
+ // + status = memdescMapIommu(pMemDescNew, pGpu->busInfo.iovaspaceId); + if (status != NV_OK) + { + memdescDestroy(pMemDescNew); + return status; + } + } + + // Support for SLI submemory per-subdevice allocations (refer to chart) + if (memdescHasSubDeviceMemDescs(pMemDesc)) + { + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + + if (gpumgrGetBcEnabledStatus(pGpu) && (pMemDesc->_addressSpace == ADDR_FBMEM)) + { + NV_ASSERT(!!(pMemDesc->_flags & MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE)); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + pLast = pMemDescNew; + + pMemDescNew->_subDeviceAllocCount = pMemDesc->_subDeviceAllocCount; + + for (subDevInst = 0; subDevInst < pMemDesc->_subDeviceAllocCount; subDevInst++) + { + pGpuChild = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), subDevInst); + status = memdescCreateSubMem(&pNew, memdescGetMemDescFromGpu(pMemDesc, pGpuChild), pGpuChild, Offset, Size); + + if (status != NV_OK) + { + while (NULL != pMemDescNew) + { + pNew = pMemDescNew; + pMemDescNew = pMemDescNew->_pNext; + memdescDestroy(pNew); + } + return status; + } + + pLast->_pNext = pNew; + pLast = pNew; + } + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + + *ppMemDescNew = pMemDescNew; + + return NV_OK; +} + +/*! + * Given a memdesc, this checks if the allocated memory falls under subheap or in GPA address space + */ +static NvBool +_memIsSriovMappingsEnabled +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + return gpuIsSriovEnabled(pMemDesc->pGpu) && + (((pMemDesc->_flags & MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE) && pMemDesc->bUsingSuballocator) || + (pMemDesc->_flags & MEMDESC_FLAGS_GUEST_ALLOCATED)); +} + +/*! + * @brief Return the physical addresses of pMemdesc + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] pGpu GPU to return the addresses for + * @param[in] addressTranslation Address translation identifier + * @param[in] offset Offset into memory descriptor + * @param[in] stride How much to advance the offset for each + * consecutive address + * @param[in] count How many addresses to retrieve + * @param[out] pAddresses Returned array of addresses + * + */ +void memdescGetPhysAddrsForGpu(MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pGpu, + ADDRESS_TRANSLATION addressTranslation, + NvU64 offset, + NvU64 stride, + NvU64 count, + RmPhysAddr *pAddresses) +{ + // + // Get the PTE array that we should use for phys addr lookups based on the + // MMU context. (see bug 1625121) + // + NvU64 i; + RmPhysAddr *pteArray = memdescGetPteArrayForGpu(pMemDesc, pGpu, addressTranslation); + const NvBool contiguous = (memdescGetPteArraySize(pMemDesc, addressTranslation) == 1); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + offset += pMemDesc->PteAdjust; + + for (i = 0; i < count; ++i) + { + if (contiguous) + { + pAddresses[i] = pteArray[0] + offset; + } + else + { + NvU32 PageIndex = (NvU32)(offset >> RM_PAGE_SHIFT); + pAddresses[i] = pteArray[PageIndex] + (offset & RM_PAGE_MASK); + } + + offset += stride; + } +} + + +/*! 
+ * @brief Return the physical addresses of pMemdesc
+ *
+ * @param[in]  pMemDesc           Memory descriptor used
+ * @param[in]  addressTranslation Address translation identifier
+ * @param[in]  offset             Offset into memory descriptor
+ * @param[in]  stride             How much to advance the offset for each
+ *                                consecutive address
+ * @param[in]  count              How many addresses to retrieve
+ * @param[out] pAddresses         Returned array of addresses
+ *
+ */
+void memdescGetPhysAddrs(MEMORY_DESCRIPTOR *pMemDesc,
+                         ADDRESS_TRANSLATION addressTranslation,
+                         NvU64 offset,
+                         NvU64 stride,
+                         NvU64 count,
+                         RmPhysAddr *pAddresses)
+{
+    memdescGetPhysAddrsForGpu(pMemDesc, pMemDesc->pGpu, addressTranslation, offset, stride, count, pAddresses);
+}
+
+/*!
+ * @brief Return the physical address of pMemdesc+Offset
+ *
+ * Convenience wrapper over memdescGetPhysAddrs() for a single address.
+ *
+ * @param[in] pMemDesc           Memory descriptor used
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] offset             Offset into memory descriptor
+ *
+ * @returns A physical address
+ */
+RmPhysAddr
+memdescGetPhysAddr
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    ADDRESS_TRANSLATION addressTranslation,
+    NvU64 offset
+)
+{
+    RmPhysAddr addr;
+    memdescGetPhysAddrs(pMemDesc, addressTranslation, offset, 0, 1, &addr);
+    return addr;
+}
+
+/*!
+ * @brief Return physical address for page specified by PteIndex
+ *
+ * @param[in] pMemDesc           Memory descriptor to use
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] PteIndex           Look up this PteIndex
+ *
+ * @returns A physical address
+ */
+RmPhysAddr
+memdescGetPte
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    ADDRESS_TRANSLATION addressTranslation,
+    NvU32 PteIndex
+)
+{
+    //
+    // Get the PTE array that we should use for phys addr lookups based on the
+    // MMU context. (see bug 1625121)
+    //
+    RmPhysAddr *pteArray = memdescGetPteArray(pMemDesc, addressTranslation);
+    RmPhysAddr PhysAddr;
+
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+
+    if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
+    {
+        PhysAddr = pteArray[0] + (PteIndex << RM_PAGE_SHIFT);
+    }
+    else
+    {
+        PhysAddr = pteArray[PteIndex];
+    }
+
+    return PhysAddr;
+}
+
+/*!
+ * @brief Set the physical address for the page specified by PteIndex
+ *
+ * @param[in] pMemDesc           Memory descriptor to use
+ * @param[in] addressTranslation Address translation identifier
+ * @param[in] PteIndex           Store at this PteIndex
+ * @param[in] PhysAddr           New physical address for the page
+ *
+ * @returns None
+ */
+void
+memdescSetPte
+(
+    PMEMORY_DESCRIPTOR pMemDesc,
+    ADDRESS_TRANSLATION addressTranslation,
+    NvU32 PteIndex,
+    RmPhysAddr PhysAddr
+)
+{
+    //
+    // Get the PTE array that we should use for phys addr lookups based on the
+    // MMU context. (see bug 1625121)
+    //
+    RmPhysAddr *pteArray = memdescGetPteArray(pMemDesc, addressTranslation);
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+
+    if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)
+    {
+        NV_ASSERT_OR_RETURN_VOID(PteIndex == 0);
+    }
+
+    pteArray[PteIndex] = PhysAddr;
+
+    // Free pteArraySpa
+    portMemFree(pMemDesc->pPteSpaMappings);
+    pMemDesc->pPteSpaMappings = NULL;
+}
+
+/*!
+ * @brief Return page array size based on the MMU context + * For SRIOV, the host context (AT_PA) will + * have discontiguous view of the GPA in SPA space + * This is treated similar to discontiguous memdescs + * + * @param[in] pMemDesc Memory descriptor to use + * @param[in] addressTranslation Address translation identifier + * + * @returns PageArray + */ +NvU32 memdescGetPteArraySize(MEMORY_DESCRIPTOR *pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + // Contiguous allocations in SPA domain can be non-contiguous at vmmusegment granularity. + // Hence treat SPA domain allocations as non-contiguous by default. + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) || + ((addressTranslation == AT_PA) && (pMemDesc->_addressSpace == ADDR_FBMEM) && _memIsSriovMappingsEnabled(pMemDesc))) + { + return NvU64_LO32(pMemDesc->PageCount); + } + return 1; +} + +/*! + * @brief Return page array + * + * @param[in] pMemDesc Memory descriptor to use + * @param[in] pGpu GPU to get the PTE array for. + * @param[in] addressTranslation Address translation identifier + * + * @returns PageArray + */ +RmPhysAddr * +memdescGetPteArrayForGpu +( + PMEMORY_DESCRIPTOR pMemDesc, + OBJGPU *pGpu, + ADDRESS_TRANSLATION addressTranslation +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + switch (AT_VALUE(addressTranslation)) + { + // + // In SRIOV systems, an access from guest has to go through the following translations + // GVA -> GPA -> SPA + // + // Given HOST manages channel/memory management for guest, there are certain code paths that + // expects VA -> GPA translations and some may need GPA -> SPA translations. We use addressTranslation + // to differentiate between these cases. + // Since GPA -> SPA is very similar to IOMMU xlation and since existing AT_PA is used only in + // SYSMEM allocations, we decided to reuse AT_PA addressTranslation to fetch GPA -> SPA xlations. + // In case of non-SRIOV systems, using AT_PA will fall back to AT_GPU or default context. + // + // pMemDesc -> _pteArray tracks GVA -> GPA translations + // pMemDesc -> pPteSpaMappings tracks GPA -> SPA translations + // + + case AT_VALUE(AT_PA): + { + } + case AT_VALUE(AT_GPU): + { + // Imported ADDR_FABRIC_V2 memdescs are device-less. + if (pGpu != NULL) + { + PIOVAMAPPING pIovaMap = memdescGetIommuMap(pMemDesc, pGpu->busInfo.iovaspaceId); + if (pIovaMap != NULL) + { + return pIovaMap->iovaArray; + } + } + + // + // If no IOMMU mapping exists in the default IOVASPACE, fall + // through and use the physical memory descriptor instead. + // + } + default: + { + return pMemDesc->_pteArray; + } + } +} + + + +/*! + * @brief Convert aperture into a descriptive string. + * + * @param[in] addressSpace + * + * @returns String + * + * @todo "text" + */ +const char * +memdescGetApertureString +( + NV_ADDRESS_SPACE addressSpace +) +{ + static const char* ADDR_FBMEM_STR = "VIDEO MEMORY"; + static const char* ADDR_SYSMEM_STR = "SYSTEM MEMORY"; + + if (addressSpace == ADDR_FBMEM) + { + return ADDR_FBMEM_STR; + } + + if (addressSpace == ADDR_SYSMEM) + { + return ADDR_SYSMEM_STR; + } + + return NULL; +} + +/*! 
+ * @brief Compare two memory descriptors to see if the memory described the same + * + * @param[in] pMemDescOne + * @param[in] pMemDescTwo + * + * @returns NV_TRUE if the memory descriptors refer to the same memory + */ +NvBool +memdescDescIsEqual +( + MEMORY_DESCRIPTOR *pMemDescOne, + MEMORY_DESCRIPTOR *pMemDescTwo +) +{ + if ((pMemDescOne == NULL) || (pMemDescTwo == NULL)) + return NV_FALSE; + + if (pMemDescOne->_addressSpace != pMemDescTwo->_addressSpace) + return NV_FALSE; + + // All the physical memory views should match. + if ((memdescGetPhysAddr(pMemDescOne, AT_CPU, 0) != memdescGetPhysAddr(pMemDescTwo, AT_CPU, 0)) || + (memdescGetPhysAddr(pMemDescOne, AT_GPU, 0) != memdescGetPhysAddr(pMemDescTwo, AT_GPU, 0))) + return NV_FALSE; + + if (memdescGetCpuCacheAttrib(pMemDescOne) != memdescGetCpuCacheAttrib(pMemDescTwo)) + return NV_FALSE; + + if (pMemDescOne->Size != pMemDescTwo->Size) + return NV_FALSE; + + if (pMemDescOne->Alignment != pMemDescTwo->Alignment) + return NV_FALSE; + + if (pMemDescOne->_pageSize != pMemDescTwo->_pageSize) + return NV_FALSE; + + return NV_TRUE; +} + +/*! + * @brief Add callback block to the destroy callback queue + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] pCb Callee allocated block with callback func/arg + * + * @returns nothing + */ +void +memdescAddDestroyCallback +( + MEMORY_DESCRIPTOR *pMemDesc, + MEM_DESC_DESTROY_CALLBACK *pCb +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pCb->pNext = memdescGetDestroyCallbackList(pMemDesc); + memdescSetDestroyCallbackList(pMemDesc, pCb); +} + +/*! + * @brief Remove callback block from the destroy callback queue + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] pRemoveCb Callee allocated block with callback func/arg + * + * @returns nothing + */ +void +memdescRemoveDestroyCallback +( + MEMORY_DESCRIPTOR *pMemDesc, + MEM_DESC_DESTROY_CALLBACK *pRemoveCb +) +{ + MEM_DESC_DESTROY_CALLBACK *pCb = memdescGetDestroyCallbackList(pMemDesc); + MEM_DESC_DESTROY_CALLBACK *pPrev = NULL; + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + while (pCb) + { + if (pCb == pRemoveCb) + { + if (pPrev == NULL) + { + memdescSetDestroyCallbackList(pMemDesc, pCb->pNext); + } + else + { + pPrev->pNext = pCb->pNext; + } + break; + } + pPrev = pCb; + pCb = pCb->pNext; + } +} + +/*! + * @brief Retrieves a subdevice's memory descriptor by subdevice instance + * + * Subdevice memory descriptors are memory descriptors that describe + * per-subdevice memory buffers. This functionality is required by our current + * SLI programming model as our memdescAlloc() calls are primarily broadcast + * operations. A singular memdesc works for video memory as the + * heaps are symmetric. However, we run into trouble when dealing with system + * memory as both GPUs then share the same address space and symmetric + * addressing is no longer possible. + * + * N.B. The rational for exposing this routine is that it keeps SLI-isms out of + * most of the RM -- the alternative approach would've been to pass in the + * subdevice or a pGpu for all memdesc methods which would require more code + * changes solely for SLI. Long term hopefully we can transition to a unicast + * allocation model (SLI loops above memdescAlloc()/memdescCreate()) and the + * subdevice support in memdesc can (easily) be deleted. 
This approach also + * provides a safety net against misuse, e.g., if we added pGpu to + * memdescGetPhysAddr, current code which utilizes that routine outside an SLI loop + * would execute cleanly even though it's incorrect. + * + * @param[in] pMemDesc Memory descriptor to query + * @param[in] subDeviceInst SLI subdevice instance (subdevice - 1) + * + * @returns Memory descriptor if one exist for the subdevice. + * NULL if none is found. + */ +MEMORY_DESCRIPTOR * +memdescGetMemDescFromSubDeviceInst(MEMORY_DESCRIPTOR *pMemDesc, NvU32 subDeviceInst) +{ + if (!memdescHasSubDeviceMemDescs(pMemDesc)) + { + return pMemDesc; + } + else + { + return memdescGetMemDescFromIndex(pMemDesc, subDeviceInst); + } +} + +/*! + * @brief Retrieves a subdevice's memory descriptor by GPU object + * + * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice memory + * descriptors + * + * @param[in] pMemDesc Memory descriptor to query + * @param[in] pGpu + * + * @returns Memory descriptor if one exist for the GPU. + * NULL if none is found. + */ +MEMORY_DESCRIPTOR * +memdescGetMemDescFromGpu(MEMORY_DESCRIPTOR *pMemDesc, OBJGPU *pGpu) +{ + NvU32 subDeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + return memdescGetMemDescFromSubDeviceInst(pMemDesc, subDeviceInst); +} + +/*! + * @brief Retrieves a subdevice's memory descriptor by memdesc index. + * + * See memdescGetMemDescFromSubDeviceInst for an explanation of subdevice memory + * descriptors + * + * @param[in] pMemDesc Memory descriptor to query + * @param[in] index Index into array of memdesc + * + * @returns Memory descriptor if one exist for the GPU. + * NULL if none is found. + */ +MEMORY_DESCRIPTOR * +memdescGetMemDescFromIndex(MEMORY_DESCRIPTOR *pMemDesc, NvU32 index) +{ + if (!memdescHasSubDeviceMemDescs(pMemDesc)) + { + return pMemDesc; + } + else + { + MEMORY_DESCRIPTOR *pSubDevMemDesc = pMemDesc->_pNext; + + NV_ASSERT(pSubDevMemDesc); + + while (index--) + { + pSubDevMemDesc = pSubDevMemDesc->_pNext; + + if (!pSubDevMemDesc) + { + NV_ASSERT(0); + return NULL; + } + } + + return pSubDevMemDesc; + } +} + +/*! + * @brief Set address for a fixed heap allocation. + * + * Offset must refer to the heap. A later memdescAlloc() will + * force this offset. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] fbOffset Offset to refer to + * + * @returns nothing + */ +void +memdescSetHeapOffset +( + MEMORY_DESCRIPTOR *pMemDesc, + RmPhysAddr fbOffset +) +{ + NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM); + NV_ASSERT(pMemDesc->Allocated == NV_FALSE); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_flags |= MEMDESC_FLAGS_FIXED_ADDRESS_ALLOCATE; + pMemDesc->_pteArray[0] = fbOffset; +} + +/*! + * @brief Set GPU cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to GPU cacheable + * + * @returns nothing + */ +void memdescSetGpuCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cacheAttrib +) +{ + NV_ASSERT(pMemDesc->Allocated == NV_FALSE); + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_gpuCacheAttrib = cacheAttrib; +} + +/*! + * @brief Get GPU P2P cache attributes + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current GPU P2P cache attributes + */ +NvU32 memdescGetGpuP2PCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_gpuP2PCacheAttrib; +} + +/*! 
+ * @brief Set GPU P2P cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to GPU P2P cacheable + * + * @returns nothing + */ +void memdescSetGpuP2PCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cacheAttrib +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_gpuP2PCacheAttrib = cacheAttrib; +} + +/*! + * @brief Set CPU cacheability + * + * A later memdescAlloc() will use this setting. + * + * @param[in] pMemDesc Memory descriptor to update + * @param[in] cacheAttrib Set memory to CPU cacheable + * + * @returns nothing + */ +void memdescSetCpuCacheAttrib +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 cpuCacheAttrib +) +{ + // + // When running 64-bit MODS on ARM v8, we need to force all CPU mappings as WC. + // This seems to be an issue with glibc. See bug 1556221. + // + // Ideally, this should have been set based on a Core Logic (CL) property. + // But chipset initialization will only happen during bifStateInit(). + // RM can makes sysmem CPU mappings before bifStateInit(). + // + if (RMCFG_FEATURE_PLATFORM_MODS && NVCPU_IS_AARCH64) + { + if (cpuCacheAttrib == NV_MEMORY_UNCACHED) + { + cpuCacheAttrib = NV_MEMORY_WRITECOMBINED; + } + } + + pMemDesc->_cpuCacheAttrib = cpuCacheAttrib; +} + +/*! + * @brief Print contents of a MEMORY_DESCRIPTOR in a human readable format. + * + * @param[in] pMemDesc Memory Descriptor to print + * @param[in] bPrintIndividualPages Individual pages will also be printed + * iff they are discontiguous + * @param[in] pPrefixMessage Message that will be printed before the contents + * of the Memory Descriptor are printed. + * + * @returns nothing + */ +void memdescPrintMemdesc +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool bPrintIndividualPages, + const char *pPrefixMessage +) +{ +#if 0 + NvU32 i; + + if ((DBG_RMMSG_CHECK(DBG_LEVEL_INFO) == 0) || (pPrefixMessage == NULL) || (pMemDesc == NULL)) + { + return; + } + + NV_PRINTF(LEVEL_INFO, + "%s Aperture %s starting at 0x%llx and of size 0x%llx\n", + pPrefixMessage, + memdescGetApertureString(pMemDesc->_addressSpace), + memdescGetPhysAddr(pMemDesc, AT_CPU, 0), + pMemDesc->Size); + + if ((bPrintIndividualPages == NV_TRUE) && + (pMemDesc->PageCount > 1) && + (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS))) + { + for (i = 0; i < pMemDesc->PageCount; i++) + { + NV_PRINTF(LEVEL_INFO, + " contains page starting @0x%llx\n", + pMemDesc->_pteArray[i]); + } + } + + // TODO: merge with SMMU path above (see bug 1625121). + if (pMemDesc->_pIommuMappings != NULL) + { + if (!memdescIsSubMemoryMemDesc(pMemDesc)) + { + PIOVAMAPPING pIovaMap = pMemDesc->_pIommuMappings; + while (pIovaMap != NULL) + { + NV_PRINTF(LEVEL_INFO, + "Has additional IOMMU mapping for IOVA space 0x%x starting @ 0x%llx\n", + pIovaMap->iovaspaceId, + pIovaMap->iovaArray[0]); + pIovaMap = pIovaMap->pNext; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "Has additional IOMMU mapping starting @ 0x%llx\n", + memdescGetPhysAddr(pMemDesc, AT_PA, 0)); + } + } +#endif // NV_PRINTF_ENABLED +} + +/*! + * @brief Return page offset from a MEMORY_DESCRIPTOR for an arbitrary power of two page size + * + * PageAdjust covers the 4KB alignment, but must include bits from the address for big pages. 
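+ * Equivalently, the value returned is (PteAdjust + _pteArray[0]) & (pageSize - 1).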
+ * + * @param[in] pMemDesc Memory Descriptor to print + * @param[in] pageSize Page size (4096, 64K, 128K, etc) + * + * @returns nothing + */ +NvU64 memdescGetPageOffset +( + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 pageSize +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return (pMemDesc->PteAdjust + pMemDesc->_pteArray[0]) & (pageSize-1); +} + +/*! + * @brief Get PTE kind using GPU + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pGpu GPU to be used get supported kind + * @param[in] addressTranslation Address translation identifier + * + * @returns Current PTE kind value. + */ +NvU32 memdescGetPteKindForGpu +( + PMEMORY_DESCRIPTOR pMemDesc, + OBJGPU *pGpu +) +{ + return memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pMemDesc->_pteKind); +} + +/*! + * @brief Set PTE kind using GPU. + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pGpu GPU to be used set supported kind + * @param[in] addressTranslation Address translation identifier + * @param[in] pteKind New PTE kind + * + * @returns nothing + */ +void memdescSetPteKindForGpu +( + PMEMORY_DESCRIPTOR pMemDesc, + OBJGPU *pGpu, + NvU32 pteKind +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pteKind = memmgrGetSwPteKindFromHwPteKind_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pteKind); + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_SET_KIND, NV_TRUE); +} + +/*! + * @brief Set PTE kind compressed value. + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pteKind New PTE kind compressed value + * + * @returns nothing + */ +void memdescSetPteKindCompressed +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 pteKindCmpr +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pteKindCompressed = pteKindCmpr; +} + +/*! + * @brief Get PTE kind compressed value. + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * + * @returns Current PTE kind compressed value. + */ +NvU32 memdescGetPteKindCompressed +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_pteKindCompressed; +} + +/*! + * @brief Get kernel mapping + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current kernel mapping + */ +NvP64 memdescGetKernelMapping +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_kernelMapping; +} + +/*! + * @brief Set kernel mapping + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] kernelMapping New kernel mapping + * + * @returns nothing + */ +void memdescSetKernelMapping +( + MEMORY_DESCRIPTOR *pMemDesc, + NvP64 kernelMapping +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_kernelMapping = kernelMapping; +} + +/*! + * @brief Get privileged kernel mapping + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Current privileged kernel mapping + */ +NvP64 memdescGetKernelMappingPriv +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_kernelMappingPriv; +} + +/*! 
+ * @brief Set privileged kernel mapping
+ *
+ * @param[in] pMemDesc          Memory descriptor pointer
+ * @param[in] kernelMappingPriv New privileged kernel mapping
+ *
+ * @returns nothing
+ */
+void memdescSetKernelMappingPriv
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvP64 kernelMappingPriv
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_kernelMappingPriv = kernelMappingPriv;
+}
+
+
+/*!
+ * @brief Get standby buffer memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Pointer to standby buffer memory descriptor
+ */
+MEMORY_DESCRIPTOR *memdescGetStandbyBuffer
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    return pMemDesc->_pStandbyBuffer;
+}
+
+/*!
+ * @brief Set standby buffer memory descriptor
+ *
+ * @param[in] pMemDesc       Memory descriptor pointer
+ * @param[in] pStandbyBuffer Standby buffer memory descriptor pointer
+ *
+ * @returns nothing
+ */
+void memdescSetStandbyBuffer
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    MEMORY_DESCRIPTOR *pStandbyBuffer
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_pStandbyBuffer = pStandbyBuffer;
+}
+
+/*!
+ * @brief Set mem destroy callback list pointer
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] pCb      Memory destroy callback list pointer
+ *
+ * @returns nothing
+ */
+void memdescSetDestroyCallbackList
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    MEM_DESC_DESTROY_CALLBACK *pCb
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_pMemDestroyCallbackList = pCb;
+}
+
+/*!
+ * @brief Get guest ID for specified memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ *
+ * @returns Guest ID value
+ */
+NvU64 memdescGetGuestId
+(
+    MEMORY_DESCRIPTOR *pMemDesc
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    return pMemDesc->_guestId;
+}
+
+/*!
+ * @brief Set guest ID for memory descriptor
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] guestId  New guest ID
+ *
+ * @returns nothing
+ */
+void memdescSetGuestId
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU64 guestId
+)
+{
+    NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    pMemDesc->_guestId = guestId;
+}
+
+/*!
+ * @brief Get value of specified flag
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] flag     MEMDESC_FLAGS_* value
+ *
+ * @returns Boolean value of specified flag
+ */
+NvBool memdescGetFlag
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU64 flag
+)
+{
+    // For checking contiguity, use the memdescGetContiguity() api
+    NV_ASSERT(flag != MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS);
+    // GPU_IN_RESET is set/got from top level memdesc always.
+    if (flag != MEMDESC_FLAGS_GPU_IN_RESET)
+    {
+        NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc));
+    }
+    return !!(pMemDesc->_flags & flag);
+}
+
+/*!
+ * @brief Set value of specified flag
+ *
+ * @param[in] pMemDesc Memory descriptor pointer
+ * @param[in] flag     MEMDESC_FLAGS_* value
+ * @param[in] bValue   Boolean value of flag
+ *
+ * @returns nothing
+ */
+void memdescSetFlag
+(
+    MEMORY_DESCRIPTOR *pMemDesc,
+    NvU64 flag,
+    NvBool bValue
+)
+{
+    // For setting contiguity, use the memdescSetContiguity() api
+    NV_ASSERT(flag != MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS);
+
+    // GPU_IN_RESET is set/got from top level memdesc always.
+ if (flag != MEMDESC_FLAGS_GPU_IN_RESET) + { + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + } + + if (flag == MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE) + { + NV_ASSERT_OK(_memdescSetSubAllocatorFlag(pMemDesc->pGpu, pMemDesc, bValue)); + return; + } + else if (flag == MEMDESC_FLAGS_GUEST_ALLOCATED) + { + NV_ASSERT_OK(_memdescSetGuestAllocatedFlag(pMemDesc->pGpu, pMemDesc, bValue)); + return; + } + + if (bValue) + pMemDesc->_flags |= flag; + else + pMemDesc->_flags &= ~flag; +} + +/*! + * @brief Return memory descriptor address pointer + * + * The address value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Memory descriptor address pointer + */ +NvP64 memdescGetAddress +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_address; +} + +/*! + * @brief Set memory descriptor address pointer + * + * The address value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pAddress Pointer to address information + * + * @returns nothing + */ +void memdescSetAddress +( + MEMORY_DESCRIPTOR *pMemDesc, + NvP64 pAddress +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_address = pAddress; +} + +/*! + * @brief Get memory descriptor os-specific memory data pointer + * + * The pMemData value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Memory data pointer + */ +void *memdescGetMemData +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_pMemData; +} + +/*! + * @brief Set memory descriptor os-specific memory data pointer + * + * The pMemData value is returned by osAllocPages + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] pMemData Pointer to new os-specific memory data + * @param[in] pMemDataReleaseCallback Pointer to CB to be called when memdesc + * is freed. + * + * @returns nothing + */ +void memdescSetMemData +( + MEMORY_DESCRIPTOR *pMemDesc, + void *pMemData, + MEM_DATA_RELEASE_CALL_BACK *pMemDataReleaseCallback +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pMemData = pMemData; + pMemDesc->_pMemDataReleaseCallback = pMemDataReleaseCallback; +} + +/*! + * @brief Return memory descriptor volatile attribute + * + * @param[in] pMemDesc Memory descriptor pointer + * + * @returns Volatile or not + */ +NvBool memdescGetVolatility +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + NvBool bVolatile = NV_FALSE; + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + if (pMemDesc->_addressSpace == ADDR_SYSMEM) + { + bVolatile = (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED); + } + else + { + NV_ASSERT(pMemDesc->_addressSpace == ADDR_FBMEM); + } + + return bVolatile; +} + +/*! + * @brief Quick check whether the memory is contiguous or not + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns NV_TRUE if contiguous + */ +NvBool memdescGetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + return !!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); +} + +/*! 
+ * @brief Detailed Check whether the memory is contiguous or not + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns NV_TRUE if contiguous + */ +NvBool memdescCheckContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation) +{ + NvU32 i; + + if (!(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)) + { + for (i = 0; i < (pMemDesc->PageCount - 1); i++) + { + if ((memdescGetPte(pMemDesc, addressTranslation, i) + RM_PAGE_SIZE) != + memdescGetPte(pMemDesc, addressTranslation, i + 1)) + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/*! + * @brief Set the contiguity of the memory descriptor + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * @param[in] isContiguous Contiguity value + * + * @returns nothing + */ +void memdescSetContiguity(PMEMORY_DESCRIPTOR pMemDesc, ADDRESS_TRANSLATION addressTranslation, NvBool isContiguous) +{ + NV_ASSERT_OR_RETURN_VOID(pMemDesc); + + if (isContiguous) + pMemDesc->_flags |= MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; + else + pMemDesc->_flags &= ~MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS; +} + +/*! + * @brief Get the address space of the memory descriptor + * + * @param[in] pMemDesc Memory descriptor used + * @param[in] addressTranslation Address translation identifier + * + * @returns addresspace + */ +NV_ADDRESS_SPACE memdescGetAddressSpace(PMEMORY_DESCRIPTOR pMemDesc) +{ + NV_ASSERT_OR_RETURN(pMemDesc != NULL, 0); + return pMemDesc->_addressSpace; +} + +/*! + * @brief Get page size + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * + * @returns Current page size. + */ +NvU32 memdescGetPageSize +( + PMEMORY_DESCRIPTOR pMemDesc, + ADDRESS_TRANSLATION addressTranslation +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + return pMemDesc->_pageSize; +} + +/*! + * @brief Set page size + * + * @param[in] pMemDesc Memory descriptor pointer + * @param[in] addressTranslation Address translation identifier + * @param[in] pteKind New PTE kind + * + * @returns nothing + */ +void memdescSetPageSize +( + PMEMORY_DESCRIPTOR pMemDesc, + ADDRESS_TRANSLATION addressTranslation, + NvU32 pageSize +) +{ + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + pMemDesc->_pageSize = pageSize; +} + +/*! + * @brief Get the Root memory descriptor. + * + * This can also be used to get the root offset as well. + * + * Root memory descriptor is the top level memory descriptor with no parent, + * from which this memory descriptor was derived + * + * @param[in] pMemDesc Pointer to memory descriptor. + * @param[out] pRootOffset Pointer to the root offset parameter. + * + * @returns the Root memory descriptor. + */ +PMEMORY_DESCRIPTOR memdescGetRootMemDesc +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU64 *pRootOffset +) +{ + NvU64 offset = 0; + + // Find the top-level parent descriptor + while (pMemDesc->_pParentDescriptor) + { + // Sanity check, None of the child descriptors should be allocated + NV_ASSERT(!pMemDesc->Allocated); + offset += pMemDesc->subMemOffset; + pMemDesc = pMemDesc->_pParentDescriptor; + } + + if (pRootOffset) + { + *pRootOffset = offset; + } + + return pMemDesc; +} +/*! + * @brief Sets the CUSTOM_HEAP flag of MEMDESC. 
+ * + * Since we have ACR region, Memory descriptor can be allocated in ACR region + * in that case, we need to set this flag since we are using the custom ACR HEAP + * + * @param[in] pMemDesc Pointer to memory descriptor. + * + * @returns void. + */ +void +memdescSetCustomHeap +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + NV_ASSERT(0); +} + +/*! + * @brief Returns the ACR CUSTOM_HEAP flag. + * + * + * @param[in] pMemDesc Pointer to memory descriptor. + * + * @returns NV_TRUE if flag MEMDESC_FLAGS_CUSTOM_HEAP_ACR is SET. + */ +NvBool +memdescGetCustomHeap +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + return NV_FALSE; +} + +PIOVAMAPPING memdescGetIommuMap +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 iovaspaceId +) +{ + PIOVAMAPPING pIommuMap = pMemDesc->_pIommuMappings; + while (pIommuMap != NULL) + { + if (pIommuMap->iovaspaceId == iovaspaceId) + { + break; + } + + pIommuMap = pIommuMap->pNext; + } + + return pIommuMap; +} + +NV_STATUS memdescAddIommuMap +( + PMEMORY_DESCRIPTOR pMemDesc, + PIOVAMAPPING pIommuMap +) +{ + NV_ASSERT_OR_RETURN((pMemDesc->_pIommuMappings == NULL) || + (!memdescIsSubMemoryMemDesc(pMemDesc)), NV_ERR_INVALID_ARGUMENT); + + // + // Only root physical memdescs can have multiple IOMMU mappings. + // Submemdescs can only have one, and the list linkage is used + // instead to link it as a child of the root IOMMU mapping, so we + // don't want to overwrite that here. + // + if (!memdescIsSubMemoryMemDesc(pMemDesc)) + { + pIommuMap->pNext = pMemDesc->_pIommuMappings; + } + + pMemDesc->_pIommuMappings = pIommuMap; + + return NV_OK; +} + +void memdescRemoveIommuMap +( + PMEMORY_DESCRIPTOR pMemDesc, + PIOVAMAPPING pIommuMap +) +{ + // + // Only root physical memdescs can have multiple IOMMU mappings. + // Submemdescs can only have one, and the list linkage is used + // instead to link it as a child of the root IOMMU mapping, so we + // don't want to overwrite that here. + // + if (!memdescIsSubMemoryMemDesc(pMemDesc)) + { + PIOVAMAPPING *ppTmpIommuMap = &pMemDesc->_pIommuMappings; + while ((*ppTmpIommuMap != NULL) && (*ppTmpIommuMap != pIommuMap)) + { + ppTmpIommuMap = &(*ppTmpIommuMap)->pNext; + } + + if (*ppTmpIommuMap != NULL) + { + *ppTmpIommuMap = pIommuMap->pNext; + + } + else + { + NV_ASSERT(*ppTmpIommuMap != NULL); + } + } + else if (pMemDesc->_pIommuMappings == pIommuMap) + { + pMemDesc->_pIommuMappings = NULL; + } + else + { + // + // Trying to remove a submemory mapping that doesn't belong to this + // descriptor? + // + NV_ASSERT(pMemDesc->_pIommuMappings == pIommuMap); + } +} + +NV_STATUS memdescMapIommu +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 iovaspaceId +) +{ +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM + if (iovaspaceId != NV_IOVA_DOMAIN_NONE) + { + NV_ADDRESS_SPACE addrSpace = memdescGetAddressSpace(pMemDesc); + OBJGPU *pMappingGpu = gpumgrGetGpuFromId(iovaspaceId); + PMEMORY_DESCRIPTOR pRootMemDesc = memdescGetRootMemDesc(pMemDesc, NULL); + if ((addrSpace == ADDR_SYSMEM) || gpumgrCheckIndirectPeer(pMappingGpu, pRootMemDesc->pGpu)) + { + NV_STATUS status; + OBJIOVASPACE *pIOVAS = iovaspaceFromId(iovaspaceId); + NV_ASSERT_OR_RETURN(pIOVAS, NV_ERR_OBJECT_NOT_FOUND); + + status = iovaspaceAcquireMapping(pIOVAS, pMemDesc); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + } +#endif + + // + // Verify that the final physical addresses are indeed addressable by the + // GPU. We only need to do this for internally allocated sysmem (RM owned) + // as well as externally allocated/mapped sysmem. 
Note, addresses for peer + // (P2P mailbox registers) BARs are actually not handled by the GMMU and + // support a full 64-bit address width, hence validation is not needed. + // + if ((pMemDesc->Allocated || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM) || + memdescGetFlag(pMemDesc, MEMDESC_FLAGS_PEER_IO_MEM)) && + memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) + { + // TODO This should look up the GPU corresponding to the IOVAS instead. + OBJGPU *pGpu = pMemDesc->pGpu; + RmPhysAddr dmaWindowStartAddr = gpuGetDmaStartAddress(pGpu); + RmPhysAddr dmaWindowEndAddr = gpuGetDmaEndAddress_HAL(pGpu); + RmPhysAddr physAddr; + + if (memdescGetContiguity(pMemDesc, AT_GPU)) + { + physAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + if ((physAddr < dmaWindowStartAddr) || + (physAddr + pMemDesc->Size - 1 > dmaWindowEndAddr)) + { + NV_PRINTF(LEVEL_ERROR, + "0x%llx-0x%llx is not addressable by GPU 0x%x [0x%llx-0x%llx]\n", + physAddr, physAddr + pMemDesc->Size - 1, + pGpu->gpuId, dmaWindowStartAddr, dmaWindowEndAddr); + memdescUnmapIommu(pMemDesc, iovaspaceId); + return NV_ERR_INVALID_ADDRESS; + } + } + else + { + NvU32 i; + for (i = 0; i < pMemDesc->PageCount; i++) + { + physAddr = memdescGetPte(pMemDesc, AT_GPU, i); + if ((physAddr < dmaWindowStartAddr) || + (physAddr + (RM_PAGE_SIZE - 1) > dmaWindowEndAddr)) + { + NV_PRINTF(LEVEL_ERROR, + "0x%llx is not addressable by GPU 0x%x [0x%llx-0x%llx]\n", + physAddr, pGpu->gpuId, dmaWindowStartAddr, + dmaWindowEndAddr); + memdescUnmapIommu(pMemDesc, iovaspaceId); + return NV_ERR_INVALID_ADDRESS; + } + } + } + } + + return NV_OK; +} + +void memdescUnmapIommu +( + PMEMORY_DESCRIPTOR pMemDesc, + NvU32 iovaspaceId +) +{ +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM + PIOVAMAPPING pIovaMapping; + OBJIOVASPACE *pIOVAS; + + if (iovaspaceId == NV_IOVA_DOMAIN_NONE) + return; + + pIovaMapping = memdescGetIommuMap(pMemDesc, iovaspaceId); + NV_ASSERT(pIovaMapping); + + pIOVAS = iovaspaceFromMapping(pIovaMapping); + iovaspaceReleaseMapping(pIOVAS, pIovaMapping); +#endif +} + +void memdescCheckSubDevicePageSizeConsistency +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + OBJVASPACE *pVAS, + NvU64 pageSize, + NvU64 pageOffset +) +{ + NvU64 tempPageSize, tempPageOffset; + PMEMORY_DESCRIPTOR pTempMemDesc = NULL; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pTempMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + tempPageSize = memdescGetPageSize(pTempMemDesc, VAS_ADDRESS_TRANSLATION(pVAS)); + tempPageOffset = memdescGetPhysAddr(pTempMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), 0) & (tempPageSize - 1); + + // Assert if inconsistent + NV_ASSERT(pageSize == tempPageSize); + NV_ASSERT(pageOffset == tempPageOffset); + SLI_LOOP_END +} + +void memdescCheckSubDeviceMemContiguityConsistency +( + OBJGPU *pGpu, + PMEMORY_DESCRIPTOR pMemDesc, + OBJVASPACE *pVAS, + NvBool bIsMemContiguous +) +{ + NvBool bTempIsMemContiguous = NV_FALSE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + bTempIsMemContiguous = memdescGetContiguity(memdescGetMemDescFromGpu(pMemDesc, pGpu), VAS_ADDRESS_TRANSLATION(pVAS)); + // Assert if inconsistent + NV_ASSERT(bIsMemContiguous == bTempIsMemContiguous); + SLI_LOOP_END +} + +NV_STATUS memdescCheckSubDeviceKindComprConsistency +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + OBJVASPACE *pVAS, + NvU32 kind, + COMPR_INFO *pComprInfo +) +{ + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + MemoryManager *MemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 tempKind; + COMPR_INFO tempComprInfo = {0}; + NV_STATUS 
status; + + status = memmgrGetKindComprFromMemDesc(MemoryManager, + memdescGetMemDescFromGpu(pMemDesc, pGpu), + 0, + &tempKind, &tempComprInfo); + + if (NV_OK != status) + SLI_LOOP_RETURN(status); + + // Assert if inconsistent + NV_ASSERT(kind == tempKind); + NV_ASSERT(tempComprInfo.compPageShift == pComprInfo->compPageShift && + tempComprInfo.kind == pComprInfo->kind && + tempComprInfo.compPageIndexLo == pComprInfo->compPageIndexLo && + tempComprInfo.compPageIndexHi == pComprInfo->compPageIndexHi && + tempComprInfo.compTagLineMin == pComprInfo->compTagLineMin && + tempComprInfo.compTagLineMultiplier == pComprInfo->compTagLineMultiplier); + } + SLI_LOOP_END + + return NV_OK; +} + +/* @brief Get GPA(guest physical addresses) for given GPU physical addresses. + * + * @param[in] pGpu GPU for which GPAs are needed + * @param[in] pageCount Size of array. Should be 1 for contiguous mappings + * @param[in/out] pGpa Array of GPU PAs to be converted to guest PAs + * + * @returns NV_STATUS + */ +NV_STATUS memdescGetNvLinkGpa +( + OBJGPU *pGpu, + NvU64 pageCount, + RmPhysAddr *pGpa +) +{ + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + NV_ASSERT_OR_RETURN(pGpa, NV_ERR_INVALID_ARGUMENT); + + NvU32 pageIndex; + // For each page, do the GPU PA to GPA conversion + for (pageIndex = 0; pageIndex < pageCount; pageIndex++) + { + pGpa[pageIndex] += pKernelMemorySystem->coherentCpuFbBase; + } + + return NV_OK; +} + +NV_STATUS +memdescSetCtxBufPool +( + PMEMORY_DESCRIPTOR pMemDesc, + CTX_BUF_POOL_INFO *pCtxBufPool +) +{ + + NV_ASSERT_OR_RETURN(!pMemDesc->Allocated, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(!memdescHasSubDeviceMemDescs(pMemDesc), NV_ERR_INVALID_ARGUMENT); + + pMemDesc->pCtxBufPool = pCtxBufPool; + return NV_OK; +} + +CTX_BUF_POOL_INFO* +memdescGetCtxBufPool +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + NV_ASSERT_OR_RETURN(!memdescHasSubDeviceMemDescs(pMemDesc), NULL); + return pMemDesc->pCtxBufPool; +} + +/*! + * @brief Override the registry INST_LOC two-bit enum to an aperture (list) + cpu attr. + * + * Caller must set initial default values. + */ +void +memdescOverrideInstLocList +( + NvU32 instLoc, // NV_REG_STR_RM_INST_LOC + const char *name, + const NV_ADDRESS_SPACE **ppAllocList, + NvU32 *pCpuMappingAttr +) +{ + switch (instLoc) + { + case NV_REG_STR_RM_INST_LOC_COH: + NV_PRINTF(LEVEL_INFO, "using coh system memory for %s\n", name); + *ppAllocList = ADDRLIST_SYSMEM_ONLY; + *pCpuMappingAttr = NV_MEMORY_CACHED; + break; + case NV_REG_STR_RM_INST_LOC_NCOH: + NV_PRINTF(LEVEL_INFO, "using ncoh system memory for %s\n", name); + *ppAllocList = ADDRLIST_SYSMEM_ONLY; + *pCpuMappingAttr = NV_MEMORY_UNCACHED; + break; + case NV_REG_STR_RM_INST_LOC_VID: + NV_PRINTF(LEVEL_INFO, "using video memory for %s\n", name); + *ppAllocList = ADDRLIST_FBMEM_ONLY; + *pCpuMappingAttr = NV_MEMORY_WRITECOMBINED; + break; + case NV_REG_STR_RM_INST_LOC_DEFAULT: + default: + // Do not update parameters + break; + } +} + +/*! + * @brief Override wrapper for callers needed an aperture + */ +void +memdescOverrideInstLoc +( + NvU32 instLoc, + const char *name, + NV_ADDRESS_SPACE *pAddrSpace, + NvU32 *pCpuMappingAttr +) +{ + const NV_ADDRESS_SPACE *pAllocList = NULL; + + memdescOverrideInstLocList(instLoc, name, &pAllocList, pCpuMappingAttr); + if (pAllocList != NULL) + *pAddrSpace = pAllocList[0]; +} +/*! +* @brief override physical address width +* +* address width to be override in bits. 
+* @param[in] pGpu +* @param[in] pMemDesc Memory descriptor to update +* @param[in] addresswidth Offset to refer to +* +* @returns nothing +*/ +void +memdescOverridePhysicalAddressWidthWindowsWAR +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 addressWidth +) +{ + return; +} + +/*! +* @brief Register MEMDESC to GSP +* Life of the registration: until memdescDeregisterFromGSP is called, +* always occurs when the memory is freed. +* Have argument as pMemory*; Move to NVOC +* +* @param[in] pGpu +* @param[in] hClient NvHandle +* @param[in] hDevice NvHandle +* @param[in] hMemory NvHandle +* +* @returns NV_STATUS +*/ +NV_STATUS +memdescRegisterToGSP +( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory +) +{ + NV_STATUS status = NV_OK; + Memory *pMemory = NULL; + RsResourceRef *pMemoryRef = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + // Nothing to do without GSP + if (!IS_GSP_CLIENT(pGpu)) + { + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, serverutilGetResourceRef(hClient, hMemory, &pMemoryRef)); + + pMemory = dynamicCast(pMemoryRef->pResource, Memory); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemory != NULL, NV_ERR_INVALID_OBJECT); + + pMemDesc = pMemory->pMemDesc; + + // Check: memory already registered + if ((pMemDesc->_flags & MEMDESC_FLAGS_REGISTERED_TO_GSP) != 0) + { + return NV_OK; + } + + // Check: no subdevice memDescs + NV_CHECK_OR_RETURN(LEVEL_ERROR, + !memdescHasSubDeviceMemDescs(pMemDesc), + NV_ERR_INVALID_STATE); + + // Check: SYSMEM only + NV_CHECK_OR_RETURN(LEVEL_ERROR, + memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM, + NV_ERR_INVALID_STATE); + + NvU32 os02Flags = 0; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + RmDeprecatedConvertOs32ToOs02Flags(pMemory->Attr, + pMemory->Attr2, + pMemory->Flags, + &os02Flags)); + NV_RM_RPC_ALLOC_MEMORY(pGpu, + hClient, + hParent, + hMemory, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + os02Flags, + pMemDesc, + status); + + if (status == NV_OK) + { + // Mark memory as registered in GSP + pMemDesc->_flags |= MEMDESC_FLAGS_REGISTERED_TO_GSP; + } + + return status; +} + + +/*! +* @brief Deregister MEMDESC from GSP +* Is always called when the memory is freed. 
+* Have argument as pMemory*; Move to NVOC +* +* @param[in] pGpu +* @param[in] hClient NvHandle +* @param[in] hParent NvHandle +* @param[in] hMemory NvHandle +* +* @returns NV_STATUS +*/ +NV_STATUS +memdescDeregisterFromGSP +( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hParent, + NvHandle hMemory +) +{ + NV_STATUS status = NV_OK; + Memory *pMemory = NULL; + RsResourceRef *pMemoryRef = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + // Nothing to do without GSP + if ((pGpu == NULL) || + !IS_GSP_CLIENT(pGpu)) + { + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, serverutilGetResourceRef(hClient, hMemory, &pMemoryRef)); + + pMemory = dynamicCast(pMemoryRef->pResource, Memory); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pMemory != NULL, NV_ERR_INVALID_OBJECT); + + pMemDesc = pMemory->pMemDesc; + + // Nothing to do if memory is not registered to GSP + if ((pMemDesc == NULL) || + (pMemDesc->_flags & MEMDESC_FLAGS_REGISTERED_TO_GSP) == 0) + { + return NV_OK; + } + + NV_RM_RPC_FREE(pGpu, + hClient, + hParent, + hMemory, + status); + + if (status == NV_OK) + { + // Mark memory as not registered in GSP + pMemDesc->_flags &= ~MEMDESC_FLAGS_REGISTERED_TO_GSP; + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr.c new file mode 100644 index 000000000..005888560 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr.c @@ -0,0 +1,3042 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "mem_mgr/video_mem.h" +#include "gpu/mem_mgr/fbsr.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/bus/kern_bus.h" +#include "core/locks.h" +#include "vgpu/rpc.h" +#include "core/thread_state.h" +#include "nvRmReg.h" +#include "gpu/mem_mgr/phys_mem_allocator/numa.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "mmu/gmmu_fmt.h" +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "class/cl503c.h" +#include "class/cl906f.h" // GF100_CHANNEL_GPFIFO + +static NV_STATUS _memmgrCreateFBSR(MemoryManager *pMemoryManager, NvU32); +static NV_STATUS _memmgrCreateChildObjects(MemoryManager *pMemoryManager); +static void _memmgrInitRegistryOverrides(OBJGPU *pGpu, MemoryManager *pMemoryManager); +static NV_STATUS _memmgrInitMIGMemoryPartitionHeap(OBJGPU *pGpu, MemoryManager *pMemoryManager, + NvU32 swizzId, NV_RANGE *pAddrRange, + Heap **ppMemoryPartitionHeap); +static NV_STATUS _memmgrAllocInternalClientObjects(OBJGPU *pGpu, + MemoryManager *pMemoryManager); +static void _memmgrFreeInternalClientObjects(MemoryManager *pMemoryManager); + +#define MEMUTILS_CHANNEL_GPFIFO_SIZE (NV906F_GP_ENTRY__SIZE * MEMUTILS_NUM_GPFIFIO_ENTRIES) + +NV_STATUS +memmgrConstructEngine_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + ENGDESCRIPTOR engDesc +) +{ + NV_STATUS rmStatus; + + pMemoryManager->overrideInitHeapMin = 0; + pMemoryManager->overrideHeapMax = ~0ULL; + + // Create the children + rmStatus = _memmgrCreateChildObjects(pMemoryManager); + if (rmStatus != NV_OK) + return rmStatus; + + pMemoryManager->MIGMemoryPartitioningInfo.hClient = NV01_NULL_OBJECT; + pMemoryManager->MIGMemoryPartitioningInfo.hDevice = NV01_NULL_OBJECT; + pMemoryManager->MIGMemoryPartitioningInfo.hSubdevice = NV01_NULL_OBJECT; + pMemoryManager->MIGMemoryPartitioningInfo.partitionableMemoryRange = NV_RANGE_EMPTY; + + return NV_OK; +} + +void +memmgrDestruct_IMPL +( + MemoryManager *pMemoryManager +) +{ + NvU32 i; + + for (i = 0; i < NUM_FBSR_TYPES; i++) + { + if (pMemoryManager->pFbsr[i]) + { + objDelete(pMemoryManager->pFbsr[i]); + pMemoryManager->pFbsr[i] = NULL; + } + } + + if (pMemoryManager->pHeap != NULL) + { + objDelete(pMemoryManager->pHeap); + pMemoryManager->pHeap = NULL; + } + + pMemoryManager->MIGMemoryPartitioningInfo.partitionableMemoryRange = NV_RANGE_EMPTY; +} + +static void +_memmgrInitRegistryOverrides(OBJGPU *pGpu, MemoryManager *pMemoryManager) +{ + NvU32 data32; + + // Check for ram size override. 
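    // For example, a regkey value of 1024 makes heap sizing see a 1024 MB
    // framebuffer: the value captured in fbOverrideSizeMb below is applied to
    // the FB region descriptions by memmgrHandleSizeOverrides_HAL() during
    // heap creation and finalized by memmgrFinishHandleSizeOverrides_HAL() at
    // StateLoad.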
+ pMemoryManager->Ram.fbOverrideSizeMb = ~0; + if ((osReadRegistryDword(pGpu, NV_REG_STR_OVERRIDE_FB_SIZE, &data32) == NV_OK) && + (data32 != 0)) + { + NV_PRINTF(LEVEL_WARNING, "Regkey %s = %dM\n", + NV_REG_STR_OVERRIDE_FB_SIZE, data32); + // Used to override heap sizing at create + pMemoryManager->Ram.fbOverrideSizeMb = data32; + } + + // + // Scrub on Free is enabled by default for GK110+ + // The reg key will be used to disable the scrub on free + // + if ((osReadRegistryDword(pGpu, NV_REG_STR_RM_DISABLE_SCRUB_ON_FREE, + &data32) == NV_OK) && data32) + { + pMemoryManager->bScrubOnFreeEnabled = NV_FALSE; + } + + if (NV_OK == osReadRegistryDword(pGpu, NV_REG_STR_RM_SYSMEM_PAGE_SIZE, &data32)) + { + switch (data32) + { + case RM_PAGE_SIZE: + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_HUGE: + case RM_PAGE_SIZE_512M: + break; + default: + NV_ASSERT(0); + NV_PRINTF(LEVEL_ERROR, + "Sysmem page size 0x%x not supported! Defaulting to 4KB\n", + data32); + data32 = RM_PAGE_SIZE; + } + pMemoryManager->sysmemPageSize = data32; + } + else + { + pMemoryManager->sysmemPageSize = RM_PAGE_SIZE; + + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ALLOW_SYSMEM_LARGE_PAGES, &data32) == NV_OK) + { + pMemoryManager->bAllowSysmemHugePages = data32 ? NV_TRUE : NV_FALSE; + } + else + { + pMemoryManager->bAllowSysmemHugePages = NV_FALSE; + } + + // This key should not be used on physical (GSP) RM. + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + // Allow user to increase size of RM reserved heap via a regkey + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_INCREASE_RSVD_MEMORY_SIZE_MB, + &data32) == NV_OK) + { + pMemoryManager->rsvdMemorySizeIncrement = (NvU64)data32 << 20; + NV_PRINTF(LEVEL_ERROR, + "User specified increase in reserved size = %d MBs\n", + data32); + } + } + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION, + &data32) == NV_OK) + { + if (data32 == NV_REG_STR_RM_DISABLE_NONCONTIGUOUS_ALLOCATION_TRUE) + { + pMemoryManager->bAllowNoncontiguousAllocation = NV_FALSE; + } + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_FBSR_PAGED_DMA, &data32) == NV_OK) + { + pMemoryManager->bEnableFbsrPagedDma = !!data32; + } + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_IGNORE_UPPER_MEMORY, + &data32) == NV_OK) + { + // Ignore upper memory. + pMemoryManager->bIgnoreUpperMemory = !!data32; + } + + // Allow increasing the RM reserved space. + if (osReadRegistryDword(pGpu, NV_REG_STR_BUG_1698088_WAR, &data32) == NV_OK) + { + if (data32 == NV_REG_STR_BUG_1698088_WAR_ENABLE) + { + pMemoryManager->bBug1698088IncreaseRmReserveMemoryWar = NV_TRUE; + } + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_FBSR_FILE_MODE, &data32) == NV_OK) + { + if (data32 && RMCFG_FEATURE_PLATFORM_UNIX) + { + pMemoryManager->bEnableFbsrFileMode = NV_TRUE; + } + } + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_NO_ECC_FB_SCRUB, + &data32) == NV_OK) + { + pMemoryManager->bEccScrubOverride = NV_TRUE; + } + + if ((osReadRegistryDword(pGpu, NV_REG_STR_RM_INIT_SCRUB, + &data32) == NV_OK) && (data32)) + { + pMemoryManager->bScrubberInitialized = NV_TRUE; + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_DISABLE_ASYNC_MEM_SCRUB, + &data32) == NV_OK) + { + // disable async scrub + pMemoryManager->bDisableAsyncScrubforMods = !!data32; + } + + if ((osReadRegistryDword(pGpu, NV_REG_STR_RM_INCREASE_ECC_SCRUB_TIMEOUT, + &data32) == NV_OK) && (data32)) + { + // increase ECC scrub timeout + pMemoryManager->bBug1441072EccScrubWar = NV_TRUE; + } + + // + // Override PMA enable. 
PDB_PROP_FB_PMA_ENABLED is reconciled with + // PDB_PROP_FB_PLATFORM_PMA_SUPPORT to decide whether to enable PMA. + // + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ENABLE_PMA, &data32) == NV_OK) + { + if (data32 == NV_REG_STR_RM_ENABLE_PMA_YES) + { + pMemoryManager->bPmaEnabled = NV_TRUE; + } + else + { + pMemoryManager->bPmaEnabled = NV_FALSE; + } + } + + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM && !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TCC_MODE)) + { + pMemoryManager->bFbsrWddmModeEnabled = NV_TRUE; + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_FBSR_WDDM_MODE, &data32) == NV_OK) + { + pMemoryManager->bFbsrWddmModeEnabled = !!data32; + } + + // + // Override PMA managed client page tables. + // NOTE: This is WAR for bug #s 1946145 and 1971628. + // This should be removed as part of heap removal and PMA refactor. + // + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES, + &data32) == NV_OK) + { + if (data32 == NV_REG_STR_RM_ENABLE_PMA_MANAGED_PTABLES_NO) + { + memmgrSetClientPageTablesPmaManaged(pMemoryManager, NV_FALSE); + } + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ENABLE_ADDRTREE, &data32) == NV_OK) + { + if (data32 == NV_REG_STR_RM_ENABLE_ADDRTREE_YES) + { + pMemoryManager->bPmaAddrTree = NV_TRUE; + NV_PRINTF(LEVEL_ERROR, "Enabled address tree for PMA via regkey.\n"); + } + } + else if (RMCFG_FEATURE_PLATFORM_MODS) + { + pMemoryManager->bPmaAddrTree = NV_TRUE; + NV_PRINTF(LEVEL_ERROR, "Enabled address tree for PMA for MODS.\n"); + } +} + +NV_STATUS +memmgrStatePreInitLocked_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + // Determine the size of reserved memory + NV_ASSERT_OK_OR_RETURN(memmgrPreInitReservedMemory_HAL(pGpu, pMemoryManager)); + + return NV_OK; +} + +NV_STATUS +memmgrStateInitLocked_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + NvBool bDynamicPageOffliningDisable = NV_FALSE; + + if (IS_GSP_CLIENT(pGpu)) + { + // + // Temporary hack to get OpenRM working without breaking SLI + // After fixing CORERM-4078, memmgrInitFbRegions() call should be removed from memsysStateInitLocked() + // and only left here + // + NV_ASSERT_OK_OR_RETURN(memmgrInitFbRegions(pGpu, pMemoryManager)); + } + + NV_ASSERT_OK_OR_RETURN(memmgrInitReservedMemory_HAL(pGpu, pMemoryManager, pMemoryManager->Ram.fbAddrSpaceSizeMb << 20)); + + _memmgrInitRegistryOverrides(pGpu, pMemoryManager); + // + // Enable dynamic page blacklisting at this point before we call CreateHeap + // since it internally calls heapGetBlacklistPages which depends on this property + // + if (!bDynamicPageOffliningDisable) + memmgrEnableDynamicPageOfflining_HAL(pGpu, pMemoryManager); + + memmgrScrubRegistryOverrides_HAL(pGpu, pMemoryManager); + memmgrScrubInit_HAL(pGpu, pMemoryManager); + + // + // Allocate framebuffer heap. All memory must be allocated from here to keep the world + // consistent (N.B. the heap size has been reduced by the amount of instance memory). + // + status = memmgrCreateHeap(pMemoryManager); + if (status != NV_OK) + { + return status; + } + + // + // Just set up the memory pool now (basic init stuff). Actual physical + // frames are *NOT* added to the pool at this stage. + // + status = memmgrPageLevelPoolsCreate(pGpu, pMemoryManager); + if (status != NV_OK) + { + return status; + } + + // RMCONFIG: only if FBSR engine is enabled + if (RMCFG_MODULE_FBSR) + { + // + // If a configuration is not supported, do not initialize + // the corresponding fbsr engine. 
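        // In short, the fbsrStartMode picked below is the preferred
        // suspend/resume scheme, and the loop that follows also initializes
        // the remaining FBSR_TYPE_* entries after it (subject to the
        // persistent-buffer and broken-FB skips), so a lower-priority scheme
        // can serve as a fallback when a higher-priority one fails to
        // initialize.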
+ // + if (pMemoryManager->bFbsrWddmModeEnabled) + { + pMemoryManager->fbsrStartMode = FBSR_TYPE_WDDM_FAST_DMA_DEFERRED_NONPAGED; + } + else if (pMemoryManager->bEnableFbsrPagedDma) + { + pMemoryManager->fbsrStartMode = FBSR_TYPE_PAGED_DMA; + } + else if (pMemoryManager->bEnableFbsrFileMode) + { + pMemoryManager->fbsrStartMode = FBSR_TYPE_FILE; + } + else + { + pMemoryManager->fbsrStartMode = FBSR_TYPE_PERSISTENT; + } + + for (i = pMemoryManager->fbsrStartMode; i < NUM_FBSR_TYPES; i++) + { + if (!pMemoryManager->bPersistentStandbyBuffer && + (i == FBSR_TYPE_PERSISTENT)) + { + continue; + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + (i == FBSR_TYPE_PAGED_DMA || i == FBSR_TYPE_DMA)) + { + continue; + } + + status = fbsrInit_HAL(pGpu, pMemoryManager->pFbsr[i]); + + // + // If one fbsr scheme failed, proceed to initializing the other + // fallback options. + // + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "fbsrInit failed for supported type %d suspend-resume scheme\n", + i); + continue; + } + } + } + + status = _memmgrAllocInternalClientObjects(pGpu, pMemoryManager); + if (status != NV_OK) + { + // + // TODO: Bug 3482892: Need a way to roll back StateInit + // steps in case of a failure + // WAR for now is to cleanup with memmgrStateDestroy(). + // + memmgrStateDestroy(pGpu, pMemoryManager); + return status; + } + + return NV_OK; +} + +NV_STATUS +memmgrStateLoad_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 flags +) +{ + // If fbOverrideSizeMb is set, finish setting up the FB parameters now that state init has finished + memmgrFinishHandleSizeOverrides_HAL(pGpu, pMemoryManager); + + if ((flags & GPU_STATE_FLAGS_PRESERVING) && + !(flags & GPU_STATE_FLAGS_GC6_TRANSITION)) + { + // + // Only do initialization scrubs (i.e. RM reserved region) on + // non-GC6 transitions since GC6 cycles leave FB powered. + // + memmgrScrubInit_HAL(pGpu, pMemoryManager); + } + + // Dump FB regions + memmgrDumpFbRegions(pGpu, pMemoryManager); + + return NV_OK; +} + +NV_STATUS +memmgrStatePreUnload_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 flags +) +{ + NV_ASSERT((flags & GPU_STATE_FLAGS_PRESERVING) || pMemoryManager->zbcSurfaces == 0); + + return NV_OK; +} + +NV_STATUS +memmgrStateUnload_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 flags +) +{ + if ((flags & GPU_STATE_FLAGS_PRESERVING) && + !(flags & GPU_STATE_FLAGS_GC6_TRANSITION)) + { + // + // Initialiation scrubs only happen during StateLoad on non-GC6 + // transitions. 
+ // + memmgrScrubDestroy_HAL(pGpu, pMemoryManager); + } + + return NV_OK; +} + +void +memmgrStateDestroy_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + Heap *pHeap = MEMORY_MANAGER_GET_HEAP(pMemoryManager); + NvU32 i; + + _memmgrFreeInternalClientObjects(pMemoryManager); + + // Destroys the SW state of the page level pools + memmgrPageLevelPoolsDestroy(pGpu, pMemoryManager); + + // Destroy the heap entirely, and all associated structures + if (pHeap) + { + kmemsysPreHeapDestruct_HAL(pGpu, pKernelMemorySystem); + + objDelete(pHeap); + pMemoryManager->pHeap = NULL; + } + + // RMCONFIG: only if FBSR engine is enabled + if (RMCFG_MODULE_FBSR) + { + for (i = 0; i < NUM_FBSR_TYPES; i++) + { + fbsrDestroy_HAL(pGpu, pMemoryManager->pFbsr[i]); + } + } + + memmgrScrubDestroy_HAL(pGpu, pMemoryManager); +} + +static NV_STATUS +_memmgrCreateChildObjects +( + MemoryManager *pMemoryManager +) +{ + NV_STATUS status = NV_OK; + + // RMCONFIG: only if FBSR engine is enabled + if (RMCFG_MODULE_FBSR) + { + NvU32 i; + + // Create FBSR object for every type RM supports. + for (i = 0; i < NUM_FBSR_TYPES; i++) + { + status = _memmgrCreateFBSR(pMemoryManager, i); + if (status != NV_OK) + { + return status; + } + } + } + + return status; +} + +NV_STATUS +memmgrCreateHeap_IMPL +( + MemoryManager *pMemoryManager +) +{ + Heap *newHeap; + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NvU64 rsvdSize; + NvU64 size; + NV_STATUS status = NV_OK; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + // If we're using FB regions then rsvd memory is already marked as a reserved region + if ((pMemoryManager->Ram.numFBRegions == 0) || (IS_VIRTUAL_WITH_SRIOV(pGpu))) + { + if (pMemorySystemConfig->bReservedMemAtBottom) + { + // rsvd memory is already accounted for in heapStart + rsvdSize = 0; + } + else + { + rsvdSize = pMemoryManager->rsvdMemorySize; + } + } + else + rsvdSize = 0; + + // for vGPU, add extra FB tax incurred by host RM to reserved size + rsvdSize += memmgrGetFbTaxSize_HAL(pGpu, pMemoryManager); + + // + // Fix up region descriptions to match with any FB override size + // + memmgrHandleSizeOverrides_HAL(pGpu, pMemoryManager); + + // + // Calculate the FB heap size as the address space size, then deduct any reserved memory + // + size = pMemoryManager->Ram.fbAddrSpaceSizeMb << 20; + size -= NV_MIN(size, rsvdSize); + + if((size != 0) || (pMemoryManager->bScanoutSysmem)) + { + status = objCreate(&newHeap, pMemoryManager, Heap); + if (status != NV_OK) + { + return status; + } + + pMemoryManager->pHeap = newHeap; + + if (memmgrIsPmaEnabled(pMemoryManager) && + memmgrIsPmaSupportedOnPlatform(pMemoryManager)) + { + portMemSet(&pMemoryManager->pHeap->pmaObject, 0, sizeof(pMemoryManager->pHeap->pmaObject)); + status = memmgrPmaInitialize(pGpu, pMemoryManager, &pMemoryManager->pHeap->pmaObject); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + + status = heapInit(pGpu, newHeap, + pMemoryManager->heapStartOffset, + size - pMemoryManager->heapStartOffset, HEAP_TYPE_RM_GLOBAL, GPU_GFID_PF, NULL); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + if ((memmgrIsPmaInitialized(pMemoryManager)) && (pMemoryManager->pHeap->bHasFbRegions)) + { + status = memmgrPmaRegisterRegions(pGpu, pMemoryManager, pMemoryManager->pHeap, + &pMemoryManager->pHeap->pmaObject); + 
NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + + // Reserve vidmem for FSP usage, including FRTS, WPR2 + status = memmgrReserveMemoryForFsp(pGpu, pMemoryManager); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to reserve vidmem for WPR and FRTS.\n"); + return status; + } + + if (!IsSLIEnabled(pGpu)) + { + // Do the actual blacklisting of pages from the heap + if (newHeap->blackListAddresses.count != 0) + { + status = heapBlackListPages(pGpu, newHeap); + + if (status != NV_OK) + { + // Warn and continue + NV_PRINTF(LEVEL_WARNING, "Error 0x%x creating blacklist\n", + status); + } + } + } + + kmemsysPostHeapCreate_HAL(pGpu, pKernelMemorySystem); + } + + return status; +} + +/* + * @brief Gets per-device suballocator. If it is not available, get shared heap. + * + * @param[in] pMemoryManager MemoryManager pointer + */ +Heap * +memmgrGetDeviceSuballocator_IMPL +( + MemoryManager *pMemoryManager, + NvBool bForceSubheap +) +{ + + if (!bForceSubheap) + { + // If no suballocator found, use heap + return MEMORY_MANAGER_GET_HEAP(pMemoryManager); + } + + return NULL; +} + +static NV_STATUS +_memmgrCreateFBSR +( + MemoryManager *pMemoryManager, + NvU32 type +) +{ + OBJFBSR *pFbsr; + NV_STATUS status; + + status = objCreate(&pFbsr, pMemoryManager, OBJFBSR); + if (status != NV_OK) + { + return status; + } + + NV_ASSERT(pFbsr); + pMemoryManager->pFbsr[type] = pFbsr; + + fbsrObjectInit(pFbsr, type); + + return NV_OK; +} + +static void +_memmgrFreeInternalClientObjects +( + MemoryManager *pMemoryManager +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (pMemoryManager->hThirdPartyP2P != 0) + { + pRmApi->Free(pRmApi, pMemoryManager->hClient, + pMemoryManager->hThirdPartyP2P); + pMemoryManager->hThirdPartyP2P = 0; + } + + if (pMemoryManager->hClient != 0) + { + rmapiutilFreeClientAndDeviceHandles(pRmApi, + &pMemoryManager->hClient, + &pMemoryManager->hDevice, + &pMemoryManager->hSubdevice); + } +} + +static NV_STATUS +_memmgrAllocInternalClientObjects +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + status = rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, + &pMemoryManager->hClient, + &pMemoryManager->hDevice, + &pMemoryManager->hSubdevice); + if (status != NV_OK) + { + goto failed; + } + + { + NV503C_ALLOC_PARAMETERS params; + NvHandle hThirdPartyP2P = 0; + + NV_ASSERT_OK_OR_GOTO(status, serverutilGenResourceHandle(pMemoryManager->hClient, + &hThirdPartyP2P), + failed); + + portMemSet(¶ms, 0, sizeof(params)); + params.flags = NV503C_ALLOC_PARAMETERS_FLAGS_TYPE_BAR1; + status = pRmApi->AllocWithHandle(pRmApi, + pMemoryManager->hClient, + pMemoryManager->hSubdevice, + hThirdPartyP2P, + NV50_THIRD_PARTY_P2P, ¶ms); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Error creating internal ThirdPartyP2P object: %x\n", + status); + pMemoryManager->hThirdPartyP2P = 0; + } + else + { + pMemoryManager->hThirdPartyP2P = hThirdPartyP2P; + } + + } + + return NV_OK; + +failed: + _memmgrFreeInternalClientObjects(pMemoryManager); + + return status; +} + +/*! + * @brief Determine size of FB RAM which is used for RM internal allocations + * and PMA. 
+ * + * @param[out] pFbUsedSize FB used memory size + * + * @returns NV_OK + */ +NV_STATUS +memmgrGetUsedRamSize_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *pFbUsedSize +) +{ + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU64 heapFreeSpace, heapTotalSpace, pmaFreeSpace; + + // + // Determine free memory in FB and substract with total FB memory. + // If PMA is initialized, then use the free memory size in PMA and + // heap otherwise only use heap free memory for calculation. + // + heapGetFree(pHeap, &heapFreeSpace); + heapGetSize(pHeap, &heapTotalSpace); + if (memmgrIsPmaInitialized(pMemoryManager)) + { + pmaGetFreeMemory(&pHeap->pmaObject, &pmaFreeSpace); + *pFbUsedSize = heapTotalSpace - heapFreeSpace - pmaFreeSpace; + } + else + { + *pFbUsedSize = heapTotalSpace - heapFreeSpace; + } + + return NV_OK; +} + +NV_STATUS +memmgrAllocHwResources_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + MemoryManager *pMemoryManagerLoop; + FB_ALLOC_INFO *pTempInfo = NULL; + NvU32 skipFlag = (pFbAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC); + NV_STATUS rmStatus = NV_OK; + + pTempInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pTempInfo == NULL) + { + NV_ASSERT(0); + return NV_ERR_NO_MEMORY; + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + NV_STATUS tempStatus; + *pTempInfo = *pFbAllocInfo; // struct copy + + pMemoryManagerLoop = GPU_GET_MEMORY_MANAGER(pGpu); + + tempStatus = memmgrAllocHal_HAL(pGpu, pMemoryManagerLoop, pTempInfo); + // be sure to return an intermediate error + if (NV_OK == rmStatus) + rmStatus = tempStatus; + } + SLI_LOOP_END + + *pFbAllocInfo = *pTempInfo; // struct copy + portMemFree(pTempInfo); + + pFbAllocInfo->pageFormat->flags &= ~NVOS32_ALLOC_FLAGS_SKIP_RESOURCE_ALLOC; + pFbAllocInfo->pageFormat->flags |= skipFlag; + + return rmStatus; +} + +NV_STATUS +memmgrFreeHwResources_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + MemoryManager *pMemoryManagerLoop; + NV_STATUS rmStatus = NV_OK; + RMTIMEOUT timeout; + FB_ALLOC_INFO *pTempInfo = NULL; + + pTempInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pTempInfo == NULL) + { + NV_ASSERT(0); + return NV_ERR_NO_MEMORY; + } + + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + NV_STATUS tempStatus; + pMemoryManagerLoop = GPU_GET_MEMORY_MANAGER(pGpu); + + *pTempInfo = *pFbAllocInfo; + + tempStatus = memmgrFreeHal_HAL(pGpu, pMemoryManagerLoop, pTempInfo, &timeout); + // be sure to return an intermediate error + if (NV_OK == rmStatus) + rmStatus = tempStatus; + + } + SLI_LOOP_END + + *pFbAllocInfo = *pTempInfo; + portMemFree(pTempInfo); + + return rmStatus; +} + +NvBool +memmgrLargePageSupported_IMPL +( + MemoryManager *pMemoryManager, + NV_ADDRESS_SPACE addrSpace +) +{ + NvBool isSupported = NV_FALSE; + + if (addrSpace == ADDR_FBMEM || addrSpace == ADDR_VIRTUAL) + { + isSupported = NV_TRUE; + } + else if (addrSpace == ADDR_SYSMEM) + { + isSupported = (pMemoryManager->sysmemPageSize != RM_PAGE_SIZE); + } + else + { + NV_ASSERT(0); + } + + return isSupported; +} + +NvBool +memmgrComprSupported_IMPL +( + MemoryManager *pMemoryManager, + NV_ADDRESS_SPACE addrSpace +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvBool isSupported = NV_FALSE; + + if (GPU_GET_KERNEL_GMMU(pGpu) != NULL) + { + if (memmgrLargePageSupported(pMemoryManager, addrSpace) || + 
pMemoryManager->bSmallPageCompression) + { + if (addrSpace == ADDR_FBMEM || addrSpace == ADDR_VIRTUAL) + { + isSupported = NV_TRUE; + } + else if (addrSpace == ADDR_SYSMEM) + { + // Compression is allowed on vidmem or unified aperture (vidmem/sysmem is same w.r.t HW) + isSupported = (gpuIsUnifiedMemorySpaceEnabled(pGpu) && + pMemoryManager->bSysmemCompressionSupportDef); + NV_PRINTF(LEVEL_ERROR, "isSupported=%s\n", + isSupported ? "NV_TRUE" : "NV_FALSE"); + } + else + { + NV_ASSERT(0); + } + } + } + + return isSupported; +} + +NV_ADDRESS_SPACE +memmgrAllocGetAddrSpace_IMPL +( + MemoryManager *pMemoryManager, + NvU32 flags, + NvU32 attr +) +{ + NV_ADDRESS_SPACE addrSpace = ADDR_UNKNOWN; + + if (flags & NVOS32_ALLOC_FLAGS_VIRTUAL) + { + addrSpace = ADDR_VIRTUAL; + } + else if (FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr)) + { + addrSpace = ADDR_FBMEM; + } + else + { + // In case location is SYSMEM or ANY, allocate in vidmem if protected flag is set. + if (flags & NVOS32_ALLOC_FLAGS_PROTECTED) + { + addrSpace = ADDR_FBMEM; + } + else + { + addrSpace = ADDR_SYSMEM; + } + } + + return addrSpace; +} + +NvU32 +memmgrGetMappableRamSizeMb_IMPL(MemoryManager *pMemoryManager) +{ + return NvU64_LO32(pMemoryManager->Ram.mapRamSizeMb); +} +// +// ZBC clear create/destroy routines. +// + +NV_STATUS +memmgrFillMemdescForPhysAttr_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + PMEMORY_DESCRIPTOR pMemDesc, + ADDRESS_TRANSLATION addressTranslation, + NvU64 *pOffset, + NvU32 *pMemAperture, + NvU32 *pMemKind, + NvU32 *pZCullId, + NvU32 *pGpuCacheAttr, + NvU32 *pGpuP2PCacheAttr, + NvU64 *contigSegmentSize +) +{ + NvU64 surfOffset = *pOffset, surfBase, surfLimit; + NvU32 zcbitmap; + + surfBase = memdescGetPhysAddr(pMemDesc, addressTranslation, 0); + surfLimit = surfBase + pMemDesc->Size - 1; + *pMemKind = memdescGetPteKind(pMemDesc); + + *pOffset = memdescGetPhysAddr(pMemDesc, addressTranslation, surfOffset); + + if (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM ) + *pMemAperture = NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_VIDMEM; + else if (memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) + *pMemAperture = NV0041_CTRL_CMD_GET_SURFACE_PHYS_ATTR_APERTURE_SYSMEM; + else if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL ) + { + // + // XXX we could theoretically find whatever phys mem object is plugged + // in at surfOffset w/in the virt object... that'd mean scanning + // pMemory->DmaMappingList + // + return NV_ERR_NOT_SUPPORTED; + } + else + return NV_ERR_GENERIC; + + if (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + *pGpuCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED; + } + else if (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) + { + *pGpuCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED; + } + else + { + *pGpuCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN; + } + + if (memdescGetGpuP2PCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + *pGpuP2PCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED; + } + else if (memdescGetGpuP2PCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) + { + *pGpuP2PCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_UNCACHED; + } + else + { + *pGpuP2PCacheAttr = NV0041_CTRL_GET_SURFACE_PHYS_ATTR_GPU_CACHED_UNKNOWN; + } + + zcbitmap = FB_HWRESID_ZCULL_VAL_FERMI(memdescGetHwResId(pMemDesc)); //bitmap form... 
need a scalar + for ( *pZCullId = 0; zcbitmap; zcbitmap >>= 1, *pZCullId += 1) {;;;} + *pZCullId -= 1; // side effect if there is no zcull id of setting ~0 + + *contigSegmentSize = surfLimit - (surfBase + surfOffset) + 1; + + if ( !memdescGetContiguity(pMemDesc, addressTranslation)) + { + // XXX overly conservative. we could scan the PTEs to find out if more pages are contig. + NvU64 surfOffsetLimitSame4KBPage = (4*1024)*((surfBase + surfOffset)/(4*1024)) + (4*1024) - 1; + if ( surfLimit >= surfOffsetLimitSame4KBPage ) + *contigSegmentSize = surfOffsetLimitSame4KBPage - (surfBase + surfOffset) + 1; + } + + return NV_OK; +} + +NvU32 +memmgrDeterminePageSize_IMPL +( + MemoryManager *pMemoryManager, + NvHandle hClient, + NvU64 memSize, + NvU32 memFormat, + NvU32 pageFormatFlags, + NvU32 *pRetAttr, + NvU32 *pRetAttr2 +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NV_ADDRESS_SPACE addrSpace; + NvBool bIsBigPageSupported; + RM_ATTR_PAGE_SIZE pageSizeAttr; + NvU32 pageSize = 0; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY) || (pKernelGmmu == NULL)) + { + pageSize = RM_PAGE_SIZE; + } + // Sanity check the arguments. + else if (pRetAttr == NULL || pRetAttr2 == NULL) + { + NV_ASSERT_OR_RETURN(0, 0); + } + else + { + addrSpace = memmgrAllocGetAddrSpace(pMemoryManager, pageFormatFlags, *pRetAttr); + + bIsBigPageSupported = memmgrLargePageSupported(pMemoryManager, addrSpace); + pageSizeAttr = dmaNvos32ToPageSizeAttr(*pRetAttr, *pRetAttr2); + + // + // Precedence in page size selection + // 1. CACHE_ONLY mode -> SMALL + // 2. !BigPageSupport (Sysmem && GpuSmmuOff ) -> SMALL + // 3. Client page size override -> Use override + // 4. HugePageSupported && size >= HugePageSize -> HUGE + // 5. Block-linear || size >= minSizeForBigPage || hClient || GpuSmmuOn -> BIG + // 6. none of the above -> SMALL + // + // On Tegra, we don't have a carveout/FB in production. So, we're + // not guaranteed to get BIG page sized or contiguous allocations + // from OS. But we need BIG page sized allocations for efficient Big GPU + // operation. We use the SMMU unit within the Tegra Memory Contoller (MC), + // to construct BIG pages from the 4KB small page allocations from OS. + // SMMU will linearize the discontiguous 4KB allocations into what will + // appear to the GPU as a large contiguous physical allocation. + // + // RM will eventually decide whether a SYSMEM allocation needs BIG page + // via GPU SMMU mapping. Right now, we give an option for RM clients to + // force it, via the SMMU_ON_GPU attribute. 
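    //
    // Worked examples of the precedence above (illustrative, not exhaustive):
    //  - CACHE_ONLY mode: always 4KB.
    //  - SYSMEM with the default 4KB sysmemPageSize: 4KB; explicit
    //    BIG/HUGE/512MB requests are rejected because big pages are not
    //    supported in sysmem in that configuration.
    //  - VIDMEM with RM_ATTR_PAGE_SIZE_DEFAULT and size >= 2MB on a
    //    huge-page-capable GPU: 2MB huge pages.
    //  - VIDMEM with RM_ATTR_PAGE_SIZE_DEFAULT, block-linear, size below 2MB:
    //    the GPU's big page size (64KB or 128KB).
    //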
+ // + if (gpuIsCacheOnlyModeEnabled(pGpu)) + { + pageSize = RM_PAGE_SIZE; + } + else if (!bIsBigPageSupported) + { + if (RM_ATTR_PAGE_SIZE_BIG == pageSizeAttr || + RM_ATTR_PAGE_SIZE_HUGE == pageSizeAttr || + RM_ATTR_PAGE_SIZE_512MB == pageSizeAttr) + { + NV_PRINTF(LEVEL_ERROR, + "Big/Huge/512MB page size not supported in sysmem.\n"); + NV_ASSERT_OR_RETURN(0, 0); + } + else + { + pageSize = RM_PAGE_SIZE; + } + } + else + { + switch (pageSizeAttr) + { + case RM_ATTR_PAGE_SIZE_INVALID: + NV_PRINTF(LEVEL_ERROR, "invalid page size attr\n"); + NV_ASSERT_OR_RETURN(0, 0); + + case RM_ATTR_PAGE_SIZE_DEFAULT: + { + NvBool bUseDefaultHugePagesize = NV_TRUE; + // WDDMV2 Windows it expect default page size to be 4K /64KB /128KB + if (bUseDefaultHugePagesize && + kgmmuIsHugePageSupported(pKernelGmmu) && + (memSize >= RM_PAGE_SIZE_HUGE) && (addrSpace != ADDR_SYSMEM || + pMemoryManager->sysmemPageSize == RM_PAGE_SIZE_HUGE)) + { + pageSize = RM_PAGE_SIZE_HUGE; + break; + } + else if ((memFormat != NVOS32_ATTR_FORMAT_PITCH) || + (memSize >= kgmmuGetMinBigPageSize(pKernelGmmu)) || hClient || + FLD_TEST_DRF(OS32, _ATTR2, _SMMU_ON_GPU, _ENABLE, *pRetAttr2)) + { + pageSize = kgmmuGetMaxBigPageSize_HAL(pKernelGmmu); + break; + } + + pageSize = RM_PAGE_SIZE; + break; + } + + case RM_ATTR_PAGE_SIZE_4KB: + pageSize = RM_PAGE_SIZE; + break; + + case RM_ATTR_PAGE_SIZE_BIG: + pageSize = kgmmuGetMaxBigPageSize_HAL(pKernelGmmu); + break; + + case RM_ATTR_PAGE_SIZE_HUGE: + if (kgmmuIsHugePageSupported(pKernelGmmu)) + { + pageSize = RM_PAGE_SIZE_HUGE; + } + else + { + NV_ASSERT_OR_RETURN(0, 0); + } + break; + + case RM_ATTR_PAGE_SIZE_512MB: + if (kgmmuIsPageSize512mbSupported(pKernelGmmu)) + { + pageSize = RM_PAGE_SIZE_512M; + } + else + { + NV_ASSERT_OR_RETURN(0, 0); + } + break; + + default: + NV_ASSERT(0); + } + } + } + + switch (pageSize) + { + case RM_PAGE_SIZE: + *pRetAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, *pRetAttr); + break; + + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_128K: + *pRetAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, *pRetAttr); + break; + + case RM_PAGE_SIZE_HUGE: + *pRetAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, *pRetAttr); + *pRetAttr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB, *pRetAttr2); + break; + + case RM_PAGE_SIZE_512M: + *pRetAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, *pRetAttr); + *pRetAttr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _512MB, *pRetAttr2); + break; + + default: + NV_ASSERT(0); + } + + return pageSize; +} + +/*! + * Identify if platform's current configuration supports PMA + */ +NV_STATUS +memmgrSetPlatformPmaSupport_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + // + // KMD in WDDM mode will not support pma managed client page tables as + // in both cases client / OS manges it. + // + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM && !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TCC_MODE)) + { + memmgrSetClientPageTablesPmaManaged(pMemoryManager, NV_FALSE); + } + + // + // FB management should use PMA on Unix/Linux/Mods/Windows + // + if (RMCFG_FEATURE_PLATFORM_UNIX + || RMCFG_FEATURE_PLATFORM_MODS + || RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + pMemoryManager->bPmaSupportedOnPlatform = NV_TRUE; + } + + // + // PMA memory management is not currently supported in non SRIOV VGPU environment. + // The RPC mechanism needs to be expanded to distinguish allocation types. + // Bug #1735412 + // + // TODO : Remove these constraints. 
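    //
    // Net effect of the platform checks above and the vGPU checks below:
    // Unix, MODS and Windows WDDM platforms mark PMA as supported (WDDM
    // additionally keeps client page tables out of PMA management); a
    // non-SRIOV vGPU guest clears bPmaSupportedOnPlatform, while a vGPU host
    // either disables PMA-managed client page tables (when bVgpuPmaSupport is
    // set) or clears bPmaSupportedOnPlatform as well.
    //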
+ if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + pMemoryManager->bPmaSupportedOnPlatform = NV_FALSE; + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU)) + { + if (pMemoryManager->bVgpuPmaSupport) + { + memmgrSetClientPageTablesPmaManaged(pMemoryManager, NV_FALSE); + } + else + { + pMemoryManager->bPmaSupportedOnPlatform = NV_FALSE; + } + } + return (NV_OK); +} + +/*! + * Allocate console region in CPU-RM based on region table passed from Physical RM + */ +NV_STATUS +memmgrAllocateConsoleRegion_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + FB_REGION_DESCRIPTOR *pConsoleFbRegion +) +{ + + NV_STATUS status = NV_OK; + NvU32 consoleRegionId = 0x0; + NvU64 regionSize; + + if (pMemoryManager->Ram.ReservedConsoleDispMemSize > 0) + { + pConsoleFbRegion->base = pMemoryManager->Ram.fbRegion[consoleRegionId].base; + pConsoleFbRegion->limit = pMemoryManager->Ram.fbRegion[consoleRegionId].limit; + + regionSize = pConsoleFbRegion->limit - pConsoleFbRegion->base + 1; + + // Once the console is reserved, we don't expect to reserve it again + NV_ASSERT_OR_RETURN(pMemoryManager->Ram.pReservedConsoleMemDesc == NULL, + NV_ERR_STATE_IN_USE); + + status = memdescCreate(&pMemoryManager->Ram.pReservedConsoleMemDesc, pGpu, + regionSize, RM_PAGE_SIZE_64K, NV_TRUE, ADDR_FBMEM, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE); + if (status != NV_OK) + { + pConsoleFbRegion->base = pConsoleFbRegion->limit = 0; + return status; + } + + memdescDescribe(pMemoryManager->Ram.pReservedConsoleMemDesc, ADDR_FBMEM, + pConsoleFbRegion->base, regionSize); + memdescSetPageSize(pMemoryManager->Ram.pReservedConsoleMemDesc, + AT_GPU, RM_PAGE_SIZE); + + + NV_PRINTF(LEVEL_INFO, "Allocating console region of size: %llx, at base : %llx \n ", + regionSize, pConsoleFbRegion->base); + } + + return status; +} + +void +memmgrReleaseConsoleRegion_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + memdescDestroy(pMemoryManager->Ram.pReservedConsoleMemDesc); + pMemoryManager->Ram.pReservedConsoleMemDesc = NULL; +} + +PMEMORY_DESCRIPTOR +memmgrGetReservedConsoleMemDesc_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return pMemoryManager->Ram.pReservedConsoleMemDesc; +} + +/*! + * Reserve FB for allocating BAR2 Page Dirs and Page Tables + */ +void +memmgrReserveBar2BackingStore_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *pAddr +) +{ + NvU64 tmpAddr = *pAddr; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NvU32 pageDirsSize = kbusGetSizeOfBar2PageDirs_HAL(pGpu, pKernelBus); + NvU32 pageTblsSize = kbusGetSizeOfBar2PageTables_HAL(pGpu, pKernelBus); + + // Reserve space for BAR2 Page Dirs + if (pKernelBus->PDEBAR2Aperture == ADDR_FBMEM) + { + tmpAddr = NV_ROUNDUP(tmpAddr, RM_PAGE_SIZE); + pKernelBus->bar2[GPU_GFID_PF].pdeBase = tmpAddr; + tmpAddr += pageDirsSize; + } + + // Reserve space for BAR2 Page Tables + if (pKernelBus->PTEBAR2Aperture == ADDR_FBMEM) + { + tmpAddr = NV_ROUNDUP(tmpAddr, RM_PAGE_SIZE); + pKernelBus->bar2[GPU_GFID_PF].pteBase = tmpAddr; + tmpAddr += pageTblsSize; + } + + NV_PRINTF(LEVEL_INFO, "Reserve space for bar2 Page dirs offset = 0x%llx size = 0x%x\n", + pKernelBus->bar2[GPU_GFID_PF].pdeBase, pageDirsSize); + + NV_PRINTF(LEVEL_INFO, "Reserve space for bar2 Page tables offset = 0x%llx size = 0x%x\n", + pKernelBus->bar2[GPU_GFID_PF].pteBase, pageTblsSize); + + *pAddr = NV_ROUNDUP(tmpAddr, RM_PAGE_SIZE); +} + +/*! 
+ * Calculate the Vista reserved memory requirement per FB region for mixed type/density + */ +void +memmgrCalcReservedFbSpace_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NvU64 rsvdFastSize = 0; + NvU64 rsvdSlowSize = 0; + NvU64 rsvdISOSize = 0; + NvU32 i; + NvU32 idxISORegion = 0; + NvU32 idxFastRegion = 0; + NvU32 idxSlowRegion = 0; + + // + // This is a hack solely for Vista (on Vista the OS controls the majority of heap). + // Linux and Mac don't have reserved memory and doesn't use this function. + // + // On Vista, Fermi's instance memory is not reserved by RM anymore. + // KMD has to reserve enough instance memory for driver private data. + // This function does the calculation of needed space. See bug 642233. + // While it returns the result in Mb, the calculation is made with byte + // + + // If we have no usable memory then we can't reserve any. + if (!pMemoryManager->Ram.fbUsableMemSize) + return; + + // If reserved memory requirements have already been calculated, don't do it again. + if (pMemoryManager->bLddmReservedMemoryCalculated) + return; + + memmgrCalcReservedFbSpaceHal_HAL(pGpu, pMemoryManager, &rsvdFastSize, &rsvdSlowSize, &rsvdISOSize); + + // If we have regions defined, fill in the per-segment reserved memory requirement + if (pMemoryManager->Ram.numFBRegions > 0) + { + FB_REGION_DESCRIPTOR *pFbRegion = NULL; + NvU64 regionSize = 0; + + // + // Find the fastest and ISO regions. This search makes a soft assumption that + // region #0 is not reserved, fastest, and supports ISO -- that would be stupid + // + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + pFbRegion = &pMemoryManager->Ram.fbRegion[i]; + regionSize = (pFbRegion->limit - pFbRegion->base +1); + + // Check only non-reserved regions (which are typically unpopulated blackholes in address space) + if ((!pFbRegion->bRsvdRegion) && + (!pFbRegion->bProtected) && + (regionSize >= (rsvdFastSize + rsvdSlowSize + rsvdISOSize))) + { + // Find the fastest region + if ((pFbRegion->performance > pMemoryManager->Ram.fbRegion[idxFastRegion].performance) + || pMemoryManager->Ram.fbRegion[idxFastRegion].bRsvdRegion + || pMemoryManager->Ram.fbRegion[idxFastRegion].bProtected) + { + idxFastRegion = i; + } + // Find the slowest region + if ((pFbRegion->performance < pMemoryManager->Ram.fbRegion[idxSlowRegion].performance) + || pMemoryManager->Ram.fbRegion[idxSlowRegion].bRsvdRegion + || pMemoryManager->Ram.fbRegion[idxSlowRegion].bProtected) + { + idxSlowRegion = i; + } + // Find the fastest ISO region + if (pFbRegion->bSupportISO) + { + if ((!pMemoryManager->Ram.fbRegion[idxISORegion].bSupportISO) || + (pFbRegion->performance > pMemoryManager->Ram.fbRegion[idxISORegion].performance) + || pMemoryManager->Ram.fbRegion[idxISORegion].bProtected) + { + idxISORegion = i; + } + } + } + } + + // There should *ALWAYS* be a region that supports ISO, even if we have no display + NV_ASSERT(pMemoryManager->Ram.fbRegion[idxISORegion].bSupportISO); + + // There should *ALWAYS* be a non-reserved region that is faster than reserved and supports ISO + NV_ASSERT(!pMemoryManager->Ram.fbRegion[idxISORegion].bRsvdRegion); + NV_ASSERT(!pMemoryManager->Ram.fbRegion[idxFastRegion].bRsvdRegion); + NV_ASSERT(!pMemoryManager->Ram.fbRegion[idxSlowRegion].bRsvdRegion); + + // Can't put reserved memory in protected region + NV_ASSERT(!pMemoryManager->Ram.fbRegion[idxISORegion].bProtected); + NV_ASSERT(!pMemoryManager->Ram.fbRegion[idxFastRegion].bProtected); + 
NV_ASSERT(!pMemoryManager->Ram.fbRegion[idxSlowRegion].bProtected); + + // + // Vista expects to be able to VidHeapControl allocate a cursor in ISO + // + // For mixed density reserved memory should be split between "fast" and + // "slow" memory. Fast memory should also support ISO. The policy to + // prefer "slow" vs "fast" memory is platform dependent. + // + pMemoryManager->Ram.fbRegion[idxISORegion].rsvdSize += rsvdISOSize; + pMemoryManager->Ram.fbRegion[idxSlowRegion].rsvdSize += rsvdSlowSize; + pMemoryManager->Ram.fbRegion[idxFastRegion].rsvdSize += rsvdFastSize; + + pMemoryManager->bLddmReservedMemoryCalculated = NV_TRUE; + } +} + +/*! + * Init channel size + * + * @param[in] pChannel OBJCHANNEL pointer + * @param[in] numCopyBlocks Number of copies that should fit in the push buffer + * + * @returns NV_STATUS + */ +void +memmgrMemUtilsSetupChannelBufferSizes_IMPL +( + MemoryManager *pMemoryManager, + OBJCHANNEL *pChannel, + NvU32 numCopyBlocks +) +{ + // set channel specific sizes + pChannel->channelPbSize = numCopyBlocks * MEMUTILS_SIZE_PER_BLOCK_INBYTES; + pChannel->channelNotifierSize = MEMUTILS_CHANNEL_NOTIFIER_SIZE; + pChannel->channelNumGpFifioEntries = MEMUTILS_NUM_GPFIFIO_ENTRIES; + pChannel->methodSizePerBlock = MEMUTILS_SIZE_PER_BLOCK_INBYTES; + pChannel->channelSize = pChannel->channelPbSize + MEMUTILS_CHANNEL_GPFIFO_SIZE + MEMUTILS_CHANNEL_SEMAPHORE_SIZE; + pChannel->semaOffset = pChannel->channelPbSize + MEMUTILS_CHANNEL_GPFIFO_SIZE; + pChannel->finishPayloadOffset = pChannel->semaOffset + 4; +} + +NV_STATUS memmgrFree_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + Heap *pHeap, + NvHandle hClient, + NvHandle hDevice, + NvHandle hVASpace, + NvU32 owner, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NvU64 offsetAlign; + NV_STATUS status; + NvU32 pmaFreeFlag = 0; + + // IRQL TEST: must be running at equivalent of passive-level + IRQL_ASSERT_AND_RETURN(!osIsRaisedIRQL()); + + if (pMemDesc == NULL) + return NV_ERR_INVALID_ARGUMENT; + + offsetAlign = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + + if (owner == NVOS32_BLOCK_TYPE_FREE) + return NV_ERR_INVALID_ARGUMENT; + + // Virtual heap allocs are tagged vitual and always own the memdesc + if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL) + { + OBJVASPACE *pVAS = NULL; + RsClient *pClient; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return status; + + status = vaspaceGetByHandleOrDeviceDefault(pClient, hDevice, hVASpace, &pVAS); + if (status != NV_OK) + return status; + + status = vaspaceFree(pVAS, offsetAlign); + memdescDestroy(pMemDesc); + return status; + } + + // Free up the memory allocated by PMA. + if (pMemDesc->pPmaAllocInfo) + { + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + OBJGPU *pMemdescOwnerGpu = NULL; + + // + // A memdesc can be duped under a peer device. In that case, before + // freeing FB make sure the GPU which owns the memdesc is available. + // Otherwise, just assert, destroy the memdesc and return NV_OK to + // make sure rest of the clean up happens correctly as we are on + // destroy path. + // Note this is just a WAR till ressrv bring in cleanup of dup objects + // on GPU tear down. + // RS-TODO: Nuke this check once the cleanup is implemented. + // + if (pGpu != pMemDesc->pGpu) + { + if (!gpumgrIsGpuPointerValid(pMemDesc->pGpu)) + { + // + // This should never happen. GPU tear down should always clear + // the duped memory list after resource server implements it. + // For now just assert! 
+ // + NV_ASSERT(0); + memdescDestroy(pMemDesc); + goto pma_free_exit; + } + } + + pMemdescOwnerGpu = pMemDesc->pGpu; + + // + // Similar to the above WAR, if portMem alocations fail for any reason, + // just assert and return NV_OK to ensure that the rest of the clean up + // happens correctly. + // + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pFbAllocInfo == NULL) + { + NV_ASSERT(0); + goto pma_free_exit; + } + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + if (pFbAllocPageFormat == NULL) { + NV_ASSERT(0); + goto pma_free_exit; + } + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + // + // Do not release any HW resources associated with this allocation + // until the last reference to the allocation is freed. Passing + // hwresid = 0 and format = pitch to memmgrFreeHwResources will ensure + // that no comptags/zcull/zbc resources are freed. + // + if (pMemDesc->RefCount == 1) + { + pFbAllocInfo->hwResId = memdescGetHwResId(pMemDesc); + pFbAllocInfo->format = memdescGetPteKind(pMemDesc); + } + else + { + pFbAllocInfo->hwResId = 0; + pFbAllocInfo->format = 0; + } + pFbAllocInfo->offset = offsetAlign; + pFbAllocInfo->size = pMemDesc->Size; + + // Free any HW resources allocated. + memmgrFreeHwResources(pMemdescOwnerGpu, + GPU_GET_MEMORY_MANAGER(pMemdescOwnerGpu), pFbAllocInfo); + + if (pMemDesc->pPmaAllocInfo != NULL) + { + // Disabling scrub on free for non compressible surfaces + if (RMCFG_FEATURE_PLATFORM_MODS && + !memmgrIsKind_HAL(GPU_GET_MEMORY_MANAGER(pMemdescOwnerGpu), + FB_IS_KIND_COMPRESSIBLE, + memdescGetPteKind(pMemDesc))) + { + pmaFreeFlag = PMA_FREE_SKIP_SCRUB; + } + + vidmemPmaFree(pMemdescOwnerGpu, pHeap, pMemDesc->pPmaAllocInfo, pmaFreeFlag); + NV_PRINTF(LEVEL_INFO, "Freeing PMA allocation\n"); + } + +pma_free_exit: + portMemFree(pFbAllocInfo); + portMemFree(pFbAllocPageFormat); + memdescDestroy(pMemDesc); + + return NV_OK; + } + + return heapFree(pGpu, pHeap, owner, pMemDesc); +} + +NV_STATUS +memmgrSetPartitionableMem_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM_PARAMS params = {0}; + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU64 bottomRsvdSize = 0; + NvU64 topRsvdSize = 0; + NvU32 bottomRegionIdx = 0xFFFF; + NvU32 topRegionIdx = 0xFFFF; + NvU32 i; + NvU64 size; + NvU64 base; + NvU64 offset; + NvU64 freeMem; + + // + // Find out the first and the last region for which internal heap or + // bRsvdRegion is true. 
In Ampere we should never have more than two + // discontigous RM reserved region + // To-Do - Bug 2301972 - Make sure that reserved memory is aligned to VMMU + // segments + // + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + if (pMemoryManager->Ram.fbRegion[i].bInternalHeap || + pMemoryManager->Ram.fbRegion[i].bRsvdRegion) + { + NvU64 rsvdSize = (pMemoryManager->Ram.fbRegion[i].limit - + pMemoryManager->Ram.fbRegion[i].base + 1); + + // Check if this is bottom reserved region + if (pMemoryManager->Ram.fbRegion[i].base == 0) + { + bottomRegionIdx = i; + bottomRsvdSize += rsvdSize; + } + else if (i > 0 && (pMemoryManager->Ram.fbRegion[i-1].bInternalHeap || + pMemoryManager->Ram.fbRegion[i-1].bRsvdRegion) && + (pMemoryManager->Ram.fbRegion[i].base == pMemoryManager->Ram.fbRegion[i - 1].limit + 1)) + { + // See if this is the contigous region with previous discovery + if (bottomRegionIdx == (i - 1)) + { + // Contigous bottom region + bottomRsvdSize += rsvdSize; + } + else + { + // Contigous top region + topRsvdSize += rsvdSize; + } + } + else + { + // + // Make sure we don't have discontigous reserved regions as + // they are not supported by HW also and we need to support + // these by using blacklisting mechanism. + // + if (topRegionIdx != 0xFFFF) + { + NV_PRINTF(LEVEL_ERROR, + "More than two discontigous rsvd regions found. " + "Rsvd region base - 0x%llx, Rsvd region Size - 0x%llx\n", + pMemoryManager->Ram.fbRegion[i].base, rsvdSize); + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + topRegionIdx = i; + topRsvdSize += rsvdSize; + } + } + } + + // + // Sanity check against the biggest available memory chunk. Pick the smallest + // of biggest available memory chunk or calculated total - reserved memory as + // in vGPU we are still using OBJHEAP and there are some allocations which + // happens at the top of the heap before we program this register + // + if (!memmgrIsPmaInitialized(pMemoryManager)) + { + NvU64 bytesTotal; + const NvU64 vgpuHeapWarSize = 256 *1024 * 1024; + NV_ASSERT_OK_OR_RETURN(heapInfo(pHeap, &freeMem, &bytesTotal, &base, + &offset, &size)); + + // + // offset is the starting address of biggest empty block whose size is + // returned and we care about the base of largest empty block + // + base = offset; + + // + // WAR - Bug-2383259 - TilL PMA is not enabled in vGPU-Host + // we need to delay reserve some memory at the top to full fill lazy + // allocations like FECS and GPCCS uCode. 
Leave 256MB at the top for + // such lazy allocations + // + if (size > vgpuHeapWarSize) + { + size -= vgpuHeapWarSize; + } + } + else + { + PMA_REGION_DESCRIPTOR *pFirstPmaRegionDesc = NULL; + NvU32 numPmaRegions; + + NV_ASSERT_OK_OR_RETURN(pmaGetRegionInfo(&pHeap->pmaObject, + &numPmaRegions, &pFirstPmaRegionDesc)); + + base = pFirstPmaRegionDesc->base; + pmaGetFreeMemory(&pHeap->pmaObject, &freeMem); + pmaGetTotalMemory(&pHeap->pmaObject, &size); + + // + // MIG won't be used alongside APM and hence the check below is of no use + // Even if we enable the check for APM the check will fail given that after + // enabling "scrub on free" using virtual CE writes, memory gets consumed by + // page tables backing the scrubber channel virtual mappings and hence the + // calculation below no longer holds good + // + if (!gpuIsApmFeatureEnabled(pGpu) || + !memmgrUseVasForCeMemoryOps(pMemoryManager) || + IS_MIG_ENABLED(pGpu)) + { + // + // PMA should be completely free at this point, otherwise we risk + // not setting the right partitionable range (pmaGetLargestFree's + // offset argument is not implemented as of this writing, so we + // only get the base address of the region that contains it). There + // is a known allocation from the top-level scrubber channel that + // is expected to be no larger than 64K. Issue a warning for any + // other uses. + // + if ((size > RM_PAGE_SIZE_64K) && + (freeMem < (size - RM_PAGE_SIZE_64K))) + { + NV_PRINTF(LEVEL_ERROR, + "Assumption that PMA is empty (after accounting for the top-level scrubber) is not met!\n"); + NV_PRINTF(LEVEL_ERROR, + " free space = 0x%llx bytes, total space = 0x%llx bytes\n", + freeMem, size); + NV_ASSERT_OR_RETURN(freeMem >= (size - RM_PAGE_SIZE_64K), + NV_ERR_INVALID_STATE); + } + } + } + + if (size == 0) + { + NV_PRINTF(LEVEL_ERROR, + "No partitionable memory. MIG memory partitioning can't be enabled.\n"); + return NV_OK; + } + + if (base != bottomRsvdSize) + { + NV_PRINTF(LEVEL_ERROR, + "Partitionable memory start - 0x%llx not aligned with RM reserved " + "region base-end - 0x%llx\n", base, bottomRsvdSize); + return NV_ERR_INVALID_STATE; + } + + params.partitionableMemSize = size; + params.bottomRsvdSize = bottomRsvdSize; + params.topRsvdSize = topRsvdSize; + + // Call physical MemorySystem to align and program the partitionable range + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMSYS_SET_PARTITIONABLE_MEM, + ¶ms, + sizeof(params))); + + pMemoryManager->MIGMemoryPartitioningInfo.partitionableMemoryRange = + rangeMake(params.partitionableStartAddr, params.partitionableEndAddr); + + // + // Make sure the created range is a valid range. + // rangeIsEmpty checks lo > hi, which should be good enough to catch + // inverted range case. + // + NV_ASSERT_OR_RETURN(!rangeIsEmpty(pMemoryManager->MIGMemoryPartitioningInfo.partitionableMemoryRange), + NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN(memmgrSetMIGPartitionableBAR1Range(pGpu, pMemoryManager)); + + if (IS_GSP_CLIENT(pGpu)) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + // + // The Physical RM initializes its AMAPLIB context via + // memsysSetPartitionableMem_HAL(). The GSP Client RM has a separate + // AMAPLIB context that must also be initialized. 
+ // + kmemsysReadMIGMemoryCfg_HAL(pGpu, pKernelMemorySystem); + } + + return NV_OK; +} + +NV_STATUS +memmgrFillComprInfo_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 pageSize, + NvU32 pageCount, + NvU32 kind, + NvU64 surfOffset, + NvU32 compTagStartOffset, + COMPR_INFO *pComprInfo +) +{ + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + portMemSet(pComprInfo, 0, sizeof(*pComprInfo)); + + pComprInfo->kind = kind; + + if (!memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind)) + return NV_OK; + + NV_ASSERT(compTagStartOffset != ~(NvU32)0); + + pComprInfo->compPageShift = pMemorySystemConfig->comprPageShift; + pComprInfo->compTagLineMin = compTagStartOffset; + pComprInfo->compPageIndexLo = (NvU32)(surfOffset >> pComprInfo->compPageShift); + pComprInfo->compPageIndexHi = (NvU32)((surfOffset + pageSize * pageCount - 1) >> pComprInfo->compPageShift); + pComprInfo->compTagLineMultiplier = 1; + + return NV_OK; +} + +NV_STATUS +memmgrGetKindComprForGpu_KERNEL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + OBJGPU *pMappingGpu, + NvU64 offset, + NvU32 *pKind, + COMPR_INFO *pComprInfo +) +{ + NvU32 ctagId = FB_HWRESID_CTAGID_VAL_FERMI(memdescGetHwResId(pMemDesc)); + NvU32 kind = memdescGetPteKindForGpu(pMemDesc, pMappingGpu); + const MEMORY_SYSTEM_STATIC_CONFIG *pMappingMemSysConfig = + kmemsysGetStaticConfig(pMappingGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pMappingGpu)); + + // Compression is not supported on memory not backed by a GPU + if (pMemDesc->pGpu != NULL && memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind) && + (ctagId == 0 || ctagId == FB_HWRESID_CTAGID_VAL_FERMI(-1))) + { + portMemSet(pComprInfo, 0, sizeof(*pComprInfo)); + + pComprInfo->kind = kind; + pComprInfo->compPageShift = pMappingMemSysConfig->comprPageShift; + pComprInfo->bPhysBasedComptags = NV_TRUE; + pComprInfo->compTagLineMin = 1; + } + else + { + memmgrFillComprInfoUncompressed(pMemoryManager, kind, pComprInfo); + } + + *pKind = pComprInfo->kind; + + return NV_OK; +} + +NV_STATUS +memmgrGetKindComprFromMemDesc_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + NvU64 offset, + NvU32 *kind, + COMPR_INFO *pComprInfo +) +{ + return memmgrGetKindComprForGpu_HAL(pMemoryManager, pMemDesc, pMemDesc->pGpu, + offset, kind, pComprInfo); +} + +NvBool +memmgrIsCompressible_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + COMPR_INFO comprInfo; + NvU32 kind; + + NV_CHECK_OR_RETURN(LEVEL_ERROR, + memmgrGetKindComprFromMemDesc(pMemoryManager, pMemDesc, 0, &kind, &comprInfo) == NV_OK, + NV_FALSE); + + return memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, comprInfo.kind); +} + +void +memmgrSetMIGPartitionableMemoryRange_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NV_RANGE range +) +{ + pMemoryManager->MIGMemoryPartitioningInfo.partitionableMemoryRange = range; +} + +NV_RANGE +memmgrGetMIGPartitionableMemoryRange_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return pMemoryManager->MIGMemoryPartitioningInfo.partitionableMemoryRange; +} + +/* + * @brief Sets total partitionable BAR1 + */ +NV_STATUS +memmgrSetMIGPartitionableBAR1Range_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + OBJVASPACE *pBar1VAS = kbusGetBar1VASpace_HAL(pGpu, pKernelBus); + OBJEHEAP *pVASHeap = vaspaceGetHeap(pBar1VAS); + NvU64 largestFreeOffset = 0; + NvU64 largestFreeSize = 
0; + NvU64 partitionableBar1Start; + NvU64 partitionableBar1End; + + // Get partitionable BAR1 range + pVASHeap->eheapInfo(pVASHeap, NULL, NULL, &largestFreeOffset, &largestFreeSize, NULL, NULL); + + // + // We are not considering alignment here because VA space is reserved/allocated in chunks of pages + // so largestFreeOffset should be already aligned. + // + partitionableBar1Start = largestFreeOffset; + partitionableBar1End = largestFreeOffset + largestFreeSize - 1; + NV_ASSERT_OR_RETURN(partitionableBar1Start >= vaspaceGetVaStart(pBar1VAS), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(partitionableBar1End <= vaspaceGetVaLimit(pBar1VAS), NV_ERR_INVALID_STATE); + + pMemoryManager->MIGMemoryPartitioningInfo.partitionableBar1Range = rangeMake(partitionableBar1Start, partitionableBar1End); + return NV_OK; +} + +NV_RANGE +memmgrGetMIGPartitionableBAR1Range_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return pMemoryManager->MIGMemoryPartitioningInfo.partitionableBar1Range; +} + +NV_STATUS +memmgrAllocMIGGPUInstanceMemory_VF +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 swizzId, + NvHandle *phMemory, + NV_RANGE *pAddrRange, + Heap **ppMemoryPartitionHeap +) +{ + // For vGpu we have a static memory allocation + *phMemory = NV01_NULL_OBJECT; + *pAddrRange = pMemoryManager->MIGMemoryPartitioningInfo.partitionableMemoryRange; + *ppMemoryPartitionHeap = GPU_GET_HEAP(pGpu); + + return NV_OK; +} + +// Function to allocate memory for a GPU instance +NV_STATUS +memmgrAllocMIGGPUInstanceMemory_PF +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 swizzId, + NvHandle *phMemory, + NV_RANGE *pAddrRange, + Heap **ppMemoryPartitionHeap +) +{ + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NV_STATUS rmStatus = NV_OK; + NvHandle hMemory = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + NV_ASSERT_OR_RETURN(pKernelMIGManager != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OK_OR_RETURN(kmemsysGetMIGGPUInstanceMemInfo(pGpu, pKernelMemorySystem, swizzId, pAddrRange)); + + // + // Only allocate memory for non swizzID-0 GPU instances as swizzID-0 owns full + // gpu and there is no need to pre-reserve memory for that + // + if (kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, swizzId)) + { + // + // Allocate memory using vidHeapControl + // + // vidHeapControl calls should happen outside GPU locks + // This is a PMA requirement as memory allocation calls may invoke eviction + // which UVM could get stuck behind GPU lock + // See Bug 1735851-#24 + // + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + + // Allocate gpfifo entries + NV_MEMORY_ALLOCATION_PARAMS memAllocParams; + portMemSet(&memAllocParams, 0, sizeof(NV_MEMORY_ALLOCATION_PARAMS)); + memAllocParams.owner = HEAP_OWNER_RM_CLIENT_GENERIC; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.size = rangeLength(*pAddrRange); + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS); + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT); + memAllocParams.attr2 = DRF_DEF(OS32, _ATTR2, _PAGE_OFFLINING, _OFF); // free the offlined pages + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + memAllocParams.rangeLo = 0; + memAllocParams.rangeHi = 0; + memAllocParams.offset = pAddrRange->lo; // Offset needed if fixed address allocation + memAllocParams.hVASpace = 0; // 
Physical allocation + memAllocParams.internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_SKIP_SCRUB; + + rmStatus = pRmApi->Alloc(pRmApi, + pMemoryManager->MIGMemoryPartitioningInfo.hClient, + pMemoryManager->MIGMemoryPartitioningInfo.hSubdevice, + &hMemory, + NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + // Reaquire the GPU locks + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to grab RM-Lock\n"); + DBG_BREAKPOINT(); + rmStatus = NV_ERR_GENERIC; + goto cleanup; + } + + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate physical memory for GPU instance.\n"); + return rmStatus; + } + } + rmStatus = _memmgrInitMIGMemoryPartitionHeap(pGpu, pMemoryManager, swizzId, pAddrRange, ppMemoryPartitionHeap); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to initialize memory partition heap\n"); + goto cleanup; + } + + NV_PRINTF(LEVEL_INFO, + "Allocated memory partition heap for swizzId - %d with StartAddr - 0x%llx, endAddr - 0x%llx.\n", + swizzId, pAddrRange->lo, pAddrRange->hi); + + *phMemory = hMemory; + return rmStatus; + +cleanup: + pRmApi->Free(pRmApi, pMemoryManager->MIGMemoryPartitioningInfo.hClient, hMemory); + + return rmStatus; +} + +// Function to initialize heap for managing MIG partition memory +static NV_STATUS +_memmgrInitMIGMemoryPartitionHeap +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 swizzId, + NV_RANGE *pAddrRange, + Heap **ppMemoryPartitionHeap +) +{ + NV_STATUS status = NV_OK; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + Heap *pMemoryPartitionHeap = NULL; + + // Use default heap for swizzID-0 as we don't prereserve memory for swizzID-0 + NV_ASSERT_OR_RETURN(pKernelMIGManager != NULL, NV_ERR_INVALID_STATE); + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, swizzId)) + { + *ppMemoryPartitionHeap = pMemoryManager->pHeap; + return NV_OK; + } + else + { + *ppMemoryPartitionHeap = NULL; + } + + NV_ASSERT_OK_OR_GOTO( + status, + objCreate(ppMemoryPartitionHeap, pMemoryManager, Heap), + fail); + + pMemoryPartitionHeap = *ppMemoryPartitionHeap; + + if (memmgrIsPmaEnabled(pMemoryManager) && + memmgrIsPmaSupportedOnPlatform(pMemoryManager)) + { + portMemSet(&pMemoryPartitionHeap->pmaObject, 0, sizeof(pMemoryPartitionHeap->pmaObject)); + NV_ASSERT_OK_OR_GOTO( + status, + memmgrPmaInitialize(pGpu, pMemoryManager, &pMemoryPartitionHeap->pmaObject), + fail); + } + + NV_ASSERT_OK_OR_GOTO( + status, + heapInit(pGpu, pMemoryPartitionHeap, pAddrRange->lo, + rangeLength(*pAddrRange), + HEAP_TYPE_PARTITION_LOCAL, + GPU_GFID_PF, + NULL), + fail); + + if (memmgrIsPmaInitialized(pMemoryManager) && + (pMemoryPartitionHeap->bHasFbRegions)) + { + NV_ASSERT_OK_OR_GOTO( + status, + memmgrPmaRegisterRegions(pGpu, pMemoryManager, pMemoryPartitionHeap, + &pMemoryPartitionHeap->pmaObject), + fail); + } + + if (!IsSLIEnabled(pGpu)) + { + // Do the actual blacklisting of pages from the heap + if (pMemoryPartitionHeap->blackListAddresses.count != 0) + { + status = heapBlackListPages(pGpu, pMemoryPartitionHeap); + + if (status != NV_OK) + { + // Warn and continue + NV_PRINTF(LEVEL_WARNING, "Error 0x%x creating blacklist\n", + status); + } + } + } + + return NV_OK; + +fail: + + if (pMemoryPartitionHeap != NULL) + { + objDelete(pMemoryPartitionHeap); + *ppMemoryPartitionHeap = NULL; + } + + return status; +} + +// Function to free GPU instance memory +NV_STATUS +memmgrFreeMIGGPUInstanceMemory_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 
swizzId, + NvHandle hMemory, + Heap **ppMemoryPartitionHeap +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + NV_ASSERT_OR_RETURN(pKernelMIGManager != NULL, NV_ERR_INVALID_STATE); + NV_CHECK_OR_RETURN(LEVEL_SILENT, hMemory != NV01_NULL_OBJECT, NV_OK); + + // Nothing to do for swizzId 0 as we neither allocate memory nor allocate new heap object + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, swizzId)) + return NV_OK; + + objDelete(*ppMemoryPartitionHeap); + *ppMemoryPartitionHeap = NULL; + + // Free allocated memory + pRmApi->Free(pRmApi, pMemoryManager->MIGMemoryPartitioningInfo.hClient, hMemory); + return NV_OK; +} + +void memmgrComprInfoDisableCompression_IMPL +( + MemoryManager *pMemoryManager, + COMPR_INFO *pComprInfo +) +{ + memmgrFillComprInfoUncompressed(pMemoryManager, pComprInfo->kind, pComprInfo); +} + +void memmgrFillComprInfoUncompressed_IMPL +( + MemoryManager *pMemoryManager, + NvU32 kind, + COMPR_INFO *pComprInfo +) +{ + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind)) + kind = memmgrGetUncompressedKind_HAL(ENG_GET_GPU(pMemoryManager), pMemoryManager, kind, NV_FALSE); + + portMemSet(pComprInfo, 0, sizeof(*pComprInfo)); + pComprInfo->kind = kind; +} + +/*! + * @brief Creates the SW state of the page level pools. + * + * @param pGpu + * @param pMemoryManager + * + * @returns On success, returns NV_OK. + * On failure, returns error code. + */ +NV_STATUS +memmgrPageLevelPoolsCreate_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV_STATUS status = NV_OK; + + if (RMCFG_FEATURE_PMA && + memmgrIsPmaInitialized(pMemoryManager) && + memmgrAreClientPageTablesPmaManaged(pMemoryManager)) + { + Heap *pHeap = GPU_GET_HEAP(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const GMMU_FMT *pFmt = NULL; + + pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_DEFAULT, 0); + NV_ASSERT_OR_RETURN(NULL != pFmt, NV_ERR_INVALID_ARGUMENT); + + status = rmMemPoolSetup((void *)&pHeap->pmaObject, &pMemoryManager->pPageLevelReserve, + (pFmt->version == GMMU_FMT_VERSION_1) ? POOL_CONFIG_GMMU_FMT_1 : POOL_CONFIG_GMMU_FMT_2); + + NV_ASSERT(NV_OK == status); + } + return status; +} + +/*! + * @brief Destroys the SW state of the page level pools. + * + * @param pGpu + * @param pMemoryManager + * + * @returns + */ +void +memmgrPageLevelPoolsDestroy_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + if (RMCFG_FEATURE_PMA && + memmgrIsPmaInitialized(pMemoryManager) && + memmgrAreClientPageTablesPmaManaged(pMemoryManager)) + { + rmMemPoolDestroy(pMemoryManager->pPageLevelReserve); + pMemoryManager->pPageLevelReserve = NULL; + } +} + +/*! + * @brief Gets page level pool to use + * + * @param pGpu + * @param pMemoryManager + * @param[in] hClient client handle + * @param[out] ppMemPoolInfo page level pool + * + * @returns On success, returns NV_OK. + * On failure, returns error code. 
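 *
 * A minimal usage sketch (illustrative only; assumes a valid hClient and
 * PMA-managed client page tables):
 *
 *     RM_POOL_ALLOC_MEM_RESERVE_INFO *pPool = NULL;
 *     if (memmgrPageLevelPoolsGetInfo(pGpu, pMemoryManager, hClient, &pPool) == NV_OK)
 *     {
 *         // pPool is either the GPU instance's pPageTableMemPool (MIG memory
 *         // partitioning active) or pMemoryManager->pPageLevelReserve.
 *     }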
+ */ +NV_STATUS +memmgrPageLevelPoolsGetInfo_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvHandle hClient, + RM_POOL_ALLOC_MEM_RESERVE_INFO **ppMemPoolInfo +) +{ + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bMemPartitioningEnabled = (pKernelMIGManager != NULL) && kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager); + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemPool = NULL; + NV_ASSERT_OR_RETURN(ppMemPoolInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + if (!memmgrIsPmaInitialized(pMemoryManager) || + !memmgrAreClientPageTablesPmaManaged(pMemoryManager)) + { + return NV_ERR_INVALID_STATE; + } + + // If memory partitioning is enabled, then use per-partition pool allocator + if (bMemPartitioningEnabled) + { + MIG_INSTANCE_REF ref; + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + pMemPool = ref.pKernelMIGGpuInstance->pPageTableMemPool; + } + else + { + pMemPool = pMemoryManager->pPageLevelReserve; + } + NV_ASSERT_OR_RETURN(pMemPool != NULL, NV_ERR_INVALID_STATE); + + *ppMemPoolInfo = pMemPool; + return NV_OK; +} + +/*! + * @brief Initialize the PMA object + * + * @param pGpu + * @param pMemoryManager + * @param[in] pPma Pointer to the PMA object to init + * + * @returns On success, returns NV_OK. + * On failure, returns error code. + */ +NV_STATUS +memmgrPmaInitialize_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + PMA *pPma +) +{ + NvU32 pmaInitFlags = PMA_INIT_NONE; + NV_STATUS status = NV_OK; + NvBool bNumaEnabled = osNumaOnliningEnabled(pGpu->pOsGpuInfo); + + NV_ASSERT(memmgrIsPmaEnabled(pMemoryManager) && + memmgrIsPmaSupportedOnPlatform(pMemoryManager)); + + if (memmgrIsPmaForcePersistence(pMemoryManager)) + { + pmaInitFlags |= PMA_INIT_FORCE_PERSISTENCE; + } + + if (memmgrIsScrubOnFreeEnabled(pMemoryManager)) + { + pmaInitFlags |= PMA_INIT_SCRUB_ON_FREE; + } + + // Disable client page table management on SLI. + if (IsSLIEnabled(pGpu)) + { + memmgrSetClientPageTablesPmaManaged(pMemoryManager, NV_FALSE); + } + + if (bNumaEnabled) + { + NV_PRINTF(LEVEL_INFO, "Initializing PMA with NUMA flag.\n"); + pmaInitFlags |= PMA_INIT_NUMA; + } + + if (memmgrIsPmaAddrTree(pMemoryManager)) + { + pmaInitFlags |= PMA_INIT_ADDRTREE; + } + + status = pmaInitialize(pPma, pmaInitFlags); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to initialize PMA!\n"); + return status; + } + + if (bNumaEnabled) + { + NvU32 numaSkipReclaimVal = NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_DEFAULT; + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE, &numaSkipReclaimVal) == NV_OK) + { + if (numaSkipReclaimVal > NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MAX) + { + numaSkipReclaimVal = NV_REG_STR_RM_NUMA_ALLOC_SKIP_RECLAIM_PERCENTAGE_MAX; + } + } + pmaNumaSetReclaimSkipThreshold(pPma, numaSkipReclaimVal); + } + + return NV_OK; +} + +NV_STATUS +memmgrInitFbRegions_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV_ASSERT_OR_RETURN(pMemoryManager->Ram.numFBRegions == 0, NV_ERR_INVALID_STATE); + + // Dont setup regions if FB is broken and we aren't using L2 cache as "FB". + if ((pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + !gpuIsCacheOnlyModeEnabled(pGpu))) + return NV_OK; + + NV_ASSERT_OK_OR_RETURN(memmgrInitBaseFbRegions_HAL(pGpu, pMemoryManager)); + + NV_ASSERT_OK_OR_RETURN(memmgrInitFbRegionsHal_HAL(pGpu, pMemoryManager)); + + // + // Build a list of regions sorted by allocation priority + // (highest to lowest). 
Used for allocations using ObjHeap. + // + memmgrRegenerateFbRegionPriority(pGpu, pMemoryManager); + + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_EXTERNAL_HEAP_CONTROL)) + { + // KMD in WDDM mode + if (pMemoryManager->bMixedDensityFbp) + { + // + // For mixed memory on LDDM platforms, when we are using kernel-managed + // heap (not TCC mode), we want to prefer allocating in slow memory to conserve + // fast memory for applications. + // + pMemoryManager->bPreferSlowRegion = NV_TRUE; + } + } + } + + NV_ASSERT_OK_OR_RETURN(memmgrSetPlatformPmaSupport(pGpu, pMemoryManager)); + + return NV_OK; +} + +/*! + * @brief Register regions to the PMA object + * + * @param pGpu + * @param pMemoryManager + * @param[in] pPma Pointer to the PMA object to register with + * + * @returns On success, returns NV_OK. + * On failure, returns error code. + */ +NV_STATUS +memmgrPmaRegisterRegions_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + Heap *pHeap, + PMA *pPma +) +{ + HEAP_TYPE_INTERNAL heapType = pHeap->heapType; + PMA_REGION_DESCRIPTOR pmaRegion; + NvU32 pmaRegionIdx = 0; + NvU32 i; + PMA_BLACKLIST_ADDRESS *pBlacklistPages = NULL; + NvU32 blRegionCount = 0; + NvU32 blPageIndex; + NvU32 blackListCount; + NvU64 base, size; + NV_STATUS status = NV_OK; + + blackListCount = pHeap->blackListAddresses.count; + base = pHeap->base; + size = pHeap->total; + + // + // If there are blacklisted pages, prepare a staging buffer to pass the + // per-region blacklisted pages to PMA + // + if (blackListCount > 0) + { + pBlacklistPages = portMemAllocNonPaged( + sizeof(PMA_BLACKLIST_ADDRESS) * blackListCount); + if (pBlacklistPages == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Could not allocate memory for blackList!\n"); + status = NV_ERR_NO_MEMORY; + goto _pmaInitFailed; + } + } + + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + // + // Skip all regions that are completely outside the heap boundry + // OR marked as internal(used for internal RM allocations) + // OR marked as reserved(used for console, display, link training buffer etc.) + // + if ((pMemoryManager->Ram.fbRegion[i].limit < base || + pMemoryManager->Ram.fbRegion[i].base >= (base + size)) || + (pMemoryManager->Ram.fbRegion[i].bInternalHeap) || + (pMemoryManager->Ram.fbRegion[i].bRsvdRegion)) + { + continue; + } + + NV_PRINTF(LEVEL_INFO, + "PMA: Register FB region[%d] %llx..%llx EXTERNAL\n", i, + pMemoryManager->Ram.fbRegion[i].base, pMemoryManager->Ram.fbRegion[i].limit); + + pmaRegion.base = pMemoryManager->Ram.fbRegion[i].base; + pmaRegion.limit = pMemoryManager->Ram.fbRegion[i].limit; + + // Check if the base of managed memory is not based at FB region base. 
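        //
        // Clamping sketch with made-up numbers: if the heap manages
        // [0x00000000..0x7FFFFFFF] but this FB region spans
        // [0x40000000..0x9FFFFFFF], the PMA region registered below becomes
        // [0x40000000..0x7FFFFFFF]: base is raised to the heap base and the
        // limit is capped at (base + size - 1).
        //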
+ if (pmaRegion.base < base) + { + pmaRegion.base = base; + } + + // check if limit of managed memory is less than FB region limit + if (pmaRegion.limit >= (base + size)) + { + pmaRegion.limit = base + size - 1; + } + + pmaRegion.performance = pMemoryManager->Ram.fbRegion[i].performance; + pmaRegion.bSupportCompressed = pMemoryManager->Ram.fbRegion[i].bSupportCompressed; + pmaRegion.bSupportISO = pMemoryManager->Ram.fbRegion[i].bSupportISO; + pmaRegion.bProtected = pMemoryManager->Ram.fbRegion[i].bProtected; + + // + // Now we know the region, find if it has any blacklisted pages + // TODO: Try to coalesce to unique 64K pages + // + blRegionCount = 0; + if (pBlacklistPages != NULL) + { + for (blPageIndex = 0; blPageIndex < blackListCount; blPageIndex++) + { + if ((pHeap->blackListAddresses.data[blPageIndex].address + != NV2080_CTRL_FB_OFFLINED_PAGES_INVALID_ADDRESS) && + (pHeap->blackListAddresses.data[blPageIndex].address >= pmaRegion.base) && + (pHeap->blackListAddresses.data[blPageIndex].address <= pmaRegion.limit)) + { + // Collect the region's blacklisted pages + pBlacklistPages[blRegionCount].physOffset = pHeap->blackListAddresses.data[blPageIndex].address; + + pBlacklistPages[blRegionCount].bIsDynamic = + ((pHeap->blackListAddresses.data[blPageIndex].type == + NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_MULTIPLE_SBE) || + (pHeap->blackListAddresses.data[blPageIndex].type == + NV2080_CTRL_FB_OFFLINED_PAGES_SOURCE_DPR_DBE)); + + blRegionCount++; + } + } + } + + NV_PRINTF(LEVEL_INFO, + "Register FB region %llx..%llx of size %llx with PMA\n", + pmaRegion.base, pmaRegion.limit, + pmaRegion.limit - pmaRegion.base + 1); + // + // Register the region for PMA management, and note if asynchronous + // scrubbing is enabled. Synchronous scrubbing is done before + // heap/PMA is initialized, but asynchronously scrubbed pages will + // need to be unmarked once they are scrubbed. + // + status = pmaRegisterRegion(pPma, pmaRegionIdx, + memmgrEccScrubInProgress_HAL(pGpu, pMemoryManager), + &pmaRegion, blRegionCount, + ((blRegionCount==0) ? NULL : pBlacklistPages)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to register FB region %llx..%llx with PMA\n", + pmaRegion.base, pmaRegion.limit); + DBG_BREAKPOINT(); + goto _pmaInitFailed; + } + pmaRegionIdx++; + } + + // + // bug #200354346, make sure the RM reserved region(s) are + // scrubbed during the region creation itself. Top Down scrubber, + // skips the RM reserved region(s) because the assumption is, they + // are pre-scrubbed. 
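    // (Note: a HEAP_TYPE_PARTITION_LOCAL heap covers only a MIG partition's
    // slice of FB, which presumably excludes the RM reserved region(s), so the
    // scrub below is restricted to the device-level heap.)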
+ // + if (heapType != HEAP_TYPE_PARTITION_LOCAL) + memmgrScrubInternalRegions_HAL(pGpu, pMemoryManager); + +_pmaInitFailed: + portMemFree(pBlacklistPages); + + if ((status == NV_OK) && (pMemoryManager->fbOverrideStartKb != 0)) + { + NvU64 allocSize = NV_ALIGN_UP(((NvU64)pMemoryManager->fbOverrideStartKb << 10), PMA_GRANULARITY); + NvU32 numPages = (NvU32)(allocSize >> PMA_PAGE_SHIFT); + PMA_ALLOCATION_OPTIONS allocOptions = {0}; + + allocOptions.flags = PMA_ALLOCATE_CONTIGUOUS; + allocOptions.flags |= PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE; + allocOptions.physBegin = 0; + allocOptions.physEnd = allocSize - 1; + + // This is intentionally thrown away + NvU64 *pPages = NULL; + pPages = portMemAllocNonPaged(numPages * sizeof(NvU64)); + if (pPages != NULL) + { + // Accommodate the regkey override for FB start + status = pmaAllocatePages(pPma, numPages, _PMA_64KB, &allocOptions, pPages); + portMemFree(pPages); + } + } + + if (status != NV_OK) + { + if (memmgrIsPmaInitialized(pMemoryManager)) + { + if (heapType != HEAP_TYPE_PARTITION_LOCAL) + { + memmgrSetPmaInitialized(pMemoryManager, NV_FALSE); + } + pmaDestroy(pPma); + } + } + + return status; +} + +/*! + * @brief Allocate internal handles for MIG partition memory allocation + */ +NV_STATUS +memmgrAllocMIGMemoryAllocationInternalHandles_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + NV_ASSERT_OR_RETURN(pMemoryManager->MIGMemoryPartitioningInfo.hClient == NV01_NULL_OBJECT, NV_ERR_INVALID_STATE); + NV_ASSERT_OK_OR_RETURN( + rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, + &pMemoryManager->MIGMemoryPartitioningInfo.hClient, + &pMemoryManager->MIGMemoryPartitioningInfo.hDevice, + &pMemoryManager->MIGMemoryPartitioningInfo.hSubdevice)); + + return NV_OK; +} + +/*! + * @brief Free internal handles used to support MIG memory partitioning + */ +void +memmgrFreeMIGMemoryAllocationInternalHandles_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + rmapiutilFreeClientAndDeviceHandles(pRmApi, + &pMemoryManager->MIGMemoryPartitioningInfo.hClient, + &pMemoryManager->MIGMemoryPartitioningInfo.hDevice, + &pMemoryManager->MIGMemoryPartitioningInfo.hSubdevice); +} + +/*! 
+ * @brief Gets free memory (client visible) for all valid GPU instances + */ +void +memmgrGetFreeMemoryForAllMIGGPUInstances_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *pBytes +) +{ + NvU64 val = 0; + Heap *pHeap = NULL; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance; + + *pBytes = 0; + + FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pKernelMIGGPUInstance) + { + NV_ASSERT(pKernelMIGGPUInstance->pMemoryPartitionHeap != NULL); + pHeap = pKernelMIGGPUInstance->pMemoryPartitionHeap; + + if (memmgrIsPmaInitialized(pMemoryManager)) + pmaGetFreeMemory(&pHeap->pmaObject, &val); + else + heapGetFree(pHeap, &val); + + *pBytes += val; + } + FOR_EACH_VALID_GPU_INSTANCE_END(); +} + +void +memmgrGetTopLevelScrubberStatus_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvBool *pbTopLevelScrubberEnabled, + NvBool *pbTopLevelScrubberConstructed +) +{ + NvBool bTopLevelScrubberEnabled = NV_FALSE; + NvBool bTopLevelScrubberConstructed = NV_FALSE; + NvU32 pmaConfigs = PMA_QUERY_SCRUB_ENABLED | PMA_QUERY_SCRUB_VALID; + + if (memmgrIsPmaInitialized(pMemoryManager)) + { + Heap *pHeap = GPU_GET_HEAP(pGpu); + NV_ASSERT_OK(pmaQueryConfigs(&pHeap->pmaObject, &pmaConfigs)); + bTopLevelScrubberEnabled = (pmaConfigs & PMA_QUERY_SCRUB_ENABLED) != 0x0; + bTopLevelScrubberConstructed = (pmaConfigs & PMA_QUERY_SCRUB_VALID) != 0x0; + } + + if (pbTopLevelScrubberEnabled != NULL) + *pbTopLevelScrubberEnabled = bTopLevelScrubberEnabled; + if (pbTopLevelScrubberConstructed != NULL) + *pbTopLevelScrubberConstructed = bTopLevelScrubberConstructed; +} + +/** + * @brief Save pre-MIG top level scrubber constructed status and teardown if constructed + */ +NV_STATUS +memmgrSaveAndDestroyTopLevelScrubber_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + // Save the pre-MIG top-level scrubber status for later + memmgrGetTopLevelScrubberStatus(pGpu, pMemoryManager, NULL, &pMemoryManager->MIGMemoryPartitioningInfo.bNonMIGTopLevelScrubber); + + // Destroy the top level scrubber if it exists + if (pMemoryManager->MIGMemoryPartitioningInfo.bNonMIGTopLevelScrubber) + { + // Delete top level scrubber + NV_ASSERT_OK_OR_RETURN(memmgrScrubHandlePreSchedulingDisable_HAL(pGpu, pMemoryManager)); + } + + return NV_OK; +} + +/** + * @brief Init top level scrubber if previous status was constructed + */ +NV_STATUS +memmgrInitSavedTopLevelScrubber_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + if (!pMemoryManager->MIGMemoryPartitioningInfo.bNonMIGTopLevelScrubber) + return NV_OK; + + NV_ASSERT_OK_OR_RETURN(memmgrScrubHandlePostSchedulingEnable_HAL(pGpu, pMemoryManager)); + + return NV_OK; +} + +/*! + * @brief Return the full address range for the partition assigend for the vGPU. 
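 *
 *        The range is computed by a plain min/max scan over every entry in
 *        Ram.fbRegion[], reserved and internal regions included; e.g. with
 *        regions [0x0..0x0FFFFFFF] and [0x10000000..0x7FFFFFFF] (example
 *        values only) the result is base = 0x0 and size = 0x80000000.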
+ * + * @param[in] pGpu + * @param[in] pMemoryManager + * @param[out] base reference to the base address of the partition + * @param[out] size reference to the overall size of the partition + */ +static void +_memmgrGetFullMIGAddrRange +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *base, + NvU64 *size +) +{ + NvU32 i; + NvU64 lo, hi; + + *base = 0; + *size = 0; + if (pMemoryManager->Ram.numFBRegions == 0) + { + return; + } + + lo = pMemoryManager->Ram.fbRegion[0].base; + hi = pMemoryManager->Ram.fbRegion[0].limit; + + for (i = 1; i < pMemoryManager->Ram.numFBRegions; i++) + { + if (pMemoryManager->Ram.fbRegion[i].base < lo) + { + lo = pMemoryManager->Ram.fbRegion[i].base; + } + + if (pMemoryManager->Ram.fbRegion[i].limit > hi) + { + hi = pMemoryManager->Ram.fbRegion[i].limit; + } + } + + *base = lo; + *size = hi - lo + 1; +} + +/*! + * @brief Discover MIG partitionable memory range based on PMA status + */ +NV_STATUS +memmgrDiscoverMIGPartitionableMemoryRange_VF +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NV_RANGE *pMemoryRange +) +{ + NvU64 size; + NvU64 base; + + // Set memory information + if (!memmgrIsPmaInitialized(pMemoryManager)) + { + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU64 freeMem; + NvU64 bytesTotal; + NvU64 offset; + + NV_ASSERT_OK_OR_RETURN(heapInfo(pHeap, &freeMem, &bytesTotal, &base, + &offset, &size)); + + // + // offset is the starting address of biggest empty block whose size is + // returned and we care about the base of largest empty block + // + base = offset; + } + else + { + // + // In the case of vGPU, pmaGetLargestFree only returns the user-visible + // PMA region and not the reserved/internal regions that constitute the + // overall partition size assigned to the vGPU. + // This is misleading as pMemoryManager->partitionableMemoryRange is expected to + // represent the actual partition size. + // + _memmgrGetFullMIGAddrRange(pGpu, pMemoryManager, &base, &size); + } + + *pMemoryRange = rangeMake(base, base + size - 1); + + return NV_OK; +} + +NV_STATUS +memmgrReserveMemoryForFsp_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_ctrl.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_ctrl.c new file mode 100644 index 000000000..1fb67cefc --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_ctrl.c @@ -0,0 +1,783 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/heap.h" +#include "mem_mgr/video_mem.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/gpu_resource_desc.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/subdevice/subdevice_diag.h" +#include "ctrl/ctrl0080/ctrl0080fb.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" +#include "rmapi/mapping_list.h" +#include "platform/chipset/chipset.h" + +#include "class/cl0040.h" /* NV01_MEMORY_LOCAL_USER */ +#include "class/cl003e.h" /* NV01_MEMORY_SYSTEM */ +#include "class/cl84a0.h" /* NV01_MEMORY_LIST_XXX */ +#include "class/cl00b1.h" /* NV01_MEMORY_HW_RESOURCES */ + +// +// memmgrGetDeviceCaps +// +// This routine gets cap bits in unicast. If bCapsInitialized is passed as +// NV_FALSE, the caps will be copied into pFbCaps without OR/ANDing. Otherwise, +// the caps bits for the current GPU will be OR/ANDed together with pFbCaps to +// create a single set of caps that accurately represents the functionality of +// the device. +// +static void +memmgrGetDeviceCaps +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU8 *pFbCaps, + NvBool bCapsInitialized +) +{ + NvU8 tempCaps[NV0080_CTRL_FB_CAPS_TBL_SIZE], temp; + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + portMemSet(tempCaps, 0, NV0080_CTRL_FB_CAPS_TBL_SIZE); + + if (pMemoryManager->bScanoutSysmem) + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _SUPPORT_SCANOUT_FROM_SYSMEM); + + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _SUPPORT_RENDER_TO_SYSMEM); + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _BLOCKLINEAR); + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _BLOCKLINEAR_GOBS_512); + + if (pKernelMemorySystem->bGpuCacheEnable) + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _SUPPORT_CACHED_SYSMEM); + + // + // OS owned Heap doesn't need to be scrubber from Pascal+ chips, since HW Scrubber + // takes care of it + // + if (pMemorySystemConfig->bEnabledEccFBPA && + pMemoryManager->bEccInterleavedVidmemScrub && + !IsGP100orBetter(pGpu)) + { + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _OS_OWNS_HEAP_NEED_ECC_SCRUB); + } + + if (memmgrComprSupported(pMemoryManager, ADDR_SYSMEM)) + { + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _SUPPORT_SYSMEM_COMPRESSION); + } + + if (pKernelMemorySystem->bDisableTiledCachingInvalidatesWithEccBug1521641) + { + if (pMemorySystemConfig->bEnabledEccFBPA || IS_VIRTUAL(pGpu)) + { + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _DISABLE_TILED_CACHING_INVALIDATES_WITH_ECC_BUG_1521641); + } + } + + if (memmgrIsScrubOnFreeEnabled(pMemoryManager)) { + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _VIDMEM_ALLOCS_ARE_CLEARED); + } + + if (pMemorySystemConfig->bDisablePostL2Compression) + { + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _DISABLE_PLC_GLOBALLY); + } + + if (pMemorySystemConfig->bDisablePlcForCertainOffsetsBug3046774) + { + RMCTRL_SET_CAP(tempCaps, NV0080_CTRL_FB_CAPS, _PLC_BUG_3046774); + } + + // If we don't have 
existing caps with which to reconcile, then just return + if (!bCapsInitialized) + { + portMemCopy(pFbCaps, NV0080_CTRL_FB_CAPS_TBL_SIZE, tempCaps, NV0080_CTRL_FB_CAPS_TBL_SIZE); + return; + } + + // factor in this GPUs caps: feature caps use AND, WARS use OR + RMCTRL_AND_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _SUPPORT_RENDER_TO_SYSMEM); + RMCTRL_AND_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _SUPPORT_SCANOUT_FROM_SYSMEM); + RMCTRL_AND_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _BLOCKLINEAR); + RMCTRL_AND_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _BLOCKLINEAR_GOBS_512); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _L2_TAG_BUG_632241); + RMCTRL_AND_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _SUPPORT_CACHED_SYSMEM); + RMCTRL_AND_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _SUPPORT_C24_COMPRESSION); + RMCTRL_AND_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _SUPPORT_SYSMEM_COMPRESSION); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _NISO_CFG0_BUG_534680); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _ISO_FETCH_ALIGN_BUG_561630); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _SINGLE_FB_UNIT); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _OS_OWNS_HEAP_NEED_ECC_SCRUB); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _DISABLE_TILED_CACHING_INVALIDATES_WITH_ECC_BUG_1521641); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _DISABLE_MSCG_WITH_VR_BUG_1681803); + RMCTRL_AND_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _VIDMEM_ALLOCS_ARE_CLEARED); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _DISABLE_PLC_GLOBALLY); + RMCTRL_OR_CAP(pFbCaps, tempCaps, temp, + NV0080_CTRL_FB_CAPS, _PLC_BUG_3046774); + + return; +} + +static NV_STATUS +memmgrGetFbCaps(OBJGPU *pGpu, NvU8 *pFbCaps) +{ + NV_STATUS rmStatus = NV_OK; + NvBool bCapsInitialized = NV_FALSE; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + if (pMemoryManager == NULL) + { + rmStatus = NV_ERR_INVALID_POINTER; + SLI_LOOP_BREAK; + } + memmgrGetDeviceCaps(pGpu, pMemoryManager, pFbCaps, bCapsInitialized); + bCapsInitialized = NV_TRUE; + } + SLI_LOOP_END + + return rmStatus; +} + +// +// deviceCtrlCmdFbGetCaps +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdFbGetCaps_IMPL +( + Device *pDevice, + NV0080_CTRL_FB_GET_CAPS_PARAMS *pFbCapsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU8 *pFbCaps = NvP64_VALUE(pFbCapsParams->capsTbl); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // sanity check array size + if (pFbCapsParams->capsTblSize != NV0080_CTRL_FB_CAPS_TBL_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n", + pFbCapsParams->capsTblSize, NV0080_CTRL_FB_CAPS_TBL_SIZE); + return NV_ERR_INVALID_ARGUMENT; + } + + // now accumulate caps for entire device + return memmgrGetFbCaps(pGpu, pFbCaps); +} + +// +// deviceCtrlCmdFbGetCapsV2 +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +deviceCtrlCmdFbGetCapsV2_IMPL +( + Device *pDevice, + NV0080_CTRL_FB_GET_CAPS_V2_PARAMS *pFbCapsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + NvU8 *pFbCaps = pFbCapsParams->capsTbl; + NV_STATUS rmStatus; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // now accumulate caps for entire device + rmStatus = memmgrGetFbCaps(pGpu, pFbCaps); + + return rmStatus; +} + +// +// 
subdeviceCtrlCmdFbGetBar1Offset +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdFbGetBar1Offset_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GET_BAR1_OFFSET_PARAMS *pFbMemParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + Device *pDevice; + NvU64 offset; + RsCpuMapping *pCpuMapping = NULL; + NV_STATUS status; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // Get the device handle + status = deviceGetByInstance(RES_GET_CLIENT(pSubdevice), + gpuGetDeviceInstance(pGpu), + &pDevice); + if (status != NV_OK) + return NV_ERR_INVALID_ARGUMENT; + + pCpuMapping = CliFindMappingInClient(hClient, RES_GET_HANDLE(pDevice), pFbMemParams->cpuVirtAddress); + if (pCpuMapping == NULL) + return NV_ERR_INVALID_ARGUMENT; + + offset = (NvU64)pFbMemParams->cpuVirtAddress - (NvU64)pCpuMapping->pLinearAddress; + pFbMemParams->gpuVirtAddress = pCpuMapping->pPrivate->gpuAddress + offset; + + return NV_OK; +} + +// +// subdeviceCtrlCmdFbIsKind +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdFbIsKind_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_IS_KIND_PARAMS *pIsKindParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvBool rmResult; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // perform appropriate RM operation based on the supported sdk operations + switch (pIsKindParams->operation) + { + + case NV2080_CTRL_FB_IS_KIND_OPERATION_SUPPORTED: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_SUPPORTED, pIsKindParams->kind); + break; + + case NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, pIsKindParams->kind); + break; + case NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_1: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE_1, pIsKindParams->kind); + break; + case NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_2: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE_2, pIsKindParams->kind); + break; + case NV2080_CTRL_FB_IS_KIND_OPERATION_COMPRESSIBLE_4: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE_4, pIsKindParams->kind); + break; + case NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_ZBC, pIsKindParams->kind); + break; + case NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_1: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_ZBC_ALLOWS_1, pIsKindParams->kind); + break; + case NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_2: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_ZBC_ALLOWS_2, pIsKindParams->kind); + break; + case NV2080_CTRL_FB_IS_KIND_OPERATION_ZBC_ALLOWS_4: + rmResult = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_ZBC_ALLOWS_4, pIsKindParams->kind); + break; + default: + NV_ASSERT(0); + status = NV_ERR_INVALID_ARGUMENT; + return status; + } + + // save the result in the params struct and return + pIsKindParams->result = rmResult; + return status; +} + +NV_STATUS +subdeviceCtrlCmdFbGetMemAlignment_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GET_MEM_ALIGNMENT_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hObject = RES_GET_HANDLE(pSubdevice); + Heap *pHeap = vidmemGetHeap(pGpu, hClient, NV_FALSE); + 
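    //
    // Repackage the control call's parameters into a HEAP_ALLOC_HINT_PARAMS
    // below, so that heapAllocHint() can report the size, alignment and bank
    // placement the heap would choose for such a request.
    //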
HEAP_ALLOC_HINT_PARAMS AllocHint = {0}; + NvU32 i; + NvU64 _size, _alignment; + NV_STATUS status = NV_OK; + + _size = pParams->alignSize; + _alignment = pParams->alignMask; + AllocHint.pSize = &_size; + AllocHint.pAlignment = &_alignment; + + // Type, Attributes, Flags + AllocHint.type = pParams->alignType; + AllocHint.pAttr = &pParams->alignAttr; + AllocHint.pAttr2 = &pParams->alignAttr2; + AllocHint.flags = pParams->alignInputFlags; + + // Size Information + AllocHint.pHeight = &pParams->alignHeight; + AllocHint.pWidth = &pParams->alignWidth; + AllocHint.pPitch = &pParams->alignPitch; + AllocHint.pKind = &pParams->alignKind; + AllocHint.alignAdjust = 0x0; + + status = heapAllocHint(pGpu, pHeap, hClient, hObject, &AllocHint); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "heapAllocHint failed\n"); + return status; + } + + //XXX64b torque converter + pParams->alignMask = (NvU32)_alignment; + pParams->alignSize = _size; + + if (NV_FALSE == AllocHint.ignoreBankPlacement) + { + for (i=0; ialignBank[i] = 0x0; + pParams->alignOutputFlags[i] = NVAL_MAP_DIRECTION_UP; + } + else + { + pParams->alignBank[i] = (AllocHint.bankPlacement & MEM_BANK_MASK) + 1; + pParams->alignOutputFlags[i] = (BANK_MEM_GROW_DOWN == (AllocHint.bankPlacement & BANK_MEM_GROW_MASK)) ? NVAL_MAP_DIRECTION_DOWN: NVAL_MAP_DIRECTION_UP; + } + AllocHint.bankPlacement >>= MEM_BANK_DATA_SIZE; + } + + } + else + { + for (i=0; ialignBank[i] = 0x0; + pParams->alignOutputFlags[i] = NVAL_MAP_DIRECTION_UP; + } + } + + // Keep Track of resources that we have allocated + pParams->alignPad = (NvU32)AllocHint.pad;//XXX64b + pParams->alignAdjust = (NvU32)AllocHint.alignAdjust;//XXX64b + + return NV_OK; +} + +#if defined(DEBUG) || defined(DEVELOP) || defined(NV_VERIF_FEATURES) || defined(NV_MODS) +/* + * Currently returns information for **all** clients; + * returns actual sizes allocated from the heap, + * not the sizes requested by the client + */ +NV_STATUS +subdeviceCtrlCmdFbGetClientAllocationInfo_IMPL(Subdevice *pSubdevice, + NV2080_CTRL_CMD_FB_GET_CLIENT_ALLOCATION_INFO_PARAMS *pParams) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + RmClient **ppClient; + NvU64 reservedAllocCount = pParams->allocCount; + NvU64 reservedClientCount = pParams->clientCount; + NvU64 curClientCount = 0; + NvU64 curAllocCount = 0; + + NV2080_CTRL_CMD_FB_ALLOCATION_INFO* pLocalAllocInfo = NULL; + NV2080_CTRL_CMD_FB_CLIENT_INFO* pLocalCliInfo = NULL; + + NV_STATUS status = NV_OK; + + if (reservedAllocCount != 0 && reservedClientCount != 0) + { + if(reservedAllocCount > (NV_U64_MAX/sizeof(NV2080_CTRL_CMD_FB_ALLOCATION_INFO)) || + reservedClientCount > (NV_U64_MAX/sizeof(NV2080_CTRL_CMD_FB_CLIENT_INFO))) + { + return NV_ERR_NO_MEMORY; + } + pLocalAllocInfo = (NV2080_CTRL_CMD_FB_ALLOCATION_INFO*) portMemAllocNonPaged( + sizeof(NV2080_CTRL_CMD_FB_ALLOCATION_INFO) * reservedAllocCount); + NV_ASSERT_OR_RETURN(pLocalAllocInfo != NULL, NV_ERR_NO_MEMORY); + + pLocalCliInfo = (NV2080_CTRL_CMD_FB_CLIENT_INFO*) portMemAllocNonPaged( + sizeof(NV2080_CTRL_CMD_FB_CLIENT_INFO) * reservedClientCount); + + NV_ASSERT_OR_ELSE(pLocalCliInfo != NULL, + portMemFree(pLocalAllocInfo); + return NV_ERR_NO_MEMORY; + ); + } + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + NvBool bFirstPass = NV_TRUE; + RmClient *pClient = *ppClient; + RsClient *pRsClient = staticCast(pClient, RsClient); + + RS_ITERATOR it = clientRefIter(pRsClient, 0, 0, RS_ITERATE_DESCENDANTS, NV_TRUE); + + while 
(clientRefIterNext(it.pClient, &it)) + { + NvU64 nPages; + NvU64 allocSz; + NvU64 index; + NvU32 memFlags; + NvU32 addrSpaceResult; + NV_ADDRESS_SPACE addrSpace; + + Memory *pMemoryInfo = dynamicCast(it.pResourceRef->pResource, Memory); + + if (pMemoryInfo == NULL) + continue; + + addrSpace = pMemoryInfo->pMemDesc->_addressSpace; + nPages = pMemoryInfo->pMemDesc->PageCount; + allocSz = pMemoryInfo->pMemDesc->ActualSize; + + // Ignore memdescs not in SYSMEM or FBMEM + if ((it.pResourceRef->externalClassId != NV01_MEMORY_LOCAL_USER && + it.pResourceRef->externalClassId != NV01_MEMORY_SYSTEM && + it.pResourceRef->externalClassId != NV01_MEMORY_LIST_SYSTEM && + it.pResourceRef->externalClassId != NV01_MEMORY_LIST_FBMEM && + it.pResourceRef->externalClassId != NV01_MEMORY_LIST_OBJECT && + it.pResourceRef->externalClassId != NV01_MEMORY_HW_RESOURCES) || + pGpu != pMemoryInfo->pGpu) + { + continue; + } + + if (pMemoryInfo->pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) + { + nPages = 1; + } + else + { + allocSz = pMemoryInfo->pMemDesc->_pageSize; + } + + curAllocCount += nPages; + + // Only do client setup on first addded memdesc + if (bFirstPass) + { + curClientCount++; + bFirstPass = NV_FALSE; + + // Check if we have enough space for new client + if (pLocalAllocInfo == NULL) + continue; + + if (curClientCount > reservedClientCount) + goto free_mem; + + pLocalCliInfo[curClientCount-1].handle = pRsClient->hClient; + pLocalCliInfo[curClientCount-1].pid = pClient->ProcID; + pLocalCliInfo[curClientCount-1].subProcessID = pClient->SubProcessID; + portMemCopy(pLocalCliInfo[curClientCount-1].subProcessName, NV_PROC_NAME_MAX_LENGTH, + pClient->SubProcessName, NV_PROC_NAME_MAX_LENGTH); + + } + + // Check if we have enough space for memdesc info + if (pLocalAllocInfo == NULL) + continue; + + if (curAllocCount > reservedAllocCount) + goto free_mem; + + memFlags = 0; + + addrSpaceResult = addrSpace == ADDR_SYSMEM ? + NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE_SYSMEM: + NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE_VIDMEM; + + memFlags = FLD_SET_REF_NUM( + NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_TYPE, + addrSpaceResult, + memFlags); + + memFlags = FLD_SET_REF_NUM( + NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_OWNER, + pMemoryInfo->isMemDescOwner ? 1 : 0, + memFlags); + + memFlags = FLD_SET_REF_NUM( + NV2080_CTRL_CMD_FB_ALLOCATION_FLAGS_SHARED, + pMemoryInfo->RefCount != 1 ? 1 : 0, + memFlags); + + for (index = 0; index < nPages; index++) + { + NvU64 curIndex = curAllocCount + index - nPages; + + pLocalAllocInfo[curIndex].flags = memFlags; + pLocalAllocInfo[curIndex].client = curClientCount - 1; + pLocalAllocInfo[curIndex].beginAddr = (NvU64) pMemoryInfo->pMemDesc->_pteArray[index]; + pLocalAllocInfo[curIndex].size = allocSz; + + } + + continue; + +free_mem: + + portMemFree(pLocalCliInfo); + portMemFree(pLocalAllocInfo); + pLocalAllocInfo = NULL; + pLocalCliInfo = NULL; + + } + } + + if (pLocalAllocInfo != NULL) + { + + if ((portMemExCopyToUser(pLocalAllocInfo, (NvP64)pParams->pAllocInfo, + sizeof(NV2080_CTRL_CMD_FB_ALLOCATION_INFO) * curAllocCount) != NV_OK) || + (portMemExCopyToUser(pLocalCliInfo, (NvP64)pParams->pClientInfo, + sizeof(NV2080_CTRL_CMD_FB_CLIENT_INFO) * curClientCount) != NV_OK)) + { + status = NV_ERR_INVALID_POINTER; + } + + portMemFree(pLocalCliInfo); + portMemFree(pLocalAllocInfo); + } + + pParams->clientCount = curClientCount; + pParams->allocCount = curAllocCount; + return status; +} +#endif // defined(DEBUG) || defined(DEVELOP) || defined(NV_VERIF_FEATURES) || defined(NV_MODS) + +/*! 
+ * @brief Get heap reservation size needed by different module + */ +NV_STATUS +subdeviceCtrlCmdFbGetHeapReservationSize_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_INTERNAL_FB_GET_HEAP_RESERVATION_SIZE_PARAMS *pParams +) +{ + NV_ASSERT_OR_RETURN(0, NV_ERR_NOT_SUPPORTED); +} + +// +// subdeviceCtrlCmdFbGetFBRegionInfo +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdFbGetFBRegionInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *pGFBRIParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 regionIndex, i; + NV_STATUS status = NV_OK; + + // Make sure the exported structure can accomodate the necessary informtion + ct_assert(NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES >= NVOS32_NUM_MEM_TYPES); + ct_assert(NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES >= MAX_FB_REGIONS); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + memmgrCalcReservedFbSpace(pGpu, pMemoryManager); + + pGFBRIParams->numFBRegions = 0; + + // + // Copy the internal information into the exported structure + // + if (pMemoryManager->Ram.numFBRegions > 0) + { + // Copy the region information + for (regionIndex = 0; (regionIndex < pMemoryManager->Ram.numFBRegions) && + (pGFBRIParams->numFBRegions < NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES); regionIndex++) + { + // Do not expose reserved or internal-use-only regions to callers + if ((!pMemoryManager->Ram.fbRegion[regionIndex].bRsvdRegion) && (!pMemoryManager->Ram.fbRegion[regionIndex].bInternalHeap)) + { + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].base = pMemoryManager->Ram.fbRegion[regionIndex].base; + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].limit = pMemoryManager->Ram.fbRegion[regionIndex].limit; + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].reserved = pMemoryManager->Ram.fbRegion[regionIndex].rsvdSize; + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].performance = pMemoryManager->Ram.fbRegion[regionIndex].performance; + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].supportCompressed = pMemoryManager->Ram.fbRegion[regionIndex].bSupportCompressed; + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].supportISO = pMemoryManager->Ram.fbRegion[regionIndex].bSupportISO; + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].bProtected = pMemoryManager->Ram.fbRegion[regionIndex].bProtected; + + // Init the blacklist to allow all surface types + for(i = 0; i < NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES; i++) + { + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].blackList[i] = NV_FALSE; + } + + // If the region does not support ISO then blacklist the ISO surface types (display, cursor, video) + if (!pMemoryManager->Ram.fbRegion[regionIndex].bSupportISO) + { + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].blackList[NVOS32_TYPE_PRIMARY] = NV_TRUE; + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].blackList[NVOS32_TYPE_CURSOR] = NV_TRUE; + pGFBRIParams->fbRegion[pGFBRIParams->numFBRegions].blackList[NVOS32_TYPE_VIDEO] = NV_TRUE; + } + + pGFBRIParams->numFBRegions++; + } + } + + regionIndex = pGFBRIParams->numFBRegions; + // Pad out the excess information + for ( ; regionIndex < NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES; regionIndex++) + { + pGFBRIParams->fbRegion[regionIndex].base = 0; + pGFBRIParams->fbRegion[regionIndex].limit = 0; + pGFBRIParams->fbRegion[regionIndex].reserved = 0; + 
            pGFBRIParams->fbRegion[regionIndex].performance = 0;
+            pGFBRIParams->fbRegion[regionIndex].supportCompressed = NV_FALSE;
+            pGFBRIParams->fbRegion[regionIndex].supportISO = NV_FALSE;
+            pGFBRIParams->fbRegion[regionIndex].bProtected = NV_FALSE;
+
+            for (i = 0; i < NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES; i++)
+            {
+                pGFBRIParams->fbRegion[regionIndex].blackList[i] = NV_TRUE;
+            }
+        }
+
+        if (pGFBRIParams->numFBRegions > 0)
+        {
+            // Special region information successfully reported.
+            status = NV_OK;
+        }
+        else
+        {
+            // No public region information to export.
+            status = NV_ERR_NOT_SUPPORTED;
+        }
+    }
+    else
+    {
+        // No special region information to export. Operate as usual.
+        status = NV_ERR_NOT_SUPPORTED;
+    }
+
+    return status;
+}
+
+/*!
+ * @brief This command gets the offset into FB that the ECC asynchronous
+ *        scrubber has completed up to, as well as whether the scrubber has
+ *        finished its job.
+ *
+ * @param[in]     pDiagApi
+ * @param[in,out] pConfig fbOffsetCompleted, fbEndOffset and
+ *                        bAsyncScrubDisabled are filled in.
+ * @return Returns NV_STATUS
+ *         NV_OK Success
+ *
+ */
+NV_STATUS
+diagapiCtrlCmdFbEccScrubDiag_IMPL
+(
+    DiagApi *pDiagApi,
+    NV208F_CTRL_CMD_FB_ECC_SCRUB_DIAG_PARAMS *pConfig
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi);
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+
+    memmgrGetScrubState_HAL(pGpu, pMemoryManager, &(pConfig->fbOffsetCompleted),
+                            &(pConfig->fbEndOffset), &(pConfig->bAsyncScrubDisabled));
+
+    return NV_OK;
+}
+
+/*!
+ * @brief This command launches the asynchronous scrubber on the region
+ *        specified by startBlock and endBlock. startBlock is the lesser
+ *        block index of the region to be scrubbed.
+ *
+ * @param[in]     pDiagApi
+ * @param[in,out] pConfig startBlock and endBlock select the region to scrub.
+ * @return Returns NV_STATUS
+ *         NV_OK Success
+ *
+ */
+NV_STATUS
+diagapiCtrlCmdFbEccAsyncScrubRegion_IMPL
+(
+    DiagApi *pDiagApi,
+    NV208F_CTRL_CMD_FB_ECC_ASYNC_SCRUB_REGION_PARAMS *pConfig
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pDiagApi);
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+
+    memmgrAsyncScrubRegion_HAL(pGpu, pMemoryManager, pConfig->startBlock, pConfig->endBlock);
+
+    return NV_OK;
+}
diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_gsp_client.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_gsp_client.c
new file mode 100644
index 000000000..b6dc6377f
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_gsp_client.c
@@ -0,0 +1,232 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/gsp/gsp_static_config.h" +#include "vgpu/vgpu_events.h" +#include +#include "gpu/mem_mgr/fermi_dma.h" +#include "nvoc/prelude.h" + +/*! + * @brief Initialize FB regions from static info obtained from GSP FW. Also, + * initialize region table related RAM fields. + */ +NV_STATUS +memmgrInitBaseFbRegions_FWCLIENT +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *pFbRegionInfoParams; + NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *pFbRegionInfo; + GspStaticConfigInfo *pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + NvU64 bias; + NvU32 i; + + // sanity checks + if (pGSCI == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Missing static info.\n"); + + return NV_ERR_INVALID_STATE; + } + + pFbRegionInfoParams = &pGSCI->fbRegionInfoParams; + if (pFbRegionInfoParams->numFBRegions == 0) + { + NV_PRINTF(LEVEL_ERROR, + "Missing FB region table in GSP Init arguments.\n"); + + return NV_ERR_INVALID_PARAMETER; + } + + if (pFbRegionInfoParams->numFBRegions > MAX_FB_REGIONS) + { + NV_PRINTF(LEVEL_ERROR, + "Static info struct has more FB regions (%u) than FB supports (%u).\n", + pFbRegionInfoParams->numFBRegions, MAX_FB_REGIONS); + + return NV_ERR_INVALID_PARAMETER; + } + + pMemoryManager->Ram.reservedMemSize = 0; + pMemoryManager->Ram.fbUsableMemSize = 0; + + // Copy FB regions from static info structure + for (i = 0; i < pFbRegionInfoParams->numFBRegions; i++) + { + pFbRegionInfo = &pFbRegionInfoParams->fbRegion[i]; + pMemoryManager->Ram.fbRegion[i].base = pFbRegionInfo->base; + pMemoryManager->Ram.fbRegion[i].limit = pFbRegionInfo->limit; + pMemoryManager->Ram.fbRegion[i].bProtected = pFbRegionInfo->bProtected; + pMemoryManager->Ram.fbRegion[i].bInternalHeap = NV_FALSE; + pMemoryManager->Ram.fbRegion[i].performance = pFbRegionInfo->performance; + pMemoryManager->Ram.fbRegion[i].bSupportCompressed = pFbRegionInfo->supportCompressed; + pMemoryManager->Ram.fbRegion[i].bSupportISO = pFbRegionInfo->supportISO; + pMemoryManager->Ram.fbRegion[i].rsvdSize = pFbRegionInfo->reserved; + + if (pFbRegionInfo->reserved) + { + pMemoryManager->Ram.fbRegion[i].bRsvdRegion = NV_TRUE; + pMemoryManager->Ram.reservedMemSize += pMemoryManager->Ram.fbRegion[i].rsvdSize; + } + else + { + pMemoryManager->Ram.fbRegion[i].bRsvdRegion = NV_FALSE; + pMemoryManager->Ram.fbUsableMemSize += (pMemoryManager->Ram.fbRegion[i].limit - + pMemoryManager->Ram.fbRegion[i].base + 1); + } + } + pMemoryManager->Ram.numFBRegions = pFbRegionInfoParams->numFBRegions; + + // Round up to the closest megabyte. + bias = (1 << 20) - 1; + // + // fbTotalMemSizeMb was set to fbUsableMemSize. However, in RM-offload, + // GSP-RM reserves some FB regions for its own usage, thus fbUsableMemSize + // won't represent the exact FB size. Instead, we are taking the FB size + // from the static info provided by GSP-RM. 
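    //
    // Round-up example with made-up numbers: fb_length = 0x5FF80000 bytes
    // yields fbTotalMemSizeMb = (0x5FF80000 + 0xFFFFF) >> 20 = 0x600 (1536 MB).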
+ // + pMemoryManager->Ram.fbTotalMemSizeMb = (pGSCI->fb_length + bias) >> 20; + pMemoryManager->Ram.fbAddrSpaceSizeMb = + (pMemoryManager->Ram.fbRegion[pFbRegionInfoParams->numFBRegions - 1].limit + bias) >> 20; + + NV_ASSERT(pMemoryManager->Ram.fbAddrSpaceSizeMb >= pMemoryManager->Ram.fbTotalMemSizeMb); + + // Dump some stats, region table is dumped in memsysStateLoad + NV_PRINTF(LEVEL_INFO, "FB Memory from Static info:\n"); + NV_PRINTF(LEVEL_INFO, "Reserved Memory=0x%llx, Usable Memory=0x%llx\n", + pMemoryManager->Ram.reservedMemSize, pMemoryManager->Ram.fbUsableMemSize); + NV_PRINTF(LEVEL_INFO, "fbTotalMemSizeMb=0x%llx, fbAddrSpaceSizeMb=0x%llx\n", + pMemoryManager->Ram.fbTotalMemSizeMb, pMemoryManager->Ram.fbAddrSpaceSizeMb); + + return NV_OK; +} + +/*! + * @brief Set up CPU RM reserved memory space for physical carveout. + */ +NV_STATUS +memmgrPreInitReservedMemory_FWCLIENT +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU64 tmpAddr = 0; + + if (KBUS_BAR2_ENABLED(pKernelBus)) + { + // + // This has to be the very *last* thing in reserved memory as it + // will may grow past the 1MB reserved memory window. We cannot + // size it until memsysStateInitLockedHal_GK104. + // + memmgrReserveBar2BackingStore(pGpu, pMemoryManager, &tmpAddr); + } + + NV_ASSERT(NvU64_LO32(tmpAddr) == tmpAddr); + pMemoryManager->rsvdMemorySize = NvU64_LO32(tmpAddr); + + return NV_OK; +} + +/*! + * @brief Calculate the FB reserved memory requirement. + * + * @param[out] rsvdFastSize generic reserved RM memory needed in fast region + * @param[out] rsvdSlowSize generic reserved RM memory needed in slow region + * @param[out] rsvdISOSize ISO-specific reserved RM memory needed + */ +void +memmgrCalcReservedFbSpaceHal_FWCLIENT +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU64 *rsvdFastSize, + NvU64 *rsvdSlowSize, + NvU64 *rsvdISOSize +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvU64 rsvdSizeBytes; + NvU64 smallPagePte = 0; + NvU64 bigPagePte = 0; + + // + // Minimum reserved memory for driver internal memdescAlloc() calls. + // DO NOT increase this hard-coded memory to account for more reserved + // FB memory, instead add individual calculations below. 
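    //
    // Worked example with a hypothetical 16 GB board and 64 KB big pages:
    //   small-page PTEs: 16 GB / 4 KB  * 8 B = 32 MB
    //   big-page PTEs:   16 GB / 64 KB * 8 B =  2 MB
    // each rounded up to RM_PAGE_SIZE and added below on top of the 5 MB base,
    // the USERD reservation and the fault method buffer sizes.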
+ // + rsvdSizeBytes = 5 * 1024 * 1024; + + // Add in USERD reservation + rsvdSizeBytes += memmgrGetUserdReservedFbSpace_HAL(pGpu, pMemoryManager); + + // Reserve FB for Fault method buffers + rsvdSizeBytes += kfifoCalcTotalSizeOfFaultMethodBuffers_HAL(pGpu, pKernelFifo, NV_TRUE); + + // smallPagePte = FBSize /4k * 8 (Small page PTE for whole FB) + smallPagePte = NV_ROUNDUP((pMemoryManager->Ram.fbUsableMemSize / FERMI_SMALL_PAGESIZE) * 8, RM_PAGE_SIZE); + + // bigPagePte = FBSize /bigPageSize * 8 (Big page PTE for whole FB) + bigPagePte = NV_ROUNDUP((pMemoryManager->Ram.fbUsableMemSize/ (kgmmuGetMaxBigPageSize_HAL(pKernelGmmu))) * 8, + RM_PAGE_SIZE); + + rsvdSizeBytes += smallPagePte; + rsvdSizeBytes += bigPagePte; + + if (gpuIsClientRmAllocatedCtxBufferEnabled(pGpu)) + { + rsvdSizeBytes += memmgrGetMaxContextSize_HAL(pGpu, pMemoryManager); + } + + // Add in NV_REG_STR_RM_INCREASE_RSVD_MEMORY_SIZE_MB if applicable + if (pMemoryManager->rsvdMemorySizeIncrement != 0) + { + // Allow reservation up to half of usable FB size + if (pMemoryManager->rsvdMemorySizeIncrement > (pMemoryManager->Ram.fbUsableMemSize / 2)) + { + pMemoryManager->rsvdMemorySizeIncrement = pMemoryManager->Ram.fbUsableMemSize / 2; + NV_PRINTF(LEVEL_ERROR, + "RM can only increase reserved heap by 0x%llx bytes\n", + pMemoryManager->rsvdMemorySizeIncrement); + } + rsvdSizeBytes += pMemoryManager->rsvdMemorySizeIncrement; + } + + rsvdSizeBytes = NV_ROUNDUP(rsvdSizeBytes, RM_PAGE_SIZE_64K); + + // mixed memory type/density only existed in pre-Pascal chips + *rsvdFastSize = rsvdSizeBytes; + *rsvdSlowSize = 0; + *rsvdISOSize = 0; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_regions.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_regions.c new file mode 100644 index 000000000..698f0d8e4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_mgr_regions.c @@ -0,0 +1,559 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" + +/*! 
+ * @brief: Find FB region associated with a given FB offset + * + * @param[in] fbOffset Start offset of FB block + * @param[in] fbLimit End offset of FB block + * + * @return FB_REGION_DESCRIPTOR if found + * NULL if not found + */ + +PFB_REGION_DESCRIPTOR +memmgrLookupFbRegionByOffset_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + RmPhysAddr fbOffset, + RmPhysAddr fbLimit +) +{ + NvU32 i = 0; + PFB_REGION_DESCRIPTOR pFbRegion = NULL; + + // Find the region in which the candidate block resides + while (i < pMemoryManager->Ram.numFBRegions) + { + pFbRegion = &pMemoryManager->Ram.fbRegion[i]; + // Does the block resides entirely within this region? If so, then we are done searching. + //++ Too restrictive for some platforms +// if ((fbOffset >= pFbRegion->base) && +// (fbLimit <= pFbRegion->limit)) + // Does the block resides at least partially within this region? If so, then we are done searching. + if ((fbOffset >= pFbRegion->base) && + (fbOffset <= pFbRegion->limit)) + { + if (fbLimit > pFbRegion->limit) + { + NV_PRINTF(LEVEL_WARNING, "STRADDLING REGION!\n"); + } + return(pFbRegion); + } + i++; + } + + return (NULL); +} + +/*! + * Regenerate FB region allocation priority list + */ +void +memmgrRegenerateFbRegionPriority_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NvU32 i, j, temp; + + // Re-build a list of allocatable regions, sorted by preference (highest to lowest) + pMemoryManager->Ram.numFBRegionPriority = 0; + + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + if (!pMemoryManager->Ram.fbRegion[i].bRsvdRegion) + { + pMemoryManager->Ram.fbRegionPriority[pMemoryManager->Ram.numFBRegionPriority] = i; + pMemoryManager->Ram.numFBRegionPriority++; + } + } + NV_ASSERT( pMemoryManager->Ram.numFBRegionPriority > 0 ); + if (pMemoryManager->Ram.numFBRegionPriority > 1) + { + for (i = 0; i < pMemoryManager->Ram.numFBRegionPriority - 1; i++) + { + for (j = i + 1; j < pMemoryManager->Ram.numFBRegionPriority; j++) + { + if (pMemoryManager->Ram.fbRegion[pMemoryManager->Ram.fbRegionPriority[i]].performance < pMemoryManager->Ram.fbRegion[pMemoryManager->Ram.fbRegionPriority[j]].performance) + { + temp = pMemoryManager->Ram.fbRegionPriority[i]; + pMemoryManager->Ram.fbRegionPriority[i] = pMemoryManager->Ram.fbRegionPriority[j]; + pMemoryManager->Ram.fbRegionPriority[j] = temp; + } + } + } + } +} + +/*! + * @brief: Shifts the FB region IDs + * + * @param[in] regionId FB Region ID from which we want to shift the FB regions + * + */ +static void +_memmgrShiftFbRegions +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 regionId +) +{ + NvU32 fbRegion; + + NV_ASSERT(pMemoryManager->Ram.numFBRegions < MAX_FB_REGIONS); + + for (fbRegion=pMemoryManager->Ram.numFBRegions; fbRegion>regionId; fbRegion--) + { + pMemoryManager->Ram.fbRegion[fbRegion] = pMemoryManager->Ram.fbRegion[fbRegion-1]; + } + + pMemoryManager->Ram.numFBRegions++; +} + +/* + * Check if LostOnSuspend FB Regions are grouped together at the end of FB. 
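+ *
+ * Illustrative layout (example added for clarity; the region count is
+ * arbitrary):
+ *   [ FB0 kept ][ FB1 kept ][ FB2 lost ][ FB3 lost ]  - contiguous, OK
+ *   [ FB0 kept ][ FB1 lost ][ FB2 kept ]              - a kept region after a
+ *                                                       lost one triggers the
+ *                                                       assert below.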
+ */ +static void +_memmgrCheckForLostOnSuspendContiguity +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + NvU32 newRegionId +) +{ + NvU32 i = 0; + NvBool bLostOnSuspend = NV_FALSE; + + // + // Check to make sure all the regions after this new one has bLostOnSuspend set to NV_TRUE + // + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + if ((bLostOnSuspend == NV_FALSE) && (pMemoryManager->Ram.fbRegion[i].bLostOnSuspend == NV_TRUE)) + { + bLostOnSuspend = NV_TRUE; + continue; + } + + if ((bLostOnSuspend == NV_TRUE) && (pMemoryManager->Ram.fbRegion[i].bLostOnSuspend == NV_FALSE)) + { + NV_ASSERT(0); + break; + } + } + return; +} + +/*! + * @brief: Insert a region into FbRegions[] + * + * @param[in] pRegion Descriptor of New region + * + * @return new FbRegion ID + */ +NvU32 +memmgrInsertFbRegion_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + PFB_REGION_DESCRIPTOR pInsertRegion +) +{ + NvU32 insertRegion = 0; + PFB_REGION_DESCRIPTOR pFbRegion; + // + // Consider that we have 4 Fb Regions + // +----------------------------- + + // | FB0 | FB1 | FB2 | FB3 | + // +----------------------------- + + // Find out the which region Insert region belongs to + // + for(insertRegion = 0; insertRegion < pMemoryManager->Ram.numFBRegions;insertRegion++) + { + pFbRegion = &pMemoryManager->Ram.fbRegion[insertRegion]; + + // Find out whether insert region's Base and Limit lies within current FB Region + if(pFbRegion->base <= pInsertRegion->base && + pFbRegion->limit >= pInsertRegion->limit) + { + break; + } + } + + if (insertRegion >= pMemoryManager->Ram.numFBRegions) + { + NV_PRINTF(LEVEL_ERROR, + "New Region does not belong to any existing FB Regions\n"); + NV_ASSERT(0); + } + else + { + NV_PRINTF(LEVEL_INFO, "New Region belongs to FB Region 0x%x\n", + insertRegion); + } + + + pFbRegion = &pMemoryManager->Ram.fbRegion[insertRegion]; + + // + // Consider that we have 4 FB regions and for the sake of example we will + // work on region 2 + // 0 1 2 3 + // +--------------------------+ + // | FB0 | FB1 | FB2 | FB3 | + // +--------------------------+ + // + + // Case 1: Insert Region occupies all space in the FB region + if ( pFbRegion->base == pInsertRegion->base && + pFbRegion->limit == pInsertRegion->limit) + { + // 0 1 2 3 + // +--------------------------+ + // | FB0 | FB1 | NEW | FB3 | + // +--------------------------+ + // + pMemoryManager->Ram.fbRegion[insertRegion] = *pInsertRegion; + } + // + // Case 2: Insert Region's base address is same as FB Region. 
+ // split the Fb Region into 2 parts and assign first part to new region + // in Below case, region 2 will get split + // + else if (pFbRegion->base == pInsertRegion->base && + pFbRegion->limit > pInsertRegion->limit) + { + // + // 0 1 2 3 4 + // +-----------------------------------+ + // | FB0 | FB1 | New | FB2' | FB3 | + // +-----------------------------------+ + // + _memmgrShiftFbRegions(pGpu, pMemoryManager, insertRegion); + pMemoryManager->Ram.fbRegion[insertRegion] = *pInsertRegion; + pMemoryManager->Ram.fbRegion[insertRegion+1].base = pInsertRegion->limit + 1; + } + // + // Case 3: Insert Region's limit is same as FB Region + // split the region into 2 part and assign lower part of FB region to new Region + // + else if (pFbRegion-> base < pInsertRegion->base && + pFbRegion->limit == pInsertRegion->limit) + { + // + // 0 1 2 3 4 + // +-----------------------------------+ + // | FB0 | FB1 | FB2' | NEW | FB3 | + // +-----------------------------------+ + // + _memmgrShiftFbRegions(pGpu, pMemoryManager, insertRegion); + insertRegion++; + pMemoryManager->Ram.fbRegion[insertRegion] = *pInsertRegion; + pMemoryManager->Ram.fbRegion[insertRegion-1].limit = pInsertRegion->base - 1; + } + // + // Case 4: Insert region lies in between of FB region + // split the FB region in 3 parts and assign the middle part to insert region + // + else + { + // + // 0 1 2 3 4 5 + // +-------------------------------------------+ + // | FB0 | FB1 | FB2' | NEW | FB2'' | FB3 | + // +-------------------------------------------+ + // + _memmgrShiftFbRegions(pGpu, pMemoryManager, insertRegion); + insertRegion++; + pMemoryManager->Ram.fbRegion[insertRegion] = *pInsertRegion; + + _memmgrShiftFbRegions(pGpu, pMemoryManager, insertRegion); + pMemoryManager->Ram.fbRegion[insertRegion+1] = pMemoryManager->Ram.fbRegion[insertRegion-1]; + pMemoryManager->Ram.fbRegion[insertRegion+1].base = pInsertRegion->limit + 1; + pMemoryManager->Ram.fbRegion[insertRegion-1].limit = pInsertRegion->base - 1; + } + + _memmgrCheckForLostOnSuspendContiguity(pGpu, pMemoryManager, insertRegion); + + // Invalidate allocation priority list and regenerate it + memmgrRegenerateFbRegionPriority(pGpu, pMemoryManager); + + return insertRegion; +} + +/* + * @brief: Splits the fb region such that PMA regions, + * Rm internal reserve region and unusable regions + * are all separate regions + */ +void +memmgrRegionSetupCommon_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + FB_REGION_DESCRIPTOR rsvdFbRegion; + NvU32 i; + Heap *pHeap = GPU_GET_HEAP(pGpu); + NvU64 heapBase = pHeap->base; + NvU64 heapEnd = pHeap->total - 1; + NvU64 fbTax = memmgrGetFbTaxSize_HAL(pGpu, pMemoryManager); + + // TODO: Remove this check and enable on baremetal as well. + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + // + // Chop off anything that doesnt belong to the Heap object until + // we figure out why vGPU initializes objheap smaller. 
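+            //
+            // Added note (addresses are illustrative assumptions): if a
+            // region spans [0x0 .. limit] but the heap object only starts at
+            // heapBase, the slice [0x0 .. heapBase - 1] is re-inserted below
+            // as a reserved, non-internal-heap region so RM never allocates
+            // from it; the mirror case past heapEnd is handled the same way.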
+ // + if (pMemoryManager->Ram.fbRegion[i].base < heapBase) + { + portMemSet(&rsvdFbRegion, 0, sizeof(rsvdFbRegion)); + rsvdFbRegion.limit = heapBase - 1; + rsvdFbRegion.base = pMemoryManager->Ram.fbRegion[i].base; + rsvdFbRegion.rsvdSize = 0; + + // Should never be true for internal heap + rsvdFbRegion.bRsvdRegion = NV_TRUE; + rsvdFbRegion.performance = pMemoryManager->Ram.fbRegion[i].performance; + rsvdFbRegion.bSupportCompressed = pMemoryManager->Ram.fbRegion[i].bSupportCompressed; + rsvdFbRegion.bSupportISO = pMemoryManager->Ram.fbRegion[i].bSupportISO; + rsvdFbRegion.bProtected = pMemoryManager->Ram.fbRegion[i].bProtected; + rsvdFbRegion.bInternalHeap = NV_FALSE; + + i = memmgrInsertFbRegion(pGpu, pMemoryManager, &rsvdFbRegion); + continue; + } + + if (pMemoryManager->Ram.fbRegion[i].limit > heapEnd) + { + portMemSet(&rsvdFbRegion, 0, sizeof(rsvdFbRegion)); + rsvdFbRegion.limit = pMemoryManager->Ram.fbRegion[i].limit; + rsvdFbRegion.base = heapEnd + 1; + rsvdFbRegion.rsvdSize = 0; + + // Should never be true for internal heap + rsvdFbRegion.bRsvdRegion = NV_TRUE; + rsvdFbRegion.performance = pMemoryManager->Ram.fbRegion[i].performance; + rsvdFbRegion.bSupportCompressed = pMemoryManager->Ram.fbRegion[i].bSupportCompressed; + rsvdFbRegion.bSupportISO = pMemoryManager->Ram.fbRegion[i].bSupportISO; + rsvdFbRegion.bProtected = pMemoryManager->Ram.fbRegion[i].bProtected; + rsvdFbRegion.bInternalHeap = NV_FALSE; + + i = memmgrInsertFbRegion(pGpu, pMemoryManager, &rsvdFbRegion); + } + } + + // Create a separate region for FB tax as the last FB region, it's not accessible to VF + if (fbTax) + { + i = pMemoryManager->Ram.numFBRegions - 1; + + portMemSet(&rsvdFbRegion, 0, sizeof(rsvdFbRegion)); + rsvdFbRegion.limit = pMemoryManager->Ram.fbRegion[i].limit; + rsvdFbRegion.base = pMemoryManager->Ram.fbRegion[i].limit - fbTax + 1; + rsvdFbRegion.rsvdSize = 0; + + // Should never be true for internal heap + rsvdFbRegion.bRsvdRegion = NV_TRUE; + rsvdFbRegion.performance = pMemoryManager->Ram.fbRegion[i].performance; + rsvdFbRegion.bSupportCompressed = pMemoryManager->Ram.fbRegion[i].bSupportCompressed; + rsvdFbRegion.bSupportISO = pMemoryManager->Ram.fbRegion[i].bSupportISO; + rsvdFbRegion.bProtected = pMemoryManager->Ram.fbRegion[i].bProtected; + rsvdFbRegion.bInternalHeap = NV_FALSE; + + // Not required to be saved on hibernation, mark it as lost on suspend + rsvdFbRegion.bLostOnSuspend = NV_TRUE; + + i = memmgrInsertFbRegion(pGpu, pMemoryManager, &rsvdFbRegion); + } + } + + // + // We really don't want to calculate this before we have complete + // information from grCalculateGlobalCtxBufferSize, but we need to + // setup the internal region + // + + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + // + // if the region has an RM reserved block and is not already reserved, subdivide it. 
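+        //
+        // Illustrative split (added comment; sizes are made up): a 1 GB
+        // region carrying rsvdSize = 64 MB becomes a 960 MB allocatable
+        // region followed by a 64 MB bInternalHeap carve-out at its top,
+        // and the original region's rsvdSize is then cleared.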
+ // + if ((pMemoryManager->Ram.fbRegion[i].rsvdSize > 0) && + (pMemoryManager->Ram.fbRegion[i].bRsvdRegion == NV_FALSE)) + { + portMemSet(&rsvdFbRegion, 0, sizeof(rsvdFbRegion)); + + NV_ASSERT(pMemoryManager->Ram.fbRegion[i].rsvdSize <= pMemoryManager->Ram.fbRegion[i].limit - pMemoryManager->Ram.fbRegion[i].base + 1); + rsvdFbRegion.limit = pMemoryManager->Ram.fbRegion[i].limit; + rsvdFbRegion.base = rsvdFbRegion.limit - pMemoryManager->Ram.fbRegion[i].rsvdSize + 1; + rsvdFbRegion.rsvdSize = pMemoryManager->Ram.fbRegion[i].rsvdSize; + pMemoryManager->Ram.fbRegion[i].rsvdSize = 0; + // Should never be true for internal heap + rsvdFbRegion.bRsvdRegion = pMemoryManager->Ram.fbRegion[i].bRsvdRegion; + rsvdFbRegion.performance = pMemoryManager->Ram.fbRegion[i].performance; + rsvdFbRegion.bSupportCompressed = pMemoryManager->Ram.fbRegion[i].bSupportCompressed; + rsvdFbRegion.bSupportISO = pMemoryManager->Ram.fbRegion[i].bSupportISO; + rsvdFbRegion.bProtected = pMemoryManager->Ram.fbRegion[i].bProtected; + rsvdFbRegion.bInternalHeap = NV_TRUE; + + i = memmgrInsertFbRegion(pGpu, pMemoryManager, &rsvdFbRegion); + } + } + + // + // If FB size is being overriden, PMA should only own + // memory below overrideHeapMax. + // Also note on Pascal&&+ with FB override which below + // code takes care of: + // We can no longer can rely on RM reserve region being + // in the pre-scrubbed region after FB size is restricted. + // Non-prescrubbed region could overlap with vpr region. + // Until fbstate init completes, RM cannot distinguish VPR + // region, hence we have a WAR to prevent RM internal + // allocations from falling into the VPR region by routing it + // outside the fb override zone essentially to the prescrubbed + // region. See fbHandleSizeOverrides_GP100. Till fbstate init + // completes objheap will force all internal allocations to the + // region outside the 'boot scrub'. To allow for it the region + // should be in objheap - but not in pma. + // Since we mark everything above overrideHeapMax as internal heap + // the WAR will still be valid. + // + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + // + // Scan for the region that is above the overrideheapmax + // that is not already reserved and allow objheap to manage it. + // + if ((pMemoryManager->Ram.fbRegion[i].bRsvdRegion == NV_FALSE) && + (pMemoryManager->overrideHeapMax < pMemoryManager->Ram.fbRegion[i].limit)) + { + // Entire region is above the heap max + if ((pMemoryManager->overrideHeapMax < pMemoryManager->Ram.fbRegion[i].base)) + { + pMemoryManager->Ram.fbRegion[i].bInternalHeap = NV_TRUE; + } + else // straddling create a separate region + { + portMemSet(&rsvdFbRegion, 0, sizeof(rsvdFbRegion)); + rsvdFbRegion.base = pMemoryManager->overrideHeapMax + 1; + rsvdFbRegion.limit = pMemoryManager->Ram.fbRegion[i].limit; + rsvdFbRegion.bInternalHeap = NV_TRUE; + rsvdFbRegion.bRsvdRegion = NV_FALSE; + rsvdFbRegion.performance = pMemoryManager->Ram.fbRegion[i].performance; + rsvdFbRegion.bSupportCompressed = pMemoryManager->Ram.fbRegion[i].bSupportCompressed; + rsvdFbRegion.bSupportISO = pMemoryManager->Ram.fbRegion[i].bSupportISO; + rsvdFbRegion.bProtected = pMemoryManager->Ram.fbRegion[i].bProtected; + i = memmgrInsertFbRegion(pGpu, pMemoryManager, &rsvdFbRegion); + } + } + } +} + +/* + * @brief: Prepares the fb region for PMA such that PMA regions, + * Rm internal reserve region and unusable regions + * are all separate regions. In order to do that RM has + * to calculate the reserved region size a little earlier. 
+ */ +void +memmgrRegionSetupForPma_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + memmgrCalcReservedFbSpace(pGpu, pMemoryManager); + memmgrRegionSetupCommon(pGpu, pMemoryManager); +} + +/*! + * Dump FB region table for debugging purposes. + */ +void +memmgrDumpFbRegions_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + NvU32 i; + + NV_PRINTF(LEVEL_INFO, "FB region table: numFBRegions = %u.\n", + pMemoryManager->Ram.numFBRegions); + + for (i = 0; i < pMemoryManager->Ram.numFBRegions; i++) + { + NV_PRINTF(LEVEL_INFO, "FB region %u - Base=0x%llx, Limit=0x%llx, RsvdSize=0x%llx\n", + i, + pMemoryManager->Ram.fbRegion[i].base, + pMemoryManager->Ram.fbRegion[i].limit, + pMemoryManager->Ram.fbRegion[i].rsvdSize); + NV_PRINTF(LEVEL_INFO, "FB region %u - Reserved=%d, InternalHeap=%d, Compressed=%d, ISO=%d, Protected=%d, Performance=%u\n", + i, + pMemoryManager->Ram.fbRegion[i].bRsvdRegion, + pMemoryManager->Ram.fbRegion[i].bInternalHeap, + pMemoryManager->Ram.fbRegion[i].bSupportCompressed, + pMemoryManager->Ram.fbRegion[i].bSupportISO, + pMemoryManager->Ram.fbRegion[i].bProtected, + pMemoryManager->Ram.fbRegion[i].performance); + } +} + +/*! + * @Clear FB sizing & regions + * + * Variables initialized: + * pMemoryManager->Ram.fbAddrSpaceSizeMb - Size of FB address space + * pMemoryManager->Ram.reservedMemSize - Size of FB reserved region + * pMemoryManager->Ram.numFBRegions - size of FB region list (can be 0) + * pMemoryManager->Ram.numFBRegionPriority - size of priority list + * + * @returns NV_OK + * + */ +void +memmgrClearFbRegions_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager +) +{ + pMemoryManager->Ram.fbAddrSpaceSizeMb = 0; + pMemoryManager->Ram.numFBRegions = 0; + pMemoryManager->Ram.numFBRegionPriority = 0; + pMemoryManager->Ram.reservedMemSize = 0; + pMemoryManager->bLddmReservedMemoryCalculated = NV_FALSE; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_scrub.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_scrub.c new file mode 100644 index 000000000..28a8a61ca --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_scrub.c @@ -0,0 +1,1390 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/ce/kernel_ce.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "gpu/bus/kern_bus.h" +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "objtmr.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "kernel/gpu/intr/intr.h" + +#include "gpu/mem_mgr/mem_scrub.h" +#include "os/os.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "utils/nvprintf.h" +#include "utils/nvassert.h" +#include "nvgputypes.h" +#include "nvtypes.h" +#include "nvstatus.h" +#include "rmapi/rs_utils.h" +#include "core/locks.h" +#include "class/clb0b5.h" // MAXWELL_DMA_COPY_A +#include "class/clc0b5.h" // PASCAL_DMA_COPY_A +#include "class/clc1b5.h" // PASCAL_DMA_COPY_B +#include "class/clc3b5.h" // VOLTA_DMA_COPY_A +#include "class/clc5b5.h" // TURING_DMA_COPY_A +#include "class/clc6b5.h" // AMPERE_DMA_COPY_A +#include "class/clc7b5.h" // AMPERE_DMA_COPY_B + +static NvU64 _scrubCheckProgress(OBJMEMSCRUB *pScrubber); +static void _scrubSetupChannelBufferSizes(OBJCHANNEL *pChannel, NvU32 numCopyBlocks); +static NvU64 _searchScrubList(OBJMEMSCRUB *pScrubber, RmPhysAddr base, NvU64 size); +static void _waitForPayload(OBJMEMSCRUB *pScrubber, RmPhysAddr base, RmPhysAddr end); +static void _scrubAddWorkToList(OBJMEMSCRUB *pScrubber, RmPhysAddr base, NvU64 size, NvU64 newId); +static NvU32 _scrubMemory(OBJCHANNEL *pChannel, RmPhysAddr base, NvU64 size, NV_ADDRESS_SPACE dstAddrSpace, + NvU32 dstCpuCacheAttrib, NvU32 freeToken); +static void _scrubWaitAndSave(OBJMEMSCRUB *pScrubber, PSCRUB_NODE pList, NvLength itemsToSave); +static NvU64 _scrubGetFreeEntries(OBJMEMSCRUB *pScrubber); +static NvU64 _scrubCheckAndSubmit(OBJMEMSCRUB *pScrubber, NvU64 chunkSize, NvU64 *pPages, + NvU64 pageCount, PSCRUB_NODE pList, NvLength pagesToScrubCheck); +static void _scrubOsSchedule(OBJCHANNEL *pChannel); +static void _scrubCopyListItems(OBJMEMSCRUB *pScrubber, PSCRUB_NODE pList, NvLength itemsToSave); + +static NV_STATUS _scrubCheckLocked(OBJMEMSCRUB *pScrubber, PSCRUB_NODE *ppList, NvU64 *pSize); + +/** + * Constructs the memory scrubber object and signals + * RM to create CE channels for submitting scrubbing work + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap object pointer + * + * @returns NV_STATUS on success. 
+ * error, if something fails + */ +NV_STATUS +scrubberConstruct +( + OBJGPU *pGpu, + Heap *pHeap +) +{ + OBJMEMSCRUB *pScrubber; + OBJCHANNEL *pChannel; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + PMA *pPma = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + RmClient *pClient; + + if (pHeap == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + pPma = &pHeap->pmaObject; + + if (pPma->pScrubObj != NULL) + return NV_OK; + + pScrubber = (OBJMEMSCRUB *)portMemAllocNonPaged(sizeof(OBJMEMSCRUB)); + if (pScrubber == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + portMemSet(pScrubber, 0, sizeof(OBJMEMSCRUB)); + + pScrubber->pScrubberMutex = (PORT_MUTEX *)portMemAllocNonPaged(portSyncMutexSize); + if (pScrubber->pScrubberMutex == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto error; + } + + NV_ASSERT_OK_OR_GOTO(status, + portSyncMutexInitialize(pScrubber->pScrubberMutex), freemutex); + + pScrubber->pScrubList = (PSCRUB_NODE) + portMemAllocNonPaged(sizeof(SCRUB_NODE) * MAX_SCRUB_ITEMS); + if (pScrubber->pScrubList == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto deinitmutex; + } + portMemSet(pScrubber->pScrubList, 0, sizeof(SCRUB_NODE) * MAX_SCRUB_ITEMS); + + { + pChannel = (OBJCHANNEL *) portMemAllocNonPaged(sizeof(OBJCHANNEL)); + if (pChannel == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto destroyscrublist; + } + portMemSet(pChannel, 0, sizeof(OBJCHANNEL)); + pScrubber->pChannel = pChannel; + pChannel->type = SCRUBBER_CHANNEL; + pChannel->pGpu = pGpu; + pChannel->bUseVasForCeCopy = memmgrUseVasForCeMemoryOps(pMemoryManager); + memmgrMemUtilsGetCopyEngineClass_HAL(pGpu, pMemoryManager, &pChannel->hTdCopyClass); + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithHandle(pRmApi, NV01_NULL_OBJECT, NV01_NULL_OBJECT, + NV01_NULL_OBJECT, NV01_ROOT, &pChannel->hClient), freeChannelObject); + + pChannel->bClientAllocated = NV_TRUE; + + if (bMIGInUse) + { + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance; + FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pKernelMIGGPUInstance) + { + if (pKernelMIGGPUInstance->pMemoryPartitionHeap == pHeap) + { + pChannel->pKernelMIGGpuInstance = pKernelMIGGPUInstance; + break; + } + } + FOR_EACH_VALID_GPU_INSTANCE_END(); + } + + // set all the unique ID's for the scrubber channel + NV_ASSERT_OK_OR_GOTO(status, + serverutilGetClientUnderLock(pChannel->hClient, &pClient), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + serverGetClientUnderLock(&g_resServ, pChannel->hClient, &pChannel->pRsClient), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + clientSetHandleGenerator(staticCast(pClient, RsClient), 1U, ~0U - 1U), freechannel); + + // set all the unique ID's for the scrubber channel + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->physMemId), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->channelId), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->errNotifierIdVirt), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->errNotifierIdPhys), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->copyObjectId), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + 
serverutilGenResourceHandle(pChannel->hClient, &pChannel->eventId), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->pushBufferId), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->doorbellRegionHandle), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->hUserD), freechannel); + + // + // RM scrubber channel is always created as privileged channels (physical address access) by default + // For MMU Bug: 2739505, we need to switch to use channels in non-privileged mode. + // We also need a (split) VAS for GSP-RM + MIG, to ensure we don't fall back to the device default + // VAS during channel allocation. + // + // TODO: This is temporary, and should be replaced shortly by enabling VAS allocation unilaterally. + // + if (pChannel->bUseVasForCeCopy || + (IS_GSP_CLIENT(pGpu) && bMIGInUse)) + { + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pChannel->hClient, &pChannel->hVASpaceId), freechannel); + } + + // set sizes for CE Channel + _scrubSetupChannelBufferSizes(pChannel, MAX_SCRUB_ITEMS); + + // Allocate Scrubber Channel related objects + NV_ASSERT_OK_OR_GOTO(status, + memmgrMemUtilsChannelInitialize_HAL(pGpu, pMemoryManager, pChannel), freechannel); + + NV_ASSERT_OK_OR_GOTO(status, + memmgrMemUtilsCopyEngineInitialize_HAL(pGpu, pMemoryManager, pChannel), freepartitionref); + + // Initialize semaphore location to 0 + WRITE_SCRUBBER_PB_SEMA(pChannel, 0); + WRITE_SCRUBBER_PAYLOAD_SEMA(pChannel, 0); + NV_ASSERT_OK_OR_GOTO(status, pmaRegMemScrub(pPma, pScrubber), freepartitionref); + } + + return status; + +freepartitionref: + if(bMIGInUse) + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hPartitionRef); + +freechannel: + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->channelId); + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hClient); + +freeChannelObject: + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hClient); + portMemFree(pScrubber->pChannel); + +destroyscrublist: + portMemFree(pScrubber->pScrubList); + +deinitmutex: + portSyncMutexDestroy(pScrubber->pScrubberMutex); + +freemutex: + portMemFree(pScrubber->pScrubberMutex); + pScrubber->pScrubberMutex = NULL; + +error: + portMemFree(pScrubber); + return status; +} + +static NvBool +_isScrubWorkPending( + OBJMEMSCRUB *pScrubber +) +{ + NvBool workPending = NV_FALSE; + + if (pScrubber->bVgpuScrubberEnabled) + { + if (pScrubber->lastSubmittedWorkId != pScrubber->vgpuScrubBuffRing.pScrubBuffRingHeader->lastSWSemaphoreDone) + workPending = NV_TRUE; + } + else + { + if (pScrubber->pChannel->lastSubmittedEntry != READ_SCRUBBER_PB_SEMA(pScrubber->pChannel)) + workPending = NV_TRUE; + } + return workPending; +} + +/** + * Destructs the scrubber + * 1. De-registers the scrubber from the PMA object + * 2. 
Free the scrubber list and scrubber lock + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Heap object pointer + * @param[in] pScrubber OBJMEMSCRUB pointer + * + */ +void +scrubberDestruct +( + OBJGPU *pGpu, + Heap *pHeap, + OBJMEMSCRUB *pScrubber +) +{ + OBJCHANNEL *pChannel = NULL; + PMA *pPma = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + PSCRUB_NODE pPmaScrubList = NULL; + NvU64 count = 0; + NV_STATUS status = NV_OK; + + if (pHeap == NULL) + { + return; + } + pPma = &pHeap->pmaObject; + + if (pScrubber == NULL) + return; + + pmaUnregMemScrub(pPma); + portSyncMutexAcquire(pScrubber->pScrubberMutex); + + pChannel = pScrubber->pChannel; + + if (!API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + RMTIMEOUT timeout; + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + + while (_isScrubWorkPending(pScrubber)) + { + // just wait till it finishes + // Since the default RM Timeout is violated by this, added this for FMODEL + if (!IS_FMODEL(pGpu)) + { + if (gpuCheckTimeout(pGpu, &timeout) == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_FATAL, + " Timed out when waiting for the scrub to complete the pending work .\n"); + DBG_BREAKPOINT(); + break; + } + } + } + } + + // check for the completed scrub work items + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, _scrubCheckLocked(pScrubber, &pPmaScrubList, &count)); + + // Make sure all scrubbed pages are returned to PMA + if (count > 0) + pmaClearScrubbedPages(pPma, pPmaScrubList, count); + + portMemFree(pPmaScrubList); + + portMemFree(pScrubber->pScrubList); + { + // Freeing channel first as in MODS case we don't wait for work to complete. Freeing VAS first causes MMU faults + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->channelId); + + if (pChannel->bClientUserd) + { + // scrub userd memory of scrubber channel as it may be allocated from PMA + NvU32 userdSize = 0; + + kfifoGetUserdSizeAlign_HAL(GPU_GET_KERNEL_FIFO(pGpu), &userdSize, NULL); + portMemSet((void*)pChannel->pControlGPFifo, 0, userdSize); + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hUserD); + } + + if (pChannel->bUseVasForCeCopy) + { + // unmap the Identity mapping + status = pRmApi->Unmap(pRmApi, pChannel->hClient, pChannel->deviceId, + pChannel->hFbAliasVA, pChannel->hFbAlias, 0, pChannel->fbAliasVA); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unmapping scrubber 1:1 Mapping, status: %x\n", status); + } + + // free the Alias memory in host + // this will not trigger scrubber + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hFbAliasVA); + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hFbAlias); + } + + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->errNotifierIdPhys); + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->pushBufferId); + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->errNotifierIdVirt); + if (pChannel->hVASpaceId) + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hVASpaceId); + + + pRmApi->Free(pRmApi, pChannel->hClient, pChannel->hClient); + portMemFree(pScrubber->pChannel); + } + + portSyncMutexRelease(pScrubber->pScrubberMutex); + portSyncMutexDestroy(pScrubber->pScrubberMutex); + portMemFree(pScrubber->pScrubberMutex); + portMemFree(pScrubber); +} + +static NV_STATUS +_scrubCheckLocked +( + OBJMEMSCRUB *pScrubber, + PSCRUB_NODE *ppList, + NvU64 *pSize +) +{ + NV_STATUS status = NV_OK; + PSCRUB_NODE pList = NULL; + NvLength itemsToSave = 0; + NvU64 currentCompletedId = 0; + + *ppList = NULL; + *pSize = 0; + currentCompletedId = _scrubCheckProgress(pScrubber); + + itemsToSave = (NvLength)(currentCompletedId 
- pScrubber->lastSeenIdByClient); + + NV_ASSERT(itemsToSave <= MAX_SCRUB_ITEMS); + + if(itemsToSave == 0) + goto exit; + + pList = (PSCRUB_NODE)portMemAllocNonPaged(itemsToSave * sizeof(SCRUB_NODE)); + if (pList == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto exit; + } + portMemSet(pList, 0, sizeof(SCRUB_NODE) * itemsToSave); + + _scrubCopyListItems(pScrubber, pList, itemsToSave); + +exit: + *ppList = pList; + *pSize = itemsToSave; + return status; +} + +/** + * This function checks for the completed scrub work items, + * and populates the SCRUB_NODE in the array. + * @param[in] pScrubber OBJMEMSCRUB pointer + * @param[out] ppList SCRUB_NODE double pointer + * @param[out] pSize NvU64 pointer + * @returns NV_OK on success, + * NV_ERR_INSUFFICIENT_RESOURCES when the list allocation fails. + */ + +NV_STATUS +scrubCheck +( + OBJMEMSCRUB *pScrubber, + PSCRUB_NODE *ppList, + NvU64 *pSize +) +{ + NV_STATUS status; + portSyncMutexAcquire(pScrubber->pScrubberMutex); + status = _scrubCheckLocked(pScrubber, ppList, pSize); + portSyncMutexRelease(pScrubber->pScrubberMutex); + return status; +} + +/** + * This function submits work to the memory scrubber. + * This function interface is changed to return a list of scrubbed pages to the + * client, since the scrubber work list resources are limited, if the submission + * page count is more than scrubber list resources the completed scrubbed pages + * are saved in the list and the submission progresses. + * + * @param[in] pScrubber OBJMEMSCRUB pointer + * @param[in] chunkSize NvU64 size of each page + * @param[in] pPages NvU64 array of base address + * @param[in] pageCount NvU64 number of pages + * @param[out] ppList SCRUB_NODE double pointer to hand off the list + * @param[out] pSize NvU64 pointer to store the size + * + * @returns NV_OK on success, NV_ERR_GENERIC on HW Failure + */ +NV_STATUS +scrubSubmitPages +( + OBJMEMSCRUB *pScrubber, + NvU64 chunkSize, + NvU64 *pPages, + NvU64 pageCount, + PSCRUB_NODE *ppList, + NvU64 *pSize +) +{ + NvU64 curPagesSaved = 0; + PSCRUB_NODE pScrubList = NULL; + NvLength pagesToScrubCheck = 0; + NvU64 totalSubmitted = 0; + NvU64 numFinished = 0; + NvU64 freeEntriesInList = 0; + NvU64 scrubCount = 0; + NvU64 numPagesToScrub = pageCount; + + portSyncMutexAcquire(pScrubber->pScrubberMutex); + *pSize = 0; + *ppList = pScrubList; + + NV_PRINTF(LEVEL_INFO, "submitting pages, pageCount:%llx\n", pageCount); + + freeEntriesInList = _scrubGetFreeEntries(pScrubber); + if (freeEntriesInList < pageCount) + { + pScrubList = (PSCRUB_NODE) + portMemAllocNonPaged((NvLength)(sizeof(SCRUB_NODE) * (pageCount - freeEntriesInList))); + + while (freeEntriesInList < pageCount) + { + if (pageCount > MAX_SCRUB_ITEMS) + { + pagesToScrubCheck = (NvLength)(MAX_SCRUB_ITEMS - freeEntriesInList); + scrubCount = MAX_SCRUB_ITEMS; + } + else + { + pagesToScrubCheck = (NvLength)(pageCount - freeEntriesInList); + scrubCount = pageCount; + } + + numFinished = _scrubCheckAndSubmit(pScrubber, chunkSize, &pPages[totalSubmitted], + scrubCount, &pScrubList[curPagesSaved], + pagesToScrubCheck); + + pageCount -= numFinished; + curPagesSaved += pagesToScrubCheck; + totalSubmitted += numFinished; + freeEntriesInList = _scrubGetFreeEntries(pScrubber); + } + + *ppList = pScrubList; + *pSize = curPagesSaved; + } + else + { + + totalSubmitted = _scrubCheckAndSubmit(pScrubber, chunkSize, pPages, + pageCount, NULL, + 0); + *ppList = NULL; + *pSize = 0; + } + + portSyncMutexRelease(pScrubber->pScrubberMutex); + if (totalSubmitted == numPagesToScrub) + return 
NV_OK; + else + { + NV_PRINTF(LEVEL_FATAL, "totalSubmitted :%llx != pageCount: %llx\n", + totalSubmitted, pageCount); + DBG_BREAKPOINT(); + return NV_ERR_GENERIC; + } +} + +/** + * This function waits for the memory scrubber to wait for the scrubbing of + * pages within the range [pagesStart, pagesEnd] for the for the array of pages + * of size pageCount + * + * @param[in] pScrubber OBJMEMSCRUB pointer + * @param[in] chunkSize NvU64 size of each page + * @param[in] pPages NvU64 pointer to store the base address + * @param[in] pageCount NvU64 number of pages in the array + * + * @returns NV_OK + */ + +NV_STATUS +scrubWaitPages +( + OBJMEMSCRUB *pScrubber, + NvU64 chunkSize, + NvU64 *pPages, + NvU32 pageCount +) +{ + + NvU32 iter = 0; + NV_STATUS status = NV_OK; + + portSyncMutexAcquire(pScrubber->pScrubberMutex); + for (iter = 0; iter < pageCount; iter++) + { + _waitForPayload(pScrubber, pPages[iter], (pPages[iter] + chunkSize - 1)); + } + portSyncMutexRelease(pScrubber->pScrubberMutex); + return status; + +} + +/** + * This function waits for the scrubber to finish scrubbing enough items + * to have numPages fully scrubbed and then saves the work items to the list + * passed to the client. + * + * @param[in] pScrubber OBJMEMSCRUB pointer + * @param[in] numPages the number of pages we should wait to be scrubbed + * @param[in] pageSize the page size + * @param[out] ppList SCRUB_NODE double pointer to return the saved list pointer + * @param[out] pSize NvU64 pointer to return the size of saved work. + * + * @returns NV_OK if at least one work is pending in the scrubber list + * NV_ERR_NO_MEMORY when no work is pending in the scrubber list + */ + +NV_STATUS +scrubCheckAndWaitForSize +( + OBJMEMSCRUB *pScrubber, + NvU64 numPages, + NvU64 pageSize, + PSCRUB_NODE *ppList, + NvU64 *pSize +) +{ + PSCRUB_NODE pList = NULL; + NV_STATUS status = NV_OK; + NvLength totalItems = 0; + portSyncMutexAcquire(pScrubber->pScrubberMutex); + totalItems = (NvLength)pScrubber->scrubListSize; + *pSize = 0; + *ppList = pList; + + NvLength startIdx = pScrubber->lastSeenIdByClient; + NvU64 totalScrubbedPages = 0; + NvLength requiredItemsToSave = 0; + + for (; requiredItemsToSave < totalItems && totalScrubbedPages <= numPages; requiredItemsToSave++) { + totalScrubbedPages += (pScrubber->pScrubList[(startIdx + requiredItemsToSave) % MAX_SCRUB_ITEMS].size / pageSize); + } + + if (requiredItemsToSave != 0) { + pList = (PSCRUB_NODE) portMemAllocNonPaged(sizeof(SCRUB_NODE) * requiredItemsToSave); + if (pList == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto exit; + } + + _scrubWaitAndSave(pScrubber, pList, requiredItemsToSave); + } + else { + // since there is no scrub remaining, its upto the user about how to handle that. + status = NV_ERR_NO_MEMORY; + } + + *pSize = (NvU64)requiredItemsToSave; + *ppList = pList; + +exit: + portSyncMutexRelease(pScrubber->pScrubberMutex); + return status; +} + +/** + * helper function to copy elements from scrub list to the temporary list to + * return to the caller. 
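+ *
+ * The list is a ring indexed modulo MAX_SCRUB_ITEMS, so a save that wraps is
+ * done as two copies. Hypothetical numbers for illustration only: with a
+ * 4096-entry list, startIdx 4090 and 10 items to save, entries 4090..4095 are
+ * copied first, then entries 0..3.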
+ * @param[in] pScrubber OBJMEMSCRUB pointer + * @param[in] SCRUB_NODE pointer to copy the element + * @param[in] NvLength number of elements to copy + * + */ + +static void +_scrubCopyListItems +( + OBJMEMSCRUB *pScrubber, + PSCRUB_NODE pList, + NvLength itemsToSave +) +{ + NvLength startIdx = pScrubber->lastSeenIdByClient%MAX_SCRUB_ITEMS; + NvLength endIdx = (pScrubber->lastSeenIdByClient + itemsToSave)% + MAX_SCRUB_ITEMS; + + NV_ASSERT(pList != NULL); + NV_ASSERT(itemsToSave <= MAX_SCRUB_ITEMS); + + if (startIdx < endIdx) + { + portMemCopy(pList, + sizeof(SCRUB_NODE) * itemsToSave, + &pScrubber->pScrubList[startIdx], + sizeof(SCRUB_NODE) * itemsToSave); + portMemSet(&pScrubber->pScrubList[startIdx], 0, sizeof(SCRUB_NODE) * itemsToSave); + } + else + { + NvLength itemsFromStartToLastItem = (NvLength)(MAX_SCRUB_ITEMS - startIdx); + + // copy from startIdx to (MAX_SCRUB_ITEMS -1) idx + portMemCopy(pList, + sizeof(SCRUB_NODE) * itemsFromStartToLastItem, + &pScrubber->pScrubList[startIdx], + sizeof(SCRUB_NODE) * itemsFromStartToLastItem); + portMemSet(&pScrubber->pScrubList[startIdx], 0, sizeof(SCRUB_NODE) * itemsFromStartToLastItem); + + // now copy from from 0 to endIdx + portMemCopy(&pList[itemsFromStartToLastItem], + sizeof(SCRUB_NODE) * endIdx, + &pScrubber->pScrubList[0], + sizeof(SCRUB_NODE) * endIdx); + + portMemSet(&pScrubber->pScrubList[0], 0, sizeof(SCRUB_NODE) * endIdx); + } + + pScrubber->lastSeenIdByClient += itemsToSave; + pScrubber->scrubListSize -= itemsToSave; + NV_ASSERT(_scrubGetFreeEntries(pScrubber) <= MAX_SCRUB_ITEMS); +} + +/* This function is used to check and submit work items always within the + * available / maximum scrub list size. + * + * @param[in] pScrubber OBJMEMSCRUB pointer + * @param[in] chunkSize size of each page + * @param[in] pPages Array of base address + * @param[in] pageCount number of pages in the array + * @param[in] pList pointer will store the return check array + * @returns the number of work successfully submitted, else 0 + */ +static NvU64 +_scrubCheckAndSubmit +( + OBJMEMSCRUB *pScrubber, + NvU64 chunkSize, + NvU64 *pPages, + NvU64 pageCount, + PSCRUB_NODE pList, + NvLength pagesToScrubCheck +) +{ + NvU64 iter = 0; + NvU64 newId; + OBJCHANNEL *pChannel; + NV_STATUS status; + + if (pList == NULL && pagesToScrubCheck != 0) + { + NV_PRINTF(LEVEL_ERROR, + "pages need to be saved off, but stash list is invalid\n"); + goto exit; + } + + _scrubWaitAndSave(pScrubber, pList, pagesToScrubCheck); + + for (iter = 0; iter < pageCount; iter++) + { + pChannel = pScrubber->pChannel; + newId = pScrubber->lastSubmittedWorkId + 1; + + NV_PRINTF(LEVEL_INFO, + "Submitting work, Id: %llx, base: %llx, size: %llx\n", + newId, pPages[iter], chunkSize); + + { + status = _scrubMemory(pChannel, pPages[iter], chunkSize, ADDR_FBMEM, NV_MEMORY_DEFAULT, + (NvU32)newId); + } + + if(status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failing because the work dint submit.\n"); + goto exit; + } + _scrubAddWorkToList(pScrubber, pPages[iter], chunkSize, newId); + _scrubCheckProgress(pScrubber); + } + + return iter; +exit: + return 0; + +} + +/** + * helper function to return the free space in scrub list + */ +static NvU64 +_scrubGetFreeEntries +( + OBJMEMSCRUB *pScrubber +) +{ + return MAX_SCRUB_ITEMS - pScrubber->scrubListSize; +} + +/** + * helper function to return the max semaphore id that we need to wait for + * array of scrub works + * + * @returns 0, if no entry in list matched the base& end + */ +static NvU64 +_searchScrubList +( + OBJMEMSCRUB *pScrubber, + RmPhysAddr base, + 
RmPhysAddr end +) +{ + NvU64 tempLastSeenIdByClient = pScrubber->lastSeenIdByClient; + NvU64 lastSubmittedWorkId = pScrubber->lastSubmittedWorkId; + NvU64 id = 0; + NvU64 maxId = 0; + RmPhysAddr blockStart = 0; + RmPhysAddr blockEnd = 0; + + // + // we need not check for lastSubmittedWorkId, since lastSubmittedWorkId is always one more than + // the lastSubmittedWorkIdx. + // + while (tempLastSeenIdByClient != lastSubmittedWorkId) + { + blockStart = pScrubber->pScrubList[tempLastSeenIdByClient%MAX_SCRUB_ITEMS].base; + blockEnd = pScrubber->pScrubList[tempLastSeenIdByClient%MAX_SCRUB_ITEMS].base + + pScrubber->pScrubList[tempLastSeenIdByClient%MAX_SCRUB_ITEMS].size - 1; + + // Check whether the page ranges overlap + if ( !(blockStart > end || blockEnd < base) ) + { + id = pScrubber->pScrubList[tempLastSeenIdByClient%MAX_SCRUB_ITEMS].id; + maxId = (id > maxId) ? id : maxId; + } + tempLastSeenIdByClient++; + } + return maxId; +} + + +/** + * helper function which waits for a particular submission to complete and + * copies the completed work items from scrub list to temporary list + * + */ + +static void +_scrubWaitAndSave +( + OBJMEMSCRUB *pScrubber, + PSCRUB_NODE pList, + NvLength itemsToSave +) +{ + NvU64 currentCompletedId = 0; + + if (itemsToSave == 0) + return; + + currentCompletedId = _scrubCheckProgress(pScrubber); + + while (currentCompletedId < (pScrubber->lastSeenIdByClient + itemsToSave)) + { + _scrubOsSchedule(pScrubber->pChannel); + currentCompletedId = _scrubCheckProgress(pScrubber); + } + + _scrubCopyListItems(pScrubber, pList, itemsToSave); +} + +/** + * helper function to yield when we wait for the scrubber to complete a work + */ +static void +_scrubOsSchedule(OBJCHANNEL *pChannel) +{ + // + // FIXME: Bug 2463959: objmemscrub is called with the rmDeviceGpuLock in the + // heapFree_IMPL->_stdmemPmaFree->pmaFreePages->scrubSubmitPages path. + // Yielding while holding the rmDeviceGpuLock can lead to deadlock. Instead, + // if the lock is held, service any interrupts on the owned CE to make progress. + // Bug 2527660 is filed to remove this change. + // + // pChannel is null when PMA scrub requests are handled in vGPU plugin. + // In this case vGpu plugin allocates scrubber channel in PF domain so + // above mention deadlock is not present here. 
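+    //
+    // Summary of the two branches below (added comment): with the GPU lock
+    // held, stall interrupts on the scrubber's CE are serviced in place so
+    // the copy engine can make progress; otherwise the thread simply yields
+    // via osSchedule().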
+ // + if ((pChannel != NULL) && (rmDeviceGpuLockIsOwner(pChannel->pGpu->gpuInstance))) + { + Intr *pIntr = GPU_GET_INTR(pChannel->pGpu); + intrServiceStallSingle_HAL(pChannel->pGpu, pIntr, MC_ENGINE_IDX_CE(pChannel->pKCe->publicID), NV_FALSE); + } + else + { + osSchedule(); + } + +} + +/** + * helper function to find and wait for a specific work to complete + */ +static void +_waitForPayload +( + OBJMEMSCRUB *pScrubber, + RmPhysAddr base, + RmPhysAddr end +) +{ + NvU64 idToWait; + + //We need to look up in the range between [lastSeenIdByClient, lastSubmittedWorkId] + idToWait = _searchScrubList(pScrubber, base, end); + + if (idToWait == 0) + { + return; + } + + // Loop will break out, when the semaphore is equal to payload + while (_scrubCheckProgress(pScrubber) < idToWait) + { + portUtilSpin(); + } +} + +/** + * helper function to add a work to the scrub list + */ +static void +_scrubAddWorkToList +( + OBJMEMSCRUB *pScrubber, + RmPhysAddr base, + NvU64 size, + NvU64 newId +) +{ + //since the Id works from [1,4k] range, the Idx in which it writes in 1 lesser + NvU32 idx = (newId-1) % MAX_SCRUB_ITEMS; + + /* + * since this function is called after making sure that there is space + * available in the list, no check is needed + */ + NV_ASSERT(pScrubber->pScrubList[idx].id == 0); + pScrubber->pScrubList[idx].base = base; + pScrubber->pScrubList[idx].size = size; + pScrubber->pScrubList[idx].id = newId; + + pScrubber->lastSubmittedWorkId = newId; + pScrubber->scrubListSize++; + NV_ASSERT(_scrubGetFreeEntries(pScrubber) <= MAX_SCRUB_ITEMS); +} + +/** helper function to return methods size needed for a single operation in a + channel. currently only scrubber channel is supported. + */ + +static NvU32 +_getOptimalMethodSizePerBlock +( + OBJCHANNEL *pChannel +) +{ + NvU32 methodSizePerBlock = 0; + switch(pChannel->type) + { + case SCRUBBER_CHANNEL: + { + // + // 6 1U methods -- 6 * 8 bytes = 48 bytes + // 1 2U methods -- 1 * 12 bytes = 12 bytes + // 1 3U methods -- 1 * 16 bytes = 16 bytes + // 1 4U methods -- 1 * 20 bytes = 20 bytes + // + methodSizePerBlock = SIZE_OF_ONE_MEMSET_BLOCK; // 0x60 + break; + } + case FAST_SCRUBBER_CHANNEL: + { + // + // 9 1U methods -- 9 * 8 bytes = 72 bytes + // 1 2U methods -- 1 * 12 bytes = 12 bytes + // 1 3U methods -- 1 * 16 bytes = 16 bytes + // 1 4U methods -- 1 * 20 bytes = 20 bytes + // + methodSizePerBlock = SIZE_OF_ONE_MEMSET_BLOCK + 24; // 0x78 + break; + } + // TODO: add the case for COPY CHANNEL. + default: + NV_ASSERT(NV_TRUE); + } + return methodSizePerBlock; +} + +/** + * helper function to set sizes for CE channel used by memory scrubber. 
+ * Channel PB, GPFIFO and channel offsets are set for the numCopyBlock size + */ + +static void +_scrubSetupChannelBufferSizes +( + OBJCHANNEL *pChannel, + NvU32 numCopyBlocks +) +{ + NV_ASSERT(numCopyBlocks != 0); + NvU32 gpFifoSize = NV906F_GP_ENTRY__SIZE * numCopyBlocks; + + // set channel specific sizes + pChannel->methodSizePerBlock = _getOptimalMethodSizePerBlock(pChannel); + NV_ASSERT(pChannel->methodSizePerBlock != 0); + pChannel->channelPbSize = numCopyBlocks * (pChannel->methodSizePerBlock); + NV_ASSERT(pChannel->channelPbSize <= NV_U32_MAX); + pChannel->channelNotifierSize = SCRUBBER_CHANNEL_NOTIFIER_SIZE; + pChannel->channelNumGpFifioEntries = numCopyBlocks; + pChannel->channelSize = pChannel->channelPbSize + gpFifoSize + SCRUBBER_CHANNEL_SEMAPHORE_SIZE; + // Semaphore used to track PB and GPFIFO offset + pChannel->semaOffset = pChannel->channelPbSize + gpFifoSize; + // Semaphore used in work tracking for clients. + pChannel->finishPayloadOffset = pChannel->semaOffset + 4; +} + +/** + * Scrubber uses 64 bit index to track the work submitted. But HW supports + * only 32 bit semaphore. The current completed Id is calculated here, based + * on the lastSeenIdByClient and current HW semaphore value. + * + * @returns Current Completed 64 bit ID + */ +static NvU64 +_scrubCheckProgress +( + OBJMEMSCRUB *pScrubber +) +{ + NvU32 hwCurrentCompletedId; + NvU64 lastSWSemaphoreDone; + OBJCHANNEL *pChannel; + + NV_ASSERT(pScrubber != NULL); + + if (pScrubber->bVgpuScrubberEnabled) + { + hwCurrentCompletedId = pScrubber->vgpuScrubBuffRing.pScrubBuffRingHeader->lastSWSemaphoreDone; + } else + { + NV_ASSERT(pScrubber->pChannel != NULL); + pChannel = pScrubber->pChannel; + hwCurrentCompletedId = READ_SCRUBBER_PAYLOAD_SEMA(pChannel); + } + lastSWSemaphoreDone = pScrubber->lastSWSemaphoreDone; + + if (hwCurrentCompletedId == (NvU32)lastSWSemaphoreDone) + return lastSWSemaphoreDone; + + // check for wrap around case. Increment the upper 32 bits + if (hwCurrentCompletedId < (NvU32)lastSWSemaphoreDone) + { + lastSWSemaphoreDone += 0x100000000ULL; + } + + // update lower 32 bits + lastSWSemaphoreDone &= 0xFFFFFFFF00000000ULL; + lastSWSemaphoreDone |= (NvU64)hwCurrentCompletedId; + + pScrubber->lastSWSemaphoreDone = lastSWSemaphoreDone; + + return lastSWSemaphoreDone; +} + +/** helper function to push destination memory methods + */ +static NvU32 +_memsetPushDstProperties +( + OBJCHANNEL *pChannel, + NV_ADDRESS_SPACE dstAddressSpace, + NvU32 dstCpuCacheAttrib, + NvU32 **ppPtr +) +{ + NvU32 data = 0; + NvU32 *pPtr = *ppPtr; + NvU32 retVal = 0; + + if (dstAddressSpace == ADDR_FBMEM) + data = DRF_DEF(B0B5, _SET_DST_PHYS_MODE, _TARGET, _LOCAL_FB); + else if (dstCpuCacheAttrib == NV_MEMORY_CACHED) + data = DRF_DEF(B0B5, _SET_DST_PHYS_MODE, _TARGET, _COHERENT_SYSMEM); + else if (dstCpuCacheAttrib == NV_MEMORY_UNCACHED) + data = DRF_DEF(B0B5, _SET_DST_PHYS_MODE, _TARGET, _NONCOHERENT_SYSMEM); + + NV_PUSH_INC_1U(RM_SUBCHANNEL, NVB0B5_SET_DST_PHYS_MODE, data); + + if (pChannel->bUseVasForCeCopy) + { + retVal = DRF_DEF(B0B5, _LAUNCH_DMA, _DST_TYPE, _VIRTUAL) | + DRF_DEF(B0B5, _LAUNCH_DMA, _SRC_TYPE, _VIRTUAL); + } + else + { + retVal = DRF_DEF(B0B5, _LAUNCH_DMA, _DST_TYPE, _PHYSICAL) | + DRF_DEF(B0B5, _LAUNCH_DMA, _SRC_TYPE, _PHYSICAL); + } + + *ppPtr = pPtr; + return retVal; +} + + +/** single helper function to fill the push buffer with the methods needed for + memsetting using CE. This function is much more efficient in the sense it + decouples the mem(set/copy) operation from managing channel resources. 
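+   Rough per-block method sequence (added summary; see the body for the exact
+   DRF encodings): SET_OBJECT, the memset remap constant/components, the
+   destination physical mode, OFFSET_OUT upper/lower, LINE_LENGTH_IN, an
+   optional SET_SEMAPHORE_A/B/PAYLOAD for client notification, LAUNCH_DMA,
+   and finally a host NV906F semaphore release that frees this PB slot.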
+ TODO: Add support for memcopy here based on channel type. + */ +static NvU32 +_scrubFillPb +( + OBJCHANNEL *pChannel, + RmPhysAddr base, + NvU32 size, + NvU32 payload, + NvBool bPipelined, + NV_ADDRESS_SPACE dstAddressSpace, + NvU32 dstCpuCacheAttrib, + NvBool bInsertFinishpayload, + NvBool bNonStallInterrupt, + NvU32 putIndex +) +{ + NvU32 launchDestType = 0; + NvU32 semaValue = 0; + NvU32 pipelinedValue = 0; + NvU32 *pPtr =(NvU32 *)((NvU8*)pChannel->pbCpuVA + (putIndex * pChannel->methodSizePerBlock)); + NvU32 *pStartPtr = pPtr; + NvU32 data = 0; + NvU64 pSemaAddr = 0; + NvU32 disablePlcKind = 0; + + NV_PRINTF(LEVEL_INFO, "PutIndex: %x, PbOffset: %x\n", putIndex, + putIndex * pChannel->methodSizePerBlock); + + // SET OBJECT + NV_PUSH_INC_1U(RM_SUBCHANNEL, NV906F_SET_OBJECT, pChannel->classEngineID); + // Set Pattern for Memset + NV_PUSH_INC_1U(RM_SUBCHANNEL, NVB0B5_SET_REMAP_CONST_A, MEMSET_PATTERN); + // Set Component Size to 1 + NV_PUSH_INC_1U(RM_SUBCHANNEL, NVB0B5_SET_REMAP_COMPONENTS, + DRF_DEF(B0B5, _SET_REMAP_COMPONENTS, _DST_X, _CONST_A) | + DRF_DEF(B0B5, _SET_REMAP_COMPONENTS, _COMPONENT_SIZE, _ONE) | + DRF_DEF(B0B5, _SET_REMAP_COMPONENTS, _NUM_DST_COMPONENTS, _ONE)); + + launchDestType = _memsetPushDstProperties + (pChannel, dstAddressSpace, dstCpuCacheAttrib, &pPtr); + + semaValue = (bInsertFinishpayload) ? + DRF_DEF(B0B5, _LAUNCH_DMA, _SEMAPHORE_TYPE, _RELEASE_ONE_WORD_SEMAPHORE) : 0; + + if (bPipelined) + pipelinedValue = DRF_DEF(B0B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _PIPELINED); + else + pipelinedValue = DRF_DEF(B0B5, _LAUNCH_DMA, _DATA_TRANSFER_TYPE, _NON_PIPELINED); + + if (pChannel->bUseVasForCeCopy) + { + base = base + pChannel->fbAliasVA - pChannel->startFbOffset; + } + + NV_PUSH_INC_2U(RM_SUBCHANNEL, NVB0B5_OFFSET_OUT_UPPER, + DRF_NUM(B0B5, _OFFSET_OUT_UPPER, _UPPER, NvU64_HI32(base)), + NVB0B5_OFFSET_OUT_LOWER, + DRF_NUM(B0B5, _OFFSET_OUT_LOWER, _VALUE,NvU64_LO32(base))); + + NV_PUSH_INC_1U(RM_SUBCHANNEL, NVB0B5_LINE_LENGTH_IN, size); + + if (semaValue) + { + NV_PUSH_INC_3U(RM_SUBCHANNEL, NVB0B5_SET_SEMAPHORE_A, + DRF_NUM(B0B5, _SET_SEMAPHORE_A, _UPPER, NvU64_HI32(pChannel->pbGpuVA+pChannel->finishPayloadOffset)), + NVB0B5_SET_SEMAPHORE_B, + DRF_NUM(B0B5, _SET_SEMAPHORE_B, _LOWER, NvU64_LO32(pChannel->pbGpuVA+pChannel->finishPayloadOffset)), + NVB0B5_SET_SEMAPHORE_PAYLOAD, + payload); + } + + switch (pChannel->hTdCopyClass) + { + case MAXWELL_DMA_COPY_A: + case PASCAL_DMA_COPY_A: + case PASCAL_DMA_COPY_B: + case VOLTA_DMA_COPY_A: + disablePlcKind = 0; + break; + default: // For anything after Turing, set the kind + disablePlcKind = DRF_DEF(C5B5, _LAUNCH_DMA, _DISABLE_PLC, _TRUE); + break; + } + + NV_PUSH_INC_1U(RM_SUBCHANNEL, NVB0B5_LAUNCH_DMA, + DRF_DEF(B0B5, _LAUNCH_DMA, _SRC_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(B0B5, _LAUNCH_DMA, _DST_MEMORY_LAYOUT, _PITCH) | + DRF_DEF(B0B5, _LAUNCH_DMA, _MULTI_LINE_ENABLE, _FALSE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _REMAP_ENABLE, _TRUE) | + DRF_DEF(B0B5, _LAUNCH_DMA, _FLUSH_ENABLE, _TRUE) | + disablePlcKind | + launchDestType | + pipelinedValue | + semaValue); + + data = DRF_DEF(906F, _SEMAPHORED, _OPERATION, _RELEASE) | + DRF_DEF(906F, _SEMAPHORED, _RELEASE_SIZE, _4BYTE) | + DRF_DEF(906F, _SEMAPHORED, _RELEASE_WFI, _DIS); + + pSemaAddr = (pChannel->pbGpuVA+pChannel->semaOffset); + // + // This should always be at the bottom the push buffer segment, since this + // denotes that HOST has read all the methods needed for this memory operation + // and safely assume that this GPFIFO and PB entry can be reused. 
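+    //
+    // Added note: two semaphores are in play here. When a finish payload is
+    // requested, the CE LAUNCH_DMA above releases the client-visible work id
+    // at finishPayloadOffset, while this host semaphore writes putIndex at
+    // semaOffset so _scrubWaitForFreeEntry() knows the PB/GPFIFO slot has
+    // been consumed and can be reused.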
+ // + NV_PUSH_INC_4U(RM_SUBCHANNEL, NV906F_SEMAPHOREA, + DRF_NUM(906F, _SEMAPHOREA_OFFSET, _UPPER, NvU64_HI32(pSemaAddr)), + NV906F_SEMAPHOREB, + DRF_NUM(906F, _SEMAPHOREB_OFFSET, _LOWER, NvU64_LO32(pSemaAddr) >> 2), + NV906F_SEMAPHOREC, + putIndex, + NV906F_SEMAPHORED, data + ); + // typecasting to calculate the bytes consumed by this iteration. + return (NvU32)((NvU8*)pPtr - (NvU8*)pStartPtr); +} + +/** helper function which waits for a PB & GPFIO entry to be read by HOST. + * After the HOST reads GPFIFO and PB entry, the semaphore will be released. + */ + +static NvU32 +_scrubWaitForFreeEntry +( + OBJCHANNEL *pChannel +) +{ + NvU32 putIndex = 0; + NvU32 getIndex = 0; + + putIndex = (pChannel->lastSubmittedEntry + 1) % MAX_SCRUB_ITEMS; + do + { + getIndex = READ_SCRUBBER_PB_SEMA(pChannel); + NV_PRINTF(LEVEL_INFO, "Get Index: %x, PayloadIndex: %x\n", getIndex, + READ_SCRUBBER_PAYLOAD_SEMA(pChannel)); + if (getIndex != putIndex) + { + break; + } + _scrubOsSchedule(pChannel); + } while(1); + return putIndex; +} + +/** helper function to fill GPFIFO entry with a pushbuffer segment. and kick + off the executiion by HOST. + */ + +static NV_STATUS +_scrubFillGpFifo +( + OBJCHANNEL *pChannel, + NvU32 putIndex, + NvU32 methodsLength +) +{ + NvU32 *pGpEntry; + NvU32 GpEntry0; + NvU32 GpEntry1; + NvU64 pbPutOffset; + OBJGPU *pGpu; + KernelBus *pKernelBus; + + NV_ASSERT(putIndex < pChannel->channelNumGpFifioEntries); + + pbPutOffset = (pChannel->pbGpuVA + (putIndex * pChannel->methodSizePerBlock)); + + + GpEntry0 = + DRF_DEF(906F, _GP_ENTRY0, _NO_CONTEXT_SWITCH, _FALSE) | + DRF_NUM(906F, _GP_ENTRY0, _GET, NvU64_LO32(pbPutOffset) >> 2); + GpEntry1 = + DRF_NUM(906F, _GP_ENTRY1, _GET_HI, NvU64_HI32(pbPutOffset)) | + DRF_NUM(906F, _GP_ENTRY1, _LENGTH, methodsLength >> 2) | + DRF_DEF(906F, _GP_ENTRY1, _LEVEL, _MAIN); + + pGpEntry = (NvU32 *)(((NvU8*)pChannel->pbCpuVA) + pChannel->channelPbSize + + (pChannel->lastSubmittedEntry * NV906F_GP_ENTRY__SIZE)); + + MEM_WR32(&pGpEntry[0], GpEntry0); + MEM_WR32(&pGpEntry[1], GpEntry1); + + // + // need to flush WRC buffer + // + osFlushCpuWriteCombineBuffer(); + + // + // write GP put + // + MEM_WR32(&pChannel->pControlGPFifo->GPPut, putIndex); + osFlushCpuWriteCombineBuffer(); + + // + // On some architectures, if doorbell is mapped via bar0, we need to send + // an extra flush + // + pGpu = pChannel->pGpu; + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + if (kbusFlushPcieForBar0Doorbell_HAL(pGpu, pKernelBus)!= NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Busflush failed in _scrubFillGpFifo\n"); + return NV_ERR_GENERIC; + } + + // + // removing the FIFO Lite Mode handling + // Refer older _ceChannelUpdateGpFifo_GF100 code for implementation + // + if (pChannel->bUseDoorbellRegister) + { + // Use the token from notifier memory for VM migration support. + MEM_WR32(pChannel->pDoorbellRegisterOffset, + MEM_RD32(&(pChannel->pTokenFromNotifier->info32))); + } + return NV_OK; +} + +/** Single function to memset a surface mapped by GPU. This interface supports + both sysmem and vidmem surface, since it uses CE to memset a surface. 
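+    Requests larger than SCRUB_MAX_BYTES_PER_LINE are split into multiple CE
+    launches; as the loop below is written, only the final chunk carries the
+    finish-payload semaphore release (added clarification).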
+ The user is notified by releasing semaphore with value "payload" + */ +static NV_STATUS +_scrubMemory +( + OBJCHANNEL *pChannel, + RmPhysAddr base, + NvU64 size, + NV_ADDRESS_SPACE dstAddressSpace, + NvU32 dstCpuCacheAttrib, + NvU32 payload +) +{ + NvBool bFirstIteration = NV_TRUE; + NvBool bNonStallInterrupt = NV_FALSE; + NvU32 tempMemsetSize = 0; // HW supports copy size 32 bits only + NvU32 putIndex = 0; + NV_STATUS status = NV_OK; + NvU32 methodsLength = 0; + + do + { + tempMemsetSize = (NvU32)NV_MIN(size, SCRUB_MAX_BYTES_PER_LINE); + + //poll for free entry + putIndex = _scrubWaitForFreeEntry(pChannel); + NV_PRINTF(LEVEL_INFO, "Put Index: %x\n", putIndex); + + { + NV_PRINTF(LEVEL_INFO, "Fast Scrubber not enabled!\n"); + methodsLength = _scrubFillPb(pChannel, base, tempMemsetSize, payload, + bFirstIteration, dstAddressSpace, + dstCpuCacheAttrib, (tempMemsetSize == size), + bNonStallInterrupt, putIndex); + } + + NV_PRINTF(LEVEL_INFO, "MethodLength: %x\n", methodsLength); + // Add the PB entry in GP FIFO + status = _scrubFillGpFifo(pChannel, putIndex, methodsLength); + + pChannel->lastSubmittedEntry = putIndex; + + base += tempMemsetSize; + size -= tempMemsetSize; + bFirstIteration = NV_FALSE; + } while (size > 0); + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c b/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c new file mode 100644 index 000000000..750712300 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/mem_utils.c @@ -0,0 +1,979 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap_base.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "os/nv_memory_type.h" +#include "core/locks.h" + +#include "gpu/bus/kern_bus.h" + +/* ------------------------ Private functions --------------------------------------- */ + +/*! + * @brief This utility routine helps in determining the appropriate + * memory transfer technique to be used + */ +static TRANSFER_TYPE +memmgrGetMemTransferType +( + MemoryManager *pMemoryManager +) +{ + return TRANSFER_TYPE_PROCESSOR; +} + +/*! + * @brief This function is used for copying data b/w two memory regions + * using the specified memory transfer technique. Both memory regions + * can be in the same aperture or in different apertures. 
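+ *
+ * Note (added): only TRANSFER_TYPE_PROCESSOR is fully handled in this helper
+ * and its memset/memwrite siblings below, via memdescMapInternal() and
+ * portMemCopy()/portMemSet(); the TRANSFER_TYPE_GSP_DMA and TRANSFER_TYPE_CE
+ * cases are still informational stubs.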
+ * + * @param[in] pDstInfo TRANSFER_SURFACE info for destination region + * @param[in] pSrcInfo TRANSFER_SURFACE info for source region + * @param[in] size Size in bytes of the memory transfer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemCopyWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + TRANSFER_SURFACE *pSrcInfo, + NvU32 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pSrc; + NvU8 *pDst; + + // Sanitize the input + NV_ASSERT_OR_RETURN(pDstInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrcInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrcInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(!memdescDescIsEqual(pDstInfo->pMemDesc, pSrcInfo->pMemDesc), + NV_ERR_INVALID_ARGUMENT); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + pDst = memdescMapInternal(pGpu, pDstInfo->pMemDesc, TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(pDst != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + pSrc = memdescMapInternal(pGpu, pSrcInfo->pMemDesc, TRANSFER_FLAGS_NONE); + if (pSrc == NULL) + { + memdescUnmapInternal(pGpu, pDstInfo->pMemDesc, 0); + NV_ASSERT_OR_RETURN(0, NV_ERR_INSUFFICIENT_RESOURCES); + } + + portMemCopy(pDst + pDstInfo->offset, size, pSrc + pSrcInfo->offset, size); + + memdescUnmapInternal(pGpu, pSrcInfo->pMemDesc, TRANSFER_FLAGS_NONE); + memdescUnmapInternal(pGpu, pDstInfo->pMemDesc, flags); + break; + case TRANSFER_TYPE_GSP_DMA: + NV_PRINTF(LEVEL_INFO, "Add call to GSP DMA task\n"); + break; + case TRANSFER_TYPE_CE: + NV_PRINTF(LEVEL_INFO, "Add call to CE\n"); + break; + } + + return NV_OK; +} + +/*! + * @brief This function is used for setting a memory region to a constant state + * using a specified memory transfer technique + * + * @param[in] pDstInfo TRANSFER_SURFACE info for destination region + * @param[in] value Value to be written to the region + * @param[in] size Size in bytes of the memory to be initialized + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemSetWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + NvU32 value, + NvU32 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pDst; + + // Sanitize the input + NV_ASSERT_OR_RETURN(pDstInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->offset + size <= pDstInfo->pMemDesc->Size, NV_ERR_INVALID_ARGUMENT); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + pDst = memdescMapInternal(pGpu, pDstInfo->pMemDesc, TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(pDst != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + + portMemSet(pDst + pDstInfo->offset, value, size); + + memdescUnmapInternal(pGpu, pDstInfo->pMemDesc, flags); + break; + case TRANSFER_TYPE_GSP_DMA: + NV_PRINTF(LEVEL_INFO, "Add call to GSP DMA task\n"); + break; + case TRANSFER_TYPE_CE: + NV_PRINTF(LEVEL_INFO, "Add call to CE\n"); + break; + } + + return NV_OK; +} + +/*! 
+ * @brief This function is used for writing data placed in a caller passed buffer + * to a given memory region using the specified memory transfer technique + * + * @param[in] pDstInfo TRANSFER_SURFACE info for the destination region + * @param[in] pBuf Buffer allocated by caller + * @param[in] size Size in bytes of the buffer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemWriteWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + void *pBuf, + NvU64 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pDst; + NvU8 *pMapping = memdescGetKernelMapping(pDstInfo->pMemDesc); + + // Sanitize the input + NV_ASSERT_OR_RETURN(pDstInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBuf != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstInfo->offset + size <= pDstInfo->pMemDesc->Size, NV_ERR_INVALID_ARGUMENT); + + if (pMapping != NULL) + { + portMemCopy(pMapping + pDstInfo->offset, size, pBuf, size); + return NV_OK; + } + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + pDst = memdescMapInternal(pGpu, pDstInfo->pMemDesc, TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(pDst != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + + portMemCopy(pDst + pDstInfo->offset, size, pBuf, size); + + memdescUnmapInternal(pGpu, pDstInfo->pMemDesc, flags); + break; + case TRANSFER_TYPE_GSP_DMA: + NV_PRINTF(LEVEL_INFO, "Add call to GSP DMA task\n"); + break; + case TRANSFER_TYPE_CE: + NV_PRINTF(LEVEL_INFO, "Add call to CE\n"); + break; + } + + return NV_OK; +} + +/*! 
+ * @brief This function is used for reading specified number of bytes from + * a source memory region into a caller passed buffer using a specified + * memory transfer technique + * + * @param[in] pSrcInfo TRANSFER_SURFACE info for the source region + * @param[in] pBuf Caller allocated buffer + * @param[in] size Size in bytes of the buffer + * @param[in] transferType Memory transfer technique to be used + * @param[in] flags Flags + */ +static NV_STATUS +memmgrMemReadWithTransferType +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pSrcInfo, + void *pBuf, + NvU64 size, + TRANSFER_TYPE transferType, + NvU32 flags +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pSrc; + NvU8 *pMapping = memdescGetKernelMapping(pSrcInfo->pMemDesc); + + + // Sanitize the input + NV_ASSERT_OR_RETURN(pSrcInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrcInfo->pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBuf != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size > 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pSrcInfo->offset + size <= pSrcInfo->pMemDesc->Size, NV_ERR_INVALID_ARGUMENT); + + if (pMapping != NULL) + { + portMemCopy(pBuf, size, pMapping + pSrcInfo->offset, size); + return NV_OK; + } + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + pSrc = memdescMapInternal(pGpu, pSrcInfo->pMemDesc, TRANSFER_FLAGS_NONE); + NV_ASSERT_OR_RETURN(pSrc != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + + portMemCopy(pBuf, size, pSrc + pSrcInfo->offset, size); + + memdescUnmapInternal(pGpu, pSrcInfo->pMemDesc, 0); + break; + case TRANSFER_TYPE_GSP_DMA: + NV_PRINTF(LEVEL_INFO, "Add call to GSP DMA task\n"); + break; + case TRANSFER_TYPE_CE: + NV_PRINTF(LEVEL_INFO, "Add call to CE\n"); + break; + } + + return NV_OK; +} + +/* ------------------------ Public functions --------------------------------------- */ + +NvU64 memUtilsLeastCommonAlignment(NvU64 align1, NvU64 align2) +{ + NvU64 a, b; // For Euclid's algorithm + NvU64 lcm; // Least Common Multiple of align1 and align2 + NvU64 maxAlignment = NV_U64_MAX; + + // WOLOG, make sure align1 >= align2. + // + if (align2 > align1) + { + NvU64 tmp = align1; + align1 = align2; + align2 = tmp; + } + + // If align2 is 0, return min(align1, maxAlignment) + // + if (align2 == 0) + { + return align1 < maxAlignment ? align1 : maxAlignment; + } + + // Use Euclid's algorithm (GCD(a, b) = GCD(b, a % b)) to find the + // GCD of the two alignments, and use the GCD to find the LCM. + // + a = align1; + b = align2; + while (b != 0) + { + NvU64 old_a = a; + a = b; + b = old_a % b; + NV_ASSERT(a > b); // Ensure termination. Should never fail. + } + lcm = align1 * (align2 / a); // May overflow + + // Return min(lcm, maxAlignment). Also return maxAlignment if the + // lcm calculation overflowed, since that means it must have been + // much bigger than maxAlignment. 
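+    //
+    // Worked example (hypothetical values): align1 = 0x18000 and
+    // align2 = 0x10000 give GCD a = 0x8000, so
+    // lcm = 0x18000 * (0x10000 / 0x8000) = 0x30000.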
+ // + if (lcm > maxAlignment || lcm < align1 || + 0 != (lcm % align1) || 0 != (lcm % align2)) + { + NV_CHECK_FAILED(LEVEL_ERROR, "Alignment limit exceeded"); + return maxAlignment; + } + return lcm; +} + +void memUtilsInitFBAllocInfo +( + NV_MEMORY_ALLOCATION_PARAMS *pAllocParams, + FB_ALLOC_INFO *pFbAllocInfo, + NvHandle hClient, + NvHandle hDevice +) +{ + pFbAllocInfo->pageFormat->type = pAllocParams->type; + pFbAllocInfo->owner = pAllocParams->owner; + pFbAllocInfo->hwResId = 0; + pFbAllocInfo->pad = 0; + pFbAllocInfo->alignPad = 0; + pFbAllocInfo->height = pAllocParams->height; + pFbAllocInfo->width = pAllocParams->width; + pFbAllocInfo->pitch = pAllocParams->pitch; + pFbAllocInfo->size = pAllocParams->size; + pFbAllocInfo->origSize = pAllocParams->size; + pFbAllocInfo->adjustedSize = pAllocParams->size; + pFbAllocInfo->offset = ~0; + pFbAllocInfo->pageFormat->flags = pAllocParams->flags; + pFbAllocInfo->pageFormat->attr = pAllocParams->attr; + pFbAllocInfo->retAttr = pAllocParams->attr; + pFbAllocInfo->pageFormat->attr2 = pAllocParams->attr2; + pFbAllocInfo->retAttr2 = pAllocParams->attr2; + pFbAllocInfo->format = pAllocParams->format; + pFbAllocInfo->comprCovg = pAllocParams->comprCovg; + pFbAllocInfo->zcullCovg = 0; + pFbAllocInfo->ctagOffset = pAllocParams->ctagOffset; + pFbAllocInfo->bIsKernelAlloc = NV_FALSE; + pFbAllocInfo->internalflags = 0; + pFbAllocInfo->hClient = hClient; + pFbAllocInfo->hDevice = hDevice; + + if ((pAllocParams->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT) || + (pAllocParams->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE)) + pFbAllocInfo->align = pAllocParams->alignment; + else + pFbAllocInfo->align = RM_PAGE_SIZE; + + if (pAllocParams->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + pFbAllocInfo->offset = pAllocParams->offset; + pFbAllocInfo->desiredOffset = pAllocParams->offset; + } +} + +/*! + * @brief This function is used for copying data b/w two memory regions + * Both memory regions can be in the same aperture of different apertures + * + * @param[in] pDstInfo TRANSFER_SURFACE info for destination region + * @param[in] pSrcInfo TRANSFER_SURFACE info for source region + * @param[in] size Size in bytes of the memory transfer + * @param[in] flags Flags + */ +NV_STATUS +memmgrMemCopy_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + TRANSFER_SURFACE *pSrcInfo, + NvU32 size, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager); + + return memmgrMemCopyWithTransferType(pMemoryManager, pDstInfo, pSrcInfo, + size, transferType, flags); +} + +/*! + * @brief This function is used for setting a memory region to a constant state + * + * @param[in] pDstInfo TRANSFER_SURFACE info for the destination region + * @param[in] value Value to be written to the region + * @param[in] size Size in bytes of the memory to be initialized + * @param[in] flags Flags + */ +NV_STATUS +memmgrMemSet_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + NvU32 value, + NvU32 size, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager); + + return memmgrMemSetWithTransferType(pMemoryManager, pDstInfo, value, + size, transferType, flags); +} + +/*! 
+ * @brief This function is used for setting a memory region to a constant state + * + * @param[in] pMemDesc Memory descriptor to end transfer to + * @param[in] value Value to be written to the region + * @param[in] flags Flags + */ +NV_STATUS +memmgrMemDescMemSet_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 value, + NvU32 flags +) +{ + TRANSFER_SURFACE transferSurface = {.offset = 0, .pMemDesc = pMemDesc}; + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager); + + return memmgrMemSetWithTransferType(pMemoryManager, &transferSurface, value, + (NvU32)memdescGetSize(pMemDesc), + transferType, flags); +} + +/*! + * @brief This function is used for writing data placed in a user buffer + * to a given memory region + * + * @param[in] pDstInfo TRANSFER_SURFACE info for the destination region + * @param[in] pBuf Buffer allocated by caller + * @param[in] size Size in bytes of the buffer + * @param[in] flags Flags + */ +NV_STATUS +memmgrMemWrite_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pDstInfo, + void *pBuf, + NvU64 size, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager); + + return memmgrMemWriteWithTransferType(pMemoryManager, pDstInfo, pBuf, + size, transferType, flags); +} + +/*! + * @brief This function is used for reading specified number of bytes from + * a source memory region into a caller passed buffer + * + * @param[in] pSrcInfo TRANSFER_SURFACE info for the source region + * @param[in] pBuf Caller allocated buffer + * @param[in] size Size in bytes of the buffer + * @param[in] flags Flags + */ +NV_STATUS +memmgrMemRead_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pSrcInfo, + void *pBuf, + NvU64 size, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager); + + return memmgrMemReadWithTransferType(pMemoryManager, pSrcInfo, pBuf, + size, transferType, flags); +} + +/*! + * @brief This helper function can be used to begin transfers + * + * @param[in] pTransferInfo Transfer information + * @param[in] shadowBufSize Size of allocated shadow buffer in case of shadow mapping + * @param[in] flags Flags + */ +NvU8 * +memmgrMemBeginTransfer_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pTransferInfo, + NvU64 shadowBufSize, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager); + MEMORY_DESCRIPTOR *pMemDesc = pTransferInfo->pMemDesc; + NvU64 offset = pTransferInfo->offset; + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU8 *pPtr = NULL; + NvU64 memSz = 0; + + NV_ASSERT_OR_RETURN(pMemDesc != NULL, NULL); + NV_ASSERT_OR_RETURN((memSz = memdescGetSize(pMemDesc)) >= shadowBufSize, NULL); + NV_ASSERT_OR_RETURN(memdescGetKernelMapping(pMemDesc) == NULL, NULL); + + memSz = shadowBufSize == 0 ? memSz : shadowBufSize; + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + NV_ASSERT_OR_RETURN((pPtr = memdescMapInternal(pGpu, pMemDesc, flags)) != NULL, NULL); + pPtr = &pPtr[offset]; + break; + case TRANSFER_TYPE_GSP_DMA: + case TRANSFER_TYPE_CE: + if (flags & TRANSFER_FLAGS_SHADOW_ALLOC) + { + NV_ASSERT_OR_RETURN((pPtr = portMemAllocNonPaged(memSz)), NULL); + if (flags & TRANSFER_FLAGS_SHADOW_INIT_MEM) + { + NV_ASSERT_OK(memmgrMemRead(pMemoryManager, pTransferInfo, pPtr, memSz, flags)); + } + } + break; + default: + NV_ASSERT(0); + } + memdescSetKernelMapping(pMemDesc, pPtr); + return pPtr; +} + +/*! 
+ * @brief This helper function can be used to end transfers + * + * @param[in] pTransferInfo Transfer information + * @param[in] shadowBufSize Size of allocated shadow buffer in case of shadow mapping + * @param[in] flags Flags + */ +void +memmgrMemEndTransfer_IMPL +( + MemoryManager *pMemoryManager, + TRANSFER_SURFACE *pTransferInfo, + NvU64 shadowBufSize, + NvU32 flags +) +{ + TRANSFER_TYPE transferType = memmgrGetMemTransferType(pMemoryManager); + MEMORY_DESCRIPTOR *pMemDesc = pTransferInfo->pMemDesc; + NvU64 offset = pTransferInfo->offset; + OBJGPU *pGpu = ENG_GET_GPU(pMemoryManager); + NvU64 memSz = 0; + NvU8 *pMapping = memdescGetKernelMapping(pMemDesc); + + NV_ASSERT_OR_RETURN_VOID(pMemDesc != NULL); + NV_ASSERT_OR_RETURN_VOID((memSz = memdescGetSize(pMemDesc)) >= (shadowBufSize + offset) ); + + memSz = shadowBufSize == 0 ? memSz : shadowBufSize; + + memdescSetKernelMapping(pMemDesc, NULL); + + switch (transferType) + { + case TRANSFER_TYPE_PROCESSOR: + memdescUnmapInternal(pGpu, pMemDesc, flags); + return; + case TRANSFER_TYPE_GSP_DMA: + case TRANSFER_TYPE_CE: + if (pMapping != NULL) + { + NV_ASSERT_OK(memmgrMemWrite(pMemoryManager, pTransferInfo, pMapping, memSz, flags)); + portMemFree(pMapping); + } + return; + default: + NV_ASSERT(0); + } + return; +} + +/*! + * @brief Helper function that ends transfers to a memdesc with default offset/size + * + * @param[in] pMemDesc Memory descriptor to end transfer to + * @param[in] flags Flags + */ +void +memmgrMemDescEndTransfer_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + TRANSFER_SURFACE transferSurface = {.offset = 0, .pMemDesc = pMemDesc}; + memmgrMemEndTransfer(pMemoryManager, &transferSurface, memdescGetSize(pMemDesc), flags); +} + +/*! + * @brief Helper function that begins transfers to a memdesc with default offset/size + * + * @param[in] pMemDesc Memory descriptor to begin transfer to + * @param[in] flags Flags + */ +NvU8 * +memmgrMemDescBeginTransfer_IMPL +( + MemoryManager *pMemoryManager, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 flags +) +{ + TRANSFER_SURFACE transferSurface = {.offset = 0, .pMemDesc = pMemDesc}; + return memmgrMemBeginTransfer(pMemoryManager, &transferSurface, memdescGetSize(pMemDesc), flags); +} + +/*! + * @brief This function is used to allocate common resources across memory + * classes, and must be used before memory-specific resource alloc. + * + * @param[in/out] pAllocRequest User-provided alloc request struct + * @param[in/out] pFbAllocInfo Initialized FB_ALLOC_INFO struct to alloc + */ +NV_STATUS +memmgrAllocResources_IMPL +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + NV_STATUS status = NV_OK; + NvU64 alignment = 0; + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + NV_ADDRESS_SPACE addrSpace = memmgrAllocGetAddrSpace(pMemoryManager, pVidHeapAlloc->flags, + pFbAllocInfo->retAttr); + + NvU32 pageSize = 0; + NvBool bAllocedHwRes = NV_FALSE; + + // IRQL TEST: must be running at equivalent of passive-level + IRQL_ASSERT_AND_RETURN(!osIsRaisedIRQL()); + + // + // Check for valid size. + // + if (pVidHeapAlloc->size == 0) + return NV_ERR_INVALID_ARGUMENT; + + // + // Ensure a valid allocation pVidHeapAlloc->type was passed in + // + if (pVidHeapAlloc->type > NVOS32_NUM_MEM_TYPES - 1) + return NV_ERR_INVALID_ARGUMENT; + + if (ADDR_VIRTUAL != addrSpace) + { + // If vidmem not requested explicitly, decide on the physical location. 
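+        // A request with _LOCATION set to _PCI or _ANY is resolved here: it
+        // becomes _VIDMEM when the chosen aperture is ADDR_FBMEM, and _PCI
+        // otherwise.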
+ if (FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _PCI, pFbAllocInfo->retAttr) || + FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _ANY, pFbAllocInfo->retAttr)) + { + if (ADDR_FBMEM == addrSpace) + { + pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pFbAllocInfo->retAttr); + } + else + { + pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _PCI, pFbAllocInfo->retAttr); + } + } + } + else // Virtual + { + // Clear location to ANY since virtual does not associate with location. + pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _ANY, pFbAllocInfo->retAttr); + } + + // Fetch RM page size + pageSize = memmgrDeterminePageSize(pMemoryManager, pFbAllocInfo->hClient, pFbAllocInfo->size, + pFbAllocInfo->format, pFbAllocInfo->pageFormat->flags, + &pFbAllocInfo->retAttr, &pFbAllocInfo->retAttr2); + if (!IsAMODEL(pGpu) && pageSize == 0) + { + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, "memmgrDeterminePageSize failed, status: 0x%x\n", status); + goto failed; + } + + // Fetch memory alignment + status = memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, &pFbAllocInfo->size, &pFbAllocInfo->align, + pFbAllocInfo->alignPad, pFbAllocInfo->pageFormat->flags, + pFbAllocInfo->retAttr, pFbAllocInfo->retAttr2, 0); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "memmgrAllocDetermineAlignment failed, status: 0x%x\n", status); + goto failed; + } + + // + // Call into HAL to reserve any hardware resources for + // the specified memory pVidHeapAlloc->type. + // If the alignment was changed due to a HW limitation, and the + // flag NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE is set, bad_argument + // will be passed back from the HAL + // + status = memmgrAllocHwResources(pGpu, pMemoryManager, pFbAllocInfo); + bAllocedHwRes = NV_TRUE; + + pVidHeapAlloc->attr = pFbAllocInfo->retAttr; + pVidHeapAlloc->attr2 = pFbAllocInfo->retAttr2; + pVidHeapAlloc->format = pFbAllocInfo->format; + pVidHeapAlloc->comprCovg = pFbAllocInfo->comprCovg; + pVidHeapAlloc->zcullCovg = pFbAllocInfo->zcullCovg; + + if (status != NV_OK) + { + // + // probably means we passed in a bogus pVidHeapAlloc->type or no tiling resources available + // when tiled memory attribute was set to REQUIRED + // + NV_PRINTF(LEVEL_ERROR, "fbAlloc failure!\n"); + goto failed; + } + + // call HAL to set resources + status = memmgrSetAllocParameters_HAL(pGpu, pMemoryManager, pFbAllocInfo); + + if (status != NV_OK) + { + // + // Two possibilties: either some attribute was set to REQUIRED, ran out of resources, + // or unaligned address / size was passed down. Free up memory and fail this call. + // heapFree will fix up heap pointers. + // + goto failed; + } + + // + // for fixed allocation check if the alignment needs to adjusted. + // some hardware units request allocation aligned to smaller than + // page sizes which can be handled through alignPad + // + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + // + // is our desired offset suitably aligned? + // if not adjust alignment using alignPad(offset into a page), the + // allocation is page size aligned as required for swizzling. + // + if (pFbAllocInfo->desiredOffset % (pFbAllocInfo->align + 1)) + { + pFbAllocInfo->alignPad = pFbAllocInfo->desiredOffset % (pFbAllocInfo->align + 1); + pFbAllocInfo->desiredOffset -= pFbAllocInfo->alignPad; + } + } + + // + // Refresh search parameters. 
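+    // adjustedSize excludes the alignPad bytes carved out above for
+    // fixed-address allocations, e.g. (hypothetical values) desiredOffset =
+    // 0x1234 with align = 0xFFF gives alignPad = 0x234, so adjustedSize =
+    // size - 0x234.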
+ // + pFbAllocInfo->adjustedSize = pFbAllocInfo->size - pFbAllocInfo->alignPad; + pVidHeapAlloc->height = pFbAllocInfo->height; + pVidHeapAlloc->pitch = pFbAllocInfo->pitch; + + // + // The api takes alignment-1 (used to be a mask). + // + alignment = pFbAllocInfo->align + 1; + pVidHeapAlloc->alignment = pFbAllocInfo->align + 1; // convert mask to size + + // + // Allow caller to request host page alignment to make it easier + // to move things around with host os VM subsystem + // + if ((pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FORCE_ALIGN_HOST_PAGE) && + (addrSpace == ADDR_FBMEM)) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 hostPageSize = pSys->cpuInfo.hostPageSize; + + // hostPageSize *should* always be set, but.... + if (hostPageSize == 0) + hostPageSize = RM_PAGE_SIZE; + + alignment = memUtilsLeastCommonAlignment(alignment, hostPageSize); + } + + pVidHeapAlloc->alignment = alignment; + pFbAllocInfo->align = alignment - 1; + + return status; + +failed: + if (bAllocedHwRes) + { + memmgrFreeHwResources(pGpu, pMemoryManager, pFbAllocInfo); + } + + return status; +} + +/*! + * @brief This function is used to create a memory descriptor if needed. + * + * @param[in/out] pAllocRequest User-provided alloc request struct + * @param[in/out] pFbAllocInfo Initialized FB_ALLOC_INFO struct to alloc + * @param[out] ppMemDesc Double pointer to created descriptor + * @param[in] pHeap Heap pointer to store in descriptor + * @param[in] addrSpace Address space identifier + * @param[in] memDescFlags Memory descriptor alloc flags + * @param[out] bAllocedMemDesc NV_TRUE if a descriptor was created + */ +NV_STATUS +memUtilsAllocMemDesc +( + OBJGPU *pGpu, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo, + MEMORY_DESCRIPTOR **ppMemDesc, + Heap *pHeap, + NV_ADDRESS_SPACE addrSpace, + NvBool bContig, + NvBool *bAllocedMemDesc +) +{ + NV_STATUS status = NV_OK; + + // + // Allocate a memory descriptor if needed. We do this after the fbHwAllocResources() call + // so we have the updated size information. Linear callers like memdescAlloc() can live with + // only having access to the requested size in bytes, but block linear callers really do + // need to allocate after fbAlloc() rounding takes place. + // + if (pAllocRequest->pMemDesc == NULL) + { + NvU64 memDescFlags = MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE; + + // + // Allocate a contig vidmem descriptor now; if needed we'll + // allocate a new noncontig memdesc later + // + status = memdescCreate(&pAllocRequest->pMemDesc, pGpu, pFbAllocInfo->adjustedSize, 0, bContig, + addrSpace, NV_MEMORY_UNCACHED, memDescFlags); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "cannot alloc memDesc!\n"); + return status; + } + + *bAllocedMemDesc = NV_TRUE; + } + + *ppMemDesc = pAllocRequest->pMemDesc; + (*ppMemDesc)->pHeap = pHeap; + + // Set attributes tracked by the memdesc + memdescSetPteKind(*ppMemDesc, pFbAllocInfo->format); + memdescSetHwResId(*ppMemDesc, pFbAllocInfo->hwResId); + + return status; +} + +/*! + * Memsets the memory for the given memory descriptor with the given value. + * This function assumes that BAR2 is not yet available. Thus either the BAR0 + * window to FB or a memmap to SYSMEM will be used, depending on the memory + * location. + * + * @param[in] pGpu GPU object pointer + * @param[in] pMemDesc Memory descriptor for the memory to memset + * @param[in] value Value to memset to. 
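+ * @return         NV_OK on success, or an error propagated from the BAR0
+ *                 window / memory mapping helpers.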
+ */ +NV_STATUS +memUtilsMemSetNoBAR2(OBJGPU *pGpu, PMEMORY_DESCRIPTOR pMemDesc, NvU8 value) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU8 *pMap = NULL; + void *pPriv = NULL; + RmPhysAddr physAddr; + RmPhysAddr physAddrOrig; + NvU64 sizeInDWord; + NvU32 sizeOfDWord = sizeof(NvU32); + NvU32 bar0Addr; + NvU32 i; + + NV_ASSERT((pMemDesc != NULL) && + (pMemDesc->Size & (sizeOfDWord-1)) == 0); + sizeInDWord = pMemDesc->Size / sizeOfDWord; + + // + // BAR2 is not yet initialized. Thus use either the BAR0 window or + // memmap to initialize the given surface. + // + NV_ASSERT(pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping == NULL); + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_FBMEM: + // + // Set the BAR0 window to encompass the given surface while + // saving off the location to where the BAR0 window was + // previously pointing. + // + physAddr = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + NV_ASSERT((physAddr & (sizeOfDWord-1)) == 0); + + physAddrOrig = kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus); + NV_ASSERT_OK_OR_RETURN( + kbusSetBAR0WindowVidOffset_HAL(pGpu, + pKernelBus, + physAddr & ~0xffffULL)); + bar0Addr = + NvU64_LO32(kbusGetBAR0WindowAddress_HAL(pKernelBus) + + (physAddr - kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus))); + + // + // Iterate and initialize the given surface with BAR0 + // writes. + // + for (i = 0; i < sizeInDWord; i++) + { + GPU_REG_WR32(pGpu, + bar0Addr + (sizeOfDWord * i), + value); + } + + // + // Restore where the BAR0 window was previously pointing + // to. + // + NV_ASSERT_OK_OR_RETURN( + kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, physAddrOrig)); + + break; + + case ADDR_SYSMEM: + // Plain old memmap. + NV_ASSERT_OK_OR_RETURN( + memdescMapOld(pMemDesc, 0, + pMemDesc->Size, + NV_TRUE, // kernel, + NV_PROTECT_READ_WRITE, + (void **)&pMap, + &pPriv)); + portMemSet(pMap, value, NvU64_LO32(pMemDesc->Size)); + memdescUnmapOld(pMemDesc, 1, 0, pMap, pPriv); + break; + + default: + // Should not happen. + NV_ASSERT(0); + break; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/method_notification.c b/src/nvidia/src/kernel/gpu/mem_mgr/method_notification.c new file mode 100644 index 000000000..f0559e2cd --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/method_notification.c @@ -0,0 +1,679 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/********************************* DMA Manager *****************************\ +* * +* Method notifications are handled in this module. DMA report and OS * +* action are dealt with on a per-object basis. * +* * +****************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu/mem_mgr/context_dma.h" +#include "os/os.h" +#include "objtmr.h" +#include "gpu/device/device.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_mgr.h" + +//--------------------------------------------------------------------------- +// +// Notification completion. +// +//--------------------------------------------------------------------------- + +void notifyMethodComplete +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + NvU32 Offset, + NvV32 Data, + NvU32 info32, + NvU16 info16, + NV_STATUS CompletionStatus +) +{ + if (pObject->bNotifyTrigger) + { + pObject->bNotifyTrigger = NV_FALSE; + + // + // Do any OS specified action related to this notification. + // + if (pObject->notifyAction) + { + PEVENTNOTIFICATION pEventNotifications = inotifyGetNotificationList(staticCast(pObject, INotifier)); + notifyEvents(pGpu, pEventNotifications, 0, Offset, Data, CompletionStatus, pObject->notifyAction); + } + } +} + +static NV_STATUS notifyWriteNotifier +( + OBJGPU *pGpu, + ContextDma *NotifyXlate, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU64 Offset, + NvBool TimeSupplied, + NvU64 Time +) +{ + NV_STATUS status; + NOTIFICATION *pNotifyBuffer; + + // + // Fill in the notification structure. + // + status = ctxdmaGetKernelVA( NotifyXlate, Offset, sizeof(*pNotifyBuffer), + (void **)&(pNotifyBuffer), + gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu))); + + if (status != NV_OK) + { + return status; + } + + notifyFillNOTIFICATION(pGpu, pNotifyBuffer, Info32, Info16, + CompletionStatus, TimeSupplied, Time); + return status; +} + +void +notifyFillNOTIFICATION +( + OBJGPU *pGpu, + NOTIFICATION *pNotifyBuffer, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvBool TimeSupplied, + NvU64 Time +) +{ + INFO16_STATUS infoStatus; + NvU32 TimeHi, TimeLo; + + if (!TimeSupplied) + { + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + tmrGetCurrentTime(pTmr, &Time); + } + + TimeLo = NvU64_LO32(Time); + TimeHi = NvU64_HI32(Time); + + // + // Since notifiers are not read by the GPU, and only the CPU, these + // writes to not need to be flushed. A subsequent CPU read to this data + // will be serialized with these writes + // + MEM_WR32(&pNotifyBuffer->OtherInfo32, Info32); + MEM_WR32(&pNotifyBuffer->TimeHi, TimeHi); + MEM_WR32(&pNotifyBuffer->TimeLo, TimeLo); + + // + // Combine into 32b write to avoid issues in environments that don't + // support 16b writes. For example, when routing all memory requests + // through IFB we are limited to 32b read/writes only. 
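+    // INFO16_STATUS packs the two 16-bit fields into one 32-bit word, so the
+    // single MEM_WR32 below updates Status and OtherInfo16 together.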
+ // + infoStatus.Info16Status_16.Status = (NvV16) CompletionStatus; + infoStatus.Info16Status_16.OtherInfo16 = Info16; + MEM_WR32(&pNotifyBuffer->Info16Status.Info16Status_32, + infoStatus.Info16Status_32); +} + +void +notifyFillNvNotification +( + OBJGPU *pGpu, + NvNotification *pNotification, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvBool TimeSupplied, + NvU64 Time +) +{ + NvU32 TimeHi, TimeLo; + + if (!TimeSupplied) + { + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + tmrGetCurrentTime(pTmr, &Time); + } + + TimeLo = NvU64_LO32(Time); + TimeHi = NvU64_HI32(Time); + + // + // Since notifiers are not read by the GPU, and only the CPU, these + // writes do not need to be flushed. A subsequent CPU read to this data + // will be serialized with these writes + // + MEM_WR16(&pNotification->info16, Info16); + MEM_WR32(&pNotification->info32, Info32); + MEM_WR32(&pNotification->timeStamp.nanoseconds[0], TimeHi); + MEM_WR32(&pNotification->timeStamp.nanoseconds[1], TimeLo); + MEM_WR16(&pNotification->status, CompletionStatus); +} + +NV_STATUS notifyFillNotifier +( + OBJGPU *pGpu, + ContextDma *NotifyXlate, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus +) +{ + return notifyWriteNotifier(pGpu, NotifyXlate, Info32, + Info16, CompletionStatus, + 0, NV_FALSE, 0); +} + +NV_STATUS notifyFillNotifierOffsetTimestamp +( + OBJGPU *pGpu, + ContextDma *NotifyXlate, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU64 Offset, + NvU64 Time +) +{ + return notifyWriteNotifier(pGpu, NotifyXlate, Info32, + Info16, CompletionStatus, + Offset, + NV_TRUE, Time); +} + +NV_STATUS notifyFillNotifierOffset +( + OBJGPU *pGpu, + ContextDma *NotifyXlate, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU64 Offset +) +{ + return notifyWriteNotifier(pGpu, NotifyXlate, Info32, + Info16, CompletionStatus, + Offset, + NV_FALSE, 0); +} + +NV_STATUS notifyFillNotifierArrayTimestamp +( + OBJGPU *pGpu, + ContextDma *NotifyXlate, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU32 Index, + NvU64 Time +) +{ + return notifyWriteNotifier(pGpu, NotifyXlate, Info32, + Info16, CompletionStatus, + Index * sizeof(NOTIFICATION), + NV_TRUE, Time); +} + +NV_STATUS notifyFillNotifierArray +( + OBJGPU *pGpu, + ContextDma *NotifyXlate, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU32 Index +) +{ + return notifyWriteNotifier(pGpu, NotifyXlate, Info32, + Info16, CompletionStatus, + Index * sizeof(NOTIFICATION), + NV_FALSE, 0); +} + +/* + * @brief fills notifier at GPU VA base + index with given info, + * time and completion status + * + * Looks up dma memory mapping with given GPU VA and performs writes. + * Notifier write is skipped when CPU kernel mapping is missing. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] hClient NvU32 client handle + * @param[in] hMemoryCtx Handle of a memory object to which NotifyGPUVABase belongs + * @param[in] NotifyGPUVABase 64b GPU VA base address of semaphore + * @param[in] Info32 32b info part + * @param[in] Info16 16b info part + * @param[in] CompletionStatus NV_STATUS value to write to notifier status + * @param[in] Index index of notifier in notifier array + * @param[in] Time 64b time stamp + * + * @return NV_ERR_INVALID_ADDRESS on wrong GPU VA address or out of bound index, + * NV_OK on success + * + */ +NV_STATUS notifyFillNotifierGPUVATimestamp +( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hMemoryCtx, + NvU64 NotifyGPUVABase, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU32 Index, + NvU64 Time +) +{ + NvU64 notifyGPUVA; + NvBool bFound; + CLI_DMA_MAPPING_INFO *pDmaMappingInfo; + NvU64 offset; + NvU32 subdeviceInstance; + NOTIFICATION *pNotifier; + RsClient *pClient; + Device *pDevice; + NV_STATUS status; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + if (status != NV_OK) + return status; + + notifyGPUVA = NotifyGPUVABase + (Index * sizeof(NOTIFICATION)); + + // Memory context is required for mapping lookup + bFound = CliGetDmaMappingInfo(hClient, + RES_GET_HANDLE(pDevice), + hMemoryCtx, + notifyGPUVA, + gpumgrGetDeviceGpuMask(pGpu->deviceInstance), + &pDmaMappingInfo); + if (!bFound) + { + NV_PRINTF(LEVEL_ERROR, "Can't find mapping; notifier not written\n"); + return NV_ERR_INVALID_ADDRESS; + } + + offset = notifyGPUVA - pDmaMappingInfo->DmaOffset; + if ((offset + sizeof(NOTIFICATION)) > pDmaMappingInfo->pMemDesc->Size) + { + NV_PRINTF(LEVEL_ERROR, + "offset+size doesn't fit into mapping; notifier not written\n"); + return NV_ERR_INVALID_ADDRESS; + } + + // + // Set idx to default position in the dma mapped address array + // + subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu)); + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE) + + if (IsSLIEnabled(pGpu) && + (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_FBMEM)) + { + // + // If SLI and it is vidmem, replace idx with appropriate SLI index + // otherwise, this just stays the default value. + // + subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + } + + if (!pDmaMappingInfo->KernelVAddr[subdeviceInstance]) + { + NV_PRINTF(LEVEL_ERROR, "KernelVAddr==NULL; notifier not written\n"); + } + else + { + pNotifier = (PNOTIFICATION)((NvU8*)pDmaMappingInfo->KernelVAddr[subdeviceInstance] + offset); + + notifyFillNOTIFICATION(pGpu, pNotifier, Info32, Info16, + CompletionStatus, NV_TRUE, Time); + } + + SLI_LOOP_END + + return NV_OK; +} + +/* + * @brief fills notifier at GPU VA base + index with current time, given info, + * and completion status + * + * Use this function to fill notifier through BAR1 when you have GPU VA. + * + * Wrapper for notifyFillNotifierGPUVATimestamp. 
+ * Gets current time and routes data to notifyFillNotifierGPUVATimestamp + * + * @param[in] pGpu OBJGPU pointer + * @param[in] hClient NvU32 client handle + * @param[in] hMemoryCtx Handle of a memory object to which NotifyGPUVABase belongs + * @param[in] NotifyGPUVABase 64b GPU VA base address of semaphore + * @param[in] Info32 32b info part + * @param[in] Info16 16b info part + * @param[in] CompletionStatus NV_STATUS value to write to notifier status + * @param[in] Index index of notifier in notifier array + * @param[in] Time 64b time stamp + * + * @return status of notifyFillNotifierGPUVATimestamp + */ +NV_STATUS notifyFillNotifierGPUVA +( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hMemoryCtx, + NvU64 NotifyGPUVABase, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU32 Index +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NvU64 Time; + + tmrGetCurrentTime(pTmr, &Time); + + return notifyFillNotifierGPUVATimestamp(pGpu, + hClient, + hMemoryCtx, + NotifyGPUVABase, + Info32, + Info16, + CompletionStatus, + Index, + Time); +} + +/* + * @brief fills notifiers by given memory info and index with given time, info, + * and completion status + * + * Use this function to fill notifier through BAR2 when you have memory info. + * + + * @param[in] pGpu OBJGPU pointer + * @param[in] hClient NvU32 client handle + * @param[in] NotifyGPUVABase 64b GPU VA base address of semaphore + * @param[in] Info32 32b info part + * @param[in] Info16 16b info part + * @param[in] CompletionStatus NV_STATUS value to write to notifier status + * @param[in] Index index of notifier in notifier array + * + * @return NV_ERR_GENERIC if RM aperture mapping failed. + */ +NV_STATUS notifyFillNotifierMemoryTimestamp +( + OBJGPU *pGpu, + Memory *pMemory, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU32 Index, + NvU64 Time +) +{ + NvNotification * pDebugNotifier = NULL; + + // + // Check if there's already a CPU mapping we can use. If not, attempt to + // map the notifier, which may fail if we're in a context where we can't + // create mappings. + // + pDebugNotifier = (NvNotification *)((NvUPtr)pMemory->KernelVAddr); + if (pDebugNotifier == NULL) + { + pDebugNotifier = (NvNotification *) kbusMapRmAperture_HAL(pGpu, + pMemory->pMemDesc); + if (pDebugNotifier == NULL) + { + return NV_ERR_GENERIC; + } + } + + notifyFillNvNotification(pGpu, &pDebugNotifier[Index], Info32, Info16, + CompletionStatus, NV_TRUE, Time); + + if (pMemory->KernelVAddr == NvP64_NULL) + { + kbusUnmapRmAperture_HAL(pGpu, pMemory->pMemDesc, + (NvU8 **)&pDebugNotifier, NV_TRUE); + } + + return NV_OK; +} + +/* + * @brief fills notifiers by given memory info and index with current time, + * info and completion status. + * + * Use this function to fill notifier through BAR2 when you have memory info. + * + * Current time wrapper around notifyFillNotifierMemoryTimestamp. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] hClient NvU32 client handle + * @param[in] NotifyGPUVABase 64b GPU VA base address of semaphore + * @param[in] Info32 32b info part + * @param[in] Info16 16b info part + * @param[in] CompletionStatus NV_STATUS value to write to notifier status + * @param[in] Index index of notifier in notifier array + * + * @return status of notifyFillNotifierMemoryTimestamp + */ +NV_STATUS notifyFillNotifierMemory +( + OBJGPU *pGpu, + Memory *pMemory, + NvV32 Info32, + NvV16 Info16, + NV_STATUS CompletionStatus, + NvU32 Index +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NvU64 Time; + + tmrGetCurrentTime(pTmr, &Time); + + return notifyFillNotifierMemoryTimestamp(pGpu, + pMemory, + Info32, + Info16, + CompletionStatus, + Index, + Time); + +} + +/* + * @brief fill semaphore structure at GPU VA base given time and release value + * + * Looks up dma memory mapping with given GPU VA and performs writes. + * Semaphore write is skipped when CPU kernel mapping is missing. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] hClient NvU32 client handle + * @param[in] SemaphoreGPUVABase 64b GPU VA base address of semaphore + * @param[in] ReleaseValue NvU32 value to write to semaphore upon release + * @param[in] Index index of semaphore in semaphore array + * @param[in] Time 64b time stamp + * + * @return NV_ERR_INVALID_ADDRESS on wrong GPU VA address or out of bound index, + * NV_OK on success + * + */ +NV_STATUS semaphoreFillGPUVATimestamp +( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hMemCtx, + NvU64 SemaphoreGPUVABase, + NvV32 ReleaseValue, + NvU32 Index, + NvBool bBroadcast, + NvU64 Time +) +{ + NvU64 semaphoreGPUVA; + CLI_DMA_MAPPING_INFO *pDmaMappingInfo; + NvU64 offset; + NvU32 timeHi, timeLo; + NvU32 subdeviceInstance; + NvGpuSemaphore *pSemaphore; + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + NvBool bFound; + RsClient *pClient; + Device *pDevice; + NV_STATUS status; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByGpu(pClient, pGpu, NV_TRUE, &pDevice); + if (status != NV_OK) + return status; + + semaphoreGPUVA = SemaphoreGPUVABase + (Index * sizeof(NvGpuSemaphore)); + + bFound = CliGetDmaMappingInfo(hClient, + RES_GET_HANDLE(pDevice), + hMemCtx, + semaphoreGPUVA, + gpumgrGetDeviceGpuMask(pGpu->deviceInstance), + &pDmaMappingInfo); + if (!bFound) + { + NV_PRINTF(LEVEL_ERROR, "Can't find mapping; semaphore not released\n"); + return NV_ERR_INVALID_ADDRESS; + } + + offset = semaphoreGPUVA - pDmaMappingInfo->DmaOffset; + if ((offset + sizeof(NvGpuSemaphore)) > pDmaMappingInfo->pMemDesc->Size) + { + NV_PRINTF(LEVEL_ERROR, + "offset+size doesn't fit into mapping; semaphore not released\n"); + return NV_ERR_INVALID_ADDRESS; + } + + timeLo = NvU64_LO32(Time); + timeHi = NvU64_HI32(Time); + + // + // Set idx to default position in the dma mapped address array + // + subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu)); + + osFlushCpuWriteCombineBuffer(); + + gpumgrSetBcEnabledStatus(pGpu, bBroadcast); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + + if (IsSLIEnabled(pGpu) && + (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_FBMEM)) + { + // + // If SLI and it is vidmem, replace idx with appropriate SLI index + // otherwise, this just stays the default value. 
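+        // Vidmem mappings are tracked per subdevice in KernelVAddr[], hence
+        // the per-GPU index here; sysmem keeps the default index computed
+        // before the loop.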
+ // + subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + } + + if (!pDmaMappingInfo->KernelVAddr[subdeviceInstance]) + { + NV_PRINTF(LEVEL_ERROR, "KernelVAddr==NULL; semaphore not released\n"); + } + else + { + pSemaphore = (NvGpuSemaphore*)((NvU8*)pDmaMappingInfo->KernelVAddr[subdeviceInstance] + offset); + + MEM_WR32(&(pSemaphore->timeStamp.nanoseconds[0]), timeLo); + MEM_WR32(&(pSemaphore->timeStamp.nanoseconds[1]), timeHi); + MEM_WR32(&(pSemaphore->data[0]), ReleaseValue); + } + + SLI_LOOP_END + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + osFlushCpuWriteCombineBuffer(); + + return NV_OK; +} + +/* + * @brief fill semaphore at GPU VA with given release value and current time stamp + * + * Use this function to fill Semaphore through BAR1 when you have GPU VA. + * + * Wrapper for semaphore handling. Gets current time and routes data to + * semaphoreFillGPUVATimestamp. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] hClient NvU32 client handle + * @param[in] SemaphoreGPUVABase 64b GPU VA base address of semaphore + * @param[in] ReleaseValue NvU32 value to write to semaphore upon release + * @param[in] Index index of semaphore in semaphore array + * + * @return status of semaphoreFillGPUVATimestamp + */ +NV_STATUS semaphoreFillGPUVA +( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hMemCtx, + NvU64 SemaphoreGPUVABase, + NvV32 ReleaseValue, + NvU32 Index, + NvBool bBroadcast +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NvU64 Time; + + tmrGetCurrentTime(pTmr, &Time); + + return semaphoreFillGPUVATimestamp(pGpu, + hClient, + hMemCtx, + SemaphoreGPUVABase, + ReleaseValue, + Index, + bBroadcast, + Time); +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/objheap.c b/src/nvidia/src/kernel/gpu/mem_mgr/objheap.c new file mode 100644 index 000000000..ae5f5791b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/objheap.c @@ -0,0 +1,173 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/***************************** Heap Rotuines *******************************\ +* Heap object function definitions. 
* +\***************************************************************************/ + +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" // To move to MIT license +#include "nvRmReg.h" +#include "os/os.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "rmapi/rmapi_utils.h" +#include "vgpu/rpc.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" + +NV_STATUS +heapInit_IMPL(OBJGPU *pGpu, Heap *pHeap, NvU64 base, NvU64 size, HEAP_TYPE_INTERNAL heapType, NvU32 gfid, void *pPtr) +{ + NV_STATUS status; + + status = memmgrGetBlackListPagesForHeap_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), pHeap); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_INFO, + "Failed to read blackList pages (0x%x).\n", + status); + } + + status = heapInitRegistryOverrides(pGpu, pHeap); + + if (NV_OK != status) + { + // Warn and continue + NV_PRINTF(LEVEL_ERROR, "Error 0x%x reading registry\n", status); + } + + // + // Not all offlined addresses belong to this partition + // Filter out and keep only the ones that fall in this partition's memory + // + if (heapType == HEAP_TYPE_PARTITION_LOCAL || + (heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR && hypervisorIsVgxHyper())) + { + if (IS_GFID_VF(gfid)) { + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 blIndex = 0; + RmPhysAddr guestFbOffsetSpa; + NV2080_CTRL_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES_PARAMS *pParams; + + pParams = portMemAllocStackOrHeap(sizeof(*pParams)); + if (pParams == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(pParams, 0, sizeof(*pParams)); + + pParams->gfid = gfid; + pParams->numEntries = 1; + pParams->gpaEntries[0] = 0; // translate gpa 0 (assumes contig) + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_VMMU_GET_SPA_FOR_GPA_ENTRIES, + pParams, sizeof(*pParams)); + guestFbOffsetSpa = pParams->spaEntries[0]; + portMemFreeStackOrHeap(pParams); + pParams = NULL; + + if (status != NV_OK) + { + return status; + } + + heapFilterBlackListPages(pHeap, guestFbOffsetSpa + base, size); + + // Convert the offlined page offsets from SPA -> GPA + for (blIndex = 0; blIndex < pHeap->blackListAddresses.count; blIndex++) + { + pHeap->blackListAddresses.data[blIndex].address -= guestFbOffsetSpa; + } + } + else { + heapFilterBlackListPages(pHeap, base, size); + } + } + + status = heapInitInternal(pGpu, pHeap, base, size, heapType, pPtr); + + if (NV_OK != status) + { + return status; + } + + return status; +} + +// +// Initialize the heap from any possible registry overrides +// +NV_STATUS +heapInitRegistryOverrides_IMPL +( + OBJGPU *pGpu, + Heap *pHeap +) +{ + + return NV_OK; +} + +// +// Not all offlined addresses belong to a range +// Filter out and keep only the ones that fall in the range +// +void +heapFilterBlackListPages_IMPL +( + Heap *pHeap, + NvU64 base, + NvU64 size +) +{ + NvU32 blIndex = 0; + NvU32 blCount = 0; + NvU64 blAddress = 0; + NvU32 blType; + NvU32 unusedEntries; + + if (!pHeap || !pHeap->blackListAddresses.data) + return; + + for (blIndex = 0; blIndex < pHeap->blackListAddresses.count; blIndex++) + { + blAddress = pHeap->blackListAddresses.data[blIndex].address; + blType = pHeap->blackListAddresses.data[blIndex].type; + if (blAddress >= base && blAddress < (base + size)) + { + pHeap->blackListAddresses.data[blCount].address = blAddress; + pHeap->blackListAddresses.data[blCount].type = blType; + blCount++; + } + } + + if (blCount != 0) + { + // clear out all other entries + unusedEntries = pHeap->blackListAddresses.count - blCount; + 
portMemSet(&pHeap->blackListAddresses.data[blCount], 0, + sizeof(*(pHeap->blackListAddresses.data)) * unusedEntries); + + pHeap->blackListAddresses.count = blCount; + } +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/addrtree.c b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/addrtree.c new file mode 100644 index 000000000..d8d900b9e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/addrtree.c @@ -0,0 +1,1944 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file addrtree.c + */ + +#include "gpu/mem_mgr/phys_mem_allocator/addrtree.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h" +#include "utils/nvprintf.h" +#include "utils/nvassert.h" +#include "nvport/nvport.h" +#include "nvmisc.h" + +// Returns if (x, x+y-1) contains (u, u+v-1) in one dimention +#define RANGE_CONTAINS(x, y, u, v) ((x <= u) && ((x + y - 1) >= (u + v - 1))) + +static PMA_PAGESTATUS pmaAddrtreeReadLevel(void *pMap, NvU32 levelNum, NvU64 frameNum, NvBool bReadAttrib); +static void _addrtreeUpdateAncestors(PMA_ADDRTREE *pTree, ADDRTREE_NODE *pNode, PMA_PAGESTATUS newStateMask); +static void _addrtreeConvertLevelFrameToNodeIndex(PMA_ADDRTREE *pTree, NvU32 levelNum, NvU64 frameNum, + ADDRTREE_NODE **ppNode, NvU32 *pIndex); +PMA_PAGESTATUS _pmaAddrtreeReadLevelAndSkipUnavailable(void *pMap, NvU32 levelNum, NvU64 frameNum, + PMA_PAGESTATUS searchState, NvBool bAllowFree, + NvU64 *pNumFramesToSkip); + +static NvU64 alignUpToMod(NvU64 frame, NvU64 alignment, NvU64 mod) +{ + if ((frame & (alignment - 1)) <= mod) + return NV_ALIGN_DOWN(frame, alignment) + mod; + else + return NV_ALIGN_UP(frame, alignment) + mod; +} + +static NvU32 +addrtreeGetTreeLevel(NvU64 pageSize) +{ + NvU32 level = 0; + switch (pageSize) + { + case _PMA_64KB: level = 5; break; + // 128KB is supported in wrapper routines + case _PMA_2MB: level = 4; break; + case _PMA_512MB: level = 2; break; + default: break; + } + + NV_ASSERT(level != 0); + return level; +} + +static void +pmaAddrtreePrintLevel(ADDRTREE_LEVEL *pLevel) +{ + NvU32 i; + NvU32 mapIndex = MAP_IDX_ALLOC_PIN; + ADDRTREE_NODE *pNode = NULL; + + for (i = 0; i < pLevel->nodeCount; i++) + { + pNode = &pLevel->pNodeList[i]; + NV_PRINTF(LEVEL_INFO, "S[%d]=0x%llx A[%d]=0x%llx\n", i, + pNode->seeChild[mapIndex], i, pNode->state[mapIndex]); + + // In case 
compiler complains when the above print is compiled out + pNode = pNode; + mapIndex = mapIndex; + } +} + +void pmaAddrtreePrintTree(void *pMap, const char* str) +{ + NvU32 i; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + NV_PRINTF(LEVEL_INFO, "%s ==== \n", str); + + for (i = 0; i < pTree->levelCount - 1; i++) + { + NV_PRINTF(LEVEL_INFO, "Level [%d]\n", i); + pmaAddrtreePrintLevel(&pTree->levels[i]); + } + NV_PRINTF(LEVEL_INFO, "END printing Tree ==== \n"); +} + +static NvU64 +_makeMaskUpToIndex(NvU32 index) +{ + NV_ASSERT(index <= 64); + if (index == 64) + { + return NV_U64_MAX; + } + else + { + return (1ULL << index) - 1; + } +} + +void * +pmaAddrtreeInit +( + NvU64 numFrames, + NvU64 addrBase, + PMA_STATS *pPmaStats, + NvBool bProtected +) +{ + // + // Hardcoding this for now to get things started + // TODO: use more flexible configs + // + NvU32 levelSizes[] = {_TREE_2TB, _TREE_32GB, _TREE_512MB, _TREE_128MB, _TREE_2MB, _TREE_64KB}; + NvU32 levelCount = sizeof(levelSizes) / sizeof(levelSizes[0]); + //NV_PRINTF(LEVEL_INFO, "numFrames: 0x%llx, addrBase 0x%llx \n", numFrames, addrBase); + + NvU64 totalNodeCount, num2mbPages; + NvU32 i; + PMA_ADDRTREE *newTree; + NvU64 numFramesToAllocate; + + // PMA already ensures this + NV_ASSERT(NV_IS_ALIGNED(addrBase, PMA_GRANULARITY)); + + newTree = (PMA_ADDRTREE*)portMemAllocNonPaged((NvLength)sizeof(struct pma_addrtree)); + if (newTree == NULL) + { + return NULL; + } + portMemSet(newTree, 0, (NvLength)sizeof(*newTree)); + newTree->levels = NULL; + newTree->root = NULL; + + // Allocate the levels + newTree->levels = (ADDRTREE_LEVEL *)portMemAllocNonPaged((NvLength)(levelCount * sizeof(ADDRTREE_LEVEL))); + if (newTree->levels == NULL) + { + goto error; + } + portMemSet(newTree->levels, 0, (NvLength)(levelCount * sizeof(ADDRTREE_LEVEL))); + + newTree->levelCount = levelCount; + newTree->totalFrames = numFrames; + num2mbPages = numFrames / (_PMA_2MB >> PMA_PAGE_SHIFT); + + pPmaStats->numFreeFrames += newTree->totalFrames; + pPmaStats->num2mbPages += num2mbPages; + pPmaStats->numFree2mbPages += num2mbPages; + + newTree->bProtected = bProtected; + newTree->pPmaStats = pPmaStats; + + // + // Now pad the beginning of addrtree, and round down to the largest + // single page allocation supported so that its level will be aligned. + // The biggest single page is 512 MB currently. + // + newTree->numPaddingFrames = (addrBase - NV_ALIGN_DOWN64(addrBase, _PMA_512MB)) >> PMA_PAGE_SHIFT; + numFramesToAllocate = newTree->totalFrames + newTree->numPaddingFrames; + + // Allocate each level and calculate number of nodes needed + totalNodeCount = 0; + for (i = 0; i < levelCount; i++) + { + ADDRTREE_LEVEL *cur = &newTree->levels[i]; + cur->nodeCount = numFramesToAllocate >> (levelSizes[i] - PMA_PAGE_SHIFT); + cur->pageSizeShift = levelSizes[i]; + + // To be a tree, there needs to be one root node + if (i == 0 && cur->nodeCount != 0) + { + NV_PRINTF(LEVEL_ERROR, "Total memory is > 2TB. 
PMA Address Tree cannot account for this much.\n"); + goto error; + } + + // The 64KB level is a placeholder level which doesn't need to be allocated + if (i == levelCount - 1) + { + break; + } + + // Always allocate at least 1 node for each level and account for non-full nodes + if ((cur->nodeCount == 0) || (numFramesToAllocate % (1ULL << (levelSizes[i] - PMA_PAGE_SHIFT)) != 0)) + cur->nodeCount++; + + totalNodeCount += cur->nodeCount; + + //NV_PRINTF(LEVEL_INFO, "Level %d: nodeCount: %d, totalNodeCount: %d\n", i, cur->nodeCount, totalNodeCount); + } + + // Allocate all the nodes needed in a linear array + newTree->root = (ADDRTREE_NODE *)portMemAllocNonPaged((NvLength)(totalNodeCount * sizeof(ADDRTREE_NODE))); + if (newTree->root == NULL) + { + goto error; + } + portMemSet(newTree->root, 0, (NvLength)(totalNodeCount * sizeof(ADDRTREE_NODE))); + newTree->root->parent = NULL; + + NvU32 curIdx = 0; + + // Skip the last level because we don't really need to allocate 64K level + for (i = 0; i < levelCount - 1; i++) + { + NvU64 nextLevelStart = curIdx + newTree->levels[i].nodeCount; + // + // The maxChildren a node can have is a factor of + // the difference in page sizes between levels + // + NvU32 maxChildren = 1U<<(levelSizes[i] - levelSizes[i + 1]); + newTree->levels[i].maxFramesPerNode = maxChildren; + + //NV_PRINTF(LEVEL_INFO, "Level %d: maxChildren per node: %d\n", i, maxChildren); + + NvU32 j; + for (j = 0; j < newTree->levels[i].nodeCount; j++) + { + ADDRTREE_NODE *curNode = &newTree->root[curIdx]; + + // Register first node in the level structure + if (j == 0) + { + newTree->levels[i].pNodeList = curNode; + } + + // Populate curNode + curNode->level = i; + curNode->frame = maxChildren * j; + + // All nodes before this node must have maxChildren + NvU64 childrenNodeIdx = nextLevelStart + j * maxChildren; + curNode->children = &newTree->root[childrenNodeIdx]; + + // The last node may not have maxChildren, calculate how many it does have + // OK to just cast because we know numChildren must be at most 64 + NvU32 lastNodeNumValidChildren = (NvU32)(newTree->levels[i+1].nodeCount - (j * maxChildren)); + NvU32 numValidChildren = 0; + + // + // If this is not the last node in a level, + // then it must have maxChildren. + // Otherwise calculate how many it does have. 
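+            // For example, if the last node has maxChildren == 64 but only 10
+            // valid children, the mask computed below is
+            // _makeMaskUpToIndex(64) & ~_makeMaskUpToIndex(10) == ~0x3FFULL,
+            // i.e. child bits 10..63 are treated as permanently pinned.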
+ // + if (j != newTree->levels[i].nodeCount - 1) + { + curNode->numChildren = maxChildren; + numValidChildren = maxChildren; + } + else + { + curNode->numChildren = maxChildren; + numValidChildren = lastNodeNumValidChildren; + + // + // Mark invalid children as allocated so that reads of partial nodes + // do not return that they are available + // + NvU64 invalidChildrenMask = _makeMaskUpToIndex(maxChildren) & ~_makeMaskUpToIndex(lastNodeNumValidChildren); + curNode->state[MAP_IDX_ALLOC_PIN] = invalidChildrenMask; + _addrtreeUpdateAncestors(newTree, curNode, STATE_PIN); + } + + // Populate curNode->children[*]->parent, except for the last level + if (i != levelCount - 2) + { + NvU32 k; + + for (k = 0; k < numValidChildren; k++) + { + ADDRTREE_NODE *curChild = &curNode->children[k]; + curChild->parent = curNode; + } + } + + curIdx++; + } + } + + return (void *)newTree; + +error: + pmaAddrtreeDestroy(newTree); + return NULL; +} + +void pmaAddrtreeDestroy(void *pMap) +{ + NvU64 num2mbPages; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + NV_ASSERT_OR_RETURN_VOID(pTree != NULL); + + num2mbPages = pTree->totalFrames / (_PMA_2MB >> PMA_PAGE_SHIFT); + pTree->pPmaStats->numFreeFrames -= pTree->totalFrames; + pTree->pPmaStats->numFree2mbPages -= num2mbPages; + + portMemFree(pTree->root); + portMemFree(pTree->levels); + + portMemFree(pTree); +} + +static void +_addrtreeConvertFrame(PMA_ADDRTREE *pTree, NvU32 sourceLevel, NvU64 sourceFrame, NvU32 targetLevel, NvU64 *pTargetFrame) +{ + // Converting up the tree, frame number goes down + if (sourceLevel > targetLevel) + { + *pTargetFrame = sourceFrame >> (pTree->levels[targetLevel+1].pageSizeShift - pTree->levels[sourceLevel+1].pageSizeShift); + } + else + { + *pTargetFrame = sourceFrame << (pTree->levels[sourceLevel+1].pageSizeShift - pTree->levels[targetLevel+1].pageSizeShift); + } +} + +// +// Given a node and index into its children array, whether this node is +// on the target level, the current accumulated status, and the current +// valid status mask, read the status of this node index and return +// - The remaining mask of children that will still need to be +// read to determine the state at a lower level of the tree in +// pStatusMask +// - The accumulated status combined with this node index's status +// +// To read the effective value of a node, callers must start with their +// desired status mask, call getNodeIndexStatus, and use the returned +// status mask to know what states still need to be read at the +// next level down in the tree. +// +// If statusMask is 0 upon return, reading children will give no more information +// That is, shouldCheckChildren == (*pStatusMask != 0) +// +// If the caller wants to keep reading status downwards in the tree, it must +// call getNodeIndexStatus with bIsTargetLevel = NV_FALSE. 
Here is pseudocode +// for reading a node: +// *pCumulativeStatus = 0; +// *pStatusMask = MAP_MASK; +// while (!targetLevel) { +// getNodeIndexStatus(node, index, NV_FALSE, pStatusMask, pCumulativeStatus); +// // increment while condition, typically the following: +// if (!shouldCheckChildren(stateMask)) +// { +// // do logic +// goto exit; +// } +// } +// +// // At target level +// getNodeIndexStatus(node, index, NV_TRUE, pStatusMask, pCumulativeStatus); +// +static void +getNodeIndexStatus +( + ADDRTREE_NODE *node, + NvU32 index, + NvBool bIsTargetLevel, + PMA_PAGESTATUS *pStatusMask, + PMA_PAGESTATUS *pCumulativeStatus +) +{ + PMA_PAGESTATUS curStatusMask = *pStatusMask; + PMA_PAGESTATUS nextStatusMask = 0; + NvU64 state = 0; + NvU32 i; + + NV_ASSERT(index < node->numChildren); + + for (i = 0; i < PMA_BITS_PER_PAGE; i++) + { + if (curStatusMask & (NVBIT(i))) + { + if (node->seeChild[i] & NVBIT64(index)) + { + nextStatusMask |= NVBIT64(i); + } + + // Assert that either state or seeChild is set, not both + NV_ASSERT(((node->seeChild[i] & node->state[i]) & (NVBIT64(index))) == 0); + + // + // But state does need to be accurate and returned as PMA_PAGESTATUS + // seeChild must be checked to get an accurate state + // + if ((node->state[i] | node->seeChild[i]) & (NVBIT64(index))) + { + state |= NVBIT64(i); + } + } + } + + //NV_PRINTF(LEVEL_ERROR, "curStatusMask=0x%llx, nextStatusMask=0x%llx, index=0x%x, state=0x%llx\n", + // (NvU64)curStatusMask, nextStatusMask, index, state); + + *pStatusMask = nextStatusMask; + + if (bIsTargetLevel) + { + *pCumulativeStatus |= state; + } + else + { + *pCumulativeStatus |= state & ~nextStatusMask; + } +} + +static NvBool +shouldCheckChildren(PMA_PAGESTATUS statusMask) +{ + return statusMask != 0; +} + +// This function returns if the current node holds valid information. 
+// If not, returns the effective state of this node +static NvBool +_addrtreeNodeValid +( + PMA_ADDRTREE *pTree, + ADDRTREE_NODE *node, + PMA_PAGESTATUS *pFoundState +) +{ + ADDRTREE_NODE *n = pTree->root; + NvU64 newFrame = 0; + NvU32 newIndex = 0; + *pFoundState = STATE_FREE; + PMA_PAGESTATUS stateMask = MAP_MASK; // check all states TODO + + //NV_PRINTF(LEVEL_INFO, "Source level=%d frame=0x%llx.\n", + // node->level, node->frame); + + while(n->level != node->level) + { + _addrtreeConvertFrame(pTree, node->level, node->frame, n->level, &newFrame); + newIndex = (NvU32)(newFrame - n->frame); + + getNodeIndexStatus(n, newIndex, NV_FALSE, &stateMask, pFoundState); + + if (!shouldCheckChildren(stateMask)) + { + return NV_FALSE; + } + + // Go to the right children to continue walking down + n = &n->children[newIndex]; + } + + return NV_TRUE; + +} + +static NvU64 +_addrtreeComputeMask(ADDRTREE_NODE *node, NvU64 frameStart, NvU64 numFrames) +{ + NV_ASSERT(node->numChildren <= 64); + + NvU64 mask = _makeMaskUpToIndex(node->numChildren); + + // If node is contained within the range, return a full mask + if (RANGE_CONTAINS(frameStart, numFrames, node->frame, node->numChildren)) + { + //NV_PRINTF(LEVEL_ERRORS, "frameStart=0x%llx, numFrames=0x%llx, node: start=0x%llx, num=0x%x, mask=0x%llx\n", + // frameStart, numFrames, node->frame, node->numChildren, mask); + return mask; + } + else + { + // If the node doesn't cover the start, unset the beginning bits + if (frameStart > node->frame) + { + mask &= (~_makeMaskUpToIndex((NvU32)(frameStart - node->frame))); + } + + // If the node doesn't cover the end, unset the last bits + if (frameStart + numFrames < node->frame + node->numChildren) + { + mask &= (_makeMaskUpToIndex((NvU32)(frameStart + numFrames - node->frame))); + } + } + return mask; +} + +// +// For the given state, for the mask of a node's children, +// the function returns +// +// NV_TRUE if the mask is available +// NV_FALSE if the mask is not available +// and the last child index which does not satisfy the given state +// +static NvBool +_addrtreeNodeMaskAvailable( + ADDRTREE_NODE *node, + NvU64 mask, + PMA_PAGESTATUS state, + NvU64 *pDiff +) +{ + NvU64 allocated = 0; + NvU32 i; + + for (i = 0; i < PMA_BITS_PER_PAGE; i++) + { + // + // allocated tracks which pages are not available. + // We are looking for pages in free or state status. + // Pages in state status are available, so exclude them. + // + if ((NVBIT(i) & state) == 0) + { + // Note that once we see a seeChild being set, we would + // count that as allocated because some children are allocated + allocated |= node->seeChild[i]; + allocated |= node->state[i]; + } + } + + allocated &= mask; + + // + // Skip past all unavailable and return last child index + // which does not satisfy the given state + // This will underflow for allocated == 0, but in that case, + // *pDiff should not be read anyway + // + *pDiff = 64 - portUtilCountLeadingZeros64(allocated) - 1; + + return (allocated == 0); +} + +// This function returns the node on a specific level that contains the frame +// The node may or may not contain valid information. That is handled later. 
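+// For example, on a level where each node covers 64 frames
+// (maxFramesPerNode == 64), frame 130 is held by node 130 / 64 = 2.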
+static NvU64 +_addrtreeGetNodeIdx(PMA_ADDRTREE *pTree, NvU32 level, NvU64 frame) +{ + ADDRTREE_LEVEL *treeLevel = &pTree->levels[level]; + + // Current level's frame number should be the same as next level's node number + NV_ASSERT(frame < pTree->levels[level+1].nodeCount); + + return (frame / (treeLevel->maxFramesPerNode)); +} + +// +// Optimization: enable scanning functions to skip over fully allocated +// parent nodes with _pmaAddrtreeReadLevelAndSkipUnavailable +// +// For a given level and startFrame, return the number of frames to skip on the +// given level based on the parent's allocation state. +// Add this number to skip to the startFrame to get the next node which is not +// fully allocated. +// +PMA_PAGESTATUS +_pmaAddrtreeReadLevelAndSkipUnavailable +( + void *pMap, + NvU32 levelNum, + NvU64 frameNum, + PMA_PAGESTATUS searchState, + NvBool bAllowFree, + NvU64 *pNumFramesToSkip +) +{ + NvU32 index; + ADDRTREE_NODE *pNode; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + ADDRTREE_NODE *n = pTree->root; + NvU64 newFrame = 0; + NvU32 newIndex; + PMA_PAGESTATUS status = STATE_FREE; + PMA_PAGESTATUS stateMask = MAP_MASK; + + _addrtreeConvertLevelFrameToNodeIndex(pTree, levelNum, frameNum, &pNode, &index); + + while (n->level != pNode->level) + { + _addrtreeConvertFrame(pTree, pNode->level, pNode->frame, n->level, &newFrame); + newIndex = (NvU32)(newFrame - n->frame); + + getNodeIndexStatus(n, newIndex, NV_FALSE, &stateMask, &status); + + if (!shouldCheckChildren(stateMask)) + { + // + // There's no need to scan further down. + // Return the number of frames to skip over on the target level + // in order to skip over this n->level ancestor. + // + + NvBool bWrongState = (status != searchState) && !(bAllowFree && (status == STATE_FREE)); + + if (bWrongState) + { + // + // This node is fully allocated. + // Return the number of frames of the target level we should skip. + // + NvU64 targetFrameStartForThisNode; + NvU64 numTargetFramesPerAncestor; + + _addrtreeConvertFrame(pTree, n->level, newFrame, levelNum, &targetFrameStartForThisNode); + _addrtreeConvertFrame(pTree, n->level, 1ULL, levelNum, &numTargetFramesPerAncestor); + + *pNumFramesToSkip = numTargetFramesPerAncestor - (frameNum - targetFrameStartForThisNode); + } else { + // + // This node is in a state we're searching for. + // The caller will store off one frame of the target level + // + *pNumFramesToSkip = 1; + } + + goto exit; + } + + // Go to the right children to continue walking down + n = &n->children[newIndex]; + } + + *pNumFramesToSkip = 1; + + // Extract the final value from this pNode + getNodeIndexStatus(pNode, index, NV_TRUE, &stateMask, &status); + +exit: + NV_ASSERT(*pNumFramesToSkip > 0); + return status; +} + +// +// For the given state, between frameStart and (frameStart + numFrames - 1) +// for a given level, this function returns +// +// NV_TRUE if the range is available +// NV_FALSE if the range is not available +// and the frame number of the last frame which does not +// satisfy the given state in the variable pDiff +// +// XXX: Caution! This will not properly pick up nodes only in state `state` +// If it were used for discontig scanning, it would be wrong! 
+// +static NvBool +_pmaAddrtreeAvailable +( + PMA_ADDRTREE *pTree, + NvU32 level, + PMA_PAGESTATUS state, + NvU64 frameStart, + NvU64 numFrames, + NvU64 *pDiff +) +{ + NvU64 i, startIdx, endIdx, childrenMask; + PMA_PAGESTATUS foundState; + NvU64 nodeIndexDiff; + ADDRTREE_NODE *node; + + NV_ASSERT(level != 0); // TODO handle the root node case + //NV_PRINTF(LEVEL_INFO, "level=%d, frameStart=0x%llx, numFrames=%llx\n", + // level, frameStart, numFrames); + + startIdx = _addrtreeGetNodeIdx(pTree, level, frameStart); + endIdx = _addrtreeGetNodeIdx(pTree, level, (frameStart + numFrames - 1)); + + //NV_PRINTF(LEVEL_INFO, "startIdx = %llx, endIdx = 0x%llx\n", startIdx, endIdx); + + // Begin checking from the end so that we can skip the most frames in the overall search + for (i = endIdx; i >= startIdx; i--) + { + // Protect against underflow + if (i == (NvU64)-1) break; + + //NV_PRINTF(LEVEL_INFO, "IN LOOP: i=0x%llx, startIdx=%llx\n", i, startIdx); + node = &(pTree->levels[level].pNodeList[i]); + if (_addrtreeNodeValid(pTree, node, &foundState)) + { + childrenMask = _addrtreeComputeMask(node, frameStart, numFrames); + + if (!_addrtreeNodeMaskAvailable(node, childrenMask, state, &nodeIndexDiff)) + { + *pDiff = node->frame + nodeIndexDiff; + return NV_FALSE; + } + } + else + { + //NV_PRINTF(LEVEL_INFO, "IN LOOP: Node is INVALID. allocated?-%d\n", + // (NvU32)state); + if ((foundState != STATE_FREE) && (foundState != state)) + { + // This node is completely allocated. + // Return the frame number of the last frame in this node + *pDiff = node->frame + node->numChildren - 1; + return NV_FALSE; + } + else + { + // + // This node is completely free or in a state we're looking for, + // continue checking + // + continue; + } + } + } + + return NV_TRUE; +} + + +static NvBool +_pmaAddrtreeContigSearchLoop +( + PMA_ADDRTREE *pTree, + NvU32 level, + PMA_PAGESTATUS state, + NvU64 addrBase, + NvU64 localStart, + NvU64 localEnd, + NvU64 numFrames, + NvU64 frameAlignment, + NvU64 *freeList +) +{ + NvBool found = NV_FALSE; + NvU64 freeStart; + NvU64 diff; + PMA_PAGESTATUS startStatus, endStatus; + + if ((state != STATE_FREE) && (state != STATE_UNPIN)) + { + NV_PRINTF(LEVEL_INFO, "Scanning for state %d is not supported\n", state); + return found; + } + + freeStart = localStart; + while (!found) + { + NvU64 endFrame = freeStart + numFrames - 1; + NvU64 framesToSkip = 0; + + if (endFrame > localEnd) + { + // freeStart + numFrames too close to local search end. 
Re-starting search + break; + } + + // + // Read endStatus first so we don't have to waste time traversing the + // tree again to read startStatus if endStatus is not even usable + // + endStatus = _pmaAddrtreeReadLevelAndSkipUnavailable(pTree, level, endFrame, state, NV_TRUE, &framesToSkip); + + if (framesToSkip > 1) { + freeStart = NV_ALIGN_UP(endFrame + framesToSkip, frameAlignment); + NV_ASSERT(freeStart != 0); + continue; + } + + startStatus = _pmaAddrtreeReadLevelAndSkipUnavailable(pTree, level, freeStart, state, NV_TRUE, &framesToSkip); + + if (framesToSkip > 1) { + freeStart += NV_ALIGN_UP(framesToSkip, frameAlignment); + NV_ASSERT(freeStart != 0); + continue; + } + + if ((endStatus == STATE_FREE) || (endStatus == state)) + { + if ((startStatus == STATE_FREE) || (startStatus == state)) + { + if (_pmaAddrtreeAvailable(pTree, level, state, freeStart, numFrames, &diff)) + { + found = NV_TRUE; + // Substract off padding when returning + *freeList = addrBase + ((freeStart << pTree->levels[level+1].pageSizeShift) - + (pTree->numPaddingFrames << PMA_PAGE_SHIFT)); + //NV_PRINTF(LEVEL_INFO, "found! 0x%llx\n", freeStart); + } + else + { + //NV_PRINTF(LEVEL_INFO, "Frame number of allocated frame = 0x%llx\n", + // diff); + // + // Find the next aligned free frame and set it as the start + // frame for next iteration's scan. + // + freeStart = NV_ALIGN_UP(diff + 1, frameAlignment); + NV_ASSERT(freeStart != 0); + } + } + else + { + // Start point isn't free, so bump to check the next aligned frame + freeStart += frameAlignment; + } + } + else + { + // + // End point isn't usable, so jump to after the end to check again + // However, align the new start point properly before next iteration. + // + freeStart += NV_ALIGN_UP(numFrames, frameAlignment); + } + } + + return found; +} + +static NV_STATUS +_pmaAddrtreeScanContiguous +( + void *pMap, + NvU64 addrBase, + NvU64 rangeStart, + NvU64 rangeEnd, + NvU64 numPages, + NvU64 *freeList, + NvU32 pageSize, + NvU64 alignment, + NvU64 *numPagesAlloc, + NvBool bSkipEvict +) +{ + NvU64 localStart, localEnd, frameAlignment; + NvBool found; + NvU32 level; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + + // This requirement is ensured in PMA + NV_ASSERT(alignment >= pageSize && portUtilIsPowerOfTwo(alignment)); + + // Only focus on the level above the pageSize level. Children are ignored. 
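+    // For example, _PMA_2MB maps to tree level 4, so the search loop below
+    // runs on level - 1 = 3 (the 128MB level), where every child bit of a
+    // node stands for one whole 2MB page; _PMA_64KB maps to level 5 and is
+    // scanned on the 2MB level, whose 32 child bits are individual 64KB frames.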
+ level = addrtreeGetTreeLevel(pageSize); + if (level == 0) + { + NV_PRINTF(LEVEL_ERROR, "address tree cannot handle page size 0x%x\n", + pageSize); + return NV_ERR_INVALID_ARGUMENT; + } + + frameAlignment = alignment / pageSize; + + // Handle restricted allocations + if (rangeStart != 0 || rangeEnd != 0) + { + rangeStart += (pTree->numPaddingFrames << PMA_PAGE_SHIFT); + rangeEnd += (pTree->numPaddingFrames << PMA_PAGE_SHIFT); + localStart = NV_ALIGN_UP64(rangeStart, alignment) >> pTree->levels[level].pageSizeShift; + localEnd = NV_MIN(rangeEnd >> pTree->levels[level].pageSizeShift, pTree->levels[level].nodeCount - 1); + } + else + { + localStart = NV_ALIGN_UP64(pTree->numPaddingFrames << PMA_PAGE_SHIFT, alignment) >> pTree->levels[level].pageSizeShift; + localEnd = pTree->levels[level].nodeCount - 1; + } + + //NV_PRINTF(LEVEL_INFO, "Scanning level %d with addrBase 0x%llx in frame range 0x%llx..0x%llx, " + // "pages to allocate 0x%llx (pageSize=0x%x, alignment=0x%x)\n", + // (level - 1), addrBase, localStart, localEnd, numPages, pageSize, alignment); + + found = _pmaAddrtreeContigSearchLoop(pTree, level - 1, STATE_FREE, addrBase, localStart, localEnd, + numPages, frameAlignment, freeList); + if (found) + { + *numPagesAlloc = numPages; + return NV_OK; + } + + *numPagesAlloc = 0; + + if (bSkipEvict) + { + return NV_ERR_NO_MEMORY; + } + + // Loop back to the beginning and continue searching for evictable pages + found = _pmaAddrtreeContigSearchLoop(pTree, level - 1, STATE_UNPIN, addrBase, localStart, localEnd, + numPages, frameAlignment, freeList); + if (found) + return NV_ERR_IN_USE; + else + return NV_ERR_NO_MEMORY; +} + +// +// This function wraps the real _pmaAddrtreeScanContiguous +// to allow addrtree to scan for 128KB page size +// +NV_STATUS +pmaAddrtreeScanContiguous +( + void *pMap, + NvU64 addrBase, + NvU64 rangeStart, + NvU64 rangeEnd, + NvU64 numPages, + NvU64 *freeList, + NvU32 pageSize, + NvU64 alignment, + NvU64 *numPagesAlloc, + NvBool bSkipEvict +) +{ + if (NV_UNLIKELY(pageSize == _PMA_128KB)) { + // + // Call the contig function with twice as many 64KB frames, + // and cut in half the number of allocated frames. + // Contig allocations are all or nothing so the number of + // allocated frames cannot be odd. + // + NV_STATUS status; + + status = _pmaAddrtreeScanContiguous( + pMap, + addrBase, + rangeStart, + rangeEnd, + numPages * 2, + freeList, + _PMA_64KB, + alignment, + numPagesAlloc, + bSkipEvict); + + *numPagesAlloc /= 2; + + return status; + } + else + { + return _pmaAddrtreeScanContiguous( + pMap, + addrBase, + rangeStart, + rangeEnd, + numPages, + freeList, + pageSize, + alignment, + numPagesAlloc, + bSkipEvict); + } +} + +static NvU64 +_pmaAddrtreeDiscontigSearchLoop +( + PMA_ADDRTREE *pTree, + NvU32 level, + PMA_PAGESTATUS state, + NvU64 addrBase, + NvU64 localStart, + NvU64 localEnd, + NvU64 numFrames, + NvU64 frameAlignment, + NvU64 *freeList +) +{ + NvU64 found = 0; + NvU64 freeStart; + PMA_PAGESTATUS startStatus; + + if ((state != STATE_FREE) && (state != STATE_UNPIN)) + { + NV_PRINTF(LEVEL_INFO, "Scanning for state %d is not supported\n", state); + return found; + } + + freeStart = localStart; + + // + // We only need one frame at a time on this level, + // so we can skip much of the frame logic + // + while (found != numFrames) + { + NvU64 framesToSkip = 0; + + if (freeStart > localEnd) break; + + // + // For discontig, we MUST only pick up the exact state. + // Otherwise we give away pages for eviction that we already stored off to be allocated. 
+ // + startStatus = _pmaAddrtreeReadLevelAndSkipUnavailable(pTree, level, freeStart, state, NV_FALSE, &framesToSkip); + + if (startStatus == state) + { + // Substract off padding when returning + freeList[found++] = addrBase + ((freeStart << pTree->levels[level+1].pageSizeShift) - + (pTree->numPaddingFrames << PMA_PAGE_SHIFT)); + } + freeStart += framesToSkip; + } + + return found; +} + +static NV_STATUS +_pmaAddrtreeScanDiscontiguous +( + void *pMap, + NvU64 addrBase, + NvU64 rangeStart, + NvU64 rangeEnd, + NvU64 numPages, + NvU64 *freeList, + NvU32 pageSize, + NvU64 alignment, + NvU64 *numPagesAlloc, + NvBool bSkipEvict +) +{ + NvU64 localStart, localEnd; + NvU64 foundFree; + NvU64 foundEvictable; + NvU32 level; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + + // This requirement is ensured in PMA + NV_ASSERT(alignment == pageSize); + + // Only focus on the level above the pageSize level. Children are ignored. + level = addrtreeGetTreeLevel(pageSize); + if (level == 0) + { + NV_PRINTF(LEVEL_ERROR, "address tree cannot handle page size 0x%x\n", + pageSize); + return NV_ERR_INVALID_ARGUMENT; + } + + // Handle restricted allocations + if (rangeStart != 0 || rangeEnd != 0) + { + // Embedded % requires special handling. + NV_ASSERT_OR_ELSE_STR(rangeStart % pageSize == 0, + "rangeStart %% pageSize == 0", /*do nothing*/); + NV_ASSERT_OR_ELSE_STR((rangeEnd + 1) % pageSize == 0, + "(rangeEnd + 1) %% pageSize == 0", /*do nothing*/); + + rangeStart += (pTree->numPaddingFrames << PMA_PAGE_SHIFT); + rangeEnd += (pTree->numPaddingFrames << PMA_PAGE_SHIFT); + localStart = NV_ALIGN_UP(rangeStart, alignment) >> pTree->levels[level].pageSizeShift; + localEnd = NV_MIN(rangeEnd >> pTree->levels[level].pageSizeShift, pTree->levels[level].nodeCount - 1); + } + else + { + localStart = NV_ALIGN_UP64(pTree->numPaddingFrames << PMA_PAGE_SHIFT, alignment) >> pTree->levels[level].pageSizeShift; + localEnd = pTree->levels[level].nodeCount - 1; + } + + //NV_PRINTF(LEVEL_INFO, "Scanning level %d with addrBase 0x%llx in frame range 0x%llx..0x%llx, " + // "pages to allocate 0x%llx (pageSize=0x%x, alignment=0x%x)\n", + // (level - 1), addrBase, localStart, localEnd, numPages, pageSize, alignment); + + foundFree = _pmaAddrtreeDiscontigSearchLoop(pTree, level - 1, STATE_FREE, addrBase, localStart, localEnd, + numPages, alignment, freeList); + + + // numPagesAlloc must be set for partial allocations + *numPagesAlloc = foundFree; + + if (foundFree == numPages) + { + return NV_OK; + } + else if (bSkipEvict) + { + return NV_ERR_NO_MEMORY; + } + + // + // Loop back to the beginning and continue searching for evictable pages + // This next search picks up only evictable pages and not free pages + // + foundEvictable = _pmaAddrtreeDiscontigSearchLoop(pTree, level - 1, STATE_UNPIN, addrBase, localStart, localEnd, + (numPages - foundFree), alignment, (freeList + foundFree)); + + if ((foundFree + foundEvictable) == numPages) + return NV_ERR_IN_USE; + else + return NV_ERR_NO_MEMORY; +} + +// +// This function wraps the real _pmaAddrtreeScanDiscontiguous +// to allow addrtree to scan for 128KB page size +// +NV_STATUS +pmaAddrtreeScanDiscontiguous +( + void *pMap, + NvU64 addrBase, + NvU64 rangeStart, + NvU64 rangeEnd, + NvU64 numPages, + NvU64 *freeList, + NvU32 pageSize, + NvU64 alignment, + NvU64 *numPagesAlloc, + NvBool bSkipEvict +) +{ + if (NV_UNLIKELY(pageSize == _PMA_128KB)) { + NV_STATUS status = NV_OK; + NvU64 i; + NvU64 localNumPagesAlloc; + *numPagesAlloc = 0; + + if (rangeEnd == 0) { + NV_ASSERT(rangeStart == 0); + 
rangeEnd = ((PMA_ADDRTREE *)pMap)->totalFrames << PMA_PAGE_SHIFT; + } + + for (i = 0; i < numPages; i++) { + // + // Only call the contig function because we need the two frames to be + // contiguous. + // Alignment only needs to be aligned to 64KB and power of 2, + // so it is ok to pass through even if it is 128KB + // + status = _pmaAddrtreeScanContiguous( + pMap, + addrBase, + rangeStart, + rangeEnd, + 2, + freeList + i, + _PMA_64KB, + alignment, + &localNumPagesAlloc, + bSkipEvict); + + // Give back the first of every two 64KB frames + *numPagesAlloc += localNumPagesAlloc / 2; + + if (status != NV_OK) + { + return status; + } + + rangeStart = freeList[i] + _PMA_128KB; + } + + return status; + } + else + { + return _pmaAddrtreeScanDiscontiguous( + pMap, + addrBase, + rangeStart, + rangeEnd, + numPages, + freeList, + pageSize, + alignment, + numPagesAlloc, + bSkipEvict); + } +} + +// Either set or clear a specified bit in old and return the result +static NvU64 +replaceBit(NvU64 old, NvU32 bit, NvBool bIsSet) +{ + NV_ASSERT(bit < 64); + NvU64 mask = NVBIT64(bit); + return bIsSet ? (old | mask) : (old & ~mask); +} + +static void +_addrtreeUpdateAncestors +( + PMA_ADDRTREE *pTree, + ADDRTREE_NODE *pNode, + PMA_PAGESTATUS newStateMask +) +{ + ADDRTREE_NODE *pParent; + NvU32 i; + NvU64 newFrame; + NvU32 newIndex; + PMA_PAGESTATUS stateMask; + NvU64 fillPattern; + + pParent = pNode->parent; + + while (pParent != NULL) + { + //NV_PRINTF(LEVEL_INFO, "Current level %d Maybe updating parent at level %d newStateMask=%x\n", + // pNode->level, pNode->level - 1, newStateMask); + + if (newStateMask == 0) + break; + + fillPattern = _makeMaskUpToIndex(pNode->numChildren); + _addrtreeConvertFrame(pTree, pNode->level, pNode->frame, pParent->level, &newFrame); + newIndex = (NvU32)(newFrame - pParent->frame); + + for (i = 0; i < PMA_BITS_PER_PAGE; i++) + { + stateMask = NVBIT(i); + + if (stateMask & newStateMask) + { + NvBool bSeeChild; + NvBool bSetState; + NvU64 newSeeChild; + NvU64 newState; + + // Calculate what this node should look like to the parent + bSetState = NV_FALSE; + bSeeChild = NV_TRUE; + if (pNode->seeChild[i] == 0) + { + if (pNode->state[i] == fillPattern) + { + bSetState = NV_TRUE; + bSeeChild = NV_FALSE; + } + else if (pNode->state[i] == 0) + { + bSeeChild = NV_FALSE; + } + } + + newSeeChild = replaceBit(pParent->seeChild[i], newIndex, bSeeChild); + newState = replaceBit(pParent->state[i], newIndex, bSetState); + + //NV_PRINTF(LEVEL_INFO, "bSetState %d bSeeChild %d newSeeChild=0x%llx newState=0x%llx \n", + // bSetState, bSeeChild, newSeeChild, newState); + + // + // If the parent won't change from this update, do not do any further + // checking on this state: remove it from the mask + // + if (pParent->seeChild[i] == newSeeChild && pParent->state[i] == newState) + { + newStateMask &= ~stateMask; + continue; + } + + //NV_PRINTF(LEVEL_INFO, "update parent\n"); + + pParent->seeChild[i] = newSeeChild; + pParent->state[i] = newState; + + } + } + + pNode = pParent; + pParent = pNode->parent; + } + +} + +// +// This function traverses the tree and changes the state of the frame at `index` +// in `node` to the desired states and change all its ancestors to set seeChild. +// Note that this function also makes `node` a valid node unconditionally. 
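+// For example, pinning a single 64KB frame at index 5 of an otherwise free
+// 2MB node clears bit 5 in that node's seeChild mask, sets bit 5 in its
+// ALLOC_PIN state mask, and then _addrtreeUpdateAncestors() sets the
+// corresponding seeChild bit in every ancestor on the path, because the node
+// is now neither fully set nor fully clear for that state.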
+// +static void +_addrtreeSetState +( + PMA_ADDRTREE *pTree, + ADDRTREE_NODE *pNode, + NvU32 index, + PMA_PAGESTATUS newState, + PMA_PAGESTATUS newStateMask +) +{ + ADDRTREE_NODE *n = pTree->root; + ADDRTREE_NODE *pChildNode; + NvU32 i; + NvU64 newFrame = 0; + NvU32 newIndex; + NvU64 stateMask; + NvU64 childMask = 0; + + //NV_PRINTF(LEVEL_INFO, "Source level=%d frame=0x%llx.\n", + // pNode->level, pNode->frame); + NV_ASSERT(index < pNode->numChildren); + + // Walk down from root and update all its ancestors + while(n->level != pNode->level) + { + _addrtreeConvertFrame(pTree, pNode->level, pNode->frame, n->level, &newFrame); + newIndex = (NvU32)(newFrame - n->frame); + + //NV_PRINTF(LEVEL_INFO, "Going to traverse level=%d newFrame=0x%x newIndex=0x%x \n", + // n->level, newFrame, newIndex); + + childMask = NVBIT64(newIndex); + pChildNode = &n->children[newIndex]; + + for (i = 0; i < PMA_BITS_PER_PAGE; i++) + { + stateMask = NVBIT64(i); + + if (stateMask & newStateMask) + { + // + // If entire node has some state, update children to contain the same state + // Only update child state if this node's seeChild is 0 + // + if ((childMask & n->seeChild[i]) == 0) + { + if ((n->state[i] & childMask) != 0) + { + // + // Note that we may overwrite the pinned state of the edge cases here, + // but they will still be in the correct state since they cannot be the + // ones being freed. + // + pChildNode->state[i] = _makeMaskUpToIndex(pChildNode->numChildren); + } + else + { + // + // There is no risk of overwriting the ALLOC_PIN frames set to + // protect the end-of-region edge case. If seeChild == 0, these nodes + // and all ancestors must be in the set state, not the clear state + // because addrtree will not give out frames that are out of bounds + // + pChildNode->state[i] = 0; + } + + // Set the child's seeChild to 0 so we force update it next iteration + pChildNode->seeChild[i] = 0; + } + n->seeChild[i] |= childMask; + } + } + // Go to the right children to continue walking down + n = pChildNode; + } + + + //NV_PRINTF(LEVEL_INFO, "Setting pNode level=%d frame=0x%llx index=0x%x to state=0x%llx mask=0x%llx\n", + // pNode->level, pNode->frame, index, newState, newStateMask); + + // + // Important loop to actually set the state bits now. + // Do not refactor this unless you know what you are doing! + // Update the node, then go update the ancestors. + // + for (i = 0; i < PMA_BITS_PER_PAGE; i++) + { + stateMask = NVBIT64(i); + + if (stateMask & newStateMask) + { + // Set the current node index as valid since we are changing its states + pNode->seeChild[i] &= ~NVBIT64(index); + + if (stateMask & newStateMask & newState) + { + pNode->state[i] |= NVBIT64(index); + } + else + { + pNode->state[i] &= ~NVBIT64(index); + } + } + } + + _addrtreeUpdateAncestors(pTree, pNode, newStateMask); +} + +static PMA_PAGESTATUS +_addrtreeGetState +( + PMA_ADDRTREE *pTree, + ADDRTREE_NODE *node, + NvU32 index, + NvBool bReadAttrib +) +{ + ADDRTREE_NODE *n = pTree->root; + + NvU64 newFrame = 0; + NvU32 newIndex; + PMA_PAGESTATUS status = STATE_FREE; + PMA_PAGESTATUS stateMask = bReadAttrib ? 
MAP_MASK : STATE_MASK; + + while(n->level != node->level) + { + _addrtreeConvertFrame(pTree, node->level, node->frame, n->level, &newFrame); + newIndex = (NvU32)(newFrame - n->frame); + + //NV_PRINTF(LEVEL_INFO, "n->level=0x%x, node->level=0x%x\n", n->level, node->level); + + getNodeIndexStatus(n, newIndex, NV_FALSE, &stateMask, &status); + + if (!shouldCheckChildren(stateMask)) + { + return status; + } + + // Go to the right children to continue walking down + n = &n->children[newIndex]; + } + + // Extract the final value from this node + getNodeIndexStatus(node, index, NV_TRUE, &stateMask, &status); + + return status; +} + +// TODO: this is pretty similar to _addrtreeConvertFrame: maybe combine + +static void +_addrtreeConvertLevelFrameToNodeIndex +( + PMA_ADDRTREE *pTree, + NvU32 levelNum, + NvU64 frameNum, + ADDRTREE_NODE **ppNode, + NvU32 *pIndex +) +{ + NvU32 nodeIdx; + ADDRTREE_LEVEL *pLevel; + NvU32 framesPerNode; + + pLevel = &pTree->levels[levelNum]; + framesPerNode = pLevel->maxFramesPerNode; + + nodeIdx = (NvU32)(frameNum / framesPerNode); + *pIndex = (NvU32)(frameNum % framesPerNode); + *ppNode = &pLevel->pNodeList[nodeIdx]; + + NV_ASSERT(*pIndex < (*ppNode)->numChildren); +} + +//frameNum is a levelNum frame +static PMA_PAGESTATUS +pmaAddrtreeReadLevel +( + void *pMap, + NvU32 levelNum, + NvU64 frameNum, + NvBool bReadAttrib +) +{ + NvU32 index; + ADDRTREE_NODE *pNode; + PMA_PAGESTATUS state; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + + _addrtreeConvertLevelFrameToNodeIndex(pTree, levelNum, + frameNum, &pNode, &index); + + state = _addrtreeGetState(pTree, pNode, index, bReadAttrib); + return state; +} + +// +// This function returns if the node pointed to by pNode index +// has any seeChild bits set for those seeChild bits that are valid for this node. +// It also returns the value of the pNode index in pState +// This is very similar to _addrtreeNodeValid, but requires some tweaked logic +// This is necessary for a very specific check in _pmaAddrtreeChangePageStateAttribEx +// This node must have at least one more level beneath it! +// +static NvBool +_addrtreeNodeIndexHasSeeChildSet +( + PMA_ADDRTREE *pTree, + ADDRTREE_NODE *pNode, + NvU32 index, + PMA_PAGESTATUS *pState +) +{ + ADDRTREE_NODE *n = pTree->root; + NvU64 newFrame = 0; + NvU32 newIndex = 0; + *pState = STATE_FREE; + + // TODO: try this for only STATE_MASK, because stats will only + // get corrupted if the STATE_MASK values differ. 
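+    // For example, if the target 2MB child is uniformly free or uniformly
+    // pinned, no relevant seeChild bit survives the walk and this returns
+    // NV_FALSE, letting the caller take its optimized path; a 2MB child with
+    // a single 64KB frame pinned returns NV_TRUE.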
+ PMA_PAGESTATUS stateMask = MAP_MASK; // check all states TODO + + while(n->level != pNode->level) + { + _addrtreeConvertFrame(pTree, pNode->level, pNode->frame, n->level, &newFrame); + newIndex = (NvU32)(newFrame - n->frame); + + getNodeIndexStatus(n, newIndex, NV_FALSE, &stateMask, pState); + + if (!shouldCheckChildren(stateMask)) + { + // State is fully realized in an ancestor above the parent + return NV_FALSE; + } + + // Go to the right children to continue walking down + n = &n->children[newIndex]; + } + + // Extract the value from this pNode + getNodeIndexStatus(pNode, index, NV_FALSE, &stateMask, pState); + + // Now check if the final child has any seeChild set + if (shouldCheckChildren(stateMask)) + { + // Target child is different from parent + return NV_TRUE; + } + else + { + // State is fully realized in parent + return NV_FALSE; + } +} + +// frameNumStart is the 64k frameNum to start with +static void +__pmaAddrtreeChangePageStateAttribEx +( + void *pMap, + NvU64 frameNumStart, + NvU32 pageSize, + PMA_PAGESTATUS newState, + PMA_PAGESTATUS newStateMask +) +{ + NvU32 index; + ADDRTREE_NODE *pNode; + NvU32 targetLevelNum = addrtreeGetTreeLevel(pageSize) - 1; + NvU64 targetFrameNum; + NvU32 levelNum = addrtreeGetTreeLevel(_PMA_64KB) - 1; + NvU32 levelNum2mb = addrtreeGetTreeLevel(_PMA_2MB) - 1; + PMA_PAGESTATUS oldState, updatedState, oldState2mb, updatedState2mb; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + PMA_PAGESTATUS targetFoundState; + ADDRTREE_NODE *pTargetNode; + NvU32 targetIndex; + + frameNumStart += pTree->numPaddingFrames; + newStateMask &= MAP_MASK; + + _addrtreeConvertFrame(pTree, levelNum, frameNumStart, targetLevelNum, &targetFrameNum); + + _addrtreeConvertLevelFrameToNodeIndex(pTree, targetLevelNum, targetFrameNum, &pTargetNode, &targetIndex); + + // + // In address tree, if a node is partially allocated, any node above that node is considered fully + // allocated because you cannot allocate a node-aligned block at the higher level. + // Because of address tree structure, we don't get an accurate count of the number of frames + // that were allocated beforehand if we're overwriting some state. + // e.g. if a node is half allocated and then we allocate the rest of it at a higher level, + // then there's no way to know how many frames were allocated before without going downwards and reading them. + // Until something better is implemented, implement a heuristic wherein we only do the optimized case + // when the current node has no seeChild bits are set for thoese seeChild bits that are valid for this node. + // Also, we won't get any optimization out of doing this for 64KB, so skip it and avoid wasting time + // reading state. 
+ // Since stats only care about free/unpin/pin states,.we could perhaps relax this restriction + // + + if ((pageSize >= _PMA_2MB) && + !_addrtreeNodeIndexHasSeeChildSet(pTree, pTargetNode, targetIndex, &targetFoundState)) + { + // Do optimized case + + // Figure out how many 64KB frames and how many 2MB frames we're going to touch + NvU64 numFramesTouched = pageSize >> PMA_PAGE_SHIFT; + NvU64 num2mbFramesTouched = pageSize >> _TREE_2MB; + + updatedState = (targetFoundState & ~newStateMask) | (newState & newStateMask); + + _addrtreeSetState(pTree, pTargetNode, targetIndex, newState, newStateMask); + + // In this case, the states at the 2MB and 64KB levels are the same because we're changing at + // least a 2MB node that had no valid children + pmaStatsUpdateState(&pTree->pPmaStats->numFreeFrames, + numFramesTouched, targetFoundState, updatedState); + pmaStatsUpdateState(&pTree->pPmaStats->numFree2mbPages, + num2mbFramesTouched, targetFoundState, updatedState); + + } + else + { + // Do unoptimized case + NvU32 framesPerPage = pageSize >> PMA_PAGE_SHIFT; + NvU32 j; + + for (j = 0; j < framesPerPage; j++) + { + NvU64 frameNum2mb; + NvU64 frameNum = frameNumStart + j; + + _addrtreeConvertFrame(pTree, levelNum, frameNum, levelNum2mb, &frameNum2mb); + oldState2mb = pmaAddrtreeReadLevel(pTree, levelNum2mb, frameNum2mb, NV_TRUE); + + _addrtreeConvertLevelFrameToNodeIndex(pTree, levelNum, frameNum, &pNode, &index); + + // The read is done only to update the stats tracking + oldState = pmaAddrtreeReadLevel(pTree, levelNum, frameNum, NV_TRUE); + _addrtreeSetState(pTree, pNode, index, newState, newStateMask); + + // Calculate what the new state will be + updatedState = (oldState & ~newStateMask) | (newState & newStateMask); + + pmaStatsUpdateState(&pTree->pPmaStats->numFreeFrames, 1, oldState, updatedState); + + updatedState2mb = pmaAddrtreeReadLevel(pTree, levelNum2mb, frameNum2mb, NV_TRUE); + + if (updatedState2mb != oldState2mb) + { + pmaStatsUpdateState(&pTree->pPmaStats->numFree2mbPages, 1, + oldState2mb, updatedState2mb); + + } + } + + } +} + +// +// This function wraps the real __pmaAddrtreeChangePageStateAttribEx +// to allow addrtree to set 128KB page size +// +static void +_pmaAddrtreeChangePageStateAttribEx +( + void *pMap, + NvU64 frameNumStart, + NvU32 pageSize, + PMA_PAGESTATUS newState, + PMA_PAGESTATUS newStateMask +) +{ + if (NV_UNLIKELY(pageSize == _PMA_128KB)) { + NvU64 i; + for (i = 0; i < 2; i++) { + __pmaAddrtreeChangePageStateAttribEx( + pMap, + frameNumStart + i, + _PMA_64KB, + newState, + newStateMask); + } + } + else + { + __pmaAddrtreeChangePageStateAttribEx( + pMap, + frameNumStart, + pageSize, + newState, + newStateMask); + } +} + +// +// These accessor functions can be made more efficient. TODO improve this. +// We have page size information in the alloc path, but in general, we don't use +// _pmaAddrtreeChangePageStateAttribEx to its fullest extent for all other cases where +// we change the state of the tree. +// If we had the page size information, we won't need to walk the tree for every frame. 
+// For now, pmaAddrtreeChangeState and pmaAddrtreeRead only operate on 64K level frameNums +// +void +pmaAddrtreeChangeStateAttribEx +( + void *pMap, + NvU64 frameNum, + PMA_PAGESTATUS newState, + PMA_PAGESTATUS newStateMask +) +{ + _pmaAddrtreeChangePageStateAttribEx(pMap, frameNum, _PMA_64KB, newState, newStateMask); +} + +// TODO: merge this on PMA level +void pmaAddrtreeChangeState(void *pTree, NvU64 frameNum, PMA_PAGESTATUS newState) +{ + pmaAddrtreeChangeStateAttribEx(pTree, frameNum, newState, STATE_MASK); +} + +// TODO: merge this on PMA level +void pmaAddrtreeChangeStateAttrib(void *pTree, NvU64 frameNum, PMA_PAGESTATUS newState, NvBool writeAttrib) +{ + PMA_PAGESTATUS mask = writeAttrib ? MAP_MASK : STATE_MASK; + pmaAddrtreeChangeStateAttribEx(pTree, frameNum, newState, mask); +} + +// TODO: merge this on PMA level +void +pmaAddrtreeChangePageStateAttrib +( + void * pTree, + NvU64 frameNumStart, + NvU32 pageSize, + PMA_PAGESTATUS newState, + NvBool writeAttrib +) +{ + PMA_PAGESTATUS mask = writeAttrib ? MAP_MASK : STATE_MASK; + _pmaAddrtreeChangePageStateAttribEx(pTree, frameNumStart, pageSize, newState, mask); +} + +PMA_PAGESTATUS pmaAddrtreeRead +( + void *pMap, + NvU64 frameNum, + NvBool bReadAttrib +) +{ + NvU32 index; + ADDRTREE_NODE *pNode; + NvU32 levelNum = addrtreeGetTreeLevel(_PMA_64KB) - 1; + PMA_PAGESTATUS state; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + frameNum += pTree->numPaddingFrames; + + _addrtreeConvertLevelFrameToNodeIndex(pTree, levelNum, + frameNum, &pNode, &index); + + state = _addrtreeGetState(pTree, pNode, index, bReadAttrib); + return state; +} + + +void pmaAddrtreeGetSize +( + void *pMap, + NvU64 *pBytesTotal +) +{ + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + *pBytesTotal = (pTree->totalFrames << PMA_PAGE_SHIFT); +} + +// +// The algorithm here is very simplistic. But maybe that's OK because this call +// is not used a whole lot. We can optimize it but might not worth the effort. 
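+// It reads every 64KB frame in the tree one at a time, so a full scan of,
+// say, 32GB of managed memory costs roughly 512K pmaAddrtreeRead() calls.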
+// +void pmaAddrtreeGetLargestFree +( + void *pMap, + NvU64 *pLargestFree +) +{ + NvU64 i, length = 0, largestLength = 0; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + + for (i = 0; i < pTree->totalFrames; i++) + { + if (pmaAddrtreeRead(pTree, i, NV_FALSE) != STATE_FREE) + { + largestLength = NV_MAX(length, largestLength); + length = 0; + } + else + { + length++; + } + } + + largestLength = NV_MAX(length, largestLength); + *pLargestFree = (largestLength << PMA_PAGE_SHIFT); +} + +// +// Check whether the specified frame range is available completely for eviction +// +// Returns: +// - -1 if the whole range is evictable +// +// - Index of the last unevictable frame number +// +// For now, just do a dumb scan frame by frame +static NvS64 +_pmaAddrtreeScanNumaUnevictable +( + PMA_ADDRTREE *pTree, + NvU64 frameBegin, + NvU64 frameEnd +) +{ + NvU64 frame; + PMA_PAGESTATUS frameStatus; + + for(frame = frameEnd; frame >= frameBegin; frame--) + { + frameStatus = pmaAddrtreeRead((void*)pTree, frame, NV_TRUE); + if (frameStatus != STATE_UNPIN) + { + return frame; + } + } + return -1; +} + +// +// Determine a contiguous evictable range of size actualSize +// +// Returns: +// - NV_ERR_NO_MEMORY if eviction is not possible for this size +// +// - NV_OK if there is a valid contiguous evictable range +// starting and ending at address stored at evictStart and evictEnd +// +// + +NV_STATUS pmaAddrtreeScanContiguousNumaEviction +( + void *pMap, + NvU64 addrBase, + NvLength actualSize, + NvU64 pageSize, + NvU64 *evictStart, + NvU64 *evictEnd +) +{ + NV_STATUS status = NV_ERR_NO_MEMORY; + PMA_ADDRTREE *pTree = (PMA_ADDRTREE *)pMap; + + NvU64 alignedAddrBase; + NvU64 frameNum; + NvU64 endFrame, frameStart; + NvU64 alignment = pageSize; + NvU64 frameAlignment, frameAlignmentPadding; + NvU64 numFrames = actualSize >> PMA_PAGE_SHIFT; + NvU64 framesToSkip; + NvU32 level = addrtreeGetTreeLevel(_PMA_64KB) - 1; + PMA_PAGESTATUS startStatus, endStatus; + + endFrame = pTree->totalFrames - 1; + + if (pTree->totalFrames < numFrames) + return status; + + // Copied from _pmaAddrtreeContigSearchLoop, pmaRegmapScanContiguous + // We need to do this one the 64K frame level because addrtree will currently + // qualify a 2MB node that is half unpin and half free as entirely + // unpin, which doesn't work in NUMA mode because PMA cannot ask + // UVM to evict free pages in NUMA mode + + frameAlignment = alignment >> PMA_PAGE_SHIFT; + alignedAddrBase = NV_ALIGN_UP(addrBase, alignment); + // May need to modify to work with internal address tree padding + frameAlignmentPadding = (alignedAddrBase - addrBase) >> PMA_PAGE_SHIFT; + frameStart = alignUpToMod(0, frameAlignment, frameAlignmentPadding); + + for (frameNum = frameStart; frameNum <= endFrame; ) + { + NvS64 firstUnevictableFrame; + NvU64 endFrame = frameNum + numFrames - 1; + + // + // Read endStatus first so we don't have to waste time traversing the + // tree again to read startStatus if endStatus is not even usable + // + endStatus = _pmaAddrtreeReadLevelAndSkipUnavailable(pTree, level, endFrame, STATE_UNPIN, NV_FALSE, &framesToSkip); + + if (framesToSkip > 1) { + frameNum = NV_ALIGN_UP(endFrame + framesToSkip, frameAlignment); + NV_ASSERT(frameNum != 0); + continue; + } + + startStatus = _pmaAddrtreeReadLevelAndSkipUnavailable(pTree, level, frameNum, STATE_UNPIN, NV_FALSE, &framesToSkip); + + if (framesToSkip > 1) { + frameNum += NV_ALIGN_UP(framesToSkip, frameAlignment); + NV_ASSERT(frameNum != 0); + continue; + } + + // Check against the whole state since we've already 
ready that in addrtree + if (endStatus != STATE_UNPIN) + { + // end is not available jump from start to after numFrames + frameNum += numFrames; + frameNum = alignUpToMod(frameNum, frameAlignment, frameAlignmentPadding); + continue; + } + + if (startStatus != STATE_UNPIN) + { + // startFrame is unavailable, jump to next aligned frame + frameNum += frameAlignment; + continue; + } + + // First occurrence of 0 in STATE_UNPIN from frameNum to frameNum + numFrames - 1 + firstUnevictableFrame = _pmaAddrtreeScanNumaUnevictable(pMap, frameNum, frameNum + numFrames - 1); + + if (firstUnevictableFrame == -1) + { + NV_PRINTF(LEVEL_INFO, " %s evictable frame = %lld evictstart = %llx evictEnd = %llx\n", + __FUNCTION__, frameNum, addrBase + (frameNum << PMA_PAGE_SHIFT), + (addrBase + (frameNum << PMA_PAGE_SHIFT) + actualSize - 1)); + + // Subtract off padding when returning + *evictStart = addrBase + (frameNum << PMA_PAGE_SHIFT) - (pTree->numPaddingFrames << PMA_PAGE_SHIFT); + *evictEnd = *evictStart + actualSize - 1; + status = NV_OK; + break; + } + else + { + // get the next aligned frame after the unevictable frame. + frameNum = alignUpToMod(firstUnevictableFrame + 1, frameAlignment, frameAlignmentPadding); + } + } + + return status; + +} + +NvU64 pmaAddrtreeGetEvictingFrames(void *pMap) +{ + return ((PMA_ADDRTREE *)pMap)->frameEvictionsInProcess; +} + +void pmaAddrtreeSetEvictingFrames(void *pMap, NvU64 frameEvictionsInProcess) +{ + ((PMA_ADDRTREE *)pMap)->frameEvictionsInProcess = frameEvictionsInProcess; +} + diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/numa.c b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/numa.c new file mode 100644 index 000000000..04b6714c5 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/numa.c @@ -0,0 +1,705 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * + * @brief Implementation for the NUMA interfaces, used by parent module PMA only. + * This file interfaces with the RM Linux layer which interfaces with the + * Linux kernel. 
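+ *
+ * In this mode the Linux kernel owns the GPU memory: PMA only tracks pages
+ * it has handed out (ALLOC_PIN/ALLOC_UNPIN), while free memory stays with
+ * the kernel. Addresses returned by osAllocPagesNode() are system physical
+ * addresses and are converted to GPU physical addresses by subtracting
+ * pPma->coherentCpuFbBase.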
+ */ + +#include "gpu/mem_mgr/phys_mem_allocator/numa.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h" +#include "gpu/mem_mgr/mem_scrub.h" +#include "utils/nvprintf.h" +#include "utils/nvassert.h" +#include "os/os.h" + +// +// Local helper functions and declarations +// + +//TODO merge or nuke these functions +static NV_STATUS _pmaNumaAvailableEvictablePage(PMA *pPma, NvS32 *validRegionList); +static NV_STATUS _pmaNumaAvailableEvictableRange(PMA *pPma, NvS32 *validRegionList, + NvLength actualSize, NvU64 pageSize, NvU64 *evictStart, NvU64 *evictEnd); +static NV_STATUS _pmaNumaAllocateRange(PMA *pPma, NvU32 numaNodeId, NvLength actualSize, + NvU64 pageSize, NvU64 *pPages, NvBool bScrubOnAlloc, NvBool allowEvict, NvS32 *validRegionList, + NvU64 *allocatedCount); +static NV_STATUS _pmaNumaAllocatePages (PMA *pPma, NvU32 numaNodeId, NvU32 pageSize, + NvLength allocationCount, NvU64 *pPages, NvBool bScrubOnAlloc, NvBool allowEvict, NvS32 *validRegionList, + NvU64 *allocatedPages); + +/*! + * @brief Check if there is at least one evictable page from UVM. + */ +static NV_STATUS _pmaNumaAvailableEvictablePage +( + PMA *pPma, + NvS32 *validRegionList +) +{ + NvU32 regionIdx; + PMA_PAGESTATUS frameState; + void *pMap = NULL; + NV_STATUS status = NV_ERR_NO_MEMORY; + + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + NvU32 regId, frameNum; + NvU64 totalFrames; + + regId = (NvU32)validRegionList[regionIdx]; + + if (validRegionList[regionIdx] == -1) + continue; + + pMap = pPma->pRegions[regId]; + pPma->pMapInfo->pmaMapGetSize(pMap, &totalFrames); + totalFrames >>= PMA_PAGE_SHIFT; + + for (frameNum = 0; frameNum < totalFrames; frameNum++) + { + frameState = pPma->pMapInfo->pmaMapRead(pMap, frameNum, NV_TRUE); + if ((frameState & STATE_MASK) == STATE_UNPIN) + { + status = NV_OK; + break; + } + } + + if (status == NV_OK) + break; + } + + if (status == NV_OK) + NV_PRINTF(LEVEL_INFO, "Evictable frame: FOUND\n"); + else + NV_PRINTF(LEVEL_INFO, "Evictable frame: NOT FOUND\n"); + + return status; +} + +/*! + * @brief Check if there is a contiguous range of + * evictable frame with UVM and get the start + * and end address if there is + * In NUMA, OS manages memory and PMA will only track allocated memory in ALLOC_PIN + * and ALLOC_UNPIN state. FREE memory is managed by OS and cannot be tracked by PMA + * and hence PMA cannot consider FREE memory for eviction and can only consider frames + * in known state to PMA or eviction. ALLOC_PIN cannot be evicted and hence only ALLOC_UNPIN + * can be evictable. + */ +NV_STATUS _pmaNumaAvailableEvictableRange +( + PMA *pPma, + NvS32 *validRegionList, + NvLength actualSize, + NvU64 pageSize, + NvU64 *evictStart, + NvU64 *evictEnd +) +{ + void *pMap = NULL; + NvU32 regionIdx; + NV_STATUS status = NV_ERR_NO_MEMORY; + + if ((evictStart == NULL) || (evictEnd == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *evictStart = 0; + *evictEnd = 0; + + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + NvU64 addrBase; + NvU32 regId; + + if (validRegionList[regionIdx] == -1) + continue; + + regId = (NvU32)validRegionList[regionIdx]; + pMap = pPma->pRegions[regId]; + addrBase = pPma->pRegDescriptors[regId]->base; + + + if ((status = pPma->pMapInfo->pmaMapScanContiguousNumaEviction(pMap, addrBase, actualSize, + pageSize, evictStart, evictEnd)) == NV_OK) + { + break; + } + } + + return status; +} + +/*! + * Check if the number of free frames is below the skip threshold percentage of total. 
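+ * For example, with numaReclaimSkipThreshold == 2, this reports NV_TRUE
+ * (and kernel reclaim is skipped) while fewer than 2% of the PMA-managed
+ * 64KB frames are free.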
+ * @return NV_TRUE free frame count is below threshold. + * NV_FALSE otherwise. + */ +static NvBool _pmaCheckFreeFramesToSkipReclaim(PMA *pPma) +{ + return (100 * pPma->pmaStats.numFreeFrames < + (pPma->pmaStats.num2mbPages * (_PMA_2MB >> PMA_PAGE_SHIFT) * pPma->numaReclaimSkipThreshold)); +} + +/*! + * @brief Allocate contiguous memory for Numa + * + */ +NV_STATUS _pmaNumaAllocateRange +( + PMA *pPma, + NvU32 numaNodeId, + NvLength actualSize, + NvU64 pageSize, + NvU64 *pPages, + NvBool bScrubOnAlloc, + NvBool allowEvict, + NvS32 *validRegionList, + NvU64 *allocatedCount +) +{ + NV_STATUS status = NV_ERR_NO_MEMORY; + NvU64 sysPhysAddr = 0, gpaPhysAddr = 0, evictStart = 0, evictEnd = 0; + NvU32 flags = OS_ALLOC_PAGES_NODE_NONE; + *allocatedCount = 0; + + // check if numFreeFrames(64KB) are below a certain % of PMA managed memory(indicated by num2mbPages). + if (_pmaCheckFreeFramesToSkipReclaim(pPma)) + { + flags = OS_ALLOC_PAGES_NODE_SKIP_RECLAIM; + } + + portSyncSpinlockRelease(pPma->pPmaLock); + + // Try to allocate contiguous allocation of actualSize from OS. Do not force RECLAIM + status = osAllocPagesNode((int)numaNodeId, (NvLength)actualSize, flags, &sysPhysAddr); + + if (status == NV_OK) + { + NvU32 j; + // j=0 head page is already refcounted at allocation + for (j = 1; j < (actualSize >> PMA_PAGE_SHIFT); j++) + { + osAllocAcquirePage(sysPhysAddr + (j << PMA_PAGE_SHIFT)); + } + + gpaPhysAddr = sysPhysAddr - pPma->coherentCpuFbBase; + NV_ASSERT(gpaPhysAddr < pPma->coherentCpuFbBase); + *allocatedCount = 1; + + if (bScrubOnAlloc) + { + PSCRUB_NODE pPmaScrubList = NULL; + NvU64 count; + + if ((status = scrubSubmitPages(pPma->pScrubObj, (NvU32)actualSize, &gpaPhysAddr, + 1, &pPmaScrubList, &count)) != NV_OK) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto scrub_exit; + } + + if (count > 0) + _pmaClearScrubBit(pPma, pPmaScrubList, count); + + if ((status = _pmaCheckScrubbedPages(pPma, actualSize, &gpaPhysAddr, 1)) != NV_OK) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + } + +scrub_exit: + portMemFree(pPmaScrubList); + + if (status == NV_ERR_INSUFFICIENT_RESOURCES) + { + NV_PRINTF(LEVEL_ERROR, "ERROR: scrubber OOM!\n"); + } + } + + portSyncSpinlockAcquire(pPma->pPmaLock); + goto allocated; + } + + portSyncSpinlockAcquire(pPma->pPmaLock); + + NV_PRINTF(LEVEL_INFO, "Allocate from OS failed for allocation size = %lld!\n", + (NvU64) actualSize); + + + if (allowEvict) + { + // Check if UVM has evictable contiguous allocations of actualSize + status = _pmaNumaAvailableEvictableRange(pPma, validRegionList, + actualSize, pageSize, + &evictStart, &evictEnd); + } + + if ((status == NV_OK) && (evictEnd - evictStart + 1) >= actualSize) + { + void *pMap = NULL; + NvU32 regId; + + NV_ASSERT((evictEnd - evictStart + 1) == actualSize); + status = NV_ERR_NO_MEMORY; + regId = findRegionID(pPma, evictStart); + pMap = pPma->pRegions[regId]; + + if (pMap != NULL) + { + // + // Call UVM to evict the contiguous allocation and evict the rest to OS + // UVM will call into PMA to free this contiguous range along with any excesses. + // PMA will release only the excess allocation to OS in the free routine. + // i.e., region evictStart to evictEnd is marked as 'ATTRIB_EVICTING' and will not + // be returned to OS. 
+ // + status = _pmaEvictContiguous(pPma, pMap, evictStart, evictEnd); + + if (status == NV_ERR_NO_MEMORY) + { + NV_PRINTF(LEVEL_INFO, "Eviction Failed = %llx to %llx!\n", evictStart, evictEnd); + } + else + { + NV_PRINTF(LEVEL_INFO, "Eviction succeeded = %llx to %llx Scrub status 0x%x!\n", + evictStart, evictEnd, status); + gpaPhysAddr = evictStart; + *allocatedCount = 1; + } + } + else + { + NV_PRINTF(LEVEL_INFO, "pMap NULL cannot perform eviction\n"); + } + } + + +allocated: + + // GPA needs to be acquired by shifting by the ATS aperture base address + pPages[0] = gpaPhysAddr; + + return status; +} + +/*! + * @brief Allocate discontiguous pages for Numa + * + */ +static NV_STATUS _pmaNumaAllocatePages +( + PMA *pPma, + NvU32 numaNodeId, + NvU32 pageSize, + NvLength allocationCount, + NvU64 *pPages, + NvBool bScrubOnAlloc, + NvBool allowEvict, + NvS32 *validRegionList, + NvU64 *allocatedPages +) +{ + NV_STATUS status = NV_ERR_NO_MEMORY; + NvU64 sysPhysAddr; + NvU64 i = 0, j = 0; + NvU32 flags = OS_ALLOC_PAGES_NODE_NONE; + + NV_ASSERT(allocationCount); + + // check if numFreeFrames are below certain % of PMA managed memory. + if (_pmaCheckFreeFramesToSkipReclaim(pPma)) + { + flags = OS_ALLOC_PAGES_NODE_SKIP_RECLAIM; + } + + portSyncSpinlockRelease(pPma->pPmaLock); + + for (; i < allocationCount; i++) + { + status = osAllocPagesNode((int)numaNodeId, (NvLength) pageSize, flags, &sysPhysAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Alloc from OS failed for i= %lld allocationCount = %lld pageSize = %lld!\n", + i, (NvU64) allocationCount, (NvU64) pageSize); + break; + } + + // GPA needs to be acquired by shifting by the ATS aperture base address + NV_ASSERT(sysPhysAddr >= pPma->coherentCpuFbBase); + pPages[i] = sysPhysAddr - pPma->coherentCpuFbBase; + + // Skip the head page at offset 0 (j=0) as it is refcounted at allocation + for (j = 1; j < (pageSize >> PMA_PAGE_SHIFT); j++) + { + osAllocAcquirePage(sysPhysAddr + (j << PMA_PAGE_SHIFT)); + } + } + + if (bScrubOnAlloc) + { + PSCRUB_NODE pPmaScrubList = NULL; + NvU64 count; + + if ((status = scrubSubmitPages(pPma->pScrubObj, pageSize, pPages, + i, &pPmaScrubList, &count)) != NV_OK) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto scrub_exit; + } + + if (count > 0) + _pmaClearScrubBit(pPma, pPmaScrubList, count); + + if ((status = _pmaCheckScrubbedPages(pPma, pageSize, pPages, (NvU32)i)) != NV_OK) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + } + +scrub_exit: + portMemFree(pPmaScrubList); + + if (status == NV_ERR_INSUFFICIENT_RESOURCES) + { + NV_PRINTF(LEVEL_ERROR, "ERROR: scrubber OOM!\n"); + portSyncSpinlockAcquire(pPma->pPmaLock); + goto exit; + } + } + + portSyncSpinlockAcquire(pPma->pPmaLock); + + if (( i < allocationCount) && allowEvict) + { + NvU32 regionIdx; + + // Check if there is atleast one evictable page + status = _pmaNumaAvailableEvictablePage(pPma, validRegionList); + + if (status != NV_OK) + { + goto exit; + } + + status = NV_ERR_NO_MEMORY; + + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + NvU32 regId; + NvU64 addrBase, addrLimit; + void *pMap = NULL; + + if (validRegionList[regionIdx] == -1) + { + continue; + } + + regId = (NvU32)validRegionList[regionIdx]; + pMap = pPma->pRegions[regId]; + + addrBase = pPma->pRegDescriptors[regId]->base; + addrLimit = pPma->pRegDescriptors[regId]->limit; + + status = _pmaEvictPages(pPma, pMap, + &pPages[i], (NvU32)(allocationCount - i), + &pPages[0], i, + pageSize, addrBase, addrLimit); + + if (status != NV_ERR_NO_MEMORY) + { + NV_PRINTF(LEVEL_INFO, 
"Frames %lld evicted in region %d of total allocationCount %lld Scrub status 0x%x!\n", + i, regionIdx, (NvU64) allocationCount, status); + // + // UVM can over evict, but will call into PMA only to evict the excess. + // free startAddr + actualSize, (uvmAllocatedSize - actualSize) to OS. + // Assume no under eviction. Overeviction is taken care of by the free routine. + // + i = allocationCount; + break; + } + + NV_PRINTF(LEVEL_INFO, "Eviction Failed %d pages !\n", (NvU32) (allocationCount - i)); + } + + } + +exit: + *allocatedPages = i; + + return status; +} + + +NV_STATUS pmaNumaAllocate +( + PMA *pPma, + NvLength allocationCount, + NvU32 pageSize, + PMA_ALLOCATION_OPTIONS *allocationOptions, + NvU64 *pPages +) +{ + NvU32 i; + NV_STATUS status = NV_OK; + NvU32 numaNodeId = pPma->numaNodeId; + NvS32 regionList[PMA_REGION_SIZE]; + NvU32 flags = allocationOptions->flags; + NvLength allocSize = 0; + NvLength allocCount = 0; + NvU32 contigFlag = !!(flags & PMA_ALLOCATE_CONTIGUOUS); + // As per bug #2444368, kernel scrubbing is too slow. Use the GPU scrubber instead + NvBool bScrubOnAlloc = !(flags & PMA_ALLOCATE_NO_ZERO); + NvBool allowEvict = !(flags & PMA_ALLOCATE_DONT_EVICT); + NvBool partialFlag = !!(flags & PMA_ALLOCATE_ALLOW_PARTIAL); + + NvU64 finalAllocatedCount = 0; + + if (!pPma->bNuma) + { + NV_PRINTF(LEVEL_FATAL, "Cannot allocate from NUMA node %d on a non-NUMA system.\n", + numaNodeId); + return NV_ERR_INVALID_ARGUMENT; + } + + if (pageSize > _PMA_2MB) + { + NV_PRINTF(LEVEL_FATAL, "Cannot allocate with more than 2MB contiguity.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (pPma->nodeOnlined != NV_TRUE) + { + NV_PRINTF(LEVEL_INFO, "Cannot allocate from NUMA node %d before it is onlined.\n", + numaNodeId); + return NV_ERR_INVALID_STATE; + } + + if (contigFlag) + { + if (((NvU64)allocationCount) * ((NvU64) pageSize) > NV_U32_MAX) + { + NV_PRINTF(LEVEL_FATAL, "Cannot allocate more than 4GB contiguous memory in one call.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + } + + // We are not changing the state. Can be outside the lock perhaps + NV_CHECK_OK_OR_RETURN(LEVEL_FATAL, pmaSelector(pPma, allocationOptions, regionList)); + + if (pPma->bScrubOnFree) + { + portSyncMutexAcquire(pPma->pAllocLock); + portSyncRwLockAcquireRead(pPma->pScrubberValidLock); + + if (pmaPortAtomicGet(&pPma->scrubberValid) != PMA_SCRUBBER_VALID) + { + NV_PRINTF(LEVEL_WARNING, "PMA object is not valid\n"); + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + portSyncMutexRelease(pPma->pAllocLock); + return NV_ERR_INVALID_STATE; + } + } + else + { + // + // Scrub-on-free feature is OFF, therefore we cannot do scrub-on-alloc + // either because it uses the same HW + // + bScrubOnAlloc = NV_FALSE; + } + + // + // In the NUMA path, scrub on free does not provide enough safety guarantees + // because pages are released to the kernel and they can be reused by other + // processes. Therefore, we can only guarantee that the returned pages are + // zero if scrub on alloc is used. + // + allocationOptions->resultFlags = (bScrubOnAlloc)? 
PMA_ALLOCATE_RESULT_IS_ZERO : 0; + + portSyncSpinlockAcquire(pPma->pPmaLock); + + if (contigFlag) + { + allocCount = 1; + allocSize = allocationCount * pageSize; + status = _pmaNumaAllocateRange(pPma, numaNodeId, allocSize, pageSize, pPages, bScrubOnAlloc, allowEvict, regionList, &finalAllocatedCount); + } + else + { + allocCount = allocationCount; + allocSize = pageSize; + status = _pmaNumaAllocatePages(pPma, numaNodeId, (NvU32) allocSize, allocCount, pPages, bScrubOnAlloc, allowEvict, regionList, &finalAllocatedCount); + } + + if ((status == NV_ERR_NO_MEMORY) && partialFlag && (finalAllocatedCount > 0)) + { + status = NV_OK; + } + + if (status == NV_OK) + { + NvU32 regId; + void *pMap = NULL; + NvU64 regAddrBase; + NvU64 frameOffset; + NvU64 frameCount = 0; + PMA_PAGESTATUS curStatus = STATE_FREE; + PMA_PAGESTATUS allocOption = !!(flags & PMA_ALLOCATE_PINNED) ? + STATE_PIN : STATE_UNPIN; + + NV_PRINTF(LEVEL_INFO, "SUCCESS allocCount %lld, allocsize %lld eviction? %s pinned ? %s contig? %s\n", + (NvU64) allocCount,(NvU64) allocSize, (flags & PMA_ALLOCATE_DONT_EVICT) ? "NOTALLOWED" : "ALLOWED", + !!(flags & PMA_ALLOCATE_PINNED) ? "PINNED" : "UNPINNED", contigFlag ? "CONTIG":"DISCONTIG"); + + for (i = 0; i < finalAllocatedCount; i++) + { + NvU32 j; + + regId = findRegionID(pPma, pPages[i]); + pMap = pPma->pRegions[regId]; + regAddrBase = pPma->pRegDescriptors[regId]->base; + frameCount = allocSize >> PMA_PAGE_SHIFT; + + for (j = 0; j < frameCount; j++) + { + frameOffset = PMA_ADDR2FRAME(pPages[i], regAddrBase) + j; + + curStatus = pPma->pMapInfo->pmaMapRead(pMap, frameOffset, NV_TRUE); + + if (curStatus & ATTRIB_EVICTING) + { + status = NV_ERR_NO_MEMORY; + break; + } + pPma->pMapInfo->pmaMapChangeStateAttrib(pMap, frameOffset, allocOption, NV_TRUE); + } + if (status != NV_OK) + break; + } + + if (status == NV_OK) + { + allocationOptions->numPagesAllocated = (NvLength)finalAllocatedCount; + } + } + + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "FAILED allocCount %lld, allocsize %lld eviction? %s pinned ? %s contig? %s\n", + (NvU64) allocCount, (NvU64) allocSize, (flags & PMA_ALLOCATE_DONT_EVICT) ? "NOTALLOWED" : "ALLOWED", + !!(flags & PMA_ALLOCATE_PINNED) ? "PINNED" : "UNPINNED", contigFlag ? "CONTIG":"DISCONTIG"); + // + // Free the entire allocation if scrubbing failed or if we had allocated evicting allocations. 
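+        // pmaNumaFreeInternal() below skips frames marked ATTRIB_EVICTING, so pages
+        // obtained through eviction are not released back to the kernel here.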
+ // Evicting allocation will be handled in the pmaEvictContiguous + // + if (finalAllocatedCount > 0) + pmaNumaFreeInternal(pPma, pPages, finalAllocatedCount, pageSize, 0); + + status = NV_ERR_NO_MEMORY; + } + + portSyncSpinlockRelease(pPma->pPmaLock); + + if (pPma->bScrubOnFree) + { + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + portSyncMutexRelease(pPma->pAllocLock); + } + + return status; +} + +void pmaNumaFreeInternal +( + PMA *pPma, + NvU64 *pPages, + NvU64 pageCount, + NvU64 size, + NvU32 flag +) +{ + NvU64 i, j; + + NV_PRINTF(LEVEL_INFO, "Freeing pPage[0] = %llx pageCount %lld\n", pPages[0], pageCount); + + for (i = 0; i < pageCount; i++) + { + NvU32 regId; + NvU64 addrBase; + NvU64 sysPhysAddr = 0; + NvU64 frameNum; + NvU64 framesPerPage; + + // Shift the GPA to acquire the bus address (SPA) + NV_ASSERT(pPages[i] < pPma->coherentCpuFbSize); + + regId = findRegionID(pPma, pPages[i]); + addrBase = pPma->pRegDescriptors[regId]->base; + frameNum = PMA_ADDR2FRAME(pPages[i], addrBase); + framesPerPage = size >> PMA_PAGE_SHIFT; + sysPhysAddr = pPages[i] + pPma->coherentCpuFbBase; + + for (j = 0; j < framesPerPage; j++) + { + PMA_PAGESTATUS newStatus = STATE_FREE; + PMA_PAGESTATUS currentStatus; + NvU64 sysPagePhysAddr = 0; + currentStatus = pPma->pMapInfo->pmaMapRead(pPma->pRegions[regId], (frameNum + j), NV_TRUE); + + // + // When the pages are marked for evicting, we will skip free the page to OS + // in order to reuse the page. + // + if (currentStatus & ATTRIB_EVICTING) + { + // + // Evicting allocations are returned to new client and will be freed later. + // We set the ATTRIB_NUMA_REUSE bit here just in case eviction fails later and we + // need to release the page to OS in the allocation path. + // + if (currentStatus & STATE_UNPIN) + { + pPma->pMapInfo->pmaMapChangeStateAttribEx(pPma->pRegions[regId], (frameNum + j), + ATTRIB_NUMA_REUSE, ATTRIB_NUMA_REUSE); + } + continue; + } + sysPagePhysAddr = sysPhysAddr + (j << PMA_PAGE_SHIFT); + osAllocReleasePage(sysPagePhysAddr); + pPma->pMapInfo->pmaMapChangeStateAttribEx(pPma->pRegions[regId], (frameNum + j), newStatus, ~ATTRIB_EVICTING); + } + } +} + +void pmaNumaSetReclaimSkipThreshold(PMA *pPma, NvU32 skipReclaimPercent) +{ + portSyncSpinlockAcquire(pPma->pPmaLock); + pPma->numaReclaimSkipThreshold = skipReclaimPercent; + portSyncSpinlockRelease(pPma->pPmaLock); +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.c b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.c new file mode 100644 index 000000000..b6a6764c4 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.c @@ -0,0 +1,1982 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief The PMA implementation file. + * This file implements the PMA object and the public interfaces. + * + * @bug + * 1. SLI broadcast -- Not implemented + */ + +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h" +#include "gpu/mem_mgr/phys_mem_allocator/numa.h" +#include "gpu/mem_mgr/mem_scrub.h" +#include "utils/nvprintf.h" +#include "utils/nvassert.h" + +#if !defined(SRT_BUILD) +// These files are not found on SRT builds +#include "os/os.h" +#else +NV_STATUS pmaNumaAllocate +( + PMA *pPma, + NvLength allocationCount, + NvU32 pageSize, + PMA_ALLOCATION_OPTIONS *allocationOptions, + NvU64 *pPages +) +{ + return NV_ERR_GENERIC; +} + +void pmaNumaFreeInternal +( + PMA *pPma, + NvU64 *pPages, + NvU64 pageCount, + NvU64 size, + NvU32 flag +) +{ + return; +} +void pmaNumaSetReclaimSkipThreshold(PMA *pPma, NvU32 data) +{ + return; +} +#endif + +typedef NV_STATUS (*scanFunc)(void *, NvU64, NvU64, NvU64, NvU64, NvU64*, NvU32, NvU64, NvU64*, NvBool); + +static void +_pmaRollback +( + PMA *pPma, + NvU64 *pPages, + NvU32 failCount, + NvU32 failFrame, + NvU32 pageSize, + PMA_PAGESTATUS oldState +) +{ + NvU32 framesPerPage, regId, i, j; + NvU64 frameNum, addrBase; + + framesPerPage = pageSize >> PMA_PAGE_SHIFT; + if (failCount != 0) + { + for(i = 0; i < failCount; i++) + { + regId = findRegionID(pPma, pPages[i]); + addrBase = pPma->pRegDescriptors[regId]->base; + frameNum = PMA_ADDR2FRAME(pPages[i], addrBase); + + for (j = 0; j < framesPerPage; j++) + { + pPma->pMapInfo->pmaMapChangeState(pPma->pRegions[regId], (frameNum + j), oldState); + } + } + } + + if (failFrame != 0) + { + // might fail half-way through + regId = findRegionID(pPma, pPages[failCount]); + addrBase = pPma->pRegDescriptors[regId]->base; + frameNum = PMA_ADDR2FRAME(pPages[failCount], addrBase); + for(i = 0; i < failFrame; i++) + { + pPma->pMapInfo->pmaMapChangeState(pPma->pRegions[regId], (frameNum + i), oldState); + } + } +} + +/////////////////////////////////////////////////////////////////////////////// +// +// Public interfaces +// + +NV_STATUS +pmaInitialize(PMA *pPma, NvU32 initFlags) +{ + NV_STATUS status = NV_OK; + PMA_MAP_INFO *pMapInfo; + + if (pPma == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pPma->pPmaLock = NULL; + pPma->pEvictionCallbacksLock = NULL; + + // Assume portMemInitialize() has been called + pPma->pPmaLock = (PORT_SPINLOCK *)portMemAllocNonPaged(portSyncSpinlockSize); + if (pPma->pPmaLock == NULL) + { + status = NV_ERR_NO_MEMORY; + goto error; + } + + status = portSyncSpinlockInitialize(pPma->pPmaLock); + if (status != NV_OK) + { + portMemFree(pPma->pPmaLock); + pPma->pPmaLock = NULL; + goto error; + } + + pPma->pEvictionCallbacksLock = (PORT_MUTEX *)portMemAllocNonPaged(portSyncMutexSize); + if (pPma->pEvictionCallbacksLock == NULL) + { + status = NV_ERR_NO_MEMORY; + goto error; + } + + status = portSyncMutexInitialize(pPma->pEvictionCallbacksLock); + if (status != NV_OK) + { + 
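+        // The mutex memory is freed here because its initialization failed; the
+        // error: label below calls pmaDestroy() to clean up everything allocated so far.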
portMemFree(pPma->pEvictionCallbacksLock); + pPma->pEvictionCallbacksLock = NULL; + goto error; + } + + pPma->pAllocLock = (PORT_MUTEX *)portMemAllocNonPaged(portSyncMutexSize); + if (pPma->pAllocLock == NULL) + { + status = NV_ERR_NO_MEMORY; + goto error; + } + + status = portSyncMutexInitialize(pPma->pAllocLock); + if (status != NV_OK) + { + portMemFree(pPma->pAllocLock); + pPma->pAllocLock = NULL; + goto error; + } + + pPma->pScrubberValidLock = (PORT_RWLOCK *)portMemAllocNonPaged(portSyncRwLockSize); + if (pPma->pScrubberValidLock == NULL) + { + status = NV_ERR_NO_MEMORY; + goto error; + } + + pMapInfo = (PMA_MAP_INFO *)portMemAllocNonPaged(sizeof(struct _PMA_MAP_INFO)); + if (pMapInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto error; + } + + status = portSyncRwLockInitialize(pPma->pScrubberValidLock); + if (status != NV_OK) + { + portMemFree(pPma->pScrubberValidLock); + pPma->pScrubberValidLock = NULL; + goto error; + } + + // + // Initialize all the scanning callbacks to lower layer + // Default use regmap + // + pMapInfo->pmaMapInit = pmaRegmapInit; + pMapInfo->pmaMapDestroy = pmaRegmapDestroy; + pMapInfo->pmaMapChangeState = pmaRegmapChangeState; + pMapInfo->pmaMapChangeStateAttrib = pmaRegmapChangeStateAttrib; + pMapInfo->pmaMapChangeStateAttribEx = pmaRegmapChangeStateAttribEx; + pMapInfo->pmaMapChangePageStateAttrib = pmaRegmapChangePageStateAttrib; + pMapInfo->pmaMapRead = pmaRegmapRead; + pMapInfo->pmaMapScanContiguous = pmaRegmapScanContiguous; + pMapInfo->pmaMapScanDiscontiguous = pmaRegmapScanDiscontiguous; + pMapInfo->pmaMapGetSize = pmaRegmapGetSize; + pMapInfo->pmaMapGetLargestFree = pmaRegmapGetLargestFree; + pMapInfo->pmaMapScanContiguousNumaEviction = pmaRegMapScanContiguousNumaEviction; + pMapInfo->pmaMapGetEvictingFrames = pmaRegmapGetEvictingFrames; + pMapInfo->pmaMapSetEvictingFrames = pmaRegmapSetEvictingFrames; + + if (initFlags != PMA_INIT_NONE) + { + pPma->bForcePersistence = !!(initFlags & PMA_INIT_FORCE_PERSISTENCE); + + // If scrubber feature is enable, PMA is not valid until scrubber registration + if (initFlags & PMA_INIT_SCRUB_ON_FREE) + { + portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID); + } + pPma->bScrubOnFree = !!(initFlags & PMA_INIT_SCRUB_ON_FREE); + + // If running on NUMA system, we cannot allocate from OS until node is onlined + if (initFlags & PMA_INIT_NUMA) + { + pPma->nodeOnlined = NV_FALSE; + } + pPma->bNuma = !!(initFlags & PMA_INIT_NUMA); + + // If we want to run with address tree instead of regmap + if (initFlags & PMA_INIT_ADDRTREE) + { + pMapInfo->pmaMapInit = pmaAddrtreeInit; + pMapInfo->pmaMapDestroy = pmaAddrtreeDestroy; + pMapInfo->pmaMapChangeState = pmaAddrtreeChangeState; + pMapInfo->pmaMapChangeStateAttrib = pmaAddrtreeChangeStateAttrib; + pMapInfo->pmaMapChangeStateAttribEx = pmaAddrtreeChangeStateAttribEx; + pMapInfo->pmaMapChangePageStateAttrib = pmaAddrtreeChangePageStateAttrib; + pMapInfo->pmaMapRead = pmaAddrtreeRead; + pMapInfo->pmaMapScanContiguous = pmaAddrtreeScanContiguous; + pMapInfo->pmaMapScanDiscontiguous = pmaAddrtreeScanDiscontiguous; + pMapInfo->pmaMapGetSize = pmaAddrtreeGetSize; + pMapInfo->pmaMapGetLargestFree = pmaAddrtreeGetLargestFree; + pMapInfo->pmaMapScanContiguousNumaEviction = pmaAddrtreeScanContiguousNumaEviction; + pMapInfo->pmaMapGetEvictingFrames = pmaAddrtreeGetEvictingFrames; + pMapInfo->pmaMapSetEvictingFrames = pmaAddrtreeSetEvictingFrames; + NV_PRINTF(LEVEL_WARNING, "Going to use addrtree for PMA init!!\n"); + } + } + pPma->pMapInfo = pMapInfo; + + 
pPma->pmaStats.numFreeFrames = 0; + pPma->pmaStats.num2mbPages = 0; + pPma->pmaStats.numFree2mbPages = 0; + pPma->regSize = 0; + portAtomicSetSize(&pPma->initScrubbing, PMA_SCRUB_INITIALIZE); + + // OK not to take lock since it's initialization + NV_ASSERT(pmaStateCheck(pPma)); + + return NV_OK; + +error: + pmaDestroy(pPma); + return status; +} + +NV_STATUS +pmaQueryConfigs(PMA *pPma, NvU32 *pConfig) +{ + NvU32 config = 0; + + if (pPma == NULL || pConfig == NULL) + { + return NV_ERR_INVALID_STATE; + } + + if (pPma->bScrubOnFree) + { + config |= PMA_QUERY_SCRUB_ENABLED; + + portSyncRwLockAcquireRead(pPma->pScrubberValidLock); + if (pmaPortAtomicGet(&pPma->scrubberValid) == PMA_SCRUBBER_VALID) + { + config |= PMA_QUERY_SCRUB_VALID; + } + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + } + config |= pPma->bNuma ? (PMA_QUERY_NUMA_ENABLED) : 0; + + portSyncSpinlockAcquire(pPma->pPmaLock); + config |= pPma->nodeOnlined ? (PMA_QUERY_NUMA_ONLINED) : 0; + portSyncSpinlockRelease(pPma->pPmaLock); + + // Only expose the states the clients asked for + *pConfig = (*pConfig) & config; + return NV_OK; +} + +NV_STATUS +pmaRegMemScrub(PMA *pPma, OBJMEMSCRUB *pScrubObj) +{ + NV_ASSERT(pPma && pPma->bScrubOnFree); + portSyncRwLockAcquireWrite(pPma->pScrubberValidLock); + pPma->pScrubObj = pScrubObj; + portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_VALID); + portSyncRwLockReleaseWrite(pPma->pScrubberValidLock); + + return NV_OK; +} + +void +pmaUnregMemScrub(PMA *pPma) +{ + NV_ASSERT(pPma && pPma->bScrubOnFree); + portSyncRwLockAcquireWrite(pPma->pScrubberValidLock); + portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID); + pPma->pScrubObj = NULL; + portSyncRwLockReleaseWrite(pPma->pScrubberValidLock); +} + +NV_STATUS +pmaNumaOnlined(PMA *pPma, NvS32 numaNodeId, + NvU64 coherentCpuFbBase, NvU64 coherentCpuFbSize) +{ + if ((pPma == NULL) || (!pPma->bNuma) || + (numaNodeId == PMA_NUMA_NO_NODE)) + { + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + portSyncSpinlockAcquire(pPma->pPmaLock); + pPma->nodeOnlined = NV_TRUE; + pPma->numaNodeId = numaNodeId; + pPma->coherentCpuFbBase = coherentCpuFbBase; + pPma->coherentCpuFbSize = coherentCpuFbSize; + portSyncSpinlockRelease(pPma->pPmaLock); + + return NV_OK; +} + + +void +pmaNumaOfflined(PMA *pPma) +{ + if ((pPma == NULL) || (!pPma->bNuma)) + { + NV_ASSERT(0); + return; + } + + portSyncSpinlockAcquire(pPma->pPmaLock); + pPma->nodeOnlined = NV_FALSE; + pPma->numaNodeId = PMA_NUMA_NO_NODE; + portSyncSpinlockRelease(pPma->pPmaLock); +} + + +void +pmaDestroy(PMA *pPma) +{ + NvU32 i; + + NV_ASSERT(pPma != NULL); + + NV_ASSERT(pmaStateCheck(pPma)); + + if (pmaPortAtomicGet(&pPma->initScrubbing) == PMA_SCRUB_IN_PROGRESS) + { + pmaScrubComplete(pPma); + } + + if (pPma->bNuma) + { + if (pPma->nodeOnlined != NV_FALSE) + { + // + // Not really an error right now but it will be later, when we are able + // to offline memory. 
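+            // Until memory offlining is supported we only warn here and continue
+            // tearing down the PMA state below.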
+ // + NV_PRINTF(LEVEL_WARNING, "Destroying PMA before node %d is offlined\n", + pPma->numaNodeId); + } + } + + for (i = 0; i < pPma->regSize; i++) + { + pPma->pMapInfo->pmaMapDestroy((void *)pPma->pRegions[i]); + portMemFree(pPma->pRegDescriptors[i]); + } + pPma->regSize = 0; + + if (pPma->blacklistCount != 0) + { + portMemFree(pPma->pBlacklistChunks); + } + + portMemFree(pPma->pMapInfo); + + if (pPma->pAllocLock != NULL) + { + portSyncMutexDestroy(pPma->pAllocLock); + portMemFree(pPma->pAllocLock); + } + + if (pPma->pScrubberValidLock != NULL) + { + portSyncRwLockDestroy(pPma->pScrubberValidLock); + portMemFree(pPma->pScrubberValidLock); + } + + if (pPma->pEvictionCallbacksLock != NULL) + { + portSyncMutexDestroy(pPma->pEvictionCallbacksLock); + portMemFree(pPma->pEvictionCallbacksLock); + } + + if (pPma->pPmaLock != NULL) + { + portSyncSpinlockDestroy(pPma->pPmaLock); + portMemFree(pPma->pPmaLock); + } +} + + +NV_STATUS +pmaRegisterRegion +( + PMA *pPma, + NvU32 id, + NvBool bAsyncEccScrub, + PMA_REGION_DESCRIPTOR *pRegionDesc, + NvU32 blacklistCount, + PPMA_BLACKLIST_ADDRESS pBlacklistPageBase +) +{ + NvU64 numFrames; + void *pMap; + NvU64 physBase, physLimit; + NV_STATUS status = NV_OK; + + if (pPma == NULL || pRegionDesc == NULL || id != pPma->regSize + || (pBlacklistPageBase == NULL && blacklistCount != 0)) + { + if (pPma == NULL) + { + NV_PRINTF(LEVEL_ERROR, "ERROR: NULL PMA object\n"); + } + else if (id != pPma->regSize) + { + NV_PRINTF(LEVEL_ERROR, "ERROR: Non-consecutive region ID %d (should be %d)\n", + id, pPma->regSize); + } + if (pRegionDesc == NULL) + NV_PRINTF(LEVEL_ERROR, "ERROR: NULL region descriptor\n"); + if (pBlacklistPageBase == NULL && blacklistCount != 0) + NV_PRINTF(LEVEL_ERROR, "ERROR: Blacklist failure. List is NULL but count = %d\n", + blacklistCount); + + return NV_ERR_INVALID_ARGUMENT; + } + + if (pPma->bNuma) + { + NV_PRINTF(LEVEL_WARNING, "WARNING: registering regions on NUMA system.\n"); + } + + physBase = pRegionDesc->base; + physLimit = pRegionDesc->limit; + + if (!NV_IS_ALIGNED(physBase, PMA_GRANULARITY) || + !NV_IS_ALIGNED((physLimit + 1), PMA_GRANULARITY)) + { + NV_PRINTF(LEVEL_ERROR, "ERROR: Region range %llx..%llx unaligned\n", + physBase, physLimit); + // just try to check alignment on 64KB boundaries + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ASSERT(pmaStateCheck(pPma)); + + numFrames = (physLimit - physBase + 1) >> PMA_PAGE_SHIFT; + + pMap = pPma->pMapInfo->pmaMapInit(numFrames, physBase, &pPma->pmaStats, + pRegionDesc->bProtected); + if (pMap == NULL) + { + return NV_ERR_NO_MEMORY; + } + + pPma->pRegions[id] = pMap; + + // Deep copy of descriptor + pPma->pRegDescriptors[id] = + (PMA_REGION_DESCRIPTOR *) portMemAllocNonPaged(sizeof(PMA_REGION_DESCRIPTOR)); + portMemCopy(pPma->pRegDescriptors[id], sizeof(PMA_REGION_DESCRIPTOR), + pRegionDesc, sizeof(PMA_REGION_DESCRIPTOR)); + + pPma->regSize++; + + if (bAsyncEccScrub) + { + // + // Scrubbing cannot be done before we start. This is to protect against spurious pmaScrubComplete + // calls from RM + // + NV_ASSERT(pmaPortAtomicGet(&pPma->initScrubbing) != PMA_SCRUB_DONE); + + // Mark region as "scrubbing" until background scrubbing completes + pmaSetBlockStateAttrib(pPma, physBase, physLimit - physBase + 1, ATTRIB_SCRUBBING, ATTRIB_SCRUBBING); + + // + // This depends on RM initialization order: RM will only call pmaScrubComplete + // once after all regions are registered and finished scrubbing. + // The return value cannot be asserted. 
For example, when we are registering + // the second region, the old returned value is _IN_PROGRESS and that is expected. + // + portAtomicCompareAndSwapSize(&pPma->initScrubbing, PMA_SCRUB_IN_PROGRESS, + PMA_SCRUB_INITIALIZE); + } + + status = pmaRegisterBlacklistInfo(pPma, physBase, pBlacklistPageBase, blacklistCount); + if (status != NV_OK) + { + pPma->pMapInfo->pmaMapDestroy(pMap); + portMemFree(pPma->pRegDescriptors[id]); + return status; + } + + NV_PRINTF(LEVEL_INFO, "Registered region:\n"); + pmaRegionPrint(pPma, pPma->pRegDescriptors[id], pPma->pRegions[id]); + NV_PRINTF(LEVEL_INFO, "%d region(s) now registered\n", pPma->regSize); + + return status; +} + + +NV_STATUS +pmaAllocatePages +( + PMA *pPma, + NvLength allocationCount, + NvU32 pageSize, + PMA_ALLOCATION_OPTIONS *allocationOptions, + NvU64 *pPages +) +{ + NvS32 regionList[PMA_REGION_SIZE]; + NV_STATUS status, prediction; + NvU32 flags, evictFlag, contigFlag, persistFlag, alignFlag, pinFlag, rangeFlag, blacklistOffFlag, partialFlag, skipScrubFlag; + NvU32 regId, regionIdx; + NvU64 numPagesAllocatedThisTime, numPagesLeftToAllocate, numPagesAllocatedSoFar; + NvU64 addrBase, addrLimit; + NvU64 rangeStart, rangeEnd; + NvU64 *curPages; + NvBool blacklistOffPerRegion[PMA_REGION_SIZE]={NV_FALSE}; + NvU64 blacklistOffAddrStart[PMA_REGION_SIZE] = {0}, blacklistOffRangeSize[PMA_REGION_SIZE] = {0}; + NvBool bScrubOnFree = NV_FALSE; + + void *pMap = NULL; + scanFunc useFunc; + PMA_PAGESTATUS pinOption; + NvU64 alignment = pageSize; + NvU32 framesPerPage = pageSize >> PMA_PAGE_SHIFT; + + // + // A boolean indicating if we should try to evict. We at most try eviction once per call + // to pmaAllocatePages. + // + NvBool tryEvict = NV_TRUE; + NvBool tryAlloc = NV_TRUE; + + const NvU64 numFramesToAllocateTotal = framesPerPage * allocationCount; + + if (pPma == NULL || pPages == NULL || allocationCount == 0 + || (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB) + || allocationOptions == NULL) + { + if (pPma == NULL) + NV_PRINTF(LEVEL_ERROR, "NULL PMA object\n"); + if (pPages == NULL) + NV_PRINTF(LEVEL_ERROR, "NULL page list pointer\n"); + if (allocationCount == 0) + NV_PRINTF(LEVEL_ERROR, "count == 0\n"); + if (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB) + NV_PRINTF(LEVEL_ERROR, "pageSize=0x%x (not 64K, 128K, 2M, or 512M)\n", pageSize); + if (allocationOptions == NULL) + NV_PRINTF(LEVEL_ERROR, "NULL allocationOptions\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + flags = allocationOptions->flags; + evictFlag = !(flags & PMA_ALLOCATE_DONT_EVICT); + contigFlag = !!(flags & PMA_ALLOCATE_CONTIGUOUS); + pinFlag = !!(flags & PMA_ALLOCATE_PINNED); + rangeFlag = !!(flags & PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE); + persistFlag = pPma->bForcePersistence || !!(flags & PMA_ALLOCATE_PERSISTENT); + alignFlag = !!(flags & PMA_ALLOCATE_FORCE_ALIGNMENT); + blacklistOffFlag = !!(flags & PMA_ALLOCATE_TURN_BLACKLIST_OFF); + partialFlag = !!(flags & PMA_ALLOCATE_ALLOW_PARTIAL); + skipScrubFlag = !!(flags & PMA_ALLOCATE_NO_ZERO); + + // Fork out new code path for NUMA sub-allocation from OS + if (pPma->bNuma) + { + return pmaNumaAllocate(pPma, allocationCount, pageSize, allocationOptions, pPages); + } + + // + // Scrub on free is enabled for this allocation request if the feature is enabled and the + // caller does not want to skip scrubber. 
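+    // (PMA_ALLOCATE_NO_ZERO is the flag a caller uses to request skipping the scrubber.)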
+ // Caller may want to skip scrubber when it knows the memory is zero'ed or when we are + // initializing RM structures needed by the scrubber itself. + // + bScrubOnFree = pPma->bScrubOnFree && (!skipScrubFlag); + + // + // PMA only knows the page is zero'ed if PMA scrubbed it. + // For example, if something else scrubbed the page, called PMA with ALLOCATE_NO_ZERO, + // the _RESULT_IS_ZERO flag is not set because PMA did not scrub that page. + // + allocationOptions->resultFlags = bScrubOnFree ? PMA_ALLOCATE_RESULT_IS_ZERO : 0; + + if (blacklistOffFlag && !contigFlag) + { + NV_PRINTF(LEVEL_ERROR, "Blacklist can only be turned off for contiguous allocations\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (bScrubOnFree && blacklistOffFlag) + { + NV_PRINTF(LEVEL_ERROR, "Blacklist cannot be turned off when scrub on free is enabled\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (rangeFlag && (!NV_IS_ALIGNED(allocationOptions->physBegin, pageSize) + || !NV_IS_ALIGNED((allocationOptions->physEnd + 1), pageSize))) + { + NV_PRINTF(LEVEL_WARNING, + "base [0x%llx] or limit [0x%llx] not aligned to page size 0x%x\n", + allocationOptions->physBegin, + allocationOptions->physEnd + 1, + pageSize); + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Minimum alignment is requested page size. Alignment granularity is 64K. + // Alignment must be power of two for PMA math + // + if (alignFlag) + { + if (!NV_IS_ALIGNED(allocationOptions->alignment, _PMA_64KB) || + !portUtilIsPowerOfTwo(allocationOptions->alignment)) + { + NV_PRINTF(LEVEL_WARNING, + "alignment [%llx] is not aligned to 64KB or is not power of two.", + alignment); + return NV_ERR_INVALID_ARGUMENT; + } + + alignment = NV_MAX(pageSize, allocationOptions->alignment); + if (!contigFlag && alignment > pageSize) + { + NV_PRINTF(LEVEL_WARNING, + "alignment [%llx] larger than the pageSize [%x] not supported for non-contiguous allocs\n", + alignment, pageSize); + return NV_ERR_INVALID_ARGUMENT; + } + } + + pinOption = pinFlag ? STATE_PIN : STATE_UNPIN; + pinOption |= persistFlag ? ATTRIB_PERSISTENT : 0; + + useFunc = contigFlag ? (pPma->pMapInfo->pmaMapScanContiguous) : + (pPma->pMapInfo->pmaMapScanDiscontiguous); + + // No locking required because the states don't change + status = pmaSelector(pPma, allocationOptions, regionList); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_FATAL, "Region selector failed\n"); + return status; + } + + if (bScrubOnFree) + { + portSyncMutexAcquire(pPma->pAllocLock); + portSyncRwLockAcquireRead(pPma->pScrubberValidLock); + if (pmaPortAtomicGet(&pPma->scrubberValid) != PMA_SCRUBBER_VALID) + { + NV_PRINTF(LEVEL_WARNING, "PMA object is not valid\n"); + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + portSyncMutexRelease(pPma->pAllocLock); + return NV_ERR_INVALID_STATE; + } + } + + tryEvict = (evictFlag == 1); + +pmaAllocatePages_retry: + // + // Retry implies that the PMA lock has been released and will be re-acquired + // after checking the scrubber so any pages allocated so far are not guaranteed + // to be there any more. Restart from scratch. + // + NV_PRINTF(LEVEL_INFO, "Attempt %s allocation of 0x%llx pages of size 0x%x " + "(0x%x frames per page)\n", + contigFlag ? 
"contiguous" : "discontiguous", + (NvU64)allocationCount, pageSize, framesPerPage); + + // Check if scrubbing is done before allocating each time before we retry + if (bScrubOnFree) + { + if ((status = _pmaCheckScrubbedPages(pPma, 0, NULL, 0)) != NV_OK) + goto scrub_fatal; + } + + // Attempting to allocate starts here + numPagesLeftToAllocate = allocationCount; + numPagesAllocatedSoFar = 0; + curPages = pPages; + + portSyncSpinlockAcquire(pPma->pPmaLock); + + NV_ASSERT(pmaStateCheck(pPma)); + + prediction = _pmaPredictOutOfMemory(pPma, allocationCount, pageSize, allocationOptions); + if (!tryEvict && (prediction == NV_ERR_NO_MEMORY)) + { + NV_PRINTF(LEVEL_INFO, "Returning OOM from prediction path.\n"); + status = NV_ERR_NO_MEMORY; + goto normal_exit; + } + + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + if (regionList[regionIdx] == -1) + { + status = NV_ERR_NO_MEMORY; + goto normal_exit; + } + NV_ASSERT(regionList[regionIdx] < PMA_REGION_SIZE); + + regId = (NvU32)regionList[regionIdx]; + pMap = pPma->pRegions[regId]; + + addrBase = pPma->pRegDescriptors[regId]->base; + addrLimit = pPma->pRegDescriptors[regId]->limit; + + // + // If the start address of the range is less than the region's base + // address, start from the base itself. + // + rangeStart = rangeFlag ? ((allocationOptions->physBegin >= addrBase) ? + (allocationOptions->physBegin - addrBase) : 0) : 0; + rangeEnd = rangeFlag ? ((allocationOptions->physEnd >= addrBase) ? + (allocationOptions->physEnd - addrBase) : 0) : 0; + + if (rangeStart > rangeEnd) + { + status = NV_ERR_INVALID_ARGUMENT; + goto normal_exit; + } + + // + // Before continuing with allocation, lets check if we need to turn-off + // blacklisting. During retry, we don't have to free the blacklisted pages again + // + if (blacklistOffFlag && !blacklistOffPerRegion[regId]) + { + if (allocationOptions->physBegin > addrLimit) + { + blacklistOffAddrStart[regId] = 0; + blacklistOffRangeSize[regId] = 0; + } + else + { + // if the range falls within the region then free blacklisted pages in the entire region + blacklistOffAddrStart[regId] = addrBase; + blacklistOffRangeSize[regId] = (addrLimit - addrBase + 1); + _pmaFreeBlacklistPages(pPma, regId, blacklistOffAddrStart[regId], blacklistOffRangeSize[regId]); + blacklistOffPerRegion[regId] = NV_TRUE; + } + } + + NV_ASSERT(numPagesLeftToAllocate + numPagesAllocatedSoFar == allocationCount); + NV_ASSERT(numPagesLeftToAllocate > 0); + + numPagesAllocatedThisTime = 0; + status = (*useFunc)(pMap, addrBase, rangeStart, rangeEnd, numPagesLeftToAllocate, + curPages, pageSize, alignment, &numPagesAllocatedThisTime, !tryEvict); + + NV_ASSERT(numPagesAllocatedThisTime <= numPagesLeftToAllocate); + + if (contigFlag) + { + // Contiguous allocations are all or nothing + NV_ASSERT(numPagesAllocatedThisTime == 0 || + numPagesAllocatedThisTime == numPagesLeftToAllocate); + } + + // + // Adjust the counts and the pointer within the array of pages for the + // discontiguous case where only some pages might have been successfully + // allocated. 
+ // + numPagesAllocatedSoFar += numPagesAllocatedThisTime; + curPages += numPagesAllocatedThisTime; + numPagesLeftToAllocate -= numPagesAllocatedThisTime; + + // + // PMA must currently catch addrtree shortcomings and fail the request + // Just follow the no memory path for now to properly release locks + // + if (status == NV_ERR_INVALID_ARGUMENT) + { + status = NV_ERR_NO_MEMORY; + } + + if (status == NV_ERR_IN_USE && !tryEvict) + { + // + // If memory is evictable, but eviction is not allowed by the + // caller, just return the no memory error. + // + NV_PRINTF(LEVEL_WARNING, "Memory evictable, but eviction not allowed, returning\n"); + status = NV_ERR_NO_MEMORY; + } + + if (status == NV_OK) + { + NV_ASSERT(numPagesLeftToAllocate == 0); + NV_ASSERT(numPagesAllocatedSoFar == allocationCount); + break; + } + else if (status == NV_ERR_NO_MEMORY) + { + // + // Print an "out of memory" mssg only after we've scanned through + // all the regions. Printing an OOM message on per region basis may + // confuse someone debugging that we've actually run out of memory. + // + if ((regionIdx < (pPma->regSize - 1)) && (regionList[regionIdx + 1] == -1)) + { + NV_PRINTF(LEVEL_ERROR, "Status no_memory\n"); + } + if (contigFlag) + { + // Contiguous allocations are all or nothing. + NV_ASSERT(numPagesAllocatedThisTime == 0); + } + } + else if (tryEvict) + { + NV_PRINTF(LEVEL_INFO, "Status evictable, region before eviction:\n"); + pmaRegionPrint(pPma, pPma->pRegDescriptors[regId], pMap); + + NV_ASSERT(numPagesLeftToAllocate > 0); + + if (contigFlag) + { + NV_ASSERT(numPagesLeftToAllocate == allocationCount); + NV_ASSERT(numPagesAllocatedThisTime == 0); + NV_ASSERT(numPagesAllocatedSoFar == 0); + + NvU64 evictStart = *curPages; + NvU64 evictEnd = *curPages + (numFramesToAllocateTotal << PMA_PAGE_SHIFT) - 1; + + NV_PRINTF(LEVEL_INFO, "Attempt %s eviction of 0x%llx pages of size 0x%x, " + "(0x%x frames per page) in the frame range 0x%llx..0x%llx\n", + contigFlag ? "contiguous" : "discontiguous", + numPagesLeftToAllocate, + pageSize, + framesPerPage, + (evictStart - addrBase) >> PMA_PAGE_SHIFT, + (evictEnd - addrBase) >> PMA_PAGE_SHIFT); + + status = _pmaEvictContiguous(pPma, pMap, evictStart, evictEnd); + } + else + { + // Default to allowing the whole region to be evicted + NvU64 evictPhysBegin = addrBase; + NvU64 evictPhysEnd = addrLimit; + + if (rangeFlag) + { + // + // And if a specific physical range was requested, intersect + // it with the region. + // + evictPhysBegin = NV_MAX(allocationOptions->physBegin, evictPhysBegin); + evictPhysEnd = NV_MIN(allocationOptions->physEnd, evictPhysEnd); + + // Regions that would cause the intersection to be empty are skipped. + NV_ASSERT(evictPhysBegin <= evictPhysEnd); + } + + NV_PRINTF(LEVEL_INFO, "Attempt %s eviction of 0x%llx pages of size 0x%x, " + "(0x%x frames per page), in the frame range 0x%llx..0x%llx\n", + contigFlag ? 
"contiguous" : "discontiguous", + numPagesLeftToAllocate, + pageSize, + framesPerPage, + (evictPhysBegin - addrBase) >> PMA_PAGE_SHIFT, + (evictPhysEnd - addrBase) >> PMA_PAGE_SHIFT); + + status = _pmaEvictPages(pPma, pMap, curPages, numPagesLeftToAllocate, + pPages, numPagesAllocatedSoFar, pageSize, evictPhysBegin, evictPhysEnd); + } + + if (status == NV_OK) + { + numPagesAllocatedSoFar = allocationCount; + } + else + { + NV_PRINTF(LEVEL_INFO, "Eviction/scrubbing failed, region after:\n"); + pmaRegionPrint(pPma, pPma->pRegDescriptors[regId], pMap); + } + + if (status == NV_ERR_INSUFFICIENT_RESOURCES) + { + NV_PRINTF(LEVEL_ERROR, "ERROR: scrubber OOM\n"); + + // Scrubber is out of memory + goto scrub_fatal; + } + } + } + + // + // if scrubbing is active in the background, release lock and spin until it + // completes, then re-try. + // + if ((status == NV_ERR_NO_MEMORY) && + (pmaPortAtomicGet(&pPma->initScrubbing) == PMA_SCRUB_IN_PROGRESS)) + { + // Release the spinlock before attempting a semaphore acquire. + portSyncSpinlockRelease(pPma->pPmaLock); + + // Wait until scrubbing is complete. + while (pmaPortAtomicGet(&pPma->initScrubbing) != PMA_SCRUB_DONE) + { + // Deschedule without PMA lock + pmaOsSchedule(); + } + NV_PRINTF(LEVEL_INFO, "Retrying after eviction/scrub\n"); + goto pmaAllocatePages_retry; + } + + if ((status == NV_ERR_NO_MEMORY) && partialFlag && (numPagesAllocatedSoFar > 0)) + { + // + // If scrub on free is enabled, continue to scrubWaitForAll if we haven't already, + // otherwise succeed the partial allocation. + // If scrub on free is not enabled, there is no waiting to try, so succeed the + // partial allocation immediately. + // + if (!bScrubOnFree || !tryAlloc) + { + NV_PRINTF(LEVEL_INFO, "Succeed partial allocation\n"); + status = NV_OK; + } + } + + if (status == NV_ERR_NO_MEMORY && bScrubOnFree) + { + PSCRUB_NODE pPmaScrubList = NULL; + NvU64 count; + portSyncSpinlockRelease(pPma->pPmaLock); + + NV_PRINTF(LEVEL_INFO, "Waiting for scrubber\n"); + + status = scrubCheckAndWaitForSize(pPma->pScrubObj, numPagesLeftToAllocate, + pageSize, &pPmaScrubList, &count); + + if (status == NV_OK) + { + if (count > 0) + { + _pmaClearScrubBit(pPma, pPmaScrubList, count); + } + + // + // Free the actual list, although allocated by objscrub + // there is no need for failure case handling to free the list, because the call + // returns error for 1)memory allocation failure or 2)nothing remaining to scrub. + // + portMemFree(pPmaScrubList); + } + + // + // Set tryEvict to NV_FALSE because we know UVM already failed eviction and any + // available memory that comes after we tried eviction will not be counted towards + // this allocation. 
+ // + if (tryAlloc) + { + tryAlloc = NV_FALSE; + tryEvict = NV_FALSE; + NV_PRINTF(LEVEL_INFO, "Retrying after waiting for scrubber\n"); + goto pmaAllocatePages_retry; + } + + if (blacklistOffFlag) + { + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + if (blacklistOffPerRegion[regionIdx] == NV_FALSE) + continue; + _pmaReallocBlacklistPages(pPma, regionIdx, blacklistOffAddrStart[regionIdx], blacklistOffRangeSize[regionIdx]); + } + } + if (bScrubOnFree) + { + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + portSyncMutexRelease(pPma->pAllocLock); + } + NV_PRINTF(LEVEL_INFO, "Returning OOM after waiting for scrubber\n"); + return NV_ERR_NO_MEMORY; + } + + if (status == NV_OK) + { + NvU32 i; + + // + // Here we need to double check if the scrubber was valid because the contiguous eviction + // which called pmaFreePages could have had a fatal failure that resulted in some + // pages being unscrubbed. + // + if (bScrubOnFree && (pmaPortAtomicGet(&pPma->scrubberValid) != PMA_SCRUBBER_VALID)) + { + portSyncSpinlockRelease(pPma->pPmaLock); + NV_PRINTF(LEVEL_FATAL, "Failing allocation because the scrubber is not valid.\n"); + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto scrub_fatal; + } + + // Commit + allocationOptions->numPagesAllocated = (NvLength)numPagesAllocatedSoFar; + + if (contigFlag) + { + NvU64 frameBase; + const NvU64 numFramesAllocated = framesPerPage * numPagesAllocatedSoFar; + + regId = findRegionID(pPma, pPages[0]); + pMap = pPma->pRegions[regId]; + addrBase = pPma->pRegDescriptors[regId]->base; + frameBase = PMA_ADDR2FRAME(pPages[0], addrBase); + + NV_PRINTF(LEVEL_INFO, "Successfully allocated frames 0x%llx through 0x%llx\n", + frameBase, + frameBase + numFramesAllocated - 1); + + for (i = 0; i < numPagesAllocatedSoFar; i++) + { + pPma->pMapInfo->pmaMapChangePageStateAttrib(pMap, frameBase + (i * framesPerPage), + pageSize, pinOption, NV_TRUE); + } + + if (blacklistOffFlag && blacklistOffPerRegion[regId]) + { + NvU64 allocatedRegionEnd = PMA_FRAME2ADDR(frameBase + numFramesAllocated - 1, addrBase) + PMA_GRANULARITY - 1; + NvU64 blacklistOffAddrEnd = blacklistOffAddrStart[regId] + blacklistOffRangeSize[regId] - 1; + blacklistOffPerRegion[regId] = NV_FALSE; + _pmaReallocBlacklistPages(pPma, regId, blacklistOffAddrStart[regId], (pPages[0] - blacklistOffAddrStart[regId] + 1)); + if (allocatedRegionEnd < blacklistOffAddrEnd) + _pmaReallocBlacklistPages(pPma, regId, allocatedRegionEnd, (blacklistOffAddrEnd - allocatedRegionEnd)); + } + } + else + { + NvU64 frameRangeStart = 0; + NvU64 lastFrameRangeEnd = 0; + NvU64 frameBase = 0; + + (void)frameRangeStart; //Silence the compiler + (void)lastFrameRangeEnd; + + NV_PRINTF(LEVEL_INFO, "Successfully allocated frames:\n"); + + for (i = 0; i < numPagesAllocatedSoFar; i++) + { + regId = findRegionID(pPma, pPages[i]); + pMap = pPma->pRegions[regId]; + addrBase = pPma->pRegDescriptors[regId]->base; + frameBase = PMA_ADDR2FRAME(pPages[i], addrBase); + + // Print out contiguous frames in the same NV_PRINTF + if (i == 0) + { + frameRangeStart = frameBase; + } + else if ((lastFrameRangeEnd + 1) != frameBase) + { + // Break in frame range detected + NV_PRINTF(LEVEL_INFO, "0x%llx through 0x%llx \n", + frameRangeStart, + lastFrameRangeEnd); + + frameRangeStart = frameBase; + } + lastFrameRangeEnd = frameBase + framesPerPage - 1; + + pPma->pMapInfo->pmaMapChangePageStateAttrib(pMap, PMA_ADDR2FRAME(pPages[i], addrBase), + pageSize, pinOption, NV_TRUE); + + } + NV_PRINTF(LEVEL_INFO, "0x%llx through 0x%llx \n", + frameRangeStart, + 
frameBase + framesPerPage - 1); + } + } + +normal_exit: + if (blacklistOffFlag) + { + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + if (blacklistOffPerRegion[regionIdx] == NV_FALSE) + continue; + _pmaReallocBlacklistPages(pPma, regionIdx, blacklistOffAddrStart[regionIdx], blacklistOffRangeSize[regionIdx]); + } + } + + portSyncSpinlockRelease(pPma->pPmaLock); + if (bScrubOnFree) + { + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + portSyncMutexRelease(pPma->pAllocLock); + } + return status; + +scrub_fatal: + if (blacklistOffFlag) + { + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + if (blacklistOffPerRegion[regionIdx] == NV_FALSE) + continue; + _pmaReallocBlacklistPages(pPma, regionIdx, blacklistOffAddrStart[regionIdx], blacklistOffRangeSize[regionIdx]); + } + } + // Note we do not have the PMA lock. + portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID); + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + portSyncMutexRelease(pPma->pAllocLock); + return status; + +} + +NV_STATUS +pmaAllocatePagesBroadcast +( + PMA **pPma, + NvU32 pmaCount, + NvLength allocationCount, + NvU32 pageSize, + PMA_ALLOCATION_OPTIONS *allocationOptions, + NvU64 *pPages +) +{ + + if (pPma == NULL || pmaCount == 0 || allocationCount == 0 + || (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB) + || pPages == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_ERR_GENERIC; +} + +NV_STATUS +pmaPinPages +( + PMA *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize +) +{ + NV_STATUS status = NV_OK; + NvU32 framesPerPage, regId, i, j; + NvU64 frameNum, addrBase; + PMA_PAGESTATUS state; + framesPerPage = pageSize >> PMA_PAGE_SHIFT; + + if (pPma == NULL || pageCount == 0 || pPages == NULL + || (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portSyncSpinlockAcquire(pPma->pPmaLock); + + for(i = 0; i < pageCount; i++) + { + regId = findRegionID(pPma, pPages[i]); + addrBase = pPma->pRegDescriptors[regId]->base; + frameNum = PMA_ADDR2FRAME(pPages[i], addrBase); + + for (j = 0; j < framesPerPage; j++) + { + state = pPma->pMapInfo->pmaMapRead(pPma->pRegions[regId], (frameNum + j), NV_TRUE); + + // + // Check for incorrect usage of the API where the caller requests to + // pin pages that are not allocated unpinned. + // + if ((state & STATE_MASK) != STATE_UNPIN) + status = NV_ERR_INVALID_STATE; + + // + // Check for pages being evicted. Notably this is expected if the + // call races with eviction. + // + if (state & ATTRIB_EVICTING) + status = NV_ERR_IN_USE; + + if (status != NV_OK) + { + // + // Don't print the error for the eviction case as that's + // expected to happen. 
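+                // NV_ERR_IN_USE is still propagated to the caller after the rollback
+                // below so the race with eviction can be handled there.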
+ // + if (status != NV_ERR_IN_USE) + { + NV_PRINTF(LEVEL_ERROR, + "Pin failed at page %d frame %d in region %d state %d\n", + i, j, regId, state); + } + _pmaRollback(pPma, pPages, i, j, pageSize, STATE_UNPIN); + goto done; + } + else + { + pPma->pMapInfo->pmaMapChangeState(pPma->pRegions[regId], (frameNum + j), STATE_PIN); + } + } + } + +done: + portSyncSpinlockRelease(pPma->pPmaLock); + + return status; +} + + +NV_STATUS +pmaUnpinPages +( + PMA *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize +) +{ + NvU32 framesPerPage, regId, i, j; + NvU64 frameNum, addrBase; + PMA_PAGESTATUS state; + framesPerPage = pageSize >> PMA_PAGE_SHIFT; + + if (pPma == NULL || pageCount == 0 || pPages == NULL + || (pageSize != _PMA_64KB && pageSize != _PMA_128KB && pageSize != _PMA_2MB && pageSize != _PMA_512MB)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portSyncSpinlockAcquire(pPma->pPmaLock); + + for(i = 0; i < pageCount; i++) + { + regId = findRegionID(pPma, pPages[i]); + addrBase = pPma->pRegDescriptors[regId]->base; + frameNum = PMA_ADDR2FRAME(pPages[i], addrBase); + + for (j = 0; j < framesPerPage; j++) + { + state = pPma->pMapInfo->pmaMapRead(pPma->pRegions[regId], (frameNum + j), NV_FALSE); + if (state != STATE_PIN) + { + NV_PRINTF(LEVEL_ERROR, "Unpin failed at %dth page %dth frame\n", + i, j); + _pmaRollback(pPma, pPages, i, j, pageSize, STATE_PIN); + return NV_ERR_INVALID_STATE; + + } + else + { + pPma->pMapInfo->pmaMapChangeState(pPma->pRegions[regId], (frameNum + j), STATE_UNPIN); + } + } + } + + portSyncSpinlockRelease(pPma->pPmaLock); + + return NV_OK; +} + + +void +pmaFreePages +( + PMA *pPma, + NvU64 *pPages, + NvU64 pageCount, + NvU64 size, + NvU32 flag +) +{ + // TODO Support free of multiple regions in one call?? + NvU64 i, j, frameNum, framesPerPage, addrBase; + NvU32 regId; + NvBool bScrubValid = NV_TRUE; + NvBool bNeedScrub = pPma->bScrubOnFree && !(flag & PMA_FREE_SKIP_SCRUB); + + NV_ASSERT(pPma != NULL); + NV_ASSERT(pageCount != 0); + NV_ASSERT(pPages != NULL); + + if (pageCount != 1) + { + NV_ASSERT((size == _PMA_64KB) || + (size == _PMA_128KB) || + (size == _PMA_2MB)); + } + + // Fork out new code path for NUMA sub-allocation from OS + if (pPma->bNuma) + { + portSyncSpinlockAcquire(pPma->pPmaLock); + pmaNumaFreeInternal(pPma, pPages, pageCount, size, flag); + portSyncSpinlockRelease(pPma->pPmaLock); + + return; + } + + framesPerPage = size >> PMA_PAGE_SHIFT; + + // Check if any scrubbing is done before we actually free + if (bNeedScrub) + { + portSyncRwLockAcquireRead(pPma->pScrubberValidLock); + if (pmaPortAtomicGet(&pPma->scrubberValid) == PMA_SCRUBBER_VALID) + { + if (_pmaCheckScrubbedPages(pPma, 0, NULL, 0) != NV_OK) + { + portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID); + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + bScrubValid = NV_FALSE; + NV_PRINTF(LEVEL_WARNING, "Scrubber object is not valid\n"); + } + } + else + { + // We allow free with invalid scrubber object + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + bScrubValid = NV_FALSE; + NV_PRINTF(LEVEL_WARNING, "Scrubber object is not valid\n"); + } + } + // Only hold Reader lock here if (bScrubValid && bNeedScrub) + + portSyncSpinlockAcquire(pPma->pPmaLock); + + for (i = 0; i < pageCount; i++) + { + regId = findRegionID(pPma, pPages[i]); + addrBase = pPma->pRegDescriptors[regId]->base; + frameNum = PMA_ADDR2FRAME(pPages[i], addrBase); + + _pmaReallocBlacklistPages(pPma, regId, pPages[i], pageCount * size); + + for (j = 0; j < framesPerPage; j++) + { + PMA_PAGESTATUS newStatus = 
(bScrubValid && bNeedScrub) ? ATTRIB_SCRUBBING : STATE_FREE; + // + // Reset everything except for the (ATTRIB_EVICTING and ATTRIB_BLACKLIST) state to support memory being freed + // after being picked for eviction. + // + pPma->pMapInfo->pmaMapChangeStateAttribEx(pPma->pRegions[regId], (frameNum + j), newStatus, ~(ATTRIB_EVICTING | ATTRIB_BLACKLIST)); + } + } + + portSyncSpinlockRelease(pPma->pPmaLock); + + // Maybe we need to scrub the page on free + if (bScrubValid && bNeedScrub) + { + PSCRUB_NODE pPmaScrubList = NULL; + NvU64 count; + if (scrubSubmitPages(pPma->pScrubObj, size, pPages, pageCount, + &pPmaScrubList, &count) != NV_OK) + { + portAtomicSetSize(&pPma->scrubberValid, PMA_SCRUBBER_INVALID); + goto exit; + } + + if (count > 0) + { + _pmaClearScrubBit(pPma, pPmaScrubList, count); + } +exit: + // Free the actual list, although allocated by objscrub + portMemFree(pPmaScrubList); + + portSyncRwLockReleaseRead(pPma->pScrubberValidLock); + } +} + + +void +pmaClearScrubRange +( + PMA *pPma, + NvU64 rangeBase, + NvU64 rangeLimit +) +{ + NvU32 regionIdx; + NvU64 physBase, physLimit; + + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + physBase = pPma->pRegDescriptors[regionIdx]->base; + physLimit = pPma->pRegDescriptors[regionIdx]->limit; + + if ((physBase >= rangeBase) && (physLimit <= rangeLimit)) + { + pmaSetBlockStateAttrib(pPma, physBase, physLimit - physBase + 1, 0, ATTRIB_SCRUBBING); + } + } +} + + +NV_STATUS +pmaScrubComplete +( + PMA *pPma +) +{ + NvU32 regionIdx; + NvU64 physBase, physLimit; + + + if (pPma == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pmaPortAtomicGet(&pPma->initScrubbing) != PMA_SCRUB_IN_PROGRESS) + { + return NV_ERR_GENERIC; + } + + // Clear the scrubbing bit for all regions + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + physBase = pPma->pRegDescriptors[regionIdx]->base; + physLimit = pPma->pRegDescriptors[regionIdx]->limit; + + pmaSetBlockStateAttrib(pPma, physBase, physLimit - physBase + 1, 0, ATTRIB_SCRUBBING); + } + + NV_PRINTF(LEVEL_INFO, "Inside\n"); + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + pmaRegionPrint(pPma, pPma->pRegDescriptors[regionIdx], pPma->pRegions[regionIdx]); + } + + portAtomicSetSize(&pPma->initScrubbing, PMA_SCRUB_DONE); + + + return NV_OK; +} + + +NV_STATUS +pmaRegisterEvictionCb +( + PMA *pPma, + pmaEvictPagesCb_t pEvictPagesCb, + pmaEvictRangeCb_t pEvictRangeCb, + void *ctxPtr +) +{ + NV_STATUS status = NV_OK; + + if (pPma == NULL || pEvictPagesCb == NULL || pEvictRangeCb == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // + // Lock the eviction callback mutex to guarantee that all the previously + // registered callbacks have been flushed before registering new ones. + // + portSyncMutexAcquire(pPma->pEvictionCallbacksLock); + + // + // Take the spin lock to make setting the callbacks atomic with allocations + // using the callbacks. + // + portSyncSpinlockAcquire(pPma->pPmaLock); + + // + // Both callbacks are always set together to a non-NULL value so just check + // one of them to make sure they are unset. 
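+    // Registering a second set of callbacks without first calling
+    // pmaUnregisterEvictionCb() fails with NV_ERR_INVALID_STATE.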
+ // + if (pPma->evictPagesCb == NULL) + { + pPma->evictPagesCb = pEvictPagesCb; + pPma->evictRangeCb = pEvictRangeCb; + pPma->evictCtxPtr = ctxPtr; + } + else + { + status = NV_ERR_INVALID_STATE; + } + + portSyncSpinlockRelease(pPma->pPmaLock); + + portSyncMutexRelease(pPma->pEvictionCallbacksLock); + + return status; +} + + +void +pmaUnregisterEvictionCb +( + PMA *pPma +) +{ + NvBool evictionPending; + + if (pPma == NULL) + return; + + // + // Lock the eviction callbacks mutex to prevent new callbacks from being + // registered while the old ones are being unregistered and flushed. + // + portSyncMutexAcquire(pPma->pEvictionCallbacksLock); + + // + // Take the spin lock to make removing the callbacks atomic with allocations + // using the callbacks. + // + portSyncSpinlockAcquire(pPma->pPmaLock); + + // TODO: Assert that no unpinned allocations are left. + + pPma->evictPagesCb = NULL; + pPma->evictRangeCb = NULL; + pPma->evictCtxPtr = NULL; + + evictionPending = pmaIsEvictionPending(pPma); + + portSyncSpinlockRelease(pPma->pPmaLock); + + // + // Even though no unpinned allocations should be present, there still could + // be pending eviction callbacks that picked some unpinned pages for + // eviction before they were freed. Wait for all of them to finish. + // + while (evictionPending) + { + // TODO: Consider adding a better wait mechanism. + pmaOsSchedule(); + + portSyncSpinlockAcquire(pPma->pPmaLock); + + evictionPending = pmaIsEvictionPending(pPma); + + portSyncSpinlockRelease(pPma->pPmaLock); + } + + portSyncMutexRelease(pPma->pEvictionCallbacksLock); +} + +void +pmaGetFreeMemory +( + PMA *pPma, + NvU64 *pBytesFree +) +{ + portSyncSpinlockAcquire(pPma->pPmaLock); + + *pBytesFree = pPma->pmaStats.numFreeFrames << PMA_PAGE_SHIFT; + + portSyncSpinlockRelease(pPma->pPmaLock); +} + +void +pmaGetTotalMemory +( + PMA *pPma, + NvU64 *pBytesTotal +) +{ + void *pMap; + NvU64 totalBytesInRegion; + NvU32 i; + + *pBytesTotal = 0; + + for (i = 0; i < pPma->regSize; i++) + { + pMap = pPma->pRegions[i]; + pPma->pMapInfo->pmaMapGetSize(pMap, &totalBytesInRegion); + + *pBytesTotal += totalBytesInRegion; + } +} + +NV_STATUS +pmaGetRegionInfo +( + PMA *pPma, + NvU32 *pRegSize, + PMA_REGION_DESCRIPTOR **ppRegionDesc +) +{ + if (pPma == NULL || pRegSize == NULL || ppRegionDesc == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *pRegSize = pPma->regSize; + *ppRegionDesc = pPma->pRegDescriptors[0]; + return NV_OK; +} + +void +pmaGetLargestFree +( + PMA *pPma, + NvU64 *pLargestFree, + NvU64 *pRegionBase, + NvU64 *pLargestOffset +) +{ + void *pMap; + NvU64 largestFreeInRegion; + NvU32 i; + + *pLargestFree = 0; + *pRegionBase = 0; + + // + // FIXME: This field is still not being used by any RM client. + // Set it to "bad" value for the present time. This should ideally + // contain the offset of the largest free chunk. + // + *pLargestOffset = ~0ULL; + + portSyncSpinlockAcquire(pPma->pPmaLock); + + for (i = 0; i < pPma->regSize; i++) + { + pMap = pPma->pRegions[i]; + pPma->pMapInfo->pmaMapGetLargestFree(pMap, &largestFreeInRegion); + + if (*pLargestFree < largestFreeInRegion) + { + *pLargestFree = largestFreeInRegion; + *pRegionBase = pPma->pRegDescriptors[i]->base; + } + } + + portSyncSpinlockRelease(pPma->pPmaLock); + + NV_PRINTF(LEVEL_INFO, "Largest Free Bytes = 0x%llx, base = 0x%llx, largestOffset = 0x%llx.\n", + *pLargestFree, *pRegionBase, *pLargestOffset); +} + +/*! + * @brief Returns a list of PMA allocated blocks which has ATTRIB_PERSISTENT + * attribute set. 
It will be used by FBSR module to save/restore + * clients PMA allocations during system suspend/resume. + * + * @param[in] pPma PMA pointer + * @param[in/out] ppPersistList Pointer to list of persistent segments + * + * @return + * NV_OK Success + * NV_ERR_NO_MEMORY Failure to allocate list + */ +NV_STATUS +pmaBuildPersistentList +( + PMA *pPma, + PRANGELISTTYPE *ppPersistList +) +{ + return pmaBuildList(pPma, ppPersistList, ATTRIB_PERSISTENT); +} + +/*! + * @brief Returns a list of all PMA allocated blocks. For all the PMA + * allocated blocks, either STATE_PIN or STATE_UNPIN attribute will + * be set. It will be used by FBSR module to save/restore clients + * PMA allocations for Unix GC-OFF based power management. + * + * @param[in] pPma PMA pointer + * @param[in/out] ppList Pointer to list of all the PMA allocated blocks. + * + * @return + * NV_OK Success + * NV_ERR_NO_MEMORY Failure to allocate list + */ +NV_STATUS +pmaBuildAllocatedBlocksList +( + PMA *pPma, + PRANGELISTTYPE *ppList +) +{ + return pmaBuildList(pPma, ppList, STATE_PIN | STATE_UNPIN); +} + +/*! + * @brief Frees previously generated list by function pmaBuildPersistentList(). + * + * @param[in] pPma PMA pointer + * @param[in/out] ppPersistList Pointer to list of persistent segments + * + * @return + * void + */ +void +pmaFreePersistentList +( + PMA *pPma, + PRANGELISTTYPE *ppPersistList +) +{ + pmaFreeList(pPma, ppPersistList); +} + +/*! + * @brief Frees previously generated list by function + * pmaBuildAllocatedBlocksList(). + * + * @param[in] pPma PMA pointer + * @param[in/out] ppList Pointer to list of all the PMA allocated blocks. + * + * @return + * void + */ +void +pmaFreeAllocatedBlocksList +( + PMA *pPma, + PRANGELISTTYPE *ppList +) +{ + pmaFreeList(pPma, ppList); +} + +/*! + * @brief Returns client managed blacklisted pages in the PMA region + * + * @param[in] pPma PMA pointer + * @param[in] pChunks pointer to blacklist addresses in the PMA region + * @param[in] pPageSize pointer to Size of each blacklist page addresses + * @param[in] pValidEntries pointer to valid client managed blacklist pages + * + * @return + * void + */ +void +pmaGetClientBlacklistedPages +( + PMA *pPma, + NvU64 *pChunks, + NvU32 *pPageSize, + NvU32 *pNumChunks +) +{ + NvU32 region = 0; + NvU32 validEntries = 0; + NvU32 chunk = 0; + + NvU32 blacklistCount = 0; + NvBool bClientManagedBlacklist = NV_FALSE; + PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pBlacklistChunk; + + for (region = 0; region < pPma->regSize; region++) + { + pmaQueryBlacklistInfo(pPma, &blacklistCount, + &bClientManagedBlacklist, &pBlacklistChunks); + if (blacklistCount && bClientManagedBlacklist) + { + for (chunk = 0; chunk < blacklistCount; chunk++) + { + pBlacklistChunk = &pBlacklistChunks[chunk]; + if (!pBlacklistChunk->bIsValid) + { + pChunks[validEntries++] = pBlacklistChunk->physOffset; + } + } + } + } + + if (validEntries == 0) + pChunks = NULL; + + *pPageSize = _PMA_64KB; + *pNumChunks = validEntries; +} + +/*! + * @brief Returns the total blacklist size in bytes for + * both statically and dynamically blacklisted pages. + * pDynamicBlacklistSize and pStaticBlacklistSize are only copied-out if non-NULL. 
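+ * Each blacklisted chunk is counted as one 64KB PMA page when computing these sizes.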
+ * + * @param[in] pPma PMA pointer + * @param[in] pDynamicBlacklistSize pointer to dynamic blacklist size (bytes) + * @param[in] pStaticBlacklistSize pointer to static blacklist size (bytes) + * + * @return + * void + */ +void +pmaGetBlacklistSize +( + PMA *pPma, + NvU32 *pDynamicBlacklistSize, + NvU32 *pStaticBlacklistSize +) +{ + NvU32 dynamicBlacklistCount = 0; + NvU32 staticBlacklistCount = 0; + NvU32 blacklistCount = 0; + NvU32 region, size; + + PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pChunk; + + for (region = 0; region < pPma->regSize; region++) + { + pmaQueryBlacklistInfo(pPma, &blacklistCount, + NULL, &pBlacklistChunks); + for (size = 0; size < blacklistCount; size++) + { + pChunk = &pBlacklistChunks[size]; + + if (pChunk->bIsDynamic) + dynamicBlacklistCount++; + else + staticBlacklistCount++; + } + } + + if (pDynamicBlacklistSize != NULL) + *pDynamicBlacklistSize = dynamicBlacklistCount << PMA_PAGE_SHIFT; + + if (pStaticBlacklistSize != NULL) + *pStaticBlacklistSize = staticBlacklistCount << PMA_PAGE_SHIFT; +} + +void +pmaClearScrubbedPages +( + PMA *pPma, + PSCRUB_NODE pPmaScrubList, + NvU64 count +) +{ + _pmaClearScrubBit(pPma, pPmaScrubList, count); +} + +void pmaPrintMapState +( + PMA *pPma +) +{ + NvU32 regionIdx; + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + pmaRegionPrint(pPma, pPma->pRegDescriptors[regionIdx], pPma->pRegions[regionIdx]); + } +} + +NV_STATUS +pmaAddToBlacklistTracking +( + PMA *pPma, + NvU64 physAddr +) +{ + PMA_BLACKLIST_ADDRESS blacklistPages = {0}; + NV_STATUS status = NV_OK; + if (pmaIsBlacklistingAddrUnique(pPma, physAddr)) + { + blacklistPages.physOffset = physAddr; + blacklistPages.bIsDynamic = NV_TRUE; + status = pmaRegisterBlacklistInfo(pPma, 0, &blacklistPages, 1); + } + return status; +} + +void +pmaGetTotalProtectedMemory +( + PMA *pPma, + NvU64 *pBytesTotal +) +{ + void *pMap; + NvU64 totalBytesInRegion; + NvU32 i; + + *pBytesTotal = 0; + + for (i = 0; i < pPma->regSize; i++) + { + if (pPma->pRegDescriptors[i]->bProtected) + { + pMap = pPma->pRegions[i]; + pPma->pMapInfo->pmaMapGetSize(pMap, &totalBytesInRegion); + *pBytesTotal += totalBytesInRegion; + } + } +} + +void +pmaGetTotalUnprotectedMemory +( + PMA *pPma, + NvU64 *pBytesTotal +) +{ + NvU64 totalBytesInProtectedRegion = 0; + NvU64 totalBytesOverall = 0; + + *pBytesTotal = 0; + + pmaGetTotalMemory(pPma, &totalBytesOverall); + pmaGetTotalProtectedMemory(pPma, &totalBytesInProtectedRegion); + + NV_ASSERT_OR_RETURN_VOID(totalBytesOverall >= totalBytesInProtectedRegion); + + *pBytesTotal = totalBytesOverall - totalBytesInProtectedRegion; +} + +void +pmaGetFreeProtectedMemory +( + PMA *pPma, + NvU64 *pBytesFree +) +{ + *pBytesFree = 0; +} + +void +pmaGetFreeUnprotectedMemory +( + PMA *pPma, + NvU64 *pBytesFree +) +{ + // When memory protection is not enabled all memory is unprotected + pmaGetFreeMemory(pPma, pBytesFree); +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.c b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.c new file mode 100644 index 000000000..3159626a8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.c @@ -0,0 +1,1256 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h" +#include "gpu/mem_mgr/mem_scrub.h" +#include "utils/nvprintf.h" +#include "utils/nvassert.h" + +#if !defined(SRT_BUILD) +// These files are not found on SRT builds +#include "os/os.h" +#else +static NvU64 osGetPageRefcount(NvU64 sysPagePhysAddr) +{ + return 0; +} + +static NvU64 osCountTailPages(NvU64 sysPagePhysAddr) +{ + return 0; +} + +static void osAllocReleasePage(NvU64 sysPagePhysAddr) +{ + return; +} + +NV_STATUS scrubCheck(OBJMEMSCRUB *pScrubber, PSCRUB_NODE *ppList, NvU64 *size) +{ + return NV_ERR_GENERIC; +} + +NV_STATUS scrubSubmitPages(OBJMEMSCRUB *pScrubber, NvU64 chunkSize, NvU64* pages, + NvU64 pageCount, PSCRUB_NODE *ppList, NvU64 *size) +{ + return NV_ERR_GENERIC; +} + +NV_STATUS scrubWaitPages(OBJMEMSCRUB *pScrubber, NvU64 chunkSize, NvU64* pages, NvU32 pageCount) +{ + return NV_ERR_GENERIC; +} + +NV_STATUS scrubCheckAndWaitForSize (OBJMEMSCRUB *pScrubber, NvU64 numPages, + NvU64 pageSize, PSCRUB_NODE *ppList, NvU64 *pSize) +{ + return NV_ERR_GENERIC; +} +#endif + +// Local helpers +NvU32 +findRegionID(PMA *pPma, NvU64 address) +{ + NvU32 i; + + for (i = 0; i < pPma->regSize; i++) + { + NvU64 start, limit; + start = pPma->pRegDescriptors[i]->base; + limit = pPma->pRegDescriptors[i]->limit; + if (address >= start && address <= limit) + { + return i; + } + } + + // Should never get here + NV_ASSERT(0); + return 0; +} + + +void +pmaPrintBlockStatus(PMA_PAGESTATUS blockStatus) +{ + // Use DBG_PRINTF so as not to prepend "NVRM:" everytime, as NV_PRINTF does + if ((blockStatus & STATE_MASK) == STATE_FREE) { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "STATE_FREE "); + } + else if ((blockStatus & STATE_MASK) == STATE_UNPIN) { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "STATE_UNPIN "); + } + else if ((blockStatus & STATE_MASK) == STATE_PIN) { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "STATE_PIN "); + } + else { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "UNKNOWN STATE"); + } + + if (blockStatus & ATTRIB_PERSISTENT) { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " | ATTRIB_PERSISTENT"); + } + else { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " "); + } + + if (blockStatus & ATTRIB_SCRUBBING) { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " | ATTRIB_SCRUBBING "); + } + else { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " "); + } 
+ + if (blockStatus & ATTRIB_EVICTING) { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " | ATTRIB_EVICTING "); + } + else { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " "); + } + + if (blockStatus & ATTRIB_BLACKLIST) { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " | ATTRIB_BLACKLIST "); + } + else { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " "); + } + + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n"); +} + +void +pmaRegionPrint(PMA *pPma, PMA_REGION_DESCRIPTOR *pRegion, void *pMap) +{ + NvU32 i; + PMA_PAGESTATUS currStatus, blockStatus = STATE_FREE; + NvU64 addrBase, addrLimit, numFrames, blockStart = 0; + + NV_ASSERT(pRegion != NULL); + NV_ASSERT(pMap != NULL); + + (void)blockStart; //Silence the compiler + + addrBase = pRegion->base; + addrLimit = pRegion->limit; + numFrames = (addrLimit - addrBase + 1) >> PMA_PAGE_SHIFT; + + NV_PRINTF(LEVEL_INFO, "Region: 0x%llx..0x%llx\n", addrBase, addrLimit); + NV_PRINTF(LEVEL_INFO, "Total frames: 0x%llx\n", numFrames); + + for (i = 0; i < numFrames; i++) + { + currStatus = pPma->pMapInfo->pmaMapRead(pMap, i, NV_TRUE); + if (i == 0) + { + blockStatus = currStatus; + blockStart = i; + } + + if (blockStatus != currStatus) + { + NV_PRINTF(LEVEL_INFO, "%8llx..%8x: ", blockStart, i-1); + pmaPrintBlockStatus(blockStatus); + + blockStatus = currStatus; + blockStart = i; + } + } + NV_PRINTF(LEVEL_INFO, "%8llx..%8x: ", blockStart, i-1); + pmaPrintBlockStatus(blockStatus); +} + +NvBool +pmaStateCheck(PMA *pPma) +{ + NvU32 size, i; + PMA_REGION_DESCRIPTOR *pState; + void *pMap = NULL; + + if (pPma == NULL) return NV_FALSE; + + size = pPma->regSize; + if (size >= PMA_REGION_SIZE) return NV_FALSE; + + if (pPma->bNuma) + { + if (!pPma->nodeOnlined) + { + NV_PRINTF(LEVEL_INFO, "Warning: NUMA state not onlined.\n"); + return NV_TRUE; + } + else if (pPma->numaNodeId == PMA_NUMA_NO_NODE) + { + NV_PRINTF(LEVEL_INFO, "NUMA node ID invalid.\n"); + return NV_FALSE; + } + } + + for (i = 0; i < size; i++) + { + pMap = pPma->pRegions[i]; + pState = pPma->pRegDescriptors[i]; + + if (pMap == NULL || pState == NULL) return NV_FALSE; + } + + return NV_TRUE; +} + +void +pmaSetBlockStateAttribUnderPmaLock +( + PMA *pPma, + NvU64 base, + NvU64 size, + PMA_PAGESTATUS pmaState, + PMA_PAGESTATUS pmaStateWriteMask +) +{ + NvU64 numFrames, baseFrame, i; + NvS32 regId; + void *pMap; + + NV_ASSERT(pPma != NULL); + NV_ASSERT(NV_IS_ALIGNED(base, PMA_GRANULARITY)); + NV_ASSERT(NV_IS_ALIGNED(size, PMA_GRANULARITY)); + + regId = findRegionID(pPma, base); // assume same id for base+size TODO check this + pMap = pPma->pRegions[regId]; + + numFrames = size >> PMA_PAGE_SHIFT; + baseFrame = (base - pPma->pRegDescriptors[regId]->base) >> PMA_PAGE_SHIFT; + + for (i = 0; i < numFrames; i++) + { + pPma->pMapInfo->pmaMapChangeStateAttribEx(pMap, (baseFrame + i), pmaState, pmaStateWriteMask); + } +} + +void +pmaSetBlockStateAttrib +( + PMA *pPma, + NvU64 base, + NvU64 size, + PMA_PAGESTATUS pmaState, + PMA_PAGESTATUS pmaStateWriteMask +) +{ + NV_ASSERT(pPma != NULL); + + portSyncSpinlockAcquire(pPma->pPmaLock); + + pmaSetBlockStateAttribUnderPmaLock(pPma, base, size, pmaState, pmaStateWriteMask); + + portSyncSpinlockRelease(pPma->pPmaLock); +} + +// This must be called with the PMA lock held! 
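+// Only transitions into or out of STATE_FREE adjust *pNumFree; attribute-only
+// changes leave the counter untouched because both states are masked first.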
+void +pmaStatsUpdateState +( + NvU64 *pNumFree, + NvU64 numPages, + PMA_PAGESTATUS oldState, + PMA_PAGESTATUS newState +) +{ + NV_ASSERT(pNumFree != NULL); + + oldState &= STATE_MASK; + newState &= STATE_MASK; + + if ((oldState == STATE_FREE) && (newState != STATE_FREE)) + { + (*pNumFree) -= numPages; + // NV_PRINTF(LEVEL_INFO, "Decrease to 0x%llx \n", *pNumFree); + } + else if ((oldState != STATE_FREE) && (newState == STATE_FREE)) + { + (*pNumFree) += numPages; + // NV_PRINTF(LEVEL_INFO, "Increase to 0x%llx \n", *pNumFree); + } +} + +NvBool pmaIsEvictionPending(PMA *pPma) +{ + NvU32 i; + void *pMap = NULL; + + for (i = 0; i < pPma->regSize; ++i) + { + pMap = pPma->pRegions[i]; + if (pPma->pMapInfo->pmaMapGetEvictingFrames(pMap) > 0) + return NV_TRUE; + } + + return NV_FALSE; +} + +void pmaOsSchedule(void) +{ + // TODO Move osSchedule() to nvport? +#if !defined(SRT_BUILD) + osSchedule(); +#endif +} + +/*! + * @brief Handle eviction results from UVM and free the reuse pages to + * OS if eviction failed half-way. + * If eviction was successful, we have to double check the refcount and + * decide if it's ok to reuse the pages for this eviction. + * See bug 2019754. + */ +static NV_STATUS +_pmaCleanupNumaReusePages +( + PMA *pPma, + NvU64 evictStart, + NvU64 numFrames, + NvBool bEvictionSucceeded +) +{ + NvU32 regId; + NvU64 sysPhysAddr = 0, sysPagePhysAddr = 0; + NvU64 frameNum, addrBase, i; + PMA_PAGESTATUS currentStatus; + NvBool bRaisedRefcount = NV_FALSE; + + regId = findRegionID(pPma, evictStart); + addrBase = pPma->pRegDescriptors[regId]->base; + frameNum = PMA_ADDR2FRAME(evictStart, addrBase); + sysPhysAddr = evictStart + pPma->coherentCpuFbBase; + + if (bEvictionSucceeded == NV_TRUE) + { + // + // If eviction from UVM succeeded, we double check the refcount and + // update whether we should reuse these pages or not. If refcount is + // greater than the appropriate number (1 for non-compound pages; for + // compound pages, refcount should be equal to the number of pages + // in this compound page), that means someone called get_user_pages + // on those pages and we need to fail this eviction. + // + for (i = 0; i < numFrames; i++) + { + sysPagePhysAddr = sysPhysAddr + (i << PMA_PAGE_SHIFT); + + if (osGetPageRefcount(sysPagePhysAddr) > osCountTailPages(sysPagePhysAddr)) + { + bRaisedRefcount = NV_TRUE; + break; + } + } + } + + if (!bEvictionSucceeded || bRaisedRefcount) + { + // + // Eviction Failed. Need to clean up. + // Since we set the NUMA_REUSE bit when we decide to reuse the pages, + // we know exactly which pages to free both to OS and in PMA bitmap. + // + for (i = 0; i < numFrames; i++) + { + currentStatus = pPma->pMapInfo->pmaMapRead(pPma->pRegions[regId], (frameNum + i), NV_TRUE); + sysPagePhysAddr = sysPhysAddr + (i << PMA_PAGE_SHIFT); + + if (currentStatus & ATTRIB_NUMA_REUSE) + { + osAllocReleasePage(sysPagePhysAddr); + pPma->pMapInfo->pmaMapChangeStateAttribEx(pPma->pRegions[regId], (frameNum + i), + STATE_FREE, (STATE_MASK | ATTRIB_NUMA_REUSE)); + } + } + + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + + +/*! + * @brief Eviction for contiguous allocation always evicts the full + * range to be allocated and the pmaMapScanContiguous() + * function sets the address to start eviction at as the first + * entry in the array of pages. 
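+ *        The range is marked ATTRIB_EVICTING before the PMA lock is dropped,
+ *        so no other allocation can claim it while the eviction callback runs.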
+ */ +NV_STATUS +_pmaEvictContiguous +( + PMA *pPma, + void *pMap, + NvU64 evictStart, + NvU64 evictEnd +) +{ + NV_STATUS status; + NvU64 numFramesToEvict; + NvU64 evictSize; + NvU64 frameEvictionsInProcess = pPma->pMapInfo->pmaMapGetEvictingFrames(pMap); + NvBool pmaNumaEnabled = pPma->bNuma; + + evictSize = evictEnd - evictStart + 1; + numFramesToEvict = evictSize >> PMA_PAGE_SHIFT; + frameEvictionsInProcess += numFramesToEvict; + pPma->pMapInfo->pmaMapSetEvictingFrames(pMap, frameEvictionsInProcess); + + pmaSetBlockStateAttribUnderPmaLock(pPma, evictStart, evictSize, ATTRIB_EVICTING, ATTRIB_EVICTING); + + // Release PMA lock before calling into UVM for eviction. + portSyncSpinlockRelease(pPma->pPmaLock); + + if (pPma->bScrubOnFree) + { + PSCRUB_NODE pPmaScrubList = NULL; + portSyncMutexRelease(pPma->pAllocLock); + + status = pPma->evictRangeCb(pPma->evictCtxPtr, evictStart, evictEnd); + + portSyncMutexAcquire(pPma->pAllocLock); + + NV_PRINTF(LEVEL_INFO, "evictRangeCb returned with status %llx\n", (NvU64)status); + + if (status != NV_OK) + { + goto evict_cleanup; + } + // For NUMA we will scrub only what's being evicted and returned to client. + if (pmaNumaEnabled) + { + // + // The evicting contiguous range is marked as ATTRIB_EVICTING + // and hence there will be no page stealing. + // + NvU64 count; + + if ((status = scrubSubmitPages(pPma->pScrubObj, (NvU32)evictSize, &evictStart, + 1, &pPmaScrubList, &count)) != NV_OK) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto scrub_exit; + } + + if (count > 0) + _pmaClearScrubBit(pPma, pPmaScrubList, count); + } + + if ((status = _pmaCheckScrubbedPages(pPma, evictSize, &evictStart, 1)) != NV_OK) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto scrub_exit; // just incase someone adds anything below. + } + +scrub_exit: + portMemFree(pPmaScrubList); + + if (!pmaNumaEnabled && + (status == NV_ERR_INSUFFICIENT_RESOURCES)) + { + NV_PRINTF(LEVEL_INFO, "ERROR: scrubber OOM!\n"); + goto exit; // fix this later, never exit early violating lock semantics + } + } + else + { + status = pPma->evictRangeCb(pPma->evictCtxPtr, evictStart, evictEnd); + NV_PRINTF(LEVEL_INFO, "evictRangeCb returned with status %llx\n", (NvU64)status); + } + +evict_cleanup: + // Reacquire PMA lock after returning from UVM and scrubber. + portSyncSpinlockAcquire(pPma->pPmaLock); + + // + // When we are in NUMA mode, we need to double check the NUMA_REUSE page attribute + // to possibly return these pages to OS. + // + if (pmaNumaEnabled) + { + status = _pmaCleanupNumaReusePages(pPma, evictStart, numFramesToEvict, (status == NV_OK)); + } + + pmaSetBlockStateAttribUnderPmaLock(pPma, evictStart, evictSize, 0, ATTRIB_EVICTING | ATTRIB_NUMA_REUSE); + + frameEvictionsInProcess = pPma->pMapInfo->pmaMapGetEvictingFrames(pMap); + NV_ASSERT(frameEvictionsInProcess >= numFramesToEvict); + pPma->pMapInfo->pmaMapSetEvictingFrames(pMap, (frameEvictionsInProcess - numFramesToEvict)); + +exit: + return status; +} + +/*! + * @brief Eviction for a non-contiguous range will allow the UVM driver to pick + * and evict the specific pages being evicted. The UVM driver is required to hand + * back pages to PMA in STATE_PIN state to prevent page stealing. 
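+ *        Pages already allocated to this request (allocPages) are pinned while
+ *        the PMA lock is dropped and unpinned once it is reacquired.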
+ */ +NV_STATUS +_pmaEvictPages +( + PMA *pPma, + void *pMap, + NvU64 *evictPages, + NvU64 evictPageCount, + NvU64 *allocPages, + NvU64 allocPageCount, + NvU32 pageSize, + NvU64 physBegin, + NvU64 physEnd +) +{ + NvU64 i; + NV_STATUS status; + NvU64 numFramesToEvict = evictPageCount * (pageSize >> PMA_PAGE_SHIFT); + NvU64 frameEvictionsInProcess = pPma->pMapInfo->pmaMapGetEvictingFrames(pMap); + NvBool pmaNumaEnabled = pPma->bNuma; + + frameEvictionsInProcess += numFramesToEvict; + pPma->pMapInfo->pmaMapSetEvictingFrames(pMap, frameEvictionsInProcess); + + // + // Pin all the already allocated pages before unlocking the PMA + // lock to prevent them from being allocated while eviction is + // happening. + // + for (i = 0; i < allocPageCount; i++) + pmaSetBlockStateAttribUnderPmaLock(pPma, allocPages[i], pageSize, STATE_PIN, STATE_PIN); + + // Release PMA lock before calling into UVM for eviction. + portSyncSpinlockRelease(pPma->pPmaLock); + + if (pPma->bScrubOnFree) + { + PSCRUB_NODE pPmaScrubList = NULL; + NvU64 count = 0; + + portSyncMutexRelease(pPma->pAllocLock); + status = pPma->evictPagesCb(pPma->evictCtxPtr, pageSize, evictPages, + (NvU32)evictPageCount, physBegin, physEnd); + portSyncMutexAcquire(pPma->pAllocLock); + + NV_PRINTF(LEVEL_INFO, "evictPagesCb returned with status %llx\n", (NvU64)status); + + if (status != NV_OK) + { + goto evict_cleanup; + } + + // Don't need to mark ATTRIB_SCRUBBING to protect the pages because they are already pinned + status = scrubSubmitPages(pPma->pScrubObj, pageSize, evictPages, + (NvU32)evictPageCount, &pPmaScrubList, &count); + NV_ASSERT_OR_GOTO((status == NV_OK), scrub_exit); + + if (count > 0) + _pmaClearScrubBit(pPma, pPmaScrubList, count); + + // Wait for our scrubbing to complete + status = _pmaCheckScrubbedPages(pPma, pageSize, evictPages, (NvU32)evictPageCount); +scrub_exit: + // Free the actual list, although allocated by objscrub + portMemFree(pPmaScrubList); + + if ((status != NV_OK) && !pmaNumaEnabled) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; // Caller expects this status. + NV_PRINTF(LEVEL_ERROR, "ERROR: scrubber OOM!\n"); + NV_ASSERT_OK_OR_RETURN(status); + } + } + else + { + status = pPma->evictPagesCb(pPma->evictCtxPtr, pageSize, evictPages, + (NvU32)evictPageCount, physBegin, physEnd); + NV_PRINTF(LEVEL_INFO, "evictPagesCb returned with status %llx\n", (NvU64)status); + } + +evict_cleanup: + // Reacquire PMA lock after returning from UVM. + portSyncSpinlockAcquire(pPma->pPmaLock); + + // Unpin the allocations now that we reacquired the PMA lock. 
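+    // Writing 0 with a STATE_PIN write mask clears only the pin bit; any other
+    // state or attribute bits on these pages are preserved.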
+ for (i = 0; i < allocPageCount; i++) + pmaSetBlockStateAttribUnderPmaLock(pPma, allocPages[i], pageSize, 0, STATE_PIN); + + frameEvictionsInProcess = pPma->pMapInfo->pmaMapGetEvictingFrames(pMap); + NV_ASSERT(frameEvictionsInProcess >= numFramesToEvict); + pPma->pMapInfo->pmaMapSetEvictingFrames(pMap, (frameEvictionsInProcess - numFramesToEvict)); + + return status; +} + +// +// Region selector +// Given specific PMA_ALLOCATE_* requirements, generate a list of possible intersecting regions +// Invalid regionList IDs set to -1 +// +NV_STATUS +pmaSelector +( + PMA *pPma, + PMA_ALLOCATION_OPTIONS *allocationOptions, + NvS32 *regionList +) +{ + // regSize never decreases + registered states don't change, so lock-free + NvU32 i; + NvU32 flags = allocationOptions->flags; + NvU32 regionCount = 0; + NV_STATUS status = NV_OK; + + NV_ASSERT(regionList != NULL); + NV_ASSERT(allocationOptions != NULL); + + for (i=0; i < pPma->regSize; i++) + { + if (flags & PMA_ALLOCATE_SPECIFY_REGION_ID) + { + if (i != allocationOptions->regionId) + { + // Skip: wrong region ID + continue; + } + } + + if (!!(flags & PMA_ALLOCATE_PROTECTED_REGION) ^ + (pPma->pRegDescriptors[i]->bProtected)) + { + // Don't allow unprotected allocations in protected region + // OR protected allocations in unprotected region. + continue; + } + + if (flags & PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE) + { + NvU64 regionBegin, regionEnd; + NvU64 rangeBegin, rangeEnd; + PMA_REGION_DESCRIPTOR *regionDes; + + rangeBegin = allocationOptions->physBegin; + rangeEnd = allocationOptions->physEnd; + + regionDes = pPma->pRegDescriptors[i]; + regionBegin = regionDes->base; + regionEnd = regionDes->limit; + + if ((rangeEnd < regionBegin) || (rangeBegin > regionEnd)) + { + // Skip: Requested range doesn't intersect region + continue; + } + } + + if (flags & PMA_ALLOCATE_SPECIFY_MINIMUM_SPEED) + { + if (pPma->pRegDescriptors[i]->performance < allocationOptions->minimumSpeed) + { + // Skip: Region perf less than minimum threshold + continue; + } + } + + if (regionCount > 1) + { + NvU32 j = regionCount; + + if (flags & PMA_ALLOCATE_PREFER_SLOWEST) + { + // Find insertion point (slowest to fastest) + while ((j > 0) && + (pPma->pRegDescriptors[i]->performance < pPma->pRegDescriptors[regionList[j-1]]->performance)) + { + regionList[j] = regionList[j-1]; + j--; + } + } + else + { + // Find insertion point (fastest to slowest) + while ((j > 0) && + (pPma->pRegDescriptors[i]->performance > pPma->pRegDescriptors[regionList[j-1]]->performance)) + { + regionList[j] = regionList[j-1]; + j--; + } + } + + // Insert in sorted order + regionList[j] = i; + regionCount++; + } + else + { + regionList[regionCount++] = i; + } + } + + // Invalidate the unused slots + for (i = regionCount; i < pPma->regSize; i++) + { + regionList[i] = -1; + } + + if (regionCount == 0) + { + status = NV_ERR_NO_MEMORY; + } + + return status; +} + +/*! + * @brief This function will get a list of base+size and then goes in and + * clear the scrubbing bit on any pages in these ranges. It is only called + * when we know something is done scrubbing. + * + * @param[in] pPmaScrubList The list of ranges that need to be cleared + * @param[in] count Length of the list + * + * Note: + * - This call takes the PMA lock! Do not call this with PMA lock held. 
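+ * - Only the ATTRIB_SCRUBBING attribute is cleared; the allocation state bits
+ *   are left untouched.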
+ */ +void +_pmaClearScrubBit +( + PMA *pPma, + PSCRUB_NODE pPmaScrubList, + NvU64 count +) +{ + NvU32 i; + NvU64 base; + NvU64 size; + + NV_ASSERT(count > 0); + portSyncSpinlockAcquire(pPma->pPmaLock); + + for (i = 0; i < count; i++) + { + base = pPmaScrubList[i].base; + size = pPmaScrubList[i].size; + NV_ASSERT(size > 0); + pmaSetBlockStateAttribUnderPmaLock(pPma, base, size, 0, ATTRIB_SCRUBBING); + } + portSyncSpinlockRelease(pPma->pPmaLock); +} + +/*! + * @brief This function will optionally wait for scrubbing to be finished for a + * list of pages, then check the scrubber status and clear the ATTRIB_SCRUBBING + * page attribute on any pages that have completed scrubbing + * + * @param[in] chunkSize The size of each page being waited on + * @param[in] pPages The list of pages being waited on + * @param[in] pageCount The number of pages we are waiting for + * If pageCount == 0, then we don't wait for any pages + * + * Locking: + * - This needs to be called without the PMA lock! + * - This call will take the PMA lock internally to modify page attributes. + */ +NV_STATUS +_pmaCheckScrubbedPages +( + PMA *pPma, + NvU64 chunkSize, + NvU64 *pPages, + NvU32 pageCount +) +{ + PSCRUB_NODE pPmaScrubList = NULL; + NvU64 count = 0; + NV_STATUS status = NV_OK; + + // If the caller wants to wait for something, we wait first before checking + if (pageCount != 0) + { + if ((status = scrubWaitPages(pPma->pScrubObj, chunkSize, pPages, pageCount)) != NV_OK) + return status; + } + + status = scrubCheck(pPma->pScrubObj, &pPmaScrubList, &count); + NV_ASSERT_OR_GOTO((status == NV_OK), exit); + + // This call takes the PMA lock! + if (count > 0) + _pmaClearScrubBit(pPma, pPmaScrubList, count); + +exit: + // Free the actual list, although allocated by objscrub + portMemFree(pPmaScrubList); + + return status; +} + + +NV_STATUS +_pmaPredictOutOfMemory +( + PMA *pPma, + NvLength allocationCount, + NvU32 pageSize, + PMA_ALLOCATION_OPTIONS *allocationOptions +) +{ + NvU32 alignFlag, partialFlag; + NvU64 alignment; + NvU64 free2mbPages = 0; + NvU64 bytesFree = 0; + + alignFlag = !!((allocationOptions->flags) & PMA_ALLOCATE_FORCE_ALIGNMENT); + partialFlag = !!((allocationOptions->flags) & PMA_ALLOCATE_ALLOW_PARTIAL); + alignment = allocationOptions->alignment; + + if ((alignFlag && (alignment == _PMA_2MB)) || pageSize == _PMA_2MB) + { + free2mbPages = pPma->pmaStats.numFree2mbPages; + + // If we have at least one page free, don't fail a partial allocation + if (partialFlag && (free2mbPages > 0)) + { + return NV_OK; + } + + if (free2mbPages < allocationCount) + { + return NV_ERR_NO_MEMORY; + } + } + + // Do a quick check and exit early if we are in OOM case + bytesFree = pPma->pmaStats.numFreeFrames << PMA_PAGE_SHIFT; + + // If we have at least one page free, don't fail a partial allocation + if (partialFlag && (bytesFree >= pageSize)) + { + return NV_OK; + } + + if (bytesFree < (pageSize * allocationCount)) + { + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +/*! + * @brief Internal function to intermittently free the blacklisted pages in the + * range of allocation request. This will enable PMA to allow OS to manage those + * blacklisted pages after being allocated. + * + * @param[in] pPma PMA Object + * @param[in] regId PMA Region ID , where the allocation falls into + * @param[in] rangeBegin Start address for the allocation range + * @param[in] rangeSize Size of the allocation region + * + * Locking: + * - This needs to be called with the PMA lock! 
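+ * - Matching blacklist chunks are marked invalid so that
+ *   _pmaReallocBlacklistPages() can re-blacklist them later.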
+ */ + +void +_pmaFreeBlacklistPages +( + PMA *pPma, + NvU32 regId, + NvU64 rangeBegin, + NvU64 rangeSize +) +{ + NvU32 blacklistCount = 0; + NvU32 chunk; + NvU64 alignedBlacklistAddr; + NvBool bClientManagedBlacklist = NV_FALSE; + PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pBlacklistChunk; + + pmaQueryBlacklistInfo(pPma, &blacklistCount, &bClientManagedBlacklist, &pBlacklistChunks); + + if(blacklistCount == 0) + return; // return early, nothing to do. + + for (chunk = 0; chunk < blacklistCount; chunk++) + { + pBlacklistChunk = &pBlacklistChunks[chunk]; + if (pBlacklistChunk->bIsValid && (pBlacklistChunk->physOffset >= rangeBegin && + pBlacklistChunk->physOffset <= (rangeBegin + rangeSize - 1))) + { + // + // Clear the blacklist attribute of the pages + // Since physOffset here is the absolute address, make sure we align it to 64k + // + alignedBlacklistAddr = NV_ALIGN_DOWN64(pBlacklistChunk->physOffset, PMA_GRANULARITY); + pmaSetBlockStateAttribUnderPmaLock(pPma, alignedBlacklistAddr, PMA_GRANULARITY, 0, ATTRIB_BLACKLIST); + pBlacklistChunk->bIsValid = NV_FALSE; + bClientManagedBlacklist = NV_TRUE; + } + } + + pmaSetClientManagedBlacklist(pPma, bClientManagedBlacklist); + + return; +} + +/*! + * @brief Internal function to reallocate blacklisted pages in the + * range of allocation request.This is called, when the allocation requesting + * blacklisting OFF fails or when the allocation with blacklisting OFF gets free-d. + * + * @param[in] pPma PMA Object + * @param[in] regId PMA Region ID , where the allocation falls into + * @param[in] rangeBegin Start address for the allocation range + * @param[in] rangeSize Size of the allocation region + * + * Locking: + * - This needs to be called with the PMA lock! + */ + +void _pmaReallocBlacklistPages +( + PMA *pPma, + NvU32 regId, + NvU64 rangeBegin, + NvU64 rangeSize +) +{ + NvU32 blacklistCount = 0; + NvU32 chunk; + NvU64 alignedBlacklistAddr; + NvU32 reallocatedBlacklistCount = 0; + + NvBool bClientManagedBlacklist = NV_FALSE; + PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pBlacklistChunk; + pmaQueryBlacklistInfo(pPma, &blacklistCount, &bClientManagedBlacklist, &pBlacklistChunks); + + if (blacklistCount == 0 || !bClientManagedBlacklist) + { + return; + } + + // Assert if scrub on free is enabled for client managed blacklist + NV_ASSERT(pPma->bScrubOnFree == NV_FALSE); + + for (chunk = 0; chunk < blacklistCount; chunk++) + { + pBlacklistChunk = &pBlacklistChunks[chunk]; + if (!pBlacklistChunk->bIsValid && + (pBlacklistChunk->physOffset >= rangeBegin && + pBlacklistChunk->physOffset <= (rangeBegin + rangeSize -1))) + { + // Since physOffset here is the absolute address, make sure we align it to 64k + alignedBlacklistAddr = NV_ALIGN_DOWN64(pBlacklistChunk->physOffset, PMA_GRANULARITY); + pmaSetBlockStateAttribUnderPmaLock(pPma, alignedBlacklistAddr, PMA_GRANULARITY, ATTRIB_BLACKLIST, ATTRIB_BLACKLIST); + pBlacklistChunk->bIsValid = NV_TRUE; + } + reallocatedBlacklistCount = (pBlacklistChunk->bIsValid == NV_TRUE) ? reallocatedBlacklistCount+1: + reallocatedBlacklistCount; + } + + // Reset the flag if client handed over the blacklisted pages in their region to RM. + if (chunk == reallocatedBlacklistCount) + { + pmaSetClientManagedBlacklist(pPma, NV_FALSE); + } +} + +/*! + * @brief Internal function to lookup if the current frame is blacklisted already + * If so, we will return NV_TRUE, otherwise NV_FALSE. 
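+ * Only blacklist entries that are both valid and client-managed are checked.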
+ * + * @param[in] pPma PMA Object + * @param[in] regId PMA Region ID , where the allocation falls into + * @param[in] frameNum Frame Number which needs to be checked. + * + * Locking: + * - This needs to be called with the PMA lock! + */ +NvBool +_pmaLookupBlacklistFrame +( + PMA *pPma, + NvU32 regId, + NvU64 frameNum +) +{ + NvU32 blacklistCount; + NvU64 addrBase; + NvU32 chunk; + NvU64 cliManagedBlackFrame = 0; + + NvBool bClientManagedBlacklist = NV_FALSE; + PMA_BLACKLIST_CHUNK *pBlacklistChunks, *pBlacklistChunk; + + pmaQueryBlacklistInfo(pPma, &blacklistCount, &bClientManagedBlacklist, &pBlacklistChunks); + + if (blacklistCount == 0 || !bClientManagedBlacklist) + return NV_FALSE; + + addrBase = pPma->pRegDescriptors[regId]->base; + for (chunk = 0; chunk < blacklistCount; chunk++) + { + pBlacklistChunk = &pBlacklistChunks[chunk]; + if (pBlacklistChunk->bIsValid) + { + // calculate the frame addr + cliManagedBlackFrame = PMA_ADDR2FRAME(pBlacklistChunk->physOffset, addrBase); + if (cliManagedBlackFrame == frameNum) + { + return NV_TRUE; + } + } + } + return NV_FALSE; +} + +/*! + * @brief Returns a list of PMA-managed blocks with the specified state and + * attributes. + * + * @param[in] pPma PMA pointer + * @param[in/out] ppList Pointer to list of segments having specified + * state and attributes + * @param[in] pageStatus PMA page state and attribute + * + * @return + * NV_OK Success + * NV_ERR_NO_MEMORY Failure to allocate list + */ +NV_STATUS +pmaBuildList +( + PMA *pPma, + PRANGELISTTYPE *ppList, + PMA_PAGESTATUS pageStatus +) +{ + NvU32 regionIdx, frameNum; + NvU64 addrBase, addrLimit, numFrames; + NvU64 blockStart = 0, blockEnd = 0; + NvBool bBlockValid; + PMA_PAGESTATUS pageState; + PRANGELISTTYPE pRangeCurr, pRangeList = NULL; + NV_STATUS status = NV_OK; + void *pMap = NULL; + + for (regionIdx = 0; regionIdx < pPma->regSize; regionIdx++) + { + pMap = pPma->pRegions[regionIdx]; + addrBase = pPma->pRegDescriptors[regionIdx]->base; + addrLimit = pPma->pRegDescriptors[regionIdx]->limit; + numFrames = (addrLimit - addrBase + 1) >> PMA_PAGE_SHIFT; + bBlockValid = NV_FALSE; + + for (frameNum = 0; frameNum < numFrames; frameNum++) + { + pageState = pPma->pMapInfo->pmaMapRead(pMap, frameNum, NV_TRUE); + if (pageState & pageStatus) + { + if (bBlockValid) + { + // Block start already found. Find the end + blockEnd = frameNum; + } + else + { + // Block start found. Now find the end + blockStart = frameNum; + blockEnd = frameNum; + bBlockValid = NV_TRUE; + } + } + else if (bBlockValid) + { + // Block found having required PMA page state. Store it in the list + pRangeCurr = (PRANGELISTTYPE) portMemAllocNonPaged(sizeof(RANGELISTTYPE)); + if (pRangeCurr) + { + pRangeCurr->base = addrBase + blockStart * PMA_GRANULARITY; + pRangeCurr->limit = addrBase + blockEnd * PMA_GRANULARITY + PMA_GRANULARITY - 1; + pRangeCurr->pNext = pRangeList; + pRangeList = pRangeCurr; + } + else + { + // Allocation failed + pmaFreeList(pPma, &pRangeList); + pRangeList = NULL; + status = NV_ERR_NO_MEMORY; + break; + } + + bBlockValid = NV_FALSE; + } + } + } + + *ppList = pRangeList; + + return status; +} + +/*! 
+ * @brief Frees previously generated list of PMA-managed blocks with + * function pmaBuildList() + * + * @param[in] pPma PMA pointer + * @param[in/out] ppList Pointer to list of PMA segments + * + * @return + * None + */ +void +pmaFreeList +( + PMA *pPma, + PRANGELISTTYPE *ppList +) +{ + PRANGELISTTYPE pRangeCurr = *ppList; + PRANGELISTTYPE pRangeNext; + + while (pRangeCurr) + { + pRangeNext = pRangeCurr->pNext;; + portMemFree(pRangeCurr); + pRangeCurr = pRangeNext; + } + + *ppList = NULL; +} + +NV_STATUS +pmaRegisterBlacklistInfo +( + PMA *pPma, + NvU64 physAddrBase, + PMA_BLACKLIST_ADDRESS *pBlacklistPageBase, + NvU32 blacklistCount +) +{ + NvU32 i; + NvU64 alignedBlacklistAddr; + PMA_BLACKLIST_CHUNK *pBlacklistChunk = NULL; + NvU32 nextBlacklistEntry = 0; + NvU32 blacklistEntryIn = 0; + + if (blacklistCount == 0 || pBlacklistPageBase == NULL) + { + return NV_OK; + } + + if (pPma->pBlacklistChunks == NULL) + { + pPma->pBlacklistChunks = (PMA_BLACKLIST_CHUNK *) + portMemAllocNonPaged( PMA_MAX_BLACKLIST_ENTRIES * sizeof(PMA_BLACKLIST_CHUNK)); + if (pPma->pBlacklistChunks == NULL) + { + pPma->blacklistCount = 0; + NV_PRINTF(LEVEL_ERROR, "ERROR: Insufficient memory to allocate blacklisting tracking structure.\n"); + return NV_ERR_NO_MEMORY; + } + portMemSet(pPma->pBlacklistChunks, 0, PMA_MAX_BLACKLIST_ENTRIES * sizeof(PMA_BLACKLIST_CHUNK)); + } + + nextBlacklistEntry = pPma->blacklistCount; + + for (i = nextBlacklistEntry; i < (nextBlacklistEntry + blacklistCount); i++) + { + pBlacklistChunk = &pPma->pBlacklistChunks[i]; + pBlacklistChunk->physOffset = pBlacklistPageBase[blacklistEntryIn].physOffset; + pBlacklistChunk->bIsDynamic = pBlacklistPageBase[blacklistEntryIn].bIsDynamic; + + // Since physOffset here is the absolute address, make sure we align it to 64K + alignedBlacklistAddr = NV_ALIGN_DOWN64(pBlacklistPageBase[blacklistEntryIn].physOffset, PMA_GRANULARITY); + pmaSetBlockStateAttrib(pPma, alignedBlacklistAddr, PMA_GRANULARITY, ATTRIB_BLACKLIST, ATTRIB_BLACKLIST); + pBlacklistChunk->bIsValid = NV_TRUE; + + blacklistEntryIn++; + } + + pPma->blacklistCount += blacklistCount; + + return NV_OK; +} + +void +pmaSetClientManagedBlacklist +( + PMA *pPma, + NvBool bClientManagedBlacklist +) +{ + pPma->bClientManagedBlacklist = bClientManagedBlacklist; +} + +void +pmaQueryBlacklistInfo +( + PMA *pPma, + NvU32 *pBlacklistCount, + NvBool *pbClientManagedBlacklist, + PMA_BLACKLIST_CHUNK **ppBlacklistChunks +) +{ + if (pBlacklistCount) + { + *pBlacklistCount = pPma->blacklistCount; + } + + if (pbClientManagedBlacklist) + { + *pbClientManagedBlacklist = pPma->bClientManagedBlacklist; + } + + if (ppBlacklistChunks) + { + *ppBlacklistChunks = pPma->pBlacklistChunks; + } +} + +NvBool +pmaIsBlacklistingAddrUnique +( + PMA *pPma, + NvU64 physAddr +) +{ + NvU32 count = 0; + PMA_BLACKLIST_CHUNK *pBlacklistChunk = NULL; + if (pPma->blacklistCount == 0) + { + return NV_TRUE; + } + for (count = 0; count < pPma->blacklistCount; count++) + { + pBlacklistChunk = &pPma->pBlacklistChunks[count]; + if (pBlacklistChunk->physOffset == physAddr) + { + return NV_FALSE; + } + } + return NV_TRUE; +} + diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/regmap.c b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/regmap.c new file mode 100644 index 000000000..d3807a32a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/phys_mem_allocator/regmap.c @@ -0,0 +1,1111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file regmap.c + * @brief A bit map to keep track of FB frames + * + * @bug None + */ + +#include "gpu/mem_mgr/phys_mem_allocator/regmap.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.h" +#include "utils/nvprintf.h" +#include "utils/nvassert.h" +#include "nvport/nvport.h" +#include "nvmisc.h" + +#define _UINT_SIZE 64 +#define _UINT_SHIFT 6 + +#define PAGE_BITIDX(n) ((n) & (_UINT_SIZE - 1)) +#define PAGE_MAPIDX(n) ((n) >> _UINT_SHIFT) +#define MAKE_BITMASK(n) ((NvU64)0x1 << (n)) + +#define SETBITS(bits, mask, newVal) ((bits & (~mask)) | (mask & newVal)) + +//////////////// DEBUG /////////////// + +void +pmaRegmapPrint(PMA_REGMAP *pMap) +{ + NvU32 i, j; + + NV_ASSERT(pMap != NULL); + NV_ASSERT(pMap->map != NULL); + + for (j = 0; j < PMA_BITS_PER_PAGE; j++) + { + NV_PRINTF(LEVEL_INFO, "*** %d-th MAP ***\n", j); + for (i = 0; i < pMap->mapLength; i+=4) + { + NV_PRINTF(LEVEL_INFO, "map[%d]: %llx ", i, (pMap->map)[j][i]); + NV_PRINTF(LEVEL_INFO, "map[%d]: %llx ", i+1, (i+1 > pMap->mapLength) ? 0 : (pMap->map)[j][i+1]); + NV_PRINTF(LEVEL_INFO, "map[%d]: %llx ", i+2, (i+2 > pMap->mapLength) ? 0 : (pMap->map)[j][i+2]); + NV_PRINTF(LEVEL_INFO, "map[%d]: %llx \n", i+3, (i+3 > pMap->mapLength) ? 0 : (pMap->map)[j][i+3]); + } + } +} + +// Returns the longest string of zeros. +static NvU32 +maxZerosGet(NvU64 bits) +{ + NvU32 y = 0; + + bits = ~bits; + while (bits != 0) + { + bits = bits & (bits << 1); + y++; + } + return y; +} + +static NvS64 +_checkOne(NvU64 *bits, NvU64 start, NvU64 end) +{ + NvS32 firstSetBit; + NvU64 startMapIdx, startBitIdx, endMapIdx, endBitIdx, mapIdx; + NvU64 startMask, endMask, handle; + + startMapIdx = PAGE_MAPIDX(start); + startBitIdx = PAGE_BITIDX(start); + endMapIdx = PAGE_MAPIDX(end); + endBitIdx = PAGE_BITIDX(end); + + if (startMapIdx < endMapIdx) + { + // + // Ensure the intermediate bitmaps are all good. We"ll handle end + // bitmaps later. 
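+        // A non-zero intermediate word means at least one frame in it has the
+        // bit set; the index of that frame is returned.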
+ // + for (mapIdx = startMapIdx + 1; mapIdx <= (endMapIdx - 1); mapIdx++) + { + if (bits[mapIdx] != 0) + { + firstSetBit = portUtilCountTrailingZeros64(bits[mapIdx]); + return ((mapIdx << _UINT_SHIFT) + firstSetBit); + } + } + + // handle edge case + endMask = (NV_U64_MAX >> (_UINT_SIZE - endBitIdx - 1)); + + if ((bits[endMapIdx] & endMask) == 0) + { + startMask = (NV_U64_MAX << startBitIdx); + + if ((bits[startMapIdx] & startMask) == 0) + { + return -1; + } + else + { + handle = (bits[startMapIdx] & startMask); + firstSetBit = portUtilCountTrailingZeros64(handle); + NV_ASSERT((NvU64)firstSetBit >= startBitIdx); + return (start - startBitIdx + firstSetBit); + } + } + else + { + handle = bits[endMapIdx] & endMask; + firstSetBit = portUtilCountTrailingZeros64(handle); + return (end - endBitIdx + firstSetBit); + } + } + else + { + NV_ASSERT(startMapIdx == endMapIdx); + + startMask = (NV_U64_MAX << startBitIdx); + endMask = (NV_U64_MAX >> (_UINT_SIZE - endBitIdx - 1)); + + handle = (startMask & endMask); + if ((handle & bits[startMapIdx]) == 0) + { + return -1; + } + else + { + handle = handle & bits[startMapIdx]; + firstSetBit = portUtilCountTrailingZeros64(handle); + NV_ASSERT(firstSetBit != 64); + return (start - startBitIdx + firstSetBit); + } + } +} + +static NvU64 alignUpToMod(NvU64 frame, NvU64 alignment, NvU64 mod) +{ + if ((frame & (alignment - 1)) <= mod) + return NV_ALIGN_DOWN(frame, alignment) + mod; + else + return NV_ALIGN_UP(frame, alignment) + mod; +} + +// +// Determine if all frames in the 2MB range is not allocated +// They could be in scrubbing or eviction state. +// +static NvBool _pmaRegmapAllFree2mb(PMA_REGMAP *pRegmap, NvU64 frameNum) +{ + NvU64 baseFrame = (NV_ALIGN_DOWN((frameNum << PMA_PAGE_SHIFT), _PMA_2MB)) >> PMA_PAGE_SHIFT; + NvU32 numFrames = _PMA_2MB >> PMA_PAGE_SHIFT; + + // Always return false if the last 2MB range is incomplete + if ((baseFrame + numFrames) >= pRegmap->totalFrames) + { + return NV_FALSE; + } + + // + // We only care about STATE_PIN and STATE_UNPIN because: + // Even if the page is marked as SCRUBBING for example, we should not report OOM and prevent + // the clients from scanning the bitmap. 
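+    // A frame with only attribute bits set (and neither allocation bit) still
+    // counts as free for 2MB-page accounting.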
+ // + if (_checkOne(pRegmap->map[MAP_IDX_ALLOC_PIN], baseFrame, (baseFrame + numFrames - 1)) != -1) + { + return NV_FALSE; + } + + if (_checkOne(pRegmap->map[MAP_IDX_ALLOC_UNPIN], baseFrame, (baseFrame + numFrames - 1)) != -1) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +// +// Check whether the specified frame range is available completely for eviction +// +// Returns: +// - -1 if the whole range is evictable +// +// - Index of the last unevictable frame number +// +// +static NvS64 +_pmaRegmapScanNumaUnevictable +( + PMA_REGMAP *pRegmap, + NvU64 frameBegin, + NvU64 frameEnd +) +{ + NvS64 unevictableFrameIndex = -1; + NvU32 unevictableIndex = 0; + NvU64 *unpinBitmap = pRegmap->map[MAP_IDX_ALLOC_UNPIN]; + NvU64 *evictBitmap = pRegmap->map[MAP_IDX_EVICTING]; + NvU64 mapIter; + NvU64 startMapIdx = PAGE_MAPIDX(frameBegin); + NvU64 startBitIdx = PAGE_BITIDX(frameBegin); + NvU64 endMapIdx = PAGE_MAPIDX(frameEnd); + NvU64 endBitIdx = PAGE_BITIDX(frameEnd); + +#ifdef DEBUG_VERBOSE + NvU64 *pinBitmap = pRegmap->map[MAP_IDX_ALLOC_PIN]; +#endif + + for (mapIter = endMapIdx; mapIter >= startMapIdx; mapIter--) + { + NvU64 mask = NV_U64_MAX; + + if (mapIter == endMapIdx) + { + mask = (mask >> (_UINT_SIZE - endBitIdx - 1)); + } + + if (mapIter == startMapIdx) + { + mask = (mask & ~(MAKE_BITMASK(startBitIdx) - 1)); + } + +#ifdef DEBUG_VERBOSE + + NV_PRINTF(LEVEL_INFO, "mapIter %llx frame %llx mask %llx unpinbitmap %llx pinbitmap %llx evictbitmap %llx", + mapIter, (mapIter << _UINT_SHIFT), mask, unpinBitmap[mapIter], pinBitmap[mapIter], evictBitmap[mapIter]); +#endif + // start from the end + if ((unpinBitmap[mapIter] & mask) == mask) + continue; // go to previous bits + + + // MSB is 0 or all bits are 0 return the next frame + if ((unpinBitmap[mapIter] & mask) == 0) + { + if (mapIter == endMapIdx) + unevictableFrameIndex = frameEnd; + else + unevictableFrameIndex = (mapIter << _UINT_SHIFT) + (_UINT_SIZE - 1); + break; + } +#ifdef DEBUG_VERBOSE + NV_PRINTF(LEVEL_INFO, "Check leading zero of %llx", ~(unpinBitmap[mapIter] & mask)); +#endif + + unevictableIndex = _UINT_SIZE - portUtilCountLeadingZeros64((~unpinBitmap[mapIter]) & mask) - 1; + unevictableFrameIndex = (mapIter << _UINT_SHIFT) + unevictableIndex; + break; + } + + if (unevictableFrameIndex == -1) + { +#ifdef DEBUG_VERBOSE + NV_PRINTF(LEVEL_INFO, "Evictable range found between frameBegin %llx and frameEnd %llx", frameBegin, frameEnd); +#endif + + if (pRegmap->frameEvictionsInProcess && ((unevictableFrameIndex = _checkOne(evictBitmap, frameBegin, frameEnd)) != -1)) + { +#ifdef DEBUG_VERBOSE + NV_PRINTF(LEVEL_INFO, "The allocation range is already being evicted frameBegin %llx and frameEnd %llx", frameBegin, frameEnd); +#endif + return unevictableFrameIndex; + } + } + + return unevictableFrameIndex; +} + + +// +// Determine a contiguous evictable range of size actualSize +// +// Returns: +// - NV_ERR_NO_MEMORY if eviction is not possible for this size +// +// - NV_OK if there is a valid contiguous evictable range +// starting and ending at address stored at evictStart and evictEnd +// +// + +NV_STATUS +pmaRegMapScanContiguousNumaEviction +( + void *pMap, + NvU64 addrBase, + NvLength actualSize, + NvU64 pageSize, + NvU64 *evictStart, + NvU64 *evictEnd +) +{ + NV_STATUS status = NV_ERR_NO_MEMORY; + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + + NvU64 alignedAddrBase; + NvU64 frameNum; + NvU64 endFrame, frameStart; + NvU64 alignment = pageSize; + NvU64 frameAlignment, frameAlignmentPadding; + NvU64 numFrames = actualSize >> PMA_PAGE_SHIFT; + + 
endFrame = pRegmap->totalFrames - 1; + + if (pRegmap->totalFrames < numFrames) + return status; + + //Copied from pmaRegmapScanContiguous + frameAlignment = alignment >> PMA_PAGE_SHIFT; + alignedAddrBase = NV_ALIGN_UP(addrBase, alignment); + frameAlignmentPadding = (alignedAddrBase - addrBase) >> PMA_PAGE_SHIFT; + frameStart = alignUpToMod(0, frameAlignment, frameAlignmentPadding); + + + for (frameNum = frameStart; frameNum <= endFrame; ) + { + PMA_PAGESTATUS startFrameAllocState; + PMA_PAGESTATUS endFrameAllocState; + NvS64 firstUnevictableFrame; + + startFrameAllocState = pmaRegmapRead(pRegmap, frameNum, NV_TRUE); + endFrameAllocState = pmaRegmapRead(pRegmap, frameNum + numFrames - 1, NV_TRUE); + + if ((endFrameAllocState & STATE_MASK) != STATE_UNPIN) + { + // end is not available jump from start to after numFrames + frameNum += numFrames; + frameNum = alignUpToMod(frameNum, frameAlignment, frameAlignmentPadding); + continue; + } + + if ((startFrameAllocState & STATE_MASK) != STATE_UNPIN) + { + // startFrame is unavailable, jump to next aligned frame + frameNum += frameAlignment; + continue; + } + + + // First occurrence of 0 in STATE_UNPIN from frameNum to frameNum + numFrames - 1 + firstUnevictableFrame = _pmaRegmapScanNumaUnevictable(pRegmap, frameNum, frameNum + numFrames - 1); + + if (firstUnevictableFrame == -1) + { + NV_PRINTF(LEVEL_INFO, + "Evictable frame = %lld evictstart = %llx evictEnd = %llx\n", + frameNum, addrBase + (frameNum << PMA_PAGE_SHIFT), + (addrBase + (frameNum << PMA_PAGE_SHIFT) + actualSize - 1)); + + *evictStart = addrBase + (frameNum << PMA_PAGE_SHIFT); + *evictEnd = *evictStart + actualSize - 1; + status = NV_OK; + break; + } + else + { + // get the next aligned frame after the unevictable frame. + frameNum = alignUpToMod(firstUnevictableFrame + 1, frameAlignment, frameAlignmentPadding); + } + } + + return status; +} +// +// Check whether the specified frame range is available for allocation or +// eviction. +// +// Returns: +// - NV_OK if the whole range is available and leaves frameIndex unset. +// +// - NV_ERR_IN_USE if some frames would need to be evicted, and sets frameIndex +// to the first one. +// +// - NV_ERR_NO_MEMORY if some frames are unavailable, and sets frameIndex to +// the first one. +// +// TODO: Would it be better to return the last frame index instead, given how the +// search skips over right past it? +// +static NV_STATUS +_pmaRegmapStatus(PMA_REGMAP *pRegmap, NvU64 start, NvU64 end, NvU64 *frameIndex) +{ + NvS64 diff; + + if ((diff = _checkOne(pRegmap->map[MAP_IDX_ALLOC_PIN], start, end)) != -1) + { + *frameIndex = diff; + return NV_ERR_NO_MEMORY; + } + + if (pRegmap->frameEvictionsInProcess > 0) + { + // + // Pages that are being evicted may be in the free state so we need to + // check for eviction on all frames as long as any eviction is happening + // in the region. + // + if ((diff = _checkOne(pRegmap->map[MAP_IDX_EVICTING], start, end)) != -1) + { + *frameIndex = diff; + return NV_ERR_NO_MEMORY; + } + } + + // + // Check SCRUBBING + // TODO: Skip this check if scrubbing has been completed for all frames. 
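+    // Frames still being scrubbed are reported as unavailable rather than
+    // evictable, so the scan skips past them.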
+ // + if ((diff = _checkOne(pRegmap->map[MAP_IDX_SCRUBBING], start, end)) != -1) + { + *frameIndex = diff; + return NV_ERR_NO_MEMORY; + } + + if ((diff = _checkOne(pRegmap->map[MAP_IDX_NUMA_REUSE], start, end)) != -1) + { + *frameIndex = diff; + return NV_ERR_NO_MEMORY; + } + + if ((diff = _checkOne(pRegmap->map[MAP_IDX_ALLOC_UNPIN], start, end)) != -1) + { + *frameIndex = diff; + return NV_ERR_IN_USE; + } + + if ((diff = _checkOne(pRegmap->map[MAP_IDX_BLACKLIST], start, end)) != -1) + { + *frameIndex = diff; + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +// +// Return ALL_FREE if all frames in the [start, end] range are available for +// allocation or the first frame index that isn't. +// +static NvS64 +_pmaRegmapAvailable(PMA_REGMAP *pRegmap, NvU64 start, NvU64 end) +{ + NvU64 unavailableFrameIndex; + NV_STATUS frameStatus = _pmaRegmapStatus(pRegmap, start, end, &unavailableFrameIndex); + + if (frameStatus == NV_OK) + return ALL_FREE; + + NV_ASSERT(unavailableFrameIndex >= start); + NV_ASSERT(unavailableFrameIndex <= end); + + return unavailableFrameIndex; +} + +// +// Return ALL_FREE if all frames in the [start, end] range are available for +// allocation, EVICTABLE if some of them would need to be evicted, or the first +// frame index that isn't free nor evictable. +// +static NvS64 +_pmaRegmapEvictable(PMA_REGMAP *pRegmap, NvU64 start, NvU64 end) +{ + NvU64 unavailableFrameIndex; + NvS64 frameStatus = _pmaRegmapStatus(pRegmap, start, end, &unavailableFrameIndex); + + if (frameStatus == NV_OK) + return ALL_FREE; + + NV_ASSERT(unavailableFrameIndex >= start); + NV_ASSERT(unavailableFrameIndex <= end); + + if (frameStatus == NV_ERR_IN_USE) + return EVICTABLE; + + return unavailableFrameIndex; +} + +void * +pmaRegmapInit +( + NvU64 numFrames, + NvU64 addrBase, + PMA_STATS *pPmaStats, + NvBool bProtected +) +{ + NvU32 i; + PMA_REGMAP *newMap; + NvU64 num2mbPages; + + newMap = (PMA_REGMAP*) portMemAllocNonPaged(sizeof(struct pma_regmap)); + if (newMap == NULL) + { + return NULL; + } + portMemSet(newMap, 0, sizeof(struct pma_regmap)); + + newMap->totalFrames = numFrames; + num2mbPages = numFrames / (_PMA_2MB >> PMA_PAGE_SHIFT); + + // + // Initialize all tracking staructure + // These data are added to the PMA structure; we do not keep per-region stats + // + pPmaStats->numFreeFrames += newMap->totalFrames; + pPmaStats->num2mbPages += num2mbPages; + pPmaStats->numFree2mbPages += num2mbPages; + + newMap->bProtected = bProtected; + newMap->pPmaStats = pPmaStats; + newMap->mapLength = PAGE_MAPIDX(numFrames-1) + 1; + + for (i = 0; i < PMA_BITS_PER_PAGE; i++) + { + // This cast is necessary because NVPORT requires NvLength -- NvU32 + // It is ok because max_reg = 2^32 * 2^6 * 64KB = 2^54B = 16 Petabytes + newMap->map[i] = (NvU64*) portMemAllocNonPaged((NvLength)(newMap->mapLength * sizeof(NvU64))); + if (newMap->map[i] == NULL) + { + pmaRegmapDestroy(newMap); + return NULL; + } + portMemSet(newMap->map[i], 0, (NvLength) (newMap->mapLength * sizeof(NvU64))); + } + + return (void *)newMap; +} + +void +pmaRegmapDestroy(void *pMap) +{ + NvU32 i; + NvU64 num2mbPages; + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + + for (i = 0; i < PMA_BITS_PER_PAGE; i++) + { + portMemFree(pRegmap->map[i]); + } + + pRegmap->pPmaStats->numFreeFrames -= pRegmap->totalFrames; + + num2mbPages = pRegmap->totalFrames / (_PMA_2MB >> PMA_PAGE_SHIFT); + pRegmap->pPmaStats->numFree2mbPages -= num2mbPages; + + portMemFree(pRegmap); +} + +// +// Change a frame's state & attribute +// +// States: +// STATE_FREE, 
STATE_UNPIN, STATE_PIN +// Attributes: +// ATTRIB_PERSISTENT, ATTRIB_SCRUBBING, ATTRIB_EVICTING, ATTRIB_NUMA_REUSE +// Masks: +// STATE_MASK, ATTRIB_MASK +// + +void +pmaRegmapChangeStateAttribEx +( + void *pMap, + NvU64 frameNum, + PMA_PAGESTATUS newState, + PMA_PAGESTATUS newStateMask +) +{ + NvU64 mapIndex, mapOffset, bits, newVal, mask; + NvU32 i, bitWriteCount; + PMA_PAGESTATUS oldState, updatedState; + NvBool bUpdate2mbTracking = NV_FALSE; + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + + mapIndex = PAGE_MAPIDX(frameNum); + mapOffset = PAGE_BITIDX(frameNum); + + NV_ASSERT(pRegmap != NULL); // possible error code return + NV_ASSERT(mapIndex < pRegmap->mapLength); + + bitWriteCount = PMA_STATE_BITS_PER_PAGE + PMA_ATTRIB_BITS_PER_PAGE; + + mask = (NvU64)MAKE_BITMASK(mapOffset); + + oldState = pmaRegmapRead(pRegmap, frameNum, NV_TRUE); + + // + // If we are going to allocate the 2MB page, we need bookkeeping + // before the bitmap is changed + // + if (((newState & STATE_MASK) != STATE_FREE) && _pmaRegmapAllFree2mb(pRegmap, frameNum)) + { + bUpdate2mbTracking = NV_TRUE; + } + + for (i = 0; i < bitWriteCount; i++) + { + if (NVBIT(i) & newStateMask) + { + newVal = ((NvU64) (newState & (1 << i)) >> i) << mapOffset; + bits = pRegmap->map[i][mapIndex]; + pRegmap->map[i][mapIndex] = (NvU64) SETBITS(bits, mask, newVal); + } + } + + // Update some stats for optimization + updatedState = pmaRegmapRead(pRegmap, frameNum, NV_TRUE); + + pmaStatsUpdateState(&pRegmap->pPmaStats->numFreeFrames, 1, + oldState, updatedState); + + // + // If we are freeing a frame, we should check if we need to update the 2MB + // page tracking + // + if (bUpdate2mbTracking || + (((oldState & STATE_MASK) != STATE_FREE) && _pmaRegmapAllFree2mb(pRegmap, frameNum))) + { + pmaStatsUpdateState(&pRegmap->pPmaStats->numFree2mbPages, 1, + oldState, updatedState); + + } +} + +void +pmaRegmapChangeStateAttrib +( + void *pMap, + NvU64 frameNum, + PMA_PAGESTATUS newState, + NvBool writeAttrib +) +{ + NvU64 mapIndex, mapOffset, bits, newVal, mask; + NvU32 i; + NvU32 bitWriteCount; + PMA_PAGESTATUS oldState; + NvBool bUpdate2mbTracking = NV_FALSE; + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + + mapIndex = PAGE_MAPIDX(frameNum); + mapOffset = PAGE_BITIDX(frameNum); + + NV_ASSERT(pRegmap != NULL); // possible error code return + NV_ASSERT(mapIndex < pRegmap->mapLength); + + bitWriteCount = (writeAttrib ? 
+ (PMA_STATE_BITS_PER_PAGE + PMA_ATTRIB_BITS_PER_PAGE) : + PMA_STATE_BITS_PER_PAGE); + + mask = (NvU64)MAKE_BITMASK(mapOffset); + + oldState = pmaRegmapRead(pRegmap, frameNum, NV_TRUE); + + // + // If we are going to allocate the 2MB page, we need bookkeeping + // before the bitmap is changed + // + if (((newState & STATE_MASK) != STATE_FREE) && _pmaRegmapAllFree2mb(pRegmap, frameNum)) + { + bUpdate2mbTracking = NV_TRUE; + } + + for (i = 0; i < bitWriteCount; i++) + { + newVal = ((NvU64) (newState & (1 << i)) >> i) << mapOffset; + bits = pRegmap->map[i][mapIndex]; + pRegmap->map[i][mapIndex] = (NvU64) SETBITS(bits, mask, newVal); + } + + NV_ASSERT(pmaRegmapRead(pRegmap, frameNum, writeAttrib) == newState); + + // Update some stats for optimization + pmaStatsUpdateState(&pRegmap->pPmaStats->numFreeFrames, 1, + oldState, newState); + + // + // If we are freeing a frame, we should check if we need to update the 2MB + // page tracking + // + if (bUpdate2mbTracking || + (((oldState & STATE_MASK) != STATE_FREE) && _pmaRegmapAllFree2mb(pRegmap, frameNum))) + { + pmaStatsUpdateState(&pRegmap->pPmaStats->numFree2mbPages, 1, + oldState, newState); + + } +} + +void +pmaRegmapChangeState(void *pMap, NvU64 frameNum, PMA_PAGESTATUS newState) +{ + NV_ASSERT(newState <= STATE_PIN); + // Write state bits, but not attrib bits + pmaRegmapChangeStateAttrib((PMA_REGMAP *)pMap, frameNum, newState, NV_FALSE); +} + +void +pmaRegmapChangePageStateAttrib +( + void * pMap, + NvU64 startFrame, + NvU32 pageSize, + PMA_PAGESTATUS newState, + NvBool writeAttrib +) +{ + NvU32 framesPerPage = pageSize >> PMA_PAGE_SHIFT; + NvU64 frame; + for (frame = startFrame; frame < startFrame + framesPerPage; frame++) + { + pmaRegmapChangeStateAttrib((PMA_REGMAP *)pMap, frame, newState, writeAttrib); + } +} + +PMA_PAGESTATUS +pmaRegmapRead(void *pMap, NvU64 frameNum, NvBool readAttrib) +{ + NvU64 mapIndex, mapOffset, bits, mask, val; + NvU32 i; + NvU32 bitReadCount; + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + + NV_ASSERT(pRegmap != NULL); // possible error code return instead of assertion failure + + val = 0; + mapIndex = PAGE_MAPIDX(frameNum); + mapOffset = PAGE_BITIDX(frameNum); + + bitReadCount = (readAttrib ? PMA_STATE_BITS_PER_PAGE + PMA_ATTRIB_BITS_PER_PAGE : PMA_STATE_BITS_PER_PAGE); + + mask = (NvU64)MAKE_BITMASK(mapOffset); + for (i = 0; i < bitReadCount; i++) + { + bits = pRegmap->map[i][mapIndex]; + val |= (((bits & mask) >> mapOffset) << i); + } + return (PMA_PAGESTATUS)val; +} + + +// +// Note that these functions only return the free regions but doesn't reserve them +// therefore locks should not be released after they return until you mark them allocated +// +NV_STATUS +pmaRegmapScanContiguous +( + void *pMap, + NvU64 addrBase, + NvU64 rangeStart, + NvU64 rangeEnd, + NvU64 numPages, + NvU64 *freeList, + NvU32 pageSize, + NvU64 alignment, + NvU64 *numPagesAlloc, + NvBool bSkipEvict +) +{ + NvU64 freeStart, numFrames, localStart, localEnd, framesPerPage; + NvU64 frameAlignment, alignedAddrBase, frameAlignmentPadding; + NvBool found; + PMA_PAGESTATUS startStatus, endStatus; + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + + framesPerPage = pageSize >> PMA_PAGE_SHIFT; + numFrames = framesPerPage * numPages; + frameAlignment = alignment >> PMA_PAGE_SHIFT; + + // + // Find how much is the base address short of the alignment requirements + // and adjust that value in the scanning range before starting the scan. 
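+    // For example, with 64KB frames and a 2MB alignment, a base address 64KB
+    // below a 2MB boundary gives a one-frame alignment padding.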
+ // + alignedAddrBase = NV_ALIGN_UP(addrBase, alignment); + frameAlignmentPadding = (alignedAddrBase - addrBase) >> PMA_PAGE_SHIFT; + + // Handle restricted allocations + if (rangeStart != 0 || rangeEnd != 0) + { + localStart = rangeStart >> PMA_PAGE_SHIFT; + localEnd = NV_MIN(rangeEnd >> PMA_PAGE_SHIFT, pRegmap->totalFrames - 1); + } + else + { + localStart = 0; + localEnd = pRegmap->totalFrames - 1; + } + + localStart = alignUpToMod(localStart, frameAlignment, frameAlignmentPadding); + freeStart = localStart; + found = 0; + + NV_PRINTF(LEVEL_INFO, + "Scanning with addrBase 0x%llx in frame range 0x%llx..0x%llx, pages to allocate 0x%llx\n", + addrBase, localStart, localEnd, numPages); + + while (!found) + { + if ((freeStart + numFrames - 1) > localEnd) + { + // freeStart + numFrames too close to local search end. Re-starting search + break; + } + + startStatus = pmaRegmapRead(pRegmap, freeStart, NV_TRUE); + endStatus = pmaRegmapRead(pRegmap, (freeStart + numFrames - 1), NV_TRUE); + + if (endStatus == STATE_FREE) + { + if (startStatus == STATE_FREE) + { + NvS64 diff = _pmaRegmapAvailable(pRegmap, freeStart, (freeStart + numFrames - 1)); + if (diff == ALL_FREE) + { + found = NV_TRUE; + *freeList = addrBase + (freeStart << PMA_PAGE_SHIFT); + *numPagesAlloc = numPages; + } + else + { + // + // Find the next aligned free frame and set it as the start + // frame for next iteration's scan. + // + NV_ASSERT(diff >= 0); + + freeStart = alignUpToMod(diff + 1, frameAlignment, frameAlignmentPadding); + + NV_ASSERT(freeStart != 0); + } + } + else + { + // Start point isn't free, so bump to check the next aligned frame + freeStart += frameAlignment; + } + } + else + { + // + // End point isn't usable, so jump to after the end to check again + // However, align the new start point properly before next iteration. + // + freeStart += NV_ALIGN_UP(numFrames, frameAlignment); + } + } + + if (found) return NV_OK; + + *numPagesAlloc = 0; + + if (bSkipEvict) return NV_ERR_NO_MEMORY; + + // Loop back to the beginning and continue searching + + freeStart = localStart; + while (!found) + { + if ((freeStart + numFrames - 1) > localEnd) + { + // Failed searching for STATE_FREE or STATE_UNPIN + return NV_ERR_NO_MEMORY; + } + + startStatus = pmaRegmapRead(pRegmap, freeStart, NV_TRUE); + endStatus = pmaRegmapRead(pRegmap, (freeStart + numFrames - 1), NV_TRUE); + + if (endStatus == STATE_FREE || endStatus == STATE_UNPIN) + { + if (startStatus == STATE_FREE || startStatus == STATE_UNPIN) + { + NvS64 diff = _pmaRegmapEvictable(pRegmap, freeStart, (freeStart + numFrames - 1)); + if (diff == EVICTABLE) + { + found = NV_TRUE; + *freeList = addrBase + (freeStart << PMA_PAGE_SHIFT); + } + else + { + // + // The previous search should have found an all free region + // and we wouldn't be looking for an evictable one. + // + NV_ASSERT(diff != ALL_FREE); + NV_ASSERT(diff >= 0); + + // + // Find the next aligned free frame and set it as the start + // frame for next iteration's scan. + // + freeStart = alignUpToMod(diff + 1, frameAlignment, frameAlignmentPadding); + NV_ASSERT(freeStart != 0); + } + } + else + { + // Start point isn't usable, so bump to the next aligned frame to check again + freeStart += frameAlignment; + } + } + else + { + // + // End point isn't usable, so jump to after the end to check again + // However, align the new start point properly before next iteration. 
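The scan honors the requested alignment relative to the region's absolute base, not relative to frame 0: alignedAddrBase and frameAlignmentPadding convert the base address's misalignment into a frame offset, and candidate frames are then advanced with alignUpToMod so that addrBase plus the frame offset is an aligned address. The sketch below assumes alignUpToMod(v, align, pad) returns the smallest value not below v that is congruent to pad modulo align, and assumes 64 KiB frames; both are illustrative assumptions, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 16u                       /* assume 64 KiB frames for the example */

/* Smallest x >= v with x congruent to pad modulo align; align must be a power of two. */
static uint64_t alignUpToModSketch(uint64_t v, uint64_t align, uint64_t pad)
{
    uint64_t r = (v - pad) & (align - 1);
    return (r == 0) ? v : v + (align - r);
}

int main(void)
{
    uint64_t addrBase    = 0x12340000ull;                 /* region base, not 2 MiB aligned */
    uint64_t alignment   = 2ull << 20;                    /* want 2 MiB aligned results     */
    uint64_t alignedBase = (addrBase + alignment - 1) & ~(alignment - 1);
    uint64_t padFrames   = (alignedBase - addrBase) >> PAGE_SHIFT;
    uint64_t frameAlign  = alignment >> PAGE_SHIFT;

    uint64_t frame = alignUpToModSketch(7, frameAlign, padFrames);
    printf("first candidate frame: %llu, absolute addr 0x%llx\n",
           (unsigned long long)frame,
           (unsigned long long)(addrBase + (frame << PAGE_SHIFT)));
    return 0;
}

With the values above the first candidate is frame 12, whose absolute address 0x12400000 is 2 MiB aligned even though the region base is not.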
+ // + freeStart += NV_ALIGN_UP(numFrames, frameAlignment); + } + } + + return NV_ERR_IN_USE; +} + +NV_STATUS +pmaRegmapScanDiscontiguous +( + void *pMap, + NvU64 addrBase, + NvU64 rangeStart, + NvU64 rangeEnd, + NvU64 numPages, + NvU64 *freeList, + NvU32 pageSize, + NvU64 alignment, + NvU64 *numPagesAlloc, + NvBool bSkipEvict +) +{ + NvU64 freeStart, found, framesPerPage, localStart, localEnd; + NvU64 alignedAddrBase, frameAlignmentPadding; + PMA_PAGESTATUS startStatus, endStatus; + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + + NV_ASSERT(alignment == pageSize); + + framesPerPage = pageSize >> PMA_PAGE_SHIFT; + + // + // Find how much is the base address short of the alignment requirements + // and adjust that value in the scanning range before starting the scan. + // + alignedAddrBase = NV_ALIGN_UP(addrBase, alignment); + frameAlignmentPadding = (alignedAddrBase - addrBase) >> PMA_PAGE_SHIFT; + + // Handle restricted allocations + if (rangeStart != 0 || rangeEnd != 0) + { + // Embedded % requires special handling. + NV_ASSERT_OR_ELSE_STR(rangeStart % pageSize == 0, + "rangeStart %% pageSize == 0", /*do nothing*/); + NV_ASSERT_OR_ELSE_STR((rangeEnd + 1) % pageSize == 0, + "(rangeEnd + 1) %% pageSize == 0", /*do nothing*/); + + localStart = rangeStart >> PMA_PAGE_SHIFT; + localEnd = NV_MIN(rangeEnd >> PMA_PAGE_SHIFT, pRegmap->totalFrames - 1); + } + else + { + localStart = 0; + localEnd = pRegmap->totalFrames-1; + } + + localStart = alignUpToMod(localStart, framesPerPage, frameAlignmentPadding); + freeStart = localStart; + found = 0; + + NV_PRINTF(LEVEL_INFO, + "Scanning with addrBase 0x%llx in frame range 0x%llx..0x%llx, pages to allocate 0x%llx\n", + addrBase, localStart, localEnd, numPages); + + // scan for allocatable pages + // two-pass algorithm + while (found != numPages) + { + if ((freeStart + framesPerPage - 1) > localEnd) break; + + startStatus = pmaRegmapRead(pRegmap, freeStart, NV_TRUE); + endStatus = pmaRegmapRead(pRegmap, (freeStart + framesPerPage - 1), NV_TRUE); + + if (startStatus == STATE_FREE) + { + if(endStatus == STATE_FREE) + { + NvS64 diff = _pmaRegmapAvailable(pRegmap, freeStart, (freeStart + framesPerPage - 1)); + if (diff == ALL_FREE) + { + freeList[found++] = addrBase + (freeStart << PMA_PAGE_SHIFT); + } + } + } + freeStart += framesPerPage; + } + + *numPagesAlloc = found; + if(found == numPages) return NV_OK; + if(bSkipEvict) return NV_ERR_NO_MEMORY; + + freeStart = localStart; + while (found != numPages) + { + if ((freeStart + framesPerPage - 1) > localEnd) return NV_ERR_NO_MEMORY; + + startStatus = pmaRegmapRead(pRegmap, freeStart, NV_TRUE); + endStatus = pmaRegmapRead(pRegmap, (freeStart + framesPerPage - 1), NV_TRUE); + + if (startStatus == STATE_FREE || startStatus == STATE_UNPIN) + { + if(endStatus == STATE_FREE || endStatus == STATE_UNPIN) + { + NvS64 diff = _pmaRegmapEvictable(pRegmap, freeStart, (freeStart + framesPerPage - 1)); + if (diff == EVICTABLE) + { + freeList[found++] = addrBase + (freeStart << PMA_PAGE_SHIFT); + } + } + } + freeStart += framesPerPage; + } + + return NV_ERR_IN_USE; +} + +void +pmaRegmapGetSize +( + void *pMap, + NvU64 *pBytesTotal +) +{ + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + *pBytesTotal = (pRegmap->totalFrames << PMA_PAGE_SHIFT); +} + +void +pmaRegmapGetLargestFree +( + void *pMap, + NvU64 *pLargestFree +) +{ + NvU64 mapIndex = 0; + NvU32 mapMaxZeros = 0; + NvU32 mapTrailZeros = 0; + NvU32 regionMaxZeros = 0; + NvU64 mapMaxIndex; + PMA_REGMAP *pRegmap = (PMA_REGMAP *)pMap; + + mapMaxIndex = 
PAGE_MAPIDX(pRegmap->totalFrames - 1); + + while (mapIndex <= mapMaxIndex) + { + NvU64 bitmap = pRegmap->map[MAP_IDX_ALLOC_UNPIN][mapIndex] | pRegmap->map[MAP_IDX_ALLOC_PIN][mapIndex]; + + // If the last map[] is only partially used, mask the valid bits + if (mapIndex == mapMaxIndex && (PAGE_BITIDX(pRegmap->totalFrames) != 0)) + { + bitmap |= (~0ULL) << PAGE_BITIDX(pRegmap->totalFrames); + } + + if (maxZerosGet(bitmap) == _UINT_SIZE) + { + mapTrailZeros += _UINT_SIZE; + } + else + { + mapTrailZeros += portUtilCountTrailingZeros64(bitmap); + mapMaxZeros = maxZerosGet(bitmap); + regionMaxZeros = NV_MAX(regionMaxZeros, + NV_MAX(mapMaxZeros, mapTrailZeros)); + mapTrailZeros = portUtilCountLeadingZeros64(bitmap); + } + + mapIndex++; + } + regionMaxZeros = NV_MAX(regionMaxZeros, mapTrailZeros); + *pLargestFree = ((NvU64) regionMaxZeros) << PMA_PAGE_SHIFT; +} + +NvU64 pmaRegmapGetEvictingFrames(void *pMap) +{ + return ((PMA_REGMAP *)pMap)->frameEvictionsInProcess; +} + +void pmaRegmapSetEvictingFrames(void *pMap, NvU64 frameEvictionsInProcess) +{ + ((PMA_REGMAP *)pMap)->frameEvictionsInProcess = frameEvictionsInProcess; +} + diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/vaspace_api.c b/src/nvidia/src/kernel/gpu/mem_mgr/vaspace_api.c new file mode 100644 index 000000000..57ab83ca7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/vaspace_api.c @@ -0,0 +1,787 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "os/os.h" +#include "core/locks.h" + +#include "gpu/mem_mgr/vaspace_api.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/gpu_vaspace.h" +#include "mem_mgr/virtual_mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/device/device.h" +#include "class/cl90f1.h" +#include "gpu/bus/kern_bus.h" + +#include "virtualization/hypervisor/hypervisor.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "mem_mgr/virt_mem_mgr.h" + +#include "vgpu/rpc.h" + +/* ------------------ static and helper functions prototypes------------------*/ +static NvU32 translateAllocFlagsToVASpaceFlags(NvU32 allocFlags, NvU32 *translatedFlags); +static NvU32 translatePageSizeToVASpaceFlags(NV_VASPACE_ALLOCATION_PARAMETERS *pNvVASpaceAllocParams); +static void destroyMemDesc(Device *pDevice, NvHandle hVASpace); +static NV_STATUS _vaspaceapiManagePageLevelsForSplitVaSpace(OBJGPU *pGpu, NvHandle hClient, NvU32 gpuMask, NvU32 flags, VASPACEAPI_MANAGE_PAGE_LEVELS_ACTION action); + +NvBool +vaspaceapiCanCopy_IMPL(VaSpaceApi *pResource) +{ + return NV_TRUE; +} + +NV_STATUS +vaspaceapiConstruct_IMPL +( + VaSpaceApi *pVaspaceApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NvHandle hClient = pCallContext->pClient->hClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NvHandle hParent = pResourceRef->pParentRef->hResource; + NV_VASPACE_ALLOCATION_PARAMETERS *pNvVASpaceAllocParams; + NvU32 allocFlags; + NvU32 flags = 0; + NvU32 gpuMask = 0; + NvU32 gpuMaskInitial = 0; + OBJVMM *pVmm = SYS_GET_VMM(SYS_GET_INSTANCE()); + OBJVASPACE *pVAS = NULL; + NvU64 vasLimit = 0; + NvU64 vaStartInternal = 0; + NvU64 vaLimitInternal = 0; + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pVaspaceApi); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + OBJGPUGRP *pGpuGrp = GPU_RES_GET_GPUGRP(pVaspaceApi); + Device *pDevice; + NvBool bLockAcquired = NV_FALSE; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + if (RS_IS_COPY_CTOR(pParams)) + { + if (!rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_ALL, &gpuMask)) + { + // + // If we hold some GPU locks already then acquiring more GPU locks + // may violate lock ordering and cause dead-lock. To avoid dead-lock in this case, + // attempt to take the locks with a conditional acquire. + // + gpuMaskInitial = rmGpuLocksGetOwnedMask(); + NvU32 lockFlag = (gpuMaskInitial == 0) + ? 
GPUS_LOCK_FLAGS_NONE + : GPUS_LOCK_FLAGS_COND_ACQUIRE; + + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_ALL, + lockFlag, + RM_LOCK_MODULES_MEM, + &gpuMask)); + + bLockAcquired = NV_TRUE; + } + + status = vaspaceapiCopyConstruct_IMPL(pVaspaceApi, pCallContext, pParams); + + if (bLockAcquired) + rmGpuGroupLockRelease(gpuMask & (~gpuMaskInitial), GPUS_LOCK_FLAGS_NONE); + + return status; + } + + pNvVASpaceAllocParams = pParams->pAllocParams; + allocFlags = pNvVASpaceAllocParams->flags; + + // Translate & validate flags + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, translateAllocFlagsToVASpaceFlags(allocFlags, &flags)); + + NV_ASSERT_OK_OR_RETURN(deviceGetByGpu(pCallContext->pClient, pGpu, NV_TRUE, &pDevice)); + + // + // Make sure this GPU is not already locked by this thread + // Ideally this thread shouldn't have locked any GPU in the system but + // checking this is sufficient as memory allocation from PMA requires + // current GPU's lock not to be held + // + if (rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + NV_PRINTF(LEVEL_ERROR, "VASpace alloc should be called without acquiring GPU lock\n"); + LOCK_ASSERT_AND_RETURN(0); + } + + if (pNvVASpaceAllocParams->index == NV_VASPACE_ALLOCATION_INDEX_GPU_HOST) + { + gpuMask = NVBIT(pGpu->gpuInstance); + } + else + { + gpuMask = gpumgrGetGpuMask(pGpu); + } + + status = _vaspaceapiManagePageLevelsForSplitVaSpace(pGpu, hClient, gpuMask, flags, VASPACEAPI_MANAGE_PAGE_LEVELS_RESERVE); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + //-------------------------------------------------------------------------------- + // we acquire the GPU lock below. + // From here down do not return early, use goto done + //-------------------------------------------------------------------------------- + + // Acquire the lock *only after* PMA is done allocating. + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM); + NV_ASSERT_OR_GOTO(status == NV_OK, done); + bLockAcquired = NV_TRUE; + + if ((pParams->externalClassId == FERMI_VASPACE_A) && + (pNvVASpaceAllocParams->index == NV_VASPACE_ALLOCATION_INDEX_GPU_HOST)) + { + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_ASSERT_OR_GOTO(0, done); + } + } + + // Per channel vaspace binding option has to be enabled! + if (pDevice->vaMode == NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(0, done); + } + + // + // When MIG is enabled, ensure the client has a valid subscription. + // While only split VA spaces should require this (and this will be + // checked by _vaspaceapiManagePageLevelsForSplitVaSpace), check for + // all platforms once the GPU lock is held, for consistency/to weed + // out any cases where the client creates the VAS before subscribing + // to the partition. + // + // Only check for cases where split VA space management is disabled - + // other configurations (such as MODS SRIOV MIG testing) which use + // split VA spaces have problems with this check. 
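The copy-constructor path above may be entered while the caller already holds some GPU locks, so it acquires the group lock conditionally to avoid a lock-order deadlock and, on the way out, releases only the locks it actually added (gpuMask with the initially owned mask removed). A toy sketch of that "remember what you acquired" pattern follows; the lock helpers and masks are stand-ins, not RM APIs.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy lock state: one bit per "GPU". */
static uint32_t g_ownedMask;

static uint32_t lockGetOwnedMask(void)            { return g_ownedMask; }
static bool     lockTryAcquireMask(uint32_t want) { g_ownedMask |= want; return true; /* toy: always succeeds */ }
static void     lockReleaseMask(uint32_t mask)    { g_ownedMask &= ~mask; }

static int doWorkWithAllLocks(uint32_t allMask)
{
    uint32_t ownedBefore = lockGetOwnedMask();
    bool     acquired    = false;

    if ((ownedBefore & allMask) != allMask)
    {
        /* Conditional acquire: bail out rather than deadlock if it cannot be honored. */
        if (!lockTryAcquireMask(allMask & ~ownedBefore))
            return -1;
        acquired = true;
    }

    /* ... critical section ... */

    if (acquired)
        lockReleaseMask(lockGetOwnedMask() & ~ownedBefore);  /* drop only what we added */
    return 0;
}

int main(void)
{
    lockTryAcquireMask(0x1);                       /* caller already holds GPU 0 */
    doWorkWithAllLocks(0x7);
    printf("owned after: 0x%x\n", (unsigned)lockGetOwnedMask());  /* back to 0x1 */
    return 0;
}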
+ // + if ((!NV_IS_MODS) && (pParams->externalClassId == FERMI_VASPACE_A) && + (!gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) && + (pKernelMIGManager != NULL) && kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager)) + { + MIG_INSTANCE_REF ref; + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref), + done); + } + + // RS-TODO - Move this to allocWithResServer + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + + NvBool bSendRPC = NV_TRUE; + NvBool bIsFlaCapability = kbusIsFlaSupported(pKernelBus); + + if (gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + // If we are in SR-IOV heavy mode then we need to send RPC to host + bSendRPC = NV_TRUE; + + // If FLA capability is disabled then do not send RPC. + if ((pNvVASpaceAllocParams->index == NV_VASPACE_ALLOCATION_INDEX_GPU_FLA || + (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_IS_FLA)) && !bIsFlaCapability) + { + bSendRPC = NV_FALSE; + } + } + else + { + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + // In case of SR-IOV, the BAR1 and FLA is managed by the guest. So, no need + // to communicate with the host for BAR1 and FLA VA. + if ((pNvVASpaceAllocParams->index == NV_VASPACE_ALLOCATION_INDEX_GPU_HOST)) + bSendRPC = NV_FALSE; + } + + if ((IS_GSP_CLIENT(pGpu) || IS_VIRTUAL_WITH_SRIOV(pGpu)) && + ((pNvVASpaceAllocParams->index == NV_VASPACE_ALLOCATION_INDEX_GPU_FLA) || + (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_IS_FLA))) + bSendRPC = NV_FALSE; + } + + if (bSendRPC) + { + NV_RM_RPC_ALLOC_OBJECT(pGpu, + pParams->hClient, + pParams->hParent, + pParams->hResource, + pParams->externalClassId, + pNvVASpaceAllocParams, + status); + if (status != NV_OK) + { + goto done; + } + } + } + + if ((pParams->externalClassId == FERMI_VASPACE_A) && + (pNvVASpaceAllocParams->index != NV_VASPACE_ALLOCATION_INDEX_GPU_NEW)) + { + switch (pNvVASpaceAllocParams->index) + { + case NV_VASPACE_ALLOCATION_INDEX_GPU_HOST: + pVAS = kbusGetBar1VASpace_HAL(pGpu, pKernelBus); + break; + case NV_VASPACE_ALLOCATION_INDEX_GPU_GLOBAL: + gpugrpGetGlobalVASpace(pGpuGrp, &pVAS); + break; + case NV_VASPACE_ALLOCATION_INDEX_GPU_DEVICE: + if (vaspaceGetByHandleOrDeviceDefault(pCallContext->pClient, hParent, NV01_NULL_OBJECT, &pVAS) != NV_OK) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + break; + case NV_VASPACE_ALLOCATION_INDEX_GPU_FLA: + status = kbusGetFlaVaspace_HAL(pGpu, pKernelBus, &pVAS); + // + // special handling here, because FLA VAspace can fail + // and tests can still run assuming FLA is not supported. + // This should not happen, but if it happens RM should fail + // grcefully. Filing bug for MODS to fix this. And then we can + // remove this special handling. + // + if (pVAS == NULL) + goto done; + break; + } + if (pVAS == NULL) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(0, done); + } + + vaspaceIncRefCnt(pVAS); + } + else + { + // validate the big page size + if (!((pNvVASpaceAllocParams->bigPageSize == NV_VASPACE_BIG_PAGE_SIZE_64K) || + (pNvVASpaceAllocParams->bigPageSize == NV_VASPACE_BIG_PAGE_SIZE_128K) || + (pNvVASpaceAllocParams->bigPageSize == 0))) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_ASSERT_OR_GOTO(0, done); + } + + // + // Guest page tables are allocated in guest subheap only + // for non SRIOV guests. For SR-IOV guests page table go + // into subheap only if the guest does not manage VA. 
+ // + if (!gpuIsSplitVasManagementServerClientRmEnabled(pGpu) || + !IS_VIRTUAL(pGpu)) + { + flags |= VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR; + } + + // Apply ATS restrictions + if (flags & VASPACE_FLAGS_ENABLE_ATS) + { + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)) + { + status = NV_ERR_NOT_SUPPORTED; + NV_ASSERT_OR_GOTO(0, done); + } + } + + if ((pMemoryManager != NULL) && + memmgrIsPmaInitialized(pMemoryManager) && + memmgrAreClientPageTablesPmaManaged(pMemoryManager) && + !(allocFlags & NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED) && + !(allocFlags & NV_VASPACE_ALLOCATION_FLAGS_IS_FLA)) + { + flags |= VASPACE_FLAGS_PTETABLE_PMA_MANAGED; + } + + // Get flags for the requested big page size + flags |= translatePageSizeToVASpaceFlags(pNvVASpaceAllocParams); + + if (0 != pNvVASpaceAllocParams->vaSize) + { + // FLA VASpace can start from any base (!= 0) + if (flags & VASPACE_FLAGS_FLA) + { + vasLimit = pNvVASpaceAllocParams->vaBase + + pNvVASpaceAllocParams->vaSize - 1; + if (vasLimit < pNvVASpaceAllocParams->vaBase) + { + NV_PRINTF(LEVEL_ERROR, + "Integer overflow !!! Invalid parameters for vaBase:%llx, vaSize:%llx\n", + pNvVASpaceAllocParams->vaBase, + pNvVASpaceAllocParams->vaSize); + status = NV_ERR_INVALID_ARGUMENT; + NV_ASSERT_OR_GOTO(0, done); + } + } + else + { + vasLimit = pNvVASpaceAllocParams->vaSize - 1; + } + } + + // Finally call the factory + status = vmmCreateVaspace(pVmm, pParams->externalClassId, + pNvVASpaceAllocParams->index, + gpuMask, + pNvVASpaceAllocParams->vaBase, + vasLimit, + vaStartInternal, + vaLimitInternal, + NULL, + flags, + &pVAS); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not construct VA space. Status %x\n", status); + pVAS = NULL; + NV_ASSERT_OR_GOTO(0, done); + } + } + + pVaspaceApi->pVASpace = pVAS; + + NV_PRINTF(LEVEL_INFO, + "Created vaspaceapi 0x%x, hParent 0x%x, device 0x%x, client 0x%x, varef" + " 0x%p, parentref 0x%p\n", pResourceRef->hResource, hParent, + RES_GET_HANDLE(pDevice), hClient, pResourceRef, + pCallContext->pResourceRef->pParentRef); + + // Return the actual VAS base and size. 
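For FLA VA spaces the inclusive limit is vaBase + vaSize - 1, and the code above rejects requests where that sum wraps past vaBase in 64-bit arithmetic. A quick standalone illustration of the unsigned-overflow check, using the same comparison but none of the driver's types:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns false if [base, base + size - 1] wraps around the 64-bit address space. */
static bool computeInclusiveLimit(uint64_t base, uint64_t size, uint64_t *limit)
{
    if (size == 0)
        return false;
    *limit = base + size - 1;
    return *limit >= base;          /* a wrapped limit compares below the base */
}

int main(void)
{
    uint64_t limit;
    printf("%d\n", computeInclusiveLimit(0xFFFFFFFF00000000ull, 1ull << 41, &limit)); /* 0: wraps */
    printf("%d\n", computeInclusiveLimit(0x100000000ull, 1ull << 32, &limit));        /* 1: ok    */
    return 0;
}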
+ pNvVASpaceAllocParams->vaBase = vaspaceGetVaStart(pVAS); + pNvVASpaceAllocParams->vaSize = vaspaceGetVaLimit(pVAS) + 1; + +done: + if (status == NV_OK) + { + (void)_vaspaceapiManagePageLevelsForSplitVaSpace(pGpu, hClient, gpuMask, flags, VASPACEAPI_MANAGE_PAGE_LEVELS_TRIM); + } + else + { + (void)_vaspaceapiManagePageLevelsForSplitVaSpace(pGpu, hClient, gpuMask, flags, VASPACEAPI_MANAGE_PAGE_LEVELS_RELEASE); + } + + if (bLockAcquired) + { + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + return status; +} + +NV_STATUS +vaspaceapiCopyConstruct_IMPL +( + VaSpaceApi *pVaspaceApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsClient *pSrcClient = pParams->pSrcClient; + RsResourceRef *pSrcRef = pParams->pSrcRef; + VaSpaceApi *pSrcVaspaceApi = dynamicCast(pSrcRef->pResource, VaSpaceApi); + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSrcVaspaceApi); + NvHandle hVASpaceSrc = pSrcRef->hResource; + NvHandle hClientSrc = pSrcClient->hClient; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvHandle hVASpace = pCallContext->pResourceRef->hResource; + OBJVASPACE *pVAS = pSrcVaspaceApi->pVASpace; + Device *pDevice; + NvBool bFlaVA = NV_FALSE; + + NV_ASSERT_OR_RETURN(pVAS, NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OK_OR_RETURN(deviceGetByGpu(pCallContext->pClient, pGpu, NV_TRUE, &pDevice)); + + + bFlaVA = ((IS_VIRTUAL_WITH_SRIOV(pGpu) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) || + IS_GSP_CLIENT(pGpu)) && + (vaspaceGetFlags(pVAS) & VASPACE_FLAGS_FLA); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, there is nothing to do at this point in the + // guest OS (where IS_VIRTUAL(pGpu) is true). 
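The done: path above completes the reserve/trim/release life cycle of the page-level pool: the worst case is reserved before the GPU lock is taken, then the reservation is trimmed back on success or released entirely on failure. The following is a generic sketch of that reserve, commit or roll back shape; the pool helpers and the 3-page figure are illustrative stand-ins only.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy pool: tracks how many bytes are currently reserved. */
static size_t g_reserved;

static bool poolReserve(size_t bytes) { g_reserved += bytes; return true; }
static void poolRelease(void)         { g_reserved = 0; }                               /* failure path */
static void poolTrim(size_t keep)     { if (g_reserved > keep) g_reserved = keep; }     /* success path */

static int allocateObject(bool simulateFailure)
{
    int status = 0;

    if (!poolReserve(3 * 4096))          /* reserve the worst case up front */
        return -1;

    /* ... take locks and build the object; any failure sets status ... */
    if (simulateFailure)
        status = -1;

    if (status == 0)
        poolTrim(0);                     /* keep only what was actually consumed */
    else
        poolRelease();                   /* roll the reservation back entirely */
    return status;
}

int main(void)
{
    allocateObject(false);
    printf("reserved after success: %zu\n", g_reserved);
    allocateObject(true);
    printf("reserved after failure: %zu\n", g_reserved);
    return 0;
}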
+ // + if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && !bFlaVA) + { + NV_RM_RPC_DUP_OBJECT(pGpu, hClient, hParent, hVASpace, hClientSrc, + hVASpaceSrc, 0, + NV_FALSE, // destructor explicitly issues RPC_FREE + pCallContext->pResourceRef, status); + } + + + NV_PRINTF(LEVEL_INFO, + "Shared vaspaceapi 0x%x, device 0x%x, client 0x%x, as vaspace 0x%x for " + "hParent 0x%x device 0x%x client 0x%x varef 0x%p, deviceref 0x%p\n", + hVASpaceSrc, pSrcRef->pParentRef->hResource, hClientSrc, + hVASpace, hParent, RES_GET_HANDLE(pDevice), hClient, + pCallContext->pResourceRef, + pCallContext->pResourceRef->pParentRef); + + vaspaceIncRefCnt(pVAS); + pVaspaceApi->pVASpace = pVAS; + // + // Mark the VAS to be duped, so that we can free it when FLA Memory is freed + // This is hacked for now, to make sure the duped VAS is freed with the duped VA + // + pVaspaceApi->bDuped = NV_TRUE; + + return status; + +} + +void +vaspaceapiDestruct_IMPL(VaSpaceApi *pVaspaceApi) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pVaspaceApi); + OBJVMM *pVmm = SYS_GET_VMM(SYS_GET_INSTANCE()); + OBJGVASPACE *pGVAS; + NvHandle hVASpace; + NvHandle hParent; + NvHandle hClient; + RsClient *pRsClient; + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + Device *pDevice; + NvBool bBar1VA = NV_FALSE; + NvBool bFlaVA = NV_FALSE; + + GPU_RES_SET_THREAD_BC_STATE(pVaspaceApi); + + resGetFreeParams(staticCast(pVaspaceApi, RsResource), &pCallContext, &pParams); + pRsClient = pCallContext->pClient; + hClient = pRsClient->hClient; + hParent = pCallContext->pResourceRef->pParentRef->hResource; + hVASpace = pCallContext->pResourceRef->hResource; + + status = deviceGetByGpu(pCallContext->pClient, pGpu, NV_TRUE, &pDevice); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "No device found vaspaceapi 0x%x, hParent 0x%x, client 0x%x varef 0x%p," + " deviceref 0x%p\n", hVASpace, hParent, hClient, + pCallContext->pResourceRef, + pCallContext->pResourceRef->pParentRef); + return; + } + + if (IS_VIRTUAL_WITH_SRIOV(pGpu) && !gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + bBar1VA = !!(vaspaceGetFlags(pVaspaceApi->pVASpace) & VASPACE_FLAGS_BAR_BAR1); + bFlaVA = !!(vaspaceGetFlags(pVaspaceApi->pVASpace) & VASPACE_FLAGS_FLA); + } + else + { + if (IS_GSP_CLIENT(pGpu) && (vaspaceGetFlags(pVaspaceApi->pVASpace) & VASPACE_FLAGS_FLA)) + bFlaVA = NV_TRUE; + } + + pGVAS = dynamicCast(pVaspaceApi->pVASpace, OBJGVASPACE); + if (pGVAS != NULL) + { + (void)_vaspaceapiManagePageLevelsForSplitVaSpace(pGpu, hClient, pVaspaceApi->pVASpace->gpuMask, pGVAS->flags, + VASPACEAPI_MANAGE_PAGE_LEVELS_RELEASE); + } + + destroyMemDesc(pDevice, hVASpace); + + vmmDestroyVaspace(pVmm, pVaspaceApi->pVASpace); + + // + // RS-TODO: Move out to freeWithResServ? + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // In case of SR-IOV, the BAR1 is managed by the guest. So, no need + // to communicate with the host for BAR1 VA. 
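Copy construction above does not clone the VA space; it takes another reference on the shared OBJVASPACE and marks the new handle as a dup so that its destruction drops only that reference. A bare refcounting sketch of the same ownership model, with a toy type rather than the driver's objects:

#include <stdio.h>

typedef struct
{
    int refCnt;
    /* ... page tables, allocator state, etc. ... */
} VaSpaceToy;

static void vasRetain(VaSpaceToy *v)  { v->refCnt++; }
static void vasRelease(VaSpaceToy *v)
{
    if (--v->refCnt == 0)
        printf("last reference gone: tear down the VA space\n");
}

int main(void)
{
    VaSpaceToy vas = { .refCnt = 1 };   /* original handle */
    vasRetain(&vas);                    /* duped handle shares the same object */
    vasRelease(&vas);                   /* freeing the dup leaves the original alive */
    vasRelease(&vas);                   /* freeing the original destroys it */
    return 0;
}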
+ // + if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && !bBar1VA && !bFlaVA) + { + NV_RM_RPC_FREE(pGpu, hClient, hParent, hVASpace, status); + NV_ASSERT(NV_OK == status); + } + + NV_PRINTF(LEVEL_INFO, + "Destroyed vaspaceapi 0x%x, hParent 0x%x, device 0x%x, client 0x%x " + "varef 0x%p, deviceref 0x%p\n", hVASpace, hParent, + RES_GET_HANDLE(pDevice), hClient, + pCallContext->pResourceRef, + pCallContext->pResourceRef->pParentRef); +} + +/*--------------------------static and helper functions ----------------------*/ +// move this to Device +/** + * @brief Destroy associated memory with this vaspace. + * + * @param[in] pDevice Pointer to Device instance + * @param[in] hVASpace VASpace handle of the vaspace that will be deleted + **/ +static void +destroyMemDesc +( + Device *pDevice, + NvHandle hVASpace +) +{ + VirtualMemory *pVirtualMemory = NULL; + RsClient *pClient = RES_GET_CLIENT(pDevice); + NODE *pNode = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // + // RS-TODO: Convert to resource server dependency tracking system. + // stdmemConstruct calls add refAddDependant() but does it work properly for + // sharing (?) + // + + // Check if any memory linked to this VASpace hasn't been freed. + btreeEnumStart(0, &pNode, pDevice->DevMemoryTable); + while (pNode != NULL) + { + Memory *pMemory = pNode->Data; + pVirtualMemory = dynamicCast(pMemory, VirtualMemory); + btreeEnumNext(&pNode, pDevice->DevMemoryTable); + + if ((pVirtualMemory != NULL) && + virtmemMatchesVASpace(pVirtualMemory, pClient->hClient, hVASpace)) + { + // Free hMemory + pRmApi->Free(pRmApi, pClient->hClient, RES_GET_HANDLE(pVirtualMemory)); + + // Restart iteration as memory will be freed. + btreeEnumStart(0, &pNode, pDevice->DevMemoryTable); + } + } +} + +/** + * @brief Translate flags to vaspace equivalent flags and perform error check. + * + * @param[in] allocFlags Client handle + * @param[out] translatedFlags The translated internal flags. 
+ **/ +static NV_STATUS translateAllocFlagsToVASpaceFlags(NvU32 allocFlags, NvU32 *translatedFlags) +{ + NV_STATUS status = NV_OK; + NvU32 flags = 0; + + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_MINIMIZE_PTETABLE_SIZE) + { + flags |= VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_RETRY_PTE_ALLOC_IN_SYS) + { + flags |= VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_SHARED_MANAGEMENT) + { + flags |= VASPACE_FLAGS_SHARED_MANAGEMENT; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_IS_MIRRORED) + { + flags |= VASPACE_FLAGS_SET_MIRRORED; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_ENABLE_PAGE_FAULTING) + { + flags |= VASPACE_FLAGS_ENABLE_FAULTING; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED) + { + flags |= VASPACE_FLAGS_IS_EXTERNALLY_OWNED; + flags |= VASPACE_FLAGS_DISABLE_SPLIT_VAS; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_ENABLE_NVLINK_ATS) + { + flags |= VASPACE_FLAGS_ENABLE_ATS; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_ALLOW_ZERO_ADDRESS) + { + flags |= VASPACE_FLAGS_ALLOW_ZERO_ADDRESS; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_IS_FLA) + { + flags |= VASPACE_FLAGS_FLA; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_SKIP_SCRUB_MEMPOOL) + { + flags |= VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL; + } + if (allocFlags & NV_VASPACE_ALLOCATION_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE) + { + flags |= VASPACE_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE; + } + flags |= VASPACE_FLAGS_ENABLE_VMM; + + // Validate the flag combinations + NV_CHECK_OR_RETURN(LEVEL_WARNING, + !((flags & VASPACE_FLAGS_IS_EXTERNALLY_OWNED) && + (flags & VASPACE_FLAGS_SET_MIRRORED)), + NV_ERR_INVALID_ARGUMENT); + + // MODs environment requires ATS to be enabled but RM to continue to own + // page table management + NV_CHECK_OR_RETURN(LEVEL_WARNING, + !((flags & VASPACE_FLAGS_ENABLE_ATS) && + !(flags & VASPACE_FLAGS_IS_EXTERNALLY_OWNED)), + NV_ERR_INVALID_ARGUMENT); + // + // 1766112: Prevent channels in fault-capable VAS from running unless bound + // User-mode clients can allocate a fault-capable VAS and schedule it + // without registering it with the UVM driver if it is not marked as + // externally owned. This will cause what looks like a hang on the GPU + // until the app is killed. + // ATS still requires non-externally-owned fault-capable VAS in MODS, + // but otherwise this combination is disallowed. + // + NV_CHECK_OR_RETURN(LEVEL_WARNING, + !((flags & VASPACE_FLAGS_ENABLE_FAULTING) && + !(flags & VASPACE_FLAGS_IS_EXTERNALLY_OWNED)), + NV_ERR_INVALID_ARGUMENT); + + *translatedFlags |= flags; + + return status; +} +/** + * @brief Translate page size to vaspace flags. + * + * @param[in] pNvVASpaceAllocParams user allocation params which has the pagesize. + * @return[out] flags that vaspace would understand. + **/ +static NvU32 translatePageSizeToVASpaceFlags(NV_VASPACE_ALLOCATION_PARAMETERS *pNvVASpaceAllocParams) +{ + NvU32 flags = 0; + + switch (pNvVASpaceAllocParams->bigPageSize) + { + case RM_PAGE_SIZE_64K: + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _64K); + break; + case RM_PAGE_SIZE_128K: + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _128K); + break; + default: + flags |= DRF_DEF(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, _DEFAULT); + break; + } + + return flags; +} + +/*! + * Helper function to reserve/release/trim the page levels for Server RM. + * This function is only needed for client VA space allocation, so we will + * return directly when that is not the case. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] hClient Handle of the client + * @param[in] gpuMask GPU mask + * @param[in] flags Flags for the corresponding VA allocation + * @param[in] action Requested action to manage the page levels + */ +static NV_STATUS +_vaspaceapiManagePageLevelsForSplitVaSpace +( + OBJGPU *pGpu, + NvHandle hClient, + NvU32 gpuMask, + NvU32 flags, + VASPACEAPI_MANAGE_PAGE_LEVELS_ACTION action +) +{ + NV_STATUS status = NV_OK; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool bCallingContextPlugin; + + // Split VA space is only supported in Client-RM with PMA enabled + if ((!IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_GSP_CLIENT(pGpu)) || + !RMCFG_FEATURE_PMA || + !memmgrIsPmaInitialized(pMemoryManager) || + !memmgrAreClientPageTablesPmaManaged(pMemoryManager) || + !!(flags & VASPACE_FLAGS_DISABLE_SPLIT_VAS)) + { + return NV_OK; + } + + // + // For Split VAS mechanism, the top 3 page levels for Server RM's carveout region + // is allocated by the client RM. With PMA managed pagetables, we need to reserve + // them upfront in memory pool, before it can be allocated. + // + NV_ASSERT_OK_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin)); + if (!(flags & VASPACE_FLAGS_FLA) && !bCallingContextPlugin) + { + // Loop over each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, gpuMask) + { + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemPool = NULL; + + // + // TODO - This call may not work properly at SMC environment, so + // need to thoutoughly test it against SMC sanities before enabling + // this function on non Client-RM environment. + // + NV_ASSERT_OK_OR_RETURN(memmgrPageLevelPoolsGetInfo(pGpu, pMemoryManager, hClient, &pMemPool)); + + if (action == VASPACEAPI_MANAGE_PAGE_LEVELS_RESERVE) + { + status = rmMemPoolReserve(pMemPool, 3 * RM_PAGE_SIZE, flags); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + else if (action == VASPACEAPI_MANAGE_PAGE_LEVELS_RELEASE) + { + rmMemPoolRelease(pMemPool, flags); + } + else if (action == VASPACEAPI_MANAGE_PAGE_LEVELS_TRIM) + { + rmMemPoolTrim(pMemPool, 0, flags); + } + } + FOR_EACH_GPU_IN_MASK_UC_END + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/virt_mem_allocator.c b/src/nvidia/src/kernel/gpu/mem_mgr/virt_mem_allocator.c new file mode 100644 index 000000000..83e6d96a6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/virt_mem_allocator.c @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Rotuines ***************************\ +* * +* VirtMemAllocator Object Function Definitions. * +* * +\***************************************************************************/ + +#include "core/core.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "nvRmReg.h" +#include "os/os.h" +#include "vgpu/rpc.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" + +#include "ctrl/ctrl0002.h" + +// local functions +static NV_STATUS dmaInitRegistryOverrides(OBJGPU*, VirtMemAllocator*); + +NV_STATUS +dmaConstructEngine_IMPL(OBJGPU *pGpu, VirtMemAllocator *pDma, ENGDESCRIPTOR engDesc) +{ + NV_STATUS rmStatus; + + pGpu = ENG_GET_GPU(pDma); + rmStatus = dmaConstructHal_HAL(pGpu, pDma); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = dmaInitRegistryOverrides(pGpu, pDma); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, ", Could not apply registry overrides\n"); + DBG_BREAKPOINT(); + return rmStatus; + } + + return dmaInit_HAL(pGpu, pDma); +} + +/*! + * @brief Initialize all registry overrides for this object + * + * @param[in] pGpu GPU object pointer + * @param[in,out] pDma VirtMemAllocator object pointer + */ +static NV_STATUS +dmaInitRegistryOverrides(OBJGPU *pGpu, VirtMemAllocator *pDma) +{ + NV_STATUS rmStatus = NV_OK; + + return rmStatus; +} + +NV_STATUS dmaStateInitLocked_IMPL(OBJGPU *pGpu, VirtMemAllocator *pDma) +{ + dmaInitGart_HAL(pGpu, pDma); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_mgr/virt_mem_allocator_vgpu.c b/src/nvidia/src/kernel/gpu/mem_mgr/virt_mem_allocator_vgpu.c new file mode 100644 index 000000000..07bc73bc7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_mgr/virt_mem_allocator_vgpu.c @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/********************* VGPU Specific FB Routines *********************\ + * * + * VGPU specific VMA routines reside in this file. 
* + * * +\*********************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" + +NV_STATUS +dmaConstructHal_VGPUSTUB(OBJGPU *pGpu, VirtMemAllocator *pDma) +{ + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_sys/arch/ampere/kern_mem_sys_ga100.c b/src/nvidia/src/kernel/gpu/mem_sys/arch/ampere/kern_mem_sys_ga100.c new file mode 100644 index 000000000..0631db7e3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/arch/ampere/kern_mem_sys_ga100.c @@ -0,0 +1,439 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "gpu/mem_mgr/mem_desc.h" + +#include "published/ampere/ga100/dev_fb.h" + +/*! + * @brief Write the sysmemFlushBuffer val into the NV_PFB_NISO_FLUSH_SYSMEM_ADDR register + * + * @param[in] pGpu OBJGPU pointer + * @param[in[ pKernelMemorySystem KernelMemorySystem pointer + * + * @returns void + */ +void +kmemsysProgramSysmemFlushBuffer_GA100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + NvU64 alignedSysmemFlushBufferAddr = 0x0; + NvU32 alignedSysmemFlushBufferAddrHi = 0x0; + + NV_ASSERT(pKernelMemorySystem->sysmemFlushBuffer != 0); + + alignedSysmemFlushBufferAddr = pKernelMemorySystem->sysmemFlushBuffer >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT; + alignedSysmemFlushBufferAddrHi = DRF_VAL(_PFB, _NISO_FLUSH_SYSMEM_ADDR_HI, _ADR_63_40, + NvU64_HI32(alignedSysmemFlushBufferAddr)); + + NV_ASSERT((alignedSysmemFlushBufferAddrHi & (~NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI_MASK)) == 0); + + alignedSysmemFlushBufferAddrHi &= NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI_MASK; + + GPU_FLD_WR_DRF_NUM(pGpu, _PFB, _NISO_FLUSH_SYSMEM_ADDR_HI, _ADR_63_40, + alignedSysmemFlushBufferAddrHi); + GPU_FLD_WR_DRF_NUM(pGpu, _PFB, _NISO_FLUSH_SYSMEM_ADDR, _ADR_39_08, + NvU64_LO32(alignedSysmemFlushBufferAddr)); +} + +/* + * @brief Initialize the sysmem flush buffer + * + * Setting up the sysmem flush buffer needs to be done very early in some cases + * as it's required for the GPU to perform a system flush. One such case is + * resetting GPU FALCONs and in particular resetting the PMU as part of VBIOS + * init. 
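kmemsysProgramSysmemFlushBuffer_GA100 above shifts the flush-buffer physical address right by NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT and then splits the result between the ADR_39_08 field of the low register and the ADR_63_40 field of the high register. The sketch below assumes an 8-bit shift (as the _ADR_39_08 field name suggests) and 32/24-bit field widths; it only demonstrates the packing arithmetic and round-trips the address to check it.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define ADDR_SHIFT   8u            /* assumed from the _ADR_39_08 field name   */
#define LO_BITS      32u           /* bits 39:8  -> low register field         */
#define HI_BITS      24u           /* bits 63:40 -> high register field        */

int main(void)
{
    uint64_t physAddr = 0x0123456789ABCD00ull;   /* 256-byte aligned buffer address */
    uint64_t shifted  = physAddr >> ADDR_SHIFT;

    uint32_t lo = (uint32_t)(shifted & ((1ull << LO_BITS) - 1));                  /* ADR_39_08 */
    uint32_t hi = (uint32_t)((shifted >> LO_BITS) & ((1ull << HI_BITS) - 1));     /* ADR_63_40 */

    uint64_t rebuilt = (((uint64_t)hi << LO_BITS) | lo) << ADDR_SHIFT;
    assert(rebuilt == physAddr);                 /* no information lost by the split */
    printf("lo=0x%08x hi=0x%06x\n", lo, hi);
    return 0;
}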
+ * + * @returns NV_OK if all is okay. Otherwise an error-specific value. + */ +NV_STATUS +kmemsysInitFlushSysmemBuffer_GA100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + NV_STATUS status; + + // + // In case of suspend/resume, the buffer might be already allocated, but + // the HW still needs to be programmed below. + // + if (pKernelMemorySystem->pSysmemFlushBufferMemDesc == NULL) + { + // + // Sysmem flush buffer + // The sysmembar flush does a zero byte read of sysmem if there was a + // sysmem write since the last flush. The actual memory does have + // to be valid and allocated at all times because an actual read may + // be issued. + // + status = memdescCreate(&pKernelMemorySystem->pSysmemFlushBufferMemDesc, + pGpu, RM_PAGE_SIZE, + (1 << NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT), + NV_TRUE, + ADDR_SYSMEM, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE); + if (status != NV_OK) + return status; + + status = memdescAlloc(pKernelMemorySystem->pSysmemFlushBufferMemDesc); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not allocate sysmem flush buffer: %x\n", status); + DBG_BREAKPOINT(); + return status; + } + + pKernelMemorySystem->sysmemFlushBuffer = memdescGetPhysAddr(pKernelMemorySystem->pSysmemFlushBufferMemDesc, AT_GPU, 0); + } + + kmemsysProgramSysmemFlushBuffer_HAL(pGpu, pKernelMemorySystem); + return NV_OK; +} + +/*! + * @brief Validate the sysmemFlushBuffer val and assert + * + * @param[in] pGpu OBJGPU pointer + * @param[in[ pKernelMemorySystem KernelMemorySystem pointer + * + * @returns void + */ +void +kmemsysAssertSysmemFlushBufferValid_GA100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + NV_ASSERT((GPU_REG_RD_DRF(pGpu, _PFB, _NISO_FLUSH_SYSMEM_ADDR, _ADR_39_08) != 0) + || (GPU_REG_RD_DRF(pGpu, _PFB, _NISO_FLUSH_SYSMEM_ADDR_HI, _ADR_63_40) != 0)); +} + +/*! + * @brief Read MIG Memory CFG register + */ +NV_STATUS +kmemsysReadMIGMemoryCfg_GA100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_MEMSYS_GET_MIG_MEMORY_CONFIG_PARAMS params = {0}; + + NV_ASSERT_OK_OR_RETURN( + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_KMEMSYS_GET_MIG_MEMORY_CONFIG, + ¶ms, + sizeof(params))); + + pKernelMemorySystem->memBoundaryCfgTable.memBoundaryCfgA = params.memBoundaryCfgA; + pKernelMemorySystem->memBoundaryCfgTable.memBoundaryCfgB = params.memBoundaryCfgB; + pKernelMemorySystem->memBoundaryCfgTable.memBoundaryCfgC = params.memBoundaryCfgC; + + return NV_OK; +} + +/*! + * @brief Read MIG Memory partition table + */ +NV_STATUS +kmemsysInitMIGMemoryPartitionTable_GA100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + + if (!pMemorySystemConfig->bDisablePlcForCertainOffsetsBug3046774) + return NV_OK; + + NV_ASSERT_OK_OR_RETURN( + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_MIG_MEMORY_PARTITION_TABLE, + &pKernelMemorySystem->migMemoryPartitionTable, + sizeof(pKernelMemorySystem->migMemoryPartitionTable))); + + return NV_OK; +} + +/*! 
+ * @brief Function to map swizzId to mem range given total range in Fb + */ +static NV_STATUS +_kmemsysSwizzIdToFbMemRange_GA100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 swizzId, + NvU64 vmmuSegmentSize, + NV_RANGE totalRange, + NV_RANGE *pAddrRange +) +{ + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmemsysSwizzIdToMIGMemRange(pGpu, pKernelMemorySystem, swizzId, totalRange, pAddrRange)); + + // + // If pAddrRange->lo is vmmuSegment aligned, then alignedUp + // by one segment else simply align it. We need to make sure we have + // atleast 1 VMMU segment delta between consecutive segments + // + if (pAddrRange->lo != 0) + { + pAddrRange->lo = NV_IS_ALIGNED64(pAddrRange->lo, vmmuSegmentSize) ? + pAddrRange->lo + vmmuSegmentSize : + NV_ALIGN_UP64(pAddrRange->lo, vmmuSegmentSize); + } + + return NV_OK; +} + +/*! + * @brief Function to map swizzId to VMMU Segments + */ +NV_STATUS +kmemsysSwizzIdToVmmuSegmentsRange_GA100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 swizzId, + NvU32 vmmuSegmentSize, + NvU32 totalVmmuSegments +) +{ + // + // This parameter represents the number of boundaries drawn when a + // specific GPU instance type is created + // + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 numBoundaries = 0; + NvU32 partitionDivFactor = 1; + NV_RANGE addrRange = NV_RANGE_EMPTY; + NV_RANGE partitionableMemoryRange = memmgrGetMIGPartitionableMemoryRange(pGpu, pMemoryManager); + NvU64 startingVmmuSegment; + NvU64 memSizeInVmmuSegment; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + _kmemsysSwizzIdToFbMemRange_GA100(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, partitionableMemoryRange, &addrRange)); + + switch (swizzId) + { + case 0: + { + numBoundaries = 0; + partitionDivFactor = 1; + break; + } + case 1: + case 2: + { + numBoundaries = 1; + partitionDivFactor = 2; + break; + } + case 3: + case 4: + case 5: + case 6: + { + numBoundaries = 3; + partitionDivFactor = 4; + break; + } + case 7: + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + { + numBoundaries = 7; + partitionDivFactor = 8; + break; + } + } + + startingVmmuSegment = addrRange.lo / vmmuSegmentSize; + memSizeInVmmuSegment = (totalVmmuSegments - numBoundaries) / partitionDivFactor; + + NV_ASSERT_OK_OR_RETURN( + kmemsysInitMIGGPUInstanceMemConfigForSwizzId(pGpu, pKernelMemorySystem, swizzId, startingVmmuSegment, memSizeInVmmuSegment)); + + return NV_OK; +} + +NvBool +kmemsysIsPagePLCable_GA100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU64 physAddr, + NvU64 pageSize +) +{ + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + NvU64 topAddr = ((pKernelMemorySystem->memBoundaryCfgTable.memBoundaryCfgB + pKernelMemorySystem->memBoundaryCfgTable.memBoundaryCfgA) * + pMemorySystemConfig->ltcCount * pMemorySystemConfig->ltsPerLtcCount) >> 4; + NvU64 bottomAddr = (pKernelMemorySystem->memBoundaryCfgTable.memBoundaryCfgA * pMemorySystemConfig->ltcCount * + pMemorySystemConfig->ltsPerLtcCount) >> 4; + NvU64 secureTopAddr = pKernelMemorySystem->memBoundaryCfgTable.memBoundaryCfgC; + NvU64 revBottomAddr = pKernelMemorySystem->memBoundaryCfgTable.memBoundaryCfgA << 5; + NvBool bPageSize2M = (pageSize == (2 << 20)); + NvU64 partitionedMemorySize = ((topAddr - bottomAddr) << 16); + NvU32 blackBlockOffset = 2048 - (NvU32)((revBottomAddr >> 1) % 2048); + NvU32 swizzId = 0; + + if (pMemorySystemConfig->ltsPerLtcCount * pMemorySystemConfig->ltcCount != 80) + return NV_TRUE; 
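The switch in kmemsysSwizzIdToVmmuSegmentsRange_GA100 above collapses each MIG swizz ID into the number of internal boundaries the instance introduces and the fraction of the partitionable VMMU segments it owns (full, half, quarter or eighth). The helper below mirrors that mapping in a compact form; the 512-segment total in the demo is an arbitrary example value.

#include <stdio.h>
#include <stdint.h>

/* Mirror of the swizzId -> (boundaries, divisor) mapping in the switch above. */
static void swizzIdToFraction(uint32_t swizzId, uint32_t *numBoundaries, uint32_t *divFactor)
{
    if (swizzId == 0)       { *numBoundaries = 0; *divFactor = 1; }
    else if (swizzId <= 2)  { *numBoundaries = 1; *divFactor = 2; }
    else if (swizzId <= 6)  { *numBoundaries = 3; *divFactor = 4; }
    else if (swizzId <= 14) { *numBoundaries = 7; *divFactor = 8; }
    else                    { *numBoundaries = 0; *divFactor = 1; }   /* unexpected id */
}

int main(void)
{
    uint32_t totalVmmuSegments = 512, b, d;
    for (uint32_t id = 0; id <= 14; id++)
    {
        swizzIdToFraction(id, &b, &d);
        printf("swizzId %2u -> %u segments\n", id, (totalVmmuSegments - b) / d);
    }
    return 0;
}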
+ + if (!((pMemorySystemConfig->ltsPerLtcCount != 4) || ((pMemorySystemConfig->ltcCount % 4) != 0) || (pMemorySystemConfig->ltcCount < 4)) && + !(topAddr >= secureTopAddr) && + !(physAddr >= (secureTopAddr << 16)) && + !((physAddr < (bottomAddr << 16)) || (physAddr >= (topAddr << 16)))) + { + NvU32 partition_id = (NvU32)((physAddr - (bottomAddr << 16)) / (partitionedMemorySize / 8)); + NV_ASSERT_OR_RETURN(partition_id < 8, NV_TRUE); + swizzId = pKernelMemorySystem->migMemoryPartitionTable.data[partition_id]; + } + + + switch (swizzId) + { + case 0: + if (!bPageSize2M) + { + return ((physAddr % (160 * 256 * 1024)) >= (3 * 256 * 1024)); + } + else + { + NvBool noPLC = NV_FALSE; + + for (NvU64 addr = physAddr; (addr < physAddr + (2 * 1024 * 1024)); addr += (256 * 1024)) + { + noPLC |= ((addr % (160 * 256 * 1024)) < (3 * 256 * 1024)); + } + + return !noPLC; + } + case 1: + case 2: + { + NvU64 rebasePA = physAddr - (bottomAddr << 16) - ((partitionedMemorySize / 2) * (swizzId - 1)) + + 160 * 64 * 1024 - (blackBlockOffset * 10 / 128) * (128 * 1024); + + if (!bPageSize2M) + { + return ((rebasePA % (160 * 128 * 1024)) >= (4 * 128 * 1024)); + } + else + { + NvBool noPLC = NV_FALSE; + + for (NvU64 addr = rebasePA; (addr < rebasePA + (2 * 1024 * 1024)); addr += (128 * 1024)) + { + noPLC |= ((addr % (160 * 128 * 1024)) < (4 * 128 * 1024)); + } + + return !noPLC; + } + } + case 3: + case 4: + case 5: + case 6: + { + NvU64 rebasePA = physAddr - (bottomAddr << 16) - ((partitionedMemorySize / 4) * (swizzId - 3)) + + 160 * 64 * 1024 - (blackBlockOffset * 10 / 128) * (64 * 1024); + + if (!bPageSize2M) + { + return ((rebasePA % (160 * 64 * 1024)) >= (4 * 64 * 1024)); + } + else + { + NvBool noPLC = NV_FALSE; + + for (NvU64 addr = rebasePA; (addr < rebasePA + (2 * 1024 * 1024)); addr += (64 * 1024)) + { + noPLC |= ((addr % (160 * 64 * 1024)) < (4 * 64 * 1024)); + } + + return !noPLC; + } + } + case 7: + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + { + NvU64 rebasePA = physAddr - (bottomAddr << 16) - ((partitionedMemorySize / 8) * (swizzId - 7)) + + 80 * 64 * 1024 - (blackBlockOffset * 10 / 256) * (64 * 1024); + + if (!bPageSize2M) + { + return ((rebasePA % (80 * 64 * 1024)) >= (3 * 64 * 1024)); + } + else + { + NvBool noPLC = NV_FALSE; + + for (NvU64 addr = rebasePA; (addr < rebasePA + (2 * 1024 * 1024)); addr += (64 * 1024)) + { + noPLC |= ((addr % (80 * 64 * 1024)) < (3 * 64 * 1024)); + } + + return !noPLC; + } + } + default: + return NV_TRUE; + } +} diff --git a/src/nvidia/src/kernel/gpu/mem_sys/arch/ampere/kern_mem_sys_ga102.c b/src/nvidia/src/kernel/gpu/mem_sys/arch/ampere/kern_mem_sys_ga102.c new file mode 100644 index 000000000..bf9ee2e9d --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/arch/ampere/kern_mem_sys_ga102.c @@ -0,0 +1,142 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" + +#include "published/ampere/ga102/dev_gc6_island.h" +#include "published/ampere/ga102/dev_gc6_island_addendum.h" + +/*! + * @brief get usable fb size in MB from NV_USABLE_FB_SIZE_IN_MB + * + * @param[out] pFbSize FB size computed from NV_USABLE_FB_SIZE_IN_MB + */ +NV_STATUS +kmemsysReadUsableFbSize_GA102 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU64 *pFbSize +) +{ + // + // VBIOS Devinit calculates the usable FB size calculated and published in + // NV_USABLE_FB_SIZE_IN_MB. + // + NvU32 regValue = GPU_REG_RD32(pGpu, NV_USABLE_FB_SIZE_IN_MB); + + *pFbSize = ((NvU64) DRF_VAL(_USABLE, _FB_SIZE_IN_MB, _VALUE, regValue) << 20); + return NV_OK; +} + +NvBool +kmemsysIsPagePLCable_GA102 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU64 physAddr, + NvU64 pageSize +) +{ + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + NvBool bPageSize2M = (pageSize == (2 << 20)); + + if (pMemorySystemConfig->ltsPerLtcCount * pMemorySystemConfig->ltcCount == 48) + { + if (!bPageSize2M) + { + return ((physAddr % (384 * 64 * 1024)) >= (6 * 64 * 1024)); + } + else + { + NvBool noPLC = NV_FALSE; + + for (NvU64 addr = physAddr; (addr < physAddr + (2 * 1024 * 1024)); addr += (64 * 1024)) + { + noPLC |= ((addr % (384 * 64 * 1024)) < (6 * 64 * 1024)); + } + + return !noPLC; + } + } + else if (pMemorySystemConfig->ltsPerLtcCount * pMemorySystemConfig->ltcCount == 40) + { + if (!bPageSize2M) + { + return ((physAddr % (320 * 64 * 1024)) >= (5 * 64 * 1024)); + } + else + { + NvBool noPLC = NV_FALSE; + + for (NvU64 addr = physAddr; (addr < physAddr + (2 * 1024 * 1024)); addr += (64 * 1024)) + { + noPLC |= ((addr % (320 * 64 * 1024)) < (5 * 64 * 1024)); + } + + return !noPLC; + } + } + else if (pMemorySystemConfig->ltsPerLtcCount == 4 && pMemorySystemConfig->ltcCount == 8) + { + if (!bPageSize2M) + { + return ((physAddr % (256 * 64 * 1024)) >= (4 * 64 * 1024)); + } + else + { + NvBool noPLC = NV_FALSE; + + for (NvU64 addr = physAddr; (addr < physAddr + (2 * 1024 * 1024)); addr += (64 * 1024)) + { + noPLC |= ((addr % (256 * 64 * 1024)) < (4 * 64 * 1024)); + } + + return !noPLC; + } + } + else if (pMemorySystemConfig->ltsPerLtcCount == 3 && pMemorySystemConfig->ltcCount == 8) + { + if (!bPageSize2M) + { + return ((physAddr % (192 * 64 * 1024)) >= (3 * 64 * 1024)); + } + else + { + NvBool noPLC = NV_FALSE; + + for (NvU64 addr = physAddr; (addr < physAddr + (2 * 1024 * 1024)); addr += (64 * 1024)) + { + noPLC |= ((addr % (192 * 64 * 1024)) < (3 * 64 * 1024)); + } + + return !noPLC; + } + } + else + { + return NV_TRUE; + } +} diff --git a/src/nvidia/src/kernel/gpu/mem_sys/arch/maxwell/kern_mem_sys_gm107.c b/src/nvidia/src/kernel/gpu/mem_sys/arch/maxwell/kern_mem_sys_gm107.c new file mode 100644 index 000000000..7e79748fa --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/arch/maxwell/kern_mem_sys_gm107.c @@ -0,0 +1,420 @@ +/* + * 
SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_desc.h" + +#include "gpu/device/device.h" + +#include "published/maxwell/gm107/dev_fb.h" +#include "published/maxwell/gm107/dev_bus.h" +#include "published/maxwell/gm107/dev_flush.h" + +// Based on busFlushSingle_GM107 + +NV_STATUS +kmemsysDoCacheOp_GM107 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 reg, + NvU32 regValue, + NvU32 pollMask, + PRMTIMEOUT pTimeout +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 cnt = 0; + NV_STATUS timeoutStatus = NV_OK; + NvU32 regValueRead = 0; + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + // + // When the GPU is lost we cannot expect to successfully do cache + // maintenance (see Bug 1557278). + // + return rmStatus; + } + + // We don't want this breakpoint when a debug build is being used by special test + // equipment (e.g. ATE) that expects to hit this situation. Bug 672073 +#ifdef DEBUG + if (!(API_GPU_IN_RESET_SANITY_CHECK(pGpu))) + { + NV_ASSERT(GPU_REG_RD32(pGpu, NV_UFLUSH_FB_FLUSH) == 0); + NV_ASSERT(kmemsysReadL2SysmemInvalidateReg_HAL(pGpu, pKernelMemorySystem) == 0); + NV_ASSERT(kmemsysReadL2PeermemInvalidateReg_HAL(pGpu, pKernelMemorySystem) == 0); + NV_ASSERT(GPU_REG_RD32(pGpu, NV_UFLUSH_L2_CLEAN_COMPTAGS) == 0); + NV_ASSERT(GPU_REG_RD32(pGpu, NV_UFLUSH_L2_FLUSH_DIRTY) == 0); + } +#endif // DEBUG + + switch (reg) + { + case NV_UFLUSH_L2_PEERMEM_INVALIDATE: + kmemsysWriteL2PeermemInvalidateReg_HAL(pGpu, pKernelMemorySystem, regValue); + break; + case NV_UFLUSH_L2_SYSMEM_INVALIDATE: + kmemsysWriteL2SysmemInvalidateReg_HAL(pGpu, pKernelMemorySystem, regValue); + break; + default: + GPU_REG_WR32(pGpu, reg, regValue); + } + + while(1) + { + switch (reg) + { + case NV_UFLUSH_L2_PEERMEM_INVALIDATE: + regValueRead = kmemsysReadL2PeermemInvalidateReg_HAL(pGpu, pKernelMemorySystem); + break; + case NV_UFLUSH_L2_SYSMEM_INVALIDATE: + regValueRead = kmemsysReadL2SysmemInvalidateReg_HAL(pGpu, pKernelMemorySystem); + break; + default: + regValueRead = GPU_REG_RD32(pGpu, reg); + } + + if (regValueRead & pollMask) + { + if (timeoutStatus == NV_ERR_TIMEOUT) + { + // + // This should not timeout, except for a HW bug. Famous last words. 
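kmemsysDoCacheOp_GM107 above writes the trigger register and then polls it until the hardware clears the poll mask; the timeout is recorded on one iteration but failure is only declared on the next read that still shows the bit set, so a flush that completes right at the deadline is not reported as a timeout. A stripped-down sketch of that polling shape with a simulated register (all names here are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Simulated register: the "hardware" clears the busy bit after a few reads. */
static uint32_t g_reg = 0x1;
static uint32_t regRead(void) { static int reads; if (++reads > 3) g_reg &= ~0x1u; return g_reg; }

static int pollUntilClear(uint32_t pollMask, unsigned maxSpins)
{
    bool timedOut = false;
    unsigned spins = 0;

    for (;;)
    {
        if ((regRead() & pollMask) == 0)
            return 0;                       /* hardware finished */
        if (timedOut)
        {
            fprintf(stderr, "timeout after %u spins\n", spins);
            return -1;
        }
        timedOut = (++spins >= maxSpins);   /* one more read still happens after this trips */
    }
}

int main(void)
{
    printf("status=%d\n", pollUntilClear(0x1, 1000));
    return 0;
}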
+ // On !DEBUG we just keep trucking, it's the best we can do. + // + NV_PRINTF(LEVEL_ERROR, + "- timeout error waiting for reg 0x%x update cnt=%d\n", + reg, cnt); + rmStatus = NV_ERR_TIMEOUT; + DBG_BREAKPOINT(); + break; + } + else if ( API_GPU_IN_RESET_SANITY_CHECK(pGpu) || + !API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + // + // The GPU is in full chip reset, or has fallen off the bus + // Just return + // + return NV_OK; + } + timeoutStatus = gpuCheckTimeout(pGpu, pTimeout); + osSpinLoop(); + cnt++; + } + else + break; + } + +#ifdef DEBUG + if (cnt > 1) + { + NvU32 intr0 = 0; + intr0 = GPU_REG_RD32(pGpu, NV_PBUS_INTR_0); + NV_ASSERT(DRF_VAL(_PBUS, _INTR_0, _FB_ACK_TIMEOUT, intr0) != NV_PBUS_INTR_0_FB_ACK_TIMEOUT_PENDING); + } +#endif // DEBUG + + return rmStatus; +} + +void +kmemsysWriteL2SysmemInvalidateReg_GM107 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 value +) +{ + GPU_REG_WR32(pGpu, NV_UFLUSH_L2_SYSMEM_INVALIDATE, value); +} + +NvU32 +kmemsysReadL2SysmemInvalidateReg_GM107 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + return GPU_REG_RD32(pGpu, NV_UFLUSH_L2_SYSMEM_INVALIDATE); +} + +void +kmemsysWriteL2PeermemInvalidateReg_GM107 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 value +) +{ + GPU_REG_WR32(pGpu, NV_UFLUSH_L2_PEERMEM_INVALIDATE, value); +} + +NvU32 +kmemsysReadL2PeermemInvalidateReg_GM107 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + return GPU_REG_RD32(pGpu, NV_UFLUSH_L2_PEERMEM_INVALIDATE); +} + +/*! + * @brief Initialize the sysmem flush buffer + * + * Setting up the sysmem flush buffer needs to be done very early in some cases + * as it's required for the GPU to perform a system flush. One such case is + * resetting GPU FALCONs and in particular resetting the PMU as part of VBIOS + * init. + * + * @returns NV_OK if all is okay. Otherwise an error-specific value. + */ +NV_STATUS +kmemsysInitFlushSysmemBuffer_GM107 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + NV_STATUS status; + NvBool bTryAgain = NV_FALSE; + + // + // In case of suspend/resume, the buffer might be already allocated, but + // the HW still needs to be programmed below. + // + if (pKernelMemorySystem->pSysmemFlushBufferMemDesc == NULL) + { + const NvU32 flushBufferDmaAddressSize = 40; + RmPhysAddr dmaWindowStartAddr = gpuGetDmaStartAddress(pGpu); + RmPhysAddr dmaWindowEndAddr = dmaWindowStartAddr + + (1ULL << flushBufferDmaAddressSize) - 1; + + // + // Sysmem flush buffer + // The sysmembar flush does a zero byte read of sysmem if there was a + // sysmem write since the last flush. The actual memory does have + // to be valid and allocated at all times because an actual read may + // be issued (observed on e.g. GF108). + // + + // + // First, try to allocate a 32-bit addressable DMA memory without + // lowering the DMA address size. + // This is currently implemented for Linux where the MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE + // will allocate 32-bit memory by using GFP_DMA32 flag. + // + status = memdescCreate(&pKernelMemorySystem->pSysmemFlushBufferMemDesc, + pGpu, RM_PAGE_SIZE, + (1 << NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT), + NV_TRUE, + ADDR_SYSMEM, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_ALLOC_32BIT_ADDRESSABLE); + if (status != NV_OK) + return status; + + status = memdescAlloc(pKernelMemorySystem->pSysmemFlushBufferMemDesc); + + // + // Check if the memory allocation failed (probably due to no available + // memory under 4GB). 
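+ // For reference: with flushBufferDmaAddressSize = 40, the window checked
+ // below spans [dmaWindowStartAddr, dmaWindowStartAddr + 2^40 - 1] (1 TiB),
+ // and the RM_PAGE_SIZE buffer must land entirely inside it. The _ADR_39_08
+ // field name suggests the flush register holds address bits 39:8, which is
+ // why the address is programmed right-shifted by
+ // NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT and the buffer is created with a
+ // (1 << NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT) alignment.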
+ // + if (status != NV_OK) + bTryAgain = NV_TRUE; + else + { + // + // Check if the DMA address returned is not within 40-bit boundary + // (possible on non-Linux platforms). + // + pKernelMemorySystem->sysmemFlushBuffer = memdescGetPhysAddr(pKernelMemorySystem->pSysmemFlushBufferMemDesc, AT_GPU, 0); + if (pKernelMemorySystem->sysmemFlushBuffer < dmaWindowStartAddr || + pKernelMemorySystem->sysmemFlushBuffer + RM_PAGE_SIZE - 1 > dmaWindowEndAddr) + bTryAgain = NV_TRUE; + } + + // + // If above checks are satisfied, do the actual sysmem flush buffer setup. + // If not, try again with the WAR to temporarily lower the DMA address size. + // + if (!bTryAgain) + { + GPU_FLD_WR_DRF_NUM(pGpu, _PFB, _NISO_FLUSH_SYSMEM_ADDR, _ADR_39_08, + NvU64_LO32(pKernelMemorySystem->sysmemFlushBuffer >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT)); + + return NV_OK; + } + + memdescFree(pKernelMemorySystem->pSysmemFlushBufferMemDesc); + memdescDestroy(pKernelMemorySystem->pSysmemFlushBufferMemDesc); + + status = memdescCreate(&pKernelMemorySystem->pSysmemFlushBufferMemDesc, + pGpu, RM_PAGE_SIZE, + (1 << NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT), + NV_TRUE, + ADDR_SYSMEM, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE); + if (status != NV_OK) + return status; + // + // Temporarily lower the DMA address size + // + // This is admittedly hacky and only safe during GPU initialization, + // before other drivers like UVM (at least on Linux), can start + // requesting its own DMA mappings for the same device. + // + // If DMA address size modification ever becomes needed in more places, + // making it a part of the memdesc APIs would be cleaner. + // + if (gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM) > flushBufferDmaAddressSize) + { + memdescOverridePhysicalAddressWidthWindowsWAR(pGpu, pKernelMemorySystem->pSysmemFlushBufferMemDesc, flushBufferDmaAddressSize); + osDmaSetAddressSize(pGpu->pOsGpuInfo, flushBufferDmaAddressSize); + } + + status = memdescAlloc(pKernelMemorySystem->pSysmemFlushBufferMemDesc); + + // Restore it back to what HW supports + if (gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM) > flushBufferDmaAddressSize) + { + osDmaSetAddressSize(pGpu->pOsGpuInfo, gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM)); + } + + if (status == NV_OK) + { + pKernelMemorySystem->sysmemFlushBuffer = memdescGetPhysAddr(pKernelMemorySystem->pSysmemFlushBufferMemDesc, AT_GPU, 0); + } + else if (status == NV_ERR_INVALID_ADDRESS) + { + if (NVCPU_IS_PPC64LE && dmaWindowStartAddr != 0) + { + pKernelMemorySystem->sysmemFlushBuffer = dmaWindowStartAddr; + } + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Could not allocate sysmem flush buffer: %x\n", status); + DBG_BREAKPOINT(); + return status; + } + + // Manually redo the memdesc addressability check for the reduced address size + if (pKernelMemorySystem->sysmemFlushBuffer < dmaWindowStartAddr || + pKernelMemorySystem->sysmemFlushBuffer + RM_PAGE_SIZE - 1 > dmaWindowEndAddr) + { + NvBool bMakeItFatal = NV_TRUE; + NV_PRINTF(LEVEL_ERROR, + "GPU 0x%x: Allocated sysmem flush buffer not addressable 0x%llx\n", + pGpu->gpuId, pKernelMemorySystem->sysmemFlushBuffer); + + if (IS_FMODEL(pGpu) || IS_RTLSIM(pGpu) || IS_EMULATION(pGpu)) + { + bMakeItFatal = NV_FALSE; + } + + // + // MODS on DGX-2 is hitting this. Make it non-fatal for now with + // the proper WAR implementation tracked in bug 2403630. + // + if (RMCFG_FEATURE_PLATFORM_MODS) + { + bMakeItFatal = NV_FALSE; + } + + // + // Windows on greater than 2 TB systems is hitting this. Making it + // non-fatal till a proper WAR is implemented. 
Bug 2423129 had + // this issue. + // + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + bMakeItFatal = NV_FALSE; + } + + if (bMakeItFatal) + { + return NV_ERR_NO_MEMORY; + } + } + } + + NV_ASSERT(pKernelMemorySystem->sysmemFlushBuffer != 0); + GPU_FLD_WR_DRF_NUM(pGpu, _PFB, _NISO_FLUSH_SYSMEM_ADDR, _ADR_39_08, + NvU64_LO32(pKernelMemorySystem->sysmemFlushBuffer >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT)); + + return NV_OK; +} + +/*! + * @brief Write the sysmemFlushBuffer val into the NV_PFB_NISO_FLUSH_SYSMEM_ADDR register + * + * @param[in] pGpu OBJGPU pointer + * @param[in[ pKernelMemorySystem KernelMemorySystem pointer + * + * @returns void + */ +void +kmemsysProgramSysmemFlushBuffer_GM107 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + // + // Q: Why are we writing this twice, both in fbInit and fbLoad? + // A: fbLoad is preceded by busLoad which can do sysmem writes. + // Writing in fbInit solves the load order guessing problem. + // + // Q: Why not just in fbInit? + // A: Because on power management resume, this value should be restored too. + // + GPU_FLD_WR_DRF_NUM(pGpu, _PFB, _NISO_FLUSH_SYSMEM_ADDR, _ADR_39_08, + NvU64_LO32(pKernelMemorySystem->sysmemFlushBuffer >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT)); +} + +/*! + * @brief Validate the sysmemFlushBuffer val and assert + * + * @param[in] pGpu OBJGPU pointer + * @param[in[ pKernelMemorySystem KernelMemorySystem pointer + * + * @returns void + */ +void +kmemsysAssertSysmemFlushBufferValid_GM107 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + NV_ASSERT(GPU_REG_RD_DRF(pGpu, _PFB, _NISO_FLUSH_SYSMEM_ADDR, _ADR_39_08) != 0); +} diff --git a/src/nvidia/src/kernel/gpu/mem_sys/arch/maxwell/kern_mem_sys_gm200.c b/src/nvidia/src/kernel/gpu/mem_sys/arch/maxwell/kern_mem_sys_gm200.c new file mode 100644 index 000000000..ccaed118f --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/arch/maxwell/kern_mem_sys_gm200.c @@ -0,0 +1,184 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/bus/kern_bus.h" + +#include "published/maxwell/gm200/dev_flush.h" + +/** + * @brief L2 Cache management OPs + * + * GK20A/T124 have a writeback L2 cache, so the cache ops are slightly + * different than those of Fermi/Kepler. Specifically, we can write back dirty + * lines to system memory. + * + * @param pMemDesc + * @param targetMem + * @param cacheOp + */ +NV_STATUS +kmemsysCacheOp_GM200 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + PMEMORY_DESCRIPTOR pMemDesc, + FB_CACHE_MEMTYPE targetMem, + FB_CACHE_OP cacheOp +) +{ + NV_STATUS status = NV_OK; + RMTIMEOUT timeout; + NvU32 reg; + NvU32 regValue; + NvU32 pollMask; + + if ((targetMem == FB_CACHE_MEM_UNDEFINED) && pMemDesc) + { + targetMem = (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) ? + FB_CACHE_VIDEO_MEMORY : FB_CACHE_SYSTEM_MEMORY; + } + + if ((cacheOp == FB_CACHE_OP_UNDEFINED) || (targetMem == FB_CACHE_MEM_UNDEFINED)) + { + NV_PRINTF(LEVEL_ERROR, "called with null %s\n", + cacheOp ? "cache operation" : "memory target"); + DBG_BREAKPOINT(); + return status; // return NV_OK + } + + // For GK20A, an explicit sysmembar flush is needed before L2 cache flush operation. + // Refer GK20A LTC IAS (section 5.5) + kbusSendSysmembar(pGpu, GPU_GET_KERNEL_BUS(pGpu)); + + // Wait for the flush to flow through + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + + switch (targetMem) + { + case FB_CACHE_SYSTEM_MEMORY: + if (cacheOp == FB_CACHE_INVALIDATE) + { + NV_PRINTF(LEVEL_INFO, + "Invalidate not supported, promoting to an evict (writeback + " + "invalidate clean lines).\n"); + cacheOp = FB_CACHE_EVICT; + } + + if (cacheOp == FB_CACHE_WRITEBACK || cacheOp == FB_CACHE_EVICT) + { + reg = NV_UFLUSH_L2_FLUSH_DIRTY; + regValue = FLD_SET_DRF(_UFLUSH, _L2_FLUSH_DIRTY, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_FLUSH_DIRTY, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_FLUSH_DIRTY, _OUTSTANDING, _TRUE, pollMask); + status = kmemsysDoCacheOp_HAL(pGpu, pKernelMemorySystem, reg, regValue, pollMask, &timeout); + } + + if (cacheOp == FB_CACHE_EVICT) + { + // + // Ideally we should use NV_UFLUSH_L2_INVALIDATE_CLEAN_LINES. + // But NV_UFLUSH_L2_INVALIDATE_CLEAN_LINES is not defined on desktop GPUs. + // NV_UFLUSH_L2_SYSMEM_INVALIDATE is same as NV_UFLUSH_L2_INVALIDATE_CLEAN_LINES, and is defined in all chips. 
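+ // In other words, the FB_CACHE_EVICT path for the sysmem target is a
+ // two-step sequence: the L2_FLUSH_DIRTY issued above writes dirty lines back
+ // to system memory, and the L2_SYSMEM_INVALIDATE issued here then drops the
+ // (now clean) lines; each step is polled to completion in
+ // kmemsysDoCacheOp_HAL using a _PENDING/_OUTSTANDING poll mask.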
+ // + reg = NV_UFLUSH_L2_SYSMEM_INVALIDATE; + regValue = FLD_SET_DRF(_UFLUSH, _L2_SYSMEM_INVALIDATE, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_SYSMEM_INVALIDATE, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_SYSMEM_INVALIDATE, _OUTSTANDING, _TRUE, pollMask); + status = kmemsysDoCacheOp_HAL(pGpu, pKernelMemorySystem, reg, regValue, pollMask, &timeout); + } + break; + case FB_CACHE_VIDEO_MEMORY: + if (cacheOp == FB_CACHE_EVICT) + { + status = kmemsysSendL2InvalidateEvict(pGpu, pKernelMemorySystem, + NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_ALL | + NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_CLEAN); + } + else if (cacheOp == FB_CACHE_INVALIDATE) + { + status = kmemsysSendL2InvalidateEvict(pGpu, pKernelMemorySystem, + NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_FLAGS_ALL); + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + break; + case FB_CACHE_PEER_MEMORY: + // + // for GF100 - for sysmem cache only invalidate operation is supported + // evict = writeback+invalidate is reduced to invalidate for GF100 + // + if((cacheOp != FB_CACHE_INVALIDATE) && (cacheOp != FB_CACHE_EVICT)) + return NV_OK; + + reg = NV_UFLUSH_L2_PEERMEM_INVALIDATE; + regValue = FLD_SET_DRF(_UFLUSH, _L2_PEERMEM_INVALIDATE, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_PEERMEM_INVALIDATE, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_PEERMEM_INVALIDATE, _OUTSTANDING, _TRUE, pollMask); + status = kmemsysDoCacheOp_HAL(pGpu, pKernelMemorySystem, reg, regValue, pollMask, &timeout); + break; + case FB_CACHE_COMPTAG_MEMORY: + if(cacheOp != FB_CACHE_WRITEBACK && cacheOp != FB_CACHE_EVICT) + return NV_OK; + + // + // Beware of HW bug 545242. Graphics should be idle and flushed here + // or comp tag cache could be corrupted. When mods uses this call + // during verif, this should already be the case. + // + reg = NV_UFLUSH_L2_CLEAN_COMPTAGS; + regValue = FLD_SET_DRF(_UFLUSH, _L2_CLEAN_COMPTAGS, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_CLEAN_COMPTAGS, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_CLEAN_COMPTAGS, _OUTSTANDING, _TRUE, pollMask); + status = kmemsysDoCacheOp_HAL(pGpu, pKernelMemorySystem, reg, regValue, pollMask, &timeout); + break; + case FB_CACHE_DIRTY: + if(cacheOp != FB_CACHE_WRITEBACK && cacheOp != FB_CACHE_EVICT) + return NV_OK; + + reg = NV_UFLUSH_L2_FLUSH_DIRTY; + regValue = FLD_SET_DRF(_UFLUSH, _L2_FLUSH_DIRTY, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_FLUSH_DIRTY, _PENDING, _BUSY, 0); + pollMask = FLD_SET_DRF(_UFLUSH, _L2_FLUSH_DIRTY, _OUTSTANDING, _TRUE, pollMask); + status = kmemsysDoCacheOp_HAL(pGpu, pKernelMemorySystem, reg, regValue, pollMask, &timeout); + break; + case FB_CACHE_DIRTY_ALL: + if(cacheOp != FB_CACHE_EVICT) + return NV_OK; + + status = kmemsysSendFlushL2AllRamsAndCaches(pGpu, pKernelMemorySystem); + break; + default: + // return OK for other memory targets + status = NV_OK; + } + + return status; +} + diff --git a/src/nvidia/src/kernel/gpu/mem_sys/arch/pascal/kern_mem_sys_gp102.c b/src/nvidia/src/kernel/gpu/mem_sys/arch/pascal/kern_mem_sys_gp102.c new file mode 100644 index 000000000..2dc639f20 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/arch/pascal/kern_mem_sys_gp102.c @@ -0,0 +1,59 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_sys/kern_mem_sys.h" + +#include "published/pascal/gp102/dev_fb.h" + +/*! + * @brief get usable fb size in MB from NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE + * + * @param[out] pFbSize FB size computed from memory range + */ +NV_STATUS +kmemsysReadUsableFbSize_GP102 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU64 *pFbSize +) +{ + // + // VBIOS Devinit calculates lower range values in + // NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE. + // + NvU32 regValue = GPU_REG_RD32(pGpu, NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE); + NvU32 lowerRangeMag = DRF_VAL(_PFB, _PRI_MMU_LOCAL_MEMORY_RANGE, _LOWER_MAG, regValue); + NvU32 lowerRangeScale = DRF_VAL(_PFB, _PRI_MMU_LOCAL_MEMORY_RANGE, _LOWER_SCALE, regValue); + NvU64 fbSize = ((NvU64) lowerRangeMag << (lowerRangeScale + 20)); + + if (FLD_TEST_DRF(_PFB, _PRI_MMU_LOCAL_MEMORY_RANGE, _ECC_MODE, _ENABLED, regValue)) + { + fbSize = fbSize / 16 * 15; + } + + *pFbSize = fbSize; + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mem_sys/arch/turing/kern_mem_sys_tu102.c b/src/nvidia/src/kernel/gpu/mem_sys/arch/turing/kern_mem_sys_tu102.c new file mode 100644 index 000000000..ad3938bfa --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/arch/turing/kern_mem_sys_tu102.c @@ -0,0 +1,70 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_sys/kern_mem_sys.h" + +#include "published/turing/tu102/dev_vm.h" + +void +kmemsysWriteL2SysmemInvalidateReg_TU102 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 value +) +{ + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_L2_SYSMEM_INVALIDATE, value); +} + +NvU32 +kmemsysReadL2SysmemInvalidateReg_TU102 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + return GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_L2_SYSMEM_INVALIDATE); +} + +void +kmemsysWriteL2PeermemInvalidateReg_TU102 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 value +) +{ + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_L2_PEERMEM_INVALIDATE, value); +} + +NvU32 +kmemsysReadL2PeermemInvalidateReg_TU102 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + return GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_L2_PEERMEM_INVALIDATE); +} diff --git a/src/nvidia/src/kernel/gpu/mem_sys/arch/volta/kern_mem_sys_gv100.c b/src/nvidia/src/kernel/gpu/mem_sys/arch/volta/kern_mem_sys_gv100.c new file mode 100644 index 000000000..43b7d3b51 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/arch/volta/kern_mem_sys_gv100.c @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "gpu/mem_sys/kern_mem_sys.h" + +/*! + * @brief Get physical address of the FB memory on systems where GPU memory + * is onlined to the OS + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelMemorySystem pointer to the kernel side KernelMemorySystem instance. 
+ * @param[in] physAddr Physical Address of FB memory + * @param[in] numaNodeId NUMA node id where FB memory is added to the + * kernel + * + * @return NV_OK on success + */ +NV_STATUS +kmemsysGetFbNumaInfo_GV100 +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU64 *physAddr, + NvS32 *numaNodeId +) +{ + NV_STATUS status; + + status = osGetFbNumaInfo(pGpu, physAddr, numaNodeId); + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "NUMA FB Physical address: 0x%llx Node ID: 0x%x\n", + *physAddr, *numaNodeId); + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mem_sys/kern_mem_sys.c b/src/nvidia/src/kernel/gpu/mem_sys/kern_mem_sys.c new file mode 100644 index 000000000..f46176c0f --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/kern_mem_sys.c @@ -0,0 +1,727 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "vgpu/vgpu_events.h" +#include "objrpc.h" +#include "gpu/bif/kernel_bif.h" +#include "gpu/bus/kern_bus.h" +#include "os/os.h" +#include "nvRmReg.h" + +NV_STATUS +kmemsysConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + ENGDESCRIPTOR engDesc +) +{ + pKernelMemorySystem->memPartitionNumaInfo = NULL; + + if (IS_GSP_CLIENT(pGpu)) + { + // Setting up the sysmem flush buffer needs to be done very early in some cases + // as it's required for the GPU to perform a system flush. One such case is + // resetting GPU FALCONs and in particular resetting the PMU as part of VBIOS + // init. + NV_ASSERT_OK_OR_RETURN(kmemsysInitFlushSysmemBuffer_HAL(pGpu, pKernelMemorySystem)); + } + + return NV_OK; +} + +static void +kmemsysInitRegistryOverrides +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + NvU32 data32; + + // + // Bug 1032432. Check regkey for FB pull + // + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_L2_CLEAN_FB_PULL, &data32) == NV_OK) + { + if (data32 == NV_REG_STR_RM_L2_CLEAN_FB_PULL_DISABLED) + pKernelMemorySystem->bL2CleanFbPull = NV_FALSE; + } +} + +/* + * Initialize the Kernel Memory System state. + * + * @param[in] pGpu pointer to the GPU instance. + * @param[in] pKernelMemorySystem pointer to the kernel side KernelMemorySystem instance. + * + * @return NV_OK upon success. 
+ */ +NV_STATUS kmemsysStateInitLocked_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + MEMORY_SYSTEM_STATIC_CONFIG *pStaticConfig; + NV_STATUS status = NV_OK; + + NV_ASSERT_OK_OR_GOTO(status, kmemsysEnsureSysmemFlushBufferInitialized(pGpu, pKernelMemorySystem), fail); + + pStaticConfig = portMemAllocNonPaged(sizeof(*pStaticConfig)); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pStaticConfig != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + portMemSet(pStaticConfig, 0, sizeof(pStaticConfig)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmemsysInitStaticConfig_HAL(pGpu, pKernelMemorySystem, pStaticConfig), + fail); + + pKernelMemorySystem->pStaticConfig = pStaticConfig; + + kmemsysInitRegistryOverrides(pGpu, pKernelMemorySystem); + +fail: + if (status != NV_OK) + { + portMemFree((void *)pKernelMemorySystem->pStaticConfig); + } + + return status; +} + +NV_STATUS +kmemsysStatePreLoad_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 flags +) +{ + // + // Program the sysmem flush buffer address and assert that the register contents are valid. + // The HAL wiring is such that a given RM build will only do one or the other (e.g., RM offloaded + // to ucode won't program the register itself but will assert that its contents are valid). + // + kmemsysProgramSysmemFlushBuffer_HAL(pGpu, pKernelMemorySystem); + kmemsysAssertSysmemFlushBufferValid_HAL(pGpu, pKernelMemorySystem); + + return NV_OK; +} + +/* + * Release the state accumulated in StateInit. + * @param[in] pGpu pointer to the GPU instance. + * @param[in] pKernelMemorySystem pointer to the kernel side KernelMemorySystem instance. + */ +void kmemsysStateDestroy_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + + portMemFree((void *)pKernelMemorySystem->pStaticConfig); +} + +/*! + * Returns MemorySystem settings that are static after GPU state init/load is + * finished. + */ +const MEMORY_SYSTEM_STATIC_CONFIG * +kmemsysGetStaticConfig_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + // check if state Init has not completed. 
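+ // The static config is allocated and populated in kmemsysStateInitLocked,
+ // so before state init completes the returned pointer may still be NULL.
+ // A typical consumer therefore guards the return value before using it,
+ // e.g. (hypothetical sketch):
+ //
+ //     const MEMORY_SYSTEM_STATIC_CONFIG *pCfg =
+ //         kmemsysGetStaticConfig(pGpu, pKernelMemorySystem);
+ //     NV_ASSERT_OR_RETURN(pCfg != NULL, NV_ERR_INVALID_STATE);
+ //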
+ NV_ASSERT_OR_ELSE(pKernelMemorySystem != NULL, return NULL); + + return pKernelMemorySystem->pStaticConfig; +} + +void +kmemsysDestruct_IMPL +( + KernelMemorySystem *pKernelMemorySystem +) +{ + pKernelMemorySystem->sysmemFlushBuffer = 0; + memdescFree(pKernelMemorySystem->pSysmemFlushBufferMemDesc); + memdescDestroy(pKernelMemorySystem->pSysmemFlushBufferMemDesc); + pKernelMemorySystem->pSysmemFlushBufferMemDesc = NULL; + + portMemSet(pKernelMemorySystem->gpuInstanceMemConfig, 0, sizeof(pKernelMemorySystem->gpuInstanceMemConfig)); +} + +NV_STATUS +kmemsysAllocComprResources_KERNEL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + FB_ALLOC_INFO *pFbAllocInfo, + NvU64 origSize, + NvU32 kindChosen, + NvU32 *pRetAttr, + NvU32 retAttr2 +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + NvU32 gfid; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + NV_ASSERT_OR_RETURN(pMemorySystemConfig->bOneToOneComptagLineAllocation || pMemorySystemConfig->bUseRawModeComptaglineAllocation, + NV_ERR_INVALID_STATE); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, + !FLD_TEST_DRF(OS32, _ALLOC, _COMPTAG_OFFSET_USAGE, _FIXED, pFbAllocInfo->ctagOffset), + NV_ERR_INVALID_ARGUMENT); + + // Failing the allocation if scrub on free is disabled + if (!memmgrIsScrubOnFreeEnabled(pMemoryManager)) + { + if (!(IS_SIMULATION(pGpu) || IsDFPGA(pGpu) || (IS_EMULATION(pGpu) && RMCFG_FEATURE_PLATFORM_MODS) + ||(RMCFG_FEATURE_PLATFORM_WINDOWS && !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TCC_MODE)) + ||(IsSLIEnabled(pGpu) && !(RMCFG_FEATURE_PLATFORM_WINDOWS && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TCC_MODE)))) + ) + { + NV_PRINTF(LEVEL_ERROR, "Compressible surfaces cannot be allocated on a system, " + "where scrub on free is disabled\n"); + return NV_ERR_INVALID_STATE; + } + } + else if (pMemorySystemConfig->bOneToOneComptagLineAllocation) + { + NV_ASSERT_OR_RETURN(memmgrUseVasForCeMemoryOps(pMemoryManager), NV_ERR_INVALID_STATE); + } + + FB_SET_HWRESID_CTAGID_FERMI(pFbAllocInfo->hwResId, FB_HWRESID_CTAGID_VAL_FERMI(-1)); + *pRetAttr = FLD_SET_DRF(OS32, _ATTR, _COMPR, _REQUIRED, *pRetAttr); + return NV_OK; +} + +/*! + * @brief Initializes static config data from the Physical side. + * @param[in] pGpu pointer to the GPU instance. + * @param[in] pKernelMemorySystem pointer to the kernel side KernelMemorySystem instance. + * @param[out] pConfig pointer to the static config init on Physical driver. + * + * @return NV_OK upon success. + * NV_ERR* otherwise. + */ +NV_STATUS +kmemsysInitStaticConfig_KERNEL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + MEMORY_SYSTEM_STATIC_CONFIG *pConfig +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status; + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMSYS_GET_STATIC_CONFIG, + pConfig, sizeof(*pConfig)); + return status; +} + +/*! 
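+ * Worked example (assuming KMIGMGR_MAX_GPU_INSTANCES is 8, as the _EIGHTH
+ * case below implies): with a 40 GB partitionable range, memSize = 5 GB, so
+ * swizzId 0 gets 40 GB (_FULL), swizzIds 1-2 get 20 GB (_HALF),
+ * swizzIds 3-6 get 10 GB (_QUARTER), and swizzIds 7-14 get 5 GB (_EIGHTH).
+ *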
+ * @brief Function to map swizzId to mem size given total mem + * + * @param[IN] pGpu + * @param[IN] pKernelMemorySystem + * @param[IN] swizzId + * @param[IN] totalRange total memory range + * @param[OUT] pPartitionSizeFlag Flag stating partition memory size + * @param[OUT] pSizeInBytes Memory size in bytes supported by partition + */ +NV_STATUS +kmemsysSwizzIdToMIGMemSize_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 swizzId, + NV_RANGE totalRange, + NvU32 *pPartitionSizeFlag, + NvU64 *pSizeInBytes +) +{ + // + // To handle the straddling issue we always consider memory for different + // swizzIds as addition of minimum sized segements allowed in partitioning + // + NvU64 memSize = rangeLength(totalRange) / KMIGMGR_MAX_GPU_INSTANCES; + + switch (swizzId) + { + case 0: + { + *pSizeInBytes = memSize * KMIGMGR_MAX_GPU_INSTANCES; + *pPartitionSizeFlag = DRF_DEF(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, _FULL); + break; + } + + case 1: + case 2: + { + *pSizeInBytes = (memSize * (KMIGMGR_MAX_GPU_INSTANCES / 2)); + *pPartitionSizeFlag = DRF_DEF(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, _HALF); + break; + } + + case 3: + case 4: + case 5: + case 6: + { + *pSizeInBytes = (memSize * (KMIGMGR_MAX_GPU_INSTANCES / 4)); + *pPartitionSizeFlag = DRF_DEF(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, _QUARTER); + break; + } + + case 7: + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + { + *pSizeInBytes = memSize; + *pPartitionSizeFlag = DRF_DEF(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, _EIGHTH); + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "Unsupported SwizzId %d\n", swizzId); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + } + + if ((*pSizeInBytes == 0) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB)) + { + NV_PRINTF(LEVEL_ERROR, "Insufficient memory\n"); + DBG_BREAKPOINT(); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + return NV_OK; +} + +/*! + * @brief Function to map swizzId to mem range given total range + */ +NV_STATUS +kmemsysSwizzIdToMIGMemRange_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 swizzId, + NV_RANGE totalRange, + NV_RANGE *pAddrRange +) +{ + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NV_STATUS rmStatus = NV_OK; + NvU32 memSizeFlag = 0; + NvU32 minSwizzId = 0; + NvU64 unalignedStartAddr = 0; + NvU64 memSize = 0; + NV_RANGE swizzIdRange = NV_RANGE_EMPTY; + + NV_ASSERT_OR_RETURN(!rangeIsEmpty(totalRange), NV_ERR_INVALID_ARGUMENT); + + // Get SwizzId to size mapping + NV_ASSERT_OK_OR_RETURN( + kmemsysSwizzIdToMIGMemSize(pGpu, pKernelMemorySystem, swizzId, totalRange, &memSizeFlag, &memSize)); + + swizzIdRange = kmigmgrMemSizeFlagToSwizzIdRange_HAL(pGpu, pKernelMIGManager, memSizeFlag); + NV_ASSERT_OR_RETURN(!rangeIsEmpty(swizzIdRange), NV_ERR_INVALID_ARGUMENT); + + minSwizzId = swizzIdRange.lo; + + unalignedStartAddr = (totalRange.lo + (memSize * (swizzId - minSwizzId))); + *pAddrRange = rangeMake(unalignedStartAddr, unalignedStartAddr + memSize - 1); + + return rmStatus; +} + +/*! 
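+ * The range is derived from the per-swizzId VMMU segment config recorded in
+ * gpuInstanceMemConfig: start = startingVmmuSegment * vmmuSegmentSize and
+ * end = start + memSizeInVmmuSegment * vmmuSegmentSize - 1 (AMODEL falls
+ * back to the legacy swizzId-based split of the partitionable range instead).
+ *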
+ * @brief Function to return GPU instance memory address range + */ +NV_STATUS +kmemsysGetMIGGPUInstanceMemInfo_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 swizzId, + NV_RANGE *pAddrRange +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 vmmuSegmentSize; + NvU64 startAddr; + NvU64 endAddr; + + NV_ASSERT_OR_RETURN(pAddrRange != NULL, NV_ERR_INVALID_ARGUMENT); + *pAddrRange = NV_RANGE_EMPTY; + NV_ASSERT_OR_RETURN(swizzId < KMIGMGR_MAX_GPU_SWIZZID, NV_ERR_INVALID_STATE); + + // Not supported in vGPU or ZERO_FB configs + NV_CHECK_OR_RETURN(LEVEL_SILENT, + !(IS_VIRTUAL(pGpu) || (pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB))), + NV_OK); + + // + // VMMU not supported in AMODEL. Use legacy swizz-ID calculation instead of relying on vMMU segments + // to calculate address range + // + if (IsAMODEL(pGpu)) + { + NV_RANGE partitionableMemoryRange = memmgrGetMIGPartitionableMemoryRange(pGpu, pMemoryManager); + return kmemsysSwizzIdToMIGMemRange(pGpu, pKernelMemorySystem, swizzId, partitionableMemoryRange, pAddrRange); + } + + // Get the VMMU segment size + vmmuSegmentSize = gpuGetVmmuSegmentSize(pGpu); + NV_ASSERT_OR_RETURN((vmmuSegmentSize != 0), NV_ERR_INVALID_STATE); + + startAddr = pKernelMemorySystem->gpuInstanceMemConfig[swizzId].startingVmmuSegment * vmmuSegmentSize; + endAddr = startAddr + (pKernelMemorySystem->gpuInstanceMemConfig[swizzId].memSizeInVmmuSegment * vmmuSegmentSize) - 1; + *pAddrRange = rangeMake(startAddr, endAddr); + + return NV_OK; +} + +/*! + * @brief Function to populate static GPU instance memory config which will be + * utilized for GPU instance memory query and memory allocation + */ +NV_STATUS +kmemsysPopulateMIGGPUInstanceMemConfig_KERNEL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_RANGE partitionableMemoryRange = memmgrGetMIGPartitionableMemoryRange(pGpu, pMemoryManager); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvU64 vmmuSegmentSize; + NvU64 totalVmmuSegments; + NvU64 alignedStartAddr; + NvU64 alignedEndAddr; + NvU32 swizzId; + + // Not needed in vGPU or zero_fb configs + NV_CHECK_OR_RETURN(LEVEL_SILENT, + !(IS_VIRTUAL(pGpu) || (pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB))), + NV_OK); + + // Nothing to do if MIG is not supported + NV_CHECK_OR_RETURN(LEVEL_SILENT, kmigmgrIsMIGSupported(pGpu, pKernelMIGManager), NV_OK); + + // Get the VMMU segment size + vmmuSegmentSize = gpuGetVmmuSegmentSize(pGpu); + NV_ASSERT_OR_RETURN((vmmuSegmentSize != 0), NV_ERR_INVALID_STATE); + + alignedStartAddr = partitionableMemoryRange.lo; + alignedEndAddr = partitionableMemoryRange.hi; + if (alignedStartAddr != 0) + { + alignedStartAddr = NV_IS_ALIGNED64(alignedStartAddr, vmmuSegmentSize) ? + alignedStartAddr + vmmuSegmentSize : + NV_ALIGN_UP64(alignedStartAddr, vmmuSegmentSize); + } + + if (NV_IS_ALIGNED64(alignedEndAddr + 1, vmmuSegmentSize)) + { + alignedEndAddr = alignedEndAddr - vmmuSegmentSize; + } + + totalVmmuSegments = (alignedEndAddr - alignedStartAddr + 1) / vmmuSegmentSize; + for (swizzId = 0; swizzId < KMIGMGR_MAX_GPU_SWIZZID; swizzId++) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmemsysSwizzIdToVmmuSegmentsRange_HAL(pGpu, pKernelMemorySystem, swizzId, vmmuSegmentSize, totalVmmuSegments)); + } + + return NV_OK; +} + +/*! 
+ * @brief Gets GPU instance memory configuration based on swizzId + */ +NV_STATUS +kmemsysGetMIGGPUInstanceMemConfigFromSwizzId_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 swizzId, + const MIG_GPU_INSTANCE_MEMORY_CONFIG **ppGPUInstanceMemConfig +) +{ + NV_ASSERT_OR_RETURN(swizzId < KMIGMGR_MAX_GPU_SWIZZID, NV_ERR_INVALID_ARGUMENT); + // MODS makes a control call to describe GPU instances before this is populated. Return invalid data anyways + NV_ASSERT_OR_RETURN(pKernelMemorySystem->gpuInstanceMemConfig[swizzId].bInitialized, NV_ERR_INVALID_STATE); + + *ppGPUInstanceMemConfig = &pKernelMemorySystem->gpuInstanceMemConfig[swizzId]; + return NV_OK; +} + +/*! + * @brief Set GPU Instance memory config information and mark initialized + */ +NV_STATUS +kmemsysInitMIGGPUInstanceMemConfigForSwizzId_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 swizzId, + NvU64 startingVmmuSegment, + NvU64 memSizeInVmmuSegment +) +{ + NV_ASSERT_OR_RETURN(swizzId < KMIGMGR_MAX_GPU_SWIZZID, NV_ERR_INVALID_ARGUMENT); + + pKernelMemorySystem->gpuInstanceMemConfig[swizzId].startingVmmuSegment = startingVmmuSegment; + pKernelMemorySystem->gpuInstanceMemConfig[swizzId].memSizeInVmmuSegment = memSizeInVmmuSegment; + pKernelMemorySystem->gpuInstanceMemConfig[swizzId].bInitialized = NV_TRUE; + + NV_PRINTF(LEVEL_INFO, + "GPU Instance Mem Config for swizzId = 0x%x : MemStartSegment = 0x%llx, MemSizeInSegments = 0x%llx\n", + swizzId, + pKernelMemorySystem->gpuInstanceMemConfig[swizzId].startingVmmuSegment, + pKernelMemorySystem->gpuInstanceMemConfig[swizzId].memSizeInVmmuSegment); + + return NV_OK; +} + +/*! + * @brief Ensure that the sysmem flush sysmem buffer has been initialized + * + * Setting up the sysmem flush buffer needs to be done very early in some cases + * as it's required for the GPU to perform a system flush. One such case is + * resetting GPU FALCONs and in particular resetting the PMU as part of VBIOS + * init. + * + * @returns NV_OK if the sysmem flush buffer has been initialized. + */ +NV_STATUS +kmemsysEnsureSysmemFlushBufferInitialized_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + if (IS_VIRTUAL(pGpu) || + IS_GSP_CLIENT(pGpu) || + RMCFG_FEATURE_PLATFORM_GSP) + { + return NV_OK; + } + + return kmemsysInitFlushSysmemBuffer_HAL(pGpu, pKernelMemorySystem); +} + +/*! + * @brief Handle sysmem NVLink/C2C, NUMA and ATS functionality + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelMemorySystem pointer to the kernel side KernelMemorySystem instance. 
+ * @param[in] bFlush Whether the CPU cache of the GPU mapping + * should be flushed + * + * @return NV_OK on success + */ +NV_STATUS +kmemsysSetupCoherentCpuLink_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvBool bFlush +) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 memblockSize = 0; + NvU64 numaOnlineBase = 0; + NvU64 numaOnlineSize = 0; + NvU32 data32; + NvBool bCpuMapping; + + // + // Compute coherent link aperture range for SHH and P9 (!NV_VERIF_FEATURES) to enable + // FB access via coherent link(Nvlink/C2C) path + // + { + NV_ASSERT_OK_OR_RETURN(kmemsysGetFbNumaInfo_HAL(pGpu, pKernelMemorySystem, + &pKernelMemorySystem->coherentCpuFbBase, + &pGpu->numaNodeId)); + if (pKernelMemorySystem->coherentCpuFbBase != 0) + { + pKernelMemorySystem->coherentCpuFbEnd = pKernelMemorySystem->coherentCpuFbBase + + pMemoryManager->Ram.fbUsableMemSize; + } + } + + // ATS mappings are currently not supported in mods + if ((osReadRegistryDword(pGpu, + NV_REG_STR_OVERRIDE_GPU_NUMA_NODE_ID, &data32)) == NV_OK) + { + pGpu->numaNodeId = (NvS32)data32; + NV_PRINTF(LEVEL_ERROR, "Override GPU NUMA node ID %d!\n", + pGpu->numaNodeId); + } + + // Default enable + bCpuMapping = NV_TRUE; + + // Parse regkey here + if ((osReadRegistryDword(pGpu, + NV_REG_STR_RM_FORCE_BAR_PATH, &data32) == NV_OK) && + (data32 == 1)) + { + NV_PRINTF(LEVEL_ERROR, + "Force disabling NVLINK/C2C mappings through regkey.\n"); + + bCpuMapping = NV_FALSE; + } + + if ((pKernelMemorySystem->coherentCpuFbBase == 0) || !bCpuMapping) + { + return NV_OK; + } + NV_ASSERT_OK_OR_RETURN(kbusCreateCoherentCpuMapping_HAL(pGpu, pKernelBus, bFlush)); + + // Switch the toggle for coherent link mapping only if migration is successful + pGpu->setProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING, NV_TRUE); + + NV_ASSERT_OK_OR_RETURN(kbusVerifyCoherentLink_HAL(pGpu, pKernelBus)); + + // + // TODO clean up with bug 2020982 + // RM: Encapsulate NUMA-specific kernel code and logic in a new object + // + if (osNumaMemblockSize(&memblockSize) == NV_OK) + { + NvU64 rsvdFastSize = 0; + NvU64 rsvdSlowSize = 0; + NvU64 rsvdISOSize = 0; + NvU64 totalResvBytes = 0; + + memmgrCalcReservedFbSpaceHal_HAL(pGpu, pMemoryManager, &rsvdFastSize, &rsvdSlowSize, &rsvdISOSize); + totalResvBytes = (rsvdFastSize + rsvdSlowSize + rsvdISOSize); + totalResvBytes += memmgrGetRsvdMemorySize(pMemoryManager); + + // + // Online all of FB memory less reserved memory, aligned to memblock + // + // TODO: make sure the onlineable memory is aligned to memblockSize + // Currently, if we have leftover memory, it'll just be wasted because no + // one can access it + // + numaOnlineSize = NV_ALIGN_DOWN(pMemoryManager->Ram.fbUsableMemSize - totalResvBytes, memblockSize); + NV_PRINTF(LEVEL_INFO, "NUMA reserved memory size: 0x%llx online memory size: 0x%llx\n", + totalResvBytes, numaOnlineSize); + + pKernelMemorySystem->numaOnlineBase = numaOnlineBase; + pKernelMemorySystem->numaOnlineSize = numaOnlineSize; + } + + return NV_OK; +} + +/*! 
+ * @brief Teardown sysmem NVLink/C2C NUMA and ATS functionality + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelMemorySystem Kernel Memory System pointer + * @param[in] bFlush Whether the CPU cache of the GPU mapping + * should be flushed + */ +void +kmemsysTeardownCoherentCpuLink_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvBool bFlush +) +{ + kbusTeardownCoherentCpuMapping_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), bFlush); + pGpu->setProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING, NV_FALSE); +} + +NV_STATUS +kmemsysSendL2InvalidateEvict_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU32 flags) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT_PARAMS params = {0}; + + params.flags = flags; + + return pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMSYS_L2_INVALIDATE_EVICT, + ¶ms, sizeof(params)); +} + +NV_STATUS +kmemsysSendFlushL2AllRamsAndCaches_IMPL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + return pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MEMSYS_FLUSH_L2_ALL_RAMS_AND_CACHES, + NULL, 0); +} + +NV_STATUS +kmemsysGetUsableFbSize_KERNEL +( + OBJGPU *pGpu, + KernelMemorySystem *pKernelMemorySystem, + NvU64 *pFbSize +) +{ + return kmemsysReadUsableFbSize_HAL(pGpu, pKernelMemorySystem, pFbSize); +} diff --git a/src/nvidia/src/kernel/gpu/mem_sys/kern_mem_sys_ctrl.c b/src/nvidia/src/kernel/gpu/mem_sys/kern_mem_sys_ctrl.c new file mode 100644 index 000000000..2d86fc763 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mem_sys/kern_mem_sys_ctrl.c @@ -0,0 +1,1235 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/bus/kern_bus.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "gpu/gpu_resource_desc.h" +#include "gpu/subdevice/subdevice.h" +#include "platform/chipset/chipset.h" +#include "ctrl/ctrl0080/ctrl0080fb.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" +#include "core/locks.h" +#include "vgpu/rpc.h" +#include "rmapi/client.h" + + +static NV_STATUS +_fbGetFbInfos(OBJGPU *pGpu, NvHandle hClient, NvHandle hObject, NV2080_CTRL_FB_INFO *pFbInfos, NvU32 fbInfoListSize) +{ + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + Heap *pHeap = GPU_GET_HEAP(pGpu); + Heap *pMemoryPartitionHeap = NULL; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU32 data = 0; + NvU32 i = 0; + NvBool bIsPmaEnabled = memmgrIsPmaInitialized(pMemoryManager); + NvBool bIsMIGInUse = IS_MIG_IN_USE(pGpu); + NvU64 bytesTotal; + NvU64 bytesFree; + NvU64 heapBase; + NvU64 largestOffset; + NvU64 largestFree; + NvU64 val; + NvU64 routeToPhysicalIdxMask = 0; + NvBool bIsClientMIGMonitor = NV_FALSE; + NvBool bIsClientMIGProfiler = NV_FALSE; + + ct_assert(NV2080_CTRL_FB_INFO_INDEX_MAX < NV_NBITS_IN_TYPE(routeToPhysicalIdxMask)); + + if (!RMCFG_FEATURE_PMA) + return NV_ERR_NOT_SUPPORTED; + + if (bIsMIGInUse) + { + bIsClientMIGMonitor = !RMCFG_FEATURE_PLATFORM_GSP && rmclientIsCapableByHandle(hClient, NV_RM_CAP_SYS_SMC_MONITOR); + bIsClientMIGProfiler = kmigmgrIsClientUsingDeviceProfiling(pGpu, pKernelMIGManager, hClient); + } + + // + // Most MIG queries require GPU instance info that is only kept in the Physical RM. Flag + // the indices that will need to be fulfilled by the GSP when in offload mode, and + // also load the per-GPU instance heap for others. 
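+ // The control is handled in three steps: the loop below first classifies
+ // each requested index, accumulating Physical-RM-only entries in the 64-bit
+ // routeToPhysicalIdxMask (the ct_assert above checks that
+ // NV2080_CTRL_FB_INFO_INDEX_MAX fits within that mask) and resolving the
+ // per-GPU-instance heap where needed; if the mask is non-empty, a single
+ // NV2080_CTRL_CMD_FB_GET_INFO_V2 call to the Physical RM then fills those
+ // entries; finally, the second loop computes the remaining indices locally
+ // from the heap/PMA state.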
+ // + for (i = 0; i < fbInfoListSize; i++) + { + switch (pFbInfos[i].index) + { + // The cases which aren't affected by MIG + case NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_COUNT: + case NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_FREE_COUNT: + case NV2080_CTRL_FB_INFO_INDEX_BANK_SWIZZLE_ALIGNMENT: + case NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT: + case NV2080_CTRL_FB_INFO_INDEX_OVERLAY_OFFSET_ADJUSTMENT: + case NV2080_CTRL_FB_INFO_INDEX_FB_TAX_SIZE_KB: + case NV2080_CTRL_FB_INFO_INDEX_RAM_LOCATION: + case NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN: + case NV2080_CTRL_FB_INFO_INDEX_L2CACHE_ONLY_MODE: + case NV2080_CTRL_FB_INFO_INDEX_SMOOTHDISP_RSVD_BAR1_SIZE: + case NV2080_CTRL_FB_INFO_INDEX_HEAP_OFFLINE_SIZE: + case NV2080_CTRL_FB_INFO_INDEX_SUSPEND_RESUME_RSVD_SIZE: + case NV2080_CTRL_FB_INFO_INDEX_ALLOW_PAGE_RETIREMENT: + case NV2080_CTRL_FB_INFO_POISON_FUSE_ENABLED: + case NV2080_CTRL_FB_INFO_FBPA_ECC_ENABLED: + case NV2080_CTRL_FB_INFO_DYNAMIC_PAGE_OFFLINING_ENABLED: + case NV2080_CTRL_FB_INFO_INDEX_FORCED_BAR1_64KB_MAPPING_ENABLED: + case NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_SIZE: + case NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_ALIGNMENT: + case NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_BAR1_MAX_OFFSET_64KB: + { + continue; + } + case NV2080_CTRL_FB_INFO_INDEX_BUS_WIDTH: + case NV2080_CTRL_FB_INFO_INDEX_PARTITION_COUNT: + case NV2080_CTRL_FB_INFO_INDEX_PARTITION_MASK: + case NV2080_CTRL_FB_INFO_INDEX_FBP_MASK: + case NV2080_CTRL_FB_INFO_INDEX_FBP_COUNT: + case NV2080_CTRL_FB_INFO_INDEX_L2CACHE_SIZE: + case NV2080_CTRL_FB_INFO_INDEX_LTC_COUNT: + case NV2080_CTRL_FB_INFO_INDEX_LTS_COUNT: + case NV2080_CTRL_FB_INFO_INDEX_LTC_MASK: + case NV2080_CTRL_FB_INFO_INDEX_MEMORYINFO_VENDOR_ID: + case NV2080_CTRL_FB_INFO_INDEX_TRAINIG_2T: + case NV2080_CTRL_FB_INFO_INDEX_PSEUDO_CHANNEL_MODE: + case NV2080_CTRL_FB_INFO_INDEX_COMPRESSION_SIZE: + case NV2080_CTRL_FB_INFO_INDEX_DRAM_PAGE_STRIDE: + case NV2080_CTRL_FB_INFO_INDEX_RAM_CFG: + case NV2080_CTRL_FB_INFO_INDEX_RAM_TYPE: + case NV2080_CTRL_FB_INFO_INDEX_ECC_STATUS_SIZE: + { + // This info is only known by the Physical RM. Redirect it there. + if (IS_GSP_CLIENT(pGpu)) + { + routeToPhysicalIdxMask |= BIT64(i); + } + + break; + } + case NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE: + case NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE: + { + // + // If MIG is enabled and device profiling/monitoring + // is not in use we check for GPU instance subscription + // and provide GPU instance local info. Unsubscribed + unprivileged + // clients may still query global info for the above list of + // indices. 
+ // + if (bIsMIGInUse && + !bIsClientMIGProfiler && !bIsClientMIGMonitor) + { + MIG_INSTANCE_REF ref; + status = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, &ref); + + if ((status != NV_OK) && !kmigmgrIsMIGReferenceValid(&ref)) + { + status = NV_OK; + break; + } + } + + // Fall through to the default case to get the memory partition heap + } + default: + { + // + // If MIG is enabled and device profiling/monitoring + // is not in use we check for GPU instance subscription + // and provide GPU instance local info + // + if (bIsMIGInUse && + !bIsClientMIGProfiler && !bIsClientMIGMonitor) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, (kmigmgrGetMemoryPartitionHeapFromClient(pGpu, + pKernelMIGManager, hClient, &pMemoryPartitionHeap) == NV_OK), + NV_ERR_INSUFFICIENT_PERMISSIONS); + + // + // If client is associated with a GPU instance then point pHeap + // to client's memory partition heap + // + if (pMemoryPartitionHeap != NULL) + pHeap = pMemoryPartitionHeap; + } + break; + } + } + } + + // If we have any infos that need to be populated by Physical RM, query now + if (routeToPhysicalIdxMask != 0) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pParams = + portMemAllocNonPaged(sizeof(NV2080_CTRL_FB_GET_INFO_V2_PARAMS)); + NvU32 physIdx = 0; + + portMemSet(pParams, 0, sizeof(*pParams)); + FOR_EACH_INDEX_IN_MASK(64, i, routeToPhysicalIdxMask) + { + pParams->fbInfoList[physIdx++].index = pFbInfos[i].index; + } + FOR_EACH_INDEX_IN_MASK_END; + + pParams->fbInfoListSize = physIdx; + + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, hClient, hObject, + NV2080_CTRL_CMD_FB_GET_INFO_V2, + pParams, sizeof(*pParams)), + portMemFree(pParams); + return status; + ); + + physIdx = 0; + FOR_EACH_INDEX_IN_MASK(64, i, routeToPhysicalIdxMask) + { + NV_ASSERT(pFbInfos[i].index == pParams->fbInfoList[physIdx].index); + pFbInfos[i].data = pParams->fbInfoList[physIdx++].data; + } + FOR_EACH_INDEX_IN_MASK_END; + + portMemFree(pParams); + } + + for (i = 0; i < fbInfoListSize; i++) + { + // Skip info already populated by Physical RM + if ((routeToPhysicalIdxMask & BIT64(i)) != 0) + continue; + + switch (pFbInfos[i].index) + { + case NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_COUNT: + { + data = 0; + break; + } + case NV2080_CTRL_FB_INFO_INDEX_TILE_REGION_FREE_COUNT: + { + // Obsolete + data = 0; + break; + } + case NV2080_CTRL_FB_INFO_INDEX_BAR1_SIZE: + { + GETBAR1INFO bar1Info = {0}; + status = memmgrGetBAR1InfoForClient_HAL(pGpu, pMemoryManager, hClient, &bar1Info); + if (status != NV_OK) + data = 0; + else + data = bar1Info.bar1Size; + + break; + } + case NV2080_CTRL_FB_INFO_INDEX_BAR1_AVAIL_SIZE: + { + GETBAR1INFO bar1Info = {0}; + status = memmgrGetBAR1InfoForClient_HAL(pGpu, pMemoryManager, hClient, &bar1Info); + if (status != NV_OK) + data = 0; + else + data = bar1Info.bar1AvailSize; + + break; + } + case NV2080_CTRL_FB_INFO_INDEX_BAR1_MAX_CONTIGUOUS_AVAIL_SIZE: + { + GETBAR1INFO bar1Info = {0}; + status = memmgrGetBAR1InfoForClient_HAL(pGpu, pMemoryManager, hClient, &bar1Info); + if (status != NV_OK) + data = 0; + else + data = bar1Info.bar1MaxContigAvailSize; + + break; + } + case NV2080_CTRL_FB_INFO_INDEX_BANK_SWIZZLE_ALIGNMENT: + { + GETBAR1INFO bar1Info = {0}; + status = memmgrGetBAR1InfoForClient_HAL(pGpu, pMemoryManager, hClient, &bar1Info); + if (status != NV_OK) + data = 0; + else + data = bar1Info.bankSwizzleAlignment; + + break; + } + case NV2080_CTRL_FB_INFO_INDEX_TOTAL_RAM_SIZE: + { + if (pMemoryPartitionHeap != NULL) + { + NvU32 
heapSizeKb; + if (bIsPmaEnabled) + { + pmaGetTotalMemory(&pHeap->pmaObject, &bytesTotal); + NV_ASSERT(NvU64_HI32(bytesTotal >> 10) == 0); + heapSizeKb = NvU64_LO32(bytesTotal >> 10); + } + else + { + NvU64 size; + + heapGetSize(pHeap, &size); + NV_ASSERT(NvU64_HI32(size >> 10) == 0); + heapSizeKb = NvU64_LO32(size >> 10); + } + data = heapSizeKb; + break; + } + else + { + NV_ASSERT(0 == NvU64_HI32(pMemoryManager->Ram.fbTotalMemSizeMb << 10)); + data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbTotalMemSizeMb << 10), (pMemoryManager->Ram.fbOverrideSizeMb << 10))); + break; + } + } + case NV2080_CTRL_FB_INFO_INDEX_RAM_SIZE: + { + if (pMemoryPartitionHeap != NULL) + { + NvU32 heapSizeKb; + if (bIsPmaEnabled) + { + pmaGetTotalMemory(&pHeap->pmaObject, &bytesTotal); + NV_ASSERT(NvU64_HI32(bytesTotal >> 10) == 0); + heapSizeKb = NvU64_LO32(bytesTotal >> 10); + } + else + { + NvU64 size; + + heapGetSize(pHeap, &size); + NV_ASSERT(NvU64_HI32(size >> 10) == 0); + heapSizeKb = NvU64_LO32(size >> 10); + } + data = heapSizeKb; + break; + } + NV_ASSERT(0 == NvU64_HI32(pMemoryManager->Ram.fbTotalMemSizeMb << 10)); + data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbTotalMemSizeMb << 10), (pMemoryManager->Ram.fbOverrideSizeMb << 10))); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_USABLE_RAM_SIZE: + { + if (pMemoryPartitionHeap != NULL) + { + NvU32 heapSizeKb; + if (bIsPmaEnabled) + { + pmaGetTotalMemory(&pHeap->pmaObject, &bytesTotal); + NV_ASSERT(NvU64_HI32(bytesTotal >> 10) == 0); + heapSizeKb = NvU64_LO32(bytesTotal >> 10); + } + else + { + NvU64 size; + + heapGetSize(pHeap, &size); + NV_ASSERT(NvU64_HI32(size >> 10) == 0); + heapSizeKb = NvU64_LO32(size >> 10); + } + data = heapSizeKb; + break; + } + NV_ASSERT(0 == NvU64_HI32(pMemoryManager->Ram.fbUsableMemSize >> 10)); + data = NvU64_LO32(NV_MIN((pMemoryManager->Ram.fbUsableMemSize >> 10), (pMemoryManager->Ram.fbOverrideSizeMb << 10))); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE: + { + if (bIsPmaEnabled) + { + pmaGetTotalMemory(&pHeap->pmaObject, &bytesTotal); + NV_ASSERT(NvU64_HI32(bytesTotal >> 10) == 0); + data = NvU64_LO32(bytesTotal >> 10); + } + else + { + NvU64 size; + + heapGetUsableSize(pHeap, &size); + NV_ASSERT(NvU64_HI32(size >> 10) == 0); + data = NvU64_LO32(size >> 10); + } + break; + } + case NV2080_CTRL_FB_INFO_INDEX_HEAP_START: + { + if (pMemoryPartitionHeap != NULL) + { + if (bIsPmaEnabled) + { + pmaGetLargestFree(&pHeap->pmaObject, &largestFree, &heapBase, &largestOffset); + } + else + { + status = heapInfo(pHeap, &bytesFree, &bytesTotal, &heapBase, + &largestOffset, &largestFree); + } + data = NvU64_LO32(heapBase >> 10); + } + else + { + // + // Returns start of heap in kbytes. This is zero unless + // VGA display memory is reserved. + // + heapGetBase(pHeap, &heapBase); + data = NvU64_LO32(heapBase >> 10); + NV_ASSERT(((NvU64) data << 10ULL) == heapBase); + } + break; + } + case NV2080_CTRL_FB_INFO_INDEX_HEAP_FREE: + { + if (bIsClientMIGMonitor || bIsClientMIGProfiler) + { + bytesFree = 0; + + // + // Add free memory across the all valid MIG GPU instances and + // the global heap. + // + // As MIG uses the global heap when memory is not + // partitioned, skip getting information from it. 
+ // + if (kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager)) + { + memmgrGetFreeMemoryForAllMIGGPUInstances(pGpu, pMemoryManager, &val); + bytesFree = val; + } + + if (bIsPmaEnabled) + pmaGetFreeMemory(&pHeap->pmaObject, &val); + else + heapGetFree(pHeap, &val); + + bytesFree += val; + + NV_ASSERT(NvU64_HI32(bytesFree >> 10) == 0); + data = NvU64_LO32(bytesFree >> 10); + } + else if (bIsPmaEnabled) + { + pmaGetFreeMemory(&pHeap->pmaObject, &bytesFree); + + NV_ASSERT(NvU64_HI32(bytesFree >> 10) == 0); + data = NvU64_LO32(bytesFree >> 10); + } + else + { + NvU64 size; + + heapGetFree(pHeap, &size); + NV_ASSERT(NvU64_HI32(size >> 10) == 0); + data = NvU64_LO32(size >> 10); + } + break; + } + + case NV2080_CTRL_FB_INFO_INDEX_VISTA_RESERVED_HEAP_SIZE: + { + // + // If PMA is enabled, ideally we wouldn't have any reserved heap but UVM vidheapctrl + // allocations are not accounted for by KMD. RM will reserve it. + // + if (bIsPmaEnabled) + { + if (pMemoryPartitionHeap != NULL) + { + data = 0; + break; + } + + NvU64 uvmReserveMem = 0; + memmgrCalcReservedFbSpaceForUVM_HAL(pGpu, pMemoryManager, &uvmReserveMem); + // Ceil bytes and return the # of KB + data = NvU64_LO32(NV_CEIL(uvmReserveMem, 1024)); + } + else + { + memmgrCalcReservedFbSpace(pGpu, pMemoryManager); + // heap size in kbytes + data = memmgrGetReservedHeapSizeMb_HAL(pGpu, pMemoryManager) << 10; + } + break; + } + + case NV2080_CTRL_FB_INFO_INDEX_MAPPABLE_HEAP_SIZE: + { + if (bIsPmaEnabled) + { + NvU32 heapSizeKb; + + pmaGetTotalMemory(&pHeap->pmaObject, &bytesTotal); + NV_ASSERT(NvU64_HI32(bytesTotal >> 10) == 0); + heapSizeKb = NvU64_LO32(bytesTotal >> 10); + + data = memmgrGetMappableRamSizeMb(pMemoryManager) << 10; + if (data > heapSizeKb) + data = heapSizeKb; + } + else + { + NvU64 size; + NvU32 heapSizeKb; + + heapGetSize(pHeap, &size); + NV_ASSERT(NvU64_HI32(size >> 10) == 0); + heapSizeKb = NvU64_LO32(size >> 10); + + data = memmgrGetMappableRamSizeMb(pMemoryManager) << 10; + if (data > heapSizeKb) + data = heapSizeKb; + } + break; + } + case NV2080_CTRL_FB_INFO_INDEX_BANK_COUNT: + { + data = 1; + break; + } + case NV2080_CTRL_FB_INFO_INDEX_OVERLAY_OFFSET_ADJUSTMENT: + { + data = 0; + break; + } + case NV2080_CTRL_FB_INFO_INDEX_FB_TAX_SIZE_KB: + { + bytesTotal = memmgrGetFbTaxSize_HAL(pGpu, pMemoryManager); + data = NvU64_LO32(bytesTotal >> 10); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_HEAP_BASE_KB: + { + if (bIsPmaEnabled) + { + pmaGetLargestFree(&pHeap->pmaObject, &largestFree, &heapBase, &largestOffset); + } + else + { + status = heapInfo(pHeap, &bytesFree, &bytesTotal, &heapBase, + &largestOffset, &largestFree); + } + + data = NvU64_LO32(heapBase >> 10); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_SIZE_KB: + { + if (bIsPmaEnabled) + { + pmaGetLargestFree(&pHeap->pmaObject, &largestFree, &heapBase, &largestOffset); + } + else + { + status = heapInfo(pHeap, &bytesFree, &bytesTotal, &heapBase, + &largestOffset, &largestFree); + } + + data = NvU64_LO32(largestFree >> 10); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_LARGEST_FREE_REGION_BASE_KB: + { + if (bIsPmaEnabled) + { + pmaGetLargestFree(&pHeap->pmaObject, &largestFree, &heapBase, &largestOffset); + } + else + { + status = heapInfo(pHeap, &bytesFree, &bytesTotal, &heapBase, + &largestOffset, &largestFree); + } + + data = NvU64_LO32(largestOffset >> 10); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_RAM_LOCATION: + { + data = NV2080_CTRL_FB_INFO_RAM_LOCATION_GPU_DEDICATED; + break; + } + case NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN: + { 
+ data = (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_ZERO_FB)) ? 1 : 0; + break; + } + case NV2080_CTRL_FB_INFO_INDEX_L2CACHE_ONLY_MODE: + { + data = gpuIsCacheOnlyModeEnabled(pGpu) ? 1 : 0; + break; + } + + case NV2080_CTRL_FB_INFO_INDEX_SMOOTHDISP_RSVD_BAR1_SIZE: + { + data = pGpu->uefiScanoutSurfaceSizeInMB; + break; + } + + case NV2080_CTRL_FB_INFO_INDEX_HEAP_OFFLINE_SIZE: + { + data = pHeap->dynamicBlacklistSize; + break; + } + case NV2080_CTRL_FB_INFO_INDEX_1TO1_COMPTAG_ENABLED: + { + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + data = (pMemorySystemConfig->bOneToOneComptagLineAllocation) ? 1 : 0; + break; + } + + case NV2080_CTRL_FB_INFO_INDEX_SUSPEND_RESUME_RSVD_SIZE: + { + NvU64 rsvdSize = memmgrGetRsvdSizeForSr_HAL(pGpu, pMemoryManager); + NV_ASSERT(NvU64_HI32(rsvdSize) == 0); + data = NvU64_LO32(rsvdSize); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_ALLOW_PAGE_RETIREMENT: + { + data = pGpu->getProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT) ? 1 : 0; + break; + } + case NV2080_CTRL_FB_INFO_POISON_FUSE_ENABLED: + { + data = gpuIsGlobalPoisonFuseEnabled(pGpu) ? 1 : 0; + break; + } + case NV2080_CTRL_FB_INFO_FBPA_ECC_ENABLED: + { + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + data = (pMemorySystemConfig->bEnabledEccFBPA) ? 1: 0; + break; + } + case NV2080_CTRL_FB_INFO_DYNAMIC_PAGE_OFFLINING_ENABLED: + { + data = pMemoryManager->bEnableDynamicPageOfflining ? 1 : 0; + break; + } + case NV2080_CTRL_FB_INFO_INDEX_FORCED_BAR1_64KB_MAPPING_ENABLED: + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + data = kbusIsBar1Force64KBMappingEnabled(pKernelBus); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_SIZE: + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + data = 0; + if (kbusIsP2pMailboxClientAllocated(pKernelBus)) + kbusGetP2PMailboxAttributes_HAL(pGpu, pKernelBus, &data, NULL, NULL); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_ALIGNMENT: + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + data = 0; + if (kbusIsP2pMailboxClientAllocated(pKernelBus)) + kbusGetP2PMailboxAttributes_HAL(pGpu, pKernelBus, NULL, &data, NULL); + break; + } + case NV2080_CTRL_FB_INFO_INDEX_P2P_MAILBOX_BAR1_MAX_OFFSET_64KB: + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + data = 0; + if (kbusIsP2pMailboxClientAllocated(pKernelBus)) + kbusGetP2PMailboxAttributes_HAL(pGpu, pKernelBus, NULL, NULL, &data); + break; + } + default: + { + data = 0; + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + if (status != NV_OK) + break; + // save off data value + pFbInfos[i].data = data; + } + + return status; +} + +// +// subdeviceCtrlCmdFbGetInfo +// +// Lock Requirements: +// Assert that API and Gpus lock held on entry +// +NV_STATUS +subdeviceCtrlCmdFbGetInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GET_INFO_PARAMS *pFbInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hObject = RES_GET_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if ((pFbInfoParams->fbInfoListSize == 0) || + (NvP64_VALUE(pFbInfoParams->fbInfoList) == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return _fbGetFbInfos(pGpu, hClient, hObject, NvP64_VALUE(pFbInfoParams->fbInfoList), pFbInfoParams->fbInfoListSize); +} + +// +// 
subdeviceCtrlCmdFbGetInfoV2 +// +// Lock Requirements: +// Assert that API and Gpus lock held on entry +// +NV_STATUS +subdeviceCtrlCmdFbGetInfoV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GET_INFO_V2_PARAMS *pFbInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hObject = RES_GET_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + if ((pFbInfoParams->fbInfoListSize > NV2080_CTRL_FB_INFO_MAX_LIST_SIZE) || + (pFbInfoParams->fbInfoListSize == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return _fbGetFbInfos(pGpu, hClient, hObject, pFbInfoParams->fbInfoList, pFbInfoParams->fbInfoListSize); +} + +// +// subdeviceCtrlCmdFbGetCarveoutAddressInfo +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdFbGetCarveoutAddressInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GET_SYSTEM_CARVEOUT_ADDRESS_SPACE_INFO *pParams +) +{ + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + pParams->StartAddr = 0x0; + pParams->SpaceSize = 0x0; + + return NV_ERR_GENERIC; +} + +// +// subdeviceCtrlCmdFbSetGpuCacheAllocPolicy +// +// Lock Requirements: +// Assert that GPUs lock held on entry +// Called from SW method w/o API lock +// +NV_STATUS +subdeviceCtrlCmdFbSetGpuCacheAllocPolicy_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS *pParams +) +{ + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + // Map engine to FBBA client + return NV_ERR_NOT_SUPPORTED; +} + +// +// subdeviceCtrlCmdFbGetGpuCacheAllocPolicy +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdFbGetGpuCacheAllocPolicy_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_PARAMS *pGpuCacheAllocPolicyParams +) +{ + + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + pGpuCacheAllocPolicyParams->allocPolicy = 0; + + NV_PRINTF(LEVEL_ERROR, "Failed to read Client reads ALLOC policy\n"); + return status; +} + +// +// subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2 +// +// Lock Requirements: +// Assert that GPUs lock held on entry +// Called from SW method w/o API lock +// +NV_STATUS +subdeviceCtrlCmdFbSetGpuCacheAllocPolicyV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS *pParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + // Bug 724186 -- Skip this check for deferred API + LOCK_ASSERT_AND_RETURN(pRmCtrlParams->bDeferredApi || rmGpuLockIsOwner()); + + NV_PRINTF(LEVEL_ERROR, "Failed to set alloc policy\n"); + return status; +} + +// +// subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2 +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdFbGetGpuCacheAllocPolicyV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GPU_CACHE_ALLOC_POLICY_V2_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + NV_ASSERT_OR_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner(), NV_ERR_INVALID_LOCK_STATE); + + NV_PRINTF(LEVEL_ERROR, "Failed to get alloc policy.\n"); + return status; +} + +// +// subdeviceCtrlCmdFbGetCliManagedOfflinedPages +// +// Lock Requirements: +// Assert that API and Gpus lock held on entry +// + +NV_STATUS 
+subdeviceCtrlCmdFbGetCliManagedOfflinedPages_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GET_CLI_MANAGED_OFFLINED_PAGES_PARAMS *pOsOfflinedParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + Heap *pHeap = GPU_GET_HEAP(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + BLACKLIST_CHUNK *pBlacklistChunks = pHeap->blackList.pBlacklistChunks; + NvU64 pageAddress; + NvU32 i; + NvU32 osBlackListCount = 0; + NvU32 chunk; + NvU64 chunks[NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES]; + NvU32 pageSize; + NvU32 numChunks = 0; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + if (!IsSLIEnabled(pGpu)) + { + if (memmgrIsPmaInitialized(pMemoryManager)) + { + // If PMA is enabled Client pages are located here. + pmaGetClientBlacklistedPages(&pHeap->pmaObject, chunks, &pageSize, &numChunks); + + NV_ASSERT(numChunks <= NV2080_CTRL_FB_OFFLINED_PAGES_MAX_PAGES); + + for (chunk = 0; chunk < numChunks; chunk++) + { + pOsOfflinedParams->offlinedPages[chunk] = (NvU32)(chunks[chunk] >> RM_PAGE_SHIFT); + } + pOsOfflinedParams->validEntries = numChunks; + pOsOfflinedParams->pageSize = pageSize; + } + else + { + // Iterate over the heap blacklist array to get the OS blacklisted regions + for (i = 0; i < pHeap->blackList.count; i++) + { + // Extract only the globally excluded page offsets + if (!pBlacklistChunks[i].bIsValid) + { + pageAddress = pBlacklistChunks[i].physOffset; + pOsOfflinedParams->offlinedPages[osBlackListCount] = (NvU32) (pageAddress >> RM_PAGE_SHIFT); + osBlackListCount++; + } + } + pOsOfflinedParams->validEntries = osBlackListCount; + pOsOfflinedParams->pageSize = RM_PAGE_SIZE; + } + return NV_OK; + } + else + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief This call can be used to update the NUMA status. + * + * Lock Requirements: + * Assert that API and GPUs lock held on entry + * + * @param[in] pSubdevice Subdevice + * @param[in] pParams pointer to control parameters + * + * @return NV_OK When successful + * NV_ERR_INVALID_STATE Otherwise + */ +NV_STATUS +subdeviceCtrlCmdFbUpdateNumaStatus_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_UPDATE_NUMA_STATUS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + Heap *pHeap = GPU_GET_HEAP(pGpu); + PMA *pPma = &pHeap->pmaObject; + NV_STATUS status = NV_OK; + + if (!RMCFG_FEATURE_PMA) + return NV_ERR_NOT_SUPPORTED; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (pParams->bOnline) + { + status = pmaNumaOnlined(pPma, pGpu->numaNodeId, + pKernelMemorySystem->coherentCpuFbBase, + pKernelMemorySystem->numaOnlineSize); + } + else + { + pmaNumaOfflined(pPma); + } + + return status; +} + +/* + * @brief This call can be used to get NUMA related information. 
+ * + * Lock Requirements: + * Assert that API and GPUs lock held on entry + * + * @param[in] pSubdevice Subdevice + * @param[in] pParams pointer to control parameters + * + * @return NV_OK When successful + * NV_ERR_INVALID_STATE Otherwise + */ +NV_STATUS +subdeviceCtrlCmdFbGetNumaInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_GET_NUMA_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + BLACKLIST_ADDRESS *pBlAddrs; + NvU32 numaOfflineIdx = 0; + NvU32 idx; + NV_STATUS status; + NvU32 count; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (pParams->numaOfflineAddressesCount > + NV_ARRAY_ELEMENTS(pParams->numaOfflineAddresses)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pParams->numaNodeId = pGpu->numaNodeId; + pParams->numaMemAddr = pKernelMemorySystem->coherentCpuFbBase + pKernelMemorySystem->numaOnlineBase; + pParams->numaMemSize = pKernelMemorySystem->numaOnlineSize; + + if (pParams->numaOfflineAddressesCount == 0) + { + return NV_OK; + } + + if (!(pGpu->getProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT) && + gpuCheckPageRetirementSupport_HAL(pGpu))) + { + pParams->numaOfflineAddressesCount = 0; + return NV_OK; + } + + count = pMemorySystemConfig->maximumBlacklistPages; + pBlAddrs = portMemAllocNonPaged(sizeof(BLACKLIST_ADDRESS) * count); + if (pBlAddrs == NULL) + { + return NV_ERR_NO_MEMORY; + } + + status = memmgrGetBlackListPages_HAL(pGpu, pMemoryManager, pBlAddrs, &count); + NV_ASSERT(status != NV_ERR_BUFFER_TOO_SMALL); + + if(status == NV_OK) + { + for (idx = 0; idx < count; idx++) + { + NvU64 offset = pBlAddrs[idx].address; + + if (numaOfflineIdx >= pParams->numaOfflineAddressesCount) + { + status = NV_ERR_BUFFER_TOO_SMALL; + break; + } + + // Only tell the caller about Offline pages in the NUMA region. + if (offset < pParams->numaMemSize) + { + pParams->numaOfflineAddresses[numaOfflineIdx++] = pParams->numaMemAddr + offset; + } + else + { + NV_PRINTF(LEVEL_INFO, "retired page address 0x%llx not in NUMA region\n", + offset); + } + } + } + else + { + // No offlined pages or failure to read offlined addresses. + status = NV_OK; + } + + pParams->numaOfflineAddressesCount = numaOfflineIdx; + + portMemFree(pBlAddrs); + + return status; +} + +NV_STATUS +subdeviceCtrlCmdFbSetZbcReferenced_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + NvU32 gfid; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, IS_GFID_VF(gfid) || pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL, NV_ERR_INSUFFICIENT_PERMISSIONS); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + return status; + } + + return NV_OK; +} + +/*! + * @brief This call can be used to flush L2 followed by FB or + * just the FB. + * + * If L2 ops are needed, either _INVALIDATE or _WRITE_BACK + * or both flags set to _YES is required. 
Specifying both + * to _YES implies EVICT. + * + * If only the FB flush is needed, only the + * _APERTURE and _FB_FLUSH_YES are needed. + * + * If only L2 ops are needed (i.e. no FB flush following + * it), _FB_FLUSH_NO is needed in addition to the other + * L2 ops flags. + * + * Lock Requirements: + * Assert that API and GPUs lock held on entry + * + * @param[in] pSubdevice Subdevice + * @param[in] pCacheFlushParams Various flush flags + * + */ +NV_STATUS +subdeviceCtrlCmdFbFlushGpuCache_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FB_FLUSH_GPU_CACHE_PARAMS *pCacheFlushParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NV_STATUS status = NV_OK; + FB_CACHE_MEMTYPE memType = FB_CACHE_MEM_UNDEFINED; + FB_CACHE_OP cacheOp = FB_CACHE_OP_UNDEFINED; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvBool bWriteback = NV_FALSE; + NvBool bInvalidate = NV_FALSE; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + // Either WriteBack or Invalidate are required for Cache Ops + if (FLD_TEST_DRF(2080, _CTRL_FB_FLUSH_GPU_CACHE_FLAGS, _WRITE_BACK, + _YES, pCacheFlushParams->flags)) + { + bWriteback = NV_TRUE; + } + if (FLD_TEST_DRF(2080, _CTRL_FB_FLUSH_GPU_CACHE_FLAGS, _INVALIDATE, + _YES, pCacheFlushParams->flags)) + { + bInvalidate = NV_TRUE; + } + + if (bWriteback || bInvalidate ) + { + // Cache Ops Path + + switch (DRF_VAL(2080, _CTRL_FB_FLUSH_GPU_CACHE_FLAGS, _APERTURE, + pCacheFlushParams->flags)) + { + case NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_VIDEO_MEMORY: + memType = FB_CACHE_VIDEO_MEMORY; + break; + case NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_SYSTEM_MEMORY: + memType = FB_CACHE_SYSTEM_MEMORY; + break; + case NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_APERTURE_PEER_MEMORY: + memType = FB_CACHE_PEER_MEMORY; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid aperture.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (bWriteback && bInvalidate) + { + cacheOp = FB_CACHE_EVICT; + } + else if (!bWriteback && bInvalidate) + { + cacheOp = FB_CACHE_INVALIDATE; + } + else if (bWriteback && !bInvalidate) + { + cacheOp = FB_CACHE_WRITEBACK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Must specify at least one of WRITE_BACK or INVALIDATE\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + switch (DRF_VAL(2080, _CTRL_FB_FLUSH_GPU_CACHE_FLAGS, _FLUSH_MODE, + pCacheFlushParams->flags)) + { + case NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE_ADDRESS_ARRAY: + if ((pCacheFlushParams->addressArraySize == 0) || + (pCacheFlushParams->addressArraySize > + NV2080_CTRL_FB_FLUSH_GPU_CACHE_MAX_ADDRESSES)) + { + NV_PRINTF(LEVEL_ERROR, "Invalid array size (0x%x)\n", + pCacheFlushParams->addressArraySize); + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + if(pCacheFlushParams->memBlockSizeBytes == 0) + { + NV_PRINTF(LEVEL_ERROR, "Invalid memBlock size (0x%llx)\n", + pCacheFlushParams->memBlockSizeBytes); + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + status = NV_ERR_GENERIC; + break; + case NV2080_CTRL_FB_FLUSH_GPU_CACHE_FLAGS_FLUSH_MODE_FULL_CACHE: + status = kmemsysCacheOp_HAL(pGpu, pKernelMemorySystem, NULL, memType, cacheOp); + break; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid FLUSH_MODE 0x%x\n", + DRF_VAL(2080, _CTRL_FB_FLUSH_GPU_CACHE_FLAGS, _FLUSH_MODE, pCacheFlushParams->flags)); + return NV_ERR_INVALID_ARGUMENT; + } + } + + // FB Flush + if (FLD_TEST_DRF(2080, _CTRL_FB_FLUSH_GPU_CACHE_FLAGS, _FB_FLUSH, + _YES, pCacheFlushParams->flags)) + { + 
NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY)); + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/arch/ampere/kmigmgr_ga100.c b/src/nvidia/src/kernel/gpu/mig_mgr/arch/ampere/kmigmgr_ga100.c new file mode 100644 index 000000000..41a566352 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mig_mgr/arch/ampere/kmigmgr_ga100.c @@ -0,0 +1,347 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mem_mgr/heap.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/fifo/kernel_fifo.h" + +#include "published/ampere/ga100/dev_bus.h" +#include "published/ampere/ga100/dev_bus_addendum.h" + +/*! + * @brief Checks Devinit owned scratch bit to see if MIG is enabled or not + * + * @return NV_TRUE if scratch bit is set else NV_FALSE + */ +NvBool +kmigmgrIsDevinitMIGBitSet_GA100 +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + NvU32 regVal; + + regVal = GPU_REG_RD32(pGpu, NV_PBUS_SW_SCRATCH(1)); + + return FLD_TEST_DRF(_PBUS, _SW_SCRATCH1_SMC,_MODE, _ON, regVal); +} + +/*! + * @brief Peforms checks to determine whether instancing can be enabled on + * this GPU, such as determining whether any partitionable engines are + * currently active. 
+ */ +NV_STATUS +kmigmgrCreateGPUInstanceCheck_GA100 +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvBool bMemoryPartitioningNeeded +) +{ + Heap *pHeap; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 engines[NV2080_ENGINE_TYPE_LAST]; + NvU32 engineCount; + NvU32 i; + NvU64 largestFreeSize; + NvU64 base; + NvU64 unused; + NV_RANGE freeBlock; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_RANGE partitionableMemoryRange = memmgrGetMIGPartitionableMemoryRange(pGpu, pMemoryManager); + + // Ensure the engine DB is up-to-date + NV_ASSERT_OK_OR_RETURN(gpuUpdateEngineTable(pGpu)); + + // Store all engine tags of partitionable engines in the system + engineCount = 0; + for (i = 0; i < pGpu->engineDB.size; ++i) + { + if (kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, pGpu->engineDB.pType[i])) + { + // + // If memory partitioning isnt needed, scrubber channel will be active, and + // partitioning isn't really a destructive operation anyway, so + // skip checking for copy engines + // + if (NV2080_ENGINE_TYPE_IS_COPY(pGpu->engineDB.pType[i]) && + !bMemoryPartitioningNeeded) + { + continue; + } + + if (NV2080_ENGINE_TYPE_IS_GR(pGpu->engineDB.pType[i]) && + (NV2080_ENGINE_TYPE_GR_IDX(pGpu->engineDB.pType[i]) > 0)) + { + // + // This check is used during GPU instance creation, prior to which + // it is impossible to use GR1-7, so as an optimization, skip + // checking for those. + // + continue; + } + + engines[engineCount++] = pGpu->engineDB.pType[i]; + } + } + + // Make sure there are no channels alive on any of these engines + if (kfifoEngineListHasChannel(pGpu, pKernelFifo, engines, engineCount)) + return NV_ERR_STATE_IN_USE; + + pHeap = GPU_GET_HEAP(pGpu); + if (!memmgrIsPmaInitialized(pMemoryManager)) + { + NV_ASSERT_OK_OR_RETURN( + heapInfo(pHeap, &unused, &unused, &unused, &base, &largestFreeSize)); + } + else + { + pmaGetLargestFree(&pHeap->pmaObject, &largestFreeSize, &base, &unused); + } + + // Make sure that no memory has been claimed from our partitionable range + freeBlock = rangeMake(base, base + largestFreeSize - 1); + if (!rangeContains(freeBlock, partitionableMemoryRange)) + return NV_ERR_STATE_IN_USE; + + return NV_OK; +} + +/*! 
+ * @brief Function to determine whether gpu instance flags are valid + * for this GPU + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] gpuInstanceFlag NV2080_CTRL_GPU_PARTITION_FLAG_* + * + * @return Returns true if flags are valid + */ +NvBool +kmigmgrIsGPUInstanceFlagValid_GA100 +( + OBJGPU *pGpu, + KernelMIGManager *pGrMgr, + NvU32 gpuInstanceFlag +) +{ + NvU32 memSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, + _MEMORY_SIZE, gpuInstanceFlag); + NvU32 computeSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, + _COMPUTE_SIZE, gpuInstanceFlag); + + switch (memSizeFlag) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL: + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF: + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER: + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH: + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU mem partitioning flag 0x%x\n", + memSizeFlag); + return NV_FALSE; + } + + switch (computeSizeFlag) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_FULL: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_HALF: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_QUARTER: + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH: + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unrecognized GPU compute partitioning flag 0x%x\n", + computeSizeFlag); + return NV_FALSE; + } + + return NV_TRUE; +} + +/*! + * @brief Function to determine whether gpu instance flag combinations are valid + * for this GPU + */ +NvBool +kmigmgrIsGPUInstanceCombinationValid_GA100 +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 gpuInstanceFlag +) +{ + NvU32 memSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, gpuInstanceFlag); + NvU32 computeSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _COMPUTE_SIZE, gpuInstanceFlag); + + if (!kmigmgrIsGPUInstanceFlagValid_HAL(pGpu, pKernelMIGManager, gpuInstanceFlag)) + { + return NV_FALSE; + } + + // JPG_OFA profile is only available on the smallest partition + if (FLD_TEST_REF(NV2080_CTRL_GPU_PARTITION_FLAG_REQ_DEC_JPG_OFA, _ENABLE, gpuInstanceFlag)) + { + if (kmigmgrIsA100ReducedConfig(pGpu, pKernelMIGManager)) + { + if (computeSizeFlag != NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_QUARTER) + { + return NV_FALSE; + } + } + else if (computeSizeFlag != NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH) + { + return NV_FALSE; + } + } + + if (kmigmgrIsA100ReducedConfig(pGpu, pKernelMIGManager) && + computeSizeFlag == NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF) + { + return NV_FALSE; + } + + switch (computeSizeFlag) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_FULL: + NV_CHECK_OR_RETURN(LEVEL_SILENT, memSizeFlag == NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL, + NV_FALSE); + break; + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_HALF: + NV_CHECK_OR_RETURN(LEVEL_SILENT, memSizeFlag == NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF, + NV_FALSE); + break; + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_MINI_HALF: + NV_CHECK_OR_RETURN(LEVEL_SILENT, memSizeFlag == NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF, + NV_FALSE); + break; + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_QUARTER: + NV_CHECK_OR_RETURN(LEVEL_SILENT, memSizeFlag == NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER, + NV_FALSE); + break; + case NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH: + NV_CHECK_OR_RETURN(LEVEL_SILENT, memSizeFlag == NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH, + 
NV_FALSE); + break; + default: + NV_ASSERT(0); + return NV_FALSE; + } + + return NV_TRUE; +} + +/*! + * @brief Returns the range of swizzids which can be assigned to a GPU + * instance of the given size. + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] memSizeFlag NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_* + */ +NV_RANGE +kmigmgrMemSizeFlagToSwizzIdRange_GA100 +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 memSizeFlag +) +{ + NV_RANGE ret; + + switch (DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, memSizeFlag)) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL: + { + ret = rangeMake(0, 0); + break; + } + + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF: + { + ret = rangeMake(1, 2); + break; + } + + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER: + { + ret = rangeMake(3, 6); + break; + } + + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH: + { + ret = rangeMake(7, 14); + break; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "Unsupported mem size flag 0x%x\n", + memSizeFlag); + DBG_BREAKPOINT(); + ret = NV_RANGE_EMPTY; + break; + } + } + return ret; +} +/*! + * @brief Checks if user requested a configuration that should require memory partitioning + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] partitionFlags Client request flags + */ +NvBool +kmigmgrIsMemoryPartitioningRequested_GA100 +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 partitionFlags +) +{ + NvU32 memSizeFlag = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, partitionFlags); + return (memSizeFlag != NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL); +} + +/*! + * @brief Checks if memory partitioning will be needed for a given swizzId + */ +NvBool +kmigmgrIsMemoryPartitioningNeeded_GA100 +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId +) +{ + // Memory partitioning is needed for non-zero swizzIds + return (swizzId != 0); +} + diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/compute_instance_subscription.c b/src/nvidia/src/kernel/gpu/mig_mgr/compute_instance_subscription.c new file mode 100644 index 000000000..fce0e6316 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mig_mgr/compute_instance_subscription.c @@ -0,0 +1,223 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing MIG compute instance subscriptions + * + *****************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "core/system.h" +#include "os/os.h" +#include "gpu/subdevice/subdevice.h" + +#include "kernel/gpu/mig_mgr/gpu_instance_subscription.h" +#include "kernel/gpu/mig_mgr/compute_instance_subscription.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +#include "class/clc638.h" +#include "rmapi/rs_utils.h" +#include "gpu/gpu_uuid.h" +#include "vgpu/rpc.h" + +NV_STATUS +cisubscriptionConstruct_IMPL +( + ComputeInstanceSubscription *pComputeInstanceSubscription, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + NVC638_ALLOCATION_PARAMETERS *pUserParams = pRmAllocParams->pAllocParams; + RsClient *pRsClient = pCallContext->pClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pParentRef = pResourceRef->pParentRef; + GPUInstanceSubscription *pGPUInstanceSubscription = dynamicCast(pParentRef->pResource, GPUInstanceSubscription); + OBJGPU *pGpu; + MIG_COMPUTE_INSTANCE *pMIGComputeInstance; + NV_STATUS status; + + pGpu = GPU_RES_GET_GPU(pComputeInstanceSubscription); + + osRmCapInitDescriptor(&pComputeInstanceSubscription->dupedCapDescriptor); + + if (RS_IS_COPY_CTOR(pRmAllocParams)) + return cisubscriptionCopyConstruct_IMPL(pComputeInstanceSubscription, pCallContext, pRmAllocParams); + + if (!IS_MIG_IN_USE(pGpu)) + { + NV_ASSERT_FAILED("Compute instance Subscription failed: MIG GPU partitioning not done"); + return NV_ERR_NOT_SUPPORTED; + } + + NV_CHECK_OR_RETURN(LEVEL_SILENT, pUserParams->execPartitionId < KMIGMGR_MAX_COMPUTE_INSTANCES, NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OR_RETURN(LEVEL_SILENT, pGPUInstanceSubscription->pKernelMIGGpuInstance != NULL, NV_ERR_INVALID_STATE); + NV_CHECK_OR_RETURN(LEVEL_SILENT, pGPUInstanceSubscription->pKernelMIGGpuInstance->bValid, NV_ERR_INVALID_STATE); + + pMIGComputeInstance = &pGPUInstanceSubscription->pKernelMIGGpuInstance->MIGComputeInstance[pUserParams->execPartitionId]; + NV_CHECK_OR_RETURN(LEVEL_SILENT, pMIGComputeInstance->bValid, NV_ERR_INVALID_ARGUMENT); + + // + // For now skip kernel clients, such as UVM, until Bug 2729768 is fixed. 
+ // + if (pRsClient->type == CLIENT_TYPE_USER) + { + status = osRmCapAcquire(pMIGComputeInstance->pOsRmCaps, + NV_RM_CAP_SMC_EXEC_PARTITION_ACCESS, + pUserParams->capDescriptor, + &pComputeInstanceSubscription->dupedCapDescriptor); + if ((status != NV_ERR_NOT_SUPPORTED) && (status != NV_OK)) + { + NV_PRINTF(LEVEL_ERROR, + "Capability validation failed: ID 0x%0x!\n", + pUserParams->execPartitionId); + return status; + } + } + + pComputeInstanceSubscription->pMIGComputeInstance = pMIGComputeInstance; + + NV_ASSERT_OK_OR_GOTO(status, + kmigmgrIncRefCount(pMIGComputeInstance->pShare), + cleanup_duped_desc); + + return NV_OK; + +cleanup_duped_desc: + osRmCapRelease(pComputeInstanceSubscription->dupedCapDescriptor); + + return status; +} + +NV_STATUS +cisubscriptionCopyConstruct_IMPL +( + ComputeInstanceSubscription *pComputeInstanceSubscription, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pSrcRef = pParams->pSrcRef; + ComputeInstanceSubscription *pComputeInstanceSubscriptionSrc = dynamicCast(pSrcRef->pResource, ComputeInstanceSubscription); + OBJGPU *pGpu = GPU_RES_GET_GPU(pComputeInstanceSubscription); + + // non kernel clients are not allowed to dup MIG instances + NV_CHECK_OR_RETURN(LEVEL_SILENT, pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL, + NV_ERR_NOT_SUPPORTED); + + if (IS_VIRTUAL_WITH_SRIOV(pGpu) || IS_GSP_CLIENT(pGpu)) + { + RsResourceRef *pDstRef = pCallContext->pResourceRef; + NV_STATUS status = NV_OK; + + NV_RM_RPC_DUP_OBJECT(pGpu, + pCallContext->pClient->hClient, + pDstRef->pParentRef->hResource, + pDstRef->hResource, + pParams->pSrcClient->hClient, + pSrcRef->hResource, + 0, NV_TRUE, // Send RPC for object free + pDstRef, status); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, status); + } + + pComputeInstanceSubscription->pMIGComputeInstance = pComputeInstanceSubscriptionSrc->pMIGComputeInstance; + + NV_ASSERT_OK( + kmigmgrIncRefCount(pComputeInstanceSubscription->pMIGComputeInstance->pShare)); + + return NV_OK; +} + +void +cisubscriptionDestruct_IMPL +( + ComputeInstanceSubscription *pComputeInstanceSubscription +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + + resGetFreeParams(staticCast(pComputeInstanceSubscription, RsResource), &pCallContext, &pParams); + + osRmCapRelease(pComputeInstanceSubscription->dupedCapDescriptor); + + gisubscriptionCleanupOnUnsubscribe(pCallContext); + + NV_ASSERT_OK( + kmigmgrDecRefCount(pComputeInstanceSubscription->pMIGComputeInstance->pShare)); +} + +NV_STATUS +cisubscriptionGetComputeInstanceSubscription_IMPL +( + RsClient *pClient, + NvHandle hParent, + ComputeInstanceSubscription **ppComputeInstanceSubscription +) +{ + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN(ppComputeInstanceSubscription != NULL, NV_ERR_INVALID_ARGUMENT); + + pResourceRef = serverutilFindChildRefByType(pClient->hClient, + hParent, classId(ComputeInstanceSubscription), + NV_TRUE); + if (pResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + *ppComputeInstanceSubscription = dynamicCast(pResourceRef->pResource, ComputeInstanceSubscription); + + return NV_OK; +} + +NvBool +cisubscriptionCanCopy_IMPL +( + ComputeInstanceSubscription *pComputeInstanceSubscription +) +{ + return NV_TRUE; +} + +NV_STATUS +cisubscriptionCtrlCmdGetUuid_IMPL +( + ComputeInstanceSubscription *pComputeInstanceSubscription, + NVC638_CTRL_GET_UUID_PARAMS *pParams +) +{ + ct_assert(NV_UUID_LEN == NVC638_UUID_LEN); + ct_assert(NV_UUID_STR_LEN == NVC638_UUID_STR_LEN); + + portMemCopy(pParams->uuid, 
NVC638_UUID_LEN, + pComputeInstanceSubscription->pMIGComputeInstance->uuid.uuid, NV_UUID_LEN); + + nvGetSmcUuidString(&pComputeInstanceSubscription->pMIGComputeInstance->uuid, + pParams->uuidStr); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/gpu_instance_subscription.c b/src/nvidia/src/kernel/gpu/mig_mgr/gpu_instance_subscription.c new file mode 100644 index 000000000..763dcd7e9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mig_mgr/gpu_instance_subscription.c @@ -0,0 +1,1008 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing GPU instance subscriptions + * + *****************************************************************************/ + +#include "core/core.h" +#include "core/system.h" +#include "gpu/gpu.h" +#include "os/os.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "virtualization/hypervisor/hypervisor.h" + +#include "kernel/gpu/mig_mgr/gpu_instance_subscription.h" +#include "kernel/gpu/mig_mgr/compute_instance_subscription.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +#include "ctrl/ctrlc637.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" +#include "gpu/gpu_uuid.h" +#include "vgpu/rpc.h" +#include "rmapi/control.h" + +static inline NvBool +_gisubscriptionClientSharesVASCrossPartition +( + GPUInstanceSubscription *pGPUInstanceSubscription, + CALL_CONTEXT *pCallContext, + NvU32 targetedSwizzId +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + RsResourceRef *pDeviceRef; + Device *pDevice; + MIG_INSTANCE_REF shareRef; + + NV_ASSERT_OR_RETURN(pGPUInstanceSubscription != NULL, NV_TRUE); + + NV_ASSERT_OK( + refFindAncestorOfType(pCallContext->pResourceRef, + classId(Device), &pDeviceRef)); + pDevice = dynamicCast(pDeviceRef->pResource, Device); + + if (pDevice->hClientShare == NV01_NULL_OBJECT) + { + // NULL Client Share : Legacy Global VASpace. This is cross-GPU-instance. + return NV_TRUE; + } + else if (pDevice->hClientShare == pCallContext->pClient->hClient) + { + // Same Client Share : Self Scoped VASpace. This is not cross-GPU-instance. 
+ return NV_FALSE; + } + + // + // Different Client Share. Device default VASpace is shared between this + // client and hClientShare. The VAS is cross-GPU-instance if the sharing client + // is subscribed to a different GPU instance than the subscription request, or + // if the sharing client isn't subscribed to any GPU instance. + // + status = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + pDevice->hClientShare, + &shareRef); + + + return (status != NV_OK) || (shareRef.pKernelMIGGpuInstance->swizzId != targetedSwizzId); +} + +NV_STATUS +gisubscriptionConstruct_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + NVC637_ALLOCATION_PARAMETERS *pUserParams = pRmAllocParams->pAllocParams; + RsClient *pRsClient = pCallContext->pClient; + OBJGPU *pGpu; + KernelMIGManager *pKernelMIGManager; + NvU32 swizzId; + NV_STATUS status; + + pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + + osRmCapInitDescriptor(&pGPUInstanceSubscription->dupedCapDescriptor); + + if (RS_IS_COPY_CTOR(pRmAllocParams)) + return gisubscriptionCopyConstruct_IMPL(pGPUInstanceSubscription, pCallContext, pRmAllocParams); + + pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + swizzId = pUserParams->swizzId; + + if (!IS_MIG_ENABLED(pGpu)) + { + NV_ASSERT_FAILED("Subscription failed: MIG not enabled\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // + // Root-SwizzID is a special swizzID which doesn't have any GPU instance + // associated with it. It can be subscribed to even without GPU instances + // + if (swizzId == NVC637_DEVICE_PROFILING_SWIZZID) + { + // Check if this is a root-client or un-privileged device profiling is allowed + if (gpuIsRmProfilingPrivileged(pGpu) && + !rmclientIsAdminByHandle(pRmAllocParams->hClient, pCallContext->secInfo.privLevel)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (kmigmgrIsDeviceProfilingInUse(pGpu, pKernelMIGManager)) + { + // Only one DeviceProfiling session is allowed to be used with-in a system + NV_PRINTF(LEVEL_ERROR, + "Subscription failed: Device-Level-Monitoring already in use\n"); + return NV_ERR_INVALID_STATE; + } + + // Mark the root swizzID in use and return + NV_ASSERT_OK_OR_RETURN(kmigmgrSetDeviceProfilingInUse(pGpu, pKernelMIGManager)); + pGPUInstanceSubscription->bDeviceProfiling = NV_TRUE; + goto done; + } + else + { + pGPUInstanceSubscription->bDeviceProfiling = NV_FALSE; + } + + if (!IS_MIG_IN_USE(pGpu)) + { + NV_ASSERT_FAILED("Subscription failed: MIG GPU instancing not done\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (!kmigmgrIsSwizzIdInUse(pGpu, pKernelMIGManager, swizzId)) + { + NV_PRINTF(LEVEL_ERROR, + "Subscription failed: swizzid 0x%0x doesn't exist!\n", + swizzId); + return NV_ERR_INVALID_ARGUMENT; + } + + if (_gisubscriptionClientSharesVASCrossPartition(pGPUInstanceSubscription, pCallContext, swizzId)) + { + NV_PRINTF(LEVEL_ERROR, + "Subscription failed: Client shares VAS with client not subscribed to target GPU instance!\n"); + return NV_ERR_STATE_IN_USE; + } + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager, swizzId, + &pGPUInstanceSubscription->pKernelMIGGpuInstance)); + + // For now skip kernel clients, such as UVM, until Bug 2729768 is fixed. 
+ if (pRsClient->type == CLIENT_TYPE_USER) + { + status = osRmCapAcquire(pGPUInstanceSubscription->pKernelMIGGpuInstance->pOsRmCaps, + NV_RM_CAP_SMC_PARTITION_ACCESS, + pUserParams->capDescriptor, + &pGPUInstanceSubscription->dupedCapDescriptor); + if ((status != NV_ERR_NOT_SUPPORTED) && (status != NV_OK)) + { + NV_PRINTF(LEVEL_ERROR, + "Capability validation failed: swizzid 0x%0x!\n", + swizzId); + return status; + } + } + + status = kmigmgrIncRefCount(pGPUInstanceSubscription->pKernelMIGGpuInstance->pShare); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU instance ref-counting failed: swizzid 0x%0x!\n", + swizzId); + goto cleanup_duped_desc; + } + +done: + NV_PRINTF(LEVEL_INFO, "Client 0x%x subscribed to swizzid 0x%0x.\n", + pRmAllocParams->hClient, swizzId); + + return NV_OK; + +cleanup_duped_desc: + osRmCapRelease(pGPUInstanceSubscription->dupedCapDescriptor); + + return status; +} + +NV_STATUS +gisubscriptionCopyConstruct_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pSrcRef = pParams->pSrcRef; + GPUInstanceSubscription *pGPUInstanceSubscriptionSrc = dynamicCast(pSrcRef->pResource, GPUInstanceSubscription); + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + + { + // non kernel clients are not allowed to dup GPU instances + NV_CHECK_OR_RETURN(LEVEL_SILENT, pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL, + NV_ERR_NOT_SUPPORTED); + } + + if (pGPUInstanceSubscriptionSrc->bDeviceProfiling) + { + // Duping of root-swizzId is not allowed + NV_PRINTF(LEVEL_ERROR, + "Subscription failed: Duping not allowed for Device-level-SwizzId\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (IS_VIRTUAL_WITH_SRIOV(pGpu) || IS_GSP_CLIENT(pGpu)) + { + RsResourceRef *pDstRef = pCallContext->pResourceRef; + NV_STATUS status = NV_OK; + + NV_RM_RPC_DUP_OBJECT(pGpu, + pCallContext->pClient->hClient, + pDstRef->pParentRef->hResource, + pDstRef->hResource, + pParams->pSrcClient->hClient, + pSrcRef->hResource, + 0, NV_TRUE, // Send RPC for object free + pDstRef, status); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, status); + } + + pGPUInstanceSubscription->pKernelMIGGpuInstance = pGPUInstanceSubscriptionSrc->pKernelMIGGpuInstance; + // TODO XXX tracking this to support CI subscription bypass path for UVM + pGPUInstanceSubscription->bIsDuped = NV_TRUE; + + NV_ASSERT_OK( + kmigmgrIncRefCount(pGPUInstanceSubscription->pKernelMIGGpuInstance->pShare)); + + return NV_OK; +} + +void +gisubscriptionDestruct_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + resGetFreeParams(staticCast(pGPUInstanceSubscription, RsResource), &pCallContext, &pParams); + + if (pGPUInstanceSubscription->bDeviceProfiling) + { + kmigmgrClearDeviceProfilingInUse(pGpu, pKernelMIGManager); + pGPUInstanceSubscription->bDeviceProfiling = NV_FALSE; + return; + } + + NV_ASSERT_OK( + kmigmgrDecRefCount(pGPUInstanceSubscription->pKernelMIGGpuInstance->pShare)); + + osRmCapRelease(pGPUInstanceSubscription->dupedCapDescriptor); + + gisubscriptionCleanupOnUnsubscribe(pCallContext); + + NV_PRINTF(LEVEL_INFO, "Client 0x%x unsubscribed from swizzid 0x%0x.\n", + RES_GET_CLIENT(pGPUInstanceSubscription)->hClient, pGPUInstanceSubscription->pKernelMIGGpuInstance->swizzId); +} + +NvBool +gisubscriptionIsDuped_IMPL +( + 
GPUInstanceSubscription *pGPUInstanceSubscription +) +{ + return pGPUInstanceSubscription->bIsDuped; +} + +NV_STATUS +gisubscriptionGetGPUInstanceSubscription_IMPL +( + RsClient *pClient, + NvHandle hParent, + GPUInstanceSubscription **ppGPUInstanceSubscription +) +{ + RsResourceRef *pResourceRef; + + NV_ASSERT_OR_RETURN(NULL != ppGPUInstanceSubscription, NV_ERR_INVALID_ARGUMENT); + + pResourceRef = serverutilFindChildRefByType(pClient->hClient, + hParent, classId(GPUInstanceSubscription), + NV_TRUE); + if (pResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + *ppGPUInstanceSubscription = dynamicCast(pResourceRef->pResource, GPUInstanceSubscription); + + return NV_OK; +} + +NvBool +gisubscriptionCanCopy_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription +) +{ + return NV_TRUE; +} + +// +// gisubscriptionCtrlCmdExecPartitionsCreate +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +gisubscriptionCtrlCmdExecPartitionsCreate_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription, + NVC637_CTRL_EXEC_PARTITIONS_CREATE_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = pGPUInstanceSubscription->pKernelMIGGpuInstance; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (!rmclientIsCapableOrAdminByHandle(RES_GET_CLIENT_HANDLE(pGPUInstanceSubscription), + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_ERROR, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +} + + NV_ASSERT_OR_RETURN(pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED), NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(IS_MIG_IN_USE(pGpu), NV_ERR_INVALID_STATE); + + NV_CHECK_OR_RETURN(LEVEL_SILENT, (pParams->execPartCount <= NVC637_CTRL_MAX_EXEC_PARTITIONS), + NV_ERR_INVALID_ARGUMENT); + + // Check for trivial arguments + NV_CHECK_OR_RETURN(LEVEL_SILENT, pParams->execPartCount > 0, NV_WARN_NOTHING_TO_DO); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, pRmCtrlParams->hClient, + pRmCtrlParams->hObject, pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, status); + + // Only continue if execution partition creation succeeded in the host + NV_ASSERT_OK_OR_RETURN(status); + } + + if (!IS_GSP_CLIENT(pGpu)) + { + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS request = + { + .type = KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST, + .inst.request.count = pParams->execPartCount, + .inst.request.pReqComputeInstanceInfo = pParams->execPartInfo + }; + + if (hypervisorIsVgxHyper() && + FLD_TEST_REF(NVC637_CTRL_DMA_EXEC_PARTITIONS_CREATE_REQUEST_WITH_PART_ID, _TRUE, pParams->flags)) + { + request.type = KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST_WITH_IDS; + } + + if (IS_VIRTUAL(pGpu)) + { + status = kmigmgrCreateComputeInstances_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, + pParams->bQuery, + request, + pParams->execPartId, + NV_TRUE /* create MIG compute instance capabilities */); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + } + else + { + NvU32 i; + + for (i = 0; i < 
pParams->execPartCount; i++) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS export; + GPUMGR_SAVE_COMPUTE_INSTANCE save; + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS restore = + { + .type = KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_RESTORE, + .inst.restore.pComputeInstanceSave = &save, + }; + portMemSet(&export, 0, sizeof(export)); + export.id = pParams->execPartId[i]; + + // Retrieve the CI state created by GSP-RM, then restore it to CPU-RM + NV_ASSERT_OK_OR_RETURN( + pRmApi->Control(pRmApi, + pKernelMIGGpuInstance->instanceHandles.hClient, + pKernelMIGGpuInstance->instanceHandles.hSubscription, + NVC637_CTRL_CMD_EXEC_PARTITIONS_EXPORT, + &export, + sizeof(export))); + + portMemSet(&save, 0, sizeof(save)); + save.bValid = NV_TRUE; + save.id = pParams->execPartId[i]; + save.ciInfo = export.info; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrCreateComputeInstances_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, + NV_FALSE, restore, &pParams->execPartId[i], NV_TRUE)); + } + } + + // + // Generate a subdevice event stating something has changed in GPU instance + // config. Clients currently do not care about changes and their scope + // + if (!pParams->bQuery) + { + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_SMC_CONFIG_UPDATE, NULL, + 0, 0, 0); + } + + return status; +} + +// +// gisubscriptionCtrlCmdExecPartitionsDelete +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +gisubscriptionCtrlCmdExecPartitionsDelete_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription, + NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = pGPUInstanceSubscription->pKernelMIGGpuInstance; + NvU32 execPartIdx; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (!rmclientIsCapableOrAdminByHandle(RES_GET_CLIENT_HANDLE(pGPUInstanceSubscription), + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_ERROR, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +} + + NV_ASSERT_OR_RETURN(pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED), + NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(IS_MIG_IN_USE(pGpu), NV_ERR_INVALID_STATE); + + NV_CHECK_OR_RETURN(LEVEL_SILENT, pParams->execPartCount <= NVC637_CTRL_MAX_EXEC_PARTITIONS, + NV_ERR_INVALID_ARGUMENT); + + // Check for trivial arguments + NV_CHECK_OR_RETURN(LEVEL_SILENT, pParams->execPartCount > 0, NV_WARN_NOTHING_TO_DO); + + // Check that the passed indices are valid compute instances + for (execPartIdx = 0; execPartIdx < pParams->execPartCount; ++execPartIdx) + { + NvU32 execPartId = pParams->execPartId[execPartIdx]; + NV_CHECK_OR_RETURN(LEVEL_ERROR, + execPartId < KMIGMGR_MAX_COMPUTE_INSTANCES, + NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + pKernelMIGGpuInstance->MIGComputeInstance[execPartId].bValid, + NV_ERR_INVALID_ARGUMENT); + } + + for (execPartIdx = 0; execPartIdx < pParams->execPartCount; ++execPartIdx) + { + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrDeleteComputeInstance(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, + 
pParams->execPartId[execPartIdx], + NV_FALSE)); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + } + + // + // Generate a subdevice event stating something has changed in GPU instance + // config. Clients currently do not care about changes and their scope + // + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_SMC_CONFIG_UPDATE, NULL, 0, 0, 0); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, pRmCtrlParams->hClient, + pRmCtrlParams->hObject, pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, status); + + NV_ASSERT_OK_OR_RETURN(status); + } + + return status; +} + +// +// gisubscriptionCtrlCmdExecPartitionsGet +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +gisubscriptionCtrlCmdExecPartitionsGet_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription, + NVC637_CTRL_EXEC_PARTITIONS_GET_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + ComputeInstanceSubscription *pComputeInstanceSubscription = NULL; + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = pGPUInstanceSubscription->pKernelMIGGpuInstance; + NvU32 ciIdx; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pGPUInstanceSubscription); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + NvBool bEnumerateAll = rmclientIsCapableOrAdminByHandle(hClient, + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel); + + MIG_COMPUTE_INSTANCE *pTargetComputeInstanceInfo = NULL; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OR_RETURN(pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED), + NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(IS_MIG_IN_USE(pGpu), NV_ERR_INVALID_STATE); + + (void)cisubscriptionGetComputeInstanceSubscription(RES_GET_CLIENT(pGPUInstanceSubscription), RES_GET_HANDLE(pGPUInstanceSubscription), &pComputeInstanceSubscription); + if (pComputeInstanceSubscription != NULL) + { + bEnumerateAll = NV_FALSE; + pTargetComputeInstanceInfo = pComputeInstanceSubscription->pMIGComputeInstance; + } + else if (!bEnumerateAll) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + pParams->execPartCount = 0; + for (ciIdx = 0; + ciIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++ciIdx) + { + NVC637_CTRL_EXEC_PARTITIONS_INFO *pOutInfo; + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = + &pKernelMIGGpuInstance->MIGComputeInstance[ciIdx]; + + if (!pMIGComputeInstance->bValid) + continue; + + if (!bEnumerateAll && (pMIGComputeInstance != pTargetComputeInstanceInfo)) + continue; + + pParams->execPartId[pParams->execPartCount] = ciIdx; + pOutInfo = &pParams->execPartInfo[pParams->execPartCount]; + ++pParams->execPartCount; + + pOutInfo->gpcCount = pMIGComputeInstance->resourceAllocation.gpcCount; + pOutInfo->veidCount = pMIGComputeInstance->resourceAllocation.veidCount; + pOutInfo->ceCount = kmigmgrCountEnginesOfType(&pMIGComputeInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_COPY(0)); + pOutInfo->nvEncCount = kmigmgrCountEnginesOfType(&pMIGComputeInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_NVENC(0)); + pOutInfo->nvDecCount = kmigmgrCountEnginesOfType(&pMIGComputeInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_NVDEC(0)); + 
pOutInfo->nvJpgCount = kmigmgrCountEnginesOfType(&pMIGComputeInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_NVJPG); + pOutInfo->ofaCount = kmigmgrCountEnginesOfType(&pMIGComputeInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_OFA); + pOutInfo->sharedEngFlag = pMIGComputeInstance->sharedEngFlag; + pOutInfo->veidStartOffset = pMIGComputeInstance->resourceAllocation.veidOffset; + } + + return status; +} + +// +// gisubscriptionCtrlCmdExecPartitionsGetActiveIds +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +gisubscriptionCtrlCmdExecPartitionsGetActiveIds_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription, + NVC637_CTRL_EXEC_PARTITIONS_GET_ACTIVE_IDS_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = pGPUInstanceSubscription->pKernelMIGGpuInstance; + NvU32 ciIdx; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OR_RETURN(pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED), + NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(IS_MIG_IN_USE(pGpu), NV_ERR_INVALID_STATE); + + pParams->execPartCount = 0; + for (ciIdx = 0; + ciIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++ciIdx) + { + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = + &pKernelMIGGpuInstance->MIGComputeInstance[ciIdx]; + + if (!pMIGComputeInstance->bValid) + continue; + + pParams->execPartId[pParams->execPartCount] = ciIdx; + + ct_assert(NV_UUID_LEN == NVC637_UUID_LEN); + ct_assert(NV_UUID_STR_LEN == NVC637_UUID_STR_LEN); + + nvGetSmcUuidString(&pMIGComputeInstance->uuid, + pParams->execPartUuid[pParams->execPartCount].str); + + ++pParams->execPartCount; + } + + return status; +} + +NV_STATUS +gisubscriptionCtrlCmdExecPartitionsExport_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription, + NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + KERNEL_MIG_GPU_INSTANCE *pGPUInstance = pGPUInstanceSubscription->pKernelMIGGpuInstance; + MIG_COMPUTE_INSTANCE *pMIGComputeInstance; + NvU32 gpcIdx; + + // No partitions to export + if (!IS_MIG_IN_USE(pGpu)) + return NV_ERR_NOT_SUPPORTED; + +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + // An unprivileged client has no use case for import/export + if (!rmclientIsCapableOrAdminByHandle(RES_GET_CLIENT_HANDLE(pGPUInstanceSubscription), + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +} + + if (IS_VIRTUAL(pGpu)) + { + // Guest RM does not support import/export + return NV_ERR_NOT_SUPPORTED; + } + + if (IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + return status; + } + + if (pParams->id >= NV_ARRAY_ELEMENTS(pGPUInstance->MIGComputeInstance)) + return NV_ERR_INVALID_ARGUMENT; + + if (!pGPUInstance->MIGComputeInstance[pParams->id].bValid) + return NV_ERR_OBJECT_NOT_FOUND; + + pMIGComputeInstance = &pGPUInstance->MIGComputeInstance[pParams->id]; + + portMemCopy(pParams->info.uuid, sizeof(pParams->info.uuid), + pMIGComputeInstance->uuid.uuid, 
sizeof(pMIGComputeInstance->uuid.uuid)); + pParams->info.sharedEngFlags = pMIGComputeInstance->sharedEngFlag; + pParams->info.veidOffset = pMIGComputeInstance->resourceAllocation.veidOffset; + pParams->info.veidCount = pMIGComputeInstance->resourceAllocation.veidCount; + for (gpcIdx = 0; gpcIdx < pMIGComputeInstance->resourceAllocation.gpcCount; ++gpcIdx) + { + pParams->info.gpcMask |= NVBIT32(pMIGComputeInstance->resourceAllocation.gpcIds[gpcIdx]); + } + bitVectorToRaw(&pMIGComputeInstance->resourceAllocation.engines, + pParams->info.enginesMask, sizeof(pParams->info.enginesMask)); + + return NV_OK; +} + +NV_STATUS +gisubscriptionCtrlCmdExecPartitionsImport_IMPL +( + GPUInstanceSubscription *pGPUInstanceSubscription, + NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pGPUInstanceSubscription); + KERNEL_MIG_GPU_INSTANCE *pGPUInstance = pGPUInstanceSubscription->pKernelMIGGpuInstance; + NV_STATUS status = NV_OK; + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED)) + return NV_ERR_NOT_SUPPORTED; + +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + // An unprivileged client has no use case for import/export + if (!rmclientIsCapableOrAdminByHandle(RES_GET_CLIENT_HANDLE(pGPUInstanceSubscription), + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +} + + if (IS_VIRTUAL(pGpu)) + { + // Guest RM does not support import/export + return NV_ERR_NOT_SUPPORTED; + } + + if (IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + if (status != NV_OK) + return status; + } + + { + GPUMGR_SAVE_COMPUTE_INSTANCE save; + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS restore = + { + .type = KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_RESTORE, + .inst.restore.pComputeInstanceSave = &save, + }; + + portMemSet(&save, 0, sizeof(save)); + save.bValid = NV_TRUE; + save.id = pParams->id; + save.ciInfo = pParams->info; + + if (IS_GSP_CLIENT(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrCreateComputeInstances_HAL(pGpu, pKernelMIGManager, pGPUInstance, NV_FALSE, restore, &pParams->id, NV_FALSE), + cleanup_rpc); + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + } + + return NV_OK; + +cleanup_rpc: + if (IS_GSP_CLIENT(pGpu)) + { + NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS params; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + portMemSet(¶ms, 0, sizeof(params)); + params.execPartCount = 1; + params.execPartId[0] = pParams->id; + + NV_ASSERT_OK( + pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pGPUInstanceSubscription), + RES_GET_HANDLE(pGPUInstanceSubscription), + NVC637_CTRL_CMD_EXEC_PARTITIONS_DELETE, + ¶ms, + sizeof(params))); + } + + return status; +} + +/*! + * @brief Determines whether an object of the given class id is affected by + * gpu/compute instance subscription and should be automatically freed if a + * client unsubscribes from a gpu/compute instance. 
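+ * Device, Subdevice, GPUInstanceSubscription and ComputeInstanceSubscription are
+ * exempt; every other class under the subscribing client's device is freed by
+ * gisubscriptionCleanupOnUnsubscribe().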
+ */ +NvBool +gisubscriptionShouldClassBeFreedOnUnsubscribe_IMPL +( + NvU32 internalClassId +) +{ + NvBool bShouldFree = NV_TRUE; + + switch (internalClassId) + { + case (classId(Device)): + // fall-through + case (classId(Subdevice)): + // fall-through + case (classId(GPUInstanceSubscription)): + // fall-through + case (classId(ComputeInstanceSubscription)): + bShouldFree = NV_FALSE; + break; + default: + break; + } + + return bShouldFree; +} + +/*! + * @brief Automatically frees client resources which may be affected by + * subscription objects. This is intended to be called on unsubscription. + * + * @see gisubscriptionShouldClassBeFreedOnUnsubscribe + * + * @param[in] pCallContext Call context of client to clean up + */ +void +gisubscriptionCleanupOnUnsubscribe_IMPL +( + CALL_CONTEXT *pCallContext +) +{ + RsResourceRef *pDeviceRef; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + RS_ITERATOR iter; + NvHandle *pHandles; + NvU32 handleCount; + NvU32 i; + + NV_ASSERT_OK( + refFindAncestorOfType(pCallContext->pResourceRef, classId(Device), &pDeviceRef)); + + // Determine the number of handles we need to free + handleCount = 0; + iter = serverutilRefIter(pCallContext->pClient->hClient, + pDeviceRef->hResource, + 0, + RS_ITERATE_DESCENDANTS, + NV_FALSE); + while (clientRefIterNext(iter.pClient, &iter)) + { + RsResourceRef *pResourceRef = iter.pResourceRef; + + if (!gisubscriptionShouldClassBeFreedOnUnsubscribe(pResourceRef->internalClassId)) + continue; + + ++handleCount; + NV_PRINTF(LEVEL_INFO, + "Will be freeing resource class id 0x%x on unsubscription!\n", + pResourceRef->internalClassId); + } + + // If we have nothing to free then bail early + if (handleCount == 0) + goto done; + + // Allocate an array large enough to store the handles we need to free + pHandles = portMemAllocNonPaged(handleCount * sizeof(*pHandles)); + if (NULL == pHandles) + { + NV_ASSERT(0); + goto done; + } + + // Store the handles that we need to free + i = 0; + iter = serverutilRefIter(pCallContext->pClient->hClient, + pDeviceRef->hResource, + 0, + RS_ITERATE_DESCENDANTS, + NV_FALSE); + while (clientRefIterNext(iter.pClient, &iter)) + { + RsResourceRef *pResourceRef = iter.pResourceRef; + + if (!gisubscriptionShouldClassBeFreedOnUnsubscribe(pResourceRef->internalClassId)) + continue; + + NV_ASSERT_OR_GOTO(i < handleCount, cleanup); + pHandles[i++] = pResourceRef->hResource; + } + + // + // Free all of the handles we flagged for deletion. + // Note - some of these resources will free other dependant resources, so + // some of these free calls will do nothing. That's fine for our purposes. + // + NV_ASSERT_OR_GOTO(i == handleCount, cleanup); + for (i = 0; i < handleCount; ++i) + pRmApi->Free(pRmApi, pCallContext->pClient->hClient, pHandles[i]); + +cleanup: + portMemFree(pHandles); + +done: + return; +} + diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c b/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c new file mode 100644 index 000000000..57304bb03 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mig_mgr/kernel_mig_manager.c @@ -0,0 +1,6193 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "kernel/gpu/mig_mgr/compute_instance_subscription.h" +#include "kernel/gpu/mig_mgr/gpu_instance_subscription.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "kernel/gpu/ce/kernel_ce.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mmu/kern_gmmu.h" +#include "kernel/gpu/mem_mgr/heap.h" +#include "rmapi/client.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "gpu/mem_mgr/mem_scrub.h" +#include "vgpu/rpc.h" +#include "kernel/gpu/gr/kernel_graphics_manager.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/core/locks.h" +#include "nv_ref.h" +#include "nvRmReg.h" + +struct KERNEL_MIG_MANAGER_PRIVATE_DATA +{ + NvBool bInitialized; + KERNEL_MIG_MANAGER_STATIC_INFO staticInfo; +}; + +/*! + * @brief Function to increment gi/ci refcount + */ +NV_STATUS +kmigmgrIncRefCount_IMPL +( + RsShared *pShared +) +{ + NvS32 refCount; + + NV_ASSERT_OR_RETURN(pShared != NULL, NV_ERR_INVALID_ARGUMENT); + + serverRefShare(&g_resServ, pShared); + refCount = serverGetShareRefCount(&g_resServ, pShared); + + // Make sure refCount didn't overflow + NV_ASSERT_OR_RETURN(refCount > 0, NV_ERR_INVALID_STATE); + return NV_OK; +} + +/*! + * @brief Function to decrement gi/ci refcount + */ +NV_STATUS +kmigmgrDecRefCount_IMPL +( + RsShared *pShared +) +{ + NvS32 refCount; + + NV_ASSERT_OR_RETURN(pShared != NULL, NV_ERR_INVALID_ARGUMENT); + + refCount = serverGetShareRefCount(&g_resServ, pShared); + serverFreeShare(&g_resServ, pShared); + --refCount; + + // Make sure refCount didn't underflow + NV_ASSERT_OR_RETURN(refCount > 0, NV_ERR_INVALID_STATE); + return NV_OK; +} + +/*! @brief create a reference to a single GPU instance, no compute instance */ +MIG_INSTANCE_REF +kmigmgrMakeGIReference_IMPL +( + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + MIG_INSTANCE_REF ref = { pKernelMIGGpuInstance, NULL }; + return ref; +} + +/*! @brief create a reference to a compute instance */ +MIG_INSTANCE_REF +kmigmgrMakeCIReference_IMPL +( + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + MIG_COMPUTE_INSTANCE *pMIGComputeInstance +) +{ + MIG_INSTANCE_REF ref = { pKernelMIGGpuInstance, pMIGComputeInstance }; + return ref; +} + +/*! 
@brief create a Ref referencing no GI/CI */ +MIG_INSTANCE_REF +kmigmgrMakeNoMIGReference_IMPL() +{ + MIG_INSTANCE_REF ref = { NULL, NULL }; + return ref; +} + +/*! @brief check if MIG attribution id is valid for max instances */ +NvBool +kmigmgrIsInstanceAttributionIdValid_IMPL +( + NvU16 id +) +{ + return (((id / KMIGMGR_MAX_GPU_SWIZZID) <= KMIGMGR_MAX_GPU_INSTANCES) && + ((id % KMIGMGR_MAX_GPU_SWIZZID) <= KMIGMGR_MAX_COMPUTE_INSTANCES)); +} + +/*! @brief check if existing valid instance ref is passed in */ +NvBool +kmigmgrIsMIGReferenceValid_IMPL +( + MIG_INSTANCE_REF *pRef +) +{ + // Invalid argument + NV_CHECK_OR_RETURN(LEVEL_SILENT, pRef != NULL, NV_FALSE); + // Invalid argument + NV_CHECK_OR_RETURN(LEVEL_SILENT, !((pRef->pKernelMIGGpuInstance == NULL) && + (pRef->pMIGComputeInstance != NULL)), NV_FALSE); + + NV_CHECK_OR_RETURN(LEVEL_SILENT, pRef->pKernelMIGGpuInstance != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(pRef->pKernelMIGGpuInstance->bValid, NV_FALSE); + + // If we reached this point, the GPU instance is valid + NV_CHECK_OR_RETURN(LEVEL_SILENT, pRef->pMIGComputeInstance != NULL, NV_TRUE); + NV_ASSERT_OR_RETURN(pRef->pMIGComputeInstance->bValid, NV_FALSE); + + return NV_TRUE; +} + +/*! @brief check if the same instance(s) are passed in; only compare GI if lhs has no CI */ +NvBool +kmigmgrAreMIGReferencesSame_IMPL +( + MIG_INSTANCE_REF *pRefA, + MIG_INSTANCE_REF *pRefB +) +{ + NV_CHECK_OR_RETURN(LEVEL_SILENT, kmigmgrIsMIGReferenceValid(pRefA) && + kmigmgrIsMIGReferenceValid(pRefB), NV_FALSE); + + if ((pRefA->pKernelMIGGpuInstance != pRefB->pKernelMIGGpuInstance) || + ((pRefA->pMIGComputeInstance != NULL) && + (pRefA->pMIGComputeInstance != pRefB->pMIGComputeInstance))) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +/*! + * @brief Count set bits within range indicated by given base type in bitvector + * + * @param[in] pEngines Bitvector to count + * @param[in] engineType 0th index NV2080_ENGINE_TYPE, only partitionable engines supported + */ +NvU32 +kmigmgrCountEnginesOfType_IMPL +( + const ENGTYPE_BIT_VECTOR *pEngines, + NvU32 engineType +) +{ + NV_RANGE range = rangeMake(engineType, engineType); + ENGTYPE_BIT_VECTOR mask; + + if (pEngines == NULL) + return 0; + + if (!NV2080_ENGINE_TYPE_IS_VALID(engineType)) + return 0; + + if (NV2080_ENGINE_TYPE_IS_GR(engineType)) + range = NV2080_ENGINE_RANGE_GR(); + else if (NV2080_ENGINE_TYPE_IS_COPY(engineType)) + range = NV2080_ENGINE_RANGE_COPY(); + else if (NV2080_ENGINE_TYPE_IS_NVDEC(engineType)) + range = NV2080_ENGINE_RANGE_NVDEC(); + else if (NV2080_ENGINE_TYPE_IS_NVENC(engineType)) + range = NV2080_ENGINE_RANGE_NVENC(); + else if (NV2080_ENGINE_TYPE_IS_NVJPEG(engineType)) + range = NV2080_ENGINE_RANGE_NVJPEG(); + + bitVectorClrAll(&mask); + bitVectorSetRange(&mask, range); + bitVectorAnd(&mask, &mask, pEngines); + return bitVectorCountSetBits(&mask); +} + +/*! + * @brief Calculate the attribution ID for the given MIG instance reference. + * + * @note the attribution ID is an encoding of gpu/compute instance IDs dependent + * upon the maximum values of these IDs which must be queried by the + * recipient in order to decode. Attribution values for NULL or lone + * GPU instances will produce non-zero attribution IDs which will decode to + * out-of-range values for both IDs. 
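+ *
+ *       Decoding is simply the inverse of the arithmetic below (sketch):
+ *         giID = attrId / KMIGMGR_MAX_GPU_SWIZZID;
+ *         ciID = attrId % KMIGMGR_MAX_GPU_SWIZZID;
+ *       where a decoded giID >= KMIGMGR_MAX_GPU_SWIZZID or
+ *       ciID >= KMIGMGR_MAX_COMPUTE_INSTANCES means no GPU/compute instance
+ *       was referenced.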
+ * + * @param[in] ref Reference to a Gi/CI + * + * @return the encoded attribution ID + */ +NvU16 +kmigmgrGetAttributionIdFromMIGReference_IMPL +( + MIG_INSTANCE_REF ref +) +{ + NvU16 giID = KMIGMGR_MAX_GPU_SWIZZID; + NvU16 ciID = KMIGMGR_MAX_COMPUTE_INSTANCES; + + // + // Inverting this encoding depends upon the compute instance IDs having a + // shorter range than the gpu instance IDs, otherwise high compute instance + // IDs will cause aliasing + // + ct_assert(KMIGMGR_MAX_COMPUTE_INSTANCES < KMIGMGR_MAX_GPU_SWIZZID); + + // We are also depending on this encoding fitting in 16 bits... + ct_assert((KMIGMGR_MAX_GPU_SWIZZID * KMIGMGR_MAX_COMPUTE_INSTANCES) <= NV_U16_MAX); + + if (kmigmgrIsMIGReferenceValid(&ref) && + (ref.pKernelMIGGpuInstance->swizzId < KMIGMGR_MAX_GPU_SWIZZID)) + { + giID = (NvU16)ref.pKernelMIGGpuInstance->swizzId; + if ((ref.pMIGComputeInstance != NULL) && + (ref.pMIGComputeInstance->id < KMIGMGR_MAX_COMPUTE_INSTANCES)) + { + ciID = (NvU16)ref.pMIGComputeInstance->id; + } + } + + return (giID * KMIGMGR_MAX_GPU_SWIZZID) + ciID; +} + +/*! + * @brief Function to convert an engine type from one bitvector to a + * corresponding engine type in another bitvector. The two bitvectors + * are expected to have the same set bit count. + */ +NV_STATUS +kmigmgrEngineTypeXlate_IMPL +( + ENGTYPE_BIT_VECTOR *pSrc, + NvU32 srcEngineType, + ENGTYPE_BIT_VECTOR *pDst, + NvU32 *pDstEngineType +) +{ + NvU32 tempSrcEngineType; + NvU32 tempDstEngineType; + NvBool bFound; + + NV_ASSERT_OR_RETURN(pSrc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDst != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pDstEngineType != NULL, NV_ERR_INVALID_ARGUMENT); + + if (!bitVectorTest(pSrc, srcEngineType)) + return NV_ERR_OBJECT_NOT_FOUND; + + // Iterate over both masks at the same time + bFound = NV_FALSE; + FOR_EACH_IN_BITVECTOR_PAIR(pSrc, tempSrcEngineType, pDst, tempDstEngineType) + { + bFound = (srcEngineType == tempSrcEngineType); + if (bFound) + break; + } + FOR_EACH_IN_BITVECTOR_PAIR_END(); + + // We already checked that the engine is present above, this should never fire + NV_ASSERT(bFound); + + *pDstEngineType = tempDstEngineType; + + return NV_OK; +} + +// +// below algorithm depends on contiguity of all partitionable engine values +// in NV2080_ENGINE_TYPE, so add asserts here. +// Note - this only checks the first and last ID, a proper check would account +// for all entries, but that's not possible at this time. +// +ct_assert((NV2080_ENGINE_TYPE_GR(NV2080_ENGINE_TYPE_GR_SIZE - 1) - + NV2080_ENGINE_TYPE_GR(0)) == (NV2080_ENGINE_TYPE_GR_SIZE - 1)); +ct_assert((NV2080_ENGINE_TYPE_COPY(NV2080_ENGINE_TYPE_COPY_SIZE - 1) - + NV2080_ENGINE_TYPE_COPY(0)) == (NV2080_ENGINE_TYPE_COPY_SIZE - 1)); +ct_assert((NV2080_ENGINE_TYPE_NVDEC(NV2080_ENGINE_TYPE_NVDEC_SIZE - 1) - + NV2080_ENGINE_TYPE_NVDEC(0)) == (NV2080_ENGINE_TYPE_NVDEC_SIZE - 1)); +ct_assert((NV2080_ENGINE_TYPE_NVENC(NV2080_ENGINE_TYPE_NVENC_SIZE - 1) - + NV2080_ENGINE_TYPE_NVENC(0)) == (NV2080_ENGINE_TYPE_NVENC_SIZE - 1)); + +/*! + * @brief Chooses the engines of the given type to allocate. Supports + * shared/exclusive ownership arbitration. 
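+ *
+ * When bShared is NV_TRUE the request is satisfied first from engines already
+ * present in pSharedEngines (so instances may overlap on them); the remainder,
+ * and all exclusive requests, are taken from engines present in neither
+ * pSharedEngines nor pExclusiveEngines.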
+ * + * @param[IN] pSourceEngines Mask of engines to allocate from + * @param[IN} bShared NV_TRUE if engines should be shared + * @param[IN] engTypeRange NV_RANGE of bit indices for this eng type + * @param[IN] regEngCount Requested number of engines in this exec part + * @param[I/O] pOutEngines Mask of engines already/newly allocated + * @param[I/O] pExclusiveEngines Mask of already exclusively-allocated engines + * @param[I/O] pSharedEngines Mask of engines shared by other instances + */ +NV_STATUS +kmigmgrAllocateInstanceEngines_IMPL +( + ENGTYPE_BIT_VECTOR *pSourceEngines, + NvBool bShared, + NV_RANGE engTypeRange, + NvU32 reqEngCount, + ENGTYPE_BIT_VECTOR *pOutEngines, + ENGTYPE_BIT_VECTOR *pExclusiveEngines, + ENGTYPE_BIT_VECTOR *pSharedEngines +) +{ + NvU32 allocated = 0; + ENGTYPE_BIT_VECTOR engines; + NvU32 engineType; + NvU32 localIdx; + + // If using shared engines, allocate as many from existing shared engines as possible + if (bShared) + { + bitVectorClrAll(&engines); + bitVectorSetRange(&engines, engTypeRange); + bitVectorAnd(&engines, &engines, pSourceEngines); + localIdx = 0; + FOR_EACH_IN_BITVECTOR(&engines, engineType) + { + if (allocated == reqEngCount) + break; + + // Skip engines that aren't in the shared pool already + if (!bitVectorTest(pSharedEngines, engineType)) + { + localIdx++; + continue; + } + + // assign the engine + bitVectorSet(pOutEngines, engTypeRange.lo + localIdx); + + localIdx++; + allocated++; + } + FOR_EACH_IN_BITVECTOR_END(); + } + + // Allocate the rest from the free pool + bitVectorClrAll(&engines); + bitVectorSetRange(&engines, engTypeRange); + bitVectorAnd(&engines, &engines, pSourceEngines); + localIdx = 0; + FOR_EACH_IN_BITVECTOR(&engines, engineType) + { + if (allocated == reqEngCount) + break; + + // Skip in-use engines + if (bitVectorTest(pSharedEngines, engineType) || + bitVectorTest(pExclusiveEngines, engineType)) + { + localIdx++; + continue; + } + + // Add the engine to the appropriate in-use pool + bitVectorSet((bShared ? pSharedEngines : pExclusiveEngines), engineType); + + // Assign the engine + bitVectorSet(pOutEngines, engTypeRange.lo + localIdx); + + localIdx++; + allocated++; + } + FOR_EACH_IN_BITVECTOR_END(); + + NV_CHECK_OR_RETURN(LEVEL_SILENT, allocated == reqEngCount, NV_ERR_INSUFFICIENT_RESOURCES); + return NV_OK; +} + +/*! 
+ * @brief Convert global/physical engine mask to logical/local (no-hole) mask + * + * @param[in] pPhysicalEngineMask Bitvector storing physical mask + * @param[in] pLocalEngineMask Bitvector storing local mask + */ +void +kmigmgrGetLocalEngineMask_IMPL +( + ENGTYPE_BIT_VECTOR *pPhysicalEngineMask, + ENGTYPE_BIT_VECTOR *pLocalEngineMask +) +{ + NV_RANGE range; + NvU32 count; + bitVectorClrAll(pLocalEngineMask); + + count = kmigmgrCountEnginesOfType(pPhysicalEngineMask, NV2080_ENGINE_TYPE_GR(0)); + if (count > 0) + { + range = rangeMake(NV2080_ENGINE_TYPE_GR(0), NV2080_ENGINE_TYPE_GR(count - 1)); + bitVectorSetRange(pLocalEngineMask, range); + } + + count = kmigmgrCountEnginesOfType(pPhysicalEngineMask, NV2080_ENGINE_TYPE_COPY(0)); + if (count > 0) + { + range = rangeMake(NV2080_ENGINE_TYPE_COPY(0), NV2080_ENGINE_TYPE_COPY(count - 1)); + bitVectorSetRange(pLocalEngineMask, range); + } + + count = kmigmgrCountEnginesOfType(pPhysicalEngineMask, NV2080_ENGINE_TYPE_NVDEC(0)); + if (count > 0) + { + range = rangeMake(NV2080_ENGINE_TYPE_NVDEC(0), NV2080_ENGINE_TYPE_NVDEC(count - 1)); + bitVectorSetRange(pLocalEngineMask, range); + } + + count = kmigmgrCountEnginesOfType(pPhysicalEngineMask, NV2080_ENGINE_TYPE_NVENC(0)); + if (count > 0) + { + range = rangeMake(NV2080_ENGINE_TYPE_NVENC(0), NV2080_ENGINE_TYPE_NVENC(count - 1)); + bitVectorSetRange(pLocalEngineMask, range); + } + + count = kmigmgrCountEnginesOfType(pPhysicalEngineMask, NV2080_ENGINE_TYPE_NVJPEG(0)); + if (count > 0) + { + range = rangeMake(NV2080_ENGINE_TYPE_NVJPEG(0), NV2080_ENGINE_TYPE_NVJPEG(count - 1)); + bitVectorSetRange(pLocalEngineMask, range); + } + + count = kmigmgrCountEnginesOfType(pPhysicalEngineMask, NV2080_ENGINE_TYPE_OFA); + if (count > 0) + bitVectorSet(pLocalEngineMask, NV2080_ENGINE_TYPE_OFA); +} + +/*! + * @brief Create client and subdevice handles to make calls into this gpu instance + */ +NV_STATUS +kmigmgrAllocGPUInstanceHandles_IMPL +( + OBJGPU *pGpu, + NvU32 swizzId, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hSubscription = NV01_NULL_OBJECT; + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubdevice; + NVC637_ALLOCATION_PARAMETERS params; + + NV_ASSERT_OK_OR_RETURN( + rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, &hClient, &hDevice, &hSubdevice)); + + portMemSet(¶ms, 0, sizeof(params)); + params.swizzId = swizzId; + NV_ASSERT_OK_OR_RETURN( + pRmApi->Alloc(pRmApi, hClient, hSubdevice, &hSubscription, AMPERE_SMC_PARTITION_REF, ¶ms)); + + pKernelMIGGpuInstance->instanceHandles.hClient = hClient; + pKernelMIGGpuInstance->instanceHandles.hDevice = hDevice; + pKernelMIGGpuInstance->instanceHandles.hSubdevice = hSubdevice; + pKernelMIGGpuInstance->instanceHandles.hSubscription = hSubscription; + + return NV_OK; +} + +/*! 
+ * @brief Delete created gpu instance handles if they exist + */ +void +kmigmgrFreeGPUInstanceHandles_IMPL +( + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + if (pKernelMIGGpuInstance->instanceHandles.hClient != NV01_NULL_OBJECT) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + pRmApi->Free(pRmApi, pKernelMIGGpuInstance->instanceHandles.hClient, pKernelMIGGpuInstance->instanceHandles.hClient); + pKernelMIGGpuInstance->instanceHandles.hClient = NV01_NULL_OBJECT; + pKernelMIGGpuInstance->instanceHandles.hDevice = NV01_NULL_OBJECT; + pKernelMIGGpuInstance->instanceHandles.hSubdevice = NV01_NULL_OBJECT; + pKernelMIGGpuInstance->instanceHandles.hSubscription = NV01_NULL_OBJECT; + } +} + +/*! + * @brief Checks if all references to gpu instance are internal + */ +NvBool +kmigmgrIsGPUInstanceReadyToBeDestroyed_IMPL +( + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + NvS32 targetRefCount; + NvS32 actualRefCount; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, pKernelMIGGpuInstance->pShare != NULL, NV_TRUE); + + // + // Initial refCount is increased to "1" when gpu instance is created and then + // every subscription by a client should increase the refcount + // + targetRefCount = 1; + + // A client handle is allocated to support internal GR Routing + if (pKernelMIGGpuInstance->instanceHandles.hClient != NV01_NULL_OBJECT) + targetRefCount++; + + // + // GPU instance scrubber is initialized during gpu instance creation and deleted + // when gpu instance is invalidated, and subscribes to the gpu instance, so must + // be accounted for in the target ref count + // + if (pKernelMIGGpuInstance->bMemoryPartitionScrubberInitialized) + targetRefCount++; + + actualRefCount = serverGetShareRefCount(&g_resServ, pKernelMIGGpuInstance->pShare); + if (actualRefCount > targetRefCount) + return NV_FALSE; + + // Mismatch here indicates programming error + NV_ASSERT(actualRefCount == targetRefCount); + return NV_TRUE; +} + +NV_STATUS +kmigmgrConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + ENGDESCRIPTOR engDesc +) +{ + NvU32 GIIdx; + KERNEL_MIG_MANAGER_PRIVATE_DATA *pPrivate; + + pKernelMIGManager->bMIGEnabled = NV_FALSE; + pKernelMIGManager->swizzIdInUseMask = 0x0; + + pPrivate = portMemAllocNonPaged(sizeof(*pPrivate)); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pPrivate != NULL, NV_ERR_NO_MEMORY); + portMemSet(pPrivate, 0, sizeof(*pPrivate)); + pKernelMIGManager->pPrivate = pPrivate; + + for (GIIdx = 0; GIIdx < NV_ARRAY_ELEMENTS(pKernelMIGManager->kernelMIGGpuInstance); ++GIIdx) + { + kmigmgrInitGPUInstanceInfo(pGpu, pKernelMIGManager, + &pKernelMIGManager->kernelMIGGpuInstance[GIIdx]); + } + + kmigmgrInitRegistryOverrides(pGpu, pKernelMIGManager); + + return NV_OK; +} + +void +kmigmgrDestruct_IMPL +( + KernelMIGManager *pKernelMIGManager +) +{ + NvU32 GIIdx; + NvU32 CIIdx; + + portMemFree(pKernelMIGManager->pPrivate->staticInfo.pPartitionableEngines); + pKernelMIGManager->pPrivate->staticInfo.pPartitionableEngines = NULL; + portMemFree(pKernelMIGManager->pPrivate->staticInfo.pProfiles); + pKernelMIGManager->pPrivate->staticInfo.pProfiles = NULL; + portMemFree(pKernelMIGManager->pPrivate->staticInfo.pSwizzIdFbMemPageRanges); + pKernelMIGManager->pPrivate->staticInfo.pSwizzIdFbMemPageRanges = NULL; + + portMemFree(pKernelMIGManager->pPrivate); + pKernelMIGManager->pPrivate = NULL; + + for (GIIdx = 0; GIIdx < NV_ARRAY_ELEMENTS(pKernelMIGManager->kernelMIGGpuInstance); ++GIIdx) + { + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = 
&pKernelMIGManager->kernelMIGGpuInstance[GIIdx]; + + // Shouldn't have any valid GPU instance + if (pKernelMIGGpuInstance->bValid) + { + NV_PRINTF(LEVEL_ERROR, + "Deleting valid GPU instance with swizzId - %d. Should have been deleted before shutdown!\n", + pKernelMIGGpuInstance->swizzId); + } + + for (CIIdx = 0; + CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++CIIdx) + { + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = &pKernelMIGGpuInstance->MIGComputeInstance[CIIdx]; + + // Shouldn't have any valid compute instance + if (pMIGComputeInstance->bValid) + { + NV_PRINTF(LEVEL_ERROR, + "Deleting valid compute instance - %d. Should have been deleted before shutdown!\n", + CIIdx); + } + } + } +} + +/*! + * @brief Handle KMIGMGR init which must occur after GPU post load. + * + * @param[in] pGpu + * @param[in] pUnusedData Unused callback data + */ +static NV_STATUS +_kmigmgrHandlePostSchedulingEnableCallback +( + OBJGPU *pGpu, + void *pUnusedData +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + if (!IS_VIRTUAL(pGpu)) + { + NvBool bTopLevelScrubberEnabled = NV_FALSE; + NvBool bTopLevelScrubberConstructed = NV_FALSE; + + memmgrGetTopLevelScrubberStatus(pGpu, pMemoryManager, + &bTopLevelScrubberEnabled, &bTopLevelScrubberConstructed); + + // + // This callback is handled as part of the same routine that triggers + // scrubber initialization. Unfortunately this callback depends on the + // scrubber being initialized first, and we cannot enforce that the scrubber + // callback always goes first. However, the trigger routine does support a + // retry mechanism that will allow us to get called back after all of the + // other callbacks in the list are completed. We signal for retry by + // returning NV_WARN_MORE_PROCESSING_REQUIRED if the scrubber is enabled but + // hasn't been intialized yet. The warning will be quashed on the first + // attempt, but will then be reported and trigger initialization failure if + // it happens again on the retry. 
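+ //
+ // In short: the check below passes once the scrubber is constructed (or not
+ // enabled at all); otherwise NV_WARN_MORE_PROCESSING_REQUIRED asks the caller
+ // to run this callback again after the remaining callbacks have completed.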
+ // + // Bug: 2997744, skipping the check here because top level scrubber creation is dealyed until + // GPU instances are created in MIG enabled guest + // + NV_CHECK_OR_RETURN(LEVEL_SILENT, + !bTopLevelScrubberEnabled || bTopLevelScrubberConstructed, + NV_WARN_MORE_PROCESSING_REQUIRED); + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memmgrSetPartitionableMem_HAL(pGpu, pMemoryManager)); + + if (IS_MIG_ENABLED(pGpu)) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + // + // Populate static GPU instance memory config which will be used to manage + // GPU instance memory + // + NV_ASSERT_OK_OR_RETURN(kmemsysPopulateMIGGPUInstanceMemConfig_HAL(pGpu, pKernelMemorySystem)); + + // Initialize static info derived from physical RM + NV_ASSERT_OK_OR_RETURN(kmigmgrLoadStaticInfo_HAL(pGpu, pKernelMIGManager)); + + // KERNEL_ONLY variants require static info to detect reduced configs + kmigmgrDetectReducedConfig_HAL(pGpu, pKernelMIGManager); + } + + NV_ASSERT_OK(kmigmgrRestoreFromPersistence_HAL(pGpu, pKernelMIGManager)); + + return NV_OK; +} + +static NV_STATUS _kmigmgrHandlePreSchedulingDisableCallback +( + OBJGPU *pGpu, + void *pUnusedData +) +{ + NvU32 GIIdx; + NvU32 CIIdx; + NV_STATUS rmStatus = NV_OK; + NvBool bDisable = NV_FALSE; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + for (GIIdx = 0; GIIdx < NV_ARRAY_ELEMENTS(pKernelMIGManager->kernelMIGGpuInstance); ++GIIdx) + { + if (pKernelMIGManager->kernelMIGGpuInstance[GIIdx].bValid) + { + kmigmgrDestroyGPUInstanceScrubber(pGpu, pKernelMIGManager, &pKernelMIGManager->kernelMIGGpuInstance[GIIdx]); + } + } + + if (IS_VIRTUAL(pGpu) && kmigmgrUseLegacyVgpuPolicy(pGpu, pKernelMIGManager)) + return NV_OK; + + // + // Update persistent instance topology so that we can recreate it on next + // GPU attach. 
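+ // The saved topology is what kmigmgrRestoreFromPersistence_HAL() replays on the
+ // next attach (for the PF path via NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE
+ // and NVC637_CTRL_CMD_EXEC_PARTITIONS_IMPORT; see kmigmgrRestoreFromPersistence_PF).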
+ // + NV_ASSERT_OK(kmigmgrSaveToPersistence(pGpu, pKernelMIGManager)); + + if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) + return NV_OK; + + for (GIIdx = 0; GIIdx < NV_ARRAY_ELEMENTS(pKernelMIGManager->kernelMIGGpuInstance); ++GIIdx) + { + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = &pKernelMIGManager->kernelMIGGpuInstance[GIIdx]; + NvU32 swizzId; + + // Skip invalid gpu instances + if (!pKernelMIGGpuInstance->bValid) + continue; + + swizzId = pKernelMIGGpuInstance->swizzId; + + // Shouldn't be any valid gpu instances + NV_PRINTF(LEVEL_ERROR, + "Invalidating valid gpu instance with swizzId = %d\n", + swizzId); + + for (CIIdx = 0; + CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++CIIdx) + { + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = + &pKernelMIGGpuInstance->MIGComputeInstance[CIIdx]; + + // Skip invalid compute instances + if (!pMIGComputeInstance->bValid) + continue; + + // Shouldn't be any valid compute instances + NV_PRINTF(LEVEL_ERROR, + "Invalidating valid compute instance with id = %d\n", + CIIdx); + + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmigmgrDeleteComputeInstance(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, CIIdx, NV_TRUE)); + + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NVC637_CTRL_EXEC_PARTITIONS_DELETE_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.execPartCount = 1; + params.execPartId[0] = CIIdx; + + NV_ASSERT_OK( + pRmApi->Control(pRmApi, + pKernelMIGGpuInstance->instanceHandles.hClient, + pKernelMIGGpuInstance->instanceHandles.hSubscription, + NVC637_CTRL_CMD_EXEC_PARTITIONS_DELETE, + ¶ms, + sizeof(params))); + } + } + + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmigmgrInvalidateGPUInstance(pGpu, pKernelMIGManager, swizzId, NV_TRUE)); + + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.partitionCount = 1; + params.partitionInfo[0].bValid = NV_FALSE; + params.partitionInfo[0].swizzId = swizzId; + + NV_ASSERT_OK( + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_GPU_INSTANCES, + ¶ms, + sizeof(params))); + } + + // There was an active gpu instance, we need to disable MIG later + bDisable = NV_TRUE; + } + + // Disable MIG + if (pKernelMIGManager->swizzIdInUseMask != 0x0) + { + NV_ASSERT(0); + NV_PRINTF(LEVEL_ERROR, "leaked swizzid mask 0x%llx !!\n", pKernelMIGManager->swizzIdInUseMask); + } + + if (bDisable) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmigmgrSetMIGState(pGpu, pKernelMIGManager, NV_TRUE, NV_FALSE, NV_TRUE)); + } + + return NV_OK; +} + +NV_STATUS +kmigmgrStateInitLocked_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + // + // Configure MIG Mode based on devinit's determination of MIG enable + // preconditions being met or not. Devinit will set SW_SCRATCH bit if MIG + // mode was requested and was able to be supported / enabled. + // + if (kmigmgrIsDevinitMIGBitSet_HAL(pGpu, pKernelMIGManager)) + pKernelMIGManager->bMIGEnabled = NV_TRUE; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, kmigmgrIsMIGSupported(pGpu, pKernelMIGManager), NV_OK); + + // Setup a callback to initialize state at the very end of GPU post load + NV_ASSERT_OK( + kfifoAddSchedulingHandler(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + _kmigmgrHandlePostSchedulingEnableCallback, NULL, + _kmigmgrHandlePreSchedulingDisableCallback, NULL)); + + return NV_OK; +} + +/*! 
State unload */ +NV_STATUS +kmigmgrStateUnload_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 flags +) +{ + kmigmgrClearStaticInfo_HAL(pGpu, pKernelMIGManager); + + // Nothing to do if MIG is not supported + NV_CHECK_OR_RETURN(LEVEL_SILENT, kmigmgrIsMIGSupported(pGpu, pKernelMIGManager), NV_OK); + + kfifoRemoveSchedulingHandler(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + _kmigmgrHandlePostSchedulingEnableCallback, NULL, + _kmigmgrHandlePreSchedulingDisableCallback, NULL); + + return NV_OK; +} + +/*! Init registry overrides */ +void +kmigmgrInitRegistryOverrides_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ +} + +/** + * @brief Retrieve data block for GPU instance at given slot + */ +KERNEL_MIG_GPU_INSTANCE * +kmigmgrGetMIGGpuInstanceSlot_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 i +) +{ + NV_ASSERT_OR_RETURN(i < NV_ARRAY_ELEMENTS(pKernelMIGManager->kernelMIGGpuInstance), NULL); + return &pKernelMIGManager->kernelMIGGpuInstance[i]; +} + +/** + * @brief Returns true if MIG is supported. + * Also MIG is not supported on platforms that support ATS over NVLink. + */ +NvBool +kmigmgrIsMIGSupported_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + // MIG is not supported on platforms that support ATS over NVLink. + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)) + { + NV_ASSERT(!pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED)); + return NV_FALSE; + } + + return pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED); +} + +/*! + * @brief Determines if MIG is enabled in supported system or not + */ +NvBool +kmigmgrIsMIGEnabled_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + return kmigmgrIsMIGSupported(pGpu, pKernelMIGManager) && pKernelMIGManager->bMIGEnabled; +} + +/*! + * @brief Determines if MIG GPU instancing is enabled + */ +NvBool +kmigmgrIsMIGGpuInstancingEnabled_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + return (IS_MIG_ENABLED(pGpu) && + (pKernelMIGManager->swizzIdInUseMask != 0)); +} + +/*! + * @brief Determines if MIG memory partitioning is enabled + */ +NvBool +kmigmgrIsMIGMemPartitioningEnabled_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + NvU32 swizzId; + + if (!IS_MIG_IN_USE(pGpu)) + { + return NV_FALSE; + } + + FOR_EACH_INDEX_IN_MASK(64, swizzId, pKernelMIGManager->swizzIdInUseMask) + { + if (kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, swizzId)) + { + return NV_TRUE; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return NV_FALSE; +} + +/*! + * @brief Determines if NvLink and P2P are compatible with MIG + */ +NvBool +kmigmgrIsMIGNvlinkP2PSupported_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + // + // No need to make decision based on any override if MIG is not supported/enabled + // on a specific chip + // + if (!IS_MIG_ENABLED(pGpu)) + { + return NV_TRUE; + } + + // MIG+NVLINK not supported by default + return NV_FALSE; +} + +/*! Retrieve immutable static data */ +const KERNEL_MIG_MANAGER_STATIC_INFO * +kmigmgrGetStaticInfo_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + KERNEL_MIG_MANAGER_PRIVATE_DATA *pPrivate = (KERNEL_MIG_MANAGER_PRIVATE_DATA *)pKernelMIGManager->pPrivate; + return ((pPrivate != NULL) && pPrivate->bInitialized) ? &pPrivate->staticInfo : NULL; +} + +/*! 
Initialize static information queried from Physical RM */ +NV_STATUS +kmigmgrLoadStaticInfo_KERNEL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + KERNEL_MIG_MANAGER_PRIVATE_DATA *pPrivate = (KERNEL_MIG_MANAGER_PRIVATE_DATA *)pKernelMIGManager->pPrivate; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pPrivate != NULL, NV_ERR_INVALID_STATE); + + if (pPrivate->bInitialized) + return NV_OK; + + // + // HACK + // Some of the static data implementations depend on other static data. We + // must publish early to make the data accessible as it becomes available. + // + pPrivate->bInitialized = NV_TRUE; + + pPrivate->staticInfo.pPartitionableEngines = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pPartitionableEngines)); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + pPrivate->staticInfo.pPartitionableEngines != NULL, + status = NV_ERR_NO_MEMORY; + goto failed;); + portMemSet(pPrivate->staticInfo.pPartitionableEngines, 0x0, sizeof(*pPrivate->staticInfo.pPartitionableEngines)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PARTITIONABLE_ENGINES, + pPrivate->staticInfo.pPartitionableEngines, + sizeof(*pPrivate->staticInfo.pPartitionableEngines)), + failed); + + pPrivate->staticInfo.pProfiles = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pProfiles)); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + pPrivate->staticInfo.pProfiles != NULL, + status = NV_ERR_NO_MEMORY; + goto failed;); + portMemSet(pPrivate->staticInfo.pProfiles, 0x0, sizeof(*pPrivate->staticInfo.pProfiles)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_PROFILES, + pPrivate->staticInfo.pProfiles, + sizeof(*pPrivate->staticInfo.pProfiles)), + failed); + + pPrivate->staticInfo.pSwizzIdFbMemPageRanges = portMemAllocNonPaged(sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges)); + NV_CHECK_OR_ELSE(LEVEL_ERROR, + pPrivate->staticInfo.pSwizzIdFbMemPageRanges != NULL, + status = NV_ERR_NO_MEMORY; + goto failed;); + portMemSet(pPrivate->staticInfo.pSwizzIdFbMemPageRanges, 0x0, sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges)); + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_STATIC_KMIGMGR_GET_SWIZZ_ID_FB_MEM_PAGE_RANGES, + pPrivate->staticInfo.pSwizzIdFbMemPageRanges, + sizeof(*pPrivate->staticInfo.pSwizzIdFbMemPageRanges)); + + if (status == NV_ERR_NOT_SUPPORTED) + { + // Only supported on specific GPU's + status = NV_OK; + portMemFree(pPrivate->staticInfo.pSwizzIdFbMemPageRanges); + pPrivate->staticInfo.pSwizzIdFbMemPageRanges = NULL; + } + else if (status != NV_OK) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, status, failed); + } + + return status; + +failed: + portMemFree(pPrivate->staticInfo.pPartitionableEngines); + pPrivate->staticInfo.pPartitionableEngines = NULL; + portMemFree(pPrivate->staticInfo.pProfiles); + pPrivate->staticInfo.pProfiles = NULL; + portMemFree(pPrivate->staticInfo.pSwizzIdFbMemPageRanges); + pPrivate->staticInfo.pSwizzIdFbMemPageRanges = NULL; + + pPrivate->bInitialized = NV_FALSE; + + return status; +} + +/*! 
+ * @brief Clears Static information set for vGPU + */ +void +kmigmgrClearStaticInfo_VF +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + NvU32 i; + + // Nothing to do + if (!kmigmgrUseLegacyVgpuPolicy(pGpu, pKernelMIGManager)) + return; + + for (i = 0; i < KMIGMGR_MAX_GPU_INSTANCES; ++i) + { + if (pKernelMIGManager->kernelMIGGpuInstance[i].pShare != NULL) + { + serverFreeShare(&g_resServ, pKernelMIGManager->kernelMIGGpuInstance[i].pShare); + pKernelMIGManager->kernelMIGGpuInstance[i].pShare = NULL; + } + + kmigmgrInitGPUInstanceInfo(pGpu, pKernelMIGManager, &pKernelMIGManager->kernelMIGGpuInstance[i]); + } +} + +/*! + * @brief Disable RC Watchdog + */ +NV_STATUS +kmigmgrDisableWatchdog_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMigManager +) +{ + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + NvU32 wdFlags = pKernelRc->watchdog.flags; + NvS32 enableRequestsRefcount; + NvS32 disableRequestsRefcount; + NvS32 softDisableRequestsRefcount; + + krcWatchdogGetReservationCounts(pKernelRc, + &enableRequestsRefcount, + &disableRequestsRefcount, + &softDisableRequestsRefcount); + + // + // If clients have made requests to the watchdog, we can't enable MIG until + // these clients have gone away because we disallow them from modifying WD + // state while MIG is active but these clients need to release their + // refcount on exit + // + if ((enableRequestsRefcount != 0) || (disableRequestsRefcount != 0) || + (softDisableRequestsRefcount != 0)) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to disable watchdog with outstanding reservations - enable: %d disable: %d softDisable: %d.\n", + enableRequestsRefcount, + disableRequestsRefcount, + softDisableRequestsRefcount); + + return NV_ERR_STATE_IN_USE; + } + + NV_CHECK_OR_RETURN(LEVEL_SILENT, (wdFlags & WATCHDOG_FLAGS_INITIALIZED) != 0x0, NV_OK); + + pKernelMigManager->bRestoreWatchdog = NV_TRUE; + pKernelMigManager->bReenableWatchdog = (wdFlags & WATCHDOG_FLAGS_DISABLED) == 0x0; + + return krcWatchdogShutdown(pGpu, pKernelRc); +} + +/*! + * @brief Enable RC Watchdog if it was enabled before kmigmgrDisableWatchdog invocation + */ +NV_STATUS +kmigmgrRestoreWatchdog_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMigManager +) +{ + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + + NV_CHECK_OR_RETURN(LEVEL_SILENT, pKernelMigManager->bRestoreWatchdog, NV_OK); + + if (pKernelMigManager->bReenableWatchdog) + { + krcWatchdogEnable(pKernelRc, NV_FALSE /* bOverRide */); + } + + pKernelMigManager->bRestoreWatchdog = NV_FALSE; + pKernelMigManager->bReenableWatchdog = NV_FALSE; + + return krcWatchdogInit_HAL(pGpu, pKernelRc); +} + +/*! + * @brief Function to set swizzId in use + */ +NV_STATUS +kmigmgrSetSwizzIdInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId +) +{ + // Validate that same ID is not already set and then set the ID + NvU64 mask = NVBIT64(swizzId); + + if (swizzId >= KMIGMGR_MAX_GPU_SWIZZID) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (mask & pKernelMIGManager->swizzIdInUseMask) + { + NV_PRINTF(LEVEL_ERROR, "SwizzID - %d already in use\n", swizzId); + DBG_BREAKPOINT(); + return NV_ERR_STATE_IN_USE; + } + + pKernelMIGManager->swizzIdInUseMask |= mask; + + return NV_OK; +} + +/*! 
+ * @brief Function to mark swizzId free + */ +NV_STATUS +kmigmgrClearSwizzIdInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId +) +{ + // Validate that same ID is not already set and then set the ID + NvU64 mask = NVBIT64(swizzId); + + if (swizzId >= KMIGMGR_MAX_GPU_SWIZZID) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (!(mask & pKernelMIGManager->swizzIdInUseMask)) + { + NV_PRINTF(LEVEL_ERROR, "SwizzID - %d not in use\n", swizzId); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + pKernelMIGManager->swizzIdInUseMask &= ~mask; + + return NV_OK; +} + +/*! + * @brief Function to see if swizzId in use + */ +NvBool +kmigmgrIsSwizzIdInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId +) +{ + NvU64 mask = NVBIT64(swizzId); + + if (mask & pKernelMIGManager->swizzIdInUseMask) + return NV_TRUE; + + return NV_FALSE; +} + +/* + * @brief Return global swizzId mask + */ +NvU64 +kmigmgrGetSwizzIdInUseMask_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + return pKernelMIGManager->swizzIdInUseMask; +} + +/*! + * @brief Marks the given engines as in use by some GPU instance + */ +NV_STATUS +kmigmgrSetEnginesInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + ENGTYPE_BIT_VECTOR *pEngines +) +{ + ENGTYPE_BIT_VECTOR tempEngines; + + NV_ASSERT_OR_RETURN(pEngines != NULL, NV_ERR_INVALID_ARGUMENT); + + bitVectorAnd(&tempEngines, pEngines, &pKernelMIGManager->partitionableEnginesInUse); + // Ensure no engine in given mask is marked as in-use + NV_ASSERT_OR_RETURN(bitVectorTestAllCleared(&tempEngines), NV_ERR_STATE_IN_USE); + + // partitionableEnginesInUse |= pEngines + bitVectorOr(&pKernelMIGManager->partitionableEnginesInUse, + &pKernelMIGManager->partitionableEnginesInUse, + pEngines); + return NV_OK; +} + +/*! + * @brief Marks the given sys pipes as no longer in use by any GPU instance + */ +NV_STATUS +kmigmgrClearEnginesInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + ENGTYPE_BIT_VECTOR *pEngines +) +{ + ENGTYPE_BIT_VECTOR tempEngines; + + NV_ASSERT_OR_RETURN(pEngines != NULL, NV_ERR_INVALID_ARGUMENT); + + bitVectorAnd(&tempEngines, pEngines, &pKernelMIGManager->partitionableEnginesInUse); + // Ensure every engine in given mask is marked as in-use + NV_ASSERT_OR_RETURN(bitVectorTestEqual(&tempEngines, pEngines), NV_ERR_STATE_IN_USE); + + // partitionableEnginesInUse &= ~(pEngines) + bitVectorComplement(&tempEngines, pEngines); + bitVectorAnd(&pKernelMIGManager->partitionableEnginesInUse, + &pKernelMIGManager->partitionableEnginesInUse, + &tempEngines); + return NV_OK; +} + +/*! + * @brief Checks whether given engine is in use by any GPU instance + */ +NvBool +kmigmgrIsEngineInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 engineType +) +{ + return bitVectorTest(&pKernelMIGManager->partitionableEnginesInUse, engineType); +} + +/* + * @brief Determines whether NV2080_ENGINE_TYPE can be partitioned + */ +NvBool +kmigmgrIsEnginePartitionable_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 engineType +) +{ + return kmigmgrIsMIGSupported(pGpu, pKernelMIGManager) && + (NV2080_ENGINE_TYPE_IS_COPY(engineType) || + NV2080_ENGINE_TYPE_IS_GR(engineType) || + NV2080_ENGINE_TYPE_IS_NVDEC(engineType) || + NV2080_ENGINE_TYPE_IS_NVENC(engineType) || + NV2080_ENGINE_TYPE_IS_NVJPEG(engineType) || + (engineType == NV2080_ENGINE_TYPE_OFA)); +} + +/*! 
+ * @brief Function to determine whether global NV2080_ENGINE_TYPE belongs to given + * gpu/compute instance. + * + * @return NV_TRUE if this engine falls within the given instance. NV_FALSE + * otherwise. Non-partitioned engines fall within all instances. + */ +NvBool +kmigmgrIsEngineInInstance_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 globalEngType, + MIG_INSTANCE_REF ref +) +{ + NvU32 unused; + return kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + globalEngType, + &unused) == NV_OK; +} + +/*! + * @brief Function to allocate N available sys pipes for the given swizzid. + * This function will only retrieve available pipes according to the + * sys pipe free mask in KernelMIGManager, and is not responsible for + * updating that mask. + * + * @param[IN] pGpu + * @param[IN] pKernerlMIGManager + * @param[IN] engineCount Number of engines requested + * @param[IN] engineRange Range of acceptable NV2080_ENGINE_TYPE to allocate + * @param[IN/OUT] pInstanceEngines Bitmask tracking engines owned by MIG instance + */ +NV_STATUS +kmigmgrGetFreeEngines_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 engineCount, + NV_RANGE engineRange, + ENGTYPE_BIT_VECTOR *pInstanceEngines +) +{ + const KERNEL_MIG_MANAGER_STATIC_INFO *pStaticInfo = kmigmgrGetStaticInfo(pGpu, pKernelMIGManager); + ENGTYPE_BIT_VECTOR partitionableEngines; + ENGTYPE_BIT_VECTOR availableEngines; + NvU32 numAllocated; + NvU32 engineType; + + NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pStaticInfo->pPartitionableEngines != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(!rangeIsEmpty(engineRange), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pInstanceEngines != NULL, NV_ERR_INVALID_ARGUMENT); + + if (engineCount == 0) + { + return NV_OK; + } + + bitVectorFromRaw(&partitionableEngines, + &pStaticInfo->pPartitionableEngines->engineMask, + sizeof(pStaticInfo->pPartitionableEngines->engineMask)); + + NV_ASSERT_OR_RETURN(!bitVectorTestAllCleared(&partitionableEngines), + NV_ERR_INVALID_STATE); + + // availableEngines = (ENGINE_MASK & partitionableEngines) & ~enginesInUse + bitVectorClrAll(&availableEngines); + bitVectorSetRange(&availableEngines, engineRange); + bitVectorAnd(&availableEngines, &availableEngines, &partitionableEngines); + { + ENGTYPE_BIT_VECTOR enginesNotInUse; + bitVectorComplement(&enginesNotInUse, &pKernelMIGManager->partitionableEnginesInUse); + bitVectorAnd(&availableEngines, &availableEngines, &enginesNotInUse); + } + + numAllocated = 0; + FOR_EACH_IN_BITVECTOR(&availableEngines, engineType) + { + if (numAllocated == engineCount) + break; + + bitVectorSet(pInstanceEngines, engineType); + numAllocated++; + } + FOR_EACH_IN_BITVECTOR_END(); + + return NV_OK; +} + +/*! 
+ * @brief Trim runlist buffer pools + */ +void +kmigmgrTrimInstanceRunlistBufPools_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + NvU32 engineType; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance->swizzId)) + return; + + if (!ctxBufPoolIsSupported(pGpu)) + return; + + for (engineType = 0; engineType < NV2080_ENGINE_TYPE_LAST; engineType++) + { + if (!NV2080_ENGINE_TYPE_IS_VALID(engineType) || + !kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, engineType) || + !kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, kmigmgrMakeGIReference(pKernelMIGGpuInstance))) + { + continue; + } + + if (kfifoGetRunlistBufPool(pGpu, pKernelFifo, engineType) != NULL) + { + ctxBufPoolTrim(kfifoGetRunlistBufPool(pGpu, pKernelFifo, engineType)); + } + } +} + +// +// Creates runlist buffers for engines belonging to this GPU instance from non-partitionable memory and +// recreates these runlist buffers in GPU instance's memory. +// +NV_STATUS +kmigmgrCreateGPUInstanceRunlists_FWCLIENT +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 index; + NvU32 runlistId; + NvU32 engineType; + NvU32 engDesc; + NV_STATUS status = NV_OK; + NvU32 numEngines = kfifoGetNumEngines_HAL(pGpu, pKernelFifo); + NvU32 maxRunlists = kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo); + NvU64 runlistAlign; + NvU64 allocFlags; + NvU32 attr; + NV_ADDRESS_SPACE aperture; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS_PARAMS *pParams; + + // TODO: Mem partitioning check should suffice here + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance->swizzId) || + !ctxBufPoolIsSupported(pGpu)) + { + return NV_OK; + } + + kfifoRunlistGetBufAllocParams(pGpu, &aperture, &attr, &allocFlags); + allocFlags |= MEMDESC_FLAGS_OWNED_BY_CTX_BUF_POOL; + + for (index = 0; index < numEngines; index++) + { + NV_ASSERT_OK_OR_GOTO(status, + kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_INVALID, index, + ENGINE_INFO_TYPE_RUNLIST, &runlistId), + failed); + + if ((runlistId >= maxRunlists) || (runlistId >= NV_NBITS_IN_TYPE(pKernelMIGGpuInstance->runlistIdMask))) + { + status = NV_ERR_INVALID_STATE; + goto failed; + } + + // some engines share runlists. so skip if have already dealt with this runlist + if ((pKernelMIGGpuInstance->runlistIdMask & NVBIT64(runlistId)) != 0x0) + { + continue; + } + + NV_ASSERT_OK_OR_GOTO(status, + kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_RUNLIST, runlistId, + ENGINE_INFO_TYPE_NV2080, &engineType), + failed); + + NV_ASSERT_OK_OR_GOTO(status, + kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_RUNLIST, runlistId, + ENGINE_INFO_TYPE_ENG_DESC, &engDesc), + failed); + + // Check if this is a partitionable engine. Non-partitionable engine runlists can stay in RM reserved memory + if (!kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, engineType)) + { + continue; + } + + // if partitionable engine doesn't belong to this GPU instance then nothing to do + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, kmigmgrMakeGIReference(pKernelMIGGpuInstance))) + { + continue; + } + + // + // Sched is only managed by Physical RM. 
+ // If running on GSP client, we will instead allocate the runlist buffers from the ctxbuf pool + // and promote them to GSP later. GSP will skip the runlist buffer allocation during schedInit + // and wait for the RPC to memdescDescribe the allocation from client RM. + // + // OBJSCHEDMGR is not valid in kernel RM. Allocate and store runlist buffers in OBJFIFO, + // which will be sent to GSP to store in its schedmgr + // + NV_ASSERT_OK_OR_GOTO(status, + kfifoRunlistAllocBuffers(pGpu, pKernelFifo, + NV_TRUE, + aperture, + runlistId, + attr, + allocFlags, + 0, + NV_TRUE, + pKernelFifo->pppRunlistBufMemDesc[runlistId]), + failed); + + // Add runlist to GPU instance + pKernelMIGGpuInstance->runlistIdMask |= NVBIT64(runlistId); + } + + runlistAlign = NVBIT64(kfifoRunlistGetBaseShift_HAL(pKernelFifo)); + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + NV_ASSERT_OR_GOTO(pParams != NULL, failed); + + ct_assert(sizeof(pParams->runlistIdMask) == sizeof(pKernelMIGGpuInstance->runlistIdMask)); + pParams->runlistIdMask = pKernelMIGGpuInstance->runlistIdMask; + pParams->swizzId = pKernelMIGGpuInstance->swizzId; + + for (runlistId = 0; runlistId < maxRunlists; runlistId++) + { + if (pParams->runlistIdMask & NVBIT64(runlistId)) + { + for (index = 0; index < NUM_BUFFERS_PER_RUNLIST; index++) + { + MEMORY_DESCRIPTOR *pSourceMemDesc = pKernelFifo->pppRunlistBufMemDesc[runlistId][index]; + + pParams->rlBuffers[runlistId][index].base = (NvU64)memdescGetPhysAddr(pSourceMemDesc, AT_GPU, 0); + pParams->rlBuffers[runlistId][index].size = pSourceMemDesc->ActualSize; + pParams->rlBuffers[runlistId][index].alignment = runlistAlign; + pParams->rlBuffers[runlistId][index].addressSpace = memdescGetAddressSpace(pSourceMemDesc); + pParams->rlBuffers[runlistId][index].cpuCacheAttrib = attr; + + } + } + } + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_FIFO_PROMOTE_RUNLIST_BUFFERS, + pParams, + sizeof(*pParams)); + + portMemFree(pParams); + + NV_ASSERT_OK_OR_GOTO(status, status, failed); + + // + // Trim out any additional memory after runlist buffers are allocated + // from ctx buf pools + // + kmigmgrTrimInstanceRunlistBufPools(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + return NV_OK; + +failed: + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, + kmigmgrDeleteGPUInstanceRunlists_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance)); + + return status; +} + +// +// Deletes runlist buffers for all partitionable engines from GPU instance's memory and +// reallocates these runlist buffers in non-partitionable memory. 
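+// This is the counterpart of kmigmgrCreateGPUInstanceRunlists_FWCLIENT above: it
+// frees the buffers tracked in pKernelFifo->pppRunlistBufMemDesc for every runlist
+// in the GPU instance's runlistIdMask and then clears that mask.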
+// +NV_STATUS +kmigmgrDeleteGPUInstanceRunlists_FWCLIENT +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 runlistId; + NV_STATUS status = NV_OK; + NvU32 bufIdx; + MEMORY_DESCRIPTOR **ppRlBuffer; + + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance->swizzId) || + !ctxBufPoolIsSupported(pGpu)) + { + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance->runlistIdMask == 0, NV_ERR_INVALID_STATE); + return NV_OK; + } + + FOR_EACH_INDEX_IN_MASK(64, runlistId, pKernelMIGGpuInstance->runlistIdMask) + { + for (bufIdx = 0; bufIdx < NUM_BUFFERS_PER_RUNLIST; bufIdx++) + { + ppRlBuffer = &(pKernelFifo->pppRunlistBufMemDesc[runlistId][bufIdx]); + + if (*ppRlBuffer != NULL) + { + memdescFree(*ppRlBuffer); + memdescDestroy(*ppRlBuffer); + *ppRlBuffer = NULL; + } + } + + // remove runlist from GPU instance + pKernelMIGGpuInstance->runlistIdMask &= ~(NVBIT64(runlistId)); + + } + FOR_EACH_INDEX_IN_MASK_END; + + return status; +} + +/*! + * @brief Load MIG instance topology from persistence, if available. + * If MIG is disabled, this operation will be skipped with a warning. + */ +NV_STATUS +kmigmgrRestoreFromPersistence_PF +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + NV_STATUS status = NV_OK; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY *pTopologySave = NULL; + NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *pPartImportParams = NULL; + NVC637_CTRL_EXEC_PARTITIONS_IMPORT_EXPORT_PARAMS *pExecPartImportParams = NULL; + NvU32 GIIdx; + NvU32 CIIdx; + NvBool bTopologyValid; + NvHandle hClient = NV01_NULL_OBJECT; + NvHandle hDevice = NV01_NULL_OBJECT; + NvHandle hSubdevice = NV01_NULL_OBJECT; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, + gpumgrGetSystemMIGInstanceTopo(gpuGetDBDF(pGpu), &pTopologySave), + NV_OK); + + // Check to see whether there was actually anything saved + for (GIIdx = 0; GIIdx < NV_ARRAY_ELEMENTS(pTopologySave->saveGI); ++GIIdx) + { + GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave = &pTopologySave->saveGI[GIIdx]; + if (pGPUInstanceSave->bValid) + break; + } + + bTopologyValid = (GIIdx < NV_ARRAY_ELEMENTS(pTopologySave->saveGI)); + NV_CHECK_OR_RETURN(LEVEL_SILENT, bTopologyValid, NV_OK); + + if (!IS_MIG_ENABLED(pGpu)) + { + NV_PRINTF(LEVEL_WARNING, "Skipping reinitialization of persistent MIG instances due to MIG disablement!\n"); + // + // If we ended up here, we have inconsistent state in that there are instances to be restored + // but MIG is disabled. This also means, that /proc filesystem is populated with nodes for the + // instances that we are expected to restore, but wont do so. Clean them up. 
+ // + gpumgrUnregisterRmCapsForMIGGI(gpuGetDBDF(pGpu)); + return NV_OK; + } + + NV_ASSERT_OK_OR_RETURN( + rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, &hClient, &hDevice, &hSubdevice)); + + pPartImportParams = portMemAllocNonPaged(sizeof(*pPartImportParams)); + NV_CHECK_OR_ELSE(LEVEL_ERROR, pPartImportParams != NULL, + status = NV_ERR_NO_MEMORY; + goto cleanup; ); + pExecPartImportParams = portMemAllocNonPaged(sizeof(*pExecPartImportParams)); + NV_CHECK_OR_ELSE(LEVEL_ERROR, pExecPartImportParams != NULL, + status = NV_ERR_NO_MEMORY; + goto cleanup; ); + + for (GIIdx = 0; GIIdx < NV_ARRAY_ELEMENTS(pTopologySave->saveGI); ++GIIdx) + { + GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave = &pTopologySave->saveGI[GIIdx]; + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance; + + if (!pGPUInstanceSave->bValid) + continue; + + portMemSet(pPartImportParams, 0, sizeof(*pPartImportParams)); + pPartImportParams->swizzId = pGPUInstanceSave->swizzId; + portMemCopy(&pPartImportParams->info, sizeof(pPartImportParams->info), + &pGPUInstanceSave->giInfo, sizeof(pGPUInstanceSave->giInfo)); + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_INTERNAL_KMIGMGR_IMPORT_GPU_INSTANCE, + pPartImportParams, + sizeof(*pPartImportParams)), + cleanup); + + NV_ASSERT_OK_OR_GOTO(status, + kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager, pGPUInstanceSave->swizzId, &pKernelMIGGpuInstance), + cleanup); + + // Restore capability caps + pKernelMIGGpuInstance->pOsRmCaps = pGPUInstanceSave->pOsRmCaps; + + for (CIIdx = 0; CIIdx < NV_ARRAY_ELEMENTS(pGPUInstanceSave->saveCI); ++CIIdx) + { + GPUMGR_SAVE_COMPUTE_INSTANCE *pComputeInstanceSave = &pGPUInstanceSave->saveCI[CIIdx]; + NvHandle hSubscription; + NVC637_ALLOCATION_PARAMETERS alloc; + + if (!pComputeInstanceSave->bValid) + continue; + + portMemSet(&alloc, 0, sizeof(alloc)); + alloc.swizzId = pGPUInstanceSave->swizzId; + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->AllocWithSecInfo(pRmApi, + hClient, + hSubdevice, + &hSubscription, + AMPERE_SMC_PARTITION_REF, + &alloc, + RMAPI_ALLOC_FLAGS_NONE, + NULL, + &pRmApi->defaultSecInfo), + cleanup); + + portMemSet(pExecPartImportParams, 0, sizeof(*pExecPartImportParams)); + pExecPartImportParams->id = CIIdx; + portMemCopy(&pExecPartImportParams->info, sizeof(pExecPartImportParams->info), + &pComputeInstanceSave->ciInfo, sizeof(pComputeInstanceSave->ciInfo)); + + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Control(pRmApi, + hClient, + hSubscription, + NVC637_CTRL_CMD_EXEC_PARTITIONS_IMPORT, + pExecPartImportParams, + sizeof(*pExecPartImportParams)), + cleanup); + + // Restore capability caps + pKernelMIGGpuInstance->MIGComputeInstance[pExecPartImportParams->id].pOsRmCaps = pComputeInstanceSave->pOsRmCaps; + + pRmApi->Free(pRmApi, hClient, hSubscription); + } + } + +cleanup: + rmapiutilFreeClientAndDeviceHandles(pRmApi, &hClient, &hDevice, &hSubdevice); + portMemFree(pPartImportParams); + portMemFree(pExecPartImportParams); + + // + // Let stateUnload handle an error teardown case, since it has to be + // coordinated between CPU/GSP + // + return status; +} + +/*! + * @brief Load MIG instance topology from persistence, if available. + * If MIG is disabled, this operation will be skipped with a warning. 
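+ *
+ *          Restore proceeds in order: MIG state is re-enabled via kmigmgrSetMIGState, each saved
+ *          GPU instance is re-created with kmigmgrCreateGPUInstance, and each saved compute
+ *          instance is re-created with kmigmgrCreateComputeInstances_HAL, with capability caps
+ *          re-attached as the instances come back. On any failure, everything created so far is
+ *          torn down and MIG state is disabled again.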
+ */ +NV_STATUS +kmigmgrRestoreFromPersistence_VF +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + NV_STATUS status = NV_OK; + GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY *pTopologySave = NULL; + NvU32 GIIdx; + NvU32 CIIdx; + NvBool bTopologyValid; + NvBool bMemoryPartitioningNeeded; + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, + gpumgrGetSystemMIGInstanceTopo(gpuGetDBDF(pGpu), &pTopologySave), + NV_OK); + + // Check to see whether there was actually anything saved + for (GIIdx = 0; GIIdx < NV_ARRAY_ELEMENTS(pTopologySave->saveGI); ++GIIdx) + { + GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave = &pTopologySave->saveGI[GIIdx]; + if (pGPUInstanceSave->bValid) + break; + } + + bTopologyValid = (GIIdx < NV_ARRAY_ELEMENTS(pTopologySave->saveGI)); + NV_CHECK_OR_RETURN(LEVEL_SILENT, bTopologyValid, NV_OK); + + if (!IS_MIG_ENABLED(pGpu)) + { + NV_PRINTF(LEVEL_WARNING, "Skipping reinitialization of persistent MIG instances due to MIG disablement!\n"); + gpumgrUnregisterRmCapsForMIGGI(gpuGetDBDF(pGpu)); + return NV_OK; + } + + bMemoryPartitioningNeeded = kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pTopologySave->saveGI[0].swizzId); + + // Perform all initialization that must be done when MIG is first enabled + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrSetMIGState(pGpu, pKernelMIGManager, bMemoryPartitioningNeeded, NV_TRUE, NV_FALSE)); + + for (GIIdx = 0; GIIdx < NV_ARRAY_ELEMENTS(pTopologySave->saveGI); ++GIIdx) + { + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance; + GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave = &pTopologySave->saveGI[GIIdx]; + KMIGMGR_CREATE_GPU_INSTANCE_PARAMS restore = + { + .type = KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_RESTORE, + .inst.restore.pGPUInstanceSave = pGPUInstanceSave + }; + NvU32 swizzId; + + if (!pGPUInstanceSave->bValid) + continue; + + // Create a GPU instance using the saved data + NV_CHECK_OK_OR_GOTO(status, LEVEL_WARNING, + kmigmgrCreateGPUInstance(pGpu, pKernelMIGManager, &swizzId, restore, NV_TRUE, NV_FALSE), + fail); + + NV_ASSERT_OK_OR_GOTO(status, + kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager, swizzId, &pKernelMIGGPUInstance), + fail); + + // Restore capability caps + pKernelMIGGPUInstance->pOsRmCaps = pGPUInstanceSave->pOsRmCaps; + + for (CIIdx = 0; CIIdx < NV_ARRAY_ELEMENTS(pGPUInstanceSave->saveCI); ++CIIdx) + { + GPUMGR_SAVE_COMPUTE_INSTANCE *pComputeInstanceSave = &pGPUInstanceSave->saveCI[CIIdx]; + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS restore = + { + .type = KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_RESTORE, + .inst.restore.pComputeInstanceSave = pComputeInstanceSave + }; + NvU32 id; + + if (!pComputeInstanceSave->bValid) + continue; + + // Create a compute instance on this GPU instance using the saved data + NV_CHECK_OK_OR_GOTO(status, LEVEL_WARNING, + kmigmgrCreateComputeInstances_HAL(pGpu, pKernelMIGManager, pKernelMIGGPUInstance, NV_FALSE, restore, &id, NV_FALSE), + fail); + + // Restore capability caps + pKernelMIGGPUInstance->MIGComputeInstance[id].pOsRmCaps = pComputeInstanceSave->pOsRmCaps; + } + } + + return NV_OK; + +fail: + + // Clean up anything we created and bail + FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pKernelMIGGPUInstance) + { + for (CIIdx = 0; CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGPUInstance->MIGComputeInstance); ++CIIdx) + { + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = &pKernelMIGGPUInstance->MIGComputeInstance[CIIdx]; + + // Skip invalid compute instances + if (!pMIGComputeInstance->bValid) + continue; + + 
NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + kmigmgrDeleteComputeInstance(pGpu, pKernelMIGManager, pKernelMIGGPUInstance, CIIdx, NV_TRUE)); + } + + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + kmigmgrInvalidateGPUInstance(pGpu, pKernelMIGManager, pKernelMIGGPUInstance->swizzId, NV_TRUE)); + } + FOR_EACH_VALID_GPU_INSTANCE_END(); + + NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status, LEVEL_ERROR, + kmigmgrSetMIGState(pGpu, pKernelMIGManager, bMemoryPartitioningNeeded, NV_FALSE, NV_FALSE)); + + return status; +} + +/* + * @brief Initialize MIG gpu instance + */ +void +kmigmgrInitGPUInstanceInfo_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + NvU32 i; + + bitVectorClrAll(&pKernelMIGGpuInstance->exclusiveEngMask); + bitVectorClrAll(&pKernelMIGGpuInstance->sharedEngMask); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); ++i) + { + NV_ASSERT(!pKernelMIGGpuInstance->MIGComputeInstance[i].bValid); + pKernelMIGGpuInstance->MIGComputeInstance[i].pOsRmCaps = NULL; + pKernelMIGGpuInstance->MIGComputeInstance[i].id = KMIGMGR_COMPUTE_INSTANCE_ID_INVALID; + } + + pKernelMIGGpuInstance->swizzId = KMIGMGR_SWIZZID_INVALID; + pKernelMIGGpuInstance->hMemory = NV01_NULL_OBJECT; + pKernelMIGGpuInstance->pShare = NULL; + pKernelMIGGpuInstance->pMemoryPartitionHeap = NULL; + pKernelMIGGpuInstance->bValid = NV_FALSE; + pKernelMIGGpuInstance->memRange = NV_RANGE_EMPTY; + pKernelMIGGpuInstance->pMIGGpuInstance = NULL; + pKernelMIGGpuInstance->pOsRmCaps = NULL; + pKernelMIGGpuInstance->pProfile = NULL; +} + +/*! + * @brief Function to set device profiling in use + */ +NV_STATUS +kmigmgrSetDeviceProfilingInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + NV_ASSERT_OR_RETURN(!kmigmgrIsDeviceProfilingInUse(pGpu, pKernelMIGManager), + NV_ERR_STATE_IN_USE); + pKernelMIGManager->bDeviceProfilingInUse = NV_TRUE; + return NV_OK; +} + +/*! + * @brief Function to clear device profiling in-use + */ +void +kmigmgrClearDeviceProfilingInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + pKernelMIGManager->bDeviceProfilingInUse = NV_FALSE; +} + +/*! + * @brief Function to check if device profiling is in-use + */ +NvBool +kmigmgrIsDeviceProfilingInUse_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + return pKernelMIGManager->bDeviceProfilingInUse; +} + +/*! + * @brief Function to check if specific client is subscribed to DeviceProfiling + */ +NvBool +kmigmgrIsClientUsingDeviceProfiling_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvHandle hClient +) +{ + RsClient *pRsClient; + GPUInstanceSubscription *pGPUInstanceSubscription; + Subdevice *pSubdevice; + NV_STATUS status; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, IS_MIG_ENABLED(pGpu), NV_FALSE); + + if (!kmigmgrIsDeviceProfilingInUse(pGpu, pKernelMIGManager)) + { + return NV_FALSE; + } + + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + serverGetClientUnderLock(&g_resServ, hClient, &pRsClient), + return NV_FALSE; ); + + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + subdeviceGetByGpu(pRsClient, pGpu, &pSubdevice), + return NV_FALSE; ); + + NV_CHECK_OK_OR_ELSE(status, LEVEL_ERROR, + gisubscriptionGetGPUInstanceSubscription(pRsClient, RES_GET_HANDLE(pSubdevice), &pGPUInstanceSubscription), + return NV_FALSE; ); + + return gisubscriptionIsDeviceProfiling(pGPUInstanceSubscription); +} + +/*! 
+ * @brief enable all LCE engines for use by GPU instances + */ +NV_STATUS +kmigmgrEnableAllLCEs_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvBool bEnableAllLCEs +) +{ + KernelCE *pKCe = NULL; + NvU32 i; + + // + // AMODEL support of CEs is faked. No actual work needs to be done for + // AMODEL here, so just return NV_OK early to avoid triggering assertions. + // + NV_CHECK_OR_RETURN(LEVEL_SILENT, !IsAMODEL(pGpu), NV_OK); + + for (i = 0; i < ENG_CE__SIZE_1; ++i) + { + pKCe = GPU_GET_KCE(pGpu, i); + + if (pKCe != NULL) + break; + } + + NV_ASSERT_OR_RETURN(pKCe, NV_ERR_INSUFFICIENT_RESOURCES); + + if (bEnableAllLCEs) + NV_ASSERT_OK_OR_RETURN(kceUpdateClassDB_HAL(pGpu, pKCe)); + else + NV_ASSERT_OK_OR_RETURN(kceTopLevelPceLceMappingsUpdate(pGpu, pKCe)); + + return NV_OK; +} + +/*! + * @brief Retrieves instance(s) associated with a client, if applicable + */ +NV_STATUS +kmigmgrGetInstanceRefFromClient_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvHandle hClient, + MIG_INSTANCE_REF *pRef +) +{ + NV_STATUS status = NV_OK; + RsClient *pRsClient; + GPUInstanceSubscription *pGPUInstanceSubscription; + ComputeInstanceSubscription *pComputeInstanceSubscription = NULL; + Subdevice *pSubdevice; + MIG_INSTANCE_REF ref; + + NV_ASSERT_OR_RETURN(pRef != NULL, NV_ERR_INVALID_ARGUMENT); + *pRef = kmigmgrMakeNoMIGReference(); + + if (!IS_MIG_IN_USE(pGpu)) + { + return NV_ERR_INVALID_STATE; + } + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + subdeviceGetByGpu(pRsClient, pGpu, &pSubdevice)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + gisubscriptionGetGPUInstanceSubscription(pRsClient, RES_GET_HANDLE(pSubdevice), + &pGPUInstanceSubscription)); + + ref.pKernelMIGGpuInstance = pGPUInstanceSubscription->pKernelMIGGpuInstance; + + status = cisubscriptionGetComputeInstanceSubscription(pRsClient, + RES_GET_HANDLE(pGPUInstanceSubscription), + &pComputeInstanceSubscription); + if (status == NV_OK) + { + ref = kmigmgrMakeCIReference(pGPUInstanceSubscription->pKernelMIGGpuInstance, + pComputeInstanceSubscription->pMIGComputeInstance); + } + else + { + ref = kmigmgrMakeGIReference(pGPUInstanceSubscription->pKernelMIGGpuInstance); + // Quash status, this is optional + status = NV_OK; + } + + NV_CHECK_OR_RETURN(LEVEL_SILENT, kmigmgrIsMIGReferenceValid(&ref), NV_ERR_INVALID_STATE); + *pRef = ref; + return status; +} + +/*! 
+ * @brief Retrieves GPU instance heap associated with a client, if applicable + */ +NV_STATUS +kmigmgrGetMemoryPartitionHeapFromClient_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvHandle hClient, + Heap **ppMemoryPartitionHeap +) +{ + MIG_INSTANCE_REF ref; + NV_STATUS rmStatus = NV_OK; + + NV_ASSERT_OR_RETURN(IS_MIG_IN_USE(pGpu), NV_ERR_INVALID_STATE); + + rmStatus = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref); + if ((rmStatus != NV_OK) || !kmigmgrIsMIGReferenceValid(&ref)) + { + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilegeByHandle(hClient); + + // It's okay for kernel/root clients to not be associated to a GPU instance + if (privLevel >= RS_PRIV_LEVEL_KERNEL) + { + rmStatus = NV_OK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Failed to get GPU instance for non-privileged client hClient=0x%08x!\n", + hClient); + + // if we got here due to a bogus GPU instance info, actually return an error + if (rmStatus == NV_OK) + rmStatus = NV_ERR_INVALID_STATE; + } + } + else + { + NV_ASSERT_OR_RETURN(ppMemoryPartitionHeap != NULL, NV_ERR_INVALID_ARGUMENT); + *ppMemoryPartitionHeap = ref.pKernelMIGGpuInstance->pMemoryPartitionHeap; + NV_PRINTF(LEVEL_INFO, + "GPU instance heap found for hClient = 0x%08x with swizzId = %d!\n", + hClient, ref.pKernelMIGGpuInstance->swizzId); + } + + return rmStatus; +} + +/*! + * @brief Retrieves swizzid associated with a client, if applicable + */ +NV_STATUS +kmigmgrGetSwizzIdFromClient_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvHandle hClient, + NvU32 *pSwizzId +) +{ + MIG_INSTANCE_REF ref; + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + + *pSwizzId = ref.pKernelMIGGpuInstance->swizzId; + return NV_OK; +} + +/*! 
+ * @brief Printout properties of specified MIG gpu instance + */ +void +kmigmgrPrintGPUInstanceInfo_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + NV_STATUS status; + const MIG_GPU_INSTANCE_MEMORY_CONFIG *pGPUInstanceMemConfig; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NV_RANGE partitionableMemoryRange = memmgrGetMIGPartitionableMemoryRange(pGpu, pMemoryManager); + + NvU32 grCount = kmigmgrCountEnginesOfType(&pKernelMIGGpuInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_GR(0)); + NvU32 ceCount = kmigmgrCountEnginesOfType(&pKernelMIGGpuInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_COPY(0)); + NvU32 decCount = kmigmgrCountEnginesOfType(&pKernelMIGGpuInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_NVDEC(0)); + NvU32 encCount = kmigmgrCountEnginesOfType(&pKernelMIGGpuInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_NVENC(0)); + NvU32 jpgCount = kmigmgrCountEnginesOfType(&pKernelMIGGpuInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_NVJPG); + NvU32 ofaCount = kmigmgrCountEnginesOfType(&pKernelMIGGpuInstance->resourceAllocation.engines, + NV2080_ENGINE_TYPE_OFA); + +#define PADDING_STR "-----------------------------------------------------------------" + + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18s | %18s | %18s |\n", + "SwizzId", + "SwizzId Table Mask", + "Gpc Count"); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18d | %18s | %18d |\n", + pKernelMIGGpuInstance->swizzId, + "NOT IMPLEMENTED", + pKernelMIGGpuInstance->resourceAllocation.gpcCount); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18s | %18s | %18s |\n", + "OBJGR Count", + "OBJCE Count", + "NVDEC Count"); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18d | %18d | %18d |\n", + grCount, + ceCount, + decCount); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18s | %18s | %18s |\n", + "NVENC Count", + "NVJPG Count", + "NVOFA Count"); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18d | %18d | %18d |\n", + encCount, + jpgCount, + ofaCount); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18s | %18s | %18s |\n", + "VEID Offset", + "VEID Count", + "VEID-GR Map"); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18d | %18d | %18llx |\n", + pKernelMIGGpuInstance->resourceAllocation.veidOffset, + pKernelMIGGpuInstance->resourceAllocation.veidCount, + DRF_MASK64(pKernelMIGGpuInstance->resourceAllocation.veidCount : 0) << pKernelMIGGpuInstance->resourceAllocation.veidOffset); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %29s | %29s |\n", + "Partitionable", + "Partitionable"); + NV_PRINTF(LEVEL_INFO, "| %29s | %29s |\n", + "Memory Start Addr", + "Memory End Addr"); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %29llx | %29llx |\n", + partitionableMemoryRange.lo, + partitionableMemoryRange.hi); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18s | %18s | %18s |\n", + "Local Instance", + "Local Instance", + "Local Instance"); + NV_PRINTF(LEVEL_INFO, "| %18s | %18s | %18s |\n", + "Memory Start Addr", + "Memory End Addr", + "Size in Bytes"); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); 
+ NV_PRINTF(LEVEL_INFO, "| %18llx | %18llx | %18llx |\n", + pKernelMIGGpuInstance->memRange.lo, + pKernelMIGGpuInstance->memRange.hi, + rangeLength(pKernelMIGGpuInstance->memRange)); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %18s | %18s | %18s |\n", + "Local Instance", + "Local Instance", + "Local Instance"); + NV_PRINTF(LEVEL_INFO, "| %18s | %18s | %18s |\n", + "Start VMMU Seg.", + "End VMMU Seg.", + "Size in VMMU Seg."); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + + NV_ASSERT_OK_OR_ELSE(status, + kmemsysGetMIGGPUInstanceMemConfigFromSwizzId(pGpu, pKernelMemorySystem, pKernelMIGGpuInstance->swizzId, &pGPUInstanceMemConfig), + return;); + NV_PRINTF(LEVEL_INFO, "| %18llx | %18llx | %18llx |\n", + pGPUInstanceMemConfig->startingVmmuSegment, + (pGPUInstanceMemConfig->startingVmmuSegment + + pGPUInstanceMemConfig->memSizeInVmmuSegment) - 1, + pGPUInstanceMemConfig->memSizeInVmmuSegment); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); +#undef PADDING_STR +#endif // NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +} + +/*! + * @brief Function to set GPU instance information representing provided swizzId. + */ +NV_STATUS +kmigmgrSetGPUInstanceInfo_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId, + KMIGMGR_CREATE_GPU_INSTANCE_PARAMS params +) +{ + NvU32 i; + NvHandle hMemory = NV01_NULL_OBJECT; + NV_RANGE addrRange = NV_RANGE_EMPTY; + NV_STATUS rmStatus = NV_OK; + Heap *pMemoryPartitionHeap = NULL; + NvU32 partitionFlag = (params.type == KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_REQUEST) + ? params.inst.request.partitionFlag + : params.inst.restore.pGPUInstanceSave->giInfo.partitionFlags; + + if (swizzId >= KMIGMGR_MAX_GPU_SWIZZID) + { + return NV_ERR_INVALID_ARGUMENT; + } + + for (i = 0; i < KMIGMGR_MAX_GPU_INSTANCES; ++i) + { + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = &pKernelMIGManager->kernelMIGGpuInstance[i]; + + // Find first invalid GPU instance and use it to save GPU instance data + if (!pKernelMIGGpuInstance->bValid) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + rmStatus = memmgrAllocMIGGPUInstanceMemory_HAL(pGpu, pMemoryManager, swizzId, + &hMemory, &addrRange, + &pMemoryPartitionHeap); + NV_CHECK_OR_RETURN(LEVEL_ERROR, rmStatus == NV_OK, rmStatus); + + // Mark GPU instance as valid as we use GPU instance Invalidation for cleanup + pKernelMIGGpuInstance->bValid = NV_TRUE; + pKernelMIGGpuInstance->swizzId = swizzId; + pKernelMIGGpuInstance->hMemory = hMemory; + pKernelMIGGpuInstance->memRange = addrRange; + pKernelMIGGpuInstance->pMemoryPartitionHeap = pMemoryPartitionHeap; + pKernelMIGGpuInstance->partitionFlag = partitionFlag; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetProfileByPartitionFlag(pGpu, pKernelMIGManager, partitionFlag, &pKernelMIGGpuInstance->pProfile)); + + // Allocate RsShared for the GPU instance + NV_ASSERT_OK_OR_RETURN(serverAllocShare(&g_resServ, classInfo(RsShared), + &pKernelMIGGpuInstance->pShare)); + + // Get resources associated with this swizzId + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrSwizzIdToResourceAllocation(pGpu, pKernelMIGManager, swizzId, params, + pKernelMIGGpuInstance, + &pKernelMIGGpuInstance->resourceAllocation)); + + // Set assigned engines as in use + NV_ASSERT_OK_OR_RETURN( + kmigmgrSetEnginesInUse(pGpu, pKernelMIGManager, &pKernelMIGGpuInstance->resourceAllocation.engines)); + + // Update engine tracking bitmasks for CI management later + bitVectorClrAll(&pKernelMIGGpuInstance->exclusiveEngMask); + 
bitVectorClrAll(&pKernelMIGGpuInstance->sharedEngMask); + + // Print GPU instance info for debug + NV_PRINTF(LEVEL_INFO, "CREATING GPU instance\n"); + kmigmgrPrintGPUInstanceInfo(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + break; + } + } + + NV_ASSERT_OR_RETURN(i < KMIGMGR_MAX_GPU_INSTANCES, NV_ERR_INSUFFICIENT_RESOURCES); + return rmStatus; +} + +/*! + * @brief Function to get GPU instance information representing provided swizzId. + */ +NV_STATUS +kmigmgrGetGPUInstanceInfo_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId, + KERNEL_MIG_GPU_INSTANCE **ppKernelMIGGpuInstance +) +{ + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance; + + if (swizzId >= KMIGMGR_MAX_GPU_SWIZZID) + { + return NV_ERR_INVALID_ARGUMENT; + } + + FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pKernelMIGGPUInstance) + { + if (pKernelMIGGPUInstance->swizzId == swizzId) + { + *ppKernelMIGGpuInstance = pKernelMIGGPUInstance; + return NV_OK; + } + } + FOR_EACH_VALID_GPU_INSTANCE_END(); + + return NV_ERR_INVALID_ARGUMENT; +} + +/*! + * @brief Function to convert local NV2080_ENGINE_TYPE to global + * NV2080_ENGINE_TYPE for partitionable engines + * Currently It support GR, CE, NVDEC, NVENC, NVJPG + */ +NV_STATUS +kmigmgrGetLocalToGlobalEngineType_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + MIG_INSTANCE_REF ref, + NvU32 localEngType, + NvU32 *pGlobalEngType +) +{ + NV_ASSERT_OR_RETURN(kmigmgrIsMIGReferenceValid(&ref), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV2080_ENGINE_TYPE_IS_VALID(localEngType), + NV_ERR_INVALID_ARGUMENT); + + if (!kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, localEngType)) + { + // + // Return same engineId as local if called for non-partitioned + // 2080type engines like host engines, PMU SEC etc. + // + *pGlobalEngType = localEngType; + return NV_OK; + } + + if (ref.pMIGComputeInstance != NULL) + { + // Replace the CI-local input index with GI-local + if (kmigmgrEngineTypeXlate(&ref.pMIGComputeInstance->resourceAllocation.localEngines, localEngType, + &ref.pMIGComputeInstance->resourceAllocation.engines, &localEngType) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Compute instance Local Engine type 0x%x is not allocated to Compute instance\n", + localEngType); + return NV_ERR_INVALID_ARGUMENT; + } + } + + // Replace the GI-local input index with global + if (kmigmgrEngineTypeXlate(&ref.pKernelMIGGpuInstance->resourceAllocation.localEngines, localEngType, + &ref.pKernelMIGGpuInstance->resourceAllocation.engines, &localEngType) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU instance Local Engine type 0x%x is not allocated to GPU instance\n", + localEngType); + return NV_ERR_INVALID_ARGUMENT; + } + + *pGlobalEngType = localEngType; + return NV_OK; +} + +/*! + * @brief Function to convert global NV2080_ENGINE_TYPE to local + * NV2080_ENGINE_TYPE for partitionable engines + * Currently it supports GR, CE, NVDEC, NVENC, NVJPG + */ +NV_STATUS +kmigmgrGetGlobalToLocalEngineType_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + MIG_INSTANCE_REF ref, + NvU32 globalEngType, + NvU32 *pLocalEngType +) +{ + NV_ASSERT_OR_RETURN(kmigmgrIsMIGReferenceValid(&ref), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV2080_ENGINE_TYPE_IS_VALID(globalEngType), + NV_ERR_INVALID_ARGUMENT); + + if (!kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, globalEngType)) + { + // + // Return same engineId as global if called for non-partitioned + // 2080type engines like host engines, PMU SEC etc. 
+ // + *pLocalEngType = globalEngType; + return NV_OK; + } + + // Replace the global input index with GI-local + if (kmigmgrEngineTypeXlate(&ref.pKernelMIGGpuInstance->resourceAllocation.engines, globalEngType, + &ref.pKernelMIGGpuInstance->resourceAllocation.localEngines, &globalEngType) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Global Engine type 0x%x is not allocated to GPU instance\n", + globalEngType); + return NV_ERR_INVALID_ARGUMENT; + } + + if (ref.pMIGComputeInstance != NULL) + { + // Replace the GI-local input index with CI-local + if (kmigmgrEngineTypeXlate(&ref.pMIGComputeInstance->resourceAllocation.engines, globalEngType, + &ref.pMIGComputeInstance->resourceAllocation.localEngines, &globalEngType) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU instance Local Engine type 0x%x is not allocated to compute instance\n", + globalEngType); + return NV_ERR_INVALID_ARGUMENT; + } + } + + *pLocalEngType = globalEngType; + return NV_OK; +} + +/*! + * @brief Function to retrieve list of engine types belonging to this + * GPU instance. When MIG is enabled, GRCEs are filtered from the engine + * list, as well as any local GR engine indices outside of the range + * allocated to this GPU instance. When MIG is disabled, all non-legacy GR + * engines are filtered from the enginelist, but no CEs are filtered. + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] pSubdevice + * @param[OUT] pEngineTypes Engine type list + * @param[OUT] pEngineCount Engine type count + * + * @return NV_STATUS + * NV_OK on success + * NV_ERR_INVALID_ARGUMENT if invalid subdevice + * NV_ERR_INVALID_STATE if subdevice is not partitioned + */ +NV_STATUS +kmigmgrFilterEngineList_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + Subdevice *pSubdevice, + NvU32 *pEngineTypes, + NvU32 *pEngineCount +) +{ + MIG_INSTANCE_REF ref; + NvBool bMIGInUse = IS_MIG_IN_USE(pGpu); + NvU32 i; + + if (bMIGInUse) + { + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, RES_GET_CLIENT_HANDLE(pSubdevice), &ref)); + } + + *pEngineCount = 0; + for (i = 0; i < pGpu->engineDB.size; ++i) + { + NvU32 engineType = pGpu->engineDB.pType[i]; + NvU32 newEngineType = engineType; + NvBool bAddEngine = NV_TRUE; + + if (bMIGInUse) + { + if (kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + { + // Override the engine type with the local engine idx + NV_ASSERT_OK(kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, ref, + engineType, + &newEngineType)); + } + else + { + bAddEngine = NV_FALSE; + } + } + else if (NV2080_ENGINE_TYPE_IS_GR(engineType) && + (0 != NV2080_ENGINE_TYPE_GR_IDX(engineType))) + { + bAddEngine = NV_FALSE; + } + + if (bAddEngine) + { + pEngineTypes[(*pEngineCount)++] = newEngineType; + } + } + + return NV_OK; +} + +/** + * @brief Removes all engines which are not in this client's GPU instance from the + * partnerlist. 
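+ *        Entries whose engine type is not assigned to the caller's GPU/compute instance are
+ *        removed in place and numPartners is reduced accordingly. For example, if the list held
+ *        {CE0, CE1, NVDEC0} and only CE0 belonged to the instance, the list would be compacted
+ *        to {CE0} with numPartners == 1.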
+ * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] pSubdevice + * @param[IN/OUT] pPartnerListParams Client Partner list params + * + * @return NV_STATUS + * NV_OK on success or MIG disabled + * NV_ERR_INVALID_ARGUMENT on bad pParams + */ +NV_STATUS +kmigmgrFilterEnginePartnerList_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams +) +{ + NvU32 i, j; + MIG_INSTANCE_REF ref; + + NV_ASSERT_OR_RETURN(NULL != pPartnerListParams, NV_ERR_INVALID_ARGUMENT); + + // MIG disabled, nothing to do + if (!IS_MIG_IN_USE(pGpu)) + { + return NV_OK; + } + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, RES_GET_CLIENT_HANDLE(pSubdevice), &ref)); + + for (i = 0; i < pPartnerListParams->numPartners; ++i) + { + NvU32 engineType = pPartnerListParams->partnerList[i]; + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, ref)) + { + // Filter this entry from the partner list + for (j = i; j < pPartnerListParams->numPartners - 1; ++j) + { + pPartnerListParams->partnerList[j] = pPartnerListParams->partnerList[j + 1]; + } + + pPartnerListParams->numPartners--; + + // Break early to prevent underflow of i + if (0 == pPartnerListParams->numPartners) + { + break; + } + + i--; + } + } + + return NV_OK; +} + +/*! + * @brief Finds a GPU Instance profile matching the input request flag + */ +NV_STATUS +kmigmgrGetProfileByPartitionFlag_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 partitionFlag, + const NV2080_CTRL_INTERNAL_MIGMGR_PROFILE_INFO **ppProfile +) +{ + const KERNEL_MIG_MANAGER_STATIC_INFO *pStaticInfo = kmigmgrGetStaticInfo(pGpu, pKernelMIGManager); + NvU32 i; + + NV_ASSERT_OR_RETURN(pStaticInfo != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pStaticInfo->pProfiles != NULL, NV_ERR_INVALID_STATE); + + for (i = 0; i < pStaticInfo->pProfiles->count; ++i) + { + if (pStaticInfo->pProfiles->table[i].partitionFlag == partitionFlag) + { + *ppProfile = &pStaticInfo->pProfiles->table[i]; + return NV_OK; + } + } + + return NV_ERR_INVALID_STATE; +} + +/* + * @brief Determine illegal swizzIds based on global swizzId mask + */ +NV_STATUS +kmigmgrGetInvalidSwizzIdMask_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId, + NvU64 *pUnsupportedSwizzIdMask +) +{ + NvU64 i; + NvU64 gpuSlice[KGRMGR_MAX_GR] = + { + (NVBIT64(0) | NVBIT64(1) | NVBIT64(3) | NVBIT64(7)), + (NVBIT64(0) | NVBIT64(1) | NVBIT64(3) | NVBIT64(8)), + (NVBIT64(0) | NVBIT64(1) | NVBIT64(4) | NVBIT64(9)), + (NVBIT64(0) | NVBIT64(1) | NVBIT64(4) | NVBIT64(10)), + (NVBIT64(0) | NVBIT64(2) | NVBIT64(5) | NVBIT64(11)), + (NVBIT64(0) | NVBIT64(2) | NVBIT64(5) | NVBIT64(12)), + (NVBIT64(0) | NVBIT64(2) | NVBIT64(6) | NVBIT64(13)), + (NVBIT64(0) | NVBIT64(2) | NVBIT64(6) | NVBIT64(14)) + }; + + NV_ASSERT_OR_RETURN(NULL != pUnsupportedSwizzIdMask, NV_ERR_INVALID_ARGUMENT); + + // All bits corresponding to nonexistent swizzids are invalid + *pUnsupportedSwizzIdMask = DRF_SHIFTMASK64(63:KMIGMGR_MAX_GPU_SWIZZID); + + for (i = 0; i < KGRMGR_MAX_GR; ++i) + { + if (0 != (gpuSlice[i] & NVBIT64(swizzId))) + { + *pUnsupportedSwizzIdMask |= gpuSlice[i]; + } + } + + return NV_OK; +} + +/*! + * @brief Processes request to update partitioning mode to the given value. 
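+ *
+ *        The current SMC mode is queried from physical RM via
+ *        NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE. If an enable or disable is still pending,
+ *        the cached mode is left untouched (it takes effect on the next GPU reset); otherwise
+ *        bMIGEnabled is updated and, when MIG is enabled, the static info is loaded via
+ *        kmigmgrLoadStaticInfo_HAL.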
+ */ +NV_STATUS +kmigmgrSetPartitioningMode_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS params; + + portMemSet(¶ms, 0x0, sizeof(params)); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE, + ¶ms, + sizeof(params))); + + // Should never have reached this far + NV_ASSERT_OR_RETURN(params.smcMode != NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_UNSUPPORTED, + NV_ERR_INVALID_STATE); + + // + // If pending state, do not update mode in response to request. Mode will be + // updated on next GPU reset. + // + if ((params.smcMode == NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLE_PENDING) || + (params.smcMode == NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLE_PENDING)) + { + return NV_OK; + } + + pKernelMIGManager->bMIGEnabled = (params.smcMode == NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLED); + + // MIG Mode might not have been enabled yet, so load static info if enabled + if (IS_MIG_ENABLED(pGpu)) + { + // Initialize static info derived from physical RM + NV_ASSERT_OK_OR_RETURN(kmigmgrLoadStaticInfo_HAL(pGpu, pKernelMIGManager)); + } + + return NV_OK; +} + +/** + * @brief Function to get reference of gpu / compute instance which + * contains the given engine. If no instances are found, an error is returned. + */ +NV_STATUS +kmigmgrGetMIGReferenceFromEngineType_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 engineType, + MIG_INSTANCE_REF *pRef +) +{ + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance; + MIG_COMPUTE_INSTANCE *pMIGComputeInstance; + NvU32 CIIdx; + + NV_ASSERT_OR_RETURN(pRef != NULL, NV_ERR_INVALID_ARGUMENT); + // Default to non-attributed channel + *pRef = kmigmgrMakeNoMIGReference(); + + // Bail out early if there are no instances to attribute to + if (!IS_MIG_IN_USE(pGpu)) + return NV_ERR_NOT_SUPPORTED; + + // + // if this happens to be an RM internal channel not bound to an engine, + // attribute it to no instance + // + if (!NV2080_ENGINE_TYPE_IS_VALID(engineType)) + return NV_ERR_INVALID_ARGUMENT; + + // Engine is not partitionable, attribute to no instance + if (!kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, engineType)) + return NV_ERR_INVALID_ARGUMENT; + + pKernelMIGGPUInstance = NULL; + FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pKernelMIGGPUInstance) + { + if (kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, + kmigmgrMakeGIReference(pKernelMIGGPUInstance))) + { + break; + } + } + FOR_EACH_VALID_GPU_INSTANCE_END(); + + // Engine was partitionable, but not in any of our gpu instance. + if ((pKernelMIGGPUInstance == NULL) || !pKernelMIGGPUInstance->bValid) + return NV_ERR_INVALID_STATE; + + *pRef = kmigmgrMakeGIReference(pKernelMIGGPUInstance); + + // Attempt to find a compute instance which contains this engine + for (CIIdx = 0; + CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGPUInstance->MIGComputeInstance); + ++CIIdx) + { + pMIGComputeInstance = &pKernelMIGGPUInstance->MIGComputeInstance[CIIdx]; + + if (!pMIGComputeInstance->bValid) + continue; + + if (kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, + kmigmgrMakeCIReference(pKernelMIGGPUInstance, pMIGComputeInstance))) + { + break; + } + } + + if (CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGPUInstance->MIGComputeInstance)) + *pRef = kmigmgrMakeCIReference(pKernelMIGGPUInstance, pMIGComputeInstance); + + return NV_OK; +} + +/*! 
+ * @brief Check if we are running on a reduced config GPU then set the corresponding flag + */ +void +kmigmgrDetectReducedConfig_KERNEL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + const KERNEL_MIG_MANAGER_STATIC_INFO *pStaticInfo = kmigmgrGetStaticInfo(pGpu, pKernelMIGManager); + NvU32 i; + + for (i = 0; i < pStaticInfo->pProfiles->count; ++i) + { + NvU32 computeSize = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _COMPUTE_SIZE, + pStaticInfo->pProfiles->table[i].partitionFlag); + + // Reduced config A100 does not support 1/8 compute size + if (computeSize == NV2080_CTRL_GPU_PARTITION_FLAG_COMPUTE_SIZE_EIGHTH) + { + return; + } + } + + pKernelMIGManager->bIsA100ReducedConfig = NV_TRUE; +} + +/*! + * @brief Get the CE in GI that can be used for scrubbing + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] hClient Client handle subscribed to GI + * @param[OUT] ppCe Scrubber CE + */ +NV_STATUS +kmigmgrGetGPUInstanceScrubberCe_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvHandle hClient, + NvU32 *ceInst +) +{ + MIG_INSTANCE_REF ref; + ENGTYPE_BIT_VECTOR ces; + + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + + bitVectorClrAll(&ces); + bitVectorSetRange(&ces, NV2080_ENGINE_RANGE_COPY()); + bitVectorAnd(&ces, &ces, &ref.pKernelMIGGpuInstance->resourceAllocation.engines); + + NV_ASSERT_OR_RETURN(!bitVectorTestAllCleared(&ces), NV_ERR_INSUFFICIENT_RESOURCES); + + // Pick the first CE in the instance + *ceInst = NV2080_ENGINE_TYPE_COPY_IDX(bitVectorCountTrailingZeros(&ces)); + + return NV_OK; +} + +/*! + * @brief Copy gpu instance type cache to user provided params for + * DESCRIBE_PARTITIONS + */ +NV_STATUS +kmigmgrDescribeGPUInstances_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *pParams +) +{ + const KERNEL_MIG_MANAGER_STATIC_INFO *pStaticInfo = kmigmgrGetStaticInfo(pGpu, pKernelMIGManager); + NvU32 i; + NvU32 entryCount; + + if ((pStaticInfo == NULL) || (pStaticInfo->pProfiles == NULL)) + return NV_ERR_NOT_SUPPORTED; + + entryCount = 0; + for (i = 0; i < pStaticInfo->pProfiles->count; ++i) + { + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NV_RANGE addrRange = NV_RANGE_EMPTY; + NvU32 swizzId; + NvU32 memorySize = DRF_VAL(2080_CTRL_GPU, _PARTITION_FLAG, _MEMORY_SIZE, + pStaticInfo->pProfiles->table[i].partitionFlag); + + // Retrieve a valid id for this flag combination + switch (memorySize) + { + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_FULL: + swizzId = 0; + break; + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_HALF: + swizzId = 1; + break; + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_QUARTER: + swizzId = 3; + break; + case NV2080_CTRL_GPU_PARTITION_FLAG_MEMORY_SIZE_EIGHTH: + swizzId = 7; + break; + default: + NV_ASSERT(0); + continue; + } + + NV_ASSERT_OK(kmemsysGetMIGGPUInstanceMemInfo(pGpu, pKernelMemorySystem, swizzId, &addrRange)); + pParams->partitionDescs[entryCount].memorySize = rangeLength(addrRange); + } + + pParams->partitionDescs[entryCount].partitionFlag = pStaticInfo->pProfiles->table[i].partitionFlag; + pParams->partitionDescs[entryCount].grCount = pStaticInfo->pProfiles->table[i].grCount; + pParams->partitionDescs[entryCount].gpcCount = pStaticInfo->pProfiles->table[i].gpcCount; + pParams->partitionDescs[entryCount].veidCount = pStaticInfo->pProfiles->table[i].veidCount; + pParams->partitionDescs[entryCount].smCount = 
pStaticInfo->pProfiles->table[i].smCount; + pParams->partitionDescs[entryCount].ceCount = pStaticInfo->pProfiles->table[i].ceCount; + pParams->partitionDescs[entryCount].nvEncCount = pStaticInfo->pProfiles->table[i].nvEncCount; + pParams->partitionDescs[entryCount].nvDecCount = pStaticInfo->pProfiles->table[i].nvDecCount; + pParams->partitionDescs[entryCount].nvJpgCount = pStaticInfo->pProfiles->table[i].nvJpgCount; + pParams->partitionDescs[entryCount].nvOfaCount = pStaticInfo->pProfiles->table[i].nvOfaCount; + + entryCount++; + } + pParams->descCount = pStaticInfo->pProfiles->count; + + return NV_OK; +} + +/*! + * @brief Saves MIG compute instance topology in provided structure + */ +NV_STATUS +kmigmgrSaveComputeInstances_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + GPUMGR_SAVE_COMPUTE_INSTANCE *pComputeInstanceSaves +) +{ + NvU32 CIIdx; + NvU32 ciCount = 0; + + // Sanity checks + NV_ASSERT_OR_RETURN((pKernelMIGGpuInstance != NULL) && (pComputeInstanceSaves != NULL), + NV_ERR_INVALID_ARGUMENT); + + for (CIIdx = 0; CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); ++CIIdx) + { + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = &pKernelMIGGpuInstance->MIGComputeInstance[CIIdx]; + GPUMGR_SAVE_COMPUTE_INSTANCE *pComputeInstanceSave = &pComputeInstanceSaves[ciCount]; + NvU32 gpcIdx; + + // Skip invalid compute instances + if (!pMIGComputeInstance->bValid) + continue; + + portMemSet(pComputeInstanceSave, 0, sizeof(*pComputeInstanceSave)); + pComputeInstanceSave->bValid = NV_TRUE; + pComputeInstanceSave->ciInfo.sharedEngFlags = pMIGComputeInstance->sharedEngFlag; + pComputeInstanceSave->id = CIIdx; + pComputeInstanceSave->pOsRmCaps = pMIGComputeInstance->pOsRmCaps; + bitVectorToRaw(&pMIGComputeInstance->resourceAllocation.engines, + &pComputeInstanceSave->ciInfo.enginesMask, + sizeof(pComputeInstanceSave->ciInfo.enginesMask)); + if (IS_GSP_CLIENT(pGpu)) + { + for (gpcIdx = 0; gpcIdx < pMIGComputeInstance->resourceAllocation.gpcCount; ++gpcIdx) + { + pComputeInstanceSave->ciInfo.gpcMask |= + NVBIT32(pMIGComputeInstance->resourceAllocation.gpcIds[gpcIdx]); + } + } + else + { + pComputeInstanceSave->ciInfo.gpcMask = DRF_MASK(pMIGComputeInstance->resourceAllocation.gpcCount - 1 : 0); + } + pComputeInstanceSave->ciInfo.veidOffset = pMIGComputeInstance->resourceAllocation.veidOffset; + pComputeInstanceSave->ciInfo.veidCount = pMIGComputeInstance->resourceAllocation.veidCount; + + portMemCopy(pComputeInstanceSave->ciInfo.uuid, sizeof(pComputeInstanceSave->ciInfo.uuid), + pMIGComputeInstance->uuid.uuid, sizeof(pMIGComputeInstance->uuid.uuid)); + + ++ciCount; + } + + return NV_OK; +} + +/*! + * @brief Function to get SwizzId to allowed GrIdx, physical GPC_IDs, + * physical CE_IDs and VEIDs in a GPU instance + * + * @param[IN] swizzId SwizzId used by the GPU instance + * @param[OUT] pResourceAllocation Structure containing engine configs for a + * GPU instance. This contains engineCount and + * engine Ids. 
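+ *
+ * For KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_REQUEST, the engine mask, GPC mask and VEID
+ * configuration are exported from GSP via NV2080_CTRL_CMD_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE;
+ * for the restore type they are taken from the saved giInfo. The local engine mask is then
+ * derived with kmigmgrGetLocalEngineMask.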
+ */ +NV_STATUS +kmigmgrSwizzIdToResourceAllocation_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId, + KMIGMGR_CREATE_GPU_INSTANCE_PARAMS params, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + MIG_RESOURCE_ALLOCATION *pResourceAllocation +) +{ + NV2080_CTRL_INTERNAL_KMIGMGR_EXPORTED_GPU_INSTANCE_INFO info; + NvU32 tempGpcMask; + + NV_CHECK_OR_RETURN(LEVEL_ERROR, swizzId < KMIGMGR_MAX_GPU_SWIZZID, NV_ERR_INVALID_ARGUMENT); + + if (params.type == KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_REQUEST) + { + NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS export; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + portMemSet(&export, 0, sizeof(export)); + export.swizzId = swizzId; + + // Retrieve the info of the gpu instance GSP just created + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE, + &export, + sizeof(export))); + info = export.info; + } + else + { + info = params.inst.restore.pGPUInstanceSave->giInfo; + } + + pResourceAllocation->gpcCount = 0; + tempGpcMask = info.gpcMask; + while (tempGpcMask != 0x0) + { + NvU32 gpcIdx = portUtilCountTrailingZeros32(tempGpcMask); + pResourceAllocation->gpcIds[(pResourceAllocation->gpcCount)++] = gpcIdx; + tempGpcMask &= ~(NVBIT32(gpcIdx)); + } + + pResourceAllocation->veidCount = info.veidCount; + pResourceAllocation->veidOffset = info.veidOffset; + + bitVectorFromRaw(&pResourceAllocation->engines, info.enginesMask, sizeof(info.enginesMask)); + + // Cache the local engine mask for this instance + kmigmgrGetLocalEngineMask(&pResourceAllocation->engines, &pResourceAllocation->localEngines); + + return NV_OK; +} + +// Create client and subdevice handles to make calls into this compute instance +NV_STATUS +kmigmgrAllocComputeInstanceHandles_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + MIG_COMPUTE_INSTANCE *pMIGComputeInstance +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hGPUInstanceSubscription = NV01_NULL_OBJECT; + NvHandle hComputeInstanceSubscription = NV01_NULL_OBJECT; + NvHandle hClient; + NvHandle hDevice; + NvHandle hSubdevice; + NV_STATUS status; + + NV_ASSERT_OK_OR_RETURN( + rmapiutilAllocClientAndDeviceHandles(pRmApi, pGpu, &hClient, &hDevice, &hSubdevice)); + + { + NVC637_ALLOCATION_PARAMETERS params; + portMemSet(¶ms, 0, sizeof(params)); + params.swizzId = pKernelMIGGpuInstance->swizzId; + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Alloc(pRmApi, hClient, hSubdevice, &hGPUInstanceSubscription, AMPERE_SMC_PARTITION_REF, ¶ms), + failed); + } + + { + NVC638_ALLOCATION_PARAMETERS params; + portMemSet(¶ms, 0, sizeof(params)); + params.execPartitionId = pMIGComputeInstance->id; + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Alloc(pRmApi, hClient, hGPUInstanceSubscription, &hComputeInstanceSubscription, AMPERE_SMC_EXEC_PARTITION_REF, ¶ms), + failed); + } + + pMIGComputeInstance->instanceHandles.hClient = hClient; + pMIGComputeInstance->instanceHandles.hSubdevice = hSubdevice; + pMIGComputeInstance->instanceHandles.hSubscription = hComputeInstanceSubscription; + + return NV_OK; + +failed: + pRmApi->Free(pRmApi, hClient, hClient); + return status; +} + +/*! 
+ * @brief create compute instances + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] pKernelMIGGpuInstance + * @param[IN] bQuery If NV_TRUE, don't save created instances + * @param[IN] params List of requested compute instance to create + * @param[OUT] pCIIDs IDs of created instances + * @param[IN] bCreateCap Flag stating if MIG CI capabilities needs to be created + */ +NV_STATUS +kmigmgrCreateComputeInstances_VF +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + NvBool bQuery, + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS params, + NvU32 *pCIIDs, + NvBool bCreateCap +) +{ + NV_STATUS status = NV_OK; + NvU32 count; + ENGTYPE_BIT_VECTOR shadowExclusiveEngMask; + ENGTYPE_BIT_VECTOR shadowSharedEngMask; + MIG_COMPUTE_INSTANCE computeInstanceInfo[KMIGMGR_MAX_COMPUTE_INSTANCES]; + NvU32 CIIdx; + NvU32 freeSlots; + NvU32 createdInstances; + NvU32 inUseGpcCount; + NvU32 remainingGpcCount; + NvU32 i; + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance != NULL, NV_ERR_INVALID_ARGUMENT); + + count = (params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST) + ? params.inst.request.count + : 1; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, count != 0, NV_ERR_INVALID_ARGUMENT); + + portMemSet(computeInstanceInfo, 0, sizeof(computeInstanceInfo)); + + // Check that there's enough open compute instance slots, and count used GPCs + freeSlots = 0; + inUseGpcCount = 0; + for (CIIdx = 0; + CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++CIIdx) + { + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = &pKernelMIGGpuInstance->MIGComputeInstance[CIIdx]; + + if (pMIGComputeInstance->bValid) + inUseGpcCount += pMIGComputeInstance->resourceAllocation.gpcCount; + else + freeSlots++; + } + NV_CHECK_OR_RETURN(LEVEL_SILENT, freeSlots >= count, NV_ERR_INSUFFICIENT_RESOURCES); + + // + // Check that we have enough spare GPCs. We're going to reuse the GPU Instance + // configuration logic later on to do the actual allocation, so for now just + // check the count. + // + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance->resourceAllocation.gpcCount >= inUseGpcCount, + NV_ERR_INVALID_STATE); + remainingGpcCount = pKernelMIGGpuInstance->resourceAllocation.gpcCount - inUseGpcCount; + + // + // Cache local copies of the resource pools, we'll commit them later if we + // have to + // + bitVectorCopy(&shadowExclusiveEngMask, &pKernelMIGGpuInstance->exclusiveEngMask); + bitVectorCopy(&shadowSharedEngMask, &pKernelMIGGpuInstance->sharedEngMask); + for (CIIdx = 0; CIIdx < count; ++CIIdx) + { + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = &computeInstanceInfo[CIIdx]; + MIG_RESOURCE_ALLOCATION *pResourceAllocation = &pMIGComputeInstance->resourceAllocation; + NvU32 gpcCount = + ((params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST) || + (params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST_WITH_IDS)) + ? params.inst.request.pReqComputeInstanceInfo[CIIdx].gpcCount + : nvPopCount32(params.inst.restore.pComputeInstanceSave->ciInfo.gpcMask); + + pMIGComputeInstance->bValid = NV_TRUE; + pMIGComputeInstance->sharedEngFlag = + ((params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST) || + (params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST_WITH_IDS)) + ? 
params.inst.request.pReqComputeInstanceInfo[CIIdx].sharedEngFlag + : params.inst.restore.pComputeInstanceSave->ciInfo.sharedEngFlags; + NvU32 grCount; + NvU32 ceCount; + NvU32 decCount; + NvU32 encCount; + NvU32 jpgCount; + NvU32 ofaCount; + + // Check to see if we have enough GPCs left to satisfy this request + if (remainingGpcCount < gpcCount) + { + NV_PRINTF(LEVEL_ERROR, + "Not enough remaining GPCs (%d) for compute instance request (%d).\n", + remainingGpcCount, gpcCount); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + remainingGpcCount -= gpcCount; + + if ((params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST) || + (params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST_WITH_IDS)) + { + grCount = 1; + ceCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].ceCount; + decCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvDecCount; + encCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvEncCount; + jpgCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].nvJpgCount; + ofaCount = params.inst.request.pReqComputeInstanceInfo[CIIdx].ofaCount; + } + else + { + ENGTYPE_BIT_VECTOR engines; + + bitVectorFromRaw(&engines, + params.inst.restore.pComputeInstanceSave->ciInfo.enginesMask, + sizeof(params.inst.restore.pComputeInstanceSave->ciInfo.enginesMask)); + + grCount = kmigmgrCountEnginesOfType(&engines, + NV2080_ENGINE_TYPE_GR(0)); + + ceCount = kmigmgrCountEnginesOfType(&engines, + NV2080_ENGINE_TYPE_COPY(0)); + + decCount = kmigmgrCountEnginesOfType(&engines, + NV2080_ENGINE_TYPE_NVDEC(0)); + + encCount = kmigmgrCountEnginesOfType(&engines, + NV2080_ENGINE_TYPE_NVENC(0)); + + jpgCount = kmigmgrCountEnginesOfType(&engines, + NV2080_ENGINE_TYPE_NVJPEG(0)); + + ofaCount = kmigmgrCountEnginesOfType(&engines, + NV2080_ENGINE_TYPE_OFA); + + NV_ASSERT(grCount == 1); + } + + bitVectorClrAll(&pResourceAllocation->engines); + + // Allocate the GR engines for this compute instance + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NONE) != 0x0), + NV2080_ENGINE_RANGE_GR(), + grCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask)); + + // Allocate the Copy engines for this compute instance + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_CE) != 0x0), + NV2080_ENGINE_RANGE_COPY(), + ceCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask)); + + // Allocate the NVDEC engines for this compute instance + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVDEC) != 0x0), + NV2080_ENGINE_RANGE_NVDEC(), + decCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask)); + + // Allocate the NVENC engines for this compute instance + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVENC) != 0x0), + NV2080_ENGINE_RANGE_NVENC(), + encCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask)); + + // Allocate the NVJPG 
engines for this compute instance + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_NVJPG) != 0x0), + NV2080_ENGINE_RANGE_NVJPEG(), + jpgCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask)); + + // Allocate the NVOFA engines for this compute instance + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrAllocateInstanceEngines(&pKernelMIGGpuInstance->resourceAllocation.engines, + ((pMIGComputeInstance->sharedEngFlag & + NVC637_CTRL_EXEC_PARTITIONS_SHARED_FLAG_OFA) != 0x0), + rangeMake(NV2080_ENGINE_TYPE_OFA, NV2080_ENGINE_TYPE_OFA), + ofaCount, + &pResourceAllocation->engines, + &shadowExclusiveEngMask, + &shadowSharedEngMask)); + + + // Cache local mask of engine IDs for this compute instance + kmigmgrGetLocalEngineMask(&pResourceAllocation->engines, + &pResourceAllocation->localEngines); + } + + // Commit the allocations to the instance + if (!bQuery) + { + NvU32 swizzId = pKernelMIGGpuInstance->swizzId; + NvU32 gpcCountPerGr[8]; + NvU32 updateEngMask; + + // Populate configure GPU instance parameters with compute instance info + updateEngMask = 0x0; + portMemSet(gpcCountPerGr, 0, sizeof(gpcCountPerGr)); + for (CIIdx = 0; CIIdx < count; ++CIIdx) + { + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = &computeInstanceInfo[CIIdx]; + MIG_RESOURCE_ALLOCATION *pComputeResourceAllocation = &pMIGComputeInstance->resourceAllocation; + NvU32 localEngineType; + NvU32 gpcCount = + ((params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST) || + (params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST_WITH_IDS)) + ? params.inst.request.pReqComputeInstanceInfo[CIIdx].gpcCount + : nvPopCount32(params.inst.restore.pComputeInstanceSave->ciInfo.gpcMask); + + // + // Xlate from CI-local GR 0 to GI-local GR idx + // We can't use kmigmgrGetLocalToGlobalEngineType because these + // compute instances aren't committed yet + // + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pComputeResourceAllocation->localEngines, NV2080_ENGINE_TYPE_GR(0), + &pComputeResourceAllocation->engines, &localEngineType)); + + updateEngMask |= NVBIT32(NV2080_ENGINE_TYPE_GR_IDX(localEngineType)); + gpcCountPerGr[NV2080_ENGINE_TYPE_GR_IDX(localEngineType)] = gpcCount; + } + + // Configure the GR engines for each compute instance + status = kmigmgrConfigureGPUInstance(pGpu, pKernelMIGManager, swizzId, + gpcCountPerGr, + updateEngMask); + + // Do our best to deconfigure the engines we configured so far, then bail + if (status != NV_OK) + { + portMemSet(gpcCountPerGr, 0, sizeof(gpcCountPerGr)); + // Quash status. 
This is best-effort cleanup + (void)kmigmgrConfigureGPUInstance(pGpu, pKernelMIGManager, swizzId, + gpcCountPerGr, + updateEngMask); + + return status; + } + + // Update the GI pools with the result of this allocation + bitVectorCopy(&pKernelMIGGpuInstance->exclusiveEngMask, &shadowExclusiveEngMask); + bitVectorCopy(&pKernelMIGGpuInstance->sharedEngMask, &shadowSharedEngMask); + + // update each compute instance gpc ids and veid info + for (CIIdx = 0; CIIdx < count; ++CIIdx) + { + MIG_RESOURCE_ALLOCATION *pResourceAllocation = &pKernelMIGGpuInstance->resourceAllocation; + MIG_COMPUTE_INSTANCE *pMIGComputeInstance = &computeInstanceInfo[CIIdx]; + MIG_RESOURCE_ALLOCATION *pComputeResourceAllocation = &pMIGComputeInstance->resourceAllocation; + NvU32 gpcCount; + NvU32 globalEngineType; + NvU32 globalGrIdx; + + // + // Xlate from CI-local GR 0 to global GR idx + // We can't use kmigmgrGetLocalToGlobalEngineType because these + // compute instances aren't committed yet + // + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pComputeResourceAllocation->localEngines, NV2080_ENGINE_TYPE_GR(0), + &pComputeResourceAllocation->engines, &globalEngineType)); + + gpcCount = gpcCountPerGr[NV2080_ENGINE_TYPE_GR_IDX(globalEngineType)]; + + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pResourceAllocation->localEngines, globalEngineType, + &pResourceAllocation->engines, &globalEngineType)); + globalGrIdx = NV2080_ENGINE_TYPE_GR_IDX(globalEngineType); + + pComputeResourceAllocation->gpcCount = gpcCount; + + kgrmgrGetVeidBaseForGrIdx(pGpu, pKernelGraphicsManager, globalGrIdx, &pComputeResourceAllocation->veidOffset); + + pComputeResourceAllocation->veidOffset = pComputeResourceAllocation->veidOffset - pResourceAllocation->veidOffset; + + pComputeResourceAllocation->veidCount = (pResourceAllocation->veidCount / pResourceAllocation->gpcCount) * + gpcCount; + } + + // Copy over the local cached compute instance info + createdInstances = 0; + for (CIIdx = 0; + CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++CIIdx) + { + if (pKernelMIGGpuInstance->MIGComputeInstance[CIIdx].bValid) + continue; + + if ((params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_RESTORE) && + (params.inst.restore.pComputeInstanceSave->id != CIIdx)) + { + continue; + } + + if ((params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_REQUEST_WITH_IDS) && + (pCIIDs[0] != CIIdx)) + { + continue; + } + + NV_ASSERT(pKernelMIGGpuInstance->MIGComputeInstance[CIIdx].id == + KMIGMGR_COMPUTE_INSTANCE_ID_INVALID); + + portMemCopy(&pKernelMIGGpuInstance->MIGComputeInstance[CIIdx], + sizeof(pKernelMIGGpuInstance->MIGComputeInstance[CIIdx]), + &computeInstanceInfo[createdInstances], + sizeof(pKernelMIGGpuInstance->MIGComputeInstance[CIIdx])); + + pKernelMIGGpuInstance->MIGComputeInstance[CIIdx].id = CIIdx; + + pCIIDs[createdInstances++] = CIIdx; + + if (createdInstances == count) + break; + } + + for (i = 0; i < createdInstances; ++i) + { + MIG_RESOURCE_ALLOCATION *pResourceAllocation; + MIG_RESOURCE_ALLOCATION *pComputeResourceAllocation; + MIG_COMPUTE_INSTANCE *pMIGComputeInstance; + NvU32 globalEngineType; + NvU32 globalGrIdx; + + // + // As per the current design, index for the pMIGComputeInstance + // array is same as the compute instance ID. 
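The VEID assignment above follows a simple proportional rule: a compute instance receives (GPU-instance VEID count / GPU-instance GPC count) VEIDs for every GPC it owns, and its VEID offset is rebased from the absolute value returned by kgrmgrGetVeidBaseForGrIdx so that it becomes relative to the GPU instance. The fragment below is a minimal stand-alone sketch of that arithmetic only; the type and helper names are illustrative and not part of the resource manager.

/*
 * Illustrative sketch, not RM code: proportional VEID apportionment as
 * performed above. VEIDs are granted per GPC, and the compute-instance
 * offset is made relative to its parent GPU instance.
 */
typedef struct
{
    unsigned veidOffset;  /* CI VEID offset, relative to the GPU instance */
    unsigned veidCount;   /* number of VEIDs granted to the CI */
} VEID_SPAN;

static VEID_SPAN
apportionVeids(unsigned giVeidBase,   /* absolute VEID base of the GPU instance   */
               unsigned giVeidCount,  /* total VEIDs owned by the GPU instance    */
               unsigned giGpcCount,   /* total GPCs owned by the GPU instance     */
               unsigned grVeidBase,   /* absolute VEID base of the CI's GR engine */
               unsigned ciGpcCount)   /* GPCs assigned to the compute instance    */
{
    VEID_SPAN span;

    /* Rebase the GR engine's VEID base so it is GPU-instance relative */
    span.veidOffset = grVeidBase - giVeidBase;

    /* VEIDs scale with GPC count: (GI VEIDs / GI GPCs) * CI GPCs */
    span.veidCount = (giGpcCount != 0) ? (giVeidCount / giGpcCount) * ciGpcCount : 0;

    return span;
}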
+ // + CIIdx = pCIIDs[i]; + + pResourceAllocation = &pKernelMIGGpuInstance->resourceAllocation; + + pMIGComputeInstance = &pKernelMIGGpuInstance->MIGComputeInstance[CIIdx]; + pComputeResourceAllocation = &pMIGComputeInstance->resourceAllocation; + + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pComputeResourceAllocation->localEngines, NV2080_ENGINE_TYPE_GR(0), + &pComputeResourceAllocation->engines, &globalEngineType)); + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pResourceAllocation->localEngines, globalEngineType, + &pResourceAllocation->engines, &globalEngineType)); + globalGrIdx = NV2080_ENGINE_TYPE_GR_IDX(globalEngineType); + + NV_ASSERT(pMIGComputeInstance->id == CIIdx); + + // + // Register instance with the capability framework only if it explicitly + // requested. Otherwise, we rely on the persistent state. + // + if (bCreateCap) + { + // Register compute instance with the capability framework + NV_ASSERT_OK_OR_GOTO(status, + osRmCapRegisterSmcExecutionPartition(pKernelMIGGpuInstance->pOsRmCaps, + &pMIGComputeInstance->pOsRmCaps, + pMIGComputeInstance->id), + cleanup_created_instances); + } + + // Populate UUID + NV_ASSERT_OK_OR_GOTO(status, + kmigmgrGenerateComputeInstanceUuid_HAL(pGpu, pKernelMIGManager, swizzId, globalGrIdx, + &pMIGComputeInstance->uuid), + cleanup_created_instances); + + // Allocate RsShared for the instance + NV_ASSERT_OK_OR_GOTO( + status, + serverAllocShare(&g_resServ, classInfo(RsShared), + &pMIGComputeInstance->pShare), + cleanup_created_instances); + + // Allocate subscribed handles for this instance + NV_ASSERT_OK_OR_GOTO(status, + kmigmgrAllocComputeInstanceHandles(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, pMIGComputeInstance), + cleanup_created_instances); + + { + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, globalGrIdx); + fecsSetRoutingInfo(pGpu, + pKernelGraphics, + pMIGComputeInstance->instanceHandles.hClient, + pMIGComputeInstance->instanceHandles.hSubdevice, + 0); + + NV_ASSERT_OK_OR_GOTO(status, + kgraphicsCreateGoldenImageChannel(pGpu, pKernelGraphics), + cleanup_created_instances); + } + } + } + + return NV_OK; + +cleanup_created_instances: + for (i = 0; i < createdInstances; ++i) + { + (void)kmigmgrDeleteComputeInstance(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, + pCIIDs[i], NV_FALSE); + } + + return status; +} + +/*! 
+ * @brief create compute instances for CPU-RM + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] pKernelMIGGpuInstance + * @param[IN] bQuery If NV_TRUE, don't save created instances + * @param[IN] params List of requested compute instance to create + * @param[OUT] pCIIDs IDs of created instances + * @param[IN] bCreateCap Flag stating if MIG CI capabilities needs to be created + */ +NV_STATUS +kmigmgrCreateComputeInstances_FWCLIENT +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + NvBool bQuery, + KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS params, + NvU32 *pCIIDs, + NvBool bCreateCap +) +{ + NV_STATUS status = NV_OK; + KernelGraphics *pKernelGraphics; + MIG_COMPUTE_INSTANCE *pMIGComputeInstance; + MIG_RESOURCE_ALLOCATION *pResourceAllocation; + MIG_RESOURCE_ALLOCATION *pComputeResourceAllocation; + NVC637_CTRL_EXEC_PARTITIONS_EXPORTED_INFO info; + NvU32 CIIdx = pCIIDs[0]; + NvU32 tempGpcMask; + NvU32 gpcCountPerGr[8]; + NvU32 localEngineType; + NvU32 globalEngineType; + NvU32 globalGrIdx; + + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(params.type == KMIGMGR_CREATE_COMPUTE_INSTANCE_PARAMS_TYPE_RESTORE, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(params.inst.restore.pComputeInstanceSave != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(params.inst.restore.pComputeInstanceSave->bValid, NV_ERR_INVALID_ARGUMENT); + // CPU-RM will always restore the CI state created by GSP-RM, so will always be commit operation + NV_ASSERT_OR_RETURN(!bQuery, NV_ERR_INVALID_ARGUMENT); + + pResourceAllocation = &pKernelMIGGpuInstance->resourceAllocation; + pMIGComputeInstance = &pKernelMIGGpuInstance->MIGComputeInstance[CIIdx]; + pComputeResourceAllocation = &pMIGComputeInstance->resourceAllocation; + + NV_ASSERT_OR_RETURN(!pMIGComputeInstance->bValid, NV_ERR_INVALID_STATE); + NV_ASSERT(pMIGComputeInstance->id == KMIGMGR_COMPUTE_INSTANCE_ID_INVALID); + + info = params.inst.restore.pComputeInstanceSave->ciInfo; + + portMemCopy(pMIGComputeInstance->uuid.uuid, sizeof(pMIGComputeInstance->uuid.uuid), + info.uuid, sizeof(info.uuid)); + pMIGComputeInstance->sharedEngFlag = info.sharedEngFlags; + + pComputeResourceAllocation->gpcCount = 0; + tempGpcMask = info.gpcMask; + while (tempGpcMask != 0x0) + { + NvU32 gpcIdx = portUtilCountTrailingZeros32(tempGpcMask); + pComputeResourceAllocation->gpcIds[(pComputeResourceAllocation->gpcCount)++] = gpcIdx; + tempGpcMask &= ~(NVBIT32(gpcIdx)); + } + + pComputeResourceAllocation->veidCount = info.veidCount; + pComputeResourceAllocation->veidOffset = info.veidOffset; + + bitVectorFromRaw(&pComputeResourceAllocation->engines, info.enginesMask, sizeof(info.enginesMask)); + + // Cache the local engine mask for this CI + kmigmgrGetLocalEngineMask(&pComputeResourceAllocation->engines, &pComputeResourceAllocation->localEngines); + + pMIGComputeInstance->bValid = NV_TRUE; + pMIGComputeInstance->id = CIIdx; + + // Populate configure GPU instance parameters with compute instance info + portMemSet(gpcCountPerGr, 0, sizeof(gpcCountPerGr)); + + // + // Xlate from CI-local GR 0 to GI-local GR idx + // We can't use kmigmgrGetLocalToGlobalEngineType because these + // compute instances aren't committed yet + // + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pComputeResourceAllocation->localEngines, NV2080_ENGINE_TYPE_GR(0), + &pComputeResourceAllocation->engines, &localEngineType)); + + gpcCountPerGr[NV2080_ENGINE_TYPE_GR_IDX(localEngineType)] = 
pComputeResourceAllocation->gpcCount; + + // Configure the GR engines for each compute instance + status = kmigmgrConfigureGPUInstance(pGpu, pKernelMIGManager, pKernelMIGGpuInstance->swizzId, + gpcCountPerGr, + NVBIT32(NV2080_ENGINE_TYPE_GR_IDX(localEngineType))); + + // Do our best to deconfigure the engines we configured so far, then bail + if (status != NV_OK) + { + portMemSet(gpcCountPerGr, 0, sizeof(gpcCountPerGr)); + // Quash status. This is best-effort cleanup + (void)kmigmgrConfigureGPUInstance(pGpu, pKernelMIGManager, pKernelMIGGpuInstance->swizzId, + gpcCountPerGr, + NVBIT32(NV2080_ENGINE_TYPE_GR_IDX(localEngineType))); + + return status; + } + + // + // Register instance with the capability framework only if it explicitly + // requested. Otherwise, we rely on the persistent state. + // + if (bCreateCap) + { + // Register compute instance with the capability framework + NV_ASSERT_OK_OR_GOTO(status, + osRmCapRegisterSmcExecutionPartition(pKernelMIGGpuInstance->pOsRmCaps, + &pMIGComputeInstance->pOsRmCaps, + pMIGComputeInstance->id), + cleanup_created_instances); + } + + // Allocate RsShared for the instance + NV_ASSERT_OK_OR_GOTO(status, + serverAllocShare(&g_resServ, classInfo(RsShared), + &pMIGComputeInstance->pShare), + cleanup_created_instances); + + // Allocate subscribed handles for this instance + NV_ASSERT_OK_OR_GOTO(status, + kmigmgrAllocComputeInstanceHandles(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, pMIGComputeInstance), + cleanup_created_instances); + + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pComputeResourceAllocation->localEngines, NV2080_ENGINE_TYPE_GR(0), + &pComputeResourceAllocation->engines, &globalEngineType)); + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pResourceAllocation->localEngines, globalEngineType, + &pResourceAllocation->engines, &globalEngineType)); + globalGrIdx = NV2080_ENGINE_TYPE_GR_IDX(globalEngineType); + + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, globalGrIdx); + fecsSetRoutingInfo(pGpu, + pKernelGraphics, + pMIGComputeInstance->instanceHandles.hClient, + pMIGComputeInstance->instanceHandles.hSubdevice, + 0); + + NV_ASSERT_OK_OR_GOTO(status, + kgraphicsCreateGoldenImageChannel(pGpu, pKernelGraphics), + cleanup_created_instances); + + return NV_OK; + +cleanup_created_instances: + (void)kmigmgrDeleteComputeInstance(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, + CIIdx, NV_FALSE); + + return status; +} + +// Delete created instance handles if they exist +void +kmigmgrFreeComputeInstanceHandles_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + MIG_COMPUTE_INSTANCE *pMIGComputeInstance +) +{ + if (pMIGComputeInstance->instanceHandles.hClient != NV01_NULL_OBJECT) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + pRmApi->Free(pRmApi, pMIGComputeInstance->instanceHandles.hClient, pMIGComputeInstance->instanceHandles.hClient); + pMIGComputeInstance->instanceHandles.hClient = NV01_NULL_OBJECT; + pMIGComputeInstance->instanceHandles.hSubdevice = NV01_NULL_OBJECT; + pMIGComputeInstance->instanceHandles.hSubscription = NV01_NULL_OBJECT; + } +} + +/*! + * @brief Releases the engines owned by this Compute Instance of the given class + * of engine (GR, COPY, etc) to the GPU Instance resource pools. 
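The release logic that follows keeps two per-GPU-instance masks consistent: an engine is dropped from the exclusive-ownership mask as soon as its compute instance is torn down, but it is dropped from the shared-ownership mask only when no remaining valid compute instance still references it. Below is a simplified, self-contained model of that bookkeeping using plain 32-bit masks instead of the RM engine bit vectors; every name in it is hypothetical.

/*
 * Simplified model of the shared/exclusive engine bookkeeping below.
 * Illustrative only -- plain bitmasks stand in for ENGTYPE_BIT_VECTOR.
 */
typedef struct
{
    unsigned engineMask;  /* engines referenced by this compute instance */
    int      bValid;
} TOY_CI;

static void
releaseEngines(unsigned *pExclusiveMask,  /* engines owned exclusively by some CI */
               unsigned *pSharedMask,     /* engines shared by one or more CIs    */
               const TOY_CI *pDying,      /* the CI being deleted                 */
               const TOY_CI *pOthers,     /* remaining CIs in the GPU instance    */
               unsigned otherCount)
{
    unsigned engine;

    for (engine = 0; engine < 32; engine++)
    {
        unsigned bit = 1u << engine;
        unsigned i;
        int stillUsed = 0;

        if ((pDying->engineMask & bit) == 0)
            continue;

        /* Exclusive ownership always ends with its owner */
        *pExclusiveMask &= ~bit;

        /* If the engine was never shared, there is nothing more to do */
        if ((*pSharedMask & bit) == 0)
            continue;

        /* Otherwise, keep the shared bit while any other valid CI uses it */
        for (i = 0; i < otherCount; i++)
        {
            if (pOthers[i].bValid && (pOthers[i].engineMask & bit))
            {
                stillUsed = 1;
                break;
            }
        }

        if (!stillUsed)
            *pSharedMask &= ~bit;
    }
}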
+ */ +void +kmigmgrReleaseComputeInstanceEngines_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + MIG_COMPUTE_INSTANCE *pMIGComputeInstance +) +{ + NvU32 globalEngineType; + NvU32 localEngineType; + ENGTYPE_BIT_VECTOR *pGlobalMask; + ENGTYPE_BIT_VECTOR *pLocalMask; + + NV_ASSERT_OR_RETURN_VOID(pKernelMIGGpuInstance != NULL); + NV_ASSERT_OR_RETURN_VOID(pMIGComputeInstance != NULL); + + pGlobalMask = &pKernelMIGGpuInstance->resourceAllocation.engines; + pLocalMask = &pKernelMIGGpuInstance->resourceAllocation.localEngines; + + // Iterate over both global/local masks at the same time + FOR_EACH_IN_BITVECTOR_PAIR(pGlobalMask, globalEngineType, pLocalMask, localEngineType) + { + NvU32 CIIdx; + + // Skip anything not owned by this compute instance + if (!bitVectorTest(&pMIGComputeInstance->resourceAllocation.engines, localEngineType)) + continue; + + // + // Clear this engine from the exclusive ownership mask. If it was being + // shared, it already isn't in the exclusive ownership mask, so doing + // this for all engines in this compute instance isn't harmful. + // + bitVectorClr(&pKernelMIGGpuInstance->exclusiveEngMask, globalEngineType); + + // If this engine was exclusively owned, nothing else to do + if (!bitVectorTest(&pKernelMIGGpuInstance->sharedEngMask, globalEngineType)) + continue; + + // Determine if any other compute instance owns this engine + for (CIIdx = 0; + CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++CIIdx) + { + if (!pKernelMIGGpuInstance->MIGComputeInstance[CIIdx].bValid) + continue; + + if (bitVectorTest(&pKernelMIGGpuInstance->MIGComputeInstance[CIIdx].resourceAllocation.engines, + localEngineType)) + { + break; + } + } + + // If engine is still owned by someone, don't mark it unused + if (CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance)) + continue; + + // mark this engine as no longer being shared by anyone + bitVectorClr(&pKernelMIGGpuInstance->sharedEngMask, globalEngineType); + } + FOR_EACH_IN_BITVECTOR_PAIR_END(); +} + +/*! 
+ * @brief Function to delete Compute Instance + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] pKernelMIGGpuInstance + * @param[IN] CIID Compute Instance ID + * @param[IN] bUnload NV_TRUE if called during gpu state unload path + */ +NV_STATUS +kmigmgrDeleteComputeInstance_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + NvU32 CIID, + NvBool bUnload +) +{ + MIG_COMPUTE_INSTANCE *pMIGComputeInstance; + MIG_RESOURCE_ALLOCATION *pComputeResourceAllocation; + ENGTYPE_BIT_VECTOR grEngines; + NvU32 swizzId; + NvU32 gpcCountPerGr[KGRMGR_MAX_GPC]; + NvU32 updateEngMask; + + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(CIID < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance), + NV_ERR_INVALID_ARGUMENT); + + // Make sure that the targeted compute instance is still valid + NV_CHECK_OR_RETURN(LEVEL_SILENT, + pKernelMIGGpuInstance->MIGComputeInstance[CIID].bValid, + NV_WARN_NOTHING_TO_DO); + + pMIGComputeInstance = &pKernelMIGGpuInstance->MIGComputeInstance[CIID]; + pComputeResourceAllocation = &pMIGComputeInstance->resourceAllocation; + + // + // Initial refCount is increased to "1" when instance is created and then + // every subscription by a client should increase the refcount + // + if ((pMIGComputeInstance->pShare != NULL) && + (serverGetShareRefCount(&g_resServ, pMIGComputeInstance->pShare) > 2)) + { + NV_PRINTF(LEVEL_ERROR, + "Compute Instance with id - %d still in use by other clients\n", + CIID); + + return NV_ERR_STATE_IN_USE; + } + + if (!bUnload) + { + // + // Unregister instance from the capability framework only if + // it is explicitly destroyed i.e. not during GPU state unload path. + // + // Note that the saved instance persistent state will be freed by + // _gpumgrUnregisterRmCapsForMIGCI during driver unload. 
+ // + osRmCapUnregister(&pMIGComputeInstance->pOsRmCaps); + } + + // Deconfigure the GR engine for this compute instance + swizzId = pKernelMIGGpuInstance->swizzId; + portMemSet(gpcCountPerGr, 0, sizeof(gpcCountPerGr)); + + bitVectorClrAll(&grEngines); + bitVectorSetRange(&grEngines, NV2080_ENGINE_RANGE_GR()); + bitVectorAnd(&grEngines, &grEngines, &pComputeResourceAllocation->engines); + NV_ASSERT_OR_RETURN(!bitVectorTestAllCleared(&grEngines), NV_ERR_INVALID_STATE); + updateEngMask = NVBIT32(NV2080_ENGINE_TYPE_GR_IDX(bitVectorCountTrailingZeros(&grEngines))); + NV_ASSERT_OK_OR_RETURN( + kmigmgrConfigureGPUInstance(pGpu, pKernelMIGManager, swizzId, gpcCountPerGr, updateEngMask)); + + { + NvU32 globalEngType; + MIG_INSTANCE_REF ref = kmigmgrMakeCIReference(pKernelMIGGpuInstance, pMIGComputeInstance); + NV_ASSERT_OK_OR_RETURN( + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_GR(0), + &globalEngType)); + + // Free up the internal handles for this compute instance + kmigmgrFreeComputeInstanceHandles(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, pMIGComputeInstance); + + fecsSetRoutingInfo(pGpu, + GPU_GET_KERNEL_GRAPHICS(pGpu, NV2080_ENGINE_TYPE_GR_IDX(globalEngType)), + pKernelMIGGpuInstance->instanceHandles.hClient, + pKernelMIGGpuInstance->instanceHandles.hSubdevice, + NV2080_ENGINE_TYPE_GR_IDX(bitVectorCountTrailingZeros(&grEngines))); + + if (pMIGComputeInstance->pShare != NULL) + { + serverFreeShare(&g_resServ, pMIGComputeInstance->pShare); + pMIGComputeInstance->pShare = NULL; + } + } + + // Mark this compute instance as invalid + pMIGComputeInstance->bValid = NV_FALSE; + + // Release this compute instance's engines + kmigmgrReleaseComputeInstanceEngines(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, pMIGComputeInstance); + + // Now that we no longer need it, clear the shared engine flag + pMIGComputeInstance->sharedEngFlag = 0x0; + pMIGComputeInstance->id = KMIGMGR_COMPUTE_INSTANCE_ID_INVALID; + + pMIGComputeInstance->pOsRmCaps = NULL; + + return NV_OK; +} + +/*! 
+ * @brief print out the CI configuration of this GI + */ +static void +_kmigmgrPrintComputeInstances +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +#define PADDING_STR "----------------------------------------------------" + NvU32 engineType; + NvU32 CIIdx; + + NV_PRINTF(LEVEL_INFO, "\n"); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %14s | %14s | %14s |\n", + "SwizzId", + "GR Count", + "Gpc Count"); + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + NV_PRINTF(LEVEL_INFO, "| %14d | %14d | %14d |\n", + pKernelMIGGpuInstance->swizzId, + kmigmgrCountEnginesOfType(&pKernelMIGGpuInstance->resourceAllocation.engines, NV2080_ENGINE_TYPE_GR(0)), + pKernelMIGGpuInstance->resourceAllocation.gpcCount); + + for (CIIdx = 0; + CIIdx < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); + ++CIIdx) + { + MIG_RESOURCE_ALLOCATION *pComputeResourceAllocation; + + if (!pKernelMIGGpuInstance->MIGComputeInstance[CIIdx].bValid) + { + continue; + } + + pComputeResourceAllocation = &pKernelMIGGpuInstance->MIGComputeInstance[CIIdx].resourceAllocation; + + NV_ASSERT_OK( + kmigmgrEngineTypeXlate(&pComputeResourceAllocation->localEngines, NV2080_ENGINE_TYPE_GR(0), + &pComputeResourceAllocation->engines, &engineType)); + + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + if (IS_GSP_CLIENT(pGpu)) + { + NvU32 gpcIdx; + NvU32 gpcMask = 0x0; + + for (gpcIdx = 0; gpcIdx < pComputeResourceAllocation->gpcCount; ++gpcIdx) + { + gpcMask |= NVBIT32(pComputeResourceAllocation->gpcIds[gpcIdx]); + } + NV_PRINTF(LEVEL_INFO, "| %23s | %23s |\n", + "Gr Engine IDX", + "GPC Mask"); + NV_PRINTF(LEVEL_INFO, "| %23d | %23X |\n", + NV2080_ENGINE_TYPE_GR_IDX(engineType), + gpcMask); + } + else + { + // gpcMask is not meaningful in VGPU, thus only printing gpcCount + NV_PRINTF(LEVEL_INFO, "| %23s | %23s |\n", + "Gr Engine IDX", + "GPC Count"); + NV_PRINTF(LEVEL_INFO, "| %23d | %23X |\n", + NV2080_ENGINE_TYPE_GR_IDX(engineType), + pComputeResourceAllocation->gpcCount); + } + } + NV_PRINTF(LEVEL_INFO, "%s\n", PADDING_STR); + +#undef PADDING_STR +#endif // NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +} + +/*! 
+ * @brief Function to configure a specific GPU instance by setting available + * GPCs with requested GR Engines + * + * @param[IN] pGpu + * @param[IN} pKernelMIGManager + * @param[OUT] swizzId SwizzId for this GPU instance + * @param[IN] pGpcCountPerGr Requested num GPCs for every GR engine in + * this instance + * @param[IN] updateEngMask Entry valid flag for each engine in instance + * + * @return Returns NV_STATUS + * NV_OK + * NV_ERR_INVALID_ARGUMENT + * NV_WARN_NOTHING_TO_DO + * NV_ERR_INSUFFICIENT_RESOURCES + */ +NV_STATUS +kmigmgrConfigureGPUInstance_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId, + NvU32 *pGpcCountPerGr, + NvU32 updateEngMask +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NV_STATUS status = NV_OK; + NvU32 i; + NvU32 j; + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = NULL; + NvBool bAssigning; + NvU32 checkGrs[NV2080_ENGINE_TYPE_GR_SIZE]; + NvU32 checkGrCount = 0; + NvU32 engineType; + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + // Sanity check the GPU instance requested to be configured + if (!kmigmgrIsSwizzIdInUse(pGpu, pKernelMIGManager, swizzId)) + { + NV_PRINTF(LEVEL_ERROR, "Invalid swizzId - %d.\n", swizzId); + return NV_ERR_INVALID_ARGUMENT; + } + + status = kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager, swizzId, &pKernelMIGGpuInstance); + NV_CHECK_OR_RETURN(LEVEL_SILENT, status == NV_OK, status); + + bAssigning = NV_FALSE; + portMemSet(checkGrs, 0, sizeof(checkGrs)); + + i = 0; + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + + // Skip over invalid entries + if (!(updateEngMask & NVBIT32(i))) + { + i++; + continue; + } + + // Make sure no requested GPC count is greater than instance GPC count + if (pGpcCountPerGr[i] > pKernelMIGGpuInstance->resourceAllocation.gpcCount) + { + NV_PRINTF(LEVEL_ERROR, + "Invalid GPC count - %d requested for GrIdx - %d.\n", + pGpcCountPerGr[i], + engineIdx); + return NV_ERR_INVALID_ARGUMENT; + } + + bAssigning = bAssigning || pGpcCountPerGr[i] > 0; + + checkGrs[checkGrCount++] = engineType; + + i++; + } + FOR_EACH_IN_BITVECTOR_END(); + + // + // Return an error if there are any channels on any engines targeted by this + // request + // + NV_CHECK_OR_RETURN(LEVEL_SILENT, + !kfifoEngineListHasChannel(pGpu, pKernelFifo, checkGrs, checkGrCount), + NV_ERR_STATE_IN_USE); + + if (!bAssigning) + { + // Invalidate targeted engines + i = 0; + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + + if (updateEngMask & NVBIT32(i)) + { + NV_ASSERT_OK_OR_RETURN( + kmigmgrInvalidateGr(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, engineIdx)); + } + + i++; + } + FOR_EACH_IN_BITVECTOR_END(); + + return NV_OK; + } + + // + // Client passes the logical GR-IDs while RM works with physical GR-IDs + // Walk the list of physical GRs associated with this GPU instance and then + // set GPCs as requested + // + i = 0; + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + NvU32 gpcCount = pGpcCountPerGr[i]; + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + + if (!(updateEngMask & 
NVBIT32(i)) || (0 == gpcCount)) + { + i++; + continue; + } + + // Update the GR to VEID mapping + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kgrmgrAllocVeidsForGrIdx(pGpu, + pKernelGraphicsManager, + engineIdx, + gpcCount, + pKernelMIGGpuInstance), + cleanup); + + i++; + } + FOR_EACH_IN_BITVECTOR_END(); + + _kmigmgrPrintComputeInstances(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + i = 0; + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + NvU32 gpcCount = pGpcCountPerGr[i]; + KernelGraphics *pKGr; + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + + if (!(updateEngMask & NVBIT32(i)) || (0 == gpcCount)) + { + i++; + continue; + } + + pKGr = GPU_GET_KERNEL_GRAPHICS(pGpu, engineIdx); + // Re-pull public static data for kernel graphics + status = kgraphicsLoadStaticInfo_HAL(pGpu, pKGr, pKernelMIGGpuInstance->swizzId); + if (status != NV_OK) + goto cleanup; + + // record sizes of local GR ctx buffers for this GR + status = kgrmgrDiscoverMaxLocalCtxBufInfo(pGpu, pKernelGraphicsManager, pKGr, swizzId); + if (status != NV_OK) + goto cleanup; + + i++; + } + FOR_EACH_IN_BITVECTOR_END(); + + return status; + +cleanup: + + j = 0; + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + + // Rollback all previous validations + if (j == i) + break; + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + + if (updateEngMask & NVBIT32(j)) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to configure GPU instance. Invalidating GRID - %d\n", + engineIdx); + + // Invalidate assignments to this GR, clear global state + kmigmgrInvalidateGr(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, engineIdx); + } + + j++; + } + FOR_EACH_IN_BITVECTOR_END(); + + return status; +} + +// invalidate GR to GPC mappings +NV_STATUS +kmigmgrInvalidateGrGpcMapping_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + NvU32 grIdx +) +{ + NV_STATUS status = NV_OK; + NvU32 gfid; + NvBool bCallingContextPlugin; + KernelGraphics *pKernelGraphics; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + NV_ASSERT_OK_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin)); + if (bCallingContextPlugin) + { + gfid = GPU_GFID_PF; + } + + // Free global ctx buffers, this will need to be regenerated + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, grIdx); + fecsBufferTeardown(pGpu, pKernelGraphics); + kgraphicsFreeGlobalCtxBuffers(pGpu, pKernelGraphics, gfid); + + // clear cached ctx buf sizes + kgraphicsClearCtxBufferInfo(pGpu, pKernelGraphics); + + return status; +} + +// invalidate a GR engine +NV_STATUS +kmigmgrInvalidateGr_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance, + NvU32 grIdx +) +{ + KernelGraphics *pKGr = GPU_GET_KERNEL_GRAPHICS(pGpu, grIdx); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrInvalidateGrGpcMapping(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, grIdx)); + + kgrmgrClearVeidsForGrIdx(pGpu, pKernelGraphicsManager, grIdx); + + kgraphicsInvalidateStaticInfo(pGpu, pKGr); + return NV_OK; +} + +/*! 
+ * @brief Function to invalidate a gpu instance + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] swizzId swizzId which is getting invalidated + * @param[IN] bUnload NV_TRUE if called from gpu state unload path + * + * @return Returns NV_STATUS + * NV_OK + * NV_ERR_INVALID_ARGUMENT No GPC associated with Gr + */ +NV_STATUS +kmigmgrInvalidateGPUInstance_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId, + NvBool bUnload +) +{ + NV_STATUS rmStatus = NV_OK; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = NULL; + NvU32 i; + NvU32 engineType; + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + // Sanity checks + rmStatus = kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager, swizzId, &pKernelMIGGpuInstance); + if (rmStatus != NV_OK) + { + // Didn't find requested gpu instance + NV_PRINTF(LEVEL_ERROR, "No valid gpu instance with SwizzId - %d found\n", + swizzId); + return rmStatus; + } + + // Make sure that no client is using this gpu instance + if (!kmigmgrIsGPUInstanceReadyToBeDestroyed(pKernelMIGGpuInstance)) + { + NV_PRINTF(LEVEL_ERROR, + "Gpu instance with SwizzId - %d still in use by other clients\n", + swizzId); + + kmigmgrPrintSubscribingClients(pGpu, pKernelMIGManager, swizzId); + return NV_ERR_STATE_IN_USE; + } + + for (i = 0; i < NV_ARRAY_ELEMENTS(pKernelMIGGpuInstance->MIGComputeInstance); ++i) + { + if (pKernelMIGGpuInstance->MIGComputeInstance[i].bValid) + { + NV_PRINTF(LEVEL_ERROR, + "Cannot destroy gpu instance %u with valid compute instance %d \n", + swizzId, i); + + return NV_ERR_STATE_IN_USE; + } + } + + NV_PRINTF(LEVEL_INFO, "FREEING GPU INSTANCE\n"); + kmigmgrPrintGPUInstanceInfo(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + if (!bUnload) + { + // + // Unregister gpu instance from the capability framework only if + // it is explicitly destroyed i.e. not during GPU state unload path. + // + // Note that the saved gpu instance persistent state will be freed by + // _gpumgrUnregisterRmCapsForSmcPartitions during driver unload. 
+ // + osRmCapUnregister(&pKernelMIGGpuInstance->pOsRmCaps); + } + + // Remove GR->GPC mappings in GPU instance Info + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + KernelGraphics *pKernelGraphics; + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmigmgrInvalidateGr(pGpu, pKernelMIGManager, pKernelMIGGpuInstance, engineIdx)); + + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, engineIdx); + fecsClearRoutingInfo(pGpu, pKernelGraphics); + } + FOR_EACH_IN_BITVECTOR_END(); + + // Delete client handle after all GR's are invalidated + kmigmgrFreeGPUInstanceHandles(pKernelMIGGpuInstance); + + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmigmgrClearEnginesInUse(pGpu, pKernelMIGManager, &pKernelMIGGpuInstance->resourceAllocation.engines)); + + // Destroy runlist buffer pools + kmigmgrDestroyGPUInstanceGrBufPools(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + if (kmigmgrIsSwizzIdInUse(pGpu, pKernelMIGManager, swizzId)) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmigmgrClearSwizzIdInUse(pGpu, pKernelMIGManager, swizzId)); + } + + // Sanity check that requested swizzID is not set in swizzIdMask + NV_ASSERT_OR_ELSE(!(NVBIT64(swizzId) & pKernelMIGManager->swizzIdInUseMask), rmStatus = NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmemsysInitMIGMemoryPartitionTable_HAL(pGpu, pKernelMemorySystem)); + + // Destroy gpu instance scrubber + kmigmgrDestroyGPUInstanceScrubber(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + // Destroy gpu instance pool for page table mem + kmigmgrDestroyGPUInstancePool(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + // Delete gpu instance engine runlists + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmigmgrDeleteGPUInstanceRunlists_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance)); + + // Destroy runlist buffer pools + kmigmgrDestroyGPUInstanceRunlistBufPools(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + // Free gpu instance memory + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + memmgrFreeMIGGPUInstanceMemory(pGpu, pMemoryManager, swizzId, pKernelMIGGpuInstance->hMemory, &pKernelMIGGpuInstance->pMemoryPartitionHeap)); + + if (pKernelMIGGpuInstance->pShare != NULL) + { + serverFreeShare(&g_resServ, pKernelMIGGpuInstance->pShare); + pKernelMIGGpuInstance->pShare = NULL; + } + + // Initialize gpu instance info to initial value + kmigmgrInitGPUInstanceInfo(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + + return rmStatus; +} + +/*! + * @brief Init gpu instance scrubber + */ +NV_STATUS +kmigmgrInitGPUInstanceScrubber_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (!IsSLIEnabled(pGpu) && + memmgrIsScrubOnFreeEnabled(pMemoryManager) && + memmgrIsPmaInitialized(pMemoryManager)) + { + NV_ASSERT_OK_OR_RETURN(scrubberConstruct(pGpu, pKernelMIGGpuInstance->pMemoryPartitionHeap)); + pKernelMIGGpuInstance->bMemoryPartitionScrubberInitialized = NV_TRUE; + } + + return NV_OK; +} + +/*! 
+ * @brief Destroy gpu instance scrubber + */ +void +kmigmgrDestroyGPUInstanceScrubber_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + OBJMEMSCRUB *pMemscrub = NULL; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (!pKernelMIGGpuInstance->bMemoryPartitionScrubberInitialized) + return; + + if (!IsSLIEnabled(pGpu) && + memmgrIsScrubOnFreeEnabled(pMemoryManager) && + memmgrIsPmaInitialized(pMemoryManager)) + { + pMemscrub = pKernelMIGGpuInstance->pMemoryPartitionHeap->pmaObject.pScrubObj; + scrubberDestruct(pGpu, pKernelMIGGpuInstance->pMemoryPartitionHeap, pMemscrub); + pKernelMIGGpuInstance->bMemoryPartitionScrubberInitialized = NV_FALSE; + } +} + +/*! + * @brief Releases GR buffer memory back from global buffer pools and destroys + * these pools for all GR engines that belong to this gpu instance. + */ +void +kmigmgrDestroyGPUInstanceGrBufPools_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + NvU32 engineType; + + if (!ctxBufPoolIsSupported(pGpu)) + return; + + NV_ASSERT(pKernelMIGGpuInstance != NULL); + + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + KernelGraphics *pKernelGraphics; + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, engineIdx); + + kgraphicsDestroyCtxBufPool(pGpu, pKernelGraphics); + } + FOR_EACH_IN_BITVECTOR_END(); +} + +/*! + * @brief Destroy per-gpu instance memory pool for client page tables + */ +void +kmigmgrDestroyGPUInstancePool_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (!memmgrIsPmaInitialized(pMemoryManager) || + !memmgrAreClientPageTablesPmaManaged(pMemoryManager)) + { + NV_ASSERT_OR_GOTO((pKernelMIGGpuInstance->pPageTableMemPool == NULL), destroy_pool); + return; + } + + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance->swizzId)) + { + NV_ASSERT_OR_GOTO((pKernelMIGGpuInstance->pPageTableMemPool == NULL), destroy_pool); + return; + } + + if (pKernelMIGGpuInstance->pPageTableMemPool == NULL) + { + NV_PRINTF(LEVEL_INFO, "page table memory pool not setup\n"); + return; + } + +destroy_pool: + rmMemPoolDestroy(pKernelMIGGpuInstance->pPageTableMemPool); + pKernelMIGGpuInstance->pPageTableMemPool = NULL; +} + +/*! + * @brief Releases runlist buffer memory back from runlist buffer pools and destroys the + * runlist buffer pools for engines that belong to these gpu instance. 
+ */ +void +kmigmgrDestroyGPUInstanceRunlistBufPools_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + NvU32 engineType; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance->swizzId)) + return; + + if (!ctxBufPoolIsSupported(pGpu)) + return; + + for (engineType = 0; engineType < NV2080_ENGINE_TYPE_LAST; engineType++) + { + if (!NV2080_ENGINE_TYPE_IS_VALID(engineType) || + !kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, engineType) || + !kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, kmigmgrMakeGIReference(pKernelMIGGpuInstance))) + { + continue; + } + + if (pKernelFifo->pRunlistBufPool[engineType] != NULL) + { + ctxBufPoolRelease(pKernelFifo->pRunlistBufPool[engineType]); + ctxBufPoolDestroy(&pKernelFifo->pRunlistBufPool[engineType]); + } + } +} + +/*! + * @brief Print out clients subscribing to specified gpu instance + */ +void +kmigmgrPrintSubscribingClients_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 swizzId +) +{ + RmClient **ppClient; + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient != NULL; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + RmClient *pClient = *ppClient; + RsClient *pRsClient = staticCast(pClient, RsClient); + NvHandle hClient = pRsClient->hClient; + MIG_INSTANCE_REF ref; + RS_PRIV_LEVEL privLevel = rmclientGetCachedPrivilege(pClient); + + NV_STATUS status = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, + &ref); + + if (status != NV_OK) + continue; + + if (ref.pKernelMIGGpuInstance->swizzId != swizzId) + continue; + + (void)privLevel; + NV_PRINTF(LEVEL_INFO, "%s client %x currently subscribed to swizzId %u\n", + (privLevel >= RS_PRIV_LEVEL_KERNEL) ? "Kernel" : "Usermode", + hClient, swizzId); + } +} + +/*! + * @brief Function to enable/disable MIG mode + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] bMemoryPartitioningNeeded Is Memory partitioning required? + * @param[IN] bEnable Enable/Disable MIG + * @param[IN] bUnload RM unload path + * + * @return Returns NV_STATUS + * NV_OK + * NV_WARN_NOTHING_TO_DO + * NV_ERR_INVALID_STATE + */ +NV_STATUS +kmigmgrSetMIGState_VF +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvBool bMemoryPartitioningNeeded, + NvBool bEnable, + NvBool bUnload +) +{ + if (bEnable) + { + KernelGraphics *pKGr = GPU_GET_KERNEL_GRAPHICS(pGpu, 0); + + kgraphicsInvalidateStaticInfo(pGpu, pKGr); + } + + return NV_OK; +} + +/*! + * @brief Function to enable/disable MIG mode + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[IN] bMemoryPartitioningNeeded Is Memory partitioning required? 
+ * @param[IN] bEnable Enable/Disable MIG + * @param[IN] bUnload RM unload path + * + * @return Returns NV_STATUS + * NV_OK + * NV_WARN_NOTHING_TO_DO + * NV_ERR_INVALID_STATE + */ +NV_STATUS +kmigmgrSetMIGState_FWCLIENT +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvBool bMemoryPartitioningNeeded, + NvBool bEnable, + NvBool bUnload +) +{ + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + NV_STATUS rmStatus = NV_OK; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (bEnable) + { + KernelGraphics *pKGr = GPU_GET_KERNEL_GRAPHICS(pGpu, 0); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kgrmgrDiscoverMaxGlobalCtxBufSizes(pGpu, pKernelGraphicsManager, pKGr, bMemoryPartitioningNeeded), + done); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrDisableWatchdog(pGpu, pKernelMIGManager), + cleanup_disableWatchdog); + + // Before enabling MIG, deconfigure GR0 in legacy mode + kgraphicsInvalidateStaticInfo(pGpu, pKGr); + + // + // Destroy all global ctx buffers, we will need to recreate them in + // partitionable memory later. + // + fecsBufferTeardown(pGpu, pKGr); + + kgraphicsFreeGlobalCtxBuffers(pGpu, pKGr, GPU_GFID_PF); + + // + // Save the pre-MIG top-level scrubber status for later + // Destroy the top level scrubber if it exists + // + NV_ASSERT_OK_OR_GOTO(rmStatus, + memmgrSaveAndDestroyTopLevelScrubber(pGpu, pMemoryManager), + cleanup_destroyTopLevelScrubber); + + // + // Preexisting channel and memory allocation checks should be done after + // all buffers(like global Gr buffers) and pre-created channels(like scrubber, watchdog etc.) + // are destroyed. + // + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrCreateGPUInstanceCheck_HAL(pGpu, pKernelMIGManager, bMemoryPartitioningNeeded), + cleanup_createPartitionCheck); + + // Enable ctx buf pool before allocating any resources that uses it. + if (bMemoryPartitioningNeeded) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA, NV_TRUE); + } + + // Add the MIG-specific classes + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + gpuAddClassToClassDBByClassId(pGpu, AMPERE_SMC_PARTITION_REF)); + + if (rmStatus != NV_OK) + goto cleanup_addClassToClassDB; + + // Allocate handles for memory partitioning if needed + if (bMemoryPartitioningNeeded) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + memmgrAllocMIGMemoryAllocationInternalHandles(pGpu, pMemoryManager), + cleanup_memsysConfigL2EvictLast); + } + + // initialize pKernelFifo->pppRunlistBufMemDesc based on max possible # of runlists. 
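The block immediately below lays out a two-dimensional table (runlist by buffer index) as a single row-pointer allocation plus one contiguous allocation for all elements, then aims each row pointer at its slice. A generic version of the same layout is sketched here for reference; it is illustrative only and uses malloc/calloc in place of portMemAllocNonPaged.

#include <stdlib.h>

/*
 * Contiguous 2D allocation: one pointer table, one element block.
 * Free with: free(table[0]); free(table);
 */
static int **
alloc2d(size_t rows, size_t cols)
{
    int **table = malloc(rows * sizeof(*table));
    size_t i;

    if (table == NULL)
        return NULL;

    table[0] = calloc(rows * cols, sizeof(**table));
    if (table[0] == NULL)
    {
        free(table);
        return NULL;
    }

    /* Point each row at its slice of the contiguous element block */
    for (i = 1; i < rows; i++)
        table[i] = table[0] + (cols * i);

    return table;
}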
+ { + MEMORY_DESCRIPTOR ***pppMemDesc = NULL; + NvU32 maxRunlists = kfifoGetMaxNumRunlists_HAL(pGpu, pKernelFifo); + NvU32 rowSize = sizeof(pppMemDesc) * maxRunlists; + NvU32 arrSize = rowSize * NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS; + NvU32 i; + + // Should not have already been initialized + NV_ASSERT(pKernelFifo->pppRunlistBufMemDesc == NULL); + + pppMemDesc = portMemAllocNonPaged(rowSize); + NV_ASSERT_OR_ELSE(pppMemDesc != NULL, rmStatus = NV_ERR_NO_MEMORY; goto cleanup_initialize_runlistBufMemDesc;); + portMemSet(pppMemDesc, 0, rowSize); + + *pppMemDesc = portMemAllocNonPaged(arrSize); + NV_ASSERT_OR_ELSE(*pppMemDesc != NULL, rmStatus = NV_ERR_NO_MEMORY; goto cleanup_initialize_runlistBufMemDesc;); + portMemSet(*pppMemDesc, 0, arrSize); + + // Set up pointers for the 2D array + for (i = 0; i < maxRunlists; i++) + { + pppMemDesc[i] = *pppMemDesc + (NV2080_CTRL_INTERNAL_FIFO_MAX_RUNLIST_BUFFERS * i); + } + + pKernelFifo->pppRunlistBufMemDesc = pppMemDesc; + } + + // + // Populate static GPU instance memory config which will be used to manage + // GPU instance memory + // + if (!IsAMODEL(pGpu)) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NV_ASSERT_OK_OR_RETURN(kmemsysPopulateMIGGPUInstanceMemConfig_HAL(pGpu, pKernelMemorySystem)); + } + } + else + { + if (bMemoryPartitioningNeeded) + { + memmgrFreeMIGMemoryAllocationInternalHandles(pGpu, pMemoryManager); + } + +cleanup_initialize_runlistBufMemDesc: + + if (pKernelFifo->pppRunlistBufMemDesc != NULL) + { + portMemFree(*(pKernelFifo->pppRunlistBufMemDesc)); + portMemFree(pKernelFifo->pppRunlistBufMemDesc); + } + + pKernelFifo->pppRunlistBufMemDesc = NULL; + +cleanup_memsysConfigL2EvictLast: + +cleanup_addClassToClassDB: + // Delete the MIG GR classes as MIG is disabled + NV_ASSERT_OK( + gpuDeleteClassFromClassDBByClassId(pGpu, AMPERE_SMC_PARTITION_REF)); + + // Disable ctx buf pool after freeing any resources that uses it. + pGpu->setProperty(pGpu, PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA, NV_FALSE); + + // + // HACK: GSP-RM always enables/disables LCEs during MIG enable/disable. + // Client-RM must always follow it to update its settings accordingly, + // so it should only call it for MIG disable (and not as part of MIG + // enable). + // + if (!bEnable) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kmigmgrEnableAllLCEs(pGpu, pKernelMIGManager, NV_FALSE)); + } + +cleanup_createPartitionCheck: + if (!bUnload) + { + // Init top level scrubber if it existed before + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + memmgrInitSavedTopLevelScrubber(pGpu, pMemoryManager)); + } +cleanup_destroyTopLevelScrubber: + + if (!bUnload) + { + KernelGraphics *pKGr = GPU_GET_KERNEL_GRAPHICS(pGpu, 0); + + // Since MIG is now disabled, reconfigure GR0 in legacy mode + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(rmStatus, + kgraphicsLoadStaticInfo(pGpu, pKGr, KMIGMGR_SWIZZID_INVALID)); + NV_ASSERT_OK( + kmigmgrRestoreWatchdog(pGpu, pKernelMIGManager)); + } +cleanup_disableWatchdog: + + goto done; + } + +done: + return rmStatus; +} + +/*! 
+ * @brief Function to create or destroy GPU instance + * + * @param[IN] pGpu + * @param[IN] pKernelMIGManager + * @param[OUT] pSwizzId Output swizzId allocated for this gpu instance + * @param[IN] params Gpu instance creation parameters + * @param[IN] bValid Flag stating if gpu instance is created or destroyed + * @param[IN] bCreateCap Flag stating if MIG capabilities needs to be created + */ +NV_STATUS +kmigmgrCreateGPUInstance_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NvU32 *pSwizzId, + KMIGMGR_CREATE_GPU_INSTANCE_PARAMS params, + NvBool bValid, + NvBool bCreateCap +) +{ + NV_STATUS rmStatus = NV_OK; + + // If making a gpu instance valid, memory should be allocated accordingly + if (bValid) + { + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = NULL; + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NvU32 engineType; + + // + // Determine SwizzID for this gpu instance. If this isn't a restore, this + // has already been determined by physical RM. + // + if (params.type == KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_RESTORE) + { + NvU32 swizzId = params.inst.restore.pGPUInstanceSave->swizzId; + NV_ASSERT_OR_RETURN(!kmigmgrIsSwizzIdInUse(pGpu, pKernelMIGManager, swizzId), + NV_ERR_INVALID_STATE); + *pSwizzId = swizzId; + } + + // + // HACK: GSP-RM updated the PCE-LCE mappings while setting MIG state. + // The Client-RM hasn't had an opportunity to refresh its mappings + // yet until the first gpu instance creation, so do it now. + // + if ((pKernelMIGManager->swizzIdInUseMask == 0x0) && IS_GSP_CLIENT(pGpu)) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrEnableAllLCEs(pGpu, pKernelMIGManager, NV_TRUE), invalidate); + } + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrSetGPUInstanceInfo(pGpu, pKernelMIGManager, *pSwizzId, params), invalidate); + + // Mark swizzId as "in-use" in cached mask + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrSetSwizzIdInUse(pGpu, pKernelMIGManager, *pSwizzId), invalidate); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager, *pSwizzId, &pKernelMIGGpuInstance), invalidate); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrAllocGPUInstanceHandles(pGpu, *pSwizzId, pKernelMIGGpuInstance), invalidate); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrInitGPUInstanceBufPools(pGpu, pKernelMIGManager, pKernelMIGGpuInstance), invalidate); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, + kmigmgrCreateGPUInstanceRunlists_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance), invalidate); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + kmemsysInitMIGMemoryPartitionTable_HAL(pGpu, pKernelMemorySystem), invalidate); + + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + KernelGraphics *pKernelGraphics; + NvU32 localEngineType; + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, engineIdx); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + kmigmgrGetGlobalToLocalEngineType(pGpu, + pKernelMIGManager, + kmigmgrMakeGIReference(pKernelMIGGpuInstance), + engineType, + &localEngineType), + invalidate); + + fecsSetRoutingInfo(pGpu, + pKernelGraphics, + pKernelMIGGpuInstance->instanceHandles.hClient, + pKernelMIGGpuInstance->instanceHandles.hSubdevice, + NV2080_ENGINE_TYPE_GR_IDX(localEngineType)); + } + FOR_EACH_IN_BITVECTOR_END(); + + // Init gpu instance pool for page table mem + 
NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrInitGPUInstancePool(pGpu, pKernelMIGManager, pKernelMIGGpuInstance), invalidate); + + // Init gpu instance scrubber + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrInitGPUInstanceScrubber(pGpu, pKernelMIGManager, pKernelMIGGpuInstance), invalidate); + + // + // Register gpu instance with the capability framework only if it explicitly + // requested. Otherwise, we rely on the persistent state. + // + if (bCreateCap) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + osRmCapRegisterSmcPartition(pGpu->pOsRmCaps, &pKernelMIGGpuInstance->pOsRmCaps, + pKernelMIGGpuInstance->swizzId), invalidate); + } + } + else + { + NV_PRINTF(LEVEL_INFO, "Invalidating swizzId - %d.\n", *pSwizzId); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrInvalidateGPUInstance(pGpu, pKernelMIGManager, *pSwizzId, NV_FALSE)); + } + + return rmStatus; + +invalidate: + kmigmgrInvalidateGPUInstance(pGpu, pKernelMIGManager, *pSwizzId, NV_FALSE); + + return rmStatus; +} + +/* + * @brief Init per-gpu instance memory pool so that memory for client page tables + * can be allocated from this memory pool + */ +NV_STATUS +kmigmgrInitGPUInstancePool_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const GMMU_FMT *pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_DEFAULT, 0); + NvU32 version; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance != NULL, NV_ERR_INVALID_ARGUMENT); + + if (!memmgrIsPmaInitialized(pMemoryManager) || + !memmgrAreClientPageTablesPmaManaged(pMemoryManager)) + { + return NV_OK; + } + + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pKernelMIGGpuInstance->swizzId)) + return NV_OK; + + NV_ASSERT_OR_RETURN(pFmt != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance->pMemoryPartitionHeap != NULL, NV_ERR_INVALID_STATE); + + version = ((pFmt->version == GMMU_FMT_VERSION_1) ? POOL_CONFIG_GMMU_FMT_1 : POOL_CONFIG_GMMU_FMT_2); + return rmMemPoolSetup((void*)&pKernelMIGGpuInstance->pMemoryPartitionHeap->pmaObject, &pKernelMIGGpuInstance->pPageTableMemPool, version); +} + +/* + * @brief Initializes ctx buf pools for runlist buffer and GR global ctx buffers + * for engines that belong to this gpu instance. + */ +NV_STATUS +kmigmgrInitGPUInstanceBufPools_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + Heap *pHeap; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance != NULL, NV_ERR_INVALID_ARGUMENT); + pHeap = pKernelMIGGpuInstance->pMemoryPartitionHeap; + NV_ASSERT_OR_RETURN(pHeap != NULL, NV_ERR_INVALID_STATE); + + if (!ctxBufPoolIsSupported(pGpu)) + return NV_OK; + + // + // We have to drop GPU lock before making allocations from PMA + // as RM allocations can trigger UVM evictions. + // However, in this case we can skip dropping GPU lock as gpu instance PMA + // isn't visible to UVM yet. + // This is just a sanity check to make sure this assumption is correct and + // allocation from PMA cannot trigger UVM evictions. 
+ // + if (memmgrIsPmaInitialized(pMemoryManager)) + { + NvU64 freeSpace, totalSpace; + pmaGetFreeMemory(&pHeap->pmaObject, &freeSpace); + pmaGetTotalMemory(&pHeap->pmaObject, &totalSpace); + if (freeSpace != totalSpace) + { + NV_PRINTF(LEVEL_ERROR, "Assumption that PMA is empty at this time is broken\n"); + NV_PRINTF(LEVEL_ERROR, "free space = 0x%llx bytes total space = 0x%llx bytes\n", + freeSpace, totalSpace); + NV_PRINTF(LEVEL_ERROR, "This means PMA allocations may trigger UVM evictions at this point causing deadlocks!\n"); + return NV_ERR_INVALID_STATE; + } + } + + NV_ASSERT_OK_OR_RETURN(kmigmgrInitGPUInstanceRunlistBufPools(pGpu, pKernelMIGManager, pKernelMIGGpuInstance)); + NV_ASSERT_OK_OR_RETURN(kmigmgrInitGPUInstanceGrBufPools(pGpu, pKernelMIGManager, pKernelMIGGpuInstance)); + return NV_OK; +} + +/* + * Initializes the runlist buffer pools for engines that belong to this gpu instance + * Also reserves memory for runlist buffers into these pools. + * later, runlists will be allocated from these pools. + */ +NV_STATUS +kmigmgrInitGPUInstanceRunlistBufPools_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + NvU32 engineType; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CTX_BUF_INFO runlistBufInfo[NUM_BUFFERS_PER_RUNLIST] = {0}; + NvU64 rlSize; + NvU64 rlAlign; + NvU32 swizzId; + NvU32 i; + NvU32 runlistId; + Heap *pHeap; + + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance != NULL, NV_ERR_INVALID_ARGUMENT); + swizzId = pKernelMIGGpuInstance->swizzId; + pHeap = pKernelMIGGpuInstance->pMemoryPartitionHeap; + NV_ASSERT_OR_RETURN(pHeap != NULL, NV_ERR_INVALID_STATE); + + if (!kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, swizzId)) + return NV_OK; + + for (engineType = 0; engineType < NV2080_ENGINE_TYPE_LAST; engineType++) + { + if (!NV2080_ENGINE_TYPE_IS_VALID(engineType) || + !kmigmgrIsEnginePartitionable(pGpu, pKernelMIGManager, engineType) || + !kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, engineType, kmigmgrMakeGIReference(pKernelMIGGpuInstance))) + { + continue; + } + + // Get runlist ID for Engine type. + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, + ENGINE_INFO_TYPE_NV2080, engineType, + ENGINE_INFO_TYPE_RUNLIST, &runlistId)); + + // + // ctx buf pools only support HW runlists today + // we assume TSGs are supported for all runlists which is true for Ampere + // + for (i = 0; i < NUM_BUFFERS_PER_RUNLIST; i++) + { + NV_ASSERT_OK_OR_RETURN(kfifoGetRunlistBufInfo(pGpu, pKernelFifo, runlistId, NV_TRUE, + 0, &rlSize, &rlAlign)); + runlistBufInfo[i].size = rlSize; + runlistBufInfo[i].align = rlAlign; + runlistBufInfo[i].attr = RM_ATTR_PAGE_SIZE_DEFAULT; + runlistBufInfo[i].bContig = NV_TRUE; + } + + NV_ASSERT_OK_OR_RETURN(ctxBufPoolInit(pGpu, pHeap, &pKernelFifo->pRunlistBufPool[engineType])); + NV_ASSERT_OR_RETURN(pKernelFifo->pRunlistBufPool[engineType] != NULL, NV_ERR_INVALID_STATE); + + // + // Skip scrubber for runlist buffer alloctions since gpu instance scrubber is not setup yet + // and it will be destroyed before deleting the runlist buffer pool. + // + ctxBufPoolSetScrubSkip(pKernelFifo->pRunlistBufPool[engineType], NV_TRUE); + NV_ASSERT_OK_OR_RETURN(ctxBufPoolReserve(pGpu, pKernelFifo->pRunlistBufPool[engineType], &runlistBufInfo[0], NUM_BUFFERS_PER_RUNLIST)); + } + + return NV_OK; +} + +/* + * @brief Initializes gr buffer pools for all GR engines that belong to this gpu instance + * Also reserves memory for global GR buffers into these pools. 
+ */ +NV_STATUS +kmigmgrInitGPUInstanceGrBufPools_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance +) +{ + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + GR_GLOBALCTX_BUFFER bufId; + NvU32 bufCount; + CTX_BUF_INFO globalCtxBufInfo[GR_GLOBALCTX_BUFFER_COUNT]; + Heap *pHeap = NULL; + NV_STATUS rmStatus = NV_OK; + NvU32 engineType; + + NV_ASSERT_OR_RETURN(pKernelMIGGpuInstance != NULL, NV_ERR_INVALID_ARGUMENT); + pHeap = pKernelMIGGpuInstance->pMemoryPartitionHeap; + NV_ASSERT_OR_RETURN(pHeap != NULL, NV_ERR_INVALID_STATE); + + bufCount = 0; + FOR_EACH_IN_ENUM(GR_GLOBALCTX_BUFFER, bufId) + { + if (kgrmgrIsGlobalCtxBufSupported(bufId, NV_FALSE)) + { + const CTX_BUF_INFO *pBufInfo = kgrmgrGetGlobalCtxBufInfo(pGpu, pKernelGraphicsManager, bufId); + NV_ASSERT_OR_RETURN(pBufInfo != NULL, NV_ERR_INVALID_STATE); + + globalCtxBufInfo[bufCount] = *pBufInfo; + + if ((bufId == GR_GLOBALCTX_BUFFER_FECS_EVENT) || (bufId == GR_GLOBAL_BUFFER_GLOBAL_PRIV_ACCESS_MAP)) + { + globalCtxBufInfo[bufCount].bContig = NV_TRUE; + } + else if ((bufId == GR_GLOBALCTX_BUFFER_PRIV_ACCESS_MAP) || (bufId == GR_GLOBALCTX_BUFFER_UNRESTRICTED_PRIV_ACCESS_MAP)) + { + globalCtxBufInfo[bufCount].bContig = gpuIsClientRmAllocatedCtxBufferEnabled(pGpu); + } + kgrmgrSetGlobalCtxBufInfo(pGpu, pKernelGraphicsManager, bufId, + globalCtxBufInfo[bufCount].size, + globalCtxBufInfo[bufCount].align, + globalCtxBufInfo[bufCount].attr, + globalCtxBufInfo[bufCount].bContig); + bufCount++; + } + } + FOR_EACH_IN_ENUM_END; + + FOR_EACH_IN_BITVECTOR(&pKernelMIGGpuInstance->resourceAllocation.engines, engineType) + { + NvU32 engineIdx; + KernelGraphics *pKernelGraphics; + CTX_BUF_POOL_INFO *pGrCtxBufPool; + + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + engineIdx = NV2080_ENGINE_TYPE_GR_IDX(engineType); + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, engineIdx); + + NV_ASSERT_OK_OR_GOTO(rmStatus, + kgraphicsInitCtxBufPool(pGpu, pKernelGraphics, pHeap), + failed); + + pGrCtxBufPool = kgraphicsGetCtxBufPool(pGpu, pKernelGraphics); + + if (pGrCtxBufPool == NULL) + { + rmStatus = NV_ERR_INVALID_STATE; + goto failed; + } + + // + // Skip scrubber for GR buffer alloctions since gpu instance scrubber is not setup yet + // and it will be destroyed before deleting the GR buffer pool. + // + ctxBufPoolSetScrubSkip(pGrCtxBufPool, NV_TRUE); + NV_ASSERT_OK_OR_GOTO( + rmStatus, + ctxBufPoolReserve(pGpu, pGrCtxBufPool, &globalCtxBufInfo[0], bufCount), + failed); + } + FOR_EACH_IN_BITVECTOR_END(); + + return NV_OK; + +failed: + kmigmgrDestroyGPUInstanceGrBufPools(pGpu, pKernelMIGManager, pKernelMIGGpuInstance); + return rmStatus; +} + +/*! + * @brief Save MIG instance topology to persistence, if available. + */ +NV_STATUS +kmigmgrSaveToPersistence_IMPL +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager +) +{ + GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY *pTopologySave = NULL; + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGPUInstance; + NvU32 gpcIdx; + NvU32 savedGIIdx; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, + gpumgrGetSystemMIGInstanceTopo(gpuGetDBDF(pGpu), &pTopologySave), + NV_OK); + + // Clear existing topology, if any. + portMemSet(pTopologySave->saveGI, 0, sizeof(pTopologySave->saveGI)); + + // If there are no instances then don't bother checking anything. 
+ NV_CHECK_OR_RETURN(LEVEL_SILENT, IS_MIG_IN_USE(pGpu), NV_OK); + + savedGIIdx = 0; + FOR_EACH_VALID_GPU_INSTANCE(pGpu, pKernelMIGManager, pKernelMIGGPUInstance) + { + GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave = &pTopologySave->saveGI[savedGIIdx]; + + pGPUInstanceSave->bValid = NV_TRUE; + pGPUInstanceSave->swizzId = pKernelMIGGPUInstance->swizzId; + pGPUInstanceSave->pOsRmCaps = pKernelMIGGPUInstance->pOsRmCaps; + pGPUInstanceSave->giInfo.partitionFlags = pKernelMIGGPUInstance->partitionFlag; + bitVectorToRaw(&pKernelMIGGPUInstance->resourceAllocation.engines, + pGPUInstanceSave->giInfo.enginesMask, sizeof(pGPUInstanceSave->giInfo.enginesMask)); + for (gpcIdx = 0; gpcIdx < pKernelMIGGPUInstance->resourceAllocation.gpcCount; ++gpcIdx) + { + pGPUInstanceSave->giInfo.gpcMask |= NVBIT32(pKernelMIGGPUInstance->resourceAllocation.gpcIds[gpcIdx]); + } + pGPUInstanceSave->giInfo.veidOffset = pKernelMIGGPUInstance->resourceAllocation.veidOffset; + pGPUInstanceSave->giInfo.veidCount = pKernelMIGGPUInstance->resourceAllocation.veidCount; + + NV_ASSERT_OK_OR_RETURN(kmigmgrSaveComputeInstances(pGpu, pKernelMIGManager, pKernelMIGGPUInstance, + pGPUInstanceSave->saveCI)); + + ++savedGIIdx; + } + FOR_EACH_VALID_GPU_INSTANCE_END(); + + return NV_OK; +} + +// Control call for getting active gpu instance Ids +NV_STATUS +subdeviceCtrlCmdGpuGetActivePartitionIds_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ACTIVE_PARTITION_IDS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvU64 validSwizzIdMask; + + pParams->partitionCount = 0; + + ct_assert(NV2080_CTRL_GPU_MAX_PARTITIONS == KMIGMGR_MAX_GPU_INSTANCES); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if ((pKernelMIGManager == NULL) || !pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED)) + { + NV_PRINTF(LEVEL_INFO, "MIG not supported on this GPU.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (!IS_MIG_ENABLED(pGpu)) + { + NV_PRINTF(LEVEL_INFO, "MIG Mode has not been turned on.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // + // We can always have device_monitoring swizzID available in system even without + // GPU split into MIG instances + // + pParams->swizzId[pParams->partitionCount++] = NVC637_DEVICE_LEVEL_SWIZZID; + + // Populate all active swizzIDs + validSwizzIdMask = pKernelMIGManager->swizzIdInUseMask; + while(validSwizzIdMask != 0x0) + { + pParams->swizzId[pParams->partitionCount] = portUtilCountTrailingZeros64(validSwizzIdMask); + validSwizzIdMask &= ~NVBIT64(pParams->swizzId[pParams->partitionCount]); + pParams->partitionCount++; + } + + return NV_OK; +} + +// +// Control call to determine the number of gpu instances of the given size which +// can still be created, given the current configuration of the GPU. 
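subdeviceCtrlCmdGpuGetActivePartitionIds above converts swizzIdInUseMask into a list of swizzIds by repeatedly taking the lowest set bit with portUtilCountTrailingZeros64 and then clearing it. A standalone sketch of the same enumeration, substituting the GCC/Clang __builtin_ctzll builtin for the portUtil helper:

#include <stdint.h>
#include <stdio.h>

/* Enumerate the set bits of a 64-bit in-use mask, lowest first. */
static uint32_t collect_ids(uint64_t inUseMask, uint32_t *ids, uint32_t maxIds)
{
    uint32_t count = 0;

    while (inUseMask != 0 && count < maxIds)
    {
        uint32_t id = (uint32_t)__builtin_ctzll(inUseMask);
        ids[count++] = id;
        inUseMask &= ~(1ull << id);   /* clear the bit we just reported */
    }
    return count;
}

int main(void)
{
    uint32_t ids[64];
    uint32_t n = collect_ids(0x19ull, ids, 64);  /* bits 0, 3, 4 set */

    for (uint32_t i = 0; i < n; i++)
        printf("active id: %u\n", ids[i]);
    return 0;
}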
+// +NV_STATUS +subdeviceCtrlCmdGpuGetPartitionCapacity_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PARTITION_CAPACITY_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_CHECK_OR_RETURN(LEVEL_INFO, IS_MIG_ENABLED(pGpu), NV_ERR_NOT_SUPPORTED); + + if (IS_VIRTUAL(pGpu)) + { + // This is not supported in legacy MIG vGPU policy + if (kmigmgrUseLegacyVgpuPolicy(pGpu, pKernelMIGManager)) + return NV_ERR_NOT_SUPPORTED; + + if (!pParams->bStaticInfo) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + // Only expose current capacity to admins or capable clients. + if (!rmclientIsCapableOrAdminByHandle(hClient, + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (!kmigmgrIsGPUInstanceCombinationValid_HAL(pGpu, pKernelMIGManager, pParams->partitionFlag) || + !FLD_TEST_DRF(2080_CTRL_GPU, _PARTITION_FLAG, _COMPUTE_SIZE, _FULL, pParams->partitionFlag)) + { + pParams->partitionCount = 0; + pParams->availableSpansCount = 0; + } + else + { + if (IS_MIG_IN_USE(pGpu)) + { + pParams->partitionCount = 0; + pParams->availableSpansCount = 0; + } + else + { + pParams->partitionCount = 1; + pParams->availableSpansCount = 1; + pParams->availableSpans[0].lo = NV_RANGE_EMPTY.lo; + pParams->availableSpans[0].hi = NV_RANGE_EMPTY.hi; + } + } + } + + if (!kmigmgrIsGPUInstanceCombinationValid_HAL(pGpu, pKernelMIGManager, pParams->partitionFlag) || + !FLD_TEST_DRF(2080_CTRL_GPU, _PARTITION_FLAG, _COMPUTE_SIZE, _FULL, pParams->partitionFlag)) + { + pParams->totalPartitionCount = 0; + pParams->totalSpansCount = 0; + } + else + { + pParams->totalPartitionCount = 1; + pParams->totalSpansCount = 1; + pParams->totalSpans[0].lo = NV_RANGE_EMPTY.lo; + pParams->totalSpans[0].hi = NV_RANGE_EMPTY.hi; + } + + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; + + return status; +} + +// +// Control call to provide information about gpu instances which can be created on +// this GPU. +// +NV_STATUS +subdeviceCtrlCmdGpuDescribePartitions_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_DESCRIBE_PARTITIONS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED)) + { + NV_PRINTF(LEVEL_INFO, "MIG not supported on this GPU.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (!IS_MIG_ENABLED(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "Entered MIG API with MIG disabled.\n"); + } + + return kmigmgrDescribeGPUInstances(pGpu, pKernelMIGManager, pParams); +} + +// +// Control call to set the global partitioning mode for this GPU. This call may +// require a PF-FLR to be performed on the GPU before work may be submitted on +// the GPU. 
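The capacity logic above keys off individual fields packed into the 32-bit partitionFlag word through the driver's DRF macros (for example FLD_TEST_DRF(..., _COMPUTE_SIZE, _FULL, ...)). A plain-C sketch of the same shift-and-mask idea; the field positions below are made up for illustration and do not reflect the real NV2080 layout:

#include <stdint.h>
#include <stdio.h>

/* Made-up field layout: COMPUTE_SIZE occupies bits 4..7 of the flag word. */
#define COMPUTE_SIZE_SHIFT  4u
#define COMPUTE_SIZE_MASK   0xFu
#define COMPUTE_SIZE_FULL   0u

static uint32_t field_get(uint32_t flags, uint32_t shift, uint32_t mask)
{
    return (flags >> shift) & mask;
}

static int field_test(uint32_t flags, uint32_t shift, uint32_t mask, uint32_t value)
{
    return field_get(flags, shift, mask) == value;
}

int main(void)
{
    uint32_t partitionFlag = (COMPUTE_SIZE_FULL & COMPUTE_SIZE_MASK) << COMPUTE_SIZE_SHIFT;

    if (field_test(partitionFlag, COMPUTE_SIZE_SHIFT, COMPUTE_SIZE_MASK, COMPUTE_SIZE_FULL))
        printf("full-GPU compute size requested\n");
    else
        printf("smaller compute size requested\n");
    return 0;
}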
+// +NV_STATUS +subdeviceCtrlCmdGpuSetPartitioningMode_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_SET_PARTITIONING_MODE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (IS_VIRTUAL(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + if ((pKernelMIGManager == NULL) || !kmigmgrIsMIGSupported(pGpu, pKernelMIGManager)) + { + NV_PRINTF(LEVEL_INFO, "MIG not supported on this GPU.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_PARTITIONING_MODE, + pParams, + sizeof(*pParams))); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrSetPartitioningMode(pGpu, pKernelMIGManager)); + + return NV_OK; +} + +/*! + * @brief Process a single request to create / destroy a gpu instance. + * Handles enabling / disabling MIG mode on entry/exit. + */ +static NV_STATUS +_kmigmgrProcessGPUInstanceEntry +( + OBJGPU *pGpu, + KernelMIGManager *pKernelMIGManager, + NV2080_CTRL_GPU_SET_PARTITION_INFO *pEntry +) +{ + NV_STATUS status = NV_OK; + NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS *pParams = portMemAllocNonPaged(sizeof(*pParams)); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pParams != NULL, NV_ERR_NO_MEMORY); + + pParams->partitionCount = 1; + pParams->partitionInfo[0] = *pEntry; + + // + // Mirrored GPU Instance Management: + // 1: CPU enable MIG + // 2: GSP enable MIG + // 3: GSP create gpu instance + // 4: CPU create gpu instance + // 5: CPU delete gpu instance + // 6: GSP delete gpu instance + // 7: GSP disable MIG + // 8: CPU disable MIG + // + + // Step 1, 2: If this is the first gpu instance, enable MIG + if (pEntry->bValid && (pKernelMIGManager->swizzIdInUseMask == 0x0)) + { + NvBool bMemoryPartitioningRequested = kmigmgrIsMemoryPartitioningRequested_HAL(pGpu, pKernelMIGManager, pEntry->partitionFlag); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrSetMIGState(pGpu, pKernelMIGManager, bMemoryPartitioningRequested, NV_TRUE, NV_FALSE), + cleanup_params); + } + + if (pEntry->bValid) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_GPU_INSTANCES, + pParams, + sizeof(*pParams)), + cleanup_smc_state); + pEntry->swizzId = pParams->partitionInfo[0].swizzId; + } + + if (IS_GSP_CLIENT(pGpu)) + { + KMIGMGR_CREATE_GPU_INSTANCE_PARAMS request = + { + .type = KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_REQUEST, + .inst.request.partitionFlag = pEntry->partitionFlag, + .inst.request.bUsePlacement = + FLD_TEST_REF(NV2080_CTRL_GPU_PARTITION_FLAG_PLACE_AT_SPAN, _ENABLE, + pEntry->partitionFlag), + .inst.request.placement = rangeMake(pEntry->placement.lo, pEntry->placement.hi) + }; + request.inst.request.partitionFlag = FLD_SET_DRF(2080_CTRL_GPU, _PARTITION_FLAG, _PLACE_AT_SPAN, _DISABLE, + request.inst.request.partitionFlag); + + // Step 3, 4, 5, 6: Create / delete gpu instance + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrCreateGPUInstance(pGpu, pKernelMIGManager, &pEntry->swizzId, request, pEntry->bValid, + NV_TRUE /* create MIG capabilities */), + cleanup_rpc); + } + + if 
(!pEntry->bValid) + { + NV_ASSERT_OK_OR_GOTO(status, + pRmApi->Control(pRmApi, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_GPU_INSTANCES, + pParams, + sizeof(*pParams)), + cleanup_params); + } + + // Step 7, 8: If this is the last gpu instance to go, disable MIG + if (pKernelMIGManager->swizzIdInUseMask == 0x0) + { + NvBool bMemoryPartitioningNeeded = kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pParams->partitionInfo[0].swizzId); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrSetMIGState(pGpu, pKernelMIGManager, bMemoryPartitioningNeeded, NV_FALSE, NV_FALSE), + cleanup_params); + } + + portMemFree(pParams); + return status; + +cleanup_rpc: + if (pEntry->bValid) + { + // Reuse the same RPC information we prepared earlier, but flip the bValid bit + pParams->partitionInfo[0].bValid = NV_FALSE; + NV_ASSERT_OK(pRmApi->Control(pRmApi, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_GPU_INSTANCES, + pParams, + sizeof(*pParams))); + } + +cleanup_smc_state: + if (pEntry->bValid && (pKernelMIGManager->swizzIdInUseMask == 0x0)) + { + NvBool bMemoryPartitioningRequested = kmigmgrIsMemoryPartitioningRequested_HAL(pGpu, pKernelMIGManager, pEntry->partitionFlag); + + NV_ASSERT_OK( + kmigmgrSetMIGState(pGpu, pKernelMIGManager, bMemoryPartitioningRequested, NV_FALSE, NV_FALSE)); + } + +cleanup_params: + portMemFree(pParams); + return status; +} + +/*! + * @brief Control call for dividing GPU into requested gpu instances + * + * @returns NV_OK if successful. + * NV_ERR_INVALID_ARGUMENT if parameter is not found + * NV_ERR_NOT_SUPPORTED if parameter is not supported + * + */ +NV_STATUS +subdeviceCtrlCmdGpuSetPartitions_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS *pParams +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 i; + NvU32 j; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (!rmclientIsCapableOrAdminByHandle(hClient, + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_ERROR, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + NV_CHECK_OR_RETURN(LEVEL_INFO, IS_MIG_ENABLED(pGpu), NV_ERR_NOT_SUPPORTED); + + // Sanity checks + if (pParams->partitionCount > KMIGMGR_MAX_GPU_INSTANCES) + { + return NV_ERR_INVALID_ARGUMENT; + } + else if (0 == pParams->partitionCount) + { + return NV_WARN_NOTHING_TO_DO; + } + + for (i = 0; i < pParams->partitionCount; i++) + { + if (pParams->partitionInfo[i].bValid) + { + NvU32 partitionFlag = FLD_SET_DRF(2080_CTRL_GPU, _PARTITION_FLAG, _PLACE_AT_SPAN, _DISABLE, + pParams->partitionInfo[i].partitionFlag); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + kmigmgrIsGPUInstanceCombinationValid_HAL(pGpu, pKernelMIGManager, partitionFlag), + NV_ERR_NOT_SUPPORTED); + } + } + + // This is not supported in vGPU + if (IS_VIRTUAL(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + for (i = 0; i < pParams->partitionCount; i++) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_INFO, + _kmigmgrProcessGPUInstanceEntry(pGpu, pKernelMIGManager, &pParams->partitionInfo[i]), + cleanup); + } + + // + // Generate a subdevice event stating something has changed in GPU instance 
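If any entry in the batch fails, the cleanup path below walks back over the entries already processed and re-submits each one with its bValid sense inverted, turning the creates back into deletes. A compact standalone sketch of that apply-then-unwind pattern; the apply function and failure condition are hypothetical, not the RM control path:

#include <stdio.h>

typedef struct {
    int valid;   /* 1 = create, 0 = destroy; mirrors bValid */
    int id;
} Entry;

/* Hypothetical stand-in for processing one entry; fails on id 2. */
static int apply_entry(Entry *e)
{
    if (e->valid && e->id == 2)
        return -1;
    printf("%s instance %d\n", e->valid ? "created" : "destroyed", e->id);
    return 0;
}

static int apply_batch(Entry *entries, int count)
{
    int i;

    for (i = 0; i < count; i++)
    {
        if (apply_entry(&entries[i]) != 0)
            goto unwind;
    }
    return 0;

unwind:
    /* Undo entries [0, i) by inverting the requested operation. */
    for (int j = 0; j < i; j++)
    {
        entries[j].valid = !entries[j].valid;
        (void)apply_entry(&entries[j]);
        entries[j].valid = !entries[j].valid;
    }
    return -1;
}

int main(void)
{
    Entry batch[] = { {1, 0}, {1, 1}, {1, 2} };
    return apply_batch(batch, 3) == 0 ? 0 : 1;
}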
+ // config. Clients currently do not care about changes and their scope + // + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_SMC_CONFIG_UPDATE, NULL, 0, 0, 0); + + return rmStatus; + +cleanup: + // Invalidate gpu instances which has been created + for (j = 0; j < i; j++) + { + pParams->partitionInfo[i].bValid = !pParams->partitionInfo[i].bValid; + NV_ASSERT_OK( + _kmigmgrProcessGPUInstanceEntry(pGpu, pKernelMIGManager, &pParams->partitionInfo[i])); + pParams->partitionInfo[i].bValid = !pParams->partitionInfo[i].bValid; + } + + return rmStatus; +} + +// Control call for getting specific gpu instance info +NV_STATUS +subdeviceCtrlCmdGpuGetPartitions_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS *pParams +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 i; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + NvU64 validSwizzIdMask; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NV2080_CTRL_GPU_GET_PARTITIONS_PARAMS rpcParams = *pParams; + + ct_assert(NV2080_CTRL_GPU_MAX_PARTITIONS == KMIGMGR_MAX_GPU_INSTANCES); + ct_assert(NV2080_CTRL_GPU_MAX_GPC_PER_SMC == KGRMGR_MAX_GPC); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (!IS_VIRTUAL(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + pRmApi->Control(pRmApi, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_GET_GPU_INSTANCES, + &rpcParams, + sizeof(rpcParams))); + } + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED)) + { + NV_PRINTF(LEVEL_INFO, "MIG not supported on this GPU.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if (!IS_MIG_ENABLED(pGpu)) + NV_PRINTF(LEVEL_INFO, "Entered MIG API with MIG disabled.\n"); + + if (!IS_MIG_IN_USE(pGpu)) + { + // set the valid gpu instance count to "0" and return + pParams->validPartitionCount = 0; + return NV_OK; + } + + // See if all gpu instances are requested and get info for all gpu instance + if (pParams->bGetAllPartitionInfo) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (!rmclientIsCapableOrAdminByHandle(hClient, + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_ERROR, + "Non privileged client requesting global gpu instance info\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Take all swizzId's for consideration + validSwizzIdMask = pKernelMIGManager->swizzIdInUseMask; + } + else + { + rmStatus = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref); + if (rmStatus != NV_OK) + { + // set the valid gpu instance count to "0" and return + pParams->validPartitionCount = 0; + return NV_OK; + } + + validSwizzIdMask = NVBIT64(ref.pKernelMIGGpuInstance->swizzId); + } + + pParams->validPartitionCount = 0; + for (i = 0; i < KMIGMGR_MAX_GPU_INSTANCES; i++) + { + MIG_RESOURCE_ALLOCATION *pResourceAllocation; + NvU32 swizzId = portUtilCountTrailingZeros64(validSwizzIdMask); + NvU32 j; + NvU32 engineType; + + rmStatus = kmigmgrGetGPUInstanceInfo(pGpu, pKernelMIGManager, swizzId, &ref.pKernelMIGGpuInstance); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to get gpu instance info for swizzId - %d\n", + swizzId); + return rmStatus; + } + + pResourceAllocation = 
&ref.pKernelMIGGpuInstance->resourceAllocation; + + pParams->queryPartitionInfo[i].partitionFlag = ref.pKernelMIGGpuInstance->partitionFlag; + pParams->queryPartitionInfo[i].swizzId = ref.pKernelMIGGpuInstance->swizzId; + pParams->queryPartitionInfo[i].grEngCount = + kmigmgrCountEnginesOfType(&pResourceAllocation->engines, NV2080_ENGINE_TYPE_GR(0)); + pParams->queryPartitionInfo[i].smCount = ref.pKernelMIGGpuInstance->pProfile->smCount; + pParams->queryPartitionInfo[i].veidCount = pResourceAllocation->veidCount; + pParams->queryPartitionInfo[i].ceCount = + kmigmgrCountEnginesOfType(&pResourceAllocation->engines, NV2080_ENGINE_TYPE_COPY(0)); + pParams->queryPartitionInfo[i].gpcCount = pResourceAllocation->gpcCount; + pParams->queryPartitionInfo[i].nvDecCount = + kmigmgrCountEnginesOfType(&pResourceAllocation->engines, NV2080_ENGINE_TYPE_NVDEC(0)); + pParams->queryPartitionInfo[i].nvEncCount = + kmigmgrCountEnginesOfType(&pResourceAllocation->engines, NV2080_ENGINE_TYPE_NVENC(0)); + pParams->queryPartitionInfo[i].nvJpgCount = + kmigmgrCountEnginesOfType(&pResourceAllocation->engines, NV2080_ENGINE_TYPE_NVJPG); + pParams->queryPartitionInfo[i].nvOfaCount = + kmigmgrCountEnginesOfType(&pResourceAllocation->engines, NV2080_ENGINE_TYPE_OFA); + pParams->queryPartitionInfo[i].memSize = rangeLength(ref.pKernelMIGGpuInstance->memRange); + pParams->queryPartitionInfo[i].bValid = NV_TRUE; + + { + NV_ASSERT_OR_RETURN(rpcParams.queryPartitionInfo[i].bValid, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN( + pParams->queryPartitionInfo[i].swizzId == rpcParams.queryPartitionInfo[i].swizzId, + NV_ERR_INVALID_STATE); + + // Fill GPCs associated with every GR + j = 0; + FOR_EACH_IN_BITVECTOR(&pResourceAllocation->engines, engineType) + { + if (!NV2080_ENGINE_TYPE_IS_GR(engineType)) + continue; + + pParams->queryPartitionInfo[i].gpcsPerGr[j] = rpcParams.queryPartitionInfo[i].gpcsPerGr[j]; + pParams->queryPartitionInfo[i].veidsPerGr[j] = rpcParams.queryPartitionInfo[i].veidsPerGr[j]; + + j++; + } + FOR_EACH_IN_BITVECTOR_END(); + + // Take the value provided by physical + pParams->queryPartitionInfo[i].bPartitionError = rpcParams.queryPartitionInfo[i].bPartitionError; + pParams->queryPartitionInfo[i].span = rpcParams.queryPartitionInfo[i].span; + } + + ++pParams->validPartitionCount; + + validSwizzIdMask &= ~NVBIT64(swizzId); + if (validSwizzIdMask == 0) + { + break; + } + } + + return rmStatus; +} + +NV_STATUS +subdeviceCtrlCmdInternalKMIGmgrExportGPUInstance_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + // No gpu instances to export + if (!IS_MIG_IN_USE(pGpu)) + return NV_ERR_NOT_SUPPORTED; + + // An unprivileged client has no use case for import/export + if (!rmclientIsCapableOrAdminByHandle(RES_GET_CLIENT_HANDLE(pSubdevice), + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Guest RM does not support import/export + if (IS_VIRTUAL(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_EXPORT_GPU_INSTANCE, + pParams, + sizeof(*pParams))); + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdInternalKMIGmgrImportGPUInstance_IMPL +( + Subdevice *pSubdevice, + 
NV2080_CTRL_INTERNAL_KMIGMGR_IMPORT_EXPORT_GPU_INSTANCE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_MIG_SUPPORTED)) + return NV_ERR_NOT_SUPPORTED; + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + // An unprivileged client has no use case for import/export + if (!rmclientIsCapableOrAdminByHandle(RES_GET_CLIENT_HANDLE(pSubdevice), + NV_RM_CAP_SYS_SMC_CONFIG, + pCallContext->secInfo.privLevel)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Guest RM does not support import/export + if (IS_VIRTUAL(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (kmigmgrGetSwizzIdInUseMask(pGpu, pKernelMIGManager) == 0x0) + { + NvBool bMemoryPartitioningNeeded = kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pParams->swizzId); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrSetMIGState(pGpu, GPU_GET_KERNEL_MIG_MANAGER(pGpu), bMemoryPartitioningNeeded, NV_TRUE, NV_FALSE)); + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_IMPORT_GPU_INSTANCE, + pParams, + sizeof(*pParams)), + cleanup_mig_state); + + if (IS_GSP_CLIENT(pGpu)) + { + struct GPUMGR_SAVE_GPU_INSTANCE save; + KMIGMGR_CREATE_GPU_INSTANCE_PARAMS restore = + { + .type = KMIGMGR_CREATE_GPU_INSTANCE_PARAMS_TYPE_RESTORE, + .inst.restore.pGPUInstanceSave = &save, + }; + save.bValid = NV_TRUE; + save.swizzId = pParams->swizzId; + save.pOsRmCaps = NULL; + portMemCopy(&save.giInfo, sizeof(save.giInfo), &pParams->info, sizeof(pParams->info)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kmigmgrCreateGPUInstance(pGpu, pKernelMIGManager, &pParams->swizzId, restore, NV_TRUE, NV_FALSE), + cleanup_rpc); + } + + return NV_OK; + +cleanup_rpc: + { + NV2080_CTRL_GPU_SET_PARTITIONS_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.partitionCount = 1; + params.partitionInfo[0].bValid = NV_FALSE; + params.partitionInfo[0].swizzId = pParams->swizzId; + + NV_ASSERT_OK( + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_MIGMGR_SET_GPU_INSTANCES, + pParams, + sizeof(*pParams))); + } + +cleanup_mig_state: + if (kmigmgrGetSwizzIdInUseMask(pGpu, pKernelMIGManager) == 0x0) + { + NvBool bMemoryPartitioningNeeded = kmigmgrIsMemoryPartitioningNeeded_HAL(pGpu, pKernelMIGManager, pParams->swizzId); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrSetMIGState(pGpu, GPU_GET_KERNEL_MIG_MANAGER(pGpu), bMemoryPartitioningNeeded, NV_FALSE, NV_FALSE)); + } + + return status; +} + diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/mig_config_session.c b/src/nvidia/src/kernel/gpu/mig_mgr/mig_config_session.c new file mode 100644 index 000000000..14ab815a8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mig_mgr/mig_config_session.c @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
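The import path above enables MIG (via kmigmgrSetMIGState) only when no swizzIds are in use yet, and disables it again on the failure path if the mask is still empty. A rough standalone sketch of that enable-on-first-use / disable-when-unused pattern; the bookkeeping and all names are hypothetical, not the RM control flow:

#include <stdint.h>
#include <stdio.h>

static uint64_t g_inUseMask;
static int      g_modeEnabled;

static void set_mode(int enable) { g_modeEnabled = enable; }

static int import_instance(uint32_t id, int shouldFail)
{
    int enabledHere = 0;

    if (g_inUseMask == 0)       /* first user: switch the mode on */
    {
        set_mode(1);
        enabledHere = 1;
    }

    if (shouldFail)
        goto cleanup;

    g_inUseMask |= (1ull << id);
    return 0;

cleanup:
    /* Roll back the mode change if nothing ended up using it. */
    if (enabledHere && g_inUseMask == 0)
        set_mode(0);
    return -1;
}

int main(void)
{
    import_instance(5, 0);
    printf("mask=0x%llx enabled=%d\n", (unsigned long long)g_inUseMask, g_modeEnabled);
    return 0;
}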
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * File: mig_config_session.c + * + * Description: + * Upon successful allocation of this class, a client is granted + * permission to invoke the privileged GPU instance configuration + * control calls. + * + *****************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "kernel/gpu/mig_mgr/mig_config_session.h" +#include "class/clc639.h" +#include "os/os.h" +#include "rmapi/client.h" + +NV_STATUS +migconfigsessionConstruct_IMPL +( + MIGConfigSession *pMIGConfigSession, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + NVC639_ALLOCATION_PARAMETERS *pUserParams = pRmAllocParams->pAllocParams; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvHandle hClient = pCallContext->pClient->hClient; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + osRmCapInitDescriptor(&pMIGConfigSession->dupedCapDescriptor); + + status = osRmCapAcquire(pSys->pOsRmCaps, + NV_RM_CAP_SYS_SMC_CONFIG, + pUserParams->capDescriptor, + &pMIGConfigSession->dupedCapDescriptor); + + // + // On platforms where capability isn't implemented, + // enforce the admin-only check. + // + if (status == NV_ERR_NOT_SUPPORTED) + { + if (!rmclientIsAdminByHandle(hClient, pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_ERROR, "insufficient permissions\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + else if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Capability validation failed\n"); + return status; + } + + return NV_OK; +} + +void +migconfigsessionDestruct_IMPL +( + MIGConfigSession *pMIGConfigSession +) +{ + osRmCapRelease(pMIGConfigSession->dupedCapDescriptor); +} + diff --git a/src/nvidia/src/kernel/gpu/mig_mgr/mig_monitor_session.c b/src/nvidia/src/kernel/gpu/mig_mgr/mig_monitor_session.c new file mode 100644 index 000000000..f61a91724 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mig_mgr/mig_monitor_session.c @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
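migconfigsessionConstruct_IMPL above first tries to validate a capability descriptor and, only when the platform reports that capabilities are not implemented (NV_ERR_NOT_SUPPORTED), falls back to requiring an admin-privileged client. A plain-C sketch of that two-tier gate with hypothetical status codes and checks:

#include <stdio.h>

enum status { OK = 0, ERR_NOT_SUPPORTED, ERR_INSUFFICIENT_PERMISSIONS, ERR_OTHER };

/* Hypothetical capability check; returns ERR_NOT_SUPPORTED where caps don't exist. */
static enum status acquire_capability(int platformHasCaps, int clientHoldsCap)
{
    if (!platformHasCaps)
        return ERR_NOT_SUPPORTED;
    return clientHoldsCap ? OK : ERR_OTHER;
}

static enum status authorize(int platformHasCaps, int clientHoldsCap, int clientIsAdmin)
{
    enum status status = acquire_capability(platformHasCaps, clientHoldsCap);

    if (status == ERR_NOT_SUPPORTED)
    {
        /* No capability support on this platform: fall back to the admin check. */
        return clientIsAdmin ? OK : ERR_INSUFFICIENT_PERMISSIONS;
    }
    return status;
}

int main(void)
{
    printf("caps platform, cap held : %d\n", authorize(1, 1, 0));
    printf("no caps, admin client   : %d\n", authorize(0, 0, 1));
    printf("no caps, normal client  : %d\n", authorize(0, 0, 0));
    return 0;
}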
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * File: mig_monitor_session.c + * + * Description: + * Upon successful allocation of this class, a client is granted + * permission to query information across the GPU instances + * irrespective of per-instance MIG subscriptions + * + *****************************************************************************/ + +#include "core/core.h" +#include "kernel/gpu/mig_mgr/mig_monitor_session.h" +#include "class/clc640.h" +#include "os/os.h" +#include "rmapi/client.h" +#include "core/system.h" + +NV_STATUS +migmonitorsessionConstruct_IMPL +( + MIGMonitorSession *pMIGMonitorSession, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + NVC640_ALLOCATION_PARAMETERS *pUserParams = pRmAllocParams->pAllocParams; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvHandle hClient = pCallContext->pClient->hClient; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + osRmCapInitDescriptor(&pMIGMonitorSession->dupedCapDescriptor); + + status = osRmCapAcquire(pSys->pOsRmCaps, + NV_RM_CAP_SYS_SMC_MONITOR, + pUserParams->capDescriptor, + &pMIGMonitorSession->dupedCapDescriptor); + + // + // On platforms where capability isn't implemented, + // enforce the admin-only check. + // + if (status == NV_ERR_NOT_SUPPORTED) + { + if (!rmclientIsAdminByHandle(hClient, pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_ERROR, "insufficient permissions\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + else if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Capability validation failed\n"); + return status; + } + + return NV_OK; +} + +void +migmonitorsessionDestruct_IMPL +( + MIGMonitorSession *pMIGMonitorSession +) +{ + osRmCapRelease(pMIGMonitorSession->dupedCapDescriptor); +} + diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/ampere/kern_gmmu_fmt_ga10x.c b/src/nvidia/src/kernel/gpu/mmu/arch/ampere/kern_gmmu_fmt_ga10x.c new file mode 100644 index 000000000..449eff421 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/ampere/kern_gmmu_fmt_ga10x.c @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mmu/kern_gmmu.h" +#include "mmu/gmmu_fmt.h" + +/*! + * PD3 [48:47] + * | + * v + * PD2 [46:38] + * | + * v + * PD1 [37:29] / PT_512M [37:29] (512MB page) + * | + * v + * PD0 [28:21] / PT_HUGE [28:21] (2MB page) + * | \ + * | \ + * v v + * PT_SMALL PT_BIG (64KB page) + * [20:12] [20:16] + */ +void kgmmuFmtInitLevels_GA10X(KernelGmmu *pKernelGmmu, + MMU_FMT_LEVEL *pLevels, + const NvU32 numLevels, + const NvU32 version, + const NvU32 bigPageShift) +{ + kgmmuFmtInitLevels_GP10X(pKernelGmmu, pLevels, numLevels, version, bigPageShift); + + // Page directory 1 can now hold a PTE pointing to a 512MB Page + pLevels[2].bPageTable = NV_TRUE; +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/ampere/kern_gmmu_ga100.c b/src/nvidia/src/kernel/gpu/mmu/arch/ampere/kern_gmmu_ga100.c new file mode 100644 index 000000000..a3cf631d8 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/ampere/kern_gmmu_ga100.c @@ -0,0 +1,332 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/nvlink/kernel_nvlink.h" + +#include "published/ampere/ga100/dev_vm.h" + +/*! 
+ * @brief Sets the Invalidation scope field in the register + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * @param[in] flags + * @param[in/out] TLB_INVALIDATE_PARAMS pointer + * + * @returns NV_ERR_INVALID_ARGUMENT on input validation + * NV_OK on success + */ +NV_STATUS +kgmmuSetTlbInvalidationScope_GA100 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 flags, + TLB_INVALIDATE_PARAMS *pParams +) +{ + switch(flags) + { + case NV_GMMU_INVAL_SCOPE_ALL_TLBS: + pParams->regVal = FLD_SET_DRF(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE, _INVAL_SCOPE, + _ALL_TLBS, pParams->regVal); + break; + case NV_GMMU_INVAL_SCOPE_LINK_TLBS: + pParams->regVal = FLD_SET_DRF(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE, _INVAL_SCOPE, + _LINK_TLBS, pParams->regVal); + break; + case NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS: + pParams->regVal = FLD_SET_DRF(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE, _INVAL_SCOPE, + _NON_LINK_TLBS, pParams->regVal); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +/*! + * @brief Validates fabric base address. + * + * @param pKernelGmmu + * @param fabricBaseAddr + * + * @returns On success, NV_OK. + * On failure, returns NV_ERR_XXX. + */ +NV_STATUS +kgmmuValidateFabricBaseAddress_GA100 +( + KernelGmmu *pKernelGmmu, + NvU64 fabricBaseAddr +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelGmmu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 fbSizeBytes; + + fbSizeBytes = pMemoryManager->Ram.fbTotalMemSizeMb << 20; + + // + // Ampere SKUs will be paired with NVSwitches (Limerock) supporting 2K + // mapslots that can cover 64GB each. Make sure that the fabric base + // address being used is valid to cover whole frame buffer. + // + + // Check if fabric address is aligned to mapslot size. + if (fabricBaseAddr & (NVBIT64(36) - 1)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Align fbSize to mapslot size. + fbSizeBytes = RM_ALIGN_UP(fbSizeBytes, NVBIT64(36)); + + // Make sure the address range doesn't go beyond the limit, (2K * 64GB). + if ((fabricBaseAddr + fbSizeBytes) > NVBIT64(47)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +NV_STATUS +kgmmuSetupWarForBug2720120_GA100 +( + KernelGmmu *pKernelGmmu, + GMMU_FMT_FAMILY *pFam +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pKernelGmmu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + const GMMU_FMT *pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_DEFAULT, 0); + const MMU_FMT_LEVEL *pPageDir1 = mmuFmtFindLevelWithPageShift(pFmt->pRoot, 29); + const MMU_FMT_LEVEL *pPageDir0 = mmuFmtFindLevelWithPageShift(pFmt->pRoot, 21); + const MMU_FMT_LEVEL *pSmallPT = mmuFmtFindLevelWithPageShift(pFmt->pRoot, 12); + const GMMU_FMT_PDE *pPde0Fmt = gmmuFmtGetPde(pFmt, pPageDir0, 1); + const GMMU_FMT_PDE *pPde1Fmt = gmmuFmtGetPde(pFmt, pPageDir1, 0); + NvU8 *pMap = NULL; + void *pPriv = NULL; + NvU32 sizeOfDWord = sizeof(NvU32); + RmPhysAddr physAddr; + RmPhysAddr physAddrOrig; + NvU64 sizeInDWord; + NvU32 bar0Addr; + NvU32 entryIndex; + NvU32 entryIndexHi; + NvU32 entryOffset; + + // + // BAR2 is not yet initialized. Thus use either the BAR0 window or + // memmap to initialize the given surface. 
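kgmmuValidateFabricBaseAddress_GA100 above boils down to two pieces of arithmetic: the fabric base must be aligned to the 64 GB (2^36-byte) NVSwitch mapslot size, and base plus the mapslot-rounded framebuffer size must stay under the 2K * 64 GB = 2^47-byte addressable window. A standalone restatement of those checks:

#include <stdint.h>
#include <stdio.h>

#define MAPSLOT_BYTES  (1ull << 36)   /* one NVSwitch mapslot covers 64 GB */
#define FABRIC_LIMIT   (1ull << 47)   /* 2048 mapslots * 64 GB             */

static int validate_fabric_base(uint64_t fabricBase, uint64_t fbSizeBytes)
{
    /* Base must sit on a mapslot boundary. */
    if (fabricBase & (MAPSLOT_BYTES - 1))
        return -1;

    /* Round the framebuffer size up to whole mapslots. */
    fbSizeBytes = (fbSizeBytes + MAPSLOT_BYTES - 1) & ~(MAPSLOT_BYTES - 1);

    /* The mapped range must not spill past the last mapslot. */
    if (fabricBase + fbSizeBytes > FABRIC_LIMIT)
        return -1;

    return 0;
}

int main(void)
{
    /* 80 GB of FB mapped at the second mapslot: needs two slots, fits easily. */
    printf("%d\n", validate_fabric_base(1ull << 36, 80ull << 30));
    /* Misaligned base is rejected. */
    printf("%d\n", validate_fabric_base(1ull << 35, 80ull << 30));
    return 0;
}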
+ // + NV_ASSERT(pKernelBus->virtualBar2[GPU_GFID_PF].pCpuMapping == NULL); + + // Initialize the memdescs to NULL before use + pKernelGmmu->pWarSmallPageTable = NULL; + pKernelGmmu->pWarPageDirectory0 = NULL; + + // Bug 2720120: Allocate a small page table consisting of all invalid entries + NV_ASSERT_OK_OR_RETURN(memdescCreate(&pKernelGmmu->pWarSmallPageTable, pGpu, + mmuFmtLevelSize(pSmallPT), + RM_PAGE_SIZE, NV_TRUE, + kgmmuGetPTEAperture(pKernelGmmu), + kgmmuGetPTEAttr(pKernelGmmu), 0)); + + NV_ASSERT_OK_OR_GOTO(status, memdescAlloc(pKernelGmmu->pWarSmallPageTable), failed); + + switch (memdescGetAddressSpace(pKernelGmmu->pWarSmallPageTable)) + { + case ADDR_FBMEM: + memUtilsMemSetNoBAR2(pGpu, pKernelGmmu->pWarSmallPageTable, 0); + break; + + case ADDR_SYSMEM: + // Plain old memmap. + NV_ASSERT_OK_OR_GOTO(status, memdescMapOld(pKernelGmmu->pWarSmallPageTable, 0, + pKernelGmmu->pWarSmallPageTable->Size, + NV_TRUE, // kernel, + NV_PROTECT_READ_WRITE, + (void **)&pMap, + &pPriv), failed); + + portMemSet(pMap, 0, pKernelGmmu->pWarSmallPageTable->Size); + + memdescUnmapOld(pKernelGmmu->pWarSmallPageTable, 1, 0, pMap, pPriv); + break; + + default: + // Should not happen. + status = NV_ERR_INVALID_ARGUMENT; + NV_ASSERT_OR_GOTO(status == NV_OK, failed); + break; + } + + // The WAR PDE0 points to the small page table allocated above + { + const GMMU_APERTURE aperture = kgmmuGetMemAperture(pKernelGmmu, pKernelGmmu->pWarSmallPageTable); + + nvFieldSetBool(&pPde0Fmt->fldVolatile, + memdescGetVolatility(pKernelGmmu->pWarSmallPageTable), + pFam->bug2720120WarPde0.v8); + gmmuFieldSetAperture(&pPde0Fmt->fldAperture, aperture, + pFam->bug2720120WarPde0.v8); + gmmuFieldSetAddress(gmmuFmtPdePhysAddrFld(pPde0Fmt, aperture), + kgmmuEncodePhysAddr(pKernelGmmu, aperture, + memdescGetPhysAddr(pKernelGmmu->pWarSmallPageTable, + AT_GPU, 0), + NVLINK_INVALID_FABRIC_ADDR), + pFam->bug2720120WarPde0.v8); + } + + // + // Bug 2720120: Allocate a PD0 instance all of whose entries point to + // the small page table allocated above + // + NV_ASSERT_OK_OR_GOTO(status, memdescCreate(&pKernelGmmu->pWarPageDirectory0, + pGpu, mmuFmtLevelSize(pPageDir0), + RM_PAGE_SIZE, NV_TRUE, + kgmmuGetPTEAperture(pKernelGmmu), + kgmmuGetPTEAttr(pKernelGmmu), 0), failed); + + NV_ASSERT_OK_OR_GOTO(status, memdescAlloc(pKernelGmmu->pWarPageDirectory0), failed); + + entryIndexHi = mmuFmtLevelEntryCount(pPageDir0) - 1; + switch (memdescGetAddressSpace(pKernelGmmu->pWarPageDirectory0)) + { + case ADDR_FBMEM: + // + // Set the BAR0 window to encompass the given surface while + // saving off the location to where the BAR0 window was + // previously pointing. + // + physAddr = memdescGetPhysAddr(pKernelGmmu->pWarPageDirectory0, AT_GPU, 0); + NV_ASSERT_OR_GOTO(NV_IS_ALIGNED64(physAddr, sizeOfDWord), failed); + + physAddrOrig = kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus); + NV_ASSERT_OK_OR_GOTO(status, + kbusSetBAR0WindowVidOffset_HAL(pGpu, + pKernelBus, + physAddr & ~0xffffULL), + failed); + + bar0Addr = NvU64_LO32(kbusGetBAR0WindowAddress_HAL(pKernelBus) + + (physAddr - kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus))); + + // + // Iterate and initialize the given surface with BAR0 + // writes. 
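The loops below stamp the same pre-encoded PDE template into every entry of the directory, writing one 32-bit word at a time at offset entryIndex * entrySize, either through the BAR0 window or through a CPU mapping. A standalone sketch of that fill pattern, modelling the write target as a plain byte array rather than a register window:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_SIZE_BYTES  16u   /* one PDE, for illustration                */
#define ENTRY_COUNT       4u    /* tiny directory; a real PD0 is far larger */

/* Fill each directory entry with the same template, one 32-bit word at a time. */
static void fill_directory(uint8_t *window, const uint32_t *template32)
{
    const uint32_t wordsPerEntry = ENTRY_SIZE_BYTES / sizeof(uint32_t);

    for (uint32_t entry = 0; entry < ENTRY_COUNT; entry++)
    {
        uint32_t offset = entry * ENTRY_SIZE_BYTES;

        for (uint32_t w = 0; w < wordsPerEntry; w++)
            memcpy(window + offset + w * sizeof(uint32_t),
                   &template32[w], sizeof(uint32_t));
    }
}

int main(void)
{
    uint8_t  window[ENTRY_SIZE_BYTES * ENTRY_COUNT] = {0};
    uint32_t pdeTemplate[ENTRY_SIZE_BYTES / sizeof(uint32_t)] = { 0xdeadbeef, 0x1, 0x0, 0x0 };
    uint32_t word0;

    fill_directory(window, pdeTemplate);
    memcpy(&word0, window + 3 * ENTRY_SIZE_BYTES, sizeof(word0));
    printf("entry 3, word 0 = 0x%x\n", word0);
    return 0;
}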
+ // + sizeInDWord = (NvU32)NV_DIV_AND_CEIL(pPageDir0->entrySize, sizeOfDWord); + for (entryIndex = 0; entryIndex <= entryIndexHi; entryIndex++) + { + entryOffset = entryIndex * pPageDir0->entrySize; + NvU32 i; + for (i = 0; i < sizeInDWord; i++) + { + GPU_REG_WR32(pGpu, + bar0Addr + entryOffset + (sizeOfDWord * i), + pFam->bug2720120WarPde0.v32[i]); + } + } + + // Restore where the BAR0 window was previously pointing to + NV_ASSERT_OK_OR_GOTO(status, + kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, + physAddrOrig), + failed); + + break; + + case ADDR_SYSMEM: + // Plain old memmap. + NV_ASSERT_OK_OR_GOTO(status, memdescMapOld(pKernelGmmu->pWarPageDirectory0, 0, + pKernelGmmu->pWarPageDirectory0->Size, + NV_TRUE, // kernel, + NV_PROTECT_READ_WRITE, + (void **)&pMap, + &pPriv), failed); + + for (entryIndex = 0; entryIndex <= entryIndexHi; entryIndex++) + { + entryOffset = entryIndex * pPageDir0->entrySize; + + // Memory-mapped write. + portMemCopy(pMap + entryOffset, + pPageDir0->entrySize, + pFam->bug2720120WarPde0.v8, + pPageDir0->entrySize); + } + + memdescUnmapOld(pKernelGmmu->pWarPageDirectory0, 1, 0, pMap, pPriv); + break; + + default: + // Should not happen. + status = NV_ERR_INVALID_ARGUMENT; + NV_ASSERT_OR_GOTO(status == NV_OK, failed); + break; + } + + // The WAR PDE1 points to the PD0 instance allocated above + { + const GMMU_APERTURE aperture = kgmmuGetMemAperture(pKernelGmmu, pKernelGmmu->pWarPageDirectory0); + + nvFieldSetBool(&pPde1Fmt->fldVolatile, + memdescGetVolatility(pKernelGmmu->pWarPageDirectory0), + pFam->bug2720120WarPde1.v8); + gmmuFieldSetAperture(&pPde1Fmt->fldAperture, aperture, + pFam->bug2720120WarPde1.v8); + gmmuFieldSetAddress(gmmuFmtPdePhysAddrFld(pPde1Fmt, aperture), + kgmmuEncodePhysAddr(pKernelGmmu, aperture, + memdescGetPhysAddr(pKernelGmmu->pWarPageDirectory0, + AT_GPU, 0), + NVLINK_INVALID_FABRIC_ADDR), + pFam->bug2720120WarPde1.v8); + } + +failed: + if (status != NV_OK) + { + if (pKernelGmmu->pWarSmallPageTable != NULL) + { + memdescFree(pKernelGmmu->pWarSmallPageTable); + memdescDestroy(pKernelGmmu->pWarSmallPageTable); + pKernelGmmu->pWarSmallPageTable = NULL; + } + if (pKernelGmmu->pWarPageDirectory0 != NULL) + { + memdescFree(pKernelGmmu->pWarPageDirectory0); + memdescDestroy(pKernelGmmu->pWarPageDirectory0); + pKernelGmmu->pWarPageDirectory0 = NULL; + } + } + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_fmt_gm10x.c b/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_fmt_gm10x.c new file mode 100644 index 000000000..dc2e34550 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_fmt_gm10x.c @@ -0,0 +1,197 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(SRT_BUILD) +#include "gpu/mmu/kern_gmmu.h" +#endif +#include "mmu/gmmu_fmt.h" +#include "published/maxwell/gm107/dev_mmu.h" + +/*! + * 64KB Big Page Size Format + * + * PD [39:26] + * | \ + * | \ + * v v + * PT_SMALL PT_BIG + * [25:12] [25:16] + * + * 128KB Big Page Size Format + * + * PD [39:27] + * | \ + * | \ + * v v + * PT_SMALL PT_BIG + * [26:12] [26:17] + */ +void kgmmuFmtInitLevels_GM10X(KernelGmmu *pKernelGmmu, + MMU_FMT_LEVEL *pLevels, + const NvU32 numLevels, + const NvU32 version, + const NvU32 bigPageShift) +{ + NV_ASSERT_OR_RETURN_VOID(numLevels >= 3); + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_1); + NV_ASSERT_OR_RETURN_VOID(bigPageShift == 16 || bigPageShift == 17); + + // Page directory (root). + pLevels[0].virtAddrBitHi = 39; + pLevels[0].virtAddrBitLo = (NvU8)bigPageShift + 10; + pLevels[0].entrySize = NV_MMU_PDE__SIZE; + pLevels[0].numSubLevels = 2; + pLevels[0].subLevels = pLevels + 1; + + // Big page table. + pLevels[1].virtAddrBitHi = pLevels[0].virtAddrBitLo - 1; + pLevels[1].virtAddrBitLo = (NvU8)bigPageShift; + pLevels[1].entrySize = NV_MMU_PTE__SIZE; + pLevels[1].bPageTable = NV_TRUE; + + // Small page table. + pLevels[2].virtAddrBitHi = pLevels[0].virtAddrBitLo - 1; + pLevels[2].virtAddrBitLo = 12; + pLevels[2].entrySize = NV_MMU_PTE__SIZE; + pLevels[2].bPageTable = NV_TRUE; +} + +void kgmmuFmtInitPdeMulti_GM10X(KernelGmmu *pKernelGmmu, + GMMU_FMT_PDE_MULTI *pPdeMulti, + const NvU32 version, + const NV_FIELD_ENUM_ENTRY *pPdeApertures) +{ + GMMU_FMT_PDE *pPdeBig = &pPdeMulti->subLevels[0]; + GMMU_FMT_PDE *pPdeSmall = &pPdeMulti->subLevels[1]; + + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_1); + + // Common PDE fields. + INIT_FIELD_DESC32(&pPdeMulti->fldSizeRecipExp, NV_MMU_PDE_SIZE); + + // Dual PDE - big part. + pPdeBig->version = GMMU_FMT_VERSION_1; + INIT_FIELD_APERTURE(&pPdeBig->fldAperture, NV_MMU_PDE_APERTURE_BIG, pPdeApertures); + INIT_FIELD_ADDRESS(&pPdeBig->fldAddrVidmem, NV_MMU_PDE_ADDRESS_BIG_VID, + NV_MMU_PDE_ADDRESS_SHIFT); + INIT_FIELD_ADDRESS(&pPdeBig->fldAddrSysmem, NV_MMU_PDE_ADDRESS_BIG_SYS, + NV_MMU_PDE_ADDRESS_SHIFT); + INIT_FIELD_BOOL(&pPdeBig->fldVolatile, NV_MMU_PDE_VOL_BIG); + + // Dual PDE - small part. 
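kgmmuFmtInitLevels_GM10X above describes each level purely by its inclusive VA bit range (virtAddrBitHi / virtAddrBitLo), so turning a virtual address into per-level indices is just a shift and mask per level. A standalone sketch using the 64 KB big-page layout from the diagram above (PD bits 39:26, big PT 25:16, small PT 25:12):

#include <stdint.h>
#include <stdio.h>

/* Extract the inclusive bit range [hi:lo] of a virtual address. */
static uint64_t va_bits(uint64_t va, unsigned hi, unsigned lo)
{
    return (va >> lo) & ((1ull << (hi - lo + 1)) - 1);
}

int main(void)
{
    uint64_t va = 0x00000012345f3000ull;   /* arbitrary 40-bit VA */

    /* GMMU v1, 64 KB big page size: ranges match the diagram above. */
    printf("PD index       = 0x%llx\n", (unsigned long long)va_bits(va, 39, 26));
    printf("big PT index   = 0x%llx\n", (unsigned long long)va_bits(va, 25, 16));
    printf("small PT index = 0x%llx\n", (unsigned long long)va_bits(va, 25, 12));
    return 0;
}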
+ pPdeSmall->version = GMMU_FMT_VERSION_1; + INIT_FIELD_APERTURE(&pPdeSmall->fldAperture, NV_MMU_PDE_APERTURE_SMALL, pPdeApertures); + INIT_FIELD_ADDRESS(&pPdeSmall->fldAddrVidmem, NV_MMU_PDE_ADDRESS_SMALL_VID, + NV_MMU_PDE_ADDRESS_SHIFT); + INIT_FIELD_ADDRESS(&pPdeSmall->fldAddrSysmem, NV_MMU_PDE_ADDRESS_SMALL_SYS, + NV_MMU_PDE_ADDRESS_SHIFT); + INIT_FIELD_BOOL(&pPdeSmall->fldVolatile, NV_MMU_PDE_VOL_SMALL); +} + +void kgmmuFmtInitPte_GM10X(KernelGmmu *pKernelGmmu, + GMMU_FMT_PTE *pPte, + const NvU32 version, + const NV_FIELD_ENUM_ENTRY *pPteApertures, + const NvBool bUnifiedAperture) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_1); + + pPte->version = GMMU_FMT_VERSION_1; + INIT_FIELD_BOOL(&pPte->fldValid, NV_MMU_PTE_VALID); + INIT_FIELD_APERTURE(&pPte->fldAperture, NV_MMU_PTE_APERTURE, pPteApertures); + INIT_FIELD_DESC32(&pPte->fldPeerIndex, NV_MMU_PTE_ADDRESS_VID_PEER); + INIT_FIELD_BOOL(&pPte->fldVolatile, NV_MMU_PTE_VOL); + INIT_FIELD_BOOL(&pPte->fldReadOnly, NV_MMU_PTE_READ_ONLY); + INIT_FIELD_BOOL(&pPte->fldPrivilege, NV_MMU_PTE_PRIVILEGE); + INIT_FIELD_BOOL(&pPte->fldEncrypted, NV_MMU_PTE_ENCRYPTED); + INIT_FIELD_BOOL(&pPte->fldLocked, NV_MMU_PTE_LOCK); + INIT_FIELD_DESC32(&pPte->fldKind, NV_MMU_PTE_KIND); + INIT_FIELD_BOOL(&pPte->fldReadDisable, NV_MMU_PTE_READ_DISABLE); + INIT_FIELD_BOOL(&pPte->fldWriteDisable, NV_MMU_PTE_WRITE_DISABLE); + INIT_FIELD_ADDRESS(&pPte->fldAddrVidmem, NV_MMU_PTE_ADDRESS_VID, NV_MMU_PTE_ADDRESS_SHIFT); + INIT_FIELD_ADDRESS(&pPte->fldAddrPeer, NV_MMU_PTE_ADDRESS_VID, NV_MMU_PTE_ADDRESS_SHIFT); + + if (bUnifiedAperture) + { + // + // On Tegra, Use the vidmem address field descriptor for sysmem as well. + // This is to avoid clobbering the comptags on PTE. + // + // Comptag fields overlap with the upper bits of the sysmem physical address. + // Sysmem compression is supported on Tegra since we have a unified aperture. + // The physical address width supported by Tegra corresponds to the vidmem aperture. + // So the upper bits of sysmem can be safely used for compression. 
+ // + INIT_FIELD_ADDRESS(&pPte->fldAddrSysmem, NV_MMU_PTE_ADDRESS_VID, NV_MMU_PTE_ADDRESS_SHIFT); + } + else + { + INIT_FIELD_ADDRESS(&pPte->fldAddrSysmem, NV_MMU_PTE_ADDRESS_SYS, NV_MMU_PTE_ADDRESS_SHIFT); + } +} + +void kgmmuFmtInitPteComptagLine_GM10X(KernelGmmu *pKernelGmmu, + GMMU_FMT_PTE *pPte, + const NvU32 version) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_1); + INIT_FIELD_DESC32(&pPte->fldCompTagLine, NV_MMU_PTE_COMPTAGLINE); +} + +void kgmmuFmtInitPdeApertures_GM10X +( + KernelGmmu *pKernelGmmu, + NV_FIELD_ENUM_ENTRY *pEntries +) +{ + nvFieldEnumEntryInit(pEntries + GMMU_APERTURE_INVALID, + NV_MMU_PDE_APERTURE_BIG_INVALID); + + nvFieldEnumEntryInit(pEntries + GMMU_APERTURE_VIDEO, + NV_MMU_PDE_APERTURE_BIG_VIDEO_MEMORY); + + nvFieldEnumEntryInit(pEntries + GMMU_APERTURE_SYS_COH, + NV_MMU_PDE_APERTURE_BIG_SYSTEM_COHERENT_MEMORY); + + nvFieldEnumEntryInit(pEntries + GMMU_APERTURE_SYS_NONCOH, + NV_MMU_PDE_APERTURE_BIG_SYSTEM_NON_COHERENT_MEMORY); +} + +void kgmmuFmtInitPteApertures_GM10X +( + KernelGmmu *pKernelGmmu, + NV_FIELD_ENUM_ENTRY *pEntries +) +{ + nvFieldEnumEntryInit(pEntries + GMMU_APERTURE_VIDEO, + NV_MMU_PTE_APERTURE_VIDEO_MEMORY); + + nvFieldEnumEntryInit(pEntries + GMMU_APERTURE_PEER, + NV_MMU_PTE_APERTURE_PEER_MEMORY); + + nvFieldEnumEntryInit(pEntries + GMMU_APERTURE_SYS_COH, + NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY); + + nvFieldEnumEntryInit(pEntries + GMMU_APERTURE_SYS_NONCOH, + NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY); +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_fmt_gm20x.c b/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_fmt_gm20x.c new file mode 100644 index 000000000..51eda1ee6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_fmt_gm20x.c @@ -0,0 +1,49 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if !defined(SRT_BUILD) +#include "gpu/mmu/kern_gmmu.h" +#endif +#include "mmu/gmmu_fmt.h" + +#ifndef NV_MMU_PTE_COMPTAG_USABLE +#define NV_MMU_PTE_COMPTAG_USABLE (1*32+27):(1*32+12) /* RWXVF */ +#endif +#ifndef NV_MMU_PTE_COMPTAG_SUB_INDEX +#define NV_MMU_PTE_COMPTAG_SUB_INDEX (1*32+28):(1*32+28) /* RWXVF */ +#endif + +void kgmmuFmtInitCaps_GM20X(KernelGmmu *pKernelGmmu, + GMMU_FMT *pFmt) +{ + pFmt->bSparseHwSupport = NV_TRUE; +} + +void kgmmuFmtInitPteComptagLine_GM20X(KernelGmmu *pKernelGmmu, + GMMU_FMT_PTE *pPte, + const NvU32 version) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_1); + INIT_FIELD_DESC32(&pPte->fldCompTagLine, NV_MMU_PTE_COMPTAG_USABLE); + INIT_FIELD_DESC32(&pPte->fldCompTagSubIndex, NV_MMU_PTE_COMPTAG_SUB_INDEX); +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_gm107.c b/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_gm107.c new file mode 100644 index 000000000..d2512b3a9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_gm107.c @@ -0,0 +1,326 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/bus/kern_bus.h" + +#include "published/maxwell/gm107/dev_fb.h" +#include "published/maxwell/gm107/dev_mmu.h" + +/*! + * Returns the big page size in bytes + * + * @param[in] pKernelGmmu KernelGmmu object pointer + * + * @returns NvU32 + */ +NvU32 +kgmmuGetBigPageSize_GM107(KernelGmmu *pKernelGmmu) +{ + return pKernelGmmu->defaultBigPageSize; +} + +/*! + * @brief Unicast GMMU TLB invalidate. + * + * Sequence: + * 1. Acquire mutex for PMU ELPG (exclusive PRI_FIFO access). + * 2. Wait for available PRI_FIFO space. + * 3. Commit invalidate. + * 4. Wait for PRI_FIFO to drain. + * 5. Release mutex for PMU ELPG. + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * @param[in] pRootPageDir Memory descriptor of the PDB + * @param[in] bHubOnly Only HUB TLB needs to be invalidated + * @param[in] update_type PTE_DOWNGRADE if coherency is required following + * invalidate. Unused in FERMI. + * @param[in] gfid GFID of the VF whose mappings are to be + * invalidated. 
Unused on pre Turing + * @param[in] invalidation_scope invalidation scope to choose whether to invalidate + * Link TLB or Non-Link TLBs or ALL TLBs + */ +void +kgmmuInvalidateTlb_GM107 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + MEMORY_DESCRIPTOR *pRootPageDir, + NvU32 vaspaceFlags, + VAS_PTE_UPDATE_TYPE update_type, + NvU32 gfid, + NvU32 invalidation_scope +) +{ + NV_STATUS status = NV_OK; + TLB_INVALIDATE_PARAMS params; + NvU32 flushCount = 0; + + // + // Bail out early if + // 1. The GPU is in reset path. + // 2. We are running inside a guest in paravirtualization config. + // + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu) || + IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return; + } + + // Clear struct before use. + portMemSet(¶ms, 0, sizeof(TLB_INVALIDATE_PARAMS)); + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // Skip BAR invalidates if regkey is set for RTL/FMOD. + if ((vaspaceFlags & VASPACE_FLAGS_BAR) + && kgmmuIsIgnoreHubTlbInvalidate(pKernelGmmu)) + { + NV_PRINTF(LEVEL_INFO, + "disable_mmu_invalidate flag, skipping hub invalidate.\n"); + return; + } + + // + // Originally the flag is 0, but to WAR bug 2909388, add flag + // GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE to bypass using threadStateCheckTimeout, + // GPU_TIMEOUT_FLAGS_BYPASS_CPU_YIELD to not wait inside timeout with mutex held. + // + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, ¶ms.timeout, + GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE | + GPU_TIMEOUT_FLAGS_DEFAULT | GPU_TIMEOUT_FLAGS_BYPASS_CPU_YIELD); + + // + // 2. Wait until we can issue an invalidate. On pre-Turing, wait for space + // in the PRI FIFO. On Turing, check if an invalidate is already in progress. + // + // Set the GFID. + params.gfid = gfid; + + status = kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu, ¶ms.timeout, params.gfid); + if (status != NV_OK) + { + return; + } + + // Trigger an invalidate. + params.regVal = FLD_SET_DRF(_PFB_PRI, _MMU_INVALIDATE, _TRIGGER, _TRUE, params.regVal); + + // Not using range-based invalidate. + params.regVal = FLD_SET_DRF(_PFB_PRI, _MMU_INVALIDATE, _ALL_VA, _TRUE, params.regVal); + + if (NULL != pRootPageDir) + { + // Invalidatating only one VAS. + params.regVal = FLD_SET_DRF(_PFB_PRI, _MMU_INVALIDATE, _ALL_PDB, _FALSE, params.regVal); + + // Setup PDB of VAS to invalidate. + if (memdescGetAddressSpace(pRootPageDir) == ADDR_FBMEM) + { + params.pdbAperture = NV_PFB_PRI_MMU_INVALIDATE_PDB_APERTURE_VID_MEM; + } + else if (memdescGetAddressSpace(pRootPageDir) == ADDR_SYSMEM) + { + params.pdbAperture = NV_PFB_PRI_MMU_INVALIDATE_PDB_APERTURE_SYS_MEM; + } + else + { + return; + } + + params.pdbAddress = memdescGetPhysAddr(memdescGetMemDescFromGpu(pRootPageDir, pGpu), AT_GPU, 0); + } + else + { + // Invalidate *ALL* address spaces. + params.regVal = FLD_SET_DRF(_PFB_PRI, _MMU_INVALIDATE, _ALL_PDB, _TRUE, params.regVal); + // Override invalidation scope. + invalidation_scope = NV_GMMU_INVAL_SCOPE_ALL_TLBS; + } + + // For host VAS (e.g. BAR) we do not have to invalidate GR. + if (vaspaceFlags & VASPACE_FLAGS_BAR) + { + params.regVal = FLD_SET_DRF(_PFB_PRI, _MMU_INVALIDATE, _HUBTLB_ONLY, _TRUE, params.regVal); + } + + // Perform membarWAR for non-BAR2 pte downgrades. 
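The invalidate path above composes a single register value field by field with FLD_SET_DRF: always set the trigger and ALL_VA bits, set ALL_PDB when no specific PDB is given, and set HUBTLB_ONLY for BAR-style address spaces. A plain-C sketch of that composition; the single-bit positions below are invented for illustration and do not match the real NV_PFB_PRI_MMU_INVALIDATE layout in the hardware headers:

#include <stdint.h>
#include <stdio.h>

/* Made-up bit positions standing in for the hardware register fields. */
#define INVALIDATE_TRIGGER      (1u << 0)
#define INVALIDATE_ALL_VA       (1u << 1)
#define INVALIDATE_ALL_PDB      (1u << 2)
#define INVALIDATE_HUBTLB_ONLY  (1u << 3)

static uint32_t build_invalidate(int allPdb, int hubOnly)
{
    uint32_t regVal = 0;

    regVal |= INVALIDATE_TRIGGER;          /* commit an invalidate                 */
    regVal |= INVALIDATE_ALL_VA;           /* not using range-based invalidation   */
    if (allPdb)
        regVal |= INVALIDATE_ALL_PDB;      /* invalidate every address space       */
    if (hubOnly)
        regVal |= INVALIDATE_HUBTLB_ONLY;  /* BAR-style VAS: skip the GR TLBs      */

    return regVal;
}

int main(void)
{
    printf("single VAS, full invalidate: 0x%x\n", build_invalidate(0, 0));
    printf("all PDBs, hub only         : 0x%x\n", build_invalidate(1, 1));
    return 0;
}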
+ if (!(vaspaceFlags & VASPACE_FLAGS_BAR_BAR2) && (PTE_DOWNGRADE == update_type)) + { + flushCount = kgmmuSetTlbInvalidateMembarWarParameters_HAL(pGpu, pKernelGmmu, ¶ms); + } + + status = kgmmuSetTlbInvalidationScope_HAL(pGpu, pKernelGmmu, invalidation_scope, ¶ms); + if (!(status == NV_OK || status == NV_ERR_NOT_SUPPORTED)) + return; + + // 3 and 4. Commit the invalidate and wait for invalidate to complete. + status = kgmmuCommitTlbInvalidate_HAL(pGpu, pKernelGmmu, ¶ms); + if (status != NV_OK) + { + return; + } + + while (flushCount--) + { + if (kbusFlush_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), BUS_FLUSH_VIDEO_MEMORY) == NV_ERR_TIMEOUT) + { + break; + } + } +} + +/*! + * Check if a specific GMMU format version is supported. + */ +NvBool +kgmmuFmtIsVersionSupported_GM10X(KernelGmmu *pKernelGmmu, NvU32 version) +{ + return (version == GMMU_FMT_VERSION_1); +} + +void +kgmmuDetermineMaxVASize_GM107 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NvU32 v; + NvU32 maxFmtVersionSupported = 0; + + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + const NvU32 ver = g_gmmuFmtVersions[v]; + if (kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, ver)) + { + maxFmtVersionSupported = maxFmtVersionSupported < ver ? ver : maxFmtVersionSupported; + } + } + + switch (maxFmtVersionSupported) + { + case GMMU_FMT_VERSION_1: + pKernelGmmu->maxVASize = 1ULL << 40; + break; + case GMMU_FMT_VERSION_2: + pKernelGmmu->maxVASize = 1ULL << 49; + break; + default: + pKernelGmmu->maxVASize = 1ULL << 40; + } +} + +/* + * @brief Checks the system memory address against the PA capabilities of the + * GMMU. + * + * This will account for the DMA window, if applicable, and strip out any bits + * that won't fit in the format (since the DMA window will be handling those). + */ +void +kgmmuEncodeSysmemAddrs_GM107 +( + KernelGmmu *pKernelGmmu, + NvU64 *pAddresses, + NvU64 count +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelGmmu); + const NvU32 paWidth = gpuGetPhysAddrWidth_HAL(pGpu, ADDR_SYSMEM); + NvU64 i; + + for (i = 0; i < count; ++i) + { + NvU64 address = pAddresses[i]; + NvU64 addressAdjusted = address; + + NV_ASSERT(address >= pKernelGmmu->sysmemBaseAddress); + NV_ASSERT(address <= pKernelGmmu->sysmemBaseAddress + (NVBIT64(paWidth) - 1)); + + addressAdjusted &= (NVBIT64(paWidth) - 1); + NV_ASSERT(addressAdjusted == (address - pKernelGmmu->sysmemBaseAddress)); + + pAddresses[i] = addressAdjusted; + } +} + +/*! + * @brief This function returns the largest page size + * that is supported by the system. + * On GM2xx, big page size is specific to VASpace and is not decided + * globally (provided the property is set). + * In such a case, all FB allocations need to be aligned to this page + * size so that this surface can be mapped to both 64k and 128k page sizes. 
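+ *        For example, when the per-VASpace property is set this returns
+ *        RM_PAGE_SIZE_128K, so a surface aligned to that size can later be
+ *        mapped with either a 64KB or a 128KB big page size.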
+ * + * @param[in] pKernelGmmu + * + * @returns The size of a large page in bytes + */ +NvU32 +kgmmuGetMaxBigPageSize_GM107(KernelGmmu *pKernelGmmu) +{ + if (!kgmmuIsPerVaspaceBigPageEn(pKernelGmmu)) + return kgmmuGetBigPageSize_HAL(pKernelGmmu); + + return RM_PAGE_SIZE_128K; +} + +NvU8 +kgmmuGetHwPteApertureFromMemdesc_GM107 +( + KernelGmmu *pGmmu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + NV_ADDRESS_SPACE addrSpace = memdescGetAddressSpace(pMemDesc); + NvU8 aperture = 0; + + switch (addrSpace) + { + case ADDR_SYSMEM: + if (memdescGetCpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED) + { + aperture = NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY; + } + else + { + aperture = NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY; + } + break; + case ADDR_FBMEM: + case ADDR_FABRIC: + case ADDR_FABRIC_V2: + aperture = NV_MMU_PTE_APERTURE_VIDEO_MEMORY; + break; + default: + // + // If we reach here, then the addrSpace is not valid + // and we should assert here. + // + NV_ASSERT(0); + } + return aperture; +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_gm200.c b/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_gm200.c new file mode 100644 index 000000000..139ac6c10 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_gm200.c @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mmu/kern_gmmu.h" + +/*! + * Initialize the GMMU format families. + */ +NV_STATUS +kgmmuFmtFamiliesInit_GM200(OBJGPU *pGpu, KernelGmmu *pKernelGmmu) +{ + NvU32 i; + NvU32 v; + + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + GMMU_FMT_FAMILY *pFam = pKernelGmmu->pFmtFamilies[v]; + if (NULL != pFam) + { + // + // GM20X supports sparse directly in HW by setting the + // volatile bit when the valid bit is clear. + // + nvFieldSetBool(&pFam->pte.fldValid, NV_FALSE, pFam->sparsePte.v8); + nvFieldSetBool(&pFam->pte.fldVolatile, NV_TRUE, pFam->sparsePte.v8); + + // PDEs are similar but use aperture fields for validity. + if (nvFieldIsValid32(&pFam->pde.fldVolatile.desc)) + { + gmmuFieldSetAperture(&pFam->pde.fldAperture, GMMU_APERTURE_INVALID, + pFam->sparsePde.v8); + nvFieldSetBool(&pFam->pde.fldVolatile, NV_TRUE, pFam->sparsePde.v8); + } + + // Multi-PDE case uses sub-level 0 (big page table) volatile bit. 
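+            // Each sub-level that defines these fields has its aperture set
+            // to INVALID in the sparse multi-PDE template; only sub-level 0
+            // (the big page table) also gets the volatile bit.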
+ for (i = 0; i < MMU_FMT_MAX_SUB_LEVELS; ++i) + { + const GMMU_FMT_PDE *pPdeFmt = &pFam->pdeMulti.subLevels[i]; + if (nvFieldIsValid32(&pPdeFmt->fldVolatile.desc)) + { + gmmuFieldSetAperture(&pPdeFmt->fldAperture, GMMU_APERTURE_INVALID, + pFam->sparsePdeMulti.v8); + if (0 == i) + { + nvFieldSetBool(&pPdeFmt->fldVolatile, NV_TRUE, pFam->sparsePdeMulti.v8); + } + } + } + } + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/pascal/kern_gmmu_fmt_gp10x.c b/src/nvidia/src/kernel/gpu/mmu/arch/pascal/kern_gmmu_fmt_gp10x.c new file mode 100644 index 000000000..f790df91c --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/pascal/kern_gmmu_fmt_gp10x.c @@ -0,0 +1,195 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(SRT_BUILD) +#include "gpu/mmu/kern_gmmu.h" +#endif +#include "mmu/gmmu_fmt.h" +#include "published/pascal/gp100/dev_mmu.h" + +/*! + * PD3 [48:47] + * | + * v + * PD2 [46:38] + * | + * v + * PD1 [37:29] + * | + * v + * PD0 [28:21] / PT_LARGE [28:21] (2MB page) + * | \ + * | \ + * v v + * PT_SMALL PT_BIG (64KB page) + * [20:12] [20:16] + */ +void kgmmuFmtInitLevels_GP10X(KernelGmmu *pKernelGmmu, + MMU_FMT_LEVEL *pLevels, + const NvU32 numLevels, + const NvU32 version, + const NvU32 bigPageShift) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_2); + NV_ASSERT_OR_RETURN_VOID(numLevels >= 6); + NV_ASSERT_OR_RETURN_VOID(bigPageShift == 16 || bigPageShift == 17); + + // Page directory 3 (root). + pLevels[0].virtAddrBitHi = 48; + pLevels[0].virtAddrBitLo = 47; + pLevels[0].entrySize = NV_MMU_VER2_PDE__SIZE; + pLevels[0].numSubLevels = 1; + pLevels[0].subLevels = pLevels + 1; + + // Page directory 2. + pLevels[1].virtAddrBitHi = 46; + pLevels[1].virtAddrBitLo = 38; + pLevels[1].entrySize = NV_MMU_VER2_PDE__SIZE; + pLevels[1].numSubLevels = 1; + pLevels[1].subLevels = pLevels + 2; + + // Page directory 1. + pLevels[2].virtAddrBitHi = 37; + pLevels[2].virtAddrBitLo = 29; + pLevels[2].entrySize = NV_MMU_VER2_PDE__SIZE; + pLevels[2].numSubLevels = 1; + pLevels[2].subLevels = pLevels + 3; + + // Page directory 0. + pLevels[3].virtAddrBitHi = 28; + pLevels[3].virtAddrBitLo = 21; + pLevels[3].entrySize = NV_MMU_VER2_DUAL_PDE__SIZE; + pLevels[3].numSubLevels = 2; + pLevels[3].bPageTable = NV_TRUE; + pLevels[3].subLevels = pLevels + 4; + + // Big page table. 
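+    // Covers VA bits [20:bigPageShift], i.e. [20:16] for 64KB or [20:17] for
+    // 128KB big pages.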
+ pLevels[4].virtAddrBitHi = 20; + pLevels[4].virtAddrBitLo = (NvU8)bigPageShift; + pLevels[4].entrySize = NV_MMU_VER2_PTE__SIZE; + pLevels[4].bPageTable = NV_TRUE; + + // Small page table. + pLevels[5].virtAddrBitHi = 20; + pLevels[5].virtAddrBitLo = 12; + pLevels[5].entrySize = NV_MMU_VER2_PTE__SIZE; + pLevels[5].bPageTable = NV_TRUE; +} + +void kgmmuFmtInitPdeMulti_GP10X(KernelGmmu *pKernelGmmu, + GMMU_FMT_PDE_MULTI *pPdeMulti, + const NvU32 version, + const NV_FIELD_ENUM_ENTRY *pPdeApertures) +{ + GMMU_FMT_PDE *pPdeBig = &pPdeMulti->subLevels[0]; + GMMU_FMT_PDE *pPdeSmall = &pPdeMulti->subLevels[1]; + + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_2); + + // Dual PDE - big part. + pPdeBig->version = GMMU_FMT_VERSION_2; + INIT_FIELD_APERTURE(&pPdeBig->fldAperture, NV_MMU_VER2_DUAL_PDE_APERTURE_BIG, + pPdeApertures); + INIT_FIELD_ADDRESS(&pPdeBig->fldAddrVidmem, NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_VID, + NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT); + INIT_FIELD_ADDRESS(&pPdeBig->fldAddrSysmem, NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SYS, + NV_MMU_VER2_DUAL_PDE_ADDRESS_BIG_SHIFT); + INIT_FIELD_BOOL(&pPdeBig->fldVolatile, NV_MMU_VER2_DUAL_PDE_VOL_BIG); + + // Dual PDE - small part. + pPdeSmall->version = GMMU_FMT_VERSION_2; + INIT_FIELD_APERTURE(&pPdeSmall->fldAperture, NV_MMU_VER2_DUAL_PDE_APERTURE_SMALL, + pPdeApertures); + INIT_FIELD_ADDRESS(&pPdeSmall->fldAddrVidmem, NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_VID, + NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT); + INIT_FIELD_ADDRESS(&pPdeSmall->fldAddrSysmem, NV_MMU_VER2_DUAL_PDE_ADDRESS_SMALL_SYS, + NV_MMU_VER2_DUAL_PDE_ADDRESS_SHIFT); + INIT_FIELD_BOOL(&pPdeSmall->fldVolatile, NV_MMU_VER2_DUAL_PDE_VOL_SMALL); +} + +void kgmmuFmtInitPde_GP10X(KernelGmmu *pKernelGmmu, + GMMU_FMT_PDE *pPde, + const NvU32 version, + const NV_FIELD_ENUM_ENTRY *pPdeApertures) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_2); + + pPde->version = GMMU_FMT_VERSION_2; + INIT_FIELD_APERTURE(&pPde->fldAperture, NV_MMU_VER2_PDE_APERTURE, + pPdeApertures); + INIT_FIELD_ADDRESS(&pPde->fldAddrVidmem, NV_MMU_VER2_PDE_ADDRESS_VID, + NV_MMU_VER2_PDE_ADDRESS_SHIFT); + INIT_FIELD_ADDRESS(&pPde->fldAddrSysmem, NV_MMU_VER2_PDE_ADDRESS_SYS, + NV_MMU_VER2_PDE_ADDRESS_SHIFT); + INIT_FIELD_BOOL(&pPde->fldVolatile, NV_MMU_VER2_PDE_VOL); +} + +void kgmmuFmtInitPte_GP10X(KernelGmmu *pKernelGmmu, + GMMU_FMT_PTE *pPte, + const NvU32 version, + const NV_FIELD_ENUM_ENTRY *pPteApertures, + const NvBool bUnifiedAperture) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_2); + + pPte->version = GMMU_FMT_VERSION_2; + INIT_FIELD_BOOL(&pPte->fldValid, NV_MMU_VER2_PTE_VALID); + INIT_FIELD_APERTURE(&pPte->fldAperture, NV_MMU_VER2_PTE_APERTURE, pPteApertures); + INIT_FIELD_DESC32(&pPte->fldPeerIndex, NV_MMU_VER2_PTE_ADDRESS_VID_PEER); + INIT_FIELD_BOOL(&pPte->fldVolatile, NV_MMU_VER2_PTE_VOL); + INIT_FIELD_BOOL(&pPte->fldReadOnly, NV_MMU_VER2_PTE_READ_ONLY); + INIT_FIELD_BOOL(&pPte->fldPrivilege, NV_MMU_VER2_PTE_PRIVILEGE); + INIT_FIELD_BOOL(&pPte->fldEncrypted, NV_MMU_VER2_PTE_ENCRYPTED); + INIT_FIELD_BOOL(&pPte->fldAtomicDisable, NV_MMU_VER2_PTE_ATOMIC_DISABLE); + INIT_FIELD_DESC32(&pPte->fldKind, NV_MMU_VER2_PTE_KIND); + + INIT_FIELD_ADDRESS(&pPte->fldAddrVidmem, NV_MMU_VER2_PTE_ADDRESS_VID, NV_MMU_VER2_PTE_ADDRESS_SHIFT); + + INIT_FIELD_ADDRESS(&pPte->fldAddrPeer, NV_MMU_VER2_PTE_ADDRESS_VID, NV_MMU_VER2_PTE_ADDRESS_SHIFT); + + if (bUnifiedAperture) + { + // + // On Tegra, use the vidmem address field descriptor for sysmem as well. + // This is to avoid clobbering the comptags in PTE. 
+ // + // Comptag fields overlap with the upper bits of the sysmem physical address. + // Sysmem compression is supported on Tegra since we have a unified aperture. + // The physical address width supported by Tegra corresponds to the vidmem aperture. + // So the upper bits of sysmem can be safely used for compression. + // + INIT_FIELD_ADDRESS(&pPte->fldAddrSysmem, NV_MMU_VER2_PTE_ADDRESS_VID, NV_MMU_VER2_PTE_ADDRESS_SHIFT); + } + else + { + INIT_FIELD_ADDRESS(&pPte->fldAddrSysmem, NV_MMU_VER2_PTE_ADDRESS_SYS, NV_MMU_VER2_PTE_ADDRESS_SHIFT); + } +} + +void kgmmuFmtInitPteComptagLine_GP10X(KernelGmmu *pKernelGmmu, + GMMU_FMT_PTE *pPte, + const NvU32 version) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_2); + INIT_FIELD_DESC32(&pPte->fldCompTagLine, NV_MMU_VER2_PTE_COMPTAGLINE); +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/pascal/kern_gmmu_gp100.c b/src/nvidia/src/kernel/gpu/mmu/arch/pascal/kern_gmmu_gp100.c new file mode 100644 index 000000000..659027e99 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/pascal/kern_gmmu_gp100.c @@ -0,0 +1,231 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/gpu.h" + +#include "gpu/mem_mgr/fermi_dma.h" + +#include "published/pascal/gp100/dev_ram.h" +#include "published/pascal/gp100/dev_fault.h" + +/*! + * Check if a specific GMMU format version is supported. + */ +NvBool +kgmmuFmtIsVersionSupported_GP10X(KernelGmmu *pKernelGmmu, NvU32 version) +{ + return (version == GMMU_FMT_VERSION_2); +} + +/*! + * @brief Get the size of PDB allocation for 5-level page table formats + * + * Because GMMU can prefetch uninitialized PDB entries and cause XVE to hang, + * we need to allocate all entries of the PDB regardless of vaLimit. + * + * @param[in] pKernelGmmu + * @param[in] pLevelFmt Level format of the PDB. Must be root level. + * @param[in] vaLimit VA limit that needs to be covered + * + */ +NvU32 +kgmmuGetPDBAllocSize_GP100 +( + KernelGmmu *pKernelGmmu, + const MMU_FMT_LEVEL *pLevelFmt, + NvU64 vaLimit +) +{ + return mmuFmtLevelSize(pLevelFmt); +} + +/*! 
+ * @brief This gets the offset and data for vaLimit + * + * @param[in] pKernelGmmu + * @param[in] pVAS OBJVASPACE pointer + * @param[in] subctxId subctxId value + * @param[in] pParams Pointer to the structure containing parameters passed by the engine + * @param[out] pOffset Pointer to offset of NV_RAMIN_ADR_LIMIT_LO:NV_RAMIN_ADR_LIMIT_HI pair + * @param[out] pData Pointer to value to write + * + * @returns NV_STATUS + */ +NV_STATUS +kgmmuInstBlkVaLimitGet_GP100 +( + KernelGmmu *pKernelGmmu, + OBJVASPACE *pVAS, + NvU32 subctxId, + INST_BLK_INIT_PARAMS *pParams, + NvU32 *pOffset, + NvU64 *pData +) +{ + ct_assert(SF_OFFSET(NV_RAMIN_ADR_LIMIT_HI) == SF_OFFSET(NV_RAMIN_ADR_LIMIT_LO) + 4); + ct_assert((SF_OFFSET(NV_RAMIN_ADR_LIMIT_LO) & 7) == 0); + + NvU64 vaLimit; + NvU64 vaMask = ((NvU64)SF_SHIFTMASK(NV_RAMIN_ADR_LIMIT_HI) << 32) | + SF_SHIFTMASK(NV_RAMIN_ADR_LIMIT_LO); + + // Only legacy pdb is valid in instance block. + NV_ASSERT(subctxId == FIFO_PDB_IDX_BASE); + NV_ASSERT_OR_RETURN(pVAS, NV_ERR_INVALID_ARGUMENT); + + vaLimit = vaspaceGetVaLimit(pVAS) - pParams->uvmKernelPrivRegion; + *pData = (vaLimit & vaMask) | 0xfff; + *pOffset = SF_OFFSET(NV_RAMIN_ADR_LIMIT_LO); + + return NV_OK; +} + +/*! + * @brief This gets the offsets and data for the PDB limit + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * @param[in] pVAS OBJVASPACE pointer + * @param[in] pParams Pointer to the structure containing parameters passed by the engine + * @param[in] subctxId subctxId value + * @param[out] pOffsetLo Pointer to low offset + * @param[out] pDataLo Pointer to data written at above offset + * @param[out] pOffsetHi Pointer to high offset + * @param[out] pDataHi Pointer to data written at above offset + * + * @returns + */ +NV_STATUS +kgmmuInstBlkPageDirBaseGet_GP100 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + OBJVASPACE *pVAS, + INST_BLK_INIT_PARAMS *pParams, + NvU32 subctxId, + NvU32 *pOffsetLo, + NvU32 *pDataLo, + NvU32 *pOffsetHi, + NvU32 *pDataHi +) +{ + + NV_ASSERT(subctxId == FIFO_PDB_IDX_BASE); + + if (pVAS != NULL) + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE); + const GMMU_FMT *pFmt = gvaspaceGetGmmuFmt(pGVAS, pGpu); + PMEMORY_DESCRIPTOR pPDB; + RmPhysAddr physAdd; + NvU32 aperture; + NvU32 addrLo; + NvU32 bigPageSize = vaspaceGetBigPageSize(pVAS); + + pPDB = (pParams->bIsClientAdmin) ? vaspaceGetKernelPageDirBase(pVAS, pGpu) : + vaspaceGetPageDirBase(pVAS, pGpu); + NV_ASSERT_OR_RETURN(pPDB != NULL, NV_ERR_INVALID_STATE); + + physAdd = memdescGetPhysAddr(pPDB, AT_GPU, 0); + aperture = kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pPDB); + addrLo = NvU64_LO32(physAdd >> NV_RAMIN_BASE_SHIFT); + + *pOffsetLo = SF_OFFSET(NV_RAMIN_PAGE_DIR_BASE_LO); + *pDataLo = SF_NUM(_RAMIN_PAGE_DIR_BASE, _TARGET, aperture) | + ((pParams->bIsFaultReplayable) ? + SF_DEF(_RAMIN_PAGE_DIR_BASE, _FAULT_REPLAY_TEX, _ENABLED) | + SF_DEF(_RAMIN_PAGE_DIR_BASE, _FAULT_REPLAY_GCC, _ENABLED) : + SF_DEF(_RAMIN_PAGE_DIR_BASE, _FAULT_REPLAY_TEX, _DISABLED) | + SF_DEF(_RAMIN_PAGE_DIR_BASE, _FAULT_REPLAY_GCC, _DISABLED)) | + ((NULL != pFmt) && (GMMU_FMT_VERSION_2 == pFmt->version) ? + SF_DEF(_RAMIN, _USE_NEW_PT_FORMAT, _TRUE) : + SF_DEF(_RAMIN, _USE_NEW_PT_FORMAT, _FALSE)) | + ((bigPageSize == FERMI_BIG_PAGESIZE_64K) ? 
+ SF_DEF(_RAMIN, _BIG_PAGE_SIZE, _64KB) : + SF_DEF(_RAMIN, _BIG_PAGE_SIZE, _128KB)) | + SF_NUM(_RAMIN_PAGE_DIR_BASE, _VOL, memdescGetVolatility(pPDB)) | + SF_NUM(_RAMIN_PAGE_DIR_BASE, _LO, addrLo); + + *pOffsetHi = SF_OFFSET(NV_RAMIN_PAGE_DIR_BASE_HI); + *pDataHi = SF_NUM(_RAMIN_PAGE_DIR_BASE, _HI, NvU64_HI32(physAdd)); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "A channel must have a pVAS if it does not support TSG sub context!\n"); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_PARAMETER; + } + + return NV_OK; +} + + +/** + * @brief Converts a MMU fault type (NV_PFAULT_FAULT_TYPE_*) into a string. + * + * @param[in] faultType NV_PFAULT_FAULT_TYPE_* + * + * @returns a string (always non-null) + */ +const char * +kgmmuGetFaultTypeString_GP100(KernelGmmu *pGmmu, NvU32 faultType) +{ + switch (faultType) + { + case NV_PFAULT_FAULT_TYPE_PDE: + return "FAULT_PDE"; + case NV_PFAULT_FAULT_TYPE_PDE_SIZE: + return "FAULT_PDE_SIZE"; + case NV_PFAULT_FAULT_TYPE_PTE: + return "FAULT_PTE"; + case NV_PFAULT_FAULT_TYPE_VA_LIMIT_VIOLATION: + return "FAULT_VA_LIMIT_VIOLATION"; + case NV_PFAULT_FAULT_TYPE_UNBOUND_INST_BLOCK: + return "FAULT_UNBOUND_INST_BLOCK"; + case NV_PFAULT_FAULT_TYPE_PRIV_VIOLATION: + return "FAULT_PRIV_VIOLATION"; + case NV_PFAULT_FAULT_TYPE_RO_VIOLATION: + return "FAULT_RO_VIOLATION"; + case NV_PFAULT_FAULT_TYPE_PITCH_MASK_VIOLATION: + return "FAULT_PITCH_MASK_VIOLATION"; + case NV_PFAULT_FAULT_TYPE_WORK_CREATION: + return "FAULT_WORK_CREATION"; + case NV_PFAULT_FAULT_TYPE_UNSUPPORTED_APERTURE: + return "FAULT_UNSUPPORTED_APERTURE"; + case NV_PFAULT_FAULT_TYPE_COMPRESSION_FAILURE: + return "FAULT_COMPRESSION_FAILURE"; + case NV_PFAULT_FAULT_TYPE_UNSUPPORTED_KIND: + return "FAULT_INFO_TYPE_UNSUPPORTED_KIND"; + case NV_PFAULT_FAULT_TYPE_REGION_VIOLATION: + return "FAULT_INFO_TYPE_REGION_VIOLATION"; + case NV_PFAULT_FAULT_TYPE_POISONED: + return "FAULT_INFO_TYPE_POISONED"; + case NV_PFAULT_FAULT_TYPE_ATOMIC_VIOLATION: + return "FAULT_INFO_TYPE_ATOMIC_VIOLATION"; + default: + return "UNRECOGNIZED_FAULT"; + } +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/turing/kern_gmmu_fmt_tu10x.c b/src/nvidia/src/kernel/gpu/mmu/arch/turing/kern_gmmu_fmt_tu10x.c new file mode 100644 index 000000000..e4033fe83 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/turing/kern_gmmu_fmt_tu10x.c @@ -0,0 +1,48 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#if !defined(SRT_BUILD) +#include "gpu/mmu/kern_gmmu.h" +#endif +#include "mmu/gmmu_fmt.h" +#include "published/turing/tu102/dev_mmu.h" + +void kgmmuFmtInitPteComptagLine_TU10X(KernelGmmu *pKernelGmmu, GMMU_FMT_PTE *pPte, const NvU32 version) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_2); + INIT_FIELD_DESC32(&pPte->fldCompTagLine, NV_MMU_VER2_PTE_COMPTAGLINE); +} + +void kgmmuFmtInitPeerPteFld_TU10X(KernelGmmu *pKernelGmmu, GMMU_FMT_PTE *pPte, const NvU32 version) +{ + NV_ASSERT_OR_RETURN_VOID(version == GMMU_FMT_VERSION_2); + + NV_FIELD_DESC64 *pField = &pPte->fldAddrPeer.desc; + + pPte->fldAddrPeer.shift = NV_MMU_VER2_PTE_ADDRESS_SHIFT; + + BEGIN_DISCONTIG_FIELD_DESC64(pField) + DRF_DISCONTIG_FIELD_DESC64(pField, NV_MMU_VER2_PTE_ADDRESS_VID) + DRF_DISCONTIG_FIELD_DESC64(pField, NV_MMU_VER2_PTE_COMPTAGLINE) + END_FIELD_DESC64_DISCONTIGUOUS(pField) +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/turing/kern_gmmu_tu102.c b/src/nvidia/src/kernel/gpu/mmu/arch/turing/kern_gmmu_tu102.c new file mode 100644 index 000000000..d5ce373c1 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/turing/kern_gmmu_tu102.c @@ -0,0 +1,406 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file kern_gmmu_tu102.c + * @brief TURING specific HAL MMU routines reside in this file + */ + +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/gpu.h" +#include "kernel/gpu/intr/intr.h" + +#include "published/turing/tu102/dev_fb.h" +#include "published/turing/tu102/dev_vm.h" + + +/*! + * @brief Checks for any pending invalidations. + * + * @param pGpu + * @param pKernelGmmu + * @param pTimeOut Timeout for the invalidate operation. 
+ * @param gfid GFID + */ +NV_STATUS +kgmmuCheckPendingInvalidates_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + RMTIMEOUT *pTimeOut, + NvU32 gfid +) +{ + NV_STATUS status = NV_OK; + + while (1) + { + NvU32 regVal; + + { + regVal = GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE); + } + + if (FLD_TEST_DRF(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE, _TRIGGER, _FALSE, regVal)) + break; + + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + status = NV_ERR_GPU_IN_FULLCHIP_RESET; + else if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + status = NV_ERR_GPU_IS_LOST; + else + status = gpuCheckTimeout(pGpu, pTimeOut); + + if (NV_OK != status) + break; + + osSpinLoop(); + } + return status; +} + +/*! + * @brief Commit the invalidate command to H/W. + * + * @param pGpu + * @param pKernelGmmu + * @param pParams Pointer to TLB_INVALIDATE_PARAMS data. + */ +NV_STATUS +kgmmuCommitTlbInvalidate_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + TLB_INVALIDATE_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + + if (!FLD_TEST_DRF(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE, _ALL_PDB, _TRUE, pParams->regVal)) + { + kgmmuSetPdbToInvalidate_HAL(pGpu, pKernelGmmu, pParams); + } + + { + if (IS_VIRTUAL(pGpu)) + { + // Prevent VF from updating INVALIDATE_ALL_PDB, bug 3356599 + pParams->regVal = FLD_SET_DRF(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE, _ALL_PDB, _FALSE, pParams->regVal); + } + + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE, pParams->regVal); + } + + // Wait for the invalidate command to complete. + status = kgmmuCheckPendingInvalidates_HAL(pGpu, pKernelGmmu, &pParams->timeout, pParams->gfid); + + return status; +} + +/*! + * @brief This function sets the PDB physical address for the VAS whose + * entries are to be invalidated. + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * @param[in] pParams Pointer to TLB_INVALIDATE_PARAMS data + */ +void +kgmmuSetPdbToInvalidate_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + TLB_INVALIDATE_PARAMS *pParams +) +{ + { + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB, + DRF_NUM(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE_PDB, _ADDR, + NvU64_LO32(pParams->pdbAddress >> + NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_ADDR_ALIGNMENT)) | + DRF_NUM(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE_PDB, _APERTURE, pParams->pdbAperture)); + + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_UPPER_PDB, + DRF_NUM(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE_UPPER_PDB, _ADDR, + NvU64_LO32((pParams->pdbAddress >> + NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_ADDR_ALIGNMENT) >> + DRF_SIZE(NV_VIRTUAL_FUNCTION_PRIV_MMU_INVALIDATE_PDB_ADDR)))); + } +} + +/** + * @brief Initialize the GMMU format families. + * @details Turing supports PA based comptagline allocation policy + * + * @param[in] pGpu + * @param[in] pKernelGmmu + */ +NV_STATUS +kgmmuFmtFamiliesInit_TU102(OBJGPU *pGpu, KernelGmmu *pKernelGmmu) +{ + extern NV_STATUS kgmmuFmtFamiliesInit_GV100(OBJGPU *pGpu, KernelGmmu *pKernelGmmu); + + NvU32 v; + NV_STATUS result; + GMMU_FMT_FAMILY *pFam; + + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + pFam = pKernelGmmu->pFmtFamilies[v]; + if (NULL != pFam) + { + if (kgmmuIsBug2720120WarEnabled(pKernelGmmu)) + { + NV_ASSERT_OK_OR_RETURN(kgmmuSetupWarForBug2720120_HAL(pKernelGmmu, pFam)); + } + } + } + + // inherit former FmtFamilies setup procedure + result = kgmmuFmtFamiliesInit_GV100(pGpu, pKernelGmmu); + + return result; +} + +/*! 
+ * @brief Set membar parameters for tlb invalidation and return + * any additional sysmembar flushes that is required after tlb invalidation + * is committed. + * + * During PTE downgrades, tlb invalidates should be followed by + * sysmembar. Since Turing doesnt have HSHUB we dont need any + * additional flushes. + * + * Use invalidate units SYSMEMBAR and return flushCount 0. + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * @param[in] pParams Tlb invalidation parameter structure. + * + */ +NvU32 +kgmmuSetTlbInvalidateMembarWarParameters_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + TLB_INVALIDATE_PARAMS *pParams +) +{ + + pParams->regVal = FLD_SET_DRF(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE, _SYS_MEMBAR, _TRUE, pParams->regVal); + pParams->regVal = FLD_SET_DRF(_VIRTUAL_FUNCTION_PRIV, _MMU_INVALIDATE, _ACK, _GLOBALLY, pParams->regVal); + + return 0; +} + +NV_STATUS +kgmmuGetFaultRegisterMappings_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index, + NvP64 *pFaultBufferGet, + NvP64 *pFaultBufferPut, + NvP64 *pFaultBufferInfo, + NvP64 *pHubIntr, + NvP64 *pHubIntrEnSet, + NvP64 *pHubIntrEnClear, + NvU32 *faultMask, + NvP64 *pPrefetchCtrl +) +{ + Intr *pIntr = GPU_GET_INTR(pGpu); + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + NvP64 bar0Mapping = NV_PTR_TO_NvP64(pMapping->gpuNvAddr); + NvU32 intrVector = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_REPLAYABLE_FAULT, NV_FALSE); + NvU32 leafReg, leafBit; + + leafReg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector); + + NV_ASSERT_OR_RETURN((index < NUM_FAULT_BUFFERS), NV_ERR_INVALID_ARGUMENT); + + *pFaultBufferGet = NvP64_PLUS_OFFSET(bar0Mapping, + GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET(index))); + *pFaultBufferPut = NvP64_PLUS_OFFSET(bar0Mapping, + GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT(index))); + // Note: this variable is deprecated since buffer overflow is not a seperate register from Volta + *pFaultBufferInfo = 0; + *pHubIntr = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(leafReg))); + *pHubIntrEnSet = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(leafReg))); + *pHubIntrEnClear = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR(leafReg))); + + // Only REPLAYABLE fault buffer and ACCESS Counter interrupts can be owned by clients + if (index == NV_VIRTUAL_FUNCTION_PRIV_MMU_REPLAY_FAULT_BUFFER) + { + *faultMask = NVBIT(leafBit); + *pPrefetchCtrl = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_PAGE_FAULT_CTRL)); + } + + return NV_OK; +} + + +/** + * @brief Test the access counter write NAK. + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * @retval true Write was negatively acknowledged. + */ +NvBool kgmmuTestAccessCounterWriteNak_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NvU32 accessCntrInfo = GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO); + return FLD_TEST_DRF(_VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_NOTIFY_BUFFER_INFO, + _WRITE_NACK, _TRUE, accessCntrInfo); +} + +/*! 
+ * @brief Service replayable fault + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * + * @returns NV_STATUS + */ +NV_STATUS +kgmmuServiceReplayableFault_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION *ppEventNotification = NULL; + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + return NV_OK; + + if (NV_OK == CliGetEventNotificationList(pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferClient, + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferObject, NULL, &ppEventNotification) && ppEventNotification) + { + rmStatus = notifyEvents(pGpu, *ppEventNotification, NVC369_NOTIFIER_MMU_FAULT_REPLAYABLE, + 0, 0, NV_OK, NV_OS_WRITE_THEN_AWAKEN); + NV_ASSERT(rmStatus == NV_OK); + } + return rmStatus; +} + +NvU32 +kgmmuReadMmuFaultStatus_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 gfid +) +{ + { + return GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_STATUS); + } +} + +void +kgmmuWriteMmuFaultStatus_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 value +) +{ + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_STATUS, value); +} + +NvU32 +kgmmuReadMmuFaultBufferSize_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index, + NvU32 gfid +) +{ + { + return GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_SIZE(index)); + } +} + +NV_STATUS +kgmmuReadFaultBufferGetPtr_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index, + NvU32 *pGetOffset, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 val; + NV_ASSERT_OR_RETURN((index < NUM_FAULT_BUFFERS), NV_ERR_INVALID_ARGUMENT); + + val = GPU_VREG_RD32_EX(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_GET(index), pThreadState); + *pGetOffset = DRF_VAL(_VIRTUAL_FUNCTION_PRIV, _MMU_FAULT_BUFFER_GET, _PTR, val); + + return NV_OK; +} + +NV_STATUS +kgmmuReadFaultBufferPutPtr_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index, + NvU32 *pPutOffset +) +{ + NvU32 val; + NV_ASSERT_OR_RETURN((index < NUM_FAULT_BUFFERS), NV_ERR_INVALID_ARGUMENT); + + val = GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_MMU_FAULT_BUFFER_PUT(index)); + *pPutOffset = DRF_VAL(_VIRTUAL_FUNCTION_PRIV, _MMU_FAULT_BUFFER_PUT, _PTR, val); + return NV_OK; +} + +/*! + * @brief Test if an MMU non-replayable fault is pending + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * + * @returns NV_TRUE if an MMU non-replayable fault is pending, else NV_FALSE + */ +NvBool +kgmmuIsNonReplayableFaultPending_TU102 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NvU32 reg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(NV_PFB_PRI_MMU_INT_VECTOR_FAULT_NOTIFY_NON_REPLAYABLE); + NvU32 bit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(NV_PFB_PRI_MMU_INT_VECTOR_FAULT_NOTIFY_NON_REPLAYABLE); + NvU32 pending = GPU_VREG_RD32_EX(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(reg), NULL /* threadstate */); + return pending & NVBIT(bit); +} diff --git a/src/nvidia/src/kernel/gpu/mmu/arch/volta/kern_gmmu_gv100.c b/src/nvidia/src/kernel/gpu/mmu/arch/volta/kern_gmmu_gv100.c new file mode 100644 index 000000000..d06daf970 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/arch/volta/kern_gmmu_gv100.c @@ -0,0 +1,523 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/gpu.h" +#include "kernel/gpu/rc/kernel_rc.h" + +#include "published/volta/gv100/dev_fb.h" +#include "published/volta/gv100/dev_ram.h" +#include "published/volta/gv100/dev_fault.h" + +/** + * @brief Initialize the supported GMMU HW format structures. + * @details GV100+ supports ATS NV4K 64K PTE encoding + * + * @param pKernelGmmu The KernelGmmu + * @param pGpu The gpu + */ +NV_STATUS +kgmmuFmtFamiliesInit_GV100(OBJGPU *pGpu, KernelGmmu *pKernelGmmu) +{ + extern NV_STATUS kgmmuFmtFamiliesInit_GM200(OBJGPU *pGpu, KernelGmmu *pKernelGmmu); + NvU32 v; + NV_STATUS result; + GMMU_FMT_FAMILY *pFam; + + // setup nv4kPte endcoding: v - 0, vol - 1, priv - 1 + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + pFam = pKernelGmmu->pFmtFamilies[v]; + if (NULL != pFam) + { + nvFieldSetBool(&pFam->pte.fldValid, NV_FALSE, pFam->nv4kPte.v8); + nvFieldSetBool(&pFam->pte.fldVolatile, NV_TRUE, pFam->nv4kPte.v8); + nvFieldSetBool(&pFam->pte.fldPrivilege, NV_TRUE, pFam->nv4kPte.v8); + } + } + + // inherit former FmtFamilies setup procedure + result = kgmmuFmtFamiliesInit_GM200(pGpu, pKernelGmmu); + + return result; +} + +NV_STATUS +kgmmuChangeReplayableFaultOwnership_GV100(OBJGPU *pGpu, KernelGmmu *pKernelGmmu, NvBool bOwnedByRm) +{ + // + // Disable the interrupt when RM loses the ownership and enable it back when + // RM regains it. At least nvUvmInterfaceOwnPageFaultIntr() relies on that behavior. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + return NV_OK; + + if (bOwnedByRm) + pKernelGmmu->uvmSharedIntrRmOwnsMask |= RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY; + else + pKernelGmmu->uvmSharedIntrRmOwnsMask &= ~RM_UVM_SHARED_INTR_MASK_MMU_REPLAYABLE_FAULT_NOTIFY; + + // + // Notably don't set the PDB_PROP_GPU_IGNORE_REPLAYABLE_FAULTS property as + // on Volta that would mean masking out all MMU faults from pending + // interrupts. + // + + return NV_OK; +} + +/*! + * @brief Creates the shadow fault buffer for client handling of non-replayable faults + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * + * @returns + */ +NV_STATUS +kgmmuClientShadowFaultBufferAlloc_GV100 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + return NV_OK; + + if (pKernelGmmu->getProperty(pKernelGmmu, PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED)) + { + NV_PRINTF(LEVEL_ERROR, "Fault-Buffer is disabled. 
ShadowBuffer cannot be created\n"); + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + + return kgmmuClientShadowFaultBufferNonreplayableAllocate(pGpu, pKernelGmmu); +} + +/*! + * @brief Frees the shadow fault buffer for client handling of non-replayable faults + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * + * @returns + */ +NV_STATUS +kgmmuClientShadowFaultBufferFree_GV100 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + return NV_OK; + + return kgmmuClientShadowFaultBufferNonreplayableDestroy(pGpu, pKernelGmmu); +} + +/*! + * @brief Writes the ATS properties to the instance block + * + * @param[in] pKernelGmmu + * @param[in] pVAS OBJVASPACE pointer + * @param[in] subctxId subctxId value + * @param[in] pInstBlkDesc Memory descriptor for the instance block of the engine + * + * @returns NV_STATUS + */ +NV_STATUS +kgmmuInstBlkAtsGet_GV100 +( + KernelGmmu *pKernelGmmu, + OBJVASPACE *pVAS, + NvU32 subctxId, + NvU32 *pOffset, + NvU32 *pData +) +{ + NvU32 pasid = 0; + + if (subctxId == FIFO_PDB_IDX_BASE) + { + // A channel is setting base PDB with a valid VAS. Otherwise, it should fail. + if (pVAS != NULL) + { + // Since ct_assert has to be done within compile time, it has to be at the top of the scope. Otherwise, the build fails. + ct_assert(SF_WIDTH(NV_RAMIN_PASID) <= 32); + + // + // The PASID value is provided by the OS and out of client control + // however if the PASID value is invalid the ATS feature will not function + // as expected so check sanity and fail early + // + NV_ASSERT_OR_RETURN(NV_OK == vaspaceGetPasid(pVAS, &pasid), + NV_ERR_INVALID_DATA); + if (pasid > MASK_BITS(SF_WIDTH(NV_RAMIN_PASID))) + { + NV_PRINTF(LEVEL_ERROR, + "Invalid PASID %d (max width %d bits)\n", pasid, + SF_WIDTH(NV_RAMIN_PASID)); + return NV_ERR_OPERATING_SYSTEM; + } + + *pOffset = SF_OFFSET(NV_RAMIN_ENABLE_ATS); + *pData = SF_NUM(_RAMIN, _ENABLE_ATS, vaspaceIsAtsEnabled(pVAS)) | + SF_NUM(_RAMIN, _PASID, pasid); + } + else + { + // We cannot set base PDB without pVAS! + NV_ASSERT_OR_RETURN(pVAS != NULL, NV_ERR_INVALID_STATE); + } + } + else + { + // In subcontext supported PDB, we set valid values with non-NULL VAS. Otherwise, PDB entry is invalid. + if (pVAS != NULL) + { + ct_assert(SF_WIDTH(NV_RAMIN_SC_PASID(0)) <= 32); + + // + // set ATS for legacy PDB if SubctxId is set to be FIFO_PDB_IDX_BASE + // Otherwise, set PDB with given SubctxId. + // + NV_ASSERT_OR_RETURN(NV_OK == vaspaceGetPasid(pVAS, &pasid), + NV_ERR_INVALID_DATA); + + if (pasid > MASK_BITS(SF_WIDTH(NV_RAMIN_SC_PASID(subctxId)))) + { + NV_PRINTF(LEVEL_ERROR, + "Invalid PASID %d (max width %d bits)\n", pasid, + SF_WIDTH(NV_RAMIN_SC_PASID(subctxId))); + return NV_ERR_OPERATING_SYSTEM; + } + + *pData = SF_IDX_NUM(_RAMIN_SC, _ENABLE_ATS, vaspaceIsAtsEnabled(pVAS), subctxId) | + SF_IDX_NUM(_RAMIN_SC, _PASID, pasid, subctxId); + } + else + { + // + // If pVAS is NULL, that means the PDB of this SubctxId is set to Invalid. + // In this case, ATS should be Disabled. + // + *pData = NV_RAMIN_ENABLE_ATS_FALSE; + } + + *pOffset = SF_OFFSET(NV_RAMIN_SC_ENABLE_ATS(subctxId)); + } + + return NV_OK; +} + +/*! 
+ * @brief This gets the offset and data for vaLimit + * + * @param[in] pKernelGmmu + * @param[in] pVAS OBJVASPACE pointer + * @param[in] subctxId subctxId value + * @param[in] pParams Pointer to the structure containing parameters passed by the engine + * @param[out] pOffset Pointer to offset of NV_RAMIN_ADR_LIMIT_LO:NV_RAMIN_ADR_LIMIT_HI pair + * @param[out] pData Pointer to value to write + * + * @returns NV_STATUS + */ +NV_STATUS +kgmmuInstBlkVaLimitGet_GV100 +( + KernelGmmu *pKernelGmmu, + OBJVASPACE *pVAS, + NvU32 subctxId, + INST_BLK_INIT_PARAMS *pParams, + NvU32 *pOffset, + NvU64 *pData +) +{ + extern NV_STATUS kgmmuInstBlkVaLimitGet_GP100(KernelGmmu *pKernelGmmu, OBJVASPACE *pVAS, NvU32 subctxId, INST_BLK_INIT_PARAMS *pParams, NvU32 *pOffset, NvU64 *pData); + + if (subctxId == FIFO_PDB_IDX_BASE) + { + return kgmmuInstBlkVaLimitGet_GP100(pKernelGmmu, pVAS, subctxId, pParams, + pOffset, pData); + } + + *pOffset = 0; + *pData = 0; + + return NV_OK; +} + +/*! + * @brief This gets the offsets and data for the PDB limit + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * @param[in] pVAS OBJVASPACE pointer + * @param[in] pParams Pointer to the structure containing parameters passed by the engine + * @param[in] subctxId subctxId value + * @param[out] pOffsetLo Pointer to low offset + * @param[out] pDataLo Pointer to data written at above offset + * @param[out] pOffsetHi Pointer to high offset + * @param[out] pDataHi Pointer to data written at above offset + * + * @returns + */ +NV_STATUS +kgmmuInstBlkPageDirBaseGet_GV100 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + OBJVASPACE *pVAS, + INST_BLK_INIT_PARAMS *pParams, + NvU32 subctxId, + NvU32 *pOffsetLo, + NvU32 *pDataLo, + NvU32 *pOffsetHi, + NvU32 *pDataHi +) +{ + extern NV_STATUS kgmmuInstBlkPageDirBaseGet_GP100(OBJGPU *pGpu, KernelGmmu *pKernelGmmu, OBJVASPACE *pVAS, INST_BLK_INIT_PARAMS *pParams, NvU32 subctxid, NvU32 *pOffsetLo, NvU32 *pDataLo, NvU32 *pOffsetHi, NvU32 *pDataHi); + + if (subctxId == FIFO_PDB_IDX_BASE) + { + return kgmmuInstBlkPageDirBaseGet_GP100(pGpu, pKernelGmmu, pVAS, + pParams, subctxId, pOffsetLo, pDataLo, pOffsetHi, pDataHi); + } + else + { + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + MEMORY_DESCRIPTOR *pPDB = NULL; + + if (pParams->bIsZombieSubctx) + { + pPDB = kfifoGetDummyPageMemDesc(pKernelFifo); + + NV_ASSERT_OR_RETURN((pPDB != NULL), NV_ERR_INVALID_STATE); + } + else if (pVAS != NULL) + { + pPDB = (pParams->bIsClientAdmin) ? + vaspaceGetKernelPageDirBase(pVAS, pGpu) : + vaspaceGetPageDirBase(pVAS, pGpu); + } + + if (pPDB == NULL) + { + // + // The teardown model for subcontext with UVM + CUDA is as follows: + // + // Step 1: Unregister(vas) --> UnsetPageDirectory(vas) + // Step 2: FreeSubcontext(vas) + // + // But new subcontext can be added between step 1 & step 2. + // Currently RM doesn't support the notion of a subcontext with NULL PDB. + // This results in RM failing subsequent subcontext allocation, causing the UNBOUND instance block failure in bug 1823795. + // To fix this, we will allow a subcontext to exist with invalid PDB until it is freed later. + // This shouldn't cause any functional issue as no access memory shouldn't happen from this subcontext. 
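+                // Both the LO and HI words of the subcontext PAGE_DIR_BASE
+                // entry are therefore written with the INVALID target encoding.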
+ + *pDataLo = NV_RAMIN_SC_PAGE_DIR_BASE_TARGET_INVALID; + *pDataHi = NV_RAMIN_SC_PAGE_DIR_BASE_TARGET_INVALID; + } + else + { + RmPhysAddr physAdd = memdescGetPhysAddr(pPDB, AT_GPU, 0); + NvU32 aperture = kgmmuGetHwPteApertureFromMemdesc(pKernelGmmu, pPDB); + NvU32 addrLo = NvU64_LO32(physAdd >> NV_RAMIN_BASE_SHIFT); + + // + // Volta only supports new page table format and 64KB big page size so + // forcing _USE_VER2_PT_FORMAT to _TRUE and _BIG_PAGE_SIZE to 64KB. + // + *pDataLo = + SF_IDX_NUM(_RAMIN_SC_PAGE_DIR_BASE, _TARGET, aperture,subctxId) | + ((pParams->bIsFaultReplayable)? + SF_IDX_DEF(_RAMIN_SC_PAGE_DIR_BASE, _FAULT_REPLAY_TEX, _ENABLED, subctxId) | + SF_IDX_DEF(_RAMIN_SC_PAGE_DIR_BASE, _FAULT_REPLAY_GCC, _ENABLED, subctxId) : + SF_IDX_DEF(_RAMIN_SC_PAGE_DIR_BASE, _FAULT_REPLAY_TEX, _DISABLED, subctxId) | + SF_IDX_DEF(_RAMIN_SC_PAGE_DIR_BASE, _FAULT_REPLAY_GCC, _DISABLED, subctxId)) | + SF_IDX_DEF(_RAMIN_SC, _USE_VER2_PT_FORMAT, _TRUE, subctxId) | + SF_IDX_DEF(_RAMIN_SC, _BIG_PAGE_SIZE, _64KB, subctxId) | + SF_IDX_NUM(_RAMIN_SC_PAGE_DIR_BASE, _VOL, memdescGetVolatility(pPDB), subctxId) | + SF_IDX_NUM(_RAMIN_SC_PAGE_DIR_BASE, _LO, addrLo, subctxId); + + *pDataHi = SF_IDX_NUM(_RAMIN_SC_PAGE_DIR_BASE, _HI, NvU64_HI32(physAdd), subctxId); + } + + *pOffsetLo = SF_OFFSET(NV_RAMIN_SC_PAGE_DIR_BASE_LO(subctxId)); + *pOffsetHi = SF_OFFSET(NV_RAMIN_SC_PAGE_DIR_BASE_HI(subctxId)); + } + + return NV_OK; +} + +/** + * @brief Report MMU Fault buffer overflow errors. MMU Fault + * buffer overflow is a fatal error. Raise an assert and + * any client notifications if registered, to ensure + * overflow is debugged properly. + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * + * @returns + */ +NV_STATUS +kgmmuReportFaultBufferOverflow_GV100 +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NV_STATUS rmStatus = NV_OK; + NvU32 faultStatus = kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, GPU_GFID_PF); + NvU32 faultBufferGet; + NvU32 faultBufferPut; + PEVENTNOTIFICATION *ppEventNotification = NULL; + NvU32 faultBufferSize; + + kgmmuReadFaultBufferGetPtr_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER, + &faultBufferGet, NULL); + faultBufferGet = DRF_VAL(_PFB_PRI, _MMU_FAULT_BUFFER_GET, _PTR, faultBufferGet); + + kgmmuReadFaultBufferPutPtr_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER, + &faultBufferPut); + faultBufferPut = DRF_VAL(_PFB_PRI, _MMU_FAULT_BUFFER_PUT, _PTR, faultBufferPut); + + faultBufferSize = kgmmuReadMmuFaultBufferSize_HAL(pGpu, pKernelGmmu, NON_REPLAYABLE_FAULT_BUFFER, GPU_GFID_PF); + + if (kgmmuIsNonReplayableFaultPending_HAL(pGpu, pKernelGmmu)) + { + if (IsVOLTA(pGpu)) + { + // + // Check if Non_replayable interrupt is set when overflow is seen. + // This shouldn't happen as this can cause a live-lock considering + // top-half will kept on coming and will not let overflow interrupt + // serviced. HW should disable the FAULT_INTR when overflow is + // detected. + // + NV_PRINTF(LEVEL_ERROR, "MMU Fault: GPU %d: HW-BUG : " + "NON_REPLAYABLE_INTR is high when OVERFLOW is detected\n", + pGpu->gpuInstance); + NV_ASSERT(0); + } + else + { + // + // With message-based MMU interrupts (Turing onwards), it is + // possible for us to get here - a real fault can happen while an + // overflow happens, and there is no ordering guarantee about the + // order of these interrupts in HW. However, if we write GET pointer + // with GET != PUT while overflow is detected, the fault interrupt + // will not be sent. 
Instead, the overflow interrupt will be sent, + // so this will not cause an interrupt storm with message-based + // interrupts. If HW does have a bug though, we'll see the below + // print repeatedly which can point to a HW bug where it isn't + // behaving the way it is designed to do. + // + NV_PRINTF(LEVEL_INFO, "MMU Fault: GPU %d: NON_REPLAYABLE_INTR " + "is high when OVERFLOW is detected\n", pGpu->gpuInstance); + } + } + + // Check if overflow is due to incorrect fault buffer size or GET > SIZE + if (FLD_TEST_DRF(_PFB_PRI, _MMU_FAULT_STATUS, _NON_REPLAYABLE_GETPTR_CORRUPTED, _SET, faultStatus) || + FLD_TEST_DRF(_PFB_PRI, _MMU_FAULT_STATUS, _REPLAYABLE_GETPTR_CORRUPTED, _SET, faultStatus)) + { + NV_PRINTF(LEVEL_ERROR, + "MMU Fault: GPU %d: Buffer overflow detected due to GET > SIZE\n", + pGpu->gpuInstance); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "MMU Fault: GPU %d: Buffer overflow detected due to incorrect SIZE\n", + pGpu->gpuInstance); + + NV_PRINTF(LEVEL_ERROR, + "MMU Fault: GPU %d: Buffer SIZE is expected to handle max faults " + "possible in system\n", pGpu->gpuInstance); + } + + NV_PRINTF(LEVEL_ERROR, + "MMU Fault: GPU %d: STATUS - 0x%x GET - 0x%x, PUT - 0x%x SIZE - 0x%x\n", + pGpu->gpuInstance, faultStatus, faultBufferGet, faultBufferPut, + faultBufferSize); + + // Raise an event for Mods if registered as Mods checks for overflow + if ((NV_OK == CliGetEventNotificationList(pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferClient, + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferObject, NULL, &ppEventNotification)) && ppEventNotification) + { + MODS_ARCH_ERROR_PRINTF("MMU Fault Buffer overflow detected\n"); + rmStatus = notifyEvents(pGpu, *ppEventNotification, NVC369_NOTIFIER_MMU_FAULT_ERROR, + 0, 0, NV_OK, NV_OS_WRITE_THEN_AWAKEN); + if (rmStatus != NV_OK) + return rmStatus; + } + + krcBreakpoint(GPU_GET_KERNEL_RC(pGpu)); + + faultStatus = kgmmuReadMmuFaultStatus_HAL(pGpu, pKernelGmmu, GPU_GFID_PF); + faultStatus = FLD_SET_DRF(_PFB_PRI, _MMU_FAULT_STATUS, _NON_REPLAYABLE_OVERFLOW, _RESET, + faultStatus); + kgmmuWriteMmuFaultStatus_HAL(pGpu, pKernelGmmu, faultStatus); + return rmStatus; +} + +/*! + * @brief Get the engine ID associated with the Graphics Engine + */ +NvU32 +kgmmuGetGraphicsEngineId_GV100 +( + KernelGmmu *pKernelGmmu +) +{ + return NV_PFAULT_MMU_ENG_ID_GRAPHICS; +} + +/*! + * @brief Reinit GMMU Peer PTE format to handle 47-bit peer addressing. + * This is controlled by NVSWITCH discovery and will not be enabled + * outside of specialized compute configurations. + * + * @param[in] pGmmu The valid gmmu + */ +NV_STATUS +kgmmuEnableNvlinkComputePeerAddressing_GV100(KernelGmmu *pKernelGmmu) +{ + NvU32 v; + + // + // Recalculate the format structures + // + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + if (!kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, g_gmmuFmtVersions[v])) + continue; + + kgmmuFmtInitPeerPteFld_HAL(pKernelGmmu, &pKernelGmmu->pFmtFamilies[v]->pte, + g_gmmuFmtVersions[v]); + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mmu/bar2_walk.c b/src/nvidia/src/kernel/gpu/mmu/bar2_walk.c new file mode 100644 index 000000000..1bcd37035 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/bar2_walk.c @@ -0,0 +1,816 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/nvlink/kernel_nvlink.h" +#include "gpu/bus/kern_bus.h" +#include "mem_mgr/gpu_vaspace.h" +#include "mmu/mmu_walk.h" +#include "vgpu/vgpu_events.h" + +/*! + * @file + * @brief struct MMU_WALK_CALLBACKS g_bar2WalkCallbacks and the callback + * function implementations. + */ + +/*! + * Implementation of @ref MmuWalkCBUpdatePde for BAR2 + */ +static NvBool +_bar2WalkCBUpdatePdb +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pRootFmt, + const MMU_WALK_MEMDESC *pRootMem, + const NvBool bIgnoreChannelBusy +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NV_STATUS status = NV_OK; + NvU32 gfid; + NvBool bUseTempMemDesc; + + NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, NV_FALSE); + + bUseTempMemDesc = pKernelBus->bar2[gfid].bBootstrap && + kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus) && + (NULL != pKernelBus->bar2[gfid].pPDEMemDescForBootstrap); + + if (NULL == pRootMem) + { + // + // Ignoring uncommits for now since kbusInitInstBlk_HAL can't handle + // NULL memdesc and it doesn't matter functionally. + // + goto done; + } + + switch (pKernelBus->InstBlkAperture) + { + // BAR2 will use the default big page size chosen by the system. + default: + case ADDR_FBMEM: + if (pKernelBus->bar2[gfid].bBootstrap) + { + status = kbusInitInstBlk_HAL(pGpu, pKernelBus, + NULL /* use BAR0 window */, + (bUseTempMemDesc ? + pKernelBus->bar2[gfid].pPDEMemDescForBootstrap : + pKernelBus->bar2[gfid].pPDEMemDesc), + pKernelBus->bar2[gfid].vaLimit, + kgmmuGetBigPageSize_HAL(pKernelGmmu), NULL); + } + else + { + status = kbusInitInstBlk_HAL(pGpu, pKernelBus, + pKernelBus->bar2[gfid].pInstBlkMemDesc, + pKernelBus->bar2[gfid].pPDEMemDesc, + pKernelBus->bar2[gfid].vaLimit, + kgmmuGetBigPageSize_HAL(pKernelGmmu), NULL); + } + NV_ASSERT_OR_GOTO(NV_OK == status, done); + break; + case ADDR_SYSMEM: + status = kbusInitInstBlk_HAL(pGpu, pKernelBus, + pKernelBus->bar2[gfid].pInstBlkMemDesc, + (bUseTempMemDesc ? + pKernelBus->bar2[gfid].pPDEMemDescForBootstrap : + pKernelBus->bar2[gfid].pPDEMemDesc), + pKernelBus->bar2[gfid].vaLimit, + kgmmuGetBigPageSize_HAL(pKernelGmmu), NULL); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + break; + } + +done: + return NV_OK == status; +} + +/*! 
+ * Implementation of @ref MmuWalkCBUpdatePde for BAR2 + */ +static void +_bar2WalkCBFillEntries +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + const MMU_WALK_FILL_STATE fillState, + NvU32 *pProgress +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvU32 gfid = pUserCtx->gfid; + const GMMU_FMT *pFmt = NULL; + const GMMU_FMT_FAMILY *pFam = NULL; + MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR*)pLevelMem; + NvU8 *pMap = NULL; + void *pPriv = NULL; + NV_STATUS status = NV_OK; + GMMU_ENTRY_VALUE entryValue; + ADDRESS_TRANSLATION addressTranslation = AT_GPU; + NvU32 sizeInDWord = (NvU32)NV_CEIL(pLevelFmt->entrySize, sizeof(NvU32)); + NvU32 entryIndex; + NvU32 entryOffset; + NvU64 entryStart; + NvU32 i; + + pFmt = pKernelBus->bar2[gfid].pFmt; + pFam = kgmmuFmtGetFamily(pKernelGmmu, pFmt->version); + + // Determine what entry value to write. + switch (fillState) + { + case MMU_WALK_FILL_INVALID: + portMemSet(&entryValue, 0, sizeof(entryValue)); + break; + case MMU_WALK_FILL_SPARSE: + if (pLevelFmt->numSubLevels > 0) + { + // Select sparse entry template based on number of sub-levels. + if (pLevelFmt->numSubLevels > 1) + { + entryValue = pFam->sparsePdeMulti; + } + else + { + NV_ASSERT(pLevelFmt->numSubLevels == 1); + entryValue = pFam->sparsePde; + } + } + else + { + entryValue = pFam->sparsePte; + } + break; + // case MMU_WALK_FILL_NV4K not supported on bar2 gmmu + default: + NV_ASSERT(0); + } + + // Determine how to write the entry value. + if (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) + { + if (pKernelBus->bar2[gfid].bBootstrap) + { + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus)) + { + pMap = kbusCpuOffsetInBar2WindowGet(pGpu, pKernelBus, pMemDesc); + + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++ ) + { + entryOffset = entryIndex * pLevelFmt->entrySize; + + // Memory write via BAR2's CPU mapping. + portMemCopy(pMap + entryOffset, + pLevelFmt->entrySize, + entryValue.v8, + pLevelFmt->entrySize); + } + } + else + { + // + // No CPU mapping to the BAR2 VAS page levels is available yet. + // Must use the BAR0 window to directly write to the physical + // addresses where the BAR2 VAS page levels are located in FB. + // + NV_ASSERT_OR_RETURN_VOID(pKernelBus->virtualBar2[gfid].pPageLevels == NULL); + + for ( entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++ ) + { + entryOffset = entryIndex * pLevelFmt->entrySize; + entryStart = memdescGetPhysAddr(pMemDesc, FORCE_VMMU_TRANSLATION(pMemDesc, addressTranslation), entryOffset); + for (i = 0; i < sizeInDWord; i++) + { + // BAR0 write. + status = kbusMemAccessBar0Window_HAL(pGpu, pKernelBus, + (entryStart + (sizeof(NvU32) * i)), + &entryValue.v32[i], + sizeof(NvU32), + NV_FALSE, + ADDR_FBMEM); + NV_ASSERT_OR_RETURN_VOID(NV_OK == status); + } + } + } + } + else + { + // + // Determine the start of the desired page level offsetted from + // the CPU mapping to the start of the BAR2 VAS page levels. 
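+            // pMap = pPageLevels + (level physical address - pdeBase), i.e.
+            // the level's offset within the page-level region applied to the
+            // CPU virtual mapping of that region.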
+ // + + NV_ASSERT_OR_RETURN_VOID(pKernelBus->virtualBar2[gfid].pPageLevels != NULL); + + pMap = memdescGetPhysAddr(pMemDesc, addressTranslation, 0) - + pKernelBus->bar2[gfid].pdeBase + + pKernelBus->virtualBar2[gfid].pPageLevels; + + for ( entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++ ) + { + entryOffset = entryIndex * pLevelFmt->entrySize; + + // Memory write via BAR2. + portMemCopy(pMap + entryOffset, + pLevelFmt->entrySize, + entryValue.v8, + pLevelFmt->entrySize); + } + } + } + else + { + NV_ASSERT(memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM); + + // Plain old memmap. + status = memdescMapOld(pMemDesc, 0, + pMemDesc->Size, + NV_TRUE, // kernel, + NV_PROTECT_READ_WRITE, + (void **)&pMap, + &pPriv); + NV_ASSERT_OR_RETURN_VOID(NV_OK == status); + + for ( entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++ ) + { + entryOffset = entryIndex * pLevelFmt->entrySize; + + // Memory-mapped write. + portMemCopy(pMap + entryOffset, + pLevelFmt->entrySize, + entryValue.v8, + pLevelFmt->entrySize); + } + + memdescUnmapOld(pMemDesc, 1, 0, pMap, pPriv); + } + + *pProgress = entryIndexHi - entryIndexLo + 1; +} + +/*! + * Implementation of @ref MmuWalkCBUpdatePde for BAR2 + */ +static NvBool +_bar2WalkCBUpdatePde +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndex, + const MMU_WALK_MEMDESC **pSubLevels +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 gfid; + const GMMU_FMT *pFmt; + MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR*)pLevelMem; + NvU8 *pMap = NULL; + void *pPriv = NULL; + NV_STATUS status = NV_OK; + GMMU_ENTRY_VALUE entry; + NvU32 i; + NvU32 sizeInDWord; + NvU32 entryOffset; + NvU64 entryStart; + + NV_ASSERT_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK, NV_FALSE); + + pFmt = pKernelBus->bar2[gfid].pFmt; + + // Clear out the temp copy of the PDE + portMemSet(entry.v8, 0, pLevelFmt->entrySize); + + for (i = 0; i < pLevelFmt->numSubLevels; ++i) + { + const GMMU_FMT_PDE *pPde = gmmuFmtGetPde(pFmt, pLevelFmt, i); + MEMORY_DESCRIPTOR *pSubMemDesc = (MEMORY_DESCRIPTOR*)pSubLevels[i]; + + if (NULL != pSubMemDesc) + { + const GMMU_APERTURE aperture = kgmmuGetMemAperture(pKernelGmmu, pSubMemDesc); + const GMMU_FIELD_ADDRESS *pFldAddr = gmmuFmtPdePhysAddrFld(pPde, aperture); + const NvU64 physAddr = memdescGetPhysAddr(pSubMemDesc, AT_GPU, 0); + + // Set fields within the temp PDE + { + nvFieldSetBool(&pPde->fldVolatile, memdescGetVolatility(pSubMemDesc), entry.v8); + } + gmmuFieldSetAperture(&pPde->fldAperture, aperture, entry.v8); + gmmuFieldSetAddress(pFldAddr, + kgmmuEncodePhysAddr(pKernelGmmu, aperture, physAddr, + NVLINK_INVALID_FABRIC_ADDR), + entry.v8); + + NV_PRINTF(LEVEL_INFO, " SubLevel %u = PA 0x%llX\n", i, + physAddr); + } + else + { + NV_PRINTF(LEVEL_INFO, " SubLevel %u = INVALID\n", i); + } + } + + entryOffset = entryIndex * pLevelFmt->entrySize; + + // If we are setting up BAR2, we need special handling. 
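+    //
+    // During bootstrap the PDE is written through the physical BAR2 window or
+    // the BAR0 window (or a plain CPU mapping for sysmem); once BAR2 is virtual
+    // (migration or VF), the write goes through the BAR2 CPU mapping instead.
+    //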
+ if (pKernelBus->bar2[gfid].bBootstrap) + { + entryStart = memdescGetPhysAddr(pMemDesc, FORCE_VMMU_TRANSLATION(pMemDesc, AT_GPU), entryOffset); + sizeInDWord = (NvU32)NV_CEIL(pLevelFmt->entrySize, sizeof(NvU32)); + + for (i = 0; i < sizeInDWord; i++) + { + if (ADDR_FBMEM == pKernelBus->PDEBAR2Aperture) + { + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus)) + { + pMap = kbusCpuOffsetInBar2WindowGet(pGpu, pKernelBus, pMemDesc); + portMemCopy(pMap + entryOffset, pLevelFmt->entrySize, entry.v8, pLevelFmt->entrySize); + } + else + { + status = kbusMemAccessBar0Window_HAL(pGpu, pKernelBus, + (entryStart + (sizeof(NvU32) * i)), + &entry.v32[i], + sizeof(NvU32), + NV_FALSE, + ADDR_FBMEM); + NV_ASSERT_OR_RETURN(NV_OK == status, NV_FALSE); + } + } + else + { + // Plain old memmap. + status = memdescMapOld(pMemDesc, 0, + pMemDesc->Size, + NV_TRUE, // kernel, + NV_PROTECT_READ_WRITE, + (void **)&pMap, + &pPriv); + NV_ASSERT_OR_RETURN(NV_OK == status, NV_FALSE); + portMemCopy(pMap + entryOffset, pLevelFmt->entrySize, entry.v8, pLevelFmt->entrySize); + memdescUnmapOld(pMemDesc, 1, 0, pMap, pPriv); + } + } + } + else if (pKernelBus->bar2[gfid].bMigrating || IS_GFID_VF(gfid)) + { + if (ADDR_FBMEM == pKernelBus->PDEBAR2Aperture) + { + NV_ASSERT(NULL != pKernelBus->virtualBar2[gfid].pPageLevels); + + pMap = memdescGetPhysAddr(pMemDesc, AT_GPU, 0) - + pKernelBus->bar2[gfid].pdeBase + + pKernelBus->virtualBar2[gfid].pPageLevels; + NV_ASSERT(NULL != pMap); + portMemCopy(pMap + entryOffset, pLevelFmt->entrySize, entry.v8, pLevelFmt->entrySize); + } + else + { + NV_ASSERT(0); // Page level instances in sysmem don't need migration. + } + } + else + { + NV_ASSERT(0); // Not yet supported. + } + + return NV_TRUE; +} + +/*! + * Implementation of @ref MmuWalkCBLevelFree for BAR2 + */ +static void +_bar2WalkCBLevelFree +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + MMU_WALK_MEMDESC *pOldMem +) +{ + MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR*)pOldMem; + + NV_PRINTF(LEVEL_INFO, "PA 0x%llX for VA 0x%llX-0x%llX\n", + memdescGetPhysAddr(pMemDesc, AT_GPU, 0), + mmuFmtLevelVirtAddrLo(pLevelFmt, vaBase), + mmuFmtLevelVirtAddrHi(pLevelFmt, vaBase)); + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); +} + +/*! + * Implementation of @ref MmuWalkCBLevelAlloc for BAR2 + */ +static NV_STATUS +_bar2WalkCBLevelAlloc +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + const NvU64 vaLimit, + const NvBool bTarget, + MMU_WALK_MEMDESC **ppMemDesc, + NvU32 *pMemSize, + NvBool *pBChanged +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 gfid; + NvU64 pdeBase = 0; + NvU64 pteBase = 0; + NvU32 allocSize; + NvU32 memOffset; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + NvBool bPreFillCache = gpuIsCacheOnlyModeEnabled(pGpu) && + !pMemorySystemConfig->bL2PreFill; + NV_STATUS status = NV_OK; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + // Abort early if level is not targeted or already allocated. + if (!bTarget || (NULL != *ppMemDesc)) + { + return NV_OK; + } + + // Specify which Page Level we are initializing. 
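+    // pageDirInit/pageTblInit count how many page dirs/tables have been carved
+    // out of the reserved ranges so far; they restart when the root is allocated.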
+ if (pKernelBus->bar2[gfid].bBootstrap || pKernelBus->bar2[gfid].bMigrating || IS_GFID_VF(gfid)) + { + if (pLevelFmt == pKernelBus->bar2[gfid].pFmt->pRoot) + { + pKernelBus->bar2[gfid].pageDirInit = 0; + pKernelBus->bar2[gfid].pageTblInit = 0; + } + + NV_ASSERT_OR_RETURN(pKernelBus->bar2[gfid].pageDirInit + pKernelBus->bar2[gfid].pageTblInit < + pKernelBus->bar2[gfid].numPageDirs + pKernelBus->bar2[gfid].numPageTbls, + NV_ERR_INVALID_STATE); + } + + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus) && + (ADDR_FBMEM == pKernelBus->PDEBAR2Aperture)) + { + if (pKernelBus->bar2[gfid].bBootstrap) + { + pdeBase = pKernelBus->bar2[gfid].pdeBaseForBootstrap; + pteBase = pKernelBus->bar2[gfid].pteBaseForBootstrap; + } + else if (pKernelBus->bar2[gfid].bMigrating) + { + pdeBase = pKernelBus->bar2[gfid].pdeBase; + pteBase = pKernelBus->bar2[gfid].pteBase; + } + else + { + status = NV_ERR_INVALID_OPERATION; + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + } + } + else + { + NV_ASSERT(pKernelBus->bar2[gfid].bBootstrap || IS_GFID_VF(gfid)); + pdeBase = pKernelBus->bar2[gfid].pdeBase; + pteBase = pKernelBus->bar2[gfid].pteBase; + } + + // Process Page Dirs + if (0 != pLevelFmt->numSubLevels) + { + allocSize = pKernelBus->bar2[gfid].pageDirSize; + status = memdescCreate(&pMemDesc, pGpu, + allocSize, + RM_PAGE_SIZE, + NV_TRUE, + pKernelBus->PDEBAR2Aperture, + pKernelBus->PDEBAR2Attr, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + + switch (pKernelBus->PDEBAR2Aperture) + { + default: + case ADDR_FBMEM: + // + // Reserved FB memory for BAR2 Page Levels is contiiguous, hence + // we simply offset from page dir base. + // + memOffset = pKernelBus->bar2[gfid].pageDirInit * pKernelBus->bar2[gfid].pageDirSize; + memdescDescribe(pMemDesc, + pKernelBus->PDEBAR2Aperture, + pdeBase + memOffset, + allocSize); + + // + // Pre-fill cache to prevent FB read accesses if in cache only + // mode and not doing one time pre-fill. + // + if (bPreFillCache) + { + kmemsysPreFillCacheOnlyMemory_HAL(pGpu, pKernelMemorySystem, + pdeBase + memOffset, + allocSize); + } + break; + + case ADDR_SYSMEM: + status = memdescAlloc(pMemDesc); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + break; + } + + if (pLevelFmt == pKernelBus->bar2[gfid].pFmt->pRoot) + { + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus) && + (ADDR_FBMEM == pKernelBus->PDEBAR2Aperture)) + { + if (pKernelBus->bar2[gfid].bBootstrap) + { + // Cache the temporary root Page Dir setup at bottom of FB. + pKernelBus->bar2[gfid].pdeBaseForBootstrap = memdescGetPhysAddr(pMemDesc, + AT_GPU, 0); + pKernelBus->bar2[gfid].pPDEMemDescForBootstrap = pMemDesc; + } + else if (pKernelBus->bar2[gfid].bMigrating) + { + // + // Cache the root Page Dir setup at top of FB. 
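+                    // This memdesc becomes the permanent BAR2 PDB referenced by
+                    // the instance block once migration completes.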
+ // + pKernelBus->bar2[gfid].pdeBase = memdescGetPhysAddr(pMemDesc, + AT_GPU, 0); + pKernelBus->bar2[gfid].pPDEMemDesc = pMemDesc; + } + else + { + status = NV_ERR_INVALID_OPERATION; + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + } + } + else + { + NV_ASSERT(pKernelBus->bar2[gfid].bBootstrap || IS_GFID_VF(gfid)); + pKernelBus->bar2[gfid].pdeBase = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + pKernelBus->bar2[gfid].pPDEMemDesc = pMemDesc; + } + } + if (pKernelBus->bar2[gfid].bBootstrap || pKernelBus->bar2[gfid].bMigrating || IS_GFID_VF(gfid)) + { + pKernelBus->bar2[gfid].pageDirInit++; + } + } + else // Alloc Page Table + { + allocSize = pKernelBus->bar2[gfid].pageTblSize; + status = memdescCreate(&pMemDesc, pGpu, + allocSize, + RM_PAGE_SIZE, + NV_TRUE, + pKernelBus->PTEBAR2Aperture, + pKernelBus->PTEBAR2Attr, + MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + + switch (pKernelBus->PTEBAR2Aperture) + { + default: + case ADDR_FBMEM: + // + // Reserved FB memory for BAR2 Page Levels is contiiguous, hence + // we simply offset from the page table base. + // pageTblInit gives us the page table number we are + // initializing. + // + memOffset = pKernelBus->bar2[gfid].pageTblInit * allocSize; + memdescDescribe(pMemDesc, + pKernelBus->PTEBAR2Aperture, + pteBase + memOffset, + allocSize); + + // + // Pre-fill cache to prevent FB read accesses if in cache only mode + // and not doing one time pre-fill + // + if (bPreFillCache) + { + kmemsysPreFillCacheOnlyMemory_HAL(pGpu, pKernelMemorySystem, + pteBase + memOffset, + allocSize); + } + break; + + case ADDR_SYSMEM: + status = memdescAlloc(pMemDesc); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + break; + } + + if (pKernelBus->bar2[gfid].pageTblInit == 0) + { + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus) && + (ADDR_FBMEM == pKernelBus->PTEBAR2Aperture)) + { + // Cache the first Page Table memdesc + if (pKernelBus->bar2[gfid].bBootstrap) + { + pKernelBus->bar2[gfid].pteBaseForBootstrap = memdescGetPhysAddr(pMemDesc, + AT_GPU, 0); + } + else if (pKernelBus->bar2[gfid].bMigrating) + { + pKernelBus->bar2[gfid].pteBase = memdescGetPhysAddr(pMemDesc, + AT_GPU, 0); + } + else + { + status = NV_ERR_INVALID_OPERATION; + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + } + } + else + { + NV_ASSERT(pKernelBus->bar2[gfid].bBootstrap || IS_GFID_VF(gfid)); + pKernelBus->bar2[gfid].pteBase = memdescGetPhysAddr(pMemDesc, + AT_GPU, 0); + } + pKernelBus->virtualBar2[gfid].pPTEMemDesc = pMemDesc; + } + if (pKernelBus->bar2[gfid].bBootstrap || pKernelBus->bar2[gfid].bMigrating || IS_GFID_VF(gfid)) + { + pKernelBus->bar2[gfid].pageTblInit++; + } + } + + // Return the allocated memdesc + *ppMemDesc = (MMU_WALK_MEMDESC*)pMemDesc; + *pMemSize = allocSize; + *pBChanged = NV_TRUE; + +cleanup: + if (NV_OK != status) + { + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + return status; +} + +/*! 
+ * Implementation of @ref MmuWalkCBWriteBuffer for BAR2 + */ +static void +_bar2WalkCBWriteBuffer +( + MMU_WALK_USER_CTX *pUserCtx, + MMU_WALK_MEMDESC *pStagingBuffer, + MMU_WALK_MEMDESC *pLevelBuffer, + NvU64 entryIndexLo, + NvU64 entryIndexHi, + NvU64 tableSize, + NvU64 entrySize +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 gfid; + MEMORY_DESCRIPTOR *pStagingBufferDesc = (MEMORY_DESCRIPTOR*) pStagingBuffer; + MEMORY_DESCRIPTOR *pOutputBufferDesc = (MEMORY_DESCRIPTOR*) pLevelBuffer; + NvBool bRestore = NV_FALSE; + NvU64 firstEntryOffset = entryIndexLo * entrySize; + NvU64 entryRangeSize = (entryIndexHi - entryIndexLo + 1llu) * (entrySize); + NvU64 oldBar0Mapping = 0; + NvU8 *pStagingBufferMapping; + NvU8 *pStagingDescMapping; + NvU8 *pOutputBufferMapping; + void *pPriv; + + NV_ASSERT_OR_RETURN_VOID(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK); + + // TODO: Stash this mapping somewhere permanent to avoid constant remapping + NV_ASSERT_OR_RETURN_VOID( + memdescMapOld(pStagingBufferDesc, + 0, + pStagingBufferDesc->Size, + NV_TRUE, // kernel, + NV_PROTECT_READ_WRITE, + (void **)&pStagingDescMapping, + &pPriv) + == NV_OK); + + pStagingBufferMapping = &pStagingDescMapping[firstEntryOffset % tableSize]; + + if (pKernelBus->bar2[gfid].bBootstrap) + { + if (kbusIsPhysicalBar2InitPagetableEnabled(pKernelBus)) + { + // BAR2 in physical mode, using top of FB + NvU8 *pOutputDescMapping = kbusCpuOffsetInBar2WindowGet(pGpu, pKernelBus, pOutputBufferDesc); + pOutputBufferMapping = &pOutputDescMapping[firstEntryOffset]; + } + else + { + // Get the physical address of the memdesc + NvU64 phys = memdescGetPhysAddr(pOutputBufferDesc, + FORCE_VMMU_TRANSLATION(pOutputBufferDesc, AT_GPU), + firstEntryOffset); + // Get BAR0 info + NvU8 *pWindowAddress = pKernelBus->pDefaultBar0Pointer; + NvU64 windowSize = pKernelBus->physicalBar0WindowSize; + + // + // Set PRAMIN window offset to the page needed, + // logic is copied from kbusMemAccessBar0Window_GM107 + // + NvU64 currentBar0Mapping = kbusGetBAR0WindowVidOffset_HAL(pGpu, pKernelBus); + + // + // First check if start of window is in range, + // then check if end of window is in range + // + if (phys < currentBar0Mapping || + phys + entryRangeSize >= currentBar0Mapping + windowSize) + { + kbusSetBAR0WindowVidOffset_HAL(pGpu, + pKernelBus, + (phys & ~(windowSize - 1llu))); + oldBar0Mapping = currentBar0Mapping; + currentBar0Mapping = (phys & ~(windowSize - 1llu)); + bRestore = NV_TRUE; + } + + pOutputBufferMapping = &pWindowAddress[phys - currentBar0Mapping]; + } + } + else + { + // BAR2 in virtual mode + pOutputBufferMapping = memdescGetPhysAddr(pOutputBufferDesc, + FORCE_VMMU_TRANSLATION(pOutputBufferDesc, AT_GPU), + firstEntryOffset) - + pKernelBus->bar2[gfid].pdeBase + + pKernelBus->virtualBar2[gfid].pPageLevels; + } + + portMemCopy(pOutputBufferMapping, + entryRangeSize, + pStagingBufferMapping, + entryRangeSize); + + memdescUnmapOld(pStagingBufferDesc, NV_TRUE, 0, pStagingDescMapping, pPriv); + + if (bRestore) + { + kbusSetBAR0WindowVidOffset_HAL(pGpu, pKernelBus, oldBar0Mapping); + } +} + +const MMU_WALK_CALLBACKS g_bar2WalkCallbacks = +{ + _bar2WalkCBLevelAlloc, + _bar2WalkCBLevelFree, + _bar2WalkCBUpdatePdb, + _bar2WalkCBUpdatePde, + _bar2WalkCBFillEntries, + NULL, + _bar2WalkCBWriteBuffer, +}; diff --git a/src/nvidia/src/kernel/gpu/mmu/fault_buffer_ctrl.c b/src/nvidia/src/kernel/gpu/mmu/fault_buffer_ctrl.c new file mode 100644 index 000000000..20b753ca2 --- /dev/null +++ 
b/src/nvidia/src/kernel/gpu/mmu/fault_buffer_ctrl.c @@ -0,0 +1,67 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" + +#include "gpu/mmu/kern_gmmu.h" +#include "rmapi/control.h" + +#include "ctrl/ctrlb069.h" +#include "gpu/mmu/mmu_fault_buffer.h" + +NV_STATUS +faultbufCtrlCmdFaultbufferGetSize_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer, + NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS *pGetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pMmuFaultBuffer); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + pGetParams->faultBufferSize = + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER].faultBufferSize; + + return NV_OK; +} + +NV_STATUS +faultbufCtrlCmdFaultbufferGetRegisterMappings_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer, + NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pMmuFaultBuffer); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + return kgmmuGetFaultRegisterMappings_HAL(pGpu, pKernelGmmu, REPLAYABLE_FAULT_BUFFER, + &pParams->pFaultBufferGet, + &pParams->pFaultBufferPut, + &pParams->pFaultBufferInfo, + &pParams->pPmcIntr, + &pParams->pPmcIntrEnSet, + &pParams->pPmcIntrEnClear, + &pParams->replayableFaultMask, + &pParams->pPrefetchCtrl); +} diff --git a/src/nvidia/src/kernel/gpu/mmu/gmmu_trace.c b/src/nvidia/src/kernel/gpu/mmu/gmmu_trace.c new file mode 100644 index 000000000..d282abfc6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/gmmu_trace.c @@ -0,0 +1,505 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mmu/gmmu_fmt.h" +#include "gpu/mmu/kern_gmmu.h" + +/*! + * @file + * @brief struct MMU_TRACE_CALLBACKS g_gmmuTraceCallbacks and the callback + * function implementations. + */ + +#define PRINT_FIELD_BOOL(fmt, fmtPte, field, pte) \ + do { \ + if (nvFieldIsValid32(&(fmtPte)->fld##field.desc)) \ + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, fmt, \ + nvFieldGetBool(&(fmtPte)->fld##field, (pte)->v8)); \ + } while (0) + +#define PRINT_FIELD_32(fmt, fmtPte, field, pte) \ + do { \ + if (nvFieldIsValid32(&(fmtPte)->fld##field)) \ + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, fmt, \ + nvFieldGet32(&(fmtPte)->fld##field, (pte)->v8)); \ + } while (0) + +#if NV_PRINTF_STRINGS_ALLOWED +static const char *_decodeAperture[] = { "invalid", "video", "peer", "sysnoncoh", "syscoh" }; +#else // NV_PRINTF_STRINGS_ALLOWED +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +static const char _decodeAperture[] = "XVPNC"; +#endif // NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +#endif // NV_PRINTF_STRINGS_ALLOWED + +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +static const char _decodeSize[] = "1248"; +#endif // NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + +static NvBool +_gmmuIsPte +( + const void *pFmt, + const MMU_FMT_LEVEL *pFmtLevel, + const MMU_ENTRY *pEntry, + NvBool *pValid +) +{ + const GMMU_FMT *pFmtGmmu = (GMMU_FMT*)pFmt; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pEntry; + + if (gmmuFmtEntryIsPte(pFmtGmmu, pFmtLevel, pGmmuEntry->v8)) + { + if (pValid) + { + const GMMU_FMT_PTE *pFmtPte = pFmtGmmu->pPte; + + *pValid = nvFieldGetBool(&pFmtPte->fldValid, pGmmuEntry->v8); + } + + return NV_TRUE; + } + + return NV_FALSE; +} + +static const void* +_gmmuGetFmtPde +( + const void *pFmt, + const MMU_FMT_LEVEL *pFmtLevel, + NvU32 sublevel +) +{ + return gmmuFmtGetPde((GMMU_FMT*)pFmt, pFmtLevel, sublevel); +} + +static const void* +_gmmuGetFmtPte +( + const void *pFmt +) +{ + const GMMU_FMT *pFmtGmmu = (GMMU_FMT*)pFmt; + + return pFmtGmmu->pPte; +} + +static NvU64 +_gmmuGetPdePa +( + OBJGPU *pGpu, + const void *pFmtPde, + const MMU_ENTRY *pPde +) +{ + const GMMU_FMT_PDE *pFmt = (GMMU_FMT_PDE*)pFmtPde; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pPde; + GMMU_APERTURE aperture = gmmuFieldGetAperture(&pFmt->fldAperture, + pGmmuEntry->v8); + + if (aperture != GMMU_APERTURE_INVALID) + { + const GMMU_FIELD_ADDRESS *pAddrFld = gmmuFmtPdePhysAddrFld(pFmt, aperture); + + if (pAddrFld) + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvU64 addr = gmmuFieldGetAddress(pAddrFld, pGmmuEntry->v8); + if ((aperture == GMMU_APERTURE_SYS_NONCOH) || (aperture == GMMU_APERTURE_SYS_COH)) + { + addr += pKernelGmmu->sysmemBaseAddress; + } + return addr; + } + } + + return MMU_INVALID_ADDR; +} + +static NvU64 +_gmmuGetPtePa +( + OBJGPU *pGpu, + const void *pFmtPte, + const MMU_ENTRY *pPte +) +{ + const GMMU_FMT_PTE *pFmt = (GMMU_FMT_PTE*)pFmtPte; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pPte; + GMMU_APERTURE aperture = gmmuFieldGetAperture(&pFmt->fldAperture, + pGmmuEntry->v8); + + if (aperture != 
GMMU_APERTURE_INVALID) + { + const GMMU_FIELD_ADDRESS *pAddrFld = gmmuFmtPtePhysAddrFld(pFmt, aperture); + if (pAddrFld) + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvU64 addr = gmmuFieldGetAddress(pAddrFld, pGmmuEntry->v8); + if ((aperture == GMMU_APERTURE_SYS_NONCOH) || (aperture == GMMU_APERTURE_SYS_COH)) + { + addr += pKernelGmmu->sysmemBaseAddress; + } + return addr; + } + } + + return MMU_INVALID_ADDR; +} + +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +static void +_gmmuPrintPa +( + NvU64 pa, + GMMU_APERTURE aperture, + NvU32 peerIndex +) +{ + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "%s", _decodeAperture[aperture]); + if (GMMU_APERTURE_PEER == aperture) + { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "[%d]", peerIndex); + } + if ((GMMU_APERTURE_INVALID == aperture) && (0 == pa)) + { + // PA is just noise if invalid, but should print if non-zero (bug indicator). + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " "); + } + else + { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "[0x%08llx] ", pa); + } +} +#endif + +static void +_gmmuPrintPdb +( + OBJGPU *pGpu, + OBJVASPACE *pVAS, + NvU64 va, + NvU64 vaLimit +) +{ +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + PMEMORY_DESCRIPTOR pPDB = vaspaceGetPageDirBase(pVAS, pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + GMMU_APERTURE aperture = kgmmuGetMemAperture(pKernelGmmu, pPDB); + + NV_PRINTF(LEVEL_INFO, "MMUTRACE: VA[0x%08llx-%08llx] PDB: ", va, vaLimit); + + _gmmuPrintPa(memdescGetPhysAddr(pPDB, VAS_ADDRESS_TRANSLATION(pVAS), 0), + aperture, 0); + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n"); +#endif +} + +static void +_gmmuPrintPde +( + OBJGPU *pGpu, + const void *pFmt, + const MMU_FMT_LEVEL *pFmtLevel, + const MMU_ENTRY *pPde +) +{ +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + const GMMU_FMT *pFmtGmmu = (GMMU_FMT*)pFmt; + const GMMU_FMT_PDE_MULTI *pPdeMulti = pFmtGmmu->pPdeMulti; + const GMMU_FMT_PDE *pFmtPde = pFmtGmmu->pPde; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pPde; + GMMU_APERTURE aperture = GMMU_APERTURE_INVALID; + NvU64 pa; + + if (pFmtLevel->numSubLevels > 1) + { + NvU32 sublevel; + + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "(Dual) "); + + for (sublevel = 0; sublevel < pFmtLevel->numSubLevels; sublevel++) + { + pFmtPde = &pPdeMulti->subLevels[sublevel]; + aperture = gmmuFieldGetAperture(&pFmtPde->fldAperture, pGmmuEntry->v8); + + if (aperture != GMMU_APERTURE_INVALID) + { + break; + } + } + + if (aperture == GMMU_APERTURE_INVALID) + { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "invalid\n"); + return; + } + + if (nvFieldIsValid32(&pPdeMulti->fldSizeRecipExp)) + { + NvU32 size = nvFieldGet32(&pPdeMulti->fldSizeRecipExp, pGmmuEntry->v8); + + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "Size=1/%c", _decodeSize[size]); + } + + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n"); + + return; + } + + aperture = gmmuFieldGetAperture(&pFmtPde->fldAperture, pGmmuEntry->v8); + pa = _gmmuGetPdePa(pGpu, pFmtPde, pPde); + + _gmmuPrintPa(pa, aperture, 0); + + { + PRINT_FIELD_BOOL("Vol=%d", pFmtPde, Volatile, pGmmuEntry); + } + + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n"); +#endif +} + +static void +_gmmuPrintPt +( + OBJGPU *pGpu, + const MMU_FMT_LEVEL *pFmtLevel, + const void *pFmt, + const MMU_ENTRY *pPde +) +{ +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + const GMMU_FMT_PDE *pFmtPde = (GMMU_FMT_PDE*)pFmt; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pPde; + NvU64 pt = _gmmuGetPdePa(pGpu, pFmtPde, pPde); + GMMU_APERTURE aperture = gmmuFieldGetAperture(&pFmtPde->fldAperture, + 
pGmmuEntry->v8); + NvU64 pageSize = mmuFmtLevelPageSize(pFmtLevel); + + if (pt == MMU_INVALID_ADDR) + { + pt = 0; + } + + switch (pageSize) + { + case RM_PAGE_SIZE_HUGE: // 2M + break; + case RM_PAGE_SIZE_128K: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PT_128K: "); + break; + case RM_PAGE_SIZE_64K: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PT_64K: "); + break; + case RM_PAGE_SIZE: // 4K + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PT_4K: "); + break; + default: + NV_ASSERT(0); + } + + _gmmuPrintPa(pt, aperture, 0); + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n"); +#endif +} + +static void +_gmmuPrintPte +( + OBJGPU *pGpu, + const MMU_FMT_LEVEL *pFmtLevel, + const void *pFmtPte, + const MMU_ENTRY *pPte, + NvU32 index +) +{ +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + const GMMU_FMT_PTE *pFmt = (GMMU_FMT_PTE*)pFmtPte; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pPte; + NvU64 pa = _gmmuGetPtePa(pGpu, pFmt, pPte); + GMMU_APERTURE aperture = gmmuFieldGetAperture(&pFmt->fldAperture, + pGmmuEntry->v8); + NvU32 peerIndex = nvFieldGet32(&pFmt->fldPeerIndex, + pGmmuEntry->v8); + + switch (mmuFmtLevelPageSize(pFmtLevel)) + { + case RM_PAGE_SIZE_512M: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_512M"); + break; + case RM_PAGE_SIZE_HUGE: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_2M"); + break; + case RM_PAGE_SIZE_128K: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_128K"); + break; + case RM_PAGE_SIZE_64K: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_64K"); + break; + case RM_PAGE_SIZE: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_4K"); + break; + default: + NV_ASSERT(0); + break; + } + + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "[0x%x]: ", index); + _gmmuPrintPa(pa, aperture, peerIndex); + + { + PRINT_FIELD_BOOL("Vld=%d, ", pFmt, Valid, pGmmuEntry); + PRINT_FIELD_BOOL("Priv=%d, ", pFmt, Privilege, pGmmuEntry); + PRINT_FIELD_BOOL("RO=%d, ", pFmt, ReadOnly, pGmmuEntry); + PRINT_FIELD_BOOL("RD=%d, ", pFmt, ReadDisable, pGmmuEntry); + PRINT_FIELD_BOOL("WD=%d, ", pFmt, WriteDisable, pGmmuEntry); + PRINT_FIELD_BOOL("Enc=%d, ", pFmt, Encrypted, pGmmuEntry); + PRINT_FIELD_BOOL("Vol=%d, ", pFmt, Volatile, pGmmuEntry); + PRINT_FIELD_BOOL("Lock=%d, ", pFmt, Locked, pGmmuEntry); + PRINT_FIELD_BOOL("AtomDis=%d, ", pFmt, AtomicDisable, pGmmuEntry); + PRINT_FIELD_32("Kind=0x%x, ", pFmt, Kind, pGmmuEntry); + PRINT_FIELD_32("CTL=0x%x, ", pFmt, CompTagLine, pGmmuEntry); + PRINT_FIELD_32("CTL_MSB=%d, ", pFmt, CompTagSubIndex, pGmmuEntry); + } + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "\n"); +#endif // NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +} + +static NvBool +_gmmuIsInvalidPdeOk +( + OBJGPU *pGpu, + const void *pFmt, + const void *pFmtEntry, + const MMU_ENTRY *pPde, + NvU32 sublevel +) +{ + const GMMU_FMT *pFmtGmmu = (GMMU_FMT*)pFmt; + const GMMU_FMT_PDE *pFmtPde = (GMMU_FMT_PDE*)pFmtEntry; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pPde; + NvBool bSparse = NV_FALSE; + + { + bSparse = nvFieldGetBool(&pFmtPde->fldVolatile, pGmmuEntry->v8); + } + if (pFmtGmmu->bSparseHwSupport && (sublevel == 0) && bSparse) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +static NvU32 +_gmmuPdeAddrSpace +( + const void *pFmtEntry, + const MMU_ENTRY *pPde +) +{ + const GMMU_FMT_PDE *pFmtPde = (GMMU_FMT_PDE*)pFmtEntry; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pPde; + + switch (gmmuFieldGetAperture(&pFmtPde->fldAperture, pGmmuEntry->v8)) + { + case GMMU_APERTURE_INVALID: + return ADDR_UNKNOWN; + case GMMU_APERTURE_VIDEO: + return ADDR_FBMEM; + case 
GMMU_APERTURE_SYS_COH: + case GMMU_APERTURE_SYS_NONCOH: + return ADDR_SYSMEM; + default: + NV_ASSERT(0); + return ADDR_UNKNOWN; + } +} + +static NvU32 +_gmmuPteAddrSpace +( + const void *pFmtEntry, + const MMU_ENTRY *pPte +) +{ + const GMMU_FMT_PTE *pFmtPte = (GMMU_FMT_PTE*)pFmtEntry; + const GMMU_ENTRY_VALUE *pGmmuEntry = (GMMU_ENTRY_VALUE*)pPte; + + switch (gmmuFieldGetAperture(&pFmtPte->fldAperture, pGmmuEntry->v8)) + { + case GMMU_APERTURE_INVALID: + return ADDR_UNKNOWN; + case GMMU_APERTURE_VIDEO: // Fallthrough + case GMMU_APERTURE_PEER: + return ADDR_FBMEM; + case GMMU_APERTURE_SYS_COH: // Fallthrough + case GMMU_APERTURE_SYS_NONCOH: + return ADDR_SYSMEM; + default: + NV_ASSERT(0); + return ADDR_UNKNOWN; + } +} + +static NvU32 +_gmmuSwToHwLevel +( + const void *pFmt, + NvU32 level +) +{ + const GMMU_FMT *pFmtGmmu = (GMMU_FMT*)pFmt; + + switch (pFmtGmmu->version) + { + case GMMU_FMT_VERSION_2: + NV_ASSERT_OR_RETURN(level < 4, 0); + return 3 - level; + case GMMU_FMT_VERSION_1: + NV_ASSERT_OR_RETURN(level == 0, 0); + return 0; + default: + NV_ASSERT(0); + return 0; + } +} + +const MMU_TRACE_CALLBACKS g_gmmuTraceCallbacks = +{ + _gmmuIsPte, // isPte + _gmmuGetFmtPde, // getFmtPde + _gmmuGetFmtPte, // getFmtPte + _gmmuGetPdePa, // getPdePa + _gmmuGetPtePa, // getPtePa + _gmmuPrintPdb, // printPdb + _gmmuPrintPde, // printPde + _gmmuPrintPt, // printPt + _gmmuPrintPte, // printPte + _gmmuIsInvalidPdeOk, // isInvalidPdeOk + _gmmuPdeAddrSpace, // pdeAddrSpace + _gmmuPteAddrSpace, // pteAddrSpace + _gmmuSwToHwLevel, // swToHwLevel +}; diff --git a/src/nvidia/src/kernel/gpu/mmu/gmmu_walk.c b/src/nvidia/src/kernel/gpu/mmu/gmmu_walk.c new file mode 100644 index 000000000..9adea807e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/gmmu_walk.c @@ -0,0 +1,998 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "mem_mgr/gpu_vaspace.h" +#include "gpu/mmu/kern_gmmu.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "nvRmReg.h" // NV_REG_STR_RM_* + +#include "mmu/gmmu_fmt.h" +#include "mmu/mmu_fmt.h" + +/*! + * @file + * @brief struct MMU_WALK_CALLBACKS g_gmmuWalkCallbacks and the callback + * function implementations. 
+ */ + +/** + * See @ref MMU_WALK_FILL_STATE + */ +#if NV_PRINTF_STRINGS_ALLOWED +const char *g_gmmuFillStateStrings[] = { "INVALID", "SPARSE", "NV4K" }; +const char *g_gmmuUVMMirroringDirStrings[] = { "[User Root] ", "[Mirrored Root] " }; +#else // NV_PRINTF_STRINGS_ALLOWED +static const char _gmmuFillStateString[] = "XS4"; +static const char _gmmuUVMMirroringDirString[] = "UM"; +#endif // NV_PRINTF_STRINGS_ALLOWED + +static PMEMORY_DESCRIPTOR +_gmmuMemDescCacheCreate(MMU_WALK_USER_CTX *pUserCtx, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 memSize); + +static PMEMORY_DESCRIPTOR +_gmmuMemDescCacheAlloc(MMU_WALK_USER_CTX *pUserCtx); + +/*! + * Utility function to decide if a level should be mirrored. + * Used by MMU callbacks. + */ +static NvBool NV_FORCEINLINE +_mirrorLevel +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt +) +{ + return (pLevelFmt == pUserCtx->pGpuState->pFmt->pRoot) && pUserCtx->pGVAS->bIsMirrored; +} + +/*! + * Utility function to get the number of Page Dirs to loop over. + * Used by MMU callbacks. + */ +static NvU8 NV_FORCEINLINE +_getMaxPageDirs(NvBool bMirror) +{ + return bMirror ? GMMU_MAX_PAGE_DIR_INDEX_COUNT : + GMMU_MAX_PAGE_DIR_INDEX_COUNT - 1; +} + +static NV_STATUS +_gmmuScrubMemDesc +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + TRANSFER_SURFACE dest = {0}; + + dest.pMemDesc = pMemDesc; + dest.offset = 0; + + NV_ASSERT_OK_OR_RETURN(memmgrMemSet(GPU_GET_MEMORY_MANAGER(pGpu), &dest, 0, + (NvU32)memdescGetSize(pMemDesc), + TRANSFER_FLAGS_NONE)); + + return NV_OK; +} + +static NV_STATUS +_gmmuWalkCBLevelAlloc +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + const NvU64 vaLimit, + const NvBool bTarget, + MMU_WALK_MEMDESC **ppMemDesc, + NvU32 *pMemSize, + NvBool *pBChanged +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + OBJGVASPACE *pGVAS = pUserCtx->pGVAS; + const GVAS_BLOCK *pBlock = pUserCtx->pBlock; + const GMMU_FMT *pFmt = pUserCtx->pGpuState->pFmt; + MEMORY_DESCRIPTOR *pMemDesc[GMMU_MAX_PAGE_DIR_INDEX_COUNT] = {NULL}; + const NvU32 minMemSize = (mmuFmtVirtAddrToEntryIndex(pLevelFmt, vaLimit) + 1) * + pLevelFmt->entrySize; + NvU32 newMemSize; + NV_STATUS status = NV_OK; + NvU32 alignment; + NvU32 aperture; + NvU32 attr; + NvU64 memDescFlags = MEMDESC_FLAGS_NONE; + NvU32 memPoolListCount = 0; + NvU32 memPoolList[4]; + NvBool bAllowSysmem; + NvBool bPacked = NV_FALSE; + NvBool bPartialTbl = NV_FALSE; + NvBool bPmaManaged = !!(pGVAS->flags & VASPACE_FLAGS_PTETABLE_PMA_MANAGED); + NvBool bMirror = _mirrorLevel(pUserCtx, pLevelFmt); + NvU8 maxPgDirs = _getMaxPageDirs(bMirror); + NvU8 i = 0, j = 0; + + // Abort early if level is not targeted or already sufficiently sized. + if (((NULL == *ppMemDesc) && !bTarget) || + ((NULL != *ppMemDesc) && (minMemSize <= *pMemSize))) + { + return NV_OK; + } + + // Check if this level is the root page directory. + if (pLevelFmt == pFmt->pRoot) + { + newMemSize = kgmmuGetPDBAllocSize_HAL(pKernelGmmu, pLevelFmt, pGVAS->vaLimitInternal); + + // TODO: PDB alignemnt. + alignment = RM_PAGE_SIZE; + + // Determine level aperture and memory attributes. 
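+        // BAR VA spaces use the BAR1-specific PDE aperture/attributes; other VA
+        // spaces use the general defaults, and sysmem is only allowed when the
+        // corresponding INST_LOC registry override does not force vidmem.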
+ if (pGVAS->flags & VASPACE_FLAGS_BAR) + { + aperture = kgmmuGetPDEBAR1Aperture(pKernelGmmu); + attr = kgmmuGetPDEBAR1Attr(pKernelGmmu); + bAllowSysmem = !FLD_TEST_DRF(_REG_STR_RM, _INST_LOC, _BAR_PDE, _VID, + pGpu->instLocOverrides); + } + else + { + aperture = kgmmuGetPDEAperture(pKernelGmmu); + attr = kgmmuGetPDEAttr(pKernelGmmu); + bAllowSysmem = !FLD_TEST_DRF(_REG_STR_RM, _INST_LOC, _PDE, _VID, + pGpu->instLocOverrides); + } + + // Default aperture. + memPoolList[memPoolListCount++] = aperture; + + // Fallback to sysmem if allowed. + if (bAllowSysmem && + (aperture != ADDR_SYSMEM) && !(pGVAS->flags & VASPACE_FLAGS_BAR)) + { + memPoolList[memPoolListCount++] = ADDR_SYSMEM; + } + } + else + { + const MMU_FMT_LEVEL *pParent; + const GMMU_FMT_PDE_MULTI *pPdeMulti = pFmt->pPdeMulti; + const GMMU_FMT_PDE *pPde; + NvU32 subLevel; + + // Find the level's parent format. + pParent = mmuFmtFindLevelParent(pFmt->pRoot, pLevelFmt, &subLevel); + NV_ASSERT_OR_RETURN(NULL != pParent, NV_ERR_INVALID_ARGUMENT); + + // Get the alignment from the parent PDE address shift. + pPde = gmmuFmtGetPde(pFmt, pParent, subLevel); + + { + alignment = NVBIT(pPde->fldAddrSysmem.shift); + } + + // Initially assume full size. + newMemSize = mmuFmtLevelSize(pLevelFmt); + + // Shrink size if partial page tables are supported. + if ((pGVAS->flags & VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE) && + (pParent->numSubLevels > 1) && + nvFieldIsValid32(&pPdeMulti->fldSizeRecipExp)) + { + NvU32 i; + // + // Only a fixed set of PDE ranges are allowed to have partial size. + // Partial VA holes of these PDEs are blocked at VAS creation time. + // See @ref gvaspaceConstructHal_IMPL for details. + // + for (i = 0; i < pGVAS->numPartialPtRanges; ++i) + { + if ((vaBase >= pGVAS->partialPtVaRangeBase[i]) && + (vaBase <= (pGVAS->partialPtVaRangeBase[i] + + pGVAS->partialPtVaRangeSize - 1))) + { + const NvU32 recipExpMax = pPdeMulti->fldSizeRecipExp.maskPos >> + pPdeMulti->fldSizeRecipExp.shift; + const NvU32 fracMemSize = nvNextPow2_U32(minMemSize); + const NvU32 recipExpTgt = BIT_IDX_32(newMemSize / fracMemSize); + const NvU32 recipExp = NV_MIN(recipExpMax, recipExpTgt); + newMemSize >>= recipExp; + bPartialTbl = NV_TRUE; + break; + } + } + } + + // New size must satisfy the minimum size. + NV_ASSERT(newMemSize >= minMemSize); + // New size must be larger than old size, otherwise should have aborted earlier. + NV_ASSERT(newMemSize > *pMemSize); + + // Determine level aperture and memory attributes. + if (pGVAS->flags & VASPACE_FLAGS_BAR) + { + aperture = kgmmuGetPTEBAR1Aperture(pKernelGmmu); + attr = kgmmuGetPTEBAR1Attr(pKernelGmmu); + bAllowSysmem = !FLD_TEST_DRF(_REG_STR_RM, _INST_LOC, _BAR_PTE, _VID, + pGpu->instLocOverrides); + } + else + { + aperture = kgmmuGetPTEAperture(pKernelGmmu); + attr = kgmmuGetPTEAttr(pKernelGmmu); + bAllowSysmem = !FLD_TEST_DRF(_REG_STR_RM, _INST_LOC, _PTE, _VID, + pGpu->instLocOverrides); + } + + // + // BAR PDEs/PTEs are not allowed in sysmem since it can cause deadlock + // during PCIE transactions. + // PMU PDEs/PTEs must be in vidmem so that PMU can access virtually mapped + // memory during GC6 exit. + // + bAllowSysmem = bAllowSysmem && + !(pGVAS->flags & VASPACE_FLAGS_BAR) && + !(pGVAS->flags & VASPACE_FLAGS_PMU); + + // Prefer sysmem if requested and allowed. + if (bAllowSysmem && + (NULL != pBlock && pBlock->flags.bPreferSysmemPageTables)) + { + memPoolList[memPoolListCount++] = ADDR_SYSMEM; + } + + // Default aperture. 
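+        // memPoolList is an ordered set of apertures to try in turn below;
+        // later entries act as fallbacks if the preferred aperture fails.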
+ memPoolList[memPoolListCount++] = aperture; + + // Fallback to sysmem if requested and allowed. + if (bAllowSysmem && + (pGVAS->flags & VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS)) + { + memPoolList[memPoolListCount++] = ADDR_SYSMEM; + } + } + + // Add memList end entry. + memPoolList[memPoolListCount++] = ADDR_UNKNOWN; + NV_ASSERT(memPoolListCount <= NV_ARRAY_ELEMENTS32(memPoolList)); + + // MEMDESC flags + memDescFlags = MEMDESC_FLAGS_LOCKLESS_SYSMEM_ALLOC | + MEMDESC_FLAGS_PAGE_SIZE_ALIGN_IGNORE; + + if (pGVAS->flags & VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR) + { + memDescFlags |= MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE; + } + + // Create the level memdesc. + for (i = 0; i < maxPgDirs; i++) + { + MEMORY_DESCRIPTOR *pMemDescTemp; + + status = memdescCreate(&pMemDescTemp, pGpu, + (((newMemSize < RM_PAGE_SIZE) && !bPartialTbl && !bPmaManaged) ? + RM_PAGE_SIZE : newMemSize), + alignment, + NV_TRUE, + ADDR_UNKNOWN, + attr, + memDescFlags); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + + // Page levels always use 4KB swizzle. + memdescSetPageSize(pMemDescTemp, AT_GPU, RM_PAGE_SIZE); + + // + // Allocate the page level memory from reserved pool if aperture is vidmem + // and PMA is enabled. Otherwise, allocate the same way on both vidmem and + // sysmem. + // + while (memPoolList[j] != ADDR_UNKNOWN) + { + memdescSetAddressSpace(pMemDescTemp, memPoolList[j]); + switch (memPoolList[j]) + { + case ADDR_FBMEM: + if (RMCFG_FEATURE_PMA && + (pGVAS->flags & VASPACE_FLAGS_PTETABLE_PMA_MANAGED) && + (pGVAS->pPageTableMemPool != NULL)) + { + pMemDescTemp->ActualSize = RM_ALIGN_UP(newMemSize, alignment); + status = rmMemPoolAllocate(pGVAS->pPageTableMemPool, + (RM_POOL_ALLOC_MEMDESC*)pMemDescTemp); + break; + } + case ADDR_SYSMEM: + status = memdescAlloc(pMemDescTemp); + break; + default: + NV_ASSERT_OR_GOTO(0, done); + } + if (NV_OK == status) + { + // + // Always scrub the allocation for the PDB allocation in case + // GMMU prefetches some uninitialized entries + // + if (pLevelFmt == pFmt->pRoot) + { + status = _gmmuScrubMemDesc(pGpu, pMemDescTemp); + } + + break; + } + j++; + } + + if (NV_OK != status) + { + memdescDestroy(pMemDescTemp); + goto done; + } + + // + // The packing optimization is only needed for allocations in vidmem since + // the 4K granularity is not applicable to allocations in sysmem. + // + bPacked = ((memdescGetAddressSpace(pMemDescTemp) == ADDR_FBMEM) && + (alignment < RM_PAGE_SIZE) && !bPmaManaged); + + if (bPacked) + { + // Try to allocate from the free list of packed memdescs + pMemDesc[i] = _gmmuMemDescCacheAlloc(pUserCtx); + if (NULL != pMemDesc[i]) + { + // Free this if we have already allocated from the list. + memdescFree(pMemDescTemp); + memdescDestroy(pMemDescTemp); + } + else + { + // Add another page to the cache and then alloc. + pMemDesc[i] = _gmmuMemDescCacheCreate(pUserCtx, + pMemDescTemp, + newMemSize); + if (NULL == pMemDesc[i]) + { + memdescFree(pMemDescTemp); + memdescDestroy(pMemDescTemp); + goto done; + } + } + } + else + { + pMemDesc[i] = pMemDescTemp; + } + +#if NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "[GPU%u]: [%s] %sPA 0x%llX (0x%X bytes) for VA 0x%llX-0x%llX\n", + pUserCtx->pGpu->gpuInstance, + bPacked ? "Packed" : "Unpacked", + bMirror ? 
g_gmmuUVMMirroringDirStrings[i] : "", + memdescGetPhysAddr(pMemDesc[i], AT_GPU, 0), newMemSize, + mmuFmtLevelVirtAddrLo(pLevelFmt, vaBase), + mmuFmtLevelVirtAddrHi(pLevelFmt, vaLimit)); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "[GPU%u]: [Packed: %c] %sPA 0x%llX (0x%X bytes) for VA 0x%llX-0x%llX\n", + pUserCtx->pGpu->gpuInstance, + bPacked ? 'Y' : 'N', + bMirror ? _gmmuUVMMirroringDirString[i] : ' ', + memdescGetPhysAddr(pMemDesc[i], AT_GPU, 0), newMemSize, + mmuFmtLevelVirtAddrLo(pLevelFmt, vaBase), + mmuFmtLevelVirtAddrHi(pLevelFmt, vaLimit)); +#endif // NV_PRINTF_STRINGS_ALLOWED + } + + // Commit return values. + *ppMemDesc = (MMU_WALK_MEMDESC*)pMemDesc[GMMU_USER_PAGE_DIR_INDEX]; + *pMemSize = newMemSize; + *pBChanged = NV_TRUE; + +done: + if (NV_OK == status) + { + // Commit mirrored root desc. + if (bMirror) + { + pUserCtx->pGpuState->pMirroredRoot = + (MMU_WALK_MEMDESC*)pMemDesc[GMMU_KERNEL_PAGE_DIR_INDEX]; + } + } + else + { + for (i = 0; i < maxPgDirs; i++) + { + memdescFree(pMemDesc[i]); + memdescDestroy(pMemDesc[i]); + } + } + return status; +} + +static PMEMORY_DESCRIPTOR +_gmmuMemDescCacheCreate +( + MMU_WALK_USER_CTX *pUserCtx, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 memSize +) +{ + NV_STATUS status = NV_OK; + MEMORY_DESCRIPTOR* pMemDescTmp; + NvU32 i; + + NV_ASSERT_OR_RETURN((NULL != pMemDesc), NULL); + NV_ASSERT_OR_RETURN((memSize <= pMemDesc->ActualSize), NULL); + + if (pMemDesc->pSubMemDescList == NULL) + { + pMemDesc->pSubMemDescList = portMemAllocNonPaged(sizeof(MEMORY_DESCRIPTOR_LIST)); + NV_ASSERT_OR_RETURN(pMemDesc->pSubMemDescList != NULL, NULL); + } + + // Initialize the list head of the unpacked memdesc + listInitIntrusive(pMemDesc->pSubMemDescList); + + // Form the list of submemdescs with the parent memdesc as the head + for (i = 0; i < (pMemDesc->ActualSize / memSize); i++) + { + MEMORY_DESCRIPTOR *pSubMemDesc = NULL; + status = memdescCreateSubMem(&pSubMemDesc, + pMemDesc, + pUserCtx->pGpu, + i * memSize, + memSize); + NV_ASSERT_OR_RETURN((NV_OK == status), NULL); + listAppendExisting(pMemDesc->pSubMemDescList, pSubMemDesc); + } + + // Add the parent memdesc to the per VAS/per GPU list of unpacked memdescs + listAppendExisting(&pUserCtx->pGpuState->unpackedMemDescList, pMemDesc); + + // Pop the free list of packed memdescs and return one + pMemDescTmp = listTail(pMemDesc->pSubMemDescList); + listRemove(pMemDesc->pSubMemDescList, pMemDescTmp); + return pMemDescTmp; +} + +static PMEMORY_DESCRIPTOR +_gmmuMemDescCacheAlloc +( + MMU_WALK_USER_CTX *pUserCtx +) +{ + MEMORY_DESCRIPTOR *pParentMemDesc; + MEMORY_DESCRIPTOR *pParentMemDescNext; + + for (pParentMemDesc = listHead(&pUserCtx->pGpuState->unpackedMemDescList); + pParentMemDesc != NULL; + pParentMemDesc = pParentMemDescNext) + { + pParentMemDescNext = listNext(&pUserCtx->pGpuState->unpackedMemDescList, pParentMemDesc); + MEMORY_DESCRIPTOR *pChild; + pChild = listTail(pParentMemDesc->pSubMemDescList); + listRemove(pParentMemDesc->pSubMemDescList, pChild); + if (NULL != pChild) + { + return pChild; + } + } + return NULL; +} + +void +gmmuMemDescCacheFree +( + GVAS_GPU_STATE *pGpuState +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pGpuState); + + while (listCount(&pGpuState->unpackedMemDescList) > 0) + { + MEMORY_DESCRIPTOR *pTmp; + MEMORY_DESCRIPTOR *pParentMemDesc; + pParentMemDesc = listTail(&pGpuState->unpackedMemDescList); + + // Assert if all submemdescs have not been returned to the parent. 
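+        // Each submemdesc holds a reference on its parent, so once every one is
+        // back on the free list the parent refcount exceeds the list count by
+        // exactly one (the parent's own reference).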
+ NV_ASSERT(pParentMemDesc->RefCount - listCount(pParentMemDesc->pSubMemDescList) == 1); + + while(listCount(pParentMemDesc->pSubMemDescList) > 0) + { + pTmp = listTail(pParentMemDesc->pSubMemDescList); + listRemove(pParentMemDesc->pSubMemDescList, pTmp); + memdescDestroy(pTmp); + } + listRemove(&pGpuState->unpackedMemDescList, pParentMemDesc); + memdescFree(pParentMemDesc); + memdescDestroy(pParentMemDesc); + } +} + +static void +_gmmuWalkCBLevelFree +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + MMU_WALK_MEMDESC *pOldMem +) +{ + NvU8 i; + NvBool bMirror = _mirrorLevel(pUserCtx, pLevelFmt); + NvU8 maxPgDirs = _getMaxPageDirs(bMirror); + MEMORY_DESCRIPTOR *pMemDesc[GMMU_MAX_PAGE_DIR_INDEX_COUNT] = {NULL}; + + pMemDesc[GMMU_USER_PAGE_DIR_INDEX] = (MEMORY_DESCRIPTOR*)pOldMem; + if (bMirror) + { + pMemDesc[GMMU_KERNEL_PAGE_DIR_INDEX] = + (MEMORY_DESCRIPTOR*)pUserCtx->pGpuState->pMirroredRoot; + pUserCtx->pGpuState->pMirroredRoot = NULL; + } + + for (i = 0; i < maxPgDirs; i++) + { + if (NULL == pMemDesc[i]) + { + continue; + } + +#if NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "[GPU%u]: %sPA 0x%llX for VA 0x%llX-0x%llX\n", + pUserCtx->pGpu->gpuInstance, + bMirror ? g_gmmuUVMMirroringDirStrings[i] : "", + memdescGetPhysAddr(pMemDesc[i], AT_GPU, 0), + mmuFmtLevelVirtAddrLo(pLevelFmt, vaBase), + mmuFmtLevelVirtAddrHi(pLevelFmt, vaBase)); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "[GPU%u]: %cPA 0x%llX for VA 0x%llX-0x%llX\n", + pUserCtx->pGpu->gpuInstance, + bMirror ? _gmmuUVMMirroringDirString[i] : ' ', + memdescGetPhysAddr(pMemDesc[i], AT_GPU, 0), + mmuFmtLevelVirtAddrLo(pLevelFmt, vaBase), + mmuFmtLevelVirtAddrHi(pLevelFmt, vaBase)); +#endif // NV_PRINTF_STRINGS_ALLOWED + + // + // If this is a submemdesc, return it to its free list only when + // the refcount is 1. A refcount greater than 1 implies that 2 or + // more GPUs in SLI are using it. GPUs in SLI can share a page level + // instance. + // + if (memdescIsSubMemoryMemDesc(pMemDesc[i]) && + (pMemDesc[i]->RefCount == 1)) + { + // Return this to the free list from which it was borrowed + listAppendExisting(memdescGetParentDescriptor(pMemDesc[i])->pSubMemDescList, pMemDesc[i]); + } + else + { + if (RMCFG_FEATURE_PMA && + (pUserCtx->pGVAS->flags & VASPACE_FLAGS_PTETABLE_PMA_MANAGED) && + (pMemDesc[i]->pPageHandleList != NULL) && + (listCount(pMemDesc[i]->pPageHandleList) != 0) && + (pUserCtx->pGVAS->pPageTableMemPool != NULL)) + { + rmMemPoolFree(pUserCtx->pGVAS->pPageTableMemPool, + (RM_POOL_ALLOC_MEMDESC*)pMemDesc[i], + pUserCtx->pGVAS->flags); + } + + if (!memdescIsSubMemoryMemDesc(pMemDesc[i])) + { + memdescFree(pMemDesc[i]); + } + memdescDestroy(pMemDesc[i]); + } + } +} + +static NvBool +_gmmuWalkCBUpdatePdb +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pRootFmt, + const MMU_WALK_MEMDESC *pRootMem, + const NvBool bIgnoreChannelBusy +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + MEMORY_DESCRIPTOR *pPDB = (MEMORY_DESCRIPTOR*)pRootMem; + + NV_PRINTF(LEVEL_INFO, "[GPU%u]: PA 0x%llX (%s)\n", + pUserCtx->pGpu->gpuInstance, + (NULL != pPDB) ? memdescGetPhysAddr(pPDB, AT_GPU, 0) : 0, + (NULL != pPDB) ? "valid" : "null"); + + if (pUserCtx->pGVAS->flags & VASPACE_FLAGS_BAR_BAR1) + { + // + // Do nothing, as BAR1 pdb is static and is only created and + // destroyed along with the vaspace itself. 
Since the bar1 + // instance memory is appropriately updated then, we do not + // do anything inside update pdb for bar1 which will be invoked + // for mmuwalksparsify and mmuwalkunmap. + // + return NV_TRUE; + } + else if ((pUserCtx->pGVAS->flags & VASPACE_FLAGS_HDA)) + { + // Instance Block set up once by caller. + return NV_TRUE; + } + else if (IS_VIRTUAL_WITH_SRIOV(pGpu) || IS_GSP_CLIENT(pGpu)) + { + // Noop inside a guest or CPU RM. + return NV_TRUE; + } + return NV_TRUE; +} + +static NvBool +_gmmuWalkCBUpdatePde +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndex, + const MMU_WALK_MEMDESC **pSubLevels +) +{ + NvU32 i; + GMMU_ENTRY_VALUE entry; + NvBool bMirror = _mirrorLevel(pUserCtx, pLevelFmt); + NvU8 maxPgDirs = _getMaxPageDirs(bMirror); + OBJGPU *pGpu = pUserCtx->pGpu; + OBJGVASPACE *pGVAS = pUserCtx->pGVAS; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const GMMU_FMT *pFmt = pUserCtx->pGpuState->pFmt; + MEMORY_DESCRIPTOR *pMemDesc[GMMU_MAX_PAGE_DIR_INDEX_COUNT] = {NULL}; + NvU32 recipExp = NV_U32_MAX; + const GMMU_FMT_PDE_MULTI *pPdeMulti = pFmt->pPdeMulti; + + pMemDesc[GMMU_USER_PAGE_DIR_INDEX] = (MEMORY_DESCRIPTOR*)pLevelMem; + if (bMirror) + { + pMemDesc[GMMU_KERNEL_PAGE_DIR_INDEX] = + (MEMORY_DESCRIPTOR*)pUserCtx->pGpuState->pMirroredRoot; + } + + for (i = 0; i < maxPgDirs; i++) + { +#if NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, "[GPU%u]: %sPA 0x%llX, Entry 0x%X\n", + pUserCtx->pGpu->gpuInstance, + bMirror ? g_gmmuUVMMirroringDirStrings[i] : "", + memdescGetPhysAddr(pMemDesc[i], AT_GPU, 0), entryIndex); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, "[GPU%u]: %cPA 0x%llX, Entry 0x%X\n", + pUserCtx->pGpu->gpuInstance, + bMirror ? _gmmuUVMMirroringDirString[i] : ' ', + memdescGetPhysAddr(pMemDesc[i], AT_GPU, 0), entryIndex); +#endif // NV_PRINTF_STRINGS_ALLOWED + } + + portMemSet(entry.v8, 0, pLevelFmt->entrySize); + + for (i = 0; i < pLevelFmt->numSubLevels; ++i) + { + const GMMU_FMT_PDE *pPde = gmmuFmtGetPde(pFmt, pLevelFmt, i); + MEMORY_DESCRIPTOR *pSubMemDesc = (MEMORY_DESCRIPTOR*)pSubLevels[i]; + + if (NULL != pSubMemDesc) + { + const GMMU_APERTURE aperture = kgmmuGetMemAperture(pKernelGmmu, pSubMemDesc); + const GMMU_FIELD_ADDRESS *pFldAddr = gmmuFmtPdePhysAddrFld(pPde, aperture); + const NvU64 physAddr = memdescGetPhysAddr(pSubMemDesc, AT_GPU, 0); + + { + nvFieldSetBool(&pPde->fldVolatile, memdescGetVolatility(pSubMemDesc), entry.v8); + } + + gmmuFieldSetAperture(&pPde->fldAperture, aperture, entry.v8); + gmmuFieldSetAddress(pFldAddr, + kgmmuEncodePhysAddr(pKernelGmmu, aperture, physAddr, + NVLINK_INVALID_FABRIC_ADDR), + entry.v8); + + // Calculate partial page table size if supported. + if ((pGVAS->flags & VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE) && + (pLevelFmt->numSubLevels > 1) && + nvFieldIsValid32(&pPdeMulti->fldSizeRecipExp)) + { + const NvU32 maxMemSize = mmuFmtLevelSize(&pLevelFmt->subLevels[i]); + const NvU32 curMemSize = (NvU32)pSubMemDesc->Size; + const NvU32 minRecipExp = BIT_IDX_32(maxMemSize / curMemSize); + + // We should have allocated on a fractional (pow2) boundary. + NV_ASSERT(ONEBITSET(curMemSize)); + + if (recipExp == NV_U32_MAX) + { + // Save exponent if not set yet. + recipExp = minRecipExp; + } + else + { + // Otherwise ensure parallel sub-levels match. 
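+                    // The PDE carries a single size-recip exponent shared by all
+                    // sub-levels, so dual page tables must shrink identically.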
+ NV_ASSERT(recipExp == minRecipExp); + } + } + + NV_PRINTF(LEVEL_INFO, " SubLevel %u = PA 0x%llX\n", i, + physAddr); + } + else + { + NV_PRINTF(LEVEL_INFO, " SubLevel %u = INVALID\n", i); + } + } + + // Set partial page table size exponent if needed. + if (recipExp != NV_U32_MAX) + { + nvFieldSet32(&pPdeMulti->fldSizeRecipExp, recipExp, entry.v8); + } + + for (i = 0; i < maxPgDirs; i++) + { + TRANSFER_SURFACE dest = {0}; + + dest.pMemDesc = pMemDesc[i]; + dest.offset = entryIndex * pLevelFmt->entrySize; + NV_ASSERT_OK(memmgrMemWrite(GPU_GET_MEMORY_MANAGER(pGpu), &dest, + entry.v8, pLevelFmt->entrySize, + TRANSFER_FLAGS_NONE)); + } + + return NV_TRUE; +} + +static void +_gmmuWalkCBFillEntries +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + const MMU_WALK_FILL_STATE fillState, + NvU32 *pProgress +) +{ + NvU32 i; + NvU32 j; + OBJGPU *pGpu = pUserCtx->pGpu; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + const GMMU_FMT *pFmt = pUserCtx->pGpuState->pFmt; + NvBool bMirror = _mirrorLevel(pUserCtx, pLevelFmt); + NvU8 maxPgDirs = _getMaxPageDirs(bMirror); + MEMORY_DESCRIPTOR *pMemDesc[GMMU_MAX_PAGE_DIR_INDEX_COUNT] = {NULL}; + NvU32 sizeOfEntries = (entryIndexHi - entryIndexLo + 1) * + pLevelFmt->entrySize; + NvU8 *pEntries; + + pMemDesc[GMMU_USER_PAGE_DIR_INDEX] = (MEMORY_DESCRIPTOR*)pLevelMem; + if (bMirror) + { + pMemDesc[GMMU_KERNEL_PAGE_DIR_INDEX] = + (MEMORY_DESCRIPTOR*)pUserCtx->pGpuState->pMirroredRoot; + } + + for (j = 0; j < maxPgDirs; j++) + { + TRANSFER_SURFACE dest = {0}; + + dest.pMemDesc = pMemDesc[j]; + dest.offset = entryIndexLo * pLevelFmt->entrySize; + + // + // A shadow buffer is allocated to store the PTEs in case of writes + // using CE and GSP DMA task. This code gets called in a high IRQL + // path on Windows and shadow buffer allocation may fail there. + // + pEntries = memmgrMemBeginTransfer(pMemoryManager, &dest, sizeOfEntries, + TRANSFER_FLAGS_SHADOW_ALLOC); + NV_ASSERT_OR_RETURN_VOID(pEntries != NULL); + +#if NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "[GPU%u]: %sPA 0x%llX, Entries 0x%X-0x%X = %s\n", + pUserCtx->pGpu->gpuInstance, + bMirror ? g_gmmuUVMMirroringDirStrings[j] : "", + memdescGetPhysAddr(pMemDesc[j], AT_GPU, 0), + entryIndexLo, entryIndexHi, + g_gmmuFillStateStrings[fillState]); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_INFO, + "[GPU%u] %cPA 0x%llX, Entries 0x%X-0x%X = %c\n", + pUserCtx->pGpu->gpuInstance, + bMirror ? _gmmuUVMMirroringDirString[j] : ' ', + memdescGetPhysAddr(pMemDesc[j], AT_GPU, 0), + entryIndexLo, entryIndexHi, + _gmmuFillStateString[fillState]); +#endif // NV_PRINTF_STRINGS_ALLOWED + + switch (fillState) + { + case MMU_WALK_FILL_INVALID: + portMemSet(pEntries, 0, sizeOfEntries); + break; + case MMU_WALK_FILL_SPARSE: + { + const GMMU_FMT_FAMILY *pFam = kgmmuFmtGetFamily(pKernelGmmu, pFmt->version); + const GMMU_ENTRY_VALUE *pSparseEntry; + + // Select sparse entry template based on number of sub-levels. 
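+                // Multi sub-level formats take the PDE_MULTI template, single
+                // sub-level formats take the PDE template, and leaf levels take
+                // the sparse PTE (or the FLA dummy-page PTE for FLA VA spaces).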
+ if (pLevelFmt->numSubLevels > 1) + { + pSparseEntry = &pFam->sparsePdeMulti; + } + else if (pLevelFmt->numSubLevels == 1) + { + pSparseEntry = &pFam->sparsePde; + } + else + { + if (kbusIsFlaDummyPageEnabled(pKernelBus) && + (pUserCtx->pGVAS->flags & VASPACE_FLAGS_FLA)) + pSparseEntry = &pUserCtx->pGpuState->flaDummyPage.pte; + else + pSparseEntry = &pFam->sparsePte; + } + + // Copy sparse template to each entry. + for (i = entryIndexLo; i <= entryIndexHi; ++i) + { + NvU32 entryIndex = (i - entryIndexLo) * pLevelFmt->entrySize; + portMemCopy(&pEntries[entryIndex], + pLevelFmt->entrySize, + pSparseEntry->v8, + pLevelFmt->entrySize); + } + break; + } + case MMU_WALK_FILL_NV4K: + { + const GMMU_FMT_FAMILY *pFam = + kgmmuFmtGetFamily(pKernelGmmu, pFmt->version); + const GMMU_ENTRY_VALUE *pNv4kEntry = &pFam->nv4kPte; + + // debug print - to remove when the code is robust enough + if (!gvaspaceIsAtsEnabled(pUserCtx->pGVAS) || + mmuFmtLevelPageSize(pLevelFmt) != RM_PAGE_SIZE_64K) + { +#if NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_ERROR, + "[GPU%u]: %sPA 0x%llX, Entries 0x%X-0x%X = %s FAIL\n", + pUserCtx->pGpu->gpuInstance, + bMirror ? g_gmmuUVMMirroringDirStrings[j] : "", + memdescGetPhysAddr(pMemDesc[j], AT_GPU, 0), + entryIndexLo, entryIndexHi, + g_gmmuFillStateStrings[fillState]); +#else // NV_PRINTF_STRINGS_ALLOWED + NV_PRINTF(LEVEL_ERROR, + "[GPU%u]: %cPA 0x%llX, Entries 0x%X-0x%X = %c FAIL\n", + pUserCtx->pGpu->gpuInstance, + bMirror ? _gmmuUVMMirroringDirString[j] : ' ', + memdescGetPhysAddr(pMemDesc[j], AT_GPU, 0), + entryIndexLo, entryIndexHi, + _gmmuFillStateString[fillState]); +#endif // NV_PRINTF_STRINGS_ALLOWED + + DBG_BREAKPOINT(); + return; + } + + // Copy nv4k template to each entry + for (i = entryIndexLo; i <= entryIndexHi; ++i) + { + NvU32 entryIndex = (i - entryIndexLo) * pLevelFmt->entrySize; + portMemCopy(&pEntries[entryIndex], + pLevelFmt->entrySize, + pNv4kEntry->v8, + pLevelFmt->entrySize); + } + break; + } + default: + NV_ASSERT(0); + break; + } + + memmgrMemEndTransfer(pMemoryManager, &dest, sizeOfEntries, + TRANSFER_FLAGS_SHADOW_ALLOC); + } + + *pProgress = entryIndexHi - entryIndexLo + 1; +} + +static void +_gmmuWalkCBCopyEntries +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pSrcMem, + const MMU_WALK_MEMDESC *pDstMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + NvU32 *pProgress +) +{ + MEMORY_DESCRIPTOR *pSrcDesc = (MEMORY_DESCRIPTOR *)pSrcMem; + MEMORY_DESCRIPTOR *pDstDesc = (MEMORY_DESCRIPTOR *)pDstMem; + TRANSFER_SURFACE src = {0}; + TRANSFER_SURFACE dest = {0}; + + src.pMemDesc = pSrcDesc; + src.offset = entryIndexLo * pLevelFmt->entrySize; + dest.pMemDesc = pDstDesc; + dest.offset = entryIndexLo * pLevelFmt->entrySize; + + // Only copy if different source and destination memory. + if (!memdescDescIsEqual(pSrcDesc, pDstDesc)) + { + OBJGPU *pGpu = pUserCtx->pGpu; + NvU32 sizeOfEntries = (entryIndexHi - entryIndexLo + 1) * + pLevelFmt->entrySize; + + NV_PRINTF(LEVEL_INFO, + "[GPU%u]: GVAS(%p) PA 0x%llX -> PA 0x%llX, Entries 0x%X-0x%X\n", + pGpu->gpuInstance, pUserCtx->pGVAS, + memdescGetPhysAddr(pSrcDesc, AT_GPU, 0), + memdescGetPhysAddr(pDstDesc, AT_GPU, 0), entryIndexLo, + entryIndexHi); + + NV_ASSERT_OK(memmgrMemCopy(GPU_GET_MEMORY_MANAGER(pGpu), &dest, &src, + sizeOfEntries, TRANSFER_FLAGS_NONE)); + } + + // Report full range complete. 
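+    // (the walker interprets *pProgress as the number of entries processed).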
+ *pProgress = entryIndexHi - entryIndexLo + 1; +} + +const MMU_WALK_CALLBACKS g_gmmuWalkCallbacks = +{ + _gmmuWalkCBLevelAlloc, + _gmmuWalkCBLevelFree, + _gmmuWalkCBUpdatePdb, + _gmmuWalkCBUpdatePde, + _gmmuWalkCBFillEntries, + _gmmuWalkCBCopyEntries, + NULL, +}; diff --git a/src/nvidia/src/kernel/gpu/mmu/kern_gmmu.c b/src/nvidia/src/kernel/gpu/mmu/kern_gmmu.c new file mode 100644 index 000000000..003c9a034 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/kern_gmmu.c @@ -0,0 +1,2010 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /****************************************************************************** +* +* Kernel GMMU module header +* Defines and structures used on CPU RM for the GMMU object. +* +******************************************************************************/ + +#include "gpu/bif/kernel_bif.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/nvlink/kernel_nvlink.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "vgpu/vgpu_events.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "rmapi/rmapi.h" +#include "gpu/gpu.h" +#include "nvRmReg.h" +#include "vgpu/rpc.h" +#include "kernel/gpu/intr/engine_idx.h" + + +static void _kgmmuInitRegistryOverrides(OBJGPU *pGpu, KernelGmmu *pKernelGmmu); + +/*! + * KERNEL_GMMU constructor + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * @param[in] engDesc Engine descriptor + * + * @return NV_OK on success, pertinent error code on failure. + */ +NV_STATUS +kgmmuConstructEngine_IMPL(OBJGPU *pGpu, KernelGmmu *pKernelGmmu, ENGDESCRIPTOR engDesc) +{ + NvU32 v; + + kgmmuDetermineMaxVASize_HAL(pGpu, pKernelGmmu); + + if (gpuIsCacheOnlyModeEnabled(pGpu)) + { + pKernelGmmu->bHugePageSupported = NV_FALSE; + pKernelGmmu->bPageSize512mbSupported = NV_FALSE; + } + + // Allocate and init MMU format families. + kgmmuFmtInitPdeApertures_HAL(pKernelGmmu, pKernelGmmu->pdeApertures); + kgmmuFmtInitPteApertures_HAL(pKernelGmmu, pKernelGmmu->pteApertures); + + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + const NvU32 ver = g_gmmuFmtVersions[v]; + if (kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, ver)) + { + GMMU_FMT_FAMILY *pFam = NULL; + + // Alloc version struct. 
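+ //
+ // The family struct holds the PDE-multi, PDE and PTE format
+ // descriptors for this format version; the per-big-page-size
+ // GMMU_FMT structures built later in kgmmuFmtInit() point back
+ // into it.
+ //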
+ pFam = portMemAllocNonPaged(sizeof(*pFam)); + NV_ASSERT_OR_RETURN((pFam != NULL), NV_ERR_NO_MEMORY); + portMemSet(pFam, 0, sizeof(*pFam)); + pKernelGmmu->pFmtFamilies[v] = pFam; + + // Init PDE/PTE formats. + kgmmuFmtInitPdeMulti_HAL(pKernelGmmu, &pFam->pdeMulti, ver, pKernelGmmu->pdeApertures); + kgmmuFmtInitPde_HAL(pKernelGmmu, &pFam->pde, ver, pKernelGmmu->pdeApertures); + kgmmuFmtInitPte_HAL(pKernelGmmu, &pFam->pte, ver, pKernelGmmu->pteApertures, + gpuIsUnifiedMemorySpaceEnabled(pGpu)); + + kgmmuFmtInitPteComptagLine_HAL(pKernelGmmu, &pFam->pte, ver); + } + else + { + pKernelGmmu->pFmtFamilies[v] = NULL; + } + } + + NV_ASSERT_OK_OR_RETURN(kgmmuFmtInit(pKernelGmmu)); + + portMemSet(&pKernelGmmu->mmuFaultBuffer, 0, sizeof(pKernelGmmu->mmuFaultBuffer)); + + // Default placement for PDEs is in vidmem. + pKernelGmmu->PDEAperture = ADDR_FBMEM; + pKernelGmmu->PDEAttr = NV_MEMORY_WRITECOMBINED; + pKernelGmmu->PDEBAR1Aperture = ADDR_FBMEM; + pKernelGmmu->PDEBAR1Attr = NV_MEMORY_WRITECOMBINED; + + // Default placement for PTEs is in vidmem. + pKernelGmmu->PTEAperture = ADDR_FBMEM; + pKernelGmmu->PTEAttr = NV_MEMORY_WRITECOMBINED; + pKernelGmmu->PTEBAR1Aperture = ADDR_FBMEM; + pKernelGmmu->PTEBAR1Attr = NV_MEMORY_WRITECOMBINED; + + _kgmmuInitRegistryOverrides(pGpu, pKernelGmmu); + + return NV_OK; +} + +static NV_STATUS +_kgmmuInitStaticInfo +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo; + NV_STATUS status; + + // + // On vGPU, all hardware management is done by the host except for full SR-IOV. + // Thus, only do any further HW initialization on the host. + // + if (!(IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)))) + { + // Init HAL specific features. + NV_ASSERT_OK_OR_RETURN(kgmmuFmtFamiliesInit_HAL(pGpu, pKernelGmmu)); + } + + pStaticInfo = portMemAllocNonPaged(sizeof(*pStaticInfo)); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pStaticInfo != NULL, NV_ERR_INSUFFICIENT_RESOURCES); + portMemSet(pStaticInfo, 0, sizeof(*pStaticInfo)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + kgmmuInitStaticInfo_HAL(pGpu, pKernelGmmu, pStaticInfo), + fail); + + pKernelGmmu->pStaticInfo = pStaticInfo; + +fail: + if (status != NV_OK) + { + portMemFree(pStaticInfo); + } + + return status; +} + +/* + * Initialize the Kernel GMMU state. 
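+ *
+ * Caches the BIF DMA window start address and gathers the static GMMU
+ * info via _kgmmuInitStaticInfo().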
+ * + * @param pGpu + * @param pKernelGmmu + */ +NV_STATUS kgmmuStateInitLocked_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + NV_STATUS status; + + if (pKernelBif != NULL) + { + // This value shouldn't change after initialization, so cache it now + pKernelGmmu->sysmemBaseAddress = pKernelBif->dmaWindowStartAddress; + } + + status = _kgmmuInitStaticInfo(pGpu, pKernelGmmu); + + return status; +} + +/* + * State Post Load + */ +NV_STATUS kgmmuStatePostLoad_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 flags +) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status = NV_OK; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvBool bComputePeerMode = NV_FALSE; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT) || + kbusIsFlaSupported(pKernelBus)) + { + bComputePeerMode = NV_TRUE; + } + + if (bComputePeerMode) + { + status = kgmmuEnableNvlinkComputePeerAddressing_HAL(pKernelGmmu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to enable GMMU property compute addressing for GPU %x , status:%x\n", + pGpu->gpuInstance, status); + return status; + } + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR, + NULL, 0); + } + return status; +} + +/*! + * KernelGmmu destructor + * + * @param[in] pKernelGmmu KernelGmmu object pointer + */ +void +kgmmuDestruct_IMPL(KernelGmmu *pKernelGmmu) +{ + NvU32 v; + NvU32 b; + + // Free per big page size format and format-family storage. + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + if (NULL != pKernelGmmu->pFmtFamilies[v]) + { + for (b = 0; b < GMMU_FMT_MAX_BIG_PAGE_SIZES; ++b) + { + portMemFree(pKernelGmmu->pFmtFamilies[v]->pFmts[b]); + pKernelGmmu->pFmtFamilies[v]->pFmts[b] = NULL; + } + portMemFree(pKernelGmmu->pFmtFamilies[v]); + } + } +} + +void +kgmmuStateDestroy_IMPL(OBJGPU *pGpu, KernelGmmu *pKernelGmmu) +{ + if (NULL != pKernelGmmu->pStaticInfo) + { + portMemFree((void *)pKernelGmmu->pStaticInfo); + pKernelGmmu->pStaticInfo = NULL; + } + if (NULL != pKernelGmmu->pWarSmallPageTable) + { + memdescFree(pKernelGmmu->pWarSmallPageTable); + memdescDestroy(pKernelGmmu->pWarSmallPageTable); + pKernelGmmu->pWarSmallPageTable = NULL; + } + if (NULL != pKernelGmmu->pWarPageDirectory0) + { + memdescFree(pKernelGmmu->pWarPageDirectory0); + memdescDestroy(pKernelGmmu->pWarPageDirectory0); + pKernelGmmu->pWarPageDirectory0 = NULL; + } +} + +/*! 
+ * Initializes KERN_GMMU state based on registry key overrides + * + * @param[in] pGpu + * @param[in] pKernelGmmu + */ +static void +_kgmmuInitRegistryOverrides(OBJGPU *pGpu, KernelGmmu *pKernelGmmu) +{ + NvU32 data; + + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC, _PDE, pGpu->instLocOverrides), + "GMMU PDE", + &pKernelGmmu->PDEAperture, + &pKernelGmmu->PDEAttr); + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC, _BAR_PDE, pGpu->instLocOverrides), + "BAR1 PDE", + &pKernelGmmu->PDEBAR1Aperture, + &pKernelGmmu->PDEBAR1Attr); + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC, _PTE, pGpu->instLocOverrides), + "GMMU PTE", + &pKernelGmmu->PTEAperture, + &pKernelGmmu->PTEAttr); + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC, _BAR_PTE, pGpu->instLocOverrides), + "BAR1 PTE", + &pKernelGmmu->PTEBAR1Aperture, + &pKernelGmmu->PTEBAR1Attr); + + // + // Check if we want to disable big page size per address space + // + pKernelGmmu->bEnablePerVaspaceBigPage = IsGM20X(pGpu); + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_DISABLE_BIG_PAGE_PER_ADDRESS_SPACE, &data)) + { + pKernelGmmu->bEnablePerVaspaceBigPage = !data; + } + + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_FERMI_BIG_PAGE_SIZE, &data)) + { + if (pGpu->optimizeUseCaseOverride != + NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_DEFAULT) + { + NV_PRINTF(LEVEL_ERROR, + "The %s regkey cannot be used with the %s regkey!\n", + NV_REG_STR_FERMI_BIG_PAGE_SIZE, + NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX); + return; + } + else + { + switch (data) + { + case NV_REG_STR_FERMI_BIG_PAGE_SIZE_64KB: + case NV_REG_STR_FERMI_BIG_PAGE_SIZE_128KB: + pKernelGmmu->overrideBigPageSize = data; + break; + default: + break; + } + } + } + else if (pGpu->optimizeUseCaseOverride != + NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_DEFAULT) + { + switch (pGpu->optimizeUseCaseOverride) + { + case NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_SPARSE_TEX: + pKernelGmmu->overrideBigPageSize = RM_PAGE_SIZE_64K; + break; + case NV_REG_STR_RM_OPTIMIZE_COMPUTE_OR_SPARSE_TEX_COMPUTE: + pKernelGmmu->overrideBigPageSize = RM_PAGE_SIZE_128K; + break; + default: + break; + } + } + + // Check if HW fault buffer is disabled + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_DISABLE_HW_FAULT_BUFFER, &data)) + { + NV_PRINTF(LEVEL_ERROR, + "Overriding HW Fault buffer state to 0x%x due to regkey!\n", + data); + pKernelGmmu->setProperty(pKernelGmmu, PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED, data); + } +} + +GMMU_APERTURE +kgmmuGetMemAperture_IMPL +( + KernelGmmu *pKernelGmmu, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_FBMEM: + return GMMU_APERTURE_VIDEO; + case ADDR_SYSMEM: + if (NV_MEMORY_CACHED == memdescGetCpuCacheAttrib(pMemDesc)) + { + return GMMU_APERTURE_SYS_COH; + } + return GMMU_APERTURE_SYS_NONCOH; + default: + NV_ASSERT(0); + return GMMU_APERTURE_INVALID; + } +} + +/*! + * Initialize GMMU format structures dependent on big page size. + */ +NV_STATUS +kgmmuFmtInit_IMPL(KernelGmmu *pKernelGmmu) +{ + NvU32 v; + NvU32 b; + + // Allocate and init MMU formats for the supported big page sizes. + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + const NvU32 ver = g_gmmuFmtVersions[v]; + GMMU_FMT_FAMILY *pFam = pKernelGmmu->pFmtFamilies[v]; + if (NULL != pFam) + { + for (b = 0; b < GMMU_FMT_MAX_BIG_PAGE_SIZES; ++b) + { + const NvU32 bigPageShift = g_gmmuFmtBigPageShifts[b]; + + // Allocate +1 level for the last dual-level. 
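+ //
+ // Layout of the single allocation below:
+ //   [GMMU_FMT][MMU_FMT_LEVEL x numLevels]
+ // pRoot is pointed at the level array placed directly after the
+ // format struct.
+ //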
+ const NvU32 numLevels = GMMU_FMT_MAX_LEVELS + 1; + const NvU32 size = sizeof(GMMU_FMT) + sizeof(MMU_FMT_LEVEL) * numLevels; + MMU_FMT_LEVEL *pLvls; + + // Allocate format and levels in one chunk. + pFam->pFmts[b] = portMemAllocNonPaged(size); + NV_ASSERT_OR_RETURN((pFam->pFmts[b] != NULL), NV_ERR_NO_MEMORY); + portMemSet(pFam->pFmts[b], 0, size); + + // Levels stored contiguously after the format struct. + pLvls = (MMU_FMT_LEVEL *)(pFam->pFmts[b] + 1); + + // Common init. + pFam->pFmts[b]->version = ver; + pFam->pFmts[b]->pRoot = pLvls; + pFam->pFmts[b]->pPdeMulti = &pFam->pdeMulti; + pFam->pFmts[b]->pPde = &pFam->pde; + pFam->pFmts[b]->pPte = &pFam->pte; + + kgmmuFmtInitLevels_HAL(pKernelGmmu, pLvls, numLevels, ver, bigPageShift); + kgmmuFmtInitCaps_HAL(pKernelGmmu, pFam->pFmts[b]); + } + } + } + + return NV_OK; +} + +/*! + * Retrieve GMMU format family based on version. + */ +const GMMU_FMT_FAMILY * +kgmmuFmtGetFamily_IMPL(KernelGmmu *pKernelGmmu, NvU32 version) +{ + NvU32 v; + + // Find a matching format. + for (v = GMMU_FMT_MAX_VERSION_COUNT; v > 0; --v) + { + if (0 == version) + { + // Pick newest default version if none requested. + if (NULL != pKernelGmmu->pFmtFamilies[v - 1]) + { + return pKernelGmmu->pFmtFamilies[v - 1]; + } + } + else if (g_gmmuFmtVersions[v - 1] == version) + { + return pKernelGmmu->pFmtFamilies[v - 1]; + } + } + + return NULL; +} + +/*! + * Returns GMMU settings that are static after GPU state init/load is + * finished. + */ +const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS * +kgmmuGetStaticInfo_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + // check if state Init has not completed. + NV_ASSERT_OR_ELSE(pKernelGmmu != NULL, return NULL); + + return pKernelGmmu->pStaticInfo; +} + +/*! + * @brief Initializes static info data from the Physical side. + * + * @param pGpu + * @param pKernelGmmu + * @param[out] pStaticInfo pointer to the static info init on Physical driver. + */ +NV_STATUS +kgmmuInitStaticInfo_KERNEL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status; + + status = pRmApi->Control(pRmApi, pGpu->hInternalClient, pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GMMU_GET_STATIC_INFO, + pStaticInfo, sizeof(*pStaticInfo)); + + return status; +} + +/*! + * Retrieve GMMU format based on version and big page size. + */ +const GMMU_FMT * +kgmmuFmtGet_IMPL(KernelGmmu *pKernelGmmu, NvU32 version, NvU64 bigPageSize) +{ + const GMMU_FMT_FAMILY *pFmtFamily = kgmmuFmtGetFamily(pKernelGmmu, version); + + if (NULL != pFmtFamily) + { + NvU32 b; + + // Pick default big page size if none requested. + if (0 == bigPageSize) + { + // + // Retrieve Big Page Size. If it is not yet set, set it to 64K. + // Useful when this method is invoked before big page size is set. + // + if (0 == (bigPageSize = kgmmuGetBigPageSize_HAL(pKernelGmmu))) + bigPageSize = NVBIT64(16); + } + + // Find a matching format. + for (b = 0; b < GMMU_FMT_MAX_BIG_PAGE_SIZES; ++b) + { + if (NVBIT64(g_gmmuFmtBigPageShifts[b]) == bigPageSize) + { + return pFmtFamily->pFmts[b]; + } + } + } + + return NULL; +} + +/*! + * Check if a big page size is supported. + */ +NvBool +kgmmuFmtIsBigPageSizeSupported_IMPL(KernelGmmu *pKernelGmmu, NvU64 bigPageSize) +{ + if (kgmmuIsPerVaspaceBigPageEn(pKernelGmmu)) + { + return NV_TRUE; + } + return kgmmuGetBigPageSize_HAL(pKernelGmmu) == bigPageSize; +} + +/*! + * @bried Returns the latest supported MMU fmt. 
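+ *
+ * Scans g_gmmuFmtVersions for the highest version the HAL reports as
+ * supported and returns the matching format for the default big page
+ * size.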
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelGmmu KernelGmmu pointer + * + * @returns const GMMU_FMT* + */ +const GMMU_FMT* +kgmmuFmtGetLatestSupportedFormat_IMPL(OBJGPU *pGpu, KernelGmmu *pKernelGmmu) +{ + NvU32 v; + NvU32 maxFmtVersionSupported = 0; + + for (v = 0; v < GMMU_FMT_MAX_VERSION_COUNT; ++v) + { + const NvU32 ver = g_gmmuFmtVersions[v]; + if (kgmmuFmtIsVersionSupported_HAL(pKernelGmmu, ver)) + { + maxFmtVersionSupported = maxFmtVersionSupported < ver ? ver : maxFmtVersionSupported; + } + } + + return kgmmuFmtGet(pKernelGmmu, maxFmtVersionSupported, 0); +} + +/*! + * @brief Used for calculating total memory required for page tables + required for translating a given VA range. + * + * @param pGpu + * @param pKernelGmmu + * @param[in] pFmt Pointer to GMMU format + * @param[in] vaBase Start VA + * @param[in] vaLimit End VA + * @param[in] pageSizeLockMask Mask of page sizes locked down at VA reservation + * + * @returns total size of page tables. + */ +NvU64 +kgmmuGetSizeOfPageTables_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + const GMMU_FMT *pFmt, + NvU64 vaBase, + NvU64 vaLimit, + NvU64 pageSizeLockMask +) +{ + const MMU_FMT_LEVEL *pPgTbl = NULL; + NvU64 pgTblSize = 0; + NvU64 numPgTblsCeil; + NvU64 numPgTblsFloor; + NvU64 numEntries; + NvU32 pageShift; + + // Loop over all page table sizes in mask + FOR_EACH_INDEX_IN_MASK(64, pageShift, pageSizeLockMask) + { + pPgTbl = mmuFmtFindLevelWithPageShift(pFmt->pRoot, pageShift); + + // + // Do not consider page directories. They are handled by + // @ref kgmmuGetSizeOfPageDirs. + // + if (!pPgTbl->bPageTable || (pPgTbl->numSubLevels != 0)) + { + continue; + } + + numPgTblsCeil = NV_DIV_AND_CEIL(vaLimit, NVBIT64(pPgTbl->virtAddrBitHi + 1)) - + (vaBase / NVBIT64(pPgTbl->virtAddrBitHi + 1)) + 1; + numPgTblsFloor = vaLimit / NVBIT64(pPgTbl->virtAddrBitHi + 1); + + // If full page tables are not used, allocate only as much as needed. + if (numPgTblsFloor == 0) + { + numEntries = mmuFmtVirtAddrToEntryIndex(pPgTbl, vaLimit) - + mmuFmtVirtAddrToEntryIndex(pPgTbl, vaBase) + 1; + pgTblSize += numEntries * pPgTbl->entrySize; + } + else + { + pgTblSize += numPgTblsCeil * mmuFmtLevelSize(pPgTbl); + } + } + FOR_EACH_INDEX_IN_MASK_END + + return pgTblSize; +} + +/*! + * @brief Used for calculating total memory required for page directories + required for translating a given VA range. + * + * @param pGpu + * @param pKernelGmmu + * @param[in] pFmt Pointer to GMMU format + * @param[in] vaBase Start VA + * @param[in] vaLimit End VA + * + * @returns total size of page directories + */ +NvU64 +kgmmuGetSizeOfPageDirs_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + const GMMU_FMT *pFmt, + NvU64 vaBase, + NvU64 vaLimit, + NvU64 pageSizeLockMask +) +{ + const MMU_FMT_LEVEL *pLevel = NULL; + NvU64 size = 0; + NvU16 i; + + NV_ASSERT_OR_RETURN(pFmt != NULL, 0); + + pLevel = pFmt->pRoot; + + // + // Retain only the lowest set bit + // + // If the lowest set bit corresponds to a leaf page table (4K or 64K), we"ll + // calculate memory for all upper level page directories and if the set bit + // corresponds to an upper level page directory we"ll factor in all levels + // from the root upto that level. + // + pageSizeLockMask = pageSizeLockMask & -((NvS64)pageSizeLockMask); + + // Accumulate size for all Page Directories. 
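+ //
+ // For each level, the number of directory entries needed to span
+ // [vaBase, vaLimit] is ceil(vaLimit / vaPerEntry) - (vaBase / vaPerEntry) + 1,
+ // and the resulting per-level size is rounded up to RM_PAGE_SIZE.
+ //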
+ for (i = 0; i < GMMU_FMT_MAX_LEVELS - 1; i++) + { + NvU64 vaPerEntry = mmuFmtEntryVirtAddrMask(pLevel) + 1; + NvU64 numEntries = NV_DIV_AND_CEIL(vaLimit, vaPerEntry) - + (vaBase / vaPerEntry) + 1; + NvU64 levelSize = numEntries * pLevel->entrySize; + levelSize = NV_ROUNDUP(levelSize, RM_PAGE_SIZE); + + // Stop accumulating size once we are beyond the specified level. + if (mmuFmtLevelPageSize(pLevel) < pageSizeLockMask) + { + break; + } + + size += levelSize; + + // If there's one sublevel choose that. + if (pLevel->numSubLevels == 1) + { + pLevel = &(pLevel->subLevels[0]); + } + else + { + // Choose the 4K page size sublevel. + pLevel = &(pLevel->subLevels[1]); + } + NV_ASSERT_OR_RETURN(pLevel != NULL, 0); + + // Stop accumulating size if we've exhausted all Page Dirs. + if (pLevel->bPageTable && (pLevel->numSubLevels == 0)) + { + break; + } + } + + return size; +} + +/* + * Fill comptag field in PTE. + */ +void kgmmuFieldSetKindCompTags_IMPL +( + KernelGmmu *pGmmu, + const GMMU_FMT *pFmt, + const MMU_FMT_LEVEL *pLevel, + const COMPR_INFO *pCompr, + NvU64 physAddr, + NvU64 surfOffset, + NvU32 pteIndex, + NvU8 *pEntries +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pGmmu); + GMMU_COMPR_INFO comprInfo = {0}; + + comprInfo.compressedKind = pCompr->kind; + comprInfo.compPageShift = pCompr->compPageShift; + + if (memmgrIsKind_HAL(GPU_GET_MEMORY_MANAGER(pGpu), FB_IS_KIND_COMPRESSIBLE, pCompr->kind)) + { + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + if (pCompr->bPhysBasedComptags) + { + NvBool bCallingContextPlugin; + + NV_ASSERT(pMemorySystemConfig->bOneToOneComptagLineAllocation || pMemorySystemConfig->bUseRawModeComptaglineAllocation); + + NV_ASSERT_OR_RETURN_VOID(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin) == NV_OK); + if (IS_VIRTUAL_WITH_SRIOV(pGpu) || bCallingContextPlugin || + pMemorySystemConfig->bUseRawModeComptaglineAllocation) + { + // In raw mode or when SR-IOV is enabled, HW handles compression tags + comprInfo.compTagLineMin = 1; + } + else + { + comprInfo.compTagLineMin = memmgrDetermineComptag_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), physAddr); + } + + comprInfo.compPageIndexLo = surfOffset >> pCompr->compPageShift; + comprInfo.compPageIndexHi = (surfOffset + mmuFmtLevelPageSize(pLevel) - 1) >> pCompr->compPageShift; + comprInfo.compTagLineMultiplier = 1; + } + else + { + comprInfo.compPageIndexLo = pCompr->compPageIndexLo; + comprInfo.compPageIndexHi = pCompr->compPageIndexHi; + comprInfo.compTagLineMin = pCompr->compTagLineMin; + comprInfo.compTagLineMultiplier = pCompr->compTagLineMultiplier; + } + } + + gmmuFmtInitPteCompTags(pFmt, pLevel, &comprInfo, surfOffset, pteIndex, 1, pEntries); +} + +NV_STATUS +kgmmuFaultBufferGetAddressSpace_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index, + NvU32 *pFaultBufferAddrSpace, + NvU32 *pFaultBufferAttr +) +{ + NvU32 faultBufferAddrSpace = ADDR_UNKNOWN; + NvU32 faultBufferAttr = 0; + + NV_ASSERT_OR_RETURN((index < NUM_FAULT_BUFFERS), NV_ERR_INVALID_ARGUMENT); + + if (index == NON_REPLAYABLE_FAULT_BUFFER) + { + faultBufferAddrSpace = ADDR_SYSMEM; + faultBufferAttr = NV_MEMORY_CACHED; + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC_3, _UVM_FAULT_BUFFER_NONREPLAYABLE, pGpu->instLocOverrides3), + "UVM non-replayable fault", &faultBufferAddrSpace, &faultBufferAttr); + } + else if (index == REPLAYABLE_FAULT_BUFFER) + { + faultBufferAddrSpace = ADDR_SYSMEM; + faultBufferAttr = NV_MEMORY_CACHED; + 
memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC_4, _UVM_FAULT_BUFFER_REPLAYABLE, pGpu->instLocOverrides4), + "UVM replayable fault", &faultBufferAddrSpace, &faultBufferAttr); + } + + if (pFaultBufferAddrSpace != NULL) + { + *pFaultBufferAddrSpace = faultBufferAddrSpace; + } + + if (pFaultBufferAttr != NULL) + { + *pFaultBufferAttr = faultBufferAttr; + } + + return NV_OK; +} + +NV_STATUS +kgmmuFaultBufferCreateMemDesc_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index, + NvU32 faultBufferSize, + NvU64 memDescFlags, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvU32 faultBufferAddrSpace = ADDR_UNKNOWN; + NvU32 faultBufferAttr = 0; + NvBool isContiguous = NV_FALSE; + + NV_ASSERT_OR_RETURN((index < NUM_FAULT_BUFFERS), NV_ERR_INVALID_ARGUMENT); + + status = kgmmuFaultBufferGetAddressSpace(pGpu, pKernelGmmu, index, + &faultBufferAddrSpace, &faultBufferAttr); + if (status != NV_OK) + { + return status; + } + + if (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + // Allocate contiguous fault buffers for SR-IOV Heavy + isContiguous = NV_TRUE; + } + + status = memdescCreate(&pMemDesc, pGpu, + RM_PAGE_ALIGN_UP(faultBufferSize), 0, isContiguous, + faultBufferAddrSpace, faultBufferAttr, + (memDescFlags | MEMDESC_FLAGS_LOST_ON_SUSPEND)); + if (status != NV_OK) + { + return status; + } + + // + // GPU doesn't read faultbuffer memory, so if faultBuffers are in sysmem, ensure that GpuCacheAttr + // is set to UNCACHED as having a vol bit set in PTEs will ensure HUB uses L2Bypass mode and it will + // save extra cycles to cache in L2 while MMU will write fault packets. + // + if (faultBufferAddrSpace == ADDR_SYSMEM && + pKernelGmmu->getProperty(pKernelGmmu, PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED)) + { + memdescSetGpuCacheAttrib(pMemDesc, NV_MEMORY_UNCACHED); + } + + *ppMemDesc = pMemDesc; + + return NV_OK; +} + +NV_STATUS +kgmmuFaultBufferUnregister_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index +) +{ + struct HW_FAULT_BUFFER *pFaultBuffer; + MEMORY_DESCRIPTOR *pMemDesc; + + pFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[index]; + pMemDesc = pFaultBuffer->pFaultBufferMemDesc; + + pFaultBuffer->faultBufferSize = 0; + pFaultBuffer->pFaultBufferMemDesc = NULL; + + memdescDestroy(pMemDesc); + + return NV_OK; +} + +NV_STATUS +kgmmuFaultBufferAlloc_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index, + NvU32 faultBufferSize +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + struct HW_FAULT_BUFFER *pFaultBuffer; + + NV_ASSERT_OR_RETURN((index < NUM_FAULT_BUFFERS), NV_ERR_INVALID_ARGUMENT); + + if (pKernelGmmu->getProperty(pKernelGmmu, PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED)) + return NV_OK; + + pFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[index]; + + status = kgmmuFaultBufferCreateMemDesc(pGpu, pKernelGmmu, index, faultBufferSize, + MEMDESC_FLAGS_NONE, &pMemDesc); + if (status != NV_OK) + { + return status; + } + + status = memdescAlloc(pMemDesc); + if (status != NV_OK) + { + memdescDestroy(pMemDesc); + return status; + } + + pFaultBuffer->faultBufferSize = faultBufferSize; + pFaultBuffer->pFaultBufferMemDesc = pMemDesc; + + return status; +} + +NV_STATUS +kgmmuFaultBufferFree_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 index +) +{ + struct HW_FAULT_BUFFER *pFaultBuffer; + + NV_ASSERT_OR_RETURN((index < NUM_FAULT_BUFFERS), NV_ERR_INVALID_ARGUMENT); + + if (pKernelGmmu->getProperty(pKernelGmmu, 
PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED)) + return NV_OK; + + pFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[index]; + + memdescFree(pFaultBuffer->pFaultBufferMemDesc); + + kgmmuFaultBufferUnregister(pGpu, pKernelGmmu, index); + + return NV_OK; +} + +NV_STATUS +kgmmuFaultBufferReplayableAllocate_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvHandle hClient, + NvHandle hObject +) +{ + NV_STATUS status; + struct HW_FAULT_BUFFER *pFaultBuffer; + NvU32 faultBufferSize; + NvU32 numBufferPages; + const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo = kgmmuGetStaticInfo(pGpu, pKernelGmmu); + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + pKernelGmmu->getProperty(pKernelGmmu, PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED)) + { + return NV_OK; + } + + pFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER]; + if (pFaultBuffer->pFaultBufferMemDesc != NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + faultBufferSize = pStaticInfo->replayableFaultBufferSize; + + status = kgmmuFaultBufferAlloc(pGpu, pKernelGmmu, + REPLAYABLE_FAULT_BUFFER, + faultBufferSize); + if (status != NV_OK) + { + return status; + } + + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_GMMU_REGISTER_FAULT_BUFFER_PARAMS *pParams; + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + kgmmuFaultBufferFree(pGpu, pKernelGmmu, REPLAYABLE_FAULT_BUFFER); + return NV_ERR_NO_MEMORY; + } + portMemSet(pParams, 0, sizeof(*pParams)); + + numBufferPages = RM_PAGE_ALIGN_UP(faultBufferSize) / RM_PAGE_SIZE; + if (numBufferPages > NV_ARRAY_ELEMENTS(pParams->faultBufferPteArray)) + { + portMemFree(pParams); + kgmmuFaultBufferFree(pGpu, pKernelGmmu, REPLAYABLE_FAULT_BUFFER); + return NV_ERR_BUFFER_TOO_SMALL; + } + + memdescGetPhysAddrs(pFaultBuffer->pFaultBufferMemDesc, + AT_GPU, 0, RM_PAGE_SIZE, + numBufferPages, pParams->faultBufferPteArray); + + pParams->hClient = hClient; + pParams->hObject = hObject; + pParams->faultBufferSize = faultBufferSize; + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_FAULT_BUFFER, + pParams, sizeof(*pParams)); + + portMemFree(pParams); + if (status != NV_OK) + { + kgmmuFaultBufferFree(pGpu, pKernelGmmu, REPLAYABLE_FAULT_BUFFER); + return status; + } + } + + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferClient = hClient; + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferObject = hObject; + + return NV_OK; +} + +NV_STATUS +kgmmuFaultBufferReplayableDestroy_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NV_STATUS status = NV_OK; + struct HW_FAULT_BUFFER *pFaultBuffer; + + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + pKernelGmmu->getProperty(pKernelGmmu, PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED)) + { + return NV_OK; + } + + pFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER]; + if (pFaultBuffer->pFaultBufferMemDesc == NULL) + { + return NV_OK; + } + + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferClient = 0; + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hFaultBufferObject = 0; + + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_FAULT_BUFFER, + NULL, 0); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unregistering Replayable Fault buffer failed (status=0x%08x), 
proceeding...\n", + status); + } + } + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + status = kgmmuFaultBufferUnregister(pGpu, pKernelGmmu, REPLAYABLE_FAULT_BUFFER); + } + else + { + status = kgmmuFaultBufferFree(pGpu, pKernelGmmu, REPLAYABLE_FAULT_BUFFER); + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Destroying Replayable Fault buffer failed (status=0x%08x), proceeding...\n", + status); + } + + return NV_OK; +} + +/*! + * @brief: Encodes peer addresses to support NVSwitch systems. + * + * This function prepends the fabricBaseAddress to a physical address in order + * to generate a unique peer address from the global fabric address space. + * + * @param[in] pAddresses : Array of physical addresses to be encoded. + * @param[in] fabricBaseAddress : Unique fabric base address. + * @param[in] count : Count if physical addresses. + */ +static void +_kgmmuEncodePeerAddrs +( + NvU64 *pAddresses, + NvU64 fabricBaseAddress, + NvU64 count +) +{ + NvU64 i; + + // + // If there is no fabric address, it should be a NOP. Note, this acts as an + // early complete path for other PEER addressing. + // + if (fabricBaseAddress == NVLINK_INVALID_FABRIC_ADDR) + { + return; + } + + for (i = 0; i < count; i++) + { + pAddresses[i] = fabricBaseAddress + pAddresses[i]; + } +} + +void +kgmmuEncodePhysAddrs_IMPL +( + KernelGmmu *pKernelGmmu, + const GMMU_APERTURE aperture, + NvU64 *pAddresses, + NvU64 fabricBaseAddress, + NvU64 count +) +{ + NV_ASSERT(aperture != GMMU_APERTURE_INVALID); + + if (aperture == GMMU_APERTURE_SYS_COH || + aperture == GMMU_APERTURE_SYS_NONCOH) + { + kgmmuEncodeSysmemAddrs_HAL(pKernelGmmu, pAddresses, count); + } + else if (aperture == GMMU_APERTURE_PEER) + { + _kgmmuEncodePeerAddrs(pAddresses, fabricBaseAddress, count); + } + else + { + return; + } +} + +NvU64 +kgmmuEncodePhysAddr_IMPL +( + KernelGmmu *pKernelGmmu, + const GMMU_APERTURE aperture, + NvU64 physAddr, + NvU64 fabricBaseAddress +) +{ + kgmmuEncodePhysAddrs(pKernelGmmu, aperture, &physAddr, fabricBaseAddress, 1); + return physAddr; +} + +static void +_kgmmuClientShadowBufferQueueCopyData +( + NvLength msgSize, + NvLength opIdx, + QueueContext *pCtx, + void *pData, + NvLength count, + NvBool bCopyIn +) +{ + NvLength size; + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer = pCtx->pData; + NvU8 *pQueueData, *pClientData = pData; + void *pDst, *pSrc; + + if (count == 0) + return; + + size = count * msgSize; + pQueueData = KERNEL_POINTER_FROM_NvP64(NvU8 *, pClientShadowFaultBuffer->pBufferAddress); + pQueueData = pQueueData + (opIdx * msgSize); + + pDst = bCopyIn ? pQueueData : pClientData; + pSrc = bCopyIn ? 
pClientData : pQueueData; + portMemCopy(pDst, size, pSrc, size); +} + +static NV_STATUS +_kgmmuClientShadowFaultBufferQueueAllocate +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NV_STATUS status; + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + MEMORY_DESCRIPTOR *pQueueMemDesc; + + pClientShadowFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].clientShadowFaultBuffer; + + status = memdescCreate(&pQueueMemDesc, pGpu, + sizeof(GMMU_SHADOW_FAULT_BUF), RM_PAGE_SIZE, + NV_TRUE, ADDR_SYSMEM, NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE); + if (status != NV_OK) + { + return status; + } + + status = memdescAlloc(pQueueMemDesc); + if (status != NV_OK) + { + memdescDestroy(pQueueMemDesc); + return status; + } + + status = memdescMap(pQueueMemDesc, 0, + memdescGetSize(pQueueMemDesc), + NV_TRUE, NV_PROTECT_READ_WRITE, + &pClientShadowFaultBuffer->pQueueAddress, + &pClientShadowFaultBuffer->pQueuePriv); + if (status != NV_OK) + { + memdescFree(pQueueMemDesc); + memdescDestroy(pQueueMemDesc); + return status; + } + + pClientShadowFaultBuffer->queueContext.pCopyData = _kgmmuClientShadowBufferQueueCopyData; + pClientShadowFaultBuffer->queueContext.pData = pClientShadowFaultBuffer; + pClientShadowFaultBuffer->pQueueMemDesc = pQueueMemDesc; + + return NV_OK; +} + +void +kgmmuClientShadowFaultBufferQueueDestroy_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvBool bFreeQueue +) +{ + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + MEMORY_DESCRIPTOR *pQueueMemDesc; + + pClientShadowFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].clientShadowFaultBuffer; + + pQueueMemDesc = pClientShadowFaultBuffer->pQueueMemDesc; + + pClientShadowFaultBuffer->pQueueMemDesc = NULL; + pClientShadowFaultBuffer->pQueueAddress = NvP64_NULL; + pClientShadowFaultBuffer->pQueuePriv = NvP64_NULL; + + if (bFreeQueue) + { + memdescFree(pQueueMemDesc); + } + memdescDestroy(pQueueMemDesc); +} + +static NV_STATUS +_kgmmuClientShadowFaultBufferPagesAllocate +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvU32 shadowFaultBufferSize +) +{ + NV_STATUS status; + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + MEMORY_DESCRIPTOR *pMemDesc; + + pClientShadowFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].clientShadowFaultBuffer; + + shadowFaultBufferSize = RM_PAGE_ALIGN_UP(shadowFaultBufferSize); + + status = memdescCreate(&pMemDesc, pGpu, + shadowFaultBufferSize, RM_PAGE_SIZE, + NV_FALSE, ADDR_SYSMEM, NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE); + if (status != NV_OK) + { + return status; + } + + status = memdescAlloc(pMemDesc); + if (status != NV_OK) + { + memdescDestroy(pMemDesc); + return status; + } + + status = memdescMap(pMemDesc, 0, + memdescGetSize(pMemDesc), + NV_TRUE, NV_PROTECT_READ_WRITE, + &pClientShadowFaultBuffer->pBufferAddress, + &pClientShadowFaultBuffer->pBufferPriv); + if (status != NV_OK) + { + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + return status; + } + + pClientShadowFaultBuffer->pBufferMemDesc = pMemDesc; + + return NV_OK; +} + +void +kgmmuClientShadowFaultBufferPagesDestroy_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvBool bFreePages +) +{ + MEMORY_DESCRIPTOR *pMemDesc; + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + GMMU_FAULT_BUFFER_PAGE *pBufferPage; + NvU32 i; + + pClientShadowFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].clientShadowFaultBuffer; + pMemDesc = pClientShadowFaultBuffer->pBufferMemDesc; + + if (bFreePages) + { + memdescUnmap(pMemDesc, + NV_TRUE, osGetCurrentProcess(), + 
pClientShadowFaultBuffer->pBufferAddress, + pClientShadowFaultBuffer->pBufferPriv); + + memdescFree(pMemDesc); + } + else + { + for (i = 0; i < pClientShadowFaultBuffer->numBufferPages; i++) + { + pBufferPage = &pClientShadowFaultBuffer->pBufferPages[i]; + + memdescUnmap(pMemDesc, NV_TRUE, osGetCurrentProcess(), + pBufferPage->pAddress, pBufferPage->pPriv); + } + portMemFree(pClientShadowFaultBuffer->pBufferPages); + } + memdescDestroy(pMemDesc); +} + +NV_STATUS +kgmmuClientShadowFaultBufferRegister_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NV_STATUS status; + struct GMMU_FAULT_BUFFER *pFaultBuffer; + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + GMMU_SHADOW_FAULT_BUF *pQueue; + MEMORY_DESCRIPTOR *pBufferMemDesc; + RmPhysAddr shadowFaultBufferQueuePhysAddr; + NvU32 queueCapacity, numBufferPages; + NvU32 faultBufferSize; + const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo = kgmmuGetStaticInfo(pGpu, pKernelGmmu); + + pFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF]; + pClientShadowFaultBuffer = &pFaultBuffer->clientShadowFaultBuffer; + faultBufferSize = pStaticInfo->nonReplayableFaultBufferSize; + + pQueue = KERNEL_POINTER_FROM_NvP64(GMMU_SHADOW_FAULT_BUF *, + pClientShadowFaultBuffer->pQueueAddress); + queueCapacity = faultBufferSize / NVC369_BUF_SIZE; + + status = queueInitNonManaged(pQueue, queueCapacity); + if (status != NV_OK) + { + return status; + } + + if (!IS_GSP_CLIENT(pGpu)) + { + portSyncSpinlockAcquire(pFaultBuffer->pShadowFaultBufLock); + + if (pFaultBuffer->pClientShadowFaultBuffer == NULL) + { + pFaultBuffer->pClientShadowFaultBuffer = pClientShadowFaultBuffer; + } + else + { + status = NV_ERR_NOT_SUPPORTED; + } + + portSyncSpinlockRelease(pFaultBuffer->pShadowFaultBufLock); + + if (status != NV_OK) + { + queueDestroy(pQueue); + return status; + } + } + else + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER_PARAMS *pParams; + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + queueDestroy(pQueue); + return NV_ERR_NO_MEMORY; + } + portMemSet(pParams, 0, sizeof(*pParams)); + + pBufferMemDesc = pClientShadowFaultBuffer->pBufferMemDesc; + numBufferPages = memdescGetSize(pBufferMemDesc) >> RM_PAGE_SHIFT; + if (numBufferPages > NV_ARRAY_ELEMENTS(pParams->shadowFaultBufferPteArray)) + { + portMemFree(pParams); + queueDestroy(pQueue); + return NV_ERR_BUFFER_TOO_SMALL; + } + + shadowFaultBufferQueuePhysAddr = memdescGetPhysAddr(pClientShadowFaultBuffer->pQueueMemDesc, + AT_GPU, 0); + + memdescGetPhysAddrs(pBufferMemDesc, + AT_GPU, + 0, RM_PAGE_SIZE, + numBufferPages, pParams->shadowFaultBufferPteArray); + + pParams->shadowFaultBufferQueuePhysAddr = shadowFaultBufferQueuePhysAddr; + pParams->shadowFaultBufferSize = faultBufferSize; + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER, + pParams, sizeof(*pParams)); + + portMemFree(pParams); + if (status != NV_OK) + { + queueDestroy(pQueue); + return status; + } + + pFaultBuffer->pClientShadowFaultBuffer = pClientShadowFaultBuffer; + } + + return NV_OK; +} + +void +kgmmuClientShadowFaultBufferUnregister_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NV_STATUS status = NV_OK; + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + GMMU_SHADOW_FAULT_BUF *pQueue; + struct GMMU_FAULT_BUFFER *pFaultBuffer; + + pFaultBuffer = &pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF]; + + 
if (!IS_GSP_CLIENT(pGpu)) + { + portSyncSpinlockAcquire(pFaultBuffer->pShadowFaultBufLock); + + pFaultBuffer->pClientShadowFaultBuffer = NULL; + + portSyncSpinlockRelease(pFaultBuffer->pShadowFaultBufLock); + } + else + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER, + NULL, 0); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unregistering Replayable Fault buffer failed (status=0x%08x), proceeding...\n", + status); + } + + pFaultBuffer->pClientShadowFaultBuffer = NULL; + } + + pClientShadowFaultBuffer = &pFaultBuffer->clientShadowFaultBuffer; + pQueue = KERNEL_POINTER_FROM_NvP64(GMMU_SHADOW_FAULT_BUF *, + pClientShadowFaultBuffer->pQueueAddress); + queueDestroy(pQueue); +} + +/*! + * @brief Creates shadow fault buffer for client handling of non-replayable + * faults in the CPU-RM, and registers it in the GSP-RM. + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * + * @returns + */ +NV_STATUS +kgmmuClientShadowFaultBufferNonreplayableAllocate_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + NV_STATUS status; + const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo = kgmmuGetStaticInfo(pGpu, pKernelGmmu); + + ct_assert((RM_PAGE_SIZE % sizeof(struct GMMU_FAULT_PACKET)) == 0); + + NV_ASSERT_OR_RETURN(!pKernelGmmu->getProperty(pKernelGmmu, PDB_PROP_KGMMU_FAULT_BUFFER_DISABLED), NV_ERR_INVALID_STATE); + + NV_ASSERT_OR_RETURN(pStaticInfo->nonReplayableFaultBufferSize != 0, NV_ERR_INVALID_STATE); + + status = _kgmmuClientShadowFaultBufferQueueAllocate(pGpu, pKernelGmmu); + if (status != NV_OK) + { + return status; + } + + status = _kgmmuClientShadowFaultBufferPagesAllocate(pGpu, pKernelGmmu, + pStaticInfo->nonReplayableFaultBufferSize); + if (status != NV_OK) + { + goto destroy_queue_and_exit; + } + + status = kgmmuClientShadowFaultBufferRegister(pGpu, pKernelGmmu); + if (status != NV_OK) + { + goto destroy_pages_and_exit; + } + + return NV_OK; + +destroy_pages_and_exit: + kgmmuClientShadowFaultBufferPagesDestroy(pGpu, pKernelGmmu, NV_TRUE); +destroy_queue_and_exit: + kgmmuClientShadowFaultBufferQueueDestroy(pGpu, pKernelGmmu, NV_TRUE); + return status; +} + +/*! + * @brief Unregister client shadow fault buffer in the GSP-RM or destroy + * it in the CPU-RM. + * + * @param[in] pGpu + * @param[in] pKernelGmmu + * + * @returns + */ +NV_STATUS +kgmmuClientShadowFaultBufferNonreplayableDestroy_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu +) +{ + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + NvBool bFreeMemory = !RMCFG_FEATURE_PLATFORM_GSP; + + pClientShadowFaultBuffer = pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].pClientShadowFaultBuffer; + + if (pClientShadowFaultBuffer != NvP64_NULL) + { + kgmmuClientShadowFaultBufferUnregister(pGpu, pKernelGmmu); + + kgmmuClientShadowFaultBufferPagesDestroy(pGpu, pKernelGmmu, bFreeMemory); + kgmmuClientShadowFaultBufferQueueDestroy(pGpu, pKernelGmmu, bFreeMemory); + } + + return NV_OK; +} + +/*! + * Returns the minimum allocation size to align to big-page size in bytes + * + * @param[in] pKernelGmmu + * + * @return NvU32 + */ +NvU32 +kgmmuGetMinBigPageSize_IMPL(KernelGmmu *pKernelGmmu) +{ + // + // Set the minimum size in the heap that we will round up to a big page instead + // just 4KB. HW doesn't like 4KB pages in video memory, but SW wants to pack + // physical memory sometimes. 
Typically UMDs that really care about perf use + // suballocation for larger RM allocations anyway. + // + // Promote allocates bigger than half the big page size. + // (this is a policy change for Big page sizes/VASpace) + // + return RM_PAGE_SIZE_64K >> 1; +} + +/*! + * @brief Initializes the init block for an engine + * + * @param[in] pKernelGmmu + * @param[in] pInstBlkDesc Memory descriptor for the instance block of the engine + * @param[in] pVAS OBJVASPACE pointer of the engine + * @param[in] subctxId subctxId Value + * @param[in] pInstBlkParams Pointer to the structure storing the parameters passed by the caller + * + * @returns NV_STATUS + */ +NV_STATUS +kgmmuInstBlkInit_IMPL +( + KernelGmmu *pKernelGmmu, + MEMORY_DESCRIPTOR *pInstBlkDesc, + OBJVASPACE *pVAS, + NvU32 subctxId, + INST_BLK_INIT_PARAMS *pInstBlkParams +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelGmmu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU8 *pInstBlk; // CPU VA of instance block. + NvU64 vaLimitData; + NvU32 vaLimitOffset; + NvU32 dirBaseHiOffset; + NvU32 dirBaseHiData; + NvU32 dirBaseLoOffset; + NvU32 dirBaseLoData; + NvU32 atsOffset; + NvU32 atsData; + NV_STATUS status = NV_OK; + + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + // Get VA limit + status = kgmmuInstBlkVaLimitGet_HAL(pKernelGmmu, pVAS, subctxId, pInstBlkParams, &vaLimitOffset, &vaLimitData); + NV_ASSERT_OR_RETURN((status == NV_OK), status); + + // Get page dir base + NV_ASSERT_OK_OR_RETURN(kgmmuInstBlkPageDirBaseGet_HAL(pGpu, pKernelGmmu, + pVAS, pInstBlkParams, subctxId, + &dirBaseLoOffset, &dirBaseLoData, &dirBaseHiOffset, &dirBaseHiData)); + + if ((pVAS != NULL) && vaspaceIsAtsEnabled(pVAS)) + { + // Coherent link ATS parameters are only set on the new VMM path. + status = kgmmuInstBlkAtsGet_HAL(pKernelGmmu, pVAS, subctxId, &atsOffset, &atsData); + NV_ASSERT_OR_RETURN((status == NV_OK), status); + } + else + { + atsOffset = 0; + atsData = 0; + } + // Write the fields out + pInstBlk = pInstBlkParams->pInstBlk; + + if (pInstBlk != NULL) + { + if (vaLimitOffset != 0) + { + // TO DO: FMODEL fails with MEM_WR64 + if (IS_SIMULATION(pGpu)) + { + MEM_WR32(pInstBlk + vaLimitOffset + 0, NvU64_LO32(vaLimitData)); + MEM_WR32(pInstBlk + vaLimitOffset + 4, NvU64_HI32(vaLimitData)); + } + else + { + MEM_WR64(pInstBlk + vaLimitOffset, vaLimitData); + } + } + + MEM_WR32(pInstBlk + dirBaseHiOffset, dirBaseHiData); + MEM_WR32(pInstBlk + dirBaseLoOffset, dirBaseLoData); + + if (atsOffset != 0) + MEM_WR32(pInstBlk + atsOffset, atsData); + } + else + { + pInstBlk = kbusMapRmAperture_HAL(pGpu, pInstBlkDesc); + if (pInstBlk == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (vaLimitOffset != 0) + { + // TO DO: FMODEL fails with MEM_WR64 + if (IS_SIMULATION(pGpu)) + { + MEM_WR32(pInstBlk + vaLimitOffset + 0, NvU64_LO32(vaLimitData)); + MEM_WR32(pInstBlk + vaLimitOffset + 4, NvU64_HI32(vaLimitData)); + } + else + { + MEM_WR64(pInstBlk + vaLimitOffset, vaLimitData); + } + } + + MEM_WR32(pInstBlk + dirBaseHiOffset, dirBaseHiData); + MEM_WR32(pInstBlk + dirBaseLoOffset, dirBaseLoData); + + if (atsOffset != 0) + MEM_WR32(pInstBlk + atsOffset, atsData); + + kbusUnmapRmAperture_HAL(pGpu, pInstBlkDesc, &pInstBlk, NV_FALSE); + } + + if (!pInstBlkParams->bDeferFlush) + { + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_USE_PCIE_READ + | kbusGetFlushAperture(pKernelBus, memdescGetAddressSpace(pInstBlkDesc))); + } + + return status; +} + +GMMU_APERTURE +kgmmuGetExternalAllocAperture_IMPL +( + NvU32 addressSpace +) +{ + switch (addressSpace) + { + case 
ADDR_FBMEM: + return GMMU_APERTURE_VIDEO; + case ADDR_FABRIC: + case ADDR_FABRIC_V2: + return GMMU_APERTURE_PEER; + case ADDR_SYSMEM: + case ADDR_VIRTUAL: + return GMMU_APERTURE_SYS_COH; + default: + NV_PRINTF(LEVEL_ERROR, "Unexpected addressSpace (%u) when mapping to GMMU_APERTURE.\n", + addressSpace); + NV_ASSERT(0); + return GMMU_APERTURE_SYS_COH; + } +} + +/*! + * @brief + * + * @param pGpu + * @param pKernelGmmu + * @param bOwnedByRm + */ +void +kgmmuAccessCntrChangeIntrOwnership_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + NvBool bOwnedByRm +) +{ + // + // Disable the interrupt when RM loses the ownership and enable it back when + // RM regains it. nvUvmInterfaceOwnAccessCntIntr() will rely on this behavior. + // + if (bOwnedByRm) + pKernelGmmu->uvmSharedIntrRmOwnsMask |= RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY; + else + pKernelGmmu->uvmSharedIntrRmOwnsMask &= ~RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY; +} + +/** + * @brief Provides an opportunity to register some IntrService during intrStateInit. + */ +void +kgmmuRegisterIntrService_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX] +) +{ + NvU32 engineIdx; + + static NvU16 engineIdxList[] = { + MC_ENGINE_IDX_REPLAYABLE_FAULT, + MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR, + }; + + for (NvU32 tableIdx = 0; tableIdx < NV_ARRAY_ELEMENTS32(engineIdxList); tableIdx++) + { + engineIdx = engineIdxList[tableIdx]; + NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL); + pRecords[engineIdx].pInterruptService = staticCast(pKernelGmmu, IntrService); + } +} + +/** + * @brief Service stall interrupts. + * + * @returns Zero, or any implementation-chosen nonzero value. If the same nonzero value is returned enough + * times the interrupt is considered stuck. + */ +NvU32 +kgmmuServiceInterrupt_IMPL +( + OBJGPU *pGpu, + KernelGmmu *pKernelGmmu, + IntrServiceServiceInterruptArguments *pParams +) +{ + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pParams != NULL, 0); + + switch (pParams->engineIdx) + { + case MC_ENGINE_IDX_REPLAYABLE_FAULT: + { + NV_STATUS status = kgmmuServiceReplayableFault_HAL(pGpu, pKernelGmmu); + if (status != NV_OK) + { + NV_ASSERT_OK_FAILED("Failed to service replayable MMU fault error", + status); + } + break; + } + case MC_ENGINE_IDX_REPLAYABLE_FAULT_ERROR: + { + status = kgmmuReportFaultBufferOverflow_HAL(pGpu, pKernelGmmu); + if (status != NV_OK) + { + NV_ASSERT_OK_FAILED( + "Failed to report replayable MMU fault buffer overflow error", + status); + } + break; + } + default: + { + NV_ASSERT_FAILED("Invalid engineIdx"); + break; + } + } + + return 0; +} + +/*! + * @brief Extract the PTE FIELDS from the PTE and + * set the corresponding flags/fields in pParams. 
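+ *
+ * Decodes the valid bit, aperture, GPU cacheability, shader access,
+ * read-only flag, comptag line and kind from the PTE, along with the
+ * comptags setting derived from the kind.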
+ * + * @param[in] pKernelGmmu + * @param[in] pPte Pointer to the PTE contents + * @param[out] pPteInfo Pointer to the PTE info structure + * @param[in] pFmt NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK pointer to cmd params + * @param[in] pLevelFmt Format of the level + * + * + * @returns none + */ + +void +kgmmuExtractPteInfo_IMPL +( + KernelGmmu *pKernelGmmu, + GMMU_ENTRY_VALUE *pPte, + NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *pPteInfo, + const GMMU_FMT *pFmt, + const MMU_FMT_LEVEL *pLevelFmt +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelGmmu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + const GMMU_FMT_PTE *pFmtPte = pFmt->pPte; + NvBool bPteValid; + + bPteValid = nvFieldGetBool(&pFmtPte->fldValid, pPte->v8); + + pPteInfo->pteFlags = FLD_SET_DRF_NUM(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_VALID, + bPteValid, pPteInfo->pteFlags); + + switch (gmmuFieldGetAperture(&pFmtPte->fldAperture, pPte->v8)) + { + case GMMU_APERTURE_VIDEO: + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_APERTURE, + _VIDEO_MEMORY, pPteInfo->pteFlags); + break; + case GMMU_APERTURE_PEER: + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_APERTURE, + _PEER_MEMORY, pPteInfo->pteFlags); + break; + case GMMU_APERTURE_SYS_COH: + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_APERTURE, + _SYSTEM_COHERENT_MEMORY, pPteInfo->pteFlags); + break; + case GMMU_APERTURE_SYS_NONCOH: + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_APERTURE, + _SYSTEM_NON_COHERENT_MEMORY, pPteInfo->pteFlags); + break; + case GMMU_APERTURE_INVALID: + default: + NV_ASSERT(0); + break; + } + + { + pPteInfo->pteFlags = FLD_SET_DRF_NUM(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_GPU_CACHED, + !nvFieldGetBool(&pFmtPte->fldVolatile, pPte->v8), pPteInfo->pteFlags); + + if (nvFieldIsValid32(&pFmtPte->fldReadDisable.desc) && + nvFieldIsValid32(&pFmtPte->fldWriteDisable.desc)) + { + if (nvFieldGetBool(&pFmtPte->fldWriteDisable, pPte->v8)) + { + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, + _PARAMS_FLAGS_SHADER_ACCESS, _READ_ONLY, pPteInfo->pteFlags); + } + else if (nvFieldGetBool(&pFmtPte->fldReadDisable, pPte->v8)) + { + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, + _PARAMS_FLAGS_SHADER_ACCESS, _WRITE_ONLY, pPteInfo->pteFlags); + } + else + { + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, + _PARAMS_FLAGS_SHADER_ACCESS, _READ_WRITE, pPteInfo->pteFlags); + } + } + else + { + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_SHADER_ACCESS, + _NOT_SUPPORTED, pPteInfo->pteFlags); + } + + pPteInfo->pteFlags = FLD_SET_DRF_NUM(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_READ_ONLY, + nvFieldGetBool(&pFmtPte->fldReadOnly, pPte->v8), pPteInfo->pteFlags); + + // Get comptagline + pPteInfo->comptagLine = nvFieldGet32(&pFmtPte->fldCompTagLine, pPte->v8); + } + + // Get kind + pPteInfo->kind = nvFieldGet32(&pFmtPte->fldKind, pPte->v8); + + // + // Decode the comptags value from kind. GF100 only supports 2 bits per rop tile, + // but future chips will use the other layouts. 
+ // + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE_1, pPteInfo->kind)) + { + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_COMPTAGS, _1, pPteInfo->pteFlags); + } + else if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE_2, pPteInfo->kind)) + { + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_COMPTAGS, _2, pPteInfo->pteFlags); + } + else if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE_4, pPteInfo->kind)) + { + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_COMPTAGS, _4, pPteInfo->pteFlags); + } + else + { + pPteInfo->pteFlags = FLD_SET_DRF(0080_CTRL, _DMA_PTE_INFO, _PARAMS_FLAGS_COMPTAGS, _NONE, pPteInfo->pteFlags); + } +} diff --git a/src/nvidia/src/kernel/gpu/mmu/mmu_fault_buffer.c b/src/nvidia/src/kernel/gpu/mmu/mmu_fault_buffer.c new file mode 100644 index 000000000..f7b5ef303 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/mmu_fault_buffer.c @@ -0,0 +1,199 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "os/os.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/mmu/mmu_fault_buffer.h" +#include "gpu/device/device.h" +#include "rmapi/client.h" + +NV_STATUS +faultbufConstruct_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pMmuFaultBuffer); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvHandle hFaultBufferClient = pCallContext->pClient->hClient; + NvHandle hFaultBufferObject = pCallContext->pResourceRef->hResource; + RmClient *pRmClient = dynamicCast(pCallContext->pClient, RmClient); + + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + if (!gpuIsClassSupported(pGpu, pParams->externalClassId)) + { + NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", + pParams->externalClassId); + return NV_ERR_INVALID_CLASS; + } + + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + NV_PRINTF(LEVEL_ERROR, "Client is not privileged\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + NV_ASSERT_OR_RETURN(pKernelGmmu != NULL, NV_ERR_NOT_SUPPORTED); + + status = kgmmuFaultBufferReplayableAllocate(pGpu, pKernelGmmu, + hFaultBufferClient, + hFaultBufferObject); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to setup Replayable Fault buffer (status=0x%08x).\n", + status); + return status; + } + + return NV_OK; +} + +void +faultbufDestruct_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pMmuFaultBuffer); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + (void) kgmmuFaultBufferReplayableDestroy(pGpu, pKernelGmmu); +} + +NV_STATUS +faultbufMap_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + // This only maps the replayable fault buffer + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu; + KernelGmmu *pKernelGmmu; + NV_STATUS rmStatus = NV_OK; + NvBool bBroadcast = NV_TRUE; + NvBool bKernel; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + NV_ASSERT_OR_RETURN(pKernelGmmu != NULL, NV_ERR_INVALID_ARGUMENT); + + rmStatus = rmapiValidateKernelMapping(rmclientGetCachedPrivilege(pClient), + pCpuMapping->flags, + &bKernel); + if (rmStatus != NV_OK) + return rmStatus; + + pCpuMapping->processId = osGetCurrentProcess(); + + // Map entire buffer (no offsets supported) + rmStatus = memdescMap(pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER].pFaultBufferMemDesc, + 0, + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER].pFaultBufferMemDesc->Size, + bKernel, + pCpuMapping->pPrivate->protect, + &pCpuMapping->pLinearAddress, + &pCpuMapping->pPrivate->pPriv); + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER].hCpuFaultBuffer = pCpuMapping->pPrivate->pPriv; + + return rmStatus; +} + +NV_STATUS +faultbufUnmap_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + // This only unmaps the replayable fault buffer + + NV_STATUS rmStatus; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu; + KernelGmmu *pKernelGmmu; + NvBool bBroadcast = NV_TRUE; + NvBool bKernel; + + pGpu = 
CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NV_ASSERT_OR_RETURN(pKernelGmmu != NULL, NV_ERR_INVALID_ARGUMENT); + + rmStatus = rmapiValidateKernelMapping(rmclientGetCachedPrivilege(pClient), + pCpuMapping->flags, + &bKernel); + if (rmStatus != NV_OK) + return rmStatus; + + // Unmap it + memdescUnmap(pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER].pFaultBufferMemDesc, + bKernel, + pCpuMapping->processId, + pCpuMapping->pLinearAddress, + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER].hCpuFaultBuffer); + + return NV_OK; +} + +NV_STATUS +faultbufGetMapAddrSpace_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pMmuFaultBuffer); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NV_ADDRESS_SPACE addrSpace; + PMEMORY_DESCRIPTOR pMemDesc; + + pMemDesc = pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].hwFaultBuffers[REPLAYABLE_FAULT_BUFFER].pFaultBufferMemDesc; + if (pMemDesc == NULL) + return NV_ERR_INVALID_OBJECT; + + NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, pMemDesc, mapFlags, &addrSpace)); + if (pAddrSpace) + *pAddrSpace = addrSpace; + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/mmu/mmu_fault_buffer_ctrl.c b/src/nvidia/src/kernel/gpu/mmu/mmu_fault_buffer_ctrl.c new file mode 100644 index 000000000..cc5e27da7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/mmu_fault_buffer_ctrl.c @@ -0,0 +1,109 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" + +#include "gpu/mmu/kern_gmmu.h" +#include "rmapi/control.h" + +#include "ctrl/ctrlc369.h" +#include "gpu/mmu/mmu_fault_buffer.h" + +NV_STATUS +faultbufCtrlCmdMmuFaultBufferRegisterNonReplayBuf_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer, + NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS *pParams +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pMmuFaultBuffer); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const NV2080_CTRL_INTERNAL_GMMU_GET_STATIC_INFO_PARAMS *pStaticInfo = kgmmuGetStaticInfo(pGpu, pKernelGmmu); + GMMU_CLIENT_SHADOW_FAULT_BUFFER *pClientShadowFaultBuffer; + + pClientShadowFaultBuffer = pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].pClientShadowFaultBuffer; + + // This call takes GpuLock, so shadowBuffer pointers can be accessed without grabbing lock protecting them + if (pClientShadowFaultBuffer) + { + NV_PRINTF(LEVEL_ERROR, + "Client shadow fault buffer for non-replayable faults already allocated\n"); + NV_ASSERT(0); + + return NV_ERR_NOT_SUPPORTED; + } + + status = kgmmuClientShadowFaultBufferAlloc_HAL(pGpu, pKernelGmmu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error allocating client shadow fault buffer for non-replayable faults\n"); + + return status; + } + + pClientShadowFaultBuffer = pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].pClientShadowFaultBuffer; + + pParams->pShadowBuffer = pClientShadowFaultBuffer->pQueueAddress; + pParams->pShadowBufferContext = (NvP64) &pClientShadowFaultBuffer->queueContext; + pParams->bufferSize = pStaticInfo->nonReplayableFaultBufferSize; + + return NV_OK; +} + +NV_STATUS +faultbufCtrlCmdMmuFaultBufferUnregisterNonReplayBuf_IMPL +( + MmuFaultBuffer *pMmuFaultBuffer, + NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS *pParams +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pMmuFaultBuffer); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + // This call takes GpuLock, so shadowBuffer pointers can be accessed without grabbing lock protecting them + if (!pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].pClientShadowFaultBuffer) + { + NV_PRINTF(LEVEL_ERROR, + "Client shadow fault buffer for non-replayable faults does not exist\n"); + return NV_OK; + } + + if (pParams->pShadowBuffer != + pKernelGmmu->mmuFaultBuffer[GPU_GFID_PF].pClientShadowFaultBuffer->pQueueAddress) + { + NV_PRINTF(LEVEL_ERROR, + "Given client shadow fault buffer for non-replayable faults does not " + "match with the actual\n"); + } + + status = kgmmuClientShadowFaultBufferFree_HAL(pGpu, pKernelGmmu); + if (status != NV_OK) { + NV_PRINTF(LEVEL_ERROR, + "Error freeing client shadow fault buffer for non-replayable faults\n"); + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/mmu/mmu_trace.c b/src/nvidia/src/kernel/gpu/mmu/mmu_trace.c new file mode 100644 index 000000000..a46bb718a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/mmu_trace.c @@ -0,0 +1,823 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************** HW State Routines ****************************\
+*                                                                            *
+*         Memory Manager Object Function Definitions.                        *
+*                                                                            *
+\****************************************************************************/
+
+#include "gpu/mmu/mmu_trace.h"
+#include "gpu/bus/kern_bus.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "mmu/gmmu_fmt.h"
+#include "gpu/mmu/kern_gmmu.h"
+#include "gpu/mem_mgr/virt_mem_allocator_common.h"
+
+#include "os/os.h"
+#include "mem_mgr/gpu_vaspace.h"
+
+#define MMU_TRACE_INDENT(level)                                 \
+    do {                                                        \
+        NvU32 _level;                                           \
+        for (_level = 0; _level < level + 1; _level++)          \
+            NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, " ");    \
+    } while (0)
+
+typedef struct
+{
+    const void *pFmt;
+    const MMU_FMT_LEVEL *pFmtRoot;
+    const MMU_TRACE_CALLBACKS *pTraceCb;
+} MMU_LAYOUT, *PMMU_LAYOUT;
+
+typedef struct {
+    NvU64  va;
+    NvU64  vaLimit;
+    NvU32  index;
+    NvU32  indexLimit;
+    NvBool bInvalid;
+} MMU_INVALID_RANGE, *PMMU_INVALID_RANGE;
+
+
+typedef NV_STATUS (*MmuTraceCbPte) (OBJGPU *pGpu, const MMU_TRACE_CALLBACKS *pTraceCb, NvU64 va,
+                                    const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPte,
+                                    const MMU_ENTRY *pPte, void *pArg, NvBool valid, NvBool *pDone);
+
+typedef NV_STATUS (*MmuTraceCbPde) (OBJGPU *pGpu, const MMU_TRACE_CALLBACKS *pTraceCb, NvU64 va,
+                                    NvU64 vaLimit, NvU32 level, NvU32 index,
+                                    const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPde, const MMU_ENTRY *pPde,
+                                    NvBool valid, NvBool *pDone, void *pArg);
+
+typedef NV_STATUS (*MmuTraceCbTranslate) (OBJGPU *pGpu, const MMU_TRACE_CALLBACKS *pTraceCb,
+                                          const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPte,
+                                          const MMU_ENTRY *pPte, void *pArg, NvU64 va, NvBool valid,
+                                          NvBool *pDone);
+
+typedef NV_STATUS (*MmuTraceCbDumpMapping) (OBJGPU *pGpu, const MMU_TRACE_CALLBACKS *pTraceCb,
+                                            const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPte,
+                                            const MMU_ENTRY *pPte, void *pArg, NvU64 va, NvU64 vaLimit,
+                                            NvBool valid, NvBool *pDone);
+
+typedef NV_STATUS (*MmuTraceCbValidate) (NvBool valid, void *pArg, NvU64 entryVa, NvU64 entryVaLimit,
+                                         NvBool *pDone);
+
+typedef struct {
+    MmuTraceCbPte         pteFunc;
+    MmuTraceCbTranslate   translateFunc;
+    MmuTraceCbDumpMapping dumpMappingFunc;
+    MmuTraceCbValidate    validateFunc;
+    MmuTraceCbPde         pdeFunc;
+    NvU64                 vaArg;
+    PMMU_TRACE_ARG        pArg;
+} MMU_TRACE_INFO, *PMMU_TRACE_INFO;
+
+static void _mmuInitLayout (OBJGPU *pGpu, OBJVASPACE *pVAS, PMMU_LAYOUT pLayout);
+
+static NV_STATUS
_mmuTraceWalk(OBJGPU *pGpu, PMMU_LAYOUT pLayout, MMU_WALK *pWalk, NvU32 level, const MMU_FMT_LEVEL *pFmtLevel, + PMEMORY_DESCRIPTOR pMemDesc, NvU64 va, NvU64 vaLimit, PMMU_TRACE_INFO pInfo, + NvBool *pDone, NvBool verbose); + +static NV_STATUS _mmuTracePteCallback(OBJGPU *pGpu, const MMU_TRACE_CALLBACKS *pTraceCb, NvU64 va, + const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPte, + const MMU_ENTRY *pPte, void *pArg, NvBool valid, NvBool *pDone); + +static NV_STATUS _mmuTraceTranslateCallback(OBJGPU *Gpu, const MMU_TRACE_CALLBACKS *pTraceCb, + const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPte, + const MMU_ENTRY *pPte, void *pArg, NvU64 va, NvBool valid, + NvBool *pDone); + +static NV_STATUS _mmuTraceDumpMappingCallback(OBJGPU *pGpu, const MMU_TRACE_CALLBACKS *pTraceCb, + const MMU_FMT_LEVEL *pFmtLevel, const void *pFmtPte, + const MMU_ENTRY *pPte, void *pArg, NvU64 va, NvU64 vaLimit, + NvBool valid, NvBool *pDone); + +static NV_STATUS _mmuTraceValidateCallback(NvBool valid, void *pArg, NvU64 entryVa, NvU64 entryVaLimit, + NvBool *pDone); + +static NV_STATUS +mmuTraceWalk +( + OBJGPU *pGpu, + OBJVASPACE *pVAS, + NvU64 va, + NvU64 vaLimit, + PMMU_TRACE_INFO pInfo, + NvBool verbose +) +{ + PMEMORY_DESCRIPTOR pPDB = vaspaceGetPageDirBase(pVAS, pGpu); + OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE); + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + NvBool done = NV_FALSE; + MMU_LAYOUT layout; + MMU_WALK *pWalk = pGpuState->pWalk; + + NV_ASSERT(pPDB); + _mmuInitLayout(pGpu, pVAS, &layout); + + if (verbose) + { + layout.pTraceCb->printPdb(pGpu, pVAS, va, vaLimit); + } + + return _mmuTraceWalk(pGpu, &layout, pWalk, 0, layout.pFmtRoot, pPDB, + va, vaLimit, pInfo, &done, verbose); +} + +/*! + * @brief Translates virtual addresses to physical addresses. Has additional + * functionality when specifying the mode in pParams + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pVAS OBJVASPACE pointer + * @param[in/out] pParams Input parameters as well as pArg, which is where this + * function will write all outputs. + * + * @returns NV_STATUS status = NV_OK on success, or status upon failure. 
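+ *
+ * Illustrative use (editor's sketch, not part of this change): a caller that
+ * only needs a single VA-to-PA lookup could fill the parameters roughly as
+ * below. MMU_TRACE_PARAM and MMU_TRACE_ARG are assumed to be the structs
+ * behind PMMU_TRACE_PARAM and PMMU_TRACE_ARG, and gpuVa is a caller-supplied
+ * virtual address.
+ *
+ *   MMU_TRACE_ARG   arg    = {0};
+ *   MMU_TRACE_PARAM params = {0};
+ *   params.mode    = MMU_TRACE_MODE_TRANSLATE;
+ *   params.va      = gpuVa;
+ *   params.vaLimit = gpuVa;
+ *   params.pArg    = &arg;
+ *   if (mmuTrace(pGpu, pVAS, &params) == NV_OK)
+ *   {
+ *       // arg.pa now holds the physical address, arg.aperture the aperture
+ *   }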
+ */ +NV_STATUS +mmuTrace +( + OBJGPU *pGpu, + OBJVASPACE *pVAS, + PMMU_TRACE_PARAM pParams +) +{ + PMEMORY_DESCRIPTOR pPDB; + MMU_TRACE_INFO info = {0}; + NV_STATUS status; + NvBool verbose; + NvBool modeValid; + MMU_TRACE_MODE traceMode; + + if (RMCFG_FEATURE_PLATFORM_GSP) + { + // + // All client vaspace is managed by CPU-RM, so MMU_TRACER is not needed + // in GSP-RM + // + return NV_ERR_NOT_SUPPORTED; + } + + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVAS != NULL, NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OR_RETURN(pParams != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pParams->pArg != NULL, NV_ERR_INVALID_ARGUMENT); + + pPDB = vaspaceGetPageDirBase(pVAS, pGpu); + NV_ASSERT_OR_RETURN(pPDB != NULL, NV_ERR_INVALID_ARGUMENT); + + verbose = (pParams->mode == MMU_TRACE_MODE_TRACE_VERBOSE); + + pParams->pArg->pa = MMU_INVALID_ADDR; + pParams->pArg->aperture = ADDR_UNKNOWN; + + info.pArg = pParams->pArg; + + modeValid = NV_TRUE; + traceMode = pParams->mode; + + switch (traceMode) { + case MMU_TRACE_MODE_TRACE: + case MMU_TRACE_MODE_TRACE_VERBOSE: + info.pteFunc = _mmuTracePteCallback; + break; + + case MMU_TRACE_MODE_TRANSLATE: + info.translateFunc = _mmuTraceTranslateCallback; + info.vaArg = pParams->va; + pParams->pArg->valid = NV_FALSE; + break; + + case MMU_TRACE_MODE_VALIDATE: + info.validateFunc = _mmuTraceValidateCallback; + pParams->pArg->valid = NV_FALSE; + pParams->pArg->validateCount = pParams->vaLimit - pParams->va + 1; + break; + + case MMU_TRACE_MODE_DUMP_RANGE: + info.pArg->pMapParams->count = 0; + info.pArg->pMapParams->hasMore = 0; + info.dumpMappingFunc = _mmuTraceDumpMappingCallback; + break; + + default: + modeValid = NV_FALSE; + break; + } + + NV_ASSERT_OR_RETURN(modeValid, NV_ERR_INVALID_ARGUMENT); + status = mmuTraceWalk(pGpu, pVAS, pParams->va, pParams->vaLimit, &info, verbose); + + // If translate mode but a translate never happened, return as an error. 
+ if (traceMode == MMU_TRACE_MODE_TRANSLATE && !pParams->pArg->valid) + { + return NV_ERR_INVALID_XLATE; + } + + return status; +} + +static void +_mmuInitLayout +( + OBJGPU *pGpu, + OBJVASPACE *pVAS, + PMMU_LAYOUT pLayout +) +{ + OBJGVASPACE *pGVAS = dynamicCast(pVAS, OBJGVASPACE); + const GMMU_FMT *pGmmuFmt; + + ct_assert(sizeof(GMMU_ENTRY_VALUE) <= sizeof(MMU_ENTRY)); + + NV_ASSERT(pGVAS); // Only valid for gvaspaces as of now + + pGmmuFmt = gvaspaceGetGmmuFmt(pGVAS, pGpu); + NV_ASSERT(pGmmuFmt); + + pLayout->pFmt = pGmmuFmt; + pLayout->pFmtRoot = pGmmuFmt->pRoot; + pLayout->pTraceCb = &g_gmmuTraceCallbacks; +} + + +static void +_mmuPrintPte +( + OBJGPU *pGpu, + const MMU_TRACE_CALLBACKS *pTraceCb, + NvU64 va, + NvU64 vaLimit, + NvU32 level, + NvU32 index, + const MMU_FMT_LEVEL *pFmtLevel, + const void *pFmtPte, + const MMU_ENTRY *pPte, + NvBool valid, + PMMU_INVALID_RANGE pRange, + NvBool verbose +) +{ + NvU64 pageSize; + + if (!verbose) + { + return; + } + + pageSize = mmuFmtLevelPageSize(pFmtLevel); + if ((RM_PAGE_SIZE_HUGE != pageSize) && + (RM_PAGE_SIZE_512M != pageSize)) + { + level++; // Indent one more level for PTE + } + + if (pRange->bInvalid) + { + NV_PRINTF(LEVEL_INFO, "MMUTRACE: VA[0x%08llx-%08llx]", pRange->va, + pRange->vaLimit); + MMU_TRACE_INDENT(level); + + switch (pageSize) + { + case RM_PAGE_SIZE_512M: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_512M"); + break; + case RM_PAGE_SIZE_HUGE: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_2M"); + break; + case RM_PAGE_SIZE_128K: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_128K"); + break; + case RM_PAGE_SIZE_64K: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_64K"); + break; + case RM_PAGE_SIZE: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE_4K"); + break; + default: + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PTE"); + NV_ASSERT(0); + break; + } + + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "[0x%x", pRange->index); + if (pRange->index != pRange->indexLimit) + { + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "-%x", pRange->indexLimit); + } + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "]: invalid\n"); + } + + if (valid) + { + NV_PRINTF(LEVEL_INFO, "MMUTRACE: VA[0x%08llx-%08llx]", va, vaLimit); + MMU_TRACE_INDENT(level); + pTraceCb->printPte(pGpu, pFmtLevel, pFmtPte, pPte, index); + } +} + +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) +static void +_mmuPrintPt +( + OBJGPU *pGpu, + PMMU_LAYOUT pLayout, + NvU64 va, + NvU64 vaLimit, + const MMU_FMT_LEVEL *pFmtLevel, + NvU32 level, + NvU32 subLevel, + const MMU_ENTRY *pPde +) +{ + const void *pFmt = pLayout->pFmt; + const MMU_TRACE_CALLBACKS *pTraceCb = pLayout->pTraceCb; + const MMU_FMT_LEVEL *pFmtSub = &pFmtLevel->subLevels[subLevel]; + const void *pFmtPde = pTraceCb->getFmtPde(pFmt, pFmtLevel, subLevel); + + NV_PRINTF(LEVEL_INFO, "MMUTRACE: VA[0x%08llx-%08llx]", va, vaLimit); + MMU_TRACE_INDENT(level+1); + pTraceCb->printPt(pGpu, pFmtSub, pFmtPde, pPde); +} +#endif + +static void +_mmuPrintPdeInvalid +( + PMMU_LAYOUT pLayout, + NvU32 level, + PMMU_INVALID_RANGE pRange, + NvBool verbose +) +{ +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + const void *pFmt = pLayout->pFmt; + const MMU_TRACE_CALLBACKS *pTraceCb = pLayout->pTraceCb; + NvU32 hwLevel = pTraceCb->swToHwLevel(pFmt, level); + + if (!verbose) + { + return; + } + + NV_PRINTF(LEVEL_INFO, "MMUTRACE: VA[0x%08llx-%08llx]", pRange->va, + pRange->vaLimit); + MMU_TRACE_INDENT(level); + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PDE%u[0x%x", hwLevel, pRange->index); + if (pRange->index != pRange->indexLimit) + { + 
NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "-%03x", pRange->indexLimit); + } + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "]: invalid\n"); +#endif +} + +static void +_mmuPrintPdeValid +( + OBJGPU *pGpu, + PMMU_LAYOUT pLayout, + NvU64 va, + NvU64 vaLimit, + NvU32 level, + NvU32 subLevel, + NvU32 index, + const MMU_FMT_LEVEL *pFmtLevel, + const MMU_ENTRY *pPde, + PMMU_INVALID_RANGE pRange, + NvBool verbose +) +{ +#if NV_PRINTF_LEVEL_ENABLED(LEVEL_INFO) + const void *pFmt = pLayout->pFmt; + const MMU_TRACE_CALLBACKS *pTraceCb = pLayout->pTraceCb; + NvU32 hwLevel = pTraceCb->swToHwLevel(pFmt, level); + + if (!verbose) + { + return; + } + + if (pRange->bInvalid) + { + _mmuPrintPdeInvalid(pLayout, level, pRange, NV_TRUE); + } + + NV_PRINTF(LEVEL_INFO, "MMUTRACE: VA[0x%08llx-%08llx]", va, vaLimit); + MMU_TRACE_INDENT(level); + NV_PRINTF_EX(NV_PRINTF_MODULE, LEVEL_INFO, "PDE%u[0x%x]: ", hwLevel, index); + pTraceCb->printPde(pGpu, pFmt, pFmtLevel, pPde); + + // Assuming only PTs result in multiple sublevels + if (pFmtLevel->numSubLevels > 1) + { + NvU32 i; + + for (i = 0; i < pFmtLevel->numSubLevels; i++) + { + _mmuPrintPt(pGpu, pLayout, va, vaLimit, pFmtLevel, level, i, pPde); + } + } +#endif +} + +static NV_STATUS +_mmuTraceWalk +( + OBJGPU *pGpu, + PMMU_LAYOUT pLayout, + MMU_WALK *pWalk, + NvU32 level, + const MMU_FMT_LEVEL *pFmtLevel, + PMEMORY_DESCRIPTOR pMemDesc, + NvU64 va, + NvU64 vaLimit, + PMMU_TRACE_INFO pInfo, + NvBool *pDone, + NvBool verbose +) +{ + const void *pFmt = pLayout->pFmt; + const MMU_TRACE_CALLBACKS *pTraceCb = pLayout->pTraceCb; + NvU32 index = mmuFmtVirtAddrToEntryIndex(pFmtLevel, va); + NvU64 offset = index * pFmtLevel->entrySize; + NV_STATUS status = NV_OK; + MMU_INVALID_RANGE invalidRange = {0}; + NvU64 entryVa = va; + NvBool isPt = NV_FALSE; + NvU8 *pBase = NULL; + MEMORY_DESCRIPTOR *pTempMemDesc = NULL; + + if (pMemDesc == NULL) + { + return NV_OK; + } + + pBase = kbusMapRmAperture_HAL(pGpu, pMemDesc); + if (pBase == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + while (entryVa <= vaLimit && index < mmuFmtLevelEntryCount(pFmtLevel)) + { + // + // Determine the highest address that this entry covers. Check if + // our vaLimit actually covers this entire page or not. 
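+        // (Editor's illustration, not in the original change: assuming
+        // mmuFmtEntryVirtAddrMask() returns the address mask covered by a
+        // single entry at this level -- e.g. 0xFFFF for a 64 KB PTE -- the OR
+        // below rounds entryVa up to the last byte the entry maps, and the
+        // NV_MIN() then clamps that against the caller's vaLimit.)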
+ // + NvU64 entryVaLevelLimit = entryVa | mmuFmtEntryVirtAddrMask(pFmtLevel); + NvU64 entryVaLimit = NV_MIN(entryVaLevelLimit, vaLimit); + NvBool valid = NV_FALSE; + MMU_ENTRY entry; + NvU32 i; + NV_ASSERT((offset + pFmtLevel->entrySize) <= pMemDesc->Size); + + portMemCopy(&entry, pFmtLevel->entrySize, pBase + offset, pFmtLevel->entrySize); + if (pTraceCb->isPte(pFmt, pFmtLevel, &entry, &valid)) + { + NvU64 vaArg = pInfo->vaArg; + const void *pFmtPte = pTraceCb->getFmtPte(pFmt); + isPt = NV_TRUE; + + if (pInfo->translateFunc != NULL && + mmuFmtLevelVirtAddrLo(pFmtLevel, entryVa) <= vaArg && vaArg <= entryVaLimit) + { + status = pInfo->translateFunc(pGpu, pTraceCb, pFmtLevel, + pFmtPte, &entry, pInfo->pArg, + vaArg, valid, pDone); + } + else if (pInfo->dumpMappingFunc != NULL) + { + status = pInfo->dumpMappingFunc(pGpu, pTraceCb, pFmtLevel, pFmtPte, + &entry, pInfo->pArg, entryVa, entryVaLimit, + valid, pDone); + } + else if (pInfo->pteFunc != NULL) + { + if (valid) + { + _mmuPrintPte(pGpu, pTraceCb, entryVa, entryVaLimit, level, + index, pFmtLevel, pFmtPte, &entry, NV_TRUE, + &invalidRange, verbose); + } + status = pInfo->pteFunc(pGpu, pTraceCb, va, pFmtLevel, pFmtPte, + &entry, pInfo->pArg, valid, pDone); + } + else if (pInfo->validateFunc != NULL) + { + status = pInfo->validateFunc(valid, pInfo->pArg, entryVa, entryVaLimit, pDone); + } + + if (status != NV_OK || *pDone) + { + goto unmap_and_exit; + } + goto update_and_continue; + } + + status = NV_ERR_INVALID_XLATE; + // Attempt translation of each sub-level. + for (i = 0; i < pFmtLevel->numSubLevels; i++) + { + + NvU32 memSize = 0; + + const void *pFmtPde = pTraceCb->getFmtPde(pFmt, pFmtLevel, i); + NvU64 nextBase = pTraceCb->getPdePa(pGpu, pFmtPde, &entry); + + // Entry is invalid + if (nextBase == MMU_INVALID_ADDR) + { + if (!pTraceCb->isInvalidPdeOk(pGpu, pFmt, pFmtPde, &entry, i)) + { + status = NV_ERR_INVALID_XLATE; + goto unmap_and_exit; + } + + // + // On dumpMapping, change status to NV_OK because + // the entire range can be scanned, which can result in + // iterating through Pdes with no valid sublevels, which + // would otherwise leave the status at NV_ERR_INVALID_XLATE + // + if (pInfo->dumpMappingFunc != NULL) + { + status = NV_OK; + } + + // Continue to next sub-level, still assuming a PDE fault so far. 
+ continue; + } + + NV_ASSERT_OK_OR_RETURN( + mmuWalkGetPageLevelInfo(pWalk, &pFmtLevel->subLevels[i], entryVa, + (const MMU_WALK_MEMDESC**)&pTempMemDesc, &memSize)); + + // Only print out the PDE the first time we know it's a valid PDE + if (!valid) + { + _mmuPrintPdeValid(pGpu, pLayout, entryVa, entryVaLimit, level, i, index, + pFmtLevel, &entry, &invalidRange, verbose); + } + + valid = NV_TRUE; + + if (pInfo->pdeFunc != NULL) + { + if (NV_OK != pInfo->pdeFunc(pGpu, pTraceCb, entryVa, entryVaLimit, level, index, + pFmtLevel, pFmtPde, &entry, NV_TRUE, pDone, + pInfo->pArg) || *pDone) + { + goto destroy_mem; + } + } + + // Recurse into sub-level translation, 1 OK translation => success + if (NV_OK == _mmuTraceWalk(pGpu, pLayout, pWalk, level + 1, &pFmtLevel->subLevels[i], + pTempMemDesc, entryVa, entryVaLimit, pInfo, pDone, verbose)) + { + status = NV_OK; + } + +destroy_mem: + if (*pDone) + { + goto unmap_and_exit; + } + } + + if (status != NV_OK) + { + goto unmap_and_exit; + } + +update_and_continue: + + if (!valid) + { + if (!invalidRange.bInvalid) + { + invalidRange.va = entryVa; + invalidRange.index = index; + } + + invalidRange.vaLimit = entryVaLimit; + invalidRange.indexLimit = index; + } + invalidRange.bInvalid = !valid; + + offset += pFmtLevel->entrySize; + entryVa = entryVaLevelLimit + 1; + index++; + } + + // contiguous invalid range at the end + if (invalidRange.bInvalid) + { + if (isPt) + { + if (pInfo->pteFunc) + { + _mmuPrintPte(pGpu, pTraceCb, 0, 0, level, 0, pFmtLevel, NULL, NULL, + NV_FALSE, &invalidRange, verbose); + status = pInfo->pteFunc(pGpu, pTraceCb, 0, NULL, NULL, NULL, NULL, NV_FALSE, pDone); + } + } + else + { + if (pInfo->pdeFunc) + { + _mmuPrintPdeInvalid(pLayout, level, &invalidRange, verbose); + status = pInfo->pdeFunc(pGpu, pTraceCb, 0, 0, level, 0, pFmtLevel, NULL, + NULL, NV_FALSE, pDone, pInfo->pArg); + } + } + } + +unmap_and_exit: + + kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pBase, NV_FALSE); + + return status; +} + +static NV_STATUS +_mmuTracePteCallback +( + OBJGPU *pGpu, + const MMU_TRACE_CALLBACKS *pTraceCb, + NvU64 va, + const MMU_FMT_LEVEL *pFmtLevel, + const void *pFmtPte, + const MMU_ENTRY *pPte, + void *pArg, + NvBool valid, + NvBool *pDone +) +{ + *pDone = NV_FALSE; + + if (valid) + { + PMMU_TRACE_ARG pMmuTraceArg = (PMMU_TRACE_ARG)pArg; + + pMmuTraceArg->pa = pTraceCb->getPtePa(pGpu, pFmtPte, pPte); + pMmuTraceArg->pa += mmuFmtVirtAddrPageOffset(pFmtLevel, va); + pMmuTraceArg->aperture = pTraceCb->pteAddrSpace(pFmtPte, pPte); + } + + return NV_OK; +} + +static NV_STATUS +_mmuTraceTranslateCallback +( + OBJGPU *pGpu, + const MMU_TRACE_CALLBACKS *pTraceCb, + const MMU_FMT_LEVEL *pFmtLevel, + const void *pFmtPte, + const MMU_ENTRY *pPte, + void *pArg, + NvU64 va, + NvBool valid, + NvBool *pDone +) +{ + PMMU_TRACE_ARG pMmuTraceArg; + + if (!valid) + { + *pDone = NV_FALSE; + return NV_OK; + } + + pMmuTraceArg = (PMMU_TRACE_ARG)pArg; + pMmuTraceArg->pa = pTraceCb->getPtePa(pGpu, pFmtPte, pPte); + + pMmuTraceArg->pa += mmuFmtVirtAddrPageOffset(pFmtLevel, va); + pMmuTraceArg->aperture = pTraceCb->pteAddrSpace(pFmtPte, pPte); + pMmuTraceArg->valid = NV_TRUE; + *pDone = NV_TRUE; + + return NV_OK; +} + + +static NV_STATUS +_mmuTraceDumpMappingCallback +( + OBJGPU *pGpu, + const MMU_TRACE_CALLBACKS *pTraceCb, + const MMU_FMT_LEVEL *pFmtLevel, + const void *pFmtPte, + const MMU_ENTRY *pPte, + void *pArg, + NvU64 va, + NvU64 vaLimit, + NvBool valid, + NvBool *pDone +) +{ + PMMU_TRACE_ARG pMmuTraceArg = (PMMU_TRACE_ARG)pArg; + *pDone = NV_FALSE; + + // If 
the mapping is an invalid range, just continue. + if (!valid) + { + return NV_OK; + } + + NvBool bCoalesce = (pMmuTraceArg->pMapParams->count > 0) && + ((pMmuTraceArg->pMapParams->opsBuffer[pMmuTraceArg->pMapParams->count - 1].gpuVA + + pMmuTraceArg->pMapParams->opsBuffer[pMmuTraceArg->pMapParams->count - 1].size) == va); + + // + // If we encounter more ranges that we can't account for, + // return hasMore = 1. + // + if (!bCoalesce && (pMmuTraceArg->pMapParams->count >= MAX_GET_MAPPINGS_OPS)) + { + *pDone = NV_TRUE; + pMmuTraceArg->pMapParams->hasMore = 1; + return NV_OK; + } + + // Coalesce if possible + if (bCoalesce) + { + pMmuTraceArg->pMapParams->opsBuffer[pMmuTraceArg->pMapParams->count - 1].size += (NvU32)(vaLimit - va + 1); + } + else + { + pMmuTraceArg->pMapParams->opsBuffer[pMmuTraceArg->pMapParams->count].gpuVA = va; + pMmuTraceArg->pMapParams->opsBuffer[pMmuTraceArg->pMapParams->count].size = (NvU32)(vaLimit - va + 1); + pMmuTraceArg->pMapParams->count++; + } + + return NV_OK; +} + + +static NV_STATUS +_mmuTraceValidateCallback +( + NvBool valid, + void *pArg, + NvU64 entryVa, + NvU64 entryVaLimit, + NvBool *pDone +) +{ + PMMU_TRACE_ARG pMmuTraceArg = (PMMU_TRACE_ARG)pArg; + + // If the range is valid, then subtract validated range from validateCount + if (valid) + { + NvU64 vaCoverage = entryVaLimit - entryVa; + pMmuTraceArg->validateCount -= (NV_MIN(vaCoverage + 1, pMmuTraceArg->validateCount)); + + // If we've reached zero, then the range is valid and we're done. + if (pMmuTraceArg->validateCount == 0) + { + *pDone = NV_TRUE; + pMmuTraceArg->valid = NV_TRUE; + } + else + { + *pDone = NV_FALSE; + pMmuTraceArg->valid = NV_FALSE; + } + + } + + // If it's not, then continue the search. We're not done. + else + { + *pDone = NV_FALSE; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/mmu/uvm_sw.c b/src/nvidia/src/kernel/gpu/mmu/uvm_sw.c new file mode 100644 index 000000000..f4aa1493a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/mmu/uvm_sw.c @@ -0,0 +1,71 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "rmapi/client.h" +#include "gpu/mmu/uvm_sw.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "rmapi/client.h" + +void +uvmswInitSwMethodState_IMPL +( + UvmSwObject *pUvmSw +) +{ + pUvmSw->methodA = 0; + pUvmSw->methodB = 0; + pUvmSw->bCancelMethodASet = NV_FALSE; + pUvmSw->bCancelMethodBSet = NV_FALSE; + pUvmSw->bClearMethodASet = NV_FALSE; +} + +NV_STATUS +uvmswConstruct_IMPL +( + UvmSwObject *pUvmSw, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NvHandle hClient = pCallContext->pClient->hClient; + RmClient *pRmClient = dynamicCast(pCallContext->pClient, RmClient); + RS_PRIV_LEVEL privLevel = pCallContext->secInfo.privLevel; + + if (!(rmclientIsAdmin(pRmClient, privLevel) || hypervisorCheckForObjectAccess(hClient))) + return NV_ERR_INVALID_CLIENT; + + uvmswInitSwMethodState(pUvmSw); + + return NV_OK; +} + +void +uvmswDestruct_IMPL +( + UvmSwObject *pUvmSw +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pUvmSw, ChannelDescendant); + + chandesIsolateOnDestruct(pChannelDescendant); +} diff --git a/src/nvidia/src/kernel/gpu/nvdec/arch/ampere/kernel_nvdec_ga100.c b/src/nvidia/src/kernel/gpu/nvdec/arch/ampere/kernel_nvdec_ga100.c new file mode 100644 index 000000000..7d2a2f953 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvdec/arch/ampere/kernel_nvdec_ga100.c @@ -0,0 +1,107 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/nvdec/kernel_nvdec.h" + +#include "core/core.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" +#include "os/nv_memory_type.h" + +#include "published/ampere/ga100/dev_boot.h" +#include "published/ampere/ga100/dev_fuse.h" +#include "published/ampere/ga100/dev_nvdec_pri.h" +#include "published/ampere/ga100/dev_nvdec_addendum.h" + +void +knvdecConfigureFalcon_GA100 +( + OBJGPU *pGpu, + KernelNvdec *pKernelNvdec +) +{ + KernelFalconEngineConfig falconConfig; + + portMemSet(&falconConfig, 0, sizeof(falconConfig)); + + falconConfig.registerBase = DRF_BASE(NV_PNVDEC(0)); + falconConfig.riscvRegisterBase = 0; // RISC-V unused or unsupported + falconConfig.fbifBase = NV_PNVDEC_FBIF_BASE(0); + falconConfig.bBootFromHs = NV_FALSE; + falconConfig.pmcEnableMask = DRF_DEF(_PMC, _ENABLE, _NVDEC, _ENABLED); + falconConfig.bIsPmcDeviceEngine = NV_TRUE; + falconConfig.physEngDesc = ENG_NVDEC(0); + falconConfig.ctxAttr = NV_MEMORY_UNCACHED; + falconConfig.ctxBufferSize = FLCN_CTX_ENG_BUFFER_SIZE_HW; + falconConfig.addrSpaceList = memdescAddrSpaceListToU32(ADDRLIST_FBMEM_PREFERRED); + + kflcnConfigureEngine(pGpu, staticCast(pKernelNvdec, KernelFalcon), &falconConfig); +} + +NvBool +knvdecIsEngineDisabled_GA100 +( + OBJGPU *pGpu, + KernelNvdec *pKernelNvdec +) +{ + NvU32 reg = GPU_REG_RD32(pGpu, NV_FUSE_OPT_NVDEC_DISABLE); + NvU32 field = DRF_VAL(_FUSE, _OPT_NVDEC_DISABLE, _DATA, reg); + // highest bit in field will read 1 if NVDEC0 is disabled + return (NVBIT(DRF_SIZE(NV_FUSE_OPT_NVDEC_DISABLE_DATA) - 1) & field) > 0; +} + +/*! + * Returns the NVDEC0 fuse version of the provided ucode id (1-indexed) + * + * @param pGpu OBJGPU pointer + * @param pKernelNvdec KernelNvdec pointer + * @param[in] ucodeId Ucode Id (1-indexed) to read fuse for + */ +NvU32 +knvdecReadUcodeFuseVersion_GA100 +( + OBJGPU *pGpu, + KernelNvdec *pKernelNvdec, + const NvU32 ucodeId +) +{ + NvU32 fuseVal = 0; + NvU32 index = ucodeId - 1; // adjust to 0-indexed + + // TODO: Bug 3519329: switch to indexed register once available + // if (index < NV_FUSE_OPT_FPF_NVDEC_UCODE_VERSION__SIZE_1) + if (index < 16) + { + // fuseVal = GPU_REG_IDX_RD_DRF(pGpu, _FUSE, _OPT_FPF_NVDEC_UCODE_VERSION, index, _DATA); + fuseVal = GPU_REG_RD32(pGpu, NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION + (4 * index)); + + if (fuseVal) + { + HIGHESTBITIDX_32(fuseVal); + fuseVal = fuseVal + 1; + } + } + + return fuseVal; +} diff --git a/src/nvidia/src/kernel/gpu/nvdec/arch/ampere/kernel_nvdec_ga102.c b/src/nvidia/src/kernel/gpu/nvdec/arch/ampere/kernel_nvdec_ga102.c new file mode 100644 index 000000000..62ba012ee --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvdec/arch/ampere/kernel_nvdec_ga102.c @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/nvdec/kernel_nvdec.h" + +#include "core/core.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" +#include "os/nv_memory_type.h" + +#include "published/ampere/ga102/dev_falcon_second_pri.h" +#include "published/ampere/ga100/dev_fuse.h" +#include "published/ampere/ga102/dev_boot.h" +#include "published/ampere/ga102/dev_nvdec_pri.h" +#include "published/ampere/ga102/dev_nvdec_addendum.h" + +void +knvdecConfigureFalcon_GA102 +( + OBJGPU *pGpu, + KernelNvdec *pKernelNvdec +) +{ + KernelFalconEngineConfig falconConfig; + + portMemSet(&falconConfig, 0, sizeof(falconConfig)); + + falconConfig.registerBase = DRF_BASE(NV_PNVDEC(0)); + falconConfig.riscvRegisterBase = NV_FALCON2_NVDEC0_BASE; + falconConfig.fbifBase = NV_PNVDEC_FBIF_BASE(0); + falconConfig.bBootFromHs = NV_TRUE; + falconConfig.pmcEnableMask = DRF_DEF(_PMC, _ENABLE, _NVDEC, _ENABLED); + falconConfig.bIsPmcDeviceEngine = NV_TRUE; + falconConfig.physEngDesc = ENG_NVDEC(0); + falconConfig.ctxAttr = NV_MEMORY_UNCACHED; + falconConfig.ctxBufferSize = FLCN_CTX_ENG_BUFFER_SIZE_HW; + falconConfig.addrSpaceList = memdescAddrSpaceListToU32(ADDRLIST_FBMEM_PREFERRED); + + kflcnConfigureEngine(pGpu, staticCast(pKernelNvdec, KernelFalcon), &falconConfig); +} + +NvBool +knvdecIsEngineDisabled_GA102 +( + OBJGPU *pGpu, + KernelNvdec *pKernelNvdec +) +{ + NvU32 reg = GPU_REG_RD32(pGpu, NV_FUSE_OPT_NVDEC_DISABLE); + NvU32 field = DRF_VAL(_FUSE, _OPT_NVDEC_DISABLE, _DATA, reg); + // highest bit in field will read 1 if NVDEC0 is disabled + return (NVBIT(DRF_SIZE(NV_FUSE_OPT_NVDEC_DISABLE_DATA) - 1) & field) > 0; +} diff --git a/src/nvidia/src/kernel/gpu/nvdec/arch/turing/kernel_nvdec_tu102.c b/src/nvidia/src/kernel/gpu/nvdec/arch/turing/kernel_nvdec_tu102.c new file mode 100644 index 000000000..745a549f2 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvdec/arch/turing/kernel_nvdec_tu102.c @@ -0,0 +1,95 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/nvdec/kernel_nvdec.h" + +#include "core/core.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" +#include "os/nv_memory_type.h" + +#include "published/turing/tu102/dev_falcon_v4.h" +#include "published/turing/tu102/dev_boot.h" +#include "published/turing/tu102/dev_fuse.h" +#include "published/turing/tu102/dev_nvdec_pri.h" +#include "published/turing/tu102/dev_nvdec_addendum.h" + +void +knvdecConfigureFalcon_TU102 +( + OBJGPU *pGpu, + KernelNvdec *pKernelNvdec +) +{ + KernelFalconEngineConfig falconConfig; + + portMemSet(&falconConfig, 0, sizeof(falconConfig)); + + falconConfig.registerBase = DRF_BASE(NV_PNVDEC(0)); + falconConfig.riscvRegisterBase = 0; // RISC-V unused or unsupported + falconConfig.fbifBase = NV_PNVDEC_FBIF_BASE(0); + falconConfig.bBootFromHs = NV_FALSE; + falconConfig.pmcEnableMask = DRF_DEF(_PMC, _ENABLE, _NVDEC, _ENABLED); + falconConfig.bIsPmcDeviceEngine = NV_FALSE; + falconConfig.physEngDesc = ENG_NVDEC(0); + falconConfig.ctxAttr = NV_MEMORY_UNCACHED; + falconConfig.ctxBufferSize = FLCN_CTX_ENG_BUFFER_SIZE_HW; + falconConfig.addrSpaceList = memdescAddrSpaceListToU32(ADDRLIST_FBMEM_PREFERRED); + + kflcnConfigureEngine(pGpu, staticCast(pKernelNvdec, KernelFalcon), &falconConfig); +} + +NvBool +knvdecIsEngineDisabled_TU102 +( + OBJGPU *pGpu, + KernelNvdec *pKernelNvdec +) +{ + NvU32 reg = GPU_REG_RD32(pGpu, NV_FUSE_OPT_NVDEC_DISABLE); + NvU32 field = DRF_VAL(_FUSE, _OPT_NVDEC_DISABLE, _DATA, reg); + // highest bit in field will read 1 if NVDEC0 is disabled + return (NVBIT(DRF_SIZE(NV_FUSE_OPT_NVDEC_DISABLE_DATA) - 1) & field) > 0; +} + +void knvdecNonstallIntrCheckAndClear_TU102(OBJGPU *pGpu, KernelNvdec *pKernelNvdec, THREAD_STATE_NODE *pThreadState) +{ + NvU32 registerBase = staticCast(pKernelNvdec, KernelFalcon)->registerBase; + NvU32 intr; + NvU32 clearBits; + + NV_ASSERT(registerBase != 0); + + intr = GPU_REG_RD32_EX(pGpu, registerBase + NV_PFALCON_FALCON_IRQSTAT, + pThreadState); + + if (DRF_VAL( _PFALCON_FALCON, _IRQSTAT, _SWGEN1, intr)) + { + NV_PRINTF(LEVEL_INFO, "Handling Trap Interrupt\n"); + + // Clear interrupt + clearBits = DRF_NUM(_PFALCON_FALCON, _IRQSTAT, _SWGEN1, 1); + GPU_REG_WR32_EX(pGpu, registerBase + NV_PFALCON_FALCON_IRQSCLR, + clearBits, pThreadState); + } +} diff --git a/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec.c b/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec.c new file mode 100644 index 000000000..bad851159 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec.c @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/nvdec/kernel_nvdec.h" + +#include "core/core.h" +#include "gpu/eng_desc.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" + +NV_STATUS +knvdecConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelNvdec *pKernelNvdec, + ENGDESCRIPTOR engDesc +) +{ + if (knvdecIsEngineDisabled_HAL(pGpu, pKernelNvdec)) + { + // fail construction of KernelNvdec if NVDEC0 engine is missing in HW + return NV_ERR_NOT_SUPPORTED; + } + + knvdecConfigureFalcon_HAL(pGpu, pKernelNvdec); + return NV_OK; +} + +void knvdecRegisterIntrService_IMPL(OBJGPU *pGpu, KernelNvdec *pKernelNvdec, IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX]) +{ + KernelFalcon *pKernelFalcon = staticCast(pKernelNvdec, KernelFalcon); + NV_ASSERT_OR_RETURN_VOID(pKernelFalcon); + + NV_PRINTF(LEVEL_INFO, "physEngDesc 0x%x\n", pKernelFalcon->physEngDesc); + + NV_ASSERT(pKernelFalcon->physEngDesc == ENG_NVDEC(0)); + + NvU32 mcIdx = MC_ENGINE_IDX_NVDEC0; + + NV_ASSERT(pRecords[mcIdx].pNotificationService == NULL); + pRecords[mcIdx].bFifoWaiveNotify = NV_FALSE; + pRecords[mcIdx].pNotificationService = staticCast(pKernelNvdec, IntrService); +} + +NV_STATUS knvdecServiceNotificationInterrupt_IMPL(OBJGPU *pGpu, KernelNvdec *pKernelNvdec, IntrServiceServiceNotificationInterruptArguments *pParams) +{ + NvU32 idxMc = pParams->engineIdx; + NvU32 idx2080 = NV2080_ENGINE_TYPE_NVDEC0; + + NV_PRINTF(LEVEL_INFO, "nonstall intr for MC 0x%x\n", idxMc); + + NV_ASSERT_OR_RETURN(idxMc == MC_ENGINE_IDX_NVDEC0, NV_ERR_INVALID_STATE); + + knvdecNonstallIntrCheckAndClear_HAL(pGpu, pKernelNvdec, pParams->pThreadState); + + // Wake up channels waiting on this event + engineNonStallIntrNotify(pGpu, idx2080); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec_ctx.c b/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec_ctx.c new file mode 100644 index 000000000..b0ec21757 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec_ctx.c @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "kernel/gpu/falcon/kernel_falcon.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/nvdec/kernel_nvdec_ctx.h" + +NV_STATUS +nvdecctxConstructHal_KERNEL +( + NvdecContext *pNvdecContext, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pNvdecContext, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + KernelFalcon *pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, pChannelDescendant->resourceDesc.engDesc); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + + NV_PRINTF(LEVEL_INFO, "nvdecctxConstruct for 0x%x\n", pChannelDescendant->resourceDesc.engDesc); + + return kflcnAllocContext(pGpu, pKernelFalcon, pKernelChannel, RES_GET_EXT_CLASS_ID(pChannelDescendant)); +} + +void nvdecctxDestructHal_KERNEL +( + NvdecContext *pNvdecContext +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pNvdecContext, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + KernelFalcon *pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, pChannelDescendant->resourceDesc.engDesc); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + + NV_PRINTF(LEVEL_INFO, "nvdecctxDestruct for 0x%x\n", pChannelDescendant->resourceDesc.engDesc); + + NV_ASSERT_OK(kflcnFreeContext(pGpu, pKernelFalcon, pKernelChannel, RES_GET_EXT_CLASS_ID(pChannelDescendant))); +} diff --git a/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec_engdesc.c b/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec_engdesc.c new file mode 100644 index 000000000..93d50c602 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvdec/kernel_nvdec_engdesc.c @@ -0,0 +1,109 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os/os.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/nvdec/kernel_nvdec_ctx.h" + +#include "class/cla0b0.h" // NVA0B0_VIDEO_DECODER +#include "class/clb0b0.h" // NVB0B0_VIDEO_DECODER +#include "class/clb6b0.h" // NVB6B0_VIDEO_DECODER +#include "class/clc1b0.h" // NVC1B0_VIDEO_DECODER +#include "class/clc2b0.h" // NVC2B0_VIDEO_DECODER +#include "class/clc3b0.h" // NVC3B0_VIDEO_DECODER +#include "class/clc4b0.h" // NVC4B0_VIDEO_DECODER +#include "class/clc6b0.h" // NVC6B0_VIDEO_DECODER +#include "class/clc7b0.h" // NVC7B0_VIDEO_DECODER + +/* + * This function returns an engine descriptor corresponding to the class + * and engine instance passed in. + * + * @params[in] externalClassId Id of classs being allocated + * @params[in] pAllocParams void pointer containing creation parameters. + * + * @returns + * ENG_INVALID, for unknown engine. Returns the right engine descriptor otherwise. + */ +ENGDESCRIPTOR +nvdecGetEngineDescFromAllocParams +( + OBJGPU *pGpu, + NvU32 externalClassId, + void *pAllocParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + NvU32 engineInstance = 0; + NV_BSP_ALLOCATION_PARAMETERS *pNvdecAllocParms = pAllocParams; + + NV_ASSERT_OR_RETURN((pNvdecAllocParms != NULL), ENG_INVALID); + + if (pNvdecAllocParms->size != sizeof(NV_BSP_ALLOCATION_PARAMETERS)) + { + NV_PRINTF(LEVEL_ERROR, "createParams size mismatch (rm = 0x%x / client = 0x%x)\n", + (NvU32) sizeof(NV_BSP_ALLOCATION_PARAMETERS), + pNvdecAllocParms->size); + DBG_BREAKPOINT(); + return ENG_INVALID; + } + + switch (externalClassId) + { + case NVA0B0_VIDEO_DECODER: + case NVB0B0_VIDEO_DECODER: + case NVB6B0_VIDEO_DECODER: + case NVC1B0_VIDEO_DECODER: + case NVC2B0_VIDEO_DECODER: + case NVC3B0_VIDEO_DECODER: + engineInstance = 0; + break; + case NVC4B0_VIDEO_DECODER: + case NVC6B0_VIDEO_DECODER: + case NVC7B0_VIDEO_DECODER: + engineInstance = pNvdecAllocParms->engineInstance; + break; + default: + return ENG_INVALID; + } + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + + NV_ASSERT_OK( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + pCallContext->pClient->hClient, &ref)); + + NV_ASSERT_OK( + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_NVDEC(engineInstance), + &engineInstance)); + return ENG_NVDEC(NV2080_ENGINE_TYPE_NVDEC_IDX(engineInstance)); + } + + // Get the right class as per engine instance. + return ENG_NVDEC(engineInstance); +} diff --git a/src/nvidia/src/kernel/gpu/nvenc/kernel_nvenc_ctx.c b/src/nvidia/src/kernel/gpu/nvenc/kernel_nvenc_ctx.c new file mode 100644 index 000000000..1bd8bc538 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvenc/kernel_nvenc_ctx.c @@ -0,0 +1,104 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gpu/nvenc/kernel_nvenc_ctx.h"
+#include "kernel/gpu/device/device.h"
+#include "kernel/gpu/fifo/kernel_channel.h"
+#include "os/os.h"
+#include "gpu/falcon/kernel_falcon.h"
+
+NV_STATUS
+msencctxConstructHal_KERNEL
+(
+    MsencContext *pMsencContext,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    ChannelDescendant *pChannelDescendant = staticCast(pMsencContext, ChannelDescendant);
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant);
+    KernelFalcon *pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, pChannelDescendant->resourceDesc.engDesc);
+    KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel;
+
+    NV_PRINTF(LEVEL_INFO, "msencctxConstruct for 0x%x\n", pChannelDescendant->resourceDesc.engDesc);
+
+    return kflcnAllocContext(pGpu, pKernelFalcon, pKernelChannel, RES_GET_EXT_CLASS_ID(pChannelDescendant));
+}
+
+void msencctxDestructHal_KERNEL
+(
+    MsencContext *pMsencContext
+)
+{
+    ChannelDescendant *pChannelDescendant = staticCast(pMsencContext, ChannelDescendant);
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant);
+    KernelFalcon *pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, pChannelDescendant->resourceDesc.engDesc);
+    KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel;
+
+    NV_PRINTF(LEVEL_INFO, "msencctxDestruct for 0x%x\n", pChannelDescendant->resourceDesc.engDesc);
+
+    NV_ASSERT_OK(kflcnFreeContext(pGpu, pKernelFalcon, pKernelChannel, RES_GET_EXT_CLASS_ID(pChannelDescendant)));
+}
+
+//
+// Query subdevice caps, and return caps for nvenc0
+// This version does not support SLI
+//
+NV_STATUS
+deviceCtrlCmdMsencGetCaps_IMPL
+(
+    Device *pDevice,
+    NV0080_CTRL_MSENC_GET_CAPS_PARAMS *pMsencCapsParams
+)
+{
+    OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice);
+    NV2080_CTRL_INTERNAL_MSENC_GET_CAPS_PARAMS params;
+
+    // sanity check array size
+    if (pMsencCapsParams->capsTblSize != NV0080_CTRL_MSENC_CAPS_TBL_SIZE)
+    {
+        NV_PRINTF(LEVEL_ERROR, "size mismatch: client 0x%x rm 0x%x\n",
+                  pMsencCapsParams->capsTblSize,
+                  NV0080_CTRL_MSENC_CAPS_TBL_SIZE);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    ct_assert(NV0080_CTRL_MSENC_CAPS_TBL_SIZE ==
+              sizeof(params.caps[0].capsTbl) / sizeof(*params.caps[0].capsTbl));
+
+    RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_ASSERT_OK_OR_RETURN(pRmApi->Control(pRmApi,
+                                           pGpu->hInternalClient,
+                                           pGpu->hInternalSubdevice,
+                                           NV2080_CTRL_CMD_INTERNAL_MSENC_GET_CAPS,
+                                           &params,
+                                           sizeof(params)));
+
+    NV_ASSERT_OR_RETURN(params.valid[0], NV_ERR_INVALID_STATE);
+    portMemCopy(pMsencCapsParams->capsTbl,
+                NV0080_CTRL_MSENC_CAPS_TBL_SIZE,
+                &params.caps[0].capsTbl,
+                sizeof(params.caps[0].capsTbl));
+
+    return NV_OK;
+}
diff --git a/src/nvidia/src/kernel/gpu/nvenc/kernel_nvenc_engdesc.c b/src/nvidia/src/kernel/gpu/nvenc/kernel_nvenc_engdesc.c
new file mode 100644
index 000000000..1d1e627b1
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/nvenc/kernel_nvenc_engdesc.c
@@ -0,0 +1,99 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "os/os.h"
+#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
+#include "kernel/gpu/fifo/kernel_channel.h"
+
+#include "class/clc0b7.h"
+#include "class/cld0b7.h"
+#include "class/clc1b7.h"
+#include "class/clc2b7.h"
+#include "class/clc3b7.h"
+#include "class/clc4b7.h"
+#include "class/clb4b7.h"
+#include "class/clc7b7.h"
+
+/*
+ * This function returns an engine descriptor corresponding to the class
+ * and engine instance passed in.
+ *
+ * @params[in] externalClassId  Id of class being allocated
+ * @params[in] pAllocParams     void pointer containing creation parameters.
+ *
+ * @returns
+ * ENG_INVALID, for unknown engine. Returns the right engine descriptor otherwise.
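+ *
+ * Illustrative allocation parameters that reach this path (editor's sketch,
+ * not part of this change; only the fields checked below are shown):
+ *
+ *   NV_MSENC_ALLOCATION_PARAMETERS encAllocParams = {0};
+ *   encAllocParams.size           = sizeof(encAllocParams);
+ *   encAllocParams.engineInstance = 0;   // ignored only for NVC0B7
+ *
+ * Passed as pAllocParams when one of the encoder classes handled below is
+ * allocated, this resolves to ENG_MSENC(0) here (or to the MIG-remapped
+ * instance when MIG is active).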
+ */ +ENGDESCRIPTOR +msencGetEngineDescFromAllocParams(OBJGPU *pGpu, NvU32 externalClassId, void *pAllocParams) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + NvU32 engineInstance = 0; + NV_MSENC_ALLOCATION_PARAMETERS *pMsencAllocParms = pAllocParams; + + if (pMsencAllocParms->size != sizeof(NV_MSENC_ALLOCATION_PARAMETERS)) + { + NV_PRINTF(LEVEL_ERROR, "createParams size mismatch (rm = 0x%x / client = 0x%x)\n", + (NvU32)sizeof(NV_MSENC_ALLOCATION_PARAMETERS), + pMsencAllocParms->size); + DBG_BREAKPOINT(); + return ENG_INVALID; + } + + switch (externalClassId) + { + case NVC0B7_VIDEO_ENCODER: + engineInstance = 0; + break; + case NVD0B7_VIDEO_ENCODER: + case NVC1B7_VIDEO_ENCODER: + case NVC2B7_VIDEO_ENCODER: + case NVC3B7_VIDEO_ENCODER: + case NVC4B7_VIDEO_ENCODER: + case NVB4B7_VIDEO_ENCODER: + case NVC7B7_VIDEO_ENCODER: + engineInstance = pMsencAllocParms->engineInstance; + break; + default: + return ENG_INVALID; + } + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + + NV_ASSERT_OK( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + pCallContext->pClient->hClient, &ref)); + + NV_ASSERT_OK( + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_NVENC(engineInstance), + &engineInstance)); + return ENG_MSENC(NV2080_ENGINE_TYPE_NVENC_IDX(engineInstance)); + } + + // Get the right class as per engine instance. + return ENG_MSENC(engineInstance); +} diff --git a/src/nvidia/src/kernel/gpu/nvjpg/kernel_nvjpg_ctx.c b/src/nvidia/src/kernel/gpu/nvjpg/kernel_nvjpg_ctx.c new file mode 100644 index 000000000..1d28c4126 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvjpg/kernel_nvjpg_ctx.c @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os/os.h" +#include "kernel/gpu/falcon/kernel_falcon.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/nvjpg/kernel_nvjpg_ctx.h" + +NV_STATUS +nvjpgctxConstructHal_KERNEL +( + NvjpgContext *pNvjpgContext, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pNvjpgContext, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + KernelFalcon *pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, pChannelDescendant->resourceDesc.engDesc); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + + NV_PRINTF(LEVEL_INFO, "nvjpgctxConstruct for 0x%x\n", pChannelDescendant->resourceDesc.engDesc); + + return kflcnAllocContext(pGpu, pKernelFalcon, pKernelChannel, RES_GET_EXT_CLASS_ID(pChannelDescendant)); +} + +void nvjpgctxDestructHal_KERNEL +( + NvjpgContext *pNvjpgContext +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pNvjpgContext, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + KernelFalcon *pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, pChannelDescendant->resourceDesc.engDesc); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + + NV_PRINTF(LEVEL_INFO, "nvjpgctxDestruct for 0x%x\n", pChannelDescendant->resourceDesc.engDesc); + + NV_ASSERT_OK(kflcnFreeContext(pGpu, pKernelFalcon, pKernelChannel, RES_GET_EXT_CLASS_ID(pChannelDescendant))); +} diff --git a/src/nvidia/src/kernel/gpu/nvjpg/kernel_nvjpg_engdesc.c b/src/nvidia/src/kernel/gpu/nvjpg/kernel_nvjpg_engdesc.c new file mode 100644 index 000000000..b41bdb40d --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvjpg/kernel_nvjpg_engdesc.c @@ -0,0 +1,92 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/gpu.h" +#include "gpu/mig_mgr/kernel_mig_manager.h" +#include "nvos.h" +#include "resserv/rs_server.h" + +#include "class/clc4d1.h" // NVC4D1_VIDEO_NVJPG + +/*! + * This function returns an engine descriptor corresponding to the class + * and engine instance passed in. + * + * @params[in] externalClassId Id of classs being allocated + * @params[in] pAllocParams void pointer containing creation parameters. + * + * @returns + * ENG_INVALID, for unknown engine. Returns the right engine descriptor otherwise. 
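+ *
+ * Note (added for clarity): NVC4D1_VIDEO_NVJPG is the only class handled below
+ * and always maps to engine instance 0, so on a non-MIG system the result is
+ * ENG_NVJPEG(0); under MIG the instance is first translated to a global NVJPG
+ * index via kmigmgrGetLocalToGlobalEngineType().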
+ */ +ENGDESCRIPTOR +nvjpgGetEngineDescFromAllocParams +( + OBJGPU *pGpu, + NvU32 externalClassId, + void *pAllocParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + NvU32 engineInstance = 0; + NV_NVJPG_ALLOCATION_PARAMETERS *pNvjpgAllocParms = pAllocParams; + + NV_ASSERT_OR_RETURN((pNvjpgAllocParms != NULL), ENG_INVALID); + + if (pNvjpgAllocParms->size != sizeof(NV_NVJPG_ALLOCATION_PARAMETERS)) + { + NV_PRINTF(LEVEL_ERROR, "createParams size mismatch (rm = 0x%x / client = 0x%x)\n", + (NvU32) sizeof(NV_NVJPG_ALLOCATION_PARAMETERS), + pNvjpgAllocParms->size); + DBG_BREAKPOINT(); + return ENG_INVALID; + } + + switch (externalClassId) + { + case NVC4D1_VIDEO_NVJPG: + engineInstance = 0; + break; + default: + DBG_BREAKPOINT(); + return ENG_INVALID; + } + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + + NV_ASSERT_OK( + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + pCallContext->pClient->hClient, &ref)); + + NV_ASSERT_OK( + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + NV2080_ENGINE_TYPE_NVJPEG(engineInstance), + &engineInstance)); + return ENG_NVJPEG(NV2080_ENGINE_TYPE_NVJPEG_IDX(engineInstance)); + } + + // Get the right class as per engine instance. + return ENG_NVJPEG(engineInstance); +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/arch/ampere/kernel_nvlink_ga100.c b/src/nvidia/src/kernel/gpu/nvlink/arch/ampere/kernel_nvlink_ga100.c new file mode 100644 index 000000000..e706aaaa7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/arch/ampere/kernel_nvlink_ga100.c @@ -0,0 +1,212 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/gpu.h" + +/** + * @brief This routine overrides the nvlink connection topology if chiplib arguments + * have been provided. It queries MODS API for the chiplib overrides and based + * on that, derives hshub configuration values that are programmed at a later + * stage during nvlink state load. The override values should exist for ALL + * links or NO links. 
The field encoding can be found in phys_nvlink.h + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] phase unused + */ +NV_STATUS +knvlinkOverrideConfig_GA100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 phase +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + pKernelNvlink->pLinkConnection = portMemAllocNonPaged(sizeof(NvU32) * NVLINK_MAX_LINKS_SW); + if (pKernelNvlink->pLinkConnection == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pKernelNvlink->pLinkConnection, 0, sizeof(NvU32) * NVLINK_MAX_LINKS_SW); + + // + // To deal with the nonlegacy force config reg keys, we need to now fill + // in the default phys links, use a unity 1/1 map. + // + for (i = 0; i < NVLINK_MAX_LINKS_SW; i++) + { + // The physical link is guaranteed valid in all cases + pKernelNvlink->pLinkConnection[i] = DRF_NUM(_NVLINK, _ARCH_CONNECTION, _PHYSICAL_LINK, i); + } + + // Check to see if there are chiplib overrides for nvlink configuration + status = osGetForcedNVLinkConnection(pGpu, NVLINK_MAX_LINKS_SW, pKernelNvlink->pLinkConnection); + if ((NV_OK != status) || pKernelNvlink->bForceAutoconfig) + { + // A non-OK status implies there are no overrides. + NV_PRINTF(LEVEL_INFO, "Not using forced config!\n"); + + portMemFree(pKernelNvlink->pLinkConnection); + pKernelNvlink->pLinkConnection = NULL; + return NV_OK; + } + + NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS forcedConfigParams; + portMemSet(&forcedConfigParams, 0, sizeof(forcedConfigParams)); + + forcedConfigParams.bLegacyForcedConfig = NV_FALSE; + portMemCopy(&forcedConfigParams.linkConnection, (sizeof(NvU32) * NVLINK_MAX_LINKS_SW), + pKernelNvlink->pLinkConnection, (sizeof(NvU32) * NVLINK_MAX_LINKS_SW)); + + // + // RPC to GSP-RM to for GSP-RM to process the forced NVLink configurations. This includes + // setting up of HSHUB state and programming the memory subsystem registers. + // + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROCESS_FORCED_CONFIGS, + (void *)&forcedConfigParams, + sizeof(forcedConfigParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to process forced NVLink configurations !\n"); + + portMemFree(pKernelNvlink->pLinkConnection); + pKernelNvlink->pLinkConnection = NULL; + return status; + } + + pKernelNvlink->bOverrideComputePeerMode = forcedConfigParams.bOverrideComputePeerMode; + + // + // Now, CPU-RM should process the forced configurations and update its state, which includes + // the topology information and the required link masks. + // + return knvlinkSetupTopologyForForcedConfig(pGpu, pKernelNvlink); +} + +/*! + * @brief Wrapper function chose between removing all or peer mappings + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] bAllMapping Whether both sysmem and peer mappings should be removed + * @param[in] peerMask Mask of peers for which mappings will be removed + * @param[in] bL2Entry Are the mappings being removed because of L2 entry? 
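+ *
+ * Note (added for clarity, an interpretation of the code below): peerMask is a
+ * bitmask of peer IDs (e.g. NVBIT(peerId)); when bAllMapping is NV_TRUE it is
+ * ignored and mappings for all NVLINK_MAX_PEERS_SW peers are removed instead.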
+ * + * @return Returns NV_OK on success + */ +NV_STATUS +knvlinkRemoveMapping_GA100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvBool bAllMapping, + NvU32 peerMask, + NvBool bL2Entry +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + + params.bL2Entry = bL2Entry; + + if (bAllMapping) + { + params.mapTypeMask = NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_SYSMEM | + NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_PEER; + params.peerMask = (1 << NVLINK_MAX_PEERS_SW) - 1; + } + else + { + params.mapTypeMask = NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_PEER; + params.peerMask = peerMask; + } + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_REMOVE_NVLINK_MAPPING, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + return status; + + // + // Ampere+, along with HSHUB config registers, we also need to update + // the MUX registers and the connection config registers. So, we have + // to call nvlinkCurrentConfig instead of nvlinkUpdateHshubConfigRegs + // + return knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); +} + +/*! + * @brief Validates fabric base address. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] fabricBaseAddr Address to be validated + * + * @returns On success, NV_OK. + * On failure, returns NV_ERR_XXX. + */ +NV_STATUS +knvlinkValidateFabricBaseAddress_GA100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU64 fabricBaseAddr +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 fbSizeBytes; + + fbSizeBytes = pMemoryManager->Ram.fbTotalMemSizeMb << 20; + + // + // Ampere SKUs will be paired with NVSwitches (Limerock) supporting 2K + // mapslots that can cover 64GB each. Make sure that the fabric base + // address being used is valid to cover whole frame buffer. + // + + // Check if fabric address is aligned to mapslot size. + if (fabricBaseAddr & (NVBIT64(36) - 1)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Align fbSize to mapslot size. + fbSizeBytes = RM_ALIGN_UP(fbSizeBytes, NVBIT64(36)); + + // Make sure the address range doesn't go beyond the limit, (2K * 64GB). + if ((fabricBaseAddr + fbSizeBytes) > NVBIT64(47)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/arch/pascal/kernel_nvlink_gp100.c b/src/nvidia/src/kernel/gpu/nvlink/arch/pascal/kernel_nvlink_gp100.c new file mode 100644 index 000000000..fb1786e57 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/arch/pascal/kernel_nvlink_gp100.c @@ -0,0 +1,373 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" + +#include "gpu/gpu.h" +#include "gpu/ce/kernel_ce.h" +#include "nvRmReg.h" + +// +// NVLINK Override Configuration +// +NV_STATUS +knvlinkOverrideConfig_GP100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 phase +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS forcedConfigParams; + portMemSet(&forcedConfigParams, 0, sizeof(forcedConfigParams)); + + forcedConfigParams.bLegacyForcedConfig = NV_TRUE; + forcedConfigParams.phase = phase; + + // RPC to GSP-RM to for GSP-RM to process the forced NVLink configurations. + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROCESS_FORCED_CONFIGS, + (void *)&forcedConfigParams, + sizeof(forcedConfigParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to process forced NVLink configurations !\n"); + return status; + } + + return NV_OK; +} + +/*! + * Get a mask with one bit set for each unique GPU peer connected via + * NVLINK. In this implementation, each bit is the lowest link ID of + * all links connected to a given GPU peer. This allows a static peer + * ID assignment based on link topology. + * + * Note: Ampere and beyond, there is no static peer ID allocation for + * NVLink. + * + * @param[in] pGpu OBJGPU ptr + * @param[in] pKernelNvlink KernelNvlink ptr + * + * return NvU32 unique nvlink peer ID mask for pGpu + */ +NvU32 +knvlinkGetUniquePeerIdMask_GP100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NvU32 uniqueIdMask = 0; + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pKernelNvlink->peerLinkMasks); i++) + { + NvU32 peerLinkMask = pKernelNvlink->peerLinkMasks[i]; + if (peerLinkMask != 0) + { + uniqueIdMask |= LOWESTBIT(peerLinkMask); + } + } + + return uniqueIdMask; +} + +/** + * Get a unique peerID for the remote GPU connected via NVLINK. In this + * implementation, that peer ID is the lowest link ID of all the links + * connected to the peer GPU. This allows a static peer ID assignment + * based on link topology. + * + * Note: Ampere and beyond, there is no static peer ID allocation for + * NVLink. + * + * @param[in] pGpu OBJGPU pointer for local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] pRemoteGpu OBJGPU pointer for remote GPU + * + * return NvU32 unique nvlink peer ID pGpu to pRemoteGpu + */ +NvU32 +knvlinkGetUniquePeerId_GP100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + OBJGPU *pRemoteGpu +) +{ + NvU32 peerLinkMask; + + peerLinkMask = pKernelNvlink->peerLinkMasks[gpuGetInstance(pRemoteGpu)]; + if (peerLinkMask == 0) + { + return BUS_INVALID_PEER; + } + + LOWESTBITIDX_32(peerLinkMask); + + return peerLinkMask; +} + +/*! 
+ * @brief Wrapper function chose between removing all or peer mappings + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] bAllMapping Whether both sysmem and peer mappings should be removed + * @param[in] peerMask Mask of peers for which mappings will be removed + * @param[in] bL2Entry Are the mappings being removed because of L2 entry? + * + * @return Returns NV_OK on success + */ +NV_STATUS +knvlinkRemoveMapping_GP100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvBool bAllMapping, + NvU32 peerMask, + NvBool bL2Entry +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + + params.bL2Entry = bL2Entry; + + if (bAllMapping) + { + params.mapTypeMask = NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_SYSMEM | + NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_PEER; + params.peerMask = (1 << NVLINK_MAX_PEERS_SW) - 1; + } + else + { + params.mapTypeMask = NV2080_CTRL_NVLINK_REMOVE_NVLINK_MAPPING_TYPE_PEER; + params.peerMask = peerMask; + } + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_REMOVE_NVLINK_MAPPING, + (void *)¶ms, sizeof(params)); + return status; +} + +/** + * @brief Get the mask of optimal CEs for P2P reads/writes + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] gpuMask Mask of GPUs instances + * @param[out] sysmemOptimalReadCEs Mask of CEs for SYSMEM reads + * @param[out] sysmemOptimalWriteCEs Mask of CEs for SYSMEM writes + * @param[out] p2pOptimalReadCEs Mask of CEs for P2P reads + * @param[out] p2pOptimalWriteCEs Mask of CEs for P2P writes + */ +NV_STATUS +knvlinkGetP2POptimalCEs_GP100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 gpuMask, + NvU32 *sysmemOptimalReadCEs, + NvU32 *sysmemOptimalWriteCEs, + NvU32 *p2pOptimalReadCEs, + NvU32 *p2pOptimalWriteCEs +) +{ + KernelCE *kce = NULL; + NvU32 maxCes = 0; + NvU32 sysmemReadCE = 0; + NvU32 sysmemWriteCE = 0; + NvU32 nvlinkP2PCeMask = 0; + NvU32 i; + + maxCes = gpuGetNumCEs(pGpu); + + for (i = 0; i < maxCes; i++) + { + kce = GPU_GET_KCE(pGpu, i); + if (kce) + { + kceGetCeFromNvlinkConfig(pGpu, kce, + gpuMask, + &sysmemReadCE, + &sysmemWriteCE, + &nvlinkP2PCeMask); + break; + } + } + + if (sysmemOptimalReadCEs != NULL) + { + *sysmemOptimalReadCEs = NVBIT(sysmemReadCE); + } + + if (sysmemOptimalWriteCEs != NULL) + { + *sysmemOptimalWriteCEs = NVBIT(sysmemWriteCE); + } + + if (p2pOptimalReadCEs != NULL) + { + *p2pOptimalReadCEs = nvlinkP2PCeMask; + } + + if (p2pOptimalWriteCEs != NULL) + { + *p2pOptimalWriteCEs = nvlinkP2PCeMask; + } + + return NV_OK; +} + +/** + * @brief Setup peer mapping for the given ID to the remote GPU, + * and program HSHUB to finalize the mapping. 
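+ *
+ * Rough sequence, as implemented below: a PRE_SETUP_NVLINK_PEER RPC to GSP-RM,
+ * then knvlinkUpdateCurrentConfig() to refresh all HSHUB settings together,
+ * then a POST_SETUP_NVLINK_PEER RPC for peerMask = NVBIT(peerId).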
+ * + * @param[in] pGpu OBJGPU pointer (Local) + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] pRemoteGpu OBJGPU pointer (Remote) + * @param[in] peerId peer ID + */ +void +knvlinkSetupPeerMapping_GP100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + OBJGPU *pRemoteGpu, + NvU32 peerId +) +{ + NV_STATUS status = NV_OK; + NvU32 peerLinkMask; + + NV2080_CTRL_NVLINK_PRE_SETUP_NVLINK_PEER_PARAMS preSetupNvlinkPeerParams; + NV2080_CTRL_NVLINK_POST_SETUP_NVLINK_PEER_PARAMS postSetupNvlinkPeerParams; + + // HSHUB registers are updated during driver load if nvlink topology is forced + if (!knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + if ((pGpu == pRemoteGpu) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED)) + { + NV_PRINTF(LEVEL_ERROR, + "P2P loopback is disabled on GPU%u, aborting peer setup (0x%x)\n", + gpuGetInstance(pGpu), peerId); + return; + } + + peerLinkMask = pKernelNvlink->peerLinkMasks[gpuGetInstance(pRemoteGpu)]; + + if (peerLinkMask != 0) + { + portMemSet(&preSetupNvlinkPeerParams, 0, sizeof(preSetupNvlinkPeerParams)); + preSetupNvlinkPeerParams.peerId = peerId; + preSetupNvlinkPeerParams.peerLinkMask = peerLinkMask; + preSetupNvlinkPeerParams.bNvswitchConn = knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink); + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PRE_SETUP_NVLINK_PEER, + (void *)&preSetupNvlinkPeerParams, + sizeof(preSetupNvlinkPeerParams)); + NV_ASSERT(status == NV_OK); + + // Update *ALL* the HSHUB settings together + knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); + + portMemSet(&postSetupNvlinkPeerParams, 0, sizeof(postSetupNvlinkPeerParams)); + postSetupNvlinkPeerParams.peerMask = NVBIT(peerId); + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_POST_SETUP_NVLINK_PEER, + (void *)&postSetupNvlinkPeerParams, + sizeof(postSetupNvlinkPeerParams)); + NV_ASSERT(status == NV_OK); + } + } +} + +/*! + * @brief Return the mask of links that are connected + * + * @param[in] pGpu OBJGPU ptr + * @param[in] pKernelNvlink KernelNvlink ptr + */ +NvU32 +knvlinkGetConnectedLinksMask_GP100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + // + // Connected links are already filtered against the + // enabled links. Hence, enabledLinks has final say + // + return knvlinkGetEnabledLinkMask(pGpu, pKernelNvlink); +} + +/*! 
+ * @brief Program NVLink Speed for the enabled links + * + * @param[in] pGpu OBJGPU ptr + * @param[in] pKernelNvlink KernelNvlink ptr + */ +NV_STATUS +knvlinkProgramLinkSpeed_GP100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS programLinkSpeedParams; + portMemSet(&programLinkSpeedParams, 0, sizeof(programLinkSpeedParams)); + + programLinkSpeedParams.bPlatformLinerateDefined = NV_FALSE; + programLinkSpeedParams.platformLineRate = + NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_DEFAULT; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROGRAM_LINK_SPEED, + (void *)&programLinkSpeedParams, + sizeof(programLinkSpeedParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to program NVLink speed for links!\n"); + return status; + } + + pKernelNvlink->nvlinkLinkSpeed = programLinkSpeedParams.nvlinkLinkSpeed; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/arch/turing/kernel_nvlink_tu102.c b/src/nvidia/src/kernel/gpu/nvlink/arch/turing/kernel_nvlink_tu102.c new file mode 100644 index 000000000..2c4082793 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/arch/turing/kernel_nvlink_tu102.c @@ -0,0 +1,164 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" +#include "gpu/gpu.h" + +/*! + * Turing+ will use RXDET (receiver detect) feature to determine + * if a link is connected. Check bRxDetected field in nvlink_link + * + * @param[in] pGpu OBJGPU ptr + * @param[in] pKernelNvlink KernelNvlink ptr + * + * @return returns NV_OK + */ +NV_STATUS +knvlinkFilterBridgeLinks_TU102 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + NvU32 linkId; + + // All links from Turing+ are sensable by receiver detect + pKernelNvlink->bridgeSensableLinks = pKernelNvlink->discoveredLinks; + + // If connections are forced through chiplib, return enabled links + if (pKernelNvlink->bRegistryLinkOverride) + { + pKernelNvlink->connectedLinksMask = pKernelNvlink->enabledLinks; + pKernelNvlink->bridgedLinks = pKernelNvlink->enabledLinks; + + NV_PRINTF(LEVEL_INFO, + "Connections forced through chiplib. 
ConnectedLinksMask same as " + "enabledLinks = 0x%x\n", pKernelNvlink->connectedLinksMask); + + goto knvlinkFilterBridgeLinks_end; + } + + // Mark the links as bridged if receiver detect has passed + FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->discoveredLinks) + { +#if defined(INCLUDE_NVLINK_LIB) + + // If the link has not been registered yet, continue + if (pKernelNvlink->nvlinkLinks[linkId].core_link == NULL) + { + // Link is not registered yet. Connectivity is absent + pKernelNvlink->connectedLinksMask &= ~NVBIT(linkId); + pKernelNvlink->bridgedLinks &= ~NVBIT(linkId); + + NV_PRINTF(LEVEL_INFO, + "GPU%d: Link%d not yet registered in core lib. Connectivity will be " + "established after RXDET\n", pGpu->gpuInstance, linkId); + continue; + } + + if (pKernelNvlink->nvlinkLinks[linkId].core_link->bRxDetected) + { + pKernelNvlink->connectedLinksMask |= NVBIT(linkId); + pKernelNvlink->bridgedLinks |= NVBIT(linkId); + } + else + { + pKernelNvlink->connectedLinksMask &= ~NVBIT(linkId); + pKernelNvlink->bridgedLinks &= ~NVBIT(linkId); + } +#endif + } + FOR_EACH_INDEX_IN_MASK_END; + +knvlinkFilterBridgeLinks_end: + + // For GSP-CLIENTs, the link masks and vbios info need to synchronize with GSP + status = knvlinkSyncLinkMasksAndVbiosInfo(pGpu, pKernelNvlink); + if (status != NV_OK) + { + return status; + } + + return NV_OK; +} + +/*! + * @brief Return the mask of links that are connected + * + * @param[in] pGpu OBJGPU ptr + * @param[in] pKernelNvlink KernelNvlink ptr + */ +NvU32 +knvlinkGetConnectedLinksMask_TU102 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + // + // On Turing, enabledLinks != connectedLinksMask + // This is because the connection cannot be sensed till receiver detect + // has reported whether or not the connection is present + // + return pKernelNvlink->connectedLinksMask; +} + +/*! + * @brief Is IOCTRL guaranteed to be powered up for D3 + * + * @param[in] pGpu OBJGPU ptr + * @param[in] pKernelNvlink KernelNvlink ptr + * + * return NV_TRUE if IOCTRL guaranteed to be powered up + */ +NvBool +knvlinkPoweredUpForD3_TU102 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + // + // IOCTRL is guaranteed NOT to be reset if the D3 variant is + // RTD3 or FGC6 and if NVLink L2 is supported on the chip + // + if ((pGpu->getProperty(pGpu, PDB_PROP_GPU_RTD3_GC6_ACTIVE) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_FAST_GC6_ACTIVE)) && + pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED)) + { + // + // Bugs# 2274645, 2197144: On Turing, the NVLink clamps are broken. So, + // IOCTRL unit will see the reset signal when the GPU enters RTD3/FGC6. + // + if (!pKernelNvlink->getProperty(pKernelNvlink, + PDB_PROP_KNVLINK_BUG2274645_RESET_FOR_RTD3_FGC6)) + { + return NV_TRUE; + } + } + + return NV_FALSE; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/arch/volta/kernel_minion_gv100.c b/src/nvidia/src/kernel/gpu/nvlink/arch/volta/kernel_minion_gv100.c new file mode 100644 index 000000000..d5d4ae29f --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/arch/volta/kernel_minion_gv100.c @@ -0,0 +1,118 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "nvRmReg.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" + +/*! + * @brief MINION construct + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelIoctrl IOCTRL object pointer + * + * @return NV_OK if successful + */ +NV_STATUS +kioctrlMinionConstruct_GV100 +( + OBJGPU *pGpu, + KernelIoctrl *pKernelIoctrl +) +{ + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // By default, MINION is available based on chip type. Condition further... + if (pKernelIoctrl->getProperty(pKernelIoctrl, PDB_PROP_KIOCTRL_MINION_AVAILABLE)) + { + // Trim based on model/platform/testing/schedule + NvBool bEnableMinion = kioctrlGetMinionEnableDefault_HAL(pGpu, pKernelIoctrl); + + if (!bEnableMinion) + { + NV_PRINTF(LEVEL_INFO, + "NVLink MINION is not supported on this platform, disabling.\n"); + } + + // Read in any MINION registry overrides. + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_MINION_CONTROL, &pKernelNvlink->minionControl)) + { + NV_PRINTF(LEVEL_INFO, "%s: 0x%x\n", + NV_REG_STR_RM_NVLINK_MINION_CONTROL, pKernelNvlink->minionControl); + + // Select requested enable state + switch (DRF_VAL(_REG_STR_RM, _NVLINK_MINION_CONTROL, _ENABLE, pKernelNvlink->minionControl)) + { + case NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_FORCE_ON: + NV_PRINTF(LEVEL_INFO, + "NVLink MINION force enable requested by command line override.\n"); + pKernelIoctrl->setProperty(pKernelIoctrl, PDB_PROP_KIOCTRL_MINION_FORCE_BOOT, NV_TRUE); + bEnableMinion = NV_TRUE; + break; + case NV_REG_STR_RM_NVLINK_MINION_CONTROL_ENABLE_FORCE_OFF: + NV_PRINTF(LEVEL_INFO, + "NVLink MINION force disable requested by command line override.\n"); + bEnableMinion = NV_FALSE; + break; + default: + break; + } + + } + + // Flush the final minion enable setting + pKernelIoctrl->setProperty(pKernelIoctrl, PDB_PROP_KIOCTRL_MINION_AVAILABLE, bEnableMinion); + } + + return NV_OK; +} + +/*! + * @brief Determine the default MINION enable state. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelIoctrl IOCTRL object pointer + * + * @return NV_TRUE is enable by default + */ +NvBool +kioctrlGetMinionEnableDefault_GV100 +( + OBJGPU *pGpu, + KernelIoctrl *pKernelIoctrl +) +{ + // + // Arch requests that NVLink MINION always be DISABLED by default on RTL + // for performance reasons. They will force enable when needed. 
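+    // Note (added for clarity): even with this RTL default, the enable state
+    // can still be overridden at runtime through the
+    // NV_REG_STR_RM_NVLINK_MINION_CONTROL registry key handled in
+    // kioctrlMinionConstruct_GV100() above.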
+ // + if (IS_RTLSIM(pGpu)) + { + return NV_FALSE; + } + + // MINION support is fully enabled by default on everything else. + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/arch/volta/kernel_nvlink_gv100.c b/src/nvidia/src/kernel/gpu/nvlink/arch/volta/kernel_nvlink_gv100.c new file mode 100644 index 000000000..6425ecff7 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/arch/volta/kernel_nvlink_gv100.c @@ -0,0 +1,526 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "os/os.h" +#include "kernel/gpu/mmu/kern_gmmu.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" +#include "core/thread_state.h" + +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#if defined(INCLUDE_NVLINK_LIB) +static NV_STATUS _knvlinkAreLinksDisconnected(OBJGPU *, KernelNvlink *, NvBool *); +#endif + +/*! + * @brief Construct NVLink HAL + * + * @param[in] pGpu OBJGPU pointer for local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK if successful + */ +NV_STATUS +knvlinkConstructHal_GV100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + + if (IS_FMODEL(pGpu) || IS_EMULATION(pGpu)) + { + pKernelNvlink->bVerifTrainingEnable = NV_TRUE; + } + + return status; +} + +/*! + * @brief Enable links post topology. 
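+ *
+ * Note (added for clarity): the RPC below is skipped when every link in
+ * linkMask is already present in pKernelNvlink->initializedLinks, i.e. when
+ * (linkMask & ~initializedLinks) == 0.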
+ * + * @param[in] pGpu OBJGPU pointer for local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkMask Masks of links to enable + * + */ +NV_STATUS +knvlinkEnableLinksPostTopology_GV100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkMask +) +{ + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_ENABLE_LINKS_POST_TOPOLOGY_PARAMS params; + + // + // Skip the RPC if linkmask is 0 or if all the links in the mask + // are already initialized + // + if ((linkMask & (~pKernelNvlink->initializedLinks)) == 0) + { + return NV_OK; + } + + portMemSet(¶ms, 0, sizeof(params)); + + params.linkMask = linkMask; + + // Reset timeout to clear any accumulated timeouts from link init + if (IS_GSP_CLIENT(pGpu)) + { + threadStateResetTimeout(pGpu); + } + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS_POST_TOPOLOGY, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to enable Links post topology!\n"); + return status; + } + + pKernelNvlink->initializedLinks = params.initializedLinks; + + return NV_OK; +} + +/** + * @brief This routine overrides the nvlink connection topology if chiplib arguments + * have been provided. It queries MODS API for the chiplib overrides and based + * on that, derives hshub configuration values that are programmed at a later + * stage during nvlink state load. The override values should exist for ALL + * links or NO links. The field encoding can be found in phys_nvlink.h + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] phase unused + */ +NV_STATUS +knvlinkOverrideConfig_GV100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 phase +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + pKernelNvlink->pLinkConnection = portMemAllocNonPaged(sizeof(NvU32) * NVLINK_MAX_LINKS_SW); + if (pKernelNvlink->pLinkConnection == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pKernelNvlink->pLinkConnection, 0, sizeof(NvU32) * NVLINK_MAX_LINKS_SW); + + // + // To deal with the nonlegacy force config reg keys, we need to now fill + // in the default phys links, use a unity 1/1 map. + // + for (i = 0; i < NVLINK_MAX_LINKS_SW; i++) + { + // The physical link is guaranteed valid in all cases + pKernelNvlink->pLinkConnection[i] = DRF_NUM(_NVLINK, _ARCH_CONNECTION, _PHYSICAL_LINK, i); + } + + // Check to see if there are chiplib overrides for nvlink configuration + status = osGetForcedNVLinkConnection(pGpu, NVLINK_MAX_LINKS_SW, pKernelNvlink->pLinkConnection); + if (NV_OK != status) + { + // A non-OK status implies there are no overrides. + portMemFree(pKernelNvlink->pLinkConnection); + pKernelNvlink->pLinkConnection = NULL; + return NV_OK; + } + + NV2080_CTRL_NVLINK_PROCESS_FORCED_CONFIGS_PARAMS forcedConfigParams; + portMemSet(&forcedConfigParams, 0, sizeof(forcedConfigParams)); + + forcedConfigParams.bLegacyForcedConfig = NV_FALSE; + portMemCopy(&forcedConfigParams.linkConnection, (sizeof(NvU32) * NVLINK_MAX_LINKS_SW), + pKernelNvlink->pLinkConnection, (sizeof(NvU32) * NVLINK_MAX_LINKS_SW)); + + // + // RPC to GSP-RM to for GSP-RM to process the forced NVLink configurations. This includes + // setting up of HSHUB state and programming the memory subsystem registers. 
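+    // (Added note, an interpretation: the physical/GSP side of RM owns the
+    //  HSHUB and memory-subsystem programming; CPU-RM afterwards only mirrors
+    //  the resulting topology state, see knvlinkSetupTopologyForForcedConfig
+    //  below.)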
+ // + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROCESS_FORCED_CONFIGS, + (void *)&forcedConfigParams, sizeof(forcedConfigParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to process forced NVLink configurations !\n"); + + portMemFree(pKernelNvlink->pLinkConnection); + pKernelNvlink->pLinkConnection = NULL; + return status; + } + + pKernelNvlink->bOverrideComputePeerMode = forcedConfigParams.bOverrideComputePeerMode; + + // + // Now, CPU-RM should process the forced configurations and update its state, which includes + // the topology information and the required link masks. + // + return knvlinkSetupTopologyForForcedConfig(pGpu, pKernelNvlink); +} + +/*! + * @brief This function applies settings specific to supporting Degraded Mode + * on NVswitch systems (LR10+) as follows - + * Checks for links that have been degarded (i.e disconnected) and then + * takes one of the following 2 actions- + * 1) If the link has been degraded, it + * a) adds them to the disconnectedLinkMask, and + * b) marks their remote end as not connected + * 2) If not, then adds the "active" link to the switchLinkMasks so that the + * caller can then update the correct state in a subsequent call to the + * _nvlinkUpdateSwitchLinkMasks function + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] pSwitchLinkMasks Active switch links + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkApplyNvswitchDegradedModeSettings_GV100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 *pSwitchLinkMasks +) +{ + NV_STATUS status = NV_OK; + +#if defined(INCLUDE_NVLINK_LIB) + + NvBool bLinkDisconnected[NVLINK_MAX_LINKS_SW] = {0}; + NvBool bUpdateConnStatus = NV_FALSE; + NvU32 switchLinks = 0; + NvU32 linkId; + + // At least there should be one connection to NVSwitch, else bail out + FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->enabledLinks) + { + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_NVSWITCH) + { + switchLinks |= NVBIT(linkId); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + if (switchLinks == 0) + { + return NV_OK; + } + + // + // Retrieve the connection status for all the enabled links. This requires checking + // the links reset status and link state and sublink states as well. + // Bug 3480556: _knvlinkAreLinksDisconnected makes RPC call to GSP-RM to get the link + // and sublink states. Trigger one RPC instead of invoking the RPC once + // for each link which reduces perf. + // + status = _knvlinkAreLinksDisconnected(pGpu, pKernelNvlink, bLinkDisconnected); + NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, status); + + FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->enabledLinks) + { + bUpdateConnStatus = NV_FALSE; + + // + // Degraded Mode on NVSwitch systems: + // We loop over enabledLinks to check if there are any links that have + // changed their state from HS, so as to add them to the disconnectedLinkMask + // and mark their remote end as not connected. 
If not, then add the active link + // to switchLinkMasks so that it gets updated by _nvlinkUpdateSwitchLinkMasks below + // + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_NVSWITCH) + { + if (bLinkDisconnected[linkId]) + { + // RPC into GSP-RM to update the link connected status only if its required + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected) + { + bUpdateConnStatus = NV_TRUE; + } + + // Mark this link as disconnected + pKernelNvlink->disconnectedLinkMask |= (NVBIT32(linkId)); + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected = NV_FALSE; + + // RPC into GSP-RM to update the link connected status only if its required + if (bUpdateConnStatus) + { + status = knvlinkUpdateLinkConnectionStatus(pGpu, pKernelNvlink, linkId); + if (status != NV_OK) + { + return status; + } + } + } + else if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected == NV_TRUE) + { + *pSwitchLinkMasks |= NVBIT32(linkId); + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + +#endif + + return status; +} + +#if defined(INCLUDE_NVLINK_LIB) + +/*! + * @brief This function returns NV_TRUE for the following cases- + * 1. Pseudo-clean shutdown + * linkMode == SAFE && sublinkState == OFF (RX and TX) + * 2. Link reset post shutdown + * linkMode == RESET + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[out] bLinkDisconnected Array of connection status for the links + * NV_FALSE if link is connected + * NV_TRUE if link is disconnected + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkAreLinksDisconnected +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvBool *bLinkDisconnected +) +{ + NV_STATUS status = NV_OK; + NvU32 linkId; + + NV_ASSERT_OR_RETURN(bLinkDisconnected != NULL, NV_ERR_INVALID_ARGUMENT); + + NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.linkMask = pKernelNvlink->enabledLinks; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_GET_LINK_AND_CLOCK_INFO, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + return status; + + FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->enabledLinks) + { + if ((params.linkInfo[linkId].linkState == NVLINK_LINKSTATE_SAFE) && + (params.linkInfo[linkId].txSublinkState == NVLINK_SUBLINK_STATE_TX_OFF) && + (params.linkInfo[linkId].rxSublinkState == NVLINK_SUBLINK_STATE_RX_OFF)) + { + // Case 1: Pseudo-clean shutdown + bLinkDisconnected[linkId] = NV_TRUE; + } + else if (params.linkInfo[linkId].bLinkReset) + { + // Case 2: Link reset post shutdown + bLinkDisconnected[linkId] = NV_TRUE; + } + else + { + // Link is connected + bLinkDisconnected[linkId] = NV_FALSE; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return status; +} + +#endif + +/*! 
+ * @brief Program NVLink Speed for the enabled links + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NV_STATUS +knvlinkProgramLinkSpeed_GV100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + NvU32 platformLineRate; + + NV_STATUS platformLinerateDefined = NV_ERR_NOT_SUPPORTED; + platformLinerateDefined = osGetPlatformNvlinkLinerate(pGpu, &platformLineRate); + + NV2080_CTRL_NVLINK_PROGRAM_LINK_SPEED_PARAMS programLinkSpeedParams; + portMemSet(&programLinkSpeedParams, 0, sizeof(programLinkSpeedParams)); + + programLinkSpeedParams.bPlatformLinerateDefined = (platformLinerateDefined == NV_OK); + programLinkSpeedParams.platformLineRate = platformLineRate; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROGRAM_LINK_SPEED, + (void *)&programLinkSpeedParams, + sizeof(programLinkSpeedParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to program NVLink speed for links!\n"); + return status; + } + + pKernelNvlink->nvlinkLinkSpeed = programLinkSpeedParams.nvlinkLinkSpeed; + + return NV_OK; +} + +/*! + * Handles PostLoad Hal for NVLink. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @returns NV_OK on success, ERROR otherwise. + */ +NV_STATUS +knvlinkStatePostLoadHal_GV100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS params; + + // Set NVSwitch fabric base address and enable compute peer addressing + if (knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink)) + { + portMemSet(¶ms, 0, sizeof(params)); + params.bGet = NV_TRUE; + params.addr = NVLINK_INVALID_FABRIC_ADDR; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get nvswitch fabric address for GPU %x\n", + pGpu->gpuInstance); + return status; + } + + status = knvlinkSetUniqueFabricBaseAddress(pGpu, pKernelNvlink, params.addr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to set unique NVSwitch fabric base address for GPU %x\n", + pGpu->gpuInstance); + return status; + } + } + + if (knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink) || pKernelNvlink->bOverrideComputePeerMode) + { + status = kgmmuEnableNvlinkComputePeerAddressing_HAL(pKernelGmmu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to enable compute addressing for GPU %x\n", + pGpu->gpuInstance); + return status; + } + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_INTERNAL_NVLINK_ENABLE_COMPUTE_PEER_ADDR, + NULL, 0); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to enable compute peer addressing!\n"); + return status; + } + } + + return NV_OK; +} + +/*! + * @brief Validates fabric base address. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] fabricBaseAddr Address to be validated + * + * @returns On success, NV_OK. + * On failure, returns NV_ERR_XXX. 
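+ *
+ * Worked example (illustrative numbers, not from the original change): Willow
+ * exposes 8K mapslots of 16 GB (2^34 B) each, i.e. a 2^47 B fabric range. A
+ * GPU with 32 GB of FB at fabricBaseAddr = 0x20_0000_0000 (128 GB) passes the
+ * checks below: the base is 2^34-aligned and 128 GB + 32 GB <= 2^47 B.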
+ */ +NV_STATUS +knvlinkValidateFabricBaseAddress_GV100 +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU64 fabricBaseAddr +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU64 fbSizeBytes; + + fbSizeBytes = pMemoryManager->Ram.fbTotalMemSizeMb << 20; + + // + // Volta SKUs will be paired with NVSwitches (Willow) supporting 8K mapslots + // that can cover 16GB each. Make sure that the fabric base address being + // used is valid to cover whole frame buffer. + // + + // Check if fabric address is aligned to mapslot size. + if (fabricBaseAddr & (NVBIT64(34) - 1)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Align fbSize to mapslot size. + fbSizeBytes = RM_ALIGN_UP(fbSizeBytes, NVBIT64(34)); + + // Make sure the address range doesn't go beyond the limit, (8K * 16GB). + if ((fabricBaseAddr + fbSizeBytes) > NVBIT64(47)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_ioctrl.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_ioctrl.c new file mode 100644 index 000000000..febe018ee --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_ioctrl.c @@ -0,0 +1,72 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" + +/*! + * @brief Construct a KernelIoctrl object + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelIoctrl KernelIoctrl object pointer + * @param[in] publicId The ID of the ioctrl to be constructed + * + * @return NV_OK on success + */ +NV_STATUS +kioctrlConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelIoctrl *pKernelIoctrl, + NvU32 publicId +) +{ + NV_STATUS status = NV_OK; + OBJENGSTATE *pEngstate; + + pKernelIoctrl->PublicId = publicId; + + // Default the IOCTRL IP version + pKernelIoctrl->ipVerIoctrl = NVLINK_VERSION_10; + + pEngstate = dynamicCast(pKernelIoctrl, OBJENGSTATE); + pEngstate->engDesc = ENG_KERNEL_IOCTRL(publicId); + + status = kioctrlMinionConstruct_HAL(pGpu, pKernelIoctrl); + + return status; +} + +/*! 
+ * @brief Destruct a KernelIoctrl object + * + * @param[in] pKernelIoctrl KernelIoctrl object pointer + */ +void +kioctrlDestructEngine_IMPL +( + KernelIoctrl *pKernelIoctrl +) +{ + return; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlink.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlink.c new file mode 100644 index 000000000..8cdefd074 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlink.c @@ -0,0 +1,1722 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/ce/kernel_ce.h" + +/*! + * @brief Is NVLINK topology forced? NVLink topology is considered + * forced for both legacy forced config and chiplib configs + * + * @param[in] pGpu OBJGPU + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_TRUE if topology is forced + */ +NvBool +knvlinkIsForcedConfig_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + return (pKernelNvlink->bChiplibConfig); +} + +/*! + * @brief Determine if NVLink is enabled or disabled by default + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_TRUE if NVLink is enabled on the GPU/platform + */ +NvBool +knvlinkIsNvlinkDefaultEnabled_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + // + // Currently it is critical that the following lib check be present. + // Burying this in the hal below it may get lost as the stub is all + // thats required for POR (always true from the hals perspective) + // +#if !defined(INCLUDE_NVLINK_LIB) + + return NV_FALSE; + +#endif + + // Let the PDB handle the final decision. + return pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_ENABLED); +} + +/*! + * @brief Determine if P2P loopback over NVLink is supported for + * the given GPU. This function returns true if any link + * is connected in loopback mode. 
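+ *
+ * (Added note: as implemented below, a link counts as loopback when its remote
+ *  end reports the same PCI domain/bus/device as pGpu itself; the per-link
+ *  test is knvlinkIsP2pLoopbackSupportedPerLink_IMPL().)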
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_TRUE if any link is in loopback mode + */ +NvBool +knvlinkIsP2pLoopbackSupported_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + NvU32 i; + + if ((pGpu == NULL) || (pKernelNvlink == NULL)) + { + return NV_FALSE; + } + + // Return false if P2P loopback is disabled through regkey + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED)) + { + return NV_FALSE; + } + + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->enabledLinks) + { + if (knvlinkIsP2pLoopbackSupportedPerLink_IMPL(pGpu, pKernelNvlink, i)) + return NV_TRUE; + } + FOR_EACH_INDEX_IN_MASK_END + +#endif + + return NV_FALSE; +} + +/*! + * @brief Determine if P2P loopback over NVLink is supported for + * the given link. This function returns true if the link + * is connected in loopback mode. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] link Link ID + * + * @return NV_TRUE if the link is in loopback mode + */ +NvBool +knvlinkIsP2pLoopbackSupportedPerLink_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 link +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + if ((pGpu == NULL) || (pKernelNvlink == NULL)) + { + return NV_FALSE; + } + + // Return false if P2P loopback is disabled through regkey + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED)) + { + return NV_FALSE; + } + + // Return false if the given link is disabled + if (!(NVBIT(link) & pKernelNvlink->enabledLinks)) + { + return NV_FALSE; + } + + // Check the link connected to the same GPU (loopback) + if (pKernelNvlink->nvlinkLinks[link].remoteEndInfo.bConnected) + { + if ((pKernelNvlink->nvlinkLinks[link].remoteEndInfo.domain == gpuGetDomain(pGpu)) && + (pKernelNvlink->nvlinkLinks[link].remoteEndInfo.bus == gpuGetBus(pGpu)) && + (pKernelNvlink->nvlinkLinks[link].remoteEndInfo.device == gpuGetDevice(pGpu)) && + (pKernelNvlink->nvlinkLinks[link].remoteEndInfo.function == 0)) + { + return NV_TRUE; + } + } + +#endif + + return NV_FALSE; +} + +/*! + * @brief Determine if P2P over NVLINK is supported between 2 GPUs + * + * @param[in] pGpu OBJGPU pointer for local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] pPeerGpu OBJGPU pointer for remote GPU + * + * @return NV_TRUE if P2P is supported between the 2 GPUs + */ +NvBool +knvlinkIsNvlinkP2pSupported_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + OBJGPU *pPeerGpu +) +{ + NV_STATUS status = NV_OK; + + if (pKernelNvlink == NULL) + { + return NV_FALSE; + } + + // Get the Nvlink P2P connections from the core library + status = knvlinkGetP2pConnectionStatus(pGpu, pKernelNvlink, pPeerGpu); + + if (status == NV_OK) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +/*! 
+ * @brief Checks whether the necessary config setup is done to
+ *        support P2P over NVSwitch
+ *
+ * @param[in] pGpu           OBJGPU pointer for local GPU
+ * @param[in] pKernelNvlink  KernelNvlink pointer
+ * @param[in] pPeerGpu       OBJGPU pointer for remote GPU
+ *
+ * @return  NV_TRUE if the P2P over NVSwitch config is valid
+ */
+NvBool
+knvlinkCheckNvswitchP2pConfig_IMPL
+(
+    OBJGPU       *pGpu,
+    KernelNvlink *pKernelNvlink,
+    OBJGPU       *pPeerGpu
+)
+{
+    MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu);
+    NvU64          rangeStart     = knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink);
+    NvU64          rangeEnd       = rangeStart + (pMemoryManager->Ram.fbTotalMemSizeMb << 20);
+    NvU64          peerRangeStart = knvlinkGetUniqueFabricBaseAddress(pPeerGpu,
+                                        GPU_GET_KERNEL_NVLINK(pPeerGpu));
+
+    if (knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink))
+    {
+        if (gpuIsSriovEnabled(pGpu))
+        {
+            // Currently vGPU + switch doesn't support GPA addressing.
+            return NV_TRUE;
+        }
+
+        if (knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink) ==
+            NVLINK_INVALID_FABRIC_ADDR)
+        {
+            NV_PRINTF(LEVEL_ERROR, "GPU %d doesn't have a fabric address\n",
+                      gpuGetInstance(pGpu));
+
+            return NV_FALSE;
+        }
+
+        if ((pGpu != pPeerGpu) &&
+            ((peerRangeStart >= rangeStart) && (peerRangeStart < rangeEnd)))
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "GPU %d doesn't have a unique fabric address\n",
+                      gpuGetInstance(pGpu));
+
+            return NV_FALSE;
+        }
+    }
+    else
+    {
+        if (knvlinkGetUniqueFabricBaseAddress(pGpu, pKernelNvlink) !=
+            NVLINK_INVALID_FABRIC_ADDR)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "non-NVSwitch GPU %d has a valid fabric address\n",
+                      gpuGetInstance(pGpu));
+
+            return NV_FALSE;
+        }
+    }
+
+    return NV_TRUE;
+}
+
+/*!
+ * @brief Get Nvlink P2P connections between 2 GPUs
+ *
+ * @param[in] pGpu           OBJGPU pointer for local GPU
+ * @param[in] pKernelNvlink  KernelNvlink pointer
+ * @param[in] pPeerGpu       OBJGPU pointer for remote GPU
+ *
+ * @return  NV_OK if P2P connections are present
+ */
+NV_STATUS
+knvlinkGetP2pConnectionStatus_IMPL
+(
+    OBJGPU       *pGpu,
+    KernelNvlink *pKernelNvlink,
+    OBJGPU       *pPeerGpu
+)
+{
+    NV_STATUS     status         = NV_OK;
+    OBJGPU       *pGpu0          = pGpu;
+    OBJGPU       *pGpu1          = pPeerGpu;
+    KernelNvlink *pKernelNvlink0 = pKernelNvlink;
+    KernelNvlink *pKernelNvlink1 = NULL;
+    NvU32         numPeerLinks   = 0;
+
+    if (pGpu1 == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR, "Invalid pPeerGpu.\n");
+
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+    else if ((pGpu0 == pGpu1) &&
+             (pGpu0->getProperty(pGpu0, PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED)))
+    {
+        // P2P over loopback links are disabled through regkey overrides
+        NV_PRINTF(LEVEL_INFO, "loopback P2P on GPU%u disabled by regkey\n",
+                  gpuGetInstance(pGpu0));
+
+        return NV_ERR_NOT_SUPPORTED;
+    }
+    else
+    {
+        pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1);
+    }
+
+    if (pKernelNvlink1 == NULL)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Input mask contains a GPU on which NVLink is disabled.\n");
+
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if ((IS_RTLSIM(pGpu0) && !pKernelNvlink0->bForceEnableCoreLibRtlsims) ||
+        knvlinkIsForcedConfig(pGpu0, pKernelNvlink0))
+    {
+        // For non-legacy configs.
+        if (pKernelNvlink0->bChiplibConfig)
+        {
+            NV_PRINTF(LEVEL_INFO,
+                      "NVLink P2P is supported between GPU%d and GPU%d\n",
+                      gpuGetInstance(pGpu0), gpuGetInstance(pGpu1));
+
+            return NV_OK;
+        }
+    }
+
+    // Get the remote ends of the links of local GPU from the nvlink core
+    knvlinkCoreGetRemoteDeviceInfo(pGpu0, pKernelNvlink0);
+
+    // Post topology link enable on links of local GPU
+    status = knvlinkEnableLinksPostTopology_HAL(pGpu0, pKernelNvlink0,
+                                                pKernelNvlink0->enabledLinks);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    numPeerLinks = knvlinkGetNumLinksToPeer(pGpu0, pKernelNvlink0, pGpu1);
+    if (numPeerLinks > 0)
+    {
+        if (knvlinkGetNumLinksToPeer(pGpu1, pKernelNvlink1, pGpu0) != numPeerLinks)
+        {
+            // Get the remote ends of the links of remote GPU from the nvlink core
+            knvlinkCoreGetRemoteDeviceInfo(pGpu1, pKernelNvlink1);
+
+            // Post topology link enable on links of remote GPU
+            status = knvlinkEnableLinksPostTopology_HAL(pGpu1, pKernelNvlink1,
+                                                        pKernelNvlink1->enabledLinks);
+            if (status != NV_OK)
+            {
+                return status;
+            }
+        }
+
+        // Peers should have the same number of links pointing back at us
+        NV_ASSERT_OR_RETURN(knvlinkGetNumLinksToPeer(pGpu1, pKernelNvlink1, pGpu0) ==
+                            numPeerLinks, NV_ERR_INVALID_STATE);
+
+        NV_ASSERT_OR_RETURN(knvlinkCheckNvswitchP2pConfig(pGpu0, pKernelNvlink0, pGpu1),
+                            NV_ERR_INVALID_STATE);
+
+        NV_ASSERT_OR_RETURN(knvlinkCheckNvswitchP2pConfig(pGpu1, pKernelNvlink1, pGpu0),
+                            NV_ERR_INVALID_STATE);
+
+        NV_PRINTF(LEVEL_INFO,
+                  "NVLink P2P is supported between GPU%d and GPU%d\n",
+                  gpuGetInstance(pGpu0), gpuGetInstance(pGpu1));
+
+        return NV_OK;
+    }
+
+    NV_PRINTF(LEVEL_INFO,
+              "NVLink P2P is NOT supported between GPU%d and GPU%d\n",
+              pGpu->gpuInstance, pGpu1->gpuInstance);
+
+    return NV_ERR_NOT_SUPPORTED;
+}
+
+/*!
+ * @brief Update the settings for the currently established NVLink
+ *        topology. This is the top-level function that should be
+ *        called, instead of applying the settings individually,
+ *        since it grabs the required locks
+ *
+ * @param[in] pGpu           OBJGPU pointer
+ * @param[in] pKernelNvlink  KernelNvlink pointer
+ *
+ * @return  NV_OK on success
+ */
+NV_STATUS
+knvlinkUpdateCurrentConfig_IMPL
+(
+    OBJGPU       *pGpu,
+    KernelNvlink *pKernelNvlink
+)
+{
+    OBJSYS    *pSys      = SYS_GET_INSTANCE();
+    KernelCE  *pKCe      = NULL;
+    NvBool     bOwnsLock = NV_FALSE;
+    NV_STATUS  status    = NV_OK;
+    NvU32      i;
+
+    if (osAcquireRmSema(pSys->pSema) == NV_OK)
+    {
+        //
+        // XXX Bug 1795328: Fix P2P path to acquire locks for the GPU
+        // Due to platform differences in the P2P path, the GPU lock is not
+        // consistently held at this point in the call stack. This function
+        // requires exclusive access to RM/PMU data structures to update HSHUB,
+        // and therefore requires the GPU lock to be held at this point.
+        // This check should be removed once the P2P paths have been updated to
+        // acquire the GPU locks consistently for all platforms.
+ // + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_NVLINK); + if (status != NV_OK) + { + NV_ASSERT(0); + goto fail; + } + + bOwnsLock = NV_TRUE; + } + + // + // Links that have remote end detected should have passed RXDET + // Update the mask of connected links and bridged links + // + knvlinkFilterBridgeLinks_HAL(pGpu, pKernelNvlink); + + NV2080_CTRL_NVLINK_UPDATE_CURRENT_CONFIG_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + + // Reset timeout to clear any accumulated timeouts from link init + if (IS_GSP_CLIENT(pGpu)) + { + threadStateResetTimeout(pGpu); + } + + // + // RPC into GSP-RM for programming the HSHUB, CONNECTION_CFG and LTCS + // registers. + // + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_UPDATE_CURRENT_CONFIG, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Updating current NVLink config failed\n"); + goto fail; + } + + // Sync the GPU property for NVLINK over SYSMEM with GSP-RM + pGpu->setProperty(pGpu, PDB_PROP_GPU_NVLINK_SYSMEM, params.bNvlinkSysmemEnabled); + + // Update the PCE-LCE mappings + for (i = 0; i < ENG_CE__SIZE_1; i++) + { + pKCe = GPU_GET_KCE(pGpu, i); + if (pKCe) + { + status = kceTopLevelPceLceMappingsUpdate(pGpu, pKCe); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to update PCE-LCE mappings\n"); + } + break; + } + } + +fail: + if (bOwnsLock) + { + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + osReleaseRmSema(pSys->pSema, NULL); + } + + return status; +} + +/*! + * @brief Return the mask of links enabled on the system + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NvU32 +knvlinkGetEnabledLinkMask_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + return pKernelNvlink->enabledLinks; +} + +/*! + * @brief Return the mask of links discovered on the system + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NvU32 +knvlinkGetDiscoveredLinkMask_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + return pKernelNvlink->discoveredLinks; +} + +/*! + * @brief Returns the number of sysmem links + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return The #sysmem NVLinks + */ +NvU32 +knvlinkGetNumLinksToSystem_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NvU32 numSysmemLinks = pKernelNvlink->sysmemLinkMask; + + if (numSysmemLinks != 0) + { + NUMSETBITS_32(numSysmemLinks); + } + + return numSysmemLinks; +} + +/*! + * @brief Returns number of peer links to a remote GPU + * + * @param[in] pGpu OBJGPU pointer of local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] pRemoteGpu OBJGPU pointer of remote GPU + * + * @return The #peer NVLinks to the remote GPU + */ +NvU32 +knvlinkGetNumLinksToPeer_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + OBJGPU *pRemoteGpu +) +{ + NvU32 numPeerLinks = + knvlinkGetLinkMaskToPeer(pGpu, pKernelNvlink, pRemoteGpu); + + if (numPeerLinks != 0) + { + NUMSETBITS_32(numPeerLinks); + } + + return numPeerLinks; +} + +/*! 
+ * @brief Gets the mask of peer links between the GPUs + * + * @param[in] pGpu0 OBJGPU pointer + * @param[in] pKernelNvlink0 Nvlink pointer + * @param[in] pGpu1 Remote OBJGPU pointer + * + * @return Returns the mask of peer links between the GPUs + */ +NvU32 +knvlinkGetLinkMaskToPeer_IMPL +( + OBJGPU *pGpu0, + KernelNvlink *pKernelNvlink0, + OBJGPU *pGpu1 +) +{ + NvU32 peerLinkMask = 0; + + if (!knvlinkIsForcedConfig(pGpu0, pKernelNvlink0)) + { + // + // If nvlink topology is not forced, then the hshub registers + // are updated only when a P2P object is allocated. So, return + // the cached value of mask of links connected to a GPU + // + peerLinkMask = pKernelNvlink0->peerLinkMasks[gpuGetInstance(pGpu1)]; + } + + return peerLinkMask; +} + +/*! + * @brief Sets the mask of peer links between the GPUs + * + * @param[in] pGpu0 OBJGPU pointer + * @param[in] pKernelNvlink0 Nvlink pointer + * @param[in] pGpu1 Remote OBJGPU pointer + * @param[in] peerLinkMask Mask of links to the peer GPU + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkSetLinkMaskToPeer_IMPL +( + OBJGPU *pGpu0, + KernelNvlink *pKernelNvlink0, + OBJGPU *pGpu1, + NvU32 peerLinkMask +) +{ + NV_STATUS status = NV_OK; + + // Return early if no update needed to the peer link mask + if (pKernelNvlink0->peerLinkMasks[gpuGetInstance(pGpu1)] == peerLinkMask) + return NV_OK; + + pKernelNvlink0->peerLinkMasks[gpuGetInstance(pGpu1)] = peerLinkMask; + + NV2080_CTRL_NVLINK_UPDATE_PEER_LINK_MASK_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.gpuInst = gpuGetInstance(pGpu1); + params.peerLinkMask = peerLinkMask; + + // Reset timeout to clear any accumulated timeouts from link init + if (IS_GSP_CLIENT(pGpu0)) + { + threadStateResetTimeout(pGpu0); + } + + // Sync the peerLinkMask with GSP-RM + status = knvlinkExecGspRmRpc(pGpu0, pKernelNvlink0, + NV2080_CTRL_CMD_NVLINK_UPDATE_PEER_LINK_MASK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to sync peerLinksMask from GPU%d to GPU%d\n", + gpuGetInstance(pGpu0), gpuGetInstance(pGpu1)); + return status; + } + + return NV_OK; +} + +/*! + * @brief Get the mask of links that are peer links + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NvU32 +knvlinkGetPeersNvlinkMaskFromHshub_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + NvU32 peerLinkMask = 0; + NvU32 i; + + NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.linkMask = pKernelNvlink->enabledLinks; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_GET_LINK_AND_CLOCK_INFO, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + return 0; + + // Scan enabled links for peer connections + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->enabledLinks) + { + if (params.linkInfo[i].bLinkConnectedToPeer) + peerLinkMask |= NVBIT(i); + } + FOR_EACH_INDEX_IN_MASK_END; + + return peerLinkMask; +} + +/*! + * @brief Prepare a GPU's NVLink engine for reset by removing mappings + * to it from other GPUs. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * return NV_OK on success + */ +NV_STATUS +knvlinkPrepareForXVEReset_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS retStatus = NV_OK; + OBJGPU *pRemoteGpu; + NV_STATUS status; + NvU32 gpuInstance; + NvU32 gpuMask; + + // This is not supported on forced configs + if (knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + return NV_OK; + } + + // + // Let fabric manager handle link shutdown/reset if the fabric is managed + // externally. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + NV_PRINTF(LEVEL_INFO, + "NVLink fabric is externally managed, skipping\n"); + return NV_OK; + } + + status = gpumgrGetGpuAttachInfo(NULL, &gpuMask); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + gpuInstance = 0; + while ((pRemoteGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + KernelNvlink *pRemoteKernelNvlink = GPU_GET_KERNEL_NVLINK(pRemoteGpu); + + if ((pRemoteGpu == pGpu) || (pRemoteKernelNvlink == NULL) || + (knvlinkGetNumLinksToPeer(pRemoteGpu, pRemoteKernelNvlink, pGpu) == 0) || + API_GPU_IN_RESET_SANITY_CHECK(pRemoteGpu)) + { + continue; + } + + // + // Reset the peer masks in HSHUB of the remote GPU. Partial resets + // (only removing the links connected to the GPU being reset) don't + // appear to be sufficient. The reset will work fine, but the next + // time we attempt to initialize this GPU, the copy engines will time + // out while scrubbing FB and a GPU sysmembar (NV_UFLUSH_FB_FLUSH) will + // fail to complete. + // + // The above symptoms haven't been root-caused (yet), but the current + // POR for GPU reset is that once one GPU is reset, the others + // connected to it over NVLink must also be reset before using NVLink + // for peer traffic, so just use the big hammer and squash all HSHUB + // configs on GPU reset. + // + // This allows us to reset the GPUs one by one, with GPU + // initializations in between, without hanging up the GPU trying to + // flush data over links that aren't available anymore. + // + status = knvlinkRemoveMapping_HAL(pRemoteGpu, pRemoteKernelNvlink, NV_FALSE, + ((1 << NVLINK_MAX_PEERS_SW) - 1), + NV_FALSE /* bL2Entry */); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to reset HSHUB on GPU%u while preparing for GPU%u XVE reset (0x%x)\n", + gpuGetInstance(pRemoteGpu), gpuGetInstance(pGpu), + status); + + retStatus = (retStatus == NV_OK) ? status : retStatus; + } + } + + // Remove all NVLink mappings in HSHUB config registers to init values + status = knvlinkRemoveMapping_HAL(pGpu, pKernelNvlink, NV_TRUE, ((1 << NVLINK_MAX_PEERS_SW) - 1), + NV_FALSE /* bL2Entry */); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to reset HSHUB on GPU%u while preparing XVE reset: %s (0x%x)\n", + gpuGetInstance(pGpu), nvstatusToString(status), status); + + retStatus = (retStatus == NV_OK) ? status : retStatus; + } + + // Pseudo-clean shutdown the links from this GPU + status = knvlinkCoreShutdownDeviceLinks(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to shutdown links on GPU%u while preparing XVE reset: %s (0x%x)\n", + gpuGetInstance(pGpu), nvstatusToString(status), status); + + retStatus = (retStatus == NV_OK) ? 
status : retStatus;
+    }
+
+    //
+    // Reset links related to this device and its peers (see Bug 2346447)
+    // The property is disabled on Pascal, since the path hasn't been verified
+    // and link reset after pseudo-clean shutdown results in DL and TL errors.
+    //
+    if (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_LINKRESET_AFTER_SHUTDOWN))
+    {
+        status = knvlinkCoreResetDeviceLinks(pGpu, pKernelNvlink);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR,
+                      "failed to reset links on GPU%u while preparing XVE reset: %s (0x%x)\n",
+                      gpuGetInstance(pGpu), nvstatusToString(status), status);
+
+            retStatus = (retStatus == NV_OK) ? status : retStatus;
+        }
+
+        //
+        // knvlinkCoreResetDeviceLinks() only resets the links which have
+        // connectivity.
+        // Pre-Ampere, we may run into a situation where the PLL
+        // sharing partner links (both) may not be reset due to no connectivity.
+        //
+        // Hence, (re-)reset all the links to recover them after shutdown (pre-Ampere)
+        //
+        NV2080_CTRL_NVLINK_RESET_LINKS_PARAMS resetLinksparams;
+
+        portMemSet(&resetLinksparams, 0, sizeof(resetLinksparams));
+        resetLinksparams.linkMask = pKernelNvlink->enabledLinks;
+        resetLinksparams.flags    = NV2080_CTRL_NVLINK_RESET_FLAGS_TOGGLE;
+
+        status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink,
+                                     NV2080_CTRL_CMD_NVLINK_RESET_LINKS,
+                                     (void *)&resetLinksparams, sizeof(resetLinksparams));
+
+        retStatus = (retStatus == NV_OK) ? status : retStatus;
+    }
+
+    return retStatus;
+}
+
+/*!
+ * @brief Set the power features supported on this NVLink IP
+ *
+ * @param[in] pGpu           OBJGPU pointer
+ * @param[in] pKernelNvlink  KernelNvlink pointer
+ */
+void
+knvlinkSetPowerFeatures_IMPL
+(
+    OBJGPU       *pGpu,
+    KernelNvlink *pKernelNvlink
+)
+{
+    // Get the IP version from the first available IOCTRL.
+    switch (pKernelNvlink->ipVerNvlink)
+    {
+        case NVLINK_VERSION_22:
+        {
+            // Regkeys ultimately decide whether or not the power state is supported
+            pKernelNvlink->setProperty(pKernelNvlink, PDB_PROP_KNVLINK_SINGLE_LANE_POWER_STATE_ENABLED,
+                                       (pKernelNvlink->bDisableSingleLaneMode ? NV_FALSE : NV_TRUE));
+
+            // NVLink L2 is supported only on MODS and Windows LDDM
+            if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM || RMCFG_FEATURE_PLATFORM_MODS)
+            {
+                pKernelNvlink->setProperty(pKernelNvlink, PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED,
+                                           (pKernelNvlink->bDisableL2Mode ? NV_FALSE : NV_TRUE));
+            }
+
+            break;
+        }
+        case NVLINK_VERSION_31:
+        case NVLINK_VERSION_30:
+        case NVLINK_VERSION_20:
+        {
+            // Regkeys ultimately decide whether or not the power state is supported
+            pKernelNvlink->setProperty(pKernelNvlink, PDB_PROP_KNVLINK_SINGLE_LANE_POWER_STATE_ENABLED,
+                                       (pKernelNvlink->bDisableSingleLaneMode ? NV_FALSE : NV_TRUE));
+            break;
+        }
+        default:
+            break;
+    }
+}
+
+/*!
+ * @brief Checks if NVSWITCH_FABRIC_ADDR field is valid.
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +void +knvlinkDetectNvswitchProxy_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status = NV_OK; + NvU32 i; + + // Initialize fabricBaseAddr to NVLINK_INVALID_FABRIC_ADDR + pKernelNvlink->fabricBaseAddr = NVLINK_INVALID_FABRIC_ADDR; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT) || + pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED)) + { + return; + } + + if (pKernelNvlink->discoveredLinks == 0) + { + return; + } + + // Get the link train status for the enabled link masks + NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS linkTrainedParams; + + portMemSet(&linkTrainedParams, 0, sizeof(linkTrainedParams)); + linkTrainedParams.linkMask = pKernelNvlink->enabledLinks; + linkTrainedParams.bActiveOnly = NV_FALSE; + + // Reset timeout to clear any accumulated timeouts from link init + if (IS_GSP_CLIENT(pGpu)) + { + threadStateResetTimeout(pGpu); + } + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_ARE_LINKS_TRAINED, + (void *)&linkTrainedParams, sizeof(linkTrainedParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get the link train status for links\n"); + return; + } + + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->enabledLinks) + { + if (!linkTrainedParams.bIsLinkActive[i]) + { + return; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.bGet = NV_TRUE; + params.addr = NVLINK_INVALID_FABRIC_ADDR; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get fabric address for GPU %x\n", + pGpu->gpuInstance); + return; + } + + if (params.addr != NVLINK_INVALID_FABRIC_ADDR) + { + pKernelNvlink->fabricBaseAddr = params.addr; + pKernelNvlink->bNvswitchProxy = NV_TRUE; + } +} + +/*! + * @brief Sets NVSWITCH_FLA_ADDR field in the scratch register. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] addr FLA addr + * + * @return Returns NV_OK upon success. + * Otherwise, returns NV_ERR_XXX. + */ +NV_STATUS +knvlinkSetNvswitchFlaAddr_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU64 addr +) +{ + return NV_OK; +} + +/*! + * @brief Gets NVSWITCH_FLA_ADDR field from the scratch register. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return Returns the stashed FLA starting address. + */ +NvU64 +knvlinkGetNvswitchFlaAddr_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + return 0; +} + +/*! + * @brief Checks if fabricBaseAddr is valid. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return Returns true if the fabricBaseAddr is valid. + */ +NvBool +knvlinkIsNvswitchProxyPresent_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + return pKernelNvlink->bNvswitchProxy; +} + + +/*! + * @brief Set unique FLA base address for NVSwitch enabled systems. + * Validates FLA base address and programs the base address + * in switch scratch registers for guest VM to pick it up. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] flaBaseAddr NvU64 base address + * + * @returns On success, sets unique FLA base address and returns NV_OK. + * On failure, returns NV_ERR_XXX. + */ +NV_STATUS +knvlinkSetUniqueFlaBaseAddress_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU64 flaBaseAddr +) +{ + NV_STATUS status = NV_OK; + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NV2080_CTRL_NVLINK_GET_SET_NVSWITCH_FLA_ADDR_PARAMS params; + + if (!knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink); + + status = knvlinkEnableLinksPostTopology_HAL(pGpu, pKernelNvlink, + pKernelNvlink->enabledLinks); + if (status != NV_OK) + { + return status; + } + } + + status = kbusValidateFlaBaseAddress_HAL(pGpu, pKernelBus, flaBaseAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "FLA base addr validation failed for GPU %x\n", + pGpu->gpuInstance); + return status; + } + + if (IsSLIEnabled(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, + "Operation is unsupported on SLI enabled GPU %x\n", + pGpu->gpuInstance); + return NV_ERR_NOT_SUPPORTED; + } + + portMemSet(¶ms, 0, sizeof(params)); + params.bGet = NV_FALSE; + params.addr = flaBaseAddr; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_GET_SET_NVSWITCH_FLA_ADDR, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to stash fla base address for GPU %x\n", + pGpu->gpuInstance); + return status; + } + + NV_PRINTF(LEVEL_INFO, "FLA base addr %llx is assigned to GPU %x\n", + flaBaseAddr, pGpu->gpuInstance); + + return NV_OK; +} + +/*! + * @brief Synchronize the link masks and vbios defined properties + * between CPU and GSP-RMs + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NV_STATUS +knvlinkSyncLinkMasksAndVbiosInfo_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + params.discoveredLinks = pKernelNvlink->discoveredLinks; + params.connectedLinksMask = pKernelNvlink->connectedLinksMask; + params.bridgeSensableLinks = pKernelNvlink->bridgeSensableLinks; + params.bridgedLinks = pKernelNvlink->bridgedLinks; + + // Reset timeout to clear any accumulated timeouts from link init + if (IS_GSP_CLIENT(pGpu)) + { + threadStateResetTimeout(pGpu); + } + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_SYNC_LINK_MASKS_AND_VBIOS_INFO, + (void *)¶ms, sizeof(params)); + + pKernelNvlink->vbiosDisabledLinkMask = params.vbiosDisabledLinkMask; + pKernelNvlink->initializedLinks = params.initializedLinks; + pKernelNvlink->initDisabledLinksMask = params.initDisabledLinksMask; + pKernelNvlink->bEnableSafeModeAtLoad = params.bEnableSafeModeAtLoad; + pKernelNvlink->bEnableTrainingAtLoad = params.bEnableTrainingAtLoad; + + return status; +} + +/*! + * @brief Update link connection status. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Target link Id + */ +NV_STATUS +knvlinkUpdateLinkConnectionStatus_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_UPDATE_LINK_CONNECTION_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + params.linkId = linkId; + +#if defined(INCLUDE_NVLINK_LIB) + + params.bConnected = pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected; + params.remoteDeviceType = pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType; + params.remoteLinkNumber = pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.linkNumber; + +#endif + + // Reset timeout to clear any accumulated timeouts from link init + if (IS_GSP_CLIENT(pGpu)) + { + threadStateResetTimeout(pGpu); + } + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_UPDATE_LINK_CONNECTION, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to update Link connection status!\n"); + return status; + } + + return NV_OK; +} + +/*! + * @brief Update the post Rx Detect link mask. + * + * @param[in] pGpu OBJGPU pointer for local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + * + */ +NV_STATUS +knvlinkUpdatePostRxDetectLinkMask_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_GET_LINK_MASK_POST_RX_DET_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_GET_LINK_MASK_POST_RX_DET, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to update Rx Detect Link mask!\n"); + return status; + } + + pKernelNvlink->postRxDetLinkMask = params.postRxDetLinkMask; + + return NV_OK; +} + +/*! + * @brief Copy over the NVLink devices information from GSP-RM. 
+ * + * @param[in] pGpu OBJGPU pointer for local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NV_STATUS +knvlinkCopyNvlinkDeviceInfo_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + NV2080_CTRL_NVLINK_GET_NVLINK_DEVICE_INFO_PARAMS nvlinkInfoParams; + + portMemSet(&nvlinkInfoParams, 0, sizeof(nvlinkInfoParams)); + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_DEVICE_INFO, + (void *)&nvlinkInfoParams, sizeof(nvlinkInfoParams)); + + if (status == NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_WARNING, "NVLink is unavailable\n"); + return status; + } + else if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to retrieve all nvlink device info!\n"); + return status; + } + + // Update CPU-RM's NVLink state with the information received from GSP-RM RPC + pKernelNvlink->ioctrlMask = nvlinkInfoParams.ioctrlMask; + pKernelNvlink->ioctrlNumEntries = nvlinkInfoParams.ioctrlNumEntries; + pKernelNvlink->ioctrlSize = nvlinkInfoParams.ioctrlSize; + pKernelNvlink->discoveredLinks = nvlinkInfoParams.discoveredLinks; + pKernelNvlink->ipVerNvlink = nvlinkInfoParams.ipVerNvlink; + + for (i = 0; i < NVLINK_MAX_LINKS_SW; i++) + { + pKernelNvlink->nvlinkLinks[i].pGpu = pGpu; + pKernelNvlink->nvlinkLinks[i].bValid = nvlinkInfoParams.linkInfo[i].bValid; + pKernelNvlink->nvlinkLinks[i].linkId = nvlinkInfoParams.linkInfo[i].linkId; + pKernelNvlink->nvlinkLinks[i].ioctrlId = nvlinkInfoParams.linkInfo[i].ioctrlId; + + // Copy over the link PLL master and slave relationship for each link + pKernelNvlink->nvlinkLinks[i].pllMasterLinkId = nvlinkInfoParams.linkInfo[i].pllMasterLinkId; + pKernelNvlink->nvlinkLinks[i].pllSlaveLinkId = nvlinkInfoParams.linkInfo[i].pllSlaveLinkId; + + // Copy over the ip versions for DLPL devices discovered + pKernelNvlink->nvlinkLinks[i].ipVerDlPl = nvlinkInfoParams.linkInfo[i].ipVerDlPl; + } + + return NV_OK; +} + +/*! + * @brief Copy over the Ioctrl devices information from GSP-RM. 
+ * + * @param[in] pGpu OBJGPU pointer for local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NV_STATUS +knvlinkCopyIoctrlDeviceInfo_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + KernelIoctrl *pKernelIoctrl = NULL; + NV_STATUS status = NV_OK; + NvU32 ioctrlIdx; + + NV2080_CTRL_NVLINK_GET_IOCTRL_DEVICE_INFO_PARAMS ioctrlInfoParams; + + // Query the IOCTRL information for each of the IOCTRLs discovered + FOR_EACH_INDEX_IN_MASK(32, ioctrlIdx, pKernelNvlink->ioctrlMask) + { + portMemSet(&ioctrlInfoParams, 0, sizeof(ioctrlInfoParams)); + + ioctrlInfoParams.ioctrlIdx = ioctrlIdx; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_GET_IOCTRL_DEVICE_INFO, + (void *)&ioctrlInfoParams, sizeof(ioctrlInfoParams)); + + if (status == NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_WARNING, "NVLink is unavailable\n"); + return status; + } + else if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to retrieve device info for IOCTRL %d!\n", ioctrlIdx); + return status; + } + + pKernelIoctrl = KNVLINK_GET_IOCTRL(pKernelNvlink, ioctrlIdx); + + // Update CPU-RM's NVLink state with the information received from GSP-RM RPC + pKernelIoctrl->PublicId = ioctrlInfoParams.PublicId; + pKernelIoctrl->localDiscoveredLinks = ioctrlInfoParams.localDiscoveredLinks; + pKernelIoctrl->localGlobalLinkOffset = ioctrlInfoParams.localGlobalLinkOffset; + pKernelIoctrl->ioctrlDiscoverySize = ioctrlInfoParams.ioctrlDiscoverySize; + pKernelIoctrl->numDevices = ioctrlInfoParams.numDevices; + + // Copy over the ip versions for the ioctrl and minion devices discovered + pKernelIoctrl->ipVerIoctrl = ioctrlInfoParams.ipRevisions.ipVerIoctrl; + pKernelIoctrl->ipVerMinion = ioctrlInfoParams.ipRevisions.ipVerMinion; + + if (pKernelIoctrl->ipVerMinion == 0) + { + pKernelIoctrl->setProperty(pKernelIoctrl, PDB_PROP_KIOCTRL_MINION_AVAILABLE, NV_FALSE); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return NV_OK; +} + +/** + * @brief Setup topology information for the forced nvlink configurations + * + * @param[in] pGpu OBJGPU pointer for local GPU + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NV_STATUS +knvlinkSetupTopologyForForcedConfig_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + NvU32 i, physLink; + + // Start with all links disabled and no forced config in effect + pKernelNvlink->bRegistryLinkOverride = NV_TRUE; + pKernelNvlink->registryLinkMask = 0; + pKernelNvlink->bChiplibConfig = NV_FALSE; + + for (i = 0; i < NVLINK_MAX_LINKS_SW; i++) + { + // Filter against the links discovered from IOCTRL + if (!(pKernelNvlink->discoveredLinks & NVBIT(i))) + continue; + + // The physical link is guaranteed valid in all cases + physLink = DRF_VAL(_NVLINK, _ARCH_CONNECTION, _PHYSICAL_LINK, pKernelNvlink->pLinkConnection[i]); + + // Update link tracking + if (DRF_VAL(_NVLINK, _ARCH_CONNECTION, _ENABLED, pKernelNvlink->pLinkConnection[i])) + { + NV_PRINTF(LEVEL_INFO, + "ARCH_CONNECTION info from chiplib: ENABLED Logical link %d (Physical " + "link %d) = 0x%X\n", i, physLink, + pKernelNvlink->pLinkConnection[i]); + + // + // This "link" should be ENABLED. We use the physical link since RM only deals with + // physical links. 
+ // + pKernelNvlink->registryLinkMask |= NVBIT(physLink); + + // Config is forced (at least one link requested) + pKernelNvlink->bChiplibConfig = NV_TRUE; + } + else + { + NV_PRINTF(LEVEL_INFO, + "ARCH_CONNECTION info from chiplib: DISABLED Logical link %d (Physical " + "link %d) = 0x%X\n", i, physLink, + pKernelNvlink->pLinkConnection[i]); + } + + // Accumulate any PEER links + if (DRF_VAL(_NVLINK, _ARCH_CONNECTION, _PEER_MASK, pKernelNvlink->pLinkConnection[i])) + { +#if defined(INCLUDE_NVLINK_LIB) + // Ensure reginit has the info it needs for the remote side + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bConnected = NV_TRUE; + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.deviceType = + NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU; + +#endif + } + + // Accumulate any CPU links + if (DRF_VAL(_NVLINK, _ARCH_CONNECTION, _CPU, pKernelNvlink->pLinkConnection[i])) + { +#if defined(INCLUDE_NVLINK_LIB) + // Ensure reginit has the info it needs for the remote side + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bConnected = NV_TRUE; + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.deviceType = pKernelNvlink->forcedSysmemDeviceType; +#endif + } + + // RPC into GSP-RM to update the link remote connection status + status = knvlinkUpdateLinkConnectionStatus(pGpu, pKernelNvlink, i); + if (status != NV_OK) + { + return status; + } + } + + // Update enabledLinks mask with the mask of forced link configurations + pKernelNvlink->enabledLinks = pKernelNvlink->discoveredLinks & pKernelNvlink->registryLinkMask; + + return NV_OK; +} + +/*! + * @brief Sync the lane shutdown properties with GSP-RM + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NV_STATUS +knvlinkSyncLaneShutdownProps_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + params.bLaneShutdownEnabled = + pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED); + params.bLaneShutdownOnUnload = + pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD); + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_SYNC_NVLINK_SHUTDOWN_PROPS, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to sync NVLink shutdown properties with GSP!\n"); + return status; + } + + return NV_OK; +} + +/*! + * @brief Set unique fabric address for NVSwitch enabled systems. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] fabricBaseAddr Fabric Address to set + * + * @returns On success, sets unique fabric address and returns NV_OK. + * On failure, returns NV_ERR_XXX. 
+ */ +NV_STATUS +knvlinkSetUniqueFabricBaseAddress_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU64 fabricBaseAddr +) +{ + NV_STATUS status = NV_OK; + + if (!knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink); + + knvlinkEnableLinksPostTopology_HAL(pGpu, pKernelNvlink, + pKernelNvlink->enabledLinks); + } + + if (!knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink)) + { + NV_PRINTF(LEVEL_ERROR, + "Operation failed due to no NVSwitch connectivity to the " + "GPU %x\n", pGpu->gpuInstance); + return NV_ERR_INVALID_STATE; + } + + status = knvlinkValidateFabricBaseAddress_HAL(pGpu, pKernelNvlink, + fabricBaseAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Fabric addr validation failed for GPU %x\n", + pGpu->gpuInstance); + return status; + } + + if (IsSLIEnabled(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, + "Operation is unsupported on SLI enabled GPU %x\n", + pGpu->gpuInstance); + return NV_ERR_NOT_SUPPORTED; + } + + if (pKernelNvlink->fabricBaseAddr == fabricBaseAddr) + { + NV_PRINTF(LEVEL_INFO, + "The same fabric addr is being re-assigned to GPU %x\n", + pGpu->gpuInstance); + return NV_OK; + } + + if (pKernelNvlink->fabricBaseAddr != NVLINK_INVALID_FABRIC_ADDR) + { + NV_PRINTF(LEVEL_ERROR, "Fabric addr is already assigned to GPU %x\n", + pGpu->gpuInstance); + return NV_ERR_STATE_IN_USE; + } + + // + // Update GMMU peer field descriptor. + // We can safely defer reinitialization of peer field descriptor to this + // call because RM doesn't allow any P2P operations until FM assigns fabric + // addresses. + // + NV2080_CTRL_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.bGet = NV_FALSE; + params.addr = fabricBaseAddr; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_INTERNAL_NVLINK_GET_SET_NVSWITCH_FABRIC_ADDR, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to stash fabric address for GPU %x\n", + pGpu->gpuInstance); + return status; + } + + pKernelNvlink->fabricBaseAddr = fabricBaseAddr; + + NV_PRINTF(LEVEL_ERROR, "Fabric base addr %llx is assigned to GPU %x\n", + pKernelNvlink->fabricBaseAddr, pGpu->gpuInstance); + + return NV_OK; +} + +/** + * @brief Process the mask of init disabled links + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NV_STATUS +knvlinkProcessInitDisabledLinks_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NvU32 mask = 0; + NvBool bSkipHwNvlinkDisable = 0; + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_PROCESS_INIT_DISABLED_LINKS_PARAMS params; + + status = gpumgrGetGpuInitDisabledNvlinks(pGpu->gpuId, &mask, &bSkipHwNvlinkDisable); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get init disabled links from gpumgr\n"); + return status; + } + + portMemSet(¶ms, 0, sizeof(params)); + + params.initDisabledLinksMask = mask; + params.bSkipHwNvlinkDisable = bSkipHwNvlinkDisable; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROCESS_INIT_DISABLED_LINKS, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to process init disabled links in GSP\n"); + return status; + } + + pKernelNvlink->initDisabledLinksMask = params.initDisabledLinksMask; + + return NV_OK; +} + +// Grab GPU locks before RPCing into GSP-RM for NVLink RPCs +NV_STATUS +knvlinkExecGspRmRpc_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 cmd, + void 
*paramAddr, + NvU32 paramSize +) +{ + NvU32 gpuMaskRelease = 0; + NvU32 gpuMaskInitial = rmGpuLocksGetOwnedMask(); + NvU32 gpuMask = gpuMaskInitial | NVBIT(pGpu->gpuInstance); + NV_STATUS status = NV_OK; + + if (IS_GSP_CLIENT(pGpu)) + { + if (!rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_MASK, &gpuMask)) + { + status = rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_MASK, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, + RM_LOCK_MODULES_NVLINK, + &gpuMask); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to acquire locks for gpumask 0x%x\n", gpuMask); + return status; + } + + gpuMaskRelease = (gpuMask & (~gpuMaskInitial)); + } + } + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + cmd, paramAddr, paramSize); + if (gpuMaskRelease) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + + return status; +} + +void +knvlinkUtoa(NvU8 *str, NvU64 length, NvU64 val) +{ + NvU8 temp[NV2080_GPU_MAX_NAME_STRING_LENGTH]; + NvU8 *ptr = temp; + NvU64 i = 0; + + NV_ASSERT(str != NULL); + + do + { + i = val % 10; + val = val / 10; + *ptr++ = (NvU8)(i + '0'); + } while(val); + + NV_ASSERT(length > (NvU64) (ptr - temp)); + + while (ptr > temp) + *str++ = *--ptr; + + *str = '\0'; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkapi.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkapi.c new file mode 100644 index 000000000..3bc34dfd5 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkapi.c @@ -0,0 +1,813 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2026-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os/os.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "vgpu/rpc.h" +#include "nvRmReg.h" + +// +// subdeviceCtrlCmdBusGetNvlinkCaps +// Get the Nvlink global capabilities +// +NV_STATUS +subdeviceCtrlCmdBusGetNvlinkCaps_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + return knvlinkCtrlCmdBusGetNvlinkCaps(pGpu, pParams); +} + +// +// knvlinkCtrlCmdBusGetNvlinkCaps +// Inner function of subdeviceCtrlCmdBusGetNvlinkCaps for internal RM direct function call +// Get the Nvlink global capabilities +// +NV_STATUS +knvlinkCtrlCmdBusGetNvlinkCaps +( + OBJGPU *pGpu, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_CAPS_PARAMS *pParams +) +{ + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + NV_STATUS status = NV_OK; + NvBool bMIGNvLinkP2PSupported = ((pKernelMIGManager != NULL) && + kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)); + NvU8 tempCaps[NV2080_CTRL_NVLINK_CAPS_TBL_SIZE]; + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to get blacklist information from host RM + // + if (IS_VIRTUAL(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, pRmCtrlParams->hClient, pRmCtrlParams->hObject, pRmCtrlParams->cmd, + pRmCtrlParams->pParams, pRmCtrlParams->paramsSize, status); + return status; + } + + portMemSet(tempCaps, 0, NV2080_CTRL_NVLINK_CAPS_TBL_SIZE); + + // Initialize link masks to 0 + pParams->enabledLinkMask = 0; + pParams->discoveredLinkMask = 0; + + if (pKernelNvlink == NULL) + { + NV_PRINTF(LEVEL_INFO, "NVLink is unavailable. Returning.\n"); + status = NV_OK; + return status; + } + + // With MIG memory partitioning, NvLink P2P or sysmem accesses are not allowed + if (bMIGNvLinkP2PSupported) + { + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SUPPORTED); + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SYSMEM_ACCESS); + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _P2P_SUPPORTED); + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _P2P_ATOMICS); + + // + // Some links might have passed receiver detect (bridge is present), + // but might have failed to transition to safe mode (marginal links) + // Update connectedLinks and bridgedLinks mask for these links + // + knvlinkFilterBridgeLinks_HAL(pGpu, pKernelNvlink); + + // + // This GPU supports SLI bridge sensing if any of the links + // support bridge sensing. 
+ // + if (pKernelNvlink->bridgeSensableLinks) + { + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SLI_BRIDGE_SENSABLE); + } + + // This GPU has an SLI bridge if any of the links are bridged + if (pKernelNvlink->bridgedLinks) + { + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SLI_BRIDGE); + } + + // NVLink versions beyond the first support sysmem atomics + if (pKernelNvlink->ipVerNvlink != NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_1_0) + { + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SYSMEM_ATOMICS); + } + } + + switch (pKernelNvlink->ipVerNvlink) + { + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_1: + { + pParams->lowestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_1; + pParams->highestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_1; + pParams->lowestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_3_1; + pParams->highestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_3_1; + + // Supported power states + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L0); + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L2); + break; + } + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_0: + { + pParams->lowestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_0; + pParams->highestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_0; + pParams->lowestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_3_0; + pParams->highestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_3_0; + + // Supported power states + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L0); + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L2); + break; + } + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_2: + { + pParams->lowestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_2; + pParams->highestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_2; + pParams->lowestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_2_2; + pParams->highestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_2_2; + + // Supported power states + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L0); + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L2); + break; + } + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_0: + { + pParams->lowestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_0; + pParams->highestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_0; + pParams->lowestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_2_0; + pParams->highestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_2_0; + + // Supported power states + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L0); + break; + } + default: + { + pParams->lowestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_1_0; + pParams->highestNvlinkVersion = NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_1_0; + pParams->lowestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_1_0; + pParams->highestNciVersion = NV2080_CTRL_NVLINK_CAPS_NCI_VERSION_1_0; + + // Supported power states + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L0); + break; + } + } + + portMemCopy(&pParams->capsTbl, NV2080_CTRL_NVLINK_CAPS_TBL_SIZE, tempCaps, NV2080_CTRL_NVLINK_CAPS_TBL_SIZE); + + pParams->discoveredLinkMask = knvlinkGetDiscoveredLinkMask(pGpu, pKernelNvlink); + pParams->enabledLinkMask = knvlinkGetEnabledLinkMask(pGpu, pKernelNvlink); + + return status; +} + +// +// subdeviceCtrlCmdBusGetNvlinkStatus +// Get the Nvlink per link capabilities +// +NV_STATUS +subdeviceCtrlCmdBusGetNvlinkStatus_IMPL +( + Subdevice *pSubdevice, + 
NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + NvBool bMIGNvLinkP2PSupported = ((pKernelMIGManager != NULL) && + kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)); + OBJGPU *remotePeer0 = NULL; + NV_STATUS status = NV_OK; + NvU8 i = 0; + NvU8 tempCaps[NV2080_CTRL_NVLINK_CAPS_TBL_SIZE]; + NvU32 r = 0; + NvBool bPeerLink, bSysmemLink, bSwitchLink; + NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_INFO_PARAMS nvlinkLinkAndClockInfoParams; + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to get blacklist information from host RM + // + if (IS_VIRTUAL(pGpu)) + { + // RPC for this RmCtrl was implemented as an effort of enabling NVLINK P2P + // on vGPU. As NVLINK P2P is supported Pascal+ onwards, we return NOT_SUPPORTED + // pre-Pascal. + if (IsPASCALorBetter(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, pRmCtrlParams->hClient, pRmCtrlParams->hObject, pRmCtrlParams->cmd, + pRmCtrlParams->pParams, pRmCtrlParams->paramsSize, status); + return status; + } + else + { + return NV_ERR_NOT_SUPPORTED; + } + } + + // Initialize link mask to 0 + pParams->enabledLinkMask = 0; + + if ((pKernelNvlink == NULL) || !bMIGNvLinkP2PSupported) + { + NV_PRINTF(LEVEL_INFO, "NVLink is unavailable. Returning.\n"); + status = NV_OK; + return status; + } + + portMemSet(tempCaps, 0, NV2080_CTRL_NVLINK_CAPS_TBL_SIZE); + + pParams->enabledLinkMask = pKernelNvlink->enabledLinks; + + r = pParams->enabledLinkMask; + while (r >>= 1 ) i++; + + NV_ASSERT(i <= NV2080_CTRL_NVLINK_MAX_LINKS); + + // Get the remote ends of the links from the nvlink core + if (!knvlinkIsForcedConfig(pGpu, pKernelNvlink) && + !(IS_RTLSIM(pGpu) && !pKernelNvlink->bForceEnableCoreLibRtlsims)) + { + // Get the nvlink connections for this device from the core + knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink); + } + + // + // Some links might have passed receiver detect (bridge is present), + // but might have failed to transition to safe mode (marginal links) + // Update connectedLinks and bridgedLinks mask for these links + // + knvlinkFilterBridgeLinks_HAL(pGpu, pKernelNvlink); + + portMemSet(&nvlinkLinkAndClockInfoParams, 0, sizeof(nvlinkLinkAndClockInfoParams)); + + nvlinkLinkAndClockInfoParams.linkMask = pParams->enabledLinkMask; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_GET_LINK_AND_CLOCK_INFO, + (void *)&nvlinkLinkAndClockInfoParams, + sizeof(nvlinkLinkAndClockInfoParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to collect nvlink status info!\n"); + return status; + } + + FOR_EACH_INDEX_IN_MASK(32, i, pParams->enabledLinkMask) + { + bPeerLink = NV_FALSE; + bSysmemLink = NV_FALSE; + bSwitchLink = NV_FALSE; + NV2080_CTRL_NVLINK_GET_LINK_AND_CLOCK_VALUES *pLinkAndClockValues; + + pLinkAndClockValues = &nvlinkLinkAndClockInfoParams.linkInfo[i]; + + portMemSet(tempCaps, 0, NV2080_CTRL_NVLINK_CAPS_TBL_SIZE); + + (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_ENABLED)) ? 
+ (RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _VALID)) : 0; + + if (pLinkAndClockValues->bLinkConnectedToSystem) + { + // Tag as a Sysmem link + bSysmemLink = NV_TRUE; + + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SYSMEM_ACCESS); + + // NVLink versions beyond the first support sysmem atomics + if (pKernelNvlink->ipVerNvlink != NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_1_0 ) + { + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SYSMEM_ATOMICS); + } + } + + if (pLinkAndClockValues->bLinkConnectedToPeer) + { + // Tag as Peer link + bPeerLink = NV_TRUE; + + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _P2P_SUPPORTED); + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _P2P_ATOMICS); + } + + // Indicate per-link bridge sense status + if (pKernelNvlink->bridgeSensableLinks & NVBIT(i)) + { + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SLI_BRIDGE_SENSABLE); + } + + // Indicate per-link bridge status + if (pKernelNvlink->bridgedLinks & NVBIT(i)) + { + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _SLI_BRIDGE); + } + + // Set the power states caps + switch (pKernelNvlink->ipVerNvlink) + { + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_0: + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L0); + break; + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_1: + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_0: + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_2: + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L0); + if (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED)) + { + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L2); + } + break; + + default: + RMCTRL_SET_CAP(tempCaps, NV2080_CTRL_NVLINK_CAPS, _POWER_STATE_L0); + break; + } + + portMemCopy(&pParams->linkInfo[i].capsTbl, NV2080_CTRL_NVLINK_CAPS_TBL_SIZE, tempCaps, NV2080_CTRL_NVLINK_CAPS_TBL_SIZE); + + pParams->linkInfo[i].phyType = NV2080_CTRL_NVLINK_STATUS_PHY_NVHS; + pParams->linkInfo[i].subLinkWidth = pLinkAndClockValues->subLinkWidth; + pParams->linkInfo[i].linkState = pLinkAndClockValues->linkState; + pParams->linkInfo[i].txSublinkStatus = (NvU8) pLinkAndClockValues->txSublinkState; + pParams->linkInfo[i].rxSublinkStatus = (NvU8) pLinkAndClockValues->rxSublinkState; + + // Initialize the lane reversal state information for the link + pParams->linkInfo[i].bLaneReversal = pLinkAndClockValues->bLaneReversal; + + switch (pKernelNvlink->ipVerNvlink) + { + + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_1: + pParams->linkInfo[i].nvlinkVersion = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_3_1; + pParams->linkInfo[i].nciVersion = NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_3_1; + break; + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_3_0: + pParams->linkInfo[i].nvlinkVersion = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_3_0; + pParams->linkInfo[i].nciVersion = NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_3_0; + break; + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_2: + pParams->linkInfo[i].nvlinkVersion = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_2_2; + pParams->linkInfo[i].nciVersion = NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_2_2; + break; + case NV2080_CTRL_NVLINK_CAPS_NVLINK_VERSION_2_0: + pParams->linkInfo[i].nvlinkVersion = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_2_0; + pParams->linkInfo[i].nciVersion = NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_2_0; + break; + default: + pParams->linkInfo[i].nvlinkVersion = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_1_0; + pParams->linkInfo[i].nciVersion = NV2080_CTRL_NVLINK_STATUS_NCI_VERSION_1_0; + break; + } + 
pParams->linkInfo[i].phyVersion = NV2080_CTRL_NVLINK_STATUS_NVHS_VERSION_1_0;
+
+        // Initialize the connection information for the link
+        pParams->linkInfo[i].connected                   = NV2080_CTRL_NVLINK_STATUS_CONNECTED_FALSE;
+        pParams->linkInfo[i].remoteDeviceLinkNumber      = NV2080_CTRL_NVLINK_STATUS_REMOTE_LINK_NUMBER_INVALID;
+        pParams->linkInfo[i].remoteDeviceInfo.deviceType = NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NONE;
+        pParams->linkInfo[i].localDeviceLinkNumber       = i;
+        pParams->linkInfo[i].laneRxdetStatusMask         = pKernelNvlink->nvlinkLinks[i].laneRxdetStatusMask;
+
+        // Set the device information for the local end of the link
+        pParams->linkInfo[i].localDeviceInfo.domain      = gpuGetDomain(pGpu);
+        pParams->linkInfo[i].localDeviceInfo.bus         = gpuGetBus(pGpu);
+        pParams->linkInfo[i].localDeviceInfo.device      = gpuGetDevice(pGpu);
+        pParams->linkInfo[i].localDeviceInfo.function    = 0;
+        pParams->linkInfo[i].localDeviceInfo.pciDeviceId = pGpu->idInfo.PCIDeviceID;
+        pParams->linkInfo[i].localDeviceInfo.deviceType  = NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU;
+#if defined(INCLUDE_NVLINK_LIB)
+        if (pKernelNvlink->nvlinkLinks[i].core_link)
+        {
+            pParams->linkInfo[i].localLinkSid = pKernelNvlink->nvlinkLinks[i].core_link->localSid;
+        }
+#endif
+
+        // Record the local end of the link's deviceIdFlags
+        if (pGpu->idInfo.PCIDeviceID)
+        {
+            pParams->linkInfo[i].localDeviceInfo.deviceIdFlags =
+                FLD_SET_DRF(2080_CTRL_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS, _PCI,
+                            pParams->linkInfo[i].localDeviceInfo.deviceIdFlags);
+        }
+
+        //
+        // Get clock related state
+        // NOTE: to be deprecated; HW-terminology-conforming versions follow below
+        //
+        pParams->linkInfo[i].nvlinkLinkClockKHz        = pLinkAndClockValues->nvlinkLinkClockKHz;
+        pParams->linkInfo[i].nvlinkRefClkSpeedKHz      = nvlinkLinkAndClockInfoParams.nvlinkRefClkSpeedKHz;
+        pParams->linkInfo[i].nvlinkCommonClockSpeedKHz = pParams->linkInfo[i].nvlinkLinkClockKHz / 16;
+
+        pParams->linkInfo[i].nvlinkCommonClockSpeedMhz = pParams->linkInfo[i].nvlinkCommonClockSpeedKHz / 1000;
+
+        // Clock speed and Data rate info conforming with HW terminology
+        pParams->linkInfo[i].nvlinkLineRateMbps      = pLinkAndClockValues->nvlinkLineRateMbps;
+        pParams->linkInfo[i].nvlinkLinkClockMhz      = pLinkAndClockValues->nvlinkLinkClockMhz;
+        pParams->linkInfo[i].nvlinkLinkDataRateKiBps = pLinkAndClockValues->nvlinkLinkDataRateKiBps;
+        pParams->linkInfo[i].nvlinkRefClkType        = pLinkAndClockValues->nvlinkRefClkType;
+        pParams->linkInfo[i].nvlinkRefClkSpeedMhz    = pLinkAndClockValues->nvlinkReqLinkClockMhz;
+
+
+#if defined(INCLUDE_NVLINK_LIB)
+
+        if (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bConnected)
+        {
+            pParams->linkInfo[i].connected              = NV2080_CTRL_NVLINK_STATUS_CONNECTED_TRUE;
+            pParams->linkInfo[i].remoteDeviceLinkNumber = (NvU8) pKernelNvlink->nvlinkLinks[i].remoteEndInfo.linkNumber;
+            pParams->linkInfo[i].remoteLinkSid          = pKernelNvlink->nvlinkLinks[i].remoteEndInfo.chipSid;
+
+            // Set the device information for the remote end of the link
+            pParams->linkInfo[i].remoteDeviceInfo.domain      = pKernelNvlink->nvlinkLinks[i].remoteEndInfo.domain;
+            pParams->linkInfo[i].remoteDeviceInfo.bus         = pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bus;
+            pParams->linkInfo[i].remoteDeviceInfo.device      = pKernelNvlink->nvlinkLinks[i].remoteEndInfo.device;
+            pParams->linkInfo[i].remoteDeviceInfo.function    = pKernelNvlink->nvlinkLinks[i].remoteEndInfo.function;
+            pParams->linkInfo[i].remoteDeviceInfo.pciDeviceId = pKernelNvlink->nvlinkLinks[i].remoteEndInfo.pciDeviceId;
+            pParams->linkInfo[i].remoteDeviceInfo.deviceType  =
pKernelNvlink->nvlinkLinks[i].remoteEndInfo.deviceType; + + // Update the device Id flags for PCI + if (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.pciDeviceId) + { + pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags |= + FLD_SET_DRF(2080_CTRL_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS, _PCI, + pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags); + } + + // Check the PCI dbdf values to confirm the device on remote end + if (NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_PCI & pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags) + { + if (!knvlinkIsP2pLoopbackSupportedPerLink(pGpu, pKernelNvlink, i)) + { + pParams->linkInfo[i].loopProperty = NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_NONE; + continue; + } + } + + pParams->linkInfo[i].loopProperty = pParams->linkInfo[i].remoteDeviceLinkNumber == i ? + NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_LOOPBACK : + NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_LOOPOUT; + } +#endif + + // Per-link ForceConfig handling (non-legacy Arch ForceConfig only) + if (knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_EMULATION)) + { + pParams->linkInfo[i].linkState = NV2080_CTRL_NVLINK_STATUS_LINK_STATE_ACTIVE; + pParams->linkInfo[i].rxSublinkStatus = NV2080_CTRL_NVLINK_STATUS_SUBLINK_RX_STATE_HIGH_SPEED_1; + pParams->linkInfo[i].txSublinkStatus = NV2080_CTRL_NVLINK_STATUS_SUBLINK_TX_STATE_HIGH_SPEED_1; + } + + pParams->linkInfo[i].connected = NV_TRUE; + pParams->linkInfo[i].loopProperty = NV_FALSE; + pParams->linkInfo[i].remoteDeviceLinkNumber = i; + if (!pParams->linkInfo[i].nvlinkLinkClockMhz) + pParams->linkInfo[i].nvlinkLinkClockMhz = pLinkAndClockValues->nvlinkReqLinkClockMhz; + + // Expose remote device as EBRIDGE if forced only sysmem + if (bSysmemLink && !bPeerLink && !bSwitchLink) + { + pParams->linkInfo[i].remoteDeviceInfo.domain = 0; + pParams->linkInfo[i].remoteDeviceInfo.bus = FORCED_SYSMEM_PCI_BUS; + pParams->linkInfo[i].remoteDeviceInfo.device = 0; + pParams->linkInfo[i].remoteDeviceInfo.function = i; + pParams->linkInfo[i].remoteDeviceInfo.pciDeviceId = FORCED_SYSMEM_DEVICE_ID; + pParams->linkInfo[i].remoteDeviceInfo.deviceType = FORCED_SYSMEM_DEVICE_TYPE; + + pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags |= + FLD_SET_DRF(2080_CTRL_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS, _PCI, + pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags); + } + + // Expose remote device as GPU if forced only peer + if (bPeerLink && !bSysmemLink && !bSwitchLink) + { + remotePeer0 = gpumgrGetGpu(pGpu->gpuInstance == 0 ? 1 : 0); + if (NULL == remotePeer0) + { + remotePeer0 = pGpu; + } + + // + // Ensure the remote is actually a GPU that supports NVLink. + // If it is not, we should stick with the current GPU as + // this is likely a loopback config. See Bug 1786206. + // + if (remotePeer0 != pGpu) + { + KernelNvlink *pRemoteKernelNvlink = GPU_GET_KERNEL_NVLINK(remotePeer0); + if (pRemoteKernelNvlink) + { + if (pRemoteKernelNvlink->discoveredLinks == 0) + { + // There are no links on this remote, fall back to loopback. + remotePeer0 = pGpu; + } + } + else + { + // NVLink not present on this remote, fall back to loopback. 
+ remotePeer0 = pGpu; + } + } + + pParams->linkInfo[i].remoteDeviceInfo.domain = gpuGetDomain(remotePeer0); + pParams->linkInfo[i].remoteDeviceInfo.bus = gpuGetBus(remotePeer0); + pParams->linkInfo[i].remoteDeviceInfo.device = gpuGetDevice(remotePeer0); + pParams->linkInfo[i].remoteDeviceInfo.function = 0; + pParams->linkInfo[i].remoteDeviceInfo.pciDeviceId = remotePeer0->idInfo.PCIDeviceID; + pParams->linkInfo[i].remoteDeviceInfo.deviceType = NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU; + + // This config is either in loopback or real 1/1 P2P, nothing else. + if (gpuGetDBDF(remotePeer0) == gpuGetDBDF(pGpu)) + { + pParams->linkInfo[i].loopProperty = NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_LOOPBACK; + } + + pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags |= + FLD_SET_DRF(2080_CTRL_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS, _PCI, + pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags); + } + + // + // Expose remote device as Switch if requested + // Requested can be either forced sysmem and peer or + // if either and requested as switch + // + if ( (bSysmemLink && bPeerLink) || + ((bSysmemLink || bPeerLink) && bSwitchLink)) + { + pParams->linkInfo[i].remoteDeviceInfo.domain = 0; + pParams->linkInfo[i].remoteDeviceInfo.bus = FORCED_SWITCH_PCI_BUS; + pParams->linkInfo[i].remoteDeviceInfo.device = 0; + pParams->linkInfo[i].remoteDeviceInfo.function = i; + pParams->linkInfo[i].remoteDeviceInfo.pciDeviceId = FORCED_SWITCH_DEVICE_ID; + pParams->linkInfo[i].remoteDeviceInfo.deviceType = NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH; + + pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags |= + FLD_SET_DRF(2080_CTRL_NVLINK, _DEVICE_INFO, _DEVICE_ID_FLAGS, _PCI, + pParams->linkInfo[i].remoteDeviceInfo.deviceIdFlags); + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return status; +} + + +/* + * @brief Get the number of successful error recoveries + */ +NV_STATUS +subdeviceCtrlCmdNvlinkGetErrorRecoveries_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_NVLINK_GET_ERROR_RECOVERIES_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bMIGNvLinkP2PSupported = ((pKernelMIGManager != NULL) && + kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)); + NV_STATUS status = NV_OK; + NvU32 i; + + if ((pKernelNvlink == NULL) || !bMIGNvLinkP2PSupported) + { + NV_PRINTF(LEVEL_ERROR, "NVLink is unavailable, failing.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + FOR_EACH_INDEX_IN_MASK(32, i, pParams->linkMask & pKernelNvlink->enabledLinks) + { + pParams->numRecoveries[i] = pKernelNvlink->errorRecoveries[i]; + + // Clear the counts + pKernelNvlink->errorRecoveries[i] = 0; + } + FOR_EACH_INDEX_IN_MASK_END; + + return status; +} + +// +// subdeviceCtrlCmdNvlinkSetPowerState +// Set the mask of links to a target power state +// +NV_STATUS +subdeviceCtrlCmdNvlinkSetPowerState_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_NVLINK_SET_POWER_STATE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + NvBool bMIGNvLinkP2PSupported = ((pKernelMIGManager != NULL) && + kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)); + NV_STATUS status = NV_OK; + + if ((pKernelNvlink == NULL) || !bMIGNvLinkP2PSupported) + { + NV_PRINTF(LEVEL_INFO, "NVLink unavailable. 
Return\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // Verify the mask of links requested are enabled on the GPU + if ((pParams->linkMask & pKernelNvlink->enabledLinks) != pParams->linkMask) + { + NV_PRINTF(LEVEL_INFO, "Links not enabled. Return.\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + switch (pParams->powerState) + { + case NV2080_CTRL_NVLINK_POWER_STATE_L0: + { + status = knvlinkEnterExitSleep(pGpu, pKernelNvlink, + pParams->linkMask, + NV_FALSE); + + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + { + NV_PRINTF(LEVEL_INFO, + "Transition to L0 for GPU%d: linkMask 0x%x in progress... Waiting for " + "remote endpoints to request L2 exit\n", + pGpu->gpuInstance, pParams->linkMask); + + return NV_WARN_MORE_PROCESSING_REQUIRED; + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error setting power state %d on linkmask 0x%x\n", + pParams->powerState, pParams->linkMask); + + return status; + } + break; + } + + case NV2080_CTRL_NVLINK_POWER_STATE_L2: + { + status = knvlinkEnterExitSleep(pGpu, pKernelNvlink, + pParams->linkMask, + NV_TRUE); + + if (status == NV_WARN_MORE_PROCESSING_REQUIRED) + { + NV_PRINTF(LEVEL_INFO, + "Transition to L2 for GPU%d: linkMask 0x%x in progress... Waiting for " + "remote endpoints to request L2 entry\n", + pGpu->gpuInstance, pParams->linkMask); + + return NV_WARN_MORE_PROCESSING_REQUIRED; + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error setting power state %d on linkmask 0x%x\n", + pParams->powerState, pParams->linkMask); + + return status; + } + break; + } + + case NV2080_CTRL_NVLINK_POWER_STATE_L1: + case NV2080_CTRL_NVLINK_POWER_STATE_L3: + { + // L1 and L3 states are not yet supported. Targeted for Ampere + NV_PRINTF(LEVEL_ERROR, "Unsupported power state %d requested.\n", + pParams->powerState); + + return NV_ERR_INVALID_REQUEST; + } + + default: + { + NV_PRINTF(LEVEL_ERROR, "Unsupported power state %d requested.\n", + pParams->powerState); + + return NV_ERR_INVALID_REQUEST; + } + } + + return status; +} + +// +// subdeviceCtrlCmdNvlinkSetNvlinkPeer +// Set/unset the USE_NVLINK_PEER bit +// +NV_STATUS +subdeviceCtrlCmdNvlinkSetNvlinkPeer_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_NVLINK_SET_NVLINK_PEER_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_ENABLE_NVLINK_PEER_PARAMS enableNvlinkPeerParams; + + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bMIGNvLinkP2PSupported = ((pKernelMIGManager != NULL) && + kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)); + + if ((pKernelNvlink == NULL) || !bMIGNvLinkP2PSupported) + { + NV_PRINTF(LEVEL_INFO, "NVLink unavailable. 
Return\n"); + return NV_ERR_NOT_SUPPORTED; + } + + if ((pParams->bEnable == NV_FALSE) && + !pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_UNSET_NVLINK_PEER_SUPPORTED)) + { + NV_PRINTF(LEVEL_ERROR, + "Unsetting USE_NVLINK_PEER field not supported\n"); + return NV_ERR_NOT_SUPPORTED; + } + + portMemSet(&enableNvlinkPeerParams, 0, sizeof(enableNvlinkPeerParams)); + enableNvlinkPeerParams.peerMask = pParams->peerMask; + enableNvlinkPeerParams.bEnable = pParams->bEnable; + + // Update the RM cache to reflect the updated status of USE_NVLINK_PEER + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_ENABLE_NVLINK_PEER, + (void *)&enableNvlinkPeerParams, + sizeof(enableNvlinkPeerParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d Failed to update USE_NVLINK_PEER for peer mask 0x%x\n", + gpuGetInstance(pGpu), pParams->peerMask); + + return status; + } + + // Call knvlinkUpdateCurrentConfig to flush settings to the registers + status = knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelib.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelib.c new file mode 100644 index 000000000..260bc6bfb --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelib.c @@ -0,0 +1,680 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "core/locks.h" +#include "gpu/gpu.h" + +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" + +#if defined(INCLUDE_NVLINK_LIB) +static void _knvlinkUpdateRemoteEndUuidInfo(OBJGPU *, KernelNvlink *); +#endif + +/*! + * @brief Initializes NVLink lib - WAR only. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +void +knvlinkCoreDriverLoadWar_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + // + // All platforms which support NVLINK_CORE should call nvlink_lib_initialize + // explicitly, if NVLink support is needed. It is not RM's responsibility to + // initialize NVLink driver. Currently, only windows fails to do the same. + // Hence, adding this WAR to keep NVLink alive on windows. Also, see + // nvlinkCoreDriverUnloadWar_IMPL. 
+ // + // See Bug 1962411 in order to nuke this WAR for Windows. + // +#if defined(INCLUDE_NVLINK_LIB) + + if (RMCFG_FEATURE_PLATFORM_WINDOWS) + { + if (!nvlink_lib_is_initialized()) + { + nvlink_lib_initialize(); + } + } + +#endif +} + +/*! + * @brief Un-initializes NVLink lib - WAR only. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +void +knvlinkCoreDriverUnloadWar_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + if (RMCFG_FEATURE_PLATFORM_WINDOWS) + { + nvlink_lib_unload(); + } + +#endif +} + +/*! + * @brief Checks whether NVLink driver is supported. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreIsDriverSupported_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + if (nvlink_lib_is_initialized()) + { + return NV_OK; + } + +#endif + + NV_PRINTF(LEVEL_INFO, "NVLink core lib isn't initialized yet!\n"); + + return NV_ERR_NOT_SUPPORTED; +} + +/*! + * @brief Add GPU device to nvlink core + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreAddDevice_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + +#if defined(INCLUDE_NVLINK_LIB) + + nvlink_device *dev = NULL; + char *drvName = NULL; + char *devName = NULL; + char *devIdx = NULL; + + // Return if the device is already registered + if (pKernelNvlink->pNvlinkDev) + { + NV_PRINTF(LEVEL_INFO, "GPU already registered in NVLINK core!\n"); + + return status; + } + + // Set the driver name + drvName = portMemAllocNonPaged(NVLINK_DRIVER_NAME_LENGTH); + if (drvName == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet((void *)drvName, 0, NVLINK_DRIVER_NAME_LENGTH); + portMemCopy(drvName, sizeof(NVLINK_NVIDIA_DRIVER), NVLINK_NVIDIA_DRIVER, + sizeof(NVLINK_NVIDIA_DRIVER)); + + // + // Set the temporary device name. 
The actual device name will be updated + // after PMU state load completes + // + devName = portMemAllocNonPaged(NVLINK_DEVICE_NAME_LENGTH); + if (devName == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, "Failed to allocate memory for device name\n"); + + goto knvlinkCoreAddDevice_exit; + } + portMemSet((void *)devName, 0, NVLINK_DEVICE_NAME_LENGTH); + portMemCopy(devName, sizeof("GPU"), "GPU", sizeof("GPU")); + devIdx = devName; + while (*devIdx != '\0') devIdx++; + knvlinkUtoa((NvU8 *)devIdx, + NVLINK_DEVICE_NAME_LENGTH - (devIdx - devName), + gpuGetInstance(pGpu)); + + // Allocate memory for the nvlink_device struct + dev = portMemAllocNonPaged(sizeof(nvlink_device)); + if (dev == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, + "Failed to create nvlink_device struct for GPU\n"); + + goto knvlinkCoreAddDevice_exit; + } + portMemSet((void *)dev, 0, sizeof(nvlink_device)); + + // Initialize values for the nvlink_device struct + dev->driverName = drvName; + dev->deviceName = devName; + dev->type = NVLINK_DEVICE_TYPE_GPU; + dev->pciInfo.domain = gpuGetDomain(pGpu); + dev->pciInfo.bus = gpuGetBus(pGpu); + dev->pciInfo.device = gpuGetDevice(pGpu); + dev->pciInfo.function = 0; + dev->pciInfo.pciDeviceId = pGpu->idInfo.PCIDeviceID; + dev->pciInfo.bars[0].baseAddr = pGpu->pKernelBus->pciBars[0]; + dev->pciInfo.bars[0].barSize = pGpu->pKernelBus->pciBarSizes[0]; + dev->initialized = 1; + + // Register the GPU in nvlink core + if (nvlink_lib_register_device(dev) != 0) + { + NV_PRINTF(LEVEL_ERROR, "Failed to register GPU in NVLINK core!\n"); + + goto knvlinkCoreAddDevice_exit; + } + + NV_PRINTF(LEVEL_INFO, "GPU registered successfully in NVLINK core\n"); + + pKernelNvlink->pNvlinkDev = dev; + + return status; + +knvlinkCoreAddDevice_exit: + + portMemFree(drvName); + + portMemFree(devName); + + portMemFree(dev); + +#endif + + return status; +} + +/*! + * @brief Update GPU UUID in nvlink core + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreUpdateDeviceUUID_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + +#if defined(INCLUDE_NVLINK_LIB) + + NvU8 *pGidString = NULL; + char *devIdx = NULL; + NvU32 flags = 0; + NvU32 gidStrLen; + + if (pKernelNvlink->pNvlinkDev) + { + // + // SHA1 uuid format is 16 bytes long. Hence, make sure NVLINK_UUID_LEN + // is sufficient to store SHA1 uuid format. + // + ct_assert(NVLINK_UUID_LEN == RM_SHA1_GID_SIZE); + ct_assert(NVLINK_UUID_LEN == NV_UUID_LEN); + + flags = FLD_SET_DRF_NUM(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, + NV2080_GPU_CMD_GPU_GET_GID_FLAGS_TYPE_SHA1, flags); + flags = FLD_SET_DRF_NUM(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, + NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_BINARY, flags); + + if (!pKernelNvlink->pNvlinkDev->uuid) + { + status = gpuGetGidInfo(pGpu, &pGidString, &gidStrLen, flags); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to update GPU UUID\n"); + + return status; + } + pKernelNvlink->pNvlinkDev->uuid = (NvU8 *)pGidString; + + _knvlinkUpdateRemoteEndUuidInfo(pGpu, pKernelNvlink); + + } + + // PMU state load has completed. 
Update the device name + portMemSet((void *)pKernelNvlink->pNvlinkDev->deviceName, 0, NVLINK_DEVICE_NAME_LENGTH); + gpuGetNameString(pGpu, + NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII, + pKernelNvlink->pNvlinkDev->deviceName); + + devIdx = pKernelNvlink->pNvlinkDev->deviceName; + while (*devIdx != '\0') devIdx++; + + NV_ASSERT((devIdx - pKernelNvlink->pNvlinkDev->deviceName) < NVLINK_DEVICE_NAME_LENGTH); + + knvlinkUtoa((NvU8 *)devIdx, + NVLINK_DEVICE_NAME_LENGTH - (devIdx - pKernelNvlink->pNvlinkDev->deviceName), + gpuGetInstance(pGpu)); + } + +#endif + + return status; +} + +/*! + * @brief Add link to nvlink core lib + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link Number + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreAddLink_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + NV_STATUS status = NV_OK; + +#if defined(INCLUDE_NVLINK_LIB) + + nvlink_link *link = NULL; + char *linkName = NULL; + char *linkIdx = NULL; + + // GPU device must be registered + if (pKernelNvlink->pNvlinkDev == NULL) + { + NV_PRINTF(LEVEL_INFO, "NVLink device isn't available.\n"); + + return NV_ERR_INVALID_STATE; + } + + // On RTL, by default minion and SW train is disabled + if (IS_RTLSIM(pGpu) && !pKernelNvlink->bForceEnableCoreLibRtlsims) + { + NV_PRINTF(LEVEL_INFO, + "Skipping registration of link %d on simulation.\n", linkId); + + return status; + } + + // Return if the link is already registered + if (pKernelNvlink->nvlinkLinks[linkId].core_link) + { + NV_PRINTF(LEVEL_INFO, "Link %d already registered in NVLINK core!\n", + linkId); + + return status; + } + + // Set the link name + linkName = portMemAllocNonPaged(NVLINK_LINK_NAME_LENGTH); + if (linkName == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet((void *)linkName, 0, NVLINK_LINK_NAME_LENGTH); + portMemCopy(linkName, sizeof("Link"), "Link", sizeof("Link")); + linkIdx = linkName; + while (*linkIdx != '\0') linkIdx++; + knvlinkUtoa((NvU8 *)linkIdx, + NVLINK_LINK_NAME_LENGTH - (linkIdx - linkName), + linkId); + + // Allocate memory for the nvlink_link struct + link = portMemAllocNonPaged(sizeof(nvlink_link)); + if (link == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, "Failed to create nvlink_link struct\n"); + + goto knvlinkCoreAddLink_exit; + } + portMemSet((void *)link, 0, sizeof(nvlink_link)); + + // Initialize values for the nvlink_link struct + link->linkName = linkName; + link->linkNumber = linkId; + link->state = NVLINK_LINKSTATE_OFF; + link->tx_sublink_state = NVLINK_SUBLINK_STATE_TX_OFF; + link->rx_sublink_state = NVLINK_SUBLINK_STATE_RX_OFF; + link->bRxDetected = NV_FALSE; + link->version = pKernelNvlink->ipVerNvlink; + link->dev = pKernelNvlink->pNvlinkDev; + link->link_info = &(pKernelNvlink->nvlinkLinks[linkId]); + link->link_handlers = osGetNvlinkLinkCallbacks(); + + if (link->link_handlers == NULL) + { + + NV_PRINTF(LEVEL_ERROR, "link handlers not found\n"); + + goto knvlinkCoreAddLink_exit; + } + + // Register the link in nvlink core + if (nvlink_lib_register_link(link->dev, link) != 0) + { + NV_PRINTF(LEVEL_ERROR, "Failed to register link %d in NVLINK core!\n", + linkId); + + goto knvlinkCoreAddLink_exit; + } + + pKernelNvlink->nvlinkLinks[linkId].core_link = link; + + NV_PRINTF(LEVEL_INFO, + "LINK%d: %s registered successfully in NVLINK core\n", linkId, + linkName); + + return status; + +knvlinkCoreAddLink_exit: + + portMemFree(linkName); + + portMemFree(link); + +#endif // defined(INCLUDE_NVLINK_LIB) + 
+ return status; +} + +/*! + * @brief Remove link from nvlink core + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link Number + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreRemoveLink_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + NV_STATUS status = NV_OK; + +#if defined(INCLUDE_NVLINK_LIB) + + // GPU device must be registered + NV_ASSERT(pKernelNvlink->pNvlinkDev != NULL); + + if (pKernelNvlink->nvlinkLinks[linkId].core_link) + { + nvlink_lib_unregister_link(pKernelNvlink->nvlinkLinks[linkId].core_link); + + // Update the RM state now that the link is de-registered + pKernelNvlink->nvlinkLinks[linkId].core_link = NULL; + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected = NV_FALSE; + } + +#endif + + return status; +} + +/*! + * @brief Remove GPU device from nvlink core + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreRemoveDevice_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + OBJGPU *pGpu1 = NULL; + KernelNvlink *pKernelNvlink1 = NULL; + NvU32 gpuMask = 0; + NvU32 gpuInstance = 0; + NV_STATUS status = NV_OK; + + gpumgrGetGpuAttachInfo(NULL, &gpuMask); + + // Clear peer link masks + while ((pGpu1 = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (pGpu1 == pGpu) + continue; + + pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + if (pKernelNvlink1 == NULL) + { + continue; + } + + // Set peerLinkMask from pGpu1 to pGpu to 0 + status = knvlinkSetLinkMaskToPeer(pGpu1, pKernelNvlink1, pGpu, 0); + if (status != NV_OK) + return status; + } + + if (pKernelNvlink->pNvlinkDev) + { + nvlink_device *dev = pKernelNvlink->pNvlinkDev; + nvlink_lib_unregister_device(dev); + + portMemFree((NvU8 *)dev->driverName); + portMemFree((NvU8 *)dev->deviceName); + + if (dev->uuid) + { + portMemFree((NvU8 *)dev->uuid); + } + portMemFree(dev); + } + + // Update the RM cache of the core lib device + pKernelNvlink->pNvlinkDev = NULL; + +#endif + + return NV_OK; +} + +/*! + * @brief Return true if a GPU is connected to an NVSwitch. For now, to keep SW + * simple we make sure that all enabled links are connected to NVSwitch. Note, + * on NVSwitch systems currently there is no POR for GPU<->GPU direct peers, + * everything gets routed through NVSwitch. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NvBool +knvlinkIsGpuConnectedToNvswitch_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NvBool bConnected = NV_FALSE; + +#if defined(INCLUDE_NVLINK_LIB) + + NvU32 i; + KNVLINK_CONN_INFO remoteEndInfo; + + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->enabledLinks) + { + remoteEndInfo = pKernelNvlink->nvlinkLinks[i].remoteEndInfo; + + if (remoteEndInfo.bConnected && + remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_NVSWITCH) + { + bConnected = NV_TRUE; + } + else if (remoteEndInfo.bConnected) + { + // We don't support a mix of direct connect and switch links + return NV_FALSE; + } + } + FOR_EACH_INDEX_IN_MASK_END; + +#endif + + return bConnected; +} + +/*! + * @brief Return true if a link is connected. 
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link identifier + */ +NvBool +knvlinkIsLinkConnected_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + // + // For forced configs, we might not have connectivity information, + // return true + // + if (knvlinkIsForcedConfig(pGpu, pKernelNvlink) || + pKernelNvlink->pLinkConnection) + { + return NV_TRUE; + } + + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected) + { + return NV_TRUE; + } + +#endif + + return NV_FALSE; +} + +#if defined(INCLUDE_NVLINK_LIB) + +/*! + * @brief Update the UUID for the remote device + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +static void +_knvlinkUpdateRemoteEndUuidInfo +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + OBJGPU *pGpu1 = NULL; + KernelNvlink *pKernelNvlink1 = NULL; + unsigned remoteLinkId; + unsigned i, j; + + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->enabledLinks) + { + if (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bConnected && + (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_GPU)) + { + pGpu1 = NULL; + + // Get the remote OBJGPU and Nvlink + for (j = 0; j < NV_MAX_DEVICES; j++) + { + pGpu1 = gpumgrGetGpu(j); + + if (pGpu1 && + (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.domain == gpuGetDomain(pGpu1) && + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bus == gpuGetBus(pGpu1) && + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.device == gpuGetDevice(pGpu1) && + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.function == 0)) + { + pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + + if (pKernelNvlink1) + { + remoteLinkId = pKernelNvlink->nvlinkLinks[i].remoteEndInfo.linkNumber; + + // Set the PCI information for remote end + portMemCopy(pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.devUuid, + NV_UUID_LEN, + &pGpu->gpuUuid.uuid[0], + NV_UUID_LEN); + } + break; + } + } + } + } + FOR_EACH_INDEX_IN_MASK_END; +} + +#endif diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelibcallback.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelibcallback.c new file mode 100644 index 000000000..bff61d32b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelibcallback.c @@ -0,0 +1,1499 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "gpu/gpu.h" + +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" + +#if defined(INCLUDE_NVLINK_LIB) + +static void _knvlinkCorePassiveLinkChangeCallback(NvU32, void *); + +/*! + * Compile time assert to ensure NV2080_CTRL_NVLINK_MAX_SEED_BUFFER_SIZE == + * NVLINK_MAX_SEED_BUFFER_SIZE + */ +ct_assert(NV2080_CTRL_NVLINK_MAX_SEED_BUFFER_SIZE == + NVLINK_MAX_SEED_BUFFER_SIZE); + +/*! + * Compile time asserts to ensure NV2080_NVLINK_CORE_LINK_STATE* == + * NVLINK_LINKSTATE* + */ +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_OFF == + NVLINK_LINKSTATE_OFF); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_HS == + NVLINK_LINKSTATE_HS); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_SAFE == + NVLINK_LINKSTATE_SAFE); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_FAULT == + NVLINK_LINKSTATE_FAULT); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_RECOVERY == + NVLINK_LINKSTATE_RECOVERY); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_FAIL == + NVLINK_LINKSTATE_FAIL); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_DETECT == + NVLINK_LINKSTATE_DETECT); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_RESET == + NVLINK_LINKSTATE_RESET); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_ENABLE_PM == + NVLINK_LINKSTATE_ENABLE_PM); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_DISABLE_PM == + NVLINK_LINKSTATE_DISABLE_PM); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_SAVE_STATE == + NVLINK_LINKSTATE_SAVE_STATE); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_RESTORE_STATE == + NVLINK_LINKSTATE_RESTORE_STATE); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_PRE_HS == + NVLINK_LINKSTATE_PRE_HS); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_DISABLE_ERR_DETECT == + NVLINK_LINKSTATE_DISABLE_ERR_DETECT); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_LANE_DISABLE == + NVLINK_LINKSTATE_LANE_DISABLE); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_LANE_SHUTDOWN == + NVLINK_LINKSTATE_LANE_SHUTDOWN); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_TRAFFIC_SETUP == + NVLINK_LINKSTATE_TRAFFIC_SETUP); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_INITPHASE1 == + NVLINK_LINKSTATE_INITPHASE1); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_INITNEGOTIATE == + NVLINK_LINKSTATE_INITNEGOTIATE); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_POST_INITNEGOTIATE == + NVLINK_LINKSTATE_POST_INITNEGOTIATE); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_INITOPTIMIZE == + NVLINK_LINKSTATE_INITOPTIMIZE); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_POST_INITOPTIMIZE == + NVLINK_LINKSTATE_POST_INITOPTIMIZE); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_DISABLE_HEARTBEAT == + NVLINK_LINKSTATE_DISABLE_HEARTBEAT); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_CONTAIN == + NVLINK_LINKSTATE_CONTAIN); +ct_assert(NV2080_NVLINK_CORE_LINK_STATE_INITTL == + NVLINK_LINKSTATE_INITTL); + +/*! 
+ * Compile time asserts to ensure NV2080_NVLINK_CORE_SUBLINK_STATE_TX* == + * NVLINK_SUBLINK_STATE_TX* + */ +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_HS == + NVLINK_SUBLINK_STATE_TX_HS); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_SINGLE_LANE == + NVLINK_SUBLINK_STATE_TX_SINGLE_LANE); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_TRAIN == + NVLINK_SUBLINK_STATE_TX_TRAIN); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_SAFE == + NVLINK_SUBLINK_STATE_TX_SAFE); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_OFF == + NVLINK_SUBLINK_STATE_TX_OFF); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_COMMON_MODE == + NVLINK_SUBLINK_STATE_TX_COMMON_MODE); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_COMMON_MODE_DISABLE == + NVLINK_SUBLINK_STATE_TX_COMMON_MODE_DISABLE); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_DATA_READY == + NVLINK_SUBLINK_STATE_TX_DATA_READY); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_EQ == + NVLINK_SUBLINK_STATE_TX_EQ); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_PRBS_EN == + NVLINK_SUBLINK_STATE_TX_PRBS_EN); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_TX_POST_HS == + NVLINK_SUBLINK_STATE_TX_POST_HS); + +/*! + * Compile time asserts to ensure NV2080_NVLINK_CORE_SUBLINK_STATE_RX* == + * NVLINK_SUBLINK_STATE_RX* + */ +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_RX_HS == + NVLINK_SUBLINK_STATE_RX_HS); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_RX_SINGLE_LANE == + NVLINK_SUBLINK_STATE_RX_SINGLE_LANE); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_RX_TRAIN == + NVLINK_SUBLINK_STATE_RX_TRAIN); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_RX_SAFE == + NVLINK_SUBLINK_STATE_RX_SAFE); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_RX_OFF == + NVLINK_SUBLINK_STATE_RX_OFF); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_RX_RXCAL == + NVLINK_SUBLINK_STATE_RX_RXCAL); +ct_assert(NV2080_NVLINK_CORE_SUBLINK_STATE_RX_INIT_TERM == + NVLINK_SUBLINK_STATE_RX_INIT_TERM); + +/*! + * @brief Callback function for adding link to nvlink core + * + * @param[in] nvlink_link pointer + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreAddLinkCallback +( + nvlink_link *link +) +{ + return 0; +} + +/*! + * @brief Callback function for removing link from nvlink core + * + * @param[in] nvlink_link pointer + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreRemoveLinkCallback +( + nvlink_link *link +) +{ + portMemFree((NvU8 *)link->linkName); + link->linkName = NULL; + link->link_handlers = NULL; + link->dev = NULL; + portMemFree(link); + return 0; +} + +/*! + * @brief Callback function for locking a link so its state can be accessed + * and modified atomically. + * + * @param[in] nvlink_link pointer + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreLockLinkCallback +( + nvlink_link *link +) +{ + KNVLINK_RM_LINK *pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Return if NVLink fabric is managed by FM + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + return NVL_SUCCESS; + } + + // + // We track the lock state of this API via the master/parent GPU of the + // subdevice, since the locking APIs currently available to us operate at + // the device level. 
+ // + OBJGPU *pGpu = gpumgrGetParentGPU(pNvlinkLink->pGpu); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + NV_ASSERT_OR_RETURN(pKernelNvlink != NULL, NVL_ERR_INVALID_STATE); + + // First check if the lock is already held + if (rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))) + { + // + // If the lock is held with deviceLockRefcount == 0, it means the + // lock was acquired before this function was first called for the + // device, so we will not release the lock via the unlock callback. + // + if (pKernelNvlink->deviceLockRefcount > 0) + { + pKernelNvlink->deviceLockRefcount++; + NV_PRINTF(LEVEL_INFO, "incremented device lock refcnt to %u\n", + pKernelNvlink->deviceLockRefcount); + } + else + { + NV_PRINTF(LEVEL_INFO, + "device lock acquired outside of the core library callbacks\n"); + } + } + else + { + if (rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_NVLINK) == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "acquired device GPU locks\n"); + + pKernelNvlink->deviceLockRefcount++; + } + else + { + NV_PRINTF(LEVEL_INFO, "failed to acquire device GPU locks!\n"); + + return NVL_ERR_GENERIC; + } + } + + return NVL_SUCCESS; +} + +/*! + * @brief Callback function for unlocking a link. + * + * This should only be called after nvlinkCoreLockLinkCallback(). + * + * @param[in] nvlink_link pointer + */ +void +knvlinkCoreUnlockLinkCallback +( + nvlink_link *link +) +{ + KNVLINK_RM_LINK *pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Return if NVLink fabric is managed by FM + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + return; + } + + // + // We track the lock state of this API via the master/parent GPU of the + // subdevice, since the locking APIs currently available to us operate at + // the device level. + // + OBJGPU *pGpu = gpumgrGetParentGPU(pNvlinkLink->pGpu); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + NV_ASSERT_OR_RETURN_VOID(rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))); + + if (pKernelNvlink == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pKernelNvlink is NULL, returning early\n"); + + return; + } + + if (pKernelNvlink->deviceLockRefcount > 0) + { + if (--pKernelNvlink->deviceLockRefcount == 0) + { + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + NV_PRINTF(LEVEL_INFO, "released device GPU locks\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, "decremented device lock refcnt to %u\n", + pKernelNvlink->deviceLockRefcount); + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "device lock acquired outside of the core library callbacks\n"); + } +} + +/*! 
+ * @brief Function to be executed when the master end + * of a link triggers the retraining of the link + * + * @param[in] gpuInstance Master GPU instance + * @param[in] linkChangeData Contains information of both ends + */ +static void +_knvlinkCorePassiveLinkChangeCallback +( + NvU32 gpuInstance, + void *linkChangeData +) +{ + OBJGPU *pGpu = NULL; + OBJSYS *pSys = SYS_GET_INSTANCE(); + KernelNvlink *pKernelNvlink = NULL; + + KNVLINK_RM_LINK *pNvlinkLink; + nvlink_link_change *link_change; + nvlink_link *slave; + nvlink_link *master; + + link_change = *(nvlink_link_change **)linkChangeData; + master = link_change->master; + slave = link_change->slave; + pNvlinkLink = (KNVLINK_RM_LINK *)master->link_info; + + pGpu = gpumgrGetGpu(gpuInstance); + NV_ASSERT(pGpu == pNvlinkLink->pGpu); + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // If link training is disabled through regkey + if (pKernelNvlink && pKernelNvlink->bSkipLinkTraining) + { + return; + } + + // If fabric is externally managed through FM, return + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + return; + } + + if (osAcquireRmSema(pSys->pSema) == NV_OK) + { + if (master->link_handlers->lock(master) == NVL_SUCCESS) + { + if (slave->link_handlers->lock(slave) == NVL_SUCCESS) + { + if (pKernelNvlink != NULL) + { + NvU32 linkId = pNvlinkLink->linkId; + + // + // TODO: we should probably be smarter about detecting if + // the master has already taken its own action in + // retraining the link that would remove the need to + // process this one. But for now, just blindly process + // the link change request from the slave. + // + knvlinkRetrainLink(pGpu, pKernelNvlink, linkId, + link_change->change_type == nvlink_retrain_from_off); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "master GPU does not support NVLINK!\n"); + DBG_BREAKPOINT(); + } + slave->link_handlers->unlock(slave); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to acquire slave lock!\n"); + } + master->link_handlers->unlock(master); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to acquire the master lock!\n"); + } + osReleaseRmSema(pSys->pSema, NULL); + } + else + { + NV_PRINTF(LEVEL_ERROR, "failed to acquire the RM semaphore!\n"); + } + + return; +} + +/*! + * @brief Callback function for queuing a link change request from the + * link slave. + * + * This function is only called for links which are the master of the + * connection. + * + * The master link is NOT locked at the time this callback is called, + * and this callback must not attempt to acquire the master link lock. + * + * @param[in] nvlink_link_change pointer + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreQueueLinkChangeCallback +( + nvlink_link_change *link_change +) +{ + + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + OBJOS *pOS = NULL; + NV_STATUS status = NV_OK; + void *pWorkItemData; + + pNvlinkLink = (KNVLINK_RM_LINK *)link_change->master->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + return NVL_ERR_INVALID_STATE; + } + + // The master should be marked as such + NV_ASSERT_OR_RETURN(link_change->master->master, NV_ERR_INVALID_STATE); + + pOS = GPU_GET_OS(pGpu); + + pWorkItemData = portMemAllocNonPaged(sizeof(nvlink_link_change *)); + NV_ASSERT_OR_RETURN(pWorkItemData != NULL, NVL_NO_MEM); + + *((nvlink_link_change **)pWorkItemData) = link_change; + + // + // This function will free the argument if it succeeds, hence the need for + // the work item data wrapper. 
+ // + status = pOS->osQueueWorkItem(pGpu, _knvlinkCorePassiveLinkChangeCallback, + pWorkItemData); + if (status != NV_OK) + { + portMemFree(pWorkItemData); + return NVL_ERR_GENERIC; + } + + return NVL_SUCCESS; +} + +/*! + * @brief Callback function for setting a DL link mode + * + * @param[in] nvlink_link pointer + * @param[in] Link mode to be set + * @param[in] Flags + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreSetDlLinkModeCallback +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + KernelIoctrl *pKernelIoctrl = NULL; + NV_STATUS status = NV_OK; + NvU8 linkIndex; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_SET_DL_LINK_MODE_PARAMS + *pSetDlLinkModeParams; + + portMemSet(¶ms, 0, sizeof(params)); + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + linkIndex = pNvlinkLink->linkId; + pKernelIoctrl = KNVLINK_LINK_GET_IOCTRL(pKernelNvlink, linkIndex); + + // If link training is disabled through regkey + if (pKernelNvlink->bSkipLinkTraining) + { + return 0; + } + + params.linkId = linkIndex; + params.callbackType.type = NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_DL_LINK_MODE; + + pSetDlLinkModeParams = ¶ms.callbackType.callbackParams.setDlLinkMode; + pSetDlLinkModeParams->mode = mode; + pSetDlLinkModeParams->bSync = (flags == NVLINK_STATE_CHANGE_SYNC) ? + NV_TRUE : NV_FALSE; + + switch (mode) + { + case NVLINK_LINKSTATE_OFF: + { + pSetDlLinkModeParams->linkMode = + NV2080_NVLINK_CORE_LINK_STATE_OFF; + + break; + } + case NVLINK_LINKSTATE_PRE_HS: + { + pSetDlLinkModeParams->linkMode = + NV2080_NVLINK_CORE_LINK_STATE_PRE_HS; + + pSetDlLinkModeParams->linkModeParams.linkModePreHsParams.remoteDeviceType = + pKernelNvlink->nvlinkLinks[linkIndex].remoteEndInfo.deviceType; + pSetDlLinkModeParams->linkModeParams.linkModePreHsParams.ipVerDlPl = + pKernelNvlink->nvlinkLinks[linkIndex].remoteEndInfo.ipVerDlPl; + + break; + } + case NVLINK_LINKSTATE_INITPHASE1: + { + pSetDlLinkModeParams->linkMode = + NV2080_NVLINK_CORE_LINK_STATE_INITPHASE1; + + if (pKernelIoctrl->getProperty(pKernelIoctrl, PDB_PROP_KIOCTRL_MINION_CACHE_SEEDS)) + { + NvU32 *seedDataSrc = pKernelNvlink->nvlinkLinks[linkIndex].core_link->seedData; + NvU32 *seedDataDest = + pSetDlLinkModeParams->linkModeParams.linkModeInitPhase1Params.seedData; + + portMemCopy(seedDataDest, sizeof(*seedDataDest) * NV2080_CTRL_NVLINK_MAX_SEED_BUFFER_SIZE, + seedDataSrc, sizeof(*seedDataSrc) * NVLINK_MAX_SEED_BUFFER_SIZE); + } + + break; + } + case NVLINK_LINKSTATE_POST_INITNEGOTIATE: + { + pSetDlLinkModeParams->linkMode = + NV2080_NVLINK_CORE_LINK_STATE_POST_INITNEGOTIATE; + + break; + } + case NVLINK_LINKSTATE_POST_INITOPTIMIZE: + { + NvU32 initoptimizeTimeout; + THREAD_STATE_NODE threadNode; + THREAD_STATE_NODE *pThreadNode = NULL; + NvBool bDoThreadStateFree = NV_FALSE; + + status = threadStateGetCurrent(&pThreadNode, pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "Thread state not initialized!\n"); + + // + // There is a possiblity that the entrypoint to the + // callback does not setup threadstate (ie. MODs). + // If there is no thread state, then initialize it. 
+ // + threadStateInit(&threadNode, THREAD_STATE_FLAGS_NONE); + bDoThreadStateFree = NV_TRUE; + + // Getting thread state a second time should not fail + status = threadStateGetCurrent(&pThreadNode, pGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error getting current thread!\n"); + threadStateFree(&threadNode, THREAD_STATE_FLAGS_NONE); + return 1; + } + + NV_ASSERT(pThreadNode == &threadNode); + } + + initoptimizeTimeout = gpuScaleTimeout(pGpu, + NVLINK_INITOPTIMIZE_POLL_TIMEOUT); + + // + // Override the thread state timeout, + // so GSP doesn't timeout after 4 seconds + // + threadStateSetTimeoutOverride(pThreadNode, + (NvU64)initoptimizeTimeout / 1000); + + pSetDlLinkModeParams->linkMode = + NV2080_NVLINK_CORE_LINK_STATE_POST_INITOPTIMIZE; + + // Poll for 10 seconds to avoid GSP timeout + while(1) + { + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error calling and polling for Init Optimize status! link 0x%x\n", + linkIndex); + break; + } + + // Check if polling is done + if (pSetDlLinkModeParams->linkModeParams.linkModePostInitOptimizeParams.bPollDone) + { + break; + } + else + { + // Add a 1 second delay so GSP isn't spammed with commands + osDelay(NVLINK_INITOPTIMIZE_POLL_COUNT_DELAY_MS); + osSpinLoop(); + } + } + + if (bDoThreadStateFree) + { + threadStateFree(&threadNode, THREAD_STATE_FLAGS_NONE); + } + + // Nothing else to do, return early + return (status == NV_OK) ? 0 : 1; + } + default: + { + // Do nothing + break; + } + } + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error setting current link state: 0x%llx!\n", mode); + return 1; + } + + // Post control call operations + switch (mode) + { + case NVLINK_LINKSTATE_SAVE_STATE: + { + // If the request was to save the link state, update on success + link->bStateSaved = NV_TRUE; + break; + } + case NVLINK_LINKSTATE_RESTORE_STATE: + { + // If the request was to restore the link state, update on success + link->bStateSaved = NV_FALSE; + break; + } + case NVLINK_LINKSTATE_OFF: + { + if (pKernelIoctrl->getProperty(pKernelIoctrl, PDB_PROP_KIOCTRL_MINION_CACHE_SEEDS)) + { + NvU32 *seedDataSrc = pSetDlLinkModeParams->linkModeParams.linkModeOffParams.seedData; + NvU32 *seedDataDest = pKernelNvlink->nvlinkLinks[linkIndex].core_link->seedData; + + portMemCopy(seedDataDest, sizeof(*seedDataDest) * NV2080_CTRL_NVLINK_MAX_SEED_BUFFER_SIZE, + seedDataSrc, sizeof(*seedDataSrc) * NVLINK_MAX_SEED_BUFFER_SIZE); + } + + break; + } + case NVLINK_LINKSTATE_POST_INITNEGOTIATE: + { + NV2080_CTRL_NVLINK_SET_DL_LINK_MODE_POST_INITNEGOTIATE_PARAMS + *pPostInitNegotiateParams; + + pPostInitNegotiateParams = + &pSetDlLinkModeParams->linkModeParams.linkModePostInitNegotiateParams; + + // Save Remote/Local link SID info into core lib + pKernelNvlink->nvlinkLinks[linkIndex].core_link->bInitnegotiateConfigGood = + pPostInitNegotiateParams->bInitnegotiateConfigGood; + pKernelNvlink->nvlinkLinks[linkIndex].core_link->remoteSid = + pPostInitNegotiateParams->remoteLocalSidInfo.remoteSid; + pKernelNvlink->nvlinkLinks[linkIndex].core_link->remoteDeviceType = + pPostInitNegotiateParams->remoteLocalSidInfo.remoteDeviceType; + pKernelNvlink->nvlinkLinks[linkIndex].core_link->remoteLinkId = + pPostInitNegotiateParams->remoteLocalSidInfo.remoteLinkId; + 
pKernelNvlink->nvlinkLinks[linkIndex].core_link->localSid = + pPostInitNegotiateParams->remoteLocalSidInfo.localSid; + + break; + } + default: + { + // Do nothing + break; + } + } + + return 0; +} + +/*! + * @brief Callback function for getting a DL link mode + * + * @param[in] nvlink_link pointer + * @param[out] Current mode of the link + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreGetDlLinkModeCallback +( + nvlink_link *link, + NvU64 *mode +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_DL_LINK_MODE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error getting current link state!\n"); + return 1; + } + + *mode = (NvU64) params.callbackType.callbackParams.getDlLinkMode.mode; + return 0; +} + +/*! + * @brief Callback function for setting a TL link mode + * + * @param[in] nvlink_link pointer + * @param[in] Link mode to be set + * @param[in] Flags + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreSetTlLinkModeCallback +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_SET_TL_LINK_MODE_PARAMS + *pSetTlLinkModeParams; + + portMemSet(¶ms, 0, sizeof(params)); + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // If link training is disabled through regkey + if (pKernelNvlink->bSkipLinkTraining) + { + return 0; + } + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_TL_LINK_MODE; + + pSetTlLinkModeParams = ¶ms.callbackType.callbackParams.setTlLinkMode; + pSetTlLinkModeParams->mode = mode; + pSetTlLinkModeParams->bSync = (flags == NVLINK_STATE_CHANGE_SYNC) ? + NV_TRUE : NV_FALSE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error setting current link state!\n"); + return 1; + } + + return 0; +} + +/*! 
+ * @brief Callback function for getting a TL link mode + * + * @param[in] nvlink_link pointer + * @param[out] Current mode of the link + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreGetTlLinkModeCallback +( + nvlink_link *link, + NvU64 *mode +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_TL_LINK_MODE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error getting current link state!\n"); + return 1; + } + + *mode = (NvU64) params.callbackType.callbackParams.getTlLinkMode.mode; + return 0; +} + +/*! + * @brief Callback function for setting Tx sublink mode + * + * @param[in] nvlink_link pointer + * @param[in] TX Sublink mode to be set + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreSetTxSublinkModeCallback +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_SET_TX_SUBLINK_MODE_PARAMS + *pSetTxSublinkModeParams; + + portMemSet(¶ms, 0, sizeof(params)); + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // If link training is disabled through regkey + if (pKernelNvlink->bSkipLinkTraining) + { + return 0; + } + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_TX_SUBLINK_MODE; + + pSetTxSublinkModeParams = ¶ms.callbackType.callbackParams.setTxSublinkMode; + pSetTxSublinkModeParams->mode = mode; + pSetTxSublinkModeParams->bSync = (flags == NVLINK_STATE_CHANGE_SYNC) ? + NV_TRUE : NV_FALSE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error setting TX sublink mode. mode = 0x%08llx\n", mode); + return 1; + } + + switch(mode) + { + case NVLINK_SUBLINK_STATE_TX_COMMON_MODE: + case NVLINK_SUBLINK_STATE_TX_EQ: + case NVLINK_SUBLINK_STATE_TX_DATA_READY: + link->tx_sublink_state = (NvU32) mode; + break; + default: + break; + } + + return 0; +} + +/*! 
+ * @brief Callback function for setting Rx sublink mode + * + * @param[in] nvlink_link pointer + * @param[in] RX Sublink mode to be set + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreSetRxSublinkModeCallback +( + nvlink_link *link, + NvU64 mode, + NvU32 flags +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_SET_RX_SUBLINK_MODE_PARAMS + *pSetRxSublinkModeParams; + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // If link training is disabled through regkey + if (pKernelNvlink->bSkipLinkTraining) + { + return 0; + } + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_RX_SUBLINK_MODE; + + pSetRxSublinkModeParams = + ¶ms.callbackType.callbackParams.setRxSublinkMode; + pSetRxSublinkModeParams->mode = mode; + pSetRxSublinkModeParams->bSync = (flags == NVLINK_STATE_CHANGE_SYNC) ? + NV_TRUE : NV_FALSE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error setting RX sublink mode!\n"); + return 1; + } + + switch(mode) + { + case NVLINK_SUBLINK_STATE_RX_RXCAL: + case NVLINK_SUBLINK_STATE_RX_INIT_TERM: + link->rx_sublink_state = (NvU32) mode; + break; + default: + break; + } + + return 0; +} + +/*! + * @brief Callback function for getting Tx sublink mode + * + * @param[in] nvlink_link pointer + * @param[out] Current mode of the TX sublink + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreGetTxSublinkModeCallback +( + nvlink_link *link, + NvU64 *mode, + NvU32 *subMode +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + // Initialize to default values + params.callbackType.callbackParams.getTxSublinkMode.sublinkMode = + NVLINK_SUBLINK_STATE_TX_OFF; + params.callbackType.callbackParams.getTxSublinkMode.sublinkSubMode = + NVLINK_SUBLINK_SUBSTATE_TX_STABLE; + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_TX_SUBLINK_MODE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error getting current TX sublink state!\n"); + return 1; + } + + *mode = (NvU64) params.callbackType.callbackParams.getTxSublinkMode.sublinkMode; + *subMode = params.callbackType.callbackParams.getTxSublinkMode.sublinkSubMode; + return 0; +} + +/*! 
+ * @brief Callback function for getting Rx sublink mode + * + * @param[in] nvlink_link pointer + * @param[out] Current mode of the RX sublink + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreGetRxSublinkModeCallback +( + nvlink_link *link, + NvU64 *mode, + NvU32 *subMode +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + // Initialize to default values + params.callbackType.callbackParams.getRxSublinkMode.sublinkMode = + NVLINK_SUBLINK_STATE_RX_OFF; + params.callbackType.callbackParams.getRxSublinkMode.sublinkSubMode = + NVLINK_SUBLINK_SUBSTATE_RX_STABLE; + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_RX_SUBLINK_MODE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error getting current RX sublink state!\n"); + return 1; + } + + *mode = (NvU64) params.callbackType.callbackParams.getRxSublinkMode.sublinkMode; + *subMode = params.callbackType.callbackParams.getRxSublinkMode.sublinkSubMode;; + return 0; +} + +/*! + * @brief Callback function for performing receiver detect + * + * @param[in] nvlink_link pointer + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreSetRxSublinkDetectCallback +( + nvlink_link *link, + NvU32 flags +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_SET_RX_DETECT_PARAMS + *pSetRxDetectParams; + + portMemSet(¶ms, 0, sizeof(params)); + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_SET_RX_SUBLINK_DETECT; + + pSetRxDetectParams = + ¶ms.callbackType.callbackParams.setRxSublinkDetect; + pSetRxDetectParams->bSync = (flags == NVLINK_STATE_CHANGE_SYNC) ? + NV_TRUE : NV_FALSE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error performing RXDET (Receiver Detect) on link!\n"); + return 1; + } + + return 0; +} + +/*! 
+ * @brief Callback function for getting status of receiver detect + * + * @param[in] nvlink_link pointer + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreGetRxSublinkDetectCallback +( + nvlink_link *link +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NvU32 linkId; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_GET_RX_DETECT_PARAMS + *pGetRxDetectParams; + + portMemSet(¶ms, 0, sizeof(params)); + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + pGpu = pNvlinkLink->pGpu; + linkId = pNvlinkLink->linkId; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + params.linkId = linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_RX_SUBLINK_DETECT; + + pGetRxDetectParams = ¶ms.callbackType.callbackParams.getRxSublinkDetect; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + + // Store RXDET status mask + pKernelNvlink->nvlinkLinks[linkId].laneRxdetStatusMask = + pGetRxDetectParams->laneRxdetStatusMask; + + // Update bRxDetected field based on RXDET status + link->bRxDetected = (status == NV_OK ? NV_TRUE : NV_FALSE); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "RXDET (Receiver Detect) failed on link!\n"); + return 1; + } + + return 0; +} + +/*! + * @brief Callback function for sending a discovery token over a link + * + * @param[in] nvlink_link pointer + * @param[in] Token to be sent on the link + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreWriteDiscoveryTokenCallback +( + nvlink_link *link, + NvU64 token +) +{ + KNVLINK_RM_LINK *pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + OBJGPU *pGpu = pNvlinkLink->pGpu; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + { + + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS + *pWriteDiscoveryTokenParams; + + portMemSet(¶ms, 0, sizeof(params)); + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_WRITE_DISCOVERY_TOKEN; + + pWriteDiscoveryTokenParams = + ¶ms.callbackType.callbackParams.writeDiscoveryToken; + pWriteDiscoveryTokenParams->ipVerDlPl = pNvlinkLink->ipVerDlPl; + pWriteDiscoveryTokenParams->token = token; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + } + + if (status != NV_OK) + { + if (status != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, "Error writing Discovery Token!\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, "R4 Tokens not supported on the chip!\n"); + } + + return 1; + } + + return 0; +} + +/*! 
+ * @brief Callback function for getting a discovery token on a link + * + * @param[in] nvlink_link pointer + * @param[out] Token received on the link + * + * @returns NVL_SUCCESS on success + */ +NvlStatus +knvlinkCoreReadDiscoveryTokenCallback +( + nvlink_link *link, + NvU64 *token +) +{ + KNVLINK_RM_LINK *pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + OBJGPU *pGpu = pNvlinkLink->pGpu; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status = NV_OK; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_RD_WR_DISCOVERY_TOKEN_PARAMS + *pReadDiscoveryTokenParams; + + portMemSet(¶ms, 0, sizeof(params)); + + if (token == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Bad token address provided!\n"); + return 1; + } + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return 1; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + { + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_READ_DISCOVERY_TOKEN; + + pReadDiscoveryTokenParams = + ¶ms.callbackType.callbackParams.readDiscoveryToken; + pReadDiscoveryTokenParams->ipVerDlPl = pNvlinkLink->ipVerDlPl; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + } + + if (status != NV_OK) + { + if (status != NV_ERR_NOT_SUPPORTED) + { + NV_PRINTF(LEVEL_ERROR, "Error reading discovery token!\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, "R4 Tokens not supported on the chip!\n"); + } + + return 1; + } + + *token = pReadDiscoveryTokenParams->token; + + return 0; +} + +/*! + * @brief Callback function for post link training tasks. + * + * @param[in] nvlink_link pointer + */ +void +knvlinkCoreTrainingCompleteCallback +( + nvlink_link *link +) +{ + KNVLINK_RM_LINK *pNvlinkLink; + OBJGPU *pGpu = NULL; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + + pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + + pGpu = pNvlinkLink->pGpu; + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = NV2080_CTRL_NVLINK_CALLBACK_TYPE_TRAINING_COMPLETE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error issuing NvLink Training Complete callback!\n"); + } +} + +/*! 
+ * @brief nvlinkCoreGetUphyLoadCallback checks if uphy is locked + * + * @param[in] pGpu OBJGPU pointer + * @param[in] link nvlink_link pointer + * @param[out] bUnlocked Uphy is locked or unlocked + */ +void +knvlinkCoreGetUphyLoadCallback +( + nvlink_link *link, + NvBool *bUnlocked +) +{ + KNVLINK_RM_LINK *pNvlinkLink = (KNVLINK_RM_LINK *)link->link_info; + OBJGPU *pGpu = pNvlinkLink->pGpu; + KernelNvlink *pKernelNvlink = NULL; + NV_STATUS status; + NV2080_CTRL_NVLINK_CORE_CALLBACK_PARAMS params; + NV2080_CTRL_NVLINK_CALLBACK_GET_UPHY_LOAD_PARAMS + *pGetUphyLoadParams; + + portMemSet(¶ms, 0, sizeof(params)); + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Error processing link info!\n"); + return; + } + + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + params.linkId = pNvlinkLink->linkId; + params.callbackType.type = + NV2080_CTRL_NVLINK_CALLBACK_TYPE_GET_UPHY_LOAD; + + pGetUphyLoadParams = + ¶ms.callbackType.callbackParams.getUphyLoad; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_CORE_CALLBACK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error issuing NvLink Get Uphy Load callback!\n"); + } + + *bUnlocked = pGetUphyLoadParams->bUnlocked; +} + +#endif diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelibtrain.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelibtrain.c new file mode 100644 index 000000000..5bcad0407 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkcorelibtrain.c @@ -0,0 +1,2070 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os/os.h" +#include "core/hal.h" +#include "core/info_block.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "gpu/gpu.h" + +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" + +#include "nverror.h" + +#if defined(INCLUDE_NVLINK_LIB) +#include "nvlink_os.h" +#endif + +static void _knvlinkRetrainLinkPrologue(OBJGPU *, KernelNvlink *, NvU32); + +#if defined(INCLUDE_NVLINK_LIB) + +static NV_STATUS _knvlinkActivateDiscoveredConns(OBJGPU *, KernelNvlink *, NvBool); +static NV_STATUS _knvlinkActivateDiscoveredP2pConn(OBJGPU *, KernelNvlink *, NvU32); +static NV_STATUS _knvlinkActivateDiscoveredSwitchConn(OBJGPU *, KernelNvlink *, NvU32); +static NV_STATUS _knvlinkActivateDiscoveredSysmemConn(OBJGPU *, KernelNvlink *, NvU32); +static NV_STATUS _knvlinkEnterSleep(OBJGPU *, KernelNvlink *, NvU32); +static NV_STATUS _knvlinkExitSleep(OBJGPU *, KernelNvlink *, NvU32); +static NvBool _knvlinkUpdateSwitchLinkMasks(OBJGPU *, KernelNvlink *, NvU32); +static NvBool _knvlinkUpdateSwitchLinkMasksGpuDegraded(OBJGPU *, KernelNvlink *); +static void _knvlinkUpdatePeerConfigs(OBJGPU *, KernelNvlink *); +static void _knvlinkPrintTopologySummary(OBJGPU *, KernelNvlink *); + +#endif + +/*! + * @brief Get the nvlink connections for the GPU. + * This function calls into the core library to trigger topology discovery + * on the set of links that have been registered with the core library. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreGetRemoteDeviceInfo_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + +#if defined(INCLUDE_NVLINK_LIB) + + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 flags = NVLINK_STATE_CHANGE_SYNC; + NvBool bNvswitchProxyPresent = NV_FALSE; + NvBool bUpdateConnStatus = NV_FALSE; + NvBool bCheckDegradedMode = NV_FALSE; + NvU32 linkId; + + // + // Topology discovery should NOT be triggered in RTD3/FGC6 exit path if L2 is + // supported. The remote information will be restored when RM state is restored + // + if (!knvlinkPoweredUpForD3_HAL(pGpu, pKernelNvlink)) + { + // + // Optimization: Check for nvlink proxy only when system fabric is externally + // managed. This would avoid RPCs in non-nvswitch cases. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + bNvswitchProxyPresent = knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink); + } + + // We only need to look at links that are still considered disconnected + FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->disconnectedLinkMask) + { + nvlink_conn_info conn_info = {0}; + bUpdateConnStatus = NV_FALSE; + + if (pKernelNvlink->nvlinkLinks[linkId].core_link) + { + // Call the core library to get the remote end information + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + nvlink_lib_get_remote_conn_info( + pKernelNvlink->nvlinkLinks[linkId].core_link, &conn_info); + + // + // nvlink_lib_get_remote_conn_info could fail to return connection info if + // it runs on a shared-NVSwitch virtualization model (HGX-2) where GPU nodes + // can't see NVSwitches. In that case, examine the NVLink scratch register + // for connectivity information. 
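+                    // If so, the block below assumes an NVSwitch peer and fills the
+                    // PCI fields with all-ones sentinel values, since the switch's
+                    // real domain/bus/device/function is not visible from this node.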
+ // + if (!conn_info.bConnected && bNvswitchProxyPresent) + { + conn_info.bConnected = NV_TRUE; + conn_info.deviceType = NVLINK_DEVICE_TYPE_NVSWITCH; + conn_info.pciDeviceId = NV_U32_MAX; + conn_info.domain = NV_U32_MAX; + conn_info.bus = NV_U16_MAX; + conn_info.device = NV_U16_MAX; + conn_info.function = NV_U16_MAX; + } + + // + // New connection is discovered from core library. But we don't know if this + // connection was shutdown or reset by fabric manager while enabling degraded + // mode. So, we have to check for degraded mode. + // + if (conn_info.bConnected) + { + bCheckDegradedMode = NV_TRUE; + } + } + else + { + // Aynchronous link initialization for IP 2.2 + if (pKernelNvlink->ipVerNvlink == NVLINK_VERSION_22) + { + flags = NVLINK_STATE_CHANGE_ASYNC; + } + + nvlink_lib_discover_and_get_remote_conn_info( + pKernelNvlink->nvlinkLinks[linkId].core_link, &conn_info, flags); + } + + // RPC into GSP-RM to update the link connected status only if its required + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected != conn_info.bConnected) + bUpdateConnStatus = NV_TRUE; + + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected = conn_info.bConnected; + + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected) + { + // Update the RM cache for the remote device information for the link + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.domain = conn_info.domain; + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bus = conn_info.bus; + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.device = conn_info.device; + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.function = conn_info.function; + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.pciDeviceId = conn_info.pciDeviceId; + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType = conn_info.deviceType; + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.linkNumber = conn_info.linkNumber; + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.chipSid = conn_info.chipSid; + + nvlink_memcpy(pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.devUuid, + conn_info.devUuid, + NV_UUID_LEN); + } + + if (bUpdateConnStatus) + { + // RPC into GSP-RM to update the link remote connection status for pGpu + status = knvlinkUpdateLinkConnectionStatus(pGpu, pKernelNvlink, linkId); + if (status != NV_OK) + { + return status; + } + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + } + else + { + NV_PRINTF(LEVEL_INFO, + "L2 supported. Skip topology discovery on GPU%d in RTD3/FGC6 exit\n", + pGpu->gpuInstance); + } + + // + // Update the RM cache for the discovered connections and then activate + // those connections. This includes all the post-topology settings like + // buffer-ready and interrupt enables + // + status = _knvlinkActivateDiscoveredConns(pGpu, pKernelNvlink, bCheckDegradedMode); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "Failed to activate the discovered connections on GPU%d\n", + pGpu->gpuInstance); + } + +#endif + + return status; +} + +/*! + * @brief Train all the connected sysmem links associated with the device + * to active through the nvlink core library. 
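+ *
+ * Note: Training is skipped on Fmodel, when link training is disabled
+ *       through the regkey, or when the fabric is externally managed by
+ *       the Fabric Manager.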
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkTrainSysmemLinksToActive_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 i; + + // On Fmodel, sysmem link training is not supported + if (IS_FMODEL(pGpu)) + { + NV_PRINTF(LEVEL_INFO, + "Skipping unsupported sysmem link training on GPU%d\n", + pGpu->gpuInstance); + + return NV_OK; + } + + // Return if link training is force disabled through regkey + if (pKernelNvlink->bSkipLinkTraining) + { + NV_PRINTF(LEVEL_INFO, + "Skipping link training due to regkey on GPU%d\n", + pGpu->gpuInstance); + + return NV_OK; + } + + // If fabric is managed by FM, return + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + NV_PRINTF(LEVEL_INFO, + "Fabric is externally managed, skip link training\n"); + + return NV_OK; + } + + NV_PRINTF(LEVEL_INFO, "Training sysmem links for GPU%d\n", + pGpu->gpuInstance); + + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->enabledLinks) + { + if (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bConnected && + ((pKernelNvlink->nvlinkLinks[i].remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_IBMNPU) || + (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_TEGRASHIM) || + (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_EBRIDGE))) + { + if (nvlink_lib_train_links_from_swcfg_to_active( + &pKernelNvlink->nvlinkLinks[i].core_link, 1, NVLINK_STATE_CHANGE_SYNC) + != NVL_SUCCESS) + { + nvErrorLog_va((void *)pGpu, NVLINK_ERROR, + "NVLink: failed to train link %d to remote PCI:%04x:%02x:%02x", + i, + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.domain, + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bus, + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.device); + + return NV_ERR_NOT_SUPPORTED; + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // + // After training links, we may have used up most of the available 4s + // timeout during GPU state load. As a WAR in lieu of improving the + // performance of link training SW, reset the timeout for now. + // + NV_PRINTF(LEVEL_INFO, "resetting timeout after link training\n"); + + threadStateResetTimeout(pGpu); + +#endif + + return NV_OK; +} + +/*! + * @brief Ensure links are trained and put into active. + * + * @param[in] pGpu0 OBJGPU pointer + * @param[in] pGpu1 OBJGPU pointer + * @param[in] pKernelNvlink0 KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCheckTrainingIsComplete_IMPL +( + OBJGPU *pGpu0, + OBJGPU *pGpu1, + KernelNvlink *pKernelNvlink0 +) +{ + NV_STATUS status = NV_OK; + +#if defined(INCLUDE_NVLINK_LIB) + OBJSYS *pSys = SYS_GET_INSTANCE(); + + KernelNvlink *pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + + if (pKernelNvlink1 == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Input mask contains a GPU on which NVLink is disabled.\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + // Link training will be triggered from KMD in L2 exit path + if (knvlinkPoweredUpForD3_HAL(pGpu0, pKernelNvlink0)) + { + NV_PRINTF(LEVEL_INFO, + "Skip link training on GPU%d in RTD3/FGC6 exit. 
Links will train to " + "ACTIVE in L2 exit path\n", pGpu0->gpuInstance); + return NV_OK; + } + + // Minion and SW training is by default disabled on RTL + if (IS_RTLSIM(pGpu0) && !pKernelNvlink0->bForceEnableCoreLibRtlsims) + { + return NV_OK; + } + + // Return if link training is force disabled through regkey + if (pKernelNvlink0->bSkipLinkTraining) + { + NV_PRINTF(LEVEL_INFO, + "Skipping link training due to regkey on GPU%d\n", + pGpu0->gpuInstance); + return NV_OK; + } + + // Return if forced config, since SW training is not supported + if (knvlinkIsForcedConfig(pGpu0, pKernelNvlink0)) + { + NV_PRINTF(LEVEL_INFO, "Skipping link due to forced configuration\n"); + return NV_OK; + } + + // If fabric is managed by FM, return + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + NV_PRINTF(LEVEL_INFO, + "Fabric is externally managed, skip link training\n"); + return NV_OK; + } + +#endif + + return status; +} + +/*! + * @brief Train all the connected links between the two given devices + * to active through the nvlink core library. + * + * @param[in] pGpu0 OBJGPU pointer + * @param[in] pGpu1 OBJGPU pointer + * @param[in] pKernelNvlink0 KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkTrainP2pLinksToActive_IMPL +( + OBJGPU *pGpu0, + OBJGPU *pGpu1, + KernelNvlink *pKernelNvlink0 +) +{ + NV_STATUS status = NV_OK; + +#if defined(INCLUDE_NVLINK_LIB) + + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 version = pKernelNvlink0->ipVerNvlink; + NvBool bTrainLinks = NV_FALSE; + NvU32 count = 0; + NvU32 i; + + nvlink_link *pLinks[NVLINK_MAX_LINKS_SW] = { 0 }; + + // Link training will be triggered from KMD in L2 exit path + if (knvlinkPoweredUpForD3_HAL(pGpu0, pKernelNvlink0)) + { + NV_PRINTF(LEVEL_INFO, + "Skip link training on GPU%d in RTD3/FGC6 exit. 
Links will train to " + "ACTIVE in L2 exit path\n", pGpu0->gpuInstance); + + return NV_OK; + } + + // Minion and SW training is by default disabled on RTL + if (IS_RTLSIM(pGpu0) && !pKernelNvlink0->bForceEnableCoreLibRtlsims) + { + return NV_OK; + } + + // Return if link training is force disabled through regkey + if (pKernelNvlink0->bSkipLinkTraining) + { + NV_PRINTF(LEVEL_INFO, + "Skipping link training due to regkey on GPU%d\n", + pGpu0->gpuInstance); + + return NV_OK; + } + + // Return if forced config, since SW training is not supported + if (knvlinkIsForcedConfig(pGpu0, pKernelNvlink0)) + { + NV_PRINTF(LEVEL_INFO, "Skipping link due to forced configuration\n"); + + return NV_OK; + } + + // If fabric is managed by FM, return + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + NV_PRINTF(LEVEL_INFO, + "Fabric is externally managed, skip link training\n"); + + return NV_OK; + } + + // Get the link train status for the enabled link masks + NV2080_CTRL_NVLINK_ARE_LINKS_TRAINED_PARAMS linkTrainedParams; + + portMemSet(&linkTrainedParams, 0, sizeof(linkTrainedParams)); + linkTrainedParams.linkMask = pKernelNvlink0->enabledLinks; + linkTrainedParams.bActiveOnly = NV_TRUE; + + // Reset timeout to clear any accumulated timeouts from link init + if (IS_GSP_CLIENT(pGpu0)) + { + threadStateResetTimeout(pGpu0); + } + + status = knvlinkExecGspRmRpc(pGpu0, pKernelNvlink0, + NV2080_CTRL_CMD_NVLINK_ARE_LINKS_TRAINED, + (void *)&linkTrainedParams, + sizeof(linkTrainedParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get the link train status for links\n"); + return status; + } + + // + // Bug# 3424466: Optimization - Return if all enabled links for this GPU are + // already trained. The core library makes several callbacks to check link + // state which results in numerous RPCs on GSP-RM platforms resulting in low + // perf on chips which have low link training latency and low links count. + // + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink0->enabledLinks) + { + if (!KNVLINK_IS_LINK_CONNECTED_TO_GPU(pKernelNvlink0, i, pGpu1)) + { + continue; + } + + if (!linkTrainedParams.bIsLinkActive[i]) + { + bTrainLinks = NV_TRUE; + break; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + if (!bTrainLinks) + { + NV_PRINTF(LEVEL_INFO, "Enabled links are all trained already, return\n"); + return NV_OK; + } + + // Train the mask of enabled links to ACTIVE state + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink0->enabledLinks) + { + if (!KNVLINK_IS_LINK_CONNECTED_TO_GPU(pKernelNvlink0, i, pGpu1)) + { + continue; + } + + if (version >= NVLINK_VERSION_22) + { + // Capture links for parallel link training + pLinks[count] = pKernelNvlink0->nvlinkLinks[i].core_link; + count++; + } + else + { + // Invoke link training for NVLINK <= 2.0 + (void)nvlink_lib_train_links_from_swcfg_to_active( + &pKernelNvlink0->nvlinkLinks[i].core_link, 1, NVLINK_STATE_CHANGE_SYNC); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // Invoke link training for NVLINK >= 2.2 + if (count > 0) + { + // + // nvlink_lib_train_links_from_swcfg_to_active with + // NVLINK_STATE_CHANGE_ASYNC flag invokes link training asynchronously, + // but the call itself is synchronous i.e. it will poll for link + // training to complete. 
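+        // Submitting the captured links in a single call lets them train in
+        // parallel, instead of the one-link-at-a-time SYNC path used for
+        // NVLink versions below 2.2.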
+ // + NV_ASSERT(version >= NVLINK_VERSION_22); + (void)nvlink_lib_train_links_from_swcfg_to_active( + pLinks, count, NVLINK_STATE_CHANGE_ASYNC); + } + + // Get the link train status for the enabled link masks + portMemSet(&linkTrainedParams, 0, sizeof(linkTrainedParams)); + linkTrainedParams.linkMask = pKernelNvlink0->enabledLinks; + linkTrainedParams.bActiveOnly = NV_TRUE; + + // Reset timeout to clear any accumulated timeouts from link init + if (IS_GSP_CLIENT(pGpu0)) + { + threadStateResetTimeout(pGpu0); + } + + status = knvlinkExecGspRmRpc(pGpu0, pKernelNvlink0, + NV2080_CTRL_CMD_NVLINK_ARE_LINKS_TRAINED, + (void *)&linkTrainedParams, + sizeof(linkTrainedParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get the link train status for links\n"); + return status; + } + + // Check if the links are trained to "active" state. + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink0->enabledLinks) + { + if (!KNVLINK_IS_LINK_CONNECTED_TO_GPU(pKernelNvlink0, i, pGpu1)) + { + continue; + } + + if (linkTrainedParams.bIsLinkActive[i]) + { + continue; + } + + nvErrorLog_va((void *)pGpu0, NVLINK_ERROR, + "NVLink: Failed to train link %d to remote PCI:%04x:%02x:%02x", + i, + pKernelNvlink0->nvlinkLinks[i].remoteEndInfo.domain, + pKernelNvlink0->nvlinkLinks[i].remoteEndInfo.bus, + pKernelNvlink0->nvlinkLinks[i].remoteEndInfo.device); + + status = NV_ERR_INVALID_STATE; + } + FOR_EACH_INDEX_IN_MASK_END; + +#endif + + return status; +} + +/*! + * knvlinkTrainFabricLinksToActive_IMPL + * Setup NVLinks between 2 peers connected to switch. Train the links to + * High Speed. + * + * Note: Desired sequence to setup NvLink P2P is: + * 1. A client queries P2P capability among GPUs. + * 2. If the GPUs are P2P compatible, create NV50_P2P object which invokes + * link training. + * However, existing GPU<->GPU link training happens during step 1 through + * gpumgrGetP2PCaps - which gets called on RmInitAdapter and may lead to timeout + * based upon the time consumed by costly link training operations. + * + * For now, we are fixing this for nvswitch systems by adding this helper + * function which should just get invoked during NV50_P2P object creation. + * + * This issue needs to be fixed for non-nvswitch systems as well. Bug:200285708. + * Once the bug is fixed, knvlinkTrainFabricLinksToActive can be called from + * knvlinkTrainP2pLinksToActive. 
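+ *
+ * Note: Only links whose remote endpoint is an NVSwitch are trained here.
+ * Training is skipped on RTL simulation, when disabled through the regkey,
+ * in forced-config mode, or when the fabric is externally managed.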
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkTrainFabricLinksToActive_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 i; + + // Minion and SW training is by default disabled on RTL + if (IS_RTLSIM(pGpu) && !pKernelNvlink->bForceEnableCoreLibRtlsims) + { + return NV_OK; + } + + // Return if link training is force disabled through regkey + if (pKernelNvlink->bSkipLinkTraining) + { + NV_PRINTF(LEVEL_INFO, + "Skipping link training due to regkey on GPU%d\n", + pGpu->gpuInstance); + + return NV_OK; + } + + // If fabric is managed by FM, return + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + NV_PRINTF(LEVEL_INFO, + "Fabric is externally managed, skip link training\n"); + + return NV_OK; + } + + if (knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + NV_PRINTF(LEVEL_INFO, + "Nvlink in Forced Config - skip link training.\n"); + + return NV_OK; + } + + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->enabledLinks) + { + if ( pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bConnected && + (pKernelNvlink->nvlinkLinks[i].remoteEndInfo.deviceType == + NVLINK_DEVICE_TYPE_NVSWITCH)) + { + if (nvlink_lib_train_links_from_swcfg_to_active( + &pKernelNvlink->nvlinkLinks[i].core_link, 1, NVLINK_STATE_CHANGE_SYNC) + != NVL_SUCCESS) + { + nvErrorLog_va((void *)pGpu, NVLINK_ERROR, + "NVLink: failed to train link %d to remote PCI:%04x:%02x:%02x", + i, + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.domain, + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.bus, + pKernelNvlink->nvlinkLinks[i].remoteEndInfo.device); + + return NV_ERR_INVALID_STATE; + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + +#endif + + return NV_OK; +} + +/*! + * @brief Transition/Wakeup the links into/from sleep (L2) state + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkMask Mask of links + * @param[in] bEntry Enter/Exit sleep (L2) + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkEnterExitSleep_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkMask, + NvBool bEntry +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 linkId; + + // NVLink L2 as a feature should be enabled + if (!pKernelNvlink->getProperty(pKernelNvlink, + PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED)) + { + NV_PRINTF(LEVEL_ERROR, "NVLink L2 is not supported. Returning\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + // Return error if NVLink fabric is managed by FM + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + NV_PRINTF(LEVEL_ERROR, + "Skipping L2 entry/exit since fabric is externally managed\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + // Check if all the links in the mask are connected + FOR_EACH_INDEX_IN_MASK(32, linkId, linkMask) + { + if (!pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d: Link%d is not connected. 
Returning\n", + pGpu->gpuInstance, linkId); + + return NV_ERR_NOT_SUPPORTED; + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // Links that share a PLL must enter/exit L2 together + FOR_EACH_INDEX_IN_MASK(32, linkId, linkMask) + { + // If the link is a PLL master, consider the slave link + if (pKernelNvlink->nvlinkLinks[linkId].pllMasterLinkId == linkId) + { + // If the slave link exists and is not init-disabled, it should be included + if ( (pKernelNvlink->nvlinkLinks[linkId].pllSlaveLinkId != NVLINK_MAX_LINKS_SW) && + (NVBIT(pKernelNvlink->nvlinkLinks[linkId].pllSlaveLinkId) & pKernelNvlink->enabledLinks) && + !(NVBIT(pKernelNvlink->nvlinkLinks[linkId].pllSlaveLinkId) & linkMask) ) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d: Links sharing PLL should enter/exit L2 together. Returning\n", + pGpu->gpuInstance); + + return NV_ERR_NOT_SUPPORTED; + } + } + else + { + // For a slave link, its PLL master should be included if not init-disabled + if ( (NVBIT(pKernelNvlink->nvlinkLinks[linkId].pllMasterLinkId) & pKernelNvlink->enabledLinks) && + !(NVBIT(pKernelNvlink->nvlinkLinks[linkId].pllMasterLinkId) & linkMask) ) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d: Links sharing PLL should enter/exit L2 together. Returning\n", + pGpu->gpuInstance); + + return NV_ERR_NOT_SUPPORTED; + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // Device must be registered in the nvlink core library + if (!pKernelNvlink->pNvlinkDev) + { + NV_PRINTF(LEVEL_ERROR, + "GPU%d: not registered in core lib. Returning\n", + pGpu->gpuInstance); + + return NV_ERR_NOT_SUPPORTED; + } + + if (bEntry) + { + // Remove the peer mapping in HSHUB and transition links to sleep (L2) + return _knvlinkEnterSleep(pGpu, pKernelNvlink, linkMask); + } + else + { + // Wakeup the links from sleep (L2) and setup the peer mapping in HSHUB + return _knvlinkExitSleep(pGpu, pKernelNvlink, linkMask); + } +#endif + + return NV_OK; +} + +/*! + * @brief Shutdown all the connected links associated with the device + * through the nvlink core library. 
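+ *
+ * Note: When lane shutdown is supported, the eligible links are collected and
+ *       powered down from ACTIVE to OFF in a single core library call;
+ *       otherwise each link is powered down to SWCFG individually.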
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreShutdownDeviceLinks_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + nvlink_link *pLinks[NVLINK_MAX_LINKS_SW] = {0}; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 count = 0; + NvU32 linkId; + + // Skip link shutdown where fabric manager is present + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED) || + (pKernelNvlink->pNvlinkDev == NULL)) + { + NV_PRINTF(LEVEL_INFO, + "core lib device is either externally managed or not present, skipping\n"); + + return NV_OK; + } + + // return early if there are no enabled links + if (pKernelNvlink->enabledLinks == 0) + { + NV_PRINTF(LEVEL_INFO, "No links to shutdown for the GPU%d\n", + pGpu->gpuInstance); + + return NV_OK; + } + + FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->enabledLinks) + { + // Capture the links for lane shutdown through core lib if supported + if (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED)) + { + // Skip GPU in reset + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType == + NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU) + { + OBJGPU* pRemoteGpu = gpumgrGetGpuFromBusInfo( + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.domain, + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bus, + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.device); + if (API_GPU_IN_RESET_SANITY_CHECK(pRemoteGpu)) + { + continue; + } + } + pLinks[count] = pKernelNvlink->nvlinkLinks[linkId].core_link; + count++; + } + else + { + nvlink_lib_powerdown_links_from_active_to_swcfg( + &pKernelNvlink->nvlinkLinks[linkId].core_link, + 1, NVLINK_STATE_CHANGE_SYNC); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + // Trigger laneshutdown through core lib if shutdown is supported + if (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED) && (count > 0)) + { + if (nvlink_lib_powerdown_links_from_active_to_off( + pLinks, count, NVLINK_STATE_CHANGE_SYNC)) + { + NV_PRINTF(LEVEL_ERROR, "Unable to turn off links for the GPU%d\n", + pGpu->gpuInstance); + + return NV_ERR_INVALID_STATE; + } + } + +#endif + + return NV_OK; +} + +/*! + * @brief Reset all the connected links associated with the device + * through the nvlink core library. 
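+ *
+ * Note: The reset is performed only when lane shutdown is enabled; links
+ *       whose remote GPU is currently in reset are skipped.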
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkCoreResetDeviceLinks_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if defined(INCLUDE_NVLINK_LIB) + + nvlink_link *pLinks[NVLINK_MAX_LINKS_SW] = {0}; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 count = 0; + NvU32 linkId; + + // Skip link reset where fabric manager is present + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED) || + (pKernelNvlink->pNvlinkDev == NULL)) + { + NV_PRINTF(LEVEL_INFO, + "core lib device is either externally managed or not present, skipping\n"); + + return NV_OK; + } + + // return early if there are no enabled links + if (pKernelNvlink->enabledLinks == 0) + { + NV_PRINTF(LEVEL_INFO, "No links to reset for the GPU%d\n", + pGpu->gpuInstance); + + return NV_OK; + } + + // We only perform the link reset if lane shutdown is enabled + if (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ENABLED)) + { + FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->enabledLinks) + { + // Skip GPU in reset + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType == + NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU) + { + OBJGPU* pRemoteGpu = gpumgrGetGpuFromBusInfo( + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.domain, + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bus, + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.device); + if (API_GPU_IN_RESET_SANITY_CHECK(pRemoteGpu)) + { + continue; + } + } + pLinks[count] = pKernelNvlink->nvlinkLinks[linkId].core_link; + count++; + } + FOR_EACH_INDEX_IN_MASK_END; + + if (nvlink_lib_reset_links(pLinks, count, NVLINK_STATE_CHANGE_SYNC) && (count > 0)) + { + NV_PRINTF(LEVEL_ERROR, "Unable to reset link(s) for GPU%d\n", + pGpu->gpuInstance); + + return NV_ERR_INVALID_STATE; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "Lane shutdown not enabled, skipping link(s) reset for GPU%d\n", + pGpu->gpuInstance); + + return NV_ERR_INVALID_STATE; + } + +#endif + + return NV_OK; +} + +/*! + * @brief Retrain a link from either safe mode or off. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link ID of the link in question + * @param[in] bFromOff Whether link should be retrained from SAFE/OFF + * + * @returns NV_OK if link retraining was successful + */ +NV_STATUS +knvlinkRetrainLink_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId, + NvBool bFromOff +) +{ + NV_STATUS status = NV_OK; + + // If NVLINK_LIB isn't enabled, we just execute prologue and return. + _knvlinkRetrainLinkPrologue(pGpu, pKernelNvlink, linkId); + + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // If fabric is managed by FM + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { +#if defined(INCLUDE_NVLINK_LIB) + + // + // Notify FM for link re-training. + // + // Note, at this point all DL interrupts should be disabled. The interrupts + // will be enabled through nvlinkCoreReenableLinkInterruptsCallback only if + // links can be successfully re-trained. + // + // It is responsibility of FM to highlight link re-training failures to the + // system admin. Hence, we shouldn't be logging Xid in this case. + // + // It is worth to note that, there is no race in between interrupt + // enable/disable register update as we notify FM only after disabling + // interrupts. 
+ // + gpuNotifySubDeviceEvent(pGpu, + NV2080_NOTIFIERS_NVLINK_ERROR_RECOVERY_REQUIRED, + NULL, 0, 0, (NvV16)NV2080_CTRL_NVLINK_UNIT_DL); + + return NV_OK; +#endif + } + +#if defined(INCLUDE_NVLINK_LIB) + // + // If this is a slave endpoint requesting the retrain, kick off a request + // to the master instead. There is no need to (and indeed, we should not) + // hold the master endpoint lock here. + // + if (!pKernelNvlink->nvlinkLinks[linkId].core_link->master) + { + nvlink_link_change *link_change; + nvlink_link *slave, *master; + + slave = pKernelNvlink->nvlinkLinks[linkId].core_link; + if (nvlink_lib_get_link_master(slave, &master) != NVL_SUCCESS) + { + NV_PRINTF(LEVEL_ERROR, + "link master could not be found from GPU%u link %u\n", + gpuGetInstance(pGpu), linkId); + + return NV_ERR_INVALID_STATE; + } + + NV_ASSERT_OR_RETURN(master != slave, NV_ERR_INVALID_STATE); + + link_change = &slave->link_change; + link_change->slave = slave; + link_change->master = master; + link_change->change_type = bFromOff ? nvlink_retrain_from_off : + nvlink_retrain_from_safe; + + if (master->link_handlers->queue_link_change(link_change) != NVL_SUCCESS) + { + return NV_ERR_GENERIC; + } + + // + // Because the link retrain request to the master is asynchronous, + // tell the caller they'll need to wait. + // + return NV_WARN_MORE_PROCESSING_REQUIRED; + } +#endif + + if (bFromOff) + { + status = knvlinkRetrainLinkFromOff(pGpu, pKernelNvlink, linkId); + } + else + { + status = knvlinkRetrainLinkFromSafe(pGpu, pKernelNvlink, linkId); + } + + return status; +} + +/*! + * @brief Retrain the link from OFF state + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link ID of the link in question + * + * @returns NV_OK if link retraining was successful + */ +NV_STATUS +knvlinkRetrainLinkFromOff +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + + return NV_OK; +} + +/*! + * @brief Retrain the link from SAFE state + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link ID of the link in question + * + * @returns NV_OK if link retraining was successful + */ +NV_STATUS +knvlinkRetrainLinkFromSafe +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + + return NV_OK; +} + +/*! + * @brief _knvlinkRetrainLinkPrologue currently disables DL interrupts + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link ID of the link in question + */ +static void +_knvlinkRetrainLinkPrologue +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + + return; +} + +#if defined(INCLUDE_NVLINK_LIB) + +/*! + * @brief Activate the connections discovered in topology discovery + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] bCheckDegradedMode Whether to check for degraded mode + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkActivateDiscoveredConns +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvBool bCheckDegradedMode +) +{ + NvU32 initDisconnectedLinkMask = pKernelNvlink->disconnectedLinkMask; + NvU32 switchLinkMasks = 0; + NvBool bPeerUpdated = NV_FALSE; + NV_STATUS status = NV_OK; + NvU32 linkId; + + // + // Degraded Mode on LR10+ systems. Check for degraded mode if this was not done before + // and if new connections were discovered from the core library. 
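+    // bCheckDegradedMode is set only when the fabric is externally managed and
+    // the core library reported a newly connected link during discovery.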
+ // + if (bCheckDegradedMode) + { + status = knvlinkApplyNvswitchDegradedModeSettings_HAL(pGpu, pKernelNvlink, + &switchLinkMasks); + } + + // We only need to look at links that are considered disconnected + FOR_EACH_INDEX_IN_MASK(32, linkId, initDisconnectedLinkMask) + { + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bConnected) + { + // This link is now marked connected + pKernelNvlink->disconnectedLinkMask &= ~NVBIT(linkId); + + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType + == NVLINK_DEVICE_TYPE_GPU) + { + bPeerUpdated = NV_TRUE; + + // + // Activate the p2p link. This includes copying the remote device + // information for the remote link and enabling the post topology + // steps on both the ends of the link. + // + // NOTE: HSHUB will nott be setup for the discovered peer link here + // and will only be configured when a P2P object is created + // + status = _knvlinkActivateDiscoveredP2pConn(pGpu, pKernelNvlink, linkId); + } + else if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType + == NVLINK_DEVICE_TYPE_NVSWITCH) + { + status = _knvlinkActivateDiscoveredSwitchConn(pGpu, pKernelNvlink, linkId); + + // + // There is no need to mark link as a master. On NVSwitch systems, + // External Fabric Management should be enabled by default. + // + switchLinkMasks |= NVBIT(linkId); + } + else + { + // + // Activate the sysmem link. This includes even training the link to + // ACTIVE, since for sysmem link post-topology steps should be setup + // only after ACTIVE + // + status = _knvlinkActivateDiscoveredSysmemConn(pGpu, pKernelNvlink, linkId); + } + + // If any of the above failed, return failure + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to activate link%d on GPU%d!!!\n", linkId, + pGpu->gpuInstance); + + return status; + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + +#if defined(NVCPU_PPC64LE) || defined(NVCPU_AARCH64) + if (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED)) + { + // Credits should be released after Active for sysmem + knvlinkEnableLinksPostTopology_HAL(pGpu, pKernelNvlink, pKernelNvlink->enabledLinks); + if (status != NV_OK) + { + return status; + } + + // Enable SYSMEM links in HSHUB. On P9 this must happen after Active + knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); + } +#endif + + // If any new connection was discovered in this call + if (initDisconnectedLinkMask != pKernelNvlink->disconnectedLinkMask) + { + if (pKernelNvlink->disconnectedLinkMask == pKernelNvlink->enabledLinks) //GPU degraded case + { + bPeerUpdated |= _knvlinkUpdateSwitchLinkMasksGpuDegraded(pGpu, pKernelNvlink); + } + else // other cases + { + bPeerUpdated |= _knvlinkUpdateSwitchLinkMasks(pGpu, pKernelNvlink, + switchLinkMasks); + } + + _knvlinkPrintTopologySummary(pGpu, pKernelNvlink); + + // + // Make sure we update the CE mappings for this GPU, if the known set + // of peers has changed. + // + knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); + if (bPeerUpdated) + { + // + // Request that any peers updated also update their CE mappings, + // since they now have a new peer. + // + _knvlinkUpdatePeerConfigs(pGpu, pKernelNvlink); + } + } + + return status; +} + +/*! + * @brief Activate the given P2P connection + * This function updates the RM state for the discovered P2P connection + * and enables post-topology steps on both ends of the connection. But, + * it does not configure HSHUB on any end of the connection. 
HSHUB will + * be configured only when a P2P object is created + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link ID + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkActivateDiscoveredP2pConn +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + OBJGPU *pGpu0 = pGpu; + OBJGPU *pGpu1 = NULL; + KernelNvlink *pKernelNvlink0 = GPU_GET_KERNEL_NVLINK(pGpu0); + NV_STATUS status = NV_OK; + NvBool bUpdateConnStatus = NV_FALSE; + NvU32 remoteLinkId; + NvU32 gpuInst; + + // Get the remote OBJGPU and Nvlink + for (gpuInst = 0; gpuInst < NV_MAX_DEVICES; gpuInst++) + { + pGpu1 = gpumgrGetGpu(gpuInst); + + if (pGpu1 && + // Just rely on PCIe DBDF values for detecting the remote + (pKernelNvlink0->nvlinkLinks[linkId].remoteEndInfo.domain == gpuGetDomain(pGpu1)) && + (pKernelNvlink0->nvlinkLinks[linkId].remoteEndInfo.bus == gpuGetBus(pGpu1)) && + (pKernelNvlink0->nvlinkLinks[linkId].remoteEndInfo.device == gpuGetDevice(pGpu1)) && + (pKernelNvlink0->nvlinkLinks[linkId].remoteEndInfo.function == 0)) + { + KernelNvlink *pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + + // Map the remote GPU's instance number to the associated links on this GPU. + status = knvlinkSetLinkMaskToPeer(pGpu0, pKernelNvlink0, pGpu1, + (pKernelNvlink0->peerLinkMasks[gpuInst] | NVBIT(linkId))); + if (status != NV_OK) + return status; + + // + // Post Topology enable on the local end of the link. + // Needs to happen before HSHUB is setup for this link on any end. + // + status = knvlinkEnableLinksPostTopology_HAL(pGpu0, pKernelNvlink0, NVBIT(linkId)); + if (status != NV_OK) + { + return status; + } + + // Set the remote device information for the remote device + if (pKernelNvlink1) + { + remoteLinkId = pKernelNvlink0->nvlinkLinks[linkId].remoteEndInfo.linkNumber; + + // RPC into GSP-RM to update the link remote connection status only if its required + if (pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.bConnected == NV_FALSE) + bUpdateConnStatus = NV_TRUE; + + // Set the PCI information for remote end + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.bConnected = NV_TRUE; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.domain = pKernelNvlink0->pNvlinkDev->pciInfo.domain; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.bus = pKernelNvlink0->pNvlinkDev->pciInfo.bus; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.device = pKernelNvlink0->pNvlinkDev->pciInfo.device; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.function = pKernelNvlink0->pNvlinkDev->pciInfo.function; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.pciDeviceId = pKernelNvlink0->pNvlinkDev->pciInfo.pciDeviceId; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.deviceType = pKernelNvlink0->pNvlinkDev->type; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.chipSid = pKernelNvlink0->nvlinkLinks[linkId].core_link->localSid; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.linkNumber = linkId; + + // Update the DLPL revision in the connection information + pKernelNvlink0->nvlinkLinks[linkId].remoteEndInfo.ipVerDlPl = pKernelNvlink1->nvlinkLinks[remoteLinkId].ipVerDlPl; + pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.ipVerDlPl = pKernelNvlink0->nvlinkLinks[linkId].ipVerDlPl; + + if (bUpdateConnStatus) + { + // RPC into GSP-RM to update the link remote connection status for pGpu1 for the given link + status = knvlinkUpdateLinkConnectionStatus(pGpu1, pKernelNvlink1, 
remoteLinkId); + if (status != NV_OK) + { + return status; + } + } + + pKernelNvlink1->disconnectedLinkMask &= ~NVBIT(remoteLinkId); + + // Map this GPU's instance number to the associated link on the remote end. + status = knvlinkSetLinkMaskToPeer(pGpu1, pKernelNvlink1, pGpu0, + (pKernelNvlink1->peerLinkMasks[gpuGetInstance(pGpu0)] | NVBIT(remoteLinkId))); + if (status != NV_OK) + return status; + + // + // Post Topology enable on the remote end of the link. + // Needs to happen before HSHUB is setup for this link on any end. + // + status = knvlinkEnableLinksPostTopology_HAL(pGpu1, pKernelNvlink1, NVBIT(remoteLinkId)); + if (status != NV_OK) + { + return status; + } + + // Set the deviceUUID + portMemCopy(pKernelNvlink1->nvlinkLinks[remoteLinkId].remoteEndInfo.devUuid, + NV_UUID_LEN, + pGpu0->gpuUuid.uuid, + NV_UUID_LEN); + + // + // The master of a GPU <-> GPU link depends on instance number. This is so that when locking + // (which requires the master to be locked before the slave), the lower GPU instance number + // will always be locked first, which is how rmGpuLocksAcquire acquires them. For loopback, + // fall back to link ID instead. + // + if ((gpuGetInstance(pGpu0) < gpuGetInstance(pGpu1)) || + ((gpuGetInstance(pGpu0) == gpuGetInstance(pGpu1)) && + (linkId < remoteLinkId))) + { + NV_ASSERT(NVL_SUCCESS == nvlink_lib_set_link_master( + pKernelNvlink0->nvlinkLinks[linkId].core_link)); + } + else if ((gpuGetInstance(pGpu1) < gpuGetInstance(pGpu0)) || + ((gpuGetInstance(pGpu1) == gpuGetInstance(pGpu0)) && + (remoteLinkId < linkId))) + { + NV_ASSERT(NVL_SUCCESS == nvlink_lib_set_link_master( + pKernelNvlink1->nvlinkLinks[remoteLinkId].core_link)); + } + + break; + } + } + } + + return status; +} + +/*! + * @brief Activate the given switch connection + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link ID + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkActivateDiscoveredSwitchConn +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + NV_STATUS status = NV_OK; + + // Post Topology enablement for switch links + status = knvlinkEnableLinksPostTopology_HAL(pGpu, pKernelNvlink, NVBIT(linkId)); + if (status != NV_OK) + { + return status; + } + + return NV_OK; +} + +/*! + * @brief Activate the given P2P connection + * This function updates the RM state for the discovered sysmem + * connection and trains the connection to ACTIVE, because, for + * sysmem link post-topology steps can only be configured after + * ACTIVE. HSHUB is also configured for sysmem link here. 
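+ *
+ * Note: The GPU end of the connection is always set as the link master for
+ *       NPU connections before training is started.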
+ * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkId Link ID + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkActivateDiscoveredSysmemConn +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkId +) +{ + NV_STATUS status = NV_OK; + + NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_PARAMS updateHshubMuxParams; + NV2080_CTRL_NVLINK_SETUP_NVLINK_SYSMEM_PARAMS nvlinkSysmemParams; + + pKernelNvlink->sysmemLinkMask |= NVBIT(linkId); + + portMemSet(&nvlinkSysmemParams, 0, sizeof(nvlinkSysmemParams)); + nvlinkSysmemParams.sysmemLinkMask = pKernelNvlink->sysmemLinkMask; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_SETUP_NVLINK_SYSMEM, + (void *)&nvlinkSysmemParams, + sizeof(nvlinkSysmemParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to setup HSHUB NVLink sysmem links state\n"); + return status; + } + + // Always make the GPU side the master for NPU connections + NV_ASSERT(NVL_SUCCESS == nvlink_lib_set_link_master( + pKernelNvlink->nvlinkLinks[linkId].core_link)); + + // Train SYSMEM links to Active, and only then enable traffic + status = knvlinkTrainSysmemLinksToActive(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "FAILED TO TRAIN CPU/SYSMEM LINKS TO ACTIVE on GPU%d!!!\n", + pGpu->gpuInstance); + + NV_ASSERT(0); + } + + portMemSet(&updateHshubMuxParams, 0, sizeof(updateHshubMuxParams)); + updateHshubMuxParams.updateType = NV2080_CTRL_NVLINK_UPDATE_HSHUB_MUX_TYPE_PROGRAM; + updateHshubMuxParams.bSysMem = NV_TRUE; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_UPDATE_HSHUB_MUX, + (void *)&updateHshubMuxParams, + sizeof(updateHshubMuxParams)); + return status; +} + +/*! + * @brief Transition the mask of links into sleep (L2) state + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkMask Mask of links + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkEnterSleep +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkMask +) +{ + NV_STATUS retStatus = NV_OK; + NvlStatus status = NVL_SUCCESS; + + NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS programBufferRdyParams; + NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS saveRestoreHshubStateParams; + + portMemSet(&programBufferRdyParams, 0, sizeof(programBufferRdyParams)); + programBufferRdyParams.flags = NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_SAVE; + programBufferRdyParams.bSysmem = NV_FALSE; + programBufferRdyParams.peerLinkMask = linkMask; + + // Save Bufferready state for the the mask of links entering L2 + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROGRAM_BUFFERREADY, + (void *)&programBufferRdyParams, + sizeof(programBufferRdyParams)); + if (status != NV_OK) + return status; + + portMemSet(&saveRestoreHshubStateParams, 0, sizeof(saveRestoreHshubStateParams)); + saveRestoreHshubStateParams.linkMask = linkMask; + saveRestoreHshubStateParams.bSave = NV_TRUE; + + // Save HSHUB SW state for the links which will need to be restored later + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_SAVE_RESTORE_HSHUB_STATE, + (void *)&saveRestoreHshubStateParams, + sizeof(saveRestoreHshubStateParams)); + if (status != NV_OK) + return status; + + // In L2 Entry path + pKernelNvlink->bL2Entry = NV_TRUE; + + // Put the mask of links of the device to sleep + status = nvlink_lib_powerdown_links_from_active_to_L2(pKernelNvlink->pNvlinkDev, + 
linkMask, + NVLINK_STATE_CHANGE_ASYNC); + if (status == NVL_MORE_PROCESSING_REQUIRED) + { + NV_PRINTF(LEVEL_INFO, + "Transition to L2 for GPU%d: linkMask 0x%x in progress... Waiting for " + "remote endpoints to request L2 entry\n", pGpu->gpuInstance, + linkMask); + + return NV_WARN_MORE_PROCESSING_REQUIRED; + } + + if (status != NVL_SUCCESS) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to put the linkmask 0x%x of GPU%d to SLEEP\n", + linkMask, pGpu->gpuInstance); + + return NV_ERR_GENERIC; + } + + return retStatus; +} + +/*! + * @brief Wakeup the mask of links from sleep (L2) state + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] linkMask Mask of links + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkExitSleep +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 linkMask +) +{ + NvlStatus status = NVL_SUCCESS; + NvU32 linkId; + NvU32 remoteLinkId; + NvU32 gpuInst; + NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS programBufferRdyParams; + NV2080_CTRL_NVLINK_SAVE_RESTORE_HSHUB_STATE_PARAMS saveRestoreHshubStateParams; + pKernelNvlink->bL2Entry = NV_FALSE; + { + // Wakeup the mask of links of the device from sleep using legacy l2 exit + status = nvlink_lib_train_links_from_L2_to_active(pKernelNvlink->pNvlinkDev, + linkMask, + NVLINK_STATE_CHANGE_ASYNC); + } + + if (status == NVL_SUCCESS) + { + // Perform post-initialization setup for links that exited L2 + FOR_EACH_INDEX_IN_MASK(32, linkId, linkMask) + { + // Post topology link enable for pre-Ampere. This sets up buffer ready + status = knvlinkEnableLinksPostTopology_HAL(pGpu, pKernelNvlink, NVBIT(linkId)); + if (status != NV_OK) + { + return status; + } + + // Update the current NVLink configuration + knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); + + // Perform post-topology initialization steps on the remote endpoint + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_GPU) + { + OBJGPU *pGpu1 = NULL; + KernelNvlink *pKernelNvlink1 = NULL; + + // Get the remote OBJGPU and Nvlink + for (gpuInst = 0; gpuInst < NV_MAX_DEVICES; gpuInst++) + { + pGpu1 = gpumgrGetGpu(gpuInst); + + if (pGpu1 && + (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.domain == gpuGetDomain(pGpu1) && + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bus == gpuGetBus(pGpu1) && + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.device == gpuGetDevice(pGpu1) && + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.function == 0)) + { + pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + remoteLinkId = pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.linkNumber; + + // Post topology link enable for pre-Ampere. This sets up buffer ready + status = knvlinkEnableLinksPostTopology_HAL(pGpu1, pKernelNvlink1, NVBIT(remoteLinkId)); + if (status != NV_OK) + { + return status; + } + + // Update the current NVLink configuration + knvlinkUpdateCurrentConfig(pGpu1, pKernelNvlink1); + + break; + } + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + } + + // + // Restore HSHUB *ONLY AFTER* links have been trained and post-topology setup is complete + // on both ends of the link. 
Only then HSHUB can be configured for P2P on any side of link + // + if (status == NVL_SUCCESS) + { + portMemSet(&saveRestoreHshubStateParams, 0, sizeof(saveRestoreHshubStateParams)); + saveRestoreHshubStateParams.linkMask = linkMask; + saveRestoreHshubStateParams.bSave = NV_FALSE; + + // Restore HSHUB SW state for the links which exited L2 state + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_SAVE_RESTORE_HSHUB_STATE, + (void *)&saveRestoreHshubStateParams, + sizeof(saveRestoreHshubStateParams)); + if (status != NV_OK) + return status; + + knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); + + portMemSet(&programBufferRdyParams, 0, sizeof(programBufferRdyParams)); + programBufferRdyParams.flags = NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_RESTORE; + programBufferRdyParams.bSysmem = NV_FALSE; + programBufferRdyParams.peerLinkMask = linkMask; + + // Restore Bufferready state for the links which exited L2 state + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROGRAM_BUFFERREADY, + (void *)&programBufferRdyParams, + sizeof(programBufferRdyParams)); + if (status != NV_OK) + return status; + + FOR_EACH_INDEX_IN_MASK(32, linkId, linkMask) + { + if (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.deviceType == NVLINK_DEVICE_TYPE_GPU) + { + OBJGPU *pGpu1 = NULL; + KernelNvlink *pKernelNvlink1 = NULL; + + // Get the remote OBJGPU and Nvlink + for (gpuInst = 0; gpuInst < NV_MAX_DEVICES; gpuInst++) + { + pGpu1 = gpumgrGetGpu(gpuInst); + + if (pGpu1 && + (pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.domain == gpuGetDomain(pGpu1) && + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.bus == gpuGetBus(pGpu1) && + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.device == gpuGetDevice(pGpu1) && + pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.function == 0)) + { + pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + remoteLinkId = pKernelNvlink->nvlinkLinks[linkId].remoteEndInfo.linkNumber; + + portMemSet(&saveRestoreHshubStateParams, 0, sizeof(saveRestoreHshubStateParams)); + saveRestoreHshubStateParams.linkMask = NVBIT(remoteLinkId); + saveRestoreHshubStateParams.bSave = NV_FALSE; + + // Restore HSHUB SW state for the links which exited L2 state + status = knvlinkExecGspRmRpc(pGpu1, pKernelNvlink1, + NV2080_CTRL_CMD_NVLINK_SAVE_RESTORE_HSHUB_STATE, + (void *)&saveRestoreHshubStateParams, + sizeof(saveRestoreHshubStateParams)); + if (status != NV_OK) + return status; + + knvlinkUpdateCurrentConfig(pGpu1, pKernelNvlink1); + + portMemSet(&programBufferRdyParams, 0, sizeof(programBufferRdyParams)); + programBufferRdyParams.flags = NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_RESTORE; + programBufferRdyParams.bSysmem = NV_FALSE; + programBufferRdyParams.peerLinkMask = NVBIT(remoteLinkId); + + // + // Restore Buffer Ready state for the links from cached SW state after HSHUB + // settings have been restored + // + status = knvlinkExecGspRmRpc(pGpu1, pKernelNvlink1, + NV2080_CTRL_CMD_NVLINK_PROGRAM_BUFFERREADY, + (void *)&programBufferRdyParams, + sizeof(programBufferRdyParams)); + if (status != NV_OK) + return status; + + break; + } + } + } + } + FOR_EACH_INDEX_IN_MASK_END; + } + + if (status == NVL_MORE_PROCESSING_REQUIRED) + { + NV_PRINTF(LEVEL_INFO, + "Transition to L0 for GPU%d: linkMask 0x%x in progress... 
Waiting for " + "remote endpoints to request L2 exit\n", pGpu->gpuInstance, + linkMask); + + return NV_WARN_MORE_PROCESSING_REQUIRED; + } + + if (status != NVL_SUCCESS) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to wakeup the linkmask 0x%x of GPU%d from SLEEP\n", + linkMask, pGpu->gpuInstance); + + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +/*! + * @brief Updates GPU peer info (peerMask) based on switchLinkMasks + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] switchLinkMasks Mask of switch links + * + * @return Returns NV_TRUE if peerMask is updated + */ +static NvBool +_knvlinkUpdateSwitchLinkMasks +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 switchLinkMasks +) +{ + KernelNvlink *pKernelNvlink1 = NULL; + OBJGPU *pGpu1 = NULL; + NvBool bPeerUpdated = NV_FALSE; + NV_STATUS status = NV_OK; + NvU32 gpuInst; + + // + // On NvSwitch systems, all the enabled and connected GPU links should + // go through NvSwitch. We don't support GPU<->GPU or GPU<->NPU direct + // connections on NvSwitch systems. + // + if (!knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink)) + { + return bPeerUpdated; + } + + for (gpuInst = 0; gpuInst < NV_MAX_DEVICES; gpuInst++) + { + pGpu1 = gpumgrGetGpu(gpuInst); + if (!pGpu1) + { + continue; + } + + // No support for SLI P2P on nvswitch systems. + if (IsSLIEnabled(pGpu1)) + { + continue; + } + + pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + + if (!pKernelNvlink1) + { + continue; + } + + if (!pKernelNvlink1->discoveredLinks) + { + continue; + } + + if (!knvlinkIsGpuConnectedToNvswitch(pGpu1, pKernelNvlink1)) + { + continue; + } + + // Update local peerLinkMasks. + status = knvlinkSetLinkMaskToPeer(pGpu, pKernelNvlink, pGpu1, switchLinkMasks); + if (status != NV_OK) + return NV_FALSE; + + // + // Update remote peerLinkMasks only if a remote endpoint is connected. + // + // We are deliberately picking up loopback peerLinkMask, because it + // represents the actual nvswitch connection mask for that GPU and + // guarantees that the end point is connected to nvswitch. + // + status = knvlinkSetLinkMaskToPeer(pGpu1, pKernelNvlink1, pGpu, + pKernelNvlink1->peerLinkMasks[gpuGetInstance(pGpu1)]); + if (status != NV_OK) + return NV_FALSE; + + bPeerUpdated = NV_TRUE; + } + + return bPeerUpdated; +} + +/*! + * @brief Updates GPU peer info (peerMask) when a GPU is degraded + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + */ +static NvBool +_knvlinkUpdateSwitchLinkMasksGpuDegraded +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + KernelNvlink *pKernelNvlink1 = NULL; + OBJGPU *pGpu1 = NULL; + NvBool bPeerUpdated = NV_FALSE; + NV_STATUS status = NV_OK; + NvU32 gpuInst; + + for (gpuInst = 0; gpuInst < NV_MAX_DEVICES; gpuInst++) + { + pGpu1 = gpumgrGetGpu(gpuInst); + if (!pGpu1) + { + continue; + } + + // No support for SLI P2P on nvswitch systems. + if (IsSLIEnabled(pGpu1)) + { + continue; + } + + pKernelNvlink1 = GPU_GET_KERNEL_NVLINK(pGpu1); + + if (!pKernelNvlink1) + { + continue; + } + + if (!pKernelNvlink1->discoveredLinks) + { + continue; + } + + if (!knvlinkIsGpuConnectedToNvswitch(pGpu1, pKernelNvlink1)) + { + continue; + } + + // Update local peerLinkMasks. 
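+        // For a degraded GPU both directions are cleared to 0, so neither
+        // endpoint keeps a stale NVSwitch peer mapping to the other.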
+ status = knvlinkSetLinkMaskToPeer(pGpu, pKernelNvlink, pGpu1, 0); + if (status != NV_OK) + return NV_FALSE; + + // Update remote peerLinkMasks + status = knvlinkSetLinkMaskToPeer(pGpu1, pKernelNvlink1, pGpu, 0); + if (status != NV_OK) + return NV_FALSE; + + bPeerUpdated = NV_TRUE; + } + + return bPeerUpdated; +} + +/*! + * For each known peer, update their configurations, now that another + * one of their peers (this GPU) has been initialized. + * + * This will update the PCE-LCE mappings, but it will not trigger any + * HSHUB updates since peer IDs shouldn't have been allocated at this + * point. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +static void +_knvlinkUpdatePeerConfigs +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NvU32 gpuInst; + + for (gpuInst = 0; gpuInst < NV_ARRAY_ELEMENTS(pKernelNvlink->peerLinkMasks); gpuInst++) + { + if (pKernelNvlink->peerLinkMasks[gpuInst] != 0) + { + OBJGPU *pRemoteGpu = gpumgrGetGpu(gpuInst); + + if (pRemoteGpu != NULL) + { + KernelNvlink *pRemoteKernelNvlink = GPU_GET_KERNEL_NVLINK(pRemoteGpu); + + if (pRemoteKernelNvlink != NULL) + { + NV_PRINTF(LEVEL_INFO, + "GPU%u requesting GPU%u NVLINK config update\n", + gpuGetInstance(pGpu), + gpuGetInstance(pRemoteGpu)); + + _knvlinkPrintTopologySummary(pRemoteGpu, pRemoteKernelNvlink); + + // Update CE mappings on remote GPUs since we have new connections + knvlinkUpdateCurrentConfig(pRemoteGpu, pRemoteKernelNvlink); + } + } + } + } +} + +/*! + * Print the nvlink topology for this GPU + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +static void +_knvlinkPrintTopologySummary +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ +#if NV_PRINTF_ENABLED + + NvU32 i; + NV_STATUS status; + + if (DBG_RMMSG_CHECK(LEVEL_INFO) == 0) + { + return; + } + + NV_PRINTF(LEVEL_INFO, "GPU%02u cached topology:\n", gpuGetInstance(pGpu)); + + NV2080_CTRL_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_HSHUB_GET_SYSMEM_NVLINK_MASK, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to determine sysmem link mask\n"); + return; + } + + // Print the discovered sysmem links + if (params.sysmemLinkMask != 0) + { + NV_PRINTF(LEVEL_INFO, " sysmem link mask : 0x%x\n", params.sysmemLinkMask); + } + + // Print the discovered p2p links + for (i = 0; i < NV_ARRAY_ELEMENTS(pKernelNvlink->peerLinkMasks); i++) + { + if (pKernelNvlink->peerLinkMasks[i] != 0) + { + NV_PRINTF(LEVEL_INFO, " GPU%02u link mask : 0x%x\n", i, + pKernelNvlink->peerLinkMasks[i]); + } + } + + // Print the links which do not have a connection yet + if (pKernelNvlink->disconnectedLinkMask != 0) + { + NV_PRINTF(LEVEL_INFO, " unknown link mask: 0x%x\n", + pKernelNvlink->disconnectedLinkMask); + } + +#endif +} + +#endif diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkoverrides.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkoverrides.c new file mode 100644 index 000000000..9c5820a59 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkoverrides.c @@ -0,0 +1,263 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" +#include "nvRmReg.h" +#include "os/os.h" + +/*! + * @brief Apply NVLink overrides from Registry + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkApplyRegkeyOverrides_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NvU32 regdata; + + // Initialize the settings + + // Link training settings + pKernelNvlink->bEnableTrainingAtLoad = NV_FALSE; + pKernelNvlink->bSkipLinkTraining = NV_FALSE; + + // Link enable/disable filtering + pKernelNvlink->bRegistryLinkOverride = NV_FALSE; + pKernelNvlink->registryLinkMask = 0; + pKernelNvlink->vbiosDisabledLinkMask = 0; + pKernelNvlink->regkeyDisabledLinksMask = 0; + + // Clock and speed settings + pKernelNvlink->nvlinkLinkSpeed = NV_REG_STR_RM_NVLINK_SPEED_CONTROL_SPEED_DEFAULT; + + // Power management settings + pKernelNvlink->bDisableSingleLaneMode = NV_FALSE; + pKernelNvlink->bDisableL2Mode = NV_FALSE; + + // Registry overrides for forcing NVLINK on/off + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_CONTROL, &pKernelNvlink->registryControl)) + { + NV_PRINTF(LEVEL_INFO, "registryControl: 0x%x\n", + pKernelNvlink->registryControl); + + // If NVLink is force disabled through regkey override + if (FLD_TEST_DRF(_REG_STR_RM, _NVLINK_CONTROL, _FORCE_DISABLE, _YES, + pKernelNvlink->registryControl) && + FLD_TEST_DRF(_REG_STR_RM, _NVLINK_CONTROL, _FORCE_ENABLE, _NO, + pKernelNvlink->registryControl)) + { + NV_PRINTF(LEVEL_INFO, + "Disabling NVLINK (forced disable via regkey)\n"); + + return NV_ERR_NOT_SUPPORTED; + } + else if (!knvlinkIsNvlinkDefaultEnabled(pGpu, pKernelNvlink) && + FLD_TEST_DRF(_REG_STR_RM, _NVLINK_CONTROL, _FORCE_ENABLE, _NO, + pKernelNvlink->registryControl)) + { + // NVLink is not default enabled and is not force enabled as well + NV_PRINTF(LEVEL_INFO, + "Disabling NVLINK (disabled by platform default)\n"); + + return NV_ERR_NOT_SUPPORTED; + } + else if (FLD_TEST_DRF(_REG_STR_RM, _NVLINK_CONTROL, _FORCE_DISABLE, _YES, + pKernelNvlink->registryControl) && + FLD_TEST_DRF(_REG_STR_RM, _NVLINK_CONTROL, _FORCE_ENABLE, _YES, + pKernelNvlink->registryControl)) + { + // NVLink is both force enabled and disabled. Fallback to default + NV_PRINTF(LEVEL_INFO, + "Conflict in Nvlink Force Enable/Disable. 
Reverting to platform default.\n"); + + if (!knvlinkIsNvlinkDefaultEnabled(pGpu, pKernelNvlink)) + { + NV_PRINTF(LEVEL_INFO, + "Disabling NVLINK (disabled by platform default)\n"); + + return NV_ERR_NOT_SUPPORTED; + } + } + else + { + NV_PRINTF(LEVEL_INFO, "NVLink is enabled\n"); + pKernelNvlink->setProperty(pKernelNvlink, PDB_PROP_KNVLINK_ENABLED, NV_TRUE); + } + + // + // Regkey overrides to trigger link init and training during driver load + // a. Initialize to swcfg mode during driver load + // b. Train to active mode during driver load + // + if (FLD_TEST_DRF(_REG_STR_RM, _NVLINK_CONTROL, _TRAIN_AT_LOAD, _YES, + pKernelNvlink->registryControl)) + { + NV_PRINTF(LEVEL_INFO, + "Overriding NvLink training during driver load via regkey.\n"); + pKernelNvlink->bEnableTrainingAtLoad = NV_TRUE; + } + else + { + pKernelNvlink->bEnableTrainingAtLoad = NV_FALSE; + pKernelNvlink->bEnableSafeModeAtLoad = NV_FALSE; + } + + // Regkey override to skip link initialization and training + if (FLD_TEST_DRF(_REG_STR_RM, _NVLINK_CONTROL, _SKIP_TRAIN, _YES, + pKernelNvlink->registryControl)) + { + pKernelNvlink->bSkipLinkTraining = NV_TRUE; + } + else + { + pKernelNvlink->bSkipLinkTraining = NV_FALSE; + } + + // Regkey override to skip forced config if enabled + if (FLD_TEST_DRF(_REG_STR_RM, _NVLINK_CONTROL, _FORCE_AUTOCONFIG, _YES, + pKernelNvlink->registryControl)) + { + pKernelNvlink->bForceAutoconfig = NV_TRUE; + } + else + { + pKernelNvlink->bForceAutoconfig = NV_FALSE; + } + } + else if (!knvlinkIsNvlinkDefaultEnabled(pGpu, pKernelNvlink)) + { + NV_PRINTF(LEVEL_INFO, + "Disabling NVLINK (disabled by platform default)\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + // + // Registry overrides for nvlink register initialization + // + // NOTE: Reginit has been deprecated on Ampere and beyond + // + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL, &pKernelNvlink->verboseMask)) + { + if (DRF_VAL(_REG_STR_RM, _NVLINK_VERBOSE_MASK_CONTROL, _REGINIT, + pKernelNvlink->verboseMask) + == NV_REG_STR_RM_NVLINK_VERBOSE_MASK_CONTROL_REGINIT_ON) + { + NV_PRINTF(LEVEL_INFO, + "Forcing NVLINK Verbose Reg Init Prints enabled via regkey\n"); + } + } + + // Registry overrides to disable a set of links + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_DISABLE_LINKS, &pKernelNvlink->regkeyDisabledLinksMask)) + { + NV_PRINTF(LEVEL_INFO, "Disable NvLinks 0x%x via regkey\n", + pKernelNvlink->regkeyDisabledLinksMask); + } + + // + // Registry overrides to enable NvLinks selectively + // + // NOTE: This is used only on Pascal. 
Volta and beyond, this should not be used + // + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_ENABLE, &pKernelNvlink->registryLinkMask)) + { + pKernelNvlink->bRegistryLinkOverride = NV_TRUE; + NV_PRINTF(LEVEL_INFO, "Enable NvLinks 0x%x via regkey\n", + pKernelNvlink->registryLinkMask); + } + + // Registry overrides for disabling nvlink P2P loopback + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK, ®data) && + regdata == NV_REG_STR_RM_NVLINK_DISABLE_P2P_LOOPBACK_TRUE) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED, NV_TRUE); + } + + // Regkey overrides for NVLink power management controls + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_LINK_PM_CONTROL, ®data)) + { + NV_PRINTF(LEVEL_INFO, "RM NVLink Link PM controlled via regkey\n"); + + // Whether one-eighth mode has been disabled by regkey + if (FLD_TEST_DRF(_REG_STR_RM, _NVLINK_LINK_PM_CONTROL, _SINGLE_LANE_MODE, + _DISABLE, regdata)) + { + NV_PRINTF(LEVEL_INFO, + "NVLink single-lane power state disabled via regkey\n"); + pKernelNvlink->bDisableSingleLaneMode = NV_TRUE; + } + + // Whether L2 power state has been disabled by regkey + if (FLD_TEST_DRF(_REG_STR_RM, _NVLINK_LINK_PM_CONTROL, _L2_MODE, + _DISABLE, regdata)) + { + NV_PRINTF(LEVEL_INFO, + "NVLink L2 power state disabled via regkey\n"); + pKernelNvlink->bDisableL2Mode = NV_TRUE; + } + } + + // If lane disable and lane shutdown is force enabled through regkey + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN, ®data) && + regdata == NV_REG_STR_RM_NVLINK_FORCE_LANESHUTDOWN_TRUE) + { + NV_PRINTF(LEVEL_INFO, + "NVLink lanedisable and laneshutdown is forced enabled via regkey\n"); + + pKernelNvlink->setProperty(pKernelNvlink, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD, NV_TRUE); + } + + // + // Registry override to control SYSMEM device type for reginit flow when + // using force config. + // + if (NV_OK == osReadRegistryDword(pGpu, + NV_REG_STR_RM_NVLINK_SYSMEM_DEVICE_TYPE, &pKernelNvlink->forcedSysmemDeviceType)) + { + NV_PRINTF(LEVEL_INFO, + "Forcing NVLINK SYSMEM device type with 0x%x via regkey\n", + pKernelNvlink->forcedSysmemDeviceType); + } + else + { + pKernelNvlink->forcedSysmemDeviceType = NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_EBRIDGE; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkstate.c b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkstate.c new file mode 100644 index 000000000..39bc94dec --- /dev/null +++ b/src/nvidia/src/kernel/gpu/nvlink/kernel_nvlinkstate.c @@ -0,0 +1,1017 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/nvlink/kernel_ioctrl.h" +#include "kernel/gpu/mem_sys/kern_mem_sys.h" +#include "os/os.h" + +static NV_STATUS _knvlinkCreateIoctrl(OBJGPU *, KernelNvlink *, NvU32); +static NV_STATUS _knvlinkFilterDiscoveredLinks(OBJGPU *, KernelNvlink *); +static NV_STATUS _knvlinkFilterIoctrls(OBJGPU *, KernelNvlink *); +static NV_STATUS _knvlinkProcessSysmemLinks(OBJGPU *, KernelNvlink *, NvBool); +static NV_STATUS _knvlinkPurgeState(OBJGPU *, KernelNvlink *); + +/*! + * @brief Create an IOCTRL object + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelNvlink KernelNvlink object pointer + * @param[in] PublicId The ID of the ioctrl to be created + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkCreateIoctrl +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 PublicId +) +{ + KernelIoctrl *pKernelIoctrl = NULL; + NV_STATUS status = NV_OK; + + status = objCreate(&pKernelIoctrl, pKernelNvlink, KernelIoctrl); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + pKernelNvlink->pKernelIoctrl[PublicId] = pKernelIoctrl; + + status = kioctrlConstructEngine(pGpu, pKernelIoctrl, PublicId); + + return status; +} + +/*! + * @brief Filter the discovered links against disabled links + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelNvlink KernelNvlink object pointer + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkFilterDiscoveredLinks +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + // Ensure any vbios disabled links are removed from discovered + if (pKernelNvlink->vbiosDisabledLinkMask) + { + // Update the link mask if overridden through vbios + pKernelNvlink->discoveredLinks &= ~(pKernelNvlink->vbiosDisabledLinkMask); + + NV_PRINTF(LEVEL_INFO, + "Links discovered after VBIOS overrides = 0x%x\n", + pKernelNvlink->discoveredLinks); + } + + // Filter links that are disabled through regkey overrides + if (pKernelNvlink->regkeyDisabledLinksMask) + { + pKernelNvlink->discoveredLinks &= ~(pKernelNvlink->regkeyDisabledLinksMask); + + NV_PRINTF(LEVEL_INFO, + "Links after applying disable links regkey = 0x%x\n", + pKernelNvlink->discoveredLinks); + } + + return NV_OK; +} + +/*! 
+ * @brief Filter the IOCTRLs which have no discovered links + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelNvlink KernelNvlink object pointer + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkFilterIoctrls +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + KernelIoctrl *pKernelIoctrl; + NvU32 i; + + // Update local IOCTRL discovered masks after vbios and regkey overrides + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->ioctrlMask) + { + pKernelIoctrl = KNVLINK_GET_IOCTRL(pKernelNvlink, i); + + // If minion force boot enabled, don't remove IOCTRL from list + if (pKernelIoctrl->getProperty(pKernelIoctrl, PDB_PROP_KIOCTRL_MINION_FORCE_BOOT)) + { + continue; + } + + pKernelIoctrl->localDiscoveredLinks &= + KIOCTRL_LINK_GLOBAL_TO_LOCAL_MASK(pKernelNvlink->discoveredLinks); + + // No need to handle the IOCTRL if no links are being enabled + if (pKernelIoctrl->localDiscoveredLinks == 0x0) + { + pKernelNvlink->ioctrlMask &= ~(NVBIT(pKernelIoctrl->PublicId)); + } + } + FOR_EACH_INDEX_IN_MASK_END; + + return NV_OK; +} + +/*! + * @brief NVLINK ConstructEngine + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] engDesc NVLink Engine descriptor + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + ENGDESCRIPTOR engDesc +) +{ + NV_STATUS status = NV_OK; + NvU32 ioctrlIdx = 0; + + // Initialize the nvlink core library + knvlinkCoreDriverLoadWar(pGpu, pKernelNvlink); + + // Return early if nvlink core is not supported + status = knvlinkCoreIsDriverSupported(pGpu, pKernelNvlink); + if (status != NV_OK) + { + return status; + } + + // + // Apply NVLink regkey overrides for monolithic/CPU-RM. + // If NVLink is disabled, so is related functionality. + // + status = knvlinkApplyRegkeyOverrides(pGpu, pKernelNvlink); + if (status == NV_ERR_NOT_SUPPORTED) + { + return status; + } + + pKernelNvlink->bVerifTrainingEnable = NV_FALSE; + pKernelNvlink->bL2Entry = NV_FALSE; + + status = knvlinkConstructHal_HAL(pGpu, pKernelNvlink); + if (status == NV_ERR_NOT_SUPPORTED) + { + return status; + } + + // + // Create MAX KernelIoctrl objects. + // Later in knvlinkStatePreInit_IMPL, we will remove the objects for + // IOCTRLs that do not exist in the HW. + // + // We must use this ordering because we should not touch GPU registers + // during object creation + // + for (ioctrlIdx = 0; ioctrlIdx < NVLINK_MAX_IOCTRLS_SW; ioctrlIdx++) + { + if (!pKernelNvlink->pKernelIoctrl[ioctrlIdx]) + { + _knvlinkCreateIoctrl(pGpu, pKernelNvlink, ioctrlIdx); + } + } + + return NV_OK; +} + +/*! + * @brief Determine if the NVLink IP is present and usable + * This includes: + * 1. Detecting IOCTRL in PTOP + * 2. Detecting IOCTRL Discovery integrity + * 3. Detecting at least 1 link exposed in IOCTRL Discovery + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + */ +NvBool +knvlinkIsPresent_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NV_STATUS status = NV_OK; + + // On GSP clients, retrieve all device discovery info from GSP through RPC + status = knvlinkCopyNvlinkDeviceInfo(pGpu, pKernelNvlink); + if (status != NV_OK) + return NV_FALSE; + + status = knvlinkCopyIoctrlDeviceInfo(pGpu, pKernelNvlink); + if (status != NV_OK) + return NV_FALSE; + + return NV_TRUE; +} + +/*! 
+ * @brief NVLINK State Pre-Init + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkStatePreInitLocked_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + return knvlinkRemoveMissingIoctrlObjects(pGpu, pKernelNvlink); +} + +/*! + * @brief Remove IOCTRL objects that are not present in the system + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkRemoveMissingIoctrlObjects_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + NvU32 ioctrlIdx = 0; + KernelIoctrl *pKernelIoctrl = NULL; + NV_STATUS status = NV_OK; + + // On GSP clients, retrieve all device discovery info from GSP + status = knvlinkCopyNvlinkDeviceInfo(pGpu, pKernelNvlink); + if (status != NV_OK) + return status; + + // Delete IOCTRL objects that are not present + for (ioctrlIdx = 0; ioctrlIdx < NVLINK_MAX_IOCTRLS_SW; ioctrlIdx++) + { + pKernelIoctrl = KNVLINK_GET_IOCTRL(pKernelNvlink, ioctrlIdx); + if (!(pKernelNvlink->ioctrlMask & NVBIT(ioctrlIdx))) + { + objDelete(pKernelIoctrl); + pKernelNvlink->pKernelIoctrl[ioctrlIdx] = NULL; + } + } + + return NV_OK; +} + +/*! + * @brief NVLINK StateLoad + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] flags Flags + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkStateLoad_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bMIGNvLinkP2PDisabled = ((pKernelMIGManager != NULL) && + !kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)); + NvU32 preInitializedLinks; + NvU32 i; + + // + // If we are on the resume path, nvlinkIsPresent will not be called, + // but we need to call it to get all the devices set up, call it now. + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + // The return is not important, the path is. + knvlinkIsPresent(pGpu, pKernelNvlink); + } + + // For GSP-CLIENTs, the link masks and vbios info need to synchronize with GSP + status = knvlinkSyncLinkMasksAndVbiosInfo(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // Filter discovered links against disabled links (vbios or regkey) + status = _knvlinkFilterDiscoveredLinks(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // Filter IOCTRLs which have no discovered links (vbios or regkey) + status = _knvlinkFilterIoctrls(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // Update power features supported based on the NVLink IP + knvlinkSetPowerFeatures(pGpu, pKernelNvlink); + + if (!IS_RTLSIM(pGpu) || pKernelNvlink->bForceEnableCoreLibRtlsims) + { + // NvLink should not be registered with core-lib when MIG is enabled + if (!knvlinkPoweredUpForD3_HAL(pGpu, pKernelNvlink) && + !bMIGNvLinkP2PDisabled) + { + // Add the NVGPU device to the nvlink core + status = knvlinkCoreAddDevice(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to add GPU device to nvlink core\n"); + goto knvlinkStateLoad_end; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "MIG Enabled or NVLink L2 is supported on chip. 
" + "Skip device registration in RTD3/FGC6 exit\n"); + } + } + + // + // Process the mask of init disabled links. Links can be init disabled + // by the hypervisor in a virtualized system for links that connect to + // GPUs that do not belong to the same guest + // + status = knvlinkProcessInitDisabledLinks(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // Remove the init disabled links from the discovered links mask + pKernelNvlink->discoveredLinks &= ~(pKernelNvlink->initDisabledLinksMask); + + // Track un-connected links, we assume all discovered links are connected. + pKernelNvlink->connectedLinksMask = pKernelNvlink->discoveredLinks; + + // Initialize initializedLinks to 0 (assume no links initialized) + pKernelNvlink->initializedLinks = 0; + + // For GSP-CLIENTs, the link masks and vbios info need to synchronize with GSP + status = knvlinkSyncLinkMasksAndVbiosInfo(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // + // Save off the links that were previously initialized to be able to + // optimize away a heavy flush later. This is needed on IBM systems + // + preInitializedLinks = pKernelNvlink->initializedLinks; + + // Load link speed if forced from OS + status = knvlinkProgramLinkSpeed_HAL(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // + // At this point, the discovered links mask is filtered. If there are no + // discovered links, then we skip the rest of the steps. + // + if (pKernelNvlink->discoveredLinks == 0) + { + goto knvlinkStateLoad_end; + } + + // + // Override configuration for NVLink topology. This can be either + // a. Legacy forced configurations + // b. Chiplib forced configurations + // + status = knvlinkOverrideConfig_HAL(pGpu, pKernelNvlink, NVLINK_PHASE_STATE_LOAD); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // + // Finalize the enabledLinks mask. If the configuration is not forced + // (legacy or chiplib), this is same as the discovered links mask + // + if (pKernelNvlink->bRegistryLinkOverride) + { + pKernelNvlink->enabledLinks = pKernelNvlink->discoveredLinks & + pKernelNvlink->registryLinkMask; + } + else if (bMIGNvLinkP2PDisabled) + { + // NvLink is not supported with MIG + pKernelNvlink->enabledLinks = 0; + } + else + { + pKernelNvlink->enabledLinks = pKernelNvlink->discoveredLinks; + } + + // Sense NVLink bridge presence and remove links on missing bridges. + knvlinkFilterBridgeLinks_HAL(pGpu, pKernelNvlink); + + // Disconnected links mask tracks links whose remote ends are not discovered + pKernelNvlink->disconnectedLinkMask = pKernelNvlink->enabledLinks; + + if (!IS_RTLSIM(pGpu) || pKernelNvlink->bForceEnableCoreLibRtlsims) + { + if (!knvlinkPoweredUpForD3_HAL(pGpu, pKernelNvlink)) + { + // Register links in the nvlink core library + FOR_EACH_INDEX_IN_MASK(32, i, pKernelNvlink->enabledLinks) + { + status = knvlinkCoreAddLink(pGpu, pKernelNvlink, i); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to register Link%d in nvlink core\n", i); + goto knvlinkStateLoad_end; + } + } + FOR_EACH_INDEX_IN_MASK_END; + } + else + { + NV_PRINTF(LEVEL_INFO, + "NVLink L2 is supported on the chip. 
" + "Skip link registration in RTD3/FGC6 exit\n"); + } + } + + // RPC to GSP-RM to perform pre-topology setup on mask of enabled links + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_ENABLE_LINKS, + NULL, 0); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to perform pre-topology setup on mask of enabled links\n"); + return status; + } + + // + // Check for NVSwitch proxy presence and update the RM state accordingly. If + // this is service VM, then the NVSwitch fabric address should not have been + // by now. If this is guest VM, then NVSwitch fabric address should already + // have been programmed by service VM. + // + knvlinkDetectNvswitchProxy(pGpu, pKernelNvlink); + + // + // Check for NVSwitch proxy to enable External Fabric Management and force + // init fabric manager state. + // + if (knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink)) + { + sysEnableExternalFabricMgmt(pSys); + sysForceInitFabricManagerState(pSys); + } + + // + // WAR Bug# 3261027: Sync-up External Fabric Management status with GSP-RM. + // Long term once objsys state is made available to GSP, this WAR won't + // be needed. + // + status = sysSyncExternalFabricMgmtWAR(pSys, pGpu); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // + // If we are running on CPU-RM or monolithic, process SYSMEM links, if present + // on the system. + // + status = _knvlinkProcessSysmemLinks(pGpu, pKernelNvlink, + (preInitializedLinks != pKernelNvlink->initializedLinks)); + if (status != NV_OK) + { + NV_ASSERT(status == NV_OK); + goto knvlinkStateLoad_end; + } + + // + // FLA is supported only on Nvlink enabled systems + // Don't move this code path, since FLA relies on property + // "PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED", which is set in this + // function. + // + if (!(flags & GPU_STATE_FLAGS_PRESERVING)) + { + if ((status = kbusInitFla_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), 0, 0)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Init FLA failed, status:0x%x\n", status); + NV_ASSERT(status == NV_OK); + } + } + +knvlinkStateLoad_end: + + if (status != NV_OK) + { + _knvlinkPurgeState(pGpu, pKernelNvlink); + } + + return status; +} + +/*! + * @brief NVLINK StatePostLoad + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] flags Flags + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkStatePostLoad_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pRemoteGpu = NULL; + NvU32 gpuInstance; + NvU32 gpuMask; + knvlinkCoreUpdateDeviceUUID(pGpu, pKernelNvlink); + + if (!knvlinkIsForcedConfig(pGpu, pKernelNvlink)) + { + // + // If link initialization to SAFE during driver load is force enabled + // through regkey, then trigger topology discovery now. This can't be + // done for ALI since topology discovery can only happen after + // verification training is complete + // + if ( + (pKernelNvlink->bEnableSafeModeAtLoad || pKernelNvlink->bEnableTrainingAtLoad || + pKernelNvlink->bVerifTrainingEnable)) + { + knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink); + } + + // + // Bug# 1667991: Enabling link training to high speed + // at driver load for loopback or P2P links only as of now. 
+ // + // Also train links on verif env like Emulation and Fmodel + // + if (pKernelNvlink->bEnableTrainingAtLoad || pKernelNvlink->bVerifTrainingEnable) + { + { + status = gpumgrGetGpuAttachInfo(NULL, &gpuMask); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + gpuInstance = 0; + while ((pRemoteGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + knvlinkTrainP2pLinksToActive(pGpu, pRemoteGpu, pKernelNvlink); + } + } + } + } + + status = knvlinkStatePostLoadHal_HAL(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR," failed for GPU 0x%x\n", pGpu->gpuInstance); + return status; + } + + return NV_OK; +} + +/*! + * @brief NVLINK StateUnload + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * @param[in] flags Flags + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkStateUnload_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 flags +) +{ + // + // Don't tear down FLA when undergoing suspend/resume + // Enable this only for CPU-RM and monolithic RM + // + if (!(flags & GPU_STATE_FLAGS_PRESERVING)) + { + kbusDestroyFla_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu)); + } + + return NV_OK; +} + +/*! + * @brief NVLINK StatePostUnload + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +NV_STATUS +knvlinkStatePostUnload_IMPL +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvU32 flags +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status = NV_OK; + + if ((knvlinkGetNumLinksToSystem(pGpu, pKernelNvlink) != 0) && + pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING)) + { + // + // On GPU reset the CPU<->GPU NVLinks are reset, and leaving any GPU + // memory cached on the CPU leads to fatal errors when the CPU tries to + // flush it after the link is down. + // + // Handle this by flushing all of the CPU caches as part of destroying + // the mapping. Do it only if the GPU is being drained as that's an + // indication the GPU is going to be reset. Otherwise, the links stay + // up and it's unnecessary to flush the cache. + // + // Do the flush before the link is put into safe mode below as the + // flush is much slower (takes minutes) once that's done. + // + NvBool bFlush = pKernelNvlink->getProperty(pKernelNvlink, + PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD); + kmemsysTeardownCoherentCpuLink(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu), bFlush); + } + + // + // If GPU is in the D3 entry path and if NVLink L2 is supported and links are + // expected to be in L2 before D3 entry is triggered, skip lane shutdown + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_CODEPATH) && + pKernelNvlink->getProperty(pKernelNvlink, + PDB_PROP_KNVLINK_L2_POWER_STATE_ENABLED) && + pKernelNvlink->getProperty(pKernelNvlink, + PDB_PROP_KNVLINK_L2_POWER_STATE_FOR_LONG_IDLE)) + { + goto knvlinkStatePostUnload_end; + } + + // + // Set HSHUB to init values. + // + // It is good to reset HSHUB when GPUs are going down. For example, a GPU + // can be torn down because it (or its peers) experienced an NVLink error. + // In such cases resetting HSHUB is must. Otherwise, consecutive RmInitAdaper + // on the GPU could fail if membars are emitted on the broken NVLinks. + // + // We do not set sysmem masks to init values because P9 systems are crashing + // for an unknown reason with an HMI exception during consecutive + // RmInitAdapter. + // + // TODO: For now, we are enabling this change for NVSwitch systems in r400_00 + // to unblock DGX-2 release. 
In chips_a, the change will be enabled on all + // platforms (in discussion with ARCH for non-NVSwitch platforms). + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT) || + knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink)) + { + knvlinkRemoveMapping_HAL(pGpu, pKernelNvlink, NV_FALSE, + ((1 << NVLINK_MAX_PEERS_SW) - 1), + NV_FALSE /* bL2Entry */); + } + + // + // Check if lane disable and shutdown during driver unload has been forced + // using regkey override, or as a part of the external reset sequence. + // + if (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD) && + !pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED) && + !API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + NV2080_CTRL_NVLINK_DISABLE_DL_INTERRUPTS_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.linkMask = pKernelNvlink->enabledLinks; + + // Disable all the DL interrupts + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_DISABLE_DL_INTERRUPTS, + (void *)¶ms, sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to disable DL interrupts for the links\n"); + return status; + } + + // Shutdown all the links through pseudo-clean shutdown + status = knvlinkPrepareForXVEReset(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to pseudo-clean shutdown the links for GPU%d\n", + pGpu->gpuInstance); + return status; + } + } + +knvlinkStatePostUnload_end: + + _knvlinkPurgeState(pGpu, pKernelNvlink); + + return status; +} + +/*! + * @brief Purge SW state + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink KernelNvlink pointer + * + * @return NV_OK on success + */ +static NV_STATUS +_knvlinkPurgeState +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink +) +{ + KernelIoctrl *pKernelIoctrl = NULL; + NvU32 ioctrlIdx; + +#if defined(INCLUDE_NVLINK_LIB) + + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bMIGNvLinkP2PDisabled = ((pKernelMIGManager != NULL) && + !kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)); + + // With MIG NvLink registration was skipped with core-lib + if (knvlinkPoweredUpForD3_HAL(pGpu, pKernelNvlink) || bMIGNvLinkP2PDisabled) + { + NV_PRINTF(LEVEL_INFO, + "Skipping device/link un-registration in RTD3 GC6 entry path\n"); + goto _knvlinkPurgeState_end; + } + + if (!IS_RTLSIM(pGpu) || pKernelNvlink->bForceEnableCoreLibRtlsims) + { + if (pKernelNvlink->pNvlinkDev) + { + NvU32 linkId; + + // Un-register the links from nvlink core library + FOR_EACH_INDEX_IN_MASK(32, linkId, pKernelNvlink->enabledLinks) + { + knvlinkCoreRemoveLink(pGpu, pKernelNvlink, linkId); + } + FOR_EACH_INDEX_IN_MASK_END; + + // Un-register the nvgpu device from nvlink core library + knvlinkCoreRemoveDevice(pGpu, pKernelNvlink); + } + } + +_knvlinkPurgeState_end: + +#endif + + // + // This GPU is being shutdown, so need to clear the peerLinkMasks and sysmem link + // mask. 
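+    // Both masks are expected to be repopulated by topology discovery on the
+    // next state load.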
+ // + portMemSet(pKernelNvlink->peerLinkMasks, 0, sizeof(pKernelNvlink->peerLinkMasks)); + pKernelNvlink->sysmemLinkMask = 0; + + // Unload each IOCTRL object + for (ioctrlIdx = 0; ioctrlIdx < pKernelNvlink->ioctrlNumEntries; ioctrlIdx++) + { + pKernelIoctrl = KNVLINK_GET_IOCTRL(pKernelNvlink, ioctrlIdx); + if (pKernelIoctrl) + { + kioctrlDestructEngine(pKernelIoctrl); + pKernelNvlink->ioctrlMask &= ~NVBIT(ioctrlIdx); + } + } + + // Destroy the chiplib configuration memory + if (pKernelNvlink->pLinkConnection) + { + portMemFree(pKernelNvlink->pLinkConnection); + pKernelNvlink->pLinkConnection = NULL; + } + + return NV_OK; +} + +void +knvlinkDestruct_IMPL +( + KernelNvlink *pKernelNvlink +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelNvlink); + KernelIoctrl *pKernelIoctrl = NULL; + NvU32 ioctrlIdx; + + // Destroy the RM NVLink state + _knvlinkPurgeState(pGpu, pKernelNvlink); + + // Free Ioctrls + for (ioctrlIdx = 0; ioctrlIdx < NVLINK_MAX_IOCTRLS_SW; ioctrlIdx++) + { + pKernelIoctrl = KNVLINK_GET_IOCTRL(pKernelNvlink, ioctrlIdx); + if (pKernelIoctrl) + { + objDelete(pKernelIoctrl); + pKernelNvlink->pKernelIoctrl[ioctrlIdx] = NULL; + } + } + + // Unload the nvlink core library + knvlinkCoreDriverUnloadWar(pGpu, pKernelNvlink); +} + +/** + * @brief Handle sysmem NVLink connections and ATS functionality + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelNvlink Nvlink pointer + * @param[in] bFlush Whether the CPU cache of the GPU mapping + * should be flushed + * + * @return NV_OK on success + */ +NV_STATUS +_knvlinkProcessSysmemLinks +( + OBJGPU *pGpu, + KernelNvlink *pKernelNvlink, + NvBool bFlush +) +{ + NV_STATUS status = NV_OK; + +#if defined(NVCPU_PPC64LE) || defined(NVCPU_AARCH64) + if (pKernelNvlink->getProperty(pKernelNvlink, PDB_PROP_KNVLINK_SYSMEM_SUPPORT_ENABLED)) + { + // + // In case of IBM or Tegra, the sysmem links will already have + // been registered in nvlink core library. In order to trigger + // topology detection, call knvlinkCoreGetRemoteDeviceInfo + // + if (!knvlinkIsForcedConfig(pGpu, pKernelNvlink) && !pKernelNvlink->pLinkConnection) + { + // + // Establish the current link topology and enable IBM CPU/SYSMEM links. + // If any of the discovered links are CPU/SYSMEM, they will be trained, + // post-enabled, and then enabled in HSHUB when the call has completed. 
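+            // (This branch is compiled only for PPC64LE/AARCH64 builds, where
+            // the CPU itself can be an NVLink endpoint.)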
+ // + status = knvlinkCoreGetRemoteDeviceInfo(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed call to get remote device info during IBM CPU/SYSMEM links " + "setup, failing NVLink StateLoad on GPU%d!!!\n\n", + pGpu->gpuInstance); + + return status; + } + } + else + { + // If the topology is forced, then just apply the settings + knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); + } + } +#else + if (knvlinkIsForcedConfig(pGpu, pKernelNvlink) || !IS_SILICON(pGpu)) + { + // Set up the current Nvlink configuration + knvlinkUpdateCurrentConfig(pGpu, pKernelNvlink); + } +#endif + // Set Buffer ready for the sysmem links + NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_PARAMS programBufferRdyParams; + + portMemSet(&programBufferRdyParams, 0, sizeof(programBufferRdyParams)); + programBufferRdyParams.flags = NV2080_CTRL_NVLINK_PROGRAM_BUFFERREADY_FLAGS_SET; + programBufferRdyParams.bSysmem = NV_TRUE; + programBufferRdyParams.peerLinkMask = 0; + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_PROGRAM_BUFFERREADY, + (void *)&programBufferRdyParams, + sizeof(programBufferRdyParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to program bufferready for the sysmem nvlinks!\n"); + return status; + } + + // + // Enable functionality related to NVLink SYSMEM: + // + ATS functionality if hardware support is available + // + Apply the Bug 200279966 WAR + // + if (knvlinkGetNumLinksToSystem(pGpu, pKernelNvlink) != 0) + { + // + // Configure sysmem atomics config after sysmem link is up. + // Sysmem atomics are programmed from memsysConfigureSysmemAtomics_HAL + // but in case of PPC+GV100, nvlink setup is not done during call to + // memsysConfigureSysmemAtomics_HAL that leads to sysmem atomics not getting + // configured. Hence configure the sysmem atomics now for taking care of + // PPC+GV100. + // + + NV2080_CTRL_NVLINK_ENABLE_SYSMEM_NVLINK_ATS_PARAMS sysmemNvlinkAtsParams; + portMemSet(&sysmemNvlinkAtsParams, 0, sizeof(sysmemNvlinkAtsParams)); + + status = knvlinkExecGspRmRpc(pGpu, pKernelNvlink, + NV2080_CTRL_CMD_NVLINK_ENABLE_SYSMEM_NVLINK_ATS, + (void *)&sysmemNvlinkAtsParams, + sizeof(sysmemNvlinkAtsParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to snable ATS functionality for NVLink sysmem!\n"); + return status; + } + + status = kmemsysSetupCoherentCpuLink(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu), bFlush); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + osSetNVLinkSysmemLinkState(pGpu, NV_TRUE); + } + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/ofa/kernel_ofa_ctx.c b/src/nvidia/src/kernel/gpu/ofa/kernel_ofa_ctx.c new file mode 100644 index 000000000..12981c12d --- /dev/null +++ b/src/nvidia/src/kernel/gpu/ofa/kernel_ofa_ctx.c @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "kernel/gpu/falcon/kernel_falcon.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/ofa/kernel_ofa_ctx.h" + +NV_STATUS +ofactxConstructHal_KERNEL +( + OfaContext *pOfaContext, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pOfaContext, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + KernelFalcon *pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, pChannelDescendant->resourceDesc.engDesc); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + + NV_PRINTF(LEVEL_INFO, "ofactxConstruct for 0x%x\n", pChannelDescendant->resourceDesc.engDesc); + + return kflcnAllocContext(pGpu, pKernelFalcon, pKernelChannel, RES_GET_EXT_CLASS_ID(pChannelDescendant)); +} + +void ofactxDestructHal_KERNEL +( + OfaContext *pOfaContext +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pOfaContext, ChannelDescendant); + OBJGPU *pGpu = GPU_RES_GET_GPU(pChannelDescendant); + KernelFalcon *pKernelFalcon = kflcnGetKernelFalconForEngine(pGpu, pChannelDescendant->resourceDesc.engDesc); + KernelChannel *pKernelChannel = pChannelDescendant->pKernelChannel; + + NV_PRINTF(LEVEL_INFO, "ofactxDestruct for 0x%x\n", pChannelDescendant->resourceDesc.engDesc); + + NV_ASSERT_OK(kflcnFreeContext(pGpu, pKernelFalcon, pKernelChannel, RES_GET_EXT_CLASS_ID(pChannelDescendant))); +} + diff --git a/src/nvidia/src/kernel/gpu/perf/kern_cuda_limit.c b/src/nvidia/src/kernel/gpu/perf/kern_cuda_limit.c new file mode 100644 index 000000000..88ade0b85 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/perf/kern_cuda_limit.c @@ -0,0 +1,181 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @brief All functions related to the Cuda Safe feature. 
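+ *
+ * Activation requests are reference counted per device in nCudaLimitRefCnt;
+ * see kperfCudaLimitCliSet() and kperfCudaLimitCliGet() below.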
+ */ +#include "gpu/perf/kern_cuda_limit.h" +#include "rmapi/rmapi.h" +#include "gpu/gpu.h" + +/* ------------------------ Static Function Prototypes --------------------- */ +static NV_STATUS kperfCudaLimitCliGet(Device *pDevice, NvBool *pbCudaLimit); +static NV_STATUS kperfCudaLimitCliSet(Device *pDevice, NvBool bCudaLimit); + +/* ------------------------ Public Functions ------------------------------ */ + +/*! + * Disable Cuda limit activation at teardown of client. + * + * @param[in] pDevice Device info pointer + * @param[in] pGpu OBJGPU pointer + * + * @return NV_OK + * Operation completed successfully. + */ +NV_STATUS +deviceKPerfCudaLimitCliDisable +( + Device *pDevice, + OBJGPU *pGpu +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status = NV_OK; + + if (pDevice->nCudaLimitRefCnt > 0) + { + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalDevice, + NV0080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_DISABLE, + NULL, + 0); + + NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, status); + + pDevice->nCudaLimitRefCnt = 0; + } + + return status; +} + +/*! + * @copydoc NV0080_CTRL_CMD_PERF_CUDA_LIMIT_SET_CONTROL + * + * @Parameter pDispCommon [In] + * @Parameter pParams [In] + * + * @return NV_OK + * Operation completed successfully. + * @return NV_ERR_INVALID_REQUEST + * Invalid feature activation or deactivation request. + * @return NV_ERR_INVALID_STATE + * Feature isn't enabled. + * @return Other unexpected errors + * Unexpected errors propagated from other functions. + */ +NV_STATUS +deviceCtrlCmdKPerfCudaLimitSetControl_IMPL +( + Device *pDevice, + NV0080_CTRL_PERF_CUDA_LIMIT_CONTROL_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pDevice); + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvBool bCudaLimitBefore; + NvBool bCudaLimitAfter; + NV_STATUS status = NV_OK; + + + // Obtain current Cuda limit activation setting. + NV_ASSERT_OK_OR_RETURN(kperfCudaLimitCliGet(pDevice, &bCudaLimitBefore)); + + // Set Cuda activation setting and error check the client ref count. + NV_ASSERT_OK_OR_RETURN(kperfCudaLimitCliSet(pDevice, pParams->bCudaLimit)); + + // Obtain current Cuda limit activation setting again. + NV_ASSERT_OK_OR_RETURN(kperfCudaLimitCliGet(pDevice, &bCudaLimitAfter)); + + // If the limit is changing. + if (bCudaLimitBefore != bCudaLimitAfter) + { + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pDevice), + RES_GET_HANDLE(pDevice), + NV0080_CTRL_CMD_INTERNAL_PERF_CUDA_LIMIT_SET_CONTROL, + pParams, + sizeof(*pParams)); + } + + return status; +} +/* ---------------------- Private Static Functions -------------------------- */ +/*! + * Get current Cuda limit activation setting. + * + * @param[in] pDevice Device object + * @param[out] pbCudaLimit Returns current Cuda limit setting + * + * @return NV_OK + * Operation completed successfully. + * @return NV_ERR_INVALID_POINTER + * OUT parameter is NULL. + */ +static NV_STATUS +kperfCudaLimitCliGet +( + Device *pDevice, + NvBool *pbCudaLimit +) +{ + NV_ASSERT_OR_RETURN((pbCudaLimit != NULL), NV_ERR_INVALID_POINTER); + + *pbCudaLimit = (pDevice->nCudaLimitRefCnt != 0); + return NV_OK; +} + +/*! + * Enable/Disable Cuda limit activation setting. + * + * @param[in] pDevice Device object + * @param[in] bCudaLimit Cuda limit setting + * + * @return NV_OK + * Operation completed successfully. 
+ * @return NV_ERR_INVALID_REQUEST + * Otherwise + */ +static NV_STATUS +kperfCudaLimitCliSet +( + Device *pDevice, + NvBool bCudaLimit +) +{ + if (bCudaLimit) + { + pDevice->nCudaLimitRefCnt++; + } + else if (pDevice->nCudaLimitRefCnt > 0) + { + pDevice->nCudaLimitRefCnt--; + } + else + { + return NV_ERR_INVALID_REQUEST; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/perf/kern_perf.c b/src/nvidia/src/kernel/gpu/perf/kern_perf.c new file mode 100644 index 000000000..48922838c --- /dev/null +++ b/src/nvidia/src/kernel/gpu/perf/kern_perf.c @@ -0,0 +1,141 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/**************************************************************************** +* +* Kernel Perf Module +* This file contains functions managing perf on CPU RM +* +****************************************************************************/ + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/perf/kern_perf.h" +#include "gpu/perf/kern_perf_1hz.h" +#include "gpu/perf/kern_perf_boost.h" +#include "objtmr.h" + +/* ------------------------ Global Variables ------------------------------- */ +/* ------------------------ Static Function Prototypes --------------------- */ +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Public Functions ------------------------------- */ +/*! + * @copydoc kperfConstructEngine + */ +NV_STATUS +kperfConstructEngine_IMPL(OBJGPU *pGpu, KernelPerf *pKernelPerf, ENGDESCRIPTOR engDesc) +{ + return NV_OK; +} + +/*! + * @copydoc kperfStateInitLocked + */ +NV_STATUS +kperfStateInitLocked_IMPL(OBJGPU *pGpu, KernelPerf *pKernelPerf) +{ + NV_STATUS status = NV_OK; + + // Initialize SW state corresponding to SLI GPU Boost synchronization. + status = kperfGpuBoostSyncStateInit(pGpu, pKernelPerf); + + return status; +} + +/*! + * @copydoc kperfStateLoad + */ +NV_STATUS +kperfStateLoad_IMPL(OBJGPU *pGpu, KernelPerf *pKernelPerf, NvU32 flags) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + pKernelPerf->timer1HzCallback.bEnableTimerUpdates = NV_TRUE; + + tmrScheduleCallbackRelSec(pTmr, kperfTimerProc, TMR_POBJECT_KERNEL_PERF_1HZ, 1, TMR_FLAG_RECUR, 0); + + // Initialize SW state corresponding to SLI GPU Boost synchronization. 
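+    // (kperfGpuBoostSyncStateInit is also called from kperfStateInitLocked
+    // above; repeating it here re-initializes the sync state on every state
+    // load.)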
+ kperfGpuBoostSyncStateInit(pGpu, pKernelPerf); + + return NV_OK; +} + +/*! + * @copydoc kperfStateUnload +*/ +NV_STATUS +kperfStateUnload_IMPL +( + OBJGPU *pGpu, + KernelPerf *pKernelPerf, + NvU32 flags +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + pKernelPerf->timer1HzCallback.bEnableTimerUpdates = NV_FALSE; + + tmrCancelCallback(pTmr, TMR_POBJECT_KERNEL_PERF_1HZ); + + return NV_OK; +} + +/*! + * @copydoc kperfReentrancy + * + * @note This is 1:1 copy from Physical RM to the Kernel RM + */ +NV_STATUS kperfReentrancy_IMPL(OBJGPU *pGpu, KernelPerf *pKernelPerf, NvU32 function, NvBool bSet) +{ + // This routine and all routines that this routine calls are forbidden. + NvU32 reentrancyMask = function | (function - 1); + + if (bSet) + { + // Catches routines that reentered. + reentrancyMask &= pKernelPerf->reentrancyMask; + pKernelPerf->reentrancyMask |= function; + } + else + { + // Catches routines that were not exited or were exited twice. + reentrancyMask &= (pKernelPerf->reentrancyMask ^ function); + pKernelPerf->reentrancyMask &= ~function; + } + + if (pKernelPerf->reentrancyMask & reentrancyMask) + { + NV_PRINTF(LEVEL_ERROR, + "Code reentered. function %02x, reentered %02x set %d\n", + function, reentrancyMask, bSet); + + // This assert causes failures in MODS Sanity PVS. + //NV_ASSERT(reentrancyMask, 0); // Log all reentrancy errors. + + return NV_ERR_INVALID_STATE; + } + + return NV_OK; +} + +/* ------------------------- Private Functions ------------------------------ */ + diff --git a/src/nvidia/src/kernel/gpu/perf/kern_perf_1hz.c b/src/nvidia/src/kernel/gpu/perf/kern_perf_1hz.c new file mode 100644 index 000000000..9e7788d1c --- /dev/null +++ b/src/nvidia/src/kernel/gpu/perf/kern_perf_1hz.c @@ -0,0 +1,84 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/perf/kern_perf.h" +#include "gpu/perf/kern_perf_1hz.h" +#include "objtmr.h" + +/* ------------------------ Global Variables ------------------------------- */ +/* ------------------------ Static Function Prototypes --------------------- */ +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Public Functions ------------------------------- */ +/*! 
+ * @copydoc kperfTimerProc + */ +NV_STATUS +kperfTimerProc(OBJGPU *pGpu, OBJTMR *pTmr, void *ptr) +{ + KernelPerf *pKernelPerf = GPU_GET_KERNEL_PERF(pGpu); + + // Request next callback. call with Flags = TMR_FLAG_RECUR, since self-rescheduling + if (ptr == TMR_POBJECT_KERNEL_PERF_1HZ) + { + pKernelPerf->timer1HzCallback.b1HzTimerCallback = NV_TRUE; + tmrScheduleCallbackRelSec(pTmr, kperfTimerProc, TMR_POBJECT_KERNEL_PERF_1HZ, 1, TMR_FLAG_RECUR, 0); + } + + kperfTimer1HzCallback(pGpu, pKernelPerf); + + return NV_OK; +} + +/*! + * @copydoc kperfTimer1HzCallback + */ +void +kperfTimer1HzCallback_IMPL(OBJGPU *pGpu, KernelPerf *pKernelPerf) +{ + NvBool bBcState = gpumgrGetBcEnabledStatus(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + // + // If GPU is in full power, timer updates are enabled and we are not breaking the + // reentrancy rules. + // + if ((gpuIsGpuFullPower(pGpu)) && + (pKernelPerf->timer1HzCallback.bEnableTimerUpdates) && + (kperfReentrancy(pGpu, pKernelPerf, KERNEL_PERF_REENTRANCY_TIMER_1HZ_CALLBACK, NV_TRUE) == NV_OK)) + { + // If this function is called as a result of a 1HZ callback, do the Boost Hint callback + if (pKernelPerf->timer1HzCallback.b1HzTimerCallback) + { + pKernelPerf->timer1HzCallback.b1HzTimerCallback = NV_FALSE; + } + + // Release the reentrancy flag for this specific routine + kperfReentrancy(pGpu, pKernelPerf, KERNEL_PERF_REENTRANCY_TIMER_1HZ_CALLBACK, NV_FALSE); + } + + gpumgrSetBcEnabledStatus(pGpu, bBcState); + +} + +/* ------------------------- Private Functions ------------------------------ */ diff --git a/src/nvidia/src/kernel/gpu/perf/kern_perf_boost.c b/src/nvidia/src/kernel/gpu/perf/kern_perf_boost.c new file mode 100644 index 000000000..38aae2449 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/perf/kern_perf_boost.c @@ -0,0 +1,91 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/* ------------------------ Includes --------------------------------------- */ +#include "gpu/perf/kern_perf.h" +#include "gpu/perf/kern_perf_boost.h" +#include "rmapi/rmapi.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "resserv/rs_client.h" +#include "vgpu/rpc.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" + +/* ------------------------ Global Variables ------------------------------- */ +/* ------------------------ Static Function Prototypes --------------------- */ +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Public Functions ------------------------------- */ + +/*! + * @copydoc NV2080_CTRL_CMD_PERF_BOOST + */ +NV_STATUS +subdeviceCtrlCmdKPerfBoost_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_PERF_BOOST_PARAMS *pBoostParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelPerf *pKernelPerf = GPU_GET_KERNEL_PERF(pGpu); + NV_STATUS status = NV_OK; + + status = kperfBoostSet_HAL(pKernelPerf, pSubdevice, pBoostParams); + return status; +} + +/*! + * @copydoc kperfBoostSet + */ +NV_STATUS +kperfBoostSet_3x +( + KernelPerf *pKernelPerf, + Subdevice *pSubdevice, + NV2080_CTRL_PERF_BOOST_PARAMS *pBoostParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + NV2080_CTRL_INTERNAL_PERF_BOOST_SET_PARAMS_2X boostParams2x = {0}; + + boostParams2x.flags = pBoostParams->flags; + boostParams2x.duration = pBoostParams->duration; + + // + // This should always be GSP CLIENT. + // + NV_ASSERT(IS_GSP_CLIENT(pGpu)); + + NV_RM_RPC_CONTROL(pGpu, + RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_INTERNAL_PERF_BOOST_SET_2X, + &boostParams2x, + sizeof(boostParams2x), + status); + + return status; +} + + diff --git a/src/nvidia/src/kernel/gpu/perf/kern_perf_gpuboostsync.c b/src/nvidia/src/kernel/gpu/perf/kern_perf_gpuboostsync.c new file mode 100644 index 000000000..d3d69315b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/perf/kern_perf_gpuboostsync.c @@ -0,0 +1,416 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/perf/kern_perf.h" +#include "core/locks.h" +#include "vgpu/rpc.h" +#include "nvRmReg.h" + +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Public Class Interfaces ------------------------ */ +/*! + * Initialize SW state corresponding to SLI GPU Boost synchronization. + * + * @param[in] pGpu GPU object pointer + * @param[in] pKernelPerf KernelPerf object pointer + * + * @return NV_OK + */ +NV_STATUS +kperfGpuBoostSyncStateInit_IMPL +( + OBJGPU *pGpu, + KernelPerf *pKernelPerf +) +{ + NV_STATUS status = NV_OK; + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NvU32 i; + NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO_PARAMS ctrlParams = { 0 }; + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_GET_INFO, + &ctrlParams, + sizeof(ctrlParams)); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to read Sync Gpu Boost init state, status=0x%x\n", + status); + goto kperfGpuBoostSyncStateInit_IMPL_exit; + } + + pKernelPerf->sliGpuBoostSync.hysteresisus = ctrlParams.hysteresisus; + pKernelPerf->sliGpuBoostSync.bHystersisEnable = ctrlParams.bHystersisEnable; + pKernelPerf->sliGpuBoostSync.bSliGpuBoostSyncEnable = ctrlParams.bSliGpuBoostSyncEnable; + + // Initialize the GPU Boost synchronization limits. + for (i = 0; i < NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM; i++) + { + pKernelPerf->sliGpuBoostSync.limits[i] = NV_U32_MAX; + } + +kperfGpuBoostSyncStateInit_IMPL_exit: + return NV_OK; +} + +/*! + * @copydoc kperfGpuBoostSyncActivate + */ +NV_STATUS +kperfGpuBoostSyncActivate_IMPL +( + OBJGPU *pGpu, + KernelPerf *pKernelPerf, + NvBool bActivate +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_STATUS status = NV_OK; + NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_CONTROL_PARAMS ctrlParams = { 0 }; + + ctrlParams.bActivate = bActivate; + + status = pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_CONTROL, + &ctrlParams, + sizeof(ctrlParams)); + + return status; +} + +/*! + * Processes all GPU Boost PERF_LIMITs and applies most restrictive of them for + * SLI GPU Boost synchronization. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pParams + * + * @return NV_OK + * Synchronized PERF_LIMITs successfully applied or removed. + * @return Other unexpected errors + * Unexpected errors propagated from other functions. 
+ */ +NV_STATUS +kperfDoSyncGpuBoostLimits_IMPL +( + OBJGPU *pGpu, + KernelPerf *pKernelPerf, + NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPUBOOSTMGR *pBoostMgr = SYS_GET_GPUBOOSTMGR(pSys); + NV_STATUS status = NV_OK; + NvU64 currns = 0; + NvU64 diffns = 0; + NvBool bUpdate = NV_FALSE; + NvBool bBridgeless = NV_FALSE; + NV2080_CTRL_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS_PARAMS perfGpuBoostSyncParamsSet = { 0 }; + NvBool bBcState; + NvU32 grpId; + NvU32 i; + + bBcState = gpumgrGetBcEnabledStatus(pGpu); + + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + gpuboostmgrGetBoostGrpIdFromGpu(pBoostMgr, pGpu, &grpId); + + if (!gpuboostmgrIsBoostGrpActive(pBoostMgr, grpId)) + { + return status; + } + + for (i = 0; i < NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM; i++) + { + pKernelPerf->sliGpuBoostSync.limits[i] = pParams->currLimits[i]; + } + pKernelPerf->sliGpuBoostSync.bBridgeless = pParams->bBridgeless; + + portMemSet(perfGpuBoostSyncParamsSet.currLimits, NV_U8_MAX, sizeof(perfGpuBoostSyncParamsSet.currLimits)); + + // + // NOTE: + // One will see a pattern of forks in this file: + // if (boost group active) + // Gpu Boost Loop + // else + // SLI Loop + // This is a temporary change to introduce Sync Gpu boost Manager to the SLI Boost framework. + // The goal eventually is to replace SLI GPU Boost with Sync GPU Boost. + // WDDM KMD and UMDs needs to change for that + // + + if (gpuboostmgrIsBoostGrpActive(pBoostMgr, grpId)) + { + OBJGPU *pGpuItr = NULL; + + GPUBOOSTMGR_ITR_START(pBoostMgr, grpId, pGpuItr) + { + pKernelPerf = GPU_GET_KERNEL_PERF(pGpuItr); + + // Find min of all GPU Boost PERF_LIMITs across all the GPUs. + for (i = 0; i < NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM; i++) + { + perfGpuBoostSyncParamsSet.currLimits[i] = NV_MIN(perfGpuBoostSyncParamsSet.currLimits[i], pKernelPerf->sliGpuBoostSync.limits[i]); + } + + if (pKernelPerf->sliGpuBoostSync.bBridgeless) + { + bBridgeless = NV_TRUE; + } + + } + GPUBOOSTMGR_ITR_END + } + else + { + } + + // Enable hysteresis algorithm, if required. + if ((pKernelPerf != NULL) && + (pKernelPerf->sliGpuBoostSync.bHystersisEnable)) + { + // Get current tick. + osGetPerformanceCounter(&currns); + + for (i = 0; i < NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM; i++) + { + // If GPU Boost PERF_LIMITs are being lowered, immediately synchronize. + if (perfGpuBoostSyncParamsSet.currLimits[i] < pGpuMgr->sliGpuBoostSync.prevLimits[i]) + { + bUpdate = NV_TRUE; + } + else if (perfGpuBoostSyncParamsSet.currLimits[i] > pGpuMgr->sliGpuBoostSync.prevLimits[i]) + { + // Otherwise, synchronize only if specified time has been elapsed. + diffns = currns - pGpuMgr->sliGpuBoostSync.prevChangeTsns; + + if ((diffns / 1000) > pKernelPerf->sliGpuBoostSync.hysteresisus) + { + bUpdate = NV_TRUE; + } + } + } + + // Update previous history and apply SLI GPU Boost PERF_LIMITs. 
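+        // (A lowered limit is propagated immediately; a raised limit is only
+        // propagated once at least 'hysteresisus' microseconds have elapsed since
+        // the previous change, e.g. with hysteresisus = 100000 a raise may be held
+        // back for up to ~100 ms.)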
+        if (bUpdate)
+        {
+            pGpuMgr->sliGpuBoostSync.prevChangeTsns = currns;
+
+            for (i = 0; i < NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM; i++)
+            {
+                pGpuMgr->sliGpuBoostSync.prevLimits[i] = perfGpuBoostSyncParamsSet.currLimits[i];
+            }
+        }
+        else
+        {
+            return status;
+        }
+    }
+
+    perfGpuBoostSyncParamsSet.flags       = pParams->flags;
+    perfGpuBoostSyncParamsSet.bBridgeless = bBridgeless;
+
+    if (gpuboostmgrIsBoostGrpActive(pBoostMgr, grpId))
+    {
+        OBJGPU *pGpuItr = NULL;
+
+        GPUBOOSTMGR_ITR_START(pBoostMgr, grpId, pGpuItr)
+        {
+            RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+            status = pRmApi->Control(pRmApi,
+                                     pGpuItr->hInternalClient,
+                                     pGpuItr->hInternalSubdevice,
+                                     NV2080_CTRL_CMD_INTERNAL_PERF_GPU_BOOST_SYNC_SET_LIMITS,
+                                     &perfGpuBoostSyncParamsSet,
+                                     sizeof(perfGpuBoostSyncParamsSet));
+        }
+        GPUBOOSTMGR_ITR_END
+    }
+    else
+    {
+    }
+
+    gpumgrSetBcEnabledStatus(pGpu, bBcState);
+
+    return status;
+}
+
+/*!
+ * Update the bridgeless info.
+ *
+ * @param[in]  pGpu         GPU object pointer
+ * @param[in]  bBridgeless  Latest bridgeless info that should be set
+ *
+ * @return NV_OK
+ */
+NV_STATUS
+kPerfGpuBoostSyncBridgelessUpdateInfo
+(
+    OBJGPU  *pGpu,
+    NvBool   bBridgeless
+)
+{
+    KernelPerf *pKernelPerf = GPU_GET_KERNEL_PERF(pGpu);
+
+    if (pKernelPerf != NULL)
+    {
+        pKernelPerf->sliGpuBoostSync.bBridgeless = bBridgeless;
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * Helper routine to toggle the state of the Sync Gpu Boost Algorithm using the SGBG infrastructure
+ * @param[in]  bActivate   NV_TRUE if we want to turn the algorithm on, NV_FALSE otherwise
+ */
+NV_STATUS
+kperfGpuBoostSyncStateUpdate
+(
+    OBJGPUBOOSTMGR *pBoostMgr,
+    NvU32           boostGroupId,
+    NvBool          bActivate
+)
+{
+    OBJSYS     *pSys        = SYS_GET_INSTANCE();
+    KernelPerf *pKernelPerf = NULL;
+    NV_STATUS   status      = NV_OK;
+    NvU32       i;
+
+    // No need to activate again if refCount is greater than 1.
+    if (1 < pBoostMgr->pBoostGroups[boostGroupId].refCount)
+    {
+        NV_PRINTF(LEVEL_ERROR,
+                  "Trying to activate an already active Sync GPU Boost Group = 0x%08x.\n",
+                  boostGroupId);
+        DBG_BREAKPOINT();
+        return status;
+    }
+
+    //
+    // Trigger state change per GPU
+    // We need to acquire GPU locks here as this will touch GPU state.
+    //
+
+    // LOCK: acquire GPUs lock
+    if (NV_OK == rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GPU))
+    {
+        if (NV_OK == osAcquireRmSema(pSys->pSema))
+        {
+            OBJGPU *pGpuItr = NULL;
+
+            GPUBOOSTMGR_ITR_START(pBoostMgr, boostGroupId, pGpuItr)
+            {
+                pKernelPerf = GPU_GET_KERNEL_PERF(pGpuItr);
+
+                if ((pKernelPerf != NULL) &&
+                    (pKernelPerf->sliGpuBoostSync.bSliGpuBoostSyncEnable))
+                {
+                    status = kperfGpuBoostSyncActivate(pGpuItr, pKernelPerf, bActivate);
+
+                    if (status == NV_OK)
+                    {
+                        for (i = 0; i < NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM; i++)
+                        {
+                            pKernelPerf->sliGpuBoostSync.limits[i] = NV_U32_MAX;
+                        }
+                    }
+                }
+                else
+                {
+                    status = NV_ERR_INVALID_STATE;
+                }
+                if (NV_OK != status)
+                {
+                    OBJGPU *pGpuItr2 = NULL;
+                    NV_PRINTF(LEVEL_ERROR,
+                              "Failed to toggle Sync Gpu Boost state on Gpu 0x%08x\n",
+                              pGpuItr->gpuId);
+                    DBG_BREAKPOINT();
+
+                    // Toggle back the Sync Gpu Boost state of all the GPUs so far
+                    GPUBOOSTMGR_ITR_START(pBoostMgr, boostGroupId, pGpuItr2)
+                    {
+                        pKernelPerf = GPU_GET_KERNEL_PERF(pGpuItr2);
+
+                        if ((pKernelPerf != NULL) &&
+                            (pKernelPerf->sliGpuBoostSync.bSliGpuBoostSyncEnable))
+                        {
+                            status = kperfGpuBoostSyncActivate(pGpuItr2, pKernelPerf, !bActivate);
+
+                            if (status == NV_OK)
+                            {
+                                for (i = 0; i < NV2080_CTRL_INTERNAL_PERF_SYNC_GPU_BOOST_LIMITS_NUM; i++)
+                                {
+                                    pKernelPerf->sliGpuBoostSync.limits[i] = NV_U32_MAX;
+                                }
+                            }
+                        }
+                        // Intentionally ignoring the status as we want to roll back the algorithm
+                        // activation and return the previously failing status
+
+                        if (pGpuItr == pGpuItr2)
+                        {
+                            // break from unwind/cleanup loop
+                            break;
+                        }
+                    }
+                    GPUBOOSTMGR_ITR_END
+
+                    // break from outer iterator
+                    break;
+                }
+            }
+            GPUBOOSTMGR_ITR_END
+        }
+        else
+        {
+            NV_PRINTF(LEVEL_ERROR, "OS Semaphore acquire failed\n");
+            status = NV_ERR_STATE_IN_USE;
+            rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+            goto kperfSliGpuBoostSyncStateUpdate_exit;
+        }
+
+        // UNLOCK: release GPUs lock
+        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_ERROR, "GPU lock acquire failed\n");
+        status = NV_ERR_STATE_IN_USE;
+        goto kperfSliGpuBoostSyncStateUpdate_exit;
+    }
+
+kperfSliGpuBoostSyncStateUpdate_exit:
+    return status;
+}
diff --git a/src/nvidia/src/kernel/gpu/perf/kern_perf_pm.c b/src/nvidia/src/kernel/gpu/perf/kern_perf_pm.c
new file mode 100644
index 000000000..f0a1d9c8e
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/perf/kern_perf_pm.c
@@ -0,0 +1,278 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* ------------------------ Includes --------------------------------------- */
+#include "os/os.h"
+#include "gpu/subdevice/subdevice.h"
+#include "rmapi/rmapi.h"
+#include "rmapi/rs_utils.h"
+#include "gpu/gpu.h"
+#include "gpu/perf/kern_perf_pm.h"
+
+/* ------------------------ Global Variables ------------------------------- */
+/* ------------------------ Static Function Prototypes --------------------- */
+/* ------------------------ Macros ----------------------------------------- */
+/* ------------------------ Public Functions ------------------------------- */
+//
+// kPerfPerfmonClientDeviceSet
+//
+// This routine attempts to acquire (or release) the PerfMon HW
+// for all GPUs within the specified device. PM HW mgmt should
+// really be a unicast operation but we have a BC interface
+// (NV_CFG_RESERVE_PERFMON_HW) that we need to continue to support.
+//
+// The work here is broken up into three steps:
+//
+// (1) loop over all GPUs and check PM HW status
+// (2) if (1) passes then acquire/release the PM HW in a second loop
+// (3) release acquired PM HW if (2) fails
+//
+NV_STATUS
+kPerfPerfmonClientDeviceSet
+(
+    NvHandle hClient,
+    NvHandle hDevice,
+    NvBool   bReservation,
+    NvBool   bClientHandlesGrGating,
+    NvBool   bRmHandlesIdleSlow
+)
+{
+    RsResourceRef *pDeviceRef;
+    Subdevice     *pSubdevice;
+    OBJGPU        *pGpu;
+    RM_API        *pRmApi;
+    RmClient      *pClient;
+    RsClient      *pRsClient;
+    RS_ITERATOR    it;
+    NV_STATUS      status = NV_OK;
+
+    if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient))
+    {
+        return NV_ERR_INVALID_CLIENT;
+    }
+    pRsClient = staticCast(pClient, RsClient);
+
+    status = clientGetResourceRefByType(pRsClient, hDevice, classId(Device), &pDeviceRef);
+    if (status != NV_OK)
+    {
+        return NV_ERR_INVALID_OBJECT_PARENT;
+    }
+
+    //
+    // Are we attempting to make a reservation or release one?
+    //
+    if (bReservation)
+    {
+        //
+        // The Perfmon HW must be *available* on all GPUs for this
+        // to work so check that first.
+        //
+        it = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE);
+        while (clientRefIterNext(pRsClient, &it))
+        {
+            pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice);
+            pGpu = gpumgrGetGpuFromSubDeviceInst(pSubdevice->deviceInst, pSubdevice->subDeviceInst);
+            pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+            NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS params = {0};
+            params.bReservation = bReservation;
+
+            status = pRmApi->Control(pRmApi,
+                                     hClient,
+                                     it.pResourceRef->hResource,
+                                     NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK,
+                                     &params,
+                                     sizeof(params));
+            if (status != NV_OK)
+            {
+                goto kClientPerfPerfmonDeviceSet_exit;
+            }
+        }
+
+        //
+        // All available...now claim it using the subdevice interface.
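+        // (Step 2 of the sequence described above: the availability check passed,
+        // so set the reservation on each subdevice; step 3 below rolls back any
+        // reservations already acquired if one of the SET calls fails.)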
+        //
+        NvHandle failedReservationHandle = 0;
+        it = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE);
+        while (clientRefIterNext(pRsClient, &it))
+        {
+            pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice);
+            pGpu = gpumgrGetGpuFromSubDeviceInst(pSubdevice->deviceInst, pSubdevice->subDeviceInst);
+            pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+            NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS params = {0};
+            params.bReservation = bReservation;
+            params.bClientHandlesGrGating = bClientHandlesGrGating;
+            params.bRmHandlesIdleSlow = bRmHandlesIdleSlow;
+
+            status = pRmApi->Control(pRmApi,
+                                     hClient,
+                                     it.pResourceRef->hResource,
+                                     NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET,
+                                     &params,
+                                     sizeof(params));
+            //
+            // If the reservation did not succeed we should release all acquired reservations.
+            // This should never happen as the check should always be called before reservation.
+            //
+            if (status != NV_OK)
+            {
+                failedReservationHandle = it.pResourceRef->hResource;
+                break;
+            }
+        }
+
+        if (status != NV_OK)
+        {
+            it = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE);
+            while (clientRefIterNext(pRsClient, &it))
+            {
+                if (failedReservationHandle == it.pResourceRef->hResource)
+                {
+                    goto kClientPerfPerfmonDeviceSet_exit;
+                }
+
+                pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice);
+                pGpu = gpumgrGetGpuFromSubDeviceInst(pSubdevice->deviceInst, pSubdevice->subDeviceInst);
+                pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+                NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS params = {0};
+                params.bReservation = NV_FALSE;
+                params.bClientHandlesGrGating = bClientHandlesGrGating;
+                params.bRmHandlesIdleSlow = bRmHandlesIdleSlow;
+                //
+                // Ignoring the status here intentionally since, no matter whether this release fails,
+                // we want to try and release the perfmon HW for all the GPUs that acquired it.
+                //
+                NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status,
+                    LEVEL_ERROR,
+                    pRmApi->Control(pRmApi,
+                                    hClient,
+                                    it.pResourceRef->hResource,
+                                    NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET,
+                                    &params,
+                                    sizeof(params)));
+            }
+        }
+    }
+    else
+    {
+        //
+        // The Perfmon HW must be *reserved* on all GPUs for this to work
+        // so check that first.
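+        // (The release path mirrors the reservation path above: verify that the
+        // Perfmon HW is reserved on every subdevice first, then clear the
+        // reservation on each one, capturing the first error that occurs.)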
+        //
+        it = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE);
+        while (clientRefIterNext(pRsClient, &it))
+        {
+            pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice);
+            pGpu = gpumgrGetGpuFromSubDeviceInst(pSubdevice->deviceInst, pSubdevice->subDeviceInst);
+            pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+            NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK_PARAMS params = {0};
+            params.bReservation = bReservation;
+
+            status = pRmApi->Control(pRmApi,
+                                     hClient,
+                                     it.pResourceRef->hResource,
+                                     NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_CHECK,
+                                     &params,
+                                     sizeof(params));
+            //
+            // bailing here under the assumption that we acquired all or none
+            //
+            if (status != NV_OK)
+            {
+                goto kClientPerfPerfmonDeviceSet_exit;
+            }
+        }
+
+        it = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE);
+        while (clientRefIterNext(pRsClient, &it))
+        {
+            pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice);
+            pGpu = gpumgrGetGpuFromSubDeviceInst(pSubdevice->deviceInst, pSubdevice->subDeviceInst);
+            pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+
+            NV2080_CTRL_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET_PARAMS params = {0};
+            params.bReservation = bReservation;
+            params.bClientHandlesGrGating = bClientHandlesGrGating;
+            params.bRmHandlesIdleSlow = bRmHandlesIdleSlow;
+
+            NV_CHECK_OK_OR_CAPTURE_FIRST_ERROR(status,
+                LEVEL_ERROR,
+                pRmApi->Control(pRmApi,
+                                hClient,
+                                it.pResourceRef->hResource,
+                                NV2080_CTRL_CMD_INTERNAL_PERF_PERFMON_CLIENT_RESERVATION_SET,
+                                &params,
+                                sizeof(params)));
+        }
+    }
+
+kClientPerfPerfmonDeviceSet_exit:
+    return status;
+}
+
+/*!
+ * @brief Reserves HW Performance Monitoring capabilities for clients.
+ *
+ * This command reserves HW Performance Monitoring capabilities for exclusive
+ * use by the requester.
+ *
+ * The function cannot use "ROUTE_TO_PHYSICAL" directly since the privilege
+ * checks cannot be skipped on CPU RM.
+ *
+ * @param[in]     pSubdevice
+ * @param[in,out] pPerfmonParams
+ *
+ * @returns NV_OK if the HW Performance monitoring capabilities are successfully
+ *          acquired by the client.
+ */
+NV_STATUS
+subdeviceCtrlCmdPerfReservePerfmonHw_KERNEL
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_PERF_RESERVE_PERFMON_HW_PARAMS *pPerfmonParams
+)
+{
+    OBJGPU    *pGpu   = GPU_RES_GET_GPU(pSubdevice);
+    RM_API    *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu);
+    NV_STATUS  status;
+
+    if (gpuIsRmProfilingPrivileged(pGpu) && !osIsAdministrator())
+    {
+        return NV_ERR_INSUFFICIENT_PERMISSIONS;
+    }
+
+    // Redirect to Physical RM.
+    status = pRmApi->Control(pRmApi,
+                             RES_GET_CLIENT_HANDLE(pSubdevice),
+                             RES_GET_HANDLE(pSubdevice),
+                             NV2080_CTRL_CMD_PERF_RESERVE_PERFMON_HW,
+                             pPerfmonParams,
+                             sizeof(*pPerfmonParams));
+
+    return status;
+}
+
+/* ------------------------- Private Functions ------------------------------ */
diff --git a/src/nvidia/src/kernel/gpu/perf/kern_perf_pwr.c b/src/nvidia/src/kernel/gpu/perf/kern_perf_pwr.c
new file mode 100644
index 000000000..24f0b81f2
--- /dev/null
+++ b/src/nvidia/src/kernel/gpu/perf/kern_perf_pwr.c
@@ -0,0 +1,97 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* ------------------------ Includes --------------------------------------- */
+#include "os/os.h"
+#include "gpu/subdevice/subdevice.h"
+#include "rmapi/rmapi.h"
+#include "rmapi/rs_utils.h"
+#include "vgpu/rpc.h"
+#include "kernel/gpu/mig_mgr/kernel_mig_manager.h"
+#include "gpu/gpu.h"
+
+/* ------------------------ Global Variables ------------------------------- */
+/* ------------------------ Static Function Prototypes --------------------- */
+/* ------------------------ Macros ----------------------------------------- */
+/* ------------------------ Public Functions ------------------------------- */
+/*!
+ * @brief Retrieves the currently requested RATED_TDP action corresponding
+ * to the specified client.
+ *
+ * @param[in]     pSubdevice
+ * @param[in,out] pControlParams
+ *
+ * @return NV_OK
+ *     Client control action successfully applied.
+ * @return NV_ERR_NOT_SUPPORTED
+ *     RATED_TDP functionality not supported on this GPU.
+ * @return NV_ERR_INVALID_ARGUMENT
+ *     Invalid client specified.
+ */ +NV_STATUS +subdeviceCtrlCmdPerfRatedTdpSetControl_KERNEL +( + Subdevice *pSubdevice, + NV2080_CTRL_PERF_RATED_TDP_CONTROL_PARAMS *pControlParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NV_STATUS status = NV_OK; + + NvBool bSmcEnabled = IS_MIG_ENABLED(pGpu); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + // + // With SMC enabled, the clock controls can only be modified by a priv client + // This is to ensure that clients running under a SMC partition do not impact + // other clients running on different partitions + // + if (bSmcEnabled && !rmclientIsAdminByHandle(hClient, pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_ERROR, + "Non-Privileged clients are not allowed to access clock controls with SMC enabled.\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // + // Redirect to Physical RM in case of the GSP CLIENT or + // host RM in case of the vGPU + // + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + + return status; +} + +/* ------------------------- Private Functions ------------------------------ */ diff --git a/src/nvidia/src/kernel/gpu/perf/kern_perfbuffer.c b/src/nvidia/src/kernel/gpu/perf/kern_perfbuffer.c new file mode 100644 index 000000000..491572e5b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/perf/kern_perfbuffer.c @@ -0,0 +1,85 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @brief All functions are related to the Perf Boost Hint feature. + */ +/* ------------------------ Includes --------------------------------------- */ +#include "os/os.h" +#include "gpu/gpu.h" +#include "rmapi/rmapi.h" +#include "resserv/rs_client.h" +#include "vgpu/rpc.h" +#include "gpu/perf/kern_perfbuffer.h" + +/* ------------------------ Global Variables ------------------------------- */ +/* ------------------------ Static Function Prototypes --------------------- */ +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Public Functions ------------------------------- */ +/*! 
+ * @copydoc perfbufferConstructHal + */ +NV_STATUS +perfbufferConstructHal_KERNEL +( + PerfBuffer *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pResource); + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvHandle hMemory = pCallContext->pResourceRef->hResource; + NvU32 class = pResourceRef->externalClassId; + NV_STATUS status = NV_OK; + + NV_CHECK_OK_OR_RETURN(LEVEL_NOTICE, perfbufferPrivilegeCheck(pResource)); + + NV_RM_RPC_ALLOC_OBJECT(pGpu, hClient, hParent, hMemory, class, + pParams->pAllocParams, status); + + return status; +} + +/*! + * @copydoc perfbufferPrivilegeCheck + */ +NV_STATUS +perfbufferPrivilegeCheck_IMPL +( + PerfBuffer *pResource +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pResource); + + if (gpuIsRmProfilingPrivileged(pGpu) && !osIsAdministrator()) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} + +/* ------------------------- Private Functions ------------------------------ */ diff --git a/src/nvidia/src/kernel/gpu/pmu/kern_pmu.c b/src/nvidia/src/kernel/gpu/pmu/kern_pmu.c new file mode 100644 index 000000000..a8c4a09e9 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/pmu/kern_pmu.c @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/**************************************************************************** +* +* Kernel Pmu Module +* This file contains functions managing PMU core on CPU RM +* +****************************************************************************/ + +#include "gpu/pmu/kern_pmu.h" + +NV_STATUS +kpmuConstructEngine_IMPL(OBJGPU *pGpu, KernelPmu *pKernelPmu, ENGDESCRIPTOR engDesc) +{ + + return kpmuInitLibosLoggingStructures(pGpu, pKernelPmu); +} + +void +kpmuDestruct_IMPL(KernelPmu *pKernelPmu) +{ + OBJGPU *pGpu = ENG_GET_GPU(pKernelPmu); + + kpmuFreeLibosLoggingStructures(pGpu, pKernelPmu); +} + +/*! 
+ * Init libos PMU logging + */ +NV_STATUS +kpmuInitLibosLoggingStructures_IMPL +( + OBJGPU *pGpu, + KernelPmu *pKernelPmu +) +{ + NV_STATUS nvStatus = NV_OK; + + if (!IS_GSP_CLIENT(pGpu)) + return NV_OK; + + NV_ASSERT_OR_RETURN(pKernelPmu->pPrintBuf == NULL, NV_ERR_INVALID_STATE); + + pKernelPmu->pLogElf = NULL; + + // Allocate print buffer + pKernelPmu->printBufSize = PMU_LOG_BUFFER_MAX_SIZE; + pKernelPmu->pPrintBuf = portMemAllocNonPaged(pKernelPmu->printBufSize); + + // Create PMU log + libosLogCreateEx(&pKernelPmu->logDecode, "PMU"); + + // Add PMU log buffer (use a fake "task name" - NVRISCV) + libosLogAddLogEx(&pKernelPmu->logDecode, pKernelPmu->pPrintBuf, pKernelPmu->printBufSize, + pGpu->gpuInstance, (gpuGetChipArch(pGpu) >> GPU_ARCH_SHIFT), gpuGetChipImpl(pGpu), + "NVRISCV"); + + // Finish PMU log init (setting the lossless-print flag and resolve-pointers flag) + libosLogInitEx(&pKernelPmu->logDecode, pKernelPmu->pLogElf, NV_TRUE, NV_TRUE); + + if (nvStatus != NV_OK) + kpmuFreeLibosLoggingStructures(pGpu, pKernelPmu); + return nvStatus; +} + +/*! + * Free libos PMU logging + */ +void +kpmuFreeLibosLoggingStructures_IMPL +( + OBJGPU *pGpu, + KernelPmu *pKernelPmu +) +{ + if (!IS_GSP_CLIENT(pGpu)) + return; + + // Destroy PMU log + libosLogDestroy(&pKernelPmu->logDecode); + portMemFree(pKernelPmu->pPrintBuf); + pKernelPmu->pPrintBuf = NULL; + portMemFree(pKernelPmu->pLogElf); + pKernelPmu->pLogElf = NULL; +} + +/*! + * Log a PMU libos log buffer coming from GSP-RM + */ +void +kpmuLogBuf_IMPL +( + OBJGPU *pGpu, + KernelPmu *pKernelPmu, + NvU8 *pBuf, + NvU32 bufSize +) +{ + NV_ASSERT_OR_RETURN_VOID(pKernelPmu->pPrintBuf != NULL); + + portMemCopy(pKernelPmu->pPrintBuf, pKernelPmu->printBufSize, + pBuf, bufSize); + libosExtractLogs(&pKernelPmu->logDecode, NV_FALSE); +} diff --git a/src/nvidia/src/kernel/gpu/rc/kernel_rc.c b/src/nvidia/src/kernel/gpu/rc/kernel_rc.c new file mode 100644 index 000000000..2879abc13 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/rc/kernel_rc.c @@ -0,0 +1,502 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/rc/kernel_rc.h" + +#include "kernel/core/system.h" +#include "kernel/gpu/bif/kernel_bif.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/os/os.h" +#include "kernel/platform/chipset/chipset.h" +#include "kernel/rmapi/client.h" + + +#include "libraries/utils/nvprintf.h" +#include "nvRmReg.h" +#include "nverror.h" +#include "nvtypes.h" +#include "objtmr.h" + + +static void _krcInitRegistryOverrides(OBJGPU *pGpu, KernelRc *pKernelRc); +static void _krcLogUuidOnce(OBJGPU *pGpu, KernelRc *pKernelRc); + + +NV_STATUS +krcConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + ENGDESCRIPTOR engDescriptor +) +{ + _krcInitRegistryOverrides(pGpu, pKernelRc); + + return NV_OK; +} + + +void +krcInitRegistryOverridesDelayed_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + NvU32 dword = 0; + (void) dword; + + + dword = 0; + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_ROBUST_CHANNELS, &dword) != + NV_OK) + { +#if RMCFG_FEATURE_PLATFORM_WINDOWS || RMCFG_FEATURE_PLATFORM_GSP || \ + RMCFG_FEATURE_PLATFORM_UNIX + dword = NV_REG_STR_RM_ROBUST_CHANNELS_ENABLE; +#else +#error "unrecognized platform" +#endif + } + pKernelRc->bRobustChannelsEnabled = (dword == + NV_REG_STR_RM_ROBUST_CHANNELS_ENABLE); + + + dword = 0; + // + // Force uncached pushbuffers for robust channel. + // + // We used to allocate the recovery channel as uncached, which is achieved + // by allocating physically contiguous memory then remap that uncached. + // However, this caused allocations issues in cases which shares a channel + // with the robust channel, and ended up requesting sizeof(RC + pushbuffer) + // of contiguous memory (bug 73669). + // + // We therefore switched to cached allocations, with a few exceptions where + // an uncached pushbuffer is still needed: + // - When the system does not support CPU cache snooping (bugs 292461 and + // 976485). 
+ // + if ((osReadRegistryDword(pGpu, + NV_REG_STR_USE_UNCACHED_PCI_MAPPINGS, + &dword) == NV_OK && + dword != 0) || + ((pKernelBif != NULL) && + !kbifIsSnoopDmaCapable(pGpu, pKernelBif))) + { + pKernelRc->watchdog.flags |= WATCHDOG_FLAGS_ALLOC_UNCACHED_PCI; + } +} + + +static void +_krcInitRegistryOverrides +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + NvU32 dword = 0; + (void) dword; + + dword = 0; + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_BREAK_ON_RC, &dword) != NV_OK) + { + dword = NV_REG_STR_RM_BREAK_ON_RC_DEFAULT; + } + + pKernelRc->bBreakOnRc = (dword == NV_REG_STR_RM_BREAK_ON_RC_ENABLE); + + // Allow driver registry key RmBreak to override Device Key + if (DRF_VAL(_DEBUG, _BREAK_FLAGS, _RC, SYS_GET_INSTANCE()->debugFlags) == + NV_DEBUG_BREAK_FLAGS_RC_ENABLE) + { + pKernelRc->bBreakOnRc = NV_TRUE; + } + + if (pKernelRc->bBreakOnRc) + { + NV_PRINTF(LEVEL_INFO, "Breakpoint on RC Error is enabled\n"); + } + else + { + NV_PRINTF(LEVEL_INFO, "Breakpoint on RC Error is disabled\n"); + } + + + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_WATCHDOG_TIMEOUT, + &pKernelRc->watchdogPersistent.timeoutSecs) != + NV_OK || + pKernelRc->watchdogPersistent.timeoutSecs == 0) + { + pKernelRc->watchdogPersistent.timeoutSecs = + NV_REG_STR_RM_WATCHDOG_TIMEOUT_DEFAULT; + } + if (osReadRegistryDword(pGpu, + NV_REG_STR_RM_WATCHDOG_INTERVAL, + &pKernelRc->watchdogPersistent.intervalSecs) != + NV_OK || + pKernelRc->watchdogPersistent.intervalSecs == 0) + { + pKernelRc->watchdogPersistent.intervalSecs = + NV_REG_STR_RM_WATCHDOG_INTERVAL_DEFAULT; + } + + if (pKernelRc->watchdogPersistent.intervalSecs > + pKernelRc->watchdogPersistent.timeoutSecs) + { + pKernelRc->watchdogPersistent.intervalSecs = + pKernelRc->watchdogPersistent.timeoutSecs; + } + + + dword = 0; + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_RC_WATCHDOG, &dword) == NV_OK) + { + if (dword == NV_REG_STR_RM_RC_WATCHDOG_DISABLE) + { + pKernelRc->watchdog.flags |= WATCHDOG_FLAGS_DISABLED; + } + } + else if (IS_GSP_CLIENT(pGpu) || IS_EMULATION(pGpu) || IS_SIMULATION(pGpu)) + { + // GSPTODO: need to sort out RC watchdog for GSP + pKernelRc->watchdog.flags |= WATCHDOG_FLAGS_DISABLED; + } + + + dword = 0; + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_DO_LOG_RC_EVENTS, &dword) == + NV_OK) + { + pKernelRc->bLogEvents = (dword == NV_REG_STR_RM_DO_LOG_RC_ENABLE); + if (pKernelRc->bLogEvents) + { + NV_PRINTF(LEVEL_INFO, "RC Error Logging is enabled\n"); +#if defined(DEBUG) + // Don't print out the initialization log on a retail build + osErrorLog(pGpu, ROBUST_CHANNEL_RC_LOGGING_ENABLED, ""); +#endif + } + } +} + + +static void +_krcLogUuidOnce +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + if (!pKernelRc->bGpuUuidLoggedOnce) + { + NvU8 *gidString = NULL; + NvU32 gidStrlen; + + if (gpuGetGidInfo(pGpu, + &gidString, + &gidStrlen, + (DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _ASCII) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1))) == + NV_OK) + { + portDbgPrintf("NVRM: GPU at PCI:%04x:%02x:%02x: %s\n", + gpuGetDomain(pGpu), + gpuGetBus(pGpu), + gpuGetDevice(pGpu), + gidString); + portMemFree(gidString); + } + + if (pGpu->boardInfo != NULL && pGpu->boardInfo->serialNumber[0] != '\0') + { + portDbgPrintf("NVRM: GPU Board Serial Number: %s\n", + pGpu->boardInfo->serialNumber); + } + + pKernelRc->bGpuUuidLoggedOnce = NV_TRUE; + } +} + + +void +krcGetMigAttributionForError_KERNEL +( + KernelRc *pKernelRc, + NvU32 exceptType, + NvU16 *pGpuPartitionId, + NvU16 *pComputeInstanceId +) +{ + if (pGpuPartitionId != NULL) + { + *pGpuPartitionId 
= KMIGMGR_INSTANCE_ATTRIBUTION_ID_INVALID; + } + if (pComputeInstanceId != NULL) + { + *pComputeInstanceId = KMIGMGR_INSTANCE_ATTRIBUTION_ID_INVALID; + } +} + + +void +krcReportXid_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + NvU32 exceptType, + const char *pMsg +) +{ + // + // Log the RC error to the OS + // + // Enforce the policy of gating the log output by "RmLogonRC" regkey. + // Some of our callers do not abide by this rule. + // That is how they want it under Windows. + // + if (GPU_GET_KERNEL_RC(pGpu)->bLogEvents) + { + NvU16 gpuPartitionId; + NvU16 computeInstanceId; + KernelChannel *pKernelChannel = krcGetChannelInError(pKernelRc); + + // Channels are populated with osGetCurrentProcessName() and pid of + // their process at creation-time. If no channel was found, mark unknown + const char *procname = ""; + char pid_string[12] = "''"; + + // + // Get PID of channel creator if available, otherwise default to + // whatever the kernel can tell us at the moment (likely pid 0) + // + if (pKernelChannel != NULL) + { + RsClient *pClient = RES_GET_CLIENT(pKernelChannel); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + procname = pRmClient->name; + nvDbgSnprintf(pid_string, sizeof(pid_string), "%u", pKernelChannel->ProcessID); + } + + _krcLogUuidOnce(pGpu, pKernelRc); + + krcGetMigAttributionForError_HAL(pKernelRc, + exceptType, + &gpuPartitionId, + &computeInstanceId); + + if (gpuPartitionId != KMIGMGR_INSTANCE_ATTRIBUTION_ID_INVALID && + computeInstanceId != KMIGMGR_INSTANCE_ATTRIBUTION_ID_INVALID) + { + // Attribute this XID to both GPU / Compute instance + portDbgPrintf( + "NVRM: Xid (PCI:%04x:%02x:%02x GPU-I:%02u GPU-CI:%02u): %d, pid=%s, name=%s, %s\n", + gpuGetDomain(pGpu), gpuGetBus(pGpu), gpuGetDevice(pGpu), + gpuPartitionId, computeInstanceId, + exceptType, + pid_string, + procname, + pMsg != NULL ? pMsg : ""); + } + else if (gpuPartitionId != KMIGMGR_INSTANCE_ATTRIBUTION_ID_INVALID) + { + // Attribute this XID to GPU instance only + portDbgPrintf( + "NVRM: Xid (PCI:%04x:%02x:%02x GPU-I:%02u): %d, pid=%s, name=%s, %s\n", + gpuGetDomain(pGpu), gpuGetBus(pGpu), gpuGetDevice(pGpu), + gpuPartitionId, + exceptType, + pid_string, + procname, + pMsg != NULL ? pMsg : ""); + } + else + { + // Legacy (no attribution) XID reporting + portDbgPrintf("NVRM: Xid (PCI:%04x:%02x:%02x): %d, pid=%s, name=%s, %s\n", + gpuGetDomain(pGpu), gpuGetBus(pGpu), gpuGetDevice(pGpu), + exceptType, + pid_string, + procname, + pMsg != NULL ? 
pMsg : ""); + } + } +} + + +NvBool +krcTestAllowAlloc_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + NvU32 failMask +) +{ + if (pKernelRc->bRobustChannelsEnabled && + (pKernelRc->watchdog.allocFailMask & failMask)) + { + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NvU64 time; + NV_STATUS status = tmrGetCurrentTime(pTmr, &time); + + // + // randomly fail this alloc based on NV timer + // assuming here that we don't get allocations within 128ns of each + // other + // + if (status == NV_OK && ((time & 0xff) > (0xffu / 2))) + return NV_FALSE; + } + + return NV_TRUE; +} + + +NV_STATUS +krcCheckBusError_KERNEL +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + OBJCL *pCl = SYS_GET_CL(SYS_GET_INSTANCE()); + NvU32 clDevCtrlStatusFlags = 0; + NvU32 clDevCtrlStatusFlags_Org = 0; + NvU32 clDevCtrlStatus = 0; + PcieAerCapability clAer; + + + // PCI-E provides extended error reporting + if (pKernelBif == NULL || kbifGetBusIntfType_HAL(pKernelBif) != + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) + { + return NV_OK; + } + + // Clear PCIe dev ctrl/status errors and AER errors + kbifClearConfigErrors(pGpu, pKernelBif, NV_TRUE, + KBIF_CLEAR_XVE_AER_ALL_MASK); + + // Corelogic device control status + if (pCl != NULL && + clPcieReadDevCtrlStatus(pGpu, pCl, + &clDevCtrlStatusFlags, + &clDevCtrlStatus) == NV_OK && + clDevCtrlStatusFlags != 0) + { + NV_PRINTF(LEVEL_ERROR, + "PCI-E corelogic status has pending errors (CL_PCIE_DEV_CTRL_STATUS = %08X):\n", + clDevCtrlStatus); + + clDevCtrlStatusFlags_Org = clDevCtrlStatusFlags; + + if (clDevCtrlStatusFlags & + NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR) + { + NV_PRINTF(LEVEL_ERROR, " _CORR_ERROR_DETECTED\n"); + // not much interested in this one + clDevCtrlStatusFlags &= + ~NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR; + } + if (clDevCtrlStatusFlags & + NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR) + { + NV_PRINTF(LEVEL_ERROR, " _NON_FATAL_ERROR_DETECTED\n"); + } + if (clDevCtrlStatusFlags & + NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR) + { + NV_PRINTF(LEVEL_ERROR, " _FATAL_ERROR_DETECTED\n"); + } + if (clDevCtrlStatusFlags & + NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST) + { + NV_PRINTF(LEVEL_ERROR, " _UNSUPP_REQUEST_DETECTED\n"); + } + } + + // Corelogic AER + if (pCl != NULL && clPcieReadAerCapability(pGpu, pCl, &clAer) == NV_OK && + (clAer.UncorrErrStatusReg != 0 || clAer.RooErrStatus != 0)) + { + NV_PRINTF(LEVEL_ERROR, + "PCE-I Advanced Error Reporting Corelogic Info:\n"); + NV_PRINTF(LEVEL_ERROR, + " Uncorr Error Status Register : %08X\n", + clAer.UncorrErrStatusReg); + NV_PRINTF(LEVEL_ERROR, + " Uncorr Error Mask Register : %08X\n", + clAer.UncorrErrMaskReg); + NV_PRINTF(LEVEL_ERROR, + " Uncorr Error Severity Register : %08X\n", + clAer.UncorrErrSeverityReg); + NV_PRINTF(LEVEL_ERROR, + " Corr Error Status Register : %08X\n", + clAer.CorrErrStatusReg); + NV_PRINTF(LEVEL_ERROR, + " Corr Error Mask Register : %08X\n", + clAer.CorrErrMaskReg); + NV_PRINTF(LEVEL_ERROR, + " Advanced Err Cap & Ctrl Register: %08X\n", + clAer.AEcapCrtlReg); + NV_PRINTF(LEVEL_ERROR, + " Header Log [0-3] : %08X\n", + clAer.HeaderLogReg.Header[0]); + NV_PRINTF(LEVEL_ERROR, + " Header Log [4-7] : %08X\n", + clAer.HeaderLogReg.Header[1]); + NV_PRINTF(LEVEL_ERROR, + " Header Log [8-B] : %08X\n", + clAer.HeaderLogReg.Header[2]); + NV_PRINTF(LEVEL_ERROR, + " Header Log [C-F] : %08X\n", + clAer.HeaderLogReg.Header[3]); + NV_PRINTF(LEVEL_ERROR, + " Root Error Command Register : %08X\n", + clAer.RootErrCmd); + 
NV_PRINTF(LEVEL_ERROR, + " Root Error Status : %08X\n", + clAer.RooErrStatus); + NV_PRINTF(LEVEL_ERROR, + " Error Source ID Register : %08X\n", + clAer.ErrSrcReg); + + // + // if you hit this case with some AER errors reported please refer to + // PCI-E manual for detailed bits spec + // TODO: add details bits here + // + } + + if (clDevCtrlStatusFlags_Org) + { + // clear the corelogic status after we had a chance to examine it + clPcieClearDevCtrlStatus(pGpu, pCl, &clDevCtrlStatus); + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/rc/kernel_rc_callback.c b/src/nvidia/src/kernel/gpu/rc/kernel_rc_callback.c new file mode 100644 index 000000000..5826691b3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/rc/kernel_rc_callback.c @@ -0,0 +1,400 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "kernel/gpu/rc/kernel_rc.h"
+
+#include "kernel/core/locks.h"
+#include "kernel/diagnostics/journal.h"
+#include "kernel/gpu/device/device.h"
+#include "kernel/gpu/fifo/kernel_channel_group.h"
+#include "kernel/gpu/fifo/kernel_channel_group_api.h"
+#include "kernel/gpu/mmu/kern_gmmu.h"
+#include "kernel/os/os.h"
+#include "rmapi/client.h"
+
+#include "ctrl/ctrl506f.h"
+
+#include "libraries/utils/nvprintf.h"
+#include "nverror.h"
+#include "nvtypes.h"
+#include "objtmr.h"
+#include "vgpu/rpc.h"
+
+
+static NV_STATUS
+_vgpuRcResetCallback
+(
+    NvHandle          hClient,
+    NvHandle          hDevice,
+    NvHandle          hChannel,
+    RC_ERROR_CONTEXT *pRcErrorContext
+)
+{
+    OBJSYS    *pSys   = SYS_GET_INSTANCE();
+    NV_STATUS  status = NV_OK;
+
+    if (osCondAcquireRmSema(pSys->pSema) == NV_OK)
+    {
+        if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_COND_ACQUIRE,
+                              RM_LOCK_MODULES_RC) == NV_OK)
+        {
+            THREAD_STATE_NODE threadState;
+            NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS params = {0};
+
+            threadStateInitISRAndDeferredIntHandler(
+                &threadState,
+                pRcErrorContext->pGpu,
+                THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+
+            params.engineID   = pRcErrorContext->EngineId;
+            params.exceptType = pRcErrorContext->exceptType;
+
+            NV_RM_RPC_CONTROL(pRcErrorContext->pGpu,
+                              hClient,
+                              hChannel,
+                              NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL,
+                              &params,
+                              sizeof params,
+                              status);
+
+            threadStateFreeISRAndDeferredIntHandler(
+                &threadState,
+                pRcErrorContext->pGpu,
+                THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+
+            portMemFree(pRcErrorContext);
+            rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+        }
+        else
+        {
+            status = NV_ERR_STATE_IN_USE;
+        }
+        osReleaseRmSema(pSys->pSema, NULL);
+    }
+    else
+    {
+        status = NV_ERR_STATE_IN_USE;
+    }
+
+    return status;
+}
+
+
+//
+// krcResetCallback is called by both LDDM and MODS
+// When adding more function parameters make sure to use datatypes that are
+// defined in nvtypes.h and also update the RC_RESET_CALLBACK typedef in
+// sdk/nvidia/inc/rmcd.h
+//
+NvU32
+krcResetCallback
+(
+    NvHandle hClient,
+    NvHandle hDevice,
+    NvHandle hFifo,
+    NvHandle hChannel,
+    void    *pContext,
+    NvBool   bClearRc
+)
+{
+    THREAD_STATE_NODE threadState;
+    RC_ERROR_CONTEXT *pRcErrorContext = (RC_ERROR_CONTEXT *)pContext;
+    OBJSYS           *pSys            = SYS_GET_INSTANCE();
+    NV_STATUS         status          = NV_ERR_GENERIC;
+
+    if (pRcErrorContext != NULL)
+    {
+        if (bClearRc)
+        {
+            //
+            // This is an error condition encountered where the caller
+            // wants to free the RC allocated data structure and nothing
+            // else. Currently, only called by the KMD when a TDR occurs
+            // and there are pending RCs that need to be cancelled.
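+            // (In this path only the context allocation is released below; no channel
+            // reset RPC is issued.)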
+            //
+            portMemFree(pRcErrorContext);
+            status = NV_OK;
+        }
+        else if (IS_VIRTUAL(pRcErrorContext->pGpu))
+        {
+            status = _vgpuRcResetCallback(hClient,
+                                          hDevice,
+                                          hChannel,
+                                          pRcErrorContext);
+        }
+        else if (osCondAcquireRmSema(pSys->pSema) == NV_OK)
+        {
+            if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_RC) ==
+                NV_OK)
+            {
+                KernelChannel *pKernelChannel = NULL;
+
+                threadStateInitISRAndDeferredIntHandler(
+                    &threadState,
+                    pRcErrorContext->pGpu,
+                    THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+
+                NV_ASSERT_OK_OR_GOTO(
+                    status,
+                    serverGetClientUnderLock(&g_resServ, hClient, NULL),
+                    error_cleanup);
+                NV_ASSERT_OK_OR_GOTO(
+                    status,
+                    CliGetKernelChannel(hClient, hChannel, &pKernelChannel),
+                    error_cleanup);
+
+                NV_ASSERT_OR_ELSE(pKernelChannel != NULL,
+                                  status = NV_ERR_INVALID_STATE;
+                                  goto error_cleanup);
+
+                if (IS_GSP_CLIENT(pRcErrorContext->pGpu))
+                {
+                    NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL_PARAMS params = {0};
+                    NV_RM_RPC_CONTROL(pRcErrorContext->pGpu,
+                                      RES_GET_CLIENT_HANDLE(pKernelChannel),
+                                      RES_GET_HANDLE(pKernelChannel),
+                                      NV506F_CTRL_CMD_RESET_ISOLATED_CHANNEL,
+                                      &params,
+                                      sizeof params,
+                                      status);
+                }
+
+                threadStateFreeISRAndDeferredIntHandler(
+                    &threadState,
+                    pRcErrorContext->pGpu,
+                    THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER);
+
+                portMemFree(pRcErrorContext);
+                rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+            }
+            else
+            {
+                status = NV_ERR_STATE_IN_USE;
+            }
+            osReleaseRmSema(pSys->pSema, NULL);
+        }
+        else
+        {
+            status = NV_ERR_STATE_IN_USE;
+        }
+    }
+    else
+    {
+        // If no context then just skip....
+        NV_PRINTF(LEVEL_ERROR, "-- No context skipping reset of channel...\n");
+        status = NV_OK;
+    }
+
+    return status;
+
+error_cleanup:
+    rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
+    osReleaseRmSema(pSys->pSema, NULL);
+    return status;
+}
+
+
+NvBool
+krcErrorInvokeCallback_IMPL
+(
+    OBJGPU                  *pGpu,
+    KernelRc                *pKernelRc,
+    KernelChannel           *pKernelChannel,
+    FIFO_MMU_EXCEPTION_DATA *pMmuExceptionData,
+    NvU32                    exceptType,
+    NvU32                    exceptLevel,
+    NvU32                    engineId,
+    NvU32                    rcDiagRecStart
+)
+{
+    OBJSYS             *pSys              = SYS_GET_INSTANCE();
+    Journal            *pRcDB             = SYS_GET_RCDB(pSys);
+    OBJOS              *pOS               = SYS_GET_OS(pSys);
+    KernelMIGManager   *pKernelMigManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu);
+    RmClient           *pClient           = NULL;
+    RsClient           *pRsClient;
+    RC_CALLBACK_STATUS  clientAction;
+    NvU32               localEngineId     = engineId;
+    NvU32               rcDiagRecOwner    = RCDB_RCDIAG_DEFAULT_OWNER;
+    NV_STATUS           status;
+    NvBool              bReturn           = NV_TRUE;
+
+    NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), bReturn);
+    NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelChannel != NULL, bReturn);
+
+    status = serverGetClientUnderLock(&g_resServ,
+                                      RES_GET_CLIENT_HANDLE(pKernelChannel),
+                                      &pRsClient);
+    if (status != NV_OK)
+        return bReturn;
+
+    pClient = dynamicCast(pRsClient, RmClient);
+    if (pClient == NULL)
+        return bReturn;
+
+    //
+    // If SMC is enabled, RM needs to notify partition local engineIds.
+ // Convert global ID to partition local + // + if (IS_MIG_IN_USE(pGpu) && NV2080_ENGINE_TYPE_IS_VALID(engineId) && + kmigmgrIsEnginePartitionable(pGpu, pKernelMigManager, engineId)) + { + MIG_INSTANCE_REF ref; + status = kmigmgrGetInstanceRefFromClient(pGpu, + pKernelMigManager, + pRsClient->hClient, + &ref); + if (status != NV_OK) + return bReturn; + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMigManager, engineId, ref)) + { + // Notifier is requested for an unsupported engine + NV_PRINTF( + LEVEL_ERROR, + "RcErroCallback requested for an unsupported engine (0x%x)\n", + localEngineId); + return bReturn; + } + + // Override the engine type with the local engine idx + status = kmigmgrGetGlobalToLocalEngineType(pGpu, + pKernelMigManager, + ref, + engineId, + &localEngineId); + if (status != NV_OK) + return bReturn; + } + + if (pOS->osCheckCallback(pGpu)) + { + NvHandle hDevice, hFifo; + RC_ERROR_CONTEXT *pRcErrorContext = NULL; + Device *pDevice; + + NV_ASSERT_OK_OR_RETURN( + deviceGetByGpu(pRsClient, pGpu, NV_TRUE, &pDevice)); + + hDevice = RES_GET_HANDLE(pDevice); + + + if (!pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup + ->bAllocatedByRm) + { + hFifo = RES_GET_PARENT_HANDLE(pKernelChannel); + } + else + { + hFifo = RES_GET_HANDLE(pKernelChannel); + } + + pRcErrorContext = portMemAllocNonPaged(sizeof *pRcErrorContext); + if (pRcErrorContext != NULL) + { + portMemSet(pRcErrorContext, 0, sizeof *pRcErrorContext); + + pRcErrorContext->pGpu = pGpu; + pRcErrorContext->ChId = pKernelChannel->ChID; + pRcErrorContext->secChId = 0xFFFFFFFF; + pRcErrorContext->sechClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + pRcErrorContext->exceptType = exceptType; + pRcErrorContext->EngineId = localEngineId; + pRcErrorContext->subdeviceInstance = pGpu->subdeviceInstance; + + if (pMmuExceptionData != NULL) + { + pRcErrorContext->addrLo = pMmuExceptionData->addrLo; + pRcErrorContext->addrHi = pMmuExceptionData->addrHi; + pRcErrorContext->faultType = pMmuExceptionData->faultType; + pRcErrorContext->faultStr = kgmmuGetFaultTypeString_HAL( + GPU_GET_KERNEL_GMMU(pGpu), + pMmuExceptionData->faultType); + } + } + + clientAction = pOS->osRCCallback(pGpu, + RES_GET_CLIENT_HANDLE(pKernelChannel), + hDevice, + hFifo, + RES_GET_HANDLE(pKernelChannel), + exceptLevel, + exceptType, + (NvU32 *)pRcErrorContext, + &krcResetCallback); + + if (clientAction == RC_CALLBACK_IGNORE || + clientAction == RC_CALLBACK_ISOLATE_NO_RESET) + { + if (clientAction == RC_CALLBACK_IGNORE) + { + NV_PRINTF(LEVEL_ERROR, "-- Drivers tells RM to ignore\n"); + } + + // + // if osRCCallback returns RC_HANDLER_ISOLATE_NO_RESET or + // IGNORE, client won't call rcResetChannel to put channel back + // pRcErrorContext has to be released here + // + portMemFree(pRcErrorContext); + } + bReturn = (clientAction != RC_CALLBACK_IGNORE); + } + else + { + // use the new CliNotifyDeviceFifoEvent() notification method + NvRcNotification params; + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NvU64 time; + CLI_CHANNEL_CLASS_INFO classInfo; + + tmrGetCurrentTime(pTmr, &time); + + params.timeStamp.nanoseconds[0] = NvU64_HI32(time); + params.timeStamp.nanoseconds[1] = NvU64_LO32(time); + params.exceptLevel = exceptLevel; + params.exceptType = exceptType; + + // Get rc notifier index from class info + + CliGetChannelClassInfo(RES_GET_EXT_CLASS_ID(pKernelChannel), + &classInfo); + + // notify the Fifo channel based event listeners + kchannelNotifyGeneric(pKernelChannel, + classInfo.rcNotifierIndex, + ¶ms, + sizeof(params)); + } + + // update RC diagnostic 
records with process id and owner + if (rcDiagRecStart != INVALID_RCDB_RCDIAG_INDEX) + { + rcdbUpdateRcDiagRecContext(pRcDB, + rcDiagRecStart, + pRcDB->RcErrRptNextIdx - 1, + pClient->ProcID, + rcDiagRecOwner); + } + return bReturn; +} diff --git a/src/nvidia/src/kernel/gpu/rc/kernel_rc_ctrl.c b/src/nvidia/src/kernel/gpu/rc/kernel_rc_ctrl.c new file mode 100644 index 000000000..03400ed1d --- /dev/null +++ b/src/nvidia/src/kernel/gpu/rc/kernel_rc_ctrl.c @@ -0,0 +1,369 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/rc/kernel_rc.h" + +#include "kernel/diagnostics/journal.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "kernel/rmapi/client.h" +#include "kernel/rmapi/client_resource.h" + +#include "g_all_dcl_pb.h" +#include "lib/protobuf/prb_util.h" + +NV_STATUS +subdeviceCtrlCmdRcReadVirtualMem_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS *pReadVirtMemParam +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + KernelChannel *pKernelChannel; + + if (pReadVirtMemParam->bufferPtr == NvP64_NULL + || (((NvU64)pReadVirtMemParam->bufferPtr) & 0x7) != 0 + || pReadVirtMemParam->bufferSize == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (CliGetKernelChannelWithDevice(RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_PARENT_HANDLE(pSubdevice), + pReadVirtMemParam->hChannel, + &pKernelChannel) == NV_OK) + { + status = krcReadVirtMem(pGpu, GPU_GET_KERNEL_RC(pGpu), + pKernelChannel, + pReadVirtMemParam->virtAddress, + pReadVirtMemParam->bufferPtr, + pReadVirtMemParam->bufferSize); + } + else + { + status = NV_ERR_INVALID_ARGUMENT; + } + + return status; +} + + +NV_STATUS +subdeviceCtrlCmdSetRcInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_RC_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + + if (!((pParams->rcBreak == NV2080_CTRL_CMD_RC_INFO_BREAK_DISABLE || + pParams->rcBreak == NV2080_CTRL_CMD_RC_INFO_BREAK_ENABLE) && + (pParams->rcMode == NV2080_CTRL_CMD_RC_INFO_MODE_DISABLE || + pParams->rcMode == NV2080_CTRL_CMD_RC_INFO_MODE_ENABLE))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pKernelRc->bBreakOnRc = (pParams->rcBreak == + NV2080_CTRL_CMD_RC_INFO_BREAK_ENABLE); + + pKernelRc->bRobustChannelsEnabled = (pParams->rcMode == + 
NV2080_CTRL_CMD_RC_INFO_MODE_ENABLE); + + return NV_OK; +} + + +NV_STATUS +subdeviceCtrlCmdGetRcInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_RC_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + + pParams->rcBreak = pKernelRc->bBreakOnRc ? + NV2080_CTRL_CMD_RC_INFO_BREAK_ENABLE : + NV2080_CTRL_CMD_RC_INFO_BREAK_DISABLE; + + pParams->rcMode = pKernelRc->bRobustChannelsEnabled ? + NV2080_CTRL_CMD_RC_INFO_MODE_ENABLE : + NV2080_CTRL_CMD_RC_INFO_MODE_DISABLE; + + return NV_OK; +} + + +NV_STATUS +krcSubdeviceCtrlGetErrorInfoCheckPermissions_KERNEL +( + KernelRc *pKernelRc, + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (rmclientIsAdminByHandle(RES_GET_CLIENT_HANDLE(pSubdevice), + pCallContext->secInfo.privLevel)) + { + return NV_OK; + } + + if (IS_MIG_IN_USE(pGpu) && + rmclientIsCapableOrAdminByHandle(RES_GET_CLIENT_HANDLE(pSubdevice), + NV_RM_CAP_SYS_SMC_MONITOR, + pCallContext->secInfo.privLevel)) + { + return NV_OK; + } + + return NV_ERR_INSUFFICIENT_PERMISSIONS; +} + + +NV_STATUS +subdeviceCtrlCmdRcGetErrorCount_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + + status = krcSubdeviceCtrlGetErrorInfoCheckPermissions_HAL(pKernelRc, + pSubdevice); + if (status != NV_OK) + { + return status; + } + + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + return pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_RC_GET_ERROR_COUNT, + pParams, + sizeof *pParams); + } + + return krcSubdeviceCtrlCmdRcGetErrorCount(pKernelRc, pSubdevice, pParams); +} + + +NV_STATUS +subdeviceCtrlCmdRcGetErrorV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_RC_GET_ERROR_V2_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + + status = krcSubdeviceCtrlGetErrorInfoCheckPermissions_HAL(pKernelRc, + pSubdevice); + if (status != NV_OK) + { + return status; + } + + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + return pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_RC_GET_ERROR_V2, + pParams, + sizeof *pParams); + } + + return krcSubdeviceCtrlCmdRcGetErrorV2(pKernelRc, pSubdevice, pParams); +} + + +NV_STATUS +krcSubdeviceCtrlCmdRcGetErrorCount_IMPL +( + KernelRc *pKernelRc, + Subdevice *pSubdevice, + NV2080_CTRL_RC_GET_ERROR_COUNT_PARAMS *pErrorCount +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + SYS_ERROR_INFO *pSysErrorInfo = &pRcDB->ErrorInfo; + + pErrorCount->errorCount = pSysErrorInfo->ErrorCount; + + return NV_OK; +} + +NV_STATUS +krcSubdeviceCtrlCmdRcGetErrorV2_IMPL +( + KernelRc *pKernelRc, + Subdevice *pSubdevice, + NV2080_CTRL_RC_GET_ERROR_V2_PARAMS *pErrorParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + SYS_ERROR_INFO *pSysErrorInfo = &pRcDB->ErrorInfo; + RmProtoBuf_RECORD *pProtoBuf = (RmProtoBuf_RECORD *) + pErrorParams->recordBuffer; + NvU8 *pBuffer = pErrorParams->recordBuffer + sizeof(RmProtoBuf_RECORD); + RMPRBERRORELEMENT_V2 *pPrbErrorElement; + 
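    /*
     * Roughly, the body below walks the journal's error list to the
     * whichBuffer'th RMPRBERRORELEMENT_V2 record, then re-encodes that
     * record's error blocks as a DCL_ERRORBLOCK protobuf message placed
     * immediately after an RmProtoBuf_RECORD header inside
     * pErrorParams->recordBuffer. outputRecordSize reports the header plus
     * encoded length and stays zero when no record matches, so a caller
     * can normally step whichBuffer upward until an empty result comes
     * back (a non-protobuf record yields NV_ERR_NOT_SUPPORTED instead).
     */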
RMCD_ERROR_BLOCK *pErrorBlock; + PRB_ENCODER prbEnc; + NV_STATUS status = NV_OK; + NvU32 i; + + // Set Output Size to zero + pErrorParams->outputRecordSize = 0; + + ct_assert(sizeof(pErrorParams->recordBuffer) > sizeof(RmProtoBuf_RECORD)); + + // Check error list + if (pSysErrorInfo->pErrorList == NULL) + { + return status; + } + + // Skip over records up to whichBuffer + pPrbErrorElement = (RMPRBERRORELEMENT_V2 *)pSysErrorInfo->pErrorList; + for (i = 0; (i < pErrorParams->whichBuffer) && (pPrbErrorElement != NULL); + i++) + { + pPrbErrorElement = (RMPRBERRORELEMENT_V2 *) + pPrbErrorElement->ErrorHeader.pNextError; + } + if (pPrbErrorElement == NULL) + { + return status; + } + + // + // There's no data in a protobuf error element - just combine the error + // blocks into a Dcl.DclMsg. + // + if (pPrbErrorElement->RmPrbErrorData.common.Header.cRecordType != + RmPrbErrorInfo_V2) + { + // Can only handle protobuf formatted messages + NV_PRINTF(LEVEL_ERROR, + "NVRM-RC: unknown error element type: %d\n", + pPrbErrorElement->RmPrbErrorData.common.Header.cRecordType); + return NV_ERR_NOT_SUPPORTED; + } + + { + NvU32 dwSize = sizeof(pErrorParams->recordBuffer) - + sizeof(RmProtoBuf_RECORD); + prbEncStart(&prbEnc, DCL_ERRORBLOCK, pBuffer, dwSize, NULL); + } + + // Add each Error Block + for (pErrorBlock = pPrbErrorElement->ErrorHeader.pErrorBlock; + pErrorBlock != NULL; + pErrorBlock = pErrorBlock->pNext) + { + NV_STATUS status1; + status = prbEncNestedStart(&prbEnc, DCL_ERRORBLOCK_DATA); + if (status != NV_OK) + break; + + status = prbEncCatMsg(&prbEnc, + pErrorBlock->pBlock, + pErrorBlock->blockSize); + status1 = prbEncNestedEnd(&prbEnc); + if (status != NV_OK) + break; + + if (status1 != NV_OK) + { + status = status1; + break; + } + } + + { + NvU8 *prbBuffer = NULL; + NvU32 prbLen = prbEncFinish(&prbEnc, (void **)&prbBuffer); + + if (status == NV_OK) + { + pProtoBuf->Header.wRecordSize = sizeof(RmProtoBuf_RECORD); + pProtoBuf->Header.cRecordType = RmProtoBuf; + pProtoBuf->Header.cRecordGroup = RmGroup; + pProtoBuf->dwSize = prbLen; + } + + pErrorParams->outputRecordSize = prbLen + sizeof(RmProtoBuf_RECORD); + } + + return status; +} + + +NV_STATUS +krcCliresCtrlNvdGetRcerrRptCheckPermissions_KERNEL +( + KernelRc *pKernelRc, + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS *pReportParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmClient *pClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + + if (pClient == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + // + // Only a kernel client is allowed to query reports without filter, record + // owners are assigned only on Windows. + // + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL && + pReportParams->owner == RCDB_RCDIAG_DEFAULT_OWNER) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/rc/kernel_rc_misc.c b/src/nvidia/src/kernel/gpu/rc/kernel_rc_misc.c new file mode 100644 index 000000000..c165cddc1 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/rc/kernel_rc_misc.c @@ -0,0 +1,125 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/core/core.h" +#include "kernel/gpu/mem_mgr/virt_mem_allocator.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "kernel/os/os.h" + +#include "gpu/mem_mgr/mem_desc.h" +#include "nverror.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/bus/kern_bus.h" + + +NV_STATUS krcReadVirtMem_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + KernelChannel *pKernelChannel, + NvU64 virtAddr, + NvP64 bufPtr, + NvU32 bufSize +) +{ + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + MEMORY_DESCRIPTOR memDesc; + + NvU32 pageStartOffset; + NvU32 start4kPage; + NvU32 end4kPage; + NvU64 physaddr; + NvU32 memtype; + NvU8 *pMem = NULL; + NvU32 cursize; + NvU32 cur4kPage; + NV_STATUS status = NV_OK; + + pageStartOffset = NvOffset_LO32(virtAddr) & RM_PAGE_MASK; + start4kPage = (NvOffset_LO32(virtAddr) >> 12) & 0x1FFFF; + end4kPage = (NvOffset_LO32(virtAddr + bufSize - 1) >> 12) & 0x1FFFF; + + cursize = RM_PAGE_SIZE - pageStartOffset; + virtAddr &= ~RM_PAGE_MASK; + + for (cur4kPage = start4kPage; cur4kPage <= end4kPage; ++cur4kPage) + { + status = dmaXlateVAtoPAforChannel_HAL(pGpu, pDma, + pKernelChannel, virtAddr, &physaddr, &memtype); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM: va translation failed\n"); + return status; + } + if (memtype == ADDR_SYSMEM) + { + // XXX Fix me! NV_MEMORY_UNCACHED may not be the correct memory type. 
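            /*
             * Worked example of the paging arithmetic above (assuming the
             * 4 KiB RM_PAGE_SIZE implied by the 0xFFF mask and the >>12
             * shifts; the numbers are illustrative, not from the driver):
             *
             *   virtAddr = 0x12345678, bufSize = 0x2000
             *   pageStartOffset = 0x678
             *   start4kPage     = (0x12345678 >> 12) & 0x1FFFF = 0x12345
             *   end4kPage       = (0x12347677 >> 12) & 0x1FFFF = 0x12347
             *
             * so the loop runs three times, copying 0x988 bytes from the
             * first page (4 KiB minus the starting offset), a full 4 KiB
             * page next, and the remaining 0x678 bytes from the last page,
             * advancing virtAddr by one page and bufPtr by cursize on each
             * pass.
             */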
+ pMem = (NvU8*) osMapKernelSpace(physaddr, RM_PAGE_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + } + else if (memtype == ADDR_FBMEM) + { + memdescCreateExisting(&memDesc, + pGpu, + RM_PAGE_SIZE, + ADDR_FBMEM, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE); + memdescDescribe(&memDesc, ADDR_FBMEM, physaddr, RM_PAGE_SIZE); + pMem = kbusMapRmAperture_HAL(pGpu, &memDesc); + } + if (pMem == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + if (cursize > bufSize) + { + cursize = bufSize; + } + + portMemCopy(NvP64_VALUE(bufPtr), + cursize, + pMem + pageStartOffset, + cursize); + + if (memtype == ADDR_SYSMEM) + { + osUnmapKernelSpace(pMem, RM_PAGE_SIZE); + } + else if (memtype == ADDR_FBMEM) + { + kbusUnmapRmAperture_HAL(pGpu, &memDesc, &pMem, NV_TRUE); + memdescDestroy(&memDesc); + } + pMem = NULL; + pageStartOffset = 0; + bufPtr = NvP64_PLUS_OFFSET(bufPtr,cursize); + bufSize -= cursize; + cursize = RM_PAGE_SIZE; + virtAddr += RM_PAGE_SIZE; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/rc/kernel_rc_notification.c b/src/nvidia/src/kernel/gpu/rc/kernel_rc_notification.c new file mode 100644 index 000000000..a541f6784 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/rc/kernel_rc_notification.c @@ -0,0 +1,462 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kernel/gpu/rc/kernel_rc.h" + +#include "kernel/gpu/device/device.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/gpu/gpu.h" +#include "kernel/gpu/mem_mgr/context_dma.h" +#include "kernel/gpu/mem_mgr/virt_mem_allocator_common.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu_mgr/gpu_mgr.h" +#include "kernel/virtualization/hypervisor/hypervisor.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "nverror.h" +#include "nvstatus.h" + +static NV_STATUS +_krcErrorWriteNotifierCpuMemHelper +( + OBJGPU *pGpu, + Memory *pMemory, + NvHandle hClient, + NvU32 exceptType, + NvU32 localEngineType, + NV_STATUS notifierStatus +) +{ + NV_STATUS status = NV_OK; + + switch (memdescGetAddressSpace(pMemory->pMemDesc)) + { + case ADDR_VIRTUAL: + notifyFillNotifierGPUVA(pGpu, + hClient, + RES_GET_HANDLE(pMemory), + memdescGetPhysAddr(pMemory->pMemDesc, AT_GPU_VA, 0), + exceptType, + (NvU16)localEngineType, + notifierStatus, + 0 /* Index */); + break; + + case ADDR_SYSMEM: + case ADDR_FBMEM: + notifyFillNotifierMemory(pGpu, + pMemory, + exceptType, + (NvU16)localEngineType, + notifierStatus, + 0 /* Index */ ); + break; + + default: + status = NV_ERR_NOT_SUPPORTED; + } + return status; +} + + +/*! + * This path is called in GSP_CLIENT, vGPU and MONOLITHIC cases, except in the + * GSP_CLIENT path where GSP has already written to the notifiers. + * In that case, rcErrorSendEventNotifications is called which + * only sends the notifications without actually writing to the error notifiers. + */ +NV_STATUS +krcErrorWriteNotifier_CPU +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + KernelChannel *pKernelChannel, + NvU32 exceptType, + NvU32 localEngineType, + NV_STATUS notifierStatus, + NvU32 *pFlushFlags +) +{ + NV_STATUS status = NV_OK; + ContextDma *pContextDma; + Memory *pMemory; + Device *pDevice; + // + // Update the ECC error notifier for exceptTypes related to + // CONTAINED/UNCONTAINED/DBE errors + // + NvBool bUpdateEccNotifier = ( + exceptType == ROBUST_CHANNEL_GPU_ECC_DBE || + exceptType == ROBUST_CHANNEL_UNCONTAINED_ERROR || + exceptType == ROBUST_CHANNEL_CONTAINED_ERROR); + + status = deviceGetByInstance(RES_GET_CLIENT(pKernelChannel), + gpuGetDeviceInstance(pGpu), + &pDevice); + if (status != NV_OK) + { + return NV_ERR_INVALID_DEVICE; + } + + if (hypervisorIsVgxHyper() && bUpdateEccNotifier && + pKernelChannel->hEccErrorContext != NV01_NULL_OBJECT) + { + // + // Attempt to use a context DMA notification with an event that + // wakes up the listener. If a context DMA is not found, fall back + // to a memory-based notifier without an event. 
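        /*
         * Note that this fallback order is used twice in the function:
         * once here for hEccErrorContext (vGPU host only, and only for the
         * DBE/contained/uncontained exception types above) and once below
         * for hErrorContext. In both cases a context DMA notifier is
         * preferred because it carries an event list that can wake
         * listeners; a memory-backed notifier is written without an event,
         * and anything else leaves an error print and skips the update.
         */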
+ // + if (ctxdmaGetByHandle(RES_GET_CLIENT(pKernelChannel), + pKernelChannel->hEccErrorContext, + &pContextDma) == NV_OK) + { + notifyFillNotifier(pGpu, + pContextDma, + exceptType, + (NvU16)localEngineType, + notifierStatus); + } + else if ((status = memGetByHandleAndDevice( + RES_GET_CLIENT(pKernelChannel), + pKernelChannel->hEccErrorContext, + RES_GET_HANDLE(pDevice), + &pMemory)) == NV_OK) + { + status = _krcErrorWriteNotifierCpuMemHelper(pGpu, + pMemory, + RES_GET_CLIENT_HANDLE(pKernelChannel), + exceptType, + localEngineType, + notifierStatus); + } + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "notified (ECC) channel %d\n", + kchannelGetDebugTag(pKernelChannel)); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "No valid error notifier found for ECC error context 0x%x\n" + "Skipping notification update\n", + pKernelChannel->hEccErrorContext); + } + } + + if (ctxdmaGetByHandle(RES_GET_CLIENT(pKernelChannel), + pKernelChannel->hErrorContext, + &pContextDma) == NV_OK) + { + PEVENTNOTIFICATION pEventNotifications = inotifyGetNotificationList( + staticCast(pContextDma, INotifier)); + notifyFillNotifier(pGpu, + pContextDma, + exceptType, + (NvU16)localEngineType, + notifierStatus); + NV_PRINTF(LEVEL_INFO, "notified channel %d\n", + kchannelGetDebugTag(pKernelChannel)); + if (pEventNotifications) + { + notifyEvents(pGpu, + pEventNotifications, + 0, + 0, + RES_GET_HANDLE(pContextDma), + status, + NV_OS_WRITE_THEN_AWAKEN); + } + } + else if ((status = memGetByHandleAndDevice(RES_GET_CLIENT(pKernelChannel), + pKernelChannel->hErrorContext, + RES_GET_HANDLE(pDevice), + &pMemory)) == NV_OK) + { + status = _krcErrorWriteNotifierCpuMemHelper(pGpu, + pMemory, + RES_GET_CLIENT_HANDLE(pKernelChannel), + exceptType, + localEngineType, + notifierStatus); + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "notified channel %d\n", + kchannelGetDebugTag(pKernelChannel)); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "No valid error notifier found for error context 0x%x\n" + "Skipping notification update\n", + pKernelChannel->hErrorContext); + } + } + return status; +} + + +/*! + * Update a channel's error notifier with exception type, engine id, and an + * error code + * + * Supported notifier types are: CTXDMA, GPU virtual memory, system memory, + * VIDMEM + * + * @param[in] pKernelChannel Channel + * @param[in] exceptType Exception type written to notifier + * @param[in] nv2080EngineType Engine ID written to notifier + * @param[in] scope If we should notify every channel in the TSG + * + * @returns NV_OK if successful + * NV_ERR_INVALID_CHANNEL if the specified channel does not exist + * NV_ERR_INVALID_DEVICE if the channel's device cannot be found + * NV_ERR_NOT_SUPPORTED if the notifier type is not supported + */ +NV_STATUS krcErrorSetNotifier_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + KernelChannel *pKernelChannel, + NvU32 exceptType, + NvU32 nv2080EngineType, + RC_NOTIFIER_SCOPE scope +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 status = NV_OK; + NvU32 flushFlags = 0; + NvBool bNewListCreated = NV_FALSE; + CHANNEL_NODE *pChanNode; + CHANNEL_LIST *pChanList; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + + // + // WAR bug 200326278, 200474671 + // + // Notifying TSG wide causes vGPU failures, so avoid TSG wide notification + // when GPU is Virtualized. 
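    /*
     * The check that follows therefore notifies the whole TSG only when
     * subcontexts are supported and the GPU is not acting as a vGPU host;
     * every other case (including single-channel scope) goes through a
     * temporary one-entry channel list that is torn down at the end of
     * the function when bNewListCreated is set.
     */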
+ // + if (scope == RC_NOTIFIER_SCOPE_TSG && + kfifoIsSubcontextSupported(pKernelFifo) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_VIRTUALIZATION_MODE_HOST_VGPU)) + { + pChanList = (pKernelChannel->pKernelChannelGroupApi + ->pKernelChannelGroup->pChanList); + } + else + { + NV_ASSERT_OK_OR_GOTO(status, + kfifoChannelListCreate(pGpu, pKernelFifo, &pChanList), + Error); + bNewListCreated = NV_TRUE; + + NV_ASSERT_OK_OR_GOTO(status, + kfifoChannelListAppend(pGpu, pKernelFifo, pKernelChannel, pChanList), + Error); + } + + for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext) + { + NvU32 localEngineType; + KernelChannel *pKernelChannel = pChanNode->pKernelChannel; + + // + // If MIG is enabled, RM need to notify partition local engineId. + // Convert global ID to partition local if client has filled proper + // engineIDs + // + localEngineType = nv2080EngineType; + if (IS_MIG_IN_USE(pGpu) && + nv2080EngineType > NV2080_ENGINE_TYPE_NULL) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + + status = kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + RES_GET_CLIENT_HANDLE(pKernelChannel), + &ref); + if (status != NV_OK) + goto Error; + + if (!kmigmgrIsEngineInInstance(pGpu, pKernelMIGManager, nv2080EngineType, ref)) + { + NV_PRINTF(LEVEL_ERROR, + "Notifier requested for an unsupported engine (0x%x)\n", + nv2080EngineType); + status = NV_ERR_INVALID_ARGUMENT; + goto Error; + } + + // Override the engine type with the local engine idx + status = kmigmgrGetGlobalToLocalEngineType(pGpu, pKernelMIGManager, + ref, + nv2080EngineType, + &localEngineType); + if (status != NV_OK) + goto Error; + } + + NV_ASSERT_OK_OR_GOTO(status, + krcErrorWriteNotifier_HAL(pGpu, pKernelRc, + pKernelChannel, + exceptType, + localEngineType, + 0xffff /* notifierStatus */, + &flushFlags), + Error) + } + + if (flushFlags != 0) { + kbusFlush_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), flushFlags); + } + +Error: + if (bNewListCreated) + { + kfifoChannelListDestroy(pGpu, pKernelFifo, pChanList); + } + return status; +} + + +/*! Send notifications for notifiers what were already written to. + * + * GSP writes to notifiers to avoid a race condition between the time when an + * error is handled and when the client receives notifications. However, GSP + * doesn't actually send those notification events to the client in some cases + * as that requires OS interaction. + * + * This function actually sends those notifications to the client. 
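[Editorial sketch] The notifier loop in krcErrorSetNotifier_IMPL above and the firmware-client helper that follows both push either one channel or an entire TSG through the same CHANNEL_LIST walk. Below is a small, compile-able toy version of that control flow; the chan_list/chan_node types and notify_one() are stand-ins for the RM's kfifo channel-list API, not driver code.

#include <stdbool.h>
#include <stdio.h>

struct chan_node { struct chan_node *pNext; int chid; };
struct chan_list { struct chan_node *pHead; };

static void notify_one(int chid) { printf("notify channel %d\n", chid); }

/* Reuse the TSG's own list when notifying TSG-wide, otherwise build a
 * throwaway one-entry list, and tear down only a list created here. */
static void notify_channels(struct chan_list *pTsgList, int chid, bool bTsgWide)
{
    struct chan_list single = { NULL };
    struct chan_node node   = { NULL, chid };
    struct chan_list *pList = pTsgList;
    bool bNewListCreated = false;

    if (!bTsgWide || pTsgList == NULL)
    {
        single.pHead    = &node;          /* throwaway one-entry list */
        pList           = &single;
        bNewListCreated = true;
    }

    for (struct chan_node *pNode = pList->pHead; pNode != NULL; pNode = pNode->pNext)
        notify_one(pNode->chid);

    if (bNewListCreated)
    {
        /* nothing heap-allocated in this sketch; the RM calls
         * kfifoChannelListDestroy() at this point */
        pList->pHead = NULL;
    }
}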
+ * + */ +NV_STATUS +krcErrorSendEventNotificationsCtxDma_FWCLIENT +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + KernelChannel *pKernelChannel, + RC_NOTIFIER_SCOPE scope +) +{ + NV_STATUS status = NV_OK; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvBool bNewListCreated = NV_FALSE; + CHANNEL_NODE *pChanNode; + CHANNEL_LIST *pChanList; + + // + // This function should only be called on paths where GSP has already + // written to notifiers + // + NV_ASSERT_OR_RETURN(IS_GSP_CLIENT(pGpu), NV_ERR_INVALID_STATE); + + // WAR bug 200326278, 200474671 + if (scope == RC_NOTIFIER_SCOPE_TSG && + kfifoIsSubcontextSupported(pKernelFifo)) + { + // Notify all channels in the TSG + pChanList = (pKernelChannel->pKernelChannelGroupApi + ->pKernelChannelGroup->pChanList); + } + else + { + // Setup a fake list for the channel to be notified + status = kfifoChannelListCreate(pGpu, pKernelFifo, &pChanList); + if (status != NV_OK) { + NV_PRINTF(LEVEL_ERROR, "failed to create notification list\n"); + goto Error; + } + bNewListCreated = NV_TRUE; + + status = kfifoChannelListAppend(pGpu, pKernelFifo, pKernelChannel, pChanList); + if (status != NV_OK) { + NV_PRINTF(LEVEL_ERROR, + "failed to insert channel into notification list\n"); + goto Error; + } + } + + for (pChanNode = pChanList->pHead; pChanNode; pChanNode = pChanNode->pNext) + { + ContextDma *pContextDma; + + if (ctxdmaGetByHandle(RES_GET_CLIENT(pChanNode->pKernelChannel), + pChanNode->pKernelChannel->hErrorContext, + &pContextDma) == NV_OK) + { + EVENTNOTIFICATION *pEventNotifications = inotifyGetNotificationList( + staticCast(pContextDma, INotifier)); + if (pEventNotifications != NULL) + { + notifyEvents(pGpu, pEventNotifications, 0, 0, + RES_GET_HANDLE(pContextDma), status, + NV_OS_WRITE_THEN_AWAKEN); + } + } + } + +Error: + if (bNewListCreated) + { + kfifoChannelListDestroy(pGpu, pKernelFifo, pChanList); + } + return status; +} + + +NV_STATUS +krcErrorSendEventNotifications_KERNEL +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + KernelChannel *pKernelChannel, + NvU32 engineId, // unused + NvU32 exceptType, + RC_NOTIFIER_SCOPE scope, + NvU16 partitionAttributionId +) +{ + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + + NV_ASSERT_OK_OR_RETURN( + krcErrorSendEventNotificationsCtxDma_HAL(pGpu, pKernelRc, + pKernelChannel, + scope)); + + gpuNotifySubDeviceEvent(pGpu, + NV2080_NOTIFIERS_RC_ERROR, + NULL, + 0, + exceptType, + partitionAttributionId); + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog.c b/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog.c new file mode 100644 index 000000000..b769aa631 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog.c @@ -0,0 +1,1308 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/core/core.h" +#include "kernel/core/locks.h" +#include "kernel/gpu/mem_mgr/heap.h" +#include "kernel/gpu/mem_mgr/mem_mgr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "kernel/gpu/bif/kernel_bif.h" +#include "kernel/os/os.h" + +#include "class/cl0000.h" // NV01_NULL_OBJECT +#include "class/cl0002.h" // NV01_CONTEXT_DMA +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "class/cl0070.h" // NV01_MEMORY_VIRTUAL +#include "class/cl0080.h" // NV01_DEVICE_0 +#include "class/cl2080.h" // NV20_SUBDEVICE_0 +#include "class/cl902d.h" // FERMI_TWOD_A +#include "class/cl906f.h" // GF100_CHANNEL_GPFIFO +#include "class/cla06f.h" // KEPLER_CHANNEL_GPFIFO_A +#include "class/cla06fsubch.h" +#include "class/cla16f.h" // KEPLER_CHANNEL_GPFIFO_B +#include "class/clb06f.h" // MAXWELL_CHANNEL_GPFIFO_A +#include "class/clc06f.h" // PASCAL_CHANNEL_GPFIFO_A +#include "class/clc36f.h" // VOLTA_CHANNEL_GPFIFO_A +#include "class/clc46f.h" // TURING_CHANNEL_GPFIFO_A +#include "class/clc56f.h" // AMPERE_CHANNEL_GPFIFO_A + +#include "deprecated/rmapi_deprecated.h" +#include "nvRmReg.h" + + +// +// Watchdog object ids +// +#define WATCHDOG_PUSHBUFFER_CHANNEL_ID 0x31415900 +#define WATCHDOG_NOTIFIER_DMA_ID (WATCHDOG_PUSHBUFFER_CHANNEL_ID + 2) +#define WATCHDOG_DEVICE_ID (WATCHDOG_PUSHBUFFER_CHANNEL_ID + 3) +#define WATCHDOG_SUB_DEVICE_0_ID (WATCHDOG_PUSHBUFFER_CHANNEL_ID + 4) +#define WATCHDOG_GROBJ_ID (WATCHDOG_SUB_DEVICE_0_ID + NV_MAX_SUBDEVICES) +#define WATCHDOG_ERROR_DMA_ID (WATCHDOG_GROBJ_ID + 1) +#define WATCHDOG_MEM_ID (WATCHDOG_GROBJ_ID + 2) +#define WATCHDOG_VIRTUAL_CTX_ID (WATCHDOG_GROBJ_ID + 3) +#define WATCHDOG_USERD_PHYS_MEM_ID (WATCHDOG_GROBJ_ID + 4) + +// Push buffer size in dwords +#define WATCHDOG_PUSHBUF_SIZE 128 + +// Default watchdog pushbuffer size (if no PERF engine) +#define WATCHDOG_PB_SIZE_DEFAULT 0xC000 + +#define WATCHDOG_PUSHBUFFERS 2 +#define WATCHDOG_GPFIFO_ENTRIES 4 +#define WATCHDOG_GRAPHICS_NOTIFIERS 3 + +#define GPFIFO_ALIGN NV906F_GP_ENTRY__SIZE +#define NOTIFIER_ALIGN 16 + +#define WATCHDOG_GPFIFO_OFFSET(pbBytes) \ + ((((pbBytes)*WATCHDOG_PUSHBUFFERS) + (GPFIFO_ALIGN - 1)) & \ + ~(GPFIFO_ALIGN - 1)) + +#define WATCHDOG_BEGINNING_NOTIFIER_OFFSET(pbBytes) \ + (((WATCHDOG_GPFIFO_OFFSET(pbBytes) + \ + (WATCHDOG_GPFIFO_ENTRIES * NV906F_GP_ENTRY__SIZE)) + \ + (NOTIFIER_ALIGN - 1)) & \ + ~(NOTIFIER_ALIGN - 1)) + +#define WATCHDOG_ERROR_NOTIFIER_OFFSET(pbBytes) \ + (WATCHDOG_BEGINNING_NOTIFIER_OFFSET(pbBytes)) + +#define WATCHDOG_NOTIFIER_OFFSET(pbBytes, gpuIndex, notifier) \ + (WATCHDOG_BEGINNING_NOTIFIER_OFFSET(pbBytes) + \ + (sizeof(NvNotification) * NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1) + \ + ((gpuIndex) * sizeof(NvNotification) * WATCHDOG_GRAPHICS_NOTIFIERS) + \ + (sizeof(NvNotification) * (notifier))) + +#define WATCHDOG_WORK_SUBMIT_TOKEN_OFFSET(pbBytes) \ + ((WATCHDOG_BEGINNING_NOTIFIER_OFFSET(pbBytes)) + \ + 
NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN * \ + sizeof(NvNotification)) + +#define WATCHDOG_PUSHBUFFER_OFFSET(pbBytes, pbnum) ((pbBytes) * (pbnum)) + +#define SUBDEVICE_MASK_ALL DRF_MASK(NV906F_DMA_SET_SUBDEVICE_MASK_VALUE) + + +NV_STATUS +krcWatchdogChangeState_IMPL +( + KernelRc *pKernelRc, + Subdevice *pSubdevice, + RC_CHANGE_WATCHDOG_STATE_OPERATION_TYPE operation +) +{ + // + // Provide automatic management of RC watchdog enabling and disabling. + // Provide for cooperation between RM clients, and allow for independent + // behavior or multiple client and multiple GPUs. + // + // RM clients can use the NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG and related API + // calls to request enabling or disabling of the RM watchdog, per GPU. + // Whether or not the watchdog is actually enabled or disabled, however, + // depends upon whether or not other, conflicting requests are already in + // force. + // + // Some background as to how this is normally used: + // + // -- Normally, some clients (such as X) wants the watchdog running. + // -- Normally, CUDA wants the watchdog disabled. + // -- When the RM initializes, it sets the watchog to disabled. + // -- X will normally tell the RM, for each GPU that it manages, to enable + // the watchdog. + // -- Each CUDA client normally will tell the RM, for each GPU that it + // manages, to disable the watchdog. + // -- X will have options that provide for either *not* enabling the + // watchdog, or at least, not blocking another client from disabling the + // watchdog. + // -- Likewise, CUDA will have an option that provides for either enabling + // the watchdog, or at least, not blocking another client from enabling + // the watchdog. + // + // The watchdog is not allowed to transition directly between ENABLED and + // DISABLED states. It must go through a "don't care" state, in between: + // + // ENABLED <--> DON'T-CARE <--> DISABLED + // + // Each of the three states may be reached with an associated RM API call: + // + // NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG: ENABLED state + // NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG: DISABLED state + // NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS: DON'T-CARE state + // + // In addition, RM client destruction leads directly to the DON'T-CARE + // state. This allows good behavior and cooperation between possibly + // conflicting RM clients. + // + // Basic operation: + // + // ENABLE requests: Increment enableRequestsRefCount, disallow disable + // operations from any client, but *allow* additional enable operations + // from any client. + // + // DISABLE requests: Increment disableRequestsRefCount, disallow enable + // operations from any client, but *allow* additional disable operations + // from any client. + // + // CLIENT DESTRUCTION requests: Decrement the enableRequestsRefCount if the + // client had an existing ENABLE request when it was destroyed. Reduce the + // disableRequestsRefCount if the client had an existing DISABLE request + // when it was destroyed. + // + // RELEASE requests: Possibly reduce the refCount, just as if the client had + // been destroyed. This is convenenient for client such as MODS, that tend + // to make multiple calls to enable and disable the watchdog, within the + // lifetime of a single RM client. 
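    //
    // A concrete (hypothetical) sequence may make the refcount rules above
    // easier to follow:
    //
    //   1. Client X issues NV2080_CTRL_CMD_RC_ENABLE_WATCHDOG.
    //      disableRequestsRefCount is 0, so the request is accepted;
    //      enableRequestsRefCount goes 0 -> 1 and the watchdog is enabled.
    //
    //   2. Client Y issues NV2080_CTRL_CMD_RC_DISABLE_WATCHDOG.
    //      enableRequestsRefCount is non-zero, so the call fails with
    //      NV_ERR_STATE_IN_USE and no refcounts change.
    //
    //   3. Client X issues NV2080_CTRL_CMD_RC_RELEASE_WATCHDOG_REQUESTS
    //      (or is destroyed). enableRequestsRefCount goes 1 -> 0, the state
    //      returns to DON'T-CARE, and Y's disable request would now be
    //      accepted on retry.
    //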
+ // + // + NvBool bCurrentEnableRequest = NV_FALSE; + NvBool bCurrentDisableRequest = NV_FALSE; + NvBool bCurrentSoftDisableRequest = NV_FALSE; + NvS32 prevEnableRefCount = pKernelRc->watchdogPersistent.enableRequestsRefCount; + NvS32 prevDisableRefCount = pKernelRc->watchdogPersistent.disableRequestsRefCount; + NvS32 prevSoftDisableRefCount = pKernelRc->watchdogPersistent.softDisableRequestsRefCount; + NvBool bPrevEnableRequest = pSubdevice->bRcWatchdogEnableRequested; + NvBool bPrevDisableRequest = pSubdevice->bRcWatchdogDisableRequested; + NvBool bPrevSoftDisableRequest = pSubdevice->bRcWatchdogSoftDisableRequested; + OBJGPU *pGpu = ENG_GET_GPU(pKernelRc); + const char *opstring; + + switch (operation) + { + case RMAPI_ENABLE_REQUEST: + bCurrentEnableRequest = NV_TRUE; + bCurrentDisableRequest = NV_FALSE; + bCurrentSoftDisableRequest = NV_FALSE; + opstring = "enable watchdog"; + break; + + case RMAPI_SOFT_DISABLE_REQUEST: + bCurrentEnableRequest = NV_FALSE; + bCurrentDisableRequest = NV_FALSE; + bCurrentSoftDisableRequest = NV_TRUE; + opstring = "soft disable watchdog"; + break; + + case RMAPI_DISABLE_REQUEST: + bCurrentEnableRequest = NV_FALSE; + bCurrentDisableRequest = NV_TRUE; + bCurrentSoftDisableRequest = NV_FALSE; + opstring = "disable watchdog"; + break; + + case RMAPI_RELEASE_ALL_REQUESTS: + bCurrentEnableRequest = NV_FALSE; + bCurrentDisableRequest = NV_FALSE; + bCurrentSoftDisableRequest = NV_FALSE; + opstring = "release all requests"; + break; + + case RM_CLIENT_DESTRUCTION: + bCurrentEnableRequest = NV_FALSE; + bCurrentDisableRequest = NV_FALSE; + bCurrentSoftDisableRequest = NV_FALSE; + opstring = "destroy RM client"; + break; + + default: + NV_ASSERT(0); + return NV_ERR_INVALID_ARGUMENT; + break; + } + // -Wunused-but-set-variable nonsense if NV_PRINTF is compiled out + (void)opstring; + + + // + // Step 1: check for conflicting requests, and bail out without changing + // client state or watchdog state, if there are any such conflicts. We don't + // consider the soft disable requests for conflicts, since they won't be + // applied anyway, but we do still want them to be counted for when the + // conflicting request is released - we'll fall back to the soft-disabled + // state then. 
+ // + if ((pKernelRc->watchdogPersistent.disableRequestsRefCount != 0 && + bCurrentEnableRequest) || + (pKernelRc->watchdogPersistent.enableRequestsRefCount != 0 && + bCurrentDisableRequest)) + { + NV_PRINTF(LEVEL_ERROR, + "Cannot %s on GPU 0x%x, due to another client's request\n" + "(Enable requests: %d, Disable requests: %d)\n", + opstring, + pGpu->gpuId, + pKernelRc->watchdogPersistent.enableRequestsRefCount, + pKernelRc->watchdogPersistent.disableRequestsRefCount); + + return NV_ERR_STATE_IN_USE; + } + + NV_PRINTF(LEVEL_INFO, + "(before) op: %s, GPU 0x%x, enableRefCt: %d, disableRefCt: %d, softDisableRefCt: %d, WDflags: 0x%x\n", + opstring, + pGpu->gpuId, + pKernelRc->watchdogPersistent.enableRequestsRefCount, + pKernelRc->watchdogPersistent.disableRequestsRefCount, + pKernelRc->watchdogPersistent.softDisableRequestsRefCount, + pKernelRc->watchdog.flags); + + // Step 2: if client state has changed, adjust the per-GPU/RC refcount: + if (!bPrevEnableRequest && bCurrentEnableRequest) + { + ++pKernelRc->watchdogPersistent.enableRequestsRefCount; + } + else if (bPrevEnableRequest && !bCurrentEnableRequest) + { + --pKernelRc->watchdogPersistent.enableRequestsRefCount; + } + + if (!bPrevDisableRequest && bCurrentDisableRequest) + { + ++pKernelRc->watchdogPersistent.disableRequestsRefCount; + } + else if (bPrevDisableRequest && !bCurrentDisableRequest) + { + --pKernelRc->watchdogPersistent.disableRequestsRefCount; + } + + if (!bPrevSoftDisableRequest && bCurrentSoftDisableRequest) + { + ++pKernelRc->watchdogPersistent.softDisableRequestsRefCount; + } + else if (bPrevSoftDisableRequest && !bCurrentSoftDisableRequest) + { + --pKernelRc->watchdogPersistent.softDisableRequestsRefCount; + } + + // Step 3: record client state: + pSubdevice->bRcWatchdogEnableRequested = bCurrentEnableRequest; + pSubdevice->bRcWatchdogDisableRequested = bCurrentDisableRequest; + pSubdevice->bRcWatchdogSoftDisableRequested = bCurrentSoftDisableRequest; + + // + // Step 4: if per-GPU/RC refcount has changed from 0 to 1, then change the + // watchdog state: + // + if (pKernelRc->watchdogPersistent.enableRequestsRefCount == 1 && + prevEnableRefCount == 0 && + pKernelRc->watchdogPersistent.disableRequestsRefCount == 0) + { + // Enable the watchdog: + krcWatchdogEnable(pKernelRc, NV_FALSE /* bOverRide */); + } + else if (pKernelRc->watchdogPersistent.disableRequestsRefCount == 1 && + prevDisableRefCount == 0 && + pKernelRc->watchdogPersistent.enableRequestsRefCount == 0) + { + // Disable the watchdog: + krcWatchdogDisable(pKernelRc); + } + else if ((pKernelRc->watchdogPersistent.enableRequestsRefCount == 0) && + (pKernelRc->watchdogPersistent.disableRequestsRefCount == 0) && + ((prevEnableRefCount > 0) || (prevSoftDisableRefCount == 0)) && + (pKernelRc->watchdogPersistent.softDisableRequestsRefCount > 0)) + { + // + // Go back to disabled if all of the below are true: + // (1) there are no outstanding enable or disable requests, + // (2) the change is the release of the last enable request OR + // there were previously no soft disable requests + // (3) there are now one or more outstanding soft disable requests + // (including the one currently being refcounted. 
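        //
        // For example: one client has an outstanding soft-disable request
        // (softDisableRequestsRefCount == 1) while another holds the only
        // enable request. When that enable request is released,
        // prevEnableRefCount goes 1 -> 0, both hard refcounts are now 0,
        // and the soft request is still pending, so all three conditions
        // hold and the watchdog is switched back off.
        //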
+ // + krcWatchdogDisable(pKernelRc); + } + + NV_PRINTF(LEVEL_INFO, + "(after) op: %s, GPU 0x%x, enableRefCt: %d, disableRefCt: %d, softDisableRefCt: %d, WDflags: 0x%x\n", + opstring, + pGpu->gpuId, + pKernelRc->watchdogPersistent.enableRequestsRefCount, + pKernelRc->watchdogPersistent.disableRequestsRefCount, + pKernelRc->watchdogPersistent.softDisableRequestsRefCount, + pKernelRc->watchdog.flags); + + return NV_OK; +} + + +void +krcWatchdogDisable_IMPL +( + KernelRc *pKernelRc +) +{ + pKernelRc->watchdog.flags |= WATCHDOG_FLAGS_DISABLED; +} + + +void +krcWatchdogEnable_IMPL +( + KernelRc *pKernelRc, + NvBool bOverRide +) +{ + // + // Make sure no operations are pending from before + // if bOverRide is NV_TRUE then we are enabling from a modeswitch + // + if (bOverRide) + pKernelRc->watchdog.deviceResetRd = pKernelRc->watchdog.deviceResetWr; + + pKernelRc->watchdog.flags &= ~WATCHDOG_FLAGS_DISABLED; +} + + +NV_STATUS +krcWatchdogShutdown_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (!(pKernelRc->watchdog.flags & WATCHDOG_FLAGS_INITIALIZED)) + return NV_OK; + + krcWatchdogDisable(pKernelRc); + osRemove1SecondRepeatingCallback(pGpu, + krcWatchdogTimerProc, + NULL /* pData */); + + // This should free the client and all associated resources + pRmApi->Free(pRmApi, + pKernelRc->watchdog.hClient, + pKernelRc->watchdog.hClient); + + // + // Make sure to clear any old watchdog data this also clears + // WATCHDOG_FLAGS_INITIALIZED + // + portMemSet(&pKernelRc->watchdog, 0, sizeof pKernelRc->watchdog); + portMemSet(&pKernelRc->watchdogChannelInfo, 0, + sizeof pKernelRc->watchdogChannelInfo); + + return NV_OK; +} + + +void krcWatchdogGetReservationCounts_IMPL +( + KernelRc *pKernelRc, + NvS32 *pEnable, + NvS32 *pDisable, + NvS32 *pSoftDisable +) +{ + if (pEnable != NULL) + *pEnable = pKernelRc->watchdogPersistent.enableRequestsRefCount; + + if (pDisable != NULL) + *pDisable = pKernelRc->watchdogPersistent.disableRequestsRefCount; + + if (pSoftDisable != NULL) + *pSoftDisable = pKernelRc->watchdogPersistent .softDisableRequestsRefCount; +} + + +NV_STATUS +krcWatchdogInit_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + NvHandle hClient; + NvU32 subDeviceInstance; + NvU32 grObj; + NvU32 gpfifoObj; + NvU32 pushBufBytes; + NvU32 allocationSize; + NvU32 ctrlSize; + NvU32 flags; + NV_STATUS status; + KernelChannel *pKernelChannel; + NvBool bCacheSnoop; + RM_API *pRmApi = rmGpuLockIsOwner() ? 
+ rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL) : + rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + NvBool bClientUserd = IsVOLTAorBetter(pGpu); + NvBool bAcquireLock = NV_FALSE; + + union + { + NV0080_ALLOC_PARAMETERS nv0080; + NV2080_ALLOC_PARAMETERS nv2080; + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS channelGPFifo; + NV_CONTEXT_DMA_ALLOCATION_PARAMS ctxDma; + NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS virtual; + NV_MEMORY_ALLOCATION_PARAMS mem; + } *pParams = NULL; + + // If booting in SMC mode, skip watchdog init since TWOD is not supported + NV_CHECK_OR_RETURN(LEVEL_SILENT, + !IS_MIG_ENABLED(pGpu) && + gpuIsClassSupported(pGpu, FERMI_TWOD_A), + NV_OK); + + if (pKernelRc->watchdog.flags & + (WATCHDOG_FLAGS_DISABLED | WATCHDOG_FLAGS_INITIALIZED)) + { + return NV_OK; + } + + if (bClientUserd) + { + Heap *pHeap = GPU_GET_HEAP(pGpu); + if (pHeap->pmaObject.bNuma) + { + // PMA can't be used until it's onlined + bClientUserd = NV_FALSE; + } + } + + portMemSet(&pKernelRc->watchdogChannelInfo, 0, + sizeof pKernelRc->watchdogChannelInfo); + + // Allocate a root. + { + hClient = NV01_NULL_OBJECT; + if (pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT /* hClient */, + NV01_NULL_OBJECT /* hParent */, + NV01_NULL_OBJECT /* hObject */, + NV01_ROOT, + &hClient) != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Unable to allocate a watchdog client\n"); + return NV_ERR_GENERIC; + } + + pParams = portMemAllocNonPaged(sizeof *pParams); + if (pParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto error; + } + } + + // Alloc device + { + NV0080_ALLOC_PARAMETERS *pNv0080 = &pParams->nv0080; + + portMemSet(pNv0080, 0, sizeof *pNv0080); + pNv0080->deviceId = gpuGetDeviceInstance(pGpu); + pNv0080->hClientShare = hClient; + + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */, + hClient /* hParent */, + WATCHDOG_DEVICE_ID /* hObject */, + NV01_DEVICE_0, + pNv0080); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Unable to allocate a watchdog device\n"); + goto error; + } + } + + // Alloc subdevices + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE) + { + NV2080_ALLOC_PARAMETERS *pNv2080 = &pParams->nv2080; + + subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + portMemSet(pNv2080, 0, sizeof *pNv2080); + pNv2080->subDeviceId = subDeviceInstance; + + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */, + WATCHDOG_DEVICE_ID /* hParent */, + (WATCHDOG_SUB_DEVICE_0_ID + subDeviceInstance) /* hObject */, + NV20_SUBDEVICE_0, + pNv2080); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to allocate a watchdog subdevice\n"); + SLI_LOOP_GOTO(error); + } + } + SLI_LOOP_END + + // + // Determine what class to allocate so we will know whether to use + // context DMAs. 
Context DMAs are not allowed on any gpu after Fermi + // + if (gpuIsClassSupported(pGpu, FERMI_TWOD_A)) + { + grObj = FERMI_TWOD_A; + } + else + { + grObj = NV01_NULL_OBJECT; // Null object will kill RmAllocObject + } + + { + const struct + { + NvU32 gpfifoObject; + NvLength ctrlSize; + } gpfifoMapping[] = { + {KEPLER_CHANNEL_GPFIFO_B, sizeof(NvA16FControl)} + , {KEPLER_CHANNEL_GPFIFO_A, sizeof(NvA06FControl)} + , {MAXWELL_CHANNEL_GPFIFO_A, sizeof(Nvb06FControl)} + , {PASCAL_CHANNEL_GPFIFO_A, sizeof(Nvc06fControl)} + , {VOLTA_CHANNEL_GPFIFO_A, sizeof(Nvc36fControl)} + , {TURING_CHANNEL_GPFIFO_A, sizeof(Nvc46fControl)} + , {AMPERE_CHANNEL_GPFIFO_A, sizeof(Nvc56fControl)} + }; + + NvU32 i; + + // Defaults if none match + gpfifoObj = GF100_CHANNEL_GPFIFO; + ctrlSize = sizeof(Nv906fControl); + pKernelRc->watchdogChannelInfo.class2dSubch = 0; + + for (i = 0; i < NV_ARRAY_ELEMENTS(gpfifoMapping); ++i) + { + if (gpuIsClassSupported(pGpu, gpfifoMapping[i].gpfifoObject)) + { + gpfifoObj = gpfifoMapping[i].gpfifoObject; + ctrlSize = gpfifoMapping[i].ctrlSize; + pKernelRc->watchdogChannelInfo + .class2dSubch = NVA06F_SUBCHANNEL_2D; + break; + } + } + } + + // RMCONFIG: only if PERF engine is enabled + if (RMCFG_MODULE_KERNEL_PERF) + { + pushBufBytes = WATCHDOG_PUSHBUF_SIZE * 4; + } + else + { + pushBufBytes = WATCHDOG_PB_SIZE_DEFAULT; + } + + // Allocate a virtual context handle + { + NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS *pVirtual = &pParams->virtual; + + portMemSet(pVirtual, 0, sizeof *pVirtual); + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */, + WATCHDOG_DEVICE_ID /* hParent */, + WATCHDOG_VIRTUAL_CTX_ID /* hObject */, + NV01_MEMORY_VIRTUAL, + pVirtual); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to allocate unified heap for watchdog\n"); + goto error; + } + } + + // + // Calculate the system memory allocation size based on size of push + // buffers, notifers, GPFIFOs, etc., taking alignment requirements into + // consideration. + // + pKernelRc->watchdogChannelInfo.pbBytes = pushBufBytes; + pushBufBytes *= WATCHDOG_PUSHBUFFERS; + allocationSize = (NvU32)( + pushBufBytes + + ((WATCHDOG_GPFIFO_ENTRIES * NV906F_GP_ENTRY__SIZE) + GPFIFO_ALIGN) + + ((sizeof(NvNotification) + NOTIFIER_ALIGN) * + NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1) + + (sizeof(NvNotification) * WATCHDOG_GRAPHICS_NOTIFIERS * + NV_MAX_SUBDEVICES)); + + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + bCacheSnoop = FLD_TEST_REF(BIF_DMA_CAPS_SNOOP, _CTXDMA, + kbifGetDmaCaps(pGpu, pKernelBif)); + + // Allocate memory for the notifiers and pushbuffer. + flags = ((bCacheSnoop ? 
DRF_DEF(OS02, _FLAGS, _COHERENCY, _CACHED) : + DRF_DEF(OS02, _FLAGS, _COHERENCY, _UNCACHED)) | + DRF_DEF(OS02, _FLAGS, _LOCATION, _PCI) | + DRF_DEF(OS02, _FLAGS, _PHYSICALITY, _NONCONTIGUOUS)); + if ((pKernelRc->watchdog.flags & WATCHDOG_FLAGS_ALLOC_UNCACHED_PCI) != 0) + { + flags = FLD_SET_DRF(OS02, _FLAGS, _COHERENCY, _UNCACHED, flags); + } + + { + NV_MEMORY_ALLOCATION_PARAMS *pMem = &pParams->mem; + + portMemSet(pMem, 0, sizeof *pMem); + pMem->owner = HEAP_OWNER_RM_CLIENT_GENERIC; + pMem->size = allocationSize; + pMem->type = NVOS32_TYPE_IMAGE; + + // TODO - migrate to use newer OS32 flags inline JIRA CORERM-4212 + status = RmDeprecatedConvertOs02ToOs32Flags(flags, + &pMem->attr, + &pMem->attr2, + &pMem->flags); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid flags\n"); + goto error; + } + + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */, + WATCHDOG_DEVICE_ID /* hParent */, + WATCHDOG_MEM_ID /* hObject */, + NV01_MEMORY_SYSTEM, + pMem); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to allocate system memory for watchdog\n"); + goto error; + } + + status = pRmApi->MapToCpu( pRmApi, + hClient /* hClient */, + WATCHDOG_DEVICE_ID /* hDevice */, + WATCHDOG_MEM_ID /* hMemory */, + 0 /* offset */, + pMem->size /* length */, + (void **)&pKernelRc->watchdogChannelInfo.pCpuAddr, + 0 /* flags */); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to map system memory for watchdog\n"); + goto error; + } + + portMemSet(pKernelRc->watchdogChannelInfo.pCpuAddr, 0, pMem->size); + + // Map the allocation into the unified heap. + status = pRmApi->Map(pRmApi, + hClient /* hClient */, + WATCHDOG_DEVICE_ID /* hDevice */, + WATCHDOG_VIRTUAL_CTX_ID /* hMemctx */, + WATCHDOG_MEM_ID /* hMemory */, + 0 /* offset */, + allocationSize /* length */, + (bCacheSnoop ? DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE) : + DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _DISABLE)) | + DRF_DEF(OS46, _FLAGS, _ACCESS, _READ_WRITE), + &pKernelRc->watchdogChannelInfo.pGpuAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unable to map system memory into watchdog's heap\n"); + goto error; + } + } + + // Allocate the error notifier context DMA. + { + NV_CONTEXT_DMA_ALLOCATION_PARAMS *pCtxDma = &pParams->ctxDma; + + portMemSet(pCtxDma, 0, sizeof *pCtxDma); + pCtxDma->hSubDevice = 0; + pCtxDma->flags = (bCacheSnoop ? + DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _ENABLE) : + DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _DISABLE)) | + DRF_DEF(OS03, _FLAGS, _ACCESS, _READ_WRITE) | + DRF_DEF(OS03, _FLAGS, _MAPPING, _KERNEL) | + DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + pCtxDma->hMemory = WATCHDOG_MEM_ID; + pCtxDma->offset = WATCHDOG_ERROR_NOTIFIER_OFFSET( + pKernelRc->watchdogChannelInfo.pbBytes); + pCtxDma->limit = ((NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1 * + sizeof(NvNotification)) - + 1); + + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */ , + WATCHDOG_DEVICE_ID /* hParent */ , + WATCHDOG_ERROR_DMA_ID /* hObject */, + NV01_CONTEXT_DMA, + pCtxDma); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to set up watchdog's error context\n"); + goto error; + } + } + + // Allocate the graphics notifier context DMA. + { + NV_CONTEXT_DMA_ALLOCATION_PARAMS *pCtxDma = &pParams->ctxDma; + + portMemSet(pCtxDma, 0, sizeof *pCtxDma); + pCtxDma->hSubDevice = 0; + pCtxDma->flags = (bCacheSnoop ? 
+ DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _ENABLE) : + DRF_DEF(OS03, _FLAGS, _CACHE_SNOOP, _DISABLE)) | + DRF_DEF(OS03, _FLAGS, _ACCESS, _READ_WRITE) | + DRF_DEF(OS03, _FLAGS, _HASH_TABLE, _DISABLE); + pCtxDma->hMemory = WATCHDOG_MEM_ID; + pCtxDma->offset = WATCHDOG_NOTIFIER_OFFSET( + pKernelRc->watchdogChannelInfo.pbBytes, + 0 /* gpuIndex */, + 0 /* notifier */); + pCtxDma->limit = ((sizeof(NvNotification) * + WATCHDOG_GRAPHICS_NOTIFIERS * NV_MAX_SUBDEVICES) - + 1); + + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */, + WATCHDOG_DEVICE_ID /* hParent */, + WATCHDOG_NOTIFIER_DMA_ID /* hObject */, + NV01_CONTEXT_DMA, + pCtxDma); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Unable to set up watchdog's notifier\n"); + goto error; + } + } + + if (bClientUserd) + { + NV_MEMORY_ALLOCATION_PARAMS *pMem = &pParams->mem; + NvU32 userdMemClass = NV01_MEMORY_LOCAL_USER; + + portMemSet(pMem, 0, sizeof *pMem); + pMem->owner = HEAP_OWNER_RM_CLIENT_GENERIC; + pMem->size = ctrlSize; + pMem->type = NVOS32_TYPE_IMAGE; + + // Apply registry overrides to USERD. + switch (DRF_VAL(_REG_STR_RM, _INST_LOC, _USERD, pGpu->instLocOverrides)) + { + case NV_REG_STR_RM_INST_LOC_USERD_COH: + case NV_REG_STR_RM_INST_LOC_USERD_NCOH: + userdMemClass = NV01_MEMORY_SYSTEM; + pMem->attr = DRF_DEF(OS32, _ATTR, _LOCATION, _PCI); + break; + + case NV_REG_STR_RM_INST_LOC_USERD_VID: + case NV_REG_STR_RM_INST_LOC_USERD_DEFAULT: + pMem->attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + break; + } + + // + // Allocate memory using vidHeapControl + // + // vidHeapControl calls should happen outside GPU locks. This is a PMA + // requirement as memory allocation calls may invoke eviction which UVM + // could get stuck behind GPU lock + // + if (userdMemClass == NV01_MEMORY_LOCAL_USER && rmGpuLockIsOwner()) + { + bAcquireLock = NV_TRUE; + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + // + // Using device handle since VGPU doesnt support subdevice memory + // allocations + // + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */, + WATCHDOG_DEVICE_ID /* hParent */, + WATCHDOG_USERD_PHYS_MEM_ID /* hObject */, + userdMemClass, + pMem); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to allocate video memory for USERD\n"); + goto error; + } + } + + { + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *pChannelGPFifo = + &pParams->channelGPFifo; + + // + // RmAllocChannel recognizes our handle and attempts to give us + // channel 30. This is not guaranteed; we could theoretically get any + // channel. 
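        /*
         * At this point the watchdog client owns: a device and one
         * subdevice per GPU, a virtual-memory context, a system-memory
         * block mapped both to the CPU and into that virtual context
         * (pushbuffers + GPFIFO + notifiers), context DMAs for the error
         * and graphics notifiers, and optionally a USERD allocation. The
         * GPFIFO channel allocated below ties these together through
         * hObjectError/hObjectBuffer/hUserdMemory.
         */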
+ // + portMemSet(pChannelGPFifo, 0, sizeof *pChannelGPFifo); + pChannelGPFifo->hObjectError = WATCHDOG_ERROR_DMA_ID; + pChannelGPFifo->hObjectBuffer = WATCHDOG_VIRTUAL_CTX_ID; + pChannelGPFifo->gpFifoOffset = ( + pKernelRc->watchdogChannelInfo.pGpuAddr + + WATCHDOG_GPFIFO_OFFSET(pKernelRc->watchdogChannelInfo.pbBytes)); + pChannelGPFifo->gpFifoEntries = WATCHDOG_GPFIFO_ENTRIES; + + // 2d object is only suppported on GR0 + pChannelGPFifo->engineType = NV2080_ENGINE_TYPE_GR0; + + if (bClientUserd) + pChannelGPFifo->hUserdMemory[0] = WATCHDOG_USERD_PHYS_MEM_ID; + + // channel alloc API needs to be called without GPU lock + if (!bAcquireLock && rmGpuLockIsOwner()) + { + bAcquireLock = NV_TRUE; + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */, + WATCHDOG_DEVICE_ID /* hParent */, + WATCHDOG_PUSHBUFFER_CHANNEL_ID /* hObject */, + gpfifoObj, + pChannelGPFifo); + + if (bAcquireLock) + { + // Reaquire the GPU locks + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_RC) != + NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to grab RM-Lock\n"); + DBG_BREAKPOINT(); + status = NV_ERR_GENERIC; + goto error; + } + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Unable to alloc watchdog channel\n"); + + if (status == NV_ERR_INVALID_CLASS) + { + status = NV_ERR_NOT_SUPPORTED; + } + goto error; + } + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE) + { + Nv906fControl *pControlGPFifo = NULL; + subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + // USERD isn't mapped for us on Fermi by RmAllocChannel. + status = pRmApi->MapToCpu(pRmApi, + hClient /* hClient */, + (WATCHDOG_SUB_DEVICE_0_ID + subDeviceInstance) /* hDevice */, + bClientUserd ? 
WATCHDOG_USERD_PHYS_MEM_ID : + WATCHDOG_PUSHBUFFER_CHANNEL_ID /* hMemory */, + 0 /* offset */, + ctrlSize /* length */, + (void **)&pControlGPFifo, + 0 /* flags */); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to create a watchdog GPFIFO mapping\n"); + SLI_LOOP_GOTO(error); + } + + pKernelRc->watchdogChannelInfo.pControlGPFifo[subDeviceInstance] = + pControlGPFifo; + + pKernelRc->watchdog.notifiers[subDeviceInstance] =(NvNotification *)( + pKernelRc->watchdogChannelInfo.pCpuAddr + + WATCHDOG_NOTIFIER_OFFSET(pKernelRc->watchdogChannelInfo.pbBytes, + subDeviceInstance /* gpuIndex */, + 0 /* notifier */)); + } + SLI_LOOP_END + + pKernelRc->watchdog.errorContext = (NvNotification *)( + pKernelRc->watchdogChannelInfo.pCpuAddr + + WATCHDOG_ERROR_NOTIFIER_OFFSET(pKernelRc->watchdogChannelInfo.pbBytes)); + + pKernelRc->watchdog.notifierToken = (NvNotification *)( + pKernelRc->watchdogChannelInfo.pCpuAddr + + WATCHDOG_WORK_SUBMIT_TOKEN_OFFSET( + pKernelRc->watchdogChannelInfo.pbBytes)); + + // Create an object that will require a trip through the graphics engine + status = pRmApi->AllocWithHandle(pRmApi, + hClient /* hClient */, + WATCHDOG_PUSHBUFFER_CHANNEL_ID /* hParent */, + WATCHDOG_GROBJ_ID /* hObject */, + grObj, + NULL); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Unable to allocate class %x\n", grObj); + goto error; + } + + // + // Determine the (class + engine) handle the hardware will understand, if + // necessary + // + if (CliGetKernelChannelWithDevice(hClient, + WATCHDOG_DEVICE_ID, + WATCHDOG_PUSHBUFFER_CHANNEL_ID, + &pKernelChannel) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "CliGetKernelChannelWithDevice failed\n"); + status = NV_ERR_INVALID_CHANNEL; + goto error; + } + + NV_ASSERT_OR_ELSE(pKernelChannel != NULL, status = NV_ERR_INVALID_CHANNEL; + goto error); + + { + NvU32 classID; + NvU32 engineID; + + status = kchannelGetClassEngineID_HAL(pGpu, pKernelChannel, + WATCHDOG_GROBJ_ID, + &pKernelRc->watchdogChannelInfo.classEngineID, + &classID, + &engineID); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to get class engine ID %x\n", + grObj); + goto error; + } + } + + pKernelRc->watchdog.hClient = hClient; + pKernelRc->watchdog.runlistId = kchannelGetRunlistId(pKernelChannel); + + // Schedule the watchdog channel for execution. 
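Both the USERD allocation and the channel allocation above wrap AllocWithHandle in the same release/reacquire dance around the GPU lock, since a heap allocation can trigger PMA eviction that must not wait behind that lock. A condensed sketch of just that locking idiom, with error handling trimmed and the allocation parameters assumed to be filled in by the caller:

static NV_STATUS allocOutsideGpuLock(RM_API **ppRmApi, NvHandle hClient,
                                     NvHandle hParent, NvHandle hObject,
                                     NvU32 hClass, void *pAllocParams)
{
    RM_API   *pRmApi     = *ppRmApi;
    NvBool    bReacquire = NV_FALSE;
    NV_STATUS status;

    // Drop the GPU lock (if held) and switch to the API-lock-only interface
    if (rmGpuLockIsOwner())
    {
        bReacquire = NV_TRUE;
        rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL);
        pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL);
    }

    status = pRmApi->AllocWithHandle(pRmApi, hClient, hParent, hObject,
                                     hClass, pAllocParams);

    // Take the GPU lock back and return to the GPU-lock-holding interface
    if (bReacquire)
    {
        if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_RC) != NV_OK)
            return NV_ERR_GENERIC;
        pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
    }

    *ppRmApi = pRmApi;
    return status;
}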
+ { + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS nvA06fScheduleParams; + + portMemSet(&nvA06fScheduleParams, 0, sizeof nvA06fScheduleParams); + nvA06fScheduleParams.bEnable = NV_TRUE; + + status = pRmApi->Control(pRmApi, + pKernelRc->watchdog.hClient, + WATCHDOG_PUSHBUFFER_CHANNEL_ID, + NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, + &nvA06fScheduleParams, + sizeof nvA06fScheduleParams); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Unable to schedule watchdog channel\n"); + goto error; + } + } + + // Get the work submit token that watchdog can use while submitting work + { + NvU32 workSubmitToken; + status = kfifoRmctrlGetWorkSubmitToken_HAL(GPU_GET_KERNEL_FIFO(pGpu), + hClient, + WATCHDOG_PUSHBUFFER_CHANNEL_ID, + &workSubmitToken); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to get work submit token for watchdog\n"); + goto error; + } + } + + krcWatchdogInitPushbuffer_HAL(pGpu, pKernelRc); + + pKernelRc->watchdog.flags |= WATCHDOG_FLAGS_INITIALIZED; + + // Hook into the 1 Hz OS timer + osSchedule1SecondCallback(pGpu, + krcWatchdogTimerProc, + NULL /* pData */, + NV_OS_1HZ_REPEAT); + + // Schedule next interval to run immediately + pKernelRc->watchdogPersistent.nextRunTime = 0; + +error: + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { + pRmApi->Free(pRmApi, hClient, hClient); + } + + portMemFree(pParams); + return status; +} + + +void +krcWatchdogInitPushbuffer_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + NvU32 *ptr, *ptrbase, *ptrbase1; + NvU32 pbOffset; + + // + // Set up the pushbuffer. + // Create two seperate pushbuffer segments + // First - Set object on graphics class + // Second - Notifier, setref + // Create GPFIFO + // Point to setobject pushbuffer, gp_put++ + // Then keep on pointing gp_entry to second pushbuffer segment everytime we + // need a notifier + // + pbOffset = WATCHDOG_PUSHBUFFER_OFFSET( + pKernelRc->watchdogChannelInfo.pbBytes, + 0); + ptrbase = ptr = (NvU32 *)(pKernelRc->watchdogChannelInfo.pCpuAddr + + pbOffset); + + if (IsSLIEnabled(pGpu)) + { + PUSH_DATA( + DRF_DEF(906F, _DMA, _SEC_OP, _GRP0_USE_TERT) | + DRF_DEF(906F, _DMA, _TERT_OP, _GRP0_SET_SUB_DEV_MASK) | + DRF_NUM(906F, _DMA, _SET_SUBDEVICE_MASK_VALUE, SUBDEVICE_MASK_ALL)); + } + + // Set up object in first pushbuffer + PUSH_PAIR(pKernelRc->watchdogChannelInfo.class2dSubch, + NV902D_SET_OBJECT, + pKernelRc->watchdogChannelInfo.classEngineID); + + // + // Construct GPFIFO entries + // Pushbuffer 0 + // + { + NvU64 get = pKernelRc->watchdogChannelInfo.pGpuAddr + pbOffset; + NvU32 length = (NvU8 *)ptr - (NvU8 *)ptrbase; + + pKernelRc->watchdogChannelInfo.gpEntry0[0] = + DRF_DEF(906F, _GP_ENTRY0, _NO_CONTEXT_SWITCH, _FALSE) | + DRF_NUM(906F, _GP_ENTRY0, _GET, NvU64_LO32(get) >> 2); + + pKernelRc->watchdogChannelInfo.gpEntry0[1] = + DRF_NUM(906F, _GP_ENTRY1, _GET_HI, NvU64_HI32(get)) | + DRF_NUM(906F, _GP_ENTRY1, _LENGTH, length >> 2) | + DRF_DEF(906F, _GP_ENTRY1, _PRIV, _USER) | + DRF_DEF(906F, _GP_ENTRY1, _LEVEL, _MAIN); + } + + // Set up notifiers in second pushbuffer + pbOffset = WATCHDOG_PUSHBUFFER_OFFSET( + pKernelRc->watchdogChannelInfo.pbBytes, + 1); + ptrbase1 = ptr = (NvU32 *)(pKernelRc->watchdogChannelInfo.pCpuAddr + + pbOffset); + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + { + NvU64 offset; + if (IsSLIEnabled(pGpu)) + { + PUSH_DATA(DRF_DEF(906F, _DMA, _SEC_OP, _GRP0_USE_TERT) | + DRF_DEF(906F, _DMA, _TERT_OP, _GRP0_SET_SUB_DEV_MASK) | + DRF_NUM(906F, _DMA, _SET_SUBDEVICE_MASK_VALUE, + NVBIT(gpumgrGetSubDeviceInstanceFromGpu(pGpu)))); + } + + offset = 
(pKernelRc->watchdogChannelInfo.pGpuAddr + + WATCHDOG_NOTIFIER_OFFSET( + pKernelRc->watchdogChannelInfo.pbBytes, + gpumgrGetSubDeviceInstanceFromGpu(pGpu), + 0)); + + PUSH_PAIR(pKernelRc->watchdogChannelInfo.class2dSubch, + NV902D_SET_NOTIFY_A, + DRF_NUM(902D, _SET_NOTIFY_A, _ADDRESS_UPPER, NvU64_HI32(offset))); + PUSH_PAIR(pKernelRc->watchdogChannelInfo.class2dSubch, + NV902D_SET_NOTIFY_B, + DRF_NUM(902D, _SET_NOTIFY_B, _ADDRESS_LOWER, NvU64_LO32(offset))); + } + SLI_LOOP_END; + + if (IsSLIEnabled(pGpu)) + { + PUSH_DATA( + DRF_DEF(906F, _DMA, _SEC_OP, _GRP0_USE_TERT) | + DRF_DEF(906F, _DMA, _TERT_OP, _GRP0_SET_SUB_DEV_MASK) | + DRF_NUM(906F, _DMA, _SET_SUBDEVICE_MASK_VALUE, SUBDEVICE_MASK_ALL)); + } + + // Notifiers + PUSH_PAIR(pKernelRc->watchdogChannelInfo.class2dSubch, + NV902D_NOTIFY, NV902D_NOTIFY_TYPE_WRITE_ONLY); + PUSH_PAIR(pKernelRc->watchdogChannelInfo.class2dSubch, + NV902D_NO_OPERATION, 0x0); + PUSH_PAIR(pKernelRc->watchdogChannelInfo.class2dSubch, + NV906F_SET_REFERENCE, 0x0); + + // Pushbuffer 1 + { + NvU64 get = pKernelRc->watchdogChannelInfo.pGpuAddr + pbOffset; + NvU32 length = (NvU8 *)ptr - (NvU8 *)ptrbase1; + + pKernelRc->watchdogChannelInfo.gpEntry1[0] = + DRF_DEF(906F, _GP_ENTRY0, _NO_CONTEXT_SWITCH, _FALSE) | + DRF_NUM(906F, _GP_ENTRY0, _GET, NvU64_LO32(get) >> 2); + + pKernelRc->watchdogChannelInfo.gpEntry1[1] = + DRF_NUM(906F, _GP_ENTRY1, _GET_HI, NvU64_HI32(get)) | + DRF_NUM(906F, _GP_ENTRY1, _LENGTH, length >> 2) | + DRF_DEF(906F, _GP_ENTRY1, _PRIV, _USER) | + DRF_DEF(906F, _GP_ENTRY1, _LEVEL, _MAIN) | + DRF_DEF(906F, _GP_ENTRY1, _SYNC, _WAIT); + } + + // Write a new entry to the GPFIFO (pushbuffer 0) + { + NvU32 *pGpEntry = (NvU32 *)( + pKernelRc->watchdogChannelInfo.pCpuAddr + + WATCHDOG_GPFIFO_OFFSET(pKernelRc->watchdogChannelInfo.pbBytes)); + MEM_WR32(&pGpEntry[0], pKernelRc->watchdogChannelInfo.gpEntry0[0]); + MEM_WR32(&pGpEntry[1], pKernelRc->watchdogChannelInfo.gpEntry0[1]); + } + + // Flush the WRC buffer using fence operation before updating gp_put + osFlushCpuWriteCombineBuffer(); + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + { + NvU32 subdeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + MEM_WR32( + &pKernelRc->watchdogChannelInfo.pControlGPFifo[subdeviceId]->GPPut, + 1); + pKernelRc->watchdog.notifiers[subdeviceId]->status = 0; + } + SLI_LOOP_END; + + // + // Flush the WRC buffer using fence operation before updating the usermode + // channel ID register + // + osFlushCpuWriteCombineBuffer(); + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + { + kfifoUpdateUsermodeDoorbell_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + pKernelRc->watchdog.notifierToken->info32, + pKernelRc->watchdog.runlistId); + } + SLI_LOOP_END; + + krcWatchdogWriteNotifierToGpfifo(pGpu, pKernelRc); +} + + +void +krcWatchdogWriteNotifierToGpfifo_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + NvU32 GPPut; + + // Write a second entry to the GPFIFO (notifier) + { + NvU32 subdeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + NvU32 *pGpEntry; + + GPPut = MEM_RD32( + &pKernelRc->watchdogChannelInfo.pControlGPFifo[subdeviceId]->GPPut); + + if (GPPut >= WATCHDOG_GPFIFO_ENTRIES) + { + NV_ASSERT(GPPut < WATCHDOG_GPFIFO_ENTRIES); + return; + } + + pGpEntry = (NvU32 *)( + pKernelRc->watchdogChannelInfo.pCpuAddr + + WATCHDOG_GPFIFO_OFFSET(pKernelRc->watchdogChannelInfo.pbBytes) + + (GPPut * NV906F_GP_ENTRY__SIZE)); + MEM_WR32(&pGpEntry[0], pKernelRc->watchdogChannelInfo.gpEntry1[0]); + MEM_WR32(&pGpEntry[1], pKernelRc->watchdogChannelInfo.gpEntry1[1]); + } + + // + // Flush the WRC buffer using fence 
operation before updating the usermode + // channel ID register + // + osFlushCpuWriteCombineBuffer(); + + // + // Write out incremented GPPut (we need > 2 GP fifo entries as one entry + // must always be empty, as there is no extra state bit to distinguish + // between a full GPFIFO buffer and an empty GPFIFO buffer). + // + GPPut = (GPPut + 1) % WATCHDOG_GPFIFO_ENTRIES; + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + { + NvU32 subdeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + MEM_WR32( + &pKernelRc->watchdogChannelInfo.pControlGPFifo[subdeviceId]->GPPut, + GPPut); + } + SLI_LOOP_END; + + // + // Flush the WRC buffer using fence operation before updating the usermode + // channel ID register + // + osFlushCpuWriteCombineBuffer(); + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + { + kfifoUpdateUsermodeDoorbell_HAL(pGpu, GPU_GET_KERNEL_FIFO(pGpu), + pKernelRc->watchdog.notifierToken->info32, + pKernelRc->watchdog.runlistId); + } + SLI_LOOP_END; +} + + diff --git a/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog_callback.c b/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog_callback.c new file mode 100644 index 000000000..98e2fb328 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog_callback.c @@ -0,0 +1,353 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "kernel/gpu/rc/kernel_rc.h" + +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/gpu.h" +#include "kernel/gpu_mgr/gpu_mgr.h" + +#include "ctrl/ctrl906f.h" + + +// Seconds before watchdog tries to reset itself +#define WATCHDOG_RESET_SECONDS 4 + + +// +// Thwapping simply invokes error recovery on the given channel, faking a parse +// error. Error recovery isolates the channel, so ignoring the error is not an +// option for clients. 
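Looking back at krcWatchdogWriteNotifierToGpfifo above, the modulo update of GPPut together with the remark that more than two GPFIFO entries are required is the standard get/put ring convention: one slot is deliberately never filled so that a full ring and an empty ring remain distinguishable. A minimal sketch of that convention, assuming only nvtypes.h:

#include "nvtypes.h"

// Empty: the consumer (get) has caught up with the producer (put).
static NvBool ringIsEmpty(NvU32 get, NvU32 put)
{
    return get == put;
}

// Full: advancing put once more would make it equal to get, so the producer
// stops one slot early and that slot is the one that always stays unused.
static NvBool ringIsFull(NvU32 get, NvU32 put, NvU32 numEntries)
{
    return ((put + 1) % numEntries) == get;
}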
+// +static void +_krcThwapChannel +( + OBJGPU *pGpu, + KernelRc *pKernelRc, + NvU32 chid +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + KernelChannel *pKernelChannel = kfifoChidMgrGetKernelChannel( + pGpu, pKernelFifo, + kfifoGetChidMgr(pGpu, pKernelFifo, pKernelRc->watchdog.runlistId), + chid); + + if (pKernelChannel == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to thwap channel 0x%02x, it's not in use\n", chid); + return; + } + + NV_PRINTF(LEVEL_INFO, "Thwapping channel 0x%02x.\n", + kchannelGetDebugTag(pKernelChannel)); + + + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV906F_CTRL_CMD_RESET_CHANNEL_PARAMS params = {0}; + + params.resetReason = + NV906F_CTRL_CMD_INTERNAL_RESET_CHANNEL_REASON_FAKE_ERROR; + params.subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + params.engineID = NV2080_ENGINE_TYPE_NULL; + + pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel), + NV906F_CTRL_CMD_RESET_CHANNEL, + ¶ms, + sizeof params); + } +} + + +// +// Tests error recovery by causing channel errors of varying severity. Only +// affects channels in the ThwapChannelMask and StompChannelMask bitmasks, which +// are normally zero. +// +static void +_krcTestChannelRecovery +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + NvU32 chid; + + for (chid = 0; chid < 32; chid++) + { + if (pKernelRc->watchdog.thwapChannelMask & (1 << chid)) + { + + _krcThwapChannel(pGpu, pKernelRc, chid); + + // Unless this channel is marked for repeat, clear its thwap bit. + if (0 == (pKernelRc->watchdog.thwapRepeatMask & (1 << chid))) + { + pKernelRc->watchdog.thwapChannelMask &= ~(1 << chid); + } + } + if (pKernelRc->watchdog.stompChannelMask & (1 << chid)) + { + // Unless this channel is marked for repeat, clear its stomp bit. + if (0 == (pKernelRc->watchdog.stompRepeatMask & (1 << chid))) + { + pKernelRc->watchdog.stompChannelMask &= ~(1 << chid); + } + } + } +} + + +void krcWatchdogTimerProc +( + OBJGPU *pGpu, + void *data +) +{ + // + // These calls shouldn't occur during a hibernate/standby enter or resume + // sequence or if the GPU is lost, which will cause a system hang. + // + if (gpuIsGpuFullPower(pGpu) && + !pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_LOST)) + { + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + + krcWatchdog_HAL(pGpu, pKernelRc); + krcWatchdogCallbackVblankRecovery_HAL(pGpu, pKernelRc); + krcWatchdogCallbackPerf_HAL(pGpu, pKernelRc); + } +} + + +// +// Watchdog callback routine. Monitors GPU and attempts to recover from lockups, +// etc. +// +void +krcWatchdog_IMPL +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + NvU32 usec, sec; + NvU64 currentTime; + NvBool allNotifiersWritten = NV_TRUE; + NV_STATUS rmStatus; + + // Do nothing if robust channels are not enabled + if (!pKernelRc->bRobustChannelsEnabled) + return; + + // + // If the device has been reset then we can skip this sometimes after a + // reset we can have a reenable make sure that this is a one time event + // + if (pKernelRc->watchdog.deviceResetRd != pKernelRc->watchdog.deviceResetWr) + { + if (pKernelRc->watchdog.deviceReset[pKernelRc->watchdog.deviceResetRd]) + pKernelRc->watchdog.flags |= WATCHDOG_FLAGS_DISABLED; + + pKernelRc->watchdog.deviceResetRd = ( + (pKernelRc->watchdog.deviceResetRd + 1) & + (WATCHDOG_RESET_QUEUE_SIZE - 1)); + } + + // If we are disabled or chip is in low power mode + if ((WATCHDOG_FLAGS_DISABLED != + (pKernelRc->watchdog.flags & WATCHDOG_FLAGS_DISABLED)) && + gpuIsGpuFullPower(pGpu)) + { + // + // Make sure we're initialized. 
If not, initialize and wait for the + // next watchdog call. + // + if (!(pKernelRc->watchdog.flags & WATCHDOG_FLAGS_INITIALIZED)) + { + rmStatus = krcWatchdogInit_HAL(pGpu, pKernelRc); + + if (rmStatus!= NV_OK) + { + NV_PRINTF(LEVEL_INFO, "krcWatchdogInit failed: %d\n", rmStatus); + } + return; + } + + // Count the number of invocations of the callback. + pKernelRc->watchdog.count++; + + // Check if, for some reason, the watchdog triggered an error + if ((pKernelRc->watchdog.errorContext->status & 0xFFFF) != 0) + { + NV_PRINTF(LEVEL_WARNING, + "RC watchdog: error on our channel (reinitializing).\n"); + + // reset allows enough time to restart + pKernelRc->watchdog.errorContext->status = 0; + + // reinit the pushbuffer image and kickoff again + krcWatchdogInitPushbuffer_HAL(pGpu, pKernelRc); + + // Run Immediately + pKernelRc->watchdogPersistent.nextRunTime = 0; + } + + // Handle robust channel testing, if necessary. + if (pKernelRc->watchdog.channelTestCountdown != 0) + { + pKernelRc->watchdog.channelTestCountdown--; + if (pKernelRc->watchdog.channelTestCountdown == 0) + { + if ((pKernelRc->watchdog.thwapChannelMask != 0) || + (pKernelRc->watchdog.stompChannelMask != 0)) + { + _krcTestChannelRecovery(pGpu, pKernelRc); + } + pKernelRc->watchdog.channelTestCountdown = + pKernelRc->watchdog.channelTestInterval; + } + } + + osGetCurrentTime(&sec, &usec); + currentTime = (((NvU64)sec) * 1000000) + usec; + + // + // See if all GPUs got around to delivering their notifiers. If so, + // they will have set the notifier statuses to 0. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + { + NvU32 subdeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + if (pKernelRc->watchdog.notifiers[subdeviceId]->status != 0) + allNotifiersWritten = NV_FALSE; + } + SLI_LOOP_END; + + if (!allNotifiersWritten) + { + if (currentTime >= pKernelRc->watchdogPersistent.notifyLimitTime) + { + // + // If the card hasn't gotten around to us for many seconds, + // something is wrong. + // + NV_PRINTF(LEVEL_ERROR, + "RC watchdog: GPU is probably locked! Notify Timeout Seconds: %d\n", + pKernelRc->watchdogPersistent.timeoutSecs); + + // Disable the watchdog for now, and drop the critical section. + pKernelRc->watchdog.flags |= WATCHDOG_FLAGS_DISABLED; + + // + // Attempt to clean up the mess. + // This should probably be in an SLI loop when a GSP client + // + krcWatchdogRecovery_HAL(pGpu, pKernelRc); + + // Re-enable. + pKernelRc->watchdog.flags &= ~WATCHDOG_FLAGS_DISABLED; + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + { + NvU32 subdeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pKernelRc->watchdog.notifiers[subdeviceId]->status = 0; + } + SLI_LOOP_END; + + pKernelRc->watchdogPersistent.nextRunTime = 0; + + NV_PRINTF(LEVEL_WARNING, "RC watchdog: Trying to recover.\n"); + + return; + } + else if (currentTime >= + pKernelRc->watchdogPersistent.resetLimitTime) + { + // + // It's entirely possible that the card is extremely busy and + // hasn't gotten around to the watchdog channel yet. This can + // happen during power events and mode sets. Allow for a few + // failures before we panic. + // + NV_PRINTF(LEVEL_WARNING, + "RC watchdog: GPU is possibly locked. Attempting to restart watchdog.\n"); + + // + // Fall through and attempt to update the watchdog's position. + // This may result in a double notify exception, but that's not + // fatal. 
+ // + } + else + { + return; + } + } + + if (currentTime >= pKernelRc->watchdogPersistent.nextRunTime) + { + // Stored as microseconds (1000000 of a second) + pKernelRc->watchdogPersistent.nextRunTime = currentTime + + (pKernelRc->watchdogPersistent.intervalSecs * 1000000); + pKernelRc->watchdogPersistent.notifyLimitTime = currentTime + + (pKernelRc->watchdogPersistent.timeoutSecs * 1000000); + pKernelRc->watchdogPersistent.resetLimitTime = currentTime + + (WATCHDOG_RESET_SECONDS * 1000000); + + // Reset the status to a known value. + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE); + { + NvU32 subdeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pKernelRc->watchdog.notifiers[subdeviceId]->status = 0xFFFF; + } + SLI_LOOP_END; + + // Set the put pointer on our buffer. + krcWatchdogWriteNotifierToGpfifo(pGpu, pKernelRc); + } + } +} + + +void +krcWatchdogRecovery_KERNEL +( + OBJGPU *pGpu, + KernelRc *pKernelRc +) +{ + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_RC_WATCHDOG_TIMEOUT, + NULL, + 0); +} diff --git a/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog_ctrl.c b/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog_ctrl.c new file mode 100644 index 000000000..76035ab47 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/rc/kernel_rc_watchdog_ctrl.c @@ -0,0 +1,133 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
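In outline, the callback above drives a liveness handshake: each interval it sets every per-subdevice notifier status to 0xFFFF and pushes a NOTIFY through the watchdog channel, and the GPU clears that status when it executes the work. The escalation decision made when a notifier stays pending can be summarized by a small predicate (illustrative only; the two deadline names follow the fields used above, and nvtypes.h is assumed):

#include "nvtypes.h"

typedef enum
{
    WD_ACTION_NONE,    // nothing overdue yet, or the notifier came back
    WD_ACTION_REKICK,  // past resetLimitTime: resubmit the work and keep waiting
    WD_ACTION_RECOVER  // past notifyLimitTime: assume a hang and run recovery
} WD_ACTION;

static WD_ACTION wdClassify(NvBool bNotifierStillPending, NvU64 now,
                            NvU64 resetLimitTime, NvU64 notifyLimitTime)
{
    if (!bNotifierStillPending)
        return WD_ACTION_NONE;
    if (now >= notifyLimitTime)
        return WD_ACTION_RECOVER;
    if (now >= resetLimitTime)
        return WD_ACTION_REKICK;
    return WD_ACTION_NONE;
}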
+ */ + +#include "kernel/gpu/rc/kernel_rc.h" +#include "kernel/gpu/subdevice/subdevice.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + + +NV_STATUS +subdeviceCtrlCmdRcGetWatchdogInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS *pWatchdogInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + + portMemSet(pWatchdogInfoParams, 0, sizeof *pWatchdogInfoParams); + + if (pKernelRc->watchdog.flags & WATCHDOG_FLAGS_INITIALIZED) + { + pWatchdogInfoParams->watchdogStatusFlags |= + NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_INITIALIZED; + } + + if (pKernelRc->watchdog.flags & WATCHDOG_FLAGS_DISABLED) + { + pWatchdogInfoParams->watchdogStatusFlags |= + NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_DISABLED; + } + + if ((pKernelRc->watchdog.flags & WATCHDOG_FLAGS_INITIALIZED) && + !(pKernelRc->watchdog.flags & WATCHDOG_FLAGS_DISABLED) && + gpuIsGpuFullPower(pGpu)) + { + pWatchdogInfoParams->watchdogStatusFlags |= + NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING; + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdRcDisableWatchdog_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + // Watchdog not supported while SMC is active + NV_CHECK_OR_RETURN(LEVEL_INFO, !IS_MIG_ENABLED(pGpu), NV_ERR_NOT_SUPPORTED); + + return krcWatchdogChangeState(GPU_GET_KERNEL_RC(pGpu), + pSubdevice, + RMAPI_DISABLE_REQUEST); +} + +NV_STATUS +subdeviceCtrlCmdRcSoftDisableWatchdog_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + // Watchdog not supported while SMC is active + NV_CHECK_OR_RETURN(LEVEL_INFO, !IS_MIG_ENABLED(pGpu), NV_ERR_NOT_SUPPORTED); + + return krcWatchdogChangeState(GPU_GET_KERNEL_RC(pGpu), + pSubdevice, + RMAPI_SOFT_DISABLE_REQUEST); +} + +NV_STATUS +subdeviceCtrlCmdRcEnableWatchdog_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + // Watchdog not supported while SMC is active + NV_CHECK_OR_RETURN(LEVEL_INFO, !IS_MIG_ENABLED(pGpu), NV_ERR_NOT_SUPPORTED); + + return krcWatchdogChangeState(GPU_GET_KERNEL_RC(pGpu), + pSubdevice, + RMAPI_ENABLE_REQUEST); +} + +NV_STATUS +subdeviceCtrlCmdRcReleaseWatchdogRequests_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + // Watchdog not supported while SMC is active + NV_CHECK_OR_RETURN(LEVEL_INFO, !IS_MIG_ENABLED(pGpu), NV_ERR_NOT_SUPPORTED); + + return krcWatchdogChangeState(GPU_GET_KERNEL_RC(pGpu), + pSubdevice, + RMAPI_RELEASE_ALL_REQUESTS); +} + + +NV_STATUS subdeviceCtrlCmdInternalRcWatchdogTimeout_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + krcWatchdogRecovery_HAL(pGpu, GPU_GET_KERNEL_RC(pGpu)); + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/sec2/arch/ampere/kernel_sec2_ga100.c b/src/nvidia/src/kernel/gpu/sec2/arch/ampere/kernel_sec2_ga100.c new file mode 100644 index 000000000..ef346be1b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/sec2/arch/ampere/kernel_sec2_ga100.c @@ -0,0 +1,93 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
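The handlers above sit behind ordinary NV2080 subdevice controls, so other resman code can drive the watchdog through the RM API. A hedged sketch of querying the watchdog and then requesting a soft disable, assuming an existing client handle and subdevice handle, the ctrl2080rc.h header path, and that the soft-disable control takes no parameter structure, mirroring its handler:

#include "ctrl/ctrl2080/ctrl2080rc.h"   // control IDs and parameter structs (path assumed)

static NV_STATUS wdQueryAndSoftDisable(RM_API *pRmApi, NvHandle hClient,
                                       NvHandle hSubdevice)
{
    NV2080_CTRL_RC_GET_WATCHDOG_INFO_PARAMS info = {0};
    NV_STATUS status;

    status = pRmApi->Control(pRmApi, hClient, hSubdevice,
                             NV2080_CTRL_CMD_RC_GET_WATCHDOG_INFO,
                             &info, sizeof info);
    if (status != NV_OK)
        return status;

    if (info.watchdogStatusFlags &
        NV2080_CTRL_RC_GET_WATCHDOG_INFO_FLAGS_RUNNING)
    {
        // Request a soft disable (see subdeviceCtrlCmdRcSoftDisableWatchdog above)
        status = pRmApi->Control(pRmApi, hClient, hSubdevice,
                                 NV2080_CTRL_CMD_RC_SOFT_DISABLE_WATCHDOG,
                                 NULL, 0);
    }
    return status;
}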
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/sec2/kernel_sec2.h" + +#include "core/core.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" +#include "os/nv_memory_type.h" + +#include "published/ampere/ga100/dev_fuse.h" +#include "published/ampere/ga100/dev_sec_pri.h" +#include "published/ampere/ga100/dev_sec_addendum.h" + +void +ksec2ConfigureFalcon_GA100 +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2 +) +{ + KernelFalconEngineConfig falconConfig; + + portMemSet(&falconConfig, 0, sizeof(falconConfig)); + + falconConfig.registerBase = DRF_BASE(NV_PSEC); + falconConfig.riscvRegisterBase = 0; // RISC-V unused or unsupported + falconConfig.fbifBase = NV_PSEC_FBIF_BASE; + falconConfig.bBootFromHs = NV_FALSE; + falconConfig.pmcEnableMask = 0; + falconConfig.bIsPmcDeviceEngine = NV_FALSE; + falconConfig.physEngDesc = ENG_SEC2; + falconConfig.ctxAttr = NV_MEMORY_UNCACHED; + falconConfig.ctxBufferSize = FLCN_CTX_ENG_BUFFER_SIZE_HW << 4; + falconConfig.addrSpaceList = memdescAddrSpaceListToU32(ADDRLIST_FBMEM_PREFERRED); + + kflcnConfigureEngine(pGpu, staticCast(pKernelSec2, KernelFalcon), &falconConfig); +} + +/*! + * Returns the SEC2 fuse version of the provided ucode id (1-indexed) + * + * @param pGpu OBJGPU pointer + * @param pKernelSec2 KernelSec2 pointer + * @param[in] ucodeId Ucode Id (1-indexed) to read fuse for + */ +NvU32 +ksec2ReadUcodeFuseVersion_GA100 +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2, + const NvU32 ucodeId +) +{ + NvU32 fuseVal = 0; + NvU32 index = ucodeId - 1; // adjust to 0-indexed + + // TODO: Bug 3519329: switch to indexed register once available + // if (index < NV_FUSE_OPT_FPF_SEC2_UCODE_VERSION__SIZE_1) + if (index < 16) + { + // fuseVal = GPU_REG_IDX_RD_DRF(pGpu, _FUSE, _OPT_FPF_SEC2_UCODE_VERSION, index, _DATA); + fuseVal = GPU_REG_RD32(pGpu, NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION + (4 * index)); + + if (fuseVal) + { + HIGHESTBITIDX_32(fuseVal); + fuseVal = fuseVal + 1; + } + } + + return fuseVal; +} diff --git a/src/nvidia/src/kernel/gpu/sec2/arch/ampere/kernel_sec2_ga102.c b/src/nvidia/src/kernel/gpu/sec2/arch/ampere/kernel_sec2_ga102.c new file mode 100644 index 000000000..1a3268187 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/sec2/arch/ampere/kernel_sec2_ga102.c @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
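On the fuse decode in ksec2ReadUcodeFuseVersion_GA100 above: the FPF value is treated as a mask whose highest set bit determines the version, so it is collapsed to the index of that bit plus one. A small worked sketch, assuming HIGHESTBITIDX_32 from nvmisc.h, which rewrites its lvalue argument to that index in place:

#include "nvmisc.h"   // HIGHESTBITIDX_32 and NvU32

static NvU32 fuseBitsToVersion(NvU32 fuseVal)
{
    // 0x0 -> 0, 0x1 -> 1, 0x3 -> 2, 0x7 -> 3, ...
    if (fuseVal != 0)
    {
        HIGHESTBITIDX_32(fuseVal);  // e.g. 0x7 becomes 2
        fuseVal = fuseVal + 1;      // ...and the reported version is 3
    }
    return fuseVal;
}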
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/sec2/kernel_sec2.h" + +#include "core/core.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" +#include "os/nv_memory_type.h" + +#include "published/ampere/ga102/dev_falcon_second_pri.h" +#include "published/ampere/ga102/dev_sec_pri.h" +#include "published/ampere/ga102/dev_sec_addendum.h" + +void +ksec2ConfigureFalcon_GA102 +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2 +) +{ + KernelFalconEngineConfig falconConfig; + + portMemSet(&falconConfig, 0, sizeof(falconConfig)); + + falconConfig.registerBase = DRF_BASE(NV_PSEC); + falconConfig.riscvRegisterBase = NV_FALCON2_SEC_BASE; + falconConfig.fbifBase = NV_PSEC_FBIF_BASE; + falconConfig.bBootFromHs = NV_TRUE; + falconConfig.pmcEnableMask = 0; + falconConfig.bIsPmcDeviceEngine = NV_FALSE; + falconConfig.physEngDesc = ENG_SEC2; + falconConfig.ctxAttr = NV_MEMORY_UNCACHED; + falconConfig.ctxBufferSize = FLCN_CTX_ENG_BUFFER_SIZE_HW << 4; + falconConfig.addrSpaceList = memdescAddrSpaceListToU32(ADDRLIST_FBMEM_PREFERRED); + + kflcnConfigureEngine(pGpu, staticCast(pKernelSec2, KernelFalcon), &falconConfig); +} diff --git a/src/nvidia/src/kernel/gpu/sec2/arch/turing/kernel_sec2_tu102.c b/src/nvidia/src/kernel/gpu/sec2/arch/turing/kernel_sec2_tu102.c new file mode 100644 index 000000000..286c125ed --- /dev/null +++ b/src/nvidia/src/kernel/gpu/sec2/arch/turing/kernel_sec2_tu102.c @@ -0,0 +1,200 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gpu/sec2/kernel_sec2.h" + +#include "core/bin_data.h" +#include "core/core.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" +#include "os/nv_memory_type.h" + +#include "published/turing/tu102/dev_sec_pri.h" +#include "published/turing/tu102/dev_sec_addendum.h" + +#include "rmflcnbl.h" + +void +ksec2ConfigureFalcon_TU102 +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2 +) +{ + KernelFalconEngineConfig falconConfig; + + portMemSet(&falconConfig, 0, sizeof(falconConfig)); + + falconConfig.registerBase = DRF_BASE(NV_PSEC); + falconConfig.riscvRegisterBase = 0; // RISC-V unused or unsupported + falconConfig.fbifBase = NV_PSEC_FBIF_BASE; + falconConfig.bBootFromHs = NV_FALSE; + falconConfig.pmcEnableMask = 0; + falconConfig.bIsPmcDeviceEngine = NV_FALSE; + falconConfig.physEngDesc = ENG_SEC2; + falconConfig.ctxAttr = NV_MEMORY_UNCACHED; + falconConfig.ctxBufferSize = FLCN_CTX_ENG_BUFFER_SIZE_HW << 4; + falconConfig.addrSpaceList = memdescAddrSpaceListToU32(ADDRLIST_FBMEM_PREFERRED); + + kflcnConfigureEngine(pGpu, staticCast(pKernelSec2, KernelFalcon), &falconConfig); +} + +NV_STATUS +ksec2ResetHw_TU102 +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2 +) +{ + GPU_FLD_WR_DRF_DEF(pGpu, _PSEC, _FALCON_ENGINE, _RESET, _TRUE); + GPU_FLD_WR_DRF_DEF(pGpu, _PSEC, _FALCON_ENGINE, _RESET, _FALSE); + + return NV_OK; +} + +NvBool +ksec2IsEngineInReset_TU102 +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2 +) +{ + NvU32 val = GPU_REG_RD32(pGpu, NV_PSEC_FALCON_ENGINE); + + return FLD_TEST_DRF(_PSEC_FALCON, _ENGINE, _RESET, _TRUE, val); +} + +static NV_STATUS +s_allocateGenericBlUcode +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2, + const RM_FLCN_BL_DESC **ppDesc, + const NvU8 **ppImg +) +{ + NV_STATUS status = NV_OK; + + const BINDATA_ARCHIVE *pBinArchive; + const BINDATA_STORAGE *pBinDesc; + const BINDATA_STORAGE *pBinImg; + NvLength descSizeAligned; + NvLength imgSizeAligned; + + RM_FLCN_BL_DESC *pGenericBlUcodeDesc = NULL; + NvU8 *pGenericBlUcodeImg = NULL; + + NV_ASSERT_OR_RETURN(ppDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(ppImg != NULL, NV_ERR_INVALID_ARGUMENT); + + pBinArchive = ksec2GetBinArchiveBlUcode_HAL(pGpu, pKernelSec2); + NV_ASSERT_OR_GOTO(pBinArchive != NULL, out); + + // allocate desc + pBinDesc = bindataArchiveGetStorage(pBinArchive, "ucode_desc"); + NV_ASSERT_OR_GOTO(pBinDesc != NULL, out); + + descSizeAligned = RM_ALIGN_UP(bindataGetBufferSize(pBinDesc), FLCN_BLK_ALIGNMENT); + pGenericBlUcodeDesc = portMemAllocNonPaged(descSizeAligned); + if (pGenericBlUcodeDesc == NULL) + { + status = NV_ERR_NO_MEMORY; + goto out; + } + + NV_ASSERT_OK_OR_GOTO(status, + bindataWriteToBuffer(pBinDesc, (NvU8 *) pGenericBlUcodeDesc, descSizeAligned), out); + + // allocate img + pBinImg = bindataArchiveGetStorage(pBinArchive, "ucode_image"); + imgSizeAligned = RM_ALIGN_UP(bindataGetBufferSize(pBinImg), FLCN_BLK_ALIGNMENT); + + if (pGenericBlUcodeDesc->blImgHeader.blCodeSize > imgSizeAligned) + { + status = NV_ERR_INVALID_DATA; + goto out; + } + + pGenericBlUcodeImg = portMemAllocNonPaged(imgSizeAligned); + if (pGenericBlUcodeImg == NULL) + { + status = NV_ERR_NO_MEMORY; + goto out; + } + + NV_ASSERT_OK_OR_GOTO(status, + bindataWriteToBuffer(pBinImg, pGenericBlUcodeImg, imgSizeAligned), out); + + *ppDesc 
= pGenericBlUcodeDesc; + *ppImg = pGenericBlUcodeImg; + + return status; + +out: + portMemFree(pGenericBlUcodeDesc); + portMemFree(pGenericBlUcodeImg); + return status; +} + +/*! + * Get the generic falcon bootloader ucode descriptor and image + * + * Note: this bootloader works for both SEC2 and GSP + * (though it is defined only on KernelSec2) + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pKernelSec2 KernelSec2 pointer + * @param[out] ppDesc pointer to ucode descriptor + * @param[out] ppImg pointer to ucode image + */ +NV_STATUS +ksec2GetGenericBlUcode_TU102 +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2, + const RM_FLCN_BL_DESC **ppDesc, + const NvU8 **ppImg +) +{ + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(ppDesc != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(ppImg != NULL, NV_ERR_INVALID_ARGUMENT); + + if (pKernelSec2->pGenericBlUcodeDesc == NULL) + { + NV_ASSERT_OR_RETURN(pKernelSec2->pGenericBlUcodeImg == NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OK_OR_RETURN( + s_allocateGenericBlUcode(pGpu, pKernelSec2, + &pKernelSec2->pGenericBlUcodeDesc, + &pKernelSec2->pGenericBlUcodeImg)); + } + + NV_ASSERT_OR_RETURN(pKernelSec2->pGenericBlUcodeDesc != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelSec2->pGenericBlUcodeImg != NULL, NV_ERR_INVALID_STATE); + + *ppDesc = pKernelSec2->pGenericBlUcodeDesc; + *ppImg = pKernelSec2->pGenericBlUcodeImg; + + return status; +} diff --git a/src/nvidia/src/kernel/gpu/sec2/kernel_sec2.c b/src/nvidia/src/kernel/gpu/sec2/kernel_sec2.c new file mode 100644 index 000000000..75d43c4bf --- /dev/null +++ b/src/nvidia/src/kernel/gpu/sec2/kernel_sec2.c @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gpu/sec2/kernel_sec2.h" + +#include "core/core.h" +#include "gpu/eng_desc.h" +#include "gpu/falcon/kernel_falcon.h" +#include "gpu/gpu.h" + +NV_STATUS +ksec2ConstructEngine_IMPL +( + OBJGPU *pGpu, + KernelSec2 *pKernelSec2, + ENGDESCRIPTOR engDesc +) +{ + ksec2ConfigureFalcon_HAL(pGpu, pKernelSec2); + return NV_OK; +} + +void +ksec2Destruct_IMPL +( + KernelSec2 *pKernelSec2 +) +{ + portMemFree((void * /* const_cast */) pKernelSec2->pGenericBlUcodeDesc); + pKernelSec2->pGenericBlUcodeDesc = NULL; + + portMemFree((void * /* const_cast */) pKernelSec2->pGenericBlUcodeImg); + pKernelSec2->pGenericBlUcodeImg = NULL; +} diff --git a/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c b/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c new file mode 100644 index 000000000..733732e05 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/generic_engine.c @@ -0,0 +1,170 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/generic_engine.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/client.h" + +#include + + +NV_STATUS +genapiConstruct_IMPL +( + GenericEngineApi *pGenericEngineApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RS_ITERATOR it; + OBJGPU *pGpu = GPU_RES_GET_GPU(pGenericEngineApi); + + if (!gpuIsClassSupported(pGpu, pCallContext->pResourceRef->externalClassId)) + return NV_ERR_INVALID_CLASS; + + // + // We allow multiple instances of GenericEngineApi class, however, only want + // to allow a single instance of each external class id type. E.g.: + // GF100_SUBDEVICE_GRAPHICS is allowed alongside GF100_SUBDEVICE_FB. 
+ // + it = clientRefIter(pCallContext->pClient, + pCallContext->pResourceRef->pParentRef, + classId(GenericEngineApi), RS_ITERATE_CHILDREN, NV_TRUE); + + while (clientRefIterNext(pCallContext->pClient, &it)) + { + if (it.pResourceRef->externalClassId == pCallContext->pResourceRef->externalClassId && + it.pResourceRef != pCallContext->pResourceRef) + { + return NV_ERR_STATE_IN_USE; + } + } + + return NV_OK; +} + +void +genapiDestruct_IMPL +( + GenericEngineApi *pGenericEngineApi +) +{ +} + +NV_STATUS +genapiControl_IMPL +( + GenericEngineApi *pGenericEngineApi, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + + return gpuresControl_IMPL(staticCast(pGenericEngineApi, GpuResource), + pCallContext, pParams); +} + +NV_STATUS +genapiMap_IMPL +( + GenericEngineApi *pGenericEngineApi, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu; + NvU32 engineOffset, regSize, regBase; + NvU32 protect; + NV_STATUS rmStatus; + + pGpu = GPU_RES_GET_GPU(pGenericEngineApi); + + // XXX The default should really be more restrictive + protect = NV_PROTECT_READ_WRITE; + + switch (RES_GET_EXT_CLASS_ID(pGenericEngineApi)) + { + case GF100_SUBDEVICE_MASTER: + { + regSize = sizeof(GF100MASTERMap); + regBase = NV_REG_BASE_MASTER; + protect = NV_PROTECT_READABLE; + break; + } + default: + return NV_ERR_INVALID_CLASS; + } + + // Get the offset to the engine registers + rmStatus = gpuGetRegBaseOffset_HAL(pGpu, regBase, &engineOffset); + if (rmStatus != NV_OK) + return rmStatus; + + // Round down to nearest 4k page + engineOffset &= ~(0x1000-1); + + // Check the caller is requesting more privilieges than we allow + if (pCpuMapping->pPrivate->protect & ~protect) + { + NV_PRINTF(LEVEL_ERROR, "%s%saccess not allowed on class 0x%x\n", + (pCpuMapping->pPrivate->protect & ~protect) & NV_PROTECT_READABLE ? "Read " : "", + (pCpuMapping->pPrivate->protect & ~protect) & NV_PROTECT_WRITEABLE ? "Write " : "", + RES_GET_EXT_CLASS_ID(pGenericEngineApi)); + + return NV_ERR_PROTECTION_FAULT; + } + + // Create mapping + rmStatus = rmapiMapGpuCommon(staticCast(pGenericEngineApi, RsResource), + pCallContext, + pCpuMapping, + pGpu, + engineOffset, + regSize); + pCpuMapping->processId = osGetCurrentProcess(); + + if (pParams->ppCpuVirtAddr) + *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress; + + return rmStatus; +} + +NV_STATUS +genapiGetMapAddrSpace_IMPL +( + GenericEngineApi *pGenericEngineApi, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + if (pAddrSpace) + *pAddrSpace = ADDR_REGMEM; + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice.c new file mode 100644 index 000000000..7ba1c8b94 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice.c @@ -0,0 +1,636 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
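genapiConstruct and genapiMap above expose register windows such as GF100_SUBDEVICE_MASTER as one-instance-per-class children of a subdevice, with the PMC window restricted to read-only mappings. A hypothetical kernel-side sketch of allocating and CPU-mapping that window; the handle values, header paths, and the assumption that the class needs no allocation parameters are assumptions for illustration, not part of this change:

#include "class/cl90e6.h"   // GF100_SUBDEVICE_MASTER, GF100MASTERMap (path assumed)
#include "nvos.h"           // NVOS33 mapping flags (path assumed)

static NV_STATUS mapMasterWindow(RM_API *pRmApi, NvHandle hClient,
                                 NvHandle hSubdevice, NvHandle hMaster,
                                 GF100MASTERMap **ppMaster)
{
    NV_STATUS status;

    // Only one instance of a given external class is allowed per parent;
    // a second alloc here would fail with NV_ERR_STATE_IN_USE.
    status = pRmApi->AllocWithHandle(pRmApi, hClient, hSubdevice, hMaster,
                                     GF100_SUBDEVICE_MASTER, NULL);
    if (status != NV_OK)
        return status;

    // genapiMap only permits read access for this class, so the mapping
    // is requested read-only.
    return pRmApi->MapToCpu(pRmApi, hClient, hSubdevice, hMaster,
                            0 /* offset */, sizeof(GF100MASTERMap),
                            (void **)ppMaster,
                            DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY));
}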
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** +* +* Description: +* This is a subdevice resource implementation. +* +******************************************************************************/ + +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" + +#include "vgpu/rpc.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" +#include "core/thread_state.h" +#include "kernel/gpu/fifo/kernel_fifo.h" + +#include "objtmr.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "Nvcm.h" +#include "gpu/bus/p2p_api.h" + +NV_STATUS +subdeviceConstruct_IMPL +( + Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV2080_ALLOC_PARAMETERS *pNv2080AllocParams = pParams->pAllocParams; + OBJGPU *pPrimaryGpu; + OBJGPU *pGpu; + NvU32 subDeviceInst; + NV_STATUS status = NV_OK; + RsClient *pRsClient = pCallContext->pClient; + GpuResource *pSubdevGpuRes = staticCast(pSubdevice, GpuResource); + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pParentRef = pResourceRef->pParentRef; + Device *pDevice = GPU_RES_GET_DEVICE(pSubdevice); + NvU32 i; + Subdevice *pSubdeviceTest; + + if (pNv2080AllocParams == NULL) + subDeviceInst = 0; + else + subDeviceInst = pNv2080AllocParams->subDeviceId; + + // validate subdevice instance + if (gpumgrIsSubDeviceInstanceValid(subDeviceInst) == NV_FALSE) + return NV_ERR_INVALID_CLASS; + + status = gpuGetByRef(pResourceRef->pParentRef, NULL, &pPrimaryGpu); + if (status != NV_OK) + return status; + + // Lookup GPU for subdevice instance + status = gpugrpGetGpuFromSubDeviceInstance(GPU_RES_GET_GPUGRP(pDevice), subDeviceInst, &pGpu); + if (status != NV_OK) + return NV_ERR_INVALID_CLASS; + + // Check if subdevice already allocated + if (subdeviceGetByInstance(pRsClient, RES_GET_HANDLE(pDevice), subDeviceInst, &pSubdeviceTest) == NV_OK) + return NV_ERR_INSUFFICIENT_RESOURCES; + + gpuresSetGpu(pSubdevGpuRes, pGpu, NV_FALSE); + + pSubdevice->pDevice = pDevice; + pSubdevice->deviceInst = pDevice->deviceInst; + pSubdevice->subDeviceInst = subDeviceInst; + pSubdevice->bUpdateTGP = NV_FALSE; + + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + pSubdevice->notifyActions[i] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + + 
pSubdevice->hNotifierMemory = NV01_NULL_OBJECT; + pSubdevice->hSemMemory = NV01_NULL_OBJECT; + + { + // + // If using thwap to generate an allocation failure here, fail the alloc + // right away + // + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pPrimaryGpu); + if (pKernelRc != NULL && + !krcTestAllowAlloc(pPrimaryGpu, pKernelRc, + NV_ROBUST_CHANNEL_ALLOCFAIL_SUBDEVICE)) + { + return NV_ERR_GENERIC; + } + } + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_ALLOC_SUBDEVICE(pPrimaryGpu, pRsClient->hClient, pParentRef->hResource, + pResourceRef->hResource, NV20_SUBDEVICE_0, + subDeviceInst, status); + NV_ASSERT_OK_OR_RETURN(status); + } + + return status; +} + +void +subdevicePreDestruct_IMPL +( + Subdevice *pSubdevice +) +{ + subdeviceResetTGP(pSubdevice); +} + +void +subdeviceDestruct_IMPL +( + Subdevice* pSubdevice +) +{ + CALL_CONTEXT *pCallContext; + RsClient *pRsClient = RES_GET_CLIENT(pSubdevice); + RsResourceRef *pResourceRef = RES_GET_REF(pSubdevice); + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + if (pSubdevice->bGcoffDisallowed) + { + osClientGcoffDisallowRefcount(pGpu->pOsGpuInfo, NV_FALSE); + } + + LOCK_METER_DATA(FREE_SUBDEVICE, 0, 0, 0); + + // TODO - Call context lookup in dtor can likely be phased out now that we have RES_GET_CLIENT + resGetFreeParams(staticCast(pSubdevice, RsResource), &pCallContext, NULL); + + // free P2P objects associated with this subDevice + // can't rely on resource server to clean up since object exists in both lists + if (NULL != pSubdevice->pP2PMappingList) + { + CliFreeSubDeviceP2PList(pSubdevice, pCallContext); + } + + // check for any pending client's timer notification for this subdevice + if (pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + tmrCancelCallback(pTmr, pSubdevice); + pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + + subdeviceRestoreLockedClock(pSubdevice, pCallContext); + + // Restore GR tick frequency to default. 
+ subdeviceRestoreGrTickFreq(pSubdevice, pCallContext); + + // Remove NVLink error injection mode request + subdeviceReleaseNvlinkErrorInjectionMode(pSubdevice, pCallContext); + + subdeviceReleaseComputeModeReservation(pSubdevice, pCallContext); + +#ifdef DEBUG + NV_ASSERT(pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE); +#endif + + subdeviceUnsetGpuDebugMode(pSubdevice); + subdeviceRestoreWatchdog(pSubdevice); + + if (pResourceRef != NULL && (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))) + { + NV_RM_RPC_FREE(pGpu, pRsClient->hClient, + pResourceRef->pParentRef->hResource, + pResourceRef->hResource, status); + } + // + // Restore sched settings + // + if (pSubdevice->bSchedPolicySet) + { + if (NV_OK == kfifoRestoreSchedPolicy(pGpu, GPU_GET_KERNEL_FIFO(pGpu))) + { + pSubdevice->bSchedPolicySet = NV_FALSE; + } + } +} + +NV_STATUS +subdeviceInternalControlForward_IMPL +( + Subdevice *pSubdevice, + NvU32 command, + void *pParams, + NvU32 size +) +{ + return gpuresInternalControlForward_IMPL(staticCast(pSubdevice, GpuResource), command, pParams, size); +} + +NV_STATUS +subdeviceControlFilter_IMPL(Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams) +{ + return NV_OK; +} + +NV_STATUS +subdeviceAddP2PApi_IMPL +( + Subdevice *pSubdevice, + P2PApi *pP2PApi +) +{ + PNODE pNode; + NvHandle hPeerSubDevice; + NV_STATUS status; + PCLI_P2P_INFO_LIST *pP2PInfoList; + NvHandle hSubDevice = RES_GET_HANDLE(pSubdevice); + + if (NULL == pP2PApi || NULL == pP2PApi->peer1 || NULL == pP2PApi->peer2) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // in case of loopback, both handles are the same and this will not matter + // otherwise we need the peer subdevice handle + // + hPeerSubDevice = (RES_GET_HANDLE(pP2PApi->peer1) == hSubDevice) ? + RES_GET_HANDLE(pP2PApi->peer2) : + RES_GET_HANDLE(pP2PApi->peer1); + + if (NV_OK != btreeSearch(hPeerSubDevice, &pNode, + pSubdevice->pP2PMappingList)) + { + pP2PInfoList = portMemAllocNonPaged(sizeof(PCLI_P2P_INFO_LIST)); + if (pP2PInfoList == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto failed; + } + + listInit(pP2PInfoList, portMemAllocatorGetGlobalNonPaged()); + + pNode = portMemAllocNonPaged(sizeof(NODE)); + if (pNode == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto failed; + } + + portMemSet(pNode, 0, sizeof(NODE)); + pNode->keyStart = hPeerSubDevice; + pNode->keyEnd = hPeerSubDevice; + pNode->Data = pP2PInfoList; + + status = btreeInsert(pNode, &pSubdevice->pP2PMappingList); +failed: + if (NV_OK != status) + { + portMemFree(pNode); + portMemFree(pP2PInfoList); + return status; + } + } + else + { + pP2PInfoList = pNode->Data; + } + + listAppendValue(pP2PInfoList, &pP2PApi); + + return NV_OK; +} + +NV_STATUS +subdeviceDelP2PApi_IMPL +( + Subdevice *pSubdevice, + P2PApi *pP2PApi +) +{ + PCLI_P2P_INFO_LIST *pP2PInfoList; + PNODE pNode; + NV_STATUS status; + NvHandle hPeerSubDevice; + NvHandle hSubDevice = RES_GET_HANDLE(pSubdevice); + + // + // in case of loopback, both handles are the same and this will not matter + // otherwise we need the peer subdevice handle + // + hPeerSubDevice = (RES_GET_HANDLE(pP2PApi->peer1) == hSubDevice) ? 
+ RES_GET_HANDLE(pP2PApi->peer2) : + RES_GET_HANDLE(pP2PApi->peer1); + + if (NV_OK != (status = btreeSearch(hPeerSubDevice, &pNode, pSubdevice->pP2PMappingList))) + return status; + + pP2PInfoList = pNode->Data; + + listRemoveFirstByValue(pP2PInfoList, &pP2PApi); + if (listCount(pP2PInfoList) == 0) + { + if (NV_OK != (status = btreeUnlink(pNode, &pSubdevice->pP2PMappingList))) + { + return status; + } + + pNode->Data = NULL; + portMemFree(pNode); + pNode = NULL; + portMemFree(pP2PInfoList); + pP2PInfoList = NULL; + } + + return NV_OK; +} + +NV_STATUS +subdeviceGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hSubdevice, + Subdevice **ppSubdevice +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppSubdevice = NULL; + + status = clientGetResourceRef(pClient, hSubdevice, &pResourceRef); + if (status != NV_OK) + return status; + + *ppSubdevice = dynamicCast(pResourceRef->pResource, Subdevice); + + return (*ppSubdevice) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +subdeviceGetByGpu_IMPL +( + RsClient *pClient, + OBJGPU *pGpu, + Subdevice **ppSubdevice +) +{ + Subdevice *pSubdevice = NULL; + OBJGPU *pTmpGpu = NULL; + RS_ITERATOR it; + RsResourceRef *pResourceRef; + + *ppSubdevice = NULL; + + it = clientRefIter(pClient, NULL, classId(Subdevice), RS_ITERATE_DESCENDANTS, NV_TRUE); + while (clientRefIterNext(pClient, &it)) + { + pResourceRef = it.pResourceRef; + pSubdevice = dynamicCast(pResourceRef->pResource, Subdevice); + if (pSubdevice == NULL) + continue; + + pTmpGpu = GPU_RES_GET_GPU(pSubdevice); + + if (pTmpGpu == pGpu) + { + *ppSubdevice = pSubdevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +subdeviceGetByInstance_IMPL +( + RsClient *pClient, + NvHandle hDevice, + NvU32 subDeviceInst, + Subdevice **ppSubdevice +) +{ + RsResourceRef *pDeviceRef; + Subdevice *pSubdevice; + RS_ITERATOR it; + + *ppSubdevice = NULL; + + if (clientGetResourceRefByType(pClient, hDevice, classId(Device), &pDeviceRef) != NV_OK) + return NV_ERR_INVALID_ARGUMENT; + + it = clientRefIter(pClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(pClient, &it)) + { + pSubdevice = dynamicCast(it.pResourceRef->pResource, Subdevice); + + if (pSubdevice && pSubdevice->subDeviceInst == subDeviceInst) + { + *ppSubdevice = pSubdevice; + return NV_OK; + } + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +// **************************************************************************** +// Helper functions +// **************************************************************************** +void +subdeviceUnsetGpuDebugMode_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (!pSubdevice->bGpuDebugModeEnabled) + { + return; + } + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + pGpu->bIsDebugModeEnabled = NV_FALSE; +} + +void +subdeviceReleaseComputeModeReservation_IMPL +( + Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext +) +{ + RsClient *pRsClient = pCallContext->pClient; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + // Release the reservation ONLY IF we had the reservation to begin with. 
Otherwise, + // leave it alone, because someone else has acquired it: + if (pGpu->hComputeModeReservation == pRsClient->hClient) + { + pGpu->hComputeModeReservation = NV01_NULL_OBJECT; + } +} + +void +subdeviceRestoreGrTickFreq_IMPL +( + Subdevice *pSubdevice, + CALL_CONTEXT *pCallContext +) +{ + if (!pSubdevice->bMaxGrTickFreqRequested) + return; + + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJTMR *pTmr; + RsClient *pRsClient = pCallContext->pClient; + RsResourceRef *pSubdeviceRef = pCallContext->pResourceRef; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + if (pSubdeviceRef) + { + pTmr = GPU_GET_TIMER(pGpu); + refcntReleaseReferences(pTmr->pGrTickFreqRefcnt, + NV_REQUESTER_CLIENT_OBJECT(pRsClient->hClient, pSubdeviceRef->hResource), NV_TRUE); + pSubdevice->bMaxGrTickFreqRequested = NV_FALSE; + } +} + +void +subdeviceRestoreWatchdog_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelRc *pKernelRc; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + pKernelRc = GPU_GET_KERNEL_RC(pGpu); + NV_CHECK_OR_RETURN_VOID(LEVEL_INFO, pKernelRc != NULL); + krcWatchdogChangeState(pKernelRc, pSubdevice, RM_CLIENT_DESTRUCTION); +} + +// **************************************************************************** +// Deprecated Functions +// **************************************************************************** + +/** + * WARNING: This function is deprecated! Please use subdeviceGetByGpu and + * GPU_RES_SET_THREAD_BC_STATE (if needed to set thread UC state for SLI) + */ +Subdevice * +CliGetSubDeviceInfoFromGpu +( + NvHandle hClient, + OBJGPU *pGpu +) +{ + RsClient *pClient; + NV_STATUS status; + Subdevice *pSubdevice; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NULL; + + status = subdeviceGetByGpu(pClient, pGpu, &pSubdevice); + if (status != NV_OK) + return NULL; + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + return pSubdevice; +} + +/** + * WARNING: This function is deprecated! Please use subdeviceGetByGpu and + * RES_GET_HANDLE + */ +NV_STATUS +CliGetSubDeviceHandleFromGpu +( + NvHandle hClient, + OBJGPU *pGpu, + NvHandle *phSubDevice +) +{ + Subdevice *pSubdevice; + + if (phSubDevice == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if ((pSubdevice = CliGetSubDeviceInfoFromGpu(hClient, pGpu)) == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *phSubDevice = RES_GET_HANDLE(pSubdevice); + + return NV_OK; +} + +/** + * WARNING: This function is deprecated and use is *strongly* discouraged + * (especially for new code!) + * + * From the function name (CliSetSubDeviceContext) it appears as a simple + * accessor but violates expectations by modifying the SLI BC threadstate (calls + * to GPU_RES_SET_THREAD_BC_STATE). This can be dangerous if not carefully + * managed by the caller. + * + * Instead of using this routine, please use subdeviceGetByHandle then call + * GPU_RES_GET_GPU, RES_GET_HANDLE, GPU_RES_SET_THREAD_BC_STATE as needed. + * + * Note that GPU_RES_GET_GPU supports returning a pGpu for both pDevice, + * pSubdevice, the base pResource type, and any resource that inherits from + * GpuResource. 
That is, instead of using CliSetGpuContext or + * CliSetSubDeviceContext, please use following pattern to look up the pGpu: + * + * OBJGPU *pGpu = GPU_RES_GET_GPU(pResource or pResourceRef->pResource) + * + * To set the threadstate, please use: + * + * GPU_RES_SET_THREAD_BC_STATE(pResource or pResourceRef->pResource); + */ +NV_STATUS +CliSetSubDeviceContext +( + NvHandle hClient, + NvHandle hSubdevice, + NvHandle *phDevice, + OBJGPU **ppGpu +) +{ + Subdevice *pSubdevice; + RsClient *pClient; + NV_STATUS status; + + if (phDevice != NULL) + { + *phDevice = 0; + } + *ppGpu = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return status; + + status = subdeviceGetByHandle(pClient, hSubdevice, &pSubdevice); + if (status != NV_OK) + return status; + + *ppGpu = GPU_RES_GET_GPU(pSubdevice); + if (phDevice != NULL) + { + *phDevice = RES_GET_HANDLE(pSubdevice->pDevice); + } + + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c new file mode 100644 index 000000000..7df4194f6 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c @@ -0,0 +1,279 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "diagnostics/journal.h" +#include "diagnostics/tracer.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/client.h" +#include "rmapi/rs_utils.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + +// +// EVENT RM SubDevice Controls +// +NV_STATUS +subdeviceCtrlCmdEventSetTrigger_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + gpuNotifySubDeviceEvent(pGpu, NV2080_NOTIFIERS_SW, NULL, 0, 0, 0); + + return NV_OK; +} + +// +// subdeviceCtrlCmdEventSetTriggerFifo +// +// Used to signal Vulkan timeline semaphores from the CPU. 
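+// The caller-supplied event handle is forwarded to the HOST engine's
+// non-stall interrupt notifier (engineNonStallIntrNotifyEvent) below.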
+// +NV_STATUS +subdeviceCtrlCmdEventSetTriggerFifo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_TRIGGER_FIFO_PARAMS *pTriggerFifoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + engineNonStallIntrNotifyEvent(pGpu, NV2080_ENGINE_TYPE_HOST, + pTriggerFifoParams->hEvent); + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetNotification_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pSetEventParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + // NV01_EVENT must have been plugged into this subdevice + if (inotifyGetNotificationList(staticCast(pSubdevice, INotifier)) == NULL) + { + NV_PRINTF(LEVEL_INFO, "cmd 0x%x: no event list\n", NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION); + return NV_ERR_INVALID_STATE; + } + + if (pSetEventParams->event >= NV2080_NOTIFIERS_MAXCOUNT) + { + NV_PRINTF(LEVEL_INFO, "bad event 0x%x\n", pSetEventParams->event); + return NV_ERR_INVALID_ARGUMENT; + } + + if (pSetEventParams->event == NV2080_NOTIFIERS_TIMER) + { + NV_PRINTF(LEVEL_INFO, "wrong control call for timer event\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + pRmApi->Control(pRmApi, RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + pSetEventParams, + sizeof *pSetEventParams)); + } + + switch (pSetEventParams->action) + { + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE: + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT: + { + // must be in disabled state to transition to an active state + if (pSubdevice->notifyActions[pSetEventParams->event] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + status = NV_ERR_INVALID_STATE; + break; + } + + if (pSetEventParams->event == NV2080_NOTIFIERS_FIFO_EVENT_MTHD) + { + pGpu->activeFifoEventMthdNotifiers++; + } + + pSubdevice->notifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + + case NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE: + { + if ((pSetEventParams->event == NV2080_NOTIFIERS_FIFO_EVENT_MTHD) && + (pSubdevice->notifyActions[pSetEventParams->event] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)) + { + NV_ASSERT(pGpu->activeFifoEventMthdNotifiers); + pGpu->activeFifoEventMthdNotifiers--; + } + + pSubdevice->notifyActions[pSetEventParams->event] = pSetEventParams->action; + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} + +NV_STATUS +subdeviceCtrlCmdEventSetMemoryNotifies_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS *pSetMemoryNotifiesParams +) +{ + Memory *pMemory; + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + NvU32 i; + + // ensure there's no pending notifications if there is an existing notification buffer + if (pSubdevice->hNotifierMemory != NV01_NULL_OBJECT) + { + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + { + if (pSubdevice->notifyActions[i] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + return NV_ERR_STATE_IN_USE; + } + } + } + + if (pSetMemoryNotifiesParams->hMemory == NV01_NULL_OBJECT) + { + pSubdevice->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pSubdevice->pNotifierMemory = NULL; + return NV_OK; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, pSetMemoryNotifiesParams->hMemory, &pMemory)); + + if (pMemory->pMemDesc->Size < NV_SIZEOF32(NvNotification) * NV2080_NOTIFIERS_MAXCOUNT) 
+ { + return NV_ERR_INVALID_LIMIT; + } + + pSubdevice->hNotifierMemory = pSetMemoryNotifiesParams->hMemory; + pSubdevice->pNotifierMemory = pMemory; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetSemaphoreMemory_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_SEMAPHORE_MEMORY_PARAMS *pSetSemMemoryParams +) +{ + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + Memory *pMemory; + NvU32 i; + + if (pSubdevice->hSemMemory != NV01_NULL_OBJECT) + { + for (i = 0; i < NV2080_NOTIFIERS_MAXCOUNT; i++) + { + if (pSubdevice->notifyActions[i] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + return NV_ERR_STATE_IN_USE; + } + } + } + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandle(pClient, pSetSemMemoryParams->hSemMemory, &pMemory)); + + if (pSetSemMemoryParams->semOffset >= pMemory->pMemDesc->Size) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pSubdevice->hSemMemory = pSetSemMemoryParams->hSemMemory; + pMemory->vgpuNsIntr.nsSemOffset = pSetSemMemoryParams->semOffset; + + pMemory->vgpuNsIntr.nsSemValue = 0; + pMemory->vgpuNsIntr.guestMSIAddr = 0; + pMemory->vgpuNsIntr.guestMSIData = 0; + pMemory->vgpuNsIntr.guestDomainId = 0; + pMemory->vgpuNsIntr.pVgpuVfioRef = NULL; + pMemory->vgpuNsIntr.isSemaMemValidationEnabled = NV_TRUE; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdEventSetSemaMemValidation_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_EVENT_SET_SEMA_MEM_VALIDATION_PARAMS *pSetSemaMemValidationParams +) +{ + Memory *pMemory; + RsClient *pClient = RES_GET_CLIENT(pSubdevice); + NvU32 *pSemValue; + NV_STATUS rmStatus = NV_ERR_INVALID_OBJECT_HANDLE; + + rmStatus = memGetByHandle(pClient, pSetSemaMemValidationParams->hSemMemory, &pMemory); + + if (rmStatus == NV_OK) + { + pSemValue = (NvU32 *)NvP64_VALUE(memdescGetKernelMapping(pMemory->pMemDesc)); + + if (pSemValue == NULL) + { + return NV_ERR_INVALID_ADDRESS; + } + + portMemSet(pSemValue, 0, RM_PAGE_SIZE); + pMemory->vgpuNsIntr.nsSemValue = 0; + pMemory->vgpuNsIntr.isSemaMemValidationEnabled = pSetSemaMemValidationParams->isSemaMemValidationEnabled; + } + + return rmStatus; +} + diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_fla.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_fla.c new file mode 100644 index 000000000..21148bb8a --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_fla.c @@ -0,0 +1,306 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "gpu/subdevice/subdevice.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/fabric_vaspace.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "gpu/bus/kern_bus.h" + +#include "vgpu/rpc.h" +#include "rmapi/client.h" + +static NV_STATUS +_subdeviceFlaRangeModeHostManagedVasDestroy +( + Subdevice *pSubdevice, + NV2080_CTRL_FLA_RANGE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU32 gfid; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // See _subdeviceFlaRangeModeHostManagedVasInit for details about this check. + if (!RMCFG_FEATURE_PLATFORM_GSP && IS_GFID_PF(gfid) && + (pKernelNvlink != NULL) && !knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink)) + return NV_ERR_NOT_SUPPORTED; + + kbusDestroyHostManagedFlaVaspace_HAL(pGpu, GPU_GET_KERNEL_BUS(pGpu), gfid); + + return NV_OK; +} + +static NV_STATUS +_subdeviceFlaRangeModeHostManagedVasInit +( + Subdevice *pSubdevice, + NV2080_CTRL_FLA_RANGE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 hSubdevice = RES_GET_HANDLE(pSubdevice); + NvHandle hDevice = RES_GET_PARENT_HANDLE(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvU32 gfid; + NV_STATUS status; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + if (!RMCFG_FEATURE_PLATFORM_GSP && IS_GFID_PF(gfid) && + (pKernelNvlink != NULL) && !knvlinkIsGpuConnectedToNvswitch(pGpu, pKernelNvlink)) + return NV_ERR_NOT_SUPPORTED; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, pParams->size != 0, NV_ERR_INVALID_ARGUMENT); + + if (pKernelBus->flaInfo.bFlaAllocated) + { + NV_PRINTF(LEVEL_ERROR, + "FLA VAS is not allowed for base: %llx, size:%llx in gpu: %x\n", + pParams->base, pParams->size, pGpu->gpuInstance); + + return NV_ERR_INVALID_STATE; + } + + status = kbusAllocateHostManagedFlaVaspace_HAL(pGpu, pKernelBus, hClient, hDevice, + hSubdevice, pParams->hVASpace, + pParams->base, pParams->size, + gfid); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Host Managed FLA Vaspace failed, status: %x, gpu: %x \n", + status, pGpu->gpuInstance); + return status; + } + + return NV_OK; +} + +static NV_STATUS +_subdeviceFlaRangeModeInit +( + Subdevice *pSubdevice, + NV2080_CTRL_FLA_RANGE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NV_STATUS status; + + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + // Must not be called from vGPU guests, except for arch MODS. + if (IS_VIRTUAL(pGpu)) + return NV_ERR_NOT_SUPPORTED; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, pParams->size != 0, NV_ERR_INVALID_ARGUMENT); + + // + // check if FM has previously allocated for the same FLA range. + // + // Note that in case of vGPU, FLA VAS is allocated in guests, thus + // pKernelBus->flaInfo.bFlaAllocated shouldn't be set in the host. 
We allow + // FM running in the host to override the FLA range as we expect device + // to be idle when the vGPU partitions are being configured. + // + if (pKernelBus->flaInfo.bFlaAllocated) + { + // check if FM has previously allocated for the same FLA range + if (!kbusVerifyFlaRange_HAL(pGpu, pKernelBus, pParams->base, pParams->size)) + return NV_ERR_IN_USE; + + return NV_OK; + } + + // + // We don't have to care about Arch Mods/pre-silicon verification scenarios + // for programming FLA base address to scratch register + // + NV_CHECK_OR_RETURN(LEVEL_SILENT, pKernelNvlink != NULL, NV_ERR_NOT_SUPPORTED); + + status = knvlinkSetUniqueFlaBaseAddress(pGpu, pKernelNvlink, pParams->base); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to set FLA range because of invalid config for gpu: %x\n", + pGpu->gpuInstance); + return status; + } + + status = kbusAllocateFlaVaspace_HAL(pGpu, pKernelBus, pParams->base, pParams->size); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Allocating new FLA Vaspace failed, status: %x, gpu: %x \n", + status, pGpu->gpuInstance); + return status; + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdFlaRange_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FLA_RANGE_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + POBJGPU pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + KernelMIGManager *pKernelMIGManager; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (!rmclientIsCapableOrAdminByHandle(hClient, + NV_RM_CAP_EXT_FABRIC_MGMT, + pCallContext->secInfo.privLevel)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (!kbusIsFlaSupported(pKernelBus)) + { + NV_PRINTF(LEVEL_INFO, + "FLA is not supported in this platform\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + if (!ONEBITSET(pParams->mode)) + return NV_ERR_INVALID_ARGUMENT; + + // + // Even if FLA is enabled, it is not compatible with MIG memory partitioning + // If MIG memory partitioning is enabled, we should not allow FLA address + // range creation + // + pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + if ((pKernelMIGManager != NULL) && !kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)) + { + NV_PRINTF(LEVEL_INFO, + "ERROR: FLA cannot be enabled with peer support disabled with MIG\n"); + return NV_ERR_NOT_SUPPORTED; + } + + switch (pParams->mode) + { + case NV2080_CTRL_FLA_RANGE_PARAMS_MODE_INITIALIZE: + status = _subdeviceFlaRangeModeInit(pSubdevice, pParams); + break; + + // Deprecated as no client should be able to invoke this control option. 
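+        // The request is accepted but performs no work; NV_OK is returned unconditionally.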
+ case NV2080_CTRL_FLA_RANGE_PARAMS_MODE_DESTROY: + status = NV_OK; + break; + + case NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_INITIALIZE: + status = _subdeviceFlaRangeModeHostManagedVasInit(pSubdevice, pParams); + break; + + case NV2080_CTRL_FLA_RANGE_PARAMS_MODE_HOST_MANAGED_VAS_DESTROY: + status = _subdeviceFlaRangeModeHostManagedVasDestroy(pSubdevice, pParams); + break; + + default: + status = NV_ERR_INVALID_OPERATION; + break; + } + + return status; +} + +// Control call to manage FLA range in RM +NV_STATUS +subdeviceCtrlCmdFlaSetupInstanceMemBlock_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +// Control call to manage FLA range in RM +NV_STATUS +subdeviceCtrlCmdFlaGetRange_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FLA_GET_RANGE_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +subdeviceCtrlCmdFlaGetFabricMemStats_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_FLA_GET_FABRIC_MEM_STATS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + FABRIC_VASPACE *pFabricVAS = NULL; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (pGpu->pFabricVAS == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + pFabricVAS = dynamicCast(pGpu->pFabricVAS, FABRIC_VASPACE); + + if (pFabricVAS->bRpcAlloc) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + pParams->totalSize = fabricvaspaceGetVaLimit(pFabricVAS) - + fabricvaspaceGetVaStart(pFabricVAS) + 1; + + return fabricvaspaceGetFreeHeap(pFabricVAS, &pParams->freeSize); +} diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c new file mode 100644 index 000000000..80abe75a2 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c @@ -0,0 +1,2372 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. + * File contains ctrls related to general GPU + */ + +#include "core/core.h" +#include "core/locks.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_db.h" +#include "nvrm_registry.h" +#include "nvVer.h" +#include "gpu/bif/kernel_bif.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mmu/kern_gmmu.h" +#include "kernel/gpu/intr/intr.h" +#include "kernel/gpu/mc/kernel_mc.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "objtmr.h" +#include "platform/chipset/chipset.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/gr/kernel_graphics_manager.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_mgr.h" + +#include "gpu/mem_sys/kern_mem_sys.h" + +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "rmapi/resource_fwd_decls.h" +#include "rmapi/client.h" + +#include "class/cl900e.h" + + + + +static NV_STATUS +getGpuInfos(Subdevice *pSubdevice, NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pParams, NvBool bCanAccessHw) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + NvU32 i = 0; + NvU32 data = 0; + NvBool bGspForward = NV_FALSE; + + // bit to set when telling GSP to fill in an info entry + const NvU32 indexForwardToGsp = 0x80000000; + + if ((pParams->gpuInfoListSize > NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE) || + (pParams->gpuInfoListSize == 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + for (i = 0; i < pParams->gpuInfoListSize; i++) + { + if (pParams->gpuInfoList[i].index >= NV2080_CTRL_GPU_INFO_MAX_LIST_SIZE) + { + return NV_ERR_INVALID_ARGUMENT; + } + + switch (pParams->gpuInfoList[i].index) + { + case NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY: + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + if (kbusIsFlaSupported(pKernelBus)) + { + data = NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY_YES; + } + else + { + data = NV2080_CTRL_GPU_INFO_INDEX_GPU_FLA_CAPABILITY_NO; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_MINOR_REVISION_EXT: + { + data = gpuGetChipMinExtRev(pGpu); + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV0: + { + data = 0; + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_NETLIST_REV1: + { + data = 0; + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_SYSMEM_ACCESS: + { + data = NV2080_CTRL_GPU_INFO_SYSMEM_ACCESS_YES; + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_GEMINI_BOARD: + { + data = !!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_GEMINI); + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_SURPRISE_REMOVAL_POSSIBLE: + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + data = !!pCl->getProperty(pCl, PDB_PROP_CL_IS_EXTERNAL_GPU); + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_IBMNPU_RELAXED_ORDERING: + { + NvBool mode = NV_FALSE; + data = NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_UNSUPPORTED; + + if (osGetIbmnpuRelaxedOrderingMode(pGpu->pOsGpuInfo, &mode) == NV_OK) + { + data = NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_DISABLED; + + if (mode) + { + data = NV2080_CTRL_GPU_INFO_IBMNPU_RELAXED_ORDERING_ENABLED; + } + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED: + { + if (gpuIsGlobalPoisonFuseEnabled(pGpu)) + { + data = NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED_YES; + } + else + { + data = NV2080_CTRL_GPU_INFO_INDEX_GLOBAL_POISON_FUSE_ENABLED_NO; + } + break; + } + case 
NV2080_CTRL_GPU_INFO_INDEX_NVSWITCH_PROXY_DETECTED: + { + NV_CHECK_OR_ELSE(LEVEL_WARNING, bCanAccessHw, + { data = 0; status = NV_ERR_INVALID_ARGUMENT; break; }); + + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + data = NV2080_CTRL_GPU_INFO_NVSWITCH_PROXY_DETECTED_NO; + + if (pKernelNvlink != NULL && + knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink)) + { + data = NV2080_CTRL_GPU_INFO_NVSWITCH_PROXY_DETECTED_YES; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_GPU_SMC_MODE: + { + NV_CHECK_OR_ELSE(LEVEL_WARNING, bCanAccessHw, + { data = 0; status = NV_ERR_INVALID_ARGUMENT; break; }); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_GPU_GET_SMC_MODE_PARAMS params; + + if (IS_VIRTUAL(pGpu)) + { + data = IS_MIG_ENABLED(pGpu) ? + NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_ENABLED : + NV2080_CTRL_GPU_INFO_GPU_SMC_MODE_DISABLED; + break; + } + + portMemSet(¶ms, 0x0, sizeof(params)); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GPU_GET_SMC_MODE, + ¶ms, + sizeof(params))); + data = params.smcMode; + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_SPLIT_VAS_MGMT_SERVER_CLIENT_RM: + { + if (gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + data = NV2080_CTRL_GPU_INFO_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_YES; + } + else + { + data = NV2080_CTRL_GPU_INFO_SPLIT_VAS_MGMT_SERVER_CLIENT_RM_NO; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_GPU_SM_VERSION: + { + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + if ((pKernelGraphicsManager == NULL) || + !pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized || + (pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo == NULL)) + { + NV_PRINTF(LEVEL_ERROR, "Unable to retrieve SM version!\n"); + data = NV2080_CTRL_GR_INFO_SM_VERSION_NONE; + status = NV_ERR_INVALID_STATE; + } + else + { + data = pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_SM_VERSION].data; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM: + { + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + if (kfifoIsPerRunlistChramEnabled(pKernelFifo)) + { + data = NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM_ENABLED; + } + else + { + data = NV2080_CTRL_GPU_INFO_INDEX_PER_RUNLIST_CHANNEL_RAM_DISABLED; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY: + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)) + { + data = NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY_YES; + } + else + { + data = NV2080_CTRL_GPU_INFO_INDEX_GPU_ATS_CAPABILITY_NO; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_NVENC_STATS_REPORTING_STATE: + { + data = NV2080_CTRL_GPU_INFO_NVENC_STATS_REPORTING_STATE_NOT_SUPPORTED; + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED: + { + if (pGpu->bNeed4kPageIsolation) + { + data = NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_YES; + } + else + { + data = NV2080_CTRL_GPU_INFO_INDEX_4K_PAGE_ISOLATION_REQUIRED_NO; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED: + { + if (IsMobile(pGpu)) + { + data = NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_YES; + } + else + { + data = NV2080_CTRL_GPU_INFO_INDEX_MOBILE_CONFIG_ENABLED_NO; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY: + { + { + // Always return ENABLED for Baremetal/Host + data = NV2080_CTRL_GPU_INFO_INDEX_GPU_PROFILING_CAPABILITY_ENABLED; + } + 
break; + } + case NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY: + { + { + // Always return ENABLED for Baremetal/Host + data = NV2080_CTRL_GPU_INFO_INDEX_GPU_DEBUGGING_CAPABILITY_ENABLED; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU: + { + if (gpuGetChipInfo(pGpu) && gpuGetChipInfo(pGpu)->isCmpSku) + { + data = NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU_YES; + } + else + { + data = NV2080_CTRL_GPU_INFO_INDEX_CMP_SKU_NO; + } + break; + } + case NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY: + { + data = NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_NO; + + if (osDmabufIsSupported() && + (!IS_VIRTUAL(pGpu)) && + (!NVCPU_IS_PPC64LE)) + { + data = NV2080_CTRL_GPU_INFO_INDEX_DMABUF_CAPABILITY_YES; + } + break; + } + default: + { + // Only forward to GSP if we're in the HW-access-enabled control + if (IS_GSP_CLIENT(pGpu) && bCanAccessHw) + { + pParams->gpuInfoList[i].index |= indexForwardToGsp; + bGspForward = NV_TRUE; + } + else + { + data = 0; + status = NV_ERR_INVALID_ARGUMENT; + } + break; + } + } + + // save off data value + pParams->gpuInfoList[i].data = data; + } + + if (IS_GSP_CLIENT(pGpu) && bGspForward && (status == NV_OK)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + return pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pSubdevice), + RES_GET_HANDLE(pSubdevice), + NV2080_CTRL_CMD_GPU_GET_INFO_V2, + pParams, sizeof(*pParams)); + } + + return status; +} + +NV_STATUS +subdeviceCtrlCmdGpuGetInfoV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams +) +{ + return getGpuInfos(pSubdevice, pGpuInfoParams, NV_TRUE); +} + +// +// subdeviceCtrlCmdGpuGetCachedInfo: As subdeviceCtrlCmdGpuGetInfoV2, except +// does not perform any HW access (NO_GPUS_ACCESS and NO_GPUS_LOCK flags) +// +NV_STATUS +subdeviceCtrlCmdGpuGetCachedInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_INFO_V2_PARAMS *pGpuInfoParams +) +{ + return getGpuInfos(pSubdevice, pGpuInfoParams, NV_FALSE); +} + +static POBJHWBC +getBridgeObject(OBJHWBC *pHWBC, NvU32 hwbcId) +{ + OBJHWBC *pBridgeObject = NULL; + if (NULL != pHWBC) + { + if (hwbcId == pHWBC->hwbcId) + { + pBridgeObject = pHWBC; + } + else + { + pBridgeObject = getBridgeObject(pHWBC->pSibling, hwbcId); + if (NULL == pBridgeObject) + { + pBridgeObject = getBridgeObject(pHWBC->pFirstChild, hwbcId); + } + } + } + return pBridgeObject; +} + +static NV_STATUS +getPlxFirmwareAndBusInfo +( + OBJHWBC *pHWBC, + NvU32 *domainId, + NvU8 *busId, + NvU8 *deviceId, + NvU8 *funcId, + NvU32 *fwVersion, + NvU8 *oemVersion, + NvU8 *siliconRevision, + NvU8 *bcRes +) +{ + if (NULL == pHWBC) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (domainId) + *domainId = pHWBC->ctrlDev.domain; + if (busId) + *busId = pHWBC->ctrlDev.bus; + if (deviceId) + *deviceId = pHWBC->ctrlDev.device; + if (funcId) + *funcId = pHWBC->ctrlDev.func; + if (fwVersion) + *fwVersion = pHWBC->fwVersion; + if (oemVersion) + *oemVersion = pHWBC->fwOemVersion; + if (siliconRevision) + *siliconRevision = pHWBC->plxRevision; + if (bcRes) + *bcRes = (NvU8)pHWBC->bcRes; + return NV_OK; +} + +static NV_STATUS +getPlxFirmwareVersion +( + NvU32 hwbcId, + NvU32 *fwVersion, + NvU8 *oemVersion, + NvU8 *siliconRevision, + NvU8 *bcRes +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + OBJHWBC *pHWBC = getBridgeObject(pCl->pHWBC, hwbcId); + + return getPlxFirmwareAndBusInfo(pHWBC, NULL, NULL, NULL, NULL, fwVersion, + oemVersion, siliconRevision, bcRes); +} + +static NvU8 +getBridgeCountAndId(OBJHWBC *pHWBC, NvU32 pBridgeId[], NvU32 
*bridgeIndex) +{ + NvU8 count = 0; + if ((NULL == bridgeIndex) || + (*bridgeIndex >= NV2080_CTRL_MAX_PHYSICAL_BRIDGE)) + { + return count; + } + if (NULL != pHWBC) + { + if ((HWBC_PLX_PEX8747 == pHWBC->bcRes) || (HWBC_NVIDIA_BR04 == pHWBC->bcRes)) + { + pBridgeId[*bridgeIndex] = pHWBC->hwbcId; + (*bridgeIndex)++; + count++; + } + count += getBridgeCountAndId(pHWBC->pSibling, pBridgeId, bridgeIndex); + count += getBridgeCountAndId(pHWBC->pFirstChild, pBridgeId, bridgeIndex); + } + return count; +} + +static NV_STATUS +getBridgeData +( + NvU8 *pPlxCount, + NvU32 pBridgeId[] +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + NvU32 bridgeIndex = 0; + + if (NULL == pPlxCount) + { + return NV_ERR_INVALID_ARGUMENT; + } + + *pPlxCount = getBridgeCountAndId(pCl->pHWBC, pBridgeId, &bridgeIndex); + NV_ASSERT_OR_RETURN(*pPlxCount < NV2080_CTRL_MAX_PHYSICAL_BRIDGE, + NV_ERR_OUT_OF_RANGE); + return NV_OK; +} + +static NV_STATUS +getUpstreamBridgeIds +( + OBJGPU *pGpu, + NvU8 *pPlxCount, + NvU32 pBridgeId[] +) +{ + HWBC_LIST *pGpuHWBCList; + NvU8 bridgeIndex = 0; + + if (NULL == pPlxCount) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpuHWBCList = pGpu->pHWBCList; + while(pGpuHWBCList) + { + NV_ASSERT_OR_RETURN(pGpuHWBCList->pHWBC != NULL, NV_ERR_INVALID_POINTER); + pBridgeId[bridgeIndex] = pGpuHWBCList->pHWBC->hwbcId; + pGpuHWBCList = pGpuHWBCList->pNext; + bridgeIndex++; + NV_ASSERT_OR_RETURN(bridgeIndex < NV2080_CTRL_MAX_PHYSICAL_BRIDGE, + NV_ERR_OUT_OF_RANGE); + } + *pPlxCount = bridgeIndex; + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdGpuGetPhysicalBridgeVersionInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PHYSICAL_BRIDGE_VERSION_INFO_PARAMS *pBridgeInfoParams +) +{ + NV_STATUS status = NV_OK; + NvU8 bridgeIndex; + status = getBridgeData(&pBridgeInfoParams->bridgeCount, + pBridgeInfoParams->hPhysicalBridges); + if (status == NV_OK) + { + NV2080_CTRL_GPU_PHYSICAL_BRIDGE_VERSION_PARAMS *pBridgeVersionParams = + pBridgeInfoParams->bridgeList; + for (bridgeIndex = 0; + bridgeIndex < pBridgeInfoParams->bridgeCount; + bridgeIndex++) + { + status = getPlxFirmwareVersion(pBridgeInfoParams->hPhysicalBridges[bridgeIndex], + &pBridgeVersionParams->fwVersion, + &pBridgeVersionParams->oemVersion, + &pBridgeVersionParams->siliconRevision, + &pBridgeVersionParams->hwbcResourceType); + if (status != NV_OK) + { + break; + } + pBridgeVersionParams++; + } + } + return status; +} + +NV_STATUS +subdeviceCtrlCmdGpuGetAllBridgesUpstreamOfGpu_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ALL_BRIDGES_UPSTREAM_OF_GPU_PARAMS *pBridgeInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + NvU8 bridgeIndex; + HWBC_LIST *pGpuHWBCList; + status = getUpstreamBridgeIds(pGpu, + &pBridgeInfoParams->bridgeCount, + pBridgeInfoParams->physicalBridgeIds); + if (status == NV_OK) + { + NV2080_CTRL_GPU_BRIDGE_VERSION_PARAMS *pBridgeVersionParams = + pBridgeInfoParams->bridgeList; + pGpuHWBCList = pGpu->pHWBCList; + for (bridgeIndex = 0; + bridgeIndex < pBridgeInfoParams->bridgeCount && pGpuHWBCList; + bridgeIndex++) + { + status = getPlxFirmwareAndBusInfo(pGpuHWBCList->pHWBC, + &pBridgeVersionParams->domain, + &pBridgeVersionParams->bus, + &pBridgeVersionParams->device, + &pBridgeVersionParams->func, + &pBridgeVersionParams->fwVersion, + &pBridgeVersionParams->oemVersion, + &pBridgeVersionParams->siliconRevision, + &pBridgeVersionParams->hwbcResourceType); + if (status != NV_OK) + { + break; + } + pGpuHWBCList = pGpuHWBCList->pNext; + 
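+            // Advance the output entry in lock-step with the upstream bridge list.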
pBridgeVersionParams++; + } + } + return status; +} + +/*! + * @brief This command can be used for Optimus enabled system. + * + * @return : + * NV_OK + */ +NV_STATUS +subdeviceCtrlCmdGpuSetOptimusInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_OPTIMUS_INFO_PARAMS *pGpuOptimusInfoParams +) +{ + NvU32 status = NV_OK; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (pGpuOptimusInfoParams->isOptimusEnabled) + { + // + // Setting pMemoryManager->bPersistentStandbyBuffer for Optimus system. + // It is used for sys_mem allocation which is pinned across + // S3 transitions.Sys_mem allocations are done at first S3 cycle + // and release during driver unload, which reduces system + // VM fragmentation, which was a problem in optimus system. + // For more details refer bug 754122. + // + GPU_GET_MEMORY_MANAGER(pGpu)->bPersistentStandbyBuffer = NV_TRUE; + } + return status; +} + +// RM reports dynamic encoder capacity as a percentage (0-100) of the encoders fixed +// capacity. Fixed capacity is readable via NvEncode API and is defined in +// drivers/video/encode/src/CNVVAEncoder.cpp#200 +// +// Dynamic capacity of 0x0 indicates that encoder performance may be minimal for this +// GPU and software should fall back to CPU-based encode. +// + +#define NV_ENC_CAPACITY_MAX_VALUE 100 +// +// subdeviceCtrlCmdGpuGetEncoderCapacity +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEncoderCapacity_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_PARAMS *pEncoderCapacityParams +) +{ + NV_STATUS rmStatus = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if ((pEncoderCapacityParams->queryType != NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_H264) && + (pEncoderCapacityParams->queryType != NV2080_CTRL_GPU_GET_ENCODER_CAPACITY_HEVC)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pEncoderCapacityParams->encoderCapacity = NV_ENC_CAPACITY_MAX_VALUE; + + return rmStatus; +} + +// +// subdeviceCtrlCmdGpuGetNvencSwSessionStats +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetNvencSwSessionStats_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_STATS_PARAMS *pParams +) +{ + pParams->encoderSessionCount = 0; + pParams->averageEncodeFps = 0; + pParams->averageEncodeLatency = 0; + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetNvencSwSessionInfo +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// + +NV_STATUS +subdeviceCtrlCmdGpuGetNvencSwSessionInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS *pParams +) +{ + pParams->sessionInfoTblEntry = 0; + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetNvfbcSwSessionStats +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetNvfbcSwSessionStats_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_STATS_PARAMS *pParams +) +{ + pParams->sessionCount = 0; + pParams->averageFPS = 0; + pParams->averageLatency = 0; + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// + +NV_STATUS +subdeviceCtrlCmdGpuGetNvfbcSwSessionInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_NVFBC_SW_SESSION_INFO_PARAMS *pParams +) +{ + pParams->sessionInfoCount = 0; + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetSdm +// +// 
Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetSdm_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_SDM_PARAMS *pSdmParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pSdmParams->subdeviceMask = gpuGetSubdeviceMask(pGpu); + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuSetSdm +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuSetSdm_IMPL +( + Subdevice* pSubdevice, + NV2080_CTRL_GPU_SET_SDM_PARAMS* pSdmParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU32 subdeviceInstance; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (!ONEBITSET(pSdmParams->subdeviceMask)) + { + NV_PRINTF(LEVEL_ERROR, "Subdevice mask has none or more than one bit set"); + return NV_ERR_INVALID_DATA; + } + + if (gpuIsStateLoaded(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "NV2080_CTRL_CMD_GPU_SET_SDM cannot be called after the GPU is loaded"); + return NV_ERR_INVALID_STATE; + } + subdeviceInstance = BIT_IDX_32(pSdmParams->subdeviceMask); + + if (subdeviceInstance >= NV_MAX_SUBDEVICES) + { + NV_PRINTF(LEVEL_ERROR, "Subdevice mask exceeds the max count of subdevices"); + return NV_ERR_INVALID_DATA; + } + pGpu->subdeviceInstance = subdeviceInstance; + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetSimulationInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetSimulationInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS *pGpuSimulationInfoParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (IS_SILICON(pGpu)) + { + pGpuSimulationInfoParams->type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE; + } + else + { + pGpuSimulationInfoParams->type = NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_UNKNOWN; + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetEngines +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEngines_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINES_PARAMS *pParams +) +{ + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS getEngineParamsV2; + NvU32 *pKernelEngineList = NvP64_VALUE(pParams->engineList); + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + portMemSet(&getEngineParamsV2, 0, sizeof(getEngineParamsV2)); + + status = subdeviceCtrlCmdGpuGetEnginesV2(pSubdevice, &getEngineParamsV2); + NV_CHECK_OR_RETURN(LEVEL_INFO, NV_OK == status, status); + + // NULL clients just want an engine count + if (NULL != pKernelEngineList) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, pParams->engineCount >= getEngineParamsV2.engineCount, + NV_ERR_BUFFER_TOO_SMALL); + portMemCopy(pKernelEngineList, + getEngineParamsV2.engineCount * sizeof(*getEngineParamsV2.engineList), getEngineParamsV2.engineList, + getEngineParamsV2.engineCount * sizeof(*getEngineParamsV2.engineList)); + } + + pParams->engineCount = getEngineParamsV2.engineCount; + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEnginesV2 +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEnginesV2_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINES_V2_PARAMS *pEngineParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // Update the engine Database + NV_ASSERT_OK_OR_RETURN(gpuUpdateEngineTable(pGpu)); + + // Validate engine 
count + if (pGpu->engineDB.size > NV2080_GPU_MAX_ENGINES_LIST_SIZE) + { + NV_PRINTF(LEVEL_ERROR, "The engine database's size (0x%x) exceeds " + "NV2080_GPU_MAX_ENGINES_LIST_SIZE (0x%x)!\n", + pGpu->engineDB.size, NV2080_GPU_MAX_ENGINES_LIST_SIZE); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_STATE; + } + + { + // Need this null check in case object doesn't exist when using Orin trimmed profile + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + if (pKernelMIGManager != NULL) + { + // Filter engines based on current partitioning scheme + status = kmigmgrFilterEngineList(pGpu, + pKernelMIGManager, + pSubdevice, + pEngineParams->engineList, + &pEngineParams->engineCount); + } + } + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEngineClasslist +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEngineClasslist_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS *pClassParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + ENGDESCRIPTOR engDesc; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + status = gpuXlateClientEngineIdToEngDesc(pGpu, pClassParams->engineType, &engDesc); + NV_ASSERT(status == NV_OK); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST Invalid engine ID 0x%x\n", + pClassParams->engineType); + DBG_BREAKPOINT(); + return status; + } + + status = gpuGetClassList(pGpu, &pClassParams->numClasses, NvP64_VALUE(pClassParams->classList), engDesc); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST Class List query failed\n"); + } + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEnginePartnerList +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEnginePartnerList_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_PARTNERLIST_PARAMS *pPartnerListParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + ENGDESCRIPTOR engDesc; + NvU32 localType; + NvU32 i; + PCLASSDESCRIPTOR pClass; + NV_STATUS status = NV_OK; + + pPartnerListParams->numPartners = 0; + + status = gpuXlateClientEngineIdToEngDesc(pGpu, pPartnerListParams->engineType, &engDesc); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Invalid engine ID 0x%x\n", + pPartnerListParams->engineType); + return status; + } + + // find class in class db + status = gpuGetClassByClassId(pGpu, pPartnerListParams->partnershipClassId, &pClass); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Invalid class ID 0x%x\n", + pPartnerListParams->partnershipClassId); + return status; + } + + // Make sure that the engine related to this class is FIFO... 
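+    // Partnership classes are hosted on ENG_KERNEL_FIFO; any other engine is rejected here.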
+ if (pClass->engDesc != ENG_KERNEL_FIFO) + { + NV_PRINTF(LEVEL_ERROR, + "Class 0x%x is not considered a partnership class.\n", + pPartnerListParams->partnershipClassId); + return NV_ERR_NOT_SUPPORTED; + } + + // Translate the instance-local engine type to the global engine type in MIG mode + localType = pPartnerListParams->engineType; + if (IS_MIG_IN_USE(pGpu)) + { + MIG_INSTANCE_REF ref; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, localType, + &pPartnerListParams->engineType)); + } + + // See if the hal wants to handle this + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + status = kfifoGetEnginePartnerList_HAL(pGpu, pKernelFifo, pPartnerListParams); + + // Restore the client's passed engineType + pPartnerListParams->engineType = localType; + + if (NV_OK == status) + { + goto subdeviceCtrlCmdGpuGetEnginePartnerList_filter; + } + + // + // For channels that the hal didn't handle, we should just return + // all of the supported engines except for the target engine. + // + + // Update the engine Database + NV_ASSERT_OK_OR_RETURN(gpuUpdateEngineTable(pGpu)); + + // Make sure it all will fit + if (pGpu->engineDB.size > NV2080_CTRL_GPU_MAX_ENGINE_PARTNERS) + { + NV_PRINTF(LEVEL_ERROR, + "partnerList space is too small, time to increase. This is fatal\n"); + DBG_BREAKPOINT(); + return status; + } + + // Copy over all of the engines except the target + for (i = 0; i < pGpu->engineDB.size; i++) + { + // Skip the engine handed in + if (pGpu->engineDB.pType[i] != pPartnerListParams->engineType ) + { + pPartnerListParams->partnerList[pPartnerListParams->numPartners++] = pGpu->engineDB.pType[i]; + } + } + +subdeviceCtrlCmdGpuGetEnginePartnerList_filter: + if (IS_MIG_IN_USE(pGpu)) + { + // Remove entries which don't exist in this client's GPU instance + status = kmigmgrFilterEnginePartnerList(pGpu, pKernelMIGManager, + pSubdevice, + pPartnerListParams); + } + + return status; +} + +// +// subdeviceCtrlCmdGpuGetEngineFaultInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetEngineFaultInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_FAULT_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NV_STATUS status = NV_OK; + NvU32 engineType = pParams->engineType; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // + // When MIG is enabled, clients pass in their instance-specific engineId + // rather than physical engineId since each client only sees engines available in + // its own instance. 
So we need to convert this local engineId to physical engineId + // + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + MIG_INSTANCE_REF ref; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hClient, &ref)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + pParams->engineType, + &engineType)); + } + + // Populate HW info for SW engine entry + status = kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, ENGINE_INFO_TYPE_NV2080, + engineType, ENGINE_INFO_TYPE_MMU_FAULT_ID, + &pParams->mmuFaultId); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "NV2080_CTRL_CMD_GPU_GET_ENGINE_INFO failed\n"); + return status; + } + + // Only GR engine supports subcontext faulting on Volta+ chips + pParams->bSubcontextSupported = (NV2080_ENGINE_TYPE_IS_GR(engineType) && + kfifoIsSubcontextSupported(pKernelFifo)); + + return status; +} + +ct_assert(NV2080_CTRL_INTERNAL_MAX_TPC_PER_GPC_COUNT == + NV2080_CTRL_CMD_GPU_GET_PES_INFO_MAX_TPC_PER_GPC_COUNT); + +// +// subdeviceCtrlCmdGpuGetFermiGpcInfo +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetFermiGpcInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *pParams +) +{ + NV2080_CTRL_GR_GET_GPC_MASK_PARAMS gpcMaskParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hSubdevice = RES_GET_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(GPU_RES_GET_GPU(pSubdevice)->gpuInstance)); + + portMemSet(&gpcMaskParams, 0, sizeof(gpcMaskParams)); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_GR_GET_GPC_MASK, + &gpcMaskParams, + sizeof(gpcMaskParams))); + + pParams->gpcMask = gpcMaskParams.gpcMask; + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetFermiTpcInfo +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetFermiTpcInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *pParams +) +{ + NV2080_CTRL_GR_GET_TPC_MASK_PARAMS tpcMaskParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hSubdevice = RES_GET_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(GPU_RES_GET_GPU(pSubdevice)->gpuInstance)); + + portMemSet(&tpcMaskParams, 0, sizeof(tpcMaskParams)); + tpcMaskParams.gpcId = pParams->gpcId; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_GR_GET_TPC_MASK, + &tpcMaskParams, + sizeof(tpcMaskParams))); + + pParams->tpcMask = tpcMaskParams.tpcMask; + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetFermiZcullInfo +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +// WARNING: This control call is deprecated. 
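+// The implementation simply forwards to NV2080_CTRL_CMD_GR_GET_ZCULL_MASK and
+// copies back the resulting zcullMask.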
+// +NV_STATUS +subdeviceCtrlCmdGpuGetFermiZcullInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS *pParams +) +{ + NV2080_CTRL_GR_GET_ZCULL_MASK_PARAMS zcullMaskParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NvHandle hSubdevice = RES_GET_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + portMemSet(&zcullMaskParams, 0, sizeof(zcullMaskParams)); + zcullMaskParams.gpcId = pParams->gpcId; + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + pRmApi->Control(pRmApi, + hClient, + hSubdevice, + NV2080_CTRL_CMD_GR_GET_ZCULL_MASK, + &zcullMaskParams, + sizeof(zcullMaskParams))); + + pParams->zcullMask = zcullMaskParams.zcullMask; + + return NV_OK; +} + +/*! + * @brief Get graphics engine PES configuration + * + * This can be called before floor sweeping is determined, so we cannot use cached + * values. + */ +NV_STATUS +subdeviceCtrlCmdGpuGetPesInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PES_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + RsClient *pRsClient; + NvU32 gpcId = pParams->gpcId; + NvU32 maxGpcCount; + + // + // XXX Bug 2681931 - GET_PES_INFO overloads interpretation of gpcId parameter + // This ctrl call is due for deprecation and should not be used. + // + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OK_OR_RETURN( + serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + NV_CHECK_OR_RETURN(LEVEL_INFO, !IS_MIG_IN_USE(pGpu), NV_ERR_NOT_SUPPORTED); + pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, 0); + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + maxGpcCount = gpuGetLitterValues_HAL(pGpu, NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS); + if (gpcId >= maxGpcCount) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pParams->numPesInGpc = pKernelGraphicsStaticInfo->floorsweepingMasks.numPesPerGpc[gpcId]; + + NV_CHECK_OR_RETURN(LEVEL_SILENT, pKernelGraphicsStaticInfo->pPpcMasks != NULL, NV_ERR_NOT_SUPPORTED); + pParams->activePesMask = pKernelGraphicsStaticInfo->pPpcMasks->mask[gpcId]; + + pParams->maxTpcPerGpcCount = pKernelGraphicsStaticInfo->pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC].data; + portMemCopy(pParams->tpcToPesMap, sizeof(pParams->tpcToPesMap), + pKernelGraphicsStaticInfo->floorsweepingMasks.tpcToPesMap, sizeof(pKernelGraphicsStaticInfo->floorsweepingMasks.tpcToPesMap)); + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuQueryMode_IMPL +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuQueryMode_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_QUERY_MODE_PARAMS *pQueryMode +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + switch (gpuGetMode(pGpu)) + { + case NV_GPU_MODE_GRAPHICS_MODE: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_GRAPHICS_MODE; + break; + } + case NV_GPU_MODE_COMPUTE_MODE: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_COMPUTE_MODE; + break; + } + default: + { + pQueryMode->mode = NV2080_CTRL_GPU_QUERY_MODE_UNKNOWN_MODE; + break; + } + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuHandleGpuSR +// +// Lock 
Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuHandleGpuSR_IMPL +( + Subdevice *pSubdevice +) +{ + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuSetComputeModeRules +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuSetComputeModeRules_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_SET_COMPUTE_MODE_RULES_PARAMS *pSetRulesParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + NV_STATUS status = NV_OK; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + + // + // Client RM still needs to set its value and update the registry, + // so don't return unless there was an error. + // + NV_ASSERT_OK_OR_RETURN(status); + } + + //TODO Bug 2718406 will extend compute mode support for MIG + if (IS_MIG_ENABLED(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + switch(pSetRulesParams->rules) + { + case NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE: + case NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE: + case NV2080_CTRL_GPU_COMPUTE_MODE_RULES_COMPUTE_PROHIBITED: + case NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS: + pGpu->computeModeRules = pSetRulesParams->rules; + + // + // Store this setting in the registry so that it persists even + // after the last client disconnects. + // Client RM handles this so skip on GSP. + // + if (NV_OK != + osWriteRegistryDword(pGpu, + NV_REG_STR_RM_COMPUTE_MODE_RULES, + pGpu->computeModeRules)) + { + // Non-fatal but worth reporting + NV_PRINTF(LEVEL_ERROR, + "Could not store compute mode rule in the registry, current setting may not persist if all clients disconnect!\n"); + } + break; + + default: + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuQueryComputeModeRules +// +// Lock Requirements: +// Assert that API lock held on entry +// +// TODO Bug 2718406 will extend compute mode support for MIG +// +NV_STATUS +subdeviceCtrlCmdGpuQueryComputeModeRules_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_QUERY_COMPUTE_MODE_RULES_PARAMS *pQueryRulesParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + { + pQueryRulesParams->rules = pGpu->computeModeRules; + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdGpuAcquireComputeModeReservation_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + //TODO Bug 2718406 will extend compute mode support for MIG + if (IS_MIG_ENABLED(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + switch (pGpu->computeModeRules) + { + case NV2080_CTRL_GPU_COMPUTE_MODE_RULES_NONE: + { + // If a GPU is in "normal" mode, then the caller can always get the reservation: + pGpu->hComputeModeReservation = hClient; + } + return NV_OK; + break; // For the Coverity code checker. 
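+        // Exclusive-compute modes: the reservation is granted only if no other
+        // client currently holds it.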
+ + case NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE: + case NV2080_CTRL_GPU_COMPUTE_MODE_RULES_EXCLUSIVE_COMPUTE_PROCESS: + { + // + // If a GPU is in "cuda exclusive" mode, then the caller can only get the + // reservation if no other client holds the reservation: + // + if (NV01_NULL_OBJECT == pGpu->hComputeModeReservation) + { + pGpu->hComputeModeReservation = hClient; + return NV_OK; + } + else + { + // Someone else holds the reservation: + return NV_ERR_STATE_IN_USE; + } + } + break; + + case NV2080_CTRL_GPU_COMPUTE_MODE_RULES_COMPUTE_PROHIBITED: + // + // If a GPU is in "cuda prohibited" mode, then the caller can never get the + // reservation: + // + return NV_ERR_STATE_IN_USE; + break; + + default: + NV_ASSERT(0); // This *should* be unreachable code. + break; + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdGpuReleaseComputeModeReservation_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + //TODO Bug 2718406 will extend compute mode support for MIG + if (IS_MIG_ENABLED(pGpu)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // Release the reservation ONLY IF we had the reservation to begin with. Otherwise, + // leave it alone, because someone else has acquired it: + if (pGpu->hComputeModeReservation == hClient) + { + pGpu->hComputeModeReservation = NV01_NULL_OBJECT; + } + else + { + return NV_ERR_STATE_IN_USE; + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuGetId +// +// Lock Requirements: +// Assert that API lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetId_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ID_PARAMS *pIdParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pIdParams->gpuId = pGpu->gpuId; + + return NV_OK; +} + +// +// nv2080CtrlCmdGpuGetPids +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetPids_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PIDS_PARAMS *pGetPidsParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU32 internalClassId; + NV_STATUS status; + MIG_INSTANCE_REF *pRef = NULL; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + switch (pGetPidsParams->idType) + { + case (NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_CLASS): + { + if (pGetPidsParams->id == NV20_SUBDEVICE_0) + { + internalClassId = classId(Subdevice); + } + else if (pGetPidsParams->id == MPS_COMPUTE) + { + internalClassId = classId(MpsApi); + } + else + { + internalClassId = classId(ChannelDescendant); + } + break; + } + case (NV2080_CTRL_GPU_GET_PIDS_ID_TYPE_VGPU_GUEST): + { + internalClassId = classId(HostVgpuDeviceApi); + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + } + + // + // With MIG GPU instancing enabled, get associated instance ref + // Clients with MIG_MONITOR capability are allowed to get full device + // info + // + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + MIG_INSTANCE_REF partitionRef = kmigmgrMakeNoMIGReference(); + pRef = &partitionRef; + if (IS_MIG_IN_USE(pGpu) && + !rmclientIsCapableByHandle(hClient, NV_RM_CAP_SYS_SMC_MONITOR)) + { + // + // Check instanceSubscription to limit the scope of the call + // Clients with mig-monitor capability are allowed to get full device + // info + // + 
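+        // A client that is neither MIG_MONITOR-capable nor subscribed to a
+        // GPU instance fails here with NV_ERR_INSUFFICIENT_PERMISSIONS.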
NV_CHECK_OR_RETURN(LEVEL_INFO, (kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, pRef) == NV_OK), + NV_ERR_INSUFFICIENT_PERMISSIONS); + } + + // + // Search over all clients to see if any contain objects of type = id. + // If they do, then add their PID to the PIDArray param and also + // return the amount of valid entries in the Array through pidTblCount. + // + status = gpuGetProcWithObject(pGpu, pGetPidsParams->id, internalClassId, + pGetPidsParams->pidTbl, &pGetPidsParams->pidTblCount, + pRef); + return status; +} + +// +// subdeviceCtrlCmdGpuGetPidInfo +// +// Lock Requirements: +// Assert that API and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuGetPidInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_PID_INFO_PARAMS *pGetPidInfoParams +) +{ + NV2080_CTRL_GPU_PID_INFO_DATA *pPidInfoData; + NV2080_CTRL_SMC_SUBSCRIPTION_INFO *pSmcInfo; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV2080_CTRL_GPU_PID_INFO *pPidInfo; + NvU32 internalClassId; + NvU32 i; + MIG_INSTANCE_REF *pRef = NULL; + NvBool bGlobalInfo = NV_TRUE; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if ((pGetPidInfoParams->pidInfoListCount <= 0) || + (pGetPidInfoParams->pidInfoListCount > + NV2080_CTRL_GPU_GET_PID_INFO_MAX_COUNT)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // With MIG GPU instancing enabled, get associated instance ref + // Clients with MIG_MONITOR capability are allowed to get full device + // info + // + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + MIG_INSTANCE_REF ref = kmigmgrMakeNoMIGReference(); + pRef = &ref; + if (IS_MIG_IN_USE(pGpu) && + !rmclientIsCapableByHandle(hClient, NV_RM_CAP_SYS_SMC_MONITOR)) + { + // + // Check instanceSubscription to limit the scope of the call + // Clients with mig-monitor capability are allowed to get full device + // info + // + NV_CHECK_OR_RETURN(LEVEL_INFO, (kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, pRef) == NV_OK), + NV_ERR_INSUFFICIENT_PERMISSIONS); + bGlobalInfo = NV_FALSE; + } + + for (i = 0; i < pGetPidInfoParams->pidInfoListCount; ++i) + { + pPidInfo = &pGetPidInfoParams->pidInfoList[i]; + + pSmcInfo = &pPidInfo->smcSubscription; + pSmcInfo->computeInstanceId = PARTITIONID_INVALID; + pSmcInfo->gpuInstanceId = PARTITIONID_INVALID; + + switch (pPidInfo->index) + { + case (NV2080_CTRL_GPU_PID_INFO_INDEX_VIDEO_MEMORY_USAGE): + { + internalClassId = classId(Memory); + + pPidInfoData = &pPidInfo->data; + portMemSet(pPidInfoData, 0, sizeof(NV2080_CTRL_GPU_PID_INFO_DATA)); + pPidInfo->result = gpuFindClientInfoWithPidIterator(pGpu, pPidInfo->pid, 0, + internalClassId, + pPidInfoData, + pSmcInfo, + pRef, + bGlobalInfo); + break; + } + default: + { + pPidInfo->result = NV_ERR_INVALID_ARGUMENT; + break; + } + } + } + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdGpuInterruptFunction_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_INTERRUPT_FUNCTION_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + Intr *pIntr = GPU_GET_INTR(pGpu); + + return intrTriggerCpuDoorbellForVF_HAL(pGpu, pIntr, pParams->gfid); +} + +// Control call to fetch the Runlist pri base for the engine(s) specified +NV_STATUS +subdeviceCtrlCmdGpuGetEngineRunlistPriBase_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus = 
NV_OK; + NvU32 i; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (!kfifoIsHostEngineExpansionSupported(pKernelFifo)) + { + status = NV_ERR_NOT_SUPPORTED; + return status; + } + + for (i = 0; i < NV2080_GPU_MAX_ENGINES_LIST_SIZE; i++) + { + NvU32 engineId; + + // Check if input is NULL or a SW engine; return a NULL value since SW engine does not have a runlist pri base + // and this should not be returned as an error + if ((pParams->engineList[i] == NV2080_ENGINE_TYPE_NULL) || (pParams->engineList[i] == NV2080_ENGINE_TYPE_SW)) + { + pParams->runlistPriBase[i] = NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_NULL; + continue; + } + + // + // See if MIG is enabled. If yes, then we have to convert instanceLocal + // engine to global engine before moving ahead + // + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, + &ref)); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + pParams->engineList[i], + &engineId)); + } + else + { + engineId = pParams->engineList[i]; + } + + tmpStatus = kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, ENGINE_INFO_TYPE_NV2080, + engineId, ENGINE_INFO_TYPE_RUNLIST_PRI_BASE, + &pParams->runlistPriBase[i]); + + if (tmpStatus != NV_OK) + { + pParams->runlistPriBase[i] = NV2080_CTRL_GPU_GET_ENGINE_RUNLIST_PRI_BASE_ERROR; + status = tmpStatus; + } + } + return status; +} + +// Control call to fetch the HW engine ID for the engine(s) specified +NV_STATUS +subdeviceCtrlCmdGpuGetHwEngineId_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_HW_ENGINE_ID_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus = NV_OK; + NvU32 i; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (!kfifoIsHostEngineExpansionSupported(pKernelFifo)) + { + status = NV_ERR_NOT_SUPPORTED; + return status; + } + + for (i = 0; i < NV2080_GPU_MAX_ENGINES_LIST_SIZE; i++) + { + NvU32 engineId; + + // Check if input is NULL or a SW engine; return a NULL value since SW engine does not have a runlist pri base + // and this should not be returned as an error + if (pParams->engineList[i] == NV2080_ENGINE_TYPE_NULL || (pParams->engineList[i] == NV2080_ENGINE_TYPE_SW)) + { + pParams->hwEngineID[i] = NV2080_CTRL_GPU_GET_HW_ENGINE_ID_NULL; + continue; + } + + // + // See if MIG is enabled. 
If yes, then we have to convert instanceLocal + // engine to global engine before moving ahead + // + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + MIG_INSTANCE_REF ref; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, + &ref)); + + NV_CHECK_OK_OR_RETURN( + LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, + pParams->engineList[i], + &engineId)); + } + else + { + engineId = pParams->engineList[i]; + } + + tmpStatus = kfifoEngineInfoXlate_HAL(pGpu, pKernelFifo, ENGINE_INFO_TYPE_NV2080, + engineId, + ENGINE_INFO_TYPE_FIFO_TAG, + &pParams->hwEngineID[i]); + + if (tmpStatus != NV_OK) + { + pParams->hwEngineID[i] = NV2080_CTRL_GPU_GET_HW_ENGINE_ID_ERROR; + status = tmpStatus; + } + } + return status; +} + +NV_STATUS +subdeviceCtrlCmdGpuGetMaxSupportedPageSize_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_MAX_SUPPORTED_PAGE_SIZE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmDeviceGpuLockIsOwner(pGpu->gpuInstance)); + + // Default to minimal page size (4k) + pParams->maxSupportedPageSize = RM_PAGE_SIZE; + + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + if (kgmmuIsPageSize512mbSupported(pKernelGmmu)) + { + pParams->maxSupportedPageSize = RM_PAGE_SIZE_512M; + } + else if (kgmmuIsHugePageSupported(pKernelGmmu)) + { + pParams->maxSupportedPageSize = RM_PAGE_SIZE_HUGE; + } + else + { + pParams->maxSupportedPageSize = kgmmuGetMaxBigPageSize_HAL(pKernelGmmu); + } + + if (gpuIsSriovEnabled(pGpu)) + { + NvU64 vmmuSegmentSize = gpuGetVmmuSegmentSize(pGpu); + if (vmmuSegmentSize > 0 && + vmmuSegmentSize < NV2080_CTRL_GPU_VMMU_SEGMENT_SIZE_512MB) + { + pParams->maxSupportedPageSize = RM_PAGE_SIZE_HUGE; + } + } + + return status; +} + +#if (defined(DEBUG) || defined(DEVELOP) || defined(NV_MODS)) && RMCFG_MODULE_KERNEL_GRAPHICS +NV_STATUS +subdeviceCtrlCmdGpuGetNumMmusPerGpc_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_NUM_MMUS_PER_GPC_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + KernelGraphics *pKernelGraphics; + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo; + NvU32 count; + NvU32 maxGpcCount; + + // Ensure that the gpcId is within range + maxGpcCount = gpuGetLitterValues_HAL(pGpu, NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS); + if (pParams->gpcId >= maxGpcCount) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + kgrmgrCtrlRouteKGR(pGpu, pKernelGraphicsManager, RES_GET_CLIENT_HANDLE(pSubdevice), + &pParams->grRouteInfo, &pKernelGraphics)); + + pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + NV_ASSERT_OR_RETURN(pKernelGraphicsStaticInfo != NULL, NV_ERR_INVALID_STATE); + + count = pKernelGraphicsStaticInfo->floorsweepingMasks.mmuPerGpc[pParams->gpcId]; + + // If the litter value doesn't exist (pre Ampere) than default to 1 gpcmmu + pParams->count = ((count != 0) ? 
count : 1); + + return NV_OK; +} +#endif + +// Stubbed for Orin + +/* + * @brief Update/Set the compute policy config for a GPU + * + * Lock Requirements: + * Assert that API and GPUs lock held on entry + * + * @param[in] pSubdevice + * @param[in] pParams pointer to control parameters + * + * @return + * NV_OK Success + * NV_ERR_NOT_SUPPORTED Setting policy is not supported on requested GPU + * NV_ERR_INVALID_ARGUMENT Invalid config type/value specified + * else appropriate error code. + */ +NV_STATUS +subdeviceCtrlCmdGpuSetComputePolicyConfig_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_SET_COMPUTE_POLICY_CONFIG_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_COMPUTE_POLICY_INFO policyInfo; + NvU32 gidFlags; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + switch(pParams->config.type) + { + case NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE: + if (!gpuIsComputePolicyTimesliceSupported(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "Setting the timeslice policy is not supported for gpu with pci id 0x%llx\n", + gpuGetDBDF(pGpu)); + return NV_ERR_NOT_SUPPORTED; + + } + + if (pParams->config.data.timeslice >= NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_MAX) + { + NV_PRINTF(LEVEL_ERROR, "Unsupported timeslice value %u specified for gpu with pci id 0x%llx\n", + pParams->config.data.timeslice, gpuGetDBDF(pGpu)); + return NV_ERR_INVALID_ARGUMENT; + } + + policyInfo.timeslice = pParams->config.data.timeslice; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unsupported compute policy %u specified for gpu id 0x%llx\n", + pParams->config.type, gpuGetDBDF(pGpu)); + return NV_ERR_INVALID_ARGUMENT; + } + + gidFlags = DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY); + status = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags); + if (status != NV_OK) + { + return status; + } + + NV_ASSERT(pGpu->gpuUuid.isInitialized); + status = gpudbSetGpuComputePolicyConfig(pGpu->gpuUuid.uuid, pParams->config.type, + &policyInfo); + + return status; +} + +// +// Make sure number of compute policies per GPU is always less than or equal +// to the number of policy configs that can be handled by the +// NV2080_CTRL_CMD_GPU_GET_COMPUTE_POLICY_CONFIG command. +// +ct_assert(NV2080_CTRL_GPU_COMPUTE_POLICY_MAX <= NV2080_CTRL_GPU_COMPUTE_POLICY_CONFIG_LIST_MAX); + +/* + * @brief Get all compute policy configs for a GPU + * + * Lock Requirements: + * Assert that API and GPUs lock held on entry + * + * @param[in] pSubdevice + * @param[in] pParams pointer to control parameters + * + * @return + * NV_OK on success + * else appropriate error code. 
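+ *
+ * The returned configList is indexed by policy id; only the TIMESLICE
+ * policy is currently populated, and numConfigs reports how many entries
+ * were filled in.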
+ */ +NV_STATUS +subdeviceCtrlCmdGpuGetComputePolicyConfig_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_COMPUTE_POLICY_CONFIG_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_COMPUTE_POLICY_INFO policyInfo; + NvU32 policyId; + NvU32 gidFlags; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + gidFlags = DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY); + status = gpuGetGidInfo(pGpu, NULL, NULL, gidFlags); + if (status != NV_OK) + { + return status; + } + + NV_ASSERT(pGpu->gpuUuid.isInitialized); + status = gpudbGetGpuComputePolicyConfigs(pGpu->gpuUuid.uuid, &policyInfo); + if (status != NV_OK) + { + return status; + } + + pParams->numConfigs = 0; + // Loop through all compute policies and retrieve the configured settings + for (policyId = NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE; + policyId < NV2080_CTRL_GPU_COMPUTE_POLICY_MAX; + policyId++) + { + switch (policyId) + { + case NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE: + pParams->configList[policyId].type = NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE; + pParams->configList[policyId].data.timeslice = policyInfo.timeslice; + pParams->numConfigs++; + break; + default: + NV_ASSERT(0); + break; + } + } + + return status; +} + +/*! + * @brief Check if address range is within the provided limits + * + * @param[in] addrStart Staring address of address range + * @param[in] addrLength Size of address range + * @param[in] limitStart Staring address of limit + * @param[in] limitLength Size of limit + * + * @return + * NV_TRUE, if address range is within the provided limits + * NV_FALSE, if address range is outside the provided limits + * + */ +static NvBool isAddressWithinLimits +( + NvU64 addrStart, + NvU64 addrLength, + NvU64 limitStart, + NvU64 limitLength +) +{ + NvU64 addrEnd = 0; + NvU64 limitEnd = 0; + + // + // Calculate End address of address range and limit, + // Return NV_FALSE in case of 64-bit addition overflow + // + if (!portSafeAddU64(addrStart, addrLength - 1, &addrEnd) || + !portSafeAddU64(limitStart, limitLength - 1, &limitEnd)) + { + return NV_FALSE; + } + + return ((addrStart >= limitStart) && (addrEnd <= limitEnd)); +} + +/*! + * @brief Validate the address range for Memory Map request by comparing the + * user supplied address range with GPU BAR0/BAR1 range. 
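+ *
+ * Ranges that fall inside the timer or MC register windows of BAR0 are
+ * downgraded to read-only protection; the user-mode doorbell window and
+ * BAR1/FB ranges keep the default read-write protection.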
+ * + * Lock Requirements: + * Assert that API and GPUs lock held on entry + * + * @param[in] pSubdevice + * @param[in] pParams pointer to control parameters + * + * Possible status values returned are: + * NV_OK + * NV_ERR_PROTECTION_FAULT + * + */ +NV_STATUS subdeviceCtrlCmdValidateMemMapRequest_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_VALIDATE_MEM_MAP_REQUEST_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvU64 start = pParams->addressStart; + NvU64 length = pParams->addressLength; + NV_STATUS rmStatus; + NvU32 bar0MapSize; + NvU64 bar0MapOffset; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + pParams->protection = NV_PROTECT_READ_WRITE; + + if (isAddressWithinLimits(start, length, pGpu->busInfo.gpuPhysAddr, + pGpu->deviceMappings[0].gpuNvLength)) + { + start -= pGpu->busInfo.gpuPhysAddr; + + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + rmStatus = tmrGetTimerBar0MapInfo_HAL(pGpu, pTmr, + &bar0MapOffset, &bar0MapSize); + if ((rmStatus == NV_OK) && + isAddressWithinLimits(start, length, bar0MapOffset, bar0MapSize)) + { + pParams->protection = NV_PROTECT_READABLE; + return NV_OK; + } + + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + rmStatus = kfifoGetUsermodeMapInfo_HAL(pGpu, pKernelFifo, + &bar0MapOffset, &bar0MapSize); + if ((rmStatus == NV_OK) && + isAddressWithinLimits(start, length, bar0MapOffset, bar0MapSize)) + { + return NV_OK; + } + + KernelMc *pKernelMc = GPU_GET_KERNEL_MC(pGpu); + rmStatus = kmcGetMcBar0MapInfo_HAL(pGpu, pKernelMc, + &bar0MapOffset, &bar0MapSize); + if ((rmStatus == NV_OK) && + isAddressWithinLimits(start, length, bar0MapOffset, bar0MapSize)) + { + pParams->protection = NV_PROTECT_READABLE; + return NV_OK; + } + + // + // If the kernel side does not know about the object being mapped, + // fall-through to GSP and see if it knows anything. + // + if (IS_GSP_CLIENT(pGpu)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + + return pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_GPU_VALIDATE_MEM_MAP_REQUEST, + pParams, sizeof(*pParams)); + } + + return NV_ERR_PROTECTION_FAULT; + } + // See bug 1784955 + else if (isAddressWithinLimits(start, length, pGpu->busInfo.gpuPhysFbAddr, pGpu->fbLength) + || GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)->coherentCpuFbBase) + { + return NV_OK; + } + + return NV_ERR_PROTECTION_FAULT; +} + +/*! + * @brief: This command returns the load time (latency) of each engine, + * implementing NV2080_CTRL_CMD_GPU_GET_ENGINE_LOAD_TIMES control call. 
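+ *
+ * The load time reported for each engine is taken from its engine state
+ * statistics for the STATE_LOAD transition (transitionTimeUs scaled up to
+ * nanoseconds); engines without an engstate object are reported as not
+ * initialized.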
+ * + * @param[in] pSubdevice + * @param[in] pParams + * + * @return + * NV_OK Success + */ +NV_STATUS +subdeviceCtrlCmdGpuGetEngineLoadTimes_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_GET_ENGINE_LOAD_TIMES_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + PENGDESCRIPTOR engDescriptorList = gpuGetInitEngineDescriptors(pGpu); + NvU32 numEngDescriptors = gpuGetNumEngDescriptors(pGpu); + NvU32 curEngDescIdx; + + NV_ASSERT_OR_RETURN(numEngDescriptors < NV2080_CTRL_GPU_MAX_ENGINE_OBJECTS, NV_ERR_BUFFER_TOO_SMALL); + + pParams->engineCount = numEngDescriptors; + for (curEngDescIdx = 0; curEngDescIdx < numEngDescriptors; curEngDescIdx++) + { + ENGDESCRIPTOR curEngDescriptor = engDescriptorList[curEngDescIdx]; + OBJENGSTATE *pEngstate = gpuGetEngstate(pGpu, curEngDescriptor); + + if (pEngstate == NULL) + { + pParams->engineIsInit[curEngDescIdx] = NV_FALSE; + continue; + } + + pParams->engineList[curEngDescIdx] = pEngstate->engDesc; + pParams->engineStateLoadTime[curEngDescIdx] = pEngstate->stats[ENGSTATE_STATE_LOAD].transitionTimeUs * 1000; + pParams->engineIsInit[curEngDescIdx] = NV_TRUE; + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdGpuSetFabricAddr +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuSetFabricAddr_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_SET_FABRIC_BASE_ADDR_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + if (!rmclientIsCapableOrAdminByHandle(hClient, + NV_RM_CAP_EXT_FABRIC_MGMT, + pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_ERROR, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + if (pKernelNvlink == NULL) + return NV_ERR_NOT_SUPPORTED; + + return knvlinkSetUniqueFabricBaseAddress(pGpu, pKernelNvlink, pParams->fabricBaseAddr); +} diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_regops.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_regops.c new file mode 100644 index 000000000..27979c20e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_gpu_regops.c @@ -0,0 +1,191 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "gpu/subdevice/subdevice.h" +#include "vgpu/rpc.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "kernel/gpu/fifo/kernel_channel_group_api.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/fifo/kernel_channel.h" +#include "kernel/gpu/fifo/kernel_fifo.h" + +// +// Because the field regStatus is set and checked in multiple places in the +// call chain, it must be initialized early on in an entry point function (few +// of them). (Security Bug: 3401950) +// Note: The api doesn't mandate the caller clear the field before calling +// +static void +initRegStatus(NV2080_CTRL_GPU_REG_OP *pRegOps, NvU32 regOpCount) +{ + NvU32 i; + + for (i=0; i < regOpCount; i++) + { + pRegOps[i].regStatus = NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS; + } +} + +// +// Do some pre validation of each regop such as offset and access. There is +// another validation in gpuValidateRegOp above that runs at a later time. +// EXECUTION: CPU-RM if the call is not from GSP-PLUGIN +// GSP-RM if the call is from GSP-PLUGIN only +// +NV_STATUS +gpuValidateRegOps +( + OBJGPU *pGpu, + NV2080_CTRL_GPU_REG_OP *pRegOps, + NvU32 regOpCount, + NvBool bNonTransactional, + NvBool isClientGspPlugin +) +{ + NvU8 regStatus; + NvU32 i; + + for (i = 0; i < regOpCount; i++) + { + regStatus = NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS; + + { + NV_STATUS status; + + status = gpuValidateRegOffset(pGpu, pRegOps[i].regOffset); + if (status != NV_OK) + { + regStatus = NV2080_CTRL_GPU_REG_OP_STATUS_INVALID_OFFSET; + } + } + if (regStatus != NV2080_CTRL_GPU_REG_OP_STATUS_SUCCESS) + { + pRegOps[i].regStatus |= regStatus; + + if (!bNonTransactional) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + } + + return NV_OK; +} + +static NV_STATUS +subdeviceCtrlCmdGpuExecRegOps_cmn +( + Subdevice *pSubdevice, + NvHandle hClientTarget, + NvHandle hChannelTarget, + NvU32 bNonTransactional, + NV2080_CTRL_GR_ROUTE_INFO grRouteInfo, + NV2080_CTRL_GPU_REG_OP *pRegOps, + NvU32 regOpCount, + NvBool isClientGspPlugin +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + NV_STATUS status = NV_OK; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_PRINTF(LEVEL_INFO, "client 0x%x channel 0x%x\n", hClientTarget, + hChannelTarget); + + // sanity check list size + if (regOpCount == 0) + { + NV_PRINTF(LEVEL_ERROR, "regOpCount is 0\n"); + return NV_ERR_INVALID_PARAM_STRUCT; + } + + if (pRegOps == NULL) + { + NV_PRINTF(LEVEL_ERROR, "regOps is NULL\n"); + return NV_ERR_INVALID_PARAM_STRUCT; + } + + if ((hClientTarget == 0) != (hChannelTarget == 0)) + { + NV_PRINTF(LEVEL_ERROR, + "hClientTarget and hChannelTarget must both be set or both be 0\n"); + return NV_ERR_INVALID_PARAM_STRUCT; + } + + // init once, only in monolithic-rm or the cpu-rm, or gsp-rm if the call + // is from the gsp plugin + if (!RMCFG_FEATURE_PLATFORM_GSP || isClientGspPlugin) + { + initRegStatus(pRegOps, regOpCount); + } + + status = gpuValidateRegOps(pGpu, pRegOps, regOpCount, bNonTransactional, + isClientGspPlugin); + if (status != NV_OK) + { + return 
status; + } + + if (IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_GPU_EXEC_REG_OPS(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->pParams, + pRegOps, + status); + return status; + } + + return status; +} + +// +// subdeviceCtrlCmdGpuExecRegOps +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdGpuExecRegOps_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS *pRegParams +) +{ + return subdeviceCtrlCmdGpuExecRegOps_cmn(pSubdevice, + pRegParams->hClientTarget, + pRegParams->hChannelTarget, + pRegParams->bNonTransactional, + pRegParams->grRouteInfo, + pRegParams->regOps, + pRegParams->regOpCount, + NV_FALSE); +} + diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c new file mode 100644 index 000000000..74e931e9e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c @@ -0,0 +1,471 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This module contains the gpu control interfaces for the + * subdevice (NV20_SUBDEVICE_0) class. Subdevice-level control calls + * are directed unicast to the associated GPU. 
+ * File contains ctrls related to TMR engine object
+ */
+
+#include "core/core.h"
+
+
+#include "core/locks.h"
+#include "gpu/subdevice/subdevice.h"
+#include "objtmr.h"
+#include "rmapi/client.h"
+
+#include "kernel/gpu/intr/intr.h"
+
+//
+// subdeviceCtrlCmdTimerCancel
+//
+// Lock Requirements:
+//      Assert that API lock and GPUs lock held on entry
+//
+NV_STATUS
+subdeviceCtrlCmdTimerCancel_IMPL
+(
+    Subdevice *pSubdevice
+)
+{
+    OBJGPU *pGpu;
+    OBJTMR *pTmr;
+
+    LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner());
+
+    if (pSubdevice == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pGpu = GPU_RES_GET_GPU(pSubdevice);
+    pTmr = GPU_GET_TIMER(pGpu);
+
+    if (pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+    {
+        tmrCancelCallback(pTmr, pSubdevice);
+        pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
+    }
+    return NV_OK;
+}
+
+static NV_STATUS
+gpuControlTimerCallback(OBJGPU *pGpu, OBJTMR *pTmr, void * pData)
+{
+    Subdevice *pSubDevice = (Subdevice *) pData;
+    PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pSubDevice, INotifier));
+
+    if (pSubDevice->notifyActions[NV2080_NOTIFIERS_TIMER] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "callback is called but the timer is not scheduled\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // Mark the timer as processed (no self-rescheduling for now)
+    pSubDevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE;
+
+    // Find timer event
+    while ((pNotifyEvent != NULL) && (pNotifyEvent->NotifyIndex != NV2080_NOTIFIERS_TIMER))
+    {
+        pNotifyEvent = pNotifyEvent->Next;
+    }
+    if (pNotifyEvent == NULL)
+    {
+        NV_PRINTF(LEVEL_INFO, "timer event is missing\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // perform a direct callback to the client
+    if (pNotifyEvent->Data != NvP64_NULL)
+    {
+        NvU64 currentTime = tmrGetTime_HAL(pGpu, pTmr);
+        osEventNotification(pGpu, pNotifyEvent, NV2080_NOTIFIERS_TIMER,
+                            &currentTime, sizeof(currentTime));
+    }
+    else
+    {
+        NV_PRINTF(LEVEL_INFO, "timer callback pointer is missing\n");
+        return NV_ERR_INVALID_STATE;
+    }
+    return NV_OK;
+}
+
+static NV_STATUS
+timerSchedule
+(
+    Subdevice *pSubdevice,
+    NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pTimerScheduleParams
+)
+{
+    OBJGPU *pGpu;
+    OBJTMR *pTmr;
+    PEVENTNOTIFICATION pNotifyEvent;
+
+    if (pSubdevice == NULL)
+    {
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    pGpu = GPU_RES_GET_GPU(pSubdevice);
+    pTmr = GPU_GET_TIMER(pGpu);
+
+    pNotifyEvent = inotifyGetNotificationList(staticCast(pSubdevice, INotifier));
+
+    if (pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] != NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "gpuControlTimerCallback: the timer is already scheduled for this subdevice\n");
+        return NV_ERR_INVALID_STATE;
+    }
+
+    // Validate the timer event
+    while ((pNotifyEvent != NULL) && (pNotifyEvent->NotifyIndex != NV2080_NOTIFIERS_TIMER))
+    {
+        pNotifyEvent = pNotifyEvent->Next;
+    }
+    if (pNotifyEvent == NULL)
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "gpuControlTimerCallback: timer event is missing\n");
+        return NV_ERR_INVALID_STATE;
+    }
+    if (((pNotifyEvent->NotifyType != NV01_EVENT_KERNEL_CALLBACK) && (pNotifyEvent->NotifyType != NV01_EVENT_KERNEL_CALLBACK_EX)) ||
+        (pNotifyEvent->Data == NvP64_NULL))
+    {
+        NV_PRINTF(LEVEL_INFO,
+                  "gpuControlTimer: cmd 0x%x: callback function is missing\n",
+                  NV2080_CTRL_CMD_TIMER_SCHEDULE);
+        return
NV_ERR_INVALID_STATE; + + } + + // Mark the timer as processed (no self-rescheduling for now). Set the flag before calling the timer + // since callback may be called right away. + pSubdevice->notifyActions[NV2080_NOTIFIERS_TIMER] = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + + // schedule the timer + if (DRF_VAL(2080, _CTRL_TIMER_SCHEDULE_FLAGS, _TIME, pTimerScheduleParams->flags) == NV2080_CTRL_TIMER_SCHEDULE_FLAGS_TIME_ABS) + { + tmrScheduleCallbackAbs(pTmr, gpuControlTimerCallback, pSubdevice, pTimerScheduleParams->time_nsec, 0, 0); + } + else + { + tmrScheduleCallbackRel(pTmr, gpuControlTimerCallback, pSubdevice, pTimerScheduleParams->time_nsec, 0, 0); + } + + return NV_OK; +} + +// +// subdeviceCtrlCmdTimerSchedule +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// +NV_STATUS +subdeviceCtrlCmdTimerSchedule_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_TIMER_SCHEDULE_PARAMS *pParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + if (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) + { + LOCK_ASSERT_AND_RETURN(rmDeviceGpuLockIsOwner(GPU_RES_GET_GPU(pSubdevice)->gpuInstance)); + } + else + { + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + } + + return timerSchedule(pSubdevice, pParams); +} + +// +// subdeviceCtrlCmdTimerGetTime +// +// Lock Requirements: +// Assert that API lock and GPUs lock held on entry +// Timer callback list accessed in tmrService at DPC +// +NV_STATUS +subdeviceCtrlCmdTimerGetTime_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_TIMER_GET_TIME_PARAMS *pParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + if ((pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) && + (pRmCtrlParams->flags & NVOS54_FLAGS_LOCK_BYPASS)) + { + if (pTmr->tmrChangePending) + { + return NV_ERR_STATE_IN_USE; + } + } + else + { + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + } + + tmrGetCurrentTime(pTmr, &pParams->time_nsec); + + return NV_OK; +} + +// +// subdeviceCtrlCmdTimerGetRegisterOffset +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +subdeviceCtrlCmdTimerGetRegisterOffset_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_TIMER_GET_REGISTER_OFFSET_PARAMS *pTimerRegOffsetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpuGetRegBaseOffset_HAL(pGpu, NV_REG_BASE_TIMER, &pTimerRegOffsetParams->tmr_offset); +} + +/*! + * @brief Provides correlation information between GPU time and CPU time. 
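+ *
+ * For the PLATFORM_API clock id a single correlation sample is produced from
+ * a zipper pattern of CPU/GPU reads (see the comment in the implementation
+ * below); the OSTIME and TSC clock ids sample the requested number of pairs
+ * directly.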
+ * + * @param[in] pSubDevice + * @param[in] pParams + * + * @return NV_OK Success + * @return NV_ERR_INVALID_ARGUMENT Invalid argument + * @return NV_ERR_NOT_SUPPORTED Unsupported CPU clock id + */ +NV_STATUS +subdeviceCtrlCmdTimerGetGpuCpuTimeCorrelationInfo_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_TIMER_GET_GPU_CPU_TIME_CORRELATION_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NV_STATUS status = NV_OK; + NvU8 i; + NvU32 sec, usec; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + NV_ASSERT_OR_RETURN((pParams->sampleCount <= + NV2080_CTRL_TIMER_GPU_CPU_TIME_MAX_SAMPLES), + NV_ERR_INVALID_ARGUMENT); + + switch (pParams->cpuClkId) + { + case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_OSTIME: + { + for (i = 0; i < pParams->sampleCount; i++) + { + osGetCurrentTime(&sec, &usec); + pParams->samples[i].cpuTime = (((NvU64)sec) * 1000000) + usec; + status = tmrGetCurrentTime(pTmr, + &pParams->samples[i].gpuTime); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not get GPU time. status=0x%08x\n", + status); + break; + } + } + break; + } + + case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_PLATFORM_API: + { + // + // As reading CPU time and GPU time is a serial process we need to + // have a technique to mitigate the effects of preemption so we read + // the timestamps in a zipper pattern like c G c G c G c into an + // array storing all 7 values, find the two c values closest together, + // and report the sync point as the average of those two c values and + // the G between them. One complication is that reading a GPU's PTIMER + // directly from the CPU must be done via two non-adjacent BAR0-mapped + // memory locations for the low 32 bits and high 32 bits, and there's + // no way to atomically get both. One way to fix this is to make the + // read of the GPU time do the high bits, the low bits, and the high + // bits again, and if the two high values differ, we repeat the process + // until Ghi1 and Ghi2 match Once Ghi1 and 2 match, we use that as + // the high bits and the lo bits & CPU time from the zipper. + // + const NvU32 numTimerSamples = 3; // We take (hardcoded) 3 gpu timestamps. + NvU32 gpuTimeLo[3]; // Array to hold num_timer_samples gpu timestamps. + NvU64 cpuTime[4]; // Array to hold num_timer_samples+1 cpu timestamps. + NvU64 min; + NvU32 closestPairBeginIndex; + NvU32 gpuTimeHiOld; + NvU32 gpuTimeHiNew; + NvU32 i; + + gpuTimeHiNew = tmrReadTimeHiReg_HAL(pGpu, pTmr, NULL); + + do + { + gpuTimeHiOld = gpuTimeHiNew; + for (i = 0; i < numTimerSamples; i++) + { + + osGetPerformanceCounter(&cpuTime[i]); + + gpuTimeLo[i] = tmrReadTimeLoReg_HAL(pGpu, pTmr, NULL); + } + + osGetPerformanceCounter(&cpuTime[i]); + + // Read GPU TIME_1(High) again to detect wrap around. + gpuTimeHiNew = tmrReadTimeHiReg_HAL(pGpu, pTmr, NULL); + } while (gpuTimeHiNew != gpuTimeHiOld); + + // find i such that cpuTime[i+1] - cpuTime[i] is minimum + // i.e. find closest pair of cpuTime. 
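+            // Illustrative example (values made up): with numTimerSamples = 3
+            // and cpuTime[] = {100, 250, 260, 400}, the gaps are 150, 10 and
+            // 140, so closestPairBeginIndex = 1 and the reported cpuTime is
+            // (250 + 260) / 2 = 255, paired with gpuTimeLo[1].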
+ min = cpuTime[1] - cpuTime[0]; + closestPairBeginIndex = 0; + for (i = 0; i < numTimerSamples; i++) + { + if ((cpuTime[i+1] - cpuTime[i]) < min) + { + closestPairBeginIndex = i; + min = cpuTime[i+1] - cpuTime[i]; + } + } + + pParams->samples[0].gpuTime = ((((NvU64)gpuTimeHiNew) << 32) | + gpuTimeLo[closestPairBeginIndex]); + pParams->samples[0].cpuTime = (cpuTime[closestPairBeginIndex] + + cpuTime[closestPairBeginIndex + 1])/2; + NV_PRINTF(LEVEL_INFO, + "GPUTime = %llx CPUTime = %llx\n", + pParams->samples[0].gpuTime, pParams->samples[0].cpuTime); + break; + } + + case NV2080_TIMER_GPU_CPU_TIME_CPU_CLK_ID_TSC: + { + for (i = 0; i < pParams->sampleCount; i++) + { + status = tmrGetGpuAndCpuTimestampPair_HAL(pGpu, pTmr, &pParams->samples[i].gpuTime, &pParams->samples[i].cpuTime); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Could not get CPU GPU time. status=0x%08x\n", + status); + break; + } + } + break; + } + default: + { + status = NV_ERR_NOT_SUPPORTED; + break; + } + } + + return status; +} + +/*! + * @brief Set the frequency to update GR time stamp to default or max. + * + * The GR tick frequency will be restored to default + * only when no client has a pending request to increase. + * + * Lock Requirements: + * Assert that API lock held on entry + * No GPUs lock + * + * @param[in] pSubDevice + * @param[in] pParams + * + * @return NV_OK Success + * NV_ERR_INVALID_ARGUMENT Invalid Argument + * NV_ERR_NOT_SUPPORTED Not Supported + * Other errors from refcntRequestReference() or refcntReleaseReferences() + */ +NV_STATUS +subdeviceCtrlCmdTimerSetGrTickFreq_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_CMD_TIMER_SET_GR_TICK_FREQ_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pSubdevice); + NV_STATUS status; + OBJREFCNT *pRefcnt; + NvHandle hSubDevice; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pSubdevice == NULL || pTmr == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pRefcnt = pTmr->pGrTickFreqRefcnt; + + hSubDevice = RES_GET_HANDLE(pSubdevice); + if (pParams->bSetMaxFreq) + { + status = refcntRequestReference(pRefcnt, + NV_REQUESTER_CLIENT_OBJECT(hClient, hSubDevice), + REFCNT_STATE_ENABLED, NV_FALSE); + pSubdevice->bMaxGrTickFreqRequested = NV_TRUE; + } + else + { + status = refcntReleaseReferences(pRefcnt, + NV_REQUESTER_CLIENT_OBJECT(hClient, hSubDevice), + NV_TRUE); + pSubdevice->bMaxGrTickFreqRequested = NV_FALSE; + } + return status; +} + diff --git a/src/nvidia/src/kernel/gpu/subdevice/subdevice_diag.c b/src/nvidia/src/kernel/gpu/subdevice/subdevice_diag.c new file mode 100644 index 000000000..0b2bfeecd --- /dev/null +++ b/src/nvidia/src/kernel/gpu/subdevice/subdevice_diag.c @@ -0,0 +1,90 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "os/os.h" +#include "gpu/gpu.h" +#include "rmapi/rs_utils.h" + +#include "gpu/subdevice/subdevice_diag.h" +#include "gpu/subdevice/subdevice.h" + +#include "class/cl208f.h" // NV208F_NOTIFIERS_MAXCOUNT +#include "ctrl/ctrl208f/ctrl208fevent.h" // NV208F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE + +#include "resserv/rs_client.h" + + + +// +// All state associated with subdevice diag objects is protected by +// the API sleep lock (there is no intersection with ISR/DPC code). +// +NV_STATUS diagapiConstruct_IMPL +( + DiagApi *pDiagApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NvU32 i; + + for (i = 0; i < NV208F_NOTIFIERS_MAXCOUNT; i++) + { + pDiagApi->notifyActions[i] = + NV208F_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + + return NV_OK; +} + +NV_STATUS +diagapiControl_IMPL +( + DiagApi *pDiagApi, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams; + gpuresControlSetup(pParams, staticCast(pDiagApi, GpuResource)); + + (void)pRmCtrlParams; + NV_PRINTF(LEVEL_INFO, "gpuControlSubDevice: cmd 0x%x\n", + pRmCtrlParams->cmd); + + return gpuresControl_IMPL(staticCast(pDiagApi, GpuResource), pCallContext, pParams); +} + + +NV_STATUS +diagapiControlFilter_IMPL +( + DiagApi *pDiagApi, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/sw_eng.c b/src/nvidia/src/kernel/gpu/sw_eng.c new file mode 100644 index 000000000..c63771d5b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/sw_eng.c @@ -0,0 +1,39 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*************************** SW Engine Manager *****************************\ +* * +* Module: swobj.c * +* Software engine objects are managed here. 
* +* * +****************************************************************************/ + +#include "objsweng.h" + +NV_STATUS +swengConstructEngine_IMPL(OBJGPU *pGpu, OBJSWENG *pSwEng, ENGDESCRIPTOR engDesc) +{ + // RS-TODO: Register SW emulated classes here + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/sw_test.c b/src/nvidia/src/kernel/gpu/sw_test.c new file mode 100644 index 000000000..59cef7062 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/sw_test.c @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "kernel/gpu/sw_test.h" + +NV_STATUS +swtestConstruct_IMPL +( + SoftwareMethodTest *pSwTest, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +swtestDestruct_IMPL +( + SoftwareMethodTest *pSwTest +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pSwTest, ChannelDescendant); + + chandesIsolateOnDestruct(pChannelDescendant); +} + +static METHOD Nv04SoftwareTestMethods[] = +{ + {mthdNoOperation, 0x0100, 0x0103}, +}; + +NV_STATUS swtestGetSwMethods_IMPL +( + SoftwareMethodTest *pSwTest, + METHOD **ppMethods, + NvU32 *pNumMethods +) +{ + *ppMethods = Nv04SoftwareTestMethods; + *pNumMethods = NV_ARRAY_ELEMENTS32(Nv04SoftwareTestMethods); + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/timed_semaphore.c b/src/nvidia/src/kernel/gpu/timed_semaphore.c new file mode 100644 index 000000000..a5f828682 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timed_semaphore.c @@ -0,0 +1,644 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************************* Video Manager *******************************\ +* * +* This module implements the GF100_TIMED_SEMAPHORE_SW object class and * +* its corresponding methods. * +* * +******************************************************************************/ + +#include "class/cl0000.h" +#include "kernel/gpu/timed_sema.h" +#include "objtmr.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu_mgr/gpu_mgr.h" +#include "rmapi/control.h" +#include "kernel/gpu/fifo/kernel_channel.h" + +#include "class/cl9074.h" + +#define F_NOTIFIER_HI_VALID (NVBIT(0)) +#define F_NOTIFIER_LO_VALID (NVBIT(1)) +#define F_SEMAPHORE_HI_VALID (NVBIT(2)) +#define F_SEMAPHORE_LO_VALID (NVBIT(3)) +#define F_WAIT_TIMESTAMP_HI_VALID (NVBIT(4)) +#define F_WAIT_TIMESTAMP_LO_VALID (NVBIT(5)) +#define F_RELEASE_VALUE_VALID (NVBIT(6)) +#define F_FLUSHING (NVBIT(8)) + +#define F_ALL_VALID ( \ + F_NOTIFIER_HI_VALID | \ + F_NOTIFIER_LO_VALID | \ + F_SEMAPHORE_HI_VALID | \ + F_SEMAPHORE_LO_VALID | \ + F_WAIT_TIMESTAMP_HI_VALID | \ + F_WAIT_TIMESTAMP_LO_VALID | \ + F_RELEASE_VALUE_VALID \ +) + +//--------------------------------------------------------------------------- +// +// Internal prototypes +// +//--------------------------------------------------------------------------- + +static NV_STATUS _class9074TimerCallback +( + OBJGPU *pGpu, + OBJTMR *pTmr, + void *pContext +); + +//--------------------------------------------------------------------------- +// +// Internal functions +// +//--------------------------------------------------------------------------- + +static NvBool +_9074TimedSemReleaseNow +( + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw, + NvU64 waitTimestamp, + NvU64 currentTime, + NvU16 *notifierStatus +) +{ + if (waitTimestamp <= currentTime) + { + *notifierStatus = NV9074_NOTIFICATION_STATUS_DONE; + return NV_TRUE; + } + else if ((pTimedSemSw->Flags & F_FLUSHING) && + (waitTimestamp >= pTimedSemSw->FlushLimitTimestamp)) + { + *notifierStatus = NV9074_NOTIFICATION_STATUS_DONE_FLUSHED; + return NV_TRUE; + } + return NV_FALSE; +} + +static NV_STATUS +_9074TimedSemRelease +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + NvHandle hClient, + NvU64 notifierGPUVA, + NvU64 semaphoreGPUVA, + NvU64 time, + NvU32 releaseValue, + NvU16 notifierStatus, + NvU32 notifyAction +) +{ + NV_STATUS status; + NV_STATUS overallStatus = NV_OK; + + status = semaphoreFillGPUVATimestamp(pGpu, + hClient, + pObject->pKernelChannel->hVASpace, + semaphoreGPUVA, + releaseValue, + 0, /* Index */ + NV_TRUE, + time); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Semaphore fill failed, error 0x%x\n", status); + + if (overallStatus == NV_OK) + overallStatus = status; + } + + status = notifyFillNotifierGPUVATimestamp(pGpu, + hClient, + pObject->pKernelChannel->hVASpace, + notifierGPUVA, + 0, /* Info32 */ + 0, /* Info16 */ + notifierStatus, + 0, /* Index */ + time); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Notifier fill failed, error 0x%x\n", status); + + if (overallStatus == NV_OK) + overallStatus = status; + } + + if 
(notifyAction) + { + PEVENTNOTIFICATION pEventNotifications = inotifyGetNotificationList(staticCast(pObject, INotifier)); + status = notifyEvents(pGpu, + pEventNotifications, + 0 /* Notifier 'Index' */, + 0 /* Method */, + 0 /* Data */, + NV_OK, + notifyAction); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Event notify failed, error 0x%x\n", + status); + + if (overallStatus == NV_OK) + overallStatus = status; + } + } + + return overallStatus; +} + +static NV_STATUS +_9074TimedSemRequest +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + NvU64 notifierGPUVA, + NvU64 semaphoreGPUVA, + NvU64 waitTimestamp, + NvU32 releaseValue, + NvU32 notifyAction +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + NvU64 currentTime; + NvU16 notifierStatus = NV9074_NOTIFICATION_STATUS_PENDING; + PGF100_TIMED_SEM_ENTRY pTimedSemEntry = NULL; + NV_STATUS status = NV_OK; + + // Is it possible to release this semaphore immediately? + if (listCount(&pTimedSemSw->entryList) == 0) + { + tmrGetCurrentTime(pTmr, ¤tTime); + if (IS_VIRTUAL(pGpu) || + _9074TimedSemReleaseNow(pTimedSemSw, waitTimestamp, currentTime, + ¬ifierStatus)) + { + status = _9074TimedSemRelease(pGpu, + pObject, + RES_GET_CLIENT_HANDLE(pTimedSemSw), + notifierGPUVA, + semaphoreGPUVA, + currentTime, + releaseValue, + notifierStatus, + notifyAction); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Timed sem release failed, error 0x%x\n", status); + } + return status; + } + } + + // Queue the semaphore release entry. + pTimedSemEntry = listAppendNew(&pTimedSemSw->entryList); + NV_CHECK_OR_RETURN(LEVEL_INFO, pTimedSemEntry != NULL, NV_ERR_NO_MEMORY); + + pTimedSemEntry->NotifierGPUVA = notifierGPUVA; + pTimedSemEntry->SemaphoreGPUVA = semaphoreGPUVA; + pTimedSemEntry->WaitTimestamp = waitTimestamp; + pTimedSemEntry->ReleaseValue = releaseValue; + pTimedSemEntry->NotifyAction = notifyAction; + + // Schedule the callback when entry was added to an empty list. + if (listCount(&pTimedSemSw->entryList) == 1) + { + tmrScheduleCallbackAbs(pTmr, _class9074TimerCallback, pObject, + pTimedSemEntry->WaitTimestamp, TMR_FLAG_RELEASE_SEMAPHORE, + staticCast(pTimedSemSw, ChannelDescendant)->pKernelChannel->ChID); + } + + return status; +} + +//--------------------------------------------------------------------------- +// +// Class object creation and destruction +// +//--------------------------------------------------------------------------- + +NV_STATUS +tsemaConstruct_IMPL +( + TimedSemaSwObject *pTimedSemSw, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + listInit(&pTimedSemSw->entryList, portMemAllocatorGetGlobalNonPaged()); + + return NV_OK; +} + +void +tsemaDestruct_IMPL +( + TimedSemaSwObject *pTimedSemSw +) +{ + ChannelDescendant *pChannelDescendant = staticCast(pTimedSemSw, ChannelDescendant); + OBJTMR *pTmr = GPU_GET_TIMER(GPU_RES_GET_GPU(pChannelDescendant)); + + tmrCancelCallback(pTmr, pChannelDescendant); + + chandesIsolateOnDestruct(pChannelDescendant); + + // Remove all the entries from the queue. + listDestroy(&pTimedSemSw->entryList); +} + +//--------------------------------------------------------------------------- +// +// Class object control. 
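+//
+// tsemaCtrlCmdFlush marks the object as flushing for a bounded window
+// (current time + maxFlushTime) and runs the timer callback immediately, so
+// queued releases can complete early with a DONE_FLUSHED notifier status.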
+// +//--------------------------------------------------------------------------- + +NV_STATUS +tsemaCtrlCmdFlush_IMPL +( + TimedSemaSwObject *pTimedSemaSwObject, + NV9074_CTRL_CMD_FLUSH_PARAMS *pFlushParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pTimedSemaSwObject); + ChannelDescendant *pObject = staticCast(pTimedSemaSwObject, ChannelDescendant); + + if (pFlushParams->isFlushing) { + pTimedSemaSwObject->Flags |= F_FLUSHING; + } + else { + pTimedSemaSwObject->Flags &= ~F_FLUSHING; + } + + if (pTimedSemaSwObject->Flags & F_FLUSHING) + { + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + tmrGetCurrentTime(pTmr, &pTimedSemaSwObject->FlushLimitTimestamp); + pTimedSemaSwObject->FlushLimitTimestamp += pFlushParams->maxFlushTime; + + tmrCancelCallback(pTmr, pObject); + _class9074TimerCallback(pGpu, pTmr, pObject); + } + + return NV_OK; +} // end of tsemaCtrlCmdFlush_IMPL + +NV_STATUS +tsemaCtrlCmdGetTime_IMPL +( + TimedSemaSwObject *pTimedSemaSwObject, + NV9074_CTRL_CMD_GET_TIME_PARAMS *pGetTimeParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pTimedSemaSwObject); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + tmrGetCurrentTime(pTmr, &pGetTimeParams->currentTime); + + return NV_OK; +} // end of tsemaCtrlCmdGetTime_IMPL + +NV_STATUS +tsemaCtrlCmdRelease_IMPL +( + TimedSemaSwObject *pTimedSemaSwObject, + NV9074_CTRL_CMD_RELEASE_PARAMS *pReleaseParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pTimedSemaSwObject); + ChannelDescendant *pObject = staticCast(pTimedSemaSwObject, ChannelDescendant); + + NV_PRINTF(LEVEL_INFO, "\n"); + + return _9074TimedSemRequest(pGpu, + pObject, + pReleaseParams->notifierGPUVA, + pReleaseParams->semaphoreGPUVA, + pReleaseParams->waitTimestamp, + pReleaseParams->releaseValue, + DRF_VAL(9074, _CTRL_CMD_RELEASE_FLAGS, _NOTIFY, pReleaseParams->releaseFlags)); +} // end of tsemaCtrlCmdRelease_IMPL + +//--------------------------------------------------------------------------- +// +// Class method routines. 
+// +//--------------------------------------------------------------------------- + +static NV_STATUS _class9074SetNotifierHi +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + PMETHOD pMethod, + NvU32 Offset, + NvU32 Data +) +{ + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + + NV_PRINTF(LEVEL_INFO, "\n"); + + pTimedSemSw->NotifierHi = DRF_VAL(9074, _SET_NOTIFIER_HI, _V, Data); + pTimedSemSw->Flags |= F_NOTIFIER_HI_VALID; + + return NV_OK; +} // end of _class9074SetNotifierHi + +static NV_STATUS _class9074SetNotifierLo +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + PMETHOD pMethod, + NvU32 Offset, + NvU32 Data +) +{ + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + NvU64 notifier; + + NV_PRINTF(LEVEL_INFO, "\n"); + + if (!(pTimedSemSw->Flags & F_NOTIFIER_HI_VALID)) + { + NV_PRINTF(LEVEL_ERROR, "NOTIFIER_HI not set\n"); + return NV_ERR_INVALID_STATE; + } + if (Data & 0x3) + { + NV_PRINTF(LEVEL_ERROR, "Mis-aligned address\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + notifier = pTimedSemSw->NotifierHi; + notifier <<= 32; + notifier |= Data; + + pTimedSemSw->NotifierLo = Data; + pTimedSemSw->NotifierGPUVA = notifier; + pTimedSemSw->Flags |= F_NOTIFIER_LO_VALID; + + return NV_OK; +} // end of _class9074SetNotifierLo( + +static NV_STATUS _class9074SetSemaphoreHi +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + PMETHOD pMethod, + NvU32 Offset, + NvU32 Data +) +{ + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + + NV_PRINTF(LEVEL_INFO, "\n"); + + pTimedSemSw->SemaphoreHi = DRF_VAL(9074, _SET_SEMAPHORE_HI, _V, Data); + pTimedSemSw->Flags |= F_SEMAPHORE_HI_VALID; + + return NV_OK; +} // end of _class9074SetSemaphoreHi + +static NV_STATUS _class9074SetSemaphoreLo +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + PMETHOD pMethod, + NvU32 Offset, + NvU32 Data +) +{ + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + NvU64 semaphore; + + NV_PRINTF(LEVEL_INFO, "\n"); + + if (!(pTimedSemSw->Flags & F_SEMAPHORE_HI_VALID)) + { + NV_PRINTF(LEVEL_ERROR, "SEMAPHORE_HI not set\n"); + return NV_ERR_INVALID_STATE; + } + if (Data & 0x3) + { + NV_PRINTF(LEVEL_ERROR, "Mis-aligned address\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + semaphore = pTimedSemSw->SemaphoreHi; + semaphore <<= 32; + semaphore |= Data; + + pTimedSemSw->SemaphoreLo = Data; + pTimedSemSw->SemaphoreGPUVA = semaphore; + pTimedSemSw->Flags |= F_SEMAPHORE_LO_VALID; + + return NV_OK; +} // end of _class9074SetSemaphoreLo + +static NV_STATUS _class9074SetWaitTimestampHi +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + PMETHOD pMethod, + NvU32 Offset, + NvU32 Data +) +{ + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + + NV_PRINTF(LEVEL_INFO, "\n"); + + pTimedSemSw->WaitTimestampHi = Data; + pTimedSemSw->Flags |= F_WAIT_TIMESTAMP_HI_VALID; + + return NV_OK; +} // end of _class9074SetWaitTimestampHi + +static NV_STATUS _class9074SetWaitTimestampLo +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + PMETHOD pMethod, + NvU32 Offset, + NvU32 Data +) +{ + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + + NV_PRINTF(LEVEL_INFO, "\n"); + + if (!(pTimedSemSw->Flags & F_WAIT_TIMESTAMP_HI_VALID)) + { + NV_PRINTF(LEVEL_ERROR, "WAIT_TIMESTAMP_HI not set\n"); + return NV_ERR_INVALID_STATE; + } + + pTimedSemSw->WaitTimestampLo = Data; + pTimedSemSw->Flags |= F_WAIT_TIMESTAMP_LO_VALID; + + pTimedSemSw->WaitTimestamp = 
pTimedSemSw->WaitTimestampHi; + pTimedSemSw->WaitTimestamp <<= 32; + pTimedSemSw->WaitTimestamp |= pTimedSemSw->WaitTimestampLo; + + return NV_OK; +} // end of _class9074SetWaitTimestampLo + +static NV_STATUS _class9074SetSemaphoreReleaseValue +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + PMETHOD pMethod, + NvU32 Offset, + NvU32 Data +) +{ + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + + NV_PRINTF(LEVEL_INFO, "\n"); + + pTimedSemSw->ReleaseValue = Data; + pTimedSemSw->Flags |= F_RELEASE_VALUE_VALID; + + return NV_OK; +} // end of class9074SetSemaphoreRelease + +static NV_STATUS _class9074ScheduleSemaphoreRelease +( + OBJGPU *pGpu, + ChannelDescendant *pObject, + PMETHOD pMethod, + NvU32 Offset, + NvU32 Data +) +{ + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + + NV_PRINTF(LEVEL_INFO, "\n"); + + if ((pTimedSemSw->Flags & F_ALL_VALID) != F_ALL_VALID) + { + NV_PRINTF(LEVEL_ERROR, "Required methods were not written\n"); + return NV_ERR_INVALID_STATE; + } + + pTimedSemSw->Flags &= ~F_ALL_VALID; + + return _9074TimedSemRequest(pGpu, + pObject, + pTimedSemSw->NotifierGPUVA, + pTimedSemSw->SemaphoreGPUVA, + pTimedSemSw->WaitTimestamp, + pTimedSemSw->ReleaseValue, + DRF_VAL(9074, _SCHEDULE_SEMAPHORE_RELEASE, _NOTIFY, Data)); +} // end of _class9074ScheduleSemaphoreRelease + +static NV_STATUS _class9074TimerCallback +( + OBJGPU *pGpu, + OBJTMR *pTmr, + void *pContext +) +{ + ChannelDescendant *pObject = pContext; + PGF100_TIMED_SEM_SW_OBJECT pTimedSemSw = dynamicCast(pObject, TimedSemaSwObject); + PGF100_TIMED_SEM_ENTRY pTimedSemEntry = NULL; + PGF100_TIMED_SEM_ENTRY pTimedSemEntryNext = NULL; + NvU64 currentTime; + NvU16 notifierStatus; + NV_STATUS status = NV_OK; + + NV_PRINTF(LEVEL_INFO, "\n"); + + tmrGetCurrentTime(pTmr, ¤tTime); + + // Process entries at the head of the queue that can be released now. + for (pTimedSemEntry = listHead(&pTimedSemSw->entryList); + pTimedSemEntry != NULL; + pTimedSemEntry = pTimedSemEntryNext) + { + pTimedSemEntryNext = listNext(&pTimedSemSw->entryList, pTimedSemEntry); + + if (!_9074TimedSemReleaseNow(pTimedSemSw, pTimedSemEntry->WaitTimestamp, + currentTime, ¬ifierStatus)) + { + break; + } + + status = _9074TimedSemRelease(pGpu, + pObject, + RES_GET_CLIENT_HANDLE(pTimedSemSw), + pTimedSemEntry->NotifierGPUVA, + pTimedSemEntry->SemaphoreGPUVA, + currentTime, + pTimedSemEntry->ReleaseValue, + notifierStatus, + pTimedSemEntry->NotifyAction); + + listRemove(&pTimedSemSw->entryList, pTimedSemEntry); + } + + // Schedule the callback for entry at the head of the queue. 
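+    //
+    // After the loop above, pTimedSemEntry is either NULL (every queued entry
+    // was released) or points to the first entry whose wait timestamp is still
+    // in the future, so the timer callback is re-armed for that entry.
+    //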
+ if (pTimedSemEntry != NULL) + { + tmrScheduleCallbackAbs(pTmr, _class9074TimerCallback, pObject, + pTimedSemEntry->WaitTimestamp, TMR_FLAG_RELEASE_SEMAPHORE, + staticCast(pTimedSemSw, ChannelDescendant)->pKernelChannel->ChID); + } + + return status; +} // end of _class9074TimerCallback + +// GF100_TIMED_SEMAPHORE_SW +static METHOD GF100TimedSemSwMethods[] = +{ + { mthdNoOperation, 0x0100, 0x0103 }, + { _class9074SetNotifierHi, 0x0140, 0x0143 }, + { _class9074SetNotifierLo, 0x0144, 0x0147 }, + { _class9074SetSemaphoreHi, 0x0148, 0x014b }, + { _class9074SetSemaphoreLo, 0x014c, 0x014f }, + { _class9074SetWaitTimestampHi, 0x0150, 0x0153 }, + { _class9074SetWaitTimestampLo, 0x0154, 0x0157 }, + { _class9074SetSemaphoreReleaseValue, 0x0158, 0x015b }, + { _class9074ScheduleSemaphoreRelease, 0x015c, 0x015f } +}; + +NV_STATUS tsemaGetSwMethods_IMPL +( + TimedSemaSwObject *pTimedSemSw, + METHOD **ppMethods, + NvU32 *pNumMethods +) +{ + *ppMethods = GF100TimedSemSwMethods; + *pNumMethods = NV_ARRAY_ELEMENTS32(GF100TimedSemSwMethods); + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/timer/arch/ampere/timer_ga100.c b/src/nvidia/src/kernel/gpu/timer/arch/ampere/timer_ga100.c new file mode 100644 index 000000000..7cc163608 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/arch/ampere/timer_ga100.c @@ -0,0 +1,56 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/********************* Chip Specific HAL TMR Routines **********************\ +* * +* The GA100 specific HAL TMR routines reside in this file. * +* * +\***************************************************************************/ +/* ------------------------- Includes --------------------------------------- */ +#include "gpu/gpu.h" +#include "objtmr.h" +#include "published/ampere/ga100/dev_vm.h" +/* ------------------------- Datatypes -------------------------------------- */ +/* ------------------------- Macros ----------------------------------------- */ +/* ------------------------- Static Function Prototypes --------------------- */ +/* ------------------------- Public Functions ------------------------------ */ + +/*! 
+ * @brief Gets GPU PTIMER offsets + * + */ +NV_STATUS +tmrGetGpuPtimerOffset_GA100 +( + POBJGPU pGpu, + POBJTMR pTmr, + NvU32 *pGpuTimestampOffsetLo, + NvU32 *pGpuTimestampOffsetHi +) +{ + { + *pGpuTimestampOffsetLo = GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_TIME_0); + *pGpuTimestampOffsetHi = GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_TIME_1); + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/timer/arch/maxwell/timer_gm107.c b/src/nvidia/src/kernel/gpu/timer/arch/maxwell/timer_gm107.c new file mode 100644 index 000000000..8554bc817 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/arch/maxwell/timer_gm107.c @@ -0,0 +1,278 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/************************** Ptimer based Routines **************************\ +* * +* The GM107 specific HAL TMR routines reside in this file. * +* * +\***************************************************************************/ +/* ------------------------- Includes --------------------------------------- */ +#include "gpu/gpu.h" +#include "objtmr.h" +#include "published/maxwell/gm107/dev_timer.h" +/* ------------------------- Datatypes -------------------------------------- */ +/* ------------------------- Macros ----------------------------------------- */ +/* ------------------------- Static Function Prototypes --------------------- */ +/* ------------------------- Public Functions ------------------------------ */ + +/* + * @brief Sets the GPU time to the current wall-clock time. + * + * @param[in] pGpu- GPU Object pointer + * @param[in] pTmr- Timer Object pointer + * + * @return NV_OK + * @return NV_ERR_PRIV_SEC_VIOLATION - PTIMER_TIME is read-only. + */ +NV_STATUS +tmrSetCurrentTime_GM107 +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NvU64 ns; // Time since 1970 in ns. + NvU32 seconds; // Time since 1970 in seconds + NvU32 useconds; // and uSeconds. + + // + // Get current time from operating system. + // + // We get the time in seconds and microseconds since 1970 + // Note that we don't really need the real time of day + // + osGetCurrentTime(&seconds, &useconds); + + NV_PRINTF(LEVEL_INFO, + "osGetCurrentTime returns 0x%x seconds, 0x%x useconds\n", + seconds, useconds); + + // + // Calculate ns since 1970. + // + ns = ((NvU64)seconds * 1000000 + useconds) * 1000; + + // + // TIME_0 must always come 2nd. 
On Maxwell and later writing TIME_0 is + // the trigger to load the new time. + // + GPU_REG_WR32(pGpu, NV_PTIMER_TIME_1, NvU64_HI32(ns)); + GPU_REG_WR32(pGpu, NV_PTIMER_TIME_0, NvU64_LO32(ns)); + + // Mark that time has been initialized + pTmr->bInitialized = NV_TRUE; + + return NV_OK; +} + +#define NV_NUM_PTIMER_SAMPLES 3 + +NV_STATUS +tmrGetGpuAndCpuTimestampPair_GM107 +( + POBJGPU pGpu, + POBJTMR pTmr, + NvU64 *pGpuTime, + NvU64 *pCpuTime +) +{ +#if PORT_IS_FUNC_SUPPORTED(portUtilExReadTimestampCounter) + NvU32 gpuTimeLo[NV_NUM_PTIMER_SAMPLES]; + NvU64 cpuTime[NV_NUM_PTIMER_SAMPLES+1]; + NvU64 min; + NvU32 closestPairBeginIndex; + NvU32 gpuTimeHiOld; + NvU32 gpuTimeHiNew; + NvU32 i; + NvU32 gpuTimestampOffsetLo = 0; + NvU32 gpuTimestampOffsetHi = 0; + + // We take (hardcoded) 3 gpu timestamps. + ct_assert(NV_NUM_PTIMER_SAMPLES == 3); + + tmrGetGpuPtimerOffset_HAL(pGpu, pTmr, &gpuTimestampOffsetLo, &gpuTimestampOffsetHi); + + gpuTimeHiNew = osGpuReadReg032(pGpu, gpuTimestampOffsetHi); + do + { + portAtomicTimerBarrier(); + gpuTimeHiOld = gpuTimeHiNew; + cpuTime[0] = portUtilExReadTimestampCounter(); + portAtomicTimerBarrier(); + gpuTimeLo[0] = osGpuReadReg032(pGpu, gpuTimestampOffsetLo); + portAtomicTimerBarrier(); + cpuTime[1] = portUtilExReadTimestampCounter(); + portAtomicTimerBarrier(); + gpuTimeLo[1] = osGpuReadReg032(pGpu, gpuTimestampOffsetLo); + portAtomicTimerBarrier(); + cpuTime[2] = portUtilExReadTimestampCounter(); + portAtomicTimerBarrier(); + gpuTimeLo[2] = osGpuReadReg032(pGpu, gpuTimestampOffsetLo); + portAtomicTimerBarrier(); + cpuTime[3] = portUtilExReadTimestampCounter(); + portAtomicTimerBarrier(); + // Read TIME_1 again to detect wrap around. + gpuTimeHiNew = osGpuReadReg032(pGpu, gpuTimestampOffsetHi); + } while (gpuTimeHiNew != gpuTimeHiOld); + + // + // find i such that cpuTime[i+1] - cpuTime[i] is minimum i.e. + // the find closest pair of cpuTime. + // + min = cpuTime[1] - cpuTime[0]; + closestPairBeginIndex = 0; + for (i = 0; i < NV_NUM_PTIMER_SAMPLES; i++) + { + if ((cpuTime[i+1] - cpuTime[i]) < min) + { + closestPairBeginIndex = i; + min = cpuTime[i+1] - cpuTime[i]; + } + } + + *pGpuTime = (((NvU64)gpuTimeHiNew) << 32) | gpuTimeLo[closestPairBeginIndex]; + *pCpuTime = (cpuTime[closestPairBeginIndex] + cpuTime[closestPairBeginIndex + 1])/2; + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif + +} + +/*! + * @brief Gets GPU PTIMER offsets + * + */ +NV_STATUS +tmrGetGpuPtimerOffset_GM107 +( + POBJGPU pGpu, + POBJTMR pTmr, + NvU32 *pGpuTimestampOffsetLo, + NvU32 *pGpuTimestampOffsetHi +) +{ + if (pGpuTimestampOffsetLo) + { + *pGpuTimestampOffsetLo = NV_PTIMER_TIME_0; + } + if (pGpuTimestampOffsetHi) + { + *pGpuTimestampOffsetHi = NV_PTIMER_TIME_1; + } + + return NV_OK;; +} + +/*! + * @brief Change GR Tick frequency to either default or max. + * + * @param[in] OBJGPU *GPU Object + * @param[in] OBJTMR *Timer Object + * @param[in] NvBool Whether to set freq to Max or not. + * + * @returns NV_OK on success. 
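+ *
+ * @note See _tmrGrTimeStampFreqStateChange() in timer.c, which invokes this
+ *       HAL through the pGrTickFreqRefcnt reference counter via
+ *       tmrGrTickFreqChange_HAL().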
+ */ +NV_STATUS +tmrGrTickFreqChange_GM107 +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvBool bSetMax +) +{ + NvU32 grTickFreq; + + grTickFreq = GPU_REG_RD32(pGpu, NV_PTIMER_GR_TICK_FREQ); + if (bSetMax) + { + grTickFreq = FLD_SET_DRF(_PTIMER, _GR_TICK_FREQ, _SELECT, _MAX, grTickFreq); + } + else + { + grTickFreq = FLD_SET_DRF(_PTIMER, _GR_TICK_FREQ, _SELECT, _DEFAULT, grTickFreq); + } + + GPU_REG_WR32(pGpu, NV_PTIMER_GR_TICK_FREQ, grTickFreq); + + return NV_OK; +} + +// +// From dev_timer.ref +// +// When reading the TIME, TIME_1 should be read first, followed by TIME_0, then +// a second reading of TIME_1 should be done. If the two readings of TIME_1 do +// not agree, this process should be repeated. +// +NvU64 +tmrGetTimeEx_GM107 +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 TimeLo = 0; + NvU32 TimeHi = 0; + NvU32 TimeHi2 = 0; + NvU32 i; + NvU64 Time; + + do + { + TimeHi = tmrReadTimeHiReg_HAL(pGpu, pTmr, pThreadState); + // Get a stable TIME_0 + for (i = 0; i < pTmr->retryTimes; ++i) + { + TimeLo = tmrReadTimeLoReg_HAL(pGpu, pTmr, pThreadState); + if ((TimeLo & ~DRF_SHIFTMASK(NV_PTIMER_TIME_0_NSEC)) == 0) + break; + } + + // Couldn't get a good value + if (i == pTmr->retryTimes) + { + // PTIMER returns bad bits after several read attempts + NV_PRINTF(LEVEL_ERROR, + "NVRM-RC: Consistently Bad TimeLo value %x\n", TimeLo); + DBG_BREAKPOINT(); + + // count # of times we reset PTIMER + pTmr->errorCount++; + + // load timer state (this will restore TIME_0 and TIME_1 to OS provided values) + tmrStateInitLocked(pGpu, pTmr); + tmrStateLoad(pGpu, pTmr, GPU_STATE_DEFAULT); + + return 0; + } + + // Read TIME_1 again to detect wrap around. + TimeHi2 = tmrReadTimeHiReg_HAL(pGpu, pTmr, pThreadState); + } while (TimeHi != TimeHi2); + + // Convert to 64b + Time = (((NvU64)TimeHi) << 32) | TimeLo; + + return Time; +} diff --git a/src/nvidia/src/kernel/gpu/timer/arch/maxwell/timer_gm200.c b/src/nvidia/src/kernel/gpu/timer/arch/maxwell/timer_gm200.c new file mode 100644 index 000000000..fb6a1d099 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/arch/maxwell/timer_gm200.c @@ -0,0 +1,105 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/********************* Chip Specific HAL TMR Routines **********************\ +* * +* The GM200 specific HAL TMR routines reside in this file. 
* +* * +\***************************************************************************/ +/* ------------------------- Includes --------------------------------------- */ +#include "gpu/gpu.h" +#include "objtmr.h" +#include "published/maxwell/gm200/dev_timer.h" +/* ------------------------- Datatypes -------------------------------------- */ +/* ------------------------- Macros ----------------------------------------- */ +/* ------------------------- Static Function Prototypes --------------------- */ +/* ------------------------- Public Functions ------------------------------ */ + +NV_STATUS +tmrSetCountdownIntrDisable_GM200 +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NvU32 reg = GPU_REG_RD32(pGpu, NV_PTIMER_INTR_EN_0); + + reg = FLD_SET_DRF(_PTIMER, _INTR_EN_0, _TIMER, _DISABLED, reg); + GPU_REG_WR32_UC(pGpu, NV_PTIMER_INTR_EN_0, reg); + + return NV_OK; +} + +NV_STATUS +tmrSetCountdownIntrEnable_GM200 +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NvU32 reg = GPU_REG_RD32(pGpu, NV_PTIMER_INTR_EN_0); + + reg = FLD_SET_DRF(_PTIMER, _INTR_EN_0, _TIMER, _ENABLED, reg); + GPU_REG_WR32_UC(pGpu, NV_PTIMER_INTR_EN_0, reg); + + return NV_OK; +} + +NV_STATUS +tmrSetCountdownIntrReset_GM200 +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_REG_WR32_EX(pGpu, NV_PTIMER_INTR_0, + DRF_DEF(_PTIMER, _INTR_0, _TIMER, _RESET), pThreadState); + return NV_OK; +} + +NvBool +tmrGetCountdownPending_GM200 +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 reg = GPU_REG_RD32_EX(pGpu, NV_PTIMER_INTR_0, pThreadState); + return FLD_TEST_DRF(_PTIMER, _INTR_0, _TIMER, _PENDING, reg); +} + +NV_STATUS +tmrSetCountdown_GM200 +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 timer, + NvU32 tmrId, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_REG_WR32_EX(pGpu, NV_PTIMER_TIMER_0, timer, pThreadState); + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/timer/arch/turing/timer_tu102.c b/src/nvidia/src/kernel/gpu/timer/arch/turing/timer_tu102.c new file mode 100644 index 000000000..818cde411 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/arch/turing/timer_tu102.c @@ -0,0 +1,164 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/************************** Ptimer based Routines **************************\ +* * +* The TU102 specific HAL TMR routines reside in this file. 
* +* * +\***************************************************************************/ +/* ------------------------- Includes --------------------------------------- */ +#include "gpu/gpu.h" +#include "objtmr.h" +#include "published/turing/tu102/dev_vm.h" +/* ------------------------- Datatypes -------------------------------------- */ +/* ------------------------- Macros ----------------------------------------- */ +/* ------------------------- Static Function Prototypes --------------------- */ +/* ------------------------- Public Functions ------------------------------ */ + + +NV_STATUS +tmrSetCountdown_TU102 +( + POBJGPU pGpu, + POBJTMR pTmr, + NvU32 time, + NvU32 tmrId, + THREAD_STATE_NODE *pThreadState +) +{ + GPU_VREG_WR32_EX(pGpu, NV_VIRTUAL_FUNCTION_PRIV_TIMER, time, pThreadState); + + return NV_OK; +} + +NV_STATUS +tmrSetCountdownIntrEnable_TU102 +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + // + // The countdown timer interrupt has moved to NV_CTRL on Turing and hence, + // we will never enable the interrupt in NV_PTIMER_INTR_EN_0 which controls + // whether the interrupt is reported into the NV_PMC_INTR tree. We have not + // assigned a STUB to the HAL to allow pre-GM20X versions to return an + // error if called on those chips. + // + + return NV_OK; +} + +NV_STATUS +tmrSetCountdownIntrReset_TU102 +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + // + // The countdown timer interrupt has moved to NV_CTRL on Turing and will be + // reset directly in NV_CTRL. We have not assigned a STUB to the HAL to + // llow pre-GM20X versions to return an error if called on those chips. + // + return NV_OK; +} + + +/*! + * @brief Gets GPU PTIMER offsets + * + */ +NV_STATUS +tmrGetGpuPtimerOffset_TU102 +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 *pGpuTimestampOffsetLo, + NvU32 *pGpuTimestampOffsetHi +) +{ + extern NV_STATUS tmrGetGpuPtimerOffset_GV100(OBJGPU *pGpu, OBJTMR *pTmr, NvU32 *pGpuTimestampOffsetLo, NvU32 *pGpuTimestampOffsetHi); + NvU32 ptimerOffsetLo = 0; + NvU32 ptimerOffsetHi = 0; + + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + ptimerOffsetLo = NV_VIRTUAL_FUNCTION_TIME_0; + ptimerOffsetHi = NV_VIRTUAL_FUNCTION_TIME_1; + } + else + { + return tmrGetGpuPtimerOffset_GV100(pGpu, pTmr, pGpuTimestampOffsetLo, pGpuTimestampOffsetHi); + } + + if (pGpuTimestampOffsetLo) + { + *pGpuTimestampOffsetLo = ptimerOffsetLo; + } + + if (pGpuTimestampOffsetHi) + { + *pGpuTimestampOffsetHi = ptimerOffsetHi; + } + + return NV_OK; +} + +/* + * @brief This function returns the PTIMER_TIME_0 register. This function will + * work for both Physical function and virtual function in SR-IOV. + * @param[in] POBJGPU - GPU Object pointer + * @param[in] POBJTMR - Timer Object pointer + * + * @return NvU32 + */ +NvU32 +tmrReadTimeLoReg_TU102 +( + POBJGPU pGpu, + POBJTMR pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + return GPU_VREG_RD32_EX(pGpu, NV_VIRTUAL_FUNCTION_TIME_0, pThreadState); +} + +/* + * @brief This function returns the PTIMER_TIME_1 register. This function will + * work for both Physical function and virtual function in SR-IOV. 
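+ * (A thin wrapper over GPU_VREG_RD32_EX() of NV_VIRTUAL_FUNCTION_TIME_1; the
+ * TIME_0 half is read by tmrReadTimeLoReg_TU102 above.)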
+ * @param[in] POBJGPU - GPU Object pointer + * @param[in] POBJTMR - Timer Object pointer + * + * @return NvU32 + */ +NvU32 +tmrReadTimeHiReg_TU102 +( + POBJGPU pGpu, + POBJTMR pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + return GPU_VREG_RD32_EX(pGpu, NV_VIRTUAL_FUNCTION_TIME_1, pThreadState); +} diff --git a/src/nvidia/src/kernel/gpu/timer/arch/volta/timer_gv100.c b/src/nvidia/src/kernel/gpu/timer/arch/volta/timer_gv100.c new file mode 100644 index 000000000..85872be35 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/arch/volta/timer_gv100.c @@ -0,0 +1,111 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/********************* Chip Specific HAL TMR Routines **********************\ +* * +* The GV100 specific HAL TMR routines reside in this file. * +* * +\***************************************************************************/ +/* ------------------------- Includes --------------------------------------- */ +#include "gpu/gpu.h" +#include "objtmr.h" +#include "published/volta/gv100/dev_timer.h" +#include "published/volta/gv100/dev_usermode.h" +/* ------------------------- Datatypes -------------------------------------- */ +/* ------------------------- Macros ----------------------------------------- */ +/* ------------------------- Static Function Prototypes --------------------- */ +/* ------------------------- Public Functions ------------------------------ */ +/* + * @brief Sets the GPU time to the current wall-clock time. + * + * @param[in] pGpu- GPU Object pointer + * @param[in] pTmr- Timer Object pointer + * + * @return NV_OK + * @return NV_ERR_PRIV_SEC_VIOLATION - PTIMER_TIME is read-only. + */ +NV_STATUS +tmrSetCurrentTime_GV100 +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NV_STATUS status; + + // We can only set the time if level 0 is allowed to write + if (GPU_FLD_TEST_DRF_DEF(pGpu, _PTIMER, _TIME_PRIV_LEVEL_MASK, _WRITE_PROTECTION_LEVEL0, _ENABLE)) + { + NvU64 ns; + NvU32 seconds; + NvU32 useconds; + + osGetCurrentTime(&seconds, &useconds); + + NV_PRINTF(LEVEL_INFO, + "osGetCurrentTime returns 0x%x seconds, 0x%x useconds\n", + seconds, useconds); + + ns = ((NvU64)seconds * 1000000 + useconds) * 1000; + + // Writing TIME_0 is the trigger to load the new time. 
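+        // (Same ordering as the GM107 path: TIME_1 is written first, and the
+        // TIME_0 write commits the new value.)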
+ GPU_REG_WR32(pGpu, NV_PTIMER_TIME_1, NvU64_HI32(ns)); + GPU_REG_WR32(pGpu, NV_PTIMER_TIME_0, NvU64_LO32(ns)); + + status = NV_OK; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "ERROR: Write to PTIMER attempted even though Level 0 PLM is disabled.\n"); + NV_ASSERT(0); + status = NV_ERR_PRIV_SEC_VIOLATION; + } + + return status; +} + +/*! + * @brief Gets GPU PTIMER offsets + * + */ +NV_STATUS +tmrGetGpuPtimerOffset_GV100 +( + POBJGPU pGpu, + POBJTMR pTmr, + NvU32 *pGpuTimestampOffsetLo, + NvU32 *pGpuTimestampOffsetHi +) +{ + extern NV_STATUS tmrGetGpuPtimerOffset_GM107(OBJGPU *pGpu, OBJTMR *pTmr, NvU32 *pGpuTimestampOffsetLo, NvU32 *pGpuTimestampOffsetHi); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_NV_USERMODE_ENABLED)) + { + *pGpuTimestampOffsetLo = NV_USERMODE_TIME_0; + *pGpuTimestampOffsetHi = NV_USERMODE_TIME_1; + } + else + return tmrGetGpuPtimerOffset_GM107(pGpu, pTmr, pGpuTimestampOffsetLo, pGpuTimestampOffsetHi); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/timer/timer.c b/src/nvidia/src/kernel/gpu/timer/timer.c new file mode 100644 index 000000000..341e224ec --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/timer.c @@ -0,0 +1,1788 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file timer.c + * @brief Timer Object Function Definitions. 
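+ *
+ *        Chip-independent timer support: the sorted RM callback list, the
+ *        TMR_EVENT create/schedule/cancel/destroy API, the legacy
+ *        tmrScheduleCallback* entry points, and the OS-timer/1Hz callback
+ *        plumbing used when PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS is set.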
+ */ + +/* ------------------------ Includes ---------------------------------------- */ +#include "objtmr.h" +#include "class/cl0004.h" // NV004_NOTIFIERS_SET_ALARM_NOTIFY +#include "gpu/gpu_resource.h" +#include "core/locks.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "kernel/gpu/intr/intr.h" + +/* ------------------------ Static Function Prototypes ---------------------- */ +static PTMR_EVENT_PVT _tmrPullCallbackFromHead (OBJTMR *); +static void _tmrScanCallback(OBJTMR *, void *); +static PTMR_EVENT_PVT _tmrGetNextFreeCallback(OBJTMR *); +static NV_STATUS _tmrInsertCallback(OBJTMR *, PTMR_EVENT_PVT, NvU64); +static void _tmrInsertCallbackInList(OBJGPU *pGpu, OBJTMR *pTmr, PTMR_EVENT_PVT pEvent); +static void _tmrStateLoadCallbacks(OBJGPU *, OBJTMR *); +static NV_STATUS _tmrGetNextAlarmTime(OBJTMR *, NvU64 *); +static void _tmrScheduleCallbackInterrupt(OBJGPU *, OBJTMR *, NvU64); + +NV_STATUS +tmrConstructEngine_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + ENGDESCRIPTOR engDesc +) +{ + // Mark that this timer is not yet initialized + pTmr->bInitialized = NV_FALSE; + + // Create the Granular lock for SWRL Timer callback + pTmr->pTmrSwrlLock = portSyncSpinlockCreate(portMemAllocatorGetGlobalNonPaged()); + if (pTmr->pTmrSwrlLock == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Alloc spinlock failed\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + tmrInitCallbacks(pTmr); + osInit1HzCallbacks(pTmr); + + pTmr->retryTimes = 3; + + pTmr->errorCount = 0; + + pTmr->pGrTickFreqRefcnt = NULL; + + return NV_OK; +} + +static RefcntStateChangeCallback _tmrGrTimeStampFreqStateChange; +static NV_STATUS _tmrGrTimeStampFreqRefcntInit(OBJGPU *, OBJTMR *); + +/*! + * @brief Initializes the GR timer specific state of OBJTMR. + * + * Since this is initialization of client-level structures that won't be + * touched yet, we can execute without the GPU locks. + * + * @param[in] pGpu - OBJGPU pointer + * @param[in] pHwpm - OBJTMR pointer + * + * @returns NV_OK if the profiler-specific state is successfully initialized + * Other errors from _tmrGrTimeStampFreqRefcntInit() + */ +NV_STATUS +tmrStateInitUnlocked_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NV_STATUS status; + + status = _tmrGrTimeStampFreqRefcntInit(pGpu, pTmr); + + return status; +} + +/*! + * @brief Creates the reference counters for GR Time Stamp update frequency. + * + * @param[in] pGpu - OBJGPU pointer + * @param[in] pTmr - OBJTMR pointer + * + * @returns NV_OK if the reference counters are created successfully or the + * REFCNT engine is disabled + * NV_ERR_INVALID_STATE if one of the reference counters could not + * be loaded after creation + * Other errors from objCreate() and refcntConstruct() + */ +static NV_STATUS +_tmrGrTimeStampFreqRefcntInit +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NV_STATUS status = NV_OK; + status = objCreate(&pTmr->pGrTickFreqRefcnt, pTmr, OBJREFCNT, + staticCast(pTmr, Dynamic), 0, &_tmrGrTimeStampFreqStateChange, NULL); + + return status; +} + +/*! + * @brief Changes GR engine time stamp update frequency on behalf of the + * associated reference counter. 
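+ * REFCNT_STATE_ENABLED as the new state requests the maximum update frequency
+ * through tmrGrTickFreqChange_HAL(); any other new state restores the default
+ * frequency.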
+ * + * @param[in] pRefcnt - OBJREFCNT pointer + * @param[in] pDynamic - OBJTMR pointer + * @param[in] oldState - The current state of the setting + * @param[in] newState - The next intended state of the setting + * + * @returns NV_OK if the power feature state is successfully changed + * NV_ERR_INVALID_STATE if the callback context state of the + * reference counter cannot be loaded + * NV_ERR_NOT_SUPPORTED if the callback is called on an unknown + * reference counter + * Other errors from tmrGrTimeStampFreqStateChange_HAL(), + */ +static NV_STATUS +_tmrGrTimeStampFreqStateChange +( + OBJREFCNT *pRefcnt, + Dynamic *pDynamic, + REFCNT_STATE oldState, + REFCNT_STATE newState +) +{ + NV_STATUS status = NV_OK; + OBJTMR *pTmr = dynamicCast(pDynamic, OBJTMR); + OBJGPU *pGpu = NULL; + NvBool bSetMaxFreq = (newState == REFCNT_STATE_ENABLED); + + if (pRefcnt == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (pTmr == NULL) + { + return NV_ERR_INVALID_STATE; + } + + pGpu = TMR_GET_GPU(pTmr); + if (pGpu == NULL) + { + return NV_ERR_INVALID_STATE; + } + + status = tmrGrTickFreqChange_HAL(pGpu, pTmr, bSetMaxFreq); + + return status; +} + +void +tmrDestruct_IMPL(OBJTMR *pTmr) +{ + // Delete the Granular lock for SWRL Timer callback + if (pTmr->pTmrSwrlLock != NULL) + { + portSyncSpinlockDestroy(pTmr->pTmrSwrlLock); + pTmr->pTmrSwrlLock = NULL; + } + + if (pTmr->pGrTickFreqRefcnt != NULL) + { + objDelete(pTmr->pGrTickFreqRefcnt); + pTmr->pGrTickFreqRefcnt = NULL; + } + + osDestroy1HzCallbacks(pTmr); +} + +/*! + * Simple Utility function, checks if there are any queued callbacks + */ +static NV_INLINE NvBool tmrEventsExist(OBJTMR *pTmr) +{ + return pTmr->pRmActiveEventList != NULL; +} + +/*! + * Allocates the necessary memory for storing a callback in the timer. + * + * @param[out] ppEvent A reference to the client's pointer. + */ +NV_STATUS tmrEventCreate_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT *ppEventPublic, + TIMEPROC Proc, + void *pUserData, + NvU32 flags +) +{ + PTMR_EVENT_PVT *ppEvent = (PTMR_EVENT_PVT*)ppEventPublic; + NV_STATUS status = NV_OK; + + *ppEvent = portMemAllocNonPaged(sizeof(TMR_EVENT_PVT)); + if (*ppEvent == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate timer event\n"); + return NV_ERR_NO_MEMORY; + } + (*ppEvent)->bLegacy = NV_FALSE; + (*ppEvent)->bInUse = NV_FALSE; + (*ppEvent)->pNext = NULL; + (*ppEventPublic)->pTimeProc = Proc; + (*ppEventPublic)->pUserData = pUserData; + (*ppEventPublic)->flags = flags; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + status = tmrEventCreateOSTimer_HAL(pTmr, *ppEventPublic); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create OS timer \n"); + } + } + return status; +} + +static void +_tmrScheduleCallbackInterrupt +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU64 alarmTime +) +{ + // + // Don't schedule the interrupt if we are polling. The interrupt can be + // routed to a different device, which could get confused. Also we don't + // want the extra priv writes. + // + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS)) + return; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + NvU64 currentTime; + NvU32 countdownTime; + + tmrGetCurrentTime(pTmr, ¤tTime); + countdownTime = currentTime < alarmTime ? 
NvU64_LO32(alarmTime - currentTime) : 0; + tmrSetCountdown_HAL(pGpu, pTmr, countdownTime, 0, NULL); + } + else + { + tmrSetAlarm_HAL(pGpu, pTmr, alarmTime, NULL); + } +} + +void +tmrResetCallbackInterrupt_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + tmrSetCountdownIntrReset_HAL(pGpu, pTmr, NULL); + } + else + { + tmrSetAlarmIntrReset_HAL(pGpu, pTmr, NULL); + } +} + +NvBool +tmrGetCallbackInterruptPending_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + return tmrGetCountdownPending_HAL(pGpu, pTmr, NULL); + } + else + { + return tmrGetAlarmPending_HAL(pGpu, pTmr, NULL); + } +} + +/*! + * Cancels a given callback, marking it invalid and preventing it from being executed. + * Updates the next alarm time appropriately + * + * @param[in] pEvent The callback to be cancelled + */ +void tmrEventCancel_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic +) +{ + NvU64 nextAlarmTime; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + PTMR_EVENT_PVT pChaser = pTmr->pRmActiveEventList; + NvBool bRemovedHead = pChaser == pEvent; + + if (pEventPublic == NULL) + { + return; + } + + NV_ASSERT(!pEvent->bLegacy); + + pEvent->bInUse = NV_FALSE; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + NV_STATUS status = NV_OK; + status = tmrEventCancelOSTimer_HAL(pTmr, pEventPublic); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed in cancel of OS timer callback\n"); + } + return; + } + + if (bRemovedHead) + { + pTmr->pRmActiveEventList = pEvent->pNext; + + // Need to update the alarm time + if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } + else + { + // List is empty! Disable PTIMER interrupt. + tmrRmCallbackIntrDisable(pTmr, pGpu); + } + } + else + { + while (pChaser != NULL && pChaser->pNext != pEvent) + { + pChaser = pChaser->pNext; + } + if (pChaser == NULL) + { + // The callback wasn't currently scheduled, nothing to change. + return; + } + pChaser->pNext = pEvent->pNext; + } +} + +/*! + * Frees the memory used for maintaining a given callback in the timer. + * Currently automatically calls cancel on the event. + * + * @param[in] pEvent The callback to cancel and free. + */ +void tmrEventDestroy_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic +) +{ + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + + if (pEvent != NULL) + { + NV_ASSERT(!pEvent->bLegacy); + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + // OS timer destroying will cancel the timer + tmrEventDestroyOSTimer_HAL(pTmr, pEventPublic); + } + else + { + tmrEventCancel(pTmr, pEventPublic); + } + portMemFree(pEvent); + } +} + +/*! + * TODO: document + */ +static NV_STATUS +_nv0004CtrlCmdTmrSetAlarmNotifyCallback(OBJGPU *pGpu, OBJTMR *pTmr, void *pData) +{ + PEVENTNOTIFICATION pNotifyEvent = pData; + NV_STATUS status = NV_OK; + + // perform a direct callback to the client + if (NvP64_VALUE(pNotifyEvent->Data) != NULL) + { + //one shot signal + status = osNotifyEvent(pGpu, pNotifyEvent, NV004_NOTIFIERS_SET_ALARM_NOTIFY, 0, NV_OK); + } + + return status; +} + +/*! 
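+ * Validates that pParams->hEvent names an event registered on this TimerApi
+ * object and, if found, schedules a one-shot relative callback at
+ * pParams->alarmTimeNsecs that signals the event via osNotifyEvent().
+ *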
+ * TODO: document + * TODO: Migrate this to match current API (probably) + */ +NV_STATUS +tmrapiCtrlCmdTmrSetAlarmNotify_IMPL +( + TimerApi *pTimerApi, + NV0004_CTRL_TMR_SET_ALARM_NOTIFY_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pTimerApi); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pTimerApi, INotifier)); + + // Validate the timer event + while ((pNotifyEvent != NULL) && (pNotifyEvent->hEvent != pParams->hEvent)) + { + pNotifyEvent = pNotifyEvent->Next; + } + + if (pNotifyEvent == NULL) + { + NV_PRINTF(LEVEL_INFO, "timer event is missing\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // schedule the timer + tmrScheduleCallbackRel(pTmr, + _nv0004CtrlCmdTmrSetAlarmNotifyCallback, + pNotifyEvent, + pParams->alarmTimeNsecs, 0, 0); + + return NV_OK; +} + +NV_STATUS tmrGetCurrentTimeEx_IMPL +( + OBJTMR *pTmr, + NvU64 *pTime, + THREAD_STATE_NODE *pThreadState +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + *pTime = (NvU64)(~0); + return NV_ERR_GPU_IN_FULLCHIP_RESET; + } + + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + *pTime = (NvU64)(~0); + return NV_ERR_GPU_IS_LOST; + } + + *pTime = tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState); + + return NV_OK; +} + +NV_STATUS tmrGetCurrentTime_IMPL +( + OBJTMR *pTmr, + NvU64 *pTime +) +{ + return tmrGetCurrentTimeEx(pTmr, pTime, NULL); +} + +/*! + * TODO: document + */ +NV_STATUS tmrGetCurrentDiffTime_IMPL +( + OBJTMR *pTmr, + NvU64 startTime, + NvU64 *pDiffTime +) +{ + NvU64 currentTime; + NV_STATUS rmStatus; + + rmStatus = tmrGetCurrentTime(pTmr, ¤tTime); + + *pDiffTime = currentTime - startTime; + + return rmStatus; +} + +/*! + * Schedule a callback relative to current time specified in units of nanoseconds. + * Callbacks should be expected to be late however, this is not an RTOS, and a + * scheduling delay has been implemented to fix some race condition bugs. + * User has to provide a structure in memory for the timer to use. + * + * @Note: For statically defined events it is recommended to preallocate them all + * at the appropriate stage in task life-cycle, and deallocated at the + * corresponding end of the life-cycle. For dynamically generated events + * consider the affects on fragmentation and potentially deferring deallocation. + * + * @param[in] pEvent Callback memory structure, provided by user. + * @param[in] RelTime Number of nanoseconds from now to call Proc. + * + * @returns Status + */ +NV_STATUS tmrEventScheduleRel_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEvent, + NvU64 RelTime +) +{ + NvU64 AbsTime, currentTime; + NV_STATUS rmStatus; + + rmStatus = tmrGetCurrentTime(pTmr, ¤tTime); + if (rmStatus != NV_OK) + return rmStatus; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + /*HR timer scheduled in relative mode*/ + /*TBD : This condition needs to be moved to OS timer handling functions */ + AbsTime = RelTime; + } + else + { + AbsTime = currentTime + RelTime; + } + + return tmrEventScheduleAbs(pTmr, pEvent, AbsTime); +} + +/*! + * Warning! This code is dangerous, it can cause the whole system to crash. It will be + * removed as soon as possible! Use the new API! + * It only remains for transitional purposes only. 
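+ *
+ * A minimal sketch of the replacement flow using the TMR_EVENT API from this
+ * file (the callback procedure and user data below are hypothetical):
+ *
+ *   PTMR_EVENT pEvent = NULL;
+ *   tmrEventCreate(pTmr, &pEvent, myTimeProc, pMyUserData, 0);
+ *   tmrEventScheduleRel(pTmr, pEvent, 1000000);  // ~1 ms from now, in ns
+ *   ...
+ *   tmrEventDestroy(pTmr, pEvent);               // also cancels if still queued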
+ */ +NV_STATUS tmrScheduleCallbackRel_IMPL +( + OBJTMR *pTmr, + TIMEPROC_OBSOLETE Proc, + void *Object, + NvU64 RelTime, + NvU32 Flags, + NvU32 ChId +) +{ + NvU64 AbsTime, currentTime; + NV_STATUS rmStatus; + + rmStatus = tmrGetCurrentTime(pTmr, ¤tTime); + if (rmStatus != NV_OK) + return rmStatus; + + AbsTime = currentTime + RelTime; + + return tmrScheduleCallbackAbs(pTmr, Proc, Object, AbsTime, Flags, ChId); +} + +/*! + * Warning! This code is dangerous, it can cause the whole system to crash. It will be + * removed as soon as possible! Use the new API! + * It only remains for transitional purposes only. + */ +NV_STATUS tmrScheduleCallbackRelSec_IMPL +( + OBJTMR *pTmr, + TIMEPROC_OBSOLETE Proc, + void *Object, + NvU32 RelTimeSec, + NvU32 Flags, + NvU32 ChId +) +{ + NvU64 RelTimeNs; + + RelTimeNs = (NvU64)RelTimeSec * 1000000000; + + return tmrScheduleCallbackRel(pTmr, Proc, Object, RelTimeNs, Flags, ChId); +} + +/*! + * Determines if the Callback is actually scheduled currently. + * + * @param[in] pEvent The event in question + */ +NvBool tmrEventOnList_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic +) +{ + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + PTMR_EVENT_PVT pScan = pTmr->pRmActiveEventList; + + while (pScan != NULL) + { + if (pScan == pEvent) + { + NV_ASSERT(pEvent->bInUse); + return NV_TRUE; + } + pScan = pScan->pNext; + } + return NV_FALSE; +} + +/*! + * Warning! This code is dangerous, it can cause the whole system to crash. It will be + * removed as soon as possible! Use the new API! + * It only remains for transitional purposes only. + */ +NvBool tmrCallbackOnList_IMPL +( + OBJTMR *pTmr, + TIMEPROC_OBSOLETE Proc, + void *Object +) +{ + NvBool onList = NV_FALSE; + PTMR_EVENT_PVT tmrScan; + PTMR_EVENT_PVT tmrList; + + tmrList = pTmr->pRmActiveEventList; + + for (tmrScan = tmrList; tmrScan; tmrScan = tmrScan->pNext) + { + if ((Proc == tmrScan->pTimeProc_OBSOLETE) && + (Object == tmrScan->super.pUserData)) + { + onList = NV_TRUE; + break; + } + } + + return onList; +} + +/*! + * OBSOLETE: This will be removed very soon! + */ +static PTMR_EVENT_PVT +_tmrGetNextFreeCallback +( + OBJTMR *pTmr +) +{ + PTMR_EVENT_PVT pEvent = NULL; + + pEvent = pTmr->pRmCallbackFreeList_OBSOLETE; + if (pEvent != NULL) + { + NV_ASSERT(pEvent->bLegacy); // OBSOLETE, remove later + pTmr->pRmCallbackFreeList_OBSOLETE = pEvent->pNext; + // just to be sure. + pEvent->pNext = NULL; + } + + return pEvent; +} + +/*! + * Creates and inserts a node into the callback list. + * + * @param[in] pEvent Callback memory structure, provided by user. + * @param[in] Time Absolute nanoseconds at which to call Proc. + * + * @returns Status + */ +static NV_STATUS +_tmrInsertCallback +( + OBJTMR *pTmr, + PTMR_EVENT_PVT pEvent, + NvU64 Time +) +{ + NV_STATUS returnStatus = NV_ERR_GENERIC; // Indicate that the timer was NOT inserted in the list + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + + // If this is a free callback + if (!pEvent->bInUse && !tmrEventOnList(pTmr, (PTMR_EVENT)pEvent)) + { + pEvent->timens = Time; + + _tmrInsertCallbackInList(pGpu, pTmr, pEvent); + + returnStatus = NV_OK; + } + else + { + // Shouldn't get here. Don't call this function unless valid + NV_ASSERT_OR_RETURN(!"Invalid call to insert, already in use", NV_ERR_INVALID_ARGUMENT); + } + + return returnStatus; +} + +/*! + * Insert (time sorted) a specific event into the callback queue. 
+ * + * Handles setting the next alarm time as well as enabling alarm if needed + * + * @param[in] pEvent The event to be inserted, must be initialized + */ +static void +_tmrInsertCallbackInList +( + OBJGPU *pGpu, + OBJTMR *pTmr, + PTMR_EVENT_PVT pEvent +) +{ + PTMR_EVENT_PVT pScan; + NvBool bAddedAsHead = NV_TRUE; + NvU64 nextAlarmTime; + + NV_ASSERT(!pEvent->bInUse); + + pEvent->bInUse = NV_TRUE; + + if (pTmr->pRmActiveEventList == NULL) + { + // Enable PTIMER interrupt. + tmrRmCallbackIntrEnable(pTmr, pGpu); + + // insert pEvent as first and only entry. + pEvent->pNext = NULL; + pTmr->pRmActiveEventList = pEvent; + } + else if (pEvent->timens <= pTmr->pRmActiveEventList->timens) + { + // insert pEvent as head entry of the non-empty callback list. + pEvent->pNext = pTmr->pRmActiveEventList; + pTmr->pRmActiveEventList = pEvent; + } + else + { + bAddedAsHead = NV_FALSE; + + pScan = pTmr->pRmActiveEventList; + + while (pScan->pNext != NULL) + { + if (pEvent->timens <= pScan->pNext->timens) + { + // insert into the middle of the list. + pEvent->pNext = pScan->pNext; + pScan->pNext = pEvent; + + break; + } + pScan = pScan->pNext; + } + + if (pScan->pNext == NULL) + { + // insert it at the end of the list. + pEvent->pNext = NULL; + pScan->pNext = pEvent; + } + } + + if (bAddedAsHead) + { + // Find out when the next alarm should be. + if (NV_OK != _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + // if there is no event list, then just use 0. + nextAlarmTime = 0; + } + + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } +} + +/*! + * Schedule a callback at the absolute time specified in units of nanoseconds. + * + * Account for bad scheduling times, if the time too close in the future push + * it back till a short delay later. This avoids some race conditions. Even though + * callbacks may be delayed. However callbacks will not happen early. + * + * @Note: For statically defined events it is recommended to preallocate them all + * at the appropriate stage in task life-cycle, and deallocated at the + * corresponding end of the life-cycle. For dynamically generated events + * consider the affects on fragmentation and potentially deferring deallocation. + * + * @param[in] pEvent Callback memory structure, provided by user. + * @param[in] Time Absolute nanoseconds at which to call Proc. + * + * @returns Status + */ +NV_STATUS tmrEventScheduleAbs_IMPL +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic, + NvU64 Time +) +{ + NV_STATUS rmStatus = NV_OK; + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS)) + { + NV_CHECK_OK(rmStatus, LEVEL_ERROR, + tmrEventScheduleAbsOSTimer_HAL(pTmr, pEventPublic, Time)); + return rmStatus; + } + + if (pEventPublic->pTimeProc == NULL && pEvent->pTimeProc_OBSOLETE == NULL) + { + // + // Bug 372159: Not sure exactly how this is happening, but we are seeing + // it in OCA. If you see this during development/testing, please update + // the bug. + // + NV_ASSERT_FAILED( + "Attempting to schedule callback with NULL procedure. " + "Please update Bug 372159 with appropriate information."); + rmStatus = NV_ERR_INVALID_ARGUMENT; + } + else + { + // + // Insert this proc into the callback list. + // + // if (Time <= CurrentTime + SCHEDULING_DELAY_MIN): + // + // We used to return NV_ERR_CALLBACK_NOT_SCHEDULED here. + // The next fix called the callback immediately in order to simulate + // it being "scheduled", however this introduced nasty stack-overflow + // due self rescheduling tasks. 
+ // + // CL 16512758 fixed the stack-overflow issue, and added a case for + // handling callbacks scheduled to occur within 250 ns. Later we found + // out that a 1 us callback could cause the alarm to be set to the past + // and cause a 4+ second delay due to wrap-around. To fix this, we + // removed the 250 ns threshold, so that we will always re-read the + // current time after setting the alarm to prevent the wrap-around. + // + rmStatus = _tmrInsertCallback(pTmr, pEvent, Time); + } + + return rmStatus; +} + +/*! + * Warning! This code is dangerous, it can cause the whole system to crash. It will be + * removed as soon as possible! Use the new API! + */ +NV_STATUS tmrScheduleCallbackAbs_IMPL +( + OBJTMR *pTmr, + TIMEPROC_OBSOLETE Proc, + void *Object, + NvU64 Time, + NvU32 Flags, + NvU32 ChId +) +{ + PTMR_EVENT_PVT tmrInsert; + // Get a free callback from the free list. + if(pTmr->pRmCallbackFreeList_OBSOLETE == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + if (!tmrCallbackOnList(pTmr, Proc, Object)) + { + tmrInsert = _tmrGetNextFreeCallback(pTmr); + if (tmrInsert){ + tmrInsert->pTimeProc_OBSOLETE = Proc; + tmrInsert->super.pUserData = Object; + tmrInsert->super.flags = Flags; + tmrInsert->super.chId = ChId; + + return tmrEventScheduleAbs(pTmr, (PTMR_EVENT)tmrInsert, Time); + } + else + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + else + { + NV_PRINTF(LEVEL_ERROR, "Proc %p Object %p already on tmrList\n", Proc, + Object); + } + return NV_OK; +} + +/*! + * Searches specified lists for PTMR_EVENT associated with Object and + * removes it. + * + * @param[in] Object Unique identifier based on TMR_POBJECT_BASE (tmr.h) + * + * @returns None + */ +static void _tmrScanCallback +( + OBJTMR *pTmr, + void *pObject +) +{ + PTMR_EVENT_PVT tmrScan; + PTMR_EVENT_PVT tmrNext; + PTMR_EVENT_PVT tmrCurrent; + + // + // Start at the beginning of the callback list. + // + // 'current' is either the same as 'scan' or + // it's the item immediately before 'scan' in + // the algorithm below. + // + tmrScan = tmrCurrent = pTmr->pRmActiveEventList; + + // + // Loop through the callback list while there are entries. + // + while (tmrScan) + { + // Point to the next callback so that we + // can continue our scan through the list. + tmrNext = tmrScan->pNext; + + // + // Scan list looking for matches to 'Object'. + // + if (tmrScan->super.pUserData == pObject) + { + // + // If the 'current' is not the item to be deleted + // (It must be the previous item) then link it + // to the 'next' item + // + if (tmrCurrent != tmrScan) + { + tmrCurrent->pNext = tmrScan->pNext; + } + else + { + // + // If 'current' is the same as the item to be deleted + // then move it to the next item. + // + tmrCurrent = tmrNext; + + // + // Update the head pointer if removing the head entry. + // This fixes bug 93812. + // + if (pTmr->pRmActiveEventList == tmrScan) + { + pTmr->pRmActiveEventList = tmrScan->pNext; + } + } + + if (tmrScan->bLegacy) + { + // + // Tack the object to be deleted onto the head of the + // callback free list (OBSOLETE) + // + tmrScan->pNext = pTmr->pRmCallbackFreeList_OBSOLETE; + pTmr->pRmCallbackFreeList_OBSOLETE = tmrScan; + } + + tmrScan->bInUse = NV_FALSE; + } + else + { + // + // If we haven't deleted this item, then the 'current' + // item becomes this item. So 'scan' will advance ONE beyond + // the item that was NOT deleted, and 'current' becomes + // the item NOT deleted. + // + tmrCurrent = tmrScan; + } + + // Now point to the 'next' object in the callback list. 
+ tmrScan = tmrNext; + } +} + +// determine which (if any) callback should determine the next alarm time +static NV_STATUS +_tmrGetNextAlarmTime +( + OBJTMR *pTmr, + NvU64 *pNextAlarmTime +) +{ + if (pTmr->pRmActiveEventList == NULL) + { + *pNextAlarmTime = 0; + return NV_ERR_CALLBACK_NOT_SCHEDULED; + } + + *pNextAlarmTime = pTmr->pRmActiveEventList->timens; + + return NV_OK; +} + +/*! + * Return the very next callback to be scheduled, removing it from the list + * and marking it as free (only "In Use" when in the list) + */ +static PTMR_EVENT_PVT _tmrPullCallbackFromHead +( + OBJTMR *pTmr +) +{ + PTMR_EVENT_PVT tmrDelete = pTmr->pRmActiveEventList; + if (tmrDelete) + { + // remove from callbackList + pTmr->pRmActiveEventList = tmrDelete->pNext; + tmrDelete->bInUse = NV_FALSE; + + if(tmrDelete->bLegacy) + { + // Might be a race condition, but will be removed so it's OK + tmrDelete->pNext = pTmr->pRmCallbackFreeList_OBSOLETE; + pTmr->pRmCallbackFreeList_OBSOLETE = tmrDelete; + } + } + + return tmrDelete; +} + +/*! + * Time until next callback expires. + * + * Returns NV_ERR_CALLBACK_NOT_SCHEDULED if not callbacks are scheduled. + */ +NV_STATUS +tmrTimeUntilNextCallback_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU64 *pTimeUntilCallbackNs +) +{ + NvU64 currentTime; + NvU64 nextAlarmTime; + NV_STATUS status; + + *pTimeUntilCallbackNs = 0; + + // Get the time from the first (earliest) entry. + status = _tmrGetNextAlarmTime(pTmr, &nextAlarmTime); + if (status != NV_OK) + return status; + + status = tmrGetCurrentTime(pTmr, ¤tTime); + if (status != NV_OK) + return status; + + if (currentTime < nextAlarmTime) + *pTimeUntilCallbackNs = nextAlarmTime - currentTime; + + return NV_OK; +} + +/*! + * Used by tmrService, iteratively checks which callbacks need to be executed. + */ +NvBool +tmrCallExpiredCallbacks_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NvU64 currentTime = 0; + NvU64 nextAlarmTime; + PTMR_EVENT_PVT pEvent; + NV_STATUS rmStatus; + NvBool bProccessedCallback = NV_FALSE; + + // Call all callbacks that have expired + if (pTmr && (tmrEventsExist(pTmr))) + { + // Check for expired time. + for (;;) + { + // Get the time from the first (earliest) entry. + rmStatus = _tmrGetNextAlarmTime(pTmr, &nextAlarmTime); + if (rmStatus != NV_OK) + break; + + if (nextAlarmTime > currentTime) + { + rmStatus = tmrGetCurrentTime(pTmr, ¤tTime); + if ((rmStatus != NV_OK) || (nextAlarmTime > currentTime)) + break; + } + + // Pull from head of list. + pEvent = _tmrPullCallbackFromHead(pTmr); + + if (pEvent && + ((pEvent->super.pTimeProc != NULL) || + (pEvent->pTimeProc_OBSOLETE != NULL)) ) + { + // Call callback. This could insert a new callback into the list. + if (pEvent->bLegacy) + { + pEvent->pTimeProc_OBSOLETE(pGpu, pTmr, pEvent->super.pUserData); + } + else + { + pEvent->super.pTimeProc(pGpu, pTmr, (PTMR_EVENT)pEvent); + } + + bProccessedCallback = NV_TRUE; + } + else + { + // + // Bug 372159: Hopefully by checking that the callback procedure + // is not NULL in tmrEventScheduleAbs() we should never hit + // this point, but this is just to be certain. If you hit this + // assert please update Bug 3721259 with pertinent details + // (Swak, !stacks, what you were developing/testing, etc.). + // + NV_ASSERT_FAILED( + "Attempting to execute callback with NULL procedure. " + "Please update Bug 372159 with appropriate information."); + } + } + + // + // rmStatus is NV_OK only when there are more events in the list AND + // the GPU has not fallen off the bus AND the GPU is not in full chip + // reset. 
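+        // In other words, the loop above exited because the soonest remaining
+        // event is still in the future, not because the list drained or because
+        // tmrGetCurrentTime() failed.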
+ // + // We get this this routine with bInterrupt set to true when we got + // (and cleared) the timer interrupt. So, we need to set it again. + // + if (rmStatus == NV_OK) + { + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } + } + + return bProccessedCallback; +} + +/*! + * TODO: document + */ +static void +_tmrStateLoadCallbacks +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + NvU64 nextAlarmTime = 0; + + if (tmrEventsExist(pTmr)) + { + if (tmrGetCallbackInterruptPending(pGpu, pTmr)) + { + if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } + } + + // + // else - we have alarm pending - just proceed to enable interrupts + // so that it's immediately handled + // + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + tmrSetCountdownIntrEnable_HAL(pGpu, pTmr); + } + else + { + tmrSetAlarmIntrEnable_HAL(pGpu, pTmr); + } + } +} + +/*! + * Wraps HAL functions to enable hardware timer interrupts for the rm callbacks. + */ +void +tmrRmCallbackIntrEnable_IMPL +( + OBJTMR *pTmr, + OBJGPU *pGpu +) +{ + tmrResetCallbackInterrupt(pGpu, pTmr); + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + tmrSetCountdownIntrEnable_HAL(pGpu, pTmr); + } + else + { + tmrSetAlarmIntrEnable_HAL(pGpu, pTmr); + } +} + +/*! + * Wraps HAL functions to disable hardware timer interrupts for the rm callbacks. + */ +void +tmrRmCallbackIntrDisable_IMPL +( + OBJTMR *pTmr, + OBJGPU *pGpu +) +{ + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS)) + { + tmrSetCountdownIntrDisable_HAL(pGpu, pTmr); + } + else + { + tmrSetAlarmIntrDisable_HAL(pGpu, pTmr); + } +} + +void +tmrSetCountdownCallback_IMPL +( + OBJTMR *pTmr, + TIMEPROC_COUNTDOWN pSwrlCallback +) +{ + pTmr->pSwrlCallback = pSwrlCallback; +} + +/*! + * TODO: document + */ +void +tmrGetSystemTime_IMPL +( + OBJTMR *pTmr, + PDAYMSECTIME pTime +) +{ + NvU32 sec; + NvU32 usec; + + // + // This function finds out the current time in terms of number of days and + // milliseconds since 1900. Note that the estimates are really crude since + // 1 year is treated as 365 days, 1 month as 30 days and so on. Keep these + // points in mind before using the function. + // + if (pTime != NULL) + { + // Get the system time and calculate the contents of the returned structure. + osGetCurrentTime(&sec, &usec); + pTime->days = sec / (3600 * 24); // # of days since ref point + sec = sec % (3600 * 24); // seconds since day began + pTime->msecs = sec * 1000 + (usec / 1000); // milliseconds since day began + } +} + +/*! + * This has become obsolete, it should be replaced with userData logic + */ +NvBool +tmrCheckCallbacksReleaseSem_IMPL +( + OBJTMR *pTmr, + NvU32 chId +) +{ + PTMR_EVENT_PVT pScan; + + for (pScan = pTmr->pRmActiveEventList; pScan != NULL; pScan = pScan->pNext) + { + if ((pScan->super.flags & TMR_FLAG_RELEASE_SEMAPHORE) && + (pScan->super.chId == chId)) + { + break; + } + } + + return pScan != NULL; +} + +/*! + * TODO: document + */ +void +tmrInitCallbacks_IMPL +( + OBJTMR *pTmr +) +{ + NvU32 i; + + // Initialize the timer callback lists. + pTmr->pRmActiveEventList = NULL; + + // Everything below this comment will be removed with new API + pTmr->pRmCallbackFreeList_OBSOLETE = pTmr->rmCallbackTable_OBSOLETE; + + // Fill in all the forward pointers in the callback table. 
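+    // The static table becomes a singly linked free list for the legacy API.
+    // With TMR_NUM_CALLBACKS_RM == 4, for example, the result is:
+    //
+    //     pRmCallbackFreeList_OBSOLETE -> [0] -> [1] -> [2] -> [3] -> NULL
+    //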
+ for (i = 0; i < (TMR_NUM_CALLBACKS_RM - 1); i++) + { + pTmr->rmCallbackTable_OBSOLETE[i].pNext = &pTmr->rmCallbackTable_OBSOLETE[i+1]; + pTmr->rmCallbackTable_OBSOLETE[i].bInUse = NV_FALSE; + pTmr->rmCallbackTable_OBSOLETE[i].bLegacy = NV_TRUE; + } + pTmr->rmCallbackTable_OBSOLETE[i].pNext = NULL; + pTmr->rmCallbackTable_OBSOLETE[i].bInUse = NV_FALSE; + pTmr->rmCallbackTable_OBSOLETE[i].bLegacy = NV_TRUE; +} + +/*! + * Searches for all events associated with an Object and removes them. + * + * @param[in,out] pTmr TMR object pointer + * @param[in] pObject Unique identifier based on TMR_POBJECT_BASE (tmr.h) + * + * @returns NV_OK always succeeds + */ +NV_STATUS +tmrCancelCallback_IMPL +( + OBJTMR *pTmr, + void *pObject +) +{ + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + NvU64 nextAlarmTime; + + if (tmrEventsExist(pTmr) && pObject != NULL) + { + // Pull all objects with the same address from the callback list. + _tmrScanCallback(pTmr, pObject); + + // + // If there's anything left then set an alarm for the soonest one. + // Otherwise, disable the PTIMER interrupt altogether. + // + if (NV_OK == _tmrGetNextAlarmTime(pTmr, &nextAlarmTime)) + { + _tmrScheduleCallbackInterrupt(pGpu, pTmr, nextAlarmTime); + } + else + { + // List is empty! Disable PTIMER interrupt. + tmrRmCallbackIntrDisable(pTmr, pGpu); + } + } + + return NV_OK; +} + +/*! + * TODO: document + * + * This function finds out if the (futureTime - pastTime) > maxCacheTimeInMSec + */ +NvBool +tmrDiffExceedsTime_IMPL +( + OBJTMR *pTmr, + PDAYMSECTIME pFutureTime, + PDAYMSECTIME pPastTime, + NvU32 time +) +{ + NvU32 msecsInADay = 1000 * 3600 * 24; + NvBool bRetVal = NV_FALSE; + + if ((pFutureTime->days < pPastTime->days) || + (((pFutureTime->days == pPastTime->days) && + (pFutureTime->msecs < pPastTime->msecs)))) + { + bRetVal = NV_TRUE; + } + else + { + // Because of overflow possibility, first check for diff in days + if ((((pFutureTime->days - pPastTime->days) + + (pFutureTime->msecs - pPastTime->msecs)/msecsInADay)) > (time/msecsInADay)) + { + bRetVal = NV_TRUE; + } + else + { + // Now diff in millisecs + if ((((pFutureTime->days - pPastTime->days) * msecsInADay) + + (pFutureTime->msecs - pPastTime->msecs)) > time) + { + bRetVal = NV_TRUE; + } + } + } + + return bRetVal; +} + +/*! + * TODO: document + */ +NV_STATUS +tmrStateInitLocked_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + + return NV_OK; +} + +/*! + * TODO: document + */ +NV_STATUS +tmrStateLoad_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 flags +) +{ + // Have to restore any pending callbacks' state + _tmrStateLoadCallbacks(pGpu, pTmr); + + return NV_OK; +} + +/*! + * TODO: document + */ +NV_STATUS +tmrStateUnload_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 flags +) +{ + // Disable Timer interrupt. + tmrSetAlarmIntrDisable_HAL(pGpu, pTmr); + tmrSetCountdownIntrDisable_HAL(pGpu, pTmr); + + return NV_OK; +} + +/*! 
+ * TODO: document + */ +void +tmrStateDestroy_IMPL +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + objDelete(pTmr->pGrTickFreqRefcnt); + pTmr->pGrTickFreqRefcnt = NULL; +} + +NV_STATUS +tmrapiConstruct_IMPL +( + TimerApi *pTimerApi, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +tmrapiDestruct_IMPL +( + TimerApi *pTimerApi +) +{ +} + +NV_STATUS +tmrapiGetRegBaseOffsetAndSize_IMPL +( + TimerApi *pTimerApi, + OBJGPU *pGpu, + NvU32 *pOffset, + NvU32 *pSize +) +{ + NV_STATUS status; + NvU32 offset; + + status = gpuGetRegBaseOffset_HAL(GPU_RES_GET_GPU(pTimerApi), NV_REG_BASE_TIMER, &offset); + if (status != NV_OK) + return status; + + if (pOffset) + *pOffset = offset; + + if (pSize) + *pSize = sizeof(Nv01TimerMap); + + return NV_OK; +} + +void +tmrapiDeregisterEvents_IMPL(TimerApi *pTimerApi) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pTimerApi); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PEVENTNOTIFICATION pNotifyEvent = inotifyGetNotificationList(staticCast(pTimerApi, INotifier)); + + // Validate the timer event + while (pNotifyEvent != NULL) + { + tmrCancelCallback(pTmr, pNotifyEvent); + + pNotifyEvent = pNotifyEvent->Next; + } +} + +//--------------------------------------------------------------------------- +// +// NV0004 Control Functions +// +//--------------------------------------------------------------------------- + +// +// There is some type hacking involved here. The inner callback is called correctly here +// though it is cast to the outer callback type for storage. The timer only sees the +// outer callback type directly so it will call it correctly, and this wrapper hides the +// inner callback and calls it correctly from itself. Hacky but it should work around the +// limitations in the SDK (all RM derived types undefined, so TIMEPROC type is impossible). +// +typedef NvU32 (*TMR_CALLBACK_FUNCTION)(void *pCallbackData); + +typedef struct +{ + TMR_CALLBACK_FUNCTION pTimeProc; + void *pCallbackData; +} wrapperStorage_t; + +static NV_STATUS _tmrCallbackWrapperfunction +( + OBJGPU *pGpu, + OBJTMR *pTmr, + PTMR_EVENT pEvent +) +{ + wrapperStorage_t *pObj_Inner = (wrapperStorage_t *)pEvent->pUserData; + + // Backup the wrapper function and data + TIMEPROC pCallback_Outer = pEvent->pTimeProc; + void *pCallbackData_Outer = pEvent->pUserData; + + // Swap in the inner function and data + pEvent->pTimeProc = (TIMEPROC) pObj_Inner->pTimeProc; // Intentionally the wrong type! + pEvent->pUserData = pObj_Inner->pCallbackData; + + // Perform the actual callback the way the user expects it + pObj_Inner->pTimeProc((void *)pEvent->pUserData); + + // Rewrap whatever changes the user may have made + pObj_Inner->pTimeProc = (TMR_CALLBACK_FUNCTION) pEvent->pTimeProc; + pObj_Inner->pCallbackData = pEvent->pUserData; + + // Restore the wrapper function and data + pEvent->pTimeProc = pCallback_Outer; + pEvent->pUserData = pCallbackData_Outer; + + return NV_OK; +} + +/*! + * Creates an event and initializes the wrapper callback data, putting the + * desired callback inside of it's struct to be swapped in later. + * + * @returns NV_STATUS + */ +NV_STATUS +tmrCtrlCmdEventCreate +( + OBJGPU *pGpu, + TMR_EVENT_SET_PARAMS *pParams +) +{ + NV_STATUS rc; + PTMR_EVENT pEvent; + wrapperStorage_t *pWrapper; + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + + // ALlocate the wrapper's callerdata to store real caller data! 
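+    // The wrapper lives as long as the event itself: it is carried as the
+    // event's pUserData (see _tmrCallbackWrapperfunction above) and is only
+    // released in tmrCtrlCmdEventDestroy() via portMemFree(pEvent->pUserData).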
+ pWrapper = portMemAllocNonPaged(sizeof(wrapperStorage_t)); + if (pWrapper == NULL) + { + return NV_ERR_NO_MEMORY; + } + pWrapper->pTimeProc = (TMR_CALLBACK_FUNCTION)NvP64_VALUE(pParams->pTimeProc); + pWrapper->pCallbackData = NvP64_VALUE(pParams->pCallbackData); + + rc = tmrEventCreate(pTmr, + &pEvent, + _tmrCallbackWrapperfunction, + pWrapper, + pParams->flags); + + *(pParams->ppEvent) = NV_PTR_TO_NvP64(pEvent); + + return rc; +} + + +/*! + * Schedules an existing event. Takes in time arguments and a flag to + * determine if it should be interpreted as absolute or relative time. + * + * @returns NV_STATUS + */ +NV_STATUS +tmrCtrlCmdEventSchedule +( + OBJGPU *pGpu, + TMR_EVENT_SCHEDULE_PARAMS *pParams +) +{ + NV_STATUS rc; + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PTMR_EVENT pEvent = (PTMR_EVENT)NvP64_VALUE(pParams->pEvent); + + if(pParams->bUseTimeAbs) + { + rc = tmrEventScheduleAbs(pTmr, pEvent, pParams->timeNs); + } + else + { + rc = tmrEventScheduleRel(pTmr, pEvent, pParams->timeNs); + } + + return rc; +} + +/*! + * Cancels an existing event. NOP on unscheduled event. + * + * @returns NV_OK + */ +NV_STATUS +tmrCtrlCmdEventCancel +( + OBJGPU *pGpu, + TMR_EVENT_GENERAL_PARAMS *pParams +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PTMR_EVENT pEvent = (PTMR_EVENT)NvP64_VALUE(pParams->pEvent); + tmrEventCancel(pTmr, pEvent); + + return NV_OK; +} + +/*! + * Cancel and destroys an existing event. It also cleans up the special + * wrapper memory used by this API framework. + * + * @returns NV_OK + */ +NV_STATUS +tmrCtrlCmdEventDestroy +( + OBJGPU *pGpu, + TMR_EVENT_GENERAL_PARAMS *pParams +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + PTMR_EVENT pEvent = (PTMR_EVENT)NvP64_VALUE(pParams->pEvent); + + // Free our temporary wrapper storage + portMemFree(pEvent->pUserData); + + tmrEventDestroy(pTmr, pEvent); + + return NV_OK; +} + +/** + * @brief Provides an opportunity to register some IntrService during intrStateInit. + * + * @param[in] pGpu + * @param[in] pTmr The IntrService object discovered as a GPU child; + * not necessarily the one to be registered. + * @param[in] pRecords A table of MC_ENGINE_IDX_MAX IntrServiceRecord, to be updated directly. + */ +void +tmrRegisterIntrService_IMPL(OBJGPU *pGpu, OBJTMR *pTmr, IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX]) +{ + NV_ASSERT(pRecords[MC_ENGINE_IDX_TMR].pInterruptService == NULL); + pRecords[MC_ENGINE_IDX_TMR].pInterruptService = staticCast(pTmr, IntrService); + + // NB. There's no harm in registering ourselves for _SWRL even when it's not in the intr table. + NV_ASSERT(pRecords[MC_ENGINE_IDX_TMR_SWRL].pInterruptService == NULL); + pRecords[MC_ENGINE_IDX_TMR_SWRL].pInterruptService = staticCast(pTmr, IntrService); +} + +/** + * @brief Clears the stall interrupt leaf vector and return whether to call ServiceStall. + * @details Normally there's no need to override this function as its default is used by almost all handlers, + * but MC_ENGINE_IDX_TMR_SWRL requires a specific sequence to acquire as a semaphore. + * + * @param[in] pGpu + * @param[in] pTmr The IntrService object registered to handle the engineIdx stall interrupt. + * @param[in] pParams + * + * @returns A boolean which is NV_FALSE if the stall interrupt should not actually be handled. 
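+ *
+ *          For MC_ENGINE_IDX_TMR the leaf vector is simply cleared and NV_TRUE
+ *          is returned; for MC_ENGINE_IDX_TMR_SWRL the decision is delegated to
+ *          tmrClearSwrlCallbacksSemaphore(), which performs the semaphore-acquire
+ *          sequence mentioned above.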
+ */ +NvBool +tmrClearInterrupt_IMPL(OBJGPU *pGpu, OBJTMR *pTmr, IntrServiceClearInterruptArguments *pParams) +{ + Intr *pIntr = GPU_GET_INTR(pGpu); + NV_ASSERT_OR_RETURN(pParams != NULL, NV_FALSE); + + switch (pParams->engineIdx) { + case MC_ENGINE_IDX_TMR: + { + intrClearLeafVector_HAL(pGpu, pIntr, + intrGetVectorFromEngineId(pGpu, pIntr, pParams->engineIdx, NV_FALSE), + NULL); + return NV_TRUE; + } + case MC_ENGINE_IDX_TMR_SWRL: + { + return tmrClearSwrlCallbacksSemaphore(pGpu, pTmr, NULL); + } + default: + { + NV_ASSERT_FAILED("invalid EngineIdx"); + return NV_FALSE; + } + } +} + diff --git a/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c b/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c new file mode 100644 index 000000000..f0fa0f449 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/timer_ostimer.c @@ -0,0 +1,325 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/********************* Non-Chip Specific HAL TMR Routines ******************\ +* * +* This file contains TMR method implementations using OSTIMER * +* * +\***************************************************************************/ + +#include "objtmr.h" + +// +// This function returns current time from OS timer +// +NvU64 +tmrGetTimeEx_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + NvU32 seconds; // Time since 1970 in seconds + NvU32 useconds; // and uSeconds. + NvU64 timeNs; // Time since 1970 in ns. + + // + // Get current time from operating system. + // + // We get the time in seconds and microseconds since 1970 + // Note that we don't really need the real time of day + // + osGetCurrentTime(&seconds, &useconds); + + // + // Calculate ns since 1970. + // + timeNs = ((NvU64)seconds * 1000000 + useconds) * 1000; + + return timeNs; +} + +/*! 
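+ * OS-timer-backed event path: on this path callbacks are driven by the OS
+ * nano-timer rather than by the PTIMER alarm. A rough call sequence, using
+ * only names defined in this file:
+ *
+ *     tmrEventCreateOSTimer_OSTIMER()      -> osCreateNanoTimer()
+ *     tmrEventScheduleAbsOSTimer_OSTIMER() -> osStartNanoTimer()
+ *     (OS timer fires)                     -> tmrEventServiceOSTimerCallback_OSTIMER()
+ *     tmrEventCancelOSTimer_OSTIMER()      -> osCancelNanoTimer()
+ *     tmrEventDestroyOSTimer_OSTIMER()     -> osDestroyNanoTimer()
+ *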
+ * Creates OS timer event + * + * @param[in] pTmr Pointer to Timer Object + * @param[in] pEvent pointer to timer event information + * @param[out] NV_STATUS + */ +NV_STATUS tmrEventCreateOSTimer_OSTIMER +( + OBJTMR *pTmr, + PTMR_EVENT pEventPublic +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pEventPublic; + + status = osCreateNanoTimer(pGpu->pOsGpuInfo, pEvent, &(pEvent->super.pOSTmrCBdata)); + + if (status != NV_OK) + { + pEvent->super.pOSTmrCBdata = NULL; + NV_PRINTF(LEVEL_ERROR, "OS create timer failed\n"); + } + + return status; +} + +/*! + * This function Starts or Schedules OS Timer + * + * @param[in] pTmr Pointer to Timer Object + * @param[in] pEvent pointer to timer event information + * @param[in] absolute time in nano seconds + * + * @returns NV_ERR_INVALID_REQUEST failed to create timer +*/ +NV_STATUS tmrEventScheduleAbsOSTimer_OSTIMER +( + OBJTMR *pTmr, + PTMR_EVENT pPublicEvent, + NvU64 timeNs +) +{ + NV_STATUS status= NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT) pPublicEvent; + + if (pEvent->super.pOSTmrCBdata == NULL) + { + NV_PRINTF(LEVEL_ERROR, "OS Timer not created\n"); + return NV_ERR_INVALID_REQUEST; + } + + status = osStartNanoTimer(pGpu->pOsGpuInfo, pEvent->super.pOSTmrCBdata, timeNs); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "OS Start timer FAILED!\n"); + } + + pEvent->super.flags |= TMR_FLAG_OS_TIMER_QUEUED; + return status; +} + +/*! + * This function runs OS timer callback + * +* @param[in] pGpu Pointer to GPU object +* @param[in] pTmr Pointer to Timer Object +* @param[in] pEvent pointer to timer event information +* + * @returns NV_ERR_INVALID_REQUEST if callback not found + */ +NV_STATUS tmrEventServiceOSTimerCallback_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + PTMR_EVENT pPublicEvent +) +{ + PTMR_EVENT_PVT pEvent = (PTMR_EVENT_PVT)pPublicEvent; + NV_STATUS status = NV_OK; + + if (pEvent && (pEvent->super.pTimeProc != NULL)) + { + pEvent->super.pTimeProc(pGpu, pTmr, (PTMR_EVENT)pEvent); + pEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "ERROR No Timer event callback found, invalid timer SW state\n"); + status = NV_ERR_INVALID_REQUEST; + } + + return status; +} + +/*! + * This function cancels OS timer callback + * + * @param[in] pTmr Pointer to Timer Object + * @param[in] pEvent pointer to timer event information + * @returns NV_ERR_INVALID_REQUEST if callback entry not found + */ +NV_STATUS tmrEventCancelOSTimer_OSTIMER +( + OBJTMR *pTmr, + PTMR_EVENT pPublicEvent +) +{ + NV_STATUS status= NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pTmrEvent = (PTMR_EVENT_PVT) pPublicEvent; + + if (pTmrEvent != NULL && pTmrEvent->super.pOSTmrCBdata != NULL) + { + // Cancel the callback of OS timer + status = osCancelNanoTimer(pGpu->pOsGpuInfo, pTmrEvent->super.pOSTmrCBdata); + pTmrEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "ERROR No Timer event callback found, invalid timer SW state\n"); + status = NV_ERR_INVALID_REQUEST; + } + + return status; +} + +/*! 
+ * This function cancels OS timer callback + * + * @param[in] pTmr Pointer to Timer Object + * @param[in] pEvent pointer to timer event information + * + * @returns NV_ERR_INVALID_REQUEST if callback entry not found + */ +NV_STATUS tmrEventDestroyOSTimer_OSTIMER +( + OBJTMR *pTmr, + PTMR_EVENT pPublicEvent +) +{ + NV_STATUS status= NV_OK; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + PTMR_EVENT_PVT pTmrEvent = (PTMR_EVENT_PVT) pPublicEvent; + + if (pTmrEvent != NULL && pTmrEvent->super.pOSTmrCBdata != NULL) + { + // Cancel the callback of OS timer + status = osDestroyNanoTimer(pGpu->pOsGpuInfo, pTmrEvent->super.pOSTmrCBdata); + pTmrEvent->super.flags &= ~TMR_FLAG_OS_TIMER_QUEUED; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "No Timer event callback found, invalid timer SW state\n"); + status = NV_ERR_INVALID_REQUEST; + } + + return status; +} + +NV_STATUS +tmrGetIntrStatus_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU32 *pStatus, + THREAD_STATE_NODE *pThreadState +) +{ + *pStatus = 0; + return NV_OK; +} + +// +// For functions that only need a short delta of time elapsed (~ 4.29 seconds) +// NOTE: Since it wraps around every 4.29 seconds, for general GetTime purposes, +// it's better to use tmrGetTime(). +// +NvU32 +tmrGetTimeLo_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + return NvU64_LO32(tmrGetTimeEx_HAL(pGpu, pTmr, NULL)); +} + +NvU64 +tmrGetTime_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr +) +{ + return tmrGetTimeEx_HAL(pGpu, pTmr, NULL); +} + +NvU32 +tmrReadTimeLoReg_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + return NvU64_LO32(tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState)); +} + +NvU32 +tmrReadTimeHiReg_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + THREAD_STATE_NODE *pThreadState +) +{ + return NvU64_HI32(tmrGetTimeEx_HAL(pGpu, pTmr, pThreadState)); +} + +NV_STATUS +tmrGetGpuAndCpuTimestampPair_OSTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU64 *pGpuTime, + NvU64 *pCpuTime +) +{ +#if PORT_IS_FUNC_SUPPORTED(portUtilExReadTimestampCounter) + *pGpuTime = tmrGetTimeEx_HAL(pGpu, pTmr, NULL); + *pCpuTime = portUtilExReadTimestampCounter(); + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS +tmrDelay_OSTIMER +( + OBJTMR *pTmr, + NvU32 nsec +) +{ + if (nsec > 50000000) // 50 ms. + { + osDelay(nsec / 1000000); + } + else if (nsec > 0) + { + osDelayNs(nsec); + } + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/timer/timer_ptimer.c b/src/nvidia/src/kernel/gpu/timer/timer_ptimer.c new file mode 100644 index 000000000..74c3d795d --- /dev/null +++ b/src/nvidia/src/kernel/gpu/timer/timer_ptimer.c @@ -0,0 +1,189 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/************************** Ptimer based Routines **************************\ +* * +* The PTimer based routines reside in this file. * +* * +\***************************************************************************/ + +/* ------------------------- Includes --------------------------------------- */ +#include "gpu/gpu.h" +#include "objtmr.h" + +#include "published/turing/tu104/dev_timer.h" + +/* ------------------------- Datatypes -------------------------------------- */ +/* ------------------------- Macros ----------------------------------------- */ +/* ------------------------- Static Function Prototypes --------------------- */ +/* ------------------------- Public Functions ------------------------------ */ +// +// NV_PTIMER_TIME_0 rolls over approx every 4 secs. For delays +// less than 1/4 of that time just compare against TIME_0 +// +NV_STATUS tmrDelay_PTIMER +( + OBJTMR *pTmr, + NvU32 nsec +) +{ + NvU64 Time; + NvU32 start = 0; + OBJGPU *pGpu = ENG_GET_GPU(pTmr); + NvU32 count = 0; + + // If GPU is gone, we can't use timer at all. Delay also doesn't make any sense. + if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + return NV_ERR_GPU_IS_LOST; + } + if (API_GPU_IN_RESET_SANITY_CHECK(pGpu)) + { + return NV_ERR_GPU_IN_FULLCHIP_RESET; + } + + if (gpuIsReplayableTraceEnabled(pGpu)) { + NV_PRINTF(LEVEL_ERROR, "Entered tmrDelay - %d\n", nsec); + } + + // + // On NV50 fmodel, a large delay (1 ms) will cause the simulator + // to abort due to lack of activity. + if (IS_SIMULATION(pGpu) && (nsec > 100000)) + { + nsec = 100000; + } + + if (nsec == 0) + return NV_OK; + + if (nsec > 50000000) // 50 ms. + { + // + // Provide control back to os scheduler so as not to + // starve other processes/tasks in the system, etc... + // + NV_PRINTF(LEVEL_ERROR, + "Too long delay w/o yield, use osDelay instead.\n"); + } + + // + // Check to make sure the numerator / denominator have been programmed up. + // The power on reset values for both are 0 + // + // for short delays (delays less than 12 us, known to be the smallest delay + // tmrDelay known to provide - bug 478366), we call tmrGetTimeLo instead of + // tmrGetTime as it's sufficient to check within one round of 4.29 s time frame. + // + if (nsec < 12000) // 12 us + { + // start with the current time, reading only low half + start = tmrGetTimeLo_HAL(pGpu, pTmr); + while (tmrGetTimeLo_HAL(pGpu, pTmr) - start < nsec) + { + // + // Determine if PTIMER is not moving at all. The math below assumes that register accesses + // take at least 1/16 usec from CPU (which is very conservative compared to reality). + // Note that tmrGetTimeLo reads at least one register. + // + if ((count >> 4) > ((nsec + 999) / 1000)) + { + NV_PRINTF(LEVEL_INFO, + "PTIMER may be stuck. Already at %d iterations for a delay of %d nsec\n", + count, nsec); + } + else if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + return NV_ERR_GPU_IS_LOST; + } + else + { + count++; + } + } + } + else + { + // Start with the current time. + Time = tmrGetTime_HAL(pGpu, pTmr); + + // Add nanosecond delay. 
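+        //
+        // Unlike the short-delay path above, this branch compares against the
+        // full 64-bit timer value, so the "Time > tmrGetTime_HAL()" loop below
+        // is unaffected by the ~4.29 s roll-over of TIME_0 alone.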
+ Time += nsec; + + while (Time > tmrGetTime_HAL(pGpu, pTmr)) + { + // + // Determine if PTIMER is not moving at all. The math below assumes that register accesses + // take at least 1/16 usec from CPU (which is very conservative compared to reality). + // Note that tmrGetTime reads at least two registers. + // + if ((count >> 3) > ((nsec + 999) / 1000)) + { + NV_PRINTF(LEVEL_INFO, + "PTIMER may be stuck. Already at %d iterations for a delay of %d nsec\n", + count, nsec); + } + else if (!API_GPU_ATTACHED_SANITY_CHECK(pGpu)) + { + return NV_ERR_GPU_IS_LOST; + } + else + { + count++; + } + osSpinLoop(); + } + } + + if (gpuIsReplayableTraceEnabled(pGpu)) { + NV_PRINTF(LEVEL_ERROR, "Exiting tmrDelay\n"); + } + + return NV_OK; +} + +/*! + * @brief Returns the BAR0 offset and size of the PTIMER range. + * + * @param[in] pGpu + * @param[in] pTmr + * @param[out] pBar0MapOffset + * @param[out] pBar0MapSize + * + * @return NV_STATUS + */ +NV_STATUS +tmrGetTimerBar0MapInfo_PTIMER +( + OBJGPU *pGpu, + OBJTMR *pTmr, + NvU64 *pBar0MapOffset, + NvU32 *pBar0MapSize +) +{ + *pBar0MapOffset = DRF_BASE(NV_PTIMER); + *pBar0MapSize = DRF_SIZE(NV_PTIMER); + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/uvm/access_cntr_buffer.c b/src/nvidia/src/kernel/gpu/uvm/access_cntr_buffer.c new file mode 100644 index 000000000..92ab2e3c3 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/uvm/access_cntr_buffer.c @@ -0,0 +1,189 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" + +#include "os/os.h" + +#include "gpu/uvm/uvm.h" +#include "gpu/uvm/access_cntr_buffer.h" +#include "gpu/device/device.h" +#include "kernel/rmapi/client.h" + +NV_STATUS +accesscntrConstruct_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + // Not supported on legacy guests and in case of sr-iov heavy + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (!gpuIsClassSupported(pGpu, pParams->externalClassId)) + { + NV_PRINTF(LEVEL_ERROR, "class %x not supported\n", + pParams->externalClassId); + return NV_ERR_INVALID_CLASS; + } + + NV_ASSERT_OR_RETURN(pUvm != NULL, NV_ERR_NOT_SUPPORTED); + + status = uvmInitializeAccessCntrBuffer(pGpu, pUvm); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to initialize UVM Access Counters (status=0x%08x).\n", + status); + return status; + } + + pUvm->accessCntrBuffer.hAccessCntrBufferClient = pCallContext->pClient->hClient; + pUvm->accessCntrBuffer.hAccessCntrBufferObject = pCallContext->pResourceRef->hResource; + + return NV_OK; +} + +void +accesscntrDestruct_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + (void) uvmTerminateAccessCntrBuffer(pGpu, pUvm); +} + +NV_STATUS +accesscntrMap_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu; + OBJUVM *pUvm; + NV_STATUS rmStatus = NV_OK; + NvBool bBroadcast = NV_TRUE; + NvBool bKernel; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + pUvm = GPU_GET_UVM(pGpu); + + rmStatus = rmapiValidateKernelMapping(rmclientGetCachedPrivilege(pClient), + pCpuMapping->flags, + &bKernel); + if (rmStatus != NV_OK) + return rmStatus; + + pCpuMapping->processId = osGetCurrentProcess(); + + rmStatus = memdescMap(pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc, + 0, + pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc->Size, + bKernel, + pCpuMapping->pPrivate->protect, + &pCpuMapping->pLinearAddress, + &pCpuMapping->pPrivate->pPriv); + pUvm->accessCntrBuffer.hAccessCntrBufferCpuMapping = pCpuMapping->pPrivate->pPriv; + + return rmStatus; +} + +NV_STATUS +accesscntrUnmap_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + NV_STATUS rmStatus; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu; + OBJUVM *pUvm; + NvBool bBroadcast = NV_TRUE; + NvBool bKernel; + + pGpu = CliGetGpuFromContext(pCpuMapping->pContextRef, &bBroadcast); + NV_ASSERT_OR_RETURN(pGpu != NULL, NV_ERR_INVALID_ARGUMENT); + gpuSetThreadBcState(pGpu, bBroadcast); + + pUvm = GPU_GET_UVM(pGpu); + + rmStatus = rmapiValidateKernelMapping(rmclientGetCachedPrivilege(pClient), + pCpuMapping->flags, + &bKernel); + if (rmStatus != NV_OK) + return rmStatus; + + memdescUnmap(pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc, + bKernel, + pCpuMapping->processId, + pCpuMapping->pLinearAddress, + 
pUvm->accessCntrBuffer.hAccessCntrBufferCpuMapping); + + return NV_OK; +} + +NV_STATUS +accesscntrGetMapAddrSpace_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + NV_ADDRESS_SPACE addrSpace; + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + PMEMORY_DESCRIPTOR pMemDesc = pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc; + + if (pMemDesc == NULL) + return NV_ERR_INVALID_OBJECT; + + NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, pMemDesc, mapFlags, &addrSpace)); + + if (pAddrSpace) + *pAddrSpace = addrSpace; + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/gpu/uvm/access_cntr_buffer_ctrl.c b/src/nvidia/src/kernel/gpu/uvm/access_cntr_buffer_ctrl.c new file mode 100644 index 000000000..af065238f --- /dev/null +++ b/src/nvidia/src/kernel/gpu/uvm/access_cntr_buffer_ctrl.c @@ -0,0 +1,294 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" + +#include "kernel/gpu/subdevice/subdevice.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/uvm/uvm.h" +#include "rmapi/control.h" + +#include "gpu/uvm/access_cntr_buffer.h" + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferReadGet_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_GET_PARAMS *pGetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + return uvmReadAccessCntrBufferGetPtr_HAL(pGpu, pUvm, &pGetParams->accessCntrBufferGetOffset); +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferWriteGet_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_WRITE_GET_PARAMS *pGetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + return uvmWriteAccessCntrBufferGetPtr_HAL(pGpu, pUvm, pGetParams->accessCntrBufferGetValue); +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferReadPut_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_READ_PUT_PARAMS *pGetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + return uvmReadAccessCntrBufferPutPtr_HAL(pGpu, pUvm, &pGetParams->accessCntrBufferPutOffset); +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferGetSize_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS *pGetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + pGetParams->accessCntrBufferSize = pUvm->accessCntrBuffer.accessCntrBufferSize; + + return NV_OK; +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferEnable_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS *pGetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvU32 intrOwnership = pGetParams->intrOwnership; + + // + // Note: Changing the ownership mask to RM before disabling notifications could + // cause starvation issues if not protected by the GPU lock. + // For example, if interrupts need to be handled between changing of the ownership + // and disabling of the interrupt, multiple access counter interrupts could come + // to RM (since ownership changed from UVM to RM) and RM will starve the thread + // that is trying to disable the interrupt. + // See bug 2094809 for more details. 
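+    // For that reason the ownership switch and the notification enable/disable
+    // are handled together in this one control call: the ownership is changed
+    // first (below), and only then is the buffer enabled or disabled via
+    // uvmEnableAccessCntr_HAL() / uvmDisableAccessCntr_HAL().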
+ // + if (intrOwnership != NVC365_CTRL_ACCESS_COUNTER_INTERRUPT_OWNERSHIP_NO_CHANGE) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, pKernelGmmu != NULL, NV_ERR_NOT_SUPPORTED); + kgmmuAccessCntrChangeIntrOwnership(pGpu, pKernelGmmu, + (intrOwnership == NVC365_CTRL_ACCESS_COUNTER_INTERRUPT_OWNERSHIP_RM)); + } + + if (pGetParams->enable) + { + return uvmEnableAccessCntr_HAL(pGpu, pUvm, NV_FALSE); + } + else + { + return uvmDisableAccessCntr_HAL(pGpu, pUvm, NV_FALSE); + } +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferEnableIntr_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_INTR_PARAMS *pGetParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + NV_STATUS status = uvmEnableAccessCntrIntr_HAL(pGpu, pUvm, intr_notify); + if (status == NV_OK) + { + pGetParams->enable = NV_TRUE; + } + else + { + pGetParams->enable = NV_FALSE; + } + + return status; +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferGetRegisterMappings_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + return uvmGetAccessCntrRegisterMappings_HAL(pGpu, pUvm, + &pParams->pAccessCntrBufferGet, + &pParams->pAccessCntrBufferPut, + &pParams->pAccessCntrBufferFull, + &pParams->pHubIntr, + &pParams->pHubIntrEnSet, + &pParams->pHubIntrEnClear, + &pParams->accessCntrMask); +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferGetFullInfo_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_FULL_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + return uvmReadAccessCntrBufferFullPtr_HAL(pGpu, pUvm, &pParams->fullFlag); +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrBufferResetCounters_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_BUFFER_RESET_COUNTERS_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + NV_STATUS status = uvmResetAccessCntrBuffer_HAL(pGpu, pUvm, pParams->counterType); + if (status == NV_OK) + { + pParams->resetFlag = NV_TRUE; + } + else + { + pParams->resetFlag = NV_FALSE; + } + + return status; +} + +NV_STATUS +accesscntrCtrlCmdAccessCntrSetConfig_IMPL +( + AccessCounterBuffer *pAccessCounterBuffer, + NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pAccessCounterBuffer); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + NvU32 cmd = pParams->cmd; + + if (cmd & NVC365_CTRL_ACCESS_COUNTER_SET_MIMC_GRANULARITY) + uvmAccessCntrSetGranularity_HAL(pGpu, pUvm, MIMC, pParams->mimcGranularity); + if (cmd & NVC365_CTRL_ACCESS_COUNTER_SET_MOMC_GRANULARITY) + uvmAccessCntrSetGranularity_HAL(pGpu, pUvm, MOMC, pParams->momcGranularity); + if (cmd & NVC365_CTRL_ACCESS_COUNTER_SET_MIMC_LIMIT) + uvmAccessCntrSetCounterLimit_HAL(pGpu, pUvm, + NVC365_CTRL_ACCESS_COUNTER_MIMC_LIMIT, pParams->mimcLimit); + if (cmd & NVC365_CTRL_ACCESS_COUNTER_SET_MOMC_LIMIT) + uvmAccessCntrSetCounterLimit_HAL(pGpu, pUvm, + NVC365_CTRL_ACCESS_COUNTER_MOMC_LIMIT, pParams->momcLimit); + if (cmd & NVC365_CTRL_ACCESS_COUNTER_SET_THRESHOLD) + uvmAccessCntrSetThreshold_HAL(pGpu, pUvm, pParams->threshold); + + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdInternalUvmRegisterAccessCntrBuffer_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS 
*pParams +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + NvU32 numBufferPages = NV_ROUNDUP(pParams->bufferSize, RM_PAGE_SIZE) / RM_PAGE_SIZE; + + if (numBufferPages > NV_ARRAY_ELEMENTS(pParams->bufferPteArray) || + numBufferPages == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + + status = uvmAccessCntrBufferRegister(pGpu, pUvm, + pParams->bufferSize, + pParams->bufferPteArray); + return status; +} + +NV_STATUS +subdeviceCtrlCmdInternalUvmUnregisterAccessCntrBuffer_IMPL +( + Subdevice *pSubdevice +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + status = uvmAccessCntrBufferUnregister(pGpu, pUvm); + + return status; +} + +NV_STATUS +subdeviceCtrlCmdInternalUvmServiceAccessCntrBuffer_IMPL +( + Subdevice *pSubdevice +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + return uvmAccessCntrService_HAL(pGpu, pUvm); +} + +NV_STATUS +subdeviceCtrlCmdInternalUvmGetAccessCntrBufferSize_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS *pParams +) +{ + OBJGPU *pGpu = GPU_RES_GET_GPU(pSubdevice); + OBJUVM *pUvm = GPU_GET_UVM(pGpu); + + pParams->bufferSize = uvmGetAccessCounterBufferSize_HAL(pGpu, pUvm); + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/uvm/arch/turing/uvm_tu102.c b/src/nvidia/src/kernel/gpu/uvm/arch/turing/uvm_tu102.c new file mode 100644 index 000000000..3448b316e --- /dev/null +++ b/src/nvidia/src/kernel/gpu/uvm/arch/turing/uvm_tu102.c @@ -0,0 +1,320 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/uvm/uvm.h" +#include "kernel/gpu/intr/intr.h" + +#include "class/clc365.h" +#include "ctrl/ctrlc365.h" +#include "published/turing/tu102/dev_access_counter.h" +#include "published/turing/tu102/dev_vm.h" +#include "published/turing/tu102/dev_fb.h" + +NV_STATUS +uvmReadAccessCntrBufferPutPtr_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvU32 *putOffset +) +{ + + *putOffset = GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_PUT); + + return NV_OK; +} + +NV_STATUS +uvmReadAccessCntrBufferGetPtr_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvU32 *getOffset +) +{ + *getOffset = GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_GET); + + return NV_OK; +} + +NV_STATUS +uvmWriteAccessCntrBufferGetPtr_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvU32 getPtrValue +) +{ + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_GET, getPtrValue); + return NV_OK; +} + +NV_STATUS +uvmEnableAccessCntr_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvBool bIsErrorRecovery +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvBool bRmOwnsAccessCntr = !!(pKernelGmmu->uvmSharedIntrRmOwnsMask & + RM_UVM_SHARED_INTR_MASK_HUB_ACCESS_COUNTER_NOTIFY); + // + // Do not touch interrupts if in error recovery path + // Also disable interrupts if RM does not own the interrupt to prevent race + // See bug 2094809 for more details + // + if (!bRmOwnsAccessCntr) + { + uvmDisableAccessCntrIntr_HAL(pGpu, pUvm); + } + else + { + if (!bIsErrorRecovery) + uvmEnableAccessCntrIntr_HAL(pGpu, pUvm, intr_all); + } + + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION, _PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO, _EN, _TRUE); + + return NV_OK; +} + +NV_STATUS +uvmGetAccessCntrRegisterMappings_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvP64 *pAccessCntrBufferGet, + NvP64 *pAccessCntrBufferPut, + NvP64 *pAccessCntrBufferFull, + NvP64 *pHubIntr, + NvP64 *pHubIntrEnSet, + NvP64 *pHubIntrEnClear, + NvU32 *accessCntrMask +) +{ + Intr *pIntr = GPU_GET_INTR(pGpu); + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + NvP64 bar0Mapping = NV_PTR_TO_NvP64(pMapping->gpuNvAddr); + NvU32 intrVector = intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_ACCESS_CNTR, NV_FALSE); + NvU32 leafReg, leafBit; + + leafReg = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_REG(intrVector); + leafBit = NV_CTRL_INTR_GPU_VECTOR_TO_LEAF_BIT(intrVector); + + *pAccessCntrBufferGet = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_GET)); + *pAccessCntrBufferPut = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_PUT)); + *pAccessCntrBufferFull = NvP64_PLUS_OFFSET(bar0Mapping,GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO)); + *pHubIntr = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF(leafReg))); + *pHubIntrEnSet = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_SET(leafReg))); + *pHubIntrEnClear = NvP64_PLUS_OFFSET(bar0Mapping, GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_PRIV_CPU_INTR_LEAF_EN_CLEAR(leafReg))); + *accessCntrMask = NVBIT(leafBit); + return NV_OK; +} + +NV_STATUS +uvmReadAccessCntrBufferFullPtr_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvBool *fullFlag +) +{ + NvU32 fullPtrValue; + + fullPtrValue = GPU_VREG_RD32(pGpu, 
NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO); + if (fullPtrValue & NV_PFB_NISO_ACCESS_COUNTER_NOTIFY_BUFFER_INFO_FULL_TRUE) + { + *fullFlag = NV_TRUE; + } + else + { + *fullFlag = NV_FALSE; + } + return NV_OK; +} + +NV_STATUS +uvmAccessCntrSetThreshold_TU102(POBJGPU pGpu, POBJUVM pUvm, NvU32 threshold) +{ + + GPU_VREG_FLD_WR_DRF_NUM(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _THRESHOLD, threshold); + return NV_OK; +} + +// Note: This function returns zero for chips which do not support the access counter. +NvU32 uvmGetAccessCounterBufferSize_TU102(POBJGPU pGpu, POBJUVM pUvm) +{ + + return GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_SIZE) * + NV_ACCESS_COUNTER_NOTIFY_BUF_SIZE; +} + +NV_STATUS +uvmAccessCntrSetGranularity_TU102(POBJGPU pGpu, POBJUVM pUvm, ACCESS_CNTR_TYPE accessCntType, NvU32 granularity) +{ + + if (accessCntType == MIMC) + { + switch(granularity) + { + case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_64K: + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _MIMC_GRANULARITY, _64K); + break; + case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_2M: + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _MIMC_GRANULARITY, _2M); + break; + case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16M: + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _MIMC_GRANULARITY, _16M); + break; + case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16G: + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _MIMC_GRANULARITY, _16G); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + } + else if (accessCntType == MOMC) + { + switch(granularity) + { + case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_64K: + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _MOMC_GRANULARITY, _64K); + break; + case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_2M: + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _MOMC_GRANULARITY, _2M); + break; + case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16M: + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _MOMC_GRANULARITY, _16M); + break; + case NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16G: + GPU_VREG_FLD_WR_DRF_DEF(pGpu, _VIRTUAL_FUNCTION_PRIV, _ACCESS_COUNTER_CONFIG, _MOMC_GRANULARITY, _16G); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + } + else + return NV_ERR_INVALID_ARGUMENT; + + return NV_OK; +} + +void +uvmWriteAccessCntrBufferHiReg_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvU32 hiVal +) +{ + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_HI, hiVal); +} + +void +uvmWriteAccessCntrBufferLoReg_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvU32 loVal +) +{ + + GPU_VREG_WR32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO, loVal); +} + +NvU32 +uvmReadAccessCntrBufferLoReg_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm +) +{ + return GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_LO); +} + +NvU32 +uvmReadAccessCntrBufferInfoReg_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm +) +{ + return GPU_VREG_RD32(pGpu, NV_VIRTUAL_FUNCTION_PRIV_ACCESS_COUNTER_NOTIFY_BUFFER_INFO); +} + +NV_STATUS +uvmEnableAccessCntrIntr_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm, + NvU32 intrType +) +{ + Intr *pIntr = GPU_GET_INTR(pGpu); + + if (intrType == intr_all || intrType == intr_notify) + { + intrEnableLeaf_HAL(pGpu, pIntr, + intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_ACCESS_CNTR, NV_FALSE)); + } + + 
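+    // Any other intrType is a no-op here; the notification vector is left
+    // untouched.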
return NV_OK; +} + +NV_STATUS +uvmDisableAccessCntrIntr_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm +) +{ + Intr *pIntr = GPU_GET_INTR(pGpu); + + intrDisableLeaf_HAL(pGpu, pIntr, + intrGetVectorFromEngineId(pGpu, pIntr, MC_ENGINE_IDX_ACCESS_CNTR, NV_FALSE)); + return NV_OK; +} + +NV_STATUS +uvmAccessCntrService_TU102 +( + POBJGPU pGpu, + POBJUVM pUvm +) +{ + NvU64 accessCntrAddress = 0; + PEVENTNOTIFICATION *ppEventNotification = NULL; + + if (NV_OK == CliGetEventNotificationList(pUvm->accessCntrBuffer.hAccessCntrBufferClient, + pUvm->accessCntrBuffer.hAccessCntrBufferObject, NULL, &ppEventNotification) && ppEventNotification) + { + NV_ASSERT_OK_OR_RETURN(notifyEvents(pGpu, *ppEventNotification, NVC365_NOTIFIERS_ACCESS_COUNTER, + NvU64_HI32(accessCntrAddress), NvU64_LO32(accessCntrAddress), NV_OK, NV_OS_WRITE_THEN_AWAKEN)); + } + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/uvm/arch/volta/uvm_gv100.c b/src/nvidia/src/kernel/gpu/uvm/arch/volta/uvm_gv100.c new file mode 100644 index 000000000..7c58dd98b --- /dev/null +++ b/src/nvidia/src/kernel/gpu/uvm/arch/volta/uvm_gv100.c @@ -0,0 +1,409 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @file + * @brief VOLTA specific HALs for UVM routines reside in this file + */ + +#include "core/core.h" +#include "nvRmReg.h" +#include "gpu/gpu.h" +#include "gpu/mmu/kern_gmmu.h" +#include "gpu/uvm/uvm.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/bus/kern_bus.h" +#include "rmapi/event.h" + +#include "class/clc365.h" +#include "ctrl/ctrlc365.h" +#include "published/volta/gv100/dev_fb.h" + +NV_STATUS +uvmSetupAccessCntrBuffer_GV100 +( + OBJGPU *pGpu, + OBJUVM *pUvm +) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 accessCntrBufferHi = 0; + NvU32 accessCntrBufferLo = 0; + NvU64 vaddr; + NV_STATUS status = NV_OK; + + // Return if guest RM is with no sriov + if (IS_GSP_CLIENT(pGpu) || IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + if (!pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc) + { + return NV_ERR_INVALID_OBJECT_BUFFER; + } + + status = kbusMapCpuInvisibleBar2Aperture_HAL(pGpu, pKernelBus, pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc, + &vaddr, pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc->Size, 0, GPU_GFID_PF); + if (status != NV_OK) + { + return status; + } + pUvm->accessCntrBuffer.bar2UvmAccessCntrBufferAddr = vaddr; + + accessCntrBufferHi = NvU64_HI32(pUvm->accessCntrBuffer.bar2UvmAccessCntrBufferAddr); + accessCntrBufferLo = NvU64_LO32(pUvm->accessCntrBuffer.bar2UvmAccessCntrBufferAddr); + + uvmWriteAccessCntrBufferHiReg_HAL(pGpu, pUvm, accessCntrBufferHi); + uvmWriteAccessCntrBufferLoReg_HAL(pGpu, pUvm, accessCntrBufferLo); + + return NV_OK; +} + +NV_STATUS +uvmDisableAccessCntr_GV100 +( + OBJGPU *pGpu, + OBJUVM *pUvm, + NvBool bIsErrorRecovery +) +{ + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + RMTIMEOUT timeout; + NvU32 putPtr; + NvU32 getPtr; + NV_STATUS status = NV_OK; + + status = gpuSanityCheckRegisterAccess(pGpu, 0, NULL); + if (status != NV_OK) + return status; + + if (!bIsErrorRecovery && kgmmuTestAccessCounterWriteNak_HAL(pGpu, pKernelGmmu)) + { + NV_PRINTF(LEVEL_ERROR, + "Forcing bIsErrorRecovery = NV_TRUE because of WRITE_NACK.\n"); + bIsErrorRecovery = NV_TRUE; + } + + uvmWriteAccessCntrBufferLoReg_HAL(pGpu, pUvm, + FLD_SET_DRF( _PFB_NISO, _ACCESS_COUNTER_NOTIFY_BUFFER_LO, _EN, _FALSE, + uvmReadAccessCntrBufferLoReg_HAL(pGpu, pUvm))); + + // + // Check for any pending notifications which might be pending in pipe to ensure + // they don't show up later when the buffer is enabled again. To ensure that HW sets the + // correct notifications PUSHED status in priv, perform a read to ensure that EN == FALSE. 
+ // If PUSHED == TRUE, RM will check the PUT pointer and if updated, it will wait for valid + // bit to show up for all packets and then reset the buffer + // + gpuSetTimeout(pGpu, GPU_TIMEOUT_DEFAULT, &timeout, 0); + if (FLD_TEST_DRF(_PFB_NISO, _ACCESS_COUNTER_NOTIFY_BUFFER_LO, _EN, _FALSE, + uvmReadAccessCntrBufferLoReg_HAL(pGpu, pUvm))) + { + while (FLD_TEST_DRF(_PFB_NISO, _ACCESS_COUNTER_NOTIFY_BUFFER_INFO, _PUSHED, _FALSE, + uvmReadAccessCntrBufferInfoReg_HAL(pGpu, pUvm))) + { + if (gpuCheckTimeout(pGpu, &timeout) == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_ERROR, + "Timeout waiting for HW to write notification buffer.\n"); + DBG_BREAKPOINT(); + return NV_ERR_TIMEOUT; + } + } + + // + // If called from error recovery, we can't wait for packet to show up as notification packets + // could be the source of error + // + if (bIsErrorRecovery) + goto done; + + // If PUT pointer is updated, wait for VALID packets to show up and reset the packets + uvmReadAccessCntrBufferPutPtr_HAL(pGpu, pUvm, &putPtr); + uvmReadAccessCntrBufferGetPtr_HAL(pGpu, pUvm, &getPtr); + if (getPtr != putPtr) + { + MEMORY_DESCRIPTOR *pMemDesc = IS_GSP_CLIENT(pGpu) ? + pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc : + pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc; + NvU8 *pAccessCntrBufferPage; + NvU32 entriesPerPage = RM_PAGE_SIZE / NVC365_NOTIFY_BUF_SIZE; + NvU32 pageSizeModBufSize = RM_PAGE_SIZE % NVC365_NOTIFY_BUF_SIZE; + NvU32 maxEntryCount = pMemDesc->Size / NVC365_NOTIFY_BUF_SIZE; + NvU32 inPageGetPtr; + NvP64 pAddr; + NvP64 pPriv; + + NV_ASSERT_OR_RETURN(pageSizeModBufSize == 0, NV_ERR_INVALID_OPERATION); + + // Map one buffer page and wait for packets to become valid + status = memdescMap(pMemDesc, (getPtr / entriesPerPage) * RM_PAGE_SIZE, RM_PAGE_SIZE, + NV_TRUE, NV_PROTECT_READ_WRITE, &pAddr, &pPriv); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to map access counter buffer while disabling it.\n"); + return status; + } + + while (getPtr != putPtr) + { + pAccessCntrBufferPage = (NvU8 *)pAddr; + inPageGetPtr = getPtr % entriesPerPage; + + // Wait for an entry to be come valid + while (!DRF_VAL_MW(C365, _NOTIFY_BUF_ENTRY, _VALID, (NvU32 *)&pAccessCntrBufferPage[inPageGetPtr * NVC365_NOTIFY_BUF_SIZE])) + osSchedule(); + + portMemSet((void *)(&pAccessCntrBufferPage[inPageGetPtr * NVC365_NOTIFY_BUF_SIZE]), 0, NVC365_NOTIFY_BUF_SIZE); + getPtr = ((getPtr + 1 == maxEntryCount) ? 
0 : (getPtr + 1)); + + // Map another page with entries to clear + if (getPtr % entriesPerPage == 0) + { + memdescUnmap(pMemDesc, NV_TRUE, osGetCurrentProcess(), pAddr, pPriv); + status = memdescMap(pMemDesc, (getPtr / entriesPerPage) * RM_PAGE_SIZE, RM_PAGE_SIZE, + NV_TRUE, NV_PROTECT_READ_WRITE, &pAddr, &pPriv); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to map access counter buffer while disabling it.\n"); + return status; + } + } + } + + uvmWriteAccessCntrBufferGetPtr_HAL(pGpu, pUvm, getPtr); + + memdescUnmap(pMemDesc, NV_TRUE, osGetCurrentProcess(), pAddr, pPriv); + } + } + else + { + NV_PRINTF(LEVEL_ERROR, "Failed disabling notification buffer.\n"); + DBG_BREAKPOINT(); + return NV_ERR_TIMEOUT; + } + +done: + return NV_OK; +} + +NV_STATUS +uvmUnloadAccessCntrBuffer_GV100(OBJGPU *pGpu, OBJUVM *pUvm) +{ + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + // Return if guest RM is with no sriov + if (IS_GSP_CLIENT(pGpu) || IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + uvmDisableAccessCntr_HAL(pGpu, pUvm, NV_FALSE); + kbusUnmapCpuInvisibleBar2Aperture_HAL(pGpu, pKernelBus, pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc, + pUvm->accessCntrBuffer.bar2UvmAccessCntrBufferAddr, GPU_GFID_PF); + pUvm->accessCntrBuffer.bar2UvmAccessCntrBufferAddr = 0; + + return NV_OK; +} + +NV_STATUS +uvmDestroyAccessCntrBuffer_GV100(OBJGPU *pGpu, OBJUVM *pUvm) +{ + if(pUvm == NULL) + { + return NV_WARN_NULL_OBJECT; + } + + // Return if guest RM is with no sriov + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + memdescFree(pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc); + memdescDestroy(pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc); + + pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc = NULL; + pUvm->accessCntrBuffer.accessCntrBufferSize = 0; + + return NV_OK; +} + +NV_STATUS +uvmInitAccessCntrBuffer_GV100(OBJGPU *pGpu, OBJUVM *pUvm) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pUvmAccessCntrBufferDesc; + NvP64 pAddr; + NvP64 pPriv; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + NvU32 accessCntrBufferAperture = 0; + NvU32 accessCntrBufferAttr = 0; + NV2080_CTRL_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE_PARAMS getSizeParams = {0}; + + // Return if guest RM is with no sriov + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu))) + { + return NV_OK; + } + + // Issue control to fetch buffer size from physical + status = pUvm->pRmApi->Control(pUvm->pRmApi, + pUvm->hClient, + pUvm->hSubdevice, + NV2080_CTRL_CMD_INTERNAL_UVM_GET_ACCESS_CNTR_BUFFER_SIZE, + &getSizeParams, + sizeof(getSizeParams)); + if (status != NV_OK) + { + return status; + } + + pUvm->accessCntrBuffer.accessCntrBufferSize = getSizeParams.bufferSize; + + accessCntrBufferAperture = ADDR_SYSMEM; + accessCntrBufferAttr = NV_MEMORY_CACHED; + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC_4, _UVM_FAULT_BUFFER_REPLAYABLE, pGpu->instLocOverrides4), + "UVM access counter", &accessCntrBufferAperture, &accessCntrBufferAttr); + + status = memdescCreate(&pUvmAccessCntrBufferDesc, pGpu, pUvm->accessCntrBuffer.accessCntrBufferSize, 0, + NV_FALSE, accessCntrBufferAperture, accessCntrBufferAttr, MEMDESC_FLAGS_LOST_ON_SUSPEND); + if (status != NV_OK) + { + return status; + } + + // + // GPU doesn't read accessCounter notification buffer 
memory, so if buffer is in sysmem, + // ensure that GpuCacheAttr is set to UNCACHED as having a vol bit set in PTEs will ensure HUB + // uses L2Bypass mode and it will save extra cycles to cache in L2 while MMU will write notification packets. + // + if (accessCntrBufferAperture == ADDR_SYSMEM && + pKernelGmmu->getProperty(pKernelGmmu, PDB_PROP_KGMMU_SYSMEM_FAULT_BUFFER_GPU_UNCACHED)) + { + memdescSetGpuCacheAttrib(pUvmAccessCntrBufferDesc, NV_MEMORY_UNCACHED); + } + + status = memdescAlloc(pUvmAccessCntrBufferDesc); + if (status != NV_OK) + { + memdescDestroy(pUvmAccessCntrBufferDesc); + return status; + } + + memmgrSetMemDescPageSize_HAL(pGpu, pMemoryManager, pUvmAccessCntrBufferDesc, AT_GPU, RM_ATTR_PAGE_SIZE_4KB); + + status = memdescMap(pUvmAccessCntrBufferDesc, 0, + memdescGetSize(pUvmAccessCntrBufferDesc), NV_TRUE, + NV_PROTECT_READ_WRITE, &pAddr, &pPriv); + if (status != NV_OK) + { + memdescFree(pUvmAccessCntrBufferDesc); + memdescDestroy(pUvmAccessCntrBufferDesc); + return status; + } + portMemSet(NvP64_VALUE(pAddr), 0, memdescGetSize(pUvmAccessCntrBufferDesc)); + + memdescUnmap(pUvmAccessCntrBufferDesc, NV_TRUE, osGetCurrentProcess(), pAddr, pPriv); + + pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc = pUvmAccessCntrBufferDesc; + + return status; +} + +NV_STATUS +uvmResetAccessCntrBuffer_GV100(OBJGPU *pGpu, OBJUVM *pUvm, NvU32 counterType) +{ + switch(counterType) + { + case NVC365_CTRL_ACCESS_COUNTER_TYPE_ALL: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_NOTIFY_BUFFER_CLR, _ALL_COUNTERS, _CLR); + break; + case NVC365_CTRL_ACCESS_COUNTER_TYPE_MIMC: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_NOTIFY_BUFFER_CLR, _MIMC, _CLR); + break; + case NVC365_CTRL_ACCESS_COUNTER_TYPE_MOMC: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_NOTIFY_BUFFER_CLR, _MOMC, _CLR); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + return NV_OK; +} + +NV_STATUS +uvmAccessCntrSetCounterLimit_GV100(OBJGPU *pGpu, OBJUVM *pUvm, NvU32 type, NvU32 limit) +{ + if (type == NVC365_CTRL_ACCESS_COUNTER_MIMC_LIMIT) + { + switch(limit) + { + case NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_NONE: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_CONFIG, _MIMC_USE_LIMIT, _NONE); + break; + case NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_QTR: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_CONFIG, _MIMC_USE_LIMIT, _QTR); + break; + case NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_HALF: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_CONFIG, _MIMC_USE_LIMIT, _HALF); + break; + case NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_FULL: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_CONFIG, _MIMC_USE_LIMIT, _FULL); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + } + else if (type == NVC365_CTRL_ACCESS_COUNTER_MOMC_LIMIT) + { + switch(limit) + { + case NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_NONE: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_CONFIG, _MOMC_USE_LIMIT, _NONE); + break; + case NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_QTR: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_CONFIG, _MOMC_USE_LIMIT, _QTR); + break; + case NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_HALF: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_CONFIG, _MOMC_USE_LIMIT, _HALF); + break; + case NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_FULL: + GPU_FLD_WR_DRF_DEF(pGpu, _PFB_NISO, _ACCESS_COUNTER_CONFIG, _MOMC_USE_LIMIT, _FULL); + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + } + else + return NV_ERR_INVALID_ARGUMENT; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/gpu/uvm/uvm.c 
b/src/nvidia/src/kernel/gpu/uvm/uvm.c new file mode 100644 index 000000000..40f6e8b19 --- /dev/null +++ b/src/nvidia/src/kernel/gpu/uvm/uvm.c @@ -0,0 +1,308 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2012-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/uvm/uvm.h" +#include "nvRmReg.h" +#include "rmapi/control.h" +#include "rmapi/rmapi_utils.h" +#include "kernel/gpu/intr/engine_idx.h" +#include "kernel/gpu/mem_mgr/virt_mem_allocator_common.h" + +#include + +/** + * @brief Send the request to set up the buffer to physical RM. + * + * @param pGpu + * @param pUvm + */ +static NV_STATUS +_uvmSetupAccessCntrBuffer(OBJGPU *pGpu, OBJUVM *pUvm) +{ + NvU32 bufferSize; + NvU32 numBufferPages; + NV2080_CTRL_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER_PARAMS params = {0}; + NV_STATUS status = NV_OK; + + // Buffer was not allocated, nothing to do + if (pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc == NULL) + { + return NV_OK; + } + + bufferSize = pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc->Size; + numBufferPages = NV_ROUNDUP(bufferSize, RM_PAGE_SIZE) / RM_PAGE_SIZE; + + if (numBufferPages > NV_ARRAY_ELEMENTS(params.bufferPteArray)) + { + return NV_ERR_BUFFER_TOO_SMALL; + } + + memdescGetPhysAddrs(pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc, + AT_GPU, 0, RM_PAGE_SIZE, + numBufferPages, params.bufferPteArray); + + params.bufferSize = bufferSize; + + status = pUvm->pRmApi->Control(pUvm->pRmApi, + pUvm->hClient, + pUvm->hSubdevice, + NV2080_CTRL_CMD_INTERNAL_UVM_REGISTER_ACCESS_CNTR_BUFFER, + ¶ms, sizeof(params)); + + return status; +} + +/** + * @brief Send the request to unload access counter buffer to physical RM + * + * @param pGpu + * @param pUvm + */ +static NV_STATUS +_uvmUnloadAccessCntrBuffer(OBJGPU *pGpu, OBJUVM *pUvm) +{ + NV_STATUS status; + + // Buffer was not allocated, nothing to do + if (pUvm->accessCntrBuffer.pUvmAccessCntrAllocMemDesc == NULL) + { + return NV_OK; + } + + status = pUvm->pRmApi->Control(pUvm->pRmApi, + pUvm->hClient, + pUvm->hSubdevice, + NV2080_CTRL_CMD_INTERNAL_UVM_UNREGISTER_ACCESS_CNTR_BUFFER, + NULL, 0); + + return status; +} + +/** + * @brief Initialize state for UVM + * + * @param pGpu + * @param pUvm + */ +NV_STATUS +uvmStateInitUnlocked_IMPL(OBJGPU *pGpu, OBJUVM *pUvm) +{ + NV_STATUS status = NV_OK; + + pUvm->pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + 
status = rmapiutilAllocClientAndDeviceHandles(pUvm->pRmApi, pGpu, &pUvm->hClient, + NULL, &pUvm->hSubdevice); + + return status; +} + +/** + * @brief Destroy GPU gpu state for UVM + * + * @param pGpu + * @param pUvm + */ +void +uvmStateDestroy_IMPL(OBJGPU *pGpu, OBJUVM *pUvm) +{ + rmapiutilFreeClientAndDeviceHandles(pUvm->pRmApi, &pUvm->hClient, NULL, &pUvm->hSubdevice); +} + +/** + * @brief Setup UVM access counters. Setup includes memory allocation, mapping + * and writing mapped address to HW registers. + * + * @param pGpu + * @param pUvm + */ +NV_STATUS +uvmInitializeAccessCntrBuffer_IMPL(OBJGPU *pGpu, OBJUVM *pUvm) +{ + NV_STATUS status; + + status = uvmInitAccessCntrBuffer_HAL(pGpu, pUvm); + if (status != NV_OK) + { + return status; + } + + status = _uvmSetupAccessCntrBuffer(pGpu, pUvm); + if (status != NV_OK) + { + (void) uvmDestroyAccessCntrBuffer_HAL(pGpu, pUvm); + } + + return status; +} + +/** + * @brief Destroy UVM access counters i.e. reciprocate above setup function. + * + * @param pGpu + * @param pUvm + */ +NV_STATUS +uvmTerminateAccessCntrBuffer_IMPL(OBJGPU *pGpu, OBJUVM *pUvm) +{ + NV_STATUS status; + + status = _uvmUnloadAccessCntrBuffer(pGpu, pUvm); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unloading UVM Access counters failed (status=0x%08x), proceeding...\n", + status); + } + + status = uvmDestroyAccessCntrBuffer_HAL(pGpu, pUvm); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Freeing UVM Access counters failed (status=0x%08x), proceeding...\n", + status); + } + + return NV_OK; +} + +/** + * @brief Re-create access counter memory descriptor and setup the buffer. + * + * @param pGpu + * @param pUvm + */ +NV_STATUS +uvmAccessCntrBufferRegister_IMPL(OBJGPU *pGpu, OBJUVM *pUvm, + NvU32 bufferSize, NvU64 *pBufferPages) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pMemDesc; + NvU32 addrSpace = ADDR_SYSMEM; + NvU32 attr = NV_MEMORY_CACHED; + + memdescOverrideInstLoc(DRF_VAL(_REG_STR_RM, _INST_LOC_4, _UVM_FAULT_BUFFER_REPLAYABLE, pGpu->instLocOverrides4), + "UVM access counter", &addrSpace, &attr); + + status = memdescCreate(&pMemDesc, pGpu, + bufferSize, 0, NV_FALSE, + addrSpace, attr, + (MEMDESC_FLAGS_GUEST_ALLOCATED | + MEMDESC_FLAGS_EXT_PAGE_ARRAY_MEM | + MEMDESC_FLAGS_LOST_ON_SUSPEND)); + if (status != NV_OK) + { + return status; + } + + memdescFillPages(pMemDesc, 0, pBufferPages, + NV_ROUNDUP(bufferSize, RM_PAGE_SIZE) / RM_PAGE_SIZE, + RM_PAGE_SIZE); + + pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc = pMemDesc; + + status = uvmSetupAccessCntrBuffer_HAL(pGpu, pUvm); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Setup of UVM Access counters failed (status=0x%08x)\n", + status); + + memdescDestroy(pMemDesc); + + pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc = NULL; + } + + return status; +} + +/** + * @brief Disable access counter buffer and destroy the buffer + * descriptor. + * + * @param pGpu + * @param pUvm + */ +NV_STATUS +uvmAccessCntrBufferUnregister_IMPL(OBJGPU *pGpu, OBJUVM *pUvm) +{ + NV_STATUS status; + + status = uvmUnloadAccessCntrBuffer_HAL(pGpu, pUvm); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Unloading UVM Access counters failed (status=0x%08x), proceeding...\n", + status); + } + + memdescDestroy(pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc); + + pUvm->accessCntrBuffer.pUvmAccessCntrMemDesc = NULL; + + return NV_OK; +} + +/** + * @brief Provides an opportunity to register some IntrService during intrStateInit. 
+ */ +void +uvmRegisterIntrService_IMPL +( + OBJGPU *pGpu, + OBJUVM *pUvm, + IntrServiceRecord pRecords[MC_ENGINE_IDX_MAX] +) +{ + NvU32 engineIdx = MC_ENGINE_IDX_ACCESS_CNTR; + NV_ASSERT(pRecords[engineIdx].pInterruptService == NULL); + pRecords[engineIdx].pInterruptService = staticCast(pUvm, IntrService); +} + +/** + * @brief Service stall interrupts. + * + * @returns Zero, or any implementation-chosen nonzero value. If the same nonzero value is returned enough + * times the interrupt is considered stuck. + */ +NvU32 +uvmServiceInterrupt_IMPL +( + OBJGPU *pGpu, + OBJUVM *pUvm, + IntrServiceServiceInterruptArguments *pParams +) +{ + NV_ASSERT_OR_RETURN(pParams != NULL, 0); + NV_ASSERT_OR_RETURN(pParams->engineIdx == MC_ENGINE_IDX_ACCESS_CNTR, 0); + + NV_ASSERT_OK(pUvm->pRmApi->Control(pUvm->pRmApi, + pUvm->hClient, + pUvm->hSubdevice, + NV2080_CTRL_CMD_INTERNAL_UVM_SERVICE_ACCESS_CNTR_BUFFER, + NULL, 0)); + + return 0; +} diff --git a/src/nvidia/src/kernel/gpu_mgr/gpu_db.c b/src/nvidia/src/kernel/gpu_mgr/gpu_db.c new file mode 100644 index 000000000..238b9c7c2 --- /dev/null +++ b/src/nvidia/src/kernel/gpu_mgr/gpu_db.c @@ -0,0 +1,370 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/**************************************************************************** + * + * Description: + * This file contains the functions managing the gpu database + * + ***************************************************************************/ + +#include "gpu_mgr/gpu_db.h" +#include "core/system.h" + +#include "gpu/gpu.h" // for NBADDR + +NV_STATUS +gpudbConstruct_IMPL +( + GpuDb *pGpuDb +) +{ + listInit(&pGpuDb->gpuList, portMemAllocatorGetGlobalNonPaged()); + + pGpuDb->pLock = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (pGpuDb->pLock == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Gpu data base list lock init failed\n"); + listDestroy(&pGpuDb->gpuList); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + return NV_OK; +} + +void +gpudbDestruct_IMPL +( + GpuDb *pGpuDb +) +{ + if (pGpuDb->pLock != NULL) + { + portSyncMutexDestroy(pGpuDb->pLock); + } + + listDestroy(&pGpuDb->gpuList); +} + +static PGPU_INFO_LIST_NODE +_gpudbFindGpuInfoByUuid +( + const NvU8 *pUuid +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode = NULL; + + for (pNode = listHead(&pGpuDb->gpuList); + pNode != NULL; + pNode = listNext(&pGpuDb->gpuList, pNode)) + { + if (portMemCmp(pNode->uuid, pUuid, RM_SHA1_GID_SIZE) == 0) + { + break; + } + } + + return pNode; +} + +NV_STATUS +gpudbRegisterGpu(const NvU8 *pUuid, const NBADDR *pUpstreamPortPciInfo, NvU64 pciInfo) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_OK; + NvU32 i = 0; + + portSyncMutexAcquire(pGpuDb->pLock); + + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode != NULL) + { + pNode->bShutdownState = NV_FALSE; + goto done; + } + + pNode = listAppendNew(&pGpuDb->gpuList); + if (pNode == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Append the list failed\n"); + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + portMemCopy(pNode->uuid, RM_SHA1_GID_SIZE, pUuid, RM_SHA1_GID_SIZE); + + pNode->pciPortInfo.domain = gpuDecodeDomain(pciInfo); + pNode->pciPortInfo.bus = gpuDecodeBus(pciInfo); + pNode->pciPortInfo.device = gpuDecodeDevice(pciInfo); + pNode->pciPortInfo.function = 0; + pNode->pciPortInfo.bValid = NV_TRUE; + + pNode->upstreamPciPortInfo.domain = pUpstreamPortPciInfo->domain; + pNode->upstreamPciPortInfo.bus = pUpstreamPortPciInfo->bus; + pNode->upstreamPciPortInfo.device = pUpstreamPortPciInfo->device; + pNode->upstreamPciPortInfo.function = pUpstreamPortPciInfo->func; + pNode->upstreamPciPortInfo.bValid = pUpstreamPortPciInfo->valid; + + pNode->bShutdownState = NV_FALSE; + + // Initialize all compute polcies with default values + pNode->policyInfo.timeslice = NV2080_CTRL_CMD_GPU_COMPUTE_TIMESLICE_DEFAULT; + + // Initialize all choesnIdx to _INVALID + for (i = 0; i < GPUDB_CLK_PROP_TOP_POLS_COUNT; ++i) + { + ct_assert(sizeof(pNode->clkPropTopPolsControl.chosenIdx[0]) == sizeof(NvU8)); + pNode->clkPropTopPolsControl.chosenIdx[i] = NV_U8_MAX; + } + +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} + +/*! 
+* @brief Update/Set the compute policy config for a GPU +* +* @param[in] uuid GPU uuid +* @param[in] policyType Policy for which config has to be set +* @param[in] policyInfo Requested policy config +* +* @return NV_OK Config updated successfully +* @return NV_ERR_INVALID_ARGUMENT Invalid argument specified +* @return NV_ERR_OBJECT_NOT_FOUND GPU entry in db not found +*/ +NV_STATUS +gpudbSetGpuComputePolicyConfig +( + const NvU8 *pUuid, + NvU32 policyType, + GPU_COMPUTE_POLICY_INFO *policyInfo +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + if (pUuid == NULL || policyInfo == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portSyncMutexAcquire(pGpuDb->pLock); + + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + // Store the policy specific data + switch(policyType) + { + case NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE: + pNode->policyInfo.timeslice = policyInfo->timeslice; + status = NV_OK; + break; + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + + } + +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} + +/*! +* @brief Get all compute policy configs for a GPU +* +* @param[in] uuid GPU uuid +* @param[in] policyInfo Pointer in which to retrieve all compute policies +* for the requested GPU +* +* @return NV_OK Configs retrieved successfully +* @return NV_ERR_INVALID_ARGUMENT Invalid argument specified +* @return NV_ERR_OBJECT_NOT_FOUND GPU entry in db not found +*/ +NV_STATUS +gpudbGetGpuComputePolicyConfigs +( + const NvU8 *pUuid, + GPU_COMPUTE_POLICY_INFO *policyInfo +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + if (pUuid == NULL || policyInfo == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portSyncMutexAcquire(pGpuDb->pLock); + + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + // Return the policy specific data + portMemCopy(policyInfo, sizeof(GPU_COMPUTE_POLICY_INFO), + &pNode->policyInfo, sizeof(GPU_COMPUTE_POLICY_INFO)); + status = NV_OK; + +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} + +/*! +* @brief Set clock policies control for a GPU +* +* @param[in] pUuid Pointer to GPU uuid +* @param[in] pControl Pointer to the control tuple +* +* @return NV_OK Configs retrieved successfully +* @return NV_ERR_INVALID_ARGUMENT Invalid argument specified +* @return NV_ERR_OBJECT_NOT_FOUND GPU entry in db not found +*/ +NV_STATUS +gpudbSetClockPoliciesControl +( + const NvU8 *pUuid, + GPU_CLK_PROP_TOP_POLS_CONTROL *pControl +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + if (pUuid == NULL || pControl == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portSyncMutexAcquire(pGpuDb->pLock); + + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + portMemCopy(&pNode->clkPropTopPolsControl, + sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL), + pControl, + sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL)); + + status = NV_OK; +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} + +/*! 
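[Editor's note] The set/get accessors above form a get/modify/set pair keyed by GPU UUID, both serialized on the GPU database mutex. A minimal, hypothetical caller sketch follows (exampleSetTimeslice is an illustrative name, not part of this change; it assumes the surrounding resman headers and a UUID already registered through gpudbRegisterGpu):

/*
 * Illustrative sketch only, not part of the diff: read back the current
 * compute policies for a GPU, change just the timeslice policy, and write
 * the result back through the policy-type switch shown above.
 */
static NV_STATUS
exampleSetTimeslice(const NvU8 *pUuid, NvU32 newTimeslice)
{
    GPU_COMPUTE_POLICY_INFO info;
    NV_STATUS status;

    status = gpudbGetGpuComputePolicyConfigs(pUuid, &info);
    if (status != NV_OK)
    {
        return status;
    }

    info.timeslice = newTimeslice;
    return gpudbSetGpuComputePolicyConfig(pUuid,
                                          NV2080_CTRL_GPU_COMPUTE_POLICY_TIMESLICE,
                                          &info);
}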
+* @brief Get clock policies control for a GPU +* +* @param[in] pUuid Pointer to GPU uuid +* @param[out] pControl Pointer to the control tuple +* +* @return NV_OK Configs retrieved successfully +* @return NV_ERR_INVALID_ARGUMENT Invalid argument specified +* @return NV_ERR_OBJECT_NOT_FOUND GPU entry in db not found +*/ +NV_STATUS +gpudbGetClockPoliciesControl +( + const NvU8 *pUuid, + GPU_CLK_PROP_TOP_POLS_CONTROL *pControl +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + if (pUuid == NULL || pControl == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + portSyncMutexAcquire(pGpuDb->pLock); + + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + portMemCopy(pControl, + sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL), + &pNode->clkPropTopPolsControl, + sizeof(GPU_CLK_PROP_TOP_POLS_CONTROL)); + + status = NV_OK; +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} + +NV_STATUS +gpudbSetShutdownState +( + const NvU8 *pUuid +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuDb *pGpuDb = SYS_GET_GPUDB(pSys); + GPU_INFO_LIST_NODE *pNode; + NV_STATUS status = NV_OK; + + portSyncMutexAcquire(pGpuDb->pLock); + pNode = _gpudbFindGpuInfoByUuid(pUuid); + if (pNode == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + goto done; + } + + pNode->bShutdownState = NV_TRUE; + +done: + portSyncMutexRelease(pGpuDb->pLock); + return status; +} diff --git a/src/nvidia/src/kernel/gpu_mgr/gpu_group.c b/src/nvidia/src/kernel/gpu_mgr/gpu_group.c new file mode 100644 index 000000000..7f1ae1e7f --- /dev/null +++ b/src/nvidia/src/kernel/gpu_mgr/gpu_group.c @@ -0,0 +1,329 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Rotuines ***************************\ +* * +* GpuGrp Object Function Definitions. * +* * +\***************************************************************************/ + +#include "gpu_mgr/gpu_group.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "core/system.h" +#include "class/cl2080.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "mem_mgr/vaspace.h" +#include "class/cl90f1.h" // FERMI_VASPACE_A +#include "nvlimits.h" + +/*! + * Creates the gpugrp object. 
+ * + * @param[in] pGpuGrp gpugrp object pointer + * @param[in] gpuMask Mask of GPUs corresponding to this gpugrp + * + * @returns NV_OK on success, appropriate error on failure. + */ +NV_STATUS +gpugrpCreate_IMPL +( + OBJGPUGRP *pGpuGrp, + NvU32 gpuMask +) +{ + pGpuGrp->gpuMask = gpuMask; + // + // Add the gpugrp instance to the GPU objects in the mask + // At boot this call fails and is deferred to GPU post construct. + // When coming out of SLI this call is succeeding - and postconstruct + // is not called. + // + gpumgrAddDeviceInstanceToGpus(gpuMask); + return NV_OK; +} + +/*! + * Destroys gpugrp object. + * + * It first iterates over the GPUs that belong to this gpugrp + * object indicated by the gpuMask. + * Following this it destroys the object. + * + * @param[in] pGpuGrp gpugrp object pointer + */ +NV_STATUS +gpugrpDestroy_IMPL +( + OBJGPUGRP *pGpuGrp +) +{ + NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND; + OBJGPU *pGpu = NULL; + NvU32 gpuIndex = 0; + + // Add the gpugrp instance to the GPU objects in the mask + while ((pGpu = gpumgrGetNextGpu(pGpuGrp->gpuMask, &gpuIndex))) + { + rmStatus = NV_OK; + pGpu->deviceInstance = NV_MAX_DEVICES; + } + + // Call the utility routine that does the object deletion. + objDelete(pGpuGrp); + return rmStatus; +} + +/*! + * Gets the gpu mask for the gpugrp. + * + * @param[in] pGpuGrp gpugrp object pointer + * + * @returns NvU32 gpumask + */ +NvU32 +gpugrpGetGpuMask_IMPL(OBJGPUGRP *pGpuGrp) +{ + return pGpuGrp->gpuMask; +} + +/*! + * Sets the gpu mask for the gpugrp. + * + * @param[in] pGpuGrp gpugrp object pointer + * @param[in] gpuMask gpumask to set + * + */ +void +gpugrpSetGpuMask_IMPL(OBJGPUGRP *pGpuGrp, NvU32 gpuMask) +{ + pGpuGrp->gpuMask = gpuMask; +} +/*! + * Gets the broadcast enabled state + * + * @param[in] pGpuGrp gpugrp object pointer + * + * @returns NvBool + */ +NvBool +gpugrpGetBcEnabledState_IMPL(OBJGPUGRP *pGpuGrp) +{ + return pGpuGrp->bcEnabled; +} + +/*! + * Sets the broadcast enable state + * + * @param[in] pGpuGrp gpugrp object pointer + * @param[in] bcState Broadcast enable state + * + */ +void +gpugrpSetBcEnabledState_IMPL(OBJGPUGRP *pGpuGrp, NvBool bcState) +{ + pGpuGrp->bcEnabled = bcState; +} + +/*! + * Sets the parent GPU for the gpugrp + * + * @param[in] pGpuGrp gpugrp object pointer + * @param[in] pGpu Parent GPU object pointer + * + */ +void +gpugrpSetParentGpu_IMPL +( + OBJGPUGRP *pGpuGrp, + OBJGPU *pParentGpu +) +{ + pGpuGrp->parentGpu = pParentGpu; +} + +/*! + * Gets the parent GPU for the gpugrp + * + * @param[in] pGpuGrp gpugrp object pointer + * + * @returns GPU pointer + */ +POBJGPU +gpugrpGetParentGpu_IMPL(OBJGPUGRP *pGpuGrp) +{ + return pGpuGrp->parentGpu; +} + + +/*! + * @brief gpugrpCreateVASpace - creates the GLobal VASpace for this gpugrp. + * + * This is created once per group. So for GPUs in SLI, there is only + * one of this created. 
+ * + * @param[in] pGpuGrp GPUGRP object pointer + * @param[in] pGpu Parent GPU object pointer + * @param[in] vaspaceClass VASPACE class to create + * @param[in] vaStart vaspace start + * @param[in] vaLimit vaspace limit + * @param[in] vaspaceFlags VASPACE flags for creation + * @param[out] ppGlobalVASpace Global vaspace that is created + * + * @return NV_OK on success or appropriate RM_ERR on failure + * + */ +NV_STATUS +gpugrpCreateGlobalVASpace_IMPL +( + OBJGPUGRP *pGpuGrp, + OBJGPU *pGpu, + NvU32 vaspaceClass, + NvU64 vaStart, + NvU64 vaLimit, + NvU32 vaspaceFlags, + OBJVASPACE **ppGlobalVASpace +) +{ + NV_STATUS rmStatus; + NvU32 gpuMask = pGpuGrp->gpuMask; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + NvBool bcState = gpumgrGetBcEnabledStatus(pGpu); + + NV_ASSERT_OR_RETURN(ppGlobalVASpace != NULL, NV_ERR_INVALID_ARGUMENT); + *ppGlobalVASpace = NULL; + + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + vaspaceFlags |= VASPACE_FLAGS_ENABLE_VMM; + rmStatus = vmmCreateVaspace(pVmm, vaspaceClass, 0x0, gpuMask, vaStart, + vaLimit, 0, 0, NULL, vaspaceFlags, ppGlobalVASpace); + gpumgrSetBcEnabledStatus(pGpu, bcState); + if (NV_OK != rmStatus) + { + *ppGlobalVASpace = NULL; + return rmStatus; + } + pGpuGrp->pGlobalVASpace = (*ppGlobalVASpace); + + return rmStatus; +} + +/*! + * @brief gpugrpDestroyVASpace - Destroys the gpugrp global vaspace + * + * @param[in] pGpuGrp GPUGRP object pointer + * @param[in] pGpu Parent GPU object pointer + * + * @return NV_OK on success or appropriate RM_ERR on failure + * + */ +NV_STATUS +gpugrpDestroyGlobalVASpace_IMPL(OBJGPUGRP *pGpuGrp, OBJGPU *pGpu) +{ + NV_STATUS rmStatus = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + NvBool bcState = gpumgrGetBcEnabledStatus(pGpu); + + // Nothing to destroy, bail out early + if (pGpuGrp->pGlobalVASpace == NULL) + return rmStatus; + + gpumgrSetBcEnabledStatus(pGpu, NV_TRUE); + vmmDestroyVaspace(pVmm, pGpuGrp->pGlobalVASpace); + gpumgrSetBcEnabledStatus(pGpu, bcState); + pGpuGrp->pGlobalVASpace = NULL; + return rmStatus; +} + + +/*! + * @brief gpugrpGetVASpace - retrieves the group global vaspace + * + * @param[in] pGpuGrp GPUGRP object pointer + * @param[out] ppGlobalVASpace Global vaspace for this GPUGRP + * + * @return NV_OK on success + * NV_ERR_INVALID_ARGUMENT on NULL pointer parameter + * NV_ERR_OBJECT_NOT_FOUND if there is no device vaspace + */ +NV_STATUS +gpugrpGetGlobalVASpace_IMPL(OBJGPUGRP *pGpuGrp, OBJVASPACE **ppVASpace) +{ + NV_ASSERT_OR_RETURN(ppVASpace != NULL, NV_ERR_INVALID_ARGUMENT); + + if (pGpuGrp->pGlobalVASpace == NULL) + { + *ppVASpace = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + *ppVASpace = pGpuGrp->pGlobalVASpace; + return NV_OK; +} + + +/*! + * @brief gpugrpGetGpuFromSubDeviceInstance - retrieves the pGpu associated to + * a GPU group and a subdevice instance. 
+ * + * @param[in] pGpuGrp GPUGRP object pointer + * @param[in] subDeviceInst GPU sundevice Instance + * @param[out] ppGpu POBJGPU* pointer + * + * @return NV_OK on success + * NV_ERR_INVALID_ARGUMENT on NULL pointer parameter + * NV_ERR_OBJECT_NOT_FOUND if there is no GPU for the input parameters + */ +NV_STATUS +gpugrpGetGpuFromSubDeviceInstance_IMPL(OBJGPUGRP *pGpuGrp, NvU32 subDeviceInst, OBJGPU **ppGpu) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuInst = 0; + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_ERR_INVALID_ARGUMENT); + + *ppGpu = NULL; + + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + // check for single GPU case + if (gpumgrGetSubDeviceCount(gpuMask) == 1) + { + *ppGpu = gpumgrGetNextGpu(gpuMask, &gpuInst); + } + else + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInst)) != NULL) + { + if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) == subDeviceInst) + { + *ppGpu = pGpu; + break; + } + } + } + return (*ppGpu == NULL ? NV_ERR_OBJECT_NOT_FOUND : NV_OK); +} + diff --git a/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c b/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c new file mode 100644 index 000000000..354581fea --- /dev/null +++ b/src/nvidia/src/kernel/gpu_mgr/gpu_mgmt_api.c @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * File: gpu_mgmt_api.c + * + * Description: + * This file contains the functions managing the GPU information + * encapsulated by GPUDB object or probed state GPU. 
+ * + *****************************************************************************/ + +#include "core/core.h" +#include "gpu_mgr/gpu_mgmt_api.h" +#include "gpu_mgr/gpu_db.h" + +NV_STATUS +gpumgmtapiConstruct_IMPL +( + GpuManagementApi *pGpuMgmt, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +gpumgmtapiDestruct_IMPL +( + GpuManagementApi *pGpuMgmt +) +{ +} + +NV_STATUS +gpumgmtapiCtrlCmdSetShutdownState_IMPL +( + GpuManagementApi *pGpuMgmt, + NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams +) +{ + return gpudbSetShutdownState(pParams->uuid); +} diff --git a/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c b/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c new file mode 100644 index 000000000..780b800fc --- /dev/null +++ b/src/nvidia/src/kernel/gpu_mgr/gpu_mgr.c @@ -0,0 +1,3138 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Routines ***************************\ +* GPU Manager * +\***************************************************************************/ + + + +#include "core/system.h" +#include "core/locks.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" +#include "tls/tls.h" +#include "nvrm_registry.h" +#include "nv_ref.h" +#include "nvlimits.h" + +#include "kernel/gpu/intr/intr.h" +#include "platform/platform.h" +#include "platform/chipset/chipset.h" +#include "published/pcie_switch/pcie_switch_ref.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "gpu/mem_sys/kern_mem_sys.h" + +// local static funcs +static void gpumgrSetAttachInfo(OBJGPU *, GPUATTACHARG *); +static void gpumgrGetGpuHalFactor(NvU32 *pChipId0, NvU32 *pChipId1, RM_RUNTIME_VARIANT *pRmVariant, GPUATTACHARG *pAttachArg); + +static void +_gpumgrUnregisterRmCapsForGpuUnderLock(NvU64 gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + // SMC partition caps must be destroyed before GPU caps. 
+ gpumgrUnregisterRmCapsForMIGGI(gpuDomainBusDevice); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuDomainBusDevice == gpuDomainBusDevice && + pProbedGpu->gpuId != NV0000_CTRL_GPU_INVALID_ID) + { + osRmCapUnregister(&pProbedGpu->pOsRmCaps); + break; + } + } +} + +static void +_gpumgrUnregisterRmCapsForGpu(NvU64 gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + _gpumgrUnregisterRmCapsForGpuUnderLock(gpuDomainBusDevice); + portSyncMutexRelease(pGpuMgr->probedGpusLock); +} + +static NV_STATUS +_gpumgrRegisterRmCapsForGpu(OBJGPU *pGpu) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuDomainBusDevice == gpuGetDBDF(pGpu) && + pProbedGpu->gpuId != NV0000_CTRL_GPU_INVALID_ID) + { + if (pProbedGpu->pOsRmCaps == NULL) + { + status = osRmCapRegisterGpu(pGpu->pOsGpuInfo, + &pProbedGpu->pOsRmCaps); + } + else + { + status = NV_OK; + } + + pGpu->pOsRmCaps = pProbedGpu->pOsRmCaps; + break; + } + } + + NV_ASSERT(status == NV_OK); + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + return status; +} + +// +// ODB functions +// +NV_STATUS +gpumgrConstruct_IMPL(OBJGPUMGR *pGpuMgr) +{ + NvU32 i; + + NV_PRINTF(LEVEL_INFO, "gpumgrConstruct\n"); + + pGpuMgr->numGpuHandles = 0; + + for (i = 0; i < NV_MAX_DEVICES; i++) + pGpuMgr->gpuHandleIDList[i].gpuInstance = NV_MAX_DEVICES; + + pGpuMgr->probedGpusLock = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + + if (pGpuMgr->probedGpusLock == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + portMemSet(&pGpuMgr->probedGpus[i], 0, sizeof(PROBEDGPU)); + pGpuMgr->probedGpus[i].gpuId = NV0000_CTRL_GPU_INVALID_ID; + } + + pGpuMgr->gpuAttachCount = 0; + pGpuMgr->gpuAttachMask = 0; + + pGpuMgr->deviceCount = 0; + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++) + { + pGpuMgr->pGpuGrpTable[i] = NULL; + } + + pGpuMgr->powerDisconnectedGpuCount = 0; + + return NV_OK; +} + + +void +gpumgrDestruct_IMPL(OBJGPUMGR *pGpuMgr) +{ + NV_PRINTF(LEVEL_INFO, "gpumgrDestruct\n"); + + portSyncMutexDestroy(pGpuMgr->probedGpusLock); +} + +// +// gpumgrAllocGpuInstance +// +// This interface returns the next available gpu number. +// +NV_STATUS +gpumgrAllocGpuInstance(NvU32 *pGpuInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + NvU32 i; + NvU64 availableIDs = ((1ULL << NV_MAX_DEVICES) - 1); + + if (pGpuMgr->numGpuHandles == 0) + { + *pGpuInstance = 0; + return NV_OK; + } + else if (pGpuMgr->numGpuHandles == NV_MAX_DEVICES) + { + *pGpuInstance = NV_MAX_DEVICES; + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + for (i = 0; i < pGpuMgr->numGpuHandles; i++) + availableIDs &= ~NVBIT(pGpuMgr->gpuHandleIDList[i].gpuInstance); + + for (i = 0; ((availableIDs & (1ULL << i)) == 0); i++) + ; + + *pGpuInstance = i; + + return NV_OK; +} + +// +// During destruction of a GPU the handle list needs to be modified. +// Since we cannot guarantee the _last_ GPU will always be the one +// destroyed we have to compact the handle list so we have no gaps +// and can simply decrement numGpuHandles. 
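[Editor's note] The allocator in gpumgrAllocGpuInstance above walks the handle list, clears one bit per in-use instance, and takes the lowest bit still set. A standalone model of that availability scan, with example names and types rather than RM code:

/*
 * Standalone illustration (not RM code) of the availability-mask scan in
 * gpumgrAllocGpuInstance: clear one bit per in-use instance, then take the
 * lowest bit that is still set.
 */
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_MAX_DEVICES 32u   /* stands in for NV_MAX_DEVICES */

static unsigned
examplePickFreeInstance(const unsigned *usedInstances, size_t usedCount)
{
    uint64_t available = (1ULL << EXAMPLE_MAX_DEVICES) - 1;
    unsigned i;

    if (usedCount >= EXAMPLE_MAX_DEVICES)
        return EXAMPLE_MAX_DEVICES;           /* no free instance left */

    for (size_t n = 0; n < usedCount; n++)
        available &= ~(1ULL << usedInstances[n]);

    for (i = 0; (available & (1ULL << i)) == 0; i++)
        ;                                     /* lowest free instance */

    return i;
}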
+// +static void +_gpumgrShiftDownGpuHandles(NvU32 startIndex) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i, lastMovedIndex = startIndex; + + for (i = startIndex; i < (NV_MAX_DEVICES - 1); i++) + { + if (pGpuMgr->gpuHandleIDList[i + 1].pGpu != 0) + { + lastMovedIndex = i + 1; + pGpuMgr->gpuHandleIDList[i].gpuInstance = + pGpuMgr->gpuHandleIDList[i + 1].gpuInstance; + + pGpuMgr->gpuHandleIDList[i].pGpu = + pGpuMgr->gpuHandleIDList[i + 1].pGpu; + } + } + + pGpuMgr->gpuHandleIDList[lastMovedIndex].gpuInstance = NV_MAX_DEVICES; + pGpuMgr->gpuHandleIDList[lastMovedIndex].pGpu = reinterpretCast(NULL, OBJGPU *); + pGpuMgr->numGpuHandles--; +} + +static void +_gpumgrDestroyGpu(NvU32 gpuInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu; + NvU32 i; + + osSyncWithGpuDestroy(NV_TRUE); + + pGpu = gpumgrGetGpu(gpuInstance); + + if (pGpu != NULL) + { + objDelete(pGpu); + } + + for (i = 0; i < pGpuMgr->numGpuHandles; i++) + { + if (pGpuMgr->gpuHandleIDList[i].gpuInstance == gpuInstance) + { + pGpuMgr->gpuHandleIDList[i].gpuInstance = NV_MAX_DEVICES; + pGpuMgr->gpuHandleIDList[i].pGpu = reinterpretCast(NULL, OBJGPU *); + _gpumgrShiftDownGpuHandles(i); + break; + } + } + + // Destroy pointers of blob data if it is created + OBJPFM *pPfm = SYS_GET_PFM(pSys); + pfmBlobDataDestroy(pPfm); + + osSyncWithGpuDestroy(NV_FALSE); +} + +POBJGPU +gpumgrGetGpu(NvU32 gpuInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + for (i = 0; i < pGpuMgr->numGpuHandles; i++) + if (pGpuMgr->gpuHandleIDList[i].gpuInstance == gpuInstance) + if (pGpuMgr->gpuHandleIDList[i].pGpu) + return pGpuMgr->gpuHandleIDList[i].pGpu; + + return NULL; +} + +POBJGPU +gpumgrGetSomeGpu(void) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuMask = 0; + NvU32 gpuIndex = 0; + NvU32 gpuCount = 0; + + // Get some gpu to get the SLI Display Parent + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + + if (pGpu == NULL) + { + // None of the GPUs are initialized - Too early + NV_PRINTF(LEVEL_ERROR, + "Failed to retrieve pGpu - Too early call!.\n"); + NV_ASSERT(NV_FALSE); + return pGpu; + } + return pGpu; +} + + +// +// gpumgrAllocDeviceInstance +// +// This interface returns the next available broadcast device number. +// This broadcast device number is used to uniquely identify this set +// of gpu(s) both internally in the RM (e.g. OBJGPUGRP handle) as well +// as via the architecture (e.g., for the 'deviceId' parameter of +// NV0080_ALLOC_PARAMETERS). +// +NV_STATUS +gpumgrAllocDeviceInstance(NvU32 *pDeviceInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++) + if (pGpuMgr->pGpuGrpTable[i] == NULL) + break; + + if (i == NV_MAX_DEVICES) + { + *pDeviceInstance = NV_MAX_DEVICES; + return NV_ERR_GENERIC; + } + + *pDeviceInstance = i; + + return NV_OK; +} + +// +// gpumgrGetGpuAttachInfo +// +// Returns current gpu attach info. +// +NV_STATUS +gpumgrGetGpuAttachInfo(NvU32 *pGpuCnt, NvU32 *pGpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + // caller can pass in NULL for outparams that it doesn't need. 
+ if (pGpuCnt) *pGpuCnt = pGpuMgr->gpuAttachCount; + if (pGpuMask) *pGpuMask = pGpuMgr->gpuAttachMask; + + return NV_OK; +} + +NvU32 +gpumgrGetDeviceGpuMask(NvU32 deviceInstance) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromInstance(deviceInstance); + + if (pGpuGrp == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "Could not find GPU Group for deviceInstance 0x%x!\n", + deviceInstance); + return 0; + } + + return gpugrpGetGpuMask(pGpuGrp); +} + +NV_STATUS +gpumgrIsDeviceInstanceValid(NvU32 deviceInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPUGRP *pGpuGrp = NULL; + + if (deviceInstance >= NV_MAX_DEVICES) + return NV_ERR_INVALID_ARGUMENT; + + pGpuGrp = pGpuMgr->pGpuGrpTable[deviceInstance]; + if (NULL == pGpuGrp) + return NV_ERR_INVALID_DATA; + + if (0 == gpugrpGetGpuMask(pGpuGrp)) + return NV_ERR_INVALID_ARGUMENT; + + return NV_OK; +} + +NvBool +gpumgrIsSubDeviceInstanceValid(NvU32 subDeviceInstance) +{ + if (subDeviceInstance >= NV2080_MAX_SUBDEVICES) + return NV_FALSE; + + return NV_TRUE; +} + +NvU32 gpumgrGetPrimaryForDevice(NvU32 deviceInstance) +{ + NvU32 gpuMask, gpuInstance = 0; + OBJGPU *pGpu = NULL; + + gpuMask = gpumgrGetDeviceGpuMask(deviceInstance); + + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + if (pGpu != NULL) + { + return pGpu->gpuInstance; + } + } + else + { + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (gpumgrIsParentGPU(pGpu)) + { + return pGpu->gpuInstance; + } + } + } + + NV_PRINTF(LEVEL_ERROR, + "deviceInstance 0x%x does not exist!\n", deviceInstance); + + return 0; // this should not happen, never +} + +NvBool +gpumgrIsDeviceEnabled(NvU32 deviceInstance) +{ + NvU32 gpuMask, gpuInstance = 0; + NvBool bEnabled; + + gpuMask = gpumgrGetDeviceGpuMask(deviceInstance); + + if (gpuMask == 0) + { + return NV_FALSE; + } + /* + * Check if this device + * - has been disabled via Power-SLI + * - is in the "drain" state + */ + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + OBJGPU *pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + + if (pGpu == NULL) + return NV_FALSE; + + if ((gpumgrQueryGpuDrainState(pGpu->gpuId, &bEnabled, NULL) == NV_OK) + && bEnabled) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +// +// gpumgrRegisterGpuId +// +// This interface is used by os-dependent code to insert a probed +// gpu into the table of probed gpus known to the RM. 
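[Editor's note] A hypothetical sketch of how the os-dependent probe path described above might drive this interface (exampleOnGpuProbed and exampleOnGpuRemoved are illustrative names, not part of this change):

/*
 * Illustrative only. gpumgrRegisterGpuId() returns NV_ERR_IN_USE for a
 * duplicate id, which a probe path may choose to treat as benign; the
 * unregister call below drops the table entry (and its RM caps) again.
 */
static NV_STATUS
exampleOnGpuProbed(NvU32 gpuId, NvU64 gpuDomainBusDevice)
{
    NV_STATUS status = gpumgrRegisterGpuId(gpuId, gpuDomainBusDevice);

    if (status == NV_ERR_IN_USE)
        return NV_OK;   /* already known to the RM */

    return status;
}

static void
exampleOnGpuRemoved(NvU32 gpuId)
{
    (void)gpumgrUnregisterGpuId(gpuId);
}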
+// +NV_STATUS +gpumgrRegisterGpuId(NvU32 gpuId, NvU64 gpuDomainBusDevice) +{ + NV_STATUS status = NV_ERR_INSUFFICIENT_RESOURCES; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + NV_PRINTF(LEVEL_ERROR, + "GPU id 0x%x already registered at index %u\n", + gpuId, i); + + // Duplicate gpu + status = NV_ERR_IN_USE; + goto done; + } + + if (pGpuMgr->probedGpus[i].gpuId == NV0000_CTRL_GPU_INVALID_ID) + { + pGpuMgr->probedGpus[i].gpuId = gpuId; + pGpuMgr->probedGpus[i].gpuDomainBusDevice = gpuDomainBusDevice; + pGpuMgr->probedGpus[i].bInitAttempted = NV_FALSE; + pGpuMgr->probedGpus[i].bExcluded = NV_FALSE; + pGpuMgr->probedGpus[i].bUuidValid = NV_FALSE; + pGpuMgr->probedGpus[i].pOsRmCaps = NULL; + status = NV_OK; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return status; +} + +// +// gpumgrUnregisterGpuId +// +// This interface is used by os-dependent code to remove a gpu +// from the table of probed gpus known to the RM. +// +NV_STATUS +gpumgrUnregisterGpuId(NvU32 gpuId) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + _gpumgrUnregisterRmCapsForGpuUnderLock(pProbedGpu->gpuDomainBusDevice); + pProbedGpu->gpuId = NV0000_CTRL_GPU_INVALID_ID; + pProbedGpu->bDrainState = NV_FALSE; + pProbedGpu->bRemoveIdle = NV_FALSE; + pProbedGpu->bExcluded = NV_FALSE; + pProbedGpu->bUuidValid = NV_FALSE; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return NV_OK; +} + +// +// gpumgrExcludeGpuId +// +// This interface is used by os-dependent code to 'exclude' a gpu. +// +// gpuId: the device to exclude +// +NV_STATUS +gpumgrExcludeGpuId(NvU32 gpuId) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + pProbedGpu->bExcluded = NV_TRUE; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return NV_OK; +} + +// +// gpumgrSetUuid +// +// This interface is used by os-dependent code to pass the UUID for a gpu. +// The UUID is a 16-byte raw UUID/GID. 
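[Editor's note] A hypothetical caller sketch tying this to gpumgrGetGpuUuidInfo() defined further below (names prefixed example* are illustrative; the exact NV0000 UUID flag defines are assumed and not reproduced here):

/*
 * Illustrative only: cache the raw 16-byte SHA1 GID for a probed GPU and
 * read it back. 'uuidFlags' is assumed to request the SHA1 type in binary
 * format; in that case the returned buffer is owned by the caller and must
 * be freed with portMemFree(), as noted in gpumgrGetGpuUuidInfo().
 */
static NV_STATUS
exampleCacheAndReadUuid(NvU32 gpuId, NvU8 rawGid[RM_SHA1_GID_SIZE], NvU32 uuidFlags)
{
    NvU8      *pUuidStr   = NULL;
    NvU32      uuidStrLen = 0;
    NV_STATUS  status;

    status = gpumgrSetUuid(gpuId, rawGid);
    if (status != NV_OK)
        return status;

    status = gpumgrGetGpuUuidInfo(gpuId, &pUuidStr, &uuidStrLen, uuidFlags);
    if (status == NV_OK)
        portMemFree(pUuidStr);

    return status;
}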
+// +NV_STATUS +gpumgrSetUuid(NvU32 gpuId, NvU8 *uuid) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + if (uuid == NULL) + return NV_ERR_INVALID_DATA; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + portMemCopy(pProbedGpu->uuid, RM_SHA1_GID_SIZE, uuid, RM_SHA1_GID_SIZE); + pProbedGpu->bUuidValid = NV_TRUE; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return NV_OK; +} + +// +// gpumgrGetCachedUuid +// +// Lookup the cached UUID for a GPU +// +static NV_STATUS +gpumgrGetCachedUuid(NvU32 gpuId, NvU8 *uuid, unsigned int len) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + + if (uuid == NULL || len < RM_SHA1_GID_SIZE) + return NV_ERR_INVALID_ARGUMENT; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + PROBEDGPU *pProbedGpu = &pGpuMgr->probedGpus[i]; + + if (pProbedGpu->gpuId == gpuId) + { + if (pProbedGpu->bUuidValid) + { + portMemCopy(uuid, RM_SHA1_GID_SIZE, pProbedGpu->uuid, RM_SHA1_GID_SIZE); + status = NV_OK; + } + else + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + } + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return status; +} + +NV_STATUS +gpumgrGetGpuUuidInfo(NvU32 gpuId, NvU8 **ppUuidStr, NvU32 *pUuidStrLen, NvU32 uuidFlags) +{ + NvU8 *pUuid; + NV_STATUS status; + + if (ppUuidStr == NULL || pUuidStrLen == NULL) + return NV_ERR_INVALID_DATA; + + // gpumgr only supports SHA1 format; error out if requesting otherwise + if (!FLD_TEST_DRF(0000_CTRL_CMD, _GPU_GET_UUID_FROM_GPU_ID_FLAGS, _TYPE, _SHA1, uuidFlags)) + return NV_ERR_INVALID_ARGUMENT; + + pUuid = portMemAllocNonPaged(RM_SHA1_GID_SIZE); + if (pUuid == NULL) + return NV_ERR_NO_MEMORY; + + status = gpumgrGetCachedUuid(gpuId, pUuid, RM_SHA1_GID_SIZE); + if (status != NV_OK) + { + portMemFree(pUuid); + return status; + } + + if (FLD_TEST_DRF(0000_CTRL_CMD, _GPU_GET_UUID_FROM_GPU_ID_FLAGS, _FORMAT, _BINARY, uuidFlags)) + { + // Binary case - pUuid is freed by the caller + *ppUuidStr = pUuid; + *pUuidStrLen = RM_SHA1_GID_SIZE; + } + else + { + // Conversion to ASCII or UNICODE + status = transformGidToUserFriendlyString(pUuid, RM_SHA1_GID_SIZE, + ppUuidStr, pUuidStrLen, uuidFlags); + portMemFree(pUuid); + } + + return status; +} + +static void +gpumgrGetGpuHalFactorOfVirtual(NvBool *pIsVirtual, GPUATTACHARG *pAttachArg) +{ + *pIsVirtual = NV_FALSE; +} + +#define NV_PMC_BOOT_42_ARCHITECTURE_TU100 0x00000016 +static NvBool _gpumgrIsRmFirmwareCapableChip(NvU32 pmcBoot42) +{ + return (DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, pmcBoot42) >= NV_PMC_BOOT_42_ARCHITECTURE_TU100); +} +#undef NV_PMC_BOOT_42_ARCHITECTURE_TU100 + +NvBool gpumgrIsDeviceRmFirmwareCapable +( + NvU16 devId, + NvU32 pmcBoot42, + NvBool *pbEnabledByDefault +) +{ + static const NvU16 defaultGspRmGpus[] = { + 0x1E37, // GFN + 0x1E38, // TU102 + 0x1EB4, // T4G + 0x1EB8, // T4 + 0x1EB9, // T4 + + 0x20B0, // A100 + 0x20B1, // A100 + 0x20B2, // A100 + 0x20B3, // A200 + 0x20B5, // A100 80GB + 0x20B6, // A100 + 0x20B7, // A30 + 0x20B8, // A100X SKU230/231 Roy-100 + 0x20B9, // A30X SKU205/206 Roy-30 + 0x20F0, // A100 + 0x20F1, // A100 + 0x20F2, // A100 + 0x2235, // A40 + 0x2236, // A10 SKU215 Pris-24 + 0x2237, // A10G SKU215 Pris-24 + 0x25B6, // A16 + 
}; + NvU32 count = NV_ARRAY_ELEMENTS(defaultGspRmGpus); + NvU32 i; + + *pbEnabledByDefault = NV_FALSE; + + if (!_gpumgrIsRmFirmwareCapableChip(pmcBoot42)) + return NV_FALSE; + + for (i = 0; i < count; i++) + { + if (defaultGspRmGpus[i] == devId) + { + NV_PRINTF(LEVEL_INFO, + "DevId 0x%x is GSP-RM enabled by default\n", + devId); + *pbEnabledByDefault = NV_TRUE; + break; + } + } + + return NV_TRUE; +} + +static NvBool gpumgrCheckRmFirmwarePolicy +( + NvU64 nvDomainBusDeviceFunc, + NvBool bRequestFwClientRm, + NvU32 pmcBoot42 +) +{ + + if (!bRequestFwClientRm) + return NV_FALSE; + + NvU32 data; + + if (!_gpumgrIsRmFirmwareCapableChip(pmcBoot42)) + { + NV_PRINTF(LEVEL_ERROR, "Disabling GSP offload -- GPU not supported\n"); + return NV_FALSE; + } + + // Disable if RM registry override set + if (osReadRegistryDword(NULL, NV_REG_STR_RM_DISABLE_GSP_OFFLOAD, &data) == NV_OK) + { + if (data != NV_REG_STR_RM_DISABLE_GSP_OFFLOAD_FALSE) + { + NV_PRINTF(LEVEL_NOTICE, "Disabling GSP offload -- registry entry\n"); + return NV_FALSE; + } + } + + return NV_TRUE; +} + +// +// gpumgrGetGpuHalFactor +// +// Get Gpu Hal factors those are used to init Hal binding +// +// TODO : later this function will be used to read out NVOC Halspec init value for OBJGPU +// +static void +gpumgrGetGpuHalFactor +( + NvU32 *pChipId0, + NvU32 *pChipId1, + RM_RUNTIME_VARIANT *pRmVariant, + GPUATTACHARG *pAttachArg +) +{ + NvBool isVirtual; + NvBool isFwClient; + + DEVICE_MAPPING gpuDevMapping = {0}; + gpuDevMapping.gpuNvAddr = pAttachArg->regBaseAddr; + gpuDevMapping.gpuNvLength = pAttachArg->regLength; + + // get ChipId0 and ChipId1 + if (pAttachArg->socDeviceArgs.specified) + { + // This path is taken for T234D+ devices. + + *pChipId0 = pAttachArg->socDeviceArgs.socChipId0; + *pChipId1 = 0; + isVirtual = NV_FALSE; + } + else if (pAttachArg->bIsSOC) + { + // This path is only taken for ARCH MODS iGPU verification. + + *pChipId0 = pAttachArg->socChipId0; + *pChipId1 = 0; + isVirtual = NV_FALSE; + } + else + { + // + // PMC_BOOT_42 register is added above G94+ chips which is internal to NVIDIA + // Earlier we used PMC_BOOT_0 as Internal ID which is now exposed to customers + // + *pChipId0 = osDevReadReg032(/*pGpu=*/ NULL, &gpuDevMapping, NV_PMC_BOOT_0); + *pChipId1 = osDevReadReg032(/*pGpu=*/ NULL, &gpuDevMapping, NV_PMC_BOOT_42); + + gpumgrGetGpuHalFactorOfVirtual(&isVirtual, pAttachArg); + } + + isFwClient = gpumgrCheckRmFirmwarePolicy(pAttachArg->nvDomainBusDeviceFunc, + pAttachArg->bRequestFwClientRm, + *pChipId1); + + if (RMCFG_FEATURE_PLATFORM_GSP || RMCFG_FEATURE_PLATFORM_DCE) + *pRmVariant = RM_RUNTIME_VARIANT_UCODE; + else if (isVirtual) + *pRmVariant = RM_RUNTIME_VARIANT_VF; + else if (isFwClient) + *pRmVariant = RM_RUNTIME_VARIANT_PF_KERNEL_ONLY; + else + *pRmVariant = RM_RUNTIME_VARIANT_PF_MONOLITHIC; // default, monolithic mode +} + + +// +// _gpumgrCreateGpu +// +// Former _sysCreateGpu(). The function is moved to Gpumgr for hinding struct +// GPUATTACHARG from SYS. SYS is still the parent object of both GPUMGR and +// GPU. 
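[Editor's note] Before _gpumgrCreateGpu() appears below, the variant selection performed by gpumgrGetGpuHalFactor() above is worth restating. The standalone model that follows is illustrative only and uses example names rather than the RM_RUNTIME_VARIANT values:

/*
 * Standalone illustration (not RM code) of the runtime-variant priority in
 * gpumgrGetGpuHalFactor: ucode (GSP/DCE) builds win first, then virtual
 * functions, then GSP-offload kernel-only clients, with monolithic RM as
 * the default.
 */
typedef enum
{
    EXAMPLE_VARIANT_UCODE,
    EXAMPLE_VARIANT_VF,
    EXAMPLE_VARIANT_PF_KERNEL_ONLY,
    EXAMPLE_VARIANT_PF_MONOLITHIC
} ExampleRmVariant;

static ExampleRmVariant
examplePickRuntimeVariant(int bUcodePlatform, int bVirtual, int bFwClient)
{
    if (bUcodePlatform)
        return EXAMPLE_VARIANT_UCODE;
    if (bVirtual)
        return EXAMPLE_VARIANT_VF;
    if (bFwClient)
        return EXAMPLE_VARIANT_PF_KERNEL_ONLY;
    return EXAMPLE_VARIANT_PF_MONOLITHIC;
}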
+// +static NV_STATUS +_gpumgrCreateGpu(NvU32 gpuInstance, GPUATTACHARG *pAttachArg) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu; + NV_STATUS status; + RM_RUNTIME_VARIANT rmVariant; + NvU32 chipId0; // 32-bit chipId (pmcBoot0 on GPU) + NvU32 chipId1; // 32-bit chipId (pmcBoot42 on GPU) + NvU32 hidrev, majorRev; + + gpumgrGetGpuHalFactor(&chipId0, &chipId1, &rmVariant, pAttachArg); + + hidrev = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _CHIPID, chipId0); + majorRev = DRF_VAL(_PAPB_MISC, _GP_HIDREV, _MAJORREV, chipId0); + + // WAR: The majorrev of t234 shows 0xa on fmodel instead of 0x4 + if ((hidrev == 0x23) && (majorRev == 0xa)) + { + majorRev = 0x4; + } + + hidrev = (hidrev << 4) | majorRev; + + // create OBJGPU with halspec factor initialization value + status = objCreate(&pGpu, pSys, OBJGPU, + /* ChipHal_arch = */ DRF_VAL(_PMC, _BOOT_42, _ARCHITECTURE, chipId1), + /* ChipHal_impl = */ DRF_VAL(_PMC, _BOOT_42, _IMPLEMENTATION, chipId1), + /* ChipHal_hidrev = */ hidrev, + /* RmVariantHal_rmVariant = */ rmVariant, + /* DispIpHal_ipver = */ 0, // initialized later + /* ctor.gpuInstance = */ gpuInstance); + if (status != NV_OK) + { + return status; + } + + // legacy chip-config Hal registration path + status = gpuBindHalLegacy(pGpu, chipId0, chipId1); + if (status != NV_OK) + { + objDelete(pGpu); + return status; + } + + // + // Save away the public ID associated with the handle just returned + // from create object. + // + pGpuMgr->gpuHandleIDList[pGpuMgr->numGpuHandles].gpuInstance = gpuInstance; + pGpuMgr->gpuHandleIDList[pGpuMgr->numGpuHandles].pGpu = pGpu; + + pGpuMgr->numGpuHandles++; + + return status; +} + + +static void +_gpumgrGetEncSessionStatsReportingState(OBJGPU *pGpu) +{ +} + +// +// gpumgrAttachGpu +// +// This interface is used by os-dependent code to attach a new gpu +// to the pool managed by the RM. Construction of OBJGPU and it's +// descendants is handled here, along with any other necessary prep +// for the subsequent gpu preinit/init stages. 
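//
// Illustrative call sequence from the OS layer (a sketch only; the variable
// names and values below are placeholders, and only a subset of GPUATTACHARG
// is shown):
//
//     GPUATTACHARG attachArg;
//     NV_STATUS status;
//
//     portMemSet(&attachArg, 0, sizeof(attachArg));
//     attachArg.regBaseAddr           = mappedRegs;      // kernel mapping of the register aperture
//     attachArg.regLength             = regApertureSize;
//     attachArg.devPhysAddr           = regPhysAddr;     // physical address of the register aperture
//     attachArg.fbPhysAddr            = fbPhysAddr;      // physical address of the framebuffer aperture
//     attachArg.fbLength              = fbSize;
//     attachArg.intLine               = irqLine;
//     attachArg.nvDomainBusDeviceFunc = dbdf;
//     attachArg.bRequestFwClientRm    = bWantGspOffload;
//     attachArg.pOsAttachArg          = pOsArg;
//     status = gpumgrAttachGpu(gpuInstance, &attachArg);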
+// +NV_STATUS +gpumgrAttachGpu(NvU32 gpuInstance, GPUATTACHARG *pAttachArg) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu = NULL; + NV_STATUS status; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // create the new OBJGPU + if ((status = _gpumgrCreateGpu(gpuInstance, pAttachArg)) != NV_OK) + { + goto gpumgrAttach_error_and_exit; + } + + // get a pointer to the new OBJGPU + pGpu = gpumgrGetGpu(gpuInstance); + + // load up attach parameters + gpumgrSetAttachInfo(pGpu, pAttachArg); + + // Load OOR check address mode based on arch +#if defined(NVCPU_X86_64) + pGpu->busInfo.oorArch = OOR_ARCH_X86_64; +#elif defined(NVCPU_PPC64LE) + pGpu->busInfo.oorArch = OOR_ARCH_PPC64LE; +#elif defined(NVCPU_ARM) + pGpu->busInfo.oorArch = OOR_ARCH_ARM; +#elif defined(NVCPU_AARCH64) + pGpu->busInfo.oorArch = OOR_ARCH_AARCH64; +#else + pGpu->busInfo.oorArch = OOR_ARCH_NONE; +#endif + + pGpu->pOS = SYS_GET_OS(pSys); + + // let os fill in dpc details before we get into engine construction + if ((status = osDpcAttachGpu(pGpu, pAttachArg->pOsAttachArg)) != NV_OK) + { + goto gpumgrAttach_error_and_exit; + } + + // let os fill in what it needs before we get into engine construction + if ((status = osAttachGpu(pGpu, pAttachArg->pOsAttachArg)) != NV_OK) + { + goto gpumgrAttach_error_and_exit; + } + + NV_ASSERT((pGpuMgr->gpuAttachMask & NVBIT(gpuInstance)) == 0); + pGpuMgr->gpuAttachMask |= NVBIT(gpuInstance); + pGpuMgr->gpuAttachCount++; + + status = _gpumgrRegisterRmCapsForGpu(pGpu); + if (status != NV_OK) + { + goto gpumgrAttach_error_and_exit; + } + + // finish gpu construction + if ((status = gpuPostConstruct(pGpu, pAttachArg)) != NV_OK) + { + goto gpumgrAttach_error_and_exit; + } + + _gpumgrGetEncSessionStatsReportingState(pGpu); + + Intr *pIntr = GPU_GET_INTR(pGpu); + // On some boards, vbios enables interrupt early before RM + // initialize pGpu so that hotplug intrs can be serviced on + // the mfg line. Disable interrupt here for this case. + if (pIntr != NULL) + { + if (intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL) != INTERRUPT_TYPE_DISABLED) + { + intrSetIntrEnInHw_HAL(pGpu, pIntr, INTERRUPT_TYPE_DISABLED, NULL); + intrSetStall_HAL(pGpu, pIntr, INTERRUPT_TYPE_DISABLED, NULL); + } + } + + // Add entry into system nvlink topo array + gpumgrAddSystemNvlinkTopo(pAttachArg->nvDomainBusDeviceFunc); + // Add entry into system partition topo array + gpumgrAddSystemMIGInstanceTopo(pAttachArg->nvDomainBusDeviceFunc); + + return status; + +gpumgrAttach_error_and_exit: + if ((pGpuMgr->gpuAttachMask & NVBIT(gpuInstance)) != 0) + { + pGpuMgr->gpuAttachMask &= ~NVBIT(gpuInstance); + pGpuMgr->gpuAttachCount--; + } + + if (pGpu != NULL) + { + _gpumgrUnregisterRmCapsForGpu(gpuGetDBDF(pGpu)); + } + + osDpcDetachGpu(pGpu); + _gpumgrDestroyGpu(gpuInstance); + return status; +} + +// +// gpumgrDetachGpu +// +// This entry point detaches a gpu from the RM. The corresponding +// OBJGPU and any of it's offspring are released, etc. 
+// +NV_STATUS +gpumgrDetachGpu(NvU32 gpuInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu = gpumgrGetGpu(gpuInstance); + NvBool bDelClientResourcesFromGpuMask = !pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_TIMEOUT_RECOVERY); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // Mark for deletion the stale clients related to the GPU mask + if (bDelClientResourcesFromGpuMask) + { + rmapiSetDelPendingClientResourcesFromGpuMask(NVBIT(gpuInstance)); + } + + osDpcDetachGpu(pGpu); + + pGpu->pOsRmCaps = NULL; + + // release pDev + _gpumgrDestroyGpu(gpuInstance); + + // Delete the marked clients related to the GPU mask + if (bDelClientResourcesFromGpuMask) + { + rmapiDelPendingDevices(NVBIT(gpuInstance)); + rmapiDelPendingClients(); + } + + NV_ASSERT(pGpuMgr->gpuAttachMask & NVBIT(gpuInstance)); + pGpuMgr->gpuAttachMask &= ~NVBIT(gpuInstance); + pGpuMgr->gpuAttachCount--; + + return NV_OK; +} + +// +// gpumgrCreateDevice +// +// Create a broadcast device. The set of one or more gpus +// comprising the broadcast device is described by gpuMask. +// +NV_STATUS +gpumgrCreateDevice(NvU32 *pDeviceInstance, NvU32 gpuMask, NvU32 *pGpuIdsOrdinal) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pParentGpu = NULL; + NvU32 gpuInstance; + NV_STATUS status = NV_ERR_INVALID_REQUEST; + OBJGPUGRP *pGpuGrp = NULL; + + pGpuMgr->deviceCount++; + + NV_ASSERT(gpuMask != 0); + + // if only 1 gpu in the set, we're done + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + // alloc new broadcast device instance + status = gpumgrAllocDeviceInstance(pDeviceInstance); + if (status != NV_OK) + { + goto gpumgrCreateDevice_exit; + } + + gpumgrConstructGpuGrpObject(pGpuMgr, gpuMask, + &pGpuMgr->pGpuGrpTable[*pDeviceInstance]); + // + // Set up parent gpu state. pParentGpu == NULL during boot when + // we're first creating this device because the GPU attach process + // has not yet completed. pParentGpu != NULL when we're coming + // out of SLI (unlinking). 
+ // + gpuInstance = 0; + pParentGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + if (pParentGpu) + { + gpumgrSetParentGPU(pParentGpu, pParentGpu); + } + + gpumgrAddDeviceMaskToGpuInstTable(gpuMask); + status = NV_OK; + goto gpumgrCreateDevice_exit; + } + +gpumgrCreateDevice_exit: + if (status != NV_OK) + { + // Device creation failed + pGpuMgr->deviceCount--; + } + else + { + pGpuGrp = pGpuMgr->pGpuGrpTable[*pDeviceInstance]; + if (gpugrpGetGpuMask(pGpuGrp) != gpuMask) + { + NV_ASSERT(0); + gpumgrDestroyDevice(*pDeviceInstance); + return NV_ERR_INVALID_DATA; + } + NV_PRINTF(LEVEL_INFO, + "gpumgrCreateDevice: deviceInst 0x%x mask 0x%x\n", + *pDeviceInstance, gpuMask); + } + return status; +} + +NV_STATUS +gpumgrDestroyDevice(NvU32 deviceInstance) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS status = NV_OK; + OBJGPUGRP *pGpuGrp = pGpuMgr->pGpuGrpTable[deviceInstance]; + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_ERR_OBJECT_NOT_FOUND); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + NV_ASSERT(gpuMask != 0); + + // if we only have one subdevice we're done + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + gpugrpDestroy(pGpuGrp); + pGpuMgr->pGpuGrpTable[deviceInstance] = NULL; + gpumgrClearDeviceMaskFromGpuInstTable(gpuMask); + goto gpumgrDestroyDevice_exit; + } + +gpumgrDestroyDevice_exit: + pGpuMgr->deviceCount--; + + return status; +} + +// +// gpumgrGetDeviceInstanceMask +// +// Returns mask of enabled (or valid) device instances. +// This mask tells clients which NV01_DEVICE class +// instances are valid. +// +NvU32 +gpumgrGetDeviceInstanceMask(void) +{ + NvU32 i, deviceInstanceMask = 0; + + // for every broadcast device... + for (i = 0; i < NV_MAX_DEVICES; i++) + { + // ...add it to our mask if it's enabled + if (NV_OK == gpumgrIsDeviceInstanceValid(i)) + deviceInstanceMask |= NVBIT(i); + } + + return deviceInstanceMask; +} + +NvU32 +gpumgrGetGpuMask(OBJGPU *pGpu) +{ + NvU32 deviceInstance = gpuGetDeviceInstance(pGpu); + + NV_ASSERT(deviceInstance < NV_MAX_DEVICES); + + return gpumgrGetDeviceGpuMask(deviceInstance); +} + +// +// gpumgrGetSubDeviceCount +// +NvU32 +gpumgrGetSubDeviceCount(NvU32 gpuMask) +{ + NvU32 subDeviceCount = 0; + + // tally # of gpus in the set + while (gpuMask != 0) + { + subDeviceCount ++; + gpuMask &= (gpuMask-1); // remove lowest bit in gpuMask + } + return subDeviceCount; +} + +// +// gpumgrGetSubDeviceCountFromGpu +// ATTENTION: When using with SLI Next / RM Unlinked SLI, the +// subdevice count is always 1 for each GPU. This can cause +// bugs, buffer overflows with arrays based on subdevice instances as +// with RM Unlinked SLI: +// - subdevice count is always 1 (the GPUs are not linked) +// - GPU subdevice instance can be non zero +// For subdevice instance arrays, please use +// gpumgrGetSubDeviceMaxValuePlus1() +// +NvU32 +gpumgrGetSubDeviceCountFromGpu(OBJGPU *pGpu) +{ + NvU32 gpuMask = gpumgrGetGpuMask(pGpu); + NvU32 subDeviceCount = gpumgrGetSubDeviceCount(gpuMask); + + NV_ASSERT(subDeviceCount > 0); + return subDeviceCount; +} + +// +// gpumgrGetSubDeviceMaxValuePlus1 +// SLI disabled: return 1 +// SLI enabled with RM linked in SLI: returns 2 or more +// SLI enabled with RM unlinked: return current subdeviceInstance + 1 +// Life of the function: until a full transition to SLI Next / RM Unlinked SLI. 
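//
// Usage sketch (illustrative; PER_SUBDEV_STATE is a hypothetical per-subdevice
// structure): size subdevice-indexed storage with this value rather than with
// gpumgrGetSubDeviceCountFromGpu(), so a non-zero subdevice instance in
// unlinked SLI cannot index past the end of the array:
//
//     NvU32 numEntries = gpumgrGetSubDeviceMaxValuePlus1(pGpu);
//     PER_SUBDEV_STATE *pState = portMemAllocNonPaged(numEntries * sizeof(*pState));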
+// +NvU32 +gpumgrGetSubDeviceMaxValuePlus1(OBJGPU *pGpu) +{ + if (!IsSLIEnabled(pGpu)) + { + // SLI disabled: return 1 as all GPU subdevice instances are 0 + // Unkinked SLI: returns the current subdevice instance + 1 + return gpumgrGetSubDeviceInstanceFromGpu(pGpu) + 1; + } + else + { + // SLI Enabled in RM: The count of subdevice instances for that GPU/device + return gpumgrGetSubDeviceCountFromGpu(pGpu); + } +} + +static void +gpumgrSetAttachInfo(OBJGPU *pGpu, GPUATTACHARG *pAttachArg) +{ + NvU32 gpuId = NV0000_CTRL_GPU_INVALID_ID; + + if (pAttachArg->socDeviceArgs.specified) + { + NvU32 idx; + // This path is taken for T234D+ SOC devices. + + // + // TODO: This existing field is specifically used to safeguard + // iGPU-specific code paths within RM, and should actually be NV_FALSE for + // T234D+. + // + // See JIRA TDS-5101 for more details. + // + pGpu->bIsSOC = NV_TRUE; + + for (idx = 0; idx < SOC_DEV_MAPPING_MAX; idx++) + { + pGpu->deviceMappings[idx] = pAttachArg->socDeviceArgs.deviceMapping[idx]; + } + + pGpu->busInfo.iovaspaceId = pAttachArg->socDeviceArgs.iovaspaceId; + pGpu->busInfo.gpuPhysAddr = pGpu->deviceMappings[SOC_DEV_MAPPING_DISP].gpuNvPAddr; + pGpu->gpuDeviceMapCount = 1; + + // + // TODO bug 2100708: a fake DBDF is used on SOC to opt out of some + // RM paths that cause issues otherwise, see the bug for details. + // + pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc; + } + else if (pAttachArg->bIsSOC) + { + // This path is only taken for ARCH MODS iGPU verification. + + NV_ASSERT(sizeof(pGpu->deviceMappings) == sizeof(pAttachArg->socDeviceMappings)); + pGpu->bIsSOC = NV_TRUE; + pGpu->idInfo.PCIDeviceID = pAttachArg->socId; + pGpu->idInfo.PCISubDeviceID = pAttachArg->socSubId; + pGpu->busInfo.iovaspaceId = pAttachArg->iovaspaceId; + if (RMCFG_FEATURE_PLATFORM_MODS) + { + NV_ASSERT(sizeof(pGpu->deviceMappings) == sizeof(pAttachArg->socDeviceMappings)); + portMemCopy(pGpu->deviceMappings, sizeof(pGpu->deviceMappings), pAttachArg->socDeviceMappings, sizeof(pGpu->deviceMappings)); + pGpu->gpuDeviceMapCount = pAttachArg->socDeviceCount; + + // + // TODO bug 2100708: a fake DBDF is used on SOC to opt out of some + // RM paths that cause issues otherwise, see the bug for details. + // + pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc; + } + } + else + { + // + // Set this gpu's hardware register access address pointers + // from the contents of mappingInfo. + // + pGpu->bIsSOC = NV_FALSE; + + pGpu->deviceMappings[0].gpuNvAddr = pAttachArg->regBaseAddr; + pGpu->registerAccess.gpuFbAddr = pAttachArg->fbBaseAddr; + pGpu->busInfo.gpuPhysAddr = pAttachArg->devPhysAddr; + pGpu->busInfo.gpuPhysFbAddr = pAttachArg->fbPhysAddr; + pGpu->busInfo.gpuPhysInstAddr = pAttachArg->instPhysAddr; + pGpu->busInfo.gpuPhysIoAddr = pAttachArg->ioPhysAddr; + pGpu->busInfo.iovaspaceId = pAttachArg->iovaspaceId; + pGpu->busInfo.nvDomainBusDeviceFunc = pAttachArg->nvDomainBusDeviceFunc; + pGpu->deviceMappings[0].gpuNvLength = pAttachArg->regLength; + pGpu->fbLength = pAttachArg->fbLength; + pGpu->busInfo.IntLine = pAttachArg->intLine; + pGpu->gpuDeviceMapCount = 1; + + if ( ! pAttachArg->instBaseAddr ) + { + // + // The OS init goo didn't map a separate region for instmem. + // So instead use the 1M mapping in bar0. + // + pGpu->instSetViaAttachArg = NV_FALSE; + pGpu->registerAccess.gpuInstAddr = (GPUHWREG*)(((NvU8*)pGpu->deviceMappings[0].gpuNvAddr) + 0x00700000); // aka NV_PRAMIN. 
+ if (!pGpu->busInfo.gpuPhysInstAddr) + { + // + // Only use the bar0 window physical address if the OS didn't + // specify a bar2 physical address. + // + pGpu->busInfo.gpuPhysInstAddr = pGpu->busInfo.gpuPhysAddr + 0x00700000; // aka NV_PRAMIN + } + pGpu->instLength = 0x100000; // 1MB + } + else + { + pGpu->instSetViaAttachArg = NV_TRUE; + pGpu->registerAccess.gpuInstAddr = pAttachArg->instBaseAddr; + pGpu->instLength = pAttachArg->instLength; + } + } + + gpuId = gpuGenerate32BitId(gpuGetDomain(pGpu), gpuGetBus(pGpu), gpuGetDevice(pGpu)); + if (gpuId != NV0000_CTRL_GPU_INVALID_ID) + { + gpumgrSetGpuId(pGpu, gpuId); + } +} + +// +// gpumgrStatePreInitGpu & gpumgrStateInitGpu +// +// These routines handle unicast gpu initialization. +// +NV_STATUS +gpumgrStatePreInitGpu(OBJGPU *pGpu) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + if (FULL_GPU_SANITY_CHECK(pGpu)) + { + // pre-init phase done in UC mode + status = gpuStatePreInit(pGpu); + } + else + { + status = NV_ERR_GPU_IS_LOST; + DBG_BREAKPOINT(); + } + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +NV_STATUS +gpumgrStateInitGpu(OBJGPU *pGpu) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + // init phase + status = gpuStateInit(pGpu); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +NV_STATUS +gpumgrStateLoadGpu(OBJGPU *pGpu, NvU32 flags) +{ + NV_STATUS status; + + // LOCK: acquire GPUs lock + status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_INIT); + if (status == NV_OK) + { + // Load phase + status = gpuStateLoad(pGpu, flags); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + if (status != NV_OK) + goto gpumgrStateLoadGpu_exit; + +gpumgrStateLoadGpu_exit: + // save the init status for later client queries + gpumgrSetGpuInitStatus(pGpu->gpuId, status); + + return status; +} + +// +// gpumgrGetNextGpu +// +// This routine searches subDeviceMask for the next gpu by using +// the start index value as a beginning bit position. If a gpu is +// found, the start index value is bumped to the next bit position +// in the mask. +// +POBJGPU +gpumgrGetNextGpu(NvU32 subDeviceMask, NvU32 *pStartIndex) +{ + NvU32 i; + + if (*pStartIndex > NV_MAX_DEVICES) + { + *pStartIndex = NV_MAX_DEVICES; + return NULL; + } + + for (i = *pStartIndex; i < NV_MAX_DEVICES; i++) + { + if (subDeviceMask & NVBIT(i)) + { + *pStartIndex = i+1; + return gpumgrGetGpu(i); + } + } + + *pStartIndex = NV_MAX_DEVICES; + return NULL; +} + + +// +// gpumgrIsGpuPointerValid - Validates pGpu without dereferencing it. 
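//
// The function below (and most of the lookup helpers that follow) uses the
// standard attached-GPU iteration idiom, shown here in isolation as a sketch:
//
//     NvU32 gpuCount, gpuMask, gpuIndex = 0;
//     OBJGPU *pIterGpu;
//
//     gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask);
//     while ((pIterGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL)
//     {
//         // per-GPU work here
//     }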
+// +NvBool +gpumgrIsGpuPointerValid(OBJGPU *pGpu) +{ + OBJGPU *pTempGpu = NULL; + NvU32 gpuMask = 0; + NvU32 gpuCount = 0; + NvU32 gpuIndex = 0; + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + + while(pTempGpu) + { + if (pTempGpu->getProperty(pTempGpu, PDB_PROP_GPU_STATE_INITIALIZED)) + { + if (pTempGpu == pGpu) + { + return NV_TRUE; + } + } + + pTempGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex); + } + + return NV_FALSE; +} + +NvBool gpumgrIsGpuDisplayParent(OBJGPU *pGpu) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + NvBool rc = NV_FALSE; + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NV_FALSE); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + // If there's only one GPU in the device, then of course it's the display parent! + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + rc = NV_TRUE; + } + // + // If the gpuInstance argument is the first gpuInstance in the ordering, + // then it's the display parent! + // + else if (pGpu->gpuInstance == pGpuGrp->SliLinkOrder[0].gpuInstance) + { + rc = NV_TRUE; + } + + // Otherwise it isn't. + return rc; +} + +OBJGPU *gpumgrGetDisplayParent(OBJGPU *pGpu) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + NvU32 gpuCount; + NvU32 gpuMask; + NvU32 gpuInstance; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + gpuCount = gpumgrGetSubDeviceCount(gpuMask); + + if (gpuCount > 1) + { + gpuInstance = pGpuGrp->SliLinkOrder[0].gpuInstance; + pGpu = gpumgrGetGpu(gpuInstance); + } + + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + return pGpu; +} + +// +// gpumgrGetProbedGpuIds +// +// This routine services the NV0000_CTRL_GPU_GET_PROBED_IDS command. +// The passed in gpuIds table is filled in with valid gpuId info +// for each probed gpu. Invalid entries in the table are set to the +// invalid id value. +// +NV_STATUS +gpumgrGetProbedGpuIds(NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuIdsParams) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i, j, k; + + ct_assert(NV_MAX_DEVICES == NV0000_CTRL_GPU_MAX_PROBED_GPUS); + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0, j = 0, k = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId != NV0000_CTRL_GPU_INVALID_ID) + { + if (pGpuMgr->probedGpus[i].bExcluded) + pGpuIdsParams->excludedGpuIds[k++] = pGpuMgr->probedGpus[i].gpuId; + else + pGpuIdsParams->gpuIds[j++] = pGpuMgr->probedGpus[i].gpuId; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + for (i = j; i < NV_ARRAY_ELEMENTS(pGpuIdsParams->gpuIds); i++) + pGpuIdsParams->gpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + + for (i = k; i < NV_ARRAY_ELEMENTS(pGpuIdsParams->excludedGpuIds); i++) + pGpuIdsParams->excludedGpuIds[i] = NV0000_CTRL_GPU_INVALID_ID; + + return NV_OK; +} + +// +// gpumgrGetAttachedGpuIds +// +// This routine services the NV0000_CTRL_GPU_GET_ATTACHED_IDS command. +// The passed in gpuIds table is filled in with valid gpuId info +// for each attached gpu. Any remaining entries in the table are set to +// the invalid id value. 
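//
// Illustrative sketch of how a caller can walk the returned table ("params" is
// a hypothetical NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS that has already been
// filled in): entries are valid up to the first NV0000_CTRL_GPU_INVALID_ID.
//
//     NvU32 i;
//     for (i = 0; i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS; i++)
//     {
//         if (params.gpuIds[i] == NV0000_CTRL_GPU_INVALID_ID)
//             break;
//         // params.gpuIds[i] is the gpuId of an attached GPU
//     }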
+// +NV_STATUS +gpumgrGetAttachedGpuIds(NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuIdsParams) +{ + OBJGPU *pGpu; + NvU32 gpuAttachCnt, gpuAttachMask, i, cnt; + NvU32 *pGpuIds = &pGpuIdsParams->gpuIds[0]; + + // fill the table w/valid entries + gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + for (cnt = 0, i = 0; i < NV_MAX_DEVICES; i++) + { + if (gpuAttachMask & NVBIT(i)) + { + pGpu = gpumgrGetGpu(i); + pGpuIds[cnt++] = pGpu->gpuId; + } + } + + // invalidate rest of the entries + while (cnt < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) + pGpuIds[cnt++] = NV0000_CTRL_GPU_INVALID_ID; + + return NV_OK; +} + +// +// gpumgrGetSubDeviceDeviceInstanceFromGpu +// +// Given a pGpu return the corresponding subdevice instance value. +// +NvU32 +gpumgrGetSubDeviceInstanceFromGpu(OBJGPU *pGpu) +{ + return pGpu->subdeviceInstance; +} + +// +// gpumgrGetParentGPU +// +POBJGPU +gpumgrGetParentGPU(OBJGPU *pGpu) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + NvU32 gpuMask; + + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL); + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + if (gpumgrIsSubDeviceCountOne(gpuMask)) + { + return pGpu; + } + else + { + return gpugrpGetParentGpu(pGpuGrp); + } +} + +// +// gpumgrSetParentGPU +// +void +gpumgrSetParentGPU(OBJGPU *pGpu, OBJGPU *pParentGpu) +{ + OBJGPUGRP *pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + + NV_ASSERT_OR_RETURN_VOID(pGpuGrp != NULL); + gpugrpSetParentGpu(pGpuGrp, pParentGpu); +} + +// +// gpumgrGetGpuFromId +// +// Find the specified gpu from it's gpuId. +// +POBJGPU +gpumgrGetGpuFromId(NvU32 gpuId) +{ + OBJGPU *pGpu; + NvU32 gpuAttachCnt, gpuAttachMask; + NvU32 i; + + gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + for (i = 0; i < NV_MAX_DEVICES; i++) + { + if (gpuAttachMask & NVBIT(i)) + { + pGpu = gpumgrGetGpu(i); + + // found it + if (pGpu->gpuId == gpuId) + return pGpu; + } + } + + // didn't find it + return NULL; +} + +// +// gpumgrGetGpuFromUuid() +// +// Get GPUOBJECT from UUID. Returns NULL if it cannot find a GPU with the +// requested UUID. +// +POBJGPU +gpumgrGetGpuFromUuid(const NvU8 *pGpuUuid, NvU32 flags) +{ + OBJGPU *pGpu; + NvU32 attachedGpuCount; + NvU32 attachedGpuMask; + NvU32 gpuIndex; + NvU32 gidStrLen; + NvU8 *pGidString = NULL; + NV_STATUS rmStatus; + + // get all attached GPUs + rmStatus = gpumgrGetGpuAttachInfo(&attachedGpuCount, &attachedGpuMask); + + gpuIndex = 0; + + for(pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex); + pGpu != NULL; + pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex)) + { + // + // get the GPU's UUID + // + // This implementation relies on the fact that gpuGetGidInfo() only + // allocates memory if it succeeds. + // + rmStatus = gpuGetGidInfo(pGpu, &pGidString, &gidStrLen, flags); + if (NV_OK != rmStatus) + return NULL; + + // check if it matches + if (0 == portMemCmp(pGidString, pGpuUuid, gidStrLen)) + { + portMemFree(pGidString); + return pGpu; + } + else + { + // if it doesn't match, clean up allocated memory for next iteration + portMemFree(pGidString); + } + } + + return NULL; // Failed to find a GPU with the requested UUID +} + +// +// gpumgrGetGpuFromBusInfo +// +// Find the specified GPU using its PCI bus info. 
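//
// Together with gpumgrGetGpuFromId() and gpumgrGetGpuFromUuid() above, this
// gives three lookup paths into the attached-GPU set (sketch only; uuidFlags
// selects the GID format and is passed through to gpuGetGidInfo()):
//
//     pGpu = gpumgrGetGpuFromId(gpuId);
//     pGpu = gpumgrGetGpuFromUuid(pUuid, uuidFlags);
//     pGpu = gpumgrGetGpuFromBusInfo(domain, bus, device);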
+// +POBJGPU +gpumgrGetGpuFromBusInfo(NvU32 domain, NvU8 bus, NvU8 device) +{ + NV_STATUS status; + OBJGPU *pGpu; + NvU32 attachedGpuCount; + NvU32 attachedGpuMask; + NvU32 gpuIndex = 0; + + status = gpumgrGetGpuAttachInfo(&attachedGpuCount, &attachedGpuMask); + NV_ASSERT_OR_RETURN(status == NV_OK, NULL); + + for (pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex); + pGpu != NULL; + pGpu = gpumgrGetNextGpu(attachedGpuMask, &gpuIndex)) + { + if ((gpuGetDomain(pGpu) == domain) && + (gpuGetBus(pGpu) == bus) && + (gpuGetDevice(pGpu) == device)) + { + return pGpu; + } + } + + return NULL; +} + +// +// gpumgrSetGpuId +// +// This routine assigns the specified gpuId to the specified gpu. +// +void +gpumgrSetGpuId(OBJGPU *pGpu, NvU32 gpuId) +{ + pGpu->gpuId = gpuId; + + // if boardId is unassigned then give it a default value now + if (pGpu->boardId == 0xffffffff) + { + pGpu->boardId = gpuId; + } +} + +// +// gpumgrGetGpuIdInfo +// +// Special purpose routine that handles NV0000_CTRL_CMD_GPU_GET_ID_INFO +// requests from clients. +// NV0000_CTRL_CMD_GPU_GET_ID_INFO is deprecated in favour of +// NV0000_CTRL_CMD_GPU_GET_ID_INFO_V2, per comments in ctrl0000gpu.h +// +NV_STATUS +gpumgrGetGpuIdInfoV2(NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuInfo) +{ + OBJGPU *pGpu; + NvU32 deviceInstance, subDeviceInstance; + + // start by making sure client request specifies a valid gpu + pGpu = gpumgrGetGpuFromId(pGpuInfo->gpuId); + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_INFO, + "gpumgrGetGpuInfoV2: bad gpuid spec: 0x%x\n", + pGpuInfo->gpuId); + return NV_ERR_INVALID_ARGUMENT; + } + + LOCK_ASSERT_AND_RETURN(rmGpuLockIsOwner()); + + // + // We have a valid gpuInstance, so now let's get the corresponding + // deviceInstance/subDeviceInstance pair. + // + deviceInstance = gpuGetDeviceInstance(pGpu); + if (deviceInstance == NV_MAX_DEVICES) + { + NV_PRINTF(LEVEL_ERROR, + "gpumgrGetGpuInfoV2: deviceInstance not found\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + subDeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + pGpuInfo->gpuInstance = pGpu->gpuInstance; + pGpuInfo->deviceInstance = deviceInstance; + pGpuInfo->subDeviceInstance = subDeviceInstance; + pGpuInfo->boardId = pGpu->boardId; + + // + // Setup gpu info flags; see ctrl0000gpu.h for list of flags. + // + pGpuInfo->gpuFlags = 0; + pGpuInfo->numaId = NV0000_CTRL_NO_NUMA_NODE; + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)) + { + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _ATS_ENABLED, + NV0000_CTRL_GPU_ID_INFO_ATS_ENABLED_TRUE); + pGpuInfo->numaId = pGpu->numaNodeId; + } + + // is this gpu in use? + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _IN_USE, gpuIsInUse(pGpu)); + + // is this gpu part of a sli device? + pGpuInfo->gpuFlags |= DRF_NUM(0000, _CTRL_GPU_ID_INFO, _LINKED_INTO_SLI_DEVICE, IsSLIEnabled(pGpu)); + + // is this gpu a mobile gpu? + if (IsMobile(pGpu)) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _MOBILE, _TRUE); + } + + // is this gpu the boot primary? 
+ if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE)) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _BOOT_MASTER, _TRUE); + } + + // is this GPU part of an SOC + if (pGpu->bIsSOC) + { + pGpuInfo->gpuFlags |= DRF_DEF(0000, _CTRL_GPU_ID_INFO, _SOC, _TRUE); + } + + // GPU specific SLI status + pGpuInfo->sliStatus = pGpu->sliStatus; + + NV_PRINTF(LEVEL_INFO, + "gpumgrGetGpuInfoV2: gpu[0x%x]: device 0x%x subdevice 0x%x\n", + pGpuInfo->gpuId, pGpuInfo->deviceInstance, + pGpuInfo->subDeviceInstance); + + return NV_OK; +} +NV_STATUS +gpumgrGetGpuIdInfo(NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuInfo) +{ + NV_STATUS status; + NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS pGpuInfoV2 = {0}; + pGpuInfoV2.gpuId = pGpuInfo->gpuId; + + status = gpumgrGetGpuIdInfoV2(&pGpuInfoV2); + + if (status != NV_OK) + { + return status; + } + pGpuInfo->gpuFlags = pGpuInfoV2.gpuFlags; + pGpuInfo->deviceInstance = pGpuInfoV2.deviceInstance; + pGpuInfo->subDeviceInstance = pGpuInfoV2.subDeviceInstance; + pGpuInfo->sliStatus = pGpuInfoV2.sliStatus; + pGpuInfo->boardId = pGpuInfoV2.boardId; + pGpuInfo->gpuInstance = pGpuInfoV2.gpuInstance; + pGpuInfo->numaId = pGpuInfoV2.numaId; + + // If we get a non-NULL szName parameter, let os-dependent code + // fill it in from information we already have. + if (NvP64_VALUE(pGpuInfo->szName) != NULL) + { + status = osDeviceClassToDeviceName(pGpuInfo->deviceInstance, + NvP64_VALUE(pGpuInfo->szName)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "gpumgrGetGpuInfo: deviceInstance to szName translation failed\n"); + return status; + } + } + + return status; +} + +// +// gpumgrGetGpuInitStatus +// +// Special purpose routine that handles NV0000_CTRL_CMD_GET_GPU_INIT_STATUS +// requests from clients. +// +NV_STATUS +gpumgrGetGpuInitStatus(NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatus) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + NV_STATUS rmStatus; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == pGpuInitStatus->gpuId) + { + if (pGpuMgr->probedGpus[i].bInitAttempted) + { + pGpuInitStatus->status = pGpuMgr->probedGpus[i].initStatus; + rmStatus = NV_OK; + } + else + { + // + // No init has been attempted on this GPU yet, so this request + // doesn't make any sense. + // + rmStatus = NV_ERR_INVALID_STATE; + } + goto done; + } + } + + // We couldn't find a probed gpuId matching the requested one. + rmStatus = NV_ERR_INVALID_ARGUMENT; +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return rmStatus; +} + +NV_STATUS +gpumgrGetProbedGpuDomainBusDevice(NvU32 gpuId, NvU64 *gpuDomainBusDevice) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + NV_STATUS rmStatus; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + *gpuDomainBusDevice = pGpuMgr->probedGpus[i].gpuDomainBusDevice; + rmStatus = NV_OK; + goto done; + } + } + + // + // We couldn't find a probed gpuId matching the requested one. + // + // This used to return a generic NV_ERR_INVALID_ARGUMENT, but we want to be + // more specific as at least nvml wants to be able to tell this case apart + // from other errors. This case is expected when GPUs are removed from the + // driver (e.g. 
through unbind on Linux) after a client queries for the + // probed GPUs, but before getting the PCI info for all of them. + // + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + +done: + portSyncMutexRelease(pGpuMgr->probedGpusLock); + return rmStatus; +} + +// +// gpumgrSetGpuInitStatus +// +// Marks initialization of the gpu in question as attempted and stores the +// status. +// +void +gpumgrSetGpuInitStatus(NvU32 gpuId, NV_STATUS status) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); i++) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + // Overwrite any previous init status + pGpuMgr->probedGpus[i].bInitAttempted = NV_TRUE; + pGpuMgr->probedGpus[i].initStatus = status; + break; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); +} + +// +// gpumgrUpdateBoardId +// +// Update gpu boardIds. By default the boardId will already be set to +// the gpuId. This routine fetches the upstream bridge port and sets the +// new boardId to the pGpu +// +void +gpumgrUpdateBoardId_IMPL(OBJGPU *pGpu) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + void *handleUp; + NvU16 vendorIDUp, deviceIDUp; + NvU8 busUp, deviceUp, funcUp; + NvU32 domain; + NvU32 boardId; + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BEHIND_BRIDGE)) + { + return; + } + + domain = gpuGetDomain(pGpu); + handleUp = clFindBrdgUpstreamPort(pGpu, pCl, NV_TRUE, + &busUp, &deviceUp, &funcUp, + &vendorIDUp, &deviceIDUp, + NULL); + if (!handleUp) + return; + + if (!(IS_SUPPORTED_PCIE_SWITCH(vendorIDUp, deviceIDUp))) + { + return; + } + + boardId = gpuGenerate32BitId(domain, busUp, deviceUp); + pGpu->boardId = boardId; +} + +// +// gpumgrGetDefaultPrimaryGpu +// +// This routine looks at the set of GPUs and picks a the primary (parent) +// with the following rules, in this order: +// 1- If a primary GPU has been passed in an SLI config by a client +// 2- If there is a boot primary in the GPU mask +// 3- The first VGA device attached (not 3d controller) +// +NvU32 +gpumgrGetDefaultPrimaryGpu +( + NvU32 gpuMask +) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuInstance; + + if (gpuMask == 0) + { + NV_ASSERT(gpuMask); + return 0; + } + + // Find masterFromSLIConfig, set when a RM client passes a primary GPU + // index from a SLI config + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (pGpu->masterFromSLIConfig) + { + break; + } + } + + // default to boot primary + if (pGpu == NULL) + { + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE)) + { + break; + } + } + } + + if (pGpu) + { + return gpuGetInstance(pGpu); + } + + // otherwise the primary is the first non 3d controller in the set attached to the RM + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_3D_CONTROLLER)) + { + break; + } + } + + if (!pGpu) + { + // The GPU mask contains only 3d Controllers. + // Choose first one in the set attached to the RM. 
+ gpuInstance = 0; + pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + } + + if (pGpu == NULL) + { + return 0; // This should never happen + } + + return gpuGetInstance(pGpu); +} + +void +gpumgrServiceInterrupts_IMPL(NvU32 gpuMask, MC_ENGINE_BITVECTOR *engineMask, NvBool bExhaustive) +{ + OBJGPU *pGpu; + NvU32 gpuInstance = 0; + + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (gpuIsGpuFullPower(pGpu)) + { + Intr *pIntr = GPU_GET_INTR(pGpu); + + // + // On SLI, one OBJGPU's StateInit functions could attempt to service + // interrupts on another OBJGPU which has not yet started StateInit. + // However Intr is not prepared to service interrupts until after + // intrStateInit. To avoid calling in too early, check that the + // interrupt table is initialized. + // + if (pIntr->pIntrTable == NULL) + { + continue; + } + + // + // Service interrupts for the specified engines now. + // A non-false value for 'bExhaustive' indicates that intrServiceStallList + // should repreatedly service all specified engines, until it finds + // no further pending interrupt work remains on those engines. + // + intrServiceStallList_HAL(pGpu, pIntr, engineMask, bExhaustive); + } + } +} + +NV_STATUS +gpumgrGetGpuLockAndDrPorts +( + OBJGPU *pGpu, + OBJGPU *pPeerGpu, + NvU32 *pPinsetOut, + NvU32 *pPinsetIn +) +{ + *pPinsetOut = 0; + *pPinsetIn = 0; + return NV_OK; +} + +// +// Stores the address of the boot primary in pGpu +// Returns NV_OK on success NV_ERR_GENERIC otherwise. +// +NV_STATUS +gpumgrGetBootPrimary(OBJGPU **ppGpu) +{ + NvU32 gpuCount, gpuMask, idx1; + OBJGPU *pGpu = NULL; + + // Find boot primary + idx1 = 0; + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + while ((pGpu = gpumgrGetNextGpu(gpuMask, &idx1)) != NULL) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_PRIMARY_DEVICE)) + break; + } + *ppGpu = pGpu; + + // No boot primary + if (pGpu == NULL) + { + return NV_ERR_GENERIC; + } + + return NV_OK; +} + +// +// Returns the mGpu +// +OBJGPU *gpumgrGetMGpu (void) +{ + OBJGPU *pGpu; + NvU32 gpuCount, gpuMask, gpuIndex = 0; + // Parse through all the GPUs + + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex))) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_HYBRID_MGPU)) + { + break; + } + } + + return pGpu; +} + +// +// Get PhysFbAddr for the given GPU which may be different if +// the GPU is broadcast or chipset broadcast are enabled or not: +// - BC GPU + no CL BC -> returns gpu address +// - UC GPU -> returns GPU address +// - BC GPU + CL BC -> returns broadcast address +// +RmPhysAddr gpumgrGetGpuPhysFbAddr(OBJGPU *pGpu) +{ + RmPhysAddr physFbAddr; + + physFbAddr = pGpu->busInfo.gpuPhysFbAddr; + + NV_ASSERT(physFbAddr); + return physFbAddr; +} + + +// +// Get GPU object from subdevice instance +// +POBJGPU +gpumgrGetGpuFromSubDeviceInst(NvU32 deviceInst, NvU32 subDeviceInst) +{ + OBJGPU *pGpu = NULL; + OBJGPUGRP *pGpuGrp = NULL; + NvU32 gpuInst = 0; + NvU32 gpuMask; + + pGpuGrp = gpumgrGetGpuGrpFromInstance(deviceInst); + NV_ASSERT_OR_RETURN(pGpuGrp != NULL, NULL); + + gpuMask = gpugrpGetGpuMask(pGpuGrp); + + // check for single GPU case + if (gpumgrGetSubDeviceCount(gpuMask) == 1) + return gpumgrGetNextGpu(gpuMask, &gpuInst); + + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInst)) != NULL) + { + if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) == subDeviceInst) + { + break; + } + } + + NV_ASSERT(pGpu); + + return pGpu; +} + +/*! 
+ * @brief sets the device instance pGpu->deviceInstance for the GPUs indicated by the gpu mask + * + * Only remove the device instance if it is the last GPU to be removed. + * + * At RM initialization we fill in the software feature values for this GPU. + * The values are determined from the software feature database + * + * @param[in] gpuMask NvU32 value + * + * @return NV_OK or NV_ERR_OBJECT_NOT_FOUND if no GPU has been found + * + */ +NV_STATUS +gpumgrAddDeviceInstanceToGpus(NvU32 gpuMask) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS rmStatus = NV_ERR_OBJECT_NOT_FOUND; + OBJGPU *pGpu = NULL; + NvU32 i, gpuIndex = 0; + OBJGPUGRP *pGpuGrp = NULL; + + // Add the device instance to the GPU objects in the mask + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex))) + { + rmStatus = NV_ERR_OBJECT_NOT_FOUND; + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->pGpuGrpTable); i++) + { + pGpuGrp = pGpuMgr->pGpuGrpTable[i]; + // if it contains the specified gpu... + if ((pGpuGrp != NULL) && + (gpugrpGetGpuMask(pGpuGrp) & NVBIT(pGpu->gpuInstance))) + { + pGpu->deviceInstance = i; + rmStatus = NV_OK; + break; + } + } + NV_ASSERT_OK_OR_RETURN(rmStatus); + } + + return rmStatus; +} + +/*! + * @brief Retrieves the OBJGPUGRP pointer given the instance + * + * @param[in] gpugrpInstance GPUGRP instance + * + * @return GPUGRP pointer on success, NULL on error + * + */ +POBJGPUGRP +gpumgrGetGpuGrpFromInstance +( + NvU32 gpugrpInstance +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_ASSERT_OR_RETURN(gpugrpInstance < NV_MAX_DEVICES, NULL); + return pGpuMgr->pGpuGrpTable[gpugrpInstance]; +} + +/*! + * @brief Retrieves the OBJGPUGRP pointer given the GPU pointer. + * + * @param[in] pGpu GPU object pointer + * + * @return OBJGPUGRP pointer on success, NULL on error + * + */ +POBJGPUGRP +gpumgrGetGpuGrpFromGpu +( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 gpugrpInstance = gpuGetDeviceInstance(pGpu); + NV_ASSERT_OR_RETURN(gpugrpInstance < NV_MAX_DEVICES, NULL); + + return pGpuMgr->pGpuGrpTable[gpugrpInstance]; +} + +/*! + * @brief Constructs the GPUGRP object for the given instance + * + * @param[in] pGpu GPU object pointer + * @param[in] gpuMask GpuMask corresponding to this GPUGRP + * @param[out] ppGpuGrp Newly created gpugrp object pointer + * + * @return NV_OK on success, appropriate error on failure. + * + */ +NV_STATUS +gpumgrConstructGpuGrpObject +( + OBJGPUMGR *pGpuMgr, + NvU32 gpuMask, + OBJGPUGRP **ppGpuGrp +) +{ + NV_STATUS status; + + status = objCreate(ppGpuGrp, pGpuMgr, OBJGPUGRP); + if (NV_OK != status) + { + return status; + } + + status = gpugrpCreate(*ppGpuGrp, gpuMask); + if (NV_OK != status) + { + return status; + } + + return NV_OK; +} + +/*! + * @brief Enter/exit "drain" state on a given GPU + * + * @param[in] gpuId Platform specific GPU Id + * @param[in] bEnable NV_TRUE: enter, NV_FALSE: exit + * @param[in] bRemove Ask the OS to forget the GPU, once quiescent + * @param[in] bLinkDisable Shut down the upstream PCIe link after the removal. + * This is done in user-land, we just check that the + * GPU is in the right state. + * + * @return NV_OK on success, appropriate error on failure. 
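 *
 * Illustrative sequence (a sketch; gpuId and the flag values are placeholders):
 * a privileged client preparing a GPU for hot-unplug might request
 *     gpumgrModifyGpuDrainState(gpuId, NV_TRUE, NV_TRUE, NV_FALSE);
 * and confirm the resulting state with
 *     gpumgrQueryGpuDrainState(gpuId, &bEnable, &bRemove);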
+ */ +NV_STATUS +gpumgrModifyGpuDrainState + +( + NvU32 gpuId, + NvBool bEnable, + NvBool bRemove, + NvBool bLinkDisable +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu; + NvBool bAttached; + NvBool bStateChange = NV_FALSE; + NvU32 i; + NvU32 domain = 0; + NvU8 bus = 0; + NvU8 device = 0; + + if (bRemove && !osRemoveGpuSupported()) + { + return NV_ERR_NOT_SUPPORTED; + } + + bAttached = ((pGpu = gpumgrGetGpuFromId(gpuId)) != NULL); + + if (bEnable && bLinkDisable && bAttached) + { + return NV_ERR_IN_USE; + } + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); ++i) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + bStateChange = pGpuMgr->probedGpus[i].bDrainState != bEnable; + pGpuMgr->probedGpus[i].bDrainState = bEnable; + pGpuMgr->probedGpus[i].bRemoveIdle = bEnable && bRemove; + domain = gpuDecodeDomain(pGpuMgr->probedGpus[i].gpuDomainBusDevice); + bus = gpuDecodeBus(pGpuMgr->probedGpus[i].gpuDomainBusDevice); + device = gpuDecodeDevice(pGpuMgr->probedGpus[i].gpuDomainBusDevice); + break; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + if (i == NV_MAX_DEVICES) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // If the initial drain state (characterized by enabling draining without + // setting the remove flag) is already enabled, multiple clients may be + // trying to simultaneously manage drain state. Only return success for the + // first to allow them to filter out the others. + // + if (bEnable && !bRemove && !bStateChange) + { + return NV_ERR_IN_USE; + } + + if (bEnable && bRemove && !bAttached) + { + osRemoveGpu(domain, bus, device); + } + + return NV_OK; +} + +/*! + * @brief Query "drain"/remove state on a given GPU + * + * @param[in] gpuId Platform specific GPU Id + * @param[out] pBEnable Drain state ptr + * @param[out] pBRemove Remove flag ptr + * + * @return NV_OK on success, appropriate error on failure. + */ +NV_STATUS +gpumgrQueryGpuDrainState + +( + NvU32 gpuId, + NvBool *pBEnable, + NvBool *pBRemove +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); ++i) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + if (pBEnable != NULL) + { + *pBEnable = pGpuMgr->probedGpus[i].bDrainState; + } + + if (pBRemove != NULL) + { + *pBRemove = pGpuMgr->probedGpus[i].bRemoveIdle; + } + + break; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + // + // This used to return a generic NV_ERR_INVALID_ARGUMENT on error, but we + // want to be more specific as at least nvml wants to be able to tell this + // case apart from other errors. This case is expected when GPUs are + // removed from the driver (e.g. through unbind on Linux) after a client + // queries for the probed GPUs, but before getting the PCI info for all of + // them. + // + return (i == NV_MAX_DEVICES) ? NV_ERR_OBJECT_NOT_FOUND : NV_OK; +} + +/*! +* @brief Retrieves the group gpuMask that contains this gpuInstance. 
+* Used for locking all gpus under the same device together
+*
+* @param[in] gpuInstance: unique Index per GPU
+*
+* @return gpuMask: mask of all GPUs that are in the same group
+*
+*/
+NvU32
+gpumgrGetGrpMaskFromGpuInst
+(
+    NvU32 gpuInst
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+
+    NV_ASSERT_OR_RETURN(gpuInst < NV_MAX_DEVICES, 0);
+
+    return pGpuMgr->gpuInstMaskTable[gpuInst];
+}
+
+/*!
+* @brief Updates per GPU instance table to contain correct group mask
+*
+* @param[in] gpuMask: mask of all GPUs that are in the same group
+*
+*/
+void
+gpumgrAddDeviceMaskToGpuInstTable
+(
+    NvU32 gpuMask
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    int gpuInst;
+    NvU32 tempGpuMask = gpuMask;
+
+    for (gpuInst = 0; (tempGpuMask != 0) && (gpuInst < NV_MAX_DEVICES); gpuInst++)
+    {
+        if (NVBIT(gpuInst) & gpuMask)
+            pGpuMgr->gpuInstMaskTable[gpuInst] = gpuMask;
+
+        tempGpuMask &= ~NVBIT(gpuInst);
+    }
+}
+
+/*!
+* @brief Clears group mask from per GPU instance table (when group is destroyed)
+*
+* @param[in] gpuMask: gpu group mask being torn down
+*
+*/
+void
+gpumgrClearDeviceMaskFromGpuInstTable
+(
+    NvU32 gpuMask
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    int gpuInst;
+    NvU32 tempGpuMask = gpuMask;
+
+    for (gpuInst = 0; (tempGpuMask != 0) && (gpuInst < NV_MAX_DEVICES); gpuInst++)
+    {
+        if (NVBIT(gpuInst) & gpuMask)
+            pGpuMgr->gpuInstMaskTable[gpuInst] = 0;
+
+        tempGpuMask &= ~NVBIT(gpuInst);
+    }
+}
+
+/*!
+* @brief Add an nvlink topo entry by a GPU's PCI DomainBusDevice if not already present
+*
+* @param[in] DomainBusDevice: the PCI DomainBusDevice for the gpu to be registered
+*
+*/
+void
+gpumgrAddSystemNvlinkTopo_IMPL
+(
+    NvU64 DomainBusDevice
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys);
+    NvU32 i;
+
+    if (gpumgrGetSystemNvlinkTopo(DomainBusDevice, NULL))
+    {
+        // This gpu is already registered
+        return;
+    }
+
+    for (i = 0; i < NV_MAX_DEVICES; i++)
+    {
+        // add new gpu entry into the first slot available
+        if (!pGpuMgr->nvlinkTopologyInfo[i].valid)
+        {
+            pGpuMgr->nvlinkTopologyInfo[i].valid = NV_TRUE;
+            pGpuMgr->nvlinkTopologyInfo[i].DomainBusDevice = DomainBusDevice;
+            return;
+        }
+    }
+}
+
+/*!
+* @brief Finds nvlinkTopologyInfo entry info based on DomainBusDevice +* +* @param[in] DomainBusDevice: the PCI DomainBusDevice for the gpu to be registered +* @param[out] pTopoParams: topology params found for the specified GPU +* +* @returns NV_TRUE if entry found +* NV_FALSE otherwise +*/ +NvBool +gpumgrGetSystemNvlinkTopo_IMPL +( + NvU64 DomainBusDevice, + PNVLINK_TOPOLOGY_PARAMS pTopoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i, idx; + + for (i = 0; i < NV_MAX_DEVICES; i++) + { + // + // Choose the correct GPU by comparing PCI BusDomainDevice + // This ensures we are using the same GPU across gpu load/unload + // + if ((pGpuMgr->nvlinkTopologyInfo[i].valid) && (pGpuMgr->nvlinkTopologyInfo[i].DomainBusDevice == DomainBusDevice)) + { + if (pTopoParams) + { + pTopoParams->sysmemLinks = pGpuMgr->nvlinkTopologyInfo[i].params.sysmemLinks; + pTopoParams->maxLinksPerPeer = pGpuMgr->nvlinkTopologyInfo[i].params.maxLinksPerPeer; + pTopoParams->bSymmetric = pGpuMgr->nvlinkTopologyInfo[i].params.bSymmetric; + // Pascal Only + pTopoParams->numLinks = pGpuMgr->nvlinkTopologyInfo[i].params.numLinks; + // Volta+ + pTopoParams->numPeers = pGpuMgr->nvlinkTopologyInfo[i].params.numPeers; + pTopoParams->bSwitchConfig = pGpuMgr->nvlinkTopologyInfo[i].params.bSwitchConfig; + // Ampere+ + for (idx = 0; idx < NV_CE_PCE2LCE_CONFIG__SIZE_1_MAX; idx++) + { + pTopoParams->maxPceLceMap[idx] = pGpuMgr->nvlinkTopologyInfo[i].params.maxPceLceMap[idx]; + } + for (idx = 0; idx < NV_CE_GRCE_CONFIG__SIZE_1; idx++) + { + pTopoParams->maxGrceConfig[idx] = pGpuMgr->nvlinkTopologyInfo[i].params.maxGrceConfig[idx]; + } + pTopoParams->maxExposeCeMask = pGpuMgr->nvlinkTopologyInfo[i].params.maxExposeCeMask; + pTopoParams->maxTopoIdx = pGpuMgr->nvlinkTopologyInfo[i].params.maxTopoIdx; + for (idx = 0; idx < NV_CE_MAX_HSHUBS; idx++) + { + pTopoParams->pceAvailableMaskPerHshub[idx] = pGpuMgr->nvlinkTopologyInfo[i].params.pceAvailableMaskPerHshub[idx]; + } + } + return NV_TRUE; + } + } + + return NV_FALSE; +} + +/*! 
+* @brief Update the nvlinkTopologyInfo entry +* +* @param[in] DomainBusDevice: the PCI DomainBusDevice for the gpu to be registered +* @param[in] pTopoParams: topology params to update the cache with +* +*/ +void +gpumgrUpdateSystemNvlinkTopo_IMPL +( + NvU64 DomainBusDevice, + PNVLINK_TOPOLOGY_PARAMS pTopoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i, idx; + + for (i = 0; i < NV_MAX_DEVICES; i++) + { + // + // Choose the correct GPU by comparing PCI BusDomainDevice + // This ensures we are using the same GPU across gpu load/unload + // + if ((pGpuMgr->nvlinkTopologyInfo[i].valid) && (pGpuMgr->nvlinkTopologyInfo[i].DomainBusDevice == DomainBusDevice)) + { + pGpuMgr->nvlinkTopologyInfo[i].params.sysmemLinks = pTopoParams->sysmemLinks; + pGpuMgr->nvlinkTopologyInfo[i].params.maxLinksPerPeer = pTopoParams->maxLinksPerPeer; + pGpuMgr->nvlinkTopologyInfo[i].params.numLinks = pTopoParams->numLinks; // Pascal only + pGpuMgr->nvlinkTopologyInfo[i].params.numPeers = pTopoParams->numPeers; + pGpuMgr->nvlinkTopologyInfo[i].params.bSymmetric = pTopoParams->bSymmetric; + pGpuMgr->nvlinkTopologyInfo[i].params.bSwitchConfig = pTopoParams->bSwitchConfig; + // Ampere + only + for (idx = 0; idx < NV_CE_PCE2LCE_CONFIG__SIZE_1_MAX; idx++) + { + pGpuMgr->nvlinkTopologyInfo[i].params.maxPceLceMap[idx] = pTopoParams->maxPceLceMap[idx]; + } + for (idx = 0; idx < NV_CE_GRCE_CONFIG__SIZE_1; idx++) + { + pGpuMgr->nvlinkTopologyInfo[i].params.maxGrceConfig[idx] = pTopoParams->maxGrceConfig[idx]; + } + pGpuMgr->nvlinkTopologyInfo[i].params.maxExposeCeMask = pTopoParams->maxExposeCeMask; + pGpuMgr->nvlinkTopologyInfo[i].params.maxTopoIdx = pTopoParams->maxTopoIdx; + for (idx = 0; idx < NV_CE_MAX_HSHUBS; idx++) + { + pGpuMgr->nvlinkTopologyInfo[i].params.pceAvailableMaskPerHshub[idx] = pTopoParams->pceAvailableMaskPerHshub[idx]; + } + return; + } + } +} + +/*! +* @brief Check if GPUs are indirect peers +* +* @param[in] pGpu +* @param[in] pRemoteGpu +* @returns NV_TRUE if GPUs are indirect peers +* +*/ +NvBool +gpumgrCheckIndirectPeer_IMPL +( + OBJGPU *pGpu, + OBJGPU *pRemoteGpu +) +{ +#if defined(NVCPU_PPC64LE) + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + if (pKernelNvlink == NULL) + { + return NV_FALSE; + } + + // If it's the same GPU or GPUs belonging to same SLI group, return early + if ((pGpu == pRemoteGpu) || + (pGpu->deviceInstance == pRemoteGpu->deviceInstance)) + { + return NV_FALSE; + } + + // If we are not on P9 + NVLINK2 systems then we don't support indirect peers + if ((GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)->coherentCpuFbBase == 0) || + (GPU_GET_KERNEL_MEMORY_SYSTEM(pRemoteGpu)->coherentCpuFbBase == 0)) + { + return NV_FALSE; + } + + return !(knvlinkGetNumLinksToPeer(pGpu, pKernelNvlink, pRemoteGpu)); +#else + return NV_FALSE; +#endif +} + +/*! + * @brief Set NVLinks (mask) for which initialization is disabled. + * + * @param[in] gpuId Platform specific GPU Id. + * @param[in] mask Mask representing the links to be disabled. + * + * @return NV_OK on success, appropriate error on failure. 
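 *
 * For example (illustrative), a mask of 0x3 requests that links 0 and 1 be
 * left uninitialized; the mask itself is only validated later, during NVLink
 * construction.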
+ */ +NV_STATUS +gpumgrSetGpuInitDisabledNvlinks_IMPL +( + NvU32 gpuId, + NvU32 mask, + NvBool bSkipHwNvlinkDisable +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS status = NV_ERR_INVALID_DEVICE; + NvU32 i; + + if (gpumgrGetGpuFromId(gpuId) != NULL) + { + return NV_ERR_IN_USE; + } + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); ++i) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + // Mask will be validated during Nvlink construct. + pGpuMgr->probedGpus[i].initDisabledNvlinksMask = mask; + pGpuMgr->probedGpus[i].bSkipHwNvlinkDisable = bSkipHwNvlinkDisable; + status = NV_OK; + break; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + return status; +} + +/*! + * @brief Get NVLinks (mask) for which initialization is disabled. + * + * @param[in] gpuId Platform specific GPU Id. + * @param[out] mask Mask representing the links to be disabled. + * + * @return NV_OK on success, appropriate error on failure. + */ +NV_STATUS +gpumgrGetGpuInitDisabledNvlinks_IMPL +( + NvU32 gpuId, + NvU32 *pMask, + NvBool *pbSkipHwNvlinkDisable +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NV_STATUS status = NV_ERR_INVALID_DEVICE; + NvU32 i; + + portSyncMutexAcquire(pGpuMgr->probedGpusLock); + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->probedGpus); ++i) + { + if (pGpuMgr->probedGpus[i].gpuId == gpuId) + { + *pMask = pGpuMgr->probedGpus[i].initDisabledNvlinksMask; + *pbSkipHwNvlinkDisable = pGpuMgr->probedGpus[i].bSkipHwNvlinkDisable; + status = NV_OK; + break; + } + } + + portSyncMutexRelease(pGpuMgr->probedGpusLock); + + return status; +} + +/*! + * @brief Adds an entry in the system partition topology save for the given GPU + * ID. Note that this does not create any saved partition topology. + * + * @param[in] DomainBusDevice: the PCI DomainBusDevice for the gpu to be registered + */ +void +gpumgrAddSystemMIGInstanceTopo_IMPL +( + NvU64 domainBusDevice +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + if (gpumgrGetSystemMIGInstanceTopo(domainBusDevice, NULL)) + { + // This gpu is already registered + return; + } + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->MIGTopologyInfo); i++) + { + // add new gpu entry into the first slot available + if (!pGpuMgr->MIGTopologyInfo[i].bValid) + { + pGpuMgr->MIGTopologyInfo[i].bValid = NV_TRUE; + pGpuMgr->MIGTopologyInfo[i].domainBusDevice = domainBusDevice; + break; + } + } + + // Shouldn't be possible to not find an open slot + NV_ASSERT(i < NV_ARRAY_ELEMENTS(pGpuMgr->MIGTopologyInfo)); +} + +/*! + * @brief Retrieves the entry in the system partition topology save for the given GPU + * ID. Note that retrieval of this entry does not constitute a guarantee + * that there is any valid data saved. + * + * @param[in] DomainBusDevice: the PCI DomainBusDevice for the gpu to be registered + * @param[out] ppTopology: Stores the saved MIG topology for the given GPU, if + * found. 
+ * + * @returns NV_TRUE if entry found + * NV_FALSE otherwise + */ +NvBool +gpumgrGetSystemMIGInstanceTopo_IMPL +( + NvU64 domainBusDevice, + GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY **ppTopology +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGpuMgr->MIGTopologyInfo); i++) + { + // + // Choose the correct GPU by comparing PCI BusDomainDevice + // This ensures we are using the same GPU across gpu load/unload + // + if (!pGpuMgr->MIGTopologyInfo[i].bValid || + (pGpuMgr->MIGTopologyInfo[i].domainBusDevice != domainBusDevice)) + { + continue; + } + + if (NULL != ppTopology) + *ppTopology = &pGpuMgr->MIGTopologyInfo[i]; + + return NV_TRUE; + } + + return NV_FALSE; +} + +static void +_gpumgrUnregisterRmCapsForMIGCI(GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave) +{ + GPUMGR_SAVE_COMPUTE_INSTANCE *pComputeInstanceSave; + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS(pGPUInstanceSave->saveCI); i++) + { + pComputeInstanceSave = &pGPUInstanceSave->saveCI[i]; + + if (pComputeInstanceSave->bValid) + { + osRmCapUnregister(&pComputeInstanceSave->pOsRmCaps); + + // + // Mark invalid as the partition caps have been unregistered from RM + // completely. + // + pComputeInstanceSave->bValid = NV_FALSE; + } + } +} + +void +gpumgrUnregisterRmCapsForMIGGI_IMPL(NvU64 gpuDomainBusDevice) +{ + GPUMGR_SAVE_MIG_INSTANCE_TOPOLOGY *pTopologySave; + GPUMGR_SAVE_GPU_INSTANCE *pGPUInstanceSave; + NvU32 i; + + if (!gpumgrGetSystemMIGInstanceTopo(gpuDomainBusDevice, &pTopologySave)) + { + return; + } + + for (i = 0; i < NV_ARRAY_ELEMENTS(pTopologySave->saveGI); i++) + { + pGPUInstanceSave = &pTopologySave->saveGI[i]; + + if (pGPUInstanceSave->bValid) + { + _gpumgrUnregisterRmCapsForMIGCI(pGPUInstanceSave); + + osRmCapUnregister(&pGPUInstanceSave->pOsRmCaps); + + // + // Mark invalid as the partition caps have been unregistered from RM + // completely. + // + pGPUInstanceSave->bValid = NV_FALSE; + } + } +} + +/** + * @brief Saves a pointer to the current GPU instance in thread local storage, + * to be logged by NVLOG, until gpumgrSetGpuRelease is called. + * Returns a pointer to tls entry (to be passed to gpumgrSetGpuRelease) + * + * @param[in] pGpu + */ +NvBool +gpumgrSetGpuAcquire(OBJGPU *pGpu) +{ + NvU32 **ppGpuInstance; + ppGpuInstance = (NvU32 **)tlsEntryAcquire + (TLS_ENTRY_ID_CURRENT_GPU_INSTANCE); + if (ppGpuInstance) + { + *ppGpuInstance = &(pGpu->gpuInstance); + return NV_TRUE; + } + return NV_FALSE; +} + +/** + * @brief Releases the thread local storage for GPU ID. + */ +void +gpumgrSetGpuRelease(void) +{ + tlsEntryRelease(TLS_ENTRY_ID_CURRENT_GPU_INSTANCE); +} + +/** +* @brief Returns the type of bridge SLI_BT_* +*/ +NvU8 +gpumgrGetGpuBridgeType(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + + return pGpuMgr->gpuBridgeType; +} diff --git a/src/nvidia/src/kernel/mem_mgr/console_mem.c b/src/nvidia/src/kernel/mem_mgr/console_mem.c new file mode 100644 index 000000000..59c424ebb --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/console_mem.c @@ -0,0 +1,92 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/console_mem.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "rmapi/client.h" +#include "virtualization/hypervisor/hypervisor.h" + +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER + +NV_STATUS +conmemConstruct_IMPL +( + ConsoleMemory *pConsoleMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + NvHandle hClient = pCallContext->pClient->hClient; + Memory *pMemory = staticCast(pConsoleMemory, Memory); + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = memmgrGetReservedConsoleMemDesc(pGpu, pMemoryManager); + RS_PRIV_LEVEL privLevel = pCallContext->secInfo.privLevel; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pParams)) + return NV_OK; + + if (!(rmclientIsAdminByHandle(hClient, privLevel) || hypervisorCheckForObjectAccess(hClient))) + { + return NV_ERR_INVALID_CLASS; + } + + if (pMemDesc == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ASSERT(pMemDesc->Allocated == 0); + memdescAddRef(pMemDesc); + pMemDesc->DupCount++; + + // + // NV01_MEMORY_FRAMEBUFFER_CONSOLE is just a way to get at the reserved + // framebuffer console memDesc rather than allocating a new one. Otherwise, + // it's treated as normal memory. + // + status = memConstructCommon(pMemory, NV01_MEMORY_LOCAL_USER, 0, pMemDesc, + 0, NULL, 0, 0, 0, 0, NVOS32_MEM_TAG_NONE, + (HWRESOURCE_INFO *)NULL); + if (status != NV_OK) + { + memdescDestroy(pMemDesc); + } + return status; +} + +NvBool +conmemCanCopy_IMPL +( + ConsoleMemory *pConsoleMemory +) +{ + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/mem_mgr/ctx_buf_pool.c b/src/nvidia/src/kernel/mem_mgr/ctx_buf_pool.c new file mode 100644 index 000000000..85cb7dc93 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/ctx_buf_pool.c @@ -0,0 +1,724 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file ctx_buf_pool.c + * @brief This file defines interfaces that act as wrappers around the RM memory + * pool interfaces. These interfaces are used for creating RM memory pools for + * RM internal allocations like global (engine-specific) and local (context-specific) + * context buffers. While client page tables are also RM internal allocations + * and use RM memory pools, they DO NOT use interfaces defined in this file. + */ + +#include "core/core.h" +#include "core/locks.h" +#include "mem_mgr/ctx_buf_pool.h" +#include "class/cl90f1.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "vgpu/vgpu_events.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/bus/kern_bus.h" +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/gr/kernel_graphics.h" +#include "gpu/mem_mgr/heap.h" + +/* + * @brief Are memory pools supported for context buffers + * + * @param[in] pGpu OBJGPU pointer + * + * @return NvBool + */ +NvBool +ctxBufPoolIsSupported +( + OBJGPU *pGpu +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool bCallingContextPlugin; + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_MOVE_CTX_BUFFERS_TO_PMA)) + { + NV_PRINTF(LEVEL_INFO, "Ctx buffers not supported in PMA\n"); + return NV_FALSE; + } + + if (!memmgrIsPmaInitialized(pMemoryManager)) + { + NV_PRINTF(LEVEL_INFO, "PMA is disabled. Ctx buffers will be allocated in RM reserved heap\n"); + return NV_FALSE; + } + + if (IS_VIRTUAL(pGpu) || RMCFG_FEATURE_PLATFORM_GSP) + { + NV_PRINTF(LEVEL_INFO, "Guest RM/GSP don't support ctx buffers in PMA\n"); + return NV_FALSE; + } + + // + // In virtualized env, host RM we will continue to use subheap for all allocations it makes on behalf + // of guest RM. Ctx buffer allocations made by host RM for plugins(plugin channel inst block, runlists etc) + // will come from host RM's PMA(partition PMA) + // + NV_ASSERT_OR_RETURN(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin) == NV_OK, NV_FALSE); + if (hypervisorIsVgxHyper() && !bCallingContextPlugin) + { + NV_PRINTF(LEVEL_INFO, "ctx buffers in PMA not supported for allocations host RM makes on behalf of guest\n"); + return NV_FALSE; + } + + NV_PRINTF(LEVEL_INFO, "Ctx buffer pool enabled. 
Ctx buffers will be allocated from PMA\n"); + return NV_TRUE; +} + +/* + * @brief Initializes all context buffer pools for a VA space + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pHeap Pointer to Heap object to whose PMA this pool is tied + * @param[out] ppCtxBufPool Pointer to context buffer pool + * + * @return NV_STATUS + */ +NV_STATUS +ctxBufPoolInit +( + OBJGPU *pGpu, + Heap *pHeap, + CTX_BUF_POOL_INFO **ppCtxBufPool +) +{ + NV_STATUS status = NV_OK; + CTX_BUF_POOL_INFO *pCtxBufPool = NULL; + NvU32 i, poolConfig; + + NV_ASSERT_OR_RETURN(ppCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT); + + if (!ctxBufPoolIsSupported(pGpu)) + { + return NV_OK; + } + + pCtxBufPool = portMemAllocNonPaged(sizeof(CTX_BUF_POOL_INFO)); + NV_ASSERT_OR_RETURN((pCtxBufPool != NULL), NV_ERR_NO_MEMORY); + portMemSet(pCtxBufPool, 0, sizeof(CTX_BUF_POOL_INFO)); + + // + // create a mem pool for each page size supported by RM + // pool corresponding to RM_ATTR_PAGE_SIZE_DEFAULT remains unused + // + for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++) + { + switch (i) + { + case RM_ATTR_PAGE_SIZE_DEFAULT: + case RM_ATTR_PAGE_SIZE_4KB: + poolConfig = POOL_CONFIG_CTXBUF_4K; + break; + case RM_ATTR_PAGE_SIZE_BIG: + poolConfig = POOL_CONFIG_CTXBUF_64K; + break; + case RM_ATTR_PAGE_SIZE_HUGE: + poolConfig = POOL_CONFIG_CTXBUF_2M; + break; + case RM_ATTR_PAGE_SIZE_512MB: + poolConfig = POOL_CONFIG_CTXBUF_512M; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unsupported page size attr %d\n", i); + return NV_ERR_INVALID_STATE; + } + NV_ASSERT_OK_OR_GOTO(status, rmMemPoolSetup((void*)&pHeap->pmaObject, &pCtxBufPool->pMemPool[i], poolConfig), cleanup); + } + NV_PRINTF(LEVEL_INFO, "Ctx buf pool successfully initialized\n"); + +cleanup: + if (status != NV_OK) + { + if (pCtxBufPool != NULL) + { + ctxBufPoolDestroy(&pCtxBufPool); + } + } + *ppCtxBufPool = pCtxBufPool; + return status; +} + +/* + * @brief Destroys all context buffer pools for a VA space + * + * @param[in] ppCtxBufPool Pointer to context buffer pool + * + * @return + */ +void +ctxBufPoolDestroy +( + CTX_BUF_POOL_INFO **ppCtxBufPool +) +{ + NvU32 i; + CTX_BUF_POOL_INFO *pCtxBufPool; + NV_ASSERT((ppCtxBufPool != NULL) && (*ppCtxBufPool != NULL)); + if ((ppCtxBufPool == NULL) || (*ppCtxBufPool == NULL)) + { + NV_PRINTF(LEVEL_ERROR, "Ctx buf pool doesn't exist\n"); + return; + } + + pCtxBufPool = *ppCtxBufPool; + + for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++) + { + if (pCtxBufPool->pMemPool[i] != NULL) + { + rmMemPoolDestroy(pCtxBufPool->pMemPool[i]); + pCtxBufPool->pMemPool[i] = NULL; + } + } + portMemFree(pCtxBufPool); + *ppCtxBufPool = NULL; + NV_PRINTF(LEVEL_INFO, "Ctx buf pool destroyed\n"); +} + +/* + * @brief Calculates total amount of memory required for all buffers in each pool and reserves the memory + * + * Q. Why do we need a separate function to calculate memory when we already know size of all buffers? + * A. Memory required to allocate a buffer depends on 3 things: size, page size and alignment. + * context buffers don't have alignment requirements so alignment directly depends on page size. + * page size is determined based on the size of buffer and RM_ATTR_PAGE_SIZE parameter. + * Once we get the page size, we can align the size of buffer accordingly and also route it to correct pool. + * Each buffer has different size and attr and so will have different page size and will accordingly go to different pools. + * Today, alignment is determined at alloc time(inside heapAlloc) and usually page size in map call. 
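 * (Editorial illustration, with hypothetical sizes: a 192KB buffer with
 *  RM_ATTR_PAGE_SIZE_BIG resolves to 64KB pages, so 192KB lands in the
 *  RM_ATTR_PAGE_SIZE_BIG bucket; a 3MB buffer with RM_ATTR_PAGE_SIZE_HUGE
 *  resolves to 2MB pages and is rounded up to 4MB before landing in the
 *  RM_ATTR_PAGE_SIZE_HUGE bucket. Each non-zero bucket is then reserved
 *  with a single rmMemPoolReserve call.)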
+ * We use the same algorithm (memmgrDeterminePageSize) below to determine alignment and page size for each buffer. + * + * Q. Why do we need a list of context buffers? + * A. Reserving memory from PMA requires us to drop GPU lock. To determine + * page size we need GPU lock since we are accessing some global state here. + * So we first calculate memory requirements for each pool based on which buffers + * will eventually end up in those pools. + * Later we drop GPU lock and reserve memory for each pool. + * This avoids acquiring and dropping locks for each buffer and also avoids making a call to PMA + * for each buffer. + * + * @param[in] pCtxBufPool Pointer to context buffer pool + * @param[in] pBufInfoList List of context buffers to reserve memory for + * @param[in] bufcount number of buffers to reserve memory for + * + * @return NV_STATUS + */ +NV_STATUS +ctxBufPoolReserve +( + OBJGPU *pGpu, + CTX_BUF_POOL_INFO *pCtxBufPool, + CTX_BUF_INFO *pBufInfoList, + NvU32 bufCount +) +{ + NV_STATUS status = NV_OK; + NvU32 pageSize, i; + NvU64 totalSize[RM_ATTR_PAGE_SIZE_INVALID] = {0}; + NvU64 size; + + NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBufInfoList != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(bufCount > 0, NV_ERR_INVALID_ARGUMENT); + + for (i = 0; i < bufCount; i++) + { + size = pBufInfoList[i].size; + + // update size and pageSize based on buffer alignment requirement and buffer size + NV_ASSERT_OK_OR_RETURN(ctxBufPoolGetSizeAndPageSize(pCtxBufPool, pGpu, + pBufInfoList[i].align, pBufInfoList[i].attr, pBufInfoList[i].bContig, &size, &pageSize)); + + // + // Determine the pool(4K/64K/2M) from where this buffer will eventually + // get allocated and mark that pool to reserve this memory. + // + switch(pageSize) + { + case RM_PAGE_SIZE: + totalSize[RM_ATTR_PAGE_SIZE_4KB] += size; + break; + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_128K: + totalSize[RM_ATTR_PAGE_SIZE_BIG] += size; + break; + case RM_PAGE_SIZE_HUGE: + totalSize[RM_ATTR_PAGE_SIZE_HUGE] += size; + break; + case RM_PAGE_SIZE_512M: + totalSize[RM_ATTR_PAGE_SIZE_512MB] += size; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unrecognized/unsupported page size = 0x%x\n", pageSize); + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + NV_PRINTF(LEVEL_INFO, "Reserving 0x%llx bytes for buf Id = 0x%x in pool with page size = 0x%x\n", size, i, pageSize); + } + + for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++) + { + if (totalSize[i] > 0) + { + NV_ASSERT_OK_OR_GOTO(status, rmMemPoolReserve(pCtxBufPool->pMemPool[i], totalSize[i], 0), done); + NV_PRINTF(LEVEL_INFO, "Reserved 0x%llx bytes in pool with RM_ATTR_PAGE_SIZE_* = 0x%x\n", totalSize[i], i); + } + } + +done: + if (status != NV_OK) + { + NV_ASSERT_OK(ctxBufPoolTrim(pCtxBufPool)); + NV_PRINTF(LEVEL_ERROR, "Failed to reserve memory. 
trimming all pools\n"); + } + return status; + +} + +/* + * @brief Trims out additional memory from context buffer pools + * + * @param[in] pCtxBufPool Pointer to context buffer pool + * + * @return NV_STATUS + */ +NV_STATUS +ctxBufPoolTrim +( + CTX_BUF_POOL_INFO *pCtxBufPool +) +{ + NvU32 i; + NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT); + + for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++) + { + rmMemPoolTrim(pCtxBufPool->pMemPool[i], 0, 0); + NV_PRINTF(LEVEL_INFO, "Trimmed pool with RM_ATTR_PAGE_SIZE_* = 0x%x\n", i); + } + return NV_OK; +} + +/* + * @brief Releases all memory from context buffer pools + * + * If there are pending allocations in any of the pools then + * this function just returns early. + * + * @param[in] pCtxBufPool Pointer to context buffer pool + * + * @return + */ +void +ctxBufPoolRelease +( + CTX_BUF_POOL_INFO *pCtxBufPool +) +{ + NvU32 i; + NV_ASSERT(pCtxBufPool != NULL); + + for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++) + { + rmMemPoolRelease(pCtxBufPool->pMemPool[i], 0); + } +} + +/* + * @brief Allocates memory from context buffer pools + * + * @param[in] pCtxBufPool Pointer to context buffer pool + * @param[in] pMemDesc Pointer to context buffer memory descriptor + * + * @return NV_STATUS + */ +NV_STATUS +ctxBufPoolAllocate +( + CTX_BUF_POOL_INFO *pCtxBufPool, + PMEMORY_DESCRIPTOR pMemDesc +) +{ + RM_POOL_ALLOC_MEM_RESERVE_INFO *pPool = NULL; + NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + + NV_ADDRESS_SPACE addrSpace = memdescGetAddressSpace(pMemDesc); + if (addrSpace != ADDR_FBMEM) + { + NV_PRINTF(LEVEL_ERROR, "ctx buf pool is only used for buffers to be allocated in FB\n" + "SYSMEM buffers don't need memory to be pre-reserved in pool\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // If page size is not set, then set it based on actual size of memdesc and its alignment + NvU32 pageSize = memdescGetPageSize(pMemDesc, AT_GPU); + if ((pageSize == 0) || (memdescGetContiguity(pMemDesc, AT_GPU))) + { + NvU32 newPageSize; + NV_ASSERT_OK_OR_RETURN(ctxBufPoolGetSizeAndPageSize(pCtxBufPool, pMemDesc->pGpu, + pMemDesc->Alignment, RM_ATTR_PAGE_SIZE_DEFAULT, memdescGetContiguity(pMemDesc, AT_GPU), + &pMemDesc->ActualSize, &newPageSize)); + + // + // Update the page size in the memdesc only if it isn't set already. + // This is ok as we get new page size only if no page size was set or if the buffer is contiguous or both. + // For physically contig buffers, PA address in the memdesc remains the same irrespective of page size. 
+ // For such buffers, if pageSize was already set then we don't want to change it as it can change the way + // buffers are mapped by RM vs HW(this is specifically applicable to main GR ctx buffer) + // + if (pageSize == 0) + { + memdescSetPageSize(pMemDesc, AT_GPU, newPageSize); + NV_PRINTF(LEVEL_INFO, "Ctx buffer page size set to 0x%x\n", newPageSize); + } + pageSize = newPageSize; + } + + // Determine the pool(4K/64K/2M) from where this buffer is to be allocated + switch(pageSize) + { + case RM_PAGE_SIZE: + pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_4KB]; + break; + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_128K: + pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_BIG]; + break; + case RM_PAGE_SIZE_HUGE: + pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_HUGE]; + break; + case RM_PAGE_SIZE_512M: + pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_512MB]; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unsupported page size = 0x%x set for context buffer\n", pageSize); + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + NV_ASSERT_OK_OR_RETURN(rmMemPoolAllocate(pPool, (RM_POOL_ALLOC_MEMDESC*)pMemDesc)); + NV_PRINTF(LEVEL_INFO, "Buffer allocated from ctx buf pool with page size = 0x%x\n", pageSize); + return NV_OK; +} + +/* + * @brief Frees memory from context buffer pools + * + * @param[in] pCtxBufPool Pointer to context buffer pool + * @param[in] pMemDesc Pointer to context buffer memory descriptor + * + * @return NV_STATUS + */ +NV_STATUS +ctxBufPoolFree +( + CTX_BUF_POOL_INFO *pCtxBufPool, + PMEMORY_DESCRIPTOR pMemDesc +) +{ + RM_POOL_ALLOC_MEM_RESERVE_INFO *pPool = NULL; + NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + + NvU32 pageSize = memdescGetPageSize(pMemDesc, AT_GPU); + + // + // If buffer is contiguous, then it may or may not be allocated from the same pool + // as its page size. (see ctxBufPoolAllocate) + // In such case, determine the size of buffer as done during allocation to route the + // free to correct pool. + // + if (memdescGetContiguity(pMemDesc, AT_GPU)) + { + NvU64 size = pMemDesc->ActualSize; + NV_ASSERT_OK_OR_RETURN(ctxBufPoolGetSizeAndPageSize(pCtxBufPool, pMemDesc->pGpu, + pMemDesc->Alignment, RM_ATTR_PAGE_SIZE_DEFAULT, NV_TRUE, &size, &pageSize)); + } + + switch(pageSize) + { + case RM_PAGE_SIZE: + pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_4KB]; + break; + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_128K: + pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_BIG]; + break; + case RM_PAGE_SIZE_HUGE: + pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_HUGE]; + break; + case RM_PAGE_SIZE_512M: + pPool = pCtxBufPool->pMemPool[RM_ATTR_PAGE_SIZE_512MB]; + break; + default: + NV_PRINTF(LEVEL_ERROR, "Unsupported page size detected for context buffer\n"); + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + + // If scrubber is being skipped by PMA we need to manually scrub this memory + if (rmMemPoolIsScrubSkipped(pPool)) + { + OBJGPU *pGpu = pMemDesc->pGpu; + NvU8 *pMem = kbusMapRmAperture_HAL(pGpu, pMemDesc); + if (pMem == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Failed to BAR2 map memdesc. 
memory won't be scrubbed\n"); + NV_ASSERT(pMem != NULL); + } + else + { + portMemSet(pMem, 0, pMemDesc->ActualSize); + kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pMem, NV_TRUE); + } + } + rmMemPoolFree(pPool, (RM_POOL_ALLOC_MEMDESC*)pMemDesc, 0); + + NV_PRINTF(LEVEL_INFO, "Buffer freed from ctx buf pool with page size = 0x%x\n", pageSize); + return NV_OK; +} + +/* + * @brief Returns memory pool for global buffers like runlists, GR global buffers etc. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] bufId Id to identify the buffer + * @param[in] engineType NV2080 engine type + * @param[out] ppCtxBufPool Pointer to context buffer pool + * + * @return NV_STATUS + */ +NV_STATUS +ctxBufPoolGetGlobalPool +( + OBJGPU *pGpu, + CTX_BUF_ID bufId, + NvU32 engineType, + CTX_BUF_POOL_INFO **ppCtxBufPool +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + CTX_BUF_POOL_INFO *pCtxBufPool = NULL; + + NV_ASSERT_OR_RETURN(ppCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV2080_ENGINE_TYPE_IS_VALID(engineType), NV_ERR_INVALID_ARGUMENT); + + switch (bufId) + { + case CTX_BUF_ID_RUNLIST: + pCtxBufPool = kfifoGetRunlistBufPool(pGpu, pKernelFifo, engineType); + break; + case CTX_BUF_ID_GR_GLOBAL: + { + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, NV2080_ENGINE_TYPE_GR_IDX(engineType)); + NV_ASSERT_OR_RETURN(NV2080_ENGINE_TYPE_IS_GR(engineType), NV_ERR_INVALID_ARGUMENT); + pCtxBufPool = kgraphicsGetCtxBufPool(pGpu, pKernelGraphics); + break; + } + default: + NV_PRINTF(LEVEL_ERROR, "Invalid buf Id = 0x%x requested\n", bufId); + return NV_ERR_INVALID_ARGUMENT; + } + *ppCtxBufPool = pCtxBufPool; + return NV_OK; +} + +/* + * @brief Get updated buffer size and page size for a context buffer + * + * @param[in] pGpu OBJGPU pointer + * @param[in] alignment Expected buffer alignment + * @param[in] attr Page size attribute for buffer + * @param[in] bContig Is buffer physically contiguouss + * @param[in/out] pSize Size of buffer + * @param[out] pPageSize Page size for buffer + * + * @return NV_STATUS + */ +NV_STATUS +ctxBufPoolGetSizeAndPageSize +( + CTX_BUF_POOL_INFO *pCtxBufPool, + OBJGPU *pGpu, + NvU64 alignment, + RM_ATTR_PAGE_SIZE attr, + NvBool bContig, + NvU64 *pSize, + NvU32 *pPageSize +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU32 pageSize = 0; + NvU32 allocFlags = 0; + NvU32 retAttr = 0; + NvU32 retAttr2 = 0; + NvU64 size = 0; + + NV_ASSERT_OR_RETURN(pSize != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pPageSize != NULL, NV_ERR_INVALID_ARGUMENT); + + size = *pSize; + retAttr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, retAttr); + + switch (attr) + { + case RM_ATTR_PAGE_SIZE_DEFAULT: + retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT, retAttr); + break; + case RM_ATTR_PAGE_SIZE_4KB: + retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, retAttr); + break; + case RM_ATTR_PAGE_SIZE_BIG: + retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, retAttr); + break; + case RM_ATTR_PAGE_SIZE_HUGE: + retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, retAttr); + retAttr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB, retAttr); + break; + case RM_ATTR_PAGE_SIZE_512MB: + retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, retAttr); + retAttr2 = FLD_SET_DRF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _512MB, retAttr); + break; + default: + NV_PRINTF(LEVEL_ERROR, "unsupported page size attr\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // Update the size of buffer based on requested alignment + { + NvU32 
tempAttr = 0; + NvU64 tempAlign = alignment; + + if (attr == RM_ATTR_PAGE_SIZE_DEFAULT) + tempAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _4KB, retAttr); + else + tempAttr = retAttr; + + status = memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, &size, &tempAlign, 0, + allocFlags, tempAttr, retAttr2, 0); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Couldn't determine buffer alignment\n"); + DBG_BREAKPOINT(); + return status; + } + } + + // + // If buffer needs to be allocated contiguously then we need to route it to a pool + // whose chunk size >= buffer size + // + if (bContig) + { + NvU64 chunkSize = 0; + NvU32 i; + for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++) + { + NV_ASSERT_OK_OR_RETURN(rmMemPoolGetChunkAndPageSize(pCtxBufPool->pMemPool[i], &chunkSize, &pageSize)); + if (chunkSize >= size) + { + size = chunkSize; + break; + } + } + if (i == RM_ATTR_PAGE_SIZE_INVALID) + { + NV_PRINTF(LEVEL_ERROR, "couldn't find pool with chunksize >= 0x%llx\n", size); + DBG_BREAKPOINT(); + status = NV_ERR_NO_MEMORY; + return status; + } + } + else + { + // Determine page size based on updated buffer size + pageSize = memmgrDeterminePageSize(pMemoryManager, 0, size, NVOS32_ATTR_FORMAT_PITCH, + allocFlags, &retAttr, &retAttr2); + } + + // make sure we get a valid page size and alignment is taken care of + if ((pageSize == 0) || ((NvU64)pageSize < alignment)) + { + NV_PRINTF(LEVEL_ERROR, "Incorrect page size determination\n"); + DBG_BREAKPOINT(); + status = NV_ERR_INVALID_STATE; + return status; + } + + // Align up buffer size based on page size + size = NV_ALIGN_UP64(size, pageSize); + + *pPageSize = pageSize; + *pSize = size; + NV_PRINTF(LEVEL_INFO, "Buffer updated size = 0x%llx with page size = 0x%x\n", size, pageSize); + return status; +} + +/* + * @brief Is scrubbing skipped for allocations in this ctx buf pool + * + * @param[in] pCtxBufPool Pointer to context buffer pool + * + * @return NvBool + */ +NvBool +ctxBufPoolIsScrubSkipped +( + CTX_BUF_POOL_INFO *pCtxBufPool +) +{ + NvU32 i; + NV_ASSERT_OR_RETURN(pCtxBufPool != NULL, NV_ERR_INVALID_ARGUMENT); + for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++) + { + if (!rmMemPoolIsScrubSkipped(pCtxBufPool->pMemPool[i])) + return NV_FALSE; + } + + return NV_TRUE; +} + +/* + * @brief Set ctx buf pool to skip scrub for all its allocations + * + * @param[in] pCtxBufPool Pointer to context buffer pool + * @param[in] bSkipScrub Should scrubbing be skipped + * + */ +void +ctxBufPoolSetScrubSkip +( + CTX_BUF_POOL_INFO *pCtxBufPool, + NvBool bSkipScrub +) +{ + NvU32 i; + NV_ASSERT_OR_RETURN_VOID(pCtxBufPool != NULL); + for (i = 0; i < RM_ATTR_PAGE_SIZE_INVALID; i++) + { + rmMemPoolSkipScrub(pCtxBufPool->pMemPool[i], bSkipScrub); + } +} diff --git a/src/nvidia/src/kernel/mem_mgr/fabric_vaspace.c b/src/nvidia/src/kernel/mem_mgr/fabric_vaspace.c new file mode 100644 index 000000000..945117345 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/fabric_vaspace.c @@ -0,0 +1,786 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Rotuines ***************************\ +* * +* Fabric Virtual Address Space Function Definitions. * +* * +\***************************************************************************/ + +#include "gpu/mmu/kern_gmmu.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/fabric_vaspace.h" +#include "mem_mgr/gpu_vaspace.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/bus/kern_bus.h" +#include "kernel/gpu/fifo/kernel_fifo.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "mmu/mmu_walk.h" +#include "lib/base_utils.h" +#include "class/cl90f1.h" // FERMI_VASPACE_A +#include "class/cl00fc.h" // FABRIC_VASPACE_A +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "class/cl0080.h" // NV01_DEVICE_0 +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "deprecated/rmapi_deprecated.h" +#include "rmapi/rs_utils.h" +#include "vgpu/vgpu_events.h" +#include "mem_mgr/virt_mem_mgr.h" + + + +// +// TODO: To be removed when legacy FLA VAS (pKernelBus->flaInfo.pFlaVAS) is removed" +// The instance block is setup during kbusAllocateFlaVaspace_HAL(). However, we +// lazily bind it to the new fabric VAS when the very first NV_FABRIC_MEMORY +// allocations happens. +// +static NV_STATUS +_fabricvaspaceBindInstBlk +( + FABRIC_VASPACE *pFabricVAS +) +{ + OBJVASPACE *pVAS = staticCast(pFabricVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NV_STATUS status = NV_OK; + + INST_BLK_INIT_PARAMS instblkParams; + + if (gvaspaceIsInUse(dynamicCast(pKernelBus->flaInfo.pFlaVAS, OBJGVASPACE))) + { + NV_PRINTF(LEVEL_ERROR, + "FabricVAS and FlaVAS cannot be used simultaneously! " + "Instance block setup for fabricVAS failed\n"); + return NV_ERR_INVALID_OPERATION; + } + + // + // Check if this is the first fabric vaspace allocation. If this is not the + // first allocation, instance block is already setup. Return NV_OK. + // + if (gvaspaceIsInUse(dynamicCast(pFabricVAS->pGVAS, OBJGVASPACE))) + { + return NV_OK; + } + + // Unbind the instance block for FLA vaspace. 
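    //
    // (Editorial note, summarizing the sequence that follows: unbind the
    // legacy FLA instance block, re-initialize the shared instance block
    // against pFabricVAS->pGVAS, then bind it for the fabric VAS. The
    // "failed:" path below restores the legacy FLA binding, so a failed
    // fabric allocation leaves the original FLA VAS usable.)
    //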
+ status = kbusSetupUnbindFla_HAL(pGpu, pKernelBus); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to unbind instance block for FlaVAS, status=0x%x\n", + status); + return status; + } + + // Instantiate the instance block for fabric vaspace. + portMemSet(&instblkParams, 0, sizeof(instblkParams)); + status = kgmmuInstBlkInit(pKernelGmmu, pKernelBus->flaInfo.pInstblkMemDesc, + pFabricVAS->pGVAS, FIFO_PDB_IDX_BASE, + &instblkParams); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to setup instance block for fabricVAS, status=0x%x\n", + status); + goto failed; + } + + // Bind the instance block for fabric vaspace. + status = kbusSetupBindFla_HAL(pGpu, pKernelBus, pFabricVAS->gfid); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to bind instance block for fabricVAS, status=0x%x\n", + status); + goto failed; + } + + return NV_OK; + +failed: + // Instantiate the instance block for FLA vaspace. + portMemSet(&instblkParams, 0, sizeof(instblkParams)); + NV_ASSERT(kgmmuInstBlkInit(pKernelGmmu, pKernelBus->flaInfo.pInstblkMemDesc, + pKernelBus->flaInfo.pFlaVAS, FIFO_PDB_IDX_BASE, + &instblkParams) == NV_OK); + + // Bind the instance block for FLA vaspace. + NV_ASSERT(kbusSetupBindFla_HAL(pGpu, pKernelBus, pFabricVAS->gfid) == NV_OK); + + return status; +} + +// +// TODO: To be removed when legacy FLA VAS (pKernelBus->flaInfo.pFlaVAS) is removed" +// The instance block is unbind during kbusDestroyFla_HAL(). However, we unbind +// it here and bind back the instance block for the legacy FLA VAS after the +// last NV_FABRIC_MEMORY allocation is freed. +// +static void +_fabricvaspaceUnbindInstBlk +( + FABRIC_VASPACE *pFabricVAS +) +{ + OBJVASPACE *pVAS = staticCast(pFabricVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + INST_BLK_INIT_PARAMS instblkParams = {0}; + + // + // Check if there are any pending allocations for the fabric vaspace. + // If there are pending allocations, skip restore and return NV_OK. + // + if (gvaspaceIsInUse(dynamicCast(pFabricVAS->pGVAS, OBJGVASPACE))) + { + return; + } + + // Unbind the instance block for fabric vaspace. + NV_ASSERT(kbusSetupUnbindFla_HAL(pGpu, pKernelBus) == NV_OK); + + // Instantiate the instance block for FLA vaspace. + NV_ASSERT(kgmmuInstBlkInit(pKernelGmmu, pKernelBus->flaInfo.pInstblkMemDesc, + pKernelBus->flaInfo.pFlaVAS, FIFO_PDB_IDX_BASE, + &instblkParams) == NV_OK); + + // Bind the instance block for FLA vaspace. + NV_ASSERT(kbusSetupBindFla_HAL(pGpu, pKernelBus, pFabricVAS->gfid) == NV_OK); +} + +NV_STATUS +fabricvaspaceConstruct__IMPL +( + FABRIC_VASPACE *pFabricVAS, + NvU32 classId, + NvU32 vaspaceId, + NvU64 vaStart, + NvU64 vaLimit, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 flags +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + OBJVASPACE *pVAS = staticCast(pFabricVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + NV_STATUS status = NV_OK; + NvHandle hClient = 0; + NvHandle hDevice = 0; + NV0080_ALLOC_PARAMETERS devAllocParams = { 0 }; + NvU32 gfid = 0; + + // Sanity check input parameters. 
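/*
 * Editorial sketch (not part of this change): a hypothetical call site showing
 * how a per-GPU fabric VAS that satisfies the sanity checks below might be
 * created. It mirrors the vmmCreateVaspace() signature used later in this
 * constructor; createFabricVasSketch and its parameters are illustrative names.
 */
static NV_STATUS
createFabricVasSketch(OBJGPU *pGpu, NvU64 fabricBase, NvU64 fabricSize,
                      FABRIC_VASPACE **ppFabricVAS)
{
    OBJSYS     *pSys = SYS_GET_INSTANCE();
    OBJVMM     *pVmm = SYS_GET_VMM(pSys);
    OBJVASPACE *pVAS = NULL;

    // classId must be FABRIC_VASPACE_A, vaspaceId must match pGpu->gpuId,
    // the GPU mask must contain exactly one GPU, and vaStart <= vaLimit.
    NV_ASSERT_OK_OR_RETURN(vmmCreateVaspace(pVmm, FABRIC_VASPACE_A, pGpu->gpuId,
                                            NVBIT(pGpu->gpuInstance),
                                            fabricBase, fabricBase + fabricSize - 1,
                                            0, 0, NULL, 0, &pVAS));

    *ppFabricVAS = dynamicCast(pVAS, FABRIC_VASPACE);
    return NV_OK;
}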
+ NV_ASSERT_OR_RETURN(FABRIC_VASPACE_A == classId, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaStart <= vaLimit, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(ONEBITSET(pVAS->gpuMask), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaspaceId == pGpu->gpuId, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + status = pRmApi->AllocWithHandle(pRmApi, NV01_NULL_OBJECT, + NV01_NULL_OBJECT, NV01_NULL_OBJECT, + NV01_ROOT, &hClient); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed creating client, status=0x%x\n", status); + return status; + } + + status = serverutilGenResourceHandle(hClient, &hDevice); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed creating device handle, status=0x%x\n", status); + goto cleanup; + } + + // Allocate a device handle + devAllocParams.deviceId = gpuGetDeviceInstance(pGpu); + status = pRmApi->AllocWithHandle(pRmApi, hClient, hClient, hDevice, + NV01_DEVICE_0, &devAllocParams); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed creating device, status=0x%x\n", status); + goto cleanup; + } + + // Save off flags. + pFabricVAS->flags = (flags | + VASPACE_FLAGS_ALLOW_ZERO_ADDRESS | + VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB | + VASPACE_FLAGS_DISABLE_SPLIT_VAS); + + if (IS_GFID_VF(gfid)) + { + pFabricVAS->gfid = gfid; + pFabricVAS->flags |= VASPACE_FLAGS_ALLOW_PAGES_IN_PHYS_MEM_SUBALLOCATOR; + } + + pFabricVAS->bRpcAlloc = IS_VIRTUAL(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu); + + // Create the GVASPACE object associated with this fabric vaspace. + status = vmmCreateVaspace(pVmm, FERMI_VASPACE_A, 0, pVAS->gpuMask, + vaStart, vaLimit, 0, 0, NULL, pFabricVAS->flags, + &pFabricVAS->pGVAS); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed allocating gvaspace associated with the fabric vaspace, " + "status=0x%x\n", status); + goto cleanup; + } + + pFabricVAS->hClient = hClient; + pFabricVAS->hDevice = hDevice; + + // Capture the vasStart and vasLimit for the fabric vaspace. + pVAS->vasStart = pFabricVAS->pGVAS->vasStart; + pVAS->vasLimit = pFabricVAS->pGVAS->vasLimit; + + return NV_OK; + +cleanup: + NV_ASSERT(pRmApi->Free(pRmApi, hClient, hClient) == NV_OK); + + return status; +} + +void +fabricvaspaceDestruct_IMPL +( + FABRIC_VASPACE *pFabricVAS +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJVMM *pVmm = SYS_GET_VMM(pSys); + OBJVASPACE *pVAS = staticCast(pFabricVAS, OBJVASPACE); + + if (pFabricVAS->pGVAS == NULL) + return; + + NV_ASSERT(pRmApi->Free(pRmApi, pFabricVAS->hClient, + pFabricVAS->hClient) == NV_OK); + + // There should be no vaspace allocations pending at this point. + NV_ASSERT(!gvaspaceIsInUse(dynamicCast(pFabricVAS->pGVAS, OBJGVASPACE))); + + // Destroy the GVASPACE object associated with this fabric vaspace. + vmmDestroyVaspace(pVmm, pFabricVAS->pGVAS); + + pFabricVAS->pGVAS = NULL; + pVAS->vasStart = 0; + pVAS->vasLimit = 0; +} + +NV_STATUS +fabricvaspaceAlloc_IMPL +( + FABRIC_VASPACE *pFabricVAS, + NvU64 size, + NvU64 align, + NvU64 rangeLo, + NvU64 rangeHi, + NvU64 pageSize, + VAS_ALLOC_FLAGS flags, + NvU64 *pAddr +) +{ + NV_STATUS status = NV_OK; + + // Sanity check the input parameters. 
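/*
 * Editorial sketch (not part of this change): an illustrative allocation that
 * passes the checks below -- size and alignment are both multiples of the
 * requested page size, and the page size is at least RM_PAGE_SIZE_HUGE (2MB).
 * The range and flag choices are hypothetical; vaspaceAlloc() dispatches to
 * fabricvaspaceAlloc_IMPL below.
 */
static NV_STATUS
allocFabricRangeSketch(FABRIC_VASPACE *pFabricVAS, NvU64 *pFabricAddr)
{
    OBJVASPACE      *pVAS  = staticCast(pFabricVAS, OBJVASPACE);
    VAS_ALLOC_FLAGS  flags = {0};

    // 8MB allocation, 2MB-aligned, anywhere in the fabric VA range,
    // backed by 2MB fabric pages.
    return vaspaceAlloc(pVAS,
                        4 * RM_PAGE_SIZE_HUGE,   // size
                        RM_PAGE_SIZE_HUGE,       // align
                        vaspaceGetVaStart(pVAS), // rangeLo
                        vaspaceGetVaLimit(pVAS), // rangeHi
                        RM_PAGE_SIZE_HUGE,       // pageSize
                        flags,
                        pFabricAddr);
}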
+ NV_ASSERT_OR_RETURN(pFabricVAS->pGVAS != NULL, NV_ERR_OBJECT_NOT_FOUND); + NV_ASSERT_OR_RETURN(pAddr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pageSize >= RM_PAGE_SIZE_HUGE, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(align != 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size != 0, NV_ERR_INVALID_ARGUMENT); + + // Check the alignment and size are pageSize aligned + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED64(align, pageSize), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED64(size, pageSize), NV_ERR_INVALID_ARGUMENT); + + status = _fabricvaspaceBindInstBlk(pFabricVAS); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to bind instance block for fabric vaspace." + " Alloc failed\n"); + return status; + } + + // Adjust rangeLo and rangeHi + rangeLo = NV_ALIGN_DOWN(rangeLo, pageSize); + rangeHi = NV_ALIGN_UP(rangeHi, pageSize); + + // + // Allocate VA space of the size and alignment requested. + // RM_PAGE_SIZE_HUGE is passed since FLA->PA page size is 2MB. + // + status = vaspaceAlloc(pFabricVAS->pGVAS, size, align, rangeLo, rangeHi, + RM_PAGE_SIZE_HUGE, flags, pAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate vaspace\n"); + goto failed; + } + + // Assert that the address returned is pageSize aligned + NV_ASSERT(NV_IS_ALIGNED64(*pAddr, pageSize)); + + return NV_OK; + +failed: + + _fabricvaspaceUnbindInstBlk(pFabricVAS); + + return status; +} + +NV_STATUS +fabricvaspaceAllocNonContiguous_IMPL +( + FABRIC_VASPACE *pFabricVAS, + NvU64 size, + NvU64 align, + NvU64 rangeLo, + NvU64 rangeHi, + NvU64 pageSize, + VAS_ALLOC_FLAGS flags, + NvU64 **ppAddr, + NvU32 *pNumAddr +) +{ + NV_STATUS status = NV_OK; + NvU64 freeSize = 0; + NvU32 pageCount = (size / pageSize); + NvU64 addr; + NvU32 idx; + NvBool bDefaultAllocMode; + + // Sanity check the input parameters. + NV_ASSERT_OR_RETURN(pFabricVAS->pGVAS != NULL, NV_ERR_OBJECT_NOT_FOUND); + NV_ASSERT_OR_RETURN(ppAddr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pNumAddr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pageSize >= RM_PAGE_SIZE_HUGE, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(align != 0, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size != 0, NV_ERR_INVALID_ARGUMENT); + + // Check the alignment and size are pageSize aligned. + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED64(align, pageSize), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED64(size, pageSize), NV_ERR_INVALID_ARGUMENT); + + // Check if heap can satisfy the request. + NV_ASSERT_OK_OR_RETURN(fabricvaspaceGetFreeHeap(pFabricVAS, &freeSize)); + if (freeSize < size) + { + NV_PRINTF(LEVEL_ERROR, + "Not enough memory in eheap, size requested = 0x%llx, " + "free memory = 0x%llx\n", + size, freeSize); + return NV_ERR_NO_MEMORY; + } + + if (flags.bForceNonContig && flags.bForceContig) + { + NV_PRINTF(LEVEL_ERROR, + "Forcing both contiguous and noncontiguous is not allowed\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + bDefaultAllocMode = (!flags.bForceNonContig && !flags.bForceContig); + + // Adjust rangeLo and rangeHi. + rangeLo = NV_ALIGN_DOWN(rangeLo, pageSize); + rangeHi = NV_ALIGN_UP(rangeHi, pageSize); + + *ppAddr = portMemAllocNonPaged(sizeof(NvU64) * pageCount); + if (*ppAddr == NULL) + { + return NV_ERR_NO_MEMORY; + } + portMemSet(*ppAddr, 0, sizeof(NvU64) * pageCount); + + status = _fabricvaspaceBindInstBlk(pFabricVAS); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to bind instance block for fabric vaspace." 
+ " Alloc failed\n"); + goto failed; + } + + // Initialize number of addresses to 0 + *pNumAddr = 0; + + // + // Attempt to allocate VA space of the size and alignment requested. + // RM_PAGE_SIZE_HUGE is passed since FLA->PA page size is 2MB. + // + if (flags.bForceContig || bDefaultAllocMode) + { + status = vaspaceAlloc(pFabricVAS->pGVAS, size, align, rangeLo, rangeHi, + RM_PAGE_SIZE_HUGE, flags, &addr); + if (status == NV_OK) + { + (*ppAddr)[0] = addr; + *pNumAddr = 1; + } + } + + // + // If size could not be allocated in one memblock, break size into + // multiple pageSize chunks. RM_PAGE_SIZE_HUGE is passed since + // FLA->PA page size is 2MB. + // + if (flags.bForceNonContig || (bDefaultAllocMode && (status != NV_OK))) + { + for (idx = 0; idx < pageCount; idx++) + { + status = vaspaceAlloc(pFabricVAS->pGVAS, pageSize, align, rangeLo, + rangeHi, RM_PAGE_SIZE_HUGE, flags, &addr); + if (status == NV_OK) + { + // Assert that the address returned is pageSize aligned + NV_ASSERT(NV_IS_ALIGNED64(addr, pageSize)); + + (*ppAddr)[idx] = addr; + *pNumAddr = *pNumAddr + 1; + } + else + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate vaspace\n"); + goto failed; + } + } + } + + return NV_OK; + +failed: + + fabricvaspaceBatchFree(pFabricVAS, *ppAddr, *pNumAddr, 1); + portMemFree(*ppAddr); + *ppAddr = NULL; + *pNumAddr = 0; + + return status; +} + +NV_STATUS +fabricvaspaceFree_IMPL +( + FABRIC_VASPACE *pFabricVAS, + NvU64 vAddr +) +{ + OBJVASPACE *pVAS = staticCast(pFabricVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NV_ASSERT_OR_RETURN(pFabricVAS->pGVAS != NULL, NV_ERR_OBJECT_NOT_FOUND); + + NV_ASSERT(vaspaceFree(pFabricVAS->pGVAS, vAddr) == NV_OK); + + kbusFlush_HAL(pGpu, pKernelBus, (BUS_FLUSH_VIDEO_MEMORY | + BUS_FLUSH_SYSTEM_MEMORY | + BUS_FLUSH_USE_PCIE_READ)); + + fabricvaspaceInvalidateTlb(pFabricVAS, pGpu, PTE_DOWNGRADE); + + _fabricvaspaceUnbindInstBlk(pFabricVAS); + + return NV_OK; +} + +NV_STATUS +fabricvaspaceMap_IMPL +( + FABRIC_VASPACE *pFabricVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi, + const MMU_MAP_TARGET *pTarget, + const VAS_MAP_FLAGS flags +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void +fabricvaspaceUnmap_IMPL +( + FABRIC_VASPACE *pFabricVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi +) +{ + return; +} + +NV_STATUS +fabricvaspaceApplyDefaultAlignment_IMPL +( + FABRIC_VASPACE *pFabricVAS, + const FB_ALLOC_INFO *pAllocInfo, + NvU64 *pAlign, + NvU64 *pSize, + NvU64 *pPageSizeLockMask +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +fabricvaspaceGetVasInfo_IMPL +( + FABRIC_VASPACE *pFabricVAS, + NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +fabricvaspacePinRootPageDir_IMPL +( + FABRIC_VASPACE *pFabricVAS, + OBJGPU *pGpu +) +{ + NV_ASSERT_OR_RETURN(pFabricVAS->pGVAS != NULL, NV_ERR_OBJECT_NOT_FOUND); + + return vaspacePinRootPageDir(pFabricVAS->pGVAS, pGpu); +} + +void +fabricvaspaceUnpinRootPageDir_IMPL +( + FABRIC_VASPACE *pFabricVAS, + OBJGPU *pGpu +) +{ + NV_ASSERT(pFabricVAS->pGVAS != NULL); + + vaspaceUnpinRootPageDir(pFabricVAS->pGVAS, pGpu); +} + +NV_STATUS +fabricvaspaceGetFreeHeap_IMPL +( + FABRIC_VASPACE *pFabricVAS, + NvU64 *freeSize +) +{ + NV_ASSERT_OR_RETURN(pFabricVAS->pGVAS != NULL, NV_ERR_OBJECT_NOT_FOUND); + NV_ASSERT_OR_RETURN(freeSize != NULL, NV_ERR_INVALID_ARGUMENT); + + return gvaspaceGetFreeHeap(dynamicCast(pFabricVAS->pGVAS, OBJGVASPACE), + freeSize); +} + +void 
+fabricvaspaceBatchFree_IMPL +( + FABRIC_VASPACE *pFabricVAS, + NvU64 *pAddr, + NvU32 numAddr, + NvU32 stride +) +{ + OBJVASPACE *pVAS = staticCast(pFabricVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NvU32 count = 0; + NvU32 idx = 0; + + for (count = 0; count < numAddr; count++) + { + NV_ASSERT(vaspaceFree(pFabricVAS->pGVAS, pAddr[idx]) == NV_OK); + + idx += stride; + } + + kbusFlush_HAL(pGpu, pKernelBus, (BUS_FLUSH_VIDEO_MEMORY | + BUS_FLUSH_SYSTEM_MEMORY | + BUS_FLUSH_USE_PCIE_READ)); + + fabricvaspaceInvalidateTlb(pFabricVAS, pGpu, PTE_DOWNGRADE); + + _fabricvaspaceUnbindInstBlk(pFabricVAS); +} + +void +fabricvaspaceInvalidateTlb_IMPL +( + FABRIC_VASPACE *pFabricVAS, + OBJGPU *pGpu, + VAS_PTE_UPDATE_TYPE type +) +{ + vaspaceInvalidateTlb(pFabricVAS->pGVAS, pGpu, type); +} + +NV_STATUS +fabricvaspaceGetGpaMemdesc_IMPL +( + FABRIC_VASPACE *pFabricVAS, + MEMORY_DESCRIPTOR *pFabricMemdesc, + OBJGPU *pMappingGpu, + MEMORY_DESCRIPTOR **ppAdjustedMemdesc +) +{ + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu); + MEMORY_DESCRIPTOR *pRootMemDesc = NULL; + NODE *pNode = NULL; + NV_STATUS status = NV_OK; + NvU64 rootOffset = 0; + + NV_ASSERT_OR_RETURN(ppAdjustedMemdesc != NULL, NV_ERR_INVALID_ARGUMENT); + + if (memdescGetAddressSpace(pFabricMemdesc) != ADDR_FABRIC_V2 || + (pKernelNvlink != NULL && + knvlinkIsP2pLoopbackSupported(pMappingGpu, pKernelNvlink))) + { + *ppAdjustedMemdesc = pFabricMemdesc; + return NV_OK; + } + + pRootMemDesc = memdescGetRootMemDesc(pFabricMemdesc, &rootOffset); + + RmPhysAddr *pteArray = memdescGetPteArray(pRootMemDesc, AT_GPU); + + // Check if pteArray[0] is within the VAS range for the mapping GPU. + if ((pteArray[0] < vaspaceGetVaStart(staticCast(pFabricVAS, OBJVASPACE))) || + (pteArray[0] > vaspaceGetVaLimit(staticCast(pFabricVAS, OBJVASPACE)))) + { + *ppAdjustedMemdesc = pFabricMemdesc; + return NV_OK; + } + + // + // If the address space is of type ADDR_FABRIC_V2 then determine if the FLA import + // is on the mapping GPU. If FLA import is on the mapping GPU and NVLink P2P over + // loopback is not supported, then map GVA->PA directly. For discontiguous fabric + // memory allocation, searching for the first entry in the pteArray should be fine + // to determine if FLA import is on the mapping GPU. + // + NV_ASSERT_OK_OR_RETURN(btreeSearch(pteArray[0], &pNode, pFabricVAS->pFabricVaToGpaMap)); + + FABRIC_VA_TO_GPA_MAP_NODE *pFabricNode = (FABRIC_VA_TO_GPA_MAP_NODE *)pNode->Data; + + // + // Create a sub-memdesc for the offset into the vidMemDesc where the GVA would be + // mapped. Note this includes two offsets: + // 1. Offset into the fabric memdesc where the GVA is mapped. + // 2. Offset into the physical vidmem memdesc where the fabric memory is mapped. 
+ // + status = memdescCreateSubMem(ppAdjustedMemdesc, pFabricNode->pVidMemDesc, pMappingGpu, + rootOffset + pFabricNode->offset, + memdescGetSize(pFabricMemdesc)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to create submMemdesc for the GVA->PA mapping\n"); + return status; + } + + return NV_OK; +} + +void +fabricvaspacePutGpaMemdesc_IMPL +( + FABRIC_VASPACE *pFabricVAS, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + memdescDestroy(pMemDesc); +} + +void +fabricvaspaceVaToGpaMapRemove_IMPL +( + FABRIC_VASPACE *pFabricVAS, + NvU64 vAddr +) +{ + FABRIC_VA_TO_GPA_MAP_NODE *pFabricNode = NULL; + NODE *pNode = NULL; + + if (btreeSearch(vAddr, &pNode, pFabricVAS->pFabricVaToGpaMap) == NV_OK) + { + pFabricNode = (FABRIC_VA_TO_GPA_MAP_NODE *)pNode->Data; + + btreeUnlink(&pFabricNode->Node, &pFabricVAS->pFabricVaToGpaMap); + + portMemFree(pFabricNode); + } +} + +NV_STATUS +fabricvaspaceVaToGpaMapInsert_IMPL +( + FABRIC_VASPACE *pFabricVAS, + NvU64 vAddr, + MEMORY_DESCRIPTOR *pVidMemDesc, + NvU64 offset +) +{ + FABRIC_VA_TO_GPA_MAP_NODE *pFabricNode = NULL; + NV_STATUS status = NV_OK; + + pFabricNode = portMemAllocNonPaged(sizeof(FABRIC_VA_TO_GPA_MAP_NODE)); + if (pFabricNode == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pFabricNode, 0, sizeof(FABRIC_VA_TO_GPA_MAP_NODE)); + + pFabricNode->pVidMemDesc = pVidMemDesc; + pFabricNode->offset = offset; + pFabricNode->Node.keyStart = vAddr; + pFabricNode->Node.keyEnd = vAddr; + pFabricNode->Node.Data = pFabricNode; + + // Insert into the btree tracking memory fabric allocations for this GPU. + status = btreeInsert(&pFabricNode->Node, &pFabricVAS->pFabricVaToGpaMap); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to insert addr 0x%llx into the memory fabric tree\n", + pFabricNode->Node.keyStart); + + portMemFree(pFabricNode); + return status; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/mem_mgr/fla_mem.c b/src/nvidia/src/kernel/mem_mgr/fla_mem.c new file mode 100644 index 000000000..16802a344 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/fla_mem.c @@ -0,0 +1,439 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "os/os.h" +#include "mem_mgr_internal.h" +#include "mem_mgr/fla_mem.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "rmapi/rmapi.h" +#include "rmapi/client.h" +#include "gpu/device/device.h" + +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "class/cl00f3.h" // NV01_MEMORY_FLA + +NV_STATUS +flamemConstruct_IMPL +( + FlaMemory *pFlaMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_FLA_MEMORY_ALLOCATION_PARAMS *pAllocParams; + Memory *pMemory = staticCast(pFlaMemory, Memory); + OBJGPU *pGpu = pMemory->pGpu; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + NvU32 idx = 0; + NV_ADDRESS_SPACE addressSpace = ADDR_FBMEM; + NvU32 Cache = 0; + NvBool bContig; + NvU32 hwResId = 0; + NvU32 format = 0; + NvU32 pageSize = 0; + NvU64 pageCount = 0; + NvU64 newBase = 0; + NvU64 flaBaseAddr = 0; + NvU64 flaSize = 0; + RsClient *pExportClient; + RsClient *pImportClient; + OBJGPU *pExportGpu = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + RsResourceRef *pExportRef = NULL; + NvBool bLoopback = NV_FALSE; + NvU32 gfid; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pParams)) + { + return flamemCopyConstruct_IMPL(pFlaMemory, pCallContext, pParams); + } + + pAllocParams = pParams->pAllocParams; + + flaBaseAddr = pAllocParams->base; + flaSize = pAllocParams->limit + 1; + + // + // NV01_MEMORY_FLA class is used by the clients to create memory objects for memory + // to be imported from other GPU(s)/FAM/Process. 
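/*
 * Editorial sketch (not part of this change): roughly how an importing client
 * might fill NV_FLA_MEMORY_ALLOCATION_PARAMS for this constructor. The handle
 * parameters and the attr/type/flag choices are hypothetical; base/limit are
 * derived from the exported handle when hExportHandle is provided.
 */
static NV_STATUS
importFlaMemorySketch(RM_API *pRmApi, NvHandle hImportClient, NvHandle hImportDevice,
                      NvHandle hFlaMemory, NvHandle hExportClient,
                      NvHandle hExportSubdevice, NvHandle hExportedFlaMemory)
{
    NV_FLA_MEMORY_ALLOCATION_PARAMS flaParams = {0};

    flaParams.type             = NVOS32_TYPE_IMAGE;  // any type < NVOS32_NUM_MEM_TYPES
    flaParams.attr             = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) |
                                 DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE);
    flaParams.attr2            = DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB);
    flaParams.flagsOs02        = DRF_DEF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS);
    flaParams.hExportClient    = hExportClient;      // client owning the exported memory
    flaParams.hExportSubdevice = hExportSubdevice;   // subdevice of the exporting GPU
    flaParams.hExportHandle    = hExportedFlaMemory; // exported FLA memory to import

    return pRmApi->AllocWithHandle(pRmApi, hImportClient, hImportDevice, hFlaMemory,
                                   NV01_MEMORY_FLA, &flaParams);
}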
+ // + + if (DRF_VAL(OS02, _FLAGS, _COHERENCY, pAllocParams->flagsOs02) == NVOS02_FLAGS_COHERENCY_UNCACHED) + Cache = NV_MEMORY_UNCACHED; + else if (DRF_VAL(OS02, _FLAGS, _COHERENCY, pAllocParams->flagsOs02) == NVOS02_FLAGS_COHERENCY_CACHED) + Cache = NV_MEMORY_CACHED; + else if (DRF_VAL(OS02, _FLAGS, _COHERENCY, pAllocParams->flagsOs02) == NVOS02_FLAGS_COHERENCY_WRITE_COMBINE) + Cache = NV_MEMORY_WRITECOMBINED; + else if (DRF_VAL(OS02, _FLAGS, _COHERENCY, pAllocParams->flagsOs02) == NVOS02_FLAGS_COHERENCY_WRITE_THROUGH) + Cache = NV_MEMORY_CACHED; + else if (DRF_VAL(OS02, _FLAGS, _COHERENCY, pAllocParams->flagsOs02) == NVOS02_FLAGS_COHERENCY_WRITE_PROTECT) + Cache = NV_MEMORY_CACHED; + else if (DRF_VAL(OS02, _FLAGS, _COHERENCY, pAllocParams->flagsOs02) == NVOS02_FLAGS_COHERENCY_WRITE_BACK) + Cache = NV_MEMORY_CACHED; + + // Must be of valid type, in FBMEM + if ((pAllocParams->type >= NVOS32_NUM_MEM_TYPES) || + (!FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pAllocParams->attr)) || + (pAllocParams->flags & (NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED | + NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT | + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE | + NVOS32_ALLOC_FLAGS_BANK_FORCE | + NVOS32_ALLOC_FLAGS_VIRTUAL))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Retrieving the exporting device + status = serverGetClientUnderLock(&g_resServ, pAllocParams->hExportClient, &pExportClient); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR,"hExportClient: %x not valid, status: %x\n", pAllocParams->hExportClient, + status); + return status; + } + + status = serverGetClientUnderLock(&g_resServ, pParams->hClient, &pImportClient); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR,"hImportClient: %x not valid, status: %x\n", pParams->hClient, + status); + return status; + } + + status = gpuGetByHandle(pImportClient, pAllocParams->hExportSubdevice, NULL, &pExportGpu); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR,"hParent: %x not valid under client: %x, status: %x\n", pAllocParams->hExportSubdevice, + pParams->hClient, status); + return status; + } + + if (pExportGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Invalid Parent handle: %x in client: %x \n", + pAllocParams->hExportSubdevice, pParams->hClient); + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + pFlaMemory->peerDeviceInst = gpuGetDeviceInstance(pExportGpu); + pFlaMemory->peerGpuInst = gpuGetInstance(pExportGpu); + + bLoopback = (gpuGetDeviceInstance(pGpu) == pFlaMemory->peerDeviceInst); + + switch (DRF_VAL(OS32, _ATTR, _PAGE_SIZE, pAllocParams->attr)) + { + case NVOS32_ATTR_PAGE_SIZE_4KB: + pageSize = RM_PAGE_SIZE; + break; + case NVOS32_ATTR_PAGE_SIZE_BIG: + pageSize = RM_PAGE_SIZE_64K; + break; + case NVOS32_ATTR_PAGE_SIZE_HUGE: + if (DRF_VAL(OS32, _ATTR2, _PAGE_SIZE_HUGE, pAllocParams->attr2) == + NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB) + pageSize = RM_PAGE_SIZE_HUGE; + else if ((DRF_VAL(OS32, _ATTR2, _PAGE_SIZE_HUGE, pAllocParams->attr2) == + NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB) || + (DRF_VAL(OS32, _ATTR2, _PAGE_SIZE_HUGE, pAllocParams->attr2) == + NVOS32_ATTR2_PAGE_SIZE_HUGE_DEFAULT)) + { + pageSize = RM_PAGE_SIZE_512M; + } + else + { + NV_PRINTF(LEVEL_ERROR, "Unknown page size specified"); + return NV_ERR_INVALID_ARGUMENT; + } + break; + case NVOS32_ATTR_PAGE_SIZE_DEFAULT: + pageSize = RM_PAGE_SIZE_512M; + break; + } + + // AlignUp flaSize to pageSize + flaSize = NV_ALIGN_UP64(flaSize, pageSize); + + // if hExportHandle is passed, we need to bump the refcount as well as deduct the base and size + // from the handle + if (IS_GFID_PF(gfid) && 
pAllocParams->hExportHandle) + { + RMRES_MEM_INTER_MAP_PARAMS memInterMapParams; + MEMORY_DESCRIPTOR *pExportMemDesc; + + // Decoding the export handle + status = clientGetResourceRef(pExportClient, pAllocParams->hExportHandle, &pExportRef); + if (status != NV_OK) + return status; + + // Use virtual GetMemInterMapParams to get information needed for mapping from pExportRef->pResource + portMemSet(&memInterMapParams, 0, sizeof(memInterMapParams)); + + memInterMapParams.pGpu = pExportGpu; + memInterMapParams.pMemoryRef = pExportRef; + memInterMapParams.bSubdeviceHandleProvided = NV_TRUE; + + status = rmresGetMemInterMapParams(dynamicCast(pExportRef->pResource, RmResource), &memInterMapParams); + if (status != NV_OK) + return status; + + pExportMemDesc = memInterMapParams.pSrcMemDesc; + if (pExportMemDesc) + { + if (!(pExportMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS)) + { + NV_PRINTF(LEVEL_ERROR, "Exported handle is discontiguous, bailing out, export handle: %x gpu: %x\n", + pAllocParams->hExportHandle, pExportGpu->gpuInstance); + return NV_ERR_INVALID_ARGUMENT; + } + + flaBaseAddr = pExportMemDesc->_pteArray[0]; + flaSize = NV_ALIGN_UP64(memdescGetSize(pExportMemDesc), pageSize); + NV_PRINTF(LEVEL_INFO, "fla base addr: %llx,. size: %llx, gpu: %x\n", flaBaseAddr, flaSize, pGpu->gpuInstance); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Invalid Export Handle: %x, export gpu: %x\n", pAllocParams->hExportHandle, pExportGpu->gpuInstance); + return NV_ERR_INVALID_ARGUMENT; + } + } + + if(!NV_IS_ALIGNED(flaBaseAddr, pageSize)) + { + NV_PRINTF(LEVEL_ERROR, + "flaBaseAsddr: 0x%llx is not aligned to page size: 0x%x\n", + flaBaseAddr, pageSize); + return NV_ERR_INVALID_ARGUMENT; + } + + if ((flaSize == 0) + || (flaSize < pageSize) + ) + { + NV_PRINTF(LEVEL_ERROR, "flaSize: %llx is either 0 or less than page size: %x, gpu: %x\n", + flaSize, pageSize, pGpu->gpuInstance); + return NV_ERR_INVALID_ARGUMENT; + } + + if ((status = memmgrGetFlaKind_HAL(pGpu, pMemoryManager, &format)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "FLA is not supported for this platform, only from ampere\n"); + return status; + } + + if (!bLoopback && pAllocParams->hExportHandle) + { + // + // TODO: This might break SMC assumption whenver we support FLA in SMC (currently not supported for Ampere) + // Duping the exported handle under device on the import side + // we need this to make sure that the export handle doesn't get reused after FM crash or app crash + // + // During Import, we need to increment the refCount on the exported FLA VA Handle, else exporting client can + // free the FLA VA Handle and can be reused by same/another client for a different memory leading to a security riskk. 
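    // (Editorial note: the DupObject call below therefore takes a reference on
    // the exporter's FLA allocation under the importing client, so the
    // underlying FLA range stays alive until flamemDestruct_IMPL frees
    // hDupedExportMemory, even if the exporting client frees its handle first.)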
+ // Currently, we are duping the exported handle under this hierarchy (importing client-> exporting device-> + // ..duped exported FLA VAS -> duped exported FLA Handle) + // + // Notes for scalabilty: Since this import call touches two clients, we should be locking both clients before calling + // into the constructor + // + RsClient *pImportClient; + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, pParams->hClient, &pImportClient)); + Device *pImportDevice; + NV_ASSERT_OK_OR_RETURN(deviceGetByGpu(pImportClient, pExportGpu, NV_TRUE, &pImportDevice)); + + status = pRmApi->DupObject(pRmApi, + pParams->hClient, + RES_GET_HANDLE(pImportDevice), + &pFlaMemory->hDupedExportMemory, + pAllocParams->hExportClient, + pAllocParams->hExportHandle, + 0 //flags + ); + if (status != NV_OK) + return status; + + } + bContig = FLD_TEST_DRF(OS02, _FLAGS, _PHYSICALITY, _CONTIGUOUS, pAllocParams->flagsOs02); + + pAllocParams->flags |= (NVOS32_ALLOC_FLAGS_MEMORY_HANDLE_PROVIDED | + NVOS32_ALLOC_FLAGS_IGNORE_BANK_PLACEMENT); + + status = memdescCreate(&pMemDesc, pGpu, flaSize, 0, bContig, addressSpace, + Cache, + MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE_FB_BC_ONLY(pGpu, addressSpace)); + if (status != NV_OK) + { + goto done_fbmem; + } + + // + // Now arrange the fla range based on the page size attributes + // TODO: what if the page size is more than the flaSize or + // flaSize not aligned for pageSize + // + newBase = flaBaseAddr; + idx = 0; + if (bContig) + { + memdescSetPte(pMemDesc, AT_GPU, idx, newBase); + } + else + { + pageCount = flaSize/pageSize; + // reformat the base to PFN and store it in a list + for ( ;idx < pageCount; idx++, newBase += pageSize) + { + memdescSetPte(pMemDesc, AT_GPU, idx, newBase); + } + } + memdescSetPageSize(pMemDesc, AT_GPU, pageSize); + + status = memConstructCommon(pMemory, + pCallContext->pResourceRef->externalClassId, // FLA_MEMORY + 0, // flags + pMemDesc, + 0, // heapOwner + NULL, + pAllocParams->attr, + pAllocParams->attr2, + 0, // pitch + pAllocParams->type, // type + NVOS32_MEM_TAG_NONE, + NULL); // pHwResource + if (status != NV_OK) + { + goto done_fbmem; + } + + memdescSetPteKind(pMemory->pMemDesc, format); + memdescSetHwResId(pMemory->pMemDesc, hwResId); + + NV_PRINTF(LEVEL_INFO, "FLA Memory imported into peerDeviceInst: %x, base: %llx., size: %llx, exported from: %x \n", + gpuGetDeviceInstance(pGpu), newBase, flaSize, pFlaMemory->peerDeviceInst); + + // Restrict RPC when guest is running in FULL SRIOV mode (split vas is enabled) + if (IS_VIRTUAL_WITH_SRIOV(pGpu) && gpuIsWarBug200577889SriovHeavyEnabled(pGpu)) + { + pAllocParams->base = newBase; + pAllocParams->limit = flaSize - 1; + NV_RM_RPC_ALLOC_OBJECT(pGpu, + pParams->hClient, + pParams->hParent, + pParams->hResource, + pParams->externalClassId, + pAllocParams, + status); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "vGPU-RPC to FLA Memory Construct failed with status: %x \n", + status); + goto delete_memory; + } + + pMemory->bRpcAlloc = NV_TRUE; + } + + return NV_OK; + +delete_memory: + memDestructCommon(pMemory); + +done_fbmem: + if (pMemDesc != NULL) + { + memdescDestroy(pMemDesc); + } + + // free the duped memory + if (!bLoopback && pAllocParams->hExportHandle) + { + pRmApi->Free(pRmApi, pParams->hClient, pFlaMemory->hDupedExportMemory); + } + + return status; +} + +NvBool +flamemCanCopy_IMPL +( + FlaMemory *pFlaMemory +) +{ + return NV_TRUE; +} + +NV_STATUS +flamemCopyConstruct_IMPL +( + FlaMemory *pFlaMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + 
FlaMemory *pSrcFlaMem; + + pSrcFlaMem = dynamicCast(pParams->pSrcRef->pResource, FlaMemory); + + pFlaMemory->peerDeviceInst = pSrcFlaMem->peerDeviceInst; + pFlaMemory->peerGpuInst = pSrcFlaMem->peerGpuInst; + pFlaMemory->hDupedExportMemory = NV01_NULL_OBJECT; + return NV_OK; +} + +void +flamemDestruct_IMPL +( + FlaMemory *pFlaMemory +) +{ + Memory *pMemory = staticCast(pFlaMemory, Memory); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + RsClient *pClient = RES_GET_CLIENT(pMemory); + NvHandle hClient = pClient->hClient; + OBJGPU *pGpu = pMemory->pGpu; + + if (!pFlaMemory) + { + NV_PRINTF(LEVEL_ERROR, "Invalid Object in destruct, bailing out ..\n"); + return; + } + // + // Releasing the Duped export memory handle + // Note: RM dupes the export memory handle with the importing client handle + // both the export memory client and import memory client are external clients + // hClient here can be same/different than duped memory client (single process/multiprocess) + // This could cause issues when we move to per-client locking, need to revisit this at that point + // + if (pFlaMemory->peerDeviceInst != gpuGetDeviceInstance(pGpu) && + pFlaMemory->hDupedExportMemory != NV01_NULL_OBJECT) + pRmApi->Free(pRmApi, hClient, pFlaMemory->hDupedExportMemory); +} diff --git a/src/nvidia/src/kernel/mem_mgr/gpu_vaspace.c b/src/nvidia/src/kernel/mem_mgr/gpu_vaspace.c new file mode 100644 index 000000000..326da22ed --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/gpu_vaspace.c @@ -0,0 +1,5130 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Rotuines ***************************\ +* * +* GPU Virtual Address Space Function Definitions. 
* +* * +\***************************************************************************/ + +#include "gpu/mmu/kern_gmmu.h" +#include "mem_mgr/gpu_vaspace.h" +#include "mem_mgr/fabric_vaspace.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "os/os.h" +#include "containers/eheap_old.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/bus/kern_bus.h" +#include "mmu/mmu_walk.h" +#include "lib/base_utils.h" +#include "class/cl90f1.h" // FERMI_VASPACE_A +#include "ctrl/ctrl90f1.h" // FERMI_VASPACE_A +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/device/device.h" +#include "kernel/gpu/fifo/kernel_channel_group.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "gpu/subdevice/subdevice.h" +#include "core/locks.h" +#include "mem_mgr/pool_alloc.h" +#include "deprecated/rmapi_deprecated.h" +#include "rmapi/rs_utils.h" +#include "gpu/mem_mgr/vaspace_api.h" + + + +#define GMMU_PD1_VADDR_BIT_LO 29 + +static const NvU32 pageSizes[VAS_PAGESIZE_IDX_MAX] = { + RM_PAGE_SIZE, + RM_PAGE_SIZE_64K, + RM_PAGE_SIZE_HUGE, + RM_PAGE_SIZE_512M +}; + +static NV_STATUS +_gvaspaceGpuStateConstruct +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_GPU_STATE *pGpuState, + const NvU64 reqBigPageSize, + const NvU64 vaStart, + const NvU64 vaLimit, + const NvU64 vaStartInternal, + const NvU64 vaLimitInternal, + const NvU32 flags, + const NvBool bFirst, + NvU64 *pFullPdeCoverage, + NvU32 *pPartialPdeExpMax +); + +static void +_gvaspaceGpuStateDestruct +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_GPU_STATE *pGpuState +); + +static NV_STATUS +_gvaspaceReserveTopForGrowth +( + OBJGVASPACE *pGVAS +); + +static NV_STATUS +_gvaspaceReserveRange +( + OBJGVASPACE *pGVAS, + NvU64 rangeLo, + NvU64 rangeHi +); + +static NV_STATUS +_gvaspacePinLazyPageTables +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 va +); + +static NV_STATUS +_gvaspaceFreeVASBlock +( + OBJEHEAP *pHeap, + void *pEnv, + PEMEMBLOCK pMemBlock, + NvU32 *pContinue, + NvU32 *pInvalCursor +); + +static NV_STATUS +_gvaspaceMappingInsert +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_BLOCK *pVASBlock, + const NvU64 vaLo, + const NvU64 vaHi, + const VAS_MAP_FLAGS flags +); + +static NV_STATUS +_gvaspaceMappingRemove +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_BLOCK *pVASBlock, + const NvU64 vaLo, + const NvU64 vaHi +); + +static void +_gvaspaceAddPartialPtRange +( + OBJGVASPACE *pGVAS, + const NvU64 va +); + +static NV_STATUS +_gvaspaceSetExternalPageDirBase +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc +); + +static NV_STATUS +_gvaspaceReservePageTableEntries +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi, + const NvU64 pageSizeMask +); + +static NV_STATUS +_gvaspaceReleasePageTableEntries +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi, + const NvU64 pageSizeMask +); + +static NV_STATUS +_gvaspaceReleaseUnreservedPTEs +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi, + const MMU_FMT_LEVEL *pLevelFmt +); + +static NV_STATUS +_gvaspaceCopyServerRmReservedPdesToServerRm +( + NvHandle hClient, + NvHandle hVASpace, + OBJGPU *pGpu, + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *pPdeCopyParams +); + +static void +_gvaspaceForceFreePageLevelInstances +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_GPU_STATE *pGpuState +); + +static NV_STATUS 
+_gvaspaceBar1VaSpaceConstructFW +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + return NV_OK; + } + + status = gvaspacePinRootPageDir(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + return status; +} + +static NV_STATUS +_gvaspaceBar1VaSpaceConstructClient +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + MMU_WALK_USER_CTX userCtx = {0}; + + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + NV_ASSERT_OR_RETURN(NULL != userCtx.pGpuState, NV_ERR_INVALID_STATE); + + status = mmuWalkSparsify(userCtx.pGpuState->pWalk, vaspaceGetVaStart(pVAS), + vaspaceGetVaLimit(pVAS), NV_FALSE); + + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + } + + return status; +} + +static NV_STATUS +_gvaspaceBar1VaSpaceConstruct +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + + status = _gvaspaceBar1VaSpaceConstructFW(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + status = _gvaspaceBar1VaSpaceConstructClient(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + return status; +} + +static NV_STATUS +_gvaspaceReserveVaForServerRm +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + + // Reserve everything below vaStartServerRMOwned as non allocable by server RM. + if (pVAS->vasStart < pGVAS->vaStartServerRMOwned) + { + status = _gvaspaceReserveRange(pGVAS, pVAS->vasStart, + pGVAS->vaStartServerRMOwned - 1); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + + // Reserve everything above vaLimitServerRMOwned as non allocable by server RM. + if (pGVAS->vaLimitServerRMOwned < pGVAS->vaLimitInternal) + { + status = _gvaspaceReserveRange(pGVAS, pGVAS->vaLimitServerRMOwned + 1, + pGVAS->vaLimitInternal); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + + return status; +} + +static NV_STATUS +_gvaspaceReserveVaForClientRm +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + + // + // Client RM needs to hold the GPU lock for any GPU it wants to RPC to. + // We don't actually know which locks we potentially hold here, so use + // SAFE_LOCK_UPGRADE. + // + GPU_MASK gpuMask = pVAS->gpuMask; + status = rmGpuGroupLockAcquire(0, GPU_LOCK_GRP_MASK, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_MEM, &gpuMask); + + // If we get NOTHING_TO_DO, we already have the needed locks, so don't free them + if (status == NV_WARN_NOTHING_TO_DO) + gpuMask = 0; + else if (status != NV_OK) + return status; + + // + // Reserve everything in the range [vaStartServerRMOwned, vaLimitServerRMOwned] + // as non allocable by client RM. This range is reserved for server RM. + // + status = _gvaspaceReserveRange(pGVAS, pGVAS->vaStartServerRMOwned, + pGVAS->vaLimitServerRMOwned); + NV_ASSERT_OR_GOTO(status == NV_OK, done); + + if (pGVAS->flags & VASPACE_FLAGS_PTETABLE_PMA_MANAGED) + { + // Loop over each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (pMemoryManager->pPageLevelReserve == NULL) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_STATE; + break; + } + } + FOR_EACH_GPU_IN_MASK_UC_END + + NV_ASSERT_OR_GOTO(status == NV_OK, done); + } + + // Loop over each GPU associated with VAS. 
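+    //
+    // The loop below pre-reserves ("pins") page directory entries down to PD1
+    // for the server RM owned range on every GPU in this VAS, and then copies
+    // those reserved PDEs over to server RM. With GMMU_PD1_VADDR_BIT_LO = 29,
+    // a single PD1 entry covers 2^29 bytes = 512MB of VA, which is also why
+    // the server-owned range is asserted to be 512MB aligned in
+    // _gvaspaceReserveSplitVaSpace.
+    //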
+ FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + MMU_WALK_USER_CTX userCtx = {0}; + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + if (NULL == userCtx.pGpuState) + { + status = NV_ERR_INVALID_STATE; + break; + } + else + { + // + // We're pinning only till PD1 for now to conserve memory. We don't know + // how much memory will be eventually consumed by leaf page tables. + // + const MMU_FMT_LEVEL *pLevelFmt = + mmuFmtFindLevelWithPageShift(userCtx.pGpuState->pFmt->pRoot, GMMU_PD1_VADDR_BIT_LO); + status = mmuWalkReserveEntries(userCtx.pGpuState->pWalk, + pLevelFmt, + pGVAS->vaStartServerRMOwned, + pGVAS->vaLimitServerRMOwned, + NV_TRUE); + + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + + if (status != NV_OK) + { + break; + } + } + + status = gvaspaceCopyServerRmReservedPdesToServerRm(pGVAS, pGpu); + if (status != NV_OK) + { + break; + } + } + FOR_EACH_GPU_IN_MASK_UC_END + +done: + if (gpuMask != 0) + { + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + } + return status; +} + +static NV_STATUS +_gvaspaceReserveSplitVaSpace +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + NvBool bClientRm = NV_FALSE; + NvBool bServerRm = NV_FALSE; + NvU32 gfid; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + if (IS_VIRTUAL_WITH_SRIOV(pGpu) || IS_GSP_CLIENT(pGpu)) + { + bClientRm = NV_TRUE; + } + else if (IS_GFID_VF(gfid)) + { + bServerRm = NV_TRUE; + } + + if (bServerRm || bClientRm) + { + pGVAS->vaStartServerRMOwned = NV_MIN(pGVAS->vaLimitInternal - + SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE + 1, + SPLIT_VAS_SERVER_RM_MANAGED_VA_START); + pGVAS->vaLimitServerRMOwned = pGVAS->vaStartServerRMOwned + + SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE - 1; + + // Base and limit + 1 should be aligned to 512MB. + if (!NV_IS_ALIGNED(pGVAS->vaStartServerRMOwned, NVBIT64(GMMU_PD1_VADDR_BIT_LO))) + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + + if (!NV_IS_ALIGNED(pGVAS->vaLimitServerRMOwned + 1, NVBIT64(GMMU_PD1_VADDR_BIT_LO))) + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + + // Validate limit. + if (pGVAS->vaLimitServerRMOwned > pGVAS->vaLimitInternal) + { + NV_PRINTF(LEVEL_ERROR, "vaLimitServerRMOwned (0x%llx)" + "> vaLimitInternal (0x%llx)\n", + pGVAS->vaLimitServerRMOwned, pGVAS->vaLimitInternal); + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + + // + // If we are running inside server on behalf of a client, server RM can assign VA + // only inside the range [vaStartServerRMOwned, vaLimitServerRMOwned]. 
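+        // In the server RM case everything outside that window is reserved in
+        // the heap, so server RM can only allocate inside it; in the client RM
+        // case the window itself is reserved, so client allocations can never
+        // land in server-owned VA. Both paths use _gvaspaceReserveRange.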
+ // + if (bServerRm) + { + status = _gvaspaceReserveVaForServerRm(pGVAS, pGpu); + } + else if (bClientRm) + { + status = _gvaspaceReserveVaForClientRm(pGVAS, pGpu); + } + } + return status; +} + + +static NvU32 +_gvaspacePageSizeToIdx +( + NvU64 pageSize +) +{ + switch (pageSize) + { + case RM_PAGE_SIZE: + return VAS_PAGESIZE_IDX_4K; + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_128K: + return VAS_PAGESIZE_IDX_BIG; + case RM_PAGE_SIZE_HUGE: + return VAS_PAGESIZE_IDX_HUGE; + case RM_PAGE_SIZE_512M: + return VAS_PAGESIZE_IDX_512M; + default: + NV_PRINTF(LEVEL_ERROR, "Invalid pageSize=0x%llx!\n", pageSize); + NV_ASSERT(0); + return 0; + } +} + +NV_STATUS +gvaspaceConstruct__IMPL +( + OBJGVASPACE *pGVAS, + NvU32 classId, + NvU32 vaspaceId, + NvU64 vaStart, + NvU64 vaLimit, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + NvU32 flags +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + NvU64 reqBigPageSize; + NV_STATUS status = NV_OK; + GVAS_GPU_STATE *pGpuState; + NvU32 highestBitIdx; + NvU64 fullPdeCoverage = 0; + NvU32 partialPdeExpMax = 0; + NvBool bFirst = NV_TRUE; + NvBool bCallingContextPlugin; + + NV_ASSERT_OR_RETURN(FERMI_VASPACE_A == classId, NV_ERR_INVALID_ARGUMENT); + + // Save off flags. + pGVAS->flags = flags; + + // Save off UVM mirroring flag. + if (flags & VASPACE_FLAGS_SET_MIRRORED) + { + NV_ASSERT_OR_RETURN(!(pGVAS->flags & VASPACE_FLAGS_BAR), NV_ERR_ILLEGAL_ACTION); + NV_ASSERT_OR_RETURN(!(pGVAS->flags & VASPACE_FLAGS_IS_EXTERNALLY_OWNED), NV_ERR_INVALID_ARGUMENT); + pGVAS->bIsMirrored = NV_TRUE; + } + + if (flags & VASPACE_FLAGS_ENABLE_FAULTING) + { + // All channels in this address space will have faulting enabled. + pGVAS->bIsFaultCapable = NV_TRUE; + } + if (flags & VASPACE_FLAGS_IS_EXTERNALLY_OWNED) + { + // This address space is managed by the UVM driver. + pGVAS->bIsExternallyOwned = NV_TRUE; + } + if (flags & VASPACE_FLAGS_ENABLE_ATS) + { + pGVAS->bIsAtsEnabled = NV_TRUE; + NV_PRINTF(LEVEL_INFO, "ATS Enabled VaSpace\n"); + // + // Initialize with invalid PASID value for sanity checking later during + // PASID programming in HW. + // For non-MODS case, PASID is programmed via control call + // NV0080_CTRL_DMA_SET_PAGE_DIRECTORY + // + pGVAS->processAddrSpaceId = NV_U32_MAX; + } + + if (flags & VASPACE_FLAGS_FLA) + { + pGVAS->flags |= VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB; + } + + // Determine requested big page size based on flags. + switch (DRF_VAL(_VASPACE, _FLAGS, _BIG_PAGE_SIZE, flags)) + { + case NV_VASPACE_FLAGS_BIG_PAGE_SIZE_64K: + reqBigPageSize = RM_PAGE_SIZE_64K; + break; + case NV_VASPACE_FLAGS_BIG_PAGE_SIZE_128K: + reqBigPageSize = RM_PAGE_SIZE_128K; + break; + case NV_VASPACE_FLAGS_BIG_PAGE_SIZE_DEFAULT: + reqBigPageSize = 0; // Let GMMU pick based on format. + break; + default: + NV_ASSERT_OR_RETURN(0, NV_ERR_NOT_SUPPORTED); + break; + } + + // Create per-GPU state array. + highestBitIdx = pVAS->gpuMask; + HIGHESTBITIDX_32(highestBitIdx); + pGVAS->pGpuStates = portMemAllocNonPaged(sizeof(*pGVAS->pGpuStates) * (highestBitIdx + 1)); + NV_ASSERT_OR_RETURN(NULL != pGVAS->pGpuStates, NV_ERR_NO_MEMORY); + portMemSet(pGVAS->pGpuStates, 0, sizeof(*pGVAS->pGpuStates) * (highestBitIdx + 1)); + + // Initialize channel group map + mapInit(&pGVAS->chanGrpMap, portMemAllocatorGetGlobalNonPaged()); + + // Loop over each GPU associated with VAS. 
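+    //
+    // pGpuStates is sized from the highest set bit of gpuMask and indexed by
+    // each GPU's position within the mask (see gvaspaceGetGpuState_IMPL), so
+    // the loop below constructs exactly one GVAS_GPU_STATE per GPU attached
+    // to this VAS.
+    //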
+ FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + status = _gvaspaceGpuStateConstruct(pGVAS, pGpu, pGpuState, reqBigPageSize, + vaStart, vaLimit, vaStartInternal, + vaLimitInternal, flags, + bFirst, + &fullPdeCoverage, &partialPdeExpMax); + if (NV_OK != status) + { + DBG_BREAKPOINT(); + break; + } + bFirst = NV_FALSE; + } + FOR_EACH_GPU_IN_MASK_UC_END + if (NV_OK != status) + { + goto catch; + } + + // Validate limit. + NV_ASSERT_OR_RETURN(pVAS->vasStart <= pVAS->vasLimit, NV_ERR_INVALID_ARGUMENT); + // External limit is applied to the HW, so must be at least the internal limit. + NV_ASSERT_OR_RETURN(pVAS->vasLimit >= pGVAS->vaLimitInternal, NV_ERR_INVALID_ARGUMENT); + + // Create virtual address heap (BC state). + pGVAS->pHeap = portMemAllocNonPaged(sizeof(*pGVAS->pHeap)); + if (pGVAS->pHeap == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_ASSERT_OR_GOTO(NULL != pGVAS->pHeap, catch); + } + + constructObjEHeap(pGVAS->pHeap, pVAS->vasStart, pGVAS->vaLimitMax + 1, + sizeof(GVAS_BLOCK), 0); + + if (gpuIsSplitVasManagementServerClientRmEnabled(pGpu) && + !(pGVAS->flags & VASPACE_FLAGS_BAR) && + !(pGVAS->flags & VASPACE_FLAGS_FLA) && + !(pGVAS->flags & VASPACE_FLAGS_PMU) && + !(pGVAS->flags & VASPACE_FLAGS_HDA) && + !(pGVAS->flags & VASPACE_FLAGS_HWPM) && + !(pGVAS->flags & VASPACE_FLAGS_PERFMON) && + !(pGVAS->flags & VASPACE_FLAGS_DISABLE_SPLIT_VAS)) + { + NV_ASSERT_OK_OR_GOTO(status, vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin), catch); + if (IS_VGPU_GSP_PLUGIN_OFFLOAD_ENABLED(pGpu) || !bCallingContextPlugin) + { + status = _gvaspaceReserveSplitVaSpace(pGVAS, pGpu); + NV_ASSERT_OR_GOTO(NV_OK == status, catch); + } + } + + // Reserve VA block between current limit and max limit for later growth. + if (flags & VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS) + { + // MAC could overcommit VA, so let the entire va range include RM internal and Client VA be available. + // Reserve only the varange outside the vaspace. + // i.e., vaStart to vaStartInternal - 1 (enforce 32 bit client VA), vaStartInternal - vaLimitInternal (RM internal VA) + // vaLimitInternal+1 - vasLimit (client VA) + + // By default allocations will be routed within RM internal va range. + pGVAS->bRMInternalRestrictedVaRange = NV_TRUE; + + status = _gvaspaceReserveRange(pGVAS, pVAS->vasLimit + 1, pGVAS->vaLimitMax); + } + else + { + status = _gvaspaceReserveTopForGrowth(pGVAS); + } + NV_ASSERT_OR_GOTO(NV_OK == status, catch); + + // Reserve VA holes for partial page tables if requested and supported. + if ((flags & VASPACE_FLAGS_MINIMIZE_PTETABLE_SIZE) && (partialPdeExpMax > 0)) + { + const NvU64 partialSize = fullPdeCoverage >> partialPdeExpMax; + const NvU64 pdeAlignedVasStart = NV_ALIGN_DOWN64(pVAS->vasStart, fullPdeCoverage); + const NvU64 pdeAlignedVasLimit = NV_ALIGN_UP64(pGVAS->vaLimitInternal + 1, fullPdeCoverage) - 1; + const NvU64 pdeAlignedVasSize = pdeAlignedVasLimit - pdeAlignedVasStart + 1; + const NvU64 maxRangeSize = NV_ALIGN_DOWN64(pdeAlignedVasSize / 4, fullPdeCoverage); + NvU32 i; + + NV_ASSERT_OR_RETURN(!(flags & VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS), NV_ERR_ILLEGAL_ACTION); + + pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + + // + // Pick a size for partial page table ranges. + // + // This optimization is required for WHQL MaxContexts on pre-Pascal. + // In this scenario each (minimal) context allocates at least one + // small page table. 
+ // Each full small page table requires 256KB a piece + // (assuming 128KB big page size). + // With 100 contexts, this would require 100 * 256KB = 25MB of + // reserved FB memory. + // While system memory fallback is enabled it is not reliable. + // OS makes no guarantees for such large contiguous sysmem + // allocations. + // + // The optimization uses a heuristic based on two competing constraints: + // 1. Need to cover enough small allocations that page table memory is + // not wasted incrementally (especially for small applications). + // 2. Need to leave enough contiguous VA to satisfy large requests. + // + // There are drawbacks in both directions, so we pick a simple policy. + // We statically partition the VA space into areas where partial + // page tables will be used and areas that will use full page tables. + // We pick the partitioning size to be the smaller of 256MB and + // 1/4th of the VAS heap size to satisfy the above two constraints. + // + pGVAS->partialPtVaRangeSize = NV_MIN(NVBIT64(28), maxRangeSize); + + // + // We also need to pick where to place the partial page table VA ranges. + // We use a static heuristic: initial VA allocations usually land + // at the beginning (normal) and end (grow down) of the VA heap. + // Grow down is an important case since KMD reserves push buffers and other + // special allocations at the end of the heap. + // + // There is also the complication that virtual address within 32-bits + // are optimal for some UMDs and chips - e.g. UMD can restrict and detect 32-bit + // addresses and compile shaders dynamically with more efficient instructions. + // For these configurations we also allocate partial ranges above and + // below the 4GB offset to catch allocations with 32-bit restricted ranges. + // The range just above 32-bits catches unrestricted allocations + // which are moved above 32-bits to stay out of the way and conserve + // the 32-bit space. + // + // If the application uses a large amount of the VA it will eventually use + // the middle of the heap, but at that point incremental page table waste + // is amortized (low overall overhead). + // + // An alternative approach is to pick the partial PDEs dynamically, + // for example the first N PDEs used. + // However this signicantly complicates VA heap allocation, + // especially for grow down requests (think about it). + // The original RM VAS code used this approach, but it was + // proved to cause stuttering in allocation-heavy apps due to the + // complex "reject PDE" loops that were required (see Bug 1551532). + // + // Another alternative considered was to dynamically grow + // partial page tables - e.g. migrate from 1/8th to 1/4th as the upper VA + // is allocated. This would remove the need for static heuristics and + // place no restriction on VA heap allocation (great!), BUT: + // + // 1. WDDMv1 allows paging (mapping with CE) to take place concurrently + // with respect to allocation (page table pinning), + // so migration is not possible without the pager being able + // to synchronize dependencies (WDDMv2). Darn. + // 2. Even if it were possible, if page tables were migrated through + // BAR2 the read performance during the copy would be dreadful. + // RM would need internal CE support (e.g. leverage ECC scrubber) + // for this to be feasible. + // + // Hence, we are using these static heuristics. + // + + // Bottom of heap. + _gvaspaceAddPartialPtRange(pGVAS, pdeAlignedVasStart); + + // Handle 1GB offset. See usage of KMD MINIMUM_GPU_VIRTUAL_ADDRESS. 
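+    // (NVBIT64(30) below is the 1GB mark; the 256MB cap quoted earlier is the
+    //  NVBIT64(28) used for partialPtVaRangeSize. _gvaspaceAddPartialPtRange
+    //  only records a range that lies above the previously added one, so
+    //  overlapping or out-of-order candidates are simply skipped.)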
+ if ((pdeAlignedVasLimit + 1) > NVBIT64(30)) + { + _gvaspaceAddPartialPtRange(pGVAS, NVBIT64(30)); + } + + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + // Handle 32-bit restricted pointer ranges. + if (((pdeAlignedVasLimit + 1) > NVBIT64(32)) && + (pDma->getProperty(pDma, PDB_PROP_DMA_ENFORCE_32BIT_POINTER))) + { + // Top of 32-bit range. + _gvaspaceAddPartialPtRange(pGVAS, + NVBIT64(32) - pGVAS->partialPtVaRangeSize); + + // Bottom of range above 32-bits. + _gvaspaceAddPartialPtRange(pGVAS, NVBIT64(32)); + } + // Top of heap. + _gvaspaceAddPartialPtRange(pGVAS, + pdeAlignedVasLimit - pGVAS->partialPtVaRangeSize + 1); + + // Reserve the VA holes at the end of each partial PDE. + for (i = 0; i < pGVAS->numPartialPtRanges; ++i) + { + NvU64 off; + for (off = 0; off < pGVAS->partialPtVaRangeSize; off += fullPdeCoverage) + { + EMEMBLOCK *pBlock; + NvU32 allocFlags = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + NvU64 allocOffset; + NvU64 allocSize; + const NvU64 currPdeStart = pGVAS->partialPtVaRangeBase[i] + off; + const NvU64 nextPdeStart = currPdeStart + fullPdeCoverage; + + // Clamp to VAS start and limit. + allocOffset = NV_MAX(pVAS->vasStart, currPdeStart + partialSize); + + // Only reserve the hole if the VA limit extends past the partial size. + if (allocOffset <= pGVAS->vaLimitInternal) + { + allocSize = NV_MIN(pGVAS->vaLimitInternal + 1, nextPdeStart) - allocOffset; + + status = pGVAS->pHeap->eheapAlloc(pGVAS->pHeap, VAS_EHEAP_OWNER_RSVD, + &allocFlags, &allocOffset, &allocSize, + 1, 1, &pBlock, NULL, NULL); + NV_ASSERT_OR_GOTO(NV_OK == status, catch); + } + } + } + } + + // Sparsify entire VAS for BAR1 + if (pGVAS->flags & VASPACE_FLAGS_BAR_BAR1) + { + // Loop over each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + status = _gvaspaceBar1VaSpaceConstruct(pGVAS, pGpu); + NV_ASSERT(NV_OK == status); + } + FOR_EACH_GPU_IN_MASK_UC_END + } + +catch: + if (status != NV_OK) + { + gvaspaceDestruct_IMPL(pGVAS); + } + + return status; +} + +static void +_gvaspaceBar1VaSpaceDestructFW +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + return; + } + + gvaspaceUnpinRootPageDir(pGVAS, pGpu); +} + +static NV_STATUS +_gvaspaceBar1VaSpaceDestructClient +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + MMU_WALK_USER_CTX userCtx = {0}; + + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + NV_ASSERT_OR_RETURN(NULL != userCtx.pGpuState, NV_ERR_INVALID_STATE); + + status = mmuWalkUnmap(userCtx.pGpuState->pWalk, vaspaceGetVaStart(pVAS), vaspaceGetVaLimit(pVAS)); + + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + } + + return status; +} + +static NV_STATUS +_gvaspaceBar1VaSpaceDestruct +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + + _gvaspaceBar1VaSpaceDestructFW(pGVAS, pGpu); + + status = _gvaspaceBar1VaSpaceDestructClient(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + return status; +} + +static NV_STATUS +_gvaspaceReleaseVaForServerRm +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + MMU_WALK_USER_CTX userCtx = {0}; + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + if (NULL == userCtx.pGpuState) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT(0); + } + else + { + const MMU_FMT_LEVEL *pLevelFmt = + mmuFmtFindLevelWithPageShift(userCtx.pGpuState->pFmt->pRoot, GMMU_PD1_VADDR_BIT_LO); + status = 
mmuWalkReleaseEntries(userCtx.pGpuState->pWalk, + pLevelFmt, + pGVAS->vaStartServerRMOwned, + pGVAS->vaLimitServerRMOwned); + } + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + + return status; +} + +void +gvaspaceDestruct_IMPL(OBJGVASPACE *pGVAS) +{ + // Destroy BC state. + if (NULL != pGVAS->pHeap) + { + pGVAS->pHeap->eheapTraverse(pGVAS->pHeap, + pGVAS, + _gvaspaceFreeVASBlock, + 1 /*forwards*/); + pGVAS->pHeap->eheapDestruct(pGVAS->pHeap); + portMemFree(pGVAS->pHeap); + pGVAS->pHeap = NULL; + } + + // Destroy channel group map + if (mapCount(&pGVAS->chanGrpMap)) + { + NV_ASSERT(0); + NV_PRINTF(LEVEL_ERROR, + "GVAS is still used by some channel group(s)\n"); + } + mapDestroy(&pGVAS->chanGrpMap); + + // Destroy per-GPU state. + if (NULL != pGVAS->pGpuStates) + { + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + OBJGPU *pGpu = NULL; + GVAS_GPU_STATE *pGpuState; + + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + // Unsparsify entire VAS for BAR1. + if (pGVAS->flags & VASPACE_FLAGS_BAR_BAR1) + { + NV_STATUS status = NV_OK; + + status = _gvaspaceBar1VaSpaceDestruct(pGVAS, pGpu); + NV_ASSERT(NV_OK == status); + } + } + FOR_EACH_GPU_IN_MASK_UC_END + + // Release the PDEs for the server owned portion of the VA range + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + NvBool bClientRm = (IS_VIRTUAL_WITH_SRIOV(pGpu) || IS_GSP_CLIENT(pGpu)); + + if (bClientRm && (0 != pGVAS->vaStartServerRMOwned)) + { + NV_ASSERT(NV_OK == _gvaspaceReleaseVaForServerRm(pGVAS, pGpu)); + } + } + FOR_EACH_GPU_IN_MASK_UC_END + + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + _gvaspaceGpuStateDestruct(pGVAS, pGpu, pGpuState); + } + FOR_EACH_GPU_IN_MASK_UC_END + + // + // Release the Big Page Table (BPT) caches *only* after all page level + // updates have been completed on all the GPUs in SLI. Destroying the + // cache on one GPU with unreleased BPT instances on another GPU can + // cause memory leaks in a SLI scenario. This is because in SLI, a GPU + // can share a BPT instance from another GPU's cache. + // + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + gmmuMemDescCacheFree(pGpuState); + } + FOR_EACH_GPU_IN_MASK_UC_END + + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + if (RMCFG_FEATURE_PMA && + pMemoryManager->pPageLevelReserve != NULL) + { + if (pGVAS->pPageTableMemPool != NULL) + rmMemPoolRelease(pGVAS->pPageTableMemPool, pGVAS->flags); + } + } + FOR_EACH_GPU_IN_MASK_UC_END + + portMemFree(pGVAS->pGpuStates); + pGVAS->pGpuStates = NULL; + } +} + +/*! + * Add a region of VA reserved for partial page tables. + */ +static void +_gvaspaceAddPartialPtRange +( + OBJGVASPACE *pGVAS, + const NvU64 va +) +{ + NV_ASSERT_OR_RETURN_VOID(pGVAS->numPartialPtRanges < + GVAS_MAX_PARTIAL_PAGE_TABLE_RANGES); + + // Only add the range if it is first range or above the previous range. + if ((0 == pGVAS->numPartialPtRanges) || + (va >= (pGVAS->partialPtVaRangeBase[pGVAS->numPartialPtRanges - 1] + + pGVAS->partialPtVaRangeSize))) + { + pGVAS->partialPtVaRangeBase[pGVAS->numPartialPtRanges] = va; + pGVAS->numPartialPtRanges++; + } +} + +/*! + * Construct unicast GPU state associated with a VAS and reconcile + * differences between GMMU settings (currently must be homogenous). 
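+ *
+ * The first GPU in the gpuMask establishes the shared parameters (big page
+ * size, compression page size, VA start/limits, partial PDE policy); every
+ * subsequent GPU must either match them exactly or, for the VA start and
+ * limits, the stricter of the two values is kept.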
+ */ +static NV_STATUS +_gvaspaceGpuStateConstruct +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_GPU_STATE *pGpuState, + const NvU64 reqBigPageSize, + const NvU64 vaStart, + const NvU64 vaLimit, + const NvU64 vaStartInternal, + const NvU64 vaLimitInternal, + const NvU32 flags, + const NvBool bFirst, + NvU64 *pFullPdeCoverage, + NvU32 *pPartialPdeExpMax +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + NvU64 pageSizeMask; + NvU64 vaStartMin; + NvU64 vaLimitMax; + NvU32 bigPageSize; + NvU64 maxPageSizeSupported; + NvU64 compPageSize; + NvU64 extManagedAlign; + NvU64 vaLimitExt; + const GMMU_FMT *pFmt; + const MMU_FMT_LEVEL *pBigPT; + MMU_WALK_FLAGS walkFlags = {0}; + NvU64 fullPdeCoverage; + NvU32 partialPdeExpMax = 0; + NvU64 vaStartInt = 0; + NvU64 vaLimitInt = 0; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + // Must be in UC. + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + // Get GMMU format for this GPU. + pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_DEFAULT, reqBigPageSize); + NV_ASSERT_OR_RETURN(NULL != pFmt, NV_ERR_NOT_SUPPORTED); + pGpuState->pFmt = pFmt; + + // UVM mirroring works only with pre-Pascal format. + if (flags & VASPACE_FLAGS_SET_MIRRORED) + { + NV_ASSERT_OR_RETURN(GMMU_FMT_VERSION_1 == pFmt->version, NV_ERR_ILLEGAL_ACTION); + } + + // Determine GPU's page size settings. + pageSizeMask = mmuFmtAllPageSizes(pFmt->pRoot); + bigPageSize = (NvU32)(pageSizeMask & (RM_PAGE_SIZE_64K | RM_PAGE_SIZE_128K)); + // + // Set the max page size supported for the system to determine if we need to look into + // 512Mb page or not. + // + maxPageSizeSupported = (kgmmuIsPageSize512mbSupported(pKernelGmmu)? + RM_PAGE_SIZE_512M: (kgmmuIsHugePageSupported(pKernelGmmu) ? + RM_PAGE_SIZE_HUGE : bigPageSize)); + compPageSize = pMemorySystemConfig->comprPageSize; + + // Determine externally managed VA alignment from big page table coverage. + pBigPT = mmuFmtFindLevelWithPageShift(pFmt->pRoot, nvLogBase2(bigPageSize)); + NV_ASSERT_OR_RETURN(NULL != pBigPT, NV_ERR_INVALID_ARGUMENT); + extManagedAlign = NVBIT64(pBigPT->virtAddrBitHi + 1); + + // Determine partial page table parameters. + fullPdeCoverage = mmuFmtLevelVirtAddrMask(pBigPT) + 1; + if (nvFieldIsValid32(&pFmt->pPdeMulti->fldSizeRecipExp)) + { + partialPdeExpMax = pFmt->pPdeMulti->fldSizeRecipExp.maskPos >> + pFmt->pPdeMulti->fldSizeRecipExp.shift; + } + + // set VA start address to non-zero reserved VA space base. + vaStartMin = gvaspaceGetReservedVaspaceBase(pGVAS, pGpu); + + vaLimitMax = NVBIT64(pFmt->pRoot->virtAddrBitHi + 1) - 1; + + // Calculate the desired internal and external VAS limits. + if (0 == vaLimit) + { + // Default: allow maximum VAS limit. + vaLimitExt = vaLimitMax; + } + else + { + // Otherwise ensure requested limit does not exeed max HW limit. 
+ NV_ASSERT_OR_RETURN(vaLimit <= vaLimitMax, NV_ERR_INVALID_ARGUMENT); + + vaLimitExt = vaLimit; + } + + if (flags & VASPACE_FLAGS_RESTRICTED_RM_INTERNAL_VALIMITS) + { + NV_ASSERT_OR_RETURN(vaLimitInternal <= vaLimitMax, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaLimitInternal <= vaLimit, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaStartInternal <= vaLimitInternal, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaStartInternal >= vaStartMin, NV_ERR_INVALID_ARGUMENT); + + vaStartInt = vaStartInternal; + vaLimitInt = vaLimitInternal; + } + else + { + vaStartInt = vaStart; + vaLimitInt = vaLimitExt; + } + + + // + // Shared management external limit is aligned to root PDE coverage. + // This allows KMD/OS to hook external PDEs beneath an RM-allocated root. + // + if (flags & VASPACE_FLAGS_SHARED_MANAGEMENT) + { + vaLimitExt = NV_ALIGN_UP64(vaLimitExt + 1, mmuFmtLevelPageSize(pFmt->pRoot)) - 1; + } + + // First GPU sets the precedent. + if (bFirst) + { + pGVAS->bigPageSize = bigPageSize; + pGVAS->maxPageSizeSupported = maxPageSizeSupported; + pGVAS->compPageSize = compPageSize; + pGVAS->extManagedAlign = extManagedAlign; + + // + // Determine VAS start and limit. + // vaStart of 0 is allowed if explicitly requested (e.g. BAR1). + // + if ((0 == vaStart) && !(flags & VASPACE_FLAGS_ALLOW_ZERO_ADDRESS)) + { + pVAS->vasStart = vaStartMin; + } + else + { + pVAS->vasStart = vaStart; + } + + if (vaStartInt == 0) + { + vaStartInt = pVAS->vasStart; + } + + pGVAS->vaStartInternal = vaStartInt; + pGVAS->vaLimitInternal = vaLimitInt; + + pVAS->vasLimit = vaLimitExt; + pGVAS->vaLimitInternal = vaLimitInt; + pGVAS->vaLimitMax = vaLimitMax; + *pFullPdeCoverage = fullPdeCoverage; + *pPartialPdeExpMax = partialPdeExpMax; + } + // Remaining must either match or take best-fit. + else + { + NV_ASSERT_OR_RETURN(bigPageSize == pGVAS->bigPageSize, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(compPageSize == pGVAS->compPageSize, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(extManagedAlign == pGVAS->extManagedAlign, NV_ERR_INVALID_ARGUMENT); + if ((0 == vaStart) && !(flags & VASPACE_FLAGS_ALLOW_ZERO_ADDRESS)) + { + pVAS->vasStart = NV_MAX(pVAS->vasStart, vaStartMin); + } + pVAS->vasLimit = NV_MIN(pVAS->vasLimit, vaLimitExt); + pGVAS->vaStartInternal = NV_MAX(pGVAS->vaStartInternal, vaStartInt); + pGVAS->vaLimitInternal = NV_MIN(pGVAS->vaLimitInternal, vaLimitInt); + pGVAS->vaLimitMax = NV_MIN(pGVAS->vaLimitMax, vaLimitMax); + NV_ASSERT_OR_RETURN(*pFullPdeCoverage == fullPdeCoverage, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(*pPartialPdeExpMax == partialPdeExpMax, NV_ERR_INVALID_ARGUMENT); + } + + // + // Create MMU walker library state. + // Set ats flag to enable related functionality/functionalities in MMU walker + // e.g. NV4K state for 64K PTEs + // + walkFlags.bAtsEnabled = gvaspaceIsAtsEnabled(pGVAS); + walkFlags.bUseIterative = gpuIsIterativeMmuWalkerEnabled(pGpu); + NV_ASSERT_OK_OR_RETURN( + mmuWalkCreate(pFmt->pRoot, NULL, + &g_gmmuWalkCallbacks, + walkFlags, + &pGpuState->pWalk, + NULL)); + + listInit(&pGpuState->reservedPageTableEntries, + portMemAllocatorGetGlobalNonPaged()); + + listInitIntrusive(&pGpuState->unpackedMemDescList); + + return NV_OK; +} + +/*! + * Destruct unicast GPU state associated with a VAS. + */ +static void +_gvaspaceGpuStateDestruct +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_GPU_STATE *pGpuState +) +{ + NV_ASSERT_OR_RETURN_VOID(!gpumgrGetBcEnabledStatus(pGpu)); + if (NULL != pGpuState->pRootInternal) + { + // Cleanup if client didn't call UnsetPageDir. 
NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS params = {0}; + NV_STATUS status; + status = gvaspaceExternalRootDirRevoke(pGVAS, pGpu, &params); + NV_ASSERT(NV_OK == status); + } + + // + // Force free all page level instances. This can come in + // handy on systems that support surprise removal. + // + _gvaspaceForceFreePageLevelInstances(pGVAS, pGpu, pGpuState); + + mmuWalkDestroy(pGpuState->pWalk); + pGpuState->pWalk = NULL; + NV_ASSERT(NULL == pGpuState->pMirroredRoot); + + NV_ASSERT(NULL == listHead(&pGpuState->reservedPageTableEntries)); + listDestroy(&pGpuState->reservedPageTableEntries); +} + +static void +_gvaspaceCleanupFlaDummyPagesForFlaRange +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_GPU_STATE *pGpuState +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + if (pGpuState->flaDummyPage.hMemory != NV01_NULL_OBJECT) + { + portMemSet(&pGpuState->flaDummyPage.pte, 0, sizeof(pGpuState->flaDummyPage.pte)); + pRmApi->Free(pRmApi, pKernelBus->flaInfo.hClient, pGpuState->flaDummyPage.hMemory); + pGpuState->flaDummyPage.hMemory = NV01_NULL_OBJECT; + } +} + +static NV_STATUS +_gvaspaceAllocateFlaDummyPagesForFlaRange +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_GPU_STATE *pGpuState +) +{ + NV_STATUS status; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + const GMMU_FMT_FAMILY *pFam = kgmmuFmtGetFamily(pKernelGmmu, pGpuState->pFmt->version); + NvU64 addr; + NvBool bAcquireLock = NV_FALSE; + RM_API *pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + NV_MEMORY_ALLOCATION_PARAMS memAllocParams; + RsClient *pClient; + Memory *pMemory; + + if (!kbusIsFlaDummyPageEnabled(pKernelBus)) + return NV_OK; + + portMemSet(&memAllocParams, 0, sizeof(memAllocParams)); + memAllocParams.owner = VAS_EHEAP_OWNER_NVRM; + memAllocParams.size = RM_PAGE_SIZE_64K; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.attr = DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM) | + DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _BIG) | + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS); + memAllocParams.flags = NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM | + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + memAllocParams.alignment = RM_PAGE_SIZE_64K; + + NV_ASSERT_OK_OR_GOTO(status, + serverutilGenResourceHandle(pKernelBus->flaInfo.hClient, &pGpuState->flaDummyPage.hMemory), + cleanup); + + // + // Allocate memory using vidHeapControl + // + // vidHeapControl calls should happen outside GPU locks + // This is a PMA requirement as memory allocation calls may invoke eviction + // which UVM could get stuck behind GPU lock + // + if (rmDeviceGpuLockIsOwner(pGpu->gpuInstance) || rmGpuLockIsOwner()) + { + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + bAcquireLock = NV_TRUE; + pRmApi = rmapiGetInterface(RMAPI_API_LOCK_INTERNAL); + } + + status = pRmApi->AllocWithHandle(pRmApi, pKernelBus->flaInfo.hClient, pKernelBus->flaInfo.hSubDevice, + pGpuState->flaDummyPage.hMemory, NV01_MEMORY_LOCAL_USER, + &memAllocParams); + + if (bAcquireLock) + { + // Reacquire the GPU locks + if (rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM) != NV_OK) + { + NV_ASSERT(0); + status = NV_ERR_GENERIC; + goto cleanup; + } + bAcquireLock = NV_FALSE; + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to allocate dummy page for FLA, status: %x\n", status); + goto cleanup; + } + + NV_ASSERT_OK_OR_GOTO(status, + serverGetClientUnderLock(&g_resServ, pKernelBus->flaInfo.hClient, &pClient), + cleanup); +
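+    //
+    // The remainder of this function looks up the 64KB vidmem page allocated
+    // above and fills in a template big-page PTE for it (valid bit,
+    // volatility, aperture and encoded physical address), cached in
+    // pGpuState->flaDummyPage.pte for later reuse.
+    //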
+ NV_ASSERT_OK_OR_GOTO(status, + memGetByHandle(pClient, pGpuState->flaDummyPage.hMemory, &pMemory), + cleanup); + + // prefill the big pte + const GMMU_APERTURE pgAperture = kgmmuGetMemAperture(pKernelGmmu, pMemory->pMemDesc); + + nvFieldSetBool(&pFam->pte.fldValid, NV_TRUE, pGpuState->flaDummyPage.pte.v8); + nvFieldSetBool(&pFam->pte.fldVolatile, memdescGetVolatility(pMemory->pMemDesc), + pGpuState->flaDummyPage.pte.v8); + gmmuFieldSetAperture(&pFam->pte.fldAperture, pgAperture, + pGpuState->flaDummyPage.pte.v8); + + addr = kgmmuEncodePhysAddr(pKernelGmmu, pgAperture, + memdescGetPhysAddr(pMemory->pMemDesc, AT_GPU, 0), + NVLINK_INVALID_FABRIC_ADDR); + + gmmuFieldSetAddress(gmmuFmtPtePhysAddrFld(&pFam->pte, pgAperture), addr, pGpuState->flaDummyPage.pte.v8); + + return NV_OK; + +cleanup: + _gvaspaceCleanupFlaDummyPagesForFlaRange(pGVAS, pGpu, pGpuState); + return status; +} + +NV_STATUS +gvaspaceAlloc_IMPL +( + OBJGVASPACE *pGVAS, + NvU64 size, + NvU64 align, + NvU64 rangeLo, + NvU64 rangeHi, + NvU64 pageSizeLockMask, + VAS_ALLOC_FLAGS flags, + NvU64 *pAddr +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvU32 eheapFlags = 0; + NV_STATUS status = NV_OK; + OBJEHEAP *pHeap = pGVAS->pHeap; + EMEMBLOCK *pMemBlock; + GVAS_BLOCK *pVASBlock; + NvU64 origRangeLo = pHeap->rangeLo; + NvU64 origRangeHi = pHeap->rangeHi; + + if (pGVAS->bIsExternallyOwned) + { + NV_PRINTF(LEVEL_ERROR, + "Cannot reserve VA on an externally owned VASPACE\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + // + // TODO: To be removed after pKernelBus->flaInfo.pFlaVAS is removed. + // In case of FLA vaspace, check that fabric vaspace is not in use. + // + if ((pVAS == pKernelBus->flaInfo.pFlaVAS) && (pGpu->pFabricVAS != NULL)) + { + FABRIC_VASPACE *pFabricVAS = dynamicCast(pGpu->pFabricVAS, FABRIC_VASPACE); + + if (gvaspaceIsInUse(dynamicCast(pFabricVAS->pGVAS, OBJGVASPACE))) + { + NV_PRINTF(LEVEL_ERROR, "FabricVAS and FlaVAS cannot be used simultaneously! " + "FlaVAS Alloc failed\n"); + return NV_ERR_INVALID_OPERATION; + } + } + + // Clip the input range to the original range. + rangeLo = NV_MAX(rangeLo, origRangeLo); + rangeHi = NV_MIN(rangeHi, origRangeHi); + + // Check RM's internal allocation is only used. + if (gvaspaceIsInternalVaRestricted(pGVAS)) + { + if (!flags.bClientAllocation) // ignore the 32bit restriction here. + { + rangeLo = pGVAS->vaStartInternal; + rangeHi = pGVAS->vaLimitInternal; + } + else + { + // Fixed address range Check + + // Does not interfere with RM internal VA range. 
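+            // A fixed-address request is rejected outright if either endpoint
+            // falls inside [vaStartInternal, vaLimitInternal]; a flexible
+            // request is instead steered to whichever side of the internal
+            // range still has at least 'size' bytes available, and fails with
+            // NV_ERR_INSUFFICIENT_RESOURCES if neither side does.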
+ if (flags.bFixedAddressRange && + ((rangeLo >= pGVAS->vaStartInternal && rangeLo <= pGVAS->vaLimitInternal) || \ + (rangeHi <= pGVAS->vaLimitInternal && rangeHi >= pGVAS->vaStartInternal))) + { + return NV_ERR_INVALID_PARAMETER; + } + + // Flexible address range + + // Place above RM va internal as much as possible + if (!flags.bFixedAddressRange && !(rangeHi < pGVAS->vaStartInternal || rangeLo > pGVAS->vaLimitInternal)) + { + if ((rangeHi > pGVAS->vaLimitInternal) && (rangeHi - pGVAS->vaLimitInternal) >= size) + { + rangeLo = pGVAS->vaLimitInternal + 1; + } + else if (rangeLo < pGVAS->vaStartInternal && pGVAS->vaStartInternal - rangeLo >= size) + { + rangeHi = pGVAS->vaStartInternal - 1; + } + else + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + // else do nothing as the ranges are disjoint + } + } + } + + // + // If this address space is marked as mirrored, then we will + // cap user allocations to be under the top PDE. + // If the allocations are privileged, then we will restrict the + // allocations to the top PDE. + // + if (pGVAS->bIsMirrored) + { + if (flags.bPrivileged) + { + // + // This is a kernel allocation so restrict the Allocations to + // the topmost PDE. + // + rangeLo = NV_MAX(rangeLo, pGVAS->vaLimitInternal - + UVM_KERNEL_PRIVILEGED_REGION_LENGTH + 1); + rangeHi = NV_MIN(rangeHi, pGVAS->vaLimitInternal); + + // Verify the allocation range is within UVM_PRIVILEGED_REGION + NV_ASSERT_OR_RETURN(rangeLo >= UVM_KERNEL_PRIVILEGED_REGION_START, + NV_ERR_OUT_OF_RANGE); + NV_ASSERT_OR_RETURN(rangeHi < UVM_KERNEL_PRIVILEGED_REGION_START + + UVM_KERNEL_PRIVILEGED_REGION_LENGTH, + NV_ERR_OUT_OF_RANGE); + } + else + { + // + // This is a user space allocation. Restrict allocation from the last PDB + // because that is privileged + // vaRangeLo can still be based on the users override. We will return an error + // if the user requested for an address in the last PDE range + // + rangeHi = NV_MIN(rangeHi, pGVAS->vaLimitInternal - + UVM_KERNEL_PRIVILEGED_REGION_LENGTH); + + // Verify range is not in the priviledged region. + NV_ASSERT_OR_RETURN(rangeHi < UVM_KERNEL_PRIVILEGED_REGION_START, + NV_ERR_OUT_OF_RANGE); + } + } + + // + // Sanity check the range before applying to eheap since + // eheapSetAllocRange auto-clips (silencing potential range bugs). + // + NV_ASSERT_OR_RETURN(origRangeLo <= rangeLo, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeLo <= rangeHi, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeHi <= origRangeHi, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(size <= (rangeHi - rangeLo + 1), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OK_OR_RETURN(pHeap->eheapSetAllocRange(pHeap, rangeLo, rangeHi)); + // !!! All return paths after this point must "goto catch" to restore. !!! + + // Honor reverse flag for non-BAR VA spaces. + if (flags.bReverse || (pGVAS->flags & VASPACE_FLAGS_REVERSE)) + { + eheapFlags |= NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN; + } + + if (flags.bFixedAddressAllocate) + { + eheapFlags |= NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + } + + // Attempt to allocate VA space of the size and alignment requested. 
+ if (NV_OK != pHeap->eheapAlloc(pHeap, VAS_EHEAP_OWNER_NVRM, &eheapFlags, + pAddr, &size, align, 1, + &pMemBlock, NULL, NULL)) + { + status = NV_ERR_NO_MEMORY; + goto catch; + } + pVASBlock = (PGVAS_BLOCK)pMemBlock->pData; + + // Save flags for VA initialization + pVASBlock->flags = flags; + pVASBlock->pageSizeLockMask = pageSizeLockMask; + + if (flags.bExternallyManaged) + { + pVASBlock->management = VA_MANAGEMENT_PDES_ONLY; + } + + // + // VA reserved as sparse is sparsified immediately, changing its + // unmapped state from "invalid" to "zero." + // + if (flags.bSparse || (pGVAS->flags & VASPACE_FLAGS_BAR_BAR1)) + { + // Loop over each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + MMU_WALK_USER_CTX userCtx = {0}; + + // Sparsify the VA range. + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx); + + if (NULL == userCtx.pGpuState) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT(0); + } + else + { + status = mmuWalkSparsify(userCtx.pGpuState->pWalk, *pAddr, + *pAddr + size - 1, NV_FALSE); + } + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + if (NV_OK != status) + { + DBG_BREAKPOINT(); + break; + } + + // Invalidate TLB to apply new sparse state. + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | + BUS_FLUSH_SYSTEM_MEMORY | + BUS_FLUSH_USE_PCIE_READ); + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_UPGRADE); + } + FOR_EACH_GPU_IN_MASK_UC_END + if (NV_OK != status) + { + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + MMU_WALK_USER_CTX userCtx = {0}; + + // Unsparsify the VA range. + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx); + if (NULL == userCtx.pGpuState) + { + // Intentionally not clobbering status + NV_ASSERT(0); + } + else + { + // Not checking the returns status + mmuWalkUnmap(userCtx.pGpuState->pWalk, + pMemBlock->begin, pMemBlock->end); + } + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + // Invalidate TLB to apply new sparse state. + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | + BUS_FLUSH_SYSTEM_MEMORY | + BUS_FLUSH_USE_PCIE_READ); + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_UPGRADE); + } + FOR_EACH_GPU_IN_MASK_UC_END + + goto catch; + } + } + // Pin page tables upfront for non-lazy, non-external VA reservations. + else if (!(flags.bLazy || flags.bExternallyManaged) && + (0 != pVASBlock->pageSizeLockMask)) + { + // Loop over each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + NvU32 pageShift; + MMU_WALK_USER_CTX userCtx = {0}; + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx); + + if (NULL == userCtx.pGpuState) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT(0); + } + else + { + if (pGVAS->flags & VASPACE_FLAGS_FLA) + { + // currently FLA VASpace is associated with only GPU. + NV_ASSERT(ONEBITSET(pVAS->gpuMask)); + status = _gvaspaceAllocateFlaDummyPagesForFlaRange(pGVAS, pGpu, userCtx.pGpuState); + } + // Loop over each page size requested by client. + FOR_EACH_INDEX_IN_MASK(64, pageShift, pVASBlock->pageSizeLockMask) + { + // Pre-reserve page level instances in the VA range. 
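+                    // One reservation is made per page size bit set in
+                    // pageSizeLockMask; if any of them fails, the error path
+                    // below walks the same mask again and releases whatever
+                    // was already reserved on each GPU.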
+ const MMU_FMT_LEVEL *pLevelFmt = + mmuFmtFindLevelWithPageShift(userCtx.pGpuState->pFmt->pRoot, pageShift); + status = mmuWalkReserveEntries(userCtx.pGpuState->pWalk, pLevelFmt, + *pAddr, *pAddr + size - 1, NV_TRUE); + if (NV_OK != status) + { + DBG_BREAKPOINT(); + break; + } + } + FOR_EACH_INDEX_IN_MASK_END + } + + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + if (NV_OK != status) + { + break; + } + } + FOR_EACH_GPU_IN_MASK_UC_END + if (NV_OK != status) + { + // Unpin page tables for each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + NvU32 pageShift; + MMU_WALK_USER_CTX userCtx = {0}; + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx); + + if (NULL == userCtx.pGpuState) + { + // Intentionally not clobbering status + NV_ASSERT(0); + } + else + { + if (pGVAS->flags & VASPACE_FLAGS_FLA) + { + _gvaspaceCleanupFlaDummyPagesForFlaRange(pGVAS, pGpu, userCtx.pGpuState); + } + // Loop over each page size requested by client during VA reservation. + FOR_EACH_INDEX_IN_MASK(64, pageShift, pVASBlock->pageSizeLockMask) + { + // Release page level instances in the VA range. + const MMU_FMT_LEVEL *pLevelFmt = + mmuFmtFindLevelWithPageShift(userCtx.pGpuState->pFmt->pRoot, pageShift); + // Not checking the returns status + mmuWalkReleaseEntries(userCtx.pGpuState->pWalk, pLevelFmt, + pMemBlock->begin, pMemBlock->end); + } + FOR_EACH_INDEX_IN_MASK_END + } + + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + } + FOR_EACH_GPU_IN_MASK_UC_END + goto catch; + } + } + +catch: + pHeap->eheapSetAllocRange(pHeap, origRangeLo, origRangeHi); + return status; +} + +NV_STATUS +gvaspaceFree_IMPL +( + OBJGVASPACE *pGVAS, + NvU64 vAddr +) +{ + PEMEMBLOCK pMemBlock; + PGVAS_BLOCK pVASBlock; + GVAS_MAPPING *pMapNode; + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + NV_STATUS status = NV_OK; + + if (pGpu == NULL) + { + return NV_ERR_INVALID_STATE; + } + + pMemBlock = pGVAS->pHeap->eheapGetBlock(pGVAS->pHeap, vAddr, 0); + NV_ASSERT_OR_RETURN(NULL != pMemBlock, NV_ERR_INVALID_ARGUMENT); + pVASBlock = (PGVAS_BLOCK)pMemBlock->pData; + + if (pMemBlock->refCount > 1) + { + pMemBlock->refCount--; + return NV_OK; + } + + // Before unmapping any CPU visible surfaces, make sure any CPU writes are flushed to L2. + if (pGVAS->flags & VASPACE_FLAGS_BAR) + { + // Loop over each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | + BUS_FLUSH_SYSTEM_MEMORY | + BUS_FLUSH_USE_PCIE_READ); + } + FOR_EACH_GPU_IN_MASK_UC_END + } + + // + // Unmap any leaked mappings. + // + btreeEnumStart(0, (NODE**)&pMapNode, &pVASBlock->pMapTree->node); + while (NULL != pMapNode) + { + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pMapNode->gpuMask) + { + gvaspaceUnmap(pGVAS, pGpu, pMapNode->node.keyStart, pMapNode->node.keyEnd); + } + FOR_EACH_GPU_IN_MASK_UC_END + + btreeEnumStart(0, (NODE**)&pMapNode, &pVASBlock->pMapTree->node); + } + + // Unpin page tables for each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + NvU32 pageShift; + + NV_ASSERT(NULL != pGpuState); + + if (NULL != pGpuState) + { + FOR_EACH_INDEX_IN_MASK(64, pageShift, pVASBlock->pageSizeLockMask) + { + // Release page level instances in the VA range. 
+ const MMU_FMT_LEVEL *pLevelFmt = + mmuFmtFindLevelWithPageShift(pGpuState->pFmt->pRoot, + pageShift); + + status = _gvaspaceReleaseUnreservedPTEs(pGVAS, pGpu, + pMemBlock->begin, + pMemBlock->end, + pLevelFmt); + NV_ASSERT(NV_OK == status); + } + FOR_EACH_INDEX_IN_MASK_END + } + } + FOR_EACH_GPU_IN_MASK_UC_END + + if (!pVASBlock->flags.bSkipTlbInvalidateOnFree) + { + // Invalidate TLB on each GPU associated with VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + kbusFlush_HAL(pGpu, pKernelBus, BUS_FLUSH_VIDEO_MEMORY | + BUS_FLUSH_SYSTEM_MEMORY | + BUS_FLUSH_USE_PCIE_READ); + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_DOWNGRADE); + } + FOR_EACH_GPU_IN_MASK_UC_END + } + + pGVAS->pHeap->eheapFree(pGVAS->pHeap, pMemBlock->begin); + + return NV_OK; +} + +NV_STATUS +gvaspaceApplyDefaultAlignment_IMPL +( + OBJGVASPACE *pGVAS, + const FB_ALLOC_INFO *pAllocInfo, + NvU64 *pAlign, + NvU64 *pSize, + NvU64 *pPageSizeLockMask +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + NvU64 bigPageSize = pGVAS->bigPageSize; + NvU64 maxPageSize = RM_PAGE_SIZE; + NvU64 compPageSize = pGVAS->compPageSize; + NvU64 pageSizeMask = 0; + + // + // In L2 cache only mode, force the page size to 4K in order to + // conserve memory, otherwise we end up wasting a lot of memory + // aligning allocations to the big page size + // + if (gpuIsCacheOnlyModeEnabled(pGpu)) + { + NV_PRINTF(LEVEL_ERROR, + "Overriding page size to 4k in Cache only Mode\n"); + pageSizeMask |= RM_PAGE_SIZE; + } + else + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + // Determine the page size to align to based on user hint. + switch (dmaNvos32ToPageSizeAttr(pAllocInfo->pageFormat->attr, pAllocInfo->pageFormat->attr2)) + { + case RM_ATTR_PAGE_SIZE_4KB: + pageSizeMask |= RM_PAGE_SIZE; + break; + case RM_ATTR_PAGE_SIZE_DEFAULT: + pageSizeMask |= RM_PAGE_SIZE; + pageSizeMask |= bigPageSize; + maxPageSize = bigPageSize; + + if (FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, pAllocInfo->retAttr)) + { + NV_ASSERT_OR_RETURN(kgmmuIsHugePageSupported(pKernelGmmu), + NV_ERR_NOT_SUPPORTED); + pageSizeMask |= RM_PAGE_SIZE_HUGE; + maxPageSize = RM_PAGE_SIZE_HUGE; + } + break; + case RM_ATTR_PAGE_SIZE_BIG: + pageSizeMask |= bigPageSize; + maxPageSize = bigPageSize; + break; + case RM_ATTR_PAGE_SIZE_HUGE: + NV_ASSERT_OR_RETURN(kgmmuIsHugePageSupported(pKernelGmmu), + NV_ERR_NOT_SUPPORTED); + pageSizeMask |= RM_PAGE_SIZE_HUGE; + maxPageSize = RM_PAGE_SIZE_HUGE; + break; + case RM_ATTR_PAGE_SIZE_512MB: + NV_ASSERT_OR_RETURN(kgmmuIsPageSize512mbSupported(pKernelGmmu), + NV_ERR_NOT_SUPPORTED); + pageSizeMask |= RM_PAGE_SIZE_512M; + maxPageSize = RM_PAGE_SIZE_512M; + break; + case RM_ATTR_PAGE_SIZE_INVALID: + NV_PRINTF(LEVEL_ERROR, "Invalid page size attr\n"); + return NV_ERR_INVALID_ARGUMENT; + } + } + + // Save page sizes that will have page tables pinned (either upfront or lazily). + *pPageSizeLockMask |= pageSizeMask; + + // Size must be aligned to maximum potential map page size. + *pSize = RM_ALIGN_UP(*pSize, maxPageSize); + + // + // Offset must be aligned to maximum potential map page size and + // compression page size. + // + // However, the client may force alignment if it is known the VA range will + // not be mapped to compressed physical memory. + // The forced alignment better be aligned to the mapping page size, + // but this isn't enforced until map time. 
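+    // Illustrative example (hypothetical values): with a 64KB big page and a
+    // 128KB compression page size, a 96KB request with 4KB alignment comes
+    // out as size = 128KB (rounded up to the maximum mappable page size
+    // above) and alignment = 128KB (NV_MAX of the max page size and the
+    // compression page size), unless ALIGNMENT_FORCE is set.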
+ // + if (!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE)) + { + *pAlign = NV_MAX(*pAlign, NV_MAX(maxPageSize, compPageSize)); + } + + // Offset and size must be aligned to PDE stride for external management. + if (pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED) + { + *pAlign = NV_MAX(*pAlign, pGVAS->extManagedAlign); + *pSize = RM_ALIGN_UP(*pSize, pGVAS->extManagedAlign); + } + + return NV_OK; +} + +NV_STATUS +gvaspaceIncAllocRefCnt_IMPL +( + OBJGVASPACE *pGVAS, + NvU64 vAddr +) +{ + EMEMBLOCK *pVASpaceBlock; + + pVASpaceBlock = pGVAS->pHeap->eheapGetBlock(pGVAS->pHeap, vAddr, 0); + if (NULL == pVASpaceBlock) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pVASpaceBlock->refCount++; + + return NV_OK; +} + +POBJEHEAP +gvaspaceGetHeap_IMPL(OBJGVASPACE *pGVAS) +{ + return pGVAS->pHeap; +} + +NvU32 +gvaspaceGetMapPageSize_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + EMEMBLOCK *pMemBlock +) +{ + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + const MMU_FMT_LEVEL *pRootFmtLvl = pGpuState->pFmt->pRoot; + const NvU64 pageSizeMask = mmuFmtAllPageSizes(pRootFmtLvl); + NvU32 i; + + // + // Though page size mask is 64 bits, we will loop only over 32 bits as the callers + // do not expect a pagsize more than 2^32 bytes + // + for (i = 0; i < 32; ++i) + { + if (pageSizeMask & NVBIT64(i)) + { + const MMU_FMT_LEVEL *pTargetFmt = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvU32 memSize = 0; + + pTargetFmt = mmuFmtFindLevelWithPageShift(pRootFmtLvl, i); + mmuWalkGetPageLevelInfo(pGpuState->pWalk, pTargetFmt, pMemBlock->begin, + (const MMU_WALK_MEMDESC**)&pMemDesc, &memSize); + if (NULL != pMemDesc) + { + return NVBIT32(i); + } + } + } + + if ((pageSizeMask >> 32) != 0) + { + NV_PRINTF(LEVEL_ERROR, + "Time to update width of variables for page size. PageSizeMask: 0x%llx\n", + pageSizeMask); + } + + NV_ASSERT(0); + return 0; +} + +NvU32 +gvaspaceGetBigPageSize_IMPL(OBJGVASPACE *pGVAS) +{ + return pGVAS->bigPageSize; +} + +NvBool +gvaspaceIsMirrored_IMPL(OBJGVASPACE *pGVAS) +{ + return pGVAS->bIsMirrored; +} + +NvBool +gvaspaceIsFaultCapable_IMPL(OBJGVASPACE *pGVAS) +{ + return pGVAS->bIsFaultCapable; +} + +NvBool +gvaspaceIsExternallyOwned_IMPL(OBJGVASPACE *pGVAS) +{ + return pGVAS->bIsExternallyOwned; +} + +NvBool +gvaspaceIsAtsEnabled_IMPL(OBJGVASPACE *pGVAS) +{ + NvBool bAtsEnabled = pGVAS->bIsAtsEnabled; + + // ATS is supported with MIG memory partitioning only when VA Space has it enabled. + if (bAtsEnabled) + { + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + KernelMIGManager *pKernelMIGManager = (pGpu != NULL) ? 
GPU_GET_KERNEL_MIG_MANAGER(pGpu) : NULL; + + if ((pKernelMIGManager != NULL) && kmigmgrIsMIGMemPartitioningEnabled(pGpu, pKernelMIGManager)) + { + return gpuIsAtsSupportedWithSmcMemPartitioning_HAL(pGpu); + } + } + + return bAtsEnabled; +} + +NV_STATUS +gvaspaceGetPasid_IMPL(OBJGVASPACE *pGVAS, NvU32 *pPasid) +{ + NV_ASSERT_OR_RETURN(pPasid != NULL, NV_ERR_INVALID_ARGUMENT); + + NV_PRINTF(LEVEL_INFO, "ATS enabled: %u PASID: %u\n", + pGVAS->bIsAtsEnabled, pGVAS->processAddrSpaceId); + + NV_ASSERT_OR_RETURN(pGVAS->bIsAtsEnabled, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pGVAS->processAddrSpaceId != NV_U32_MAX, NV_ERR_INVALID_STATE); + *pPasid = pGVAS->processAddrSpaceId; + return NV_OK; +} + +NvU32 +gvaspaceGetFlags_IMPL(OBJGVASPACE *pGVAS) +{ + return pGVAS->flags; +} + +MEMORY_DESCRIPTOR* +gvaspaceGetPageDirBase_IMPL(OBJGVASPACE *pGVAS, OBJGPU *pGpu) +{ + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + MEMORY_DESCRIPTOR *pRootMem = NULL; + NvU32 rootSize = 0; + + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NULL); + + + if (pGVAS->bIsExternallyOwned) + { + return pGVAS->pExternalPDB; + } + + mmuWalkGetPageLevelInfo(pGpuState->pWalk, pGpuState->pFmt->pRoot, 0, + (const MMU_WALK_MEMDESC**)&pRootMem, &rootSize); + return pRootMem; +} + +MEMORY_DESCRIPTOR* +gvaspaceGetKernelPageDirBase_IMPL(OBJGVASPACE *pGVAS, OBJGPU *pGpu) +{ + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NULL); + + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + return (MEMORY_DESCRIPTOR*)pGpuState->pMirroredRoot; +} + +const GMMU_FMT * +gvaspaceGetGmmuFmt_IMPL(OBJGVASPACE *pGVAS, OBJGPU *pGpu) +{ + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(NULL != pGpuState, NULL); + return pGpuState->pFmt; +} + +GVAS_GPU_STATE * +gvaspaceGetGpuState_IMPL(OBJGVASPACE *pGVAS, OBJGPU *pGpu) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + NV_ASSERT_OR_RETURN(NULL != pGVAS->pGpuStates, NULL); + NV_ASSERT_OR_RETURN(pVAS->gpuMask & NVBIT32(pGpu->gpuInstance), NULL); + return pGVAS->pGpuStates + nvMaskPos32(pVAS->gpuMask, pGpu->gpuInstance); +} + +NV_STATUS +gvaspacePinRootPageDir_IMPL(OBJGVASPACE *pGVAS, OBJGPU *pGpu) +{ + MMU_WALK_USER_CTX userCtx = {0}; + const MMU_FMT_LEVEL *pLevelFmt; + NV_STATUS status; + NvU64 rootPdeCoverage; + NvU64 vaLo; + NvU64 vaHi; + + if (NULL == pGVAS->pGpuStates) + { + // TODO: VMM must be enabled - remove once default. + return NV_ERR_NOT_SUPPORTED; + } + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + if (NULL == userCtx.pGpuState) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(0, done); + } + + // Determine aligned range to pin. + pLevelFmt = userCtx.pGpuState->pFmt->pRoot; + rootPdeCoverage = mmuFmtLevelPageSize(pLevelFmt); + vaLo = NV_ALIGN_DOWN64(gvaspaceGetVaStart(pGVAS), rootPdeCoverage); + vaHi = NV_ALIGN_UP64(gvaspaceGetVaLimit(pGVAS) + 1, rootPdeCoverage) - 1; + + // Alloc and bind root level instance. + status = mmuWalkReserveEntries(userCtx.pGpuState->pWalk, + pLevelFmt, vaLo, vaHi, NV_TRUE); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + +done: + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + return status; +} + +void +gvaspaceUnpinRootPageDir_IMPL(OBJGVASPACE *pGVAS, OBJGPU *pGpu) +{ + MMU_WALK_USER_CTX userCtx = {0}; + const MMU_FMT_LEVEL *pLevelFmt; + NV_STATUS status; + NvU64 rootPdeCoverage; + NvU64 vaLo; + NvU64 vaHi; + + if (NULL == pGVAS->pGpuStates) + { + // TODO: VMM must be enabled - remove once default. 
+ return; + } + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + if (NULL == userCtx.pGpuState) + { + NV_ASSERT_OR_GOTO(0, done); + } + + // Determine aligned range to unpin. + pLevelFmt = userCtx.pGpuState->pFmt->pRoot; + rootPdeCoverage = mmuFmtLevelPageSize(pLevelFmt); + vaLo = NV_ALIGN_DOWN64(gvaspaceGetVaStart(pGVAS), rootPdeCoverage); + vaHi = NV_ALIGN_UP64(gvaspaceGetVaLimit(pGVAS) + 1, rootPdeCoverage) - 1; + + // Unreserve root level instance (won't free it if there are still mappings). + status = mmuWalkReleaseEntries(userCtx.pGpuState->pWalk, + pLevelFmt, vaLo, vaHi); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + +done: + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); +} + +NV_STATUS +gvaspaceMap_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi, + const MMU_MAP_TARGET *pTarget, + const VAS_MAP_FLAGS flags +) +{ + NV_STATUS status = NV_OK; + EMEMBLOCK *pMemBlock = NULL; + GVAS_BLOCK *pVASBlock = NULL; + NvU64 pageSize = mmuFmtLevelPageSize(pTarget->pLevelFmt); + MMU_WALK_USER_CTX userCtx = {0}; + + // Enforce unicast. + NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE); + + // Check VA alignment. + NV_ASSERT_OR_RETURN(0 == (vaLo & (pageSize - 1)), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(0 == ((vaHi + 1) & (pageSize - 1)), NV_ERR_INVALID_ARGUMENT); + + // + // Register the mapping unless remapping an existing mapping. + // Remapping an existing mapping is used in two cases: + // 1. [MODS-only] Release/reacquire compression for verif. + // 2. [Windows-only] BAR1 force clobber for BSOD during bugcheck. + // + if (!flags.bRemap) + { + // Get VA block. + pMemBlock = pGVAS->pHeap->eheapGetBlock(pGVAS->pHeap, vaLo, 0); + NV_ASSERT_OR_RETURN(NULL != pMemBlock, NV_ERR_INVALID_ARGUMENT); + pVASBlock = (GVAS_BLOCK*)pMemBlock->pData; + + // Check VA containment. + NV_ASSERT_OR_RETURN(vaHi <= pMemBlock->end, NV_ERR_INVALID_ARGUMENT); + + // Insert range into VAS block mapping tree. + status = _gvaspaceMappingInsert(pGVAS, pGpu, pVASBlock, vaLo, vaHi, flags); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + } + + // Call MMU walker to map. + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx); + + if (NULL == userCtx.pGpuState) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(0, catch); + } + + status = mmuWalkMap(userCtx.pGpuState->pWalk, vaLo, vaHi, pTarget); + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + NV_ASSERT_OR_GOTO(NV_OK == status, catch); + +catch: + if (NV_OK != status && (!flags.bRemap)) + { + _gvaspaceMappingRemove(pGVAS, pGpu, pVASBlock, vaLo, vaHi); + } + return status; +} + +void +gvaspaceUnmap_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi +) +{ + NV_STATUS status = NV_OK; + EMEMBLOCK *pMemBlock = NULL; + GVAS_BLOCK *pVASBlock = NULL; + MMU_WALK_USER_CTX userCtx = {0}; + + // Enforce unicast. + NV_ASSERT_OR_RETURN_VOID(!gpumgrGetBcEnabledStatus(pGpu)); + + // Get VA block. 
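+    // (The returned heap block's pData carries the GVAS_BLOCK state, including
+    // the sparse flag consulted below to choose between re-sparsify and unmap.)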
+ pMemBlock = pGVAS->pHeap->eheapGetBlock(pGVAS->pHeap, vaLo, 0); + NV_ASSERT_OR_RETURN_VOID(NULL != pMemBlock); + pVASBlock = (GVAS_BLOCK*)pMemBlock->pData; + + // Unregister the mapping + status = _gvaspaceMappingRemove(pGVAS, pGpu, pVASBlock, vaLo, vaHi); + NV_ASSERT_OR_RETURN_VOID(NV_OK == status); + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx); + + if (NULL == userCtx.pGpuState) + { + NV_ASSERT(0); + } + else + { + if (pVASBlock->flags.bSparse || (pGVAS->flags & VASPACE_FLAGS_BAR_BAR1) + ||((pMemBlock->refCount >1) && (pGVAS->flags & VASPACE_FLAGS_FLA)) + ) + { + // Return back to Sparse if that was the original state of this allocation. + status = mmuWalkSparsify(userCtx.pGpuState->pWalk, vaLo, vaHi, NV_FALSE); + NV_ASSERT(NV_OK == status); + } + else + { + // Plain old unmap + status = mmuWalkUnmap(userCtx.pGpuState->pWalk, vaLo, vaHi); + NV_ASSERT(NV_OK == status); + } + } + + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); +} + +void +gvaspaceInvalidateTlb_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + VAS_PTE_UPDATE_TYPE update_type +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + NvU32 gfid = GPU_GFID_PF; + + NV_ASSERT_OR_RETURN_VOID(!gpumgrGetBcEnabledStatus(pGpu)); + NV_ASSERT_OR_RETURN_VOID(0 != (NVBIT(pGpu->gpuInstance) & pVAS->gpuMask)); + + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + MEMORY_DESCRIPTOR *pRootMem = NULL; + NvU32 rootSize = 0; + NvU32 invalidation_scope = NV_GMMU_INVAL_SCOPE_ALL_TLBS; + NvBool bCallingContextPlugin; + + NV_ASSERT_OR_RETURN_VOID(vgpuIsCallingContextPlugin(pGpu, &bCallingContextPlugin) == NV_OK); + if (!bCallingContextPlugin) + { + NV_ASSERT_OR_RETURN_VOID(vgpuGetCallingContextGfid(pGpu, &gfid) == NV_OK); + } + + if (pGVAS->flags & VASPACE_FLAGS_INVALIDATE_SCOPE_NVLINK_TLB) + { + invalidation_scope = NV_GMMU_INVAL_SCOPE_LINK_TLBS; + } + else + { + invalidation_scope = NV_GMMU_INVAL_SCOPE_NON_LINK_TLBS; + } + + mmuWalkGetPageLevelInfo(pGpuState->pWalk, pGpuState->pFmt->pRoot, 0, + (const MMU_WALK_MEMDESC**)&pRootMem, &rootSize); + if (pRootMem != NULL) + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + kgmmuInvalidateTlb_HAL(pGpu, pKernelGmmu, pRootMem, + pGVAS->flags, + update_type, gfid, + invalidation_scope); + + if (pGVAS->bIsMirrored) + { + kgmmuInvalidateTlb_HAL(pGpu, pKernelGmmu, + (MEMORY_DESCRIPTOR*)pGpuState->pMirroredRoot, + pGVAS->flags, + update_type, gfid, + invalidation_scope); + } + } +} + +NV_STATUS +gvaspaceGetVasInfo_IMPL +( + OBJGVASPACE *pGVAS, + NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const MMU_FMT_LEVEL *pBigPageTable = NULL; + const MMU_FMT_LEVEL *pSmallPageTable = NULL; + const GMMU_FMT *pFmt = gvaspaceGetGpuState(pGVAS, pGpu)->pFmt; + + NV_ASSERT_OR_RETURN(NULL != pParams, NV_ERR_INVALID_PARAM_STRUCT); + + // Retrive the number of VA bits for this format. + pParams->vaBitCount = pFmt->pRoot->virtAddrBitHi + 1; + + // Check if the page sizes are supported + pSmallPageTable = mmuFmtFindLevelWithPageShift(pFmt->pRoot, RM_PAGE_SHIFT); + NV_ASSERT_OR_RETURN(pSmallPageTable, NV_ERR_INVALID_EVENT); + + pBigPageTable = mmuFmtFindLevelWithPageShift(pFmt->pRoot, nvLogBase2(pGVAS->bigPageSize)); + NV_ASSERT_OR_RETURN(pBigPageTable, NV_ERR_INVALID_EVENT); + pParams->bigPageSize = pGVAS->bigPageSize; + + pParams->hugePageSize = kgmmuIsHugePageSupported(pKernelGmmu) ? 
+ RM_PAGE_SIZE_HUGE : 0; + pParams->pageSize512MB = kgmmuIsPageSize512mbSupported(pKernelGmmu) ? + RM_PAGE_SIZE_512M : 0; + + // Dual Page Table is supported for all Fermi-and-later chips + pParams->dualPageTableSupported = (NvU32)NV_TRUE; + + // Big Page Table caps + + // VA bits covered by a PDE (for Big Page Table), in a terminal Page Directory. + pParams->pdeCoverageBitCount = pBigPageTable->virtAddrBitHi + 1; + // Physical size of Page Table in bytes + pParams->pageTableBigFormat.pageTableSize = mmuFmtLevelSize(pBigPageTable); + // VA extent of a Big Page Table + pParams->pageTableBigFormat.pageTableCoverage = + (NvU32)mmuFmtLevelVirtAddrMask(pBigPageTable) + 1; + + // Small Page Table caps, similar to Big Page Table caps + //num4KPageTableFormats = 1; + pParams->pageTable4KFormat[0].pageTableSize = mmuFmtLevelSize(pSmallPageTable); + pParams->pageTable4KFormat[0].pageTableCoverage = + (NvU32)mmuFmtLevelVirtAddrMask(pSmallPageTable) + 1; + + pParams->idealVRAMPageSize = pParams->bigPageSize; + + pParams->vaRangeLo = vaspaceGetVaStart(pVAS); + + return NV_OK; +} + +NV_STATUS +gvaspaceGetPageTableInfo_IMPL +( + OBJGVASPACE *pGVAS, + NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS *pParams +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + GVAS_GPU_STATE *pGpuState; + MMU_WALK *pWalk; + const MMU_FMT_LEVEL *pRootFmt; + PMEMORY_DESCRIPTOR pRootMem = NULL; + NvU32 rootSize = 0; + NvU32 pteBlockIdx = 0; + NvU32 i; + NvBool bOrigBcState; + NV_STATUS rmStatus; + NvU64 maxPageSizeSupported = pGVAS->maxPageSizeSupported; + + NV_ASSERT_OR_RETURN(NULL != pParams, NV_ERR_INVALID_PARAM_STRUCT); + + // Pick a specific sub-device if requested. + if (0 != pParams->subDeviceId) + { + pGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu), + pParams->subDeviceId - 1); + NV_ASSERT_OR_RETURN(NULL != pGpu, NV_ERR_INVALID_ARGUMENT); + } + + // Page tables are pinned and queried in UC. Force GPU to unicast. + bOrigBcState = gpumgrGetBcEnabledStatus(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + + pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + pWalk = pGpuState->pWalk; + pRootFmt = pGpuState->pFmt->pRoot; + + // Pin lazy page tables for WDDMv1 KMD. + rmStatus = _gvaspacePinLazyPageTables(pGVAS, pGpu, pParams->gpuAddr); + + gpumgrSetBcEnabledStatus(pGpu, bOrigBcState); + + NV_ASSERT_OR_RETURN((NV_OK == rmStatus), rmStatus); + + for (i = 0; i <= (_gvaspacePageSizeToIdx(maxPageSizeSupported)); i++) + { + PMEMORY_DESCRIPTOR pMemDesc = NULL; + NvU32 memSize = 0; + NV0080_CTRL_DMA_PDE_INFO_PTE_BLOCK *pPteBlock = NULL; + NvU64 pageSize; + const MMU_FMT_LEVEL *pLevelFmt; + const MMU_FMT_LEVEL *pParentFmt; + NvU32 subLevel; + + + pageSize = (VAS_PAGESIZE_IDX_BIG == i) ? pGVAS->bigPageSize : pageSizes[i]; + pLevelFmt = mmuFmtFindLevelWithPageShift(pRootFmt, BIT_IDX_64(pageSize)); + if (NULL == pLevelFmt) + { + continue; + } + + pParentFmt = mmuFmtFindLevelParent(pRootFmt, pLevelFmt, &subLevel); + NV_ASSERT_OR_RETURN(NULL != pParentFmt, NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OK_OR_RETURN( + mmuWalkGetPageLevelInfo(pWalk, pLevelFmt, pParams->gpuAddr, + (const MMU_WALK_MEMDESC**)&pMemDesc, &memSize)); + if (NULL == pMemDesc) + continue; + + // These only need to be calculated once, but we need the parent level format. 
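+        // (pteBlockIdx is still zero only for the first populated page size,
+        // so the PDE-level fields below are filled exactly once.)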
+        if (0 == pteBlockIdx)
+        {
+            // The base VA of the PDE
+            pParams->pdeVirtAddr = mmuFmtLevelVirtAddrLo(pLevelFmt, pParams->gpuAddr);
+
+            // Number of bytes occupied by one PDE
+            pParams->pdeEntrySize = pParentFmt->entrySize;
+        }
+
+        pPteBlock = &pParams->pteBlocks[pteBlockIdx++];
+
+        // Page size supported by this page table
+        pPteBlock->pageSize = (NvU32)pageSize;
+
+        // Phys addr of the Page Table
+        pPteBlock->ptePhysAddr = memdescGetPhysAddr(pMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), 0);
+
+        // Number of bytes occupied by one PTE
+        pPteBlock->pteEntrySize = pLevelFmt->entrySize;
+
+        // VA extent of one PDE, i.e. of one entire Page Table.
+        pPteBlock->pdeVASpaceSize = (NvU32)mmuFmtLevelVirtAddrMask(pLevelFmt) + 1;
+
+        // Caching attributes
+        pPteBlock->pteCacheAttrib = memdescGetCpuCacheAttrib(pMemDesc);
+
+        // Addr space of the Page Table
+        switch (memdescGetAddressSpace(pMemDesc))
+        {
+            case ADDR_FBMEM:
+                pPteBlock->pteAddrSpace =
+                    NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_VIDEO_MEMORY;
+                break;
+            case ADDR_SYSMEM:
+                if (memdescGetCpuCacheAttrib(pMemDesc) == NV_MEMORY_CACHED)
+                {
+                    pPteBlock->pteAddrSpace =
+                        NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_SYSTEM_COHERENT_MEMORY;
+                }
+                else
+                {
+                    pPteBlock->pteAddrSpace =
+                        NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PTE_ADDR_SPACE_SYSTEM_NON_COHERENT_MEMORY;
+                }
+                break;
+            default:
+                NV_ASSERT(0);
+                return NV_ERR_INVALID_STATE;
+        }
+    }
+
+    // Addr of the root Page Dir
+    NV_ASSERT_OK_OR_RETURN(
+        mmuWalkGetPageLevelInfo(pWalk, pRootFmt, 0,
+                                (const MMU_WALK_MEMDESC**)&pRootMem, &rootSize));
+    if (NULL == pRootMem)
+    {
+        return NV_ERR_INVALID_STATE;
+    }
+    pParams->pdbAddr = memdescGetPhysAddr(pRootMem, VAS_ADDRESS_TRANSLATION(pVAS), 0);
+
+    // Addr Space of the Page Dir.
+    switch (memdescGetAddressSpace(pRootMem))
+    {
+        case ADDR_FBMEM:
+            pParams->pdeAddrSpace =
+                NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_VIDEO_MEMORY;
+            break;
+        case ADDR_SYSMEM:
+            if (memdescGetCpuCacheAttrib(pRootMem) == NV_MEMORY_CACHED)
+            {
+                pParams->pdeAddrSpace =
+                    NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_SYSTEM_COHERENT_MEMORY;
+            }
+            else
+            {
+                pParams->pdeAddrSpace =
+                    NV0080_CTRL_DMA_GET_PDE_INFO_PARAMS_PDE_ADDR_SPACE_SYSTEM_NON_COHERENT_MEMORY;
+            }
+            break;
+        default:
+            NV_ASSERT(0);
+            return NV_ERR_INVALID_STATE;
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+gvaspaceGetPteInfo_IMPL
+(
+    OBJGVASPACE *pGVAS,
+    OBJGPU      *pGpu,
+    NV0080_CTRL_DMA_GET_PTE_INFO_PARAMS *pParams,
+    RmPhysAddr  *pPhysAddr
+)
+{
+    KernelGmmu          *pKernelGmmu;
+    GVAS_GPU_STATE      *pGpuState;
+    MMU_WALK            *pWalk;
+    const GMMU_FMT      *pFmt;
+    NV_STATUS            status = NV_OK;
+    NvU32                i;
+    NvU32                pteBlockIndex = 0;
+    NvU64                maxPageSizeSupported = pGVAS->maxPageSizeSupported;
+    const MMU_FMT_LEVEL *pRootFmt;
+
+    NV_ASSERT_OR_RETURN(NULL != pParams, NV_ERR_INVALID_PARAM_STRUCT);
+
+    // Pick a specific sub-device if requested.
+    if (0 != pParams->subDeviceId)
+    {
+        pGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(pGpu),
+                                             pParams->subDeviceId - 1);
+        NV_ASSERT_OR_RETURN(NULL != pGpu, NV_ERR_INVALID_ARGUMENT);
+    }
+    pGpuState = gvaspaceGetGpuState(pGVAS, pGpu);
+    pWalk = pGpuState->pWalk;
+    pFmt = pGpuState->pFmt;
+    pRootFmt = pFmt->pRoot;
+    pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu);
+
+    //
+    // We will try all available page sizes for a valid allocation at the given VA.
+    // We will flag an error if multiple valid allocations exist.
+    //
+
+    for (i = 0; i <= (_gvaspacePageSizeToIdx(maxPageSizeSupported)); i++)
+    {
+        NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *pPteBlock = NULL;
+        const GMMU_FIELD_ADDRESS *pAddrField;
+        const MMU_FMT_LEVEL *pLevelFmt = NULL;
+        PMEMORY_DESCRIPTOR   pMemDesc  = NULL;
+        NvU32                memSize   = 0;
+        NvU8                *pMap      = NULL;
+        NvU32                pteIndex;
+        GMMU_ENTRY_VALUE     pte       = {{0}};
+        NvU64                pageSize;
+
+        pageSize = (VAS_PAGESIZE_IDX_BIG == i) ? pGVAS->bigPageSize : pageSizes[i];
+        pLevelFmt = mmuFmtFindLevelWithPageShift(pRootFmt, BIT_IDX_64(pageSize));
+        if (NULL == pLevelFmt)
+        {
+            continue;
+        }
+
+        NV_ASSERT_OK_OR_RETURN(
+            mmuWalkGetPageLevelInfo(pWalk, pLevelFmt, pParams->gpuAddr,
+                                    (const MMU_WALK_MEMDESC**)&pMemDesc, &memSize));
+        if (NULL == pMemDesc)
+        {
+            // Skip if not allocated.
+            continue;
+        }
+
+        pteIndex = mmuFmtVirtAddrToEntryIndex(pLevelFmt, pParams->gpuAddr);
+
+        // Map the Page Table to BAR
+        pMap = kbusMapRmAperture_HAL(pGpu, pMemDesc);
+        NV_ASSERT_OR_RETURN(NULL != pMap, NV_ERR_INSUFFICIENT_RESOURCES);
+        portMemCopy(pte.v8, pLevelFmt->entrySize, pMap + (pteIndex * pLevelFmt->entrySize), pLevelFmt->entrySize);
+        kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pMap, NV_FALSE);
+
+        if (pteBlockIndex <= _gvaspacePageSizeToIdx(maxPageSizeSupported))
+        {
+            pPteBlock = &pParams->pteBlocks[pteBlockIndex++];
+        }
+        else if (nvFieldGetBool(&pFmt->pPte->fldValid, pte.v8))
+        {
+            // Override last block if the PTE is valid.
+            pPteBlock = &pParams->pteBlocks[pteBlockIndex - 1];
+        }
+        else
+        {
+            break;
+        }
+
+        // Page size supported by this page table
+        pPteBlock->pageSize = (NvU32)pageSize;
+
+        // Number of bytes occupied by one PTE
+        pPteBlock->pteEntrySize = pLevelFmt->entrySize;
+
+        kgmmuExtractPteInfo(pKernelGmmu, &pte, pPteBlock, pFmt, pLevelFmt);
+
+        //
+        // Get phys addr encoded in the PTE, but only
+        // if requested and the PTE is valid. Only one page size
+        // PTE should be valid at a time.
+        //
+        if ((NULL != pPhysAddr) &&
+            nvFieldGetBool(&pFmt->pPte->fldValid, pte.v8))
+        {
+            pAddrField = gmmuFmtPtePhysAddrFld(pFmt->pPte,
+                gmmuFieldGetAperture(&pFmt->pPte->fldAperture, pte.v8));
+            *pPhysAddr = (RmPhysAddr)gmmuFieldGetAddress(pAddrField, pte.v8);
+        }
+    }
+
+    //
+    // We don't support pre-initializing page tables on VMM.
+    // Page tables are allocated on-demand during mapping.
+    // So we assert if this ctrl call is made with a request to init page tables (skipVASpaceInit == NV_FALSE)
+    // and the page tables are not already allocated.
+ // + NV_ASSERT_OR_RETURN((pteBlockIndex > 0) || pParams->skipVASpaceInit, NV_ERR_INVALID_REQUEST); + + return status; +} + +static inline +NvBool isPteDowngrade(KernelGmmu *pKernelGmmu, const GMMU_FMT *pFmt, NvU32 pteInputFlags, GMMU_ENTRY_VALUE curPte) +{ + NvBool bReadOnly = NV_FALSE; + NvBool curPteReadOnly = NV_FALSE; + + NvBool bPteInvalid = (!FLD_TEST_DRF(0080, _CTRL_DMA_PTE_INFO_PARAMS_FLAGS, + _VALID, _TRUE, pteInputFlags) + && nvFieldGetBool(&pFmt->pPte->fldValid, curPte.v8)); + + { + curPteReadOnly = nvFieldGetBool(&pFmt->pPte->fldReadOnly, curPte.v8); + } + bReadOnly = (!FLD_TEST_DRF(0080, _CTRL_DMA_PTE_INFO_PARAMS_FLAGS, + _READ_ONLY, _TRUE, pteInputFlags) + && !curPteReadOnly); + + return (bPteInvalid || bReadOnly); +} + +NV_STATUS +gvaspaceSetPteInfo_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + NV0080_CTRL_DMA_SET_PTE_INFO_PARAMS *pParams +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU8 *pMap = NULL; + NvU8 i; + NV_STATUS status = NV_OK; + NvBool bDowngrade = NV_FALSE; + NvU64 maxPageSizeSupported = pGVAS->maxPageSizeSupported; + + NV_ASSERT_OR_RETURN(NULL != pParams, NV_ERR_INVALID_PARAM_STRUCT); + + // Loop over each GPU associated with the VAS. + FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask) + { + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + MMU_WALK *pWalk = pGpuState->pWalk; + const GMMU_FMT *pFmt = pGpuState->pFmt; + const MMU_FMT_LEVEL *pRootFmt = pFmt->pRoot; + bDowngrade = NV_FALSE; + + // Skip the GPU if the caller requested a different specific sub-device. + if ((0 != pParams->subDeviceId) && + (pGpu->subdeviceInstance != (pParams->subDeviceId - 1))) + { + goto catchGpu; + } + + for (i = 0; i <= (_gvaspacePageSizeToIdx(maxPageSizeSupported)); i++) + { + NV0080_CTRL_DMA_PTE_INFO_PTE_BLOCK *pPteBlock = NULL; + const MMU_FMT_LEVEL *pLevelFmt; + PMEMORY_DESCRIPTOR pMemDesc = NULL; + NvU32 memSize = 0; + GMMU_ENTRY_VALUE pte = {{0}}; + NvBool bValid; + NvBool bEncrypted; + NvBool bReadOnly; + NvBool bVolatile = NV_FALSE; + NvU32 aperture; + NvU32 pteIndex; + NvU64 surfOffset; + + // + // Ignore the index if a page size of 0 is specified. This is a valid + // check as the caller may send down 0 page size for indxes + // which are not of interest. + // + if (0 == pParams->pteBlocks[i].pageSize) + continue; + + // + // Continue the loop if we see an unsupported page size. + // Ideally we should assert, but we're emulating the behavior of the old + // API @ref dmaSetPteInfo_GF100 here. + // + if (!(pGVAS->bigPageSize == pParams->pteBlocks[i].pageSize || + RM_PAGE_SIZE_HUGE == pParams->pteBlocks[i].pageSize || + RM_PAGE_SIZE_512M == pParams->pteBlocks[i].pageSize || + RM_PAGE_SIZE == pParams->pteBlocks[i].pageSize)) + { + continue; + } + + // Query the Page Tables. + pLevelFmt = mmuFmtFindLevelWithPageShift(pRootFmt, + BIT_IDX_32(pParams->pteBlocks[i].pageSize)); + if (NULL == pLevelFmt) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(0, catchGpu); + } + status = mmuWalkGetPageLevelInfo(pWalk, pLevelFmt, pParams->gpuAddr, + (const MMU_WALK_MEMDESC**)&pMemDesc, &memSize); + NV_ASSERT_OR_GOTO(NV_OK == status, catchGpu); + + // Can't set PTE for an unallocated VA. 
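+            // (A NULL memdesc from mmuWalkGetPageLevelInfo() means no page
+            // table instance backs this VA at the requested page size.)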
+            if (NULL == pMemDesc)
+            {
+                status = NV_ERR_INVALID_ADDRESS;
+                NV_ASSERT_OR_GOTO(0, catchGpu);
+            }
+
+            pPteBlock = &pParams->pteBlocks[i];
+
+            // Map the Page Table to BAR
+            pMap = kbusMapRmAperture_HAL(pGpu, pMemDesc);
+            if (NULL == pMap)
+            {
+                status = NV_ERR_INSUFFICIENT_RESOURCES;
+                NV_ASSERT_OR_GOTO(0, catchGpu);
+            }
+
+            // Read the PTE
+            pteIndex = mmuFmtVirtAddrToEntryIndex(pLevelFmt, pParams->gpuAddr);
+            portMemCopy(pte.v8, pLevelFmt->entrySize, pMap + (pteIndex * pLevelFmt->entrySize), pLevelFmt->entrySize);
+
+            //
+            // If any of these entries are being downgraded, we need to perform
+            // a full flush.
+            //
+            bDowngrade = isPteDowngrade(GPU_GET_KERNEL_GMMU(pGpu), pFmt, pPteBlock->pteFlags, pte);
+
+
+            // Insert the PTE fields from pParams
+
+            // Valid
+            bValid = FLD_TEST_DRF(0080, _CTRL_DMA_PTE_INFO_PARAMS_FLAGS,
+                                  _VALID, _TRUE, pPteBlock->pteFlags);
+
+            // Aperture
+            aperture = DRF_VAL(0080_CTRL, _DMA_PTE_INFO,
+                               _PARAMS_FLAGS_APERTURE, pPteBlock->pteFlags);
+
+            // Encryption
+            bEncrypted = FLD_TEST_DRF(0080, _CTRL_DMA_PTE_INFO_PARAMS_FLAGS,
+                                      _ENCRYPTED, _TRUE, pPteBlock->pteFlags);
+
+            // Read Only
+            bReadOnly = FLD_TEST_DRF(0080, _CTRL_DMA_PTE_INFO_PARAMS_FLAGS,
+                                     _READ_ONLY, _TRUE, pPteBlock->pteFlags);
+
+            if (!FLD_TEST_DRF(0080, _CTRL_DMA_PTE_INFO_PARAMS_FLAGS,
+                              _GPU_CACHED, _NOT_SUPPORTED, pPteBlock->pteFlags))
+            {
+                bVolatile = FLD_TEST_DRF(0080, _CTRL_DMA_PTE_INFO_PARAMS_FLAGS,
+                                         _GPU_CACHED, _FALSE, pPteBlock->pteFlags);
+            }
+
+            {
+                nvFieldSetBool(&pFmt->pPte->fldValid, bValid, pte.v8);
+                if (bValid)
+                {
+                    nvFieldSet32(&pFmt->pPte->fldAperture._enum.desc, aperture, pte.v8);
+                    nvFieldSetBool(&pFmt->pPte->fldEncrypted, bEncrypted, pte.v8);
+                    nvFieldSetBool(&pFmt->pPte->fldReadOnly, bReadOnly, pte.v8);
+                    nvFieldSetBool(&pFmt->pPte->fldVolatile, bVolatile, pte.v8);
+
+                    //
+                    // gmmuFmtInitPteCompTags assumes that lower/upper half of CTL is
+                    // determined by the surfOffset passed in.
+                    // We calculate the surface offset here based on the pteIndex to
+                    // match the pre-GM20X behavior of selecting half based on VA alignment.
+                    //
+                    // compPageIndexLo/Hi are also set to always allow compression on
+                    // the page index we are overriding. The 2MB page requires 0x20 comptags to be
+                    // contiguous, so set the endPage limit properly as well.
+                    //
+                    if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, pPteBlock->kind))
+                    {
+                        COMPR_INFO comprInfo;
+                        const GMMU_FIELD_ADDRESS *pAddrField = gmmuFmtPtePhysAddrFld(pFmt->pPte,
+                            gmmuFieldGetAperture(&pFmt->pPte->fldAperture, pte.v8));
+                        RmPhysAddr physAddr = (RmPhysAddr)gmmuFieldGetAddress(pAddrField, pte.v8);
+
+                        surfOffset = pteIndex * pPteBlock->pageSize;
+                        NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, memmgrFillComprInfo(pGpu, pMemoryManager, pPteBlock->pageSize, 1, pPteBlock->kind,
+                                                                               surfOffset, pPteBlock->comptagLine, &comprInfo));
+                        kgmmuFieldSetKindCompTags(GPU_GET_KERNEL_GMMU(pGpu), pFmt, pLevelFmt, &comprInfo, physAddr, surfOffset, pteIndex, pte.v8);
+                    }
+                    else
+                    {
+                        nvFieldSet32(&pFmt->pPte->fldKind, pPteBlock->kind, pte.v8);
+                    }
+                }
+            }
+
+
+
+
+            // Copy back the overwritten values to the actual PTE memory
+            portMemCopy(pMap + (pteIndex * pLevelFmt->entrySize), pLevelFmt->entrySize, pte.v8, pLevelFmt->entrySize);
+            kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pMap, NV_TRUE);
+        }
+
+        // Invalidate TLB
+        gvaspaceInvalidateTlb(pGVAS, pGpu, bDowngrade ?
PTE_DOWNGRADE : PTE_UPGRADE); + +catchGpu: + if (NV_OK != status) + { + break; + } + } + FOR_EACH_GPU_IN_MASK_UC_END + + return status; +} + +static void +_gmmuWalkCBFillEntries_SkipExternal +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + const MMU_WALK_FILL_STATE fillState, + NvU32 *pProgress +) +{ + OBJGVASPACE *pGVAS = pUserCtx->pGVAS; + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + + // Clamp index range to RM-internal entries. + const NvU32 entryIndexLoClamp = NV_MAX(entryIndexLo, + mmuFmtVirtAddrToEntryIndex(pLevelFmt, pVAS->vasStart)); + const NvU32 entryIndexHiClamp = NV_MIN(entryIndexHi, + mmuFmtVirtAddrToEntryIndex(pLevelFmt, pGVAS->vaLimitInternal)); + + // Clamp may negate range. + if (entryIndexHiClamp >= entryIndexLoClamp) + { + // Reuse normal fill callback. + g_gmmuWalkCallbacks.FillEntries(pUserCtx, pLevelFmt, pLevelMem, + entryIndexLoClamp, entryIndexHiClamp, + fillState, pProgress); + NV_ASSERT_OR_RETURN_VOID(*pProgress == (entryIndexHiClamp - entryIndexLoClamp + 1)); + } + + // Report full range complete on success. + *pProgress = entryIndexHi - entryIndexLo + 1; +} + +static void +_gmmuWalkCBCopyEntries_SkipExternal +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_WALK_MEMDESC *pSrcMem, + const MMU_WALK_MEMDESC *pDstMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + NvU32 *pProgress +) +{ + OBJGVASPACE *pGVAS = pUserCtx->pGVAS; + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + + // Clamp index range to RM-internal entries. + const NvU32 entryIndexLoClamp = NV_MAX(entryIndexLo, + mmuFmtVirtAddrToEntryIndex(pLevelFmt, pVAS->vasStart)); + const NvU32 entryIndexHiClamp = NV_MIN(entryIndexHi, + mmuFmtVirtAddrToEntryIndex(pLevelFmt, pGVAS->vaLimitInternal)); + + // Clamp may negate range. + if (entryIndexHiClamp >= entryIndexLoClamp) + { + // Reuse normal copy callback. + g_gmmuWalkCallbacks.CopyEntries(pUserCtx, pLevelFmt, pSrcMem, pDstMem, + entryIndexLoClamp, entryIndexHiClamp, + pProgress); + NV_ASSERT_OR_RETURN_VOID(*pProgress == (entryIndexHiClamp - entryIndexLoClamp + 1)); + } + + // Report full range complete. + *pProgress = entryIndexHi - entryIndexLo + 1; +} + +// +// TODO: hClient is added temporarily and will be removed after RS_PRIV_LEVEL is used +// in osCreateMemFromOsDescriptor. See bug #2107861. 
+// +NV_STATUS +gvaspaceExternalRootDirCommit_IMPL +( + OBJGVASPACE *pGVAS, + NvHandle hClient, + OBJGPU *pGpu, + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *pParams +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + MEMORY_DESCRIPTOR *pRootMemNew = NULL; + NvU64 rootSizeNew; + NV_STATUS status; + NV_ADDRESS_SPACE aperture; + NvU32 cpuCacheAttr; + NvU64 vaLimitOld; + NvU64 vaLimitNew; + NvU32 attr; + NvU32 attr2; + NvU32 os02Flags = 0; + const MMU_WALK_CALLBACKS *pCb = NULL; + MMU_WALK_CALLBACKS callbacks; + MMU_WALK_USER_CTX userCtx = {0}; + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + const NvBool bAllChannels = FLD_TEST_DRF(0080_CTRL_DMA_SET_PAGE_DIRECTORY, + _FLAGS, _ALL_CHANNELS, _TRUE, pParams->flags); + const NvBool bFirstCommit = (NULL == pGpuState->pRootInternal); + const NvBool bIgnoreChannelBusy = FLD_TEST_DRF(0080_CTRL_DMA_SET_PAGE_DIRECTORY, + _FLAGS, _IGNORE_CHANNEL_BUSY, _TRUE, pParams->flags); + const NvU64 rootPdeCoverage = mmuFmtLevelPageSize(pGpuState->pFmt->pRoot); + NvU64 vaInternalLo = NV_ALIGN_DOWN64(gvaspaceGetVaStart(pGVAS), rootPdeCoverage); + NvU64 vaInternalHi = NV_ALIGN_UP64(pGVAS->vaLimitInternal + 1, rootPdeCoverage) - 1; + NvU32 gfid; + + NV_ASSERT_OK_OR_RETURN(vgpuGetCallingContextGfid(pGpu, &gfid)); + + // + // For external VAS, create subcontext only after SetPageDirectory() call is made. + // This will ensure that new PDB will be updated in all channels subcontext array. + // bug 1805222 comment 11 for more details. + // + if (vaspaceIsExternallyOwned(pVAS)) + { + MEMORY_DESCRIPTOR *pPDB = vaspaceGetPageDirBase(pVAS, pGpu); + NV_ASSERT_OR_RETURN(pPDB == NULL, NV_ERR_INVALID_STATE); + } + + switch (DRF_VAL(0080_CTRL_DMA_SET_PAGE_DIRECTORY, _FLAGS, _APERTURE, pParams->flags)) + { + case NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_VIDMEM: + aperture = ADDR_FBMEM; + cpuCacheAttr = NV_MEMORY_UNCACHED; + break; + case NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_COH: + aperture = ADDR_SYSMEM; + cpuCacheAttr = NV_MEMORY_CACHED; + break; + case NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS_APERTURE_SYSMEM_NONCOH: + aperture = ADDR_SYSMEM; + cpuCacheAttr = NV_MEMORY_UNCACHED; + break; + default: + NV_ASSERT_OR_RETURN(!"invalid aperture", NV_ERR_INVALID_ARGUMENT); + } + + NV_ASSERT_OR_RETURN(!pGVAS->bIsMirrored, NV_ERR_NOT_SUPPORTED); + // Ensure new page directory is not smaller than RM-managed region of the VA heap. + vaLimitOld = pVAS->vasLimit; + vaLimitNew = mmuFmtEntryIndexVirtAddrHi(pGpuState->pFmt->pRoot, 0, pParams->numEntries - 1); + + NV_ASSERT_OR_RETURN(vaLimitNew >= pGVAS->vaLimitInternal, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaLimitNew <= pGVAS->vaLimitMax, NV_ERR_INVALID_ARGUMENT); + + // We have to truncate this later so we check for overflow here + NV_ASSERT_OR_RETURN((NvU64)pParams->numEntries * (NvU64)pGpuState->pFmt->pRoot->entrySize <= NV_U32_MAX, + NV_ERR_INVALID_ARGUMENT); + + // Describe the new page directory. 
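+    // (The size is derived from the caller-supplied entry count; the overflow
+    // check above keeps the NvU32 truncations below safe.)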
+    rootSizeNew = (NvU64)pParams->numEntries * (NvU64)pGpuState->pFmt->pRoot->entrySize;
+
+    if (pGVAS->bIsAtsEnabled)
+    {
+        NV_PRINTF(LEVEL_INFO, "PASID: %u\n", pParams->pasid);
+        pGVAS->processAddrSpaceId = pParams->pasid;
+    }
+
+    NV_ASSERT_OR_RETURN((pGVAS->flags & VASPACE_FLAGS_SHARED_MANAGEMENT) || vaspaceIsExternallyOwned(pVAS),
+                        NV_ERR_NOT_SUPPORTED);
+
+    // If we have coherent cpu mapping, it is functionally required that we use direct BAR2 mappings
+    if ((aperture == ADDR_SYSMEM) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) &&
+        !vaspaceIsExternallyOwned(pVAS))
+    {
+        NV_CHECK_OR_RETURN(LEVEL_ERROR, IS_GFID_PF(gfid), NV_ERR_INVALID_ARGUMENT);
+
+        //
+        // If the page tables are in sysmem, we need to explicitly state this is
+        // allocated outside RM and need to register with OS layer so that RM
+        // can map the memory later in the busMapRmAperture code path.
+        //
+        attr = DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS);
+        attr2 = 0;
+
+        status = RmDeprecatedConvertOs32ToOs02Flags(attr, attr2, 0, &os02Flags);
+        NV_ASSERT_OR_GOTO(NV_OK == status, catch);
+
+        status = osCreateMemFromOsDescriptor(pGpu, (NvP64)pParams->physAddress,
+                                             hClient, os02Flags,
+                                             &rootSizeNew, &pRootMemNew,
+                                             NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR,
+                                             RS_PRIV_LEVEL_KERNEL);
+        NV_ASSERT_OR_GOTO(NV_OK == status, catch);
+    }
+    else
+    {
+        NvU32 flags = MEMDESC_FLAGS_NONE;
+
+        if (IS_GFID_VF(gfid))
+        {
+            // In SRIOV-heavy PDB address is GPA
+            NV_CHECK_OR_RETURN(LEVEL_ERROR, aperture == ADDR_FBMEM, NV_ERR_INVALID_ARGUMENT);
+            flags |= MEMDESC_FLAGS_GUEST_ALLOCATED;
+        }
+
+        // TODO: PDB alignment
+        status = memdescCreate(&pRootMemNew, pGpu, (NvU32)rootSizeNew, RM_PAGE_SIZE, NV_TRUE, aperture,
+                               cpuCacheAttr, flags);
+        NV_ASSERT_OR_GOTO(NV_OK == status, catch);
+        memdescDescribe(pRootMemNew, aperture, pParams->physAddress, (NvU32)rootSizeNew);
+        memdescSetPageSize(pRootMemNew, VAS_ADDRESS_TRANSLATION(pVAS), RM_PAGE_SIZE);
+    }
+
+    if (vaspaceIsExternallyOwned(pVAS))
+    {
+        //
+        // For an externally owned vaspace we will associate a PDB that is provided
+        // by the owner of the vaspace. This is different from the "shared management"
+        // scenario because, in the externally owned case, RM will not allocate any page tables
+        // or VA for this address space. This is a way to make sure RM programs the correct PDB
+        // when clients use this address space to create a channel.
+        // TODO: Make externally owned vaspace a separate vaspace class.
+        //
+        status = _gvaspaceSetExternalPageDirBase(pGVAS, pGpu, pRootMemNew);
+        return status;
+    }
+
+    // Acquire MMU walker user context (always released below in catch label).
+    gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx);
+    if (!bAllChannels)
+    {
+        // Specify single channel ID for which to update PDB if required by caller.
+        userCtx.pChID = &pParams->chId;
+    }
+
+    //
+    // If RM-internal page directory has not been saved yet, this is
+    // the first external page directory committed.
+    //
+    if (bFirstCommit)
+    {
+        NvU32 rootSizeOld;
+
+        //
+        // Lock-down the root entries of the RM-internal VA range.
+        // This forces the internal root page directory to be allocated if it
+        // is not already.
+        //
+        status = mmuWalkReserveEntries(pGpuState->pWalk, pGpuState->pFmt->pRoot,
+                                       vaInternalLo, vaInternalHi, NV_TRUE);
+        NV_ASSERT_OR_GOTO(NV_OK == status, catch);
+
+        // Save a reference to the RM-internal root for later revoke.
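+        // (gvaspaceExternalRootDirRevoke() migrates the walker back to this
+        // saved root and then releases the reserved internal entries.)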
+ mmuWalkGetPageLevelInfo(pGpuState->pWalk, pGpuState->pFmt->pRoot, 0, + (const MMU_WALK_MEMDESC**)&pGpuState->pRootInternal, + &rootSizeOld); + NV_ASSERT(NULL != pGpuState->pRootInternal); + + // TODO: Proper refcount with memdesc cleanup - inverse of memdescFree/memdescDestroy. + ++pGpuState->pRootInternal->RefCount; + if (pGpuState->pRootInternal->Allocated > 0) + { + ++pGpuState->pRootInternal->Allocated; + } + } + + // + // Invalidate MMU to kick out any entries associated with old PDB. + // Because we're copying the PTE entry, we need to ensure all accesses + // referring to the old entry are flushed, so we use PTE_DOWNGRADE here. + // + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_DOWNGRADE); + + // + // Override callbacks for migration. + // Copy and Fill callbacks are replaced to skip external entries. + // + pCb = mmuWalkGetCallbacks(pGpuState->pWalk); + callbacks = *pCb; + callbacks.CopyEntries = _gmmuWalkCBCopyEntries_SkipExternal; + callbacks.FillEntries = _gmmuWalkCBFillEntries_SkipExternal; + mmuWalkSetCallbacks(pGpuState->pWalk, &callbacks); + + // Track latest limit for PDB commit. + pVAS->vasLimit = vaLimitNew; + + // Migrate root to the new memory. + status = mmuWalkMigrateLevelInstance(pGpuState->pWalk, pGpuState->pFmt->pRoot, 0, + (MMU_WALK_MEMDESC *)pRootMemNew, (NvU32)rootSizeNew, + bIgnoreChannelBusy); + NV_ASSERT_OR_GOTO(NV_OK == status, catch); + +catch: + // Restore walker callbacks. + if (NULL != pCb) + { + mmuWalkSetCallbacks(pGpuState->pWalk, pCb); + } + // Rollback on failure. + if (NV_OK != status) + { + pVAS->vasLimit = vaLimitOld; + if (bFirstCommit) + { + if (NULL != pGpuState->pRootInternal) + { + memdescFree(pGpuState->pRootInternal); + memdescDestroy(pGpuState->pRootInternal); + pGpuState->pRootInternal = NULL; + + mmuWalkReleaseEntries(pGpuState->pWalk, pGpuState->pFmt->pRoot, + vaInternalLo, vaInternalHi); + } + } + if (NULL != pRootMemNew) + { + memdescDestroy(pRootMemNew); + pRootMemNew = NULL; + } + } + // Release MMU walker user context. + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + + return status; +} + +NV_STATUS +gvaspaceExternalRootDirRevoke_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *pParams +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + NV_STATUS status = NV_OK; + MEMORY_DESCRIPTOR *pRootMemNew = NULL; + NvU32 rootSizeNew; + const MMU_WALK_CALLBACKS *pCb = NULL; + MMU_WALK_CALLBACKS callbacks; + MMU_WALK_USER_CTX userCtx = {0}; + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + const NvU64 rootPdeCoverage = mmuFmtLevelPageSize(pGpuState->pFmt->pRoot); + const NvU64 vaInternalLo = NV_ALIGN_DOWN64(pVAS->vasStart, rootPdeCoverage); + const NvU64 vaInternalHi = NV_ALIGN_UP64(pGVAS->vaLimitInternal + 1, rootPdeCoverage) - 1; + + if (vaspaceIsExternallyOwned(pVAS)) + { + MEMORY_DESCRIPTOR *pExternalPDB = NULL; + + // get the PDB + pExternalPDB = vaspaceGetPageDirBase(pVAS, pGpu); + if (NULL != pExternalPDB) + { + memdescDestroy(pExternalPDB); + pExternalPDB = NULL; + } + status = _gvaspaceSetExternalPageDirBase(pGVAS, pGpu, pExternalPDB); + return status; + } + + NV_ASSERT_OR_RETURN(!pGVAS->bIsMirrored, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(NULL != pGpuState->pRootInternal, NV_ERR_INVALID_STATE); + + pRootMemNew = pGpuState->pRootInternal; + rootSizeNew = (NvU32)pRootMemNew->Size; + + // + // Invalidate MMU to kick out old entries before changing PDBs. 
+    // Because we're copying the PTE entry, we need to ensure all accesses
+    // referring to the old entry are flushed, so we use PTE_DOWNGRADE here.
+    //
+    gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_DOWNGRADE);
+
+    // Acquire walker user context.
+    gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx);
+
+    //
+    // Override callbacks for migration.
+    // Copy and Fill callbacks are replaced to skip external entries.
+    //
+    pCb = mmuWalkGetCallbacks(pGpuState->pWalk);
+    callbacks = *pCb;
+    callbacks.CopyEntries = _gmmuWalkCBCopyEntries_SkipExternal;
+    callbacks.FillEntries = _gmmuWalkCBFillEntries_SkipExternal;
+    mmuWalkSetCallbacks(pGpuState->pWalk, &callbacks);
+
+    // Restore limit for PDB commit.
+    pVAS->vasLimit = pGVAS->vaLimitInternal;
+
+    // Migrate root to the new memory.
+    status = mmuWalkMigrateLevelInstance(pGpuState->pWalk, pGpuState->pFmt->pRoot, 0,
+                                         (MMU_WALK_MEMDESC *)pRootMemNew, rootSizeNew, NV_FALSE);
+    NV_ASSERT(NV_OK == status);
+
+    // RM-internal root ownership has transferred back to walker.
+    pGpuState->pRootInternal = NULL;
+
+    // Release locked-down internal root entries.
+    status = mmuWalkReleaseEntries(pGpuState->pWalk, pGpuState->pFmt->pRoot,
+                                   vaInternalLo, vaInternalHi);
+    NV_ASSERT(NV_OK == status);
+
+    // Restore callbacks.
+    mmuWalkSetCallbacks(pGpuState->pWalk, pCb);
+
+    // Release walker user context.
+    gvaspaceWalkUserCtxRelease(pGVAS, &userCtx);
+
+    // No possible response to failure - above asserts are best we can do.
+    return NV_OK;
+}
+
+NV_STATUS
+gvaspaceResize_IMPL
+(
+    OBJGVASPACE *pGVAS,
+    NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_PARAMS *pParams
+)
+{
+    OBJGPU     *pGpu = NULL;
+    OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE);
+    NvU64       vaLimitNew;
+    NV_STATUS   status = NV_OK;
+
+    NV_ASSERT_OR_RETURN(!pGVAS->bIsMirrored, NV_ERR_NOT_SUPPORTED);
+    NV_ASSERT_OR_RETURN(!(pGVAS->flags & VASPACE_FLAGS_SHARED_MANAGEMENT),
+                        NV_ERR_NOT_SUPPORTED);
+
+    // Calculate and check new VA range size (limit + 1).
+    if (NV0080_CTRL_DMA_SET_VA_SPACE_SIZE_MAX == pParams->vaSpaceSize)
+    {
+        vaLimitNew = pGVAS->vaLimitMax;
+    }
+    else
+    {
+        vaLimitNew = pParams->vaSpaceSize - 1;
+    }
+
+    // Abort early if not changing the size.
+    if (vaLimitNew == pVAS->vasLimit)
+    {
+        goto done;
+    }
+
+    // Shrinking the VA space is not currently supported.
+    NV_ASSERT_OR_RETURN(vaLimitNew >= pVAS->vasLimit, NV_ERR_INVALID_LIMIT);
+    NV_ASSERT_OR_RETURN(vaLimitNew <= pGVAS->vaLimitMax, NV_ERR_INVALID_LIMIT);
+
+    if (gvaspaceIsInternalVaRestricted(pGVAS))
+    {
+        // This is not supported because clients that use VASPACE_SHARED_MANAGEMENT
+        // expand/shrink their VAs via Set/Revoke Root Page Dir.
+        // There is no documented use case for this.
+        NV_PRINTF(LEVEL_ERROR, "doesn't support clientVA expansion\n");
+        NV_ASSERT(0);
+    }
+
+
+    // Commit new limit.
+    pVAS->vasLimit = vaLimitNew;
+    pGVAS->vaLimitInternal = vaLimitNew;
+
+    // Shrink the top VA region reserved for growth.
+    pGVAS->pHeap->eheapFree(pGVAS->pHeap, pGVAS->vaLimitMax);
+    NV_ASSERT_OK_OR_RETURN(_gvaspaceReserveTopForGrowth(pGVAS));
+
+    // Loop over each GPU associated with the VAS.
+    FOR_EACH_GPU_IN_MASK_UC(32, pSys, pGpu, pVAS->gpuMask)
+    {
+        GVAS_GPU_STATE           *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu);
+        MMU_WALK_USER_CTX         userCtx = {0};
+        const MMU_WALK_CALLBACKS *pCb = mmuWalkGetCallbacks(pGpuState->pWalk);
+        const MMU_FMT_LEVEL      *pRootFmt = pGpuState->pFmt->pRoot;
+        MMU_WALK_MEMDESC         *pRootMem = NULL;
+        NvU32                     rootSize = 0;
+        NvBool                    bChanged = NV_FALSE;
+
+        // If root has not been allocated yet it will use the new limit later.
+ if (NULL == gvaspaceGetPageDirBase(pGVAS, pGpu)) + { + goto doneGpu; + } + + // Acquire walker context. + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + status = mmuWalkGetPageLevelInfo(pGpuState->pWalk, pRootFmt, 0, + (const MMU_WALK_MEMDESC**)&pRootMem, &rootSize); + NV_ASSERT_OR_GOTO(NV_OK == status, doneGpu); + + // + // Allocate new root manually. If realloc is not needed, LevelAlloc() + // will retain the existing PDB. + // + status = pCb->LevelAlloc(&userCtx, pRootFmt, 0, vaLimitNew, NV_TRUE, + &pRootMem, &rootSize, &bChanged); + NV_ASSERT_OR_GOTO(NV_OK == status, doneGpu); + + // Migrate to new root (if reallocated), updating PDB and VA limit accordingly. + if (bChanged) + { + // + // Invalidate MMU to kick out any entries associated with old PDB. + // Because we're copying the PTE entry, we need to ensure all accesses + // referring to the old entry are flushed, so we use PTE_DOWNGRADE here. + // + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_DOWNGRADE); + + status = mmuWalkMigrateLevelInstance(pGpuState->pWalk, pRootFmt, 0, + pRootMem, rootSize, NV_FALSE); + NV_ASSERT_OR_GOTO(NV_OK == status, doneGpu); + } + else + { + NvBool bDone; + + // Trigger an UpdatePdb to commit the new vaLimit to HW. + bDone = pCb->UpdatePdb(&userCtx, pRootFmt, (const MMU_WALK_MEMDESC *)pRootMem, + NV_FALSE); + if (!bDone) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(bDone, doneGpu); + } + } + +doneGpu: + if ((NV_OK != status) && (NULL != pRootMem)) + { + pCb->LevelFree(&userCtx, pRootFmt, 0, pRootMem); + } + // Release walker context. + if (NULL != userCtx.pGpu) + { + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + } + if (NV_OK != status) + { + break; + } + } + FOR_EACH_GPU_IN_MASK_UC_END + +done: + if (NV_OK == status) + { + // On success, return usable VA space size. 
+ pParams->vaSpaceSize = pGVAS->vaLimitInternal - pVAS->vasStart + 1; + } + + return status; +} + +struct MMU_MAP_ITERATOR +{ + GMMU_ENTRY_VALUE entry; +}; + +static void +_gmmuWalkCBMapSingleEntry +( + MMU_WALK_USER_CTX *pUserCtx, + const MMU_MAP_TARGET *pTarget, + const MMU_WALK_MEMDESC *pLevelMem, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + NvU32 *pProgress +) +{ + OBJGPU *pGpu = pUserCtx->pGpu; + MMU_MAP_ITERATOR *pIter = pTarget->pIter; + MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR*)pLevelMem; + NvU8 *pMap = NULL; + + NV_PRINTF(LEVEL_INFO, "[GPU%u]: PA 0x%llX, Entries 0x%X-0x%X\n", + pUserCtx->pGpu->gpuInstance, + memdescGetPhysAddr(pMemDesc, AT_GPU, 0), entryIndexLo, + entryIndexHi); + + NV_ASSERT_OR_RETURN_VOID(entryIndexLo == entryIndexHi); + + pMap = kbusMapRmAperture_HAL(pGpu, pMemDesc); + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + + portMemCopy(pMap + entryIndexLo * pTarget->pLevelFmt->entrySize, + pTarget->pLevelFmt->entrySize, + pIter->entry.v8, + pTarget->pLevelFmt->entrySize); + + kbusUnmapRmAperture_HAL(pGpu, pMemDesc, &pMap, NV_TRUE); + + *pProgress = entryIndexHi - entryIndexLo + 1; +} + +NV_STATUS +gvaspaceUpdatePde2_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS *pParams +) +{ + MMU_WALK_USER_CTX userCtx = {0}; + MMU_MAP_TARGET mapTarget = {0}; + MMU_MAP_ITERATOR mapIter; + NvU32 numValidPTs = 0; + GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + const GMMU_FMT *pFmt = pGpuState->pFmt; + const MMU_FMT_LEVEL *pPTBig; + const MMU_FMT_LEVEL *pPT4KB; + NvU32 bigIdx; + NvU32 ptIdx; + const NvBool bSparse = FLD_TEST_DRF(0080_CTRL_DMA_UPDATE_PDE_2, _FLAGS, + _SPARSE, _TRUE, pParams->flags); + NvU8 *pPdeBuffer = KERNEL_POINTER_FROM_NvP64(NvU8*, pParams->pPdeBuffer); + + portMemSet(&mapIter, 0, sizeof(mapIter)); + + // Lookup leaf page table formats. + pPTBig = mmuFmtFindLevelWithPageShift(pFmt->pRoot, + BIT_IDX_32(gvaspaceGetBigPageSize(pGVAS))); + pPT4KB = mmuFmtFindLevelWithPageShift(pFmt->pRoot, 12); + + NV_ASSERT_OR_RETURN(NULL != pPTBig, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(NULL != pPT4KB, NV_ERR_NOT_SUPPORTED); + + // Setup map target. + mapTarget.pLevelFmt = mmuFmtFindLevelParent(pFmt->pRoot, pPTBig, &bigIdx); + mapTarget.pIter = &mapIter; + mapTarget.MapNextEntries = _gmmuWalkCBMapSingleEntry; + + NV_ASSERT_OR_RETURN(NULL != mapTarget.pLevelFmt, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(2 == mapTarget.pLevelFmt->numSubLevels, NV_ERR_NOT_SUPPORTED); + + // Setup PDE value. + for (ptIdx = 0; ptIdx < NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX__SIZE; ++ptIdx) + { + NV0080_CTRL_DMA_UPDATE_PDE_2_PAGE_TABLE_PARAMS *pPtParams = &pParams->ptParams[ptIdx]; + const GMMU_FMT_PDE *pPdeFmt; + GMMU_APERTURE aperture; + + // Select PDE format. + switch (ptIdx) + { + case NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_SMALL: + pPdeFmt = gmmuFmtGetPde(pFmt, mapTarget.pLevelFmt, !bigIdx); + break; + case NV0080_CTRL_DMA_UPDATE_PDE_2_PT_IDX_BIG: + pPdeFmt = gmmuFmtGetPde(pFmt, mapTarget.pLevelFmt, bigIdx); + break; + default: + NV_ASSERT_OR_RETURN(0, NV_ERR_NOT_SUPPORTED); + } + + // Translate aperture field. 
+ switch (pPtParams->aperture) + { + case NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_INVALID: + aperture = GMMU_APERTURE_INVALID; + break; + case NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_VIDEO_MEMORY: + aperture = GMMU_APERTURE_VIDEO; + break; + case NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_SYSTEM_COHERENT_MEMORY: + aperture = GMMU_APERTURE_SYS_COH; + break; + case NV0080_CTRL_DMA_UPDATE_PDE_2_PT_APERTURE_SYSTEM_NON_COHERENT_MEMORY: + aperture = GMMU_APERTURE_SYS_NONCOH; + break; + default: + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + gmmuFieldSetAperture(&pPdeFmt->fldAperture, aperture, + mapIter.entry.v8); + + if (GMMU_APERTURE_INVALID != aperture) + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + + gmmuFieldSetAddress(gmmuFmtPdePhysAddrFld(pPdeFmt, aperture), + kgmmuEncodePhysAddr(pKernelGmmu, aperture, pPtParams->physAddr, + NVLINK_INVALID_FABRIC_ADDR), + mapIter.entry.v8); + + { + nvFieldSetBool(&pPdeFmt->fldVolatile, NV_TRUE, mapIter.entry.v8); + } + numValidPTs++; + } + } + + if (0 == numValidPTs) + { + // Check for sparse flags and invalid PDEs + if (bSparse) + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + const GMMU_FMT_FAMILY *pFam = kgmmuFmtGetFamily(pKernelGmmu, pFmt->version); + mapIter.entry = pFam->sparsePdeMulti; + } + } + else + { + const NvU32 sizeFracExp = + DRF_VAL(0080_CTRL_DMA_UPDATE_PDE_2, _FLAGS, _PDE_SIZE, pParams->flags); + + NV_ASSERT_OR_RETURN(!bSparse, NV_ERR_INVALID_ARGUMENT); + + // Translate PDE_SIZE field. + if (sizeFracExp > 0) + { + NV_ASSERT_OR_RETURN(nvFieldIsValid32(&pFmt->pPdeMulti->fldSizeRecipExp), + NV_ERR_INVALID_ARGUMENT); + nvFieldSet32(&pFmt->pPdeMulti->fldSizeRecipExp, sizeFracExp, mapIter.entry.v8); + } + } + + if (NULL != pPdeBuffer) + { + // Copy value to user supplied buffer if provided. + portMemCopy(pPdeBuffer, mapTarget.pLevelFmt->entrySize, + mapIter.entry.v8, mapTarget.pLevelFmt->entrySize); + } + else + { + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + const NvU64 vaLo = pParams->pdeIndex * mmuFmtLevelPageSize(mapTarget.pLevelFmt); + const NvU64 vaHi = vaLo + mmuFmtEntryVirtAddrMask(mapTarget.pLevelFmt); + NV_STATUS status; + + NV_ASSERT_OR_RETURN(vaLo >= pVAS->vasStart, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaHi <= pGVAS->vaLimitInternal, NV_ERR_INVALID_ARGUMENT); + + // Call walker to map the PDE. + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + if (NULL == userCtx.pGpuState) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT(0); + } + else + { + status = mmuWalkMap(userCtx.pGpuState->pWalk, vaLo, vaHi, &mapTarget); + } + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // Flush TLB if requested. + if (FLD_TEST_DRF(0080_CTRL_DMA_UPDATE_PDE_2, _FLAGS, _FLUSH_PDE_CACHE, _TRUE, + pParams->flags)) + { + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_UPGRADE); + } + } + + return NV_OK; +} + +void +gvaspaceWalkUserCtxAcquire_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const GVAS_BLOCK *pVASBlock, + MMU_WALK_USER_CTX *pUserCtx +) +{ + // Must be UC. + NV_ASSERT(!gpumgrGetBcEnabledStatus(pGpu)); + + pUserCtx->pGVAS = pGVAS; + pUserCtx->pGpu = pGpu; + pUserCtx->pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + pUserCtx->pBlock = pVASBlock; + + // The following two asserts were added for a rare issue hit during eGPU surprise disconnect on Mac + NV_ASSERT_OR_RETURN_VOID(pUserCtx->pGpuState != NULL); + NV_ASSERT_OR_RETURN_VOID(pUserCtx->pGpuState->pWalk != NULL); + + // If current context is non-NULL, a previous release was missed. 
+ NV_ASSERT(NULL == mmuWalkGetUserCtx(pUserCtx->pGpuState->pWalk)); + mmuWalkSetUserCtx(pUserCtx->pGpuState->pWalk, pUserCtx); +} + +void +gvaspaceWalkUserCtxRelease_IMPL +( + OBJGVASPACE *pGVAS, + MMU_WALK_USER_CTX *pUserCtx +) +{ + // If current context doesn't match, must have nested acquires (not allowed). + NV_ASSERT_OR_RETURN_VOID(pUserCtx->pGpuState); + NV_ASSERT(pUserCtx == mmuWalkGetUserCtx(pUserCtx->pGpuState->pWalk)); + mmuWalkSetUserCtx(pUserCtx->pGpuState->pWalk, NULL); +} + +NV_STATUS +gvaspaceIncChanGrpRefCnt_IMPL +( + OBJGVASPACE *pGVAS, + KernelChannelGroup *pKernelChannelGroup +) +{ + NvU32 *pChanGrpRefCnt; + + NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_ARGUMENT); + + pChanGrpRefCnt = mapFind(&pGVAS->chanGrpMap, + (NvU64)NV_PTR_TO_NvP64(pKernelChannelGroup)); + + if (pChanGrpRefCnt != NULL) + { + (*pChanGrpRefCnt)++; + } + else + { + pChanGrpRefCnt = mapInsertNew(&pGVAS->chanGrpMap, + (NvU64)NV_PTR_TO_NvP64(pKernelChannelGroup)); + NV_ASSERT_OR_RETURN(pChanGrpRefCnt != NULL, NV_ERR_INVALID_STATE); + NV_PRINTF(LEVEL_INFO, "ChanGrp 0x%x on runlist 0x%x registered.\n", + pKernelChannelGroup->grpID, pKernelChannelGroup->runlistId); + *pChanGrpRefCnt = 1; + } + + NV_PRINTF(LEVEL_INFO, + "ChanGrp 0x%x on runlist 0x%x refCnt increased to 0x%x\n", + pKernelChannelGroup->grpID, + pKernelChannelGroup->runlistId, + *pChanGrpRefCnt); + + return NV_OK; +} + +NV_STATUS +gvaspaceDecChanGrpRefCnt_IMPL +( + OBJGVASPACE *pGVAS, + KernelChannelGroup *pKernelChannelGroup +) +{ + NvU32 *pChanGrpRefCnt; + + NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_ARGUMENT); + + pChanGrpRefCnt = mapFind(&pGVAS->chanGrpMap, + (NvU64)NV_PTR_TO_NvP64(pKernelChannelGroup)); + + NV_ASSERT_OR_RETURN(pChanGrpRefCnt != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(*pChanGrpRefCnt != 0, NV_ERR_INVALID_STATE); + + (*pChanGrpRefCnt)--; + + NV_PRINTF(LEVEL_INFO, + "ChanGrp 0x%x on runlist 0x%x refCnt decreased to 0x%x\n", + pKernelChannelGroup->grpID, + pKernelChannelGroup->runlistId, + *pChanGrpRefCnt); + + if (*pChanGrpRefCnt == 0) + { + mapRemove(&pGVAS->chanGrpMap, pChanGrpRefCnt); + NV_PRINTF(LEVEL_INFO, "ChanGrp 0x%x on runlist 0x%x unregistered.\n", + pKernelChannelGroup->grpID, pKernelChannelGroup->runlistId); + } + + return NV_OK; +} + +NvU32 +gvaspaceGetChanGrpRefCnt_IMPL +( + OBJGVASPACE *pGVAS, + KernelChannelGroup *pKernelChannelGroup +) +{ + NvU32 refCnt = 0; + NvU32 *pChanGrpRefCnt; + + NV_ASSERT_OR_RETURN(pGVAS != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_ARGUMENT); + + pChanGrpRefCnt = mapFind(&pGVAS->chanGrpMap, + (NvU64)NV_PTR_TO_NvP64(pKernelChannelGroup)); + + if (pChanGrpRefCnt != NULL) + { + refCnt = *pChanGrpRefCnt; + } + else + { + NV_PRINTF(LEVEL_INFO, + "grpID 0x%x on runlistId 0x%x not registered to the VAS\n", + pKernelChannelGroup->grpID, + pKernelChannelGroup->runlistId); + } + + return refCnt; +} + +NV_STATUS +gvaspaceCheckChanGrpRefCnt_IMPL +( + OBJGVASPACE *pGVAS, + KernelChannelGroup *pKernelChannelGroup +) +{ + // @todo Implement + return NV_OK; +} + +NV_STATUS +gvaspaceUnregisterAllChanGrps_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + + // TODO: This function should be made Physcial only + if(IS_GSP_CLIENT(pGpu) || IS_VIRTUAL(pGpu)) + { + return NV_OK; + } + + if 
(!kfifoIsSubcontextSupported(pKernelFifo)) + { + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +gvaspaceGetPageLevelInfo_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS *pParams +) +{ + OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE); + MMU_WALK *pWalk = NULL; + const MMU_FMT_LEVEL *pLevelFmt = NULL; + const MMU_FMT_LEVEL *pTargetFmt = NULL; + NvU32 level = 0; + NvU32 sublevel = 0; + GVAS_GPU_STATE *pGpuState; + + if (NULL == pGVAS->pGpuStates) + { + // TODO: VMM must be enabled - remove once default. + return NV_ERR_NOT_SUPPORTED; + } + + pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(NULL != pGpuState, NV_ERR_INVALID_ARGUMENT); + + pWalk = pGpuState->pWalk; + pLevelFmt = pGpuState->pFmt->pRoot; + + pTargetFmt = mmuFmtFindLevelWithPageShift(pLevelFmt, BIT_IDX_64(pParams->pageSize)); + NV_ASSERT_OR_RETURN(NULL != pTargetFmt, NV_ERR_INVALID_ARGUMENT); + + for (level = 0; NULL != pLevelFmt; ++level) + { + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvU32 memSize = 0; + + NV_ASSERT_OR_RETURN(level < GMMU_FMT_MAX_LEVELS, NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN( + mmuWalkGetPageLevelInfo(pWalk, pLevelFmt, pParams->virtAddress, + (const MMU_WALK_MEMDESC**)&pMemDesc, &memSize)); + if (NULL == pMemDesc) + { + break; + } + + pParams->levels[level].pFmt = (MMU_FMT_LEVEL *) pLevelFmt; + pParams->levels[level].size = memSize; + + // Copy level formats + portMemCopy((void *)&(pParams->levels[level].levelFmt), sizeof(MMU_FMT_LEVEL), (void *)pLevelFmt, sizeof(MMU_FMT_LEVEL)); + + for (sublevel = 0; (sublevel < MMU_FMT_MAX_SUB_LEVELS) && (sublevel < pLevelFmt->numSubLevels); sublevel++) + { + portMemCopy((void *)&(pParams->levels[level].sublevelFmt[sublevel]), sizeof(MMU_FMT_LEVEL), (void *)(pLevelFmt->subLevels + sublevel), sizeof(MMU_FMT_LEVEL)); + } + + pParams->levels[level].physAddress = + memdescGetPhysAddr(pMemDesc, VAS_ADDRESS_TRANSLATION(pVAS), 0); + + switch (memdescGetAddressSpace(pMemDesc)) + { + case ADDR_FBMEM: + pParams->levels[level].aperture = GMMU_APERTURE_VIDEO; + break; + case ADDR_SYSMEM: + if (NV_MEMORY_CACHED == memdescGetCpuCacheAttrib(pMemDesc)) + { + pParams->levels[level].aperture = GMMU_APERTURE_SYS_COH; + } + else + { + pParams->levels[level].aperture = GMMU_APERTURE_SYS_NONCOH; + } + break; + default: + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + + pLevelFmt = mmuFmtGetNextLevel(pLevelFmt, pTargetFmt); + } + + pParams->numLevels = level; + + return NV_OK; +} + +NV_STATUS +gvaspaceCopyServerRmReservedPdesToServerRm_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu +) +{ + CALL_CONTEXT *pContext = resservGetTlsCallContext(); + NV_STATUS status = NV_OK; + + if ((!IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_GSP_CLIENT(pGpu)) || + (0 == pGVAS->vaStartServerRMOwned)) + { + return NV_OK; + } + + if (NULL != pContext) + { + NvHandle hClient = pContext->pClient->hClient; + RsResourceRef *pResourceRef = pContext->pResourceRef; + RsResourceRef *pDeviceRef = NULL; + NvBool bFreeNeeded = NV_FALSE; + NvHandle hDevice; + NvHandle hVASpace; + NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS pdeInfo; + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS pdeCopyParams; + NvU32 i; + + if (pResourceRef->internalClassId == classId(VaSpaceApi)) + { + hVASpace = pResourceRef->hResource; + } + else + { + // Can't think of a way we can reach here with a non zero handle. + hVASpace = 0; + } + + // Find the device ref. 
+ if (pResourceRef->internalClassId == classId(Device)) + { + pDeviceRef = pResourceRef; + } + else + { + status = refFindAncestorOfType(pResourceRef, classId(Device), &pDeviceRef); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + } + + hDevice = pDeviceRef->hResource; + + // + // VAS handle is 0 for the device vaspace. Trigger an allocation on + // server RM so that the plugin has a valid handle to the device VAS + // under this client. This handle will be required by the plugin when + // we make the RPC later. + // + if ((0 == hVASpace) && IS_GSP_CLIENT(pGpu)) + { + NV_VASPACE_ALLOCATION_PARAMETERS vaParams = {0}; + + status = serverutilGenResourceHandle(hClient, &hVASpace); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + vaParams.index = NV_VASPACE_ALLOCATION_INDEX_GPU_DEVICE; + + NV_RM_RPC_ALLOC_OBJECT(pGpu, + hClient, + hDevice, + hVASpace, + FERMI_VASPACE_A, + &vaParams, + status); + + NV_ASSERT_OR_RETURN(NV_OK == status, status); + bFreeNeeded = NV_TRUE; + } + + portMemSet(&pdeInfo, 0, sizeof(NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS)); + portMemSet(&pdeCopyParams, 0, sizeof(NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS)); + + // Populate the input params. + pdeInfo.pageSize = NVBIT32(GMMU_PD1_VADDR_BIT_LO); + pdeInfo.virtAddress = pGVAS->vaStartServerRMOwned; + + // Fetch the details of the PDEs backing server RM's VA range. + status = gvaspaceGetPageLevelInfo(pGVAS, pGpu, &pdeInfo); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + + // Populate the input params. + for (i = 0; i < pdeInfo.numLevels; i++) + { + pdeCopyParams.levels[i].pageShift = pdeInfo.levels[i].levelFmt.virtAddrBitLo; + pdeCopyParams.levels[i].physAddress = pdeInfo.levels[i].physAddress; + pdeCopyParams.levels[i].aperture = pdeInfo.levels[i].aperture; + pdeCopyParams.levels[i].size = pdeInfo.levels[i].size; + } + + pdeCopyParams.numLevelsToCopy = pdeInfo.numLevels; + pdeCopyParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + pdeCopyParams.pageSize = NVBIT32(GMMU_PD1_VADDR_BIT_LO); + pdeCopyParams.virtAddrLo = pGVAS->vaStartServerRMOwned; + pdeCopyParams.virtAddrHi = pdeCopyParams.virtAddrLo + + SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE - 1; + // + // RPC the details of these reserved PDEs to server RM so that server RM can + // mirror these PDEs in its mmu walker state. Any lower level PDEs/PTEs + // allocated under these top level PDEs will be modified exclusively by + // server RM. Client RM won't touch those. 
+ // + status = _gvaspaceCopyServerRmReservedPdesToServerRm(hClient, hVASpace, pGpu, &pdeCopyParams); + NV_ASSERT_OR_GOTO(NV_OK == status, done); +done: + if (bFreeNeeded) + { + NV_STATUS tmpStatus = NV_OK; + + NV_RM_RPC_FREE(pGpu, hClient, hDevice, hVASpace, tmpStatus); + NV_ASSERT_OR_RETURN(NV_OK == tmpStatus, tmpStatus); + } + } + + return status; +} + +static NV_STATUS +_gvaspaceControl_Prolog +( + VaSpaceApi *pVaspaceApi, + NvHandle hSubDevice, + NvU32 subDeviceId, + OBJGVASPACE **ppGVAS, + OBJGPU **ppGpu +) +{ + OBJVASPACE *pVAS = NULL; + + NV_ASSERT_OK_OR_RETURN( + vaspaceGetByHandleOrDeviceDefault(RES_GET_CLIENT(pVaspaceApi), + RES_GET_PARENT_HANDLE(pVaspaceApi), + RES_GET_HANDLE(pVaspaceApi), + &pVAS)); + *ppGVAS = dynamicCast(pVAS, OBJGVASPACE); + NV_ASSERT_OR_RETURN(NULL != *ppGVAS, NV_ERR_INVALID_CLASS); + + if (0 != hSubDevice) + { + NvHandle hDevice; + NV_ASSERT_OR_RETURN(CliSetSubDeviceContext(RES_GET_CLIENT_HANDLE(pVaspaceApi), hSubDevice, &hDevice, ppGpu) == NV_OK, + NV_ERR_INVALID_OBJECT); + } + else + { + *ppGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + // Pick a specific sub-device if requested. + if (0 != subDeviceId) + { + *ppGpu = gpumgrGetGpuFromSubDeviceInst(gpuGetDeviceInstance(*ppGpu), subDeviceId - 1); + NV_ASSERT_OR_RETURN(NULL != *ppGpu, NV_ERR_INVALID_ARGUMENT); + } + gpumgrSetBcEnabledStatus(*ppGpu, NV_FALSE); + } + + return NV_OK; +} + + +NV_STATUS +vaspaceapiCtrlCmdVaspaceGetGmmuFormat_IMPL +( + VaSpaceApi *pVaspaceApi, + NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS *pGmmuFormatParams +) +{ + OBJGVASPACE *pGVAS = NULL; + OBJGPU *pGpu = NULL; + + NV_ASSERT_OK_OR_RETURN( + _gvaspaceControl_Prolog(pVaspaceApi, pGmmuFormatParams->hSubDevice, + pGmmuFormatParams->subDeviceId, &pGVAS, &pGpu)); + + pGmmuFormatParams->pFmt = (GMMU_FMT *) gvaspaceGetGmmuFmt_IMPL(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(NULL != pGmmuFormatParams->pFmt, NV_ERR_INVALID_STATE); + + return NV_OK; +} + +NV_STATUS +vaspaceapiCtrlCmdVaspaceGetPageLevelInfo_IMPL +( + VaSpaceApi *pVaspaceApi, + NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS *pPageLevelInfoParams +) +{ + OBJGVASPACE *pGVAS = NULL; + OBJGPU *pGpu = NULL; + + NV_ASSERT_OK_OR_RETURN( + _gvaspaceControl_Prolog(pVaspaceApi, pPageLevelInfoParams->hSubDevice, + pPageLevelInfoParams->subDeviceId, &pGVAS, &pGpu)); + + if (NULL == pGVAS->pGpuStates) + { + // TODO: VMM must be enabled - remove once default. 
+ return NV_ERR_NOT_SUPPORTED; + } + + if (IS_VIRTUAL(pGpu) && !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + NV_STATUS status = NV_OK; + if (!(gvaspaceGetFlags(pGVAS) & VASPACE_FLAGS_FLA)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + } + + return gvaspaceGetPageLevelInfo(pGVAS, pGpu, pPageLevelInfoParams); +} + +#if defined(DEBUG) || defined(DEVELOP) || defined(NV_VERIF_FEATURES) +NV_STATUS +vaspaceapiCtrlCmdVaspaceGetPageLevelInfoVerif_IMPL +( + VaSpaceApi *pVaspaceApi, + NV90F1_CTRL_VASPACE_GET_PAGE_LEVEL_INFO_PARAMS *pPageLevelInfoParams +) +{ + return vaspaceapiCtrlCmdVaspaceGetPageLevelInfo_IMPL(pVaspaceApi, pPageLevelInfoParams); +} +#endif + +NV_STATUS +vaspaceapiCtrlCmdVaspaceReserveEntries_IMPL +( + VaSpaceApi *pVaspaceApi, + NV90F1_CTRL_VASPACE_RESERVE_ENTRIES_PARAMS *pReserveEntriesParams +) +{ + OBJGVASPACE *pGVAS = NULL; + OBJGPU *pGpu = NULL; + NV_STATUS status = NV_OK; + GVAS_GPU_STATE *pGpuState; + + NV_ASSERT_OK_OR_RETURN( + _gvaspaceControl_Prolog(pVaspaceApi, pReserveEntriesParams->hSubDevice, + pReserveEntriesParams->subDeviceId, &pGVAS, &pGpu)); + + if (NULL == pGVAS->pGpuStates) + { + // TODO: VMM must be enabled - remove once default. + return NV_ERR_NOT_SUPPORTED; + } + + NV_ASSERT_OR_RETURN(ONEBITSET(pReserveEntriesParams->pageSize), NV_ERR_INVALID_ARGUMENT); + + if (IS_VIRTUAL(pGpu) && !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + if (!(gvaspaceGetFlags(pGVAS) & VASPACE_FLAGS_FLA)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + } + + pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(NULL != pGpuState, NV_ERR_INVALID_STATE); + + // Alloc and bind page level instances. + status = gvaspaceReservePageTableEntries(pGVAS, pGpu, pReserveEntriesParams->virtAddrLo, + pReserveEntriesParams->virtAddrHi, + pReserveEntriesParams->pageSize); + NV_ASSERT(NV_OK == status); + + return status; +} + +NV_STATUS +vaspaceapiCtrlCmdVaspaceReleaseEntries_IMPL +( + VaSpaceApi *pVaspaceApi, + NV90F1_CTRL_VASPACE_RELEASE_ENTRIES_PARAMS *pReleaseEntriesParams +) +{ + OBJGVASPACE *pGVAS = NULL; + OBJGPU *pGpu = NULL; + NV_STATUS status = NV_OK; + + NV_ASSERT_OK_OR_RETURN( + _gvaspaceControl_Prolog(pVaspaceApi, pReleaseEntriesParams->hSubDevice, + pReleaseEntriesParams->subDeviceId, &pGVAS, &pGpu)); + + if (NULL == pGVAS->pGpuStates) + { + // TODO: VMM must be enabled - remove once default. 
+ return NV_ERR_NOT_SUPPORTED; + } + + NV_ASSERT_OR_RETURN(ONEBITSET(pReleaseEntriesParams->pageSize), NV_ERR_INVALID_ARGUMENT); + + if (IS_VIRTUAL(pGpu) && !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + if (!(gvaspaceGetFlags(pGVAS) & VASPACE_FLAGS_FLA)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + } + + // Unbind and free page level instances. + status = gvaspaceReleasePageTableEntries(pGVAS, pGpu, pReleaseEntriesParams->virtAddrLo, + pReleaseEntriesParams->virtAddrHi, + pReleaseEntriesParams->pageSize); + NV_ASSERT(NV_OK == status); + + return status; +} + +NV_STATUS +vaspaceapiCtrlCmdVaspaceCopyServerReservedPdes_IMPL +( + VaSpaceApi *pVaspaceApi, + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *pCopyServerReservedPdesParams +) +{ + OBJGVASPACE *pGVAS = NULL; + OBJVASPACE *pVAS = NULL; + OBJGPU *pGpu = NULL; + KernelGmmu *pKernelGmmu = NULL; + NV_STATUS status = NV_OK; + MMU_WALK_USER_CTX userCtx = {0}; + GVAS_GPU_STATE *pGpuState; + NvS32 i; + + NV_ASSERT_OK_OR_RETURN( + _gvaspaceControl_Prolog(pVaspaceApi, pCopyServerReservedPdesParams->hSubDevice, + pCopyServerReservedPdesParams->subDeviceId, &pGVAS, &pGpu)); + + pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + pVAS = staticCast(pGVAS, OBJVASPACE); + + if (NULL == pGVAS->pGpuStates) + { + // TODO: VMM must be enabled - remove once default. + return NV_ERR_NOT_SUPPORTED; + } + + NV_ASSERT_OR_RETURN(pCopyServerReservedPdesParams->numLevelsToCopy <= GMMU_FMT_MAX_LEVELS, NV_ERR_INVALID_ARGUMENT); + + NV_ASSERT_OR_RETURN(ONEBITSET(pCopyServerReservedPdesParams->pageSize), NV_ERR_INVALID_ARGUMENT); + + if (IS_VIRTUAL(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams->pLegacyParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + return status; + } + + pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(NULL != pGpuState, NV_ERR_INVALID_STATE); + + // Alloc and bind page level instances. + status = gvaspaceReservePageTableEntries(pGVAS, pGpu, pCopyServerReservedPdesParams->virtAddrLo, + pCopyServerReservedPdesParams->virtAddrHi, + pCopyServerReservedPdesParams->pageSize); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // Kick out any stale TLB entries. + gvaspaceInvalidateTlb(pGVAS, pGpu, PTE_DOWNGRADE); + + // Acquire walker context. 
+ gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + for (i = pCopyServerReservedPdesParams->numLevelsToCopy - 1; i >= 0; i--) + { + MEMORY_DESCRIPTOR *pMemDescNew; + NV_ADDRESS_SPACE aperture; + NvU64 flags = 0; + + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + flags = MEMDESC_FLAGS_GUEST_ALLOCATED; + } + + switch(pCopyServerReservedPdesParams->levels[i].aperture) + { + case GMMU_APERTURE_VIDEO: + aperture = ADDR_FBMEM; + break; + case GMMU_APERTURE_SYS_COH: + case GMMU_APERTURE_SYS_NONCOH: + aperture = ADDR_SYSMEM; + break; + default: + NV_ASSERT_OR_GOTO(0, done); + } + + status = memdescCreate(&pMemDescNew, pGpu, + pCopyServerReservedPdesParams->levels[i].size, + RM_PAGE_SIZE, + NV_TRUE, + aperture, + kgmmuGetPTEAttr(pKernelGmmu), + flags); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + + // Setup a memdesc in server to describe the client's PDE backing memory + memdescDescribe(pMemDescNew, aperture, pCopyServerReservedPdesParams->levels[i].physAddress, + pCopyServerReservedPdesParams->levels[i].size); + memdescSetPageSize(pMemDescNew, VAS_ADDRESS_TRANSLATION(pVAS), RM_PAGE_SIZE); + + // Modify the server's walker state with the new backing memory. + const MMU_FMT_LEVEL *pLevelFmt = + mmuFmtFindLevelWithPageShift(pGpuState->pFmt->pRoot, + pCopyServerReservedPdesParams->levels[i].pageShift); + status = mmuWalkModifyLevelInstance(pGpuState->pWalk, + pLevelFmt, + pGVAS->vaStartServerRMOwned, + (MMU_WALK_MEMDESC*)pMemDescNew, + (NvU32)pCopyServerReservedPdesParams->levels[i].size, + NV_FALSE, + NV_FALSE, + NV_FALSE); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + } + +done: + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + return status; +} + +/********************Local routines used in this file alone*******************/ + +/*! + * Reserve VA block between current limit and max limit for later growth. + */ +static NV_STATUS +_gvaspaceReserveTopForGrowth +( + OBJGVASPACE *pGVAS +) +{ + if (pGVAS->vaLimitInternal < pGVAS->vaLimitMax) + { + EMEMBLOCK *pBlock; + NvU32 allocFlags; + NvU64 allocOffset; + NvU64 allocSize; + + allocFlags = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + allocOffset = pGVAS->vaLimitInternal + 1; + allocSize = pGVAS->vaLimitMax - pGVAS->vaLimitInternal; + + return pGVAS->pHeap->eheapAlloc(pGVAS->pHeap, VAS_EHEAP_OWNER_RSVD, + &allocFlags, &allocOffset, &allocSize, + 1, 1, &pBlock, NULL, NULL); + } + return NV_OK; +} + +/*! + * Reserve a range of VA from rangeLo to rangeHi for later use. + */ +static NV_STATUS +_gvaspaceReserveRange +( + OBJGVASPACE *pGVAS, + NvU64 rangeLo, + NvU64 rangeHi +) +{ + if (rangeLo <= rangeHi) + { + EMEMBLOCK *pBlock; + NvU32 allocFlags; + NvU64 allocOffset; + NvU64 allocSize; + + allocFlags = NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + allocOffset = rangeLo; + allocSize = rangeHi - rangeLo + 1; + + return pGVAS->pHeap->eheapAlloc(pGVAS->pHeap, VAS_EHEAP_OWNER_RSVD, + &allocFlags, &allocOffset, &allocSize, + 1, 1, &pBlock, NULL, NULL); + } + return NV_ERR_INVALID_ARGUMENT; +} + +/*! + * Pin the page tables covering external management (leaf PDE) alignment if the + * provided VA belongs to a lazy VA reservation. This is used for lazy WDDMv1 page tables. + */ +static NV_STATUS +_gvaspacePinLazyPageTables +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 va +) +{ + NV_STATUS status = NV_OK; + PEMEMBLOCK pMemBlock; + PGVAS_BLOCK pVASBlock; + + // Search for the VA block, abort if not found. 
+    pMemBlock = pGVAS->pHeap->eheapGetBlock(pGVAS->pHeap, va, 0);
+    if (!pMemBlock)
+    {
+        return NV_OK;
+    }
+    pVASBlock = (PGVAS_BLOCK)pMemBlock->pData;
+
+    // Only proceed if lazy and we have lock mask info.
+    if (pVASBlock->flags.bLazy && (0 != pVASBlock->pageSizeLockMask))
+    {
+        NvU32 pageShift;
+        MMU_WALK_USER_CTX userCtx = {0};
+        NvU64 vaLo = NV_ALIGN_DOWN64(va, pGVAS->extManagedAlign);
+        NvU64 vaHi = NV_ALIGN_UP64(va + 1, pGVAS->extManagedAlign) - 1;
+
+        // Clamp VA to block limits.
+        vaLo = NV_MAX(vaLo, pMemBlock->begin);
+        vaHi = NV_MIN(vaHi, pMemBlock->end);
+
+        gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, pVASBlock, &userCtx);
+
+        if (NULL == userCtx.pGpuState)
+        {
+            status = NV_ERR_INVALID_STATE;
+            NV_ASSERT(0);
+        }
+        else
+        {
+            // Loop over each page size requested by client.
+            FOR_EACH_INDEX_IN_MASK(64, pageShift, pVASBlock->pageSizeLockMask)
+            {
+                // Pre-reserve page level instances in the VA range.
+                const MMU_FMT_LEVEL *pLevelFmt =
+                    mmuFmtFindLevelWithPageShift(userCtx.pGpuState->pFmt->pRoot, pageShift);
+                status = mmuWalkReserveEntries(userCtx.pGpuState->pWalk, pLevelFmt,
+                                               vaLo, vaHi, NV_TRUE);
+                if (NV_OK != status)
+                {
+                    DBG_BREAKPOINT();
+                    break;
+                }
+            }
+            FOR_EACH_INDEX_IN_MASK_END
+        }
+
+        gvaspaceWalkUserCtxRelease(pGVAS, &userCtx);
+    }
+
+    return status;
+}
+
+/*!
+ * @brief Callback to free leaked virtual allocations.
+ *
+ * @param[in] pHeap Heap being traversed.
+ * @param[in] pEnv Callback environment.
+ * @param[in] pMemBlock Current heap block (may be free or used).
+ * @param[in,out] pContinue Initially 1, if set to 0 traversal stops.
+ * @param[in,out] pInvalCursor Initially 0, must be set to 1 if current block
+ *                             is deleted during the callback (to prevent it
+ *                             from being used to find next block).
+ */
+static NV_STATUS
+_gvaspaceFreeVASBlock
+(
+    OBJEHEAP *pHeap,
+    void *pEnv,
+    PEMEMBLOCK pMemBlock,
+    NvU32 *pContinue,
+    NvU32 *pInvalCursor
+)
+{
+    OBJGVASPACE *pGVAS = pEnv;
+
+    if (NVOS32_BLOCK_TYPE_FREE != pMemBlock->owner && VAS_EHEAP_OWNER_RSVD != pMemBlock->owner)
+    {
+        NV_PRINTF(LEVEL_WARNING,
+                  "Virtual allocation leak in range 0x%llX-0x%llX\n",
+                  pMemBlock->begin, pMemBlock->end);
+
+        gvaspaceFree(pGVAS, pMemBlock->begin);
+        *pInvalCursor = 1;
+    }
+
+    return NV_OK;
+}
+
+/*!
+ * @brief Register a mapping range for a given GPU.
+ */
+static NV_STATUS
+_gvaspaceMappingInsert
+(
+    OBJGVASPACE *pGVAS,
+    OBJGPU *pGpu,
+    GVAS_BLOCK *pVASBlock,
+    const NvU64 vaLo,
+    const NvU64 vaHi,
+    const VAS_MAP_FLAGS flags
+)
+{
+    NV_STATUS status = NV_OK;
+    OBJVASPACE *pVAS = staticCast(pGVAS, OBJVASPACE);
+    GVAS_MAPPING *pMapNode = NULL;
+    const NvU32 gpuMask = NVBIT(pGpu->gpuInstance);
+
+    // Ensure this is not a remap.
+    NV_ASSERT_OR_RETURN(!flags.bRemap, NV_ERR_INVALID_ARGUMENT);
+
+    // Check if GPU is valid for this VAS.
+    NV_ASSERT_OR_RETURN(gpuMask == (pVAS->gpuMask & gpuMask), NV_ERR_INVALID_ARGUMENT);
+
+    // Search for existing mapping.
+    status = btreeSearch(vaLo, (NODE**)&pMapNode, &pVASBlock->pMapTree->node);
+    if (NV_OK == status)
+    {
+        // If it already exists, check for consistency.
+        NV_ASSERT_OR_RETURN(0 == (pMapNode->gpuMask & gpuMask),
+                            NV_ERR_INVALID_ARGUMENT);
+        NV_ASSERT_OR_RETURN(pMapNode->node.keyStart == vaLo, NV_ERR_INVALID_ARGUMENT);
+        NV_ASSERT_OR_RETURN(pMapNode->node.keyEnd == vaHi, NV_ERR_INVALID_ARGUMENT);
+
+        // Commit the new GPU mask to the mapping.
+        pMapNode->gpuMask |= gpuMask;
+    }
+    else
+    {
+        // If it doesn't exist, allocate new node.
+        pMapNode = portMemAllocNonPaged(sizeof(*pMapNode));
+        NV_ASSERT_OR_RETURN(NULL != pMapNode, NV_ERR_NO_MEMORY);
+
+        portMemSet(pMapNode, 0, sizeof(*pMapNode));
+        pMapNode->node.keyStart = vaLo;
+        pMapNode->node.keyEnd = vaHi;
+        pMapNode->gpuMask = gpuMask;
+
+        // Insert mapping node.
+        status = btreeInsert(&pMapNode->node, (NODE**)&pVASBlock->pMapTree);
+        NV_ASSERT_OR_GOTO(NV_OK == status, catch);
+
+catch:
+        if (NV_OK != status)
+        {
+            portMemFree(pMapNode);
+        }
+    }
+
+    return status;
+}
+
+/*
+ * @brief Override to an externally owned PDB.
+ * For externally owned vaspace we will associate a PDB that is provided
+ * by the owner of the vaspace. This is different from the "shared management"
+ * scenario because, in the externally owned case, RM will not allocate any page tables
+ * or VA for this address space. This is a way to make sure RM programs the correct PDB
+ * when clients use this address space to create a channel.
+ * TODO: Make externally owned vaspace a separate vaspace class.
+*/
+static NV_STATUS
+_gvaspaceSetExternalPageDirBase
+(
+    OBJGVASPACE *pGVAS,
+    OBJGPU *pGpu,
+    MEMORY_DESCRIPTOR *pPDB
+)
+{
+    NV_ASSERT_OR_RETURN(!gpumgrGetBcEnabledStatus(pGpu), NV_ERR_INVALID_STATE);
+
+    if (pGVAS->bIsExternallyOwned)
+    {
+        pGVAS->pExternalPDB = pPDB;
+    }
+    return NV_OK;
+}
+
+/*!
+ * @brief Unregister a mapping range for a given GPU.
+ */
+static NV_STATUS
+_gvaspaceMappingRemove
+(
+    OBJGVASPACE *pGVAS,
+    OBJGPU *pGpu,
+    GVAS_BLOCK *pVASBlock,
+    const NvU64 vaLo,
+    const NvU64 vaHi
+)
+{
+    NV_STATUS status = NV_OK;
+    GVAS_MAPPING *pMapNode = NULL;
+    const NvU32 gpuMask = NVBIT(pGpu->gpuInstance);
+
+    // Search for existing mapping.
+    status = btreeSearch(vaLo, (NODE**)&pMapNode, &pVASBlock->pMapTree->node);
+    NV_ASSERT_OR_RETURN(NV_OK == status, status);
+
+    // Check for consistency.
+    NV_ASSERT_OR_RETURN(gpuMask == (pMapNode->gpuMask & gpuMask), NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pMapNode->node.keyStart == vaLo, NV_ERR_INVALID_ARGUMENT);
+    NV_ASSERT_OR_RETURN(pMapNode->node.keyEnd == vaHi, NV_ERR_INVALID_ARGUMENT);
+
+    // Remove GPU from mapping mask.
+    pMapNode->gpuMask &= ~gpuMask;
+
+    // Remove mapping if unused.
+    if (0 == pMapNode->gpuMask)
+    {
+        btreeUnlink(&pMapNode->node, (NODE**)&pVASBlock->pMapTree);
+        portMemFree(pMapNode);
+    }
+
+    return status;
+}
+
+/*!
+ * Returns whether the RM VA space is restricted.
+ * True only for MAC GPU VA space.
+ * @param[in] pGVAS Pointer to the OBJGVASPACE object
+ */
+NvBool
+gvaspaceIsInternalVaRestricted_IMPL(OBJGVASPACE *pGVAS)
+{
+    return pGVAS->bRMInternalRestrictedVaRange;
+}
+
+NV_STATUS
+gvaspaceReservePageTableEntries_IMPL
+(
+    OBJGVASPACE *pGVAS,
+    OBJGPU *pGpu,
+    const NvU64 vaLo,
+    const NvU64 vaHi,
+    const NvU64 pageSizeMask
+)
+{
+    GVAS_GPU_STATE *pGpuState;
+    VA_RANGE_GPU *pIter;
+    VA_RANGE_GPU newReservedPageTableEntry;
+
+    pGpuState = gvaspaceGetGpuState(pGVAS, pGpu);
+    NV_ASSERT_OR_RETURN(NULL != pGpuState, NV_ERR_GENERIC);
+    pIter = listHead(&pGpuState->reservedPageTableEntries);
+
+    // Move past any entries before this
+    while (pIter && pIter->vaLo < vaLo)
+        pIter = listNext(&pGpuState->reservedPageTableEntries, pIter);
+
+    // Insert this range and reserve it
+    newReservedPageTableEntry.vaLo = vaLo;
+    newReservedPageTableEntry.vaHi = vaHi;
+    NV_ASSERT_OR_RETURN(listInsertValue(&pGpuState->reservedPageTableEntries,
+                                        pIter, &newReservedPageTableEntry),
+                        NV_ERR_GENERIC);
+    return _gvaspaceReservePageTableEntries(pGVAS, pGpu, vaLo, vaHi,
+                                            pageSizeMask);
+}
+
+NV_STATUS
+gvaspaceReleasePageTableEntries_IMPL
+(
+    OBJGVASPACE *pGVAS,
+    OBJGPU *pGpu,
+    const NvU64 vaLo,
+    const NvU64 vaHi,
+    const NvU64 pageSizeMask
+)
+{
+    GVAS_GPU_STATE *pGpuState;
+    VA_RANGE_GPU *pIter;
+
+    pGpuState = gvaspaceGetGpuState(pGVAS, pGpu);
+    NV_ASSERT_OR_RETURN(NULL != pGpuState, NV_ERR_GENERIC);
+    pIter = listHead(&pGpuState->reservedPageTableEntries);
+
+    while (pIter != NULL)
+    {
+        if ((pIter->vaLo == vaLo) && (pIter->vaHi == vaHi))
+        {
+            listRemove(&pGpuState->reservedPageTableEntries, pIter);
+            return _gvaspaceReleasePageTableEntries(pGVAS, pGpu, vaLo, vaHi,
+                                                    pageSizeMask);
+        }
+
+        pIter = listNext(&pGpuState->reservedPageTableEntries, pIter);
+    }
+
+    NV_PRINTF(LEVEL_ERROR, "Cannot find the reserved PTE to release.\n");
+
+    return NV_ERR_GENERIC;
+}
+
+static NV_STATUS
+_gvaspaceReservePageTableEntries
+(
+    OBJGVASPACE *pGVAS,
+    OBJGPU *pGpu,
+    const NvU64 vaLo,
+    const NvU64 vaHi,
+    const NvU64 pageSizeMask
+)
+{
+    NV_STATUS status = NV_OK;
+    NvU32 pageShift;
+    MMU_WALK_USER_CTX userCtx = {0};
+
+    gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx);
+
+    // Loop over each page size requested by client.
+    FOR_EACH_INDEX_IN_MASK(64, pageShift, pageSizeMask)
+    {
+        // Pre-reserve page level instances in the VA range
+        const MMU_FMT_LEVEL *pLevelFmt =
+            mmuFmtFindLevelWithPageShift(
+                userCtx.pGpuState->pFmt->pRoot, pageShift);
+        status = mmuWalkReserveEntries(userCtx.pGpuState->pWalk,
+                                       pLevelFmt, vaLo, vaHi, NV_TRUE);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to Reserve Entries.\n");
+            break;
+        }
+
+        if (pGVAS->flags & VASPACE_FLAGS_BAR_BAR1)
+        {
+            status = mmuWalkSparsify(userCtx.pGpuState->pWalk, vaLo, vaHi, NV_FALSE);
+            if (status != NV_OK)
+            {
+                NV_PRINTF(LEVEL_ERROR, "Failed to sparsify reserved BAR1 page tables.\n");
+                break;
+            }
+        }
+    }
+    FOR_EACH_INDEX_IN_MASK_END
+
+    gvaspaceWalkUserCtxRelease(pGVAS, &userCtx);
+
+    return status;
+}
+
+static NV_STATUS
+_gvaspaceReleasePageTableEntries
+(
+    OBJGVASPACE *pGVAS,
+    OBJGPU *pGpu,
+    const NvU64 vaLo,
+    const NvU64 vaHi,
+    const NvU64 pageSizeMask
+)
+{
+    GVAS_GPU_STATE *pGpuState = gvaspaceGetGpuState(pGVAS, pGpu);
+    NvU32 pageShift;
+
+    NV_ASSERT(NULL != pGpuState);
+
+
+    // Loop over each page size.
+    FOR_EACH_INDEX_IN_MASK(64, pageShift, pageSizeMask)
+    {
+        // Release page level instances in the VA range. 
+ const MMU_FMT_LEVEL *pLevelFmt = + mmuFmtFindLevelWithPageShift(pGpuState->pFmt->pRoot, pageShift); + + NV_ASSERT_OK_OR_RETURN( + _gvaspaceReleaseUnreservedPTEs(pGVAS, pGpu, vaLo, vaHi, + pLevelFmt)); + } + FOR_EACH_INDEX_IN_MASK_END + + return NV_OK; +} + +static NV_STATUS +_gvaspaceReleaseUnreservedPTEs +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + const NvU64 vaLo, + const NvU64 vaHi, + const MMU_FMT_LEVEL *pLevelFmt +) +{ + NV_STATUS status = NV_OK; + + MMU_WALK_USER_CTX userCtx = {0}; + GVAS_GPU_STATE *pGpuState; + VA_RANGE_GPU *pIter; + NvU64 piecewiseStart; + NvU64 piecewiseEnd; + + pGpuState = gvaspaceGetGpuState(pGVAS, pGpu); + NV_ASSERT_OR_RETURN(NULL != pGpuState, NV_ERR_GENERIC); + pIter = listHead(&pGpuState->reservedPageTableEntries); + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + + NV_ASSERT(NULL != userCtx.pGpuState); + + piecewiseStart = vaLo; + while (piecewiseStart <= vaHi) + { + piecewiseEnd = 0; + + // If there is no reservation, free the whole range. + if (!pIter) + piecewiseEnd = vaHi; + // If there is a reservation on this GPU, free the memory space + // before its low address. + else if (pIter->vaLo > piecewiseStart) + piecewiseEnd = NV_MIN(vaHi, + pIter->vaLo - 1); + + if (piecewiseEnd) + { + if (!(pGVAS->flags & VASPACE_FLAGS_BAR_BAR1)) + { + // Clear out any mappings or sparse state. + status = mmuWalkUnmap(userCtx.pGpuState->pWalk, + piecewiseStart, piecewiseEnd); + NV_ASSERT(NV_OK == status); + } + + // Release page level instances in the VA range. + status = mmuWalkReleaseEntries(userCtx.pGpuState->pWalk, + pLevelFmt, + piecewiseStart, + piecewiseEnd); + NV_ASSERT(NV_OK == status); + + piecewiseStart = piecewiseEnd + 1; + } + + if (pIter) + { + /* Skip over the memory space of the reservation */ + piecewiseStart = NV_MAX(piecewiseStart, pIter->vaHi + 1); + pIter = listNext(&pGpuState->reservedPageTableEntries, pIter); + } + } + + + if (pGVAS->flags & VASPACE_FLAGS_FLA) + { + // free the dummy vas block + _gvaspaceCleanupFlaDummyPagesForFlaRange(pGVAS, pGpu, userCtx.pGpuState); + } + + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); + + return status; +} + +static NV_STATUS +_gvaspaceCopyServerRmReservedPdesToServerRm +( + NvHandle hClient, + NvHandle hVASpace, + OBJGPU *pGpu, + NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *pPdeCopyParams +) +{ + NV_STATUS status = NV_OK; + RmCtrlParams rmCtrlParams; + + portMemSet(&rmCtrlParams, 0, sizeof(RmCtrlParams)); + + rmCtrlParams.hClient = hClient; + rmCtrlParams.hObject = hVASpace; + rmCtrlParams.cmd = NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES; + rmCtrlParams.pParams = pPdeCopyParams; + rmCtrlParams.paramsSize = sizeof(NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS); + + NV_RM_RPC_CONTROL(pGpu, + rmCtrlParams.hClient, + rmCtrlParams.hObject, + rmCtrlParams.cmd, + rmCtrlParams.pParams, + rmCtrlParams.paramsSize, + status); + NV_ASSERT(NV_OK == status); + + return status; +} + +static void +_gvaspaceForceFreePageLevelInstances +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + GVAS_GPU_STATE *pGpuState +) +{ + VA_RANGE_GPU *pIter = NULL; + MMU_WALK_USER_CTX userCtx = {0}; + + pIter = listHead(&(pGpuState->reservedPageTableEntries)); + while (NULL != pIter) + { + VA_RANGE_GPU *pNext; + pNext = listNext(&(pGpuState->reservedPageTableEntries), pIter); + listRemove(&(pGpuState->reservedPageTableEntries), pIter); + pIter = pNext; + } + + gvaspaceWalkUserCtxAcquire(pGVAS, pGpu, NULL, &userCtx); + mmuWalkLevelInstancesForceFree(pGpuState->pWalk); + gvaspaceWalkUserCtxRelease(pGVAS, &userCtx); +} + 
+/*! + * @brief Reserve mempool for page levels. + * + * @param[in] pGpu OBJGPU pointer + * @param[in] pClient RsClient + * @param[in] pVAS Virtual address space + * @param[in] size Size of the mapping (assumes mapping is page aligned) + * @param[in] pageSizeLockMask Mask of page sizes locked down at VA reservation + */ +NV_STATUS +gvaspaceReserveMempool_IMPL +( + OBJGVASPACE *pGVAS, + OBJGPU *pGpu, + NvHandle hClient, + NvU64 size, + NvU64 pageSizeLockMask, + NvU32 flags +) +{ + NV_STATUS status = NV_OK; + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemPool = NULL; + + if (RMCFG_FEATURE_PMA && + pGVAS->flags & VASPACE_FLAGS_PTETABLE_PMA_MANAGED) + { + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + const GMMU_FMT *pFmt = kgmmuFmtGet(pKernelGmmu, GMMU_FMT_VERSION_DEFAULT, 0); + + // + // Always assume worst case of 4K mapping even if client has + // requested bigger page size. This is to ensure that we have + // sufficient memory in pools. Some MODS tests query for free + // framebuffer and allocate the entire available. In such cases + // we can run into OOM errors during page table allocation when + // the test tries to map a big surface and the pools are short + // of memory. + // + if (ONEBITSET(pageSizeLockMask)) + { + // + // There is a requirement of serial ATS enabled vaspaces to have + // both small and big page tables allocated at the same time. This + // is required for the 4K not valid feature. This is irrespective + // of the actual page size requested by the client. + // + if (gvaspaceIsAtsEnabled(pGVAS)) + { + pageSizeLockMask = RM_PAGE_SIZE | pGVAS->bigPageSize; + } + else if (!(flags & VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY)) + { + pageSizeLockMask = RM_PAGE_SIZE; + } + } + else + { + NV_ASSERT_OR_RETURN(((pageSizeLockMask & RM_PAGE_SIZE) != 0), + NV_ERR_INVALID_ARGUMENT); + } + + NvU64 poolSize = kgmmuGetSizeOfPageDirs(pGpu, pKernelGmmu, pFmt, 0, size - 1, + pageSizeLockMask) + + kgmmuGetSizeOfPageTables(pGpu, pKernelGmmu, pFmt, 0, size - 1, + pageSizeLockMask); + + NV_ASSERT_OK_OR_RETURN(memmgrPageLevelPoolsGetInfo(pGpu, pMemoryManager, hClient, &pMemPool)); + status = rmMemPoolReserve(pMemPool, poolSize, pGVAS->flags); + if ((pGVAS->flags & VASPACE_FLAGS_RETRY_PTE_ALLOC_IN_SYS) && + (status == NV_ERR_NO_MEMORY)) + { + // + // It is okay to change the status to NV_OK here since it is understood that + // we may run out of video memory at some time. The RETRY_PTE_ALLOC_IN_SYS + // flag ensures that RM retries allocating the page tables in sysmem if such + // a situation arises. So, running out of video memory here need not be fatal. + // It may be fatal if allocation in sysmem also fails. In that case RM will + // return an error from elsewhere. 
+ // + status = NV_OK; + } + else + { + NV_ASSERT_OR_RETURN((NV_OK == status), status); + + // setup page table pool in VA space if reservation to pool succeeds + if (pGVAS->pPageTableMemPool != NULL) + { + if (pGVAS->pPageTableMemPool != pMemPool) + { + rmMemPoolRelease(pMemPool, pGVAS->flags); + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + } + else + { + pGVAS->pPageTableMemPool = pMemPool; + } + } + } + + return status; +} + +NV_STATUS +gvaspaceGetFreeHeap_IMPL +( + OBJGVASPACE *pGVAS, + NvU64 *pFreeSize +) +{ + NV_ASSERT_OR_RETURN(pFreeSize != NULL, NV_ERR_INVALID_ARGUMENT); + + return pGVAS->pHeap->eheapGetFree(pGVAS->pHeap, pFreeSize); +} + +NvBool +gvaspaceIsInUse_IMPL +( + OBJGVASPACE *pGVAS +) +{ + NvU64 freeSize = 0; + NvU64 totalSize = 0; + + // Get the free heap size. + NV_ASSERT(gvaspaceGetFreeHeap(pGVAS, &freeSize) == NV_OK); + + // Get the total heap size for FLA vaspace. + totalSize = vaspaceGetVaLimit(staticCast(pGVAS, OBJVASPACE)) - + vaspaceGetVaStart(staticCast(pGVAS, OBJVASPACE)) + 1; + + return (totalSize != freeSize); +} diff --git a/src/nvidia/src/kernel/mem_mgr/hw_resources.c b/src/nvidia/src/kernel/mem_mgr/hw_resources.c new file mode 100644 index 000000000..c2df1fc71 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/hw_resources.c @@ -0,0 +1,212 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "mem_mgr_internal.h" +#include "mem_mgr/hw_resources.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "rmapi/client.h" +#include "mmu/gmmu_fmt.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" + +#include "class/cl0041.h" // NV04_MEMORY +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM +#include "class/cl00b1.h" // NV01_MEMORY_HW_RESOURCES +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER + +NV_STATUS +hwresConstruct_IMPL +( + MemoryHwResources *pMemoryHwResources, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS *pAllocData; + MEMORY_HW_RESOURCES_ALLOCATION_REQUEST allocRequest = {0}; + MEMORY_HW_RESOURCES_ALLOCATION_REQUEST *pAllocRequest = &allocRequest; + Memory *pMemory = staticCast(pMemoryHwResources, Memory); + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + Heap *pHeap = MEMORY_MANAGER_GET_HEAP(pMemoryManager); + NV_STATUS status = NV_OK; + NvHandle hDevice = RES_GET_HANDLE(pMemory->pDevice); + NvHandle hMemory = pCallContext->pResourceRef->hResource; + NvHandle hClient = pCallContext->pClient->hClient; + NvU32 retAttr, retAttr2; + NvBool bVidmem; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pParams)) + return NV_OK; + + pAllocData = pParams->pAllocParams; + + if (gpuIsDebuggerActive_HAL(pGpu)) + { + // Bug 643431 - WAR for GR WFI timeouts when debugger is active + return NV_ERR_BUSY_RETRY; + } + + // Init alloc request + pAllocRequest->pUserParams = pAllocData; + pAllocRequest->bindResultFunc = NvP64_VALUE(pAllocData->bindResultFunc); + pAllocRequest->pHandle = NvP64_VALUE(pAllocData->pHandle); + pAllocRequest->hwResId = 0; + + bVidmem = FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pAllocData->attr); + + status = heapHwAlloc(pGpu, pHeap, hClient, hDevice, hMemory, + pAllocRequest, &retAttr, &retAttr2); + + pAllocData->attr = retAttr; + pAllocData->attr2 = retAttr2; + + // If Allocation succeeded then allocate a handle + if (status == NV_OK) + { + MEMORY_DESCRIPTOR *pMemDesc; + + // + // Default to not GPU-cachable. This doesn't matter for the HW resources + // class but the lower level code expects this field to be updated. + // + if (FLD_TEST_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, pAllocData->attr2)) + { + pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + pAllocData->attr2); + } + + // comtags lines are allocated contiguously. + //pAllocData->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, + //pUserParams->attr); + + status = memCreateMemDesc(pGpu, + &pMemDesc, + bVidmem ? ADDR_FBMEM : ADDR_SYSMEM, + // Offset - do not know this yet. Must be page aligned. + memmgrGetInvalidOffset_HAL(pGpu, pMemoryManager), + pAllocData->size + pAllocRequest->pad, + pAllocData->attr, + pAllocData->attr2); + + if (status == NV_OK) + { + status = memConstructCommon(pMemory, + bVidmem ? 
NV01_MEMORY_LOCAL_USER : NV01_MEMORY_SYSTEM, + pAllocData->flags, pMemDesc, + pAllocData->owner, pHeap, pAllocData->attr, + pAllocData->attr2, 0, pAllocData->type, + NVOS32_MEM_TAG_NONE, &pAllocRequest->hwResource); + + if (status == NV_OK) + { + NV_ASSERT(pMemory->pMemDesc); + memdescSetPteKind(pMemory->pMemDesc, pAllocData->kind); + memdescSetHwResId(pMemory->pMemDesc, pAllocRequest->hwResId); + + pMemory->osDeviceHandle = pAllocData->osDeviceHandle; + + if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) + { + NvU32 compressedKind; + COMPR_INFO comprInfo; + + status = memmgrGetKindComprForGpu_HAL(pMemoryManager, + pMemory->pMemDesc, + pMemory->pMemDesc->pGpu, + 0, + &compressedKind, + &comprInfo); + + if (status == NV_OK) + { + pAllocData->compPageShift = pMemorySystemConfig->comprPageShift; + pAllocData->compressedKind = comprInfo.kind; + pAllocData->compTagLineMin = comprInfo.compTagLineMin; + pAllocData->compPageIndexLo = comprInfo.compPageIndexLo; + pAllocData->compPageIndexHi = comprInfo.compPageIndexHi; + pAllocData->compTagLineMultiplier = comprInfo.compTagLineMultiplier; + + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, compressedKind)) + { + pAllocData->uncompressedKind = memmgrGetUncompressedKind_HAL(pGpu, pMemoryManager, + compressedKind, NV_FALSE); + } + else + { + pAllocData->uncompressedKind = compressedKind; + } + } + } + } + + if (status != NV_OK) + { + memdescDestroy(pMemDesc); + } + } + } + + return status; +} + +void +hwresDestruct_IMPL +( + MemoryHwResources *pMemoryHwResources +) +{ + Memory *pMemory = staticCast(pMemoryHwResources, Memory); + OBJGPU *pGpu = pMemory->pGpu; + MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc; + Heap *pHeap = GPU_GET_HEAP(pGpu); + + // + // Must be done before memDestructCommon, as memDestructCommon will update BC state + // (3/8/2019 - is this comment stale?) + // + heapHwFree(pGpu, pHeap, pMemory, NVOS32_DELETE_RESOURCES_ALL); + + memDestructCommon(pMemory); + + NV_ASSERT(pMemDesc->Allocated == 0); + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); +} + +NvBool +hwresCanCopy_IMPL +( + MemoryHwResources *pMemoryHwResources +) +{ + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/mem_mgr/io_vaspace.c b/src/nvidia/src/kernel/mem_mgr/io_vaspace.c new file mode 100644 index 000000000..bf38bbbc2 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/io_vaspace.c @@ -0,0 +1,604 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+/***************************** HW State Routines ***************************\
+* *
+* IOMMU Virtual Address Space Function Definitions. *
+* *
+\***************************************************************************/
+
+#include "mem_mgr/io_vaspace.h"
+#include "class/cl00f2.h" // IO_VASPACE_A
+#include "gpu/mem_mgr/virt_mem_allocator_common.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "os/os.h"
+#include "core/system.h"
+#include "mem_mgr/virt_mem_mgr.h"
+#include "gpu/mmu/kern_gmmu.h"
+
+
+NV_STATUS
+iovaspaceConstruct__IMPL
+(
+    OBJIOVASPACE *pIOVAS,
+    NvU32 classId,
+    NvU32 vaspaceId,
+    NvU64 vaStart,
+    NvU64 vaLimit,
+    NvU64 vaStartInternal,
+    NvU64 vaLimitInternal,
+    NvU32 flags
+)
+{
+    NV_ASSERT_OR_RETURN(IO_VASPACE_A == classId, NV_ERR_INVALID_ARGUMENT);
+    pIOVAS->mappingCount = 0;
+    return NV_OK;
+}
+
+void
+iovaspaceDestruct_IMPL(OBJIOVASPACE *pIOVAS)
+{
+    OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE);
+
+    if (pIOVAS->mappingCount != 0)
+    {
+        NV_PRINTF(LEVEL_ERROR, "%lld left-over mappings in IOVAS 0x%x\n",
+                  pIOVAS->mappingCount, pVAS->vaspaceId);
+        DBG_BREAKPOINT();
+    }
+}
+
+NV_STATUS
+iovaspaceAlloc_IMPL
+(
+    OBJIOVASPACE *pIOVAS,
+    NvU64 size,
+    NvU64 align,
+    NvU64 rangeLo,
+    NvU64 rangeHi,
+    NvU64 pageSizeLockMask,
+    VAS_ALLOC_FLAGS flags,
+    NvU64 *pAddr
+)
+{
+    NV_STATUS status = NV_OK;
+
+    // TBD implement iommu specific stuff
+    return status;
+}
+
+NV_STATUS
+iovaspaceFree_IMPL
+(
+    OBJIOVASPACE *pIOVAS,
+    NvU64 vAddr
+)
+{
+    NV_STATUS status = NV_OK;
+
+    // TBD implement iommu specific stuff
+    return status;
+}
+
+NV_STATUS
+iovaspaceApplyDefaultAlignment_IMPL
+(
+    OBJIOVASPACE *pIOVAS,
+    const FB_ALLOC_INFO *pAllocInfo,
+    NvU64 *pAlign,
+    NvU64 *pSize,
+    NvU64 *pPageSizeLockMask
+)
+{
+    RM_ATTR_PAGE_SIZE pageSizeAttr;
+    NvU64 maxPageSize = RM_PAGE_SIZE;
+
+    pageSizeAttr = dmaNvos32ToPageSizeAttr(pAllocInfo->pageFormat->attr, pAllocInfo->pageFormat->attr2);
+    switch(pageSizeAttr)
+    {
+        case RM_ATTR_PAGE_SIZE_DEFAULT:
+        case RM_ATTR_PAGE_SIZE_4KB:
+            *pAlign = NV_MAX(*pAlign, maxPageSize);
+            *pSize = RM_ALIGN_UP(*pSize, maxPageSize);
+            return NV_OK;
+        default:
+            break;
+    }
+
+    {
+        OBJVASPACE *pVAS;
+        OBJGPU *pGpu;
+        KernelGmmu *pKernelGmmu;
+        //
+        // This is reached when the page size is BIG or greater. We should only
+        // reach here for T124 or later chips which have a GPU, or for standalone
+        // GPU chips, and we require the GPU's big page size here, not the IOMMU's,
+        // since the IOMMU does not have a big page size.
+        //
+        pVAS = staticCast(pIOVAS, OBJVASPACE);
+        pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask));
+        pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu);
+        NV_ASSERT_OR_RETURN(NULL != pKernelGmmu, NV_ERR_INVALID_POINTER);
+
+        switch (pageSizeAttr)
+        {
+            case RM_ATTR_PAGE_SIZE_BIG:
+                maxPageSize = kgmmuGetMaxBigPageSize_HAL(pKernelGmmu);
+                break;
+            case NVOS32_ATTR_PAGE_SIZE_HUGE:
+                NV_ASSERT_OR_RETURN(kgmmuIsHugePageSupported(pKernelGmmu),
+                                    NV_ERR_INVALID_ARGUMENT);
+                maxPageSize = RM_PAGE_SIZE_HUGE;
+                break;
+            case RM_ATTR_PAGE_SIZE_512MB:
+                NV_ASSERT_OR_RETURN(kgmmuIsPageSize512mbSupported(pKernelGmmu),
+                                    NV_ERR_INVALID_ARGUMENT);
+                maxPageSize = RM_PAGE_SIZE_512M;
+                break;
+            default:
+                break;
+        }
+
+        //
+        // Offset and size must be aligned to maximum potential map page size. 
+ // NOTE: Ignoring NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE but still using + // requested alignment as a lower bound. + // + *pAlign = NV_MAX(*pAlign, maxPageSize); + *pSize = RM_ALIGN_UP(*pSize, maxPageSize); + } + return NV_OK; +} + +NV_STATUS +iovaspaceIncAllocRefCnt_IMPL +( + OBJIOVASPACE *pIOVAS, + NvU64 vAddr +) +{ + NV_STATUS status = NV_OK; + + // TBD: Implement iommu specific stuff + return status; +} + +NV_STATUS +iovaspaceGetVasInfo_IMPL +( + OBJIOVASPACE *pIOVAS, + struct NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams +) +{ + return NV_OK; +} + +NvU64 +iovaspaceGetVaStart_IMPL(OBJIOVASPACE *pIOVAS) +{ + // TODO: query OS layer, this could also be set in ctor, not virtual? + return 0; +} + +NvU64 +iovaspaceGetVaLimit_IMPL(OBJIOVASPACE *pIOVAS) +{ + // TODO: query OS layer, this could also be set in ctor, not virtual? + return NVBIT64(32) - 1; +} + +#if (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM +static PIOVAMAPPING +_iovaspaceCreateMappingDataFromMemDesc +( + PMEMORY_DESCRIPTOR pMemDesc +) +{ + PIOVAMAPPING pIovaMapping = NULL; + NvU64 mappingDataSize = 0; + + mappingDataSize = sizeof(IOVAMAPPING); + if (!memdescGetContiguity(pMemDesc, AT_CPU)) + { + mappingDataSize += sizeof(RmPhysAddr) * + (NvU64_LO32(pMemDesc->PageCount) - 1); + } + + // + // The portMemAllocNonPaged() and portMemSet() interfaces work with 32-bit sizes, + // so make sure we don't exceed that here. + // + if (NvU64_HI32(mappingDataSize) != 0UL) + { + NV_PRINTF(LEVEL_ERROR, "too much memory to map! (0x%llx bytes)\n", + mappingDataSize); + DBG_BREAKPOINT(); + return NULL; + } + + pIovaMapping = portMemAllocNonPaged(NvU64_LO32(mappingDataSize)); + if (pIovaMapping == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "failed to allocate 0x%x bytes for IOVA mapping metadata\n", + NvU64_LO32(mappingDataSize)); + return NULL; + } + + portMemSet((void *)pIovaMapping, 0, NvU64_LO32(mappingDataSize)); + + pIovaMapping->pPhysMemDesc = pMemDesc; + + return pIovaMapping; +} + +static NV_STATUS +_iovaspaceCreateSubmapping +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + NvU64 rootOffset; + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + PMEMORY_DESCRIPTOR pRootMemDesc = memdescGetRootMemDesc(pPhysMemDesc, &rootOffset); + PIOVAMAPPING pRootIovaMapping; + PIOVAMAPPING pSubMapping = NULL; + + NV_ASSERT(pRootMemDesc != pPhysMemDesc); + + // + // A submapping requires the root mapping to be there, acquire a reference + // on it so that it sticks around for at least as long as the submapping. + // The reference is released when the submapping is destroyed. + // + status = iovaspaceAcquireMapping(pIOVAS, pRootMemDesc); + if (status != NV_OK) + return status; + + // + // The root mapping has been just successfully acquired so it has to be + // there. + // + pRootIovaMapping = memdescGetIommuMap(pRootMemDesc, pVAS->vaspaceId); + NV_ASSERT(pRootIovaMapping != NULL); + + // + // Since this is a submemory descriptor, we need to account for the + // PteAdjust as well, which is included in rootOffset. We don't want to + // account for it in the iovaArray because it is not accounted for in the + // memdesc's PTE array. This should result in a 4K-aligned root offset. 
+ // + rootOffset -= pPhysMemDesc->PteAdjust; + NV_ASSERT((rootOffset & RM_PAGE_MASK) == 0); + + // + // For submemory descriptors, there are two possibilities: + // (1) The root descriptor already has an IOVA mapping for the entire + // allocation in this IOVA space, in which case we just need a subset + // of that. + // (2) The root descriptor does not have an IOVA mapping for any of the + // allocation in this IOVA space, in which case we need to create one + // first. + // + + pSubMapping = _iovaspaceCreateMappingDataFromMemDesc(pPhysMemDesc); + if (pSubMapping == NULL) + { + iovaspaceReleaseMapping(pIOVAS, pRootIovaMapping); + return NV_ERR_NO_MEMORY; + } + + pSubMapping->refcount = 1; + pSubMapping->iovaspaceId = pRootIovaMapping->iovaspaceId; + pSubMapping->link.pParent = pRootIovaMapping; + + pSubMapping->pNext = pRootIovaMapping->link.pChildren; + pRootIovaMapping->link.pChildren = pSubMapping; + + // + // We need to copy over the corresponding entries from the root IOVA + // mapping before we assign it to the physical memdesc. The root offset + // determines where in the root mapping we need to start. + // + if (memdescGetContiguity(pPhysMemDesc, AT_CPU)) + { + pSubMapping->iovaArray[0] = pRootIovaMapping->iovaArray[0] + rootOffset; + } + else + { + NvU64 i, j; + NV_ASSERT(((rootOffset >> RM_PAGE_SHIFT) + pPhysMemDesc->PageCount) <= + pRootMemDesc->PageCount); + for (i = (rootOffset >> RM_PAGE_SHIFT), j = 0; + j < pPhysMemDesc->PageCount && i < pRootMemDesc->PageCount; i++, j++) + { + pSubMapping->iovaArray[j] = pRootIovaMapping->iovaArray[i]; + } + } + + memdescAddIommuMap(pPhysMemDesc, pSubMapping); + + ++pIOVAS->mappingCount; + + return NV_OK; +} + +static void +_iovaspaceDestroySubmapping +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + PMEMORY_DESCRIPTOR pPhysMemDesc = pIovaMapping->pPhysMemDesc; + PIOVAMAPPING pRootIovaMapping = pIovaMapping->link.pParent; + PIOVAMAPPING pTmpIovaMapping = pRootIovaMapping->link.pChildren; + + memdescRemoveIommuMap(pPhysMemDesc, pIovaMapping); + + if (pTmpIovaMapping == pIovaMapping) + { + pRootIovaMapping->link.pChildren = pIovaMapping->pNext; + } + else + { + while (pTmpIovaMapping != NULL && pTmpIovaMapping->pNext != pIovaMapping) + { + pTmpIovaMapping = pTmpIovaMapping->pNext; + } + + if (pTmpIovaMapping != NULL) + { + pTmpIovaMapping->pNext = pIovaMapping->pNext; + } + else + { + // Not found in the root submappings list? + NV_ASSERT(pTmpIovaMapping != NULL); + } + } + + portMemFree(pIovaMapping); + --pIOVAS->mappingCount; + + // + // After destroying a submapping, release its reference on the root mapping. + // The reference was acquired in _iovaspaceCreateSubmapping(). + // + iovaspaceReleaseMapping(pIOVAS, pRootIovaMapping); +} + +static NV_STATUS +_iovaspaceCreateMapping +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + NV_STATUS status; + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + NV_ADDRESS_SPACE addressSpace; + PIOVAMAPPING pIovaMapping = NULL; + OBJGPU *pMappingGpu = NULL; + + // + // The source memdesc has to be allocated to acquire an I/O VA space + // mapping, because the OS layer will be setting up a layer of indirection + // that assumes the PTEs in the memdesc are valid. There is no requirement + // that it be mapped to the CPU at this point. 
+ // + if (pPhysMemDesc == NULL) + { + NV_ASSERT(pPhysMemDesc != NULL); + return NV_ERR_INVALID_ARGUMENT; + } + + pMappingGpu = gpumgrGetGpuFromId(pVAS->vaspaceId); + addressSpace = memdescGetAddressSpace(pPhysMemDesc); + + // Only support SYSMEM or indirect peer mappings + if ((addressSpace != ADDR_SYSMEM) && + !gpumgrCheckIndirectPeer(pMappingGpu, pPhysMemDesc->pGpu)) + { + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + pIovaMapping = _iovaspaceCreateMappingDataFromMemDesc(pPhysMemDesc); + if (pIovaMapping == NULL) + { + return NV_ERR_NO_MEMORY; + } + + // Initialize the mapping as an identity mapping for the OS layer + if (memdescGetContiguity(pPhysMemDesc, AT_CPU)) + { + pIovaMapping->iovaArray[0] = memdescGetPte(pPhysMemDesc, AT_CPU, 0); + } + else + { + NvU32 i; + for (i = 0; i < pPhysMemDesc->PageCount; i++) + { + pIovaMapping->iovaArray[i] = memdescGetPte(pPhysMemDesc, AT_CPU, i); + } + } + + pIovaMapping->iovaspaceId = pVAS->vaspaceId; + pIovaMapping->refcount = 1; + + status = osIovaMap(pIovaMapping); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "failed to map memdesc into I/O VA space 0x%x (status = 0x%x)\n", + pVAS->vaspaceId, status); + goto error; + } + + memdescAddIommuMap(pPhysMemDesc, pIovaMapping); + ++pIOVAS->mappingCount; + + return NV_OK; + +error: + portMemFree(pIovaMapping); + pIovaMapping = NULL; + + return status; +} + +NV_STATUS +iovaspaceAcquireMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PMEMORY_DESCRIPTOR pPhysMemDesc +) +{ + OBJVASPACE *pVAS = staticCast(pIOVAS, OBJVASPACE); + PIOVAMAPPING pIovaMapping = memdescGetIommuMap(pPhysMemDesc, pVAS->vaspaceId); + + if (pIovaMapping) + { + // If the mapping is already there, just increment its refcount. + NV_ASSERT(pIovaMapping->refcount != 0); + ++pIovaMapping->refcount; + return NV_OK; + } + + if (memdescIsSubMemoryMemDesc(pPhysMemDesc)) + return _iovaspaceCreateSubmapping(pIOVAS, pPhysMemDesc); + else + return _iovaspaceCreateMapping(pIOVAS, pPhysMemDesc); +} + +static void +_iovaspaceDestroyRootMapping +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + PMEMORY_DESCRIPTOR pPhysMemDesc = pIovaMapping->pPhysMemDesc; + PIOVAMAPPING pNextIovaMapping, pTmpIovaMapping; + + // + // Increment the refcount to guarantee that destroying the last submapping + // won't end up trying to destroy the root mapping we are already + // destroying. + // + ++pIovaMapping->refcount; + + // + // Clear out any submappings underneath this mapping, since they will no + // longer be valid. 
+ // + pNextIovaMapping = pIovaMapping->link.pChildren; + while (pNextIovaMapping != NULL) + { + pTmpIovaMapping = pNextIovaMapping->pNext; + _iovaspaceDestroySubmapping(pIOVAS, pNextIovaMapping); + pNextIovaMapping = pTmpIovaMapping; + } + + memdescRemoveIommuMap(pPhysMemDesc, pIovaMapping); + + osIovaUnmap(pIovaMapping); + portMemFree(pIovaMapping); + + --pIOVAS->mappingCount; +} + +void +iovaspaceDestroyMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + if (memdescIsSubMemoryMemDesc(pIovaMapping->pPhysMemDesc)) + _iovaspaceDestroySubmapping(pIOVAS, pIovaMapping); + else + _iovaspaceDestroyRootMapping(pIOVAS, pIovaMapping); +} + +void +iovaspaceReleaseMapping_IMPL +( + OBJIOVASPACE *pIOVAS, + PIOVAMAPPING pIovaMapping +) +{ + if (pIovaMapping == NULL) + { + NV_ASSERT(0); + return; + } + + if (pIovaMapping->refcount == 0) + NV_ASSERT(pIovaMapping->refcount > 0); + + if (--pIovaMapping->refcount != 0) + return; + + iovaspaceDestroyMapping(pIOVAS, pIovaMapping); +} + +OBJIOVASPACE *iovaspaceFromId(NvU32 iovaspaceId) +{ + OBJVASPACE *pVAS; + OBJVMM *pVmm = SYS_GET_VMM(SYS_GET_INSTANCE()); + NV_STATUS status = vmmGetVaspaceFromId(pVmm, iovaspaceId, IO_VASPACE_A, &pVAS); + + if (status != NV_OK) + return NULL; + + return dynamicCast(pVAS, OBJIOVASPACE); +} + +OBJIOVASPACE *iovaspaceFromMapping(PIOVAMAPPING pIovaMapping) +{ + OBJIOVASPACE *pIOVAS = iovaspaceFromId(pIovaMapping->iovaspaceId); + + // + // The IOVASPACE has to be there as the mapping is referencing it. If it's + // not, the mapping has been left dangling outlasting the IOVAS it was + // under. + // + NV_ASSERT(pIOVAS != NULL); + + return pIOVAS; +} + +void iovaMappingDestroy(PIOVAMAPPING pIovaMapping) +{ + OBJIOVASPACE *pIOVAS = iovaspaceFromMapping(pIovaMapping); + + NV_ASSERT_OR_RETURN_VOID(pIOVAS != NULL); + iovaspaceDestroyMapping(pIOVAS, pIovaMapping); +} + +#endif // (RMCFG_FEATURE_PLATFORM_UNIX || RMCFG_FEATURE_PLATFORM_MODS) && !NVCPU_IS_ARM diff --git a/src/nvidia/src/kernel/mem_mgr/mem.c b/src/nvidia/src/kernel/mem_mgr/mem.c new file mode 100644 index 000000000..0b9def1bb --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/mem.c @@ -0,0 +1,1028 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#include "mem_mgr/mem.h"
+
+#include "mem_mgr/fla_mem.h"
+
+#include "gpu/gpu.h"
+#include "gpu/mem_mgr/mem_mgr.h"
+#include "gpu/disp/disp_objs.h"
+#include "gpu/mem_mgr/mem_desc.h"
+#include "os/os.h"
+#include "core/locks.h"
+#include "gpu/device/device.h"
+#include "gpu/subdevice/subdevice.h"
+#include "vgpu/rpc.h"
+
+#include "class/cl0041.h" // NV04_MEMORY
+#include "class/cl003e.h" // NV01_MEMORY_SYSTEM
+#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR
+
+NV_STATUS
+memConstruct_IMPL
+(
+    Memory *pMemory,
+    CALL_CONTEXT *pCallContext,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    RsResourceRef *pResourceRef = pCallContext->pResourceRef;
+    RsResourceRef *pParentRef = pResourceRef->pParentRef;
+
+    //
+    // Common initialization used for both normal construction & copy
+    // constructor
+    //
+
+    // NULL if parent isn't a device
+    pMemory->pDevice = dynamicCast(pParentRef->pResource, Device);
+
+    // NULL if parent isn't a subdevice
+    pMemory->pSubDevice = dynamicCast(pParentRef->pResource, Subdevice);
+
+    // If parent subdevice, grandparent must be a device
+    if (pMemory->pSubDevice)
+    {
+        RsResourceRef *pGrandParentRef = pParentRef->pParentRef;
+
+        pMemory->pDevice = dynamicCast(pGrandParentRef->pResource, Device);
+
+        if (pMemory->pDevice == NULL)
+            return NV_ERR_INVALID_OBJECT_HANDLE;
+    }
+
+    // If child of device, we have a pGpu
+    if (pMemory->pDevice)
+    {
+        // NOTE: pGpu and pDevice may be NULL for NoDeviceMemory
+        pMemory->pGpu = CliGetGpuFromContext(pResourceRef, &pMemory->bBcResource);
+
+        // Set thread BC state
+        gpuSetThreadBcState(pMemory->pGpu, pMemory->bBcResource);
+    }
+
+    if (RS_IS_COPY_CTOR(pParams))
+    {
+        //
+        // Copy constructor path (NvRmDupObject)
+        //
+        return memCopyConstruct_IMPL(pMemory, pCallContext, pParams);
+    }
+    else
+    {
+        //
+        // Default constructor path (NvRmAlloc)
+        //
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+memGetMapAddrSpace_IMPL
+(
+    Memory *pMemory,
+    CALL_CONTEXT *pCallContext,
+    NvU32 mapFlags,
+    NV_ADDRESS_SPACE *pAddrSpace
+)
+{
+    NV_ADDRESS_SPACE addrSpace;
+    OBJGPU *pGpu = pMemory->pGpu;
+    NvBool bBcResource = pMemory->bBcResource;
+    MEMORY_DESCRIPTOR *pMemDesc = NULL;
+
+    if (pGpu == NULL)
+        return NV_ERR_INVALID_OBJECT;
+
+    gpuSetThreadBcState(pGpu, bBcResource);
+
+    pMemDesc = memdescGetMemDescFromGpu(pMemory->pMemDesc, pGpu);
+
+    NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, pMemDesc, mapFlags, &addrSpace));
+
+    if (addrSpace == ADDR_SYSMEM)
+    {
+        if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_BAR0_REFLECT))
+        {
+            addrSpace = ADDR_REGMEM;
+        }
+        else if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_BAR1_REFLECT))
+        {
+            addrSpace = ADDR_FBMEM;
+        }
+    }
+
+    if (pAddrSpace)
+        *pAddrSpace = addrSpace;
+
+    return NV_OK;
+}
+
+void
+memDestruct_IMPL
+(
+    Memory *pMemory
+)
+{
+    OBJGPU *pGpu = pMemory->pGpu;
+    NvHandle hClient = RES_GET_CLIENT_HANDLE(pMemory);
+    NvHandle hParent = RES_GET_PARENT_HANDLE(pMemory);
+    NvHandle hMemory = RES_GET_HANDLE(pMemory);
+    NV_STATUS status = NV_OK;
+
+    //
+    // The default destructor is used when memConstructCommon() is called by
+    // the subclass but not memDestructCommon(). 
+ // + if (pMemory->bConstructed && pMemory->pMemDesc != NULL) + { + // Remove the system memory reference from the client + memDestructCommon(pMemory); + memdescFree(pMemory->pMemDesc); + memdescDestroy(pMemory->pMemDesc); + } + + // if the allocation is RPC-ed, free using RPC + if (pMemory->bRpcAlloc && (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu))) + { + NV_RM_RPC_FREE(pGpu, hClient, hParent, hMemory, status); + NV_ASSERT(status == NV_OK); + } +} + +NV_STATUS +memCreateMemDesc_IMPL +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR **ppMemDesc, + NV_ADDRESS_SPACE addrSpace, + NvU64 FBOffset, + NvU64 length, + NvU32 attr, + NvU32 attr2 +) +{ + NV_STATUS status = NV_OK; + NvU32 CpuCacheAttrib, gpuCacheAttrib; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + *ppMemDesc = NULL; + + if (addrSpace == ADDR_SYSMEM) + NV_ASSERT_OR_RETURN(FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, attr), NV_ERR_INVALID_ARGUMENT); + + // setup the CpuCacheAttrib as well.. (if the caller doesn't specify anything it will be 0=UNCACHED) + switch (DRF_VAL(OS32, _ATTR, _COHERENCY, attr)) + { + case NVOS32_ATTR_COHERENCY_UNCACHED: + CpuCacheAttrib = NV_MEMORY_UNCACHED; + break; + case NVOS32_ATTR_COHERENCY_WRITE_COMBINE: + CpuCacheAttrib = NV_MEMORY_WRITECOMBINED; + break; + case NVOS32_ATTR_COHERENCY_CACHED: + case NVOS32_ATTR_COHERENCY_WRITE_THROUGH: + case NVOS32_ATTR_COHERENCY_WRITE_PROTECT: + case NVOS32_ATTR_COHERENCY_WRITE_BACK: + CpuCacheAttrib = NV_MEMORY_CACHED; + break; + default: + NV_ASSERT(0); + CpuCacheAttrib = NV_MEMORY_UNCACHED; + break; + } + + gpuCacheAttrib = FLD_TEST_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _YES, attr2) ? NV_MEMORY_CACHED : NV_MEMORY_UNCACHED; + + // Create and fill in a memory descriptor + status = memdescCreate(&pMemDesc, pGpu, length, 0, NV_TRUE, addrSpace, + CpuCacheAttrib, + MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE_FB_BC_ONLY(pGpu, addrSpace)); + if (status == NV_OK) + { + if (memdescHasSubDeviceMemDescs(pMemDesc)) + { + MEMORY_DESCRIPTOR *pMemDescNext = pMemDesc->_pNext; + while (pMemDescNext) + { + memdescDescribe(pMemDescNext, addrSpace, FBOffset, length); + memdescSetGpuCacheAttrib(pMemDescNext, gpuCacheAttrib); + pMemDescNext = pMemDescNext->_pNext; + } + } + else + { + memdescDescribe(pMemDesc, addrSpace, FBOffset, length); + memdescSetGpuCacheAttrib(pMemDesc, gpuCacheAttrib); + } + + *ppMemDesc = pMemDesc; + } + + return status; +} + +NV_STATUS +memCreateKernelMapping_IMPL +( + Memory *pMemory, + NvU32 Protect, + NvBool bClear +) +{ + NV_STATUS status; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory)); + + if (pMemory->KernelVAddr == NvP64_NULL) + { + if (memdescGetAddressSpace(pMemory->pMemDesc) != ADDR_SYSMEM) + { + return NV_ERR_NOT_SUPPORTED; + } + + status = memdescMap(pMemory->pMemDesc, 0, pMemory->Length, NV_TRUE, + Protect, &pMemory->KernelVAddr, &pMemory->KernelMapPriv); + + if (status != NV_OK) + { + pMemory->KernelVAddr = NvP64_NULL; + pMemory->KernelMapPriv = NvP64_NULL; + return status; + } + + memdescSetKernelMapping(pMemory->pMemDesc, pMemory->KernelVAddr); + memdescSetKernelMappingPriv(pMemory->pMemDesc, pMemory->KernelMapPriv); + + if (bClear) + { + portMemSet(NvP64_VALUE(pMemory->KernelVAddr), 0, pMemory->Length); + } + } + + return NV_OK; +} + +RM_ATTR_PAGE_SIZE +dmaNvos32ToPageSizeAttr +( + NvU32 attr, + NvU32 attr2 +) +{ + switch (DRF_VAL(OS32, _ATTR, _PAGE_SIZE, attr)) + { + case NVOS32_ATTR_PAGE_SIZE_DEFAULT: + return RM_ATTR_PAGE_SIZE_DEFAULT; + case NVOS32_ATTR_PAGE_SIZE_4KB: + return RM_ATTR_PAGE_SIZE_4KB; + case NVOS32_ATTR_PAGE_SIZE_BIG: + return 
RM_ATTR_PAGE_SIZE_BIG; + case NVOS32_ATTR_PAGE_SIZE_HUGE: + switch (DRF_VAL(OS32, _ATTR2, _PAGE_SIZE_HUGE, attr2)) + { + case NVOS32_ATTR2_PAGE_SIZE_HUGE_DEFAULT: + case NVOS32_ATTR2_PAGE_SIZE_HUGE_2MB: + return RM_ATTR_PAGE_SIZE_HUGE; + case NVOS32_ATTR2_PAGE_SIZE_HUGE_512MB: + return RM_ATTR_PAGE_SIZE_512MB; + } + break; + } + + NV_ASSERT_FAILED("Invalid attr and attr2 page size arguments"); + return RM_ATTR_PAGE_SIZE_DEFAULT; +} + +NV_STATUS +memConstructCommon_IMPL +( + Memory *pMemory, + NvU32 categoryClassId, + NvU32 flags, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 heapOwner, + Heap *pHeap, + NvU32 attr, + NvU32 attr2, + NvU32 Pitch, + NvU32 type, + NvU32 tag, + HWRESOURCE_INFO *pHwResource +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS status = NV_OK; + + if (pMemDesc == NULL) + return NV_ERR_INVALID_ARGUMENT; + + // initialize the memory description + pMemory->categoryClassId = categoryClassId; + pMemory->pMemDesc = pMemDesc; + pMemory->Length = pMemDesc->Size; + pMemory->RefCount = 1; + pMemory->HeapOwner = heapOwner; + pMemory->pHeap = pHeap; + pMemory->Attr = attr; + pMemory->Attr2 = attr2; + pMemory->Pitch = Pitch; + pMemory->Type = type; + pMemory->Flags = flags; + pMemory->tag = tag; + pMemory->isMemDescOwner = NV_TRUE; + pMemory->bRpcAlloc = NV_FALSE; + + // We are finished if this instance is device-less + if (pMemory->pDevice == NULL) + { + goto done; + } + + if (pMemDesc->pGpu == NULL) + { + return NV_ERR_INVALID_STATE; + } + + // Memory has hw resources associated with it that need to be tracked. + if (pHwResource != NULL) + { + pMemory->pHwResource = portMemAllocNonPaged(sizeof(HWRESOURCE_INFO)); + if (pMemory->pHwResource != NULL) + { + *pMemory->pHwResource = *pHwResource; // struct copy + pMemory->pHwResource->refCount = 1; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Unable to allocate HWRESOURCE_INFO tracking structure\n"); + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + NV_ASSERT(status == NV_OK); + + // + // Apply attr and flags to the memory descriptor. Ideally all should + // be handled before we get here. 
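+    // Each attribute below is applied per-subdevice through SLI_LOOP so the
+    // broadcast memory descriptor stays consistent across GPUs.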
+ // + + // Check whether encryption should be enabled + if (flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED) + { + pGpu = pMemDesc->pGpu; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_ENCRYPTED, NV_TRUE); + SLI_LOOP_END + } + + if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, attr2)) + { + pGpu = pMemDesc->pGpu; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_USER_READ_ONLY, NV_TRUE); + SLI_LOOP_END + } + + if (FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_DEVICE, _READ_ONLY, attr2)) + { + pGpu = pMemDesc->pGpu; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescSetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_DEVICE_READ_ONLY, NV_TRUE); + SLI_LOOP_END + } + + // setup GpuP2PCacheAttrib + switch (DRF_VAL(OS32, _ATTR2, _P2P_GPU_CACHEABLE, attr2)) + { + case NVOS32_ATTR2_P2P_GPU_CACHEABLE_YES: + pGpu = pMemDesc->pGpu; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescSetGpuP2PCacheAttrib(memdescGetMemDescFromGpu(pMemDesc, pGpu), NV_MEMORY_CACHED); + SLI_LOOP_END + break; + default: + NV_ASSERT(0); + /*FALLSTHRU*/ + case NVOS32_ATTR2_P2P_GPU_CACHEABLE_NO: + case NVOS32_ATTR2_P2P_GPU_CACHEABLE_DEFAULT: + pGpu = pMemDesc->pGpu; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescSetGpuP2PCacheAttrib(memdescGetMemDescFromGpu(pMemDesc, pGpu), NV_MEMORY_UNCACHED); + SLI_LOOP_END + break; + } + + // + // Page size may be specified at allocation. This if for fermi family + // chips and is a nop for previous generations. At this point the hal call + // to set the page size should never fail as the memory was just allocated. 
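+    // A failure while setting the page size is still treated as fatal for
+    // this construct path.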
+ // + if (pMemDesc->pGpu) + { + pGpu = pMemDesc->pGpu; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + RM_ATTR_PAGE_SIZE pageSizeAttr = dmaNvos32ToPageSizeAttr(attr, attr2); + status = memmgrSetMemDescPageSize_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), memdescGetMemDescFromGpu(pMemDesc, pGpu), + AT_GPU, pageSizeAttr); + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + SLI_LOOP_END + + if (status != NV_OK) + { + goto done; + } + } + + pMemory->Node.keyStart = RES_GET_HANDLE(pMemory); + pMemory->Node.keyEnd = RES_GET_HANDLE(pMemory); + pMemory->Node.Data = pMemory; + + status = btreeInsert(&pMemory->Node, &pMemory->pDevice->DevMemoryTable); + if (status != NV_OK) + goto done; + + // Initialize the circular list item for tracking dup/sharing of pMemDesc + pMemory->dupListItem.pNext = pMemory->dupListItem.pPrev = pMemory; + +done: + if (status != NV_OK) + { + if (pMemory != NULL && pMemory->pHwResource != NULL) + { + portMemFree(pMemory->pHwResource); + } + } + else + { + pMemory->bConstructed = NV_TRUE; + } + + return status; +} + +static void +_memDestructCommonWithDevice +( + Memory *pMemory +) +{ + NvHandle hMemory = RES_GET_HANDLE(pMemory); + OBJGPU *pGpu = pMemory->pGpu; + Device *pDevice = pMemory->pDevice; + RsResourceRef *pDeviceRef = RES_GET_REF(pDevice); + NvHandle hDevice = RES_GET_HANDLE(pDevice); + Subdevice *pSubDeviceInfo; + DispCommon *pDispCommon; + RsClient *pRsClient = RES_GET_CLIENT(pMemory); + NV_STATUS status; + RS_ITERATOR subDevIt; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + gpuSetThreadBcState(pGpu, pMemory->bBcResource); + + subDevIt = clientRefIter(pRsClient, pDeviceRef, classId(Subdevice), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(pRsClient, &subDevIt)) + { + pSubDeviceInfo = dynamicCast(subDevIt.pResourceRef->pResource, Subdevice); + + if (hMemory == pSubDeviceInfo->hNotifierMemory) + { + pSubDeviceInfo->hNotifierMemory = NV01_NULL_OBJECT; + pSubDeviceInfo->pNotifierMemory = NULL; + } + } + + dispcmnGetByDevice(pRsClient, hDevice, &pDispCommon); + + if (pDispCommon != NULL) + { + DisplayApi *pDisplayApi = staticCast(pDispCommon, DisplayApi); + if (pDisplayApi->hNotifierMemory == hMemory) + { + pDisplayApi->hNotifierMemory = NV01_NULL_OBJECT; + pDisplayApi->pNotifierMemory = NULL; + } + } + + // + // Release any FB HW resources + // + if (pMemory->pHwResource) + { + if (--pMemory->pHwResource->refCount == 0) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvBool bHostVgpuDeviceExists = NV_FALSE; + + if ((pMemory->categoryClassId == NV01_MEMORY_SYSTEM && memmgrComprSupported(pMemoryManager, ADDR_SYSMEM)) || + (bHostVgpuDeviceExists && (pMemory->pHwResource->isGuestAllocated))) + { + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pFbAllocInfo == NULL) + { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto done; + } + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + if (pFbAllocPageFormat == NULL) { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + pFbAllocInfo->pageFormat->type = pMemory->Type; + pFbAllocInfo->pageFormat->attr = pMemory->Attr; + pFbAllocInfo->pageFormat->attr2 = pMemory->Attr2; + pFbAllocInfo->hwResId = memdescGetHwResId(pMemory->pMemDesc); + pFbAllocInfo->size = pMemory->Length; + pFbAllocInfo->format = 
memdescGetPteKind(pMemory->pMemDesc); + + // + // Note that while freeing duped memory under a device, the + // device may not be the memory owning device. Hence, always use + // memory owning device (pMemDesc->pGpu) to free HW resources. + // + status = memmgrFreeHwResources(pMemory->pMemDesc->pGpu, pMemoryManager, pFbAllocInfo); + NV_ASSERT(status == NV_OK); + } + portMemFree(pMemory->pHwResource); + } + } + + NV_ASSERT_OK_OR_GOTO(status, btreeUnlink(&pMemory->Node, &pDevice->DevMemoryTable), done); + + pMemory->pMemDesc->DupCount--; + + // Choose the new owner + if (pMemory->isMemDescOwner) + { + (pMemory->dupListItem.pNext)->isMemDescOwner = NV_TRUE; + } + // Remove from circular list tracking dup/sharing of pMemDesc + pMemory->dupListItem.pPrev->dupListItem.pNext = pMemory->dupListItem.pNext; + pMemory->dupListItem.pNext->dupListItem.pPrev = pMemory->dupListItem.pPrev; + pMemory->dupListItem.pNext = pMemory->dupListItem.pPrev = NULL; + + pMemory->bConstructed = NV_FALSE; + +done: + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + // The unmap call(s) above may have changed the broadcast state so restore it here + gpuSetThreadBcState(pGpu, pMemory->bBcResource); +} + +void +memDestructCommon_IMPL +( + Memory *pMemory +) +{ + OBJGPU *pGpu = pMemory->pGpu; + RsResourceRef *pResourceRef = RES_GET_REF(pMemory); + RsResourceRef *pParentRef = pResourceRef->pParentRef; + RsClient *pClient = RES_GET_CLIENT(pMemory); + NvHandle hClient = pClient->hClient; + NvHandle hParent = pParentRef->hResource; + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!pMemory->bConstructed) + return; + + NV_ASSERT_OK(memdescDeregisterFromGSP(pGpu, hClient, hParent, hMemory)); + + // Do device specific teardown if we have a device + if (pMemory->pDevice != NULL) + { + _memDestructCommonWithDevice(pMemory); + } + else + { + pMemory->bConstructed = NV_FALSE; + } + + if (pMemory->KernelVAddr != NvP64_NULL) + { + memdescUnmap(pMemory->pMemDesc, NV_TRUE, osGetCurrentProcess(), + pMemory->KernelVAddr, pMemory->KernelMapPriv); + pMemory->KernelVAddr = NvP64_NULL; + pMemory->KernelMapPriv = NvP64_NULL; + } +} + +NV_STATUS +memGetByHandleAndDevice_IMPL +( + RsClient *pClient, + NvHandle hMemory, + NvHandle hDevice, + Memory **ppMemory +) +{ + NV_STATUS status; + + status = memGetByHandle(pClient, hMemory, ppMemory); + if (status != NV_OK) + return status; + + if (hDevice != RES_GET_HANDLE((*ppMemory)->pDevice)) + { + *ppMemory = NULL; + return NV_ERR_OBJECT_NOT_FOUND; + } + + return NV_OK; +} + +NV_STATUS +memGetByHandle_IMPL +( + RsClient *pClient, + NvHandle hMemory, + Memory **ppMemory +) +{ + RsResourceRef *pResourceRef; + NV_STATUS status; + + *ppMemory = NULL; + + status = clientGetResourceRef(pClient, hMemory, &pResourceRef); + if (status != NV_OK) + return status; + + *ppMemory = dynamicCast(pResourceRef->pResource, Memory); + + if (*ppMemory == NULL) + return NV_ERR_INVALID_OBJECT_HANDLE; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(*ppMemory)); + + return NV_OK; +} + +NV_STATUS +memGetByHandleAndGroupedGpu_IMPL +( + RsClient *pClient, + NvHandle hMemory, + OBJGPU *pGpu, + Memory **ppMemory +) +{ + Device *pDevice; + NV_STATUS status; + + // Get device handle + status = deviceGetByInstance(pClient, gpuGetDeviceInstance(pGpu), &pDevice); + if (status != NV_OK) + return NV_ERR_INVALID_OBJECT_HANDLE; + + return memGetByHandleAndDevice(pClient, hMemory, RES_GET_HANDLE(pDevice), ppMemory); +} + +NV_STATUS +memIsReady_IMPL +( + Memory *pMemory +) +{ + if (pMemory->pMemDesc == NULL) + return 
NV_ERR_INVALID_OBJECT; + + return NV_OK; +} + +NV_STATUS +memControl_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + RmCtrlParams *pRmCtrlParams = pParams->pLegacyParams; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory)); + + if (!pMemory->pGpu) + return NV_ERR_INVALID_OBJECT_PARENT; + + if (REF_VAL(NVXXXX_CTRL_CMD_CLASS, pParams->cmd) == NV04_MEMORY) + { + if (pMemory->categoryClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) + return NV_ERR_NOT_SUPPORTED; + } + + pRmCtrlParams->pGpu = pMemory->pGpu; + + gpuSetThreadBcState(pMemory->pGpu, pMemory->bBcResource); + + return resControl_IMPL(staticCast(pMemory, RsResource), pCallContext, pParams); +} + +NV_STATUS +memCopyConstruct_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsClient *pDstClient = pCallContext->pClient; + RsClient *pSrcClient = pParams->pSrcClient; + RsResourceRef *pDstRef = pCallContext->pResourceRef; + RsResourceRef *pSrcRef = pParams->pSrcRef; + Memory *pMemorySrc = dynamicCast(pSrcRef->pResource, Memory); + Memory *pMemoryDst = pMemory; + OBJGPU *pSrcGpu = NULL; + OBJGPU *pDstGpu = NULL; + NV_STATUS status = NV_OK; + NvBool bReleaseGpuLock = NV_FALSE; + Device *pSrcDevice = NULL; + Device *pDstDevice = NULL; + Subdevice *pSrcSubDevice = NULL; + Subdevice *pDstSubDevice = NULL; + RsResourceRef *pSrcParentRef = pSrcRef->pParentRef; + RsResourceRef *pDstParentRef = pDstRef->pParentRef; + + NV_ASSERT_OR_RETURN(pSrcParentRef != NULL, NV_ERR_INVALID_OBJECT_PARENT); + NV_ASSERT_OR_RETURN(pDstParentRef != NULL, NV_ERR_INVALID_OBJECT_PARENT); + NV_ASSERT_OR_RETURN(pMemorySrc != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemorySrc)); + + // + // Must return early when parent is Client. + // This copy constructor is very device-specific so it is up + // to the device-less Memory subclasses to define their own dup behavior. 
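+    // The client-parented case is detected below by comparing the client
+    // handle with the parent handle.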
+ // + if (RES_GET_CLIENT_HANDLE(pMemoryDst) == RES_GET_PARENT_HANDLE(pMemoryDst)) + { + NV_ASSERT_OR_RETURN(RES_GET_CLIENT_HANDLE(pMemorySrc) == + RES_GET_PARENT_HANDLE(pMemorySrc), + NV_ERR_INVALID_OBJECT_PARENT); + return NV_OK; + } + + pSrcGpu = pMemorySrc->pGpu; + pDstGpu = pMemoryDst->pGpu; + pSrcDevice = pMemorySrc->pDevice; + pDstDevice = pMemoryDst->pDevice; + pSrcSubDevice = pMemorySrc->pSubDevice; + pDstSubDevice = pMemoryDst->pSubDevice; + + // Only children of device are supported + NV_ASSERT_OR_RETURN(pSrcDevice != NULL, NV_ERR_INVALID_OBJECT_PARENT); + NV_ASSERT_OR_RETURN(pDstDevice != NULL, NV_ERR_INVALID_OBJECT_PARENT); + + if (!!pSrcSubDevice != !!pDstSubDevice) + { + NV_PRINTF(LEVEL_ERROR, "Parent type mismatch between Src and Dst objects" + "Both should be either device or subDevice\n"); + return NV_ERR_INVALID_OBJECT_PARENT; + } + + // RS-TODO: This should use pMemorySrc->bBcResource when adding full support for subdevice duping + gpuSetThreadBcState(pSrcGpu, NV_TRUE); + + if (!rmGpuLockIsOwner() && + !(rmDeviceGpuLockIsOwner(pSrcGpu->gpuInstance) && + rmDeviceGpuLockIsOwner(pDstGpu->gpuInstance))) + { + // LOCK: acquire GPUs lock + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM)) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to acquire GPU locks, error 0x%x\n", status); + return status; + } + + bReleaseGpuLock = NV_TRUE; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, + memCheckCopyPermissions(pMemorySrc, pDstGpu, pDstClient->hClient), done); + + // Initialize Memory + pMemoryDst->categoryClassId = pMemorySrc->categoryClassId; + pMemoryDst->Length = pMemorySrc->Length; + pMemoryDst->HeapOwner = pMemorySrc->HeapOwner; + pMemoryDst->pHeap = pMemorySrc->pHeap; + pMemoryDst->pMemDesc = pMemorySrc->pMemDesc; + pMemoryDst->KernelVAddr = NvP64_NULL; + pMemoryDst->KernelMapPriv = NvP64_NULL; + pMemoryDst->Attr = pMemorySrc->Attr; + pMemoryDst->Attr2 = pMemorySrc->Attr2; + pMemoryDst->Pitch = pMemorySrc->Pitch; + pMemoryDst->Type = pMemorySrc->Type; + pMemoryDst->Flags = pMemorySrc->Flags; + pMemoryDst->tag = pMemorySrc->tag; + pMemoryDst->pHwResource = pMemorySrc->pHwResource; + pMemoryDst->isMemDescOwner = NV_FALSE; + pMemoryDst->bRpcAlloc = pMemorySrc->bRpcAlloc; + + // Link in the new device memory mapping + pMemoryDst->Node.keyStart = RES_GET_HANDLE(pMemoryDst); + pMemoryDst->Node.keyEnd = RES_GET_HANDLE(pMemoryDst); + pMemoryDst->Node.Data = pMemoryDst; + + status = btreeInsert(&pMemoryDst->Node, &pDstDevice->DevMemoryTable); + if (status != NV_OK) + goto done; + + { + OBJGPU *pGpu = pDstGpu; // Need pGpu for SLI loop + + gpuSetThreadBcState(pDstGpu, NV_TRUE); + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + if (memdescGetPageSize(memdescGetMemDescFromGpu(pMemoryDst->pMemDesc, pGpu), AT_GPU) == 0) + { + status = memmgrSetMemDescPageSize_HAL(pGpu, GPU_GET_MEMORY_MANAGER(pGpu), + memdescGetMemDescFromGpu(pMemoryDst->pMemDesc, pGpu), + AT_GPU, RM_ATTR_PAGE_SIZE_DEFAULT); + NV_ASSERT(status == NV_OK); + } + SLI_LOOP_END + } + + // + // ref-count increments for shared structs after all places where we + // could return early. 
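+    // From here on the duplicate shares pHwResource and pMemDesc with the
+    // source object, so both reference counts are bumped together.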
+ // + if (pMemoryDst->pHwResource != NULL) + pMemoryDst->pHwResource->refCount++; + + memdescAddRef(pMemoryDst->pMemDesc); + pMemoryDst->pMemDesc->DupCount++; + if (pMemoryDst->pMemDesc->Allocated) + pMemoryDst->pMemDesc->Allocated++; + + // Insert pMemoryDst after pMemorySrc in circular list to track dup/sharing of pMemDesc + pMemoryDst->dupListItem.pNext = pMemorySrc->dupListItem.pNext; + pMemoryDst->dupListItem.pPrev = pMemorySrc; + pMemorySrc->dupListItem.pNext = pMemoryDst; + pMemoryDst->dupListItem.pNext->dupListItem.pPrev = pMemoryDst; + +done: + + // If the original allocation was RPCed, also send the Dup. + if (pMemory->bRpcAlloc && (IS_VIRTUAL(pSrcGpu) || IS_GSP_CLIENT(pSrcGpu))) + { + NV_RM_RPC_DUP_OBJECT(pSrcGpu, pDstClient->hClient, pDstParentRef->hResource, pDstRef->hResource, + pSrcClient->hClient, pSrcRef->hResource, 0, + NV_FALSE, // do not automatically issue RPC_FREE on object free + NULL, + status); + NV_ASSERT(status == NV_OK); + } + + // UNLOCK: release GPUs lock + if (bReleaseGpuLock) + { + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + pMemory->bConstructed = (status == NV_OK); + return status; +} + +NV_STATUS +memGetMemInterMapParams_IMPL +( + Memory *pMemory, + RMRES_MEM_INTER_MAP_PARAMS *pParams +) +{ + OBJGPU *pGpu = pParams->pGpu; + RsResourceRef *pMemoryRef = pParams->pMemoryRef; + + FlaMemory *pFlaMemory; + + MEMORY_DESCRIPTOR *pSrcMemDesc = pMemory->pMemDesc; + Device *pDevice; + Subdevice *pSubdevice; + NvBool bcState = gpumgrGetBcEnabledStatus(pGpu); + + // Don't expect to use default, but safe thing to do is set src=dest + NvHandle hMemoryDevice = 0; + OBJGPU *pSrcGpu = pGpu; + + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory)); + + if (pMemoryRef->pParentRef != NULL) + { + pDevice = dynamicCast(pMemoryRef->pParentRef->pResource, Device); + if (pDevice != NULL) + { + pSrcGpu = GPU_RES_GET_GPU(pDevice); + hMemoryDevice = RES_GET_HANDLE(pDevice); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + } + else + { + pSubdevice = dynamicCast(pMemoryRef->pParentRef->pResource, Subdevice); + if (pSubdevice != NULL) + { + pSrcGpu = GPU_RES_GET_GPU(pSubdevice); + hMemoryDevice = RES_GET_HANDLE(pSubdevice->pDevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + } + } + } + + pParams->pSrcGpu = pSrcGpu; + pParams->hMemoryDevice = hMemoryDevice; + + // + // Restore pGpu's bcState in case it was overwritten above (i.e., + // the case that hMemoryDevice and hBroadcastDevice are the same + // device, but a unicast mapping was desired). + // + gpumgrSetBcEnabledStatus(pGpu, bcState); + + // + // Mapping Guest allocated memory in PF is not supported + // + if (pSrcMemDesc->pGpu != pGpu && gpuIsSriovEnabled(pGpu) && + !(memdescGetFlag(pSrcMemDesc, MEMDESC_FLAGS_GUEST_ALLOCATED))) + { + // + // Memory allocated by pSrcMemDesc->pGpu needs to be + // remapped for pGpu as requested by client. + // + pParams->bDmaMapNeeded = NV_TRUE; + } + + pParams->pSrcMemDesc = pSrcMemDesc; + + pFlaMemory = dynamicCast(pMemoryRef->pResource, FlaMemory); + if (pFlaMemory != NULL) + { + pParams->pSrcGpu = gpumgrGetGpu(pFlaMemory->peerGpuInst); + pParams->bFlaMapping = NV_TRUE; + + NV_PRINTF(LEVEL_INFO, "FLA memory imported as (%s) with exportGpu:%x \n", + (pParams->pSrcGpu != pGpu ? 
" P2P " : " LOOPBACK "), + pFlaMemory->peerDeviceInst); + } + + return NV_OK; +} + +NV_STATUS +memGetMemoryMappingDescriptor_IMPL +( + Memory *pMemory, + MEMORY_DESCRIPTOR **ppMemDesc +) +{ + NV_CHECK_OK_OR_RETURN(LEVEL_INFO, memIsReady(pMemory)); + if (pMemory->pGpu != NULL) + { + *ppMemDesc = memdescGetMemDescFromGpu(pMemory->pMemDesc, pMemory->pGpu); + } + else + { + *ppMemDesc = pMemory->pMemDesc; + } + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/mem_mgr/mem_fabric.c b/src/nvidia/src/kernel/mem_mgr/mem_fabric.c new file mode 100644 index 000000000..3adc3c5b7 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/mem_fabric.c @@ -0,0 +1,803 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/****************************************************************************** + * + * Description: + * This file contains the functions managing the memory fabric + * + *****************************************************************************/ + +#include "core/core.h" +#include "core/locks.h" +#include "rmapi/resource.h" +#include "rmapi/rs_utils.h" +#include "mem_mgr_internal.h" +#include "mem_mgr/mem_fabric.h" +#include "mem_mgr/fabric_vaspace.h" +#include "mem_mgr/mem.h" +#include "mem_mgr/vaspace.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "os/os.h" +#include "compute/fabric.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/gpu.h" +#include "class/cl00f8.h" +#include "Nvcm.h" +#include "vgpu/rpc.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/bus/p2p_api.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" + +#include "published/ampere/ga100/dev_mmu.h" + +typedef struct +{ + // + // TODO: Only sticky non-partial mappings are supported currently, so all + // the fabric addrs are mapped to the single vidmem memory object. However, + // when partial mappings are supported, we will need a per-fabric memdesc + // tree to track the mappings for multiple vidmem memory objects. + // + NvHandle hDupedVidmem; +} FABRIC_MEMDESC_DATA; + +static NvU32 +_memoryfabricMemDescGetNumAddr +( + MEMORY_DESCRIPTOR *pMemDesc +) +{ + OBJGPU *pGpu = pMemDesc->pGpu; + NvU32 pageSize = 0; + + // Get the page size from the memory descriptor. 
+ pageSize = memdescGetPageSize(pMemDesc, + VAS_ADDRESS_TRANSLATION(pGpu->pFabricVAS)); + + // Get the number of addresses associated with this memory descriptor. + if (memdescGetContiguity(pMemDesc, + VAS_ADDRESS_TRANSLATION(pGpu->pFabricVAS))) + { + // For contiguous allocation, there is just one entry _pteArray[0]. + return 1; + } + + // For discontiguous allocations, numAddr is total size / page size. + return (memdescGetSize(pMemDesc) / pageSize); +} + +static void +_memoryfabricMemDescDestroyCallback +( + OBJGPU *pGpu, + void *pObject, + MEMORY_DESCRIPTOR *pMemDesc +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + FABRIC_VASPACE *pFabricVAS = dynamicCast(pGpu->pFabricVAS, FABRIC_VASPACE); + + FABRIC_MEMDESC_DATA *pMemdescData = + (FABRIC_MEMDESC_DATA *)memdescGetMemData(pMemDesc); + + RmPhysAddr *pteArray = memdescGetPteArrayForGpu(pMemDesc, pGpu, + VAS_ADDRESS_TRANSLATION(pGpu->pFabricVAS)); + + NvU32 numAddr = _memoryfabricMemDescGetNumAddr(pMemDesc); + + // Get the page size from the memory descriptor. + NvU32 pageSize = memdescGetPageSize(pMemDesc, + VAS_ADDRESS_TRANSLATION(pGpu->pFabricVAS)); + + // Remove the fabric memory allocations from the map. + fabricvaspaceVaToGpaMapRemove(pFabricVAS, pteArray[0]); + + if (!pFabricVAS->bRpcAlloc) + { + // + // Call fabricvaspaceBatchFree to free the FLA allocations. + // _pteArray in memdesc is RM_PAGE_SIZE whereas page size for memory fabric + // allocations is either 2MB or 512MB. Pass stride accordingly. + // + fabricvaspaceBatchFree(pFabricVAS, pteArray, numAddr, (pageSize >> RM_PAGE_SHIFT)); + } + + // Destroy the duped physical video memory handle. + if ((pMemdescData != NULL) && (pMemdescData->hDupedVidmem != 0)) + { + NV_ASSERT(pRmApi->Free(pRmApi, pFabricVAS->hClient, + pMemdescData->hDupedVidmem) == NV_OK); + + portMemFree(pMemDesc->_pMemData); + } + + portMemFree(pObject); +} + +static NV_STATUS +_memoryfabricMapPhysicalMemory +( + NvU64 *vAddr, + NvU32 numAddr, + NvU64 allocSize, + NvU32 pageSize, + MEMORY_DESCRIPTOR *pVidMemDesc, + NvU64 offset, + NvBool bReadOnly +) +{ + OBJGPU *pGpu = pVidMemDesc->pGpu; + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + DMA_PAGE_ARRAY pageArray; + NvU64 mapLength; + NvU32 kind; + COMPR_INFO comprInfo; + NvU32 vidmemPteArraySize; + RmPhysAddr addr; + NvU32 i; + FABRIC_VASPACE *pFabricVAS; + NvU32 mapFlags = DMA_UPDATE_VASPACE_FLAGS_UPDATE_ALL | + DMA_UPDATE_VASPACE_FLAGS_SKIP_4K_PTE_CHECK; + + NV_ASSERT_OR_RETURN(vAddr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVidMemDesc != NULL, NV_ERR_INVALID_ARGUMENT); + + mapFlags |= bReadOnly ? DMA_UPDATE_VASPACE_FLAGS_READ_ONLY : 0; + + pFabricVAS = dynamicCast(pGpu->pFabricVAS, FABRIC_VASPACE); + + if (pFabricVAS->bRpcAlloc) + return NV_OK; + + // Get compression attributes for the vidmem memdesc. + status = memmgrGetKindComprFromMemDesc(pMemoryManager, pVidMemDesc, offset, &kind, &comprInfo); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to get the compression attributes for the vidmem memdesc\n"); + return status; + } + + portMemSet(&pageArray, 0, sizeof(DMA_PAGE_ARRAY)); + + // Get the vidmem pteArray size. + vidmemPteArraySize = memdescGetPteArraySize(pVidMemDesc, AT_GPU); + + // Get the fabric addr range to map. + mapLength = (numAddr == 1 ? allocSize : pageSize); + pageArray.count = (vidmemPteArraySize == 1 ? 
1 : (mapLength / RM_PAGE_SIZE)); + + for (i = 0; i < numAddr; i++) + { + if (pageArray.count == 1) + { + addr = pVidMemDesc->_pteArray[0] + offset; + pageArray.pData = &addr; + } + else + { + pageArray.pData = &pVidMemDesc->_pteArray[offset / RM_PAGE_SIZE]; + } + + // Map the memory fabric object at the given physical video memory offset. + status = dmaUpdateVASpace_HAL(pGpu, pDma, pFabricVAS->pGVAS, pVidMemDesc, + NULL, vAddr[i], vAddr[i] + mapLength - 1, + mapFlags, &pageArray, 0, &comprInfo, 0, + NV_MMU_PTE_VALID_TRUE, + NV_MMU_PTE_APERTURE_VIDEO_MEMORY, + BUS_INVALID_PEER, NVLINK_INVALID_FABRIC_ADDR, + DMA_DEFER_TLB_INVALIDATE, NV_FALSE); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to map fabric addrs starting at 0x%llx\n", vAddr[i]); + return status; + } + + offset = offset + mapLength; + } + + fabricvaspaceInvalidateTlb(pFabricVAS, pVidMemDesc->pGpu, PTE_UPGRADE); + + return NV_OK; +} + +static void +_memoryfabricFreeFabricVa_VGPU +( + OBJGPU *pGpu, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + NV_RM_RPC_FREE(pGpu, pParams->hClient, + pParams->hParent, pParams->hResource, status); + NV_ASSERT(status == NV_OK); +} + +static void +_memoryfabricFreeFabricVa +( + FABRIC_VASPACE *pFabricVAS, + OBJGPU *pGpu, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvU64 *pAddr, + NvU32 numAddr +) +{ + if (pFabricVAS->bRpcAlloc) + { + _memoryfabricFreeFabricVa_VGPU(pGpu, pParams); + } + else + { + fabricvaspaceBatchFree(pFabricVAS, pAddr, numAddr, 1); + } +} + +static NV_STATUS +_memoryfabricAllocFabricVa_VGPU +( + OBJGPU *pGpu, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NV00F8_ALLOCATION_PARAMETERS *pAllocParams, + NvU64 **ppAddr, + NvU32 *pNumAddr +) +{ + NV00F8_CTRL_DESCRIBE_PARAMS *pDescribeParams = NULL; + NvU32 i = 0; + NV_STATUS status = NV_OK; + NvU32 idx = 0; + NvU64 *pAddr = NULL; + + NV_RM_RPC_ALLOC_OBJECT(pGpu, pParams->hClient, + pParams->hParent, + pParams->hResource, + pParams->externalClassId, + pAllocParams, + status); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Alloc NV_MEMORY_FABRIC RPC failed, status: %x\n", + status); + return status; + } + + pDescribeParams = portMemAllocNonPaged(sizeof(*pDescribeParams)); + if (pDescribeParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + + portMemSet(pDescribeParams, 0, sizeof(*pDescribeParams)); + + do + { + pDescribeParams->offset = idx; + NV_RM_RPC_CONTROL(pGpu, pParams->hClient, + pParams->hResource, + NV00F8_CTRL_CMD_DESCRIBE, + pDescribeParams, + sizeof(*pDescribeParams), status); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "CTRL_CMD_DESCRIBE failed, status: 0x%x, " + "numPfns: 0x%x, totalPfns: 0x%llx, readSoFar: 0x%x \n", + status, pDescribeParams->numPfns, pDescribeParams->totalPfns, idx); + goto cleanup; + } + + if (pAddr == NULL) + { + pAddr = portMemAllocNonPaged(sizeof(NvU64) * pDescribeParams->totalPfns); + if (pAddr == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + } + + for (i=0; i < pDescribeParams->numPfns; i++) + { + pAddr[idx + i] = (NvU64)((NvU64)pDescribeParams->pfnArray[i] << RM_PAGE_SHIFT_HUGE); + } + + idx += pDescribeParams->numPfns; + } while (idx < pDescribeParams->totalPfns); + + portMemFree(pDescribeParams); + + *ppAddr = pAddr; + *pNumAddr = idx; + + return status; + +cleanup: + portMemFree(pAddr); + portMemFree(pDescribeParams); + + _memoryfabricFreeFabricVa_VGPU(pGpu, pParams); + + return status; +} + +static NV_STATUS +_memoryfabricAllocFabricVa +( + FABRIC_VASPACE *pFabricVAS, + OBJGPU *pGpu, + 
RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NV00F8_ALLOCATION_PARAMETERS *pAllocParams, + VAS_ALLOC_FLAGS flags, + NvU64 **ppAddr, + NvU32 *pNumAddr +) +{ + OBJVASPACE *pOBJVASPACE = staticCast(pFabricVAS, OBJVASPACE); + + if (pFabricVAS->bRpcAlloc) + { + return _memoryfabricAllocFabricVa_VGPU(pGpu, pParams, + pAllocParams, ppAddr, + pNumAddr); + } + else + { + return fabricvaspaceAllocNonContiguous(pFabricVAS, + pAllocParams->allocSize, + pAllocParams->alignment, + vaspaceGetVaStart(pOBJVASPACE), + vaspaceGetVaLimit(pOBJVASPACE), + pAllocParams->pageSize, flags, + ppAddr, pNumAddr); + } +} + +NV_STATUS +memoryfabricConstruct_IMPL +( + MemoryFabric *pMemoryFabric, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemory = staticCast(pMemoryFabric, Memory); + OBJGPU *pGpu = pMemory->pGpu; + FABRIC_VASPACE *pFabricVAS = dynamicCast(pGpu->pFabricVAS, FABRIC_VASPACE); + NV00F8_ALLOCATION_PARAMETERS *pAllocParams = pParams->pAllocParams; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + RsResourceRef *pVidmemRef = NULL; + MEMORY_DESCRIPTOR *pVidMemDesc = NULL; + NV_STATUS status = NV_OK; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + FABRIC_MEMDESC_DATA *pMemdescData = NULL; + MEM_DESC_DESTROY_CALLBACK *pCallback = NULL; + VAS_ALLOC_FLAGS flags = {0}; + NvU64 *pAddr = NULL; + NvU32 numAddr = 0; + NvU32 pteKind = 0; + NvBool bReadOnly = NV_FALSE; + + if (RS_IS_COPY_CTOR(pParams)) + { + return memoryfabricCopyConstruct_IMPL(pMemoryFabric, + pCallContext, + pParams); + } + + // Only page size 512MB and 2MB supported. + if ((pAllocParams->pageSize != NV_MEMORY_FABRIC_PAGE_SIZE_512M) && + (pAllocParams->pageSize != NV_MEMORY_FABRIC_PAGE_SIZE_2M)) + { + NV_PRINTF(LEVEL_ERROR, "Unsupported pageSize: 0x%x\n", + pAllocParams->pageSize); + + return NV_ERR_INVALID_ARGUMENT; + } + + // Alignment should be pageSize aligned. + if (!NV_IS_ALIGNED64(pAllocParams->alignment, pAllocParams->pageSize)) + { + NV_PRINTF(LEVEL_ERROR, + "Alignment should be pageSize aligned\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + // AllocSize should be page size aligned. + if (!NV_IS_ALIGNED64(pAllocParams->allocSize, pAllocParams->pageSize)) + { + NV_PRINTF(LEVEL_ERROR, + "AllocSize should be pageSize aligned\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + // We don't support flexible mappings yet. + if (pAllocParams->allocFlags & NV00F8_ALLOC_FLAGS_FLEXIBLE_FLA) + { + NV_PRINTF(LEVEL_ERROR, + "Only sticky mappings are supported\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + if (pAllocParams->allocFlags & NV00F8_ALLOC_FLAGS_READ_ONLY) + { +#if !defined(DEVELOP) && !defined(DEBUG) && !defined(NV_MODS) + NV_PRINTF(LEVEL_ERROR, + "RO mappings are only supported on non-release builds\n"); + + return NV_ERR_NOT_SUPPORTED; +#else + bReadOnly = NV_TRUE; +#endif + } + + // For sticky mappings, physical video memory handle is needed. 
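+    // (flexible FLA was rejected above, so a backing vidmem handle is mandatory)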
+ if (pAllocParams->map.hVidMem == 0) + { + NV_PRINTF(LEVEL_ERROR, + "Physical vidmem handle needed for sticky mappings\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + status = serverutilGetResourceRef(pCallContext->pClient->hClient, + pAllocParams->map.hVidMem, &pVidmemRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to get resource in resserv for vidmem handle\n"); + return status; + } + + pVidMemDesc = (dynamicCast(pVidmemRef->pResource, Memory))->pMemDesc; + + if ((memdescGetAddressSpace(pVidMemDesc) != ADDR_FBMEM) || + (pGpu != pVidMemDesc->pGpu)) + { + NV_PRINTF(LEVEL_ERROR, "Invalid physical vidmem handle passed\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + if (memdescGetPageSize(pVidMemDesc, AT_GPU) != NV_MEMORY_FABRIC_PAGE_SIZE_2M) + { + NV_PRINTF(LEVEL_ERROR, "Physical vidmem page size should be 2MB\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + if ((pAllocParams->map.offset >= pVidMemDesc->Size) || + !NV_IS_ALIGNED64(pAllocParams->map.offset, NV_MEMORY_FABRIC_PAGE_SIZE_2M)) + { + NV_PRINTF(LEVEL_ERROR, + "Invalid offset passed for the physical vidmem handle\n"); + + return NV_ERR_INVALID_OFFSET; + } + + // hVidmem should be big enough to cover allocSize, starting from offset. + if (pAllocParams->allocSize > + (memdescGetSize(pVidMemDesc) - pAllocParams->map.offset)) + { + NV_PRINTF(LEVEL_ERROR, + "Insufficient physical video memory to map the requested " + "memory fabric allocation\n"); + + return NV_ERR_INVALID_ARGUMENT; + } + + // Check if fabric vaspace is valid. + if (pFabricVAS == NULL) + { + NV_PRINTF(LEVEL_ERROR, "Fabric vaspace object not available\n"); + + return NV_ERR_NOT_SUPPORTED; + } + + // Set the vaspace alloc flags. + flags.bSkipTlbInvalidateOnFree = NV_TRUE; + + flags.bForceContig = !!(pAllocParams->allocFlags & + NV00F8_ALLOC_FLAGS_FORCE_CONTIGUOUS); + + flags.bForceNonContig = !!(pAllocParams->allocFlags & + NV00F8_ALLOC_FLAGS_FORCE_NONCONTIGUOUS); + + status = _memoryfabricAllocFabricVa(pFabricVAS, pGpu, + pParams, pAllocParams, + flags, &pAddr, &numAddr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "VA Space alloc failed! Status Code: 0x%x Size: 0x%llx " + "RangeLo: 0x%llx, RangeHi: 0x%llx, page size: 0x%x\n", + status, pAllocParams->allocSize, + vaspaceGetVaStart(pGpu->pFabricVAS), + vaspaceGetVaLimit(pGpu->pFabricVAS), + pAllocParams->pageSize); + + return status; + } + + // Create a memdesc to associate with the above allocation. + status = memdescCreate(&pMemDesc, pGpu, pAllocParams->allocSize, + 0, (numAddr == 1), ADDR_FABRIC_V2, NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_NONE); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to allocate memory descriptor\n"); + goto freeVaspace; + } + + // Associate the memdesc with the above FLA allocation. + if (numAddr == 1) + { + // For contiguous allocation, call memdescSetPte to set _pteArray[0]. + memdescSetPte(pMemDesc, VAS_ADDRESS_TRANSLATION(pGpu->pFabricVAS), + 0, pAddr[0]); + } + else + { + // For discontiguous allocations, call memdescFillPages to fill ptes. + memdescFillPages(pMemDesc, 0, pAddr, numAddr, pAllocParams->pageSize); + } + + // Set the memdesc _pageSize. 
+ memdescSetPageSize(pMemDesc, VAS_ADDRESS_TRANSLATION(pGpu->pFabricVAS), + pAllocParams->pageSize); + + status = memConstructCommon(pMemory, NV_MEMORY_FABRIC, 0, pMemDesc, 0, NULL, + 0, 0, 0, 0, NVOS32_MEM_TAG_NONE, NULL); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "MemoryFabric memConstructCommon failed\n"); + goto freeMemdesc; + } + + status = memmgrGetFlaKind_HAL(pGpu, pMemoryManager, &pteKind); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Error getting kind attr for fabric memory\n"); + goto freeMemCommon; + } + + // Set PTE kind attribute for fabric memory. + memdescSetPteKind(pMemory->pMemDesc, pteKind); + memdescSetGpuCacheAttrib(pMemory->pMemDesc, NV_MEMORY_UNCACHED); + + // Allocate memory for memory fabric memdesc private data. + pMemdescData = portMemAllocNonPaged(sizeof(FABRIC_MEMDESC_DATA)); + if (pMemdescData == NULL) + { + status = NV_ERR_NO_MEMORY; + goto freeMemCommon; + } + portMemSet(pMemdescData, 0, sizeof(FABRIC_MEMDESC_DATA)); + + // Associate the memdesc data release callback function. + memdescSetMemData(pMemDesc, (void *)pMemdescData, NULL); + + // Allocate memory for the memory descriptor destroy callback. + pCallback = portMemAllocNonPaged(sizeof(MEM_DESC_DESTROY_CALLBACK)); + if (pCallback == NULL) + { + status = NV_ERR_NO_MEMORY; + goto freeMemdescMemData; + } + portMemSet(pCallback, 0, sizeof(MEM_DESC_DESTROY_CALLBACK)); + + // Associate the memdescDestroy callback function. + pCallback->pObject = (void *)pCallback; + pCallback->destroyCallback = + (MemDescDestroyCallBack*) &_memoryfabricMemDescDestroyCallback; + + memdescAddDestroyCallback(pMemDesc, pCallback); + + // Dup the physical video memory handle and cache it in memfabric memdesc. + status = pRmApi->DupObject(pRmApi, pFabricVAS->hClient, pFabricVAS->hDevice, + &pMemdescData->hDupedVidmem, pCallContext->pClient->hClient, + pAllocParams->map.hVidMem, 0); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to dup physical video memory handle\n"); + goto freeCallback; + } + + status = fabricvaspaceVaToGpaMapInsert(pFabricVAS, pAddr[0], pVidMemDesc, + pAllocParams->map.offset); + if (status != NV_OK) + goto freeDupedMem; + + // Map the memory fabric object at the given physical video memory offset. + status = _memoryfabricMapPhysicalMemory(pAddr, numAddr, pAllocParams->allocSize, + pAllocParams->pageSize, pVidMemDesc, + pAllocParams->map.offset, bReadOnly); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to map FLA at the given physical vidmem offset\n"); + goto memFabricRemoveVaToGpaMap; + } + + pMemoryFabric->flags = pAllocParams->allocFlags; + pMemory->bRpcAlloc = pFabricVAS->bRpcAlloc; + + portMemFree(pAddr); + + return NV_OK; + +memFabricRemoveVaToGpaMap: + fabricvaspaceVaToGpaMapRemove(pFabricVAS, pAddr[0]); + +freeDupedMem: + // Free the duped vidmem handle. + NV_ASSERT(pRmApi->Free(pRmApi, pFabricVAS->hClient, + pMemdescData->hDupedVidmem) == NV_OK); + +freeCallback: + // Destroy the memdesc destroy callback. + memdescRemoveDestroyCallback(pMemDesc, pCallback); + portMemFree(pCallback); + pCallback = NULL; + +freeMemdescMemData: + // Free the memory fabric memdesc private data. + portMemFree(pMemdescData); + pMemdescData = NULL; + +freeMemCommon: + memDestructCommon(pMemory); + +freeMemdesc: + // Destroy the created memory descriptor. + memdescDestroy(pMemDesc); + pMemDesc = NULL; + +freeVaspace: + _memoryfabricFreeFabricVa(pFabricVAS, pGpu, + pParams, pAddr, numAddr); + + // Free memory allocated for vaspace allocations. 
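+    // (the success path already freed pAddr just before returning NV_OK)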
+ portMemFree(pAddr); + + return status; +} + +void +memoryfabricDestruct_IMPL +( + MemoryFabric *pMemoryFabric +) +{ + return; +} + +NvBool +memoryfabricCanCopy_IMPL +( + MemoryFabric *pMemoryFabric +) +{ + return NV_TRUE; +} + +NV_STATUS +memoryfabricCopyConstruct_IMPL +( + MemoryFabric *pMemoryFabric, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + // + // Memory fabric object must enforce the source (owner) GPU for duping. + // However, CUDA and UVM drivers have been using destination (mapping) + // GPU to dup memory objects in general. The changes involved in the + // UVM driver would need more time as they are a bit involved. Thus, + // for now RM is temporarily relaxing this restriction. + // + // The duping restriction will be added back once UVM bug 3367020 + // is fixed. + // + + return NV_OK; +} + +NvBool +memoryfabricCanExport_IMPL +( + MemoryFabric *pMemoryFabric +) +{ + // + // Check if FLA->PA mappings are present. Only then allow export. + // FLA->PA mappings are guaranteed for STICKY FLA mappings, which is only + // what we support currently. + // TODO: Re-visit this function when support for FLEXIBLE FLA mappings is + // added. + // + return !(pMemoryFabric->flags & NV00F8_ALLOC_FLAGS_FLEXIBLE_FLA); +} + +NV_STATUS +memoryfabricControl_IMPL +( + MemoryFabric *pMemoryFabric, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + + if (REF_VAL(NVXXXX_CTRL_CMD_CLASS, pParams->cmd) != NV_MEMORY_FABRIC) + return NV_ERR_INVALID_ARGUMENT; + + status = resControl_IMPL(staticCast(pMemoryFabric, RsResource), + pCallContext, pParams); + + return status; +} + +NV_STATUS +memoryfabricCtrlGetInfo_IMPL +( + MemoryFabric *pMemoryFabric, + NV00F8_CTRL_GET_INFO_PARAMS *pParams +) +{ + Memory *pMemory = staticCast(pMemoryFabric, Memory); + + if (pMemory->pMemDesc == NULL) + return NV_ERR_INVALID_ARGUMENT; + + pParams->size = memdescGetSize(pMemory->pMemDesc); + pParams->pageSize = memdescGetPageSize(pMemory->pMemDesc, AT_GPU); + pParams->allocFlags = pMemoryFabric->flags; + + return NV_OK; +} + +NV_STATUS +memoryfabricCtrlCmdDescribe_IMPL +( + MemoryFabric *pMemoryFabric, + NV00F8_CTRL_DESCRIBE_PARAMS *pParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} diff --git a/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h b/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h new file mode 100644 index 000000000..cc0253c13 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/mem_mgr_internal.h @@ -0,0 +1,38 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _MM_INTERNAL_H_ +#define _MM_INTERNAL_H_ + +// +// Don't define deprecated definitions for RM MM implementations +// +#define RM_STRICT_SUPPRESS_DEPRECATED_DEFINITIONS_VER_JAN_21_2020 + +// +// MM API runs within VGPU guest/GSP client. Don't allow direct access to +// physical engine objects/definitions. +// +#define RM_STRICT_SUPPRESS_PHYSICAL_DEFINITIONS_VER_JAN_21_2020 + +#endif // _MM_INTERNAL_H_ diff --git a/src/nvidia/src/kernel/mem_mgr/no_device_mem.c b/src/nvidia/src/kernel/mem_mgr/no_device_mem.c new file mode 100644 index 000000000..f806f2d3a --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/no_device_mem.c @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/no_device_mem.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" + +NV_STATUS +nodevicememConstruct_IMPL +( + NoDeviceMemory *pNoDeviceMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR *pMemDesc; + NV_MEMORY_ALLOCATION_PARAMS *pAllocParams = pParams->pAllocParams; + Memory *pMemory = staticCast(pNoDeviceMemory, Memory); + NvU32 cpuCacheAttrib; + NvU32 coherency; + NvU32 protection; + NvU32 attr = 0; + NvU32 attr2 = 0; + + coherency = DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocParams->attr); + attr = FLD_SET_DRF_NUM(OS32, _ATTR, _COHERENCY, coherency, attr); + switch (coherency) + { + case NVOS32_ATTR_COHERENCY_CACHED: + case NVOS32_ATTR_COHERENCY_WRITE_THROUGH: + case NVOS32_ATTR_COHERENCY_WRITE_PROTECT: + case NVOS32_ATTR_COHERENCY_WRITE_BACK: + cpuCacheAttrib = NV_MEMORY_CACHED; + break; + case NVOS32_ATTR_COHERENCY_UNCACHED: + cpuCacheAttrib = NV_MEMORY_UNCACHED; + break; + case NVOS32_ATTR_COHERENCY_WRITE_COMBINE: + // Intentional fall-through + default: + cpuCacheAttrib = NV_MEMORY_WRITECOMBINED; + break; + } + + status = memdescCreate(&pMemory->pMemDesc, NULL, pAllocParams->size, 0, NV_MEMORY_CONTIGUOUS, + ADDR_SYSMEM, cpuCacheAttrib, MEMDESC_FLAGS_CPU_ONLY); + if (status != NV_OK) + return status; + + status = osAllocPages(pMemory->pMemDesc); + if (status != NV_OK) + goto cleanup; + pMemDesc = pMemory->pMemDesc; + pMemDesc->Allocated = 1; + + protection = DRF_VAL(OS32, _ATTR2, _PROTECTION_USER, pAllocParams->attr2); + attr2 = FLD_SET_DRF_NUM(OS32, _ATTR2, _PROTECTION_USER, protection, attr2); + if (protection == NVOS32_ATTR2_PROTECTION_USER_READ_ONLY) + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY, NV_TRUE); + + // initialize the memory description + pMemory->categoryClassId = pCallContext->pResourceRef->externalClassId; + pMemory->pMemDesc = pMemDesc; + pMemory->Length = pMemDesc->Size; + pMemory->RefCount = 1; + pMemory->HeapOwner = 0; + pMemory->pHeap = NULL; + pMemory->Attr = attr; + pMemory->Attr2 = attr2; + pMemory->Flags = pAllocParams->flags; + pMemory->isMemDescOwner = NV_TRUE; + + // Initialize the circular list item for tracking dup/sharing of pMemDesc + pMemory->dupListItem.pNext = pMemory->dupListItem.pPrev = pMemory; + + return NV_OK; + +cleanup: + nodevicememDestruct_IMPL(pNoDeviceMemory); + return status; +} + +void nodevicememDestruct_IMPL(NoDeviceMemory *pNoDeviceMemory) +{ + Memory *pMemory = staticCast(pNoDeviceMemory, Memory); + + if (pMemory->KernelVAddr != NvP64_NULL) + { + memdescUnmap(pMemory->pMemDesc, NV_TRUE, osGetCurrentProcess(), + pMemory->KernelVAddr, pMemory->KernelMapPriv); + pMemory->KernelVAddr = NvP64_NULL; + pMemory->KernelMapPriv = NvP64_NULL; + } + + if (pMemory->pMemDesc) + { + memdescFree(pMemory->pMemDesc); + memdescDestroy(pMemory->pMemDesc); + } +} + +NV_STATUS nodevicememGetMapAddrSpace_IMPL +( + NoDeviceMemory *pNoDeviceMemory, + CALL_CONTEXT *pCallContext, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + *pAddrSpace = ADDR_SYSMEM; + return NV_OK; +} diff --git a/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c b/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c new file mode 100644 index 000000000..e1ac1a85d --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/os_desc_mem.c @@ -0,0 +1,222 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/os_desc_mem.h" +#include "rmapi/client.h" +#include "rmapi/mapping_list.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" +#include "gpu/device/device.h" +#include "vgpu/rpc.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "deprecated/rmapi_deprecated.h" + +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR + +NV_STATUS +osdescConstruct_IMPL +( + OsDescMemory *pOsDescMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + Memory *pMemory = staticCast(pOsDescMemory, Memory); + NV_OS_DESC_MEMORY_ALLOCATION_PARAMS *pUserParams; + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NV_STATUS status; + NvU64 limit; + NvU32 os02Flags; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvHandle hMemory = pCallContext->pResourceRef->hResource; + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pRmAllocParams)) + return NV_OK; + + pUserParams = pRmAllocParams->pAllocParams; + + limit = pUserParams->limit; + + // + // Bug 860684: osCreateMemFromOsDescriptor expects OS02 flags + // from the old NvRmAllocMemory64() interface so we need to + // translate the OS32_ATTR flags to OS02 flags. + // + status = RmDeprecatedConvertOs32ToOs02Flags(pUserParams->attr, + pUserParams->attr2, + pUserParams->flags, + &os02Flags); + + if (status != NV_OK) + { + return status; + } + + // Only kernel user is allowed to register physical address with RM + if (pUserParams->descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_PHYS_ADDR) + { + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + { + return NV_ERR_NOT_SUPPORTED; + } + } + + if (pUserParams->descriptorType == NVOS32_DESCRIPTOR_TYPE_OS_IO_MEMORY) + { + // + // We currently allow RmMapMemory on external IO resources which are + // safe to share across processes. For example, NpuResource. + // + // Otherwise we would be affected by the security issues like Bug 1630288. + // + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _MAPPING, _NEVER_MAP, os02Flags); + + // + // Force peerMappingOverride check for IO memory registration through + // RmVidHeapCtrl. See Bug 1630288 "[PeerSync] threat related to GPU.." for + // more details. 
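+        // The _REQUIRED setting below applies that check to the OS02 flags
+        // handed to osCreateMemFromOsDescriptor().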
+ // + os02Flags = FLD_SET_DRF(OS02, _FLAGS, _PEER_MAP_OVERRIDE, _REQUIRED, os02Flags); + } + + // + // Create and fill in the memory descriptor based on the current + // state of the OS descriptor. + // + status = osCreateMemFromOsDescriptor(pGpu, + pUserParams->descriptor, + hClient, + os02Flags, + &limit, + &pMemDesc, + pUserParams->descriptorType, + pRmAllocParams->pSecInfo->privLevel); + + if (status != NV_OK) + { + return status; + } + + if (pMemoryManager->bAllowSysmemHugePages && pMemDesc->bForceHugePages) + { + pUserParams->attr = DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE); + pUserParams->attr2 = DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _DEFAULT); + } + + status = memConstructCommon(pMemory, NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, pUserParams->flags, + pMemDesc, 0, NULL, pUserParams->attr, pUserParams->attr2, 0, 0, + pUserParams->tag, (HWRESOURCE_INFO *)NULL); + + if (status == NV_OK) + { + RsResourceRef *pResourceRef = RES_GET_REF(pMemory); + RsCpuMapping *pCpuMapping = NULL; + NvU32 flags = 0; + flags = FLD_SET_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags); + RS_CPU_MAP_PARAMS dummyParams; + portMemSet(&dummyParams, 0, sizeof(dummyParams)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + refAddMapping(pResourceRef, &dummyParams, pResourceRef->pParentRef, &pCpuMapping)); + + NV_ASSERT_OK_OR_RETURN(CliUpdateMemoryMappingInfo(pCpuMapping, + pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL, + pUserParams->descriptor, NvP64_NULL, + limit+1, flags)); + pCpuMapping->pPrivate->pGpu = pGpu; + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + if (IS_VIRTUAL(pGpu)) + { + NV_RM_RPC_ALLOC_MEMORY(pGpu, + hClient, + hParent, + hMemory, + NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + os02Flags, + pMemDesc, + status); + if (status == NV_OK) + pMemory->bRpcAlloc = NV_TRUE; + + } + } + + // + // RM support for MODS PTE kind in external allocations + // bug 1858656 + // + if (status == NV_OK && NV_IS_MODS) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvU32 kind; + FB_ALLOC_PAGE_FORMAT fbAllocPageFormat = {0}; + + NV_ASSERT_OR_RETURN( + DRF_VAL(OS32, _ATTR, _COMPR, pUserParams->attr) == + NVOS32_ATTR_COMPR_NONE, NV_ERR_NOT_SUPPORTED); + + fbAllocPageFormat.flags = pUserParams->flags; + fbAllocPageFormat.type = pUserParams->type; + fbAllocPageFormat.attr = pUserParams->attr; + fbAllocPageFormat.attr2 = pUserParams->attr2; + + // memmgrChooseKind will select kind based on format + status = memmgrChooseKind_HAL(pGpu, pMemoryManager, &fbAllocPageFormat, + DRF_VAL(OS32, _ATTR, _COMPR, pUserParams->attr), &kind); + if (status == NV_OK) + { + memdescSetPteKind(pMemDesc, kind); + } + } + + // failure case + if (status != NV_OK) + { + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + + return status; +} + +NvBool +osdescCanCopy_IMPL +( + OsDescMemory *pOsDescMemory +) +{ + return RMCFG_FEATURE_PLATFORM_UNIX; +} diff --git a/src/nvidia/src/kernel/mem_mgr/phys_mem.c b/src/nvidia/src/kernel/mem_mgr/phys_mem.c new file mode 100644 index 000000000..79df657b7 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/phys_mem.c @@ -0,0 +1,222 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/phys_mem.h" +#include "os/os.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mmu/kern_gmmu.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/device/device.h" +#include "rmapi/client.h" +#include "virtualization/hypervisor/hypervisor.h" + +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER +#include "class/cl00c2.h" // NV01_MEMORY_LOCAL_PHYSICAL + +NV_STATUS +physmemConstruct_IMPL +( + PhysicalMemory *pPhysicalMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_PHYSICAL_MEMORY_ALLOCATION_PARAMS *pAllocParams; + NV_STATUS status = NV_OK; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvHandle hMemory = pCallContext->pResourceRef->hResource; + Memory *pMemory = staticCast(pPhysicalMemory, Memory); + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + Heap *pHeap = MEMORY_MANAGER_GET_HEAP(pMemoryManager); + HWRESOURCE_INFO hwResource = {0}; + NvU64 heapBase; + NvU64 trueLength; + RS_PRIV_LEVEL privLevel = pCallContext->secInfo.privLevel; + NvBool bCompressedKind; + NvU32 attr = DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS) | + DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + NvU32 attr2 = 0; + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu)); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pParams)) + return NV_OK; + + // + // This class does not allocate ANY framebuffer memory. This function + // returns a dumb linear mapping to the entire framebuffer. + // + // The driver client is responsible for calling RmMapMemory() to actually + // get mappings when needed. 
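+ //
+ // Because this object describes the entire framebuffer, construction is
+ // restricted below to admin clients (or clients granted access by the
+ // hypervisor); everyone else gets NV_ERR_INSUFFICIENT_PERMISSIONS.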
+ // + // + + if (!(rmclientIsAdminByHandle(hClient, privLevel) || hypervisorCheckForObjectAccess(hClient))) + return NV_ERR_INSUFFICIENT_PERMISSIONS; + + pAllocParams = pParams->pAllocParams; + bCompressedKind = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, pAllocParams->format); + + heapGetBase(pHeap, &heapBase); + heapGetSize(pHeap, &trueLength); + pAllocParams->memSize = trueLength; + + switch (pAllocParams->pageSize) + { + case RM_PAGE_SIZE_512M: + NV_ASSERT_OR_RETURN(kgmmuIsPageSize512mbSupported(pKernelGmmu), NV_ERR_INVALID_ARGUMENT); + attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE); + attr2 |= DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _512MB); + break; + case RM_PAGE_SIZE_HUGE: + NV_ASSERT_OR_RETURN(kgmmuIsHugePageSupported(pKernelGmmu), NV_ERR_INVALID_ARGUMENT); + attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE); + attr2 |= DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB); + break; + default: + attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _BIG); + break; + } + + if (bCompressedKind) + { + FB_ALLOC_INFO FbAllocInfo = {0}; + FB_ALLOC_PAGE_FORMAT fbAllocPageFormat = {0}; + + NV_CHECK_OR_RETURN(LEVEL_ERROR, + pMemorySystemConfig->bOneToOneComptagLineAllocation || pMemorySystemConfig->bUseRawModeComptaglineAllocation, + NV_ERR_INVALID_ARGUMENT); + + attr |= DRF_DEF(OS32, _ATTR, _ZCULL, _NONE); + attr2 |= DRF_DEF(OS32, _ATTR2, _ZBC, _PREFER_NO_ZBC); + attr2 |= DRF_DEF(OS32, _ATTR2, _ZBC_SKIP_ZBCREFCOUNT, _YES); + + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_DISALLOW_PLC, pAllocParams->format)) + attr |= DRF_DEF(OS32, _ATTR, _COMPR, _DISABLE_PLC_ANY); + else + attr |= DRF_DEF(OS32, _ATTR, _COMPR, _REQUIRED); + + FbAllocInfo.pageFormat = &fbAllocPageFormat; + FbAllocInfo.hClient = hClient; + FbAllocInfo.hDevice = hParent; /* device */ + FbAllocInfo.size = trueLength; + FbAllocInfo.origSize = trueLength; + FbAllocInfo.offset = 0; + FbAllocInfo.format = pAllocParams->format; + FbAllocInfo.retAttr = attr; + FbAllocInfo.retAttr2 = attr2; + FbAllocInfo.pageFormat->kind = FbAllocInfo.format; + FbAllocInfo.pageFormat->attr = FbAllocInfo.retAttr; + FbAllocInfo.pageFormat->attr2 = FbAllocInfo.retAttr2; + + // Fetch RM page size + NV_CHECK_OR_RETURN(LEVEL_ERROR, + memmgrDeterminePageSize(pMemoryManager, FbAllocInfo.hClient, FbAllocInfo.size, + FbAllocInfo.format, FbAllocInfo.pageFormat->flags, + &FbAllocInfo.retAttr, &FbAllocInfo.retAttr2) != 0, + NV_ERR_INVALID_ARGUMENT); + + // Fetch memory alignment + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, &FbAllocInfo.size, &FbAllocInfo.align, + FbAllocInfo.alignPad, FbAllocInfo.pageFormat->flags, + FbAllocInfo.retAttr, FbAllocInfo.retAttr2, 0)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, memmgrAllocHwResources(pGpu, pMemoryManager, &FbAllocInfo)); + NV_ASSERT_OR_RETURN(FbAllocInfo.format == pAllocParams->format, NV_ERR_INVALID_ARGUMENT); + + attr = FbAllocInfo.retAttr; + attr2 = FbAllocInfo.retAttr2; + + hwResource.attr = FbAllocInfo.retAttr; + hwResource.attr2 = FbAllocInfo.retAttr2; + hwResource.comprCovg = FbAllocInfo.comprCovg; + hwResource.ctagOffset = FbAllocInfo.ctagOffset; + hwResource.hwResId = FbAllocInfo.hwResId; + } + + status = memCreateMemDesc(pGpu, &pMemDesc, ADDR_FBMEM, 0, trueLength, attr, attr2); + memdescDescribe(pMemDesc, ADDR_FBMEM, heapBase, trueLength); + + if (status == NV_OK) + { + memdescSetPteKind(pMemDesc, pAllocParams->format); + if (bCompressedKind) + memdescSetHwResId(pMemDesc, hwResource.hwResId); + + // Track internally as NV01_MEMORY_LOCAL_USER to share regular FB mem 
code paths + status = memConstructCommon(pMemory, NV01_MEMORY_LOCAL_USER, 0, pMemDesc, 0, + NULL, attr, attr2, 0, 0, NVOS32_MEM_TAG_NONE, + bCompressedKind ? &hwResource : NULL); + if (status == NV_OK) + { + if (!IS_GSP_CLIENT(pGpu)) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + NV_RM_RPC_ALLOC_LOCAL_USER(pGpu, hClient, hParent, hMemory, pMemDesc, trueLength, + attr, attr2, pAllocParams->format, status); + if (status != NV_OK) + { + // cleanup on an RPC failure + memDestructCommon(pMemory); + memdescDestroy(pMemDesc); + return status; + } + + pMemory->bRpcAlloc = NV_TRUE; + } + } + else + { + memdescDestroy(pMemDesc); + } + } + return status; +} + +NvBool +physmemCanCopy_IMPL +( + PhysicalMemory *pPhysicalMemory +) +{ + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/mem_mgr/pool_alloc.c b/src/nvidia/src/kernel/mem_mgr/pool_alloc.c new file mode 100644 index 000000000..0f2fc9a23 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/pool_alloc.c @@ -0,0 +1,982 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file pool_alloc.c + * @brief Defines the interfaces for managing the memory pools used for + * allocating and freeing the RM allocations. RM's + * internal page directories/tables are NOT managed by PMA and + * DO NOT use the interfaces defined in this file. + */ + +/* ------------------------------------ Includes ----------------------------------- */ +#include "mem_mgr/pool_alloc.h" +#include "mem_mgr/vaspace.h" +#include "gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.h" +#include "class/cl90f1.h" +#include "mmu/gmmu_fmt.h" +#include "gpu/gpu.h" + +/* ------------------------------------ Local Defines ------------------------------ */ +#define PMA_CHUNK_SIZE_512M (512 * 1024 * 1024) +#define PMA_CHUNK_SIZE_2M (2 * 1024 * 1024) +#define PMA_CHUNK_SIZE_512K (512 * 1024) +#define PMA_CHUNK_SIZE_256K (256 * 1024) +#define PMA_CHUNK_SIZE_64K (64 * 1024) + +/*! 
PAGE SIZES FOR DIFFERENT POOL ALLOCATOR LEVELS
+ *
+ * CONTEXT BUFFER allocations
+ *
+ * When the allocator is used for context buffers three page sizes
+ * are supported as follows:
+ *
+ * For buffers >= 2MB, page size = 2MB
+ * For buffers >= 32KB and < 2MB, page size = 64KB
+ * For buffers < 32KB, page size = 4KB
+ *
+ * PAGE TABLE allocations
+ *
+ * When the allocator is used for page tables the page sizes
+ * supported by different allocator levels are calculated as follows:
+ *
+ * Pre-Pascal [Big page size = 128K]
+ * Size of a full PD0 (Root) = 64 KBytes
+ * Size of a full Small Page Table = 256 KBytes
+ * Size of a full Big Page Table = 8 KBytes
+ *
+ * Pre-Pascal [Big page size = 64K]
+ * Size of a full PD0 (Root) = 128 KBytes
+ * Size of a full Small Page Table = 128 KBytes
+ * Size of a full Big Page Table = 8 KBytes
+ *
+ * Pascal+
+ * Size of a full PD3 (Root) = 4 KBytes
+ * Size of a full PD2 = 4 KBytes
+ * Size of a full PD1 = 4 KBytes
+ * Size of a full PD0 = 4 KBytes
+ * Size of a full Small Page Table = 4 KBytes
+ * Size of a full Big Page Table = 256 Bytes
+ *
+ */
+typedef enum
+{
+ RM_POOL_IDX_512M = 0,
+ RM_POOL_IDX_2M = 1,
+ RM_POOL_IDX_256K = 2,
+ RM_POOL_IDX_128K = 3,
+ RM_POOL_IDX_64K = 4,
+ RM_POOL_IDX_8K = 5,
+ RM_POOL_IDX_4K = 6,
+ RM_POOL_IDX_256B = 7,
+ NUM_POOLS // This should always be the last entry!
+}POOL_IDX;
+
+/*!
+ * This array contains the allocation sizes (in bytes) of each pool.
+ */
+static const NvU32 poolAllocSizes[] = {0x20000000, 0x200000, 0x40000, 0x20000, 0x10000, 0x2000, 0x1000, 0x100};
+
+#define POOL_CONFIG_POOL_IDX 0
+#define POOL_CONFIG_CHUNKSIZE_IDX 1
+
+static const int poolConfig[POOL_CONFIG_MAX_SUPPORTED][POOL_CONFIG_CHUNKSIZE_IDX + 1] = {
+ // page size // chunk size
+ { RM_POOL_IDX_256K, PMA_CHUNK_SIZE_512K}, // pool with pageSize = 256K for GMMU_FMT_VERSION_1
+ { RM_POOL_IDX_4K, PMA_CHUNK_SIZE_64K }, // pool with pageSize = 4K for GMMU_FMT_VERSION_2
+ { RM_POOL_IDX_512M, PMA_CHUNK_SIZE_512M }, // pool with pageSize = 512MB for RM allocated buffers (unused as of ampere)
+ { RM_POOL_IDX_2M, PMA_CHUNK_SIZE_2M }, // pool with pageSize = 2MB for RM allocated buffers
+ { RM_POOL_IDX_64K, PMA_CHUNK_SIZE_256K }, // pool with pageSize = 64K for RM allocated buffers
+ { RM_POOL_IDX_4K, PMA_CHUNK_SIZE_64K } // pool with pageSize = 4K for RM allocated buffers
+};
+
+/*!
+ * Locking in the RM internal pool allocator
+ * ===================================
+ *
+ * - pPoolLock
+ * Mutex (a PORT_MUTEX instance)
+ *
+ * The data inside RM_POOL_ALLOC_MEM_RESERVE_INFO is protected from concurrent access
+ * by this mutex. Any function accessing the RM_POOL_ALLOC_MEM_RESERVE_INFO data should
+ * acquire the mutex.
+ * We're using a mutex instead of a spinlock since we are allocating memory inside the
+ * lock. The allocation thread (pmaAllocatePages) may sleep on a semaphore (if scrubbing
+ * is in progress). So, a spinlock is not an appropriate choice. The current assumption is that
+ * none of the functions defined here gets called in an interrupt/atomic context. We'll
+ * assert in portSyncMutexAcquire() if any of this code ever gets called in an atomic
+ * context. The order in which locks are grabbed all the way from the point of entry into RM
+ * to the functions defined here is as follows.
+ *
+ * @ref rmMemPoolReserve API Lock -> pPoolLock (mutex) -> Locks inside PMA.
+ * @ref rmMemPoolAllocate API Lock -> GPU Lock -> pPoolLock (mutex) + * @ref rmMemPoolFree API Lock -> GPU Lock -> pPoolLock (mutex) + * @ref rmMemPoolRelease API Lock -> GPU Lock -> pPoolLock (mutex) + */ + +// State of memory pool +struct RM_POOL_ALLOC_MEM_RESERVE_INFO +{ + /*! + * Pointer to the PMA object. + */ + PMA *pPma; + + /*! + * Mutex to provide exclusive access to the data inside this struct + */ + PORT_MUTEX *pPoolLock; + + /*! + * Index of the topmost pool in the hierarchy + */ + POOL_IDX topmostPoolIndex; + + /*! + * Size of topmost pool's upstream chunk allocated by PMA. + */ + NvU64 pmaChunkSize; + + /*! + * Array of memory pools. + */ + POOLALLOC *pPool[NUM_POOLS]; + + /*! + * Num of allocations made from the pool. + */ + NvU64 validAllocCount; + + /*! + * Skip scrubbing for all allocations made from the pool. + */ + NvBool bSkipScrub; + + /*! + * Automatically trim memory pool when allocation is freed. + */ + NvBool bTrimOnFree; +}; + +/* ------------------------------------ Static functions --------------------------- */ + +/*! + * @brief Used for allocating pages by the upstream allocator for the topmost + * pool. + * + * @param[in] pCtx Context for upstream allocator. + * @param[in] pageSize Only for debugging. + * @param[in] pPage Output page handle from upstream. + * + * @return NV_STATUS + */ +static NV_STATUS +allocUpstreamTopPool +( + void *pCtx, + NvU64 pageSize, + POOLALLOC_HANDLE *pPage +) +{ + PMA_ALLOCATION_OPTIONS allocOptions = {0}; + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(NULL != pCtx, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pPage, NV_ERR_INVALID_ARGUMENT); + + // TODO: Replace the direct call to PMA with function pointer. + pMemReserveInfo = (RM_POOL_ALLOC_MEM_RESERVE_INFO *)pCtx; + allocOptions.flags = PMA_ALLOCATE_PINNED | PMA_ALLOCATE_PERSISTENT | + PMA_ALLOCATE_CONTIGUOUS; + + if (pMemReserveInfo->bSkipScrub) + { + allocOptions.flags |= PMA_ALLOCATE_NO_ZERO; + } + + status = pmaAllocatePages(pMemReserveInfo->pPma, + (NvU32)(pMemReserveInfo->pmaChunkSize/PMA_CHUNK_SIZE_64K), + PMA_CHUNK_SIZE_64K, + &allocOptions, + &pPage->address); + NV_ASSERT_OR_RETURN((NV_OK == status), status); + + pPage->pMetadata = NULL; + + return status; +} + +/*! + * @brief Used for allocating pages by the upstream allocator for the lower + * pools. + * + * @param[in] pCtx Context for upstream allocator. + * @param[in] pageSize Only for debugging. + * @param[in] pPage Output page handle from upstream. + * + * @return NV_STATUS + */ +static NV_STATUS +allocUpstreamLowerPools +( + void *pCtx, + NvU64 pageSize, + POOLALLOC_HANDLE *pPage +) +{ + NV_STATUS status; + + NV_ASSERT_OR_RETURN(NULL != pCtx, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pPage, NV_ERR_INVALID_ARGUMENT); + + status = poolAllocate((POOLALLOC *)pCtx, pPage); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + return status; +} + +/*! + * @brief Used for freeing pages by the upstream allocator for the topmost + * pool. + * + * @param[in] pCtx Context for upstream allocator. + * @param[in] pageSize Only for debugging. + * @param[in] pPage Page handle of page to be freed. + * + * @return + */ +static void +freeUpstreamTopPool +( + void *pCtx, + NvU64 pageSize, + POOLALLOC_HANDLE *pPage +) +{ + NvU32 flags = 0; + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo; + + NV_ASSERT_OR_RETURN_VOID(NULL != pCtx); + NV_ASSERT_OR_RETURN_VOID(NULL != pPage); + + // TODO: Replace the direct call to PMA with function pointer. 
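+ // The page handed back here is a single pmaChunkSize-sized chunk that was
+ // obtained through allocUpstreamTopPool(); when the pool has bSkipScrub set,
+ // PMA is told to skip the scrub-on-free as well.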
+ pMemReserveInfo = (RM_POOL_ALLOC_MEM_RESERVE_INFO *)pCtx; + + if (pMemReserveInfo->bSkipScrub) + { + flags |= PMA_FREE_SKIP_SCRUB; + } + + pmaFreePages(pMemReserveInfo->pPma, &(pPage->address), 1, + pMemReserveInfo->pmaChunkSize, flags); +} + +/*! + * @brief Used for freeing pages by the upstream allocator for the lower + * pools. + * + * @param[in] pCtx Context for upstream allocator. + * @param[in] pageSize Only for debugging. + * @param[in] pPage Page handle of page to be freed. + * + * @return + */ +static void +freeUpstreamLowerPools +( + void *pCtx, + NvU64 pageSize, + POOLALLOC_HANDLE *pPage +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pCtx); + NV_ASSERT_OR_RETURN_VOID(NULL != pPage); + + poolFree((POOLALLOC *)pCtx, pPage); +} + +/*! + * @brief Increments the refcount whenever an allocation is made + * from the pool. + * + * @param[in] pMemReserveInfo Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data + * + * @return + */ +static void +rmMemPoolAddRef +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pMemReserveInfo); + + pMemReserveInfo->validAllocCount++; +} + +/*! + * @brief Decrements the refcount whenever an allocation is freed and + * returned to the pool. + * + * @param[in] pMemReserveInfo Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data + * + * @return + */ +static void +rmMemPoolRemoveRef +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pMemReserveInfo); + NV_ASSERT_OR_RETURN_VOID(pMemReserveInfo->validAllocCount > 0); + + pMemReserveInfo->validAllocCount--; +} + +/*! + * @brief Gets the number of vaspaces that are being served by the pools. + * + * @param[in] pMemReserveInfo Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data + * + * @return + */ +static NvU64 +rmMemPoolGetRef +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo +) +{ + NV_ASSERT_OR_RETURN(NULL != pMemReserveInfo, 0); + + return pMemReserveInfo->validAllocCount; +} + +/* -------------------------------------- Public functions ---------------------------------- */ + +NV_STATUS +rmMemPoolSetup +( + void *pCtx, + RM_POOL_ALLOC_MEM_RESERVE_INFO **ppMemReserveInfo, + POOL_CONFIG_MODE configMode +) +{ + NvS32 poolIndex; + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo; + NV_STATUS status; + NvU32 flags = 0; + + if (configMode >= POOL_CONFIG_MAX_SUPPORTED) + { + return NV_ERR_INVALID_PARAMETER; + } + + pMemReserveInfo = (RM_POOL_ALLOC_MEM_RESERVE_INFO *)portMemAllocNonPaged(sizeof(*pMemReserveInfo)); + if (NULL == pMemReserveInfo) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pMemReserveInfo, 0, sizeof(*pMemReserveInfo)); + + *ppMemReserveInfo = pMemReserveInfo; + + pMemReserveInfo->pPma = (PMA *)pCtx; + + // + // poolConfig is a 2D array where each row is a pre-defined configuration mode + // For example, POOL_CONFIG_GMMU_FMT_1 means this pool is used for allocating PTE/PDE entries for the GMMU_FMT_VERSION_1 + // First column in poolConfig corresponds to topmostPoolIndex for a given config + // Second column in poolConfig corresponds to chunk size for a given config + // + pMemReserveInfo->topmostPoolIndex = poolConfig[configMode][POOL_CONFIG_POOL_IDX]; + pMemReserveInfo->pmaChunkSize = poolConfig[configMode][POOL_CONFIG_CHUNKSIZE_IDX]; + + // + // The topmost pool is fed pages directly by PMA. + // + // Calling into PMA with GPU lock acquired may cause deadlocks in case RM + // is operating along side UVM. Currently, we don't support UVM on Windows. + // So, allow the topmost pool to call into PMA on Windows. 
This is not + // permissible on platforms that support UVM like Linux. + // TODO: Remove this special handling for Windows once we have taken care + // of reserving memory for page tables required for mapping GR context buffers + // in the channel vaspace. See bug 200590870 and 200614517. + // + if (RMCFG_FEATURE_PLATFORM_WINDOWS_LDDM) + { + flags = FLD_SET_DRF(_RMPOOL, _FLAGS, _AUTO_POPULATE, _ENABLE, flags); + } + else + { + flags = FLD_SET_DRF(_RMPOOL, _FLAGS, _AUTO_POPULATE, _DISABLE, flags); + } + pMemReserveInfo->pPool[pMemReserveInfo->topmostPoolIndex] = poolInitialize( + (NvU32)pMemReserveInfo->pmaChunkSize, + poolAllocSizes[pMemReserveInfo->topmostPoolIndex], + allocUpstreamTopPool, + freeUpstreamTopPool, + (void *)pMemReserveInfo, + portMemAllocatorGetGlobalNonPaged(), + flags); + if (NULL == pMemReserveInfo->pPool[pMemReserveInfo->topmostPoolIndex]) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + // + // The pools are nested. Starting with the second pool, each is fed + // pages by the pool immediately above it in hierarchy. + // + flags = FLD_SET_DRF(_RMPOOL, _FLAGS, _AUTO_POPULATE, _ENABLE, flags); + for (poolIndex = pMemReserveInfo->topmostPoolIndex + 1; poolIndex < NUM_POOLS; poolIndex++) + { + pMemReserveInfo->pPool[poolIndex] = poolInitialize( + poolAllocSizes[poolIndex - 1], + poolAllocSizes[poolIndex], + allocUpstreamLowerPools, + freeUpstreamLowerPools, + (void *)pMemReserveInfo->pPool[poolIndex - 1], + portMemAllocatorGetGlobalNonPaged(), + flags); + if (NULL == pMemReserveInfo->pPool[poolIndex]) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + pMemReserveInfo->pPoolLock = (PORT_MUTEX *)portMemAllocNonPaged(portSyncMutexSize); + if (NULL == pMemReserveInfo->pPoolLock) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = portSyncMutexInitialize(pMemReserveInfo->pPoolLock); + if (NV_OK != status) + { + portMemFree(pMemReserveInfo->pPoolLock); + pMemReserveInfo->pPoolLock = NULL; + goto done; + } + + if ((configMode == POOL_CONFIG_CTXBUF_4K) || + (configMode == POOL_CONFIG_CTXBUF_64K) || + (configMode == POOL_CONFIG_CTXBUF_2M) || + (configMode == POOL_CONFIG_CTXBUF_512M)) + { + pMemReserveInfo->bTrimOnFree = NV_FALSE; + } + else + { + pMemReserveInfo->bTrimOnFree = NV_TRUE; + } +done: + if (NV_OK != status) + { + rmMemPoolDestroy(pMemReserveInfo); + } + return status; +} + + +NV_STATUS +rmMemPoolReserve +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + NvU64 size, + NvU32 flags +) +{ + NvU64 numChunks; + NV_STATUS status = NV_ERR_NO_MEMORY; + NvBool bPrevSkipScrubState = NV_FALSE; + + NV_ASSERT_OR_RETURN((NULL != pMemReserveInfo), NV_ERR_INVALID_ARGUMENT); + + portSyncMutexAcquire(pMemReserveInfo->pPoolLock); + + if (flags & VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL) + { + bPrevSkipScrubState = pMemReserveInfo->bSkipScrub; + pMemReserveInfo->bSkipScrub = NV_TRUE; + } + + numChunks = NV_DIV_AND_CEIL(size, pMemReserveInfo->pmaChunkSize); + + // Reserve pages only in the topmost pool. 
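+ // For example, with a 2MB chunk size a 5MB reservation rounds up to
+ // numChunks = 3.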
+ if (NULL != pMemReserveInfo->pPool[pMemReserveInfo->topmostPoolIndex]) + { + NV_CHECK_OK(status, LEVEL_WARNING, + poolReserve(pMemReserveInfo->pPool[pMemReserveInfo->topmostPoolIndex], numChunks)); + } + + if (flags & VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL) + { + pMemReserveInfo->bSkipScrub = bPrevSkipScrubState; + } + + portSyncMutexRelease(pMemReserveInfo->pPoolLock); + return status; +} + +NV_STATUS +rmMemPoolAllocate +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + RM_POOL_ALLOC_MEMDESC *pPoolMemDesc +) +{ + POOLALLOC_HANDLE *pPageHandle = NULL; + PoolPageHandleList *pPageHandleList = NULL; + NvS32 poolIndex = 0; + NvS32 topPool; + NV_STATUS status = NV_OK; + NvU64 allocSize; + NvU32 freeListLength; + MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR*)pPoolMemDesc; + NvU64 *pPhysicalAddresses = NULL; + NvU32 numPages = 0; + + NV_ASSERT_OR_RETURN((NULL != pMemReserveInfo), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((NULL != pMemDesc), NV_ERR_INVALID_ARGUMENT); + + topPool = pMemReserveInfo->topmostPoolIndex; + pPageHandleList = (PoolPageHandleList *)portMemAllocNonPaged(sizeof(*pPageHandleList)); + NV_ASSERT_OR_RETURN(pPageHandleList != NULL, NV_ERR_NO_MEMORY); + + portMemSet(pPageHandleList, 0, sizeof(*pPageHandleList)); + listInit(pPageHandleList, portMemAllocatorGetGlobalNonPaged()); + + portSyncMutexAcquire(pMemReserveInfo->pPoolLock); + + poolGetListLength(pMemReserveInfo->pPool[topPool], + &freeListLength, NULL, NULL); + NV_PRINTF(LEVEL_INFO, + "Total size of memory reserved for allocation = 0x%llx Bytes\n", + freeListLength * pMemReserveInfo->pmaChunkSize); + + // + // The onus is on the caller to pass the correct size info after factoring + // in any alignment requirements. The size after factoring in all alignment + // requirements is tracked in the ActualSize field. The Size field tracks + // the requested size and doesn't take any alignment requirements into + // consideration. 
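+ // ActualSize also drives the pool selection below: a 4KB request is served
+ // from the 4KB pool, while a request larger than poolAllocSizes[topPool]
+ // takes the multi-page path and claims NV_DIV_AND_CEIL(ActualSize,
+ // poolAllocSizes[topPool]) pages from the topmost pool.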
+ // + allocSize = pMemDesc->ActualSize; + + if (allocSize > poolAllocSizes[topPool]) + { + numPages = NvU64_LO32(NV_DIV_AND_CEIL(allocSize, poolAllocSizes[topPool])); + poolIndex = topPool; + } + else + { + for (poolIndex = NUM_POOLS - 1; poolIndex >= topPool; poolIndex--) + { + if (allocSize <= poolAllocSizes[poolIndex]) + { + NV_PRINTF(LEVEL_INFO, + "Allocating from pool with alloc size = 0x%x Bytes\n", + poolAllocSizes[poolIndex]); + break; + } + } + + if (poolIndex < 0) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + } + + // + // If allocation request is greater than page size of top level pool then + // allocate multiple pages from top-level pool + // + if (numPages > 1) + { + NvU32 index; + + NV_PRINTF(LEVEL_INFO, + "Allocating from pool with alloc size = 0x%x Bytes\n", + poolAllocSizes[topPool] * numPages); + + if (memdescGetContiguity(pMemDesc, AT_GPU)) + { + status = poolAllocateContig(pMemReserveInfo->pPool[topPool], numPages, pPageHandleList); + if (status != NV_OK) + { + goto done; + } + pPageHandle = listHead(pPageHandleList); + memdescDescribe(pMemDesc, ADDR_FBMEM, pPageHandle->address, pMemDesc->Size); + } + else + { + pPhysicalAddresses = (NvU64*)portMemAllocNonPaged(sizeof(*pPhysicalAddresses) * numPages); + if (pPhysicalAddresses == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + portMemSet(pPhysicalAddresses, 0, sizeof(*pPhysicalAddresses) * numPages); + + for (index = 0; index < numPages; index++) + { + pPageHandle = listAppendNew(pPageHandleList); + if (pPageHandle == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_ASSERT_OR_GOTO((pPageHandle != NULL), done); + } + status = poolAllocate(pMemReserveInfo->pPool[topPool], pPageHandle); + if (status != NV_OK) + { + // + // Remove current pageHandle from the list as its invalid + // and we don't want poolFree being called on it as a part of cleanup + // + listRemove(pPageHandleList, pPageHandle); + NV_ASSERT_OR_GOTO(0, done); + pPageHandle = NULL; + } + pPhysicalAddresses[index] = pPageHandle->address; + pPageHandle = NULL; + } + memdescFillPages(pMemDesc, 0, pPhysicalAddresses, numPages, poolAllocSizes[topPool]); + portMemFree(pPhysicalAddresses); + pPhysicalAddresses = NULL; + } + } + else + { + pPageHandle = listAppendNew(pPageHandleList); + NV_ASSERT_OR_GOTO((NULL != pPageHandle), done); + + status = poolAllocate(pMemReserveInfo->pPool[poolIndex], pPageHandle); + if (status != NV_OK) + { + listRemove(pPageHandleList, pPageHandle); + NV_ASSERT_OR_GOTO((NV_OK == status), done); + pPageHandle = NULL; + } + + memdescDescribe(pMemDesc, ADDR_FBMEM, pPageHandle->address, pMemDesc->Size); + // memdescDescribe() sets Size and ActualSize to same values. Hence, reassigning + pMemDesc->ActualSize = allocSize; + pPageHandle = NULL; + } + + // save list of page handles in memdesc + pMemDesc->pPageHandleList = pPageHandleList; + + // Refcount the pool. + rmMemPoolAddRef(pMemReserveInfo); + +done: + portMemFree(pPhysicalAddresses); + + if ((status != NV_OK) && (pPageHandleList != NULL)) + { + if (poolIndex >= 0) + { + PoolPageHandleListIter it = listIterAll(pPageHandleList); + while (listIterNext(&it)) + { + poolFree(pMemReserveInfo->pPool[poolIndex], it.pValue); + } + } + + listClear(pPageHandleList); + portMemFree(pPageHandleList); + } + portSyncMutexRelease(pMemReserveInfo->pPoolLock); + return status; +} + +/*! + * @brief Returns any unused nodes from the topmost level of a pool hierarchy + * back to PMA. 
+ * + * @param[in] pMemReserveInfo Pointer to the RM_POOL_ALLOC_MEM_RESERVE_INFO data + * @param[in] nodesToPreserve Number of nodes to preserve in the topmost pool + * @param[in] flags VASpace flags to skip scrubbing + * + * @return + */ +void +rmMemPoolTrim +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + NvU32 nodesToPreserve, + NvU32 flags +) +{ + NvBool bPrevSkipScrubState = NV_FALSE; + + NV_ASSERT_OR_RETURN_VOID(NULL != pMemReserveInfo); + + if (flags & VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL) + { + bPrevSkipScrubState = pMemReserveInfo->bSkipScrub; + pMemReserveInfo->bSkipScrub = NV_TRUE; + } + + poolTrim(pMemReserveInfo->pPool[pMemReserveInfo->topmostPoolIndex], + nodesToPreserve); + + if (flags & VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL) + { + pMemReserveInfo->bSkipScrub = bPrevSkipScrubState; + } +} + +void +rmMemPoolFree +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + RM_POOL_ALLOC_MEMDESC *pPoolAllocMemDesc, + NvU32 flags +) +{ + MEMORY_DESCRIPTOR *pMemDesc = (MEMORY_DESCRIPTOR*)pPoolAllocMemDesc; + NvS32 poolIndex = 0; + NvU64 allocSize; + PoolPageHandleListIter it; + NvU32 topPool; + + NV_ASSERT_OR_RETURN_VOID(NULL != pMemReserveInfo); + NV_ASSERT_OR_RETURN_VOID(NULL != pMemDesc); + NV_ASSERT_OR_RETURN_VOID((pMemDesc->pPageHandleList != NULL) && + (listCount(pMemDesc->pPageHandleList) != 0)); + + portSyncMutexAcquire(pMemReserveInfo->pPoolLock); + + // + // Refcount can be greater than 1 in case of shared vaspaces (as in UVM). + // In this case, RM's internal PDB may be refcounted and a reference + // stored internally for later revoke. + // + if (pMemDesc->RefCount > 1) + { + goto done; + } + + topPool = pMemReserveInfo->topmostPoolIndex; + + // Use the ActualSize value to look up the pools + allocSize = pMemDesc->ActualSize; + + // + // If allocation was greater than page size of top level pool then + // multiple pages were allocated from top pool and we need to free them all. + // + if (allocSize > poolAllocSizes[topPool]) + { + poolIndex = topPool; + } + else + { + for (poolIndex = NUM_POOLS - 1; poolIndex >= 0; poolIndex--) + { + if ((NULL != pMemReserveInfo->pPool[poolIndex]) && + (allocSize <= poolAllocSizes[poolIndex])) + { + break; + } + } + } + NV_ASSERT_OR_GOTO((poolIndex >= 0), done); + + it = listIterAll(pMemDesc->pPageHandleList); + while (listIterNext(&it)) + { + poolFree(pMemReserveInfo->pPool[poolIndex], it.pValue); + } + listClear(pMemDesc->pPageHandleList); + portMemFree(pMemDesc->pPageHandleList); + pMemDesc->pPageHandleList = NULL; + + rmMemPoolRemoveRef(pMemReserveInfo); + + // Trim the topmost pool so that any unused pages are returned to PMA. + if (pMemReserveInfo->bTrimOnFree) + { + rmMemPoolTrim(pMemReserveInfo, 1, flags); + } +done: + portSyncMutexRelease(pMemReserveInfo->pPoolLock); +} + +void +rmMemPoolRelease +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + NvU32 flags +) +{ + NvS32 poolIndex; + NvU32 freeListLength; + NvU32 partialListLength; + NvU32 fullListLenght; + NvBool bPrevSkipScrubState = NV_FALSE; + + NV_ASSERT_OR_RETURN_VOID(NULL != pMemReserveInfo); + + portSyncMutexAcquire(pMemReserveInfo->pPoolLock); + + // + // A refcount equal to zero implies that there are no unfreed page level + // instances. At this point the lowermost pool should have only its freelist + // non empty. An unfreed allocation in a lower level pool implies non empty + // partial lists and full lists in the pools above it. 
We free the pools + // from bottom to top so that by the time we come to the topmost pool, all + // allocations are present in the freelist of the topmost pool. The topmost + // pool can then return all the memory back to PMA. The pools can get memory + // via a call to rmMemPoolReserve() + // + if (rmMemPoolGetRef(pMemReserveInfo) != 0) + { + goto done; + } + + if (flags & VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL) + { + bPrevSkipScrubState = pMemReserveInfo->bSkipScrub; + pMemReserveInfo->bSkipScrub = NV_TRUE; + } + + for (poolIndex = NUM_POOLS - 1; poolIndex >= 0; poolIndex--) + { + if (NULL != pMemReserveInfo->pPool[poolIndex]) + { + // + // Since this function gets called only when validAlloCount is zero, + // the fullList and the partialList are expected to be empty. All + // allocations (if any) should be only in the freelist at this point. + // + poolGetListLength(pMemReserveInfo->pPool[poolIndex], &freeListLength, + &partialListLength, &fullListLenght); + NV_ASSERT(partialListLength == 0); + NV_ASSERT(fullListLenght == 0); + + // poolTrim() trims only the freelist. + poolTrim(pMemReserveInfo->pPool[poolIndex], 0); + } + } + + if (flags & VASPACE_FLAGS_SKIP_SCRUB_MEMPOOL) + { + pMemReserveInfo->bSkipScrub = bPrevSkipScrubState; + } + +done: + portSyncMutexRelease(pMemReserveInfo->pPoolLock); +} + +void +rmMemPoolDestroy +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo +) +{ + NvS32 poolIndex; + NvU32 freeListLength; + NvU32 partialListLength; + NvU32 fullListLenght; + + NV_ASSERT_OR_RETURN_VOID(NULL != pMemReserveInfo); + + NV_ASSERT(rmMemPoolGetRef(pMemReserveInfo) == 0); + + // + // Always free pools from bottom to top since the lower pools return + // their pages to the pool just above during free. The topmost pool will + // return the it's pages back to PMA. 
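+ // By this point rmMemPoolRelease() should already have trimmed everything,
+ // so the asserts below expect the free, partial and full lists of every
+ // pool to be empty.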
+ // + for (poolIndex = NUM_POOLS - 1; poolIndex >= 0; poolIndex--) + { + if (NULL != pMemReserveInfo->pPool[poolIndex]) + { + poolGetListLength(pMemReserveInfo->pPool[poolIndex], &freeListLength, + &partialListLength, &fullListLenght); + NV_ASSERT(freeListLength == 0); + NV_ASSERT(partialListLength == 0); + NV_ASSERT(fullListLenght == 0); + + poolDestroy(pMemReserveInfo->pPool[poolIndex]); + } + } + + if (NULL != pMemReserveInfo->pPoolLock) + { + portSyncMutexDestroy(pMemReserveInfo->pPoolLock); + portMemFree(pMemReserveInfo->pPoolLock); + pMemReserveInfo->pPoolLock = NULL; + } + + portMemFree(pMemReserveInfo); + pMemReserveInfo = NULL; +} + +NvBool +rmMemPoolIsScrubSkipped +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo +) +{ + NV_ASSERT_OR_RETURN(pMemReserveInfo != NULL, NV_FALSE); + return pMemReserveInfo->bSkipScrub; +} + +void +rmMemPoolSkipScrub +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + NvBool bSkipScrub +) +{ + NV_ASSERT_OR_RETURN_VOID(pMemReserveInfo != NULL); + pMemReserveInfo->bSkipScrub = bSkipScrub; +} + + +NV_STATUS +rmMemPoolGetChunkAndPageSize +( + RM_POOL_ALLOC_MEM_RESERVE_INFO *pMemReserveInfo, + NvU64 *pChunkSize, + NvU32 *pPageSize +) +{ + NV_ASSERT_OR_RETURN(pMemReserveInfo != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((pChunkSize != NULL) && (pPageSize != NULL), NV_ERR_INVALID_ARGUMENT); + *pChunkSize = pMemReserveInfo->pmaChunkSize; + *pPageSize = poolAllocSizes[pMemReserveInfo->topmostPoolIndex]; + return NV_OK; +} diff --git a/src/nvidia/src/kernel/mem_mgr/reg_mem.c b/src/nvidia/src/kernel/mem_mgr/reg_mem.c new file mode 100644 index 000000000..6b559c8b4 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/reg_mem.c @@ -0,0 +1,73 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/reg_mem.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "os/os.h" + +#include "class/cl003f.h" // NV01_MEMORY_LOCAL_PRIVILEGED + +NV_STATUS +regmemConstruct_IMPL +( + RegisterMemory *pRegisterMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NV_STATUS status = NV_OK; + NvU64 trueLength; + Memory *pMemory = staticCast(pRegisterMemory, Memory); + OBJGPU *pGpu = pMemory->pGpu; + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pParams)) + return NV_OK; + + trueLength = kbusGetPciBarSize(GPU_GET_KERNEL_BUS(pGpu), 0); + + status = memCreateMemDesc(pMemory->pGpu, &pMemDesc, ADDR_REGMEM, 0, + trueLength, 0, 0); + if (status == NV_OK) + { + status = memConstructCommon(pMemory, NV01_MEMORY_LOCAL_PRIVILEGED, 0, pMemDesc, 0, NULL, 0, 0, + 0, 0, NVOS32_MEM_TAG_NONE, (HWRESOURCE_INFO *)NULL); + if (status != NV_OK) + { + memdescDestroy(pMemDesc); + } + } + return status; +} + +NvBool +regmemCanCopy_IMPL +( + RegisterMemory *pRegisterMemory +) +{ + return NV_TRUE; +} diff --git a/src/nvidia/src/kernel/mem_mgr/standard_mem.c b/src/nvidia/src/kernel/mem_mgr/standard_mem.c new file mode 100644 index 000000000..8c50b03f2 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/standard_mem.c @@ -0,0 +1,253 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/standard_mem.h" +#include "vgpu/rpc.h" +#include "rmapi/client.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/device/device.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "resserv/rs_server.h" +#include "rmapi/rs_utils.h" +#include "gpu/mem_mgr/heap.h" + +NV_STATUS stdmemValidateParams +( + OBJGPU *pGpu, + NvHandle hClient, + NV_MEMORY_ALLOCATION_PARAMS *pAllocData +) +{ + NvBool bIso; + RS_PRIV_LEVEL privLevel; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + privLevel = pCallContext->secInfo.privLevel; + + // + // Make sure UMD does not impact the internal allocation flags + // Do this check right after copy in. 
RM is free to set these flags later + // + if ((privLevel < RS_PRIV_LEVEL_KERNEL) && + (pAllocData->internalflags != 0)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // These flags don't do anything in this path. No mapping on alloc and + // kernel map is controlled by TYPE + // + pAllocData->flags |= NVOS32_ALLOC_FLAGS_MAP_NOT_REQUIRED; + pAllocData->flags &= ~NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP; + + pAllocData->address = NvP64_NULL; + + // + // Reject any API calls that pass an invalid owner. + // Reject any client calls that try to mess with internal RM memory. + // + if ((pAllocData->owner == 0) || + (pAllocData->owner == 0xFFFFFFFF) || + ((pAllocData->owner >= HEAP_OWNER_RM_SCRATCH_BEGIN) && + (pAllocData->owner <= HEAP_OWNER_RM_SCRATCH_END))) + { + return NV_ERR_INVALID_OWNER; + } + + bIso = (pAllocData->type == NVOS32_TYPE_PRIMARY) || + (pAllocData->type == NVOS32_TYPE_VIDEO) || + (pAllocData->type == NVOS32_TYPE_CURSOR); + + // + // MM-TODO: If surface requires ISO guarantees, ensure it's of the proper + // NVOS32_TYPE. Eventually, we should decouple NVOS32_TYPE from conveying + // ISO behavior; RM needs to audit NVOS32_TYPE uses wrt ISO determination. + // + if (!bIso && FLD_TEST_DRF(OS32, _ATTR2, _ISO, _YES, pAllocData->attr2)) + { + NV_PRINTF(LEVEL_INFO, "type is non-ISO but attributes request ISO!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // + // check PAGE_OFFLINING flag for client + // If the client is not a ROOT client, then turning PAGE_OFFLINIG OFF is invalid + // + if (FLD_TEST_DRF(OS32, _ATTR2, _PAGE_OFFLINING, _OFF, pAllocData->attr2)) + { + { + // if the client requesting is not kernel mode, return early +#if defined(DEBUG) || defined(DEVELOP) || defined(NV_VERIF_FEATURES) + if (!rmclientIsAdminByHandle(hClient, privLevel)) +#else + if (privLevel < RS_PRIV_LEVEL_KERNEL) +#endif + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + } + + // + // If NVOS32_TYPE indicates ISO requirements, set + // NVOS32_ATTR2_NISO_DISPLAY_YES so it can be used within RM instead of + // NVOS32_TYPE for ISO determination. 
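+ // Concretely, the bIso case below sets NVOS32_ATTR2_ISO_YES in attr2.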
+ // + if (bIso) + { + pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _ISO, _YES, + pAllocData->attr2); + } + + if (!(pAllocData->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END)) + { + NV_ASSERT_OR_RETURN((pAllocData->rangeLo == 0) && + (pAllocData->rangeHi == 0), NV_ERR_INVALID_ARGUMENT); + } + NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Attr 0x%x Type 0x%x Attr2 0x%x\n", + pAllocData->attr, pAllocData->type, pAllocData->attr2); + + // Make sure that encryption is supported if it is requested + if ((pAllocData->flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED) && + DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) == NVOS32_ATTR_LOCATION_VIDMEM) + { + NV_PRINTF(LEVEL_ERROR, + "Encryption requested for video memory on a non-0FB chip;\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + if (FLD_TEST_DRF(OS32, _ATTR2, _ALLOCATE_FROM_SUBHEAP, _YES, pAllocData->attr2)) + { + NV_CHECK_OR_RETURN(LEVEL_ERROR, FLD_TEST_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, pAllocData->attr), + NV_ERR_INVALID_ARGUMENT); + } + + return NV_OK; +} + +void stdmemDumpInputAllocParams +( + NV_MEMORY_ALLOCATION_PARAMS *pAllocData, + CALL_CONTEXT *pCallContext +) +{ + NV_PRINTF(LEVEL_INFO, "stdmemConstruct input\n"); + NV_PRINTF(LEVEL_INFO, " Owner: 0x%x\n", pAllocData->owner); + NV_PRINTF(LEVEL_INFO, " hMemory: 0x%x\n", pCallContext->pResourceRef->hResource); + NV_PRINTF(LEVEL_INFO, " Type: 0x%x\n", pAllocData->type); + NV_PRINTF(LEVEL_INFO, " Flags: 0x%x\n", pAllocData->flags); + NV_PRINTF(LEVEL_INFO, " Begin: 0x%08llx\n", pAllocData->rangeLo); + NV_PRINTF(LEVEL_INFO, " End: 0x%08llx\n", pAllocData->rangeHi); + NV_PRINTF(LEVEL_INFO, " Height: 0x%x\n", pAllocData->height); + NV_PRINTF(LEVEL_INFO, " Width: 0x%x\n", pAllocData->width); + NV_PRINTF(LEVEL_INFO, " Pitch: 0x%x\n", pAllocData->pitch); + NV_PRINTF(LEVEL_INFO, " Size: 0x%08llx\n", pAllocData->size); + NV_PRINTF(LEVEL_INFO, " Alignment: 0x%08llx\n", pAllocData->alignment); + NV_PRINTF(LEVEL_INFO, " Offset: 0x%08llx\n", pAllocData->offset); + NV_PRINTF(LEVEL_INFO, " Attr: 0x%x\n", pAllocData->attr); + NV_PRINTF(LEVEL_INFO, " Attr2: 0x%x\n", pAllocData->attr2); + NV_PRINTF(LEVEL_INFO, " Format: 0x%x\n", pAllocData->format); + NV_PRINTF(LEVEL_INFO, " ComprCovg: 0x%x\n", pAllocData->comprCovg); + NV_PRINTF(LEVEL_INFO, " ZCullCovg: 0x%x\n", pAllocData->zcullCovg); + NV_PRINTF(LEVEL_INFO, " CtagOffset: 0x%x\n", pAllocData->ctagOffset); + NV_PRINTF(LEVEL_INFO, " hVASpace: 0x%x\n", pAllocData->hVASpace); + NV_PRINTF(LEVEL_INFO, " tag: 0x%x\n", pAllocData->tag); +} + +void stdmemDumpOutputAllocParams +( + NV_MEMORY_ALLOCATION_PARAMS *pAllocData +) +{ + NV_PRINTF(LEVEL_INFO, "stdmemConstruct output\n"); + NV_PRINTF(LEVEL_INFO, " Height: 0x%x\n", pAllocData->height); + NV_PRINTF(LEVEL_INFO, " Width: 0x%x\n", pAllocData->width); + NV_PRINTF(LEVEL_INFO, " Pitch: 0x%x\n", pAllocData->pitch); + NV_PRINTF(LEVEL_INFO, " Size: 0x%08llx\n", pAllocData->size); + NV_PRINTF(LEVEL_INFO, " Alignment: 0x%08llx\n", pAllocData->alignment); + NV_PRINTF(LEVEL_INFO, " Offset: 0x%08llx\n", pAllocData->offset); + NV_PRINTF(LEVEL_INFO, " Attr: 0x%x\n", pAllocData->attr); + NV_PRINTF(LEVEL_INFO, " Attr2: 0x%x\n", pAllocData->attr2); + NV_PRINTF(LEVEL_INFO, " Format: 0x%x\n", pAllocData->format); + NV_PRINTF(LEVEL_INFO, " ComprCovg: 0x%x\n", pAllocData->comprCovg); + NV_PRINTF(LEVEL_INFO, " ZCullCovg: 0x%x\n", pAllocData->zcullCovg); +} + +NV_STATUS +stdmemConstruct_IMPL +( + StandardMemory *pStandardMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + + +NvBool 
stdmemCanCopy_IMPL(StandardMemory *pStandardMemory) +{ + return NV_TRUE; +} + +/*! + * stdmemQueryPageSize + * + * @brief + * Returns page size requested by client. + * + * @param[in] pMemoryManager MemoryManager pointer + * @param[in] hClient Client handle. + * @param[in] pAllocData Pointer to VIDHEAP_ALLOC_DATA + * + * @returns + * The page size in bytes. + */ +NvU32 +stdmemQueryPageSize +( + MemoryManager *pMemoryManager, + NvHandle hClient, + NV_MEMORY_ALLOCATION_PARAMS *pAllocData +) +{ + NvU32 retAttr = pAllocData->attr; + NvU32 retAttr2 = pAllocData->attr2; + + return memmgrDeterminePageSize(pMemoryManager, hClient, pAllocData->size, + pAllocData->format, pAllocData->flags, &retAttr, &retAttr2); +} + +// +// Control calls for system memory objects maintained outside the heap. +// + +NvU32 stdmemGetSysmemPageSize_IMPL(OBJGPU * pGpu, StandardMemory *pStdMemory) +{ + return GPU_GET_MEMORY_MANAGER(pGpu)->sysmemPageSize; +} diff --git a/src/nvidia/src/kernel/mem_mgr/system_mem.c b/src/nvidia/src/kernel/mem_mgr/system_mem.c new file mode 100644 index 000000000..c2632f8ba --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/system_mem.c @@ -0,0 +1,571 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr/system_mem.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/disp/kern_disp.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "core/locks.h" +#include "os/os.h" +#include "rmapi/client.h" +#include "deprecated/rmapi_deprecated.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "core/system.h" + +#include "gpu/mem_sys/kern_mem_sys.h" + +#include "kernel/gpu/rc/kernel_rc.h" +#include "Nvcm.h" + +#include "class/cl003e.h" // NV01_MEMORY_SYSTEM + +/*! + * sysmemConstruct + * + * @brief + * This routine provides common allocation services used by the + * following heap allocation functions: + * NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT + * NVOS32_FUNCTION_ALLOC_SIZE + * NVOS32_FUNCTION_ALLOC_SIZE_RANGE + * NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT + * + * @param[in] pSystemMemory Pointer to SystemMemory object + * @param[in] pCallContext Pointer to the current CALL_CONTEXT. + * @param[in] pParams Pointer to the alloc params + * + * @return 'NV_OK' + * Operation completed successfully. + * @return 'NV_ERR_NO_MEMORY' + * There is not enough available memory to satisfy allocation request. 
+ * @return 'NV_ERR_INSUFFICIENT_RESOURCES' + * Not enough available resources to satisfy allocation request. + */ +NV_STATUS +sysmemConstruct_IMPL +( + SystemMemory *pSystemMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_MEMORY_ALLOCATION_PARAMS *pAllocData = pParams->pAllocParams; + MEMORY_ALLOCATION_REQUEST allocRequest = {0}; + MEMORY_ALLOCATION_REQUEST *pAllocRequest = &allocRequest; + OBJGPU *pGpu = pMemory->pGpu; + HWRESOURCE_INFO hwResource; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NvU32 gpuCacheAttrib; + NV_STATUS rmStatus = NV_OK; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvU64 sizeOut; + NvU64 offsetOut; + MEMORY_DESCRIPTOR *pMemDesc; + NvU32 Cache; + NvU32 flags; + StandardMemory *pStdMemory = staticCast(pSystemMemory, StandardMemory); + + // Copy-construction has already been done by the base Memory class + if (RS_IS_COPY_CTOR(pParams)) + return NV_OK; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, stdmemValidateParams(pGpu, hClient, pAllocData)); + NV_CHECK_OR_RETURN(LEVEL_ERROR, + DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) != NVOS32_ATTR_LOCATION_VIDMEM && + !(pAllocData->flags & NVOS32_ALLOC_FLAGS_VIRTUAL), + NV_ERR_INVALID_ARGUMENT); + + stdmemDumpInputAllocParams(pAllocData, pCallContext); + + // send it through the regular allocator even though it is for sysmem + pAllocRequest->classNum = NV01_MEMORY_SYSTEM; + pAllocRequest->pUserParams = pAllocData; + pAllocRequest->hMemory = pResourceRef->hResource; + pAllocRequest->hClient = hClient; + pAllocRequest->hParent = hParent; + pAllocRequest->pGpu = pGpu; + pAllocRequest->internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC; + pAllocRequest->pHwResource = &hwResource; + + // Unsure if we need to keep separate copies, but keeping old behavior for now. + sizeOut = pAllocData->size; + offsetOut = pAllocData->offset; + + { + // + // If using thwap to generate an allocation failure here, fail the + // alloc right away. + // + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + if (pKernelRc != NULL && + !krcTestAllowAlloc(pGpu, pKernelRc, + NV_ROBUST_CHANNEL_ALLOCFAIL_HEAP)) + { + rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; + goto failed; + } + } + + rmStatus = sysmemInitAllocRequest(pGpu, pSystemMemory, pAllocRequest); + + if (rmStatus != NV_OK) + goto failed; + + NV_ASSERT(pAllocRequest->pMemDesc); + pMemDesc = pAllocRequest->pMemDesc; + + // Copy final heap size/offset back to client struct + // + // What should we return ?. System or the Device physical address. + // Return the Device physical address for now. + // May change with the heap refactoring !. + // + // System and Device physical address can be got using the nv0041CtrlCmdGetSurfacePhysAttr ctrl call + offsetOut = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + sizeOut = pMemDesc->Size; + pAllocData->limit = sizeOut - 1; + + // + // For system memory default to GPU uncached. GPU caching is different from + // the expected default memory model since it is not coherent. Clients must + // understand this an handle any coherency requirements explicitly. 
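+ // Hence _GPU_CACHEABLE_DEFAULT is resolved to _NO below; only an explicit
+ // _GPU_CACHEABLE_YES request yields a GPU-cached memory descriptor.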
+ // + if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) == + NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT) + { + pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + pAllocData->attr2); + } + + if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) == + NVOS32_ATTR2_GPU_CACHEABLE_YES) + { + gpuCacheAttrib = NV_MEMORY_CACHED; + } + else + { + gpuCacheAttrib = NV_MEMORY_UNCACHED; + } + + if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_UNCACHED) + Cache = NV_MEMORY_UNCACHED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_CACHED) + Cache = NV_MEMORY_CACHED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_COMBINE) + Cache = NV_MEMORY_WRITECOMBINED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_THROUGH) + Cache = NV_MEMORY_CACHED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_PROTECT) + Cache = NV_MEMORY_CACHED; + else if (DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr) == NVOS32_ATTR_COHERENCY_WRITE_BACK) + Cache = NV_MEMORY_CACHED; + else + Cache = 0; + + ct_assert(NVOS32_ATTR_COHERENCY_UNCACHED == NVOS02_FLAGS_COHERENCY_UNCACHED); + ct_assert(NVOS32_ATTR_COHERENCY_CACHED == NVOS02_FLAGS_COHERENCY_CACHED); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_COMBINE == NVOS02_FLAGS_COHERENCY_WRITE_COMBINE); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_THROUGH == NVOS02_FLAGS_COHERENCY_WRITE_THROUGH); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_PROTECT == NVOS02_FLAGS_COHERENCY_WRITE_PROTECT); + ct_assert(NVOS32_ATTR_COHERENCY_WRITE_BACK == NVOS02_FLAGS_COHERENCY_WRITE_BACK); + + flags = DRF_DEF(OS02, _FLAGS, _LOCATION, _PCI) | + DRF_DEF(OS02, _FLAGS, _MAPPING, _NO_MAP) | + DRF_NUM(OS02, _FLAGS, _COHERENCY, DRF_VAL(OS32, _ATTR, _COHERENCY, pAllocData->attr)); + + NV_ASSERT(memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM); + memdescSetCpuCacheAttrib(pMemDesc, Cache); + + if (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_KERNEL_MODE, NV_FALSE); + + if (pAllocData->flags & NVOS32_ALLOC_FLAGS_TURBO_CIPHER_ENCRYPTED) + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED, NV_TRUE); + + if (FLD_TEST_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, pAllocData->attr2)) + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_MEMORY_TYPE_DISPLAY_NISO, NV_TRUE); + + memdescSetFlag(pMemDesc, MEMDESC_FLAGS_SYSMEM_OWNED_BY_CLIENT, NV_TRUE); + + memdescSetGpuCacheAttrib(pMemDesc, gpuCacheAttrib); + + rmStatus = memdescAlloc(pMemDesc); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "*** Cannot allocate sysmem through fb heap\n"); + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + goto failed; + } + + if (FLD_TEST_DRF(OS32, _ATTR2, _NISO_DISPLAY, _YES, pAllocData->attr2)) + { + KernelDisplay *pKernelDisplay = GPU_GET_KERNEL_DISPLAY(pGpu); + NvU64 physAddrStart; + NvU64 physAddrEnd; + + NV_ASSERT_OR_RETURN(pKernelDisplay != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pKernelDisplay->pStaticInfo != NULL, NV_ERR_INVALID_STATE); + + if (pKernelDisplay->pStaticInfo->bFbRemapperEnabled) + { + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + + physAddrStart = memdescGetPhysAddr(pMemDesc, AT_GPU, 0); + physAddrEnd = memdescGetPhysAddr(pMemDesc, AT_GPU, pAllocData->limit); + + rmStatus = kmemsysCheckDisplayRemapperRange_HAL(pGpu, pKernelMemorySystem, physAddrStart, physAddrEnd); + + if (rmStatus != NV_OK) + { + memdescFree(pMemDesc); + 
memdescDestroy(pMemDesc); + goto failed; + } + } + } + + // ClientDB can set the pagesize for memdesc. + // With GPU SMMU mapping, this needs to be set on the SMMU memdesc. + // So SMMU allocation should happen before memConstructCommon() + // Eventually SMMU allocation will be part of memdescAlloc(). + + // An SMMU mapping will be added to SYSMEM allocations in the following cases: + // 1. BIG page allocations with non-contiguous SYSMEM in Tegra. + // 2. RM clients forcing SMMU mapping via flags. + // GPU Arch verification with VPR is one such usecase. + // + // fbAlloc_GF100() will set the page size attribute to BIG for these cases. + + if (FLD_TEST_DRF(OS32, _ATTR2, _SMMU_ON_GPU, _ENABLE, pAllocData->attr2)) + { + NV_PRINTF(LEVEL_ERROR, "SMMU mapping allocation is not supported.\n"); + NV_ASSERT(0); + rmStatus = NV_ERR_NOT_SUPPORTED; + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + goto failed; + } + else if ((FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, pAllocData->attr) || + FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, pAllocData->attr)) && + FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, pAllocData->attr) && + (stdmemGetSysmemPageSize_HAL(pGpu, pStdMemory) == RM_PAGE_SIZE)) + { + NV_PRINTF(LEVEL_ERROR, + "Non-contiguous allocation not supported where requested page size is larger than sysmem page size.\n"); + NV_ASSERT(0); + rmStatus = NV_ERR_NOT_SUPPORTED; + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + goto failed; + } + + rmStatus = memConstructCommon(pMemory, pAllocRequest->classNum, flags, pMemDesc, 0, + NULL, pAllocData->attr, pAllocData->attr2, 0, 0, + pAllocData->tag, &hwResource); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "*** Cannot add symem through fb heap to client db\n"); + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + goto failed; + } + + // + // We need to force a kernel mapping of system memory-backed notifiers + // allocated in this path. + // + if (pAllocData->type == NVOS32_TYPE_NOTIFIER) + { + rmStatus = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_FALSE); + if (rmStatus != NV_OK) + { + memDestructCommon(pMemory); + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + goto failed; + } + } + + if (IS_VIRTUAL(pGpu)) + { + NvU32 os02Flags; + NvU32 os32Flags = pAllocData->flags; + + // NVOS32_TYPE_NOTIFIER notifier indicates kernel mapping in this path + if (pAllocData->type == NVOS32_TYPE_NOTIFIER) + os32Flags |= NVOS32_ALLOC_FLAGS_KERNEL_MAPPING_MAP; + + // + // Calculate os02flags as VGPU plugin allocates sysmem with legacy + // RmAllocMemory API + // + rmStatus = RmDeprecatedConvertOs32ToOs02Flags(pAllocData->attr, + pAllocData->attr2, + os32Flags, + &os02Flags); + + if (rmStatus == NV_OK) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. 
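+ // If the RPC fails, the construction done so far is unwound further below
+ // (memDestructCommon plus freeing the descriptor) before returning the error.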
+ // + NV_RM_RPC_ALLOC_MEMORY(pGpu, + hClient, + hParent, + pAllocRequest->hMemory, + pAllocRequest->classNum, + os02Flags, + pMemDesc, + rmStatus); + } + + if (rmStatus != NV_OK) + { + memDestructCommon(pMemory); + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + goto failed; + } + + pMemory->bRpcAlloc = NV_TRUE; + } + + pAllocData->size = sizeOut; + pAllocData->offset = offsetOut; + + stdmemDumpOutputAllocParams(pAllocData); + +failed: + return rmStatus; +} + +NV_STATUS +sysmemCtrlCmdGetSurfaceNumPhysPages_IMPL +( + SystemMemory *pSystemMemory, + NV003E_CTRL_GET_SURFACE_NUM_PHYS_PAGES_PARAMS *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_STATUS status; + + NV_ASSERT_OR_RETURN(memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_SYSMEM, NV_ERR_NOT_SUPPORTED); + + status = osGetNumMemoryPages(pMemory->pMemDesc, + &pParams->numPages); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get sysmem pages\n"); + } + + return status; +} + +NV_STATUS +sysmemCtrlCmdGetSurfacePhysPages_IMPL +( + SystemMemory *pSystemMemory, + NV003E_CTRL_GET_SURFACE_PHYS_PAGES_PARAMS *pParams +) +{ + Memory *pMemory = staticCast(pSystemMemory, Memory); + NV_STATUS status; + + NV_ASSERT_OR_RETURN(memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_SYSMEM, NV_ERR_NOT_SUPPORTED); + + status = osGetMemoryPages(pMemory->pMemDesc, + NvP64_VALUE(pParams->pPages), + &pParams->numPages); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to get sysmem pages\n"); + } + + return status; +} + +NV_STATUS +sysmemInitAllocRequest_HMM +( + OBJGPU *pGpu, + SystemMemory *pSystemMemory, + MEMORY_ALLOCATION_REQUEST *pAllocRequest +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + NV_STATUS status = NV_OK; + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + NV_ASSERT_TRUE_OR_GOTO(status, pFbAllocInfo != NULL, NV_ERR_NO_MEMORY, done); + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + NV_ASSERT_TRUE_OR_GOTO(status, pFbAllocPageFormat != NULL, NV_ERR_NO_MEMORY, done); + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + memUtilsInitFBAllocInfo(pAllocRequest->pUserParams, pFbAllocInfo, pAllocRequest->hClient, pAllocRequest->hParent); + + NV_ASSERT_OK_OR_GOTO(status, + memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo), + done); + + NV_ASSERT_OK_OR_GOTO(status, + sysmemAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo, pSystemMemory), + done); + +done: + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + return status; +} + +NV_STATUS +sysmemAllocResources +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo, + SystemMemory *pSystemMemory +) +{ + NV_STATUS status = NV_OK; + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvBool bAllocedMemDesc = NV_FALSE; + NvBool bContig = FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, pVidHeapAlloc->attr); + // + // BUG 3506666 + // While replaying a trace, it is possible for the playback OS to have a smaller page size + // than the capture OS so if we're running a replay where the requested page size is larger, + // assume this is a contiguous piece of memory, if contiguity is not specified. 
+ // + if (FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _DEFAULT, pVidHeapAlloc->attr)) + { + if ((FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _BIG, pVidHeapAlloc->attr) || + FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _HUGE, pVidHeapAlloc->attr)) && + (stdmemGetSysmemPageSize_HAL(pGpu, staticCast(pSystemMemory, StandardMemory)) == RM_PAGE_SIZE)) + { + bContig = NV_TRUE; + } + } + + // + // Check for virtual-only parameters used on physical allocs. + // + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY) + { + NV_PRINTF(LEVEL_ERROR, + "Virtual-only flag used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pVidHeapAlloc->attr2)) + { + NV_PRINTF(LEVEL_ERROR, + "Virtual-only 32-bit pointer attr used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + if (pVidHeapAlloc->hVASpace != 0) + { + NV_PRINTF(LEVEL_ERROR, + "VA space handle used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + NV_ASSERT(!(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR1) && !(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR2)); + + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + NV_PRINTF(LEVEL_ERROR, + "Expected fixed address allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, memUtilsAllocMemDesc(pGpu, pAllocRequest, pFbAllocInfo, &pMemDesc, NULL, + ADDR_SYSMEM, bContig, &bAllocedMemDesc), failed); + + // get possibly updated surface attributes + pVidHeapAlloc->attr = pFbAllocInfo->retAttr; + pVidHeapAlloc->attr2 = pFbAllocInfo->retAttr2; + + // update contiguity attribute to reflect memdesc + if (memdescGetContiguity(pAllocRequest->pMemDesc, AT_GPU)) + { + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, + pVidHeapAlloc->attr); + } + else + { + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _NONCONTIGUOUS, + pVidHeapAlloc->attr); + } + + pVidHeapAlloc->offset = pFbAllocInfo->offset; + + if (pAllocRequest->pHwResource != NULL) + { + pAllocRequest->pHwResource->attr = pFbAllocInfo->retAttr; + pAllocRequest->pHwResource->attr2 = pFbAllocInfo->retAttr2; + pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId; + pAllocRequest->pHwResource->comprCovg = pFbAllocInfo->comprCovg; + pAllocRequest->pHwResource->ctagOffset = pFbAllocInfo->ctagOffset; + pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId; + } + + return NV_OK; + +failed: + memmgrFreeHwResources(pGpu, pMemoryManager, pFbAllocInfo); + + if (bAllocedMemDesc) + { + memdescDestroy(pAllocRequest->pMemDesc); + pAllocRequest->pMemDesc = NULL; + } + + return status; +} diff --git a/src/nvidia/src/kernel/mem_mgr/vaddr_list.c b/src/nvidia/src/kernel/mem_mgr/vaddr_list.c new file mode 100644 index 000000000..0dd14a612 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/vaddr_list.c @@ -0,0 +1,250 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file vaddr_list.c + * @brief Virtual memory map tracking utility routines. + */ + +#include "mem_mgr/vaddr_list.h" + + /** + * @brief Init map tracker object + * + * @param[in] pVaList tracker object pointer + * + * @return NV_TRUE if tracker object is initialized successfully, NV_FALSE if not + */ +NV_STATUS vaListInit(VA_LIST *pVaList) +{ + VADDR_LIST_INFO *pVaListInfo = NULL; + VA_INFO *pVaInfo = NULL; + + NV_ASSERT_OR_RETURN(pVaList, NV_ERR_INVALID_ARGUMENT); + + mapInit(pVaList, portMemAllocatorGetGlobalNonPaged()); + + pVaListInfo = (VADDR_LIST_INFO *)portMemAllocNonPaged(sizeof(VADDR_LIST_INFO)); + if (pVaListInfo == NULL) + { + mapDestroy(pVaList); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + portMemSet(pVaListInfo, 0, sizeof(VADDR_LIST_INFO)); + pVaListInfo->bRelease = NV_TRUE; + + pVaInfo = mapInsertNew(pVaList, 0); + if (pVaInfo == NULL) + { + mapDestroy(pVaList); + portMemFree(pVaListInfo); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pVaInfo->pVaListInfo = pVaListInfo; + + return NV_OK; +} + +/** + * @brief Destroy map tracker object + * + * @param[in] pVaList tracker object pointer + * + * @return NV_TRUE if tracker object is initialized successfully, NV_FALSE if not + */ +void vaListDestroy(VA_LIST *pVaList) +{ + VA_INFO *pVaInfo = NULL; + + NV_ASSERT_OR_RETURN_VOID(pVaList); + + pVaInfo = mapFind(pVaList, 0); + if (pVaInfo) + { + NV_ASSERT(pVaInfo->vAddr == 0); + NV_ASSERT(pVaInfo->refCnt == 0); + NV_ASSERT(pVaInfo->pVaListInfo); + + portMemFree(pVaInfo->pVaListInfo); + pVaInfo->pVaListInfo = NULL; + + mapRemove(pVaList, pVaInfo); + } + else + { + DBG_BREAKPOINT(); + } + + if (mapCount(pVaList) != 0) + { + NV_PRINTF(LEVEL_ERROR, "non-zero mapCount(pVaList): 0x%x\n", + mapCount(pVaList)); + + DBG_BREAKPOINT(); + } + + mapDestroy(pVaList); +} + +NV_STATUS vaListSetManaged(VA_LIST *pVaList, NvBool bManaged) +{ + VA_INFO *pVaInfo = NULL; + + NV_ASSERT_OR_RETURN(pVaList, NV_ERR_INVALID_ARGUMENT); + + // We can change the prop only when no active maps + pVaInfo = mapFind(pVaList, 0); + NV_ASSERT_OR_RETURN(pVaInfo, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN((mapCount(pVaList) == 1) || + (pVaInfo->pVaListInfo->bRelease == !!bManaged), + NV_ERR_INVALID_STATE); + + pVaInfo->pVaListInfo->bRelease = bManaged; + + return NV_OK; +} + +NvBool vaListGetManaged(VA_LIST *pVaList) +{ + VA_INFO *pVaInfo = NULL; + + NV_ASSERT_OR_RETURN(pVaList, NV_FALSE); + + pVaInfo 
= mapFind(pVaList, 0); + NV_ASSERT_OR_RETURN(pVaInfo, NV_FALSE); + return pVaInfo->pVaListInfo->bRelease; +} + +NV_STATUS vaListAddVa(VA_LIST *pVaList, OBJVASPACE *pVAS, NvU64 vAddr) +{ + NV_STATUS status = NV_OK; + VA_INFO *pVaInfo = NULL; + + NV_ASSERT_OR_RETURN(pVaList, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVAS, NV_ERR_INVALID_ARGUMENT); + + pVaInfo = mapFind(pVaList, (NvU64)NV_PTR_TO_NvP64(pVAS)); + if (pVaInfo) + { + NV_ASSERT_OR_RETURN(pVaInfo->refCnt, NV_ERR_INVALID_STATE); + + if (pVaInfo->vAddr == vAddr) + { + pVaInfo->refCnt++; + } + else + { + DBG_BREAKPOINT(); + status = NV_ERR_INVALID_STATE; + } + } + else + { + pVaInfo = mapInsertNew(pVaList, (NvU64)NV_PTR_TO_NvP64(pVAS)); + if (pVaInfo) + { + pVaInfo->vAddr = vAddr; + pVaInfo->refCnt = 1; + } + else + { + DBG_BREAKPOINT(); + status = NV_ERR_INSUFFICIENT_RESOURCES; + } + } + + return status; +} + +NV_STATUS vaListRemoveVa(VA_LIST *pVaList, OBJVASPACE *pVAS) +{ + NV_STATUS status = NV_OK; + VA_INFO *pVaInfo = NULL; + + NV_ASSERT_OR_RETURN(pVaList, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVAS, NV_ERR_INVALID_ARGUMENT); + + pVaInfo = mapFind(pVaList, (NvU64)NV_PTR_TO_NvP64(pVAS)); + if (pVaInfo) + { + NV_ASSERT_OR_RETURN(pVaInfo->refCnt, NV_ERR_INVALID_STATE); + + pVaInfo->refCnt--; + + if (pVaInfo->refCnt == 0) + { + mapRemove(pVaList, pVaInfo); + } + } + else + { + status = NV_ERR_OBJECT_NOT_FOUND; + } + + return status; +} + +NV_STATUS vaListFindVa(VA_LIST *pVaList, OBJVASPACE *pVAS, NvU64 *pVaddr) +{ + NV_STATUS status = NV_OK; + VA_INFO *pVaInfo = NULL; + + NV_ASSERT_OR_RETURN(pVaList, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVAS, NV_ERR_INVALID_ARGUMENT); + + pVaInfo = mapFind(pVaList, (NvU64)NV_PTR_TO_NvP64(pVAS)); + if (pVaInfo) + { + NV_ASSERT_OR_RETURN(pVaInfo->refCnt, NV_ERR_INVALID_STATE); + *pVaddr = pVaInfo->vAddr; + } + else + { + status = NV_ERR_OBJECT_NOT_FOUND; + } + + return status; +} + +NV_STATUS vaListGetRefCount(VA_LIST *pVaList, OBJVASPACE *pVAS, NvU64 *refCount) +{ + NV_STATUS status = NV_OK; + VA_INFO *pVaInfo; + + NV_ASSERT_OR_RETURN(pVaList != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pVAS != NULL, NV_ERR_INVALID_ARGUMENT); + + pVaInfo = mapFind(pVaList, (NvU64)NV_PTR_TO_NvP64(pVAS)); + if (pVaInfo != NULL) + { + NV_ASSERT_OR_RETURN(pVaInfo->refCnt, NV_ERR_INVALID_STATE); + *refCount = pVaInfo->refCnt; + } + else + { + status = NV_ERR_OBJECT_NOT_FOUND; + } + + return status; +} diff --git a/src/nvidia/src/kernel/mem_mgr/vaspace.c b/src/nvidia/src/kernel/mem_mgr/vaspace.c new file mode 100644 index 000000000..f1963a18f --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/vaspace.c @@ -0,0 +1,311 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Rotuines ***************************\ +* Virtual Address Space Function Definitions. * +\***************************************************************************/ + + +#include "mem_mgr/vaspace.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "rmapi/rs_utils.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "gpu/mem_mgr/vaspace_api.h" + +void +vaspaceIncRefCnt_IMPL(OBJVASPACE *pVAS) +{ + pVAS->refCnt++; +} + +void +vaspaceDecRefCnt_IMPL(OBJVASPACE *pVAS) +{ + NV_ASSERT_OR_RETURN_VOID(pVAS->refCnt != 0); + pVAS->refCnt--; +} + +NV_STATUS +vaspaceFillAllocParams_IMPL +( + OBJVASPACE *pVAS, + const FB_ALLOC_INFO *pAllocInfo, + NvU64 *pSize, + NvU64 *pAlign, + NvU64 *pRangeLo, + NvU64 *pRangeHi, + NvU64 *pPageSizeLockMask, + VAS_ALLOC_FLAGS *pFlags +) +{ + NvBool bRestrictedVaRange = NV_FALSE; + NvBool bEnforce32bitPtr = NV_FALSE; + NvU32 vasFlags = vaspaceGetFlags(pVAS); + + OBJGPU *pGpu = gpumgrGetGpu(gpumgrGetDefaultPrimaryGpu(pVAS->gpuMask)); + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + bRestrictedVaRange = !!(pDma->getProperty(pDma, PDB_PROP_DMA_RESTRICT_VA_RANGE)); + bEnforce32bitPtr = !!(pDma->getProperty(pDma, PDB_PROP_DMA_ENFORCE_32BIT_POINTER)); + + // Apply default alignment policies to offset alignment and size. + NV_ASSERT_OK_OR_RETURN( + vaspaceApplyDefaultAlignment(pVAS, pAllocInfo, pAlign, pSize, + pPageSizeLockMask)); + + pFlags->bClientAllocation = !!(pAllocInfo->internalflags & NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC); + + if (pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + // Fixed address allocation implemented by restricting range. + *pRangeLo = pAllocInfo->offset; + *pRangeHi = pAllocInfo->offset + *pSize - 1; + } + else if (!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END)) + { + // If user didn't specify fixed or restricted range, allow full VAS range. + *pRangeLo = vaspaceGetVaStart(pVAS); + *pRangeHi = vaspaceGetVaLimit(pVAS); + + // + // For MODS we also allow restricting the range to 40 bits by default. + // This is needed for Pascal 49b support where some HW units can only + // access 40b VA. MODS must use range/fixed address allocations to + // get a VA above 40 bits in this mode. + // + if (bRestrictedVaRange && !(vasFlags & VASPACE_FLAGS_FLA)) + { + *pRangeHi = NV_MIN(*pRangeHi, NVBIT64(40) - 1); + } + } + + if (vaspaceIsInternalVaRestricted(pVAS)) // will be true only for MAC's GPUVA. + { + + if (pFlags->bClientAllocation) // client allocations + { + NvU64 partitionRangeLo = 0; + NvU64 partitionRangeHi = 0; + + // If 32 bit enforcement is set, route to the lower va range. 
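/*
 * [Editorial note - illustrative numbers only, not part of this change.]
 * The routing performed just below works out to the following ranges:
 *
 *   _32BIT_POINTER == _ENABLE : [vaspaceGetVaStart(pVAS), NVBIT64(32) - 1]
 *                               i.e. the low partition up to 0xFFFFFFFF
 *   otherwise                 : [NVBIT64(32), vaspaceGetVaLimit(pVAS)]
 *                               i.e. everything at or above 0x100000000
 *
 * Likewise, the restricted-VA clamp applied earlier in this function (for
 * requests that are neither fixed-address nor begin/end bounded) caps the
 * upper bound at NVBIT64(40) - 1 == 0xFFFFFFFFFF when
 * PDB_PROP_DMA_RESTRICT_VA_RANGE is set and the VA space is not an FLA
 * VA space.
 */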
+ if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pAllocInfo->pageFormat->attr2)) + { + partitionRangeLo = vaspaceGetVaStart(pVAS); + partitionRangeHi = NVBIT64(32) - 1; + } + else + { + // route to >4gig + partitionRangeLo = NVBIT64(32); + partitionRangeHi = vaspaceGetVaLimit(pVAS); + } + + // If fixed address is requested - the range should be entirely contained within the partition. + if (pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE || + pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) // Is this a valid expectation? + { + // Within the 32 bit or 64 bit partition. + if (!(*pRangeLo >= partitionRangeLo && *pRangeHi <=partitionRangeHi)) + { + return NV_ERR_INVALID_PARAMETER; + } + // both use_begin_end and fixed_addr_range will have this flag set + pFlags->bFixedAddressRange = NV_TRUE; + } + else + { + *pRangeLo = partitionRangeLo; + *pRangeHi = partitionRangeHi; + pFlags->bFixedAddressRange = NV_FALSE; + } + } + } + else + { + // + // Handle 32bit pointer requests. 32b pointers are forced below 32b + // on all chips. Non-32b requests are only forced on some chips, + // typically kepler, and only if there are no other address hints. + // + // If requested size cannot be satisfied with range above 4 GB, then relax that + // restriction. + // + if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pAllocInfo->pageFormat->attr2)) + { + *pRangeHi = NV_MIN(*pRangeHi, NVBIT64(32) - 1); + } + + else if (bEnforce32bitPtr && + !(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) && + !(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) && + ((*pRangeHi - *pRangeLo + 1 - *pSize) > NVBIT64(32)) && + !(vasFlags & VASPACE_FLAGS_FLA)) + { + *pRangeLo = NV_MAX(*pRangeLo, NVBIT64(32)); + } + } + + if ((*pRangeHi - *pRangeLo + 1) < *pSize) // Moved the range check here + { + NV_PRINTF(LEVEL_ERROR, + "Requested size 0x%llx more than available range. RangeLo=0x%llx, RangeHi=0x%llx\n", + *pSize, *pRangeLo, *pRangeHi); + NV_ASSERT_OR_RETURN(0, NV_ERR_INSUFFICIENT_RESOURCES); + } + + // Convert flags. + pFlags->bReverse = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN); + + pFlags->bPreferSysmemPageTables = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_PREFER_PTES_IN_SYSMEMORY); + + pFlags->bExternallyManaged = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED); + + pFlags->bLazy = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_LAZY); + + pFlags->bSparse = + !!(pAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_SPARSE); + + // + // The protected flag for kernel allocations is honoured only + // if this is a root client(kernel client). 
+ // + pFlags->bPrivileged = pAllocInfo->bIsKernelAlloc; + + return NV_OK; +} + +NvU64 +vaspaceGetVaStart_IMPL(OBJVASPACE *pVAS) +{ + return pVAS->vasStart; +} + +NvU64 +vaspaceGetVaLimit_IMPL(OBJVASPACE *pVAS) +{ + return pVAS->vasLimit; +} + +void +vaspaceInvalidateTlb_IMPL +( + OBJVASPACE *pVAS, + OBJGPU *pGpu, + VAS_PTE_UPDATE_TYPE type +) +{ + NV_ASSERT(0); +} + +NvBool +vaspaceIsInternalVaRestricted_IMPL(OBJVASPACE *pVAS) +{ + return NV_FALSE; +} + +NV_STATUS +vaspaceGetByHandleOrDeviceDefault_IMPL +( + RsClient *pClient, + NvHandle hDeviceOrSubDevice, + NvHandle hVASpace, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + NvHandle _hDeviceOrSubDevice; + Device *pDevice = NULL; + RsResourceRef *pResourceRef; + + if (hVASpace == NV01_NULL_OBJECT) + { + if (hDeviceOrSubDevice == 0) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + _hDeviceOrSubDevice = hDeviceOrSubDevice; + } + else + { + status = serverutilGetResourceRefWithType(pClient->hClient, hVASpace, + classId(VaSpaceApi), &pResourceRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid object handle 0x%x\n", hVASpace); + return status; + } + _hDeviceOrSubDevice = pResourceRef->pParentRef ? pResourceRef->pParentRef->hResource : 0; + } + + status = deviceGetByHandle(pClient, _hDeviceOrSubDevice, &pDevice); + if (status != NV_OK) + { + Subdevice *pSubdevice; + + status = subdeviceGetByHandle(pClient, _hDeviceOrSubDevice, &pSubdevice); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid parent handle!\n"); + return status; + } + + pDevice = pSubdevice->pDevice; + } + + // Allocates/Finds VA Space according to the handle type. + if (hVASpace == NV01_NULL_OBJECT) + { + // Check the vaspace mode + if (pDevice->vaMode == NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES) + { + NV_PRINTF(LEVEL_ERROR, + "VA mode %d (PRIVATE) doesn't support allocating an implicit VA space.\n", + pDevice->vaMode); + return NV_ERR_INVALID_STATE; + } + return deviceGetDefaultVASpace(pDevice, ppVAS); + } + else + { + VaSpaceApi *pVaSpaceApi = NULL; + // Check the vaspace mode + if (pDevice->vaMode == NV_DEVICE_ALLOCATION_VAMODE_SINGLE_VASPACE) + { + NV_PRINTF(LEVEL_ERROR, + "VA mode %d (GLOBAL) doesn't support allocating private VA spaces.\n", + pDevice->vaMode); + return NV_ERR_INVALID_STATE; + } + pVaSpaceApi = dynamicCast(pResourceRef->pResource, VaSpaceApi); + *ppVAS = pVaSpaceApi->pVASpace; + } + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/mem_mgr/video_mem.c b/src/nvidia/src/kernel/mem_mgr/video_mem.c new file mode 100644 index 000000000..0ad341d06 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/video_mem.c @@ -0,0 +1,1344 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr/video_mem.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/heap.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "gpu/mem_sys/kern_mem_sys.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "vgpu/rpc.h" +#include "core/locks.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "diagnostics/gpu_acct.h" +#include "Nvcm.h" +#include "gpu/bus/third_party_p2p.h" + +#include "class/cl0040.h" // NV01_MEMORY_LOCAL_USER + +/*! + * _vidmemQueryAlignment + * + * @brief + * Returns the size and alignment for this allocation. + * + * @param[in] pRVHCP Pointer to RmVidHeapControlParams data + * @param[in] pAllocData Pointer to VIDHEAP_ALLOC_DATA + * @param[out] pSize The size aligned to the HW/requested alignment + * @param[out] pAlign The alignment required for this allocation. + + * @returns + * NV_OK Operation is successful. + */ +static NV_STATUS +_vidmemQueryAlignment +( + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + NvU64 *pSize, + NvU64 *pAlign +) +{ + NV_MEMORY_ALLOCATION_PARAMS *pAllocData = pAllocRequest->pUserParams; + OBJGPU *pGpu = pAllocRequest->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS rmStatus = NV_OK; + NvU64 size = pAllocData->size; + NvU32 pageSize = 0; + NvU64 align = 0; + NvU32 retAttr = pAllocData->attr; + NvU32 retAttr2 = pAllocData->attr2; + + NV_ASSERT_OR_RETURN((NULL != pSize) && (NULL != pAlign), + NV_ERR_INVALID_ARGUMENT); + + if ((pAllocData->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_HINT) || + (pAllocData->flags & NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE)) + align = pAllocData->alignment; + else + align = RM_PAGE_SIZE; + + // Fetch RM page size + pageSize = memmgrDeterminePageSize(pMemoryManager, pAllocRequest->hClient, size, pAllocData->format, + pAllocData->flags, &retAttr, &retAttr2); + + if (pageSize == 0) + { + rmStatus = NV_ERR_INVALID_STATE; + NV_ASSERT_OK_FAILED("memmgrDeterminePageSize", rmStatus); + return rmStatus; + } + + // Fetch memory alignment + NV_ASSERT_OK_OR_RETURN(memmgrAllocDetermineAlignment_HAL(pGpu, pMemoryManager, + &size, &align, 0, pAllocData->flags, retAttr, retAttr2, 0)); + + *pSize = size; + *pAlign = align + 1; + + return rmStatus; +} + +/*! + * _vidmemPmaAllocate + * + * @brief + * Allocates memory on vidmem through PMA. + * + * @param[in] pHeap Pointer to Heap object + * @param[in] pAllocRequest Pointer to the MEMORY_ALLOCATION_REQUEST. + * + * @returns + * NV_OK Operation is successful + * NV_ERR_* Error code in case of errors. 
+ */ +static NV_STATUS +_vidmemPmaAllocate +( + Heap *pHeap, + MEMORY_ALLOCATION_REQUEST *pAllocRequest +) +{ + NV_MEMORY_ALLOCATION_PARAMS *pAllocData = pAllocRequest->pUserParams; + OBJGPU *pGpu = pAllocRequest->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + PMA *pPma = &pHeap->pmaObject; + NvU64 size = 0; + NvU32 pageCount; + NvU32 pmaInfoSize; + NvU32 pageSize; + NV_STATUS status; + NvU64 sizeAlign = 0; + PMA_ALLOCATION_OPTIONS allocOptions = {0}; + NvBool bContig = !FLD_TEST_DRF(OS32, _ATTR, + _PHYSICALITY, _NONCONTIGUOUS, + pAllocData->attr); + NvU32 subdevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + // LOCK: acquire device lock + status = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_MEM_PMA); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + + NV_PRINTF(LEVEL_INFO, "PMA input\n"); + NV_PRINTF(LEVEL_INFO, " Owner: 0x%x\n", pAllocData->owner); + NV_PRINTF(LEVEL_INFO, " hMemory: 0x%x\n", pAllocRequest->hMemory); + NV_PRINTF(LEVEL_INFO, " Type: 0x%x\n", pAllocData->type); + NV_PRINTF(LEVEL_INFO, " Flags: 0x%x\n", pAllocData->flags); + NV_PRINTF(LEVEL_INFO, " Begin: 0x%08llx\n", pAllocData->rangeLo); + NV_PRINTF(LEVEL_INFO, " End: 0x%08llx\n", pAllocData->rangeHi); + NV_PRINTF(LEVEL_INFO, " Height: 0x%x\n", pAllocData->height); + NV_PRINTF(LEVEL_INFO, " Width: 0x%x\n", pAllocData->width); + NV_PRINTF(LEVEL_INFO, " Pitch: 0x%x\n", pAllocData->pitch); + NV_PRINTF(LEVEL_INFO, " Size: 0x%08llx\n", pAllocData->size); + NV_PRINTF(LEVEL_INFO, " Alignment: 0x%08llx\n", + pAllocData->alignment); + NV_PRINTF(LEVEL_INFO, " Offset: 0x%08llx\n", pAllocData->offset); + NV_PRINTF(LEVEL_INFO, " Attr: 0x%x\n", pAllocData->attr); + NV_PRINTF(LEVEL_INFO, " Attr2: 0x%x\n", pAllocData->attr2); + NV_PRINTF(LEVEL_INFO, " Format: 0x%x\n", pAllocData->format); + NV_PRINTF(LEVEL_INFO, " ComprCovg: 0x%x\n", pAllocData->comprCovg); + NV_PRINTF(LEVEL_INFO, " ZCullCovg: 0x%x\n", pAllocData->zcullCovg); + NV_PRINTF(LEVEL_INFO, " CtagOffset: 0x%x\n", pAllocData->ctagOffset); + NV_PRINTF(LEVEL_INFO, " hVASpace: 0x%x\n", pAllocData->hVASpace); + + // Get the page size returned by RM. + pageSize = stdmemQueryPageSize(pMemoryManager, pAllocRequest->hClient, pAllocData); + + if (pageSize == 0) + { + status = NV_ERR_INVALID_STATE; + } + else if (pageSize == RM_PAGE_SIZE) + { + // + // TODO Remove this after the suballocator is in place + // Minimum granularity of PMA is 64K. + // + pageSize = RM_PAGE_SIZE_64K; + } + + // Get the alignment returned by RM without actually allocating. + status = _vidmemQueryAlignment(pAllocRequest, &size, &sizeAlign); + + // + // Pass the turn blacklist off flag to PMA Allocation API + // No need for FB check since PMA only manages FB + // Bug:2451834, gpuCheckPageRetirementSupport should not be called outside + // RM lock. + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ALLOW_PAGE_RETIREMENT) && + gpuCheckPageRetirementSupport_HAL(pGpu) && + FLD_TEST_DRF(OS32, _ATTR2, _BLACKLIST, _OFF, pAllocData->attr2)) + { + allocOptions.flags |= PMA_ALLOCATE_TURN_BLACKLIST_OFF; + } + + // UNLOCK: release device lock + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // RM allocations are always pinned. + allocOptions.flags |= PMA_ALLOCATE_PINNED; + + if (pAllocData->flags & NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM) + { + allocOptions.flags |= PMA_ALLOCATE_PERSISTENT; + } + + // Check for VPR region. 
+ if (pAllocData->flags & NVOS32_ALLOC_FLAGS_PROTECTED) + { + allocOptions.flags |= PMA_ALLOCATE_PROTECTED_REGION; + } + + // Fixed address allocations. + if (pAllocData->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + { + NvU64 offsetAlign = NV_MAX(sizeAlign, pageSize); + + allocOptions.flags |= PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE; + allocOptions.physBegin = NV_ALIGN_DOWN(pAllocData->offset, offsetAlign); + allocOptions.physEnd = NV_ALIGN_UP(allocOptions.physBegin + size, offsetAlign) - 1; + size = allocOptions.physEnd - allocOptions.physBegin + 1; + } + + // Range based allocations. + if (pAllocData->flags & NVOS32_ALLOC_FLAGS_USE_BEGIN_END) + { + allocOptions.flags |= PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE; + allocOptions.physBegin = NV_ALIGN_UP(pAllocData->rangeLo, ((NvU64)pageSize)); + allocOptions.physEnd = NV_ALIGN_DOWN(pAllocData->rangeHi, ((NvU64)pageSize)); + allocOptions.physEnd = (allocOptions.physEnd > 0) ? + allocOptions.physEnd - 1 : 0; + NV_ASSERT_OR_RETURN(allocOptions.physBegin <= allocOptions.physEnd, + NV_ERR_INVALID_ARGUMENT); + } + + // Skip scrubber, used only by scrubber construction path + if (pAllocData->internalflags & NVOS32_ALLOC_INTERNAL_FLAGS_SKIP_SCRUB) + { + allocOptions.flags |= PMA_ALLOCATE_NO_ZERO; + } + + // Pass along client requested alignment + allocOptions.flags |= PMA_ALLOCATE_FORCE_ALIGNMENT; + allocOptions.alignment = NV_MAX(sizeAlign, pageSize); + + + // Get the number of pages to be allocated by PMA + pageCount = (NvU32) NV_DIV_AND_CEIL(size, pageSize); + +retry_alloc: + // Evaluate the size of the PMA_ALLOC_INFO struct. + if (bContig) + { + allocOptions.flags |= PMA_ALLOCATE_CONTIGUOUS; + pmaInfoSize = sizeof(PMA_ALLOC_INFO); + } + else + { + pmaInfoSize = sizeof(PMA_ALLOC_INFO) + ((pageCount - 1) * sizeof(NvU64)); + } + + // Alloc the tracking structure and store the values in it. + pAllocRequest->pPmaAllocInfo[subdevInst] = portMemAllocNonPaged(pmaInfoSize); + NV_ASSERT_OR_RETURN(NULL != pAllocRequest->pPmaAllocInfo[subdevInst], NV_ERR_NO_MEMORY); + portMemSet(pAllocRequest->pPmaAllocInfo[subdevInst], 0, pmaInfoSize); + + pAllocRequest->pPmaAllocInfo[subdevInst]->pageSize = pageSize; + pAllocRequest->pPmaAllocInfo[subdevInst]->pageCount = pageCount; + pAllocRequest->pPmaAllocInfo[subdevInst]->allocSize = ((NvU64) pageCount) * pageSize; + pAllocRequest->pPmaAllocInfo[subdevInst]->bContig = bContig; + pAllocRequest->pPmaAllocInfo[subdevInst]->refCount = 1; + pAllocRequest->pPmaAllocInfo[subdevInst]->flags = allocOptions.flags; + + NV_PRINTF(LEVEL_INFO, "\nNVRM: Size requested: 0x%llx bytes\n", size); + NV_PRINTF(LEVEL_INFO, " PageSize: 0x%x bytes\n", pageSize); + NV_PRINTF(LEVEL_INFO, " PageCount: 0x%x\n", pageCount); + NV_PRINTF(LEVEL_INFO, " Actual Size: 0x%llx\n", + pAllocRequest->pPmaAllocInfo[subdevInst]->allocSize); + NV_PRINTF(LEVEL_INFO, " Contiguous: %s\n", bContig ? "YES" : "NO"); + + // Get the allocation from PMA. 
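/*
 * [Editorial summary - not part of this change; values are illustrative.]
 * At this point allocOptions has been composed, based on the checks above,
 * roughly as follows:
 *
 *   PMA_ALLOCATE_PINNED                 always (RM allocations are pinned)
 *   PMA_ALLOCATE_TURN_BLACKLIST_OFF     page retirement allowed/supported and
 *                                       _ATTR2 _BLACKLIST == _OFF
 *   PMA_ALLOCATE_PERSISTENT             NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM
 *   PMA_ALLOCATE_PROTECTED_REGION       NVOS32_ALLOC_FLAGS_PROTECTED (VPR)
 *   PMA_ALLOCATE_SPECIFY_ADDRESS_RANGE  fixed-address or begin/end requests
 *   PMA_ALLOCATE_NO_ZERO                internal SKIP_SCRUB allocations
 *   PMA_ALLOCATE_FORCE_ALIGNMENT        always, alignment = NV_MAX(sizeAlign, pageSize)
 *   PMA_ALLOCATE_CONTIGUOUS             bContig (cleared if the noncontiguous
 *                                       retry path below is taken)
 *
 * Example of the size arithmetic, ignoring any padding added by
 * _vidmemQueryAlignment: a 200 KiB request against the 64 KiB minimum PMA
 * granularity gives
 *
 *   pageCount = NV_DIV_AND_CEIL(200 KiB, 64 KiB) = 4
 *   allocSize = pageCount * pageSize             = 256 KiB
 */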
+ status = pmaAllocatePages(pPma, pageCount, pageSize, &allocOptions, + pAllocRequest->pPmaAllocInfo[subdevInst]->pageArray); + if (NV_OK != status) + { + portMemFree(pAllocRequest->pPmaAllocInfo[subdevInst]); + pAllocRequest->pPmaAllocInfo[subdevInst] = NULL; + + if (bContig) + { + if (FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _ALLOW_NONCONTIGUOUS, pAllocData->attr) || + (FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _DEFAULT, pAllocData->attr) && + pHeap->getProperty(pHeap, PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT))) + { + bContig = NV_FALSE; + allocOptions.flags &= ~PMA_ALLOCATE_CONTIGUOUS; + NV_PRINTF(LEVEL_INFO, + "pmaAllocatePages failed -- retrying as noncontiguous\n"); + goto retry_alloc; + } + } + + NV_PRINTF(LEVEL_WARNING, "pmaAllocatePages failed (%x)\n", status); + } + else + { + pAllocData->attr = (bContig ? + FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS, pAllocData->attr) : + FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, pAllocData->attr)); + } + + return status; +} + +/*! + * vidmemPmaFree + * + * @brief + * Frees the memory allocated by PMA + * + * @param[in] pGpu Pointer to OBJGPU + * @param[in] pHeap Pointer to Heap object + * @param[in] pPmaAllocInfo Pointer to the PMA allocation tracking structure + * @param[in] flag Flags to modify PMA behavior + * + * @returns NONE + */ +void +vidmemPmaFree +( + OBJGPU *pGpu, + Heap *pHeap, + PMA_ALLOC_INFO *pPmaAllocInfo, + NvU32 flags +) +{ + PMA *pPma = &pHeap->pmaObject; + NvU32 pmaFreeFlags = flags; + + NV_ASSERT_OR_RETURN_VOID(NULL != pPmaAllocInfo); + + // Decrement the refcount and free only in case of zero references. + pPmaAllocInfo->refCount--; + if (pPmaAllocInfo->refCount != 0) + { + return; + } + + // + // Skip the scrubber if the memory is allocated with scrubber skipped. + // The only use case is scrubber internal allocations. 
+ // + if (pPmaAllocInfo->flags & PMA_ALLOCATE_NO_ZERO) + { + pmaFreeFlags |= PMA_FREE_SKIP_SCRUB; + } + + if (pPmaAllocInfo->bContig) + { + pmaFreePages(pPma, pPmaAllocInfo->pageArray, 1, + pPmaAllocInfo->allocSize, pmaFreeFlags); + } + else + { + pmaFreePages(pPma, pPmaAllocInfo->pageArray, + pPmaAllocInfo->pageCount, + pPmaAllocInfo->pageSize, pmaFreeFlags); + } + portMemFree(pPmaAllocInfo); + pPmaAllocInfo = NULL; +} + +Heap* +vidmemGetHeap +( + OBJGPU *pGpu, + NvHandle hClient, + NvBool bSubheap +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status = NV_OK; + + if (bSubheap) + { + Heap *pHeap = memmgrGetDeviceSuballocator(pMemoryManager, bSubheap); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, pHeap != NULL && pHeap->heapType == HEAP_TYPE_PHYS_MEM_SUBALLOCATOR, NULL); + return pHeap; + } + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + Heap *pMemoryPartitionHeap = NULL; + + status = kmigmgrGetMemoryPartitionHeapFromClient(pGpu, pKernelMIGManager, hClient, &pMemoryPartitionHeap); + if (status == NV_OK) + { + if (pMemoryPartitionHeap != NULL) + return pMemoryPartitionHeap; + } + else + { + NV_PRINTF(LEVEL_ERROR, + "failed to get memory partition heap for hClient = 0x%x\n", + hClient); + return NULL; + } + } + + return MEMORY_MANAGER_GET_HEAP(pMemoryManager); +} + +static NV_STATUS +vidmemCopyConstruct +( + VideoMemory *pVideoMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemorySrc = dynamicCast(pParams->pSrcRef->pResource, Memory); + OBJGPU *pGpu = pMemorySrc->pGpu; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(!memdescGetCustomHeap(pMemorySrc->pMemDesc), NV_ERR_INVALID_ARGUMENT); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + MEMORY_DESCRIPTOR *pSrcMemDesc = memdescGetMemDescFromGpu(pMemorySrc->pMemDesc, pGpu); + status = heapReference(pGpu, pSrcMemDesc->pHeap, pMemorySrc->HeapOwner, + pSrcMemDesc); + NV_ASSERT(status == NV_OK); + SLI_LOOP_END + + return NV_OK; +} + +/*! + * vidmemConstruct + * + * @brief + * This routine provides common allocation services used by the + * following heap allocation functions: + * NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT + * NVOS32_FUNCTION_ALLOC_SIZE + * NVOS32_FUNCTION_ALLOC_SIZE_RANGE + * NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT + * + * @param[in] pVideoMemory Pointer to VideoMemory object + * @param[in] pCallContext Pointer to the current CALL_CONTEXT. + * @param[in] pParams Pointer to the alloc params + * + * @return 'NV_OK' + * Operation completed successfully. + * @return 'NV_ERR_NO_MEMORY' + * There is not enough available memory to satisfy allocation request. + * @return 'NV_ERR_NOT_SUPPORTED' + * Operation not supported on broken FB. + * @return 'NV_ERR_INSUFFICIENT_RESOURCES' + * Not enough available resources to satisfy allocation request. 
+ */ +NV_STATUS +vidmemConstruct_IMPL +( + VideoMemory *pVideoMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemory = staticCast(pVideoMemory, Memory); + NV_MEMORY_ALLOCATION_PARAMS *pAllocData = pParams->pAllocParams; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + MEMORY_ALLOCATION_REQUEST allocRequest = {0}; + MEMORY_ALLOCATION_REQUEST *pAllocRequest = &allocRequest; + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + Heap *pHeap; + NvBool bSubheap = NV_FALSE; + MEMORY_DESCRIPTOR *pTopLevelMemDesc = NULL; + MEMORY_DESCRIPTOR *pTempMemDesc = NULL; + HWRESOURCE_INFO hwResource; + RsClient *pRsClient = pCallContext->pClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NvU32 gpuCacheAttrib; + NvBool bIsPmaAlloc = NV_FALSE; + NvU64 sizeOut; + NvU64 offsetOut; + NvU64 offsetOutTemp; + NvBool bLockAcquired = NV_FALSE; + NvU32 attr = 0; + NvU32 attr2 = 0; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + NV_STATUS rmStatus = NV_OK; + + if (RS_IS_COPY_CTOR(pParams)) + { + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance) && !rmGpuLockIsOwner()) + { + NV_ASSERT_OK_OR_GOTO(rmStatus, + rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_MEM), + done); + + bLockAcquired = NV_TRUE; + } + + rmStatus = vidmemCopyConstruct(pVideoMemory, pCallContext, pParams); + goto done; + } + + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, stdmemValidateParams(pGpu, hClient, pAllocData)); + NV_CHECK_OR_RETURN(LEVEL_WARNING, + DRF_VAL(OS32, _ATTR, _LOCATION, pAllocData->attr) == NVOS32_ATTR_LOCATION_VIDMEM && + !(pAllocData->flags & NVOS32_ALLOC_FLAGS_VIRTUAL), + NV_ERR_INVALID_ARGUMENT); + + stdmemDumpInputAllocParams(pAllocData, pCallContext); + + bSubheap = FLD_TEST_DRF(OS32, _ATTR2, _ALLOCATE_FROM_SUBHEAP, _YES, pAllocData->attr2); + pHeap = vidmemGetHeap(pGpu, hClient, bSubheap); + NV_CHECK_OR_RETURN(LEVEL_INFO, pHeap != NULL, NV_ERR_INVALID_STATE); + + pAllocRequest->classNum = NV01_MEMORY_LOCAL_USER; + pAllocRequest->pUserParams = pAllocData; + pAllocRequest->hMemory = pResourceRef->hResource; + pAllocRequest->hClient = hClient; + pAllocRequest->hParent = hParent; + pAllocRequest->pGpu = pGpu; + pAllocRequest->internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC; + pAllocRequest->pHwResource = &hwResource; + + // Unsure if we need to keep separate copies, but keeping old behavior for now. + sizeOut = pAllocData->size; + offsetOut = pAllocData->offset; + + bIsPmaAlloc = memmgrIsPmaInitialized(pMemoryManager) && + !bSubheap && + !(pAllocData->flags & NVOS32_ALLOC_FLAGS_WPR1) && + !(pAllocData->flags & NVOS32_ALLOC_FLAGS_WPR2) && + (!(pAllocData->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) || + heapIsPmaManaged(pGpu, pHeap, pAllocData->offset, pAllocData->offset+pAllocData->size-1)); + + // Scrub-on-free is not supported by heap. Make sure clients don't get unscrubbed allocations + NV_CHECK_OR_RETURN(LEVEL_WARNING, + !memmgrIsScrubOnFreeEnabled(pMemoryManager) || RMCFG_FEATURE_PLATFORM_MODS || bIsPmaAlloc || bSubheap, + NV_ERR_INVALID_STATE); + + // Get the allocation from PMA if enabled. 
+ if (bIsPmaAlloc) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + pAllocRequest->pGpu = pGpu; + rmStatus = _vidmemPmaAllocate(vidmemGetHeap(pGpu, hClient, NV_FALSE), pAllocRequest); + if (NV_OK != rmStatus) + SLI_LOOP_GOTO(done); + SLI_LOOP_END; + } + + if (RMCFG_FEATURE_RM_BASIC_LOCK_MODEL) + { + // + // Can't move locking up as PMA locks need to be taken first. + // Acquire the lock *only after* PMA is done allocating. + // + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance) && !rmGpuLockIsOwner()) + { + rmStatus = rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_MEM); + NV_ASSERT_OR_GOTO(NV_OK == rmStatus, done); + + bLockAcquired = NV_TRUE; + } + else + { + NV_ASSERT(0); + } + } + + { + // + // If using thwap to generate an allocation failure here, fail the + // alloc right away. + // + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + if (pKernelRc != NULL && + !krcTestAllowAlloc(pGpu, pKernelRc, + NV_ROBUST_CHANNEL_ALLOCFAIL_HEAP)) + { + rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + } + + // Don't allow FB allocations if FB is broken unless it is a virtual allocation or running in L2 cache only mode + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BROKEN_FB) && + !gpuIsCacheOnlyModeEnabled(pGpu)) + { + NV_ASSERT_FAILED("Video memory requested despite BROKEN FB"); + rmStatus = NV_ERR_NOT_SUPPORTED; + goto done; + } + + if (gpuIsDebuggerActive_HAL(pGpu)) + { + // Bug 643431 - WAR for GR WFI timeouts when debugger is active + rmStatus = NV_ERR_BUSY_RETRY; + goto done; + } + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + NV_ASSERT_TRUE_OR_GOTO(rmStatus, pFbAllocInfo != NULL, NV_ERR_NO_MEMORY, done); + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + NV_ASSERT_TRUE_OR_GOTO(rmStatus, pFbAllocPageFormat != NULL, NV_ERR_NO_MEMORY, done); + + // Call heapAlloc to get memory. + if (gpumgrGetBcEnabledStatus(pGpu)) + { + MEMORY_DESCRIPTOR *pPrev = NULL; + + // VGPU won't run in SLI. So no need to set subheap flags in memdesc. + NV_ASSERT(!bSubheap); + + // Create dummy top level memdesc + rmStatus = memdescCreate(&pTopLevelMemDesc, pGpu, RM_PAGE_SIZE, 0, + NV_TRUE, + ADDR_FBMEM, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_DUMMY_TOPLEVEL); + if (rmStatus != NV_OK) + goto done; + pPrev = pTopLevelMemDesc; + pTopLevelMemDesc->_subDeviceAllocCount = gpumgrGetSubDeviceCountFromGpu(pGpu); // very important to have this here + pTopLevelMemDesc->_flags |= MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE; + + offsetOutTemp = ~((NvU64)0); + offsetOut = 0; + sizeOut = 0; + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + // Call heapAlloc to get memory. 
+ pAllocRequest->pMemDesc = NULL; // heapAlloc_IMPL needs a NULL pMemdesc in order for it to be allocated, + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + memUtilsInitFBAllocInfo(pAllocRequest->pUserParams, pFbAllocInfo, hClient, hParent); + + rmStatus = memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo); + if (rmStatus != NV_OK) + SLI_LOOP_GOTO(done); + + rmStatus = vidmemAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo, + vidmemGetHeap(pGpu, hClient, NV_FALSE)); + if (rmStatus != NV_OK) + SLI_LOOP_GOTO(done); + + NV_ASSERT(pAllocRequest->pMemDesc); + + // + // Spoof the flags contiguity, size and alignment of heapAlloc'ed subdev memdesc + // to dummy top level memdesc we created + // + pTopLevelMemDesc->Alignment = pAllocRequest->pMemDesc->Alignment; + pTopLevelMemDesc->_flags = pAllocRequest->pMemDesc->_flags | MEMDESC_FLAGS_ALLOC_PER_SUBDEVICE | MEMDESC_FLAGS_DUMMY_TOPLEVEL; + pTopLevelMemDesc->Size = pAllocRequest->pMemDesc->Size; + pTopLevelMemDesc->ActualSize = pAllocRequest->pMemDesc->ActualSize; + pTopLevelMemDesc->_pageSize = pAllocRequest->pMemDesc->_pageSize; + pTopLevelMemDesc->pHeap = pAllocRequest->pMemDesc->pHeap; + + // add pAllocData->pMemDesc for subdev to linked list + pPrev->_pNext = pAllocRequest->pMemDesc; + pPrev = pAllocRequest->pMemDesc; + + // + // After Bugs 1967134, 1511574, 1448340, 1761278, 1993033 are implemented, remove the code below and + // always set offsetOut = ~0 for the broadcast case. + // Then remove the interface to remove the physical offset. + // + if (offsetOutTemp == ~((NvU64)0)) // 1st + offsetOut = offsetOutTemp = memdescGetPhysAddr(pAllocRequest->pMemDesc, AT_GPU, 0); + else if (offsetOut != ~((NvU64)0)) + { + offsetOutTemp = memdescGetPhysAddr(pAllocRequest->pMemDesc, AT_GPU, 0); + if (offsetOut != offsetOutTemp) + { + offsetOut = ~((NvU64)0); + } + } + + NV_ASSERT(!sizeOut || pAllocRequest->pMemDesc->Size == sizeOut); + sizeOut = pAllocRequest->pMemDesc->Size; + } + SLI_LOOP_END; + + pTempMemDesc = memdescGetMemDescFromGpu(pTopLevelMemDesc, pGpu); + } + else + { + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + memUtilsInitFBAllocInfo(pAllocRequest->pUserParams, pFbAllocInfo, hClient, hParent); + + rmStatus = memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo); + if (rmStatus != NV_OK) + goto done; + + rmStatus = vidmemAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo, pHeap); + if (rmStatus != NV_OK) + goto done; + + NV_ASSERT(pAllocRequest->pMemDesc); + + pTempMemDesc = pTopLevelMemDesc = pAllocRequest->pMemDesc; + offsetOut = memdescGetPhysAddr(pTempMemDesc, AT_GPU, 0); + sizeOut = pTempMemDesc->Size; + + if (bSubheap) + memdescSetFlag(pTempMemDesc, MEMDESC_FLAGS_OWNED_BY_CURRENT_DEVICE, NV_TRUE); + } + + pAllocData->limit = sizeOut - 1; + + if (bIsPmaAlloc) + { + // Cache the PMA_ALLOC_INFO structure. + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescGetMemDescFromGpu(pTopLevelMemDesc, pGpu)->pPmaAllocInfo = pAllocRequest->pPmaAllocInfo[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]; + SLI_LOOP_END; + } + + // + // Video memory is always locally transparently cached. It does not require + // any cache managment. Marked cached unconditionally. 
Non-coherent peer + // caching is handled with an override at mapping time. + // + if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) == + NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT) + { + pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _YES, + pAllocData->attr2); + } + gpuCacheAttrib = NV_MEMORY_CACHED; + + // ClientDB can set the pagesize for memdesc. + // With GPU SMMU mapping, this needs to be set on the SMMU memdesc. + // So SMMU allocation should happen before memConstructCommon() + // Eventaully SMMU allocation will be part of memdescAlloc(). + + // + // There are a few cases where the heap will return an existing + // memdesc. Only update attributes if it is new. + // + // @todo attr tracking should move into heapAlloc + // + if (pTempMemDesc->RefCount == 1) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY); + memdescSetGpuCacheAttrib(memdescGetMemDescFromGpu(pTopLevelMemDesc, pGpu), gpuCacheAttrib); + SLI_LOOP_END; + + + // An SMMU mapping will be added to FB allocations in the following cases: + // 1. RM clients forcing SMMU mapping via flags + // GPU Arch verification with VPR is one such usecase. + + if (FLD_TEST_DRF(OS32, _ATTR2, _SMMU_ON_GPU, _ENABLE, pAllocData->attr2)) + { + NV_ASSERT_FAILED("SMMU mapping allocation is not supported for ARMv7"); + rmStatus = NV_ERR_NOT_SUPPORTED; + + memdescFree(pTopLevelMemDesc); + memdescDestroy(pTopLevelMemDesc); + goto done; + } + } + + rmStatus = memConstructCommon(pMemory, pAllocRequest->classNum, pAllocData->flags, + pTopLevelMemDesc, pAllocData->owner, pHeap, pAllocData->attr, + pAllocData->attr2, 0, pAllocData->type, + pAllocData->tag, &hwResource); + if (rmStatus != NV_OK) + { + memdescFree(pTopLevelMemDesc); + memdescDestroy(pTopLevelMemDesc); + + goto done; + } + NV_ASSERT(pMemory->pMemDesc); + NV_ASSERT(pMemory->pHeap); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + // XXX: This is a hack for now. No Hw resources are assumed to be used in the call. + // The host is only requested to make an alias to the allocated heap. + + if (!IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_ALLOC_VIDMEM(pGpu, + hClient, + hParent, + pAllocRequest->hMemory, + pTopLevelMemDesc, + sizeOut, + attr, + attr2, + pAllocData->type, + pAllocData->flags, + pAllocData->height, + pAllocData->width, + pAllocData->format, + pAllocData->comprCovg, + pAllocData->zcullCovg, + pAllocData->alignment, + pAllocData->pitch, + pAllocData->ctagOffset, + rmStatus); + + if (rmStatus != NV_OK) + { + memDestructCommon(pMemory); + memdescFree(pTopLevelMemDesc); + memdescDestroy(pTopLevelMemDesc); + pTopLevelMemDesc = NULL; + goto done; + } + + pMemory->bRpcAlloc = NV_TRUE; + } + + if (RMCFG_MODULE_GPUACCT) + { + OBJGPU *pGpu = pMemory->pGpu; + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys); + RmClient *pClient = dynamicCast(pRsClient, RmClient); + NvU64 fbUsage; + NV2080_CTRL_GPU_PID_INFO_DATA pidInfoData; + NV2080_CTRL_SMC_SUBSCRIPTION_INFO smcInfo; + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ACCOUNTING_ON) && + (pMemory->pSubDevice == NULL) && // Skipping for subdevice memory allocations. Was this intentional? 
+ (pCallContext->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bSmcGpuPartitioningEnabled = IS_MIG_IN_USE(pGpu); + MIG_INSTANCE_REF partitionRef = kmigmgrMakeNoMIGReference(); + NvBool bGlobalInfo = NV_TRUE; + smcInfo.computeInstanceId = PARTITIONID_INVALID; + smcInfo.gpuInstanceId = PARTITIONID_INVALID; + // + // With SMC GPU partitioning enabled, get associated partition ref and + // only account for partitionLocal usages + // + if (bSmcGpuPartitioningEnabled) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, + hClient, + &partitionRef), + done); + bGlobalInfo = NV_FALSE; + } + portMemSet(&pidInfoData, 0, sizeof(NV2080_CTRL_GPU_PID_INFO_DATA)); + + gpuFindClientInfoWithPidIterator(pGpu, pClient->ProcID, + pClient->SubProcessID, + classId(Memory), + &pidInfoData, + &smcInfo, + &partitionRef, + bGlobalInfo); + + // Only account for memory owned by the process. + fbUsage = pidInfoData.vidMemUsage.memPrivate + + pidInfoData.vidMemUsage.memSharedOwned; + + gpuacctUpdateProcPeakFbUsage(pGpuAcct, pGpu->gpuInstance, + pClient->ProcID, pClient->SubProcessID,fbUsage); + } + } + + pAllocData->size = sizeOut; + pAllocData->offset = offsetOut; + + stdmemDumpOutputAllocParams(pAllocData); + +done: + if (bSubheap && pTempMemDesc != NULL && rmStatus != NV_OK) + heapRemoveRef(pHeap); + + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + if (bLockAcquired) + { + // UNLOCK: release GPUs lock + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + if (bIsPmaAlloc && NV_OK != rmStatus) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + if (pAllocRequest->pPmaAllocInfo[gpumgrGetSubDeviceInstanceFromGpu(pGpu)]) + vidmemPmaFree(pGpu, vidmemGetHeap(pGpu, hClient, NV_FALSE), + pAllocRequest->pPmaAllocInfo[gpumgrGetSubDeviceInstanceFromGpu(pGpu)], 0); + SLI_LOOP_END; + } + + return rmStatus; +} + +void +vidmemDestruct_IMPL +( + VideoMemory *pVideoMemory +) +{ + Memory *pMemory = staticCast(pVideoMemory, Memory); + OBJGPU *pGpu = pMemory->pGpu; + MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc; + + // Free any association of the memory with existing third-party p2p object + CliUnregisterMemoryFromThirdPartyP2P(pMemory); + + memDestructCommon(pMemory); + + // free the video memory based on how it was alloced ... a non-zero + // heapOwner indicates it was heapAlloc-ed. + if (!memdescGetCustomHeap(pMemDesc)) + { + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvHandle hClient = RES_GET_CLIENT_HANDLE(pVideoMemory); + NvHandle hParent = RES_GET_PARENT_HANDLE(pVideoMemory); + NvU32 heapOwner = pMemory->HeapOwner; + NV_STATUS status; + + NV_PRINTF(LEVEL_INFO, "Function: FREE\n"); + NV_PRINTF(LEVEL_INFO, " Owner: 0x%x\n", heapOwner); + NV_PRINTF(LEVEL_INFO, " hMemory: 0x%x\n", RES_GET_HANDLE(pVideoMemory)); + + // + // memHandle (and the block's size/type) is returned, but not + // needed ... 
the caller already has the correct handle to pass + // to memDestructCommon + // + if (gpumgrGetBcEnabledStatus(pGpu) && + (memdescGetAddressSpace(memdescGetMemDescFromGpu(pMemDesc, pGpu)) == ADDR_FBMEM)) + { + MEMORY_DESCRIPTOR *pNextMemDesc = NULL, *pSubdevMemDesc = NULL; + pSubdevMemDesc = pMemDesc->_pNext; + + NV_ASSERT(pMemDesc->_subDeviceAllocCount > 1); + NV_ASSERT(!IS_MIG_IN_USE(pGpu)); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY); + if (pSubdevMemDesc == NULL) + { + NV_ASSERT(0); + SLI_LOOP_BREAK; + } + // Unlink per-gpu memdesc for SLI client allocations before freeing heap + pNextMemDesc = pSubdevMemDesc->_pNext; + + status = memmgrFree(pGpu, + pMemoryManager, + pSubdevMemDesc->pHeap, + hClient, + hParent, // device or subdevice + 0, + heapOwner, + pSubdevMemDesc); + NV_ASSERT(status == NV_OK); + + pSubdevMemDesc = pNextMemDesc; + SLI_LOOP_END; + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); + } + else + { + Heap *pHeap = pMemDesc->pHeap; + + NV_ASSERT(pMemDesc->_subDeviceAllocCount == 1); + status = memmgrFree(pGpu, + pMemoryManager, + pHeap, + hClient, + hParent, // device or subdevice + 0, + heapOwner, + pMemDesc); + NV_ASSERT(status == NV_OK); + + } + } +} + +NV_STATUS +vidmemAllocResources +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo, + Heap *pHeap +) +{ + NV_STATUS status = NV_OK; + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + NvU64 requestedSize = pVidHeapAlloc->size; + HWRESOURCE_INFO *pHwResource = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvBool bAllocedMemDesc = NV_FALSE; + NvBool bAllocedMemory = NV_FALSE; + NvBool bNoncontigAllowed = NV_FALSE; + NvBool bNoncontigAllocation = NV_FALSE; + NvHandle hVASpace = pVidHeapAlloc->hVASpace; + NvBool bIsPmaOwned = NV_FALSE; + NvU32 subdeviceInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + NvBool bContig = NV_TRUE; + + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pGpu, pKernelMemorySystem); + + // Most vidmem allocations external to RM get routed to PMA. + bIsPmaOwned = (pAllocRequest->pPmaAllocInfo[subdeviceInst] != NULL); + + // check if pMemorySystemConfig is not yet initialized on Offload client. + if (pMemorySystemConfig == NULL) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT(0); + goto failed; + } + + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_VIRTUAL_ONLY) + { + NV_PRINTF(LEVEL_WARNING, + "Virtual-only flag used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + if (FLD_TEST_DRF(OS32, _ATTR2, _32BIT_POINTER, _ENABLE, pVidHeapAlloc->attr2)) + { + NV_PRINTF(LEVEL_WARNING, + "Virtual-only 32-bit pointer attr used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + if (hVASpace != 0) + { + NV_PRINTF(LEVEL_WARNING, + "VA space handle used with physical allocation\n"); + status = NV_ERR_INVALID_ARGUMENT; + goto failed; + } + + // Prior to this change, heap was silently ignoring non-contig Vidmem allocation requests. + // With this change to allow non-contig vidmem allocation, I was getting a DVS Extended Sanity failures & regression on Windows. + // It seems Windows is making some allocations with non-contig flag, but was expecting contig allocation. + // So enable the non-contig path only for verif platforms. + // + bContig = FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _DEFAULT, pVidHeapAlloc->attr) ? 
+ !pHeap->getProperty(pHeap, PDB_PROP_HEAP_NONCONTIG_ALLOC_BY_DEFAULT) : + !FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _NONCONTIGUOUS, pVidHeapAlloc->attr); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_WARNING, memUtilsAllocMemDesc(pGpu, pAllocRequest, pFbAllocInfo, &pMemDesc, pHeap, + ADDR_FBMEM, bContig, &bAllocedMemDesc), failed); + +#ifndef NV_DISABLE_NONCONTIG_ALLOC + bNoncontigAllowed = + (!bContig || FLD_TEST_DRF(OS32, _ATTR, _PHYSICALITY, _ALLOW_NONCONTIGUOUS, pVidHeapAlloc->attr)) && + pMemoryManager->bAllowNoncontiguousAllocation && + !FLD_TEST_DRF(OS32, _ATTR, _FORMAT, _SWIZZLED, pVidHeapAlloc->attr); +#endif + + if (bIsPmaOwned) + { + pFbAllocInfo->offset = pMemDesc->_pteArray[0]; + + if (bContig) + { + NV_PRINTF(LEVEL_INFO, "---> PMA Path taken contiguous\n"); + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, + pVidHeapAlloc->attr); + + memdescDescribe(pAllocRequest->pMemDesc, ADDR_FBMEM, + pAllocRequest->pPmaAllocInfo[subdeviceInst]->pageArray[0], + pFbAllocInfo->adjustedSize); + } + else + { + NV_PRINTF(LEVEL_INFO, "---> PMA Path taken discontiguous\n"); + NV_ASSERT(!bContig && bNoncontigAllowed); + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _NONCONTIGUOUS, + pVidHeapAlloc->attr); + memdescFillPages(pAllocRequest->pMemDesc, 0, + pAllocRequest->pPmaAllocInfo[subdeviceInst]->pageArray, + pAllocRequest->pPmaAllocInfo[subdeviceInst]->pageCount, + pAllocRequest->pPmaAllocInfo[subdeviceInst]->pageSize); + } + } + else + { + OBJHEAP_ALLOC_DATA allocData = { 0 }; + + bNoncontigAllocation = !bContig; + + allocData.alignment = pVidHeapAlloc->alignment; + allocData.allocSize = pFbAllocInfo->size + pFbAllocInfo->pad; + + status = heapAlloc(pGpu, + pFbAllocInfo->hClient, + pHeap, + pAllocRequest, + pAllocRequest->hMemory, + &allocData, + pFbAllocInfo, + &pHwResource, + &bNoncontigAllocation, + bNoncontigAllowed, + bAllocedMemDesc); + + // heapAlloc might create a new memdesc for compbit/discontig case + pMemDesc = pAllocRequest->pMemDesc; + + if (status != NV_OK) + { + goto failed; + } + + bAllocedMemory = NV_TRUE; + } + + if (!bIsPmaOwned && (pVidHeapAlloc->type != NVOS32_TYPE_PMA)) + { + NvU64 size, numBlocks; + NvU32 i; + + // + // Pre-fill cache to prevent FB read accesses if in cache only mode and not doing one time pre-fill + // Also, only need to fill the *requested* size and not the actual allocation size + // These might not work with noncontig allocation since they assume + // physically contig memory + // + if (!bNoncontigAllocation && + gpuIsCacheOnlyModeEnabled(pGpu) && + !pMemorySystemConfig->bL2PreFill) + { + NV_STATUS preFillStatus; + + requestedSize = (requestedSize == 0) ? 
pVidHeapAlloc->size : requestedSize; + preFillStatus = kmemsysPreFillCacheOnlyMemory_HAL(pGpu, pKernelMemorySystem, + pFbAllocInfo->offset, requestedSize); + NV_ASSERT(preFillStatus == NV_OK); + } + + if (memdescGetContiguity(pMemDesc, AT_GPU)) + { + size = pFbAllocInfo->adjustedSize; + numBlocks = 1; // One contiguous page + } + else + { + // Only 4k-sized noncontig pages supported currently + size = RM_PAGE_SIZE; + numBlocks = pMemDesc->PageCount; + } + + for (i = 0; i < numBlocks; i++) + { + // Ensures memory is fully initialized + memmgrScrubMemory_HAL(pGpu, pMemoryManager, memdescGetPte(pMemDesc, AT_GPU, i), size); + } + } + + // get possibly updated surface attributes + pVidHeapAlloc->attr = pFbAllocInfo->retAttr; + pVidHeapAlloc->attr2 = pFbAllocInfo->retAttr2; + + // update contiguity attribute to reflect memdesc + if (memdescGetContiguity(pAllocRequest->pMemDesc, AT_GPU)) + { + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, + pVidHeapAlloc->attr); + } + else + { + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _NONCONTIGUOUS, + pVidHeapAlloc->attr); + } + + // TODO remove once we don't have hwResource in MEM_BLOCK + if (pHwResource != NULL) + { + pHwResource->attr = pFbAllocInfo->retAttr; + pHwResource->attr2 = pFbAllocInfo->retAttr2; + pHwResource->hwResId = pFbAllocInfo->hwResId; + pHwResource->comprCovg = pFbAllocInfo->comprCovg; + pHwResource->ctagOffset = pFbAllocInfo->ctagOffset; + } + + pVidHeapAlloc->offset = pFbAllocInfo->offset; + + if (pAllocRequest->pHwResource != NULL) + { + pAllocRequest->pHwResource->attr = pFbAllocInfo->retAttr; + pAllocRequest->pHwResource->attr2 = pFbAllocInfo->retAttr2; + pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId; + pAllocRequest->pHwResource->comprCovg = pFbAllocInfo->comprCovg; + pAllocRequest->pHwResource->ctagOffset = pFbAllocInfo->ctagOffset; + pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId; + } + + return NV_OK; + +failed: + if (bAllocedMemory) + { + memmgrFree(pGpu, pMemoryManager, pHeap, + pFbAllocInfo->hClient, pFbAllocInfo->hDevice, 0, + pVidHeapAlloc->owner, + pMemDesc); + } + else + { + memmgrFreeHwResources(pGpu, pMemoryManager, pFbAllocInfo); + } + + if (bAllocedMemDesc) + { + memdescDestroy(pAllocRequest->pMemDesc); + pAllocRequest->pMemDesc = NULL; + } + + return status; +} + +NV_STATUS +vidmemCheckCopyPermissions_IMPL +( + VideoMemory *pVideoMemory, + OBJGPU *pDstGpu, + NvHandle hDstClient +) +{ + Memory *pMemory = staticCast(pVideoMemory, Memory); + OBJGPU *pSrcGpu = pMemory->pGpu; + NvHandle hSrcClient = RES_GET_CLIENT_HANDLE(pVideoMemory); + KernelMIGManager *pSrcKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pSrcGpu); + KernelMIGManager *pDstKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pDstGpu); + NvBool bSrcClientKernel = (rmclientGetCachedPrivilegeByHandle(hSrcClient) >= RS_PRIV_LEVEL_KERNEL); + NvBool bDstClientKernel = (rmclientGetCachedPrivilegeByHandle(hDstClient) >= RS_PRIV_LEVEL_KERNEL); + + // + // XXX: In case of MIG memory, duping across GPU instances is not allowed + // Bug 2815350 - Due to this bug, allow kernel clients to bypass this check + // + if (!bDstClientKernel && (IS_MIG_IN_USE(pSrcGpu) || IS_MIG_IN_USE(pDstGpu))) + { + // + // Due to Bug 2815350 we have to take an exception for kernel clients, + // hence we can't use a direct instanceRef check. + // Rather than defaulting to heap based checks, keeping the + // instanceRef check in else as that's what we should only have + // when Bug 2815350 is fixed. 
+ // Both clients are kernel - Force subscription check. No exception needed. + // Only SrcClientKernel - Enforce subscription check in dstClient + // DstClientKernel - Ignore any enforcement as kernel clients are + // allowed to dup without any enforcement + // + + if (bSrcClientKernel) + { + // Get memory partition heap from both clients and compare + Heap *pDstClientHeap = NULL; + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + kmigmgrGetMemoryPartitionHeapFromClient(pDstGpu, pDstKernelMIGManager, hDstClient, + &pDstClientHeap)); + + // Make sure memory is coming from same heaps + if (pDstClientHeap != pMemory->pHeap) + { + NV_PRINTF(LEVEL_WARNING, + "Duping outside of GPU instance not allowed with MIG\n"); + return NV_ERR_NOT_SUPPORTED; + } + } + else + { + // + // Due to Bug 2815350 we have to take an exception for kernel clients, + // hence we can't use a direct instanceRef check + // + MIG_INSTANCE_REF srcInstRef; + MIG_INSTANCE_REF dstInstRef; + + // Check instance subscription of source and destination clients + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + kmigmgrGetInstanceRefFromClient(pSrcGpu, pSrcKernelMIGManager, hSrcClient, + &srcInstRef)); + NV_CHECK_OK_OR_RETURN(LEVEL_WARNING, + kmigmgrGetInstanceRefFromClient(pDstGpu, pDstKernelMIGManager, hDstClient, + &dstInstRef)); + + // + // Memory duping is allowed accross compute instances. so ignore + // compute instance differences + // + srcInstRef = kmigmgrMakeGIReference(srcInstRef.pKernelMIGGpuInstance); + dstInstRef = kmigmgrMakeGIReference(dstInstRef.pKernelMIGGpuInstance); + if (!kmigmgrAreMIGReferencesSame(&srcInstRef, &dstInstRef)) + { + NV_PRINTF(LEVEL_WARNING, + "GPU instance subscription differ between Source and Destination clients\n"); + return NV_ERR_NOT_SUPPORTED; + } + } + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c b/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c new file mode 100644 index 000000000..7e4e172b8 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/virt_mem_mgr.c @@ -0,0 +1,215 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Rotuines ***************************\ +* * +* Virtual Memory Manager Object Function Definitions. 
* +* * +\***************************************************************************/ + +#include "mem_mgr/virt_mem_mgr.h" +#include "mem_mgr/vaspace.h" +#include "mem_mgr/io_vaspace.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "class/cl00f2.h" // IO_VASPACE_A +#include "class/cl00fc.h" // FABRIC_VASPACE_A +#include "mem_mgr/gpu_vaspace.h" +#include "class/cl90f1.h" // FERMI_VASPACE_A +#include "mem_mgr/fabric_vaspace.h" + +NV_STATUS +vmmCreateVaspace_IMPL +( + OBJVMM *pVmm, + NvU32 classId, + NvU32 vaspaceId, + NvU32 gpuMask, + NvU64 vaStart, + NvU64 vaLimit, + NvU64 vaStartInternal, + NvU64 vaLimitInternal, + OBJVASPACE *pPteSpaceMap, + NvU32 flags, + OBJVASPACE **ppVAS +) +{ + NV_STATUS status = NV_OK; + const NVOC_CLASS_INFO *pClassInfo; + Dynamic *pNewObj; + ADDRESS_TRANSLATION addressTranslation; + + NV_ASSERT_OR_RETURN(ppVAS != NULL, NV_ERR_INVALID_ARGUMENT); + + // + // IOMMU vaspaces may be created for a device before the device itself + // has been created, so there isn't an OBJGPU to get here yet. In these + // cases, the vaspaceId is used to correlate the vaspace with the GPU (it + // is the GPU ID). + // + if (gpuMask == 0) + { + NV_ASSERT_OR_RETURN(IO_VASPACE_A == classId, NV_ERR_INVALID_ARGUMENT); + } + + switch (classId) + { + case FERMI_VASPACE_A: + addressTranslation = AT_GPU; + pClassInfo = RMCFG_MODULE_GVASPACE ? classInfo(OBJGVASPACE) : NULL; + break; + case IO_VASPACE_A: + addressTranslation = AT_PA; + pClassInfo = RMCFG_MODULE_IOVASPACE ? classInfo(OBJIOVASPACE) : NULL; + // + // For IOMMU vaspace, there is only one per vaspaceID. See if + // vaspace for this vaspaceID already exists, if it does, just increment + // the refcount. + // + if (vmmGetVaspaceFromId(pVmm, vaspaceId, classId, ppVAS) == NV_OK) + { + vaspaceIncRefCnt(*ppVAS); + return NV_OK; + } + break; + case FABRIC_VASPACE_A: + addressTranslation = AT_GPU; + pClassInfo = classInfo(FABRIC_VASPACE); + // + // For Fabric vaspace, there is only one per vaspaceID. See if + // vaspace for this vaspaceID already exists, if it does, just increment + // the refcount. + // + if (vmmGetVaspaceFromId(pVmm, vaspaceId, classId, ppVAS) == NV_OK) + { + vaspaceIncRefCnt(*ppVAS); + return NV_OK; + } + break; + default: // Unsupported class + addressTranslation = AT_GPU; + pClassInfo = NULL; + break; + } + + if (pClassInfo == NULL) + { + *ppVAS = NULL; + return NV_ERR_INVALID_CLASS; + } + + status = objCreateDynamic(&pNewObj, pVmm, pClassInfo); + if (NV_OK != status) + return status; + + *ppVAS = dynamicCast(pNewObj, OBJVASPACE); + + (*ppVAS)->addressTranslation = addressTranslation; + (*ppVAS)->vaspaceId = vaspaceId; + (*ppVAS)->gpuMask = gpuMask; + + vaspaceIncRefCnt(*ppVAS); + + status = vaspaceConstruct_(*ppVAS, classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags); + if (status != NV_OK) + { + vmmDestroyVaspace(pVmm, *ppVAS); + *ppVAS = NULL; + return status; + } + + return status; +} + +void +vmmDestroyVaspace_IMPL +( + OBJVMM *pVmm, + OBJVASPACE *pVAS +) +{ + OBJVASPACE *pTargetVAS = pVAS; + + vaspaceDecRefCnt(pTargetVAS); + + // + // Call the utility routine that does the object deletion when the last + // reference has been destroyed. 
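+    // The refcount pairs with the vaspaceIncRefCnt() calls made in
+    // vmmCreateVaspace, so the object is deleted only when the last
+    // creator releases its reference.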
+ // + if (0 == pTargetVAS->refCnt) + { + objDelete(pTargetVAS); + pTargetVAS = NULL; + } +} + +NV_STATUS +vmmGetVaspaceFromId_IMPL +( + OBJVMM *pVmm, + NvU32 vaspaceId, + NvU32 classId, + OBJVASPACE **ppVAS +) +{ + Object *pIter = NULL; + OBJVASPACE *pVAS = NULL; + OBJIOVASPACE *pIOVAS = NULL; + FABRIC_VASPACE *pFabricVAS = NULL; + + pIter = objGetChild(staticCast(pVmm, Object)); + while (pIter != NULL) + { + switch (classId) + { + case IO_VASPACE_A: + pIOVAS = dynamicCast(pIter, OBJIOVASPACE); + if (pIOVAS != NULL) + { + pVAS = staticCast(pIOVAS, OBJVASPACE); + } + break; + case FABRIC_VASPACE_A: + pFabricVAS = dynamicCast(pIter, FABRIC_VASPACE); + if (pFabricVAS != NULL) + { + pVAS = staticCast(pFabricVAS, OBJVASPACE); + } + break; + default: + NV_ASSERT(0); + break; + } + + if ((pVAS != NULL) && (pVAS->vaspaceId == vaspaceId)) + { + *ppVAS = pVAS; + return NV_OK; + } + + pIter = objGetSibling(pIter); + } + + *ppVAS = NULL; + return NV_ERR_OBJECT_NOT_FOUND; +} + diff --git a/src/nvidia/src/kernel/mem_mgr/virt_mem_range.c b/src/nvidia/src/kernel/mem_mgr/virt_mem_range.c new file mode 100644 index 000000000..faa5ff677 --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/virt_mem_range.c @@ -0,0 +1,144 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "mem_mgr_internal.h" +#include "mem_mgr/virt_mem_range.h" +#include "os/os.h" +#include "vgpu/rpc.h" +#include "gpu/device/device.h" +#include "gpu/mem_mgr/vaspace_api.h" +#include "gpu/mmu/kern_gmmu.h" +#include "class/cl0070.h" + +NV_STATUS +vmrangeConstruct_IMPL +( + VirtualMemoryRange *pVmRange, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS *pAllocData; + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pVASpaceRef = NULL; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvHandle hMemory = pCallContext->pResourceRef->hResource; + NvU32 class = pResourceRef->externalClassId; + VirtualMemory *pVirtualMemory = staticCast(pVmRange, VirtualMemory); + Memory *pMemory = staticCast(pVmRange, Memory); + OBJGPU *pGpu = pMemory->pGpu; + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NvU64 maxVA; + + // Copy checking is handled by VirtualMemory class + if (RS_IS_COPY_CTOR(pParams)) + return NV_OK; + + pAllocData = pParams->pAllocParams; + + // + // Handling of hVASpace + // 0 to find the default hVASpace in this context. This is compatible with + // the old virtual ContextDma class. + // NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE (0xffffffff) + // An invalid handle to disallow mappings with this DynamicObject. + // This is used to emulate the original behavior for NV01_MEMORY_SYSTEM_DYNAMIC + // class. + // or an actual hVASpace handle + // + if (pAllocData->hVASpace == 0) + { + NV_ASSERT_OR_RETURN(pKernelGmmu->maxVASize, NV_ERR_INVALID_STATE); + maxVA = pKernelGmmu->maxVASize; + } + else if (pAllocData->hVASpace == NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE) + { + NV_ASSERT_OR_RETURN(pKernelGmmu->maxVASize, NV_ERR_INVALID_STATE); + maxVA = 1ULL << 40; + } + else + { + VaSpaceApi *pVAS; + + status = clientGetResourceRef(pCallContext->pClient, pAllocData->hVASpace, &pVASpaceRef); + NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, status); + pVAS = dynamicCast(pVASpaceRef->pResource, VaSpaceApi); + NV_CHECK_OR_RETURN(LEVEL_INFO, pVAS, NV_ERR_INVALID_OBJECT_HANDLE); + NV_ASSERT_OR_RETURN(pVAS->pVASpace, NV_ERR_INVALID_OBJECT_HANDLE); + + maxVA = pVAS->pVASpace->vasLimit + 1; + } + if (pAllocData->limit != 0) + { + NV_CHECK_OR_RETURN(LEVEL_INFO, pAllocData->limit < maxVA, NV_ERR_INVALID_LIMIT); + maxVA = pAllocData->limit + 1; + } + + NV_CHECK_OR_RETURN(LEVEL_INFO, pAllocData->offset < maxVA, NV_ERR_INVALID_OFFSET); + + status = memCreateMemDesc(pGpu, &pMemDesc, ADDR_VIRTUAL, + pAllocData->offset, + maxVA - pAllocData->offset, + 0, 0); + NV_CHECK_OR_RETURN(LEVEL_INFO, status == NV_OK, status); + + status = memConstructCommon(pMemory, class, 0, pMemDesc, 0, NULL, + 0, 0, 0, 0, NVOS32_MEM_TAG_NONE, (HWRESOURCE_INFO *)NULL); + if (status != NV_OK) + { + memdescDestroy(pMemDesc); + return status; + } + + // Allow unicast mapping on NV01_MEMORY_VIRTUAL object. + pVirtualMemory->bAllowUnicastMapping = NV_TRUE; + + // Construct sets hVASpace to invalid, update with our handle + pVirtualMemory->hVASpace = pAllocData->hVASpace; + + // + // VGPU: With GUEST_MANAGED_VA we should not have to forward this object, + // but virtual ContextDma and the memory destructor depend on it. 
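+    // So the allocation is still RPCed to the host/GSP RM, and bRpcAlloc
+    // records that fact for the memory destructor.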
+ // + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NV_RM_RPC_ALLOC_OBJECT(pGpu, hClient, hParent, hMemory, class, + pAllocData, status); + if (status != NV_OK) + { + memdescDestroy(pMemDesc); + return status; + } + pMemory->bRpcAlloc = NV_TRUE; + } + + pAllocData->limit = maxVA - 1; // Return limit to client + + if (pVASpaceRef) + refAddDependant(pVASpaceRef, pResourceRef); + + return status; +} diff --git a/src/nvidia/src/kernel/mem_mgr/virtual_mem.c b/src/nvidia/src/kernel/mem_mgr/virtual_mem.c new file mode 100644 index 000000000..e13bc6e4e --- /dev/null +++ b/src/nvidia/src/kernel/mem_mgr/virtual_mem.c @@ -0,0 +1,1831 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mem_mgr/virtual_mem.h" +#include "mem_mgr/vaspace.h" +#include "gpu/mem_mgr/virt_mem_allocator.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "vgpu/rpc.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "mem_mgr/mem.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "core/locks.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "Nvcm.h" +#include "gpu/mem_mgr/vaspace_api.h" +#include "gpu/mem_mgr/mem_utils.h" +#include "gpu/bus/kern_bus.h" +#include "gpu/bus/p2p_api.h" + +#include "class/cl0070.h" // NV01_MEMORY_VIRTUAL +#include "class/cl50a0.h" // NV50_MEMORY_VIRTUAL + +static void _virtmemFreeKernelMapping(OBJGPU *, CLI_DMA_MAPPING_INFO *); + +/*! 
+ * _virtmemQueryVirtAllocParams + * + * @brief + * Queries for the actual size of VA allocation, alignment + * and mask of page sizes (needed for page table allocation) + * + * @param[in] pGpu OBJGPU pointer + * @param[in] hClient Client handle + * @param[in] hDevice Device handle + * @param[in] pAllocData Pointer to VIDHEAP_ALLOC_DATA + * @param[out] pAlign Alignment + * @param[out] pSize Size of allocation + * @param[out] ppVAS Virtual address space for request + * @param[out] pPageSizeLockMask Mask of page sizes locked during VA reservation + * + * @returns + * NV_OK + */ +static NV_STATUS +_virtmemQueryVirtAllocParams +( + OBJGPU *pGpu, + NvHandle hClient, + NvHandle hDevice, + NV_MEMORY_ALLOCATION_PARAMS *pAllocData, + NvU64 *pAlign, + NvU64 *pSize, + OBJVASPACE **ppVAS, + NvU64 *pPageSizeLockMask +) +{ + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + RsClient *pClient; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + NV_STATUS status = NV_OK; + NvBool bReleaseGpuLock = NV_FALSE; + + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + if (pFbAllocInfo == NULL) + { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto done; + } + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + if (pFbAllocPageFormat == NULL) { + NV_ASSERT(0); + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + pFbAllocInfo->pageFormat->attr = pAllocData->attr; + pFbAllocInfo->pageFormat->attr2 = pAllocData->attr2; + pFbAllocInfo->pageFormat->flags = pAllocData->flags; + *pSize = pAllocData->size; + *pAlign = pAllocData->alignment; + + // LOCK: acquire device lock + if (!rmDeviceGpuLockIsOwner(gpuGetInstance(pGpu))) + { + NV_ASSERT_OK_OR_GOTO(status, rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_MEM_PMA), done); + bReleaseGpuLock = NV_TRUE; + } + + *pPageSizeLockMask = stdmemQueryPageSize(pMemoryManager, hClient, + pAllocData); + if (*pPageSizeLockMask == 0) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + NV_ASSERT_OK_OR_GOTO(status, + serverGetClientUnderLock(&g_resServ, hClient, &pClient), + done); + + NV_ASSERT_OK_OR_GOTO(status, + vaspaceGetByHandleOrDeviceDefault(pClient, hDevice, pAllocData->hVASpace, ppVAS), + done); + + NV_ASSERT_OK_OR_GOTO(status, + vaspaceApplyDefaultAlignment(*ppVAS, pFbAllocInfo, pAlign, pSize, pPageSizeLockMask), + done); + +done: + if (bReleaseGpuLock) + { + // UNLOCK: release device lock + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + return status; +} + +/*! 
+ * @brief Handle copy construction for VirtualMemory object + */ +static NV_STATUS +_virtmemCopyConstruct +( + VirtualMemory *pDstVirtualMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsClient *pDstClient = pCallContext->pClient; + RsClient *pSrcClient = pParams->pSrcClient; + RsResourceRef *pSrcRef = pParams->pSrcRef; + VirtualMemory *pSrcVirtualMemory = dynamicCast(pSrcRef->pResource, VirtualMemory); + Memory *pDstMemory = staticCast(pDstVirtualMemory, Memory); + Memory *pSrcMemory = staticCast(pSrcVirtualMemory, Memory); + OBJGPU *pSrcGpu = pSrcMemory->pGpu; + OBJVASPACE *pVASSrc = NULL; + NvBool bIncAllocRefCnt = NV_FALSE; + + // Special handling for Dup of the FLA VASpace + if (pSrcVirtualMemory->bFlaVAS) + { + Device *pDstDevice; + RsClient *pFlaClient; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hImportedVASpace = NV01_NULL_OBJECT; + RsResourceRef *pDupedVasRef; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + serverGetClientUnderLock(&g_resServ, GPU_GET_KERNEL_BUS(pSrcGpu)->flaInfo.hClient, &pFlaClient)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + vaspaceGetByHandleOrDeviceDefault(pFlaClient, + RES_GET_HANDLE(pSrcMemory->pDevice), + GPU_GET_KERNEL_BUS(pSrcGpu)->flaInfo.hFlaVASpace, + &pVASSrc)); + + // + // FLA Memory can be duped during import stage and the importing client which might not be the + // same as exporting client. Also the importing client might not also bind to the the exporting FLA + // VASpace on the exporting device. In that case, we might see leaks in the exporting FLA VASpace. + // To avoid those scenarios, we are duping the FLA VAS to the importing client under the exporting device. + // RS-TODO: Bug 3059751 to track the duped VAS as dependant in ResServer + // + NV_ASSERT_OK_OR_RETURN(deviceGetByGpu(pDstClient, pSrcGpu, NV_TRUE, &pDstDevice)); + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + pRmApi->DupObject(pRmApi, + pDstClient->hClient, + RES_GET_HANDLE(pDstDevice), + &hImportedVASpace, + GPU_GET_KERNEL_BUS(pSrcGpu)->flaInfo.hClient, + GPU_GET_KERNEL_BUS(pSrcGpu)->flaInfo.hFlaVASpace, + 0)); + + if (clientGetResourceRef(pDstClient, hImportedVASpace, &pDupedVasRef) == NV_OK) + refAddDependant(pDupedVasRef, RES_GET_REF(pDstVirtualMemory)); + + pDstVirtualMemory->hVASpace = hImportedVASpace; + + // Increase refcount if locally managed + bIncAllocRefCnt = !pSrcMemory->bRpcAlloc; + } + else if (pSrcVirtualMemory->hVASpace == NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE) + { + // A legacy sysmem dynamic object does not have valid hVASpace + pDstVirtualMemory->hVASpace = NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE; + + // No VASPACE is update + bIncAllocRefCnt = NV_FALSE; + } + else + { + OBJVASPACE *pVASDst = NULL; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + vaspaceGetByHandleOrDeviceDefault(pSrcClient, + RES_GET_HANDLE(pSrcMemory->pDevice), + pSrcVirtualMemory->hVASpace, &pVASSrc)); + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + vaspaceGetByHandleOrDeviceDefault(pDstClient, + RES_GET_HANDLE(pDstMemory->pDevice), + NV01_NULL_OBJECT, &pVASDst)); + if (pVASSrc != pVASDst) + { + return NV_ERR_INVALID_DEVICE; + } + + pDstVirtualMemory->hVASpace = NV01_NULL_OBJECT; + + // Increase refcount for locally managed NV50_MEMORY_VIRTUAL + bIncAllocRefCnt = pSrcVirtualMemory->bReserveVaOnAlloc && !pSrcMemory->bRpcAlloc; + } + + pDstVirtualMemory->bAllowUnicastMapping = pSrcVirtualMemory->bAllowUnicastMapping; + pDstVirtualMemory->bReserveVaOnAlloc = pSrcVirtualMemory->bReserveVaOnAlloc; + pDstVirtualMemory->bFlaVAS = 
pSrcVirtualMemory->bFlaVAS; + + // Mappings do not follow virtual memory object + pDstVirtualMemory->pDmaMappingList = NULL; + + if (bIncAllocRefCnt) + { + NvU64 vaddr; + NvU64 size; + + virtmemGetAddressAndSize(pSrcVirtualMemory, &vaddr, &size); + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + vaspaceIncAllocRefCnt(pVASSrc, vaddr)); + } + + return NV_OK; +} + +/*! + * virtmemConstruct + * + * @brief + * This routine provides common allocation services used by the + * following heap allocation functions: + * NVOS32_FUNCTION_ALLOC_DEPTH_WIDTH_HEIGHT + * NVOS32_FUNCTION_ALLOC_SIZE + * NVOS32_FUNCTION_ALLOC_SIZE_RANGE + * NVOS32_FUNCTION_ALLOC_TILED_PITCH_HEIGHT + * + * @param[in] pVirtualMemory Pointer to VirtualMemory object + * @param[in] pCallContext Pointer to the current CALL_CONTEXT. + * @param[in] pParams Pointer to the alloc params + * + * @return 'NV_OK' + * Operation completed successfully. + * @return 'NV_ERR_NO_MEMORY' + * There is not enough available memory to satisfy allocation request. + * @return 'NV_ERR_INSUFFICIENT_RESOURCES' + * Not enough available resources to satisfy allocation request. + */ +NV_STATUS +virtmemConstruct_IMPL +( + VirtualMemory *pVirtualMemory, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + Memory *pMemory = staticCast(pVirtualMemory, Memory); + NV_MEMORY_ALLOCATION_PARAMS *pAllocData = pParams->pAllocParams; + MEMORY_ALLOCATION_REQUEST allocRequest = {0}; + MEMORY_ALLOCATION_REQUEST *pAllocRequest = &allocRequest; + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + OBJVASPACE *pVAS = NULL; + HWRESOURCE_INFO hwResource; + RsClient *pRsClient = pCallContext->pClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + RsResourceRef *pVASpaceRef = NULL; + NvU32 gpuCacheAttrib; + NV_STATUS status = NV_OK; + NvHandle hClient = pCallContext->pClient->hClient; + NvHandle hParent = pCallContext->pResourceRef->pParentRef->hResource; + NvU64 sizeOut; + NvU64 offsetOut; + NvBool bLockAcquired = NV_FALSE; + NvU32 attr = 0; + NvU32 attr2 = 0; + NvBool bRpcAlloc = NV_FALSE; + NvBool bResAllocated = NV_FALSE; + NvU32 gpuMask = 0; + NvU32 gpuMaskInitial = 0; + FB_ALLOC_INFO *pFbAllocInfo = NULL; + FB_ALLOC_PAGE_FORMAT *pFbAllocPageFormat = NULL; + + // Bulk of copy-construction is done by Memory class. Handle our members. + if (RS_IS_COPY_CTOR(pParams)) + { + if (!rmGpuGroupLockIsOwner(pGpu->gpuInstance, GPU_LOCK_GRP_ALL, &gpuMask)) + { + // + // If we hold some GPU locks already then acquiring more GPU locks + // may violate lock ordering and cause dead-lock. To avoid dead-lock in this case, + // attempt to take the locks with a conditional acquire. + // + gpuMaskInitial = rmGpuLocksGetOwnedMask(); + NvU32 lockFlag = (gpuMaskInitial == 0) + ? 
GPUS_LOCK_FLAGS_NONE + : GPUS_LOCK_FLAGS_COND_ACQUIRE; + + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_ALL, + lockFlag, + RM_LOCK_MODULES_MEM, + &gpuMask)); + + bLockAcquired = NV_TRUE; + } + + status = _virtmemCopyConstruct(pVirtualMemory, pCallContext, pParams); + + if (bLockAcquired) + { + bLockAcquired = NV_FALSE; + rmGpuGroupLockRelease(gpuMask & (~gpuMaskInitial), GPUS_LOCK_FLAGS_NONE); + } + + goto done; + } + + pVirtualMemory->hVASpace = RM_INVALID_VASPACE_HANDLE; + pVirtualMemory->bAllowUnicastMapping = NV_FALSE; + pVirtualMemory->bReserveVaOnAlloc = NV_FALSE; + pVirtualMemory->bFlaVAS = NV_FALSE; + pVirtualMemory->pDmaMappingList = NULL; + + // NV01_MEMORY_VIRTUAL does not allocate typed memory from the heap + if (pParams->externalClassId == NV01_MEMORY_VIRTUAL) + return NV_OK; + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, stdmemValidateParams(pGpu, hClient, pAllocData)); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pAllocData->flags & NVOS32_ALLOC_FLAGS_VIRTUAL, NV_ERR_INVALID_ARGUMENT); + + stdmemDumpInputAllocParams(pAllocData, pCallContext); + + pAllocRequest->classNum = NV50_MEMORY_VIRTUAL; + pAllocRequest->pUserParams = pAllocData; + pAllocRequest->hMemory = pResourceRef->hResource; + pAllocRequest->hClient = hClient; + pAllocRequest->hParent = hParent; + pAllocRequest->pGpu = pGpu; + pAllocRequest->internalflags = NVOS32_ALLOC_INTERNAL_FLAGS_CLIENTALLOC; + pAllocRequest->pHwResource = &hwResource; + + // Unsure if we need to keep separate copies, but keeping old behavior for now. + sizeOut = pAllocData->size; + offsetOut = pAllocData->offset; + + // + // Reserve memory for page tables in case of non lazy page table + // allocations. + // + // PageLevelMemReserve will reserve only if the PDB property for + // client managed page tables is set. + // + if (memmgrIsPmaInitialized(pMemoryManager) && + !(pAllocData->flags & NVOS32_ALLOC_FLAGS_LAZY) && + !(pAllocData->flags & NVOS32_ALLOC_FLAGS_EXTERNALLY_MANAGED)) + { + NvU64 size; + NvU64 align; + NvU64 pageSizeLockMask; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + pAllocRequest->pGpu = pGpu; + size = 0; + align = 0; + pageSizeLockMask = 0; + + status = _virtmemQueryVirtAllocParams(pGpu, hClient, hParent, + pAllocData, &align, &size, + &pVAS, &pageSizeLockMask); + if (NV_OK != status) + SLI_LOOP_GOTO(done); + + status = vaspaceReserveMempool(pVAS, pGpu, hClient, + size, pageSizeLockMask, + VASPACE_RESERVE_FLAGS_NONE); + if (NV_OK != status) + SLI_LOOP_GOTO(done); + + SLI_LOOP_END; + } + + if (RMCFG_FEATURE_RM_BASIC_LOCK_MODEL) + { + // + // Can't move locking up as PMA locks need to be taken first. + // Acquire the lock *only after* PMA is done allocating. + // + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance) && !rmGpuLockIsOwner()) + { + NV_ASSERT_OK_OR_GOTO(status, + rmDeviceGpuLocksAcquire(pGpu, GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_MEM), + done); + + bLockAcquired = NV_TRUE; + } + } + + { + // + // If using thwap to generate an allocation failure here, fail the + // alloc right away. 
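+        // When krcTestAllowAlloc() disallows the allocation, fail it here
+        // with NV_ERR_INSUFFICIENT_RESOURCES.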
+ // + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + if (pKernelRc != NULL && + !krcTestAllowAlloc(pGpu, pKernelRc, + NV_ROBUST_CHANNEL_ALLOCFAIL_HEAP)) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + } + + // Validate virtual address space + NV_CHECK_OK_OR_GOTO(status, LEVEL_SILENT, + vaspaceGetByHandleOrDeviceDefault(pRsClient, hParent, pAllocData->hVASpace, &pVAS), + done); + + pVirtualMemory->bFlaVAS = !!(vaspaceGetFlags(pVAS) & VASPACE_FLAGS_FLA); + pVirtualMemory->bOptimizePageTableMempoolUsage = + !!(vaspaceGetFlags(pVAS) & VASPACE_FLAGS_OPTIMIZE_PTETABLE_MEMPOOL_USAGE); + + if (IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) + { + NvBool bSriovFull = IS_VIRTUAL_WITH_SRIOV(pGpu) && + !gpuIsWarBug200577889SriovHeavyEnabled(pGpu); + NvBool bBar1VAS = !!(vaspaceGetFlags(pVAS) & VASPACE_FLAGS_BAR_BAR1); + + // + // Skip RPC to the Host RM when local RM is managing page tables. Special case + // for early SR-IOV that only manages BAR1 and FLA page tables in the guest. + // + bRpcAlloc = !(gpuIsSplitVasManagementServerClientRmEnabled(pGpu) || + (bSriovFull && (bBar1VAS || pVirtualMemory->bFlaVAS))); + } + + if (bRpcAlloc) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_SILENT, + memdescCreate(&pAllocRequest->pMemDesc, pGpu, + pAllocRequest->pUserParams->size, 0, NV_TRUE, + ADDR_VIRTUAL, + NV_MEMORY_UNCACHED, + MEMDESC_FLAGS_SKIP_RESOURCE_COMPUTE), + done); + } + else + { + pFbAllocInfo = portMemAllocNonPaged(sizeof(FB_ALLOC_INFO)); + NV_ASSERT_TRUE_OR_GOTO(status, pFbAllocInfo != NULL, NV_ERR_NO_MEMORY, done); + + pFbAllocPageFormat = portMemAllocNonPaged(sizeof(FB_ALLOC_PAGE_FORMAT)); + NV_ASSERT_TRUE_OR_GOTO(status, pFbAllocPageFormat != NULL, NV_ERR_NO_MEMORY, done); + + portMemSet(pFbAllocInfo, 0, sizeof(FB_ALLOC_INFO)); + portMemSet(pFbAllocPageFormat, 0, sizeof(FB_ALLOC_PAGE_FORMAT)); + pFbAllocInfo->pageFormat = pFbAllocPageFormat; + + memUtilsInitFBAllocInfo(pAllocRequest->pUserParams, pFbAllocInfo, hClient, hParent); + + // Call memmgr to get memory. + NV_CHECK_OK_OR_GOTO(status, LEVEL_SILENT, + memmgrAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo), + done); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_SILENT, + virtmemAllocResources(pGpu, pMemoryManager, pAllocRequest, pFbAllocInfo), + done); + + bResAllocated = NV_TRUE; + } + + NV_ASSERT(pAllocRequest->pMemDesc != NULL); + + // Copy final heap size/offset back to client struct + // + // What should we return ?. System or the Device physical address. + // Return the Device physical address for now. + // May change with the heap refactoring !. + // + // System and Device physical address can be got using the nv0041CtrlCmdGetSurfacePhysAttr ctrl call + offsetOut = memdescGetPhysAddr(pAllocRequest->pMemDesc, AT_GPU, 0); + sizeOut = pAllocRequest->pMemDesc->Size; + pAllocData->limit = sizeOut - 1; + + // To handle < nv50 + if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) == + NVOS32_ATTR2_GPU_CACHEABLE_DEFAULT) + { + pAllocData->attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _NO, + pAllocData->attr2); + } + + if (DRF_VAL(OS32, _ATTR2, _GPU_CACHEABLE, pAllocData->attr2) == + NVOS32_ATTR2_GPU_CACHEABLE_YES) + { + gpuCacheAttrib = NV_MEMORY_CACHED; + } + else + { + gpuCacheAttrib = NV_MEMORY_UNCACHED; + } + + // + // Issue RPC if page tables are managed in the Host/GSP RM. This depends on + // the type object we have and the VGPU/GSP mode. We issue this prior to + // as memConstructCommon as RPC fills in pAllocData->offset. 
+ // + if (bRpcAlloc) + { + NV_RM_RPC_ALLOC_VIRTMEM(pGpu, + hClient, + hParent, + pAllocData->hVASpace, + pAllocRequest->hMemory, + &pAllocData->offset, + pAllocRequest->pMemDesc->Size, + attr, + attr2, + pAllocData->type, + pAllocData->flags, + pAllocData->height, + pAllocData->width, + pAllocData->format, + pAllocData->comprCovg, + pAllocData->zcullCovg, + pAllocData->rangeLo, + pAllocData->rangeHi, + pAllocData->alignment, + status); + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, status, done); + + // Update memory descriptor with results of the RPC + memdescDescribe(pAllocRequest->pMemDesc, + memdescGetAddressSpace(pAllocRequest->pMemDesc), + pAllocData->offset, + pAllocRequest->pMemDesc->Size); + + // Assign offset back to caller + offsetOut = memdescGetPhysAddr(pAllocRequest->pMemDesc, AT_GPU, 0); + } + + // + // The idea is to allocate virtual address space and record it (lo, limit) in this mem + // object. Later call MapMemoryDma(hThisMem, hSomePhysMem) to back it. + // + NV_CHECK_OK_OR_GOTO(status, LEVEL_SILENT, + memConstructCommon(pMemory, pAllocRequest->classNum, pAllocData->flags, + pAllocRequest->pMemDesc, pAllocData->owner, NULL, pAllocData->attr, + pAllocData->attr2, 0, pAllocData->type, NVOS32_MEM_TAG_NONE, NULL), + done); + pMemory->bRpcAlloc = bRpcAlloc; + + pVirtualMemory->hVASpace = pAllocData->hVASpace; + pVirtualMemory->bReserveVaOnAlloc = NV_TRUE; + + if (pAllocData->hVASpace != NV01_NULL_OBJECT) + { + NV_CHECK_OK_OR_GOTO(status, LEVEL_SILENT, + clientGetResourceRef(pRsClient, pAllocData->hVASpace, &pVASpaceRef), + done); + if (pVASpaceRef != NULL) + refAddDependant(pVASpaceRef, pResourceRef); + } + + NV_ASSERT(pMemory->pMemDesc); + NV_ASSERT(memdescGetAddressSpace(pMemory->pMemDesc) == ADDR_VIRTUAL); + memdescSetGpuCacheAttrib(pMemory->pMemDesc, gpuCacheAttrib); + + pAllocData->size = sizeOut; + pAllocData->offset = offsetOut; + + stdmemDumpOutputAllocParams(pAllocData); + +done: + if (status != NV_OK) + { + if (pAllocRequest->pMemDesc != NULL) + { + if (pMemory->pMemDesc != NULL) + { + memDestructCommon(pMemory); + pMemory->pMemDesc = NULL; + } + + if (bResAllocated) + { + memmgrFree(pGpu, pMemoryManager, NULL, + hClient, hParent, pAllocData->hVASpace, + pAllocData->owner, + pAllocRequest->pMemDesc); + } + + if (bRpcAlloc) + { + memdescDestroy(pAllocRequest->pMemDesc); + } + } + // vaspaceReserveMempool allocations are clean up is managed independently + } + + portMemFree(pFbAllocPageFormat); + portMemFree(pFbAllocInfo); + + if (bLockAcquired) + { + // UNLOCK: release GPUs lock + rmDeviceGpuLocksRelease(pGpu, GPUS_LOCK_FLAGS_NONE, NULL); + } + + return status; +} + +/*! + * @brief Handle destruction of VirtualMemory specific fields + */ +void +virtmemDestruct_IMPL +( + VirtualMemory *pVirtualMemory +) +{ + Memory *pMemory = staticCast(pVirtualMemory, Memory); + OBJGPU *pGpu = pMemory->pGpu; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NvHandle hClient; + NvHandle hParent; + NvHandle hVASpace; + MEMORY_DESCRIPTOR *pMemDesc; + NvU32 heapOwner; + NV_STATUS status = NV_OK; + + // Save needed state from memory object before common destruction + hClient = RES_GET_CLIENT_HANDLE(pVirtualMemory); + hParent = RES_GET_PARENT_HANDLE(pVirtualMemory); + hVASpace = pVirtualMemory->hVASpace; + pMemDesc = pMemory->pMemDesc; + heapOwner = pMemory->HeapOwner; + + NV_ASSERT(pMemDesc); + + memDestructCommon(pMemory); + + // + // NV50_MEMORY_VIRTUAL may have underlying heap allocation associated with the object + // to free depending on which RM/VGPU context we are in. 
This is tracked at object + // creation time. + // + // If we RPCed a NV50_MEMORY_VIRTUAL or we have a NV01_MEMORY_VIRTUAL than just destroy + // the memdesc and RPC the free if required. + // + if (pMemory->bRpcAlloc || pMemory->categoryClassId == NV01_MEMORY_VIRTUAL) + { + NV_ASSERT(pMemDesc->Allocated == 0); + memdescDestroy(pMemDesc); + } + else + { + NV_ASSERT(heapOwner != 0); + + // Get the relevant information from the client memory info and free it + status = memmgrFree(pGpu, + pMemoryManager, + NULL, + hClient, + hParent, + hVASpace, + heapOwner, + pMemDesc); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "VirtualMemory memmgrFree failed, client: %x, hVASpace: %x, gpu: %x\n", + RES_GET_CLIENT_HANDLE(pVirtualMemory), + hVASpace, + pGpu->gpuInstance); + } + } +} + +NV_STATUS +virtmemAllocResources +( + OBJGPU *pGpu, + MemoryManager *pMemoryManager, + MEMORY_ALLOCATION_REQUEST *pAllocRequest, + FB_ALLOC_INFO *pFbAllocInfo +) +{ + NV_STATUS status = NV_OK; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + RsClient *pRsClient = NULL; + NV_MEMORY_ALLOCATION_PARAMS *pVidHeapAlloc = pAllocRequest->pUserParams; + NvHandle hVASpace = pVidHeapAlloc->hVASpace; + NvBool bAllocedMemDesc = NV_FALSE; + NvBool bBar1VA = NV_FALSE; + NvBool bFlaVA = NV_FALSE; + + NV_ASSERT(!(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR1) && !(pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_WPR2)); + + NV_CHECK_OK_OR_GOTO(status, LEVEL_ERROR, memUtilsAllocMemDesc(pGpu, pAllocRequest, pFbAllocInfo, &pMemDesc, NULL, + ADDR_VIRTUAL, NV_TRUE, &bAllocedMemDesc), failed); + + // Only a kernel client can request for a protected allocation + if (pFbAllocInfo->pageFormat->flags & NVOS32_ALLOC_FLAGS_ALLOCATE_KERNEL_PRIVILEGED) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RS_PRIV_LEVEL privLevel; + + // + // This fn has usescases where call context is unavailable. + // In those cases, fall back to cached privileges. + // + if (pCallContext == NULL) + { + privLevel = rmclientGetCachedPrivilegeByHandle(pFbAllocInfo->hClient); + } + else + { + privLevel = pCallContext->secInfo.privLevel; + } + + if ( + (privLevel >= RS_PRIV_LEVEL_KERNEL)) + { + pFbAllocInfo->bIsKernelAlloc = NV_TRUE; + } + else + { + NV_PRINTF(LEVEL_ERROR, "NV_ERR_INSUFFICIENT_PERMISSIONS\n"); + status = NV_ERR_INSUFFICIENT_PERMISSIONS; + goto failed; + } + } + + // Allocate a virtual surface + if (pVidHeapAlloc->flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) + pFbAllocInfo->offset = pVidHeapAlloc->offset - pFbAllocInfo->alignPad; + + // + // pFbAllocInfo->hClient=0 is sometimes passed and not always needed, + // do not immediately fail if this call, only if the client needs to be used. + // + status = serverGetClientUnderLock(&g_resServ, pFbAllocInfo->hClient, &pRsClient); + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // In case of SR-IOV, the VAS is managed by the guest. So, no need + // to communicate with the host for VA allocation. + // + if (IS_VIRTUAL_WITH_SRIOV(pGpu)) + { + OBJVASPACE *pVAS = NULL; + + // Only try this if GetClient succeeded, else pass through the status from its fail. 
+ if (pRsClient != NULL) + status = vaspaceGetByHandleOrDeviceDefault(pRsClient, pFbAllocInfo->hDevice, hVASpace, &pVAS); + if (NV_OK != status) + goto failed; + + bBar1VA = !!(vaspaceGetFlags(pVAS) & VASPACE_FLAGS_BAR_BAR1); + bFlaVA = !!(vaspaceGetFlags(pVAS) & VASPACE_FLAGS_FLA); + } + + // For Virtual FLA allocations, we don't have to RPC + if ((!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) || + bBar1VA || bFlaVA || + gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + OBJVASPACE *pVAS = NULL; + NvU64 align = pFbAllocInfo->align + 1; + VAS_ALLOC_FLAGS flags = {0}; + NvU64 pageSizeLockMask = 0; + pFbAllocInfo->internalflags = pAllocRequest->internalflags; + + // Only try this if GetClient succeeded, else pass through the status from its fail. + if (pRsClient != NULL) + status = vaspaceGetByHandleOrDeviceDefault(pRsClient, pFbAllocInfo->hDevice, hVASpace, &pVAS); + if (NV_OK != status) + goto failed; + + status = vaspaceFillAllocParams(pVAS, pFbAllocInfo, + &pFbAllocInfo->size, &align, + &pVidHeapAlloc->rangeLo, &pVidHeapAlloc->rangeHi, + &pageSizeLockMask, &flags); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "FillAllocParams failed.\n"); + DBG_BREAKPOINT(); + } + else + { + status = vaspaceAlloc(pVAS, pFbAllocInfo->size, align, + pVidHeapAlloc->rangeLo, pVidHeapAlloc->rangeHi, + pageSizeLockMask, flags, &pFbAllocInfo->offset); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, + "VA Space alloc failed! Status Code: 0x%x Size: 0x%llx RangeLo: 0x%llx," + " RangeHi: 0x%llx, pageSzLockMask: 0x%llx\n", + status, pFbAllocInfo->size, + pVidHeapAlloc->rangeLo, pVidHeapAlloc->rangeHi, + pageSizeLockMask); + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto failed; + } + + memdescDescribe(pMemDesc, ADDR_VIRTUAL, + pFbAllocInfo->offset, + pFbAllocInfo->size); + + // Return alignment info. + pFbAllocInfo->align = align - 1; + pVidHeapAlloc->alignment = align; + } + } + else + { + // Possibly dead code: IS_VIRTUAL and bSplitVAs are only enabled on legacy vGPU. + memdescDescribe(pMemDesc, ADDR_VIRTUAL, memdescGetPte(pMemDesc, AT_GPU, 0), + pMemDesc->Size); + } + + // + // Report default (any) page size for virtual allocations with no page size restriction. + // Actual page size will be determined at map time. 
+ // + if (FLD_TEST_DRF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT, pFbAllocInfo->pageFormat->attr)) + { + pFbAllocInfo->retAttr = FLD_SET_DRF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT, pFbAllocInfo->retAttr); + } + + // get possibly updated surface attributes + pVidHeapAlloc->attr = pFbAllocInfo->retAttr; + pVidHeapAlloc->attr2 = pFbAllocInfo->retAttr2; + + // update contiguity attribute to reflect memdesc + if (memdescGetContiguity(pAllocRequest->pMemDesc, AT_GPU)) + { + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _CONTIGUOUS, + pVidHeapAlloc->attr); + } + else + { + pVidHeapAlloc->attr = FLD_SET_DRF(OS32, _ATTR, _PHYSICALITY, + _NONCONTIGUOUS, + pVidHeapAlloc->attr); + } + + pVidHeapAlloc->offset = pFbAllocInfo->offset; + + if (pAllocRequest->pHwResource != NULL) + { + pAllocRequest->pHwResource->attr = pFbAllocInfo->retAttr; + pAllocRequest->pHwResource->attr2 = pFbAllocInfo->retAttr2; + pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId; + pAllocRequest->pHwResource->comprCovg = pFbAllocInfo->comprCovg; + pAllocRequest->pHwResource->ctagOffset = pFbAllocInfo->ctagOffset; + pAllocRequest->pHwResource->hwResId = pFbAllocInfo->hwResId; + } + + return NV_OK; + +failed: + memmgrFreeHwResources(pGpu, pMemoryManager, pFbAllocInfo); + + if (bAllocedMemDesc) + { + memdescDestroy(pAllocRequest->pMemDesc); + pAllocRequest->pMemDesc = NULL; + } + + return status; +} + +/* + * @brief Interface to vaspaceReserveMempool to reserve PMA memory for page tables + */ +NV_STATUS virtmemReserveMempool_IMPL +( + VirtualMemory *pVirtualMemory, + OBJGPU *pGpu, + NvHandle hDevice, + NvU64 size, + NvU32 pageSizeMask +) +{ + RsClient *pClient = RES_GET_CLIENT(pVirtualMemory); + OBJVASPACE *pVAS = NULL; + NvU32 mempoolFlags = VASPACE_RESERVE_FLAGS_NONE; + + // + // Reject mappings for a legacy NV01_MEMORY_SYSTEM_DYNAMIC + // object silently. + // + if (pVirtualMemory->hVASpace == NV_MEMORY_VIRTUAL_SYSMEM_DYNAMIC_HVASPACE) + { + return NV_ERR_INVALID_OBJECT; + } + + if (pVirtualMemory->bOptimizePageTableMempoolUsage) + { + mempoolFlags = VASPACE_RESERVE_FLAGS_ALLOC_UPTO_TARGET_LEVEL_ONLY; + } + + NV_ASSERT_OK_OR_RETURN( + vaspaceGetByHandleOrDeviceDefault(pClient, hDevice, + pVirtualMemory->hVASpace, &pVAS)); + + return vaspaceReserveMempool(pVAS, pGpu, RES_GET_CLIENT_HANDLE(pVirtualMemory), + size, pageSizeMask, mempoolFlags); +} + +/*! + * @brief Does this VirtualMemory object use the specified hVASpace? + */ +NvBool +virtmemMatchesVASpace_IMPL +( + VirtualMemory *pVirtualMemory, + NvHandle hClient, + NvHandle hVASpace +) +{ + return (RES_GET_CLIENT_HANDLE(pVirtualMemory) == hClient) && (pVirtualMemory->hVASpace == hVASpace); +} + +/*! + * @brief Helper to look up a VirtualMemory object + */ +NV_STATUS +virtmemGetByHandleAndDevice_IMPL +( + RsClient *pClient, + NvHandle hMemory, + NvHandle hDevice, + VirtualMemory **ppVirtualMemory +) +{ + Memory *pMemory; + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + memGetByHandleAndDevice(pClient, hMemory, hDevice, &pMemory)); + + *ppVirtualMemory = dynamicCast(pMemory, VirtualMemory); + + return (*ppVirtualMemory != NULL) ? NV_OK : NV_ERR_INVALID_OBJECT_HANDLE; +} + +/*! 
+ * @brief Create a CPU mapping in addition to the DMA mapping + */ +static NV_STATUS +_virtmemAllocKernelMapping +( + OBJGPU *pGpu, + OBJVASPACE *pVAS, + CLI_DMA_MAPPING_INFO *pDmaMappingInfo, + NvU64 offset, + NvU64 size, + Memory *pMemoryInfo +) +{ + NV_STATUS status = NV_OK; + NvBool bCoherentCpuMapping = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING); + NvU32 gpuSubDevInst; + RmPhysAddr bar1PhysAddr; + + SLI_LOOP_START(SLI_LOOP_FLAGS_NONE) + + gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + if (bCoherentCpuMapping) + { + // Use a temp pointer to prevent overwriting the previous pointer by accident + NvP64 tempCpuPtr = NvP64_NULL; + MEMORY_DESCRIPTOR *pMemDesc = memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu); + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NV_PRINTF(LEVEL_INFO, + "Allocating coherent link mapping. length=%lld, memDesc->size=%lld\n", + size, pDmaMappingInfo->pMemDesc->Size); + + NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)); + NV_ASSERT(pDmaMappingInfo->pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); + + tempCpuPtr = kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc); + if (tempCpuPtr == NULL) + { + status = NV_ERR_GENERIC; + } + else + { + status = NV_OK; + tempCpuPtr = NvP64_PLUS_OFFSET(tempCpuPtr, offset); + } + + pDmaMappingInfo->KernelVAddr[gpuSubDevInst] = NvP64_VALUE(tempCpuPtr); + } + else + { + // + // Allocate GPU virtual address space for the video memory region + // for those GPUs that support it. + // + pDmaMappingInfo->FbApertureLen[gpuSubDevInst] = pDmaMappingInfo->pMemDesc->Size; + if (RMCFG_FEATURE_PLATFORM_GSP) + { + status = osMapSystemMemory(pMemoryInfo->pMemDesc, + offset, + pDmaMappingInfo->pMemDesc->Size, + NV_TRUE /*Kernel*/, + NV_PROTECT_READ_WRITE, + (NvP64 *) &pDmaMappingInfo->KernelVAddr[gpuSubDevInst], + (NvP64 *) &pDmaMappingInfo->KernelPriv); + + if (status != NV_OK) + { + pDmaMappingInfo->FbApertureLen[gpuSubDevInst] = 0; + pDmaMappingInfo->FbAperture[gpuSubDevInst] = 0; + pDmaMappingInfo->KernelPriv = 0; + SLI_LOOP_BREAK; + } + } + else + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + NvHandle hClient = NV01_NULL_OBJECT; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + if ((pCallContext != NULL) && (pCallContext->pClient != NULL)) + { + hClient = pCallContext->pClient->hClient; + } + + status = kbusMapFbAperture_HAL(pGpu, pKernelBus, + pMemoryInfo->pMemDesc, offset, + &pDmaMappingInfo->FbAperture[gpuSubDevInst], + &pDmaMappingInfo->FbApertureLen[gpuSubDevInst], + BUS_MAP_FB_FLAGS_MAP_UNICAST, hClient); + + if (status != NV_OK) + { + pDmaMappingInfo->FbApertureLen[gpuSubDevInst] = 0; + pDmaMappingInfo->FbAperture[gpuSubDevInst] = 0; + SLI_LOOP_BREAK; + } + + bar1PhysAddr = gpumgrGetGpuPhysFbAddr(pGpu) + pDmaMappingInfo->FbAperture[gpuSubDevInst]; + status = osMapPciMemoryKernelOld(pGpu, bar1PhysAddr, + pDmaMappingInfo->pMemDesc->Size, + NV_PROTECT_READ_WRITE, + &pDmaMappingInfo->KernelVAddr[gpuSubDevInst], + NV_MEMORY_WRITECOMBINED); + } + } + + if (status != NV_OK) + { + SLI_LOOP_BREAK; + } + + SLI_LOOP_END + + if (status != NV_OK) + { + _virtmemFreeKernelMapping(pGpu, pDmaMappingInfo); + } + + return status; +} +/*! 
+ * @brief Free CPU mapping + */ +static void +_virtmemFreeKernelMapping +( + OBJGPU *pGpu, + CLI_DMA_MAPPING_INFO *pDmaMappingInfo +) +{ + NvU32 gpuSubDevInst; + NvBool bCoherentCpuMapping = pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING); + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + + gpuSubDevInst = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + // Unmap a kernel CPU mapping if one exists + if (pDmaMappingInfo->KernelVAddr[gpuSubDevInst] != NULL) + { + if (bCoherentCpuMapping) + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + MEMORY_DESCRIPTOR *pMemDesc = memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu); + kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc); + } + else + { + osUnmapPciMemoryKernelOld(pGpu, pDmaMappingInfo->KernelVAddr[gpuSubDevInst]); + } + + pDmaMappingInfo->KernelVAddr[gpuSubDevInst] = NULL; + } + + // Unmap the FB aperture mapping if one exists + if ((pDmaMappingInfo->FbApertureLen[gpuSubDevInst]) && (!bCoherentCpuMapping)) + { + if (RMCFG_FEATURE_PLATFORM_GSP) + { + // This is a no-op in GSP, but document it here as code in case it changes. + osUnmapSystemMemory(pDmaMappingInfo->pMemDesc, + NV_TRUE /*Kernel*/, + 0 /*ProcessId*/, + (NvP64)pDmaMappingInfo->FbAperture[gpuSubDevInst], + NV_PTR_TO_NvP64(pDmaMappingInfo->KernelPriv)); + } + else + { + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + kbusUnmapFbAperture_HAL(pGpu, + pKernelBus, + pDmaMappingInfo->pMemDesc, + pDmaMappingInfo->FbAperture[gpuSubDevInst], + pDmaMappingInfo->FbApertureLen[gpuSubDevInst], + BUS_MAP_FB_FLAGS_MAP_UNICAST); + } + pDmaMappingInfo->FbAperture[gpuSubDevInst] = 0; + pDmaMappingInfo->FbApertureLen[gpuSubDevInst] = 0; + pDmaMappingInfo->KernelPriv = 0; + } + + SLI_LOOP_END +} + +/*! 
+ * @brief Map an object into a VirtualMemory object + */ +NV_STATUS +virtmemMapTo_IMPL +( + VirtualMemory *pVirtualMemory, + RS_RES_MAP_TO_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + Memory *pMemory = staticCast(pVirtualMemory, Memory); + OBJGPU *pGpu = pParams->pGpu; + OBJGPU *pSrcGpu = pParams->pSrcGpu; + RsClient *pClient = RES_GET_CLIENT(pVirtualMemory); + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + RsResourceRef *pMemoryRef = pParams->pMemoryRef; + NvHandle hClient = pClient->hClient; + NvHandle hBroadcastDevice = pParams->hBroadcastDevice; + NvHandle hVirtualMem = RES_GET_HANDLE(pVirtualMemory); + NvHandle hMemoryDevice = pParams->hMemoryDevice; + NvU32 gpuMask = pParams->gpuMask; + NvU64 offset = pParams->offset; // offset into pMemoryRef to map + NvU64 length = pParams->length; + NvU32 flags = pParams->flags; + NvU32 p2p = DRF_VAL(OS46, _FLAGS, _P2P_ENABLE, pParams->flags); + + VirtMemAllocator *pDma = GPU_GET_DMA(pGpu); + MEMORY_DESCRIPTOR *pSrcMemDesc = pParams->pSrcMemDesc; + NvU64 *pDmaOffset = pParams->pDmaOffset; // return VirtualMemory offset + CLI_DMA_MAPPING_INFO *pDmaMappingInfo = NULL; + CLI_DMA_MAPPING_INFO *pDmaMappingInfo_old = NULL; + OBJVASPACE *pVas = NULL; + Memory *pSrcMemory = dynamicCast(pMemoryRef->pResource, Memory); + + NvU32 tgtAddressSpace = ADDR_UNKNOWN; + + NvBool bDmaMapNeeded = pParams->bDmaMapNeeded; + NvBool bDmaMapped = NV_FALSE; + NvBool bDmaUnmapped = NV_FALSE; + NvBool bDmaMappingRegistered = NV_FALSE; + NvBool bFlaMapping = pParams->bFlaMapping; + NvBool bIsIndirectPeer = NV_FALSE; + NvBool bEncrypted; + NvBool bIsSysmem = NV_FALSE; + NvBool bBar1P2P = (p2p && kbusIsPcieBar1P2PMapping_HAL(pGpu, + GPU_GET_KERNEL_BUS(pGpu), + pSrcGpu, + GPU_GET_KERNEL_BUS(pSrcGpu))); + + // + // Allow unicast on NV01_MEMORY_VIRTUAL object, but maintain the broadcast + // requirement for NV50_MEMORY_VIRTUAL. + // + if (pParams->bSubdeviceHandleProvided && !pVirtualMemory->bAllowUnicastMapping) + { + NV_PRINTF(LEVEL_ERROR, "Unicast mappings into virtual memory object not supported.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + status = vaspaceGetByHandleOrDeviceDefault(pClient, hBroadcastDevice, pVirtualMemory->hVASpace, &pVas); + if (status != NV_OK) + return status; + + // + // Use the encryption setting of the virtual allocation. + // This makes sense, since the same physical mem descriptor could have + // more than one mapping, each with different encryption settings. + // + bEncrypted = memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_ENCRYPTED); + + // Validate the offset and limit passed in. + if (offset + length > pSrcMemDesc->Size) + return NV_ERR_INVALID_BASE; + + status = intermapCreateDmaMapping(pClient, pMemoryRef, hBroadcastDevice, hVirtualMem, &pDmaMappingInfo, flags); + if (status != NV_OK) + return status; + + if (bBar1P2P) + { + DMA_BAR1P2P_MAPPING_PRARAMS params = {0}; + + params.pVas = pVas; + params.pPeerGpu = pSrcGpu; + params.pPeerMemDesc = pSrcMemDesc; + params.flags = flags; + params.offset = offset; + params.length = length; + params.pDmaMappingInfo = pDmaMappingInfo; + + status = dmaAllocBar1P2PMapping_HAL(pGpu, pDma, ¶ms); + if (status != NV_OK) + goto done; + + // Adjust local variables for the BAR1 P2P mappings + pSrcMemDesc = params.pMemDescOut; + flags = params.flagsOut; + offset = params.offsetOut; + } + + // + // Determine target address space. If we're mapping fbmem from + // one gpu for use by another, then we need to treat that memory as + // ADDR_SYSMEM. 
+ // + tgtAddressSpace = memdescGetAddressSpace(memdescGetMemDescFromGpu(pSrcMemDesc, pGpu)); + if ((pSrcGpu != pGpu) && (tgtAddressSpace == ADDR_FBMEM)) + { + tgtAddressSpace = ADDR_SYSMEM; + + if (gpumgrCheckIndirectPeer(pGpu, pSrcGpu)) + bIsIndirectPeer = NV_TRUE; + + // IOMMU mapping not needed for GPU P2P accesses on FB pages. + bDmaMapNeeded = NV_FALSE; + } + + if (tgtAddressSpace == ADDR_FABRIC || tgtAddressSpace == ADDR_FABRIC_V2) + { + // IOMMU mapping not needed for GPU P2P accesses on FB pages. + bDmaMapNeeded = NV_FALSE; + } + + // Different cases for vidmem & system memory/fabric memory. + bIsSysmem = (tgtAddressSpace == ADDR_SYSMEM); + + if (bIsSysmem || (tgtAddressSpace == ADDR_FABRIC) || (tgtAddressSpace == ADDR_FABRIC_V2)) + { + // offset needs to be 0 when reusing a mapping. + if ((DRF_VAL(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, flags) == NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC_TRUE) && + (offset != 0)) + { + status = NV_ERR_INVALID_OFFSET; + goto done; + } + + // + // Create a MEMORY_DESCRIPTOR describing this region of the memory + // alloc in question + // + status = memdescCreateSubMem(&pDmaMappingInfo->pMemDesc, pSrcMemDesc, pGpu, offset, length); + if (status != NV_OK) + goto done; + *pParams->ppMemDesc = pDmaMappingInfo->pMemDesc; + + // + // If system memory does not support compression, the virtual kind is compressible, + // and being mapped into system memory fallback to using the uncompressed kind. + // + if (FLD_TEST_DRF(OS46, _FLAGS, _PAGE_KIND, _VIRTUAL, flags) && + (tgtAddressSpace == ADDR_SYSMEM) && + (!memmgrComprSupported(pMemoryManager, ADDR_SYSMEM))) + { + NvU32 kind = memdescGetPteKind(pMemory->pMemDesc); + NvU32 updatedKind = memmgrGetUncompressedKind_HAL(pGpu, pMemoryManager, kind, 0); + NvU32 dmaKind = memdescGetPteKind(pDmaMappingInfo->pMemDesc); + + if (dmaKind != updatedKind) + { + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY); + NV_ASSERT(memdescGetFlag(memdescGetMemDescFromGpu(pMemory->pMemDesc, pGpu), MEMDESC_FLAGS_SET_KIND)); + memdescSetPteKind(memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu), updatedKind); + SLI_LOOP_END; + } + } + + // if GPUs are indirect peers, create TCE mappings + if (bIsIndirectPeer) + { + // + // TODO: Ideally memdescMapIommu should be called on FB memdesc with + // pSrcGpu That would clearly convey that memory is owned by pSrcGpu and + // we are trying to create IOMMU mappings for pGpu. This effort is being + // tracked in bug 2043603 + // + status = memdescMapIommu(pDmaMappingInfo->pMemDesc, pGpu->busInfo.iovaspaceId); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "DMA map pages failed for requested GPU!\n"); + goto done; + } + } + else if (bDmaMapNeeded) + { + status = osDmaMapPages(pGpu->pOsGpuInfo, pDmaMappingInfo->pMemDesc); + if ((status != NV_OK) && (status != NV_ERR_NOT_SUPPORTED)) + { + NV_PRINTF(LEVEL_ERROR, "DMA map pages failed for requested GPU!\n"); + goto done; + } + // + // Some operating systems return NV_ERR_NOT_SUPPORTED. Assign NV_OK to + // status since we return status from this function and NV_ERR_NOT_SUPPORTED + // may be considered as failure in calling function. 
+ // + status = NV_OK; + bDmaMapped = NV_TRUE; + } + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + memdescSetFlag(memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu), + MEMDESC_FLAGS_ENCRYPTED, + bEncrypted); + SLI_LOOP_END + + // Monolithic CPU RM or SPLIT_VAS_MGMT + if (!pMemory->bRpcAlloc || gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + pDmaMappingInfo->DmaOffset = *pDmaOffset; // in case this is 'in' + + // allocate mapping in VirtualMemory object + status = dmaAllocMap(pGpu, pDma, pVas, pVirtualMemory, pSrcMemory, pDmaMappingInfo); + if (status != NV_OK) + goto done; + + status = intermapRegisterDmaMapping(pClient, hBroadcastDevice, hVirtualMem, pDmaMappingInfo, pDmaMappingInfo->DmaOffset, gpuMask); + if (status != NV_OK) + { + dmaFreeMap(pGpu, pDma, pVas, + pVirtualMemory, pDmaMappingInfo, + DRF_DEF(OS47, _FLAGS, _DEFER_TLB_INVALIDATION, _FALSE)); + goto done; + } + + bDmaMappingRegistered = NV_TRUE; + + // If a kernel mapping has been requested, create one + if (DRF_VAL(OS46, _FLAGS, _KERNEL_MAPPING, flags) == NVOS46_FLAGS_KERNEL_MAPPING_ENABLE) + { + status = memdescMapOld(pDmaMappingInfo->pMemDesc, + 0, + pDmaMappingInfo->pMemDesc->Size, + NV_TRUE, NV_PROTECT_READ_WRITE, + &pDmaMappingInfo->KernelVAddr[gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu))], + &pDmaMappingInfo->KernelPriv); + + if (status != NV_OK) + goto done; + } + + *pDmaOffset = pDmaMappingInfo->DmaOffset; + } // !IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu) + } + else if (tgtAddressSpace == ADDR_FBMEM) + { + // + // Create a MEMORY_DESCRIPTOR describing this region of the memory alloc + // in question + // + status = memdescCreateSubMem(&pDmaMappingInfo->pMemDesc, pSrcMemDesc, pGpu, offset, length); + if (status != NV_OK) + goto done; + *pParams->ppMemDesc = pDmaMappingInfo->pMemDesc; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY); + memdescSetFlag(memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu), + MEMDESC_FLAGS_ENCRYPTED, + bEncrypted); + SLI_LOOP_END; + + if (FLD_TEST_DRF(OS46, _FLAGS, _PAGE_KIND, _VIRTUAL, flags)) + { + // + // Want to make sure that the virtual kind was set beforehand + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | SLI_LOOP_FLAGS_IGNORE_REENTRANCY); + NV_ASSERT(memdescGetFlag(memdescGetMemDescFromGpu(pMemory->pMemDesc, pGpu), MEMDESC_FLAGS_SET_KIND)); + memdescSetPteKind(memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu), + memdescGetPteKind(pMemory->pMemDesc)); + SLI_LOOP_END; + } + + pDmaMappingInfo->DmaOffset = *pDmaOffset; // in case this is 'in' + + // Monolithic CPU RM or SPLIT_VAS_MGMT + if (!pMemory->bRpcAlloc || gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + // allocate mapping in VirtualMemory object + status = dmaAllocMap(pGpu, pDma, pVas, pVirtualMemory, pSrcMemory, pDmaMappingInfo); + if (status != NV_OK) + goto done; + + *pDmaOffset = pDmaMappingInfo->DmaOffset; + + status = intermapRegisterDmaMapping(pClient, hBroadcastDevice, hVirtualMem, pDmaMappingInfo, pDmaMappingInfo->DmaOffset, gpuMask); + if (status != NV_OK) + { + dmaFreeMap(pGpu, pDma, pVas, + pVirtualMemory, pDmaMappingInfo, + DRF_DEF(OS47, _FLAGS, _DEFER_TLB_INVALIDATION, _FALSE)); + goto done; + } + + bDmaMappingRegistered = NV_TRUE; + + if (DRF_VAL(OS46, _FLAGS, _KERNEL_MAPPING, flags) == NVOS46_FLAGS_KERNEL_MAPPING_ENABLE) + { + status = _virtmemAllocKernelMapping(pGpu, pVas, pDmaMappingInfo, offset, length, pSrcMemory); + if (status != NV_OK) + goto done; + } + + *pDmaOffset = 
pDmaMappingInfo->DmaOffset; + } // if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) + } + else + { + // unknown (or mixed vidmem+sysmem?) mem case + status = NV_ERR_INVALID_OBJECT_HANDLE; + goto done; + } + + if (RMCFG_CLASS_NV50_P2P && + !bFlaMapping && + (bBar1P2P || DRF_VAL(OS46, _FLAGS, _P2P_ENABLE, pDmaMappingInfo->Flags) == NVOS46_FLAGS_P2P_ENABLE_NOSLI)) + { + NvU32 subDevIdSrc; + NvU32 subDevIdTgt; + + // + // if we are on SLI and trying to map peer memory between two GPUs + // on the same device, we don't rely on dynamic p2p mailbox setup. + // SLI uses static p2p mailbox and hence will not have any + // P2P object associated with it + // + if ((hBroadcastDevice == hMemoryDevice) && IsSLIEnabled(pGpu)) + { + goto vgpu_send_rpc; + } + + subDevIdSrc = DRF_VAL(OS46, _FLAGS, _P2P_SUBDEV_ID_SRC, pDmaMappingInfo->Flags); + subDevIdTgt = DRF_VAL(OS46, _FLAGS, _P2P_SUBDEV_ID_TGT, pDmaMappingInfo->Flags); + + status = CliAddP2PDmaMappingInfo(hClient, + hBroadcastDevice, subDevIdTgt, + hMemoryDevice, subDevIdSrc, + pDmaMappingInfo); + if (NV_OK != status) + { + dmaFreeMap(pGpu, pDma, pVas, + pVirtualMemory, pDmaMappingInfo, + DRF_DEF(OS47, _FLAGS, _DEFER_TLB_INVALIDATION, _FALSE)); + + intermapDelDmaMapping(pClient, hBroadcastDevice, hVirtualMem, *pDmaOffset, gpuMask, NULL); + pDmaMappingInfo = NULL; + return status; + } + + // cache the pointer + pDmaMappingInfo_old = pDmaMappingInfo; + } + +vgpu_send_rpc: + + if (pMemory->bRpcAlloc) + { + NV_RM_RPC_MAP_MEMORY_DMA(pGpu, hClient, hBroadcastDevice, hVirtualMem, pMemoryRef->hResource, + offset, length, flags, pDmaOffset, status); + if (status != NV_OK) + goto done; + + if ((IS_VIRTUAL(pGpu) || IS_GSP_CLIENT(pGpu)) && + !gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + // + // vGPU doesn't understand subdevice handles. But clients map memory + // with subdevice handles and we don't want that to fail on vGPU. + // Currently, we just pass down the broadcast device handle to the host + // (which should be equivalent if SLI is disabled). This will need to + // be revisited if vGPU ever supports SLI. 
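+            // The NV_ASSERT below checks that assumption.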
+ // + NV_ASSERT(!IsSLIEnabled(pGpu)); + + // delete the old copy + if (RMCFG_CLASS_NV50_P2P && + (pDmaMappingInfo_old != NULL)) + { + status = CliUpdateP2PDmaMappingInList(hClient, pDmaMappingInfo, *pDmaOffset); + NV_ASSERT(status == NV_OK); + } + + pDmaMappingInfo->DmaOffset = *pDmaOffset; + + status = intermapRegisterDmaMapping(pClient, hBroadcastDevice, hVirtualMem, pDmaMappingInfo, + pDmaMappingInfo->DmaOffset, gpuMask); + if (status != NV_OK) + goto done; + + bDmaMappingRegistered = NV_TRUE; + + if (tgtAddressSpace == ADDR_SYSMEM) + { + // If a kernel mapping has been requested, create one + if (DRF_VAL(OS46, _FLAGS, _KERNEL_MAPPING, flags) == NVOS46_FLAGS_KERNEL_MAPPING_ENABLE) + { + status = memdescMapOld(pDmaMappingInfo->pMemDesc, + 0, + pDmaMappingInfo->pMemDesc->Size, + NV_TRUE, NV_PROTECT_READ_WRITE, + &pDmaMappingInfo->KernelVAddr[gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu))], + &pDmaMappingInfo->KernelPriv); + if (status != NV_OK) + goto done; + } + } + } + } + +done: + if (status != NV_OK) + { + if (pDmaMappingInfo != NULL) + { + if ((pDmaMappingInfo->pMemDesc != NULL) && + FLD_TEST_DRF(OS46, _FLAGS, _KERNEL_MAPPING, _ENABLE, flags)) + { + // + // if Kernel cookie exists and mapping is in sysmem, free sysmem mapping + // for ADDR_FBMEM function determines whether mapping was created itself + // + if ((pDmaMappingInfo->KernelPriv != NULL) && + (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_SYSMEM)) + { + memdescUnmapOld(pDmaMappingInfo->pMemDesc, NV_TRUE, 0, + pDmaMappingInfo->KernelVAddr[gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu))], + pDmaMappingInfo->KernelPriv); + pDmaMappingInfo->KernelPriv = NULL; + } + else if (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_FBMEM) + { + _virtmemFreeKernelMapping(pGpu, pDmaMappingInfo); + } + } + + if (pDmaMappingInfo->pMemDesc != NULL) + { + NV_STATUS status; + + if (bIsIndirectPeer) + { + memdescUnmapIommu(pDmaMappingInfo->pMemDesc, pGpu->busInfo.iovaspaceId); + } + else if (bDmaMapped) + { + // Unmap the DMA mapped pages in failure case if any. + status = osDmaUnmapPages(pGpu->pOsGpuInfo, pDmaMappingInfo->pMemDesc); + if (!(status == NV_OK || status == NV_ERR_NOT_SUPPORTED)) + { + NV_PRINTF(LEVEL_ERROR, "DMA unmap pages failed for requested GPU!\n"); + } + } + } + + dmaFreeBar1P2PMapping_HAL(pDma, pDmaMappingInfo); + + memdescDestroy(pDmaMappingInfo->pMemDesc); + pDmaMappingInfo->pMemDesc = NULL; + + if (bDmaMappingRegistered) + { + intermapDelDmaMapping(pClient, hBroadcastDevice, hVirtualMem, *pDmaOffset, gpuMask, &bDmaUnmapped); + } + if (!bDmaUnmapped) + { + // Explicitly free the DMA mapping if intermapDelDmaMapping was not able to clean up + intermapFreeDmaMapping(pDmaMappingInfo); + } + } + } + + return status; +} + +/*! 
+ * @brief Unmap object from VirtualMemory object + */ +NV_STATUS +virtmemUnmapFrom_IMPL +( + VirtualMemory *pVirtualMemory, + RS_RES_UNMAP_FROM_PARAMS *pParams +) +{ + OBJGPU *pGpu = pParams->pGpu; + Memory *pMemory = staticCast(pVirtualMemory, Memory); + RsClient *pClient = RES_GET_CLIENT(pVirtualMemory); + NvHandle hClient = pClient->hClient; + NvHandle hMemory = pParams->hMemory; + NvHandle hVirtualMem = RES_GET_HANDLE(pVirtualMemory); + NvHandle hBroadcastDevice = pParams->hBroadcastDevice; + NvU32 gpuMask = pParams->gpuMask; + NvU64 dmaOffset = pParams->dmaOffset; + OBJVASPACE *pVas = NULL; + NV_STATUS status = NV_OK; + NvBool bIsIndirectPeer = NV_FALSE; + NvBool bReturnStatus; + + CLI_DMA_MAPPING_INFO *pDmaMappingInfo = NULL; + + if (hMemory != NV01_NULL_OBJECT) + { + RsResourceRef *pSrcMemoryRef; + Memory *pMemorySrc; + + if (clientGetResourceRef(pClient, hMemory, &pSrcMemoryRef) != NV_OK) + return NV_ERR_OBJECT_NOT_FOUND; + + status = rmresCheckMemInterUnmap(dynamicCast(pSrcMemoryRef->pResource, RmResource), pParams->bSubdeviceHandleProvided); + + // Exit if failed or invalid class, otherwise continue on to next part + if (status != NV_OK) + return status; + + pMemorySrc = dynamicCast(pSrcMemoryRef->pResource, Memory); + if (pMemorySrc != NULL) + { + if (gpumgrCheckIndirectPeer(pMemorySrc->pGpu, pGpu)) + bIsIndirectPeer = NV_TRUE; + } + } + + if (pParams->bSubdeviceHandleProvided && !pVirtualMemory->bAllowUnicastMapping) + { + NV_PRINTF(LEVEL_ERROR, "Unicast DMA mappings into virtual memory object not supported.\n"); + return NV_ERR_NOT_SUPPORTED; + } + + status = vaspaceGetByHandleOrDeviceDefault(pClient, hBroadcastDevice, pVirtualMemory->hVASpace, &pVas); + if (status != NV_OK) + return status; + + // Get DMA mapping info. + bReturnStatus = CliGetDmaMappingInfo(hClient, hBroadcastDevice, hVirtualMem, dmaOffset, gpuMask, &pDmaMappingInfo); + if (!bReturnStatus) + return NV_ERR_INVALID_OBJECT_HANDLE; + + // + // if Kernel cookie exists and mapping is in sysmem, free sysmem mapping + // for ADDR_FBMEM function determines whether mapping was created itself + // + if ((pDmaMappingInfo->KernelPriv != NULL) && + (memdescGetAddressSpace(pDmaMappingInfo->pMemDesc) == ADDR_SYSMEM)) + { + memdescUnmapOld(pDmaMappingInfo->pMemDesc, NV_TRUE, 0, + pDmaMappingInfo->KernelVAddr[gpumgrGetSubDeviceInstanceFromGpu(gpumgrGetParentGPU(pGpu))], + pDmaMappingInfo->KernelPriv); + pDmaMappingInfo->KernelPriv = NULL; + } + else if (memdescGetAddressSpace(memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu)) == ADDR_FBMEM) + { + _virtmemFreeKernelMapping(pGpu, pDmaMappingInfo); + } + + // if this was peer mapped context dma, remove it from P2P object + if (RMCFG_CLASS_NV50_P2P && (pDmaMappingInfo->pP2PInfo != NULL)) + { + CliDelP2PDmaMappingInfo(hClient, pDmaMappingInfo); + + dmaFreeBar1P2PMapping_HAL(GPU_GET_DMA(pGpu), pDmaMappingInfo); + } + + if (!pMemory->bRpcAlloc || gpuIsSplitVasManagementServerClientRmEnabled(pGpu)) + { + // free mapping in context dma + dmaFreeMap(pGpu, GPU_GET_DMA(pGpu), pVas, pVirtualMemory, pDmaMappingInfo, pParams->flags); + + if ((memdescGetAddressSpace(memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu)) == ADDR_FBMEM) && + bIsIndirectPeer) + { + memdescUnmapIommu(pDmaMappingInfo->pMemDesc, pGpu->busInfo.iovaspaceId); + } + else if ((memdescGetAddressSpace(memdescGetMemDescFromGpu(pDmaMappingInfo->pMemDesc, pGpu)) == ADDR_SYSMEM) && + (pDmaMappingInfo->pMemDesc->pGpu != pGpu)) + { + status = osDmaUnmapPages(pGpu->pOsGpuInfo, pDmaMappingInfo->pMemDesc); + if 
(!(status == NV_OK || status == NV_ERR_NOT_SUPPORTED)) + { + NV_PRINTF(LEVEL_ERROR, "DMA unmap pages failed for requested GPU!\n"); + } + // + // Some operating systems return NV_ERR_NOT_SUPPORTED. Assign NV_OK to + // status since we return status from this function and NV_ERR_NOT_SUPPORTED + // may be considered as failure in calling function. + // + status = NV_OK; + } + } + + while (bReturnStatus) + { + // free memory descriptor + memdescFree(pDmaMappingInfo->pMemDesc); + memdescDestroy(pDmaMappingInfo->pMemDesc); + pDmaMappingInfo->pMemDesc = NULL; + + // delete client dma mapping + intermapDelDmaMapping(pClient, hBroadcastDevice, hVirtualMem, dmaOffset, gpuMask, NULL); + + // Get the next DMA mapping info for this offset and gpu mask + bReturnStatus = CliGetDmaMappingInfo(hClient, hBroadcastDevice, hVirtualMem, dmaOffset, gpuMask, &pDmaMappingInfo); + } + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + // vGPU doesn't understand subdevice handles. But clients map memory + // with subdevice handles and we don't want that to fail on vGPU. + // Currently, we just pass down the broadcast device handle to the host + // (which should be equivalent if SLI is disabled). This will need to + // be revisited if vGPU ever supports SLI. + // + NV_ASSERT((!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) || !IsSLIEnabled(pGpu)); + + if (pMemory->bRpcAlloc && + (NV01_NULL_OBJECT != hMemory) && + (resGetRefCount(staticCast(pVirtualMemory, RsResource)) || (hVirtualMem == hMemory))) + { + // + // resGetRefCount(pMemCtx->pResource) is zero when we are here from call of + // RmFree -> clientFreeResourceTree_IMPL -> clientFreeResource_IMPL -> __nvoc_objDelete + // + // memDestruct_IMPL-> CliDelDeviceMemory(i.e. hVirtualMem == hMemory) -> RmUnmapMemoryDma are valid calls since we + // call RPC_FREE later in memDestruct_IMPL. + // + // ifbDestruct_IMPL-> RmUnmapMemoryDma should not RPC_UNMAP_MEMORY_DMA since RPC_FREE is invoked in call stack earlier. + // + NV_RM_RPC_UNMAP_MEMORY_DMA(pGpu, hClient, hBroadcastDevice, hVirtualMem, hMemory, 0, dmaOffset, status); + } + + return status; +} + +/*! + * @brief return address and size of a VirtualMemory object + */ +void virtmemGetAddressAndSize_IMPL +( + VirtualMemory *pVirtualMemory, + NvU64 *pVAddr, + NvU64 *pSize +) +{ + MEMORY_DESCRIPTOR *pMemDesc = staticCast(pVirtualMemory, Memory)->pMemDesc; + + *pVAddr = memdescGetPhysAddr(pMemDesc, AT_GPU_VA, 0); + *pSize = memdescGetSize(pMemDesc); +} diff --git a/src/nvidia/src/kernel/os/os_init.c b/src/nvidia/src/kernel/os/os_init.c new file mode 100644 index 000000000..0b18d0b3e --- /dev/null +++ b/src/nvidia/src/kernel/os/os_init.c @@ -0,0 +1,706 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/***************************** HW State Rotuines ***************************\ +* * +* Common Operating System Object Function Pointer Initializations. * +* All the function pointers in the OS object are initialized here. * +* The initializations are broken into 'bite-sized' sub-functions * +* for ease of reading. Any functions that are common among all OS's * +* are directly initialized to the common function name. However, * +* the actual code for that function may be different from one OS * +* to the other; each OS compiles separately. Any function pointers * +* that are either not used by some OS's or are initialized to * +* different functions by different OS's are 'stubbed' out by * +* initializing them to a 'stub' function. * +\***************************************************************************/ + +#include "os/os.h" +#include "os/os_stub.h" +#include "core/system.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu/gpu_access.h" +#include "nv_ref.h" +#include "virtualization/hypervisor/hypervisor.h" + + +#include "gpu/bif/kernel_bif.h" + +#include "kernel/gpu/rc/kernel_rc.h" + +#include "g_os_private.h" + +// +// Functions to fill function stubs +// +static void initOSFunctionPointers(OBJOS *); + +// +// Helper functions to assist the above functions +// +static void initMiscOSFunctionPointers(OBJOS *); +static void initCommonMiscOSFunctionPointers(OBJOS *); +static void initStubMiscOSFunctionPointers(OBJOS *); +static void initWinNTStubOSFunctionPointers(OBJOS *); +static void initMacOSCoreOSFunctionPointers(OBJOS *); +static void initAPIOSFunctionPointers(OBJOS *); + +// Bug check code string common to all OS +const char *ppOsBugCheckBugcodeStr[] = OS_BUG_CHECK_BUGCODE_STR; + +NV_STATUS +constructObjOS(OBJOS *pOS) +{ + // Stub out function pointers + initOSFunctionPointers(pOS); + + // Now call the OS specific initialization + osInitObjOS(pOS); + + return NV_OK; +} + +static void +initOSFunctionPointers(OBJOS *pOS) +{ + initMiscOSFunctionPointers(pOS); + initWinNTStubOSFunctionPointers(pOS); + initMacOSCoreOSFunctionPointers(pOS); + initAPIOSFunctionPointers(pOS); +} + +static void +initMiscOSFunctionPointers(OBJOS *pOS) +{ + initCommonMiscOSFunctionPointers(pOS); + initStubMiscOSFunctionPointers(pOS); +} + +static void +initCommonMiscOSFunctionPointers(OBJOS *pOS) +{ + // Common OS function pointers. 
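+    // Only osGetSimulationMode is shared by every platform here; the remaining
+    // pointers are filled with stubs below and may be overridden by the
+    // OS-specific osInitObjOS() called from constructObjOS().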
+ pOS->osGetSimulationMode = osGetSimulationMode; +} + +static void +initStubMiscOSFunctionPointers(OBJOS *pOS) +{ + // Stubbed OS function pointers. + pOS->osSimEscapeWrite = stubOsSimEscapeWrite; + pOS->osSimEscapeWriteBuffer = stubOsSimEscapeWriteBuffer; + pOS->osSimEscapeRead = stubOsSimEscapeRead; + pOS->osSimEscapeReadBuffer = stubOsSimEscapeReadBuffer; + + pOS->osCheckCallback = stubOsCheckCallback; + pOS->osRCCallback = stubOsRCCallback; + + pOS->osPageArrayGetPhysAddr = stubOsPageArrayGetPhysAddr; + + pOS->osInternalReserveAllocCallback = stubOsInternalReserveAllocCallback; + pOS->osInternalReserveFreeCallback = stubOsInternalReserveFreeCallback; +} + +static void +initWinNTStubOSFunctionPointers(OBJOS *pOS) +{ + pOS->osQADbgRegistryInit = stubOsQADbgRegistryInit; + pOS->osQueueWorkItem = stubOsQueueWorkItem; + pOS->osQueueWorkItemWithFlags = stubOsQueueWorkItemWithFlags; + pOS->osQueueSystemWorkItem = stubOsQueueSystemWorkItem; + pOS->osCallACPI_MXMX = stubOsCallACPI_MXMX; + pOS->osCallACPI_DDC = stubOsCallACPI_DDC; + pOS->osCallACPI_BCL = stubOsCallACPI_BCL; + pOS->osCallACPI_ON = stubOsCallACPI_ON; + pOS->osCallACPI_OFF = stubOsCallACPI_OFF; + pOS->osCallACPI_NVHG_GPUON = stubOsCallWMI_NVHG_GPUON; + pOS->osCallACPI_NVHG_GPUOFF = stubOsCallWMI_NVHG_GPUOFF; + pOS->osCallACPI_NVHG_GPUSTA = stubOsCallWMI_NVHG_GPUSTA; + pOS->osCallACPI_NVHG_MXDS = stubOsCallWMI_NVHG_MXDS; + pOS->osCallACPI_NVHG_MXMX = stubOsCallWMI_NVHG_MXMX; + pOS->osCallACPI_NVHG_DOS = stubOsCallWMI_NVHG_DOS; + pOS->osCallACPI_NVHG_ROM = stubOsCallWMI_NVHG_ROM; + pOS->osCallACPI_NVHG_DCS = stubOsCallWMI_NVHG_DCS; + pOS->osCallACPI_DOD = stubOsCallWMI_DOD; + pOS->osSetupVBlank = stubOsSetupVBlank; + pOS->osCallACPI_NBPS = stubOsCallACPI_NBPS; + pOS->osCallACPI_NBSL = stubOsCallACPI_NBSL; + pOS->osCallACPI_DSM = stubOsCallACPI_DSM; + pOS->osCallACPI_OPTM_GPUON = stubOsCallWMI_OPTM_GPUON; + pOS->osGetUefiVariable = stubOsGetUefiVariable; + pOS->osCallACPI_MXDS = stubOsCallACPI_MXDS; + pOS->osCallACPI_MXDM = stubOsCallACPI_MXDM; + pOS->osCallACPI_MXID = stubOsCallACPI_MXID; + pOS->osCallACPI_LRST = stubOsCallACPI_LRST; +} + +static void +initMacOSCoreOSFunctionPointers(OBJOS *pOS) +{ + pOS->osNv_rdcr4 = stubOsnv_rdcr4; + pOS->osNv_rdxcr0 = stubOsnv_rdxcr0; + pOS->osNv_cpuid = stubOsnv_cpuid; + pOS->osNv_rdmsr = stubOsnv_rdmsr; + pOS->osNv_wrmsr = stubOsnv_wrmsr; + pOS->osRobustChannelsDefaultState = stubOsRobustChannelsDefaultState; + pOS->osCallACPI_MXMX = stubOsCallACPI_MXMX; + pOS->osCallACPI_DDC = stubOsCallACPI_DDC; + pOS->osCallACPI_BCL = stubOsCallACPI_BCL; + pOS->osGetUefiVariable = stubOsGetUefiVariable; +} + +static void +initAPIOSFunctionPointers(OBJOS *pOS) +{ + pOS->osRmInitRm = osRmInitRm; +} + +// +// Function to find the maximum number of cores in the system +// +NvU32 osGetMaximumCoreCount() +{ + // + // Windows provides an API to query this that supports CPU hotadd that our + // cpuid() didn't catch, so favor that. + // +#if NVOS_IS_WINDOWS && PORT_IS_KERNEL_BUILD && !defined(NV_MODS) + extern unsigned long KeQueryMaximumProcessorCountEx(unsigned short); + return KeQueryMaximumProcessorCountEx(0xFFFF); // All groups. +#else + OBJSYS *pSys = SYS_GET_INSTANCE(); + return pSys ? pSys->cpuInfo.maxLogicalCpus : 0; +#endif +} + +/*! + * @brief Generic OS 8-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg008 function. + * + * @param[in] pGpu - The GPU context specific to this call. 
+ * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg008( + OBJGPU *pGpu, + NvU32 thisAddress, + NvU8 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg008(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 16-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg016 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg016( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV16 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg016(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 32-bit GPU register write function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevWriteReg032 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be written + * @param[in] thisValue - Value to be written + * + */ +void osGpuWriteReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV32 thisValue +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + osDevWriteReg032(pGpu, pMapping, thisAddress, thisValue); +} + +/*! + * @brief Generic OS 8-bit GPU register read function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevReadReg008 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be read. + * + * @return The value read from the register + */ +NvU8 osGpuReadReg008( + OBJGPU *pGpu, + NvU32 thisAddress +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + return osDevReadReg008(pGpu, pMapping, thisAddress); +} + +/*! + * @brief Generic OS 16-bit GPU register read function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevReadReg016 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be read. + * + * @return The value read from the register + */ +NvU16 osGpuReadReg016( + OBJGPU *pGpu, + NvU32 thisAddress +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + return osDevReadReg016(pGpu, pMapping, thisAddress); +} + +/*! + * @brief Generic OS 32-bit GPU register read function. + * + * This function first obtains the pointer to the mapping for the GPU + * registers and then calls the OS specific osDevReadReg032 function. + * + * @param[in] pGpu - The GPU context specific to this call. + * @param[in] thisAddress - Address of the register to be read. 
+ * + * @return The value read from the register + */ +NvU32 osGpuReadReg032( + OBJGPU *pGpu, + NvU32 thisAddress +) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + return osDevReadReg032(pGpu, pMapping, thisAddress); +} + +void vgpuDevWriteReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvV32 thisValue, + NvBool *vgpuHandled +) +{ + NvBool isPCIConfigAccess = NV_FALSE; + NvU32 offAddr = 0; + NvU32 configSpaceSize; + NvU32 configSpaceMirrorBase = 0; + NvU32 configSpaceMirrorSize = 0; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys); + + if(!pGpu || + !pHypervisor || !pHypervisor->bDetected || !pHypervisor->bIsHVMGuest || + !GPU_GET_KERNEL_BIF(pGpu)) + { + *vgpuHandled = NV_FALSE; + return; + } + + NV_ASSERT_OK(kbifGetPciConfigSpacePriMirror_HAL(pGpu, GPU_GET_KERNEL_BIF(pGpu), + &configSpaceMirrorBase, &configSpaceMirrorSize)); + + { + configSpaceSize = NV_CONFIG_PCI_NV_12; + } + + if ((thisAddress >= configSpaceMirrorBase) && + (thisAddress + sizeof(NvU32) <= configSpaceMirrorBase + configSpaceSize)) + { + isPCIConfigAccess = NV_TRUE; + offAddr = thisAddress - configSpaceMirrorBase; + } + + if ((isPCIConfigAccess) && (pGpu->hPci)) + { + if (IS_PASSTHRU(pGpu) && + !gpuIsBar1Size64Bit(pGpu) && + (offAddr == NV_CONFIG_PCI_NV_6) && + !gpuIsBar2MovedByVtd(pGpu)) + { + // + // Xen doesn't move the BAR2 according to the BAR1 size, so + // we have to manually jump to 0x1C. + // + osPciWriteDword(pGpu->hPci, NV_CONFIG_PCI_NV_7(0), thisValue); + *vgpuHandled = NV_TRUE; + return; + } + + // + // Avoid calling OS Pci config functions during ISR + // As this is not allowed in Windows. + // + if (IS_VIRTUAL_WITH_SRIOV(pGpu) && osIsISR()) + { + *vgpuHandled = NV_TRUE; + return; + } + + // use config cycles to write these PCI offsets + osPciWriteDword(pGpu->hPci, offAddr, thisValue); + *vgpuHandled = NV_TRUE; + return; + } + + *vgpuHandled = NV_FALSE; +} + +NvU32 vgpuDevReadReg032( + OBJGPU *pGpu, + NvU32 thisAddress, + NvBool *vgpuHandled +) +{ + NvBool isPCIConfigAccess = NV_FALSE; + NvU32 offAddr = 0; + NvU32 configSpaceSize; + NvU32 configSpaceMirrorBase = 0; + NvU32 configSpaceMirrorSize = 0; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys); + + if(!pGpu || + !pHypervisor || !pHypervisor->bDetected || !pHypervisor->bIsHVMGuest || + !GPU_GET_KERNEL_BIF(pGpu)) + { + *vgpuHandled = NV_FALSE; + return 0; + } + + NV_ASSERT_OK(kbifGetPciConfigSpacePriMirror_HAL(pGpu, GPU_GET_KERNEL_BIF(pGpu), + &configSpaceMirrorBase, &configSpaceMirrorSize)); + + { + configSpaceSize = NV_CONFIG_PCI_NV_12; + } + + if ((thisAddress >= configSpaceMirrorBase) && + (thisAddress + sizeof(NvU32) <= configSpaceMirrorBase + configSpaceSize)) + { + isPCIConfigAccess = NV_TRUE; + offAddr = thisAddress - configSpaceMirrorBase; + } + + if ((isPCIConfigAccess) && (pGpu->hPci)) + { + if (IS_PASSTHRU(pGpu) && + !gpuIsBar1Size64Bit(pGpu) && + (offAddr == NV_CONFIG_PCI_NV_6) && + !gpuIsBar2MovedByVtd(pGpu)) + { + // + // Xen doesn't move the BAR2 according to the BAR1 size, so + // we have to manually jump to 0x1C. + // + *vgpuHandled = NV_TRUE; + return osPciReadDword(pGpu->hPci, NV_CONFIG_PCI_NV_7(0)); + } + + // + // Avoid calling OS Pci config functions during ISR + // As this is not allowed in Windows. 
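+        // In that case the write is dropped: report it as handled and return
+        // without touching config space.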
+ // + if (IS_VIRTUAL_WITH_SRIOV(pGpu) && osIsISR()) + { + *vgpuHandled = NV_TRUE; + return 0; + } + // use config cycles to read these PCI offsets + *vgpuHandled = NV_TRUE; + return osPciReadDword(pGpu->hPci, offAddr); + } + + *vgpuHandled = NV_FALSE; + return 0; +} + + +/** + * @brief Adds a filter to trap a certain CPU virtual address range + * + * Sets up a filter so all accesses to an address range are sent through the + * specified callback. + * + * Only one filter is allowed for any given address. + * + * @param[in] rangeStart start of CPU address range (inclusive) + * @param[in] rangeEnd end of CPU address range (inclusive) + * @param[in] pCb Callback function + * @param[in] pPriv opaque point to data pass to callback + * + * @return NV_OK is success, appropriate error otherwise. + */ +NV_STATUS +osMemAddFilter +( + NvU64 rangeStart, + NvU64 rangeEnd, + OSMemFilterCb *pCb, + void *pPriv +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + POSMEMFILTERDATA pFilterData = NULL; + + pFilterData = portMemAllocNonPaged(sizeof(OSMEMFILTERDATA)); + if (pFilterData == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to alloc mem for os mem filter data!\n"); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pFilterData->node.keyStart = rangeStart; + pFilterData->node.keyEnd = rangeEnd; + pFilterData->pPriv = pPriv; + pFilterData->pFilterCb = pCb; + pFilterData->node.Data = (void *)pFilterData; + + return btreeInsert(&pFilterData->node, &pSys->pMemFilterList); +} + +/** + * @brief Remove a filter added with @ref osMemAddFilter + * + * @param[in] rangeStart memory address to remove filter from. + */ +NV_STATUS +osMemRemoveFilter +( + NvU64 rangeStart +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + PNODE pNode = NULL; + + if (btreeSearch(rangeStart, &pNode, pSys->pMemFilterList) != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "Failed to find filter data for the given range start address!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + NV_ASSERT(pNode); + NV_ASSERT(pNode->keyStart == rangeStart); + + if (btreeUnlink(pNode, &pSys->pMemFilterList) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failed to unlink filter data!\n"); + return NV_ERR_INVALID_STATE; + } + + portMemFree(pNode->Data); + pNode = NULL; + + return NV_OK; +} + +/** + * @brief Retrieves a filter added with @ref osMemAddFilter. + * + * @param[in] address Address to search for + * + * @return Appropriate filter data if a filter exists, NULL otherwise. + */ +POSMEMFILTERDATA +osMemGetFilter(NvUPtr address) +{ + OBJSYS *pSys; + PNODE pNode = NULL; + + pSys = SYS_GET_INSTANCE(); + if (!pSys) + return NULL; + + if (btreeSearch(address, &pNode, pSys->pMemFilterList) != NV_OK) + return NULL; + + return pNode->Data; +} + +/*! + * Some data such as Bindata array are placed on paged memory. Access to paged segment + * on high IRQL is not allowed on some platform (e.g. Windows). The issue could be + * difficult to debug as the repro rate is random. The failure only happens when the + * target segment is paged out. + * + * This utility function checks whether it is safe to access paged segments. When the + * function is called at high IRQL path, it gives an assertion with a message. On + * developer branches, such as chips_a, it triggers an intended Bugcheck. + * + * @param[in] void No input required + * + * @return void To avoid random failure, do not return and check the error + * code of this function. BSOD D1 or internal BSOD provides + * full call stack that is much helpful for debugging. 
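+ *
+ * Typical usage is to call this helper at the top of any code path that may
+ * touch pageable data (bindata, for example) so that a violation is reported
+ * at the call site rather than surfacing later as an intermittent page fault.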
+ */ + +void osPagedSegmentAccessCheck() +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + // check whether it is safe to access/alloc Paged memory + if (! portMemExSafeForPagedAlloc() || pOS->getProperty(pOS, PDB_PROP_OS_NO_PAGED_SEGMENT_ACCESS)) + { + NV_ASSERT_FAILED("Paged memory access is prohibited"); + + } +} + +/*! + * @brief Retrieves a registry key DWORD value and returns the best result + * from both nbsi and os registry tables. + * + * @param[in] OBJGPU pointer + * @param[in] pRegParmStr Registry key string + * @param[out] pData Registry key DWORD value + * + * @return NV_OK if key was found and data returned in pData + * @return Other unexpected errors + */ +NV_STATUS osReadRegistryDword +( + OBJGPU *pGpu, + const char *pRegParmStr, + NvU32 *pData +) +{ + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pRegParmStr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pData != NULL, NV_ERR_INVALID_ARGUMENT); + + status = osReadRegistryDwordBase(pGpu, pRegParmStr, pData); + + return status; +} + +/*! + * @brief Retrieves a registry key STRING value and returns the best result + * from both nbsi and os registry tables. + * + * @param[in] OBJGPU pointer + * @param[in] pRegParmStr Registry key string + * @param[out] pData Registry key STRING value + * @param[in] pCbLen Count of bytes in registry value. + * + * @return NV_OK if key was found and data returned in pData + * @return Other unexpected errors + */ +NV_STATUS osReadRegistryString +( + OBJGPU *pGpu, + const char *pRegParmStr, + NvU8 *pData, + NvU32 *pCbLen +) +{ + NV_STATUS status; + NV_ASSERT_OR_RETURN(pRegParmStr != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pCbLen != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(!(*pCbLen != 0 && pData == NULL), NV_ERR_INVALID_ARGUMENT); + + status = osReadRegistryStringBase(pGpu, pRegParmStr, pData, pCbLen); + + return status; +} + +void nvErrorLog(void *pVoid, NvU32 num, const char *pFormat, va_list arglist) +{ + if ((pFormat == NULL) || (*pFormat == '\0')) + { + return; + } + + OBJGPU *pGpu = reinterpretCast(pVoid, OBJGPU *); + +#if RMCFG_MODULE_SMBPBI || \ + (RMCFG_MODULE_KERNEL_RC && !RMCFG_FEATURE_PLATFORM_GSP) + char *errorString = portMemAllocNonPaged(MAX_ERROR_STRING); + if (errorString == NULL) + goto done; + + unsigned msglen; + va_list arglistCpy; + + va_copy(arglistCpy, arglist); + msglen = nvDbgVsnprintf(errorString, MAX_ERROR_STRING, pFormat, arglistCpy); + va_end(arglistCpy); + + if (msglen == 0) + goto done; + + { + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + if (pKernelRc != NULL) + krcReportXid(pGpu, pKernelRc, num, errorString); + } + +done: + portMemFree(errorString); +#endif // RMCFG_MODULE_SMBPBI || (RMCFG_MODULE_KERNEL_RC && + // !RMCFG_FEATURE_PLATFORM_GSP) + + osErrorLogV(pGpu, num, pFormat, arglist); +} + +void +nvErrorLog_va +( + void * pVoid, + NvU32 num, + const char * pFormat, + ... +) +{ + va_list arglist; + + va_start(arglist, pFormat); + nvErrorLog(pVoid, num, pFormat, arglist); + va_end(arglist); +} diff --git a/src/nvidia/src/kernel/os/os_sanity.c b/src/nvidia/src/kernel/os/os_sanity.c new file mode 100644 index 000000000..ca0694008 --- /dev/null +++ b/src/nvidia/src/kernel/os/os_sanity.c @@ -0,0 +1,319 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/************************************************************************************************************** +* +* Description: +* Sanity test the system environment to verify our driver can run properly +* +**************************************************************************************************************/ + +#include +#include +#include +#include +#include "kernel/gpu/intr/intr.h" +#include +#include + +#include "g_os_private.h" + +/*! + * @brief Wait for interrupt + * + * @param[in] pGpu GPU Object + * @param[in] serviced NV_TRUE if a GPU interrupt was serviced + * + * @returns NV_FALSE??? + */ + +static int interrupt_triggered; + +static NvBool osWaitForInterrupt( + OBJGPU *pGpu, + NvBool *serviced +) +{ + Intr *pIntr = GPU_GET_INTR(pGpu); + NvU32 intAMode; + NvBool swPending; + + intrGetStallInterruptMode_HAL(pGpu, pIntr, &intAMode, &swPending); + + // + // If we see a GPU here that has interrupts disabled, it must + // share its IRQ with some other device. Don't do anything, + // if there are additional GPUs, they will be probed soon enough. + // + if ((intAMode == INTERRUPT_TYPE_DISABLED) || + ((intAMode == INTERRUPT_TYPE_SOFTWARE) && !swPending)) + { + if (serviced) *serviced = NV_FALSE; + return NV_FALSE; + } + + // + // this should never happen, but hey, lots of things are like that in SW + // (this references the assert below these comments) + // The reason it can't happen: + // + // If osWaitForInterrupt is wired up, then following the logic of _osVerifyInterrupts() + // this GPU must be constrained to generating interrupts from the software interrupt + // pending bit only. + // + // If osWaitForInterrupt() is wired up, and we get here, and we are not in software interrupt mode + // then there is a re-entrancy problem that needs to be addressed in your flavor of the RM. + // + if (intAMode != INTERRUPT_TYPE_SOFTWARE) + { + NV_PRINTF(LEVEL_ERROR, + "INTR_EN_0_INTA_SOFTWARE was not set on gpuInstance: 0x%x\n", + pGpu->gpuInstance); + NV_ASSERT(0); + } + + intrClearStallSWIntr_HAL(pGpu, pIntr); + + NV_PRINTF(LEVEL_INFO, "Triggered for gpuInstance: 0x%x\n", + pGpu->gpuInstance); + + interrupt_triggered = 1; + if (serviced) *serviced = NV_TRUE; + + return NV_FALSE; +} + +NV_STATUS osSanityTestIsr( + OBJGPU *pGpu +) +{ + NvBool serviced = NV_FALSE; + + osWaitForInterrupt(pGpu, &serviced); + + return (serviced) ? 
NV_OK : NV_ERR_GENERIC; +} + +// +// IRQ routing verification is currently done on Windows and Linux +// For Windows this is done both during bootUp and resume whereas for linux +// this is done only during bootup. +// +static NV_STATUS _osVerifyInterrupts( + OBJGPU *pGpu +) +{ +#if !defined(NV_UNIX) || defined(NV_MODS) + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); +#endif + Intr *pIntr = GPU_GET_INTR(pGpu); + OBJGPU *pGpuSaved = pGpu; + NvU32 *pIntrEn0, *pIntrEn1; + MC_ENGINE_BITVECTOR intrMask; + MC_ENGINE_BITVECTOR engines; + NvU32 Bailout; + NvU32 gpuAttachCnt, gpuAttachMask, gpuInstance, i; + + // + // We're adding the PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY check since none of the + // support required to run this interrupt sanity test has been brought up + // yet for T234D SOC display. + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY)) + { + // + // Nothing to verify here for the time being + // + return NV_OK; + } + + NV_PRINTF(LEVEL_INFO, "called with gpuInstance: 0x%x\n", + pGpu->gpuInstance); + + gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + pIntrEn0 = portMemAllocNonPaged(gpuAttachCnt * sizeof(NvU32)); + if (pIntrEn0 == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pIntrEn0 portMemAllocNonPaged failed!\n"); + return NV_ERR_NO_MEMORY; + } + pIntrEn1 = portMemAllocNonPaged(gpuAttachCnt * sizeof(NvU32)); + if (pIntrEn1 == NULL) + { + NV_PRINTF(LEVEL_ERROR, "pIntrEn1 portMemAllocNonPaged failed!\n"); + portMemFree(pIntrEn0); + return NV_ERR_NO_MEMORY; + } + + // + // Normally, interrupts are disabled while we have the semaphore. However in locking + // models that support altIntr or intrMask, intrs may be left enabled. So we really + // need to disable intrs on all GPUs before running this test and switching over + // our ISR to the dummy one. + // + // In this case, we enable interrupts so we can catch a specifically triggered interrupt + // then we restore the initial state. this should be early enough that no other + // interrupts are triggered than the software one we explicitly throw. + // note that we only enable the software interrupt, so we won't get any unexpected hw interrupts + // + // Concerning HW interrupts while software interrupts + // are enabled. + // + // "Yes, if INTERRUPT_TYPE is set to SOFTWARE, all the other + // [hardware] interrupts can no longer assert. + // + // I'm surprised the manual doesn't have a #define for BOTH (0x3). + // That would allow both SW and HW interrupts to assert." 
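+    // In short: all other attached GPUs have their interrupts disabled, the
+    // GPU under test is restricted to the software interrupt, and the enables
+    // saved in pIntrEn0/pIntrEn1 are restored once the test completes.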
+ // + gpuInstance = i = 0; + while ((pGpu = gpumgrGetNextGpu(gpuAttachMask, &gpuInstance)) != NULL) + { + if (gpuIsGpuFullPower(pGpu) == NV_FALSE) + { + continue; + } + pIntr = GPU_GET_INTR(pGpu); + + // Save off the current state of the non stall interrupt tree + pIntrEn1[i] = intrGetNonStallEnable_HAL(pGpu, pIntr, NULL /* threadstate */); + + // Disable the non stall interrupt tree + intrDisableNonStall_HAL(pGpu, pIntr, NULL /* threadstate */); + + pIntr = GPU_GET_INTR(pGpu); + pIntrEn0[i] = intrGetIntrEnFromHw_HAL(pGpu, pIntr, NULL /* threadstate */); + intrSetIntrEnInHw_HAL(pGpu, pIntr, INTERRUPT_TYPE_DISABLED, NULL /* threadstate */); + i++; + } + pGpu = pGpuSaved; + pIntr = GPU_GET_INTR(pGpu); + + intrSetIntrEnInHw_HAL(pGpu, pIntr, INTERRUPT_TYPE_SOFTWARE, NULL /* threadstate */); + intrDisableStallSWIntr_HAL(pGpu, pIntr); + intrEnableStallSWIntr_HAL(pGpu, pIntr); + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + // + // Save off the IntrMask and enable all engines to be reflected + // in PMC_INTR_0 + // + intrGetIntrMask_HAL(pGpu, pIntr, &intrMask, NULL /* threadstate */); + bitVectorSetAll(&engines); + intrSetIntrMask_HAL(pGpu, pIntr, &engines, NULL /* threadstate */); + } + + // + // Hook up our dummy ISR to catch the SW triggered test after + // we've set INTR_EN_0 to _SOFTWARE and enabled all intrs + // to be seen via the IntrMask + // + + pGpu->testIntr = NV_TRUE; + + interrupt_triggered = 0; + + intrSetStallSWIntr_HAL(pGpu, pIntr); + + Bailout = 0; + + while (!interrupt_triggered) + { +#if defined(NV_UNIX) && !defined(NV_MODS) + osDelay(50); + Bailout += 50 * 1000; +#else + tmrDelay(pTmr, 5 * 1000); + Bailout += 5; +#endif + if (Bailout > pGpu->timeoutData.defaultus) + break; + } + + // + // Message Signalled Interrupt (MSI) support + // This call checks if MSI is enabled and if it is, we need re-arm it. + // + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + kbifCheckAndRearmMSI(pGpu, pKernelBif); + + pGpu->testIntr = NV_FALSE; + + if (pIntr->getProperty(pIntr, PDB_PROP_INTR_USE_INTR_MASK_FOR_LOCKING)) + { + // Restore the IntrMask that we saved off + intrSetIntrMask_HAL(pGpu, pIntr, &intrMask, NULL /* threadstate */); + } + + // Disable the SW interrupt explicitly before restoring interrupt enables + intrDisableStallSWIntr_HAL(pGpu, pIntr); + + // Restore NV_PMC_INTR_EN_0 and INTR_EN_1 on all GPUs + gpuInstance = i = 0; + while ((pGpu = gpumgrGetNextGpu(gpuAttachMask, &gpuInstance)) != NULL) + { + if (gpuIsGpuFullPower(pGpu) == NV_FALSE) + { + continue; + } + pIntr = GPU_GET_INTR(pGpu); + + // Restore the non stall interrupt tree enable + intrRestoreNonStall_HAL(pGpu, pIntr, pIntrEn1[i], NULL /* threadState */); + + // Restore the stall interrupt tree enable + intrSetIntrEnInHw_HAL(pGpu, pIntr, pIntrEn0[i], NULL /* threadstate */); + i++; + } + + portMemFree(pIntrEn0); + portMemFree(pIntrEn1); + + NV_PRINTF(LEVEL_INFO, "Finishing with %d\n", interrupt_triggered); + + return interrupt_triggered ? 
NV_OK : NV_ERR_IRQ_NOT_FIRING; +} + +// +// add various system environment start-up tests here +// currently, just verify interrupt hookup, but could also verify other details +// +NV_STATUS osVerifySystemEnvironment( + OBJGPU *pGpu +) +{ + NV_STATUS status = NV_OK; + + // PCIE GEN4 fmodel MSI is broken as per bug 2076928 comment 42 and will not be fixed + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + if (IS_FMODEL(pGpu) && + pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_IS_FMODEL_MSI_BROKEN) && + kbifIsMSIEnabled(pGpu, pKernelBif)) + { + return NV_OK; + } + + status = _osVerifyInterrupts(pGpu); + + return status; +} + diff --git a/src/nvidia/src/kernel/os/os_stubs.c b/src/nvidia/src/kernel/os/os_stubs.c new file mode 100644 index 000000000..6d696a500 --- /dev/null +++ b/src/nvidia/src/kernel/os/os_stubs.c @@ -0,0 +1,672 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Routines ***************************\ +* * +* Module: os_stubs.c * +* Stubs for all the public stub routines * +\***************************************************************************/ + +#include "os/os_stub.h" + +// +// Here's a little debugging tool. It is possible that some code is stubbed for +// certain OS's that shouldn't be. In debug mode, the stubs below will dump out +// a stub 'number' to help you identify any stubs that are getting called. You +// can then evaluate whether or not that is correct. +// +// Highest used STUB_CHECK is 237. 
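+//
+// On a DEBUG build, set enableOsStubCallCheck to a non-zero value (e.g. from a
+// debugger) to have each stub invocation print "STUB CALL: <number>".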
+// +#if defined(DEBUG) +#define STUB_CHECK(n) _stubCallCheck(n) + +int enableOsStubCallCheck = 0; + +static void _stubCallCheck(int funcNumber) +{ + if (enableOsStubCallCheck) { + NV_PRINTF(LEVEL_INFO, "STUB CALL: %d \r\n", funcNumber); + } +} + +#else +#define STUB_CHECK(n) +#endif // DEBUG + +struct OBJCL; + +void stubOsQADbgRegistryInit(OBJOS *pOS) +{ + STUB_CHECK(61); +} + +NvU32 stubOsnv_rdcr4(OBJOS *pOS) +{ + STUB_CHECK(76); + return 0; +} + +NvU64 stubOsnv_rdxcr0(OBJOS *pOs) +{ + STUB_CHECK(237); + return 0; +} + +int stubOsnv_cpuid(OBJOS *pOS, int arg1, int arg2, NvU32 *arg3, + NvU32 *arg4, NvU32 *arg5, NvU32 *arg6) +{ + STUB_CHECK(77); + return 0; +} + +NvU32 stubOsnv_rdmsr(OBJOS *pOS, NvU32 arg1, NvU32 *arg2, NvU32 *arg3) +{ + STUB_CHECK(122); + return 0; +} + +NvU32 stubOsnv_wrmsr(OBJOS *pOS, NvU32 arg1, NvU32 arg2, NvU32 arg3) +{ + STUB_CHECK(123); + return 0; +} + +NvU32 stubOsRobustChannelsDefaultState(OBJOS *pOS) +{ + STUB_CHECK(128); + return 0; +} + +NV_STATUS stubOsQueueWorkItem(OBJGPU *pGpu, OSWorkItemFunction pFunction, void * pParms) +{ + STUB_CHECK(180); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsQueueSystemWorkItem(OSSystemWorkItemFunction pFunction, void *pParms) +{ + STUB_CHECK(181); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsQueueWorkItemWithFlags(OBJGPU *pGpu, OSWorkItemFunction pFunction, void * pParms, NvU32 flags) +{ + STUB_CHECK(182); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsSimEscapeWrite(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, NvU32 Value) +{ + STUB_CHECK(195); + return NV_ERR_GENERIC; +} + +NV_STATUS stubOsSimEscapeWriteBuffer(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, void* pBuffer) +{ + STUB_CHECK(197); + return NV_ERR_GENERIC; +} + +NV_STATUS stubOsSimEscapeRead(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, NvU32 *Value) +{ + STUB_CHECK(196); + return NV_ERR_GENERIC; +} + +NV_STATUS stubOsSimEscapeReadBuffer(OBJGPU *pGpu, const char *path, NvU32 Index, NvU32 Size, void* pBuffer) +{ + STUB_CHECK(198); + return NV_ERR_GENERIC; +} + +NV_STATUS stubOsCallACPI_MXMX(OBJGPU *pGpu, NvU32 AcpiId, NvU8 *pInOut) +{ + STUB_CHECK(220); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_DDC(OBJGPU *pGpu, NvU32 ulAcpiId, NvU8 *pOut, NvU32 *size, NvBool bReadMultiBlock) +{ + STUB_CHECK(221); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_BCL(OBJGPU *pGpu, NvU32 acpiId, NvU32 *pOut, NvU16 *size) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_ON(OBJGPU *pGpu, NvU32 uAcpiId) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_OFF(OBJGPU *pGpu, NvU32 uAcpiId) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_NBPS(OBJGPU *pGpu, NvU8 *pOut, NvU32 *pOutSize) +{ + *pOutSize = 0; + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_NBSL(OBJGPU *pGpu, NvU32 val) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_OPTM_GPUON(OBJGPU *pGpu) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_GPUON(OBJGPU *pGpu, NvU32 *pInOut) +{ + //STUB_CHECK(225); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_GPUOFF(OBJGPU *pGpu, NvU32 *pInOut) +{ + //STUB_CHECK(226); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_GPUSTA(OBJGPU *pGpu, NvU32 *pInOut) +{ + //STUB_CHECK(227); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_MXDS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + //STUB_CHECK(228); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS 
stubOsCallWMI_NVHG_MXMX(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + //STUB_CHECK(229); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_DOS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + //STUB_CHECK(230); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_ROM(OBJGPU *pGpu, NvU32 *pIn, NvU32 *pOut) +{ + //STUB_CHECK(231); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_NVHG_DCS(OBJGPU *pGpu, NvU32 AcpiId, NvU32 *pInOut) +{ + //STUB_CHECK(232); + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallWMI_DOD(OBJGPU *pGpu, NvU32 *pInOut, NvU32 *pOutSize) +{ + //STUB_CHECK(233); + return NV_ERR_NOT_SUPPORTED; +} + + +NV_STATUS stubOsCallACPI_DSM(OBJGPU *pGpu, ACPI_DSM_FUNCTION acpiDSMFunction, NvU32 NVHGDSMSubfunction, NvU32 *pInOut, NvU16 *size) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_MXDS(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_MXDM(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_MXID(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS stubOsCallACPI_LRST(OBJGPU *pGpu, NvU32 ulAcpiId, NvU32 *pInOut) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool stubOsCheckCallback(OBJGPU *pGpu) +{ + return NV_FALSE; +} + +RC_CALLBACK_STATUS +stubOsRCCallback +( + OBJGPU *pGpu, + NvHandle hClient, // IN The client RC is on + NvHandle hDevice, // IN The device RC is on + NvHandle hFifo, // IN The channel or TSG RC is on + NvHandle hChannel, // IN The channel RC is on + NvU32 errorLevel, // IN Error Level + NvU32 errorType, // IN Error type + NvU32 *data, // IN/OUT context of RC handler + void *pfnRmRCReenablePusher +) +{ + return RC_CALLBACK_IGNORE; +} + +NV_STATUS stubOsSetupVBlank(OBJGPU *pGpu, void * pProc, + void * pParm1, void * pParm2, NvU32 Head, void * pParm3) +{ + return NV_OK; +} + +NV_STATUS stubOsObjectEventNotification(NvHandle hClient, NvHandle hObject, NvU32 hClass, PEVENTNOTIFICATION pNotifyEvent, + NvU32 notifyIndex, void * pEventData, NvU32 eventDataSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +RmPhysAddr +stubOsPageArrayGetPhysAddr(OS_GPU_INFO *pOsGpuInfo, void* pPageData, NvU32 pageIndex) +{ + NV_ASSERT(0); + return 0; +} + +void stubOsInternalReserveAllocCallback(NvU64 offset, NvU64 size, NvU32 gpuId) +{ + return; +} + +void stubOsInternalReserveFreeCallback(NvU64 offset, NvU32 gpuId) +{ + return; +} + + +NV_STATUS osVgpuAllocVmbusEventDpc(void **ppArg1) +{ + return NV_OK; +} + +void osVgpuScheduleVmbusEventDpc(void *pArg1, void *pArg2) +{ +} + +NV_STATUS osLockPageableDataSection(RM_PAGEABLE_SECTION *pSection) +{ + return NV_OK; +} + +NV_STATUS osUnlockPageableDataSection(RM_PAGEABLE_SECTION *pSection) +{ + return NV_OK; +} + +NV_STATUS osIsKernelBuffer(void *pArg1, NvU32 arg2) +{ + return NV_OK; +} + +NV_STATUS osMapViewToSection(OS_GPU_INFO *pArg1, + void *pSectionHandle, + void **ppAddress, + NvU64 actualSize, + NvU64 sectionOffset, + NvBool bIommuEnabled) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osUnmapViewFromSection(OS_GPU_INFO *pArg1, + void *pAddress, + NvBool bIommuEnabled) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osSrPinSysmem( + OS_GPU_INFO *pArg1, + NvU64 commitSize, + void *pMdl +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osSrUnpinSysmem(OS_GPU_INFO *pArg1) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osCreateMemFromOsDescriptorInternal( + OBJGPU *pGpu, + void *pAddress, + NvU32 flags, + NvU64 
size, + MEMORY_DESCRIPTOR **ppMemDesc, + NvBool bCachedKernel, + RS_PRIV_LEVEL privilegeLevel +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osReserveCpuAddressSpaceUpperBound(void **ppSectionHandle, + NvU64 maxSectionSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void osReleaseCpuAddressSpaceUpperBound(void *pSectionHandle) +{ +} + +void osIoWriteDword( + NvU32 port, + NvU32 data +) +{ +} + +NvU32 osIoReadDword( + NvU32 port +) +{ + return 0; +} + +NvBool osIsVga( + OS_GPU_INFO *pArg1, + NvBool bIsGpuPrimaryDevice +) +{ + return bIsGpuPrimaryDevice; +} + +void osInitOSHwInfo( + OBJGPU *pGpu +) +{ +} + +void osDestroyOSHwInfo( + OBJGPU *pGpu +) +{ +} + +NV_STATUS osDoFunctionLevelReset( + OBJGPU *pGpu +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvBool osGrService( + OS_GPU_INFO *pOsGpuInfo, + NvU32 grIdx, + NvU32 intr, + NvU32 nstatus, + NvU32 addr, + NvU32 dataLo +) +{ + return NV_FALSE; +} + +NvBool osDispService( + NvU32 Intr0, + NvU32 Intr1 +) +{ + return NV_FALSE; +} + +NV_STATUS osDeferredIsr( + OBJGPU *pGpu +) +{ + return NV_OK; +} + +NV_STATUS osGetAcpiTable( + NvU32 tableSignature, + void **ppTable, + NvU32 tableSize, + NvU32 *retSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osInitGetAcpiTable(void) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvU32 osGetReleaseAssertBehavior(void) +{ + return 0; +} + +void osDbgBugCheckOnAssert(void) +{ + return; +} + +NvBool osQueueDpc(OBJGPU *pGpu) +{ + return NV_FALSE; +} + +NvBool osBugCheckOnTimeoutEnabled(void) +{ + return NV_FALSE; +} + +NV_STATUS osNvifMethod( + OBJGPU *pGpu, + NvU32 func, + NvU32 subFunc, + void *pInParam, + NvU16 inParamSize, + NvU32 *pOutStatus, + void *pOutData, + NvU16 *pOutDataSize +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS osNvifInitialize( + OBJGPU *pGpu +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +stubOsGetUefiVariable +( + OBJGPU *pGpu, + char *pName, + LPGUID pGuid, + NvU8 *pBuffer, + NvU32 *pSize, + NvU32 *pAttributes +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NvU32 osGetDynamicPowerSupportMask(void) +{ + return 0; +} + +void osUnrefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo) +{ + return; +} + +NV_STATUS osRefGpuAccessNeeded(OS_GPU_INFO *pOsGpuInfo) +{ + return NV_OK; +} + +void osClientGcoffDisallowRefcount( + OS_GPU_INFO *pArg1, + NvBool arg2 +) +{ +} + +#if !RMCFG_FEATURE_PLATFORM_DCE /* dce_core_rm_clk_reset.c */ && \ + (!RMCFG_FEATURE_PLATFORM_UNIX || !RMCFG_FEATURE_TEGRA_SOC_NVDISPLAY || \ + RMCFG_FEATURE_DCE_CLIENT_RM /* osSocNvDisp.c */ ) +NV_STATUS +osTegraSocEnableClk +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocDisableClk +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocGetCurrFreqKHz +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM, + NvU32 *pCurrFreqKHz +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocGetMaxFreqKHz +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM, + NvU32 *pMaxFreqKHz +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocGetMinFreqKHz +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM, + NvU32 *pMinFreqKHz +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocSetFreqKHz +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRM, + NvU32 reqFreqKHz +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocSetParent +( + OS_GPU_INFO *pOsGpuInfo, + NvU32 whichClkRMsource, + NvU32 whichClkRMparent +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocGetParent +( + OS_GPU_INFO *pOsGpuInfo, 
+ NvU32 whichClkRMsource, + NvU32 *pWhichClkRMparent +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocDeviceReset +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocPmPowergate +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +osTegraSocPmUnpowergate +( + OS_GPU_INFO *pOsGpuInfo +) +{ + return NV_ERR_NOT_SUPPORTED; +} +#endif + diff --git a/src/nvidia/src/kernel/os/os_timer.c b/src/nvidia/src/kernel/os/os_timer.c new file mode 100644 index 000000000..db4dc9078 --- /dev/null +++ b/src/nvidia/src/kernel/os/os_timer.c @@ -0,0 +1,423 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2002-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief This file contains platform-independent code for the 1 Hz OS timer. + */ + +#include "objtmr.h" +#include "core/thread_state.h" +#include "core/locks.h" + +static NvBool _os1HzCallbackIsOnList(OBJTMR *pTmr, OS1HZPROC callback, void *pData, NvU32 flags); +static NV_STATUS _os1HzCallback(OBJGPU *pGpu, OBJTMR *pTmr, TMR_EVENT *pTmrEvent); + +/*! + * @brief Initialize 1Hz callbacks + * + * Initialize the 1Hz callback list and create a timer event + * (if using PTIMER events). + * + * @param[in,out] pTmr TMR object pointer + */ +NV_STATUS +osInit1HzCallbacks +( + OBJTMR *pTmr +) +{ + NvU32 i; + + // Initialize the OS 1 Hz timer callback list. + pTmr->pOs1HzCallbackList = NULL; + pTmr->pOs1HzCallbackFreeList = pTmr->os1HzCallbackTable; + + // Fill in all the forward pointers in the callback table. + for (i = 0; i < (TMR_NUM_CALLBACKS_OS - 1); i++) + { + pTmr->os1HzCallbackTable[i].next = &pTmr->os1HzCallbackTable[i+1]; + } + pTmr->os1HzCallbackTable[i].next = NULL; + + if (pTmr->getProperty(pTmr, PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS)) + { + NV_ASSERT_OK_OR_RETURN(tmrEventCreate(pTmr, &pTmr->pOs1HzEvent, + _os1HzCallback, NULL, TMR_FLAG_RECUR)); + } + + return NV_OK; +} + +/*! + * @brief Destroy 1Hz callbacks + * + * Destroy the 1Hz callback list and free the timer event + * (if using PTIMER events). 
+ * + * @param[in,out] pTmr TMR object pointer + */ +NV_STATUS +osDestroy1HzCallbacks +( + OBJTMR *pTmr +) +{ + if (pTmr->pOs1HzEvent != NULL) + { + tmrEventCancel(pTmr, pTmr->pOs1HzEvent); + tmrEventDestroy(pTmr, pTmr->pOs1HzEvent); + pTmr->pOs1HzEvent = NULL; + } + + pTmr->pOs1HzCallbackList = NULL; + pTmr->pOs1HzCallbackFreeList = NULL; + return NV_OK; +} + +/*! + * @brief Timer function to insert 1Hz callback to the list. + * + * This function is used to insert/register the 1Hz callback to the callback list. + * + * @param[in,out] pTmr TMR object pointer + * @param[in] callback OS1HZPROC callback function point + * @param[in] pData Unique identifier for the callback + * @param[in] flags Callback flags + * + * @return NV_OK The callback has been added + * @return NV_ERR_INVALID_REQUEST The callback has not been added + */ +NV_STATUS +osSchedule1SecondCallback +( + OBJGPU *pGpu, + OS1HZPROC callback, + void *pData, + NvU32 flags +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + OS1HZTIMERENTRY *pEntry; + NV_STATUS nvStatus = NV_OK; + + // Grab the next free timer entry. + if ((pTmr->pOs1HzCallbackFreeList != NULL) && + !_os1HzCallbackIsOnList(pTmr, callback, pData, flags)) + { + if ((pTmr->pOs1HzCallbackList == NULL) && (pTmr->pOs1HzEvent != NULL)) + { + // First one. Add 1Hz callback to timer events. + NV_ASSERT_OK(tmrEventScheduleRelSec(pTmr, pTmr->pOs1HzEvent, 1)); + } + + pEntry = pTmr->pOs1HzCallbackFreeList; + pTmr->pOs1HzCallbackFreeList = pEntry->next; + + pEntry->callback = callback; + pEntry->data = pData; + pEntry->flags = flags; + + pEntry->next = pTmr->pOs1HzCallbackList; + pTmr->pOs1HzCallbackList = pEntry; + } + else + { + NV_PRINTF(LEVEL_INFO, "Callback registration FAILED!\n"); + nvStatus = NV_ERR_INVALID_REQUEST; + } + + return nvStatus; +} + +/*! + * @brief Timer function to remove 1Hz callback from the list. + * + * This function is used to remove/unregister the 1Hz callback from + * the callback list. + * + * @param[in,out] pTmr TMR object pointer + * @param[in] callback OS1HZPROC callback function point + * @param[in] pData Unique identifier for the callback + */ +void +osRemove1SecondRepeatingCallback +( + OBJGPU *pGpu, + OS1HZPROC callback, + void *pData +) +{ + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + OS1HZTIMERENTRY *pEntry; + OS1HZTIMERENTRY **ppEntryPtr; + + ppEntryPtr = &pTmr->pOs1HzCallbackList; + while ((*ppEntryPtr) != NULL) + { + pEntry = *ppEntryPtr; + if ((pEntry->callback == callback) && + (pEntry->data == pData) && + (pEntry->flags & NV_OS_1HZ_REPEAT)) + { + *ppEntryPtr = pEntry->next; + pEntry->next = pTmr->pOs1HzCallbackFreeList; + pEntry->data = NULL; + pEntry->callback = NULL; + pEntry->flags = NV_OS_1HZ_REPEAT; + pTmr->pOs1HzCallbackFreeList = pEntry; + break; + } + ppEntryPtr = &pEntry->next; + } + + if ((pTmr->pOs1HzCallbackList == NULL) && (pTmr->pOs1HzEvent != NULL)) + { + // Last one. Remove 1Hz callback from timer events. 
+ tmrEventCancel(pTmr, pTmr->pOs1HzEvent); + } +} + +// +// Return Value(TRUE) is used by Vista to determine if we were able to acquire the lock +// If we cannot acquire the lock this means the API or ISR/DPC has it +// +NvBool +osRun1HzCallbacksNow +( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJTMR *pTmr = GPU_GET_TIMER(pGpu); + OS1HZTIMERENTRY **ppEntryPtr; + OS1HZPROC pProc; + THREAD_STATE_NODE threadState; + void *pData; + NvBool bAcquired = NV_TRUE; + GPU_MASK lockedGpus = 0; +#if !TLS_DPC_HAVE_UNIQUE_ID + NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; // ISR allocations come from this buffer + PORT_MEM_ALLOCATOR *pDpcAllocator; + pDpcAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator)); + tlsIsrInit(pDpcAllocator); +#endif + + // + // LOCK: + // + // What irql are we at here? Should we acquire the API lock in addition to + // or instead of the GPUs lock? + // + + // LOCK: try to acquire GPU lock + if (rmGpuGroupLockAcquire(pGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_TMR, + &lockedGpus) != NV_OK) + { + // Out of conflicting thread + bAcquired = NV_FALSE; + goto exit; + } + + if (osCondAcquireRmSema(pSys->pSema) != NV_OK) + { + // UNLOCK: release GPU lock + rmGpuGroupLockRelease(lockedGpus, GPUS_LOCK_FLAGS_NONE); + // Out of conflicting thread + bAcquired = NV_FALSE; + goto exit; + } + + threadStateInitISRAndDeferredIntHandler(&threadState, pGpu, + THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER); + + if (!gpuIsGpuFullPower(pGpu)) + { + goto exit; + } + + ppEntryPtr = &pTmr->pOs1HzCallbackList; + for (;;) + { + POS1HZTIMERENTRY entry; + + // Be paranoid. + entry = *ppEntryPtr; + + // End of list? + if (entry == NULL) + break; + + // Run the callback. + if (entry->callback != NULL) + { + pProc = entry->callback; + pData = entry->data; + pProc(pGpu, pData); + } + + // + // The proc call above can add new entries to the list. + // When new entries are added, they are added at the + // beginning of the list. That means that our *entryPtr + // might no longer point to our current entry. If that is + // the case, then we need to search the list again to find + // our entry. Or inside this code, we need to find the + // entryPtr over again. + // + if (entry != *ppEntryPtr) + { + POS1HZTIMERENTRY item; + + ppEntryPtr = &pTmr->pOs1HzCallbackList; + for (;;) + { + // Be paranoid. + item = *ppEntryPtr; + + // End of list? + if (item == NULL) + break; + + if (item == entry) + { + break; + } + + ppEntryPtr = &item->next; + } + + if (item != entry) + { + // + // The entry was removed from the list inside the proc. + // So, we don't need to do anything below. Use + // ppEntryPtr = NULL to indicate that for now. + // + ppEntryPtr = NULL; + } + + } + + // + // If this timer is supposed to repeat, leave it in place. + // Otherwise, move it to the free list. 
+ // + if ( (ppEntryPtr != NULL) && + !(entry->flags & NV_OS_1HZ_REPEAT)) + { + *ppEntryPtr = entry->next; + entry->next = pTmr->pOs1HzCallbackFreeList; + pTmr->pOs1HzCallbackFreeList = entry; + } + else + { + ppEntryPtr = &entry->next; + } + } + +exit: + if (bAcquired) + { + // Out of conflicting thread + threadStateFreeISRAndDeferredIntHandler(&threadState, + pGpu, THREAD_STATE_FLAGS_IS_DEFERRED_INT_HANDLER); + osReleaseRmSema(pSys->pSema, NULL); + // UNLOCK: release GPU lock + rmGpuGroupLockRelease(lockedGpus, GPUS_LOCK_FLAGS_NONE); + } + +#if !TLS_DPC_HAVE_UNIQUE_ID + tlsIsrDestroy(pDpcAllocator); + portMemAllocatorRelease(pDpcAllocator); +#endif + + return bAcquired; +} + +/*! + * @brief Timer function to check the duplicate callback on the list. + * + * This function is used to check if there's any duplicate repeat callback has + * been registered to the list, walk through the list and find if there's any + * registered callback matched with flags NV_OS_1HZ_REPEAT. + * + * @param[in,out] pTmr TMR object pointer + * @param[in] callback OS1HZPROC callback function point + * @param[in] pData Unique identifier for the callback + * @param[in] flags Callback flags + * + * @return NV_TRUE The callback has been registered + * @return NV_FALSE The callback has not been registered + */ +static NvBool +_os1HzCallbackIsOnList +( + OBJTMR *pTmr, + OS1HZPROC callback, + void *pData, + NvU32 flags +) +{ + POS1HZTIMERENTRY pScan; + + for (pScan = pTmr->pOs1HzCallbackList; pScan != NULL; pScan = pScan->next) + { + if ((pScan->callback == callback) && + (pScan->data == pData) && + (pScan->flags & NV_OS_1HZ_REPEAT)) + { + break; + } + } + + return pScan != NULL; +} + +/*! + * @brief Os 1Hz callback function + * + * Calls all callbacks on the 1Hz list and reschedules callback + * (if using PTIMER events). + * + * @param[in,out] pGpu GPU object pointer + * @param[in,out] pTmr TMR object pointer + * @param[in] pTmrEvent pointer to the timer event + * + * @return NV_OK The callback was rescheduled successfully. + * @return NV_ERR_INVALID_ARGUMENT The callback was not rescheduled. + */ +static NV_STATUS +_os1HzCallback +( + OBJGPU *pGpu, + OBJTMR *pTmr, + TMR_EVENT *pTmrEvent +) +{ + osRun1HzCallbacksNow(pGpu); + + // TMR_FLAG_RECUR does not work, so reschedule it here. + return tmrEventScheduleRelSec(pTmr, pTmrEvent, 1); +} diff --git a/src/nvidia/src/kernel/platform/chipset/chipset.c b/src/nvidia/src/kernel/platform/chipset/chipset.c new file mode 100644 index 000000000..e1b38bc52 --- /dev/null +++ b/src/nvidia/src/kernel/platform/chipset/chipset.c @@ -0,0 +1,794 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** HW State Rotuines ***************************\ +* Core Logic Object Function Definitions. * +\***************************************************************************/ + +#include "core/system.h" +#include "platform/chipset/chipset.h" +#include "platform/platform.h" +#include "platform/chipset/chipset_info.h" +#include "os/os.h" +#include "nvRmReg.h" +#include "nvpcie.h" +#include "nv_ref.h" +#include "kernel/gpu/bif/kernel_bif.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gsp/gsp_static_config.h" +#include "ctrl/ctrl2080/ctrl2080bus.h" + +// local static funcs +static void clDestructHWBC(OBJHWBC *pHWBC); + +NV_STATUS +clConstruct_IMPL(OBJCL *pCl) +{ + // Used to track when the link has gone into Recovery, which can cause CEs. + pCl->EnteredRecoverySinceErrorsLastChecked = NV_FALSE; + + pCl->pPcieConfigSpaceBase = NULL; + + // + // We set this property by default. + // Chipset setup function can override this. + // Right now only Tegra chipsets overide this setting. + // + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + + return NV_OK; +} + + +void +clInitPropertiesFromRegistry_IMPL(OBJGPU *pGpu, OBJCL *pCl) +{ + NvU32 data32; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_DISABLE_BR03_FLOW_CONTROL, &data32) == NV_OK + && data32) + { + pCl->setProperty(pCl, PDB_PROP_CL_DISABLE_BR03_FLOW_CONTROL, NV_TRUE); + } + + if (osReadRegistryDword(pGpu, NV_REG_STR_RM_FORCE_ENABLE_GEN2, &data32) == NV_OK) + { + if (NV_REG_STR_RM_FORCE_ENABLE_GEN2_YES == data32) + { + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_FORCE_GEN2_ENABLE, NV_TRUE); + } + } + + pOS->osQADbgRegistryInit(pOS); +} + +static void +clDestructHWBC(OBJHWBC *pHWBC) +{ + if (pHWBC->pSibling) + { + clDestructHWBC(pHWBC->pSibling); + pHWBC->pSibling = NULL; + } + if (pHWBC->pFirstChild) + { + clDestructHWBC(pHWBC->pFirstChild); + pHWBC->pFirstChild = NULL; + } + + portMemFree(pHWBC); +} + +void +clDestruct_IMPL(OBJCL *pCl) +{ + if (pCl->pHWBC) + { + clDestructHWBC(pCl->pHWBC); + pCl->pHWBC = NULL; + } + + clFreeBusTopologyCache(pCl); + + clFreePcieConfigSpaceBase(pCl); +} + +// +// Find our NV device on the PCI bus and save it's pci bus/device address. +// +NvU32 +clInitMappingPciBusDevice_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + void *handle; + NvU32 domain; + NvU16 bus; + NvU8 device; + NvU16 vendorID, deviceID; + NvBool bFoundDevice = NV_FALSE; + + // do we already know our domain/bus/device? + if (gpuGetDBDF(pGpu) == 0) + { + // we're checking all the device/funcs for the first 10 buses! + // Note that we give up the enumeration once we find our first + // or in the Multichip case our second device. 
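+        //
+        // A device below is matched by the NVIDIA vendor ID, a display
+        // device class code, and a BAR0 equal to the GPU's already-known
+        // physical address (busInfo.gpuPhysAddr).
+        //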
+ for (domain = 0; (domain < PCI_MAX_DOMAINS) && !bFoundDevice; domain++) + { + for (bus = 0; (bus < PCI_MAX_BUSES) && !bFoundDevice; bus++) + { + for (device = 0; device < PCI_MAX_DEVICES; device++) + { + // read at domain, bus, device (we're always function 0) + handle = osPciInitHandle(domain, (NvU8) bus, device, 0, &vendorID, &deviceID); + if (!handle) + continue; + + if (vendorID != NV_CONFIG_PCI_NV_0_VENDOR_ID_NVIDIA) + continue; + + // make sure we're a VGA device class + if ((osPciReadByte(handle, 0xB)) != PCI_CLASS_DISPLAY_DEV) + continue; + + // if the BAR0 matches our PhysAddr, it's the correct device + if ((osPciReadDword(handle, PCI_BASE_ADDRESS_0)) != + pGpu->busInfo.gpuPhysAddr) + continue; + + // save our domain/bus/device/function + pGpu->busInfo.nvDomainBusDeviceFunc = gpuEncodeDomainBusDevice(domain, (NvU8)bus, device); + + bFoundDevice = NV_TRUE; + + if (!(IS_SIMULATION(pGpu) || IS_SIM_MODS(GPU_GET_OS(pGpu)))) + { + NV_ASSERT(gpuGetDBDF(pGpu) != 0); + } + // On the HP "Wilson's Peak"/McKinley system + // the graphics is located at + // domain==0, bus==0, device==0. + // Why should this be invalid? + // In simulation, the fmodel can put this at bus==0, device==0 + break; + } + } + } + } + + domain = gpuGetDomain(pGpu); + bus = gpuGetBus(pGpu); + device = gpuGetDevice(pGpu); + + if (gpuGetDBDF(pGpu) == 0) + { + if (!(IS_SIMULATION(pGpu)|| IS_SIM_MODS(GPU_GET_OS(pGpu))) + || (bFoundDevice == NV_FALSE)) + { + NV_PRINTF(LEVEL_ERROR, + "NVRM initMappingPciBusDevice: can't find a device!\n"); + return NV0000_CTRL_GPU_INVALID_ID; // couldn't find it + } + } + + return gpuGenerate32BitId(domain, (NvU8)bus, device); +} + +// +// Walk the PCIE Capabilities and if the subsystem ID is found then return +// the subvendorID and subdeviceID +// +static void getSubsystemFromPCIECapabilities +( + NvU32 domain, + NvU8 bus, + NvU8 device, + NvU8 func, + NvU16 *subvendorID, + NvU16 *subdeviceID +) +{ + void *handle; + NvU32 PCIECapPtr; + NvU32 PCIECap; + NvU32 PCIECapNext; + + handle = osPciInitHandle(domain, bus, device, func, NULL, NULL); + + // We start from Cap. List and search for Subsystem ID Capability + PCIECapNext = osPciReadByte(handle, PCI_CAPABILITY_LIST); + if (PCIECapNext) + { + do + { + PCIECapPtr = PCIECapNext; + PCIECap = osPciReadDword(handle, CL_PCIE_CAP - CL_PCIE_BEGIN + PCIECapPtr); + PCIECapNext = (NvU8)((PCIECap >> 8) & 0xFF); + } while ((PCIECapNext != 0) && + ((PCIECap & CAP_ID_MASK) != CAP_ID_SUBSYSTEM_ID)); + + if ((PCIECap & CAP_ID_MASK) == CAP_ID_SUBSYSTEM_ID) + { + if (subvendorID) + { + *subvendorID = osPciReadWord(handle, CL_PCIE_CAP - CL_PCIE_BEGIN + PCIECapPtr + 4); + } + if (subdeviceID) + { + *subdeviceID = osPciReadWord(handle, CL_PCIE_CAP - CL_PCIE_BEGIN + PCIECapPtr + 6); + } + } + } +} + +// +// PCI Express Support +// Find first host bridge's domain, bus, device, function, if not already found +// + +NV_STATUS +clFindFHBAndGetChipsetInfoIndex_IMPL +( + OBJCL *pCl, + NvU16 *pChipsetInfoIndex +) +{ + void *handle; + NvU32 domain; + NvU8 revisionID = 0; + NvU32 i; + NvBool matchFound = NV_FALSE; + NvU16 bus; + NvU8 device, func; + NvU16 vendorID, deviceID, subvendorID, subdeviceID; + PBUSTOPOLOGYINFO pBusTopologyInfo; + // + // PC motherboards have a host bridge to connect PCIE root complex to rest of the system. + // However, Tegra devices only have a PCI-to-PCI bridge. + // So allow Tegra chipset initialization, even if a host bridge is not found. + // See bug 1547160 comment#17 for more details. 
+ // + NvU16 pciSubBaseClass[2] = {PCI_COMMON_CLASS_SUBBASECLASS_HOST, PCI_COMMON_CLASS_SUBBASECLASS_P2P}; + + // return it, if we've got it already + if (pCl->chipsetIDBusAddr.valid) + return NV_OK; + + // Initialize to 0 + pCl->chipsetIDBusAddr.domain = 0x0; + pCl->chipsetIDBusAddr.bus = 0x0; + pCl->chipsetIDBusAddr.device = 0x0; + pCl->chipsetIDBusAddr.func = 0x0; + pCl->chipsetIDBusAddr.valid = 0x0; + pCl->chipsetIDBusAddr.handle = NULL; + pCl->chipsetIDInfo.deviceID = PCI_INVALID_DEVICEID; + pCl->chipsetIDInfo.vendorID = PCI_INVALID_VENDORID; + pCl->chipsetIDInfo.subdeviceID = PCI_INVALID_SUBDEVICEID; + pCl->chipsetIDInfo.subvendorID = PCI_INVALID_SUBVENDORID; + + for (i = 0; chipsetInfo[i].vendorID; i++) + { + pBusTopologyInfo = pCl->pBusTopologyInfo; + while (pBusTopologyInfo) + { + if ((pBusTopologyInfo->busInfo.vendorID == chipsetInfo[i].vendorID) && + (pBusTopologyInfo->busInfo.deviceID == chipsetInfo[i].deviceID)) + { + matchFound = NV_TRUE; + break; + } + pBusTopologyInfo = pBusTopologyInfo->next; + } + + if (matchFound) + { + if (pChipsetInfoIndex != NULL) + { + *pChipsetInfoIndex = (NvU16) i; + } + + // + // IBM Wildhorse system has NV chipset attached to secondary K8 at bus 0x80 + // (bug 227308). + // Do not change the algorithm for older chipsets where the devcie at bus%0x40 ==0, 0, 0 is + // considered as a host bridge. + // + if (((pBusTopologyInfo->pciSubBaseClass & 0xFF) == PCI_SUBCLASS_BR_HOST) || + (!(pBusTopologyInfo->bus % 0x40) && !pBusTopologyInfo->device && !pBusTopologyInfo->func)) + { + pCl->FHBAddr.domain = pBusTopologyInfo->domain; + pCl->FHBAddr.bus = pBusTopologyInfo->bus; + pCl->FHBAddr.device = pBusTopologyInfo->device; + pCl->FHBAddr.func = pBusTopologyInfo->func; + pCl->FHBAddr.valid = 0x1; + pCl->FHBAddr.handle = pBusTopologyInfo->handle; + + // Store a copy of deviceID, vendorID, subdeviceID and subvendorID; + pCl->FHBBusInfo.deviceID = pBusTopologyInfo->busInfo.deviceID; + pCl->FHBBusInfo.vendorID = pBusTopologyInfo->busInfo.vendorID; + pCl->FHBBusInfo.subdeviceID = pBusTopologyInfo->busInfo.subdeviceID; + pCl->FHBBusInfo.subvendorID = pBusTopologyInfo->busInfo.subvendorID; + pCl->FHBBusInfo.revisionID = pBusTopologyInfo->busInfo.revisionID; + + pCl->chipsetIDBusAddr.domain = pBusTopologyInfo->domain; + pCl->chipsetIDBusAddr.bus = pBusTopologyInfo->bus; + pCl->chipsetIDBusAddr.device = pBusTopologyInfo->device; + pCl->chipsetIDBusAddr.func = pBusTopologyInfo->func; + pCl->chipsetIDBusAddr.valid = 0x1; + pCl->chipsetIDBusAddr.handle = pBusTopologyInfo->handle; + + // Store a copy of deviceID, vendorID, subdeviceID and subvendorID; + pCl->chipsetIDInfo.deviceID = pBusTopologyInfo->busInfo.deviceID; + pCl->chipsetIDInfo.vendorID = pBusTopologyInfo->busInfo.vendorID; + pCl->chipsetIDInfo.subdeviceID = pBusTopologyInfo->busInfo.subdeviceID; + pCl->chipsetIDInfo.subvendorID = pBusTopologyInfo->busInfo.subvendorID; + return NV_OK; + } + else + { + pCl->chipsetIDBusAddr.domain = pBusTopologyInfo->domain; + pCl->chipsetIDBusAddr.bus = pBusTopologyInfo->bus; + pCl->chipsetIDBusAddr.device = pBusTopologyInfo->device; + pCl->chipsetIDBusAddr.func = pBusTopologyInfo->func; + pCl->chipsetIDBusAddr.valid = 0x1; + pCl->chipsetIDBusAddr.handle = pBusTopologyInfo->handle; + + // Store a copy of deviceID, vendorID, subdeviceID and subvendorID; + pCl->chipsetIDInfo.deviceID = pBusTopologyInfo->busInfo.deviceID; + pCl->chipsetIDInfo.vendorID = pBusTopologyInfo->busInfo.vendorID; + pCl->chipsetIDInfo.subdeviceID = pBusTopologyInfo->busInfo.subdeviceID; + 
pCl->chipsetIDInfo.subvendorID = pBusTopologyInfo->busInfo.subvendorID; + + if (pCl->chipsetIDInfo.subvendorID == 0) + { + getSubsystemFromPCIECapabilities(pCl->chipsetIDBusAddr.domain, + pCl->chipsetIDBusAddr.bus, + pCl->chipsetIDBusAddr.device, + pCl->chipsetIDBusAddr.func, + &pCl->chipsetIDInfo.subvendorID, + &pCl->chipsetIDInfo.subdeviceID); + } + break; + } + } + } + + if ((!matchFound) && (pChipsetInfoIndex != NULL)) + { + // This should be the entry with NULL information + NV_ASSERT(chipsetInfo[i].vendorID == 0); + *pChipsetInfoIndex = (NvU16) i; + } + + // + // We are here because VendorId and deviceId in chipsetInfo table does not + // match with Host Bridge ID. In that case we need to find FHB either in + // cached bus topology or need to loop through PCI bus to find the FHB. + // + for (i = 0; i < 2; i++) + { + pBusTopologyInfo = pCl->pBusTopologyInfo; + while (pBusTopologyInfo) + { + if (pBusTopologyInfo->pciSubBaseClass == pciSubBaseClass[i]) + { + pCl->FHBAddr.domain = pBusTopologyInfo->domain; + pCl->FHBAddr.bus = pBusTopologyInfo->bus; + pCl->FHBAddr.device = pBusTopologyInfo->device; + pCl->FHBAddr.func = pBusTopologyInfo->func; + pCl->FHBAddr.valid = 0x1; + pCl->FHBAddr.handle = pBusTopologyInfo->handle; + + // Store a copy of deviceID, vendorID, subdeviceID and subvendorID; + pCl->FHBBusInfo.deviceID = pBusTopologyInfo->busInfo.deviceID; + pCl->FHBBusInfo.vendorID = pBusTopologyInfo->busInfo.vendorID; + pCl->FHBBusInfo.subdeviceID = pBusTopologyInfo->busInfo.subdeviceID; + pCl->FHBBusInfo.subvendorID = pBusTopologyInfo->busInfo.subvendorID; + pCl->FHBBusInfo.revisionID = pBusTopologyInfo->busInfo.revisionID; + + if (!matchFound) + { + pCl->chipsetIDBusAddr.domain = pBusTopologyInfo->domain; + pCl->chipsetIDBusAddr.bus = pBusTopologyInfo->bus; + pCl->chipsetIDBusAddr.device = pBusTopologyInfo->device; + pCl->chipsetIDBusAddr.func = pBusTopologyInfo->func; + pCl->chipsetIDBusAddr.valid = 0x1; + pCl->chipsetIDBusAddr.handle = pBusTopologyInfo->handle; + + // Store a copy of deviceID, vendorID, subdeviceID and subvendorID; + pCl->chipsetIDInfo.deviceID = pBusTopologyInfo->busInfo.deviceID; + pCl->chipsetIDInfo.vendorID = pBusTopologyInfo->busInfo.vendorID; + pCl->chipsetIDInfo.subdeviceID = pBusTopologyInfo->busInfo.subdeviceID; + pCl->chipsetIDInfo.subvendorID = pBusTopologyInfo->busInfo.subvendorID; + + if (pCl->chipsetIDInfo.subvendorID == 0) + { + getSubsystemFromPCIECapabilities(pCl->chipsetIDBusAddr.domain, + pCl->chipsetIDBusAddr.bus, + pCl->chipsetIDBusAddr.device, + pCl->chipsetIDBusAddr.func, + &pCl->chipsetIDInfo.subvendorID, + &pCl->chipsetIDInfo.subdeviceID); + } + } + return NV_OK; + } + pBusTopologyInfo = pBusTopologyInfo->next; + } + + NV_PRINTF(LEVEL_INFO, + "NVRM : Host bridge device not found. Looking for a PCI-to-PCI bridge device!!!\n"); + } + + NV_PRINTF(LEVEL_ERROR, + "NVRM : This is Bad. FHB not found in cached bus topology!!!\n"); + + // HB is not present in cached bus topology. + NV_ASSERT(0); + + // + // Don't bother scanning all domains, which takes approximately forever. + // If we can't find it in domain 0, we're probably not going to anyway + // (and something is already wrong). 
+ // + domain = 0; + + for (bus = 0; bus < PCI_MAX_BUSES; bus++) + { + for (device = 0; device < PCI_MAX_DEVICES; device++) + { + for (func = 0; func < PCI_MAX_FUNCTION; func++) + { + // read at domain, bus, device, func + handle = osPciInitHandle(domain, (NvU8)bus, device, func, &vendorID, &deviceID); + if (!handle) + { + if (func == 0) + { + // If a read to function zero of a specified bus/device master aborts, + // then it is assumed that no such device exists on the bus since + // devices are required to implement function number zero. + // In this case reads to the remaining functions are not necessary. + break; + } + else + { + continue; + } + } + + if (vendorID == PCI_INVALID_VENDORID) + break; // skip to the next device + + if ((osPciReadByte(handle, PCI_HEADER_TYPE0_BASECLASS)) != PCI_CLASS_BRIDGE_DEV) + break; // not a bridge device + + if ((osPciReadByte(handle, PCI_HEADER_TYPE0_SUBCLASS)) != PCI_SUBCLASS_BR_HOST) + break; // not a host bridge + + subdeviceID = osPciReadWord(handle, PCI_HEADER_TYPE0_SUBSYS_ID); + subvendorID = osPciReadWord(handle, PCI_HEADER_TYPE0_SUBSYS_VEN_ID); + revisionID = osPciReadByte(handle, PCI_HEADER_TYPE0_REVISION_ID); + + // Found it + pCl->FHBAddr.domain = domain; + pCl->FHBAddr.bus = (NvU8)bus; + pCl->FHBAddr.device = device; + pCl->FHBAddr.func = func; + pCl->FHBAddr.valid = 0x1; + pCl->FHBAddr.handle = handle; + + // Store a copy of deviceID, vendorID, subdeviceID and subvendorID; + pCl->FHBBusInfo.deviceID = deviceID; + pCl->FHBBusInfo.vendorID = vendorID; + pCl->FHBBusInfo.subdeviceID = subdeviceID; + pCl->FHBBusInfo.subvendorID = subvendorID; + pCl->FHBBusInfo.revisionID = revisionID; + + if (!matchFound) + { + pCl->chipsetIDBusAddr.domain = domain; + pCl->chipsetIDBusAddr.bus = (NvU8)bus; + pCl->chipsetIDBusAddr.device = device; + pCl->chipsetIDBusAddr.func = func; + pCl->chipsetIDBusAddr.valid = 0x1; + pCl->chipsetIDBusAddr.handle = handle; + + // Store a copy of deviceID, vendorID, subdeviceID and subvendorID; + pCl->chipsetIDInfo.deviceID = deviceID; + pCl->chipsetIDInfo.vendorID = vendorID; + pCl->chipsetIDInfo.subdeviceID = subdeviceID; + pCl->chipsetIDInfo.subvendorID = subvendorID; + } + return NV_OK; + } + } + } + + // This is bad, we didn't find the First Host Bridge device (assume domain0/bus0/device0/func0) + pCl->FHBAddr.domain = 0x0; + pCl->FHBAddr.bus = 0x0; + pCl->FHBAddr.device = 0x0; + pCl->FHBAddr.func = 0x0; + pCl->FHBAddr.valid = 0x1; + pCl->FHBAddr.handle = NULL; + pCl->FHBBusInfo.deviceID = PCI_INVALID_DEVICEID; + pCl->FHBBusInfo.vendorID = PCI_INVALID_VENDORID; + pCl->FHBBusInfo.subdeviceID = PCI_INVALID_SUBDEVICEID; + pCl->FHBBusInfo.subvendorID = PCI_INVALID_SUBVENDORID; + + DBG_BREAKPOINT(); + + NV_ASSERT(0); //We can't find a host bridge, bad! 
+ + + return NV_ERR_NOT_SUPPORTED; +} + +NvBool +clIsL1MaskEnabledForUpstreamPort_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + NvU32 linkCtrl; + NvBool bEnable = NV_FALSE; + + if (!pGpu->gpuClData.upstreamPort.addr.valid) + { + if (!pGpu->gpuClData.rootPort.addr.valid) + { + bEnable = NV_TRUE; + } + else + { + if (clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, + CL_PCIE_LINK_CTRL_STATUS, &linkCtrl) != NV_OK) + { + bEnable = NV_TRUE; + } + else + { + if (!(linkCtrl & CL_PCIE_LINK_CTRL_STATUS_ASPM_L1_BIT)) + { + bEnable = NV_TRUE; + } + } + } + } + else + { + if (clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.upstreamPort, + CL_PCIE_LINK_CTRL_STATUS, &linkCtrl) != NV_OK) + { + bEnable = NV_TRUE; + } + else + { + if (!(linkCtrl & CL_PCIE_LINK_CTRL_STATUS_ASPM_L1_BIT)) + { + bEnable = NV_TRUE; + } + } + } + + return bEnable; +} + +// +// return the First Host Bridge's handle, VendorID and DeviceID +// +NV_STATUS +clGetFHBHandle_IMPL( + OBJCL *pCl, + void **Handle, + NvU16 *VendorID, + NvU16 *DeviceID +) +{ + NV_ASSERT(Handle && DeviceID && VendorID); // Avoid Null Pointer + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + *Handle = pCl->FHBAddr.handle; + *DeviceID = pCl->FHBBusInfo.deviceID; + *VendorID = pCl->FHBBusInfo.vendorID; + + // can this happen, should this be #if 0 out? + if (*Handle == NULL) + { + *Handle = osPciInitHandle(pCl->FHBAddr.domain, + pCl->FHBAddr.bus, + pCl->FHBAddr.device, + pCl->FHBAddr.func, + VendorID, + DeviceID); + } + + return (*Handle ? NV_OK : NV_ERR_GENERIC); +} + + +NV_STATUS +clInit_IMPL( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + // + // Common code for all buses + // + (void)clInitMappingPciBusDevice(pGpu, pCl); + + if (kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) == + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) + { + return clInitPcie(pGpu, pCl); + } + + return NV_OK; // no Init is needed for PCI +} + +void +clUpdateConfig_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + // Common code for all buses + clInitMappingPciBusDevice(pGpu, pCl); + + if (kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) == + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) + { + clUpdatePcieConfig(pGpu, pCl); + return; + } + + return; // no Update is needed for PCI +} + +NV_STATUS +clTeardown_IMPL( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + + if (pKernelBif == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + clFreeBusTopologyCache(pCl); + + switch (kbifGetBusIntfType_HAL(pKernelBif)) + { + case NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS: + return clTeardownPcie(pGpu, pCl); + + case NV2080_CTRL_BUS_INFO_TYPE_PCI: + case NV2080_CTRL_BUS_INFO_TYPE_FPCI: + return NV_OK; + + default: + return NV_ERR_GENERIC; + } +} + +NV_STATUS +subdeviceCtrlCmdBusGetBFD_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_BUS_GET_BFD_PARAMSARR *pBusGetBFDParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + BUSTOPOLOGYINFO *pBusTopologyInfo = pCl->pBusTopologyInfo; + NvU32 i = 0; + + while(pBusTopologyInfo && i < 32) + { + pBusGetBFDParams->params[i].valid = NV_TRUE; + pBusGetBFDParams->params[i].deviceID = pBusTopologyInfo->busInfo.deviceID; + pBusGetBFDParams->params[i].vendorID = pBusTopologyInfo->busInfo.vendorID; + pBusGetBFDParams->params[i].domain = pBusTopologyInfo->domain; + pBusGetBFDParams->params[i].bus = (NvU16)pBusTopologyInfo->bus; + pBusGetBFDParams->params[i].device = (NvU16)pBusTopologyInfo->device; + pBusGetBFDParams->params[i].function = (NvU8)pBusTopologyInfo->func; + i++; + pBusTopologyInfo = 
pBusTopologyInfo->next; + } + if(i < 32) + { + pBusGetBFDParams->params[i].valid = NV_FALSE; + } + + pBusTopologyInfo = pCl->pBusTopologyInfo; + return NV_OK; +} + +void clSyncWithGsp_IMPL(OBJCL *pCl, GspSystemInfo *pGSI) +{ + NvU32 idx = 0; +#define CL_SYNC_PDB(prop) \ + do { \ + pGSI->clPdbProperties |= pCl->getProperty(pCl, prop) ? NVBIT64(idx) : 0;\ + idx++; \ + } while (0) + + CL_SYNC_PDB(PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE); + CL_SYNC_PDB(PDB_PROP_CL_FORCE_SNOOP_READS_AND_WRITES_WAR_BUG_410390); + CL_SYNC_PDB(PDB_PROP_CL_DISABLE_BR03_FLOW_CONTROL); + CL_SYNC_PDB(PDB_PROP_CL_ASLM_SUPPORTS_NV_LINK_UPGRADE); + CL_SYNC_PDB(PDB_PROP_CL_ASLM_SUPPORTS_FAST_LINK_UPGRADE); + CL_SYNC_PDB(PDB_PROP_CL_ASLM_SUPPORTS_HOT_RESET); + CL_SYNC_PDB(PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE); + CL_SYNC_PDB(PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST); + CL_SYNC_PDB(PDB_PROP_CL_ASPM_L0S_CHIPSET_DISABLED); + CL_SYNC_PDB(PDB_PROP_CL_ASPM_L1_CHIPSET_DISABLED); + CL_SYNC_PDB(PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY); + CL_SYNC_PDB(PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY); + CL_SYNC_PDB(PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED); + CL_SYNC_PDB(PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED_GEFORCE); + CL_SYNC_PDB(PDB_PROP_CL_EXTENDED_TAG_FIELD_NOT_CAPABLE); + CL_SYNC_PDB(PDB_PROP_CL_NOSNOOP_NOT_CAPABLE); + CL_SYNC_PDB(PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE); + CL_SYNC_PDB(PDB_PROP_CL_PCIE_FORCE_GEN2_ENABLE); + CL_SYNC_PDB(PDB_PROP_CL_PCIE_GEN2_AT_LESS_THAN_X16_DISABLED); + CL_SYNC_PDB(PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR); + CL_SYNC_PDB(PDB_PROP_CL_INTEL_CPU_ROOTPORT1_NEEDS_H57_WAR); + CL_SYNC_PDB(PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ); + CL_SYNC_PDB(PDB_PROP_CL_ON_PCIE_GEN3_PATSBURG); + CL_SYNC_PDB(PDB_PROP_CL_ALLOW_PCIE_GEN3_ON_PATSBURG_WITH_IVBE_CPU); + CL_SYNC_PDB(PDB_PROP_CL_BUG_999673_P2P_ARBITRARY_SPLIT_WAR); + CL_SYNC_PDB(PDB_PROP_CL_UPSTREAM_LTR_SUPPORTED); + CL_SYNC_PDB(PDB_PROP_CL_BUG_1340801_DISABLE_GEN3_ON_GIGABYTE_SNIPER_3); + CL_SYNC_PDB(PDB_PROP_CL_BUG_1681803_WAR_DISABLE_MSCG); + CL_SYNC_PDB(PDB_PROP_CL_ON_HASWELL_HOST_BRIDGE); + CL_SYNC_PDB(PDB_PROP_CL_PCIE_NON_COHERENT_USE_TC0_ONLY); + CL_SYNC_PDB(PDB_PROP_CL_UNSUPPORTED_CHIPSET); + CL_SYNC_PDB(PDB_PROP_CL_IS_CHIPSET_IO_COHERENT); + CL_SYNC_PDB(PDB_PROP_CL_DISABLE_IOMAP_WC); + CL_SYNC_PDB(PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE); + CL_SYNC_PDB(PDB_PROP_CL_IS_EXTERNAL_GPU); + +#undef CL_SYNC_PDB + + NV_ASSERT(idx < (sizeof(pGSI->clPdbProperties) * 8)); + + pGSI->Chipset = pCl->Chipset; + pGSI->FHBBusInfo = pCl->FHBBusInfo; + +} diff --git a/src/nvidia/src/kernel/platform/chipset/chipset_info.c b/src/nvidia/src/kernel/platform/chipset/chipset_info.c new file mode 100644 index 000000000..8fcdccb0d --- /dev/null +++ b/src/nvidia/src/kernel/platform/chipset/chipset_info.c @@ -0,0 +1,1369 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/**************** Resource Manager Defines and Structures ******************\ +* * +* Chipset and Root Port information * +* * +\***************************************************************************/ + +#include "platform/chipset/chipset_info.h" +#include "platform/chipset/chipset.h" +#include "core/system.h" +#include "os/os.h" +#include "nvpcie.h" + +#include "nvcst.h" + +// +// X48 & X38 share the same chipset ID, differentiate between two, we need to look at the +// capability Identifier register bit 89 +// +#define PCIE_CHIPSET_CAPABILITY_ID_OFFSET_X48 0xE0 // PCIE chipset capability ID offset +#define PCIE_CHIPSET_DETECT_OFFSET 0x8 // PCIE chipset identifier +#define PCIE_CHIPSET_DETECT_BIT 0x19 // Chipset detect bit + +// Used to check if chipset is X38 or X48 +#define IS_CHIPSET_X38(n) (!((n) & NVBIT(PCIE_CHIPSET_DETECT_BIT))) + +static NV_STATUS Intel_Core_Nehalem_Processor_setupFunc(OBJCL *pCl); +static NV_STATUS Intel_Huron_River_setupFunc(OBJCL *pCl); +void _Set_ASPM_L0S_L1(OBJCL *pCl, NvBool bDisableL0S, NvBool bDisableL1); + +RPINFO rootPortInfo[] = +{ + {PCI_VENDOR_ID_BROADCOM, 0x0140, RP_BROADCOM_HT2100, Broadcom_HT2100_setupFunc}, + {PCI_VENDOR_ID_BROADCOM, 0x0142, RP_BROADCOM_HT2100, Broadcom_HT2100_setupFunc}, + {PCI_VENDOR_ID_BROADCOM, 0x0144, RP_BROADCOM_HT2100, Broadcom_HT2100_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_2581_ROOT_PORT, RP_INTEL_2581, Intel_RP25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_2585_ROOT_PORT, RP_INTEL_2585, Intel_RP25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2589 , RP_INTEL_2589, Intel_RP25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_2591_ROOT_PORT, RP_INTEL_2591, Intel_RP25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3597_ROOT_PORT, RP_INTEL_3597, NULL}, + {PCI_VENDOR_ID_INTEL, 0x2775, RP_INTEL_2775, Intel_RP25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, 0x2771, RP_INTEL_2771, Intel_RP25XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_8110_ROOT_PORT, RP_INTEL_8110, Intel_RP81XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_8112_ROOT_PORT, RP_INTEL_8112, Intel_RP81XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_8180_ROOT_PORT, RP_INTEL_8180, Intel_RP81XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_8181_ROOT_PORT, RP_INTEL_8181, Intel_RP81XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_8184_ROOT_PORT, RP_INTEL_8184, Intel_RP81XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_8185_ROOT_PORT, RP_INTEL_8185, Intel_RP81XX_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C02_ROOT_PORT, RP_INTEL_3C02, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C03_ROOT_PORT, RP_INTEL_3C03, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C04_ROOT_PORT, RP_INTEL_3C04, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C05_ROOT_PORT, RP_INTEL_3C05, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C06_ROOT_PORT, RP_INTEL_3C06, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C07_ROOT_PORT, RP_INTEL_3C07, 
Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C08_ROOT_PORT, RP_INTEL_3C08, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C09_ROOT_PORT, RP_INTEL_3C09, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C0A_ROOT_PORT, RP_INTEL_3C0A, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_3C0B_ROOT_PORT, RP_INTEL_3C0B, Intel_RP3C0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_2F04_ROOT_PORT, RP_INTEL_2F04, Intel_RP2F0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_2F08_ROOT_PORT, RP_INTEL_2F08, Intel_RP2F0X_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_0C01_ROOT_PORT, RP_INTEL_0C01, Intel_RP0C0X_setupFunc}, + +// last element must have zero vendor id and device id + {0, 0, RP_UNKNOWN, NULL} +}; + +BRINFO upstreamPortInfo[] = +{ + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_1901_ROOT_PORT, Intel_Skylake_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_9D18_PCH_BRIDGE, Intel_Skylake_U_Pch_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_A117_PCH_BRIDGE, Intel_Skylake_H_Pch_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_A118_PCH_BRIDGE, Intel_Skylake_H_Pch_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_9C98_PCH_BRIDGE, Intel_Broadwell_setupFunc}, + {PCI_VENDOR_ID_INTEL, DEVICE_ID_INTEL_9D10_PCH_BRIDGE, Intel_Kabylake_Y_setupFunc}, + {PCI_VENDOR_ID_AMD, DEVICE_ID_AMD_1483_ROOT_PORT, AMD_RP1483_setupFunc}, + {PCI_VENDOR_ID_AMD, DEVICE_ID_AMD_1480_ROOT_PORT, AMD_RP1480_setupFunc}, + {PCI_VENDOR_ID_AMD, DEVICE_ID_AMD_1630_ROOT_PORT, AMD_RP1630_setupFunc}, + + // last element must have zero vendor id and device id + {0, 0, NULL} +}; + +static NV_STATUS +Intel_25XX_setupFunc +( + OBJCL *pCl +) +{ + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + baseAddress = (RmPhysAddr)(osPciReadDword(pCl->FHBAddr.handle, + INTEL_25XX_CONFIG_SPACE_BASE)); + + // PCI-E enhanced config space is 256M aligned + baseAddress &= ( ~ 0x0fffffff); + + if (baseAddress) + { + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +// Montevina +static NV_STATUS +Intel_2A40_setupFunc +( + OBJCL *pCl +) +{ + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + baseAddress = (RmPhysAddr)(osPciReadDword(pCl->FHBAddr.handle, + INTEL_2A40_CONFIG_SPACE_BASE)); + + // + // PCI-E v1.1 enhanced config space is aligned between 1M and 256M, + // depending on # of buses (see PCIE v1.1, section 7.2.2). + // + baseAddress &= ( ~ 0x000fffff); + + if (baseAddress) + { + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + { + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + // + // Get the specific Montevina revision to see if its safe to enable + // ALSM. Note that the initial Montevina boards do not support ASLM. 
+ // + if (pCl->FHBBusInfo.revisionID >= INTEL_2A40_ASLM_CAPABLE_REVID) + { + // Supports ASLM + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_NV_LINK_UPGRADE, NV_TRUE); + } + } + } + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +// Calpella - Arrandale +static NV_STATUS +Intel_0040_setupFunc +( + OBJCL *pCl +) +{ + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE, NV_TRUE); + + return Intel_Core_Nehalem_Processor_setupFunc(pCl); +} + +// Eaglelake +static NV_STATUS +Intel_2E00_setupFunc +( + OBJCL *pCl +) +{ + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + { + return NV_ERR_GENERIC; + } + + baseAddress = (RmPhysAddr)(osPciReadDword(pCl->FHBAddr.handle, + INTEL_2E00_CONFIG_SPACE_BASE)); + + // + // PCI-E v1.1 enhanced config space is aligned between 1M and 256M, + // depending on # of buses (see PCIE v1.1, section 7.2.2) + // + baseAddress &= ( ~ 0x000fffff); + + if (baseAddress) + { + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + + +// Q35/BearlakeB/IntelQ33 +static NV_STATUS +Intel_29X0_setupFunc +( + OBJCL *pCl +) +{ + NV_STATUS rmStatus; + + rmStatus = Intel_29XX_setupFunc(pCl); + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + return rmStatus; +} + +static NV_STATUS +Intel_29E0_setupFunc +( + OBJCL *pCl +) +{ + NV_STATUS rmStatus; + NvU32 capId; + + rmStatus = Intel_29XX_setupFunc(pCl); + // + // Both X48 & X38 chipset share the same device ID and bit 89 + // of pci capability register is used to differentiate between two + // (Bug 549707) + // + capId = clPcieReadDword( + pCl, + pCl->FHBAddr.domain, + pCl->FHBAddr.bus, + pCl->FHBAddr.device, + pCl->FHBAddr.func, + (PCIE_CHIPSET_CAPABILITY_ID_OFFSET_X48 + PCIE_CHIPSET_DETECT_OFFSET)); + + if (IS_CHIPSET_X38(capId)) + { + // Not capable of Gen1/Gen2 switch + pCl->setProperty(pCl, + PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED_GEFORCE, + NV_TRUE); + } + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_GEN2_AT_LESS_THAN_X16_DISABLED, NV_TRUE); + + return rmStatus; +} + +static NV_STATUS +Intel_29XX_setupFunc +( + OBJCL *pCl +) +{ + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + baseAddress = (RmPhysAddr)(osPciReadDword(pCl->FHBAddr.handle, + INTEL_29XX_CONFIG_SPACE_BASE)); + + // + // PCI-E v1.1 enhanced config space is aligned between 1M and 256M, + // depending on # of buses (see PCIE v1.1, section 7.2.2). 
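+    // For example, a hypothetical raw register value of 0xE0100008 (with
+    // low-order enable/status bits set) masks down to the 1 MiB-aligned
+    // base 0xE0100000.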
+ // + baseAddress &= ( ~ 0x000fffff); + + if (baseAddress) + { + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +static NV_STATUS +Intel_25E0_setupFunc +( + OBJCL *pCl +) +{ + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + baseAddress = (RmPhysAddr)INTEL_25E0_CONFIG_SPACE_BASE_ADDRESS; + + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +static NV_STATUS +Intel_27XX_setupFunc +( + OBJCL *pCl +) +{ + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + baseAddress = (RmPhysAddr)(osPciReadDword(pCl->FHBAddr.handle, + INTEL_25XX_CONFIG_SPACE_BASE)); + + // PCI-E enhanced config space is 256M aligned + baseAddress &= ( ~ 0x0fffffff); + + if (baseAddress) + { + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +static NV_STATUS +Intel_359E_setupFunc +( + OBJCL *pCl +) +{ + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + baseAddress = (RmPhysAddr)(osPciReadDword(pCl->FHBAddr.handle, + INTEL_359E_CONFIG_SPACE_BASE)); + + // PCI-E enhanced config space is 256M aligned + baseAddress &= ( ~ 0x0fffffff); + + if (baseAddress) + { + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +static NV_STATUS +Intel_4000_setupFunc +( + OBJCL *pCl +) +{ + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + baseAddress = (RmPhysAddr)INTEL_4000_CONFIG_SPACE_BASE_ADDRESS; + + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +static NV_STATUS +Intel_4003_setupFunc +( + OBJCL *pCl +) +{ + void *pHandle; + NvU32 hecbase; + RmPhysAddr baseAddress; + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_FALSE); + + // The Intel "SkullTrail" (aka 5400) motherboard chipset does not + // have a fixed PCIe enhanced config space base address. We can find + // it by reading bus 0, device 16, function 0, register 0x64 (HECBASE), + // per Intel 5400 Chipset Memory Controller Hub Datasheet. + + pHandle = osPciInitHandle(0, 0, 16, 0, NULL, NULL); + if (pHandle == NULL) + return NV_ERR_GENERIC; + + // Note: This read is on device 16, not on FHB (first host bridge). + + hecbase = osPciReadDword(pHandle, INTEL_4003_CONFIG_SPACE_BASE); + if (hecbase != 0xFFFFFFFF) + { + // The part we are interested in are the 12 bits [23:12], + // which make up bits [39:28] of the base address. So we + // isolate the 12 bits we need and shift into place. The + // high byte of the 40-bit address is shifted away. 
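+        //
+        // Worked example with a hypothetical HECBASE readback of 0x00ABC567:
+        // (0x00ABC567 & 0x00FFF000) == 0x00ABC000, and shifting left by 16
+        // places that field at 0xABC0000000, i.e. HECBASE[23:12] becomes
+        // address bits [39:28].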
+ + baseAddress = (RmPhysAddr) (hecbase & 0x00FFF000) << 16; + + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Can't read HECBASE register on Intel SkullTrail!\n"); + } + + if (clPcieReadDword(pCl, pCl->FHBAddr.domain, pCl->FHBAddr.bus, pCl->FHBAddr.device, pCl->FHBAddr.func, 0) + != (NvU32)(pCl->FHBBusInfo.vendorID | pCl->FHBBusInfo.deviceID << 16)) + { + pCl->pPcieConfigSpaceBase->baseAddress = INTEL_4003_CONFIG_SPACE_BASE_ADDRESS_E; + if (clPcieReadDword(pCl, pCl->FHBAddr.domain, pCl->FHBAddr.bus, pCl->FHBAddr.device, pCl->FHBAddr.func, 0) + != (NvU32)(pCl->FHBBusInfo.vendorID | pCl->FHBBusInfo.deviceID << 16)) + { + return NV_ERR_GENERIC; + } + } + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +// +// the PCI extended config space BAR is a property of the CPU, not the +// actual chipset because Intel integrated that part of the +// Northbridge in the CPU. +// This function currently applies to processors: +// - i7: INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I7, +// - i5 Auburndale: INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_A, +// - i5 Lynnfield :INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_L. +// + +static NV_STATUS +Intel_Core_Nehalem_Processor_setupFunc +( + OBJCL *pCl +) +{ + NvS32 bus; + RmPhysAddr baseAddress; + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_FALSE); + + // + // Intel data sheet 320835 describes how to find the PCI extended + // config space BAR in a Core i7 CPU. + // + // Current link for i7: + // http://download.intel.com/design/processor/datashts/320835.pdf + // Link for i5 not found. It has been reverse engineered. + + // We need to find the pci functions, "Intel QuickPath + // Architecture System Address Decoder" device 0, function 1 on + // the CPU's PCI bus, Device ID = + // INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER* [section + // 2.3]. The bus number is the highest bus number in the PCI + // fabric. + + for (bus = 0xff; bus >= 0; --bus) + { + NvU16 vendorId, deviceId; + void *pHandle = osPciInitHandle(0, bus, 0, 1, &vendorId, &deviceId); + if (pHandle + && (vendorId == PCI_VENDOR_ID_INTEL) + && ((deviceId == INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I7) || + (deviceId == INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_A) || + (deviceId == INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_L) || + (deviceId == INTEL_QUICKPATH_SYSTEM_ADDRESS_DECODER_I5_6))) + { + // [section 2.6.5] describes how to decode the config + // space bar + const NvU32 sad_pciexbar_low = osPciReadDword(pHandle, 0x50); + const NvU64 sad_pciexbar_high = osPciReadDword(pHandle, 0x54); + + const NvU64 sad_pciexbar = (sad_pciexbar_high << 32) | sad_pciexbar_low; + const NvU64 address = sad_pciexbar & 0xfffff00000ULL; + const NvU32 size = (sad_pciexbar_low >> 1) & 0x7; + const NvU32 enabled = (sad_pciexbar_low & 1); + + // if it's disabled, then skip. + if (enabled == 0) + continue; + + // if it's not a size we know, then skip. 
0 = 256MB, 6 = 64MB, 7 = + // 128MB BAR size + if ((size != 0) && (size != 7) && (size != 6)) + continue; + + baseAddress = (RmPhysAddr)address; + + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + break; + } + } + + return NV_OK; +} + +static NV_STATUS +Intel_3400_setupFunc +( + OBJCL *pCl +) +{ + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + return Intel_Core_Nehalem_Processor_setupFunc(pCl); +} + +// +// 3B42 is the device Id of the IBEX south bridge. It is not the device id of the host bridge. +// On such core i5 platforms the host bridge is in the CPU, so we cannot rely on it +// to detect the P55 platform. +// +static NV_STATUS +Intel_3B42_setupFunc +( + OBJCL *pCl +) +{ + void *pHandle; + NvU16 deviceId; + NvU16 cpuDeviceId; + NvU16 cpuVId; + + // We need to find out if it is Intel H57 Chipset (based on LPC D31 F0 + // PCI DevID 3B08h )and remove it from ASPM POR. Refer Bug 706926 for more details + pHandle = osPciInitHandle(0, 0, 31 , 0, NULL, &deviceId); + + if ((pHandle != NULL) && (deviceId == 0x3B08)) + { + // Set PDB_PROP_CL_INTEL_CPU_ROOTPORT1_NEEDS_H57_WAR to disable L1 in CPU Root Port for H57. + pCl->setProperty(pCl, PDB_PROP_CL_INTEL_CPU_ROOTPORT1_NEEDS_H57_WAR, NV_TRUE); + } + + // + // Bug 782125 : [PEX ASPM] Enable GPU L0s and L1 on H55 LPC (@ D31 F0) (deviceId == 0x3B06) and + // H57 (deviceId == 0x3B08) with Clarkdale CPUs dev 0 func 0 (deviceId == 0x0042) for All + // Fermi and Later GPUs + // + if ((pHandle != NULL) && ((deviceId == 0x3B06) || (deviceId == 0x3B08))) + { + // We need to find out if CPU is Clarkdale by reading 'Register 0 of Dev 0 Func 0 + // VID: 0x8086 and DEVID: 0x0040 + pHandle = osPciInitHandle(0, 0, 0 , 0, &cpuVId, &cpuDeviceId); + + if ((pHandle != NULL) && (cpuVId == PCI_VENDOR_ID_INTEL) && (cpuDeviceId == 0x0040)) + { + // Enable L1P and L0 + _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_FALSE); + } + } + + return Intel_Core_Nehalem_Processor_setupFunc(pCl); +} + +// Intel Huron River Chipset Common Function +static NV_STATUS +Intel_Huron_River_setupFunc +( + OBJCL *pCl +) +{ + // Enable Gen2 ASLM + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE, NV_TRUE); + + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + // Enable L0s and L1 on mobile only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + + return NV_OK; +} + +// Intel Huron River Chipset HM67/QM67 Function - supports SLI +static NV_STATUS +Intel_1C4B_setupFunc +( + OBJCL *pCl +) +{ + return Intel_Huron_River_setupFunc(pCl); +} + +// Intel Huron River Chipset HM65 Function - does not support SLI +static NV_STATUS +Intel_1C49_setupFunc +( + OBJCL *pCl +) +{ + return Intel_Huron_River_setupFunc(pCl); +} + +// Intel P67 Chipset Setup Function +static NV_STATUS +Intel_1C10_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel P67 Chipset Cougar-PointSetup Function - supports SLI +static NV_STATUS +Intel_1C46_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel X79 Patsburg Chipset - supports SLI +static NV_STATUS +Intel_1D40_setupFunc +( + OBJCL *pCl +) +{ + pCl->setProperty(pCl, PDB_PROP_CL_ON_PCIE_GEN3_PATSBURG, NV_TRUE); + + if (DEVICE_ID_INTEL_0E00_HOST_BRIDGE == pCl->FHBBusInfo.deviceID) + { + 
pCl->setProperty(pCl, PDB_PROP_CL_ALLOW_PCIE_GEN3_ON_PATSBURG_WITH_IVBE_CPU, NV_TRUE); + } + + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + // Enable L0s on mobile parts only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + + return NV_OK; +} + +// Intel X99 platform +static NV_STATUS +Intel_8D47_setupFunc +( + OBJCL *pCl +) +{ + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + // Enable L0s on mobile parts only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + + // Disable MSCG on X99 chipset + pCl->setProperty(pCl, PDB_PROP_CL_BUG_1681803_WAR_DISABLE_MSCG, NV_TRUE); + + return NV_OK; +} + +// Intel C612 platform (X99 based) +static NV_STATUS +Intel_8D44_setupFunc +( + OBJCL *pCl +) +{ + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + // Enable L0s on mobile parts only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + + return NV_OK; +} + +// Intel Z75 Ivy Bridge CPU - supports SLI +static NV_STATUS +Intel_1E10_setupFunc +( + OBJCL *pCl +) +{ + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_FALSE); + + // Set PDB to disable Gen3 on GIGABYTE Sniper 3 motherboard. Bug 1340801. + if (pCl->chipsetIDInfo.subvendorID == NV_PCI_SUBID_VENDOR_GIGABYTE) + { + switch(pCl->chipsetIDInfo.subdeviceID) + { + case GIGABYTE_SNIPER_3_SSDEVID_1: + case GIGABYTE_SNIPER_3_SSDEVID_2: + pCl->setProperty(pCl, PDB_PROP_CL_BUG_1340801_DISABLE_GEN3_ON_GIGABYTE_SNIPER_3, NV_TRUE); + break; + default: + break; + } + } + + return NV_OK; +} + +// Intel SharkBay (Haswell) - Lynx Point platform +static NV_STATUS +Intel_8C4B_setupFunc +( + OBJCL *pCl +) +{ + switch (pCl->FHBBusInfo.deviceID) + { + case DEVICE_ID_INTEL_0C00_HASWELL_HOST_BRIDGE: + case DEVICE_ID_INTEL_0C04_HASWELL_HOST_BRIDGE: + pCl->setProperty(pCl, PDB_PROP_CL_ON_HASWELL_HOST_BRIDGE, NV_TRUE); + break; + default: + break; + } + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + // Enable L0s on mobile only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + + return NV_OK; +} + +// Intel Z97 platform +static NV_STATUS +Intel_8CC4_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel Z170 platform +static NV_STATUS +Intel_A145_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel Z270 platform +static NV_STATUS +Intel_A2C5_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel C62x/C422 platform +static NV_STATUS +Intel_A242_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// IntelX299 platform +static NV_STATUS +Intel_A2D2_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel Z370 platform +static NV_STATUS +Intel_A2C9_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel CannonLake platform +static NV_STATUS +Intel_A301_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel Comet Lake platform +static NV_STATUS +Intel_0685_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Intel Z590 platform (Rocket Lake) +static NV_STATUS +Intel_4381_setupFunc +( + OBJCL *pCl +) +{ + pCl->setProperty(pCl, PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE, NV_TRUE); + + return NV_OK; +} + +// Intel Z690 platform (Alder 
Lake) +static NV_STATUS +Intel_7A82_setupFunc +( + OBJCL *pCl +) +{ + pCl->setProperty(pCl, PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE, NV_TRUE); + + return NV_OK; +} + +static NV_STATUS +Nvidia_T210_setupFunc +( + OBJCL *pCl +) +{ + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + if (clInsertPcieConfigSpaceBase(pCl, 0, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + + // + // setting this prevents trying to access the PCI MCFG table to get config information; + // this is part of the ACPI spec, which doesn't apply to Tegra + // + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + // Enable Gen2 ASLM + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE, NV_TRUE); + + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_FALSE); + + _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_FALSE); + return NV_OK; +} + +static NV_STATUS +Nvidia_T194_setupFunc +( + OBJCL *pCl +) +{ + NV_STATUS status; + + status = Nvidia_T210_setupFunc(pCl); + if (status != NV_OK) + return status; + + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + + return NV_OK; +} + +static NV_STATUS +SiS_656_setupFunc +( + OBJCL *pCl +) +{ + NvU32 PcieConfigBaseReg; + RmPhysAddr baseAddress; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + PcieConfigBaseReg = osPciReadDword(pCl->FHBAddr.handle, + SIS_656_CONFIG_SPACE_BASE); + + baseAddress = (RmPhysAddr)(REF_VAL(SIS_656_CONFIG_SPACE_BASE_ADDRESS, PcieConfigBaseReg )); + + if (baseAddress) + { + baseAddress <<= 28; + if (clInsertPcieConfigSpaceBase(pCl, baseAddress, 0, 0, (NvU8)(PCI_MAX_BUSES - 1)) == NV_OK) + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + + return NV_OK; +} + +static NV_STATUS +ATI_RS400_setupFunc +( + OBJCL *pCl +) +{ + NvU32 nbcfg; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + // Distinguish chipset revisions. A21 and earlier have PCI-E issues + // that require special treatment. + nbcfg = osPciReadDword(pCl->FHBAddr.handle, 0x9c); + if (nbcfg & 1) + pCl->Chipset = CS_ATI_RS400; + else + pCl->Chipset = CS_ATI_RS400_A21; + + return NV_OK; +} + +static NV_STATUS +ATI_RS480_setupFunc +( + OBJCL *pCl +) +{ + NvU32 rev; + + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + // Distinguish chipset revisions. A21 and earlier have PCI-E issues + // that require special treatment. 
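The check just below reads PCI config offset 0x08 of the host bridge; in the standard PCI configuration header the low byte at that offset is the Revision ID (the upper bytes hold the Class Code), so a zero value identifies the A21-and-earlier silicon that needs the workaround. A minimal sketch of that decode, with an illustrative handle name:

    // Standard PCI header, offset 0x08: bits 31:8 Class Code, bits 7:0 Revision ID
    NvU32 classRev   = osPciReadDword(handle, 0x08);
    NvU8  revisionID = (NvU8)(classRev & 0xff);   // 0 => A21 or earlier here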
+ rev = osPciReadDword(pCl->FHBAddr.handle, 0x08); + if (rev & 0xff) + pCl->Chipset = CS_ATI_RS480; + else + pCl->Chipset = CS_ATI_RS480_A21; + + return NV_OK; +} + +// +// AMD RS 780 and GX790 +// +NV_STATUS +AMD_RS780_setupFunc(OBJCL *pCl) +{ + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_TRUE); + return NV_OK; +} + +// +// AMD FX 790 +// +NV_STATUS +AMD_FX790_setupFunc(OBJCL *pCl) +{ + if (!pCl->FHBAddr.valid) + return NV_ERR_GENERIC; + + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + return NV_OK; +} + +NV_STATUS ATI_RD870_setupFunc(OBJCL *pCl) +{ + return NV_OK; +} + +NV_STATUS ATI_RD890_setupFunc(OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE, NV_TRUE); + return NV_OK; +} + +NV_STATUS ATI_RX780_setupFunc(OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE, NV_TRUE); + return NV_OK; +} + +// AMD FX890 and AMD GX890 Chipset Setup Function +static NV_STATUS +AMD_FX890_setupFunc +( + OBJCL *pCl +) +{ + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_FALSE); + + return NV_OK; +} + +// AMD FX990 and AMD X990 Chipset Setup Function +static NV_STATUS +ATI_FX990_setupFunc +( + OBJCL *pCl +) +{ + pCl->setProperty(pCl, PDB_PROP_CL_BUG_999673_P2P_ARBITRARY_SPLIT_WAR, NV_TRUE); + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_FALSE); + + return NV_OK; +} + + +// AMD X370 Chipset Setup Function +static NV_STATUS +AMD_X370_setupFunc +( + OBJCL *pCl +) +{ + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_FALSE); + + return NV_OK; +} + +// VIA VX900 Chipset Setup Function +static NV_STATUS +VIA_VX900_setupFunc +( + OBJCL *pCl +) +{ + // This chipset is not capable of Gen1/Gen2 switch. + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_GEN1_GEN2_SWITCH_CHIPSET_DISABLED, NV_TRUE); + + return NV_OK; +} + +// AppliedMicro XGene Storm Setup Function +static NV_STATUS +APM_Storm_setupFunc +( + OBJCL *pCl +) +{ + // This chipset has trouble with multiple traffic classes + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_NON_COHERENT_USE_TC0_ONLY, NV_TRUE); + + return NV_OK; +} + +// Generic ARMV8 setup function +static NV_STATUS +ARMV8_generic_setupFunc +( + OBJCL *pCl +) +{ + return NV_OK; +} + +// Marvell ThunderX2 Setup Function +static NV_STATUS +Marvell_ThunderX2_setupFunc +( + OBJCL *pCl +) +{ + // TODO Need to check if any more PDB properties should be set + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +// QEMU Setup Function +static NV_STATUS +QEMU_setupFunc +( + OBJCL *pCl +) +{ + // + // TODO Need to check if any more PDB properties should be set and + // use ACPI tables to determine whether system is I/O coherent, + // instead of hard coding. + // + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +// Ampere eMag Setup Function +static NV_STATUS +Ampere_eMag_setupFunc +( + OBJCL *pCl +) +{ + // TODO Need to check if any more PDB properties should be set + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +// Huawei Kunpeng Setup Function +static NV_STATUS +Huawei_Kunpeng920_setupFunc +( + OBJCL *pCl +) +{ + // + // TODO Need to check if any more PDB properties should be set and + // use ACPI tables to determine whether system is I/O coherent, + // instead of hard coding. 
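A note on the recurring "Set ASPM L0S\L1 properties" calls in the setup functions above and below: per the _Set_ASPM_L0S_L1() helper defined near the end of this file, the two NvBool arguments are disable flags (bDisableL0S, bDisableL1), not enable flags. A short annotated illustration, using argument combinations that already appear in this section:

    _Set_ASPM_L0S_L1(pCl, NV_TRUE,  NV_FALSE); // L0s marked not capable, L1 left capable
    _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_TRUE);  // L1 marked not capable, L0s left capable
    _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_FALSE); // both L0s and L1 left capable

In every case the helper also sets PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST for the chipset.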
+ // + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +// Mellanox BlueField Setup Function +static NV_STATUS +Mellanox_BlueField_setupFunc +( + OBJCL *pCl +) +{ + // TODO Need to check if any more PDB properties should be set + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +// Amazon Gravitron2 Setup Function +static NV_STATUS +Amazon_Gravitron2_setupFunc +( + OBJCL *pCl +) +{ + // TODO Need to check if any more PDB properties should be set + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +// Fujitsu A64FX Setup Function +static NV_STATUS +Fujitsu_A64FX_setupFunc +( + OBJCL *pCl +) +{ + // TODO Need to check if any more PDB properties should be set + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +// Phytium FT2000 Setup Function +static NV_STATUS +Phytium_FT2000_setupFunc +( + OBJCL *pCl +) +{ + // TODO Need to check if any more PDB properties should be set + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +// Ampere Quicksilver Setup Function +static NV_STATUS +Ampere_Altra_setupFunc +( + OBJCL *pCl +) +{ + // TODO Need to check if any more PDB properties should be set + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + + // WAR bug 2915474: Ampere Altra rev0 does not correctly handle UC/WC iomaps. + if (pCl->pBusTopologyInfo->busInfo.revisionID == 0x0) + { + pCl->setProperty(pCl, PDB_PROP_CL_DISABLE_IOMAP_WC, NV_TRUE); + } + + return NV_OK; +} + +static NV_STATUS +Arm_NeoverseN1_setupFunc +( + OBJCL *pCl +) +{ + // TODO Need to check if any more PDB properties should be set + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IO_COHERENT, NV_TRUE); + return NV_OK; +} + +void +csGetInfoStrings +( + OBJCL *pCl, + NvU8 *pChipsetNameStr, + NvU8 *pVendorNameStr, + NvU8 *pSliBondNameStr, + NvU8 *pSubSysVendorNameStr, + NvU32 nameStrLen +) +{ + NvU32 i; + const char* pszUnknown = "Unknown"; + NvU32 szUnknownLen = portStringLength(pszUnknown); + + if (!pCl->chipsetIDBusAddr.valid) + { + portStringCopy((char *) pChipsetNameStr, szUnknownLen, + pszUnknown, szUnknownLen); + portStringCopy((char *) pVendorNameStr, szUnknownLen, + pszUnknown, szUnknownLen); + portStringCopy((char *) pSliBondNameStr, szUnknownLen, + pszUnknown, szUnknownLen); + portStringCopy((char *) pSubSysVendorNameStr, szUnknownLen, + pszUnknown, szUnknownLen); + return ; + } + + for (i = 0; chipsetInfo[i].chipset; i++) + { + if ((pCl->chipsetIDInfo.vendorID == chipsetInfo[i].vendorID) && + (pCl->chipsetIDInfo.deviceID == chipsetInfo[i].deviceID)) + { + portStringCopy((char*)pChipsetNameStr, + nameStrLen, + chipsetInfo[i].name, + nameStrLen); + break; + } + } + if (!chipsetInfo[i].chipset) + { + portStringCopy((char *) pChipsetNameStr, szUnknownLen, + pszUnknown, szUnknownLen); + } + + for (i = 0; vendorName[i].vendorID; i++) + { + if (pCl->chipsetIDInfo.vendorID == vendorName[i].vendorID) + { + portStringCopy((char*)pVendorNameStr, + nameStrLen, + vendorName[i].name, + nameStrLen); + break; + } + } + if (!vendorName[i].vendorID) + { + portStringCopy((char *) pVendorNameStr, szUnknownLen, + pszUnknown, szUnknownLen); + } + + for (i = 0; vendorName[i].vendorID; i++) + { + if (pCl->chipsetIDInfo.subvendorID == vendorName[i].vendorID) + { + portStringCopy((char*)pSubSysVendorNameStr, + nameStrLen, + vendorName[i].name, + nameStrLen); + break; + } + } + if (!vendorName[i].vendorID) + { + portStringCopy((char 
*)pSubSysVendorNameStr, szUnknownLen, + pszUnknown, szUnknownLen); + } + + { + portStringCopy((char *)pSliBondNameStr, szUnknownLen, + pszUnknown, szUnknownLen); + } +} + +// +// This function sets the pdb properties to disable ASPM L0S\L1 +// +void +_Set_ASPM_L0S_L1 +( + OBJCL *pCl, + NvBool bDisableL0S, + NvBool bDisableL1 +) +{ + // + // this chipset is part of exception list to enable/disable L0S/L1 + // (refer bug 529308) + // + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + if (bDisableL0S) + { + // Not capable of ASPM-L0s + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L0S_CHIPSET_DISABLED, NV_TRUE); + } + if (bDisableL1) + { + // Not capable of ASPM L1 + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L1_CHIPSET_DISABLED, NV_TRUE); + } +} + diff --git a/src/nvidia/src/kernel/platform/chipset/chipset_pcie.c b/src/nvidia/src/kernel/platform/chipset/chipset_pcie.c new file mode 100644 index 000000000..bfa501e9a --- /dev/null +++ b/src/nvidia/src/kernel/platform/chipset/chipset_pcie.c @@ -0,0 +1,4286 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************* PCIE Chipset Routines *********************************\ +* * +* One time initialization code to update the PCI Express chipset and * +* our own PCI Express related values (includes workarounds and registry * +* overrides). 
* +* * +****************************************************************************/ + +#include "nvRmReg.h" +#include "core/core.h" +#include "os/os.h" +#include "platform/platform.h" +#include "platform/chipset/chipset.h" +#include "platform/chipset/chipset_info.h" +#include "nvpcie.h" +#include "gpu/gpu.h" +#include "objtmr.h" +#include "gpu/bif/kernel_bif.h" +#include "gpu/gpu.h" +#include "gpu/gsp/gsp_static_config.h" +#include "virtualization/hypervisor/hypervisor.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "ctrl/ctrl2080/ctrl2080bus.h" // NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_* +#include "core/thread_state.h" +#include "nveGPUConfig.h" +#include "Nvcm.h" + +#include "published/maxwell/gm107/dev_nv_xve.h" // NV_XVE_VCCAP_CTRL0* +#include "published/pcie_switch/pcie_switch_ref.h" + +// +// static functions +// + +static NV_STATUS objClInitPcieChipset(OBJGPU *, OBJCL *); +static NvBool objClInitGpuPortData(OBJGPU *, OBJCL *pCl); +static NV_STATUS objClSetPortCapsOffsets(OBJCL *, PORTDATA *); +static NV_STATUS objClSetPortPcieEnhancedCapsOffsets(OBJCL *, PORTDATA *); +static void * objClFindRootPort(OBJGPU *, OBJCL *, NvU32, NvU8, NvU8 *, NvU8 *, NvU8 *, NvU16 *, NvU16 *); +static NvBool objClBR03Exists(OBJGPU *, OBJCL *); +static NvBool objClBR04Exists(OBJGPU *, OBJCL *); +static void * objClPcieMapEnhCfgSpace(OBJCL *, NvU32, NvU8, NvU8, NvU8); +static void objClPcieUnmapEnhCfgSpace(OBJCL *, void *); +static void _objClPostSetupFuncRegkeyOverrides(OBJGPU *, OBJCL *); +static void objClGpuMapRootPort(OBJGPU *, OBJCL *); +static void objClGpuUnmapRootPort(OBJGPU *); +static void objClGpuMapEnhCfgSpace(OBJGPU *, OBJCL *); +static void objClGpuUnmapEnhCfgSpace(OBJGPU *); +static NV_STATUS objClGpuIs3DController(OBJGPU *); +static void objClLoadPcieVirtualP2PApproval(OBJGPU *); +static void objClCheckForExternalGpu(OBJGPU *, OBJCL *); +static void _objClAdjustTcVcMap(OBJGPU *, OBJCL *, PORTDATA *); + +extern void _Set_ASPM_L0S_L1(OBJCL *, NvBool, NvBool); + +static NV_STATUS addHwbcToList(OBJGPU *, OBJHWBC *); + +#define INTEL_HASWELL_POWER_CONTROL_UNIT_DEVICE_ID 0x2FC0 +#define HASWELL_CPU_CAPID4_OFFSET 0x94 +#define INTEL_C0_OR_C1_CPUID 0x306f2 + +#define NV_ACPI_TABLE_SIGNATURE_GFCM NvU32_BUILD('G','F','C','M') +#define NV_ACPI_TABLE_SIGNATURE_TDSR NvU32_BUILD('T','D','S','R') +#define NV_ACPI_TABLE_SIGNATURE_TDSX NvU32_BUILD('T','D','S','X') + +NV_STATUS +clInitDeviceInfo_IMPL(OBJCL *pCl, OBJGPU *pGpu) +{ + NvU32 gpuId; + + // Find our NV device on the PCI bus and save it's pci bus/device address. 
+ gpuId = clInitMappingPciBusDevice(pGpu, pCl); + if (gpuId == NV0000_CTRL_GPU_INVALID_ID) + return NV_ERR_INVALID_DEVICE; + + // Now find our ports + if (!objClInitGpuPortData(pGpu, pCl)) + return NV_ERR_OPERATING_SYSTEM; + + return NV_OK; +} + +static void +_objClPostSetupFuncRegkeyOverrides(OBJGPU *pGpu, OBJCL *pCl) +{ + NvU32 data; + + if (osReadRegistryDword(pGpu, NV_REG_STR_CL_ASLM_CFG, &data) == NV_OK) + { + switch(DRF_VAL(_REG_STR, _CL_ASLM_CFG, _NV_LINK_UPGRADE, data)) + { + case NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_YES: + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_NV_LINK_UPGRADE, NV_TRUE); + break; + case NV_REG_STR_CL_ASLM_CFG_NV_LINK_UPGRADE_NO: + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_NV_LINK_UPGRADE, NV_FALSE); + break; + default: + break; + } + + switch(DRF_VAL(_REG_STR, _CL_ASLM_CFG, _HOT_RESET, data)) + { + case NV_REG_STR_CL_ASLM_CFG_HOT_RESET_YES: + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_HOT_RESET, NV_TRUE); + break; + case NV_REG_STR_CL_ASLM_CFG_HOT_RESET_NO: + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_HOT_RESET, NV_FALSE); + break; + default: + break; + } + + switch(DRF_VAL(_REG_STR, _CL_ASLM_CFG, _FAST_UPGRADE, data)) + { + case NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_YES: + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_FAST_LINK_UPGRADE, NV_TRUE); + break; + case NV_REG_STR_CL_ASLM_CFG_FAST_UPGRADE_NO: + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_FAST_LINK_UPGRADE, NV_FALSE); + break; + default: + break; + } + + switch(DRF_VAL(_REG_STR, _CL_ASLM_CFG, _GEN2_LINK_UPGRADE, data)) + { + case NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_YES: + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE, NV_TRUE); + break; + case NV_REG_STR_CL_ASLM_CFG_GEN2_LINK_UPGRADE_NO: + pCl->setProperty(pCl, PDB_PROP_CL_ASLM_SUPPORTS_GEN2_LINK_UPGRADE, NV_FALSE); + break; + default: + break; + } + } +} + +NV_STATUS +addHwbcToList (OBJGPU *pGpu, OBJHWBC *pHWBC) +{ + HWBC_LIST *pHWBCList; + HWBC_LIST *pGpuHWBCList; + + pHWBCList = portMemAllocNonPaged(sizeof(HWBC_LIST)); + if (NULL == pHWBCList) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + portMemSet(pHWBCList, 0, sizeof(HWBC_LIST)); + pHWBCList->pHWBC = pHWBC; + if (NULL == pGpu->pHWBCList) + { + pGpu->pHWBCList = pHWBCList; + } + else + { + pGpuHWBCList = pGpu->pHWBCList; + while (pGpuHWBCList->pNext) + { + pGpuHWBCList = pGpuHWBCList->pNext; + } + pGpuHWBCList->pNext = pHWBCList; + } + return NV_OK; +} + +// +// Determine which chipset we're using (from available options) +// and initialize chipset-specific functions +// +NV_STATUS +objClInitPcieChipset(OBJGPU *pGpu, OBJCL *pCl) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJPFM *pPfm = SYS_GET_PFM(pSys); + NvU32 i; + NvU32 domain; + NvU16 chipsetInfoIndex; + NvU32 devCap2; + NvU32 devCtrl2; + NvBool rootPortLtrSupported; + NvBool tempLtrSupported; + NvBool needsNosnoopWAR = NV_FALSE; + NV_STATUS status; + + if (pGpu != NULL) + { + domain = gpuGetDomain(pGpu); + + if (clStoreBusTopologyCache(pCl, domain, PCI_MAX_BUSES) != NV_OK) + { + return NV_ERR_GENERIC; + } + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (!pCl->ChipsetInitialized && + !pPfm->getProperty(pPfm, PDB_PROP_PFM_NO_HOSTBRIDGE_DETECT)) + { + needsNosnoopWAR = clRootportNeedsNosnoopWAR_HAL(pGpu, pCl); + + if (needsNosnoopWAR) + pCl->setProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR, NV_TRUE); + + if (hypervisorIsVgxHyper()) + { + pCl->setProperty(pCl, PDB_PROP_CL_NOSNOOP_NOT_CAPABLE, NV_TRUE); + } + // Find the first host bridge + 
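The call below locates the first host bridge and yields an index into the chipsetInfo[] table; the matching entry's setup callback is one of the per-chipset functions defined earlier in this file. The table itself is defined outside this hunk; a minimal sketch of the shape implied by the fields used here, with a hypothetical type name:

    typedef struct
    {
        NvU16       vendorID;
        NvU16       deviceID;
        NvU32       chipset;                 // CS_* value; 0 terminates the table
        const char *name;                    // reported by csGetInfoStrings()
        NV_STATUS (*setupFunc)(OBJCL *pCl);  // e.g. Intel_7A82_setupFunc
    } CHIPSET_INFO_SKETCH;

csGetInfoStrings() further down shows the matching convention: entries are scanned until the vendor and device IDs match, with a zero chipset field terminating the table.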
if (clFindFHBAndGetChipsetInfoIndex(pCl, &chipsetInfoIndex) == NV_OK) + { + pCl->Chipset = chipsetInfo[chipsetInfoIndex].chipset; + // If the chipset info is not found, hipsetInfo[chipsetInfoIndex].setupFunc = NULL + if ((!chipsetInfo[chipsetInfoIndex].setupFunc) || + (chipsetInfo[chipsetInfoIndex].setupFunc(pCl) != NV_OK)) + { + NV_PRINTF(LEVEL_ERROR, "*** Chipset Setup Function Error!\n"); + } + } + else + { + NV_PRINTF(LEVEL_WARNING, + "*** Chipset has no definition! (vendor ID 0x%04x, device ID 0x%04x)\n", + pCl->FHBBusInfo.vendorID, pCl->FHBBusInfo.deviceID); + } + if (pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)) + { + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ, NV_TRUE); + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + + if (NVCPU_IS_FAMILY_ARM) + { + for (i=0; armChipsetAllowListInfo[i].vendorID; i++) + { + if (pCl->FHBBusInfo.vendorID == armChipsetAllowListInfo[i].vendorID && + pCl->FHBBusInfo.deviceID == armChipsetAllowListInfo[i].deviceID) + { + break; + } + } + + if (armChipsetAllowListInfo[i].vendorID == 0) + { + portDbgPrintf( + "NVRM: Chipset not recognized (vendor ID 0x%04x, device ID 0x%04x)\n", + pCl->FHBBusInfo.vendorID, pCl->FHBBusInfo.deviceID); + + // Allow the driver to run on AARCH64 even when the chipset is not matched, + // but we need a disclaimer message printed... + portDbgPrintf( + "The NVIDIA GPU driver for AArch64 has not been qualified on this platform\n" + "and therefore it is not recommended or intended for use in any production\n" + "environment.\n"); + } + } + +#if !defined(NVCPU_PPC64LE) + // + // If PCIe config space base addresses are not found through chipset specific + // setup functions, read pcie config space addresses from the MCFG table and + // update pCl->pPcieConfigSpaceBase linked-list. + // Skip reading MCFG table for old SLI chipsets. 
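The fallback just below reads the ACPI MCFG table to learn, per PCI segment and bus range, the physical base of the PCIe extended (ECAM) configuration space when no chipset setup function has registered one. For orientation, a register address is then formed from that base using the standard ECAM layout of 4 KiB per function; the variable names below are illustrative, and the PCIE_*_SHIFT constants used later in this file presumably encode the same bit positions:

    // Standard ECAM layout: bus in bits 27:20, device in bits 19:15,
    // function in bits 14:12, register offset in bits 11:0.
    RmPhysAddr cfgAddr = ecamBase |
                         ((RmPhysAddr)bus  << 20) |
                         ((RmPhysAddr)dev  << 15) |
                         ((RmPhysAddr)func << 12) |
                         regOffset;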
+ // + + if (!(pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && + (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_SKIP_MCFG_READ)))) + { + if (clStorePcieConfigSpaceBaseFromMcfg(pCl) == NV_OK) + { + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_TRUE); + } + } +#endif // !defined(NVCPU_PPC64LE) + + _objClPostSetupFuncRegkeyOverrides(pGpu, pCl); + pCl->ChipsetInitialized = NV_TRUE; + + // + // Verify PCI Express Enhanced configuration space of First Host Bridge + // through vendor ID and device ID + // + + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE)) + { + if (clPcieReadDword(pCl, + pCl->FHBAddr.domain, + pCl->FHBAddr.bus, + pCl->FHBAddr.device, + pCl->FHBAddr.func, + 0) + != (NvU32)(pCl->FHBBusInfo.vendorID | pCl->FHBBusInfo.deviceID << 16)) + { + NV_PRINTF(LEVEL_ERROR, + "*** PCI-E config space not consistent with PCI config space, FHB vendor ID and device ID not equal!\n"); + NV_PRINTF(LEVEL_ERROR, + "*** Setting PCI-E config space inaccessible!\n"); + pCl->setProperty(pCl,PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_FALSE); + clFreePcieConfigSpaceBase(pCl); + } + } + } + else if (pPfm->getProperty(pPfm, PDB_PROP_PFM_NO_HOSTBRIDGE_DETECT)) + { + NV_PRINTF(LEVEL_INFO, + "Skipping PCI Express host bridge initialization\n"); + } + + // handles for ports + if (!objClInitGpuPortData(pGpu, pCl)) + { + NV_PRINTF(LEVEL_ERROR, "*** Unable to get PCI port handles\n"); + return NV_ERR_OPERATING_SYSTEM; + } + + if (!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)) + { + objClGpuMapEnhCfgSpace(pGpu, pCl); + objClGpuMapRootPort(pGpu, pCl); + } + + domain = pGpu->gpuClData.upstreamPort.addr.domain; + + OBJHWBC *pHWBC = pCl->pHWBC; + NvU8 childBus = pGpu->gpuClData.upstreamPort.addr.bus; + + while (pHWBC != NULL) + { + if ((pHWBC->domain == domain) && (pHWBC->minBus <= childBus) && (pHWBC->maxBus >= childBus)) + { + if (HWBC_PLX_PEX8747 == pHWBC->bcRes) + { + plxPex8747GetFirmwareInfo(pCl, pGpu, pHWBC); + + // + // Check to avoid unsetting the flag when GPU has more than + // 1 PLX in its upstream. This happens when motherboard has + // a plx bridge. + // + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_PLX_PRESENT)) + { + // Set property only if the PLX has our firmware flashed. 
+ pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_PLX_PRESENT, + pHWBC->hasPlxFirmwareInfo); + } + } + NV_ASSERT_OK_OR_RETURN(addHwbcToList(pGpu, pHWBC)); + pHWBC = pHWBC->pFirstChild; + } + else + pHWBC = pHWBC->pSibling; + } + + for (i=0; rootPortInfo[i].vendorID; i++) + if (pGpu->gpuClData.rootPort.VendorID == rootPortInfo[i].vendorID && + pGpu->gpuClData.rootPort.DeviceID == rootPortInfo[i].deviceID) + break; + + if (rootPortInfo[i].setupFunc) + { + if (NV_OK != rootPortInfo[i].setupFunc(pGpu, pCl)) { + NV_PRINTF(LEVEL_ERROR, "*** Root Port Setup Function Error\n"); + } + } + + for (i = 0; upstreamPortInfo[i].vendorID; i++) + { + if (pGpu->gpuClData.upstreamPort.VendorID == upstreamPortInfo[i].vendorID && + pGpu->gpuClData.upstreamPort.DeviceID == upstreamPortInfo[i].deviceID) + { + break; + } + } + + if (upstreamPortInfo[i].setupFunc) + { + if (NV_OK != upstreamPortInfo[i].setupFunc(pGpu, pCl)) + { + NV_PRINTF(LEVEL_ERROR, "Upstream port Setup Function Error\n"); + } + } + + // Verify PCI Express Enhanced configuration space through vendor ID and device ID + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && + pGpu->gpuClData.rootPort.addr.valid) + { + if (clPcieReadDword(pCl, + pGpu->gpuClData.rootPort.addr.domain, + pGpu->gpuClData.rootPort.addr.bus, + pGpu->gpuClData.rootPort.addr.device, + pGpu->gpuClData.rootPort.addr.func, + 0) + != (NvU32)(pGpu->gpuClData.rootPort.VendorID | pGpu->gpuClData.rootPort.DeviceID << 16)) + { + NV_PRINTF(LEVEL_ERROR, + "*** PCI-E config space not consistent with PCI config space, root port vendor ID and device ID not equal!\n"); + pCl->setProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE, NV_FALSE); + clFreePcieConfigSpaceBase(pCl); + } + } + + rootPortLtrSupported = NV_FALSE; + pCl->setProperty(pCl, PDB_PROP_CL_UPSTREAM_LTR_SUPPORTED, NV_FALSE); + + if (pGpu->gpuClData.rootPort.addr.valid) + { + if ((NV_OK == clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, + CL_PCIE_DEV_CAP_2, &devCap2)) && + (NV_OK == clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, + CL_PCIE_DEV_CTRL_2, &devCtrl2))) + { + if (CL_IS_LTR_PORT_SUPPORTED(devCap2) && CL_IS_LTR_PORT_ENABLED(devCtrl2)) + { + rootPortLtrSupported = NV_TRUE; + } + } + } + + // + // No need to check upstream components' LTR support if root port + // does not support LTR + // + if (rootPortLtrSupported) + { + status = clCheckUpstreamLtrSupport(pGpu, pCl, &tempLtrSupported); + if (status == NV_OK && tempLtrSupported == NV_TRUE) + { + pCl->setProperty(pCl, PDB_PROP_CL_UPSTREAM_LTR_SUPPORTED, NV_TRUE); + } + else + { + NV_PRINTF(LEVEL_INFO, "LTR capability not supported.\n"); + } + } + + return NV_OK; +} + +/*! @brief Check LTR capability throughout the hierarchy of + * switches in between root port and endpoint. 
+ * + * @param[i] pGpu GPU object pointer + * @param[i] pCl Core logic object pointer + * @param[out] pTempLtrSupported True if LTR is supported + */ +NV_STATUS +clCheckUpstreamLtrSupport_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvBool *pTempLtrSupported +) +{ + NvU32 portCaps = 0; + NvU32 domain = gpuGetDomain(pGpu); + NvU8 bus = gpuGetBus(pGpu); + NvU32 PCIECapPtr; + void *pHandleUp; + NvU8 busUp, devUp, funcUp; + NvU16 vendorIDUp, deviceIDUp; + NvU32 devCap2; + NvU32 devCtrl2; + NV_STATUS status = NV_OK; + *pTempLtrSupported = NV_FALSE; + + if (!pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE)) + { + { + NV_PRINTF(LEVEL_ERROR, "PCIE config space is inaccessible!\n"); + status = NV_ERR_NOT_SUPPORTED; + goto clCheckUpstreamLtrSupport_exit; + } + } + + do + { + // find upstream port + pHandleUp = clFindP2PBrdg(pCl, domain, bus, + &busUp, &devUp, &funcUp, + &vendorIDUp, &deviceIDUp); + + // make sure handle was found + if (!pHandleUp) + { + status = NV_ERR_NOT_SUPPORTED; + goto clCheckUpstreamLtrSupport_exit; + } + + status = clSetPortPcieCapOffset(pCl, pHandleUp, &PCIECapPtr); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Capability pointer not found.\n"); + status = NV_ERR_NOT_SUPPORTED; + goto clCheckUpstreamLtrSupport_exit; + } + + devCap2 = osPciReadDword(pHandleUp, + CL_PCIE_DEV_CAP_2 - CL_PCIE_BEGIN + PCIECapPtr); + devCtrl2 = osPciReadDword(pHandleUp, + CL_PCIE_DEV_CTRL_2 - CL_PCIE_BEGIN + PCIECapPtr); + + if ((!CL_IS_LTR_PORT_SUPPORTED(devCap2)) || + (!CL_IS_LTR_PORT_ENABLED(devCtrl2))) + { + // + // Even if a single switch in the hierarchy doesn't support + // LTR, it has to be disabled. No need to check further. + // + *pTempLtrSupported = NV_FALSE; + status = NV_ERR_NOT_SUPPORTED; + goto clCheckUpstreamLtrSupport_exit; + } + portCaps = osPciReadDword(pHandleUp, + CL_PCIE_CAP - CL_PCIE_BEGIN + PCIECapPtr); + + bus = busUp; + } while (!CL_IS_ROOT_PORT(portCaps)); + *pTempLtrSupported = NV_TRUE; + status = NV_OK; + +clCheckUpstreamLtrSupport_exit: + return status; +} + +static void +_objClAdjustTcVcMap(OBJGPU *pGpu, OBJCL *pCl, PORTDATA *pPort) +{ + NvU32 epVcCtrl0, epTcVcMap, upTcVcMap, upVcCtrl0, subsetTcVcMap; + + // read port TC/VC map + if (NV_OK != clPcieReadPortConfigReg(pGpu, pCl, pPort, + CL_VC_RESOURCE_CTRL_0, &upVcCtrl0)) + { + NV_PRINTF(LEVEL_INFO, + "NVPCIE: Can not read VC resource control 0 on port %04x:%02x:%02x.%1x (bug 1048498).\n", + pPort->addr.domain, pPort->addr.bus, pPort->addr.device, + pPort->addr.func); + } + else + { + upTcVcMap = DRF_VAL(_XVE, _VCCAP_CTRL0, _MAP, upVcCtrl0); + + // read NVIDIA TC/VC map + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, NV_XVE_VCCAP_CTRL0, &epVcCtrl0)) + { + NV_PRINTF(LEVEL_ERROR, "Cannot read NV_XVE_VCCAP_CTRL0\n"); + return; + } + epTcVcMap = DRF_VAL(_XVE, _VCCAP_CTRL0, _MAP, epVcCtrl0); + + subsetTcVcMap = epTcVcMap & upTcVcMap; + if (epTcVcMap != subsetTcVcMap) + { + NV_PRINTF(LEVEL_INFO, + "NVPCIE: TC/VC map is inconsistent (Port %04x:%02x:%02x.%1x 0x%02x, GPU 0x%02x)!\n", + pPort->addr.domain, pPort->addr.bus, pPort->addr.device, + pPort->addr.func, (upTcVcMap << 1) | 1, + (epTcVcMap << 1) | 1); + + NV_PRINTF(LEVEL_INFO, + "NVPCIE: Fixing TC/VC map to common subset 0x%02x.\n", + (subsetTcVcMap << 1) | 1); + + epVcCtrl0 = FLD_SET_DRF_NUM(_XVE, _VCCAP_CTRL0, _MAP, subsetTcVcMap, + epVcCtrl0); + GPU_BUS_CFG_WR32(pGpu, NV_XVE_VCCAP_CTRL0, epVcCtrl0); + } + } +} + +// +// Determine if any updates are needed to the PCI Express +// +void +clUpdatePcieConfig_IMPL(OBJGPU *pGpu, OBJCL *pCl) +{ + OBJSYS *pSys = 
SYS_GET_INSTANCE(); + OBJPFM *pPfm = SYS_GET_PFM(pSys); + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + NvBool bIsGemini = NV_FALSE; + NvBool bIsMultiGpu; + NvU32 busIntfType = kbifGetBusIntfType_HAL(pKernelBif); + + // verify we're an PCI Express graphics card + if (busIntfType != NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS && + busIntfType != NV2080_CTRL_BUS_INFO_TYPE_FPCI) + { + return; + } + + // Find our NV device on the PCI bus and save it's pci bus/device address. + (void)clInitMappingPciBusDevice(pGpu, pCl); + NV_PRINTF(LEVEL_INFO, + "GPU Domain %X Bus %X Device %X Func %X\n", + gpuGetDomain(pGpu), gpuGetBus(pGpu), gpuGetDevice(pGpu), 0); + + // Set 3d controller property + if (objClGpuIs3DController(pGpu) != NV_OK) + { + return; + } + + // Load PCI Express virtual P2P approval config + objClLoadPcieVirtualP2PApproval(pGpu); + + // + // Disable NOSNOOP bit for Passthrough. + // + if (IS_PASSTHRU(pGpu)) + { + pCl->setProperty(pCl, PDB_PROP_CL_NOSNOOP_NOT_CAPABLE, NV_TRUE); + } + + objClInitPcieChipset(pGpu, pCl); + + // + // Now that chipset capabilities have been initialized, configure the + // device's config space device control status bits to match. This should + // happen before any DMA. + // + kbifInitPcieDeviceControlStatus(pGpu, pKernelBif); + + // + // Passthrough configurations do not typically present the upstream + // bridge required for detecting multi-GPU boards. So for hypervisors + // with passthrough pretend to separate GPUs. + // + if (!pPfm->getProperty(pPfm, PDB_PROP_PFM_NO_HOSTBRIDGE_DETECT)) + { + + // + // MultiGpu board includes all Dagwood and Gemini boards + // A dagwood board would not have bIsGemini flag set but would have + // bIsMultiGpu flag set. + // + bIsMultiGpu = gpuIsMultiGpuBoard(pGpu, NULL, &bIsGemini); + if (bIsGemini) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_GEMINI, NV_TRUE); + } + + // Update the board ID only if the Gpu is a multiGpu board + if (bIsMultiGpu) + { + gpumgrUpdateBoardId(pGpu); + } + } + + NV_PRINTF(LEVEL_INFO, + "Chipset %X Domain %X Bus %X Device %X Func %X PCIE PTR %X\n", + pCl->Chipset, + pGpu->gpuClData.upstreamPort.addr.domain, + pGpu->gpuClData.upstreamPort.addr.bus, + pGpu->gpuClData.upstreamPort.addr.device, + pGpu->gpuClData.upstreamPort.addr.func, + pGpu->gpuClData.upstreamPort.PCIECapPtr); + NV_PRINTF(LEVEL_INFO, + "Chipset %X Root Port Domain %X Bus %X Device %X Func %X PCIE PTR %X\n", + pCl->Chipset, pGpu->gpuClData.rootPort.addr.domain, + pGpu->gpuClData.rootPort.addr.bus, + pGpu->gpuClData.rootPort.addr.device, + pGpu->gpuClData.rootPort.addr.func, + pGpu->gpuClData.rootPort.PCIECapPtr); + NV_PRINTF(LEVEL_INFO, + "Chipset %X Board Upstream Port Domain %X Bus %X Device %X Func %X PCIE PTR %X\n", + pCl->Chipset, pGpu->gpuClData.boardUpstreamPort.addr.domain, + pGpu->gpuClData.boardUpstreamPort.addr.bus, + pGpu->gpuClData.boardUpstreamPort.addr.device, + pGpu->gpuClData.boardUpstreamPort.addr.func, + pGpu->gpuClData.boardUpstreamPort.PCIECapPtr); + NV_PRINTF(LEVEL_INFO, + "Chipset %X Board Downstream Port Domain %X Bus %X Device %X Func %X PCIE PTR %X\n", + pCl->Chipset, pGpu->gpuClData.boardDownstreamPort.addr.domain, + pGpu->gpuClData.boardDownstreamPort.addr.bus, + pGpu->gpuClData.boardDownstreamPort.addr.device, + pGpu->gpuClData.boardDownstreamPort.addr.func, + pGpu->gpuClData.boardDownstreamPort.PCIECapPtr); + + NV_PRINTF(LEVEL_INFO, + "FHB Domain %X Bus %X Device %X Func %X VendorID %X DeviceID %X\n", + pCl->FHBAddr.domain, pCl->FHBAddr.bus, pCl->FHBAddr.device, + pCl->FHBAddr.func, 
pCl->FHBBusInfo.vendorID, + pCl->FHBBusInfo.deviceID); + + // + // Match the GPU TC/VC map to that of the chipset + // Some SBIOS revisions do not program the TC/VC map correctly. RM will + // always ensure that the GPU TC/VC map is a subset of the RP TC/VC map. + // The GPU's map can be more restrictive than the RP, but it can *never* + // be larger. + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED)) + { + _objClAdjustTcVcMap(pGpu, pCl, &pGpu->gpuClData.rootPort); + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BEHIND_BRIDGE)) + { + // + // Match the GPU TC/VC map to that of the parent bridge. + // Normally, the SBIOS programs up these mappings during POST. + // However, this does not happen when coming out of Zero Power. + // + _objClAdjustTcVcMap(pGpu, pCl, &pGpu->gpuClData.upstreamPort); + } + } +} + +// +// Here's our attempt to configure PCI Express on our own (for NT4?). +// +// First, we'll attempt to recognize the chipset to determine if we know how to +// program it. +// +NV_STATUS clInitPcie_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + NvU32 busIntfType; + + if (pKernelBif == NULL) + { + return NV_ERR_NOT_SUPPORTED; + } + + busIntfType = kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)); + + // verify we're an PCI Express graphics card + if (busIntfType != NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS && + busIntfType != NV2080_CTRL_BUS_INFO_TYPE_FPCI) + { + return NV_ERR_NOT_SUPPORTED; + } + + /* enable chipset-specific overrides */ + clUpdatePcieConfig(pGpu, pCl); + + // Bug 200370149 tracks normalizing KMD detection with RM. + objClCheckForExternalGpu(pGpu, pCl); + + return NV_OK; +} + +NV_STATUS +clTeardownPcie_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + if (!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)) + { + objClGpuUnmapRootPort(pGpu); + objClGpuUnmapEnhCfgSpace(pGpu); + } + return NV_OK; +} + +// +// Return the Bus, Device, Func numbers of the root port for the given GPU. +// +static NvBool +objClInitGpuPortData +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + void *handle; + NvU16 vendorID; + NvU16 deviceID; + NvU8 bus; + NvU8 device; + NvU8 func; + NvU8 gpuBus; + NvU32 domain = 0; + NvU32 linkCap; + + // return it, if we've got it already + if (pGpu->gpuClData.upstreamPort.addr.valid) + return NV_TRUE; + + NV_ASSERT(gpuGetDBDF(pGpu)); + + domain = gpuGetDomain(pGpu); + gpuBus = gpuGetBus(pGpu); + + handle = clFindP2PBrdg(pCl, domain, gpuBus, &bus, &device, &func, + &vendorID, &deviceID); + + if (!handle) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys); + + // This is bad, we didn't find the upstream port device (assume + // bus0/device0/func0) + pGpu->gpuClData.upstreamPort.addr.domain = 0x0; + pGpu->gpuClData.upstreamPort.addr.bus = 0x0; + pGpu->gpuClData.upstreamPort.addr.device = 0x0; + pGpu->gpuClData.upstreamPort.addr.func = 0x0; + pGpu->gpuClData.upstreamPort.addr.valid = 0x1; + pGpu->gpuClData.upstreamPort.addr.handle = NULL; + pGpu->gpuClData.upstreamPort.DeviceID = PCI_INVALID_DEVICEID; + pGpu->gpuClData.upstreamPort.VendorID = PCI_INVALID_VENDORID; + + // + // Hypervisors typically hide the PCI topology from the guest, so we + // don't expect to always be able to find an upstream port. 
+ // + // For MODS debug breakpoints are always fatal and MODS is sometimes run + // on systems where the up stream port cannot be determined + if ((kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) != + NV2080_CTRL_BUS_INFO_TYPE_FPCI) && + (!pHypervisor || !pHypervisor->bDetected) && + !RMCFG_FEATURE_PLATFORM_MODS) + { + DBG_BREAKPOINT(); + return NV_FALSE; + } + } + else + { + pGpu->gpuClData.upstreamPort.addr.domain = domain; + pGpu->gpuClData.upstreamPort.addr.bus = bus; + pGpu->gpuClData.upstreamPort.addr.device = device; + pGpu->gpuClData.upstreamPort.addr.func = func; + pGpu->gpuClData.upstreamPort.addr.valid = 0x1; + pGpu->gpuClData.upstreamPort.addr.handle = handle; + pGpu->gpuClData.upstreamPort.DeviceID = deviceID; + pGpu->gpuClData.upstreamPort.VendorID = vendorID; + + objClSetPortCapsOffsets(pCl, &pGpu->gpuClData.upstreamPort); + } + + // Root port + handle = objClFindRootPort(pGpu, pCl, domain, gpuBus, &bus, &device, &func, &vendorID, &deviceID); + if (handle) + { + pGpu->gpuClData.rootPort.addr.domain = domain; + pGpu->gpuClData.rootPort.addr.bus = bus; + pGpu->gpuClData.rootPort.addr.device = device; + pGpu->gpuClData.rootPort.addr.func = func; + pGpu->gpuClData.rootPort.addr.valid = 0x1; + pGpu->gpuClData.rootPort.addr.handle = handle; + pGpu->gpuClData.rootPort.DeviceID = deviceID; + pGpu->gpuClData.rootPort.VendorID = vendorID; + + objClSetPortCapsOffsets(pCl, &pGpu->gpuClData.rootPort); + } + else + { + // This is a valid topology if a PCIe-to-PCI bridge chip is being used. + pGpu->gpuClData.rootPort.DeviceID = PCI_INVALID_DEVICEID; + pGpu->gpuClData.rootPort.VendorID = PCI_INVALID_VENDORID; + } + + // Assuming that both BR03 and BR04 can exist on the same system + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_BR03_PRESENT, objClBR03Exists(pGpu, pCl)); + pGpu->setProperty(pGpu, PDB_PROP_GPU_IS_BR04_PRESENT, objClBR04Exists(pGpu, pCl)); + + // board downstream port + domain = gpuGetDomain(pGpu); + handle = clFindBrdgUpstreamPort(pGpu, pCl, NV_FALSE, &bus, &device, &func, &vendorID, &deviceID, NULL); + if (handle) + { + pGpu->gpuClData.boardDownstreamPort.addr.domain = domain; + pGpu->gpuClData.boardDownstreamPort.addr.bus = bus; + pGpu->gpuClData.boardDownstreamPort.addr.device = device; + pGpu->gpuClData.boardDownstreamPort.addr.func = func; + pGpu->gpuClData.boardDownstreamPort.addr.valid = 0x1; + pGpu->gpuClData.boardDownstreamPort.addr.handle = handle; + pGpu->gpuClData.boardDownstreamPort.DeviceID = deviceID; + pGpu->gpuClData.boardDownstreamPort.VendorID = vendorID; + + objClSetPortCapsOffsets(pCl, &pGpu->gpuClData.boardDownstreamPort); + } + else if ( pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR03_PRESENT) || + pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR04_PRESENT) ) // Error + { + pGpu->gpuClData.boardDownstreamPort.DeviceID = PCI_INVALID_DEVICEID; + pGpu->gpuClData.boardDownstreamPort.VendorID = PCI_INVALID_VENDORID; + DBG_BREAKPOINT(); + return NV_FALSE; + } + // else: boardDownstreamPort data is not initialized, as it would + // represent the GPU. We do not access the PCIE config space of the GPU. 
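For orientation, the four PORTDATA entries filled in by this function describe the path between the root port and the GPU. Roughly, as read from the surrounding code:

    rootPort            - PCIe root port at the top of the hierarchy
    upstreamPort        - P2P bridge directly above the GPU (its secondary bus is the GPU's bus)
    boardDownstreamPort - on-board switch (BR03/BR04/PLX) port facing the GPU, when such a switch is present
    boardUpstreamPort   - on-board switch port facing the root port; with no switch it simply aliases
                          upstreamPort, as the fallback assignments below show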
+ + // board upstream port + domain = gpuGetDomain(pGpu); + handle = clFindBrdgUpstreamPort(pGpu, pCl, NV_TRUE, &bus, &device, &func, &vendorID, &deviceID, NULL); + if (handle) + { + pGpu->gpuClData.boardUpstreamPort.addr.domain = domain; + pGpu->gpuClData.boardUpstreamPort.addr.bus = bus; + pGpu->gpuClData.boardUpstreamPort.addr.device = device; + pGpu->gpuClData.boardUpstreamPort.addr.func = func; + pGpu->gpuClData.boardUpstreamPort.addr.valid = 0x1; + pGpu->gpuClData.boardUpstreamPort.addr.handle = handle; + pGpu->gpuClData.boardUpstreamPort.DeviceID = deviceID; + pGpu->gpuClData.boardUpstreamPort.VendorID = vendorID; + + objClSetPortCapsOffsets(pCl, &pGpu->gpuClData.boardUpstreamPort); + } + else if ( (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR03_PRESENT)) && + (!pGpu->getProperty(pGpu, PDB_PROP_GPU_IS_BR04_PRESENT)) ) + { + // No BR03, same as upstream port + pGpu->gpuClData.boardUpstreamPort.addr.domain = pGpu->gpuClData.upstreamPort.addr.domain; + pGpu->gpuClData.boardUpstreamPort.addr.bus = pGpu->gpuClData.upstreamPort.addr.bus; + pGpu->gpuClData.boardUpstreamPort.addr.device = pGpu->gpuClData.upstreamPort.addr.device; + pGpu->gpuClData.boardUpstreamPort.addr.func = pGpu->gpuClData.upstreamPort.addr.func; + pGpu->gpuClData.boardUpstreamPort.addr.valid = pGpu->gpuClData.upstreamPort.addr.valid; + pGpu->gpuClData.boardUpstreamPort.addr.handle = pGpu->gpuClData.upstreamPort.addr.handle; + pGpu->gpuClData.boardUpstreamPort.DeviceID = pGpu->gpuClData.upstreamPort.DeviceID; + pGpu->gpuClData.boardUpstreamPort.VendorID = pGpu->gpuClData.upstreamPort.VendorID; + + pGpu->gpuClData.boardUpstreamPort.PCIECapPtr = pGpu->gpuClData.upstreamPort.PCIECapPtr; + pGpu->gpuClData.boardUpstreamPort.PCIEErrorCapPtr = pGpu->gpuClData.upstreamPort.PCIEErrorCapPtr; + pGpu->gpuClData.boardUpstreamPort.PCIEVCCapPtr = pGpu->gpuClData.upstreamPort.PCIEVCCapPtr; + pGpu->gpuClData.boardUpstreamPort.PCIEL1SsCapPtr = pGpu->gpuClData.upstreamPort.PCIEL1SsCapPtr; + } + else //Error + { + DBG_BREAKPOINT(); + return NV_FALSE; + } + + objClSetPcieHWBC(pGpu, pCl); + + // set property to indicate whether the GPU is behind any bridge + if (pGpu->gpuClData.rootPort.addr.valid && pGpu->gpuClData.upstreamPort.addr.valid) + { + if ((pGpu->gpuClData.rootPort.addr.domain != pGpu->gpuClData.upstreamPort.addr.domain) || + (pGpu->gpuClData.rootPort.addr.bus != pGpu->gpuClData.upstreamPort.addr.bus) || + (pGpu->gpuClData.rootPort.addr.device != pGpu->gpuClData.upstreamPort.addr.device) || + (pGpu->gpuClData.rootPort.addr.func != pGpu->gpuClData.upstreamPort.addr.func)) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BEHIND_BRIDGE, NV_TRUE); + } + } + // + // If GPU is behind bridge then to enable\disable ASPM L0S\L1 should + // be decided based upon the link capability of the immediate bridge + // with which GPU is connected (Bug 540109) + // + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_BEHIND_BRIDGE)) + { + if (pGpu->gpuClData.upstreamPort.addr.valid) + { + if (clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.upstreamPort, + CL_PCIE_LINK_CAP, &linkCap) == NV_OK) + { + if (!CL_IS_L0_SUPPORTED(linkCap)) + { + pGpu->setProperty(pGpu, + PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED, + NV_TRUE); + } + if (!CL_IS_L1_SUPPORTED(linkCap)) + { + pGpu->setProperty(pGpu, + PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED, + NV_TRUE); + } + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Error reading pcie link control status of upstream port\n"); + // disable L0\L1 support by default + pGpu->setProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L0S_UNSUPPORTED, NV_TRUE); 
+ pGpu->setProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED, NV_TRUE); + } + } + } + + // + // We must check for specific ports on which we should always support ASPM L1. + // This allows us to include ASPM L1 support on specific ports, such as PEX + // slots coming off the CPU rather than the chipset. POR for which to support + // is generally determined by HW. SW then adds it here. + // + + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_UNSUPPORTED)) + { + NvU16 vendorID; + NvU16 deviceID; + + if (pGpu->gpuClData.upstreamPort.addr.valid) + { + vendorID = pGpu->gpuClData.upstreamPort.VendorID; + deviceID = pGpu->gpuClData.upstreamPort.DeviceID; + } + else if (pGpu->gpuClData.rootPort.addr.valid) + { + vendorID = pGpu->gpuClData.rootPort.VendorID; + deviceID = pGpu->gpuClData.rootPort.DeviceID; + } + else + { + vendorID = PCI_INVALID_VENDORID; + deviceID = PCI_INVALID_DEVICEID; + } + + // + // Any root ports not in our list remains off the supported list. That + // means ASPM L1 support will be dictated by the chipset POR as a whole, + // and not on a per-root-port basis. + // + + switch(vendorID) + { + case PCI_VENDOR_ID_INTEL: + switch(deviceID) + { + case INTEL_LYNNFIELD_ROOTPORT_CPU1: + // Bug 706926: Intel H57 Chipset (based on LPC D31 F0 PCI DevID 3B08h ) + // need to remove from ASPM POR + if (pCl->getProperty(pCl, PDB_PROP_CL_INTEL_CPU_ROOTPORT1_NEEDS_H57_WAR)) + break; + + case INTEL_LYNNFIELD_ROOTPORT_CPU2: + pGpu->setProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED, NV_TRUE); + break; + + default: + // This device id is not in our per-root-port POR. + break; + } + break; + + default: + // This vendor has no devices in our per-root-port POR. + break; + } + } + + return NV_TRUE; +} + +// +// Find out the P2P Bridge(port) which bridges to the secondary bus +// +void * +clFindP2PBrdg_IMPL +( + OBJCL *pCl, + NvU32 domain, + NvU8 secBus, + NvU8 *pbus, + NvU8 *pdevice, + NvU8 *pfunc, + NvU16 *vendorID, + NvU16 *deviceID +) +{ + PBUSTOPOLOGYINFO pBusTopologyInfo; + OBJGPU *pGpu; + NvU32 gpuAttachCnt, gpuAttachMask, gpuInstance; + NvU16 secBus16 = secBus; + + gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + gpuInstance = 0; + pGpu = gpumgrGetNextGpu(gpuAttachMask, &gpuInstance); + + if (pGpu && !IS_SIMULATION(pGpu)) + { + secBus16 = PCI_MAX_BUSES; + // And we will store the PCI-E topology only once + } + else + { + // If pGPU is NULL we do not know if we are in simulation + // I verified this does not happen. pGPU is always set. + secBus16 = secBus; + if (gpuAttachCnt > 1) + { + // Free the topology in this case only when the GPU count is greater than 1 + // Of we have only one GPU then we do not need to rescan the bus. 
+ clFreeBusTopologyCache(pCl); + } + } + + // If the bus topology is not cached, do it here + if (clStoreBusTopologyCache(pCl, domain, secBus16) != NV_OK) + { + return NULL; + } + + pBusTopologyInfo = pCl->pBusTopologyInfo; + + while (pBusTopologyInfo) + { + if ((pBusTopologyInfo->secBus == secBus) && + (pBusTopologyInfo->domain == domain) && + (pBusTopologyInfo->bus <= secBus - 1) && + (pBusTopologyInfo->bVgaAdapter == NV_FALSE)) + { + *pbus = pBusTopologyInfo->bus; + *pdevice = pBusTopologyInfo->device; + *pfunc = pBusTopologyInfo->func; + *vendorID = pBusTopologyInfo->busInfo.vendorID; + *deviceID = pBusTopologyInfo->busInfo.deviceID; + return pBusTopologyInfo->handle; + } + pBusTopologyInfo = pBusTopologyInfo->next; + } + + return NULL; +} + +static void +objClGpuMapRootPort +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + NBADDR *pRoot = &pGpu->gpuClData.rootPort.addr; + void *vAddr; + RmPhysAddr pcieConfigSpaceBase; + + NV_ASSERT_OR_RETURN_VOID(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)); + + if (!pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) + || !pRoot->valid + || (pGpu->gpuClData.rootPort.vAddr != 0)) + { + return; + } + + pcieConfigSpaceBase = clFindPcieConfigSpaceBase(pCl, pRoot->domain, pRoot->bus); + if (pcieConfigSpaceBase == 0) + { + return; + } + + vAddr = osMapKernelSpace(pcieConfigSpaceBase | + pRoot->bus << PCIE_BUS_SHIFT | + pRoot->device << PCIE_DEVICE_SHIFT | + pRoot->func << PCIE_FUNC_SHIFT, + RM_PAGE_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + if (vAddr == 0) + { + NV_PRINTF(LEVEL_ERROR, + "NVPCIE: unable to map root port PCIE config space.\n"); + return; + } + + pGpu->gpuClData.rootPort.vAddr = vAddr; +} + +static void +objClGpuUnmapRootPort +( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + NV_ASSERT_OR_RETURN_VOID(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)); + + if (pGpu->gpuClData.rootPort.vAddr != 0) + { + osUnmapKernelSpace(pGpu->gpuClData.rootPort.vAddr, RM_PAGE_SIZE); + pGpu->gpuClData.rootPort.vAddr = 0; + } +} + +static void +objClGpuMapEnhCfgSpace +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + NvU8 bus; + NvU8 device; + NvU32 domain; + RmPhysAddr pcieConfigSpaceBase; + + NV_ASSERT_OR_RETURN_VOID(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)); + + if (!pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) || + (pGpu->gpuCfgAddr != NULL) || (gpuGetDBDF(pGpu) == 0)) + { + return; + } + + domain = gpuGetDomain(pGpu); + bus = gpuGetBus(pGpu); + device = gpuGetDevice(pGpu); + + pcieConfigSpaceBase = clFindPcieConfigSpaceBase(pCl, domain, bus); + if (pcieConfigSpaceBase == 0) + { + return; + } + + pGpu->gpuCfgAddr = osMapKernelSpace(pcieConfigSpaceBase | + (bus << PCIE_BUS_SHIFT) | + (device << PCIE_DEVICE_SHIFT), + RM_PAGE_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + + if (pGpu->gpuCfgAddr == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "unable to map GPU's PCI-E configuration space.\n"); + return; + } +} + +static void +objClGpuUnmapEnhCfgSpace +( + OBJGPU *pGpu +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + NV_ASSERT_OR_RETURN_VOID(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)); + + if (pGpu->gpuCfgAddr != NULL) + { + osUnmapKernelSpace(pGpu->gpuCfgAddr, RM_PAGE_SIZE); + pGpu->gpuCfgAddr = NULL; + } +} + +static 
NV_STATUS +objClSetPortCapsOffsets +( + OBJCL *pCl, + PORTDATA *pPort +) +{ + clSetPortPcieCapOffset(pCl, pPort->addr.handle, + &pPort->PCIECapPtr); + objClSetPortPcieEnhancedCapsOffsets(pCl, pPort); + + return NV_OK; +} + +// +// Look for the PCI Express capability offset +// Copied from nvagp.c and modified +// +NV_STATUS +clSetPortPcieCapOffset_IMPL +( + OBJCL *pCl, + void *handle, + NvU32 *cap_offset +) +{ + NvU8 cap_next; + NvU32 pcie_caps; + + if (handle == 0) + return NV_ERR_GENERIC; // handle hasn't been initialized + + if (((osPciReadDword(handle, 0x4)) & 0x00100000) == 0) + { + NV_PRINTF(LEVEL_ERROR, + "NVPCIE: Upstream port doesn't support PCI Express Capability structure. This is a violation of PCIE spec\n"); + NV_ASSERT(0); + return NV_ERR_GENERIC; // chipset doesn't support capability ptrs + } + + // find the PCI offset for the PCI Express Cap ID + + cap_next = osPciReadByte(handle, PCI_CAPABILITY_LIST); + while (cap_next) + { + pcie_caps = osPciReadDword(handle, cap_next); + if ((pcie_caps & CAP_ID_MASK) == CAP_ID_PCI_EXPRESS) + break; // found the PCI Express Cap ID + cap_next = (NvU8)((pcie_caps >> 8) & 0xFF); + } + + if (cap_next == 0) + return NV_ERR_GENERIC; // didn't find the PCI Express capid + + *cap_offset = cap_next; + return NV_OK; +} + +// +// Look for the PCI Express Enhanced capability list +// +static NV_STATUS +objClSetPortPcieEnhancedCapsOffsets +( + OBJCL *pCl, + PORTDATA *pPort +) +{ + NvU32 cap_next, cap_type, value; + + // make sure we can access the enhanced configuration space + if (!pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) || + (!pPort->addr.valid)) + { + return NV_ERR_INVALID_STATE; + } + + // Enhanced CFG space starts at 0x100 + cap_next = 0x100; + while (cap_next) + { + value = clPcieReadDword(pCl, + pPort->addr.domain, + pPort->addr.bus, + pPort->addr.device, + pPort->addr.func, + cap_next); + cap_type = REF_VAL(PCIE_CAP_HEADER_ID, value); + switch (cap_type) + { + case PCIE_CAP_ID_ERROR: + pPort->PCIEErrorCapPtr = cap_next; + break; + case PCIE_CAP_ID_VC: + pPort->PCIEVCCapPtr = cap_next; + break; + case PCIE_CAP_ID_L1_PM_SUBSTATES: + pPort->PCIEL1SsCapPtr = cap_next; + break; + } + cap_next = REF_VAL(PCIE_CAP_HEADER_NEXT, value); + } + + return NV_OK; +} + +NV_STATUS +clPcieReadPortConfigReg_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + PORTDATA *pPort, + NvU32 offset, + NvU32 *value +) +{ + if ((kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) != + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) || + !pPort->addr.valid) + { + return NV_ERR_GENERIC; + } + + if ((offset >= CL_PCIE_BEGIN) && (offset + sizeof(NvU32) <= CL_PCIE_END)) + { + if (pPort->PCIECapPtr) + *value = osPciReadDword(pPort->addr.handle, + offset - CL_PCIE_BEGIN + pPort->PCIECapPtr); + else + return NV_ERR_GENERIC; + } + else if ((offset >= CL_AER_BEGIN) && (offset + sizeof(NvU32) <= CL_AER_END)) + { + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && pPort->PCIEErrorCapPtr) + *value = clPcieReadDword(pCl, + pPort->addr.domain, + pPort->addr.bus, + pPort->addr.device, + pPort->addr.func, + offset - CL_AER_BEGIN + pPort->PCIEErrorCapPtr); + else + return NV_ERR_GENERIC; + } + else if ((offset >= CL_VC_BEGIN) && (offset + sizeof(NvU32) <= CL_VC_END)) + { + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && pPort->PCIEVCCapPtr) + *value = clPcieReadDword(pCl, + pPort->addr.domain, + pPort->addr.bus, + pPort->addr.device, + pPort->addr.func, + offset - CL_VC_BEGIN + pPort->PCIEVCCapPtr); + else + return NV_ERR_GENERIC; + } + else if ((offset >= 
CL_L1_SS_BEGIN) && (offset + sizeof(NvU32) <= CL_L1_SS_END)) + { + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && pPort->PCIEL1SsCapPtr) + *value = clPcieReadDword(pCl, + pPort->addr.domain, + pPort->addr.bus, + pPort->addr.device, + pPort->addr.func, + offset - CL_L1_SS_BEGIN + pPort->PCIEL1SsCapPtr); + else + return NV_ERR_GENERIC; + } + else if (offset + sizeof(NvU32) <= 0x40) + { + *value = osPciReadDword(pPort->addr.handle, offset); + } + else + return NV_ERR_GENERIC; // read from extended PCI Express configuration space, currently not supported yet + + return NV_OK; +} + +static NvBool +objClBR03Exists +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + void *handle; + NvU8 dummy8, gpuBus; + NvU32 gpuDomain; + NvU16 vendor, device; + + if ((kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) != + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) || + !pGpu->gpuClData.upstreamPort.addr.valid) + { + return NV_FALSE; + } + + gpuDomain = gpuGetDomain(pGpu); + gpuBus = gpuGetBus(pGpu); + + // Upstream port + handle = clFindP2PBrdg(pCl, gpuDomain, gpuBus, &dummy8, &dummy8, &dummy8, &vendor, &device); + + // make sure handle was found + if (!handle) + return NV_FALSE; + + // make sure that this is the BR03 upstream port + if ( (vendor != PCI_VENDOR_ID_NVIDIA) || + (device != NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03) ) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +static NvBool +objClBR04Exists +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + void *handle; + NvU8 dummy8, gpuBus; + NvU32 gpuDomain; + NvU16 vendor, device; + + if ((kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) != + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) || + !pGpu->gpuClData.upstreamPort.addr.valid) + { + return NV_FALSE; + } + + gpuDomain = gpuGetDomain(pGpu); + gpuBus = gpuGetBus(pGpu); + + // Upstream port + handle = clFindP2PBrdg(pCl, gpuDomain, gpuBus, &dummy8, &dummy8, &dummy8, &vendor, &device); + + // make sure handle was found + if (!handle) + return NV_FALSE; + + // make sure that this is the BR04 upstream port + if ( (vendor != PCI_VENDOR_ID_NVIDIA) || + (!IS_DEVID_BR04(device)) ) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +void * +clFindBrdgUpstreamPort_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvBool portUpstreamOfBrdg, + NvU8 *pbus, + NvU8 *pdev, + NvU8 *pfunc, + NvU16 *pvendorID, + NvU16 *pdeviceID, + NvU8 *pbusBrdg +) +{ + void *handleBrdg, *handleUpstream; + NvU8 bus = 0, dev = 0, func = 0, gpuBus; + NvU32 domain = 0; + NvU16 vendor = 0, device = 0; + + if ((kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) != + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) || + !pGpu->gpuClData.upstreamPort.addr.valid) + { + return NULL; + } + + domain = gpuGetDomain(pGpu); + gpuBus = gpuGetBus(pGpu); + + // find upstream port of Brdg + handleBrdg = clFindP2PBrdg(pCl, domain, gpuBus, + &bus, &dev, &func, &vendor, &device); + + // if caller asks for it, save off bus # of this Brdg + if (pbusBrdg) + { + *pbusBrdg = bus; + } + + // make sure that this is the Brdg upstream port + if (!handleBrdg || !IS_SUPPORTED_PCIE_SWITCH(vendor, device)) + { + return NULL; + } + + if (portUpstreamOfBrdg) + { + // find port upstream of Brdg + handleUpstream = clFindP2PBrdg(pCl, domain, bus, + &bus, &dev, &func, &vendor, &device); + + // make sure handle was found + if (!handleUpstream) + return NULL; + } + else + { + handleUpstream = handleBrdg; + } + + *pbus = bus; + *pdev = dev; + *pfunc = func; + *pvendorID = vendor; + *pdeviceID = device; + + return handleUpstream; +} + +static void * +objClFindRootPort +( + OBJGPU *pGpu, + OBJCL *pCl, + NvU32 domain, + 
NvU8 bus, + NvU8 *pbusRp, + NvU8 *pdevRp, + NvU8 *pfuncRp, + NvU16 *pvendorIDRp, + NvU16 *pdeviceIDRp +) +{ + NvU32 PCIECapPtr; + NvU32 portCaps = 0; + void *handleUp; + NvU8 busUp, devUp, funcUp; + NvU16 vendorIDUp, deviceIDUp; + NV_STATUS status; + + do + { + // find upstream port + handleUp = clFindP2PBrdg(pCl, domain, bus, + &busUp, &devUp, &funcUp, + &vendorIDUp, &deviceIDUp); + + // make sure handle was found + if (!handleUp) + return NULL; + + status = clSetPortPcieCapOffset(pCl, handleUp, &PCIECapPtr); + if (status != NV_OK) + { + // If capability pointer is not found ignore and move to next upstream port. + bus = busUp; + continue; + } + + portCaps = osPciReadDword(handleUp, + CL_PCIE_CAP - CL_PCIE_BEGIN + PCIECapPtr); + + bus = busUp; + } while (!CL_IS_ROOT_PORT(portCaps)); + + *pbusRp = busUp; + *pdevRp = devUp; + *pfuncRp = funcUp; + *pvendorIDRp = vendorIDUp; + *pdeviceIDRp = deviceIDUp; + + return handleUp; +} + +// +// clCountBR +// +// Returns the count of cascaded BR03s and BR04s right under this GPU. +// +void +clCountBR_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvU8 *pBR03Count, + NvU8 *pBR04Count, + NvU8 *pPLXCount +) +{ + void *handleUp; + NvU16 vendorIDUp, deviceIDUp; + NvU8 bus = 0xff, busUp, deviceUp, funcUp; + NvU32 domain; + NvU8 downstreamPortBus; + NvU8 BR03Count = 0; + NvU8 BR04Count = 0; + NvU8 PLXCount = 0; + + domain = gpuGetDomain(pGpu); + handleUp = clFindBrdgUpstreamPort(pGpu, pCl, NV_TRUE, + &busUp, &deviceUp, &funcUp, + &vendorIDUp, &deviceIDUp, + &downstreamPortBus); + + while (handleUp) + { + if ((vendorIDUp == PCI_VENDOR_ID_NVIDIA) && (deviceIDUp == NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03)) + { + BR03Count++; + } + else if((vendorIDUp == PCI_VENDOR_ID_NVIDIA) && IS_DEVID_BR04(deviceIDUp)) + { + BR04Count++; + } + else if((vendorIDUp == PCI_VENDOR_ID_PLX) && IS_DEVID_SUPPORTED_PLX(deviceIDUp)) + { + PLXCount++; + } + else + { + break; + } + + bus = busUp; + + // continue up + handleUp = clFindP2PBrdg(pCl, domain, bus, &busUp, &deviceUp, + &funcUp, &vendorIDUp, &deviceIDUp); + if (IS_SUPPORTED_PCIE_SWITCH(vendorIDUp, deviceIDUp)) + { + // port up of bridge + handleUp = clFindP2PBrdg(pCl, domain, busUp, &busUp, &deviceUp, + &funcUp, &vendorIDUp, &deviceIDUp); + } + } + + *pBR03Count = BR03Count; + *pBR04Count = BR04Count; + *pPLXCount = PLXCount; + return ; +} + +// +// clSearchBR04() returns the bus, revision of the BR04s in the system, +// and their count. +// It ignores the BR04s located in GX2 boards. +// It includes BR04s located on the motherboard, or on a riser board. +// +void +clSearchBR04_IMPL +( + OBJCL *pCl, + NvU8 *pBR04BusArray, + NvU8 *pBR04RevArray, + NvU8 *pBR04Count +) +{ + PBUSTOPOLOGYINFO pBusTopologyInfo = pCl->pBusTopologyInfo; + PBUSTOPOLOGYINFO pBusTopologyInfoBR04DS, pBusTopologyInfoBR04GPU; + NvU16 vendorID, deviceID; + void *handleBR04; + NvU32 regValue = 0; + NvU8 BR04DSPorts = 0; + NvU8 BR04Count = 0; + + while (pBusTopologyInfo) + { + if (pBusTopologyInfo->bVgaAdapter) + { + // This is not a P2P bridge + pBusTopologyInfo = pBusTopologyInfo->next; + continue; + } + + if (!IS_DEVID_BR04(pBusTopologyInfo->busInfo.deviceID)) + { + // This is not a BR04 + pBusTopologyInfo = pBusTopologyInfo->next; + continue; + } + + // + // Look at the devices connected to this BR04. + // If it is a GX2 GPU, then skip. + // BR04 has one upstream port and 2 to 4 downstream ports. + // We look at the downstream ports of BR04. + // We explicitely look for at least 2 BR04 downstream ports. 
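As a concrete illustration of the check described above (hypothetical bus numbers, not taken from the commit): if a candidate BR04 upstream port sits on bus 4 with secondary bus 5, its downstream ports appear as further BR04 functions located on bus 5. The scan below counts those functions; two or more of them mark the bus-4 entry as a BR04 upstream port rather than one of its downstream ports, and its bus number and revision are then recorded.

    bus 4: BR04 upstream port    (secBus = 5)
    bus 5: BR04 downstream port  (secBus = 6)  -> counted
    bus 5: BR04 downstream port  (secBus = 7)  -> counted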
+        //
+        pBusTopologyInfoBR04DS = pCl->pBusTopologyInfo;
+        BR04DSPorts = 0;
+        while (pBusTopologyInfoBR04DS)
+        {
+            if ((pBusTopologyInfoBR04DS->bus == pBusTopologyInfo->secBus) &&
+                IS_DEVID_BR04(pBusTopologyInfoBR04DS->busInfo.deviceID))
+            {
+                //
+                // We have one potential downstream port
+                // Look to see if a GX2 GPU is connected to this BR04
+                //
+                pBusTopologyInfoBR04GPU = pCl->pBusTopologyInfo;
+                while (pBusTopologyInfoBR04GPU)
+                {
+                    if (pBusTopologyInfoBR04GPU->bus == pBusTopologyInfoBR04DS->secBus)
+                    {
+                        break;
+                    }
+                    pBusTopologyInfoBR04GPU = pBusTopologyInfoBR04GPU->next;
+                }
+                BR04DSPorts++;
+            }
+            pBusTopologyInfoBR04DS = pBusTopologyInfoBR04DS->next;
+        }
+
+        if (BR04DSPorts > 1)
+        {
+            //
+            // We have at least 2 downstream ports. This is a BR04.
+            // This is not a downstream port of a BR04 connected to an upstream port of another BR04.
+            // Get the revision.
+            //
+            handleBR04 = osPciInitHandle(pBusTopologyInfo->domain,
+                                         pBusTopologyInfo->bus,
+                                         pBusTopologyInfo->device,
+                                         pBusTopologyInfo->func, &vendorID, &deviceID);
+            regValue = osPciReadDword(handleBR04, NV_BR04_XVU_REV_CC);
+            if (DRF_VAL(_BR04_XVU, _REV_CC, _MAJOR_REVISION_ID, regValue) == 0xA)
+            {
+                pBR04RevArray[BR04Count] = (NvU8) DRF_VAL(_BR04_XVU, _REV_CC, _MINOR_REVISION_ID, regValue);
+            }
+            else
+            {
+                pBR04RevArray[BR04Count] = 0xFF;
+            }
+            pBR04BusArray[BR04Count] = pBusTopologyInfo->bus;
+            BR04Count++;
+        }
+        pBusTopologyInfo = pBusTopologyInfo->next;
+    }
+
+    *pBR04Count = BR04Count;
+}
+
+//
+// clFindCommonBR
+//
+// Returns the bus number of a common bridge behind the 2 GPUs.
+// The returned values are 0xFF when no bridge is found.
+// This function finds the uppermost bridge(s) if bScanAll is set to NV_TRUE.
+// This function finds the first recognized bridge (BR04, BR03, PLX) under the GPUs if bScanAll is set to NV_FALSE.
+//
+void
+clFindCommonBR_IMPL
+(
+    OBJGPU *pGpu1,
+    OBJGPU *pGpu2,
+    OBJCL  *pCl,
+    NvU8   *pBR03Bus,
+    NvU8   *pBR04Bus,
+    NvU8   *pPLXBus,
+    NvBool  bScanAll
+)
+{
+    void *handleUp1, *handleUp2;
+    NvU16 vendorIDUp1, deviceIDUp1, vendorIDUp2, deviceIDUp2;
+    NvU8 bus1 = 0xff, busUp1, deviceUp1, funcUp1, bus2 = 0xff, busUp2, deviceUp2, funcUp2;
+    NvU32 domain1, domain2;
+    NvU8 downstreamPortBus1, downstreamPortBus2;
+    NvU8 BR03Bus = 0xFF;
+    NvU8 BR04Bus = 0xFF;
+    NvU8 PLXBus = 0xFF;
+
+    NV_ASSERT(pGpu1 != pGpu2);
+
+    domain1 = gpuGetDomain(pGpu1);
+    domain2 = gpuGetDomain(pGpu2);
+
+    if (domain1 != domain2)
+    {
+        //
+        //1. If two GPUs are from different PCI domains, then there cannot be a common BR03/BR04 bridge
+        //   that connects to both GPUs, because a new domain will start from a host bridge.
+        //2. Returning early when two GPUs are from different PCI domains saves significant GPU initialization
+        //   time when we have more than 6 GPUs in the system connected to different domains. This function
+        //   is called multiple times while searching for 2-way, 3-way, and 4-way SLI combinations.
(Bug 770154) + // + + *pBR03Bus = BR03Bus; + *pBR04Bus = BR04Bus; + *pPLXBus = PLXBus; + return; + } + + handleUp1 = clFindBrdgUpstreamPort(pGpu1, pCl, NV_TRUE, + &busUp1, &deviceUp1, &funcUp1, + &vendorIDUp1, &deviceIDUp1, + &downstreamPortBus1); + + // Traverse the PCI-E tree under GPU1 + while (handleUp1) + { + if (IS_SUPPORTED_PCIE_SWITCH(vendorIDUp1, deviceIDUp1)) + { + handleUp2 = clFindBrdgUpstreamPort(pGpu2, pCl, NV_TRUE, + &busUp2, &deviceUp2, &funcUp2, + &vendorIDUp2, &deviceIDUp2, + &downstreamPortBus2); + + // Traverse the PCI-E tree under GPU2 + while (handleUp2) + { + // Same bus + if (busUp2 == busUp1) + { + if ((vendorIDUp2 == PCI_VENDOR_ID_NVIDIA) && + IS_DEVID_BR04(deviceIDUp1) && IS_DEVID_BR04(deviceIDUp2)) + { + BR04Bus = busUp2; + break; + } + + if ((vendorIDUp2 == PCI_VENDOR_ID_NVIDIA) && + (deviceIDUp1 == NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03) && + (deviceIDUp2 == NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03)) + { + BR03Bus = busUp2; + break; + } + if ((vendorIDUp2 == PCI_VENDOR_ID_PLX) && + IS_DEVID_SUPPORTED_PLX(deviceIDUp1) && + IS_DEVID_SUPPORTED_PLX(deviceIDUp2)) + { + PLXBus = busUp2; + break; + } + } + + bus2 = busUp2; + + // continue up + handleUp2 = clFindP2PBrdg(pCl, domain2, bus2, &busUp2, &deviceUp2, + &funcUp2, &vendorIDUp2, &deviceIDUp2); + } + } + + // + // If we requested to not scan all the devices up to the root port, + // and we found one, stop right here. + // + if (!bScanAll && + ((BR04Bus != 0xFF) || (BR03Bus != 0xFF) || (PLXBus != 0xFF))) + { + break; + } + + bus1 = busUp1; + + // continue up + handleUp1 = clFindP2PBrdg(pCl, domain1, bus1, &busUp1, &deviceUp1, + &funcUp1, &vendorIDUp1, &deviceIDUp1); + } + + *pBR03Bus = BR03Bus; + *pBR04Bus = BR04Bus; + *pPLXBus = PLXBus; + return ; +} + +// +// clFindCommonDownstreamBR_IMPL +// This function finds the common bridge immediately downstream the GPUs. +// clFindCommonBR can return multiple bridges if bScanAll is NV_TRUE. +// +void +clFindCommonDownstreamBR_IMPL +( + OBJGPU *pGpu1, + OBJGPU *pGpu2, + OBJCL *pCl, + NvU8 *pPciSwitchBus +) +{ + void *handleUp1, *handleUp2; + NvU16 vendorIDUp1, deviceIDUp1, vendorIDUp2, deviceIDUp2; + NvU8 bus1 = 0xff, busUp1, deviceUp1, funcUp1; + NvU8 bus2 = 0xff, busUp2, deviceUp2, funcUp2; + NvU32 domain1, domain2; + NvU8 downstreamPortBus1, downstreamPortBus2; + NvU8 pciSwitchBus = 0xFF; + + NV_ASSERT(pGpu1 != pGpu2); + + domain1 = gpuGetDomain(pGpu1); + domain2 = gpuGetDomain(pGpu2); + + if (domain1 != domain2) + { + // + //1. If two GPUs are from different PCI domains, then there can not be a common BR03/BR04 bridge + // that connects to both GPUs. Because a new domain will start form a host bridge. + //2. Returning early when two GPUs are from different PCI domains save significant GPU initialization + // time when we have more that 6 GPUs in the system connected to different domains. This function + // is called multiple times while searching for 2-way 3-way and 4-way sli combination. 
(Bug 770154) + // + + *pPciSwitchBus = pciSwitchBus; + return; + } + + handleUp1 = clFindBrdgUpstreamPort(pGpu1, pCl, NV_TRUE, + &busUp1, &deviceUp1, &funcUp1, + &vendorIDUp1, &deviceIDUp1, + &downstreamPortBus1); + + // Traverse the PCI-E tree under GPU1 + while (handleUp1) + { + if (IS_SUPPORTED_PCIE_SWITCH(vendorIDUp1, deviceIDUp1)) + { + handleUp2 = clFindBrdgUpstreamPort(pGpu2, pCl, NV_TRUE, + &busUp2, &deviceUp2, &funcUp2, + &vendorIDUp2, &deviceIDUp2, + &downstreamPortBus2); + + // Traverse the PCI-E tree under GPU2 + while (handleUp2) + { + // Same bus + if (busUp2 == busUp1) + { + if (IS_SUPPORTED_PCIE_SWITCH(vendorIDUp2, deviceIDUp1) && + IS_SUPPORTED_PCIE_SWITCH(vendorIDUp2, deviceIDUp2)) + { + pciSwitchBus = busUp2; + break; + } + } + + bus2 = busUp2; + + // continue up + handleUp2 = clFindP2PBrdg(pCl, domain2, bus2, &busUp2, &deviceUp2, + &funcUp2, &vendorIDUp2, &deviceIDUp2); + } + } + + // If we found a supported switch, stop right here. + if (pciSwitchBus != 0xFF) + { + break; + } + + bus1 = busUp1; + + // continue up + handleUp1 = clFindP2PBrdg(pCl, domain1, bus1, &busUp1, &deviceUp1, + &funcUp1, &vendorIDUp1, &deviceIDUp1); + } + + *pPciSwitchBus = pciSwitchBus; + return; +} + +// clFindBR +// +// Returns the bus number of the most upper bridge(s) under the GPUs, +// a boolean indicating that a non BR04 A03 bridge has been found between the GPU +// and the host bridge - other than dagwood. +// The returned values are 0xFF when no bridge is found. +// The *pBRNot3rdParty argument is currently solely used and has been tested with X58. +// +void +clFindBR_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvU8 *pBR03Bus, + NvU8 *pBR04Bus, + NvBool *pBRNot3rdParty, + NvBool *pNoUnsupportedBRFound, + NvBool *pNoOnboardBR04, + NvU8 *pPLXBus +) +{ + void *handleUp, *br04handle = NULL; + NvU16 vendorIDUp, deviceIDUp; + NvU8 bus = 0xff, busUp, deviceUp, funcUp; + NvU32 domain; + NvU8 downstreamPortBus1; + NvU32 regValue = 0; + NvU32 gpuBrNot3rdPartyCount = 0, gpuBrCount = 0; + NvBool bGpuIsMultiGpuBoard = NV_FALSE; + NvBool bIsGX2 = NV_FALSE; + NvBool bIsGemini = NV_FALSE; + NvU32 gpuBR04Count = 0; + NvU8 BR03Bus = 0xFF; + NvU8 BR04Bus = 0xFF; + NvU8 PLXBus = 0xFF; + NvBool brNot3rdParty = NV_FALSE; + NvBool bNoOnboardBR04 = NV_TRUE; + NvBool bNoUnsupportedBRFound = NV_TRUE; + + NvU8 BR04Rev = 0x0; + + domain = gpuGetDomain(pGpu); + handleUp = clFindBrdgUpstreamPort(pGpu, pCl, NV_TRUE, + &busUp, &deviceUp, &funcUp, + &vendorIDUp, &deviceIDUp, + &downstreamPortBus1); + + bGpuIsMultiGpuBoard = gpuIsMultiGpuBoard(pGpu, &bIsGX2, &bIsGemini); + + // Traverse the pci tree + while (handleUp) + { + gpuBrCount++; + br04handle = NULL; + BR04Rev = 0x0; + if (IS_DEVID_BR04(deviceIDUp)) + { + BR04Bus = busUp; + br04handle = handleUp; + regValue = osPciReadDword(br04handle, NV_BR04_XVU_REV_CC); + if (DRF_VAL(_BR04_XVU, _REV_CC, _MAJOR_REVISION_ID, regValue) == 0xA) + { + BR04Rev = (NvU8) DRF_VAL(_BR04_XVU, _REV_CC, _MINOR_REVISION_ID, regValue); + } + gpuBR04Count++; + } + + if (deviceIDUp == NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03) + { + BR03Bus = busUp; + } + + // + // Check if upstream device ID is matching with any device ID + // on Patsburg PCIE DID range. + // + if (((deviceIDUp >= PATSBURG_PCIE_DEVICE_MIN_DEVID) && + (deviceIDUp <= PATSBURG_PCIE_DEVICE_MAX_DEVID)) || + (deviceIDUp == PATSBURG_PCIE_DEVICE_DEVID)) + { + bNoUnsupportedBRFound = NV_FALSE; + } + + if (IS_DEVID_SUPPORTED_PLX(deviceIDUp)) + { + PLXBus = busUp; + } + + // Do not count the BR04A03, PLX, and the bridges behind the dagwoods. 
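+        // BR04Rev == 0x3 identifies a BR04 A03, PLXBus != 0xFF means a PLX switch
+        // has already been seen on this path, and the (gpuBrCount != 1) check skips
+        // the bridge sitting directly on a multi-GPU (dagwood) board.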
+ if ((BR04Rev != 0x3) && (PLXBus == 0xFF) && ((gpuBrCount != 1) || (bGpuIsMultiGpuBoard == NV_FALSE))) + { + gpuBrNot3rdPartyCount++; + } + + bus = busUp; + + // continue up + handleUp = clFindP2PBrdg(pCl, domain, bus, &busUp, &deviceUp, + &funcUp, &vendorIDUp, &deviceIDUp); + } + + if ((bIsGX2 || bIsGemini) && gpuBR04Count) + { + // Ignore one BR04 in case of GX2 or Gemini + gpuBR04Count--; + } + if (gpuBR04Count) + { + bNoOnboardBR04 = NV_FALSE; + } + + // One bridge is just behind the root port. Ignore it. + brNot3rdParty = (gpuBrNot3rdPartyCount > 1); + + *pBR03Bus = BR03Bus; + *pBR04Bus = BR04Bus; + *pBRNot3rdParty = brNot3rdParty; + *pNoOnboardBR04 = bNoOnboardBR04 ; + *pNoUnsupportedBRFound = bNoUnsupportedBRFound; + *pPLXBus = PLXBus; + + return ; +} + +// +// Free the cached bus topology +// Do not perform per-gpu memory tracking as pCl remains +// during the SLI transitions. +// +void +clFreeBusTopologyCache_IMPL(OBJCL *pCl) +{ + PBUSTOPOLOGYINFO pBusTopologyInfo = pCl->pBusTopologyInfo, pBusTopologyInfoNext; + + while (pBusTopologyInfo) + { + pBusTopologyInfoNext = pBusTopologyInfo->next; + portMemFree(pBusTopologyInfo); + pBusTopologyInfo = pBusTopologyInfoNext; + } + pCl->pBusTopologyInfo = NULL; +} + +// +// Cache the bus topology +// Do not perform per-gpu memory tracking as pCl remains +// during the SLI transitions. +// Perform Bus topology caching for new domain. +// +NV_STATUS +clStoreBusTopologyCache_IMPL +( + OBJCL *pCl, + NvU32 domain, + NvU16 secBus +) +{ + void *handle; + NvU16 vendorID, deviceID; + NvS16 bus = 0; + NvS8 device = 0, func = 0; + NvU16 pciSubBaseClass; + PBUSTOPOLOGYINFO pBusTopologyInfo = NULL, pBusTopologyInfoLast = NULL; + + if (pCl->pBusTopologyInfo) + { + pBusTopologyInfo = pCl->pBusTopologyInfo; + while (pBusTopologyInfo) + { + if (pBusTopologyInfo->domain == domain) + { + // Already cached + return NV_OK; + } + + // Keep track of the current node This will capture the last node on exit. + pBusTopologyInfoLast = pBusTopologyInfo; + pBusTopologyInfo = pBusTopologyInfo->next; + } + } + + // We did not find our domain, so enumerate devices again and update cache. + for (bus = 0; bus < secBus; bus++) + { + for (device = 0; device < PCI_MAX_DEVICES; device++) + { + for (func = 0; func < PCI_MAX_FUNCTION; func++) + { + // read at bus, device, func + handle = osPciInitHandle(domain, (NvU8)bus, device, func, &vendorID, &deviceID); + if (!handle) + { + if (func == 0) + { + // If a read to function zero of a specified bus/device master aborts, + // then it is assumed that no such device exists on the bus since + // devices are required to implement function number zero. + // In this case reads to the remaining functions are not necessary. + break; + } + else + { + continue; + } + } + + if (vendorID == PCI_INVALID_VENDORID) + { + break; // skip to the next device + } + + pciSubBaseClass = osPciReadWord(handle, PCI_COMMON_CLASS_SUBCLASS); + pBusTopologyInfo = portMemAllocNonPaged(sizeof(BUSTOPOLOGYINFO)); + if (pBusTopologyInfo == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "Buffer Allocation for clStoreBusTopologyCache FAILED\n"); + clFreeBusTopologyCache(pCl); + + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + portMemSet(pBusTopologyInfo, 0, sizeof(BUSTOPOLOGYINFO)); + + // + // Append the new node to the end of the cache linked list. + // NOTE: pBusTopologyInfoLast holds either the last node in the + // cache or is NULL, in which case pCl's cache list does not exist. 
+ // + if (!pBusTopologyInfoLast) + { + pCl->pBusTopologyInfo = pBusTopologyInfo; + } + else + { + pBusTopologyInfoLast->next = pBusTopologyInfo; + } + pBusTopologyInfo->next = NULL; + pBusTopologyInfoLast = pBusTopologyInfo; + + pBusTopologyInfo->handle = handle; + pBusTopologyInfo->domain = domain; + pBusTopologyInfo->bus = (NvU8)bus; + pBusTopologyInfo->device = device; + pBusTopologyInfo->func = func; + pBusTopologyInfo->pciSubBaseClass = pciSubBaseClass; + pBusTopologyInfo->busInfo.vendorID = vendorID; + pBusTopologyInfo->busInfo.deviceID = deviceID; + pBusTopologyInfo->busInfo.subvendorID = osPciReadWord(handle, PCI_COMMON_SUBSYSTEM_VENDOR_ID); + pBusTopologyInfo->busInfo.subdeviceID = osPciReadWord(handle, PCI_COMMON_SUBSYSTEM_ID); + pBusTopologyInfo->busInfo.revisionID = osPciReadByte(handle, PCI_HEADER_TYPE0_REVISION_ID); + + if ((pciSubBaseClass == PCI_COMMON_CLASS_SUBBASECLASS_P2P) || + (pciSubBaseClass == PCI_COMMON_CLASS_SUBBASECLASS_HOST)) + { + pBusTopologyInfo->secBus = (NvU8)osPciReadByte(handle, PCI_TYPE_1_SECONDARY_BUS_NUMBER); + pBusTopologyInfo->bVgaAdapter = NV_FALSE; + } + else + { + pBusTopologyInfo->bVgaAdapter = NV_TRUE; + } + + if (func == 0 && ((osPciReadByte(handle, 0xE)) & PCI_MULTIFUNCTION) == 0) + { + break; // no need to cycle through functions + } + } + } + } + // + // Adding thread reset timeout here to fix Cisco bug 1277168. + // Enumerating pcie bus topology in cisco host c240 takes too long + // leading to RM timeout. + // + threadStateResetTimeout(NULL); + + return NV_OK; +} + + +NV_STATUS +clPcieWriteRootPortConfigReg_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvU32 offset, + NvU32 value +) +{ + if ((kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) != + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) || + !pGpu->gpuClData.rootPort.addr.valid) + { + return NV_ERR_GENERIC; + } + if ((offset >= CL_PCIE_BEGIN) && (offset + sizeof(NvU32) <= CL_PCIE_END)) + { + if (pGpu->gpuClData.rootPort.PCIECapPtr) + osPciWriteDword(pGpu->gpuClData.rootPort.addr.handle, + offset - CL_PCIE_BEGIN + pGpu->gpuClData.rootPort.PCIECapPtr, value); + else + return NV_ERR_GENERIC; + } + else if ((offset >= CL_AER_BEGIN) && (offset + sizeof(NvU32) <= CL_AER_END)) + { + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && pGpu->gpuClData.rootPort.PCIEErrorCapPtr) + clPcieWriteDword(pCl, + pGpu->gpuClData.rootPort.addr.domain, + pGpu->gpuClData.rootPort.addr.bus, + pGpu->gpuClData.rootPort.addr.device, + pGpu->gpuClData.rootPort.addr.func, + offset - CL_AER_BEGIN + pGpu->gpuClData.rootPort.PCIEErrorCapPtr, + value); + else + return NV_ERR_GENERIC; + } + else if ((offset >= CL_VC_BEGIN) && (offset + sizeof(NvU32) <= CL_VC_END)) + { + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && pGpu->gpuClData.rootPort.PCIEVCCapPtr) + clPcieWriteDword(pCl, + pGpu->gpuClData.rootPort.addr.domain, + pGpu->gpuClData.rootPort.addr.bus, + pGpu->gpuClData.rootPort.addr.device, + pGpu->gpuClData.rootPort.addr.func, + offset - CL_VC_BEGIN + pGpu->gpuClData.rootPort.PCIEVCCapPtr, + value); + else + return NV_ERR_GENERIC; + } + else if ((offset >= CL_L1_SS_BEGIN) && (offset + sizeof(NvU32) <= CL_L1_SS_END)) + { + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && pGpu->gpuClData.rootPort.PCIEL1SsCapPtr) + clPcieWriteDword(pCl, + pGpu->gpuClData.rootPort.addr.domain, + pGpu->gpuClData.rootPort.addr.bus, + pGpu->gpuClData.rootPort.addr.device, + pGpu->gpuClData.rootPort.addr.func, + offset - CL_L1_SS_BEGIN + pGpu->gpuClData.rootPort.PCIEL1SsCapPtr, + value); + else + return 
NV_ERR_GENERIC; + } + else + return NV_ERR_GENERIC; // invalid property + + return NV_OK; +} + +NV_STATUS +clPcieReadAerCapability_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + PcieAerCapability *pAER +) +{ + NV_STATUS status = NV_ERR_GENERIC; + + if ( pAER ) + { + portMemSet(pAER, 0, sizeof(*pAER)); + status = clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_CAP, &pAER->PexEnhCapHeader); + if ( status != NV_OK ) // if cap id read fails, then there's no AER + return status; + + // sucess, read the rest of the struct + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_UNCORRECTABLE_STATUS, &pAER->UncorrErrStatusReg); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_UNCORRECTABLE_MASK, &pAER->UncorrErrMaskReg); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_UNCORRECTABLE_SEVERITY,&pAER->UncorrErrSeverityReg); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_CORRECTABLE_STATUS, &pAER->CorrErrStatusReg); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_CORRECTABLE_MASK, &pAER->CorrErrMaskReg); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_ADVANCED_CAP_CONTROL, &pAER->AEcapCrtlReg); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_HEADER_LOG+ 0, (NvU32*)&pAER->HeaderLogReg.Header[0]); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_HEADER_LOG+ 4, (NvU32*)&pAER->HeaderLogReg.Header[1]); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_HEADER_LOG+ 8, (NvU32*)&pAER->HeaderLogReg.Header[2]); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_HEADER_LOG+12, (NvU32*)&pAER->HeaderLogReg.Header[3]); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_ROOT_ERROR_COMMAND, &pAER->RootErrCmd); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_ROOT_ERROR_STATUS, &pAER->RooErrStatus); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_AER_ERROR_SOURCE, &pAER->ErrSrcReg); + + status = NV_OK; + } + + return status; +} + +NV_STATUS +clPcieReadL1SsCapability_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + PPexL1SubstateCapability pL1Ss +) +{ + NV_STATUS status = NV_ERR_GENERIC; + + if (pL1Ss) + { + portMemSet(pL1Ss, 0, sizeof(*pL1Ss)); + status = clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_L1_SS_CAP_HDR, &pL1Ss->PexEnhCapHeader); + if ( status != NV_OK ) // if cap id read fails, then there's no L1 PM Substates Capability + return status; + + // sucess, read the rest of the struct + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_L1_SS_CAP_REG, &pL1Ss->Capabilities); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_L1_SS_CTRL1_REG, &pL1Ss->Control1Reg); + clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_L1_SS_CTRL2_REG, &pL1Ss->Control2Reg); + status = NV_OK; + } + + return status; +} + +NV_STATUS +clPcieReadDevCtrlStatus_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvU32* pDevCtrlStatusFlags, + NvU32* pDevCtrlStatus +) +{ + NV_STATUS status = NV_ERR_GENERIC; + NvU32 clDevCtrlStatus = 0; + + if ( pDevCtrlStatusFlags ) + { + status = clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, + CL_PCIE_DEV_CTRL_STATUS, &clDevCtrlStatus); + + if ( status == NV_OK ) + { + if ( pDevCtrlStatus ) + *pDevCtrlStatus = clDevCtrlStatus; // optionally return full status + + *pDevCtrlStatusFlags = 0; + + if ( pCl->EnteredRecoverySinceErrorsLastChecked ) + { + 
pCl->EnteredRecoverySinceErrorsLastChecked = NV_FALSE; + *pDevCtrlStatusFlags |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_ENTERED_RECOVERY; + } + + if ( clDevCtrlStatus & CL_PCIE_DEVICE_CONTROL_STATUS_CORR_ERROR_DETECTED ) + *pDevCtrlStatusFlags |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_CORR_ERROR; + if ( clDevCtrlStatus & CL_PCIE_DEVICE_CONTROL_STATUS_NON_FATAL_ERROR_DETECTED ) + *pDevCtrlStatusFlags |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_NON_FATAL_ERROR; + if ( clDevCtrlStatus & CL_PCIE_DEVICE_CONTROL_STATUS_FATAL_ERROR_DETECTED ) + *pDevCtrlStatusFlags |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_FATAL_ERROR; + if ( clDevCtrlStatus & CL_PCIE_DEVICE_CONTROL_STATUS_UNSUPP_REQUEST_DETECTED ) + *pDevCtrlStatusFlags |= NV2080_CTRL_BUS_INFO_PCIE_LINK_ERRORS_UNSUPP_REQUEST; + } + } + + return status; +} + +NV_STATUS +clPcieClearDevCtrlStatus_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvU32* pDevCtrlStatus +) +{ + NV_STATUS status = NV_ERR_GENERIC; + NvU32 clDevCtrlStatus = 0; + + if ( pDevCtrlStatus ) + { + clDevCtrlStatus = *pDevCtrlStatus; + if ( clDevCtrlStatus == 0 ) + { + return NV_OK; + } + } + else + { + status = clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, + CL_PCIE_DEV_CTRL_STATUS, &clDevCtrlStatus); + if ( status != NV_OK ) + return status; + } + + status = clPcieWriteRootPortConfigReg(pGpu, pCl, CL_PCIE_DEV_CTRL_STATUS, clDevCtrlStatus); + + return status; +} + +NvBool +clUpstreamVgaDecodeEnabled_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + NvU32 domain; + NvU8 bus; + NvU32 PCIECapPtr; + NvU32 portCaps = 0; + void *handleUp; + NvU8 busUp, devUp, funcUp; + NvU16 vendorIDUp, deviceIDUp; + NvU16 bridgeCtl; + + domain = gpuGetDomain(pGpu); + bus = gpuGetBus(pGpu); + do + { + // find upstream port + handleUp = clFindP2PBrdg(pCl, domain, bus, + &busUp, &devUp, &funcUp, + &vendorIDUp, &deviceIDUp); + + // make sure handle was found + if (!handleUp) + { + return NV_FALSE; + } + + bus = busUp; + + if (NV_OK != clSetPortPcieCapOffset(pCl, handleUp, &PCIECapPtr)) + { + // + // If capability pointer is not found, ignore and move to next + // upstream port. + // + continue; + } + + portCaps = osPciReadDword(handleUp, + CL_PCIE_CAP - CL_PCIE_BEGIN + PCIECapPtr); + + // + // If the VGA Enable bit isn't set on any bridge between the device and + // the root port, bail early. + // + bridgeCtl = clPcieReadWord(pCl, domain, busUp, devUp, funcUp, + PCI_HEADER_TYPE1_BRIDGE_CONTROL); + if ((bridgeCtl & PCI_HEADER_TYPE1_BRIDGE_CONTROL_VGA_EN) == 0) + { + return NV_FALSE; + } + } while (!CL_IS_ROOT_PORT(portCaps)); + + // + // The VGA Enable bit must have been set in every bridge between the device + // and the root port. 
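+    // VGA Enable is the Bridge Control register bit that forwards legacy VGA
+    // ranges to the secondary bus, so a clear bit on any bridge in the path
+    // would have hidden the GPU's VGA decode.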
+ // + return NV_TRUE; +} + +static void * +objClPcieMapEnhCfgSpace +( + OBJCL *pCl, + NvU32 domain, + NvU8 bus, + NvU8 device, + NvU8 func +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJGPU *pGpu = NULL; + NvU32 gpuAttachCnt, gpuAttachMask, gpuInstance; + RmPhysAddr pcieConfigSpaceBase; + + NV_ASSERT_OR_RETURN(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS), NULL); + + gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuAttachMask, &gpuInstance)) != NULL) + { + if (pGpu->gpuClData.rootPort.vAddr != 0 + && domain == pGpu->gpuClData.rootPort.addr.domain + && bus == pGpu->gpuClData.rootPort.addr.bus + && device == pGpu->gpuClData.rootPort.addr.device + && func == pGpu->gpuClData.rootPort.addr.func) + { + return pGpu->gpuClData.rootPort.vAddr; + } + else if ((pGpu->gpuCfgAddr != NULL) && + (gpuGetDBDF(pGpu) != 0) && + (domain == gpuGetDomain(pGpu)) && + (bus == gpuGetBus(pGpu)) && + (device == gpuGetDevice(pGpu)) && + (func == 0)) + { + return pGpu->gpuCfgAddr; + } + } + + pcieConfigSpaceBase = clFindPcieConfigSpaceBase(pCl, domain, bus); + if (pcieConfigSpaceBase == 0) + { + return NULL; + } + + return osMapKernelSpace(pcieConfigSpaceBase | + bus << PCIE_BUS_SHIFT | + device << PCIE_DEVICE_SHIFT | + func << PCIE_FUNC_SHIFT, + RM_PAGE_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); +} + +static void +objClPcieUnmapEnhCfgSpace +( + OBJCL *pCl, + void *addr +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJGPU *pGpu = NULL; + NvU32 gpuAttachCnt, gpuAttachMask, gpuInstance; + + NV_ASSERT_OR_RETURN_VOID(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)); + + gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuAttachMask, &gpuInstance)) != NULL) + { + if ((pGpu->gpuClData.rootPort.vAddr == addr) || + (pGpu->gpuCfgAddr == addr)) + { + return; + } + } + + osUnmapKernelSpace(addr, RM_PAGE_SIZE); +} + +NvU16 +clPcieReadWord_IMPL +( + OBJCL *pCl, + NvU32 domain, + NvU8 bus, + NvU8 device, + NvU8 func, + NvU32 offset +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + NvU16 *pData = NULL, value = 0; + NV_ASSERT(offset + sizeof(value) <= 0x1000); // Enhanced Configuration Space is 4K + NV_ASSERT(device < PCI_MAX_DEVICES); // Maximum device number is 32 + NV_ASSERT(func < PCI_MAX_FUNCTIONS); // Maximum function number is 8 + + if (!pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) || + (offset + sizeof(value) > 0x1000)) + { + NV_ASSERT_FAILED("clPcieReadWord() failed!"); + return 0; + } + + // Check if there is an OS specific implementation + void *handle = osPciInitHandle(domain, bus, device, func, NULL, NULL); + if ((handle != NULL) && osTestPcieExtendedConfigAccess(handle, offset)) + { + return osPciReadWord(handle, offset); + } + + NV_ASSERT_OR_RETURN(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS), 0); + + pData = objClPcieMapEnhCfgSpace(pCl, domain, bus, device, func); + if (!pData) + return 0; + value = pData[offset/sizeof(value)]; + objClPcieUnmapEnhCfgSpace(pCl, pData); + + return value; +} + +NvU32 +clPcieReadDword_IMPL +( + OBJCL *pCl, + NvU32 domain, + NvU8 bus, + NvU8 device, + NvU8 func, + NvU32 offset +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + NvU32 *pData = NULL, value = 0; + NV_ASSERT(offset + sizeof(value) <= 0x1000); // Enhanced Configuration Space is 4K + 
NV_ASSERT(device < PCI_MAX_DEVICES); // Maximum device number is 32 + NV_ASSERT(func < PCI_MAX_FUNCTIONS); // Maximum function number is 8 + + if (!pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) || + (offset + sizeof(value) > 0x1000)) + { + NV_ASSERT_FAILED("clPcieReadDword() failed!"); + return 0; + } + + // Check if there is an OS specific implementation + void *handle = osPciInitHandle(domain, bus, device, func, NULL, NULL); + if ((handle != NULL) && osTestPcieExtendedConfigAccess(handle, offset)) + { + return osPciReadDword(handle, offset); + } + + NV_ASSERT_OR_RETURN(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS), 0); + + pData = objClPcieMapEnhCfgSpace(pCl, domain, bus, device, func); + if (!pData) + return 0; + value = MEM_RD32(pData + offset/sizeof(value)); + objClPcieUnmapEnhCfgSpace(pCl, pData); + + return value; +} + +void +clPcieWriteWord_IMPL +( + OBJCL *pCl, + NvU32 domain, + NvU8 bus, + NvU8 device, + NvU8 func, + NvU32 offset, + NvU16 value +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + NvU16 *pData = NULL; + NV_ASSERT(offset + sizeof(value) <= 0x1000); // Enhanced Configuration Space is 4K + NV_ASSERT(device < PCI_MAX_DEVICES); // Maximum device number is 32 + NV_ASSERT(func < PCI_MAX_FUNCTIONS); // Maximum function number is 8 + + if (!pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) || + (offset + sizeof(value) > 0x1000)) + { + NV_ASSERT_FAILED("clPcieWriteWord() failed!"); + return; + } + + // Check if there is an OS specific implementation + void *handle = osPciInitHandle(domain, bus, device, func, NULL, NULL); + if ((handle != NULL) && osTestPcieExtendedConfigAccess(handle, offset)) + { + osPciWriteWord(handle, offset, value); + return; + } + + NV_ASSERT_OR_RETURN_VOID(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)); + + pData = objClPcieMapEnhCfgSpace(pCl, domain, bus, device, func); + if (!pData) + return; + pData[offset/sizeof(value)] = value; + objClPcieUnmapEnhCfgSpace(pCl, pData); +} + +void +clPcieWriteDword_IMPL +( + OBJCL *pCl, + NvU32 domain, + NvU8 bus, + NvU8 device, + NvU8 func, + NvU32 offset, + NvU32 value +) +{ + + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + NvU32 *pData = NULL; + NV_ASSERT(offset + sizeof(value) <= 0x1000); // Enhanced Configuration Space is 4K + NV_ASSERT(device < PCI_MAX_DEVICES); // Maximum device number is 32 + NV_ASSERT(func < PCI_MAX_FUNCTIONS); // Maximum function number is 8 + + if (!pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) || + (offset + sizeof(value) > 0x1000)) + { + NV_ASSERT_FAILED("clPcieWriteDword() failed!"); + return; + } + + // Check if there is an OS specific implementation + void *handle = osPciInitHandle(domain, bus, device, func, NULL, NULL); + if ((handle != NULL) && osTestPcieExtendedConfigAccess(handle, offset)) + { + osPciWriteDword(handle, offset, value); + return; + } + + NV_ASSERT_OR_RETURN_VOID(!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS)); + + pData = objClPcieMapEnhCfgSpace(pCl, domain, bus, device, func); + if (!pData) + return; + pData[offset/sizeof(value)] = value; + objClPcieUnmapEnhCfgSpace(pCl, pData); +} + +// root port table and specific overrides + +NV_STATUS Broadcom_HT2100_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_EXTENDED_TAG_FIELD_NOT_CAPABLE, NV_TRUE); + return NV_OK; +} + +NV_STATUS Intel_RP25XX_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + // This root port has an AER cap, but it is not 
advertised. + // We need to enable it for some bug workarounds. + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE)) + { + pGpu->gpuClData.rootPort.PCIEErrorCapPtr = 0x1c0; + if (!pGpu->getProperty(pGpu, PDB_PROP_GPU_BEHIND_BRIDGE)) + { + // Intel chipset is real root port and upstream of the GPU. + pGpu->gpuClData.upstreamPort.PCIEErrorCapPtr = 0x1c0; + } + } + return NV_OK; +} + +NV_STATUS Intel_RP81XX_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_NOSNOOP_NOT_CAPABLE, NV_TRUE); + pCl->setProperty(pCl, PDB_PROP_CL_RELAXED_ORDERING_NOT_CAPABLE, NV_TRUE); + return NV_OK; +} + +NV_STATUS Intel_RP3C0X_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR, NV_TRUE); + + return NV_OK; +} + +NV_STATUS Intel_RP0C0X_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR, NV_TRUE); + + return NV_OK; +} + +NV_STATUS Intel_RP2F0X_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + OBJHYPERVISOR *pHypervisor = SYS_GET_HYPERVISOR(pSys); + NvU32 domain; + // Socket 0 default PCIE location is bus = 0x7f, device = 0x1e, func = 0x3 + NvU16 bus = 0x7f; + NvU8 device = 0x1e; + NvU8 func = 3; + void *handle; + NvU16 vendorID, deviceID; + NvU32 val; + NvU32 eax, ebx, ecx, edx; + NvBool bC0orC1CPUID = NV_FALSE; + + // Determine if CPU is C0/C1 Stepping by CPUID + if (pOS->osNv_cpuid(pOS, 1, 0, &eax, &ebx, &ecx, &edx)) + { + // CPUID is returned to eax + bC0orC1CPUID = (eax == INTEL_C0_OR_C1_CPUID); + } + + pCl->setProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR, NV_TRUE); + + if (pGpu == NULL) + { + return NV_ERR_GENERIC; + } + // For GPU passthrough case, bail out and remain on Gen3. 
+ if (pHypervisor && pHypervisor->bIsHVMGuest) + { + return NV_OK; + } + // The CAPID logic only works for detecting C0 vs C1 CPU + // So we need to rule out other CPUs first and apply this logic only for C0/C1 CPU (Bug 1694363) + if (bC0orC1CPUID) + { + domain = gpuGetDomain(pGpu); + handle = osPciInitHandle(domain, (NvU8)bus, device, func, + &vendorID, &deviceID); + if (handle && (vendorID == 0x8086) && + ((deviceID & 0xfff0) == INTEL_HASWELL_POWER_CONTROL_UNIT_DEVICE_ID) && + pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE)) + { + val = clPcieReadDword(pCl, 0, (NvU8)bus, + device, func, HASWELL_CPU_CAPID4_OFFSET); + // Bit 29 of CAPID4 has 1 for C1-CPU and 0 for C0-CPU + if ((val & (1 << 29)) == 0) + { + pSys->setProperty(pSys, PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING, NV_TRUE); + } + } + else + { + PBUSTOPOLOGYINFO pBusTopologyInfo = pCl->pBusTopologyInfo; + while (pBusTopologyInfo) + { + if ((pBusTopologyInfo->busInfo.vendorID == 0x8086) && + ((pBusTopologyInfo->busInfo.deviceID & 0xFFF0) == INTEL_HASWELL_POWER_CONTROL_UNIT_DEVICE_ID) && + pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE)) + { + val = clPcieReadDword(pCl, 0, + pBusTopologyInfo->bus, + pBusTopologyInfo->device, + pBusTopologyInfo->func, + HASWELL_CPU_CAPID4_OFFSET); + // Bit 29 of CAPID4 has 1 for C1-CPU and 0 for C0-CPU + if ((val & (1 << 29)) == 0) + { + pSys->setProperty(pSys, PDB_PROP_SYS_HASWELL_CPU_C0_STEPPING, NV_TRUE); + } + break; + } + pBusTopologyInfo = pBusTopologyInfo->next; + } + } + } + + return NV_OK; +} + +// Intel Broadwell Setup Function +NV_STATUS Intel_Broadwell_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + switch (pCl->FHBBusInfo.deviceID) + { + case DEVICE_ID_INTEL_BROADWELL_U_HOST_BRIDGE: + { + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + // Enable L0s and L1 on mobile only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L0S_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + break; + } + + case DEVICE_ID_INTEL_BROADWELL_H_HOST_BRIDGE: + { + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_FALSE, NV_FALSE); + break; + } + default: + break; + } + + return NV_OK; +} + +// Intel Skylake Setup Function +NV_STATUS Intel_Skylake_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + pCl->setProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR, NV_TRUE); + + switch (pCl->FHBBusInfo.deviceID) + { + case DEVICE_ID_INTEL_SKYLAKE_U_HOST_BRIDGE: + { + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + // Enable L1 on mobile only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + break; + } + + case DEVICE_ID_INTEL_SKYLAKE_S_HOST_BRIDGE: + case DEVICE_ID_INTEL_SKYLAKE_H_HOST_BRIDGE: + { + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + break; + } + + case DEVICE_ID_INTEL_KABYLAKE_H_HOST_BRIDGE: + { + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + // Enable L1 on mobile only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + + break; + } + + default: + break; + } + + return NV_OK; +} + +// Intel PCH (0x9D18) Setup Function +NV_STATUS Intel_Skylake_U_Pch_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + // Set ASPM L0S\L1 properties + 
_Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED, NV_TRUE); + + // Enable L1 on mobile only + pGpu->setProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_MOBILE_ONLY, NV_TRUE); + + pCl->setProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR, NV_TRUE); + + return NV_OK; +} + +// Intel PCH (0xA117, 0xA118) Setup Function +NV_STATUS Intel_Skylake_H_Pch_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + pGpu->setProperty(pGpu, PDB_PROP_GPU_UPSTREAM_PORT_L1_POR_SUPPORTED, NV_TRUE); + + pCl->setProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR, NV_TRUE); + + return NV_OK; +} + +// Intel Kabylake Setup Function +NV_STATUS Intel_Kabylake_Y_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + pCl->setProperty(pCl, PDB_PROP_CL_IS_CHIPSET_IN_ASPM_POR_LIST, NV_TRUE); + + // Set ASPM L0S\L1 properties + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_TRUE); + + // Enable L1 on mobile only + pCl->setProperty(pCl, PDB_PROP_CL_ASPM_L1_CHIPSET_ENABLED_MOBILE_ONLY, NV_TRUE); + + pCl->setProperty(pCl, PDB_PROP_CL_ROOTPORT_NEEDS_NOSNOOP_WAR, NV_TRUE); + + return NV_OK; +} + +// Setup function to disable L0s for AMD root port 1483 +NV_STATUS AMD_RP1483_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + // + // Set ASPM L0S\L1 properties + // Bug id: 200533783 + // + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + return NV_OK; +} + +// +// Setup function to disable L0s for AMD root port 1480 +// AMD Castle Peak +// +NV_STATUS AMD_RP1480_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + // + // Set ASPM L0S\L1 properties + // Bug id: 200533783 + // + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + return NV_OK; +} + +// +// Setup function to disable L0s for AMD root port 1630 +// AMD Renoir-H +// +NV_STATUS AMD_RP1630_setupFunc(OBJGPU *pGpu, OBJCL *pCl) +{ + // + // Set ASPM L0S\L1 properties + // Bug id: 200533783 + // + _Set_ASPM_L0S_L1(pCl, NV_TRUE, NV_FALSE); + + return NV_OK; +} + +static NV_STATUS +objClGpuIs3DController(OBJGPU *pGpu) +{ + NvU8 bus, dev; + NvU32 domain; + NvU16 vendorID, deviceID; + void *handle; + + domain = gpuGetDomain(pGpu); + bus = gpuGetBus(pGpu); + dev = gpuGetDevice(pGpu); + + // read at bus, device (we're always function 0) + handle = osPciInitHandle(domain, bus, dev, 0, &vendorID, &deviceID); + if (!handle) + { + NV_PRINTF(LEVEL_ERROR, "GPU Config Space not accessible \n"); + return NV_ERR_GENERIC; + } + + if (osPciReadWord(handle, PCI_COMMON_CLASS_SUBCLASS) == PCI_COMMON_CLASS_SUBBASECLASS_3DCTRL) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_3D_CONTROLLER, NV_TRUE); + } + + return NV_OK; +} + +NV_STATUS +clPcieGetMaxCapableLinkWidth_IMPL +( + OBJCL *pCl, + OBJGPU *pGpu, + NvU32 *result +) +{ + NvU32 linkCap; + + // + // Taking care only mobile systems about system max capable link width issue + // of bug 427155. + // + if ((kbifGetBusIntfType_HAL(GPU_GET_KERNEL_BIF(pGpu)) == + NV2080_CTRL_BUS_INFO_TYPE_PCI_EXPRESS) && + pGpu->gpuClData.rootPort.addr.valid) + { + // We are already disabling ASLM for BR03, so leave it alone. 
+ if (!pCl->pHWBC || pCl->pHWBC->bcRes != HWBC_NVIDIA_BR03) + { + if (clPcieReadPortConfigReg(pGpu, pCl, &pGpu->gpuClData.rootPort, CL_PCIE_LINK_CAP, &linkCap) == NV_OK) + { + *result = CL_MAX_LINK_WIDTH(linkCap); + return NV_OK; + } + } + } + + *result = 0; + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +clPcieIsRelaxedOrderingSafe_IMPL +( + OBJCL *pCl, + OBJGPU *pGpu, + NvBool *result +) +{ + // + // TODO. This is left for a follow-on change. The first change allows the + // force-enable option, and establishes all the plumbing for topology + // detection, but it's considered always safe to enable relaxed ordering, + // until this logic is developed. + // + *result = NV_TRUE; + return NV_OK; +} + +/** + * Compares two OBJGPUs to see if they are behind the same upstream bridge. + * Used for identifying GPUs behind the same lowest level BR04. + * + * @param[in] pGpu1 First gpu to compare + * @param[in] pGpu2 Second gpu to compare + * + * @return NV_TRUE if the two GPUs are behind the same bridge. + */ +NvBool +clAreGpusBehindSameBridge_IMPL +( + OBJCL *pCl, + OBJGPU *pGpu1, + OBJGPU *pGpu2 +) +{ + NV_ASSERT((pGpu1 != NULL) && + (pGpu2 != NULL)); + + if ((pGpu1->gpuClData.boardUpstreamPort.addr.valid) && + (pGpu2->gpuClData.boardUpstreamPort.addr.valid) && + (pGpu1->gpuClData.boardUpstreamPort.addr.domain + == pGpu2->gpuClData.boardUpstreamPort.addr.domain) && + (pGpu1->gpuClData.boardUpstreamPort.addr.bus + == pGpu2->gpuClData.boardUpstreamPort.addr.bus) && + (pGpu1->gpuClData.boardUpstreamPort.addr.device + == pGpu2->gpuClData.boardUpstreamPort.addr.device) && + (pGpu1->gpuClData.boardUpstreamPort.addr.func + == pGpu2->gpuClData.boardUpstreamPort.addr.func)) + { + return NV_TRUE; + } + + return NV_FALSE; +} + +/*! + * Pulls the devid info out of the pGpu to pass to gpuDevIdIsMultiGpuBoard(). + * + * @param[in] pGpu OBJGPU pointer + * @param[out] pbIsGX2 NvBool pointer in which to store whether GPU an an SLI + * mutliboard config. May be NULL if caller does not + * care to receive this information. + * @parma[out] pbIsGemini NvBool pointer in which to store whether GPU is a + * Gemini multiboard config. May be NULL if caller does + * not care to receive this information. + * + * @return NV_TRUE if the GPU is in a multigpu board, NV_FALSE otherwise + */ +NvBool +gpuIsMultiGpuBoard +( + OBJGPU *pGpu, + NvBool *pbIsGX2, + NvBool *pbIsGemini +) +{ + + if (pbIsGX2 != NULL) + *pbIsGX2 = NV_FALSE; + if (pbIsGemini != NULL) + *pbIsGemini = NV_FALSE; + + if ((DRF_VAL(_PCI, _DEVID, _DEVICE, pGpu->idInfo.PCIDeviceID) == NV_PCI_DEVID_DEVICE_PG171_SKU200_PG179_SKU220) && + (DRF_VAL(_PCI, _SUBID, _DEVICE, pGpu->idInfo.PCISubDeviceID) == NV_PCI_SUBID_DEVICE_PG171_SKU200) && + (DRF_VAL(_PCI, _SUBID, _VENDOR, pGpu->idInfo.PCISubDeviceID) == NV_PCI_SUBID_VENDOR_NVIDIA)) + { + if (pbIsGemini != NULL) + *pbIsGemini = NV_TRUE; + + return NV_TRUE; + } + + return NV_FALSE; + +} + +/* + * @brief Scans for the RSDP structure and returns the address of + * RSDT or XSDT ACPI table. + * + * We will use these tables to find the addresses of other tables. + * http://www.acpi.info/DOWNLOADS/ACPIspec40.pdf sections 5.2.5.1 and 5.2.5.3. 
+ *
+ * @param[in]  pOS        OBJOS pointer
+ * @param[in]  start      RmPhysAddr starting point from where to look for the RSDP table
+ * @param[in]  len        NvU64 limit until we look for the RSDP table
+ * @param[out] pRsdtAddr  NvU32* physical address of the RSDT table
+ * @param[out] pXsdtAddr  NvU64* physical address of the XSDT table
+ *
+ * @returns NV_TRUE if the RSDT or XSDT table has been found, NV_FALSE otherwise.
+ */
+static NvBool scanForRsdtXsdtTables(OBJOS *pOS,
+                                    RmPhysAddr start,
+                                    NvU64 len,
+                                    NvU32 *pRsdtAddr,
+                                    NvU64 *pXsdtAddr)
+{
+    NvU8 *pData = NULL;
+    NvU64 i, c;
+    NvU8 checksum = 0;
+    NvU8 rsdpRev = 0;
+    NvU32 mode = NV_MEMORY_UNCACHED;
+
+    if (pOS->getProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE))
+        mode = NV_MEMORY_CACHED;
+
+    pData = osMapKernelSpace(start, len, mode, NV_PROTECT_READ_WRITE);
+    if (NULL == pData)
+    {
+        return NV_FALSE;
+    }
+
+    //
+    // Look for RSDP signature 'RSD PTR ' which is
+    // always located at a 16 byte boundary.
+    //
+    for (i = 0; i < len; i += 0x10)
+    {
+        if ((MEM_RD32(pData + i) == NvU32_BUILD(' ', 'D', 'S', 'R')) &&
+            (MEM_RD32(pData + i + 4) == NvU32_BUILD(' ', 'R', 'T', 'P')))
+        {
+            for (c = 0 ; c < 20 && c < len; c++)
+            {
+                checksum += MEM_RD08(pData + i + c);
+            }
+
+            if (checksum == 0)
+            {
+                //
+                // Get RSDP structure revision. RSDP revision must be
+                // either 0 (ACPI 1.0) or 2 (ACPI 2.0 or later).
+                //
+                rsdpRev = MEM_RD08(pData + i + 15);
+
+                //
+                // As per ACPI specification, we must use XSDT if
+                // the revision is greater than 1, otherwise use RSDT.
+                //
+                if (rsdpRev > 1)
+                {
+                    *pXsdtAddr = MEM_RD32(pData + i + 24) | ((NvU64)MEM_RD32(pData + i + 28) << 32);
+                }
+                else
+                {
+                    *pRsdtAddr = MEM_RD32(pData + i + 16);
+                }
+                break;
+            }
+        }
+    }
+    osUnmapKernelSpace((void*)pData, len);
+
+
+    return ((*pRsdtAddr != 0) || (*pXsdtAddr != 0));
+}
+
+/*
+ * @brief Returns the addresses of the tables RSDT and XSDT
+ *
+ * @param[out] pRsdtAddr  NvU32* physical address of the RSDT table
+ * @param[out] pXsdtAddr  NvU64* physical address of the XSDT table
+ *
+ * @returns NV_OK if the RSDT or XSDT table was found, NV_ERR_* otherwise.
+ */
+NV_STATUS
+clGetRsdtXsdtTablesAddr_IMPL
+(
+    OBJCL *pCl,
+    NvU32 *pRsdtAddr,
+    NvU64 *pXsdtAddr
+)
+{
+    OBJSYS *pSys = SYS_GET_INSTANCE();
+    OBJOS *pOS = SYS_GET_OS(pSys);
+    volatile NvU16 *edbaSeg = NULL;
+    NvU32 startAddr = 0;
+    NvU32 size = 0;
+    NvU32 mode = NV_MEMORY_UNCACHED;
+    NV_STATUS status = NV_ERR_GENERIC;
+
+
+    if ((pRsdtAddr == NULL) || (pXsdtAddr == NULL))
+    {
+        return NV_ERR_INVALID_STATE;
+    }
+
+    //
+    // It doesn't make sense to search for the ACPI tables in the BIOS area
+    // on ARM, so just skip that here.
+    //
+    if (NVCPU_IS_FAMILY_ARM)
+    {
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    //
+    // Sections from http://www.acpi.info/DOWNLOADS/ACPIspec40.pdf referenced below.
+    // 1. Section 5.2.5.1 and 5.2.5.2 - get the Root System Description Pointer (RSDP)
+    //    Search physical memory ranges on 16-byte boundaries for a valid
+    //    Root System Description Pointer structure signature and checksum
+    //    match as follows:
+    //    * The first 1 KB of the Extended BIOS Data Area (EBDA).
+    //      For EISA or MCA systems, the EBDA can be found in the two-byte location
+    //      40:0Eh on the BIOS data area.
+    //    * The BIOS read-only memory space between 0E0000h and 0FFFFFh.
+    //    * In Unified Extensible Firmware Interface (UEFI) enabled systems, a pointer
+    //      to the RSDP structure exists within the EFI System Table (section 5.2.5.2).
+    //
+    // 2.
Section 5.2.5.3 + // The RSDP structure contains the RSDT address at offset 16 and the XSDT address at offset 24 + // +# define ACPI_EBDA_SEG_ADDR 0x40e +# define ACPI_EBDA_LEN 0x400 +# define BIOS_RO_MEMORY_BASE 0xE0000 +# define BIOS_RO_MEMORY_SIZE 0x20000 +# define ACPI_RSDP_STRUCT_LEN 0x24 + + if (pOS->getProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE)) + { + mode = NV_MEMORY_CACHED; + } + + // First try and get RSDP from OS EFI tables. + if (RMCFG_FEATURE_PLATFORM_UNIX && (pSys->getProperty(pSys, PDB_PROP_SYS_IS_UEFI))) + { + status = osGetAcpiRsdpFromUefi(&startAddr); + if (status != NV_OK) + { + goto clGetRsdtXsdtTablesAddr_exit; + } + + size = ACPI_RSDP_STRUCT_LEN; + if (scanForRsdtXsdtTables(pOS , startAddr, size, pRsdtAddr, pXsdtAddr) == NV_TRUE) + { + status = NV_OK; + goto clGetRsdtXsdtTablesAddr_exit; + } + } + + // + // Now try legacy methods to find RSDP. Search for RSDP + // in first 1 KB of the Extended BIOS Data Area (EBDA). + // + edbaSeg = osMapKernelSpace(ACPI_EBDA_SEG_ADDR, ACPI_EBDA_LEN, + mode, NV_PROTECT_READ_WRITE); + if (NULL != edbaSeg) + { + startAddr = MEM_RD16(edbaSeg) << 4; + size = ACPI_EBDA_LEN; + osUnmapKernelSpace((void*)edbaSeg, ACPI_EBDA_LEN); + if (scanForRsdtXsdtTables(pOS , startAddr, size, pRsdtAddr, pXsdtAddr) == NV_TRUE) + { + status = NV_OK; + goto clGetRsdtXsdtTablesAddr_exit; + } + } + + // Finally, search for RSDP in BIOS read-only memory space. + startAddr = BIOS_RO_MEMORY_BASE; + size = BIOS_RO_MEMORY_SIZE; + if (scanForRsdtXsdtTables(pOS, startAddr, size, pRsdtAddr, pXsdtAddr) == NV_TRUE) + { + status = NV_OK; + } + +clGetRsdtXsdtTablesAddr_exit: + return status; +} + +/* + * @brief Gets a copy of the MCFG table in buffer + * + * @param[in] POS OBJOS pointer + * @param[out] ppMcfgTable void** pointer to buffer for MCFG table + * @param[out] pTableSize NvU32* pointer to MCFG table size + * + * @returns NV_TRUE the RDST or XDST table has been found, NV_FALSE otherwise. + */ +NvBool +clGetMcfgTableFromOS_IMPL +( + OBJCL *pCl, + OBJOS *pOS, + void **ppMcfgTable, + NvU32 *pTableSize +) +{ + NvU32 retSize = 0; + + NV_ASSERT_OR_RETURN (!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS), NV_FALSE); + + if (osInitGetAcpiTable() == NV_OK) + { + // + // The first call to osGetAcpiTable is to get the ACPI table size. + // A non-zero retSize means the function has returned a valid + // size for the table specified. + // + if ((osGetAcpiTable(NV_ACPI_TABLE_SIGNATURE_GFCM, NULL, + *pTableSize, &retSize) != NV_OK) && retSize) + { + *pTableSize = retSize; + + // Allocate memory as per the size of table + *ppMcfgTable = portMemAllocNonPaged(*pTableSize); + if (*ppMcfgTable == NULL) + { + return NV_FALSE; + } + + // Second call to actually get the table + if (osGetAcpiTable(NV_ACPI_TABLE_SIGNATURE_GFCM, ppMcfgTable, + *pTableSize, &retSize) == NV_OK) + { + pOS->setProperty(pOS, PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI, NV_TRUE); + } + else + { + portMemFree(*ppMcfgTable); + *pTableSize = 0; + } + } + + return NV_TRUE; + } + + return (((*pTableSize != 0) && (*ppMcfgTable != NULL)) ? NV_TRUE : NV_FALSE); +} + +/* + * @brief Scans the RSDT and/or XSDT tables to get the address of the table matching the + * signature. + * + * http://www.acpi.info/DOWNLOADS/ACPIspec40.pdf sections 5.2.7 and 5.2.8 + * The description header addresses start at offset 36. 
+ * + * @param[in] POS OBJOS pointer + * @param[in] rsdtAddr NvU64 physical address of the RSDT table + * @param[in] xsdtAddr NvU64 physical address of the XSDT table + * @param[in] tableSignature NvU32 the table signature + * + * @returns the address of the DSDT table, or 0 if an error occurred. + */ +NvU64 +clScanForTable_IMPL +( + OBJCL *pCl, + OBJOS *pOS, + NvU64 rsdtAddr, + NvU64 xsdtAddr, + NvU32 tableSignature +) +{ + NvU8 *pData = NULL, *pHeader = NULL; + NvU64 i, c, step; + NvU32 len = 0; + NvU8 checksum = 0; + NvU64 tableAddr = 0; + NvU64 sdtAddr; + NvU32 signature; + NvU32 current_sig; + NvBool bTableFound = NV_FALSE; + NvU32 mode = NV_MEMORY_UNCACHED; + + if ((rsdtAddr == 0) && (xsdtAddr == 0)) + { + return 0; + } + + if (rsdtAddr) + { + sdtAddr = rsdtAddr; + signature = NV_ACPI_TABLE_SIGNATURE_TDSR; + step = 4; + } + else + { + sdtAddr = xsdtAddr; + signature = NV_ACPI_TABLE_SIGNATURE_TDSX; + step = 8; + } + + // First get the length of RSDT/XSDT table + if (pOS->getProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE)) + { + mode = NV_MEMORY_CACHED; + } + + // + // The length of any system description table(SDT), in bytes, including the header, + // is always at byte offset 4 from the table starting offset. The length of table + // is a 4 byte field within SDT header. + // + pData = osMapKernelSpace(sdtAddr, 8, mode, NV_PROTECT_READ_WRITE); + + if (NULL == pData) + { + goto scanForTable_exit; + } + + current_sig = MEM_RD32((NvU32 *)(pData)); + + if (current_sig != signature) + { + goto scanForTable_exit; + } + + len = MEM_RD32((NvU32 *)(pData + 4)); + + osUnmapKernelSpace((void*)pData, 8); + + // Now map the whole of RSDT/XSDT table + pData = osMapKernelSpace(sdtAddr, len, mode, NV_PROTECT_READ_WRITE); + if (NULL == pData) + { + goto scanForTable_exit; + } + + for (c = 0 ; c < len; c++) + { + checksum += MEM_RD08(&(pData[c])); + } + + if (checksum != 0) + { + NV_PRINTF(LEVEL_ERROR, "Checksum mismatch\n"); + goto scanForTable_exit; + } + + // Each System Descriptor Table begins with a "SDT Header" of length 36 bytes. + i = 36; + + // + // Now get the table specified by tableSignature by parsing RSDT/XSDT table + // (specified by table signature). Each entry in RSDT/XSDT refers to other + // ACPI Description Tables. 
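+    // RSDT entries are 4-byte physical addresses while XSDT entries are 8 bytes
+    // wide, which is why 'step' was set to 4 or 8 above.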
+ // + while (i < len) + { + if (rsdtAddr) + { + tableAddr = MEM_RD32((NvU32 *)(pData + i)); + } + else + { + portMemCopy((NvU8 *)&tableAddr, sizeof(NvU64), (NvU8 *)(pData + i), sizeof(NvU64)); + } + pHeader = osMapKernelSpace(tableAddr, 8, mode, NV_PROTECT_READ_WRITE); + if (pHeader == NULL) + { + i += step; + continue; + } + + if (MEM_RD32((NvU32 *)(pHeader)) == tableSignature) + { + bTableFound = NV_TRUE; + break; + } + + osUnmapKernelSpace((void*)pHeader, 8); + pHeader = NULL; + i += step; + } + +scanForTable_exit: + if (pData) + { + osUnmapKernelSpace((void*)pData, len); + } + + if (pHeader) + { + osUnmapKernelSpace((void*)pHeader, 8); + } + + if (bTableFound == NV_TRUE) + { + return tableAddr; + } + + // Table not found + return 0; +} + +typedef struct +{ + NvU64 BaseAddress; + NvU32 PCISegmentGroupNumber:16; + NvU32 StartBusNumber:8; + NvU32 EndBusNumber:8; + NvU32 Reserved; +} MCFG_ADDRESS_ALLOCATION_STRUCTURE; + +/* + * @brief Store PCI-E config space base addresses for all domain numbers + * + * @param[in] pOS OBJOS pointer + * @param[in] pCl OBJCL pointer + * @param[in] pMcfgTable Pointer to buffer for MCFG table + * @param[in] len Length of MCFG table + * + */ +static NV_STATUS storePcieGetConfigSpaceBaseFromMcfgTable(OBJOS *pOS, OBJCL *pCl, NvU8 *pMcfgTable, NvU32 len) +{ + MCFG_ADDRESS_ALLOCATION_STRUCTURE *pMcfgAddressAllocationStructure; + MCFG_ADDRESS_ALLOCATION_STRUCTURE mcfgAddressAllocationStructure; + NvU8 EndBusNumber; + NvU32 i; + NV_STATUS status = NV_OK; + + // + // Get the Exteneded PCI config space address by parsing + // MCFG table through all config space base address + // structures of length 44 bytes each. + // + i = 44; + + while (i < len) + { + if ((len - i) < sizeof(MCFG_ADDRESS_ALLOCATION_STRUCTURE)) + { + // These are trailing bytes, we can exit the loop now. + break; + } + portMemCopy((NvU8*)&mcfgAddressAllocationStructure, sizeof(MCFG_ADDRESS_ALLOCATION_STRUCTURE), (NvU8*)(pMcfgTable + i), sizeof(MCFG_ADDRESS_ALLOCATION_STRUCTURE)); + pMcfgAddressAllocationStructure = &mcfgAddressAllocationStructure; + + // Fix up Bad System Bioses -- See Bug 715753 + EndBusNumber = (NvU8)pMcfgAddressAllocationStructure->EndBusNumber; + + if (!((NvU8)pMcfgAddressAllocationStructure->StartBusNumber < EndBusNumber)) + { + EndBusNumber = (NvU8)(PCI_MAX_BUSES - 1); + } + + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE)) + { + // node for domain 0 is added in pCl->pPcieConfigSpaceBase by chipsetInfo[i].setupFunc(pCl) + PPCIECONFIGSPACEBASE pPcieConfigSpaceBase = pCl->pPcieConfigSpaceBase; + + while (pPcieConfigSpaceBase) + { + if (pPcieConfigSpaceBase->domain == (NvU32)pMcfgAddressAllocationStructure->PCISegmentGroupNumber) + { + // + // node for this domain already exist in pCl->pPcieConfigSpaceBase linked-list, + // if start and end bus number doesn't match, update the existing node + // + if ((pPcieConfigSpaceBase->startBusNumber != (NvU8)pMcfgAddressAllocationStructure->StartBusNumber) || + (pPcieConfigSpaceBase->endBusNumber != (NvU8)EndBusNumber)) + { + pPcieConfigSpaceBase->startBusNumber = (NvU8)pMcfgAddressAllocationStructure->StartBusNumber; + pPcieConfigSpaceBase->endBusNumber = (NvU8)EndBusNumber; + pPcieConfigSpaceBase->baseAddress = pMcfgAddressAllocationStructure->BaseAddress; + } + break; + } + pPcieConfigSpaceBase = pPcieConfigSpaceBase->next; + } + + if (!pPcieConfigSpaceBase) + { + // There are multiple domains, node for this domain doesn't exist, insert node for this domain in the linked-list. 
+ status = clInsertPcieConfigSpaceBase(pCl, + pMcfgAddressAllocationStructure->BaseAddress, + (NvU32)pMcfgAddressAllocationStructure->PCISegmentGroupNumber, + (NvU8)pMcfgAddressAllocationStructure->StartBusNumber, + (NvU8)EndBusNumber); + } + } + else + { + status = clInsertPcieConfigSpaceBase(pCl, + pMcfgAddressAllocationStructure->BaseAddress, + (NvU32)pMcfgAddressAllocationStructure->PCISegmentGroupNumber, + (NvU8)pMcfgAddressAllocationStructure->StartBusNumber, + (NvU8)EndBusNumber); + } + + if (status != NV_OK) + { + break; + } + i += sizeof(MCFG_ADDRESS_ALLOCATION_STRUCTURE); + } + + return status; +} + +/* + * @brief Locate and store the PCI-E config space base address for all domain + * numbers from the MCFG table. + * + * @param[in] pCl OBJCL pointer + * + * @returns NV_OK if successfull, NV_ERR_* otherwise. + */ +NV_STATUS +clStorePcieConfigSpaceBaseFromMcfg_IMPL(OBJCL *pCl) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + NvU64 rsdtAddr = 0; + NvU64 xsdtAddr = 0; + NvU64 mcfgAddr = 0; + NvU8 *pData = NULL; + NvU32 len = 0; + NvU32 mode = NV_MEMORY_UNCACHED; + NvU8 checksum = 0; + NvU64 c = 0; + NV_STATUS status = NV_ERR_GENERIC; + + NV_ASSERT_OR_RETURN (!pOS->getProperty(pOS, PDB_PROP_OS_DOES_NOT_ALLOW_DIRECT_PCIE_MAPPINGS), NV_ERR_INVALID_STATE); + + if (!pCl->FHBAddr.valid) + { + return NV_ERR_INVALID_DATA; + } + + if (clGetMcfgTableFromOS(pCl, pOS, (void **)&pData, &len) == NV_FALSE) + { + // + // If OS api doesn't provide MCFG table then MCFG table address + // can be found by parsing RSDT/XSDT tables. + // + status = clGetRsdtXsdtTablesAddr(pCl, (NvU32*)&rsdtAddr, &xsdtAddr); + if (status != NV_OK) + { + goto clStorePcieConfigSpaceBaseFromMcfg_exit; + } + + mcfgAddr = clScanForTable(pCl, pOS, rsdtAddr, xsdtAddr, NV_ACPI_TABLE_SIGNATURE_GFCM); + if (mcfgAddr == 0) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto clStorePcieConfigSpaceBaseFromMcfg_exit; + } + + if (pOS->getProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE)) + { + mode = NV_MEMORY_CACHED; + } + + // Get MCFG Table Length + pData = osMapKernelSpace(mcfgAddr, 8, mode, NV_PROTECT_READ_WRITE); + if (NULL == pData) + { + status = NV_ERR_OPERATING_SYSTEM; + goto clStorePcieConfigSpaceBaseFromMcfg_exit; + } + + if (MEM_RD32((NvU32 *)(pData)) != NV_ACPI_TABLE_SIGNATURE_GFCM) + { + status = NV_ERR_INVALID_DATA; + goto clStorePcieConfigSpaceBaseFromMcfg_exit; + } + + len = MEM_RD32((NvU32 *)(pData + 4)); + osUnmapKernelSpace((void*)pData, 8); + + // Now Map whole table + pData = osMapKernelSpace(mcfgAddr, len, mode, NV_PROTECT_READ_WRITE); + if (NULL == pData) + { + status = NV_ERR_OPERATING_SYSTEM; + goto clStorePcieConfigSpaceBaseFromMcfg_exit; + } + + // Validate checksum + for (c = 0 ; c < len; c++) + { + checksum += MEM_RD08(&(pData[c])); + } + + if (checksum != 0) + { + status = NV_ERR_INVALID_DATA; + goto clStorePcieConfigSpaceBaseFromMcfg_exit; + } + + } + + status = storePcieGetConfigSpaceBaseFromMcfgTable(pOS, pCl, pData, len); + +clStorePcieConfigSpaceBaseFromMcfg_exit: + if (pData) + { + if (pOS->getProperty(pOS, PDB_PROP_OS_GET_ACPI_TABLE_FROM_UEFI)) + { + portMemFree(pData); + } + else + { + osUnmapKernelSpace((void*)pData, len); + } + } + + return status; +} + +NV_STATUS +clInsertPcieConfigSpaceBase_IMPL +( + OBJCL *pCl, + RmPhysAddr baseAddress, + NvU32 domain, + NvU8 startBusNumber, + NvU8 endBusNumber +) +{ + PPCIECONFIGSPACEBASE pPcieConfigSpaceBase; + + pPcieConfigSpaceBase = portMemAllocNonPaged(sizeof(PCIECONFIGSPACEBASE)); + if (pPcieConfigSpaceBase == 
NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + pPcieConfigSpaceBase->baseAddress = baseAddress; + pPcieConfigSpaceBase->domain = domain; + pPcieConfigSpaceBase->startBusNumber = startBusNumber; + pPcieConfigSpaceBase->endBusNumber = endBusNumber; + pPcieConfigSpaceBase->next = pCl->pPcieConfigSpaceBase; + pCl->pPcieConfigSpaceBase = pPcieConfigSpaceBase; + + NV_PRINTF(LEVEL_INFO, + "PCIe Config BaseAddress 0x%llx Domain %x startBusNumber %x endBusNumber %x\n", + pPcieConfigSpaceBase->baseAddress, pPcieConfigSpaceBase->domain, + pPcieConfigSpaceBase->startBusNumber, + pPcieConfigSpaceBase->endBusNumber); + + return NV_OK; +} + + +RmPhysAddr +clFindPcieConfigSpaceBase_IMPL +( + OBJCL *pCl, + NvU32 domain, + NvU8 bus +) +{ + PPCIECONFIGSPACEBASE pPcieConfigSpaceBase = pCl->pPcieConfigSpaceBase; + + while (pPcieConfigSpaceBase) + { + if ((pPcieConfigSpaceBase->domain == domain) && + (pPcieConfigSpaceBase->startBusNumber <= bus) && + (pPcieConfigSpaceBase->endBusNumber >= bus)) + { + return pPcieConfigSpaceBase->baseAddress; + } + pPcieConfigSpaceBase = pPcieConfigSpaceBase->next; + } + return 0; +} + +void +clFreePcieConfigSpaceBase_IMPL(OBJCL *pCl) +{ + PPCIECONFIGSPACEBASE pPcieConfigSpaceBase = pCl->pPcieConfigSpaceBase; + PPCIECONFIGSPACEBASE pPcieConfigSpaceBaseNext; + + while (pPcieConfigSpaceBase) + { + pPcieConfigSpaceBaseNext = pPcieConfigSpaceBase->next; + portMemFree(pPcieConfigSpaceBase); + pPcieConfigSpaceBase = pPcieConfigSpaceBaseNext; + } + pCl->pPcieConfigSpaceBase = NULL; +} + +// +// Locate and parse the PCI Express Virtual P2P Approval capability from the +// given GPUs PCI configuration space, if it exists. The capability's presence +// across multiple GPUs will define which GPUs are capable of P2P with each +// other. +// +static void +objClLoadPcieVirtualP2PApproval(OBJGPU *pGpu) +{ + void *handle; + NvU32 data32; + NvU8 version; + NvU8 cap; + NvU8 bus = gpuGetBus(pGpu); + NvU8 device = gpuGetDevice(pGpu); + NvU32 domain = gpuGetDomain(pGpu); + NvU32 offset = NV_PCI_VIRTUAL_P2P_APPROVAL_CAP_0; + NvU32 sig = 0; + + if (!IS_PASSTHRU(pGpu)) + { + NV_PRINTF(LEVEL_INFO, + "Skipping non-pass-through GPU%u\n", gpuGetInstance(pGpu)); + return; + } + + handle = osPciInitHandle(domain, bus, device, 0, NULL, NULL); + + // + // Check for the Virtual P2P Approval Capability in PCI config space. + // We've specifically reserved offset 0xC8 for it, but hypervisors + // aren't strictly required to put it there. They are, however, required + // to link it in the capability list. + // + cap = osPciReadByte(handle, PCI_CAPABILITY_LIST); + while ((cap != 0) && (sig != NV_PCI_VIRTUAL_P2P_APPROVAL_SIGNATURE)) + { + offset = cap; + data32 = osPciReadDword(handle, offset); + cap = (NvU8)((data32 >> 8) & 0xFF); + + if ((data32 & CAP_ID_MASK) != CAP_ID_VENDOR_SPECIFIC) + continue; + + sig = DRF_VAL(_PCI, _VIRTUAL_P2P_APPROVAL_CAP_0, _SIG_LO, data32); + data32 = osPciReadDword(handle, offset + 4); + sig |= (DRF_VAL(_PCI, _VIRTUAL_P2P_APPROVAL_CAP_1, _SIG_HI, data32) << 8); + } + + if (sig != NV_PCI_VIRTUAL_P2P_APPROVAL_SIGNATURE) + { + NV_PRINTF(LEVEL_INFO, + "No virtual P2P approval capability found in GPU%u's capability list\n", + gpuGetInstance(pGpu)); + return; + } + + // data32 now contains the second dword of the capability structure. 
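+    //
+    // Layout, as implied by the DRF fields consumed here (exact bit positions
+    // live in the corresponding hardware reference header): dword 0 carries
+    // the vendor-specific capability header plus the low signature bits
+    // (_SIG_LO), and dword 1 carries the high signature bits (_SIG_HI), a
+    // _VERSION field and the _PEER_CLIQUE_ID assigned by the hypervisor.
+    //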
+ version = (NvU8)DRF_VAL(_PCI, _VIRTUAL_P2P_APPROVAL_CAP_1, _VERSION, + data32); + if (version != 0) + { + NV_PRINTF(LEVEL_WARNING, + "Unable to handle virtual P2P approval capability version %u on GPU%u\n", + version, gpuGetInstance(pGpu)); + return; + } + + pGpu->pciePeerClique.id = (NvU8)DRF_VAL(_PCI, _VIRTUAL_P2P_APPROVAL_CAP_1, + _PEER_CLIQUE_ID, data32); + pGpu->pciePeerClique.bValid = NV_TRUE; + + NV_PRINTF(LEVEL_INFO, + "Hypervisor has assigned GPU%u to peer clique %u\n", + gpuGetInstance(pGpu), pGpu->pciePeerClique.id); +} + +/*! + * @brief Traverse bus topology till Gpu's root port. + * If any of the intermediate bridge has TB3 supported vendorId and hotplug + * capability(not necessarily same bridge), mark the Gpu as External Gpu. + * + * @params[in] pGpu OBJGPU pointer + * @params[in] pCl OBJCL pointer + * + */ +void +objClCheckForExternalGpu +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + NvU8 bus; + NvU32 domain; + void *handleUp; + NvU8 busUp, devUp, funcUp; + NvU16 vendorIdUp, deviceIdUp; + NvU32 portCaps, pciCaps, slotCaps; + NvU32 PCIECapPtr; + NvBool bTb3Bridge = NV_FALSE, bSlotHotPlugSupport = NV_FALSE; + + domain = gpuGetDomain(pGpu); + bus = gpuGetBus(pGpu); + + do + { + // Find the upstream bridge + handleUp = clFindP2PBrdg(pCl, domain, bus, &busUp, &devUp, &funcUp, &vendorIdUp, &deviceIdUp); + if (!handleUp) + { + return; + } + + if (vendorIdUp == PCI_VENDOR_ID_INTEL) + { + // Check for the supported TB3(ThunderBolt 3) bridges. + bTb3Bridge = isTB3DeviceID(deviceIdUp); + } + + if (NV_OK != clSetPortPcieCapOffset(pCl, handleUp, &PCIECapPtr)) + { + // PCIE bridge but no cap pointer. + return; + } + + // Get the PCIE capabilities. + pciCaps = osPciReadDword(handleUp, CL_PCIE_CAP - CL_PCIE_BEGIN + PCIECapPtr); + if (CL_PCIE_CAP_SLOT & pciCaps) + { + // Get the slot capabilities. + slotCaps = osPciReadDword(handleUp, CL_PCIE_SLOT_CAP - CL_PCIE_BEGIN + PCIECapPtr); + + if ((CL_PCIE_SLOT_CAP_HOTPLUG_CAPABLE & slotCaps) && + (CL_PCIE_SLOT_CAP_HOTPLUG_SURPRISE & slotCaps)) + { + bSlotHotPlugSupport = NV_TRUE; + } + } + + if (bTb3Bridge && bSlotHotPlugSupport) + { + pCl->setProperty(pCl, PDB_PROP_CL_IS_EXTERNAL_GPU, NV_TRUE); + break; + } + + bus = busUp; + + // Get port caps to check if PCIE bridge is the root port + portCaps = osPciReadDword(handleUp, CL_PCIE_CAP - CL_PCIE_BEGIN + PCIECapPtr); + + } while (!CL_IS_ROOT_PORT(portCaps)); +} + + +/*! + * @brief : Enable L0s and L1 support for GPU's upstream port + * Refer Section 7.8.7. Link Control Register of PCIE Spec 3. + * Note: This function is used for force enabling ASPM and shouldn't be used for normal driver operations + */ +NV_STATUS +clControlL0sL1LinkControlUpstreamPort_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvBool bEnable +) +{ + void *handle = pGpu->gpuClData.upstreamPort.addr.handle; + NvU32 PCIECapPtr = pGpu->gpuClData.upstreamPort.PCIECapPtr; + NvU32 linkControlRegOffset = PCIECapPtr + 0x10; + NvU16 regVal; + + regVal = osPciReadWord(handle, linkControlRegOffset); + if (regVal == 0xFFFF) + { + NV_PRINTF(LEVEL_ERROR, + "Link Control register read failed for upstream port\n"); + return NV_ERR_GENERIC; + } + + if(bEnable) + { + // 1:0 - 11b L0s and L1 Entry Enabled + regVal |= 0x3; + } + else + { + // 0:0 - 00b L0s and L1 Entry Disabled + regVal &= 0x0; + } + osPciWriteDword(handle, linkControlRegOffset, regVal); + return NV_OK; +} + +/*! 
+ * @brief: Returns the gen speed of the root node + */ +NV_STATUS +clPcieGetRootGenSpeed_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvU8 *genSpeed +) +{ + void *handle; + NvU32 PCIECapPtr; + NvU32 linkCaps; + + *genSpeed = 0; + + handle = pGpu->gpuClData.rootPort.addr.handle; + if (handle == NULL) + { + return NV_ERR_GENERIC; + } + + PCIECapPtr = pGpu->gpuClData.rootPort.PCIECapPtr; + + linkCaps = osPciReadDword(handle, CL_PCIE_LINK_CAP - CL_PCIE_BEGIN + PCIECapPtr); + + // Read field 3:0 to get max link speed + *genSpeed = (NvU8) (linkCaps & 0xF); + + return NV_OK; +} + +/*! + * @brief: Returns the value of link_capabilities_2 of the downstream port + * + * @param[i] pGpu GPU object pointer + * @param[out] pLinkCaps2 link_capabilities_2 register value + * + * @return NV_OK + */ +NV_STATUS +clPcieGetDownstreamPortLinkCap2_IMPL +( + OBJGPU *pGpu, + OBJCL *pCl, + NvU32 *pLinkCaps2 +) +{ + void *pHandle; + NvU32 PCIECapPtr; + + // + // If there is a switch this is equal to boardDownstreamPort + // If there is no switch this is equal to rootPort + // + pHandle = pGpu->gpuClData.upstreamPort.addr.handle; + if (pHandle == NULL) + { + return NV_ERR_GENERIC; + } + + PCIECapPtr = pGpu->gpuClData.upstreamPort.PCIECapPtr; + + // + // CL_PCIE_END is a misnomer, we actually want to use CL_PCIE_LINK_CAP_2. + // But it is not present in chipset.h and the offset of what-would-be + // CL_PCIE_LINK_CAP_2 (0x2c) is overlapping with CL_PCIE_END. + // Before replacing CL_PCIE_END with CL_PCIE_LINK_CAP_2, we would need to + // first understand why CL_PCIE_END was restricted to 0x2c and then + // changing it to CL_PCIE_LINK_CAP_2 would require discussion and time. + // Todo by anaikwade: Correct this issue. Bug 200659585. + // Also investigate more correct way if there is any to + // check pcie 4.0 spec support + // + *pLinkCaps2 = osPciReadDword(pHandle, CL_PCIE_END - CL_PCIE_BEGIN + PCIECapPtr); + + return NV_OK; +} + +NvBool clRootportNeedsNosnoopWAR_FWCLIENT(OBJGPU *pGpu, OBJCL *pCl) +{ + const GspStaticConfigInfo *pSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + + NV_ASSERT_OR_RETURN(pSCI != NULL, NV_FALSE); + + return pSCI->bClRootportNeedsNosnoopWAR; +} diff --git a/src/nvidia/src/kernel/platform/chipset/pci_pbi.c b/src/nvidia/src/kernel/platform/chipset/pci_pbi.c new file mode 100644 index 000000000..864ee939d --- /dev/null +++ b/src/nvidia/src/kernel/platform/chipset/pci_pbi.c @@ -0,0 +1,377 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "platform/chipset/pci_pbi.h" +#include "rmpbicmdif.h" +#include "os/os.h" +#include "gpu/gpu_uuid.h" + +// Max number of retries when polling for command completion (1ms per poll) +#define PCI_PBI_POLL_STATIC 100 // Poll iterations for static PBI +// Longer poll time if we know PBI is supported by device. GA100 can take 1.2sec +#define PCI_PBI_POLL_ENABLED 1300 // Poll iterations if Linked/Enabled + +// +// Going forward we expect to find PBI in the PCI capability list if PBI is +// supported by the device. However, there are GPUs using a fixed offset at +// 0xb4, and if the PCI capability is not found then we still want to support +// devices that use the static offset. +// +#define PCI_CAPABILITY_LIST_BASE 0x34 // base of the PCI capability list +#define PCI_PBI_STATIC_CAP_BASE 0xb4 // Base of static PBI capability +#define PCI_PBI_CAP_ADDR(base, offset) ((base == 0 ? PCI_PBI_STATIC_CAP_BASE : base) + offset) + +// PBI register offsets, from the base of the PCI capability +#define PCI_PBI_REG_PBCI 0x00 // PBI cap ID register +#define PCI_PBI_REG_PBLS 0x02 // PBI cap list size register +#define PCI_PBI_REG_PBCC 0x03 // PBI cap control register +#define PCI_PBI_REG_COMMAND 0x04 // PBI command register +#define PCI_PBI_REG_DATAIN 0x08 // PBI data in register +#define PCI_PBI_REG_DATAOUT 0x0c // PBI data out register +#define PCI_PBI_REG_MUTEX 0x10 // PBI mutex register + +// Expected PCI capability values if the Post-Box interface exists for this card +#define PCI_PBI_CAP_ID 0x09 // PCI Capability ID for PBI +#define PCI_PBI_REG_PBLS_LENGTH 0x14 // Expected length of the capability +#define PCI_PBI_REG_PBCC_ENABLED 0x01 // Set when PBI is supported + +// +// Check to see if Post-box interface is found in PCI config space at the +// specified base address +// +// If the PBI capability was found in the PCI cap list then cap_base is the +// PCI config offset of the capability. +// If the capability was not found in the PCI cap list then cap_base is zero, but +// we still check for PBI at a static location to support certain GPUs in the field. +// +static NV_STATUS pciPbiCheck(void *handle, NvU32 cap_base) +{ + if ((osPciReadByte(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_PBCI)) == PCI_PBI_CAP_ID) && + (osPciReadByte(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_PBLS)) == PCI_PBI_REG_PBLS_LENGTH) && + (osPciReadByte(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_PBCC)) == PCI_PBI_REG_PBCC_ENABLED)) + return NV_OK; + + return NV_ERR_NOT_SUPPORTED; +} + +// +// Find the base of the PCI PBI capability and return the base. +// Returns 0 if PBI is not found in the PCI cap list. 
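+//
+// The walk below is the standard PCI capability list traversal: config offset
+// 0x34 (PCI_CAPABILITY_LIST_BASE) holds the offset of the first capability,
+// and each capability node starts with a one-byte ID followed by a one-byte
+// pointer to the next node (hence the read at cap_base + 1).  A next pointer
+// of zero terminates the list.  For example (offsets illustrative), a chain
+// of 0x34 -> 0x40 -> 0x78 -> 0xC8 stops at 0xC8 if the PBI registers are
+// found there.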
+// +static NvU32 pciPbiFindCapability(void *handle) +{ + // Start at the beginning of the PCI capability list + NvU32 cap_base = osPciReadByte(handle, PCI_CAPABILITY_LIST_BASE); + + // Walk the PCI capability list looking for a match for PBI + while (cap_base != 0 && pciPbiCheck(handle, cap_base) != NV_OK) + cap_base = osPciReadByte(handle, cap_base + 1); + + return cap_base; +} + +// +// Attempt to acquire Post-box interface mutex +// +static NV_STATUS pciPbiAcquireMutex(void *handle, NvU32 cap_base) +{ + NvU32 id; + + // check to see if mutex is available + id = osPciReadDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_MUTEX)); + if (id == PBI_CLIENT_NONE) + { + // attempt to acquire + osPciWriteDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_MUTEX), + PBI_CLIENT_DRIVER_PCIPBI_SHIM); + + // check to see if acquired + id = osPciReadDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_MUTEX)); + if (id == PBI_CLIENT_DRIVER_PCIPBI_SHIM) + return NV_OK; + } + + return NV_ERR_IN_USE; +} + +// +// Release Post-box interface mutex +// +static void pciPbiReleaseMutex(void *handle, NvU32 cap_base) +{ + NvU32 id; + + // check to see if we own the mutex + id = osPciReadDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_MUTEX)); + if (id != PBI_CLIENT_DRIVER_PCIPBI_SHIM) + { + NV_PRINTF(LEVEL_ERROR, + "Attempted to release PBI mutex that does not match client ID\n"); + NV_ASSERT(0); + return; + } + + osPciWriteDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_MUTEX), PBI_CLIENT_NONE); +} + +// +// Convert PBI error to NV_STATUS error +// +static NV_STATUS pciPbiError(int status) +{ + switch (status) + { + case NV_PBI_COMMAND_STATUS_SUCCESS: + case NV_PBI_COMMAND_STATUS_MORE_DATA: + return NV_OK; + case NV_PBI_COMMAND_STATUS_INVALID_ADDRESS: + case NV_PBI_COMMAND_STATUS_INVALID_COMMAND: + return NV_ERR_INVALID_COMMAND; + case NV_PBI_COMMAND_STATUS_BUSY: + case NV_PBI_COMMAND_STATUS_PENDING: + return NV_ERR_IN_USE; + default: + return NV_ERR_GENERIC; + } +} + +// +// Polling waiting for PBI command completion +// +static NV_STATUS pciPbiCheckStatusWait(void *handle, NvU32 cap_base) +{ + NvU32 cmdStatus; + NvU32 status; + NvU32 intr; + NvU32 i; + NvU32 poll_limit; + NvU16 devid; + // WAR 2844019 + static const NvU16 ampere_devid[] = {0x2080, 0x2081, 0x2082, 0x20B0, 0x20B1, 0x20B2, + 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7, 0x20BB, + 0x20BE, 0x20BF, 0x20C0, 0x20C2, 0x20F0, 0x20F1, + 0x20F2, 0x20F3, 0x20FE, 0x20FF, 0}; + + + // Allow longer poll time when we know PBI is supported + if (cap_base != 0) + { + poll_limit = PCI_PBI_POLL_ENABLED; + } + else + { + poll_limit = PCI_PBI_POLL_STATIC; + + // WAR for 2844918, extra delay is needed for early Ampere GA100 devices + // which do not have PBI correctly linked in the PCI Capability list + devid = osPciReadWord(handle, 0x2); + for (i = 0; ampere_devid[i] != 0; i++) + { + if (devid == ampere_devid[i]) + { + poll_limit = PCI_PBI_POLL_ENABLED; + break; + } + } + // End WAR + } + + // poll for command completion + for (i = 0; i < poll_limit; i++) + { + cmdStatus = osPciReadDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_COMMAND)); + status = DRF_VAL(_PBI, _COMMAND, _STATUS, cmdStatus); + intr = DRF_VAL(_PBI, _COMMAND, _INTERRUPT, cmdStatus); + if (intr) + { + osDelay(1); + continue; + } + switch (status) + { + case NV_PBI_COMMAND_STATUS_UNDEFINED: + case NV_PBI_COMMAND_STATUS_BUSY: + case NV_PBI_COMMAND_STATUS_PENDING: + osDelay(1); + continue; + default: + return pciPbiError(status); + } + } + + return NV_ERR_TIMEOUT; +} + +// +// Send PBI 
command and poll waiting for completion +// +static +NV_STATUS pciPbiSendCommandWait(void *handle, NvU32 cap_base, NvU32 command, NvU32 dataIn, + NvU32 *dataOut) +{ + NV_STATUS status; + + // send command + osPciWriteDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_DATAIN), dataIn); + osPciWriteDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_COMMAND), command); + + // wait for command status + status = pciPbiCheckStatusWait(handle, cap_base); + + if (status == NV_OK) + { + // read dataout + *dataOut = osPciReadDword(handle, PCI_PBI_CAP_ADDR(cap_base, PCI_PBI_REG_DATAOUT)); + } + + return status; +} + +// +// Read 16-byte raw UUID/GID via Post-Box interface +// +// Note: The temporary buffer 'gid' is used here to avoid the expense of +// a possible unaligned exception trap when moving the data. The data +// returned by PBI is a DWORD, and we don't know the alignment of the +// user buffer. +// +NV_STATUS pciPbiReadUuid(void *handle, NvU8 *uuid) +{ + NV_STATUS status; + NvU32 command; + NvU32 capability; + NvU32 gid[RM_SHA1_GID_SIZE/4]; + NvU32 i; + NvU32 cap_base; + + if (uuid == NULL || handle == NULL) + return NV_ERR_INVALID_ARGUMENT; + + cap_base = pciPbiFindCapability(handle); + + // see if PBI exists + status = pciPbiCheck(handle, cap_base); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Device does not support PBI\n"); + return status; + } + + // acquire PBI mutex + status = pciPbiAcquireMutex(handle, cap_base); + if (status != NV_OK) { + NV_PRINTF(LEVEL_ERROR, "Could not acquire pciPbi mutex\n"); + return status; + } + + // get PBI capabilities + PBI_SET_COMMAND_PARAMS(0, 0, 0, 1, NV_PBI_COMMAND_FUNC_ID_GET_CAPABILITIES, + 0, 0, 0, 1, command); + status = pciPbiSendCommandWait(handle, cap_base, command, 0, &capability); + if (status != NV_OK) + { + // + // A device can pretend to have PBI support, using a fake PCI config space entry, + // even though it does not respond to PBI calls. Return NV_ERR_NOT_SUPPORTED + // for such devices if NV_PBI_COMMAND_FUNC_ID_GET_CAPABILITIES fails to respond. 
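+        // (On success, by contrast, 'capability' is treated below as a
+        // bitmask of supported PBI function IDs, which is why Execute Routine
+        // support is tested with NVBIT(NV_PBI_COMMAND_FUNC_ID_EXECUTE_ROUTINE).)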
+ // + NV_PRINTF(LEVEL_INFO, + "Device did not respond to PBI GET_CAPABILITIES\n"); + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + // see if the PBI supports Execute Routine + if ((capability & NVBIT(NV_PBI_COMMAND_FUNC_ID_EXECUTE_ROUTINE)) == 0) + { + NV_PRINTF(LEVEL_INFO, + "Device does not support PBI Execute Routine\n"); + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + // get the UUID (4 words) + for (i = 0; i < (sizeof gid)/(sizeof gid[0]); i++) + { + PBI_SET_COMMAND_PARAMS(0, 0, i, 1, NV_PBI_COMMAND_FUNC_ID_EXECUTE_ROUTINE, + 0, 0, 0, 1, command); + status = pciPbiSendCommandWait(handle, cap_base, command, + NV_PBI_EXECUTE_ROUTINE_GET_GID, &gid[i]); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Failure reading GID\n"); + goto done; + } + } + + portMemCopy(uuid, RM_SHA1_GID_SIZE, gid, RM_SHA1_GID_SIZE); + +done: + pciPbiReleaseMutex(handle, cap_base); + return status; +} + +// +// Get Feature support via Post-Box interface +// +NV_STATUS pciPbiGetFeature(void *handle, NvU32 *feature) +{ + NV_STATUS status; + NvU32 command; + NvU32 cap_base; + + if (feature == NULL || handle == NULL) + return NV_ERR_INVALID_ARGUMENT; + + cap_base = pciPbiFindCapability(handle); + + // see if PBI exists + status = pciPbiCheck(handle, cap_base); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Device does not support PBI\n"); + return status; + } + + // acquire PBI mutex + status = pciPbiAcquireMutex(handle, cap_base); + if (status != NV_OK) { + NV_PRINTF(LEVEL_ERROR, "Could not acquire pciPbi mutex\n"); + return status; + } + + // get feature dword + PBI_SET_COMMAND_PARAMS(0, 0, 0, 1, NV_PBI_COMMAND_FUNC_ID_EXECUTE_ROUTINE, + 0, 0, 0, 1, command); + status = pciPbiSendCommandWait(handle, cap_base, command, + NV_PBI_EXECUTE_ROUTINE_GET_FEATURE, feature); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Device did not provide PBI GET FEATURE, %0x\n", + status); + } + + pciPbiReleaseMutex(handle, cap_base); + return status; +} diff --git a/src/nvidia/src/kernel/platform/cpu.c b/src/nvidia/src/kernel/platform/cpu.c new file mode 100644 index 000000000..53534b2a9 --- /dev/null +++ b/src/nvidia/src/kernel/platform/cpu.c @@ -0,0 +1,1530 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/******************* CPU Information Gather Routines ***********************\ +* * +* One time initialization code to setup the Processor type * +* * +\***************************************************************************/ + +#include "cpuopsys.h" + +#include "Nvcm.h" +#include "os/os.h" +#include "core/system.h" + +#include "ctrl/ctrl0000/ctrl0000system.h" + + +#if NVCPU_IS_AARCH64 + +#include "cpu_arm_def.h" + +#if defined(__GNUC__) + +#define CP_READ_REGISTER(reg) \ + ({ \ + NvU32 __res; \ + \ + asm("mrs %0, " reg "\r\t" \ + : "=r" (__res) \ + ); \ + \ + __res; \ + }) + +#define CP_WRITE_REGISTER(reg, val) \ + ({ \ + asm("msr " reg ", %0\r\t" \ + : \ + : "r" (val) \ + ); \ + }) + +#endif //end defined(__GNUC__) + +static void DecodeAarch64Cache(OBJSYS *pSys) +{ + NvU32 val, field, numsets, assoc, linesize; + + // Select level 1 data cache + CP_WRITE_CSSELR_REGISTER(CP_CSSELR_DATA_CACHE); + + // Retrieve data cache information + val = CP_READ_CCSIDR_REGISTER(); + + field = GET_BITMASK(CCSIDR_CACHE_NUM_SETS, val); + numsets = field + 1; + field = GET_BITMASK(CCSIDR_CACHE_ASSOCIATIVITY, val); + assoc = field + 1; + field = GET_BITMASK(CCSIDR_CACHE_LINE_SIZE, val); + linesize = 1 << (field + 4); + + pSys->cpuInfo.dataCacheLineSize = linesize; + pSys->cpuInfo.l1DataCacheSize = (numsets * assoc * linesize) >> 10; + + // Select level 2 data cache + CP_WRITE_CSSELR_REGISTER(CP_CSSELR_DATA_CACHE_LEVEL2); + + // Retrieve data cache information + val = CP_READ_CCSIDR_REGISTER(); + + field = GET_BITMASK(CCSIDR_CACHE_NUM_SETS, val); + numsets = field + 1; + field = GET_BITMASK(CCSIDR_CACHE_ASSOCIATIVITY, val); + assoc = field + 1; + field = GET_BITMASK(CCSIDR_CACHE_LINE_SIZE, val); + linesize = 1 << (field + 4); + + pSys->cpuInfo.l2DataCacheSize = (numsets * assoc * linesize) >> 10; +} + +/* + * ID the CPU. + */ +void RmInitCpuInfo(void) +{ +#define AARCH64_VENDOR_PART_NUMBER(v, p) \ + (((v)<<16)|(p)) +#define AARCH64_VENDOR_PART(v, p) \ + AARCH64_VENDOR_PART_NUMBER(CP_MIDR_IMPLEMENTER_##v, CP_MIDR_PRIMARY_PART_NUM_##p) + + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (pSys->cpuInfo.bInitialized) + { + return; + } + + // Init structure to default + portMemSet(&pSys->cpuInfo, 0, sizeof(pSys->cpuInfo)); + + // ARM has the equivalent of a fence instruction (DSB) + + // Leave this here for MODS + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_UNKNOWN; + pSys->cpuInfo.caps = (NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE | + NV0000_CTRL_SYSTEM_CPU_CAP_WRITE_COMBINING); + + // Calculate the frequency + pSys->cpuInfo.clock = osGetCpuFrequency(); + + // Number of core is available from SCU configuration. 
+ pSys->cpuInfo.numPhysicalCpus = osGetCpuCount(); + + // There is no hyper-threading on ARM + pSys->cpuInfo.numLogicalCpus = pSys->cpuInfo.numPhysicalCpus; + pSys->cpuInfo.maxLogicalCpus = pSys->cpuInfo.numPhysicalCpus; + + // Zero out the vendor-specific family, model & stepping + pSys->cpuInfo.family = 0; + pSys->cpuInfo.model = 0; + pSys->cpuInfo.stepping = 0; + + NvU32 val; + NvU32 impl; + NvU32 part; + + // Retrieve Main ID register + val = CP_READ_MIDR_REGISTER(); + + impl = GET_BITMASK(MIDR_IMPLEMENTER, val); + part = GET_BITMASK(MIDR_PRIMARY_PART_NUM, val); + + switch(AARCH64_VENDOR_PART_NUMBER(impl, part)) + { + case AARCH64_VENDOR_PART(NVIDIA, DENVER_1): + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_NV_DENVER_1_0; + break; + case AARCH64_VENDOR_PART(NVIDIA, DENVER_2): + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_NV_DENVER_2_0; + break; + case AARCH64_VENDOR_PART(NVIDIA, ESTES_1): + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV8A_GENERIC; + break; + + case AARCH64_VENDOR_PART(NVIDIA, CARMEL): + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV8A_GENERIC; + break; + /* + * Applied Micro is now Ampere computing, and the Ampere eMag + * vendor/part ids are the same as AMCC XGENE + */ + case AARCH64_VENDOR_PART(AMCC, XGENE): + case AARCH64_VENDOR_PART(ARM, CORTEX_A76): + case AARCH64_VENDOR_PART(MARVELL, THUNDER_X2): + case AARCH64_VENDOR_PART(HUAWEI, KUNPENG_920): + case AARCH64_VENDOR_PART(ARM, BLUEFIELD): + // The Neoverse N1 is the same as Gravitron + case AARCH64_VENDOR_PART(ARM, GRAVITRON2): + case AARCH64_VENDOR_PART(FUJITSU, A64FX): + case AARCH64_VENDOR_PART(PHYTIUM, FT2000): + case AARCH64_VENDOR_PART(PHYTIUM, S2500): + case AARCH64_VENDOR_PART(AMPERE, ALTRA): + case AARCH64_VENDOR_PART(MARVELL, OCTEON_CN96XX): + case AARCH64_VENDOR_PART(MARVELL, OCTEON_CN98XX): + case AARCH64_VENDOR_PART(ARM, CORTEX_A57): + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV8A_GENERIC; + break; + default: + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_ARMV8A_GENERIC; + portDbgPrintf( + "NVRM: CPUID: unknown implementer/part 0x%x/0x%x.\n", impl, part); + portDbgPrintf( + "The NVIDIA GPU driver for AArch64 has not been qualified on this CPU\n" + "and therefore it is not recommended or intended for use in any production\n" + "environment.\n"); + break; + } + DecodeAarch64Cache(pSys); + + // Host native page size +#ifdef PAGE_SIZE + pSys->cpuInfo.hostPageSize = PAGE_SIZE; +#else + pSys->cpuInfo.hostPageSize = 4096; +#endif + + pSys->cpuInfo.bInitialized = NV_TRUE; +#undef AARCH64_VENDOR_PART +#undef AARCH64_VENDOR_PART_NUMBER +} + +#endif // NVCPU_IS_AARCH64 + + +/***************************************************************************/ + + +#if NVCPU_IS_ARM + +#include "cpu_arm_def.h" + +#if defined(__GNUC__) + + #define CP_READ_REGISTER(reg) \ + ({ \ + NvU32 __res; \ + \ + asm("mrc p15, " reg ", %0, c0, c0, 0\r\t" \ + : "=r" (__res) \ + : \ + : "cc"); \ + \ + __res; \ + }) + + #define CP_WRITE_REGISTER(reg, val) \ + ({ \ + asm("mcr p15, " reg ", %0, c0, c0, 0\r\t" \ + : \ + : "r"(val)); \ + }) + +#endif //end defined(__GNUC__) + +/* + * Documentation: + * + * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0388f/CIHHDACH.html + */ +static void DecodeCortexA9Cache(OBJSYS *pSys) +{ + NvU32 val, field; + + // Select data cache + CP_WRITE_CSSELR_REGISTER(CP_CSSELR_DATA_CACHE); + + // Retrieve data cache information + val = CP_READ_CCSIDR_REGISTER(); + + // L1 Data Cache Size (from KB to KB) + field = GET_BITMASK(CCSIDR_CACHE_NUM_SETS, val); + + if (field == 
CP_CCSIDR_CACHE_NUM_SETS_16KB) + { + pSys->cpuInfo.l1DataCacheSize = 16; + } + else if (field == CP_CCSIDR_CACHE_NUM_SETS_32KB) + { + pSys->cpuInfo.l1DataCacheSize = 32; + } + else if (field == CP_CCSIDR_CACHE_NUM_SETS_64KB) + { + pSys->cpuInfo.l1DataCacheSize = 64; + } + else + { + NV_PRINTF(LEVEL_ERROR, "CPUID: Couldn't find L1DataCacheSize.\n"); + } + + // There is only one level of cache in the Cortex-A9 processor + pSys->cpuInfo.l2DataCacheSize = 0; + + // Data Cache Line (from W to B) + field = GET_BITMASK(CCSIDR_CACHE_LINE_SIZE, val); + + if (field & CP_CCSIDR_CACHE_LINE_SIZE_8W) + { + pSys->cpuInfo.dataCacheLineSize = 8 * 4; + } + else + { + NV_PRINTF(LEVEL_ERROR, "CPUID: Couldn't find DataCacheLineSize.\n"); + } +} + +static NvU32 DecodeCortexA15CacheSize(NvU32 field) +{ + switch(field) + { + case CP_CCSIDR_CACHE_NUM_SETS_A15_32KB: + return 32; + case CP_CCSIDR_CACHE_NUM_SETS_A15_512KB: + return 512; + case CP_CCSIDR_CACHE_NUM_SETS_A15_1024KB: + return 1024; + case CP_CCSIDR_CACHE_NUM_SETS_A15_2048KB: + return 2048; + case CP_CCSIDR_CACHE_NUM_SETS_A15_4096KB: + return 4096; + default: + NV_PRINTF(LEVEL_ERROR, "CPUID: Couldn't find DataCacheSize.\n"); + return 0; + } +} + +static void DecodeCortexA15Cache(OBJSYS *pSys) +{ + NvU32 val, field; + + // Select level 1 data cache + CP_WRITE_CSSELR_REGISTER(CP_CSSELR_DATA_CACHE); + + // Retrieve data cache information + val = CP_READ_CCSIDR_REGISTER(); + + // L1 Data Cache Size (from KB to KB) + field = GET_BITMASK(CCSIDR_CACHE_NUM_SETS, val); + + pSys->cpuInfo.l1DataCacheSize = DecodeCortexA15CacheSize(field); + + // Data Cache Line (from W to B) + field = GET_BITMASK(CCSIDR_CACHE_LINE_SIZE, val); + + // line size = 2 ** (field + 2) words + pSys->cpuInfo.dataCacheLineSize = 4 * (1 << (field + 2)); + + // Select level 2 data cache + CP_WRITE_CSSELR_REGISTER(CP_CSSELR_DATA_CACHE_LEVEL2); + + // Retrieve data cache information + val = CP_READ_CCSIDR_REGISTER(); + + // L2 Data Cache Size (from KB to KB) + field = GET_BITMASK(CCSIDR_CACHE_NUM_SETS, val); + + pSys->cpuInfo.l2DataCacheSize = DecodeCortexA15CacheSize(field); +} + +/* + * ID the CPU. + */ +void RmInitCpuInfo(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (pSys->cpuInfo.bInitialized) + { + return; + } + + // Init structure to default + portMemSet(&pSys->cpuInfo, 0, sizeof(pSys->cpuInfo)); + + // ARM has the equivalent of a fence instruction (DSB) + + // Leave this here for MODS + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_UNKNOWN; + pSys->cpuInfo.caps = (NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE | + NV0000_CTRL_SYSTEM_CPU_CAP_WRITE_COMBINING); + + // Calculate the frequency + pSys->cpuInfo.clock = osGetCpuFrequency(); + + // Number of core is available from SCU configuration. 
+ pSys->cpuInfo.numPhysicalCpus = osGetCpuCount(); + pSys->cpuInfo.maxLogicalCpus = pSys->cpuInfo.numPhysicalCpus; + + // There is no hyper-threading on ARM + pSys->cpuInfo.numLogicalCpus = pSys->cpuInfo.numPhysicalCpus; + + // Zero out the vendor-specific family, model & stepping + pSys->cpuInfo.family = 0; + pSys->cpuInfo.model = 0; + pSys->cpuInfo.stepping = 0; + + NvU32 val; + NvU32 field; + + // Retrieve Main ID register + val = CP_READ_MIDR_REGISTER(); + + field = GET_BITMASK(MIDR_PRIMARY_PART_NUM, val); + + switch(field) + { + case CP_MIDR_PRIMARY_PART_NUM_A9: + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_A9; + DecodeCortexA9Cache(pSys); + break; + case CP_MIDR_PRIMARY_PART_NUM_A15: + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_A15; + DecodeCortexA15Cache(pSys); + break; + default: + // Narrow down to an unknown arm cpu + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_ARM_UNKNOWN; + NV_PRINTF(LEVEL_ERROR, "CPUID: unknown part number 0x%x.\n", + field); + break; + } + + // Host native page size +#ifdef PAGE_SIZE + pSys->cpuInfo.hostPageSize = PAGE_SIZE; +#else + pSys->cpuInfo.hostPageSize = 4096; +#endif + + pSys->cpuInfo.bInitialized = NV_TRUE; +} + +#endif // NVCPU_IS_ARM + + +/***************************************************************************/ + + +#if NVCPU_IS_PPC64LE + +/* + * ID the CPU. + */ +void RmInitCpuInfo(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_POWERN; + pSys->cpuInfo.caps = 0; + + // Zero out the vendor-specific family, model & stepping + pSys->cpuInfo.family = 0; + pSys->cpuInfo.model = 0; + pSys->cpuInfo.stepping = 0; + + // Calculate the frequency + pSys->cpuInfo.clock = osGetCpuFrequency(); + + // Number of CPUs. + // Should maybe take into account SMT, etc. + pSys->cpuInfo.numPhysicalCpus = osGetCpuCount(); + pSys->cpuInfo.numLogicalCpus = pSys->cpuInfo.numPhysicalCpus; + pSys->cpuInfo.maxLogicalCpus = pSys->cpuInfo.numPhysicalCpus; + + // host native page size + pSys->cpuInfo.hostPageSize = 64 * 1024; + + return; +} + +#endif // NVCPU_IS_PPC64LE + + +/***************************************************************************/ + + +#if NVCPU_IS_RISCV64 + +/* + * ID the CPU. (stub) + */ +void RmInitCpuInfo( + void +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // XXX + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_UNKNOWN; + + // Zero out the vendor-specific family, model & stepping + pSys->cpuInfo.family = 0; + pSys->cpuInfo.model = 0; + pSys->cpuInfo.stepping = 0; + + // Calculate the frequency + pSys->cpuInfo.clock = 1; + + // host native page size + pSys->cpuInfo.hostPageSize = 4096; +} + +#endif // NVCPU_IS_RISCV64 + + +/***************************************************************************/ + + +#if NVCPU_IS_X86 || NVCPU_IS_X86_64 + +#include "platform/cpu.h" + +#if defined(_M_IX86) || defined(NVCPU_X86) || defined(AMD64) || defined(NVCPU_X86_64) + +// bits returned in EDX register by CPUID instruction with EAX=1 +#define CPU_STD_TSC NVBIT(4) +#define CPU_STD_CMOV NVBIT(15) // Supports conditional move instructions. +#define CPU_STD_CLFSH NVBIT(19) // Supports CLFLUSH instruction. +#define CPU_STD_MMX NVBIT(23) +#define CPU_STD_FXSR NVBIT(24) // Indicates CR4.OSFXSR is available. 
+#define CPU_STD_SSE NVBIT(25) // Katmai +#define CPU_STD_SSE2 NVBIT(26) // Willamette NI + +// bits returned in ECX register by CPUID instruction with EAX=1 +#define CPU_STD2_SSE3 NVBIT(0) +#define CPU_STD2_SSE41 NVBIT(19) +#define CPU_STD2_SSE42 NVBIT(20) +#define CPU_STD2_OSXSAVE NVBIT(27) // Indicates the OS supports XSAVE/XRESTOR +#define CPU_STD2_AVX NVBIT(28) + +// "Extended Feature Flags" - bits returned in EDX register by CPUID +// instruction with EAX=0x80000001 +#define CPU_EXT_3DNOW NVBIT(31) // 3DNow +#define CPU_EXT_AMD_3DNOW_EXT NVBIT(30) // 3DNow, with Extensions (AMD specific) +#define CPU_EXT_AMD_MMX_EXT NVBIT(22) // MMX, with Extensions (AMD specific) + +// "Structured Extended Feature Identifiers" - bits returned in EBX +// register by CPUID instruction with EAX=7 +#define CPU_EXT2_ERMS NVBIT(9) + +/* + * Identify chip foundry. + * IS_INTEL = "GenuineIntel" + * IS_AMD = "AuthenticAMD" + * IS_WINCHIP = "CentaurHauls" + * IS_CYRIX = "CyrixInstead" + * IS_TRANSM = "GenuineTMx86" // Transmeta + */ +#define IS_INTEL(fndry) (((fndry).StrID[0]==0x756E6547)&&((fndry).StrID[1]==0x49656E69)&&((fndry).StrID[2]==0x6C65746E)) +#define IS_AMD(fndry) (((fndry).StrID[0]==0x68747541)&&((fndry).StrID[1]==0x69746E65)&&((fndry).StrID[2]==0x444D4163)) +#define IS_WINCHIP(fndry) (((fndry).StrID[0]==0x746E6543)&&((fndry).StrID[1]==0x48727561)&&((fndry).StrID[2]==0x736C7561)) +#define IS_CYRIX(fndry) (((fndry).StrID[0]==0x69727943)&&((fndry).StrID[1]==0x736E4978)&&((fndry).StrID[2]==0x64616574)) +#define IS_TRANSM(fndry) (((fndry).StrID[0]==0x756E6547)&&((fndry).StrID[1]==0x54656E69)&&((fndry).StrID[2]==0x3638784D)) + +// CPUID Info +// Used internally in this source. + +typedef struct _def_CPUID_info +{ + union + { + NvU8 String[12]; + NvU32 StrID[3]; + } Foundry; + + NvU32 StandardFeatures; + NvU32 ExtendedFeatures; + + NvU16 Family; + NvU16 ExtFamily; + NvU16 DisplayedFamily; + NvU8 Model; + NvU8 ExtModel; + NvU8 DisplayedModel; + NvU8 Stepping; + NvU32 BrandId; +} CPUIDINFO, *PCPUIDINFO; + +// Forward refernces. +// + +static void getCpuCounts(OBJSYS *pSys, PCPUIDINFO pCpuidInfo); +static NvBool getEmbeddedProcessorName(char *pName, NvU32 size); +static void cpuidInfoAMD(OBJSYS *pSys, PCPUIDINFO pCpuidInfo); +static void cpuidInfoIntel(OBJSYS *pSys, PCPUIDINFO pCpuidInfo); + +#if defined(_M_IX86) || defined(NVCPU_X86) +static void cpuidInfoWinChip(OBJSYS *pSys, PCPUIDINFO pCpuidInfo); +static void cpuidInfoCyrix(OBJSYS *pSys, PCPUIDINFO pCpuidInfo); +static void cpuidInfoTransmeta(OBJSYS *pSys, PCPUIDINFO pCpuidInfo); +#endif + + +/* + * ID the CPU. + */ + +void RmInitCpuInfo(void) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + CPUIDINFO cpuinfo; + NvU32 eax, ebx, ecx, edx; + OBJOS *pOS = SYS_GET_OS(pSys); + + // Do this only once. + if (pSys->cpuInfo.bInitialized) + return; + + // Initialize the processor structure to default values. + // + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_UNKNOWN; + pSys->cpuInfo.caps = 0; + pSys->cpuInfo.clock = 0; + pSys->cpuInfo.dataCacheLineSize = 0; + pSys->cpuInfo.l1DataCacheSize = 0; + pSys->cpuInfo.l2DataCacheSize = 0; + pSys->cpuInfo.coresOnDie = 0; + pSys->cpuInfo.platformID = 0; + portMemSet(pSys->cpuInfo.name, 0, sizeof(pSys->cpuInfo.name)); + + // Init internal structure to default. + // + portMemSet(&cpuinfo, 0, sizeof(cpuinfo)); + + // Get CPUID stuff for all processors. We will figure out what to do with it later. 
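+    //
+    // The decode below follows the standard CPUID leaf 1 layout:
+    //   EAX[3:0] stepping, EAX[7:4] model, EAX[11:8] family,
+    //   EAX[19:16] extended model, EAX[27:20] extended family.
+    // The displayed family adds the extended family only when family == 0xF,
+    // and the displayed model prepends the extended model for families 6 and
+    // 0xF.  Worked example (illustrative value): EAX = 0x00A20F12 decodes to
+    // family 0xF + extended family 0xA = displayed family 0x19, extended
+    // model 0x2 and model 0x1 = displayed model 0x21, stepping 2.
+    //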
+ + // if pOS->osNv_cpuid returns 0, then this cpu does not support cpuid instruction + // We just worry about this on the first call... + if ( ! pOS->osNv_cpuid(pOS, 0, 0, &eax, &cpuinfo.Foundry.StrID[0], + &cpuinfo.Foundry.StrID[2], &cpuinfo.Foundry.StrID[1])) + goto Exit; + + pOS->osNv_cpuid(pOS, 1, 0, &eax, &ebx, &ecx, &edx); + cpuinfo.Family = (NvU16)((eax >> 8) & 0x0F); + cpuinfo.ExtFamily = (NvU16)((eax >> 20) & 0xFF); + if (cpuinfo.Family != 0xF) + { + cpuinfo.DisplayedFamily = cpuinfo.Family; + } + else + { + cpuinfo.DisplayedFamily = cpuinfo.ExtFamily + cpuinfo.Family; + } + + cpuinfo.Model = (NvU8)((eax >> 4) & 0x0F); + cpuinfo.ExtModel = (NvU8)((eax >> 16) & 0x0F); + if (cpuinfo.Family == 6 || cpuinfo.Family == 0xF) + { + cpuinfo.DisplayedModel = (cpuinfo.ExtModel << 4) + cpuinfo.Model; + } + else + { + cpuinfo.DisplayedModel = cpuinfo.Model; + } + + cpuinfo.Stepping = (NvU8)(eax & 0x0F); + cpuinfo.StandardFeatures = edx; + cpuinfo.BrandId = ((ebx & 0xE0) << 3) | (ebx & 0x1F); // 8bit brandID in 12 bit format + + // Decode the standard features. Assume that all CPU vendors use the + // standard feature bits to mean the same thing. Non-Intel vendors use + // the extended CPUID to provide non-standard freture bits, so this + // should be OK. + + if (cpuinfo.StandardFeatures & CPU_STD_MMX) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_MMX; + + if (cpuinfo.StandardFeatures & CPU_STD_CMOV) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_CMOV; + + if (cpuinfo.StandardFeatures & CPU_STD_CLFSH) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_CLFLUSH; + + // Check for Streaming SIMD extensions (Katmai) + if (cpuinfo.StandardFeatures & CPU_STD_SSE) + { + + // SFENCE is an SSE instruction, but it does not require CR4.OSFXSR. + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE; + + if (cpuinfo.StandardFeatures & CPU_STD_FXSR) + { + NvBool check_osfxsr; + NvBool check_osxsave; + // Before setting the NV0000_CTRL_SYSTEM_CPU_CAP_SSE bit, we'll + // also check that CR4.OSFXSR (bit 9) is set, which means the OS + // is prepared to switch the additional SSE FP state for us. + // CPU_STD_FXSR indicates that CR4.OSFXSR is valid. 
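+            // For reference: 0x200 below is CR4 bit 9 (OSFXSR), (1 << 18) is
+            // CR4 bit 18 (OSXSAVE), and the 0x6 mask applied to XCR0 covers
+            // bit 1 (SSE state) and bit 2 (AVX state).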
+ check_osfxsr = ((cpuinfo.StandardFeatures & CPU_STD_FXSR) != 0) && + ((pOS->osNv_rdcr4(pOS) & 0x200) != 0); + + // For NV0000_CTRL_SYSTEM_CPU_CAP_AVX bit, we need: + // - CPU_STD2_OSXSAVE - CR4.OSXSAVE is valid + // - CR4.OSXSAVE (bit 18) - The OS will the additional FP state + // specified by XCR0 + // - XCR0 - bits 1 and 2 indicate SSE and AVX support respectively + check_osxsave = ((ecx & CPU_STD2_OSXSAVE) != 0) && + ((pOS->osNv_rdcr4(pOS) & (1<<18)) != 0) && + ((pOS->osNv_rdxcr0(pOS) & 0x6) != 0); + if(check_osfxsr) + { + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_SSE; + + // supports SSE2 (Willamette NI) instructions + if (cpuinfo.StandardFeatures & CPU_STD_SSE2) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_SSE2; + + // Prescott New Instructions + if (ecx & CPU_STD2_SSE3) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_SSE3; + + // Penryn subset of SSE4 + if (ecx & CPU_STD2_SSE41) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_SSE41; + + // Nehalem subset of SSE4 + if (ecx & CPU_STD2_SSE42) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_SSE42; + } + + // If the OS setup XSAVE / XRESTOR (and set the AVX bit) + // enable AVX + if (check_osxsave) + { + if (ecx & CPU_STD2_AVX) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_AVX; + } + } + } + + if (pOS->osNv_cpuid(pOS, 7, 0, &eax, &ebx, &ecx, &edx)) + { + if (ebx & CPU_EXT2_ERMS) + { + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_ERMS; + } + } + + // Calculate the frequency + if (cpuinfo.StandardFeatures & CPU_STD_TSC) + pSys->cpuInfo.clock = osGetCpuFrequency(); + + // Get the extended features (if they exist). + if (pOS->osNv_cpuid(pOS, 0x80000000, 0, &eax, &ebx, &ecx, &edx) && eax >= 0x80000001) + { + if (pOS->osNv_cpuid(pOS, 0x80000001, 0, &eax, &ebx, &ecx, &edx)) + { + cpuinfo.ExtendedFeatures = edx; + // if 8 bit brandId is 0 + if (!cpuinfo.BrandId) + { + // Check for 12 bit brand ID + cpuinfo.BrandId = (ebx & 0xfff); + } + } + } + + // Get the embedded processor name (if there is one). + getEmbeddedProcessorName(pSys->cpuInfo.name, sizeof(pSys->cpuInfo.name)); + + if (IS_INTEL(cpuinfo.Foundry)) + cpuidInfoIntel(pSys, &cpuinfo); + else if (IS_AMD(cpuinfo.Foundry)) + cpuidInfoAMD(pSys, &cpuinfo); +#if defined(_M_IX86) || defined(NVCPU_X86) + else if (IS_WINCHIP(cpuinfo.Foundry)) + cpuidInfoWinChip(pSys, &cpuinfo); + else if (IS_CYRIX(cpuinfo.Foundry)) + cpuidInfoCyrix(pSys, &cpuinfo); + else if (IS_TRANSM(cpuinfo.Foundry)) + cpuidInfoTransmeta(pSys, &cpuinfo); +#endif + else + { + // We are clueless. If the processor had an embedded name, its already in there. + // If not, use the foundary name as the processor name. + if (pSys->cpuInfo.name[0] == 0) + portMemCopy(pSys->cpuInfo.name, sizeof(cpuinfo.Foundry.String), cpuinfo.Foundry.String, sizeof(cpuinfo.Foundry.String)); + } + + // Pick up the vendor-specific family & model + pSys->cpuInfo.family = cpuinfo.DisplayedFamily; + pSys->cpuInfo.model = cpuinfo.DisplayedModel; + +#if defined(AMD64) || defined(NVCPU_X86_64) + // The WinXP AMD-64 does not context switch the x87/MMX/3DNow registers. We have to zap the bits + // even though the CPU supports them. + // The OS should somehow tell us this, like CR4.OSFXSR above. Need to find a better way... 
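+    //
+    // Note that only the legacy x87/MMX/3DNow caps are masked off here; the
+    // SSE/AVX caps established above are left intact, since that state is
+    // presumed to be context-switched via FXSAVE/XSAVE.
+    //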
+ + pSys->cpuInfo.caps &= ~(NV0000_CTRL_SYSTEM_CPU_CAP_MMX | + NV0000_CTRL_SYSTEM_CPU_CAP_MMX_EXT | + NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW | + NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW_EXT); +#endif + + pSys->cpuInfo.stepping = cpuinfo.Stepping; + pSys->cpuInfo.brandId = cpuinfo.BrandId; + + Exit: + + // set physical/logical processor counts + getCpuCounts(pSys, &cpuinfo); + + // host page size used when allocated host-page-aligned objects in heap +#ifdef PAGE_SIZE + pSys->cpuInfo.hostPageSize = PAGE_SIZE; +#else + pSys->cpuInfo.hostPageSize = 4096; +#endif + + pSys->cpuInfo.bInitialized = NV_TRUE; +} + +// +// This routine determines the number of physical processors enabled +// on the system as well as the number of logical processors per +// physical processors. Intel's HyperThreading technology can yield +// a logical processor count of > 1 per physical processor. +// +// This code was more or less lifted from some Intel sample code. +// + +#define INTEL_HT_BIT 0x10000000 // EDX[28] +#define INTEL_CORE_CNT 0xFC000000 // EAX[31:26] +#define INTEL_LOGICAL_CNT 0x00FF0000 // EBX[23:16] +#define INTEL_LOGICAL_CNT_LEAFB 0x0000FFFF // EBX[15:0] +#define AMD_HT_BIT 0x10000000 // EDX[28] +#define AMD_LOGICAL_CNT 0x00FF0000 // EBX[23:16] + +static void +getCpuCounts(OBJSYS *pSys, PCPUIDINFO pCpuidInfo) +{ + OBJOS *pOS = SYS_GET_OS(pSys); + NvU32 numPhysicalCpus, numLogicalCpus, maxLogicalCpus; + NvU32 eax = 0; + NvU32 ebx = 0; + NvU32 ecx = 0; + NvU32 edx = 0; + + // + // First use OS call to get number of logical CPUs. + // + numLogicalCpus = osGetCpuCount(); + + // + // Assume the number of physical CPUs is the same as the number of logical CPUs. + // + numPhysicalCpus = numLogicalCpus; + maxLogicalCpus = numLogicalCpus; + + // There is no reliable way to tell if hyper-threading is enabled. So, if + // there is more than 1 logical CPUs AND the CPU is hyperthreading capable, + // then assume that HT is enabled. + // + // This should give the right answer for most cases. Some HT capable dual + // CPU systems with HT disabled will be detected as single GPU systems with + // HT enabled. While less than ideal, this should be OK, since logical CPUs + // is 2 in both cases. 
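+    //
+    // For CPUs that expose CPUID leaf 0xB, the topology math below is:
+    // sub-leaf 0 EBX[15:0] gives logical processors per core (CpuHT) and
+    // sub-leaf 1 EBX[15:0] gives logical processors per package, so for
+    // example (illustrative values) 16 logical processors at 2 threads per
+    // core are counted as 8 physical cores.
+    //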
+ // +#if defined(_M_IX86) || defined(NVCPU_X86) || defined(NVCPU_X86_64) + if (IS_INTEL(pCpuidInfo->Foundry)) + { + NvBool cpuHasLeafB = NV_FALSE; + + pOS->osNv_cpuid(pOS, 0, 0, &eax, &ebx, &ecx, &edx); + if (eax >= 0xB) + { + pOS->osNv_cpuid(pOS, 0xB, 0, &eax, &ebx, &ecx, &edx); + if (ebx != 0) + { + cpuHasLeafB = NV_TRUE; + } + } + + pOS->osNv_cpuid(pOS, 1, 0, &eax, &ebx, &ecx, &edx); + + if (edx & INTEL_HT_BIT) + { + NvU32 CpuHT; + + if (cpuHasLeafB) + { + pOS->osNv_cpuid(pOS, 0xB, 0, &eax, &ebx, &ecx, &edx); + CpuHT = (ebx & INTEL_LOGICAL_CNT_LEAFB); + pOS->osNv_cpuid(pOS, 0xB, 1, &eax, &ebx, &ecx, &edx); + maxLogicalCpus = (ebx & INTEL_LOGICAL_CNT_LEAFB); + numPhysicalCpus = maxLogicalCpus/CpuHT; + } + else + { + pOS->osNv_cpuid(pOS, 0, 0, &eax, &ebx, &ecx, &edx); + if (eax >=4) + { + pOS->osNv_cpuid(pOS, 4, 0, &eax, &ebx, &ecx, &edx); + numPhysicalCpus = ((eax & INTEL_CORE_CNT) >> 26) + 1; + pOS->osNv_cpuid(pOS, 1, 0, &eax, &ebx, &ecx, &edx); + maxLogicalCpus = (ebx & INTEL_LOGICAL_CNT) >> 16; + CpuHT = maxLogicalCpus/numPhysicalCpus; + } + } + + if (numPhysicalCpus > numLogicalCpus) + numPhysicalCpus = numLogicalCpus; + + if (numPhysicalCpus < 1) + numPhysicalCpus = 1; + + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_HT_CAPABLE; + } + } + else if(IS_AMD(pCpuidInfo->Foundry)) + { + pOS->osNv_cpuid(pOS, 1, 0, &eax, &ebx, &ecx, &edx); + if( edx & AMD_HT_BIT ) + { + maxLogicalCpus = (ebx & AMD_LOGICAL_CNT) >> 16; + } + } + + NV_PRINTF(LEVEL_INFO, "RmInitCpuCounts: physical 0x%x logical 0x%x\n", + numPhysicalCpus, numLogicalCpus); +#endif + + if(maxLogicalCpus < numLogicalCpus) + maxLogicalCpus = numLogicalCpus; + +#if NVCPU_IS_FAMILY_X86 + // bug1974464: Ryzen physical CPU count is getting misreported + if (IS_AMD(pCpuidInfo->Foundry) && (pCpuidInfo->DisplayedFamily == 0x17)) + { + numPhysicalCpus = NV_MAX(maxLogicalCpus/2, 1); + } +#endif + + pSys->cpuInfo.numPhysicalCpus = numPhysicalCpus; + pSys->cpuInfo.numLogicalCpus = numLogicalCpus; + pSys->cpuInfo.maxLogicalCpus = maxLogicalCpus; + + return; +} + + +// getEmbeddedProcessorName +// +// All processors that have extended CPUID info up through 0x80000004 have an embedded name. +// +static NvBool getEmbeddedProcessorName(char *pName, NvU32 size) +{ + NvU32 op, eax, ebx, ecx, edx; + char *p = pName; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + const NvU32 maxSize = 48; // max 48 bytes on x86 CPUs + + NV_ASSERT_OR_RETURN(size >= maxSize, 0); + + pName[size > maxSize ? maxSize : size-1] = 0; // Make sure it has a zero at the end. + + // Is there is a enough data? If not bail. + if (pOS->osNv_cpuid(pOS, 0x80000000, 0, &eax, &ebx, &ecx, &edx) == 0 || eax < 0x80000004) + return NV_FALSE; + + // Yes, get 48 bytes of CPU name. + for (op = 0x80000002; op < 0x80000005; op++, p += 16) + pOS->osNv_cpuid(pOS, op, 0, (NvU32 *)&p[0], (NvU32 *)&p[4], (NvU32 *)&p[8], (NvU32 *)&p[12]); + + // Kill leading spaces. (Intel's string is right justified.) + if (*pName == ' ') + { + p = pName; + while (*p == ' ') + p++; + do + *(pName++) = *(p++); + while (*p); + } + + return NV_TRUE; +} + + +// Decode Prescott style cache descriptors. +// +static NvBool DecodePrescottCache(OBJSYS *pSys) +{ + NvU32 eax, ebx, ecx, edx; + OBJOS *pOS = SYS_GET_OS(pSys); + + // Decode the cache desciptors. + + if (pOS->osNv_cpuid(pOS, 0, 0, &eax, &ebx, &ecx, &edx)) + { + if (eax >= 4 && eax < 0x80000000) // CPU support new (Prescott) cache descrtiptors? 
+ { + // From Prescot New Instructions Software Developers Guide 252490-003 + + NvU32 uLevel; + NvU32 uLineSize; + NvU32 uCacheSize; + int i; + + // Loop over the cache descriptors by incrementing sub-function. This will never get + // get run on pre-Prescott CPUs since they do not support CPUID 4, but limit number of + // cache descriptors to 20 just in case, so it does not get in an infinite loop. + // + for (i = 0; i < 20; i++) + { + pOS->osNv_cpuid(pOS, 4, i, &eax, &ebx, &ecx, &edx); + + if (i == 0) + { + pSys->cpuInfo.coresOnDie = (eax >> 26) + 1;// eax[31:26] Processor cores on the chip + } + + switch (eax & 0x1f) // Cache type. + { + case 0: // No more cache descriptors. + i = 100; // Break out of loop. + break; + + case 1: // Data cache. + case 3: // Unified cache. + uLevel = (eax >> 5) & 0x7; // eax[7:5] Cache level + uLineSize = (ebx & 0xfff) + 1; // ebx[11:0] System Coherency Line Size + + uCacheSize = uLineSize // ebx[11:0] System Coherency Line Size + * (((ebx >> 12) & 0x3FF) + 1) // ebx[21:12] Physical line partitions + * (((ebx >> 22) & 0x3FF) + 1) // ebx[21:12] Ways of associativity + * (ecx + 1) // ecx[31:0] Number of sets + / 1024; // Put it in KB. + + pSys->cpuInfo.dataCacheLineSize = uLineSize; + + if (uLevel == 1) + pSys->cpuInfo.l1DataCacheSize = uCacheSize; + else if (pSys->cpuInfo.l2DataCacheSize < uCacheSize) + pSys->cpuInfo.l2DataCacheSize = uCacheSize; + break; + + default: // Instruction of unknown cache type. + break; // Do nothing. + } + } + + return NV_TRUE; + } + } + + return NV_FALSE; +} + +#if defined(_M_IX86) || defined(NVCPU_X86) +static void DecodeIntelCacheEntry(OBJSYS *pSys, NvU8 cacheEntry) +{ + // From Intel's AP-485 (11/03). + // + // 00h Null + // 01h Instruction TLB: 4K-byte Pages, 4-way set associative, 32 entries + // 02h Instruction TLB: 4M-byte Pages, fully associative, 2 entries + // 03h Data TLB: 4K-byte Pages, 4-way set associative, 64 entries + // 04h Data TLB: 4M-byte Pages, 4-way set associative, 8 entries + // 06h 1st-level instruction cache: 8K-bytes, 4-way set associative, 32 byte line size + // 08h 1st-level instruction cache: 16K-bytes, 4-way set associative, 32 byte line size + // 0Ah 1st-level data cache: 8K-bytes, 2-way set associative, 32 byte line size + // 0Ch 1st-level data cache: 16K-bytes, 4-way set associative, 32 byte line size + // 22h 3rd-level cache: 512K-bytes, 4-way set associative, sectored cache, 64-byte line size + // 23h 3rd-level cache: 1M-bytes, 8-way set associative, sectored cache, 64-byte line size + // 25h 3rd-level cache: 2MB, 8-way set associative, sectored cache, 64-byte line size + // 29h 3rd-level cache: 4MB, 8-way set associative, sectored cache, 64-byte line size + // 2Ch 1st-level data cache: 32K-bytes, 8-way set associative, 64-byte line size + // 30h 1st-level instruction cache: 32K-bytes, 8-way set associative, 64-byte line size + // 39h 2nd-level cache: 128K-bytes, 4-way set associative, sectored cache, 64-byte line size + // 3Bh 2nd-level cache: 128KB, 2-way set associative, sectored cache, 64-byte line size + // 3Ch 2nd-level cache: 256K-bytes, 4-way set associative, sectored cache, 64-byte line size + // 40h No 2nd-level cache or, if processor contains a valid 2nd-level cache, no3rd-level cache + // 41h 2nd-level cache: 128K-bytes, 4-way set associative, 32 byte line size + // 42h 2nd-level cache: 256K-bytes, 4-way set associative, 32 byte line size + // 43h 2nd-level cache: 512K-bytes, 4-way set associative, 32 byte line size + // 44h 2nd-level cache: 1M-bytes, 4-way set associative, 32 
byte line size + // 45h 2nd-level cache: 2M-bytes, 4-way set associative, 32 byte line size + // 50h Instruction TLB: 4K, 2M or 4M pages, fully associative, 64 entries + // 51h Instruction TLB: 4K, 2M or 4M pages, fully associative, 128 entries + // 52h Instruction TLB: 4K, 2M or 4M pages, fully associative, 256 entries + // 5Bh Data TLB: 4K or 4M pages, fully associative, 64 entries + // 5Ch Data TLB: 4K or 4M pages, fully associative, 128 entries + // 5Dh Data TLB: 4K or 4M pages, fully associative, 256 entries + // 66h 1st-level data cache: 8K-bytes, 4-way set associative, sectored cache, 64-byte line size + // 67h 1st-level data cache: 16K-bytes, 4-way set associative, sectored cache, 64-byte line size + // 68h 1st-level data cache: 32K-bytes, 4 way set associative, sectored cache, 64-byte line size + // 70h Trace cache: 12K-uops, 8-way set associative + // 71h Trace cache: 16K-uops, 8-way set associative + // 72h Trace cache: 32K-uops, 8-way set associative + // 79h 2nd-level cache: 128K-bytes, 8-way set associative, sectored cache, 64-byte line size + // 7Ah 2nd-level cache: 256K-bytes, 8-way set associative, sectored cache, 64-byte line size + // 7Bh 2nd-level cache: 512K-bytes, 8-way set associative, sectored cache, 64-byte line size + // 7Ch 2nd-level cache: 1M-bytes, 8-way set associative, sectored cache, 64-byte line size + // 82h 2nd-level cache: 256K-bytes, 8-way set associative, 32 byte line size + // 83h 2nd-level cache: 512K-bytes, 8-way set associative, 32 byte line size + // 84h 2nd-level cache: 1M-bytes, 8-way set associative, 32 byte line size + // 85h 2nd-level cache: 2M-bytes, 8-way set associative, 32 byte line size + // 86h 2nd-level cache: 512K-bytes, 4-way set associative, 64 byte line size + // 87h 2nd-level cache: 1M-bytes, 8-way set associative, 64 byte line size + // B0h Instruction TLB: 4K-byte Pages, 4-way set associative, 128 entries + // B3h Data TLB: 4K-byte Pages, 4-way set associative, 128 entries + // + // From Intel via Michael Diamond (under NDA): + // Fixes bug 75982 - Reporting incorrect cache info on Banias mobile platform. + // + // 7D 2M; 8 way; 64 byte line size; unified on-die + // 78 1M; 8 way; 64 byte line size, unified on-die + // + // Note: Newer GPUs have added an additional cache level. What used to be L2 is + // now L3. Set the L2 cache to the largest L2 or L3 descriptor found. 
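+    //
+    // Only the data/unified cache descriptors from the table above are
+    // handled in the switch below; TLB, instruction-cache and trace-cache
+    // descriptors simply fall out of the switch unmodified.  Per the note
+    // above, the 3rd-level descriptors (0x22, 0x23, 0x25, 0x29) are folded
+    // into l2DataCacheSize.
+    //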
+ + switch (cacheEntry) + { + case 0x0A: // 1st-level data cache: 8K-bytes, 2-way set associative, 32 byte line size + pSys->cpuInfo.l1DataCacheSize = 8; + pSys->cpuInfo.dataCacheLineSize = 32; + break; + + case 0x0C: // 1st-level data cache: 16K-bytes, 4-way set associative, 32 byte line size + pSys->cpuInfo.l1DataCacheSize = 16; + pSys->cpuInfo.dataCacheLineSize = 32; + break; + + case 0x66: // 1st-level data cache: 8K-bytes, 4-way set associative, sectored cache, 64-byte line size + pSys->cpuInfo.l1DataCacheSize = 8; + pSys->cpuInfo.dataCacheLineSize = 64; + break; + + case 0x67: // 1st-level data cache: 16K-bytes, 4-way set associative, sectored cache, 64-byte line size + pSys->cpuInfo.l1DataCacheSize = 16; + pSys->cpuInfo.dataCacheLineSize = 64; + break; + + case 0x2C: // 1st-level data cache: 32K-bytes, 8-way set associative, 64-byte line size + case 0x68: // 1st-level data cache: 32K-bytes, 4 way set associative, sectored cache, 64-byte line size + pSys->cpuInfo.l1DataCacheSize = 32; + pSys->cpuInfo.dataCacheLineSize = 64; + break; + + case 0x41: // 2nd-level cache: 128K-bytes, 4-way set associative, 32 byte line size + pSys->cpuInfo.dataCacheLineSize = 32; + if (pSys->cpuInfo.l2DataCacheSize < 128) + pSys->cpuInfo.l2DataCacheSize = 128; + break; + + case 0x39: // 2nd-level cache: 128K-bytes, 4-way set associative, sectored cache, 64-byte line size + case 0x3B: // 2nd-level cache: 128KB, 2-way set associative, sectored cache, 64-byte line size + case 0x79: // 2nd-level cache: 128K-bytes, 8-way set associative, sectored cache, 64-byte line size + pSys->cpuInfo.dataCacheLineSize = 64; + if (pSys->cpuInfo.l2DataCacheSize < 128) + pSys->cpuInfo.l2DataCacheSize = 128; + break; + + case 0x42: // 2nd-level cache: 256K-bytes, 4-way set associative, 32 byte line size + case 0x82: // 2nd-level cache: 256K-bytes, 8-way set associative, 32 byte line size + pSys->cpuInfo.dataCacheLineSize = 32; + if (pSys->cpuInfo.l2DataCacheSize < 256) + pSys->cpuInfo.l2DataCacheSize = 256; + break; + + case 0x3C: // 2nd-level cache: 256K-bytes, 4-way set associative, sectored cache, 64-byte line size + case 0x7A: // 2nd-level cache: 256K-bytes, 8-way set associative, sectored cache, 64-byte line size + pSys->cpuInfo.dataCacheLineSize = 64; + if (pSys->cpuInfo.l2DataCacheSize < 256) + pSys->cpuInfo.l2DataCacheSize = 256; + break; + + case 0x43: // 2nd-level cache: 512K-bytes, 4-way set associative, 32 byte line size + case 0x83: // 2nd-level cache: 512K-bytes, 8-way set associative, 32 byte line size + pSys->cpuInfo.dataCacheLineSize = 32; + if (pSys->cpuInfo.l2DataCacheSize < 512) + pSys->cpuInfo.l2DataCacheSize = 512; + break; + + case 0x22: // 3rd-level cache: 512K-bytes, 4-way set associative, sectored cache, 64-byte line size + case 0x7B: // 2nd-level cache: 512K-bytes, 8-way set associative, sectored cache, 64-byte line size + case 0x86: // 2nd-level cache: 512K-bytes, 4-way set associative, 64 byte line size + pSys->cpuInfo.dataCacheLineSize = 64; + if (pSys->cpuInfo.l2DataCacheSize < 512) + pSys->cpuInfo.l2DataCacheSize = 512; + break; + + case 0x44: // 2nd-level cache: 1M-bytes, 4-way set associative, 32 byte line size + case 0x84: // 2nd-level cache: 1M-bytes, 8-way set associative, 32 byte line size + pSys->cpuInfo.dataCacheLineSize = 32; + if (pSys->cpuInfo.l2DataCacheSize < 1024) + pSys->cpuInfo.l2DataCacheSize = 1024; + break; + + case 0x23: // 3rd-level cache: 1M-bytes, 8-way set associative, sectored cache, 64-byte line size + case 0x78: // 1M; 8 way; 64 byte line size, unified on-die + case 
0x7C: // 2nd-level cache: 1M-bytes, 8-way set associative, sectored cache, 64-byte line size + case 0x87: // 2nd-level cache: 1M-bytes, 8-way set associative, 64 byte line size + pSys->cpuInfo.dataCacheLineSize = 64; + if (pSys->cpuInfo.l2DataCacheSize < 1024) + pSys->cpuInfo.l2DataCacheSize = 1024; + break; + + case 0x45: // 2nd-level cache: 2M-bytes, 4-way set associative, 32 byte line size + case 0x85: // 2nd-level cache: 2M-bytes, 8-way set associative, 32 byte line size + pSys->cpuInfo.dataCacheLineSize = 32; + if (pSys->cpuInfo.l2DataCacheSize < 2048) + pSys->cpuInfo.l2DataCacheSize = 2048; + break; + + case 0x25: // 3rd-level cache: 2MB, 8-way set associative, sectored cache, 64-byte line size + case 0x7D: // 2M; 8 way; 64 byte line size; unified on-die + pSys->cpuInfo.dataCacheLineSize = 64; + if (pSys->cpuInfo.l2DataCacheSize < 2048) + pSys->cpuInfo.l2DataCacheSize = 2048; + break; + + case 0x29: // 3rd-level cache: 4MB, 8-way set associative, sectored cache, 64-byte line size + pSys->cpuInfo.dataCacheLineSize = 64; + if (pSys->cpuInfo.l2DataCacheSize < 4096) + pSys->cpuInfo.l2DataCacheSize = 4096; + break; + } +} + +static void DecodeIntelCacheRegister(OBJSYS *pSys, NvU32 cacheRegister /* punny, huh? */) +{ + if ((cacheRegister & NVBIT(31)) == 0) // If bit 31 is set, it is reserved. + { + DecodeIntelCacheEntry(pSys, (NvU8)(cacheRegister >> 24)); + DecodeIntelCacheEntry(pSys, (NvU8)(cacheRegister >> 16)); + DecodeIntelCacheEntry(pSys, (NvU8)(cacheRegister >> 8)); + DecodeIntelCacheEntry(pSys, (NvU8)cacheRegister); + } +} +#endif + +static void cpuidInfoIntel(OBJSYS *pSys, PCPUIDINFO pCpuidInfo) +{ + NvU32 eax, ebx, ecx, edx; + OBJOS *pOS = SYS_GET_OS(pSys); + + if (pCpuidInfo->Family == 5) + { + if (pCpuidInfo->Model == 4) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_P55; + else + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_P5; + } + else if (pCpuidInfo->Family == 6) + { + switch (pCpuidInfo->DisplayedModel) + { + case 1: // Pentium Pro + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_P6; + break; + + case 3: // Pentium II + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_P2; + break; + + case 5: // Pentium II, Pentium II Xeon, or Celeron + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_P2XC; + break; + + case 6: // Pentium II Celeron-A + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_CELA; + break; + + case 7: // Pentium III or Pentium III Xeon (Katmai) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_P3; + break; + + case 15: // Conroe, Core2 Duo + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_CORE2; + break; + + case 22: // Celeron model 16h (65nm) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_CELN_M16H; + break; + + case 23: // Intel Core2 Extreme/Intel Xeon model 17h (45nm) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_CORE2_EXTRM; + break; + + case 28: + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_ATOM; + break; + + case 8: // Pentium III, Pentium III Xeon, or Celeron (Coppermine, 0.18 micron) + case 10: // Pentium III Xeon (Tualatin, 0.13 micron) + case 11: // Pentium III, or Celeron (Tualatin, 0.13 micron) + default: // If it is a new family 6, it is a Pentium III of some type. + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_P3_INTL2; + break; + } + // Flag processors that may be affected by bug 124888. At this time, + // we believe these are Pentium III and Pentium M processors. 
The + // model numbers for these processors in Family 6 are: + // 7 - Pentium III or Pentium III Xeon + // 8 - Pentium III, Pentium III Xeon, or Celeron + // 9 - Pentium M + // 10 - Pentium III Xeon + // 11 - Pentium III + // 12 - ??? + // 13 - Pentium M ("Dothan") + // 14 - ??? + // 15 - Core 2 (bug 272047) + if (pCpuidInfo->Model >= 7) + { + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WAR_124888; + } + } + else if (pCpuidInfo->Family == 0x0F) + { + // Model 0 & 1 == Pentium 4 or Pentium 4 Xeon (Willamette, 423 or 478-pin packages, 0.18 micron) + // Model 2 == Pentium 4 or Pentium 4 Xeon (Northwood, 478-pin package for brookdale, 0.13 micron) + // + // Be careful if you change this. Both D3D and OpenGL are enabling + // performance options based on NV0000_CTRL_SYSTEM_CPU_TYPE_P4. + // + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_P4; + + // The first P4s (pre-Northwood ones) have a performance problem + // when mixing write combined and cached writes. This is fixed + // with model revision 2. + if ((pCpuidInfo->Model == 0) || (pCpuidInfo->Model == 1)) + { + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_NEEDS_WC_WORKAROUND; + } + } + + if (pCpuidInfo->Family == 0xF || (pCpuidInfo->Family == 6 && pCpuidInfo->Model >= 7)) + { + if (pOS->osNv_cpuid(pOS, 0x17, 0, &eax, &ebx, &ecx, &edx)) + pSys->cpuInfo.platformID = (edx >> 18) & 7; // edx[20:18] PlatformID (package type) + } + + // Decode the cache desciptors. + if (!DecodePrescottCache(pSys)) + { +#if defined(_M_IX86) || defined(NVCPU_X86) + + // Prescott style cache descriptors are not supported. Fall back to older style. + // + if (pOS->osNv_cpuid(pOS, 0, 0, &eax, &ebx, &ecx, &edx)) + { + if (eax >= 2) // CPU support old cache descrtiptors? + { + pOS->osNv_cpuid(pOS, 2, 0, &eax, &ebx, &ecx, &edx); + + if ((eax & 0xff) == 1) // AL contains number of times CPU must be called. This will be 1 forever. + { + DecodeIntelCacheRegister(pSys, eax & 0xffffff00); + DecodeIntelCacheRegister(pSys, ebx); + DecodeIntelCacheRegister(pSys, ecx); + DecodeIntelCacheRegister(pSys, edx); + } + } + } +#endif + } +} + +static void cpuidInfoAMD(OBJSYS *pSys, PCPUIDINFO pCpuidInfo) +{ + NvU32 eax = 0; + NvU32 ebx = 0; + NvU32 ecx = 0; + NvU32 edx = 0; + + OBJOS *pOS = SYS_GET_OS(pSys); + NvU32 largestExtendedFunctionNumberSupported = 0x80000000; + + if (pCpuidInfo->Family == 5) // K5, K6, K6-2 with 3DNow, K6-3 + { + if (pCpuidInfo->Model < 6) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K5; + else if (pCpuidInfo->Model < 8) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K6; + else if (pCpuidInfo->Model == 8) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K62; + else if (pCpuidInfo->Model == 9) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K63; + } + else if (pCpuidInfo->Family == 6) // K7 + { + // Family 6 is a mixture of Athlon and Duron processors. Just set the + // processor type to Athlon. The processor name will show the branding. + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K7; + } + else if (pCpuidInfo->Family == 15) // K8 + { + // If family is 15, we need to use AMD's extended family/model information. 
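+        //
+        // Worked example (field values hypothetical): if CPUID leaf 1 returns
+        // base family Fh in EAX[11:8], extended family 8h in EAX[27:20], base
+        // model 1h in EAX[7:4] and extended model 7h in EAX[19:16], the
+        // computation below yields Family = 0x08F and Model = 0x71, so the
+        // (Family & 0xFF0) switch selects the RYZEN case (0x080).
+        //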
+ pOS->osNv_cpuid(pOS, 1, 0, &eax, &ebx, &ecx, &edx); + pCpuidInfo->Family = (NvU16)(((eax >> 8) & 0x0F) + ((eax >> 16) & 0xFF0)); // 27:20 concat 11:8 + pCpuidInfo->Model = (NvU8) (((eax >> 4) & 0x0F) + ((eax >> 12) & 0xF0)); // 19:16 concat 7:4 + + // Differentiate K8, K10, K11, RYZEN, etc + switch( pCpuidInfo->Family & 0xFF0) + { + case 0x000: + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K8; + break; + case 0x010: + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K10; + break; + case 0x020: + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K11; + break; + case 0x080: + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_RYZEN; + break; + default: + NV_PRINTF(LEVEL_ERROR, + "Unrecognized AMD processor in cpuidInfoAMD\n"); + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_K8; + break; + } + } + + if (pCpuidInfo->ExtendedFeatures & CPU_EXT_3DNOW) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW; // 3DNow + + if (pCpuidInfo->ExtendedFeatures & CPU_EXT_AMD_3DNOW_EXT) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW_EXT; // 3DNow, with Extensions (AMD specific) + + if (pCpuidInfo->ExtendedFeatures & CPU_EXT_AMD_MMX_EXT) + { + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_MMX_EXT; // MMX, with Extensions (AMD specific) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_SFENCE; + } + + // Get the cache info. + if (pOS->osNv_cpuid(pOS, 0x80000000, 0, &eax, &ebx, &ecx, &edx)) + { + largestExtendedFunctionNumberSupported = eax; + + if (largestExtendedFunctionNumberSupported >= 0x80000006) + { + // L1 cache + if (pOS->osNv_cpuid(pOS, 0x80000005, 0, &eax, &ebx, &ecx, &edx)) + { + pSys->cpuInfo.dataCacheLineSize = ecx & 0xff; + pSys->cpuInfo.l1DataCacheSize = ecx >> 24; + } + + // L2 cache + if (pOS->osNv_cpuid(pOS, 0x80000006, 0, &eax, &ebx, &ecx, &edx)) + pSys->cpuInfo.l2DataCacheSize = ecx >> 16; + } + + // Get the SEV capability info + if ((largestExtendedFunctionNumberSupported >= 0x8000001f) && + pOS->osNv_cpuid(pOS, 0x8000001f, 0, &eax, &ebx, &ecx, &edx)) + { + // + // EAX[1] stores capability info + // ECX[31:0] stores # of encrypted guests supported simultaneously + // + if (eax & 0x2) + { + pSys->cpuInfo.bSEVCapable = NV_TRUE; + pSys->cpuInfo.maxEncryptedGuests = ecx; + } + } + } +} + + +#if defined(_M_IX86) || defined(NVCPU_X86) + +static void cpuidInfoWinChip(OBJSYS *pSys, PCPUIDINFO pCpuidInfo) +{ + if (pCpuidInfo->Family == 5) // Winchip C6, Winchip2 w/ 3DNow + { + if (pCpuidInfo->Model == 4) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_C6; + if (pCpuidInfo->Model == 8) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_C62; + } + + if (pCpuidInfo->ExtendedFeatures & CPU_EXT_3DNOW) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW; +} + +static void cpuidInfoCyrix(OBJSYS *pSys, PCPUIDINFO pCpuidInfo) +{ + if (pCpuidInfo->Family == 4) // MediaGX + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_GX; + if (pCpuidInfo->Family == 5) // Cyrix 6x86 or MediaGX w/ MMX + { + if (pCpuidInfo->Model == 2) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_M1; + if (pCpuidInfo->Model == 4) + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_MGX; + } + if (pCpuidInfo->Family == 6) // Cyrix MII + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_M2; + + if (pCpuidInfo->ExtendedFeatures & CPU_EXT_3DNOW) + pSys->cpuInfo.caps |= NV0000_CTRL_SYSTEM_CPU_CAP_3DNOW; +} + +static void cpuidInfoTransmeta(OBJSYS *pSys, PCPUIDINFO pCpuidInfo) +{ + NvU32 eax, ebx, ecx, edx; + OBJOS *pOS = SYS_GET_OS(pSys); + + // + // Transmeta allows the OEM to program the foundry, family, 
model, and stepping. Arrrrgh... + // If this turns out to be a problem, we will need to use one of the extended CPUID calls to + // get the real info. + // + + // Docs were not real clear on which family/model. Just assume it's a Crusoe + pSys->cpuInfo.type = NV0000_CTRL_SYSTEM_CPU_TYPE_TM_CRUSOE; + + // + // Get the cache info. From preliminary TM8000 programming and config guide, 2/19/03 + // This appears to match AMD's cache CPUID definitions. + // + if (pOS->osNv_cpuid(pOS, 0x80000000, 0, &eax, &ebx, &ecx, &edx) && eax >= 0x80000006) + { + // L1 Cache + if (pOS->osNv_cpuid(pOS, 0x80000005, 0, &eax, &ebx, &ecx, &edx)) + { + pSys->cpuInfo.dataCacheLineSize = ecx & 0xff; + pSys->cpuInfo.l1DataCacheSize = ecx >> 24; + } + + // L2 Cache + if (pOS->osNv_cpuid(pOS, 0x80000006, 0, &eax, &ebx, &ecx, &edx)) + pSys->cpuInfo.l2DataCacheSize = ecx >> 16; + } +} + +#endif // defined(_M_IX86) || defined(NVCPU_X86) + +#endif // defined(_M_IX86) || defined(NVCPU_X86) || defined(AMD64) || defined(NVCPU_X86_64) + +#endif // NVCPU_IS_X86 || NVCPU_IS_X86_64 diff --git a/src/nvidia/src/kernel/platform/cpu_arm_def.h b/src/nvidia/src/kernel/platform/cpu_arm_def.h new file mode 100644 index 000000000..42e028a7f --- /dev/null +++ b/src/nvidia/src/kernel/platform/cpu_arm_def.h @@ -0,0 +1,127 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @file + * @brief includes defines used by cpu.c to get cpu information + */ + +#ifndef CPUID_ARM_DEF_H_ +#define CPUID_ARM_DEF_H_ + +#if defined(__GNUC__) + +#define CP_READ_CCSIDR_REGISTER() CP_READ_REGISTER(CP_CCSIDR) +#define CP_READ_MIDR_REGISTER() CP_READ_REGISTER(CP_MIDR) +#define CP_WRITE_CSSELR_REGISTER(val) CP_WRITE_REGISTER(CP_CSSELR, val) + +#else + +extern NvU32 CP_READ_CCSIDR_REGISTER(); +extern NvU32 CP_READ_MIDR_REGISTER(); +extern void CP_WRITE_CSSELR_REGISTER(NvU32 val); + +#endif // defined(__GNUC__) + +#define GET_BITMASK(f, v) ((v) >> DRF_SHIFT(CP_ ## f) & DRF_MASK(CP_ ## f)) + +#define CP_MIDR_IMPLEMENTER 31:24 +#define CP_MIDR_PRIMARY_PART_NUM 15:4 + +#define CP_MIDR_IMPLEMENTER_BROADCOM 0x42 +#define CP_MIDR_IMPLEMENTER_NVIDIA 0x4e +#define CP_MIDR_IMPLEMENTER_AMCC 0x50 +#define CP_MIDR_IMPLEMENTER_ARM 0x41 +#define CP_MIDR_IMPLEMENTER_MARVELL 0x43 +#define CP_MIDR_IMPLEMENTER_HUAWEI 0x48 +#define CP_MIDR_IMPLEMENTER_FUJITSU 0x46 +#define CP_MIDR_IMPLEMENTER_PHYTIUM 0x70 +#define CP_MIDR_IMPLEMENTER_AMPERE 0x81 + +#define CP_CSSELR_DATA_CACHE 0 +#define CP_CSSELR_INSTRUCTION_CACHE 1 +#define CP_CSSELR_DATA_CACHE_LEVEL2 2 + +#define CP_CCSIDR_CACHE_LINE_SIZE 2:0 +#define CP_CCSIDR_CACHE_ASSOCIATIVITY 12:3 +#define CP_CCSIDR_CACHE_NUM_SETS 27:13 + +#if defined(NVCPU_AARCH64) + +// Main ID Register +#define CP_MIDR "midr_el1" + +#define CP_MIDR_PRIMARY_PART_NUM_DENVER_1 0x0 +#define CP_MIDR_PRIMARY_PART_NUM_DENVER_2 0x3 +#define CP_MIDR_PRIMARY_PART_NUM_CARMEL 0x4 +#define CP_MIDR_PRIMARY_PART_NUM_ESTES_1 0x9 + +#define CP_MIDR_PRIMARY_PART_NUM_XGENE 0x0 +#define CP_MIDR_PRIMARY_PART_NUM_CORTEX_A57 0xd07 +#define CP_MIDR_PRIMARY_PART_NUM_CORTEX_A76 0xd0b +#define CP_MIDR_PRIMARY_PART_NUM_THUNDER_X2 0x0af +#define CP_MIDR_PRIMARY_PART_NUM_KUNPENG_920 0xd01 +#define CP_MIDR_PRIMARY_PART_NUM_BLUEFIELD 0xd08 +#define CP_MIDR_PRIMARY_PART_NUM_GRAVITRON2 0xd0c +#define CP_MIDR_PRIMARY_PART_NUM_A64FX 0x001 +#define CP_MIDR_PRIMARY_PART_NUM_FT2000 0x662 +#define CP_MIDR_PRIMARY_PART_NUM_S2500 0x663 +#define CP_MIDR_PRIMARY_PART_NUM_ALTRA 0x000 +#define CP_MIDR_PRIMARY_PART_NUM_OCTEON_CN96XX 0x0b2 +#define CP_MIDR_PRIMARY_PART_NUM_OCTEON_CN98XX 0x0b1 + +// Cache Size Identification Register +#define CP_CCSIDR "ccsidr_el1" + +// Cache Size Selection Register +#define CP_CSSELR "csselr_el1" + +#else // NVCPU_AARCH64 + +// Main ID Register +#define CP_MIDR "0" + +#define CP_MIDR_PRIMARY_PART_NUM_A9 0xC09 +#define CP_MIDR_PRIMARY_PART_NUM_A15 0xC0F + +// Cache Size Identification Register +#define CP_CCSIDR "1" + +#define CP_CCSIDR_CACHE_LINE_SIZE_8W 1 + +#define CP_CCSIDR_CACHE_NUM_SETS_16KB 0x7F +#define CP_CCSIDR_CACHE_NUM_SETS_32KB 0xFF +#define CP_CCSIDR_CACHE_NUM_SETS_64KB 0x1FF + +#define CP_CCSIDR_CACHE_NUM_SETS_A15_32KB 0xFF +#define CP_CCSIDR_CACHE_NUM_SETS_A15_512KB 0x1FF +#define CP_CCSIDR_CACHE_NUM_SETS_A15_1024KB 0x3FF +#define CP_CCSIDR_CACHE_NUM_SETS_A15_2048KB 0x7FF +#define CP_CCSIDR_CACHE_NUM_SETS_A15_4096KB 0xFFF + +// Cache Size Selection Register +#define CP_CSSELR "2" + +#endif // NVCPU_AARCH64 + +#endif /* CPUID_ARM_DEF_H_ */ diff --git a/src/nvidia/src/kernel/platform/hwbc.c b/src/nvidia/src/kernel/platform/hwbc.c new file mode 100644 index 000000000..9d2c29009 --- /dev/null +++ b/src/nvidia/src/kernel/platform/hwbc.c @@ -0,0 +1,3084 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/******************* Hardware Broadcast Routines ***************************\ +* * +* Initialization code for broadcast devices * +* * +****************************************************************************/ + +#include "platform/chipset/chipset.h" +#include "nvpcie.h" +#include "os/os.h" +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" + +#include "ctrl/ctrl2080/ctrl2080bus.h" + +#include "published/br03/dev_br03_xvd.h" +#include "published/br03/dev_br03_xvu.h" +#include "published/br04/br04_ref.h" + +#include "platform/hwbc.h" + +// +// These BR04 registers/bits/values are not properly defined in the headers +// + +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P390 0x0390 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P535 0x0535 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P710 0x0710 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P711 0x0711 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P712 0x0712 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P713 0x0713 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P737 0x0737 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P790 0x0790 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P797 0x0797 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P838 0x0838 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P881 0x0881 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P883 0x0883 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P884 0x0884 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P885 0x0885 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P894 0x0894 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P984 0x0984 +#define NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS 31:16 + +#ifndef NV_BR04_XVD_G2_PRIV_XP_LCTRL_2_REV2P0_COMPLIANCE_DIS_ONE +#define NV_BR04_XVD_G2_PRIV_XP_LCTRL_2_REV2P0_COMPLIANCE_DIS_ONE 1 +#endif + +#ifndef NV_XPU_PEX_PLL_CTL2 +#define NV_XPU_PEX_PLL_CTL2 0x00000E2C +#define NV_XPU_PEX_PLL_CTL2_PLL_CP_CNTL 22:20 +#define NV_XPU_PEX_PLL_CTL2_PLL_CP_CNTL_22P5UA 0x00000004 +#define NV_XPU_PEX_PLL_CTL2_PLL_CP_CNTL_30UA 0x00000007 +#endif + +#ifndef NV_XPD_PEX_PLL_CTL2 +#define NV_XPD_PEX_PLL_CTL2 0x00000F18 +#define NV_XPD_PEX_PLL_CTL2_PLL_CP_CNTL 22:20 +#define NV_XPD_PEX_PLL_CTL2_PLL_CP_CNTL_22P5UA 0x00000004 +#endif + +#ifndef NV_BR04_XVU_CTRIM_DP_1 +#define NV_BR04_XVU_CTRIM_DP_1 0x00000D04 +#define NV_BR04_XVU_CTRIM_DP_2 
0x00000D08 +#define NV_BR04_XVU_CTRIM_DP_3 0x00000D0C +#define NV_BR04_XVU_CTRIM_DP_4 0x00000D10 +#define NV_BR04_XVU_CTRIM_DP_5 0x00000D14 +#define NV_BR04_XVU_CTRIM_DP_6 0x00000D18 +#endif + +#ifndef NV_XPU_PEX_PAD_CTL_3 +#define NV_XPU_PEX_PAD_CTL_3 0x00000E0C +#define NV_XPU_PEX_PAD_CTL_3_TX_PEAK_R2_1C 23:20 +#define NV_XPU_PEX_PAD_CTL_3_TX_PEAK_R2_1C_22DB 0x00000007 +#define NV_XPU_PEX_PAD_CTL_3_TX_PEAK_R2_1C_36DB 0x0000000A +#define NV_XPU_PEX_PAD_CTL_3_TX_PEAK_R2_1C_6DB 0x0000000F +#endif + +#ifndef NV_BR04_XVU_CYA_BIT0_RSVD_28_DP0_DE_EMP_NEG_3P5_DB +#define NV_BR04_XVU_CYA_BIT0_RSVD_28_DP0_DE_EMP_NEG_3P5_DB 0x00000001 +#define NV_BR04_XVU_CYA_BIT0_RSVD_29_DP1_DE_EMP_NEG_3P5_DB 0x00000001 +#define NV_BR04_XVU_CYA_BIT0_RSVD_30_DP2_DE_EMP_NEG_3P5_DB 0x00000001 +#define NV_BR04_XVU_CYA_BIT0_RSVD_31_DP3_DE_EMP_NEG_3P5_DB 0x00000001 +#endif + +#ifndef NV_BR04_XVU_CYA_NIBBLE0_RSVD_0_UP0_ASPM_DISABLE +#define NV_BR04_XVU_CYA_NIBBLE0_RSVD_0_UP0_ASPM_DISABLE 0x00000008 +#define NV_BR04_XVU_CYA_NIBBLE0_RSVD_4_DPX_ASPM_DISABLE 0x00000008 +#endif + +#ifndef NV_BR04_XVD_G2_PRIV_XP_LCTRL_2_ADVERTISED_RATE_CHANGE_ONE +#define NV_BR04_XVD_G2_PRIV_XP_LCTRL_2_ADVERTISED_RATE_CHANGE_ONE 1 +#endif + +#ifndef NV_PES_XVU_ROM_ACCESS +#define NV_PES_XVU_ROM_ACCESS(i) (0x00001000+(i)*4) +#endif + +#define NV_BR04_FIRMWARE_SIGNATURE 0x42523034 +#define NV_PLX_FIRMWARE_SIGNATURE 0x0000005a + +// +// PLX PEX8747 definitions +// PLX PEX 8747 data book has info about the header definitions. +// + +#define PCI_VENDOR_ID_PLX 0x10B5 +#define PLX_DEVICE_ID_PEX8747 0x8747 + +#define NV_PLX_PEX8747_TRANSACTION_STATUS_BIT 0x00000004 + +#define NV_PLX_PEX8747_ROM_REVISION_ADDR 0x00000008 +#define NV_PLX_PEX8747_ROM_REVISION_ADDR_ID 7:0 + +#define NV_PLX_EEPROM_CONTROL_AND_STATUS_REG 0x00000260 +#define NV_PLX_EEPROM_CONTROL_AND_STATUS_REG_CTRL 15:0 +#define NV_PLX_EEPROM_CONTROL_AND_STATUS_REG_CTRL_ADDR 12:0 +#define NV_PLX_EEPROM_CONTROL_AND_STATUS_REG_CTRL_CMD 15:13 +#define NV_PLX_EEPROM_CONTROL_AND_STATUS_REG_CTRL_CMD_READ 0x00000003 +#define NV_PLX_EEPROM_CONTROL_AND_STATUS_REG_STATUS 23:16 +#define NV_PLX_EEPROM_CONTROL_AND_STATUS_REG_ADDR_WIDTH 23:22 +#define NV_PLX_EEPROM_CONTROL_AND_STATUS_REG_ADDR_WIDTH_2BYTE 0x00000002 + +#define NV_PLX_EEPROM_STATUS_REG_ADDR 0x00000262 +#define NV_PLX_EEPROM_STATUS_REG_ADDR_TRANSACTION 3:3 +#define NV_PLX_EEPROM_STATUS_REG_ADDR_TRANSACTION_COMPLETE 0x00000000 +#define NV_PLX_EEPROM_STATUS_REG_ADDR_TRANSACTION_NOT_COMPLETE 0x00000001 + +#define NV_PLX_EEPROM_BUFFER_ADDR 0x00000264 + +#define NV_PLX_EEPROM_DATA_ADDR_ZERO 0x00000000 +#define NV_PLX_EEPROM_DATA_ADDR_ZERO_SIGNATURE 7:0 +#define NV_PLX_EEPROM_DATA_ADDR_ZERO_CONFIGBYTE_COUNT 31:16 + +static NV_STATUS Plx_Pex8747_setupFunc(OBJHWBC *pPlx, OBJCL *pCl); +static NV_STATUS Plx_Pex8747_ChangeUpstreamBusSpeed(OBJHWBC *pPlx, OBJCL *pCl, NvU32 cmd); +static NV_STATUS Plx_Pex8747_GetUpstreamBusSpeed(OBJHWBC *pPlx, OBJCL *pCl, NvU32 *speed); +static RmPhysAddr Plx_Pex8747_GetBar0(OBJCL *pCl, OBJHWBC *pPlx); + +// +// static functions +// + +static OBJHWBC *objClFindUpperHWBC(OBJCL *, NBADDR, OBJHWBC *, RmPhysAddr); +static NV_STATUS objClSetupBR03(OBJHWBC *, OBJCL *); +static NvU32 objClGetBr03Bar0(OBJCL *, OBJHWBC *); +static NV_STATUS objClFreeBr03Bar0(OBJCL *, OBJHWBC *); + +static NV_STATUS objClResumeBridgeHWBC(OBJCL *, OBJHWBC *); + +static NV_STATUS Nvidia_BR04_ShiftAliasingRegisters(RmPhysAddr); +static RmPhysAddr Nvidia_BR04_GetBar0(OBJCL *, OBJHWBC *, NvS32); +static NV_STATUS Nvidia_BR04_setupFunc(OBJHWBC *, OBJCL 
*); +static NV_STATUS Nvidia_BR04_FindDpInfo(OBJCL *, OBJHWBC *, OBJGPU *); +static NV_STATUS Nvidia_BR04_ChangeUpstreamBusSpeed(OBJHWBC *pBR04, OBJCL *pCl, NvU32 cmd); +static NV_STATUS Nvidia_BR04_GetUpstreamBusSpeed(OBJHWBC *pBR04, OBJCL *pCl, NvU32 *speed); + +// +// This function examines a PLX firmware ROM to retrieve firmware details. +// +void +plxPex8747GetFirmwareInfo +( + OBJCL *pCl, + OBJGPU *pGpu, + OBJHWBC *pHWBC +) +{ + return; +} // end of Plx_Pex8747_GetFirmwareInfo() + +// +// Find the broadcast resource in the direct upper hierarchy of the port +// Return NULL if none is found +// Do not perform per-gpu memory tracking as pCl remains +// during the SLI transitions. +// +static POBJHWBC +objClFindUpperHWBC +( + OBJCL *pCl, + NBADDR port, + OBJHWBC *child, + RmPhysAddr currentGpuPhysAddr +) +{ + NvU16 vendorID, deviceID; + void *handle; + OBJHWBC *pHWBC = NULL; + + if (!port.valid) + { + goto objClFindUpperHWBC_exit; + } + + handle = osPciInitHandle(port.domain, port.bus, port.device, port.func, &vendorID, &deviceID); + if (!handle) + { + goto objClFindUpperHWBC_exit; + } + + if ((vendorID == PCI_VENDOR_ID_PLX) && (deviceID == PLX_DEVICE_ID_PEX8747)) + { + pHWBC = portMemAllocNonPaged(sizeof(OBJHWBC)); + if (pHWBC == NULL) + { + NV_ASSERT_OR_GOTO((pHWBC != NULL), objClFindUpperHWBC_exit); + } + portMemSet((void *)pHWBC, 0, sizeof(OBJHWBC)); + + // dpForGpuInstance array must be initialized to -1s. + portMemSet((void *)pHWBC->dpForGpuInstance, 0xff, + NV_MAX_DEVICES * sizeof(NvS8)); + + pHWBC->bcRes = HWBC_PLX_PEX8747; + + // Find upstream port + pHWBC->ctrlDev.domain = port.domain; + pHWBC->ctrlDev.handle = clFindP2PBrdg(pCl, port.domain, port.bus, + &pHWBC->ctrlDev.bus, + &pHWBC->ctrlDev.device, + &pHWBC->ctrlDev.func, + &vendorID, &deviceID); + // check everything + NV_ASSERT(pHWBC->ctrlDev.handle); + NV_ASSERT(vendorID == PCI_VENDOR_ID_PLX && + deviceID == PLX_DEVICE_ID_PEX8747); + NV_ASSERT(osPciReadWord(pHWBC->ctrlDev.handle, + PCI_COMMON_CLASS_SUBCLASS) == + PCI_COMMON_CLASS_SUBBASECLASS_P2P); + NV_ASSERT(osPciReadByte(pHWBC->ctrlDev.handle, + PCI_TYPE_1_SECONDARY_BUS_NUMBER) == port.bus); + + pHWBC->ctrlDev.valid = 1; + + // set upstream port info + pHWBC->domain = port.domain; + pHWBC->minBus = port.bus; + pHWBC->maxBus = osPciReadByte(pHWBC->ctrlDev.handle, + PCI_TYPE_1_SUBORDINATE_BUS_NUMBER); + + pHWBC->pFirstChild = child; + pHWBC->hasPlxFirmwareInfo = NV_FALSE; + + pHWBC->hwbcId = gpuGenerate32BitId(pHWBC->ctrlDev.domain, pHWBC->ctrlDev.bus, pHWBC->ctrlDev.device | pHWBC->ctrlDev.func); + + goto objClFindUpperHWBC_exit; + } + + if (vendorID != PCI_VENDOR_ID_NVIDIA) + { + goto objClFindUpperHWBC_exit; + } + + if (deviceID == NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03) // BR03 + { + pHWBC = portMemAllocNonPaged(sizeof(OBJHWBC)); + if (pHWBC == NULL) + { + NV_ASSERT_OR_GOTO((pHWBC != NULL), objClFindUpperHWBC_exit); + } + portMemSet((void *)pHWBC, 0, sizeof(OBJHWBC)); + + pHWBC->bcRes = HWBC_NVIDIA_BR03; + + pHWBC->ctrlDev.domain = port.domain; + pHWBC->ctrlDev.handle = clFindP2PBrdg(pCl, port.domain, port.bus, + &pHWBC->ctrlDev.bus, + &pHWBC->ctrlDev.device, + &pHWBC->ctrlDev.func, + &vendorID, &deviceID); + // check everything + NV_ASSERT(pHWBC->ctrlDev.handle); + NV_ASSERT(vendorID == PCI_VENDOR_ID_NVIDIA && deviceID == NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03); + NV_ASSERT(osPciReadWord(pHWBC->ctrlDev.handle, PCI_COMMON_CLASS_SUBCLASS) == PCI_COMMON_CLASS_SUBBASECLASS_P2P); + NV_ASSERT(osPciReadByte(pHWBC->ctrlDev.handle, PCI_TYPE_1_SECONDARY_BUS_NUMBER) == port.bus); + 
pHWBC->ctrlDev.valid = 1; + + // set upstream port info + pHWBC->domain = port.domain; + pHWBC->minBus = port.bus; + pHWBC->maxBus = osPciReadByte(pHWBC->ctrlDev.handle, PCI_TYPE_1_SUBORDINATE_BUS_NUMBER); + + pHWBC->pFirstChild = child; + pHWBC->gpuPhysAddr = currentGpuPhysAddr; + + pHWBC->hwbcId = gpuGenerate32BitId(pHWBC->ctrlDev.domain, pHWBC->ctrlDev.bus, pHWBC->ctrlDev.device | pHWBC->ctrlDev.func); + + if (!pCl->getProperty(pCl, PDB_PROP_CL_DISABLE_BR03_FLOW_CONTROL)) + objClSetupBR03(pHWBC, pCl); + + goto objClFindUpperHWBC_exit; + } + + if (IS_DEVID_BR04(deviceID)) + { + pHWBC = portMemAllocNonPaged(sizeof(OBJHWBC)); + if (pHWBC == NULL) + { + NV_ASSERT_OR_GOTO((pHWBC != NULL), objClFindUpperHWBC_exit); + } + portMemSet((void *)pHWBC, 0, sizeof(OBJHWBC)); + + // dpForGpuInstance array must be initialized to -1s. + portMemSet((void *)pHWBC->dpForGpuInstance, 0xff, + NV_MAX_DEVICES * sizeof(NvS8)); + + pHWBC->bcRes = HWBC_NVIDIA_BR04; + + // Find upstream port + pHWBC->ctrlDev.domain = port.domain; + pHWBC->ctrlDev.handle = clFindP2PBrdg(pCl, port.domain, port.bus, + &pHWBC->ctrlDev.bus, + &pHWBC->ctrlDev.device, + &pHWBC->ctrlDev.func, + &vendorID, &deviceID); + // check everything + NV_ASSERT(pHWBC->ctrlDev.handle); + NV_ASSERT(vendorID == PCI_VENDOR_ID_NVIDIA && + deviceID >= NV_BR04_XVU_DEV_ID_DEVICE_ID_BR04_0 && + deviceID <= NV_BR04_XVU_DEV_ID_DEVICE_ID_BR04_15); + NV_ASSERT(osPciReadWord(pHWBC->ctrlDev.handle, + PCI_COMMON_CLASS_SUBCLASS) == + PCI_COMMON_CLASS_SUBBASECLASS_P2P); + NV_ASSERT(osPciReadByte(pHWBC->ctrlDev.handle, + PCI_TYPE_1_SECONDARY_BUS_NUMBER) == port.bus); + + pHWBC->ctrlDev.valid = 1; + + // set upstream port info + pHWBC->domain = port.domain; + pHWBC->minBus = port.bus; + pHWBC->maxBus = osPciReadByte(pHWBC->ctrlDev.handle, + PCI_TYPE_1_SUBORDINATE_BUS_NUMBER); + + pHWBC->pFirstChild = child; + pHWBC->hasPlxFirmwareInfo = NV_FALSE; + + pHWBC->hwbcId = gpuGenerate32BitId(pHWBC->ctrlDev.domain, pHWBC->ctrlDev.bus, pHWBC->ctrlDev.device | pHWBC->ctrlDev.func); + + pCl->br04HwbcCount++; + + goto objClFindUpperHWBC_exit; + } + + handle = osPciInitHandle(port.domain, port.bus, 0, 0, &vendorID, &deviceID); + + if (!handle || vendorID != PCI_VENDOR_ID_NVIDIA) + { + NV_ASSERT(pHWBC == NULL); + goto objClFindUpperHWBC_exit; + } + + NV_ASSERT(pHWBC == NULL); + +objClFindUpperHWBC_exit: + return pHWBC; +} + +// +// Find all Broadcast resources upstream of the GPU. +// This could be BR03, BR04, PLX, or chipset support. 
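+// Bridges that are found are kept in a tree rooted at pCl->pHWBC and linked
+// through pFirstChild / pSibling / pParent. The code below first walks that
+// tree top-down looking for an entry whose bus range already covers the GPU,
+// then walks the PCI hierarchy bottom-up from the GPU's upstream port, adding
+// any bridges that are not yet recorded, and finally runs the per-bridge
+// setup functions.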
+// +NvBool +objClSetPcieHWBC +( + OBJGPU *pGpu, + OBJCL *pCl +) +{ + NBADDR port; + NvU16 vendorID, deviceID; + OBJHWBC **root = NULL, *father = NULL, *now = NULL, *next = NULL; + + + // check if the upstream port is valid + if (!pGpu || !pGpu->gpuClData.upstreamPort.addr.valid) + { + return NV_FALSE; + } + + // for now all BC HW are from NVIDIA or PLX + if ((pGpu->gpuClData.upstreamPort.VendorID != PCI_VENDOR_ID_NVIDIA) && + (pGpu->gpuClData.upstreamPort.VendorID != PCI_VENDOR_ID_PLX)) + { + return NV_FALSE; + } + + if (pGpu->gpuClData.upstreamPort.DeviceID == + NV_BR03_XVU_DEV_ID_DEVICE_ID_BR03) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BEHIND_BR03, NV_TRUE); + } + else if (IS_DEVID_BR04(pGpu->gpuClData.upstreamPort.DeviceID)) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BEHIND_BR04, NV_TRUE); + } + + port = pGpu->gpuClData.upstreamPort.addr; + + // now try to search from up to down + root = &pCl->pHWBC; + while (*root) + { + if (((*root)->domain == port.domain) && ((*root)->minBus <= port.bus) && ((*root)->maxBus >= port.bus)) // bus in range + { + father = *root; + if (father->bcRes == HWBC_NVIDIA_BR03) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BEHIND_BR03, NV_TRUE); + } + else if (father->bcRes == HWBC_NVIDIA_BR04) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BEHIND_BR04, NV_TRUE); + // Determine which downstream port of the BR04 the GPU is + // behind. + Nvidia_BR04_FindDpInfo(pCl, father, pGpu); + } + root = &(*root)->pFirstChild; + } + else + { + root = &(*root)->pSibling; + } + } + + + // search from down to up + do + { + if (father && (father->domain == port.domain) && (father->minBus == port.bus)) + { + if (now) + { + now->pParent = father; + } + break; // got it in the list + } + + next = objClFindUpperHWBC(pCl, port, now, + pGpu->busInfo.gpuPhysAddr); + + if (!next) + break; // nothing else + + if (now) + { + now->pParent = next; + } + now = next; + + if (now->bcRes == HWBC_NVIDIA_BR03) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BEHIND_BR03, NV_TRUE); + } + else if (now->bcRes == HWBC_NVIDIA_BR04) + { + pGpu->setProperty(pGpu, PDB_PROP_GPU_BEHIND_BR04, NV_TRUE); + Nvidia_BR04_FindDpInfo(pCl, now, pGpu); + } + + port.domain = now->ctrlDev.domain; + port.handle = clFindP2PBrdg(pCl, now->ctrlDev.domain, now->ctrlDev.bus, + &port.bus, + &port.device, + &port.func, + &vendorID, + &deviceID); + + } while (port.handle && (vendorID == PCI_VENDOR_ID_NVIDIA || vendorID == PCI_VENDOR_ID_PLX)); + + *root = now; + + // search from top to bottom again and call setup on the bridges + root = &pCl->pHWBC; + port = pGpu->gpuClData.upstreamPort.addr; + + while (*root) + { + if (((*root)->domain == port.domain) && ((*root)->minBus <= port.bus) && ((*root)->maxBus >= port.bus)) // bus in range + { + if ((*root)->bcRes == HWBC_NVIDIA_BR04) + { + Nvidia_BR04_setupFunc(*root, pCl); + } + else if ((*root)->bcRes == HWBC_PLX_PEX8747) + { + Plx_Pex8747_setupFunc(*root, pCl); + } + root = &(*root)->pFirstChild; + } + else + root = &(*root)->pSibling; + } + + return NV_TRUE; +} + +// +// We can access BR03 registers through 3 different ways +// 1. BR03 BAR0 +// 2. PCI Express Enhanced Configuration Space +// 3. 
Setup BR03 Register Aliasing then through GPU register space +// +// This function unifies all the above approaches +// and returns a 32bit physical address for the BR03 registers +// +// objClFreeBr03Bar0 must be called after we finish setting the registers +// +static NvU32 +objClGetBr03Bar0 +( + OBJCL *pCl, + OBJHWBC *pBR03 +) +{ + NvU8 Rev; + NvU32 bar0 = 0; + + if (!pBR03 || + pBR03->bcRes != HWBC_NVIDIA_BR03 || + !pBR03->ctrlDev.valid) + return 0; + + Rev = osPciReadByte(pBR03->ctrlDev.handle, NV_BR03_XVU_REV_CC); + + if (Rev != 0xA1) // Rev A1 BAR0 is broken + bar0 = osPciReadDword(pBR03->ctrlDev.handle, PCI_BASE_ADDRESS_0) & ~RM_PAGE_MASK; + + // + // Warning: Future OS may forbid us to map the PCIE enhanced configuration space directly + // + if (!bar0 && + pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && + pCl->pPcieConfigSpaceBase) + { + RmPhysAddr pcieConfigSpaceBase; + + pcieConfigSpaceBase = clFindPcieConfigSpaceBase(pCl, + pBR03->ctrlDev.domain, + pBR03->ctrlDev.bus); + if ((pcieConfigSpaceBase) && + ((pcieConfigSpaceBase & ~((RmPhysAddr)(0xFFFFFFFF))) == 0 )) + { + // + // if pcieConfigSpaceBase is limited to under 4GB then the cast is fine. + // + bar0 = (NvU32)pcieConfigSpaceBase | pBR03->ctrlDev.bus << PCIE_BUS_SHIFT; + } + } + + if (!bar0) // we can not setup any registers + return bar0; + + // + // Implement Register Aliasing + // gpuPhysAddr has to be set to a 32bits address + // + if (pBR03->gpuPhysAddr && !(pBR03->gpuPhysAddr>>32)) + { + volatile NvU32 *pData; + NvU32 addr = (NvU32)pBR03->gpuPhysAddr + BR03_GPU_REGISTER_ALIAS_OFFSET; + + pData = osMapKernelSpace(bar0, RM_PAGE_SIZE, NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + + if (!pData) + return 0; + + // must not be enabled already + if (REF_VAL(NV_BR03_XVU_MCC_REG_ALIAS_ACCESS, pData[NV_BR03_XVU_MCC_REG_ALIAS / sizeof(*pData)]) != 0) + + { + osUnmapKernelSpace((void*)pData, RM_PAGE_SIZE); + return HWBC_ERROR_BR03_INVALID_BAR0; + } + + // enable the register aliasing and setup the address + pData[NV_BR03_XVU_MCC_REG_ALIAS / sizeof(*pData)] = + (DRF_SHIFTMASK(NV_BR03_XVU_MCC_REG_ALIAS_BASE_ADDRESS) & addr) | + REF_NUM(NV_BR03_XVU_MCC_REG_ALIAS_ACCESS, NV_BR03_XVU_MCC_REG_ALIAS_ACCESS_ENABLED); + + osUnmapKernelSpace((void*) pData, RM_PAGE_SIZE); + + return addr; + } + + return bar0; +} + +static NV_STATUS +objClFreeBr03Bar0 +( + OBJCL *pCl, + OBJHWBC *pBR03 +) +{ + NvU8 Rev; + NvU32 bar0 = 0; + volatile NvU32 *pData; + + if (!pBR03 || + pBR03->bcRes != HWBC_NVIDIA_BR03 || + !pBR03->ctrlDev.valid) + return NV_ERR_GENERIC; + + Rev = osPciReadByte(pBR03->ctrlDev.handle, NV_BR03_XVU_REV_CC); + + if (Rev != 0xA1) // Rev A1 BAR0 is broken + bar0 = osPciReadDword(pBR03->ctrlDev.handle, PCI_BASE_ADDRESS_0) & ~RM_PAGE_MASK; + + // + // Warning: Future OS may forbid us to map the PCIE enhanced configuration space directly + // + if (!bar0 && + pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && + pCl->pPcieConfigSpaceBase) + { + RmPhysAddr pcieConfigSpaceBase; + + pcieConfigSpaceBase = clFindPcieConfigSpaceBase(pCl, + pBR03->ctrlDev.domain, + pBR03->ctrlDev.bus); + if ((pcieConfigSpaceBase) && + ((pcieConfigSpaceBase & ~((RmPhysAddr)(0xFFFFFFFF))) == 0 )) + { + // If pcieConfigSpaceBase is limited under 4GB then the cast is fine. 
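+            // Illustrative example (addresses hypothetical): assuming the
+            // standard ECAM layout, where PCIE_BUS_SHIFT places the bus number
+            // at bits 27:20, a config space base of 0xE0000000 and bus 0x05
+            // give bar0 = 0xE0500000, the start of that bus's 1MB slice of
+            // extended configuration space.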
+ bar0 = (NvU32)pcieConfigSpaceBase | pBR03->ctrlDev.bus << PCIE_BUS_SHIFT; + } + } + + if (!bar0) // we can not setup any registers + return NV_ERR_GENERIC; + + // check if register alias has been enabled + pData = osMapKernelSpace(bar0, RM_PAGE_SIZE, NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + + if (!pData) + return NV_ERR_GENERIC; + + // if enabled then disable it + if (REF_VAL(NV_BR03_XVU_MCC_REG_ALIAS_ACCESS, pData[NV_BR03_XVU_MCC_REG_ALIAS / sizeof(*pData)])) + { + pData[NV_BR03_XVU_MCC_REG_ALIAS / sizeof(*pData)] = + REF_NUM(NV_BR03_XVU_MCC_REG_ALIAS_ACCESS, NV_BR03_XVU_MCC_REG_ALIAS_ACCESS_DISABLED); + } + + osUnmapKernelSpace((void*) pData, RM_PAGE_SIZE); + + return NV_OK; +} + +// +// Setup the registers for BR03 +// +static NV_STATUS +objClSetupBR03 +( + OBJHWBC *pBR03, + OBJCL *pCl +) +{ + void *handle; + NvU16 vendorID, deviceID; + volatile NvU32 *pData; + NvU32 bar0 = 0, addr, dev, dport, sport, type; + NvU32 bufferSize[4][3]; // credit limit + NvU8 needRes[4] = {1, 0, 0, 0}, total = 0, totalDevices = 3; + NvU32 laneWidth[2] = {0, 0}; + + // check if it is BR03 + if (!pBR03 || pBR03->bcRes != HWBC_NVIDIA_BR03) + return NV_ERR_GENERIC; + + if (!pBR03->ctrlDev.handle) + return NV_ERR_GENERIC; + + bar0 = objClGetBr03Bar0(pCl, pBR03); + + if (!bar0) + { + // In this case, use specific error code + if (bar0 == HWBC_ERROR_BR03_INVALID_BAR0) + return NV_ERR_INVALID_DEVICE; + else + return NV_ERR_GENERIC; + } + + + pData = osMapKernelSpace(bar0, (3 * RM_PAGE_SIZE), NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + + if (!pData) + return NV_ERR_GENERIC; + + if (pData[NV_BR03_XVU_INT_FLOW_CTL / sizeof(*pData)]) + { + NV_PRINTF(LEVEL_WARNING, + "*** BR03 registers has already been programmed!\n"); + osUnmapKernelSpace((void*) pData, 3 * RM_PAGE_SIZE); + objClFreeBr03Bar0(pCl, pBR03); + return NV_OK; + } + + // check which devices are connected + for (dev = 0; dev < 2; dev++) + { + NvU8 bus; + handle = osPciInitHandle(pBR03->domain, pBR03->minBus, (NvU8)dev, 0, &vendorID, &deviceID); + if (!handle) continue; + + // read laneWidth + laneWidth[dev] = CL_MAX_LINK_WIDTH(osPciReadDword(handle, NV_BR03_XVD_LINK_CTRLSTAT) >> 16); + + bus = (NvU8)osPciReadByte(handle, PCI_TYPE_1_SECONDARY_BUS_NUMBER); + vendorID = PCI_INVALID_VENDORID; + handle = osPciInitHandle(pBR03->domain, bus, 0, 0, &vendorID, &deviceID); + // we only allocated resource for NVIDIA device + if (handle && vendorID == PCI_VENDOR_ID_NVIDIA) + { + needRes[dev+1] = 1; + total++; + } else + if (!handle || vendorID == PCI_INVALID_VENDORID) + totalDevices--; + } + + NV_ASSERT(total); + + // work around for INT_FLOW_CTL state updating bug + if (totalDevices == 2) // one device is not connected + { + for (addr = NV_BR03_XVU_INT_FLOW_CTL_DP0_TO_UP0_CPL / sizeof(*pData); + addr <= NV_BR03_XVU_INT_FLOW_CTL_UP0_TO_MH0_PW / sizeof(*pData); + addr ++) + { + if (pData[addr]) // if any one of them get updated? 
+ { + NV_PRINTF(LEVEL_WARNING, + "*** BR03 registers has already been programmed (one device workaround)!\n"); + osUnmapKernelSpace((void*) pData, 3 * RM_PAGE_SIZE); + objClFreeBr03Bar0(pCl, pBR03); + return NV_OK; + } + } + } + + // Try to read all the credit limit first + addr = NV_BR03_XVU_UP0_INT_BUFSIZE_CPL / sizeof(*pData); + for (sport=0; sport<4; sport++) // for up0, dp0, dp1, mh0 separately + { + for (type=0; type<3; type++) // for cpl, np, pw separately + { + bufferSize[sport][type] = pData[addr]; + addr ++; + } + addr ++; + } + + NV_PRINTF(LEVEL_INFO, "*** Setup BR03 registers!\n"); + + // Now set it + for (sport=0; sport<4; sport++) // source up0, dp0, dp1 or mh0 + { + NvU8 alloc = 0; + if (!needRes[sport]) + continue; + + // register address + addr = ((sport + 3) % 4 * 0x30 + NV_BR03_XVU_INT_FLOW_CTL_DP0_TO_UP0_CPL) / sizeof(*pData); + + for (dport=0; dport<4; dport++) // dest up0, dp0, dp1 or mh0 + { + if (sport == dport) + continue; + if (needRes[dport]) + { + for (type=0; type<3; type++) // type cpl, np, pw + { + NvS16 theader, tdata, header, data; + theader = (NvS16)REF_VAL(NV_BR03_XVU_UP0_INT_BUFSIZE_CPL_H, bufferSize[sport][type]) - totalDevices; + tdata = (NvS16)REF_VAL(NV_BR03_XVU_UP0_INT_BUFSIZE_CPL_D, bufferSize[sport][type]) - totalDevices * 8; + + if (type == 1) // np data is always 0 + { + theader += 1; // MH0 doesn't use any NPH credits + tdata = 0; + } + + NV_ASSERT(theader >= 0 && tdata >= 0); + + header = theader / total + (theader % total > alloc); + data = tdata / total + (tdata % total > alloc); + + pData[addr + type] = REF_NUM(NV_BR03_XVU_UP0_INT_BUFSIZE_CPL_H, header) | + REF_NUM(NV_BR03_XVU_UP0_INT_BUFSIZE_CPL_D, data); + } + alloc++; + } + addr += 4; + } + } + + // now update the change + // BR03_REG32(pData, INT_FLOW_CTL) = 1; + pData[NV_BR03_XVU_INT_FLOW_CTL / sizeof(*pData)] = 1; + + // ITX Allocation + // BR03_REG32(pData, ITX_ALLOCATION) = + pData[NV_BR03_XVU_ITX_ALLOCATION / sizeof(*pData)] = + REF_NUM(NV_BR03_XVU_ITX_ALLOCATION_UP0, 6) | + REF_NUM(NV_BR03_XVU_ITX_ALLOCATION_DP0, 4) | + REF_NUM(NV_BR03_XVU_ITX_ALLOCATION_DP1, 4) | + REF_NUM(NV_BR03_XVU_ITX_ALLOCATION_MH0, 2); + + // OPPORTUNISTIC_ACK and OPPORTUNISTIC_UPDATE_FC for UP0, DP0 and DP1 + pData[NV_BR03_XVU_XP_0 / sizeof(*pData)] |= + REF_NUM(NV_BR03_XVU_XP_0_OPPORTUNISTIC_ACK, 1) | + REF_NUM(NV_BR03_XVU_XP_0_OPPORTUNISTIC_UPDATE_FC, 1); + pData[(NV_BR03_XVD_XP_0 + RM_PAGE_SIZE) / sizeof(*pData)] |= + REF_NUM(NV_BR03_XVD_XP_0_OPPORTUNISTIC_ACK, 1) | + REF_NUM(NV_BR03_XVD_XP_0_OPPORTUNISTIC_UPDATE_FC, 1); + pData[(NV_BR03_XVD_XP_0 + 2 * RM_PAGE_SIZE) / sizeof(*pData)] |= + REF_NUM(NV_BR03_XVD_XP_0_OPPORTUNISTIC_ACK, 1) | + REF_NUM(NV_BR03_XVD_XP_0_OPPORTUNISTIC_UPDATE_FC, 1); + + // both 8 lanes, mini-dagwood + // program UPDATE_FC_THRESHOLD to optimize flow control settings + if (laneWidth[0] == 8 && laneWidth[1] == 8) + { + pData[(NV_BR03_XVD_XP_0 + RM_PAGE_SIZE) / sizeof(*pData)] = + FLD_SET_DRF_NUM(_BR03_XVD, _XP_0, _UPDATE_FC_THRESHOLD, 0x19, pData[(NV_BR03_XVD_XP_0 + RM_PAGE_SIZE) / sizeof(*pData)]); + pData[(NV_BR03_XVD_XP_0 + 2 * RM_PAGE_SIZE) / sizeof(*pData)] = + FLD_SET_DRF_NUM(_BR03_XVD, _XP_0, _UPDATE_FC_THRESHOLD, 0x18, pData[(NV_BR03_XVD_XP_0 + 2 * RM_PAGE_SIZE) / sizeof(*pData)]); + } + + osUnmapKernelSpace((void*) pData, 3 * RM_PAGE_SIZE); + objClFreeBr03Bar0(pCl, pBR03); + + return NV_OK; +} + + + +NV_STATUS +clResumeBridge_IMPL +( + OBJCL *pCl +) +{ + NV_STATUS status = NV_OK; + NvBool bFirstGpuResuming = NV_TRUE; + OBJGPU *pGpu = NULL; + NvU32 gpuCount, gpuMask, 
gpuIndex, gpuResumingCount; + + if (!pCl->pHWBC) + { + return NV_OK; + } + // + // We set the bridge when the 1st GPU is resuming + // + gpuIndex = gpuResumingCount = 0; + gpumgrGetGpuAttachInfo(&gpuCount, &gpuMask); + while (((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL) && + (bFirstGpuResuming == NV_TRUE)) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_PM_RESUME_CODEPATH)) + { + if (++gpuResumingCount > 1) + { + bFirstGpuResuming = NV_FALSE; + } + } + else if (gpuIsGpuFullPower(pGpu)) + { + bFirstGpuResuming = NV_FALSE; + } + } + + if (bFirstGpuResuming == NV_TRUE) + { + status = objClResumeBridgeHWBC(pCl, pCl->pHWBC); + } + + return status; +} + +static NV_STATUS +objClResumeBridgeHWBC +( + OBJCL *pCl, + OBJHWBC *pHWBC +) +{ + NV_STATUS status = NV_OK; + + if (pHWBC->bcRes == HWBC_PLX_PEX8747) + { + status = Plx_Pex8747_setupFunc(pHWBC, pCl); + } + + if (status != NV_OK) + { + return status; + } + + if (pHWBC->pSibling) + { + status = objClResumeBridgeHWBC(pCl, pHWBC->pSibling); + if (status != NV_OK) + { + return status; + } + } + + if (pHWBC->pFirstChild) + { + status = objClResumeBridgeHWBC(pCl, pHWBC->pFirstChild); + } + + return status; +} + + +NV_STATUS +clChangeUpstreamBusSpeed_IMPL +( + NvU8 primaryBus, + OBJCL *pCl, + NvU32 cmd +) +{ + OBJHWBC *target = pCl->pHWBC; + + while (target) + { + if (target->ctrlDev.bus == primaryBus && !target->ctrlDev.device && !target->ctrlDev.func) + { + // We found it + break; + } + + if (target->minBus <= primaryBus && target->maxBus >= primaryBus) // bus in range + { + target = target->pFirstChild; + } + else + { + target = target->pSibling; + } + } + + if (!target) + { + NV_PRINTF(LEVEL_ERROR, + "Cannot find HWBC with given primaryBus.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // father should point to the desired HWBC device + switch(target->bcRes) + { + case HWBC_NVIDIA_BR04: + return Nvidia_BR04_ChangeUpstreamBusSpeed(target, pCl, cmd); + case HWBC_PLX_PEX8747: + return Plx_Pex8747_ChangeUpstreamBusSpeed(target, pCl, cmd); + default: + DBG_BREAKPOINT(); // Unsupported device for this functionality + + } + + return NV_ERR_INVALID_ARGUMENT; +} + +NV_STATUS +clGetUpstreamBusSpeed_IMPL +( + NvU8 primaryBus, + OBJCL *pCl, + NvU32 *speed +) +{ + OBJHWBC *target = pCl->pHWBC; + + while (target) + { + if (target->ctrlDev.bus == primaryBus && !target->ctrlDev.device && !target->ctrlDev.func) + { + // We found it + break; + } + + if (target->minBus <= primaryBus && target->maxBus >= primaryBus) // bus in range + { + target = target->pFirstChild; + } + else + { + target = target->pSibling; + } + } + + if (!target) + { + NV_PRINTF(LEVEL_ERROR, + "Cannot find HWBC with given primaryBus.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // father should point to the desired HWBC device + switch(target->bcRes) + { + case HWBC_NVIDIA_BR04: + return Nvidia_BR04_GetUpstreamBusSpeed(target, pCl, speed); + case HWBC_PLX_PEX8747: + return Plx_Pex8747_GetUpstreamBusSpeed(target, pCl, speed); + default: + DBG_BREAKPOINT(); // Unsupported device for this functionality + + } + + return NV_ERR_GENERIC; +} + +NV_STATUS +clHWBCGetUpstreamBAR0_IMPL +( + NvU8 primaryBus, + OBJCL *pCl, + RmPhysAddr *pBAR0 +) +{ + OBJHWBC *target = pCl->pHWBC; + + while (target) + { + if (target->ctrlDev.bus == primaryBus && !target->ctrlDev.device && !target->ctrlDev.func) + { + // We found it + break; + } + + if (target->minBus <= primaryBus && target->maxBus >= primaryBus) // bus in range + { + target = target->pFirstChild; + } + else + { + target = target->pSibling; + } 
+ } + + if (!target) + { + NV_PRINTF(LEVEL_ERROR, "Cannot find HWBC with given primaryBus.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // target should point to the desired HWBC device + switch(target->bcRes) + { + case HWBC_NVIDIA_BR04: + *pBAR0 = Nvidia_BR04_GetBar0(pCl, target, -1); + if (0 == *pBAR0) + { + return NV_ERR_GENERIC; + } + + return NV_OK; + + case HWBC_PLX_PEX8747: + *pBAR0 = Plx_Pex8747_GetBar0(pCl, target); + if (0 == *pBAR0) + { + return NV_ERR_GENERIC; + } + + return NV_OK; + + default: + DBG_BREAKPOINT(); // Unsupported device for this functionality + + } + + return NV_ERR_GENERIC; +} + + +// +// Determine whether a set of GPUs are connected under a conjoint BR04 heirarchy +// +// If flat is NV_TRUE test whether the GPUs are under the same BR04. +// +// If devId is not 0 test for a specific device ID. +// +NvBool +clFindBR04_IMPL +( + OBJGPU **pGpus, + NvU32 numGpus, + NvBool flat, + NvU32 devId, + OBJCL *pCl +) +{ + NvU32 i; + + // Sanity check inputs + if (0 == numGpus) + return NV_FALSE; + + for (i = 0; i < numGpus; i++) + { + if (NULL == pGpus[i]) + return NV_FALSE; + } + + // + // Make sure all GPUs' upstream ports are BR04. Check also if root port is + // the same as first GPU's. If all root ports are the same we should have + // a conjunct BR04 heirarchy. + // + // If we're testing for a "flat" heirarchy, where all GPUs are attached + // directly to the same BR04, check board downstream port. + // + for (i = 0; i < numGpus; i++) + { + // Testing if upstream port PCI addresses are valid won't work, because + // those ports may be hidden and not have PCI addresses. They should + // still have VendorID/DevID set. + + if (PCI_VENDOR_ID_NVIDIA != pGpus[i]->gpuClData.upstreamPort.VendorID || + NV_BR04_XVU_DEV_ID_DEVICE_ID_BR04_0 > + pGpus[i]->gpuClData.upstreamPort.DeviceID || + NV_BR04_XVU_DEV_ID_DEVICE_ID_BR04_15 < + pGpus[i]->gpuClData.upstreamPort.DeviceID || + (devId && devId != pGpus[i]->gpuClData.upstreamPort.DeviceID) + ) + { + return NV_FALSE; + } + + if (flat) + { + if (!pGpus[i]->gpuClData.boardDownstreamPort.addr.valid) + return NV_FALSE; + + if (pGpus[i]->gpuClData.boardDownstreamPort.addr.bus != + pGpus[0]->gpuClData.boardDownstreamPort.addr.bus) + return NV_FALSE; + } + else + { + if (!pGpus[i]->gpuClData.rootPort.addr.valid) + return NV_FALSE; + } + } + + return NV_TRUE; +} + +// +// Nvidia_BR04_ShiftAliasingRegisters : Remaps the aliasing registers for +// the targetted BR04 to the first available empty slot +// +static NV_STATUS +Nvidia_BR04_ShiftAliasingRegisters +( + RmPhysAddr addr +) +{ + OBJGPU *pGpu; + NvU32 gpuMask, gpuInstance; + NvU32 data; + volatile NvU32 *pData = NULL; + NvU32 minBus, maxBus, i; + NvBool Shifted = NV_FALSE; + + pData = osMapKernelSpace(addr, + NV_BR04_XVU_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + if (pData == NULL) + { + return NV_ERR_GENERIC; + } + + gpumgrGetGpuAttachInfo(NULL, &gpuMask); + gpuInstance = 0; + + minBus = DRF_VAL(_BR04_XVU, _BUS, _SEC_NUMBER, + pData[NV_BR04_XVU_BUS / sizeof(*pData)]); + maxBus = DRF_VAL(_BR04_XVU, _BUS, _SUB_NUMBER, + pData[NV_BR04_XVU_BUS / sizeof(*pData)]); + + + while (!Shifted && ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)))) + { + // set up the aliasing in a downstream GPU + + if (gpuGetBus(pGpu) >= minBus && + gpuGetBus(pGpu) <= maxBus) + { + // Check in each of the two slots in the BR04 window. 
+ for (i = 0; i < 2; i++) + { + data = GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_DEV_ID); + if (!IS_DEVID_BR04(DRF_VAL(_BR04_XVU, _DEV_ID, _DEVICE_ID, + data))) + { + // set up the aliasing in the first empty slot + pData[NV_BR04_XVU_MCC_REG_OFFSET / sizeof(*pData)] = + NV_BR04(i); +# define NV_BR04_XVU_MCC_REG_ALIAS_DONOR_BUS_CUSTOM 0x80 + pData[NV_BR04_XVU_MCC_REG_ALIAS / sizeof(*pData)] = + REF_NUM(NV_BR04_XVU_MCC_REG_ALIAS_ACCESS, + NV_BR04_XVU_MCC_REG_ALIAS_ACCESS_ENABLED) | + REF_NUM(NV_BR04_XVU_MCC_REG_ALIAS_ADDR_SELECT, + NV_BR04_XVU_MCC_REG_ALIAS_ADDR_SELECT_MANUAL) | + REF_NUM(NV_BR04_XVU_MCC_REG_ALIAS_DONOR_BUS, + NV_BR04_XVU_MCC_REG_ALIAS_DONOR_BUS_CUSTOM) | + ((NvU32)pGpu->busInfo.gpuPhysAddr + NV_BR04(i)); + + // Per bug 461247, we need to delay at least 100us for the change to propagate. + osDelay(1); + + Shifted = NV_TRUE; + break; + } + } + } + } + + // If we couldn't find an empty slot, we must mask another slot. + gpuInstance = 0; + while (!Shifted && ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)))) + { + // set up the aliasing in a downstream GPU + if (gpuGetBus(pGpu) >= minBus && + gpuGetBus(pGpu) <= maxBus) + { + // Check in each of the two slots in the BR04 window. + for (i = 0; i < 2; i++) + { + NvU32 SecBus, SubBus; + + SecBus = DRF_VAL(_BR04_XVU, _BUS, _SEC_NUMBER, + GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_BUS)); + SubBus = DRF_VAL(_BR04_XVU, _BUS, _SUB_NUMBER, + GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_BUS)); + + + if (SecBus != minBus || SubBus != maxBus) + { + // set up the aliasing in the first different slot + pData[NV_BR04_XVU_MCC_REG_OFFSET / sizeof(*pData)] = + NV_BR04(i); + pData[NV_BR04_XVU_MCC_REG_ALIAS / sizeof(*pData)] = + REF_NUM(NV_BR04_XVU_MCC_REG_ALIAS_ACCESS, + NV_BR04_XVU_MCC_REG_ALIAS_ACCESS_ENABLED) | + REF_NUM(NV_BR04_XVU_MCC_REG_ALIAS_ADDR_SELECT, + NV_BR04_XVU_MCC_REG_ALIAS_ADDR_SELECT_MANUAL) | + REF_NUM(NV_BR04_XVU_MCC_REG_ALIAS_DONOR_BUS, + NV_BR04_XVU_MCC_REG_ALIAS_DONOR_BUS_CUSTOM) | + ((NvU32)pGpu->busInfo.gpuPhysAddr + NV_BR04(i)); + + // Per bug 461247, we need to delay at least 100us for the change to propagate. + osDelay(1); + + Shifted = NV_TRUE; + break; + } + } + } + } + + + osUnmapKernelSpace((void*)pData, NV_BR04_XVU_CONFIG_SIZE); + + return NV_OK; +} + +// +// Nvidia_BR04_GetBar0 : Returns physical address where BR04 config space can +// be accessed. Returns NULL on error. +// +// We can access BR04 registers in the following ways, from most- to +// least-preferred: +// 1. Through the region in GPU register space reserved for bridge register +// access +// 2. BR04 BAR0 +// 3. PCIE enhanced config space +// +// There is no need for a "free" function as with BR03 because the register +// aliasing is done differently; the BR04 claims the space in the GPU hierarchy +// by default at startup. +// +// BR04 registers are accessed one PCI device at a time. portNum should be < 0 +// for the upstream port, or 0 through 3 for each of the downstream ports, +// respectively. 
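+//
+// For illustration (bus numbers hypothetical): if the BR04 upstream port sits
+// on bus 5, its four downstream ports typically appear as devices 0-3 on bus
+// 6, so a call with portNum = 2 resolves to the config space of bus 6,
+// device 2 when the PCIE enhanced configuration space path is used.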
+// +static RmPhysAddr +Nvidia_BR04_GetBar0 +( + OBJCL *pCl, + OBJHWBC *pBR04, + NvS32 portNum +) +{ + RmPhysAddr bar0 = 0; + volatile NvU32* pData = NULL; + NvU32 i; + OBJGPU *pGpu; + NvU32 gpuMask, gpuInstance; + NvU32 data; + NV_STATUS rmStatus; + + // Sanity checks + if (!pBR04 || + pBR04->bcRes != HWBC_NVIDIA_BR04 || + !pBR04->ctrlDev.valid || + portNum > 3) + { + return 0; + } + + // + // Access through window in GPU BAR0 space + // + if (pBR04->gpuPhysAddr) + { + // Try the two slots in the GPU's window + for (i = 0; i < 2; i++) + { + bar0 = pBR04->gpuPhysAddr + NV_BR04(i); + + pData = osMapKernelSpace(bar0, + NV_BR04_XVU_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + if (!pData) + { + return 0; + } + + if ( (NvU8)REF_VAL(NV_BR04_XVU_BUS_PRI_NUMBER, + pData[NV_BR04_XVU_BUS / sizeof(*pData)]) == + pBR04->ctrlDev.bus ) + { + osUnmapKernelSpace((void*)pData, NV_BR04_XVU_CONFIG_SIZE); + + // Downstream ports are within the window at an offset + if (portNum >= 0) + { + bar0 += NV_BR04_XVD_OFFSET(portNum); + } + return bar0; + } + + osUnmapKernelSpace((void*)pData, NV_BR04_XVU_CONFIG_SIZE); + } + + // Neither slot is correct. + bar0 = 0; + } + + gpumgrGetGpuAttachInfo(NULL, &gpuMask); + gpuInstance = 0; + + // first pass, see if this BR04 is already visible + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance))) + { + // + // Make sure we only try to look for slots in GPUs + // downstream of ourselves. Checking this saves us a + // few unnecessary BR04 register writes. + // + if (!(gpuGetBus(pGpu) >= pBR04->minBus && + gpuGetBus(pGpu) <= pBR04->maxBus)) + { + continue; + } + + // Check in each of the two slots in the BR04 window. + for (i = 0; i < 2; i++) + { + data = GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_DEV_ID); + if (IS_DEVID_BR04(DRF_VAL(_BR04_XVU, _DEV_ID, _DEVICE_ID, data))) + { + data = GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_BUS); + if (DRF_VAL(_BR04_XVU, _BUS, _PRI_NUMBER, data) == + pBR04->ctrlDev.bus) + { + // Set BR04 address selection to manual to keep it from + // hopping to another GPU's BAR0 window. + data = GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_MCC_REG_ALIAS); + data = FLD_SET_DRF(_BR04, _XVU_MCC_REG_ALIAS, _ADDR_SELECT, _MANUAL, data); + data = FLD_SET_DRF(_BR04, _XVU_MCC_REG_ALIAS, _DONOR_BUS, _CUSTOM, data); + GPU_REG_WR32(pGpu, NV_BR04(i) + NV_BR04_XVU_MCC_REG_ALIAS, data); + + pBR04->gpuPhysAddr = pGpu->busInfo.gpuPhysAddr; + return (pBR04->gpuPhysAddr + NV_BR04(i)); + } + } + } + } + + // second pass, start rearranging aliasing registers + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance))) + { + // + // Make sure we only try to look for slots in GPUs + // downstream of ourselves. Checking this saves us a + // few unnecessary BR04 register writes. + // + if (!(gpuGetBus(pGpu) >= pBR04->minBus && + gpuGetBus(pGpu) <= pBR04->maxBus)) + { + continue; + } + + // Check in each of the two slots in the BR04 window. + for (i = 0; i < 2; i++) + { + data = GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_DEV_ID); + if (IS_DEVID_BR04(DRF_VAL(_BR04_XVU, _DEV_ID, _DEVICE_ID, data))) + { + data = GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_BUS); + if (DRF_VAL(_BR04_XVU, _BUS, _PRI_NUMBER, data) == + pBR04->ctrlDev.bus) + { + // Set BR04 address selection to manual to keep it from + // hopping to another GPU's BAR0 window. 
+ data = GPU_REG_RD32(pGpu, NV_BR04(i) + NV_BR04_XVU_MCC_REG_ALIAS); + data = FLD_SET_DRF(_BR04, _XVU_MCC_REG_ALIAS, _ADDR_SELECT, _MANUAL, data); + data = FLD_SET_DRF(_BR04, _XVU_MCC_REG_ALIAS, _DONOR_BUS, _CUSTOM, data); + GPU_REG_WR32(pGpu, NV_BR04(i) + NV_BR04_XVU_MCC_REG_ALIAS, data); + + pBR04->gpuPhysAddr = pGpu->busInfo.gpuPhysAddr; + return (pBR04->gpuPhysAddr + NV_BR04(i)); + } + else + { + rmStatus = Nvidia_BR04_ShiftAliasingRegisters( + pGpu->busInfo.gpuPhysAddr + NV_BR04(i)); + // XXX handle Nvidia_BR04_ShiftAliasingRegisters() failures + NV_ASSERT(rmStatus == NV_OK); + i--; // look again + } + } + } + } + + // + // Access through BR04 BAR0 space + // + // Usually downstream ports are located one bus number higher than upstream + // port, with device number equal to downstream port number. + // + bar0 = osPciReadDword(pBR04->ctrlDev.handle, NV_BR04_XVU_BAR_0) & + ~RM_PAGE_MASK; + + if (bar0) + { + if (portNum >= 0) + { + bar0 += NV_BR04_XVD_OFFSET(portNum); + } + + // BR04 A01 WAR: find this BR04 a home + rmStatus = Nvidia_BR04_ShiftAliasingRegisters(bar0); + NV_ASSERT(rmStatus == NV_OK); + + return bar0; + } + + // Else access through PCIE space + if (pCl->getProperty(pCl, PDB_PROP_CL_PCIE_CONFIG_ACCESSIBLE) && + pCl->pPcieConfigSpaceBase) + { + RmPhysAddr pcieConfigSpaceBase; + + pcieConfigSpaceBase = clFindPcieConfigSpaceBase(pCl, + pBR04->ctrlDev.domain, + pBR04->ctrlDev.bus); + if ((pcieConfigSpaceBase) && + ((pcieConfigSpaceBase & ~((RmPhysAddr)(0xFFFFFFFF))) == 0 )) + { + if (portNum >= 0) + { + bar0 = pcieConfigSpaceBase | + ((pBR04->ctrlDev.bus + 1) << PCIE_BUS_SHIFT) | + (portNum << PCIE_DEVICE_SHIFT); + } + else + { + bar0 = pcieConfigSpaceBase | pBR04->ctrlDev.bus << PCIE_BUS_SHIFT; + } + } + + if (bar0 != 0) + { + // BR04 A01 WAR: find this BR04 a home + rmStatus = Nvidia_BR04_ShiftAliasingRegisters(bar0); + // XXX handle Nvidia_BR04_ShiftAliasingRegisters() failures + NV_ASSERT(rmStatus == NV_OK); + } + } + + return bar0; +} + + + +static NV_STATUS +Nvidia_BR04_ChangeUpstreamBusSpeed +( + OBJHWBC *pBR04, + OBJCL *pCl, + NvU32 cmd +) +{ + volatile NvU32 *pData = NULL; // Register access for upstream port + volatile NvU32 *pDpData[4] = { NULL, NULL, NULL, NULL}; // Register access for each downstream port + RmPhysAddr bar0; + NvU32 regValue; + NvU32 regValue2; + NvS32 i; + NvBool enableCorrErrors = NV_FALSE; + NV_STATUS RetVal = NV_OK; + + // Check if HWBC resource is BR04 + if (!pBR04 || pBR04->bcRes != HWBC_NVIDIA_BR04) + return NV_ERR_INVALID_ARGUMENT; + + if (!pBR04->ctrlDev.handle) + return NV_ERR_INVALID_ARGUMENT; + + // Set up config access + if (0 == (bar0 = Nvidia_BR04_GetBar0(pCl, pBR04, -1))) + { + return NV_ERR_GENERIC; + } + + pData = osMapKernelSpace(bar0, + NV_BR04_XVU_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + if (!pData) + { + return NV_ERR_GENERIC; + } + + for (i = 0; i < 4; i++) + { + pDpData[i] = NULL; + if (!(bar0 = Nvidia_BR04_GetBar0(pCl, pBR04, i)) || + !(pDpData[i] = osMapKernelSpace(bar0, + NV_BR04_XVD_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE))) + { + // If we've failed here we have to roll back any successful + // mappings, then return error. 
+ for (i--; i >= 0; i--) + { + if (pDpData[i] != NULL) + { + osUnmapKernelSpace((void*)(pDpData[i]), + NV_BR04_XVD_CONFIG_SIZE); + } + } + osUnmapKernelSpace((void*)pData, NV_BR04_XVU_CONFIG_SIZE); + + return NV_ERR_GENERIC; + } + } + + switch (cmd) + { + case HWBC_UPSTREAM_BUS_SPEED_GEN1PCIE: + // Get the current pcie speed info + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)]; + regValue = REF_VAL(NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED, regValue); + + // if already at target speed then return success + if (regValue == NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_2P5G) + { + NV_PRINTF(LEVEL_INFO, + "Already in Gen1 speed. No need to transition.\n"); + } + else + { + // + // Set the UP to Gen1. + // + + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT2 / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_LINK_CTRLSTAT2, _TARGET_LINK_SPEED, _2P5G, regValue); + pData[NV_BR04_XVU_LINK_CTRLSTAT2 / sizeof(*pData)] = regValue; + + // These values needed for bug 361633. +# ifndef NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_ENABLED +# define NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_DISABLED 0 +# define NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_ENABLED 1 +# endif + + // Disable CEs during training per bug 361633 + regValue = pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)]; + if (REF_VAL(NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN, regValue) + != NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_DISABLED) + { + regValue = FLD_SET_DRF(_BR04, _XVU_DEV_CTRLSTAT, _CORR_ERR_RPT_EN, _DISABLED, regValue); + pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)] = regValue; + enableCorrErrors = NV_TRUE; + } + + // + // Trigger speed change on UP + // + + // This #define should be removed when this is available in the hardware ref headers. + // See NVBug 332731 for more details. +# ifndef NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_SPEED_CHANGE_ONE +# define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_SPEED_CHANGE_ONE 0x00000001 +# endif + + regValue = pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _SPEED_CHANGE, _ONE, regValue); + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _TARGET_LINK_SPEED, _2P5, regValue); + pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)] = regValue; + + // Per bug 340793, we need to delay at least 5us for the training to complete. + osDelay(1); + + // Get the current pcie speed info + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)]; + regValue = REF_VAL(NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED, regValue); + + // if already at target speed then quit + if (regValue == NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_2P5G) + { + NV_PRINTF(LEVEL_INFO, "Verified we are at Gen1 speed.\n"); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Failed to train to Gen1 speed.\n"); + RetVal = NV_ERR_GENERIC; + } + + if (enableCorrErrors == NV_TRUE) + { + regValue = pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_DEV_CTRLSTAT, _CORR_ERR_RPT_EN, _ENABLED, regValue); + pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)] = regValue; + } + } + break; + + case HWBC_UPSTREAM_BUS_SPEED_GEN2PCIE: + // First check to make sure the other end of the link supports Gen2. 
+ regValue = pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)]; + if (REF_VAL(NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_REMOTE, regValue) != NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_REMOTE_5P0_2P5) + { + NV_PRINTF(LEVEL_INFO, + "*** Gen2 not supported by other side of link.\n"); + RetVal = NV_ERR_GENERIC; + } + else + { + // Get the current pcie speed info + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)]; + regValue = REF_VAL(NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED, regValue); + + // if already at target speed then return success + if (regValue == NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_5P0G) + { + NV_PRINTF(LEVEL_INFO, + "Already in Gen2 speed. No need to transition.\n"); + } + else + { + // + // Allow Gen2 on upstream and downstream ports, if supported. + // + + NV_PRINTF(LEVEL_INFO, + "*** Enabling BR04 Gen2 features.\n"); + + // Enable Gen2 support globally on BR04 links. + regValue = pData[NV_BR04_XVU_BOOT_1 / sizeof(*pData)]; + regValue2 = FLD_SET_DRF(_BR04, _XVU_BOOT_1, _LINK_SPEED, _5000, regValue); + if (regValue2 != regValue) + { + pData[NV_BR04_XVU_BOOT_1 / sizeof(*pData)] = regValue2; + } + + // Set supported data rates to include Gen2 on upstream link. + regValue = pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)]; + regValue2 = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _DATA_RATE_SUPPORTED, _5P0_2P5, regValue); + if (regValue2 != regValue) + { + pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)] = regValue2; + } + + // Set supported data rates to include Gen2 on downstream links. + for (i = 0; i < 4; ++i) + { + regValue = pDpData[i][NV_BR04_XVD_LINK_CTRLSTAT2 / sizeof(*pDpData[i])]; + regValue2 = FLD_SET_DRF(_BR04, _XVD_LINK_CTRLSTAT2, _TARGET_LINK_SPEED, _5P0G, regValue); + if (regValue2 != regValue) + { + pDpData[i][NV_BR04_XVD_LINK_CTRLSTAT2 / sizeof(*pDpData[i])] = regValue2; + } + } + + // + // Set the UP to Gen2. + // + + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT2 / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_LINK_CTRLSTAT2, _TARGET_LINK_SPEED, _5P0G, regValue); + pData[NV_BR04_XVU_LINK_CTRLSTAT2 / sizeof(*pData)] = regValue; + + // These values needed for bug 361633. +# ifndef NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_ENABLED +# define NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_DISABLED 0 +# define NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_ENABLED 1 +# endif + + // Disable CEs during training per bug 361633 + regValue = pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)]; + if (REF_VAL(NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN, regValue) + != NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_DISABLED) + { + regValue = FLD_SET_DRF(_BR04, _XVU_DEV_CTRLSTAT, _CORR_ERR_RPT_EN, _DISABLED, regValue); + pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)] = regValue; + enableCorrErrors = NV_TRUE; + } + + // + // Trigger speed change on UP + // + + // This #define should be removed when this is available in the hardware ref headers. + // See NVBug 332731 for more details. 
+ # ifndef NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_SPEED_CHANGE_ONE + # define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_SPEED_CHANGE_ONE 0x00000001 + # endif + + regValue = pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _SPEED_CHANGE, _ONE, regValue); + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _TARGET_LINK_SPEED, _5P0, regValue); + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _DATA_RATE_SUPPORTED, _5P0_2P5, regValue); + pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)] = regValue; + + // Per bug 340793, we need to delay at least 5us for the training to complete. + osDelay(1); + + // Get the current pcie speed info + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)]; + regValue = REF_VAL(NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED, regValue); + + // if already at target speed then quit + if (regValue == NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_5P0G) + { + NV_PRINTF(LEVEL_INFO, + "Verified we are at Gen2 speed.\n"); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Failed to train to Gen2 speed.\n"); + RetVal = NV_ERR_GENERIC; + } + + if (enableCorrErrors == NV_TRUE) + { + regValue = pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_DEV_CTRLSTAT, _CORR_ERR_RPT_EN, _ENABLED, regValue); + pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)] = regValue; + } + } + } + break; + } + + for (i = 3; i >= 0; --i) + { + osUnmapKernelSpace((void*)(pDpData[i]), NV_BR04_XVD_CONFIG_SIZE); + } + osUnmapKernelSpace((void*) pData, NV_BR04_XVU_CONFIG_SIZE); + return RetVal; +} + +static NV_STATUS +Nvidia_BR04_GetUpstreamBusSpeed +( + OBJHWBC *pBR04, + OBJCL *pCl, + NvU32 *speed +) +{ + volatile NvU32 *pData = NULL; // Register access for upstream port + RmPhysAddr bar0; + NvU32 regValue; + + // Check if HWBC resource is BR04 + if (!pBR04 || pBR04->bcRes != HWBC_NVIDIA_BR04) + return NV_ERR_INVALID_ARGUMENT; + + if (!pBR04->ctrlDev.handle) + return NV_ERR_INVALID_ARGUMENT; + + // Set up config access + if (0 == (bar0 = Nvidia_BR04_GetBar0(pCl, pBR04, -1))) + { + return NV_ERR_GENERIC; + } + + pData = osMapKernelSpace(bar0, + NV_BR04_XVU_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + if (!pData) + { + return NV_ERR_GENERIC; + } + + // Get the current pcie speed info + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)]; + regValue = REF_VAL(NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED, regValue); + + osUnmapKernelSpace((void*) pData, NV_BR04_XVU_CONFIG_SIZE); + + if (regValue == NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_2P5G) + { + *speed = HWBC_UPSTREAM_BUS_SPEED_GEN1PCIE; + } + else if (regValue == NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_5P0G) + { + *speed = HWBC_UPSTREAM_BUS_SPEED_GEN2PCIE; + } + else + { + NV_PRINTF(LEVEL_ERROR, " BR04 Upstream Port at unknown bus speed.\n"); + return NV_ERR_GENERIC; + } + + return NV_OK; +} + + +NV_STATUS +Nvidia_BR04_disableDownstreamASPM +( + NvU8 bus +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + OBJHWBC *target = pCl->pHWBC; + OBJHWBC *parent = NULL; + NvU32 regValue; + NvU32 regValue2; + volatile NvU32 *pData = NULL; // Register access for BR04 upstream port + RmPhysAddr bar0; + + while (target) + { + if (target->ctrlDev.bus == bus && target->ctrlDev.device == 0 && target->ctrlDev.func == 0) + { + // We found ourselves and our parent + break; + } + + if (target->minBus <= bus && target->maxBus >= bus) // bus in range + { + parent = target; + target = target->pFirstChild; + } + else + { + target = target->pSibling; + } + } + + if 
(parent == NULL || parent->bcRes != HWBC_NVIDIA_BR04) + { + // We're plugged into the root port or a non-BR04 bridge. Do nothing. + return NV_OK; + } + + // + // There is a BR04 above us. Disable its downstream ASPM. + // + + if (0 == (bar0 = Nvidia_BR04_GetBar0(pCl, parent, -1))) + { + return NV_ERR_GENERIC; + } + + pData = osMapKernelSpace(bar0, + NV_BR04_XVU_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + if (pData == NULL) + { + return NV_ERR_GENERIC; + } + + regValue = pData[NV_BR04_XVU_REV_CC / sizeof(*pData)]; + if (DRF_VAL(_BR04_XVU, _REV_CC, _MAJOR_REVISION_ID, regValue) == 0xa && + DRF_VAL(_BR04_XVU, _REV_CC, _MINOR_REVISION_ID, regValue) >= 3) + { + // + // Turn off ASPM on BR04 A03. + // + + regValue = pData[NV_BR04_XVU_CYA_NIBBLE0 / sizeof(*pData)]; + regValue2 = FLD_SET_DRF(_BR04, _XVU_CYA_NIBBLE0, _RSVD_4, _DPX_ASPM_DISABLE, regValue); + if (regValue2 != regValue) + { + pData[NV_BR04_XVU_CYA_NIBBLE0 / sizeof(*pData)] = regValue2; + } + } + + osUnmapKernelSpace((void*) pData, NV_BR04_XVU_CONFIG_SIZE); + return NV_OK; +} + +// +// Set up registers for BR04 +// +static NV_STATUS +Nvidia_BR04_setupFunc +( + OBJHWBC *pBR04, + OBJCL *pCl +) +{ + volatile NvU32 *pData = NULL; // Register access for upstream port + volatile NvU32 *pDpData[4] = { NULL, NULL, NULL, NULL}; // Register access for each downstream port + RmPhysAddr bar0; + BR04_PORT tport, sport; // Terminating port, source port + NvU32 type; // Transaction type (cpl, non-posted, posted) + NvU32 addr; // Index (# of dwords) into BAR0 + NvS32 i; + NV_STATUS status = NV_OK; + NvU32 numActiveDPs = 0; + NvU8 CreditSet = 0; + NvU32 regValue; + NvU32 regValue2; + NvBool enableCorrErrors = NV_FALSE; + void *pHandle; + NvU16 vendorID, deviceID; + + // Empirically determined values for port credit allocation + // See HW bug 325819. 
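+    // Both tables are indexed as [CreditSet][type][tport][sport]: CreditSet 0
+    // covers the 1-/2-DP configurations and CreditSet 1 the 3-/4-DP
+    // configurations, type is the transaction class (CPL, NP, PW), and each
+    // entry is the internal flow-control credit programmed between the source
+    // port (sport) and the terminating port (tport). PortCreditsHeader feeds
+    // the _H field and PortCreditsData the _D field of the INT_FLOW_CTL
+    // registers written later in this function.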
+ static NvU16 PortCreditsHeader[2][3][NUM_BR04_PORTS][NUM_BR04_PORTS] = + { + { // 1-DP or 2-DP configuration + { // type = CPL + { 0, 0, 3, 0, 27, 0}, // tport = DP0 + { 0, 0, 0, 0, 0, 0}, // tport = DP1 + { 3, 0, 0, 0, 27, 0}, // tport = DP2 + { 0, 0, 0, 0, 0, 0}, // tport = DP3 + {31, 0, 30, 0, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + }, + { // type = NP + { 0, 0, 15, 0, 15, 0}, // tport = DP0 + { 0, 0, 0, 0, 0, 0}, // tport = DP1 + {15, 0, 0, 0, 15, 0}, // tport = DP2 + { 0, 0, 0, 0, 0, 0}, // tport = DP3 + {31, 0, 31, 0, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + }, + { // type = PW + { 0, 0, 14, 0, 15, 0}, // tport = DP0 + { 0, 0, 0, 0, 0, 0}, // tport = DP1 + {14, 0, 0, 0, 15, 0}, // tport = DP2 + { 0, 0, 0, 0, 0, 0}, // tport = DP3 + {31, 0, 30, 0, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + } + }, + { // 3-DP or 4-DP configuration + { // type = CPL + { 0, 3, 3, 3, 19, 0}, // tport = DP0 + { 3, 0, 3, 3, 19, 0}, // tport = DP1 + { 3, 3, 0, 3, 19, 0}, // tport = DP2 + { 3, 3, 3, 0, 19, 0}, // tport = DP3 + {15, 15, 15, 14, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + }, + { // type = NP + { 0, 7, 7, 7, 7, 0}, // tport = DP0 + { 7, 0, 7, 7, 7, 0}, // tport = DP1 + { 7, 7, 0, 7, 7, 0}, // tport = DP2 + { 7, 7, 7, 0, 7, 0}, // tport = DP3 + {15, 15, 15, 15, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + }, + { // type = PW + { 0, 7, 7, 6, 7, 0}, // tport = DP0 + { 7, 0, 7, 6, 7, 0}, // tport = DP1 + { 7, 6, 0, 7, 7, 0}, // tport = DP2 + { 7, 7, 6, 0, 7, 0}, // tport = DP3 + {15, 15, 15, 14, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + } + } + }; + + static NvU16 PortCreditsData[2][3][NUM_BR04_PORTS][NUM_BR04_PORTS] = + { + { // 1-DP or 2-DP configuration + { // type = CPL + { 0, 0, 24, 0, 216, 0}, // tport = DP0 + { 0, 0, 0, 0, 0, 0}, // tport = DP1 + { 24, 0, 0, 0, 216, 0}, // tport = DP2 + { 0, 0, 0, 0, 0, 0}, // tport = DP3 + {120, 0, 112, 0, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + }, + { // type = NP + { 0, 0, 0, 0, 0, 0}, // tport = DP0 + { 0, 0, 0, 0, 0, 0}, // tport = DP1 + { 0, 0, 0, 0, 0, 0}, // tport = DP2 + { 0, 0, 0, 0, 0, 0}, // tport = DP3 + { 0, 0, 0, 0, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + }, + { // type = PW + { 0, 0, 112, 0, 120, 0}, // tport = DP0 + { 0, 0, 0, 0, 0, 0}, // tport = DP1 + {112, 0, 0, 0, 120, 0}, // tport = DP2 + { 0, 0, 0, 0, 0, 0}, // tport = DP3 + {120, 0, 112, 0, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + } + }, + { // 3-DP or 4-DP configuration + { // type = CPL + { 0, 24, 24, 24, 152, 0}, // tport = DP0 + { 24, 0, 24, 24, 152, 0}, // tport = DP1 + { 24, 24, 0, 24, 152, 0}, // tport = DP2 + { 24, 24, 24, 0, 152, 0}, // tport = DP3 + { 56, 56, 56, 48, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + }, + { // type = NP + { 0, 0, 0, 0, 0, 0}, // tport = DP0 + { 0, 0, 0, 0, 0, 0}, // tport = DP1 + { 0, 0, 0, 0, 0, 0}, // tport = DP2 + { 0, 0, 0, 0, 0, 0}, // tport = DP3 + { 0, 0, 0, 0, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + }, + { // type = PW + { 0, 56, 56, 48, 56, 0}, // tport = DP0 + { 56, 0, 56, 48, 56, 0}, // tport = DP1 + { 56, 48, 0, 56, 56, 0}, // tport = DP2 + { 56, 56, 48, 0, 56, 0}, // tport = DP3 + { 56, 56, 56, 48, 0, 0}, // tport = UP0 + { 0, 0, 0, 0, 0, 0} // tport = MH0 + } + } + }; + + // Check if HWBC resource is BR04 + if (pBR04 == NULL || pBR04->bcRes != HWBC_NVIDIA_BR04) + { + return NV_ERR_GENERIC; + } + + if (pBR04->ctrlDev.handle == 
NULL) + { + return NV_ERR_GENERIC; + } + + // Set up config access + if (0 == (bar0 = Nvidia_BR04_GetBar0(pCl, pBR04, -1))) + { + return NV_ERR_GENERIC; + } + + pData = osMapKernelSpace(bar0, + NV_BR04_XVU_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + if (pData == NULL) + { + return NV_ERR_GENERIC; + } + + for (i = 0; i < 4; i++) + { + pDpData[i] = NULL; + if (0 == (bar0 = Nvidia_BR04_GetBar0(pCl, pBR04, i)) || + NULL == (pDpData[i] = osMapKernelSpace(bar0, + NV_BR04_XVD_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE))) + { + // If we've failed here we have to roll back any successful + // mappings, then return error. + for (i--; i >= 0; i--) + { + if (pDpData[i] != NULL) + { + osUnmapKernelSpace((void*)(pDpData[i]), + NV_BR04_XVD_CONFIG_SIZE); + } + } + osUnmapKernelSpace((void*)pData, NV_BR04_XVU_CONFIG_SIZE); + + return NV_ERR_GENERIC; + } + } + + // + // Set clock trims for BR04 A01. See bug 344709 and bug 333577 for more details. + // + + regValue = pData[NV_BR04_XVU_REV_CC / sizeof(*pData)]; + if (DRF_VAL(_BR04_XVU, _REV_CC, _MAJOR_REVISION_ID, regValue) == 0xa) + { + switch(DRF_VAL(_BR04_XVU, _REV_CC, _MINOR_REVISION_ID, regValue)) + { + // A01 + case 1: + NV_PRINTF(LEVEL_INFO, "*** Set clock trims for BR04 A01.\n"); + pData[NV_BR04_XVU_CTRIM_DP_1 / sizeof(*pData)] = 0x10D4A0E8; + pData[NV_BR04_XVU_CTRIM_DP_2 / sizeof(*pData)] = 0x12639CC8; + pData[NV_BR04_XVU_CTRIM_DP_3 / sizeof(*pData)] = 0x107418E7; + pData[NV_BR04_XVU_CTRIM_DP_4 / sizeof(*pData)] = 0x10C424C5; + pData[NV_BR04_XVU_CTRIM_DP_5 / sizeof(*pData)] = 0x10953128; + pData[NV_BR04_XVU_CTRIM_DP_6 / sizeof(*pData)] = 0x00000129; + break; + + // A03 + case 3: + // + // Turn off ASPM on BR04 Upstream Port for BR04 A03. + // + + regValue = pData[NV_BR04_XVU_CYA_NIBBLE0 / sizeof(*pData)]; + regValue2 = FLD_SET_DRF(_BR04, _XVU_CYA_NIBBLE0, _RSVD_0, _UP0_ASPM_DISABLE, regValue); + if (regValue2 != regValue) + { + pData[NV_BR04_XVU_CYA_NIBBLE0 / sizeof(*pData)] = regValue2; + } + break; + + default: + break; + } + } + + // + // Allow Gen2 on upstream and downstream ports, if supported. + // + + NV_PRINTF(LEVEL_INFO, "*** Enabling BR04 Gen2 features.\n"); + + // Enable Gen2 support globally on BR04 links. + regValue = pData[NV_BR04_XVU_BOOT_1 / sizeof(*pData)]; + regValue2 = FLD_SET_DRF(_BR04, _XVU_BOOT_1, _LINK_SPEED, _5000, regValue); + if (regValue2 != regValue) + { + pData[NV_BR04_XVU_BOOT_1 / sizeof(*pData)] = regValue2; + } + + // Set supported data rates to include Gen2 on downstream links. + for (i = 0; i < 4; ++i) + { + regValue = pDpData[i][NV_BR04_XVD_LINK_CTRLSTAT2 / sizeof(*pDpData[i])]; + regValue2 = FLD_SET_DRF(_BR04, _XVD_LINK_CTRLSTAT2, _TARGET_LINK_SPEED, _5P0G, regValue); + if (regValue2 != regValue) + { + pDpData[i][NV_BR04_XVD_LINK_CTRLSTAT2 / sizeof(*pDpData[i])] = regValue2; + + regValue = pDpData[i][NV_BR04_XVD_G2_PRIV_XP_LCTRL_2 / sizeof(*pDpData[i])]; + regValue = FLD_SET_DRF(_BR04, _XVD_G2_PRIV_XP_LCTRL_2, _ADVERTISED_RATE_CHANGE, _ONE, regValue); + pDpData[i][NV_BR04_XVD_G2_PRIV_XP_LCTRL_2 / sizeof(*pDpData[i])] = regValue; + } + } + + + // Have at least a 1us delay after an advertised rate change to avoid a chipset replay on the next transaction. + // Bug 778455. 
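+    // Assumption: osDelay() takes its argument in milliseconds, so the 1 ms
+    // wait below comfortably covers the 1 us requirement noted above.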
+ osDelay(1); + + regValue = pData[NV_BR04_XVU_ROM_REVISION / sizeof(*pData)]; + if (REF_VAL(NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS, regValue) == + NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P737 || + REF_VAL(NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS, regValue) == + NV_BR04_XVU_ROM_REVISION_ID_UPPER_16BITS_P535) + { + // + // Set deemphasis to -2.2Db on BR04 Upstream Port on P737 and P535 + // + regValue = pData[NV_XPU_PEX_PAD_CTL_3 / sizeof(*pData)]; + regValue2 = regValue; + regValue2 = FLD_SET_DRF(_XPU, _PEX_PAD_CTL_3, _TX_PEAK_R2_1C, _22DB, regValue2); + if (regValue2 != regValue) + { + pData[NV_XPU_PEX_PAD_CTL_3 / sizeof(*pData)] = regValue2; + } + } + else + { + // + // Set deemphasis to -3.5Db on all BR04 Upstream Ports + // + regValue = pData[NV_XPU_PEX_PAD_CTL_3 / sizeof(*pData)]; + regValue2 = regValue; + regValue2 = FLD_SET_DRF(_XPU, _PEX_PAD_CTL_3, _TX_PEAK_R2_1C, _36DB, regValue2); + if (regValue2 != regValue) + { + pData[NV_XPU_PEX_PAD_CTL_3 / sizeof(*pData)] = regValue2; + } + } + + // + // Set NV_PES_XVU_CYA_BIT0 [31:28] for the DPs where an NV device is + // connected. This ensures these ports are at -3.5dB and the rest remain + // untouched. + // + + regValue2 = pData[NV_BR04_XVU_CYA_BIT0 / sizeof(*pData)]; + for (i = 0; i < 4; ++i) + { + regValue = pDpData[i][NV_BR04_XVD_BUS / sizeof(*pDpData[i])]; + if (regValue == 0xFFFFFFFF) + continue; + + pHandle = osPciInitHandle(pBR04->domain, (NvU8) REF_VAL(NV_BR04_XVD_BUS_SEC_NUMBER, regValue), 0, 0, &vendorID, &deviceID); + if (pHandle != NULL && vendorID == PCI_VENDOR_ID_NVIDIA) + { + switch(i) + { + case 0: + regValue2 = FLD_SET_DRF(_BR04, _XVU_CYA_BIT0, _RSVD_28, _DP0_DE_EMP_NEG_3P5_DB, + regValue2); + break; + case 1: + regValue2 = FLD_SET_DRF(_BR04, _XVU_CYA_BIT0, _RSVD_29, _DP1_DE_EMP_NEG_3P5_DB, + regValue2); + break; + case 2: + regValue2 = FLD_SET_DRF(_BR04, _XVU_CYA_BIT0, _RSVD_30, _DP2_DE_EMP_NEG_3P5_DB, + regValue2); + break; + case 3: + regValue2 = FLD_SET_DRF(_BR04, _XVU_CYA_BIT0, _RSVD_31, _DP3_DE_EMP_NEG_3P5_DB, + regValue2); + break; + } + } + } + pData[NV_BR04_XVU_CYA_BIT0 / sizeof(*pData)] = regValue2; + + + // + // Set NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_CYA_DEEMPHASIS_OVERRIDE_ENABLED + // + regValue = pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)]; + regValue2 = regValue; + regValue2 = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _CYA_DEEMPHASIS_OVERRIDE, _ENABLED, + regValue2); + if (regValue2 != regValue) + { + pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)] = regValue2; + } + + // + // Train upstream port to Gen2, if supported. + // + + NV_PRINTF(LEVEL_INFO, "*** Setup BR04 upstream link speed.\n"); + + // First check to make sure the other end of the link supports Gen2. If not, skip all this. + regValue = pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)]; + if (REF_VAL(NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_REMOTE, regValue) == NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_DATA_RATE_SUPPORTED_REMOTE_5P0_2P5) + { + // Get the current pcie speed info + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)]; + regValue = REF_VAL(NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED, regValue); + + // if already at target speed then quit + if (regValue != NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_5P0G) + { + // + // Set the UP to Gen2 and allow all downstream GPUs to negotiate their own + // upstream Gen2 speed. 
+ // + + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT2 / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_LINK_CTRLSTAT2, _TARGET_LINK_SPEED, _5P0G, regValue); + pData[NV_BR04_XVU_LINK_CTRLSTAT2 / sizeof(*pData)] = regValue; + + // These values needed for bug 361633. +# ifndef NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_ENABLED +# define NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_DISABLED 0 +# define NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_ENABLED 1 +# endif + + // Disable CEs during training per bug 361633 + regValue = pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)]; + if (REF_VAL(NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN, regValue) + != NV_BR04_XVU_DEV_CTRLSTAT_CORR_ERR_RPT_EN_DISABLED) + { + regValue = FLD_SET_DRF(_BR04, _XVU_DEV_CTRLSTAT, _CORR_ERR_RPT_EN, _DISABLED, regValue); + pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)] = regValue; + enableCorrErrors = NV_TRUE; + } + + // + // Trigger speed change on UP + // + + // This #define should be removed when this is available in the hardware ref headers. + // See NVBug 332731 for more details. +# ifndef NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_SPEED_CHANGE_ONE +# define NV_BR04_XVU_G2_PRIV_XP_LCTRL_2_SPEED_CHANGE_ONE 0x00000001 +# endif + + regValue = pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _DATA_RATE_SUPPORTED, _5P0_2P5, regValue); + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _TARGET_LINK_SPEED, _5P0, regValue); + pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)] = regValue; + + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_LCTRL_2, _SPEED_CHANGE, _ONE, regValue); + pData[NV_BR04_XVU_G2_PRIV_XP_LCTRL_2 / sizeof(*pData)] = regValue; + + // Per bug 340793, we need to delay at least 5us for the training to complete. + osDelay(1); + + // Get the vendor ID and device ID + regValue = pData[NV_BR04_XVU_DEV_ID / sizeof(*pData)]; + if (REF_VAL(NV_BR04_XVU_DEV_ID_VENDOR_ID, regValue) != NV_BR04_XVU_DEV_ID_VENDOR_ID_NVIDIA || + (REF_VAL(NV_BR04_XVU_DEV_ID_DEVICE_ID, regValue) | 0xF) != NV_BR04_XVU_DEV_ID_DEVICE_ID_DEFAULT) + { + // Ouch. The BR04 appears to have fallen off the bus. + NV_PRINTF(LEVEL_ERROR, + "*** BR04 has fallen off the bus after we tried to train it to Gen2!\n"); + DBG_BREAKPOINT(); + + status = NV_ERR_GENERIC; + goto Nvidia_BR04_setupFunc_exit; + } + + // Get the current pcie speed info + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)]; + regValue = REF_VAL(NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED, regValue); + + // if already at target speed then quit + if (regValue == NV_BR04_XVU_LINK_CTRLSTAT_LINK_SPEED_5P0G) + { + NV_PRINTF(LEVEL_INFO, "Verified we are at Gen2 speed.\n"); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Failed to train to Gen2 speed.\n"); + } + + if (enableCorrErrors == NV_TRUE) + { + regValue = pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)]; + regValue = FLD_SET_DRF(_BR04, _XVU_DEV_CTRLSTAT, _CORR_ERR_RPT_EN, _ENABLED, regValue); + pData[NV_BR04_XVU_DEV_CTRLSTAT / sizeof(*pData)] = regValue; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "Already in Gen2 speed. 
No need to transition.\n"); + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "*** Gen2 not supported by other side of link.\n"); + } + + + // + // Turn off L0s and L1 on all BR04 XVU (Upstream Port) + // + + regValue = pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)]; + regValue2 = FLD_SET_DRF(_BR04, _XVU_LINK_CTRLSTAT, _ASPM_CTRL, _DISABLED, regValue); + if (regValue2 != regValue) + { + pData[NV_BR04_XVU_LINK_CTRLSTAT / sizeof(*pData)] = regValue2; + } + + // + // Set charge pump to optimal value on all BR04 XVU (Upstream Port) + // + + regValue = pData[NV_XPU_PEX_PLL_CTL2 / sizeof(*pData)]; + regValue2 = FLD_SET_DRF(_XPU, _PEX_PLL_CTL2, _PLL_CP_CNTL, _30UA, regValue); + if (regValue2 != regValue) + { + pData[NV_XPU_PEX_PLL_CTL2 / sizeof(*pData)] = regValue2; + } + + // + // Set charge pump to optimal value on all BR04 XVDs (Downstream Ports) + // + + regValue = pData[NV_XPD_PEX_PLL_CTL2 / sizeof(*pData)]; + regValue2 = FLD_SET_DRF(_XPD, _PEX_PLL_CTL2, _PLL_CP_CNTL, _22P5UA, regValue); + if (regValue2 != regValue) + { + pData[NV_XPD_PEX_PLL_CTL2 / sizeof(*pData)] = regValue2; + } + + // For the upstream port: Set NV_BR04_XVU_G2_PRIV_XP_0_REPLAY_TIMER_LIMIT + // (Bits 28:19) of register NV_BR04_XVU_G2_PRIV_XP_0 (0xC00) to the value + // 0x3E9 when the drivers are loading due to the OS booting up or resuming + // from sleep/hibernation. (WAR for bugs 319189 and 526518) + +# define NV_BR04_XVU_G2_PRIV_XP_0_REPLAY_TIMER_LIMIT_BUG_319189_WAR 0x3E9 + + regValue = pData[NV_BR04_XVU_G2_PRIV_XP_0 / sizeof(*pData)]; + if (REF_VAL(NV_BR04_XVU_G2_PRIV_XP_0_REPLAY_TIMER_LIMIT, regValue) + != NV_BR04_XVU_G2_PRIV_XP_0_REPLAY_TIMER_LIMIT_BUG_319189_WAR) + { + regValue = FLD_SET_DRF(_BR04, _XVU_G2_PRIV_XP_0, _REPLAY_TIMER_LIMIT, _BUG_319189_WAR, regValue); + pData[NV_BR04_XVU_G2_PRIV_XP_0 / sizeof(*pData)] = regValue; + + // For each downstream port: Set NV_BR04_XVD_G2_VEND_XP1_REPLAY_TIMER_LIMIT + // (Bits 9:0) of register NV_BR04_XVD_G2_VEND_XP1 (0x404) and + // NV_BR04_XVD_G2_PRIV_XP_CONFIG_GEN2_REPLAY_TIMER_LIMIT (Bits 11:2) of + // register NV_BR04_XVD_G2_PRIV_XP_CONFIG (0x494) to 0x3E9. (WAR for bugs 319189 and 526518) + +# define NV_BR04_XVD_G2_VEND_XP1_REPLAY_TIMER_LIMIT_BUG_319189_WAR 0x3E9 +# define NV_BR04_XVD_G2_PRIV_XP_CONFIG_GEN2_REPLAY_TIMER_LIMIT_BUG_319189_WAR 0x3E9 + + for (i = 0; i < 4; ++i) + { + regValue = pDpData[i][NV_BR04_XVD_G2_VEND_XP1 / sizeof(*pDpData[i])]; + regValue = FLD_SET_DRF(_BR04, _XVD_G2_VEND_XP1, _REPLAY_TIMER_LIMIT, _BUG_319189_WAR, regValue); + pDpData[i][NV_BR04_XVD_G2_VEND_XP1 / sizeof(*pDpData[i])] = regValue; + + regValue = pDpData[i][NV_BR04_XVD_G2_PRIV_XP_CONFIG / sizeof(*pDpData[i])]; + regValue = FLD_SET_DRF(_BR04, _XVD_G2_PRIV_XP_CONFIG_GEN2, _REPLAY_TIMER_LIMIT, _BUG_319189_WAR, regValue); + pDpData[i][NV_BR04_XVD_G2_PRIV_XP_CONFIG / sizeof(*pDpData[i])] = regValue; + } + } + + // + // Enable ASPM on BR04 A03 Downstream Ports. 
+ // + + regValue = pData[NV_BR04_XVU_REV_CC / sizeof(*pData)]; + if (DRF_VAL(_BR04_XVU, _REV_CC, _MAJOR_REVISION_ID, regValue) == 0xa && + DRF_VAL(_BR04_XVU, _REV_CC, _MINOR_REVISION_ID, regValue) == 0x3) + { + for (i = 0; i < 4; ++i) + { + regValue = pDpData[i][NV_BR04_XVD_LINK_CTRLSTAT / sizeof(*pDpData[i])]; + regValue2 = FLD_SET_DRF(_BR04, _XVD_LINK_CTRLSTAT, _ASPM_CTRL, _L0S_L1, regValue); + if (regValue2 != regValue) + { + pDpData[i][NV_BR04_XVD_LINK_CTRLSTAT / sizeof(*pDpData[i])] = regValue2; + } + } + } + + // Check if registers already programmed + if (pData[NV_BR04_XVU_INT_FLOW_CTL / sizeof(*pData)]) + { + if (!(pData[NV_BR04_XVU_INT_FLOW_CTL / sizeof(*pData)] & 0xff)) + { + // + // WAR for bug 779279. Credits lost on downstream ports during SBIOS POST on some platforms. + // + + for (i = 0; i < 4; ++i) + { + pData[NV_BR04_XVU_INT_FLOW_CTL_UP0_TOO_CPL(i) / sizeof(*pData)] = 0; + pData[NV_BR04_XVU_INT_FLOW_CTL_UP0_TOO_NP(i) / sizeof(*pData)] = 0; + pData[NV_BR04_XVU_INT_FLOW_CTL_UP0_TOO_PW(i) / sizeof(*pData)] = 0; + } + + pData[NV_BR04_XVU_INT_FLOW_CTL / sizeof(*pData)] = 1; + + if (!(pData[NV_BR04_XVU_INT_FLOW_CTL / sizeof(*pData)] & 0xff)) + { + // The WAR did not work! + NV_PRINTF(LEVEL_ERROR, + "*** BR04 WAR for bug 779279 is not working!\n"); + DBG_BREAKPOINT(); + } + } + else + { + NV_PRINTF(LEVEL_WARNING, + "*** BR04 registers have already been programmed.\n"); + } + status = NV_OK; + goto Nvidia_BR04_setupFunc_exit; + } + + // Check if hidden GPU mode is enabled; if so, set phantom range for P2P + // transfers + if (REF_VAL(NV_BR04_XVU_HGPU_CTRL_EN, + pData[NV_BR04_XVU_HGPU_CTRL / sizeof(*pData)]) == + NV_BR04_XVU_HGPU_CTRL_EN_ENABLED) + { + pData[NV_BR04_XVU_HGPU_PEER_FB_UPPER_BASE / sizeof(*pData)] = + NvU64_HI32(HGPU_P2P_PHANTOM_BASE); + pData[NV_BR04_XVU_HGPU_PEER_FB_LOWER_BASE / sizeof(*pData)] = + NvU64_LO32(HGPU_P2P_PHANTOM_BASE & 0xffffffff); + } + + NV_PRINTF(LEVEL_INFO, "*** Setup BR04 registers.\n"); + + // determine how many active DPs we have. See HW bug 325819 for + // original algorithm description and bug 346133 for new logic. 
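+    // A downstream port counts as active when its data link layer reports
+    // DLL_LINK_SM_ACTIVE; three or more active ports select the 3-/4-DP
+    // credit tables (CreditSet 1), otherwise the 1-/2-DP tables are used.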
+ for (i = 0; i < 4; i++) + { + if (REF_VAL(NV_BR04_XVD_LINK_CTRLSTAT_DLL_LINK_SM, + pDpData[i][NV_BR04_XVD_LINK_CTRLSTAT / sizeof(*pDpData[i])]) == + NV_BR04_XVD_LINK_CTRLSTAT_DLL_LINK_SM_ACTIVE) + { + // we detected a presence in the force + ++numActiveDPs; + } + } + + // assert that we have a legal 1-, 2-, 3-, or 4-DP configuration + // and choose the right array entry + if (numActiveDPs >= 3) + CreditSet = 1; + else + CreditSet = 0; + + addr = NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL(0) / sizeof(*pData); + for (tport = 0; tport < NUM_BR04_PORTS; tport ++) + { + for (sport = 0; sport < NUM_BR04_PORTS; sport ++) + { + for (type = 0; type < 3; type++) + { + pData[addr] = + REF_NUM(NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL_D, + PortCreditsData[CreditSet][type][tport][sport]) | + REF_NUM(NV_BR04_XVU_INT_FLOW_CTL_DP0_TOO_CPL_H, + PortCreditsHeader[CreditSet][type][tport][sport]); + addr++; + } + addr++; + } + } + + + + // Tell BR04 to use values just written + pData[NV_BR04_XVU_INT_FLOW_CTL / sizeof(*pData)] = 1; + + // ITX Allocation (values based on BR03 values) + pData[NV_BR04_XVU_ITX_ALLOCATION / sizeof(*pData)] = + REF_NUM(NV_BR04_XVU_ITX_ALLOCATION_UP0, 6) | + REF_NUM(NV_BR04_XVU_ITX_ALLOCATION_DP0, 6) | + REF_NUM(NV_BR04_XVU_ITX_ALLOCATION_DP1, 6) | + REF_NUM(NV_BR04_XVU_ITX_ALLOCATION_DP2, 6) | + REF_NUM(NV_BR04_XVU_ITX_ALLOCATION_DP3, 6) | + REF_NUM(NV_BR04_XVU_ITX_ALLOCATION_MH0, 6); + + // OPPORTUNISTIC_ACK and OPPORTUNISTIC_UPDATE_FC for UP0, DP* + pData[NV_BR04_XVU_G2_PRIV_XP_0 / sizeof(*pData)] |= + REF_NUM(NV_BR04_XVU_G2_PRIV_XP_0_OPPORTUNISTIC_ACK, 1) | + REF_NUM(NV_BR04_XVU_G2_PRIV_XP_0_OPPORTUNISTIC_UPDATE_FC, 1); + for (i = 0; i < 4; i++) + { + pDpData[i][NV_BR04_XVD_G2_VEND_XP / sizeof(*pData)] |= + REF_NUM(NV_BR04_XVD_G2_VEND_XP_OPPORTUNISTIC_ACK, 1) | + REF_NUM(NV_BR04_XVD_G2_VEND_XP_OPPORTUNISTIC_UPDATEFC, 1); + } + + status = NV_OK; + +Nvidia_BR04_setupFunc_exit: + osUnmapKernelSpace((void*) pData, NV_BR04_XVU_CONFIG_SIZE); + for (i = 0; i < 4; i++) + { + osUnmapKernelSpace((void*)(pDpData[i]), NV_BR04_XVD_CONFIG_SIZE); + } + + return status; +} + + + +// +// Determines which downstream port of a BR04, if any, the specified GPU is +// behind. +// +static NV_STATUS +Nvidia_BR04_FindDpInfo +( + OBJCL *pCl, + OBJHWBC *pHWBC, + OBJGPU *pGpu +) +{ + RmPhysAddr bar0; + volatile NvU32 *pData = NULL; // Register access for upstream port + volatile NvU32 *pDpData[4] = { NULL, NULL, NULL, NULL}; // Register access for each downstream port + NvU32 gpuBusNum, secNum, subNum; + NvS32 i; // Must be signed to count for a backwards loop + NV_STATUS status = NV_ERR_GENERIC; + + // Check that the HWBC object is correctly initialized and is BR04 + if (!pHWBC || pHWBC->bcRes != HWBC_NVIDIA_BR04 || !pHWBC->ctrlDev.valid) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Setup BR04 config access + if (0 == (bar0 = Nvidia_BR04_GetBar0(pCl, pHWBC, -1))) + { + return NV_ERR_GENERIC; + } + + pData = osMapKernelSpace(bar0, + NV_BR04_XVU_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE); + if (pData == NULL) + { + return NV_ERR_GENERIC; + } + + for (i = 0; i < 4; i++) + { + pDpData[i] = NULL; + if (!(bar0 = Nvidia_BR04_GetBar0(pCl, pHWBC, i)) || + !(pDpData[i] = osMapKernelSpace(bar0, + NV_BR04_XVD_CONFIG_SIZE, + NV_MEMORY_UNCACHED, + NV_PROTECT_READ_WRITE))) + { + // If we've failed here we have to roll back any successful + // mappings, then return error. 
+ for (i--; i >= 0; i--) + { + if (pDpData[i] != NULL) + { + osUnmapKernelSpace((void*)(pDpData[i]), + NV_BR04_XVD_CONFIG_SIZE); + } + } + osUnmapKernelSpace((void*)pData, NV_BR04_XVU_CONFIG_SIZE); + + return NV_ERR_GENERIC; + } + } + + gpuBusNum = gpuGetBus(pGpu); + + // Check if GPU is behind each of the four downstream ports, based on sec, + // sub bus numbers. + for (i = 0; i < 4; i++) + { + secNum = DRF_VAL(_BR04_XVD, _BUS, _SEC_NUMBER, + pDpData[i][NV_BR04_XVD_BUS/sizeof(*pData)]); + subNum = DRF_VAL(_BR04_XVD, _BUS, _SUB_NUMBER, + pDpData[i][NV_BR04_XVD_BUS/sizeof(*pData)]); + + if (gpuBusNum >= secNum && gpuBusNum <= subNum) + { + // This is the correct downstream port + pHWBC->dpForGpuInstance[pGpu->gpuInstance] = (NvS8)i; + status = NV_OK; + goto Nvidia_BR04_FindDpInfo_Exit; + } + } + + // If we fall through to here the GPU is not in range of any BR04 downstream + // ports. Return error. + status = NV_ERR_GENERIC; + +Nvidia_BR04_FindDpInfo_Exit: + osUnmapKernelSpace((void*)pData, NV_BR04_XVU_CONFIG_SIZE); + for (i = 0; i < 4; i++) + { + osUnmapKernelSpace((void*)(pDpData[i]), NV_BR04_XVD_CONFIG_SIZE); + } + + return status; +} + + +// +// Plx_Pex8747_GetBar0 : Returns physical address of chip's BAR0 address. +// Returns NULL on error. +// +static RmPhysAddr +Plx_Pex8747_GetBar0 +( + OBJCL *pCl, + OBJHWBC *pPlx +) +{ + RmPhysAddr bar0 = 0; + + // Sanity checks + if ((!pPlx) || (pPlx->bcRes != HWBC_PLX_PEX8747) || (!pPlx->ctrlDev.valid)) + { + NV_PRINTF(LEVEL_ERROR, "Not a PLX PEX8747!\n"); + return 0; + } + + // + // Usually downstream ports are located one bus number higher than upstream + // port, with device number equal to downstream port number. + // + bar0 = osPciReadDword(pPlx->ctrlDev.handle, 0x10); + + // This is just for sanity's sake. If there is no bar0 it should read back as 0 anyhow. + if (bar0 == 0xffffffff) + { + bar0 = 0; + } + + if (!bar0) + { + NV_PRINTF(LEVEL_ERROR, "Device has no BAR0!\n"); + } + + return bar0; +} + +static NV_STATUS +Plx_Pex8747_ChangeUpstreamBusSpeed +( + OBJHWBC *pPlx, + OBJCL *pCl, + NvU32 cmd +) +{ + volatile NvU32 *pData = NULL; // Register access for upstream port + RmPhysAddr bar0; + NvU32 regValue; + + // Sanity checks + if ((!pPlx) || (pPlx->bcRes != HWBC_PLX_PEX8747) || (!pPlx->ctrlDev.valid)) + { + NV_PRINTF(LEVEL_ERROR, "Not a PLX PEX8747!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + bar0 = Plx_Pex8747_GetBar0(pCl, pPlx); + if (0 == bar0) + { + return NV_ERR_GENERIC; + } + + pData = osMapKernelSpace(bar0, RM_PAGE_SIZE, NV_MEMORY_UNCACHED, NV_PROTECT_READ_WRITE); + if (!pData) + { + return NV_ERR_GENERIC; + } + + // + // Get the current PCIe link speed + // + + regValue = pData[0x78 / sizeof(*pData)]; + regValue = (regValue >> 16) & 0xF; + + // + // If already at target speed then return success + // + + if (regValue == cmd) + { + osUnmapKernelSpace((void*) pData, RM_PAGE_SIZE); + NV_PRINTF(LEVEL_INFO, + "Already at Gen%u speed. No need to transition.\n", + cmd); + return NV_OK; + } + + // + // Set the target link speed. + // + + regValue = pData[0x98 / sizeof(*pData)]; + regValue = (regValue & 0xFFFFFFF0) | cmd; + pData[0x98 / sizeof(*pData)] = regValue; + + // + // Enable Retrain Link bit on Upstream port + // + + regValue = pData[0xF70 / sizeof(*pData)]; + regValue = regValue | 0x20000000; + pData[0xF70 / sizeof(*pData)] = regValue; + + // + // Trigger speed change by writing the Retrain Link bit. 
+ // + + regValue = pData[0x78 / sizeof(*pData)]; + regValue = regValue | 0x20; + pData[0x78 / sizeof(*pData)] = regValue; + + // + // Wait long enough to ensure the speed change has at least begun. + // + + osDelay(1); + + // + // Disable Retrain Link bit on Upstream port + // + + regValue = pData[0xF70 / sizeof(*pData)]; + regValue = regValue & 0xDFFFFFFF; + pData[0xF70 / sizeof(*pData)] = regValue; + + // + // Get the current PCIe link speed + // + + regValue = pData[0x78 / sizeof(*pData)]; + regValue = (regValue >> 16) & 0xF; + if (regValue != cmd) + { + osUnmapKernelSpace((void*) pData, RM_PAGE_SIZE); + NV_PRINTF(LEVEL_ERROR, "Failed to train to Gen%u speed.\n", cmd); + return NV_ERR_GENERIC; + } + + osUnmapKernelSpace((void*) pData, RM_PAGE_SIZE); + return NV_OK; +} + +static NV_STATUS +Plx_Pex8747_GetUpstreamBusSpeed +( + OBJHWBC *pPlx, + OBJCL *pCl, + NvU32 *speed +) +{ + volatile NvU32 *pData = NULL; // Register access for upstream port + RmPhysAddr bar0; + NvU32 regValue; + + // Sanity checks + if ((!pPlx) || (pPlx->bcRes != HWBC_PLX_PEX8747) || (!pPlx->ctrlDev.valid)) + { + NV_PRINTF(LEVEL_ERROR, "Not a PLX PEX8747!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + bar0 = Plx_Pex8747_GetBar0(pCl, pPlx); + if (0 == bar0) + { + return NV_ERR_GENERIC; + } + + pData = osMapKernelSpace(bar0, RM_PAGE_SIZE, NV_MEMORY_UNCACHED, NV_PROTECT_READ_WRITE); + if (!pData) + { + return NV_ERR_GENERIC; + } + + // Get the current pcie speed info + regValue = pData[0x78 / sizeof(*pData)]; + *speed = (regValue >> 16) & 0xF; + + osUnmapKernelSpace((void*) pData, RM_PAGE_SIZE); + return NV_OK; +} + +// +// Set up registers for PLX PEX8747 +// +static NV_STATUS +Plx_Pex8747_setupFunc +( + OBJHWBC *pPlx, + OBJCL *pCl +) +{ + volatile NvU32 *pData = NULL; // Register access for upstream port + RmPhysAddr bar0; + NvU32 regValue; + + // Sanity checks + if ((!pPlx) || (pPlx->bcRes != HWBC_PLX_PEX8747) || (!pPlx->ctrlDev.valid)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + bar0 = Plx_Pex8747_GetBar0(pCl, pPlx); + if (0 == bar0) + { + return NV_ERR_GENERIC; + } + + pData = osMapKernelSpace(bar0, 0x12000, NV_MEMORY_UNCACHED, NV_PROTECT_READ_WRITE); + if (!pData) + { + return NV_ERR_GENERIC; + } + + // + // Get the current PCIe link speed + // + + regValue = pData[0x78 / sizeof(*pData)]; + regValue = (regValue >> 16) & 0xF; + if (regValue == 1) + { + // + // Mask electrical idle on all lanes + // + + regValue = pData[0x204 / sizeof(*pData)]; + regValue = regValue | 0xFFFF; + pData[0x204 / sizeof(*pData)] = regValue; + + // + // Set inferred mode + // + + regValue = pData[0x220 / sizeof(*pData)]; + regValue = regValue | 0x10000; + pData[0x220 / sizeof(*pData)] = regValue; + + // + // Switch to Gen3 + // + + Plx_Pex8747_ChangeUpstreamBusSpeed(pPlx, pCl, HWBC_UPSTREAM_BUS_SPEED_GEN3PCIE); + } + + // + // Enable L1 in Link Control register of Port 8, if it is supported. + // + regValue = pData[0x8074 / sizeof(*pData)]; + if (CL_IS_L1_SUPPORTED(regValue)) + { + regValue = pData[0x8078 / sizeof(*pData)]; + regValue = regValue | 0x2; + pData[0x8078 / sizeof(*pData)] = regValue; + } + + // + // Enable L1 in Link Control register of Port 9, if it is supported. + // + regValue = pData[0x9074 / sizeof(*pData)]; + if (CL_IS_L1_SUPPORTED(regValue)) + { + regValue = pData[0x9078 / sizeof(*pData)]; + regValue = regValue | 0x2; + pData[0x9078 / sizeof(*pData)] = regValue; + } + + // + // Enable L1 in Link Control register of Port 16, if it is supported. 
+ // + regValue = pData[0x10074 / sizeof(*pData)]; + if (CL_IS_L1_SUPPORTED(regValue)) + { + regValue = pData[0x10078 / sizeof(*pData)]; + regValue = regValue | 0x2; + pData[0x10078 / sizeof(*pData)] = regValue; + } + + // + // Enable L1 in Link Control register of Port 17, if it is supported. + // + regValue = pData[0x11074 / sizeof(*pData)]; + if (CL_IS_L1_SUPPORTED(regValue)) + { + regValue = pData[0x11078 / sizeof(*pData)]; + regValue = regValue | 0x2; + pData[0x11078 / sizeof(*pData)] = regValue; + } + + osUnmapKernelSpace((void*) pData, 0x12000); + return NV_OK; +} + +NV_STATUS +subdeviceCtrlCmdBusHWBCGetUpstreamBAR0_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_BUS_HWBC_GET_UPSTREAM_BAR0_PARAMS *pBusInfoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + return clHWBCGetUpstreamBAR0(pBusInfoParams->primaryBus, pCl, &pBusInfoParams->physBAR0); +} + +NV_STATUS +subdeviceCtrlCmdBusSetHwbcUpstreamPcieSpeed_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_BUS_SET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS *pBusInfoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + NV_STATUS status = NV_OK; + + switch (pBusInfoParams->busSpeed) + { + case NV2080_CTRL_BUS_SET_PCIE_SPEED_2500MBPS: + { + status = clChangeUpstreamBusSpeed(pBusInfoParams->primaryBus, + pCl, HWBC_UPSTREAM_BUS_SPEED_GEN1PCIE); + break; + } + case NV2080_CTRL_BUS_SET_PCIE_SPEED_5000MBPS: + { + status = clChangeUpstreamBusSpeed(pBusInfoParams->primaryBus, + pCl, HWBC_UPSTREAM_BUS_SPEED_GEN2PCIE); + break; + } + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + return status; +} + +NV_STATUS +subdeviceCtrlCmdBusGetHwbcUpstreamPcieSpeed_IMPL +( + Subdevice *pSubdevice, + NV2080_CTRL_BUS_GET_HWBC_UPSTREAM_PCIE_SPEED_PARAMS *pBusInfoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + return clGetUpstreamBusSpeed(pBusInfoParams->primaryBus, pCl, + &pBusInfoParams->busSpeed); +} + diff --git a/src/nvidia/src/kernel/platform/p2p/p2p_caps.c b/src/nvidia/src/kernel/platform/p2p/p2p_caps.c new file mode 100644 index 000000000..144eee6d8 --- /dev/null +++ b/src/nvidia/src/kernel/platform/p2p/p2p_caps.c @@ -0,0 +1,653 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/system.h" +#include "gpu_mgr/gpu_mgr.h" +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "kernel/gpu/bif/kernel_bif.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/gpu.h" +#include "platform/chipset/chipset.h" +#include "platform/p2p/p2p_caps.h" +#include "nvRmReg.h" +#include "nvlimits.h" + +/** + * @brief Determines if the GPUs are P2P compatible + * + * @param[in] pGpu0 + * @param[in] pGpu1 + * + * @return NV_TRUE if the GPUs are P2P compatible + */ +static NvBool +areGpusP2PCompatible(OBJGPU *pGpu0, OBJGPU *pGpu1) +{ + // Mark GPUs of different arch or impl incapable of P2P over pcie + if ((gpuGetChipArch(pGpu0) != gpuGetChipArch(pGpu1)) || + (gpuGetChipImpl(pGpu0) != gpuGetChipImpl(pGpu1))) + { + return NV_FALSE; + } + + // Mark GPUs of different notebook implementation incapable of P2P over pcie + if (IsMobile(pGpu0) != IsMobile(pGpu1)) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +NV_STATUS +p2pGetCaps +( + NvU32 gpuMask, + NvBool *pP2PWriteCapable, + NvBool *pP2PReadCapable, + P2P_CONNECTIVITY *pConnectivity +) +{ + NvU8 p2PWriteCapsStatus; + NvU8 p2PReadCapsStatus; + NV_STATUS status; + P2P_CONNECTIVITY connectivity; + + if ((pP2PWriteCapable == NULL) || (pP2PReadCapable == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + status = p2pGetCapsStatus(gpuMask, &p2PWriteCapsStatus, + &p2PReadCapsStatus, &connectivity + ); + if (status != NV_OK) + { + return status; + } + + // + // The classes like NV50_P2P, NV50_THIRD_PARTY_P2P depends on direct P2P + // connectivity, hence the check. + // + if (!((connectivity == P2P_CONNECTIVITY_PCIE) || + (connectivity == P2P_CONNECTIVITY_PCIE_BAR1) || + (connectivity == P2P_CONNECTIVITY_NVLINK) || + (connectivity == P2P_CONNECTIVITY_C2C))) + { + return NV_ERR_NOT_SUPPORTED; + } + + *pP2PWriteCapable = (p2PWriteCapsStatus == NV0000_P2P_CAPS_STATUS_OK); + *pP2PReadCapable = (p2PReadCapsStatus == NV0000_P2P_CAPS_STATUS_OK); + + if (pConnectivity != NULL) + { + *pConnectivity = connectivity; + } + + return status; +} + +static NV_STATUS +_kp2pCapsGetStatusIndirectOverNvLink +( + NvU32 gpuMask, + NvU8 *pP2PWriteCapStatus, + NvU8 *pP2PReadCapStatus +) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuInstance = 0; + OBJGPU *pFirstGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + NvBool bIndirectPeers = NV_FALSE; + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pFirstGpu); + + if ((pKernelBif->forceP2PType != NV_REG_STR_RM_FORCE_P2P_TYPE_DEFAULT) && + (pKernelBif->forceP2PType != NV_REG_STR_RM_FORCE_P2P_TYPE_NVLINK)) + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + return NV_OK; + } + + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + bIndirectPeers = gpumgrCheckIndirectPeer(pFirstGpu, pGpu); + if (!bIndirectPeers) + { + break; + } + } + + if (bIndirectPeers) + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_OK; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_OK; + } + else + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + } + + return NV_OK; +} + +static NV_STATUS +_gpumgrGetP2PCapsStatusOverNvLink +( + NvU32 gpuMask, + NvU8 *pP2PWriteCapStatus, + NvU8 *pP2PReadCapStatus +) +{ + OBJGPU *pGpu = NULL; + NvU32 gpuInstance = 0; + NV_STATUS status; + OBJGPU *pFirstGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + RMTIMEOUT timeout; + NvU32 linkTrainingTimeout = 10000000; + KernelBif *pKernelBif = NULL; 
+ KernelNvlink *pKernelNvlink = NULL; + + NV_ASSERT_OR_RETURN(pFirstGpu != NULL, NV_ERR_INVALID_ARGUMENT); + pKernelNvlink = GPU_GET_KERNEL_NVLINK(pFirstGpu); + pKernelBif = GPU_GET_KERNEL_BIF(pFirstGpu); + + if ((pKernelBif->forceP2PType != NV_REG_STR_RM_FORCE_P2P_TYPE_DEFAULT) && + (pKernelBif->forceP2PType != NV_REG_STR_RM_FORCE_P2P_TYPE_NVLINK)) + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + return NV_OK; + } + + // + // Re-initialize to check loop back configuration if only single GPU in + // requested mask. + // + gpuInstance = (gpumgrGetSubDeviceCount(gpuMask) > 1) ? gpuInstance : 0; + + // Check NvLink P2P connectivity + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + // + // If ALI is enabled then poll to make sure that the links have + // finished training on the two given gpus. If timeout occurs then + // log an error, but continue on as there could be another gpu pair + // that could have connectivity + // + + if ((pKernelNvlink != NULL) && + knvlinkDiscoverPostRxDetLinks_HAL(pFirstGpu, pKernelNvlink, pGpu) == NV_OK) + { + // Check to make sure that the links are active + + gpuSetTimeout(pGpu, linkTrainingTimeout, &timeout, IS_SILICON(pGpu) ? + (GPU_TIMEOUT_FLAGS_BYPASS_THREAD_STATE | GPU_TIMEOUT_FLAGS_DEFAULT) : 0); + do + { + status = gpuCheckTimeout(pGpu, &timeout); + + if (knvlinkCheckTrainingIsComplete(pFirstGpu, pGpu, pKernelNvlink) == NV_OK) + { + break; + } + + if (status == NV_ERR_TIMEOUT) + { + NV_PRINTF(LEVEL_ERROR, + "Links failed to train for the given gpu pairs!\n"); + return status; + } + } + while(status != NV_ERR_TIMEOUT); + } + + // Ensure that we can create a NvLink P2P object between the two object + if ((pKernelNvlink != NULL) && + knvlinkIsNvlinkP2pSupported(pFirstGpu, pKernelNvlink, pGpu)) + { + // Ensure training completes on legacy nvlink devices + status = knvlinkTrainP2pLinksToActive(pFirstGpu, pGpu, pKernelNvlink); + NV_ASSERT(status == NV_OK); + + if (status != NV_OK) + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + return NV_OK; + } + } + else + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + return NV_OK; + } + } + + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_OK; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_OK; + return NV_OK; +} + +// Returns true if overrides are enabled for PCI-E. +static NvBool +_kp2pCapsCheckStatusOverridesForPcie +( + NvU32 gpuMask, + NvU8 *pP2PWriteCapStatus, + NvU8 *pP2PReadCapStatus +) +{ + KernelBif *pKernelBif = NULL; + NvU32 gpuInstance = 0; + OBJGPU *pGpu = NULL; + + // Check overrides for all GPUs in the mask. 
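+    // The first GPU found with a p2pOverride regkey decides the result: its
+    // READ and WRITE override fields are applied independently to the
+    // returned statuses and the function reports that an override is present.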
+ while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + if (pKernelBif->p2pOverride != BIF_P2P_NOT_OVERRIDEN) + { + switch(DRF_VAL(_REG_STR, _CL_FORCE_P2P, _READ, pKernelBif->p2pOverride)) + { + case NV_REG_STR_CL_FORCE_P2P_READ_DISABLE: + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY; + break; + case NV_REG_STR_CL_FORCE_P2P_READ_ENABLE: + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_OK; + break; + default: + break; + } + + switch(DRF_VAL(_REG_STR, _CL_FORCE_P2P, _WRITE, pKernelBif->p2pOverride)) + { + case NV_REG_STR_CL_FORCE_P2P_WRITE_DISABLE: + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY; + break; + case NV_REG_STR_CL_FORCE_P2P_WRITE_ENABLE: + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_OK; + break; + default: + break; + } + + return NV_TRUE; + } + + } + + return NV_FALSE; +} + +static NV_STATUS +_kp2pCapsGetStatusOverPcie +( + NvU32 gpuMask, + NvU8 *pP2PWriteCapStatus, + NvU8 *pP2PReadCapStatus +) +{ + OBJGPU *pGpu = NULL; + OBJGPU *pFirstGpu = NULL; + NvU32 gpuInstance = 0; + KernelBif *pKernelBif = NULL; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + NvU32 iohDomain_ref = 0xFFFFFFFF; + NvU8 iohBus_ref = 0xFF; + NvU8 pciSwitchBus = 0, pciSwitchBus_ref = 0xFF; + NvBool bCommonPciSwitchFound = NV_TRUE; + NvU16 deviceID; + NvU8 gpuP2PReadCapsStatus = NV0000_P2P_CAPS_STATUS_OK; + NvU8 gpuP2PWriteCapsStatus = NV0000_P2P_CAPS_STATUS_OK; + NvU32 lockedGpuMask = 0; + NV_STATUS status = NV_OK; + + // Check if any overrides are enabled. + if (_kp2pCapsCheckStatusOverridesForPcie(gpuMask, pP2PWriteCapStatus, + pP2PReadCapStatus)) + { + return NV_OK; + } + + // PCI-E topology checks + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + // + // While PCI-E P2P transactions are forwarded between different + // root ports implemented within a given Intel I/O hub, they + // are not forwarded between any two I/O hubs. We must therefore + // complement the table-driven chipset validation check below + // with an IOH-specific topology check. + // + if (pGpu->gpuClData.rootPort.addr.valid && + (pGpu->gpuClData.rootPort.VendorID == PCI_VENDOR_ID_INTEL)) + { + deviceID = pGpu->gpuClData.rootPort.DeviceID; + + if (((deviceID >= DEVICE_ID_INTEL_3408_ROOT_PORT) && + (deviceID <= DEVICE_ID_INTEL_3411_ROOT_PORT)) || + ((deviceID >= DEVICE_ID_INTEL_3C02_ROOT_PORT) && + (deviceID <= DEVICE_ID_INTEL_3C0B_ROOT_PORT)) || + ((deviceID >= DEVICE_ID_INTEL_0E02_ROOT_PORT) && + (deviceID <= DEVICE_ID_INTEL_0E0B_ROOT_PORT)) || + ((deviceID >= DEVICE_ID_INTEL_2F01_ROOT_PORT) && + (deviceID <= DEVICE_ID_INTEL_2F0B_ROOT_PORT)) || + ((deviceID >= DEVICE_ID_INTEL_6F01_ROOT_PORT) && + (deviceID <= DEVICE_ID_INTEL_6F0B_ROOT_PORT)) || + (deviceID == DEVICE_ID_INTEL_3420_ROOT_PORT) || + (deviceID == DEVICE_ID_INTEL_3421_ROOT_PORT)) + { + if (iohDomain_ref == 0xFFFFFFFF) + { + iohDomain_ref = pGpu->gpuClData.rootPort.addr.domain; + iohBus_ref = pGpu->gpuClData.rootPort.addr.bus; + } + else if ((iohDomain_ref != pGpu->gpuClData.rootPort.addr.domain) || + (iohBus_ref != pGpu->gpuClData.rootPort.addr.bus)) + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED; + return NV_OK; + } + } + } + + // Test common bridges. 
Skip first GPU + if (pFirstGpu == NULL) + { + pFirstGpu = pGpu; + continue; + } + + if (!areGpusP2PCompatible(pFirstGpu, pGpu)) + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + return NV_OK; + } + + // This call returns the most upper bridge + clFindCommonDownstreamBR(pFirstGpu, pGpu, pCl, &pciSwitchBus); + + if (pciSwitchBus_ref == 0xFF) + { + pciSwitchBus_ref = pciSwitchBus; + } + + // If no bridge found or different to the one previously found + if ((pciSwitchBus == 0xFF) || (pciSwitchBus_ref != pciSwitchBus)) + { + bCommonPciSwitchFound = NV_FALSE; + } + } + + // Check if GPUs have the HW P2P implementation + + // Only lock for GSP_CLIENT. Get one GPU. + if (pFirstGpu == NULL) + { + gpuInstance = 0; + pFirstGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + } + + if (IS_GSP_CLIENT(pFirstGpu)) + { + // Lock GPUs + lockedGpuMask = gpuMask; + status = rmGpuGroupLockAcquire(0, GPU_LOCK_GRP_MASK, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, RM_LOCK_MODULES_P2P, &lockedGpuMask); + + // If we get NOTHING_TO_DO, we already have the needed locks, so don't free them + if (status == NV_WARN_NOTHING_TO_DO) + lockedGpuMask = 0; + else if (status != NV_OK) + { + lockedGpuMask = 0; + goto done; + } + } + + // Reset P2P caps as statuses will be accumulated below. + *pP2PReadCapStatus = *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_OK; + + gpuInstance = 0; + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS p2pCapsParams = {0}; + + p2pCapsParams.bCommonPciSwitchFound = bCommonPciSwitchFound; + + NV_ASSERT_OK_OR_GOTO(status, pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalSubdevice, + NV2080_CTRL_CMD_INTERNAL_GET_PCIE_P2P_CAPS, + &p2pCapsParams, + sizeof(NV2080_CTRL_INTERNAL_GET_PCIE_P2P_CAPS_PARAMS)), + done); + + // GPU specific P2P caps + pKernelBif = GPU_GET_KERNEL_BIF(pGpu); + if (pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_WRITES_DISABLED)) + gpuP2PWriteCapsStatus = NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED; + if (pKernelBif->getProperty(pKernelBif, PDB_PROP_KBIF_P2P_READS_DISABLED)) + gpuP2PReadCapsStatus = NV0000_P2P_CAPS_STATUS_GPU_NOT_SUPPORTED; + + // + // Reconcile the system and GPU specific P2P information + // The system P2P status takes precedence + // Do not override status from not OK to OK + // + if (*pP2PReadCapStatus == NV0000_P2P_CAPS_STATUS_OK) + *pP2PReadCapStatus = (p2pCapsParams.p2pReadCapsStatus == NV0000_P2P_CAPS_STATUS_OK ? gpuP2PReadCapsStatus : p2pCapsParams.p2pReadCapsStatus); + if (*pP2PWriteCapStatus == NV0000_P2P_CAPS_STATUS_OK) + *pP2PWriteCapStatus = (p2pCapsParams.p2pWriteCapsStatus == NV0000_P2P_CAPS_STATUS_OK ? 
gpuP2PWriteCapsStatus : p2pCapsParams.p2pWriteCapsStatus); + + // No need to continue if P2P is not supported + if ((*pP2PReadCapStatus != NV0000_P2P_CAPS_STATUS_OK) && + (*pP2PWriteCapStatus != NV0000_P2P_CAPS_STATUS_OK)) + { + break; + } + } + +done: + if (lockedGpuMask != 0) + { + rmGpuGroupLockRelease(lockedGpuMask, GPUS_LOCK_FLAGS_NONE); + } + + if (status != NV_OK) + { + if (*pP2PReadCapStatus == NV0000_P2P_CAPS_STATUS_OK) + { + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + } + if (*pP2PWriteCapStatus == NV0000_P2P_CAPS_STATUS_OK) + { + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + } + } + + return status; +} + +static NV_STATUS +_kp2pCapsGetStatusOverPcieBar1 +( + NvU32 gpuMask +) +{ + OBJGPU *pGpuPeer = NULL; + NvU32 gpuInstance = 0; + OBJGPU *pFirstGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + KernelBif *pKernelBif = GPU_GET_KERNEL_BIF(pFirstGpu); + NV_STATUS status = NV_OK; + + if ((pKernelBif->forceP2PType != NV_REG_STR_RM_FORCE_P2P_TYPE_DEFAULT) && + (pKernelBif->forceP2PType != NV_REG_STR_RM_FORCE_P2P_TYPE_BAR1P2P)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // Re-initialize to check loop back configuration if only single GPU in + // requested mask. + // + gpuInstance = (gpumgrGetSubDeviceCount(gpuMask) > 1) ? gpuInstance : 0; + + while ((pGpuPeer = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + if (!kbusIsPcieBar1P2PCapable_HAL(pFirstGpu, GPU_GET_KERNEL_BUS(pFirstGpu), + pGpuPeer, GPU_GET_KERNEL_BUS(pGpuPeer))) + { + status = NV_ERR_NOT_SUPPORTED; + break; + } + } + + return status; +} + +NV_STATUS +p2pGetCapsStatus +( + NvU32 gpuMask, + NvU8 *pP2PWriteCapStatus, + NvU8 *pP2PReadCapStatus, + P2P_CONNECTIVITY *pConnectivity +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + KernelNvlink *pKernelNvlink = NULL; + OBJGPU *pGpu = NULL; + NvU32 gpuInstance = 0; + + if ((pP2PWriteCapStatus == NULL) || + (pP2PReadCapStatus == NULL) || + (pConnectivity == NULL) + ) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Default values + *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + *pConnectivity = P2P_CONNECTIVITY_UNKNOWN; + + // MIG-Nvlink-P2P can be incompatible, so check compatibility for all GPUs + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvBool bSmcNvLinkP2PSupported = ((pKernelMIGManager != NULL) && + kmigmgrIsMIGNvlinkP2PSupported(pGpu, pKernelMIGManager)); + + // If any of the GPU has MIG enabled, return with no P2P support + if (!bSmcNvLinkP2PSupported) + { + NV_PRINTF(LEVEL_ERROR, + "P2P is marked unsupported with MIG for GPU instance = 0x%x\n", + gpuInstance); + return NV_OK; + } + } + + gpuInstance = 0; + + // Check NvLink P2P connectivity. + if (_gpumgrGetP2PCapsStatusOverNvLink(gpuMask, pP2PWriteCapStatus, + pP2PReadCapStatus) == NV_OK) + { + if (*pP2PWriteCapStatus == NV0000_P2P_CAPS_STATUS_OK && + *pP2PReadCapStatus == NV0000_P2P_CAPS_STATUS_OK) + { + *pConnectivity = P2P_CONNECTIVITY_NVLINK; + return NV_OK; + } + } + + // + // On NVSwitch systems, if the NVLink P2P path fails, don't fall back to + // other P2P paths. To ensure that, check if any GPU in the mask has NVLink + // support. If supported, enforce NVSwitch/NVLink connectivity by returning + // NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED. 
+
+ //
+ gpuInstance = 0;
+ while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance)) != NULL)
+ {
+ pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu);
+ if (pKernelNvlink != NULL && pKernelNvlink->discoveredLinks != 0 &&
+ (pSys->getProperty(pSys, PDB_PROP_SYS_NVSWITCH_IS_PRESENT) ||
+ knvlinkIsNvswitchProxyPresent(pGpu, pKernelNvlink)))
+ {
+ *pP2PReadCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED;
+ *pP2PWriteCapStatus = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED;
+ return NV_OK;
+ }
+ }
+
+ // We didn't find direct P2P, so check for indirect P2P.
+ if (_kp2pCapsGetStatusIndirectOverNvLink(gpuMask, pP2PWriteCapStatus,
+ pP2PReadCapStatus) == NV_OK)
+ {
+ if ((*pP2PWriteCapStatus == NV0000_P2P_CAPS_STATUS_OK) &&
+ (*pP2PReadCapStatus == NV0000_P2P_CAPS_STATUS_OK))
+ {
+ *pConnectivity = P2P_CONNECTIVITY_NVLINK_INDIRECT;
+ return NV_OK;
+ }
+ }
+
+ //
+ // Check PCIE P2P connectivity.
+ //
+ // We can control P2P connectivity for PCI-E peers using regkeys, hence
+ // if either read or write is supported, return success. See
+ // _p2pCapsCheckStatusOverridesForPcie for details.
+ //
+ if (_kp2pCapsGetStatusOverPcie(gpuMask, pP2PWriteCapStatus,
+ pP2PReadCapStatus) == NV_OK)
+ {
+ if ((*pP2PWriteCapStatus == NV0000_P2P_CAPS_STATUS_OK) ||
+ (*pP2PReadCapStatus == NV0000_P2P_CAPS_STATUS_OK))
+ {
+ if (_kp2pCapsGetStatusOverPcieBar1(gpuMask) == NV_OK)
+ *pConnectivity = P2P_CONNECTIVITY_PCIE_BAR1;
+ else
+ *pConnectivity = P2P_CONNECTIVITY_PCIE;
+
+ return NV_OK;
+ }
+ }
+
+ return NV_OK;
+}
+
diff --git a/src/nvidia/src/kernel/platform/platform.c b/src/nvidia/src/kernel/platform/platform.c
new file mode 100644
index 000000000..f1e824b6b
--- /dev/null
+++ b/src/nvidia/src/kernel/platform/platform.c
@@ -0,0 +1,200 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/***************************** HW State Routines ***************************\
+* Platform object function definitions. *
+\***************************************************************************/
+
+#include "nvRmReg.h"
+#include "nvacpitypes.h"
+
+#include "core/system.h"
+#include "os/os.h"
+#include "gpu/gpu.h"
+#include "gpu_mgr/gpu_mgr.h"
+
+#include "platform/platform.h"
+
+//!
OBJPFM's constructor +NV_STATUS pfmConstruct_IMPL(OBJPFM *pPfm) +{ + return NV_OK; +} + +void +pfmBlobDataDestroy_IMPL +( + OBJPFM *pPfm +) +{ + // if blob data cache is allocated, free the memory + portMemFree(pPfm->blobData.pEntry); +} + +void +pfmUpdateAcpiIdMapping_IMPL +( + OBJPFM *pPfm, + OBJGPU *pGpu, + NvU32 acpiId, + NvU32 displayId, + NvU32 dodIndex, + NvU32 index +) +{ + NvU32 gpuInst = 0; + + gpuInst = gpuGetInstance(pGpu); + + pPfm->acpiIdMapping[gpuInst][index].acpiId = acpiId; + pPfm->acpiIdMapping[gpuInst][index].displayId = displayId; + pPfm->acpiIdMapping[gpuInst][index].dodIndex = dodIndex; +} + +NvU32 +pfmFindAcpiId_IMPL +( + OBJPFM *pPfm, + OBJGPU *pGpu, + NvU32 displayId +) +{ + NvU8 i; + NvU32 gpuInst = 0; + NvU32 acpiId = 0; + + gpuInst = gpuGetInstance(pGpu); + + for (i = 0; i < NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES; i++) + { + if(pPfm->acpiIdMapping[gpuInst][i].displayId == displayId) + { + acpiId = pPfm->acpiIdMapping[gpuInst][i].acpiId; + break; + } + } + + return acpiId; +} + +NvU32 +pfmFindDodIndex_IMPL +( + OBJPFM *pPfm, + OBJGPU *pGpu, + NvU32 displayId +) +{ + NvU8 i; + NvU32 gpuInst = 0; + NvU32 dodIndex = 0; + + gpuInst = gpuGetInstance(pGpu); + + for (i = 0; i < NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES; i++) + { + if(pPfm->acpiIdMapping[gpuInst][i].displayId == displayId) + { + dodIndex = pPfm->acpiIdMapping[gpuInst][i].dodIndex; + break; + } + } + + return dodIndex; +} + +NvU32 +pfmFindDevMaskFromDodIndex_IMPL +( + OBJPFM *pPfm, + OBJGPU *pGpu, + NvU32 dodIndex +) +{ + NvU8 i; + NvU32 gpuInst = 0; + NvU32 devMask = 0; + + gpuInst = gpuGetInstance(pGpu); + + for (i = 0; i < NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES; i++) + { + if(pPfm->acpiIdMapping[gpuInst][i].dodIndex == dodIndex) + { + devMask = pPfm->acpiIdMapping[gpuInst][i].displayId; + break; + } + } + + return devMask; +} + +NvU32 +pfmFindDevMaskFromAcpiId_IMPL +( + OBJPFM *pPfm, + OBJGPU *pGpu, + NvU32 AcpiId +) +{ + NvU8 i; + NvU32 gpuInst = 0; + NvU32 devMask = 0; + + gpuInst = gpuGetInstance(pGpu); + + for (i = 0; i < NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES; i++) + { + if(pPfm->acpiIdMapping[gpuInst][i].acpiId == AcpiId) + { + devMask = pPfm->acpiIdMapping[gpuInst][i].displayId; + break; + } + } + + return devMask; +} + +void +pfmUpdateDeviceAcpiId_IMPL +( + OBJPFM *pPfm, + OBJGPU *pGpu, + NvU32 acpiId, + NvU32 devMask +) +{ + NvU32 gpuInst = 0; + NvU32 i; + + gpuInst = gpuGetInstance(pGpu); + + for (i = 0; i < NV0073_CTRL_SPECIFIC_MAX_ACPI_DEVICES; i++) + { + if(pPfm->acpiIdMapping[gpuInst][i].displayId == devMask) + { + pPfm->acpiIdMapping[gpuInst][i].acpiId = acpiId; + break; + } + } +} diff --git a/src/nvidia/src/kernel/power/gpu_boost_mgr.c b/src/nvidia/src/kernel/power/gpu_boost_mgr.c new file mode 100644 index 000000000..d76959b63 --- /dev/null +++ b/src/nvidia/src/kernel/power/gpu_boost_mgr.c @@ -0,0 +1,818 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*!
+ * @file
+ * @brief Implementation of the Sync Gpu Boost Manager methods.
+ */
+
+/*------------------------------Includes--------------------------------------*/
+#include "power/gpu_boost_mgr.h"
+#include "os/os.h"
+#include "core/locks.h"
+#include "gpu/gpu_access.h"
+#include "gpu/gpu.h"
+#include "syncgpuboost.h"
+#include "nvlimits.h"
+#include "gpu_mgr/gpu_mgr.h"
+#include "gpu/gsp/gsp_static_config.h"
+#include "vgpu/vgpu_events.h"
+#include "gpu/perf/kern_perf_gpuboostsync.h"
+
+/*-----------------------Static Private Method Prototypes---------------------*/
+static NV_STATUS _gpuboostmgrApplyPolicyFilters(NV0000_SYNC_GPU_BOOST_GROUP_CONFIG *);
+
+/*----------------------------Object Methods----------------------------------*/
+
+/*!
+ * @brief OBJGPUBOOSTMGR object method to create a Sync Gpu Boost Group (SGBG)
+ *
+ * This method will create a new SGBG if none of the GPUs specified in the input
+ * params is already a part of another SGBG.
+ *
+ * @param [in] pBoostConfig @ref NV0000_SYNC_GPU_BOOST_GROUP_CONFIG
+ * @param [out] pBoostConfig::boostGroupId If SGBG creation succeeds, this field
+ * has the assigned group ID. This ID is
+ * unique across RM.
+ * @returns NV_OK Success
+ * @returns NV_ERR_INSUFFICIENT_RESOURCES No new groups possible.
+ * @returns NV_ERR_INVALID_STATE Internal state is inconsistent.
+ * and a few more returned from second level functions.
+ */
+NV_STATUS
+gpuboostmgrCreateGroup_IMPL
+(
+ OBJGPUBOOSTMGR *pBoostMgr,
+ NV0000_SYNC_GPU_BOOST_GROUP_CONFIG *pBoostConfig
+)
+{
+ NV_STATUS status = NV_OK;
+ NODE *pGpuIdNode = NULL;
+ NvBool bCleanup = NV_FALSE;
+ NvU32 i;
+
+ // See if we can accommodate one more SGBG
+ NV_ASSERT_OR_RETURN(pBoostMgr->groupCount < NV0000_SYNC_GPU_BOOST_MAX_GROUPS,
+ NV_ERR_INSUFFICIENT_RESOURCES);
+
+ // Check if requested config is valid.
+ status = gpuboostmgrCheckConfig(pBoostMgr, pBoostConfig);
+ if (NV_OK != status)
+ {
+ NV_PRINTF(LEVEL_ERROR,
+ "Invalid Boost Config. Failing Boost Group creation.\n");
+ NV_ASSERT_OR_GOTO(NV_OK == status, gpuboostmgrCreateGroup_exit);
+ }
+
+ //
+ // Allocate a @ref SYNC_GPU_BOOST_GROUP for the requested group at the first
+ // available index.
+ //
+ // Note: A linear search is sufficient for practical purposes currently.
+ //
+ for (i = 0; i < NV0000_SYNC_GPU_BOOST_MAX_GROUPS; i++)
+ {
+ // Found an unused index.
+ if (0 == pBoostMgr->pBoostGroups[i].gpuCount)
+ {
+ NvU32 j;
+
+ // Setup the internal state for the new group
+ for(j = 0; j < pBoostConfig->gpuCount; j++)
+ {
+ // Allocate GPU ID node.
+ pGpuIdNode = portMemAllocNonPaged(sizeof(*pGpuIdNode));
+ if (NULL == pGpuIdNode)
+ {
+ bCleanup = NV_TRUE;
+ NV_ASSERT_OR_GOTO((pGpuIdNode != NULL), gpuboostmgrCreateGroup_exit);
+ }
+
+ // Add each unique GPU ID in the GPU ID tree
+ portMemSet(pGpuIdNode, 0, sizeof(*pGpuIdNode));
+ pGpuIdNode->keyStart = pBoostConfig->gpuIds[j];
+ pGpuIdNode->keyEnd = pBoostConfig->gpuIds[j];
+ status = btreeInsert(pGpuIdNode, &pBoostMgr->pGpuIdTree);
+ if (NV_OK != status)
+ {
+ bCleanup = NV_TRUE;
+ NV_ASSERT_OR_GOTO(NV_OK == status, gpuboostmgrCreateGroup_exit);
+ }
+
+ pBoostMgr->pBoostGroups[i].gpuIds[j] = pBoostConfig->gpuIds[j];
+ }
+
+ pBoostMgr->groupCount++;
+ pBoostMgr->pBoostGroups[i].gpuCount = pBoostConfig->gpuCount;
+ pBoostMgr->pBoostGroups[i].bBridgeless = pBoostConfig->bBridgeless;
+
+ // Set the out param
+ pBoostConfig->boostGroupId = i;
+
+ // We are done.
+ break;
+ }
+ }
+
+ // Since we've come so far, there can't be 0 unused entries
+ if (i == NV0000_SYNC_GPU_BOOST_MAX_GROUPS)
+ {
+ NV_PRINTF(LEVEL_ERROR, "Inconsistency in pBoostGroups state.\n");
+ status = NV_ERR_INVALID_STATE;
+ NV_ASSERT_OR_GOTO(0, gpuboostmgrCreateGroup_exit);
+ }
+
+gpuboostmgrCreateGroup_exit:
+ if (bCleanup)
+ {
+ NvU32 k;
+
+ // Clean up stray state in case of failure
+ for(k = 0; k < pBoostConfig->gpuCount; k++)
+ {
+ btreeSearch(pBoostConfig->gpuIds[k],
+ &pGpuIdNode,
+ pBoostMgr->pGpuIdTree);
+ if (NULL != pGpuIdNode)
+ {
+ btreeUnlink(pGpuIdNode, &pBoostMgr->pGpuIdTree);
+ portMemFree(pGpuIdNode);
+ }
+ }
+
+ // Destroy @ref SYNC_GPU_BOOST_GROUP object
+ portMemSet(&(pBoostMgr->pBoostGroups[i]), 0, sizeof(SYNC_GPU_BOOST_GROUP));
+ }
+
+ return status;
+}
+
+/*!
+ * @brief OBJGPUBOOSTMGR object method to destroy a Sync Gpu Boost Group (SGBG)
+ *
+ * This method will destroy an existing SGBG
+ *
+ * @param [in] boostGroupId Unique ID of the SGBG to be destroyed
+
+ * @returns NV_OK Success
+ * @returns NV_ERR_ILLEGAL_ACTION No SGBGs to destroy in the first place
+ * Or the requested SGBG is already destroyed.
+ * and a few more returned from second level functions.
+ */
+NV_STATUS
+gpuboostmgrDestroyGroup_IMPL
+(
+ OBJGPUBOOSTMGR *pBoostMgr,
+ NvU32 boostGroupId
+)
+{
+ NV_STATUS status = NV_OK;
+ NODE *pGpuIdNode = NULL;
+ NvU32 i;
+
+ // Can't try to destroy a non-existing group
+ NV_ASSERT_OR_RETURN(pBoostMgr->groupCount > 0, NV_ERR_ILLEGAL_ACTION);
+ NV_ASSERT_OR_RETURN(NV0000_SYNC_GPU_BOOST_MAX_GROUPS > boostGroupId, NV_ERR_OUT_OF_RANGE);
+ NV_ASSERT_OR_RETURN(0 != pBoostMgr->pBoostGroups[boostGroupId].gpuCount, NV_ERR_ILLEGAL_ACTION);
+
+ // Remove each GPU ID from the ID tree before destroying the group.
+ for(i = 0; i < pBoostMgr->pBoostGroups[boostGroupId].gpuCount; i++)
+ {
+ status = btreeSearch(pBoostMgr->pBoostGroups[boostGroupId].gpuIds[i],
+ &pGpuIdNode,
+ pBoostMgr->pGpuIdTree);
+ NV_ASSERT_OR_RETURN(((NV_OK == status) && (NULL != pGpuIdNode)), status);
+ btreeUnlink(pGpuIdNode, &pBoostMgr->pGpuIdTree);
+ portMemFree(pGpuIdNode);
+ }
+
+ // Destroy @ref SYNC_GPU_BOOST_GROUP object
+ portMemSet(&(pBoostMgr->pBoostGroups[boostGroupId]), 0, sizeof(SYNC_GPU_BOOST_GROUP));
+
+ // Decrement groupCount
+ pBoostMgr->groupCount--;
+
+ return NV_OK;
+}
+
+/*!
+ * @brief Returns information about each Sync Gpu Boost Group defined in the system + */ +NV_STATUS +gpuboostmgrQueryGroups_IMPL +( + OBJGPUBOOSTMGR *pBoostMgr, + NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *pParams +) +{ + NvU32 i; + NvU32 j; + + j = 0; + for (i = 0; i < NV0000_SYNC_GPU_BOOST_MAX_GROUPS; i++) + { + if (NV_OK == gpuboostmgrValidateGroupId(pBoostMgr, i)) + { + pParams->pBoostGroups[j].gpuCount = pBoostMgr->pBoostGroups[i].gpuCount; + pParams->pBoostGroups[j].boostGroupId = i; + portMemCopy(pParams->pBoostGroups[j].gpuIds, + sizeof(pParams->pBoostGroups[j].gpuIds), + pBoostMgr->pBoostGroups[i].gpuIds, + sizeof(pParams->pBoostGroups[j].gpuIds)); + j++; + } + } + + pParams->groupCount = j; + + return NV_OK; +} + +/*! + * @brief OBJGPUBOOSTMGR object method to validate a config for a Sync Gpu Boost Group (SGBG) + * @ref NV0000_SYNC_GPU_BOOST_GROUP_CONFIG + * + * The following checks needs to be met for a config to be converted to an SGBG + * 1. There is room for a new SGBG to be tracked in RM. + * 2. Valid GPU IDs are specified + * 3. None of the GPUs specified is already a part of an existing SGBG + * + * @param [in] boostConfig @ref NV0000_SYNC_GPU_BOOST_GROUP_CONFIG + * + * @returns NV_OK Success + * @returns other values Config specified cannot be accepted. + */ +NV_STATUS +gpuboostmgrCheckConfig_IMPL +( + OBJGPUBOOSTMGR *pBoostMgr, + NV0000_SYNC_GPU_BOOST_GROUP_CONFIG *pBoostConfig +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = NULL; + NODE *pNode = NULL; + NvU32 i; + + NV_ASSERT_OR_RETURN(NULL != pBoostConfig, NV_ERR_INVALID_ARGUMENT); + + if (0 == pBoostConfig->gpuCount || + NV_MAX_DEVICES < pBoostConfig->gpuCount) + { + status = NV_ERR_OUT_OF_RANGE; + NV_PRINTF(LEVEL_ERROR, "Invalid Gpu Count 0x%x\n", pBoostConfig->gpuCount); + DBG_BREAKPOINT(); + goto gpuboostmgrCheckConfig_exit; + } + + // Policy filters will specify if we can support a given config. + status = _gpuboostmgrApplyPolicyFilters(pBoostConfig); + NV_ASSERT_OR_GOTO(NV_OK == status, gpuboostmgrCheckConfig_exit); + + for (i = 0; i < pBoostConfig->gpuCount; i++) + { + // Check for invalid GPU ID + if (NV0000_CTRL_GPU_INVALID_ID == pBoostConfig->gpuIds[i]) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_ERROR, + "Invalid GPU ID 0x%x at index 0x%x\n", + pBoostConfig->gpuIds[i], i); + DBG_BREAKPOINT(); + goto gpuboostmgrCheckConfig_exit; + } + + // + // Check for OBJGPU being available. + // + // We do not need to take a GPU lock here, as currently, the API lock + // guarantees that OBJ* will not be destroyed while the API + // lock is being held. If at all the check needs to read/write OBJGPU state, + // we will need the GPU lock. + // + // We expect that going ahead, client locks will provide similar guarantees + // on the GPU state. + // + pGpu = gpumgrGetGpuFromId(pBoostConfig->gpuIds[i]); + if (NULL == pGpu) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_ERROR, + "OBJGPU not constructed yet for ID 0x%x at index 0x%x\n", + pBoostConfig->gpuIds[i], i); + DBG_BREAKPOINT(); + goto gpuboostmgrCheckConfig_exit; + } + + // A GPU cannot be in more than one Boost Group + if (NV_OK == btreeSearch(pBoostConfig->gpuIds[i], &pNode, pBoostMgr->pGpuIdTree)) + { + status = NV_ERR_INVALID_ARGUMENT; + NV_PRINTF(LEVEL_ERROR, + "GPU with ID 0x%x already in use in another group\n", + pBoostConfig->gpuIds[i]); + DBG_BREAKPOINT(); + goto gpuboostmgrCheckConfig_exit; + } + } + +gpuboostmgrCheckConfig_exit: + return status; +} + +/*! 
+ * @brief Constructor + */ +NV_STATUS +gpuboostmgrConstruct_IMPL(OBJGPUBOOSTMGR *pBoostMgr) +{ + return NV_OK; +} + +/*! + * @brief Destructor + */ +void +gpuboostmgrDestruct_IMPL(OBJGPUBOOSTMGR *pBoostMgr) +{ + btreeDestroyNodes(pBoostMgr->pGpuIdTree); + portMemSet(pBoostMgr->pBoostGroups, 0, NV0000_SYNC_GPU_BOOST_MAX_GROUPS * sizeof(SYNC_GPU_BOOST_GROUP)); + pBoostMgr->groupCount = 0; +} + + +/*! + * @brief Checks if the boost group ID belongs to a valid Sync Gpu Boost Group + * + * @param [in] boostGroupId ID to be checked + * + * @returns NV_OK group ID belongs to a valid Group. + * @returns NV_ERR_INVALID_INDEX group ID is not used by any SGBG + */ +NV_STATUS +gpuboostmgrValidateGroupId_IMPL +( + OBJGPUBOOSTMGR *pBoostMgr, + NvU32 boostGroupId +) +{ + if (NV0000_SYNC_GPU_BOOST_MAX_GROUPS <= boostGroupId) + return NV_ERR_INVALID_INDEX; + + // If group count is 0, the group index points to an invalid/empty boost group + if (0 == pBoostMgr->pBoostGroups[boostGroupId].gpuCount) + return NV_ERR_INVALID_INDEX; + + return NV_OK; +} + +/*! + * @brief Increments the ref count for a Sync Gpu Boost Group + * + * @param [in] boostGroupId ID of the SGBG whose ref count needs incrementing. + * + * @returns NV_OK Ref count incremented successfully + * + */ +NV_STATUS +gpuboostmgrIncrementRefCount_IMPL +( + OBJGPUBOOSTMGR *pBoostMgr, + NvU32 boostGroupId +) +{ + NV_STATUS status = NV_OK; + + status = gpuboostmgrValidateGroupId(pBoostMgr, boostGroupId); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Invalid group ID 0x%x\n", boostGroupId); + NV_ASSERT_OR_RETURN(0, status); + } + + if (NV_U32_MAX == pBoostMgr->pBoostGroups[boostGroupId].refCount) + { + NV_PRINTF(LEVEL_ERROR, + "Max limit reached for ref count on group 0x%x\n", + boostGroupId); + NV_ASSERT_OR_RETURN(0, NV_ERR_INSUFFICIENT_RESOURCES); + } + + // + // Increment the ref count on the SGBG in @ref OJGPUBOOSTMGR + // Trigger state change for the SGB Algorithm if this is the first client + // referencing the group + // + pBoostMgr->pBoostGroups[boostGroupId].refCount++; + if (1 == pBoostMgr->pBoostGroups[boostGroupId].refCount) + { + status = kperfGpuBoostSyncStateUpdate(pBoostMgr, boostGroupId, NV_TRUE); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, + "Could not activate Sync GPU Boost on group 0x%x. Status: 0x%08x\n", + boostGroupId, status); + pBoostMgr->pBoostGroups[boostGroupId].refCount--; + NV_ASSERT(0); + } + } + + return status; +} + +/*! + * @brief Decrements the ref count for a Sync Gpu Boost + * Group + * + * @param [in] boostGroupId ID of the SGBG whose ref count needs decrementing. 
+ * + * @returns NV_OK Ref count decremented successfully + * + */ +NV_STATUS +gpuboostmgrDecrementRefCount_IMPL +( + OBJGPUBOOSTMGR *pBoostMgr, + NvU32 boostGroupId +) +{ + NV_STATUS status = NV_OK; + + status = gpuboostmgrValidateGroupId(pBoostMgr, boostGroupId); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, "Invalid group ID 0x%x\n", boostGroupId); + NV_ASSERT_OR_RETURN(0, status); + } + + if (0 == pBoostMgr->pBoostGroups[boostGroupId].refCount) + { + NV_PRINTF(LEVEL_ERROR, "Ref count on group 0x%x is already 0\n", + boostGroupId); + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_REQUEST); + } + + // + // Decrement the ref count on the SGBG in @ref OJGPUBOOSTMGR + // Trigger state change for the SGB Algorithm if this was the last client + // referencing the group + // + pBoostMgr->pBoostGroups[boostGroupId].refCount--; + if (0 == pBoostMgr->pBoostGroups[boostGroupId].refCount) + { + status = kperfGpuBoostSyncStateUpdate(pBoostMgr, boostGroupId, NV_FALSE); + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, + "Could not deactivate Sync GPU Boost on group 0x%x. Status: 0x%08x\n", + boostGroupId, status); + NV_ASSERT(0); + } + } + + return status; +} + +/*! + * This iterator function return the GPU at given index from the given SGBG + * + * @params [in] pBoostGrp Pointer to the SGBG to iterate over + * @params [in] grpId ID of the Boost Group over which to iterate. + * ID corresponds to index in @ref OBJGPUBOOSTMGR::pBoostGroups + * @params [in] pIndex 0 based index into the pBoostGrp, at which the + * iteration commences. + * + * @returns OBJGPU* if a GPU object is found. + * NULL otherwise + * + * Note: Always pass in an initial index of 0, when beginning the iteration. This + * method lends itself to a while() construct like so - + * while (NULL != (pGpu = ItrMethod(pBoostGrp, &index)){ //foo }; + */ +POBJGPU +gpuboostmgrGpuItr_IMPL +( + OBJGPUBOOSTMGR *pBoostMgr, + NvU32 grpId, + NvU32 *pIndex +) +{ + NV_ASSERT_OR_RETURN(NULL != pIndex, NULL); + + if (0 == pBoostMgr->pBoostGroups[grpId].gpuCount) + { + NV_PRINTF(LEVEL_ERROR, "Gpu Count is 0 for group ID: 0x%x\n", grpId); + return NULL; + } + if (*pIndex == pBoostMgr->pBoostGroups[grpId].gpuCount) + { + return NULL; + } + + return gpumgrGetGpuFromId(pBoostMgr->pBoostGroups[grpId].gpuIds[(*pIndex)++]); +} + +/*! + * @brief Retrieves the ID of the SGBG to which a GPU belongs + * + * @param [in] pBoostMgr + * @param [in] pGpu + * @param [out] pBoostGrpId ID of the @ref SYNC_GPU_BOOST_GROUP to which the + * given GPU belongs. + * NV0000_SYNC_GPU_BOOST_INVALID_GROUP_ID value if + * group is not found + * + * @return NV_OK if SGBG is found + * @return NV_ERR_OBJECT_NOT_FOUND if SGBG is not found + * @return NV_ERR_INVALID_ARGUMENT Null or incorrect arguments passed in. + */ +NV_STATUS +gpuboostmgrGetBoostGrpIdFromGpu_IMPL +( + OBJGPUBOOSTMGR *pBoostMgr, + OBJGPU *pGpu, + NvU32 *pBoostGrpId +) +{ + NvU32 i; + NvU32 index = 0; + OBJGPU *pGpuTemp = NULL; + + *pBoostGrpId = NV0000_SYNC_GPU_BOOST_INVALID_GROUP_ID; + + NV_ASSERT_OR_RETURN(NULL != pGpu, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBoostGrpId, NV_ERR_INVALID_ARGUMENT); + + for (i = 0; i < pBoostMgr->groupCount; i++) + { + while (NULL != (pGpuTemp = gpuboostmgrGpuItr(pBoostMgr, i, &index))) + { + if (pGpuTemp->gpuId == pGpu->gpuId) + { + *pBoostGrpId = i; + return NV_OK; + } + } + } + + NV_PRINTF(LEVEL_INFO, + "No Boost Group found for the gpu with ID: 0x%08x\n", + pGpu->gpuId); + + return NV_ERR_OBJECT_NOT_FOUND; +} + +/*! 
+ * @return NV_TRUE If the Sync GPU Boost Group at the given ID has the algorithm + * active. + * NV_FALSE If the group ID is invalid or the algorithm is inactive. + */ +NvBool +gpuboostmgrIsBoostGrpActive_IMPL +( + OBJGPUBOOSTMGR *pBoostMgr, + NvU32 grpId +) +{ + if (grpId == NV0000_SYNC_GPU_BOOST_INVALID_GROUP_ID) + { + return NV_FALSE; + } + + return ((0 != pBoostMgr->pBoostGroups[grpId].gpuCount) && + (0 != pBoostMgr->pBoostGroups[grpId].refCount)); +} + +/*------------------------------Static Private Methods------------------------*/ + +/*! + * Applies filter policies which determine whether or not to allow the given GPUs + * to be in the same SGBG. + */ +static NV_STATUS +_gpuboostmgrApplyPolicyFilters(NV0000_SYNC_GPU_BOOST_GROUP_CONFIG *pBoostConfig) +{ + NV_STATUS status = NV_ERR_NOT_COMPATIBLE; + OBJGPU *pGpu = NULL; + OBJGPU *pGpuItr = NULL; + OBJGPUGRP *pGpuGrp = NULL; + NvBool bIsSli = NV_TRUE; + NvBool bIsUnlinkedSli = NV_TRUE; + NvBool bMatchingDevId = NV_TRUE; + NvU32 i; + + NV_ASSERT_OR_RETURN(NULL != pBoostConfig, NV_ERR_INVALID_ARGUMENT); + + pGpu = gpumgrGetGpuFromId(pBoostConfig->gpuIds[0]); + NV_ASSERT_OR_RETURN(NULL != pGpu, NV_ERR_OBJECT_NOT_FOUND); + pGpuGrp = gpumgrGetGpuGrpFromGpu(pGpu); + + // + // Group Filter Policy: + // If GPUs are in same SLI device - Allow + // else if GPUs have the same (devid, subsystem id, subvendor id, board proj id, board sku id) - Allow + // else - Disallow + // + + // + // Check if all GPUs are in same SLI Device. + // We compare @ref OBJGPUGRP for each GPU as opposed to PBJGPU:deviceInstance + // because in the future we may move away from the SLI Device Model. + // + for (i = 1; i < pBoostConfig->gpuCount; i++) + { + pGpuItr = gpumgrGetGpuFromId(pBoostConfig->gpuIds[i]); + if (pGpuGrp == gpumgrGetGpuGrpFromGpu(pGpuItr)) + { + continue; + } + else + { + bIsSli = NV_FALSE; + break; + } + } + if (bIsSli) + { + status = NV_OK; + goto _gpuboostmgrApplyPolicyFilters_exit; + } + + // + // Check if Unlinked SLI is enabled for all GPUs within the group + // + for (i = 0; i < pBoostConfig->gpuCount; i++) + { + pGpuItr = gpumgrGetGpuFromId(pBoostConfig->gpuIds[i]); + if (IsUnlinkedSLIEnabled(pGpuItr)) + { + continue; + } + else + { + bIsUnlinkedSli = NV_FALSE; + break; + } + } + if (bIsUnlinkedSli) + { + status = NV_OK; + goto _gpuboostmgrApplyPolicyFilters_exit; + } + + // + // Check if all the GPUs have same dev id. This needs to be ensured for the Sync + // Boost algorithm to provide the expected benefits, unless otherwise specified. + // + NvU32 pciDevId = 0; + NvU64 boardProjNum = 0; + NvU64 boardSkuNum = 0; + NvU16 subVendor = 0; + NvU16 subDevice = 0; + GspStaticConfigInfo *pGSCI = NULL; + + pciDevId = DRF_VAL(_PCI, _DEVID, _DEVICE, pGpu->idInfo.PCIDeviceID); + + // Cache all necessary values for one of the GPUs + { + if ( IS_GSP_CLIENT(pGpu) ) + { + pGSCI = GPU_GET_GSP_STATIC_INFO(pGpu); + } + + if ( !IS_GSP_CLIENT(pGpu) || !pGSCI->bVbiosValid ) + { + status = NV_ERR_NOT_SUPPORTED; + NV_ASSERT_OR_GOTO(NV_FALSE, _gpuboostmgrApplyPolicyFilters_exit); + } + + portMemCopy(&boardSkuNum, sizeof(boardSkuNum), pGSCI->SKUInfo.projectSKU, sizeof(pGSCI->SKUInfo.projectSKU)); + portMemCopy(&boardProjNum, sizeof(boardProjNum), pGSCI->SKUInfo.project, sizeof(pGSCI->SKUInfo.project)); + subVendor = pGSCI->vbiosSubVendor; + subDevice = pGSCI->vbiosSubDevice; + } + + // Compare each GPU's values with the cached values. 
+ for (i = 1; i < pBoostConfig->gpuCount; i++) + { + NvU64 boardProjNumItr = 0; + NvU64 boardSkuNumItr = 0; + NvU16 subVendorItr = 0; + NvU16 subDeviceItr = 0; + + pGpuItr = gpumgrGetGpuFromId(pBoostConfig->gpuIds[i]); + + // Extract values for the GPU in the iteration. + { + if ( IS_GSP_CLIENT(pGpuItr) ) + { + pGSCI = GPU_GET_GSP_STATIC_INFO(pGpuItr); + } + + if ( !IS_GSP_CLIENT(pGpuItr) || !pGSCI->bVbiosValid ) + { + status = NV_ERR_OBJECT_NOT_FOUND; + bMatchingDevId = NV_FALSE; + NV_ASSERT_OR_GOTO(NV_FALSE, _gpuboostmgrApplyPolicyFilters_exit); + } + portMemCopy(&boardSkuNumItr, sizeof(boardSkuNumItr), pGSCI->SKUInfo.projectSKU, sizeof(pGSCI->SKUInfo.projectSKU)); + portMemCopy(&boardProjNumItr, sizeof(boardProjNumItr), pGSCI->SKUInfo.project, sizeof(pGSCI->SKUInfo.project)); + subVendorItr = pGSCI->vbiosSubVendor; + subDeviceItr = pGSCI->vbiosSubDevice; + } + + // Go to the next GPU if all values match + if ((pciDevId == DRF_VAL(_PCI, _DEVID, _DEVICE, pGpuItr->idInfo.PCIDeviceID)) && + (boardSkuNum == boardSkuNumItr) && + (boardProjNum == boardProjNumItr) && + (subVendor == subVendorItr) && + (subDevice == subDeviceItr)) + { + continue; + } + else + { + // At least one mismatch. + bMatchingDevId = NV_FALSE; + break; + } + } + if (bMatchingDevId) + { + status = NV_OK; + goto _gpuboostmgrApplyPolicyFilters_exit; + } + +_gpuboostmgrApplyPolicyFilters_exit: + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "GPUs not compatible to be put in the same group\n"); + DBG_BREAKPOINT(); + } + + return status; +} + +NV_STATUS +syncgpuboostConstruct_IMPL +( + SyncGpuBoost *pSyncGpuBoost, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUBOOSTMGR *pBoostMgr = SYS_GET_GPUBOOSTMGR(pSys); + NV_STATUS rmStatus = NV_OK; + NV0060_ALLOC_PARAMETERS *p0060Params = pParams->pAllocParams; + + NV_ASSERT_OR_RETURN(NULL != p0060Params, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OK_OR_RETURN(gpuboostmgrIncrementRefCount(pBoostMgr, p0060Params->gpuBoostGroupId)); + + pSyncGpuBoost->gpuBoostGroupId = p0060Params->gpuBoostGroupId; + + return rmStatus; +} + +void +syncgpuboostDestruct_IMPL +( + SyncGpuBoost *pSyncGpuBoost +) +{ + RS_RES_FREE_PARAMS_INTERNAL *pParams = NULL; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUBOOSTMGR *pBoostMgr = SYS_GET_GPUBOOSTMGR(pSys); + NV_STATUS rmStatus = NV_OK; + + resGetFreeParams(staticCast(pSyncGpuBoost, RsResource), NULL, &pParams); + + // Can't do much if ref count decrement failed. Assert and continue deletion. + rmStatus = gpuboostmgrDecrementRefCount(pBoostMgr, pSyncGpuBoost->gpuBoostGroupId); + NV_ASSERT(NV_OK == rmStatus); + + pParams->status = rmStatus; + + return; +} diff --git a/src/nvidia/src/kernel/rmapi/alloc_free.c b/src/nvidia/src/kernel/rmapi/alloc_free.c new file mode 100644 index 000000000..697215580 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/alloc_free.c @@ -0,0 +1,1501 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "rmapi/rmapi.h" +#include "rmapi/client.h" +#include "entry_points.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "vgpu/rpc.h" +#include "resource_desc.h" +#include "gpu/disp/disp_objs.h" +#include "gpu/disp/disp_channel.h" +#include "nvsecurityinfo.h" + +#include "gpu/device/device.h" + +#include "class/cl0005.h" // NV01_EVENT +#include "class/clc574.h" // UVM_CHANNEL_RETAINER + +#include "class/cl83de.h" // GT200_DEBUGGER +#include "gpu/gr/kernel_sm_debugger_session.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "tmr.h" + +// +// RM Alloc & Free internal flags -- code should be migrated to use rsresdesc +// and rmapi types directly where possible. +// +#define RM_ALLOC_STATES_NONE 0 +#define RM_ALLOC_STATES_INTERNAL_CLIENT_HANDLE ALLOC_STATE_INTERNAL_CLIENT_HANDLE // NVBIT(5) +#define RM_ALLOC_STATES_SKIP_RPC NVBIT(6) +#define RM_ALLOC_STATES_INTERNAL_ALLOC NVBIT(7) + +#define RM_FREE_STATES_NONE 0 + +static void +rmapiResourceDescToLegacyFlags +( + const RS_RESOURCE_DESC *pResDesc, + NvU32 *pAllocFlags, + NvU32 *pFreeFlags +) +{ + if (pAllocFlags) + { + *pAllocFlags = (pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC) ? RM_LOCK_FLAGS_NONE : RM_LOCK_FLAGS_NO_GPUS_LOCK; + *pAllocFlags |= (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC) ? RM_LOCK_FLAGS_GPU_GROUP_LOCK : 0; + } + + if (pFreeFlags) + { + *pFreeFlags = (pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE) ? RM_LOCK_FLAGS_NONE : RM_LOCK_FLAGS_NO_GPUS_LOCK; + *pFreeFlags |= (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE) ? 
RM_LOCK_FLAGS_GPU_GROUP_LOCK : 0; + } +} + +NV_STATUS +serverAllocApiCopyIn +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams, + API_STATE **ppParamCopy +) +{ + NV_STATUS status; + API_SECURITY_INFO *pSecInfo = pRmAllocParams->pSecInfo; + NvBool bCopyInParams = pSecInfo->paramLocation == PARAM_LOCATION_USER; + RMAPI_PARAM_COPY *pParamCopy = NULL; + NvU32 allocParamsSize = 0; + void *pUserParams = pRmAllocParams->pAllocParams; + + pParamCopy = (RMAPI_PARAM_COPY*)PORT_ALLOC(g_resServ.pAllocator, sizeof(*pParamCopy)); + if (pParamCopy == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + portMemSet(pParamCopy, 0, sizeof(*pParamCopy)); + pRmAllocParams->pAllocParams = NULL; + + // Setup for access to param + // Param size is initialized to zero, and then set via rmapiParamsCopyInit + RMAPI_PARAM_COPY_INIT(*pParamCopy, pRmAllocParams->pAllocParams, NV_PTR_TO_NvP64(pUserParams), allocParamsSize, 1); + + // Look up param size based on hClass + status = rmapiParamsCopyInit(pParamCopy, pRmAllocParams->externalClassId); + if (NV_OK != status) + goto done; + + // Using the per-class info set above, pull in the parameters for this allocation + if (pParamCopy->paramsSize > 0) + { + // gain access to client's parameters via 'pKernelCtrl' + status = rmapiParamsAcquire(pParamCopy, bCopyInParams); + if (status != NV_OK) + goto done; + } + + // Prevent requesting rights before rights are enabled, just in case old code doesn't zero it properly. + if (!pServer->bRsAccessEnabled) + pRmAllocParams->pRightsRequested = NULL; + + if (pRmAllocParams->pRightsRequested != NULL) + { + // copyFromUser requires a non-stack buffer, allocate one to copy into + RS_ACCESS_MASK *pMaskBuffer = (RS_ACCESS_MASK*)PORT_ALLOC(g_resServ.pAllocator, sizeof(RS_ACCESS_MASK)); + if (pMaskBuffer == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + // Mask is a fixed size, just copy it directly into allocParams + status = rmapiParamsCopyIn("RightsRequested", + pMaskBuffer, + NV_PTR_TO_NvP64(pRmAllocParams->pRightsRequested), + sizeof(RS_ACCESS_MASK), + bCopyInParams); + + portMemCopy(&pRmAllocParams->rightsRequestedCopy, sizeof(RS_ACCESS_MASK), + pMaskBuffer, sizeof(RS_ACCESS_MASK)); + + PORT_FREE(g_resServ.pAllocator, pMaskBuffer); + + if (status != NV_OK) + goto done; + + pRmAllocParams->pRightsRequested = &pRmAllocParams->rightsRequestedCopy; + } +done: + if (status != NV_OK) + { + if (pParamCopy != NULL) + PORT_FREE(g_resServ.pAllocator, pParamCopy); + pParamCopy = NULL; + } + + if (ppParamCopy != NULL) + *ppParamCopy = pParamCopy; + + return status; +} + +NV_STATUS +serverAllocApiCopyOut +( + RsServer *pServer, + NV_STATUS status, + API_STATE *pParamCopy +) +{ + NV_STATUS cpStatus = NV_OK; + if (pParamCopy->paramsSize > 0) + { + // don't copyout if an error + if (status != NV_OK) + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + + cpStatus = rmapiParamsRelease(pParamCopy); + if (status == NV_OK) + status = cpStatus; + } + + PORT_FREE(g_resServ.pAllocator, pParamCopy); + + return status; +} + +NV_STATUS +serverLookupSecondClient +( + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvHandle *phClient +) +{ + *phClient = 0; + + switch (pParams->externalClassId) + { + case GT200_DEBUGGER: + { + NV83DE_ALLOC_PARAMETERS *pNv83deParams = pParams->pAllocParams; + + if (pNv83deParams->hAppClient != pParams->hClient) + *phClient = pNv83deParams->hAppClient; + + break; + } + case UVM_CHANNEL_RETAINER: + { + NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS *pUvmChannelRetainerParams = 
pParams->pAllocParams; + + if (pUvmChannelRetainerParams->hClient != pParams->hClient) + *phClient = pUvmChannelRetainerParams->hClient; + + break; + } + default: + break; + } + + return NV_OK; +} + +NV_STATUS +serverTopLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status; + if ((pLockInfo->flags & RM_LOCK_FLAGS_RM_SEMA) && + !(pLockInfo->state & RM_LOCK_STATES_RM_SEMA_ACQUIRED)) + { + if ((status = osAcquireRmSema(pSys->pSema)) != NV_OK) + return status; + pLockInfo->state |= RM_LOCK_STATES_RM_SEMA_ACQUIRED; + *pReleaseFlags |= RM_LOCK_RELEASE_RM_SEMA; + } + + if (!(pLockInfo->flags & RM_LOCK_FLAGS_NO_API_LOCK)) + { + if (!(pLockInfo->state & RM_LOCK_STATES_API_LOCK_ACQUIRED)) + { + NvU32 flags = RMAPI_LOCK_FLAGS_NONE; + if (access == LOCK_ACCESS_READ) + flags |= RMAPI_LOCK_FLAGS_READ; + + if ((status = rmApiLockAcquire(flags, RM_LOCK_MODULES_CLIENT)) != NV_OK) + { + return status; + } + pLockInfo->state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + *pReleaseFlags |= RM_LOCK_RELEASE_API_LOCK; + } + else + { + if (!rmApiLockIsOwner()) + { + NV_ASSERT(0); + return NV_ERR_INVALID_LOCK_STATE; + } + } + } + + return NV_OK; +} + +void +serverTopLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (*pReleaseFlags & RM_LOCK_RELEASE_API_LOCK) + { + rmApiLockRelease(); + pLockInfo->state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_API_LOCK; + } + + if (*pReleaseFlags & RM_LOCK_RELEASE_RM_SEMA) + { + osReleaseRmSema(pSys->pSema, NULL); + pLockInfo->state &= ~RM_LOCK_STATES_RM_SEMA_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_RM_SEMA; + } +} + +NV_STATUS +serverResLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pParentGpu = NULL; + + if (pLockInfo->state & RM_LOCK_STATES_GPUS_LOCK_ACQUIRED) + { + if (rmGpuLockIsOwner()) + { + return NV_OK; + } + else + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + + if (!(pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK)) + { + if (rmGpuLockIsOwner()) + { + if (!(pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS)) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + else + { + if ((status = rmGpuLocksAcquire(API_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK) + goto done; + + *pReleaseFlags |= RM_LOCK_RELEASE_GPUS_LOCK; + pLockInfo->state |= RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + } + } + + if (pLockInfo->flags & RM_LOCK_FLAGS_GPU_GROUP_LOCK) + { + RsResourceRef *pParentRef; + GpuResource *pGpuResource; + NvU32 gpuMask; + (void)gpuMask; + + pParentRef = pLockInfo->pContextRef; + if (pParentRef == NULL) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_OBJECT_PARENT; + goto done; + } + + // + // Use the pGpu from parent resource as it will work on alloc & free. + // Everything below NV0080_DEVICE uses the same pGpu group + // + // GPU teardown paths free client resources before tearing down pGpu so + // pGpu should always be valid at this point. 
+ // + pGpuResource = dynamicCast(pParentRef->pResource, GpuResource); + + if (pGpuResource == NULL) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_OBJECT_PARENT; + goto done; + } + + pParentGpu = GPU_RES_GET_GPU(pGpuResource); + + if (pLockInfo->state & RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED) + { + if (rmGpuGroupLockIsOwner(pParentGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuMask)) + { + goto done; + } + else + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + + if (rmGpuGroupLockIsOwner(pParentGpu->gpuInstance, GPU_LOCK_GRP_DEVICE, &gpuMask)) + { + if (!(pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS)) + { + NV_ASSERT(0); + status = NV_ERR_INVALID_LOCK_STATE; + goto done; + } + } + else + { + status = rmGpuGroupLockAcquire(pParentGpu->gpuInstance, + GPU_LOCK_GRP_DEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_CLIENT, + &pLockInfo->gpuMask); + if (status != NV_OK) + goto done; + + *pReleaseFlags |= RM_LOCK_RELEASE_GPU_GROUP_LOCK; + pLockInfo->state |= RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED; + } + } + +done: + switch(pLockInfo->traceOp) + { + case RS_LOCK_TRACE_ALLOC: + LOCK_METER_DATA(ALLOC, pLockInfo->traceClassId, 0, 0); + break; + case RS_LOCK_TRACE_FREE: + LOCK_METER_DATA(FREE_OBJECT, pLockInfo->traceClassId, 0, 0); + break; + case RS_LOCK_TRACE_CTRL: + LOCK_METER_DATA(RMCTRL, pLockInfo->traceClassId, pLockInfo->flags, status); + break; + default: + break; + } + + return status; +} + +NV_STATUS +serverAllocEpilogue_WAR +( + RsServer *pServer, + NV_STATUS status, + NvBool bClientAlloc, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + // + // Pre-Volta Linux swapgroups is the only remaining use of channel grabbing. + // Bug 2869820 is tracking the transition of swapgroups from requiring this + // RM feature. + // + NV_STATUS tmpStatus; + if (!bClientAlloc && status == NV_ERR_INSERT_DUPLICATE_NAME) + { + NvBool gpulockRelease = NV_FALSE; + RsResourceRef *pResourceRef; + + if (!rmGpuLockIsOwner()) + { + tmpStatus = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_CLIENT); + + if (tmpStatus != NV_OK) + return tmpStatus; + + gpulockRelease = NV_TRUE; + } + + // + // Hack for taking ownership of display channels. Clients call rmAlloc + // on a previously allocated handle to indicate they want to grab + // ownership of the underlying hardware channel. + // + // TODO - this should be moved to an RM control and called directly by + // clients instead of the overloaded allocation call. RmAlloc should + // be for allocating objects only. 
+ // + tmpStatus = clientGetResourceRef(pRmAllocParams->pClient, pRmAllocParams->hResource, &pResourceRef); + if (tmpStatus == NV_OK) + { + DispChannel *pDispChannel = dynamicCast(pResourceRef->pResource, DispChannel); + if (pDispChannel != NULL) + { + status = dispchnGrabChannel(pDispChannel, + pRmAllocParams->hClient, + pRmAllocParams->hParent, + pRmAllocParams->hResource, + pRmAllocParams->externalClassId, + pRmAllocParams->pAllocParams); + } + } + + if (gpulockRelease) + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + return status; +} + +static NV_STATUS +_rmAlloc +( + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pUserAllocParams, + NvU32 allocFlags, + NvU32 allocInitStates, + RS_LOCK_INFO *pLockInfo, + NvP64 pRightsRequested, + API_SECURITY_INFO secInfo +) +{ + NV_STATUS status; + RS_RES_ALLOC_PARAMS_INTERNAL rmAllocParams = {0}; + + NV_ASSERT_OR_RETURN(phObject != NULL, NV_ERR_INVALID_ARGUMENT); + + // init RmAllocParams + rmAllocParams.hClient = hClient; + rmAllocParams.hParent = hParent; + rmAllocParams.hResource = *phObject; + rmAllocParams.externalClassId = hClass; + rmAllocParams.allocFlags = allocFlags; + rmAllocParams.allocState = allocInitStates; + rmAllocParams.pSecInfo = &secInfo; + rmAllocParams.pResourceRef = NULL; + rmAllocParams.pAllocParams = NvP64_VALUE(pUserAllocParams); + rmAllocParams.pLockInfo = pLockInfo; + rmAllocParams.pRightsRequested = NvP64_VALUE(pRightsRequested); + rmAllocParams.pRightsRequired = NULL; + + status = serverAllocResource(&g_resServ, &rmAllocParams); + *phObject = rmAllocParams.hResource; + + return status; + +} + +static +NV_STATUS +_fixupAllocParams +( + RS_RESOURCE_DESC **ppResDesc, + RS_RES_ALLOC_PARAMS_INTERNAL *pRmAllocParams +) +{ + RS_RESOURCE_DESC *pResDesc = *ppResDesc; + + if ((pResDesc->pClassInfo != NULL) && (pResDesc->pClassInfo->classId == classId(Event))) + { + NV0005_ALLOC_PARAMETERS *pNv0005Params = pRmAllocParams->pAllocParams; + + // + // This field isn't filled out consistently by clients. Some clients specify NV01_EVENT as the class + // and then override it using the subclass in the event parameters, while other clients specify the + // same subclass in both the RmAllocParams and event params. NV01_EVENT isn't a valid class to allocate + // so overwrite it with the subclass from the event params. 
+ // + if (pRmAllocParams->externalClassId == NV01_EVENT) + pRmAllocParams->externalClassId = pNv0005Params->hClass; + + pNv0005Params->hSrcResource = pRmAllocParams->hParent; + + // No support for event and src resource that reside under different clients + if (pNv0005Params->hParentClient != pRmAllocParams->hClient) + pRmAllocParams->hParent = pRmAllocParams->hClient; + + // class id may have changed so refresh the resource descriptor, but make sure it is still an Event + pResDesc = RsResInfoByExternalClassId(pRmAllocParams->externalClassId); + if (pResDesc == NULL || pResDesc->pClassInfo == NULL || pResDesc->pClassInfo->classId != classId(Event)) + return NV_ERR_INVALID_CLASS; + + *ppResDesc = pResDesc; + } + + return NV_OK; +} + +NV_STATUS +serverAllocResourceUnderLock +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pRmAllocParams +) +{ + NvHandle hClient = pRmAllocParams->hClient; + NvHandle hParent; + RS_RESOURCE_DESC *pResDesc; + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus; + RsClient *pClient = pRmAllocParams->pClient; + RsResourceRef *pParentRef = NULL; + RsResourceRef *pResourceRef = NULL; + NvU32 i = 0; + RS_LOCK_INFO *pLockInfo = pRmAllocParams->pLockInfo; + NvU32 releaseFlags = 0; + RS_ACCESS_MASK rightsRequired; + LOCK_ACCESS_TYPE resLockAccess = LOCK_ACCESS_WRITE; + OBJGPU *pGpu = NULL; + NvBool bClearRecursiveStateFlag = NV_FALSE; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + if (pRmAllocParams->pSecInfo == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pResDesc = RsResInfoByExternalClassId(pRmAllocParams->externalClassId); + if (pResDesc == NULL) + { + return NV_ERR_INVALID_CLASS; + } + + NV_ASSERT_OK_OR_RETURN(_fixupAllocParams(&pResDesc, pRmAllocParams)); + rmapiResourceDescToLegacyFlags(pResDesc, &pLockInfo->flags, NULL); + + pLockInfo->traceOp = RS_LOCK_TRACE_ALLOC; + pLockInfo->traceClassId = pRmAllocParams->externalClassId; + hParent = pRmAllocParams->hParent; + if (pRmAllocParams->hResource == hClient) + { + if (pResDesc->pParentList[i] != 0) + status = NV_ERR_INVALID_OBJECT_PARENT; + hParent = 0; + + // Single instance restriction is implied + NV_ASSERT(!pResDesc->bMultiInstance); + } + else + { + // Check if parent is valid + status = clientGetResourceRef(pClient, hParent, &pParentRef); + if (status != NV_OK) + { + goto done; + } + pLockInfo->pContextRef = pParentRef; + } + + if ((pResDesc->flags & RS_FLAGS_INTERNAL_ONLY) && + !(pRmAllocParams->allocState & RM_ALLOC_STATES_INTERNAL_ALLOC)) + { + status = NV_ERR_INVALID_CLASS; + goto done; + } + + status = serverAllocResourceLookupLockFlags(&g_resServ, RS_LOCK_RESOURCE, pRmAllocParams, &resLockAccess); + if (status != NV_OK) + goto done; + + // + // We can get the GPU pointer for alloc of a device child. + // Device allocs need to be handled separately. 
See deviceInit_IMPL() + // + tmpStatus = gpuGetByRef(pParentRef, NULL, &pGpu); + + // Override locking flags if we'll need to RPC to GSP + if (pGpu != NULL && IS_GSP_CLIENT(pGpu) && + (pResDesc->flags & RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) + { + resLockAccess = LOCK_ACCESS_WRITE; // always write as we're RPCing to GSP + + // + // If the resource desc says no need for GPU locks, we still need to lock + // the current pGpu in order to send the RPC + // + if (pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK) + { + NV_PRINTF(LEVEL_INFO, "Overriding flags for alloc of class %04x\n", + pRmAllocParams->externalClassId); + pLockInfo->flags &= ~RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + if ((pLockInfo->state & RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS) == 0) + { + pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + bClearRecursiveStateFlag = NV_TRUE; + } + } + } + + status = serverResLock_Prologue(&g_resServ, resLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + if (pParentRef != NULL) + { + // + // Check if GPU in fatal condition. + // + if (pGpu != NULL) + { + // + // Abort if any GPU in SLI has fatal error. + // This property will be cleared on RM init, by which time the + // client should have reset the GPUs. + // + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY | + SLI_LOOP_FLAGS_IGNORE_REENTRANCY) + { + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_IN_FATAL_ERROR)) + { + KernelRc *pKernelRc = GPU_GET_KERNEL_RC(pGpu); + if (pKernelRc != NULL) + { + krcBreakpoint(pKernelRc); + } + status = NV_ERR_RESET_REQUIRED; + SLI_LOOP_GOTO(done); + } + } + SLI_LOOP_END; + } + + // If single instance, ensure parent doesn't yet have a class of this type + if (!pResDesc->bMultiInstance) + { + if (refFindChildOfType(pParentRef, pResDesc->pClassInfo->classId, NV_TRUE, NULL) == NV_OK) + { + status = NV_ERR_STATE_IN_USE; + } + } + + // Check if hParent is an allowed parent for this resource + if (status == NV_OK && !pResDesc->bAnyParent) + { + status = NV_ERR_INVALID_OBJECT_PARENT; + for (i = 0; pResDesc->pParentList[i]; i++) + { + if (pParentRef->internalClassId == pResDesc->pParentList[i]) + { + status = NV_OK; + break; + } + } + } + } + + if (status != NV_OK) + goto done; + + status = clientAssignResourceHandle(pClient, &pRmAllocParams->hResource); + if (status != NV_OK) + goto done; + + pRmAllocParams->hParent = (pRmAllocParams->hParent == 0) ? 
pRmAllocParams->hClient : pRmAllocParams->hParent; + + if (pServer->bRsAccessEnabled) + { + rsAccessMaskFromArray(&rightsRequired, pResDesc->pRightsRequiredArray, + pResDesc->rightsRequiredLength); + pRmAllocParams->pRightsRequired = &rightsRequired; + } + + status = clientAllocResource(pClient, &g_resServ, pRmAllocParams); + if (status != NV_OK) + goto done; + + pResourceRef = pRmAllocParams->pResourceRef; + + // + // Alloc RPC handling + // + if (!(pRmAllocParams->allocState & RM_ALLOC_STATES_SKIP_RPC)) + { + if (pResDesc->flags & (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) + { + OBJGPU *pGpu = NULL; + RmResource *pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + CALL_CONTEXT callContext = {0}; + CALL_CONTEXT *pOldContext = NULL; + + status = gpuGetByRef(pResourceRef, NULL, &pGpu); + if (status != NV_OK || pRmResource == NULL) + { + status = NV_ERR_INVALID_CLASS; + goto done; + } + + if (!IS_VIRTUAL(pGpu) && !IS_GSP_CLIENT(pGpu)) + { + status = NV_OK; + goto done; + } + + // if physical RM RPC make sure we're a GSP client otherwise skip + if (((pResDesc->flags & (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM)) == RS_FLAGS_ALLOC_RPC_TO_PHYS_RM) && + (!IS_GSP_CLIENT(pGpu))) + { + status = NV_OK; + goto done; + } + + // Set the call context to allow vgpuGetCallingContextDevice() + // and other context dependent functions to operate in the RPC code. + // + // The context is assigned in the above clientAllocResource() call, + // but we can't simply extend the context scope to this place + // as pResourceRef is allocated internally in clientAllocResource(). + // + // Instead, we create basically the same context here once again + // and use it for the RPC call. + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pRmAllocParams->pLockInfo; + callContext.secInfo = *pRmAllocParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + NV_RM_RPC_ALLOC_OBJECT(pGpu, + pRmAllocParams->hClient, + pRmAllocParams->hParent, + pRmAllocParams->hResource, + pRmAllocParams->externalClassId, + pRmAllocParams->pAllocParams, + status); + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + goto done; + + pRmResource->bRpcFree = NV_TRUE; + } + } + +done: + if ((status != NV_OK) && (pResourceRef != NULL)) + { + RS_RES_FREE_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = hClient; + params.hResource = pRmAllocParams->hResource; + params.pResourceRef = pResourceRef; + params.pSecInfo = pRmAllocParams->pSecInfo; + params.pLockInfo = pRmAllocParams->pLockInfo; + tmpStatus = clientFreeResource(pClient, &g_resServ, ¶ms); + NV_ASSERT(tmpStatus == NV_OK); + pRmAllocParams->pResourceRef = NULL; + } + + serverResLock_Epilogue(&g_resServ, resLockAccess, pLockInfo, &releaseFlags); + + if (bClearRecursiveStateFlag) + { + pLockInfo->state &= ~RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + } + return status; +} + +NV_STATUS +serverFreeResourceRpcUnderLock +( + RsServer *pServer, + RS_RES_FREE_PARAMS *pFreeParams +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef = pFreeParams->pResourceRef; + OBJGPU *pGpu = NULL; + NvBool bBcResource; + RmResource *pRmResource = NULL; + + NV_ASSERT_OR_RETURN(pResourceRef != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + pRmResource = dynamicCast(pResourceRef->pResource, RmResource); + status = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if ((status != NV_OK) || + (!IS_VIRTUAL(pGpu) && 
!IS_GSP_CLIENT(pGpu)) || + (pRmResource == NULL) || + (pRmResource->bRpcFree == NV_FALSE)) + { + status = NV_OK; + goto rpc_done; + } + + gpuSetThreadBcState(pGpu, bBcResource); + NV_RM_RPC_FREE(pGpu, pResourceRef->pClient->hClient, + pResourceRef->pParentRef->hResource, + pResourceRef->hResource, status); + +rpc_done: + return status; +} + +void +serverResLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + NvU32 gpuLockFlags = GPUS_LOCK_FLAGS_NONE; + if (access == LOCK_ACCESS_READ) + gpuLockFlags |= GPU_LOCK_FLAGS_READ; + + if (*pReleaseFlags & RM_LOCK_RELEASE_GPU_GROUP_LOCK) + { + // UNLOCK: release GPU group lock + rmGpuGroupLockRelease(pLockInfo->gpuMask, GPUS_LOCK_FLAGS_NONE); + pLockInfo->state &= ~RM_LOCK_STATES_GPU_GROUP_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_GPU_GROUP_LOCK; + } + + if (*pReleaseFlags & RM_LOCK_RELEASE_GPUS_LOCK) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + pLockInfo->state &= ~RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + *pReleaseFlags &= ~RM_LOCK_RELEASE_GPUS_LOCK; + } +} + +NV_STATUS +serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO *pLockInfo, RS_RES_FREE_PARAMS *pParams) +{ + portMemSet(pParams, 0, sizeof(*pParams)); + pParams->hClient = hClient; + pParams->hResource = hResource; + pParams->pLockInfo = pLockInfo; + return NV_OK; +} + +NV_STATUS +serverUpdateLockFlagsForFree +( + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + RS_LOCK_INFO *pLockInfo = pRmFreeParams->pLockInfo; + OBJGPU *pGpu = NULL; + + rmapiResourceDescToLegacyFlags(pRmFreeParams->pResourceRef->pResourceDesc, NULL, &pLockInfo->flags); + + pLockInfo->pContextRef = pRmFreeParams->pResourceRef->pParentRef; + if (gpuGetByRef(pLockInfo->pContextRef, NULL, &pGpu) == NV_OK) + { + RmResource *pRmResource = dynamicCast(pRmFreeParams->pResourceRef->pResource, RmResource); + if (pGpu != NULL && IS_GSP_CLIENT(pGpu) && pRmResource != NULL && pRmResource->bRpcFree) + { + // + // If the resource desc says no need for GPU locks, we still need to lock + // the current pGpu in order to send the RPC + // + if (pLockInfo->flags & RM_LOCK_FLAGS_NO_GPUS_LOCK) + { + NV_PRINTF(LEVEL_INFO, "Overriding flags for free of class %04x\n", + pRmFreeParams->pResourceRef->externalClassId); + pLockInfo->flags &= ~RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + } + } + } + + return NV_OK; +} + +NV_STATUS +rmapiFreeResourcePrologue +( + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + RsResourceRef *pResourceRef = pRmFreeParams->pResourceRef; + NV_STATUS tmpStatus; + OBJGPU *pGpu = NULL; + NvBool bBcResource; + + NV_ASSERT_OR_RETURN(pResourceRef, NV_ERR_INVALID_OBJECT_HANDLE); + + rmapiControlCacheFreeObject(pRmFreeParams->hClient, pRmFreeParams->hResource); + + // + // Use gpuGetByRef instead of GpuResource because gpuGetByRef will work even + // if resource isn't a GpuResource (e.g.: Memory which can be allocated + // under a subdevice, device or client root) + // + tmpStatus = gpuGetByRef(pResourceRef, &bBcResource, &pGpu); + if (tmpStatus == NV_OK) + gpuSetThreadBcState(pGpu, bBcResource); + + // + // Need to cancel pending timer callbacks before event structs are freed. 
+ // RS-TODO: provide notifications to objects referencing events or add + // dependency + // + TimerApi *pTimerApi = dynamicCast(pResourceRef->pResource, TimerApi); + if (pTimerApi != NULL) + { + tmrapiDeregisterEvents(pTimerApi); + } + + CliDelObjectEvents(pRmFreeParams->hClient, pRmFreeParams->hResource); + + return NV_OK; +} + +NV_STATUS +rmapiAlloc +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + void *pAllocParams +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, phObject, hClass, NV_PTR_TO_NvP64(pAllocParams), + RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiAllocWithHandle +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, &hObject, hClass, NV_PTR_TO_NvP64(pAllocParams), + RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiAllocWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 flags, + NvP64 pRightsRequested, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + NvU32 allocInitStates = RM_ALLOC_STATES_NONE; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO *pLockInfo; + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + pLockInfo = portMemAllocNonPaged(sizeof(*pLockInfo)); + if (pLockInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(pLockInfo, 0, sizeof(*pLockInfo)); + rmapiInitLockInfo(pRmApi, hClient, pLockInfo); + + // RS-TODO: Fix calls that use RMAPI_GPU_LOCK_INTERNAL without holding the API lock + if (pRmApi->bGpuLockInternal && !rmApiLockIsOwner()) + { + NV_PRINTF(LEVEL_ERROR, "RMAPI_GPU_LOCK_INTERNAL alloc requested without holding the RMAPI lock\n"); + pLockInfo->flags |= RM_LOCK_FLAGS_NO_API_LOCK; + pLockInfo->state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + } + + // This flag applies to both VGPU and GSP cases + if (flags & RMAPI_ALLOC_FLAGS_SKIP_RPC) + allocInitStates |= RM_ALLOC_STATES_SKIP_RPC; + + // + // Mark internal client allocations as such, so the resource server generates + // the internal client handle with a distinct template. + // The distinct template purpose is to make sure that GSP client provided + // client handles do not collide with the client handles ganerated by the GSP/FW RM. 
+ // + if ((pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) && + (pSecInfo->paramLocation == PARAM_LOCATION_KERNEL) && pRmApi->bGpuLockInternal) + allocInitStates |= RM_ALLOC_STATES_INTERNAL_CLIENT_HANDLE; + + if ((pSecInfo->paramLocation == PARAM_LOCATION_KERNEL) && + (pRmApi->bApiLockInternal || pRmApi->bGpuLockInternal)) + allocInitStates |= RM_ALLOC_STATES_INTERNAL_ALLOC; + + NV_PRINTF(LEVEL_INFO, "client:0x%x parent:0x%x object:0x%x class:0x%x\n", + hClient, hParent, *phObject, hClass); + + NVRM_TRACE_API('ALOC', hParent, *phObject, hClass); + NVRM_TRACE(pAllocParams); + + status = _rmAlloc(hClient, + hParent, + phObject, + hClass, + pAllocParams, + flags, + allocInitStates, + pLockInfo, + pRightsRequested, + *pSecInfo); + + + // + // If hClient is allocated behind GPU locks, client is marked as internal + // + if ((status == NV_OK) && ((hClass == NV01_ROOT) || (hClass == NV01_ROOT_NON_PRIV) || (hClass == NV01_ROOT_CLIENT)) && + pSecInfo->paramLocation == PARAM_LOCATION_KERNEL && pRmApi->bGpuLockInternal) + { + void *pHClient = *(void **)&pAllocParams; + + // flag this client as an RM internal client + rmclientSetClientFlagsByHandle(*(NvU32*)pHClient /* hClient */, RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT); + } + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "allocation complete\n"); + NVRM_TRACE('aloc'); + } + else + { + NV_PRINTF(LEVEL_WARNING, "allocation failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_WARNING, + "client:0x%x parent:0x%x object:0x%x class:0x%x\n", hClient, + hParent, *phObject, hClass); + + NVRM_TRACE_ERROR('aloc', status); + } + + portMemFree(pLockInfo); + +done: + rmapiEpilogue(pRmApi, &rmApiContext); + + return status; +} + +NV_STATUS +resservClientFactory +( + PORT_MEM_ALLOCATOR *pAllocator, + RS_RES_ALLOC_PARAMS *pParams, + RsClient **ppRsClient +) +{ + RmClient *pClient; + NV_STATUS status; + + status = objCreate(&pClient, NVOC_NULL_OBJECT, RmClient, pAllocator, pParams); + + if (status != NV_OK) + { + return status; + } + NV_ASSERT(pClient != NULL); + + *ppRsClient = staticCast(pClient, RsClient); + return NV_OK; +} + +NV_STATUS +resservResourceFactory +( + PORT_MEM_ALLOCATOR *pAllocator, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS *pParams, + RsResource **ppResource +) +{ + RS_RESOURCE_DESC *pResDesc; + NV_STATUS status; + Dynamic *pDynamic = NULL; + RsResource *pResource = NULL; + OBJGPU *pGpu = NULL; + + pResDesc = RsResInfoByExternalClassId(pParams->externalClassId); + if (pResDesc == NULL) + return NV_ERR_INVALID_CLASS; + + if (pCallContext->pResourceRef->pParentRef != NULL && + pCallContext->pResourceRef->pParentRef->pResource != NULL) + { + GpuResource *pParentGpuResource = dynamicCast(pCallContext->pResourceRef->pParentRef->pResource, + GpuResource); + if (pParentGpuResource != NULL) + { + pGpu = GPU_RES_GET_GPU(pParentGpuResource); + } + } + + status = objCreateDynamicWithFlags(&pDynamic, + (Object*)pGpu, + pResDesc->pClassInfo, + NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY, + pCallContext, + pParams); + if (status != NV_OK) + return status; + + pResource = dynamicCast(pDynamic, RsResource); + + if (pResource == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + *ppResource = pResource; + + return status; +} + +NV_STATUS +rmapiAllocWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 flags, + NvP64 pRightsRequested, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + 
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiAllocWithSecInfo(pRmApi, hClient, hParent, phObject, hClass, + pAllocParams, flags, pRightsRequested, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->FreeWithSecInfo(pRmApi, hClient, hObject, RMAPI_FREE_FLAGS_NONE, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiFreeWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RS_RES_FREE_PARAMS freeParams; + RS_LOCK_INFO lockInfo; + RM_API_CONTEXT rmApiContext = {0}; + + portMemSet(&freeParams, 0, sizeof(freeParams)); + + NV_PRINTF(LEVEL_INFO, "Nv01Free: client:0x%x object:0x%x\n", hClient, + hObject); + + NVRM_TRACE_API('FREE', hClient, hObject, 0); + + status = rmapiPrologue(pRmApi, &rmApiContext); + + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + // RS-TODO: Fix calls that use RMAPI_GPU_LOCK_INTERNAL without holding the API lock + if (pRmApi->bGpuLockInternal && !rmApiLockIsOwner()) + { + NV_PRINTF(LEVEL_ERROR, "RMAPI_GPU_LOCK_INTERNAL free requested without holding the RMAPI lock\n"); + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + lockInfo.state &= ~RM_LOCK_STATES_API_LOCK_ACQUIRED; + } + + freeParams.hClient = hClient; + freeParams.hResource = hObject; + freeParams.freeState = RM_FREE_STATES_NONE; + freeParams.pLockInfo = &lockInfo; + freeParams.freeFlags = flags; + freeParams.pSecInfo = pSecInfo; + + status = serverFreeResourceTree(&g_resServ, &freeParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv01Free: free complete\n"); + NVRM_TRACE('free'); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv01Free: free failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_WARNING, "Nv01Free: client:0x%x object:0x%x\n", + hClient, hObject); + NVRM_TRACE_ERROR('free', status); + } + + return status; +} + +NV_STATUS +rmapiFreeWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiFreeWithSecInfo(pRmApi, hClient, hObject, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiFreeClientList +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->FreeClientListWithSecInfo(pRmApi, phClientList, numClients, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiFreeClientListWithSecInfo +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 lockState = 0; + + NV_PRINTF(LEVEL_INFO, "Nv01FreeClientList: numClients: %d\n", numClients); + + if (!pRmApi->bRmSemaInternal && osAcquireRmSema(pSys->pSema) != NV_OK) + return NV_ERR_INVALID_LOCK_STATE; + + if (pRmApi->bApiLockInternal) + lockState |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + + if (pRmApi->bGpuLockInternal) + lockState |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; + + status = 
serverFreeClientList(&g_resServ, phClientList, numClients, lockState, pSecInfo); + + if (!pRmApi->bRmSemaInternal) + osReleaseRmSema(pSys->pSema, NULL); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv01FreeClientList: free complete\n"); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv01FreeList: free failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiFreeClientListWithSecInfoTls +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiFreeClientListWithSecInfo(pRmApi, phClientList, numClients, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NvBool +serverRwApiLockIsOwner +( + RsServer *pServer +) +{ + return rmapiLockIsOwner(); +} + +NV_STATUS +serverAllocResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + if (lock == RS_LOCK_TOP) + { + RS_RESOURCE_DESC *pResDesc; + + if (!serverSupportsReadOnlyLock(&g_resServ, RS_LOCK_TOP, RS_API_ALLOC_RESOURCE)) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + + pResDesc = RsResInfoByExternalClassId(pParams->externalClassId); + + if (pResDesc == NULL) + { + return NV_ERR_INVALID_CLASS; + } + + if (pResDesc->flags & RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC) + *pAccess = LOCK_ACCESS_READ; + else + *pAccess = LOCK_ACCESS_WRITE; + + return NV_OK; + } + + if (lock == RS_LOCK_RESOURCE) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +serverFreeResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_FREE_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} diff --git a/src/nvidia/src/kernel/rmapi/binary_api.c b/src/nvidia/src/kernel/rmapi/binary_api.c new file mode 100644 index 000000000..661922bdd --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/binary_api.c @@ -0,0 +1,120 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "rmapi/binary_api.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "rmapi/client.h" +#include "rmapi/resource.h" +#include "rmapi/rmapi.h" +#include "rmapi/control.h" +#include "ctrl/ctrlxxxx.h" +#include "gpu/gpu_resource.h" +#include "gpu/gpu.h" +#include "core/locks.h" + + +NV_STATUS +binapiConstruct_IMPL +( + BinaryApi *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +binapiprivConstruct_IMPL +( + BinaryApiPrivileged *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +binapiControl_IMPL +( + BinaryApi *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + OBJGPU *pGpu = GPU_RES_GET_GPU(pResource); + GPU_MASK gpuMaskRelease = 0; + + // check if CMD is NULL, return early + if (RMCTRL_IS_NULL_CMD(pParams->cmd)) + return NV_OK; + + if (pGpu == NULL) + return NV_ERR_INVALID_ARGUMENT; + + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_RPC, + &gpuMaskRelease)); + + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + status = pRmApi->Control(pRmApi, + pParams->hClient, + pParams->hObject, + pParams->cmd, + pParams->pParams, + pParams->paramsSize); + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + return status; +} + +NV_STATUS +binapiprivControl_IMPL +( + BinaryApiPrivileged *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + // check if CMD is NULL, return early + if (RMCTRL_IS_NULL_CMD(pParams->cmd)) + return NV_OK; + + // Add check if privileged client + if (pParams->secInfo.privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + return binapiControl_IMPL(staticCast(pResource, BinaryApi), pCallContext, pParams); + } + else + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +} + diff --git a/src/nvidia/src/kernel/rmapi/client.c b/src/nvidia/src/kernel/rmapi/client.c new file mode 100644 index 000000000..f17666a94 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/client.c @@ -0,0 +1,836 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" + +#include "rmapi/rmapi.h" +#include "rmapi/rs_utils.h" +#include "rmapi/client.h" +#include "rmapi/client_resource.h" +#include "rmapi/resource_fwd_decls.h" +#include "core/locks.h" +#include "core/system.h" +#include "gpu/device/device.h" +#include "resource_desc.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/gpu.h" + +#include "gpu/bus/third_party_p2p.h" + +UserInfoList g_userInfoList; +RmClientList g_clientListBehindGpusLock; // RS-TODO remove this WAR + +#define RS_FW_UNIQUE_HANDLE_BASE (0xc9f00000) + +NV_STATUS _registerUserInfo(PUID_TOKEN *ppUidToken, UserInfo **ppUserInfo); +NV_STATUS _unregisterUserInfo(UserInfo *pUserInfo); + +NV_STATUS +rmclientConstruct_IMPL +( + RmClient *pClient, + PORT_MEM_ALLOCATOR* pAllocator, + RS_RES_ALLOC_PARAMS_INTERNAL* pParams +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + OBJSYS *pSys = SYS_GET_INSTANCE(); + RsClient *pRsClient = staticCast(pClient, RsClient); + NvBool bReleaseLock = NV_FALSE; + API_SECURITY_INFO *pSecInfo = pParams->pSecInfo; + + pClient->bIsRootNonPriv = (pParams->externalClassId == NV01_ROOT_NON_PRIV); + pClient->ProcID = osGetCurrentProcess(); + pClient->pUserInfo = NULL; + pClient->pSecurityToken = NULL; + pClient->pOSInfo = pSecInfo->clientOSInfo; + + pClient->cachedPrivilege = pSecInfo->privLevel; + + // Set user-friendly client name from current process + osGetCurrentProcessName(pClient->name, NV_PROC_NAME_MAX_LENGTH); + + for (i = 0; i < NV0000_NOTIFIERS_MAXCOUNT; i++) + { + pClient->CliSysEventInfo.notifyActions[i] = + NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + + // Prevent kernel clients from requesting handles in the FW handle generator range + status = clientSetRestrictedRange(pRsClient, + RS_FW_UNIQUE_HANDLE_BASE, RS_UNIQUE_HANDLE_RANGE); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Failed to set host client restricted resource handle range. Status=%x\n", status); + return status; + } + + if (!rmGpuLockIsOwner()) + { + // LOCK: acquire GPUs lock + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK) + { + NV_ASSERT(0); + return status; + } + bReleaseLock = NV_TRUE; + } + + pClient->bIsClientVirtualMode = (pSecInfo->pProcessToken != NULL); + + // + // Cache the security/uid tokens only if the client handle validation is + // enabled AND its a user mode path or a non privileged kernel class. + // + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE) && + ((pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) || pClient->bIsRootNonPriv)) + { + PSECURITY_TOKEN pSecurityToken; + PUID_TOKEN pUidToken = osGetCurrentUidToken(); + UserInfo *pUserInfo = NULL; + + pSecurityToken = (pClient->bIsClientVirtualMode ? 
+ pSecInfo->pProcessToken : osGetSecurityToken()); + + // pUserInfo takes ownership of pUidToken upon successful registration + status = _registerUserInfo(&pUidToken, &pUserInfo); + + if (status == NV_OK) + { + pClient->pUserInfo = pUserInfo; + pClient->pSecurityToken = pSecurityToken; + } + else + { + portMemFree(pUidToken); + + if (pSecurityToken != NULL && !pClient->bIsClientVirtualMode) + portMemFree(pSecurityToken); + } + } + + if (listAppendValue(&g_clientListBehindGpusLock, (void*)&pClient) == NULL) + status = NV_ERR_INSUFFICIENT_RESOURCES; + + if (bReleaseLock) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } + + // RM gets the client handle from the allocation parameters + if (status == NV_OK && pParams->pAllocParams != NULL) + *(NvHandle*)(pParams->pAllocParams) = pParams->hClient; + + return status; +} + +void +rmclientDestruct_IMPL +( + RmClient *pClient +) +{ + NV_STATUS status = NV_OK; + RsClient *pRsClient = staticCast(pClient, RsClient); + NV_STATUS tmpStatus; + NvHandle hClient = pRsClient->hClient; + NvBool bReleaseLock = NV_FALSE; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + NV_PRINTF(LEVEL_INFO, " type: client\n"); + + LOCK_METER_DATA(FREE_CLIENT, hClient, 0, 0); + + // Free any association of the client with existing third-party p2p object + CliUnregisterFromThirdPartyP2P(pClient); + + rmapiControlCacheFreeClient(hClient); + + // + // Free all of the devices of the client (do it in reverse order to + // facilitate tear down of things like ctxdmas, etc) + // + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + while (clientRefIterNext(it.pClient, &it)) + { + Device *pDeviceInfo = dynamicCast(it.pResourceRef->pResource, Device); + + // This path is deprecated. 
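+        // Devices are expected to have been freed before the client itself is
+        // destroyed; assert so leftover devices are noticed, then free them anyway.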
+ NV_ASSERT(0); + + tmpStatus = pRmApi->Free(pRmApi, hClient, RES_GET_HANDLE(pDeviceInfo)); + if ((tmpStatus != NV_OK) && (status == NV_OK)) + status = tmpStatus; + + // re-snap iterator as Device list was mutated + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + } + + // Updating the client list just before client handle unregister // + // in case child free functions need to iterate over all clients // + if (!rmGpuLockIsOwner()) + { + // LOCK: acquire GPUs lock + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_CLIENT)) != NV_OK) + { + // This is the only chance that the shadow client list can be + // updated so modify it regardless of whether or not we obtained the lock + NV_ASSERT(0); + } + else + { + bReleaseLock = NV_TRUE; + } + } + + listRemoveFirstByValue(&g_clientListBehindGpusLock, (void*)&pClient); + + if (pClient->pUserInfo != NULL) + { + _unregisterUserInfo(pClient->pUserInfo); + pClient->pUserInfo = NULL; + } + + if (pClient->pSecurityToken != NULL) + { + if (!pClient->bIsClientVirtualMode) + portMemFree(pClient->pSecurityToken); + + pClient->pSecurityToken = NULL; + } + + if (bReleaseLock) + { + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + } +} + +NV_STATUS +rmclientInterMap_IMPL +( + RmClient *pClient, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams +) +{ + RS_INTER_MAP_PRIVATE *pPrivate = pParams->pPrivate; + RS_RES_MAP_TO_PARAMS mapToParams; + + // Use virtual MapTo to perform the class-specific mapping to pMapperRef + portMemSet(&mapToParams, 0, sizeof(mapToParams)); + + mapToParams.pMemoryRef = pMappableRef; + mapToParams.offset = pParams->offset; + mapToParams.length = pParams->length; + mapToParams.flags = pParams->flags; + mapToParams.pDmaOffset = &pParams->dmaOffset; + mapToParams.ppMemDesc = (MEMORY_DESCRIPTOR**)&pParams->pMemDesc; + + mapToParams.pGpu = pPrivate->pGpu; + mapToParams.pSrcGpu = pPrivate->pSrcGpu; + mapToParams.pSrcMemDesc = pPrivate->pSrcMemDesc; + mapToParams.hBroadcastDevice = pPrivate->hBroadcastDevice; + mapToParams.hMemoryDevice = pPrivate->hMemoryDevice; + mapToParams.gpuMask = pPrivate->gpuMask; + mapToParams.bSubdeviceHandleProvided = pPrivate->bSubdeviceHandleProvided; + mapToParams.bDmaMapNeeded = pPrivate->bDmaMapNeeded; + mapToParams.bFlaMapping = pPrivate->bFlaMapping; + + return resMapTo(pMapperRef->pResource, &mapToParams); +} + +void +rmclientInterUnmap_IMPL +( + RmClient *pClient, + RsResourceRef *pMapperRef, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + RS_RES_UNMAP_FROM_PARAMS unmapFromParams; + + // Use virtual UnmapFrom to perform the class-specific unmapping from pMapperRef + portMemSet(&unmapFromParams, 0, sizeof(unmapFromParams)); + + unmapFromParams.pMemDesc = pParams->pMemDesc; + unmapFromParams.hMemory = pParams->hMappable; + unmapFromParams.flags = pParams->flags; + unmapFromParams.dmaOffset = pParams->dmaOffset; + + unmapFromParams.pGpu = pPrivate->pGpu; + unmapFromParams.hBroadcastDevice = pPrivate->hBroadcastDevice; + unmapFromParams.gpuMask = pPrivate->gpuMask; + unmapFromParams.bSubdeviceHandleProvided = pPrivate->bSubdeviceHandleProvided; + + resUnmapFrom(pMapperRef->pResource, &unmapFromParams); +} + +RS_PRIV_LEVEL +rmclientGetCachedPrivilege_IMPL +( + RmClient *pClient +) +{ + return pClient->cachedPrivilege; +} + +NvBool +rmclientIsAdmin_IMPL +( + RmClient *pClient, + RS_PRIV_LEVEL privLevel +) +{ + if (pClient == NULL) + return 
NV_FALSE; + + return (privLevel >= RS_PRIV_LEVEL_USER_ROOT) && !pClient->bIsRootNonPriv; +} + +void +rmclientSetClientFlags_IMPL +( + RmClient *pClient, + NvU32 clientFlags +) +{ + pClient->Flags |= clientFlags; +} + +static void +_rmclientPromoteDebuggerState +( + RmClient *pClient, + NvU32 newMinimumState +) +{ + if (pClient->ClientDebuggerState < newMinimumState) + { + pClient->ClientDebuggerState = newMinimumState; + } +} + +void * +rmclientGetSecurityToken_IMPL +( + RmClient *pClient +) +{ + return pClient->pSecurityToken; +} + +/*! + * @brief Given a client handle, validate the handle for security. + * + * Important!! This function should be called ONLY in the user mode paths. + * The security validations will fail in kernel paths, especially if called + * with privileged kernel handles. + * + * @param[in] hClient The client handle + * @param[in] pSecInfo The new calling context's security info. + * + * @return NV_OK if validated + * NV_ERR_INVALID_CLIENT if client cannot be found + * or if there isn't a match. + */ +static NV_STATUS +_rmclientUserClientSecurityCheck +( + RmClient *pClient, + const API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + PSECURITY_TOKEN pCurrentToken = NULL; + PSECURITY_TOKEN pSecurityToken = pSecInfo->pProcessToken; + + if ((pSys == NULL) || + (!pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE))) + { + return NV_OK; + } + + // + // Check 1: + // The following check to make sure that user paths cannot be called with + // privileged kernel handles + // + // Note: For the user paths, we are checking against both kernel and admin. + // client The reason is that KMD today creates unprivileged kernel handles + // (of class NV01_ROOT_NON_PRIV) on behalf of user clients (cuda debugger, + // profiler, OGL etc) and gives out those handles. These handles are + // kernel, but they do not have admin privileges and since clients already + // use these handles to call into RM through the user paths, we are allowing + // them through ... for now. + // + // Till we either fix the clients to wean off these kernel handles or change + // KMD to not give out the kernel handles, we need to keep the check restricted + // to handles created with NV01_ROOT using the the CliCheckAdmin interface. + // + if ((pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) && !pClient->bIsRootNonPriv) + { + NV_PRINTF(LEVEL_WARNING, "Incorrect client handle used in the User export\n"); + return NV_ERR_INVALID_CLIENT; + } + + // + // Check 2: + // Validate the client handle to make sure that the user who created the + // handle is the one that uses it. Otherwise a malicious user can guess the + // client handle created by another user and access information that its + // not privy to. + // + pCurrentToken = (pSecurityToken != NULL ? pSecurityToken : osGetSecurityToken()); + if (pCurrentToken == NULL) + { + NV_PRINTF(LEVEL_WARNING, + "Cannot get the security token for the current user.\n"); + NV_PRINTF(LEVEL_WARNING, + "The user client cannot be validated\n"); + status = NV_ERR_INVALID_CLIENT; + DBG_BREAKPOINT(); + goto CliUserClientSecurityCheck_exit; + } + + status = osValidateClientTokens((void*)rmclientGetSecurityToken(pClient), + (void*)pCurrentToken); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "Error validating client token. 
Status = 0x%08x\n", status); + goto CliUserClientSecurityCheck_exit; + } + +CliUserClientSecurityCheck_exit: + if (pCurrentToken != NULL && pSecurityToken == NULL) + { + portMemFree(pCurrentToken); + pCurrentToken = NULL; + } + return status; +} + +NV_STATUS +rmclientPostProcessPendingFreeList_IMPL +( + RmClient *pClient, + RsResourceRef **ppFirstLowPriRef +) +{ + RsClient *pRsClient = staticCast(pClient, RsClient); + RsResourceRef *pTargetRef = NULL; + RsResourceRef *pStopRef = NULL; + RsResourceRef *pFirstLowPriRef = NULL; + + pStopRef = pRsClient->pFreeStack->pResourceRef; + pTargetRef = listHead(&pRsClient->pendingFreeList); + while (pTargetRef != pStopRef) + { + RsResourceRef *pNextRef = listNext(&pRsClient->pendingFreeList, pTargetRef); + + // Ensure that high priority resources (and their children/dependents) are freed first + if (pTargetRef->pResourceDesc->freePriority == RS_FREE_PRIORITY_HIGH) + { + clientUpdatePendingFreeList(pRsClient, pTargetRef, pTargetRef, NV_TRUE); + } + pTargetRef = pNextRef; + } + + // + // Find the last high-priority resource in the list. + // The next resource will be the first low priority resource. + // If there are no high-priority resources: use the head of the list + // + pTargetRef = (pStopRef != NULL) + ? pStopRef + : listTail(&pRsClient->pendingFreeList); + pFirstLowPriRef = listHead(&pRsClient->pendingFreeList); + + while (pTargetRef != NULL) + { + RsResourceRef *pPrevRef = listPrev(&pRsClient->pendingFreeList, pTargetRef); + + if (pTargetRef->pResourceDesc->freePriority == RS_FREE_PRIORITY_HIGH) + { + pFirstLowPriRef = listNext(&pRsClient->pendingFreeList, pTargetRef); + break; + } + pTargetRef = pPrevRef; + } + + if (ppFirstLowPriRef) + *ppFirstLowPriRef = pFirstLowPriRef; + + return NV_OK; +} + +static RmClient *handleToObject(NvHandle hClient) +{ + RmClient *pClient; + return (NV_OK == serverutilGetClientUnderLock(hClient, &pClient)) ? pClient : NULL; +} + +RS_PRIV_LEVEL rmclientGetCachedPrivilegeByHandle(NvHandle hClient) +{ + RmClient *pClient = handleToObject(hClient); + return pClient ? rmclientGetCachedPrivilege(pClient) : RS_PRIV_LEVEL_USER; +} + +NvBool rmclientIsAdminByHandle(NvHandle hClient, RS_PRIV_LEVEL privLevel) +{ + RmClient *pClient = handleToObject(hClient); + return pClient ? rmclientIsAdmin(pClient, privLevel) : NV_FALSE; +} + +NvBool rmclientSetClientFlagsByHandle(NvHandle hClient, NvU32 clientFlags) +{ + RmClient *pClient = handleToObject(hClient); + if (pClient) + rmclientSetClientFlags(pClient, clientFlags); + return !!pClient; +} + +void rmclientPromoteDebuggerStateByHandle(NvHandle hClient, NvU32 newMinimumState) +{ + RmClient *pClient = handleToObject(hClient); + if (pClient) + _rmclientPromoteDebuggerState(pClient, newMinimumState); +} + +void *rmclientGetSecurityTokenByHandle(NvHandle hClient) +{ + RmClient *pClient = handleToObject(hClient); + return pClient ? rmclientGetSecurityToken(pClient) : NULL; +} + +NV_STATUS rmclientUserClientSecurityCheckByHandle(NvHandle hClient, const API_SECURITY_INFO *pSecInfo) +{ + RmClient *pClient = handleToObject(hClient); + + // + // Return early if it's a null object. This is probably the allocation of + // the root client object, so the client class is going to be null. + // + // RS-TODO - This check should move to the caller. 
+ // + if (hClient == NV01_NULL_OBJECT) + { + return NV_OK; + } + + if (pClient) + { + return _rmclientUserClientSecurityCheck(pClient, pSecInfo); + } + else + return NV_ERR_INVALID_CLIENT; +} + +/** + * Register a uid token with the client database and return a UserInfo that + * corresponds to the uid token. + * + * If the uid token has not been registered before, a new UserInfo will be registered and returned. + * If the uid token is already registered, an existing UserInfo will be ref-counted and + * returned. + * + * This function must be protected by a lock (currently the GPUs lock.) + * + * @param[inout] ppUidToken + * @param[out] ppUserInfo + */ +NV_STATUS +_registerUserInfo +( + PUID_TOKEN *ppUidToken, + UserInfo **ppUserInfo +) +{ + NV_STATUS status = NV_OK; + NvBool bFound = NV_FALSE; + UserInfo *pUserInfo = NULL; + UserInfoListIter it = listIterAll(&g_userInfoList); + PUID_TOKEN pUidToken; + + if ((!ppUidToken) || (!(*ppUidToken))) + return NV_ERR_INVALID_ARGUMENT; + + pUidToken = *ppUidToken; + + // Find matching user token + while(listIterNext(&it)) + { + pUserInfo = *it.pValue; + if (osUidTokensEqual(pUserInfo->pUidToken, pUidToken)) + { + bFound = NV_TRUE; + break; + } + } + + if (!bFound) + { + RsShared *pShared; + status = serverAllocShare(&g_resServ, classInfo(UserInfo), &pShared); + if (status != NV_OK) + return status; + + pUserInfo = dynamicCast(pShared, UserInfo); + pUserInfo->pUidToken = pUidToken; + + if (listAppendValue(&g_userInfoList, (void*)&pUserInfo) == NULL) + { + serverFreeShare(&g_resServ, pShared); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + else + { + serverRefShare(&g_resServ, staticCast(pUserInfo, RsShared)); + portMemFree(pUidToken); + *ppUidToken = NULL; + } + + *ppUserInfo = pUserInfo; + + return NV_OK; +} + +/** + * + * Unregister a client from a user info list + * + * This function must be protected by a lock (currently the GPUs lock.) 
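+ *
+ * The UserInfo share is reference counted; its entry is removed from
+ * g_userInfoList only when the last reference is released.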
+ * + * @param[in] pUserInfo + */ +NV_STATUS +_unregisterUserInfo +( + UserInfo *pUserInfo +) +{ + NvS32 refCount = serverGetShareRefCount(&g_resServ, staticCast(pUserInfo, RsShared)); + if (--refCount == 0) + { + listRemoveFirstByValue(&g_userInfoList, (void*)&pUserInfo); + } + return serverFreeShare(&g_resServ, staticCast(pUserInfo, RsShared)); +} + +NV_STATUS userinfoConstruct_IMPL +( + UserInfo *pUserInfo +) +{ + return NV_OK; +} + +void +userinfoDestruct_IMPL +( + UserInfo *pUserInfo +) +{ + portMemFree(pUserInfo->pUidToken); +} + +NV_STATUS +rmclientValidate_IMPL +( + RmClient *pClient, + const API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE) && + pSecInfo != NULL) + { + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_CLIENT_HANDLE_STRICT) && + pSecInfo->clientOSInfo != NULL) + { + if (pClient->pOSInfo != pSecInfo->clientOSInfo) + { + status = NV_ERR_INVALID_CLIENT; + } + } + else if (pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) + { + status = _rmclientUserClientSecurityCheck(pClient, pSecInfo); + } + } + + return status; +} + +NV_STATUS +rmclientFreeResource_IMPL +( + RmClient *pClient, + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pRmFreeParams +) +{ + NV_STATUS status; + OBJGPU *pGpu; + NvBool bBcState; + NvBool bRestoreBcState = NV_FALSE; + + if (gpuGetByRef(pRmFreeParams->pResourceRef, NULL, &pGpu) == NV_OK) + { + bBcState = gpumgrGetBcEnabledStatus(pGpu); + bRestoreBcState = NV_TRUE; + } + + rmapiFreeResourcePrologue(pRmFreeParams); + + status = clientFreeResource_IMPL(staticCast(pClient, RsClient), pServer, pRmFreeParams); + + if (bRestoreBcState) + { + gpumgrSetBcEnabledStatus(pGpu, bBcState); + } + return status; +} + +static NvBool _rmclientIsCapable +( + NvHandle hClient, + NvU32 capability +) +{ + NvU32 internalClassId; + RsResourceRef *pResourceRef = NULL; + + switch(capability) + { + case NV_RM_CAP_SYS_SMC_CONFIG: + { + internalClassId = classId(MIGConfigSession); + break; + } + case NV_RM_CAP_EXT_FABRIC_MGMT: + { + internalClassId = classId(FmSessionApi); + break; + } + case NV_RM_CAP_SYS_SMC_MONITOR: + { + internalClassId = classId(MIGMonitorSession); + break; + } + default: + { + NV_ASSERT(0); + return NV_FALSE; + } + } + + // Check if client has allocated a given class + pResourceRef = serverutilFindChildRefByType(hClient, hClient, internalClassId, NV_TRUE); + if (pResourceRef == NULL) + { + return NV_FALSE; + } + + return NV_TRUE; +} + +NvBool rmclientIsCapableOrAdmin_IMPL +( + RmClient *pClient, + NvU32 capability, + RS_PRIV_LEVEL privLevel +) +{ + RsClient *pRsClient = staticCast(pClient, RsClient); + NvHandle hClient = pRsClient->hClient; + + if (rmclientIsAdmin(pClient, privLevel)) + { + return NV_TRUE; + } + + return _rmclientIsCapable(hClient, capability); +} + +// +// RS-TODO: Delete this function once the RM Capabilities framework is in place. 
+// JIRA GR-139 +// +NvBool rmclientIsCapableOrAdminByHandle +( + NvHandle hClient, + NvU32 capability, + RS_PRIV_LEVEL privLevel +) +{ + RmClient *pClient = handleToObject(hClient); + if (pClient == NULL) + { + return NV_FALSE; + } + + return rmclientIsCapableOrAdmin(pClient, capability, privLevel); +} + +NvBool rmclientIsCapable_IMPL +( + RmClient *pClient, + NvU32 capability +) +{ + RsClient *pRsClient = staticCast(pClient, RsClient); + NvHandle hClient = pRsClient->hClient; + + return _rmclientIsCapable(hClient, capability); +} + +// +// RS-TODO: Delete this function once the RM Capabilities framework is in place. +// JIRA GR-139 +// +NvBool rmclientIsCapableByHandle +( + NvHandle hClient, + NvU32 capability +) +{ + RmClient *pClient = handleToObject(hClient); + if (pClient == NULL) + { + return NV_FALSE; + } + + return rmclientIsCapable(pClient, capability); +} diff --git a/src/nvidia/src/kernel/rmapi/client_resource.c b/src/nvidia/src/kernel/rmapi/client_resource.c new file mode 100644 index 000000000..e2acc7ebd --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/client_resource.c @@ -0,0 +1,2903 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/system.h" +#include "os/os.h" +#include "rmapi/client_resource.h" +#include "rmapi/param_copy.h" +#include "rmapi/rs_utils.h" +#include "gpu/gpu.h" +#include "gpu/device/device.h" +#include "gpu_mgr/gpu_mgr.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "resserv/rs_access_map.h" +#include "nvBldVer.h" +#include "nvVer.h" +#include "mem_mgr/mem.h" +#include "nvsecurityinfo.h" +#include "kernel/gpu/rc/kernel_rc.h" +#include "resource_desc.h" + +#include "mem_mgr/fla_mem.h" +#include "platform/chipset/chipset_info.h" +#include "platform/chipset/chipset.h" +#include "platform/cpu.h" +#include "platform/platform.h" +#include "platform/p2p/p2p_caps.h" +#include "platform/acpi_common.h" +#include "kernel/gpu/nvlink/kernel_nvlink.h" +#include "vgpu/rpc.h" +#include "diagnostics/gpu_acct.h" +#include "mem_mgr/virt_mem_mgr.h" +#include "diagnostics/journal.h" +#include "ctrl/ctrl0000/ctrl0000nvd.h" +#include "nvdump.h" +#include "gpu/gsp/kernel_gsp.h" +#include "power/gpu_boost_mgr.h" + +NV_STATUS +cliresConstruct_IMPL +( + RmClientResource *pRmCliRes, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL* pParams +) +{ + return NV_OK; +} + +void +cliresDestruct_IMPL +( + RmClientResource *pRmCliRes +) +{ +} + +NvBool +cliresAccessCallback_IMPL +( + RmClientResource *pRmCliRes, + RsClient *pInvokingClient, + void *pAllocParams, + RsAccessRight accessRight +) +{ + // Client resource's access callback will grant any rights here to any resource it owns + switch (accessRight) + { + case RS_ACCESS_NICE: + { + // Grant if the caller satisfies osAllowPriorityOverride + return osAllowPriorityOverride(); + } + } + + // Delegate to superclass + return resAccessCallback_IMPL(staticCast(pRmCliRes, RsResource), pInvokingClient, pAllocParams, accessRight); +} + +NvBool +cliresShareCallback_IMPL +( + RmClientResource *pRmCliRes, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + RmClient *pSrcClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + RmClient *pDstClient = dynamicCast(pInvokingClient, RmClient); + NvBool bDstKernel = (pDstClient != NULL) && + (rmclientGetCachedPrivilege(pDstClient) >= RS_PRIV_LEVEL_KERNEL); + + // Client resource's share callback will also share rights it shares here with any resource it owns + // + // If a kernel client is validating share policies, that means it's most likely duping on behalf of + // a user space client. For this case, we check against the current process instead of the kernel + // client object's process. 
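+    // The cases below cover the OS-security-token, PID, and SMC-partition/GPU
+    // share types; any other share type falls through to the superclass callback.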
+ // + switch (pSharePolicy->type) + { + case RS_SHARE_TYPE_OS_SECURITY_TOKEN: + if ((pSrcClient != NULL) && (pDstClient != NULL) && + (pSrcClient->pSecurityToken != NULL)) + { + if (bDstKernel) + { + NV_STATUS status; + PSECURITY_TOKEN *pCurrentToken; + + pCurrentToken = osGetSecurityToken(); + if (pCurrentToken == NULL) + { + NV_ASSERT_FAILED("Cannot get the security token for the current user"); + return NV_FALSE; + } + + status = osValidateClientTokens(pSrcClient->pSecurityToken, pCurrentToken); + portMemFree(pCurrentToken); + if (status == NV_OK) + { + return NV_TRUE; + } + } + else if (pDstClient->pSecurityToken != NULL) + { + if (osValidateClientTokens(pSrcClient->pSecurityToken, pDstClient->pSecurityToken) == NV_OK) + return NV_TRUE; + } + } + break; + case RS_SHARE_TYPE_PID: + if ((pSrcClient != NULL) && (pDstClient != NULL)) + { + if ((pParentRef != NULL) && bDstKernel) + { + if (pSrcClient->ProcID == osGetCurrentProcess()) + return NV_TRUE; + } + else + { + if (pSrcClient->ProcID == pDstClient->ProcID) + return NV_TRUE; + } + } + break; + case RS_SHARE_TYPE_SMC_PARTITION: + case RS_SHARE_TYPE_GPU: + // Require exceptions, since RmClientResource is not an RmResource + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + return NV_TRUE; + break; + } + + // Delegate to superclass + return resShareCallback_IMPL(staticCast(pRmCliRes, RsResource), pInvokingClient, pParentRef, pSharePolicy); +} + +// **************************************************************************** +// Helper functions +// **************************************************************************** + + +static NV_STATUS +CliControlSystemEvent +( + NvHandle hClient, + NvU32 event, + NvU32 action +) +{ + NV_STATUS status = NV_OK; + RmClient *pClient; + PEVENTNOTIFICATION *pEventNotification = NULL; + + if (event >= NV0000_NOTIFIERS_MAXCOUNT) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + CliGetEventNotificationList(hClient, hClient, NULL, &pEventNotification); + if (pEventNotification != NULL) + { + switch (action) + { + case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE: + case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT: + { + if (pClient->CliSysEventInfo.notifyActions[event] != NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + status = NV_ERR_INVALID_STATE; + break; + } + + //fall through + } + case NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE: + { + pClient->CliSysEventInfo.notifyActions[event] = action; + break; + } + + default: + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + } + else + { + status = NV_ERR_INVALID_STATE; + } + + return status; +} + + + +static NV_STATUS +CliGetSystemEventStatus +( + NvHandle hClient, + NvU32 *pEvent, + NvU32 *pStatus +) +{ + NvU32 Head, Tail; + RmClient *pClient; + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + Head = pClient->CliSysEventInfo.systemEventsQueue.Head; + Tail = pClient->CliSysEventInfo.systemEventsQueue.Tail; + + if (Head == Tail) + { + *pEvent = NV0000_NOTIFIERS_EVENT_NONE_PENDING; + *pStatus = 0; + } + else + { + *pEvent = pClient->CliSysEventInfo.systemEventsQueue.EventQueue[Tail].event; + *pStatus = pClient->CliSysEventInfo.systemEventsQueue.EventQueue[Tail].status; + pClient->CliSysEventInfo.systemEventsQueue.Tail = (Tail + 1) % NV_SYSTEM_EVENT_QUEUE_SIZE; + } + + return NV_OK; +} + + + +NV_STATUS +CliGetSystemP2pCaps +( + NvU32 *gpuIds, + NvU32 gpuCount, + NvU32 
*p2pCaps, + NvU32 *p2pOptimalReadCEs, + NvU32 *p2pOptimalWriteCEs, + NvU8 *p2pCapsStatus, + NvU32 *pBusPeerIds +) +{ + OBJGPU *pGpuLocal = NULL; + OBJGPU *pGpuLocalLoop = NULL; + OBJGPU *pGpuPeer = NULL; + KernelNvlink *pLocalKernelNvlink = NULL; + NvU32 gpuMask = 0; + NvU32 localGpuIndex, peerGpuIndex; + NvU32 i; + NvU8 p2pWriteCapStatus, p2pReadCapStatus; + NV_STATUS rmStatus = NV_OK; + P2P_CONNECTIVITY connectivity; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if ((gpuCount == 0) || (gpuCount > NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS)) + { + NV_PRINTF(LEVEL_WARNING, "Invalid GPU count\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + // Assume no peer to peer capabilities. + *p2pOptimalReadCEs = 0; + *p2pOptimalWriteCEs = 0; + if (p2pCaps != NULL) + { + *p2pCaps = 0; + } + + if (p2pCapsStatus != NULL) + { + for (i = 0; i < NV0000_CTRL_P2P_CAPS_INDEX_TABLE_SIZE; i++) + { + p2pCapsStatus[i] = NV0000_P2P_CAPS_STATUS_NOT_SUPPORTED; + } + } + + // Construct the GPU mask + for (localGpuIndex = 0; localGpuIndex < gpuCount; localGpuIndex++) + { + pGpuLocalLoop = gpumgrGetGpuFromId(gpuIds[localGpuIndex]); + if (pGpuLocalLoop == NULL) + { + NV_PRINTF(LEVEL_WARNING, "GPU ID not found: 0x%x\n", + gpuIds[localGpuIndex]); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + gpuMask |= NVBIT(gpuGetInstance(pGpuLocalLoop)); + + for (peerGpuIndex = 0; peerGpuIndex < gpuCount; peerGpuIndex++) + { + pGpuPeer = gpumgrGetGpuFromId(gpuIds[peerGpuIndex]); + if (pGpuPeer == NULL) + { + NV_PRINTF(LEVEL_WARNING, "GPU ID not found: 0x%x\n", + gpuIds[peerGpuIndex]); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (pBusPeerIds != NULL) + { + pBusPeerIds[(localGpuIndex * gpuCount) + peerGpuIndex] = + kbusGetPeerId_HAL(pGpuLocalLoop, GPU_GET_KERNEL_BUS(pGpuLocalLoop), pGpuPeer); + } + } + } + + // Local GPU needs to be the first in the list + pGpuLocal = gpumgrGetGpuFromId(gpuIds[0]); + if (pGpuLocal == NULL) + { + NV_PRINTF(LEVEL_WARNING, "GPU ID not found: 0x%x\n", + gpuIds[localGpuIndex]); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + rmStatus = p2pGetCapsStatus(gpuMask, &p2pWriteCapStatus, + &p2pReadCapStatus, &connectivity + ); + if (rmStatus != NV_OK) + { + goto done; + } + + if (connectivity == P2P_CONNECTIVITY_NVLINK) + { + pLocalKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpuLocal); + NV_ASSERT_OR_RETURN(pLocalKernelNvlink != NULL, NV_ERR_INVALID_STATE); + + if (p2pCaps != NULL) + { + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED, _TRUE); + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED, _TRUE); + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED, _TRUE); + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED, _TRUE); + } + + if (p2pCapsStatus != NULL) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_NVLINK] = NV0000_P2P_CAPS_STATUS_OK; + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_ATOMICS] = NV0000_P2P_CAPS_STATUS_OK; + } + + // Get the optimal CEs for P2P read/write for 2 gpu masks only + if (gpuCount == 2) + { + knvlinkGetP2POptimalCEs_HAL(pGpuLocal, pLocalKernelNvlink, gpuMask, + NULL, NULL, + p2pOptimalReadCEs, + p2pOptimalWriteCEs); + } + + if (gpuCount == 1 && + knvlinkIsP2pLoopbackSupported(pGpuLocal, pLocalKernelNvlink)) + { + if (p2pCaps != NULL) + { + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED, _TRUE); + } + + if (p2pCapsStatus != NULL) + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_LOOPBACK] = NV0000_P2P_CAPS_STATUS_OK; + } + } + else if (connectivity == 
P2P_CONNECTIVITY_PCIE_BAR1 || (connectivity == P2P_CONNECTIVITY_PCIE)) + { + if (p2pCaps != NULL) + { + *p2pCaps |= (p2pReadCapStatus == NV0000_P2P_CAPS_STATUS_OK) ? + REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED, _TRUE) : 0; + *p2pCaps |= (p2pWriteCapStatus == NV0000_P2P_CAPS_STATUS_OK) ? + REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED, _TRUE) : 0; + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PROP_SUPPORTED, _TRUE); + + if (connectivity == P2P_CONNECTIVITY_PCIE_BAR1) + { + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_BAR1_SUPPORTED, _TRUE); + } + else + { + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PCI_SUPPORTED, _TRUE); + } + } + + if (p2pCapsStatus != NULL) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_PROP] = NV0000_P2P_CAPS_STATUS_OK; + + if (connectivity == P2P_CONNECTIVITY_PCIE_BAR1) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_PCI_BAR1] = NV0000_P2P_CAPS_STATUS_OK; + } + else + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_PCI] = NV0000_P2P_CAPS_STATUS_OK; + } + } + + if (gpuCount == 1) + { + if (p2pCaps != NULL) + { + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED, _TRUE); + } + + if (p2pCapsStatus != NULL) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_LOOPBACK] = NV0000_P2P_CAPS_STATUS_OK; + } + } + } + else if (connectivity == P2P_CONNECTIVITY_NVLINK_INDIRECT) + { + pLocalKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpuLocal); + NV_ASSERT_OR_RETURN(pLocalKernelNvlink != NULL, NV_ERR_INVALID_STATE); + + if (p2pCaps != NULL) + { + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_READS_SUPPORTED, _TRUE); + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_WRITES_SUPPORTED, _TRUE); + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_ATOMICS_SUPPORTED, _TRUE); + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED, _TRUE); + } + + if (p2pCapsStatus != NULL) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_ATOMICS] = NV0000_P2P_CAPS_STATUS_OK; + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_NVLINK] = NV0000_P2P_CAPS_STATUS_OK; + } + + // Use sysmem CEs as optimal CEs for indirect peers. + knvlinkGetP2POptimalCEs_HAL(pGpuLocal, pLocalKernelNvlink, gpuMask, + p2pOptimalReadCEs, p2pOptimalWriteCEs, NULL, NULL); + } + else if (connectivity == P2P_CONNECTIVITY_C2C) + { + + if (p2pCaps != NULL) + { + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED, _TRUE); + *p2pCaps |= (p2pReadCapStatus == NV0000_P2P_CAPS_STATUS_OK) ? + REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED, _TRUE) : 0; + *p2pCaps |= (p2pWriteCapStatus == NV0000_P2P_CAPS_STATUS_OK) ? + REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED, _TRUE) : 0; + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_C2C_SUPPORTED, _TRUE); + } + + if (p2pCapsStatus != NULL) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_ATOMICS] = NV0000_P2P_CAPS_STATUS_OK; + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_C2C] = NV0000_P2P_CAPS_STATUS_OK; + } + if (gpuCount == 1) + { + if (p2pCaps != NULL) + { + *p2pCaps |= REF_DEF(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_LOOPBACK_SUPPORTED, _TRUE); + } + + if (p2pCapsStatus != NULL) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_LOOPBACK] = NV0000_P2P_CAPS_STATUS_OK; + } + } + } + + if (p2pCapsStatus != NULL) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_READ] = p2pReadCapStatus; + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_WRITE] = p2pWriteCapStatus; + } + + // Honor the regkey for loopback status. 
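+    // If P2P loopback is disabled by regkey on the local GPU, override the
+    // reported loopback status with DISABLED_BY_REGKEY.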
+ if (p2pCapsStatus != NULL && + pGpuLocal->getProperty(pGpuLocal, PDB_PROP_GPU_NVLINK_P2P_LOOPBACK_DISABLED)) + { + p2pCapsStatus[NV0000_CTRL_P2P_CAPS_INDEX_LOOPBACK] = NV0000_P2P_CAPS_STATUS_DISABLED_BY_REGKEY; + } + +done: + return rmStatus; +} + +// **************************************************************************** +// Other functions +// **************************************************************************** + +// +// cliresCtrlCmdSystemGetFeatures +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetFeatures_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_FEATURES_PARAMS *pFeaturesParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 featuresMask = 0; + + NV_ASSERT_OR_RETURN(pSys != NULL, NV_ERR_INVALID_STATE); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pSys->getProperty(pSys, PDB_PROP_SYS_IS_UEFI)) + { + featuresMask = FLD_SET_DRF(0000, _CTRL_SYSTEM_GET_FEATURES, + _UEFI, _TRUE, featuresMask); + } + + // Don't update EFI init on non Display system + if (pSys->getProperty(pSys, PDB_PROP_SYS_IS_EFI_INIT)) + { + featuresMask = FLD_SET_DRF(0000, _CTRL_SYSTEM_GET_FEATURES, + _IS_EFI_INIT, _TRUE, featuresMask); + } + + pFeaturesParams->featuresMask = featuresMask; + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetBuildVersionV2 +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetBuildVersionV2_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_BUILD_VERSION_V2_PARAMS *pParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + ct_assert(sizeof(NV_VERSION_STRING) <= sizeof(pParams->driverVersionBuffer)); + ct_assert(sizeof(NV_BUILD_BRANCH_VERSION) <= sizeof(pParams->versionBuffer)); + ct_assert(sizeof(NV_DISPLAY_DRIVER_TITLE) <= sizeof(pParams->titleBuffer)); + + portMemCopy(pParams->driverVersionBuffer, sizeof(pParams->driverVersionBuffer), + NV_VERSION_STRING, sizeof(NV_VERSION_STRING)); + portMemCopy(pParams->versionBuffer, sizeof(pParams->versionBuffer), + NV_BUILD_BRANCH_VERSION, sizeof(NV_BUILD_BRANCH_VERSION)); + portMemCopy(pParams->titleBuffer, sizeof(pParams->titleBuffer), + NV_DISPLAY_DRIVER_TITLE, sizeof(NV_DISPLAY_DRIVER_TITLE)); + + pParams->changelistNumber = NV_BUILD_CHANGELIST_NUM; + pParams->officialChangelistNumber = NV_LAST_OFFICIAL_CHANGELIST_NUM; + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetCpuInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetCpuInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CPU_INFO_PARAMS *pCpuInfoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pCpuInfoParams->type = pSys->cpuInfo.type; + pCpuInfoParams->capabilities = pSys->cpuInfo.caps; + pCpuInfoParams->clock = pSys->cpuInfo.clock; + pCpuInfoParams->L1DataCacheSize = pSys->cpuInfo.l1DataCacheSize; + pCpuInfoParams->L2DataCacheSize = pSys->cpuInfo.l2DataCacheSize; + pCpuInfoParams->dataCacheLineSize = pSys->cpuInfo.dataCacheLineSize; + pCpuInfoParams->numLogicalCpus = pSys->cpuInfo.numLogicalCpus; + pCpuInfoParams->numPhysicalCpus = pSys->cpuInfo.numPhysicalCpus; + pCpuInfoParams->coresOnDie = pSys->cpuInfo.coresOnDie; + pCpuInfoParams->family = pSys->cpuInfo.family; + pCpuInfoParams->model = pSys->cpuInfo.model; + pCpuInfoParams->stepping = pSys->cpuInfo.stepping; + portMemCopy(pCpuInfoParams->name, + sizeof (pCpuInfoParams->name), 
pSys->cpuInfo.name, + sizeof (pCpuInfoParams->name)); + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetChipsetInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetChipsetInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CHIPSET_INFO_PARAMS *pChipsetInfo +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pChipsetInfo->flags = 0; + + if (pCl->chipsetIDBusAddr.valid) + { + pChipsetInfo->vendorId = pCl->chipsetIDInfo.vendorID; + pChipsetInfo->deviceId = pCl->chipsetIDInfo.deviceID; + pChipsetInfo->subSysVendorId = pCl->chipsetIDInfo.subvendorID; + pChipsetInfo->subSysDeviceId = pCl->chipsetIDInfo.subdeviceID; + } + else + { + pChipsetInfo->vendorId = NV0000_SYSTEM_CHIPSET_INVALID_ID; + pChipsetInfo->deviceId = NV0000_SYSTEM_CHIPSET_INVALID_ID; + pChipsetInfo->subSysVendorId = NV0000_SYSTEM_CHIPSET_INVALID_ID; + pChipsetInfo->subSysDeviceId = NV0000_SYSTEM_CHIPSET_INVALID_ID; + } + + if (pCl->FHBAddr.valid) + { + pChipsetInfo->HBvendorId = pCl->FHBBusInfo.vendorID; + pChipsetInfo->HBdeviceId = pCl->FHBBusInfo.deviceID; + pChipsetInfo->HBsubSysVendorId = pCl->FHBBusInfo.subvendorID; + pChipsetInfo->HBsubSysDeviceId = pCl->FHBBusInfo.subdeviceID; + } + else + { + pChipsetInfo->HBvendorId = NV0000_SYSTEM_CHIPSET_INVALID_ID; + pChipsetInfo->HBdeviceId = NV0000_SYSTEM_CHIPSET_INVALID_ID; + pChipsetInfo->HBsubSysVendorId = NV0000_SYSTEM_CHIPSET_INVALID_ID; + pChipsetInfo->HBsubSysDeviceId = NV0000_SYSTEM_CHIPSET_INVALID_ID; + } + + pChipsetInfo->sliBondId = pCl->ChipsetSliBondType; + + csGetInfoStrings(pCl, + &pChipsetInfo->chipsetNameString[0], + &pChipsetInfo->vendorNameString[0], + &pChipsetInfo->sliBondNameString[0], + &pChipsetInfo->subSysVendorNameString[0], + NV0000_SYSTEM_MAX_CHIPSET_STRING_LENGTH); + + if (pCl->getProperty(pCl, PDB_PROP_CL_HAS_RESIZABLE_BAR_ISSUE)) + { + pChipsetInfo->flags = FLD_SET_DRF(0000, _CTRL_SYSTEM_CHIPSET_FLAG, _HAS_RESIZABLE_BAR_ISSUE, _YES, pChipsetInfo->flags); + } + else + { + pChipsetInfo->flags = FLD_SET_DRF(0000, _CTRL_SYSTEM_CHIPSET_FLAG, _HAS_RESIZABLE_BAR_ISSUE, _NO, pChipsetInfo->flags); + } + + return NV_OK; +} + +// +// cliresCtrlCmdSystemSetMemorySize +// +// Set system memory size in pages. 
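+// The value is stored in the OS object's SystemMemorySize field.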
+// +// Lock Requirements: +// Assert that API and GPUs locks held on entry +// +NV_STATUS +cliresCtrlCmdSystemSetMemorySize_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_SET_MEMORY_SIZE_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJOS *pOS = SYS_GET_OS(pSys); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + pOS->SystemMemorySize = pParams->memorySize; + + return NV_OK; +} + +static NV_STATUS +classGetSystemClasses(NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams) +{ + NvU32 i; + NvU32 numResources; + const RS_RESOURCE_DESC *resources; + NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS params; + + NV_ASSERT_OR_RETURN(pParams, NV_ERR_INVALID_ARGUMENT); + + RsResInfoGetResourceList(&resources, &numResources); + + portMemSet(¶ms, 0x0, sizeof(params)); + + for (i = 0; i < numResources; i++) + { + if ((resources[i].pParentList[0] == classId(RmClientResource)) && + (resources[i].pParentList[1] == 0x0)) + { + NV_ASSERT_OR_RETURN(params.numClasses < NV0000_CTRL_SYSTEM_MAX_CLASSLIST_SIZE, + NV_ERR_INVALID_STATE); + + params.classes[params.numClasses] = resources[i].externalClassId; + params.numClasses++; + } + } + + portMemCopy(pParams, sizeof(*pParams), ¶ms, sizeof(params)); + + return NV_OK; +} + +// +// cliresCtrlCmdSystemGetClassList +// +// Get list of supported system classes. +// +// Lock Requirements: +// Assert that API and GPUs locks held on entry +// +NV_STATUS +cliresCtrlCmdSystemGetClassList_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CLASSLIST_PARAMS *pParams +) +{ + NV_STATUS status; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner() && rmGpuLockIsOwner()); + + status = classGetSystemClasses(pParams); + + return status; +} + +// +// cliresCtrlCmdSystemNotifyEvent +// +// This function exists to allow the RM Client to notify us when they receive +// a system event message. We generally will store off the data, but in some +// cases, we'll trigger our own handling of that code. Prior to Vista, we +// would just poll a scratch bit for these events. But for Vista, we get them +// directly from the OS. 
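+// Recognized event types currently return NV_ERR_NOT_SUPPORTED here;
+// unrecognized types return NV_ERR_INVALID_ARGUMENT.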
+// +// Added Support for notifying power change event to perfhandler +// +NV_STATUS +cliresCtrlCmdSystemNotifyEvent_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_NOTIFY_EVENT_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + + switch(pParams->eventType) + { + case NV0000_CTRL_SYSTEM_EVENT_TYPE_LID_STATE: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_DOCK_STATE: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_LID: + case NV0000_CTRL_SYSTEM_EVENT_TYPE_TRUST_DOCK: + { + status = NV_ERR_NOT_SUPPORTED; + break; + } + + case NV0000_CTRL_SYSTEM_EVENT_TYPE_POWER_SOURCE: + status = NV_ERR_NOT_SUPPORTED; + break; + + default: + status = NV_ERR_INVALID_ARGUMENT; + break; + } + + return status; +} + +// +// cliresCtrlCmdSystemGetPlatformType +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdSystemGetPlatformType_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_SYSTEM_GET_PLATFORM_TYPE_PARAMS *pSysParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJPFM *pPfm = SYS_GET_PFM(pSys); + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pPfm->getProperty(pPfm, PDB_PROP_PFM_IS_TOSHIBA_MOBILE)) + { + pSysParams->systemType = NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_TOSHIBA; + } + else if (pPfm->getProperty(pPfm, PDB_PROP_PFM_IS_MOBILE)) + { + pSysParams->systemType = NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_MOBILE_GENERIC; + } + else + { + pSysParams->systemType = NV0000_CTRL_SYSTEM_GET_PLATFORM_TYPE_DESKTOP; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdSystemDebugCtrlRmMsg_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_PARAMS *pParams +) +{ +// NOTE: RmMsg is only available when NV_PRINTF_STRINGS_ALLOWED is true. +#if NV_PRINTF_STRINGS_ALLOWED + NvU32 len = 0; + + extern char RmMsg[NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE]; + + switch (pParams->cmd) + { + case NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_GET: + { + len = (NvU32)portStringLength(RmMsg); + portMemCopy(pParams->data, len, RmMsg, len); + pParams->count = len; + break; + } + case NV0000_CTRL_SYSTEM_DEBUG_RMMSG_CTRL_CMD_SET: + { +#if !(defined(DEBUG) || defined(DEVELOP)) + RmClient *pRmClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + if (!rmclientIsAdmin(pRmClient, pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_WARNING, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } +#endif + portMemCopy(RmMsg, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE, pParams->data, NV0000_CTRL_SYSTEM_DEBUG_RMMSG_SIZE); + break; + } + default: + return NV_ERR_INVALID_ARGUMENT; + break; + } + + return NV_OK; +#else + return NV_ERR_NOT_SUPPORTED; +#endif +} + +NV_STATUS +cliresCtrlCmdSystemGetRmInstanceId_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_RM_INSTANCE_ID_PARAMS *pRmInstanceIdParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + pRmInstanceIdParams->rm_instance_id = pSys->rmInstanceId; + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetAttachedIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetAttachedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetAttachedGpuIds(pGpuAttachedIds); +} + +// +// 
cliresCtrlCmdGpuGetIdInfo +// +// Lock Requirements: +// Assert that API lock and Gpus lock held on entry +// +NV_STATUS +cliresCtrlCmdGpuGetIdInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS *pGpuIdInfoParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetGpuIdInfo(pGpuIdInfoParams); +} + +NV_STATUS +cliresCtrlCmdGpuGetIdInfoV2_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_ID_INFO_V2_PARAMS *pGpuIdInfoParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetGpuIdInfoV2(pGpuIdInfoParams); +} + +// +// cliresCtrlCmdGpuGetInitStatus +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetInitStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_INIT_STATUS_PARAMS *pGpuInitStatusParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetGpuInitStatus(pGpuInitStatusParams); +} + +// +// cliresCtrlCmdGpuGetDeviceIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetDeviceIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_DEVICE_IDS_PARAMS *pDeviceIdsParams +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pDeviceIdsParams->deviceIds = gpumgrGetDeviceInstanceMask(); + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetPciInfo +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetPciInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_PCI_INFO_PARAMS *pPciInfoParams +) +{ + NV_STATUS status; + NvU64 gpuDomainBusDevice; + + NV_ASSERT(rmApiLockIsOwner()); + + status = gpumgrGetProbedGpuDomainBusDevice(pPciInfoParams->gpuId, &gpuDomainBusDevice); + if (status != NV_OK) + return status; + + pPciInfoParams->domain = gpuDecodeDomain(gpuDomainBusDevice); + pPciInfoParams->bus = gpuDecodeBus(gpuDomainBusDevice); + pPciInfoParams->slot = gpuDecodeDevice(gpuDomainBusDevice); + + return NV_OK; +} + +// +// cliresCtrlCmdGpuGetProbedIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuGetProbedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds +) +{ + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + return gpumgrGetProbedGpuIds(pGpuProbedIds); +} + +// +// cliresCtrlCmdGpuAttachIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuAttachIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_ATTACH_IDS_PARAMS *pGpuAttachIds +) +{ + NV0000_CTRL_GPU_GET_PROBED_IDS_PARAMS *pGpuProbedIds = NULL; + NvU32 i, j; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pGpuAttachIds->gpuIds[0] == NV0000_CTRL_GPU_ATTACH_ALL_PROBED_IDS) + { + // XXX add callback to attach logic on Windows + status = NV_OK; + goto done; + } + + pGpuProbedIds = portMemAllocNonPaged(sizeof(*pGpuProbedIds)); + if (pGpuProbedIds == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = gpumgrGetProbedGpuIds(pGpuProbedIds); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; (i < NV0000_CTRL_GPU_MAX_PROBED_GPUS) && + (pGpuAttachIds->gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID); i++) + { + for (j = 0; (j < NV0000_CTRL_GPU_MAX_PROBED_GPUS) && + (pGpuProbedIds->gpuIds[j] != NV0000_CTRL_GPU_INVALID_ID); j++) + { + if (pGpuAttachIds->gpuIds[i] == pGpuProbedIds->gpuIds[j]) + 
break; + } + + if ((j == NV0000_CTRL_GPU_MAX_PROBED_GPUS) || + (pGpuProbedIds->gpuIds[j] == NV0000_CTRL_GPU_INVALID_ID)) + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + } + + // XXX add callback to attach logic on Windows +done: + portMemFree(pGpuProbedIds); + return status; +} + +// +// cliresCtrlCmdGpuDetachIds +// +// Lock Requirements: +// Assert that API lock held on entry +// No GPUs lock +// +NV_STATUS +cliresCtrlCmdGpuDetachIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_DETACH_IDS_PARAMS *pGpuDetachIds +) +{ + NV0000_CTRL_GPU_GET_ATTACHED_IDS_PARAMS *pGpuAttachedIds = NULL; + NvU32 i, j; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (pGpuDetachIds->gpuIds[0] == NV0000_CTRL_GPU_DETACH_ALL_ATTACHED_IDS) + { + // XXX add callback to detach logic on Windows + status = NV_OK; + goto done; + } + else + { + pGpuAttachedIds = portMemAllocNonPaged(sizeof(*pGpuAttachedIds)); + if (pGpuAttachedIds == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = gpumgrGetAttachedGpuIds(pGpuAttachedIds); + if (status != NV_OK) + { + goto done; + } + + for (i = 0; (i < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) && + (pGpuDetachIds->gpuIds[i] != NV0000_CTRL_GPU_INVALID_ID); i++) + { + for (j = 0; (j < NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) && + (pGpuAttachedIds->gpuIds[j] != NV0000_CTRL_GPU_INVALID_ID); j++) + { + if (pGpuDetachIds->gpuIds[i] == pGpuAttachedIds->gpuIds[j]) + break; + } + + if ((j == NV0000_CTRL_GPU_MAX_ATTACHED_GPUS) || + (pGpuAttachedIds->gpuIds[j] == NV0000_CTRL_GPU_INVALID_ID)) + { + status = NV_ERR_INVALID_ARGUMENT; + break; + } + else + { + // XXX add callback to detach logic on Windows + break; + } + } + } + +done: + portMemFree(pGpuAttachedIds); + return status; +} + +NV_STATUS +cliresCtrlCmdGpuGetSvmSize_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_SVM_SIZE_PARAMS *pSvmSizeGetParams +) +{ + OBJGPU *pGpu = NULL; + + // error check incoming gpu id + pGpu = gpumgrGetGpuFromId(pSvmSizeGetParams->gpuId); + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_WARNING, "GET_SVM_SIZE: bad gpuid: 0x%x\n", + pSvmSizeGetParams->gpuId); + return NV_ERR_INVALID_ARGUMENT; + } + + // Get the SVM size in MB. 
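+    // (As implemented here, no SVM region is exposed through this path, so the
+    // reported size is simply 0.)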
+ pSvmSizeGetParams->svmSize = 0; + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGsyncGetAttachedIds_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GSYNC_GET_ATTACHED_IDS_PARAMS *pGsyncAttachedIds +) +{ + NvU32 i; + + for (i = 0; i < NV_ARRAY_ELEMENTS32(pGsyncAttachedIds->gsyncIds); i++) + { + pGsyncAttachedIds->gsyncIds[i] = NV0000_CTRL_GSYNC_INVALID_ID; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGsyncGetIdInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GSYNC_GET_ID_INFO_PARAMS *pGsyncIdInfoParams +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +cliresCtrlCmdEventSetNotification_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_EVENT_SET_NOTIFICATION_PARAMS *pEventSetNotificationParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + + return CliControlSystemEvent(hClient, pEventSetNotificationParams->event, pEventSetNotificationParams->action); +} + +NV_STATUS +cliresCtrlCmdEventGetSystemEventStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GET_SYSTEM_EVENT_STATUS_PARAMS *pSystemEventStatusParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + + return CliGetSystemEventStatus(hClient, &pSystemEventStatusParams->event, &pSystemEventStatusParams->status); +} + +NV_STATUS +cliresCtrlCmdGpuAcctGetProcAccountingInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPUACCT_GET_PROC_ACCOUNTING_INFO_PARAMS *pAcctInfoParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys); + OBJGPU *pGpu; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pGpu = gpumgrGetGpuFromId(pAcctInfoParams->gpuId); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return gpuacctGetProcAcctInfo(pGpuAcct, pAcctInfoParams); +} + +NV_STATUS +cliresCtrlCmdGpuAcctSetAccountingState_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPUACCT_SET_ACCOUNTING_STATE_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS status = NV_OK; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pGpu = gpumgrGetGpuFromId(pParams->gpuId); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys); + + if (IS_GSP_CLIENT(pGpu)) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RmCtrlParams *pRmCtrlParams = pCallContext->pControlParams; + + NV_RM_RPC_CONTROL(pGpu, + pRmCtrlParams->hClient, + pRmCtrlParams->hObject, + pRmCtrlParams->cmd, + pRmCtrlParams->pParams, + pRmCtrlParams->paramsSize, + status); + if (status != NV_OK) + return status; + } + + if (NV0000_CTRL_GPU_ACCOUNTING_STATE_ENABLED == pParams->newState) + { + status = gpuacctEnableAccounting(pGpuAcct, + pGpu->gpuInstance, pParams); + } + else if (NV0000_CTRL_GPU_ACCOUNTING_STATE_DISABLED == pParams->newState) + { + status = gpuacctDisableAccounting(pGpuAcct, + pGpu->gpuInstance, pParams); + } + else + { + status = NV_ERR_INVALID_ARGUMENT; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdGpuAcctClearAccountingData_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPUACCT_CLEAR_ACCOUNTING_DATA_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys); + OBJGPU *pGpu = NULL; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pGpu = gpumgrGetGpuFromId(pParams->gpuId); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return gpuacctClearAccountingData(pGpuAcct, pGpu->gpuInstance, pParams); +} + +NV_STATUS +cliresCtrlCmdGpuAcctGetAccountingState_IMPL +( + RmClientResource 
*pRmCliRes, + NV0000_CTRL_GPUACCT_GET_ACCOUNTING_STATE_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys); + OBJGPU *pGpu = NULL; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pGpu = gpumgrGetGpuFromId(pParams->gpuId); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return gpuacctGetAccountingMode(pGpuAcct, pGpu->gpuInstance, pParams); +} + +NV_STATUS +cliresCtrlCmdGpuAcctGetAccountingPids_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPUACCT_GET_ACCOUNTING_PIDS_PARAMS *pAcctPidsParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + GpuAccounting *pGpuAcct = SYS_GET_GPUACCT(pSys); + OBJGPU *pGpu; + + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + pGpu = gpumgrGetGpuFromId(pAcctPidsParams->gpuId); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return gpuacctGetAcctPids(pGpuAcct, pAcctPidsParams); +} + + +static void +getHwbcInfo +( + NV0000_CTRL_SYSTEM_HWBC_INFO *pHwbcInfo, + OBJHWBC *pHWBC, + NvU32 *pIndex +) +{ + if (pHWBC->pFirstChild) + getHwbcInfo(pHwbcInfo, pHWBC->pFirstChild, pIndex); + if (pHWBC->pSibling) + getHwbcInfo(pHwbcInfo, pHWBC->pSibling, pIndex); + + if (HWBC_NVIDIA_BR04 == pHWBC->bcRes) + { + if (*pIndex >= NV0000_CTRL_SYSTEM_MAX_HWBCS) + { + // + // Should never happen! Return whatever info we've + // gathered till now. + // + NV_ASSERT(*pIndex < NV0000_CTRL_SYSTEM_MAX_HWBCS); + return; + } + pHwbcInfo[*pIndex].hwbcId = pHWBC->hwbcId; + pHwbcInfo[*pIndex].firmwareVersion = pHWBC->fwVersion; + pHwbcInfo[*pIndex].subordinateBus = pHWBC->maxBus; + pHwbcInfo[*pIndex].secondaryBus = pHWBC->minBus; + (*pIndex)++; + } +} + +NV_STATUS +cliresCtrlCmdSystemGetHwbcInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_HWBC_INFO_PARAMS *pParams +) +{ + NV0000_CTRL_SYSTEM_HWBC_INFO *pHwbcInfo = pParams->hwbcInfo; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJCL *pCl = SYS_GET_CL(pSys); + NvU32 index = 0; + + if (pCl->pHWBC) + getHwbcInfo(pHwbcInfo, pCl->pHWBC, &index); + + for (; index < NV0000_CTRL_SYSTEM_MAX_HWBCS; index++) + { + pHwbcInfo[index].hwbcId = NV0000_CTRL_SYSTEM_HWBC_INVALID_ID; + } + + return NV_OK; +} + +/*! + * @brief Get Dump Size. Returns an estimate of the number of bytes in the dump + * that can be used to allocate a buffer. The size is based on the component + * argument. + * + * @returns NV_OK on success + */ +NV_STATUS +cliresCtrlCmdNvdGetDumpSize_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_NVD_GET_DUMP_SIZE_PARAMS *pDumpSizeParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + NVDUMP_BUFFER nvDumpBuffer = {0}; + NV_STATUS rmStatus; + + nvDumpBuffer.size = NV0000_CTRL_NVD_MAX_DUMP_SIZE; + + rmStatus = rcdbDumpComponent(pRcDB, pDumpSizeParams->component, + &nvDumpBuffer, + NVDUMP_BUFFER_COUNT, + NULL); + + pDumpSizeParams->size = nvDumpBuffer.curNumBytes; + + return rmStatus; +} + +/*! + * @brief Get Dump. Returns a dump that includes the component specified + * when the conditions in the trigger are set. 
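+ *
+ * Typical usage (illustrative): query the dump size for the same component
+ * first via cliresCtrlCmdNvdGetDumpSize, allocate a buffer at least that
+ * large, then issue this control with pBuffer/size describing that buffer;
+ * size must not exceed NV0000_CTRL_NVD_MAX_DUMP_SIZE.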
+ * + * @returns NV_OK on success + */ +NV_STATUS +cliresCtrlCmdNvdGetDump_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_NVD_GET_DUMP_PARAMS *pDumpParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + Journal *pRcDB = SYS_GET_RCDB(pSys); + NVDUMP_BUFFER nvDumpBuffer = {0}; + NV_STATUS rmStatus; + + NV_ASSERT_OR_RETURN(pDumpParams->size <= NV0000_CTRL_NVD_MAX_DUMP_SIZE, NV_ERR_INVALID_ARGUMENT); + + nvDumpBuffer.size = pDumpParams->size; + nvDumpBuffer.address = pDumpParams->pBuffer; + + // Dump the component + rmStatus = rcdbDumpComponent(pRcDB, pDumpParams->component, + &nvDumpBuffer, + NVDUMP_BUFFER_PROVIDED, + NULL); + + pDumpParams->size = nvDumpBuffer.curNumBytes; + + return rmStatus; +} + +/*! + * @brief Get Timestamp. Returns a standard timestamp, osGetCurrentTime. + * + * @returns NV_OK + */ +NV_STATUS +cliresCtrlCmdNvdGetTimestamp_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_NVD_GET_TIMESTAMP_PARAMS *pTimestampParams +) +{ + NV_STATUS status = NV_OK; + + switch (pTimestampParams->cpuClkId) + { + case NV0000_NVD_CPU_TIME_CLK_ID_DEFAULT: + case NV0000_NVD_CPU_TIME_CLK_ID_OSTIME: + { + NvU32 sec; + NvU32 uSec; + + osGetCurrentTime(&sec, &uSec); + pTimestampParams->timestamp = (((NvU64)sec) * 1000000) + uSec; + break; + } + + case NV0000_NVD_CPU_TIME_CLK_ID_PLATFORM_API: + { + osGetPerformanceCounter(&pTimestampParams->timestamp); + break; + } + + case NV0000_NVD_CPU_TIME_CLK_ID_TSC: + { +#if PORT_IS_FUNC_SUPPORTED(portUtilExReadTimestampCounter) + pTimestampParams->timestamp = portUtilExReadTimestampCounter(); +#else + status = NV_ERR_NOT_SUPPORTED; +#endif + break; + } + + default: + { + status = NV_ERR_INVALID_ARGUMENT; + } + } + + return status; +} + +/*! + * @brief Get Nvlog Info. Returns the current state of the NVLOG subsystem. + * + * @returns NV_OK on success + */ +NV_STATUS +cliresCtrlCmdNvdGetNvlogInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_NVD_GET_NVLOG_INFO_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + NvU32 i; + + NV_ASSERT_OR_RETURN(pParams->component >= NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pParams->component < NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED, NV_ERR_INVALID_ARGUMENT); + + if ((pParams->component == NVDUMP_COMPONENT_NVLOG_RM) || (pParams->component == NVDUMP_COMPONENT_NVLOG_ALL)) + { + // + // Copy a snapshot of the GSP log buffers into the NvLog buffers for all + // GPUs. This code assumes that GetNvlogInfo is called just before + // GetNvlogBufferInfo and GetNvlog. + // + NvU32 gpuMask = 0; + NvU32 gpuInstance = 0; + OBJGPU *pGpu; + + (void)gpumgrGetGpuAttachInfo(NULL, &gpuMask); + + for (;;) + { + pGpu = gpumgrGetNextGpu(gpuMask, &gpuInstance); + + if (pGpu == NULL) + break; + + if (IS_GSP_CLIENT(pGpu)) + { + KernelGsp *pKernelGsp = GPU_GET_KERNEL_GSP(pGpu); + kgspDumpGspLogs(pGpu, pKernelGsp, NV_TRUE); + } + } + + pParams->version = NvLogLogger.version; + + portMemSet(pParams->bufferTags, 0, sizeof(pParams->bufferTags)); + for (i = 0; i < NVLOG_MAX_BUFFERS; i++) + { + if (NvLogLogger.pBuffers[i] != NULL) + pParams->bufferTags[i] = NvLogLogger.pBuffers[i]->tag; + } + status = NV_OK; + } + + return status; +} + +/*! + * @brief Get Nvlog Buf Info. Returns current state of a specific buffer in + * the nvlog buffer system. 
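+ *
+ * Callers are expected to issue GetNvlogInfo first (see the note in
+ * cliresCtrlCmdNvdGetNvlogInfo_IMPL above). The buffer may then be selected
+ * either by tag or by buffer index, and logging to it can be paused or
+ * resumed through the _PAUSE flag.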
+ * + * @returns NV_OK on success + */ +NV_STATUS +cliresCtrlCmdNvdGetNvlogBufferInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_NVD_GET_NVLOG_BUFFER_INFO_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + NV_ASSERT_OR_RETURN(pParams->component >= NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pParams->component < NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED, NV_ERR_INVALID_ARGUMENT); + + if ((pParams->component == NVDUMP_COMPONENT_NVLOG_RM) || (pParams->component == NVDUMP_COMPONENT_NVLOG_ALL)) + { + NVLOG_BUFFER *pBuffer; + NVLOG_BUFFER_HANDLE hBuffer; + NvBool bPause; + + if (pParams->tag != 0) + { + status = nvlogGetBufferHandleFromTag(pParams->tag, &hBuffer); + NV_ASSERT_OR_RETURN(status == NV_OK, status); + } + else + { + NV_ASSERT_OR_RETURN(pParams->buffer < NVLOG_MAX_BUFFERS, NV_ERR_INVALID_ARGUMENT); + hBuffer = pParams->buffer; + } + + pBuffer = NvLogLogger.pBuffers[hBuffer]; + NV_ASSERT_OR_RETURN(pBuffer != NULL, NV_ERR_OBJECT_NOT_FOUND); + + bPause = pParams->flags & DRF_DEF(0000, _CTRL_NVD_NVLOG_BUFFER_INFO_FLAGS, _PAUSE, _YES); + nvlogPauseLoggingToBuffer(hBuffer, bPause); + + pParams->tag = pBuffer->tag; + pParams->size = pBuffer->size; + pParams->flags = pBuffer->flags; + pParams->pos = pBuffer->pos; + pParams->overflow = pBuffer->extra.ring.overflow; + status = NV_OK; + } + + return status; +} + +/*! + * @brief Get Nvlog. Returns a dump that includes the buffer specified + * by the caller. + * + * @returns NV_OK on success + */ +NV_STATUS +cliresCtrlCmdNvdGetNvlog_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_NVD_GET_NVLOG_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_NOT_SUPPORTED; + + NV_ASSERT_OR_RETURN(pParams->component >= NV0000_CTRL_NVD_DUMP_COMPONENT_NVLOG, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pParams->component < NV0000_CTRL_NVD_DUMP_COMPONENT_RESERVED, NV_ERR_INVALID_ARGUMENT); + + if ((pParams->component == NVDUMP_COMPONENT_NVLOG_RM) || (pParams->component == NVDUMP_COMPONENT_NVLOG_ALL)) + { + NVLOG_BUFFER_HANDLE hBuffer = pParams->buffer; + NvU32 oldSize = pParams->size; + + NV_ASSERT_OR_RETURN(pParams->size <= NV0000_CTRL_NVLOG_MAX_BLOCK_SIZE, NV_ERR_INVALID_ARGUMENT); + + nvlogPauseLoggingToBuffer(hBuffer, NV_TRUE); + status = nvlogExtractBufferChunk(hBuffer, pParams->blockNum, &pParams->size, pParams->data); + + // + // If size returned is different than asked, entire buffer has been + // extracted, and logging can be resumed. 
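+    // (In other words, a short final chunk signals that the end of the buffer
+    // has been reached, so the pause taken above is released; a full-sized
+    // chunk leaves the buffer paused for the next block.)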
+ // + if (oldSize != pParams->size) + { + nvlogPauseLoggingToBuffer(hBuffer, NV_FALSE); + } + } + + return status; +} + +NV_STATUS +cliresCtrlCmdSystemGetP2pCaps_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS *pP2PParams +) +{ + OBJGPU *pGpu; + + if ((pP2PParams->gpuCount == 0) || (pP2PParams->gpuCount > NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpu = gpumgrGetGpuFromId(pP2PParams->gpuIds[0]); + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return CliGetSystemP2pCaps(pP2PParams->gpuIds, + pP2PParams->gpuCount, + &pP2PParams->p2pCaps, + &pP2PParams->p2pOptimalReadCEs, + &pP2PParams->p2pOptimalWriteCEs, + NvP64_VALUE(pP2PParams->p2pCapsStatus), + NvP64_VALUE(pP2PParams->busPeerIds)); +} + +NV_STATUS +cliresCtrlCmdSystemGetP2pCapsV2_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS *pP2PParams +) +{ + OBJGPU *pGpu; + + if ((pP2PParams->gpuCount == 0) || (pP2PParams->gpuCount > NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpu = gpumgrGetGpuFromId(pP2PParams->gpuIds[0]); + + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return CliGetSystemP2pCaps(pP2PParams->gpuIds, + pP2PParams->gpuCount, + &pP2PParams->p2pCaps, + &pP2PParams->p2pOptimalReadCEs, + &pP2PParams->p2pOptimalWriteCEs, + NvP64_VALUE(pP2PParams->p2pCapsStatus), + NvP64_VALUE(pP2PParams->busPeerIds)); +} + +NV_STATUS +cliresCtrlCmdSystemGetP2pCapsMatrix_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_MATRIX_PARAMS *pP2PParams +) +{ + NvU32 grpAIdx, grpBIdx; + NV_STATUS status = NV_OK; + NvBool bReflexive = NV_FALSE; + NvU32 *groupA = NULL; + NvU32 *groupB = NULL; + OBJGPU *pGpu; + + if (pP2PParams->grpACount == 0 || + pP2PParams->grpACount > NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS || + pP2PParams->grpBCount > NV0000_CTRL_SYSTEM_MAX_P2P_GROUP_GPUS) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pGpu = gpumgrGetGpuFromId(pP2PParams->gpuIdGrpA[0]); + if (pGpu == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + groupA = pP2PParams->gpuIdGrpA; + groupB = pP2PParams->gpuIdGrpB; + + // Check for the reflexive case + if (pP2PParams->grpBCount == 0) + { + bReflexive = NV_TRUE; + groupB = groupA; + } + + for (grpAIdx = 0; grpAIdx < pP2PParams->grpACount; grpAIdx++) + { + // + // When group A == group B, skip the last half of the queries. These are + // the same as the backward-direction queries done inside the inner loop + // during the first half in this case. + // + for (grpBIdx = 0; + bReflexive ? grpBIdx <= grpAIdx : grpBIdx < pP2PParams->grpBCount; + grpBIdx++) + { + // + // Get *both* directions of P2P capabilities. This is necessary to + // reduce the number of crossings into the RM. The caller invokes + // the control once for two sets of GPUs and gets both directions of + // P2P caps for all pairs between the sets. + // + + // Get the A-to-B directional caps + status = CliGetSystemP2pCaps((NvU32[]){groupA[grpAIdx], + groupB[grpBIdx]}, + 2, + &pP2PParams->p2pCaps[grpAIdx][grpBIdx], + &pP2PParams->a2bOptimalReadCes[grpAIdx][grpBIdx], + &pP2PParams->a2bOptimalWriteCes[grpAIdx][grpBIdx], + NULL, + NULL); + if (status != NV_OK) + { + return status; + } + + // + // Note that when we're in the reflexive case *and* we're looking at + // a single GPU against itself, we can skip the backward direction + // query as well because the above query gave us total information. 
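+            // For example, with grpACount == 3 in the reflexive case, only the
+            // pairs (0,0), (1,0), (1,1), (2,0), (2,1), (2,2) are queried
+            // directly; the mirrored entries are then filled in further below.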
+ // + if (bReflexive && grpAIdx == grpBIdx) + { + continue; + } + + // Get the B-to-A (asymmetric) CEs, skipping (symmetric) p2pCaps + status = CliGetSystemP2pCaps((NvU32[]){groupB[grpBIdx], + groupA[grpAIdx]}, + 2, + NULL, // Skip p2pCaps + &pP2PParams->b2aOptimalReadCes[grpAIdx][grpBIdx], + &pP2PParams->b2aOptimalWriteCes[grpAIdx][grpBIdx], + NULL, + NULL); + if (status != NV_OK) + { + return status; + } + + // + // If a set is being compared against itself, we can copy any result + // from the a2b query as the b2a result in the opposite direction. + // This is not true when two different sets are being compared. + // + if (bReflexive) + { + pP2PParams->p2pCaps[grpBIdx][grpAIdx] = pP2PParams->p2pCaps[grpAIdx][grpBIdx]; + pP2PParams->a2bOptimalReadCes[grpBIdx][grpAIdx] = pP2PParams->b2aOptimalReadCes[grpAIdx][grpBIdx]; + pP2PParams->a2bOptimalWriteCes[grpBIdx][grpAIdx] = pP2PParams->b2aOptimalWriteCes[grpAIdx][grpBIdx]; + pP2PParams->b2aOptimalReadCes[grpBIdx][grpAIdx] = pP2PParams->a2bOptimalReadCes[grpAIdx][grpBIdx]; + pP2PParams->b2aOptimalWriteCes[grpBIdx][grpAIdx] = pP2PParams->a2bOptimalWriteCes[grpAIdx][grpBIdx]; + } + } + } + + return status; +} + +/*! + * @brief get the GPUs Power status. + * + * @returns NV_OK, or some error. + * @note Possible errors include no initialized GPU (invalid request) + */ +NV_STATUS +cliresCtrlCmdSystemGetGpusPowerStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_GPUS_POWER_STATUS_PARAMS *pGpusPowerStatus +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUMGR *pGpuMgr = SYS_GET_GPUMGR(pSys); + OBJGPU *pGpu = NULL; + NvU32 gpuAttachCnt = 0, gpuAttachMask = 0, i = 0; + NvU32 gpuIndex = 0; + RM_API *pRmApi; + NV0080_CTRL_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT_PARAMS params = {0}; + + pGpu = gpumgrGetSomeGpu(); + + if (pGpu == NULL) + { + return NV_ERR_INVALID_REQUEST; + } + + pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV_CHECK_OK_OR_RETURN( + LEVEL_INFO, + pRmApi->Control(pRmApi, + pGpu->hInternalClient, + pGpu->hInternalDevice, + NV0080_CTRL_CMD_INTERNAL_PERF_GET_UNDERPOWERED_GPU_COUNT, + ¶ms, + sizeof(params))); + + pGpuMgr->powerDisconnectedGpuCount = params.powerDisconnectedGpuCount; + portMemCopy(pGpuMgr->powerDisconnectedGpuBus, sizeof(*pGpuMgr->powerDisconnectedGpuBus) * NV_MAX_DEVICES, + ¶ms.powerDisconnectedGpuBus, sizeof(*params.powerDisconnectedGpuBus) * NV_MAX_DEVICES); + + // Loop though the GPUs with power disconnected + for (gpuIndex = 0; gpuIndex < pGpuMgr->powerDisconnectedGpuCount; gpuIndex++) + { + pGpusPowerStatus->gpuBus[gpuIndex] = pGpuMgr->powerDisconnectedGpuBus[gpuIndex]; + pGpusPowerStatus->gpuExternalPowerStatus[gpuIndex] = NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_NOT_CONNECTED; + } + + pGpusPowerStatus->gpuCount = pGpuMgr->powerDisconnectedGpuCount; + + // Loop though the attached GPU and not already in the list. 
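+    // (That is, any attached GPU not already listed as power-disconnected above
+    // is appended with external power status CONNECTED, up to the
+    // NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS limit.)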
+ gpumgrGetGpuAttachInfo(&gpuAttachCnt, &gpuAttachMask); + while ((pGpu = gpumgrGetNextGpu(gpuAttachMask, &i))) + { + for (gpuIndex = 0; gpuIndex < pGpusPowerStatus->gpuCount; gpuIndex++) + { + if (gpuGetBus(pGpu) == pGpusPowerStatus->gpuBus[gpuIndex]) + { + break; + } + } + if ((gpuIndex == pGpusPowerStatus->gpuCount) && (pGpusPowerStatus->gpuCount < NV0000_CTRL_SYSTEM_MAX_ATTACHED_GPUS)) + { + pGpusPowerStatus->gpuBus[gpuIndex] = gpuGetBus(pGpu); + pGpusPowerStatus->gpuExternalPowerStatus[gpuIndex] = NV0000_CTRL_SYSTEM_GPU_EXTERNAL_POWER_STATUS_CONNECTED; + pGpusPowerStatus->gpuCount++; + } + } + + return status; +} + +NV_STATUS +cliresCtrlCmdSystemGetPrivilegedStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PARAMS *pParams +) +{ + RmClient *pClient = dynamicCast(RES_GET_CLIENT(pRmCliRes), RmClient); + NvU8 privStatus = 0; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN (pClient != NULL, NV_ERR_INVALID_CLIENT); + + if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_KERNEL_HANDLE_FLAG; + } + + if (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_USER_FLAG; + } + + if (rmclientIsAdmin(pClient, pCallContext->secInfo.privLevel)) + { + privStatus |= NV0000_CTRL_SYSTEM_GET_PRIVILEGED_STATUS_PRIV_HANDLE_FLAG; + } + + pParams->privStatusFlags = privStatus; + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdSystemGetFabricStatus_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_FABRIC_STATUS_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_SKIP; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_UNINITIALIZED; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_REGISTERED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_IN_PROGRESS; + } + + if (pSys->getProperty(pSys, PDB_PROP_SYS_FABRIC_MANAGER_IS_INITIALIZED)) + { + fabricStatus = NV0000_CTRL_GET_SYSTEM_FABRIC_STATUS_INITIALIZED; + } + } + + pParams->fabricStatus = fabricStatus; + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuGetUuidInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + + pGpu = gpumgrGetGpuFromUuid(pParams->gpuUuid, pParams->flags); + + if (NULL == pGpu) + return NV_ERR_OBJECT_NOT_FOUND; + + pParams->gpuId = pGpu->gpuId; + pParams->deviceInstance = gpuGetDeviceInstance(pGpu); + pParams->subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuGetUuidFromGpuId_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_UUID_FROM_GPU_ID_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + NvU8 *pGidString = NULL; + NvU32 gidStrLen = 0; + NV_STATUS rmStatus; + + // First check for UUID cached by gpumgr + rmStatus = gpumgrGetGpuUuidInfo(pParams->gpuId, &pGidString, &gidStrLen, pParams->flags); + + if (rmStatus != NV_OK) + { + // If UUID not cached by gpumgr then try to query device + pGpu = gpumgrGetGpuFromId(pParams->gpuId); + + if (NULL == pGpu) + return NV_ERR_OBJECT_NOT_FOUND; + + // get the UUID of this GPU + rmStatus = gpuGetGidInfo(pGpu, &pGidString, &gidStrLen, pParams->flags); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, 
+ "gpumgrGetGpuInfo: getting gpu GUID failed\n"); + return rmStatus; + } + } + + if (gidStrLen <= NV0000_GPU_MAX_GID_LENGTH) + { + portMemCopy(pParams->gpuUuid, gidStrLen, pGidString, gidStrLen); + pParams->uuidStrLen = gidStrLen; + } + + // cleanup the allocated gidstring + portMemFree(pGidString); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdGpuModifyGpuDrainState_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_MODIFY_DRAIN_STATE_PARAMS *pParams +) +{ + NV_STATUS status; + NvBool bEnable; + NvBool bRemove = NV_FALSE; + NvBool bLinkDisable = NV_FALSE; + OBJGPU *pGpu = gpumgrGetGpuFromId(pParams->gpuId); + + if (NV0000_CTRL_GPU_DRAIN_STATE_ENABLED == pParams->newState) + { + if ((pGpu != NULL) && IsSLIEnabled(pGpu)) + { + // "drain" state not supported in SLI configurations + return NV_ERR_NOT_SUPPORTED; + } + + bEnable = NV_TRUE; + bRemove = + ((pParams->flags & NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE) != 0); + bLinkDisable = + ((pParams->flags & NV0000_CTRL_GPU_DRAIN_STATE_FLAG_LINK_DISABLE) != 0); + + if (bLinkDisable && !bRemove) + { + return NV_ERR_INVALID_ARGUMENT; + } + } + else if (NV0000_CTRL_GPU_DRAIN_STATE_DISABLED == + pParams->newState) + { + bEnable = NV_FALSE; + } + else + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Set/Clear GPU manager drain state + status = gpumgrModifyGpuDrainState(pParams->gpuId, bEnable, bRemove, bLinkDisable); + + // + // For now, assume that when drain state is set up by a client, it is in + // anticipation of some form of external GPU reset. + // + if ((pGpu != NULL) && (status == NV_OK)) + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + + if (pKernelNvlink != NULL) + { + pKernelNvlink->setProperty(pKernelNvlink, + PDB_PROP_KNVLINK_LANE_SHUTDOWN_ON_UNLOAD, + bEnable); + + status = knvlinkSyncLaneShutdownProps(pGpu, pKernelNvlink); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "Failed to sync lane shutdown properties\n"); + return status; + } + } + } + + return status; +} + +NV_STATUS +cliresCtrlCmdGpuQueryGpuDrainState_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_QUERY_DRAIN_STATE_PARAMS *pParams +) +{ + NvBool bDrainState; + NvBool bRemove; + NV_STATUS status; + + status = gpumgrQueryGpuDrainState(pParams->gpuId, &bDrainState, &bRemove); + + if (status != NV_OK) + { + return status; + } + + pParams->drainState = bDrainState ? NV0000_CTRL_GPU_DRAIN_STATE_ENABLED + : NV0000_CTRL_GPU_DRAIN_STATE_DISABLED; + + pParams->flags = bRemove ? NV0000_CTRL_GPU_DRAIN_STATE_FLAG_REMOVE_DEVICE : 0; + + return NV_OK; +} + +/* + * Associate sub process ID with client handle + * + * @return 'NV_OK' on success. Otherwise return NV_ERR_INVALID_CLIENT + */ +NV_STATUS +cliresCtrlCmdSetSubProcessID_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SET_SUB_PROCESS_ID_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmClient *pClient; + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + pClient->SubProcessID = pParams->subProcessID; + portStringCopy(pClient->SubProcessName, sizeof(pClient->SubProcessName), pParams->subProcessName, sizeof(pParams->subProcessName)); + + return NV_OK; +} + +/* + * Disable USERD isolation among all the sub processes within a user process + * + * @return 'NV_OK' on success. 
Otherwise return NV_ERR_INVALID_CLIENT + */ +NV_STATUS +cliresCtrlCmdDisableSubProcessUserdIsolation_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_DISABLE_SUB_PROCESS_USERD_ISOLATION_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RmClient *pClient; + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + pClient->bIsSubProcessDisabled = pParams->bIsSubProcessDisabled; + + return NV_OK; +} + +/*! + * @brief Queries whether Sync Gpu Boost Manager is enabled or not. + */ +NV_STATUS +cliresCtrlCmdSyncGpuBoostInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_SYNC_GPU_BOOST_INFO_PARAMS *pParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUBOOSTMGR *pBoostMgr = SYS_GET_GPUBOOSTMGR(pSys); + + NV_ASSERT_OR_RETURN(NULL != pParams, NV_ERR_INVALID_ARGUMENT); + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (NULL == pBoostMgr) + { + pParams->bEnabled = NV_FALSE; + return NV_OK; + } + + pParams->bEnabled = NV_TRUE; + return NV_OK; +} + +/*! + * @brief Creates a Sync GPU Boost Group containing the GPUs + * specified + * + * @returns NV_OK Success + * @returns other value Failure + */ +NV_STATUS +cliresCtrlCmdSyncGpuBoostGroupCreate_IMPL +( + RmClientResource *pRmCliRes, + NV0000_SYNC_GPU_BOOST_GROUP_CREATE_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUBOOSTMGR *pBoostMgr = SYS_GET_GPUBOOSTMGR(pSys); + + NV_ASSERT_OR_RETURN(NULL != pParams, NV_ERR_INVALID_ARGUMENT); + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // Start off with invalid boost group ID + pParams->boostConfig.boostGroupId = NV0000_SYNC_GPU_BOOST_INVALID_GROUP_ID; + + // Create the boost group + status = gpuboostmgrCreateGroup(pBoostMgr, &pParams->boostConfig); + NV_ASSERT(NV_OK == status); + return status; +} + +/*! + * @brief Destroys a previously created GPU Boost Group. + * + * @returns NV_OK Success + */ +NV_STATUS +cliresCtrlCmdSyncGpuBoostGroupDestroy_IMPL +( + RmClientResource *pRmCliRes, + NV0000_SYNC_GPU_BOOST_GROUP_DESTROY_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUBOOSTMGR *pBoostMgr = SYS_GET_GPUBOOSTMGR(pSys); + + NV_ASSERT_OR_RETURN(NULL != pParams, NV_ERR_INVALID_ARGUMENT); + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + // Destroy the boost group + status = gpuboostmgrDestroyGroup(pBoostMgr, pParams->boostGroupId); + NV_ASSERT(NV_OK == status); + return status; +} + +/*! + * @brief Queries information for a specified Sync Gpu Boost Group. 
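+ *
+ * Illustrative flow (the command names are assumptions based on
+ * ctrl0000syncgpuboost.h): a client creates a group with
+ * NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_CREATE, queries it through this
+ * control, and removes it with NV0000_CTRL_CMD_SYNC_GPU_BOOST_GROUP_DESTROY.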
+ */ +NV_STATUS +cliresCtrlCmdSyncGpuBoostGroupInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_SYNC_GPU_BOOST_GROUP_INFO_PARAMS *pParams +) +{ + + NV_STATUS status = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + OBJGPUBOOSTMGR *pBoostMgr = SYS_GET_GPUBOOSTMGR(pSys); + + NV_ASSERT_OR_RETURN(NULL != pParams, NV_ERR_INVALID_ARGUMENT); + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + status = gpuboostmgrQueryGroups(pBoostMgr, pParams); + NV_ASSERT(NV_OK == status); + return status; +} + +NV_STATUS +cliresCtrlCmdClientGetAddrSpaceType_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_ADDR_SPACE_TYPE_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + CALL_CONTEXT callContext; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + Memory *pMemory = NULL; + GpuResource *pGpuResource = NULL; + NV_ADDRESS_SPACE memType; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pParams->hObject, &pResourceRef)); + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pRsClient; + callContext.pResourceRef = pResourceRef; + + pMemory = dynamicCast(pResourceRef->pResource, Memory); + if (pMemory != NULL) + { + NV_ASSERT_OK_OR_RETURN(memGetMapAddrSpace(pMemory, &callContext, pParams->mapFlags, &memType)); + + // Soon FlaMemory will be moved to ADDR_FABRIC. For now, this WAR. + if ((memType == ADDR_FBMEM) && (dynamicCast(pMemory, FlaMemory) != NULL)) + { + memType = ADDR_FABRIC; + } + } + else + { + pGpuResource = dynamicCast(pResourceRef->pResource, GpuResource); + if (pGpuResource != NULL) + { + NV_ASSERT_OK_OR_RETURN(gpuresGetMapAddrSpace(pGpuResource, &callContext, pParams->mapFlags, &memType)); + } + else + { + return NV_ERR_INVALID_OBJECT; + } + } + + switch (memType) + { + case ADDR_SYSMEM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_SYSMEM; + break; + case ADDR_FBMEM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_VIDMEM; + break; + case ADDR_REGMEM: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_REGMEM; + break; + case ADDR_FABRIC: + case ADDR_FABRIC_V2: + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_FABRIC; + break; + case ADDR_VIRTUAL: + NV_PRINTF(LEVEL_ERROR, + "VIRTUAL (0x%x) is not a valid NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE\n", + memType); + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + default: + NV_PRINTF(LEVEL_ERROR, "Cannot determine address space 0x%x\n", + memType); + pParams->addrSpaceType = NV0000_CTRL_CMD_CLIENT_GET_ADDR_SPACE_TYPE_INVALID; + DBG_BREAKPOINT(); + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientGetHandleInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_HANDLE_INFO_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + NV_STATUS status; + RsResourceRef *pRsResourceRef; + + status = serverutilGetResourceRef(hClient, pParams->hObject, &pRsResourceRef); + if (status != NV_OK) + { + return status; + } + + switch (pParams->index) + { + case NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_PARENT: + pParams->data.hResult = pRsResourceRef->pParentRef ? 
pRsResourceRef->pParentRef->hResource : 0; + break; + case NV0000_CTRL_CMD_CLIENT_GET_HANDLE_INFO_INDEX_CLASSID: + pParams->data.iResult = pRsResourceRef->externalClassId; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + } + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientGetAccessRights_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_GET_ACCESS_RIGHTS_PARAMS *pParams +) +{ + NV_STATUS status; + RsResourceRef *pRsResourceRef; + RsResourceRef *pClientRef = RES_GET_REF(pRmCliRes); + RsClient *pClient = pClientRef->pClient; + + status = serverutilGetResourceRef(pParams->hClient, pParams->hObject, &pRsResourceRef); + if (status != NV_OK) + { + return status; + } + + rsAccessUpdateRights(pRsResourceRef, pClient, NULL); + + rsAccessGetAvailableRights(pRsResourceRef, pClient, &pParams->maskResult); + + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdClientSetInheritedSharePolicy_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_SET_INHERITED_SHARE_POLICY_PARAMS *pParams +) +{ + NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.sharePolicy = pParams->sharePolicy; + params.hObject = RES_GET_REF(pRmCliRes)->hResource; + + return cliresCtrlCmdClientShareObject(pRmCliRes, ¶ms); +} + +NV_STATUS +cliresCtrlCmdClientShareObject_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CLIENT_SHARE_OBJECT_PARAMS *pParams +) +{ + RS_SHARE_POLICY *pSharePolicy = &pParams->sharePolicy; + RsClient *pClient = RES_GET_CLIENT(pRmCliRes); + RsResourceRef *pObjectRef; + + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldCallContext; + + NV_STATUS status; + + if (pSharePolicy->type >= RS_SHARE_TYPE_MAX) + return NV_ERR_INVALID_ARGUMENT; + + status = clientGetResourceRef(pClient, pParams->hObject, &pObjectRef); + if (status != NV_OK) + return status; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = &g_resServ; + callContext.pClient = pClient; + callContext.pResourceRef = pObjectRef; + callContext.secInfo = pCallContext->secInfo; + + resservSwapTlsCallContext(&pOldCallContext, &callContext); + status = clientShareResource(pClient, pObjectRef, pSharePolicy, &callContext); + resservRestoreTlsCallContext(pOldCallContext); + if (status != NV_OK) + return status; + + // + // Above clientShareResource does everything needed for normal sharing, + // but we may still need to add a backref if we're sharing with a client, + // to prevent stale access. + // + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) && + (pSharePolicy->type == RS_SHARE_TYPE_CLIENT)) + { + RsClient *pClientTarget; + + // Trying to share with self, nothing to do. 
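+        // (A client always retains access to its own objects, so no backref is
+        // needed in that case.)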
+ if (pSharePolicy->target == pClient->hClient) + return NV_OK; + + status = serverGetClientUnderLock(&g_resServ, pSharePolicy->target, &pClientTarget); + if (status != NV_OK) + return status; + + status = clientAddAccessBackRef(pClientTarget, pObjectRef); + if (status != NV_OK) + return status; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdClientGetChildHandle_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_CLIENT_GET_CHILD_HANDLE_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + NV_STATUS status; + RsResourceRef *pParentRef; + RsResourceRef *pResourceRef; + + status = serverutilGetResourceRef(hClient, pParams->hParent, &pParentRef); + if (status != NV_OK) + { + return status; + } + + status = refFindChildOfType(pParentRef, pParams->classId, NV_TRUE, &pResourceRef); + if (status == NV_OK) + { + pParams->hObject = pResourceRef ? pResourceRef->hResource : 0; + } + return status; +} + +NV_STATUS +cliresCtrlCmdGpuGetMemOpEnable_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_GET_MEMOP_ENABLE_PARAMS *pMemOpEnableParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + NV_STATUS status = NV_OK; + + pMemOpEnableParams->enableMask = 0; + + if (pSys->getProperty(pSys, PDB_PROP_SYS_ENABLE_STREAM_MEMOPS)) + { + NV_PRINTF(LEVEL_INFO, "MemOpOverride enabled\n"); + pMemOpEnableParams->enableMask = NV0000_CTRL_GPU_FLAGS_MEMOP_ENABLE; + } + + return status; +} + +NV_STATUS +cliresCtrlCmdGpuDisableNvlinkInit_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_DISABLE_NVLINK_INIT_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + + if (!rmclientIsCapableOrAdminByHandle(hClient, + NV_RM_CAP_EXT_FABRIC_MGMT, + pCallContext->secInfo.privLevel)) + { + NV_PRINTF(LEVEL_WARNING, "Non-privileged context issued privileged cmd\n"); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + if (pParams->gpuId == NV0000_CTRL_GPU_INVALID_ID) + { + return NV_ERR_INVALID_ARGUMENT; + } + + return gpumgrSetGpuInitDisabledNvlinks(pParams->gpuId, pParams->mask, pParams->bSkipHwNvlinkDisable); +} + +/*! +* @brief Get Rcerr Rpt. Returns an Rc Error report form the circular buffer. +* +* @returns NV_OK on success +*/ +NV_STATUS +cliresCtrlCmdNvdGetRcerrRpt_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + NvU32 gpuMask = 0; + NvU32 gpuAttachCount = 0; + NvU32 gpuIdx = 0; + OBJGPU *pGpu = NULL; + NvU32 processId = osGetCurrentProcess(); + + NV_ASSERT_OK_OR_RETURN(gpumgrGetGpuAttachInfo(&gpuAttachCount, &gpuMask)); + + pGpu = gpumgrGetNextGpu(gpuMask, &gpuIdx); + if (pGpu == NULL) + { + return NV_ERR_INVALID_STATE; + } + + // Assume we won't transfer any data, set the rptIdx & flags accordingly. + pParams->rptIdx = ~pParams->reqIdx; + pParams->flags = 0; + if (!RMCFG_FEATURE_PLATFORM_GSP) + { + pParams->processId = processId; + } + + if ((status = krcCliresCtrlNvdGetRcerrRptCheckPermissions_HAL( + GPU_GET_KERNEL_RC(pGpu), + pRmCliRes, + pParams)) != NV_OK) + { + return status; + } + + if (IS_GSP_CLIENT(pGpu)) + { + // + // Pre-GSP, RcDiagRec from all GPUs were stored in kernel sysmem in a + // single RING_BUFFER_LOG. + // + // With GSP, each GPU its own separate RING_BUFFER_LOG. We need to + // search in all of them. 
+ // + // However, we will always return only the first matching record in all + // cases (similar to pre-GSP behavior) + // + for (; pGpu != NULL ; pGpu = gpumgrGetNextGpu(gpuMask, &gpuIdx)) + { + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS localParams = {0}; + localParams.reqIdx = pParams->reqIdx; + localParams.owner = pParams->owner; + localParams.processId = pParams->processId; + + status = pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pRmCliRes), + RES_GET_HANDLE(pRmCliRes), + NV0000_CTRL_CMD_NVD_GET_RCERR_RPT, + &localParams, + sizeof localParams); + if (status == NV_OK && + (localParams.flags & + NV0000_CTRL_CMD_NVD_RCERR_RPT_FLAGS_DATA_VALID)) + { + // + // Each RING_BUFFER_LOG can contain MAX_RCDB_RCDIAG_WRAP_BUFF + // RmRcDiag_RECORD. We will multiply indices returned to the + // client by this value so the GPU can be uniquely identified + // (in addition to GPUTag) from + // NV0000_CTRL_CMD_NVD_GET_RCERR_RPT_PARAMS.rptIdx + // + // Note that this will result in clients receivinga rptIdx value + // larger than MAX_RCDB_RCDIAG_WRAP_BUFF. + // + NvU16 indexOffset = gpuIdx * MAX_RCDB_RCDIAG_WRAP_BUFF; + + *pParams = localParams; + pParams->startIdx += indexOffset; + pParams->endIdx += indexOffset; + pParams->rptIdx += indexOffset; + + return NV_OK; + } + + if (status == NV_ERR_BUSY_RETRY) + { + // + // To avoid the case where we silently fail to find a record + // because we skipped over to the next Gpu on getting a + // BUSY_RETRY on one of the Gpus (which might have contained the + // record). + // + return status; + } + } + } + + return status; +} + +NV_STATUS +cliresCtrlCmdLegacyConfig_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_LEGACY_CONFIG_PARAMS *pParams +) +{ + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + RsClient *pClient = RES_GET_CLIENT(pRmCliRes); + RmClient *pRmClient = dynamicCast(pClient, RmClient); + NvHandle hDeviceOrSubdevice = pParams->hContext; + NvHandle hDevice; + OBJGPU *pGpu; + GpuResource *pGpuResource; + NV_STATUS rmStatus = NV_OK; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pRmClient != NULL, NV_ERR_INVALID_CLIENT); + + // + // Clients pass in device or subdevice as context for NvRmConfigXyz. + // + rmStatus = gpuresGetByDeviceOrSubdeviceHandle(pClient, + hDeviceOrSubdevice, + &pGpuResource); + if (rmStatus != NV_OK) + return rmStatus; + + hDevice = RES_GET_HANDLE(GPU_RES_GET_DEVICE(pGpuResource)); + pGpu = GPU_RES_GET_GPU(pGpuResource); + + // + // GSP client builds should have these legacy APIs disabled, + // but a monolithic build running in offload mode can still reach here, + // so log those cases and bail early to keep the same behavior. 
+ // + NV_ASSERT_OR_RETURN(!IS_GSP_CLIENT(pGpu), NV_ERR_NOT_SUPPORTED); + + GPU_RES_SET_THREAD_BC_STATE(pGpuResource); + + pParams->dataType = pParams->opType; + + switch (pParams->opType) + { + default: + PORT_UNREFERENCED_VARIABLE(pGpu); + PORT_UNREFERENCED_VARIABLE(hDevice); + PORT_UNREFERENCED_VARIABLE(hClient); + rmStatus = NV_ERR_NOT_SUPPORTED; + break; + } + + return rmStatus; +} + +NV_STATUS +cliresCtrlCmdIdleChannels_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_GPU_IDLE_CHANNELS_PARAMS *pParams +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + NvBool bUserModeArgs = pCallContext->secInfo.paramLocation != PARAM_LOCATION_KERNEL; + NvHandle hClient = RES_GET_CLIENT_HANDLE(pRmCliRes); + + return RmIdleChannels(hClient, pParams->hDevice, pParams->hChannel, pParams->numChannels, + pParams->phClients, pParams->phDevices, pParams->phChannels, + pParams->flags, pParams->timeout, bUserModeArgs); +} + +NV_STATUS +cliresCtrlCmdSystemSyncExternalFabricMgmt_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_CMD_SYSTEM_SYNC_EXTERNAL_FABRIC_MGMT_PARAMS *pExtFabricMgmtParams +) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + + pSys->setProperty(pSys, PDB_PROP_SYS_FABRIC_IS_EXTERNALLY_MANAGED, + pExtFabricMgmtParams->bExternalFabricMgmt); + return NV_OK; +} + +NV_STATUS cliresCtrlCmdSystemGetClientDatabaseInfo_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GET_CLIENT_DATABASE_INFO_PARAMS *pParams +) +{ + pParams->clientCount = g_resServ.activeClientCount; + pParams->resourceCount = g_resServ.activeResourceCount; + return NV_OK; +} + +NV_STATUS +cliresCtrlCmdSystemGetPerfSensorCounters_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + portMemSet(pParams, 0, sizeof(*pParams)); + return status; +} + +NV_STATUS +cliresCtrlCmdSystemGetExtendedPerfSensorCounters_IMPL +( + RmClientResource *pRmCliRes, + NV0000_CTRL_SYSTEM_GPS_GET_PERF_SENSOR_COUNTERS_PARAMS *pParams +) +{ + NV_STATUS status = NV_OK; + portMemSet(pParams, 0, sizeof(*pParams)); + return status; +} diff --git a/src/nvidia/src/kernel/rmapi/control.c b/src/nvidia/src/kernel/rmapi/control.c new file mode 100644 index 000000000..9d9f9de30 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/control.c @@ -0,0 +1,880 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "rmapi/rmapi.h" +#include "rmapi/control.h" +#include "rmapi/client.h" +#include "rmapi/rs_utils.h" +#include "diagnostics/tracer.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "gpu/device/device.h" + +#include "entry_points.h" +#include "resserv/rs_access_map.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" + +#include "ctrl/ctrl0000/ctrl0000client.h" // NV0000_CTRL_CMD_CLIENT_* +#include "ctrl/ctrl0000/ctrl0000gpu.h" // NV0000_CTRL_CMD_GPU_* +#include "ctrl/ctrl0000/ctrl0000system.h" // NV0000_CTRL_CMD_SYSTEM_* +#include "ctrl/ctrl0000/ctrl0000syncgpuboost.h" // NV0000_CTRL_CMD_SYNC_GPU_BOOST_* +#include "ctrl/ctrl0000/ctrl0000nvd.h" // NV0000_CTRL_CMD_NVD_* +#include "ctrl/ctrl2080/ctrl2080rc.h" // NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM +#include "ctrl/ctrl0002.h" // N09002_CTRL_CMD_*_CONTEXTDMA +#include "ctrl/ctrl906f.h" // NV906F_CTRL_CMD_GET_MMU_FAULT_INFO +#include "ctrl/ctrlc370/ctrlc370chnc.h" // NVC370_CTRL_CMD_* +#include "ctrl/ctrl9010.h" //NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION +#include "ctrl/ctrl2080/ctrl2080tmr.h" // NV2080_CTRL_CMD_TIMER_* + +static NV_STATUS +releaseDeferRmCtrlBuffer(RmCtrlDeferredCmd* pRmCtrlDeferredCmd) +{ + portMemSet(&pRmCtrlDeferredCmd->paramBuffer, 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE); + + portAtomicSetS32(&pRmCtrlDeferredCmd->pending, RMCTRL_DEFERRED_FREE); + + return NV_OK; +} + +// +// This is the rmControl internal handler for deferred calls. +// +// + +NV_STATUS +rmControl_Deferred(RmCtrlDeferredCmd* pRmCtrlDeferredCmd) +{ + RmCtrlParams rmCtrlParams; + RmClient *pClient; + NvU8 paramBuffer[RMCTRL_DEFERRED_MAX_PARAM_SIZE]; + NV_STATUS status; + RS_LOCK_INFO lockInfo = {0}; + RS_CONTROL_COOKIE rmCtrlExecuteCookie = {0}; + + // init RmCtrlParams + portMemCopy(&rmCtrlParams, sizeof(RmCtrlParams), &pRmCtrlDeferredCmd->rmCtrlDeferredParams, sizeof(RmCtrlParams)); + rmCtrlParams.hParent = NV01_NULL_OBJECT; + rmCtrlParams.pGpu = NULL; + rmCtrlParams.pLockInfo = &lockInfo; + rmCtrlParams.pCookie = &rmCtrlExecuteCookie; + + // Temporary: tell ResServ not to take any locks + lockInfo.flags = RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + + if (rmapiLockIsOwner()) + { + lockInfo.state = RM_LOCK_STATES_API_LOCK_ACQUIRED; + } + else + { + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + } + + // paramsSize not > _MAX already verified in _rmControlDeferred + if ((rmCtrlParams.pParams != NvP64_NULL) && (rmCtrlParams.paramsSize != 0)) + { + // copy param to a local buffer so that pRmCtrlDeferredCmd can be released + portMemSet(paramBuffer, 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE); + portMemCopy(paramBuffer, rmCtrlParams.paramsSize, rmCtrlParams.pParams, rmCtrlParams.paramsSize); + rmCtrlParams.pParams = paramBuffer; + } + + releaseDeferRmCtrlBuffer(pRmCtrlDeferredCmd); + + // client was checked when we came in through rmControl() + // but check again to make sure it's still good + if (serverutilGetClientUnderLock(rmCtrlParams.hClient, &pClient) != NV_OK) + { + status = NV_ERR_INVALID_CLIENT; + goto exit; + } + + status = serverControl(&g_resServ, &rmCtrlParams); + +exit: + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, "deferred rmctrl %x failed %x!\n", + rmCtrlParams.cmd, status); + } + + return status; +} + +static 
NV_STATUS +_rmControlDeferred(RmCtrlParams *pRmCtrlParams, NvP64 pUserParams, NvU32 paramsSize) +{ + // Schedule a deferred rmctrl call + OBJGPU *pGpu; + NvBool bBcResource; + NV_STATUS rmStatus; + RsClient *pClient; + + // We can't allocate memory at DIRQL, so use pre-allocated buffer to store any rmctrl param. + // The size can't be large than DEFERRED_RMCTRL_MAX_PARAM_SIZE (defined in rmctrl.h), otherwise, + // fail this call. + if (paramsSize > RMCTRL_DEFERRED_MAX_PARAM_SIZE) + { + NV_PRINTF(LEVEL_WARNING, + "rmctrl param size (%d) larger than limit (%d).\n", + paramsSize, RMCTRL_DEFERRED_MAX_PARAM_SIZE); + rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + rmStatus = serverGetClientUnderLock(&g_resServ, pRmCtrlParams->hClient, &pClient); + if (rmStatus != NV_OK) + return rmStatus; + + rmStatus = gpuGetByHandle(pClient, pRmCtrlParams->hObject, &bBcResource, &pGpu); + if (rmStatus != NV_OK) + return rmStatus; + + // Set SLI BC state for thread + gpuSetThreadBcState(pGpu, bBcResource); + + pRmCtrlParams->pGpu = pGpu; + pRmCtrlParams->pLockInfo = NULL; + + switch (pRmCtrlParams->cmd) + { + // we don't have available bit left in RmCtrlParams.cmd to + // indicate a rmctrl type as deferrable so use cmd list here + case NV2080_CTRL_CMD_TIMER_SCHEDULE: + { + if (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvU32 idx; + + for ( idx = 0; idx < MAX_DEFERRED_CMDS; idx++) + { + if (portAtomicCompareAndSwapS32(&pGpu->pRmCtrlDeferredCmd[idx].pending, + RMCTRL_DEFERRED_ACQUIRED, + RMCTRL_DEFERRED_FREE)) + { + portMemCopy(&pGpu->pRmCtrlDeferredCmd[idx].rmCtrlDeferredParams, + sizeof(RmCtrlParams), pRmCtrlParams, sizeof(RmCtrlParams)); + + // copyin param to kernel buffer for deferred rmctrl + if (paramsSize != 0 && pUserParams != 0) + { + portMemCopy(pGpu->pRmCtrlDeferredCmd[idx].paramBuffer, paramsSize, + NvP64_VALUE(pUserParams), paramsSize); + + if (paramsSize < RMCTRL_DEFERRED_MAX_PARAM_SIZE) + { + portMemSet(pGpu->pRmCtrlDeferredCmd[idx].paramBuffer + + paramsSize, + 0, RMCTRL_DEFERRED_MAX_PARAM_SIZE - paramsSize); + } + + pGpu->pRmCtrlDeferredCmd[idx].rmCtrlDeferredParams.pParams = + pGpu->pRmCtrlDeferredCmd[idx].paramBuffer; + } + + portAtomicSetS32(&pGpu->pRmCtrlDeferredCmd[idx].pending, + RMCTRL_DEFERRED_READY); + + // Make sure there's a release call to trigger the deferred rmctrl. + // Previous rmctrl that is holding the lock can already + // finished (release its lock) during the period before the pending + // flag is set and after this rmctrl failed to acquire lock. + + // LOCK: try to acquire GPUs lock + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_COND_ACQUIRE, + RM_LOCK_MODULES_CLIENT) == NV_OK) + { + if (osCondAcquireRmSema(pSys->pSema) == NV_OK) + { + // In case this is called from device interrupt, use pGpu to queue DPC. + osReleaseRmSema(pSys->pSema, pGpu); + } + // In case this is called from device interrupt, use pGpu to queue DPC. 
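+                    //
+                    // Acquiring and immediately releasing the GPUs lock here
+                    // guarantees that at least one release happens after the
+                    // READY flag was set above, so the deferred command cannot
+                    // be left stranded waiting for a trigger.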
+ // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, pGpu); + } + + rmStatus = NV_OK; + goto done; + } + } + } + + rmStatus = NV_ERR_STATE_IN_USE; + break; + } + + default: + rmStatus = NV_ERR_BUSY_RETRY; + break; + } + +done: + return rmStatus; +} + +NV_STATUS +serverControlApiCopyIn +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams, + RS_CONTROL_COOKIE *pCookie +) +{ + NV_STATUS rmStatus; + API_STATE *pParamCopy; + API_STATE *pEmbeddedParamCopies; + NvP64 pUserParams; + NvU32 paramsSize; + + NV_ASSERT_OR_RETURN(pCookie != NULL, NV_ERR_INVALID_ARGUMENT); + pParamCopy = &pCookie->paramCopy; + pEmbeddedParamCopies = pCookie->embeddedParamCopies; + pUserParams = NV_PTR_TO_NvP64(pRmCtrlParams->pParams); + paramsSize = pRmCtrlParams->paramsSize; + + RMAPI_PARAM_COPY_INIT(*pParamCopy, pRmCtrlParams->pParams, pUserParams, paramsSize, 1); + + rmStatus = rmapiParamsAcquire(pParamCopy, (pRmCtrlParams->secInfo.paramLocation == PARAM_LOCATION_USER)); + if (rmStatus != NV_OK) + return rmStatus; + pCookie->bFreeParamCopy = NV_TRUE; + + rmStatus = embeddedParamCopyIn(pEmbeddedParamCopies, pRmCtrlParams); + if (rmStatus != NV_OK) + return rmStatus; + pCookie->bFreeEmbeddedCopy = NV_TRUE; + + return NV_OK; +} + +NV_STATUS +serverControlApiCopyOut +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus +) +{ + NV_STATUS cpStatus; + API_STATE *pParamCopy; + API_STATE *pEmbeddedParamCopies; + NvP64 pUserParams; + NvBool bFreeEmbeddedCopy; + NvBool bFreeParamCopy; + + NV_ASSERT_OR_RETURN(pCookie != NULL, NV_ERR_INVALID_ARGUMENT); + + pParamCopy = &pCookie->paramCopy; + pEmbeddedParamCopies = pCookie->embeddedParamCopies; + pUserParams = pCookie->paramCopy.pUserParams; + bFreeParamCopy = pCookie->bFreeParamCopy; + bFreeEmbeddedCopy = pCookie->bFreeEmbeddedCopy; + + if ((rmStatus != NV_OK) && !(pCookie->ctrlFlags & RMCTRL_FLAGS_COPYOUT_ON_ERROR)) + { + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + + if (bFreeEmbeddedCopy) + { + pEmbeddedParamCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[1].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[2].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + pEmbeddedParamCopies[3].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + } + } + + if (bFreeEmbeddedCopy) + { + cpStatus = embeddedParamCopyOut(pEmbeddedParamCopies, pRmCtrlParams); + if (rmStatus == NV_OK) + rmStatus = cpStatus; + pCookie->bFreeEmbeddedCopy = NV_FALSE; + } + + if (bFreeParamCopy) + { + cpStatus = rmapiParamsRelease(pParamCopy); + if (rmStatus == NV_OK) + rmStatus = cpStatus; + pRmCtrlParams->pParams = NvP64_VALUE(pUserParams); + pCookie->bFreeParamCopy = NV_FALSE; + } + + return rmStatus; +} + +static NvBool _rmapiRmControlCanBeRaisedIrql(NvU32 cmd) +{ + switch (cmd) + { + case NV2080_CTRL_CMD_TIMER_SCHEDULE: + case NV2080_CTRL_CMD_TIMER_GET_TIME: + // Below 2 control calls are used for flip canceling (HW Flip Queue) + // We use TRASH/ABORT mode to discard queued hw commands in the push buffer (bug 200644346) + case NVC370_CTRL_CMD_SET_ACCL: + case NVC370_CTRL_CMD_GET_CHANNEL_INFO: + case NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION: + return NV_TRUE; + default: + return NV_FALSE; + } +} + +static NvBool _rmapiRmControlCanBeBypassLock(NvU32 cmd) +{ + switch (cmd) + { + case NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM: + case NV2080_CTRL_CMD_TIMER_GET_TIME: + case NV906F_CTRL_CMD_GET_MMU_FAULT_INFO: + // Below 2 control calls are used for flip 
canceling (HW Flip Queue) + // We use TRASH/ABORT mode to discard queued hw commands in the push buffer (bug 200644346) + case NVC370_CTRL_CMD_SET_ACCL: + case NVC370_CTRL_CMD_GET_CHANNEL_INFO: + case NV2080_CTRL_CMD_BUS_SYSMEM_ACCESS: + case NV9010_CTRL_CMD_SET_VBLANK_NOTIFICATION: + case NV2080_CTRL_CMD_NVD_SET_NOCAT_JOURNAL_DATA: + return NV_TRUE; + default: + return NV_FALSE; + } +} + +static NV_STATUS +_rmapiRmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd, NvP64 pUserParams, NvU32 paramsSize, NvU32 flags, RM_API *pRmApi, API_SECURITY_INFO *pSecInfo) +{ + OBJSYS *pSys = SYS_GET_INSTANCE(); + RmClient *pClient; + RmCtrlParams rmCtrlParams; + RS_CONTROL_COOKIE rmCtrlExecuteCookie = {0}; + NvBool bIsRaisedIrqlCmd; + NvBool bIsLockBypassCmd; + NvBool bInternalRequest; + NV_STATUS rmStatus = NV_OK; + RS_LOCK_INFO lockInfo = {0}; + + RMTRACE_RMAPI(_RMCTRL_ENTRY, cmd); + + // Check first for the NULL command. + // Return NV_OK immediately for NVXXXX_CTRL_CMD_NULL (0x00000000) + // as well as the per-class NULL cmd ( _CATEGORY==0x00 and _INDEX==0x00 ) + if ((cmd == NVXXXX_CTRL_CMD_NULL) || + (FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _CATEGORY, 0x00, cmd) && + FLD_TEST_DRF_NUM(XXXX, _CTRL_CMD, _INDEX, 0x00, cmd))) + { + return NV_OK; + } + + NV_PRINTF(LEVEL_INFO, + "rmControl: hClient 0x%x hObject 0x%x cmd 0x%x\n", hClient, + hObject, cmd); + + NV_PRINTF(LEVEL_INFO, "rmControl: pUserParams 0x%p paramSize 0x%x\n", + NvP64_VALUE(pUserParams), paramsSize); + + // If we're behind either API lock or GPU lock treat as internal. + bInternalRequest = pRmApi->bApiLockInternal || pRmApi->bGpuLockInternal; + + // is this a raised IRQL cmd? + bIsRaisedIrqlCmd = (flags & NVOS54_FLAGS_IRQL_RAISED); + + // is this a lock bypass cmd? + bIsLockBypassCmd = ((flags & NVOS54_FLAGS_LOCK_BYPASS) || pRmApi->bGpuLockInternal); + + // NVOS54_FLAGS_IRQL_RAISED cmds are only allowed to be called in raised irq level. + if (bIsRaisedIrqlCmd) + { + // Check that we support this control call at raised IRQL + if (!_rmapiRmControlCanBeRaisedIrql(cmd)) + { + NV_PRINTF(LEVEL_WARNING, + "rmControl: cmd 0x%x cannot be called at raised irq level\n", cmd); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + if (!osIsRaisedIRQL()) + { + NV_PRINTF(LEVEL_WARNING, + "rmControl: raised cmd 0x%x at normal irq level\n", cmd); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + + if (bIsLockBypassCmd) + { + flags |= NVOS54_FLAGS_LOCK_BYPASS; + + if (!bInternalRequest) + { + // Check that we support bypassing locks with this control call + if (!_rmapiRmControlCanBeBypassLock(cmd)) + { + NV_PRINTF(LEVEL_WARNING, + "rmControl: cmd 0x%x cannot bypass locks\n", cmd); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + } + + // Potential race condition if run lockless? 
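+    //
+    // Validate the client handle before going any further.  Raised-IRQL and
+    // lock-bypass commands are additionally restricted to kernel-privileged
+    // callers (checked below), unless the request is internal to RM.
+    //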
+ if (serverutilGetClientUnderLock(hClient, &pClient) != NV_OK) + { + rmStatus = NV_ERR_INVALID_CLIENT; + goto done; + } + + // only kernel clients can issue raised IRQL or lock bypass cmds + // bypass client priv check for internal calls done on behalf of lower priv + // clients + if ((bIsRaisedIrqlCmd || bIsLockBypassCmd) && !bInternalRequest) + { + if (pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) + { + rmStatus = NV_ERR_INVALID_CLIENT; + goto done; + } + } + + // error check parameters + if (((paramsSize != 0) && (pUserParams == (NvP64) 0)) || + ((paramsSize == 0) && (pUserParams != (NvP64) 0))) + { + NV_PRINTF(LEVEL_WARNING, "bad params: ptr " NvP64_fmt " size: 0x%x\n", + pUserParams, paramsSize); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // init RmCtrlParams + portMemSet(&rmCtrlParams, 0, sizeof(rmCtrlParams)); + rmCtrlParams.hClient = hClient; + rmCtrlParams.hObject = hObject; + rmCtrlParams.cmd = cmd; + rmCtrlParams.flags = flags; + rmCtrlParams.pParams = NvP64_VALUE(pUserParams); + rmCtrlParams.paramsSize = paramsSize; + rmCtrlParams.hParent = NV01_NULL_OBJECT; + rmCtrlParams.pGpu = NULL; + rmCtrlParams.pResourceRef = NULL; + rmCtrlParams.secInfo = *pSecInfo; + rmCtrlParams.pLockInfo = &lockInfo; + rmCtrlParams.pCookie = &rmCtrlExecuteCookie; + rmCtrlParams.bInternal = bInternalRequest; + + if (pRmApi->bApiLockInternal) + { + lockInfo.state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK; + } + + // + // Three separate rmctrl command modes: + // + // mode#1: lock bypass rmctrl request + // mode#2: raised-irql rmctrl request + // mode#3: normal rmctrl request + // + if (bIsLockBypassCmd) + { + lockInfo.state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + + // + // Lock bypass rmctrl request. + // + rmStatus = serverControl(&g_resServ, &rmCtrlParams); + } + else if (bIsRaisedIrqlCmd) + { + // + // Raised IRQL rmctrl request. + // + // Try to get lock; if we cannot get it then place on deferred queue. + // + + // LOCK: try to acquire GPUs lock + if (osCondAcquireRmSema(pSys->pSema) == NV_OK) + { + if (rmGpuLocksAcquire(GPUS_LOCK_FLAGS_COND_ACQUIRE, RM_LOCK_MODULES_CLIENT) == NV_OK) + { + lockInfo.state |= RM_LOCK_STATES_GPUS_LOCK_ACQUIRED; + lockInfo.flags |= RM_LOCK_FLAGS_NO_API_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_NO_CLIENT_LOCK; + rmStatus = serverControl(&g_resServ, &rmCtrlParams); + + // UNLOCK: release GPUs lock + rmGpuLocksRelease(GPUS_LOCK_FLAGS_COND_ACQUIRE, osIsISR() ? rmCtrlParams.pGpu : NULL); + } + else + { + rmStatus = _rmControlDeferred(&rmCtrlParams, pUserParams, paramsSize); + } + // we must have a pGpu here for queuing of a DPC. + NV_ASSERT(!osIsISR() || rmCtrlParams.pGpu); + osReleaseRmSema(pSys->pSema, osIsISR() ? rmCtrlParams.pGpu : NULL); + } + else + { + rmStatus = _rmControlDeferred(&rmCtrlParams, pUserParams, paramsSize); + } + } + else + { + // + // Normal rmctrl request. 
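+        // rmapiPrologue()/rmapiEpilogue() bracket the call; locking itself is
+        // left to the resource server.  RM_LOCK_FLAGS_RM_SEMA asks it to take
+        // the RM semaphore, and the API/GPU locks are taken as dictated by
+        // serverControlLookupLockFlags() below.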
+ // + + RM_API_CONTEXT rmApiContext = {0}; + rmStatus = rmapiPrologue(pRmApi, &rmApiContext); + if (rmStatus != NV_OK) + goto done; + + lockInfo.flags |= RM_LOCK_FLAGS_RM_SEMA; + rmStatus = serverControl(&g_resServ, &rmCtrlParams); + rmapiEpilogue(pRmApi, &rmApiContext); + } +done: + + RMTRACE_RMAPI(_RMCTRL_EXIT, cmd); + return rmStatus; +} + +// validate rmctrl flags +NV_STATUS serverControl_ValidateCookie +( + RS_RES_CONTROL_PARAMS_INTERNAL *pRmCtrlParams, + RS_CONTROL_COOKIE *pRmCtrlExecuteCookie +) +{ + NV_STATUS status; + + if (g_resServ.bRsAccessEnabled) + { + if (pRmCtrlParams->pResourceRef != NULL) + { + // + // Check that the invoking client has appropriate access rights + // For control calls, the invoking client is the owner of the ref + // + status = rsAccessCheckRights(pRmCtrlParams->pResourceRef, + pRmCtrlParams->pResourceRef->pClient, + &pRmCtrlExecuteCookie->rightsRequired); + if (status != NV_OK) + return status; + } + else + { + // pResourceRef can be NULL when rmControlCmdExecute is manually + // invoked from the deferred API path (see class5080DeferredApiV2). + // For now, we skip performing any access right checks in this case. + } + } + else + { + // + // When access rights are disabled, any control calls that have the + // *_IF_RS_ACCESS_DISABLED flags should be treated as if they were declared + // with the corresponding flags + // + if ((pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_PRIVILEGED_IF_RS_ACCESS_DISABLED) != 0) + { + pRmCtrlExecuteCookie->ctrlFlags |= RMCTRL_FLAGS_PRIVILEGED; + } + } + + if ((pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_INTERNAL)) + { + NvBool bInternalCall = pRmCtrlParams->bInternal; + if (!bInternalCall) + return NV_ERR_NOT_SUPPORTED; + } + + if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_PRIVILEGED) + { + // + // Calls originating from usermode require admin perms while calls + // originating from other kernel drivers are always allowed. + // + if ((pRmCtrlParams->secInfo.privLevel < RS_PRIV_LEVEL_USER_ROOT) + ) + { + NV_PRINTF(LEVEL_WARNING, + "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-privileged context issued privileged cmd\n", + pRmCtrlParams->hClient, pRmCtrlParams->hObject, + pRmCtrlParams->cmd); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + // permissions check for KERNEL_PRIVILEGED (default) unless NON_PRIVILEGED, PRIVILEGED or INTERNAL is specified + if ( !(pRmCtrlExecuteCookie->ctrlFlags & (RMCTRL_FLAGS_NON_PRIVILEGED | RMCTRL_FLAGS_PRIVILEGED | RMCTRL_FLAGS_INTERNAL))) + { + if ((pRmCtrlParams->secInfo.privLevel < RS_PRIV_LEVEL_KERNEL) + ) + { + NV_PRINTF(LEVEL_WARNING, + "hClient: 0x%08x, hObject 0x%08x, cmd 0x%08x: non-kernel client issued kernel-only cmd\n", + pRmCtrlParams->hClient, pRmCtrlParams->hObject, + pRmCtrlParams->cmd); + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + } + + // fail if GPU isn't ready + if ((!(pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_ACCESS)) && (pRmCtrlParams->pGpu != NULL)) + { + API_GPU_FULL_POWER_SANITY_CHECK(pRmCtrlParams->pGpu, NV_FALSE, + pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_ALLOW_WITHOUT_SYSMEM_ACCESS); + + if ( ! 
API_GPU_ATTACHED_SANITY_CHECK(pRmCtrlParams->pGpu)) + return NV_ERR_GPU_IS_LOST; + } + + if ((pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) && + (pRmCtrlParams->secInfo.paramLocation != PARAM_LOCATION_KERNEL)) + { + return NV_ERR_INVALID_PARAMETER; + } + + return NV_OK; +} + +NV_STATUS +serverControlLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RmCtrlParams *pRmCtrlParams, + RmCtrlExecuteCookie *pRmCtrlExecuteCookie, + LOCK_ACCESS_TYPE *pAccess +) +{ + if (pAccess == NULL) + return NV_ERR_INVALID_ARGUMENT; + + *pAccess = LOCK_ACCESS_WRITE; + + if (lock == RS_LOCK_TOP) + { + if (!serverSupportsReadOnlyLock(&g_resServ, RS_LOCK_TOP, RS_API_CTRL)) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + + if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_API_LOCK_READONLY) + *pAccess = LOCK_ACCESS_READ; + + return NV_OK; + } + + if (lock == RS_LOCK_RESOURCE) + { + RS_LOCK_INFO *pLockInfo = pRmCtrlParams->pLockInfo; + + // + // Do not acquire the GPU lock if we were explicitly told + // not to or if this is an Internal Call meaning that + // we already own the GPUs Lock. + // + if ((pLockInfo->state & RM_LOCK_STATES_GPUS_LOCK_ACQUIRED) || + (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK) || + (pRmCtrlParams->flags & NVOS54_FLAGS_IRQL_RAISED) || + (pRmCtrlParams->flags & NVOS54_FLAGS_LOCK_BYPASS)) + { + pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags &= ~RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + else + { + if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_GPU_LOCK_DEVICE_ONLY) + { + pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + else + { + pLockInfo->flags &= ~RM_LOCK_FLAGS_NO_GPUS_LOCK; + pLockInfo->flags &= ~RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + + if (pRmCtrlExecuteCookie->ctrlFlags & RMCTRL_FLAGS_GPU_LOCK_READONLY) + *pAccess = LOCK_ACCESS_READ; + } + + return NV_OK; + } + + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +rmapiControl +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->ControlWithSecInfo(pRmApi, hClient, hObject, cmd, NV_PTR_TO_NvP64(pParams), + paramsSize, 0, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiControlWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + + NV_PRINTF(LEVEL_INFO, + "Nv04Control: hClient:0x%x hObject:0x%x cmd:0x%x params:" NvP64_fmt " paramSize:0x%x flags:0x%x\n", + hClient, hObject, cmd, pParams, paramsSize, flags); + + NVRM_TRACE_API('CTRL', hClient, hObject, cmd); + + status = _rmapiRmControl(hClient, hObject, cmd, pParams, paramsSize, flags, pRmApi, pSecInfo); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04Control: control complete\n"); + NVRM_TRACE('ctrl'); + } + else + { + NV_PRINTF(LEVEL_INFO, + "Nv04Control: control failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_INFO, + "Nv04Control: hClient:0x%x hObject:0x%x cmd:0x%x params:" NvP64_fmt " paramSize:0x%x flags:0x%x\n", + hClient, hObject, cmd, pParams, paramsSize, flags); + NVRM_TRACE_ERROR('ctrl', status); + } + + return status; +} + + +// +// Called at DIRQL, where we can't do memory allocations +// Do not inline that function to save stack space +// +static NV_NOINLINE NV_STATUS +_rmapiControlWithSecInfoTlsIRQL +( + RM_API* pRmApi, + NvHandle hClient, 
+ NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO* pSecInfo +) +{ + NV_STATUS status; + THREAD_STATE_NODE threadState; + + NvU8 stackAllocator[TLS_ISR_ALLOCATOR_SIZE]; + PORT_MEM_ALLOCATOR* pIsrAllocator = portMemAllocatorCreateOnExistingBlock(stackAllocator, sizeof(stackAllocator)); + tlsIsrInit(pIsrAllocator); + + // + // SMP synchronization for Nv04Control is handled lower in the + // call sequence to accommodate the various operation-specific + // lock requirements (e.g. some operations can run locklessly). + // + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiControlWithSecInfo(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + tlsIsrDestroy(pIsrAllocator); + portMemAllocatorRelease(pIsrAllocator); + + return status; +} + + +NV_STATUS +rmapiControlWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + THREAD_STATE_NODE threadState; + + if (!portMemExSafeForNonPagedAlloc()) + { + return _rmapiControlWithSecInfoTlsIRQL(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + } + + // + // SMP synchronization for Nv04Control is handled lower in the + // call sequence to accommodate the various operation-specific + // lock requirements (e.g. some operations can run locklessly). + // + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiControlWithSecInfo(pRmApi, hClient, hObject, cmd, pParams, paramsSize, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + diff --git a/src/nvidia/src/kernel/rmapi/deprecated_context.c b/src/nvidia/src/kernel/rmapi/deprecated_context.c new file mode 100644 index 000000000..cbdfd443b --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/deprecated_context.c @@ -0,0 +1,205 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "rmapi/rmapi.h" +#include "rmapi/param_copy.h" +#include "os/os.h" +#include "deprecated_context.h" + +static NV_STATUS +_rmAllocForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->AllocWithSecInfo(pRmApi, hClient, hParent, phObject, + hClass, NV_PTR_TO_NvP64(pAllocParams), + RMAPI_ALLOC_FLAGS_NONE, NvP64_NULL, &pContext->secInfo); +} + +static NV_STATUS +_rmControlForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hObject, + NvU32 cmd, void *pParams, NvU32 paramsSize) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->ControlWithSecInfo(pRmApi, hClient, hObject, cmd, + NV_PTR_TO_NvP64(pParams), paramsSize, 0, + &pContext->secInfo); +} + +static NV_STATUS +_rmFreeForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hObject) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->FreeWithSecInfo(pRmApi, hClient, hObject, + RMAPI_FREE_FLAGS_NONE, &pContext->secInfo); +} + +static NV_STATUS +_rmMapMemoryForDeprecatedApi(DEPRECATED_CONTEXT *_pContext, NvHandle hClient, NvHandle hDevice, + NvHandle hMemory, NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags) +{ + DEPRECATED_CONTEXT_EXT *pContext = (DEPRECATED_CONTEXT_EXT *)_pContext; + RM_API *pRmApi = pContext->pRmApi; + + return pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, &pContext->secInfo); +} + +NV_STATUS +RmCopyUserForDeprecatedApi +( + RMAPI_DEPRECATED_COPY_OP op, + RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, + NvP64 dataPtr, + NvU32 dataSize, + void **ppKernelPtr, + NvBool bUserModeArgs +) +{ + NV_STATUS status = NV_OK; + + switch (op) + { + case RMAPI_DEPRECATED_COPYIN: + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + *ppKernelPtr = portMemAllocNonPaged(dataSize); + + if (*ppKernelPtr == NULL) + return NV_ERR_NO_MEMORY; + } + + status = rmapiParamsCopyIn(NULL, + *ppKernelPtr, + dataPtr, + dataSize, + bUserModeArgs); + + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + if (status != NV_OK) + { + portMemFree(*ppKernelPtr); + *ppKernelPtr = NULL; + } + } + break; + case RMAPI_DEPRECATED_COPYOUT: + status = rmapiParamsCopyOut(NULL, + *ppKernelPtr, + dataPtr, + dataSize, + bUserModeArgs); + + // intentionally fall through to release memory... 
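+            // (the copy-out status computed above is preserved; only the
+            // buffer-release logic below is shared with COPYRELEASE)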
+ case RMAPI_DEPRECATED_COPYRELEASE: + if (bufPolicy == RMAPI_DEPRECATED_BUFFER_ALLOCATE) + { + portMemFree(*ppKernelPtr); + *ppKernelPtr = NULL; + } + break; + } + + return status; +} + +static NV_STATUS +_rmCopyUserForDeprecatedApi +( + DEPRECATED_CONTEXT *_pContext, + RMAPI_DEPRECATED_COPY_OP op, + RMAPI_DEPRECATED_BUFFER_POLICY bufPolicy, + NvP64 dataPtr, + NvU32 dataSize, + void **ppKernelPtr +) +{ + return RmCopyUserForDeprecatedApi(op, bufPolicy, dataPtr, dataSize, + ppKernelPtr, + ((DEPRECATED_CONTEXT_EXT *)_pContext)->bUserModeArgs); +} + +static void * +_rmAllocMemForDeprecatedApi(NvU32 length) +{ + return portMemAllocNonPaged(length); +} + +static void +_rmFreeMemForDeprecatedApi(void *pAddress) +{ + portMemFree(pAddress); +} + +/** + * Setting bUserModeArgs to NV_FALSE can lead to Security issues where + * Privileged RM CTRL APIs are accessible by non-admin users. + * Please find more details in Bug: 3136168. + */ +void rmapiInitDeprecatedContext +( + DEPRECATED_CONTEXT_EXT *pContext, + API_SECURITY_INFO *pSecInfo, + NvBool bUserModeArgs, + NvBool bInternal +) +{ + if (pSecInfo == NULL) + { + portMemSet(&pContext->secInfo, 0, sizeof(pContext->secInfo)); + + if (bUserModeArgs) + { + pContext->secInfo.privLevel = osIsAdministrator() ? RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + } + else + { + pContext->secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + } + } + else + { + pContext->secInfo = *pSecInfo; + } + + pContext->secInfo.paramLocation = PARAM_LOCATION_KERNEL; + + pContext->bInternal = bInternal; + pContext->pRmApi = rmapiGetInterface(bInternal ? RMAPI_GPU_LOCK_INTERNAL : RMAPI_EXTERNAL); + pContext->bUserModeArgs = bUserModeArgs; + + pContext->parent.RmAlloc = _rmAllocForDeprecatedApi; + pContext->parent.RmControl = _rmControlForDeprecatedApi; + pContext->parent.RmFree = _rmFreeForDeprecatedApi; + pContext->parent.RmMapMemory = _rmMapMemoryForDeprecatedApi; + pContext->parent.CopyUser = _rmCopyUserForDeprecatedApi; + pContext->parent.AllocMem = _rmAllocMemForDeprecatedApi; + pContext->parent.FreeMem = _rmFreeMemForDeprecatedApi; +} diff --git a/src/nvidia/src/kernel/rmapi/deprecated_context.h b/src/nvidia/src/kernel/rmapi/deprecated_context.h new file mode 100644 index 000000000..1459c3232 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/deprecated_context.h @@ -0,0 +1,42 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _DEPRECATED_CONTEXT_ +#define _DEPRECATED_CONTEXT_ + +#include "deprecated/rmapi_deprecated.h" + +typedef struct +{ + DEPRECATED_CONTEXT parent; + API_SECURITY_INFO secInfo; + NvBool bInternal; + NvBool bUserModeArgs; + RM_API *pRmApi; +} DEPRECATED_CONTEXT_EXT; + +void rmapiInitDeprecatedContext (DEPRECATED_CONTEXT_EXT *pContext, + API_SECURITY_INFO *pSecInfo, + NvBool bUserModeArgs, + NvBool bInternal); + +#endif // _DEPRECATED_CONTEXT_ diff --git a/src/nvidia/src/kernel/rmapi/embedded_param_copy.c b/src/nvidia/src/kernel/rmapi/embedded_param_copy.c new file mode 100644 index 000000000..b8a24fd48 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/embedded_param_copy.c @@ -0,0 +1,967 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#include "rmapi/control.h" + +#include "ctrl/ctrl0000/ctrl0000gpu.h" +#include "ctrl/ctrl0000/ctrl0000nvd.h" +#include "ctrl/ctrl0000/ctrl0000system.h" +#include "ctrl/ctrl0041.h" +#include "ctrl/ctrl0080/ctrl0080dma.h" +#include "ctrl/ctrl0080/ctrl0080fb.h" +#include "ctrl/ctrl0080/ctrl0080fifo.h" +#include "ctrl/ctrl0080/ctrl0080gr.h" +#include "ctrl/ctrl0080/ctrl0080gpu.h" +#include "ctrl/ctrl0080/ctrl0080host.h" +#include "ctrl/ctrl0080/ctrl0080msenc.h" +#include "ctrl/ctrl0080/ctrl0080perf.h" +#include "ctrl/ctrl2080/ctrl2080bus.h" +#include "ctrl/ctrl2080/ctrl2080ce.h" +#include "ctrl/ctrl2080/ctrl2080fb.h" +#include "ctrl/ctrl2080/ctrl2080gpu.h" +#include "ctrl/ctrl2080/ctrl2080i2c.h" +#include "ctrl/ctrl2080/ctrl2080mc.h" +#include "ctrl/ctrl2080/ctrl2080nvd.h" +#include "ctrl/ctrl2080/ctrl2080perf.h" +#include "ctrl/ctrl2080/ctrl2080pmgr.h" +#include "ctrl/ctrl2080/ctrl2080rc.h" +#include "ctrl/ctrl2080/ctrl2080thermal.h" +#include "ctrl/ctrl208f/ctrl208fgpu.h" +#include "ctrl/ctrl402c.h" +#include "ctrl/ctrl0073.h" +#include "ctrl/ctrlb06f.h" +#include "ctrl/ctrl83de.h" +#ifdef USE_AMAPLIB +#include "amap_v1.h" +#endif + +// +// Validates pRmCtrlParams->pParams is non-NULL and user-provided paramsSize is correct +// This check is used in early validation paths outside the resource server lock +// +#define CHECK_PARAMS_OR_RETURN(pRmCtrlParams, paramsType) \ + do { \ + NV_CHECK_OR_RETURN(LEVEL_WARNING, \ + (((pRmCtrlParams)->pParams != NULL) && \ + ((pRmCtrlParams)->paramsSize) == sizeof(paramsType)), \ + NV_ERR_INVALID_ARGUMENT); \ + } while(0) + +static NvBool _i2cTransactionCopyIn(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCtrlParams) +{ + NV402C_CTRL_I2C_TRANSACTION_PARAMS *pParams = (NV402C_CTRL_I2C_TRANSACTION_PARAMS*)pRmCtrlParams->pParams; + NvBool bCopyInitDone = NV_FALSE; + + switch (pParams->transType) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + { + RMAPI_PARAM_COPY_INIT(paramCopies[0], + pParams->transData.i2cBlockData.pMessage, + pParams->transData.i2cBlockData.pMessage, + pParams->transData.i2cBlockData.messageLength, 1); + + bCopyInitDone = NV_TRUE; + if (pParams->transData.i2cBlockData.bWrite == NV_TRUE) + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + else + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + { + RMAPI_PARAM_COPY_INIT(paramCopies[0], + pParams->transData.i2cBufferData.pMessage, + pParams->transData.i2cBufferData.pMessage, + pParams->transData.i2cBufferData.messageLength, 1); + + bCopyInitDone = NV_TRUE; + if (pParams->transData.i2cBufferData.bWrite == NV_TRUE) + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + else + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + { + RMAPI_PARAM_COPY_INIT(paramCopies[0], + pParams->transData.smbusBlockData.pMessage, + pParams->transData.smbusBlockData.pMessage, + pParams->transData.smbusBlockData.messageLength, 1); + + bCopyInitDone = NV_TRUE; + if (pParams->transData.smbusBlockData.bWrite == NV_TRUE) + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + else + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + { + RMAPI_PARAM_COPY_INIT(paramCopies[0], + pParams->transData.smbusMultibyteRegisterData.pMessage, + pParams->transData.smbusMultibyteRegisterData.pMessage, + 
pParams->transData.smbusMultibyteRegisterData.messageLength, 1); + + bCopyInitDone = NV_TRUE; + if (pParams->transData.smbusMultibyteRegisterData.bWrite == NV_TRUE) + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + else + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + { + RMAPI_PARAM_COPY_INIT(paramCopies[0], + pParams->transData.edidData.pMessage, + pParams->transData.edidData.pMessage, + pParams->transData.edidData.messageLength, 1); + + bCopyInitDone = NV_TRUE; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + + break; + } + + default: + break; + } + + return bCopyInitDone; +} + +static NV_STATUS i2cTransactionCopyOut(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCtrlParams) +{ + NV_STATUS status = NV_OK; + NV402C_CTRL_I2C_TRANSACTION_PARAMS *pParams = (NV402C_CTRL_I2C_TRANSACTION_PARAMS*)pRmCtrlParams->pParams; + + switch (pParams->transType) + { + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BLOCK_RW: + { + status = rmapiParamsRelease(¶mCopies[0]); + pParams->transData.i2cBlockData.pMessage = paramCopies[0].pUserParams; + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_I2C_BUFFER_RW: + { + status = rmapiParamsRelease(¶mCopies[0]); + pParams->transData.i2cBufferData.pMessage = paramCopies[0].pUserParams; + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_BLOCK_RW: + { + status = rmapiParamsRelease(¶mCopies[0]); + pParams->transData.smbusBlockData.pMessage = paramCopies[0].pUserParams; + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_SMBUS_MULTIBYTE_REGISTER_BLOCK_RW: + { + status = rmapiParamsRelease(¶mCopies[0]); + pParams->transData.smbusMultibyteRegisterData.pMessage = paramCopies[0].pUserParams; + break; + } + case NV402C_CTRL_I2C_TRANSACTION_TYPE_READ_EDID_DDC: + { + status = rmapiParamsRelease(¶mCopies[0]); + pParams->transData.edidData.pMessage = paramCopies[0].pUserParams; + break; + } + + default: + break; + } + + return status; +} + +/* + * Helper routine to handle all embedded pointer user to kernel copies + * Top level parameters are already copied to kernel (done in rmControlCmdExecute/dispControlSynchronizedCmdExecute) + * After successful execution each embedded pointer within pRmCtrlParams->pParams will be kernel + * paramCopies is a 4-element array as we currently have up to 4 embedded pointers in existing RM Controls + * + * No new RM Controls with embedded pointers should ever be added! 
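+ *
+ * Requests whose parameters already live in kernel space (PARAM_LOCATION_KERNEL)
+ * are returned unchanged.  If any individual rmapiParamsAcquire() fails, every
+ * copy acquired so far is released again (with copy-out suppressed) and the
+ * failing status is returned to the caller.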
+ * + * See bug 1867098 for more reference - [RM Linux/UVM] Fix GPU lock/mmap_sem lock inversion + */ +NV_STATUS embeddedParamCopyIn(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCtrlParams) +{ + NV_STATUS status = NV_OK; + NV_STATUS statusCleanUp = NV_OK; + void* pParams = pRmCtrlParams->pParams; + NvU32 paramsCnt = 1; + NvU32 i, j = 0; + + if (pRmCtrlParams->secInfo.paramLocation == PARAM_LOCATION_KERNEL) + { + return NV_OK; + } + + switch (pRmCtrlParams->cmd) + { + case NV2080_CTRL_CMD_GPU_GET_ENGINES: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_GPU_GET_ENGINES_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_GPU_GET_ENGINES_PARAMS*)pParams)->engineList, + ((NV2080_CTRL_GPU_GET_ENGINES_PARAMS*)pParams)->engineList, + ((NV2080_CTRL_GPU_GET_ENGINES_PARAMS*)pParams)->engineCount, sizeof(NvU32)); + break; + } + case NV2080_CTRL_CMD_BUS_GET_INFO: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_BUS_GET_INFO_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_BUS_GET_INFO_PARAMS*)pParams)->busInfoList, + ((NV2080_CTRL_BUS_GET_INFO_PARAMS*)pParams)->busInfoList, + ((NV2080_CTRL_BUS_GET_INFO_PARAMS*)pParams)->busInfoListSize, + sizeof(NV2080_CTRL_BUS_INFO)); + break; + } + case NV2080_CTRL_CMD_FB_GET_INFO: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_FB_GET_INFO_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_FB_GET_INFO_PARAMS*)pParams)->fbInfoList, + ((NV2080_CTRL_FB_GET_INFO_PARAMS*)pParams)->fbInfoList, + ((NV2080_CTRL_FB_GET_INFO_PARAMS*)pParams)->fbInfoListSize, + sizeof(NV2080_CTRL_FB_INFO)); + break; + } +#ifdef USE_AMAPLIB + case NV2080_CTRL_CMD_FB_GET_AMAP_CONF: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS); + + NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS *userParams = (NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS*)pParams; + NvU32 amapConfParamsSize = (userParams->pAmapConfParams != NvP64_NULL) ? + sizeof(ConfParamsV1) : 0; + NvU32 cbcSwizzleParamsSize = (userParams->pCbcSwizzleParams != NvP64_NULL) ? 
+ sizeof(CbcSwizzleParamsV1) : 0; + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + userParams->pAmapConfParams, + userParams->pAmapConfParams, + 1, amapConfParamsSize); + RMAPI_PARAM_COPY_INIT(paramCopies[1], + userParams->pCbcSwizzleParams, + userParams->pCbcSwizzleParams, + 1, cbcSwizzleParamsSize); + + paramsCnt++; + + break; + } +#endif + case NV2080_CTRL_CMD_CE_GET_CAPS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_CE_GET_CAPS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_CE_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV2080_CTRL_CE_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV2080_CTRL_CE_GET_CAPS_PARAMS*)pParams)->capsTblSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS*)pParams)->pChannelHandleList, + ((NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS*)pParams)->pChannelHandleList, + ((NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS*)pParams)->numChannels, sizeof(NvU32)); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + + RMAPI_PARAM_COPY_INIT(paramCopies[1], + ((NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS*)pParams)->pChannelList, + ((NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS*)pParams)->pChannelList, + ((NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS*)pParams)->numChannels, sizeof(NvU32)); + paramsCnt++; + + break; + } + case NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS); + + NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS *pUserParams = (NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS*)pParams; + NvU32 featureDebugValuesSize = (pUserParams->pFeatureDebugValues != NULL) ? 
sizeof(NV0073_CTRL_DP_MSA_PROPERTIES_VALUES) + : 0; + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + pUserParams->pFeatureDebugValues, + pUserParams->pFeatureDebugValues, + 1, featureDebugValuesSize); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV0080_CTRL_CMD_HOST_GET_CAPS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_HOST_GET_CAPS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_HOST_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_HOST_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_HOST_GET_CAPS_PARAMS*)pParams)->capsTblSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV0080_CTRL_CMD_MSENC_GET_CAPS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_MSENC_GET_CAPS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_MSENC_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_MSENC_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_MSENC_GET_CAPS_PARAMS*)pParams)->capsTblSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV2080_CTRL_CMD_BIOS_GET_INFO: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_BIOS_GET_INFO_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_BIOS_GET_INFO_PARAMS*)pParams)->biosInfoList, + ((NV2080_CTRL_BIOS_GET_INFO_PARAMS*)pParams)->biosInfoList, + ((NV2080_CTRL_BIOS_GET_INFO_PARAMS*)pParams)->biosInfoListSize, + sizeof(NV2080_CTRL_BIOS_INFO)); + break; + } + case NV0080_CTRL_CMD_GR_GET_INFO: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_GR_GET_INFO_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_GR_GET_INFO_PARAMS*)pParams)->grInfoList, + ((NV0080_CTRL_GR_GET_INFO_PARAMS*)pParams)->grInfoList, + ((NV0080_CTRL_GR_GET_INFO_PARAMS*)pParams)->grInfoListSize, + sizeof(NV0080_CTRL_GR_INFO)); + break; + } + case NV0080_CTRL_CMD_FIFO_START_SELECTED_CHANNELS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS*)pParams)->fifoStartChannelList, + ((NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS*)pParams)->fifoStartChannelList, + ((NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS*)pParams)->fifoStartChannelListSize, + sizeof(NV0080_CTRL_FIFO_CHANNEL)); + break; + } + case NV0080_CTRL_CMD_FIFO_GET_CAPS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_FIFO_GET_CAPS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_FIFO_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_FIFO_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_FIFO_GET_CAPS_PARAMS*)pParams)->capsTblSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV83DE_CTRL_CMD_DEBUG_READ_MEMORY: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS*)pParams)->buffer, + ((NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS*)pParams)->buffer, + ((NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS*)pParams)->length, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + paramCopies[0].flags |= 
RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK; + break; + } + case NV83DE_CTRL_CMD_DEBUG_WRITE_MEMORY: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS*)pParams)->buffer, + ((NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS*)pParams)->buffer, + ((NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS*)pParams)->length, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK; + break; + } + case NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS*)pParams)->pData, + ((NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS*)pParams)->pData, + ((NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS*)pParams)->dataLength, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK; + break; + } + case NV83DE_CTRL_CMD_DEBUG_WRITE_BATCH_MEMORY: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS*)pParams)->pData, + ((NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS*)pParams)->pData, + ((NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS*)pParams)->dataLength, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK; + break; + } + case NV402C_CTRL_CMD_I2C_INDEXED: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV402C_CTRL_I2C_INDEXED_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV402C_CTRL_I2C_INDEXED_PARAMS*)pParams)->pMessage, + ((NV402C_CTRL_I2C_INDEXED_PARAMS*)pParams)->pMessage, + ((NV402C_CTRL_I2C_INDEXED_PARAMS*)pParams)->messageLength, 1); + break; + } + case NV402C_CTRL_CMD_I2C_TRANSACTION: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV402C_CTRL_I2C_TRANSACTION_PARAMS); + + if (!_i2cTransactionCopyIn(paramCopies, pRmCtrlParams)) + { + return status; + } + + break; + } + case NV2080_CTRL_CMD_GPU_EXEC_REG_OPS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS*)pParams)->regOps, + ((NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS*)pParams)->regOps, + ((NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS*)pParams)->regOpCount, + sizeof(NV2080_CTRL_GPU_REG_OP)); + break; + } + case NV2080_CTRL_CMD_NVD_GET_DUMP: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_NVD_GET_DUMP_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_NVD_GET_DUMP_PARAMS*)pParams)->pBuffer, + ((NV2080_CTRL_NVD_GET_DUMP_PARAMS*)pParams)->pBuffer, + ((NV2080_CTRL_NVD_GET_DUMP_PARAMS*)pParams)->size, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV0000_CTRL_CMD_NVD_GET_DUMP: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0000_CTRL_NVD_GET_DUMP_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0000_CTRL_NVD_GET_DUMP_PARAMS*)pParams)->pBuffer, + ((NV0000_CTRL_NVD_GET_DUMP_PARAMS*)pParams)->pBuffer, + ((NV0000_CTRL_NVD_GET_DUMP_PARAMS*)pParams)->size, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + 
case NV0041_CTRL_CMD_GET_SURFACE_INFO: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0041_CTRL_GET_SURFACE_INFO_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0041_CTRL_GET_SURFACE_INFO_PARAMS*)pParams)->surfaceInfoList, + ((NV0041_CTRL_GET_SURFACE_INFO_PARAMS*)pParams)->surfaceInfoList, + ((NV0041_CTRL_GET_SURFACE_INFO_PARAMS*)pParams)->surfaceInfoListSize, + sizeof(NV0041_CTRL_SURFACE_INFO)); + break; + } +#ifdef NV0000_CTRL_CMD_OS_GET_CAPS +// Not defined on all platforms + case NV0000_CTRL_CMD_OS_GET_CAPS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0000_CTRL_OS_GET_CAPS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0000_CTRL_OS_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0000_CTRL_OS_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0000_CTRL_OS_GET_CAPS_PARAMS*)pParams)->capsTblSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } +#endif + case NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS: + { + NvU32 numEntries = 0; + + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS); + + if (NvP64_VALUE(((NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS*)pParams)->busPeerIds) != NULL) + { + // The handler will check gpuCount * gpuCount against overflow + numEntries = ((NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS*)pParams)->gpuCount * + ((NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS*)pParams)->gpuCount; + } + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS*)pParams)->busPeerIds, + ((NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS*)pParams)->busPeerIds, + numEntries, sizeof(NvU32)); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + + break; + } + case NV0080_CTRL_CMD_FB_GET_CAPS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_FB_GET_CAPS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_FB_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_FB_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_FB_GET_CAPS_PARAMS*)pParams)->capsTblSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV0080_CTRL_CMD_GPU_GET_CLASSLIST: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS*)pParams)->classList, + ((NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS*)pParams)->classList, + ((NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS*)pParams)->numClasses, sizeof(NvU32)); + break; + } + case NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS*)pParams)->classList, + ((NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS*)pParams)->classList, + ((NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS*)pParams)->numClasses, sizeof(NvU32)); + break; + } + case NV0080_CTRL_CMD_GR_GET_CAPS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_GR_GET_CAPS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_GR_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_GR_GET_CAPS_PARAMS*)pParams)->capsTbl, + ((NV0080_CTRL_GR_GET_CAPS_PARAMS*)pParams)->capsTblSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV2080_CTRL_CMD_I2C_ACCESS: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, 
NV2080_CTRL_I2C_ACCESS_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_I2C_ACCESS_PARAMS*)pParams)->data, + ((NV2080_CTRL_I2C_ACCESS_PARAMS*)pParams)->data, + ((NV2080_CTRL_I2C_ACCESS_PARAMS*)pParams)->dataBuffSize, 1); + break; + } + case NV2080_CTRL_CMD_GR_GET_INFO: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_GR_GET_INFO_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_GR_GET_INFO_PARAMS*)pParams)->grInfoList, + ((NV2080_CTRL_GR_GET_INFO_PARAMS*)pParams)->grInfoList, + ((NV2080_CTRL_GR_GET_INFO_PARAMS*)pParams)->grInfoListSize, + sizeof(NV2080_CTRL_GR_INFO)); + break; + } + case NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS*)pParams)->pEngineCtxBuff, + ((NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS*)pParams)->pEngineCtxBuff, + ((NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS*)pParams)->size, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK; + + break; + } + case NVB06F_CTRL_CMD_GET_ENGINE_CTX_DATA: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS*)pParams)->pEngineCtxBuff, + ((NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS*)pParams)->pEngineCtxBuff, + ((NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS*)pParams)->size, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK; + + break; + } + case NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS*)pParams)->bufferPtr, + ((NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS*)pParams)->bufferPtr, + ((NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS*)pParams)->bufferSize, 1); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + case NV0080_CTRL_CMD_DMA_UPDATE_PDE_2: + { + CHECK_PARAMS_OR_RETURN(pRmCtrlParams, NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS); + + RMAPI_PARAM_COPY_INIT(paramCopies[0], + ((NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS*)pParams)->pPdeBuffer, + ((NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS*)pParams)->pPdeBuffer, + 1, 0x8/*NV_MMU_VER2_PDE__SIZE*/); + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN; + paramCopies[0].flags |= RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER; + + break; + } + + default: + { + return status; + } + } + + for (i = 0; i < paramsCnt; i++) + { + status = rmapiParamsAcquire(¶mCopies[i], + (pRmCtrlParams->secInfo.paramLocation != PARAM_LOCATION_KERNEL)); + if (status != NV_OK) + break; + } + + // Preserve the original pRmCtrlParams->secInfo.paramLocation value since: + // - paramLocation should not be used beyond this point except + // - in embeddedParamCopyOut to skip copy-out in case when all parameters were in kernel space + + // If a single rmapiParamsAcquire() fails release all previous paramCopies + if (status != NV_OK) + { + for (j = 0; j <= i; j++) + { + paramCopies[j].flags |= RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT; + statusCleanUp = rmapiParamsRelease(¶mCopies[j]); + } + } + + return (status == NV_OK) ? 
statusCleanUp : status; +} + +NV_STATUS embeddedParamCopyOut(RMAPI_PARAM_COPY *paramCopies, RmCtrlParams *pRmCtrlParams) +{ + NV_STATUS status = NV_OK; + void* pParams = pRmCtrlParams->pParams; + + if (pRmCtrlParams->secInfo.paramLocation == PARAM_LOCATION_KERNEL) + { + return NV_OK; + } + + switch (pRmCtrlParams->cmd) + { + case NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_GPU_GET_NVENC_SW_SESSION_INFO_PARAMS*)pParams)->sessionInfoTbl = paramCopies[0].pUserParams; + break; + } + case NV2080_CTRL_CMD_GPU_GET_ENGINES: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_GPU_GET_ENGINES_PARAMS*)pParams)->engineList = paramCopies[0].pUserParams; + break; + } + case NV2080_CTRL_CMD_BUS_GET_INFO: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_BUS_GET_INFO_PARAMS*)pParams)->busInfoList = paramCopies[0].pUserParams; + break; + } + case NV2080_CTRL_CMD_FB_GET_INFO: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_FB_GET_INFO_PARAMS*)pParams)->fbInfoList = paramCopies[0].pUserParams; + break; + } +#ifdef USE_AMAPLIB + case NV2080_CTRL_CMD_FB_GET_AMAP_CONF: + { + NV_STATUS status2; + NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS *pParamsUser = + (NV2080_CTRL_CMD_FB_GET_AMAP_CONF_PARAMS *) pParams; + + status = rmapiParamsRelease(¶mCopies[0]); + status2 = rmapiParamsRelease(¶mCopies[1]); + pParamsUser->pAmapConfParams = paramCopies[0].pUserParams; + pParamsUser->pCbcSwizzleParams = paramCopies[1].pUserParams; + + status = (status == NV_OK) ? status2 : status; + + break; + } +#endif + case NV2080_CTRL_CMD_CE_GET_CAPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_CE_GET_CAPS_PARAMS*)pParams)->capsTbl = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST: + { + NV_STATUS handleListParamStatus = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS*)pParams)->pChannelHandleList = paramCopies[0].pUserParams; + + status = rmapiParamsRelease(¶mCopies[1]); + ((NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS*)pParams)->pChannelList = paramCopies[1].pUserParams; + + if (handleListParamStatus != NV_OK) + status = handleListParamStatus; + + break; + } + case NV83DE_CTRL_CMD_DEBUG_READ_MEMORY: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV83DE_CTRL_DEBUG_READ_MEMORY_PARAMS*)pRmCtrlParams->pParams)->buffer = paramCopies[0].pUserParams; + break; + } + case NV83DE_CTRL_CMD_DEBUG_WRITE_MEMORY: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV83DE_CTRL_DEBUG_WRITE_MEMORY_PARAMS*)pRmCtrlParams->pParams)->buffer = paramCopies[0].pUserParams; + break; + } + case NV83DE_CTRL_CMD_DEBUG_READ_BATCH_MEMORY: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS*)pRmCtrlParams->pParams)->pData = paramCopies[0].pUserParams; + break; + } + case NV83DE_CTRL_CMD_DEBUG_WRITE_BATCH_MEMORY: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV83DE_CTRL_DEBUG_ACCESS_MEMORY_PARAMS*)pRmCtrlParams->pParams)->pData = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_HOST_GET_CAPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_HOST_GET_CAPS_PARAMS*)pParams)->capsTbl = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_MSENC_GET_CAPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_MSENC_GET_CAPS_PARAMS*)pParams)->capsTbl = paramCopies[0].pUserParams; + break; + } + case NV2080_CTRL_CMD_BIOS_GET_INFO: + { + status = rmapiParamsRelease(¶mCopies[0]); + 
((NV2080_CTRL_BIOS_GET_INFO_PARAMS*)pParams)->biosInfoList = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_GR_GET_INFO: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_GR_GET_INFO_PARAMS*)pParams)->grInfoList = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_FIFO_START_SELECTED_CHANNELS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_FIFO_START_SELECTED_CHANNELS_PARAMS*)pParams)->fifoStartChannelList = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_FIFO_GET_CAPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_FIFO_GET_CAPS_PARAMS*)pParams)->capsTbl = paramCopies[0].pUserParams; + break; + } + case NV402C_CTRL_CMD_I2C_INDEXED: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV402C_CTRL_I2C_INDEXED_PARAMS*)pParams)->pMessage = paramCopies[0].pUserParams; + break; + } + case NV402C_CTRL_CMD_I2C_TRANSACTION: + { + return i2cTransactionCopyOut(paramCopies, pRmCtrlParams); + } + case NV2080_CTRL_CMD_GPU_EXEC_REG_OPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_GPU_EXEC_REG_OPS_PARAMS*)pParams)->regOps = paramCopies[0].pUserParams; + break; + } + case NV2080_CTRL_CMD_NVD_GET_DUMP: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_NVD_GET_DUMP_PARAMS*)pParams)->pBuffer = paramCopies[0].pUserParams; + break; + } + case NV0000_CTRL_CMD_NVD_GET_DUMP: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0000_CTRL_NVD_GET_DUMP_PARAMS*)pParams)->pBuffer = paramCopies[0].pUserParams; + break; + } + case NV0041_CTRL_CMD_GET_SURFACE_INFO: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0041_CTRL_GET_SURFACE_INFO_PARAMS*)pParams)->surfaceInfoList = paramCopies[0].pUserParams; + break; + } +#ifdef NV0000_CTRL_CMD_OS_GET_CAPS + // Not defined on all platforms + case NV0000_CTRL_CMD_OS_GET_CAPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0000_CTRL_OS_GET_CAPS_PARAMS*)pParams)->capsTbl = paramCopies[0].pUserParams; + break; + } +#endif + case NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0000_CTRL_SYSTEM_GET_P2P_CAPS_PARAMS*)pParams)->busPeerIds = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_FB_GET_CAPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_FB_GET_CAPS_PARAMS*)pParams)->capsTbl = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_GPU_GET_CLASSLIST: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS*)pParams)->classList = paramCopies[0].pUserParams; + break; + } + case NV2080_CTRL_CMD_GPU_GET_ENGINE_CLASSLIST: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_GPU_GET_ENGINE_CLASSLIST_PARAMS*)pParams)->classList = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_GR_GET_CAPS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_GR_GET_CAPS_PARAMS*)pParams)->capsTbl = paramCopies[0].pUserParams; + break; + } + case NV2080_CTRL_CMD_I2C_ACCESS: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_I2C_ACCESS_PARAMS*)pParams)->data = paramCopies[0].pUserParams; + break; + } + case NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0073_CTRL_CMD_DP_SET_MSA_PROPERTIES_PARAMS*)pParams)->pFeatureDebugValues = NvP64_VALUE(paramCopies[0].pUserParams); + break; + } + case NV2080_CTRL_CMD_GR_GET_INFO: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_GR_GET_INFO_PARAMS*)pParams)->grInfoList = paramCopies[0].pUserParams; + break; 
+ } + case NVB06F_CTRL_CMD_MIGRATE_ENGINE_CTX_DATA: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NVB06F_CTRL_MIGRATE_ENGINE_CTX_DATA_PARAMS*)pParams)->pEngineCtxBuff = paramCopies[0].pUserParams; + break; + } + case NVB06F_CTRL_CMD_GET_ENGINE_CTX_DATA: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NVB06F_CTRL_GET_ENGINE_CTX_DATA_PARAMS*)pParams)->pEngineCtxBuff = paramCopies[0].pUserParams; + break; + } + case NV2080_CTRL_CMD_RC_READ_VIRTUAL_MEM: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV2080_CTRL_RC_READ_VIRTUAL_MEM_PARAMS*)pRmCtrlParams->pParams)->bufferPtr = paramCopies[0].pUserParams; + break; + } + case NV0080_CTRL_CMD_DMA_UPDATE_PDE_2: + { + status = rmapiParamsRelease(¶mCopies[0]); + ((NV0080_CTRL_DMA_UPDATE_PDE_2_PARAMS*)pParams)->pPdeBuffer = paramCopies[0].pUserParams; + + break; + } + + default: + { + return NV_OK; + } + } + + return status; +} diff --git a/src/nvidia/src/kernel/rmapi/entry_points.c b/src/nvidia/src/kernel/rmapi/entry_points.c new file mode 100644 index 000000000..fcb7c61d0 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/entry_points.c @@ -0,0 +1,615 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rmapi.h" +#include "entry_points.h" +#include "deprecated_context.h" +#include "os/os.h" + +#define RMAPI_DEPRECATED(pFunc, pArgs, bUserModeArgs) \ + do \ + { \ + DEPRECATED_CONTEXT_EXT context; \ + rmapiInitDeprecatedContext(&context, NULL, bUserModeArgs, NV_FALSE); \ + pFunc(&context.parent, pArgs); \ + } while (0) + +#define RMAPI_NOT_SUPPORTED(pArgs) \ + pArgs->status = NV_ERR_NOT_SUPPORTED; + +// Primary APIs +static void _nv04Alloc(NVOS21_PARAMETERS*, NvBool); +static void _nv01Free(NVOS00_PARAMETERS*, NvBool); +static void _nv04Control(NVOS54_PARAMETERS*, NvBool, NvBool); +static void _nv04DupObject(NVOS55_PARAMETERS*, NvBool); +static void _nv04Share(NVOS57_PARAMETERS*, NvBool); +static void _nv04MapMemory(NVOS33_PARAMETERS*, NvBool, NvBool); +static void _nv04UnmapMemory(NVOS34_PARAMETERS*, NvBool, NvBool); +static void _nv04MapMemoryDma(NVOS46_PARAMETERS*, NvBool); +static void _nv04UnmapMemoryDma(NVOS47_PARAMETERS*, NvBool); + +// Legacy APIs +static void _nv01AllocMemory(NVOS02_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocMemory, pArgs, bUserModeArgs); } +static void _nv01AllocObject(NVOS05_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocObject, pArgs, bUserModeArgs); } +static void _nv04AddVblankCallback(NVOS61_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAddVblankCallback, pArgs, bUserModeArgs); } +static void _nv04AllocContextDma(NVOS39_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedAllocContextDma, pArgs, bUserModeArgs); } +static void _nv04BindContextDma(NVOS49_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedBindContextDma, pArgs, bUserModeArgs); } +static void _nv04GetMemoryInfo(NVOS58_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_NOT_SUPPORTED(pArgs); } +static void _nv04I2CAccess(NVOS_I2C_ACCESS_PARAMS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedI2CAccess, pArgs, bUserModeArgs); } +static void _nv04IdleChannels(NVOS30_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedIdleChannels, pArgs, bUserModeArgs); } +static void _nv04MapMemoryDmaOffset(NVOS59_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_NOT_SUPPORTED(pArgs); } +static void _nv04UnmapMemoryDmaOffset(NVOS60_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_NOT_SUPPORTED(pArgs); } +static void _nv04UpdateContextDma(NVOS37_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_NOT_SUPPORTED(pArgs); } +static void _nv04VidHeapControl(NVOS32_PARAMETERS *pArgs, NvBool bUserModeArgs) { RMAPI_DEPRECATED(RmDeprecatedVidHeapControl, pArgs, bUserModeArgs); } + +static void _nv04AllocWithSecInfo(NVOS21_PARAMETERS*, API_SECURITY_INFO); +static void _nv04AllocWithAccessSecInfo(NVOS64_PARAMETERS*, API_SECURITY_INFO); +static void _nv04ControlWithSecInfo(NVOS54_PARAMETERS*, API_SECURITY_INFO, NvBool bInternalCall); +static void _nv01FreeWithSecInfo(NVOS00_PARAMETERS*, API_SECURITY_INFO); +static void _nv04AllocWithAccess(NVOS64_PARAMETERS*, NvBool); +static void _nv04MapMemoryWithSecInfo(NVOS33_PARAMETERS*, API_SECURITY_INFO); +static void _nv04UnmapMemoryWithSecInfo(NVOS34_PARAMETERS*, API_SECURITY_INFO); +static void _nv04MapMemoryDmaWithSecInfo(NVOS46_PARAMETERS*, API_SECURITY_INFO); +static void _nv04UnmapMemoryDmaWithSecInfo(NVOS47_PARAMETERS*, API_SECURITY_INFO); +static void _nv04DupObjectWithSecInfo(NVOS55_PARAMETERS*, API_SECURITY_INFO); +static void _nv04ShareWithSecInfo(NVOS57_PARAMETERS*, API_SECURITY_INFO); + + +// +// RM 
API entry points +// +// User mode clients should call base version (no suffix). +// +// Kernel mode clients should call Kernel or User version +// (call User if the parameters come from a user mode source). +// + +void Nv01AllocMemory(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_TRUE); } +void Nv01AllocObject(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_TRUE); } +void Nv01Free(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_TRUE); } +void Nv04AddVblankCallback(NVOS61_PARAMETERS *pArgs) { _nv04AddVblankCallback(pArgs, NV_TRUE); } +void Nv04Alloc(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_TRUE); } +void Nv04AllocWithAccess(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_TRUE); } +void Nv04AllocContextDma(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_TRUE); } +void Nv04BindContextDma(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_TRUE); } +void Nv04Control(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_TRUE, NV_FALSE); } +void Nv04DupObject(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_TRUE); } +void Nv04Share(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_TRUE); } +void Nv04GetMemoryInfo(NVOS58_PARAMETERS *pArgs) { _nv04GetMemoryInfo(pArgs, NV_TRUE); } +void Nv04I2CAccess(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_TRUE); } +void Nv04IdleChannels(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_TRUE); } +void Nv04MapMemory(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04MapMemoryDma(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_TRUE); } +void Nv04MapMemoryDmaOffset (NVOS59_PARAMETERS *pArgs) { _nv04MapMemoryDmaOffset(pArgs, NV_TRUE); } +void Nv04UnmapMemory(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04UnmapMemoryDma(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_TRUE); } +void Nv04UnmapMemoryDmaOffset(NVOS60_PARAMETERS *pArgs) { _nv04UnmapMemoryDmaOffset(pArgs, NV_TRUE); } +void Nv04UpdateContextDma(NVOS37_PARAMETERS *pArgs) { _nv04UpdateContextDma(pArgs, NV_TRUE); } +void Nv04VidHeapControl(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_TRUE); } + +void Nv01AllocMemoryUser(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_TRUE); } +void Nv01AllocObjectUser(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_TRUE); } +void Nv01FreeUser(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_TRUE); } +void Nv04AddVblankCallbackUser(NVOS61_PARAMETERS *pArgs) { _nv04AddVblankCallback(pArgs, NV_TRUE); } +void Nv04AllocUser(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_TRUE); } +void Nv04AllocWithAccessUser(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_TRUE); } +void Nv04AllocContextDmaUser(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_TRUE); } +void Nv04BindContextDmaUser(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_TRUE); } +void Nv04ControlUser(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_TRUE, NV_FALSE); } +void Nv04DupObjectUser(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_TRUE); } +void Nv04ShareUser(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_TRUE); } +void Nv04GetMemoryInfoUser(NVOS58_PARAMETERS *pArgs) { _nv04GetMemoryInfo(pArgs, NV_TRUE); } +void Nv04I2CAccessUser(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_TRUE); } +void Nv04IdleChannelsUser(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_TRUE); } +void Nv04MapMemoryUser(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_TRUE, NV_FALSE); } +void 
Nv04MapMemoryDmaUser(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_TRUE); } +void Nv04MapMemoryDmaOffsetUser(NVOS59_PARAMETERS *pArgs) { _nv04MapMemoryDmaOffset(pArgs, NV_TRUE); } +void Nv04UnmapMemoryUser(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_TRUE, NV_FALSE); } +void Nv04UnmapMemoryDmaUser(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_TRUE); } +void Nv04UnmapMemoryDmaOffsetUser(NVOS60_PARAMETERS *pArgs) { _nv04UnmapMemoryDmaOffset(pArgs, NV_TRUE); } +void Nv04UpdateContextDmaUser(NVOS37_PARAMETERS *pArgs) { _nv04UpdateContextDma(pArgs, NV_TRUE); } +void Nv04VidHeapControlUser(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_TRUE); } + +void Nv01AllocMemoryKernel(NVOS02_PARAMETERS *pArgs) { _nv01AllocMemory(pArgs, NV_FALSE); } +void Nv01AllocObjectKernel(NVOS05_PARAMETERS *pArgs) { _nv01AllocObject(pArgs, NV_FALSE); } +void Nv01FreeKernel(NVOS00_PARAMETERS *pArgs) { _nv01Free(pArgs, NV_FALSE); } +void Nv04AddVblankCallbackKernel(NVOS61_PARAMETERS *pArgs) { _nv04AddVblankCallback(pArgs, NV_FALSE); } +void Nv04AllocKernel(NVOS21_PARAMETERS *pArgs) { _nv04Alloc(pArgs, NV_FALSE); } +void Nv04AllocWithAccessKernel(NVOS64_PARAMETERS *pArgs) { _nv04AllocWithAccess(pArgs, NV_FALSE); } +void Nv04AllocContextDmaKernel(NVOS39_PARAMETERS *pArgs) { _nv04AllocContextDma(pArgs, NV_FALSE); } +void Nv04BindContextDmaKernel(NVOS49_PARAMETERS *pArgs) { _nv04BindContextDma(pArgs, NV_FALSE); } +void Nv04ControlKernel(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_FALSE, NV_FALSE); } +void Nv04DupObjectKernel(NVOS55_PARAMETERS *pArgs) { _nv04DupObject(pArgs, NV_FALSE); } +void Nv04ShareKernel(NVOS57_PARAMETERS *pArgs) { _nv04Share(pArgs, NV_FALSE); } +void Nv04GetMemoryInfoKernel(NVOS58_PARAMETERS *pArgs) { _nv04GetMemoryInfo(pArgs, NV_FALSE); } +void Nv04I2CAccessKernel(NVOS_I2C_ACCESS_PARAMS *pArgs) { _nv04I2CAccess(pArgs, NV_FALSE); } +void Nv04IdleChannelsKernel(NVOS30_PARAMETERS *pArgs) { _nv04IdleChannels(pArgs, NV_FALSE); } +void Nv04MapMemoryKernel(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_FALSE, NV_FALSE); } +void Nv04MapMemoryDmaKernel(NVOS46_PARAMETERS *pArgs) { _nv04MapMemoryDma(pArgs, NV_FALSE); } +void Nv04MapMemoryDmaOffsetKernel(NVOS59_PARAMETERS *pArgs) { _nv04MapMemoryDmaOffset(pArgs, NV_FALSE); } +void Nv04UnmapMemoryKernel(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_FALSE, NV_FALSE); } +void Nv04UnmapMemoryDmaKernel(NVOS47_PARAMETERS *pArgs) { _nv04UnmapMemoryDma(pArgs, NV_FALSE); } +void Nv04UnmapMemoryDmaOffsetKernel(NVOS60_PARAMETERS *pArgs) { _nv04UnmapMemoryDmaOffset(pArgs, NV_FALSE); } +void Nv04UpdateContextDmaKernel(NVOS37_PARAMETERS *pArgs) { _nv04UpdateContextDma(pArgs, NV_FALSE); } +void Nv04VidHeapControlKernel(NVOS32_PARAMETERS *pArgs) { _nv04VidHeapControl(pArgs, NV_FALSE); } + +// MODS-specific API functions which ignore RM locking model +#if defined(LINUX_MFG) +void Nv04ControlInternal(NVOS54_PARAMETERS *pArgs) { _nv04Control(pArgs, NV_FALSE, NV_TRUE); } +void Nv04MapMemoryInternal(NVOS33_PARAMETERS *pArgs) { _nv04MapMemory(pArgs, NV_FALSE, NV_TRUE); } +void Nv04UnmapMemoryInternal(NVOS34_PARAMETERS *pArgs) { _nv04UnmapMemory(pArgs, NV_FALSE, NV_TRUE); } +#endif + +#define RMAPI_DEPRECATED_WITH_SECINFO(pFunc, pArgs, secInfo) \ + do \ + { \ + DEPRECATED_CONTEXT_EXT context; \ + rmapiInitDeprecatedContext(&context, &(secInfo), \ + (secInfo).paramLocation != PARAM_LOCATION_KERNEL, \ + NV_FALSE); \ + (pFunc)(&context.parent, (pArgs)); \ + } while (0) + +void Nv01AllocMemoryWithSecInfo(NVOS02_PARAMETERS 
*pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocMemory, pArgs, secInfo); } +void Nv01AllocObjectWithSecInfo(NVOS05_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocObject, pArgs, secInfo); } +void Nv04AllocWithSecInfo(NVOS21_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04AllocWithSecInfo(pArgs, secInfo); } +void Nv04AllocWithAccessSecInfo(NVOS64_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04AllocWithAccessSecInfo(pArgs, secInfo); } +void Nv01FreeWithSecInfo(NVOS00_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv01FreeWithSecInfo(pArgs, secInfo); } +void Nv04ControlWithSecInfo(NVOS54_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04ControlWithSecInfo(pArgs, secInfo, NV_FALSE); } +void Nv04VidHeapControlWithSecInfo(NVOS32_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedVidHeapControl, pArgs, secInfo); } +void Nv04IdleChannelsWithSecInfo(NVOS30_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedIdleChannels, pArgs, secInfo); } +void Nv04MapMemoryWithSecInfo(NVOS33_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04MapMemoryWithSecInfo(pArgs, secInfo); } +void Nv04UnmapMemoryWithSecInfo(NVOS34_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04UnmapMemoryWithSecInfo(pArgs, secInfo); } +void Nv04I2CAccessWithSecInfo(NVOS_I2C_ACCESS_PARAMS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedI2CAccess, pArgs, secInfo); } +void Nv04AllocContextDmaWithSecInfo(NVOS39_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedAllocContextDma, pArgs, secInfo); } +void Nv04BindContextDmaWithSecInfo(NVOS49_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { RMAPI_DEPRECATED_WITH_SECINFO(RmDeprecatedBindContextDma, pArgs, secInfo); } +void Nv04MapMemoryDmaWithSecInfo(NVOS46_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04MapMemoryDmaWithSecInfo(pArgs, secInfo); } +void Nv04UnmapMemoryDmaWithSecInfo(NVOS47_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04UnmapMemoryDmaWithSecInfo(pArgs, secInfo); } +void Nv04DupObjectWithSecInfo(NVOS55_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04DupObjectWithSecInfo(pArgs, secInfo); } +void Nv04ShareWithSecInfo(NVOS57_PARAMETERS *pArgs, API_SECURITY_INFO secInfo) { _nv04ShareWithSecInfo(pArgs, secInfo); } + + +static void +XlateUserModeArgsToSecInfo +( + NvBool bUserModeArgs, + NvBool bInternalCall, + API_SECURITY_INFO *pSecInfo +) +{ + portMemSet(pSecInfo, 0, sizeof(*pSecInfo)); + + if (bInternalCall == NV_FALSE && bUserModeArgs == NV_TRUE) + { + pSecInfo->privLevel = osIsAdministrator() ? 
RS_PRIV_LEVEL_USER_ROOT : RS_PRIV_LEVEL_USER; + pSecInfo->paramLocation = PARAM_LOCATION_USER; + } + else + { + pSecInfo->privLevel = RS_PRIV_LEVEL_KERNEL; + pSecInfo->paramLocation = PARAM_LOCATION_KERNEL; + } +} + +/* +NV04_ALLOC + NVOS21_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvV32 status; +*/ + +static void _nv04Alloc +( + NVOS21_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, RMAPI_ALLOC_FLAGS_NONE, + NvP64_NULL, &secInfo); +} // end of Nv04Alloc() + +/* +NV04_ALLOC + NVOS21_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvV32 status; +*/ + +static void _nv04AllocWithSecInfo +( + NVOS21_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, RMAPI_ALLOC_FLAGS_NONE, + NvP64_NULL, &secInfo); +} // end of _nv04AllocWithSecInfo() + +/* +NV04_ALLOC_WITH_ACCESS + NVOS64_PARAMETERS; + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectNew; + NvV32 hClass; + NvP64 pAllocParms; + NvP64 pRightsRequested; + NvV32 status; +*/ + +static void _nv04AllocWithAccess +( + NVOS64_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, RMAPI_ALLOC_FLAGS_NONE, + pArgs->pRightsRequested, &secInfo); +} // end of _nv04AllocWithAccess() + +static void _nv04AllocWithAccessSecInfo +( + NVOS64_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->AllocWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectParent, &pArgs->hObjectNew, + pArgs->hClass, pArgs->pAllocParms, RMAPI_ALLOC_FLAGS_NONE, + pArgs->pRightsRequested, &secInfo); +} // end of _nv04AllocWithAccessSecInfo() + +/* +NV01_FREE + NVOS00_PARAMETERS: + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +*/ + +static void _nv01Free +( + NVOS00_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->FreeWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectOld, RMAPI_FREE_FLAGS_NONE, &secInfo); +} // end of Nv01Free() + +/* +NV01_FREE + NVOS00_PARAMETERS: + NvHandle hRoot; + NvHandle hObjectParent; + NvHandle hObjectOld; + NvV32 status; +*/ + +static void _nv01FreeWithSecInfo +( + NVOS00_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->FreeWithSecInfo(pRmApi, pArgs->hRoot, pArgs->hObjectOld, RMAPI_FREE_FLAGS_NONE, &secInfo); +} // end of Nv01FreeWithSecInfo() + +/* +NV04_MAP_MEMORY + NVOS33_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvU64 offset; + NvU64 
length; + NvP64 pLinearAddress; + NvU32 status; + NvU32 flags; +*/ +static void _nv04MapMemory +( + NVOS33_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->MapToCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, pArgs->offset, + pArgs->length, &pArgs->pLinearAddress, pArgs->flags, &secInfo); +} // end of Nv04MapMemory() + +static void _nv04MapMemoryWithSecInfo +( + NVOS33_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->MapToCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, pArgs->offset, + pArgs->length, &pArgs->pLinearAddress, pArgs->flags, &secInfo); +} + +/* +NV04_UNMAP_MEMORY + NVOS34_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hMemory; + NvP64 pLinearAddress; + NvU32 status; + NvU32 flags; +*/ +static void _nv04UnmapMemory +( + NVOS34_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->UnmapFromCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, + pArgs->pLinearAddress, pArgs->flags, osGetCurrentProcess(), &secInfo); +} // end of Nv04UnmapMemory() + +static void _nv04UnmapMemoryWithSecInfo +( + NVOS34_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->UnmapFromCpuWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hMemory, + pArgs->pLinearAddress, pArgs->flags, osGetCurrentProcess(), &secInfo); +} + +static void _nv04MapMemoryDma +( + NVOS46_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->MapWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hDma, + pArgs->hMemory, pArgs->offset, pArgs->length, pArgs->flags, + &pArgs->dmaOffset, &secInfo); +} // end of Nv04MapMemoryDma() + +static void _nv04MapMemoryDmaWithSecInfo +( + NVOS46_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->MapWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hDma, + pArgs->hMemory, pArgs->offset, pArgs->length, pArgs->flags, + &pArgs->dmaOffset, &secInfo); +} + +/* +NV04_UNMAP_MEMORY_DMA + NVOS47_PARAMETERS: + NvHandle hClient; + NvHandle hDevice; + NvHandle hDma; + NvHandle hMemory; + NvV32 flags; + NvU64 dmaOffset; + NvV32 status; +*/ +static void _nv04UnmapMemoryDma +( + NVOS47_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->UnmapWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hDma, + pArgs->hMemory, pArgs->flags, pArgs->dmaOffset, &secInfo); +} // end of Nv04UnmapMemoryDma() + +static void _nv04UnmapMemoryDmaWithSecInfo +( + NVOS47_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + + RM_API *pRmApi = 
rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->UnmapWithSecInfo(pRmApi, pArgs->hClient, pArgs->hDevice, pArgs->hDma, + pArgs->hMemory, pArgs->flags, pArgs->dmaOffset, &secInfo); +} + +/* +NV04_CONTROL + NVOS54_PARAMETERS: + NvHandle hClient; + NvHandle hObject; + NvV32 cmd; + NvP64 params; + NvU32 paramsSize; + NvV32 status; +*/ +static void _nv04ControlWithSecInfo +( + NVOS54_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo, + NvBool bInternalCall +) +{ + RmDeprecatedControlHandler pDeprecatedHandler = RmDeprecatedGetControlHandler(pArgs); + if (pDeprecatedHandler != NULL) + { + DEPRECATED_CONTEXT_EXT ctxGraveyard; + rmapiInitDeprecatedContext(&ctxGraveyard, &secInfo, NV_FALSE, NV_FALSE); + pArgs->status = pDeprecatedHandler(&secInfo, &ctxGraveyard.parent, pArgs); + } + else + { + RM_API *pRmApi = rmapiGetInterface(bInternalCall ? RMAPI_MODS_LOCK_BYPASS : RMAPI_EXTERNAL); + + pArgs->status = pRmApi->ControlWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, pArgs->cmd, + pArgs->params, pArgs->paramsSize, pArgs->flags, &secInfo); + } +} // end of Nv04Control() + +/* +NV04_CONTROL + NVOS54_PARAMETERS: + NvHandle hClient; + NvHandle hObject; + NvV32 cmd; + NvP64 params; + NvU32 paramsSize; + NvV32 status; +*/ +static void _nv04Control +( + NVOS54_PARAMETERS *pArgs, + NvBool bUserModeArgs, + NvBool bInternalCall +) +{ + API_SECURITY_INFO secInfo = {0}; + XlateUserModeArgsToSecInfo(bUserModeArgs, bInternalCall, &secInfo); + _nv04ControlWithSecInfo(pArgs, secInfo, bInternalCall); +} // end of Nv04Control() + +/* +NV04_DUP_OBJECT + NVOS55_PARAMETERS: + NvHandle hClient; + NvHandle hParent; + NvHandle hObject; + NvHandle hClientSrc; + NvHandle hObjectSrc; + NvU32 flags; + NvU32 status; +*/ +static void _nv04DupObject +( + NVOS55_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->DupObjectWithSecInfo(pRmApi, pArgs->hClient, pArgs->hParent, &pArgs->hObject, + pArgs->hClientSrc, pArgs->hObjectSrc, pArgs->flags, &secInfo); +} // end of Nv04DupObject() + +static void _nv04DupObjectWithSecInfo +( + NVOS55_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->DupObjectWithSecInfo(pRmApi, pArgs->hClient, pArgs->hParent, &pArgs->hObject, + pArgs->hClientSrc, pArgs->hObjectSrc, pArgs->flags, &secInfo); +} + +static void _nv04Share +( + NVOS57_PARAMETERS *pArgs, + NvBool bUserModeArgs +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + API_SECURITY_INFO secInfo; + + XlateUserModeArgsToSecInfo(bUserModeArgs, NV_FALSE, &secInfo); + + pArgs->status = pRmApi->ShareWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, + &pArgs->sharePolicy, &secInfo); +} // end of Nv04Share() + +static void _nv04ShareWithSecInfo +( + NVOS57_PARAMETERS *pArgs, + API_SECURITY_INFO secInfo +) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL); + + pArgs->status = pRmApi->ShareWithSecInfo(pRmApi, pArgs->hClient, pArgs->hObject, + &pArgs->sharePolicy, &secInfo); +} diff --git a/src/nvidia/src/kernel/rmapi/entry_points.h b/src/nvidia/src/kernel/rmapi/entry_points.h new file mode 100644 index 000000000..aa3d9cbd4 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/entry_points.h @@ -0,0 +1,401 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _ENTRYPOINTS_H_ +#define _ENTRYPOINTS_H_ + +// +// Internal handlers for RM APIs +// + +NV_STATUS +rmapiAlloc +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + void *pAllocParams +); + +NV_STATUS +rmapiAllocWithHandle +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 hClass, + void *pAllocParams +); + +NV_STATUS +rmapiAllocWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 flags, + NvP64 pRightsRequired, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiAllocWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvU32 hClass, + NvP64 pAllocParams, + NvU32 flags, + NvP64 pRightsRequired, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFree +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject +); + +NV_STATUS +rmapiFreeWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFreeWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFreeClientList +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients +); + +NV_STATUS +rmapiFreeClientListWithSecInfo +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiFreeClientListWithSecInfoTls +( + RM_API *pRmApi, + NvHandle *phClientList, + NvU32 numClients, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiControl +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void *pParams, + NvU32 paramsSize +); + +NV_STATUS +rmapiControlWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiControlWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + NvP64 pParams, + NvU32 paramsSize, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiDupObject +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +); + +NV_STATUS +rmapiDupObjectWithSecInfo +( + RM_API *pRmApi, + 
NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiDupObjectWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiShare +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy +); + +NV_STATUS +rmapiShareWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiShareWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMapToCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + void **ppCpuVirtAddr, + NvU32 flags +); + +NV_STATUS +rmapiMapToCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMapToCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapFromCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pLinearAddress, + NvU32 flags, + NvU32 ProcessId +); + +NV_STATUS +rmapiUnmapFromCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapFromCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiMap +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset +); + +NV_STATUS +rmapiMapWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset, + API_SECURITY_INFO *pSecInfo +); + + +NV_STATUS +rmapiMapWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmap +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset +); + +NV_STATUS +rmapiUnmapWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + API_SECURITY_INFO *pSecInfo +); + +NV_STATUS +rmapiUnmapWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + API_SECURITY_INFO *pSecInfo +); + +#endif // _ENTRYPOINTS_H_ + diff --git a/src/nvidia/src/kernel/rmapi/event.c b/src/nvidia/src/kernel/rmapi/event.c new file mode 100644 index 000000000..95b347d94 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/event.c @@ -0,0 
+1,633 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "os/os.h" +#include "rmapi/event.h" +#include "rmapi/resource_fwd_decls.h" +#include "vgpu/rpc.h" +#include "gpu/device/device.h" +#include "core/locks.h" +#include "rmapi/rs_utils.h" + +#include "resserv/rs_client.h" +#include "class/cl0005.h" + +#include "ctrl/ctrl0000/ctrl0000event.h" // NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_* + +#if (!NV_RM_STUB_RPC) +static NV_STATUS _eventRpcForType(NvHandle hClient, NvHandle hObject); +#endif + +NV_STATUS +eventConstruct_IMPL +( + Event *pEvent, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV0005_ALLOC_PARAMETERS *pNv0050AllocParams = pParams->pAllocParams; + RsClient *pRsClient = pCallContext->pClient; + RsResourceRef *pClientRef; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION *ppEventNotification; + NvHandle hChannel = 0x0; + OBJGPU *pGpu = NULL; + RS_PRIV_LEVEL privLevel = pParams->pSecInfo->privLevel; + NvBool bUserOsEventHandle = NV_FALSE; + + // never allow user mode/non-root clients to create ring0 callbacks as + // we can not trust the function pointer (encoded in data). + if ((NV01_EVENT_KERNEL_CALLBACK == pResourceRef->externalClassId) || + (NV01_EVENT_KERNEL_CALLBACK_EX == pResourceRef->externalClassId)) + { + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + // sometimes it is nice to hook up callbacks for debug purposes + // -- but disable the override for release builds! +#if defined(DEBUG) || defined(DEVELOP) + if (!(pNv0050AllocParams->notifyIndex & NV01_EVENT_PERMIT_NON_ROOT_EVENT_KERNEL_CALLBACK_CREATION)) +#endif + { + return NV_ERR_ILLEGAL_ACTION; + } + } + } + +#if (!NV_RM_STUB_RPC) + if (_eventRpcForType(pNv0050AllocParams->hParentClient, pNv0050AllocParams->hSrcResource)) + { + RsResourceRef *pSrcRef; + NV_STATUS tmpStatus; + + tmpStatus = serverutilGetResourceRef(pNv0050AllocParams->hParentClient, + pNv0050AllocParams->hSrcResource, + &pSrcRef); + + if (tmpStatus == NV_OK) + { + hChannel = pSrcRef->pParentRef ? pSrcRef->pParentRef->hResource : 0; + pGpu = CliGetGpuFromContext(pSrcRef, NULL); + + if (pGpu == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "RmAllocEvent could not set pGpu. 
hClient=0x%x, hObject=0x%x\n", + pRsClient->hClient, pResourceRef->hResource); + } + } + } +#endif + + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pRsClient->hClient, &pClientRef)); + + // add event to client and parent object + rmStatus = eventInit(pEvent, + pCallContext, + pNv0050AllocParams->hParentClient, + pNv0050AllocParams->hSrcResource, + &ppEventNotification); + if (rmStatus == NV_OK) + { + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + // In RM-offload, we don't allocate ContextDma in GSP-RM unless there + // is any necessity to use it (e.g. display channel binding time). So + // GSP-RM will find no valid object if the event is associated with + // ContextDma object. So we are ignoring the event allocation here if + // the event is associated with ContextDma object. + // + if (pGpu != NULL) + { + RsResourceRef *pSourceRef; + + if (IS_GSP_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN( + serverutilGetResourceRef(pNv0050AllocParams->hParentClient, + pNv0050AllocParams->hSrcResource, + &pSourceRef)); + } + + if ( + (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_GSP_CLIENT(pGpu) && pSourceRef->internalClassId != classId(ContextDma)) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && !(pNv0050AllocParams->notifyIndex & NV01_EVENT_NONSTALL_INTR)))) + { + // + // In SR-IOV enabled systems, nonstall events can be registered + // directly with guest RM since guest RM is capable of + // receiving and handling nonstall interrupts itself. In + // paravirtualized systems, we always need to use the RPC to + // host RM. + // + NV_RM_RPC_ALLOC_EVENT(pGpu, + pRsClient->hClient, + pEvent->hNotifierClient, + hChannel, + pEvent->hNotifierResource, + pResourceRef->hResource, + pResourceRef->externalClassId, + pNv0050AllocParams->notifyIndex, + rmStatus); + } + } + + if (NV01_EVENT_OS_EVENT == pResourceRef->externalClassId) + { + // convert a user event handle to its kernel equivalent. 
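+            // (Note: a user-mode client supplies an OS-specific event handle in
+            // 'data'; osUserHandleToKernelPtr translates it into a form the
+            // kernel can later use to signal the event, and bUserOsEventHandle
+            // records that the handle originated in user space.)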
+ if (privLevel <= RS_PRIV_LEVEL_USER_ROOT) + { + rmStatus = osUserHandleToKernelPtr(pRsClient->hClient, + pNv0050AllocParams->data, + &pNv0050AllocParams->data); + bUserOsEventHandle = NV_TRUE; + } + } + + if (rmStatus == NV_OK) + rmStatus = registerEventNotification(ppEventNotification, + pRsClient->hClient, + pEvent->hNotifierResource, + pResourceRef->hResource, + pNv0050AllocParams->notifyIndex, + pResourceRef->externalClassId, + pNv0050AllocParams->data, + bUserOsEventHandle); + } + + if (rmStatus != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + eventDestruct_IMPL(pEvent); + return rmStatus; +} + +void eventDestruct_IMPL +( + Event *pEvent +) +{ + CALL_CONTEXT *pCallContext; + RS_RES_FREE_PARAMS_INTERNAL *pParams; + + RsClient* pRsClient; + NvHandle hEventClient; + NV_STATUS status = NV_OK; + NvHandle hEvent; + NotifShare *pNotifierShare; + + resGetFreeParams(staticCast(pEvent, RsResource), &pCallContext, &pParams); + pRsClient = pCallContext->pClient; + hEventClient = pRsClient->hClient; + hEvent = pCallContext->pResourceRef->hResource; + + LOCK_METER_DATA(FREE_EVENT, 0, 0, 0); + + pNotifierShare = pEvent->pNotifierShare; + if (pNotifierShare != NULL) + { + if (pNotifierShare->pNotifier != NULL) + { + status = inotifyUnregisterEvent(pNotifierShare->pNotifier, + pNotifierShare->hNotifierClient, + pNotifierShare->hNotifierResource, + hEventClient, + hEvent); + } + serverFreeShare(&g_resServ, staticCast(pEvent->pNotifierShare, RsShared)); + } + + if (pParams != NULL) + pParams->status = status; +} + +NV_STATUS notifyUnregisterEvent_IMPL +( + Notifier *pNotifier, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + NvHandle hEventClient, + NvHandle hEvent +) +{ + NV_STATUS status = NV_OK; + PEVENTNOTIFICATION *ppEventNotification; + + ppEventNotification = inotifyGetNotificationListPtr(staticCast(pNotifier, INotifier)); + + // delete the event from the parent object and client + if (*ppEventNotification != NULL) + { + +#if (!NV_RM_STUB_RPC) + if (_eventRpcForType(hNotifierClient, hNotifierResource)) + { + OBJGPU *pGpu = CliGetGpuFromHandle(hNotifierClient, hNotifierResource, NULL); + + if (pGpu != NULL) + { + RsResourceRef *pNotifierRef = NULL; + + if (IS_GSP_CLIENT(pGpu)) + { + NV_ASSERT_OK_OR_RETURN(serverutilGetResourceRef(hNotifierClient, hNotifierResource, &pNotifierRef)); + } + + // + // vGPU: + // + // Since vGPU does all real hardware management in the + // host, if we are in guest OS (where IS_VIRTUAL(pGpu) is true), + // do an RPC to the host to do the hardware update. + // + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu) || + (IS_GSP_CLIENT(pGpu) && pNotifierRef->internalClassId != classId(ContextDma)) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && !((*ppEventNotification)->bNonStallIntrEvent))) + { + // + // In SR-IOV enabled systems, nonstall events are registered + // directly with guest RM since guest RM is capable of + // receiving and handling nonstall interrupts itself. We skip + // the allocation, so here, we skip the free too. In + // paravirtualized systems, we always need to use the RPC to + // host RM. + // + NV_RM_RPC_FREE(pGpu, hEventClient, hEventClient, hEvent, status); + } + } + else + { + NV_PRINTF(LEVEL_ERROR, + "RmFreeEvent could not set pGpu. 
hClient=0x%x, hObject=0x%x\n", + hNotifierClient, hNotifierResource); + } + } +#endif + + unregisterEventNotification(ppEventNotification, + hEventClient, + hNotifierResource, + hEvent); + + } + + return status; +} + +NV_STATUS +eventInit_IMPL +( + Event *pEvent, + CALL_CONTEXT *pCallContext, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + PEVENTNOTIFICATION **pppEventNotification +) +{ + NV_STATUS rmStatus = NV_OK; + RsClient *pRsClient = pCallContext->pClient; + RsClient *pNotifierClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + NotifShare *pNotifierShare = NULL; + + // validate event class + switch (pResourceRef->externalClassId) + { + case NV01_EVENT_KERNEL_CALLBACK: + case NV01_EVENT_KERNEL_CALLBACK_EX: + case NV01_EVENT_OS_EVENT: + break; + + default: + return NV_ERR_INVALID_CLASS; + } + + // RS-TODO remove support for this after per-client locking is enabled + if (pRsClient->hClient != hNotifierClient) + { + rmStatus = serverGetClientUnderLock(&g_resServ, hNotifierClient, &pNotifierClient); + if (rmStatus != NV_OK) + return rmStatus; + } + else + { + pNotifierClient = pRsClient; + } + + if (pNotifierClient != NULL) + { + RsResourceRef *pNotifierRef; + INotifier *pNotifier; + if (clientGetResourceRef(pNotifierClient, hNotifierResource, &pNotifierRef) != NV_OK) + return NV_ERR_INVALID_OBJECT; + + pNotifier = dynamicCast(pNotifierRef->pResource, INotifier); + if (pNotifier == NULL) + return NV_ERR_INVALID_OBJECT; + + rmStatus = inotifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, &pNotifierShare); + if (rmStatus != NV_OK) + return rmStatus; + + *pppEventNotification = inotifyGetNotificationListPtr(pNotifierShare->pNotifier); + } + + serverRefShare(&g_resServ, staticCast(pNotifierShare, RsShared)); + pEvent->pNotifierShare = pNotifierShare; + + // RS-TODO these can be looked up from share + pEvent->hNotifierClient = hNotifierClient; + pEvent->hNotifierResource = hNotifierResource; + pEvent->hEvent = pCallContext->pResourceRef->hResource; + + return rmStatus; +} + +NV_STATUS +notifyGetOrAllocNotifShare_IMPL +( + Notifier *pNotifier, + NvHandle hNotifierClient, + NvHandle hNotifierResource, + NotifShare **ppNotifierShare +) +{ + NV_STATUS status; + NotifShare *pNotifierShare; + + // + // Most objects that are notifiers will never have any events to notify so + // notifier shares are allocated as needed (i.e., when an event + // registers itself with the notifier.) 
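+    // The share itself is reference counted elsewhere in this file:
+    // eventInit_IMPL takes a reference via serverRefShare() and
+    // eventDestruct_IMPL drops it via serverFreeShare(), so multiple Event
+    // objects can attach to the same notifier.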
+ // + pNotifierShare = inotifyGetNotificationShare(staticCast(pNotifier, INotifier)); + if (pNotifierShare == NULL) + { + RsShared *pShare; + status = serverAllocShare(&g_resServ, classInfo(NotifShare), &pShare); + if (status != NV_OK) + return status; + + pNotifierShare = dynamicCast(pShare, NotifShare); + pNotifierShare->pNotifier = staticCast(pNotifier, INotifier); + pNotifierShare->hNotifierClient = hNotifierClient; + pNotifierShare->hNotifierResource = hNotifierResource; + inotifySetNotificationShare(staticCast(pNotifier, INotifier), pNotifierShare); + } + + if (ppNotifierShare) + *ppNotifierShare = pNotifierShare; + + return NV_OK; +} + +NV_STATUS +CliGetEventNotificationList +( + NvHandle hClient, + NvHandle hObject, + INotifier **ppNotifier, + PEVENTNOTIFICATION **pppEventNotification +) +{ + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef; + RsClient *pRsClient; + INotifier *pNotifier; + + *pppEventNotification = NULL; + + // Populate Resource Server information + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return status; + + status = clientGetResourceRef(pRsClient, hObject, &pResourceRef); + if (status != NV_OK) + return status; + + pNotifier = dynamicCast(pResourceRef->pResource, INotifier); + if (pNotifier != NULL) + *pppEventNotification = inotifyGetNotificationListPtr(pNotifier); + + if (*pppEventNotification == NULL) + return NV_ERR_INVALID_OBJECT; + + if (ppNotifier != NULL) + *ppNotifier = pNotifier; + + return NV_OK; +} + +NvBool +CliGetEventInfo +( + NvHandle hClient, + NvHandle hEvent, + Event **ppEvent +) +{ + RmClient *pClient; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_FALSE; + + pRsClient = staticCast(pClient, RsClient); + if (clientGetResourceRefByType(pRsClient, hEvent, classId(Event), &pResourceRef) != NV_OK) + return NV_FALSE; + + if (pResourceRef->pResource != NULL) + { + *ppEvent = dynamicCast(pResourceRef->pResource, Event); + return NV_TRUE; + } + + return NV_FALSE; + +} + +NvBool +CliDelObjectEvents +( + NvHandle hClient, + NvHandle hResource +) +{ + NotifShare *pNotifierShare; + INotifier *pNotifier; + RsClient *pRsClient; + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NV_FALSE; + + status = clientGetResourceRef(pRsClient, hResource, &pResourceRef); + if (status != NV_OK) + return NV_FALSE; + + // If not a notifier object, there aren't any events to free + pNotifier = dynamicCast(pResourceRef->pResource, INotifier); + + if (pNotifier == NULL) + return NV_TRUE; + + pNotifierShare = inotifyGetNotificationShare(pNotifier); + if (pNotifierShare != NULL) + { + while(pNotifierShare->pEventList != NULL) + { + PEVENTNOTIFICATION pEventNotif = pNotifierShare->pEventList; + status = inotifyUnregisterEvent(pNotifier, + pNotifierShare->hNotifierClient, + pNotifierShare->hNotifierResource, + pEventNotif->hEventClient, + pEventNotif->hEvent); + } + pNotifierShare->pNotifier = NULL; + } + + return NV_TRUE; + +} // end of CliDelObjectEvents() + +// **************************************************************************** +// System events +// **************************************************************************** + +void CliAddSystemEvent( + NvU32 event, + NvU32 status +) +{ + NvU32 temp; + PEVENTNOTIFICATION pEventNotification = NULL; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + 
RsResourceRef *pCliResRef; + NV_STATUS rmStatus = NV_OK; + Notifier *pNotifier; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (pClient->CliSysEventInfo.notifyActions[event] == NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE) + { + continue; + } + + temp = (pClient->CliSysEventInfo.systemEventsQueue.Head + 1) % NV_SYSTEM_EVENT_QUEUE_SIZE; + + if (temp == pClient->CliSysEventInfo.systemEventsQueue.Tail) + { + NV_PRINTF(LEVEL_ERROR, "system event queue is full"); + return; + } + + pClient->CliSysEventInfo.systemEventsQueue.EventQueue[pClient->CliSysEventInfo.systemEventsQueue.Head].event = event; + pClient->CliSysEventInfo.systemEventsQueue.EventQueue[pClient->CliSysEventInfo.systemEventsQueue.Head].status = status; + pClient->CliSysEventInfo.systemEventsQueue.Head = temp; + + rmStatus = clientGetResourceRef(staticCast(pClient, RsClient), pRsClient->hClient, &pCliResRef); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to look up resource reference handle: 0x%x\n", + pRsClient->hClient); + return; + } + + pNotifier = dynamicCast(pCliResRef->pResource, Notifier); + if (pNotifier != NULL) + pEventNotification = inotifyGetNotificationList(staticCast(pNotifier, INotifier)); + + if (pEventNotification != NULL) + { + while (pEventNotification) + { + if (pEventNotification->NotifyIndex == event) + { + if (osNotifyEvent(NULL, pEventNotification, 0, 0, 0) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to deliver event 0x%x", + event); + } + } + pEventNotification = pEventNotification->Next; + } + + if (pClient->CliSysEventInfo.notifyActions[event] == NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE) + { + pClient->CliSysEventInfo.notifyActions[event] = NV0000_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + } + } + } + + return; +} + +#if (!NV_RM_STUB_RPC) +static NV_STATUS +_eventRpcForType(NvHandle hClient, NvHandle hObject) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = serverutilGetResourceRef(hClient, hObject, &pResourceRef); + + if (status != NV_OK) + { + return NV_FALSE; + } + + if (objDynamicCastById(pResourceRef->pResource, classId(Subdevice)) || + objDynamicCastById(pResourceRef->pResource, classId(ChannelDescendant)) || + objDynamicCastById(pResourceRef->pResource, classId(ContextDma)) || + objDynamicCastById(pResourceRef->pResource, classId(DispChannel)) || + objDynamicCastById(pResourceRef->pResource, classId(TimerApi)) || + objDynamicCastById(pResourceRef->pResource, classId(KernelSMDebuggerSession))) + { + return NV_TRUE; + } + + return NV_FALSE; +} +#endif diff --git a/src/nvidia/src/kernel/rmapi/event_buffer.c b/src/nvidia/src/kernel/rmapi/event_buffer.c new file mode 100644 index 000000000..cbf6301a7 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/event_buffer.c @@ -0,0 +1,707 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "rmapi/event_buffer.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu_mgr/gpu_mgr.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi_utils.h" +#include "kernel/gpu/gr/fecs_event_list.h" +#include "mem_mgr/no_device_mem.h" +#include "class/cl90ce.h" +#include "class/cl0040.h" + +static NV_STATUS _allocAndMapMemory(CALL_CONTEXT *pCallContext, NvP64 pAddress, MEMORY_DESCRIPTOR** ppMemDesc, NvU64 size, NvBool bKernel, + NvP64* pKernelAddr, NvP64* pKernelPriv, NvP64* pUserAddr, NvP64* pUserPriv); + +static void _unmapAndFreeMemory(MEMORY_DESCRIPTOR *pMemDesc, NvBool bKernel, NvP64 kernelAddr, + NvP64 kernelPriv, NvP64 userAddr, NvP64 userPriv); + +NV_STATUS +eventbufferConstruct_IMPL +( + EventBuffer *pEventBuffer, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + NV_EVENT_BUFFER_ALLOC_PARAMETERS *pAllocParams = pParams->pAllocParams; + + EVENT_BUFFER_MAP_INFO *pKernelMap = &pEventBuffer->kernelMapInfo; + EVENT_BUFFER_MAP_INFO *pClientMap = &pEventBuffer->clientMapInfo; + + NvU32 hClient = pCallContext->pClient->hClient; + NvBool bKernel = (rmclientGetCachedPrivilegeByHandle(hClient) >= RS_PRIV_LEVEL_KERNEL); + + NvU32 recordBufferSize; + NvP64 kernelNotificationhandle; + Subdevice *pSubdevice = NULL; + NvBool bInternalAlloc = (pAllocParams->hBufferHeader == 0); + NvBool bNoDeviceMem = NV_FALSE; + NvBool bUsingVgpuStagingBuffer = NV_FALSE; + OBJGPU *pGpu = NULL; + RsResourceRef *pHeaderRef = NULL; + RsResourceRef *pRecordRef = NULL; + RsResourceRef *pVardataRef = NULL; + NvHandle hMapperClient = 0; + NvHandle hMapperDevice = 0; + + pAllocParams->bufferHeader = NvP64_NULL; + pAllocParams->recordBuffer = NvP64_NULL; + pAllocParams->vardataBuffer = NvP64_NULL; + + if (bInternalAlloc) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + NvBool bSupported = pSys->getProperty(pSys, PDB_PROP_SYS_INTERNAL_EVENT_BUFFER_ALLOC_ALLOWED); + NV_ASSERT_OR_RETURN(bSupported, NV_ERR_NOT_SUPPORTED); + } + else + { + NV_ASSERT_OR_RETURN((pAllocParams->hRecordBuffer != 0), NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((pAllocParams->vardataBufferSize == 0) ^ (pAllocParams->hVardataBuffer != 0)), + 
NV_ERR_INVALID_ARGUMENT); + + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hBufferHeader, &pHeaderRef); + if (status != NV_OK) + return status; + + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hRecordBuffer, &pRecordRef); + if (status != NV_OK) + return status; + + // Avoid mixing and matching backing-memory + if (pRecordRef->externalClassId != pHeaderRef->externalClassId) + return NV_ERR_INVALID_ARGUMENT; + + if (pAllocParams->hVardataBuffer != 0) + { + status = clientGetResourceRef(pCallContext->pClient, pAllocParams->hVardataBuffer, &pVardataRef); + if (status != NV_OK) + return status; + + if (pVardataRef->externalClassId != pHeaderRef->externalClassId) + return NV_ERR_INVALID_ARGUMENT; + } + + bNoDeviceMem = (pRecordRef->externalClassId == NV01_MEMORY_DEVICELESS); + + if (!bNoDeviceMem) + { + if (pAllocParams->hSubDevice == 0) + { + NV_PRINTF(LEVEL_WARNING, "hSubDevice must be provided.\n"); + return NV_ERR_INVALID_ARGUMENT; + } + } + } + + // bound check inputs and also check for overflow + if ((pAllocParams->recordSize == 0) || (pAllocParams->recordCount == 0) || + (!portSafeMulU32(pAllocParams->recordSize, pAllocParams->recordCount, &recordBufferSize)) || + (recordBufferSize / pAllocParams->recordCount != pAllocParams->recordSize) || + (pAllocParams->recordsFreeThreshold > pAllocParams->recordCount) || + (pAllocParams->vardataFreeThreshold > pAllocParams->vardataBufferSize)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + pEventBuffer->hClient = pCallContext->pClient->hClient; + pEventBuffer->hSubDevice = pAllocParams->hSubDevice; + if (pEventBuffer->hSubDevice) + { + status = subdeviceGetByHandle(pCallContext->pClient, pEventBuffer->hSubDevice, &pSubdevice); + if (status != NV_OK) + return NV_ERR_INVALID_OBJECT_HANDLE; + + pEventBuffer->subDeviceInst = pSubdevice->subDeviceInst; + pGpu = GPU_RES_GET_GPU(pSubdevice); + + if (!bNoDeviceMem) + { + if (IS_VIRTUAL_WITHOUT_SRIOV(pGpu)) + { + // Staging buffer should be mapped as read-only in guest RM + bUsingVgpuStagingBuffer = NV_TRUE; + } + + if (!bKernel) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + status = rmapiutilAllocClientAndDeviceHandles(pRmApi, + pGpu, + &pEventBuffer->hInternalClient, + &pEventBuffer->hInternalDevice, + &pEventBuffer->hInternalSubdevice); + + if (status != NV_OK) + return status; + + hMapperClient = pEventBuffer->hInternalClient; + hMapperDevice = pEventBuffer->hInternalDevice; + } + else + { + hMapperClient = pCallContext->pClient->hClient; + hMapperDevice = RES_GET_PARENT_HANDLE(pSubdevice); + } + } + } + + + // + // Use goto cleanup on failure below here + // + + if (!bInternalAlloc) + { + Memory *pMemory; + NvBool bRequireReadOnly = bUsingVgpuStagingBuffer || !bKernel; + + // + // Buffer header + // + pEventBuffer->pHeader = dynamicCast(pHeaderRef->pResource, Memory); + pMemory = pEventBuffer->pHeader; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < sizeof(NV_EVENT_BUFFER_HEADER)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + status = 
pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->headerAddr, + bUsingVgpuStagingBuffer + ? DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY) + : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE)); + + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + if (status != NV_OK) + goto cleanup; + + pKernelMap->headerAddr = pMemory->KernelVAddr; + } + + // + // Record buffer + // + pEventBuffer->pRecord = dynamicCast(pRecordRef->pResource, Memory); + pMemory = pEventBuffer->pRecord; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < recordBufferSize) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + status = pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->recordBuffAddr, + bUsingVgpuStagingBuffer + ? DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY) + : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE)); + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + pKernelMap->recordBuffAddr = pMemory->KernelVAddr; + if (status != NV_OK) + goto cleanup; + } + + // + // Vardata buffer [optional] + // + if (pAllocParams->hVardataBuffer != 0) + { + pEventBuffer->pVardata = dynamicCast(pVardataRef->pResource, Memory); + pMemory = pEventBuffer->pVardata; + if ((pMemory == NULL) || (bRequireReadOnly && !memdescGetFlag(pMemory->pMemDesc, MEMDESC_FLAGS_USER_READ_ONLY))) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (pMemory->Length < pAllocParams->vardataBufferSize) + { + status = NV_ERR_INVALID_ARGUMENT; + goto cleanup; + } + + if (!bNoDeviceMem) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hMemory = RES_GET_HANDLE(pMemory); + + if (!bKernel) + { + status = pRmApi->DupObject(pRmApi, + hMapperClient, + hMapperDevice, + &hMemory, + pCallContext->pClient->hClient, + hMemory, 0); + if (status != NV_OK) + { + goto cleanup; + } + } + + status = pRmApi->MapToCpu(pRmApi, + hMapperClient, + hMapperDevice, + hMemory, + 0, + pMemory->Length, + &pKernelMap->recordBuffAddr, + bUsingVgpuStagingBuffer + ? 
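/*
 * Illustrative sketch (not part of the original change): when the requesting
 * client is not kernel privileged, the code above first duplicates the
 * client's memory object into RM's internal client and only then CPU-maps it
 * through that client. A condensed view of that pattern, using the same
 * RM_API entry points but hypothetical handle and variable names:
 *
 *     NvHandle hDup = hUserMemory;              // duplicated handle, hypothetical
 *     status = pRmApi->DupObject(pRmApi, hInternalClient, hInternalDevice,
 *                                &hDup, hUserClient, hUserMemory, 0);
 *     if (status == NV_OK)
 *         status = pRmApi->MapToCpu(pRmApi, hInternalClient, hInternalDevice,
 *                                   hDup, 0, length, &kernelVa,
 *                                   DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE));
 *     // vGPU staging buffers are instead mapped _READ_ONLY, as above.
 */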
DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY) + : DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_WRITE)); + if (status != NV_OK) + { + goto cleanup; + } + } + else + { + status = memCreateKernelMapping(pMemory, NV_PROTECT_READ_WRITE, NV_TRUE); + if (status != NV_OK) + goto cleanup; + } + + pKernelMap->vardataBuffAddr = pMemory->KernelVAddr; + + refAddDependant(pVardataRef, pCallContext->pResourceRef); + } + + refAddDependant(pHeaderRef, pCallContext->pResourceRef); + refAddDependant(pRecordRef, pCallContext->pResourceRef); + } + + if (bInternalAlloc) + { + status = _allocAndMapMemory(pCallContext, + pAllocParams->bufferHeader, + &pEventBuffer->pHeaderDesc, + sizeof(NV_EVENT_BUFFER_HEADER), + bKernel, + &pKernelMap->headerAddr, + &pKernelMap->headerPriv, + &pClientMap->headerAddr, + &pClientMap->headerPriv); + if (status != NV_OK) + goto cleanup; + + status = _allocAndMapMemory(pCallContext, + pAllocParams->recordBuffer, + &pEventBuffer->pRecordBufDesc, + recordBufferSize, + bKernel, + &pKernelMap->recordBuffAddr, + &pKernelMap->recordBuffPriv, + &pClientMap->recordBuffAddr, + &pClientMap->recordBuffPriv); + if (status != NV_OK) + goto cleanup; + } + + eventBufferInitRecordBuffer(&pEventBuffer->producerInfo, + KERNEL_POINTER_FROM_NvP64(NV_EVENT_BUFFER_HEADER*, pKernelMap->headerAddr), + pKernelMap->recordBuffAddr, + pAllocParams->recordSize, + pAllocParams->recordCount, + recordBufferSize, + pAllocParams->recordsFreeThreshold); + + // not needed for all events, such as FECS context switch events + if (pAllocParams->vardataBufferSize != 0) + { + if (bInternalAlloc) + { + status = _allocAndMapMemory(pCallContext, + pAllocParams->vardataBuffer, + &pEventBuffer->pVardataBufDesc, + pAllocParams->vardataBufferSize, + bKernel, + &pKernelMap->vardataBuffAddr, + &pKernelMap->vardataBuffPriv, + &pClientMap->vardataBuffAddr, + &pClientMap->vardataBuffPriv); + + if (status != NV_OK) + goto cleanup; + } + + eventBufferInitVardataBuffer(&pEventBuffer->producerInfo, + pKernelMap->vardataBuffAddr, + pAllocParams->vardataBufferSize, + pAllocParams->vardataFreeThreshold); + } + + kernelNotificationhandle = (NvP64)pAllocParams->notificationHandle; + if (bKernel != NV_TRUE) + status = osUserHandleToKernelPtr(pCallContext->pClient->hClient, + kernelNotificationhandle, + &kernelNotificationhandle); + + eventBufferInitNotificationHandle(&pEventBuffer->producerInfo, kernelNotificationhandle); + eventBufferSetEnable(&pEventBuffer->producerInfo, NV_FALSE); + + // return user mode mappings + pAllocParams->bufferHeader = pClientMap->headerAddr; + pAllocParams->recordBuffer = pClientMap->recordBuffAddr; + pAllocParams->vardataBuffer = pClientMap->vardataBuffAddr; + + return NV_OK; + +cleanup: + eventbufferDestruct_IMPL(pEventBuffer); + return status; +} + +void +eventbufferDestruct_IMPL +( + EventBuffer *pEventBuffer +) +{ + CALL_CONTEXT *pCallContext; + EVENT_BUFFER_MAP_INFO *pClientMap = &pEventBuffer->clientMapInfo; + EVENT_BUFFER_MAP_INFO *pKernelMap = &pEventBuffer->kernelMapInfo; + NvBool bKernel = rmclientGetCachedPrivilegeByHandle(pEventBuffer->hClient) >= RS_PRIV_LEVEL_KERNEL; + void *notificationHandle = NvP64_VALUE(pEventBuffer->producerInfo.notificationHandle); + + resGetFreeParams(staticCast(pEventBuffer, RsResource), &pCallContext, NULL); + + if (notificationHandle != NULL) + { + osDereferenceObjectCount(notificationHandle); + } + + fecsRemoveAllBindpoints(pEventBuffer); + + _unmapAndFreeMemory(pEventBuffer->pHeaderDesc, bKernel, pKernelMap->headerAddr, + pKernelMap->headerPriv, pClientMap->headerAddr, 
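/*
 * Illustrative sketch (not part of the original change): a hypothetical caller
 * of the internal-allocation path exercised above would leave hBufferHeader at
 * 0 and let RM allocate and map the header and record storage itself. Field
 * names come from NV_EVENT_BUFFER_ALLOC_PARAMETERS as used in this file; the
 * sizes and the surrounding allocation call are illustrative only.
 *
 *     NV_EVENT_BUFFER_ALLOC_PARAMETERS allocParams = {0};
 *     allocParams.hBufferHeader        = 0;          // 0 => RM allocates internally
 *     allocParams.hSubDevice           = hSubdevice; // target subdevice, hypothetical
 *     allocParams.recordSize           = 64;         // bytes per record, non-zero
 *     allocParams.recordCount          = 1024;       // non-zero
 *     allocParams.recordsFreeThreshold = 256;        // must not exceed recordCount
 *     allocParams.vardataBufferSize    = 0;          // no variable-length payload
 *     // On success the constructor returns user-mode mappings in
 *     // allocParams.bufferHeader and allocParams.recordBuffer.
 */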
pClientMap->headerPriv); + + _unmapAndFreeMemory(pEventBuffer->pRecordBufDesc, bKernel, pKernelMap->recordBuffAddr, + pKernelMap->recordBuffPriv, pClientMap->recordBuffAddr, pClientMap->recordBuffPriv); + + _unmapAndFreeMemory(pEventBuffer->pVardataBufDesc, bKernel, pKernelMap->vardataBuffAddr, + pKernelMap->vardataBuffPriv, pClientMap->vardataBuffAddr, pClientMap->vardataBuffPriv); + + if (pEventBuffer->hInternalClient != 0) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + pRmApi->Free(pRmApi, pEventBuffer->hInternalClient, pEventBuffer->hInternalClient); + } + +} + +NV_STATUS +_allocAndMapMemory +( + CALL_CONTEXT *pCallContext, + NvP64 pAddress, + MEMORY_DESCRIPTOR** ppMemDesc, + NvU64 size, + NvBool bKernel, + NvP64* pKernelAddr, + NvP64* pKernelPriv, + NvP64* pUserAddr, + NvP64* pUserPriv +) +{ + NV_STATUS status; + MEMORY_DESCRIPTOR* pMemDesc = NULL; + + NV_ASSERT_OR_RETURN(pAddress == NvP64_NULL, NV_ERR_NOT_SUPPORTED); + + status = memdescCreate(ppMemDesc, NULL, size, 0, NV_MEMORY_CONTIGUOUS, + ADDR_SYSMEM, NV_MEMORY_WRITECOMBINED, MEMDESC_FLAGS_CPU_ONLY); + if (status != NV_OK) + return status; + + pMemDesc = *ppMemDesc; + + status = osAllocPages(pMemDesc); + if (status != NV_OK) + goto cleanup; + pMemDesc->Allocated = 1; + + // map memory to kernel VA space + status = memdescMap(pMemDesc, 0, size, NV_TRUE, NV_PROTECT_READ_WRITE, + pKernelAddr, pKernelPriv); + if (status != NV_OK) + goto cleanup; + + portMemSet(NvP64_VALUE(*pKernelAddr), 0, size); + + // map memory to user VA space + status = memdescMap(pMemDesc, 0, size, bKernel, NV_PROTECT_READABLE, + pUserAddr, pUserPriv); + + if (status != NV_OK) + goto cleanup; + + return NV_OK; + +cleanup: + _unmapAndFreeMemory(pMemDesc, bKernel, *pKernelAddr, *pKernelPriv, *pUserAddr, *pUserPriv); + return status; +} + +static void +_unmapAndFreeMemory +( + MEMORY_DESCRIPTOR *pMemDesc, + NvBool bKernel, + NvP64 kernelAddr, + NvP64 kernelPriv, + NvP64 userAddr, + NvP64 userPriv +) +{ + if (pMemDesc == NULL) + return; + + if (userAddr) + memdescUnmap(pMemDesc, bKernel, osGetCurrentProcess(), userAddr, userPriv); + + if (kernelAddr) + memdescUnmap(pMemDesc, NV_TRUE, osGetCurrentProcess(), kernelAddr, kernelPriv); + + memdescFree(pMemDesc); + memdescDestroy(pMemDesc); +} + +NV_STATUS +eventbuffertBufferCtrlCmdFlush_IMPL +( + EventBuffer *pEventBuffer +) +{ + OBJGPU *pGpu; + NvU32 gpuMask = 0; + NvU32 gpuIndex = 0; + gpumgrGetGpuAttachInfo(NULL, &gpuMask); + while ((pGpu = gpumgrGetNextGpu(gpuMask, &gpuIndex)) != NULL) + { + nvEventBufferFecsCallback(pGpu, NULL); + } + return NV_OK; +} + +NV_STATUS +eventbuffertBufferCtrlCmdEnableEvent_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS *pEnableParams +) +{ + GPU_MASK gpuMask; + NV_STATUS status = NV_OK; + NvBool updateTelemetry = NV_FALSE; + + if (pEnableParams->flags & + ~(NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST|NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pEnableParams->enable && !pEventBuffer->producerInfo.isEnabled) + { + updateTelemetry = NV_TRUE; + } + + eventBufferSetEnable(&pEventBuffer->producerInfo, pEnableParams->enable); + if (pEnableParams->flags & NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_NEWEST) + eventBufferSetKeepNewest(&pEventBuffer->producerInfo, NV_TRUE); + else if (pEnableParams->flags & NV_EVENT_BUFFER_FLAG_OVERFLOW_POLICY_KEEP_OLDEST) + eventBufferSetKeepNewest(&pEventBuffer->producerInfo, NV_FALSE); + + // NvTelemetry requires a valid subdevice + if 
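/*
 * Illustrative sketch (not part of the original change): the two overflow-policy
 * flags accepted above select what the producer does when the record ring fills
 * up. The real behaviour lives in eventBufferProducerAddEvent(), which is not
 * part of this file; the generic shape of the two policies is roughly:
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     typedef struct { uint32_t put, get, count; bool keepNewest; } ring_t;
 *
 *     // Returns the slot to write, or UINT32_MAX when the record is dropped.
 *     static uint32_t ringClaimSlot(ring_t *r)
 *     {
 *         uint32_t next = (r->put + 1) % r->count;
 *         if (next == r->get) {                  // ring is full
 *             if (!r->keepNewest)
 *                 return UINT32_MAX;             // KEEP_OLDEST: drop the incoming record
 *             r->get = (r->get + 1) % r->count;  // KEEP_NEWEST: overwrite the oldest one
 *         }
 *         uint32_t slot = r->put;
 *         r->put = next;
 *         return slot;
 *     }
 */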
(updateTelemetry && pEventBuffer->hSubDevice) + { + NvHandle hClient = RES_GET_CLIENT_HANDLE(pEventBuffer); + NvHandle hDevice; + OBJGPU *pGpu; + + status = rmGpuGroupLockAcquire(pEventBuffer->subDeviceInst, + GPU_LOCK_GRP_SUBDEVICE, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU, &gpuMask); + if (status != NV_OK) + return status; + + status = CliSetSubDeviceContext(hClient, pEventBuffer->hSubDevice, &hDevice, &pGpu); + + rmGpuGroupLockRelease(gpuMask, GPUS_LOCK_FLAGS_NONE); + } + return NV_OK; +} + +NV_STATUS +eventbuffertBufferCtrlCmdUpdateGet_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS *pUpdateParams +) +{ + EVENT_BUFFER_PRODUCER_INFO *pProducerInfo = &pEventBuffer->producerInfo; + NvP64 pVardataBuf = pEventBuffer->kernelMapInfo.vardataBuffAddr; + + if ((pUpdateParams->recordBufferGet >= eventBufferGetRecordBufferCount(pProducerInfo)) || + (pVardataBuf == NvP64_NULL && pUpdateParams->varDataBufferGet > 0) || + (pVardataBuf != NvP64_NULL && pUpdateParams->varDataBufferGet >= eventBufferGetVardataBufferCount(pProducerInfo))) + { + return NV_ERR_INVALID_ARGUMENT; + } + + eventBufferUpdateRecordBufferGet(pProducerInfo, pUpdateParams->recordBufferGet); + if (pVardataBuf) + eventBufferUpdateVardataBufferGet(pProducerInfo, pUpdateParams->varDataBufferGet); + + pEventBuffer->bNotifyPending = NV_FALSE; + + return NV_OK; +} + +/* + * eventbuffertBufferCtrlCmdPostTelemetryEvent posts an event to the event buffer for testing purposes. + * Note -- in order to post an event, a handle to the buffer is required. since the handle is + * only available to the client that created the buffer, one can only post events to buffers that + * it created. this has been done to limit the ability to post to buffers for testing purposes + * only. if it is determined that we want to open this up to other callers, then this ctrl call + * should be moved to the 2080 class & adjustments made for acquiring the pGpu based on the + * subdevice handle there. + */ +NV_STATUS +eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL +( + EventBuffer *pEventBuffer, + NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS *pPostTelemetryEvent +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +eventBufferAdd(EventBuffer* pEventBuffer, void *pEventData, NvU32 recordType, NvBool *pBNotify, NvP64 *pHandle) +{ + EVENT_BUFFER_PRODUCER_DATA *pProducerData = (EVENT_BUFFER_PRODUCER_DATA*)pEventData; + RECORD_BUFFER_INFO *pRBI; + NV_EVENT_BUFFER_HEADER *pHeader; + + if (!pEventBuffer->producerInfo.isEnabled) + return NV_WARN_NOTHING_TO_DO; + + pRBI = &pEventBuffer->producerInfo.recordBuffer; + pHeader = pEventBuffer->producerInfo.recordBuffer.pHeader; + + NV_ASSERT_OR_RETURN(pHeader->recordPut < pRBI->totalRecordCount, NV_ERR_INVALID_STATE); + + eventBufferProducerAddEvent(&pEventBuffer->producerInfo, + recordType, 0, pProducerData); + + *pBNotify = (!pEventBuffer->bNotifyPending) && + (eventBufferIsNotifyThresholdMet(&pEventBuffer->producerInfo)); + *pHandle = pEventBuffer->producerInfo.notificationHandle; + return NV_OK; +} diff --git a/src/nvidia/src/kernel/rmapi/event_notification.c b/src/nvidia/src/kernel/rmapi/event_notification.c new file mode 100644 index 000000000..924d24e41 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/event_notification.c @@ -0,0 +1,900 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/********************************* DMA Manager *****************************\ +* * +* Event notifications are handled in this module. DMA report and OS * +* action are dealt with on a per-object basis. * +* * +****************************************************************************/ + +#include "core/core.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "class/cl0000.h" +#include "os/os.h" +#include "class/cl0005.h" +#include "gpu/subdevice/subdevice.h" +#include "rmapi/rs_utils.h" +#include "mem_mgr/mem.h" + +#include "kernel/gpu/mig_mgr/kernel_mig_manager.h" + +static NV_STATUS _insertEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvU32 NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle +); + +static NV_STATUS _removeEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data, + PEVENTNOTIFICATION *ppOldEvent +); + +//--------------------------------------------------------------------------- +// +// Event support. 
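/*
 * Illustrative sketch (not part of the original change): the two static helpers
 * declared above maintain a plain singly linked list of EVENTNOTIFICATION
 * nodes, inserting at the head and removing by (hEventClient, hEvent). Reduced
 * to its essentials, with hypothetical node and field names:
 *
 *     typedef struct Node { struct Node *next; NvHandle hClient, hEvent; } Node;
 *
 *     static void listInsertHead(Node **ppHead, Node *pNew)
 *     {
 *         pNew->next = *ppHead;   // order in the chain does not matter
 *         *ppHead = pNew;
 *     }
 *
 *     static Node *listRemove(Node **ppHead, NvHandle hClient, NvHandle hEvent)
 *     {
 *         for (Node **pp = ppHead; *pp != NULL; pp = &(*pp)->next) {
 *             if ((*pp)->hClient == hClient && (*pp)->hEvent == hEvent) {
 *                 Node *pFound = *pp;
 *                 *pp = pFound->next;   // unlink; the caller frees the node
 *                 return pFound;
 *             }
 *         }
 *         return NULL;
 *     }
 */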
+// +//--------------------------------------------------------------------------- + +static NV_STATUS engineNonStallEventOp +( + OBJGPU *pGpu, + NvU32 engineId, + PEVENTNOTIFICATION pEventNotify, + Memory *pMemory, + NvBool bInsert +) +{ + ENGINE_EVENT_NODE *pTempNode; + NvBool bFound = NV_FALSE; + + if (bInsert) + { + pTempNode = portMemAllocNonPaged(sizeof(ENGINE_EVENT_NODE)); + + if (pTempNode == NULL) + return NV_ERR_NO_MEMORY; + + // Acquire engine list spinlock before adding to engine event list + portSyncSpinlockAcquire(pGpu->engineNonstallIntr[engineId].pSpinlock); + pTempNode->pNext = pGpu->engineNonstallIntr[engineId].pEventNode; + pTempNode->pEventNotify = pEventNotify; + pTempNode->pMemory = pMemory; + + pGpu->engineNonstallIntr[engineId].pEventNode = pTempNode; + + // Release engine list spinlock + portSyncSpinlockRelease(pGpu->engineNonstallIntr[engineId].pSpinlock); + } + else + { + ENGINE_EVENT_NODE *pEngNode, *pPrevNode = NULL; + + // Acquire engine list spinlock before traversing engine event list + portSyncSpinlockAcquire(pGpu->engineNonstallIntr[engineId].pSpinlock); + + pEngNode = pGpu->engineNonstallIntr[engineId].pEventNode; + while (pEngNode) + { + if (pEngNode->pEventNotify == pEventNotify) + { + if (pPrevNode == NULL) + pGpu->engineNonstallIntr[engineId].pEventNode = pEngNode->pNext; + else + pPrevNode->pNext = pEngNode->pNext; + + pTempNode = pEngNode; + bFound = NV_TRUE; + break; + } + else + { + pPrevNode = pEngNode; + } + pEngNode = pEngNode->pNext; + } + + // Release engine list spinlock + portSyncSpinlockRelease(pGpu->engineNonstallIntr[engineId].pSpinlock); + + if (bFound) + { + portMemFree(pTempNode); + } + else + { + NV_ASSERT_FAILED("failed to find non-stall event!"); + return NV_ERR_INVALID_STATE; + } + } + + return NV_OK; +} + +static NV_STATUS _engineNonStallIntrNotifyImpl(OBJGPU *pGpu, NvU32 engineId, NvHandle hEvent) +{ + ENGINE_EVENT_NODE *pTempHead; + Memory *pSemMemory; + NvU32 semValue; + NvU32 *pTempKernelMapping = NULL; + NV_STATUS rmStatus = NV_OK; + + // + // Acquire engine list spinlock before traversing the list. Note that this + // is called without holding locks from ISR for Linux. This spinlock is used + // to protect per GPU per engine event node list. + // + portSyncSpinlockAcquire(pGpu->engineNonstallIntr[engineId].pSpinlock); + + pTempHead = pGpu->engineNonstallIntr[engineId].pEventNode; + while (pTempHead) + { + if (!pTempHead->pEventNotify) + { + rmStatus = NV_ERR_INVALID_STATE; + break; + } + + if (hEvent && pTempHead->pEventNotify->hEvent != hEvent) + goto nextEvent; + + pSemMemory = pTempHead->pMemory; + + if (pSemMemory && pSemMemory->vgpuNsIntr.isSemaMemValidationEnabled && + pSemMemory->pMemDesc && pSemMemory->pMemDesc->Allocated) + { + pTempKernelMapping = (NvU32 *)NvP64_VALUE(memdescGetKernelMapping(pSemMemory->pMemDesc)); + if (pTempKernelMapping == NULL) + { + NV_PRINTF(LEVEL_WARNING, "Per-vGPU semaphore location mapping is NULL. 
Skipping the current node.\n"); + pTempHead = pTempHead->pNext; + continue; + } + semValue = MEM_RD32(pTempKernelMapping + (pSemMemory->vgpuNsIntr.nsSemOffset / sizeof(NvU32))); + + if (pSemMemory->vgpuNsIntr.nsSemValue == semValue) + { + pTempHead = pTempHead->pNext; + continue; + } + + pSemMemory->vgpuNsIntr.nsSemValue = semValue; + + } + + if (osNotifyEvent(pGpu, pTempHead->pEventNotify, 0, 0, NV_OK) != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "failed to notify event for engine 0x%x\n", + engineId); + NV_ASSERT(0); + rmStatus = NV_ERR_INVALID_STATE; + break; + } + + nextEvent: + pTempHead = pTempHead->pNext; + } + + portSyncSpinlockRelease(pGpu->engineNonstallIntr[engineId].pSpinlock); + return rmStatus; +} + +NV_STATUS +engineNonStallIntrNotify(OBJGPU *pGpu, NvU32 engineId) +{ + return _engineNonStallIntrNotifyImpl(pGpu, engineId, 0); +} + +NV_STATUS +engineNonStallIntrNotifyEvent(OBJGPU *pGpu, NvU32 engineId, NvHandle hEvent) +{ + return _engineNonStallIntrNotifyImpl(pGpu, engineId, hEvent); +} + +static NV_STATUS +eventGetEngineTypeFromSubNotifyIndex +( + NvU32 notifyIndex, + NvU32 *engineIdx +) +{ + NV_ASSERT_OR_RETURN(engineIdx, NV_ERR_INVALID_ARGUMENT); + + *engineIdx = NV2080_ENGINE_TYPE_NULL; + + switch (notifyIndex) + { + case NV2080_NOTIFIERS_FIFO_EVENT_MTHD: + *engineIdx = NV2080_ENGINE_TYPE_HOST; + break; + case NV2080_NOTIFIERS_CE0: + *engineIdx = NV2080_ENGINE_TYPE_COPY0; + break; + case NV2080_NOTIFIERS_CE1: + *engineIdx = NV2080_ENGINE_TYPE_COPY1; + break; + case NV2080_NOTIFIERS_CE2: + *engineIdx = NV2080_ENGINE_TYPE_COPY2; + break; + case NV2080_NOTIFIERS_CE3: + *engineIdx = NV2080_ENGINE_TYPE_COPY3; + break; + case NV2080_NOTIFIERS_CE4: + *engineIdx = NV2080_ENGINE_TYPE_COPY4; + break; + case NV2080_NOTIFIERS_CE5: + *engineIdx = NV2080_ENGINE_TYPE_COPY5; + break; + case NV2080_NOTIFIERS_CE6: + *engineIdx = NV2080_ENGINE_TYPE_COPY6; + break; + case NV2080_NOTIFIERS_CE7: + *engineIdx = NV2080_ENGINE_TYPE_COPY7; + break; + case NV2080_NOTIFIERS_CE8: + *engineIdx = NV2080_ENGINE_TYPE_COPY8; + break; + case NV2080_NOTIFIERS_CE9: + *engineIdx = NV2080_ENGINE_TYPE_COPY9; + break; + case NV2080_NOTIFIERS_GR0: + *engineIdx = NV2080_ENGINE_TYPE_GR0; + break; + case NV2080_NOTIFIERS_GR1: + *engineIdx = NV2080_ENGINE_TYPE_GR1; + break; + case NV2080_NOTIFIERS_GR2: + *engineIdx = NV2080_ENGINE_TYPE_GR2; + break; + case NV2080_NOTIFIERS_GR3: + *engineIdx = NV2080_ENGINE_TYPE_GR3; + break; + case NV2080_NOTIFIERS_GR4: + *engineIdx = NV2080_ENGINE_TYPE_GR4; + break; + case NV2080_NOTIFIERS_GR5: + *engineIdx = NV2080_ENGINE_TYPE_GR5; + break; + case NV2080_NOTIFIERS_GR6: + *engineIdx = NV2080_ENGINE_TYPE_GR6; + break; + case NV2080_NOTIFIERS_GR7: + *engineIdx = NV2080_ENGINE_TYPE_GR7; + break; + case NV2080_NOTIFIERS_PPP: + *engineIdx = NV2080_ENGINE_TYPE_PPP; + break; + case NV2080_NOTIFIERS_NVDEC0: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC0; + break; + case NV2080_NOTIFIERS_NVDEC1: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC1; + break; + case NV2080_NOTIFIERS_NVDEC2: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC2; + break; + case NV2080_NOTIFIERS_NVDEC3: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC3; + break; + case NV2080_NOTIFIERS_NVDEC4: + *engineIdx = NV2080_ENGINE_TYPE_NVDEC4; + break; + case NV2080_NOTIFIERS_PDEC: + *engineIdx = NV2080_ENGINE_TYPE_VP; + break; + case NV2080_NOTIFIERS_MSENC: + NV_ASSERT(NV2080_NOTIFIERS_MSENC == NV2080_NOTIFIERS_NVENC0); + NV_ASSERT(NV2080_ENGINE_TYPE_MSENC == NV2080_ENGINE_TYPE_NVENC0); + *engineIdx = NV2080_ENGINE_TYPE_MSENC; + break; + case 
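/*
 * Illustrative sketch (not part of the original change): the walk in
 * _engineNonStallIntrNotifyImpl() above can run from interrupt context with no
 * other RM locks held, which is why every access to the per-engine node list
 * goes through that engine's spinlock. Stripped of the vGPU semaphore
 * filtering, the shape is lock, walk, deliver, unlock; the helper names below
 * are hypothetical stand-ins for the portSync/osNotifyEvent calls used above.
 *
 *     lockAcquire(&eng->lock);
 *     for (Node *pNode = eng->pHead; pNode != NULL; pNode = pNode->next)
 *     {
 *         if (hWanted == 0 || pNode->hEvent == hWanted)
 *             deliver(pNode);     // corresponds to osNotifyEvent() above
 *     }
 *     lockRelease(&eng->lock);
 */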
NV2080_NOTIFIERS_NVENC1: + *engineIdx = NV2080_ENGINE_TYPE_NVENC1; + break; + case NV2080_NOTIFIERS_NVENC2: + *engineIdx = NV2080_ENGINE_TYPE_NVENC2; + break; + case NV2080_NOTIFIERS_SEC2: + *engineIdx = NV2080_ENGINE_TYPE_SEC2; + break; + case NV2080_NOTIFIERS_NVJPEG0: + *engineIdx = NV2080_ENGINE_TYPE_NVJPEG0; + break; + case NV2080_NOTIFIERS_OFA: + *engineIdx = NV2080_ENGINE_TYPE_OFA; + break; + default: + NV_PRINTF(LEVEL_WARNING, + "engine 0x%x doesn't use the fast non-stall interrupt path!\n", + notifyIndex); + NV_ASSERT(0); + return NV_ERR_NOT_SUPPORTED; + } + + return NV_OK; +} + +NV_STATUS registerEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hNotifier, + NvHandle hEvent, + NvU32 NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle +) +{ + Subdevice *pSubDevice; + PEVENTNOTIFICATION pTargetEvent = NULL; + NV_STATUS rmStatus = NV_OK, rmTmpStatus = NV_OK; + OBJGPU *pGpu; + NvBool bNonStallIntrEvent = NV_FALSE; + NvU32 engineId; + NvHandle hDevice; + RsResourceRef *pResourceRef; + Memory *pSemMemory = NULL; + + rmStatus = _insertEventNotification(ppEventNotification, hEventClient, + hEvent, NotifyIndex, NotifyType, Data, bUserOsEventHandle); + + if (rmStatus != NV_OK) + goto failed_insert; + + bNonStallIntrEvent = ((NotifyIndex & NV01_EVENT_NONSTALL_INTR) ? NV_TRUE : NV_FALSE); + + if (bNonStallIntrEvent) + { + // + // For non-stall interrupt, the event parent type is NV20_SUBDEVICE, so we can locate + // the correct OBJGPU and attach to its per-engine non-stall event list. + // + if ((serverutilGetResourceRef(hEventClient, hNotifier, &pResourceRef) != NV_OK) || + (!dynamicCast(pResourceRef->pResource, Subdevice))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + pSubDevice = dynamicCast(pResourceRef->pResource, Subdevice); + hDevice = RES_GET_PARENT_HANDLE(pSubDevice); + + if (CliSetSubDeviceContext(hEventClient, RES_GET_HANDLE(pSubDevice), &hDevice, &pGpu) != NV_OK) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + rmStatus = eventGetEngineTypeFromSubNotifyIndex( + DRF_VAL(0005, _NOTIFY_INDEX, _INDEX, NotifyIndex), &engineId); + + if (rmStatus != NV_OK) + goto free_entry; + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvU32 globalEngineId = engineId; + MIG_INSTANCE_REF ref; + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hEventClient, &ref), + free_entry); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, engineId, &globalEngineId), + free_entry); + + engineId = globalEngineId; + } + + if (pSubDevice->hSemMemory != NV01_NULL_OBJECT) + { + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_SILENT, + memGetByHandle(RES_GET_CLIENT(pSubDevice), + pSubDevice->hSemMemory, + &pSemMemory), + free_entry); + } + + rmStatus = engineNonStallEventOp(pGpu, engineId, + *ppEventNotification, pSemMemory, NV_TRUE); + + if (rmStatus != NV_OK) + goto free_entry; + + return rmStatus; + } + +free_entry: + if (rmStatus != NV_OK) + { + rmTmpStatus = _removeEventNotification(ppEventNotification, hEventClient, + hEvent, NV_TRUE, Data, &pTargetEvent); + + if (rmTmpStatus == NV_OK) + portMemFree(pTargetEvent); + } + +failed_insert: + NV_ASSERT(rmStatus == NV_OK); + return rmStatus; +} + +static NV_STATUS _insertEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvU32 
NotifyIndex, + NvU32 NotifyType, + NvP64 Data, + NvBool bUserOsEventHandle + +) +{ + PEVENTNOTIFICATION EventNotify; + + // + // Create the event notification object + // + EventNotify = portMemAllocNonPaged(sizeof(EVENTNOTIFICATION)); + if (EventNotify == NULL) + return NV_ERR_NO_MEMORY; + + // + // Fill in the fields + // + if (NotifyIndex & NV01_EVENT_BROADCAST) + { + EventNotify->bBroadcastEvent = NV_TRUE; + } + else + { + EventNotify->bBroadcastEvent = NV_FALSE; + } + + if (NotifyIndex & NV01_EVENT_SUBDEVICE_SPECIFIC) + { + EventNotify->bSubdeviceSpecificEvent = NV_TRUE; + EventNotify->SubdeviceSpecificValue = + DRF_VAL(0005, _NOTIFY_INDEX, _SUBDEVICE, NotifyIndex); + } + else + { + EventNotify->bSubdeviceSpecificEvent = NV_FALSE; + EventNotify->SubdeviceSpecificValue = 0; + } + + if (NotifyIndex & NV01_EVENT_WITHOUT_EVENT_DATA) + { + EventNotify->bEventDataRequired = NV_FALSE; + } + else + { + EventNotify->bEventDataRequired = NV_TRUE; + } + + if (NotifyIndex & NV01_EVENT_CLIENT_RM) + { + EventNotify->bClientRM = NV_TRUE; + } + else + { + EventNotify->bClientRM = NV_FALSE; + } + + EventNotify->bNonStallIntrEvent = + ((NotifyIndex & NV01_EVENT_NONSTALL_INTR) ? NV_TRUE : NV_FALSE); + + // strip the upper bits as they are actually flags + NotifyIndex = DRF_VAL(0005, _NOTIFY_INDEX, _INDEX, NotifyIndex); + + EventNotify->hEventClient = hEventClient; + EventNotify->hEvent = hEvent; + EventNotify->subdeviceInst = 0; + EventNotify->NotifyIndex = NotifyIndex; + EventNotify->NotifyType = NotifyType; + EventNotify->Data = Data; + EventNotify->NotifyTriggerCount = 0; + EventNotify->bUserOsEventHandle = bUserOsEventHandle; + + // + // Now insert the event into the event chain of this object. + // Order doesn't really matter. + // + EventNotify->Next = *ppEventNotification; + *ppEventNotification = EventNotify; + + return (NV_OK); +} + +//--------------------------------------------------------------------------- +// +// Event Notification support. +// +//--------------------------------------------------------------------------- + +NV_STATUS unregisterEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hNotifier, + NvHandle hEvent +) +{ + return unregisterEventNotificationWithData(ppEventNotification, + hEventClient, + hNotifier, + hEvent, + NV_FALSE, + NvP64_NULL); +} + +NV_STATUS unregisterEventNotificationWithData +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hNotifier, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data +) +{ + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION pTargetEvent = NULL; + Subdevice *pSubDevice; + RsResourceRef *pResourceRef; + NvHandle hDevice; + NvU32 engineId; + OBJGPU *pGpu; + + rmStatus = _removeEventNotification(ppEventNotification, hEventClient, + hEvent, bMatchData, Data, &pTargetEvent); + + if (rmStatus != NV_OK) + goto error; + + if (pTargetEvent->bNonStallIntrEvent) + { + // + // For non-stall interrupt, the event parent type is NV20_SUBDEVICE, so we can locate + // the correct OBJGPU and attach to its per-engine non-stall event list. 
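/*
 * Illustrative sketch (not part of the original change): as parsed by
 * _insertEventNotification() above, the NotifyIndex argument is a small
 * notifier index packed together with behaviour flags in its upper bits. A
 * hypothetical caller registering a broadcast, non-stall notifier would pass
 * something like the following; the notifier constant is just an example taken
 * from the switch earlier in this file.
 *
 *     NvU32 notifyIndex = NV2080_NOTIFIERS_CE0 |    // low bits: which notifier
 *                         NV01_EVENT_BROADCAST |    // fire once all subdevices report
 *                         NV01_EVENT_NONSTALL_INTR; // use the non-stall interrupt path
 *
 *     // Inside RM the flag bits are stripped again with
 *     // DRF_VAL(0005, _NOTIFY_INDEX, _INDEX, notifyIndex) before the index is stored.
 */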
+ // + if ((serverutilGetResourceRef(hEventClient, hNotifier, &pResourceRef) != NV_OK) || + (!dynamicCast(pResourceRef->pResource, Subdevice))) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + pSubDevice = dynamicCast(pResourceRef->pResource, Subdevice); + hDevice = RES_GET_PARENT_HANDLE(pSubDevice); + + if (CliSetSubDeviceContext(hEventClient, RES_GET_HANDLE(pSubDevice), &hDevice, &pGpu) != NV_OK) + { + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto free_entry; + } + + rmStatus = eventGetEngineTypeFromSubNotifyIndex(pTargetEvent->NotifyIndex, &engineId); + + if (rmStatus != NV_OK) + goto free_entry; + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + NvU32 globalEngineId = engineId; + MIG_INSTANCE_REF ref; + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrGetInstanceRefFromClient(pGpu, pKernelMIGManager, hEventClient, &ref), + free_entry); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + kmigmgrGetLocalToGlobalEngineType(pGpu, pKernelMIGManager, ref, engineId, &globalEngineId), + free_entry); + + engineId = globalEngineId; + } + + rmStatus = engineNonStallEventOp(pGpu, engineId, + pTargetEvent, NULL, NV_FALSE); + } + +free_entry: + portMemFree(pTargetEvent); + +error: + NV_ASSERT(rmStatus == NV_OK); + return rmStatus; +} + +static NV_STATUS _removeEventNotification +( + PEVENTNOTIFICATION *ppEventNotification, + NvHandle hEventClient, + NvHandle hEvent, + NvBool bMatchData, + NvP64 Data, + PEVENTNOTIFICATION *ppOldEvent +) +{ + PEVENTNOTIFICATION nextEvent, lastEvent; + NvBool found = NV_FALSE; + + // check for null list + nextEvent = NULL; + + if (*ppEventNotification != NULL) + { + // check for head of list + nextEvent = lastEvent = *ppEventNotification; + if ((nextEvent->hEventClient == hEventClient) && + (nextEvent->hEvent == hEvent) && + (!bMatchData || (nextEvent->Data == Data))) + { + *ppEventNotification = nextEvent->Next; + found = NV_TRUE; + } + else + { + // check for internal nodes + nextEvent = nextEvent->Next; + while (nextEvent) + { + if ((nextEvent->hEventClient == hEventClient) && + (nextEvent->hEvent == hEvent) && + (!bMatchData || (nextEvent->Data == Data))) + { + lastEvent->Next = nextEvent->Next; + found = NV_TRUE; + break; + } + lastEvent = nextEvent; + nextEvent = nextEvent->Next; + } + } + } + + // delete the event if it was found + if (found) + { + if (nextEvent->bUserOsEventHandle) + osDereferenceObjectCount(NvP64_VALUE(nextEvent->Data)); + + *ppOldEvent = nextEvent; + } + + return (found) ? 
NV_OK : NV_ERR_GENERIC; + +} // end of unregisterEventNotificationEventNotify() + +NV_STATUS notifyEvents +( + OBJGPU *pGpu, + PEVENTNOTIFICATION pEventNotification, + NvU32 Notifier, + NvU32 Method, + NvU32 Data, + NV_STATUS Status, + NvU32 Action +) +{ + NV_STATUS rmStatus = NV_OK; + PEVENTNOTIFICATION NotifyEvent; + + NV_PRINTF(LEVEL_INFO, " Method = 0x%x\n", Method); + NV_PRINTF(LEVEL_INFO, " Data = 0x%x\n", Data); + NV_PRINTF(LEVEL_INFO, " Status = 0x%x\n", Status); + NV_PRINTF(LEVEL_INFO, " Action = 0x%x\n", Action); + + // perform the type of action + switch (Action) + { + case NV_OS_WRITE_THEN_AWAKEN: + + // walk this object's event list and find any matches for this specific notify + for (NotifyEvent = pEventNotification; NotifyEvent; NotifyEvent = NotifyEvent->Next) + { + if (NotifyEvent->bSubdeviceSpecificEvent) + { + if (gpumgrGetSubDeviceInstanceFromGpu(pGpu) != NotifyEvent->SubdeviceSpecificValue) + { + continue; + } + } + + if (NotifyEvent->NotifyIndex == Notifier) + { + // Do any OS specified action related to this notification. + if (NotifyEvent->bBroadcastEvent) + { + // + // Only do the OS notify when all sub devices under + // a BC device have seen the event. + // + if (++NotifyEvent->NotifyTriggerCount == NumSubDevices(pGpu)) + { + rmStatus = osNotifyEvent(pGpu, NotifyEvent, Method, Data, Status); + NotifyEvent->NotifyTriggerCount = 0x0; + } + } + else + { + rmStatus = osNotifyEvent(pGpu, NotifyEvent, Method, Data, Status); + } + } + } + break; + + default: + // any other actions are legacy channel-based notifies + rmStatus = NV_ERR_INVALID_EVENT; + break; + } + + return rmStatus; +} + +// +// bindEventNotificationToSubdevice +// +// This routine walks the given EVENTNOTIFICATION list and sets +// the designated subdevice instance value for any that are associated +// with the specific NV01_EVENT handle hEvent. 
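/*
 * Illustrative sketch (not part of the original change): for broadcast events,
 * notifyEvents() above defers the OS callback until every subdevice in the
 * broadcast group has reported the notifier, using NotifyTriggerCount as the
 * per-event counter. Reduced to the counting step only:
 *
 *     // Called once per subdevice that observes the notifier (sketch).
 *     if (++pEvent->NotifyTriggerCount == NumSubDevices(pGpu))
 *     {
 *         osNotifyEvent(pGpu, pEvent, Method, Data, Status);  // fire exactly once
 *         pEvent->NotifyTriggerCount = 0;                     // re-arm for the next round
 *     }
 */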
+// +NV_STATUS +bindEventNotificationToSubdevice +( + PEVENTNOTIFICATION pEventNotificationList, + NvHandle hEvent, + NvU32 subdeviceInst +) +{ + PEVENTNOTIFICATION pEventNotify; + NvU32 count = 0; + + if (pEventNotificationList == NULL) + return NV_ERR_INVALID_STATE; + + pEventNotify = pEventNotificationList; + while (pEventNotify) + { + if (pEventNotify->hEvent == hEvent) + { + pEventNotify->subdeviceInst = subdeviceInst; + count++; + } + pEventNotify = pEventNotify->Next; + } + + if (count == 0) + return NV_ERR_INVALID_STATE; + + return NV_OK; +} + +NV_STATUS +inotifyConstruct_IMPL(INotifier *pNotifier, CALL_CONTEXT *pCallContext) +{ + if (dynamicCast(pNotifier, RsResource) == NULL) + return NV_ERR_INVALID_OBJECT; + + return NV_OK; +} + +void inotifyDestruct_IMPL(INotifier* pNotifier) +{ + return; +} + +PEVENTNOTIFICATION +inotifyGetNotificationList_IMPL +( + INotifier *pNotifier +) +{ + PEVENTNOTIFICATION *ppEventNotifications = inotifyGetNotificationListPtr(pNotifier); + if (ppEventNotifications != NULL) + return *ppEventNotifications; + + return NULL; +} + +NV_STATUS +notifyConstruct_IMPL(Notifier *pNotifier, CALL_CONTEXT *pCallContext) +{ + return NV_OK; +} + +void notifyDestruct_IMPL(Notifier* pNotifier) +{ + NotifShare *pNotifierShare = inotifyGetNotificationShare(staticCast(pNotifier, INotifier)); + if (pNotifierShare != NULL) + { + pNotifierShare->pNotifier = NULL; + serverFreeShare(&g_resServ, staticCast(pNotifierShare, RsShared)); + } +} + +PEVENTNOTIFICATION +*notifyGetNotificationListPtr_IMPL +( + Notifier *pNotifier +) +{ + NotifShare *pNotifierShare = pNotifier->pNotifierShare; + if (pNotifierShare == NULL) + return NULL; + + return &pNotifierShare->pEventList; +} + +NotifShare +*notifyGetNotificationShare_IMPL +( + Notifier *pNotifier +) +{ + return pNotifier->pNotifierShare; +} + +void +notifySetNotificationShare_IMPL +( + Notifier *pNotifier, + NotifShare *pNotifierShare +) +{ + pNotifier->pNotifierShare = pNotifierShare; +} + +NV_STATUS +shrnotifConstruct_IMPL +( + NotifShare *pNotifShare +) +{ + return NV_OK; +} + +void +shrnotifDestruct_IMPL +( + NotifShare *pNotifShare +) +{ +} diff --git a/src/nvidia/src/kernel/rmapi/mapping.c b/src/nvidia/src/kernel/rmapi/mapping.c new file mode 100644 index 000000000..508a8bcdf --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/mapping.c @@ -0,0 +1,595 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/device/device.h" +#include "kernel/mem_mgr/virtual_mem.h" +#include "class/cl0000.h" // NV01_NULL_OBJECT + +#include "rmapi/rs_utils.h" + +#include "entry_points.h" +#include "gpu/gpu.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/mem_mgr/mem_mgr.h" + +static NvU32 +_getMappingPageSize +( + RsResourceRef *pMappableRef +) +{ + Memory *pMemory = dynamicCast(pMappableRef->pResource, Memory); + if (pMemory != NULL) + { + return memdescGetPageSize(pMemory->pMemDesc, AT_GPU); + } + return RM_PAGE_SIZE; +} + +NV_STATUS +serverInterMap_Prologue +( + RsServer *pServer, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams, + NvU32 *pReleaseFlags +) +{ + OBJGPU *pGpu; + Device *pDevice; + Subdevice *pSubdevice; + NV_STATUS rmStatus = NV_OK; + NvU64 offset = pParams->offset; + NvU64 length = pParams->length; + + MEMORY_DESCRIPTOR *pSrcMemDesc = NULL; + NvHandle hBroadcastDevice; + NvBool bSubdeviceHandleProvided; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RsResourceRef *pDeviceRef = pCallContext->pContextRef; + RS_INTER_MAP_PRIVATE *pPrivate = pParams->pPrivate; + + NV_ASSERT_OR_RETURN(pPrivate != NULL, NV_ERR_INVALID_ARGUMENT); + + // Get pGpu, assuming user passed in either a device or subdevice handle. + pDevice = dynamicCast(pDeviceRef->pResource, Device); + if (pDevice == NULL) + { + pSubdevice = dynamicCast(pDeviceRef->pResource, Subdevice); + if (pSubdevice == NULL) + return NV_ERR_INVALID_OBJECT; + + pGpu = GPU_RES_GET_GPU(pSubdevice); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + + hBroadcastDevice = RES_GET_HANDLE(pSubdevice->pDevice); + bSubdeviceHandleProvided = NV_TRUE; + pPrivate->gpuMask = NVBIT(gpuGetInstance(pGpu)); + } + else + { + pGpu = GPU_RES_GET_GPU(pDevice); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + hBroadcastDevice = pParams->hDevice; + bSubdeviceHandleProvided = NV_FALSE; + pPrivate->gpuMask = gpumgrGetGpuMask(pGpu); + } + + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + + // For non-memory/dma objects, below call simply returns + if (memmgrIsPmaInitialized(pMemoryManager) && + memmgrAreClientPageTablesPmaManaged(pMemoryManager)) + { + VirtualMemory *pVirtualMemory; + + pVirtualMemory = dynamicCast(pMapperRef->pResource, VirtualMemory); + + if (pVirtualMemory != NULL) + { + NvU32 pageSize = RM_PAGE_SIZE; + + if (pVirtualMemory->bOptimizePageTableMempoolUsage) + { + pageSize = _getMappingPageSize(pMappableRef); + } + + NV_CHECK_OK_OR_RETURN(LEVEL_ERROR, + virtmemReserveMempool(pVirtualMemory, pGpu, hBroadcastDevice, + pParams->length, pageSize)); + } + } + + rmStatus = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, pReleaseFlags); + if (rmStatus != NV_OK) + return rmStatus; + + pPrivate->pGpu = pGpu; + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_TRUE, NV_FALSE); + + // Use virtual GetMemInterMapParams to get information needed for mapping from pMappableRef->pResource + RMRES_MEM_INTER_MAP_PARAMS memInterMapParams; + portMemSet(&memInterMapParams, 0, sizeof(memInterMapParams)); + + memInterMapParams.pGpu = pGpu; + memInterMapParams.pMemoryRef = pMappableRef; + 
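/*
 * Illustrative sketch (not part of the original change): the prologue above
 * derives its GPU scope from the context handle the client passed in. A
 * subdevice handle pins the mapping to a single GPU, while a device handle
 * covers the whole (broadcast) GPU group, which is what the two gpuMask
 * assignments express:
 *
 *     pPrivate->gpuMask = bSubdeviceHandleProvided
 *                             ? NVBIT(gpuGetInstance(pGpu))  // unicast: one GPU
 *                             : gpumgrGetGpuMask(pGpu);      // broadcast: full group
 */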
memInterMapParams.bSubdeviceHandleProvided = bSubdeviceHandleProvided; + + rmStatus = rmresGetMemInterMapParams(dynamicCast(pMappableRef->pResource, RmResource), &memInterMapParams); + if (rmStatus != NV_OK) + return rmStatus; + + pSrcMemDesc = memInterMapParams.pSrcMemDesc; + NV_ASSERT_OR_RETURN(pSrcMemDesc != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + pPrivate->pSrcGpu = memInterMapParams.pSrcGpu; + pPrivate->hMemoryDevice = memInterMapParams.hMemoryDevice; + pPrivate->bDmaMapNeeded = memInterMapParams.bDmaMapNeeded; + pPrivate->bFlaMapping = memInterMapParams.bFlaMapping; + + // Check length for overflow and against the physical memory size. + if (((offset + length) < offset) || + ((offset + length) > pSrcMemDesc->Size)) + { + NV_PRINTF(LEVEL_ERROR, + "Mapping offset 0x%llX or length 0x%llX out of bounds!\n", + offset, length); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_LIMIT; + } + + if (memdescGetFlag(memdescGetMemDescFromGpu(pSrcMemDesc, pGpu), MEMDESC_FLAGS_DEVICE_READ_ONLY) && + !FLD_TEST_DRF(OS46, _FLAGS, _ACCESS, _READ_ONLY, pParams->flags)) + { + NV_PRINTF(LEVEL_ERROR, "Attempting to map READ_ONLY surface as READ_WRITE / WRITE_ONLY!\n"); + return NV_ERR_INVALID_ARGUMENT; + } + + pPrivate->hBroadcastDevice = hBroadcastDevice; + pPrivate->pSrcMemDesc = pSrcMemDesc; + pPrivate->bSubdeviceHandleProvided = bSubdeviceHandleProvided; + + return NV_OK; +} + +void +serverInterMap_Epilogue +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pParams, + NvU32 *pReleaseFlags +) +{ + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, pReleaseFlags); +} + +NV_STATUS +serverInterUnmap_Prologue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + OBJGPU *pGpu = NULL; + Device *pDevice = NULL; + Subdevice *pSubdevice = NULL; + + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RsResourceRef *pDeviceRef = pCallContext->pContextRef; + + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + + // Alloc pPrivate if not set, Unmap does not require any input into Prologue + if (pPrivate == NULL) + { + pPrivate = portMemAllocNonPaged(sizeof(*pPrivate)); + if (pPrivate == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pPrivate, 0, sizeof(*pPrivate)); + pParams->pPrivate = pPrivate; + pPrivate->bAllocated = NV_TRUE; + } + + // Set subdevice or device context. 
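/*
 * Illustrative sketch (not part of the original change): the offset/length
 * validation in serverInterMap_Prologue() above rejects both arithmetic
 * wrap-around and ranges that run past the source memory descriptor. As a
 * self-contained predicate with plain C types (names illustrative):
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     // True when [offset, offset + length) lies within a buffer of 'size'
 *     // bytes; the end >= offset comparison rules out 64-bit overflow.
 *     static bool mappingRangeOk(uint64_t offset, uint64_t length, uint64_t size)
 *     {
 *         uint64_t end = offset + length;
 *         return (end >= offset) && (end <= size);
 *     }
 */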
+ pDevice = dynamicCast(pDeviceRef->pResource, Device); + if (pDevice == NULL) + { + pSubdevice = dynamicCast(pDeviceRef->pResource, Subdevice); + if (pSubdevice == NULL) + return NV_ERR_INVALID_OBJECT; + + pGpu = GPU_RES_GET_GPU(pSubdevice); + pPrivate->bcState = gpumgrGetBcEnabledStatus(pGpu); + GPU_RES_SET_THREAD_BC_STATE(pSubdevice); + pPrivate->hBroadcastDevice = RES_GET_HANDLE(pSubdevice->pDevice); + pPrivate->bSubdeviceHandleProvided = NV_TRUE; + pPrivate->gpuMask = NVBIT(gpuGetInstance(pGpu)); + } + else + { + pGpu = GPU_RES_GET_GPU(pDevice); + pPrivate->bcState = gpumgrGetBcEnabledStatus(pGpu); + GPU_RES_SET_THREAD_BC_STATE(pDevice); + pPrivate->hBroadcastDevice = RES_GET_HANDLE(pDevice); + pPrivate->bSubdeviceHandleProvided = NV_FALSE; + pPrivate->gpuMask = gpumgrGetGpuMask(pGpu); + } + + pPrivate->pGpu = pGpu; + + API_GPU_FULL_POWER_SANITY_CHECK(pGpu, NV_FALSE, NV_FALSE); + + return NV_OK; +} + +void +serverInterUnmap_Epilogue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + RS_INTER_UNMAP_PRIVATE *pPrivate = pParams->pPrivate; + OBJGPU *pGpu; + + if (pPrivate == NULL) + return; + + pGpu = pPrivate->pGpu; + + if (pGpu != NULL) + { + gpumgrSetBcEnabledStatus(pGpu, pPrivate->bcState); + } + + if (pPrivate->bAllocated) + { + portMemFree(pPrivate); + pParams->pPrivate = NULL; + } +} + +static NV_STATUS +_rmapiRmUnmapMemoryDma +( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + RS_LOCK_INFO *pLockInfo, + API_SECURITY_INFO *pSecInfo +) +{ + RsClient *pRsClient = NULL; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + Memory *pMemory = NULL; + + RS_INTER_UNMAP_PARAMS params; + RS_INTER_UNMAP_PRIVATE private; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + // Translate hMemory to pMemDesc + if (memGetByHandle(pRsClient, hMemory, &pMemory) == NV_OK) + { + pMemDesc = pMemory->pMemDesc; + } + + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = hClient; + params.hMapper = hMemCtx; + params.hDevice = hDevice; + params.hMappable = hMemory; + params.flags = flags; + params.dmaOffset = dmaOffset; + params.pMemDesc = pMemDesc; + params.pLockInfo = pLockInfo; + params.pSecInfo = pSecInfo; + + portMemSet(&private, 0, sizeof(private)); + params.pPrivate = &private; + + return serverInterUnmap(&g_resServ, ¶ms); +} + +NV_STATUS +rmapiMap +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->MapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, offset, + length, flags, pDmaOffset, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiMapWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_INTER_MAP_PARAMS params; + RS_INTER_MAP_PRIVATE private; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04Map: client:0x%x device:0x%x context:0x%x memory:0x%x flags:0x%x\n", + hClient, hDevice, hMemCtx, hMemory, flags); + NV_PRINTF(LEVEL_INFO, + "Nv04Map: offset:0x%llx length:0x%llx dmaOffset:0x%08llx\n", + offset, length, *pDmaOffset); + + NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04Map 0x%x\n", flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != 
NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + lockInfo.flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK; + + LOCK_METER_DATA(MAPMEM_DMA, flags, 0, 0); + + + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = hClient; + params.hMapper = hMemCtx; + params.hDevice = hDevice; + params.hMappable = hMemory; + params.offset = offset; + params.length = length; + params.flags = flags; + params.dmaOffset = *pDmaOffset; + params.pLockInfo = &lockInfo; + params.pSecInfo = pSecInfo; + + portMemSet(&private, 0, sizeof(private)); + params.pPrivate = &private; + + // map DMA memory + status = serverInterMap(&g_resServ, ¶ms); + + *pDmaOffset = params.dmaOffset; + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04Map: map complete\n"); + NV_PRINTF(LEVEL_INFO, "Nv04Map: dmaOffset: 0x%08llx\n", *pDmaOffset); + } + else + { + NV_PRINTF(LEVEL_ERROR, "Nv04Map: map failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiMapWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvU32 flags, + NvU64 *pDmaOffset, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiMapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, offset, + length, flags, pDmaOffset, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiUnmap +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->UnmapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, + flags, dmaOffset, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiUnmapWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04Unmap: client:0x%x device:0x%x context:0x%x memory:0x%x\n", + hClient, hDevice, hMemCtx, hMemory); + NV_PRINTF(LEVEL_INFO, "Nv04Unmap: flags:0x%x dmaOffset:0x%08llx\n", + flags, dmaOffset); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + lockInfo.flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK | + RM_LOCK_FLAGS_NO_GPUS_LOCK; + + LOCK_METER_DATA(UNMAPMEM_DMA, flags, 0, 0); + + // Unmap DMA memory + status = _rmapiRmUnmapMemoryDma(hClient, hDevice, hMemCtx, hMemory, flags, + dmaOffset, &lockInfo, pSecInfo); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04Unmap: Unmap complete\n"); + } + else + { + NV_PRINTF(LEVEL_ERROR, + "Nv04Unmap: ummap failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiUnmapWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvHandle hMemory, + NvU32 flags, + NvU64 dmaOffset, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + 
threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiUnmapWithSecInfo(pRmApi, hClient, hDevice, hMemCtx, hMemory, flags, dmaOffset, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +serverInterMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverInterUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverUpdateLockFlagsForInterAutoUnmap +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + pParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK | + RM_LOCK_FLAGS_GPU_GROUP_LOCK; + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/rmapi/mapping_cpu.c b/src/nvidia/src/kernel/rmapi/mapping_cpu.c new file mode 100644 index 000000000..35fefa3fe --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/mapping_cpu.c @@ -0,0 +1,1338 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/core.h" +#include "core/locks.h" +#include "core/thread_state.h" +#include "os/os.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "gpu/device/device.h" +#include "gpu/subdevice/generic_engine.h" +#include "gpu/subdevice/subdevice.h" +#include "gpu/mem_mgr/mem_mgr.h" +#include "mem_mgr/fla_mem.h" + +#include "class/cl0000.h" // NV01_NULL_OBJECT + +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" + +#include "class/cl0071.h" // NV01_MEMORY_SYSTEM_OS_DESCRIPTOR +#include "gpu/mem_sys/kern_mem_sys.h" +#include "gpu/bus/kern_bus.h" + +#include "rmapi/rs_utils.h" +#include "rmapi/mapping_list.h" +#include "entry_points.h" + +static void RmUnmapBusAperture (OBJGPU *, NvP64, NvU64, NvBool, NvP64); + +typedef struct RS_CPU_MAP_PARAMS RmMapParams; +typedef struct RS_CPU_UNMAP_PARAMS RmUnmapParams; + +NV_STATUS +rmapiMapGpuCommon +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping, + OBJGPU *pGpu, + NvU32 regionOffset, + NvU32 regionSize +) +{ + NV_STATUS rmStatus; + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + NvU64 offset; + + // Validate the offset and limit passed in. + if (pCpuMapping->offset >= regionSize) + return NV_ERR_INVALID_BASE; + if (pCpuMapping->length == 0) + return NV_ERR_INVALID_LIMIT; + if ((pCpuMapping->offset + pCpuMapping->length > regionSize) || + !portSafeAddU64(pCpuMapping->offset, pCpuMapping->length, &offset)) + return NV_ERR_INVALID_LIMIT; + + if (!portSafeAddU64((NvU64)regionOffset, pCpuMapping->offset, &offset)) + return NV_ERR_INVALID_OFFSET; + + // Create a mapping of BAR0 + rmStatus = osMapGPU(pGpu, + rmclientGetCachedPrivilege(pClient), + offset, + pCpuMapping->length, + pCpuMapping->pPrivate->protect, + &pCpuMapping->pLinearAddress, + &pCpuMapping->pPrivate->pPriv); + return rmStatus; +} + + + +NV_STATUS +rmapiGetEffectiveAddrSpace +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 mapFlags, + NV_ADDRESS_SPACE *pAddrSpace +) +{ + NV_ADDRESS_SPACE addrSpace; + NvBool bDirectSysMappingAllowed = NV_TRUE; + + KernelBus *pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + + NV_ASSERT_OK_OR_RETURN( + kbusIsDirectMappingAllowed_HAL(pGpu, pKernelBus, pMemDesc, mapFlags, + &bDirectSysMappingAllowed)); + + // + // Bug 1482818: Deprecate reflected mappings in production code. + // The usage of reflected writes, in addition to causing several deadlock + // scenarios involving P2P transfers, are disallowed on NVLINK (along with + // reflected reads), and should no longer be used. + // The below PDB property should be unset once the remaining usages in MODS + // have been culled. 
(Bug 1780557) + // + if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && + !bDirectSysMappingAllowed && + (DRF_VAL(OS33, _FLAGS, _MAPPING, mapFlags) != NVOS33_FLAGS_MAPPING_DIRECT) && + !kbusIsReflectedMappingAccessAllowed(pKernelBus)) + { + NV_ASSERT(0); + return NV_ERR_NOT_SUPPORTED; + } + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1)) + { + addrSpace = ADDR_FBMEM; + } + else if ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && + (bDirectSysMappingAllowed || FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, mapFlags) || + (IS_VIRTUAL_WITH_SRIOV(pGpu) && !IS_FMODEL(pGpu) && !IS_RTLSIM(pGpu)))) + { + addrSpace = ADDR_SYSMEM; + } + else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) || + ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && !bDirectSysMappingAllowed)) + { + addrSpace = ADDR_FBMEM; + } + else + { + addrSpace = memdescGetAddressSpace(pMemDesc); + } + + if (pAddrSpace) + *pAddrSpace = addrSpace; + + return NV_OK; +} + +// +// Map memory entry points. +// +NV_STATUS +memMap_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + OBJGPU *pGpu = NULL; + KernelBus *pKernelBus = NULL; + MemoryManager *pMemoryManager = NULL; + KernelMemorySystem *pKernelMemorySystem = NULL; + RsClient *pRsClient; + RmClient *pRmClient; + RsResourceRef *pContextRef; + RsResourceRef *pMemoryRef; + Memory *pMemoryInfo; // TODO: rename this field. pMemoryInfo is the legacy name. + // Name should be clear on how pMemoryInfo different from pMemory + MEMORY_DESCRIPTOR *pMemDesc; + NvP64 priv = NvP64_NULL; + NV_STATUS rmStatus = NV_OK; + NV_ADDRESS_SPACE effectiveAddrSpace; + NvBool bBroadcast; + NvU64 mapLimit; + NvBool bIsSysmem = NV_FALSE; + + NV_ASSERT_OR_RETURN(RMCFG_FEATURE_KERNEL_RM, NV_ERR_NOT_SUPPORTED); + + NV_ASSERT_OR_RETURN(pMapParams->pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + pContextRef = pMapParams->pLockInfo->pContextRef; + if (pContextRef != NULL) + { + NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pContextRef, &bBroadcast, &pGpu)); + gpuSetThreadBcState(pGpu, bBroadcast); + + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + } + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, pMapParams->hClient, &pRsClient)); + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pMapParams->hMemory, &pMemoryRef)); + + pMemoryInfo = dynamicCast(pMemoryRef->pResource, Memory); + NV_ASSERT_OR_RETURN(pMemoryInfo != NULL, NV_ERR_NOT_SUPPORTED); + pMemDesc = pMemoryInfo->pMemDesc; + + if ((pMemoryInfo->categoryClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) && + !(memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM && + RMCFG_FEATURE_PLATFORM_MODS)) + { + return NV_ERR_NOT_SUPPORTED; + } + + // + // PROTECTED memory is memory which is hidden from the CPU and used for + // storing protected content. The CPU is not allowed to read it, but is + // allowed to write it in order to initialize memory allocated within the + // PROTECTED region. + // + // CPU to directly access protected memory is allowed on MODS + // + if ((pMemoryInfo->Flags & NVOS32_ALLOC_FLAGS_PROTECTED) && + (pMapParams->protect != NV_PROTECT_WRITEABLE) && + ! 
RMCFG_FEATURE_PLATFORM_MODS) + { + return NV_ERR_NOT_SUPPORTED; + } + + if (!pMapParams->bKernel && + FLD_TEST_DRF(OS32, _ATTR2, _PROTECTION_USER, _READ_ONLY, pMemoryInfo->Attr2) && + (pMapParams->protect != NV_PROTECT_READABLE)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Validate the offset and limit passed in. + if (pMapParams->offset >= pMemoryInfo->Length) + { + return NV_ERR_INVALID_BASE; + } + if (pMapParams->length == 0) + { + return NV_ERR_INVALID_LIMIT; + } + + // + // See bug #140807 and #150889 - we need to pad memory mappings to past their + // actual allocation size (to PAGE_SIZE+1) because of a buggy ms function so + // skip the allocation size sanity check so the map operation still succeeds. + // + if ((DRF_VAL(OS33, _FLAGS, _SKIP_SIZE_CHECK, pMapParams->flags) == NVOS33_FLAGS_SKIP_SIZE_CHECK_DISABLE) && + (!portSafeAddU64(pMapParams->offset, pMapParams->length, &mapLimit) || + (mapLimit > pMemoryInfo->Length))) + { + return NV_ERR_INVALID_LIMIT; + } + + if (pGpu != NULL) + { + NV_ASSERT_OK_OR_RETURN(rmapiGetEffectiveAddrSpace(pGpu, memdescGetMemDescFromGpu(pMemDesc, pGpu), pMapParams->flags, &effectiveAddrSpace)); + } + else + { + effectiveAddrSpace = ADDR_SYSMEM; + } + + bIsSysmem = (effectiveAddrSpace == ADDR_SYSMEM); + + if (dynamicCast(pMemoryInfo, FlaMemory) != NULL) + { + NV_PRINTF(LEVEL_WARNING, "CPU mapping to FLA memory not allowed\n"); + return NV_ERR_NOT_SUPPORTED; + } + + // + // NVLINK2 ATS: Coherent NVLINK mappings may be returned if the client + // doesn't specifically request PCI-E and if the surface is pitch. + // + if ((pGpu != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) && + (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM)) + { + NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)); + if ((memdescGetPteKind(pMemDesc) == + memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, RM_DEFAULT_PTE_KIND)) && // pitch + (!memdescGetFlag(memdescGetMemDescFromGpu(pMemDesc, pGpu), MEMDESC_FLAGS_ENCRYPTED))) + { + if (pMapParams->bKernel) + { + if (pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) + { + NvP64 tempCpuPtr = kbusMapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc); + if (tempCpuPtr == NULL) + { + rmStatus = NV_ERR_GENERIC; + } + else + { + rmStatus = NV_OK; + tempCpuPtr = NvP64_PLUS_OFFSET(tempCpuPtr, pMapParams->offset); + } + *pMapParams->ppCpuVirtAddr = tempCpuPtr; + + if (rmStatus != NV_OK) + return rmStatus; + } + else + { + rmStatus = osMapSystemMemory(pMemDesc, + pMapParams->offset, + pMapParams->length, + pMapParams->bKernel, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv); + if (rmStatus != NV_OK) + return rmStatus; + } + } + else + { + NV_ASSERT(DRF_VAL(OS33, _FLAGS, _BUS, pMapParams->flags) == NVOS33_FLAGS_BUS_NVLINK_COHERENT); + + // + // Allocating mapping for user mode client + // NOTE: This function intentionally leaves priv uninitialized. + // It simply copies the busAddress [argument 2] into ppCpuVirtAddr. + // During the FD mapping cleanup for bug 1784955, it is expected that + // this function will transition to storing the mapping parameters onto + // the FD. Also note: All mapping parameters are ignored (!). + // + // For now, we're going to return the first page of the nvlink aperture + // mapping of this allocation. See nvidia_mmap_helper for establishment + // of direct mapping. 
+ // + + rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo, + ((NvUPtr)pKernelMemorySystem->coherentCpuFbBase + + (NvUPtr)memdescGetPhysAddr(pMemDesc, + AT_CPU, pMapParams->offset)), + pMapParams->length, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv, + NV_MEMORY_UNCACHED); + if (rmStatus != NV_OK) + return rmStatus; + } + + NV_PRINTF(LEVEL_INFO, + "NVLINK mapping allocated: AtsBase=0x%llx, _pteArray[0]=0x%llx, mappedCpuAddr=0x%llx, length=%d\n", + (NvU64)pKernelMemorySystem->coherentCpuFbBase, + (NvU64)((NvUPtr)pMemDesc->_pteArray[0]), + (*((NvU64 *)(pMapParams->ppCpuVirtAddr))), + (int)pMapParams->length); + + rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping, + pMapParams->bKernel, + priv, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + -1, + -1, + pMapParams->flags); + pCpuMapping->pPrivate->pGpu = pGpu; + + if (rmStatus != NV_OK) + return rmStatus; + + } + else + { + // + // RM should fail gracefully when clients map FB in the Coherent link path with special KIND. + // There is no GMMU in the Coherent link path, only regular KIND(GMK) is supported and other special + // KIND(s) (like encrypted, compressed etc.) are not supported. + // + NV_PRINTF(LEVEL_ERROR, "Need BAR mapping on coherent link! FAIL!!\n"); + return NV_ERR_NOT_SUPPORTED; + } + } + else if (effectiveAddrSpace == ADDR_FBMEM) + { + RmPhysAddr fbAddr = 0; + NvBool bcState = NV_FALSE; + NvU64 gpuVirtAddr = 0; + NvU64 gpuMapLength = 0; + + // + // MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1 indicates a special mapping type of HW registers, + // so map it as device memory (uncached). + // + NvU32 cachingType = NV_MEMORY_WRITECOMBINED; + if (pMemDesc != NULL && !memdescHasSubDeviceMemDescs(pMemDesc)) + { + cachingType = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_MAP_SYSCOH_OVER_BAR1) ? + NV_MEMORY_UNCACHED : NV_MEMORY_WRITECOMBINED; + } + + if (!kbusIsBar1PhysicalModeEnabled(pKernelBus)) + { + // + // For Multi-Board, the BC area has a NULL address range. So we have + // to bring in the master. + // + bcState = gpumgrGetBcEnabledStatus(pGpu); + if (bcState) + { + pGpu = gpumgrGetParentGPU(pGpu); + gpumgrSetBcEnabledStatus(pGpu, NV_FALSE); + } + + // + // Allocate a GPU virtual address space for the video memory region + // for those GPUs that support it. + // + + gpuMapLength = pMapParams->length; + + // + // If client ask for Direct mapping , we cannot do much here but just + // simulate as it is non encrypted surface. + // It is currently totaly for testing purpose. 
+ // + NV_ASSERT(pGpu->busInfo.gpuPhysFbAddr); + + { + // Below, we only map one GPU's address for CPU access, so we can use UNICAST here + NvU32 busMapFbFlags = BUS_MAP_FB_FLAGS_MAP_UNICAST; + if(DRF_VAL(OS33, _FLAGS, _MAPPING, pMapParams->flags) == NVOS33_FLAGS_MAPPING_DIRECT) + { + busMapFbFlags |= BUS_MAP_FB_FLAGS_DISABLE_ENCRYPTION; + } + switch (pMapParams->protect) + { + case NV_PROTECT_READABLE: + busMapFbFlags |= BUS_MAP_FB_FLAGS_READ_ONLY; + break; + case NV_PROTECT_WRITEABLE: + busMapFbFlags |= BUS_MAP_FB_FLAGS_WRITE_ONLY; + break; + } + + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, pGpu); + + rmStatus = kbusMapFbAperture_HAL(pGpu, pKernelBus, + pMemDesc, pMapParams->offset, + &gpuVirtAddr, &gpuMapLength, + busMapFbFlags, pMapParams->hClient); + } + + if (rmStatus != NV_OK) + goto _rmMapMemory_busFail; + } + else + { + NV_ASSERT_OR_RETURN(memdescGetContiguity(pMemDesc, AT_GPU), + NV_ERR_NOT_SUPPORTED); + + fbAddr = gpumgrGetGpuPhysFbAddr(pGpu) + memdescGetPte(pMemDesc, AT_GPU, 0) + + memdescGetPteAdjust(pMemDesc) + pMapParams->offset; + } + + if (pMapParams->bKernel) + { + rmStatus = osMapPciMemoryKernel64(pGpu, + (kbusIsBar1PhysicalModeEnabled(pKernelBus)? + fbAddr: gpumgrGetGpuPhysFbAddr(pGpu) + gpuVirtAddr), + pMapParams->length, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + cachingType); + } + else + { + rmStatus = osMapPciMemoryUser(pGpu->pOsGpuInfo, + (kbusIsBar1PhysicalModeEnabled(pKernelBus)? + fbAddr: gpumgrGetGpuPhysFbAddr(pGpu) + gpuVirtAddr), + pMapParams->length, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv, + cachingType); + } + + // + // It's possible that NVOS33_FLAGS_MAPPING is set to NVOS33_FLAGS_MAPPING_DIRECT + // at this point--set it to REFLECTED to indicate that we aren't using + // direct mapping. + // + pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _REFLECTED, pMapParams->flags); + + if (rmStatus != NV_OK) + goto _rmMapMemory_pciFail; + + rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping, + pMapParams->bKernel, + priv, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + kbusIsBar1PhysicalModeEnabled(pKernelBus) + ? (NvU64)-1 + : gpuVirtAddr, + kbusIsBar1PhysicalModeEnabled(pKernelBus) + ? (NvU64)-1 + : gpuMapLength, + pMapParams->flags); + pCpuMapping->pPrivate->pGpu = pGpu; + + if (rmStatus != NV_OK) + { + RmUnmapBusAperture(pGpu, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + pMapParams->bKernel, + priv); + _rmMapMemory_pciFail: + if (!kbusIsBar1PhysicalModeEnabled(pKernelBus)) + { + kbusUnmapFbAperture_HAL(pGpu, + pKernelBus, + pMemDesc, + gpuVirtAddr, + gpuMapLength, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + _rmMapMemory_busFail: + gpumgrSetBcEnabledStatus(pGpu, bcState); + } + } + } + else + if (bIsSysmem) + { + // A client can specify not to map memory by default when + // calling into RmAllocMemory. In those cases, we don't have + // a mapping yet, so go ahead and map it for the client now. 
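+        //
+        // memdescMap() below returns both the CPU virtual address and an
+        // opaque 'priv' cookie. Both are recorded on the RsCpuMapping via
+        // CliUpdateMemoryMappingInfo() so that memUnmap_IMPL() can later hand
+        // the same cookie back to memdescUnmap().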
+ rmStatus = memdescMap(pMemDesc, + pMapParams->offset, + pMapParams->length, + pMapParams->bKernel, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv); + + // Associate this mapping with the client + if (rmStatus == NV_OK && *(pMapParams->ppCpuVirtAddr)) + { + pMapParams->flags = FLD_SET_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags); + rmStatus = CliUpdateMemoryMappingInfo(pCpuMapping, + pMapParams->bKernel, + *(pMapParams->ppCpuVirtAddr), + priv, + pMapParams->length, + pMapParams->flags); + pCpuMapping->pPrivate->pGpu = pGpu; + } + } + else if (effectiveAddrSpace == ADDR_VIRTUAL) + { + rmStatus = NV_ERR_NOT_SUPPORTED; + } + else if (effectiveAddrSpace == ADDR_REGMEM) + { + RS_PRIV_LEVEL privLevel; + + pRmClient = dynamicCast(pRsClient, RmClient); + if (pRmClient == NULL) + return NV_ERR_OPERATING_SYSTEM; + + privLevel = rmclientGetCachedPrivilege(pRmClient); + if (!rmclientIsAdmin(pRmClient, privLevel) && !memdescGetFlag(pMemDesc, MEMDESC_FLAGS_SKIP_REGMEM_PRIV_CHECK)) + return NV_ERR_PROTECTION_FAULT; + + if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pMapParams->flags) == NVOS33_FLAGS_MEM_SPACE_USER) + { + privLevel = RS_PRIV_LEVEL_USER; + } + + // Create a mapping of BAR0 + rmStatus = osMapGPU(pGpu, + privLevel, + pMapParams->offset + pMemDesc-> _pteArray[0], + pMapParams->length, + pMapParams->protect, + pMapParams->ppCpuVirtAddr, + &priv); + if (rmStatus != NV_OK) + return rmStatus; + + // Save off the mapping + rmStatus = CliUpdateDeviceMemoryMapping(pCpuMapping, + pMapParams->bKernel, + priv, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + -1, // gpu virtual addr + -1, // gpu map length + pMapParams->flags); + pCpuMapping->pPrivate->pGpu = pGpu; + + if (rmStatus != NV_OK) + { + osUnmapGPU(pGpu->pOsGpuInfo, + privLevel, + *(pMapParams->ppCpuVirtAddr), + pMapParams->length, + priv); + return rmStatus; + } + } + else + { + return NV_ERR_INVALID_CLASS; + } + + if (rmStatus == NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "%s created. CPU Virtual Address: " NvP64_fmt "\n", + FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pMapParams->flags) ? 
"Direct mapping" : "Mapping", + *(pMapParams->ppCpuVirtAddr)); + } + + return rmStatus; +} + +NV_STATUS +memUnmap_IMPL +( + Memory *pMemory, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + RmClient *pClient = dynamicCast(pCallContext->pClient, RmClient); + OBJGPU *pGpu = pCpuMapping->pPrivate->pGpu; + MEMORY_DESCRIPTOR *pMemDesc = pMemory->pMemDesc; + + KernelBus *pKernelBus = NULL; + MemoryManager *pMemoryManager = NULL; + + if (pGpu != NULL) + { + pKernelBus = GPU_GET_KERNEL_BUS(pGpu); + pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + } + + if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, pCpuMapping->flags)) + { + // Nothing more to do + } + else if ((pGpu != NULL) && pGpu->getProperty(pGpu, PDB_PROP_GPU_COHERENT_CPU_MAPPING) && + (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM)) + { + NV_ASSERT(pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)); + NV_ASSERT((memdescGetPteKind(pMemDesc) == + memmgrGetHwPteKindFromSwPteKind_HAL(pGpu, pMemoryManager, RM_DEFAULT_PTE_KIND)) && // pitch + (!memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED))); + + if (pCpuMapping->pPrivate->bKernel) + { + if(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS) + { + NV_ASSERT(pMemDesc->_flags & MEMDESC_FLAGS_PHYSICALLY_CONTIGUOUS); + kbusUnmapCoherentCpuMapping_HAL(pGpu, pKernelBus, pMemDesc); + } + else + { + osUnmapSystemMemory(pMemDesc, + pCpuMapping->pPrivate->bKernel, + pCpuMapping->processId, + pCpuMapping->pLinearAddress, + pCpuMapping->pPrivate->pPriv); + } + } + + NV_PRINTF(LEVEL_INFO, + "Unmapping from NVLINK handle = 0x%x, addr= 0x%llx\n", + RES_GET_HANDLE(pMemory), (NvU64)pCpuMapping->pLinearAddress); + + // + // No BAR aperture mapping to delete. + // No kernel mapping to remove + // User-space will call munmap to eliminate PTE mappings + // + } + // System Memory case + else if ((pGpu == NULL) || ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && + FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags))) + { + if (FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _DIRECT, pCpuMapping->flags)) + { + memdescUnmap(pMemDesc, + pCpuMapping->pPrivate->bKernel, + pCpuMapping->processId, + pCpuMapping->pLinearAddress, + pCpuMapping->pPrivate->pPriv); + } + } + else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) || + ((memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM) && + FLD_TEST_DRF(OS33, _FLAGS, _MAPPING, _REFLECTED, pCpuMapping->flags))) + { + RmUnmapBusAperture(pGpu, + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->bKernel, + pCpuMapping->pPrivate->pPriv); + + if (!kbusIsBar1PhysicalModeEnabled(pKernelBus)) + { + { + kbusUnmapFbAperture_HAL(pGpu, pKernelBus, + pMemory->pMemDesc, + pCpuMapping->pPrivate->gpuAddress, + pCpuMapping->pPrivate->gpuMapLength, + BUS_MAP_FB_FLAGS_MAP_UNICAST); + } + } + } + else if (memdescGetAddressSpace(pMemDesc) == ADDR_VIRTUAL) + { + // If the memory is tiled, then it's being mapped through BAR1 + if( DRF_VAL(OS32, _ATTR, _TILED, pMemory->Attr) ) + { + // BAR1 mapping. Unmap it. 
+ if (pCpuMapping->pPrivate->bKernel) + { + osUnmapPciMemoryKernel64(pGpu, pCpuMapping->pLinearAddress); + } + else + { + osUnmapPciMemoryUser(pGpu->pOsGpuInfo, + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->pPriv); + } + } + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_STATE); + } + } + else if (memdescGetAddressSpace(pMemDesc) == ADDR_REGMEM) + { + osUnmapGPU(pGpu->pOsGpuInfo, + rmclientGetCachedPrivilege(pClient), + pCpuMapping->pLinearAddress, + pCpuMapping->length, + pCpuMapping->pPrivate->pPriv); + } + return NV_OK; +} + +NV_STATUS +rmapiValidateKernelMapping +( + RS_PRIV_LEVEL privLevel, + NvU32 flags, + NvBool *pbKernel +) +{ + NvBool bKernel; + NV_STATUS status = NV_OK; + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + // only kernel clients should be specifying the user mapping flags + if (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_USER) + status = NV_ERR_INVALID_FLAGS; + bKernel = NV_FALSE; + } + else + { + // + // Kernel clients can only use the persistent flag if they are + // doing a user mapping. + // + bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT); + } + + // OS descriptor will already be mapped + if (FLD_TEST_DRF(OS33, _FLAGS, _OS_DESCRIPTOR, _ENABLE, flags)) + status = NV_ERR_INVALID_FLAGS; + + if (pbKernel != NULL) + *pbKernel = bKernel; + + return status; +} + +NV_STATUS +serverMap_Prologue +( + RsServer *pServer, RS_CPU_MAP_PARAMS *pMapParams +) +{ + NV_STATUS rmStatus; + RsClient *pRsClient; + RmClient *pRmClient; + RsResourceRef *pMemoryRef; + NvHandle hClient = pMapParams->hClient; + NvHandle hParent = hClient; + NvHandle hSubDevice = NV01_NULL_OBJECT; + NvBool bClientAlloc = (hClient == pMapParams->hDevice); + NvU32 flags = pMapParams->flags; + RS_PRIV_LEVEL privLevel; + + // Persistent sysmem mapping support is no longer supported + if (DRF_VAL(OS33, _FLAGS, _PERSISTENT, flags) == NVOS33_FLAGS_PERSISTENT_ENABLE) + return NV_ERR_INVALID_FLAGS; + + // Populate Resource Server information + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + // Validate hClient + pRmClient = dynamicCast(pRsClient, RmClient); + if (pRmClient == NULL) + return NV_ERR_OPERATING_SYSTEM; + privLevel = rmclientGetCachedPrivilege(pRmClient); + + // RS-TODO: Assert if this fails after all objects are converted + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, pMapParams->hMemory, &pMemoryRef)); + + if (pMemoryRef->pParentRef != NULL) + hParent = pMemoryRef->pParentRef->hResource; + + // check if we have a user or kernel RM client + rmStatus = rmapiValidateKernelMapping(privLevel, flags, &pMapParams->bKernel); + if (rmStatus != NV_OK) + return rmStatus; + + // + // First check to see if it is a standard device or the BC region of + // a MC adapter. 
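+    //
+    // Mappings made directly against the client (hClient == hDevice) need no
+    // GPU locks, so only RM_LOCK_FLAGS_NO_GPUS_LOCK is set. Otherwise the
+    // hDevice argument is resolved to a Device or Subdevice reference,
+    // RM_LOCK_FLAGS_GPU_GROUP_LOCK is added, and the resolved reference is
+    // stored as pLockInfo->pContextRef so the right GPU group can be locked.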
+ // + pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + if (!bClientAlloc) + { + NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT); + + RsResourceRef *pContextRef; + rmStatus = clientGetResourceRef(pRsClient, pMapParams->hDevice, &pContextRef); + if (rmStatus != NV_OK) + return rmStatus; + + if (pContextRef->internalClassId == classId(Device)) + { + } + else if (pContextRef->internalClassId == classId(Subdevice)) + { + hSubDevice = pMapParams->hDevice; + pMapParams->hDevice = pContextRef->pParentRef->hResource; + } + else + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + pMapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pMapParams->pLockInfo->pContextRef = pContextRef; + } + else + { + NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT); + } + + pMapParams->hContext = (hSubDevice != NV01_NULL_OBJECT) + ? hSubDevice + : pMapParams->hDevice; + + + // convert from OS33 flags to RM's memory protection flags + switch (DRF_VAL(OS33, _FLAGS, _ACCESS, flags)) + { + case NVOS33_FLAGS_ACCESS_READ_WRITE: + pMapParams->protect = NV_PROTECT_READ_WRITE; + break; + case NVOS33_FLAGS_ACCESS_READ_ONLY: + pMapParams->protect = NV_PROTECT_READABLE; + break; + case NVOS33_FLAGS_ACCESS_WRITE_ONLY: + pMapParams->protect = NV_PROTECT_WRITEABLE; + break; + default: + return NV_ERR_INVALID_FLAGS; + } + + return NV_OK; +} + +NV_STATUS +serverUnmap_Prologue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + OBJGPU *pGpu = NULL; + NV_STATUS rmStatus; + RsClient *pRsClient; + RmClient *pRmClient; + RsResourceRef *pMemoryRef; + NvHandle hClient = pUnmapParams->hClient; + NvHandle hParent = hClient; + NvHandle hMemory = pUnmapParams->hMemory; + NvBool bClientAlloc = (pUnmapParams->hDevice == pUnmapParams->hClient); + NvBool bKernel; + NvBool bBroadcast; + NvU32 ProcessId = pUnmapParams->processId; + RS_PRIV_LEVEL privLevel; + void *pProcessHandle = NULL; + + // Populate Resource Server information + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, hClient, &pRsClient)); + + // check if we have a user or kernel RM client + pRmClient = dynamicCast(pRsClient, RmClient); + if (pRmClient == NULL) + return NV_ERR_OPERATING_SYSTEM; + privLevel = rmclientGetCachedPrivilege(pRmClient); + + // RS-TODO: Assert if this fails after all objects are converted + NV_ASSERT_OK_OR_RETURN(clientGetResourceRef(pRsClient, hMemory, &pMemoryRef)); + + if (pMemoryRef->pParentRef != NULL) + hParent = pMemoryRef->pParentRef->hResource; + + // + // First check to see if it is a standard device or the BC region of + // a MC adapter. 
+ // + pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + if (!bClientAlloc) + { + NV_ASSERT_OR_RETURN(hParent != hClient, NV_ERR_INVALID_OBJECT_PARENT); + + RsResourceRef *pContextRef; + rmStatus = clientGetResourceRef(pRsClient, pUnmapParams->hDevice, &pContextRef); + if (rmStatus != NV_OK) + return rmStatus; + + if (pContextRef->internalClassId == classId(Subdevice)) + { + pUnmapParams->hDevice = pContextRef->pParentRef->hResource; + } + else if (pContextRef->internalClassId != classId(Device)) + { + return NV_ERR_INVALID_OBJECT_PARENT; + } + + pUnmapParams->pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + pUnmapParams->pLockInfo->pContextRef = pContextRef; + NV_ASSERT_OK_OR_RETURN(gpuGetByRef(pUnmapParams->pLockInfo->pContextRef, &bBroadcast, &pGpu)); + gpuSetThreadBcState(pGpu, bBroadcast); + } + else + { + NV_ASSERT_OR_RETURN(hParent == hClient, NV_ERR_INVALID_OBJECT_PARENT); + } + + // Decide what sort of mapping it is, user or kernel + if (privLevel < RS_PRIV_LEVEL_KERNEL) + { + bKernel = NV_FALSE; + } + else + { + bKernel = (DRF_VAL(OS33, _FLAGS, _MEM_SPACE, pUnmapParams->flags) == NVOS33_FLAGS_MEM_SPACE_CLIENT); + } + + // + // If it's a user mapping, and we're not currently in the same process that + // it's mapped into, then attempt to attach to the other process first. + // + if (!bKernel && (ProcessId != osGetCurrentProcess())) + { + rmStatus = osAttachToProcess(&pProcessHandle, ProcessId); + if (rmStatus != NV_OK) + return rmStatus; + + pUnmapParams->pProcessHandle = pProcessHandle; + } + + pUnmapParams->fnFilter = bKernel + ? serverutilMappingFilterKernel + : serverutilMappingFilterCurrentUserProc; + + return NV_OK; +} + +void +serverUnmap_Epilogue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + // do we need to detach? + if (pUnmapParams->pProcessHandle != NULL) + { + osDetachFromProcess(pUnmapParams->pProcessHandle); + pUnmapParams->pProcessHandle = NULL; + } +} + +void RmUnmapBusAperture +( + OBJGPU *pGpu, + NvP64 pCpuVirtualAddress, + NvU64 length, + NvBool bKernel, + NvP64 pPrivateData +) +{ + if (bKernel) + { + osUnmapPciMemoryKernel64(pGpu, pCpuVirtualAddress); + } + else + { + osUnmapPciMemoryUser(pGpu->pOsGpuInfo, pCpuVirtualAddress, length, pPrivateData); + } +} + +NV_STATUS +rmapiMapToCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + void **ppCpuVirtAddr, + NvU32 flags +) +{ + NvP64 pCpuVirtAddrNvP64 = NvP64_NULL; + NV_STATUS status; + + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + status = pRmApi->MapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length, + &pCpuVirtAddrNvP64, flags, &pRmApi->defaultSecInfo); + + if (ppCpuVirtAddr) + *ppCpuVirtAddr = NvP64_VALUE(pCpuVirtAddrNvP64); + + return status; +} + + +/** + * Call into Resource Server to register and execute a CPU mapping operation. + * + * Resource Server will: + * 1. Callback into RM (serverMap_Prologue) to set up mapping parameters, mapping context object, + * and locking requirements + * 2. Take locks (if required) + * 3. Allocate and register a RsCpuMapping book-keeping entry on the target object's RsResourceRef + * 4. Call the target object's mapping virtual function (xxxMap_IMPL, defined in RM) + * 5. Setup back-references to the mapping context object (if required.) This mapping will automatically + * be unmapped if either the target object or mapping context object are freed. + * 6. 
Release any locks taken + */ +NV_STATUS +rmapiMapToCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RmMapParams rmMapParams; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: client:0x%x device:0x%x memory:0x%x\n", hClient, + hDevice, hMemory); + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: offset: %llx length: %llx flags:0x%x\n", + offset, length, flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + NV_PRINTF(LEVEL_INFO, "MMU_PROFILER Nv04MapMemory 0x%x\n", flags); + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + LOCK_METER_DATA(MAPMEM, flags, 0, 0); + + // clear params for good measure + portMemSet(&rmMapParams, 0, sizeof (rmMapParams)); + + // load user args + rmMapParams.hClient = hClient; + rmMapParams.hDevice = hDevice; + rmMapParams.hMemory = hMemory; + rmMapParams.offset = offset; + rmMapParams.length = length; + rmMapParams.ppCpuVirtAddr = ppCpuVirtAddr; + rmMapParams.flags = flags; + rmMapParams.pLockInfo = &lockInfo; + rmMapParams.pSecInfo = pSecInfo; + + status = serverMap(&g_resServ, rmMapParams.hClient, rmMapParams.hMemory, &rmMapParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04MapMemory: complete\n"); + NV_PRINTF(LEVEL_INFO, + "Nv04MapMemory: *ppCpuVirtAddr:" NvP64_fmt "\n", + *ppCpuVirtAddr); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04MapMemory: map failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiMapToCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvU64 offset, + NvU64 length, + NvP64 *ppCpuVirtAddr, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiMapToCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, offset, length, ppCpuVirtAddr, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +rmapiUnmapFromCpu +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + void *pLinearAddress, + NvU32 flags, + NvU32 ProcessId +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->UnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, NV_PTR_TO_NvP64(pLinearAddress), + flags, ProcessId, &pRmApi->defaultSecInfo); +} + +/** + * Call into Resource Server to execute a CPU unmapping operation. + * + * Resource Server will: + * 1. Callback into RM (serverUnmap_Prologue) to set up unmapping parameters, locking requirements, + * and attempt to attach to the mapping's user process (for user mappings only) + * 2. Take locks (if required) + * 3. Lookup the mapping + * 4. Call the target object's unmapping virtual function (xxxUnmap_IMPL, defined in RM) + * 5. Unregister the mapping from its back-references, and free the mapping + * 6. Callback into RM (serverUnmap_Epilogue) to detach from the mapping's user process (if required) + * 7. 
Release any locks taken + */ +NV_STATUS +rmapiUnmapFromCpuWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RmUnmapParams rmUnmapParams; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04UnmapMemory: client:0x%x device:0x%x memory:0x%x pLinearAddr:" NvP64_fmt " flags:0x%x\n", + hClient, hDevice, hMemory, pLinearAddress, flags); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + return status; + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + LOCK_METER_DATA(UNMAPMEM, flags, 0, 0); + + portMemSet(&rmUnmapParams, 0, sizeof (rmUnmapParams)); + rmUnmapParams.hClient = hClient; + rmUnmapParams.hDevice = hDevice; + rmUnmapParams.hMemory = hMemory; + rmUnmapParams.pLinearAddress = pLinearAddress; + rmUnmapParams.flags = flags; + rmUnmapParams.processId = ProcessId; + rmUnmapParams.pLockInfo = &lockInfo; + rmUnmapParams.pSecInfo = pSecInfo; + + status = serverUnmap(&g_resServ, hClient, hMemory, &rmUnmapParams); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "Nv04UnmapMemory: unmap complete\n"); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04UnmapMemory: unmap failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + } + + return status; +} + +NV_STATUS +rmapiUnmapFromCpuWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemory, + NvP64 pLinearAddress, + NvU32 flags, + NvU32 ProcessId, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiUnmapFromCpuWithSecInfo(pRmApi, hClient, hDevice, hMemory, pLinearAddress, + flags, ProcessId, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +serverMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +refAllocCpuMappingPrivate +( + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + pCpuMapping->pPrivate = portMemAllocNonPaged(sizeof(RS_CPU_MAPPING_PRIVATE)); + if (pCpuMapping->pPrivate == NULL) + return NV_ERR_NO_MEMORY; + + pCpuMapping->pPrivate->protect = pMapParams->protect; + pCpuMapping->pPrivate->bKernel = pMapParams->bKernel; + + return NV_OK; +} + +void +refFreeCpuMappingPrivate +( + RsCpuMapping *pCpuMapping +) +{ + portMemFree(pCpuMapping->pPrivate); +} diff --git a/src/nvidia/src/kernel/rmapi/mapping_list.c b/src/nvidia/src/kernel/rmapi/mapping_list.c new file mode 100644 index 000000000..a7d834928 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/mapping_list.c @@ -0,0 +1,668 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/core.h" +#include "os/os.h" +#include "mem_mgr/vaspace.h" +#include "gpu/mem_mgr/mem_desc.h" +#include "rmapi/mapping_list.h" +#include "resserv/rs_server.h" +#include "mem_mgr/virtual_mem.h" +#include "mem_mgr/mem.h" +#include "resserv/rs_client.h" +#include "gpu/mem_mgr/virt_mem_allocator_common.h" +#include "gpu/mmu/kern_gmmu.h" + +#include "gpu/device/device.h" +#include "rmapi/rs_utils.h" + +// **************************************************************************** +// Client Memory Mappings +// **************************************************************************** + +RsCpuMapping* +CliFindMappingInClient +( + NvHandle hClient, + NvHandle hDevice, + NvP64 cpuAddress +) +{ + RsClient *pRsClient; + RsResourceRef *pDeviceRef; + RS_ORDERED_ITERATOR it; + + NvU32 processId; + CALL_CONTEXT *pCallContext; + NvBool bKernel; + + if ((serverGetClientUnderLock(&g_resServ, hClient, &pRsClient) != NV_OK) || + (clientGetResourceRef(pRsClient, hDevice, &pDeviceRef) != NV_OK)) + { + return NULL; + } + + pCallContext = resservGetTlsCallContext(); + + // This function only called from control call; call context should be available. + NV_ASSERT_OR_RETURN(pCallContext != NULL, NULL); + + bKernel = (pCallContext->secInfo.privLevel >= RS_PRIV_LEVEL_KERNEL); + processId = osGetCurrentProcess(); + + // iterator will match derived classes + it = clientRefOrderedIter(pRsClient, pDeviceRef, classId(Memory), NV_FALSE); + while (clientRefOrderedIterNext(pRsClient, &it)) + { + RsResourceRef *pMemoryRef = it.pResourceRef; + RsCpuMappingListIter mapIt = listIterAll(&pMemoryRef->cpuMappings); + + while (listIterNext(&mapIt)) + { + RsCpuMapping *pMapping = mapIt.pValue; + if ((pMapping->pPrivate->bKernel == bKernel) && + (bKernel || (pMapping->processId == processId)) && + (pMapping->pPrivate->gpuAddress != ((NvU64) ~0x0)) && + (pMapping->pLinearAddress <= cpuAddress) && + ((NvU64)cpuAddress < ((NvU64)pMapping->pLinearAddress + pMapping->length))) + { + return pMapping; + } + } + } + + return NULL; +} + + + +// **************************************************************************** +// DMA Mappings +// **************************************************************************** + +// +// allocates/initializes a new CLI_DMA_MAPPING_INFO. +// +// Ideally, we would know the dmaOffset by now but we typically don't. 
Thus the caller needs +// to call intermapRegisterDmaMapping() to record the dma mapping at the proper hDevice/dmaOffset location +// +NV_STATUS +intermapCreateDmaMapping +( + RsClient *pClient, + RsResourceRef *pMemoryRef, + NvHandle hDevice, + NvHandle hMemCtx, + PCLI_DMA_MAPPING_INFO *ppDmaMapping, + NvU32 flags +) +{ + VirtualMemory *pVirtualMemory; + Memory *pMemory = NULL; + PCLI_DMA_MAPPING_INFO pDmaMapping; + OBJVASPACE *pVAS = NULL; + + // Mapping is always virtual memory object + if (memGetByHandleAndDevice(pClient, hMemCtx, hDevice, &pMemory) != NV_OK) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + pVirtualMemory = dynamicCast(pMemory, VirtualMemory); + + if ((pMemory->pMemDesc == NULL) || (pVirtualMemory == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (vaspaceGetByHandleOrDeviceDefault(pClient, hDevice, pVirtualMemory->hVASpace, &pVAS) != NV_OK) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // allocate a new mapping info struct and add to the dma mapping object + pDmaMapping = portMemAllocNonPaged(sizeof(CLI_DMA_MAPPING_INFO)); + if (NULL == pDmaMapping) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + // initialize the dma mapping info (not registered yet) + portMemSet(pDmaMapping, 0, sizeof(CLI_DMA_MAPPING_INFO)); + pDmaMapping->hDevice = hDevice; + pDmaMapping->DmaOffset = 0; + pDmaMapping->pP2PInfo = NULL; + pDmaMapping->Flags = flags; // NV0S46_* + pDmaMapping->addressTranslation = VAS_ADDRESS_TRANSLATION(pVAS); + + *ppDmaMapping = pDmaMapping; + return NV_OK; +} + +// +// registers/stores a pDmaMapping created by intermapCreateDmaMapping() at the hDevice/dmaOffset. +// +// important: we assume the hDevice/dmaOffset does NOT change (needs to be re-registerd) +// +NV_STATUS +intermapRegisterDmaMapping +( + RsClient *pClient, + NvHandle hDevice, + NvHandle hMemCtx, + PCLI_DMA_MAPPING_INFO pDmaMapping, + NvU64 dmaOffset, + NvU32 gpuMask +) +{ + NV_STATUS rmStatus = NV_OK; + VirtualMemory *pVirtualMemory = NULL; + PNODE pNode; + PNODE *ppDmaMappingList; + PCLI_DMA_MAPPING_INFO pDmaMappingFirst, pDmaMappingNext; + + // eventually remove Next/Prev once all other linear list based on PCLI_DMA_MAPPING_INFO are gone... + NV_ASSERT(!pDmaMapping->Next && !pDmaMapping->Prev); + + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + virtmemGetByHandleAndDevice(pClient, hMemCtx, hDevice, &pVirtualMemory)); + + // the top level consists of lists sorted by hDevice - already created the hDevice specific list? + if (btreeSearch(hDevice, &pNode, pVirtualMemory->pDmaMappingList) != NV_OK) + { + // create a NODE for all pDmaMappings of this hDevice + pNode = portMemAllocNonPaged(sizeof(NODE)); + if (NULL == pNode) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + portMemSet(pNode, 0, sizeof(NODE)); + pNode->keyStart = hDevice; + pNode->keyEnd = hDevice; + pNode->Data = NULL; + + // register the hDevice list itself + rmStatus = btreeInsert(pNode, &pVirtualMemory->pDmaMappingList); + if (rmStatus != NV_OK) + { + portMemFree(pNode); + return rmStatus; + } + } + + NV_ASSERT(pNode); + ppDmaMappingList = (PNODE*)&pNode->Data; + + pDmaMapping->gpuMask = gpuMask; + + // the second level consists of CLI_DMA_MAPPING_INFO sorted by dmaOffset - + if (DRF_VAL(OS46, _FLAGS, _DMA_UNICAST_REUSE_ALLOC, pDmaMapping->Flags) == + NVOS46_FLAGS_DMA_UNICAST_REUSE_ALLOC_FALSE) + { + NvU64 alignment = 0; + + if (pDmaMapping->pMemDesc->pGpu != NULL) + { + OBJGPU *pGpu = pDmaMapping->pMemDesc->pGpu; + // + // For verify purposes we should allow small page override for mapping. 
+ // This will be used for testing VASpace interop. + // However, this info is not captured in the DMA mapping info for guest. + // So explicitly check for this case in guest. + // + if (IS_VIRTUAL_WITH_SRIOV(pGpu) + && RMCFG_FEATURE_PLATFORM_MODS + && FLD_TEST_DRF(OS46, _FLAGS, _PAGE_SIZE, _4KB, pDmaMapping->Flags) + && kgmmuIsVaspaceInteropSupported(GPU_GET_KERNEL_GMMU(pGpu)) + ) + { + alignment = RM_PAGE_SIZE; + } + else + { + alignment = (NvU64)memdescGetPageSize(memdescGetMemDescFromGpu(pDmaMapping->pMemDesc, pGpu), + pDmaMapping->addressTranslation); + } + } + + // + // In some cases page size may not be set (e.g. NV50, AMODEL, VGPU). + // Ideally we should fix all paths for consistency, but for now + // allowing fallback to unaligned tracking (no worse than before). + // + // TODO: Revisit this with memdesc page size cleanup. + // + if (alignment == 0) + { + alignment = 1; + } + + // create the node for this dmaOffset + pNode = portMemAllocNonPaged(sizeof(NODE)); + if (NULL == pNode) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + portMemSet(pNode, 0, sizeof(NODE)); + + // + // For normal GPU devices, track the mapping over its entire + // virtual range so overlapping mappings are caught. + // + // keyStart and keyEnd must be aligned to the physical page size to + // ensure no page can be mapped twice. + // (e.g. small pages mapped into the leftovers of a big page). + // + // NOTE: Unfortunately this check occurs after the internal mapping + // has already taken place, so the state is already corrupted. + // Failure here means "Oops, your're screwed." + // + // For Fermi+ we have added checks in the internal mapping code + // that will fail safely. + // + pNode->keyStart = RM_ALIGN_DOWN(dmaOffset, alignment); + pNode->keyEnd = RM_ALIGN_UP(dmaOffset + pDmaMapping->pMemDesc->Size, + alignment) - 1; + pNode->Data = pDmaMapping; + + // register the 'dmaOffset' list itself + rmStatus = btreeInsert(pNode, ppDmaMappingList); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to insert new mapping node for range 0x%llX-0x%llX!\n", + pNode->keyStart, pNode->keyEnd); + DBG_BREAKPOINT(); + portMemFree(pNode); + return rmStatus; + } + } + else + { + // The node for this DMA offset should be already created + rmStatus = btreeSearch(dmaOffset, &pNode, *ppDmaMappingList); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to find existing mapping node for offset 0x%llX!\n", + dmaOffset); + DBG_BREAKPOINT(); + return rmStatus; + } + + NV_ASSERT(pNode); + pDmaMappingFirst = (PCLI_DMA_MAPPING_INFO)pNode->Data; + + // check that we do not exceed the original mapping length + if (pDmaMapping->pMemDesc->Size > pDmaMappingFirst->pMemDesc->Size) + { + NV_PRINTF(LEVEL_ERROR, + "Mapping length 0x%llX exceeds existing mapping length of 0x%llX!\n", + pDmaMapping->pMemDesc->Size, + pDmaMappingFirst->pMemDesc->Size); + DBG_BREAKPOINT(); + return NV_ERR_INVALID_LIMIT; + } + + // Insert the gpuMask element to the list + pDmaMapping->Next = pDmaMappingFirst; + pDmaMappingFirst->Prev = pDmaMapping; + pNode->Data = pDmaMapping; + + // Change the other mappings to remove this gpuMask from them + pDmaMapping = pDmaMapping->Next; + while (pDmaMapping) + { + pDmaMappingNext = pDmaMapping->Next; + if (pDmaMapping->gpuMask & gpuMask) + { + pDmaMapping->gpuMask &= ~gpuMask; + if (pDmaMapping->gpuMask == 0) + { + // free the pDmaMapping itself + intermapFreeDmaMapping(pDmaMapping); + } + } + pDmaMapping = pDmaMappingNext; + } + } + + return NV_OK; +} + +NV_STATUS +intermapDelDmaMapping +( + 
RsClient *pClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvU64 dmaOffset, + NvU32 gpuMask, + NvBool *pbUnmapped +) +{ + NV_STATUS rmStatus = NV_OK; + VirtualMemory *pVirtualMemory = NULL; + PCLI_DMA_MAPPING_INFO pDmaMapping, pDmaMappingNext, pDmaMappingPrev; + PNODE pDeviceNode; + PNODE pOffsetNode; + PNODE pNode; + + if (pbUnmapped != NULL) + *pbUnmapped = NV_FALSE; + + // Mapping is always virtual memory object + NV_CHECK_OK_OR_RETURN(LEVEL_SILENT, + virtmemGetByHandleAndDevice(pClient, hMemCtx, hDevice, &pVirtualMemory)); + + // first find the list specific to the hDevice + rmStatus = btreeSearch(hDevice, &pDeviceNode, pVirtualMemory->pDmaMappingList); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + // then get the node belonging to the search offset + rmStatus = btreeSearch(dmaOffset, &pOffsetNode, (PNODE)pDeviceNode->Data); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + pDmaMapping = pOffsetNode->Data; + + // Remove the first dma mappings intersecting with this GPU mask + while (pDmaMapping != NULL) + { + pDmaMappingNext = pDmaMapping->Next; + + if (pDmaMapping->gpuMask & gpuMask) + { + // Remove the element + pDmaMappingPrev = pDmaMapping->Prev; + + if (pDmaMappingPrev != NULL) + { + pDmaMappingPrev->Next = pDmaMappingNext; + } + else + { + pOffsetNode->Data = pDmaMappingNext; + } + + if (pDmaMappingNext != NULL) + { + pDmaMappingNext->Prev = pDmaMappingPrev; + } + + // free the pDmaMapping itself + intermapFreeDmaMapping(pDmaMapping); + + if (pbUnmapped != NULL) + *pbUnmapped = NV_TRUE; + + break; + } + + pDmaMapping = pDmaMappingNext; + } + + // Is the list empty ? + if (pOffsetNode->Data == NULL) + { + // unlink the node + rmStatus = btreeSearch(dmaOffset, &pNode, (PNODE)pDeviceNode->Data); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + rmStatus = btreeUnlink(pNode, (PNODE*)&pDeviceNode->Data); + if (rmStatus == NV_OK) + { + // free the node memory itself + portMemFree(pOffsetNode); + + // is our dmaOffset list empty now? + if (pDeviceNode->Data == NULL) + { + // remove the whole hDevice list + rmStatus = btreeSearch(hDevice, &pNode, pVirtualMemory->pDmaMappingList); + if (rmStatus != NV_OK) + { + return rmStatus; + } + + rmStatus = btreeUnlink(pNode, &pVirtualMemory->pDmaMappingList); + if (rmStatus == NV_OK) + { + portMemFree(pDeviceNode); + } + } + } + } + + return rmStatus; +} + +void +intermapFreeDmaMapping +( + PCLI_DMA_MAPPING_INFO pDmaMapping +) +{ + NV_ASSERT(pDmaMapping != NULL); + + // free the list element + portMemFree(pDmaMapping); +} + +static NvBool +_getDmaMappingInfoFromMemory +( + VirtualMemory *pVirtualMemory, + NvHandle hDevice, + NvU64 dmaOffset, + NvU32 gpuMask, + PCLI_DMA_MAPPING_INFO *ppDmaMappingInfo +) +{ + PNODE pDmaMappingList; + PNODE pNode; + PCLI_DMA_MAPPING_INFO pDmaMappingInfo; + + // first find the list specific to the hDevice + pDmaMappingList = pVirtualMemory->pDmaMappingList; + if (btreeSearch(hDevice, &pNode, pDmaMappingList) == NV_OK) + { + pDmaMappingList = (PNODE)pNode->Data; + NV_ASSERT(pDmaMappingList); + + // then get the node belonging to the search offset + if (btreeSearch(dmaOffset, &pNode, pDmaMappingList) == NV_OK) + { + // Then look for the GPU mask + pDmaMappingInfo = pNode->Data; + while (pDmaMappingInfo) + { + if (pDmaMappingInfo->gpuMask & gpuMask) + { + // Returns the first mapping that intersects with this gpu mask. 
+ break; + } + pDmaMappingInfo = pDmaMappingInfo->Next; + } + if (pDmaMappingInfo != NULL) + { + *ppDmaMappingInfo = pDmaMappingInfo; + return NV_TRUE; + } + } + } + return NV_FALSE; +} + +/*! + * @brief Lookup mapping info in memory context or VA space + * + * This is useful when processing SW methods. We can find the hVASpace + * from the channel context uniquely. Previous lookup within the whole + * client could mistakenly find an alias on another device. + */ +NvBool +CliGetDmaMappingInfo +( + NvHandle hClient, + NvHandle hDevice, + NvHandle hMemCtx, + NvU64 dmaOffset, + NvU32 gpuMask, + PCLI_DMA_MAPPING_INFO *ppDmaMappingInfo +) +{ + VirtualMemory *pVirtualMemory; + RsClient *pClient; + Device *pDevice; + NODE *pNode; + NV_STATUS status; + NvBool bFound; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + return NV_FALSE; + + // Try a non-zero handle as a NVxx_MEMORY_VIRTUAL object + if (hMemCtx != NV01_NULL_OBJECT) + { + RsResourceRef *pMemCtxRef; + + status = clientGetResourceRef(pClient, hMemCtx, &pMemCtxRef); + if (status != NV_OK) + return NV_FALSE; + + // If passed a memory handle directly go ahead and look for an associated mapping + pVirtualMemory = dynamicCast(pMemCtxRef->pResource, VirtualMemory); + if (pVirtualMemory != NULL) + { + return _getDmaMappingInfoFromMemory(pVirtualMemory, hDevice, dmaOffset, gpuMask, ppDmaMappingInfo); + } + } + + status = deviceGetByHandle(pClient, hDevice, &pDevice); + if (status != NV_OK) + return NV_FALSE; + + btreeEnumStart(0, &pNode, pDevice->DevMemoryTable); + while (pNode != NULL) + { + Memory *pMemory = pNode->Data; + VirtualMemory *pVirtualMemory = dynamicCast(pMemory, VirtualMemory); + btreeEnumNext(&pNode, pDevice->DevMemoryTable); + + if ((pVirtualMemory != NULL) && + virtmemMatchesVASpace(pVirtualMemory, hClient, hMemCtx)) + { + bFound = _getDmaMappingInfoFromMemory(pVirtualMemory, hDevice, + dmaOffset, gpuMask, + ppDmaMappingInfo); + if (bFound) + return bFound; + } + } + + return NV_FALSE; +} + +void +CliGetDmaMappingIterator +( + PCLI_DMA_MAPPING_INFO *ppFirstDmaMapping, // [OUT] first found pDmaMapping + PCLI_DMA_MAPPING_INFO_ITERATOR pIt, // [OUT] iterator object to enum all other pDmaMappings + PNODE pDmaMappingList // [IN] the two level pDmaMapping list to iterate +) +{ + // don't iterate if we didn't get a empty list + *ppFirstDmaMapping = NULL; + portMemSet(pIt, 0, sizeof(*pIt)); + if (pDmaMappingList != NULL) + { + // find the first hDevice list + pIt->pDmaMappingList = pDmaMappingList; + btreeEnumStart(0, &pIt->pCurrentList, pIt->pDmaMappingList); + if (pIt->pCurrentList != NULL) + { + // find the first pDmaMapping of the hDevice list (hDevice lists can't be empty *ever*) + NV_ASSERT(pIt->pCurrentList->Data); + btreeEnumStart(0, &pIt->pNextDmaMapping, pIt->pCurrentList->Data); + NV_ASSERT(pIt->pNextDmaMapping); + NV_ASSERT(pIt->pNextDmaMapping->Data); + + CliGetDmaMappingNext(ppFirstDmaMapping, pIt); + } + } +} + +void +CliGetDmaMappingNext +( + PCLI_DMA_MAPPING_INFO *ppDmaMapping, + PCLI_DMA_MAPPING_INFO_ITERATOR pIt +) +{ + PCLI_DMA_MAPPING_INFO pDmaMapping = NULL; + + // are we done with all hDevice lists? + if ((pIt->pDmaMappingList != NULL) && + (pIt->pCurrentList != NULL) && + (pIt->pNextDmaMapping != NULL)) + { + // return the current node. 
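+        //
+        // The iterator walks the two-level tracking structure built by
+        // intermapRegisterDmaMapping(): pDmaMappingList is a btree keyed by
+        // hDevice, each device node's Data is a btree keyed by dmaOffset, and
+        // each offset node's Data is a CLI_DMA_MAPPING_INFO (chained via
+        // Next/Prev for per-GPU-mask aliases).
+        //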
+ NV_ASSERT(pIt->pNextDmaMapping->Data); + pDmaMapping = (PCLI_DMA_MAPPING_INFO)pIt->pNextDmaMapping->Data; + + // iterate to the next DmaOffset (so the caller is free to delete the node) + btreeEnumNext(&pIt->pNextDmaMapping, pIt->pCurrentList); + + // reached the end of the hDevice list? move to next hDevice + if (pIt->pNextDmaMapping == NULL) + { + btreeEnumNext(&pIt->pCurrentList, pIt->pDmaMappingList); + if (pIt->pCurrentList != NULL) + { + // restart iteration process for the new list + NV_ASSERT(pIt->pCurrentList->Data); + btreeEnumStart(0, &pIt->pNextDmaMapping, pIt->pCurrentList->Data); + NV_ASSERT(pIt->pNextDmaMapping); + } + } + } + + // stop iterating once we hit the end of list [or something bad happened] + if (pDmaMapping == NULL) + { + pIt->pDmaMappingList = NULL; + pIt->pCurrentList = NULL; + pIt->pNextDmaMapping = NULL; + } + *ppDmaMapping = pDmaMapping; +} diff --git a/src/nvidia/src/kernel/rmapi/nv_gpu_ops.c b/src/nvidia/src/kernel/rmapi/nv_gpu_ops.c new file mode 100644 index 000000000..5515a1d75 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/nv_gpu_ops.c @@ -0,0 +1,8782 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "core/prelude.h" + + +#include +#include +#include // NV01_MEMORY_SYSTEM +#include // NV01_MEMORY_LOCAL_USER +#include +#include +#include // NV50_MEMORY_VIRTUAL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // PHYSICAL_CHANNEL_GPFIFO +#include // UVM_CHANNEL_RETAINER +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +#define NV_GPU_OPS_NUM_GPFIFO_ENTRIES_DEFAULT 1024 +#define NV_GPU_SMALL_PAGESIZE (4 * 1024) + +#define PAGE_SIZE_DEFAULT UVM_PAGE_SIZE_DEFAULT + +typedef struct +{ + NODE btreeNode; + NvU64 address; + NvHandle handle; + NvU64 size; + // childHandle tightly couples a physical allocation with a VA memdesc. + // A VA memsdesc is considered as a parent memdesc i.e. childHandle will be non-zero (valid). + // - If childHandle is non-zero,there is a corresponding PA allocation present. + // - If childHandle is zero, this is an invalid state for a VA memdesc. + NvHandle childHandle; +} gpuMemDesc; + +typedef struct +{ + NvU64 vaStart; // Needs to be alinged to pagesize + NvBool bFixedAddressAllocate; // rangeBegin & rangeEnd both included + NvU32 pageSize; // default is 4k or 64k else use pagesize= 2M +} gpuVaAllocInfo; + +typedef struct +{ + NODE btreeNode; + NvU64 cpuPointer; + NvHandle handle; +} cpuMappingDesc; + +typedef struct +{ + NODE btreeNode; + PORT_RWLOCK *btreeLock; + NvHandle deviceHandle; + PNODE subDevices; + NvU32 subDeviceCount; + NvU32 arch; + NvU32 implementation; +} deviceDesc; + +typedef struct +{ + NODE btreeNode; + NvHandle subDeviceHandle; + NvU64 refCount; + struct + { + NvHandle handle; + + // Pointer to the SMC partition information. It is used as a flag to + // indicate that the SMC information has been initialized. + KERNEL_MIG_GPU_INSTANCE *info; + } smcPartition; + NvU32 eccOffset; + NvU32 eccMask; + void *eccReadLocation; + NvHandle eccMasterHandle; + NvHandle eccCallbackHandle; + NvBool bEccInitialized; + NvBool bEccEnabled; + NvBool eccErrorNotifier; + NVOS10_EVENT_KERNEL_CALLBACK_EX eccDbeCallback; + + // The below is used for controlling channel(s) in the GPU. + // Example: Volta maps the doorbell work submission register in this + // region. 
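+    // clientRegionHandle/clientRegionMapping below track that region; see
+    // gpuDeviceMapUsermodeRegion()/gpuDeviceDestroyUsermodeRegion() declared
+    // further below.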
+ NvHandle clientRegionHandle; + volatile void *clientRegionMapping; +} subDeviceDesc; + +struct gpuSession +{ + NvHandle handle; + PNODE devices; + PORT_RWLOCK *btreeLock; +}; + +MAKE_MAP(MemdescMap, PMEMORY_DESCRIPTOR); + +struct gpuDevice +{ + deviceDesc *rmDevice; + subDeviceDesc *rmSubDevice; + + // same as rmDevice->deviceHandle + NvHandle handle; + + // same as rmSubDevice->subDeviceHandle + NvHandle subhandle; + + NvU32 deviceInstance; + NvU32 subdeviceInstance; + NvU32 gpuId; + NvU32 hostClass; + NvU32 ceClass; + NvU32 sec2Class; + NvU32 computeClass; + NvU32 faultBufferClass; + NvU32 accessCounterBufferClass; + NvBool isTccMode; + NvBool isWddmMode; + struct gpuSession *session; + NvU8 gpuUUID[NV_GPU_UUID_LEN]; + gpuFbInfo fbInfo; + UVM_LINK_TYPE sysmemLink; + NvU32 sysmemLinkRateMBps; + NvBool connectedToSwitch; + + MemdescMap kern2PhysDescrMap; +}; + +struct gpuAddressSpace +{ + NvHandle handle; + struct gpuDevice *device; + PNODE allocations; + PORT_RWLOCK *allocationsLock; + PNODE cpuMappings; + PORT_RWLOCK *cpuMappingsLock; + PNODE physAllocations; + PORT_RWLOCK *physAllocationsLock; + NvU64 vaBase; + NvU64 vaSize; + + // Dummy BAR1 allocation required on PCIe systems when GPPut resides in + // sysmem. + struct + { + NvU64 refCount; + NvU64 gpuAddr; + volatile void *cpuAddr; + } dummyGpuAlloc; +}; + +struct gpuChannel +{ + NvHandle channelHandle; + NvU32 hwRunlistId; + NvU32 hwChannelId; + UVM_GPU_CHANNEL_ENGINE_TYPE engineType; + + // If engineType is CE, engineIndex is a zero-based offset from + // NV2080_ENGINE_TYPE_COPY0. If engineType is GR, engineIndex is a + // zero-based offset from NV2080_ENGINE_TYPE_GR0. + NvU32 engineIndex; + struct gpuAddressSpace *vaSpace; + NvU64 gpFifo; + NvNotification *errorNotifier; + NvU64 errorNotifierOffset; + NvU64 *gpFifoEntries; + NvU32 fifoEntries; + KeplerAControlGPFifo *controlPage; + struct gpuObject *nextAttachedEngine; + NvHandle hFaultCancelSwMethodClass; + volatile unsigned *workSubmissionOffset; + NvU32 workSubmissionToken; + volatile NvU32 *pWorkSubmissionToken; + NvHandle hUserdPhysHandle; + NvU64 userdGpuAddr; + UVM_BUFFER_LOCATION gpFifoLoc; + UVM_BUFFER_LOCATION gpPutLoc; + NvBool retainedDummyAlloc; +}; + +// Add 3 to include local ctx buffer, patch context buffer and PM ctxsw buffer +ct_assert(UVM_GPU_CHANNEL_MAX_RESOURCES >= (GR_GLOBALCTX_BUFFER_COUNT + 3)); + +// A retained channel is a user client's channel which has been registered with +// the UVM driver. 
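+// Retaining a channel duplicates the user's TSG and kernel context share under
+// the driver's own RM client (hDupTsg, hDupKernelCtxShare) and holds a channel
+// retainer object (hChannelRetainer) so the channel ID and instance memory
+// remain valid for as long as the channel is retained.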
+struct gpuRetainedChannel_struct +{ + struct gpuDevice *device; + deviceDesc *rmDevice; + subDeviceDesc *rmSubDevice; + struct gpuSession *session; + OBJGPU *pGpu; + MEMORY_DESCRIPTOR *instanceMemDesc; + MEMORY_DESCRIPTOR *resourceMemDesc[UVM_GPU_CHANNEL_MAX_RESOURCES]; + UVM_GPU_CHANNEL_ENGINE_TYPE channelEngineType; + NvU32 resourceCount; + NvU32 chId; + NvU32 runlistId; + NvU32 grIdx; + + // Dup of user's TSG (if one exists) under our RM client + NvHandle hDupTsg; + + // Dup to context share object + NvHandle hDupKernelCtxShare; + + // Handle for object that retains chId and instance mem + NvHandle hChannelRetainer; +}; + +struct gpuObject +{ + NvHandle handle; + NvU32 type; + struct gpuObject *next; +}; + +struct allocFlags +{ + NvBool bGetKernelVA; + NvBool bfixedAddressAllocate; +}; + +struct ChannelAllocInfo +{ + NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS gpFifoAllocParams; + gpuAllocInfo gpuAllocInfo; +}; + +struct systemP2PCaps +{ + // peerId[i] contains gpu[i]'s peer id of gpu[1 - i] + NvU32 peerIds[2]; + + // true if the two GPUs are direct NvLink or PCIe peers + NvU32 accessSupported : 1; + + // true if the two GPUs are indirect (NvLink) peers + NvU32 indirectAccessSupported : 1; + + // true if the two GPUs are direct NvLink peers + NvU32 nvlinkSupported : 1; + + NvU32 atomicSupported : 1; + + // optimalNvlinkWriteCEs[i] contains the index of the optimal CE to use when + // writing from gpu[i] to gpu[1 - i] + NvU32 optimalNvlinkWriteCEs[2]; +}; + +static NV_STATUS findUvmAddressSpace(NvHandle hClient, NvU32 gpuInstance, NvHandle *pHandle, OBJVASPACE **ppVaspace); +static NV_STATUS nvGpuOpsGpuMalloc(struct gpuAddressSpace *vaSpace, + NvBool isSystemMemory, + NvLength length, + NvU64 *gpuOffset, + struct allocFlags flags, + gpuAllocInfo *allocInfo); +static NV_STATUS trackDescriptor(PNODE *pRoot, NvU64 key, void *desc); +static NV_STATUS findDescriptor(PNODE pRoot, NvU64 key, void **desc); +static NV_STATUS deleteDescriptor(PNODE *pRoot, NvU64 key, void **desc); +static NV_STATUS destroyAllGpuMemDescriptors(NvHandle hClient, PNODE pNode); +static NV_STATUS getHandleForVirtualAddr(struct gpuAddressSpace *vaSpace, + NvU64 allocationVa, + NvBool bPhysical, + NvHandle *pHandle); +static NV_STATUS findDeviceClasses(NvHandle hRoot, + NvHandle hDevice, + NvHandle hSubdevice, + NvU32 *hostClass, + NvU32 *ceClass, + NvU32 *computeClass, + NvU32 *faultBufferClass, + NvU32 *accessCounterBufferClass, + NvU32 *sec2Class); +static NV_STATUS queryCopyEngines(struct gpuDevice *gpu, gpuCesCaps *cesCaps); +static void nvGpuOpsFreeVirtual(struct gpuAddressSpace *vaSpace, + NvU64 vaOffset); +static NvBool isDeviceVoltaPlus(const struct gpuDevice *device); +static NvBool isDeviceTuringPlus(const struct gpuDevice *device); +static NV_STATUS gpuDeviceMapUsermodeRegion(struct gpuDevice *device); +static void gpuDeviceDestroyUsermodeRegion(struct gpuDevice *device); +static void gpuDeviceUnmapCpuFreeHandle(struct gpuDevice *device, + NvHandle handle, + void *ptr, + NvU32 flags); +static NV_STATUS allocNvlinkStatusForSubdevice(struct gpuDevice *device, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS **nvlinkStatusOut); +static NvU32 getNvlinkConnectionToNpu(const NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus, + NvBool *atomicSupported, + NvU32 *linkBandwidthMBps); +static NvU32 getNvlinkConnectionToSwitch(const NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus, + NvU32 *linkBandwidthMBps); +static NV_STATUS nvGpuOpsGetMemoryByHandle(NvHandle hClient, NvHandle hMemory, Memory 
**ppMemory); +static void _nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel); +static NV_STATUS _nvGpuOpsRetainChannelResources(struct gpuDevice *device, + NvHandle hClient, + NvHandle hKernelChannel, + gpuRetainedChannel *retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo); +static void _nvGpuOpsReleaseChannelResources(gpuRetainedChannel *retainedChannel); + +/* + * This function will lock the RM API lock according to rmApiLockFlags, and then + * examine numLocksNeeded. If this is 0, no GPU locks will be acquired. If it + * is 1, the GPU lock for deviceInstance1 will be locked. If it is 2, both GPU + * locks for deviceInstance1 and deviceInstance2 will be locked. If it is any + * other number, all the GPU locks will be acquired. + * + * This function will attempt to grab the needed GPU locks, and will write the + * resulting mask into *lockedGpusMask. In the event of a failure to acquire any + * needed GPU locks, the written mask is 0 and the function returns + * NV_ERR_INVALID_LOCK_STATE. In this case, all locks held are released and the + * caller does not need to release any locks. + */ + +typedef struct nvGpuOpsLockSet +{ + NvBool isRmLockAcquired; + NvBool isRmSemaAcquired; + GPU_MASK gpuMask; + RsClient *pClientLocked; +} nvGpuOpsLockSet; + +static void _nvGpuOpsLocksRelease(nvGpuOpsLockSet *acquiredLocks) +{ + OBJSYS *pSys; + pSys = SYS_GET_INSTANCE(); + + if (acquiredLocks->gpuMask != 0) + { + rmGpuGroupLockRelease(acquiredLocks->gpuMask, GPUS_LOCK_FLAGS_NONE); + acquiredLocks->gpuMask = 0; + } + + if (acquiredLocks->pClientLocked != NULL) + { + serverReleaseClient(&g_resServ, LOCK_ACCESS_WRITE, acquiredLocks->pClientLocked); + acquiredLocks->pClientLocked = NULL; + } + + if (acquiredLocks->isRmLockAcquired == NV_TRUE) + { + rmApiLockRelease(); + acquiredLocks->isRmLockAcquired = NV_FALSE; + } + + if (acquiredLocks->isRmSemaAcquired == NV_TRUE) + { + osReleaseRmSema(pSys->pSema, NULL); + acquiredLocks->isRmSemaAcquired = NV_FALSE; + } +} + +static NV_STATUS _nvGpuOpsLocksAcquire(NvU32 rmApiLockFlags, + NvHandle hClient, + RsClient **ppClient, + NvU32 numLocksNeeded, + NvU32 deviceInstance1, + NvU32 deviceInstance2, + nvGpuOpsLockSet *acquiredLocks) +{ + NV_STATUS status; + OBJSYS *pSys; + GPU_MASK gpuMaskRequested; + GPU_MASK gpuMaskAcquired; + + acquiredLocks->isRmSemaAcquired = NV_FALSE; + acquiredLocks->isRmLockAcquired = NV_FALSE; + acquiredLocks->gpuMask = 0; + acquiredLocks->pClientLocked = NULL; + + pSys = SYS_GET_INSTANCE(); + if (pSys == NULL) + { + return NV_ERR_GENERIC; + } + + status = osAcquireRmSema(pSys->pSema); + if (status != NV_OK) + { + return status; + } + acquiredLocks->isRmSemaAcquired = NV_TRUE; + + status = rmApiLockAcquire(rmApiLockFlags, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(acquiredLocks); + return status; + } + acquiredLocks->isRmLockAcquired = NV_TRUE; + + if (hClient != NV01_NULL_OBJECT) + { + status = serverAcquireClient(&g_resServ, hClient, LOCK_ACCESS_WRITE, &acquiredLocks->pClientLocked); + + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(acquiredLocks); + return status; + } + + if (ppClient != NULL) + *ppClient = acquiredLocks->pClientLocked; + } + + // + // Determine the GPU lock mask we need. If we are asked for 0, 1, or 2 locks + // then we should use neither, just the first, or both deviceInstance + // parameters, respectively. If any other number of locks is requested, we + // acquire all of the lockable GPUS. 
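+    //
+    // Illustrative usage sketch (hypothetical caller; the client handle,
+    // device instance and status variable below are examples only): callers
+    // are expected to bracket their work with the acquire/release pair:
+    //
+    //     nvGpuOpsLockSet acquiredLocks;
+    //     NV_STATUS status;
+    //     status = _nvGpuOpsLocksAcquire(RMAPI_LOCK_FLAGS_READ, hClient, NULL,
+    //                                    1, deviceInstance, 0, &acquiredLocks);
+    //     if (status != NV_OK)
+    //         return status;
+    //     // ... operate while the RM semaphore, RM API lock, client lock and
+    //     // the single GPU lock are held ...
+    //     _nvGpuOpsLocksRelease(&acquiredLocks);
+    //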
+ // + // We cannot simply determine the mask outside of this function and pass in + // the mask, because gpumgrGetDeviceGpuMask requires that we hold the RM API + // lock. Otherwise, SLI rewiring could preempt lock acquisition and render + // the mask invalid. + // + gpuMaskRequested = 0; + + if (numLocksNeeded > 2) + { + gpuMaskRequested = GPUS_LOCK_ALL; + } + else + { + if (numLocksNeeded > 0) + { + gpuMaskRequested |= gpumgrGetDeviceGpuMask(deviceInstance1); + } + + if (numLocksNeeded > 1) + { + gpuMaskRequested |= gpumgrGetDeviceGpuMask(deviceInstance2); + } + } + + // + // The gpuMask parameter to rmGpuGroupLockAcquire is both input and output, + // so we have to copy in what we want here to make comparisons later. + // + gpuMaskAcquired = gpuMaskRequested; + if (gpuMaskRequested != 0) + { + status = rmGpuGroupLockAcquire(0, GPU_LOCK_GRP_MASK, + GPUS_LOCK_FLAGS_NONE, + RM_LOCK_MODULES_GPU_OPS, &gpuMaskAcquired); + } + acquiredLocks->gpuMask = gpuMaskAcquired; + + // + // If we cannot acquire all the locks requested, we release all the locks + // we *were* able to get and bail out here. There is never a safe way to + // proceed with a GPU ops function with fewer locks than requested. If there + // was a safe way to proceed, the client should have asked for fewer locks + // in the first place. + // + // That said, callers sometimes want "all available GPUs", and then the call + // to rmGpuGroupLockAcquire will mask off invalid GPUs for us. Hence the + // exception for GPUS_LOCK_ALL. + // + if (gpuMaskAcquired != gpuMaskRequested && gpuMaskRequested != GPUS_LOCK_ALL) + { + status = NV_ERR_INVALID_LOCK_STATE; + } + + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(acquiredLocks); + } + + return status; +} + +static NV_STATUS _nvGpuOpsLocksAcquireAll(NvU32 rmApiLockFlags, + NvHandle hClient, RsClient **ppClient, + nvGpuOpsLockSet *acquiredLocks) +{ + return _nvGpuOpsLocksAcquire(rmApiLockFlags, hClient, ppClient, 3, 0, 0, acquiredLocks); +} + +static NV_STATUS nvGpuOpsCreateClient(RM_API *pRmApi, NvHandle *hClient) +{ + NV_STATUS status; + RS_SHARE_POLICY sharePolicy; + + *hClient = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, NV01_NULL_OBJECT, NV01_NULL_OBJECT, + hClient, NV01_ROOT, hClient); + if (status != NV_OK) + { + return status; + } + + // Override default system share policy. Prohibit sharing of any and all + // objects owned by this client. 
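+    // Clarifying note on the mechanism (the policy fields are filled in just
+    // below): RS_SHARE_TYPE_ALL combined with RS_SHARE_ACTION_FLAG_REVOKE
+    // removes RS_ACCESS_DUP_OBJECT from the "share with everyone" policy, so
+    // no other RM client can dup objects owned by this internal client; this
+    // code instead dups user objects under its own client where needed.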
+ portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + sharePolicy.type = RS_SHARE_TYPE_ALL; + sharePolicy.action = RS_SHARE_ACTION_FLAG_REVOKE; + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + + status = pRmApi->Share(pRmApi, *hClient, *hClient, &sharePolicy); + if (status != NV_OK) + { + pRmApi->Free(pRmApi, *hClient, *hClient); + } + + return status; +} + +NV_STATUS nvGpuOpsCreateSession(struct gpuSession **session) +{ + struct gpuSession *gpuSession = NULL; + NV_STATUS status; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalNonPaged(); + + gpuSession = portMemAllocNonPaged(sizeof(*gpuSession)); + if (gpuSession == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(gpuSession, 0, sizeof(*gpuSession)); + + status = nvGpuOpsCreateClient(pRmApi, &gpuSession->handle); + if (status != NV_OK) + { + portMemFree(gpuSession); + return status; + } + + gpuSession->devices = NULL; + gpuSession->btreeLock = portSyncRwLockCreate(pAlloc); + *session = (gpuSession); + return status; +} + +NV_STATUS nvGpuOpsDestroySession(struct gpuSession *session) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + if (!session) + return NV_OK; + + // Sanity Check: There should not be any attached devices with the session! + NV_ASSERT(!session->devices); + + // freeing session will free everything under it + pRmApi->Free(pRmApi, session->handle, session->handle); + portSyncRwLockDestroy(session->btreeLock); + portMemFree(session); + return NV_OK; +} + +static void *gpuBar0BaseAddress(OBJGPU *pGpu) +{ + DEVICE_MAPPING *pMapping = gpuGetDeviceMapping(pGpu, DEVICE_INDEX_GPU, 0); + + NV_ASSERT(pMapping); + + return pMapping->gpuNvAddr; +} + +static void eccErrorCallback(void *pArg, void *pData, NvHandle hEvent, + NvU32 data, NvU32 status) +{ + subDeviceDesc *rmSubDevice = (subDeviceDesc *)pArg; + + NV_ASSERT(rmSubDevice); + + rmSubDevice->eccErrorNotifier = NV_TRUE; +} + +static NvBool deviceNeedsDummyAlloc(struct gpuDevice *device) +{ + // The dummy mapping is needed so the client can issue a read to flush out + // any CPU BAR1 PCIE writes prior to updating GPPUT. This is only needed + // when the bus is non-coherent and when not in ZeroFB (where there can't be + // any BAR1 mappings). 
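+    //
+    // Illustrative (hypothetical) submission sequence built on that dummy
+    // mapping; writeGpFifoEntry(), vaSpace, channel and newGpPut are
+    // placeholders, not identifiers defined in this file:
+    //
+    //     writeGpFifoEntry(channel, pushbufferVa, size);           // sysmem write
+    //     (void)*(volatile NvU32 *)vaSpace->dummyGpuAlloc.cpuAddr; // BAR1 read flush
+    //     channel->controlPage->GPPut = newGpPut;                  // then advance GPPut
+    //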
+ return device->sysmemLink < UVM_LINK_TYPE_NVLINK_2 && !device->fbInfo.bZeroFb; +} + +static NV_STATUS nvGpuOpsVaSpaceRetainDummyAlloc(struct gpuAddressSpace *vaSpace) +{ + struct gpuDevice *device; + NV_STATUS status = NV_OK; + gpuAllocInfo allocInfo = {0}; + struct allocFlags flags = {0}; + + device = vaSpace->device; + NV_ASSERT(device); + NV_ASSERT(deviceNeedsDummyAlloc(device)); + + if (vaSpace->dummyGpuAlloc.refCount > 0) + goto done; + + flags.bGetKernelVA = NV_FALSE; + status = nvGpuOpsGpuMalloc(vaSpace, + NV_FALSE, + NV_GPU_SMALL_PAGESIZE, + &vaSpace->dummyGpuAlloc.gpuAddr, + flags, + &allocInfo); + if (status != NV_OK) + return status; + + status = nvGpuOpsMemoryCpuMap(vaSpace, + vaSpace->dummyGpuAlloc.gpuAddr, + NV_GPU_SMALL_PAGESIZE, + (void **)&vaSpace->dummyGpuAlloc.cpuAddr, + PAGE_SIZE_DEFAULT); + if (status != NV_OK) + nvGpuOpsMemoryFree(vaSpace, vaSpace->dummyGpuAlloc.gpuAddr); + +done: + if (status == NV_OK) + { + ++vaSpace->dummyGpuAlloc.refCount; + NV_ASSERT(vaSpace->dummyGpuAlloc.gpuAddr); + NV_ASSERT(vaSpace->dummyGpuAlloc.cpuAddr); + } + + return status; +} + +static void nvGpuOpsVaSpaceReleaseDummyAlloc(struct gpuAddressSpace *vaSpace) +{ + NV_ASSERT(deviceNeedsDummyAlloc(vaSpace->device)); + NV_ASSERT(vaSpace->dummyGpuAlloc.refCount != 0); + + if (--vaSpace->dummyGpuAlloc.refCount > 0) + return; + + if (vaSpace->dummyGpuAlloc.cpuAddr) + nvGpuOpsMemoryCpuUnMap(vaSpace, (void *)vaSpace->dummyGpuAlloc.cpuAddr); + + if (vaSpace->dummyGpuAlloc.gpuAddr) + nvGpuOpsMemoryFree(vaSpace, vaSpace->dummyGpuAlloc.gpuAddr); + + vaSpace->dummyGpuAlloc.cpuAddr = NULL; + vaSpace->dummyGpuAlloc.gpuAddr = 0; +} + +static NV_STATUS nvGpuOpsDisableVaSpaceChannels(struct gpuAddressSpace *vaSpace) +{ + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = NULL; + Device *pDevice; + RsClient *pClient; + RS_ORDERED_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS disableParams = {0}; + + if (vaSpace == NULL) + return NV_ERR_INVALID_ARGUMENT; + + status = serverGetClientUnderLock(&g_resServ, vaSpace->device->session->handle, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByHandle(pClient, vaSpace->device->handle, &pDevice); + if (status != NV_OK) + return status; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + status = vaspaceGetByHandleOrDeviceDefault(pClient, + vaSpace->device->handle, + vaSpace->handle, + &pVAS); + if ((status != NV_OK) || (pVAS == NULL)) + return NV_ERR_INVALID_ARGUMENT; + + // Stop all channels under the VAS, but leave them bound. 
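+    // Note: the loop below batches every channel bound to this VA space into
+    // a single NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS control call (bounded by
+    // NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES) instead of disabling
+    // channels one at a time.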
+ it = kchannelGetIter(pClient, RES_GET_REF(pDevice)); + while (clientRefOrderedIterNext(pClient, &it)) + { + KernelChannel *pKernelChannel = dynamicCast(it.pResourceRef->pResource, KernelChannel); + + NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue); + if (pKernelChannel->pVAS != pVAS) + continue; + + NV_ASSERT_OR_RETURN(disableParams.numChannels < NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES, NV_ERR_NOT_SUPPORTED); + disableParams.hClientList[disableParams.numChannels] = RES_GET_CLIENT_HANDLE(pKernelChannel); + disableParams.hChannelList[disableParams.numChannels] = RES_GET_HANDLE(pKernelChannel); + disableParams.numChannels++; + } + + if (disableParams.numChannels == 0) + return status; + + disableParams.bDisable = NV2080_CTRL_FIFO_DISABLE_CHANNEL_TRUE; + status = pRmApi->Control(pRmApi, + vaSpace->device->session->handle, + vaSpace->device->subhandle, + NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS, + &disableParams, + sizeof(disableParams)); + return status; +} + +static NV_STATUS nvGpuOpsEnableVaSpaceChannels(struct gpuAddressSpace *vaSpace) +{ + NV_STATUS status = NV_OK; + OBJVASPACE *pVAS = NULL; + Device *pDevice; + RsClient *pClient; + RS_ORDERED_ITERATOR it; + NV2080_CTRL_FIFO_DISABLE_CHANNELS_PARAMS disableParams = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (vaSpace == NULL) + return NV_ERR_INVALID_ARGUMENT; + + status = serverGetClientUnderLock(&g_resServ, vaSpace->device->session->handle, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByHandle(pClient, vaSpace->device->handle, &pDevice); + if (status != NV_OK) + return status; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + status = vaspaceGetByHandleOrDeviceDefault(pClient, + vaSpace->device->handle, + vaSpace->handle, + &pVAS); + if ((status != NV_OK) || (pVAS == NULL)) + return NV_ERR_INVALID_ARGUMENT; + + it = kchannelGetIter(pClient, RES_GET_REF(pDevice)); + while (clientRefOrderedIterNext(pClient, &it)) + { + KernelChannel *pKernelChannel = dynamicCast(it.pResourceRef->pResource, KernelChannel); + + NV_ASSERT_OR_ELSE(pKernelChannel != NULL, continue); + if (pKernelChannel->pVAS != pVAS) + continue; + + NV_ASSERT_OR_RETURN(disableParams.numChannels < NV2080_CTRL_FIFO_DISABLE_CHANNELS_MAX_ENTRIES, NV_ERR_NOT_SUPPORTED); + disableParams.hClientList[disableParams.numChannels] = RES_GET_CLIENT_HANDLE(pKernelChannel); + disableParams.hChannelList[disableParams.numChannels] = RES_GET_HANDLE(pKernelChannel); + disableParams.numChannels++; + } + + if (disableParams.numChannels == 0) + return status; + + disableParams.bDisable = NV2080_CTRL_FIFO_DISABLE_CHANNEL_FALSE; + status = pRmApi->Control(pRmApi, + vaSpace->device->session->handle, + vaSpace->device->subhandle, + NV2080_CTRL_CMD_FIFO_DISABLE_CHANNELS, + &disableParams, + sizeof(disableParams)); + return status; +} + +static NV_STATUS nvGpuOpsRmDeviceCreate(struct gpuDevice *device) +{ + NV_STATUS status; + NV0080_ALLOC_PARAMETERS nv0080AllocParams = { 0 }; + deviceDesc *rmDevice = NULL; + struct gpuSession *session = device->session; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalNonPaged(); + OBJGPU *pGpu; + + // Find the existing rmDevice. + // Otherwise, allocate an rmDevice. 
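+    // Note: deviceDesc objects are shared by all gpuDevice instances created
+    // under the same session; they live in the session->devices btree keyed
+    // by deviceInstance, so an existing descriptor is looked up before a new
+    // one is allocated.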
+ portSyncRwLockAcquireRead(session->btreeLock); + status = findDescriptor(session->devices, device->deviceInstance, (void**)&rmDevice); + portSyncRwLockReleaseRead(session->btreeLock); + if (status == NV_OK) + { + NV_ASSERT(rmDevice); + device->rmDevice = rmDevice; + device->handle = rmDevice->deviceHandle; + return NV_OK; + } + + rmDevice = portMemAllocNonPaged(sizeof(*rmDevice)); + if (rmDevice == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + portMemSet(rmDevice, 0, sizeof(*rmDevice)); + + nv0080AllocParams.deviceId = device->deviceInstance; + nv0080AllocParams.hClientShare = session->handle; + device->handle = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, + session->handle, + session->handle, + &device->handle, + NV01_DEVICE_0, + &nv0080AllocParams); + if (status != NV_OK) + goto cleanup_device_desc; + + device->rmDevice = rmDevice; + rmDevice->deviceHandle = device->handle; + rmDevice->subDevices = NULL; + rmDevice->subDeviceCount = 0; + + portSyncRwLockAcquireWrite(session->btreeLock); + status = trackDescriptor(&session->devices, device->deviceInstance, rmDevice); + portSyncRwLockReleaseWrite(session->btreeLock); + if (status != NV_OK) + goto cleanup_device; + + // TODO: Acquired because CliGetGpuContext expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + goto cleanup_device; + status = CliSetGpuContext(session->handle, device->handle, &pGpu, NULL); + rmapiLockRelease(); + if (status != NV_OK) + goto cleanup_device; + + rmDevice->arch = gpuGetChipArch(pGpu); + rmDevice->implementation = gpuGetChipImpl(pGpu); + rmDevice->btreeLock = portSyncRwLockCreate(pAlloc); + + return NV_OK; + +cleanup_device: + pRmApi->Free(pRmApi, session->handle, device->handle); +cleanup_device_desc: + portMemFree(rmDevice); + return status; +} + +static void nvGpuOpsRmDeviceDestroy(struct gpuDevice *device) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + deviceDesc *rmDevice = device->rmDevice; + + NV_ASSERT(rmDevice != NULL); + + if (rmDevice->subDeviceCount == 0) + { + struct gpuSession *session = device->session; + portSyncRwLockAcquireWrite(session->btreeLock); + deleteDescriptor(&session->devices, device->deviceInstance, (void**)&rmDevice); + pRmApi->Free(pRmApi, session->handle, rmDevice->deviceHandle); + portSyncRwLockDestroy(rmDevice->btreeLock); + portMemFree(rmDevice); + portSyncRwLockReleaseWrite(session->btreeLock); + } +} + +static void gpuDeviceRmSubDeviceDeinitEcc(struct gpuDevice *device) +{ + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS eventDbeParams = {0}; + subDeviceDesc *rmSubDevice = device->rmSubDevice; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + if (!rmSubDevice->bEccInitialized || !rmSubDevice->bEccEnabled) + return; + + // Disable all notifications specific to ECC on this device + eventDbeParams.event = NV2080_NOTIFIERS_ECC_DBE; + eventDbeParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE; + + pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + (void *)&eventDbeParams, + sizeof(eventDbeParams)); + + if (!isDeviceTuringPlus(device)) + { + gpuDeviceUnmapCpuFreeHandle(device, + rmSubDevice->eccMasterHandle, + rmSubDevice->eccReadLocation, + DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY)); + } + + rmSubDevice->eccReadLocation = NULL; + + if (rmSubDevice->eccCallbackHandle) + pRmApi->Free(pRmApi, device->session->handle, rmSubDevice->eccCallbackHandle); + + rmSubDevice->bEccEnabled = 
NV_FALSE; + rmSubDevice->bEccInitialized = NV_FALSE; +} + +// +// Initialize the ECC state for an RM subdevice +// +// This can only be done once per RM subdevice as GF100_SUBDEVICE_MASTER can +// only be allocated once. +// +static NV_STATUS gpuDeviceRmSubDeviceInitEcc(struct gpuDevice *device) +{ + NV_STATUS status = NV_OK; + int i = 0; + int tempPtr = 0; + + NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS eccStatusParams; + NV90E6_CTRL_MASTER_GET_ECC_INTR_OFFSET_MASK_PARAMS eccMaskParams = {0}; + NV90E6_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK_PARAMS errContIntrMaskParams = {0}; + NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS eventDbeParams = {0}; + NV0005_ALLOC_PARAMETERS allocDbeParams = {0}; + OBJGPU *pGpu = NULL; + NvBool supportedOnAnyUnits = NV_FALSE; + subDeviceDesc *rmSubDevice = device->rmSubDevice; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + NV_ASSERT(device); + + // TODO: Acquired because CliGetGpuContext expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + status = CliSetGpuContext(device->session->handle, device->handle, &pGpu, NULL); + rmapiLockRelease(); + if (status != NV_OK) + return status; + + portMemSet(&eccStatusParams, 0, sizeof(eccStatusParams)); + + rmSubDevice->eccOffset = 0; + rmSubDevice->eccMask = 0; + rmSubDevice->eccReadLocation = NULL; + rmSubDevice->eccMasterHandle = 0; + rmSubDevice->bEccInitialized = NV_FALSE; + rmSubDevice->bEccEnabled = NV_FALSE; + + // Do not initialize ECC for this device if SMC is enabled, but no partition + // was subscribed to. This will be the case for select devices created + // on behalf of the UVM driver. + if (IS_MIG_IN_USE(pGpu) && rmSubDevice->smcPartition.info == NULL) + return NV_OK; + + // Check ECC before doing anything here + status = pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS, + &eccStatusParams, + sizeof(eccStatusParams)); + + if (status == NV_ERR_NOT_SUPPORTED) + { + // Nothing to do if ECC not supported + rmSubDevice->bEccEnabled = NV_FALSE; + goto success; + } + else if (status != NV_OK) + { + return status; + } + + // + // ECC is considered as supported only if it's enabled for all supported units, + // and there's at least 1 supported unit + // + rmSubDevice->bEccEnabled = NV_TRUE; + + for (i = 0; i < NV2080_CTRL_GPU_ECC_UNIT_COUNT; i++) + { + // Check the ECC status only on the units supported by HW + if (eccStatusParams.units[i].supported) + { + supportedOnAnyUnits = NV_TRUE; + if (!eccStatusParams.units[i].enabled) + rmSubDevice->bEccEnabled = NV_FALSE; + } + } + + if (!supportedOnAnyUnits) + rmSubDevice->bEccEnabled = NV_FALSE; + + if (!rmSubDevice->bEccEnabled) + { + // ECC not enabled, early-out + goto success; + } + + //Allocate memory for interrupt tree + rmSubDevice->eccMasterHandle = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, device->session->handle, + device->subhandle, + &rmSubDevice->eccMasterHandle, + GF100_SUBDEVICE_MASTER, + &tempPtr); + if (status != NV_OK) + goto error; + + if (isDeviceTuringPlus(device)) + { + rmSubDevice->eccReadLocation = gpuBar0BaseAddress(pGpu); + status = pRmApi->Control(pRmApi, + device->session->handle, + rmSubDevice->eccMasterHandle, + NV90E6_CTRL_CMD_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK, + &errContIntrMaskParams, + sizeof(errContIntrMaskParams)); + if (status != NV_OK) + goto error; + + rmSubDevice->eccOffset = GPU_GET_VREG_OFFSET(pGpu, 
NV_VIRTUAL_FUNCTION_ERR_CONT); + rmSubDevice->eccMask = errContIntrMaskParams.eccMask; + } + else + { + // Map memory + status = pRmApi->MapToCpu(pRmApi, + device->session->handle, + device->subhandle, + rmSubDevice->eccMasterHandle, 0, + sizeof(GF100MASTERMap), + (void **)(&rmSubDevice->eccReadLocation), + DRF_DEF(OS33, _FLAGS, _ACCESS, _READ_ONLY)); + if (status != NV_OK) + goto error; + + NV_ASSERT(rmSubDevice->eccReadLocation); + + status = pRmApi->Control(pRmApi, + device->session->handle, + rmSubDevice->eccMasterHandle, + NV90E6_CTRL_CMD_MASTER_GET_ECC_INTR_OFFSET_MASK, + (void *)&eccMaskParams, + sizeof(eccMaskParams)); + if (status != NV_OK) + goto error; + + // Fill the mask and offset which has been read from control call + rmSubDevice->eccOffset = eccMaskParams.offset; + rmSubDevice->eccMask = eccMaskParams.mask; + } + + // Setup callback for ECC DBE + rmSubDevice->eccDbeCallback.func = eccErrorCallback; + rmSubDevice->eccDbeCallback.arg = rmSubDevice; + + allocDbeParams.hParentClient = device->session->handle; + allocDbeParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX; + allocDbeParams.notifyIndex = NV2080_NOTIFIERS_ECC_DBE; + allocDbeParams.data = NV_PTR_TO_NvP64(&rmSubDevice->eccDbeCallback); + + rmSubDevice->eccCallbackHandle = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, device->session->handle, + device->subhandle, + &rmSubDevice->eccCallbackHandle, + NV01_EVENT_KERNEL_CALLBACK_EX, + &allocDbeParams); + + if (status != NV_OK) + goto error; + + eventDbeParams.event = NV2080_NOTIFIERS_ECC_DBE; + eventDbeParams.action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE; + + status = pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, + (void *)&eventDbeParams, + sizeof(eventDbeParams)); + if (status != NV_OK) + goto error; + +success: + rmSubDevice->bEccInitialized = NV_TRUE; + return NV_OK; + +error: + gpuDeviceRmSubDeviceDeinitEcc(device); + return status; +} + +static NV_STATUS getSwizzIdFromSmcPartHandle(RM_API *pRmApi, + NvHandle hClient, + NvHandle hGPUInstanceSubscription, + NvU32 *swizzId) +{ + NV_STATUS status; + RsResourceRef *pSmcResourceRef; + GPUInstanceSubscription *pGPUInstanceSubscription; + + // get GPUInstanceSubscription handle + // TODO: Acquired because serverutilGetResourceRef expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + status = serverutilGetResourceRef(hClient, hGPUInstanceSubscription, &pSmcResourceRef); + rmapiLockRelease(); + if (status != NV_OK) + return status; + + pGPUInstanceSubscription = dynamicCast(pSmcResourceRef->pResource, GPUInstanceSubscription); + if (!pGPUInstanceSubscription) + return NV_ERR_INVALID_OBJECT; + + *swizzId = pGPUInstanceSubscription->pKernelMIGGpuInstance->swizzId; + + return NV_OK; +} + +// +// Determine an SMC partition's swizzId given a user subscription +// +// This requires temporarily duplicating the handle to validate it, as well +// as to prevent removal of the partition for the duration of the look-up. +// However, neither the partition, nor the swizzId uniquely identifying +// it (within the scope of its parent GPU) are guaranteed to remain valid, and +// callers of this function must be prepared for removal of the partition +// between nvGpuOpsGetGpuInfo() and nvGpuOpsDeviceCreate(). 
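+//
+// In outline (hypothetical handles), the helper below follows a
+// dup / query / free pattern:
+//
+//     pRmApi->DupObject(pRmApi, hClient, hParent, &dupedHandle, hUserClient,
+//                       hUserGPUInstanceSubscription, ...);  // pin the subscription
+//     getSwizzIdFromSmcPartHandle(pRmApi, hClient, dupedHandle, &swizzId);
+//     pRmApi->Free(pRmApi, hClient, dupedHandle);            // drop the temporary ref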
+// +static NV_STATUS getSwizzIdFromUserSmcPartHandle(RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle hUserClient, + NvHandle hUserGPUInstanceSubscription, + NvU32 *swizzId) +{ + NV_STATUS status; + NvHandle dupedGPUInstanceSubscription; + + // TODO: Acquired because serverutilGenResourceHandle expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + status = serverutilGenResourceHandle(hClient, &dupedGPUInstanceSubscription); + rmapiLockRelease(); + if (status != NV_OK) + return status; + + status = pRmApi->DupObject(pRmApi, + hClient, + hParent, + &dupedGPUInstanceSubscription, + hUserClient, + hUserGPUInstanceSubscription, + NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE); + if (status != NV_OK) + return status; + + // get GPUInstanceSubscription handle + status = getSwizzIdFromSmcPartHandle(pRmApi, hClient, dupedGPUInstanceSubscription, + swizzId); + + pRmApi->Free(pRmApi, hClient, dupedGPUInstanceSubscription); + + return status; +} + +static void nvGpuOpsRmSmcPartitionDestroy(struct gpuDevice *device) +{ + subDeviceDesc *rmSubDevice = device->rmSubDevice; + + if (rmSubDevice->smcPartition.info != NULL) + { + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + pRmApi->Free(pRmApi, + device->session->handle, + rmSubDevice->smcPartition.handle); + + rmSubDevice->smcPartition.info = NULL; + } +} + +static NV_STATUS nvGpuOpsRmSmcPartitionCreate(struct gpuDevice *device, const gpuInfo *pGpuInfo) +{ + NV_STATUS status; + OBJGPU *pGpu = NULL; + subDeviceDesc *rmSubDevice = device->rmSubDevice; + NvHandle dupUserHandle; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + struct gpuSession *session = device->session; + RsResourceRef *pSmcResourceRef; + GPUInstanceSubscription *pGPUInstanceSubscription; + NvU32 swizzId; + + NV_ASSERT(rmSubDevice->smcPartition.info == NULL); + + if (!pGpuInfo->smcEnabled) + return NV_ERR_INVALID_ARGUMENT; + + // TODO: Acquired because CliSetGpuContext expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + status = CliSetGpuContext(session->handle, device->handle, &pGpu, NULL); + rmapiLockRelease(); + if (status != NV_OK) + return status; + + // Allocate the SMC partition object + + // SMC GPU partitioning was disabled since we detected the partition in + // nvGpuOpsGetGpuInfo + if (!IS_MIG_IN_USE(pGpu)) + return NV_ERR_INVALID_STATE; + + status = pRmApi->DupObject(pRmApi, + session->handle, + rmSubDevice->subDeviceHandle, + &dupUserHandle, + pGpuInfo->smcUserClientInfo.hClient, + pGpuInfo->smcUserClientInfo.hSmcPartRef, + NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE); + if (status != NV_OK) + return status; + + status = getSwizzIdFromSmcPartHandle(pRmApi, + session->handle, + dupUserHandle, + &swizzId); + if (status != NV_OK) + goto cleanup_dup_user_handle; + + // The swizzId changed since the call to nvGpuOpsGetGpuInfo: either the + // object identified by smcUser*Handle changed, or else its configuration + // was altered. + if (swizzId != pGpuInfo->smcSwizzId) + { + status = NV_ERR_INVALID_STATE; + goto cleanup_dup_user_handle; + } + + rmSubDevice->smcPartition.handle = dupUserHandle; + + // get GPUInstanceSubscription handle + // TODO: Acquired because serverutilGetResourceRef expects RMAPI lock. Necessary? 
+ status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + goto cleanup_dup_user_handle; + status = serverutilGetResourceRef(session->handle, rmSubDevice->smcPartition.handle, &pSmcResourceRef); + rmapiLockRelease(); + if (status != NV_OK) + goto cleanup_dup_user_handle; + + pGPUInstanceSubscription = dynamicCast(pSmcResourceRef->pResource, GPUInstanceSubscription); + NV_ASSERT(pGPUInstanceSubscription != NULL); + + NV_ASSERT(pGPUInstanceSubscription->pKernelMIGGpuInstance->swizzId == pGpuInfo->smcSwizzId); + + rmSubDevice->smcPartition.info = pGPUInstanceSubscription->pKernelMIGGpuInstance; + + return NV_OK; + +cleanup_dup_user_handle: + pRmApi->Free(pRmApi, session->handle, dupUserHandle); + + return status; +} + +static NV_STATUS nvGpuOpsRmSubDeviceCreate(struct gpuDevice *device) +{ + NV_STATUS status; + NV2080_ALLOC_PARAMETERS nv2080AllocParams = { 0 }; + deviceDesc *rmDevice = NULL; + subDeviceDesc *rmSubDevice = NULL; + struct gpuSession *session = device->session; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + NV_ASSERT(session); + + // Query the rmDevice which needed to create an rmSubDevice. + portSyncRwLockAcquireRead(session->btreeLock); + status = findDescriptor(session->devices, device->deviceInstance, (void**)&rmDevice); + if (status != NV_OK) + { + portSyncRwLockReleaseRead(session->btreeLock); + return status; + } + + NV_ASSERT(rmDevice); + NV_ASSERT(rmDevice->deviceHandle == device->handle); + + // Find the existing rmSubDevice. + // Otherwise, allocate an rmSubDevice. + portSyncRwLockAcquireWrite(rmDevice->btreeLock); + if (findDescriptor(rmDevice->subDevices, device->subdeviceInstance, (void**)&rmSubDevice) == NV_OK) + { + NV_ASSERT(rmSubDevice); + device->rmSubDevice = rmSubDevice; + device->subhandle = rmSubDevice->subDeviceHandle; + rmSubDevice->refCount++; + portSyncRwLockReleaseWrite(rmDevice->btreeLock); + portSyncRwLockReleaseRead(session->btreeLock); + return NV_OK; + } + + rmSubDevice = portMemAllocNonPaged(sizeof(*rmSubDevice)); + if (rmSubDevice == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + portMemSet(rmSubDevice, 0, sizeof(*rmSubDevice)); + + device->rmSubDevice = rmSubDevice; + rmSubDevice->refCount = 1; + nv2080AllocParams.subDeviceId = device->subdeviceInstance; + device->subhandle = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, + session->handle, + device->handle, + &device->subhandle, + NV20_SUBDEVICE_0, + &nv2080AllocParams); + if (status != NV_OK) + goto cleanup_subdevice_desc; + rmSubDevice->subDeviceHandle = device->subhandle; + + status = trackDescriptor(&rmDevice->subDevices, device->subdeviceInstance, rmSubDevice); + if (status != NV_OK) + goto cleanup_subdevice; + + rmDevice->subDeviceCount++; + + portSyncRwLockReleaseWrite(rmDevice->btreeLock); + portSyncRwLockReleaseRead(session->btreeLock); + return NV_OK; + +cleanup_subdevice: + pRmApi->Free(pRmApi, session->handle, device->subhandle); +cleanup_subdevice_desc: + portMemFree(rmSubDevice); + portSyncRwLockReleaseWrite(rmDevice->btreeLock); + portSyncRwLockReleaseRead(session->btreeLock); + return status; +} + +static NvBool isDevicePascalPlus(const struct gpuDevice *device) +{ + NV_ASSERT(device->rmDevice); + return device->rmDevice->arch >= GPU_ARCHITECTURE_PASCAL; +} + +static NvBool isDeviceVoltaPlus(const struct gpuDevice *device) +{ + NV_ASSERT(device->rmDevice); + return device->rmDevice->arch >= GPU_ARCHITECTURE_VOLTA; +} + +static NvBool isDeviceTuringPlus(const struct gpuDevice *device) +{ + 
NV_ASSERT(device->rmDevice);
+    return device->rmDevice->arch >= GPU_ARCHITECTURE_TURING;
+}
+
+static NvBool isDeviceAmperePlus(const struct gpuDevice *device)
+{
+    NV_ASSERT(device->rmDevice);
+    return device->rmDevice->arch >= GPU_ARCHITECTURE_AMPERE;
+}
+
+static UVM_LINK_TYPE rmControlToUvmNvlinkVersion(NvU32 nvlinkVersion)
+{
+    if (nvlinkVersion == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID)
+        return UVM_LINK_TYPE_NONE;
+    else if (nvlinkVersion == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_1_0)
+        return UVM_LINK_TYPE_NVLINK_1;
+    else if (nvlinkVersion == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_2_0 ||
+             nvlinkVersion == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_2_2)
+        return UVM_LINK_TYPE_NVLINK_2;
+    else if (nvlinkVersion == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_3_0 ||
+             nvlinkVersion == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_3_1)
+        return UVM_LINK_TYPE_NVLINK_3;
+
+    NV_ASSERT(0);
+    return (NvU32)-1;
+}
+
+static NV_STATUS queryFbInfo(struct gpuDevice *device)
+{
+    NV_STATUS nvStatus = NV_OK;
+    NV2080_CTRL_FB_GET_INFO_PARAMS fbInfoParams;
+    NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS *fbRegionInfoParams;
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL);
+    NvU32 i;
+
+    struct fbInputParams
+    {
+        NV2080_CTRL_FB_INFO heapSize;
+        NV2080_CTRL_FB_INFO reservedHeapSize;
+        NV2080_CTRL_FB_INFO zeroFb;
+    } fbParams;
+
+    fbRegionInfoParams = portMemAllocNonPaged(sizeof(*fbRegionInfoParams));
+    if (fbRegionInfoParams == NULL)
+        return NV_ERR_NO_MEMORY;
+
+    portMemSet(fbRegionInfoParams, 0, sizeof(*fbRegionInfoParams));
+    portMemSet(&fbInfoParams, 0, sizeof(fbInfoParams));
+    portMemSet(&fbParams, 0, sizeof(fbParams));
+
+    // Set up the list of parameters we are looking to extract
+    fbParams.heapSize.index = NV2080_CTRL_FB_INFO_INDEX_HEAP_SIZE;
+    fbParams.reservedHeapSize.index = NV2080_CTRL_FB_INFO_INDEX_VISTA_RESERVED_HEAP_SIZE;
+    fbParams.zeroFb.index = NV2080_CTRL_FB_INFO_INDEX_FB_IS_BROKEN;
+
+    fbInfoParams.fbInfoListSize = sizeof(fbParams) / sizeof(fbParams.heapSize);
+    fbInfoParams.fbInfoList = NV_PTR_TO_NvP64(&fbParams);
+
+    nvStatus = pRmApi->Control(pRmApi,
+                               device->session->handle,
+                               device->subhandle,
+                               NV2080_CTRL_CMD_FB_GET_INFO,
+                               &fbInfoParams,
+                               sizeof(fbInfoParams));
+    if (nvStatus != NV_OK)
+        goto out;
+
+    nvStatus = pRmApi->Control(pRmApi,
+                               device->session->handle,
+                               device->subhandle,
+                               NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO,
+                               fbRegionInfoParams,
+                               sizeof(*fbRegionInfoParams));
+    if (nvStatus != NV_OK)
+        goto out;
+
+    device->fbInfo.heapSize = fbParams.heapSize.data;
+    device->fbInfo.reservedHeapSize = fbParams.reservedHeapSize.data;
+    device->fbInfo.bZeroFb = (NvBool)fbParams.zeroFb.data;
+
+    device->fbInfo.maxAllocatableAddress = 0;
+
+    for (i = 0; i < fbRegionInfoParams->numFBRegions; ++i)
+    {
+        device->fbInfo.maxAllocatableAddress = NV_MAX(device->fbInfo.maxAllocatableAddress,
+                                                      fbRegionInfoParams->fbRegion[i].limit);
+    }
+
+out:
+    portMemFree(fbRegionInfoParams);
+    return nvStatus;
+}
+
+// Return the PCIE link cap max speed associated with the given GPU in
+// megabytes per second.
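+// (Worked example of the arithmetic below, for a hypothetical Gen3 x16 link:
+// ((8000 * 16 * 128) / 130) / 8 = 15753 MB/s of unidirectional bandwidth
+// after 128b/130b encoding overhead.)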
+static NV_STATUS getPCIELinkRateMBps(struct gpuDevice *device, NvU32 *pcieLinkRate) +{ + // PCI Express Base Specification: http://www.pcisig.com/specifications/pciexpress + const NvU32 PCIE_1_ENCODING_RATIO_TOTAL = 10; + const NvU32 PCIE_1_ENCODING_RATIO_EFFECTIVE = 8; + const NvU32 PCIE_2_ENCODING_RATIO_TOTAL = 10; + const NvU32 PCIE_2_ENCODING_RATIO_EFFECTIVE = 8; + const NvU32 PCIE_3_ENCODING_RATIO_TOTAL = 130; + const NvU32 PCIE_3_ENCODING_RATIO_EFFECTIVE = 128; + const NvU32 PCIE_4_ENCODING_RATIO_TOTAL = 130; + const NvU32 PCIE_4_ENCODING_RATIO_EFFECTIVE = 128; + const NvU32 PCIE_5_ENCODING_RATIO_TOTAL = 130; + const NvU32 PCIE_5_ENCODING_RATIO_EFFECTIVE = 128; + + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + NV2080_CTRL_BUS_INFO busInfo = {0}; + NV2080_CTRL_BUS_GET_INFO_PARAMS busInfoParams = {0}; + NvU32 linkRate = 0; + NvU32 lanes; + + busInfo.index = NV2080_CTRL_BUS_INFO_INDEX_PCIE_GPU_LINK_CAPS; + busInfoParams.busInfoListSize = 1; + busInfoParams.busInfoList = NV_PTR_TO_NvP64(&busInfo); + + NV_STATUS status = pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_BUS_GET_INFO, + &busInfoParams, + sizeof(busInfoParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s:%d: %s\n", __FUNCTION__, + __LINE__, nvstatusToString(status)); + return status; + } + + lanes = DRF_VAL(2080, _CTRL_BUS_INFO, _PCIE_LINK_CAP_MAX_WIDTH, busInfo.data); + + // Bug 2606540: RM reports PCIe transfer rate in GT/s but labels it as Gbps + switch (DRF_VAL(2080, _CTRL_BUS_INFO, _PCIE_LINK_CAP_MAX_SPEED, busInfo.data)) + { + case NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_2500MBPS: + linkRate = ((2500 * lanes * PCIE_1_ENCODING_RATIO_EFFECTIVE) + / PCIE_1_ENCODING_RATIO_TOTAL) / 8; + break; + case NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_5000MBPS: + linkRate = ((5000 * lanes * PCIE_2_ENCODING_RATIO_EFFECTIVE) + / PCIE_2_ENCODING_RATIO_TOTAL) / 8; + break; + case NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_8000MBPS: + linkRate = ((8000 * lanes * PCIE_3_ENCODING_RATIO_EFFECTIVE) + / PCIE_3_ENCODING_RATIO_TOTAL) / 8; + break; + case NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_16000MBPS: + linkRate = ((16000 * lanes * PCIE_4_ENCODING_RATIO_EFFECTIVE) + / PCIE_4_ENCODING_RATIO_TOTAL) / 8; + break; + case NV2080_CTRL_BUS_INFO_PCIE_LINK_CAP_MAX_SPEED_32000MBPS: + linkRate = ((32000 * lanes * PCIE_5_ENCODING_RATIO_EFFECTIVE) + / PCIE_5_ENCODING_RATIO_TOTAL) / 8; + break; + default: + status = NV_ERR_INVALID_STATE; + NV_PRINTF(LEVEL_ERROR, "Unknown PCIe speed\n"); + } + + *pcieLinkRate = linkRate; + + return status; +} + +NV_STATUS nvGpuOpsDeviceCreate(struct gpuSession *session, + const gpuInfo *pGpuInfo, + const NvProcessorUuid *gpuUuid, + struct gpuDevice **outDevice, + NvBool bCreateSmcPartition) +{ + NV_STATUS status; + struct gpuDevice *device = NULL; + NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS gpuIdInfoParams = {{0}}; + NV2080_CTRL_BUS_GET_INFO_V2_PARAMS *busInfoParams; + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus; + NvU32 nvlinkVersion; + NvU32 sysmemLink; + NvU32 linkBandwidthMBps; + NvU32 sysmemConnType; + NvBool atomicSupported; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + device = portMemAllocNonPaged(sizeof(*device)); + if (device == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + portMemSet(device, 0, sizeof(*device)); + device->session = session; + + portMemCopy(&gpuIdInfoParams.gpuUuid, NV_UUID_LEN, gpuUuid->uuid, NV_UUID_LEN); + gpuIdInfoParams.flags = 
NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_BINARY; + status = pRmApi->Control(pRmApi, + session->handle, + session->handle, + NV0000_CTRL_CMD_GPU_GET_UUID_INFO, + &gpuIdInfoParams, + sizeof(gpuIdInfoParams)); + if (status != NV_OK) + goto cleanup_device_obj; + + device->deviceInstance = gpuIdInfoParams.deviceInstance; + device->subdeviceInstance = gpuIdInfoParams.subdeviceInstance; + device->gpuId = gpuIdInfoParams.gpuId; + + status = nvGpuOpsRmDeviceCreate(device); + if (status != NV_OK) + goto cleanup_device_obj; + + status = nvGpuOpsRmSubDeviceCreate(device); + if (status != NV_OK) + goto cleanup_rm_device; + + if (bCreateSmcPartition) + { + status = nvGpuOpsRmSmcPartitionCreate(device, pGpuInfo); + if (status != NV_OK) + goto cleanup_rm_subdevice; + } + + // Create the work submission info mapping: + // * SMC is disabled, we create for the device. + // * SMC is enabled, we create only for SMC partitions. + if (isDeviceVoltaPlus(device) && (!pGpuInfo->smcEnabled || bCreateSmcPartition)) + { + status = gpuDeviceMapUsermodeRegion(device); + if (status != NV_OK) + goto cleanup_smc_partition; + } + + status = gpuDeviceRmSubDeviceInitEcc(device); + if (status != NV_OK) + goto cleanup_subdevice_usermode; + + status = queryFbInfo(device); + if (status != NV_OK) + goto cleanup_ecc; + + device->isTccMode = NV_FALSE; + + // Non-TCC mode on Windows implies WDDM mode. + device->isWddmMode = !device->isTccMode; + + status = findDeviceClasses(session->handle, + device->handle, + device->subhandle, + &device->hostClass, + &device->ceClass, + &device->computeClass, + &device->faultBufferClass, + &device->accessCounterBufferClass, + &device->sec2Class); + if (status != NV_OK) + goto cleanup_ecc; + + busInfoParams = portMemAllocNonPaged(sizeof(*busInfoParams)); + if (busInfoParams == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto cleanup_ecc; + } + portMemSet(busInfoParams, 0, sizeof(*busInfoParams)); + busInfoParams->busInfoListSize = 1; + busInfoParams->busInfoList[0].index = NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE; + status = pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_BUS_GET_INFO_V2, + busInfoParams, + sizeof(*busInfoParams)); + if (status != NV_OK) + { + portMemFree(busInfoParams); + goto cleanup_ecc; + } + + sysmemConnType = busInfoParams->busInfoList[0].data; + portMemFree(busInfoParams); + + sysmemLink = UVM_LINK_TYPE_NONE; + switch (sysmemConnType) + { + case NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK: + { + status = allocNvlinkStatusForSubdevice(device, &nvlinkStatus); + if (status != NV_OK) + goto cleanup_ecc; + + nvlinkVersion = getNvlinkConnectionToNpu(nvlinkStatus, + &atomicSupported, + &linkBandwidthMBps); + + sysmemLink = rmControlToUvmNvlinkVersion(nvlinkVersion); + + portMemFree(nvlinkStatus); + nvlinkStatus = NULL; + break; + } + case NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_PCIE: + { + sysmemLink = UVM_LINK_TYPE_PCIE; + status = getPCIELinkRateMBps(device, &linkBandwidthMBps); + if (status != NV_OK) + goto cleanup_ecc; + break; + } + default: + { + NV_PRINTF(LEVEL_ERROR, "Unsupported sysmem connection type: %d\n", + sysmemConnType); + NV_ASSERT(0); + break; + } + } + + NV_PRINTF(LEVEL_INFO, "sysmem link type: %d bw: %u\n", sysmemLink, linkBandwidthMBps); + + NV_ASSERT(sysmemLink != UVM_LINK_TYPE_NONE); + device->sysmemLink = sysmemLink; + device->sysmemLinkRateMBps = linkBandwidthMBps; + + status = allocNvlinkStatusForSubdevice(device, &nvlinkStatus); + if (status != NV_OK) + goto 
cleanup_ecc; + nvlinkVersion = getNvlinkConnectionToSwitch(nvlinkStatus, + &linkBandwidthMBps); + + if (rmControlToUvmNvlinkVersion(nvlinkVersion) != UVM_LINK_TYPE_NONE) + { + NV_ASSERT(rmControlToUvmNvlinkVersion(nvlinkVersion) != UVM_LINK_TYPE_NVLINK_1); + + // If the GPU is ever connected to the CPU via a switch, sysmemLink + // and sysmemLinkRateMBps need to be updated accordingly. + NV_ASSERT(sysmemConnType != NV2080_CTRL_BUS_INFO_INDEX_SYSMEM_CONNECTION_TYPE_NVLINK); + + device->connectedToSwitch = NV_TRUE; + } + + portMemFree(nvlinkStatus); + + mapInit(&device->kern2PhysDescrMap, portMemAllocatorGetGlobalNonPaged()); + + *outDevice = device; + return NV_OK; + +cleanup_ecc: + gpuDeviceRmSubDeviceDeinitEcc(device); +cleanup_subdevice_usermode: + gpuDeviceDestroyUsermodeRegion(device); +cleanup_smc_partition: + nvGpuOpsRmSmcPartitionDestroy(device); +cleanup_rm_subdevice: + nvGpuOpsDeviceDestroy(device); + device = NULL; +cleanup_rm_device: + if (device) + nvGpuOpsRmDeviceDestroy(device); +cleanup_device_obj: + portMemFree(device); + return status; +} + +NV_STATUS nvGpuOpsDeviceDestroy(struct gpuDevice *device) +{ + deviceDesc *rmDevice = device->rmDevice; + subDeviceDesc *rmSubDevice = device->rmSubDevice; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + rmSubDevice->refCount--; + + if (rmSubDevice->refCount == 0) + { + gpuDeviceDestroyUsermodeRegion(device); + + gpuDeviceRmSubDeviceDeinitEcc(device); + + nvGpuOpsRmSmcPartitionDestroy(device); + + portSyncRwLockAcquireWrite(rmDevice->btreeLock); + rmDevice->subDeviceCount--; + deleteDescriptor(&rmDevice->subDevices, device->subdeviceInstance, (void**)&rmSubDevice); + pRmApi->Free(pRmApi, device->session->handle, rmSubDevice->subDeviceHandle); + portMemFree(rmSubDevice); + portSyncRwLockReleaseWrite(rmDevice->btreeLock); + + nvGpuOpsRmDeviceDestroy(device); + } + + mapDestroy(&device->kern2PhysDescrMap); + + portMemFree(device); + return NV_OK; +} + +NV_STATUS nvGpuOpsOwnPageFaultIntr(struct gpuDevice *device, + NvBool bOwnInterrupts) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + NV2080_CTRL_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP_PARAMS changeParams = {0}; + changeParams.bOwnedByRm = !bOwnInterrupts; + return pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_MC_CHANGE_REPLAYABLE_FAULT_OWNERSHIP, + &changeParams, + sizeof(changeParams)); +} + +static NV_STATUS getAddressSpaceInfo(struct gpuAddressSpace *vaSpace, + OBJGPU *pGpu, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + NV_STATUS status; + NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS params = {0}; + OBJVASPACE *pVAS = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + struct gpuDevice *device = vaSpace->device; + struct gpuSession *session = device->session; + subDeviceDesc *rmSubDevice = device->rmSubDevice; + + params.hVASpace = vaSpace->handle; + status = pRmApi->Control(pRmApi, + session->handle, + device->handle, + NV0080_CTRL_CMD_DMA_ADV_SCHED_GET_VA_CAPS, + ¶ms, + sizeof(params)); + if (status != NV_OK) + return status; + + vaSpaceInfo->bigPageSize = params.bigPageSize; + + // TODO: Acquired because resserv expects RMAPI lock. Necessary? 
+ { + RsClient *pClient; + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + + status = serverGetClientUnderLock(&g_resServ, session->handle, &pClient); + if (status == NV_OK) + { + status = vaspaceGetByHandleOrDeviceDefault(pClient, device->handle, vaSpace->handle, &pVAS); + } + rmapiLockRelease(); + if (status != NV_OK) + return status; + } + + vaSpaceInfo->atsEnabled = vaspaceIsAtsEnabled(pVAS); + + if (isDeviceTuringPlus(vaSpace->device)) + { + // + // On Turing+ use the VIRTUAL_FUNCTION so this works fine in hosts and + // guests + // + void *bar0Mapping = gpuBar0BaseAddress(pGpu); + vaSpaceInfo->time0Offset = (NvU32 *)((NvU8*)bar0Mapping + GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_TIME_0)); + vaSpaceInfo->time1Offset = (NvU32 *)((NvU8*)bar0Mapping + GPU_GET_VREG_OFFSET(pGpu, NV_VIRTUAL_FUNCTION_TIME_1)); + } + else if (isDeviceVoltaPlus(vaSpace->device)) + { + NV_ASSERT(rmSubDevice->clientRegionMapping); + + // + // On Volta prefer USERMODE mappings for better passthrough + // performance on some hypervisors (see CL23003453 for more details) + // + vaSpaceInfo->time0Offset = (NvU32 *)((NvU8*)rmSubDevice->clientRegionMapping + NVC361_TIME_0); + vaSpaceInfo->time1Offset = (NvU32 *)((NvU8*)rmSubDevice->clientRegionMapping + NVC361_TIME_1); + } + else + { + void *bar0Mapping = gpuBar0BaseAddress(pGpu); + vaSpaceInfo->time0Offset = (NvU32 *)((NvU8*)bar0Mapping + NV_PTIMER_TIME_0); + vaSpaceInfo->time1Offset = (NvU32 *)((NvU8*)bar0Mapping + NV_PTIMER_TIME_1); + } + + if (IS_MIG_IN_USE(pGpu)) + { + KERNEL_MIG_GPU_INSTANCE *pKernelMIGGpuInstance = rmSubDevice->smcPartition.info; + MIG_RESOURCE_ALLOCATION *pResourceAllocation = &pKernelMIGGpuInstance->resourceAllocation; + + vaSpaceInfo->maxSubctxCount = pResourceAllocation->veidCount; + vaSpaceInfo->smcGpcCount = pResourceAllocation->gpcCount; + } + else + { + NV2080_CTRL_FIFO_GET_INFO_PARAMS *fifoGetInfoParams; + + // + // NV2080_CTRL_FIFO_GET_INFO_PARAMS takes over 2KB, so we use a heap + // allocation + // + fifoGetInfoParams = portMemAllocNonPaged(sizeof(*fifoGetInfoParams)); + if (fifoGetInfoParams == NULL) + return NV_ERR_NO_MEMORY; + + fifoGetInfoParams->fifoInfoTblSize = 1; + fifoGetInfoParams->fifoInfoTbl[0].index = NV2080_CTRL_FIFO_INFO_INDEX_MAX_SUBCONTEXT_PER_GROUP; + + status = pRmApi->Control(pRmApi, + session->handle, + rmSubDevice->subDeviceHandle, + NV2080_CTRL_CMD_FIFO_GET_INFO, + fifoGetInfoParams, + sizeof(*fifoGetInfoParams)); + + vaSpaceInfo->maxSubctxCount = fifoGetInfoParams->fifoInfoTbl[0].data; + + portMemFree(fifoGetInfoParams); + + if (status != NV_OK) + return status; + } + + return NV_OK; +} + +// This function will create a new address space object of type FERMI_VASPACE_A. 
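+//
+// Illustrative usage sketch (hypothetical caller; passing 0 for both base and
+// size lets RM choose the managed range):
+//
+//     struct gpuAddressSpace *vaSpace;
+//     UvmGpuAddressSpaceInfo vaSpaceInfo;
+//     status = nvGpuOpsAddressSpaceCreate(device, 0, 0, &vaSpace, &vaSpaceInfo);
+//     if (status == NV_OK)
+//         // vaSpaceInfo.bigPageSize, vaSpaceInfo.time0Offset, etc. are now valid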
+NV_STATUS nvGpuOpsAddressSpaceCreate(struct gpuDevice *device, + NvU64 vaBase, + NvU64 vaSize, + struct gpuAddressSpace **vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + NV_STATUS status; + struct gpuAddressSpace *gpuVaSpace = NULL; + OBJGPU *pGpu = NULL; + NV_VASPACE_ALLOCATION_PARAMETERS vaParams = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalNonPaged(); + + gpuVaSpace = portMemAllocNonPaged(sizeof(*gpuVaSpace)); + if (gpuVaSpace == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(gpuVaSpace, 0, sizeof(*gpuVaSpace)); + gpuVaSpace->vaBase = vaBase; + gpuVaSpace->vaSize = vaSize; + gpuVaSpace->handle = NV01_NULL_OBJECT; + gpuVaSpace->allocationsLock = portSyncRwLockCreate(pAlloc); + gpuVaSpace->cpuMappingsLock = portSyncRwLockCreate(pAlloc); + gpuVaSpace->physAllocationsLock = portSyncRwLockCreate(pAlloc); + + *vaSpace = NULL; + portMemSet(vaSpaceInfo, 0, sizeof(*vaSpaceInfo)); + + // Create a new vaSpace object + vaParams.index= NV_VASPACE_ALLOCATION_INDEX_GPU_NEW; + vaParams.vaBase = gpuVaSpace->vaBase; + vaParams.vaSize = gpuVaSpace->vaSize; + vaParams.flags = gpuVaSpace->vaSize ? + NV_VASPACE_ALLOCATION_FLAGS_SHARED_MANAGEMENT : + NV_VASPACE_ALLOCATION_FLAGS_NONE; + + // TODO: Acquired because CliSetGpuContext expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + goto cleanup_vaspace; + status = CliSetGpuContext(device->session->handle, device->handle, &pGpu, NULL); + rmapiLockRelease(); + if (status != NV_OK) + { + goto cleanup_vaspace; + } + + status = pRmApi->Alloc(pRmApi, + device->session->handle, + device->handle, + &gpuVaSpace->handle, FERMI_VASPACE_A, + &vaParams); + if (status != NV_OK) + { + goto cleanup_struct; + } + + // If base & Size were not provided before, they would have been filled now + gpuVaSpace->vaBase = vaParams.vaBase; + gpuVaSpace->vaSize = vaParams.vaSize; + gpuVaSpace->device = device; + + status = getAddressSpaceInfo(gpuVaSpace, pGpu, vaSpaceInfo); + if (status != NV_OK) + { + goto cleanup_vaspace; + } + + *vaSpace = gpuVaSpace; + return status; + +cleanup_vaspace: + pRmApi->Free(pRmApi, device->session->handle, gpuVaSpace->handle); + +cleanup_struct: + portSyncRwLockDestroy(gpuVaSpace->allocationsLock); + portSyncRwLockDestroy(gpuVaSpace->cpuMappingsLock); + portSyncRwLockDestroy(gpuVaSpace->physAllocationsLock); + portMemFree(gpuVaSpace); + return status; +} + +NV_STATUS nvGpuOpsDupAddressSpace(struct gpuDevice *device, + NvHandle hUserClient, + NvHandle hUserVASpace, + struct gpuAddressSpace **vaSpace, + UvmGpuAddressSpaceInfo *vaSpaceInfo) +{ + NV_STATUS status = NV_OK; + struct gpuAddressSpace *gpuVaSpace = NULL; + struct gpuSession *session = device->session; + OBJVASPACE *pVAS = NULL; + OBJGPU *pGpu = NULL; + RsResourceRef *pVaSpaceRef; + RsResourceRef *pDeviceRef; + Device *pDevice = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalNonPaged(); + + *vaSpace = NULL; + portMemSet(vaSpaceInfo, 0, sizeof(*vaSpaceInfo)); + + // TODO - Move this check to RMDupObject later. + // TODO: Acquired because serverutilGetResourceRef expects RMAPI lock. Necessary? + // Find the device associated with the hUserVASpace and verify that the UUID belongs to it. 
+ status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + status = serverutilGetResourceRef(hUserClient, hUserVASpace, &pVaSpaceRef); + rmapiLockRelease(); + if (status != NV_OK) + return status; + + if (!dynamicCast(pVaSpaceRef->pResource, VaSpaceApi)) + return NV_ERR_INVALID_OBJECT; + + // The parent must be valid and a device if this is a VA space handle + // TODO: Acquired because serverutilGetResourceRef expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + status = serverutilGetResourceRef(hUserClient, pVaSpaceRef->pParentRef->hResource, &pDeviceRef); + rmapiLockRelease(); + NV_ASSERT(status == NV_OK); + + pDevice = dynamicCast(pDeviceRef->pResource, Device); + NV_ASSERT(pDevice != NULL); + + if (pDevice->deviceInst != device->deviceInstance) + return NV_ERR_OTHER_DEVICE_FOUND; + + gpuVaSpace = portMemAllocNonPaged(sizeof(*gpuVaSpace)); + if (gpuVaSpace == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(gpuVaSpace, 0, sizeof(*gpuVaSpace)); + + gpuVaSpace->device = device; + gpuVaSpace->allocationsLock = portSyncRwLockCreate(pAlloc); + gpuVaSpace->cpuMappingsLock = portSyncRwLockCreate(pAlloc); + gpuVaSpace->physAllocationsLock = portSyncRwLockCreate(pAlloc); + + // dup the vaspace + gpuVaSpace->handle = NV01_NULL_OBJECT; + status = pRmApi->DupObject(pRmApi, + session->handle, + device->handle, + &gpuVaSpace->handle, + hUserClient, + hUserVASpace, + NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE); + if (status != NV_OK) + goto cleanup_vaspace; + + // TODO: Acquired because these functions expect RMAPI lock. Necessary? + { + RsClient *pClient; + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + goto cleanup_dup_vaspace; + + status = serverGetClientUnderLock(&g_resServ, session->handle, &pClient); + if (status == NV_OK) + { + status = CliSetGpuContext(session->handle, device->handle, &pGpu, NULL); + if (status == NV_OK) + { + status = vaspaceGetByHandleOrDeviceDefault(pClient, device->handle, gpuVaSpace->handle, &pVAS); + } + } + rmapiLockRelease(); + if (status != NV_OK) + goto cleanup_dup_vaspace; + } + + if (!vaspaceIsExternallyOwned(pVAS)) + { + status = NV_ERR_INVALID_FLAGS; + goto cleanup_dup_vaspace; + } + + status = getAddressSpaceInfo(gpuVaSpace, pGpu, vaSpaceInfo); + if (status != NV_OK) + goto cleanup_dup_vaspace; + + *vaSpace = gpuVaSpace; + + return NV_OK; + +cleanup_dup_vaspace: + pRmApi->Free(pRmApi, session->handle, gpuVaSpace->handle); +cleanup_vaspace: + portSyncRwLockDestroy(gpuVaSpace->allocationsLock); + portSyncRwLockDestroy(gpuVaSpace->cpuMappingsLock); + portSyncRwLockDestroy(gpuVaSpace->physAllocationsLock); + portMemFree(gpuVaSpace); + return status; +} + +// Get the NVLink connection status for the given device. On success, caller is +// responsible of freeing the memory. 
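+//
+// Expected call pattern (illustrative):
+//
+//     NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus;
+//     status = allocNvlinkStatusForSubdevice(device, &nvlinkStatus);
+//     if (status == NV_OK)
+//     {
+//         // ... inspect nvlinkStatus->enabledLinkMask / linkInfo[] ...
+//         portMemFree(nvlinkStatus);
+//     }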
+static NV_STATUS allocNvlinkStatusForSubdevice(struct gpuDevice *device, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS **nvlinkStatusOut) +{ + NV_STATUS status; + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + *nvlinkStatusOut = NULL; + + nvlinkStatus = portMemAllocNonPaged(sizeof(*nvlinkStatus)); + if (nvlinkStatus == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(nvlinkStatus, 0, sizeof(*nvlinkStatus)); + status = pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS, + nvlinkStatus, + sizeof(*nvlinkStatus)); + if (status == NV_ERR_NOT_SUPPORTED) + { + portMemSet(nvlinkStatus, 0, sizeof(*nvlinkStatus)); + } + else if (status != NV_OK) + { + portMemFree(nvlinkStatus); + NV_PRINTF(LEVEL_ERROR, "%s:%d: %s\n", __FUNCTION__, + __LINE__, nvstatusToString(status)); + return status; + } + + *nvlinkStatusOut = nvlinkStatus; + + return NV_OK; +} + +// If the given NvLink connection has a GPU device as an endpoint, return the +// version of the NvLink connection with that GPU , and the maximum +// unidirectional bandwidth in megabytes per second. Otherwise, return +// NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID. +static NvU32 getNvlinkConnectionToGpu(const NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus, + OBJGPU *pGpu, + NvU32 *linkBandwidthMBps) +{ + NvU32 i; + + NvU32 version = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID; + NvU32 domain = gpuGetDomain(pGpu); + NvU16 bus = gpuGetBus(pGpu); + NvU16 device = gpuGetDevice(pGpu); + NvU32 bwMBps = 0; + + for (i = 0; i < NV2080_CTRL_NVLINK_MAX_LINKS; ++i) + { + if (((1 << i) & nvlinkStatus->enabledLinkMask) == 0) + continue; + + if (!nvlinkStatus->linkInfo[i].connected) + continue; + + // Skip loopback/loopout connections + if (nvlinkStatus->linkInfo[i].loopProperty != NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_NONE) + continue; + + if (nvlinkStatus->linkInfo[i].remoteDeviceInfo.deviceType == NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_GPU) + { + if ((nvlinkStatus->linkInfo[i].remoteDeviceInfo.deviceIdFlags & + NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_ID_FLAGS_PCI) == 0) + { + NV_PRINTF(LEVEL_ERROR, "No PCI information for GPU.\n"); + continue; + } + + if ((domain == nvlinkStatus->linkInfo[i].remoteDeviceInfo.domain) && + (bus == nvlinkStatus->linkInfo[i].remoteDeviceInfo.bus) && + (device == nvlinkStatus->linkInfo[i].remoteDeviceInfo.device) && + (pGpu->idInfo.PCIDeviceID == nvlinkStatus->linkInfo[i].remoteDeviceInfo.pciDeviceId)) + { + NvU32 capsTbl = nvlinkStatus->linkInfo[i].capsTbl; + + NV_ASSERT(NV2080_CTRL_NVLINK_GET_CAP(((NvU8 *)&capsTbl), NV2080_CTRL_NVLINK_CAPS_P2P_ATOMICS)); + + if (bwMBps == 0) + version = nvlinkStatus->linkInfo[i].nvlinkVersion; + + bwMBps += nvlinkStatus->linkInfo[i].nvlinkLineRateMbps; + NV_ASSERT(version == nvlinkStatus->linkInfo[i].nvlinkVersion); + } + } + } + + *linkBandwidthMBps = bwMBps; + if (version == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID) + NV_ASSERT(*linkBandwidthMBps == 0); + + return version; +} + +// If the given NvLink connection has a NPU device as an endpoint, return the +// version of the NvLink connection with that NPU , and the maximum +// unidirectional bandwidth in megabytes per second. Otherwise, return +// NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID. 
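+//
+// As in the GPU case above, the reported bandwidth is the sum of the per-link
+// rates: for example, four connected links each reporting 25000 in
+// nvlinkLineRateMbps would be returned as 100000, with the version taken from
+// the first counted link (and asserted to match on the rest).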
+static NvU32 getNvlinkConnectionToNpu(const NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus, + NvBool *atomicSupported, + NvU32 *linkBandwidthMBps) +{ + NvU32 i; + NvU32 bwMBps = 0; + NvU32 version = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID; + + *atomicSupported = NV_FALSE; + + for (i = 0; i < NV2080_CTRL_NVLINK_MAX_LINKS; ++i) + { + if (((1 << i) & nvlinkStatus->enabledLinkMask) == 0) + continue; + + if (!nvlinkStatus->linkInfo[i].connected) + continue; + + // Skip loopback/loopout connections + if (nvlinkStatus->linkInfo[i].loopProperty != NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_NONE) + continue; + + if (nvlinkStatus->linkInfo[i].remoteDeviceInfo.deviceType == NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_NPU) + { + NvU32 capsTbl = nvlinkStatus->linkInfo[i].capsTbl; + NvBool atomicCap = !!NV2080_CTRL_NVLINK_GET_CAP(((NvU8 *)&capsTbl), NV2080_CTRL_NVLINK_CAPS_SYSMEM_ATOMICS); + + if (bwMBps == 0) + { + *atomicSupported = atomicCap; + version = nvlinkStatus->linkInfo[i].nvlinkVersion; + } + bwMBps += nvlinkStatus->linkInfo[i].nvlinkLineRateMbps; + NV_ASSERT(version == nvlinkStatus->linkInfo[i].nvlinkVersion); + NV_ASSERT(*atomicSupported == atomicCap); + } + } + + *linkBandwidthMBps = bwMBps; + if (version == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID) + NV_ASSERT(*linkBandwidthMBps == 0); + + return version; +} + +// If the given NvLink connection has a switch as an endpoint, return the +// version of the NvLink connection with that switch, and the maximum +// unidirectional bandwidth in megabytes per second. Otherwise, return +// NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID. +static NvU32 getNvlinkConnectionToSwitch(const NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus, + NvU32 *linkBandwidthMBps) +{ + NvU32 i; + NvU32 bwMBps = 0; + NvU32 version = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID; + + for (i = 0; i < NV2080_CTRL_NVLINK_MAX_LINKS; ++i) + { + if (((1 << i) & nvlinkStatus->enabledLinkMask) == 0) + continue; + + if (!nvlinkStatus->linkInfo[i].connected) + continue; + + // Skip loopback/loopout connections + if (nvlinkStatus->linkInfo[i].loopProperty != NV2080_CTRL_NVLINK_STATUS_LOOP_PROPERTY_NONE) + continue; + + if (nvlinkStatus->linkInfo[i].remoteDeviceInfo.deviceType == NV2080_CTRL_NVLINK_DEVICE_INFO_DEVICE_TYPE_SWITCH) + { + if (bwMBps == 0) + version = nvlinkStatus->linkInfo[i].nvlinkVersion; + + bwMBps += nvlinkStatus->linkInfo[i].nvlinkLineRateMbps; + NV_ASSERT(version == nvlinkStatus->linkInfo[i].nvlinkVersion); + } + } + + *linkBandwidthMBps = bwMBps; + if (version == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID) + NV_ASSERT(*linkBandwidthMBps == 0); + + return version; +} + +// Compute whether the non-peer GPUs with the given NVLink connections can +// communicate through P9 NPUs +static NV_STATUS gpusHaveNpuNvlink(NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus1, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus2, + NvU32 *nvlinkVersion, + NvU32 *linkBandwidthMBps) +{ + NvU32 nvlinkVersion1, nvlinkVersion2; + NvU32 tmpLinkBandwidthMBps; + NvBool atomicSupported1, atomicSupported2; + + *nvlinkVersion = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID; + + nvlinkVersion1 = getNvlinkConnectionToNpu(nvlinkStatus1, + &atomicSupported1, + &tmpLinkBandwidthMBps); + nvlinkVersion2 = getNvlinkConnectionToNpu(nvlinkStatus2, + &atomicSupported2, + &tmpLinkBandwidthMBps); + + if (nvlinkVersion1 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID || + nvlinkVersion2 == 
NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID) + return NV_OK; + + // Non-peer GPU communication over NPU is only supported on NVLink 2.0 or + // greater + if (nvlinkVersion1 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_1_0 || + nvlinkVersion2 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_1_0) + { + // NVLink1 devices cannot be mixed with other versions. NVLink3 + // supports mixing NVLink2 and NVLink3 devices + NV_ASSERT(nvlinkVersion1 == nvlinkVersion2); + return NV_OK; + } + + NV_ASSERT(atomicSupported1); + NV_ASSERT(atomicSupported2); + + // We do not explore the whole connectivity graph. We assume that NPUs + // connected to NVLink2 (and greater) can forward memory requests so that + // if GPU A is connected to NPU M and GPU B is connected to NPU N, A can + // access B. + *nvlinkVersion = NV_MIN(nvlinkVersion1, nvlinkVersion2); + + // Link bandwidth not provided because the intermediate link rate could + // vary a lot with system topologies & current load, making this bandwidth + // obsolete. + *linkBandwidthMBps = 0; + + return NV_OK; +} + +static NV_STATUS rmSystemP2PCapsControl(struct gpuDevice *device1, + struct gpuDevice *device2, + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS *p2pCapsParams) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + portMemSet(p2pCapsParams, 0, sizeof(*p2pCapsParams)); + p2pCapsParams->gpuIds[0] = device1->gpuId; + p2pCapsParams->gpuIds[1] = device2->gpuId; + p2pCapsParams->gpuCount = 2; + + NvHandle handle = device1->session->handle; + NV_STATUS status = pRmApi->Control(pRmApi, + handle, + handle, + NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2, + p2pCapsParams, + sizeof(*p2pCapsParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s:%d: %s\n", __FUNCTION__, + __LINE__, nvstatusToString(status)); + } + + return status; +} + +// Get R/W/A access capabilities and the link type between the two given GPUs +static NV_STATUS getSystemP2PCaps(struct gpuDevice *device1, + struct gpuDevice *device2, + struct systemP2PCaps *p2pCaps) +{ + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS *p2pCapsParams = NULL; + NV_STATUS status = NV_OK; + + p2pCapsParams = portMemAllocNonPaged(sizeof(*p2pCapsParams)); + if (p2pCapsParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = rmSystemP2PCapsControl(device1, device2, p2pCapsParams); + if (status != NV_OK) + goto done; + + portMemSet(p2pCaps, 0, sizeof(*p2pCaps)); + p2pCaps->peerIds[0] = p2pCapsParams->busPeerIds[0 * 2 + 1]; + p2pCaps->peerIds[1] = p2pCapsParams->busPeerIds[1 * 2 + 0]; + p2pCaps->nvlinkSupported = !!REF_VAL(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_NVLINK_SUPPORTED, p2pCapsParams->p2pCaps); + p2pCaps->atomicSupported = !!REF_VAL(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_ATOMICS_SUPPORTED, p2pCapsParams->p2pCaps); + p2pCaps->indirectAccessSupported = !!REF_VAL(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_INDIRECT_NVLINK_SUPPORTED, + p2pCapsParams->p2pCaps); + + // TODO: Bug 1768805: Check both reads and writes since RM seems to be + // currently incorrectly reporting just the P2P write cap on some + // systems that cannot support P2P at all. See the bug for more + // details. 
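+    //
+    // Note on the indexing above: busPeerIds appears to be laid out as a
+    // gpuCount x gpuCount matrix flattened row-major, so with gpuCount == 2
+    // the entry [i * 2 + j] is the peer ID GPU i uses to reach GPU j; only the
+    // two off-diagonal entries are consumed here. The p2pCaps word packs one
+    // bit per capability, extracted with REF_VAL() and normalized to
+    // NV_TRUE/NV_FALSE with !!.
+    //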
+ if (REF_VAL(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED, p2pCapsParams->p2pCaps) && + REF_VAL(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED, p2pCapsParams->p2pCaps)) + { + NV_ASSERT(!p2pCaps->indirectAccessSupported); + + p2pCaps->accessSupported = NV_TRUE; + } + + if (p2pCaps->nvlinkSupported || p2pCaps->indirectAccessSupported) + { + // Exactly one CE is expected to be recommended for transfers between + // NvLink peers + NV_ASSERT(nvPopCount32(p2pCapsParams->p2pOptimalWriteCEs) == 1); + + // Query the write mask only; UVM has no use for the read mask + p2pCaps->optimalNvlinkWriteCEs[0] = BIT_IDX_32(p2pCapsParams->p2pOptimalWriteCEs); + + // Query the P2P capabilities of device2->device1, which may be + // different from those of device1->device2 + status = rmSystemP2PCapsControl(device2, device1, p2pCapsParams); + if (status != NV_OK) + goto done; + + NV_ASSERT(nvPopCount32(p2pCapsParams->p2pOptimalWriteCEs) == 1); + + p2pCaps->optimalNvlinkWriteCEs[1] = BIT_IDX_32(p2pCapsParams->p2pOptimalWriteCEs); + } + +done: + portMemFree(p2pCapsParams); + return status; +} + +// Return the NVLink P2P capabilities of the peer GPUs with the given devices +static NV_STATUS getNvlinkP2PCaps(struct gpuDevice *device1, + struct gpuDevice *device2, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus1, + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus2, + NvU32 *nvlinkVersion, + NvU32 *linkBandwidthMBps) +{ + NvU32 nvlinkVersion1, nvlinkVersion2; + NvU32 linkBandwidthMBps1, linkBandwidthMBps2; + + *nvlinkVersion = NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID; + + if (device1->connectedToSwitch && device2->connectedToSwitch) + { + nvlinkVersion1 = getNvlinkConnectionToSwitch(nvlinkStatus1, + &linkBandwidthMBps1); + nvlinkVersion2 = getNvlinkConnectionToSwitch(nvlinkStatus2, + &linkBandwidthMBps2); + } + else + { + OBJGPU *pGpu1, *pGpu2; + + pGpu1 = gpumgrGetGpuFromId(device1->gpuId); + if (!pGpu1) + return NV_ERR_OBJECT_NOT_FOUND; + + pGpu2 = gpumgrGetGpuFromId(device2->gpuId); + if (!pGpu2) + return NV_ERR_OBJECT_NOT_FOUND; + + nvlinkVersion1 = getNvlinkConnectionToGpu(nvlinkStatus1, + pGpu2, + &linkBandwidthMBps1); + nvlinkVersion2 = getNvlinkConnectionToGpu(nvlinkStatus2, + pGpu1, + &linkBandwidthMBps2); + } + + if (nvlinkVersion1 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID || + nvlinkVersion2 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_INVALID) + { + *linkBandwidthMBps = 0; + return NV_OK; + } + + // NVLink1 devices cannot be mixed with other versions. NVLink3 supports + // mixing NVLink2 and NVLink3 devices. NVLink4 devices cannot be mixed with + // prior NVLink versions. 
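+    //
+    // For example (illustrative numbers): an NVLink2 endpoint reporting an
+    // aggregate of 75000 MBps paired with an NVLink3 endpoint reporting
+    // 150000 MBps yields *nvlinkVersion == NVLink2 and *linkBandwidthMBps ==
+    // 75000, i.e. the pair is described by its slower side via the NV_MIN()
+    // below.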
+ if (nvlinkVersion1 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_1_0 || + nvlinkVersion2 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_1_0 || + nvlinkVersion1 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_4_0 || + nvlinkVersion2 == NV2080_CTRL_NVLINK_STATUS_NVLINK_VERSION_4_0) + { + NV_ASSERT(nvlinkVersion1 == nvlinkVersion2); + NV_ASSERT(linkBandwidthMBps1 == linkBandwidthMBps2); + } + + *nvlinkVersion = NV_MIN(nvlinkVersion1, nvlinkVersion2); + *linkBandwidthMBps = NV_MIN(linkBandwidthMBps1, linkBandwidthMBps2); + + return NV_OK; +} + +NV_STATUS nvGpuOpsGetP2PCaps(struct gpuDevice *device1, + struct gpuDevice *device2, + getP2PCapsParams *p2pCapsParams) +{ + NV_STATUS status = NV_OK; + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus1 = NULL; + NV2080_CTRL_CMD_NVLINK_GET_NVLINK_STATUS_PARAMS *nvlinkStatus2 = NULL; + struct systemP2PCaps p2pCaps; + + if (!device1 || !device2) + return NV_ERR_INVALID_ARGUMENT; + + if (device1->session != device2->session) + return NV_ERR_INVALID_ARGUMENT; + + if (!p2pCapsParams) + return NV_ERR_INVALID_ARGUMENT; + + status = allocNvlinkStatusForSubdevice(device1, &nvlinkStatus1); + if (status != NV_OK) + goto cleanup; + + status = allocNvlinkStatusForSubdevice(device2, &nvlinkStatus2); + if (status != NV_OK) + goto cleanup; + + portMemSet(p2pCapsParams, 0, sizeof(*p2pCapsParams)); + p2pCapsParams->peerIds[0] = (NvU32)-1; + p2pCapsParams->peerIds[1] = (NvU32)-1; + p2pCapsParams->p2pLink = UVM_LINK_TYPE_NONE; + p2pCapsParams->indirectAccess = NV_FALSE; + + status = getSystemP2PCaps(device1, device2, &p2pCaps); + if (status != NV_OK) + goto cleanup; + + if (p2pCaps.indirectAccessSupported) + { + NvU32 nvlinkVersion; + NvU32 linkBandwidthMBps; + NvU32 p2pLink; + + status = gpusHaveNpuNvlink(nvlinkStatus1, + nvlinkStatus2, + &nvlinkVersion, + &linkBandwidthMBps); + if (status != NV_OK) + goto cleanup; + + p2pLink = rmControlToUvmNvlinkVersion(nvlinkVersion); + + NV_ASSERT(p2pLink >= UVM_LINK_TYPE_NVLINK_2); + NV_ASSERT(linkBandwidthMBps == 0); + + p2pCapsParams->indirectAccess = NV_TRUE; + p2pCapsParams->p2pLink = p2pLink; + p2pCapsParams->optimalNvlinkWriteCEs[0] = p2pCaps.optimalNvlinkWriteCEs[0]; + p2pCapsParams->optimalNvlinkWriteCEs[1] = p2pCaps.optimalNvlinkWriteCEs[1]; + p2pCapsParams->totalLinkLineRateMBps = linkBandwidthMBps; + } + else if (p2pCaps.accessSupported) + { + p2pCapsParams->peerIds[0] = p2pCaps.peerIds[0]; + p2pCapsParams->peerIds[1] = p2pCaps.peerIds[1]; + + if (p2pCaps.nvlinkSupported) + { + NvU32 nvlinkVersion; + NvU32 linkBandwidthMBps; + + NV_ASSERT(p2pCaps.atomicSupported); + + status = getNvlinkP2PCaps(device1, + device2, + nvlinkStatus1, + nvlinkStatus2, + &nvlinkVersion, + &linkBandwidthMBps); + if (status != NV_OK) + goto cleanup; + + p2pCapsParams->p2pLink = rmControlToUvmNvlinkVersion(nvlinkVersion); + p2pCapsParams->optimalNvlinkWriteCEs[0] = p2pCaps.optimalNvlinkWriteCEs[0]; + p2pCapsParams->optimalNvlinkWriteCEs[1] = p2pCaps.optimalNvlinkWriteCEs[1]; + + NV_ASSERT(p2pCapsParams->p2pLink != UVM_LINK_TYPE_NONE); + NV_ASSERT(linkBandwidthMBps != 0); + + p2pCapsParams->totalLinkLineRateMBps = linkBandwidthMBps; + } + else + { + NvU32 linkBandwidthMBps1, linkBandwidthMBps2; + + status = getPCIELinkRateMBps(device1, &linkBandwidthMBps1); + if (status != NV_OK) + goto cleanup; + + status = getPCIELinkRateMBps(device2, &linkBandwidthMBps2); + if (status != NV_OK) + goto cleanup; + + p2pCapsParams->p2pLink = UVM_LINK_TYPE_PCIE; + p2pCapsParams->totalLinkLineRateMBps = NV_MIN(linkBandwidthMBps1, linkBandwidthMBps2); + } + } + 
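+    //
+    // At this point p2pCapsParams describes one of four outcomes: NVLink
+    // routed through NPUs (indirectAccess == NV_TRUE, totalLinkLineRateMBps ==
+    // 0), direct NVLink (peer IDs, per-direction optimal write CEs, min
+    // aggregate link rate), PCIe P2P (peer IDs, min PCIe rate), or no P2P at
+    // all (p2pLink left as UVM_LINK_TYPE_NONE and peer IDs left as (NvU32)-1).
+    //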
+cleanup: + portMemFree(nvlinkStatus1); + portMemFree(nvlinkStatus2); + + return status; +} + +static NV_STATUS nvGpuOpsGetExternalAllocP2pInfo(struct gpuSession *session, + NvU32 memOwnerGpuId, + NvU32 gpuId, + NvBool *isPeerSupported, + NvU32 *peerId) +{ + NV_STATUS status = NV_OK; + NV0000_CTRL_SYSTEM_GET_P2P_CAPS_V2_PARAMS *p2pCapsParams = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + NV_ASSERT(gpuId != memOwnerGpuId); + + p2pCapsParams = portMemAllocNonPaged(sizeof(*p2pCapsParams)); + if (p2pCapsParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(p2pCapsParams, 0, sizeof(*p2pCapsParams)); + p2pCapsParams->gpuIds[0] = gpuId; + p2pCapsParams->gpuIds[1] = memOwnerGpuId; + p2pCapsParams->gpuCount = 2; + + status = pRmApi->Control(pRmApi, + session->handle, + session->handle, + NV0000_CTRL_CMD_SYSTEM_GET_P2P_CAPS_V2, + p2pCapsParams, + sizeof(*p2pCapsParams)); + if (status != NV_OK) + goto done; + + *isPeerSupported = + (REF_VAL(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_WRITES_SUPPORTED, p2pCapsParams->p2pCaps) && + REF_VAL(NV0000_CTRL_SYSTEM_GET_P2P_CAPS_READS_SUPPORTED, p2pCapsParams->p2pCaps)); + + *peerId = p2pCapsParams->busPeerIds[0 * 2 + 1]; + +done: + portMemFree(p2pCapsParams); + return status; +} + +static GMMU_APERTURE nvGpuOpsGetExternalAllocAperture(PMEMORY_DESCRIPTOR pMemDesc, + NvBool isIndirectPeerSupported, + NvBool isPeerSupported) +{ + // Don't support both direct and indirect peers + NV_ASSERT(!(isIndirectPeerSupported && isPeerSupported)); + + // Get the aperture + if (memdescGetAddressSpace(pMemDesc) == ADDR_FBMEM) + { + if (isIndirectPeerSupported) + return GMMU_APERTURE_SYS_COH; + + if (isPeerSupported) + return GMMU_APERTURE_PEER; + + return GMMU_APERTURE_VIDEO; + } + else if ((memdescGetAddressSpace(pMemDesc) == ADDR_FABRIC) || + (memdescGetAddressSpace(pMemDesc) == ADDR_FABRIC_V2)) + { + return GMMU_APERTURE_PEER; + } + else + { + return GMMU_APERTURE_SYS_COH; + } +} + +static NvBool nvGpuOpsGetExternalAllocVolatility(PMEMORY_DESCRIPTOR pMemDesc, + GMMU_APERTURE aperture, + NvBool isIndirectPeerSupported, + UvmRmGpuCachingType cachingType) +{ + if (cachingType == UvmRmGpuCachingTypeDefault) + { + if (aperture == GMMU_APERTURE_PEER || isIndirectPeerSupported) + return (memdescGetGpuP2PCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) ? NV_TRUE : NV_FALSE; + else + return (memdescGetGpuCacheAttrib(pMemDesc) == NV_MEMORY_UNCACHED) ? 
NV_TRUE : NV_FALSE; + } + else if (cachingType == UvmRmGpuCachingTypeForceUncached) + { + return NV_TRUE; + } + else + { + return NV_FALSE; + } +} + +static NV_STATUS nvGpuOpsGetExternalAllocMappingAttribute(UvmRmGpuMappingType mappingType, + PMEMORY_DESCRIPTOR pMemDesc, + NvBool *readOnly, + NvBool *atomic) +{ + *readOnly = NV_FALSE; + *atomic = NV_FALSE; + + if (memdescGetFlag(pMemDesc, MEMDESC_FLAGS_DEVICE_READ_ONLY)) + { + if (mappingType != UvmRmGpuMappingTypeDefault && + mappingType != UvmRmGpuMappingTypeReadOnly) + return NV_ERR_INVALID_ACCESS_TYPE; + + *readOnly = NV_TRUE; + *atomic = NV_FALSE; + } + else + { + *readOnly = (mappingType == UvmRmGpuMappingTypeReadOnly); + *atomic = (mappingType == UvmRmGpuMappingTypeDefault || + mappingType == UvmRmGpuMappingTypeReadWriteAtomic); + } + + return NV_OK; +} + +static NV_STATUS nvGpuOpsGetPteKind(OBJGPU *pMappingGpu, + MemoryManager *pMemoryManager, + PMEMORY_DESCRIPTOR pMemDesc, + Memory *pMemory, + gpuExternalMappingInfo *pGpuExternalMappingInfo, + NvU32 *newKind) +{ + NV_STATUS status = NV_OK; + FB_ALLOC_PAGE_FORMAT fbAllocPageFormat = {0}; + NvU32 ctagId; + + if (pGpuExternalMappingInfo->compressionType == UvmRmGpuCompressionTypeEnabledNoPlc) + { + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, *newKind)) + { + status = memmgrChooseKind_HAL(pMappingGpu, + pMemoryManager, + &fbAllocPageFormat, + NVOS32_ATTR_COMPR_DISABLE_PLC_ANY, + newKind); + } + else + { + status = NV_ERR_INVALID_ARGUMENT; + } + + if (status != NV_OK) + return status; + } + + if (pGpuExternalMappingInfo->formatType != UvmRmGpuFormatTypeDefault) + { + NV_ASSERT(pGpuExternalMappingInfo->elementBits != UvmRmGpuFormatElementBitsDefault); + + fbAllocPageFormat.attr = pMemory->Attr; + fbAllocPageFormat.attr2 = pMemory->Attr2; + fbAllocPageFormat.flags = pMemory->Flags; + fbAllocPageFormat.type = pMemory->Type; + + switch (pGpuExternalMappingInfo->formatType) + { + case UvmRmGpuFormatTypeBlockLinear: + fbAllocPageFormat.attr = FLD_SET_DRF(OS32, _ATTR, _FORMAT, _BLOCK_LINEAR, fbAllocPageFormat.attr); + break; + default: + break; + } + + switch (pGpuExternalMappingInfo->elementBits) + { + case UvmRmGpuFormatElementBits8: + fbAllocPageFormat.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _8, fbAllocPageFormat.attr); + break; + case UvmRmGpuFormatElementBits16: + fbAllocPageFormat.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _16, fbAllocPageFormat.attr); + break; + // CUDA does not support 24-bit width + case UvmRmGpuFormatElementBits32: + fbAllocPageFormat.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _32, fbAllocPageFormat.attr); + break; + case UvmRmGpuFormatElementBits64: + fbAllocPageFormat.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _64, fbAllocPageFormat.attr); + break; + case UvmRmGpuFormatElementBits128: + fbAllocPageFormat.attr = FLD_SET_DRF(OS32, _ATTR, _DEPTH, _128, fbAllocPageFormat.attr); + break; + default: + break; + } + + status = memmgrChooseKind_HAL(pMappingGpu, pMemoryManager, &fbAllocPageFormat, + DRF_VAL(OS32, _ATTR, _COMPR, fbAllocPageFormat.attr), + newKind); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "Invalid kind type (%x)\n", *newKind); + return status; + } + + // + // Check that the memory descriptor already has allocated comptags + // if the new mapping enables compression. Downgrade the kind if no + // comptags are present. 
+ // + ctagId = FB_HWRESID_CTAGID_VAL_FERMI(memdescGetHwResId(pMemDesc)); + if (memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, *newKind) && !ctagId) + *newKind = memmgrGetUncompressedKind_HAL(pMappingGpu, pMemoryManager, *newKind, NV_FALSE); + + if (*newKind == NV_MMU_PTE_KIND_INVALID) + return NV_ERR_INVALID_ARGUMENT; + } + else + { + NV_ASSERT((pGpuExternalMappingInfo->elementBits == UvmRmGpuFormatElementBitsDefault) || + (pGpuExternalMappingInfo->elementBits == UvmRmGpuFormatElementBits8)); + } + + return NV_OK; +} + +static +NV_STATUS +nvGpuOpsMemGetPageSize +( + OBJGPU *pGpu, + MEMORY_DESCRIPTOR *pMemDesc, + NvU32 *pPageSize +) +{ + NvU32 pageSize; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + NV_STATUS status; + + pageSize = memdescGetPageSize(pMemDesc, AT_GPU); + if (pageSize == 0) + { + status = memmgrSetMemDescPageSize_HAL(pGpu, + pMemoryManager, + pMemDesc, + AT_GPU, + RM_ATTR_PAGE_SIZE_DEFAULT); + if (status != NV_OK) + return status; + + pageSize = memdescGetPageSize(pMemDesc, AT_GPU); + NV_ASSERT(pageSize != 0); + } + + *pPageSize = pageSize; + + return NV_OK; +} + +static +NV_STATUS +nvGpuOpsBuildExternalAllocPtes +( + OBJVASPACE *pVAS, + OBJGPU *pMappingGpu, + MEMORY_DESCRIPTOR *pMemDesc, + Memory *pMemory, + NvU64 offset, + NvU64 size, + NvBool isIndirectPeerSupported, + NvBool isPeerSupported, + NvU32 peerId, + gpuExternalMappingInfo *pGpuExternalMappingInfo +) +{ + NV_STATUS status = NV_OK; + OBJGVASPACE *pGVAS = NULL; + const GMMU_FMT *pFmt = NULL; + const GMMU_FMT_PTE *pPteFmt = NULL; + const MMU_FMT_LEVEL *pLevelFmt = NULL; + GMMU_APERTURE aperture; + COMPR_INFO comprInfo; + GMMU_ENTRY_VALUE pte = {{0}}; + + NvU64 fabricBaseAddress = NVLINK_INVALID_FABRIC_ADDR; + NvU32 kind, pageSize; + NvU32 skipPteCount; + NvBool vol, atomic, readOnly; + NvBool encrypted, privileged; + NvU64 iter, physAddr, mappingSize, pteCount; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pMappingGpu); + KernelGmmu *pKernelGmmu = GPU_GET_KERNEL_GMMU(pMappingGpu); + NvU64 allocSize; + NvBool isCompressedKind; + NvU64 *physicalAddresses = NULL; + NvU32 newKind, oldKind; + NvBool kindChanged = NV_FALSE; + NvU64 gpaOffset; + NvBool *isPLCable = NULL; + NvU64 *guestPhysicalAddress = NULL; + + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + status = nvGpuOpsMemGetPageSize(pMappingGpu, + pMemDesc, + &pageSize); + if (status != NV_OK) + return status; + + // memdescGetSize returns the requested size of the allocation. But, the + // actual allocation size could be larger than the requested size due + // to alignment requirement. So, make sure the correct size is used. + // Note, alignment can be greater than the pageSize. 
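+    // For example (illustrative numbers): a surface whose ActualSize is 60 KB
+    // mapped with 64 KB pages rounds up to allocSize == 64 KB; offset and size
+    // must both be 64 KB-aligned, offset must fall below allocSize, and
+    // (offset + size) must not exceed allocSize.
+    //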
+ allocSize = RM_ALIGN_UP(pMemDesc->ActualSize, pageSize); + + if (offset >= allocSize) + return NV_ERR_INVALID_BASE; + + if ((offset + size) > allocSize) + return NV_ERR_INVALID_LIMIT; + + if ((size & (pageSize - 1)) != 0) + return NV_ERR_INVALID_ARGUMENT; + + if ((offset & (pageSize - 1)) != 0) + return NV_ERR_INVALID_ARGUMENT; + + pGVAS = dynamicCast(pVAS, OBJGVASPACE); + + // Get the GMMU format + pFmt = gvaspaceGetGmmuFmt(pGVAS, pMappingGpu); + pPteFmt = (GMMU_FMT_PTE*)pFmt->pPte; + pLevelFmt = mmuFmtFindLevelWithPageShift(pFmt->pRoot, BIT_IDX_32(pageSize)); + + oldKind = newKind = memdescGetPteKindForGpu(pMemDesc, pMappingGpu); + if (pMemory) + { + // + // The physical memory layout can be specified after allocation using + // UvmMapExternalAllocation, so the kind attribute needs to be computed + // again + // + status = nvGpuOpsGetPteKind(pMappingGpu, pMemoryManager, pMemDesc, pMemory, + pGpuExternalMappingInfo, &newKind); + + if (status != NV_OK) + return status; + + if (oldKind != newKind) + { + memdescSetPteKindForGpu(pMemDesc, pMappingGpu, newKind); + kindChanged = NV_TRUE; + } + } + + // Get the CompTag range and Kind. + status = memmgrGetKindComprForGpu_HAL(pMemoryManager, pMemDesc, pMappingGpu, 0, &kind, &comprInfo); + if (status != NV_OK) + return status; + + if (kindChanged) + memdescSetPteKindForGpu(pMemDesc, pMappingGpu, oldKind); + + aperture = nvGpuOpsGetExternalAllocAperture(pMemDesc, isIndirectPeerSupported, isPeerSupported); + + vol = nvGpuOpsGetExternalAllocVolatility(pMemDesc, aperture, isIndirectPeerSupported, + pGpuExternalMappingInfo->cachingType); + + status = nvGpuOpsGetExternalAllocMappingAttribute(pGpuExternalMappingInfo->mappingType, + pMemDesc, + &readOnly, + &atomic); + if (status != NV_OK) + return status; + + encrypted = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_ENCRYPTED); + + privileged = memdescGetFlag(pMemDesc, MEMDESC_FLAGS_GPU_PRIVILEGED); + + mappingSize = size ? 
size : allocSize; + + skipPteCount = pLevelFmt->entrySize / sizeof(NvU64); + + isCompressedKind = memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_COMPRESSIBLE, kind); + + pteCount = NV_MIN((pGpuExternalMappingInfo->pteBufferSize / pLevelFmt->entrySize), (mappingSize / pageSize)); + if (!pteCount) + return NV_ERR_BUFFER_TOO_SMALL; + + { + if (nvFieldIsValid32(&pPteFmt->fldValid.desc)) + nvFieldSetBool(&pPteFmt->fldValid, NV_TRUE, pte.v8); + + if (nvFieldIsValid32(&pPteFmt->fldVolatile.desc)) + nvFieldSetBool(&pPteFmt->fldVolatile, vol, pte.v8); + + if (nvFieldIsValid32(&pPteFmt->fldPrivilege.desc)) + nvFieldSetBool(&pPteFmt->fldPrivilege, privileged, pte.v8); + + if (nvFieldIsValid32(&pPteFmt->fldEncrypted.desc)) + nvFieldSetBool(&pPteFmt->fldEncrypted, encrypted, pte.v8); + + if (nvFieldIsValid32(&pPteFmt->fldReadOnly.desc)) + nvFieldSetBool(&pPteFmt->fldReadOnly, readOnly, pte.v8); + + if (nvFieldIsValid32(&pPteFmt->fldWriteDisable.desc)) + nvFieldSetBool(&pPteFmt->fldWriteDisable, readOnly, pte.v8); + + if (nvFieldIsValid32(&pPteFmt->fldReadDisable.desc)) + nvFieldSetBool(&pPteFmt->fldReadDisable, NV_FALSE, pte.v8); + + if (nvFieldIsValid32(&pPteFmt->fldAtomicDisable.desc)) + nvFieldSetBool(&pPteFmt->fldAtomicDisable, !atomic, pte.v8); + + gmmuFieldSetAperture(&pPteFmt->fldAperture, aperture, pte.v8); + + if (!isCompressedKind) + { + nvFieldSet32(&pPteFmt->fldKind, kind, pte.v8); + nvFieldSet32(&pPteFmt->fldCompTagLine, 0, pte.v8); + if (nvFieldIsValid32(&pPteFmt->fldCompTagSubIndex)) + nvFieldSet32(&pPteFmt->fldCompTagSubIndex, 0, pte.v8); + } + } + + if (aperture == GMMU_APERTURE_PEER) + { + FlaMemory* pFlaMemory = dynamicCast(pMemory, FlaMemory); + nvFieldSet32(&pPteFmt->fldPeerIndex, peerId, pte.v8); + + if ((memdescGetAddressSpace(pMemDesc) == ADDR_FABRIC) || + (memdescGetAddressSpace(pMemDesc) == ADDR_FABRIC_V2) || pFlaMemory) + { + // + // ADDR_FABRIC/ADDR_FABRIC_V2 memory descriptors are pre-encoded with the fabric base address + // use NVLINK_INVALID_FABRIC_ADDR to avoid encoding twice + // + fabricBaseAddress = NVLINK_INVALID_FABRIC_ADDR; + } + else + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMemDesc->pGpu); + if (pKernelNvlink == NULL) + { + fabricBaseAddress = NVLINK_INVALID_FABRIC_ADDR; + } + else + { + fabricBaseAddress = knvlinkGetUniqueFabricBaseAddress(pMemDesc->pGpu, pKernelNvlink); + } + } + } + + // + // Both memdescGetPhysAddr() and kgmmuEncodePhysAddr() have pretty high overhead. + // To avoid it, allocate an array for the physical addresses and use the + // flavors of the APIs that work on multiple addresses at a time. + // + // Notably the pteBuffer array could be re-used for that, but it gets a bit + // tricky if skipPteCount is greater than 1 so just keep it simple. + // + physicalAddresses = portMemAllocNonPaged((NvU32)pteCount * sizeof(*physicalAddresses)); + if (physicalAddresses == NULL) + return NV_ERR_NO_MEMORY; + + // + // Ask for physical addresses for the GPU being mapped as it may not be the + // same as the GPU owning the memdesc. This matters for sysmem as accessing + // it requires IOMMU mappings to be set up and these are different for each + // GPU. The IOMMU mappings are currently added by nvGpuOpsDupMemory(). 
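+    // The batch below covers the range [offset, offset + pteCount * pageSize)
+    // in pageSize strides. fabricBaseAddress stays NVLINK_INVALID_FABRIC_ADDR
+    // except for peer mappings of non-fabric memory on NVLink-capable GPUs, in
+    // which case kgmmuEncodePhysAddrs() is expected to fold it into each
+    // address (cf. the "avoid encoding twice" note earlier for fabric memory).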
+ // + memdescGetPhysAddrsForGpu(pMemDesc, pMappingGpu, AT_GPU, offset, pageSize, pteCount, physicalAddresses); + kgmmuEncodePhysAddrs(pKernelGmmu, aperture, physicalAddresses, fabricBaseAddress, pteCount); + + + // + // Get information whether given physical address needs PLCable kind + // + if (IS_VIRTUAL_WITH_SRIOV(pMappingGpu) && + gpuIsWarBug200577889SriovHeavyEnabled(pMappingGpu) && + isCompressedKind && + !memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_DISALLOW_PLC, comprInfo.kind)) + { + guestPhysicalAddress = portMemAllocNonPaged((NvU32)pteCount * sizeof(*guestPhysicalAddress)); + if (guestPhysicalAddress == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(guestPhysicalAddress, 0, ((NvU32)pteCount * sizeof(*guestPhysicalAddress))); + + gpaOffset = offset; + for (iter = 0; iter < pteCount; iter++) + { + guestPhysicalAddress[iter] = gpaOffset; + gpaOffset += pageSize; + } + + isPLCable = portMemAllocNonPaged((NvU32)pteCount * sizeof(*isPLCable)); + if (isPLCable == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemSet(isPLCable, 0, ((NvU32)pteCount * sizeof(*isPLCable))); + + NV_RM_RPC_GET_PLCABLE_ADDRESS_KIND(pMappingGpu, guestPhysicalAddress, pageSize, (NvU32)pteCount, + isPLCable, status); + if (status != NV_OK) + goto done; + } + + for (iter = 0; iter < pteCount; iter++) + { + physAddr = physicalAddresses[iter]; + + gmmuFieldSetAddress(gmmuFmtPtePhysAddrFld(pPteFmt, aperture), + physAddr, + pte.v8); + + if (isCompressedKind) + { + // We have to reset pte.v8 fields in care of partially compressed allocations + // Otherwise, non-compressed PTEs will get bits from compressed PTEs + if (pFmt->version <= GMMU_FMT_VERSION_2) + { + NvBool bIsWarApplied = NV_FALSE; + NvU32 savedKind = comprInfo.kind; + MemoryManager *pMemoryManager = GPU_GET_MEMORY_MANAGER(pMappingGpu); + KernelMemorySystem *pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pMappingGpu); + const MEMORY_SYSTEM_STATIC_CONFIG *pMemorySystemConfig = + kmemsysGetStaticConfig(pMappingGpu, pKernelMemorySystem); + + nvFieldSet32(&pPteFmt->fldKind, 0, pte.v8); + nvFieldSet32(&pPteFmt->fldCompTagLine, 0, pte.v8); + if (nvFieldIsValid32(&pPteFmt->fldCompTagSubIndex)) + nvFieldSet32(&pPteFmt->fldCompTagSubIndex, 0, pte.v8); + + if (pMemorySystemConfig->bUseRawModeComptaglineAllocation && + pMemorySystemConfig->bDisablePlcForCertainOffsetsBug3046774 && + !memmgrIsKind_HAL(pMemoryManager, FB_IS_KIND_DISALLOW_PLC, comprInfo.kind)) + { + NvBool bEnablePlc = NV_TRUE; + + if (IS_VIRTUAL_WITH_SRIOV(pMappingGpu) && + gpuIsWarBug200577889SriovHeavyEnabled(pMappingGpu)) + { + bEnablePlc = isPLCable[iter]; + } + else + { + bEnablePlc = kmemsysIsPagePLCable_HAL(pMappingGpu, pKernelMemorySystem, offset, pageSize); + } + + if (!bEnablePlc) + { + bIsWarApplied = NV_TRUE; + memmgrGetDisablePlcKind_HAL(pMemoryManager, &comprInfo.kind); + } + } + + kgmmuFieldSetKindCompTags(GPU_GET_KERNEL_GMMU(pMappingGpu), pFmt, pLevelFmt, &comprInfo, physAddr, + offset, mmuFmtVirtAddrToEntryIndex(pLevelFmt, offset), pte.v8); + // + // restore the kind to PLC if changd, since kind is associated with entire surface, and the WAR applies to + // individual pages in the surface. 
+ if (bIsWarApplied) + comprInfo.kind = savedKind; + } + } + + portMemCopy(&pGpuExternalMappingInfo->pteBuffer[iter * skipPteCount], pLevelFmt->entrySize, pte.v8, pLevelFmt->entrySize); + + offset += pageSize; + } + + pGpuExternalMappingInfo->numWrittenPtes = pteCount; + pGpuExternalMappingInfo->numRemainingPtes = (mappingSize / pageSize) - pteCount; + pGpuExternalMappingInfo->pteSize = pLevelFmt->entrySize; + +done: + portMemFree(physicalAddresses); + + portMemFree(guestPhysicalAddress); + + portMemFree(isPLCable); + + return status; +} + +NV_STATUS nvGpuOpsGetExternalAllocPtes(struct gpuAddressSpace *vaSpace, + NvHandle hMemory, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo) +{ + NV_STATUS status = NV_OK; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + Memory *pMemory = NULL; + PMEMORY_DESCRIPTOR pMemDesc = NULL; + OBJGPU *pMappingGpu = NULL; + NvU32 peerId = 0; + NvBool isSliSupported = NV_FALSE; + NvBool isPeerSupported = NV_FALSE; + NvBool isIndirectPeerSupported = NV_FALSE; + OBJVASPACE *pVAS = NULL; + FlaMemory *pFlaMemory = NULL; + OBJGPU *pSrcGpu = NULL; + OBJGPU *pPeerGpu = NULL; + RsClient *pClient; + MEMORY_DESCRIPTOR *pAdjustedMemDesc = NULL; + FABRIC_VASPACE *pFabricVAS = NULL; + + if (!pGpuExternalMappingInfo || !hMemory || !vaSpace) + return NV_ERR_INVALID_ARGUMENT; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, + vaSpace->device->session->handle, + &pClient, + &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = vaspaceGetByHandleOrDeviceDefault(pClient, + vaSpace->device->handle, + vaSpace->handle, + &pVAS); + if (status != NV_OK) + goto done; + + status = nvGpuOpsGetMemoryByHandle(vaSpace->device->session->handle, + hMemory, + &pMemory); + if (status != NV_OK) + goto done; + + // RM client allocations can't have multiple subDevice memdescs. + pMemDesc = pMemory->pMemDesc; + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + // Do not support mapping on anything other than sysmem/vidmem/fabric! 
+ if ((memdescGetAddressSpace(pMemDesc) != ADDR_SYSMEM) && + (memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM) && + (memdescGetAddressSpace(pMemDesc) != ADDR_FABRIC) && + (memdescGetAddressSpace(pMemDesc) != ADDR_FABRIC_V2)) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = CliSetGpuContext(vaSpace->device->session->handle, + vaSpace->device->handle, + &pMappingGpu, + NULL); + if (status != NV_OK) + goto done; + + pAdjustedMemDesc = pMemDesc; + pFabricVAS = dynamicCast(pMappingGpu->pFabricVAS, FABRIC_VASPACE); + if (pFabricVAS != NULL) + { + status = fabricvaspaceGetGpaMemdesc(pFabricVAS, pMemDesc, pMappingGpu, &pAdjustedMemDesc); + if (status != NV_OK) + goto done; + } + + // Check if P2P supported + if ((memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FABRIC) || + (memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FABRIC_V2)) + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pMappingGpu); + + isPeerSupported = NV_TRUE; + pPeerGpu = pAdjustedMemDesc->pGpu; + peerId = BUS_INVALID_PEER; + + if (pPeerGpu != NULL) + { + if ((pKernelNvlink != NULL) && + knvlinkIsNvlinkP2pSupported(pMappingGpu, pKernelNvlink, pPeerGpu)) + { + peerId = kbusGetPeerId_HAL(pMappingGpu, GPU_GET_KERNEL_BUS(pMappingGpu), pPeerGpu); + } + } + + if (peerId == BUS_INVALID_PEER) + { + status = NV_ERR_INVALID_STATE; + goto freeGpaMemdesc; + } + } + else if (memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FBMEM && + (pAdjustedMemDesc->pGpu->gpuId != pMappingGpu->gpuId || + dynamicCast(pMemory, FlaMemory))) + { + if (gpumgrCheckIndirectPeer(pAdjustedMemDesc->pGpu, pMappingGpu)) + { + isIndirectPeerSupported = NV_TRUE; + } + else + { + pFlaMemory = dynamicCast(pMemory, FlaMemory); + if (pFlaMemory != NULL) + { + pSrcGpu = gpumgrGetGpu(pFlaMemory->peerGpuInst); + if (!pSrcGpu) + { + status = NV_ERR_INVALID_ARGUMENT; + goto freeGpaMemdesc; + } + } + + status = nvGpuOpsGetExternalAllocP2pInfo(vaSpace->device->session, + (pFlaMemory) ? (pSrcGpu->gpuId) :(pAdjustedMemDesc->pGpu->gpuId), + pMappingGpu->gpuId, + &isPeerSupported, + &peerId); + if (status != NV_OK) + goto freeGpaMemdesc; + } + + // + // If GPUs are in the same SLI group, don't do peer mappings even if the GPUs are different. In SLI config, + // if a caller can try to map a memory on a GPU other than the GPU which is associated with the memdesc, + // always return local VIDMEM mapping because RM shares a memdesc among such GPUs for client allocations. + // Note: This check could be avoided if we could know that pMemDesc->pGpu is always the SLI master i.e. same + // as the pGPU returned by CliSetGpuContext. + // + if (!pFlaMemory && pAdjustedMemDesc->pGpu->deviceInstance == pMappingGpu->deviceInstance) + { + isPeerSupported = NV_FALSE; + isSliSupported = NV_TRUE; + } + + // Even if the RM returns P2P or indirect peer supported, make sure the GPUs are not from different SLI groups. See Bug# 759980. + if ((isPeerSupported || isIndirectPeerSupported) && + (IsSLIEnabled(pMappingGpu) || IsSLIEnabled(pAdjustedMemDesc->pGpu))) + { + status = NV_ERR_NOT_SUPPORTED; + goto freeGpaMemdesc; + } + + NV_ASSERT(!(isPeerSupported && isSliSupported)); + + // If a caller is trying to map VIDMEM on GPUs with no P2P support and are not in the same SLI group, error out. 
+ if (!isPeerSupported && !isIndirectPeerSupported && !isSliSupported) + { + status = NV_ERR_NOT_SUPPORTED; + goto freeGpaMemdesc; + } + } + + status = nvGpuOpsBuildExternalAllocPtes(pVAS, pMappingGpu, pAdjustedMemDesc, pMemory, offset, size, + isIndirectPeerSupported, isPeerSupported, peerId, + pGpuExternalMappingInfo); + +freeGpaMemdesc: + if (pAdjustedMemDesc != pMemDesc) + fabricvaspacePutGpaMemdesc(pFabricVAS, pAdjustedMemDesc); + +done: + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +void nvGpuOpsAddressSpaceDestroy(struct gpuAddressSpace *vaSpace) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + NV_ASSERT(vaSpace->dummyGpuAlloc.refCount == 0); + + // free all the mallocs + if (vaSpace->allocations) + { + portSyncRwLockAcquireWrite(vaSpace->allocationsLock); + destroyAllGpuMemDescriptors(vaSpace->device->session->handle, + vaSpace->allocations); + portSyncRwLockReleaseWrite(vaSpace->allocationsLock); + } + + // free all the physical allocations + if (vaSpace->physAllocations) + { + portSyncRwLockAcquireWrite(vaSpace->physAllocationsLock); + destroyAllGpuMemDescriptors(vaSpace->device->session->handle, + vaSpace->physAllocations); + portSyncRwLockReleaseWrite(vaSpace->physAllocationsLock); + } + + // Destroy CPU mappings + if (vaSpace->cpuMappings) + { + portSyncRwLockAcquireWrite(vaSpace->cpuMappingsLock); + btreeDestroyData(vaSpace->cpuMappings); + portSyncRwLockReleaseWrite(vaSpace->cpuMappingsLock); + } + + if (vaSpace->handle) + pRmApi->Free(pRmApi, vaSpace->device->session->handle, vaSpace->handle); + + portSyncRwLockDestroy(vaSpace->allocationsLock); + portSyncRwLockDestroy(vaSpace->cpuMappingsLock); + portSyncRwLockDestroy(vaSpace->physAllocationsLock); + + portMemFree(vaSpace); +} + +static NV_STATUS nvGpuOpsAllocPhysical(struct gpuDevice *device, + NvBool isSystemMemory, + NvLength length, + NvU64 *paOffset, + gpuAllocInfo *allocInfo) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = {0}; + NV_STATUS status = NV_OK; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + NvHandle physHandle = 0; + + NV_ASSERT(allocInfo); + NV_ASSERT(device); + NV_ASSERT(paOffset); + + // then allocate the physical memory in either sysmem or fb. + memAllocParams.owner = HEAP_OWNER_RM_KERNEL_CLIENT; + + // Physical allocations don't expect vaSpace handles + memAllocParams.hVASpace = 0; + + // Reset previous offset + memAllocParams.offset = 0; + + memAllocParams.size = length; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.attr = isSystemMemory ? + DRF_DEF(OS32, _ATTR, _LOCATION, _PCI) : + DRF_DEF(OS32, _ATTR, _LOCATION, _VIDMEM); + + // Always enable caching for System Memory as all the currently supported + // platforms are IO coherent. + memAllocParams.attr |= isSystemMemory ? + DRF_DEF(OS32, _ATTR, _COHERENCY, _CACHED): + DRF_DEF(OS32, _ATTR, _COHERENCY, _UNCACHED); + + // Allocate contigous allocation if requested by client + memAllocParams.attr |= allocInfo->bContiguousPhysAlloc ? + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _CONTIGUOUS): + DRF_DEF(OS32, _ATTR, _PHYSICALITY, _DEFAULT); + + // Set pageSize for PA-allocation. 
RM default is Big page size + switch (allocInfo->pageSize) + { + case RM_PAGE_SIZE: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB); + break; + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_128K: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _BIG); + break; + case RM_PAGE_SIZE_HUGE: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE); + memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB); + break; + case RM_PAGE_SIZE_512M: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE); + memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _512MB); + break; + default: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT); + break; + } + + // Set the alignment + if (allocInfo->alignment) + { + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE; + memAllocParams.alignment = allocInfo->alignment; + } + + // Do we have a range Hint ? + if (allocInfo->rangeBegin != allocInfo->rangeEnd) + { + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_USE_BEGIN_END; + memAllocParams.rangeLo = allocInfo->rangeBegin; + memAllocParams.rangeHi = allocInfo->rangeEnd; + } + + // Do we need to allocate at top of FB + if (allocInfo->bMemGrowsDown) + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN; + + // Ask RM to allocate persistent video memory + if (!isSystemMemory && allocInfo->bPersistentVidmem) + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_PERSISTENT_VIDMEM; + + // + // vid heap ctrl has a different policy as compared to other internal APIS + // it expects the gpu lock to not be held. This means we have to drop the gpu lock + // here. It is safe in this scenario because we still have the API lock and nothing + // from a GPU interrupt can change anything in the OPS state. + // + + physHandle = NV01_NULL_OBJECT; + NV_ASSERT_OK_OR_GOTO(status, pRmApi->Alloc(pRmApi, + device->session->handle, + isSystemMemory ? device->handle : device->subhandle, + &physHandle, + isSystemMemory ? NV01_MEMORY_SYSTEM : NV01_MEMORY_LOCAL_USER, + &memAllocParams), done); + if (allocInfo->bContiguousPhysAlloc) + allocInfo->gpuPhysOffset = memAllocParams.offset; + + allocInfo->hPhysHandle = physHandle; + *paOffset = (NvU64)allocInfo->gpuPhysOffset; + +done: + + if (status != NV_OK) + pRmApi->Free(pRmApi, device->session->handle, physHandle); + + return status; +} + +// The call allocates a virtual memory and associates a PA with it. +static NV_STATUS nvGpuOpsAllocVirtual(struct gpuAddressSpace *vaSpace, + NvLength length, + NvU64 *vaOffset, + NvHandle physHandle, + struct allocFlags flags, + gpuVaAllocInfo *allocInfo) +{ + NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { 0 }; + NV_STATUS status; + gpuMemDesc *memDesc = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + NV_ASSERT(allocInfo); + NV_ASSERT(vaSpace); + NV_ASSERT(vaOffset); + NV_ASSERT(physHandle); + + memDesc = portMemAllocNonPaged(sizeof(*memDesc)); + if (memDesc == NULL) + return NV_ERR_NO_MEMORY; + + // first allocate the virtual memory + + memAllocParams.owner = HEAP_OWNER_RM_KERNEL_CLIENT; + memAllocParams.size = length; + memAllocParams.type = NVOS32_TYPE_IMAGE; + memAllocParams.alignment = NV_GPU_SMALL_PAGESIZE; + memAllocParams.flags = NVOS32_ALLOC_FLAGS_VIRTUAL | + NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE | + NVOS32_ALLOC_FLAGS_ALLOCATE_KERNEL_PRIVILEGED; + + if (allocInfo->bFixedAddressAllocate) + { + memAllocParams.flags |= NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE; + memAllocParams.offset = allocInfo->vaStart; + } + + // Set pageSize for VA-allocation. 
RM default is Big page size + switch (allocInfo->pageSize) + { + case RM_PAGE_SIZE: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _4KB); + break; + case RM_PAGE_SIZE_64K: + case RM_PAGE_SIZE_128K: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _BIG); + break; + case RM_PAGE_SIZE_HUGE: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE); + memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _2MB); + break; + case RM_PAGE_SIZE_512M: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _HUGE); + memAllocParams.attr2 |= DRF_DEF(OS32, _ATTR2, _PAGE_SIZE_HUGE, _512MB); + break; + default: + memAllocParams.attr |= DRF_DEF(OS32, _ATTR, _PAGE_SIZE, _DEFAULT); + break; + } + + memAllocParams.hVASpace = vaSpace->handle; + + memDesc->handle = NV01_NULL_OBJECT; + NV_ASSERT_OK_OR_GOTO(status, pRmApi->Alloc(pRmApi, + vaSpace->device->session->handle, + vaSpace->device->handle, + &memDesc->handle, + NV50_MEMORY_VIRTUAL, + &memAllocParams), done); + memDesc->address = (NvU64)memAllocParams.offset; + memDesc->size = length; + memDesc->childHandle = physHandle; + + portSyncRwLockAcquireWrite(vaSpace->allocationsLock); + status = trackDescriptor(&vaSpace->allocations, memDesc->address, memDesc); + portSyncRwLockReleaseWrite(vaSpace->allocationsLock); + if (status != NV_OK) + goto done; + + // return the allocated GPU VA + *vaOffset = memDesc->address; + +done: + + if (status != NV_OK) + pRmApi->Free(pRmApi, vaSpace->device->session->handle, memDesc->handle); + + if ((status != NV_OK) && (memDesc != NULL)) + portMemFree(memDesc); + + return status; +} + +// will need to support offset within allocation +static NV_STATUS nvGpuOpsMapGpuMemory(struct gpuAddressSpace *vaSpace, + NvU64 vaOffset, + NvLength length, + NvU32 pageSize, + NvU64 *gpuOffset, + struct allocFlags flags) +{ + gpuMemDesc *memDescVa = NULL; + NV_STATUS status; + NvU64 mappedVa = 0; + NvU32 mapFlags = 0; + NvU32 mapPageSize = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + if (!vaSpace || !gpuOffset) + return NV_ERR_INVALID_ARGUMENT; + + portSyncRwLockAcquireRead(vaSpace->allocationsLock); + status = findDescriptor(vaSpace->allocations, vaOffset, (void**)&memDescVa); + portSyncRwLockReleaseRead(vaSpace->allocationsLock); + if (status != NV_OK) + return status; + + NV_ASSERT(memDescVa); + NV_ASSERT(memDescVa->handle); + NV_ASSERT(memDescVa->childHandle); + + if (pageSize == RM_PAGE_SIZE) + { + mapPageSize |= DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _4KB); + } + else if (pageSize == RM_PAGE_SIZE_HUGE) + { + // TODO: this flag is ignored, remove it once it is deprecated + mapPageSize |= DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _HUGE); + } + else + { + mapPageSize |= DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _DEFAULT); + } + + // map the 2 surfaces + mapFlags |= ((flags.bGetKernelVA) ? DRF_DEF(OS46, _FLAGS, _KERNEL_MAPPING, _ENABLE) : + DRF_DEF(OS46, _FLAGS, _KERNEL_MAPPING, _NONE)); + mapFlags |= mapPageSize; + + // Always enable snooping as that's what's needed for sysmem allocations and + // it's ignored for vidmem. 
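+    // Taken together (illustrative example): a kernel-VA mapping of a
+    // 4 KB-page allocation ends up with
+    //     mapFlags == DRF_DEF(OS46, _FLAGS, _KERNEL_MAPPING, _ENABLE) |
+    //                 DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _4KB) |
+    //                 DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE);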
+ mapFlags |= DRF_DEF(OS46, _FLAGS, _CACHE_SNOOP, _ENABLE); + + // map the 2 surfaces + status = pRmApi->Map(pRmApi, + vaSpace->device->session->handle, + vaSpace->device->handle, + memDescVa->handle, + memDescVa->childHandle, + 0, + length, + mapFlags, + &mappedVa); + if (status != NV_OK) + return status; + + NV_ASSERT(memDescVa->address == mappedVa); + + *gpuOffset = memDescVa->address; + + return NV_OK; +} + +// +// This function provides a gpu virtual address to a physical region +// that can either be in sysmem or vidmem. +// +static NV_STATUS nvGpuOpsGpuMalloc(struct gpuAddressSpace *vaSpace, + NvBool isSystemMemory, + NvLength length, + NvU64 *gpuOffset, + struct allocFlags flags, + gpuAllocInfo *allocInfo) +{ + NV_STATUS status; + NvU64 vaOffset = 0; + NvU64 paOffset = 0; + gpuVaAllocInfo vaAllocInfo = { 0 }; + NvHandle paMemDescHandle; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + NV_ASSERT(allocInfo); + NV_ASSERT(vaSpace); + NV_ASSERT(gpuOffset); + + // Allocate physical memory first. So that we can associate PA with the memDesc of VA. + // This simplifies tracking of VA and PA handles. + status = nvGpuOpsAllocPhysical(vaSpace->device, isSystemMemory, length, + &paOffset, allocInfo); + if (status != NV_OK) + return status; + + NV_ASSERT(allocInfo->hPhysHandle); + + paMemDescHandle = allocInfo->hPhysHandle; + vaAllocInfo.pageSize = allocInfo->pageSize; + + status = nvGpuOpsAllocVirtual(vaSpace, length, &vaOffset, paMemDescHandle, + flags, &vaAllocInfo); + if (status != NV_OK) + goto cleanup_physical; + + status = nvGpuOpsMapGpuMemory(vaSpace, vaOffset, length, + allocInfo->pageSize, gpuOffset, flags); + if (status != NV_OK) + goto cleanup_virtual; + + return NV_OK; + +cleanup_virtual: + nvGpuOpsFreeVirtual(vaSpace, vaOffset); +cleanup_physical: + pRmApi->Free(pRmApi, vaSpace->device->session->handle, paMemDescHandle); + return status; +} + +static void nvGpuOpsFreeVirtual(struct gpuAddressSpace *vaSpace, NvU64 vaOffset) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + gpuMemDesc *memDescVa = NULL; + portSyncRwLockAcquireWrite(vaSpace->allocationsLock); + deleteDescriptor(&vaSpace->allocations, vaOffset, (void**)&memDescVa); + portSyncRwLockReleaseWrite(vaSpace->allocationsLock); + NV_ASSERT(memDescVa); + pRmApi->Free(pRmApi, vaSpace->device->session->handle, memDescVa->handle); + portMemFree(memDescVa); +} + +NV_STATUS nvGpuOpsMemoryAllocFb(struct gpuAddressSpace *vaSpace, + NvLength length, + NvU64 *gpuOffset, + gpuAllocInfo *allocInfo) +{ + gpuAllocInfo allocInfoTemp = {0}; + gpuAllocInfo *pAllocInfo; + struct allocFlags flags = {0}; + + if (!vaSpace || !gpuOffset) + return NV_ERR_INVALID_ARGUMENT; + + // Use default settings if user hasn't provided one. + if (allocInfo == NULL) + { + pAllocInfo = &allocInfoTemp; + } + else + { + pAllocInfo = allocInfo; + } + + return nvGpuOpsGpuMalloc(vaSpace, NV_FALSE, length, gpuOffset, flags, + pAllocInfo); +} + +NV_STATUS nvGpuOpsMemoryAllocSys(struct gpuAddressSpace *vaSpace, + NvLength length, + NvU64 *gpuOffset, + gpuAllocInfo *allocInfo) +{ + gpuAllocInfo allocInfoTemp = {0}; + gpuAllocInfo *pAllocInfo; + struct allocFlags flags = {0}; + + if (!vaSpace || !gpuOffset) + return NV_ERR_INVALID_ARGUMENT; + + // Use default settings if user hasn't provided one. 
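+    // Passing allocInfo == NULL selects the zero-initialized defaults below:
+    // pageSize 0 falls through to the _PAGE_SIZE, _DEFAULT attribute in
+    // nvGpuOpsAllocPhysical()/nvGpuOpsAllocVirtual() (the RM big page size),
+    // and no alignment, range hint, contiguity, or persistence is requested.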
+ if (allocInfo == NULL) + { + pAllocInfo = &allocInfoTemp; + } + else + { + pAllocInfo = allocInfo; + } + + return nvGpuOpsGpuMalloc(vaSpace, NV_TRUE, length, gpuOffset, flags, + pAllocInfo); +} + +NV_STATUS nvGpuOpsMemoryReopen(struct gpuAddressSpace *vaSpace, + NvHandle hSrcClient, + NvHandle hSrcAllocation, + NvLength length, + NvU64 *gpuOffset) +{ + NV_STATUS status; + NvHandle hAllocation = 0; + gpuVaAllocInfo allocInfoTemp = { 0 }; + struct allocFlags flags = { 0 }; + NvU64 vaOffset; + NvHandle hVirtual = 0; + RsResourceRef *pResourceRef; + NvU64 addressOffset = 0; + NvHandle hParent; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + // find device type + // TODO: Acquired because serverutilGetResourceRef expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + + status = serverutilGetResourceRef(hSrcClient, hSrcAllocation, &pResourceRef); + if (status != NV_OK) + { + rmapiLockRelease(); + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (!dynamicCast(pResourceRef->pResource, Memory)) + { + rmapiLockRelease(); + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + hParent = pResourceRef->pParentRef ? pResourceRef->pParentRef->hResource : 0; + + status = serverutilGetResourceRef(hSrcClient, hParent, &pResourceRef); + rmapiLockRelease(); + if (status != NV_OK || !dynamicCast(pResourceRef->pResource, Device)) + return NV_ERR_GENERIC; + + if (!vaSpace || !gpuOffset || !hSrcAllocation || !hSrcClient) + return NV_ERR_INVALID_ARGUMENT; + + // Dup the physical memory object + hAllocation = NV01_NULL_OBJECT; + status = pRmApi->DupObject(pRmApi, + vaSpace->device->session->handle, + vaSpace->device->handle, + &hAllocation, + hSrcClient, + hSrcAllocation, + NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE); + if (status != NV_OK) + return status; + + // Associate the duped object with the newly created virtual memory object + status = nvGpuOpsAllocVirtual(vaSpace, length, &vaOffset, hAllocation, + flags, &allocInfoTemp); + if (status != NV_OK) + goto cleanup_dup; + + status = getHandleForVirtualAddr(vaSpace, vaOffset, NV_FALSE, &hVirtual); + if (status != NV_OK) + goto cleanup_virt_allocation; + + // map the memory + status = pRmApi->Map(pRmApi, + vaSpace->device->session->handle, + vaSpace->device->handle, + hVirtual, + hAllocation, + 0, + length, + 0, + &addressOffset); + if (status != NV_OK) + goto cleanup_virt_allocation; + + NV_ASSERT((vaOffset == addressOffset) && "nvGpuOpsMemoryReopen: VA offset Mistmatch!"); + + // return the mapped GPU pointer + *gpuOffset = vaOffset; + + return NV_OK; + +cleanup_virt_allocation: + nvGpuOpsFreeVirtual(vaSpace, vaOffset); +cleanup_dup: + pRmApi->Free(pRmApi, vaSpace->device->session->handle, hAllocation); + return status; +} + +NV_STATUS nvGpuOpsPmaAllocPages(void *pPma, NvLength pageCount, NvU32 pageSize, + gpuPmaAllocationOptions *pPmaAllocOptions, + NvU64 *pPages) +{ + NV_STATUS status; + gpuPmaAllocationOptions pmaAllocOptionsTemp = {0}; + gpuPmaAllocationOptions *pAllocInfo; + THREAD_STATE_NODE threadState; + + if (!pPma || !pPages) + return NV_ERR_INVALID_ARGUMENT; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // Use default settings if user hasn't provided one. + if (NULL == pPmaAllocOptions) + { + pAllocInfo = &pmaAllocOptionsTemp; + } + else + { + pAllocInfo = pPmaAllocOptions; + } + + // Invoke PMA module to alloc pages. 
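+    // A minimal caller-side sketch (the page size and count are illustrative;
+    // passing NULL options selects the zeroed defaults handled above):
+    //
+    //     NvU64 pages[64];
+    //     status = nvGpuOpsPmaAllocPages(pPma, 64, RM_PAGE_SIZE_64K, NULL, pages);
+    //
+    // On success pages[] holds one physical address per allocated page (a
+    // single base address for contiguous requests, cf. nvGpuOpsPmaFreePages()
+    // below).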
+ status = pmaAllocatePages((PMA *)pPma, + pageCount, + pageSize, + (PMA_ALLOCATION_OPTIONS *)pAllocInfo, + pPages); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +// +// When this API is called from UVM as part of PMA eviction, the thread state +// should have been initialized already and recursive re-init needs to be +// skipped as it's not supported. +// +NV_STATUS nvGpuOpsPmaPinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags) +{ + NV_STATUS status; + THREAD_STATE_NODE threadState; + NvBool pmaEvictionCall = (flags & UVM_PMA_CALLED_FROM_PMA_EVICTION) != 0; + + if (!pPma || !pPages) + return NV_ERR_INVALID_ARGUMENT; + + if (!pmaEvictionCall) + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // Invoke PMA module to Pin pages. + status = pmaPinPages((PMA *)pPma, pPages, pageCount, pageSize); + + if (!pmaEvictionCall) + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +NV_STATUS nvGpuOpsPmaUnpinPages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize) +{ + NV_STATUS status; + THREAD_STATE_NODE threadState; + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (!pPma || !pPages) + return NV_ERR_INVALID_ARGUMENT; + + // Invoke PMA module to Unpin pages. + status = pmaUnpinPages((PMA *)pPma, pPages, pageCount, pageSize); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +// +// When this API is called from UVM as part of PMA eviction, the thread state +// should have been initialized already and recursive re-init needs to be +// skipped as it's not supported. +// +void nvGpuOpsPmaFreePages(void *pPma, + NvU64 *pPages, + NvLength pageCount, + NvU32 pageSize, + NvU32 flags) +{ + THREAD_STATE_NODE threadState; + NvU32 pmaFreeFlag = ((flags & UVM_PMA_FREE_IS_ZERO) ? PMA_FREE_SKIP_SCRUB : 0); + NvBool pmaEvictionCall = (flags & UVM_PMA_CALLED_FROM_PMA_EVICTION) != 0; + + if (!pmaEvictionCall) + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (!pPma || !pPages) + return; + + // Invoke PMA module to free pages. 
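+    // A contiguous allocation is tracked as a single range, so it is handed
+    // back as one "page" of pageCount * pageSize bytes based at pPages[0],
+    // whereas a discontiguous allocation is freed page by page. For example
+    // (illustrative numbers): freeing 512 contiguous 64 KB pages issues one
+    // 32 MB free.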
+ if (flags & UVM_PMA_ALLOCATE_CONTIGUOUS) + pmaFreePages((PMA *)pPma, pPages, 1, pageCount * pageSize, pmaFreeFlag); + else + pmaFreePages((PMA *)pPma, pPages, pageCount, pageSize, pmaFreeFlag); + + if (!pmaEvictionCall) + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); +} + +static NV_STATUS nvGpuOpsChannelGetHwChannelId(struct gpuChannel *channel, + NvU32 *hwChannelId) +{ + NV0080_CTRL_FIFO_GET_CHANNELLIST_PARAMS params = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + params.numChannels = 1; + params.pChannelHandleList = NV_PTR_TO_NvP64(&channel->channelHandle); + params.pChannelList = NV_PTR_TO_NvP64(hwChannelId); + + return pRmApi->Control(pRmApi, + channel->vaSpace->device->session->handle, + channel->vaSpace->device->handle, + NV0080_CTRL_CMD_FIFO_GET_CHANNELLIST, + ¶ms, + sizeof(params)); +} + +static void gpuDeviceUnmapCpuFreeHandle(struct gpuDevice *device, + NvHandle handle, + void *ptr, + NvU32 flags) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + struct gpuSession *session = device->session; + + // Unmap the pointer + if (ptr) + { + NV_STATUS status; + const NvU32 pid = osGetCurrentProcess(); + + status = pRmApi->UnmapFromCpu(pRmApi, session->handle, device->subhandle, handle, ptr, flags, pid); + NV_ASSERT(status == NV_OK); + } + + // Free the handle + if (handle) + pRmApi->Free(pRmApi, session->handle, handle); +} + +static void gpuDeviceDestroyUsermodeRegion(struct gpuDevice *device) +{ + subDeviceDesc *rmSubDevice = device->rmSubDevice; + + gpuDeviceUnmapCpuFreeHandle(device, + rmSubDevice->clientRegionHandle, + (void *)rmSubDevice->clientRegionMapping, + 0); +} + +static NV_STATUS gpuDeviceMapUsermodeRegion(struct gpuDevice *device) +{ + NV_STATUS status = NV_OK; + NvHandle regionHandle = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + struct gpuSession *session = device->session; + subDeviceDesc *rmSubDevice = device->rmSubDevice; + + NV_ASSERT(isDeviceVoltaPlus(device)); + NV_ASSERT(rmSubDevice->clientRegionHandle == 0 && rmSubDevice->clientRegionMapping == NULL); + + regionHandle = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, + session->handle, + device->subhandle, + ®ionHandle, + VOLTA_USERMODE_A, + NULL); + if (NV_OK != status) + return status; + + status = pRmApi->MapToCpu(pRmApi, + session->handle, + device->subhandle, + regionHandle, + 0, + NVC361_NV_USERMODE__SIZE, + (void **)(&rmSubDevice->clientRegionMapping), + DRF_DEF(OS33, _FLAGS, _ACCESS, _WRITE_ONLY)); + if (NV_OK != status) + goto failure_case; + + rmSubDevice->clientRegionHandle = regionHandle; + return status; + +failure_case: + pRmApi->Free(pRmApi, device->session->handle, regionHandle); + return status; +} + +// +// In Volta+, a channel can submit work by "ringing a doorbell" on the gpu after +// updating the GP_PUT. The doorbell is a register mapped in the client's address +// space and can be shared by all channels in that address space. Each channel writes +// a channel-specific token to the doorbell to trigger the work. +// +static NV_STATUS nvGpuOpsGetWorkSubmissionInfo(struct gpuAddressSpace *vaSpace, + struct gpuChannel *channel) +{ + NV_STATUS status = NV_OK; + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS params = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + struct gpuDevice *device = vaSpace->device; + struct gpuSession *session = device->session; + subDeviceDesc *rmSubDevice = device->rmSubDevice; + + // Only valid for VOLTA+ (sub)Devices. 
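+    // Once this call succeeds, a client rings the doorbell described in the
+    // comment above roughly as follows (illustrative sketch; memory ordering
+    // and any required barriers are the caller's responsibility):
+    //
+    //     // ... write new GP entries and advance GP_PUT first ...
+    //     *channel->workSubmissionOffset = channel->workSubmissionToken;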
+ NV_ASSERT(isDeviceVoltaPlus(vaSpace->device)); + + // Now get the token for submission on given channel. + status = pRmApi->Control(pRmApi, + session->handle, + channel->channelHandle, + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN, + ¶ms, + sizeof(params)); + if (status != NV_OK) + return status; + + channel->workSubmissionOffset = (NvU32 *)((NvU8*)rmSubDevice->clientRegionMapping + NVC361_NOTIFY_CHANNEL_PENDING); + channel->workSubmissionToken = params.workSubmitToken; + + // + // pWorkSubmissionToken cannot be NULL even if errorNotifier is NULL. + // errorNotifier is checked for NULL previously, so just an assert is + // sufficient. + // + NV_ASSERT_OR_RETURN((channel->errorNotifier != NULL), NV_ERR_INVALID_POINTER); + + channel->pWorkSubmissionToken = + (NvU32 *)((NvU8 *)channel->errorNotifier + + (NV_CHANNELGPFIFO_NOTIFICATION_TYPE_WORK_SUBMIT_TOKEN * sizeof(NvNotification)) + + NV_OFFSETOF(NvNotification, info32)); + + return status; +} + +static NvBool channelNeedsDummyAlloc(struct gpuChannel *channel) +{ + return channel->gpPutLoc == UVM_BUFFER_LOCATION_SYS && deviceNeedsDummyAlloc(channel->vaSpace->device); +} + +static NV_STATUS channelRetainDummyAlloc(struct gpuChannel *channel, gpuChannelInfo *channelInfo) +{ + struct gpuAddressSpace *vaSpace = channel->vaSpace; + NV_STATUS status; + + if (!channelNeedsDummyAlloc(channel)) + return NV_OK; + + status = nvGpuOpsVaSpaceRetainDummyAlloc(vaSpace); + if (status != NV_OK) + return status; + + channel->retainedDummyAlloc = NV_TRUE; + channelInfo->dummyBar1Mapping = vaSpace->dummyGpuAlloc.cpuAddr; + + return NV_OK; +} + +static void channelReleaseDummyAlloc(struct gpuChannel *channel) +{ + if (channel != NULL && channel->retainedDummyAlloc) + { + NV_ASSERT(channelNeedsDummyAlloc(channel)); + nvGpuOpsVaSpaceReleaseDummyAlloc(channel->vaSpace); + } +} + +static NvU32 channelEngineType(const struct gpuChannel *channel) +{ + if (channel->engineType == UVM_GPU_CHANNEL_ENGINE_TYPE_CE) + return NV2080_ENGINE_TYPE_COPY(channel->engineIndex); + else if (channel->engineType == UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2) + return NV2080_ENGINE_TYPE_SEC2; + else + return NV2080_ENGINE_TYPE_GR(channel->engineIndex); +} + +static NV_STATUS channelAllocate(struct gpuAddressSpace *vaSpace, + UVM_GPU_CHANNEL_ENGINE_TYPE engineType, + const gpuChannelAllocParams *params, + struct gpuChannel **channelHandle, + gpuChannelInfo *channelInfo) +{ + NV_STATUS status; + struct gpuChannel *channel = NULL; + struct gpuDevice *device = NULL; + struct gpuSession *session = NULL; + void *cpuMap = NULL; + NvHandle hErrorNotifier; + struct ChannelAllocInfo *pAllocInfo = NULL; + void *gpfifoCtrl = NULL; + PCLI_DMA_MAPPING_INFO pDmaMappingInfo = NULL; + struct allocFlags flags = {0}; + OBJGPU *pGpu = NULL; + KernelFifo *pKernelFifo = NULL; + NvU32 pid = osGetCurrentProcess(); + NvU32 subdeviceInstance; + UVM_BUFFER_LOCATION gpFifoLoc; + UVM_BUFFER_LOCATION gpPutLoc; + NvLength gpFifoSize, errorNotifierSize; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + if (!vaSpace || !channelHandle || !params || !channelInfo) + return NV_ERR_INVALID_ARGUMENT; + + if (params->numGpFifoEntries == 0) + return NV_ERR_INVALID_ARGUMENT; + + if (engineType != UVM_GPU_CHANNEL_ENGINE_TYPE_CE && + engineType != UVM_GPU_CHANNEL_ENGINE_TYPE_GR && + engineType != UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2) + return NV_ERR_INVALID_ARGUMENT; + + // TODO: Bug 2458492: Ampere-SMC Verify GR/CE indices within partition/SMC Engine + + device = vaSpace->device; + NV_ASSERT(device); + session = 
device->session; + NV_ASSERT(session); + + // Set location defaults + gpFifoLoc = UVM_BUFFER_LOCATION_SYS; + if (device->fbInfo.bZeroFb) + gpPutLoc = UVM_BUFFER_LOCATION_SYS; + else + gpPutLoc = UVM_BUFFER_LOCATION_VID; + + if (isDeviceVoltaPlus(device)) + { + if (params->gpFifoLoc > UVM_BUFFER_LOCATION_VID) + return NV_ERR_INVALID_ARGUMENT; + if (params->gpPutLoc > UVM_BUFFER_LOCATION_VID) + return NV_ERR_INVALID_ARGUMENT; + + if (params->gpFifoLoc != UVM_BUFFER_LOCATION_DEFAULT) + gpFifoLoc = params->gpFifoLoc; + if (params->gpPutLoc != UVM_BUFFER_LOCATION_DEFAULT) + gpPutLoc = params->gpPutLoc; + } + else + { + // GPFIFO needs to be placed in sysmem on Pascal and + // pre-Pascal devices (Bug 1750713) + if (params->gpFifoLoc != UVM_BUFFER_LOCATION_DEFAULT || params->gpPutLoc != UVM_BUFFER_LOCATION_DEFAULT) + return NV_ERR_INVALID_ARGUMENT; + } + + // TODO: Acquired because CliSetGpuContext expects RMAPI lock. Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + status = CliSetGpuContext(session->handle, device->handle, &pGpu, NULL); + rmapiLockRelease(); + if (status != NV_OK) + return status; + + pAllocInfo = portMemAllocNonPaged(sizeof(*pAllocInfo)); + if (pAllocInfo == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pAllocInfo, 0, sizeof(*pAllocInfo)); + + subdeviceInstance = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + channel = portMemAllocNonPaged(sizeof(*channel)); + if (channel == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup_free_memory; + } + + portMemSet(channel, 0, sizeof(*channel)); + + channel->vaSpace = vaSpace; + channel->fifoEntries = params->numGpFifoEntries; + channel->gpFifoLoc = gpFifoLoc; + channel->gpPutLoc = gpPutLoc; + + // Remember which engine we are using, so that RC recovery can reset it if + // it hangs: + channel->engineType = engineType; + channel->engineIndex = params->engineIndex; + + gpFifoSize = (NvLength)params->numGpFifoEntries * NVA06F_GP_ENTRY__SIZE; + + // If the allocation is vidmem ask RM to allocate persistent vidmem + pAllocInfo->gpuAllocInfo.bPersistentVidmem = NV_TRUE; + + // 1. Allocate the GPFIFO entries. Dont pass any special flags. + flags.bGetKernelVA = NV_FALSE; + status = nvGpuOpsGpuMalloc(vaSpace, + gpFifoLoc == UVM_BUFFER_LOCATION_SYS, + gpFifoSize, + &channel->gpFifo, + flags, + &pAllocInfo->gpuAllocInfo); + if (status != NV_OK) + goto cleanup_free_memory; + + // 2. Map the gpfifo entries + status = nvGpuOpsMemoryCpuMap(vaSpace, + channel->gpFifo, + gpFifoSize, + &cpuMap, + PAGE_SIZE_DEFAULT); + if (status != NV_OK) + goto cleanup_free_gpfifo_entries; + + channel->gpFifoEntries = (NvU64 *) cpuMap; + + // + // 3. Allocate memory for the error notifier. Make the allocation + // sufficiently large to also accommodate any other channel + // notifiers, and request a kernel VA and CPU caching. + // + flags.bGetKernelVA = NV_TRUE; + errorNotifierSize = sizeof(NvNotification) * + NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1; + status = nvGpuOpsGpuMalloc(vaSpace, + NV_TRUE, + errorNotifierSize, + &channel->errorNotifierOffset, + flags, + &pAllocInfo->gpuAllocInfo); + if (status != NV_OK) + goto cleanup_unmap_gpfifo_entries; + + NV_ASSERT(channel->errorNotifierOffset); + + status = getHandleForVirtualAddr(vaSpace, + channel->errorNotifierOffset, + NV_FALSE /*virtual*/, + &hErrorNotifier); + if (status != NV_OK) + goto cleanup_free_virtual; + + // 4. Find and share the VA with UVM driver + + // TODO: Acquired because CliGetDmaMappingInfo expects RMAPI lock. 
Necessary? + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + goto cleanup_free_virtual; + + if (!CliGetDmaMappingInfo(session->handle, + device->handle, + hErrorNotifier, + channel->errorNotifierOffset, + gpumgrGetDeviceGpuMask(device->deviceInstance), + &pDmaMappingInfo)) + { + rmapiLockRelease(); + status = NV_ERR_GENERIC; + goto cleanup_free_virtual; + } + + rmapiLockRelease(); + + // + // RM uses the parent subdevice index to fill the notifier on SYSMEM. So use the same. + // NOTE: the same assumption does not hold for VIDMEM allocations. + // + channel->errorNotifier = (NvNotification*)pDmaMappingInfo->KernelVAddr[subdeviceInstance]; + if (!channel->errorNotifier) + { + status = NV_ERR_GENERIC; + goto cleanup_free_virtual; + } + + // Let's allocate the channel + pAllocInfo->gpFifoAllocParams.hObjectError = hErrorNotifier; + status = getHandleForVirtualAddr(vaSpace, + channel->gpFifo, + NV_FALSE /*virtual*/, + &pAllocInfo->gpFifoAllocParams.hObjectBuffer); + if (status != NV_OK) + goto cleanup_free_virtual; + + pAllocInfo->gpFifoAllocParams.gpFifoOffset = channel->gpFifo; + pAllocInfo->gpFifoAllocParams.gpFifoEntries = channel->fifoEntries; + // If zero then it will attach to the device address space + pAllocInfo->gpFifoAllocParams.hVASpace = vaSpace->handle; + pAllocInfo->gpFifoAllocParams.engineType = channelEngineType(channel); + + if (isDeviceVoltaPlus(device)) + { + flags.bGetKernelVA = NV_FALSE; + status = nvGpuOpsGpuMalloc(vaSpace, + gpPutLoc == UVM_BUFFER_LOCATION_SYS, + sizeof(KeplerAControlGPFifo), + &channel->userdGpuAddr, + flags, + &pAllocInfo->gpuAllocInfo); + if (status != NV_OK) + goto cleanup_free_virtual; + + channel->hUserdPhysHandle = pAllocInfo->gpuAllocInfo.hPhysHandle; + + SLI_LOOP_START(SLI_LOOP_FLAGS_BC_ONLY) + pAllocInfo->gpFifoAllocParams.hUserdMemory[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = channel->hUserdPhysHandle; + pAllocInfo->gpFifoAllocParams.userdOffset[gpumgrGetSubDeviceInstanceFromGpu(pGpu)] = 0; + SLI_LOOP_END + + status = nvGpuOpsMemoryCpuMap(vaSpace, + channel->userdGpuAddr, + sizeof(KeplerAControlGPFifo), + &gpfifoCtrl, + PAGE_SIZE_DEFAULT); + if (status != NV_OK) + goto cleanup_free_virtual; + } + + channel->channelHandle = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, session->handle, + device->handle, + &channel->channelHandle, + device->hostClass, + &pAllocInfo->gpFifoAllocParams); + if (status != NV_OK) + { + goto cleanup_free_virtual; + } + + // Query runlist ID + pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + status = kfifoEngineInfoXlate_HAL(pGpu, + pKernelFifo, + ENGINE_INFO_TYPE_NV2080, + channelEngineType(channel), + ENGINE_INFO_TYPE_RUNLIST, + &channel->hwRunlistId); + if (status != NV_OK) + goto cleanup_free_virtual; + + // Query channel ID + status = nvGpuOpsChannelGetHwChannelId(channel, &channel->hwChannelId); + if (status != NV_OK) + goto cleanup_free_channel; + + // Map USERD (controlPage) + if (!isDeviceVoltaPlus(device)) + { + status = pRmApi->MapToCpu(pRmApi, + session->handle, + device->subhandle, + channel->channelHandle, + 0, + sizeof(KeplerAControlGPFifo), + &gpfifoCtrl, + 0); + if (status != NV_OK) + goto cleanup_free_channel; + } + + channel->controlPage = gpfifoCtrl; + + status = channelRetainDummyAlloc(channel, channelInfo); + if (status != NV_OK) + goto cleanup_free_controlpage; + + // Allocate the SW method class for fault cancel + if (isDevicePascalPlus(device) && (engineType != UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2)) + { + channel->hFaultCancelSwMethodClass = 
NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, + session->handle, + channel->channelHandle, + &channel->hFaultCancelSwMethodClass, + GP100_UVM_SW, + NULL); + if (status != NV_OK) + goto cleanup_free_controlpage; + } + + portMemFree(pAllocInfo); + + *channelHandle = channel; + channelInfo->gpGet = &channel->controlPage->GPGet; + channelInfo->gpPut = &channel->controlPage->GPPut; + channelInfo->gpFifoEntries = channel->gpFifoEntries; + channelInfo->channelClassNum = device->hostClass; + channelInfo->numGpFifoEntries = channel->fifoEntries; + channelInfo->errorNotifier = channel->errorNotifier; + channelInfo->hwRunlistId = channel->hwRunlistId; + channelInfo->hwChannelId = channel->hwChannelId; + + return NV_OK; + +cleanup_free_controlpage: + if (!isDeviceVoltaPlus(device) && (gpfifoCtrl != NULL)) + pRmApi->UnmapFromCpu(pRmApi, session->handle, device->subhandle, channel->channelHandle, gpfifoCtrl, 0, pid); +cleanup_free_channel: + pRmApi->Free(pRmApi, session->handle, channel->channelHandle); +cleanup_free_virtual: + if (isDeviceVoltaPlus(device)) + { + if (gpfifoCtrl != NULL) + nvGpuOpsMemoryCpuUnMap(vaSpace, gpfifoCtrl); + + if (channel->userdGpuAddr != 0) + nvGpuOpsMemoryFree(vaSpace, channel->userdGpuAddr); + } + + nvGpuOpsMemoryFree(vaSpace, channel->errorNotifierOffset); +cleanup_unmap_gpfifo_entries: + nvGpuOpsMemoryCpuUnMap(vaSpace, channel->gpFifoEntries); +cleanup_free_gpfifo_entries: + nvGpuOpsMemoryFree(vaSpace, channel->gpFifo); +cleanup_free_memory: + channelReleaseDummyAlloc(channel); + portMemFree(channel); + portMemFree(pAllocInfo); + + return status; +} + +static NV_STATUS engineAllocate(struct gpuChannel *channel, gpuChannelInfo *channelInfo, UVM_GPU_CHANNEL_ENGINE_TYPE engineType) +{ + NV_STATUS status = NV_OK; + struct gpuObject *object = NULL; + NVB0B5_ALLOCATION_PARAMETERS ceAllocParams = {0}; + NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS channelGrpParams = {0}; + struct gpuAddressSpace *vaSpace = NULL; + struct gpuDevice *device = NULL; + struct gpuSession *session = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + NvU32 class; + void *params; + + NV_ASSERT(channel); + NV_ASSERT(channelInfo); + NV_ASSERT(channel->engineType == UVM_GPU_CHANNEL_ENGINE_TYPE_CE || + channel->engineType == UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2); + + // TODO: Bug 2458492: Ampere-SMC Verify GR/CE indices within partition + + vaSpace = channel->vaSpace; + NV_ASSERT(vaSpace); + device = vaSpace->device; + NV_ASSERT(device); + session = device->session; + NV_ASSERT(session); + + object = portMemAllocNonPaged(sizeof(*object)); + if (object == NULL) + return NV_ERR_NO_MEMORY; + + object->handle = NV01_NULL_OBJECT; + + if (engineType == UVM_GPU_CHANNEL_ENGINE_TYPE_CE) + { + ceAllocParams.version = NVB0B5_ALLOCATION_PARAMETERS_VERSION_1; + ceAllocParams.engineType = NV2080_ENGINE_TYPE_COPY(channel->engineIndex); + params = &ceAllocParams; + class = device->ceClass; + } + else + { + params = NULL; + class = device->sec2Class; + } + + status = pRmApi->Alloc(pRmApi, session->handle, + channel->channelHandle, + &object->handle, + class, + params); + + if (status != NV_OK) + goto cleanup_free_memory; + + // In volta+ gpus, the channel has a submission offset used as doorbell. 
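+    // Query that offset and the work submission token here and report them back
+    // through channelInfo so the caller can ring the doorbell when submitting work.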
+ if (isDeviceVoltaPlus(device)) + { + status = nvGpuOpsGetWorkSubmissionInfo(vaSpace, channel); + if (status != NV_OK) + goto cleanup_free_engine; + + channelInfo->workSubmissionOffset = channel->workSubmissionOffset; + channelInfo->workSubmissionToken = channel->workSubmissionToken; + channelInfo->pWorkSubmissionToken = channel->pWorkSubmissionToken; + } + + // Schedule the channel + channelGrpParams.bEnable = NV_TRUE; + status = pRmApi->Control(pRmApi, + session->handle, + channel->channelHandle, + NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, + &channelGrpParams, + sizeof(channelGrpParams)); + + if (status != NV_OK) + goto cleanup_free_engine; + + object->next = channel->nextAttachedEngine; + channel->nextAttachedEngine = object; + object->type = class; + + return NV_OK; + +cleanup_free_engine: + pRmApi->Free(pRmApi, session->handle, object->handle); +cleanup_free_memory: + portMemFree(object); + return status; +} + +NV_STATUS nvGpuOpsChannelAllocate(struct gpuAddressSpace *vaSpace, + const gpuChannelAllocParams *params, + struct gpuChannel **channelHandle, + gpuChannelInfo *channelInfo) +{ + NV_STATUS status; + UVM_GPU_CHANNEL_ENGINE_TYPE channelType = params->engineType; + + NV_ASSERT_OR_RETURN((channelType == UVM_GPU_CHANNEL_ENGINE_TYPE_CE || channelType == UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2), NV_ERR_NOT_SUPPORTED); + + status = channelAllocate(vaSpace, channelType, params, + channelHandle, channelInfo); + if (status != NV_OK) + return status; + + status = engineAllocate(*channelHandle, channelInfo, channelType); + if (status != NV_OK) + nvGpuOpsChannelDestroy(*channelHandle); + + return status; +} + +void nvGpuOpsChannelDestroy(struct gpuChannel *channel) +{ + struct gpuObject *nextEngine; + struct gpuObject *currEngine; + NvU32 pid = osGetCurrentProcess(); + struct gpuAddressSpace *vaSpace = NULL; + struct gpuDevice *device = NULL; + struct gpuSession *session = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + if (!channel) + return; + + vaSpace = channel->vaSpace; + NV_ASSERT(vaSpace); + device = vaSpace->device; + NV_ASSERT(device); + session = device->session; + NV_ASSERT(session); + + // destroy the engines under this channel + if (channel->nextAttachedEngine) + { + currEngine = channel->nextAttachedEngine; + nextEngine = currEngine; + do + { + currEngine = nextEngine; + nextEngine = currEngine->next; + pRmApi->Free(pRmApi, session->handle, currEngine->handle); + portMemFree(currEngine); + } while (nextEngine != NULL); + } + + // Tear down the channel + if (isDevicePascalPlus(device)) + pRmApi->Free(pRmApi, session->handle, channel->hFaultCancelSwMethodClass); + + if (isDeviceVoltaPlus(device)) + { + nvGpuOpsMemoryCpuUnMap(vaSpace, (void *)channel->controlPage); + nvGpuOpsMemoryFree(vaSpace, channel->userdGpuAddr); + } + else + { + pRmApi->UnmapFromCpu(pRmApi, + session->handle, + device->subhandle, + channel->channelHandle, + (void *)channel->controlPage, + 0, + pid); + } + + // Free the channel + pRmApi->Free(pRmApi, session->handle, channel->channelHandle); + + nvGpuOpsMemoryFree(vaSpace, channel->errorNotifierOffset); + + nvGpuOpsMemoryCpuUnMap(vaSpace, channel->gpFifoEntries); + + nvGpuOpsMemoryFree(vaSpace, channel->gpFifo); + + channelReleaseDummyAlloc(channel); + + portMemFree(channel); +} + +static NV_STATUS trackDescriptor(PNODE *pRoot, NvU64 key, void *desc) +{ + PNODE btreeNode; + NV_ASSERT(desc); + NV_ASSERT(pRoot); + + btreeNode = (PNODE)desc; + + btreeNode->keyStart = key; + btreeNode->keyEnd = key; + btreeNode->Data = desc; + return 
btreeInsert(btreeNode, pRoot); +} + +static NV_STATUS findDescriptor(PNODE pRoot, NvU64 key, void **desc) +{ + PNODE btreeNode = NULL; + NV_STATUS status = NV_OK; + + NV_ASSERT(desc); + + status = btreeSearch(key, &btreeNode, pRoot); + if (status != NV_OK) + return status; + + *desc = btreeNode->Data; + return NV_OK; +} + +static NV_STATUS deleteDescriptor(PNODE *pRoot, NvU64 key, void **desc) +{ + PNODE btreeNode = NULL; + NV_STATUS status = NV_OK; + + NV_ASSERT(desc); + NV_ASSERT(pRoot); + + status = btreeSearch(key, &btreeNode, *pRoot); + if (status != NV_OK) + return status ; + + *desc = btreeNode->Data; + status = btreeUnlink(btreeNode, pRoot); + return NV_OK; +} + +static NV_STATUS destroyAllGpuMemDescriptors(NvHandle hClient, PNODE pNode) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + gpuMemDesc *memDesc = NULL; + + if (pNode == NULL) + return NV_OK; + + destroyAllGpuMemDescriptors(hClient, pNode->left); + destroyAllGpuMemDescriptors(hClient, pNode->right); + + memDesc = (gpuMemDesc*)pNode->Data; + if (memDesc->childHandle) + pRmApi->Free(pRmApi, hClient, memDesc->childHandle); + + if (memDesc->handle) + pRmApi->Free(pRmApi, hClient, memDesc->handle); + + portMemFree(pNode->Data); + + return NV_OK; +} + +// Returns childHandle/handle to a VA memdesc associated with a VA. +static NV_STATUS getHandleForVirtualAddr(struct gpuAddressSpace *vaSpace, + NvU64 allocationAddress, + NvBool bPhysical, + NvHandle *pHandle) +{ + NV_STATUS status = NV_OK; + gpuMemDesc *memDesc = NULL; + + NV_ASSERT(vaSpace); + NV_ASSERT(pHandle); + + portSyncRwLockAcquireRead(vaSpace->allocationsLock); + status = findDescriptor(vaSpace->allocations, allocationAddress, (void**)&memDesc); + portSyncRwLockReleaseRead(vaSpace->allocationsLock); + if (status != NV_OK) + return status; + + NV_ASSERT(memDesc); + + *pHandle = bPhysical ? memDesc->childHandle : memDesc->handle; + + if (!*pHandle) + return NV_ERR_GENERIC; + + return NV_OK; +} + +// +// Returns a cpu mapping to the provided GPU Offset +// +NV_STATUS nvGpuOpsMemoryCpuMap(struct gpuAddressSpace *vaSpace, + NvU64 memory, + NvLength length, + void **cpuPtr, + NvU32 pageSize) +{ + gpuMemDesc *memDesc = NULL; + cpuMappingDesc *cpuMapDesc = NULL; + NV_STATUS status; + void *pMappedAddr = NULL; + NvP64 mappedAddr = 0; + NvU32 flags = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + if (!vaSpace || !cpuPtr) + return NV_ERR_INVALID_ARGUMENT; + + cpuMapDesc = portMemAllocNonPaged(sizeof(*cpuMapDesc)); + if (cpuMapDesc == NULL) + return NV_ERR_GENERIC; + + portSyncRwLockAcquireRead(vaSpace->allocationsLock); + status = findDescriptor(vaSpace->allocations, memory, (void**)&memDesc); + portSyncRwLockReleaseRead(vaSpace->allocationsLock); + if (status != NV_OK) + goto cleanup_desc; + + NV_ASSERT(memDesc); + NV_ASSERT(memDesc->childHandle); + + // + // Set correct page size for Bar mappings. + // + if (pageSize == RM_PAGE_SIZE) + { + flags |= DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _4KB); + } + else if (pageSize == RM_PAGE_SIZE_HUGE) + { + // TODO: this flag is ignored, remove it once it is deprecated + flags |= DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _HUGE); + } + else + { + flags |= DRF_DEF(OS46, _FLAGS, _PAGE_SIZE, _DEFAULT); + } + + // + // If the length passed in is zero we will force the mapping + // to the size that was used for allocation of the passed in + // NvU64 + // + status = pRmApi->MapToCpu(pRmApi, + vaSpace->device->session->handle, + vaSpace->device->subhandle, + memDesc->childHandle, + 0, + length != 0 ? 
length : memDesc->size,
+                              &pMappedAddr,
+                              flags);
+    if (status != NV_OK)
+        goto cleanup_desc;
+
+    mappedAddr = NV_PTR_TO_NvP64(pMappedAddr);
+
+    cpuMapDesc->cpuPointer = (NvUPtr) mappedAddr;
+    cpuMapDesc->handle = memDesc->childHandle;
+    cpuMapDesc->btreeNode.keyStart = (NvU64)cpuMapDesc->cpuPointer;
+    cpuMapDesc->btreeNode.keyEnd = (NvU64)cpuMapDesc->cpuPointer;
+    cpuMapDesc->btreeNode.Data = (void *) cpuMapDesc;
+
+    // Track CPU memdesc
+    portSyncRwLockAcquireWrite(vaSpace->cpuMappingsLock);
+    status = btreeInsert(&cpuMapDesc->btreeNode, &vaSpace->cpuMappings);
+    portSyncRwLockReleaseWrite(vaSpace->cpuMappingsLock);
+    if (status != NV_OK)
+        goto cleanup_desc;
+
+    // The CPU pointer can be used as the key because the BAR1 address space is unique
+    *cpuPtr = NvP64_VALUE(mappedAddr);
+
+    return NV_OK;
+
+cleanup_desc:
+    portMemFree(cpuMapDesc);
+    return status;
+}
+
+void nvGpuOpsMemoryCpuUnMap(struct gpuAddressSpace *vaSpace, void *cpuPtr)
+{
+    unsigned pid = 0;
+    cpuMappingDesc *mappingDesc = NULL;
+    PNODE btreeNode;
+    NV_STATUS status = NV_OK;
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL);
+
+    if (!vaSpace || !cpuPtr)
+        return;
+
+    portSyncRwLockAcquireRead(vaSpace->cpuMappingsLock);
+    status = btreeSearch((NvUPtr)cpuPtr, &btreeNode, vaSpace->cpuMappings);
+    portSyncRwLockReleaseRead(vaSpace->cpuMappingsLock);
+    if (status != NV_OK)
+        return;
+
+    mappingDesc = (cpuMappingDesc *)btreeNode->Data;
+    if (mappingDesc)
+    {
+        pid = osGetCurrentProcess();
+        status = pRmApi->UnmapFromCpu(pRmApi,
+                                      vaSpace->device->session->handle,
+                                      vaSpace->device->subhandle,
+                                      mappingDesc->handle,
+                                      NvP64_VALUE(((NvP64)mappingDesc->cpuPointer)),
+                                      0,
+                                      pid);
+        NV_ASSERT(status == NV_OK);
+    }
+
+    portSyncRwLockAcquireWrite(vaSpace->cpuMappingsLock);
+    btreeUnlink(btreeNode, &vaSpace->cpuMappings);
+    portSyncRwLockReleaseWrite(vaSpace->cpuMappingsLock);
+
+    portMemFree(mappingDesc);
+    return;
+}
+
+// This function frees both the physical and virtual memory allocations.
+// It is the counterpart of nvGpuOpsGpuMalloc.
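+// The descriptor is removed from the VA space's allocation tracking tree first,
+// then the physical handle and the virtual handle are freed through RM in that order.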
+void nvGpuOpsMemoryFree(struct gpuAddressSpace *vaSpace, NvU64 pointer) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + gpuMemDesc *memDesc = NULL; + + NV_ASSERT(vaSpace); + + portSyncRwLockAcquireWrite(vaSpace->allocationsLock); + deleteDescriptor(&vaSpace->allocations, pointer, (void**)&memDesc); + portSyncRwLockReleaseWrite(vaSpace->allocationsLock); + + NV_ASSERT(memDesc); + NV_ASSERT(memDesc->childHandle); + NV_ASSERT(memDesc->handle); + + // Free physical allocation + pRmApi->Free(pRmApi, vaSpace->device->session->handle, memDesc->childHandle); + + // Free virtual allocation + pRmApi->Free(pRmApi, vaSpace->device->session->handle, memDesc->handle); + + portMemFree(memDesc); +} + + + +NV_STATUS nvGpuOpsQueryCesCaps(struct gpuDevice *device, + gpuCesCaps *cesCaps) +{ + NV_STATUS status; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + + if (!device || !cesCaps) + return NV_ERR_INVALID_ARGUMENT; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, + device->session->handle, + NULL, + &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + // Refresh CE information, which may have changed if a GPU has been + // initialized by RM for the first time + status = queryCopyEngines(device, cesCaps); + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +NV_STATUS nvGpuOpsQueryCaps(struct gpuDevice *device, gpuCaps *caps) +{ + NV_STATUS status; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + OBJGPU *pGpu = NULL; + KernelMemorySystem *pKernelMemorySystem; + NV0000_CTRL_GPU_GET_ID_INFO_PARAMS infoParams = {0}; + struct gpuSession *session = device->session; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, device->session->handle, NULL, &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + caps->sysmemLink = device->sysmemLink; + caps->sysmemLinkRateMBps = device->sysmemLinkRateMBps; + caps->connectedToSwitch = device->connectedToSwitch; + + infoParams.gpuId = device->gpuId; + status = pRmApi->Control(pRmApi, + session->handle, + session->handle, + NV0000_CTRL_CMD_GPU_GET_ID_INFO, + &infoParams, + sizeof(infoParams)); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + if (infoParams.numaId != NV0000_CTRL_NO_NUMA_NODE) + { + caps->numaEnabled = NV_TRUE; + caps->numaNodeId = infoParams.numaId; + } + + status = CliSetGpuContext(session->handle, device->handle, &pGpu, NULL); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + pKernelMemorySystem = GPU_GET_KERNEL_MEMORY_SYSTEM(pGpu); + if (!pKernelMemorySystem) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (pGpu->getProperty(pGpu, PDB_PROP_GPU_ATS_SUPPORTED)) + { + caps->systemMemoryWindowStart = pKernelMemorySystem->coherentCpuFbBase; + caps->systemMemoryWindowSize = pKernelMemorySystem->coherentCpuFbEnd - + pKernelMemorySystem->coherentCpuFbBase; + } + else + { + caps->systemMemoryWindowStart = 0; 
+ caps->systemMemoryWindowSize = 0; + } + + if (device->connectedToSwitch) + { + KernelNvlink *pKernelNvlink = GPU_GET_KERNEL_NVLINK(pGpu); + if (pKernelNvlink == NULL) + { + caps->nvswitchMemoryWindowStart = NVLINK_INVALID_FABRIC_ADDR; + } + else + { + caps->nvswitchMemoryWindowStart = knvlinkGetUniqueFabricBaseAddress( + pGpu, pKernelNvlink); + } + } + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_OK; +} + +static NV_STATUS findVaspaceFromPid(unsigned pid, unsigned gpuId, + NvHandle *hClient, NvHandle *hDevice, + NvHandle *hSubdevice, NvHandle *hVaSpace) +{ + // + // This function iterates through all the vaspace objects under the client, + // that matches the pid argument, and returns any address space that is + // tagged as UVM. + // + Device *pDevice = NULL; + Subdevice *pSubDevice = NULL; + OBJVASPACE *pVAS = NULL; + OBJGPU *pGpu; + unsigned hDeviceLocal = 0; + unsigned hSubDeviceLocal = 0; + NV_STATUS status; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + if (pClient->ProcID == pid) + { + pGpu = gpumgrGetGpuFromId(gpuId); + if (!pGpu) + return NV_ERR_INVALID_ARGUMENT; + + pSubDevice = CliGetSubDeviceInfoFromGpu(pRsClient->hClient, + pGpu); + + status = deviceGetByGpu(pRsClient, pGpu, NV_TRUE, &pDevice); + if (status == NV_OK) + { + hDeviceLocal = RES_GET_HANDLE(pDevice); + + if (pSubDevice != NULL) + hSubDeviceLocal = RES_GET_HANDLE(pSubDevice); + + *hClient = pRsClient->hClient; + *hDevice = hDeviceLocal; + *hSubdevice = hSubDeviceLocal; + + if (pDevice->vaMode != + NV_DEVICE_ALLOCATION_VAMODE_MULTIPLE_VASPACES) + { + status = vaspaceGetByHandleOrDeviceDefault(pRsClient, hDeviceLocal, 0, &pVAS); + if ((status != NV_OK) || (pVAS == NULL)) + return NV_ERR_GENERIC; + + // + // TODO: Bug 1632484: + // Check to see if pVAS is UVM_MANAGED, once + // that vaspace property is introduced. + // No need to check FaultCapable. + // + if ((vaspaceIsMirrored(pVAS)) || + (vaspaceIsFaultCapable(pVAS))) + { + // + // This means that this client is + // using the vaspace associated to its device + // + *hVaSpace = 0; + return NV_OK; + } + } + + // + // if the default VASPACE is not tagged as UVM + // will search for all vaspace objects under + // this client for this device to find the first + // vaspace that is tagged as UVM. + // + if (findUvmAddressSpace(*hClient, pGpu->gpuInstance, hVaSpace, &pVAS) == NV_OK) + { + return NV_OK; + } + } + } + } + return NV_ERR_GENERIC; +} + +// +// This function will look through all the vaspaces under a client for a device and return +// the one that is tagged as UVM, or NULL if there is no UVM vaspace. 
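+// A vaspace qualifies if it is mirrored or externally owned and its gpuMask
+// covers the requested GPU instance.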
+// +static NV_STATUS findUvmAddressSpace(NvHandle hClient, NvU32 gpuInstance, NvHandle *phVaSpace, OBJVASPACE **ppVASpace) +{ + RsResourceRef *pResourceRef; + RS_ITERATOR iter; + NvU32 gpuMask = NVBIT(gpuInstance); + + iter = serverutilRefIter(hClient, NV01_NULL_OBJECT, classId(VaSpaceApi), RS_ITERATE_DESCENDANTS, NV_TRUE); + + while (clientRefIterNext(iter.pClient, &iter)) + { + pResourceRef = iter.pResourceRef; + + *ppVASpace = dynamicCast(pResourceRef->pResource, VaSpaceApi)->pVASpace; + *phVaSpace = pResourceRef->hResource; + + if ((vaspaceIsMirrored(*ppVASpace) || vaspaceIsExternallyOwned(*ppVASpace)) && + (((*ppVASpace)->gpuMask & gpuMask) == gpuMask)) + { + return NV_OK; + } + } + *phVaSpace = 0; + *ppVASpace = NULL; + return NV_ERR_INVALID_ARGUMENT; +} + +// Make sure UVM_GPU_NAME_LENGTH has the same length as +// NV2080_GPU_MAX_NAME_STRING_LENGTH. +ct_assert(NV2080_GPU_MAX_NAME_STRING_LENGTH == UVM_GPU_NAME_LENGTH); + +static void getGpcTpcInfo(OBJGPU *pGpu, gpuInfo *pGpuInfo) +{ + KernelGraphicsManager *pKernelGraphicsManager = GPU_GET_KERNEL_GRAPHICS_MANAGER(pGpu); + + pGpuInfo->maxTpcPerGpcCount = 0; + pGpuInfo->maxGpcCount = 0; + pGpuInfo->gpcCount = 0; + pGpuInfo->tpcCount = 0; + + NV_ASSERT_OR_RETURN_VOID(pKernelGraphicsManager->legacyKgraphicsStaticInfo.bInitialized); + NV_ASSERT_OR_RETURN_VOID(pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo != NULL); + + pGpuInfo->maxTpcPerGpcCount = + pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_TPC_PER_GPC].data; + pGpuInfo->maxGpcCount = + pKernelGraphicsManager->legacyKgraphicsStaticInfo.pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_LITTER_NUM_GPCS].data; + pGpuInfo->gpcCount = + nvPopCount32(pKernelGraphicsManager->legacyKgraphicsStaticInfo.floorsweepingMasks.gpcMask); + + // + // When MIG GPU partitioning is enabled, compute the upper bound on the number + // of TPCs that may be available in this partition, to enable UVM to + // conservatively size relevant data structures. 
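+    // The bound used here is gpcCount * maxTpcPerGpcCount; without MIG, the exact
+    // TPC count is taken from the graphics static info instead.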
+ // + if (IS_MIG_IN_USE(pGpu)) + { + pGpuInfo->tpcCount = pGpuInfo->gpcCount * pGpuInfo->maxTpcPerGpcCount; + } + else + { + KernelGraphics *pKernelGraphics = GPU_GET_KERNEL_GRAPHICS(pGpu, 0); + const KGRAPHICS_STATIC_INFO *pKernelGraphicsStaticInfo = kgraphicsGetStaticInfo(pGpu, pKernelGraphics); + + NV_ASSERT_OR_RETURN_VOID(pKernelGraphicsStaticInfo != NULL); + pGpuInfo->tpcCount = pKernelGraphicsStaticInfo->pGrInfo->infoList[NV2080_CTRL_GR_INFO_INDEX_SHADER_PIPE_SUB_COUNT].data; + } +} + +static NV_STATUS queryVirtMode(NvHandle hClient, NvHandle hDevice, NvU32 *virtMode) +{ + NV_STATUS status = NV_OK; + *virtMode = UVM_VIRT_MODE_NONE; + return status; +} + +NV_STATUS nvGpuOpsGetGpuInfo(const NvProcessorUuid *pUuid, + const gpuClientInfo *pGpuClientInfo, + gpuInfo *pGpuInfo) +{ + NV_STATUS status; + NV0080_ALLOC_PARAMETERS nv0080AllocParams = {0}; + NV2080_ALLOC_PARAMETERS nv2080AllocParams = {0}; + NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS gpuIdInfoParams = {{0}}; + NV2080_CTRL_MC_GET_ARCH_INFO_PARAMS archInfoParams = {0}; + NV2080_CTRL_GPU_GET_NAME_STRING_PARAMS gpuNameParams = {0}; + NvHandle clientHandle = 0; + NvHandle deviceHandle = 1; + NvHandle subDeviceHandle = 2; + NvBool isClientAllocated = NV_FALSE; + NvBool isDeviceAllocated = NV_FALSE; + NvBool isSubdeviceAllocated = NV_FALSE; + NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS subDevParams = { 0 }; + NV2080_CTRL_GPU_GET_SIMULATION_INFO_PARAMS simulationInfoParams = {0}; + OBJGPU *pGpu = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + NvU32 dummy; + + pGpu = gpumgrGetGpuFromUuid(pUuid->uuid, + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY)); + if (!pGpu) + { + return NV_ERR_GPU_UUID_NOT_FOUND; + } + + if (!osIsGpuAccessible(pGpu)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + status = nvGpuOpsCreateClient(pRmApi, &clientHandle); + if (status != NV_OK) + { + return status; + } + + isClientAllocated = NV_TRUE; + + portMemCopy(&gpuIdInfoParams.gpuUuid, sizeof(*pUuid), pUuid, sizeof(*pUuid)); + + gpuIdInfoParams.flags = NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_BINARY; + status = pRmApi->Control(pRmApi, + clientHandle, + clientHandle, + NV0000_CTRL_CMD_GPU_GET_UUID_INFO, + &gpuIdInfoParams, + sizeof(gpuIdInfoParams)); + if (NV_OK != status) + goto cleanup; + + nv0080AllocParams.deviceId = gpuIdInfoParams.deviceInstance; + + status = pRmApi->Alloc(pRmApi, + clientHandle, + clientHandle, + &deviceHandle, + NV01_DEVICE_0, + &nv0080AllocParams); + if (NV_OK != status) + goto cleanup; + + isDeviceAllocated = NV_TRUE; + + nv2080AllocParams.subDeviceId = gpuIdInfoParams.subdeviceInstance; + status = pRmApi->Alloc(pRmApi, + clientHandle, + deviceHandle, + &subDeviceHandle, + NV20_SUBDEVICE_0, + &nv2080AllocParams); + if (NV_OK != status) + goto cleanup; + + isSubdeviceAllocated = NV_TRUE; + + portMemCopy(&pGpuInfo->uuid, sizeof(*pUuid), pUuid, sizeof(*pUuid)); + + status = pRmApi->Control(pRmApi, + clientHandle, + subDeviceHandle, + NV2080_CTRL_CMD_MC_GET_ARCH_INFO, + &archInfoParams, + sizeof(archInfoParams)); + if (NV_OK != status) + goto cleanup; + + pGpuInfo->gpuArch = archInfoParams.architecture; + pGpuInfo->gpuImplementation = archInfoParams.implementation; + + gpuNameParams.gpuNameStringFlags = NV2080_CTRL_GPU_GET_NAME_STRING_FLAGS_TYPE_ASCII; + status = pRmApi->Control(pRmApi, + clientHandle, + subDeviceHandle, + NV2080_CTRL_CMD_GPU_GET_NAME_STRING, + &gpuNameParams, + sizeof(gpuNameParams)); + if (NV_OK != status) + goto cleanup; + + 
portStringCopy(pGpuInfo->name, sizeof(pGpuInfo->name), + (const char *)gpuNameParams.gpuNameString.ascii, + sizeof(gpuNameParams.gpuNameString.ascii)); + + status = queryVirtMode(clientHandle, deviceHandle, &pGpuInfo->virtMode); + if (status != NV_OK) + goto cleanup; + + pGpuInfo->gpuInTcc = NV_FALSE; + + status = findDeviceClasses(clientHandle, + deviceHandle, + subDeviceHandle, + &pGpuInfo->hostClass, + &pGpuInfo->ceClass, + &pGpuInfo->computeClass, + &dummy, + &dummy, + &dummy); + if (status != NV_OK) + goto cleanup; + + status = pRmApi->Control(pRmApi, + clientHandle, + deviceHandle, + NV0080_CTRL_CMD_GPU_GET_NUM_SUBDEVICES, + &subDevParams, + sizeof(subDevParams)); + if (status != NV_OK) + goto cleanup; + + pGpuInfo->subdeviceCount = subDevParams.numSubDevices; + + getGpcTpcInfo(pGpu, pGpuInfo); + + if (IS_MIG_IN_USE(pGpu)) + { + NvU32 swizzId; + + NV_ASSERT(pGpuInfo->subdeviceCount == 1); + + status = getSwizzIdFromUserSmcPartHandle(pRmApi, + clientHandle, + deviceHandle, + pGpuClientInfo->hClient, + pGpuClientInfo->hSmcPartRef, + &swizzId); + if (status != NV_OK) + goto cleanup; + + pGpuInfo->smcEnabled = NV_TRUE; + pGpuInfo->smcSwizzId = swizzId; + pGpuInfo->smcUserClientInfo.hClient = pGpuClientInfo->hClient; + pGpuInfo->smcUserClientInfo.hSmcPartRef = pGpuClientInfo->hSmcPartRef; + } + + status = pRmApi->Control(pRmApi, + clientHandle, + subDeviceHandle, + NV2080_CTRL_CMD_GPU_GET_SIMULATION_INFO, + &simulationInfoParams, + sizeof(simulationInfoParams)); + if (status != NV_OK) + goto cleanup; + + pGpuInfo->isSimulated = (simulationInfoParams.type != NV2080_CTRL_GPU_GET_SIMULATION_INFO_TYPE_NONE); + +cleanup: + if (isSubdeviceAllocated) + pRmApi->Free(pRmApi, clientHandle, subDeviceHandle); + + if (isDeviceAllocated) + pRmApi->Free(pRmApi, clientHandle, deviceHandle); + + if (isClientAllocated) + pRmApi->Free(pRmApi, clientHandle, clientHandle); + + return status; +} + +NV_STATUS nvGpuOpsGetGpuIds(const NvU8 *pUuid, + unsigned uuidLength, + NvU32 *pDeviceId, + NvU32 *pSubdeviceId) +{ + NV_STATUS nvStatus; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS gpuIdInfoParams = {{0}}; + NvHandle clientHandle = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + nvStatus = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_NONE, NV01_NULL_OBJECT, NULL, &acquiredLocks); + if (nvStatus != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return nvStatus; + } + + nvStatus = nvGpuOpsCreateClient(pRmApi, &clientHandle); + if (nvStatus != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return nvStatus; + } + + portMemCopy(&gpuIdInfoParams.gpuUuid, uuidLength, pUuid, uuidLength); + + gpuIdInfoParams.flags = NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_BINARY; + nvStatus = pRmApi->Control(pRmApi, + clientHandle, + clientHandle, + NV0000_CTRL_CMD_GPU_GET_UUID_INFO, + &gpuIdInfoParams, + sizeof(gpuIdInfoParams)); + if (NV_OK == nvStatus) + { + *pDeviceId = gpuIdInfoParams.deviceInstance; + *pSubdeviceId = gpuIdInfoParams.subdeviceInstance; + } + + pRmApi->Free(pRmApi, clientHandle, clientHandle); + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return nvStatus; +} + +NV_STATUS nvGpuOpsServiceDeviceInterruptsRM(struct gpuDevice *device) +{ + NV_STATUS status; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + 
NV2080_CTRL_MC_SERVICE_INTERRUPTS_PARAMS params = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_NONE, device->session->handle, NULL, &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + params.engines = NV2080_CTRL_MC_ENGINE_ID_ALL; + status = pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_MC_SERVICE_INTERRUPTS, + ¶ms, + sizeof(params)); + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +NV_STATUS nvGpuOpsCheckEccErrorSlowpath(struct gpuChannel *channel, + NvBool *bEccDbeSet) +{ + NV_STATUS status = NV_OK; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + NV2080_CTRL_GPU_QUERY_ECC_STATUS_PARAMS eccStatus; + NvU32 i = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (!channel || !bEccDbeSet) + { + return NV_ERR_INVALID_ARGUMENT; + } + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, + channel->vaSpace->device->session->handle, + NULL, + &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + *bEccDbeSet = NV_FALSE; + + // Do anything only if ECC is enabled on this device + if (channel->vaSpace->device->rmSubDevice->bEccEnabled) + { + portMemSet(&eccStatus, 0, sizeof(eccStatus)); + + status = pRmApi->Control(pRmApi, + channel->vaSpace->device->session->handle, + channel->vaSpace->device->subhandle, + NV2080_CTRL_CMD_GPU_QUERY_ECC_STATUS, + &eccStatus, + sizeof(eccStatus)); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_GENERIC; + } + + for (i = 0; i < NV2080_CTRL_GPU_ECC_UNIT_COUNT; i++) + { + if (eccStatus.units[i].dbe.count != 0) + { + *bEccDbeSet = NV_TRUE; + } + } + } + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +static NV_STATUS nvGpuOpsFillGpuMemoryInfo(PMEMORY_DESCRIPTOR pMemDesc, + OBJGPU *pMappingGpu, + gpuMemoryInfo *pGpuMemoryInfo) +{ + NV_STATUS status; + PMEMORY_DESCRIPTOR pRootMemDesc = memdescGetRootMemDesc(pMemDesc, NULL); + OBJGPU *pGpu = (pMemDesc->pGpu == NULL) ? 
pMappingGpu : pMemDesc->pGpu; + + status = nvGpuOpsMemGetPageSize(pMappingGpu, + pMemDesc, + &pGpuMemoryInfo->pageSize); + if (status != NV_OK) + return status; + + pGpuMemoryInfo->size = memdescGetSize(pMemDesc); + + pGpuMemoryInfo->contig = memdescGetContiguity(pMemDesc, AT_GPU); + + if (pGpuMemoryInfo->contig) + { + GMMU_APERTURE aperture = nvGpuOpsGetExternalAllocAperture(pMemDesc, NV_FALSE, NV_FALSE); + NvU64 physAddr; + + memdescGetPhysAddrsForGpu(pMemDesc, pMappingGpu, AT_GPU, 0, 0, 1, &physAddr); + + pGpuMemoryInfo->physAddr = + kgmmuEncodePhysAddr(GPU_GET_KERNEL_GMMU(pGpu), aperture, physAddr, NVLINK_INVALID_FABRIC_ADDR); + } + + pGpuMemoryInfo->kind = memdescGetPteKindForGpu(pMemDesc, pMappingGpu); + + pGpuMemoryInfo->sysmem = (memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM); + + pGpuMemoryInfo->deviceDescendant = pRootMemDesc->pGpu != NULL; + + if (pGpuMemoryInfo->deviceDescendant) + { + NvU8 *uuid; + NvU32 uuidLength, flags; + NV_STATUS status; + flags = DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _TYPE, _SHA1) | + DRF_DEF(2080_GPU_CMD, _GPU_GET_GID_FLAGS, _FORMAT, _BINARY); + + // on success, allocates memory for uuid + status = gpuGetGidInfo(pGpu, &uuid, &uuidLength, flags); + if (status != NV_OK) + return status; + + portMemCopy(&pGpuMemoryInfo->uuid, uuidLength, uuid, uuidLength); + portMemFree(uuid); + } + + return NV_OK; +} + +static NvBool memdescIsSysmem(PMEMORY_DESCRIPTOR pMemDesc) +{ + return memdescGetAddressSpace(pMemDesc) == ADDR_SYSMEM; +} + +static NV_STATUS dupMemory(struct gpuDevice *device, + NvHandle hClient, + NvHandle hPhysMemory, + NvU32 flags, + NvHandle *hDupMemory, + gpuMemoryInfo *pGpuMemoryInfo) +{ + NV_STATUS status = NV_OK; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + NvHandle dupedMemHandle; + Memory *pMemory = NULL; + PMEMORY_DESCRIPTOR pMemDesc = NULL; + MEMORY_DESCRIPTOR *pAdjustedMemDesc = NULL; + FABRIC_VASPACE *pFabricVAS = NULL; + OBJGPU *pMappingGpu; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + RsResourceRef *pResourceRef; + RsResourceRef *pParentRef; + struct gpuSession *session; + NvHandle hParent; + NvHandle hSubDevice; + NvBool bIsIndirectPeer = NV_FALSE; + + if (!device || !hDupMemory) + return NV_ERR_INVALID_ARGUMENT; + + NV_ASSERT((flags == NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE) || (flags == NV04_DUP_HANDLE_FLAGS_NONE)); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + // RS-TODO use dual client locking + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_NONE, NV01_NULL_OBJECT, NULL, &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = CliSetSubDeviceContext(device->session->handle, + device->subhandle, + &hSubDevice, + &pMappingGpu); + + if (status != NV_OK) + goto done; + + // Get all the necessary information about the memory + status = nvGpuOpsGetMemoryByHandle(hClient, + hPhysMemory, + &pMemory); + if (status != NV_OK) + goto done; + + // RM client allocations can't have multiple memdesc. 
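+    // If the mapping GPU exposes a fabric VA space, translate the memdesc through
+    // fabricvaspaceGetGpaMemdesc() before checking the aperture and duping the handle.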
+ pMemDesc = pMemory->pMemDesc; + NV_ASSERT(!memdescHasSubDeviceMemDescs(pMemDesc)); + + pAdjustedMemDesc = pMemDesc; + pFabricVAS = dynamicCast(pMappingGpu->pFabricVAS, FABRIC_VASPACE); + if (pFabricVAS != NULL) + { + status = fabricvaspaceGetGpaMemdesc(pFabricVAS, pMemDesc, pMappingGpu, &pAdjustedMemDesc); + if (status != NV_OK) + goto done; + } + + if (memdescGetAddressSpace(pAdjustedMemDesc) != ADDR_FBMEM && + memdescGetAddressSpace(pAdjustedMemDesc) != ADDR_SYSMEM && + memdescGetAddressSpace(pAdjustedMemDesc) != ADDR_FABRIC && + memdescGetAddressSpace(pAdjustedMemDesc) != ADDR_FABRIC_V2) + { + status = NV_ERR_NOT_SUPPORTED; + goto freeGpaMemdesc; + } + + // For SYSMEM or indirect peer mappings + bIsIndirectPeer = gpumgrCheckIndirectPeer(pMappingGpu, pAdjustedMemDesc->pGpu); + if (bIsIndirectPeer || + memdescIsSysmem(pAdjustedMemDesc)) + { + // For sysmem allocations, the dup done below is very shallow and in + // particular doesn't create IOMMU mappings required for the mapped GPU + // to access the memory. That's a problem if the mapped GPU is different + // from the GPU that the allocation was created under. Add them + // explicitly here and remove them when the memory is freed in n + // nvGpuOpsFreeDupedHandle(). Notably memdescMapIommu() refcounts the + // mappings so it's ok to call it if the mappings are already there. + // + // TODO: Bug 1811060: Add native support for this use-case in RM API. + status = memdescMapIommu(pAdjustedMemDesc, pMappingGpu->busInfo.iovaspaceId); + if (status != NV_OK) + goto freeGpaMemdesc; + } + + session = device->session; + + if (pGpuMemoryInfo) + { + RsClient *pClient; + status = serverGetClientUnderLock(&g_resServ, session->handle, &pClient); + if (status != NV_OK) + goto freeGpaMemdesc; + + status = nvGpuOpsFillGpuMemoryInfo(pAdjustedMemDesc, pMappingGpu, pGpuMemoryInfo); + if (status != NV_OK) + goto freeGpaMemdesc; + } + + pResourceRef = RES_GET_REF(pMemory); + pParentRef = pResourceRef->pParentRef; + + // TODO: Bug 2479851: temporarily detect the type of the parent of the + // memory object (device or subdevice). Once CUDA switches to subdevices, + // we will use subdevice handles unconditionally, here. 
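+    // Fabric memory (ADDR_FABRIC/ADDR_FABRIC_V2) is parented by the client itself,
+    // so it is duped under the session handle; device- or subdevice-parented memory
+    // keeps a matching device or subdevice parent.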
+ if (dynamicCast(pParentRef->pResource, Subdevice)) + { + hParent = device->subhandle; + } + else if (dynamicCast(pParentRef->pResource, RsClientResource)) + { + NV_ASSERT((memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FABRIC) || + (memdescGetAddressSpace(pAdjustedMemDesc) == ADDR_FABRIC_V2)); + hParent = session->handle; + } + else + { + NV_ASSERT(dynamicCast(pParentRef->pResource, Device)); + hParent = device->handle; + } + + dupedMemHandle = NV01_NULL_OBJECT; + status = pRmApi->DupObject(pRmApi, + session->handle, + hParent, + &dupedMemHandle, + hClient, + hPhysMemory, + flags); + if (status != NV_OK) + goto freeGpaMemdesc; + + *hDupMemory = dupedMemHandle; + +freeGpaMemdesc: + if (pAdjustedMemDesc != pMemDesc) + fabricvaspacePutGpaMemdesc(pFabricVAS, pAdjustedMemDesc); + +done: + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +NV_STATUS nvGpuOpsDupMemory(struct gpuDevice *device, + NvHandle hClient, + NvHandle hPhysMemory, + NvHandle *hDupMemory, + gpuMemoryInfo *pGpuMemoryInfo) +{ + return dupMemory(device, + hClient, + hPhysMemory, + NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE, + hDupMemory, + pGpuMemoryInfo); +} + +NV_STATUS nvGpuOpsDupAllocation(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuAddressSpace *dstVaSpace, + NvU64 *dstAddress) +{ + NV_STATUS status; + NvHandle dstPhysHandle = 0; + NvHandle srcPhysHandle = 0; + NvU64 tmpDstAddress = 0; + gpuMemoryInfo gpuMemoryInfo = {0}; + gpuVaAllocInfo allocInfo = {0}; + struct allocFlags flags = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, srcVaSpace != 0, NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OR_RETURN(LEVEL_ERROR, dstVaSpace != 0, NV_ERR_INVALID_ARGUMENT); + + NV_CHECK_OR_RETURN(LEVEL_ERROR, srcVaSpace != dstVaSpace, NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OR_RETURN(LEVEL_ERROR, srcAddress != 0, NV_ERR_INVALID_ARGUMENT); + NV_CHECK_OR_RETURN(LEVEL_ERROR, dstAddress != NULL, NV_ERR_INVALID_ARGUMENT); + + // If the given combination of source VA space and address does not + // correspond to a previous allocation, the physical handle retrieval fails + status = getHandleForVirtualAddr(srcVaSpace, srcAddress, NV_TRUE, &srcPhysHandle); + if (status != NV_OK) + return status; + + // Dupe the physical allocation, and return information about the associated + // memory descriptor + // + // Passing NV04_DUP_HANDLE_FLAGS_NONE allows duping across MIG partitions + status = dupMemory(dstVaSpace->device, + srcVaSpace->device->session->handle, + srcPhysHandle, + NV04_DUP_HANDLE_FLAGS_NONE, + &dstPhysHandle, + &gpuMemoryInfo); + + if (status != NV_OK) + return status; + + // Vidmem dups across GPUs are not currently supported + if (!gpuMemoryInfo.sysmem && (srcVaSpace->device != dstVaSpace->device)) + { + status = NV_ERR_NOT_SUPPORTED; + goto cleanup_dup; + } + + // The virtual allocation and mapping use the size, and page size, of the + // destination memory descriptor + allocInfo.pageSize = gpuMemoryInfo.pageSize; + + status = nvGpuOpsAllocVirtual(dstVaSpace, + gpuMemoryInfo.size, + dstAddress, + dstPhysHandle, + flags, + &allocInfo); + if (status != NV_OK) + goto cleanup_dup; + + // Map the entire memory + status = nvGpuOpsMapGpuMemory(dstVaSpace, + *dstAddress, + gpuMemoryInfo.size, + gpuMemoryInfo.pageSize, + &tmpDstAddress, + flags); + + if (status != NV_OK) + goto cleanup_virt_allocation; + + NV_ASSERT(tmpDstAddress == *dstAddress); + + return NV_OK; + +cleanup_virt_allocation: + 
nvGpuOpsFreeVirtual(dstVaSpace, *dstAddress); + +cleanup_dup: + pRmApi->Free(pRmApi, dstVaSpace->device->session->handle, dstPhysHandle); + return status; +} + +NV_STATUS nvGpuOpsGetGuid(NvHandle hClient, NvHandle hDevice, + NvHandle hSubDevice, NvU8 *gpuGuid, + unsigned guidLength) +{ + NV_STATUS status; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + NV2080_CTRL_GPU_GET_GID_INFO_PARAMS getGidParams = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (!gpuGuid) + return NV_ERR_INVALID_ARGUMENT; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, hClient, NULL, &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + getGidParams.index = 0; + getGidParams.flags = NV2080_GPU_CMD_GPU_GET_GID_FLAGS_FORMAT_BINARY; + status = pRmApi->Control(pRmApi, + hClient, hSubDevice, + NV2080_CTRL_CMD_GPU_GET_GID_INFO, + &getGidParams, + sizeof(getGidParams)); + + if ((guidLength != getGidParams.length) || (status != NV_OK)) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_INVALID_ARGUMENT; + } + + portMemCopy(gpuGuid, guidLength, &getGidParams.data, guidLength); + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +// Make sure UVM_COPY_ENGINE_COUNT_MAX is at least the number of copy engines +// supported by RM. +ct_assert(UVM_COPY_ENGINE_COUNT_MAX >= NV2080_ENGINE_TYPE_COPY_SIZE); + +static void setCeCaps(const NvU8 *rmCeCaps, gpuCeCaps *ceCaps) +{ + ceCaps->grce = !!NV2080_CTRL_CE_GET_CAP(rmCeCaps, NV2080_CTRL_CE_CAPS_CE_GRCE); + ceCaps->shared = !!NV2080_CTRL_CE_GET_CAP(rmCeCaps, NV2080_CTRL_CE_CAPS_CE_SHARED); + ceCaps->sysmemRead = !!NV2080_CTRL_CE_GET_CAP(rmCeCaps, NV2080_CTRL_CE_CAPS_CE_SYSMEM_READ); + ceCaps->sysmemWrite = !!NV2080_CTRL_CE_GET_CAP(rmCeCaps, NV2080_CTRL_CE_CAPS_CE_SYSMEM_WRITE); + ceCaps->nvlinkP2p = !!NV2080_CTRL_CE_GET_CAP(rmCeCaps, NV2080_CTRL_CE_CAPS_CE_NVLINK_P2P); + ceCaps->sysmem = !!NV2080_CTRL_CE_GET_CAP(rmCeCaps, NV2080_CTRL_CE_CAPS_CE_SYSMEM); + ceCaps->p2p = !!NV2080_CTRL_CE_GET_CAP(rmCeCaps, NV2080_CTRL_CE_CAPS_CE_P2P); +} + +static NV_STATUS queryCopyEngines(struct gpuDevice *gpu, gpuCesCaps *cesCaps) +{ + NV_STATUS status = NV_OK; + NV2080_CTRL_GPU_GET_ENGINES_PARAMS getEnginesParams = {0}; + NvU32 *engineList; + NvU32 i; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + status = pRmApi->Control(pRmApi, + gpu->session->handle, + gpu->subhandle, + NV2080_CTRL_CMD_GPU_GET_ENGINES, + &getEnginesParams, + sizeof(getEnginesParams)); + if (status != NV_OK) + return status; + + engineList = portMemAllocNonPaged( + sizeof(*engineList) * getEnginesParams.engineCount); + if (engineList == NULL) + return NV_ERR_NO_MEMORY; + + getEnginesParams.engineList = NV_PTR_TO_NvP64(engineList); + + status = pRmApi->Control(pRmApi, + gpu->session->handle, + gpu->subhandle, + NV2080_CTRL_CMD_GPU_GET_ENGINES, + &getEnginesParams, + sizeof(getEnginesParams)); + if (status != NV_OK) + goto done; + + portMemSet(cesCaps, 0, sizeof(*cesCaps)); + + for (i = 0; i < getEnginesParams.engineCount; i++) + { + NV2080_CTRL_CE_GET_CAPS_PARAMS ceParams = {0}; + NV2080_CTRL_CE_GET_CE_PCE_MASK_PARAMS pceMaskParams = {0}; + NvU8 rmCeCaps[NV2080_CTRL_CE_CAPS_TBL_SIZE] = {0}; + UvmGpuCopyEngineCaps *ceCaps; + + NvU32 ceIndex = engineList[i] - NV2080_ENGINE_TYPE_COPY0; + if 
(ceIndex >= NV2080_ENGINE_TYPE_COPY_SIZE) + continue; + + ceParams.ceEngineType = NV2080_ENGINE_TYPE_COPY(ceIndex); + ceParams.capsTblSize = NV2080_CTRL_CE_CAPS_TBL_SIZE; + ceParams.capsTbl = NV_PTR_TO_NvP64(rmCeCaps); + + status = pRmApi->Control(pRmApi, + gpu->session->handle, + gpu->subhandle, + NV2080_CTRL_CMD_CE_GET_CAPS, + &ceParams, + sizeof(ceParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s:%d: %s\n", __FUNCTION__, + __LINE__, nvstatusToString(status)); + goto done; + } + + ceCaps = cesCaps->copyEngineCaps + ceIndex; + setCeCaps(rmCeCaps, ceCaps); + + pceMaskParams.ceEngineType = NV2080_ENGINE_TYPE_COPY(ceIndex); + pceMaskParams.pceMask = 0; + status = pRmApi->Control(pRmApi, + gpu->session->handle, + gpu->subhandle, + NV2080_CTRL_CMD_CE_GET_CE_PCE_MASK, + &pceMaskParams, + sizeof(pceMaskParams)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s:%d: %s\n", __FUNCTION__, + __LINE__, nvstatusToString(status)); + goto done; + } + ceCaps->cePceMask = pceMaskParams.pceMask; + + ceCaps->supported = NV_TRUE; + } + +done: + portMemFree(engineList); + return status; +} + +static NvBool isClassHost(NvU32 class) +{ + NvBool bHostClass = NV_FALSE; + CLI_CHANNEL_CLASS_INFO classInfo; + CliGetChannelClassInfo(class, &classInfo); + bHostClass = (classInfo.classType == CHANNEL_CLASS_TYPE_GPFIFO); + return bHostClass; +} + +static NvBool isClassCE(NvU32 class) +{ + switch (class) + { + case MAXWELL_DMA_COPY_A: + case PASCAL_DMA_COPY_A: + case PASCAL_DMA_COPY_B: + case VOLTA_DMA_COPY_A: + case TURING_DMA_COPY_A: + case AMPERE_DMA_COPY_A: + case AMPERE_DMA_COPY_B: + return NV_TRUE; + + default: + return NV_FALSE; + } +} + +static NvBool isClassSec2(NvU32 class) +{ + switch (class) + { + + default: + return NV_FALSE; + } +} + +static NvBool isClassCompute(NvU32 class) +{ + switch (class) + { + case MAXWELL_COMPUTE_A: + case MAXWELL_COMPUTE_B: + case PASCAL_COMPUTE_A: + case PASCAL_COMPUTE_B: + case VOLTA_COMPUTE_A: + case VOLTA_COMPUTE_B: + case TURING_COMPUTE_A: + case AMPERE_COMPUTE_A: + case AMPERE_COMPUTE_B: + return NV_TRUE; + + default: + return NV_FALSE; + } +} + +static NvBool isClassFaultBuffer(NvU32 class) +{ + switch (class) + { + case MAXWELL_FAULT_BUFFER_A: + case MMU_FAULT_BUFFER: + return NV_TRUE; + + default: + return NV_FALSE; + } +} + +static NvBool isClassAccessCounterBuffer(NvU32 class) +{ + switch (class) + { + case ACCESS_COUNTER_NOTIFY_BUFFER: + return NV_TRUE; + + default: + return NV_FALSE; + } +} + +static NV_STATUS findDeviceClasses(NvHandle hRoot, + NvHandle hDevice, + NvHandle hSubdevice, + NvU32 *hostClass, + NvU32 *ceClass, + NvU32 *computeClass, + NvU32 *faultBufferClass, + NvU32 *accessCounterBufferClass, + NvU32 *sec2Class) +{ + NvU32 *classList; + NV_STATUS status = NV_OK; + NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS classParams = {0}; + NvU32 i = 0; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + *hostClass = 0; + *ceClass = 0; + *computeClass = 0; + *faultBufferClass = 0; + *accessCounterBufferClass = 0; + + status = pRmApi->Control(pRmApi, + hRoot, + hDevice, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + &classParams, + sizeof(classParams)); + if (status != NV_OK) + return status; + + classList = portMemAllocNonPaged( + (sizeof(NvU32) * classParams.numClasses)); + + if (classList == NULL) + { + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + classParams.classList = NV_PTR_TO_NvP64(classList); + status = pRmApi->Control(pRmApi, + hRoot, + hDevice, + NV0080_CTRL_CMD_GPU_GET_CLASSLIST, + &classParams, + sizeof(classParams)); + + if (status 
!= NV_OK) + goto Cleanup_classlist; + + for (i = 0; i < classParams.numClasses; i++) + { + if (classList[i] == PHYSICAL_CHANNEL_GPFIFO) + continue; + if (isClassHost(classList[i])) + *hostClass = NV_MAX(*hostClass, classList[i]); + else if (isClassCE(classList[i])) + *ceClass = NV_MAX(*ceClass, classList[i]); + else if (isClassCompute(classList[i])) + *computeClass = NV_MAX(*computeClass, classList[i]); + else if (isClassFaultBuffer(classList[i])) + *faultBufferClass = NV_MAX(*faultBufferClass, classList[i]); + else if (isClassAccessCounterBuffer(classList[i])) + { + NV_ASSERT(accessCounterBufferClass); + *accessCounterBufferClass = NV_MAX(*accessCounterBufferClass, classList[i]); + } + else if (isClassSec2(classList[i])) + *sec2Class = NV_MAX(*sec2Class, classList[i]); + } + +Cleanup_classlist: + portMemFree(classList); + return status; +} + +NV_STATUS nvGpuOpsGetClientInfoFromPid(unsigned pid, + const NvU8 *gpuUuid, + NvHandle *hClient, + NvHandle *hDevice, + NvHandle *hSubDevice) +{ + NV0000_CTRL_GPU_GET_UUID_INFO_PARAMS gpuIdInfoParams = {{0}}; + unsigned gpuId = 0; + NvHandle hPidClient = 0; + NvHandle hPidDevice = 0; + NvHandle hPidVaSpace = 0; + NvHandle hPidSubDevice = 0; + NvHandle clientHandle = 0; + NV_STATUS status; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_NONE, NV01_NULL_OBJECT, NULL, &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = nvGpuOpsCreateClient(pRmApi, &clientHandle); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + // find the gpuId from the given uuid + portMemCopy(&gpuIdInfoParams.gpuUuid, NV_GPU_UUID_LEN, gpuUuid, NV_GPU_UUID_LEN); + gpuIdInfoParams.flags = NV0000_CTRL_CMD_GPU_GET_UUID_INFO_FLAGS_FORMAT_BINARY; + status = pRmApi->Control(pRmApi, + clientHandle, + clientHandle, + NV0000_CTRL_CMD_GPU_GET_UUID_INFO, + &gpuIdInfoParams, + sizeof(gpuIdInfoParams)); + if (status != NV_OK) + goto cleanup; + + gpuId = gpuIdInfoParams.gpuId; + + status = findVaspaceFromPid(pid, gpuId, &hPidClient, + &hPidDevice, &hPidSubDevice, &hPidVaSpace); + + // free the session we just created + pRmApi->Free(pRmApi, clientHandle, clientHandle); + if (status != NV_OK) + goto cleanup; + + *hClient = hPidClient; + *hDevice = hPidDevice; + *hSubDevice = hPidSubDevice; + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_OK; + +cleanup: + *hClient = 0; + *hDevice = 0; + *hSubDevice = 0; + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +NV_STATUS nvGpuOpsSetPageDirectory(struct gpuAddressSpace *vaSpace, + NvU64 physAddress, + unsigned numEntries, + NvBool bVidMemAperture, NvU32 pasid) +{ + NV_STATUS status; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS params = {0}; + OBJGPU *pGpu = NULL; + OBJVASPACE *pVAS = NULL; + RsClient *pClient; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (!vaSpace || !numEntries) + return NV_ERR_INVALID_ARGUMENT; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, + vaSpace->device->session->handle, + 
&pClient, + &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = CliSetGpuContext(vaSpace->device->session->handle, + vaSpace->device->handle, + &pGpu, + NULL); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = vaspaceGetByHandleOrDeviceDefault(pClient, + vaSpace->device->handle, + vaSpace->handle, + &pVAS); + if ((status != NV_OK) || (pVAS == NULL)) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_INVALID_ARGUMENT; + } + + if (vaspaceIsExternallyOwned(pVAS)) + { + // make sure there is no PDB set if already externally owned + if ((NULL != vaspaceGetPageDirBase(pVAS, pGpu))) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_NOT_SUPPORTED; + } + + // Stop all channels under the VAS + status = nvGpuOpsDisableVaSpaceChannels(vaSpace); + if (status != NV_OK) + { + // + // If stopping any channels failed, reenable the channels which were + // able to be stopped before bailing + // + nvGpuOpsEnableVaSpaceChannels(vaSpace); + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + } + + params.physAddress = physAddress; + params.numEntries = numEntries; + params.hVASpace = vaSpace->handle; + params.flags = bVidMemAperture ? + DRF_DEF(0080, _CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS, _APERTURE, _VIDMEM) : + DRF_DEF(0080, _CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS, _APERTURE, _SYSMEM_COH); + params.flags |= DRF_DEF(0080, _CTRL_DMA_SET_PAGE_DIRECTORY_FLAGS, + _ALL_CHANNELS, _TRUE); + params.pasid = pasid; + + // Always do Unicast by passing non-zero subDeviceId! 
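+    //
+    // (subDeviceId is 1-based for this control: a value of 0 would apply to
+    // all subdevices, so the instance is biased by 1 to keep the update
+    // unicast, as noted above.)
+    //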
+    params.subDeviceId = vaSpace->device->subdeviceInstance + 1;
+
+    status = pRmApi->Control(pRmApi,
+                             vaSpace->device->session->handle,
+                             vaSpace->device->handle,
+                             NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY,
+                             &params,
+                             sizeof(params));
+
+    if (vaspaceIsExternallyOwned(pVAS))
+    {
+        // Reschedule all channels in this VAS
+        nvGpuOpsEnableVaSpaceChannels(vaSpace);
+    }
+
+    _nvGpuOpsLocksRelease(&acquiredLocks);
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    return status;
+}
+
+NV_STATUS nvGpuOpsUnsetPageDirectory(struct gpuAddressSpace *vaSpace)
+{
+    NV_STATUS status;
+    nvGpuOpsLockSet acquiredLocks;
+    THREAD_STATE_NODE threadState;
+    NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS params = {0};
+    OBJGPU *pGpu = NULL;
+    OBJVASPACE *pVAS = NULL;
+    RsClient *pClient;
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+    if (!vaSpace)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+    status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ,
+                                      vaSpace->device->session->handle,
+                                      &pClient,
+                                      &acquiredLocks);
+    if (status != NV_OK)
+    {
+        threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+        return status;
+    }
+
+    status = CliSetGpuContext(vaSpace->device->session->handle,
+                              vaSpace->device->handle,
+                              &pGpu,
+                              NULL);
+    if (status != NV_OK)
+    {
+        _nvGpuOpsLocksRelease(&acquiredLocks);
+        threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+        return status;
+    }
+
+    status = vaspaceGetByHandleOrDeviceDefault(pClient,
+                                               vaSpace->device->handle,
+                                               vaSpace->handle,
+                                               &pVAS);
+    if ((status != NV_OK) || (pVAS == NULL))
+    {
+        _nvGpuOpsLocksRelease(&acquiredLocks);
+        threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    if (vaspaceIsExternallyOwned(pVAS))
+    {
+        // Stop all channels under the VAS
+        status = nvGpuOpsDisableVaSpaceChannels(vaSpace);
+        if (status != NV_OK)
+        {
+            //
+            // If stopping any channels failed, reenable the channels which were
+            // able to be stopped before bailing
+            //
+            nvGpuOpsEnableVaSpaceChannels(vaSpace);
+            _nvGpuOpsLocksRelease(&acquiredLocks);
+            threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+            return status;
+        }
+    }
+
+    params.hVASpace = vaSpace->handle;
+
+    // Always do Unicast by passing non-zero subDeviceId!
+    params.subDeviceId = vaSpace->device->subdeviceInstance + 1;
+
+    status = pRmApi->Control(pRmApi,
+                             vaSpace->device->session->handle,
+                             vaSpace->device->handle,
+                             NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
+                             &params,
+                             sizeof(params));
+
+    if (vaspaceIsExternallyOwned(pVAS))
+    {
+        // Reschedule all channels in this VAS
+        nvGpuOpsEnableVaSpaceChannels(vaSpace);
+    }
+
+    _nvGpuOpsLocksRelease(&acquiredLocks);
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    return status;
+}
+
+NV_STATUS nvGpuOpsGetGmmuFmt(struct gpuAddressSpace *vaSpace, void **pFmt)
+{
+    NV_STATUS status = NV_OK;
+    nvGpuOpsLockSet acquiredLocks;
+    THREAD_STATE_NODE threadState;
+    NV90F1_CTRL_VASPACE_GET_GMMU_FORMAT_PARAMS params = {0};
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL);
+
+    if (!vaSpace || !pFmt)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    if (!vaSpace->handle)
+        return NV_ERR_INVALID_OBJECT_HANDLE;
+
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+    status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ,
+                                      vaSpace->device->session->handle,
+                                      NULL,
+                                      &acquiredLocks);
+    if (status != NV_OK)
+    {
+        threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+        return status;
+    }
+
+    *pFmt = NULL;
+    params.hSubDevice = vaSpace->device->subhandle;
+
+    status = pRmApi->Control(pRmApi,
+                             vaSpace->device->session->handle,
+                             vaSpace->handle,
+                             NV90F1_CTRL_CMD_VASPACE_GET_GMMU_FORMAT,
+                             &params,
+                             sizeof(params));
+    if (status == NV_OK)
+        *pFmt = (void *)params.pFmt;
+
+    _nvGpuOpsLocksRelease(&acquiredLocks);
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    return status;
+}
+
+NV_STATUS nvGpuOpsInvalidateTlb(struct gpuAddressSpace *vaSpace)
+{
+    NV2080_CTRL_DMA_INVALIDATE_TLB_PARAMS params = {0};
+    RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL);
+
+    if (!vaSpace)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    params.hVASpace = vaSpace->handle;
+    return pRmApi->Control(pRmApi,
+                           vaSpace->device->session->handle,
+                           vaSpace->device->subhandle,
+                           NV2080_CTRL_CMD_DMA_INVALIDATE_TLB,
+                           &params,
+                           sizeof(params));
+}
+
+NV_STATUS nvGpuOpsGetFbInfo(struct gpuDevice *device, gpuFbInfo *fbInfo)
+{
+    NV_STATUS status;
+    nvGpuOpsLockSet acquiredLocks;
+    THREAD_STATE_NODE threadState;
+
+    if (!device || !fbInfo)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+    status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ,
+                                      device->session->handle,
+                                      NULL,
+                                      &acquiredLocks);
+    if (status != NV_OK)
+    {
+        threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+        return status;
+    }
+
+    portMemCopy(fbInfo, sizeof(*fbInfo), &device->fbInfo, sizeof(*fbInfo));
+
+    _nvGpuOpsLocksRelease(&acquiredLocks);
+    threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+    return NV_OK;
+}
+
+NV_STATUS nvGpuOpsGetEccInfo(struct gpuDevice *device, gpuEccInfo *eccInfo)
+{
+    subDeviceDesc *rmSubDevice;
+
+    if (!device || !eccInfo)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    rmSubDevice = device->rmSubDevice;
+
+    if (!rmSubDevice->bEccInitialized)
+        return NV_ERR_NOT_SUPPORTED;
+
+    eccInfo->eccMask = rmSubDevice->eccMask;
+    eccInfo->eccOffset = rmSubDevice->eccOffset;
+    eccInfo->eccReadLocation = rmSubDevice->eccReadLocation;
+    eccInfo->bEccEnabled = rmSubDevice->bEccEnabled;
+    eccInfo->eccErrorNotifier = &rmSubDevice->eccErrorNotifier;
+
+    return NV_OK;
+}
+
+//
+// Do not acquire the GPU locks as all nvGpuOpsFreeDupedHandle() does is
+// call pRmApi->Free(pRmApi, ) that drops the GPU locks if acquired (and
+// re-acquires it later).
+// +NV_STATUS nvGpuOpsFreeDupedHandle(struct gpuDevice *device, + NvHandle hPhysHandle) +{ + NV_STATUS status = NV_OK; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + Memory *pMemory = NULL; + OBJGPU *pMappingGpu = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NvHandle hClient; + NvHandle hSubDevice; + + if (!device) + return NV_ERR_INVALID_ARGUMENT; + + hClient = device->session->handle; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquire(RMAPI_LOCK_FLAGS_READ, hClient, NULL, 0, 0, 0, &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = CliSetSubDeviceContext(device->session->handle, + device->subhandle, + &hSubDevice, + &pMappingGpu); + if (status != NV_OK) + goto out; + + status = nvGpuOpsGetMemoryByHandle(device->session->handle, + hPhysHandle, + &pMemory); + if (status != NV_OK) + goto out; + + if (memdescIsSysmem(pMemory->pMemDesc)) + { + // Release the mappings acquired in nvGpuOpsDupMemory(). + // + // TODO: Bug 1811060: Add native support for this use-case in RM API. + memdescUnmapIommu(pMemory->pMemDesc, pMappingGpu->busInfo.iovaspaceId); + } + +out: + pRmApi->Free(pRmApi, device->session->handle, hPhysHandle); + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +NV_STATUS nvGpuOpsInitFaultInfo(struct gpuDevice *device, + gpuFaultInfo *pFaultInfo) +{ + struct gpuSession *session = device->session; + NV_STATUS status = NV_OK; + NVB069_ALLOCATION_PARAMETERS faultBufferAllocParams = {0}; + NVB069_CTRL_FAULTBUFFER_GET_SIZE_PARAMS sizeParams = {0}; + NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS_PARAMS registermappingsParams = {0}; + void *bufferAddress; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + pFaultInfo->faultBufferHandle = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, + session->handle, + device->subhandle, + &pFaultInfo->faultBufferHandle, + device->faultBufferClass, + &faultBufferAllocParams); + if (status != NV_OK) + goto cleanup; + + // Get the Size of the fault buffer + status = pRmApi->Control(pRmApi, + session->handle, + pFaultInfo->faultBufferHandle, + NVB069_CTRL_CMD_FAULTBUFFER_GET_SIZE, + &sizeParams, + sizeof(sizeParams)); + if (status != NV_OK) + goto cleanup_fault_buffer; + + // Map the fault buffer pointer to CPU + status = pRmApi->MapToCpu(pRmApi, + session->handle, + device->subhandle, + pFaultInfo->faultBufferHandle, + 0, + pFaultInfo->replayable.bufferSize, + &bufferAddress, + 0); + if (status != NV_OK) + goto cleanup_fault_buffer; + + status = pRmApi->Control(pRmApi, + session->handle, + pFaultInfo->faultBufferHandle, + NVB069_CTRL_CMD_FAULTBUFFER_GET_REGISTER_MAPPINGS, + ®istermappingsParams, + sizeof(registermappingsParams)); + if (status != NV_OK) + goto cleanup_fault_buffer; + + if (isDeviceVoltaPlus(device)) + { + NVC369_CTRL_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF_PARAMS nonReplayableFaultsParams = {0}; + + status = pRmApi->Control(pRmApi, + session->handle, + pFaultInfo->faultBufferHandle, + NVC369_CTRL_CMD_MMU_FAULT_BUFFER_REGISTER_NON_REPLAY_BUF, + &nonReplayableFaultsParams, + sizeof(nonReplayableFaultsParams)); + if (status != NV_OK) + goto cleanup_fault_buffer; + + pFaultInfo->nonReplayable.shadowBufferAddress = (void *)NvP64_VALUE(nonReplayableFaultsParams.pShadowBuffer); + pFaultInfo->nonReplayable.shadowBufferContext = (void 
*)NvP64_VALUE(nonReplayableFaultsParams.pShadowBufferContext); + pFaultInfo->nonReplayable.bufferSize = nonReplayableFaultsParams.bufferSize; + } + + pFaultInfo->replayable.pFaultBufferGet = (NvU32*)(NvUPtr)registermappingsParams.pFaultBufferGet; + pFaultInfo->replayable.pFaultBufferPut = (NvU32*)(NvUPtr)registermappingsParams.pFaultBufferPut; + pFaultInfo->replayable.pFaultBufferInfo = (NvU32*)(NvUPtr)registermappingsParams.pFaultBufferInfo; + pFaultInfo->replayable.pPmcIntr = (NvU32*)(NvUPtr)registermappingsParams.pPmcIntr; + pFaultInfo->replayable.pPmcIntrEnSet = (NvU32*)(NvUPtr)registermappingsParams.pPmcIntrEnSet; + pFaultInfo->replayable.pPmcIntrEnClear = (NvU32*)(NvUPtr)registermappingsParams.pPmcIntrEnClear; + pFaultInfo->replayable.replayableFaultMask = registermappingsParams.replayableFaultMask; + pFaultInfo->replayable.pPrefetchCtrl = (NvU32*)(NvUPtr)registermappingsParams.pPrefetchCtrl; + pFaultInfo->replayable.bufferSize = sizeParams.faultBufferSize; + pFaultInfo->replayable.bufferAddress = bufferAddress; + + return NV_OK; + +cleanup_fault_buffer: + gpuDeviceUnmapCpuFreeHandle(device, + pFaultInfo->faultBufferHandle, + pFaultInfo->replayable.bufferAddress, + 0); +cleanup: + portMemSet(pFaultInfo, 0, sizeof(*pFaultInfo)); + return status; +} + +NV_STATUS nvGpuOpsInitAccessCntrInfo(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo) +{ + struct gpuSession *session = device->session; + NV_STATUS status = NV_OK; + NvU32 accessCntrBufferAllocParams = {0}; + NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_SIZE_PARAMS sizeParams = {0}; + NVC365_CTRL_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS_PARAMS registermappings; + void *bufferAddress; + NV0080_CTRL_BIF_GET_DMA_BASE_SYSMEM_ADDR_PARAMS getDmaBaseSysmemAddrParams = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + OBJGPU *pGpu = NULL; + + // TODO: Acquired because CliSetGpuContext expects RMAPI lock. Necessary? 
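+    //
+    // The RM API lock is held only long enough to resolve the GPU from the
+    // device handle; it is dropped again before the access counter buffer is
+    // allocated below.
+    //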
+ status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + status = CliSetGpuContext(session->handle, device->handle, &pGpu, NULL); + rmapiLockRelease(); + if (status != NV_OK) + return status; + + pAccessCntrInfo->accessCntrBufferHandle = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, + session->handle, + device->subhandle, + &pAccessCntrInfo->accessCntrBufferHandle, + device->accessCounterBufferClass, + &accessCntrBufferAllocParams); + + if (status != NV_OK) + goto cleanup; + + status = pRmApi->MapToCpu(pRmApi, session->handle, device->subhandle, pAccessCntrInfo->accessCntrBufferHandle, + 0, pAccessCntrInfo->bufferSize, &bufferAddress, 0); + + if (status != NV_OK) + goto cleanup_access_ctr_buffer; + + pAccessCntrInfo->bufferAddress = bufferAddress; + + status = pRmApi->Control(pRmApi, + session->handle, + pAccessCntrInfo->accessCntrBufferHandle, + NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_GET_SIZE, + &sizeParams, + sizeof(sizeParams)); + + if (status != NV_OK) + goto cleanup_access_ctr_buffer; + + pAccessCntrInfo->bufferSize = sizeParams.accessCntrBufferSize; + + status = pRmApi->Control(pRmApi, + session->handle, + pAccessCntrInfo->accessCntrBufferHandle, + NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_GET_REGISTER_MAPPINGS, + ®istermappings, + sizeof(registermappings)); + if (status != NV_OK) + goto cleanup_access_ctr_buffer; + + status = pRmApi->Control(pRmApi, + session->handle, + device->handle, + NV0080_CTRL_CMD_BIF_GET_DMA_BASE_SYSMEM_ADDR, + &getDmaBaseSysmemAddrParams, + sizeof(getDmaBaseSysmemAddrParams)); + if (status != NV_OK) + goto cleanup_access_ctr_buffer; + + pAccessCntrInfo->pAccessCntrBufferGet = (NvU32*)(NvUPtr)registermappings.pAccessCntrBufferGet; + pAccessCntrInfo->pAccessCntrBufferPut = (NvU32*)(NvUPtr)registermappings.pAccessCntrBufferPut; + pAccessCntrInfo->pAccessCntrBufferFull = (NvU32*)(NvUPtr)registermappings.pAccessCntrBufferFull; + pAccessCntrInfo->pHubIntr = (NvU32*)(NvUPtr)registermappings.pHubIntr; + pAccessCntrInfo->pHubIntrEnSet = (NvU32*)(NvUPtr)registermappings.pHubIntrEnSet; + pAccessCntrInfo->pHubIntrEnClear = (NvU32*)(NvUPtr)registermappings.pHubIntrEnClear; + pAccessCntrInfo->accessCounterMask = registermappings.accessCntrMask; + pAccessCntrInfo->baseDmaSysmemAddr = getDmaBaseSysmemAddrParams.baseDmaSysmemAddr; + + return NV_OK; + +cleanup_access_ctr_buffer: + gpuDeviceUnmapCpuFreeHandle(device, + pAccessCntrInfo->accessCntrBufferHandle, + pAccessCntrInfo->bufferAddress, + 0); +cleanup: + pAccessCntrInfo->accessCntrBufferHandle = 0; + pAccessCntrInfo->bufferAddress = 0; + return status; +} + +static NV_STATUS +getAccessCounterGranularityValue(UVM_ACCESS_COUNTER_GRANULARITY granularity, NvU32 *value) +{ + *value = 0; + + switch (granularity) + { + case UVM_ACCESS_COUNTER_GRANULARITY_64K: + *value = NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_64K; + break; + case UVM_ACCESS_COUNTER_GRANULARITY_2M: + *value = NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_2M; + break; + case UVM_ACCESS_COUNTER_GRANULARITY_16M: + *value = NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16M; + break; + case UVM_ACCESS_COUNTER_GRANULARITY_16G: + *value = NVC365_CTRL_ACCESS_COUNTER_GRANULARITY_16G; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + }; + + return NV_OK; +} + +static NV_STATUS +getAccessCounterLimitValue(UVM_ACCESS_COUNTER_USE_LIMIT limit, NvU32 *value) +{ + *value = 0; + + switch (limit) + { + case UVM_ACCESS_COUNTER_USE_LIMIT_NONE: + *value = NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_NONE; + break; + case 
UVM_ACCESS_COUNTER_USE_LIMIT_QTR: + *value = NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_QTR; + break; + case UVM_ACCESS_COUNTER_USE_LIMIT_HALF: + *value = NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_HALF; + break; + case UVM_ACCESS_COUNTER_USE_LIMIT_FULL: + *value = NVC365_CTRL_ACCESS_COUNTER_USE_LIMIT_FULL; + break; + default: + return NV_ERR_INVALID_ARGUMENT; + }; + + return NV_OK; +} + +NV_STATUS nvGpuOpsEnableAccessCntr(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo, + gpuAccessCntrConfig *pAccessCntrConfig) +{ + NV_STATUS status = NV_OK; + NVC365_CTRL_ACCESS_CNTR_SET_CONFIG_PARAMS setConfigParams = { 0 }; + NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS enableParams = { 0 }; + struct gpuSession *session = device->session; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + status = getAccessCounterGranularityValue(pAccessCntrConfig->mimcGranularity, &setConfigParams.mimcGranularity); + if (status != NV_OK) + return status; + + status = getAccessCounterGranularityValue(pAccessCntrConfig->momcGranularity, &setConfigParams.momcGranularity); + if (status != NV_OK) + return status; + + status = getAccessCounterLimitValue(pAccessCntrConfig->mimcUseLimit, &setConfigParams.mimcLimit); + if (status != NV_OK) + return status; + + status = getAccessCounterLimitValue(pAccessCntrConfig->momcUseLimit, &setConfigParams.momcLimit); + if (status != NV_OK) + return status; + + setConfigParams.threshold = pAccessCntrConfig->threshold; + setConfigParams.cmd = NVC365_CTRL_ACCESS_COUNTER_SET_MIMC_GRANULARITY | + NVC365_CTRL_ACCESS_COUNTER_SET_MOMC_GRANULARITY | + NVC365_CTRL_ACCESS_COUNTER_SET_MIMC_LIMIT | + NVC365_CTRL_ACCESS_COUNTER_SET_MOMC_LIMIT | + NVC365_CTRL_ACCESS_COUNTER_SET_THRESHOLD; + + status = pRmApi->Control(pRmApi, + session->handle, + pAccessCntrInfo->accessCntrBufferHandle, + NVC365_CTRL_CMD_ACCESS_CNTR_SET_CONFIG, + &setConfigParams, + sizeof(setConfigParams)); + if (status != NV_OK) + return status; + + enableParams.intrOwnership = NVC365_CTRL_ACCESS_COUNTER_INTERRUPT_OWNERSHIP_NOT_RM; + enableParams.enable = NV_TRUE; + + status = pRmApi->Control(pRmApi, + session->handle, + pAccessCntrInfo->accessCntrBufferHandle, + NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_ENABLE, + &enableParams, + sizeof(enableParams)); + return status; +} + +NV_STATUS nvGpuOpsDisableAccessCntr(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo) +{ + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + NVC365_CTRL_ACCESS_CNTR_BUFFER_ENABLE_PARAMS enableParams = { 0 }; + + enableParams.intrOwnership = NVC365_CTRL_ACCESS_COUNTER_INTERRUPT_OWNERSHIP_RM; + enableParams.enable = NV_FALSE; + return pRmApi->Control(pRmApi, + device->session->handle, + pAccessCntrInfo->accessCntrBufferHandle, + NVC365_CTRL_CMD_ACCESS_CNTR_BUFFER_ENABLE, + &enableParams, + sizeof(enableParams)); +} + +NV_STATUS nvGpuOpsDestroyAccessCntrInfo(struct gpuDevice *device, + gpuAccessCntrInfo *pAccessCntrInfo) +{ + gpuDeviceUnmapCpuFreeHandle(device, + pAccessCntrInfo->accessCntrBufferHandle, + pAccessCntrInfo->bufferAddress, + 0); + portMemSet(pAccessCntrInfo, 0, sizeof(gpuAccessCntrInfo)); + return NV_OK; +} + +NV_STATUS nvGpuOpsDestroyFaultInfo(struct gpuDevice *device, + gpuFaultInfo *pFaultInfo) +{ + NV_STATUS status = NV_OK; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + if (pFaultInfo->faultBufferHandle && isDeviceVoltaPlus(device)) + { + NVC369_CTRL_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF_PARAMS params = {0}; + + params.pShadowBuffer = 
NV_PTR_TO_NvP64(pFaultInfo->nonReplayable.shadowBufferAddress); + + status = pRmApi->Control(pRmApi, + device->session->handle, + pFaultInfo->faultBufferHandle, + NVC369_CTRL_CMD_MMU_FAULT_BUFFER_UNREGISTER_NON_REPLAY_BUF, + ¶ms, + sizeof(params)); + NV_ASSERT(status == NV_OK); + } + + gpuDeviceUnmapCpuFreeHandle(device, + pFaultInfo->faultBufferHandle, + pFaultInfo->replayable.bufferAddress, + 0); + + portMemSet(pFaultInfo, 0, sizeof(gpuFaultInfo)); + return status; +} + +NV_STATUS nvGpuOpsHasPendingNonReplayableFaults(gpuFaultInfo *pFaultInfo, + NvBool *hasPendingFaults) +{ + GMMU_SHADOW_FAULT_BUF *pQueue = + (GMMU_SHADOW_FAULT_BUF *) pFaultInfo->nonReplayable.shadowBufferAddress; + + if (!pQueue || !hasPendingFaults) + return NV_ERR_INVALID_ARGUMENT; + + *hasPendingFaults = !queueIsEmpty(pQueue); + + return NV_OK; +} + +NV_STATUS nvGpuOpsGetNonReplayableFaults(gpuFaultInfo *pFaultInfo, + void *faultBuffer, + NvU32 *numFaults) +{ + GMMU_SHADOW_FAULT_BUF *pQueue = + (GMMU_SHADOW_FAULT_BUF *) pFaultInfo->nonReplayable.shadowBufferAddress; + QueueContext *pQueueCtx = + (QueueContext *) pFaultInfo->nonReplayable.shadowBufferContext; + + if (!pQueue || !faultBuffer || !numFaults) + return NV_ERR_INVALID_ARGUMENT; + + *numFaults = 0; + + // Copy all faults in the client shadow fault buffer to the given buffer + while (queuePopAndCopyNonManaged(pQueue, pQueueCtx, faultBuffer)) + { + ++(*numFaults); + faultBuffer = (char *)faultBuffer + NVC369_BUF_SIZE; + } + + return NV_OK; +} + +static NV_STATUS nvGpuOpsVerifyChannel(struct gpuAddressSpace *vaSpace, + NvHandle hClient, + NvHandle hKernelChannel, + OBJGPU **pGpu, + KernelChannel **ppKernelChannel) +{ + NV_STATUS status = NV_OK; + NvHandle hDevice, hSubDevice; + OBJVASPACE *pVAS = NULL; + OBJGPU *pVaSpaceGpu; + RsClient *pClient; + + NV_ASSERT_OR_RETURN(ppKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverGetClientUnderLock(&g_resServ, vaSpace->device->session->handle, &pClient); + if (status != NV_OK) + return status; + + status = vaspaceGetByHandleOrDeviceDefault(pClient, + vaSpace->device->handle, + vaSpace->handle, + &pVAS); + if (status != NV_OK) + return status; + + status = CliGetKernelChannel(hClient, hKernelChannel, ppKernelChannel); + if (status != NV_OK) + return NV_ERR_INVALID_OBJECT_HANDLE; + + hDevice = RES_GET_HANDLE(GPU_RES_GET_DEVICE(*ppKernelChannel)); + status = CliSetGpuContext(hClient, hDevice, pGpu, NULL); + if (status != NV_OK) + return status; + + if ((*ppKernelChannel)->pVAS != pVAS) + { + if (CliSetGpuContext(vaSpace->device->session->handle, + vaSpace->device->handle, + &pVaSpaceGpu, + NULL) == NV_OK && pVaSpaceGpu != *pGpu) + return NV_ERR_OTHER_DEVICE_FOUND; + + return NV_ERR_INVALID_CHANNEL; + } + + // In SLI config, RM's internal allocations such as channel instance + // are tracked with a memdesc per subdevice. Hence, Get the correct pGpu. 
+ status = CliSetSubDeviceContext(vaSpace->device->session->handle, + vaSpace->device->subhandle, + &hSubDevice, + pGpu); + if (status != NV_OK) + return status; + + return NV_OK; +} + +static NV_STATUS nvGpuOpsGetChannelEngineType(OBJGPU *pGpu, + KernelChannel *pKernelChannel, + UVM_GPU_CHANNEL_ENGINE_TYPE *engineType) +{ + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvU32 engDesc, engineType2080; + NV_STATUS status; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + status = kchannelGetEngine_HAL(pGpu, pKernelChannel, &engDesc); + if (status != NV_OK) + return status; + + status = kfifoEngineInfoXlate_HAL(pGpu, + pKernelFifo, + ENGINE_INFO_TYPE_ENG_DESC, + engDesc, + ENGINE_INFO_TYPE_NV2080, + &engineType2080); + if (status != NV_OK) + return status; + + if (NV2080_ENGINE_TYPE_IS_GR(engineType2080)) + *engineType = UVM_GPU_CHANNEL_ENGINE_TYPE_GR; + else if (engineType2080 == NV2080_ENGINE_TYPE_SEC2) + *engineType = UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2; + else + *engineType = UVM_GPU_CHANNEL_ENGINE_TYPE_CE; + + return NV_OK; +} + +static void _memdescRetain(MEMORY_DESCRIPTOR *pMemDesc) +{ + if (pMemDesc->Allocated > 0) + { + pMemDesc->Allocated++; + } + + memdescAddRef(pMemDesc); +} + +static NV_STATUS nvGpuOpsGetChannelInstanceMemInfo(gpuRetainedChannel *retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo) +{ + PMEMORY_DESCRIPTOR pMemDesc = NULL; + NV2080_CTRL_FIFO_MEM_INFO instanceMemInfo; + NV_STATUS status; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(retainedChannel->pGpu); + KernelChannel *pKernelChannel = NULL; + CHID_MGR *pChidMgr = kfifoGetChidMgr(retainedChannel->pGpu, + pKernelFifo, + retainedChannel->runlistId); + + pKernelChannel = kfifoChidMgrGetKernelChannel(retainedChannel->pGpu, + pKernelFifo, + pChidMgr, + channelInstanceInfo->chId); + NV_CHECK_OR_RETURN(LEVEL_ERROR, pKernelChannel != NULL, NV_ERR_INVALID_CHANNEL); + + status = kfifoChannelGetFifoContextMemDesc_HAL(retainedChannel->pGpu, + pKernelFifo, + pKernelChannel, + FIFO_CTX_INST_BLOCK, + &pMemDesc); + if (status != NV_OK) + return status; + + pMemDesc = memdescGetMemDescFromGpu(pMemDesc, retainedChannel->pGpu); + + kfifoFillMemInfo(pKernelFifo, pMemDesc, &instanceMemInfo); + + if (instanceMemInfo.aperture == NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_INVALID) + return NV_ERR_INVALID_OBJECT_HANDLE; + + retainedChannel->instanceMemDesc = pMemDesc; + channelInstanceInfo->base = instanceMemInfo.base; + channelInstanceInfo->sysmem = (instanceMemInfo.aperture != NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM); + + return NV_OK; +} + +static NV_STATUS nvGpuOpsGetChannelTsgInfo(gpuRetainedChannel *retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo, + KernelChannel *pKernelChannel) +{ + OBJGPU *pGpu = retainedChannel->pGpu; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + KernelChannelGroup *pKernelChannelGroup; + NvHandle hDupTsg; + NvU32 tsgMaxSubctxCount; + NV_STATUS status; + NvBool bLockAcquire = NV_FALSE; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + pKernelChannelGroup = pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup; + NV_ASSERT_OR_RETURN(pKernelChannelGroup != NULL, NV_ERR_INVALID_STATE); + + if (rmGpuLockIsOwner()) + { + rmGpuLocksRelease(GPUS_LOCK_FLAGS_NONE, NULL); + bLockAcquire = NV_TRUE; + } + // Take a reference on the TSG ID by duping the TSG. 
Note that this is + // the easy way out because we dup more than strictly necessary: every + // channel registered under the same TSG will re-dup that TSG. In + // practice there's very little overhead to re-duping the TSG for each + // channel. + hDupTsg = NV01_NULL_OBJECT; + status = pRmApi->DupObject(pRmApi, + retainedChannel->session->handle, + retainedChannel->rmDevice->deviceHandle, + &hDupTsg, + RES_GET_CLIENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel->pKernelChannelGroupApi), + NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE); + if (status != NV_OK) + { + if (bLockAcquire) + { + NV_ASSERT_OK_OR_RETURN(rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GPU_OPS)); + } + return status; + } + + if (bLockAcquire) + { + if ((status = rmGpuLocksAcquire(GPUS_LOCK_FLAGS_NONE, RM_LOCK_MODULES_GPU_OPS)) != NV_OK) + { + pRmApi->Free(pRmApi, retainedChannel->session->handle, hDupTsg); + return status; + } + } + + tsgMaxSubctxCount = kfifoChannelGroupGetLocalMaxSubcontext_HAL( + pGpu, pKernelFifo, + pKernelChannelGroup, + pKernelChannelGroup->bLegacyMode); + + channelInstanceInfo->bTsgChannel = NV_TRUE; + channelInstanceInfo->tsgId = pKernelChannelGroup->grpID; + channelInstanceInfo->tsgMaxSubctxCount = tsgMaxSubctxCount; + + retainedChannel->hDupTsg = hDupTsg; + + return NV_OK; +} + + +static NV_STATUS nvGpuOpsGetChannelSmcInfo(gpuRetainedChannel *retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo, + KernelChannel *pKernelChannel, + struct gpuDevice *device) +{ + channelInstanceInfo->smcEngineId = 0; + channelInstanceInfo->smcEngineVeIdOffset = 0; + + NV_ASSERT_OR_RETURN(pKernelChannel != NULL, NV_ERR_INVALID_ARGUMENT); + + if (isDeviceAmperePlus(device) && retainedChannel->channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_GR) + { + OBJGPU *pGpu = retainedChannel->pGpu; + + if (IS_MIG_IN_USE(pGpu)) + { + NvU32 grFaultId; + NvU32 grMmuFaultEngId; + + const NvU32 grIdx = NV2080_ENGINE_TYPE_GR_IDX(kchannelGetEngineType(pKernelChannel)); + + NV_ASSERT_OK_OR_RETURN(kfifoEngineInfoXlate_HAL(pGpu, + GPU_GET_KERNEL_FIFO(pGpu), + ENGINE_INFO_TYPE_ENG_DESC, + ENG_GR(grIdx), + ENGINE_INFO_TYPE_MMU_FAULT_ID, + &grFaultId)); + + grMmuFaultEngId = kgmmuGetGraphicsEngineId_HAL(GPU_GET_KERNEL_GMMU(pGpu)); + NV_ASSERT(grFaultId >= grMmuFaultEngId); + + channelInstanceInfo->smcEngineId = grIdx; + channelInstanceInfo->smcEngineVeIdOffset = grFaultId - grMmuFaultEngId; + } + } + + return NV_OK; +} + + +static void nvGpuOpsGetChannelSubctxInfo(gpuRetainedChannel *retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo, + KernelChannel *pKernelChannel) +{ + OBJGPU *pGpu = retainedChannel->pGpu; + KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(pGpu); + NvHandle hDupKernelCtxShare = NV01_NULL_OBJECT; + RM_API *pRmApi; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN_VOID(pKernelChannel != NULL); + + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // Subcontexts are parented by the TSG, so we must have a reference on the + // TSG in order to retain the subcontext. The exception is if this channel + // was allocated without a TSG, in which case RM creates an internal TSG and + // subcontext which we shouldn't attempt to retain. In that case, we will + // have skipped duping the TSG earlier and hDupTsg == 0. + // + // pKernelChannelGroup->bLegacyMode means that the subcontext was + // created internally by RM, not by the user. 
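+    //
+    // In short: the kctxshare is duped only for GR channels that have a
+    // retained TSG and a user-created (non-legacy) subcontext. Every other
+    // channel reports subctxId 0 with bInSubctx set to NV_FALSE.
+    //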
+    if (kfifoIsSubcontextSupported(pKernelFifo) &&
+        pKernelChannel->pKernelCtxShareApi &&
+        retainedChannel->channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_GR &&
+        retainedChannel->hDupTsg &&
+        !pKernelChannel->pKernelChannelGroupApi->pKernelChannelGroup->bLegacyMode)
+    {
+
+        status = pRmApi->DupObject(pRmApi,
+                                   retainedChannel->session->handle,
+                                   retainedChannel->hDupTsg,
+                                   &hDupKernelCtxShare,
+                                   RES_GET_CLIENT_HANDLE(pKernelChannel),
+                                   RES_GET_HANDLE(pKernelChannel->pKernelCtxShareApi),
+                                   NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE);
+
+        NV_ASSERT(status == NV_OK);
+        retainedChannel->hDupKernelCtxShare = hDupKernelCtxShare;
+
+        // Faults report the VEID (aka subcontext ID), so we need to retain the
+        // subcontext ID. We do that by taking a reference on the entire
+        // subcontext object.
+        //
+        // pKernelCtxShare->pShareData is a pointer to the broadcast kctxshare data object
+        // We get VEID for this retained channel's GPU through that.
+
+        // Possibly better to go through the handle we just duped for this? Not sure how to do so.
+        channelInstanceInfo->subctxId = pKernelChannel->pKernelCtxShareApi->pShareData->subctxId;
+        channelInstanceInfo->bInSubctx = NV_TRUE;
+
+        // Make sure that we saw our GPU
+        NV_ASSERT(channelInstanceInfo->bInSubctx);
+        NV_ASSERT(channelInstanceInfo->subctxId < channelInstanceInfo->tsgMaxSubctxCount);
+    }
+    else
+    {
+        channelInstanceInfo->subctxId = 0;
+        channelInstanceInfo->bInSubctx = NV_FALSE;
+    }
+}
+
+// This function verifies that the instance pointer of the retainedChannel still
+// refers to a valid channel.
+static NV_STATUS nvGpuOpsGetChannelData(gpuRetainedChannel *retainedChannel,
+                                        KernelChannel **ppKernelChannel)
+{
+    NV2080_CTRL_FIFO_MEM_INFO instanceMemInfo;
+    INST_BLOCK_DESC inst;
+    KernelFifo *pKernelFifo = GPU_GET_KERNEL_FIFO(retainedChannel->pGpu);
+
+    kfifoFillMemInfo(pKernelFifo, retainedChannel->instanceMemDesc, &instanceMemInfo);
+
+    switch (instanceMemInfo.aperture)
+    {
+        case NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_COH:
+            inst.aperture = NV_MMU_PTE_APERTURE_SYSTEM_COHERENT_MEMORY;
+            break;
+        case NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_SYSMEM_NCOH:
+            inst.aperture = NV_MMU_PTE_APERTURE_SYSTEM_NON_COHERENT_MEMORY;
+            break;
+        case NV2080_CTRL_CMD_FIFO_GET_CHANNEL_MEM_APERTURE_VIDMEM:
+            inst.aperture = NV_MMU_PTE_APERTURE_VIDEO_MEMORY;
+            break;
+        default:
+            return NV_ERR_INVALID_CHANNEL;
+    }
+
+    inst.address = instanceMemInfo.base;
+    inst.gfid = GPU_GFID_PF; // Run in VF context w/o GFID
+
+    return kfifoConvertInstToKernelChannel_HAL(retainedChannel->pGpu,
+                                               pKernelFifo,
+                                               &inst,
+                                               ppKernelChannel);
+}
+
+NV_STATUS nvGpuOpsRetainChannel(struct gpuAddressSpace *vaSpace,
+                                NvHandle hClient,
+                                NvHandle hKernelChannel,
+                                gpuRetainedChannel **retainedChannel,
+                                gpuChannelInstanceInfo *channelInstanceInfo)
+{
+    nvGpuOpsLockSet acquiredLocks;
+    THREAD_STATE_NODE threadState;
+    KernelChannel *pKernelChannel = NULL;
+    OBJGPU *pGpu = NULL;
+    gpuRetainedChannel *channel = NULL;
+    NV_STATUS status = NV_OK;
+    struct gpuDevice *device;
+    subDeviceDesc *rmSubDevice;
+    NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN_PARAMS params = {0};
+    NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS channelRetainerParams = {0};
+    RM_API *pRmApi = NULL;
+    NvHandle hChannelParent = 0;
+
+    if (!vaSpace || !channelInstanceInfo)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+    status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ,
+                                      hClient,
+                                      NULL,
+                                      &acquiredLocks);
+    if (status != NV_OK)
+    {
+
threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + device = vaSpace->device; + rmSubDevice = device->rmSubDevice; + + status = nvGpuOpsVerifyChannel(vaSpace, hClient, hKernelChannel, &pGpu, + &pKernelChannel); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + portMemSet(channelInstanceInfo, 0, sizeof(*channelInstanceInfo)); + + channel = portMemAllocNonPaged(sizeof(*channel)); + if (channel == NULL) + { + status = NV_ERR_NO_MEMORY; + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + portMemSet(channel, 0, sizeof(*channel)); + channel->device = device; + channel->rmDevice = device->rmDevice; + channel->rmSubDevice = rmSubDevice; + channel->session = device->session; + channel->pGpu = pGpu; + + channelInstanceInfo->runlistId = kchannelGetRunlistId(pKernelChannel); + channelInstanceInfo->chId = pKernelChannel->ChID; + channel->chId = pKernelChannel->ChID; + channel->runlistId = kchannelGetRunlistId(pKernelChannel); + + status = nvGpuOpsGetChannelEngineType(pGpu, pKernelChannel, &channel->channelEngineType); + if (status != NV_OK) + goto error; + + status = nvGpuOpsGetChannelInstanceMemInfo(channel, channelInstanceInfo); + if (status != NV_OK) + goto error; + + status = nvGpuOpsGetChannelTsgInfo(channel, channelInstanceInfo, + pKernelChannel); + if (status != NV_OK) + goto error; + + status = nvGpuOpsGetChannelSmcInfo(channel, channelInstanceInfo, + pKernelChannel, device); + if (status != NV_OK) + goto error; + + nvGpuOpsGetChannelSubctxInfo(channel, channelInstanceInfo, pKernelChannel); + + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (channelInstanceInfo->bTsgChannel) + hChannelParent = channel->hDupTsg; + else + hChannelParent = channel->rmDevice->deviceHandle; + + channelRetainerParams.hClient = hClient; + channelRetainerParams.hChannel = hKernelChannel; + + NV_PRINTF(LEVEL_INFO, "%s:Channel duping is not supported. Fall back to UVM_CHANNEL_RETAINER\n", + __FUNCTION__); + + status = pRmApi->Alloc(pRmApi, + device->session->handle, + hChannelParent, + &channel->hChannelRetainer, + UVM_CHANNEL_RETAINER, + &channelRetainerParams); + if (status != NV_OK) + goto error; + + // Now get the token for submission on given channel. + status = pRmApi->Control(pRmApi, + hClient, + hKernelChannel, + NVC36F_CTRL_CMD_GPFIFO_GET_WORK_SUBMIT_TOKEN, + ¶ms, + sizeof(params)); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s:%d: %s\n", __FUNCTION__, + __LINE__, nvstatusToString(status)); + goto error; + } + + channelInstanceInfo->workSubmissionToken = params.workSubmitToken; + channelInstanceInfo->workSubmissionOffset = + (NvU32 *)((NvU8*)rmSubDevice->clientRegionMapping + NVC361_NOTIFY_CHANNEL_PENDING); + + // In Turing+ gpus, the CLEAR_FAULTED method requires a RM-provided handle + // to identify the channel. + // + // TODO: Bug 1905719: We are currently using the channel handle that is + // used for the doorbell mechanism. However, the values may differ in the + // future, so we may need a dedicated API to get the channel handle for + // CLEAR_FAULTED in RM. 
+ channelInstanceInfo->clearFaultedToken = channelInstanceInfo->workSubmissionToken; + + if (isDeviceAmperePlus(device)) + { + void *bar0Mapping = gpuBar0BaseAddress(pGpu); + NvU32 chramPri; + + NV_ASSERT_OK_OR_GOTO(status, kfifoEngineInfoXlate_HAL(pGpu, + GPU_GET_KERNEL_FIFO(pGpu), + ENGINE_INFO_TYPE_RUNLIST, + kchannelGetRunlistId(pKernelChannel), + ENGINE_INFO_TYPE_CHRAM_PRI_BASE, + &chramPri), error); + + chramPri += NV_CHRAM_CHANNEL(pKernelChannel->ChID); + + channelInstanceInfo->pChramChannelRegister = (NvU32 *)((NvU8*)bar0Mapping + chramPri); + } + + status = _nvGpuOpsRetainChannelResources(device, + hClient, + hKernelChannel, + channel, + channelInstanceInfo); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "%s:%d: %s\n", __FUNCTION__, + __LINE__, nvstatusToString(status)); + goto error; + } + + channelInstanceInfo->channelEngineType = channel->channelEngineType; + *retainedChannel = channel; + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_OK; + +error: + _nvGpuOpsReleaseChannel(channel); + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +static void _nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel) +{ + NV_STATUS status = NV_OK; + struct gpuSession *session; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + if (!retainedChannel) + return; + + _nvGpuOpsReleaseChannelResources(retainedChannel); + + session = retainedChannel->session; + NV_ASSERT(session); + + if (retainedChannel->hChannelRetainer) + { + status = pRmApi->Free(pRmApi, session->handle, retainedChannel->hChannelRetainer); + NV_ASSERT(status == NV_OK); + } + + // Release the subcontext if we retained it. Subcontexts are parented by the + // TSG, so we must release the subcontext before releasing the TSG. + if (retainedChannel->hDupKernelCtxShare) + { + NV_ASSERT(retainedChannel->hDupTsg); + status = pRmApi->Free(pRmApi, session->handle, retainedChannel->hDupKernelCtxShare); + NV_ASSERT(status == NV_OK); + } + + if (retainedChannel->hDupTsg) + { + status = pRmApi->Free(pRmApi, session->handle, retainedChannel->hDupTsg); + NV_ASSERT(status == NV_OK); + } + + + // Releasing the channel ID can only fail if the ID is no longer valid, + // which indicates a bug elsewhere. + NV_ASSERT(status == NV_OK); + + portMemFree(retainedChannel); +} + +void nvGpuOpsReleaseChannel(gpuRetainedChannel *retainedChannel) +{ + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + // TODO can we lock fewer GPUS with Channel information? 
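+    //
+    // If the locks cannot be acquired, the release is skipped entirely and the
+    // duped TSG/ctxshare handles stay held; there is no retry path here.
+    //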
+ if (_nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, + retainedChannel->session->handle, + NULL, + &acquiredLocks) != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return; + } + _nvGpuOpsReleaseChannel(retainedChannel); + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); +} + +static void +_shadowMemdescDestroy(gpuRetainedChannel *retainedChannel, + MEMORY_DESCRIPTOR *pMemDesc) +{ + if (pMemDesc->RefCount == 1) + { + mapRemoveByKey(&retainedChannel->device->kern2PhysDescrMap, (NvU64) pMemDesc); + } + + memdescDestroy(pMemDesc); +} + +NvBool _memDescFindAndRetain(gpuRetainedChannel *retainedChannel, + MEMORY_DESCRIPTOR *pBufferHandle, + MEMORY_DESCRIPTOR **ppMemDesc) +{ + MEMORY_DESCRIPTOR *pMemDesc = NULL; + MemdescMapIter iter = mapIterAll(&retainedChannel->device->kern2PhysDescrMap); + while (mapIterNext(&iter)) + { + MEMORY_DESCRIPTOR **ppValue = iter.pValue; + if (pBufferHandle == *ppValue) + { + NvU64 key = mapKey(&retainedChannel->device->kern2PhysDescrMap, ppValue); + pMemDesc = (MEMORY_DESCRIPTOR *) key; + break; + } + } + + if (pMemDesc != NULL) + { + _memdescRetain(pMemDesc); + *ppMemDesc = pMemDesc; + return NV_TRUE; + } + return NV_FALSE; +} + +static NV_STATUS +_shadowMemdescCreateFlcn(gpuRetainedChannel *retainedChannel, + NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS *pCtxBufferInfo, + MEMORY_DESCRIPTOR **ppMemDesc) +{ + MEMORY_DESCRIPTOR *pMemDesc = NULL; + MEMORY_DESCRIPTOR *pBufferHandle = (MEMORY_DESCRIPTOR *) pCtxBufferInfo->bufferHandle; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pCtxBufferInfo->bIsContigous, NV_ERR_INVALID_STATE); + + if (_memDescFindAndRetain(retainedChannel, pBufferHandle, ppMemDesc)) + return status; + + memdescCreate(&pMemDesc, + retainedChannel->pGpu, + pCtxBufferInfo->size, + pCtxBufferInfo->alignment, + pCtxBufferInfo->bIsContigous, + pCtxBufferInfo->aperture, + NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE + ); + + memdescSetPageSize(pMemDesc, 0, pCtxBufferInfo->pageSize); + + memdescDescribe(pMemDesc, pCtxBufferInfo->aperture, pCtxBufferInfo->physAddr, pCtxBufferInfo->size); + + (void) mapInsertValue(&retainedChannel->device->kern2PhysDescrMap, + (NvU64) pMemDesc, + &pBufferHandle); + *ppMemDesc = pMemDesc; + + return status; +} + + +static NV_STATUS +_shadowMemdescCreate(gpuRetainedChannel *retainedChannel, + NV2080_CTRL_GR_CTX_BUFFER_INFO *pCtxBufferInfo, + MEMORY_DESCRIPTOR **ppMemDesc) +{ + NvU32 j; + NvU32 pageSize = pCtxBufferInfo->pageSize; + NvU32 numBufferPages = NV_ROUNDUP(pCtxBufferInfo->size, pageSize) / pageSize; + MEMORY_DESCRIPTOR *pMemDesc = NULL; + MEMORY_DESCRIPTOR *pBufferHandle = (MEMORY_DESCRIPTOR *) pCtxBufferInfo->bufferHandle; + NV2080_CTRL_KGR_GET_CTX_BUFFER_PTES_PARAMS params = { 0 }; + NvU64 *pPages = NULL; + NV_STATUS status = NV_OK; + KernelChannel *pKernelChannel; + RM_API *pRmApi; + + if (_memDescFindAndRetain(retainedChannel, pBufferHandle, ppMemDesc)) + goto done; + + pPages = portMemAllocNonPaged(sizeof(*pPages) * numBufferPages); + if (pPages == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + status = nvGpuOpsGetChannelData(retainedChannel, &pKernelChannel); + if (status != NV_OK) + { + goto done; + } + + params.hUserClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + params.hChannel = RES_GET_HANDLE(pKernelChannel); + params.bufferType = pCtxBufferInfo->bufferType; + + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + for (j = 0; j < numBufferPages;) + { + params.firstPage = j; + status = pRmApi->Control(pRmApi, + 
retainedChannel->session->handle, + retainedChannel->rmSubDevice->subDeviceHandle, + NV2080_CTRL_CMD_KGR_GET_CTX_BUFFER_PTES, + ¶ms, + sizeof(params)); + if (status != NV_OK) + { + goto done; + } + + NV_ASSERT(j + params.numPages <= numBufferPages); + + if (pCtxBufferInfo->bIsContigous) + { + pPages[0] = (NvU64)params.physAddrs[0]; + break; + } + + portMemCopy(&pPages[j], params.numPages * sizeof(*pPages), + params.physAddrs, params.numPages * sizeof(*pPages)); + j += params.numPages; + } + + NV_ASSERT(params.bNoMorePages); + + memdescCreate(&pMemDesc, + retainedChannel->pGpu, + pCtxBufferInfo->size, + pCtxBufferInfo->alignment, + pCtxBufferInfo->bIsContigous, + pCtxBufferInfo->aperture, + NV_MEMORY_CACHED, + MEMDESC_FLAGS_NONE + ); + + memdescSetPageSize(pMemDesc, 0, pCtxBufferInfo->pageSize); + + if (pCtxBufferInfo->bIsContigous) + { + memdescDescribe(pMemDesc, pCtxBufferInfo->aperture, pPages[0], pCtxBufferInfo->size); + } + else + { + memdescFillPages(pMemDesc, 0, pPages, numBufferPages, pCtxBufferInfo->pageSize); + } + + (void) mapInsertValue(&retainedChannel->device->kern2PhysDescrMap, + (NvU64) pMemDesc, + &pBufferHandle); + *ppMemDesc = pMemDesc; + +done: + portMemFree(pPages); + return status; +} + +static NV_STATUS _nvGpuOpsRetainChannelResources(struct gpuDevice *device, + NvHandle hClient, + NvHandle hKernelChannel, + gpuRetainedChannel *retainedChannel, + gpuChannelInstanceInfo *channelInstanceInfo) +{ + NV_STATUS status = NV_OK; + NV2080_CTRL_GR_GET_CTX_BUFFER_INFO_PARAMS *pParams = NULL; + NV2080_CTRL_FLCN_GET_CTX_BUFFER_INFO_PARAMS *pFlcnParams = NULL; + gpuChannelResourceInfo *channelResourceInfo = channelInstanceInfo->resourceInfo; + KernelChannel *pKernelChannel; + RM_API *pRmApi; + NvU32 channelEngineType = retainedChannel->channelEngineType; + NvU32 i; + NvU32 j; + + NV_ASSERT(channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_CE || + channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_GR || + channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2); + + // CE channels have 0 resources, so they skip this step + if (channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_CE) + { + goto done; + } + + status = nvGpuOpsGetChannelData(retainedChannel, &pKernelChannel); + if (status != NV_OK) + { + goto done; + } + + if (channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_SEC2) + { + // get engine context memdesc, then get its PTEs. + MEMORY_DESCRIPTOR *pMemDesc = NULL; + + // single buffer + NV_ASSERT_OR_GOTO(NV_ARRAY_ELEMENTS(channelInstanceInfo->resourceInfo) >= 1, done); + + pFlcnParams = portMemAllocNonPaged(sizeof(*pFlcnParams)); + if (pFlcnParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + pFlcnParams->hUserClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + pFlcnParams->hChannel = RES_GET_HANDLE(pKernelChannel); + + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // This RM CTRL refcounts all the resource memdescs. 
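+        //
+        // The bufferHandle returned by this control is an RM-internal memory
+        // descriptor pointer; _shadowMemdescCreateFlcn() below wraps it in a
+        // shadow memdesc that the caller can retain and release safely.
+        //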
+ status = pRmApi->Control(pRmApi, + retainedChannel->session->handle, + retainedChannel->rmSubDevice->subDeviceHandle, + NV2080_CTRL_CMD_FLCN_GET_CTX_BUFFER_INFO, + pFlcnParams, + sizeof(*pFlcnParams)); + if (status != NV_OK) + goto done; + + gpuMemoryInfo *pGpuMemoryInfo = &channelResourceInfo[0].resourceInfo; + + channelResourceInfo[0].resourceDescriptor = pFlcnParams->bufferHandle; + channelResourceInfo[0].alignment = pFlcnParams->alignment; + pGpuMemoryInfo->pageSize = pFlcnParams->pageSize; + pGpuMemoryInfo->size = pFlcnParams->size; + pGpuMemoryInfo->contig = pFlcnParams->bIsContigous; + pGpuMemoryInfo->physAddr = pFlcnParams->physAddr; + pGpuMemoryInfo->kind = pFlcnParams->kind; + pGpuMemoryInfo->sysmem = pFlcnParams->aperture == ADDR_SYSMEM; + pGpuMemoryInfo->deviceDescendant = pFlcnParams->bDeviceDescendant; + + portMemCopy(pGpuMemoryInfo->uuid.uuid, sizeof(pGpuMemoryInfo->uuid.uuid), + pFlcnParams->uuid, sizeof(pFlcnParams->uuid)); + + status = _shadowMemdescCreateFlcn(retainedChannel, pFlcnParams, &pMemDesc); + if (status != NV_OK) + goto done; + + channelResourceInfo[0].resourceDescriptor = (NvP64) pMemDesc; + retainedChannel->resourceMemDesc[0] = pMemDesc; + + channelInstanceInfo->resourceCount = 1; + retainedChannel->resourceCount = 1; + goto done; + } + + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + pParams->hUserClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + pParams->hChannel = RES_GET_HANDLE(pKernelChannel); + + pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + // This RM CTRL refcounts all the resource memdescs. + status = pRmApi->Control(pRmApi, + retainedChannel->session->handle, + retainedChannel->rmSubDevice->subDeviceHandle, + NV2080_CTRL_CMD_GR_GET_CTX_BUFFER_INFO, + pParams, + sizeof(*pParams)); + if (status != NV_OK) + goto done; + + NV_ASSERT(pParams->bufferCount <= NV_ARRAY_ELEMENTS(channelInstanceInfo->resourceInfo)); + + for (i = 0; i < pParams->bufferCount; i++) + { + MEMORY_DESCRIPTOR *pMemDesc = NULL; + NV2080_CTRL_GR_CTX_BUFFER_INFO *pCtxBufferInfo = &pParams->ctxBufferInfo[i]; + gpuMemoryInfo *pGpuMemoryInfo = &channelResourceInfo[i].resourceInfo; + + channelResourceInfo[i].resourceDescriptor = pCtxBufferInfo->bufferHandle; + channelResourceInfo[i].resourceId = pCtxBufferInfo->bufferType; + channelResourceInfo[i].alignment = pCtxBufferInfo->alignment; + pGpuMemoryInfo->pageSize = pCtxBufferInfo->pageSize; + pGpuMemoryInfo->size = pCtxBufferInfo->size; + pGpuMemoryInfo->contig = pCtxBufferInfo->bIsContigous; + pGpuMemoryInfo->physAddr = pCtxBufferInfo->physAddr; + pGpuMemoryInfo->kind = pCtxBufferInfo->kind; + pGpuMemoryInfo->sysmem = pCtxBufferInfo->aperture == ADDR_SYSMEM; + pGpuMemoryInfo->deviceDescendant = pCtxBufferInfo->bDeviceDescendant; + + portMemCopy(pGpuMemoryInfo->uuid.uuid, sizeof(pGpuMemoryInfo->uuid.uuid), + pCtxBufferInfo->uuid, sizeof(pCtxBufferInfo->uuid)); + + status = _shadowMemdescCreate(retainedChannel, pCtxBufferInfo, &pMemDesc); + if (status != NV_OK) + goto cleanup; + + channelResourceInfo[i].resourceDescriptor = (NvP64) pMemDesc; + retainedChannel->resourceMemDesc[i] = pMemDesc; + } + + channelInstanceInfo->resourceCount = pParams->bufferCount; + retainedChannel->resourceCount = pParams->bufferCount; + +cleanup: + if (status != NV_OK) + { + for (j = 0; j < i; j++) + { + _shadowMemdescDestroy(retainedChannel, retainedChannel->resourceMemDesc[j]); + } + } + +done: + portMemFree(pParams); + portMemFree(pFlcnParams); + return status; +} + 
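+//
+// Drop the references taken on the shadow memdescs by
+// _nvGpuOpsRetainChannelResources(); _shadowMemdescDestroy() also removes the
+// kern2PhysDescrMap entry when the last reference goes away.
+//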
+static void _nvGpuOpsReleaseChannelResources(gpuRetainedChannel *retainedChannel) +{ + NvU32 i; + NvU32 descriptorCount = retainedChannel->resourceCount; + + for (i = 0; i < descriptorCount; i++) + { + MEMORY_DESCRIPTOR *pMemDesc = retainedChannel->resourceMemDesc[i]; + + _shadowMemdescDestroy(retainedChannel, pMemDesc); + } +} + +NV_STATUS nvGpuOpsGetChannelResourcePtes(struct gpuAddressSpace *vaSpace, + NvP64 resourceDescriptor, + NvU64 offset, + NvU64 size, + gpuExternalMappingInfo *pGpuExternalMappingInfo) +{ + NV_STATUS status = NV_OK; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + NvHandle hSubDevice; + PMEMORY_DESCRIPTOR pMemDesc = NULL; + OBJGPU *pMappingGpu = NULL; + OBJVASPACE *pVAS = NULL; + RsClient *pClient; + + if (!vaSpace || !resourceDescriptor || !pGpuExternalMappingInfo) + return NV_ERR_INVALID_ARGUMENT; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, + vaSpace->device->session->handle, + NULL, + &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + pMemDesc = (MEMORY_DESCRIPTOR *) NvP64_VALUE(resourceDescriptor); + + status = CliSetSubDeviceContext(vaSpace->device->session->handle, + vaSpace->device->subhandle, + &hSubDevice, + &pMappingGpu); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + if (pMemDesc->pGpu != pMappingGpu) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_NOT_SUPPORTED; + } + + // Do not support mapping on anything other than sysmem/vidmem! + if ((memdescGetAddressSpace(pMemDesc) != ADDR_SYSMEM) && + (memdescGetAddressSpace(pMemDesc) != ADDR_FBMEM)) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_NOT_SUPPORTED; + } + + status = serverGetClientUnderLock(&g_resServ, vaSpace->device->session->handle, &pClient); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = vaspaceGetByHandleOrDeviceDefault(pClient, + vaSpace->device->handle, + vaSpace->handle, + &pVAS); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = nvGpuOpsBuildExternalAllocPtes(pVAS, pMappingGpu, pMemDesc, NULL, + offset, size, NV_FALSE, NV_FALSE, + 0, pGpuExternalMappingInfo); + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +NV_STATUS nvGpuOpsBindChannelResources(gpuRetainedChannel *retainedChannel, + gpuChannelResourceBindParams *channelResourceBindParams) +{ + NV_STATUS status = NV_OK; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *pParams; + NvU32 i; + KernelChannel *pKernelChannel = NULL; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, + retainedChannel->session->handle, + NULL, + &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = nvGpuOpsGetChannelData(retainedChannel, &pKernelChannel); + if (status != NV_OK) + { + 
_nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + // Unregister channel resources. CE channels have 0 resources, so they skip this step + if (retainedChannel->resourceCount != 0) + { + pParams = portMemAllocNonPaged(sizeof(*pParams)); + if (pParams == NULL) + { + status = NV_ERR_NO_MEMORY; + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + portMemSet(pParams, 0, sizeof(*pParams)); + + pParams->hChanClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + pParams->hObject = RES_GET_HANDLE(pKernelChannel); + pParams->entryCount = retainedChannel->resourceCount; + + status = kfifoEngineInfoXlate_HAL(retainedChannel->pGpu, + GPU_GET_KERNEL_FIFO(retainedChannel->pGpu), + ENGINE_INFO_TYPE_RUNLIST, + retainedChannel->runlistId, + ENGINE_INFO_TYPE_NV2080, + &(pParams->engineType)); + + for (i = 0; i < retainedChannel->resourceCount; i++) + { + if (NV2080_ENGINE_TYPE_IS_GR(pParams->engineType)) + pParams->promoteEntry[i].bufferId = channelResourceBindParams[i].resourceId; + pParams->promoteEntry[i].gpuVirtAddr = channelResourceBindParams[i].resourceVa; + } + + status = pRmApi->Control(pRmApi, + retainedChannel->session->handle, + retainedChannel->rmSubDevice->subDeviceHandle, + NV2080_CTRL_CMD_GPU_PROMOTE_CTX, + pParams, + sizeof(*pParams)); + + portMemFree(pParams); + } + + if (NV_OK == status) + { + pKernelChannel->bIsContextBound = NV_TRUE; + } + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; +} + +// nvGpuOpsRetainChannelResources only increments the ref-counts of the memdescs under the channel. +// It does not prevent the user from freeing the associated hClient and hChannel handles, which means +// the instance pointer may no longer be associated with a user object at this point. +// If the instance pointer still has an associated channel, the channel is preempted and disabled. 
+// Otherwise that must have already happened, so we just need to drop the ref counts on the resources +void nvGpuOpsStopChannel(gpuRetainedChannel *retainedChannel, + NvBool bImmediate) +{ + NV_STATUS status = NV_OK; + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + KernelChannel *pKernelChannel = NULL; + RsResourceRef *pResourceRef; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + NVA06F_CTRL_STOP_CHANNEL_PARAMS stopChannelParams = {0}; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + if (_nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, + retainedChannel->session->handle, + NULL, + &acquiredLocks) != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return; + } + + status = nvGpuOpsGetChannelData(retainedChannel, &pKernelChannel); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return; + } + + // Verify this channel handle is still valid + status = serverutilGetResourceRef(RES_GET_CLIENT_HANDLE(pKernelChannel), RES_GET_HANDLE(pKernelChannel), &pResourceRef); + if (status != NV_OK) + { + NV_ASSERT(0); + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return; + } + + stopChannelParams.bImmediate = bImmediate; + NV_ASSERT_OK( + pRmApi->Control(pRmApi, + RES_GET_CLIENT_HANDLE(pKernelChannel), + RES_GET_HANDLE(pKernelChannel), + NVA06F_CTRL_CMD_STOP_CHANNEL, + &stopChannelParams, + sizeof(stopChannelParams))); + + pKernelChannel->bIsContextBound = NV_FALSE; + + if (retainedChannel->channelEngineType == UVM_GPU_CHANNEL_ENGINE_TYPE_GR) + { + NV2080_CTRL_GPU_EVICT_CTX_PARAMS params; + + portMemSet(¶ms, 0, sizeof(params)); + params.engineType = NV2080_ENGINE_TYPE_GR(0); + params.hClient = retainedChannel->session->handle; + params.hChanClient = RES_GET_CLIENT_HANDLE(pKernelChannel); + params.hObject = RES_GET_HANDLE(pKernelChannel); + + NV_ASSERT_OK( + pRmApi->Control(pRmApi, + retainedChannel->session->handle, + retainedChannel->rmSubDevice->subDeviceHandle, + NV2080_CTRL_CMD_GPU_EVICT_CTX, + ¶ms, + sizeof(params))); + } + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); +} + +// Make sure the UVM and PMA structs are in sync +// The following location(s) need to be synced as well: +// - uvm8_pmm_gpu.c:uvm8_test_pmm_query_pma_stats +ct_assert(sizeof(UvmPmaStatistics) == sizeof(PMA_STATS)); +ct_assert(NV_OFFSETOF(UvmPmaStatistics, numPages2m) == NV_OFFSETOF(PMA_STATS, num2mbPages)); +ct_assert(NV_OFFSETOF(UvmPmaStatistics, numFreePages64k) == NV_OFFSETOF(PMA_STATS, numFreeFrames)); +ct_assert(NV_OFFSETOF(UvmPmaStatistics, numFreePages2m) == NV_OFFSETOF(PMA_STATS, numFree2mbPages)); + +/*! + * Retrieve the PMA (Physical Memory Allocator) object initialized by RM + * for the given device. + * + * @param[in] device device handle obtained in a prior call + * to nvGpuOpsRmDeviceCreate. + * + * @param[out] pPmaObject Void pointer to RM PMA object of associated GPU + * NULL if PMA not enabled & initialized. + * @param[out] pPmaPubStats Pointer to UVM PMA statistics object of + * associated GPU. Cannot be NULL. 
+ * + * @returns NV_OK on success, + * NV_ERR_INVALID_ARGUMENT if NULL pPmaObject, + * NV_ERR_OBJECT_NOT_FOUND if PMA object not found + * NV_ERR_NOT_SUPPORTED if PMA not supported + */ +NV_STATUS nvGpuOpsGetPmaObject(struct gpuDevice *device, + void **pPmaObject, + const UvmPmaStatistics **pPmaStats) +{ + nvGpuOpsLockSet acquiredLocks; + THREAD_STATE_NODE threadState; + OBJGPU *pGpu = NULL; + Heap *pHeap = NULL; + MemoryManager *pMemoryManager; + struct gpuSession *session = device->session; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + status = _nvGpuOpsLocksAcquireAll(RMAPI_LOCK_FLAGS_READ, session->handle, NULL, &acquiredLocks); + if (status != NV_OK) + { + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return status; + } + + status = CliSetGpuContext(session->handle, device->handle, &pGpu, NULL); + if (status != NV_OK) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_OBJECT_NOT_FOUND; + } + + pMemoryManager = GPU_GET_MEMORY_MANAGER(pGpu); + if (pMemoryManager == NULL) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (IS_MIG_IN_USE(pGpu)) + { + KernelMIGManager *pKernelMIGManager = GPU_GET_KERNEL_MIG_MANAGER(pGpu); + + status = kmigmgrGetMemoryPartitionHeapFromClient(pGpu, pKernelMIGManager, session->handle, &pHeap); + if (status != NV_OK) + return status; + } + else + pHeap = GPU_GET_HEAP(pGpu); + + if (pHeap == NULL) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (!memmgrIsPmaInitialized(pMemoryManager)) + { + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_ERR_OBJECT_NOT_FOUND; + } + + *pPmaObject = (void *)&pHeap->pmaObject; + *pPmaStats = (const UvmPmaStatistics *)&pHeap->pmaObject.pmaStats; + + _nvGpuOpsLocksRelease(&acquiredLocks); + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + return NV_OK; +} + +NV_STATUS nvGpuOpsP2pObjectCreate(struct gpuDevice *device1, + struct gpuDevice *device2, + NvHandle *hP2pObject) +{ + NV_STATUS status; + NV503B_ALLOC_PARAMETERS p2pAllocParams = {0}; + NvHandle hTemp = 0; + struct systemP2PCaps p2pCaps; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + struct gpuSession *session; + + if (!device1 || !device2 || !hP2pObject) + return NV_ERR_INVALID_ARGUMENT; + + if (device1->session != device2->session) + return NV_ERR_INVALID_ARGUMENT; + + status = getSystemP2PCaps(device1, device2, &p2pCaps); + if (status != NV_OK) + return status; + + if (!p2pCaps.accessSupported) + return NV_ERR_NOT_SUPPORTED; + + p2pAllocParams.hSubDevice = device1->subhandle; + p2pAllocParams.hPeerSubDevice = device2->subhandle; + + session = device1->session; + hTemp = NV01_NULL_OBJECT; + status = pRmApi->Alloc(pRmApi, session->handle, session->handle, &hTemp, NV50_P2P, &p2pAllocParams); + if (status == NV_OK) + *hP2pObject = hTemp; + + return status; +} + +NV_STATUS nvGpuOpsP2pObjectDestroy(struct gpuSession *session, + NvHandle hP2pObject) +{ + NV_STATUS status = NV_OK; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + NV_ASSERT(session); + + status = pRmApi->Free(pRmApi, session->handle, hP2pObject); + NV_ASSERT(status == NV_OK); + return status; +} + +NV_STATUS nvGpuOpsReportNonReplayableFault(struct gpuDevice *device, + const void *pFaultPacket) +{ + NV_STATUS 
status = NV_OK; + NV2080_CTRL_GPU_REPORT_NON_REPLAYABLE_FAULT_PARAMS params; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + + if (device == NULL || pFaultPacket == NULL) + return NV_ERR_INVALID_ARGUMENT; + + portMemSet(¶ms, 0, sizeof(params)); + + portMemCopy(¶ms.faultPacket.data, + NV2080_CTRL_GPU_FAULT_PACKET_SIZE, + pFaultPacket, + NV2080_CTRL_GPU_FAULT_PACKET_SIZE); + + status = pRmApi->Control(pRmApi, + device->session->handle, + device->subhandle, + NV2080_CTRL_CMD_GPU_REPORT_NON_REPLAYABLE_FAULT, + ¶ms, + sizeof(params)); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: NV2080_CTRL_CMD_GPU_REPORT_NON_REPLAYABLE_FAULTreturned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + } + + return status; +} + +NV_STATUS nvGpuOpsPagingChannelAllocate(struct gpuDevice *device, + const gpuPagingChannelAllocParams *params, + gpuPagingChannelHandle *channelHandle, + gpuPagingChannelInfo *channelInfo) +{ + NV_STATUS status, status2; + UvmGpuPagingChannel *channel = NULL; + Device *pDevice; + RsClient *pClient; + NvHandle hClient; + NvLength errorNotifierSize; + NvU64 paOffset; + gpuAllocInfo allocInfo = {0}; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + NvU32 pid = osGetCurrentProcess(); + + if (!device || !params || !channelHandle || !channelInfo) + return NV_ERR_INVALID_ARGUMENT; + + if (!NV2080_ENGINE_TYPE_IS_COPY(NV2080_ENGINE_TYPE_COPY(params->engineIndex))) + return NV_ERR_INVALID_ARGUMENT; + + hClient = device->session->handle; + NV_ASSERT(hClient); + + channel = portMemAllocNonPaged(sizeof(*channel)); + if (!channel) + return NV_ERR_NO_MEMORY; + + portMemSet(channel, 0, sizeof(*channel)); + channel->device = device; + + errorNotifierSize = sizeof(NvNotification) * + NV_CHANNELGPFIFO_NOTIFICATION_TYPE__SIZE_1; + status = nvGpuOpsAllocPhysical(device, + NV_TRUE, + errorNotifierSize, + &paOffset, + &allocInfo); + if (status != NV_OK) + goto cleanup_free_channel; + + channel->errorNotifierHandle = allocInfo.hPhysHandle; + NV_ASSERT(channel->errorNotifierHandle); + + status = pRmApi->MapToCpu(pRmApi, + hClient, + device->subhandle, + channel->errorNotifierHandle, + 0, + errorNotifierSize, + (void **)&channel->errorNotifier, + 0); + if (status != NV_OK) + goto cleanup_free_error_notifier; + + NV_ASSERT(channel->errorNotifier); + + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + goto cleanup_unmap_error_notifier; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + goto cleanup_under_rmapi_lock; + + status = deviceGetByHandle(pClient, device->handle, &pDevice); + if (status != NV_OK) + goto cleanup_under_rmapi_lock; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + rmapiLockRelease(); + + *channelHandle = channel; + + channelInfo->shadowErrorNotifier = channel->errorNotifier; + + return NV_OK; + +cleanup_under_rmapi_lock: + rmapiLockRelease(); + +cleanup_unmap_error_notifier: + status2 = pRmApi->UnmapFromCpu(pRmApi, + hClient, + device->subhandle, + channel->errorNotifierHandle, + (void *)channel->errorNotifier, + 0, + pid); + NV_ASSERT(status2 == NV_OK); + +cleanup_free_error_notifier: + pRmApi->Free(pRmApi, hClient, channel->errorNotifierHandle); + +cleanup_free_channel: + portMemFree(channel); + + return status; +} + +void nvGpuOpsPagingChannelDestroy(UvmGpuPagingChannel *channel) +{ + NV_STATUS status; + struct gpuDevice *device; + Device *pDevice; + RsClient *pClient; + NvHandle hClient; + RM_API *pRmApi = rmapiGetInterface(RMAPI_EXTERNAL_KERNEL); + 
NvU32 pid = osGetCurrentProcess(); + + NV_ASSERT(channel); + + device = channel->device; + NV_ASSERT(device); + + hClient = device->session->handle; + NV_ASSERT(hClient); + + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: rmapiLockAcquire returned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + goto cleanup; + } + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: serverGetClientUnderLock returned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + goto cleanup_under_rmapi_lock; + } + + status = deviceGetByHandle(pClient, device->handle, &pDevice); + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: deviceGetByHandle returned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + goto cleanup_under_rmapi_lock; + } + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + +cleanup_under_rmapi_lock: + rmapiLockRelease(); + +cleanup: + status = pRmApi->UnmapFromCpu(pRmApi, + hClient, + device->subhandle, + channel->errorNotifierHandle, + (void *)channel->errorNotifier, + 0, + pid); + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: UnmapFromCpu returned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + } + + pRmApi->Free(pRmApi, hClient, channel->errorNotifierHandle); + portMemFree(channel); +} + +NV_STATUS nvGpuOpsPagingChannelsMap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device, + NvU64 *dstAddress) +{ + NV_STATUS status; + Device *pDevice; + RsClient *pClient; + NvHandle hAllocation; + NvHandle hClient; + + if (!srcVaSpace || !device || !dstAddress) + return NV_ERR_INVALID_ARGUMENT; + + hClient = device->session->handle; + NV_ASSERT(hClient); + + + status = getHandleForVirtualAddr(srcVaSpace, srcAddress, NV_TRUE, &hAllocation); + if (status != NV_OK) + return status; + + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + if (status != NV_OK) + return status; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + if (status != NV_OK) + goto exit_under_rmapi_lock; + + status = deviceGetByHandle(pClient, device->handle, &pDevice); + if (status != NV_OK) + goto exit_under_rmapi_lock; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + +exit_under_rmapi_lock: + rmapiLockRelease(); + + return status; +} + +void nvGpuOpsPagingChannelsUnmap(struct gpuAddressSpace *srcVaSpace, + NvU64 srcAddress, + struct gpuDevice *device) +{ + NV_STATUS status; + Device *pDevice; + RsClient *pClient; + NvHandle hAllocation; + NvHandle hClient; + + NV_ASSERT(srcVaSpace && device); + if (!srcVaSpace || !device) + return; + + hClient = device->session->handle; + NV_ASSERT(hClient); + + status = getHandleForVirtualAddr(srcVaSpace, srcAddress, NV_TRUE, &hAllocation); + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: getHandleForVirtualAddr returned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + return; + } + + status = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_GPU_OPS); + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: rmapiLockAcquire returned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + return; + } + + status = serverGetClientUnderLock(&g_resServ, hClient, &pClient); + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { 
+ NV_PRINTF(LEVEL_ERROR, + "%s: serverGetClientUnderLock returned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + goto exit_under_rmapi_lock; + } + + status = deviceGetByHandle(pClient, device->handle, &pDevice); + NV_ASSERT(status == NV_OK); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "%s: deviceGetByHandle returned error %s!\n", + __FUNCTION__, nvstatusToString(status)); + goto exit_under_rmapi_lock; + } + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + +exit_under_rmapi_lock: + rmapiLockRelease(); +} + +NV_STATUS nvGpuOpsPagingChannelPushStream(UvmGpuPagingChannel *channel, + char *methodStream, + NvU32 methodStreamSize) +{ + NV_STATUS status; + struct gpuDevice *device = NULL; + Device *pDevice; + RsClient *pClient; + + if (!channel || !methodStream) + return NV_ERR_INVALID_ARGUMENT; + if (methodStreamSize == 0) + return NV_OK; + + device = channel->device; + NV_ASSERT(device); + + status = serverGetClientUnderLock(&g_resServ, device->session->handle, &pClient); + if (status != NV_OK) + return status; + + status = deviceGetByHandle(pClient, device->handle, &pDevice); + if (status != NV_OK) + return status; + + GPU_RES_SET_THREAD_BC_STATE(pDevice); + + return status; +} + +static NV_STATUS nvGpuOpsGetMemoryByHandle(NvHandle hClient, NvHandle hMemory, Memory **ppMemory) +{ + RsClient *pRsClient = NULL; + + NV_ASSERT_OK_OR_RETURN(serverGetClientUnderLock(&g_resServ, + hClient, &pRsClient)); + + return memGetByHandle(pRsClient, + hMemory, + ppMemory); +} + diff --git a/src/nvidia/src/kernel/rmapi/param_copy.c b/src/nvidia/src/kernel/rmapi/param_copy.c new file mode 100644 index 000000000..694cf6a9d --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/param_copy.c @@ -0,0 +1,314 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "core/core.h" +#include "core/system.h" +#include "rmapi/rmapi.h" +#include "rmapi/param_copy.h" +#include "rmapi/alloc_size.h" +#include "rmapi/control.h" +#include "os/os.h" + +NV_STATUS rmapiParamsAcquire +( + RMAPI_PARAM_COPY *pParamCopy, + NvBool bUserModeArgs +) +{ + NvBool bUseParamsDirectly; + void *pKernelParams = NULL; + NV_STATUS rmStatus = NV_OK; + OBJSYS *pSys = SYS_GET_INSTANCE(); + + // Error check parameters + if (((pParamCopy->paramsSize != 0) && (pParamCopy->pUserParams == NvP64_NULL)) || + ((pParamCopy->paramsSize == 0) && (pParamCopy->pUserParams != NvP64_NULL)) || + !pParamCopy->bSizeValid) + { + NV_PRINTF(LEVEL_WARNING, + "%s: bad params from client: ptr " NvP64_fmt " size: 0x%x (%s)\n", + pParamCopy->msgTag, pParamCopy->pUserParams, pParamCopy->paramsSize, + pParamCopy->bSizeValid ? "valid" : "invalid"); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + bUseParamsDirectly = (pParamCopy->paramsSize == 0) || (!bUserModeArgs); + + // if we can use client params directly, we're done. + if (bUseParamsDirectly) + { + if (pSys->getProperty(pSys, PDB_PROP_SYS_VALIDATE_KERNEL_BUFFERS)) + { + // Check that its a kernel pointer + rmStatus = osIsKernelBuffer((void*)NvP64_VALUE(pParamCopy->pUserParams), + pParamCopy->paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Error validating kernel pointer. Status 0x%x\n", + rmStatus); + goto done; + } + } + + pParamCopy->flags |= RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE; + pKernelParams = NvP64_VALUE(pParamCopy->pUserParams); + goto done; + } + + if (!(pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_DISABLE_MAX_SIZE_CHECK)) + { + if (pParamCopy->paramsSize > RMAPI_PARAM_COPY_MAX_PARAMS_SIZE) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): Requested size exceeds max (%ud > %ud)\n", + pParamCopy->msgTag, pParamCopy->paramsSize, + RMAPI_PARAM_COPY_MAX_PARAMS_SIZE); + rmStatus = NV_ERR_INVALID_ARGUMENT; + goto done; + } + } + + pKernelParams = portMemAllocNonPaged(pParamCopy->paramsSize); + if (pKernelParams == NULL) + { + rmStatus = NV_ERR_INSUFFICIENT_RESOURCES; + NV_PRINTF(LEVEL_WARNING, "(%s): portMemAllocNonPaged failure: status 0x%x\n", + pParamCopy->msgTag, rmStatus); + goto done; + } + + // Copyin unless directed otherwise + if (pParamCopy->pUserParams) + { + if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_SKIP_COPYIN) + { + if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_ZERO_BUFFER) + portMemSet(pKernelParams, 0, pParamCopy->paramsSize); + } + else + { + rmStatus = portMemExCopyFromUser(pParamCopy->pUserParams, pKernelParams, pParamCopy->paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): portMemExCopyFromUser failure: status 0x%x\n", + pParamCopy->msgTag, rmStatus); + goto done; + } + } + } + +done: + if (rmStatus != NV_OK) // There was an error, be sure to free the buffer + { + portMemFree(pKernelParams); + pKernelParams = NULL; + } + + NV_ASSERT(pParamCopy->ppKernelParams != NULL); + *(pParamCopy->ppKernelParams) = pKernelParams; + return rmStatus; +} + +// +// Copyout if needed and free any tmp param buffer +// Skips copyout if API_PARAMS_SKIP_COPYOUT is set. 
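+//
+// Illustrative call sequence (a sketch based only on the fields checked by
+// rmapiParamsAcquire()/rmapiParamsRelease() here; the local names paramCopy
+// and pKernelParams are placeholders, not taken from this change):
+//
+//     RMAPI_PARAM_COPY paramCopy;      // caller-owned descriptor
+//     void            *pKernelParams;  // receives a kernel-usable pointer
+//
+//     paramCopy.msgTag         = "example";
+//     paramCopy.pUserParams    = pUserParams;    // client pointer (NvP64)
+//     paramCopy.paramsSize     = paramsSize;
+//     paramCopy.bSizeValid     = NV_TRUE;
+//     paramCopy.flags          = 0;              // no special handling
+//     paramCopy.ppKernelParams = &pKernelParams;
+//
+//     status = rmapiParamsAcquire(&paramCopy, bUserModeArgs);
+//     // ... operate on pKernelParams ...
+//     status = rmapiParamsRelease(&paramCopy);   // copies out unless SKIP_COPYOUT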
+// +NV_STATUS rmapiParamsRelease +( + RMAPI_PARAM_COPY *pParamCopy +) +{ + NV_STATUS rmStatus = NV_OK; + + // nothing to do, rmapiParamsAcquire() is either not called or not completed + if (NULL == pParamCopy->ppKernelParams) + return NV_OK; + + // if using the client's buffer directly, there's nothing to do + if (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_IS_DIRECT_USAGE) + goto done; + + // if no kernel param ptr, there must be nothing to copy out + // This can only happen if rmapiParamsAccess() returned an error, + // but we need to handle it since rmapiParamsRelease() might be + // called anyway. + if (NULL == *pParamCopy->ppKernelParams) + goto done; + + // do the copyout if something to copy, unless told to skip it... + if (pParamCopy->pUserParams && ! (pParamCopy->flags & RMAPI_PARAM_COPY_FLAGS_SKIP_COPYOUT)) + { + rmStatus = portMemExCopyToUser(*(pParamCopy->ppKernelParams), pParamCopy->pUserParams, pParamCopy->paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): portMemExCopyToUser failure: status 0x%x\n", + pParamCopy->msgTag, rmStatus); + + // even if the copyout fails, we still need to free the kernel mem + } + } + + portMemFree(*pParamCopy->ppKernelParams); + +done: + // no longer ok to use the ptr, even if it was a direct usage + *pParamCopy->ppKernelParams = NULL; + return rmStatus; +} + +// This is a one-shot suitable for a case where we already have a kernel +// buffer and just need to copy into it from a user buffer. +// Not for general use... +// +// It uses the same logic as rmapiParamsAccess(), but does not maintain +// an RMAPI_PARAM_COPY container. +NV_STATUS rmapiParamsCopyIn +( + const char *msgTag, + void *pKernelParams, + NvP64 pUserParams, + NvU32 paramsSize, + NvBool bUserModeArgs +) +{ + NV_STATUS rmStatus; + + // error check parameters + if ((paramsSize == 0) || + (pKernelParams == NULL) || + (pUserParams == NvP64_NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): bad params from client: ptr " NvP64_fmt " size: 0x%x\n", + msgTag, pUserParams, paramsSize); + + return NV_ERR_INVALID_ARGUMENT; + } + + // if we can use client params directly, just memcpy() + if (bUserModeArgs == NV_FALSE) + { + // If the same ptr we can skip the memcpy + if (pKernelParams != NvP64_VALUE(pUserParams)) + { + (void) portMemCopy(pKernelParams, paramsSize, NvP64_VALUE(pUserParams), paramsSize); + } + rmStatus = NV_OK; + } + else + { + rmStatus = portMemExCopyFromUser(pUserParams, pKernelParams, paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): portMemExCopyFromUser failure: status 0x%x\n", + msgTag, rmStatus); + } + } + + return rmStatus; +} + +// This is a one-shot suitable for a case where we already have a kernel +// buffer and just need to copy it out correctly to a user buffer. +// Not for general use... +// +// It uses the same logic as rmapiParamsAccess(), but does not maintain +// an RMAPI_PARAM_COPY container. 
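+//
+// Illustrative use (a sketch; the control name and parameter struct below are
+// placeholders, not taken from this change):
+//
+//     SOME_CTRL_PARAMS kernelCopy;
+//     // ... fill kernelCopy from kernel-side state ...
+//     rmStatus = rmapiParamsCopyOut("someCtrl", &kernelCopy, pUserParams,
+//                                   sizeof(kernelCopy), bUserModeArgs);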
+ +NV_STATUS rmapiParamsCopyOut +( + const char *msgTag, + void *pKernelParams, + NvP64 pUserParams, + NvU32 paramsSize, + NvBool bUserModeArgs +) +{ + NV_STATUS rmStatus; + + // error check parameters + if ((paramsSize == 0) || + (pKernelParams == NULL) || + (pUserParams == NvP64_NULL)) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): bad params from client: ptr " NvP64_fmt " size: 0x%x\n", + msgTag, pUserParams, paramsSize); + + return NV_ERR_INVALID_ARGUMENT; + } + + // if we can use client params directly, just memcpy() + if (bUserModeArgs == NV_FALSE) + { + // If the same ptr we can skip the memcpy + if (pKernelParams != NvP64_VALUE(pUserParams)) + { + (void) portMemCopy(NvP64_VALUE(pUserParams), paramsSize, pKernelParams, paramsSize); + } + rmStatus = NV_OK; + } + else + { + rmStatus = portMemExCopyToUser(pKernelParams, pUserParams, paramsSize); + if (rmStatus != NV_OK) + { + NV_PRINTF(LEVEL_WARNING, + "(%s): portMemExCopyToUser failure: status 0x%x\n", + msgTag, rmStatus); + } + } + + return rmStatus; +} + +NV_STATUS +rmapiParamsCopyInit +( + RMAPI_PARAM_COPY *pParamCopy, + NvU32 hClass +) +{ + NvU32 status; + NvBool bAllowNull; + + status = rmapiGetClassAllocParamSize(&pParamCopy->paramsSize, + pParamCopy->pUserParams, + &bAllowNull, + hClass); + if (status != NV_OK) + return status; + + // NULL pUserParams is not allowed for given class + if (bAllowNull == NV_FALSE && pParamCopy->pUserParams == NvP64_NULL) + return NV_ERR_INVALID_ARGUMENT; + + pParamCopy->bSizeValid = NV_TRUE; + return NV_OK; +} + + diff --git a/src/nvidia/src/kernel/rmapi/resource.c b/src/nvidia/src/kernel/rmapi/resource.c new file mode 100644 index 000000000..7d018d773 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/resource.c @@ -0,0 +1,311 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "rmapi/client.h" +#include "rmapi/resource.h" +#include "rmapi/rmapi.h" +#include "rmapi/control.h" +#include "ctrl/ctrlxxxx.h" +#include "gpu/gpu_resource.h" +#include "gpu/gpu.h" +#include "vgpu/rpc.h" +#include "core/locks.h" + +NV_STATUS +rmrescmnConstruct_IMPL +( + RmResourceCommon *pResourceCommmon +) +{ + return NV_OK; +} + +NV_STATUS +rmresConstruct_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + if (RS_IS_COPY_CTOR(pParams)) + { + RmResource *pSrcResource = dynamicCast(pParams->pSrcRef->pResource, RmResource); + + pResource->rpcGpuInstance = pSrcResource->rpcGpuInstance; + pResource->bRpcFree = pSrcResource->bRpcFree; + } + else + { + pResource->rpcGpuInstance = ~0; + pResource->bRpcFree = NV_FALSE; + } + + return NV_OK; +} + +NvBool +rmresAccessCallback_IMPL +( + RmResource *pResource, + RsClient *pInvokingClient, + void *pAllocParams, + RsAccessRight accessRight +) +{ + NV_STATUS status; + RsResourceRef *pCliResRef; + + status = clientGetResourceRef(RES_GET_CLIENT(pResource), + RES_GET_CLIENT_HANDLE(pResource), + &pCliResRef); + + if (status == NV_OK) + { + // Allow access if the resource's owner would get the access right + if(resAccessCallback(pCliResRef->pResource, pInvokingClient, pAllocParams, accessRight)) + return NV_TRUE; + } + + // Delegate to superclass + return resAccessCallback_IMPL(staticCast(pResource, RsResource), pInvokingClient, pAllocParams, accessRight); +} + +NvBool +rmresShareCallback_IMPL +( + RmResource *pResource, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + NV_STATUS status; + RsResourceRef *pCliResRef; + + // + // cliresShareCallback contains some require exceptions for non-GpuResource, + // which we don't want to hit. ClientResource doesn't normally implement these + // share types anyway, so we're fine with skipping them. 
+ // + switch (pSharePolicy->type) + { + case RS_SHARE_TYPE_SMC_PARTITION: + case RS_SHARE_TYPE_GPU: + { + // + // We do not want to lock down these GpuResource-specific require policies + // when the check cannot be applied for other resources, so add these checks + // as an alternative bypass for those policies + // + if ((pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) && + (NULL == dynamicCast(pResource, GpuResource))) + { + return NV_TRUE; + } + break; + } + case RS_SHARE_TYPE_FM_CLIENT: + { + RmClient *pSrcClient = dynamicCast(RES_GET_CLIENT(pResource), RmClient); + NvBool bSrcIsKernel = (pSrcClient != NULL) && (rmclientGetCachedPrivilege(pSrcClient) >= RS_PRIV_LEVEL_KERNEL); + + if (rmclientIsCapable(dynamicCast(pInvokingClient, RmClient), + NV_RM_CAP_EXT_FABRIC_MGMT) && !bSrcIsKernel) + { + return NV_TRUE; + } + break; + } + default: + { + status = clientGetResourceRef(RES_GET_CLIENT(pResource), + RES_GET_CLIENT_HANDLE(pResource), + &pCliResRef); + if (status == NV_OK) + { + // Allow sharing if the resource's owner would be shared with + if (resShareCallback(pCliResRef->pResource, pInvokingClient, + pParentRef, pSharePolicy)) + return NV_TRUE; + } + break; + } + } + + // Delegate to superclass + return resShareCallback_IMPL(staticCast(pResource, RsResource), + pInvokingClient, pParentRef, pSharePolicy); +} + +void serverControl_InitCookie +( + const struct NVOC_EXPORTED_METHOD_DEF *exportedEntry, + RmCtrlExecuteCookie *pRmCtrlExecuteCookie +) +{ + // Copy from NVOC exportedEntry + pRmCtrlExecuteCookie->cmd = exportedEntry->methodId; + pRmCtrlExecuteCookie->ctrlFlags = exportedEntry->flags; + // One time initialization of a const variable + *(NvU32 *)&pRmCtrlExecuteCookie->rightsRequired.limbs[0] + = exportedEntry->accessRight; +} + +// +// This routine searches through the Resource's NVOC exported methods for an entry +// that matches the specified command. 
+// +// Same logic as rmControlCmdLookup() in legacy RMCTRL path +// +NV_STATUS rmresControlLookup_IMPL +( + RmResource *pResource, + RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams, + const struct NVOC_EXPORTED_METHOD_DEF **ppEntry +) +{ + NvU32 cmd = pRsParams->cmd; + + if (RMCTRL_IS_NULL_CMD(cmd)) + return NV_WARN_NOTHING_TO_DO; + + return resControlLookup_IMPL(staticCast(pResource, RsResource), pRsParams, ppEntry); +} + +NV_STATUS +rmresGetMemInterMapParams_IMPL +( + RmResource *pRmResource, + RMRES_MEM_INTER_MAP_PARAMS *pParams +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +rmresCheckMemInterUnmap_IMPL +( + RmResource *pRmResource, + NvBool bSubdeviceHandleProvided +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +rmresGetMemoryMappingDescriptor_IMPL +( + RmResource *pRmResource, + struct MEMORY_DESCRIPTOR **ppMemDesc +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +rmresControl_Prologue_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance); + + if (pGpu == NULL) + return NV_OK; + + if (rmapiControlIsCacheable(pParams->pCookie->ctrlFlags, IS_GSP_CLIENT(pGpu))) + { + void* cached = rmapiControlCacheGet(pParams->hClient, pParams->hObject, pParams->cmd); + if (cached) + { + portMemCopy(pParams->pParams, pParams->paramsSize, cached, pParams->paramsSize); + return NV_WARN_NOTHING_TO_DO; + } + } + + if ((IS_VIRTUAL(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_VGPU_HOST)) || + (IS_GSP_CLIENT(pGpu) && (pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL))) + { + // + // GPU lock is required to protect the RPC buffers. + // However, some controls have ROUTE_TO_PHYSICAL + NO_GPUS_LOCK flags set. + // This is not valid in offload mode, but is in monolithic. + // In those cases, just acquire the lock for the RPC + // + GPU_MASK gpuMaskRelease = 0; + if (!rmDeviceGpuLockIsOwner(pGpu->gpuInstance)) + { + // + // Log any case where the above assumption is not true, but continue + // anyway. Use SAFE_LOCK_UPGRADE to try and recover in these cases. + // + NV_ASSERT(pParams->pCookie->ctrlFlags & RMCTRL_FLAGS_NO_GPUS_LOCK); + NV_ASSERT_OK_OR_RETURN(rmGpuGroupLockAcquire(pGpu->gpuInstance, + GPU_LOCK_GRP_SUBDEVICE, + GPU_LOCK_FLAGS_SAFE_LOCK_UPGRADE, + RM_LOCK_MODULES_RPC, + &gpuMaskRelease)); + } + + NV_RM_RPC_CONTROL(pGpu, pParams->hClient, pParams->hObject, pParams->cmd, + pParams->pParams, pParams->paramsSize, status); + + if (gpuMaskRelease != 0) + { + rmGpuGroupLockRelease(gpuMaskRelease, GPUS_LOCK_FLAGS_NONE); + } + + return (status == NV_OK) ? 
NV_WARN_NOTHING_TO_DO : status; + } + return NV_OK; +} + +void +rmresControl_Epilogue_IMPL +( + RmResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + OBJGPU *pGpu = gpumgrGetGpu(pResource->rpcGpuInstance); + + if (pGpu == NULL) + return; + + if (rmapiControlIsCacheable(pParams->pCookie->ctrlFlags, IS_GSP_CLIENT(pGpu))) + { + void* cached = rmapiControlCacheGet(pParams->hClient, pParams->hObject, pParams->cmd); + if (!cached) + { + NV_PRINTF(LEVEL_INFO, "rmControl: caching cmd 0x%x params\n", pParams->cmd); + NV_ASSERT_OK(rmapiControlCacheSet(pParams->hClient, pParams->hObject, pParams->cmd, + NvP64_VALUE(pParams->pParams), pParams->paramsSize)); + } + } +} diff --git a/src/nvidia/src/kernel/rmapi/resource_desc.c b/src/nvidia/src/kernel/rmapi/resource_desc.c new file mode 100644 index 000000000..d04d7cb25 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/resource_desc.c @@ -0,0 +1,219 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "core/core.h" +#include "resource_desc.h" + +// Need the full header for the class allocation param structure. +#define SDK_ALL_CLASSES_INCLUDE_FULL_HEADER +#include "g_allclasses.h" +// Not a class header, but contains an allocation struct used by several classes +#include "class/clb0b5sw.h" +#include "nvos.h" + +#include "rmapi/alloc_size.h" +#include "rmapi/resource_fwd_decls.h" +#include "resserv/rs_access_rights.h" + +// +// Macros to transform list into static table +// + +// NULL terminated list +#define RS_LIST(...) {__VA_ARGS__, 0} +#define RS_ROOT_OBJECT {0} +#define RS_ANY_PARENT {0} + +// Populate parents +#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \ + NvU32 cls##ParentList[] = parentList; + +#include "resource_list.h" + +#undef RS_LIST +#undef RS_ROOT_OBJECT +#undef RS_ANY_PARENT + + +#define RS_ACCESS_NONE {-1} +#define RS_ACCESS_LIST(...) 
{__VA_ARGS__} + +// Populate rights required +#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \ + static const RsAccessRight cls##_RightsRequiredArray[] = rightsRequired; + +#include "resource_list.h" + +#undef RS_ACCESS_NONE +#undef RS_ACCESS_LIST + +// Populate forward declarations +#define RS_ENTRY(cls, internalClass, bMultiInstance, parentList, allocParam, freePriority, flags, rightsRequired) \ + extern const struct NVOC_CLASS_DEF __nvoc_class_def_##internalClass; /* defn here to keep POPULATE_STRUCT happy if the class is disabled */ + +#include "resource_list.h" + + +#define RS_REQUIRED(allocParam) sizeof(allocParam), NV_TRUE +#define RS_OPTIONAL(allocParam) sizeof(allocParam), NV_FALSE +#define RS_NONE 0, NV_FALSE +#define RS_ENTRY(cls, internalClass, bMultiInstance, bAnyParent, allocParam, freePriority, flags, bRightsRequired) \ +{ \ + cls, \ + classId(internalClass), \ + classInfo(internalClass), \ + allocParam, \ + bMultiInstance, \ + bAnyParent, \ + cls##ParentList, \ + freePriority, \ + flags, \ + cls##_RightsRequiredArray, \ + bRightsRequired ? NV_ARRAY_ELEMENTS(cls##_RightsRequiredArray) : 0, \ +}, + +#define RS_LIST(...) NV_FALSE +#define RS_ROOT_OBJECT NV_FALSE +#define RS_ANY_PARENT NV_TRUE +#define RS_ACCESS_NONE NV_FALSE +#define RS_ACCESS_LIST(...) NV_TRUE +static RS_RESOURCE_DESC +g_RsResourceDescList[] = +{ +#include "resource_list.h" +}; +#undef RS_LIST +#undef RS_ROOT_OBJECT +#undef RS_ANY_PARENT +#undef RS_ACCESS_NONE +#undef RS_ACCESS_LIST +#undef RS_REQUIRED +#undef RS_OPTIONAL +#undef RS_NONE + +#define NUM_ENTRIES_DESC_LIST NV_ARRAY_ELEMENTS32(g_RsResourceDescList) + +void RsResInfoInitialize(void) +{ + // + // Keep the array sorted by externalClassId, so we can binary search it + // Simple bubble-sort is fine here as the number of elements is below 300, + // and we only call this once on boot anyway. + // + NvU32 i, j; + for (i = 0; i < NUM_ENTRIES_DESC_LIST - 1; i++) + { + for (j = i + 1; j < NUM_ENTRIES_DESC_LIST; j++) + { + RS_RESOURCE_DESC *a = &g_RsResourceDescList[i]; + RS_RESOURCE_DESC *b = &g_RsResourceDescList[j]; + + if (a->externalClassId > b->externalClassId) + { + RS_RESOURCE_DESC tmp; + portMemCopy(&tmp, sizeof(tmp), a, sizeof(*a)); + portMemCopy(a, sizeof(*a), b, sizeof(*b)); + portMemCopy(b, sizeof(*b), &tmp, sizeof(tmp)); + } + } + } +} + +RS_RESOURCE_DESC * +RsResInfoByExternalClassId +( + NvU32 externalClassId +) +{ + NvU32 low = 0; + NvU32 high = NUM_ENTRIES_DESC_LIST; + + // Binary search the array; If not found, the break in the middle will be hit + while (1) + { + NvU32 mid = (low + high) / 2; + + if (g_RsResourceDescList[mid].externalClassId == externalClassId) + return &g_RsResourceDescList[mid]; + + if (high == mid || low == mid) + break; + + if (g_RsResourceDescList[mid].externalClassId > externalClassId) + high = mid; + else + low = mid; + } + + return NULL; +} + +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *pResDesc) +{ + return pResDesc ? 
pResDesc->internalClassId : 0; +} + +void RsResInfoGetResourceList(const RS_RESOURCE_DESC **ppResourceList, NvU32 *numResources) +{ + *ppResourceList = g_RsResourceDescList; + *numResources = NV_ARRAY_ELEMENTS(g_RsResourceDescList); +} + +NV_STATUS +rmapiGetClassAllocParamSize +( + NvU32 *pAllocParamSizeBytes, + NvP64 pUserParams, + NvBool *pBAllowNull, + NvU32 hClass +) +{ + RS_RESOURCE_DESC *pResDesc; + + *pAllocParamSizeBytes = 0; + *pBAllowNull = NV_FALSE; + + pResDesc = RsResInfoByExternalClassId(hClass); + + if (!pResDesc) + return NV_ERR_INVALID_CLASS; + + if (pResDesc->bParamRequired) + { + // params are required + *pAllocParamSizeBytes = pResDesc->allocParamSize; + } + else if (pResDesc->allocParamSize) + { + // params are *optional* + *pBAllowNull = NV_TRUE; + if (pUserParams != (NvP64) 0) + *pAllocParamSizeBytes = pResDesc->allocParamSize; + } + else + { + // no params + *pBAllowNull = NV_TRUE; + } + + return NV_OK; +} diff --git a/src/nvidia/src/kernel/rmapi/resource_desc.h b/src/nvidia/src/kernel/rmapi/resource_desc.h new file mode 100644 index 000000000..4750f50ad --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/resource_desc.h @@ -0,0 +1,88 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _RESOURCE_DESC_H_ +#define _RESOURCE_DESC_H_ + +#include "nvtypes.h" +#include "nvoc/runtime.h" +#include "resserv/rs_access_rights.h" + +// Flags for RS_ENTRY +#define RS_FLAGS_NONE 0 + +#define RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC NVBIT(0) ///< GPUs Lock is acquired on allocation +#define RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE NVBIT(1) ///< GPUs Lock is acquired for free +#define RS_FLAGS_ACQUIRE_GPUS_LOCK (RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE) + +#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC NVBIT(2) ///< GPU Group Lock is acquired on allocation +#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE NVBIT(3) ///< GPU Group Lock is acquired for free +#define RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK (RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC | RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_FREE) + +#define RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST NVBIT(4) ///< Issue RPC to host to allocate resource for virtual GPUs + +#define RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC NVBIT(5) ///< Acquire the RO API lock for allocation, default is RW API lock + +#define RS_FLAGS_ALLOC_RPC_TO_PHYS_RM NVBIT(6) ///< Issue RPC to allocate resource in physical RM + +#define RS_FLAGS_ALLOC_RPC_TO_ALL (RS_FLAGS_ALLOC_RPC_TO_VGPU_HOST | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM) + +#define RS_FLAGS_INTERNAL_ONLY NVBIT(7) ///< Class cannot be allocated outside of RM + +#define RS_FLAGS_CHANNEL_DESCENDANT_COMMON (RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL) + +#define RS_FREE_PRIORITY_DEFAULT 0 +#define RS_FREE_PRIORITY_HIGH 1 ///< Resources with this priority will be freed ahead of others + +/** + * Information about a RsResource subclass. + */ +typedef struct RS_RESOURCE_DESC +{ + NvU32 externalClassId; ///< Id of the class as seen by the client + NvU32 internalClassId; ///< NVOC class ID, mirrored from pClassInfo->classId + const NVOC_CLASS_INFO *pClassInfo; ///< RTTI information for internal class + NvU32 allocParamSize; ///< Size of allocation param structure + NvBool bParamRequired; ///< If not required, param size can be 0 or allocParamSize + NvBool bMultiInstance; ///< Multiple instances of this object under a parent + NvBool bAnyParent; ///< Resource can be allocated under any parent + NvU32 *pParentList; ///< NULL terminated list of internalClassId of parents + NvU32 freePriority; ///< RS_FREE_PRIORITY_* + NvU32 flags; ///< Flags + const RsAccessRight *pRightsRequiredArray; ///< Access rights required to allocate this resource + NvLength rightsRequiredLength; ///< Length of pRightsRequiredArray +} RS_RESOURCE_DESC; + +/** Initialize the global resource info table */ +void RsResInfoInitialize(void); + +/** + * Look up RS_RESOURCE_DESC using the externalClassId. The id of the class as + * seen by clients. + */ +RS_RESOURCE_DESC *RsResInfoByExternalClassId(NvU32 externalClassId); +NvU32 RsResInfoGetInternalClassId(const RS_RESOURCE_DESC *); + +/** Get the global resource info table */ +void RsResInfoGetResourceList(const RS_RESOURCE_DESC **ppResourceList, NvU32 *numResources); + +#endif // _RESOURCE_DESC_H_ diff --git a/src/nvidia/src/kernel/rmapi/resource_list.h b/src/nvidia/src/kernel/rmapi/resource_list.h new file mode 100644 index 000000000..34b28fed0 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/resource_list.h @@ -0,0 +1,1156 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +// +// No include guards - this file is included multiple times, each time with a +// different definition for RS_ENTRY +// + +// +// Table describing all RsResource subclasses. +// +// Internal Class - there is a RM internal class representing all classes +// exported to RM clients. The internal name of the class should be similar to +// the symbolic name used by clients. If there is ambiguity between RM internal +// classes, e.g.: between the PMU engine (OBJPMU) and the exported class, it's +// recommended to use Api as the suffix to disambiguate; for example, OBJPMU +// (the engine) vs PmuApi (the per-client api object). It's also recommended to +// avoid using Object, Resource, etc as those terms don't improve clarity. +// If there is no ambiguity, there is no need to add the Api suffix; for example, +// Channel is preferred over ChannelApi (there is no other Channel object in +// RM). +// +// Multi-Instance - NV_TRUE if there can be multiple instances of this object's +// *internal* class id under a parent. +// +// This list should eventually replace the similar lists in nvapi.c and +// rmctrl.c. The number of fields in the table should be kept minimal, just +// enough to create the object, with as much of the detail being specified +// within the class itself. +// +// In the future we should consider switching to a registration approach or +// generating with NVOC and/or annotating the class definition. 
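+//
+// For illustration, resource_desc.c includes this file several times with
+// different RS_ENTRY definitions; in the pass that builds the descriptor
+// table, an entry such as the NV01_ROOT entry below expands roughly to the
+// following (a sketch of the expansion, not literal generated code):
+//
+//     {
+//         NV01_ROOT,                            // externalClassId
+//         classId(RmClientResource),            // internalClassId
+//         classInfo(RmClientResource),          // pClassInfo
+//         sizeof(NvHandle), NV_FALSE,           // RS_OPTIONAL: size, not required
+//         NV_FALSE,                             // bMultiInstance
+//         NV_FALSE,                             // RS_ROOT_OBJECT: not "any parent"
+//         NV01_ROOTParentList,                  // {0}, built in an earlier pass
+//         RS_FREE_PRIORITY_DEFAULT,
+//         RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC,
+//         NV01_ROOT_RightsRequiredArray, 0,     // RS_ACCESS_NONE: no rights needed
+//     },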
+// +// RS-TODO: Rename classes that have 'Object' in their names +// + + + +RS_ENTRY( + /* External Class */ NV01_ROOT, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_ROOT_NON_PRIV, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_ROOT_CLIENT, + /* Internal Class */ RmClientResource, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_ROOT_OBJECT, + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ MPS_COMPUTE, + /* Internal Class */ MpsApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_NONE, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ FABRIC_MANAGER_SESSION, + /* Internal Class */ FmSessionApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_REQUIRED(NV000F_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV0020_GPU_MANAGEMENT, + /* Internal Class */ GpuManagementApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_OPTIONAL(NvHandle), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV_EVENT_BUFFER, + /* Internal Class */ EventBuffer, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_REQUIRED(NV_EVENT_BUFFER_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV50_P2P, + /* Internal Class */ P2PApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_REQUIRED(NV503B_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV0060_SYNC_GPU_BOOST, + /* Internal Class */ SyncGpuBoost, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_REQUIRED(NV0060_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_NONE, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_DEVICE_0, + /* Internal Class */ Device, + /* Multi-Instance */ NV_TRUE, + /* Parents */ 
RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_OPTIONAL(NV0080_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GT200_DEBUGGER, + /* Internal Class */ KernelSMDebuggerSession, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV83DE_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GF100_PROFILER, + /* Internal Class */ Profiler, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice), classId(KernelChannel), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ MAXWELL_PROFILER_DEVICE, + /* Internal Class */ ProfilerDev, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_REQUIRED(NVB2CC_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ G84_PERFBUFFER, + /* Internal Class */ PerfBuffer, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GF100_HDACODEC, + /* Internal Class */ Hdacodec, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Channels can have a CHANNEL_GROUP, a DEVICE, or a CONTEXT_SHARE (starting in Volta) as parents */ + /* RS-TODO: Update channel parent list when CONTEXT_SHARE is added */ +RS_ENTRY( + /* External Class */ GF100_CHANNEL_GPFIFO, + /* Internal Class */ KernelChannel, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ KEPLER_CHANNEL_GPFIFO_A, + /* Internal Class */ KernelChannel, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ KEPLER_CHANNEL_GPFIFO_B, + /* Internal Class */ KernelChannel, + /* Multi-Instance */ NV_TRUE, + /* Parents */ 
RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ MAXWELL_CHANNEL_GPFIFO_A, + /* Internal Class */ KernelChannel, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ PASCAL_CHANNEL_GPFIFO_A, + /* Internal Class */ KernelChannel, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ VOLTA_CHANNEL_GPFIFO_A, + /* Internal Class */ KernelChannel, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ TURING_CHANNEL_GPFIFO_A, + /* Internal Class */ KernelChannel, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_CHANNEL_GPFIFO_A, + /* Internal Class */ KernelChannel, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ UVM_CHANNEL_RETAINER, + /* Internal Class */ UvmChannelRetainer, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_UVM_CHANNEL_RETAINER_ALLOC_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ FERMI_CONTEXT_SHARE_A, + /* Internal Class */ KernelCtxShareApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannelGroupApi)), + /* Alloc Param Info */ RS_REQUIRED(NV_CTXSHARE_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | 
RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ KERNEL_GRAPHICS_CONTEXT, + /* Internal Class */ KernelGraphicsContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannelGroupApi), classId(KernelChannel)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_INTERNAL_ONLY, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV20_SUBDEVICE_0, + /* Internal Class */ Subdevice, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_OPTIONAL(NV2080_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV2081_BINAPI, + /* Internal Class */ BinaryApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_OPTIONAL(NV2081_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV2082_BINAPI_PRIVILEGED, + /* Internal Class */ BinaryApiPrivileged, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_OPTIONAL(NV2082_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ KEPLER_CHANNEL_GROUP_A, + /* Internal Class */ KernelChannelGroupApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_CHANNEL_GROUP_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_HIGH, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_LOCAL_PRIVILEGED, + /* Internal Class */ RegisterMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_LOCAL_USER, + /* Internal Class */ VideoMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(Subdevice)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_LOCAL_PHYSICAL, + /* Internal Class */ PhysicalMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_PHYSICAL_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ 
NV50_MEMORY_VIRTUAL, + /* Internal Class */ VirtualMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_SYSTEM, + /* Internal Class */ SystemMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_VIRTUAL, + /* Internal Class */ VirtualMemoryRange, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_VIRTUAL_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_SYSTEM_OS_DESCRIPTOR, + /* Internal Class */ OsDescMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_OS_DESC_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_DEVICELESS, + /* Internal Class */ NoDeviceMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_NONE, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_FRAMEBUFFER_CONSOLE, + /* Internal Class */ ConsoleMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_HW_RESOURCES, + /* Internal Class */ MemoryHwResources, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(Subdevice)), + /* Alloc Param Info */ RS_REQUIRED(NV_MEMORY_HW_RESOURCES_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_MEMORY_FLA, + /* Internal Class */ FlaMemory, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_REQUIRED(NV_FLA_MEMORY_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV_MEMORY_FABRIC, + /* Internal Class */ MemoryFabric, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_REQUIRED(NV00F8_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | 
RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ FERMI_VASPACE_A, + /* Internal Class */ VaSpaceApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(Subdevice)), + /* Alloc Param Info */ RS_REQUIRED(NV_VASPACE_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Subdevice Children: */ +RS_ENTRY( + /* External Class */ NV50_THIRD_PARTY_P2P, + /* Internal Class */ ThirdPartyP2P, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_OPTIONAL(NV503C_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GF100_SUBDEVICE_MASTER, + /* Internal Class */ GenericEngineApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_TIMER, + /* Internal Class */ TimerApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV40_I2C, + /* Internal Class */ I2cApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_PHYS_RM, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV20_SUBDEVICE_DIAG, + /* Internal Class */ DiagApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GF100_ZBC_CLEAR, + /* Internal Class */ ZbcApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV40_DEBUG_BUFFER, + /* Internal Class */ DebugBufferApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_REQUIRED(NV00DB_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ VOLTA_USERMODE_A, + /* Internal Class */ UserModeApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights 
*/ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ TURING_USERMODE_A, + /* Internal Class */ UserModeApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_USERMODE_A, + /* Internal Class */ UserModeApi, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC371_DISP_SF_USER, + /* Internal Class */ DispSfUser, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC671_DISP_SF_USER, + /* Internal Class */ DispSfUser, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ MMU_FAULT_BUFFER, + /* Internal Class */ MmuFaultBuffer, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ ACCESS_COUNTER_NOTIFY_BUFFER, + /* Internal Class */ AccessCounterBuffer, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_SMC_PARTITION_REF, + /* Internal Class */ GPUInstanceSubscription, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Subdevice)), + /* Alloc Param Info */ RS_REQUIRED(NVC637_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_SMC_EXEC_PARTITION_REF, + /* Internal Class */ ComputeInstanceSubscription, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(GPUInstanceSubscription)), + /* Alloc Param Info */ RS_REQUIRED(NVC638_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_SMC_CONFIG_SESSION, + /* Internal Class */ MIGConfigSession, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_REQUIRED(NVC639_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_NONE, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_SMC_MONITOR_SESSION, + /* Internal Class */ MIGMonitorSession, + 
/* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(RmClientResource)), + /* Alloc Param Info */ RS_REQUIRED(NVC640_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_NONE, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Display classes: */ +RS_ENTRY( + /* External Class */ NVC570_DISPLAY, + /* Internal Class */ NvDispApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC670_DISPLAY, + /* Internal Class */ NvDispApi, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC372_DISPLAY_SW, + /* Internal Class */ DispSwObj, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV04_DISPLAY_COMMON, + /* Internal Class */ DispCommon, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(Device)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_FREE | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV9010_VBLANK_CALLBACK, + /* Internal Class */ VblankCallback, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(Device), classId(Subdevice)), + /* Alloc Param Info */ RS_REQUIRED(NV_VBLANK_CALLBACK_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV0092_RG_LINE_CALLBACK, + /* Internal Class */ RgLineCallback, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(DispCommon)), + /* Alloc Param Info */ RS_REQUIRED(NV0092_RG_LINE_CALLBACK_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC57A_CURSOR_IMM_CHANNEL_PIO, + /* Internal Class */ DispChannelPio, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67A_CURSOR_IMM_CHANNEL_PIO, + /* Internal Class */ DispChannelPio, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC57B_WINDOW_IMM_CHANNEL_DMA, + /* 
Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC57D_CORE_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC57E_WINDOW_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC573_DISP_CAPABILITIES, + /* Internal Class */ DispCapabilities, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67B_WINDOW_IMM_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67D_CORE_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC67E_WINDOW_CHANNEL_DMA, + /* Internal Class */ DispChannelDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_REQUIRED(NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK | RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC673_DISP_CAPABILITIES, + /* Internal Class */ DispCapabilities, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(NvDispApi)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + /* Classes allocated under channel: */ +RS_ENTRY( + /* External Class */ GF100_DISP_SW, + /* Internal Class */ DispSwObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_REQUIRED(NV9072_ALLOCATION_PARAMETERS), + /* Resource Free 
Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON| RS_FLAGS_ALLOC_RPC_TO_ALL, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GF100_TIMED_SEMAPHORE_SW, + /* Internal Class */ TimedSemaSwObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV50_DEFERRED_API_CLASS, + /* Internal Class */ DeferredApiObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV5080_ALLOC_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ GP100_UVM_SW, + /* Internal Class */ UvmSwObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV04_SOFTWARE_TEST, + /* Internal Class */ SoftwareMethodTest, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_NONE, + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ MAXWELL_DMA_COPY_A, + /* Internal Class */ KernelCeContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NVB0B5_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ PASCAL_DMA_COPY_A, + /* Internal Class */ KernelCeContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NVB0B5_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ TURING_DMA_COPY_A, + /* Internal Class */ KernelCeContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NVB0B5_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_DMA_COPY_A, + /* Internal Class */ KernelCeContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NVB0B5_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_DMA_COPY_B, + /* Internal Class */ KernelCeContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ 
RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NVB0B5_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC4B0_VIDEO_DECODER, + /* Internal Class */ NvdecContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_BSP_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC6B0_VIDEO_DECODER, + /* Internal Class */ NvdecContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_BSP_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC7B0_VIDEO_DECODER, + /* Internal Class */ NvdecContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_BSP_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC4D1_VIDEO_NVJPG, + /* Internal Class */ NvjpgContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_NVJPG_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC6FA_VIDEO_OFA, + /* Internal Class */ OfaContext, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_OFA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC7FA_VIDEO_OFA, + /* Internal Class */ OfaContext, + /* Multi-Instance */ NV_FALSE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_OFA_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC4B7_VIDEO_ENCODER, + /* Internal Class */ MsencContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_MSENC_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVB4B7_VIDEO_ENCODER, + /* Internal Class */ MsencContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_MSENC_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NVC7B7_VIDEO_ENCODER, + /* Internal Class */ 
MsencContext, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_MSENC_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_A, + /* Internal Class */ KernelGraphicsObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_GR_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_COMPUTE_A, + /* Internal Class */ KernelGraphicsObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_GR_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_B, + /* Internal Class */ KernelGraphicsObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_GR_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ AMPERE_COMPUTE_B, + /* Internal Class */ KernelGraphicsObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_GR_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ FERMI_TWOD_A, + /* Internal Class */ KernelGraphicsObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_GR_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ KEPLER_INLINE_TO_MEMORY_B, + /* Internal Class */ KernelGraphicsObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_GR_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ TURING_A, + /* Internal Class */ KernelGraphicsObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_GR_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ TURING_COMPUTE_A, + /* Internal Class */ KernelGraphicsObject, + /* Multi-Instance */ NV_TRUE, + /* Parents */ 
RS_LIST(classId(KernelChannel)), + /* Alloc Param Info */ RS_OPTIONAL(NV_GR_ALLOCATION_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_CHANNEL_DESCENDANT_COMMON | RS_FLAGS_ACQUIRE_RO_API_LOCK_ON_ALLOC, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_CONTEXT_DMA, + /* Internal Class */ ContextDma, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV_CONTEXT_DMA_ALLOCATION_PARAMS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_OS_EVENT, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_KERNEL_CALLBACK, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) +RS_ENTRY( + /* External Class */ NV01_EVENT_KERNEL_CALLBACK_EX, + /* Internal Class */ Event, + /* Multi-Instance */ NV_TRUE, + /* Parents */ RS_ANY_PARENT, + /* Alloc Param Info */ RS_REQUIRED(NV0005_ALLOC_PARAMETERS), + /* Resource Free Priority */ RS_FREE_PRIORITY_DEFAULT, + /* Flags */ RS_FLAGS_ACQUIRE_GPUS_LOCK, + /* Required Access Rights */ RS_ACCESS_NONE +) + +// Undefine the entry macro to simplify call sites +#undef RS_ENTRY diff --git a/src/nvidia/src/kernel/rmapi/rmapi.c b/src/nvidia/src/kernel/rmapi/rmapi.c new file mode 100644 index 000000000..8827d5784 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi.c @@ -0,0 +1,693 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +#include "nvrm_registry.h" +#include "rmapi/rmapi.h" +#include "entry_points.h" +#include "resserv/rs_server.h" +#include "rmapi/rs_utils.h" +#include "gpu/gpu_resource.h" +#include "gpu/device/device.h" +#include "core/locks.h" +#include "gpu/gpu.h" +#include "diagnostics/tracer.h" +#include "tls/tls.h" +#include "core/thread_state.h" +#include "gpu_mgr/gpu_mgr.h" +#include "resource_desc.h" + +typedef struct +{ + PORT_RWLOCK * pLock; + NvU64 threadId; + NvU64 timestamp; + LOCK_TRACE_INFO traceInfo; + NvU64 tlsEntryId; + +} RMAPI_LOCK; + +RsServer g_resServ; +static RM_API g_RmApiList[RMAPI_TYPE_MAX]; +static NvBool g_bResServInit = NV_FALSE; +static RMAPI_LOCK g_RmApiLock; + +static void _rmapiInitInterface(RM_API *pRmApi, API_SECURITY_INFO *pDefaultSecurityInfo, NvBool bTlsInternal, + NvBool bApiLockInternal, NvBool bGpuLockInternal); +static NV_STATUS _rmapiLockAlloc(void); +static void _rmapiLockFree(void); + +// from rmapi_stubs.c +void rmapiInitStubInterface(RM_API *pRmApi); + +NV_STATUS +rmapiInitialize +( + void +) +{ + NV_STATUS status = NV_OK; + API_SECURITY_INFO secInfo = {0}; + + NV_ASSERT(!g_bResServInit); + + status = _rmapiLockAlloc(); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot allocate rmapi locks\n"); + return status; + } + + RsResInfoInitialize(); + status = serverConstruct(&g_resServ, RS_PRIV_LEVEL_HOST, 0); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "*** Cannot initialize resource server\n"); + _rmapiLockFree(); + return status; + } + + rmapiControlCacheInit(); + + listInit(&g_clientListBehindGpusLock, g_resServ.pAllocator); + listInit(&g_userInfoList, g_resServ.pAllocator); + + secInfo.privLevel = RS_PRIV_LEVEL_KERNEL; + secInfo.paramLocation = PARAM_LOCATION_KERNEL; + + _rmapiInitInterface(&g_RmApiList[RMAPI_EXTERNAL], NULL, NV_FALSE /* bTlsInternal */, NV_FALSE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_EXTERNAL_KERNEL], &secInfo, NV_FALSE /* bTlsInternal */, NV_FALSE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_MODS_LOCK_BYPASS], &secInfo, NV_FALSE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_TRUE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_API_LOCK_INTERNAL], &secInfo, NV_TRUE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_FALSE /* bGpuLockInternal */); + _rmapiInitInterface(&g_RmApiList[RMAPI_GPU_LOCK_INTERNAL], &secInfo, NV_TRUE /* bTlsInternal */, NV_TRUE /* bApiLockInternal */, NV_TRUE /* bGpuLockInternal */); + + rmapiInitStubInterface(&g_RmApiList[RMAPI_STUBS]); + + g_bResServInit = NV_TRUE; + + return status; +} + +void +rmapiShutdown +( + void +) +{ + if (!g_bResServInit) + return; + + serverFreeDomain(&g_resServ, 0); + serverDestruct(&g_resServ); + _rmapiLockFree(); + + rmapiControlCacheFree(); + + g_bResServInit = NV_FALSE; +} + +static void +_rmapiInitInterface +( + RM_API *pRmApi, + API_SECURITY_INFO *pDefaultSecInfo, + NvBool bTlsInternal, + NvBool bApiLockInternal, + NvBool bGpuLockInternal +) +{ + // + // Initialize to all stubs first, so any APIs not explicitly set here + // will return NV_ERR_NOT_SUPPORTED if called + // + rmapiInitStubInterface(pRmApi); + + // + // Init members + // + 
if (pDefaultSecInfo) + pRmApi->defaultSecInfo = *pDefaultSecInfo; + + pRmApi->bHasDefaultSecInfo = !!pDefaultSecInfo; + pRmApi->bTlsInternal = bTlsInternal; + pRmApi->bApiLockInternal = bApiLockInternal; + pRmApi->bRmSemaInternal = bApiLockInternal; + pRmApi->bGpuLockInternal = bGpuLockInternal; + pRmApi->pPrivateContext = NULL; + + // + // Init function pointers + // + pRmApi->Alloc = rmapiAlloc; + pRmApi->AllocWithHandle = rmapiAllocWithHandle; + pRmApi->AllocWithSecInfo = pRmApi->bTlsInternal ? rmapiAllocWithSecInfo : rmapiAllocWithSecInfoTls; + + pRmApi->FreeClientList = rmapiFreeClientList; + pRmApi->FreeClientListWithSecInfo = pRmApi->bTlsInternal ? rmapiFreeClientListWithSecInfo : rmapiFreeClientListWithSecInfoTls; + + pRmApi->Free = rmapiFree; + pRmApi->FreeWithSecInfo = pRmApi->bTlsInternal ? rmapiFreeWithSecInfo : rmapiFreeWithSecInfoTls; + + pRmApi->Control = rmapiControl; + pRmApi->ControlWithSecInfo = pRmApi->bTlsInternal ? rmapiControlWithSecInfo : rmapiControlWithSecInfoTls; + + pRmApi->DupObject = rmapiDupObject; + pRmApi->DupObjectWithSecInfo = pRmApi->bTlsInternal ? rmapiDupObjectWithSecInfo : rmapiDupObjectWithSecInfoTls; + + pRmApi->Share = rmapiShare; + pRmApi->ShareWithSecInfo = pRmApi->bTlsInternal ? rmapiShareWithSecInfo : rmapiShareWithSecInfoTls; + + pRmApi->MapToCpu = rmapiMapToCpu; + pRmApi->MapToCpuWithSecInfo = pRmApi->bTlsInternal ? rmapiMapToCpuWithSecInfo : rmapiMapToCpuWithSecInfoTls; + + pRmApi->UnmapFromCpu = rmapiUnmapFromCpu; + pRmApi->UnmapFromCpuWithSecInfo = pRmApi->bTlsInternal ? rmapiUnmapFromCpuWithSecInfo : rmapiUnmapFromCpuWithSecInfoTls; + + pRmApi->Map = rmapiMap; + pRmApi->MapWithSecInfo = pRmApi->bTlsInternal ? rmapiMapWithSecInfo : rmapiMapWithSecInfoTls; + + pRmApi->Unmap = rmapiUnmap; + pRmApi->UnmapWithSecInfo = pRmApi->bTlsInternal ? rmapiUnmapWithSecInfo : rmapiUnmapWithSecInfoTls; +} + +RM_API * +rmapiGetInterface +( + RMAPI_TYPE rmapiType +) +{ + return &g_RmApiList[rmapiType]; +} + +NV_STATUS +rmapiPrologue +( + RM_API *pRmApi, + RM_API_CONTEXT *pContext +) +{ + NV_STATUS status = NV_OK; + return status; +} + +void +rmapiEpilogue +( + RM_API *pRmApi, + RM_API_CONTEXT *pContext +) +{ +} + +void +rmapiInitLockInfo +( + RM_API *pRmApi, + NvHandle hClient, + RS_LOCK_INFO *pLockInfo +) +{ + NV_ASSERT_OR_RETURN_VOID(pLockInfo != NULL); + pLockInfo->flags = 0; + pLockInfo->state = 0; + + if (hClient != 0) + { + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + if ((pCallContext != NULL) && (pCallContext->pLockInfo != NULL)) + { + pLockInfo->state = pCallContext->pLockInfo->state; + + if ((pCallContext->pLockInfo->pClient != NULL) && + (pCallContext->pLockInfo->pClient->hClient == hClient)) + { + pLockInfo->pClient = pCallContext->pLockInfo->pClient; + } + else + { + pLockInfo->state &= ~RM_LOCK_STATES_CLIENT_LOCK_ACQUIRED; + } + } + } + + if (!pRmApi->bRmSemaInternal) + pLockInfo->flags |= RM_LOCK_FLAGS_RM_SEMA; + + if (pRmApi->bApiLockInternal) + { + pLockInfo->state |= RM_LOCK_STATES_API_LOCK_ACQUIRED; + + // RS-TODO: Assert that API rwlock is taken if no client is locked + if (pLockInfo->pClient == NULL) + pLockInfo->flags |= RM_LOCK_FLAGS_NO_CLIENT_LOCK; + } + + if (pRmApi->bGpuLockInternal) + pLockInfo->state |= RM_LOCK_STATES_ALLOW_RECURSIVE_LOCKS; +} + +static NV_STATUS +_rmapiLockAlloc(void) +{ + // Turn on by default for Linux to get some soak time + // bug 2539044, bug 2536036: Enable by default. 
+ g_resServ.bUnlockedParamCopy = NV_TRUE; + + NvU32 val = 0; + if ((osReadRegistryDword(NULL, + NV_REG_STR_RM_PARAM_COPY_NO_LOCK, + &val) == NV_OK)) + { + g_resServ.bUnlockedParamCopy = (val != 0); + } + + portMemSet(&g_RmApiLock, 0, sizeof(g_RmApiLock)); + g_RmApiLock.threadId = ~((NvU64)(0)); + g_RmApiLock.pLock = portSyncRwLockCreate(portMemAllocatorGetGlobalNonPaged()); + if (g_RmApiLock.pLock == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + g_RmApiLock.tlsEntryId = tlsEntryAlloc(); + + return NV_OK; +} + +static void +_rmapiLockFree(void) +{ + portSyncRwLockDestroy(g_RmApiLock.pLock); +} + +NV_STATUS +rmapiLockAcquire(NvU32 flags, NvU32 module) +{ + NV_STATUS rmStatus = NV_OK; + NvU64 threadId = portThreadGetCurrentThreadId(); + + NvU64 myPriority = 0; + + LOCK_ASSERT_AND_RETURN(!rmapiLockIsOwner()); + + // + // If a read-only lock was requested, check to see if the module is allowed + // to take read-only locks + // + if ((flags & RMAPI_LOCK_FLAGS_READ) && (module != RM_LOCK_MODULES_NONE)) + { + OBJSYS *pSys = SYS_GET_INSTANCE(); + if ((pSys->apiLockModuleMask & RM_LOCK_MODULE_GRP(module)) == 0) + { + flags &= ~RMAPI_LOCK_FLAGS_READ; + } + } + + // + // For conditional acquires and DISPATCH_LEVEL we want to exit + // immediately without waiting. + // + // If RM Locking V3 Lite is not enabled, *always* acquire the API + // lock in WRITE mode to ensure compatibility with Locking model V2 + // behavior (providing exclusive access to the resource). + // + flags = osApiLockAcquireConfigureFlags(flags); + if (flags & API_LOCK_FLAGS_COND_ACQUIRE) + { + if ((flags & RMAPI_LOCK_FLAGS_READ)) + { + if (!portSyncRwLockAcquireReadConditional(g_RmApiLock.pLock)) + rmStatus = NV_ERR_TIMEOUT_RETRY; + } + else + { + if (portSyncRwLockAcquireWriteConditional(g_RmApiLock.pLock)) + { + g_RmApiLock.threadId = threadId; + } + else + { + rmStatus = NV_ERR_TIMEOUT_RETRY; + } + } + } + else + { + if ((flags & RMAPI_LOCK_FLAGS_READ)) + { + portSyncRwLockAcquireRead(g_RmApiLock.pLock); + } + else + { + + portSyncRwLockAcquireWrite(g_RmApiLock.pLock); + g_RmApiLock.threadId = threadId; + } + } + + + if (rmStatus == NV_OK) + { + NvU64 timestamp; + osGetCurrentTick(&timestamp); + + if (g_RmApiLock.threadId == threadId) + g_RmApiLock.timestamp = timestamp; + + // save off owning thread + RMTRACE_RMLOCK(_API_LOCK_ACQUIRE); + + // add api lock trace record + INSERT_LOCK_TRACE(&g_RmApiLock.traceInfo, + NV_RETURN_ADDRESS(), + lockTraceAcquire, + flags, module, + threadId, + !portSyncExSafeToSleep(), + myPriority, + timestamp); + + // + // If enabled, reset the timeout now that we are running and off + // the Sleep Queue. + // + if (threadStateGetSetupFlags() & + THREAD_STATE_SETUP_FLAGS_DO_NOT_INCLUDE_SLEEP_TIME_ENABLED) + { + threadStateResetTimeout(NULL); + } + } + + NvP64 *pAcquireAddress = tlsEntryAcquire(g_RmApiLock.tlsEntryId); + if (pAcquireAddress != NULL) + { + *pAcquireAddress = (NvP64)(NvUPtr)NV_RETURN_ADDRESS(); + } + + return rmStatus; +} + +void +rmapiLockRelease(void) +{ + NvU64 threadId = portThreadGetCurrentThreadId(); + NvU64 timestamp; + + osGetCurrentTick(&timestamp); + + RMTRACE_RMLOCK(_API_LOCK_RELEASE); + + // add api lock trace record + INSERT_LOCK_TRACE(&g_RmApiLock.traceInfo, + NV_RETURN_ADDRESS(), + lockTraceRelease, + 0, 0, + threadId, + !portSyncExSafeToSleep(), + 0, + timestamp); + + if (g_RmApiLock.threadId == threadId) + { + // + // If the threadId in the global is same as current thread id, then + // we know that it was acquired in WRITE mode. 
+ // + g_RmApiLock.threadId = ~0ull; + g_RmApiLock.timestamp = timestamp; + portSyncRwLockReleaseWrite(g_RmApiLock.pLock); + + } + else + { + portSyncRwLockReleaseRead(g_RmApiLock.pLock); + } + + tlsEntryRelease(g_RmApiLock.tlsEntryId); +} + +NvBool +rmapiLockIsOwner(void) +{ + return tlsEntryGet(g_RmApiLock.tlsEntryId) != NvP64_NULL; +} + +// +// Mark for deletion the client resources from the data base, given a GPU mask +// +void +rmapiSetDelPendingClientResourcesFromGpuMask +( + NvU32 gpuMask +) +{ + RS_ITERATOR it; + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + Device *pDevice; + NvBool bDevicesInMask = NV_FALSE; + OBJGPU *pGpu; + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + + // Check that one of the devices is in the GPU mask + bDevicesInMask = NV_FALSE; + while (clientRefIterNext(it.pClient, &it)) + { + pDevice = dynamicCast(it.pResourceRef->pResource, Device); + + if (!pDevice) + { + continue; + } + + pGpu = GPU_RES_GET_GPU(pDevice); + if ((gpuMask & NVBIT(gpuGetInstance(pGpu))) != 0) + { + bDevicesInMask = NV_TRUE; + break; + } + } + + if (bDevicesInMask == NV_FALSE) + { + continue; + } + + pClient->Flags |= RMAPI_CLIENT_FLAG_DELETE_PENDING; + } +} + +void +rmapiDelPendingDevices +( + NvU32 gpuMask +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if (((pClient->Flags & RMAPI_CLIENT_FLAG_DELETE_PENDING) != 0) && + ((pClient->Flags & RMAPI_CLIENT_FLAG_RM_INTERNAL_CLIENT) == 0)) + { + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + while(clientRefIterNext(pRsClient, &it)) + { + RsResourceRef *pDeviceRef = it.pResourceRef; + Device *pDevice = dynamicCast(pDeviceRef->pResource, Device); + + if ((gpuMask & NVBIT(gpuGetInstance(GPU_RES_GET_GPU(pDevice)))) != 0) + { + pRmApi->Free(pRmApi, pRsClient->hClient, pDeviceRef->hResource); + + // Client's resource map has been modified, re-snap iterator + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + } + } + + } + + ppClient = serverutilGetNextClientUnderLock(ppClient); + } +} + +void +rmapiReportLeakedDevices +( + NvU32 gpuMask +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + while(clientRefIterNext(pRsClient, &it)) + { + RsResourceRef *pDeviceRef = it.pResourceRef; + Device *pDevice = dynamicCast(pDeviceRef->pResource, Device); + + if ((gpuMask & NVBIT(gpuGetInstance(GPU_RES_GET_GPU(pDevice)))) != 0) + { + NV_PRINTF(LEVEL_ERROR, + "Device object leak: (0x%x, 0x%x). 
Please file a bug against RM-core.\n", + pRsClient->hClient, pDeviceRef->hResource); + NV_ASSERT(0); + + // Delete leaked resource from database + pRmApi->Free(pRmApi, pRsClient->hClient, pDeviceRef->hResource); + + // Client's resource map has been modified, re-snap iterator + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + } + } + + ppClient = serverutilGetNextClientUnderLock(ppClient); + } +} + +// +// Delete the marked client resources +// +void +rmapiDelPendingClients +( + void +) +{ + RmClient **ppClient; + RmClient *pClient; + RsClient *pRsClient; + RS_ITERATOR it; + RM_API *pRmApi = rmapiGetInterface(RMAPI_GPU_LOCK_INTERNAL); + + ppClient = serverutilGetFirstClientUnderLock(); + while (ppClient) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + ppClient = serverutilGetNextClientUnderLock(ppClient); + if ((pClient->Flags & RMAPI_CLIENT_FLAG_DELETE_PENDING) != 0) + { + // Only free clients that have no devices left + it = clientRefIter(pRsClient, NULL, classId(Device), RS_ITERATE_CHILDREN, NV_TRUE); + if (!clientRefIterNext(pRsClient, &it)) + pRmApi->Free(pRmApi, pRsClient->hClient, pRsClient->hClient); + } + } +} + +NV_STATUS +rmapiGetClientHandlesFromOSInfo +( + void *pOSInfo, + NvHandle **ppClientHandleList, + NvU32 *pClientHandleListSize +) +{ + NvHandle *pClientHandleList; + NvU32 clientHandleListSize = 0; + NvU32 k; + + RmClient **ppClient; + RmClient **ppFirstClient; + RmClient *pClient; + RsClient *pRsClient; + + ppFirstClient = NULL; + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + if (pClient->pOSInfo != pOSInfo) + { + continue; + } + clientHandleListSize++; + + if (NULL == ppFirstClient) + ppFirstClient = ppClient; + } + + if (clientHandleListSize == 0) + { + *pClientHandleListSize = 0; + *ppClientHandleList = NULL; + return NV_ERR_INVALID_ARGUMENT; + } + + pClientHandleList = portMemAllocNonPaged(clientHandleListSize * sizeof(NvU32)); + if (pClientHandleList == NULL) + { + return NV_ERR_NO_MEMORY; + } + + *pClientHandleListSize = clientHandleListSize; + *ppClientHandleList = pClientHandleList; + + k = 0; + for (ppClient = ppFirstClient; + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + if (pClient->pOSInfo != pOSInfo) + { + continue; + } + pClientHandleList[k++] = pRsClient->hClient; + + if (clientHandleListSize <= k) + break; + } + + return NV_OK; +} + diff --git a/src/nvidia/src/kernel/rmapi/rmapi_cache.c b/src/nvidia/src/kernel/rmapi/rmapi_cache.c new file mode 100644 index 000000000..8ec5b04ef --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_cache.c @@ -0,0 +1,254 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "containers/map.h" +#include "containers/multimap.h" +#include "nvctassert.h" +#include "nvport/sync.h" +#include "nvrm_registry.h" +#include "os/os.h" +#include "rmapi/control.h" +#include "rmapi/rmapi.h" + +typedef struct +{ + void* params; +} RmapiControlCacheEntry; + +MAKE_MULTIMAP(CachedCallParams, RmapiControlCacheEntry); + +ct_assert(sizeof(NvHandle) <= 4); + +#define CLIENT_KEY_SHIFT (sizeof(NvHandle) * 8) + +static NvHandle keyToClient(NvU64 key) +{ + return (key >> CLIENT_KEY_SHIFT); +} + +static NvU64 handlesToKey(NvHandle hClient, NvHandle hObject) +{ + return ((NvU64)hClient << CLIENT_KEY_SHIFT) | hObject; +} + +static struct { + /* NOTE: Size unbounded for now */ + CachedCallParams cachedCallParams; + NvU32 mode; + PORT_MUTEX *mtx; +} RmapiControlCache; + +NvBool rmapiControlIsCacheable(NvU32 flags, NvBool isGSPClient) +{ + if (RmapiControlCache.mode == NV_REG_STR_RM_CACHEABLE_CONTROLS_ENABLE) + { + return !!(flags & RMCTRL_FLAGS_CACHEABLE); + } + if (RmapiControlCache.mode == NV_REG_STR_RM_CACHEABLE_CONTROLS_GSP_ONLY) + { + return (flags & RMCTRL_FLAGS_CACHEABLE) && + (flags & RMCTRL_FLAGS_ROUTE_TO_PHYSICAL) && + isGSPClient; + } + return NV_FALSE; +} + +void rmapiControlCacheInit() +{ + RmapiControlCache.mode = NV_REG_STR_RM_CACHEABLE_CONTROLS_GSP_ONLY; + + osReadRegistryDword(NULL, NV_REG_STR_RM_CACHEABLE_CONTROLS, &RmapiControlCache.mode); + NV_PRINTF(LEVEL_INFO, "using cache mode %d\n", RmapiControlCache.mode); + + if (RmapiControlCache.mode) + { + multimapInit(&RmapiControlCache.cachedCallParams, portMemAllocatorGetGlobalNonPaged()); + RmapiControlCache.mtx = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (!RmapiControlCache.mtx) + { + NV_PRINTF(LEVEL_ERROR, "failed to create mutex"); + RmapiControlCache.mode = NV_REG_STR_RM_CACHEABLE_CONTROLS_DISABLE; + } + } +} + +void* rmapiControlCacheGet(NvHandle hClient, NvHandle hObject, NvU32 cmd) +{ + NV_PRINTF(LEVEL_INFO, "cache lookup for 0x%x 0x%x 0x%x\n", hClient, hObject, cmd); + portSyncMutexAcquire(RmapiControlCache.mtx); + RmapiControlCacheEntry* entry = multimapFindItem(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject), cmd); + portSyncMutexRelease(RmapiControlCache.mtx); + NV_PRINTF(LEVEL_INFO, "cache entry for 0x%x 0x%x 0x%x: entry 0x%p\n", hClient, hObject, cmd, entry); + if (entry) + return entry->params; + return NULL; +} + +NV_STATUS rmapiControlCacheSet +( + NvHandle hClient, + NvHandle hObject, + NvU32 cmd, + void* params, + NvU32 paramsSize +) +{ + portSyncMutexAcquire(RmapiControlCache.mtx); + NV_STATUS status = NV_OK; + RmapiControlCacheEntry* entry = multimapFindItem(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject), cmd); + CachedCallParamsSubmap* insertedSubmap = NULL; + + if (!entry) + { + if (!multimapFindSubmap(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject))) + { + insertedSubmap = multimapInsertSubmap(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject)); + if (!insertedSubmap) + { + status = NV_ERR_NO_MEMORY; + goto 
done; + } + } + + entry = multimapInsertItemNew(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject), cmd); + } + + if (!entry) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + entry->params = portMemAllocNonPaged(paramsSize); + if (!entry->params) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + portMemCopy(entry->params, paramsSize, params, paramsSize); + +done: + if (status != NV_OK) + { + /* To avoid leaking memory, remove the newly inserted empty submap and entry */ + if (entry) + { + portMemFree(entry->params); + multimapRemoveItem(&RmapiControlCache.cachedCallParams, entry); + } + + if (insertedSubmap) + multimapRemoveSubmap(&RmapiControlCache.cachedCallParams, insertedSubmap); + } + + portSyncMutexRelease(RmapiControlCache.mtx); + + return status; +} + +static void freeSubmap(CachedCallParamsSubmap* submap) +{ + /* (Sub)map modification invalidates the iterator, so we have to restart */ + while (NV_TRUE) + { + CachedCallParamsIter it = multimapSubmapIterItems(&RmapiControlCache.cachedCallParams, submap); + + if (multimapItemIterNext(&it)) + { + RmapiControlCacheEntry* entry = it.pValue; + portMemFree(entry->params); + multimapRemoveItem(&RmapiControlCache.cachedCallParams, entry); + } + else + { + break; + } + } + multimapRemoveSubmap(&RmapiControlCache.cachedCallParams, submap); +} + +void rmapiControlCacheFreeClient(NvHandle hClient) +{ + if (!RmapiControlCache.mode) + return; + + portSyncMutexAcquire(RmapiControlCache.mtx); + while (NV_TRUE) + { + CachedCallParamsSubmap* start = multimapFindSubmapGEQ(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, 0)); + CachedCallParamsSubmap* end = multimapFindSubmapLEQ(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, NV_U32_MAX)); + + if (!start || !end || + keyToClient(multimapSubmapKey(&RmapiControlCache.cachedCallParams, start)) != hClient || + keyToClient(multimapSubmapKey(&RmapiControlCache.cachedCallParams, end)) != hClient) + { + break; + } + + CachedCallParamsSupermapIter it = multimapSubmapIterRange(&RmapiControlCache.cachedCallParams, start, end); + + if (multimapSubmapIterNext(&it)) + { + CachedCallParamsSubmap* submap = it.pValue; + freeSubmap(submap); + } + else + { + break; + } + } + portSyncMutexRelease(RmapiControlCache.mtx); +} + +void rmapiControlCacheFreeObject(NvHandle hClient, NvHandle hObject) +{ + CachedCallParamsSubmap* submap; + + if (!RmapiControlCache.mode) + return; + + portSyncMutexAcquire(RmapiControlCache.mtx); + + submap = multimapFindSubmap(&RmapiControlCache.cachedCallParams, handlesToKey(hClient, hObject)); + if (submap) + freeSubmap(submap); + + portSyncMutexRelease(RmapiControlCache.mtx); +} + +void rmapiControlCacheFree(void) { + CachedCallParamsIter it; + + if (!RmapiControlCache.mode) + return; + + it = multimapItemIterAll(&RmapiControlCache.cachedCallParams); + while (multimapItemIterNext(&it)) + { + RmapiControlCacheEntry* entry = it.pValue; + portMemFree(entry->params); + } + + multimapDestroy(&RmapiControlCache.cachedCallParams); + portSyncMutexDestroy(RmapiControlCache.mtx); +} diff --git a/src/nvidia/src/kernel/rmapi/rmapi_stubs.c b/src/nvidia/src/kernel/rmapi/rmapi_stubs.c new file mode 100644 index 000000000..dabb2cba4 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_stubs.c @@ -0,0 +1,183 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "rmapi/rmapi.h" + + +static NV_STATUS _rmapiAlloc_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, void *pAllocParams) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiAllocWithHandle_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle hObject, NvU32 hClass, void *pAllocParams) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiAllocWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvU32 hClass, NvP64 pAllocParams, + NvU32 flags, NvP64 pRightsRequested, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFree_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFreeWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFreeClientList_STUB(RM_API *pRmApi, NvHandle *phClientList, NvU32 numClients) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiFreeClientListWithSecInfo_STUB(RM_API *pRmApi, NvHandle *phClientList, + NvU32 numClients, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControl_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + void *pParams, NvU32 paramsSize) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControlWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, NvU32 cmd, + NvP64 pParams, NvU32 paramsSize, NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiControlPrefetch_STUB(RM_API *pRmApi, NvU32 cmd) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiDupObject_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, NvHandle *phObject, + NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiDupObjectWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hParent, + NvHandle *phObject, NvHandle hClientSrc, NvHandle hObjectSrc, NvU32 flags, + API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiShare_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy) +{ + return NV_ERR_NOT_SUPPORTED; +} + 
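For context, a minimal sketch of how this stub table behaves (illustrative only; the probe function, handle values, and command are hypothetical and not part of this change): after rmapiInitStubInterface(), defined at the end of this file, has filled in an RM_API instance, every entry point fails uniformly, so a caller can treat NV_ERR_NOT_SUPPORTED from any method as "no RM API backend is wired up".

    static void exampleProbeStubInterface(void)
    {
        RM_API    rmApi;
        NV_STATUS status;

        // Install the stub implementations defined in this file.
        rmapiInitStubInterface(&rmApi);

        // Any call through the table fails the same way; the handles and
        // command below are placeholders, since the stubs ignore them.
        status = rmApi.Control(&rmApi, NV01_NULL_OBJECT, NV01_NULL_OBJECT,
                               0, NULL, 0);
        NV_ASSERT(status == NV_ERR_NOT_SUPPORTED);
    }
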
+static NV_STATUS _rmapiShareWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapToCpu_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, void **ppCpuVirtAddr, NvU32 flags) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapToCpuWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvU64 offset, NvU64 length, NvP64 *ppCpuVirtAddr, NvU32 flags, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapFromCpu_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, void *pLinearAddress, + NvU32 flags, NvU32 ProcessId) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapFromCpuWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemory, + NvP64 pLinearAddress, NvU32 flags, NvU32 ProcessId, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMap_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiMapWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU64 offset, NvU64 length, NvU32 flags, NvU64 *pDmaOffset, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmap_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU32 flags, NvU64 dmaOffset) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static NV_STATUS _rmapiUnmapWithSecInfo_STUB(RM_API *pRmApi, NvHandle hClient, NvHandle hDevice, NvHandle hMemCtx, NvHandle hMemory, + NvU32 flags, NvU64 dmaOffset, API_SECURITY_INFO *pSecInfo) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void rmapiInitStubInterface(RM_API *pRmApi) +{ + portMemSet(pRmApi, 0, sizeof(*pRmApi)); + + pRmApi->Alloc = _rmapiAlloc_STUB; + pRmApi->AllocWithHandle = _rmapiAllocWithHandle_STUB; + pRmApi->AllocWithSecInfo = _rmapiAllocWithSecInfo_STUB; + pRmApi->Free = _rmapiFree_STUB; + pRmApi->FreeWithSecInfo = _rmapiFreeWithSecInfo_STUB; + pRmApi->FreeClientList = _rmapiFreeClientList_STUB; + pRmApi->FreeClientListWithSecInfo = _rmapiFreeClientListWithSecInfo_STUB; + pRmApi->Control = _rmapiControl_STUB; + pRmApi->ControlWithSecInfo = _rmapiControlWithSecInfo_STUB; + pRmApi->ControlPrefetch = _rmapiControlPrefetch_STUB; + pRmApi->DupObject = _rmapiDupObject_STUB; + pRmApi->DupObjectWithSecInfo = _rmapiDupObjectWithSecInfo_STUB; + pRmApi->Share = _rmapiShare_STUB; + pRmApi->ShareWithSecInfo = _rmapiShareWithSecInfo_STUB; + pRmApi->MapToCpu = _rmapiMapToCpu_STUB; + pRmApi->MapToCpuWithSecInfo = _rmapiMapToCpuWithSecInfo_STUB; + pRmApi->UnmapFromCpu = _rmapiUnmapFromCpu_STUB; + pRmApi->UnmapFromCpuWithSecInfo = _rmapiUnmapFromCpuWithSecInfo_STUB; + pRmApi->Map = _rmapiMap_STUB; + pRmApi->MapWithSecInfo = _rmapiMapWithSecInfo_STUB; + pRmApi->Unmap = _rmapiUnmap_STUB; + pRmApi->UnmapWithSecInfo = _rmapiUnmapWithSecInfo_STUB; +} diff --git a/src/nvidia/src/kernel/rmapi/rmapi_utils.c b/src/nvidia/src/kernel/rmapi/rmapi_utils.c new file mode 100644 index 000000000..7090b1dfb --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rmapi_utils.c @@ -0,0 +1,147 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA 
CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "rmapi/rmapi_utils.h" +#include "rmapi/rs_utils.h" +#include "resource_desc.h" + +#include "gpu/gpu.h" +#include "gpu_mgr/gpu_mgr.h" + +#include "class/cl0080.h" +#include "class/cl2080.h" + +NV_STATUS +rmapiutilAllocClientAndDeviceHandles +( + RM_API *pRmApi, + OBJGPU *pGpu, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +) +{ + NV_STATUS rmStatus; + NV0080_ALLOC_PARAMETERS nv0080AllocParams; + NV2080_ALLOC_PARAMETERS nv2080AllocParams; + NvHandle hClient = NV01_NULL_OBJECT; + NvHandle hDevice = NV01_NULL_OBJECT; + NvHandle hSubDevice = NV01_NULL_OBJECT; + + NV_ASSERT_OR_RETURN(phClient != NULL, NV_ERR_INVALID_ARGUMENT); + + // Allocate a client + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_NULL_OBJECT, + NV01_ROOT, + &hClient), + cleanup); + + // Allocate a device + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + serverutilGenResourceHandle(hClient, &hDevice), + cleanup); + + portMemSet(&nv0080AllocParams, 0, sizeof(nv0080AllocParams)); + nv0080AllocParams.deviceId = gpuGetDeviceInstance(pGpu); + nv0080AllocParams.hClientShare = hClient; + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + hClient, + hClient, + hDevice, + NV01_DEVICE_0, + &nv0080AllocParams), + cleanup); + + // Allocate a subDevice + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + serverutilGenResourceHandle(hClient, &hSubDevice), + cleanup); + + portMemSet(&nv2080AllocParams, 0, sizeof(nv2080AllocParams)); + nv2080AllocParams.subDeviceId = gpumgrGetSubDeviceInstanceFromGpu(pGpu); + + NV_CHECK_OK_OR_GOTO(rmStatus, LEVEL_ERROR, + pRmApi->AllocWithHandle(pRmApi, + hClient, + hDevice, + hSubDevice, + NV20_SUBDEVICE_0, + &nv2080AllocParams), + cleanup); + + *phClient = hClient; + if (phDevice != NULL) + *phDevice = hDevice; + if (phSubDevice != NULL) + *phSubDevice = hSubDevice; + + return rmStatus; + +cleanup: + rmapiutilFreeClientAndDeviceHandles(pRmApi, &hClient, &hDevice, &hSubDevice); + return rmStatus; +} + +void +rmapiutilFreeClientAndDeviceHandles +( + RM_API *pRmApi, + NvHandle *phClient, + NvHandle *phDevice, + NvHandle *phSubDevice +) +{ + NV_ASSERT_OR_RETURN_VOID(phClient != NULL); + NV_CHECK_OR_RETURN_VOID(LEVEL_ERROR, *phClient != NV01_NULL_OBJECT); + + if (phSubDevice != NULL && *phSubDevice != NV01_NULL_OBJECT) + { + 
pRmApi->Free(pRmApi, *phClient, *phSubDevice); + *phSubDevice = NV01_NULL_OBJECT; + } + + if (phDevice != NULL && *phDevice != NV01_NULL_OBJECT) + { + pRmApi->Free(pRmApi, *phClient, *phDevice); + *phDevice = NV01_NULL_OBJECT; + } + + pRmApi->Free(pRmApi, *phClient, *phClient); + *phClient = NV01_NULL_OBJECT; +} + +NvBool +rmapiutilIsExternalClassIdInternalOnly +( + NvU32 externalClassId +) +{ + RS_RESOURCE_DESC *pResDesc = RsResInfoByExternalClassId(externalClassId); + NV_ASSERT_OR_RETURN(pResDesc != NULL, NV_FALSE); + return (pResDesc->flags & RS_FLAGS_INTERNAL_ONLY) != 0x0; +} diff --git a/src/nvidia/src/kernel/rmapi/rpc_common.c b/src/nvidia/src/kernel/rmapi/rpc_common.c new file mode 100644 index 000000000..705b77452 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rpc_common.c @@ -0,0 +1,122 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +//****************************************************************************** +// +// Description: +// This file implements RPC code common to all builds. 
+// +//****************************************************************************** + +#include "gpu/gpu.h" +#include "vgpu/rpc.h" +#include "os/os.h" + +#include "vgpu/vgpu_version.h" +#include "gpu/gsp/kernel_gsp.h" + +#define RPC_STRUCTURES +#define RPC_GENERIC_UNION +#include "g_rpc-structures.h" +#undef RPC_STRUCTURES +#undef RPC_GENERIC_UNION + +#define RPC_MESSAGE_STRUCTURES +#define RPC_MESSAGE_GENERIC_UNION +#include "g_rpc-message-header.h" +#undef RPC_MESSAGE_STRUCTURES +#undef RPC_MESSAGE_GENERIC_UNION + +void rpcRmApiSetup(OBJGPU *pGpu) +{ + // + // Physical RMAPI is already initialized for monolithic, and this function + // just needs to overwrite individual methods as needed + // + RM_API *pRmApi = GPU_GET_PHYSICAL_RMAPI(pGpu); + PORT_UNREFERENCED_VARIABLE(pRmApi); + + if (IS_VIRTUAL(pGpu)) + { + // none for now + } + else if (IS_GSP_CLIENT(pGpu)) + { + pRmApi->Control = rpcRmApiControl_GSP; + pRmApi->AllocWithHandle = rpcRmApiAlloc_GSP; + pRmApi->Free = rpcRmApiFree_GSP; + pRmApi->DupObject = rpcRmApiDupObject_GSP; + } +} + +OBJRPC *initRpcObject(OBJGPU *pGpu) +{ + OBJRPC *pRpc = NULL; + + pRpc = portMemAllocNonPaged(sizeof(OBJRPC)); + if (pRpc == NULL) + { + NV_PRINTF(LEVEL_ERROR, + "cannot allocate memory for OBJRPC (instance %d)\n", + gpuGetInstance(pGpu)); + return NULL; + } + + // VIRTUALIZATION is disabled on DCE. Only run the below code on VGPU and GSP. + rpcSetIpVersion(pGpu, pRpc, + RPC_VERSION_FROM_VGX_VERSION(VGX_MAJOR_VERSION_NUMBER, + VGX_MINOR_VERSION_NUMBER)); + rpcObjIfacesSetup(pRpc); + + rpcRmApiSetup(pGpu); + + return pRpc; +} + +NV_STATUS rpcWriteCommonHeader(OBJGPU *pGpu, OBJRPC *pRpc, NvU32 func, NvU32 paramLength) +{ + NV_STATUS status = NV_OK; + + if (!pRpc) + { + NV_PRINTF(LEVEL_ERROR, + "NVRM_RPC: called with NULL pRpc. Function %d.\n", func); + NV_ASSERT(0); + return NV_ERR_INVALID_STATE; + } + + portMemSet(pRpc->message_buffer, 0, pRpc->maxRpcSize); + + vgpu_rpc_message_header_v->header_version = DRF_DEF(_VGPU, _MSG_HEADER_VERSION, _MAJOR, _TOT) | + DRF_DEF(_VGPU, _MSG_HEADER_VERSION, _MINOR, _TOT); + vgpu_rpc_message_header_v->signature = NV_VGPU_MSG_SIGNATURE_VALID; + vgpu_rpc_message_header_v->rpc_result = NV_VGPU_MSG_RESULT_RPC_PENDING; + vgpu_rpc_message_header_v->rpc_result_private = NV_VGPU_MSG_RESULT_RPC_PENDING; + { + vgpu_rpc_message_header_v->u.spare = NV_VGPU_MSG_UNION_INIT; + } + vgpu_rpc_message_header_v->function = func; + vgpu_rpc_message_header_v->length = sizeof(rpc_message_header_v) + paramLength; + + return status; +} diff --git a/src/nvidia/src/kernel/rmapi/rs_utils.c b/src/nvidia/src/kernel/rmapi/rs_utils.c new file mode 100644 index 000000000..da5bb7550 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/rs_utils.c @@ -0,0 +1,383 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "rmapi/rs_utils.h" +#include "rmapi/rmapi.h" +#include "core/locks.h" + +NV_STATUS +serverutilGetResourceRef +( + NvHandle hClient, + NvHandle hObject, + RsResourceRef **ppResourceRef +) +{ + RsResourceRef *pResourceRef; + RsClient *pRsClient; + NV_STATUS status; + + *ppResourceRef = NULL; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NV_ERR_INVALID_CLIENT; + + status = clientGetResourceRef(pRsClient, hObject, &pResourceRef); + if (status != NV_OK) + return status; + + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +serverutilGetResourceRefWithType +( + NvHandle hClient, + NvHandle hObject, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + if (serverutilGetResourceRef(hClient, hObject, ppResourceRef) != NV_OK) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (!objDynamicCastById((*ppResourceRef)->pResource, internalClassId)) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} + +NV_STATUS +serverutilGetResourceRefWithParent +( + NvHandle hClient, + NvHandle hParent, + NvHandle hObject, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + NvHandle hFoundParent; + + if (serverutilGetResourceRef(hClient, hObject, ppResourceRef) != NV_OK) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + hFoundParent = (*ppResourceRef)->pParentRef ? (*ppResourceRef)->pParentRef->hResource : 0; + + if (!objDynamicCastById((*ppResourceRef)->pResource, internalClassId) || + hFoundParent != hParent) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + return NV_OK; +} + +NV_STATUS +serverutilGetClientUnderLock +( + NvHandle hClient, + RmClient **ppClient +) +{ + NV_STATUS status; + RsClient *pRsClient; + RmClient *pClient; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return status; + + pClient = dynamicCast(pRsClient, RmClient); + NV_ASSERT(pClient != NULL); + + if (ppClient) + *ppClient = pClient; + + return NV_OK; +} + +RmClient +**serverutilGetFirstClientUnderLock +( + void +) +{ + RmClient **ppClient; + + // + // Resource server's client list is not protected by any RM locks + // so, as a WAR, we access a lock-protected shadow client list. This avoids + // the race condition where a client is freed while a DPC is iterating + // through the client list. + // + ppClient = listHead(&g_clientListBehindGpusLock); + if (NULL == ppClient) + return NULL; + + return ppClient; +} + +RmClient +**serverutilGetNextClientUnderLock +( + RmClient **ppClient +) +{ + // + // Resource server's client list is not protected by any RM locks + // so, as a WAR, we access a lock-protected shadow client list. This avoids + // the race condition where a client is freed while a DPC is iterating + // through the client list. 
+ // + ppClient = listNext(&g_clientListBehindGpusLock, ppClient); + if (NULL == ppClient) + return NULL; + + return ppClient; +} + +RsResourceRef * +serverutilFindChildRefByType +( + NvHandle hClient, + NvHandle hParent, + NvU32 internalClassId, + NvBool bExactMatch +) +{ + NV_STATUS status; + RsClient *pRsClient; + RsResourceRef *pResourceRef; + RsResourceRef *pParentRef; + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return NULL; + + status = clientGetResourceRef(pRsClient, hParent, &pParentRef); + if (status != NV_OK) + { + return NULL; + } + + status = refFindChildOfType(pParentRef, internalClassId, bExactMatch, &pResourceRef); + if (status != NV_OK) + { + return NULL; + } + + return pResourceRef; +} + +RS_ITERATOR +serverutilRefIter +( + NvHandle hClient, + NvHandle hScopedObject, + NvU32 internalClassId, + RS_ITER_TYPE iterType, + NvBool bExactMatch +) +{ + NV_STATUS status; + RsClient *pRsClient; + RsResourceRef *pScopedRef = NULL; + RS_ITERATOR it; + + portMemSet(&it, 0, sizeof(it)); + + status = serverGetClientUnderLock(&g_resServ, hClient, &pRsClient); + if (status != NV_OK) + return it; + + if (hScopedObject != NV01_NULL_OBJECT) + { + status = clientGetResourceRef(pRsClient, hScopedObject, &pScopedRef); + if (status != NV_OK) + { + return it; + } + } + + return clientRefIter(pRsClient, pScopedRef, internalClassId, iterType, bExactMatch); +} + +NvBool +serverutilValidateNewResourceHandle +( + NvHandle hClient, + NvHandle hObject +) +{ + RmClient *pClient; + + return ((NV_OK == serverutilGetClientUnderLock(hClient, &pClient)) && + (NV_OK == clientValidateNewResourceHandle(staticCast(pClient, RsClient), hObject, NV_TRUE))); +} + +NV_STATUS +serverutilGenResourceHandle +( + NvHandle hClient, + NvHandle *returnHandle +) +{ + NV_STATUS status; + RmClient *pClient; + + // LOCK TEST: we should have the API lock here + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (NV_OK != serverutilGetClientUnderLock(hClient, &pClient)) + return NV_ERR_INVALID_CLIENT; + + status = clientGenResourceHandle(staticCast(pClient, RsClient), returnHandle); + return status; +} + +RS_SHARE_ITERATOR +serverutilShareIter +( + NvU32 internalClassId +) +{ + return serverShareIter(&g_resServ, internalClassId); +} + +NvBool +serverutilShareIterNext +( + RS_SHARE_ITERATOR* pIt +) +{ + return serverShareIterNext(pIt); +} + +NV_STATUS +serverutilGetClientHandlesFromPid +( + NvU32 procID, + NvU32 subProcessID, + ClientHandlesList *pClientList +) +{ + RmClient **ppClient; + RmClient *pClient; + + // If the list passed in has old elements, lets clear its elements. 
+ if (listCount(pClientList)) + { + // Clear & free nodes in temp list + listDestroy(pClientList); + } + + for (ppClient = serverutilGetFirstClientUnderLock(); + ppClient; + ppClient = serverutilGetNextClientUnderLock(ppClient)) + { + RsClient *pRsClient; + + pClient = *ppClient; + pRsClient = staticCast(pClient, RsClient); + + if ((pClient->ProcID == procID) && + (pClient->SubProcessID == subProcessID)) + { + if (listAppendValue(pClientList, + &pRsClient->hClient) == NULL) + { + listClear(pClientList); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + } + } + + return NV_OK; +} + +NvBool +serverutilMappingFilterCurrentUserProc +( + RsCpuMapping *pMapping +) +{ + return (!pMapping->pPrivate->bKernel && + (pMapping->processId == osGetCurrentProcess())); +} + +NvBool +serverutilMappingFilterKernel +( + RsCpuMapping *pMapping +) +{ + return pMapping->pPrivate->bKernel; +} + + +NV_STATUS +serverutilAcquireClient +( + NvHandle hClient, + LOCK_ACCESS_TYPE access, + RmClient **ppClient +) +{ + RsClient *pRsClient; + RmClient *pClient; + + // LOCK TEST: we should have the API lock here + LOCK_ASSERT_AND_RETURN(rmApiLockIsOwner()); + + if (NV_OK != serverAcquireClient(&g_resServ, hClient, access, &pRsClient)) + return NV_ERR_INVALID_CLIENT; + + pClient = dynamicCast(pRsClient, RmClient); + if (pClient == NULL) + { + serverReleaseClient(&g_resServ, access, pRsClient); + return NV_ERR_INVALID_CLIENT; + } + + *ppClient = pClient; + return NV_OK; +} + +void +serverutilReleaseClient +( + LOCK_ACCESS_TYPE access, + RmClient *pClient +) +{ + serverReleaseClient(&g_resServ, access, staticCast(pClient, RsClient)); +} diff --git a/src/nvidia/src/kernel/rmapi/sharing.c b/src/nvidia/src/kernel/rmapi/sharing.c new file mode 100644 index 000000000..a99a03c33 --- /dev/null +++ b/src/nvidia/src/kernel/rmapi/sharing.c @@ -0,0 +1,418 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "rmapi/rmapi.h" +#include "entry_points.h" +#include "core/thread_state.h" +#include "rmapi/rs_utils.h" +#include "resserv/rs_access_map.h" +#include "resource_desc.h" +#include "class/cl0071.h" + +#include "gpu/device/device.h" +#include "gpu/subdevice/subdevice.h" + +static NV_STATUS +_RmDupObject +( + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo, + RS_LOCK_INFO *pLockInfo +) +{ + NV_STATUS rmStatus; + RS_RES_DUP_PARAMS params; + + NV_ASSERT_OR_RETURN(phObject != NULL, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + portMemSet(¶ms, 0, sizeof(params)); + params.hClientSrc = hClientSrc; + params.hResourceSrc = hObjectSrc; + params.hClientDst = hClient; + params.hParentDst = hParent; + params.hResourceDst = *phObject; + params.pSecInfo = pSecInfo; + params.flags = flags; + params.pLockInfo = pLockInfo; + + rmStatus = serverCopyResource(&g_resServ, ¶ms); + + if (rmStatus == NV_OK) + *phObject = params.hResourceDst; + + return rmStatus; +} + +NV_STATUS +rmapiDupObject +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->DupObjectWithSecInfo(pRmApi, hClient, hParent, phObject, hClientSrc, hObjectSrc, + flags, &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiDupObjectWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: hClient:0x%x hParent:0x%x hObject:0x%x\n", + hClient, hParent, *phObject); + NV_PRINTF(LEVEL_INFO, + "Nv04DupObject: hClientSrc:0x%x hObjectSrc:0x%x flags:0x%x\n", + hClientSrc, hObjectSrc, flags); + + NVRM_TRACE_API('DUPH', hClient, hObject, hObjectSrc); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + { + return status; + } + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + if (pRmApi->bApiLockInternal) + { + // + // DupObject requires taking two client locks, but internal calls have probably + // already taken one client lock. Taking a second would require unlocking + // the first lock in the middle of the API call, which could mess with the client. + // In such cases, we need an exclusive API lock, then skip taking client locks. 
+ // + if (lockInfo.pClient != NULL) + { + NV_ASSERT(rmapiLockIsOwner()); + // RS-TODO assert RW api lock + lockInfo.flags |= RM_LOCK_FLAGS_NO_CLIENT_LOCK; + } + } + + status = _RmDupObject(hClient, hParent, phObject, hClientSrc, hObjectSrc, flags, pSecInfo, &lockInfo); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "...handle dup complete\n"); + NVRM_TRACE('DUPH'); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04DupObject: dup failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_WARNING, + "Nv04DupObject: hClient:0x%x hParent:0x%x hObject:0x%x\n", + hClient, hParent, *phObject); + NV_PRINTF(LEVEL_WARNING, + "Nv04DupObject: hClientSrc:0x%x hObjectSrc:0x%x flags:0x%x\n", + hClientSrc, hObjectSrc, flags); + } + + return status; +} + +NV_STATUS +rmapiDupObjectWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hParent, + NvHandle *phObject, + NvHandle hClientSrc, + NvHandle hObjectSrc, + NvU32 flags, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiDupObjectWithSecInfo(pRmApi, hClient, hParent, phObject, hClientSrc, hObjectSrc, flags, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + + +static NV_STATUS +_RmShare +( + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo, + RS_LOCK_INFO *pLockInfo +) +{ + RS_RES_SHARE_PARAMS params; + portMemSet(¶ms, 0, sizeof(params)); + params.hClient = hClient; + params.hResource = hObject; + params.pSharePolicy = pSharePolicy; + params.pSecInfo = pSecInfo; + params.pLockInfo = pLockInfo; + + return serverShareResourceAccess(&g_resServ, ¶ms); +} + +NV_STATUS +rmapiShare +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy +) +{ + if (!pRmApi->bHasDefaultSecInfo) + return NV_ERR_NOT_SUPPORTED; + + return pRmApi->ShareWithSecInfo(pRmApi, hClient, hObject, pSharePolicy, + &pRmApi->defaultSecInfo); +} + +NV_STATUS +rmapiShareWithSecInfo +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + RM_API_CONTEXT rmApiContext = {0}; + RS_LOCK_INFO lockInfo; + + NV_PRINTF(LEVEL_INFO, + "Nv04Share: hClient:0x%x hObject:0x%x pSharePolicy:%p\n", + hClient, hObject, pSharePolicy); + + NVRM_TRACE_API('SHAR', hClient, hObject, 0); + + status = rmapiPrologue(pRmApi, &rmApiContext); + if (status != NV_OK) + { + return status; + } + + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + rmapiInitLockInfo(pRmApi, hClient, &lockInfo); + + // + // Currently, Share should have no internal callers. + // If this changes and one takes a client lock, this could mess with + // Share since it may require two clients when sharing with SHARE_TYPE_CLIENT. 
+ // Assert this for now, handle it properly if this ever changes (See DupObject) + // + NV_ASSERT (lockInfo.pClient == NULL); + + status = _RmShare(hClient, hObject, pSharePolicy, pSecInfo, &lockInfo); + + rmapiEpilogue(pRmApi, &rmApiContext); + + if (status == NV_OK) + { + NV_PRINTF(LEVEL_INFO, "...resource share complete\n"); + NVRM_TRACE('SHAR'); + } + else + { + NV_PRINTF(LEVEL_WARNING, + "Nv04Share: share failed; status: %s (0x%08x)\n", + nvstatusToString(status), status); + NV_PRINTF(LEVEL_WARNING, + "Nv04Share: hClient:0x%x hObject:0x%x pSharePolicy:%p\n", + hClient, hObject, pSharePolicy); + } + + return status; +} + +NV_STATUS +rmapiShareWithSecInfoTls +( + RM_API *pRmApi, + NvHandle hClient, + NvHandle hObject, + RS_SHARE_POLICY *pSharePolicy, + API_SECURITY_INFO *pSecInfo +) +{ + THREAD_STATE_NODE threadState; + NV_STATUS status; + + threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE); + + status = rmapiShareWithSecInfo(pRmApi, hClient, hObject, pSharePolicy, pSecInfo); + + threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE); + + return status; +} + +NV_STATUS +serverCopyResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_COPY)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverShareResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + NV_ASSERT_OR_RETURN(pAccess != NULL, NV_ERR_INVALID_ARGUMENT); + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_SHARE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverInitGlobalSharePolicies +( + RsServer *pServer +) +{ + RS_SHARE_POLICY sharePolicy; + + // Global default policies, these can be overridden by clients/objects + portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + sharePolicy.type = RS_SHARE_TYPE_PID; + + if (listAppendValue(&pServer->defaultInheritedSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + // Internal share policies, these can't be overridden + + // SMC dup policy: Do not allow duping across different SMC partition + portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + sharePolicy.type = RS_SHARE_TYPE_SMC_PARTITION; + sharePolicy.action = RS_SHARE_ACTION_FLAG_REQUIRE; + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + + if (listAppendValue(&pServer->globalInternalSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + // FM dup policy: Allow FM to dup any user-mode client's resource. 
+ portMemSet(&sharePolicy, 0, sizeof(sharePolicy)); + sharePolicy.type = RS_SHARE_TYPE_FM_CLIENT; + RS_ACCESS_MASK_ADD(&sharePolicy.accessMask, RS_ACCESS_DUP_OBJECT); + + if (listAppendValue(&pServer->globalInternalSharePolicyList, + &sharePolicy) == NULL) + return NV_ERR_NO_MEMORY; + + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams) +{ + RS_RESOURCE_DESC *pResDesc; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + + if (pParams->pSrcRef == NULL) + return NV_ERR_INVALID_STATE; + + // Special cases; TODO move these to resource_list.h + if (pParams->pSrcRef->externalClassId == NV01_MEMORY_SYSTEM_OS_DESCRIPTOR) + { + // Lock all GPUs + return NV_OK; + } + + pResDesc = RsResInfoByExternalClassId(pParams->pSrcRef->externalClassId); + if (pResDesc == NULL) + return NV_ERR_INVALID_OBJECT; + + // Use the same flags from alloc. These should be split out in the future. + if (!(pResDesc->flags & RS_FLAGS_ACQUIRE_GPUS_LOCK_ON_ALLOC)) + { + pLockInfo->flags |= RM_LOCK_FLAGS_NO_GPUS_LOCK; + } + + if (pResDesc->flags & RS_FLAGS_ACQUIRE_GPU_GROUP_LOCK_ON_ALLOC) + { + pLockInfo->flags |= RM_LOCK_FLAGS_GPU_GROUP_LOCK; + } + + pLockInfo->pContextRef = pParams->pSrcRef->pParentRef; + + return NV_OK; +} diff --git a/src/nvidia/src/lib/base_utils.c b/src/nvidia/src/lib/base_utils.c new file mode 100644 index 000000000..9a02e3db9 --- /dev/null +++ b/src/nvidia/src/lib/base_utils.c @@ -0,0 +1,358 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file + * @brief Common utility code that has no natural home + */ + + +#include "lib/base_utils.h" +#include "os/os.h" + +// +// Log2 approximation that assumes a power of 2 number passed in. +// +NvU32 nvLogBase2(NvU64 val) +{ + NvU32 i; + + NV_ASSERT(((val)&(val-1)) == 0); + + for (i = 0; i < 64; i++) + { + if ((1ull << i) == val) + { + break; + } + } + + NV_ASSERT(i < 64); + + return i; +} + + +/** + * @brief Finds the lowest unset bit of a given bitfield. + * + * Returns the lowest value of X such that the expression + * pBitField[X/32] & (1<<(X%32)) is zero. + * + * If all bits are set, returns numElements*32. + * + * @param[in] pBitField + * @param[in] numElements size of array pBitField + * + * @return the lowest zero bit, numElements*32 otherwise. 
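+ *
+ * Illustrative example: with a single word pBitField32[0] == 0x0000000F,
+ * bits 0..3 are set and bit 4 is the lowest clear bit, so
+ * nvBitFieldLSZero(pBitField32, 1) returns 4.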
+ */ +NvU32 nvBitFieldLSZero(NvU32 *pBitField32, NvU32 numElements) +{ + NvU32 i; + + for (i = 0; i < numElements; ++i) + { + NvU32 temp = ~pBitField32[i]; + if (temp) + { + LOWESTBITIDX_32(temp); + return temp + i * sizeof(NvU32) * 8; + } + } + + return numElements*32; +} + +/** + * @brief Finds the highest unset bit of a given bitfield. + * + * Returns the highest value of X such that the expression + * pBitField[X/32] & (1<<(X%32)) is zero. + * + * If all bits are set, returns numElements*32. + * + * @param[in] pBitField + * @param[in] numBits must be a multiple of 32. + * + * @return The highest zero bit, numElements*32 otherwise. + */ +NvU32 nvBitFieldMSZero(NvU32 *pBitField32, NvU32 numElements) +{ + NvU32 i = 0, j = numElements - 1; + + while (i++ < numElements) + { + NvU32 temp = ~pBitField32[j]; + if (temp) + { + HIGHESTBITIDX_32(temp); + return temp + j * sizeof(NvU32) * 8; + } + j--; + } + + return numElements * 32; +} + +NvBool nvBitFieldTest(NvU32 *pBitField, NvU32 numElements, NvU32 bit) +{ + return (bit < numElements*32 ? (NvBool) !!(pBitField[bit/32] & NVBIT(bit%32)) : NV_FALSE); +} + +void nvBitFieldSet(NvU32 *pBitField, NvU32 numElements, NvU32 bit, NvBool val) +{ + NV_ASSERT(bit < numElements*32); + pBitField[bit/32] = (pBitField[bit/32] & ~NVBIT(bit%32)) | (val ? NVBIT(bit%32) : 0); +} + +// +// Sort an array of n elements/structures. +// Example: +// NvBool integerLess(void * a, void * b) +// { +// return *(NvU32 *)a < *(NvU32 *)b; +// } +// NvU32 array[1000]; +// ... +// NvU32 temp[1000]; +// nvMergeSort(array, arrsize(array), temp, sizeof(NvU32), integerLess); +// +#define EL(n) ((char *)array+(n)*elementSize) +void nvMergeSort(void * array, NvU32 n, void * tempBuffer, NvU32 elementSize, NvBool (*less)(void *, void *)) +{ + char * mergeArray = (char *)tempBuffer; + NvU32 m, i; + + // + // Bottom-up merge sort divides the sort into a sequence of passes. + // In each pass, the array is divided into blocks of size 'm'. + // Every pair of two adjacent blocks are merged (in place). 
+ // The next pass is started with twice the block size + // + for (m = 1; m<=n; m*=2) + { + for (i = 0; i<(n-m); i+=2*m) + { + NvU32 loMin = i; + NvU32 lo = loMin; + NvU32 loMax = i+m; + NvU32 hi = i+m; + NvU32 hiMax = NV_MIN(n,i+2*m); + + char * dest = mergeArray; + + // + // Standard merge of [lo, loMax) and [hi, hiMax) + // + while (1) + { + if (less(EL(lo), EL(hi))) + { + portMemCopy(dest, elementSize, EL(lo), elementSize); + lo++; + dest+=elementSize; + if (lo >= loMax) + break; + } + else + { + portMemCopy(dest, elementSize, EL(hi), elementSize); + hi++; + dest+=elementSize; + if (hi >= hiMax) + break; + } + } + + // + // Copy remaining items (only one of these loops can run) + // + while (lo < loMax) + { + portMemCopy(dest, elementSize,EL(lo), elementSize); + dest+=elementSize; + lo++; + } + + while (hi < hiMax) + { + portMemCopy(dest, elementSize,EL(hi), elementSize); + dest+=elementSize; + hi++; + } + + // + // Copy merged data back over array + // + portMemCopy(EL(loMin), (NvU32)(dest - mergeArray), mergeArray, (NvU32)(dest - mergeArray)); + } + } +} + +#define RANGE(val,low,hi) (((val) >= (low)) && ((val) <= (hi))) + +// Do not conflict with libc naming +NvS32 nvStrToL +( + NvU8* pStr, + NvU8** pEndStr, + NvS32 base, + NvU8 stopChar, + NvU32 *numFound +) +{ + NvU32 num; + NvU32 newnum; + + *numFound = 0; + + // scan for start of number + for (;*pStr;pStr++) + { + if (RANGE(*pStr, '0', '9')) + { + *numFound = 1; + break; + } + else if ((BASE16 == base) && (RANGE(*pStr,'a','f'))) + { + *numFound = 1; + break; + } + else if ((BASE16 == base) && (RANGE(*pStr,'A', 'F'))) + { + *numFound = 1; + break; + } + else if(*pStr == stopChar) + { + break; + } + } + + // convert number + num = 0; + for (;*pStr;pStr++) + { + if (RANGE(*pStr, '0', '9')) + { + newnum = *pStr - '0'; + } + else if ((BASE16 == base) && (RANGE(*pStr,'a','f'))) + { + newnum = *pStr - 'a' + 10; + } + else if ((BASE16 == base) && (RANGE(*pStr,'A', 'F'))) + { + newnum = *pStr - 'A' + 10; + } + else + break; + + num *= base; + num += newnum; + + } + + *pEndStr = pStr; + + return num; +} + +/** + * @brief Returns MSB of input as a bit mask + * + * @param x + * @return MSB of x + */ +NvU64 +nvMsb64(NvU64 x) +{ + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); + x |= (x >> 32); + // + // At this point, x has same MSB as input, but with all 1's below it, clear + // everything but MSB + // + return(x & ~(x >> 1)); +} + +/** + * @brief Convert unsigned long int to char* + * + * @param value to be converted to string + * @param *string is the char array to be have the converted data + * @param radix denoted the base of the operation : hex(16),octal(8)..etc + * @return the converted string + */ +char * nvU32ToStr(NvU32 value, char *string, NvU32 radix) +{ + char tmp[33]; + char *tp = tmp; + NvS32 i; + NvU32 v = value; + char *sp; + + if (radix > 36 || radix <= 1) + { + return 0; + } + + while (v || tp == tmp) + { + i = v % radix; + v = v / radix; + if (i < 10) + *tp++ = (char)(i + '0'); + else + *tp++ = (char)(i + 'a' - 10); + } + + sp = string; + + while (tp > tmp) + *sp++ = *--tp; + *sp = 0; + + return string; +} + + +/** + * @brief Get the string length + * + * @param string for which length has to be calculated + * @return the string length + */ +NvU32 nvStringLen(const char * str) +{ + NvU32 i = 0; + while (str[i++] != '\0') + ; + return i - 1; +} + diff --git a/src/nvidia/src/lib/protobuf/prbenc.c b/src/nvidia/src/lib/protobuf/prbenc.c new file mode 100644 index 000000000..0eaa67601 --- 
/dev/null +++ b/src/nvidia/src/lib/protobuf/prbenc.c @@ -0,0 +1,1068 @@ +/* + * Implementation of the protocol buffers encoder. + * + * Based on code taken from + * https://code.google.com/archive/p/lwpb/source/default/source + * + * The code there is licensed as Apache 2.0. However, NVIDIA has received the + * code from the original author under MIT license terms. + * + * + * Copyright 2009 Simon Kallweit + * Copyright 2009-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "os/os.h" +#include "lib/protobuf/prb.h" + +static NV_STATUS prbEncAddField(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + PRB_VALUE *value); +static NV_STATUS prbEndPackedField(PRB_ENCODER *encoder); +static NV_STATUS prbStartPackedField(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc); + +#define MSG_RESERVE_BYTES 10 + +// Handy macros to check flags +#define COUNT_FLAG(encoder) ((encoder->flags & PRB_COUNT_ONLY) != 0) +#define FIXED_FLAG(encoder) ((encoder->flags & PRB_FIXED_MODE) != 0) + +/*! + * @brief Initializes a memory buffer. Sets the position to the base address. + * @param [in] buf Memory buffer + * @param [in] data Base address of memory + * @param [in] len Length of memory + * + */ +static void +prbBufInit(PRB_BUF *buf, void *data, NvU32 len) +{ + buf->base = data; + buf->pos = data; + buf->end = &buf->base[len]; +} + +/*! + * @brief frees a buffer + * @param [in] buff Memory buffer + */ + +static void +prbBuffFree(PRB_BUF *buf) +{ + portMemFree(buf->base); + + buf->base = NULL; + buf->pos = NULL; + buf->end = NULL; +} + +/*! + * @brief Returns the number of used bytes in the buffer. + * @param [in] buf Memory buffer + * + * @returns the number of used bytes. + */ +static NvU32 +prbBufUsed(PRB_BUF *buf) +{ + return (NvU32)(buf->pos - buf->base); +} + +/*! + * @brief Returns the number of bytes left in the buffer. + * @param [in] buf Memory buffer + * + * @returns the number of bytes left. + */ +static NvU32 +prbBufLeft(PRB_BUF *buf) +{ + return (NvU32)(buf->end - buf->pos); +} + +// Encoder utilities + +/*! + * @brief Encodes a variable integer in base-128 format. + * See http://code.google.com/apis/protocolbuffers/docs/encoding.html for more + * information. 
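+ * Illustrative example: the value 300 (0x012C) encodes as the two bytes
+ * 0xAC 0x02: the low seven bits with the continuation bit set, followed by
+ * the remaining bits.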
+ * @param [in] buf Memory buffer + * @param [in] varint Value to encode + * @param [in] countOnly Set to just count the bytes + * @returns NV_STATUS + */ + +static +NV_STATUS +encode_varint(PRB_BUF *buf, NvU64 varint, NvBool countOnly) +{ + do { + if (prbBufLeft(buf) < 1) + return NV_ERR_BUFFER_TOO_SMALL; + if (!countOnly) + { + if (varint > 127) { + *buf->pos = (NvU8)(0x80 | (varint & 0x7F)); + } else { + *buf->pos = (NvU8)(varint & 0x7F); + } + } + varint >>= 7; + buf->pos++; + } while (varint); + + return NV_OK; +} + +/** + * Decodes a variable integer in base-128 format. + * See http://code.google.com/apis/protocolbuffers/docs/encoding.html for more + * information. + * @param [in] pBuff Buffer to decode + * @param [in] len Length of input buffer + * @param [out] pUsed Number of bytes used + * @param [out] pData Data value returned + * @return Returns NV_STATUS + */ +static NV_STATUS +decode_varint(NvU8 *pBuff, NvU64 len, NvU32 *pUsed, NvU64 *pData) +{ + NvU32 bitpos; + + if (pBuff == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + *pUsed = 0; + + *pData = 0; + for (bitpos = 0; *pBuff & 0x80 && bitpos < 64; bitpos += 7, pBuff++) + { + *pData |= (NvU64) (*pBuff & 0x7f) << bitpos; + (*pUsed)++; + if (--len < 1) + return NV_ERR_BUFFER_TOO_SMALL; + } + *pData |= (NvU64)(*pBuff & 0x7f) << bitpos; + (*pUsed)++; + + return NV_OK; +} + +/*! + * @brief Encodes a 32 bit integer. + * @param [in] buf Memory buffer + * @param [in] value Value to encode + * @param [in] countOnly Set to just count the bytes + * @returns NV_STATUS + */ +static NV_STATUS +encode_32bit(PRB_BUF *buf, NvU32 value, NvBool countOnly) +{ + if (prbBufLeft(buf) < 4) + return NV_ERR_BUFFER_TOO_SMALL; + + if (!countOnly) + { + buf->pos[0] = (NvU8)((value) & 0xff); + buf->pos[1] = (NvU8)((value >> 8) & 0xff); + buf->pos[2] = (NvU8)((value >> 16) & 0xff); + buf->pos[3] = (NvU8)((value >> 24) & 0xff); + } + buf->pos += 4; + + return NV_OK; +} + +/*! + * @brief Encodes a 64 bit integer. + * @param [in] buf Memory buffer + * @param [in] value Value to encode + * @param [in] countOnly Set to just count the bytes + * @returns NV_STATUS + */ +static NV_STATUS +encode_64bit(PRB_BUF *buf, NvU64 value, NvBool countOnly) +{ + if (prbBufLeft(buf) < 8) + return NV_ERR_BUFFER_TOO_SMALL; + + if (!countOnly) + { + buf->pos[0] = (NvU8)((value) & 0xff); + buf->pos[1] = (NvU8)((value >> 8) & 0xff); + buf->pos[2] = (NvU8)((value >> 16) & 0xff); + buf->pos[3] = (NvU8)((value >> 24) & 0xff); + value >>= 32; + buf->pos[4] = (NvU8)((value) & 0xff); + buf->pos[5] = (NvU8)((value >> 8) & 0xff); + buf->pos[6] = (NvU8)((value >> 16) & 0xff); + buf->pos[7] = (NvU8)((value >> 24) & 0xff); + } + buf->pos += 8; + + return NV_OK; +} + +// Encoder + +/*! + * @brief Starts the encoder with the first message + * This variant allows the caller to pass in a buffer to use. + * @param [in] encoder The encoder structure + * @param [in] msg_desc The message to encode + * @param [in] data The buffer to use + * @param [in] len Length of the buffer + */ + +void +prbEncStart +( + PRB_ENCODER *encoder, + const PRB_MSG_DESC *msg_desc, + void *data, + NvU32 len, + PrbBufferCallback *pBufferCallback +) +{ + encoder->flags = 0; + encoder->depth = 1; + prbBufInit(&encoder->stack[0].buf, data, len); + encoder->stack[0].field_desc = NULL; + encoder->stack[0].msg_desc = msg_desc; + + encoder->pBufferCallback = pBufferCallback; + if (pBufferCallback != NULL) + { + encoder->flags |= PRB_FIXED_MODE; + } +} + +/*! 
+ * @brief Starts the encoder with the first message + * Allocates memory to hold the data. + * If no memory was available, logging will be disabled + * and future calls to the encoding routines with this encoder + * will succeed but no data will be logged. + * @param [in] encoder The encoder structure + * @param [in] msg_desc The message to encode + * @param [in] len The caller's estimate of the number of bytes needed + * @returns NV_STATUS + */ + +NV_STATUS +prbEncStartAlloc +( + PRB_ENCODER *encoder, + const PRB_MSG_DESC *msg_desc, + NvU32 len, + PrbBufferCallback *pBufferCallback +) +{ + void *data = NULL; + NV_STATUS rmstatus = NV_OK; + + data = portMemAllocNonPaged(len); + if (data != NULL) + { + prbEncStart(encoder, msg_desc, data, len, pBufferCallback); + encoder->flags |= PRB_BUFFER_ALLOCATED; + } + else + { + rmstatus = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, + "Can't allocate memory for protocol buffers.\n"); + // Disable all encoding + encoder->flags = PRB_ENCODE_DISABLED; + } + return rmstatus; +} + +/*! + * @brief Starts the encoder with the first message + * This variant just counts the length of the message + * No data is stored + * @param [in] encoder The encoder structure + * @param [in] msg_desc The message to encode + * @param [in] len Length of the buffer + */ + +void +prbEncStartCount(PRB_ENCODER *encoder, + const PRB_MSG_DESC *msg_desc, + NvU32 len) +{ + prbEncStart(encoder, msg_desc, NULL, len, NULL); + encoder->flags |= PRB_COUNT_ONLY; +} + +/*! + * @brief free an allocated buffer and disables encoding. + * @param [in] encoder the encoder structure. + */ + +void +prbFreeAllocatedBuffer(PRB_ENCODER *encoder) +{ + if (encoder->flags & PRB_BUFFER_ALLOCATED) + prbBuffFree(&encoder->stack[0].buf); + + encoder->flags &= ~PRB_BUFFER_ALLOCATED; + encoder->flags |= PRB_ENCODE_DISABLED; + encoder->depth = 1; +} + +/*! + * @brief Finish encoding + * @param [in] encoder The current encoder structure + * @param [out] buff The address of the data buffer. + * @returns the number of bytes encoded + */ + +NvU32 +prbEncFinish(PRB_ENCODER *encoder, void **buff) +{ + if (!(encoder->flags & PRB_ENCODE_DISABLED)) + prbEndPackedField(encoder); + + *buff = encoder->stack[0].buf.base; + encoder->flags |= PRB_ENCODE_DISABLED; + return prbBufUsed(&encoder->stack[0].buf); +} + +/*! + * @brief Start a nested message + * @param [in] encoder The current encoder + * @param [in] field_desc The field where the message starts + * @returns NV_STATUS + */ + +NV_STATUS +prbEncNestedStart(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc) +{ + NV_STATUS status; + + PRB_ENCODER_STACK_FRAME *frame, *new_frame; + + if (encoder->flags & PRB_ENCODE_DISABLED) + { + return NV_ERR_INVALID_REQUEST; + } + + status = prbEndPackedField(encoder); + if (status != NV_OK) + return status; + + // The field must be a message + if (field_desc->opts.typ != PRB_MESSAGE) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + + // Check max depth + if (encoder->depth >= PRB_MAX_DEPTH) + { + DBG_BREAKPOINT(); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Get parent frame + frame = &encoder->stack[encoder->depth - 1]; + + // Create a new frame + encoder->depth++; + new_frame = &encoder->stack[encoder->depth - 1]; + new_frame->field_desc = field_desc; + new_frame->msg_desc = field_desc->msg_desc; + + // Reserve a few bytes for the field on the parent frame. This is where + // the field key (message) and the message length will be stored, once it + // is known. 
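+ // (MSG_RESERVE_BYTES is 10 bytes, enough to hold the field key and the
+ // message length as varints of up to five bytes each.)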
+ if (prbBufLeft(&frame->buf) < MSG_RESERVE_BYTES) + { + encoder->depth--; + return NV_ERR_BUFFER_TOO_SMALL; + } + + prbBufInit(&new_frame->buf, frame->buf.pos + MSG_RESERVE_BYTES, + prbBufLeft(&frame->buf) - MSG_RESERVE_BYTES); + return NV_OK; +} + +/*! + * @brief Add bytes to a stubbed message. + * @param [in] encoder The current encoder + * @param [in] buffer The data to copy + * @param [in] len The data length + */ + +NV_STATUS +prbEncStubbedAddBytes(PRB_ENCODER *encoder, NvU8 *buffer, NvU32 len) +{ + PRB_ENCODER_STACK_FRAME *frame; + NV_STATUS status = NV_OK; + + frame = &encoder->stack[encoder->depth - 1]; + + // Check length + if (len <= 0) { + return NV_ERR_INVALID_ARGUMENT; + } + + // Check if we have enough space + if (prbBufLeft(&frame->buf) < len) { + return NV_ERR_BUFFER_TOO_SMALL; + } + + status = prbEndPackedField(encoder); + if (status != NV_OK) + return status; + + // Move the (possibly overlapping) memory + if (!COUNT_FLAG(encoder)) + portMemMove(frame->buf.pos, len, buffer, len); + + // Adjust current buffer position + frame->buf.pos += len; + return NV_OK; +} + +/*! + * @brief Internal helper routine to keep track of packed fields. + * @param [in] encoder The encoder + * @param [in] field_desc The field being packed + * @returns NV_STATUS + */ + +static NV_STATUS +prbStartPackedField(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc) +{ + PRB_ENCODER_STACK_FRAME *frame, *new_frame; + + // The field must be packed + if (!(field_desc->opts.flags & PRB_IS_PACKED)) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + + // Check max depth + if (encoder->depth >= PRB_MAX_DEPTH) + { + DBG_BREAKPOINT(); + return NV_ERR_INSUFFICIENT_RESOURCES; + } + + // Get parent frame + frame = &encoder->stack[encoder->depth - 1]; + + // Create a new frame + encoder->depth++; + new_frame = &encoder->stack[encoder->depth - 1]; + new_frame->field_desc = field_desc; + new_frame->msg_desc = frame->msg_desc; + encoder->flags |= PRB_PACKED_FRAME; + + // Reserve a few bytes for the field on the parent frame. This is where + // the field key (message) and the message length will be stored, once it + // is known. + if (prbBufLeft(&frame->buf) < MSG_RESERVE_BYTES) + { + encoder->depth--; + encoder->flags &= ~PRB_PACKED_FRAME; + return NV_ERR_BUFFER_TOO_SMALL; + } + + prbBufInit(&new_frame->buf, frame->buf.pos + MSG_RESERVE_BYTES, + prbBufLeft(&frame->buf) - MSG_RESERVE_BYTES); + return NV_OK; +} + +/*! + * @brief End a nested message + * @param [in] encoder The current encoder + * @returns NV_STATUS + */ + +NV_STATUS +prbEncNestedEnd(PRB_ENCODER *encoder) +{ + PRB_ENCODER_STACK_FRAME *frame; + PRB_VALUE value; + NV_STATUS status; + + if (encoder->flags & PRB_ENCODE_DISABLED) + { + return NV_ERR_INVALID_REQUEST; + } + + status = prbEndPackedField(encoder); + if (status != NV_OK) + return status; + + if (encoder->depth <= 1) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + + frame = &encoder->stack[encoder->depth - 1]; + + encoder->depth--; + + value.message.data = frame->buf.base; + value.message.len = prbBufUsed(&frame->buf); + + return prbEncAddField(encoder, frame->field_desc, &value); +} + +/*! + * @brief Return size of remaining buffer + * @param [in] encoder The encoder to use + * @returns bytes left + */ + +NvU32 +prbEncBufLeft(PRB_ENCODER *encoder) +{ + PRB_ENCODER_STACK_FRAME *frame; + + if (encoder->depth <= 0) + { + DBG_BREAKPOINT(); + return 0; + } + + frame = &encoder->stack[encoder->depth - 1]; + + return prbBufLeft(&frame->buf); +} + +/*! 
+ * @brief End a packed field + * @param [in] encoder The encoder to use + * @returns NV_STATUS + */ + +static NV_STATUS +prbEndPackedField(PRB_ENCODER *encoder) +{ + PRB_ENCODER_STACK_FRAME *frame; + const PRB_FIELD_DESC *field_desc; + NV_STATUS ret; + NvU64 key; + NvU8 *packed_start; + NvU32 packed_len; + + if (encoder->depth < 1) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + + frame = &encoder->stack[encoder->depth - 1]; + if (encoder->flags & PRB_PACKED_FRAME) + { + // Clear the flag + encoder->flags &= ~PRB_PACKED_FRAME; + + // Pick up field desc, packed start and length + packed_start = frame->buf.base; + packed_len = prbBufUsed(&frame->buf); + field_desc = frame->field_desc; + + if (encoder->depth == 1) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + + // Switch to parent frame + encoder->depth--; + frame = &encoder->stack[encoder->depth - 1]; + + if (packed_len == 0) + return(NV_OK); + + key = WT_STRING | (field_desc->number << 3); + ret = encode_varint(&frame->buf, key, COUNT_FLAG(encoder)); + if (ret != NV_OK) + return ret; + + ret = encode_varint(&frame->buf, packed_len, COUNT_FLAG(encoder)); + if (ret != NV_OK) + return ret; + + if (prbBufLeft(&frame->buf) < packed_len) + return NV_ERR_BUFFER_TOO_SMALL; + + if (!COUNT_FLAG(encoder)) + portMemMove(frame->buf.pos, packed_len, packed_start, packed_len); + frame->buf.pos += packed_len; + } + return NV_OK; +} + +/*! + * @brief Add a field to a message + * @param [in] encoder The encoder to use + * @param [in] field_desc Which field to add + * @param [in] value The value to send + * @returns NV_STATUS + * NV_ERR_INVALID_REQUEST if the field is not found + */ + +static NV_STATUS +prbEncAddField(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + PRB_VALUE *value) +{ + NV_STATUS ret; + PRB_ENCODER_STACK_FRAME *frame; + NvU32 i; + NvU64 key; + WIRE_TYPE wire_type = WT_32BIT; + WIRE_VALUE wire_value = {0}; + + if (encoder->flags & PRB_ENCODE_DISABLED) + { + return NV_ERR_INVALID_REQUEST; + } + + if (encoder->depth <= 0) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + + frame = &encoder->stack[encoder->depth - 1]; + + // If this field is not stubbed, then... + if (!(field_desc->opts.flags & PRB_STUBBED_FIELD)) { + + // Check that field belongs to the current message + for (i = 0; i < frame->msg_desc->num_fields; i++) + if (field_desc == &frame->msg_desc->fields[i]) + break; + if (i == frame->msg_desc->num_fields) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + } + + if (field_desc->opts.flags & PRB_IS_DEPRECATED) + { + // OK to read deprecated fields, but we should not write them. + DBG_BREAKPOINT(); + } + + if (field_desc->opts.flags & PRB_IS_PACKED) + { + // Need to start a new packed field? 
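+ // (Consecutive values added for the same packed field accumulate in one
+ // packed frame and are emitted as a single length-delimited record when
+ // a different field is written or the message ends.)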
+ if (frame->field_desc != field_desc) + { + ret = prbEndPackedField(encoder); + if (ret != NV_OK) + return ret; + + ret = prbStartPackedField(encoder, field_desc); + if (ret != NV_OK) + return ret; + } + + } + else + { + ret = prbEndPackedField(encoder); + if (ret != NV_OK) + return ret; + } + + // Messing with packed fields may have changed depth + frame = &encoder->stack[encoder->depth - 1]; + + // Encode wire value + switch (field_desc->opts.typ) + { + case PRB_DOUBLE: + wire_type = WT_64BIT; + wire_value.int64 = *((NvU64 *) &value->double_); + break; + case PRB_FLOAT: + wire_type = WT_32BIT; + wire_value.int32 = *((NvU32 *) &value->float_); + break; + case PRB_INT32: + wire_type = WT_VARINT; + wire_value.varint = value->int32; + break; + case PRB_UINT32: + wire_type = WT_VARINT; + wire_value.varint = value->uint32; + break; + case PRB_SINT32: + // Zig-zag encoding + wire_type = WT_VARINT; + wire_value.varint = (NvU32) ((value->int32 << 1) ^ (value->int32 >> 31)); + break; + case PRB_INT64: + wire_type = WT_VARINT; + wire_value.varint = value->int64; + break; + case PRB_UINT64: + wire_type = WT_VARINT; + wire_value.varint = value->uint64; + break; + case PRB_SINT64: + // Zig-zag encoding + wire_type = WT_VARINT; + wire_value.varint = (NvU64) ((value->int64 << 1) ^ (value->int64 >> 63)); + break; + case PRB_FIXED32: + wire_type = WT_32BIT; + wire_value.int32 = value->uint32; + break; + case PRB_FIXED64: + wire_type = WT_64BIT; + wire_value.int64 = value->uint64; + break; + case PRB_SFIXED32: + wire_type = WT_32BIT; + wire_value.int32 = value->int32; + break; + case PRB_SFIXED64: + wire_type = WT_64BIT; + wire_value.int64 = value->int64; + break; + case PRB_BOOL: + wire_type = WT_VARINT; + wire_value.varint = value->bool_; + break; + case PRB_ENUM: + wire_type = WT_VARINT; + wire_value.varint = value->enum_; + break; + case PRB_STRING: + wire_type = WT_STRING; + wire_value.string.data = value->string.str; + wire_value.string.len = value->string.len; + break; + case PRB_BYTES: + wire_type = WT_STRING; + wire_value.string.data = value->bytes.data; + wire_value.string.len = value->bytes.len; + break; + case PRB_MESSAGE: + wire_type = WT_STRING; + wire_value.string.data = value->message.data; + wire_value.string.len = value->message.len; + break; + } + + if (!(field_desc->opts.flags & PRB_IS_PACKED)) + { + key = wire_type | (field_desc->number << 3); + ret = encode_varint(&frame->buf, key, COUNT_FLAG(encoder)); + if (ret != NV_OK) + return ret; + } + + switch (wire_type) + { + case WT_VARINT: + ret = encode_varint(&frame->buf, wire_value.varint, COUNT_FLAG(encoder)); + if (ret != NV_OK) + return ret; + break; + case WT_64BIT: + ret = encode_64bit(&frame->buf, wire_value.int64, COUNT_FLAG(encoder)); + if (ret != NV_OK) + return ret; + break; + case WT_STRING: + ret = encode_varint(&frame->buf, wire_value.string.len, COUNT_FLAG(encoder)); + if (ret != NV_OK) + return ret; + if (prbBufLeft(&frame->buf) < wire_value.string.len) + return NV_ERR_BUFFER_TOO_SMALL; + if (!COUNT_FLAG(encoder)) + { + // Use memmove() when writing a message field as the memory areas are + // overlapping. 
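+ // (A nested message is built in place a few bytes further along this
+ // buffer, past the space reserved for its key and length, so the source
+ // and destination ranges can overlap when it is folded back in.)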
+ if (field_desc->opts.typ == PRB_MESSAGE) + { + portMemMove(frame->buf.pos, (NvU32)wire_value.string.len, + wire_value.string.data, (NvU32)wire_value.string.len); + } + else + { + portMemCopy(frame->buf.pos, (NvU32)wire_value.string.len, wire_value.string.data, (NvU32)wire_value.string.len); + } + } + frame->buf.pos += wire_value.string.len; + break; + case WT_32BIT: + ret = encode_32bit(&frame->buf, wire_value.int32, COUNT_FLAG(encoder)); + if (ret != NV_OK) + return ret; + break; + default: + DBG_BREAKPOINT(); + break; + } + + return NV_OK; +} + +/*! + * @brief Encode a signed 32 bit integer argument + * @param [in] encoder The encoder to use + * @param [in] field_desc The field descriptor to use + * @param [in] int32 The value to encode + * @returns NV_STATUS + */ + +NV_STATUS +prbEncAddInt32(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvS32 int32) +{ + PRB_VALUE value; + + value.int32 = int32; + return prbEncAddField(encoder, field_desc, &value); +} + +/*! + * @brief Encode an unsigned 32 bit integer argument + * @param [in] encoder The encoder to use + * @param [in] field_desc The field descriptor to use + * @param [in] uint32 The value to encode + * @returns NV_STATUS + */ + +NV_STATUS +prbEncAddUInt32(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvU32 uint32) +{ + PRB_VALUE value; + + value.uint32 = uint32; + return prbEncAddField(encoder, field_desc, &value); +} + +/*! + * @brief Encode a 64 bit signed integer + * @param [in] encoder The encoder to use + * @param [in] field_desc The field descriptor to use + * @param [in] int64 The value to encode + * @returns NV_STATUS + */ + +NV_STATUS +prbEncAddInt64(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvS64 int64) +{ + PRB_VALUE value; + + value.int64 = int64; + return prbEncAddField(encoder, field_desc, &value); +} + +/*! + * @brief Encode a 64 bit unsigned integer + * @param [in] encoder The encoder to use + * @param [in] field_desc The field descriptor to use + * @param [in] uint64 The value to encode + * @returns NV_STATUS + */ + +NV_STATUS +prbEncAddUInt64(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvU64 uint64) +{ + PRB_VALUE value; + + value.uint64 = uint64; + return prbEncAddField(encoder, field_desc, &value); +} + +/*! + * @brief Encode a boolean + * @param [in] encoder The encoder to use + * @param [in] field_desc The field descriptor to use + * @param [in] bool_ The value to encode + * @returns NV_STATUS + */ + +NV_STATUS +prbEncAddBool(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + NvBool bool_) +{ + PRB_VALUE value; + + value.bool_ = bool_; + return prbEncAddField(encoder, field_desc, &value); +} + +/*! + * @brief Encode an enum + * @param [in] encoder The encoder to use + * @param [in] field_desc The field descriptor to use + * @param [in] enum_ The value to encode + * @returns NV_STATUS + */ + +NV_STATUS +prbEncAddEnum(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + int enum_) +{ + PRB_VALUE value; + + value.enum_ = enum_; + return prbEncAddField(encoder, field_desc, &value); +} + +/*! 
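+ * Like the other prbEncAdd*() wrappers above, this helper only fills in a
+ * PRB_VALUE and defers the actual encoding to prbEncAddField().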
+ * @brief Encode a null terminated string + * @param [in] encoder The encoder to use + * @param [in] field_desc The field descriptor to use + * @param [in] str The value to encode + * @returns NV_STATUS + */ + +NV_STATUS +prbEncAddString(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + const char *str) +{ + PRB_VALUE value; + + value.string.str = str; + value.string.len = (NvU32)portStringLength(str); + return prbEncAddField(encoder, field_desc, &value); +} + +/*! + * @brief Encode a byte array + * @param [in] encoder The encoder to use + * @param [in] field_desc The field descriptor to use + * @param [in] data The value to encode + * @param [in] len The length of the byte array + * @returns NV_STATUS + */ + +NV_STATUS +prbEncAddBytes(PRB_ENCODER *encoder, + const PRB_FIELD_DESC *field_desc, + const NvU8 *data, + NvU32 len) +{ + PRB_VALUE value; + + value.string.str = (const char *) data; + value.string.len = len; + return prbEncAddField(encoder, field_desc, &value); +} + +/*! + * @brief concatenate a message to an encoded message stream + * @param [in] encoder The encoder to use + * @param [in] pMsg The pointer to the message to add to the steam + * @param [in] len Length of the message + * @returns NV_STATUS + */ + +NV_STATUS +prbEncCatMsg(PRB_ENCODER *encoder, void *pMsg, NvU32 len) +{ + NV_STATUS status; + NvU8 *pBuff = pMsg; + NvU32 used; + NvU64 msgVarint1; + NvU64 msgVarint2; + WIRE_TYPE msgWireType; + NvU32 msgField; + NvU32 msgLen; + PRB_ENCODER_STACK_FRAME *frame; + const PRB_FIELD_DESC *field_desc; + NvU32 i; + + // Get field, wiretype + status = decode_varint(pBuff, len, &used, &msgVarint1); + if (status != NV_OK) + return status; + + if (used >= len) + return NV_ERR_BUFFER_TOO_SMALL; + + msgWireType = (WIRE_TYPE)msgVarint1 & 0x7; + if (msgWireType != WT_STRING) + return NV_ERR_INVALID_REQUEST; + + msgField = (NvU32)msgVarint1 >> 3; + + // Find a field descriptor for the new message + frame = &encoder->stack[encoder->depth - 1]; + for (i = 0; i < frame->msg_desc->num_fields; i++) + { + if (frame->msg_desc->fields[i].number == msgField) + break; + } + + if (i == frame->msg_desc->num_fields) + { + DBG_BREAKPOINT(); + return NV_ERR_INVALID_REQUEST; + } + else + { + field_desc = &frame->msg_desc->fields[i]; + } + + pBuff += used; + len -= used; + + // Get length + status = decode_varint(pBuff, len, &used, &msgVarint2); + if (status != NV_OK) + return status; + + msgLen = (NvU32)msgVarint2; + if (msgLen > len - used) + return NV_ERR_BUFFER_TOO_SMALL; + + return prbEncAddBytes(encoder, field_desc, pBuff + used, msgLen); +} + diff --git a/src/nvidia/src/lib/protobuf/prbutil.c b/src/nvidia/src/lib/protobuf/prbutil.c new file mode 100644 index 000000000..2d35219c2 --- /dev/null +++ b/src/nvidia/src/lib/protobuf/prbutil.c @@ -0,0 +1,805 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2000-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "lib/protobuf/prb_util.h" + +#include "os/os.h" + +#include "g_regs_pb.h" +#include "g_all_dcl_pb.h" + +/*! + * @brief Encode Gpu registers. Wraps REG_RD32. + * @param [in] pGpu The Gpu + * @param [in] offset The offset of the regs to read + * @param [in] numRegs Number of contiguous regs to encode + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncGpuRegs(POBJGPU pGpu, IO_APERTURE *pAperture, NvU64 offset, NvU32 numRegs, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + NV_STATUS statusEnd; + NvU32 data; + + if (pAperture == NULL) + { + return NV_ERR_INVALID_REQUEST; + } + + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + return status; + + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_GPU_REGS); + if (status != NV_OK) + goto done; + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, REG_GET_ADDR(pAperture, offset)); + if (status != NV_OK) + goto done; + while (numRegs--) + { + data = REG_RD32(pAperture, (NvU32)offset); + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, data); + if (status != NV_OK) + goto done; + offset += sizeof(NvU32); + } + +done: + // Try to close even if preceding error + statusEnd = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + return status; + else + return statusEnd; +} + + +/*! + * @brief Encode Gpu register immediate. + * Use this version when you have the register in hand. + * @param [in] pGpu The Gpu + * @param [in] offset The offset of the regs to read + * @param [in] reg The register to encode + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncGpuRegImm(POBJGPU pGpu, NvU64 offset, NvU32 reg, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + NV_STATUS statusEnd; + + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + return status; + + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_GPU_REGS); + if (status != NV_OK) + goto done; + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, offset); + if (status != NV_OK) + goto done; + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, reg); + goto done; + +done: + // Try to close even if preceding error + statusEnd = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! + * @brief Encode Gpu registers, table slice version + * Adds a base address to each offset. Useful for register + * sets like the tpc registers. + * @param [in] pGpu The Gpu + * @param [in] tbl The start of the table + * @param [in] numEntries Number of entries in the table + * @param [in] base Base address for the slice of registers in the table + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. 
Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncGpuRegSliceTbl(POBJGPU pGpu, IO_APERTURE *pAperture, const PRB_GPU_REG_TABLE *pTbl, NvU32 numEntries, NvU32 base, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status = NV_OK; // Init keeps the Mac compiler quiet + NvU64 regAddr = 0; // Ditto + NvU32 stride = 0; // Ditto + NV_STATUS statusEnd; + NvU32 data; + NvU32 numRegs; + NvBool inMsg = NV_FALSE; + NvU8 startingDepth = prbEncNestingLevel(pPrbEnc); + + if ((pAperture == NULL) || (pTbl == NULL) || (numEntries == 0)) + return NV_ERR_INVALID_REQUEST; + + // + // This loop collapses registers that are adjacent in the table to + // avoid repeatedly encoding the register address and to take advantage of the + // protobuf packed encoding. + // + while(numEntries--) + { + if (!inMsg) + { + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + goto done; + + inMsg = NV_TRUE; + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_GPU_REGS); + if (status != NV_OK) + goto done; + + regAddr = pTbl->offset + base; + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, REG_GET_ADDR(pAperture, regAddr)); + if (status != NV_OK) + goto done; + stride = pTbl->stride; + if (stride != 4) + { + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_STRIDE, stride); + if (status != NV_OK) + goto done; + } + } + for (numRegs = pTbl->numRegs; numRegs > 0; numRegs--) + { + data = REG_RD32(pAperture, (NvU32)regAddr); + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, data); + if (status != NV_OK) + goto done; + regAddr += stride; + } + pTbl++; + if (numEntries > 0 && + (regAddr != pTbl->offset + base || + stride != pTbl->stride)) + { + status = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + goto done; + inMsg = NV_FALSE; + } + } + +done: + // Try to close even if preceding error + statusEnd = prbEncUnwindNesting(pPrbEnc, startingDepth); + + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! + * @brief Encode Gpu registers, table slice indexed version. + * Adds a base address and an index multipled by the table length + * to each offset. Useful for register sets like the smc registers. + * @param [in] pGpu The Gpu + * @param [in] tbl The start of the table + * @param [in] numEntries Number of entries in the table + * @param [in] base Base address for the slice of registers in the table + * @param [in] index Index to apply to the ilen table entry + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncGpuRegSliceIndexedTbl(POBJGPU pGpu, IO_APERTURE *pAperture, const PRB_GPU_REG_INDEXED_TABLE *pTbl, NvU32 numEntries, + NvU32 base, NvU32 index, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status = NV_OK; // Init keeps the Mac compiler quiet + NvU64 regAddr = 0; // Ditto + NvU32 stride = 0; // Ditto + NV_STATUS statusEnd; + NvU32 data; + NvU32 numRegs; + NvBool inMsg = NV_FALSE; + NvU8 startingDepth = prbEncNestingLevel(pPrbEnc); + + if (pTbl == NULL || numEntries == 0) + return NV_ERR_INVALID_REQUEST; + + // + // This loop collapses registers that are adjacent in the table to + // avoid repeatedly encoding the register address and to take advantage of the + // protobuf packed encoding. 
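+ // For example (hypothetical addresses), if one entry's registers end at
+ // 0x108 and the next entry's computed start address is also 0x108 with
+ // the same stride, its values are appended to the open RegsAndMem
+ // message instead of starting a new one.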
+ // + while(numEntries--) + { + if (!inMsg) + { + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + goto done; + + inMsg = NV_TRUE; + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_GPU_REGS); + if (status != NV_OK) + goto done; + + regAddr = pTbl->offset + base + (NvU64)index * pTbl->ilen; + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, REG_GET_ADDR(pAperture, regAddr)); + if (status != NV_OK) + goto done; + stride = pTbl->stride; + if (stride != 4) + { + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_STRIDE, stride); + if (status != NV_OK) + goto done; + } + } + for (numRegs = pTbl->numRegs; numRegs > 0; numRegs--) + { + data = REG_RD32(pAperture, (NvU32)regAddr); + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, data); + if (status != NV_OK) + goto done; + regAddr += stride; + } + pTbl++; + if (numEntries > 0 && + (regAddr != pTbl->offset + base + (NvU64)index * pTbl->ilen || + stride != pTbl->stride)) + { + status = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + goto done; + inMsg = NV_FALSE; + } + } + +done: + // Try to close even if preceding error + statusEnd = prbEncUnwindNesting(pPrbEnc, startingDepth); + + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! + * @brief Encode Gpu registers, table simplified version + * @param [in] pGpu The Gpu + * @param [in] pOffset The start of an array with register offsets + * @param [in] numEntries Number of entries in the table + * @param [in] base Base address for the slice of registers in the table + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncGpuRegSliceOffset(POBJGPU pGpu, IO_APERTURE *pAperture, const NvU32 *pOffset, NvU32 numEntries, + NvU32 base, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *pFieldDesc) +{ + NV_STATUS status = NV_OK; // if numEntries == 0 + NvU32 regAddr = 0; // Init keeps the Mac compiler quiet + NvBool inMsg = NV_FALSE; + NvU8 uDepth = prbEncNestingLevel(pPrbEnc); + + if (pAperture == NULL) + { + return NV_ERR_INVALID_REQUEST; + } + + // + // This loop collapses registers that are adjacent in the table to + // avoid repeatedly encoding the register address and to take advantage + // of the protobuf packed encoding. + // + while(numEntries--) + { + if (!inMsg) + { + status = prbEncNestedStart(pPrbEnc, pFieldDesc); + if (status != NV_OK) + goto done; + + inMsg = NV_TRUE; + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_GPU_REGS); + if (status != NV_OK) + goto done; + + regAddr = *pOffset + base; + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, REG_GET_ADDR(pAperture, regAddr)); + if (status != NV_OK) + goto done; + } + + status = prbEncAddUInt32(pPrbEnc,REGS_REGSANDMEM_VAL, REG_RD32(pAperture, regAddr)); + if (status != NV_OK) + goto done; + + pOffset++; + regAddr += 4; + + if (regAddr != *pOffset + base) + { + inMsg = NV_FALSE; + + status = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + goto done; + } + } + +done: + // Try to close even if preceding error + prbEncUnwindNesting(pPrbEnc, uDepth); + return status; +} + +/*! + * @brief Encode Gpu registers, table simplified version + * @param [in] pGpu The Gpu + * @param [in] pOffset The start of an array with register offsets + * @param [in] numEntries Number of entries in the table + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. 
Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncGpuRegOffset(POBJGPU pGpu, IO_APERTURE *pAperture, const NvU32 *pOffset, NvU32 numEntries, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *pFieldDesc) +{ + return prbEncGpuRegSliceOffset(pGpu, pAperture, pOffset, numEntries, 0, pPrbEnc, pFieldDesc); +} + +/*! + * @brief Encode Gpu registers, table version + * @param [in] pGpu The Gpu + * @param [in] tbl The start of the table + * @param [in] numEntries Number of entries in the table + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncGpuRegTbl(POBJGPU pGpu, IO_APERTURE *pAperture, const PRB_GPU_REG_TABLE *pTbl, NvU32 numEntries, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + return prbEncGpuRegSliceTbl(pGpu, pAperture, pTbl, numEntries, 0, pPrbEnc, fieldDesc); +} + + +/*! + * @brief Encode Instance Memory. + * + * @param [in] physAddr The physical address of the memory, saved as offset. + * @param [in] pVirtAddr The virtual address of the memory to MEM_RD32. + * @param [in] numWords The number of NvU32 words to read. + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. + * Must be a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncInstMem(NvU64 physAddr, NvU8 *pVirtAddr, NvU32 numWords, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + NV_STATUS statusEnd; + NvU32 data; + + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + return status; + + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_INSTANCE); + if (status != NV_OK) + goto done; + + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, physAddr); + if (status != NV_OK) + goto done; + + while (numWords--) + { + data = MEM_RD32(pVirtAddr); + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, data); + if (status != NV_OK) + goto done; + pVirtAddr += sizeof(NvU32); + } + +done: + // Try to close even if preceding error + statusEnd = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! + * @brief Encode Instance Memory Immediate. + * Assumes the caller has done the MEM_RD32 call. + * + * @param [in] physAddr The physical address of the memory read + * @param [in] data The data at that memory + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. + * Must be a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncInstMemImm(NvU64 physAddr, NvU32 data, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + NV_STATUS statusEnd; + + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + return status; + + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_INSTANCE); + if (status != NV_OK) + goto done; + + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, physAddr); + if (status != NV_OK) + goto done; + + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, data); + if (status != NV_OK) + goto done; + +done: + // Try to close even if preceding error + statusEnd = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! + * @brief Encode Memory. Wraps MEM_RD32. 
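+ * For example, prbEncMem(addr, 4, pPrbEnc, fieldDesc) encodes the four
+ * 32-bit words starting at virtual address addr into a single RegsAndMem
+ * message (illustrative call; the arguments are placeholders).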
+ * @param [in] offset The offset of the memory to read + * @param [in] numRegs Number of contiguous 32 bit memory cells to encode + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncMem(NvU64 offset, NvU32 numRegs, PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + NV_STATUS statusEnd; + NvU32 data; + + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + return status; + + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_SYS_MEM); + if (status != NV_OK) + goto done; + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, offset); + if (status != NV_OK) + goto done; + while (numRegs--) + { + data = MEM_RD32((NvU8 *)(NvUPtr)offset); + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, data); + if (status != NV_OK) + goto done; + offset += sizeof(NvU32); + } + +done: + // Try to close even if preceding error + statusEnd = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! + * @brief Encode pci config registers. Wraps GPU_BUS_CFG_RD32. + * @param [in] pGpu The Gpu + * @param [in] index Config Index + * @param [in] numRegs Number of contiguous regs to encode + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncPciConfigRegs(POBJGPU pGpu, NvU64 index, NvU32 numRegs, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + NV_STATUS statusEnd; + NvU32 data; + + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + return status; + + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_PCI_CONFIG_REGS); + if (status != NV_OK) + goto done; + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, index); + if (status != NV_OK) + goto done; + while (numRegs--) + { + if (NV_OK != GPU_BUS_CFG_RD32(pGpu, (NvU32)index, &data)) + goto done; + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, data); + if (status != NV_OK) + goto done; + index += sizeof(NvU32); + } + +done: + // Try to close even if preceding error + statusEnd = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! + * @brief Encode pci space data. Wraps osPciReadDword. + * @param [in] handle PCI space handle + * @param [in] offset Offset + * @param [in] pPrbEnc The protobuf encoder to use + * @param [in] fieldDesc The field we are encoding. Must be + * a RegsAndMem message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncPciReadDword(void *handle, NvU64 offset, + PRB_ENCODER *pPrbEnc, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + NV_STATUS statusEnd; + NvU32 data; + + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + return status; + + status = prbEncAddEnum(pPrbEnc, REGS_REGSANDMEM_TYPE, REGS_REGSANDMEM_PCI_SPACE); + if (status != NV_OK) + goto done; + status = prbEncAddUInt64(pPrbEnc, REGS_REGSANDMEM_OFFSET, offset); + if (status != NV_OK) + goto done; + data = osPciReadDword(handle, (NvU32)offset); + status = prbEncAddUInt32(pPrbEnc, REGS_REGSANDMEM_VAL, data); + +done: + // Try to close even if preceding error + statusEnd = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! 
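+ * As with the register and memory encoders above, this helper opens a
+ * nested message, adds its fields, and closes the nested message even if
+ * one of the intermediate adds fails.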
+ * @brief Add a generic exception data record to protobuf + * @param [in] prbEnc The protobuf encoder + * @param [in] pGpu The gpu + * @param [in] chId The channel where the exception occurred + * @param [in] fieldDesc The field we are encoding. Must be + * an ExceptionData message. + * @returns NV_STATUS +*/ + +NV_STATUS +prbEncGenExData(PRB_ENCODER *pPrbEnc, POBJGPU pGpu, NvU32 chId, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + NV_STATUS statusEnd; + static NvU32 errorSequenceNumber; + NvU32 sec; + NvU32 uSec; + NvU64 curTime; + + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + return status; + + status = prbEncAddUInt32(pPrbEnc, GR_EXCEPTION_EXCEPTIONDATA_GPU_INSTANCE, pGpu->gpuInstance); + if (status != NV_OK) + goto done; + status = prbEncAddUInt32(pPrbEnc, GR_EXCEPTION_EXCEPTIONDATA_CH_ID, chId); + if (status != NV_OK) + goto done; + status = prbEncAddUInt32(pPrbEnc, GR_EXCEPTION_EXCEPTIONDATA_ERROR_SEQUENCE_NUMBER, errorSequenceNumber++); + if (status != NV_OK) + goto done; + osGetCurrentTime(&sec, &uSec); + curTime = (NvU64)sec * 1000000 + (NvU64)uSec; + status = prbEncAddUInt64(pPrbEnc, GR_EXCEPTION_EXCEPTIONDATA_TIME_STAMP, curTime); + +done: + // Try to close even if preceding error + statusEnd = prbEncNestedEnd(pPrbEnc); + if (status != NV_OK) + return status; + else + return statusEnd; +} + +/*! + * @brief Wrap a message and queue it to the RC error list + * @param [in] prbEnc The protobuf encoder + * @param [in] pErrorHeader The error block to queue it to + * @returns NV_STATUS +*/ + +NV_STATUS +prbWrapAndQueue +( + PRB_ENCODER *pPrbEnc, + RMERRORHEADER *pErrorHeader, + RMCD_ERROR_BLOCK **ppErrorBlock +) +{ + NvU32 len; + NvU8 *pBuff; + RMCD_ERROR_BLOCK *pCurrErrorBlock; + RMCD_ERROR_BLOCK *pOldErrorBlock = NULL; + RMCD_ERROR_BLOCK *pNewErrorBlock = NULL; + + prbEncNestedEnd(pPrbEnc); + len = prbEncFinish(pPrbEnc, (void **)&pBuff); + + if (pBuff) + { + pNewErrorBlock = portMemAllocNonPaged(sizeof(RMCD_ERROR_BLOCK)); + if (NULL != pNewErrorBlock) + { + pNewErrorBlock->pBlock = pBuff; + pNewErrorBlock->blockSize = len; + pNewErrorBlock->pNext = NULL; + + for (pCurrErrorBlock = pErrorHeader->pErrorBlock; pCurrErrorBlock != NULL; + pCurrErrorBlock = pCurrErrorBlock->pNext) + { + pOldErrorBlock = pCurrErrorBlock; + } + + if (pOldErrorBlock) + { + pOldErrorBlock->pNext = pNewErrorBlock; + } + else + { + pErrorHeader->pErrorBlock = pNewErrorBlock; + } + } + else + { + portMemFree(pNewErrorBlock); + portMemFree(pBuff); + pNewErrorBlock = NULL; + } + } + if (ppErrorBlock != NULL) + { + *ppErrorBlock = pNewErrorBlock; + } + return NV_OK; +} + +/*! + * @brief Allocate and startup a DCL message + * @param [in] prbEnc The protobuf encoder + * @param [in] len The number of bytes to allocate + * @param [in] fieldDesc The dcl field to set up + * @returns NV_STATUS +*/ + +NV_STATUS +prbSetupDclMsg(PRB_ENCODER *pPrbEnc, NvU32 len, const PRB_FIELD_DESC *fieldDesc) +{ + NV_STATUS status; + + status = prbEncStartAlloc(pPrbEnc, DCL_DCLMSG, len, NULL); + if (status == NV_OK) + { + status = prbEncNestedStart(pPrbEnc, fieldDesc); + if (status != NV_OK) + { + prbFreeAllocatedBuffer(pPrbEnc); + } + + } + return status; +} + +/*! + * @brief Unwinds the nesting to the level specified. + * + * For best results, use @ref prbEncNestingLevel to obtain the current nesting + * level before making calls to @ref prbEncNestedStart. 
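+ *
+ * Typical pattern (illustrative sketch only):
+ *
+ *   NvU8 startingDepth = prbEncNestingLevel(pPrbEnc);
+ *   // ... one or more prbEncNestedStart() / prbEncAdd*() calls ...
+ *   // Close whatever is still open, even after an intermediate error.
+ *   status = prbEncUnwindNesting(pPrbEnc, startingDepth);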
+ * + * @param [in] prbEnc The protobuf encoder + * @param [in] pErrorHeader The error block to queue it to + * + * @returns NV_STATUS if successfully unwound; detailed error code otherwise. +*/ + +NV_STATUS +prbEncUnwindNesting(PRB_ENCODER *pPrbEnc, NvU8 level) +{ + NV_STATUS status = NV_OK; + + while (pPrbEnc->depth > level) + { + status = prbEncNestedEnd(pPrbEnc); + if (NV_OK != status) + { + break; + } + } + + return status; +} + diff --git a/src/nvidia/src/lib/ref_count.c b/src/nvidia/src/lib/ref_count.c new file mode 100644 index 000000000..ab42899b9 --- /dev/null +++ b/src/nvidia/src/lib/ref_count.c @@ -0,0 +1,583 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2013-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "core/prelude.h" +#include "lib/ref_count.h" +#include "os/os.h" + +static NV_STATUS _refcntReleaseReferences (POBJREFCNT, NvU64, NvBool, + NvBool, NvBool*); +static NV_STATUS _refcntInvokeStateChangeCallback(POBJREFCNT, REFCNT_STATE, + REFCNT_STATE); +static void _refcntInvokeResetCallback (POBJREFCNT, NvU64); + +/*! + * @brief Construct the reference counter according to the parameters. + * + * @param[in] pRefcnt - REFCNT object pointer + * @param[in] pParent - The parent ODB object of the reference + * counter + * @param[in] tag - An identifier the caller can use to + * distinguish multiple REFCNTs - the + * implementation does not use this value + * @param[in] pStateChangeCallback - A callback function to be called when + * the state of the reference-counted + * setting changes. Optional. + * @param[in] pResetCallback - A callback function to be called for + * each requester when the counter is + * reset. Optional. + * + * @returns NV_OK if the reference counter is constructed successfully + */ +NV_STATUS +refcntConstruct_IMPL +( + POBJREFCNT pRefcnt, + Dynamic *pParent, + NvU32 tag, + RefcntStateChangeCallback *pStateChangeCallback, + RefcntResetCallback *pResetCallback +) +{ + pRefcnt->pParent = pParent; + pRefcnt->tag = tag; + pRefcnt->refcntStateChangeCallback = pStateChangeCallback; + pRefcnt->refcntResetCallback = pResetCallback; + pRefcnt->state = REFCNT_STATE_DEFAULT; + pRefcnt->count = 0; + + mapInit(&pRefcnt->requesterTree, portMemAllocatorGetGlobalNonPaged()); + + return NV_OK; +} + +/*! 
+ * @brief Destructor + */ +void +refcntDestruct_IMPL +( + POBJREFCNT pRefcnt +) +{ + + // If there are any requesters left, remove them now + mapDestroy(&pRefcnt->requesterTree); +} + +/*! + * @brief Request a state for the reference-counted setting on behalf of the + * requester. + * + * @param[in] pRefcnt - REFCNT object pointer + * @param[in] requesterId - Unique identifier representing the requester + * @param[in] requestedState - The state of the setting that the requester + * wants; should be either REFCNT_STATE_ENABLED + * or REFCNT_STATE_DISABLED + * @param[in] bForce - The setting should be forced to the requested + * state; if this state is different from the + * current state, this will force a reset of the + * counter + * + * @returns NV_OK if the reference-counted setting is in the requested state + * upon exit + * NV_ERR_NOT_SUPPORTED if the REFCNT engine is not enabled + * NV_ERR_STATE_IN_USE if the reference-counted setting could not be + * put in the requested state due to conflicting requests + * NV_ERR_INVALID_ARGUMENT if the requested state is not one of the + * valid values + * NV_ERR_INVALID_STATE if the reference counter's bookkeeping has + * become unreliable + * NV_ERR_ILLEGAL_ACTION if the requester is attempting to request a + * state while it already has a reference and the + * reference-counter is configured not to allow recursive + * requests + * Other errors from _refcntInvokeStateChangeCallback(), + * refcntReset(), btreeInsert(), and btreeUnlink() + */ +NV_STATUS +refcntRequestReference_IMPL +( + POBJREFCNT pRefcnt, + NvU64 requesterId, + NvU32 requestedState, + NvBool bForce +) +{ + NV_STATUS status; + PREFCNT_REQUESTER_ENTRY pRequesterInfo = NULL; + REFCNT_REQUESTER_ENTRY requesterInfo; + NvBool bInfoInTree = NV_FALSE; + + // Releasing requests should go through refcntReleaseReferences() + if (requestedState != REFCNT_STATE_ENABLED && + requestedState != REFCNT_STATE_DISABLED) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + // Do not allow any operations on a reference counter in an error state. + if (pRefcnt->state == REFCNT_STATE_ERROR) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + // + // If there are any outstanding references to the setting, we need to + // make sure that the requested state is the current state, or forcibly + // reset the reference counter. + // + if ((pRefcnt->count > 0) && (requestedState != pRefcnt->state)) + { + if (bForce) + { + // + // Reset the counter, but don't do a state transition to the + // default state. + // + status = refcntReset(pRefcnt, NV_FALSE); + if (status != NV_OK) + { + goto done; + } + } + else + { + NV_PRINTF(LEVEL_INFO, + "Cannot enter requested state %d (current state: %d, count: %d)\n", + requestedState, pRefcnt->state, + pRefcnt->count); + status = NV_ERR_STATE_IN_USE; + goto done; + } + } + + // Find out if the requester has existing references to the current setting + pRequesterInfo = mapFind(&pRefcnt->requesterTree, requesterId); + if (pRequesterInfo != NULL) + { + bInfoInTree = NV_TRUE; + // + // Recursive requests are only legal if the reference counter + // allows it. 
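+ // (Without PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS, a requester that
+ // already holds a reference gets NV_ERR_ILLEGAL_ACTION rather than a
+ // second reference.)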
+ // + if (pRequesterInfo->numReferences > 0 && + !pRefcnt->getProperty(pRefcnt, + PDB_PROP_REFCNT_ALLOW_RECURSIVE_REQUESTS)) + { + NV_PRINTF(LEVEL_NOTICE, + "Requester 0x%016llx already has a reference to this setting (current state: %d, count: %d)\n", + requesterId, pRefcnt->state, + pRefcnt->count); + status = NV_ERR_ILLEGAL_ACTION; + goto done; + } + } + else + { + // We need to create a new requester entry + requesterInfo.numReferences = 0; + } + + // + // Determine if we should transition into a new state. If the transition + // fails, we don't update the counts so we don't get out of sync with the + // setting. + // + if (pRefcnt->count == 0) + { + status = _refcntInvokeStateChangeCallback(pRefcnt, pRefcnt->state, + requestedState); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to enter state %d (current state: %d, status: 0x%08x)\n", + requestedState, pRefcnt->state, status); + goto done; + } + } + + // Only insert new entries if we could make the state change (if any). + if (!bInfoInTree) + { + pRequesterInfo = mapInsertValue(&pRefcnt->requesterTree, requesterId, &requesterInfo); + if (pRequesterInfo == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_PRINTF(LEVEL_ERROR, + "Failed to insert entry for requester 0x%016llx (status: 0x%08x)\n", + requesterId, status); + pRefcnt->state = REFCNT_STATE_ERROR; + goto done; + } + + bInfoInTree = NV_TRUE; + } + + // + // The state change (if any) was successful, so record the new reference + // now. + // + pRequesterInfo->numReferences++; + pRefcnt->count++; + pRefcnt->state = requestedState; + status = NV_OK; + +done: + // Clean up the requester info if it's not being used. + if (pRequesterInfo && pRequesterInfo->numReferences == 0) + { + if (bInfoInTree) + { + mapRemove(&pRefcnt->requesterTree, pRequesterInfo); + } + } + + return status; +} + +/*! + * @brief Release an outstanding request for the reference-counted setting on + * behalf of the requester. + * + * The request must have been created via a successful call to + * refcntRequestReference(). + * + * @param[in] pRefcnt - REFCNT object pointer + * @param[in] requesterId - Unique identifier representing the + * requester + * @param[in] bReleaseAllRequests - Indicates whether all requests made by the + * requester for this setting should be + * released; this parameter only matters for + * REFCNTs with the ALLOW_RECURSIVE_REQUESTS + * PDB property enabled + * + * @returns The status of _refcntReleaseReferences() + */ +NV_STATUS +refcntReleaseReferences_IMPL +( + POBJREFCNT pRefcnt, + NvU64 requesterId, + NvBool bReleaseAllRequests +) +{ + NvBool bNodeIsRemoved; + return _refcntReleaseReferences(pRefcnt, requesterId, bReleaseAllRequests, + NV_FALSE, &bNodeIsRemoved); +} + +/*! + * @brief Reset the reference counter to the default state. + * + * This will release all existing requests for the reference-counted setting, + * invoking the reset callback for each requester. 
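+ * refcntRequestReference() relies on this reset (with bForceCallback set
+ * to NV_FALSE) when a conflicting request is made with bForce set.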
+ * + * @param[in] pRefcnt - REFCNT object pointer + * @param[in] bForceCallback - If NV_TRUE, indicates that the counter should + * invoke the state change callback to the default + * state once it has been reset, regardless of + * whether the counter was already in the default + * state; if NV_FALSE, indicates that the counter + * should not invoke the state change callback at + * all (it will remain in its current state with + * 0 requesters) + * + * @returns NV_OK if the counter is reset successfully or was already in the + * DEFAULT state + * NV_ERR_INVALID_STATE if there are still outstanding requesters + * with references upon exit + * Other errors from refcntReleaseReferences() and + * _refcntInvokeStateChangeCallback() + */ +NV_STATUS +refcntReset_IMPL +( + POBJREFCNT pRefcnt, + NvBool bForceCallback +) +{ + NvU64 requesterId; + NV_STATUS status; + NV_STATUS errStatus = NV_OK; + REFCNT_REQUESTER_ENTRY_MAPIter requesterMapIter = mapIterAll(&pRefcnt->requesterTree); + + // + // For every requester that has an outstanding reference, release its + // references and invoke the reset callback for it. + // + while (mapIterNext(&requesterMapIter)) + { + NvBool bNodeIsRemoved = NV_FALSE; + requesterId = mapKey(&pRefcnt->requesterTree, requesterMapIter.pValue); + + // + // Suppress the potential state change - we can decide later if we + // need to invoke it explicitly. + // + status = _refcntReleaseReferences(pRefcnt, requesterId, NV_TRUE, + NV_TRUE, &bNodeIsRemoved); + + if (bNodeIsRemoved) + { + // + // pNode is removed from the map and freed in + // _refcntReleaseReferences(), so re-initialize requesterMapIter. + // The iteration should start from the smallest key which is larger + // than requesterId now. + // + requesterMapIter = mapIterRange(&pRefcnt->requesterTree, + mapFindGEQ(&pRefcnt->requesterTree, requesterId), + mapFindLEQ(&pRefcnt->requesterTree, NV_U64_MAX)); + } + + if (status != NV_OK) + { + errStatus = (errStatus == NV_OK) ? status : errStatus; + NV_PRINTF(LEVEL_WARNING, + "Failed to release references on behalf of requester 0x%016llx prior to a forced reset (status: 0x%08x)\n", + requesterId, status); + } + + // Call the reset callback for the requester, if any + _refcntInvokeResetCallback(pRefcnt, requesterId); + } + + // + // After a forced reset, the reference counter should be cleared + // out (but not in the default state yet) + // + if (pRefcnt->count != 0) + { + NV_ASSERT(pRefcnt->count == 0); + pRefcnt->state = REFCNT_STATE_ERROR; + errStatus = NV_ERR_INVALID_STATE; + } + + if (bForceCallback && errStatus == NV_OK) + { + errStatus = _refcntInvokeStateChangeCallback(pRefcnt, + pRefcnt->state, + REFCNT_STATE_DEFAULT); + if (errStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to reset setting (status: 0x%x)\n", + errStatus); + DBG_BREAKPOINT(); + pRefcnt->state = REFCNT_STATE_ERROR; + } + else + { + // + // Only mark the counter as in the default state if we're actually + // there + // + pRefcnt->state = REFCNT_STATE_DEFAULT; + } + } + + return errStatus; +} + +/*! + * @brief Perform the actual reference release on behalf of the requester. 
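+ * Decrements the requester's count (or clears it entirely when
+ * bReleaseAllRequests is set) and transitions the counter back to
+ * REFCNT_STATE_DEFAULT once the global count reaches zero, unless the
+ * state change is suppressed.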
+ * + * @param[in] pRefcnt - REFCNT object pointer + * @param[in] requesterId - Unique identifier representing the + * requester + * @param[in] bReleaseAllRequests - Indicates whether all requests made by the + * requester for this setting should be + * released; this parameter only matters for + * REFCNTs with the ALLOW_RECURSIVE_REQUESTS + * PDB property enabled + * @param[in] bSuppressStateChange - Indicates whether the state change (and + * its callback) should be suppressed + * @param[out] bNodeIsRemoved - Whether the requester is removed from + * requesterTree. + * + * @returns NV_OK if the requester's references are successfully removed from + * the reference counter + * NV_ERR_INVALID_STATE if the reference counter's bookkeeping has + * become unreliable + * NV_ERR_ILLEGAL_ACTION if the requester does not own any references + * to the reference-counted setting + * Other errors from _refcntInvokeStateChangeCallback() + */ +static NV_STATUS +_refcntReleaseReferences +( + POBJREFCNT pRefcnt, + NvU64 requesterId, + NvBool bReleaseAllRequests, + NvBool bSuppressStateChange, + NvBool *bNodeIsRemoved +) +{ + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus = NV_OK; + PREFCNT_REQUESTER_ENTRY pRequesterInfo = NULL; + NvU32 nAdjust = 0; + + (*bNodeIsRemoved) = NV_FALSE; + + // Do not allow any operations on a reference counter in an error state. + if (pRefcnt->state == REFCNT_STATE_ERROR) + { + return NV_ERR_INVALID_STATE; + } + + pRequesterInfo = mapFind(&pRefcnt->requesterTree, requesterId); + if (pRequesterInfo != NULL) + { + if (bReleaseAllRequests) + { + nAdjust = pRequesterInfo->numReferences; + } + else if (pRequesterInfo->numReferences > 0) + { + nAdjust = 1; + } + + if (pRefcnt->count >= nAdjust) + { + pRefcnt->count -= nAdjust; + pRequesterInfo->numReferences -= nAdjust; + } + else + { + // + // The global refcount should always be at least the number of + // references from a single requester. + // + NV_ASSERT(0); + pRefcnt->state = REFCNT_STATE_ERROR; + status = NV_ERR_INVALID_STATE; + } + + // Remove the entry from the reference counter, if necessary + if (pRequesterInfo == NULL || pRequesterInfo->numReferences == 0) + { + mapRemove(&pRefcnt->requesterTree, pRequesterInfo); + (*bNodeIsRemoved) = NV_TRUE; + } + + // + // Transition into the default state if necessary - only change the + // counter state if the transition is successful, but leave the count + // at 0 to inidicate it's not in use. Do not do the state transition + // while in reset - refcntReset() will handle this if needed. + // + if (pRefcnt->state != REFCNT_STATE_DEFAULT && pRefcnt->count == 0 && + !bSuppressStateChange) + { + tmpStatus = _refcntInvokeStateChangeCallback(pRefcnt, + pRefcnt->state, + REFCNT_STATE_DEFAULT); + if (tmpStatus != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to enter default state (current state: %d, status: 0x%08x)\n", + pRefcnt->state, tmpStatus); + if (status == NV_OK) + { + status = tmpStatus; + } + } + else + { + pRefcnt->state = REFCNT_STATE_DEFAULT; + } + } + } + else + { + status = NV_ERR_ILLEGAL_ACTION; + } + + // Sanity check - the count should be 0 when we're in the _DEFAULT state. + if (pRefcnt->state == REFCNT_STATE_DEFAULT) + { + NV_ASSERT(pRefcnt->count == 0); + } + + return status; +} + +/*! + * @brief Invoke the state-change callback to trigger the actual change in the + * reference-counted setting. + * + * If there is no state-change callback, the state change simply succeeds. 
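+ * The callback receives the counter's parent object along with the old and
+ * new states, so the owning object can apply the actual setting change.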
+ * + * @param[in] pRefcnt - REFCNT object pointer + * @param[in] prevState - The state of the reference-counted setting prior to + * the state change + * @param[in] newState - The intended new state of the reference-counted + * setting after the state change + * + * @returns NV_OK if there is no state change callback or the state change is + * successful + * Other errors from the state change callback wired up to + * pRefcnt->refcntStateChangeCallback + */ +static NV_STATUS +_refcntInvokeStateChangeCallback +( + POBJREFCNT pRefcnt, + REFCNT_STATE prevState, + REFCNT_STATE newState +) +{ + if (pRefcnt->refcntStateChangeCallback != NULL) + { + return pRefcnt->refcntStateChangeCallback(pRefcnt, pRefcnt->pParent, + prevState, newState); + } + + // No state change callback is hooked up, assume that's okay + return NV_OK; +} + +/*! + * @brief Invoke the reset callback to handle the reference-counted setting + * being reset underneath the requester. + * + * @param[in] pRefcnt - REFCNT object pointer + * @param[in] requesterId - Unique identifier representing the requester + */ +static void +_refcntInvokeResetCallback +( + POBJREFCNT pRefcnt, + NvU64 requesterId +) +{ + if (pRefcnt->refcntResetCallback != NULL) + { + pRefcnt->refcntResetCallback(pRefcnt, pRefcnt->pParent, requesterId); + } +} + diff --git a/src/nvidia/src/lib/zlib/inflate.c b/src/nvidia/src/lib/zlib/inflate.c new file mode 100644 index 000000000..f1f6ba970 --- /dev/null +++ b/src/nvidia/src/lib/zlib/inflate.c @@ -0,0 +1,1157 @@ +/* inflate.c -- Not copyrighted 1992 by Mark Adler + version c10p1, 10 January 1993 */ + +/* You can do whatever you like with this source file, though I would + prefer that if you modify it and redistribute it that you include + comments to that effect with your name and the date. Thank you. + [The history has been moved to the file ChangeLog.] + */ + +/* + Inflate deflated (PKZIP's method 8 compressed) data. The compression + method searches for as much of the current string of bytes (up to a + length of 258) in the previous 32K bytes. If it doesn't find any + matches (of at least length 3), it codes the next byte. Otherwise, it + codes the length of the matched string and its distance backwards from + the current position. There is a single Huffman code that codes both + single bytes (called "literals") and match lengths. A second Huffman + code codes the distance information, which follows a length code. Each + length or distance code actually represents a base value and a number + of "extra" (sometimes zero) bits to get to add to the base value. At + the end of each deflated block is a special end-of-block (EOB) literal/ + length code. The decoding process is basically: get a literal/length + code; if EOB then done; if a literal, emit the decoded byte; if a + length then get the distance and emit the referred-to bytes from the + sliding window of previously emitted data. + + There are (currently) three kinds of inflate blocks: stored, fixed, and + dynamic. The compressor deals with some chunk of data at a time, and + decides which method to use on a chunk-by-chunk basis. A chunk might + typically be 32K or 64K. If the chunk is uncompressible, then the + "stored" method is used. In this case, the bytes are simply stored as + is, eight bits per byte, with none of the above coding. The bytes are + preceded by a count, since there is no longer an EOB code. + + If the data is compressible, then either the fixed or dynamic methods + are used. 
In the dynamic method, the compressed data is preceded by + an encoding of the literal/length and distance Huffman codes that are + to be used to decode this block. The representation is itself Huffman + coded, and so is preceded by a description of that code. These code + descriptions take up a little space, and so for small blocks, there is + a predefined set of codes, called the fixed codes. The fixed method is + used if the block codes up smaller that way (usually for quite small + chunks), otherwise the dynamic method is used. In the latter case, the + codes are customized to the probabilities in the current block, and so + can code it much better than the pre-determined fixed codes. + + The Huffman codes themselves are decoded using a mutli-level table + lookup, in order to maximize the speed of decoding plus the speed of + building the decoding tables. See the comments below that precede the + lbits and dbits tuning parameters. + */ + + +/* + Notes beyond the 1.93a appnote.txt: + + 1. Distance pointers never point before the beginning of the output + stream. + 2. Distance pointers can point back across blocks, up to 32k away. + 3. There is an implied maximum of 7 bits for the bit length table and + 15 bits for the actual data. + 4. If only one code exists, then it is encoded using one bit. (Zero + would be more efficient, but perhaps a little confusing.) If two + codes exist, they are coded using one bit each (0 and 1). + 5. There is no way of sending zero distance codes--a dummy must be + sent if there are none. (History: a pre 2.0 version of PKZIP would + store blocks with no distance codes, but this was discovered to be + too harsh a criterion.) Valid only for 1.93a. 2.04c does allow + zero distance codes, which is sent as one code of zero bits in + length. + 6. There are up to 286 literal/length codes. Code 256 represents the + end-of-block. Note however that the static length tree defines + 288 codes just to fill out the Huffman codes. Codes 286 and 287 + cannot be used though, since there is no length base or extra bits + defined for them. Similarly, there are up to 30 distance codes. + However, static trees define 32 codes (all 5 bits) to fill out the + Huffman codes, but the last two had better not show up in the data. + 7. Unzip can check dynamic Huffman blocks for complete code sets. + The exception is that a single code would not be complete (see #4). + 8. The five bits following the block type is really the number of + literal codes sent minus 257. + 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits + (1+6+6). Therefore, to output three times the length, you output + three codes (1+1+1), whereas to output four times the same length, + you only need two codes (1+3). Hmm. + 10. In the tree reconstruction algorithm, Code = Code + Increment + only if BitLength(i) is not zero. (Pretty obvious.) + 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) + 12. Note: length code 284 can represent 227-258, but length code 285 + really is 258. The last length deserves its own, short code + since it gets used a lot in very redundant files. The length + 258 is special since 258 - 3 (the min match length) is 255. + 13. The literal/length and distance code bit lengths are read as a + single stream of lengths. It is possible (and advantageous) for + a repeat code (16, 17, or 18) to go across the boundary between + the two sets of lengths. 
+ */ + +//----------------------------------------------------------------------------- +// NVIDIA modifications are solely around interface cleanup, compiler warnings, etc. +//----------------------------------------------------------------------------- + +#include "nvtypes.h" +#include "nvstatus.h" + +#ifndef NVGZ_USER +#define __DRIVER_BUILD__ +// driver build +#include "os/os.h" +#endif /* NVGZ_USER */ + +#ifndef __DRIVER_BUILD__ +// user build : NVGZ_USER +#include +#include +#include +#include + +#define osMemCopy memcpy +#define portMemSet memset +#define portMemAllocNonPaged malloc +#define portMemFree free +#define sizeof sizeof +#define NV_PRINTF(a,b) printf(b) +#endif + +#include "lib/zlib/inflate.h" + +/* Function prototypes */ +static NvU32 huft_build(NvU8 *, NvU16, NvU32 , ush *, ush *, + struct huft **, NvS32 *); +static NvU32 huft_free(struct huft *); +static NvU32 inflate_codes_iterator(PGZ_INFLATE_STATE); +static NvU32 fixed_huft_build(PGZ_INFLATE_STATE); +static NvU32 dynamic_huft_build(PGZ_INFLATE_STATE); + +static void flush_window(PGZ_INFLATE_STATE pGzState) +{ + if ( pGzState->wp == 0) return; + + pGzState->wp2 = pGzState->wp; + + // If output range is not specified, do normal output + if (pGzState->outLower == 0xFFFFFFFF && pGzState->outUpper == 0xFFFFFFFF) + { + portMemCopy(pGzState->outbuf + pGzState->outptr, pGzState->wp, pGzState->window, pGzState->wp); + pGzState->wp1 += pGzState->wp; + pGzState->optSize += pGzState->wp; + } + // slide pGzState->outLower pGzState->outUpper slide + // ----============-----|--------------|-----============ + else if (pGzState->outptr + pGzState->wp - 1 < pGzState->outLower + || pGzState->outptr > pGzState->outUpper) + { + } + // slide pGzState->outLower pGzState->outUpper + // ----=================|===-----------|----------------- + else if (pGzState->outptr <= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 <= pGzState->outUpper) + { + portMemCopy(pGzState->outbuf, + pGzState->wp - (pGzState->outLower - pGzState->outptr), pGzState->window + pGzState->outLower - pGzState->outptr, + pGzState->wp - (pGzState->outLower - pGzState->outptr)); + pGzState->wp1 += pGzState->wp - (pGzState->outLower - pGzState->outptr); + pGzState->optSize += pGzState->wp - (pGzState->outLower - pGzState->outptr); + } + // slide pGzState->outLower pGzState->outUpper + // ----=================|==============|===-------------- + else if (pGzState->outptr <= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 > pGzState->outUpper ) + { + portMemCopy(pGzState->outbuf, + pGzState->outUpper - pGzState->outLower + 1, pGzState->window + pGzState->outLower - pGzState->outptr, + pGzState->outUpper - pGzState->outLower + 1); + pGzState->wp1 += pGzState->outUpper - pGzState->outptr + 1; + pGzState->optSize += pGzState->outUpper - pGzState->outLower + 1; + } + // slide pGzState->outLower pGzState->outUpper + // ---------------------|===========---|----------------- + else if (pGzState->outptr >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 <= pGzState->outUpper) + { + portMemCopy(pGzState->outbuf + pGzState->outptr - pGzState->outLower, + pGzState->wp, pGzState->window, + pGzState->wp); + pGzState->wp1 += pGzState->wp; + pGzState->optSize += pGzState->wp; + } + // slide pGzState->outLower pGzState->outUpper + // ---------------------|==============|===-------------- + else if (pGzState->outptr >= pGzState->outLower + && pGzState->outptr + pGzState->wp - 1 > 
pGzState->outUpper) + { + portMemCopy(pGzState->outbuf + pGzState->outptr - pGzState->outLower, + pGzState->outUpper - pGzState->outptr + 1, pGzState->window, + pGzState->outUpper - pGzState->outptr + 1); + pGzState->wp1 += pGzState->outUpper - pGzState->outptr + 1; + pGzState->optSize += pGzState->outUpper - pGzState->outptr + 1; + } + + pGzState->outptr += pGzState->wp; + pGzState->wp = 0; +} + + +/* Tables for deflate from PKZIP's appnote.txt. */ +static NvU32 border[] = { /* Order of the bit length code lengths */ + 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; +static ush cplens[] = { /* Copy lengths for literal codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + /* note: see note #13 above about the 258 in this list. */ +static ush cplext[] = { /* Extra bits for literal codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */ +static ush cpdist[] = { /* Copy offsets for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577}; +static ush cpdext[] = { /* Extra bits for distance codes */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13}; + +/* Macros for inflate() bit peeking and grabbing. + The usage is: + + NEEDBITS(j) + x = b & mask_bits[j]; + DUMPBITS(j) + + where NEEDBITS makes sure that b has at least j bits in it, and + DUMPBITS removes the bits from b. The macros use the variable k + for the number of bits in b. Normally, b and k are register + variables for speed, and are initialized at the beginning of a + routine that uses these macros from a global bit buffer and count. + + If we assume that EOB will be the longest code, then we will never + ask for bits with NEEDBITS that are beyond the end of the stream. + So, NEEDBITS should not read any more bytes than are needed to + meet the request. Then no bytes need to be "returned" to the buffer + at the end of the last block. + + However, this assumption is not true for fixed blocks--the EOB code + is 7 bits, but the other literal/length codes can be 8 or 9 bits. + (The EOB code is shorter than other codes because fixed blocks are + generally short. So, while a block always has an EOB, many other + literal/length codes have a significantly lower probability of + showing up at all.) However, by making the first table have a + lookup of seven bits, the EOB code will be found in that first + lookup, and so will not require that too many bits be pulled from + the stream. + */ + +static ush mask_bits[] = { + 0x0000, + 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, + 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff +}; + +/* + Huffman code decoding is performed using a multi-level table lookup. + The fastest way to decode is to simply build a lookup table whose + size is determined by the longest code. However, the time it takes + to build this table can also be a factor if the data being decoded + is not very long. The most common codes are necessarily the + shortest codes, so those codes dominate the decoding time, and hence + the speed. The idea is you can have a shorter table that decodes the + shorter, more probable codes, and then point to subsidiary tables for + the longer codes. 
The time it costs to decode the longer codes is
+   then traded against the time it takes to make longer tables.
+
+   The results of this trade are in the variables lbits and dbits
+   below. lbits is the number of bits the first level table for literal/
+   length codes can decode in one step, and dbits is the same thing for
+   the distance codes. Subsequent tables are also less than or equal to
+   those sizes. These values may be adjusted either when all of the
+   codes are shorter than that, in which case the longest code length in
+   bits is used, or when the shortest code is *longer* than the requested
+   table size, in which case the length of the shortest code in bits is
+   used.
+
+   There are two different values for the two tables, since they code a
+   different number of possibilities each. The literal/length table
+   codes 286 possible values, or in a flat code, a little over eight
+   bits. The distance table codes 30 possible values, or a little less
+   than five bits, flat. The optimum values for speed end up being
+   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+   The optimum values may differ though from machine to machine, and
+   possibly even between compilers. Your mileage may vary.
+ */
+
+
+const NvU32 lbits = 9;          /* bits in base literal/length lookup table */
+const NvU32 dbits = 6;          /* bits in base distance lookup table */
+
+static NvU32 hufts;             /* track memory usage */
+
+/*
+ * Given a list of code lengths and a maximum table size, make a set of
+ * tables to decode that set of codes. Return zero on success, one if
+ * the given code set is incomplete (the tables are still built in that
+ * case), two if the input is invalid (all zero length codes or an
+ * oversubscribed set of lengths), and three if not enough memory.
+ */
+static NvU32 huft_build
+(
+    NvU8 *b,            /* code lengths in bits (all assumed <= BMAX) */
+    NvU16 n,            /* number of codes (assumed <= N_MAX) */
+    NvU32 s,            /* number of simple-valued codes (0..s-1) */
+    ush *d,             /* list of base values for non-simple codes */
+    ush *e,             /* list of extra bits for non-simple codes */
+    struct huft **t,    /* result: starting table */
+    NvS32 *m            /* maximum lookup bits, returns actual */
+)
+{
+    NvU32 a;                    /* counter for codes of length k */
+    NvU32 c[BMAX+1];            /* bit length count table */
+    NvU32 f;                    /* i repeats in table every f entries */
+    NvS32 g;                    /* maximum code length */
+    NvS32 h;                    /* table level */
+    NvU16 i;                    /* counter, current code */
+    NvU32 j;                    /* counter */
+    NvS32 k;                    /* number of bits in current code */
+    NvS32 l;                    /* bits per table (returned in m) */
+    NvU8 *p8;                   /* pointer into b[] */
+    NvU16 *p16;                 /* pointer into v[] */
+    NvU32 *p32;                 /* pointer into c[] */
+    struct huft *q;             /* points to current table */
+    struct huft r;              /* table entry for structure assignment */
+    struct huft *u[BMAX];       /* table stack */
+    NvU16 v[N_MAX];             /* values in order of bit length */
+    NvS32 w;                    /* bits before this table == (l * h) */
+    NvU32 x[BMAX+1];            /* bit offsets, then code stack */
+    NvU32 *xp;                  /* pointer into x */
+    NvS32 y;                    /* number of dummy codes added */
+    NvU32 z;                    /* number of entries in current table */
+
+    /* Generate counts for each bit length */
+    portMemSet((void*)c,0,sizeof(c));
+
+    p8 = b;  i = n;
+    do {
+        Tracecv(*p8, (stderr, (n-i >= ' ' && n-i <= '~' ? 
"%c %d\n" : "0x%x %d\n"), + n-i, *p8)); + c[*p8]++; /* assume all entries <= BMAX */ + p8++; /* Can't combine with above line (Solaris bug) */ + } while (--i); + if (c[0] == n) /* null input--all zero length codes */ + { + *t = (struct huft *)NULL; + *m = 0; + return GZ_STATE_HUFT_OK; + } + + + /* Find minimum and maximum length, bound *m by those */ + l = *m; + for (j = 1; j <= BMAX; j++) + if (c[j]) + break; + k = j; /* minimum code length */ + if ((NvU32)l < j) + l = j; + for (i = BMAX; i; i--) + if (c[i]) + break; + g = i; /* maximum code length */ + if ((NvU32)l > i) + l = i; + *m = l; + + + /* Adjust last length count to fill out codes, if needed */ + for (y = 1 << j; j < i; j++, y <<= 1) + if ((y -= c[j]) < 0) + return GZ_STATE_HUFT_ERROR; /* bad input: more codes than bits */ + if ((y -= c[i]) < 0) + return GZ_STATE_HUFT_ERROR; + c[i] += y; + + + /* Generate starting offsets into the value table for each length */ + x[1] = j = 0; + p32 = c + 1; xp = x + 2; + while (--i) { /* note that i == g from above */ + *xp++ = (j += *p32++); + } + + + /* Make a table of values in order of bit lengths */ + p8 = b; i = 0; + do { + if ((j = *p8++) != 0) + v[x[j]++] = i; + } while (++i < n); + + + /* Generate the Huffman codes and for each, make the table entries */ + x[0] = i = 0; /* first Huffman code is zero */ + p16 = v; /* grab values in bit order */ + h = -1; /* no tables yet--level -1 */ + w = -l; /* bits decoded == (l * h) */ + u[0] = (struct huft *)NULL; /* just to keep compilers happy */ + q = (struct huft *)NULL; /* ditto */ + z = 0; /* ditto */ + + /* go through the bit lengths (k already is bits in shortest code) */ + for (; k <= g; k++) + { + a = c[k]; + while (a--) + { + /* here i is the Huffman code of length k bits for value *p */ + /* make tables up to required level */ + while (k > w + l) + { + h++; + w += l; /* previous table always l bits */ + + /* compute minimum size table less than or equal to l bits */ + z = (NvU32)((z = (NvU32)(g - w)) > (NvU32)l ? (NvU32)l : z); /* upper limit on table size */ + if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ + { /* too few codes for k-w bit table */ + f -= a + 1; /* deduct codes from patterns left */ + xp = c + k; + while (++j < z) /* try smaller tables up to z bits */ + { + if ((f <<= 1) <= *++xp) + break; /* enough codes to use up j bits */ + f -= *xp; /* else deduct codes from patterns */ + } + } + z = 1 << j; /* table entries for j-bit table */ + + /* allocate and link in new table */ + + q = portMemAllocNonPaged((z + 1)*sizeof(struct huft)); + if (q == NULL) + { + return GZ_STATE_HUFT_ERROR; + } + + if (q == (struct huft *)NULL) + { + if (h) + huft_free(u[0]); + return GZ_STATE_HUFT_ERROR; /* not enough memory */ + } + hufts += z + 1; /* track memory usage */ + *t = q + 1; /* link to list for huft_free() */ + *(t = &(q->v.t)) = (struct huft *)NULL; + u[h] = ++q; /* table starts after link */ + + /* connect to last table, if there is one */ + if (h) + { + x[h] = i; /* save pattern for backing up */ + r.b = (uch)l; /* bits to dump before this table */ + r.e = (uch)(16 + j); /* bits in this table */ + r.v.t = q; /* pointer to this table */ + j = i >> (w - l); /* (get around Turbo C bug) */ + u[h-1][j] = r; /* connect to last table */ + } + } + + /* set up table entry in r */ + r.b = (uch)(k - w); + if (p16 >= v + n) + r.e = 99; /* out of values--invalid code */ + else if (*p16 < s) + { + r.e = (uch)(*p16 < 256 ? 
16 : 15); /* 256 is end-of-block code */ + r.v.n = (ush)(*p16); /* simple code is just the value */ + p16++; /* one compiler does not like *p++ */ + } + else + { + r.e = (uch)e[*p16 - s]; /* non-simple--look up in lists */ + r.v.n = d[*p16++ - s]; + } + + /* fill code-like entries with r */ + f = 1 << (k - w); + for (j = i >> w; j < z; j += f) + q[j] = r; + + /* backwards increment the k-bit code i */ + for (j = 1 << (k - 1); i & j; j >>= 1) + i ^= j; + i ^= j; + + /* backup over finished tables */ + while ((i & ((NvU32)(1 << w) - 1)) != x[h]) + { + h--; /* don't need to update q */ + w -= l; + } + } + } + + + /* Return true (1) if we were given an incomplete table */ + return y != 0 && g != 1; +} + +/* + * Free the malloc'ed tables built by huft_build(), which makes a linked + * list of the tables it made, with the links in a dummy first entry of + * each table. + */ +static NvU32 huft_free +( + struct huft *t /* table to free */ +) +{ + struct huft *p, *q; + + /* Go through linked list, freeing from the malloced (t[-1]) address. */ + p = t; + while (p != (struct huft *)NULL) + { + q = (--p)->v.t; + portMemFree((void*)p); + p = q; + } + return GZ_STATE_HUFT_OK; +} + +static NvU32 inflate_codes_iterator_store(PGZ_INFLATE_STATE pGzState) +{ + NvU32 n = pGzState->codesState.sn; /* number of bytes in block */ + NvU32 w = pGzState->codesState.w; /* current window position */ + NvU32 k = pGzState->codesState.k; /* number of bits in bit buffer */ + ulg b = pGzState->codesState.b; /* bit buffer */ + + /* read and output the compressed data */ + while (n) + { + n--; + NEEDBITS(8) + pGzState->window[w++] = (uch)b; + DUMPBITS(8) + if (w == WSIZE) + { + flush_output(w); + w = 0; + break; + } + } + + /* restore the globals from the locals */ + pGzState->codesState.sn = n; + pGzState->codesState.w = w; + pGzState->codesState.b = b; + pGzState->codesState.k = k; + + if (n != 0) + { + return GZ_STATE_ITERATOR_OK; + } + else + { + return GZ_STATE_ITERATOR_END; + } +} + +/* inflate (decompress) the codes in a deflated (compressed) block. +Return an error code or zero if it all goes ok. 
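
A rough sketch of how the struct huft entries driving this loop are
interpreted (illustrative pseudo-code only; the authoritative logic is the
code below, and "entry" is just a placeholder for the t->e / t->b / t->v
fields it reads):

    NEEDBITS(bl)                      make sure bl bits are in bit buffer b
    entry = tl[(unsigned)b & ml]      first-level literal/length lookup
    while (entry.e > 16)              indirect entry: drop entry.b bits and
        follow entry.v.t                index a sub-table with the next
                                        (entry.e - 16) bits; entry.e == 99
                                        marks an invalid code
    DUMPBITS(entry.b)
    entry.e == 16  ->  output the literal byte entry.v.n
    entry.e == 15  ->  end of block
    otherwise      ->  entry.e extra bits are added to the length base
                       entry.v.n, then the same kind of lookup runs against
                       td/bd to decode the match distance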
*/ +static NvU32 inflate_codes_iterator(PGZ_INFLATE_STATE pGzState) +{ + NvU32 e = pGzState->codesState.e; /* table entry flag/number of extra bits */ + NvU32 n = pGzState->codesState.n; /* length and index for copy */ + NvU32 d = pGzState->codesState.d; + NvU32 w = pGzState->codesState.w; /* current window position */ + struct huft *t = pGzState->codesState.t; /* pointer to table entry */ + ulg b = pGzState->codesState.b; /* bit buffer */ + NvU32 k = pGzState->codesState.k; /* number of bits in bit buffer */ + NvU32 ml = mask_bits[pGzState->bl]; /* masks for bl and bd bits */ + NvU32 md = mask_bits[pGzState->bd]; + NvU32 r = 0; + + if (pGzState->codesState.continue_copy == 1) + goto continue_copy; + + for (;;) + { + NEEDBITS((unsigned)pGzState->bl) + if ((e = (t = pGzState->tl + ((unsigned)b & ml))->e) > 16) + { + do { + if (e == 99) + return GZ_STATE_ITERATOR_ERROR; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + } + DUMPBITS(t->b) + + if (e == 16) /* then it's a literal */ + { + pGzState->window[w++] = (uch)t->v.n; + Tracevv((stderr, "%c", pGzState->window[w-1])); + if (w == WSIZE) + { + pGzState->wp1 = 0; + flush_output(w); + w = 0; + r = GZ_STATE_ITERATOR_OK; + goto exit; + } + } + else /* it's an EOB or a length */ + { + /* exit if end of block */ + if (e == 15) + { + r = GZ_STATE_ITERATOR_END; + goto exit; + } + + /* get length of block to copy */ + NEEDBITS(e) + n = t->v.n + ((unsigned)b & mask_bits[e]); + DUMPBITS(e); + + /* decode distance of block to copy */ + NEEDBITS((unsigned)pGzState->bd) + if ((e = (t = pGzState->td + ((unsigned)b & md))->e) > 16) + { + do { + if (e == 99) + return 1; + DUMPBITS(t->b) + e -= 16; + NEEDBITS(e) + } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16); + } + DUMPBITS(t->b) + + NEEDBITS(e) + d = w - t->v.n - ((unsigned)b & mask_bits[e]); + DUMPBITS(e) + + Tracevv((stderr,"\\[%d,%d]", w-d, n)); + + /* do the copy */ + do { + n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e); +#if !defined(NOMEMCPY) && !defined(DEBUG) + if (w - d >= e) /* (this test assumes unsigned comparison) */ + { + memcpy(pGzState->window + w, pGzState->window + d, e); + w += e; + d += e; + } + else /* do it slow to avoid memcpy() overlap */ +#endif /* !NOMEMCPY */ + { + do { + pGzState->window[w++] = pGzState->window[d++]; + Tracevv((stderr, "%c", pGzState->window[w-1])); + } while (--e); + } + if (w == WSIZE) + { + pGzState->wp1 = 0; + flush_output(w); + w = 0; + r = GZ_STATE_ITERATOR_OK; + pGzState->codesState.continue_copy = 1; + goto exit; + } +continue_copy: ; + } while (n); + + pGzState->codesState.continue_copy = 0; + } + } + +exit: + + pGzState->codesState.e = e; /* table entry flag/number of extra bits */ + pGzState->codesState.n = n; + pGzState->codesState.d = d; /* length and index for copy */ + pGzState->codesState.w = w; /* current window position */ + pGzState->codesState.t = t; /* pointer to table entry */ + pGzState->codesState.b = b; /* bit buffer */ + pGzState->codesState.k = k; /* number of bits in bit buffer */ + + /* done */ + return r; +} + +static void huft_destroy(PGZ_INFLATE_STATE pGzState) +{ + /* free the decoding tables, return */ + if (pGzState->tl != NULL) + { + huft_free(pGzState->tl); + pGzState->tl = NULL; + } + + if (pGzState->td != NULL) + { + huft_free(pGzState->td); + pGzState->td = NULL; + } +} + +static NvU32 fixed_huft_build(PGZ_INFLATE_STATE pGzState) +/* decompress an inflated type 1 (fixed Huffman codes) block. 
We should + either replace this with a custom decoder, or at least precompute the + Huffman tables. */ +{ + NvU32 i; /* temporary variable */ + NvU8 l[N_MAX]; /* length list for huft_build */ + + /* set up literal table */ + for (i = 0; i < 144; i++) + l[i] = 8; + for (; i < 256; i++) + l[i] = 9; + for (; i < 280; i++) + l[i] = 7; + for (; i < N_MAX; i++) /* make a complete, but wrong code set */ + l[i] = 8; + pGzState->bl = 7; + if ((i = huft_build(l, N_MAX, 257, cplens, cplext, &pGzState->tl, &pGzState->bl)) != 0) + return i; + + + /* set up distance table */ + for (i = 0; i < 30; i++) /* make an incomplete code set */ + l[i] = 5; + pGzState->bd = 5; + if ((i = huft_build(l, 30, 0, cpdist, cpdext, &pGzState->td, &pGzState->bd)) > GZ_STATE_HUFT_INCOMP) + { + huft_free(pGzState->tl); + return i; + } + + return GZ_STATE_HUFT_OK; +} + +/* decompress an inflated type 2 (dynamic Huffman codes) block. */ +static NvU32 dynamic_huft_build(PGZ_INFLATE_STATE pGzState) +{ + NvU32 i; /* temporary variables */ + NvU32 j; + NvU32 l; /* last length */ + NvU32 m; /* mask for bit lengths table */ + NvU32 n; /* number of lengths to get */ + NvU32 nb; /* number of bit length codes */ + NvU16 nl; /* number of literal/length codes */ + NvU16 nd; /* number of distance codes */ +#ifdef PKZIP_BUG_WORKAROUND + NvU8 ll[288+32]; /* literal/length and distance code lengths */ +#else + NvU8 ll[286+30]; /* literal/length and distance code lengths */ +#endif + ulg b; /* bit buffer */ + NvU32 k; /* number of bits in bit buffer */ + + + /* make local bit buffer */ + b = pGzState->bb; + k = pGzState->bk; + + + /* read in table lengths */ + NEEDBITS(5) + nl = 257 + ((NvU8)b & 0x1f); /* number of literal/length codes */ + DUMPBITS(5) + NEEDBITS(5) + nd = 1 + ((NvU8)b & 0x1f); /* number of distance codes */ + DUMPBITS(5) + NEEDBITS(4) + nb = 4 + ((NvU8)b & 0xf); /* number of bit length codes */ + DUMPBITS(4) +#ifdef PKZIP_BUG_WORKAROUND + if (nl > 288 || nd > 32) +#else + if (nl > 286 || nd > 30) +#endif + return GZ_STATE_HUFT_INCOMP; /* bad lengths */ + + /* read in bit-length-code lengths */ + for (j = 0; j < nb; j++) + { + NEEDBITS(3) + ll[border[j]] = (NvU8)b & 7; + DUMPBITS(3) + } + for (; j < 19; j++) + ll[border[j]] = 0; + + /* build decoding table for trees--single level, 7 bit lookup */ + pGzState->bl = 7; + if ((i = huft_build(ll, 19, 19, NULL, NULL, &pGzState->tl, &pGzState->bl)) != 0) + { + if (i == GZ_STATE_HUFT_INCOMP) + huft_free(pGzState->tl); + return i; /* incomplete code set */ + } + + /* read in literal and distance code lengths */ + n = nl + nd; + m = mask_bits[pGzState->bl]; + i = l = 0; + while ((NvU32)i < n) + { + NEEDBITS((NvU32)pGzState->bl) + j = (pGzState->td = pGzState->tl + ((NvU32)b & m))->b; + DUMPBITS(j) + j = pGzState->td->v.n; + if (j < 16) /* length of code in bits (0..15) */ + ll[i++] = (NvU8)(l = j); /* save last length in l */ + else if (j == 16) /* repeat last length 3 to 6 times */ + { + NEEDBITS(2) + j = 3 + ((NvU32)b & 3); + DUMPBITS(2) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = (NvU8)l; + } + else if (j == 17) /* 3 to 10 zero length codes */ + { + NEEDBITS(3) + j = 3 + ((NvU32)b & 7); + DUMPBITS(3) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = 0; + l = 0; + } + else /* j == 18: 11 to 138 zero length codes */ + { + NEEDBITS(7) + j = 11 + ((NvU32)b & 0x7f); + DUMPBITS(7) + if ((NvU32)i + j > n) + return GZ_STATE_HUFT_INCOMP; + while (j--) + ll[i++] = 0; + l = 0; + } + } + + /* free decoding table for trees */ + 
huft_free(pGzState->tl); + + /* restore the global bit buffer */ + pGzState->bb = b; + pGzState->bk = k; + + /* build the decoding tables for literal/length and distance codes */ + pGzState->bl = lbits; + if ((i = huft_build(ll, nl, 257, cplens, cplext, &pGzState->tl, &pGzState->bl)) != 0) + { + if (i == GZ_STATE_HUFT_INCOMP) { + NV_PRINTF(LEVEL_ERROR, "dload, incomplete literal tree\n"); + huft_free(pGzState->tl); + } + return i; /* incomplete code set */ + } + pGzState->bd = dbits; + if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &pGzState->td, &pGzState->bd)) != 0) + { + if (i == GZ_STATE_HUFT_INCOMP) { + NV_PRINTF(LEVEL_ERROR, "dload, incomplete distance tree\n"); +#ifdef PKZIP_BUG_WORKAROUND + i = GZ_STATE_HUFT_OK; + } +#else + huft_free(pGzState->td); + } + huft_free(pGzState->tl); + return i; /* incomplete code set */ +#endif + } + + return GZ_STATE_HUFT_OK; +} + +static +NV_STATUS utilGzInit(const NvU8 *zArray, NvU8* oBuffer, NvU32 numTotalBytes, NvU8* window, PGZ_INFLATE_STATE pGzState) +{ + portMemSet(pGzState, 0, sizeof(GZ_INFLATE_STATE)); + portMemSet(window, 0, sizeof(GZ_SLIDE_WINDOW_SIZE)); + + pGzState->inbuf = (NvU8*)zArray; + pGzState->outbuf = oBuffer; + pGzState->outBufSize = numTotalBytes; + pGzState->window = window; + pGzState->newblock = 1; + pGzState->outLower = 0xFFFFFFFF; + pGzState->outUpper = 0xFFFFFFFF; + + return NV_OK; +} + +/* NVIDIA addition: give pointers to input and known-large-enough output buffers. */ +/* decompress an inflated entry */ +NV_STATUS utilGzAllocate(const NvU8 *zArray, NvU32 numTotalBytes, PGZ_INFLATE_STATE *ppGzState) +{ + PGZ_INFLATE_STATE pGzState = NULL; + NvU8 *window = NULL; + NV_STATUS status = NV_OK; + + pGzState = portMemAllocNonPaged(sizeof(GZ_INFLATE_STATE)); + if (pGzState == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + window = portMemAllocNonPaged(GZ_SLIDE_WINDOW_SIZE); + if (window == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + utilGzInit(zArray, 0, numTotalBytes, window, pGzState); + + *ppGzState = pGzState; + +done: + if (status != NV_OK) + { + portMemFree(pGzState); + portMemFree(window); + } + return status; + +} + +NvU32 utilGzIterator(PGZ_INFLATE_STATE pGzState) +{ + NvU32 t; /* block type */ + NvU32 w; /* current window position */ + NvU32 b; /* bit buffer */ + NvU32 k; /* number of bits in bit buffer */ + NvU32 gzStatus = GZ_STATE_ITERATOR_ERROR; + + // new decompression block, we need to construct huffman tree. 
+ if (pGzState->newblock == 1) + { + /* make local bit buffer */ + b = pGzState->bb; + k = pGzState->bk; + + /* read in last block bit */ + NEEDBITS(1) + pGzState->e = (NvU32)b & 1; + DUMPBITS(1) + + /* read in block type */ + NEEDBITS(2) + t = (NvU32)b & 3; + DUMPBITS(2) + + /* restore the global bit buffer */ + pGzState->bb = b; + pGzState->bk = k; + + /* inflate that block type */ + switch (t) + { + case 2: + { + gzStatus = dynamic_huft_build(pGzState); + break; + } + case 1: + { + gzStatus = fixed_huft_build(pGzState); + break; + } + case 0: + { + NvU32 n; + b = pGzState->bb; + k = pGzState->bk; + w = pGzState->wp; + + n = k & 7; + DUMPBITS(n); + + /* get the length and its complement */ + NEEDBITS(16) + n = ((unsigned int)b & 0xffff); + DUMPBITS(16) + NEEDBITS(16) + if (n != (unsigned int)((~b) & 0xffff)) + { + return GZ_STATE_ITERATOR_ERROR; /* error in compressed data */ + } + DUMPBITS(16) + + pGzState->wp = w; /* restore global window pointer */ + pGzState->bb = b; /* restore global bit buffer */ + pGzState->bk = k; + pGzState->codesState.sn = n; + break; + } + default: + { + return GZ_STATE_ITERATOR_ERROR; + } + } + + if (t != 0 && gzStatus != GZ_STATE_HUFT_OK) + { + return GZ_STATE_ITERATOR_ERROR; + } + + pGzState->newblock = 0; + + /* make local copies of globals */ + pGzState->codesState.b = pGzState->bb; /* initialize bit buffer */ + pGzState->codesState.k = pGzState->bk; + pGzState->codesState.w = pGzState->wp; /* initialize window position */ + } + + // decompress one slide window + if (pGzState->codesState.sn == 0) + { + gzStatus = inflate_codes_iterator(pGzState); + } + else + { + gzStatus = inflate_codes_iterator_store(pGzState); + } + + // decompression ok and current block finished. + if (gzStatus == GZ_STATE_ITERATOR_END) + { + /* restore the globals from the locals */ + pGzState->wp = pGzState->codesState.w; /* restore global window pointer */ + pGzState->bb = pGzState->codesState.b; /* restore global bit buffer */ + pGzState->bk = pGzState->codesState.k; + portMemSet(&pGzState->codesState, 0, sizeof(GZ_INFLATE_CODES_STATE)); + + huft_destroy(pGzState); + pGzState->newblock = 1; + + // current block is the last one, flush remain data in slide window + if (pGzState->e) + { + while (pGzState->bk >= 8) + { + pGzState->bk -= 8; + pGzState->inptr--; + } + + /* flush out slide */ + flush_output(pGzState->wp); + } + + // continue iteration + gzStatus = GZ_STATE_ITERATOR_OK; + } + + return gzStatus; +} + +NV_STATUS utilGzDestroy(PGZ_INFLATE_STATE pGzState) +{ + huft_destroy(pGzState); + portMemFree(pGzState->window); + portMemFree(pGzState); + return NV_OK; +} + +NvU32 utilGzGetData(PGZ_INFLATE_STATE pGzState, NvU32 offset, NvU32 size, NvU8 * outBuffer) +{ + NvU32 sizew = 0, oldOutBufSize; + NvU8 * oldInBuf, *oldOutBuf; + uch * oldWindow; + NV_STATUS status = NV_OK; + + if (pGzState == NULL || outBuffer == NULL || offset >= pGzState->outBufSize) + { + return 0; + } + + pGzState->optSize = 0; + // check requested range [offset, offset + size) with outptr + if (pGzState->outptr != 0) + { + if ( offset >= ((pGzState->outptr + WSIZE - 1) / WSIZE - 1) * WSIZE + pGzState->wp1 ) + { + // check remaining data in previous slide window + pGzState->wp1 = offset - (((pGzState->outptr + WSIZE -1 ) / WSIZE - 1) * WSIZE); + + if (pGzState->wp1 < pGzState->wp2) + { + sizew = pGzState->wp2 - pGzState->wp1; + + // request can be satisfied from window + if (sizew >= size) + { + portMemCopy(outBuffer, size, pGzState->window + pGzState->wp1, size); + pGzState->wp1 += size; + pGzState->optSize += 
size; + return pGzState->optSize; + } + // copy data from slide window and continue iteration + else + { + portMemCopy(outBuffer, sizew,pGzState->window + pGzState->wp1, sizew); + outBuffer += sizew; + pGzState->optSize += sizew; + } + } + } + else + { + // slide window passed requested range, restart decompression from beginning. + huft_destroy(pGzState); + + oldInBuf = pGzState->inbuf; + oldOutBuf = pGzState->outbuf; + oldOutBufSize = pGzState->outBufSize; + oldWindow = pGzState->window; + + utilGzInit(oldInBuf, oldOutBuf, oldOutBufSize, oldWindow, pGzState); + } + } + + pGzState->outLower = offset + sizew; + pGzState->outUpper = offset + size - 1; + pGzState->outbuf = outBuffer; + pGzState->wp1 = 0; + pGzState->wp2 = 0; + + while (pGzState->outptr < offset + size) + { + if ((status = utilGzIterator(pGzState)) != GZ_STATE_ITERATOR_OK) + break; + } + + if (status == GZ_STATE_ITERATOR_ERROR) + { + return 0; + } + + return pGzState->optSize; +} + diff --git a/src/nvidia/src/libraries/containers/btree/btree.c b/src/nvidia/src/libraries/containers/btree/btree.c new file mode 100644 index 000000000..155dee131 --- /dev/null +++ b/src/nvidia/src/libraries/containers/btree/btree.c @@ -0,0 +1,841 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/***************************** Balanced Tree *******************************\ +* * +* A generic library to red black tree -- every operation is O(log(n)) * +* check http://en.wikipedia.org/wiki/Red-black_tree or similar www pages * +* * +\***************************************************************************/ + +#include "utils/nvprintf.h" +#include "utils/nvassert.h" +#include "nvport/nvport.h" +#include "containers/btree.h" + +// +// Debugging support. +// +#if PORT_IS_CHECKED_BUILD + +// +// Dump current tree to debug port. +// +static NV_STATUS +_btreeDumpBranch +( + NODE *pNode, + NvU32 level +) +{ + NvU32 i; + if (pNode) + { + _btreeDumpBranch(pNode->left, level+1); + + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: "); + for (i=0; ikeyStart); + NV_PRINTF(LEVEL_INFO, "keyEnd = 0x%llx\n", pNode->keyEnd); + NV_PRINTF(LEVEL_INFO, "isRed = 0x%d\n", pNode->isRed ? 
1 : 0); + NV_PRINTF(LEVEL_INFO, "parent = 0x%p\n", pNode->parent); + NV_PRINTF(LEVEL_INFO, "left = 0x%p\n", pNode->left); + NV_PRINTF(LEVEL_INFO, "right = 0x%p\n", pNode->right); + + _btreeDumpBranch(pNode->right, level+1); + } + return (NV_OK); +} + +static NV_STATUS +_btreeDumpTree +( + NODE *pRoot +) +{ + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: ======================== Tree Dump ==========================\n\r"); + if (pRoot == NULL) + { + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: NULL\n\r"); + } + else + { + _btreeDumpBranch(pRoot, 0); + } + NV_PRINTF(LEVEL_INFO, "NVRM_BTREE: =============================================================\n\r"); + return (NV_OK); +} + +// +// Validate node. +// +#define VALIDATE_NODE(pn) \ +{ \ + NV_ASSERT(_btreeNodeValidate(pn) == NV_OK); \ +} + +#define VALIDATE_TREE(pt) \ +{ \ + NV_ASSERT(_btreeTreeValidate(pt) == NV_OK); \ +} + +// +// Validate a nodes branch and count values. +// +static NV_STATUS +_btreeNodeValidate +( + NODE *pNode +) +{ + NV_STATUS status; + + status = NV_OK; + if (pNode == NULL) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR validating NULL NODE.\n\r"); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + return (NV_ERR_INVALID_PARAMETER); + } + if (pNode->left) + { + if (pNode->left->keyEnd >= pNode->keyStart) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent left branch, keyStart = 0x%llx\n", pNode->keyStart); + NV_PRINTF(LEVEL_ERROR, " Left keyEnd = 0x%llx\n", pNode->left->keyEnd); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->left->parent != pNode) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent left branch, Node = 0x%p\n", pNode); + NV_PRINTF(LEVEL_ERROR, " left->parent = 0x%p\n", pNode->left); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + if (pNode->right) + { + if (pNode->right->keyStart <= pNode->keyEnd) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent right branch, keyEnd = 0x%llx\n", pNode->keyEnd); + NV_PRINTF(LEVEL_ERROR, " Right keyStart = 0x%llx\n", pNode->right->keyStart); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->right->parent != pNode) + { + NV_PRINTF(LEVEL_ERROR, "NVRM_BTREE: ERROR inconsistent right branch, Node = 0x%p\n", pNode); + NV_PRINTF(LEVEL_ERROR, " right->parent = 0x%p\n", pNode->right); + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + + // red black tree property: Every red node that is not a leaf has only black children. 
+ if (pNode->isRed) + { + if (pNode->left && pNode->left->isRed) + { + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + if (pNode->right && pNode->right->isRed) + { + NV_ASSERT_FAILED("DBG_BREAKPOINT"); + status = NV_ERR_INVALID_PARAMETER; + } + } + + return (status); +} + + +static NV_STATUS +_btreeBranchValidate +( + NODE *pNode +) +{ + NV_STATUS status; + status = NV_OK; + if (pNode) + { + if (pNode->left) + { + status |= _btreeBranchValidate(pNode->left); + } + status |= _btreeNodeValidate(pNode); + if (pNode->right) + { + status |= _btreeBranchValidate(pNode->right); + } + } + return (status); +} + +static NV_STATUS +_btreeTreeValidate +( + NODE *pRoot +) +{ + NV_STATUS status; + + status = NV_OK; + if (pRoot) + { + NV_ASSERT(!pRoot->isRed); + status = _btreeNodeValidate(pRoot); + if (pRoot->left) + { + status |= _btreeBranchValidate(pRoot->left); + } + if (pRoot->right) + { + status |= _btreeBranchValidate(pRoot->right); + } + } + if (status) + { + _btreeDumpTree(pRoot); + } + return (status); +} + +#else +// +// Validate nothing. +// +#define VALIDATE_NODE(pn) +#define VALIDATE_TREE(pt) +#endif // PORT_IS_CHECKED_BUILD + +// rbt helper function +static void _rotateLeft(NODE **pRoot, NODE *x) +{ + // rotate node x to left + NODE *y = x->right; + + NV_ASSERT (x); + NV_ASSERT (y); + + // establish x->right link + x->right = y->left; + if (y->left) + { + y->left->parent = x; + } + + // establish y->parent link + y->parent = x->parent; + if (x->parent) + { + if (x == x->parent->left) + { + x->parent->left = y; + } + else + { + x->parent->right = y; + } + } + else + { + *pRoot = y; + } + + // link x and y + y->left = x; + x->parent = y; + VALIDATE_NODE(x); +} + +// rbt helper function +static void _rotateRight(NODE **pRoot, NODE *x) +{ + // rotate node x to right + NODE *y = x->left; + + NV_ASSERT (x); + NV_ASSERT (y); + + // establish x->left link + x->left = y->right; + if (y->right) + { + y->right->parent = x; + } + + // establish y->parent link + y->parent = x->parent; + if (x->parent) + { + if (x == x->parent->right) + { + x->parent->right = y; + } + else + { + x->parent->left = y; + } + } + else + { + *pRoot = y; + } + + // link x and y + y->right = x; + x->parent = y; + VALIDATE_NODE(x); +} + +// rbt helper function: +// - maintain red-black tree balance after inserting node x +static void _insertFixup(NODE **pRoot, NODE *x) +{ + // check red-black properties + while((x!=*pRoot) && x->parent->isRed) + { + // we have a violation + if (x->parent == x->parent->parent->left) + { + NODE *y = x->parent->parent->right; + if (y && y->isRed) + { + // uncle is RED + x->parent->isRed = NV_FALSE; + y->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + x = x->parent->parent; + } + else + { + // uncle is BLACK + if (x == x->parent->right) + { + // make x a left child + x = x->parent; + _rotateLeft(pRoot, x); + } + + // recolor and rotate + x->parent->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + _rotateRight(pRoot, x->parent->parent); + } + } + else + { + // mirror image of above code + NODE *y = x->parent->parent->left; + if (y && y->isRed) + { + // uncle is RED + x->parent->isRed = NV_FALSE; + y->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + x = x->parent->parent; + } + else + { + // uncle is BLACK + if (x == x->parent->left) + { + x = x->parent; + _rotateRight(pRoot, x); + } + x->parent->isRed = NV_FALSE; + x->parent->parent->isRed = NV_TRUE; + _rotateLeft(pRoot, x->parent->parent); + } + } + } + (*pRoot)->isRed = NV_FALSE; +} + +// 
insert a new node (no duplicates allowed) +NV_STATUS +btreeInsert +( + PNODE newNode, + PNODE *pRoot +) +{ + NODE *current; + NODE *parent; + + if (newNode == NULL || pRoot == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + // find future parent + current = *pRoot; + parent = NULL; + + if (newNode->keyEnd < newNode->keyStart) + { + return NV_ERR_INVALID_ARGUMENT; + } + + while (current) + { + parent = current; + if (newNode->keyEnd < current->keyStart) + { + current = current->left; + } + else if (newNode->keyStart > current->keyEnd) + { + current = current->right; + } + else + { + return NV_ERR_INSERT_DUPLICATE_NAME; + } + } + + // the caller allocated the node already, just fix the links + newNode->parent = parent; + newNode->left = NULL; + newNode->right = NULL; + newNode->isRed = NV_TRUE; + + // insert node in tree + if(parent) + { + if (newNode->keyEnd < parent->keyStart) + { + parent->left = newNode; + } + else + { + parent->right = newNode; + } + } + else + { + *pRoot = newNode; + } + + _insertFixup(pRoot, newNode); + VALIDATE_NODE(newNode); + + return NV_OK; +} + +// rbt helper function +// - maintain red-black tree balance after deleting node x +// - this is a bit ugly because we use NULL as a sentinel +static void _deleteFixup(NODE **pRoot, NODE *parentOfX, NODE *x) +{ + while ((x != *pRoot) && (!x || !x->isRed)) + { + NV_ASSERT (!(x == NULL && parentOfX == NULL)); + // NULL nodes are sentinel nodes. If we delete a sentinel node (x==NULL) it + // must have a parent node (or be the root). Hence, parentOfX == NULL with + // x==NULL is never possible (tree invariant) + + if ((parentOfX != NULL) && (x == parentOfX->left)) + { + NODE *w = parentOfX->right; + if (w && w->isRed) + { + w->isRed = NV_FALSE; + parentOfX->isRed = NV_TRUE; + _rotateLeft(pRoot, parentOfX); + w = parentOfX->right; + } + if (!w || (((!w->left || !w->left->isRed) && (!w->right || !w->right->isRed)))) + { + if (w) + { + w->isRed = NV_TRUE; + } + x = parentOfX; + } + else + { + if (!w->right || !w->right->isRed) + { + w->left->isRed = NV_FALSE; + w->isRed = NV_TRUE; + _rotateRight(pRoot, w); + w = parentOfX->right; + } + w->isRed = parentOfX->isRed; + parentOfX->isRed = NV_FALSE; + w->right->isRed = NV_FALSE; + _rotateLeft(pRoot, parentOfX); + x = *pRoot; + } + } + else if (parentOfX != NULL) + { + NODE *w = parentOfX->left; + if (w && w->isRed) + { + w->isRed = NV_FALSE; + parentOfX->isRed = NV_TRUE; + _rotateRight(pRoot, parentOfX); + w = parentOfX->left; + } + if (!w || ((!w->right || !w->right->isRed) && (!w->left || !w->left->isRed))) + { + if (w) + { + w->isRed = NV_TRUE; + } + x = parentOfX; + } + else + { + if (!w->left || !w->left->isRed) + { + w->right->isRed = NV_FALSE; + w->isRed = NV_TRUE; + _rotateLeft(pRoot, w); + w = parentOfX->left; + } + w->isRed = parentOfX->isRed; + parentOfX->isRed = NV_FALSE; + w->left->isRed = NV_FALSE; + _rotateRight(pRoot, parentOfX); + x = *pRoot; + } + } + else if (x == NULL) + { + // This should never happen. 
+ break; + } + parentOfX = x->parent; + } + if (x) + { + x->isRed = NV_FALSE; + } +} + +// +// Unlink node from tree +// +NV_STATUS +btreeUnlink +( + PNODE pNode, + PNODE *pRoot +) +{ + NODE *x; + NODE *y; + NODE *z; + NODE *parentOfX; + NvU32 yWasBlack; + + NV_ASSERT_CHECKED(btreeSearch(pNode->keyStart, &z, *pRoot) == NV_OK); + NV_ASSERT_CHECKED(z == pNode); + + if (pNode == NULL || pRoot == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + z = pNode; + + // unlink + if (!z->left || !z->right) + { + // y has a SENTINEL node as a child + y = z; + } + else + { + // find tree successor + y = z->right; + while (y->left) + { + y = y->left; + } + } + + // x is y's only child + if (y->left) + { + x = y->left; + } + else + { + x = y->right; + } + + // remove y from the parent chain + parentOfX = y->parent; + if (x) + { + x->parent = parentOfX; + } + if (y->parent) + { + if (y == y->parent->left) + { + y->parent->left = x; + } + else + { + y->parent->right = x; + } + } + else + { + *pRoot = x; + } + + yWasBlack = !y->isRed; + if (y != z) + { + // we need to replace z with y so the memory for z can be freed + y->parent = z->parent; + if (z->parent) + { + if (z == z->parent->left) + { + z->parent->left = y; + } + else + { + z->parent->right = y; + } + } + else + { + *pRoot = y; + } + + y->isRed = z->isRed; + + y->left = z->left; + if (z->left) + { + z->left->parent = y; + } + y->right = z->right; + if (z->right) + { + z->right->parent = y; + } + + if (parentOfX == z) + { + parentOfX = y; + } + } + + if (yWasBlack) + { + _deleteFixup(pRoot, parentOfX, x); + if (parentOfX) + { + VALIDATE_NODE(parentOfX); + } + } + + return NV_OK; +} + +// +// Search for node in tree. +// +NV_STATUS +btreeSearch +( + NvU64 keyOffset, + PNODE *pNode, + PNODE root +) +{ + // uninitialized ? + NODE *current = root; + while(current) + { + VALIDATE_NODE(current); + if (keyOffset < current->keyStart) + { + current = current->left; + } + else if (keyOffset > current->keyEnd) + { + current = current->right; + } + else + { + *pNode = current; + return NV_OK; + } + } + *pNode = NULL; + return NV_ERR_OBJECT_NOT_FOUND; +} + +// +// Enumerate tree (starting at the node with specified value) +// +NV_STATUS +btreeEnumStart +( + NvU64 keyOffset, + PNODE *pNode, + PNODE root +) +{ + *pNode = NULL; + + // initialized ? + if (root) + { + NODE *current = root; + VALIDATE_TREE(root); + while(current) + { + if (keyOffset < current->keyStart) + { + *pNode = current; + current = current->left; + } + else if (keyOffset > current->keyEnd) + { + current = current->right; + } + else + { + *pNode = current; + break; + + } + } + if (*pNode) + { + VALIDATE_NODE(*pNode); + } + return NV_OK; + } + return NV_OK; +} + +NV_STATUS +btreeEnumNext +( + PNODE *pNode, + PNODE root +) +{ + // no nodes ? + NODE *current = NULL; + VALIDATE_NODE(*pNode); + VALIDATE_NODE(root); + if (root && *pNode) + { + // if we don't have a right subtree return the parent + current = *pNode; + + // pick the leftmost node of the right subtree ? + if (current->right) + { + current = current->right; + for(;current->left;) + { + current = current->left; + } + } + else + { + // go up until we find the right inorder node + for(current = current->parent; current; current = current->parent) + { + if (current->keyStart > (*pNode)->keyEnd) + { + break; + } + } + } + } + *pNode = current; + if (*pNode) + { + VALIDATE_NODE(*pNode); + } + return NV_OK; +} + + + +// +// Frees all the "Data" fields stored in Nodes. 
+// If each Node is embedded in the structure pointed by its "Data" field, then +// this function destroys the whole btree +// +NV_STATUS +btreeDestroyData +( + PNODE pNode +) +{ + if (pNode == NULL) + return NV_OK; + + btreeDestroyData(pNode->left); + btreeDestroyData(pNode->right); + portMemFree (pNode->Data); + + return NV_OK; +} + + + +// +// Frees all the nodes and data stored in them. +// Don't use if the nodes were allocated within other structs +// (e.g. if the Node is embedded within the struct pointed by its "Data" field) +// +NV_STATUS +btreeDestroyNodes +( + PNODE pNode +) +{ + if (pNode == NULL) + return NV_OK; + + btreeDestroyNodes(pNode->left); + btreeDestroyNodes(pNode->right); + portMemFree (pNode); + + return NV_OK; +} diff --git a/src/nvidia/src/libraries/containers/eheap/eheap_old.c b/src/nvidia/src/libraries/containers/eheap/eheap_old.c new file mode 100644 index 000000000..ec0707842 --- /dev/null +++ b/src/nvidia/src/libraries/containers/eheap/eheap_old.c @@ -0,0 +1,1418 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#if defined(NVRM) +# include "os/os.h" +#else +# include "shrdebug.h" +# include "nvos.h" +#endif +#include "containers/eheap_old.h" + +#if !defined(SRT_BUILD) +#include "os/os.h" +#endif + +static void initPublicObjectFunctionPointers_EHeap(POBJEHEAP pHeap); +static NV_STATUS eheapInit(POBJEHEAP, NvU64, NvU64, NvU32, NvU32); +static NV_STATUS eheapDestruct(POBJEHEAP); +static NV_STATUS eheapAlloc(POBJEHEAP, NvU32, NvU32 *, NvU64 *, NvU64 *,NvU64, NvU64, PEMEMBLOCK*, void*, EHeapOwnershipComparator*); +static NV_STATUS eheapFree(POBJEHEAP, NvU64); +static void eheapInfo(POBJEHEAP, NvU64 *, NvU64 *, NvU64 *, NvU64 *, NvU32 *, NvU64 *); +static void eheapInfoForRange(POBJEHEAP, NV_RANGE, NvU64 *, NvU64 *, NvU32 *, NvU64 *); +static NV_STATUS eheapGetSize(POBJEHEAP, NvU64 *); +static NV_STATUS eheapGetFree(POBJEHEAP, NvU64 *); +static NV_STATUS eheapGetBase(POBJEHEAP, NvU64 *); +static PEMEMBLOCK eheapGetBlock(POBJEHEAP, NvU64, NvBool); +static NV_STATUS eheapSetAllocRange(POBJEHEAP, NvU64, NvU64); +static NV_STATUS eheapTraverse(POBJEHEAP, void *, EHeapTraversalFn, NvS32); +static NV_STATUS _eheapBlockFree(POBJEHEAP pHeap, PEMEMBLOCK block); +static NvU32 eheapGetNumBlocks(POBJEHEAP); +static NV_STATUS eheapGetBlockInfo(POBJEHEAP, NvU32, NVOS32_HEAP_DUMP_BLOCK *); +static NV_STATUS eheapSetOwnerIsolation(POBJEHEAP, NvBool, NvU32); +static NvBool _eheapCheckOwnership(POBJEHEAP, void*, NvU64, NvU64, PEMEMBLOCK, EHeapOwnershipComparator*); + +void +constructObjEHeap(POBJEHEAP pHeap, NvU64 Base, NvU64 LimitPlusOne, NvU32 sizeofMemBlock, NvU32 numPreAllocMemStruct) +{ + initPublicObjectFunctionPointers_EHeap(pHeap); + + eheapInit(pHeap, Base, LimitPlusOne, sizeofMemBlock, numPreAllocMemStruct); +} + +static void +initPublicObjectFunctionPointers_EHeap(POBJEHEAP pHeap) +{ + pHeap->eheapDestruct = eheapDestruct; + pHeap->eheapAlloc = eheapAlloc; + pHeap->eheapFree = eheapFree; + pHeap->eheapInfo = eheapInfo; + pHeap->eheapInfoForRange = eheapInfoForRange; + pHeap->eheapGetSize = eheapGetSize; + pHeap->eheapGetFree = eheapGetFree; + pHeap->eheapGetBase = eheapGetBase; + pHeap->eheapGetBlock = eheapGetBlock; + pHeap->eheapSetAllocRange = eheapSetAllocRange; + pHeap->eheapTraverse = eheapTraverse; + pHeap->eheapGetNumBlocks = eheapGetNumBlocks; + pHeap->eheapGetBlockInfo = eheapGetBlockInfo; + pHeap->eheapSetOwnerIsolation = eheapSetOwnerIsolation; +} + +static NV_STATUS +_eheapAllocMemStruct +( + POBJEHEAP pHeap, + PEMEMBLOCK* ppMemBlock +) +{ + if (pHeap->numPreAllocMemStruct > 0) + { + // We are out of pre-allocated mem data structs + if (NULL == pHeap->pFreeMemStructList) + { + NV_ASSERT(0); + return NV_ERR_OPERATING_SYSTEM; + } + + *ppMemBlock = pHeap->pFreeMemStructList; + pHeap->pFreeMemStructList = pHeap->pFreeMemStructList->next; + } + else + { + *ppMemBlock = portMemAllocNonPaged(pHeap->sizeofMemBlock); + + if (*ppMemBlock == NULL) + { + NV_ASSERT(0); + return NV_ERR_OPERATING_SYSTEM; + } + portMemSet(*ppMemBlock, 0, pHeap->sizeofMemBlock); + } + + return NV_OK; +} + +static NV_STATUS +_eheapFreeMemStruct +( + POBJEHEAP pHeap, + PEMEMBLOCK* ppMemBlock +) +{ + if (pHeap->numPreAllocMemStruct > 0) + { + portMemSet(*ppMemBlock, 0, pHeap->sizeofMemBlock); + + (*ppMemBlock)->next = pHeap->pFreeMemStructList; + pHeap->pFreeMemStructList = *ppMemBlock; + + *ppMemBlock = NULL; + } + else + { + portMemFree(*ppMemBlock); + *ppMemBlock = NULL; + } + + return NV_OK; +} + +// +// Create a heap. 
Even though we can return error here the resultant +// object must be self consistent (zero pointers, etc) if there were +// alloc failures, etc. +// +static NV_STATUS +eheapInit +( + POBJEHEAP pHeap, + NvU64 Base, + NvU64 LimitPlusOne, + NvU32 sizeofData, + NvU32 numPreAllocMemStruct +) +{ + PEMEMBLOCK block; + NvU32 i; + + // + // Simply create a free heap. + // + pHeap->base = Base; + pHeap->total = LimitPlusOne - Base; + pHeap->rangeLo = pHeap->base; + pHeap->rangeHi = pHeap->base + pHeap->total - 1; + pHeap->free = pHeap->total; + pHeap->sizeofMemBlock = sizeofData + sizeof(EMEMBLOCK); + + pHeap->numPreAllocMemStruct = 0; + pHeap->pPreAllocAddr = NULL; + pHeap->pBlockList = NULL; + pHeap->pFreeBlockList = NULL; + pHeap->pFreeMemStructList = NULL; + pHeap->numBlocks = 0; + pHeap->pBlockTree = NULL; + pHeap->bOwnerIsolation = NV_FALSE; + pHeap->ownerGranularity = 0; + + // + // User requested a static eheap that has a list of pre-allocated + // EMEMBLOCK data structure. + // + if (numPreAllocMemStruct > 0) + { + ++numPreAllocMemStruct; // reserve one for us - see below + + pHeap->pPreAllocAddr = portMemAllocNonPaged(pHeap->sizeofMemBlock * numPreAllocMemStruct); + + if (pHeap->pPreAllocAddr) + { + pHeap->numPreAllocMemStruct = numPreAllocMemStruct; + pHeap->pFreeMemStructList = pHeap->pPreAllocAddr; + + portMemSet(pHeap->pFreeMemStructList, 0, pHeap->sizeofMemBlock * numPreAllocMemStruct); + + // + // Form the list of free mem structures. Just need to utilize the next field of EMEMBLOCK. + // + for (i = 0; i < numPreAllocMemStruct - 1; i++) + { + ((PEMEMBLOCK)((NvU8 *)pHeap->pFreeMemStructList + (i * pHeap->sizeofMemBlock)))->next + = (PEMEMBLOCK)((NvU8 *)pHeap->pFreeMemStructList + (i + 1) * pHeap->sizeofMemBlock); + } + } + } + + if (_eheapAllocMemStruct(pHeap, &block) != NV_OK) + { + return NV_ERR_OPERATING_SYSTEM; + } + + block->owner = NVOS32_BLOCK_TYPE_FREE; + block->refCount = 0; + block->begin = Base; + block->align = Base; + block->end = LimitPlusOne - 1; + block->prevFree = block; + block->nextFree = block; + block->next = block; + block->prev = block; + block->pData = (void*)(block+1); + + // + // Fill in the heap bank info. 
+ // + pHeap->pBlockList = block; + pHeap->pFreeBlockList = block; + pHeap->numBlocks = 1; + + portMemSet((void *)&block->node, 0, sizeof(NODE)); + block->node.keyStart = block->begin; + block->node.keyEnd = block->end; + block->node.Data = (void *)block; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + eheapDestruct(pHeap); + return NV_ERR_OPERATING_SYSTEM; + } + + return NV_OK; +} + +static NV_STATUS +eheapDestruct +( + POBJEHEAP pHeap +) +{ + PEMEMBLOCK block, blockFirst, blockNext; + NvBool headptr_updated; + + if (!pHeap->pBlockList) + return NV_OK; + + // + // Free all allocated blocks + // + do { + block = blockFirst = pHeap->pBlockList; + headptr_updated = NV_FALSE; + + do { + blockNext = block->next; + + _eheapBlockFree(pHeap, block); + + // restart scanning the list, if the heap->pBlockList changed + if (blockFirst != pHeap->pBlockList) { + headptr_updated = NV_TRUE; + break; + } + + block = blockNext; + + } while (block != pHeap->pBlockList); + + } while (headptr_updated); + + if (pHeap->numPreAllocMemStruct > 0) + { + // free static blocks + portMemFree(pHeap->pPreAllocAddr); + pHeap->pPreAllocAddr = NULL; + } + else + { + portMemFree(pHeap->pBlockList); + pHeap->pBlockList = NULL; + } + + return NV_OK; +} + +// 'flags' using NVOS32_ALLOC_FLAGS_* though some are n/a +static NV_STATUS +eheapAlloc +( + POBJEHEAP pHeap, + NvU32 owner, + NvU32 *flags, + NvU64 *offset, + NvU64 *size, + NvU64 offsetAlign, + NvU64 sizeAlign, + PEMEMBLOCK * ppMemBlock, // not generally useful over e.g. a split! + void *pIsolationID, + EHeapOwnershipComparator *checker +) +{ + NvU64 allocLo, allocAl, allocHi; + PEMEMBLOCK blockFirstFree, blockFree; + PEMEMBLOCK blockNew = NULL, blockSplit = NULL; + NvU64 desiredOffset; + NvU64 allocSize; + NvU64 rangeLo, rangeHi; + + if ((*flags & NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX) && + (*flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // Save the offset for fixed address requests, or it's likely uninitialized. + desiredOffset = (*flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE) ? *offset: 0; + + // + // zero result so that apps that ignore return code have another + // chance to see the error of their ways... + // + *offset = 0; + + // + // Check for valid size. + // + if (*size == 0) + return NV_ERR_INVALID_ARGUMENT; + + // + // Range-limited the request. + // + rangeLo = pHeap->rangeLo; + rangeHi = pHeap->rangeHi; + + if (rangeLo == 0 && rangeHi == 0) { + rangeLo = pHeap->base; + rangeHi = pHeap->base + pHeap->total - 1; + } + if (rangeHi > pHeap->base + pHeap->total - 1) { + rangeHi = pHeap->base + pHeap->total - 1; + } + if (rangeLo > rangeHi) + return NV_ERR_INVALID_ARGUMENT; + + // Align size up. + allocSize = ((*size + (sizeAlign - 1)) / sizeAlign) * sizeAlign; + + // + // Trivial reject size vs. free. 
+ // + if (pHeap->free < allocSize) + return NV_ERR_NO_MEMORY; + + /* This flag will force an exclusive allocation of the request + * within the range of ownerGranularity + */ + + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_INTERNAL_INDEX ) + { + NvU64 desiredOffsetLo, desiredOffsetHi; + + NV_ASSERT_OR_RETURN(pHeap->ownerGranularity, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pHeap->bOwnerIsolation && checker, NV_ERR_INVALID_ARGUMENT); + + blockFree = pHeap->pFreeBlockList; + + if (blockFree == NULL) + goto failed; + + do + { + desiredOffset = NV_ALIGN_DOWN(blockFree->begin, pHeap->ownerGranularity) + offsetAlign; + + while (desiredOffset + allocSize - 1 <= blockFree->end) + { + desiredOffsetLo = NV_ALIGN_DOWN(desiredOffset, pHeap->ownerGranularity); + desiredOffsetHi = (((desiredOffset % pHeap->ownerGranularity) == 0) ? + NV_ALIGN_UP((desiredOffset + 1), pHeap->ownerGranularity) : + NV_ALIGN_UP(desiredOffset, pHeap->ownerGranularity)); + + if ((desiredOffset >= blockFree->begin) && + ((desiredOffsetLo >= blockFree->begin) && + (desiredOffsetHi <= blockFree->end))) + { + if (_eheapCheckOwnership(pHeap, pIsolationID, desiredOffset, + desiredOffset + allocSize - 1, blockFree, checker)) + { + allocLo = desiredOffset; + allocHi = desiredOffset + allocSize - 1; + allocAl = allocLo; + goto got_one; + } + } + + desiredOffset += pHeap->ownerGranularity; + } + + blockFree = blockFree->nextFree; + + } while (blockFree != pHeap->pFreeBlockList); + + /* return error if can't get that particular address */ + goto failed; + } + + // Ensure a valid allocation type was passed in + //if (type > NVOS32_NUM_MEM_TYPES - 1) + //return NV_ERR_INVALID_ARGUMENT; + + // + // Check for fixed address request. + // This allows caller to say: I really want this memory at a particular + // offset. Returns error if can't get that offset. + // + if ( *flags & NVOS32_ALLOC_FLAGS_FIXED_ADDRESS_ALLOCATE ) + { + // is our desired offset suitably aligned? + if (desiredOffset % offsetAlign) + goto failed; + + blockFree = pHeap->pFreeBlockList; + + if (blockFree == NULL) + { + goto failed; + } + + do + { + // + // Allocate from the bottom of the memory block. + // + blockFree = blockFree->nextFree; + + // Does this block contain our desired range? + if ( (desiredOffset >= blockFree->begin) && + (desiredOffset + allocSize - 1) <= blockFree->end ) + { + // + // Make sure no allocated block between ALIGN_DOWN(allocLo, granularity) + // and ALIGN_UP(allocHi, granularity) have a different owner than the current allocation + // + if (pHeap->bOwnerIsolation) + { + NV_ASSERT(NULL != checker); + if (!_eheapCheckOwnership(pHeap, pIsolationID, desiredOffset, + desiredOffset + allocSize - 1, blockFree, checker)) + { + break; + } + } + + // we have a match, now remove it from the pool + allocLo = desiredOffset; + allocHi = desiredOffset + allocSize - 1; + allocAl = allocLo; + goto got_one; + } + + } while (blockFree != pHeap->pFreeBlockList); + + // return error if can't get that particular address + goto failed; + } + + blockFirstFree = pHeap->pFreeBlockList; + if (!blockFirstFree) + goto failed; + + // + // When scanning upwards, start at the bottom - 1 so the following loop looks symmetric. + // + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) { + blockFirstFree = blockFirstFree->prevFree; + } + blockFree = blockFirstFree; + do + { + NvU64 blockLo; + NvU64 blockHi; + + // + // Is this block completely out of range? 
+ // + if ( ( blockFree->end < rangeLo ) || ( blockFree->begin > rangeHi ) ) + { + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + blockFree = blockFree->prevFree; + else + blockFree = blockFree->nextFree; + continue; + } + + // + // Find the intersection of the free block and the specified range. + // + blockLo = (rangeLo > blockFree->begin) ? rangeLo : blockFree->begin; + blockHi = (rangeHi < blockFree->end) ? rangeHi : blockFree->end; + + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + { + // + // Allocate from the top of the memory block. + // + allocLo = (blockHi - allocSize + 1) / offsetAlign * offsetAlign; + allocAl = allocLo; + allocHi = allocAl + allocSize - 1; + } + else + { + // + // Allocate from the bottom of the memory block. + // + allocAl = (blockLo + (offsetAlign - 1)) / offsetAlign * offsetAlign; + allocLo = allocAl; + allocHi = allocAl + allocSize - 1; + } + + // + // Make sure no allocated block between ALIGN_DOWN(allocLo, granularity) + // and ALIGN_UP(allocHi, granularity) have a different owner than the current allocation + // + if (pHeap->bOwnerIsolation) + { + NV_ASSERT(NULL != checker); + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + + // + // Try realloc if we still have enough free memory in current free block + // + if (*flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN) + { + NvU64 checkLo = NV_ALIGN_DOWN(allocLo, pHeap->ownerGranularity); + + if (checkLo > blockFree->begin) + { + blockHi = checkLo; + + allocLo = (blockHi - allocSize + 1) / offsetAlign * offsetAlign; + allocAl = allocLo; + allocHi = allocAl + allocSize - 1; + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + } + } + else + { + NvU64 checkHi = NV_ALIGN_UP(allocHi, pHeap->ownerGranularity); + + if (checkHi < blockFree->end) + { + blockLo = checkHi; + + allocAl = (blockLo + (offsetAlign - 1)) / offsetAlign * offsetAlign; + allocLo = allocAl; + allocHi = allocAl + allocSize - 1; + + if (_eheapCheckOwnership(pHeap, pIsolationID, allocLo, allocHi, blockFree, checker)) + { + goto alloc_done; + } + } + } + + // + // Cannot find any available memory in current free block, go to the next + // + goto next_free; + } + +alloc_done: + // + // Does the desired range fall completely within this block? + // Also make sure it does not wrap-around. + // Also make sure it is within the desired range. + // + if ((allocLo >= blockFree->begin) && (allocHi <= blockFree->end)) + { + if (allocLo <= allocHi) + if ((allocLo >= rangeLo) && (allocHi <= rangeHi)) + goto got_one; + + } + +next_free: + if ( *flags & NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN ) + blockFree = blockFree->prevFree; + else + blockFree = blockFree->nextFree; + + } while (blockFree != blockFirstFree); + + // + // Out of memory. + // + goto failed; + + // + // We have a match. Now link it in, trimming or splitting + // any slop from the enclosing block as needed. + // + + got_one: + + if ((allocLo == blockFree->begin) && (allocHi == blockFree->end)) + { + // + // Wow, exact match so replace free block. + // Remove from free list. + // + blockFree->nextFree->prevFree = blockFree->prevFree; + blockFree->prevFree->nextFree = blockFree->nextFree; + if (pHeap->pFreeBlockList == blockFree) + { + // + // This could be the last free block. 
+ // + if (blockFree->nextFree == blockFree) + pHeap->pFreeBlockList = NULL; + else + pHeap->pFreeBlockList = blockFree->nextFree; + } + + // + // Set owner/type values here. Don't move because some fields are unions. + // + blockFree->owner = owner; + blockFree->refCount = 1; + blockFree->align = allocAl; + + // tail end code below assumes 'blockFree' is the new block + blockNew = blockFree; + } + else if ((allocLo >= blockFree->begin) && (allocHi <= blockFree->end)) + { + // + // Found a fit. + // It isn't exact, so we'll have to do a split + // + if (_eheapAllocMemStruct(pHeap, &blockNew) != NV_OK) + { + goto failed; + } + + blockNew->owner = owner; + blockNew->refCount = 1; + blockNew->begin = allocLo; + blockNew->align = allocAl; + blockNew->end = allocHi; + + if ((blockFree->begin < blockNew->begin) && (blockFree->end > blockNew->end)) + { + // + // Split free block in two. + // + if (_eheapAllocMemStruct(pHeap, &blockSplit) != NV_OK) + { + goto failed; + } + + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + blockSplit->owner = NVOS32_BLOCK_TYPE_FREE; + blockSplit->refCount = 0; + blockSplit->begin = blockNew->end + 1; + blockSplit->align = blockSplit->begin; + blockSplit->end = blockFree->end; + blockSplit->pData = (void*)(blockNew+1); + blockFree->end = blockNew->begin - 1; + // + // Insert free split block into free list. + // + blockSplit->nextFree = blockFree->nextFree; + blockSplit->prevFree = blockFree; + blockSplit->nextFree->prevFree = blockSplit; + blockFree->nextFree = blockSplit; + // + // Insert new and split blocks into block list. + // + blockNew->next = blockSplit; + blockNew->prev = blockFree; + blockSplit->next = blockFree->next; + blockSplit->prev = blockNew; + blockFree->next = blockNew; + blockSplit->next->prev = blockSplit; + + // update numBlocks count + pHeap->numBlocks++; + + // re-insert updated free block into rb-tree + blockFree->node.keyEnd = blockFree->end; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new and split blocks into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + portMemSet((void *)&blockSplit->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + blockSplit->node.keyStart = blockSplit->begin; + blockSplit->node.keyEnd = blockSplit->end; + blockSplit->node.Data = (void *)blockSplit; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + if (btreeInsert(&blockSplit->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else if (blockFree->end == blockNew->end) + { + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // + // New block inserted after free block. 
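+ // (The allocation ends exactly at the free block's end, so the free block
+ //  is trimmed from the top and blockNew is linked directly after it.)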
+ // + blockFree->end = blockNew->begin - 1; + blockNew->next = blockFree->next; + blockNew->prev = blockFree; + blockFree->next->prev = blockNew; + blockFree->next = blockNew; + + // re-insert updated free block into rb-tree + blockFree->node.keyEnd = blockFree->end; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new block into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else if (blockFree->begin == blockNew->begin) + { + // + // Remove free block from rb-tree since node's range will be + // changed. + // + if (btreeUnlink(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // + // New block inserted before free block. + // + blockFree->begin = blockNew->end + 1; + blockFree->align = blockFree->begin; + blockNew->next = blockFree; + blockNew->prev = blockFree->prev; + blockFree->prev->next = blockNew; + blockFree->prev = blockNew; + if (pHeap->pBlockList == blockFree) + pHeap->pBlockList = blockNew; + + // re-insert updated free block into rb-tree + blockFree->node.keyStart = blockFree->begin; + if (btreeInsert(&blockFree->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + + // insert new block into rb-tree + portMemSet((void *)&blockNew->node, 0, sizeof(NODE)); + blockNew->node.keyStart = blockNew->begin; + blockNew->node.keyEnd = blockNew->end; + blockNew->node.Data = (void *)blockNew; + if (btreeInsert(&blockNew->node, &pHeap->pBlockTree) != NV_OK) + { + goto failed; + } + } + else + { + failed: + if (blockNew) _eheapFreeMemStruct(pHeap, &blockNew); + if (blockSplit) _eheapFreeMemStruct(pHeap, &blockSplit); + return NV_ERR_NO_MEMORY; + } + + pHeap->numBlocks++; + } + + NV_ASSERT(blockNew != NULL); // assert is for Coverity + pHeap->free -= blockNew->end - blockNew->begin + 1; // Reduce free amount by allocated block size. + + // Initialize a pointer to the outer wrapper's specific control structure, tacked to the end of the EMEMBLOCK + blockNew->pData = (void*)(blockNew+1); + + // Return values + *size = allocSize; + *offset = blockNew->align; + if ( ppMemBlock) *ppMemBlock = blockNew; + + return NV_OK; +} + +static NV_STATUS +_eheapBlockFree +( + POBJEHEAP pHeap, + PEMEMBLOCK block +) +{ + PEMEMBLOCK blockTmp; + + // + // Check for valid owner. + // + if (block->owner == NVOS32_BLOCK_TYPE_FREE) return NV_ERR_INVALID_ARGUMENT; + + // + // Check refCount. + // + if (--block->refCount != 0) + return NV_OK; + + // + // Update free count. + // + pHeap->free += block->end - block->begin + 1; + + // + // + // Can this merge with any surrounding free blocks? + // + if ((block->prev->owner == NVOS32_BLOCK_TYPE_FREE) && (block != pHeap->pBlockList)) + { + // + // Remove block to be freed and previous one since nodes will be + // combined into single one. + // + if (btreeUnlink(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + if (btreeUnlink(&block->prev->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + + // + // Merge with previous block. 
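+ // (block->prev absorbs the freed range: the redundant EMEMBLOCK is released
+ //  and the surviving node is re-inserted into the rb-tree with its extended
+ //  keyEnd.)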
+ // + block->prev->next = block->next; + block->next->prev = block->prev; + block->prev->end = block->end; + blockTmp = block; + block = block->prev; + pHeap->numBlocks--; + _eheapFreeMemStruct(pHeap, &blockTmp); + + // re-insert updated free block into rb-tree + block->node.keyEnd = block->end; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + } + if ((block->next->owner == NVOS32_BLOCK_TYPE_FREE) && (block->next != pHeap->pBlockList)) + { + // + // Remove block to be freed and next one since nodes will be + // combined into single one. + // + if (btreeUnlink(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + if (btreeUnlink(&block->next->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + + // + // Merge with next block. + // + block->prev->next = block->next; + block->next->prev = block->prev; + block->next->begin = block->begin; + if (pHeap->pBlockList == block) + pHeap->pBlockList = block->next; + if (block->owner == NVOS32_BLOCK_TYPE_FREE) + { + if (pHeap->pFreeBlockList == block) + pHeap->pFreeBlockList = block->nextFree; + block->nextFree->prevFree = block->prevFree; + block->prevFree->nextFree = block->nextFree; + } + blockTmp = block; + block = block->next; + pHeap->numBlocks--; + _eheapFreeMemStruct(pHeap, &blockTmp); + + // re-insert updated free block into rb-tree + block->node.keyStart = block->begin; + if (btreeInsert(&block->node, &pHeap->pBlockTree) != NV_OK) + { + return NV_ERR_INVALID_OFFSET; + } + } + if (block->owner != NVOS32_BLOCK_TYPE_FREE) + { + // + // Nothing was merged. Add to free list. + // + blockTmp = pHeap->pFreeBlockList; + if (!blockTmp) + { + pHeap->pFreeBlockList = block; + block->nextFree = block; + block->prevFree = block; + } + else + { + if (blockTmp->begin > block->begin) + // + // Insert into beginning of free list. + // + pHeap->pFreeBlockList = block; + else if (blockTmp->prevFree->begin > block->begin) + // + // Insert into free list. + // + do + { + blockTmp = blockTmp->nextFree; + } while (blockTmp->begin < block->begin); + /* + else + * Insert at end of list. 
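+ * (In all three cases the block is linked just before blockTmp below, which
+ *  keeps the circular free list sorted by begin offset.)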
+ */ + block->nextFree = blockTmp; + block->prevFree = blockTmp->prevFree; + block->prevFree->nextFree = block; + blockTmp->prevFree = block; + } + } + block->owner = NVOS32_BLOCK_TYPE_FREE; + //block->mhandle = 0x0; + block->align = block->begin; + + portMemSet((block+1), 0, pHeap->sizeofMemBlock - sizeof(EMEMBLOCK)); + + return NV_OK; +} + +static NV_STATUS +eheapFree +( + POBJEHEAP pHeap, + NvU64 offset +) +{ + PEMEMBLOCK block; + + block = (PEMEMBLOCK) eheapGetBlock(pHeap, offset, 0); + if (!block) + return NV_ERR_INVALID_OFFSET; + + return _eheapBlockFree(pHeap, block); +} + +static PEMEMBLOCK +eheapGetBlock +( + POBJEHEAP pHeap, + NvU64 offset, + NvBool bReturnFreeBlock +) +{ + PEMEMBLOCK block; + PNODE pNode; + + if (btreeSearch(offset, &pNode, pHeap->pBlockTree) != NV_OK) + { + return NULL; + } + + block = (PEMEMBLOCK)pNode->Data; + if ((block->owner == NVOS32_BLOCK_TYPE_FREE ) && !bReturnFreeBlock) + { + return NULL; + } + + return block; +} + +static NV_STATUS +eheapGetSize +( + POBJEHEAP pHeap, + NvU64 *size +) +{ + *size = pHeap->total; + return NV_OK; +} + +static NV_STATUS +eheapGetFree +( + POBJEHEAP pHeap, + NvU64 *free +) +{ + *free = pHeap->free; + return NV_OK; +} + +static NV_STATUS +eheapGetBase +( + POBJEHEAP pHeap, + NvU64 *base +) +{ + *base = pHeap->base; + return NV_OK; +} + +static void +eheapInfo +( + POBJEHEAP pHeap, + NvU64 *pBytesFree, // in all of the space managed + NvU64 *pBytesTotal, // in all of the space managed + NvU64 *pLargestFreeOffset, // constrained to pHeap->rangeLo, pHeap->rangeHi + NvU64 *pLargestFreeSize, // constrained to pHeap->rangeLo, pHeap->rangeHi + NvU32 *pNumFreeBlocks, + NvU64 *pUsableBytesFree // constrained to pHeap->rangeLo, pHeap->rangeHi +) +{ + NV_RANGE range = rangeMake(pHeap->rangeLo, pHeap->rangeHi); + + if (pBytesFree) + { + *pBytesFree = pHeap->free; + } + if (pBytesTotal) + { + *pBytesTotal = pHeap->total; + } + eheapInfoForRange(pHeap, range, pLargestFreeOffset, pLargestFreeSize, pNumFreeBlocks, pUsableBytesFree); +} + +static void +eheapInfoForRange +( + POBJEHEAP pHeap, + NV_RANGE range, + NvU64 *pLargestFreeOffset, // constrained to rangeLo, rangeHi + NvU64 *pLargestFreeSize, // constrained to rangeLo, rangeHi + NvU32 *pNumFreeBlocks, + NvU64 *pUsableBytesFree // constrained to rangeLo, rangeHi +) +{ + PEMEMBLOCK blockFirstFree, blockFree; + NvU64 freeBlockSize = 0; + NvU64 largestFreeOffset = 0; + NvU64 largestFreeSize = 0; + NvU32 numFreeBlocks = 0; + + if (pUsableBytesFree) + *pUsableBytesFree = 0; + + blockFirstFree = pHeap->pFreeBlockList; + if (blockFirstFree) + { + NV_ASSERT( range.lo <= range.hi ); + + blockFree = blockFirstFree; + do { + NvU64 clampedBlockBegin = (blockFree->begin >= range.lo) ? + blockFree->begin : range.lo; + NvU64 clampedBlockEnd = (blockFree->end <= range.hi) ? 
+ blockFree->end : range.hi; + if (clampedBlockBegin <= clampedBlockEnd) + { + numFreeBlocks++; + freeBlockSize = clampedBlockEnd - clampedBlockBegin + 1; + + if (pUsableBytesFree) + *pUsableBytesFree += freeBlockSize; + + if ( freeBlockSize > largestFreeSize ) + { + largestFreeOffset = clampedBlockBegin; + largestFreeSize = freeBlockSize; + } + } + blockFree = blockFree->nextFree; + } while (blockFree != blockFirstFree); + } + + if (pLargestFreeOffset) + { + *pLargestFreeOffset = largestFreeOffset; + } + if (pLargestFreeSize) + { + *pLargestFreeSize = largestFreeSize; + } + if (pNumFreeBlocks) + { + *pNumFreeBlocks = numFreeBlocks; + } +} + +static NV_STATUS +eheapSetAllocRange +( + POBJEHEAP pHeap, + NvU64 rangeLo, + NvU64 rangeHi +) +{ + + if ( rangeLo < pHeap->base ) + rangeLo = pHeap->base; + + if ( rangeHi > (pHeap->base + pHeap->total - 1) ) + rangeHi = (pHeap->base + pHeap->total - 1); + + if ( rangeHi < rangeLo ) + return NV_ERR_INVALID_ARGUMENT; + + pHeap->rangeLo = rangeLo; + pHeap->rangeHi = rangeHi; + + return NV_OK; +} + +static NV_STATUS +eheapTraverse +( + POBJEHEAP pHeap, + void *pEnv, + EHeapTraversalFn traversalFn, + NvS32 direction +) +{ + NvU32 cont = 1, backAtFirstBlock = 0; + PEMEMBLOCK pBlock, pBlockNext; + NV_STATUS rc; + NvU64 cursorOffset; // for dealing with cursor invalidates. + NvU64 firstBlockBegin, firstBlockEnd; // we'll never call the traversal fn twice on the same (sub)extent. + + pBlock = (direction > 0) ? pHeap->pBlockList : pHeap->pBlockList->prev; + NV_ASSERT(pBlock); + + // + // Cursor invalidates mean we can't compare with 'pHeap->pBlockList'. + // Instead we'll compare with the extent. If we intersect it at all in + // a later block then we'll consider that as having returned to the first block. + // + firstBlockBegin = pBlock->begin; + firstBlockEnd = pBlock->end; + + do + { + NvU32 invalCursor = 0; + + if ( direction > 0 ) + { + pBlockNext = pBlock->next; + cursorOffset = pBlockNext->begin; + } + else + { + pBlockNext = pBlock->prev; + cursorOffset = pBlockNext->end; + } + + rc = traversalFn(pHeap, pEnv, pBlock, &cont, &invalCursor); + + if ( invalCursor ) + { + // A block was added at or freed. So far only freeing the current block. + pBlock = eheapGetBlock(pHeap, cursorOffset, 1 /*return even if it is a free block*/); + + // Advance to the next block if the cursor block was merged. + if ((direction > 0) && (pBlock->begin < cursorOffset)) + { + pBlock = pBlock->next; + } + else if ((direction <= 0) && (pBlock->end > cursorOffset)) + { + pBlock = pBlock->prev; + } + } + else + { + // No change to the list, use the fast way to find the next block. + pBlock = pBlockNext; + + } + + NV_ASSERT(pBlock); // 1. list is circular, 2. cursorOffset should always be found unless the list is badly malformed. + + // + // Back to first block? Defined as being at a block for which the + // intersection with the original first block is non-null. + // + if ( ((firstBlockBegin >= pBlock->begin ) && (firstBlockBegin <= pBlock->end)) || + ((firstBlockEnd <= pBlock->end ) && (firstBlockEnd >= pBlock->begin)) ) + { + backAtFirstBlock = 1; + } + + } while (cont && !backAtFirstBlock); + + return rc; +} + +/*! + * @brief returns number of blocks in eHeap. + * + * @param[in] pHeap: pointer to eHeap struct to get data from + * + * @returns the number of blocks (free or allocated) currently in the heap + */ +static NvU32 +eheapGetNumBlocks +( + POBJEHEAP pHeap +) +{ + return pHeap->numBlocks; +} + +/*! 
+ * @brief Copies over block information for each block + * in the heap into the provided buffer. + * + * @param[in] pHeap: pointer to eHeap struct to get data from + * @param[in] numBlocks: number of blocks passed in block buffer + * @param[out] pBlockBuffer: pointer to buffer where info will be copied to + * + * @return 'NV_OK' Operation completed successfully + * 'NV_ERR_INVALID_ARGUMENT' size of buffer passed in is + * incorrect + * 'NV_ERR_INVALID_STATE' if the blocklist doesn't match the + * heapSize + */ +static NV_STATUS +eheapGetBlockInfo +( + POBJEHEAP pHeap, + NvU32 numBlocks, + NVOS32_HEAP_DUMP_BLOCK *pBlockBuffer +) +{ + PEMEMBLOCK pBlock; + NvU32 heapSize, i; + NV_STATUS rmStatus = NV_OK; + + // ensure buffer is the same numBlocks + heapSize = eheapGetNumBlocks(pHeap); + NV_ASSERT_OR_RETURN(heapSize == numBlocks, NV_ERR_INVALID_ARGUMENT); + + pBlock = pHeap->pBlockList; + for (i = 0; i < heapSize; i++) + { + pBlockBuffer->begin = pBlock->begin; + pBlockBuffer->align = pBlock->align; + pBlockBuffer->end = pBlock->end; + pBlockBuffer->owner = pBlock->owner; + pBlockBuffer->format = 0; // EMEMBLOCK does not have format, ignore for now + pBlock = pBlock->next; + if (pBlock == NULL) + { + return NV_ERR_INVALID_STATE; + } + pBlockBuffer++; + } + + return rmStatus; +} + +/** + * @brief Set up block owner isolation + * + * Owner isolation means that no two block owners can own allocations which live within a specified range. + * + * @param[in] pHeap pointer to EHEAP object + * @param[in] bEnable NV_TRUE to enable the allocation isolation + * @param[in] granularity allocation granularity + * + * @return NV_OK on success + */ +NV_STATUS +eheapSetOwnerIsolation +( + POBJEHEAP pHeap, + NvBool bEnable, + NvU32 granularity +) +{ + // This can only be set before any allocations have occurred. + if (pHeap->free != pHeap->total) + { + return NV_ERR_INVALID_STATE; + } + // Saying no 2 block owners can share the same block doesn't make sense. + if (bEnable && granularity < 2) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (bEnable && (granularity & (granularity-1))) + { + return NV_ERR_INVALID_ARGUMENT; + } + pHeap->bOwnerIsolation = bEnable; + pHeap->ownerGranularity = granularity; + + return NV_OK; +} + +/** + * @brief Check heap block ownership + * + * @param[in] pHeap Pointer to EHEAP object + * @param[in] pIsolationID Unique isolation ID constructed by the caller + * @param[in] allocLo Allocated range low + * @param[in] allocHi Allocated range high + * @param[in] blockFree Free block list + * @param[in] pChecker Caller defined ownership ID comparator + * + * @return NV_TRUE if success + */ +static NvBool +_eheapCheckOwnership +( + POBJEHEAP pHeap, + void *pIsolationID, + NvU64 allocLo, + NvU64 allocHi, + PEMEMBLOCK blockFree, + EHeapOwnershipComparator *pComparator +) +{ + EMEMBLOCK *pTmpBlock; + NvU64 checkLo = NV_ALIGN_DOWN(allocLo, pHeap->ownerGranularity); + NvU64 checkHi = (((allocHi % pHeap->ownerGranularity) == 0) ? + NV_ALIGN_UP((allocHi + 1), pHeap->ownerGranularity) : + NV_ALIGN_UP(allocHi, pHeap->ownerGranularity)); + NvU64 check; + + checkLo = (checkLo <= pHeap->base) ? pHeap->base : checkLo; + checkHi = (checkHi >= pHeap->base + pHeap->total - 1) ? 
(pHeap->base + pHeap->total - 1) : checkHi; + + NV_ASSERT(NULL != blockFree); + + if (blockFree->begin > checkLo || blockFree->end < checkHi) + { + for (check = checkLo; check < checkHi; /* in-loop */) + { + pTmpBlock = pHeap->eheapGetBlock(pHeap, check, NV_TRUE); + NV_ASSERT(pTmpBlock); + + if (pTmpBlock->owner != NVOS32_BLOCK_TYPE_FREE) + { + if (!pComparator(pIsolationID, pTmpBlock->pData)) + { + return NV_FALSE; + } + } + + check = pTmpBlock->end + 1; + } + } + + return NV_TRUE; +} diff --git a/src/nvidia/src/libraries/containers/list.c b/src/nvidia/src/libraries/containers/list.c new file mode 100644 index 000000000..ad38ba4c6 --- /dev/null +++ b/src/nvidia/src/libraries/containers/list.c @@ -0,0 +1,409 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "containers/list.h" +#include "utils/nvassert.h" + +CONT_VTABLE_DEFN(ListBase, listIterRange_IMPL, NULL); + +#if PORT_IS_CHECKED_BUILD +static NvBool _listIterRangeCheck(ListBase *pList, ListNode *pFirst, + ListNode *pLast); +#endif +static void _listInsertBase(ListBase *pList, void *pNext, void *pValue); + +void listInit_IMPL(NonIntrusiveList *pList, PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator); + + portMemSet(&(pList->base), 0, sizeof(pList->base)); + CONT_VTABLE_INIT(ListBase, &pList->base); + pList->pAllocator = pAllocator; + pList->valueSize = valueSize; + pList->base.nodeOffset = (NvS32)(0 - sizeof(ListNode)); +} + +void listInitIntrusive_IMPL(IntrusiveList *pList, NvS32 nodeOffset) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + portMemSet(&(pList->base), 0, sizeof(pList->base)); + CONT_VTABLE_INIT(ListBase, &pList->base); + pList->base.nodeOffset = nodeOffset; +} + +static void +_listDestroy(ListBase *pList, PORT_MEM_ALLOCATOR *pAllocator) +{ + ListNode *pNode; + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + + pNode = pList->pHead; + + pList->pHead = NULL; + pList->pTail = NULL; + pList->count = 0; + NV_CHECKED_ONLY(pList->versionNumber++); + + while (pNode != NULL) + { + ListNode *pTemp = pNode; + pNode = pNode->pNext; + pTemp->pPrev = NULL; + pTemp->pNext = NULL; + NV_CHECKED_ONLY(pTemp->pList = NULL); + if (NULL != pAllocator) + { + PORT_FREE(pAllocator, pTemp); + } + } +} + +void listDestroy_IMPL(NonIntrusiveList *pList) +{ + _listDestroy(&pList->base, pList->pAllocator); +} + +void listDestroyIntrusive_IMPL(ListBase *pList) +{ + _listDestroy(pList, NULL); +} + +NvU32 listCount_IMPL(ListBase *pList) +{ + NV_ASSERT_OR_RETURN(pList, 0); + return pList->count; +} + +void *listInsertNew_IMPL(NonIntrusiveList *pList, void *pNext) +{ + void *pNode = NULL; + void *pValue; + + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + + pNode = PORT_ALLOC(pList->pAllocator, sizeof(ListNode) + pList->valueSize); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + + portMemSet(pNode, 0, sizeof(ListNode) + pList->valueSize); + pValue = listNodeToValue(&pList->base, pNode); + _listInsertBase(&(pList->base), pNext, pValue); + + return pValue; +} + +void *listAppendNew_IMPL(NonIntrusiveList *pList) +{ + return listInsertNew_IMPL(pList, NULL); +} + +void *listPrependNew_IMPL(NonIntrusiveList *pList) +{ + return listInsertNew_IMPL(pList, listHead_IMPL(&(pList->base))); +} + +void *listInsertValue_IMPL(NonIntrusiveList *pList, void *pNext, void *pValue) +{ + void *pCurrent; + + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pCurrent = listInsertNew_IMPL(pList, pNext); + if (NULL == pCurrent) + return NULL; + + return portMemCopy(pCurrent, pList->valueSize, pValue, pList->valueSize); +} + +void *listAppendValue_IMPL(NonIntrusiveList *pList, void *pValue) +{ + return listInsertValue_IMPL(pList, NULL, pValue); +} + +void *listPrependValue_IMPL(NonIntrusiveList *pList, void *pValue) +{ + return listInsertValue_IMPL(pList, listHead_IMPL(&(pList->base)), pValue); +} + +void listInsertExisting_IMPL(IntrusiveList *pList, void *pNext, void *pValue) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + NV_ASSERT_OR_RETURN_VOID(NULL != pValue); + _listInsertBase(&(pList->base), pNext, pValue); +} + +void listAppendExisting_IMPL(IntrusiveList *pList, void *pValue) +{ + listInsertExisting_IMPL(pList, NULL, pValue); +} + +void listPrependExisting_IMPL(IntrusiveList *pList, void *pValue) +{ + listInsertExisting_IMPL(pList, 
listHead_IMPL(&(pList->base)), pValue); +} + +// for nonintrusive version +void listRemove_IMPL(NonIntrusiveList *pList, void *pValue) +{ + if (pValue == NULL) + return; + listRemoveIntrusive_IMPL(&(pList->base), pValue); + PORT_FREE(pList->pAllocator, listValueToNode(&pList->base, pValue)); +} + +// intrusive version +void listRemoveIntrusive_IMPL +( + ListBase *pList, + void *pValue +) +{ + ListNode *pNode; + + if (pValue == NULL) + return; + + pNode = listValueToNode(pList, pValue); + NV_ASSERT_OR_RETURN_VOID(NULL != pNode); + NV_ASSERT_CHECKED(pNode->pList == pList); + + if (pNode->pPrev != NULL) + pNode->pPrev->pNext = pNode->pNext; + else + pList->pHead = pNode->pNext; + + if (pNode->pNext != NULL) + pNode->pNext->pPrev = pNode->pPrev; + else + pList->pTail = pNode->pPrev; + + pNode->pNext = NULL; + pNode->pPrev = NULL; + + pList->count--; + NV_CHECKED_ONLY(pList->versionNumber++); + NV_CHECKED_ONLY(pNode->pList = NULL); +} + +// pvalue here means the value +void listRemoveFirstByValue_IMPL +( + NonIntrusiveList *pList, + void *pValue +) +{ + void *pValueFound = listFindByValue_IMPL(pList, pValue); + if (pValueFound) + { + listRemove_IMPL(pList, pValueFound); + } +} + +void listRemoveAllByValue_IMPL +( + NonIntrusiveList *pList, + void *pValue +) +{ + void *pValueFound; + ListNode *pNode; + + NV_ASSERT_OR_RETURN_VOID(NULL != pList); + NV_ASSERT_OR_RETURN_VOID(NULL != pValue); + + pNode = pList->base.pHead; + while (pNode != NULL) + { + pValueFound = listNodeToValue(&pList->base, pNode); + pNode = pNode->pNext; + + if (portMemCmp(pValueFound, pValue, pList->valueSize) == 0) + { + listRemove_IMPL(pList, pValueFound); + pValueFound = NULL; + } + } +} + +void *listFindByValue_IMPL +( + NonIntrusiveList *pList, + void *pValue +) +{ + void *pResult; + ListNode *pNode; + + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pNode = pList->base.pHead; + while (pNode != NULL) + { + pResult = listNodeToValue(&pList->base, pNode); + + if (portMemCmp(pResult, pValue, pList->valueSize) == 0) + return pResult; + + pNode = pNode->pNext; + } + + return NULL; +} + +void *listHead_IMPL +( + ListBase *pList +) +{ + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + return listNodeToValue(pList, pList->pHead); +} + +void *listTail_IMPL +( + ListBase *pList +) +{ + NV_ASSERT_OR_RETURN(NULL != pList, NULL); + return listNodeToValue(pList, pList->pTail); +} + +void *listNext_IMPL +( + ListBase *pList, + void *pValue +) +{ + ListNode *pNode = listValueToNode(pList, pValue); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pList == pList); + return listNodeToValue(pList, pNode->pNext); +} + +void *listPrev_IMPL +( + ListBase *pList, + void *pValue +) +{ + ListNode *pNode = listValueToNode(pList, pValue); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pList == pList); + return listNodeToValue(pList, pNode->pPrev); +} + +ListIterBase listIterRange_IMPL +( + ListBase *pList, + void *pFirst, + void *pLast +) +{ + ListIterBase it; + + NV_ASSERT(NULL != pList); + + NV_CHECKED_ONLY(it.versionNumber = pList->versionNumber); + it.pList = pList; + it.pNode = listValueToNode(pList, pFirst); + it.pLast = listValueToNode(pList, pLast); + it.pValue = NULL; + + NV_ASSERT_CHECKED(it.pNode == NULL || it.pNode->pList == pList); + NV_ASSERT_CHECKED(it.pLast == NULL || it.pLast->pList == pList); + NV_ASSERT_CHECKED(_listIterRangeCheck(pList, it.pNode, it.pLast)); + + return it; +} + +NvBool listIterNext_IMPL(ListIterBase *pIt) +{ + 
NV_ASSERT_OR_RETURN(NULL != pIt, NV_FALSE); + + NV_ASSERT_CHECKED(pIt->versionNumber == pIt->pList->versionNumber); + + if (!pIt->pNode) + return NV_FALSE; + + pIt->pValue = listNodeToValue(pIt->pList, pIt->pNode); + + if (pIt->pNode == pIt->pLast) + pIt->pNode = NULL; + else + pIt->pNode = pIt->pNode->pNext; + + return NV_TRUE; +} + +#if PORT_IS_CHECKED_BUILD +// @todo: optimize for best average complexity +// assumption: nodes ownership checked in the caller function +// allow same node +static NvBool _listIterRangeCheck +( + ListBase *pList, + ListNode *pFirst, + ListNode *pLast +) +{ + ListNode *pNode; + + for (pNode = pFirst; pNode != NULL; pNode = pNode->pNext) + { + if (pNode == pLast) + return NV_TRUE; + } + + // Check for both NULL (empty range) case. + return pNode == pLast; +} +#endif + +static void _listInsertBase +( + ListBase *pList, + void *pNextValue, + void *pValue +) +{ + ListNode *pNext = listValueToNode(pList, pNextValue); + ListNode *pNode = listValueToNode(pList, pValue); + + pNode->pPrev = pNext ? pNext->pPrev : pList->pTail; + pNode->pNext = pNext; + + if (pNode->pPrev) + pNode->pPrev->pNext = pNode; + else + pList->pHead = pNode; + + if (pNode->pNext) + pNode->pNext->pPrev = pNode; + else + pList->pTail = pNode; + + pList->count++; + NV_CHECKED_ONLY(pList->versionNumber++); + NV_CHECKED_ONLY(pNode->pList = pList); +} diff --git a/src/nvidia/src/libraries/containers/map.c b/src/nvidia/src/libraries/containers/map.c new file mode 100644 index 000000000..ede588928 --- /dev/null +++ b/src/nvidia/src/libraries/containers/map.c @@ -0,0 +1,898 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "containers/map.h" + +CONT_VTABLE_DEFN(MapBase, mapIterRange_IMPL, NULL); + +static void _mapRotateLeft(MapNode **pPRoot, MapNode *x); +static void _mapRotateRight(MapNode **pPRoot, MapNode *x); +static void _mapInsertFixup(MapNode **pRoot, MapNode *x); +static void _mapDeleteFixup(MapNode **pRoot, MapNode *parentOfX, MapNode *x); + +/** + * @brief Replace the old node with the new one. + * @details Does nothing if old node is NULL. Does not + * update oldnode links + */ +static void _mapPutNodeInPosition(MapBase *pMap, MapNode *pTargetPosition, + MapNode *pNewNode); + +/** + * @brief Take on target node's children connections. + * @details Does nothing is any of the input is NULL. 
+ * Does not update oldnode links + */ +static void _mapAdoptChildrenNodes(MapNode *pTargetNode, MapNode *pNewNode); + +/** + * @brief Basic insertion procedure + * @details Shared by three versions of map insertion functions + */ +static NvBool _mapInsertBase(MapBase *pMap, NvU64 key, void *pValue); + +void mapInit_IMPL +( + NonIntrusiveMap *pMap, + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator); + portMemSet(&(pMap->base), 0, sizeof(pMap->base)); + CONT_VTABLE_INIT(MapBase, &pMap->base); + pMap->pAllocator = pAllocator; + pMap->valueSize = valueSize; + pMap->base.nodeOffset = (NvS32)(0 - sizeof(MapNode)); +} + +void mapInitIntrusive_IMPL +( + IntrusiveMap *pMap, + NvS32 nodeOffset +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + portMemSet(&(pMap->base), 0, sizeof(pMap->base)); + CONT_VTABLE_INIT(MapBase, &pMap->base); + pMap->base.nodeOffset = nodeOffset; +} + +static void _mapDestroy(MapBase *pMap, PORT_MEM_ALLOCATOR *pAllocator) +{ + MapNode *pNode; + + NV_ASSERT_OR_RETURN_VOID(NULL != pMap); + + pNode = pMap->pRoot; + while (NULL != pNode) + { + while (NULL != pNode->pLeft) + pNode = pNode->pLeft; + + while (NULL != pNode->pRight) + pNode = pNode->pRight; + + if ((NULL == pNode->pLeft) && (NULL == pNode->pRight)) + { + MapNode *pTemp = pNode->pParent; + + // update parent node + if (NULL != pTemp) + { + if (pTemp->pLeft == pNode) + pTemp->pLeft = NULL; + else + pTemp->pRight = NULL; + } + + // free the node + pNode->pParent = NULL; + NV_CHECKED_ONLY(pNode->pMap = NULL); + if (NULL != pAllocator) + { + PORT_FREE(pAllocator, pNode); + } + + pNode = pTemp; + } + } + + pMap->pRoot = NULL; + pMap->count = 0; + NV_CHECKED_ONLY(pMap->versionNumber++); +} + +void mapDestroy_IMPL +( + NonIntrusiveMap *pMap +) +{ + _mapDestroy(&pMap->base, pMap->pAllocator); +} + +void mapDestroyIntrusive_IMPL +( + MapBase *pMap +) +{ + _mapDestroy(pMap, NULL); +} + +NvU32 mapCount_IMPL +( + MapBase *pMap +) +{ + NV_ASSERT_OR_RETURN(pMap, 0); + return pMap->count; +} + +NvU64 mapKey_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *pNode = mapValueToNode(pMap, pValue); + NV_ASSERT_OR_RETURN(NULL != pNode, 0); + NV_ASSERT_CHECKED(pNode->pMap == pMap); + return pNode->key; +} + +void *mapInsertNew_IMPL +( + NonIntrusiveMap *pMap, + NvU64 key +) +{ + void *pNode = NULL; + void *pValue; + + NV_ASSERT_OR_RETURN(NULL != pMap, NULL); + + pNode = PORT_ALLOC(pMap->pAllocator, sizeof(MapNode) + pMap->valueSize); + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + + portMemSet(pNode, 0, sizeof(MapNode) + pMap->valueSize); + pValue = mapNodeToValue(&pMap->base, pNode); + + // check key duplication + if (!_mapInsertBase(&(pMap->base), key, pValue)) + { + PORT_FREE(pMap->pAllocator, pNode); + return NULL; + } + + return pValue; +} + +void *mapInsertValue_IMPL +( + NonIntrusiveMap *pMap, + NvU64 key, + void *pValue +) +{ + void *pCurrent; + + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pCurrent = mapInsertNew_IMPL(pMap, key); + if (NULL == pCurrent) + return NULL; + + return portMemCopy(pCurrent, pMap->valueSize, pValue, + pMap->valueSize); +} + +NvBool mapInsertExisting_IMPL +( + IntrusiveMap *pMap, + NvU64 key, + void *pValue +) +{ + NV_ASSERT_OR_RETURN(NULL != pMap, NV_FALSE); + NV_ASSERT_OR_RETURN(NULL != pValue, NV_FALSE); + return _mapInsertBase(&(pMap->base), key, pValue); +} + +void mapRemove_IMPL +( + NonIntrusiveMap *pMap, + void *pValue +) +{ + if (pValue == NULL) + return; + 
mapRemoveIntrusive_IMPL(&(pMap->base), pValue); + PORT_FREE(pMap->pAllocator, mapValueToNode(&pMap->base, pValue)); +} + +void mapRemoveIntrusive_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *x; // child node of y, might be NULL + MapNode *y; // successor for z + MapNode *z; // node to remove + MapNode *parentOfX; + NvU32 yWasBlack; + + // do nothing is pValue is NULL + if (pValue == NULL) + return; + + // 1. find y, the successor for z + z = mapValueToNode(pMap, pValue); + NV_ASSERT_OR_RETURN_VOID(NULL != z); + NV_ASSERT_CHECKED(z->pMap == pMap); + + if (z->pLeft == NULL || z->pRight == NULL) + { + // z has at least one empty successor, y = z + y = z; + } + + else + { + // y is z's least greater node + y = z->pRight; + + while (y->pLeft != NULL) + y = y->pLeft; + } + + // 2. find x, y's children + if (y->pLeft != NULL) + x = y->pLeft; + else + x = y->pRight; + + // 3. put x into y's position + _mapPutNodeInPosition(pMap, y, x); + // 4. put y into z's position if not the same + parentOfX = y->pParent; + yWasBlack = !y->bIsRed; + + if (y != z) + { + _mapPutNodeInPosition(pMap, z, y); + _mapAdoptChildrenNodes(z, y); + y->bIsRed = z->bIsRed; + + if (parentOfX == z) + parentOfX = y; + } + + // 5. fixup, to rebalance the tree + if (yWasBlack) + _mapDeleteFixup(&(pMap->pRoot), parentOfX, x); + + // 6. update the count + NV_CHECKED_ONLY(pMap->versionNumber++); + NV_CHECKED_ONLY(z->pMap = NULL); + pMap->count--; + return; +} + +void mapRemoveByKey_IMPL +( + NonIntrusiveMap *pMap, + NvU64 key +) +{ + mapRemove_IMPL(pMap, mapFind_IMPL(&(pMap->base), key)); +} + +void mapRemoveByKeyIntrusive_IMPL +( + MapBase *pMap, + NvU64 key +) +{ + mapRemoveIntrusive_IMPL(pMap, mapFind_IMPL(pMap, key)); +} + +void *mapFind_IMPL +( + MapBase *pMap, + NvU64 key +) +{ + MapNode *pCurrent; + NV_ASSERT_OR_RETURN(NULL != pMap, NULL); + pCurrent = pMap->pRoot; + + while (pCurrent != NULL) + { + if (key < pCurrent->key) + pCurrent = pCurrent->pLeft; + else if (key > pCurrent->key) + pCurrent = pCurrent->pRight; + else + return mapNodeToValue(pMap, pCurrent); + } + + return NULL; +} + +void *mapFindGEQ_IMPL +( + MapBase *pMap, + NvU64 keyMin +) +{ + MapNode *pCurrent; + MapNode *pResult; + NV_ASSERT_OR_RETURN(NULL != pMap, NULL); + pCurrent = pMap->pRoot; + pResult = NULL; + + while (pCurrent != NULL) + { + if (pCurrent->key > keyMin) + { + pResult = pCurrent; + pCurrent = pCurrent->pLeft; + } + + else if (pCurrent->key == keyMin) + return mapNodeToValue(pMap, pCurrent); + else + pCurrent = pCurrent->pRight; + } + + if (pResult == NULL) + return NULL; + + return mapNodeToValue(pMap, pResult); +} + +void *mapFindLEQ_IMPL +( + MapBase *pMap, + NvU64 keyMax +) +{ + MapNode *pCurrent; + MapNode *pResult; + NV_ASSERT_OR_RETURN(NULL != pMap, NULL); + pCurrent = pMap->pRoot; + pResult = NULL; + + while (pCurrent != NULL) + { + if (pCurrent->key > keyMax) + pCurrent = pCurrent->pLeft; + else if (pCurrent->key == keyMax) + return mapNodeToValue(pMap, pCurrent); + else + { + pResult = pCurrent; + pCurrent = pCurrent->pRight; + } + } + + if (pResult == NULL) + return NULL; + + return mapNodeToValue(pMap, pResult); +} + +void *mapNext_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *pCurrent; + MapNode *pNode = mapValueToNode(pMap, pValue); + + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pMap == pMap); + + if (NULL != (pCurrent = pNode->pRight)) + { + while (pCurrent->pLeft != NULL) + pCurrent = pCurrent->pLeft; + + return mapNodeToValue(pMap, pCurrent); + } + + else + { + pCurrent = 
pNode->pParent; + + while (pCurrent != NULL && pNode == pCurrent->pRight) + { + if (pCurrent == pMap->pRoot) + return NULL; + + pNode = pCurrent; + pCurrent = pCurrent->pParent; + } + + if (pCurrent == NULL) + return NULL; + + return mapNodeToValue(pMap, pCurrent); + } +} + +void *mapPrev_IMPL +( + MapBase *pMap, + void *pValue +) +{ + MapNode *pCurrent; + MapNode *pNode = mapValueToNode(pMap, pValue); + + NV_ASSERT_OR_RETURN(NULL != pNode, NULL); + NV_ASSERT_CHECKED(pNode->pMap == pMap); + + if (NULL != (pCurrent = pNode->pLeft)) + { + while (pCurrent->pRight != NULL) + pCurrent = pCurrent->pRight; + + return mapNodeToValue(pMap, pCurrent); + } + + else + { + pCurrent = pNode->pParent; + + while (pCurrent != NULL && pNode == pCurrent->pLeft) + { + if (pCurrent == pMap->pRoot) + { + return NULL; + } + + pNode = pCurrent; + pCurrent = pCurrent->pParent; + } + + if (pCurrent == NULL) + return NULL; + + return mapNodeToValue(pMap, pCurrent); + } +} + +// @todo: do we need to change the definition of pFirst and pLast? +// currently they are mapNodes +MapIterBase mapIterRange_IMPL +( + MapBase *pMap, + void *pFirst, + void *pLast +) +{ + MapIterBase it; + MapNode *pFirstNode; + MapNode *pLastNode; + NV_ASSERT(pMap); + + portMemSet(&it, 0, sizeof(it)); + it.pMap = pMap; + + if (pMap->count == 0) + { + NV_CHECKED_ONLY(it.versionNumber = pMap->versionNumber); + return it; + } + + NV_ASSERT(pFirst); + NV_ASSERT(pLast); + NV_ASSERT_CHECKED((mapValueToNode(pMap, pFirst))->pMap == pMap); + NV_ASSERT_CHECKED((mapValueToNode(pMap, pLast))->pMap == pMap); + NV_ASSERT(mapKey_IMPL(pMap, pLast) >= mapKey_IMPL(pMap, pFirst)); + pFirstNode = mapValueToNode(pMap, pFirst); + pLastNode = mapValueToNode(pMap, pLast); + it.pNode = pFirstNode; + it.pLast = pLastNode; + NV_CHECKED_ONLY(it.versionNumber = pMap->versionNumber); + return it; +} + +// @todo: not sure about ppvalue, change it from void * to void ** +NvBool mapIterNext_IMPL(MapIterBase *pIt) +{ + NV_ASSERT_OR_RETURN(pIt, NV_FALSE); + + // + // Check whether the map was mutated during the iteration. + // If the map changed (by adding or removing entries), + // the iterator becomes invalid and must be reinitialized. 
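+ // (In checked builds every insert/remove bumps pMap->versionNumber, so a
+ //  stale iterator trips the assert below instead of walking freed nodes.
+ //  A minimal usage sketch, assuming pFirst/pLast came from this same map,
+ //  e.g. via mapFindGEQ_IMPL / mapFindLEQ_IMPL:
+ //      MapIterBase it = mapIterRange_IMPL(pMap, pFirst, pLast);
+ //      while (mapIterNext_IMPL(&it)) { /* use it.pValue */ }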
+ // + NV_ASSERT_CHECKED(pIt->versionNumber == pIt->pMap->versionNumber); + + if (!pIt->pNode) + return NV_FALSE; + + pIt->pValue = mapNodeToValue(pIt->pMap, pIt->pNode); + + if (pIt->pNode == pIt->pLast) + pIt->pNode = NULL; + else + pIt->pNode = mapValueToNode(pIt->pMap, + mapNext_IMPL(pIt->pMap, pIt->pValue)); + + return NV_TRUE; +} + +static void _mapRotateLeft +( + MapNode **pPRoot, + MapNode *x +) +{ + // rotate node x to left + MapNode *y = x->pRight; + // establish x->pRight link + x->pRight = y->pLeft; + + if (y->pLeft) + y->pLeft->pParent = x; + + // establish y->pParent link + y->pParent = x->pParent; + + if (x->pParent) + { + if (x == x->pParent->pLeft) + x->pParent->pLeft = y; + else + x->pParent->pRight = y; + } + + else + (*pPRoot) = y; + + // link x and y + y->pLeft = x; + x->pParent = y; +} + +static void _mapRotateRight +( + MapNode **pPRoot, + MapNode *x +) +{ + // rotate node x to right + MapNode *y = x->pLeft; + // establish x->pLeft link + x->pLeft = y->pRight; + + if (y->pRight) + y->pRight->pParent = x; + + // establish y->pParent link + y->pParent = x->pParent; + + if (x->pParent) + { + if (x == x->pParent->pRight) + x->pParent->pRight = y; + else + x->pParent->pLeft = y; + } + + else + (*pPRoot) = y; + + // link x and y + y->pRight = x; + x->pParent = y; +} + +static void _mapInsertFixup +( + MapNode **pPRoot, + MapNode *x +) +{ + // check red-black properties + while ((x != *pPRoot) && x->pParent->bIsRed) + { + // we have a violation + if (x->pParent == x->pParent->pParent->pLeft) + { + MapNode *y = x->pParent->pParent->pRight; + + if (y && y->bIsRed) + { + // uncle is RED + x->pParent->bIsRed = NV_FALSE; + y->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + x = x->pParent->pParent; + } + + else + { + // uncle is BLACK + if (x == x->pParent->pRight) + { + // make x a left child + x = x->pParent; + _mapRotateLeft(pPRoot, x); + } + + // recolor and rotate + x->pParent->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, x->pParent->pParent); + } + } + + else + { + // mirror image of above code + MapNode *y = x->pParent->pParent->pLeft; + + if (y && y->bIsRed) + { + // uncle is RED + x->pParent->bIsRed = NV_FALSE; + y->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + x = x->pParent->pParent; + } + + else + { + // uncle is BLACK + if (x == x->pParent->pLeft) + { + x = x->pParent; + _mapRotateRight(pPRoot, x); + } + + x->pParent->bIsRed = NV_FALSE; + x->pParent->pParent->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, x->pParent->pParent); + } + } + } + + (*pPRoot)->bIsRed = NV_FALSE; +} + +static void _mapDeleteFixup +( + MapNode **pPRoot, + MapNode *parentOfX, + MapNode *x +) +{ + while ((x != *pPRoot) && (!x || !x->bIsRed)) + { + //NV_ASSERT (!(x == NULL && parentOfX == NULL)); + // NULL nodes are sentinel nodes. If we delete a sentinel node (x==NULL) it + // must have a parent node (or be the root). 
Hence, parentOfX == NULL with + // x==NULL is never possible (tree invariant) + if ((parentOfX != NULL) && (x == parentOfX->pLeft)) + { + MapNode *w = parentOfX->pRight; + + if (w && w->bIsRed) + { + w->bIsRed = NV_FALSE; + parentOfX->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, parentOfX); + w = parentOfX->pRight; + } + + if (!w || (((!w->pLeft || !w->pLeft->bIsRed) + && (!w->pRight || !w->pRight->bIsRed)))) + { + if (w) + w->bIsRed = NV_TRUE; + + x = parentOfX; + } + + else + { + if (!w->pRight || !w->pRight->bIsRed) + { + w->pLeft->bIsRed = NV_FALSE; + w->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, w); + w = parentOfX->pRight; + } + + w->bIsRed = parentOfX->bIsRed; + parentOfX->bIsRed = NV_FALSE; + w->pRight->bIsRed = NV_FALSE; + _mapRotateLeft(pPRoot, parentOfX); + x = *pPRoot; + } + } + + else if (parentOfX != NULL) + { + MapNode *w = parentOfX->pLeft; + + if (w && w->bIsRed) + { + w->bIsRed = NV_FALSE; + parentOfX->bIsRed = NV_TRUE; + _mapRotateRight(pPRoot, parentOfX); + w = parentOfX->pLeft; + } + + if (!w || ((!w->pRight || !w->pRight->bIsRed) && + (!w->pLeft || !w->pLeft->bIsRed))) + { + if (w) + w->bIsRed = NV_TRUE; + + x = parentOfX; + } + + else + { + if (!w->pLeft || !w->pLeft->bIsRed) + { + w->pRight->bIsRed = NV_FALSE; + w->bIsRed = NV_TRUE; + _mapRotateLeft(pPRoot, w); + w = parentOfX->pLeft; + } + + w->bIsRed = parentOfX->bIsRed; + parentOfX->bIsRed = NV_FALSE; + w->pLeft->bIsRed = NV_FALSE; + _mapRotateRight(pPRoot, parentOfX); + x = *pPRoot; + } + } + + else if (x == NULL) + { + // This should never happen. + break; + } + + parentOfX = x->pParent; + } + + if (x) + x->bIsRed = NV_FALSE; +} + +static void _mapPutNodeInPosition +( + MapBase *pMap, + MapNode *pTargetPosition, + MapNode *pNewNode +) +{ + // error check - can be removed + if (pTargetPosition == NULL) + return; + + // 1. change connection from new node side + if (pNewNode != NULL) + pNewNode->pParent = pTargetPosition->pParent; + + // 2. connection from parent side + if (pTargetPosition->pParent != NULL) + { + if (pTargetPosition == pTargetPosition->pParent->pLeft) + pTargetPosition->pParent->pLeft = pNewNode; + else + pTargetPosition->pParent->pRight = pNewNode; + } + + else + pMap->pRoot = pNewNode; +} + +static void _mapAdoptChildrenNodes +( + MapNode *pTargetNode, + MapNode *pNewNode +) +{ + // error check - can be removed + if (pTargetNode == NULL || pNewNode == NULL) + return; + + // take on connections + pNewNode->pLeft = pTargetNode->pLeft; + + if (pTargetNode->pLeft != NULL) + pTargetNode->pLeft->pParent = pNewNode; + + pNewNode->pRight = pTargetNode->pRight; + + if (pTargetNode->pRight != NULL) + pTargetNode->pRight->pParent = pNewNode; +} + +static NvBool _mapInsertBase +( + MapBase *pMap, + NvU64 key, + void *pValue +) +{ + MapNode *pCurrent; + MapNode *pParent; + MapNode *pNode; + pNode = mapValueToNode(pMap, pValue); + // 1. locate parent leaf node for the new node + pCurrent = pMap->pRoot; + pParent = NULL; + + while (pCurrent != NULL) + { + pParent = pCurrent; + + if (key < pCurrent->key) + pCurrent = pCurrent->pLeft; + else if (key > pCurrent->key) + pCurrent = pCurrent->pRight; + else + { + // duplication detected + return NV_FALSE; + } + } + + // 2. set up the new node structure + NV_CHECKED_ONLY(pNode->pMap = pMap); + pNode->key = key; + pNode->pParent = pParent; + pNode->pLeft = NULL; + pNode->pRight = NULL; + pNode->bIsRed = NV_TRUE; + + // 3. 
insert node in tree + if (pParent != NULL) + { + if (pNode->key < pParent->key) + pParent->pLeft = pNode; + else + pParent->pRight = pNode; + } + + else + pMap->pRoot = pNode; + + // 4. balance the tree + _mapInsertFixup(&(pMap->pRoot), pNode); + NV_CHECKED_ONLY(pMap->versionNumber++); + pMap->count++; + return NV_TRUE; +} diff --git a/src/nvidia/src/libraries/containers/multimap.c b/src/nvidia/src/libraries/containers/multimap.c new file mode 100644 index 000000000..b95b9f130 --- /dev/null +++ b/src/nvidia/src/libraries/containers/multimap.c @@ -0,0 +1,380 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "containers/multimap.h" + +CONT_VTABLE_DEFN(MultimapBase, multimapItemIterRange_IMPL, NULL); + +void multimapInit_IMPL +( + MultimapBase *pBase, + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 valueSize, + NvS32 nodeOffset, + NvU32 submapSize +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pAllocator); + mapInit_IMPL(&pBase->map, pAllocator, submapSize); + CONT_VTABLE_INIT(MultimapBase, pBase); + pBase->multimapNodeOffset = nodeOffset; + pBase->itemCount = 0; + pBase->itemSize = valueSize; +} + +void multimapDestroy_IMPL +( + MultimapBase *pBase +) +{ + void *pLeaf; + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + + pLeaf = multimapFirstItem_IMPL(pBase); + while (NULL != pLeaf) + { + void *pNext = multimapNextItem_IMPL(pBase, pLeaf); + multimapRemoveItem_IMPL(pBase, pLeaf); + pLeaf = pNext; + } + + while (NULL != (pSubmap = (IntrusiveMap *)mapFindGEQ_IMPL(&pBase->map.base, 0))) + { + mapDestroyIntrusive_IMPL(&pSubmap->base); + mapRemove_IMPL(&pBase->map, pSubmap); + } + + mapDestroy_IMPL(&pBase->map); +} + +void multimapClear_IMPL +( + MultimapBase *pBase +) +{ + PORT_MEM_ALLOCATOR *pAllocator; + NvU32 valueSize; + NvS32 nodeOffset; + NvU32 submapSize; + + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + pAllocator = pBase->map.pAllocator; + valueSize = pBase->itemSize; + nodeOffset = pBase->multimapNodeOffset; + submapSize = pBase->map.valueSize; + + multimapDestroy_IMPL(pBase); + multimapInit_IMPL(pBase, pAllocator, valueSize, nodeOffset, submapSize); +} + +void *multimapInsertSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + void *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapInsertNew_IMPL(&pBase->map, submapKey); + if (NULL != pSubmap) + { + NvS32 submapNodeOffset = pBase->multimapNodeOffset + + NV_OFFSETOF(MultimapNode, submapNode); + mapInitIntrusive_IMPL((IntrusiveMap *)pSubmap, submapNodeOffset); + } + + return pSubmap; +} + +void *multimapFindSubmap_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFind_IMPL(&pBase->map.base, submapKey); +} + +void *multimapFindSubmapLEQ_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFindLEQ_IMPL(&pBase->map.base, submapKey); +} + +void *multimapFindSubmapGEQ_IMPL(MultimapBase *pBase, NvU64 submapKey) +{ + return mapFindGEQ_IMPL(&pBase->map.base, submapKey); +} + +void *multimapInsertItemNew_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey +) +{ + IntrusiveMap *pSubmap; + void *pLeaf; + NvU32 leafSize; + + if (NULL == pBase) + return NULL; + + pSubmap = (IntrusiveMap *)multimapFindSubmap_IMPL(pBase, submapKey); + if (NULL == pSubmap) + return NULL; + + leafSize = pBase->multimapNodeOffset + sizeof(MultimapNode); + pLeaf = PORT_ALLOC(pBase->map.pAllocator, leafSize); + + if (NULL == pLeaf) + return NULL; + + portMemSet(pLeaf, 0, leafSize); + + multimapValueToNode(pBase, pLeaf)->pSubmap = pSubmap; + + if (!mapInsertExisting_IMPL(pSubmap, itemKey, pLeaf)) + { + PORT_FREE(pBase->map.pAllocator, pLeaf); + return NULL; + } + + pBase->itemCount++; + + return pLeaf; +} + +void *multimapInsertItemValue_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey, + void *pValue +) +{ + void *pLeaf; + + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + NV_ASSERT_OR_RETURN(NULL != pValue, NULL); + + pLeaf = multimapInsertItemNew_IMPL(pBase, submapKey, itemKey); + + if (NULL == pLeaf) + return NULL; + + return portMemCopy(pLeaf, pBase->itemSize, pValue, pBase->itemSize); +} + +void *multimapFindItem_IMPL +( + MultimapBase *pBase, + 
NvU64 submapKey, + NvU64 itemKey +) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = (IntrusiveMap *)multimapFindSubmap_IMPL(pBase, submapKey); + if (NULL == pSubmap) + return NULL; + + return mapFind_IMPL(&pSubmap->base, itemKey); +} + +void multimapRemoveItem_IMPL(MultimapBase *pBase, void *pLeaf) +{ + IntrusiveMap *pSubmap; + NvU32 itemCount; + + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pLeaf); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pLeaf)->pSubmap; + NV_ASSERT_OR_RETURN_VOID(NULL != pSubmap); + + itemCount = pSubmap->base.count; + mapRemoveIntrusive_IMPL(&pSubmap->base, pLeaf); + // Only continue if an item was actually removed + if (itemCount == pSubmap->base.count) + return; + + PORT_FREE(pBase->map.pAllocator, pLeaf); + + pBase->itemCount--; +} + +void multimapRemoveSubmap_IMPL +( + MultimapBase *pBase, + MapBase *pSubmap +) +{ + NV_ASSERT_OR_RETURN_VOID(NULL != pBase); + NV_ASSERT_OR_RETURN_VOID(NULL != pSubmap); + NV_ASSERT_OR_RETURN_VOID(pSubmap->count == 0); + mapDestroyIntrusive_IMPL(pSubmap); + mapRemove_IMPL(&pBase->map, pSubmap); +} + +void multimapRemoveItemByKey_IMPL +( + MultimapBase *pBase, + NvU64 submapKey, + NvU64 itemKey +) +{ + void *pLeaf = multimapFindItem_IMPL(pBase, submapKey, itemKey); + if (NULL != pLeaf) + multimapRemoveItem_IMPL(pBase, pLeaf); +} + +void *multimapNextItem_IMPL(MultimapBase *pBase, void *pValue) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase && NULL != pValue, NULL); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pValue)->pSubmap; + NV_ASSERT_OR_RETURN(NULL != pSubmap, NULL); + + pValue = mapNext_IMPL(&pSubmap->base, pValue); + while (NULL == pValue) + { + pSubmap = (IntrusiveMap *)mapNext_IMPL(&pBase->map.base, pSubmap); + if (NULL == pSubmap) + return NULL; + + pValue = mapFindGEQ_IMPL(&pSubmap->base, 0); + } + + return pValue; +} + +void *multimapPrevItem_IMPL(MultimapBase *pBase, void *pValue) +{ + IntrusiveMap *pSubmap; + + NV_ASSERT_OR_RETURN(NULL != pBase && NULL != pValue, NULL); + + pSubmap = (IntrusiveMap *)multimapValueToNode(pBase, pValue)->pSubmap; + NV_ASSERT_OR_RETURN(NULL != pSubmap, NULL); + + pValue = mapPrev_IMPL(&pSubmap->base, pValue); + while (NULL == pValue) + { + pSubmap = (IntrusiveMap *)mapPrev_IMPL(&pBase->map.base, pSubmap); + if (NULL == pSubmap) + return NULL; + + pValue = mapFindLEQ_IMPL(&pSubmap->base, NV_U64_MAX); + } + + return pValue; +} + +void *multimapFirstItem_IMPL(MultimapBase *pBase) +{ + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapFindGEQ_IMPL(&pBase->map.base, 0); + while (NULL != pSubmap) + { + void *pItem = mapFindGEQ_IMPL(&pSubmap->base, 0); + if (NULL != pItem) + return pItem; + + pSubmap = mapNext_IMPL(&pBase->map.base, pSubmap); + } + + return NULL; +} + +void *multimapLastItem_IMPL(MultimapBase *pBase) +{ + IntrusiveMap *pSubmap; + NV_ASSERT_OR_RETURN(NULL != pBase, NULL); + + pSubmap = mapFindLEQ_IMPL(&pBase->map.base, NV_U64_MAX); + while (NULL != pSubmap) + { + void *pItem = mapFindLEQ_IMPL(&pSubmap->base, NV_U64_MAX); + if (NULL != pItem) + return pItem; + + pSubmap = mapPrev_IMPL(&pBase->map.base, pSubmap); + } + + return NULL; +} + +MultimapIterBase multimapItemIterRange_IMPL +( + MultimapBase *pBase, + void *pFirst, + void *pLast +) +{ + MultimapIterBase it; + + portMemSet(&it, 0, sizeof(it)); + it.pMultimap = pBase; + + NV_ASSERT_OR_RETURN(NULL != pBase, it); + + if (pBase->itemCount == 0 || pFirst == NULL || pLast == NULL) + 
return it; + + { + MultimapNode *pFirstNode; + MultimapNode *pLastNode; + NvU64 firstKey, lastKey, firstSubmapKey, lastSubmapKey; + + pFirstNode = multimapValueToNode(pBase, pFirst); + pLastNode = multimapValueToNode(pBase, pLast); + + firstKey = pFirstNode->submapNode.key; + lastKey = pLastNode->submapNode.key; + firstSubmapKey = mapValueToNode(&pBase->map.base, pFirstNode->pSubmap)->key; + lastSubmapKey = mapValueToNode(&pBase->map.base, pLastNode->pSubmap)->key; + + NV_ASSERT(firstSubmapKey < lastSubmapKey || + (firstSubmapKey == lastSubmapKey && firstKey <= lastKey)); + } + it.pNext = pFirst; + it.pLast = pLast; + return it; +} + +NvBool multimapItemIterNext_IMPL(MultimapIterBase *pIt) +{ + NV_ASSERT_OR_RETURN(NULL != pIt, NV_FALSE); + + pIt->pValue = pIt->pNext; + + if (NULL == pIt->pNext) + return NV_FALSE; + + if (pIt->pNext == pIt->pLast) + pIt->pNext = NULL; + else + pIt->pNext = multimapNextItem_IMPL(pIt->pMultimap, pIt->pNext); + + return NV_TRUE; +} diff --git a/src/nvidia/src/libraries/containers/queue.c b/src/nvidia/src/libraries/containers/queue.c new file mode 100644 index 000000000..9cb681e9a --- /dev/null +++ b/src/nvidia/src/libraries/containers/queue.c @@ -0,0 +1,299 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include "containers/queue.h" + +#define MEM_RD64(a) ((NvLength) (*(volatile NvU64 *)(a))) +#define MEM_WR64(a, d) do { *(volatile NvU64 *)(a) = (NvU64)(d); } while (0) + +#define MEM_WR(a, d) portMemCopy((a), sizeof(*(a)), &(d), sizeof(d)) +#define MEM_RD(v, a) portMemCopy(&(v), sizeof(v), (a), sizeof(*(a))) + +static +NV_STATUS circularQueueInitCommon +( + Queue *pQueue, + void *pData, + NvLength capacity, + PORT_MEM_ALLOCATOR *pAllocator, + NvLength msgSize +) +{ + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_ERR_INVALID_ARGUMENT); + + MEM_WR(&pQueue->pData, pData); + MEM_WR(&pQueue->pAllocator, pAllocator); + MEM_WR64(&pQueue->msgSize, msgSize); + MEM_WR64(&pQueue->capacity, capacity); + MEM_WR64(&pQueue->getIdx, 0); + MEM_WR64(&pQueue->putIdx, 0); + + return NV_OK; +} + +static +NvLength queueGetCount(Queue *pQueue) +{ + NvLength get = MEM_RD64(&pQueue->getIdx); + NvLength put = MEM_RD64(&pQueue->putIdx); + + if (put >= get) + { + return put - get; + } + else + { + return put + MEM_RD64(&pQueue->capacity) - get; + } +} + +static +void managedCopyData(NvLength msgSize, + NvLength opIdx, + QueueContext *pCtx, + void *pClientData, + NvLength count, + NvBool bCopyIn) +{ + NvLength size = msgSize * count; + void *pQueueData = (NvU8 *)pCtx->pData + (opIdx * msgSize); + void *src = bCopyIn ? pClientData : pQueueData; + void *dst = bCopyIn ? pQueueData : pClientData; + + portMemCopy(dst, size, src, size); +} + +NV_STATUS circularQueueInit_IMPL +( + Queue *pQueue, + PORT_MEM_ALLOCATOR *pAllocator, + NvLength capacity, + NvLength msgSize +) +{ + void *pData = NULL; + + // One element is wasted as no separate count/full/empty state + // is kept - only indices. + // Managed queue, can hide this due to owning the buffer and + // preserve original queue semantics. 
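+    // For example, a queue created with a requested capacity of 3 allocates
+    // 4 slots and is considered full once it holds 3 elements, i.e. when
+    // ((putIdx + 1) % 4) == getIdx.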
+ capacity += 1; + + NV_ASSERT_OR_RETURN(pAllocator != NULL, NV_ERR_INVALID_ARGUMENT); + + pData = PORT_ALLOC(pAllocator, capacity * msgSize); + if (pData == NULL) + return NV_ERR_NO_MEMORY; + + return circularQueueInitCommon(pQueue, pData, capacity, pAllocator, msgSize); +} + +NV_STATUS circularQueueInitNonManaged_IMPL +( + Queue *pQueue, + NvLength capacity, + NvLength msgSize +) +{ + return circularQueueInitCommon(pQueue, NULL /*pData*/, capacity, NULL /*pAllocator*/, msgSize); +} + +void circularQueueDestroy_IMPL(Queue *pQueue) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + NV_ASSERT_OR_RETURN_VOID(NULL != pQueue); + + MEM_WR64(&pQueue->capacity, 1); + MEM_WR64(&pQueue->getIdx, 0); + MEM_WR64(&pQueue->putIdx, 0); + MEM_RD(pAllocator, &pQueue->pAllocator); + + if (pAllocator) + PORT_FREE(pQueue->pAllocator, pQueue->pData); +} + +NvLength circularQueueCapacity_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return MEM_RD64(&pQueue->capacity) - 1; +} + +NvLength circularQueueCount_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return queueGetCount(pQueue); +} + +NvBool circularQueueIsEmpty_IMPL(Queue *pQueue) +{ + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + return queueGetCount(pQueue) == 0; +} + +NvLength circularQueuePushNonManaged_IMPL +( + Queue *pQueue, + QueueContext *pCtx, + void* pElements, + NvLength numElements +) +{ + void *src; + NvLength cntLimit = 0; + NvLength elemToCpy, srcSize; + NvLength putIdx; + NvLength msgSize; + NvLength capacity; + + NV_ASSERT_OR_RETURN(NULL != pQueue, 0); + + putIdx = MEM_RD64(&pQueue->putIdx); + msgSize = MEM_RD64(&pQueue->msgSize); + capacity = MEM_RD64(&pQueue->capacity); + + // Calculate the elements to copy + cntLimit = capacity - queueGetCount(pQueue) - 1; + if (numElements > cntLimit) + { + numElements = cntLimit; + } + + src = pElements; + if (numElements > 0) + { + NvLength remainingElemToCpy = numElements; + + // We need a max of 2 copies to take care of wrapAround case. See if we have a wrap around + if ((putIdx + numElements) > capacity) + { + // do the extra copy here + elemToCpy = capacity - putIdx; + srcSize = msgSize * elemToCpy; + + pCtx->pCopyData(msgSize, putIdx, pCtx, src, elemToCpy, NV_TRUE /*bCopyIn*/); + + // Update variables for next copy + remainingElemToCpy -= elemToCpy; + src = (void *)((NvU8 *)src + srcSize); + + putIdx = 0; + } + + NV_ASSERT(remainingElemToCpy <= capacity - putIdx); + + pCtx->pCopyData(msgSize, putIdx, pCtx, src, remainingElemToCpy, NV_TRUE /*bCopyIn*/); + + // The data must land before index update. 
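+            // This store fence pairs with the portAtomicMemoryFenceLoad() in
+            // circularQueuePopAndCopyNonManaged_IMPL(): the producer publishes
+            // the element data before advancing putIdx, and the consumer reads
+            // the element before advancing getIdx, so neither side ever
+            // observes an index that points at data still in flight.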
+ portAtomicMemoryFenceStore(); + MEM_WR64(&pQueue->putIdx, (putIdx + remainingElemToCpy) % capacity); + } + + return numElements; +} + +NvLength circularQueuePush_IMPL +( + Queue *pQueue, + void* pElements, + NvLength numElements +) +{ + QueueContext ctx = {0}; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, NV_FALSE); + + ctx.pCopyData = managedCopyData; + ctx.pData = pQueue->pData; + + return circularQueuePushNonManaged_IMPL(pQueue, &ctx, pElements, numElements); +} + +void* circularQueuePeek_IMPL(Queue *pQueue) +{ + void *top; + + NV_ASSERT_OR_RETURN(pQueue != NULL, 0); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, 0); + + if (queueGetCount(pQueue) == 0) return NULL; + top = (void*)((NvU8*)pQueue->pData + pQueue->getIdx * pQueue->msgSize); + return top; +} + +void circularQueuePop_IMPL(Queue *pQueue) +{ + NvLength getIdx; + NvLength capacity; + + NV_ASSERT_OR_RETURN_VOID(NULL != pQueue); + + getIdx = MEM_RD64(&pQueue->getIdx); + capacity = MEM_RD64(&pQueue->capacity); + + if (queueGetCount(pQueue) > 0) + { + MEM_WR64(&pQueue->getIdx, (getIdx + 1) % capacity); + } +} + +NvBool circularQueuePopAndCopyNonManaged_IMPL(Queue *pQueue, QueueContext *pCtx, void *pCopyTo) +{ + NvLength capacity; + NvLength msgSize; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + + capacity = MEM_RD64(&pQueue->capacity); + msgSize = MEM_RD64(&pQueue->msgSize); + + if (queueGetCount(pQueue) > 0) + { + NvLength getIdx = MEM_RD64(&pQueue->getIdx); + pCtx->pCopyData(msgSize, getIdx, pCtx, pCopyTo, 1, NV_FALSE /*bCopyIn*/); + + // Update of index can't happen before we read all the data. + portAtomicMemoryFenceLoad(); + + MEM_WR64(&pQueue->getIdx, (getIdx + 1) % capacity); + + return NV_TRUE; + } + return NV_FALSE; +} + +NvBool circularQueuePopAndCopy_IMPL(Queue *pQueue, void *pCopyTo) +{ + QueueContext ctx = {0}; + + NV_ASSERT_OR_RETURN(pQueue != NULL, NV_FALSE); + NV_ASSERT_OR_RETURN(pQueue->pAllocator != NULL, NV_FALSE); + + ctx.pCopyData = managedCopyData; + ctx.pData = pQueue->pData; + + return circularQueuePopAndCopyNonManaged_IMPL(pQueue, &ctx, pCopyTo); +} diff --git a/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c b/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c new file mode 100644 index 000000000..45d776fc5 --- /dev/null +++ b/src/nvidia/src/libraries/eventbuffer/eventbufferproducer.c @@ -0,0 +1,308 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "eventbufferproducer.h" +#include "nvport/nvport.h" + +// +// This file contains generic event buffer producer implementation for adding variable length data +// +// Data format: +// +// Event Record buffer holds fixed size records +// +// |---------|---------|---------|---------|...|---------| +// | record1 | record2 | record3 | record4 |...| recordn | +// |---------|---------|---------|---------|...|---------| +// +// Variable length data buffer: +// The fixed event record can optionally contain a pointer to variable length data. +// This buffer stores the varlength data that doesn't fit in the fixed size records. +// +// |------------|--------|...|---------| +// | data2 | data4 |...| data n | +// |------------|--------|...|---------| +// + +static NV_EVENT_BUFFER_RECORD* _eventBufferGetFreeRecord(EVENT_BUFFER_PRODUCER_INFO *); +static void _eventBufferAddVardata(EVENT_BUFFER_PRODUCER_INFO*, NvP64, NvU32, NV_EVENT_BUFFER_RECORD_HEADER*); +static void _eventBufferUpdateRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO*); +static void _eventBufferUpdateVarRemaingSize(EVENT_BUFFER_PRODUCER_INFO* info); + +void +eventBufferInitRecordBuffer +( + EVENT_BUFFER_PRODUCER_INFO *info, + NV_EVENT_BUFFER_HEADER* pHeader, + NvP64 recordBuffAddr, + NvU32 recordSize, + NvU32 recordCount, + NvU32 bufferSize, + NvU32 notificationThreshold +) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + pRecordBuffer->pHeader = pHeader; + pRecordBuffer->recordBuffAddr = recordBuffAddr; + pRecordBuffer->recordSize = recordSize; + pRecordBuffer->totalRecordCount = recordCount; + pRecordBuffer->bufferSize = bufferSize; + pRecordBuffer->notificationThreshold = notificationThreshold; +} + +void +eventBufferInitVardataBuffer +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvP64 vardataBuffAddr, + NvU32 bufferSize, + NvU32 notificationThreshold +) +{ + VARDATA_BUFFER_INFO* pVardataBuffer = &info->vardataBuffer; + pVardataBuffer->vardataBuffAddr = vardataBuffAddr; + pVardataBuffer->bufferSize = bufferSize; + pVardataBuffer->notificationThreshold = notificationThreshold; + pVardataBuffer->get = 0; + pVardataBuffer->put = 0; + pVardataBuffer->remainingSize = bufferSize; +} + +void +eventBufferInitNotificationHandle(EVENT_BUFFER_PRODUCER_INFO *info, NvP64 notificationHandle) +{ + info->notificationHandle = notificationHandle; +} + +void +eventBufferSetEnable(EVENT_BUFFER_PRODUCER_INFO *info, NvBool isEnabled) +{ + info->isEnabled = isEnabled; +} + +void +eventBufferSetKeepNewest(EVENT_BUFFER_PRODUCER_INFO *info,NvBool isKeepNewest) +{ + info->isKeepNewest = isKeepNewest; +} + +void +eventBufferUpdateRecordBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + pRecordBuffer->pHeader->recordGet = get; + + // used for notification + _eventBufferUpdateRecordBufferCount(info); + + // dropCounts get reset on every updateGet call + pRecordBuffer->pHeader->recordDropcount = 0; + pRecordBuffer->pHeader->vardataDropcount = 0; + +} + +void +_eventBufferUpdateRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + RECORD_BUFFER_INFO* pRecordBuffer = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = info->recordBuffer.pHeader; + + if (pHeader->recordGet <= pHeader->recordPut) + 
pHeader->recordCount = (pHeader->recordPut - pHeader->recordGet); + else + pHeader->recordCount = pHeader->recordPut + (pRecordBuffer->totalRecordCount - pHeader->recordGet); +} + +void +eventBufferUpdateVardataBufferGet(EVENT_BUFFER_PRODUCER_INFO *info, NvU32 get) +{ + VARDATA_BUFFER_INFO* pVardataBuffer = &info->vardataBuffer; + pVardataBuffer->get = get; + + _eventBufferUpdateVarRemaingSize(info); +} + +NvU32 +eventBufferGetRecordBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + return info->recordBuffer.totalRecordCount; +} + +NvU32 +eventBufferGetVardataBufferCount(EVENT_BUFFER_PRODUCER_INFO *info) +{ + return info->vardataBuffer.bufferSize; +} + +// +// eventBufferProducerAddEvent +// +// Adds an event to an event buffer +// This function is called after acquiring correct locks (depending on which module includes it) +// and bound checks for input parameters +// eventType : for RM this would be either 2080 subdevice events or 0000 system events +// eventSubtype: optional +// payloadSize and vardataSize must be 64 bit aligned +// +void +eventBufferProducerAddEvent +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvU16 eventType, + NvU16 eventSubtype, + EVENT_BUFFER_PRODUCER_DATA* pData +) +{ + NV_EVENT_BUFFER_RECORD *record; + + if (info->isEnabled) + { + record = _eventBufferGetFreeRecord(info); + if (record) + { + record->recordHeader.type = eventType; + record->recordHeader.subtype = eventSubtype; + + if (pData->payloadSize) + portMemCopy(record->inlinePayload, pData->payloadSize, + NvP64_VALUE(pData->pPayload), pData->payloadSize); + + _eventBufferAddVardata(info, pData->pVardata, pData->vardataSize, &record->recordHeader); + } + } +} + +NV_EVENT_BUFFER_RECORD* +_eventBufferGetFreeRecord(EVENT_BUFFER_PRODUCER_INFO *info) +{ + RECORD_BUFFER_INFO* pRecInfo = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = pRecInfo->pHeader; + NvU32 recordOffset = 0; + NV_EVENT_BUFFER_RECORD* pFreeRecord = NULL; + + NvU32 putNext = pHeader->recordPut + 1; + + if (putNext == pRecInfo->totalRecordCount) + putNext = 0; + + if ((!info->isKeepNewest) && (putNext == pHeader->recordGet)) + { + pHeader->recordDropcount++; + } + else + { + recordOffset = pHeader->recordPut * pRecInfo->recordSize; + pFreeRecord = (NV_EVENT_BUFFER_RECORD*)((NvUPtr)pRecInfo->recordBuffAddr + recordOffset); + + pHeader->recordCount++; + pHeader->recordPut = putNext; + } + return pFreeRecord; +} + +void +_eventBufferAddVardata +( + EVENT_BUFFER_PRODUCER_INFO *info, + NvP64 data, + NvU32 size, + NV_EVENT_BUFFER_RECORD_HEADER* recordHeader +) +{ + VARDATA_BUFFER_INFO *pVarInfo = &info->vardataBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = info->recordBuffer.pHeader; + NvU32 pVardataOffset; + NvU32 alignedSize = NV_ALIGN_UP(size, NV_EVENT_VARDATA_GRANULARITY); + NvU32 vardataOffsetEnd = pVarInfo->put + alignedSize; + + if (vardataOffsetEnd <= pVarInfo->bufferSize) + { + if ((!info->isKeepNewest) && (pVarInfo->remainingSize < alignedSize)) + goto skip; + + pVardataOffset = pVarInfo->put; + recordHeader->varData = vardataOffsetEnd; + } + else + { + // wrap-around; the effective vardataPut=0, vardataOffsetEnd=size + vardataOffsetEnd = 0 + alignedSize; + if ((!info->isKeepNewest) && (pVarInfo->get <= vardataOffsetEnd)) + goto skip; + + recordHeader->varData = vardataOffsetEnd | NV_EVENT_VARDATA_START_OFFSET_ZERO; + pVardataOffset = 0; + } + + if(size) + { + portMemCopy((void*)((NvUPtr)pVarInfo->vardataBuffAddr + pVardataOffset), size, NvP64_VALUE(data), size); + + if (alignedSize != size) + { + pVardataOffset += size; + 
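+            // Zero the padding between the end of the payload and the next
+            // NV_EVENT_VARDATA_GRANULARITY boundary so consumers never see
+            // stale bytes left over from an earlier record.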
portMemSet((void*)((NvUPtr)pVarInfo->vardataBuffAddr + pVardataOffset), 0, (alignedSize - size)); + } + } + + pVarInfo->put = vardataOffsetEnd; + _eventBufferUpdateVarRemaingSize(info); + return; + +skip: + recordHeader->varData = pVarInfo->put; + pHeader->vardataDropcount += 1; +} + +void +_eventBufferUpdateVarRemaingSize(EVENT_BUFFER_PRODUCER_INFO* info) +{ + VARDATA_BUFFER_INFO *pVarInfo = &info->vardataBuffer; + + if (!info->isKeepNewest) + { + if (pVarInfo->get <= pVarInfo->put) + pVarInfo->remainingSize = pVarInfo->get + (pVarInfo->bufferSize - pVarInfo->put); + else + pVarInfo->remainingSize = pVarInfo->get - pVarInfo->put; + } +} + +NvBool +eventBufferIsNotifyThresholdMet(EVENT_BUFFER_PRODUCER_INFO* info) +{ + VARDATA_BUFFER_INFO *pVarInfo = &info->vardataBuffer; + RECORD_BUFFER_INFO* pRecInfo = &info->recordBuffer; + NV_EVENT_BUFFER_HEADER* pHeader = pRecInfo->pHeader; + + if (!info->isKeepNewest) + { + if (((pRecInfo->totalRecordCount - pHeader->recordCount) <= pRecInfo->notificationThreshold) || + (pVarInfo->remainingSize <= pVarInfo->notificationThreshold)) + { + return NV_TRUE; + } + } + return NV_FALSE; +} + diff --git a/src/nvidia/src/libraries/ioaccess/ioaccess.c b/src/nvidia/src/libraries/ioaccess/ioaccess.c new file mode 100644 index 000000000..a05f00aaf --- /dev/null +++ b/src/nvidia/src/libraries/ioaccess/ioaccess.c @@ -0,0 +1,146 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "ioaccess/ioaccess.h" +#include "utils/nvprintf.h" +#include "nvport/nvport.h" + +/*! + * @brief: Allocate and initialize an IO_APERTURE instance. + * + * @param[out] ppAperture pointer to the new IO_APERTURE. + * @param[in] pParentAperture pointer to the parent of the new IO_APERTURE. + * @param[in] pDevice pointer to IO_DEVICE of the APERTURE. + * @param[in] offset offset from the parent APERTURE's baseAddress. + * @param[in] length length of the APERTURE. + * + * @return NV_OK upon success + * NV_ERR* otherwise. 
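+ *
+ *         When pParentAperture is non-NULL, the new aperture inherits its
+ *         pDevice and baseAddress (plus offset); at least one of pDevice and
+ *         pParentAperture must be non-NULL.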
+ */ +NV_STATUS +ioaccessCreateIOAperture +( + IO_APERTURE **ppAperture, + IO_APERTURE *pParentAperture, + IO_DEVICE *pDevice, + NvU32 offset, + NvU32 length +) +{ + NV_STATUS status = NV_OK; + IO_APERTURE *pAperture = portMemAllocNonPaged(sizeof(IO_APERTURE)); + + if (pAperture == NULL) + { + return NV_ERR_NO_MEMORY; + } + + portMemSet(pAperture, 0, sizeof(IO_APERTURE)); + + status = ioaccessInitIOAperture(pAperture, pParentAperture, pDevice, offset, length); + if (status != NV_OK) + { + portMemFree(pAperture); + } + else + { + *ppAperture = pAperture; + } + + return status; +} + + +/*! + * Initialize an IO_APERTURE instance. This enables initialization for derived IO_APERTURE instances + * that are not allocated via CreateIOAperture. + * + * @param[in,out] pAperture pointer to IO_APERTURE instance to be initialized. + * @param[in] pParentAperture pointer to parent of the new IO_APERTURE. + * @param[in] pDevice pointer to IO_DEVICE of the APERTURE. + * @param[in] offset offset from the parent APERTURE's baseAddress. + * @param[in] length length of the APERTURE. + * + * @return NV_OK when inputs are valid. + */ +NV_STATUS +ioaccessInitIOAperture +( + IO_APERTURE *pAperture, + IO_APERTURE *pParentAperture, + IO_DEVICE *pDevice, + NvU32 offset, + NvU32 length +) +{ + if (pAperture == NULL) + { + return NV_ERR_INVALID_ARGUMENT; + } + + // + // Aperture's IO device can't be set if both the parent aperture and IO device + // input arguments are NULL. + // + if ((pDevice == NULL) && (pParentAperture == NULL)) + { + return NV_ERR_INVALID_ARGUMENT; + } + + if (pDevice != NULL) + { + pAperture->pDevice = pDevice; + } + + if (pParentAperture != NULL) + { + pAperture->pDevice = pParentAperture->pDevice; + pAperture->baseAddress = pParentAperture->baseAddress; + + // Check if the child Aperture strides beyond the parent's boundary. + if ((length + offset) > pParentAperture->length) + { + NV_PRINTF(LEVEL_WARNING, + "Child aperture crosses parent's boundary, length %u offset %u, Parent's length %u\n", + length, offset, pParentAperture->length); + } + } + else + { + pAperture->baseAddress = 0; + } + + pAperture->baseAddress += offset; + pAperture->length = length; + + return NV_OK; +} + +void +ioaccessDestroyIOAperture +( + IO_APERTURE *pAperture +) +{ + portMemFree(pAperture); +} diff --git a/src/nvidia/src/libraries/mmu/gmmu_fmt.c b/src/nvidia/src/libraries/mmu/gmmu_fmt.c new file mode 100644 index 000000000..6f438adf1 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/gmmu_fmt.c @@ -0,0 +1,271 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "mmu/gmmu_fmt.h" + +#if PORT_IS_CHECKED_BUILD +#define GMMU_DBG_CHECKS +#endif + +const NvU32 g_gmmuFmtVersions[GMMU_FMT_MAX_VERSION_COUNT] = +{ + GMMU_FMT_VERSION_1, + GMMU_FMT_VERSION_2, +}; + +const NvU32 g_gmmuFmtBigPageShifts[GMMU_FMT_MAX_BIG_PAGE_SIZES] = +{ + 16, + 17, +}; + +const GMMU_FMT_PDE* gmmuFmtGetPde +( + const GMMU_FMT *pFmt, + const MMU_FMT_LEVEL *pLevel, + const NvU32 subLevel +) +{ + switch (pLevel->numSubLevels) + { + case 0: + return NULL; + case 1: + return pFmt->pPde; + default: + NV_ASSERT_OR_RETURN(subLevel < MMU_FMT_MAX_SUB_LEVELS, NULL); + return &pFmt->pPdeMulti->subLevels[subLevel]; + } + return NULL; +} + +NvBool +gmmuFmtEntryIsPte +( + const GMMU_FMT *pFmt, + const MMU_FMT_LEVEL *pLevel, + const NvU8 *pEntry +) +{ + const NvBool bPageTable = pLevel->bPageTable; + const NvBool bPageDir = pLevel->numSubLevels > 0; + if (bPageTable && bPageDir) + { + return nvFieldGetBool(&pFmt->pPte->fldValid, pEntry); + } + else if (bPageTable) + { + return NV_TRUE; + } + else + { + NV_ASSERT(bPageDir); + return NV_FALSE; + } +} + +const GMMU_FIELD_ADDRESS * +gmmuFmtPdePhysAddrFld +( + const GMMU_FMT_PDE *pPde, + const GMMU_APERTURE aperture +) +{ + { + switch (aperture) + { + case GMMU_APERTURE_SYS_COH: + case GMMU_APERTURE_SYS_NONCOH: + return &pPde->fldAddrSysmem; + case GMMU_APERTURE_VIDEO: + return &pPde->fldAddrVidmem; + default: + NV_ASSERT(0); + return NULL; + } + } +} + +const GMMU_FIELD_ADDRESS * +gmmuFmtPtePhysAddrFld +( + const GMMU_FMT_PTE *pPte, + const GMMU_APERTURE aperture +) +{ + switch (aperture) + { + case GMMU_APERTURE_SYS_COH: + case GMMU_APERTURE_SYS_NONCOH: + return &pPte->fldAddrSysmem; + case GMMU_APERTURE_PEER: + return &pPte->fldAddrPeer; + case GMMU_APERTURE_VIDEO: + return &pPte->fldAddrVidmem; + default: + NV_ASSERT(0); + return NULL; + } +} + +void gmmuFmtInitPteCompTags +( + const GMMU_FMT *pFmt, + const MMU_FMT_LEVEL *pLevel, + const GMMU_COMPR_INFO *pCompr, + const NvU64 surfOffset, + const NvU32 startPteIndex, + const NvU32 numPages, + NvU8 *pEntries +) +{ + NvU32 i, compPageIndex, endCompPageIndex; + NvU64 offset = surfOffset; + const NvU32 pageSize = NvU64_LO32(mmuFmtLevelPageSize(pLevel)); + const NV_FIELD_DESC32 *pCtlSubIndexFld = &pFmt->pPte->fldCompTagSubIndex; + NvU32 ctlSubIndexMask = 0; + NvU32 ctlSubIndexShift = 0; + + // + // Surface offset must be aligned to the page size. + // Otherwise we're trying to map part-way into the physical pages. + // + NV_ASSERT(0 == (surfOffset & (pageSize - 1))); + + // + // On GM20X the MSB bit of the CTL field selects which half of a 128KB + // compression page is used when page size is <= 64KB. + // This bit is generalized in the format description as a separate + // CTL sub index field. + // + // If the field is valid, calculate the mask and shift that will be + // applied to the surface offset to select the sub index. + // + // TODO: This should be precomputed, but need to update APIs. + // + if (nvFieldIsValid32(pCtlSubIndexFld)) + { + ctlSubIndexMask = pCtlSubIndexFld->maskPos >> pCtlSubIndexFld->shift; + ctlSubIndexShift = pCompr->compPageShift - + nvPopCount32(pCtlSubIndexFld->maskPos); + } + // + // If not supported (pre-GM20X) HW takes the CTL sub index + // from the virtual address instead. 
This adds a restriction + // that virtual addresses must be aligned to compression + // page size when compression is used. + // + // This is further complicated with the use of Tiled Pools/Resources + // where two or more virtual mappings alias to the same compressed surface + // without control over the alignment (application controlled). + // For this case the only pre-GM20X option is to assign each + // 64KB physical page an entire 128KB compression page, wasting + // half of each comptagline. + // This implies that the aliased virtual mappings cannot be + // used consistently *at the same time* since the views may not use the + // same comptagline half. + // Therefore each view requires a surface clear when it takes ownership + // of the memory. + // Note this double-comptagline assignment is not handled in this + // function. See CNvLPagerFermi::overrideCompTagLineInfo for details. + // + // If this assertion fails then the alignment is not being + // enforced properly higher up in the driver stack. + // This API cannot fail so there is no corrective action, + // but visual corruption will likely occur. + // +#if defined(GMMU_DBG_CHECKS) + else + { + const NvU64 comprPageMask = NVBIT64(pCompr->compPageShift) - 1; + const NvU64 virtCtlOffset = (startPteIndex * pageSize) & comprPageMask; + const NvU64 surfCtlOffset = surfOffset & comprPageMask; + NV_ASSERT(virtCtlOffset == surfCtlOffset); + } +#endif + + // + // The following table is an example of how comptaglines are assigned + // to a surface with N 64KB pages on HW with 128KB compression page size. + // + // The compPageIndex variables index compression pages (e.g. 128KB chunks) + // starting from the start of the surface (0). + // The below factor of (compPageIndex * 2) derives from + // 128KB compression page size / 64KB page size. + // + // Notice that the compPageIndex range allows for any contiguous subset + // of the surface to be compressed. Normally the entire surface + // is compressed but the clamping allows partial compression as a + // fallback (when comptags fragment) and for verification purposes. + // + // +---------------------------+---------------------+---------------+ + // | Surface Page Index (64KB) | CompTagLine (128KB) | CTL Sub Index | + // +---------------------------+---------------------+---------------+ + // | 0 | N/A | N/A | + // | 1 | N/A | N/A | + // | ... | N/A | N/A | + // | compPageIndexLo * 2 + 0 | compTagLineMin + 0 | 0 | + // | compPageIndexLo * 2 + 1 | compTagLineMin + 0 | 1 | + // | compPageIndexLo * 2 + 2 | compTagLineMin + 1 | 0 | + // | compPageIndexLo * 2 + 3 | compTagLineMin + 1 | 1 | + // | ... | ... | ... | + // | compPageIndexHi * 2 - 3 | compTagLineMax - 1 | 0 | + // | compPageIndexHi * 2 - 2 | compTagLineMax - 1 | 1 | + // | compPageIndexHi * 2 - 1 | compTagLineMax - 0 | 0 | + // | compPageIndexHi * 2 - 0 | compTagLineMax - 0 | 1 | + // | ... | N/A | N/A | + // | N - 2 | N/A | N/A | + // | N - 1 | N/A | N/A | + // +---------------------------+---------------------+---------------+ + // + // compTagLineMax = compTagLineMin + (compPageIndexHi - compPageIndexLo) + // + for (i = 0; i < numPages; ++i) + { + // 2MB page require 0x20 comptags to be contiguous. so check for endPage limit too. 
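+        // Both the first and the last compression page spanned by this PTE
+        // must fall within [compPageIndexLo, compPageIndexHi]; otherwise the
+        // PTE is left untouched by this function.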
+ compPageIndex = (NvU32)(offset >> pCompr->compPageShift); + endCompPageIndex = (NvU32)((offset + pageSize -1)>> pCompr->compPageShift); + if ((compPageIndex >= pCompr->compPageIndexLo) && + (compPageIndex <= pCompr->compPageIndexHi) && + (endCompPageIndex >= pCompr->compPageIndexLo) && + (endCompPageIndex <= pCompr->compPageIndexHi)) + { + NvU8 *pPte = pEntries + (i * pLevel->entrySize); + NvU32 compTagLine = (compPageIndex - pCompr->compPageIndexLo) * pCompr->compTagLineMultiplier + + pCompr->compTagLineMin; + + nvFieldSet32(&pFmt->pPte->fldKind, pCompr->compressedKind, pPte); + nvFieldSet32(&pFmt->pPte->fldCompTagLine, compTagLine, pPte); + + // Calculate the CTL sub index if supported. + if (0 != ctlSubIndexMask) + { + NvU32 ctlSubIndex = (NvU32)(offset >> ctlSubIndexShift) & + ctlSubIndexMask; + nvFieldSet32(pCtlSubIndexFld, ctlSubIndex, pPte); + } + } + + offset += pageSize; + } +} diff --git a/src/nvidia/src/libraries/mmu/mmu_fmt.c b/src/nvidia/src/libraries/mmu/mmu_fmt.c new file mode 100644 index 000000000..d8c9f68de --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_fmt.c @@ -0,0 +1,134 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "mmu/mmu_fmt.h" + +NvU64 +mmuFmtAllPageSizes(const MMU_FMT_LEVEL *pLevel) +{ + NvU32 i; + NvU64 mask = 0; + if (pLevel->bPageTable) + { + mask |= mmuFmtLevelPageSize(pLevel); + } + for (i = 0; i < pLevel->numSubLevels; ++i) + { + mask |= mmuFmtAllPageSizes(pLevel->subLevels + i); + } + return mask; +} + +NvU64 +mmuFmtAllLevelCoverages(const MMU_FMT_LEVEL *pLevel) +{ + NvU32 i; + NvU64 mask = mmuFmtLevelPageSize(pLevel); + for (i = 0; i < pLevel->numSubLevels; ++i) + { + mask |= mmuFmtAllLevelCoverages(pLevel->subLevels + i); + } + return mask; +} + +const MMU_FMT_LEVEL * +mmuFmtFindLevelWithPageShift +( + const MMU_FMT_LEVEL *pLevel, + const NvU64 pageShift +) +{ + NvU32 i; + if (pLevel->virtAddrBitLo == pageShift) + { + return pLevel; + } + for (i = 0; i < pLevel->numSubLevels; ++i) + { + const MMU_FMT_LEVEL *pRes = + mmuFmtFindLevelWithPageShift(pLevel->subLevels + i, pageShift); + if (NULL != pRes) + { + return pRes; + } + } + return NULL; +} + +const MMU_FMT_LEVEL * +mmuFmtFindLevelParent +( + const MMU_FMT_LEVEL *pRoot, + const MMU_FMT_LEVEL *pLevel, + NvU32 *pSubLevel +) +{ + NvU32 i; + for (i = 0; i < pRoot->numSubLevels; ++i) + { + const MMU_FMT_LEVEL *pRes; + if ((pRoot->subLevels + i) == pLevel) + { + if (NULL != pSubLevel) + { + *pSubLevel = i; + } + pRes = pRoot; + } + else + { + pRes = mmuFmtFindLevelParent(pRoot->subLevels + i, pLevel, pSubLevel); + } + if (NULL != pRes) + { + return pRes; + } + } + return NULL; +} + +const MMU_FMT_LEVEL * +mmuFmtGetNextLevel +( + const MMU_FMT_LEVEL *pLevelFmt, + const MMU_FMT_LEVEL *pTargetFmt +) +{ + if (pLevelFmt != pTargetFmt) + { + NvU32 subLevel = 0; + if (1 == pLevelFmt->numSubLevels) + { + return pLevelFmt->subLevels; + } + for (subLevel = 0; subLevel < pLevelFmt->numSubLevels; ++subLevel) + { + const MMU_FMT_LEVEL *pSubLevelFmt = pLevelFmt->subLevels + subLevel; + if (pSubLevelFmt == pTargetFmt) + { + return pSubLevelFmt; + } + } + } + return NULL; +} diff --git a/src/nvidia/src/libraries/mmu/mmu_walk.c b/src/nvidia/src/libraries/mmu/mmu_walk.c new file mode 100644 index 000000000..51b19ffaf --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk.c @@ -0,0 +1,1820 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! 
+ * @file + * @brief Describes the structures and interfaces used to walk N level page tables + */ + +/*--------------------------------Includes------------------------------------*/ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#else +#include "os/os.h" +#endif +#include "nvport/nvport.h" +#include "nvctassert.h" +#include "mmu_walk_private.h" + +/*--------------------------Static Function Prototypes------------------------*/ +static NV_STATUS +_mmuWalkLevelInit(const MMU_WALK *pWalk, MMU_WALK_LEVEL *pParent, + const MMU_FMT_LEVEL *pLevelFmt, MMU_WALK_LEVEL *pLevel); +static void +_mmuWalkLevelDestroy(const MMU_WALK *pWalk, MMU_WALK_LEVEL *pLevel); +static NV_STATUS +_mmuWalkLevelInstAcquire(const MMU_WALK *pWalk, MMU_WALK_LEVEL *pLevel, + const NvU64 vaLo, const NvU64 vaHi, const NvBool bTarget, + const NvBool bRelease, const NvBool bCommit, + NvBool *pBChanged, MMU_WALK_LEVEL_INST **ppLevelInst, + const NvBool bInitNv4k); +static void +_mmuWalkLevelInstRelease(const MMU_WALK *pWalk, MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst); +static NV_STATUS NV_NOINLINE +_mmuWalkPdeAcquire(const MMU_WALK *pWalk, const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, MMU_WALK_LEVEL_INST *pLevelInst, + const NvU32 entryIndex, const NvU32 subLevel, + const NvU64 vaLo, const NvU64 vaHi, + MMU_WALK_LEVEL_INST *pSubLevelInsts[]); +static void NV_NOINLINE +_mmuWalkPdeRelease(const MMU_WALK *pWalk, const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, MMU_WALK_LEVEL_INST *pLevelInst, + const NvU32 entryIndex, const NvU64 entryVaLo); +static NV_STATUS NV_NOINLINE +_mmuWalkResolveSubLevelConflicts(const MMU_WALK *pWalk, const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, MMU_WALK_LEVEL_INST *pSubLevelInsts[], + NvU32 subLevel, NvU64 clippedVaLo, NvU64 clippedVaHi); +static void +_mmuWalkLevelInstancesForceFree(MMU_WALK *pWalk, MMU_WALK_LEVEL *pLevel); + +/* -----------------------------Inline Functions----------------------------- */ +/*! + Returns the @ref MMU_ENTRY_STATE of the entry. + */ +MMU_ENTRY_STATE +mmuWalkGetEntryState(MMU_WALK_LEVEL_INST *pLevelInst, NvU32 entryIndex) +{ + return (MMU_ENTRY_STATE)pLevelInst->pStateTracker[entryIndex].state; +} + +/*----------------------------Public Functions--------------------------------*/ + +NV_STATUS +mmuWalkCreate +( + const MMU_FMT_LEVEL *pRootFmt, + MMU_WALK_USER_CTX *pUserCtx, + const MMU_WALK_CALLBACKS *pCb, + const MMU_WALK_FLAGS flags, + MMU_WALK **ppWalk, + MMU_WALK_MEMDESC *pStagingBuffer +) +{ + NV_STATUS status = NV_OK; + MMU_WALK *pWalk = NULL; + + NV_ASSERT_OR_RETURN(NULL != pRootFmt, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pCb, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != ppWalk, NV_ERR_INVALID_ARGUMENT); + + // Alloc and init walker structure. + pWalk = portMemAllocNonPaged(sizeof(*pWalk)); + status = (pWalk == NULL) ? NV_ERR_NO_MEMORY : NV_OK; + NV_ASSERT_OR_GOTO(NV_OK == status, done); + portMemSet(pWalk, 0, sizeof(*pWalk)); + + pWalk->pUserCtx = pUserCtx; + pWalk->pCb = pCb; + pWalk->flags = flags; + pWalk->pStagingBuffer = pStagingBuffer; + pWalk->bUseStagingBuffer = NV_FALSE; + pWalk->bInvalidateOnReserve = NV_TRUE; + + // Create level hierarchy. + status = _mmuWalkLevelInit(pWalk, NULL, pRootFmt, &pWalk->root); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + + // Commit. 
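+    // Hand the fully initialized walker back to the caller; any failure above
+    // falls through to the done label, where mmuWalkDestroy() frees the
+    // partially built level hierarchy.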
+ *ppWalk = pWalk; + +done: + if (NV_OK != status) + { + mmuWalkDestroy(pWalk); + } + return status; +} + +void +mmuWalkDestroy +( + MMU_WALK *pWalk +) +{ + if (NULL != pWalk) + { + // Destroy level hierarchy. + _mmuWalkLevelDestroy(pWalk, &pWalk->root); + + // Free walker struct. + portMemFree(pWalk); + } +} + +NV_STATUS +mmuWalkContinue +( + MMU_WALK *pWalk +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +void +mmuWalkCommit +( + MMU_WALK *pWalk +) +{ + // TODO +} + +MMU_WALK_USER_CTX * +mmuWalkGetUserCtx +( + const MMU_WALK *pWalk +) +{ + return pWalk->pUserCtx; +} + +void +mmuWalkSetUserCtx +( + MMU_WALK *pWalk, + MMU_WALK_USER_CTX *pUserCtx +) +{ + pWalk->pUserCtx = pUserCtx; +} + +const MMU_WALK_CALLBACKS * +mmuWalkGetCallbacks +( + const MMU_WALK *pWalk +) +{ + return pWalk->pCb; +} + +void +mmuWalkSetCallbacks +( + MMU_WALK *pWalk, + const MMU_WALK_CALLBACKS *pCb +) +{ + pWalk->pCb = pCb; +} + +void +mmuWalkLevelInstancesForceFree +( + MMU_WALK *pWalk +) +{ + _mmuWalkLevelInstancesForceFree(pWalk, &pWalk->root); +} + +/*----------------------------Private Functions--------------------------------*/ + +const MMU_WALK_LEVEL * +mmuWalkFindLevel +( + const MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt +) +{ + const MMU_WALK_LEVEL *pLevel = &pWalk->root; + while (pLevel->pFmt != pLevelFmt) + { + NvU32 subLevel; + // Single sub-level always continues. + if (1 == pLevel->pFmt->numSubLevels) + { + pLevel = pLevel->subLevels; + continue; + } + // Multi sub-level must pick branch based on target. + for (subLevel = 0; subLevel < pLevel->pFmt->numSubLevels; ++subLevel) + { + if ((pLevel->pFmt->subLevels + subLevel) == pLevelFmt) + { + return pLevel->subLevels + subLevel; + } + } + // Nothing found. + return NULL; + } + return pLevel; +} + +/*! + * @brief This function traverses the topology described by @ref + * MMU_FMT_LEVEL and @ref MMU_DESC_PDE. The @ref MmuOpFunc + * opFunc implements the actions needed to be perfomed at each + * sublevel in the recursion. + * + * @param[in] vaLo The lower end of the Virtual Address range that is + * being processed. + * @param[in] vaHi The upper end of the Virtual Address range that is + * being processed + * + * @return NV_OK if processing this level succeeds. + * Other errors, if not. + */ +NV_STATUS mmuWalkProcessPdes +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + NvU64 vaLo, + NvU64 vaHi +) +{ + + if (pWalk->flags.bUseIterative) + { + // Iterative MMU Walker Implementation + NV_STATUS status = NV_OK; + NV_ASSERT_OR_RETURN(pOpParams != NULL, NV_ERR_INVALID_ARGUMENT); + + // Call opFunc inititially to see if we need to walk + status = pOpParams->opFunc(pWalk, + pOpParams, + pLevel, + pLevelInst, + vaLo, + vaHi); + + // + // If NV_ERR_MORE_PROCESSING_REQUIRED is returned above, + // the recursive MMU Walker would have started recursing down, + // so here we kick off the iteration. + // If NV_OK is returned above, the recursive MMU Walker would + // not recurse at all, so return immediately. 
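+        // The two lists declared below act as explicit stacks replacing the
+        // recursion: processPdesStack holds PDE index ranges still to be
+        // visited, while releasePdesStack remembers parent entries whose
+        // _mmuWalkPdeRelease() must run once their sub-levels are done.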
+ // + if (NV_ERR_MORE_PROCESSING_REQUIRED == status) + { + status = NV_OK; + + NvU64 vaLevelBase = mmuFmtLevelVirtAddrLo(pLevel->pFmt, vaLo); + NvU32 entryIndexLo = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaLo); + NvU32 entryIndexHi = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaHi); + NvU32 entryIndex; + NvU32 index; + NvU32 entryIndexFillStart; + NvU32 entryIndexFillEnd; + NvU32 pendingFillCount = 0; + + // Declarations for mmuWalk recursion conversion + MMU_WALK_PROCESS_PDES_ENTRY *pProcessPdeEntry; + MMU_WALK_RELEASE_PDES_ENTRY *pReleasePdeEntry; + PROCESS_PDES_STACK processPdesStack; + RELEASE_PDES_STACK releasePdesStack; + listInit(&processPdesStack, portMemAllocatorGetGlobalNonPaged()); + listInit(&releasePdesStack, portMemAllocatorGetGlobalNonPaged()); + + // + // Walk over each relevant entry (PDE) within this Page Level + // Do one initial loop to kick off iteration + // Add entries in reverse order because processPdesStack is a stack + // + for (entryIndex = entryIndexHi; entryIndex >= entryIndexLo; entryIndex--) + { + pProcessPdeEntry = listPrependNew(&processPdesStack); + if (pProcessPdeEntry == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_ASSERT_OR_GOTO(0, cleanupIter); + } + + // + // The values pushed to the stack must NOT be pointers to variables on the stack + // All of these are simple values or pointers to a variable allocated by a function + // calling the MMU Walker. + // + pProcessPdeEntry->pLevel = pLevel; + pProcessPdeEntry->pLevelInst = pLevelInst; + pProcessPdeEntry->vaLo = vaLo; + pProcessPdeEntry->vaHi = vaHi; + pProcessPdeEntry->vaLevelBase = vaLevelBase; + pProcessPdeEntry->entryIndexHi = entryIndexHi; + pProcessPdeEntry->entryIndex = entryIndex; + + // Prevent underflow because of adding entries in reverse order + if (entryIndex == 0) break; + } + + while ((pProcessPdeEntry = listHead(&processPdesStack)) != NULL) + { + pLevel = pProcessPdeEntry->pLevel; + pLevelInst = pProcessPdeEntry->pLevelInst; + vaLo = pProcessPdeEntry->vaLo; + vaHi = pProcessPdeEntry->vaHi; + vaLevelBase = pProcessPdeEntry->vaLevelBase; + entryIndexHi = pProcessPdeEntry->entryIndexHi; + entryIndex = pProcessPdeEntry->entryIndex; + + listRemove(&processPdesStack, pProcessPdeEntry); + + const NvU64 entryVaLo = mmuFmtEntryIndexVirtAddrLo(pLevel->pFmt, + vaLevelBase, entryIndex); + const NvU64 entryVaHi = mmuFmtEntryIndexVirtAddrHi(pLevel->pFmt, + vaLevelBase, entryIndex); + const NvU64 clippedVaLo = NV_MAX(vaLo, entryVaLo); + const NvU64 clippedVaHi = NV_MIN(vaHi, entryVaHi); + const MMU_ENTRY_STATE currEntryState = mmuWalkGetEntryState(pLevelInst, entryIndex); + NvU32 subLevel = 0; + MMU_WALK_LEVEL_INST *pSubLevelInsts[MMU_FMT_MAX_SUB_LEVELS] = {0}; + + // Optimizations for release operations. + if (pOpParams->bRelease) + { + // Skip this entry if it is neither a PDE nor marked as a hybrid entry. + if ((MMU_ENTRY_STATE_IS_PDE != currEntryState) && + !pLevelInst->pStateTracker[entryIndex].bHybrid) + continue; + } + + // Optimizations for fill operations. + if (pOpParams->bFill) + { + const MMU_FILL_TARGET *pTarget = (const MMU_FILL_TARGET *) pOpParams->pOpCtx; + + if (pendingFillCount == 0) + entryIndexFillStart = entryIndexFillEnd = entryIndex; + + // + // Check if the entire entry's coverage is being filled to + // a constant state. + // + // If this entry is not currently a PDE we can + // apply the fill operation directly + // at this level and avoid "splitting" the PDE. 
+ // + // If this entry is currently a PDE we must + // clear the entries of the lower levels to free + // unused level instances. + // + if ((pTarget->entryState != currEntryState) && + (MMU_ENTRY_STATE_IS_PDE != currEntryState) && + (entryVaLo == clippedVaLo) && + (entryVaHi == clippedVaHi)) + { + entryIndexFillEnd = entryIndex; + pendingFillCount++; + + // Not the last iteration, keep batching.. + if (entryIndex < entryIndexHi) + continue; + } + + if (pendingFillCount != 0) + { + NvU32 progress = 0; + + // Flush pending fills + pWalk->pCb->FillEntries(pWalk->pUserCtx, + pLevel->pFmt, + pLevelInst->pMemDesc, + entryIndexFillStart, + entryIndexFillEnd, + pTarget->fillState, + &progress); + + if (progress != (entryIndexFillEnd - entryIndexFillStart + 1)) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(0, cleanupIter); + } + + for (index = entryIndexFillStart; index <= entryIndexFillEnd; index++) + mmuWalkSetEntryState(pLevelInst, index, pTarget->entryState); + + pendingFillCount = 0; + } + + // Recheck the state after fill. If nothing to do, continue.. + if (pTarget->entryState == mmuWalkGetEntryState(pLevelInst, entryIndex)) + continue; + + } // End of fill optimizations. + + // Determine the sublevel we need to operate on. + status = pOpParams->selectSubLevel(pOpParams->pOpCtx, + pLevel, + &subLevel, + clippedVaLo, + clippedVaHi); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanupIter); + + // + // Allocate the sublevel instances for the current PDE and update the current + // Page Dir (i.e. write the PDE into the Page Dir) if needed. + // + status = _mmuWalkPdeAcquire(pWalk, + pOpParams, + pLevel, + pLevelInst, + entryIndex, + subLevel, + clippedVaLo, + clippedVaHi, + pSubLevelInsts); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanupIter); + + // Release op is done if the target sub-level is absent. + if (pOpParams->bRelease && (NULL == pSubLevelInsts[subLevel])) + { + continue; + } + + // + // Split sparse PDE's range. + // When only a subrange of the original PDE's VA range is being operated + // on we sparsify the remaining range lying outside the operational + // subrange (clippedVaLo to clippedVaHi) + // + if (MMU_ENTRY_STATE_SPARSE == currEntryState) + { + // + // Sparsify the lower part of the VA range that outside the operational + // subrange. + // + if (clippedVaLo > entryVaLo) + { + status = mmuWalkProcessPdes(pWalk, + &g_opParamsSparsify, + pLevel->subLevels + subLevel, + pSubLevelInsts[subLevel], + entryVaLo, + clippedVaLo - 1); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanupIter); + } + + // + // Sparsify the upper part of the VA range that is outside the operational + // subrange. 
+ // + if (clippedVaHi < entryVaHi) + { + status = mmuWalkProcessPdes(pWalk, + &g_opParamsSparsify, + pLevel->subLevels + subLevel, + pSubLevelInsts[subLevel], + clippedVaHi + 1, + entryVaHi); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanupIter); + } + } // Sparse PDE split + + // Resolve potential conflicts in multiple sized page tables + if (pLevel->pFmt->numSubLevels != 1 && + !pOpParams->bIgnoreSubLevelConflicts) + { + status = _mmuWalkResolveSubLevelConflicts(pWalk, + pOpParams, + pLevel, + pSubLevelInsts, + subLevel, + clippedVaLo, + clippedVaHi); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanupIter); + } + + status = pOpParams->opFunc(pWalk, + pOpParams, + pLevel->subLevels + subLevel, + pSubLevelInsts[subLevel], + clippedVaLo, + clippedVaHi); + + if (NV_ERR_MORE_PROCESSING_REQUIRED == status) + { + // + // If NV_ERR_MORE_PROCESSING_REQUIRED is returned above, + // the recursive MMU Walker would have recursed down one + // more level. In this code block, we keep the iteration + // going by doing everything the recursion previously did. + // + status = NV_OK; + pReleasePdeEntry = listPrependNew(&releasePdesStack); + if (pReleasePdeEntry == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_ASSERT_OR_GOTO(0, cleanupIter); + } + + // + // Queue the current level for pdeRelease so that pdeRelease + // can be called AFTER exploring the current level's sublevels. + // + pReleasePdeEntry->pLevel = pLevel; + pReleasePdeEntry->pLevelInst = pLevelInst; + pReleasePdeEntry->entryVaLo = entryVaLo; + pReleasePdeEntry->entryIndexHi = entryIndexHi; + pReleasePdeEntry->entryIndex = entryIndex; + + // + // Here use variables that would be used in the next recursion downwards. + // Calculate new vaLevelBase, entryIndexLo, entryIndexHi, entryIndex + // + vaLevelBase = mmuFmtLevelVirtAddrLo((pLevel->subLevels + subLevel)->pFmt, clippedVaLo); + entryIndexLo = mmuFmtVirtAddrToEntryIndex((pLevel->subLevels + subLevel)->pFmt, clippedVaLo); + entryIndexHi = mmuFmtVirtAddrToEntryIndex((pLevel->subLevels + subLevel)->pFmt, clippedVaHi); + + for (entryIndex = entryIndexHi; entryIndex >= entryIndexLo; entryIndex--) + { + pProcessPdeEntry = listPrependNew(&processPdesStack); + if (pProcessPdeEntry == NULL) + { + status = NV_ERR_NO_MEMORY; + NV_ASSERT_OR_GOTO(0, cleanupIter); + } + + pProcessPdeEntry->pLevel = pLevel->subLevels + subLevel; + pProcessPdeEntry->pLevelInst = pSubLevelInsts[subLevel]; + pProcessPdeEntry->vaLo = clippedVaLo; + pProcessPdeEntry->vaHi = clippedVaHi; + pProcessPdeEntry->vaLevelBase = vaLevelBase; + pProcessPdeEntry->entryIndexHi = entryIndexHi; + pProcessPdeEntry->entryIndex = entryIndex; + + if (entryIndex == 0) break; + } + } + else if (NV_OK == status) + { + // + // If NV_OK is returned above, the recursive MMU Walker would have reached + // the target format level and so reached the base case of its recursion. + // It would then return from recursive function calls an call pdeRelease + // for all levels whose sublevels are done being processed. + // + + // PdeRelease itself immediately since this level does not recurse. + _mmuWalkPdeRelease(pWalk, + pOpParams, + pLevel, + pLevelInst, + entryIndex, + entryVaLo); + + // + // If this is the last processed sublevel of a level, pdeRelease the level. + // Continue doing so for all parent levels. + // + while (entryIndex == entryIndexHi) + { + if ((pReleasePdeEntry = listHead(&releasePdesStack)) != NULL) + { + // Extract variables for the next loop around. 
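+                        // Restoring entryIndexHi/entryIndex from the parent
+                        // frame lets the enclosing while condition decide
+                        // whether the parent has now also finished its last
+                        // entry and can be released on the next pass.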
+ entryIndexHi = pReleasePdeEntry->entryIndexHi; + entryIndex = pReleasePdeEntry->entryIndex; + + _mmuWalkPdeRelease(pWalk, + pOpParams, + pReleasePdeEntry->pLevel, + pReleasePdeEntry->pLevelInst, + pReleasePdeEntry->entryIndex, + pReleasePdeEntry->entryVaLo); + + listRemove(&releasePdesStack, pReleasePdeEntry); + } + else + { + break; + } + } + } + else + { + // Stop processing PDEs if we are in error state. + goto cleanupIter; + } + } // per entry loop + + + if (listHead(&processPdesStack) != NULL) + { + // + // If this assertion fails, it is a result of a programming + // error in the iterative MMU Walker implementation. + // + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(0, cleanupIter); + } + + // + // Note that if releasePdesStack is not empty at this point, + // we hit an empty sublevel, but we still need to pdeRelease + // the parent sublevels in cleanup below. + // + +// Temporarily change the name of this label to avoid conflicting with other "cleanup" +cleanupIter: + + // + // In the recrusive MMU Walker, when a sublevel failed, that level would pdeRelease, + // return to the parent, and the parent would pdeRelease and return to its parent and so on. + // Here emulate that and pdeRelease all parents. + // + + while ((pReleasePdeEntry = listHead(&releasePdesStack)) != NULL) + { + _mmuWalkPdeRelease(pWalk, + pOpParams, + pReleasePdeEntry->pLevel , + pReleasePdeEntry->pLevelInst, + pReleasePdeEntry->entryIndex, + pReleasePdeEntry->entryVaLo); + + listRemove(&releasePdesStack, pReleasePdeEntry); + } + + listDestroy(&processPdesStack); + listDestroy(&releasePdesStack); + + + } + return status; + } + else + { + // Recursive MMU Walker Implementation + NV_STATUS status = NV_OK; + NvU64 vaLevelBase = mmuFmtLevelVirtAddrLo(pLevel->pFmt, vaLo); + NvU32 entryIndexLo = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaLo); + NvU32 entryIndexHi = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaHi); + NvU32 entryIndex; + NvU32 index; + NvU32 entryIndexFillStart; + NvU32 entryIndexFillEnd; + NvU32 pendingFillCount = 0; + + NV_ASSERT_OR_RETURN(NULL != pOpParams, NV_ERR_INVALID_ARGUMENT); + + // Walk over each relevant entry (PDE) within this Page Level + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++) + { + const NvU64 entryVaLo = mmuFmtEntryIndexVirtAddrLo(pLevel->pFmt, + vaLevelBase, entryIndex); + const NvU64 entryVaHi = mmuFmtEntryIndexVirtAddrHi(pLevel->pFmt, + vaLevelBase, entryIndex); + const NvU64 clippedVaLo = NV_MAX(vaLo, entryVaLo); + const NvU64 clippedVaHi = NV_MIN(vaHi, entryVaHi); + const MMU_ENTRY_STATE currEntryState = mmuWalkGetEntryState(pLevelInst, entryIndex); + NvU32 subLevel = 0; + MMU_WALK_LEVEL_INST *pSubLevelInsts[MMU_FMT_MAX_SUB_LEVELS] = {0}; + + // Optimizations for release operations. + if (pOpParams->bRelease) + { + // Skip this entry if it is neither a PDE nor marked as a hybrid entry. + if ((MMU_ENTRY_STATE_IS_PDE != currEntryState) && + !pLevelInst->pStateTracker[entryIndex].bHybrid) + continue; + } + + // Optimizations for fill operations. + if (pOpParams->bFill) + { + const MMU_FILL_TARGET *pTarget = (const MMU_FILL_TARGET *) pOpParams->pOpCtx; + + if (pendingFillCount == 0) + entryIndexFillStart = entryIndexFillEnd = entryIndex; + + // + // Check if the entire entry's coverage is being filled to + // a constant state. + // + // If this entry is not currently a PDE we can + // apply the fill operation directly + // at this level and avoid "splitting" the PDE. 
+ // + // If this entry is currently a PDE we must + // clear the entries of the lower levels to free + // unused level instances. + // + if ((pTarget->entryState != currEntryState) && + (MMU_ENTRY_STATE_IS_PDE != currEntryState) && + (entryVaLo == clippedVaLo) && + (entryVaHi == clippedVaHi)) + { + entryIndexFillEnd = entryIndex; + pendingFillCount++; + + // Not the last iteration, keep batching.. + if (entryIndex < entryIndexHi) + continue; + } + + if (pendingFillCount != 0) + { + NvU32 progress = 0; + + // Flush pending fills + pWalk->pCb->FillEntries(pWalk->pUserCtx, + pLevel->pFmt, + pLevelInst->pMemDesc, + entryIndexFillStart, + entryIndexFillEnd, + pTarget->fillState, + &progress); + + NV_ASSERT_OR_RETURN( + progress == (entryIndexFillEnd - entryIndexFillStart + 1), + NV_ERR_INVALID_STATE); + + for (index = entryIndexFillStart; index <= entryIndexFillEnd; index++) + mmuWalkSetEntryState(pLevelInst, index, pTarget->entryState); + + pendingFillCount = 0; + } + + // Recheck the state after fill. If nothing to do, continue.. + if (pTarget->entryState == mmuWalkGetEntryState(pLevelInst, entryIndex)) + continue; + + } // End of fill optimizations. + + // Determine the sublevel we need to operate on. + status = pOpParams->selectSubLevel(pOpParams->pOpCtx, + pLevel, + &subLevel, + clippedVaLo, + clippedVaHi); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + + // + // Allocate the sublevel instances for the current PDE and update the current + // Page Dir (i.e. write the PDE into the Page Dir) if needed. + // + status = _mmuWalkPdeAcquire(pWalk, + pOpParams, + pLevel, + pLevelInst, + entryIndex, //PDE index being processed + subLevel, //Sub level processed within the PDE + clippedVaLo, //Low VA for the PDE + clippedVaHi, //High VA for the PDE + pSubLevelInsts); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + + // Release op is done if the target sub-level is absent. + if (pOpParams->bRelease && (NULL == pSubLevelInsts[subLevel])) + { + continue; + } + + // + // Split sparse PDE's range. + // When only a subrange of the original PDE's VA range is being operated + // on we sparsify the remaining range lying outside the operational + // subrange (clippedVaLo to clippedVaHi) + // + if (MMU_ENTRY_STATE_SPARSE == currEntryState) + { + // + // Sparsify the lower part of the VA range that outside the operational + // subrange. + // + if (clippedVaLo > entryVaLo) + { + status = g_opParamsSparsify.opFunc(pWalk, + &g_opParamsSparsify, + pLevel->subLevels + subLevel, + pSubLevelInsts[subLevel], + entryVaLo, + clippedVaLo - 1); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + } + + // + // Sparsify the upper part of the VA range that is outside the operational + // subrange. 
+ // + if (clippedVaHi < entryVaHi) + { + status = g_opParamsSparsify.opFunc(pWalk, + &g_opParamsSparsify, + pLevel->subLevels + subLevel, + pSubLevelInsts[subLevel], + clippedVaHi + 1, + entryVaHi); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + } + } // Sparse PDE split + + // Resolve potential conflicts in multiple sized page tables + if (pLevel->pFmt->numSubLevels != 1 && + !pOpParams->bIgnoreSubLevelConflicts) + { + status = _mmuWalkResolveSubLevelConflicts(pWalk, + pOpParams, + pLevel, + pSubLevelInsts, + subLevel, + clippedVaLo, + clippedVaHi); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + } + + // Recurse to update the next level for this PDE + status = pOpParams->opFunc(pWalk, + pOpParams, + pLevel->subLevels + subLevel, + pSubLevelInsts[subLevel], + clippedVaLo, + clippedVaHi); + NV_ASSERT_OR_GOTO(NV_OK == status, cleanup); + +cleanup: + // Free unused sublevel instances. Clear the PDE if all sublevels are deallocated. + _mmuWalkPdeRelease(pWalk, + pOpParams, + pLevel, + pLevelInst, + entryIndex, + entryVaLo); + + // Stop processing PDEs if we are in error state. + if (NV_OK != status) + break; + } // per entry loop + return status; + } + + +} + +/*! + * @brief This function allocates the root Page Directory and commits it the + * related channels. + * + * @param[in] vaLo The lower end of the Virtual Address range that is + * being processed. + * @param[in] vaHi The upper end of the Virtual Address range that is + * being processed + * + * @param[in] bCommit Force commit the PDB + * + * @return NV_OK of allocating this level succeeds. + * Other errors, if not. + */ +NV_STATUS +mmuWalkRootAcquire +( + MMU_WALK *pWalk, + NvU64 vaLo, + NvU64 vaHi, + NvBool bCommit +) +{ + MMU_WALK_LEVEL_INST *pLevelInst = NULL; + NvBool bChanged = NV_FALSE; + + // Acquire root level instance memory. + NV_ASSERT_OK_OR_RETURN( + _mmuWalkLevelInstAcquire(pWalk, &pWalk->root, vaLo, vaHi, + NV_TRUE, NV_FALSE, bCommit, &bChanged, + &pLevelInst, NV_FALSE /*bInitNv4k*/)); + + // We check pLevelInst to catch the corner case, where Commit() is called before PDB allocation. + if (bChanged || (bCommit && pLevelInst)) + { + NvBool bDone; + + // Bind this Page Dir to the affected channels + bDone = pWalk->pCb->UpdatePdb(pWalk->pUserCtx, pWalk->root.pFmt, + pLevelInst->pMemDesc, NV_FALSE); + NV_ASSERT_OR_RETURN(bDone, NV_ERR_INVALID_STATE); + } + + return NV_OK; +} + +/*! + * @brief This function releases the root Page Directory + */ +void +mmuWalkRootRelease +( + MMU_WALK *pWalk +) +{ + MMU_WALK_LEVEL_INST *pLevelInst = pWalk->root.pInstances; + if (NULL != pLevelInst) + { + // Free the level instance if the entry ref count is 0. + if ((0 == pLevelInst->numValid + pLevelInst->numSparse) && + (0 == pLevelInst->numReserved)) + { + NvBool bDone; + + // Commit NULL root page directory (clear usage). + bDone = pWalk->pCb->UpdatePdb(pWalk->pUserCtx, pWalk->root.pFmt, NULL, NV_FALSE); + NV_ASSERT(bDone); + + // Free unused root memory. + _mmuWalkLevelInstRelease(pWalk, &pWalk->root, pLevelInst); + } + } +} + +/*! + * @brief This function updates the @ref MMU_WALK_LEVEL_INST::pStateTracker for an + * entry specified by the entryIndex. + * + * @param[in] entryIndex Index of the entry whose state needs to be updated. 
+ * @param[in] newEntryState The new state of the entry specified by entryIndex + */ +void +mmuWalkSetEntryState +( + MMU_WALK_LEVEL_INST *pLevelInst, + NvU32 entryIndex, + MMU_ENTRY_STATE newEntryState +) +{ + MMU_ENTRY_STATE currEntryState = mmuWalkGetEntryState(pLevelInst, entryIndex); + + // Decrement ref count for current state + switch (currEntryState) + { + case MMU_ENTRY_STATE_IS_PTE: + case MMU_ENTRY_STATE_IS_PDE: + NV_ASSERT(0 != pLevelInst->numValid); + pLevelInst->numValid--; + break; + case MMU_ENTRY_STATE_SPARSE: + NV_ASSERT(0 != pLevelInst->numSparse); + pLevelInst->numSparse--; + break; + case MMU_ENTRY_STATE_NV4K: + NV_ASSERT(0 != pLevelInst->numNv4k); + pLevelInst->numNv4k--; + break; + case MMU_ENTRY_STATE_INVALID: + break; + default: + NV_ASSERT(0); + } + + // Increment new state ref count + switch (newEntryState) + { + case MMU_ENTRY_STATE_IS_PTE: + case MMU_ENTRY_STATE_IS_PDE: + pLevelInst->numValid++; + break; + case MMU_ENTRY_STATE_SPARSE: + pLevelInst->numSparse++; + break; + case MMU_ENTRY_STATE_NV4K: + pLevelInst->numNv4k++; + break; + case MMU_ENTRY_STATE_INVALID: + break; + default: + NV_ASSERT(0); + } + + // Commit new state. + pLevelInst->pStateTracker[entryIndex].state = newEntryState; +} + +void +mmuWalkSetEntryReserved +( + MMU_WALK_LEVEL_INST *pLevelInst, + NvU32 entryIndex, + NvBool bReserved +) +{ + if (pLevelInst->pStateTracker[entryIndex].bReserved) + { + NV_ASSERT(0 != pLevelInst->numReserved); + pLevelInst->numReserved--; + } + if (bReserved) + { + pLevelInst->numReserved++; + } + pLevelInst->pStateTracker[entryIndex].bReserved = bReserved; +} + +void +mmuWalkSetEntryHybrid +( + MMU_WALK_LEVEL_INST *pLevelInst, + NvU32 entryIndex, + NvBool bHybrid +) +{ + if (pLevelInst->pStateTracker[entryIndex].bHybrid) + { + NV_ASSERT(0 != pLevelInst->numHybrid); + pLevelInst->numHybrid--; + } + if (bHybrid) + { + pLevelInst->numHybrid++; + } + pLevelInst->pStateTracker[entryIndex].bHybrid = bHybrid; +} + +/** + * @brief Calculate target entry indices that covers VA range for + * source entries + * + * @details For example, entry 1 in 64K PT is aligned to 4K PT entry 0 to + * 15. 4K PTE 1 to 18 will be covered by 64K PTE 0 to 1. + * + * It is introduced by NV4K encoding. 
Updating big page table + * according to small page table requires index transfering + * + * @param[in] pPageFmtIn Source format + * @param[in] indexLoIn The index lower in + * @param[in] indexHiIn The index higher in + * @param[in] pPageFmtOut Target format + * @param[out] pIndexLoOut The lower result index + * @param[out] pIndexHiOut The higher result index + */ +void +mmuFmtCalcAlignedEntryIndices +( + const MMU_FMT_LEVEL *pPageFmtIn, + const NvU32 indexLoIn, + const NvU32 indexHiIn, + const MMU_FMT_LEVEL *pPageFmtOut, + NvU32 *pIndexLoOut, + NvU32 *pIndexHiOut +) +{ + NvU64 pageSizeIn, pageSizeOut; + NvU64 pageSizeRatio; + NV_ASSERT(pIndexLoOut != NULL && pIndexHiOut != NULL); + NV_ASSERT(pPageFmtIn != NULL && pPageFmtOut != NULL); + + pageSizeIn = mmuFmtLevelPageSize(pPageFmtIn); + pageSizeOut = mmuFmtLevelPageSize(pPageFmtOut); + + if (pageSizeIn < pageSizeOut) + { + pageSizeRatio = pageSizeOut / pageSizeIn; + NV_ASSERT(NvU64_HI32(pageSizeRatio) == 0); + *pIndexLoOut = (NvU32)(indexLoIn / pageSizeRatio); + *pIndexHiOut = (NvU32)(indexHiIn / pageSizeRatio); + } + else + { + pageSizeRatio = pageSizeIn / pageSizeOut; + NV_ASSERT(NvU64_HI32(pageSizeRatio) == 0); + *pIndexLoOut = (NvU32)(indexLoIn * pageSizeRatio); + *pIndexHiOut = (NvU32)((indexHiIn + 1) * pageSizeRatio - 1); + } +} + +/*----------------------------Static Functions--------------------------------*/ + +static NV_STATUS +_mmuWalkLevelInit +( + const MMU_WALK *pWalk, + MMU_WALK_LEVEL *pParent, + const MMU_FMT_LEVEL *pLevelFmt, + MMU_WALK_LEVEL *pLevel +) +{ + // Init pointers. + pLevel->pFmt = pLevelFmt; + pLevel->pParent = pParent; + + if (0 != pLevelFmt->numSubLevels) + { + NvU32 subLevel; + const NvU32 size = pLevelFmt->numSubLevels * (NvU32)sizeof(*pLevel->subLevels); + + // Allocate sub-level array. + pLevel->subLevels = portMemAllocNonPaged(size); + if (pLevel->subLevels == NULL) + return NV_ERR_NO_MEMORY; + + portMemSet(pLevel->subLevels, 0, size); + + // Recursively create each sub-level. + for (subLevel = 0; subLevel < pLevelFmt->numSubLevels; ++subLevel) + { + NV_ASSERT_OK_OR_RETURN( + _mmuWalkLevelInit(pWalk, pLevel, pLevelFmt->subLevels + subLevel, + pLevel->subLevels + subLevel)); + } + } + + return NV_OK; +} + +static void +_mmuWalkLevelDestroy +( + const MMU_WALK *pWalk, + MMU_WALK_LEVEL *pLevel +) +{ + NvU32 subLevel; + + if (NULL != pLevel->subLevels) + { + // Recursively destroy each sub-level. + for (subLevel = 0; subLevel < pLevel->pFmt->numSubLevels; ++subLevel) + { + _mmuWalkLevelDestroy(pWalk, pLevel->subLevels + subLevel); + } + // Free sub-level array. + portMemFree(pLevel->subLevels); + } + + // All level instance memory should be freed already. + NV_ASSERT(NULL == pLevel->pInstances); +} + +/** + * @brief Resolve upcoming state conflicts before mmu walk operations + * + * @example Say we are to mmuWalkMap VA range [vaLo, vaHi] on small PT. + * Assume we have 4K PT and 64K PT as our small PT and big PT, and [vaLo, vaHi] + * is a strict subset of VA range covered by BigPTE[1, 3] and SmallPTE[18, 61]. + * Let's say BigPTE[1, 3] are sparse right now. + * + * To resolve the conflict, we need to preserve sparse state for part of the + * VA range that is not going to be mapped. We need to move those states from + * BigPT to SmallPT. 
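 *
 * (With the 4K + 64K sub-level pair assumed above, the index ratio is 16, so
 * mmuFmtCalcAlignedEntryIndices expands BigPTE[1, 3] to SmallPTE[16, 63];
 * that is how the ranges in the example below are derived.)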
+ * + * Before: + * BigPTE[1, 3]: sparse, SmallPTE[16 - 63]: invalid + * (BigPTE[1, 3] and SmallPTE[16 - 63] are VA aligned) + * After: + * BigPTE[1, 3]: invalid, SmallPTE[16 - 17]: sparse + * SmallPTE[18 - 61]: invalid, will later be mapped + * SmallPTE[62 - 63]: sparse + * + * @example If we are to mmuWalkMap on big PT instead of samll PT, + * and sparse state was on small PT, we just need to invalidate the small PTEs. + * + * Before: + * BigPTE[1, 3]: invalid, + * SmallPTE[16 - 63]: sparse + * After: + * BigPTE[1, 3]: invalid, will later be mapped + * SmallPTE[16 - 63]: invalid + * + * @return NV_OK on success, no other values for now + */ +static NV_STATUS NV_NOINLINE +_mmuWalkResolveSubLevelConflicts +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pSubLevelInsts[], + NvU32 subLevelIdx, + NvU64 clippedVaLo, + NvU64 clippedVaHi +) +{ + NvU32 i = 0; + NvU32 progress = 0; + NV_STATUS status = NV_OK; + NvBool bConflictLo = NV_FALSE; + NvBool bConflictHi = NV_FALSE; + const MMU_FMT_LEVEL *pLevelFmtBig = pLevel->subLevels[0].pFmt; + const MMU_FMT_LEVEL *pLevelFmtSmall = pLevel->subLevels[1].pFmt; + MMU_WALK_LEVEL_INST *pLevelBigInst = pSubLevelInsts[0]; + MMU_WALK_LEVEL_INST *pLevelSmallInst = pSubLevelInsts[1]; + // Entry indicies for target page table + NvU32 entryIndexLo, entryIndexHi; + // Entry indicies involved in both page tables + NvU32 indexLo_Small, indexHi_Small, indexLo_Big, indexHi_Big; + + if (0 == subLevelIdx) + { + entryIndexLo = mmuFmtVirtAddrToEntryIndex(pLevelFmtBig, clippedVaLo); + entryIndexHi = mmuFmtVirtAddrToEntryIndex(pLevelFmtBig, clippedVaHi); + indexLo_Big = entryIndexLo; + indexHi_Big = entryIndexHi; + mmuFmtCalcAlignedEntryIndices(pLevelFmtBig, indexLo_Big, indexHi_Big, + pLevelFmtSmall, &indexLo_Small, &indexHi_Small); + } + else + { + entryIndexLo = mmuFmtVirtAddrToEntryIndex(pLevelFmtSmall, clippedVaLo); + entryIndexHi = mmuFmtVirtAddrToEntryIndex(pLevelFmtSmall, clippedVaHi); + mmuFmtCalcAlignedEntryIndices(pLevelFmtSmall, entryIndexLo, + entryIndexHi, pLevelFmtBig, &indexLo_Big, &indexHi_Big); + mmuFmtCalcAlignedEntryIndices(pLevelFmtBig, indexLo_Big, indexHi_Big, + pLevelFmtSmall, &indexLo_Small, &indexHi_Small); + } + + // check if involved Small PTEs need to be sparsified + if (1 == subLevelIdx && NULL != pLevelSmallInst && NULL != pLevelBigInst) + { + // check lower part + MMU_ENTRY_STATE entryStateBig; + entryStateBig = mmuWalkGetEntryState(pLevelBigInst, indexLo_Big); + bConflictLo = (MMU_ENTRY_STATE_SPARSE == entryStateBig); + + // check higher part + entryStateBig = mmuWalkGetEntryState(pLevelBigInst, indexHi_Big); + bConflictHi = (MMU_ENTRY_STATE_SPARSE == entryStateBig); + } + + if (bConflictLo && entryIndexLo > indexLo_Small) + { + // sparsify lower range of entries + pWalk->pCb->FillEntries(pWalk->pUserCtx, pLevelFmtSmall, + pLevelSmallInst->pMemDesc, indexLo_Small, entryIndexLo - 1, + MMU_WALK_FILL_SPARSE, &progress); + NV_ASSERT_OR_RETURN(progress == entryIndexLo - indexLo_Small, + NV_ERR_INVALID_STATE); + + for (i = indexLo_Small; i <= entryIndexLo - 1; i++) + { + mmuWalkSetEntryState(pLevelSmallInst, i, MMU_ENTRY_STATE_SPARSE); + } + } + + if (bConflictHi && entryIndexHi < indexHi_Small) + { + // sparsify higher range of entries + pWalk->pCb->FillEntries(pWalk->pUserCtx, pLevelFmtSmall, + pLevelSmallInst->pMemDesc, entryIndexHi + 1, indexHi_Small, + MMU_WALK_FILL_SPARSE, &progress); + NV_ASSERT_OR_RETURN(progress == indexHi_Small - entryIndexHi, + NV_ERR_INVALID_STATE); + + 
for (i = entryIndexHi + 1; i <= indexHi_Small; i++) + { + mmuWalkSetEntryState(pLevelSmallInst, i, MMU_ENTRY_STATE_SPARSE); + } + } + + // invalidate the VA range in the other page table + if (NULL != pLevelSmallInst && NULL != pLevelBigInst) + { + NvU32 indexLo_tmp, indexHi_tmp; + const MMU_FMT_LEVEL *pSubLevelFmt; + MMU_WALK_LEVEL_INST *pSubLevelInst; + + if (subLevelIdx == 0) + { + indexLo_tmp = indexLo_Small; + indexHi_tmp = indexHi_Small; + pSubLevelFmt = pLevelFmtSmall; + pSubLevelInst = pLevelSmallInst; + } + else + { + indexLo_tmp = indexLo_Big; + indexHi_tmp = indexHi_Big; + pSubLevelFmt = pLevelFmtBig; + pSubLevelInst = pLevelBigInst; + } + + pWalk->pCb->FillEntries(pWalk->pUserCtx, pSubLevelFmt, + pSubLevelInst->pMemDesc, indexLo_tmp, indexHi_tmp, + MMU_WALK_FILL_INVALID, &progress); + NV_ASSERT_OR_RETURN(progress == indexHi_tmp - indexLo_tmp + 1, + NV_ERR_INVALID_STATE); + + for (i = indexLo_tmp; i <= indexHi_tmp; i++) + { + mmuWalkSetEntryState(pSubLevelInst, i, MMU_ENTRY_STATE_INVALID); + } + } + + return status; +} + +/*! + * Lazily allocates and initializes a level instance. + */ +static NV_STATUS +_mmuWalkLevelInstAcquire +( + const MMU_WALK *pWalk, + MMU_WALK_LEVEL *pLevel, + const NvU64 vaLo, + const NvU64 vaHi, + const NvBool bTarget, + const NvBool bRelease, + const NvBool bCommit, + NvBool *pBChanged, + MMU_WALK_LEVEL_INST **ppLevelInst, + const NvBool bInitNv4k +) +{ + NV_STATUS status; + MMU_WALK_MEMDESC *pOldMem; + NvU32 oldSize; + MMU_WALK_LEVEL_INST *pLevelInst = NULL; + NvBool bNew = NV_FALSE; + + // Lookup level instance. + if (NV_OK != btreeSearch(vaLo, (NODE**)&pLevelInst, (NODE*)pLevel->pInstances)) + { + NvU32 numBytes; + + if (!bTarget || bRelease) + { + // Skip missing non-target instances. + *ppLevelInst = NULL; + return NV_OK; + } + + // We only call Commit() on already allocated page directory. + NV_ASSERT_OR_RETURN(!bCommit, NV_ERR_INVALID_STATE); + + // Mark as newly allocated. + bNew = NV_TRUE; + + // Allocate missing target instances. + pLevelInst = portMemAllocNonPaged(sizeof(*pLevelInst)); + status = (pLevelInst == NULL) ? NV_ERR_NO_MEMORY : NV_OK; + NV_ASSERT_OR_GOTO(NV_OK == status, done); + portMemSet(pLevelInst, 0, sizeof(*pLevelInst)); + + // Insert the new node into the tree of instances for this page level. + pLevelInst->node.keyStart = mmuFmtLevelVirtAddrLo(pLevel->pFmt, vaLo); + pLevelInst->node.keyEnd = mmuFmtLevelVirtAddrHi(pLevel->pFmt, vaHi); + + status = btreeInsert(&pLevelInst->node, (NODE**)&pLevel->pInstances); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + + // Allocate entry tracker. + numBytes = mmuFmtLevelEntryCount(pLevel->pFmt) * sizeof(MMU_ENTRY_INFO); + pLevelInst->pStateTracker = portMemAllocNonPaged(numBytes); + status = (pLevelInst->pStateTracker == NULL) ? NV_ERR_NO_MEMORY : NV_OK; + NV_ASSERT_OR_GOTO(NV_OK == status, done); + portMemSet(pLevelInst->pStateTracker, 0, numBytes); + if (bInitNv4k) + { + NvU32 i; + for (i = 0; i < mmuFmtLevelEntryCount(pLevel->pFmt); ++i) + { + mmuWalkSetEntryState(pLevelInst, i, MMU_ENTRY_STATE_NV4K); + } + } + } + + // Save original memory info. + pOldMem = pLevelInst->pMemDesc; + oldSize = pLevelInst->memSize; + + // Allocate (possibly reallocating) memory for this level instance. 
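    // (Illustrative: for a partial-size page table that previously held 64
    //  entries, a grow reallocation copies entries 0..63 into the new memory
    //  below and, when bInvalidateOnReserve is set, fills the newly exposed
    //  entries with INVALID, or with NV4K when bInitNv4k is set.)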
+ status = pWalk->pCb->LevelAlloc(pWalk->pUserCtx, + pLevel->pFmt, + mmuFmtLevelVirtAddrLo(pLevel->pFmt, vaLo), + vaHi, + bTarget, + &pLevelInst->pMemDesc, + &pLevelInst->memSize, + pBChanged); + NV_ASSERT_OR_GOTO(NV_OK == status, done); + + if (*pBChanged) + { + const NvU32 entryIndexLo = oldSize / pLevel->pFmt->entrySize; + const NvU32 entryIndexHi = (pLevelInst->memSize / pLevel->pFmt->entrySize) - 1; + NvU32 progress = 0; + + // + // default state for new entries + // NV4K for big page table if ATS is enabled + // + MMU_WALK_FILL_STATE newEntryState = bInitNv4k ? MMU_WALK_FILL_NV4K : + MMU_WALK_FILL_INVALID; + + NV_ASSERT(NULL != pLevelInst->pMemDesc); + NV_ASSERT(entryIndexLo <= entryIndexHi); + + // We only call Commit() on already allocated page directory. + if (bCommit) + { + status = NV_ERR_INVALID_STATE; + NV_ASSERT_OR_GOTO(NV_OK == status, done); + } + + // Copy old entries from old to new. + if (entryIndexLo > 0) + { + NV_ASSERT(NULL != pWalk->pCb->CopyEntries); + pWalk->pCb->CopyEntries(pWalk->pUserCtx, + pLevel->pFmt, + pOldMem, + pLevelInst->pMemDesc, + 0, + entryIndexLo - 1, + &progress); + NV_ASSERT(progress == entryIndexLo); + + // Free old memory. + pWalk->pCb->LevelFree(pWalk->pUserCtx, pLevel->pFmt, + pLevelInst->node.keyStart, pOldMem); + } + + if(pWalk->bInvalidateOnReserve) + { + // Clear new entries to invalid. + pWalk->pCb->FillEntries(pWalk->pUserCtx, + pLevel->pFmt, + pLevelInst->pMemDesc, + entryIndexLo, + entryIndexHi, + newEntryState, + &progress); + NV_ASSERT(progress == entryIndexHi - entryIndexLo + 1); + } + } + else + { + // Ensure hasn't changed. + NV_ASSERT(pOldMem == pLevelInst->pMemDesc && oldSize == pLevelInst->memSize); + } + + // Commit return. + *ppLevelInst = pLevelInst; + +done: + // Cleanup newly allocated instance on failure. + if (NV_OK != status && + bNew && NULL != pLevelInst) + { + _mmuWalkLevelInstRelease(pWalk, pLevel, pLevelInst); + } + return status; +} + +/*! + * Frees an unused level instance. + */ +static void +_mmuWalkLevelInstRelease +( + const MMU_WALK *pWalk, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst +) +{ + NV_ASSERT(0 == pLevelInst->numValid); + NV_ASSERT(0 == pLevelInst->numReserved); + // Unlink. + btreeUnlink(&pLevelInst->node, (NODE**)&pLevel->pInstances); + // Free. + if (NULL != pLevelInst->pMemDesc) + { + pWalk->pCb->LevelFree(pWalk->pUserCtx, pLevel->pFmt, pLevelInst->node.keyStart, + pLevelInst->pMemDesc); + } + portMemFree(pLevelInst->pStateTracker); + portMemFree(pLevelInst); +} + +/*! + * This function is used to allocate a sublevel MMU_WALK_LEVEL_INST + * for a given PDE. If the sublevel allocation succeeds, the parent Level is + * updated. + */ +static NV_STATUS NV_NOINLINE +_mmuWalkPdeAcquire +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + const NvU32 entryIndex, + const NvU32 subLevel, + const NvU64 vaLo, + const NvU64 vaHi, + MMU_WALK_LEVEL_INST *pSubLevelInsts[] +) +{ + NV_STATUS status = NV_OK; + NvBool bCommit = NV_FALSE; + NvU32 i; + const MMU_WALK_MEMDESC *pSubMemDescs[MMU_FMT_MAX_SUB_LEVELS] = {0}; + NvU64 vaLimit = vaHi; + const NvU32 numSubLevels = pLevel->pFmt->numSubLevels; + MMU_WALK_LEVEL_INST *pCurSubLevelInsts[MMU_FMT_MAX_SUB_LEVELS] = {0}; + + // + // Determine minimum VA limit of existing sub-levels. + // This is required to keep parallel partial page tables in sync. 
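    // (Illustrative: if a 4K sub-level instance for this PDE already holds 256
    //  entries, minVaLimit below comes out to the sub-level base plus 1MB - 1,
    //  and vaLimit is raised to at least that even when vaHi is smaller.)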
+ // MMU HW that supports partial size tables selects the size in the + // parent PDE so each sub-level *MUST* be the same partial size + // once allocated. + // + if (numSubLevels > 1) + { + for (i = 0; i < numSubLevels; ++i) + { + // Lookup sub-level instance. + if (NV_OK == btreeSearch(vaLo, (NODE**)&pCurSubLevelInsts[i], + (NODE*)pLevel->subLevels[i].pInstances)) + { + const MMU_FMT_LEVEL *pSubLevelFmt = pLevel->pFmt->subLevels + i; + const NvU64 minVaLimit = + mmuFmtLevelVirtAddrLo(pSubLevelFmt, vaLo) + + (pCurSubLevelInsts[i]->memSize / + pSubLevelFmt->entrySize * + mmuFmtLevelPageSize(pSubLevelFmt)) - 1; + + vaLimit = NV_MAX(vaLimit, minVaLimit); + } + } + } + + // + // the loop was reversed for NV4K, if there are multiple sublevels + // handling small PT first, then the big PT + // + for (i = numSubLevels; i > 0; --i) + { + NvBool bChanged = NV_FALSE; + NvU32 subLevelIdx = i - 1; + NvBool bTarget = (subLevelIdx == subLevel); + NvBool bInitNv4k = NV_FALSE; + + // + // If NV4K is required (when ATS is enabled), acquire 64K PT + // whenever the 4K PT has been acquired and 64K PT was not + // there + // + if (pWalk->flags.bAtsEnabled && subLevelIdx == 0 && + numSubLevels > 1 && !pOpParams->bRelease) + { + if (pSubLevelInsts[1] != NULL) + { + bTarget = NV_TRUE; + } + if (pSubLevelInsts[0] == NULL) + { + bInitNv4k = NV_TRUE; + } + } + + // Acquire sub-level instance. + NV_ASSERT_OK_OR_RETURN( + _mmuWalkLevelInstAcquire(pWalk, pLevel->subLevels + subLevelIdx, + vaLo, vaLimit, bTarget, + pOpParams->bRelease, pOpParams->bCommit, + &bChanged, &pSubLevelInsts[subLevelIdx], + bInitNv4k)); + if (NULL == pSubLevelInsts[subLevelIdx]) + { + // Skip missing non-target instances. + NV_ASSERT(pOpParams->bRelease || !bTarget); + continue; + } + + // Track info for commit. + bCommit |= bChanged; + pSubMemDescs[subLevelIdx] = pSubLevelInsts[subLevelIdx]->pMemDesc; + } + + // DEBUG assert + if (pWalk->flags.bAtsEnabled && + numSubLevels > 1 && + pSubLevelInsts[1] != NULL && + pSubLevelInsts[0] == NULL) + { + NV_ASSERT(0); + } + + if (bCommit || pOpParams->bCommit) + { + NvBool bDone; + + // Update the current pde + bDone = pWalk->pCb->UpdatePde(pWalk->pUserCtx, pLevel->pFmt, pLevelInst->pMemDesc, + entryIndex, pSubMemDescs); + NV_ASSERT_OR_RETURN(bDone, NV_ERR_INVALID_STATE); + + // Track entry as a PDE. + mmuWalkSetEntryState(pLevelInst, entryIndex, MMU_ENTRY_STATE_IS_PDE); + } + + return status; +} + +/*! + * Frees the sub levels of the PDE passed in if thier refcount is 0. It + * also clears the PDE if both sublevels are released. + */ +static void NV_NOINLINE +_mmuWalkPdeRelease +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + const NvU32 entryIndex, + const NvU64 entryVaLo +) +{ + MMU_WALK_LEVEL_INST *pSubLevelInsts[MMU_FMT_MAX_SUB_LEVELS] = {0}; + const MMU_WALK_MEMDESC *pSubMemDescs[MMU_FMT_MAX_SUB_LEVELS] = {0}; + NvBool bChanged = NV_FALSE; + NvU32 subLevel, i; + MMU_ENTRY_STATE state = MMU_ENTRY_STATE_INVALID; + + // Apply target state if this is a fill operation. + if (pOpParams->bFill) + { + const MMU_FILL_TARGET *pTarget = (const MMU_FILL_TARGET *)pOpParams->pOpCtx; + state = pTarget->entryState; + } + + // + // Loop through the sublevels and free up those with 0 ref count. + // We operate on a temp copy of the PDE because we want to update the + // PDE memory before releasing the actual sublevel pointers. 
We need this order + // to prevent any state inconsistency between the parent MMU_DESC_PDE and + // the sublevel MMU_WALK_LEVEL_INST structures. + // + for (i = pLevel->pFmt->numSubLevels; i > 0; --i) + { + subLevel = i - 1; + if (NV_OK == btreeSearch(entryVaLo, (NODE**)&pSubLevelInsts[subLevel], + (NODE*)pLevel->subLevels[subLevel].pInstances)) + { + MMU_WALK_LEVEL_INST *pSubLevelInst = pSubLevelInsts[subLevel]; + + // for ATS NV4K, check if we need to free the big page + if (pLevel->pFmt->numSubLevels == 2 && subLevel == 0) + { + if (pWalk->flags.bAtsEnabled) + { + if (pSubLevelInsts[0]->numNv4k == + mmuFmtLevelEntryCount(pLevel->subLevels[0].pFmt) && + (0 == pSubLevelInsts[0]->numReserved) && + (pSubMemDescs[1] == NULL || bChanged == NV_TRUE)) + { + bChanged = NV_TRUE; + continue; + } + else + { + state = MMU_ENTRY_STATE_IS_PDE; + pSubMemDescs[subLevel] = pSubLevelInst->pMemDesc; + continue; + } + } + } + + if ((0 != (pSubLevelInst->numValid + pSubLevelInst->numSparse)) || + (0 != (pSubLevelInst->numReserved + pSubLevelInst->numHybrid))) + { + // We've got at least one non-empty sublevel, so leave it mapped. + state = MMU_ENTRY_STATE_IS_PDE; + pSubMemDescs[subLevel] = pSubLevelInst->pMemDesc; + } + else if (NULL != pSubLevelInst->pMemDesc) + { + // We're going to free a sub-level. + bChanged = NV_TRUE; + } + } + } + + // + // Failure path may have aborted early before sub-levels processed, + // so also check that current state matches expected. + // + bChanged |= (state != mmuWalkGetEntryState(pLevelInst, entryIndex)); + + // + // If we've changed any sublevel we need to update the PDE in the parent + // Page Directory + // + if (bChanged) + { + NvBool bDone; + NvU32 progress = 0; + + // Init the PDE attribs with the temp PDE which has the cleared sublevel + switch (state) + { + case MMU_ENTRY_STATE_SPARSE: + case MMU_ENTRY_STATE_INVALID: + pWalk->pCb->FillEntries(pWalk->pUserCtx, + pLevel->pFmt, + pLevelInst->pMemDesc, + entryIndex, + entryIndex, + MMU_ENTRY_STATE_SPARSE == state ? + MMU_WALK_FILL_SPARSE : MMU_WALK_FILL_INVALID, + &progress); + NV_ASSERT_OR_RETURN_VOID(progress == 1); + // Clear the hybrid flag since all sub-levels are now released. + if (pLevelInst->pStateTracker[entryIndex].bHybrid) + { + mmuWalkSetEntryHybrid(pLevelInst, entryIndex, NV_FALSE); + } + break; + case MMU_ENTRY_STATE_IS_PDE: + bDone = pWalk->pCb->UpdatePde(pWalk->pUserCtx, pLevel->pFmt, pLevelInst->pMemDesc, + entryIndex, pSubMemDescs); + NV_ASSERT_OR_RETURN_VOID(bDone); + break; + default: + NV_ASSERT_OR_RETURN_VOID(0); + } + + // Track new state of entry. + mmuWalkSetEntryState(pLevelInst, entryIndex, state); + } + + // Free up the actual sublevels from the PDE + for (subLevel = 0; subLevel < pLevel->pFmt->numSubLevels; ++subLevel) + { + MMU_WALK_LEVEL_INST *pSubLevelInst = pSubLevelInsts[subLevel]; + if (NULL != pSubLevelInst && + NULL == pSubMemDescs[subLevel]) + { + _mmuWalkLevelInstRelease(pWalk, pLevel->subLevels + subLevel, + pSubLevelInst); + } + } +} + +static void +_mmuWalkLevelInstancesForceFree +( + MMU_WALK *pWalk, + MMU_WALK_LEVEL *pLevel +) +{ + MMU_WALK_LEVEL_INST *pLevelInst = NULL; + NvU32 subLevel; + + if (NULL == pLevel) + return; + + // Free all instances at this level. + btreeEnumStart(0, (NODE **)&pLevelInst, (NODE*)pLevel->pInstances); + while (NULL != pLevelInst) + { + // + // Since we are force freeing everything, it is okay to reset these fields + // in order to avoid hitting asserts in _mmuWalkLevelInstRelease. 
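        // (Specifically, _mmuWalkLevelInstRelease asserts that numValid and
        //  numReserved are both zero before freeing an instance.)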
+ // + pLevelInst->numValid = 0; + pLevelInst->numReserved = 0; + _mmuWalkLevelInstRelease(pWalk, pLevel, pLevelInst); + btreeEnumStart(0, (NODE **)&pLevelInst, (NODE*)pLevel->pInstances); + } + pLevel->pInstances = NULL; + + if (NULL != pLevel->subLevels) + { + for (subLevel = 0; subLevel < pLevel->pFmt->numSubLevels; subLevel++) + { + _mmuWalkLevelInstancesForceFree(pWalk, pLevel->subLevels + subLevel); + } + } +} + diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_commit.c b/src/nvidia/src/libraries/mmu/mmu_walk_commit.c new file mode 100644 index 000000000..acaacea46 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_commit.c @@ -0,0 +1,157 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*--------------------------------Includes------------------------------------*/ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#endif +#include "mmu_walk_private.h" + +/* ------------------------ Static Function Prototypes ---------------------- */ +static MmuWalkOp _mmuWalkCommitPDEs; +static MmuWalkOpSelectSubLevel _mmuWalkCommitPDEsSelectSubLevel; + +/*----------------------------Public Functions--------------------------------*/ + +NV_STATUS +mmuWalkCommitPDEs +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaLo, + const NvU64 vaHi +) +{ + MMU_WALK_OP_PARAMS opParams = {0}; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != mmuWalkFindLevel(pWalk, pLevelFmt), + NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED(vaLo, mmuFmtLevelPageSize(pLevelFmt)), + NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED(vaHi + 1, mmuFmtLevelPageSize(pLevelFmt)), + NV_ERR_INVALID_ARGUMENT); + + // Acquire the root. Call unconditionally to account for change of size + status = mmuWalkRootAcquire(pWalk, vaLo, vaHi, NV_TRUE); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // Construct the map op params + opParams.pOpCtx = pLevelFmt; + opParams.opFunc = _mmuWalkCommitPDEs; + opParams.selectSubLevel = _mmuWalkCommitPDEsSelectSubLevel; + opParams.bIgnoreSubLevelConflicts = NV_TRUE; + opParams.bCommit = NV_TRUE; + + // Start reserving from root (only one instance). 
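    // When bUseIterative is set, mmuWalkProcessPdes drives the whole walk and
    // _mmuWalkCommitPDEs signals descent by returning
    // NV_ERR_MORE_PROCESSING_REQUIRED; otherwise the walk recurses through
    // opFunc.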
+ if (pWalk->flags.bUseIterative) + { + status = mmuWalkProcessPdes(pWalk, &opParams, &pWalk->root, pWalk->root.pInstances, + vaLo, vaHi); + } + else + { + status = _mmuWalkCommitPDEs(pWalk, &opParams, &pWalk->root, pWalk->root.pInstances, + vaLo, vaHi); + } + + return status; +} + +/* ----------------------------- Static Functions---------------------------- */ + +static NV_STATUS +_mmuWalkCommitPDEs +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + NvU64 vaLo, + NvU64 vaHi +) +{ + const MMU_FMT_LEVEL *pLevelFmt = (const MMU_FMT_LEVEL *) pOpParams->pOpCtx; + + // If this level is not the targetted page level. + if (pLevel->pFmt != pLevelFmt) + { + NV_ASSERT_OR_RETURN(0 != pLevel->pFmt->numSubLevels, NV_ERR_INVALID_ARGUMENT); + + if (pWalk->flags.bUseIterative) + { + return NV_ERR_MORE_PROCESSING_REQUIRED; + } + else + { + // Process all the page level entries falling within [vaLo, vaHi] + NV_ASSERT_OK_OR_RETURN(mmuWalkProcessPdes(pWalk, + pOpParams, + pLevel, + pLevelInst, + vaLo, + vaHi)); + } + } + // + // We don't care about the PTEs here. + // If needed, support for PTEs can be added later. + // + + return NV_OK; +} + +static NV_STATUS +_mmuWalkCommitPDEsSelectSubLevel +( + const void *pOpCtx, + const MMU_WALK_LEVEL *pLevel, + NvU32 *pSubLevel, + NvU64 vaLo, + NvU64 vaHi +) +{ + const MMU_FMT_LEVEL *pLevelFmt = (const MMU_FMT_LEVEL *) pOpCtx; + NvU32 subLevel; + const MMU_FMT_LEVEL *pSubLevelFmt = NULL; + + // If we've only one sublevel, it's at index 0. + if (pLevel->pFmt->numSubLevels == 1) + { + *pSubLevel = 0; + return NV_OK; + } + + for (subLevel = 0; subLevel < pLevel->pFmt->numSubLevels; subLevel++) + { + pSubLevelFmt = pLevel->pFmt->subLevels + subLevel; + + if (pSubLevelFmt == pLevelFmt) + { + *pSubLevel = subLevel; + return NV_OK; + } + } + // Error if we didn't find a matching page size + return NV_ERR_INVALID_STATE; +} diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_fill.c b/src/nvidia/src/libraries/mmu/mmu_walk_fill.c new file mode 100644 index 000000000..037b52d59 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_fill.c @@ -0,0 +1,396 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/* ------------------------ Includes --------------------------------------- */ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#endif +#include "mmu_walk_private.h" + +static NV_STATUS _mmuWalkPostFillPTETasks +( + const MMU_WALK *pWalk, + MMU_WALK_LEVEL *pLevel, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + const MMU_WALK_FILL_STATE fillState, + const NvU64 vaLo +); + +/* ------------------------ Public Functions ------------------------------ */ +/** + * @brief Fill a VA range to a constant state for levels below the root. + * + * @details This function is of MmuWalkOp function type. Used by + * mmuWalkUnmap and mmuWalkSparsify, which fills INVALID and SPARSE + * states to the target page levels respectively. + * With NV4K introduced with VOLTA ATS, cross PT inconsistency + * during unmapping and sparsifying is handled here. + * + * @todo Recover from failure. It is difficult to do because rollbacks + * are costly and complex. Do we really want recovery or asserts? + * If the later one, we can replace those recovery codes with + * asserts. + * + * @copydoc MmuWalkCBOpFunc + * + * @return NV_OK on success + */ +NV_STATUS mmuWalkFill +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + NvU64 vaLo, + NvU64 vaHi +) +{ + const MMU_FILL_TARGET *pTarget = (const MMU_FILL_TARGET *) pOpParams->pOpCtx; + + if (NULL == pLevelInst) + return NV_OK; + + // If this level is a Page Directory, we keep walking down the tree. + if (0 != pLevel->pFmt->numSubLevels) + { + if (pWalk->flags.bUseIterative) + { + return NV_ERR_MORE_PROCESSING_REQUIRED; + } + else + { + // Process all the page level entries.falling within [vaLo, vaHi] + NV_ASSERT_OK_OR_RETURN( + mmuWalkProcessPdes(pWalk, + pOpParams, + pLevel, + pLevelInst, + vaLo, + vaHi)); + } + } + // We have reached a page table + else + { + const NvU32 entryIndexLo = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaLo); + const NvU32 entryIndexHi = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaHi); + NvU32 progress = 0; + NV_STATUS status = NV_OK; + // Calculate number of entries in the level + NvU64 numEntries = mmuFmtLevelEntryCount(pLevel->pFmt); + NvU32 entryIndex; + + NV_ASSERT_OR_RETURN(pLevel->pFmt->bPageTable, NV_ERR_INVALID_ARGUMENT); + + // Make sure all PTEs are contained within one page table + NV_ASSERT_OR_RETURN((entryIndexLo / numEntries) == + (entryIndexHi / numEntries), + NV_ERR_INVALID_ARGUMENT); + + if (pWalk->pStagingBuffer != NULL && pWalk->bUseStagingBuffer) + { + // + // Clear out the PTEs modulo number of entries in table + // We do a modulo of number of entries in the table so that + // we do not exceed the allocated sysmem page. 
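            // (Hypothetical numbers: with numEntries = 512 and
            //  entryIndexLo..entryIndexHi = 1030..1040, the staging buffer is
            //  filled at indices 6..16 and WriteBuffer then copies the entries
            //  to 1030..1040 in the real page table.)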
+ // + pWalk->pCb->FillEntries(pWalk->pUserCtx, + pLevel->pFmt, + pWalk->pStagingBuffer, + entryIndexLo % numEntries, + entryIndexHi % numEntries, + pTarget->fillState, + &progress); + + // Copy from staging buffer to final location + pWalk->pCb->WriteBuffer(pWalk->pUserCtx, + pWalk->pStagingBuffer, + pLevelInst->pMemDesc, + entryIndexLo, + entryIndexHi, + numEntries, + pLevel->pFmt->entrySize); + } + else + { + // Clear out the PTEs + pWalk->pCb->FillEntries(pWalk->pUserCtx, + pLevel->pFmt, + pLevelInst->pMemDesc, + entryIndexLo, + entryIndexHi, + pTarget->fillState, + &progress); + } + + NV_ASSERT_OR_RETURN(progress == entryIndexHi - entryIndexLo + 1, NV_ERR_INVALID_STATE); + + // Update the state tracker + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++) + { + mmuWalkSetEntryState(pLevelInst, entryIndex, pTarget->entryState); + } + + // Post fill tasks + status = _mmuWalkPostFillPTETasks(pWalk, pLevel, entryIndexLo, + entryIndexHi, pTarget->fillState, vaLo); + NV_ASSERT_OR_RETURN(status == NV_OK, NV_ERR_INVALID_STATE); + } + + return NV_OK; +} + +/*! + * TODO + */ +NV_STATUS +mmuWalkFillSelectSubLevel +( + const void *pOpCtx, + const MMU_WALK_LEVEL *pLevel, + NvU32 *pSubLevel, + NvU64 vaLo, + NvU64 vaHi +) +{ + NvBool bFound = NV_FALSE; + NvU32 subLevel; + const MMU_FMT_LEVEL *pSubLevelFmt = NULL; + + // If we've only one sublevel, it's at index 0. + if (pLevel->pFmt->numSubLevels == 1) + { + *pSubLevel = 0; + return NV_OK; + } + + for (subLevel = 0; subLevel < pLevel->pFmt->numSubLevels; subLevel++) + { + pSubLevelFmt = pLevel->pFmt->subLevels + subLevel; + + if ((( vaLo & mmuFmtEntryVirtAddrMask(pSubLevelFmt)) == 0) && + (((vaHi+1) & mmuFmtEntryVirtAddrMask(pSubLevelFmt)) == 0)) + { + MMU_WALK_LEVEL_INST *pSubLevelInst = NULL; + if (!bFound) + { + bFound = NV_TRUE; + *pSubLevel = subLevel; + } + if (NV_OK == btreeSearch(vaLo, (NODE**)&pSubLevelInst, + (NODE*)pLevel->subLevels[subLevel].pInstances)) + { + *pSubLevel = subLevel; + return NV_OK; + } + } + } + + // Error if virt addresses are not aligned to any page size. + return bFound ? NV_OK : NV_ERR_INVALID_STATE; +} + +/** + * @brief Determines if entries indexLo to indexHi (inclusive) are + * all invalid. + * + * @param pLevelInst The level instance + * @param[in] indexLo The index lower + * @param[in] indexHi The index higher + * + * @return True if no level instance or all entries are invalid, + * False otherwise. + */ +static NvBool +_isRangeAllInvalid +( + MMU_WALK_LEVEL_INST *pLevelInst, + const NvU32 indexLo, + const NvU32 indexHi +) +{ + NvU32 i; + MMU_ENTRY_STATE entryState; + + if (pLevelInst == NULL) + return NV_TRUE; + + for (i = indexLo; i <= indexHi; i++) + { + entryState = mmuWalkGetEntryState(pLevelInst, i); + if (MMU_ENTRY_STATE_INVALID != entryState) + return NV_FALSE; + } + return NV_TRUE; +} + +/** + * @brief Post PTE filling tasks to handle cross PTs inconsistency + * + * @details Helper function inside mmuWalkFill PT level to update 64K PTEs + * after mmuWalkFill operation complete. It gathers mmuWalkFill + * target entry index range and fillState as input and update + * 64K PTEs accordingly. The function doesn't handle extra page + * table allocations and deallocations. It relies on + * _mmuWalkPdeAcquire and _mmuWalkPdeRelease to prepare and + * cleanup page levels accordingly. + * + * @todo Recovery on failure. Same discussion as in mmuWalkFill. 
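 *
 * @note  Worked example (illustrative, assuming the 64K + 4K geometry): after
 *        invalidating 4K PTEs 18..61, the covering 64K PTEs 1..3 become NV4K
 *        candidates; the head group 16..17 and tail group 62..63 are checked
 *        first, and any still-valid 4K PTE there keeps the corresponding 64K
 *        PTE out of the NV4K fill.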
+ * + * @param[in] pWalk The walk + * @param pLevel The level, used to get fmt and btree root + * @param[in] entryIndexLo The lower entry index (inclusive) + * @param[in] entryIndexHi The entry higher index (inclusive) + * @param[in] fillState The fill state + * @param[in] virtAddr The lower VA, to get the key for btrees + * + * @return NV_OK on success, NV_ERR_INVALID_ARGUMENT otherwise + */ +static NV_STATUS _mmuWalkPostFillPTETasks +( + const MMU_WALK *pWalk, + MMU_WALK_LEVEL *pLevel, + const NvU32 entryIndexLo, + const NvU32 entryIndexHi, + const MMU_WALK_FILL_STATE fillState, + const NvU64 virtAddr +) +{ + const MMU_FMT_LEVEL *pFmtLevel = pLevel->pFmt; + + // + // NV4K is only necessary for ATS + // Only update 64K PTEs on invalidation, not on sparsifying + // + if (pWalk->flags.bAtsEnabled && fillState == MMU_WALK_FILL_INVALID) + { + const NvU32 pageSize = NvU64_LO32(mmuFmtLevelPageSize(pFmtLevel)); + + NvU32 progress = 0; + NvU32 entryIndex = 0; + NvU32 indexLo_4K, indexHi_4K, indexLo_64K, indexHi_64K; + + MMU_WALK_LEVEL *pParent = pLevel->pParent; + MMU_WALK_LEVEL *pLevel64K = pParent->subLevels; + MMU_WALK_LEVEL *pLevel4K = pParent->subLevels + 1; + NvU64 vaLevelBase = mmuFmtLevelVirtAddrLo(pLevel64K->pFmt, virtAddr); + MMU_WALK_LEVEL_INST *pLevel64KInst = NULL; + MMU_WALK_LEVEL_INST *pLevel4KInst = NULL; + + // search for the instances + btreeSearch(vaLevelBase, (NODE**)&pLevel64KInst, + (NODE*)pLevel64K->pInstances); + btreeSearch(vaLevelBase, (NODE**)&pLevel4KInst, + (NODE*)pLevel4K->pInstances); + + // + // if 4K page table was modified in mmuWalkFill + // check the range and update 64K PTEs accordingly + // + if (pageSize == 0x1000) + { + // get involved 64K PTEs and 4K PTEs + mmuFmtCalcAlignedEntryIndices(pLevel4K->pFmt, entryIndexLo, + entryIndexHi, pLevel64K->pFmt, &indexLo_64K, &indexHi_64K); + mmuFmtCalcAlignedEntryIndices(pLevel64K->pFmt, indexLo_64K, + indexHi_64K, pLevel4K->pFmt, &indexLo_4K, &indexHi_4K); + + // if only one 64K PTE involved, check a single 16 4K PTE group + if (indexLo_64K == indexHi_64K) + { + if (!_isRangeAllInvalid(pLevel4KInst, indexLo_4K, indexHi_4K)) + { + indexLo_64K++; + } + } + // otherwise check the head and tail groups + else + { + if (indexLo_4K < entryIndexLo && + !_isRangeAllInvalid(pLevel4KInst, indexLo_4K, + entryIndexLo - 1)) + { + indexLo_64K++; + } + if (indexHi_4K > entryIndexHi && + !_isRangeAllInvalid(pLevel4KInst, entryIndexHi + 1, + indexHi_4K)) + { + indexHi_64K--; + } + } + + // update 64K PT given the indexes calculated above + if (indexLo_64K <= indexHi_64K) + { + pWalk->pCb->FillEntries(pWalk->pUserCtx, pLevel64K->pFmt, + pLevel64KInst->pMemDesc, indexLo_64K, indexHi_64K, + MMU_WALK_FILL_NV4K, &progress); + NV_ASSERT_OR_RETURN(progress == indexHi_64K - indexLo_64K + 1, + NV_ERR_INVALID_STATE); + // update entry states + for (entryIndex = indexLo_64K; entryIndex <= indexHi_64K; + entryIndex++) + { + mmuWalkSetEntryState(pLevel64KInst, entryIndex, + MMU_ENTRY_STATE_NV4K); + } + } + } + // + // if 64K page table is invalidated in mmuWalkFill + // correct the state as NV4K + // @todo move this portion to mmuWalkFill + // + else if (pageSize == 0x10000) + { + mmuFmtCalcAlignedEntryIndices(pLevel64K->pFmt, entryIndexLo, + entryIndexHi, pLevel4K->pFmt, &indexLo_4K, &indexHi_4K); + + // the 4K PTE should have already been invalid + NV_ASSERT_OR_RETURN(_isRangeAllInvalid(pLevel4KInst, indexLo_4K, + indexHi_4K), NV_ERR_INVALID_STATE); + + // Set 64K PTEs NV4K + pWalk->pCb->FillEntries(pWalk->pUserCtx, pLevel64K->pFmt, + 
pLevel64KInst->pMemDesc, entryIndexLo, entryIndexHi, + MMU_WALK_FILL_NV4K, &progress); + NV_ASSERT_OR_RETURN(progress == entryIndexHi - entryIndexLo + 1, + NV_ERR_INVALID_STATE); + + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; + entryIndex++) + { + mmuWalkSetEntryState(pLevel64KInst, entryIndex, + MMU_ENTRY_STATE_NV4K); + } + } + // NV4K only works with 64K PT + 4K PT comibination + else + { + NV_ASSERT_OR_RETURN(0, NV_ERR_INVALID_ARGUMENT); + } + } + return NV_OK; +} diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_info.c b/src/nvidia/src/libraries/mmu/mmu_walk_info.c new file mode 100644 index 000000000..a515d3777 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_info.c @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*--------------------------------Includes------------------------------------*/ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#endif +#include "mmu_walk_private.h" + +/*----------------------------Public Functions--------------------------------*/ +NV_STATUS +mmuWalkGetPageLevelInfo +( + const MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 virtAddr, + const MMU_WALK_MEMDESC **ppMemDesc, + NvU32 *pMemSize +) +{ + const MMU_WALK_LEVEL *pLevel = mmuWalkFindLevel(pWalk, pLevelFmt); + MMU_WALK_LEVEL_INST *pLevelInst = NULL; + + // Clear outputs. + *ppMemDesc = NULL; + *pMemSize = 0; + + // Validate level. + NV_ASSERT_OR_RETURN(NULL != pLevel, NV_ERR_INVALID_ARGUMENT); + + // Lookup level instance. + if (NV_OK == btreeSearch(virtAddr, (NODE**)&pLevelInst, &pLevel->pInstances->node)) + { + // Return memory info if found. + *ppMemDesc = pLevelInst->pMemDesc; + *pMemSize = pLevelInst->memSize; + } + + return NV_OK; +} + diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_map.c b/src/nvidia/src/libraries/mmu/mmu_walk_map.c new file mode 100644 index 000000000..e7852fb75 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_map.c @@ -0,0 +1,253 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file mmu_walk_map.c + * @brief Describes the structures and interfaces used to map N level page tables + */ + +/* ------------------------ Includes --------------------------------------- */ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#endif +#include "mmu_walk_private.h" + +/* ------------------------ Macros ------------------------------------------ */ +/* ------------------------ Static Function Prototypes ---------------------- */ +static MmuWalkOp _mmuWalkMap; +static MmuWalkOpSelectSubLevel _mmuWalkMapSelectSubLevel; + +/* ------------------------ Inline Functions ---------------------------------*/ +/* ------------------------ Public Functions ------------------------------ */ + +NV_STATUS +mmuWalkMap +( + MMU_WALK *pWalk, + const NvU64 vaLo, + const NvU64 vaHi, + const MMU_MAP_TARGET *pTarget +) +{ + MMU_WALK_OP_PARAMS opParams = {0}; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pWalk, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pTarget, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaLo <= vaHi, NV_ERR_INVALID_ARGUMENT); + + // Acquire the root. Call unconditionally to account for change of size + status = mmuWalkRootAcquire(pWalk, vaLo, vaHi, NV_FALSE); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // Construct the map op params + opParams.pOpCtx = pTarget; + opParams.opFunc = _mmuWalkMap; + opParams.selectSubLevel = _mmuWalkMapSelectSubLevel; + + // Start mapping from root (only one instance). + if (pWalk->flags.bUseIterative) + { + status = mmuWalkProcessPdes(pWalk, &opParams, &pWalk->root, pWalk->root.pInstances, vaLo, vaHi); + } + else + { + _mmuWalkMap(pWalk, &opParams, &pWalk->root, pWalk->root.pInstances, vaLo, vaHi); + } + if (NV_OK != status) + { + NV_STATUS unmapStatus; + + NV_PRINTF(LEVEL_ERROR, + "Failed to map VA Range 0x%llx to 0x%llx. Status = 0x%08x\n", + vaLo, vaHi, status); + NV_ASSERT(0); + + // Mapping failed, unwind by unmapping the VA range + unmapStatus = mmuWalkUnmap(pWalk, vaLo, vaHi); + if (NV_OK != unmapStatus) + { + NV_PRINTF(LEVEL_ERROR, + "Unmap failed with status = 0x%08x\n", + unmapStatus); + NV_ASSERT(NV_OK == unmapStatus); + } + } + + return status; +} + +/* ----------------------------- Static Functions---------------------------- */ + +/*! + * Implements the VA mapping operation after the root has been allocated. 
+ * @copydoc MmuWalkOp + */ +static NV_STATUS +_mmuWalkMap +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + NvU64 vaLo, + NvU64 vaHi +) +{ + const MMU_MAP_TARGET *pTarget = (const MMU_MAP_TARGET *) pOpParams->pOpCtx; + + NV_ASSERT_OR_RETURN(NULL != pLevelInst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pLevel, NV_ERR_INVALID_ARGUMENT); + + // If this level is not the targetted page level. + if (pLevel->pFmt != pTarget->pLevelFmt) + { + NV_ASSERT_OR_RETURN(0 != pLevel->pFmt->numSubLevels, NV_ERR_INVALID_ARGUMENT); + + if (pWalk->flags.bUseIterative) + { + return NV_ERR_MORE_PROCESSING_REQUIRED; + } + else + { + // Process all the page level entries falling within [vaLo, vaHi] + NV_ASSERT_OK_OR_RETURN( + mmuWalkProcessPdes(pWalk, + pOpParams, + pLevel, + pLevelInst, + vaLo, + vaHi)); + } + } + // We have reached the target page level. + else + { + const NvU32 entryIndexLo = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaLo); + const NvU32 entryIndexHi = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaHi); + NvU32 progress = 0; + NvU32 entryIndex; + + // Ensure child-sub-levels are unmapped before mapping "hybrid" PDE-PTEs. + if (0 != pLevel->pFmt->numSubLevels) + { + const NvU64 vaLevelBase = mmuFmtLevelVirtAddrLo(pLevel->pFmt, vaLo); + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++) + { + // But don't unmap existing target entries since the "mapping" below can be RMW. + if (MMU_ENTRY_STATE_IS_PTE != mmuWalkGetEntryState(pLevelInst, entryIndex)) + { + const NvU64 entryVaLo = + mmuFmtEntryIndexVirtAddrLo(pLevel->pFmt, vaLevelBase, entryIndex); + const NvU64 entryVaHi = + mmuFmtEntryIndexVirtAddrHi(pLevel->pFmt, vaLevelBase, entryIndex); + + if (pWalk->flags.bUseIterative) + { + NV_ASSERT_OK_OR_RETURN( + mmuWalkProcessPdes(pWalk, + &g_opParamsUnmap, + pLevel, + pLevelInst, + entryVaLo, + entryVaHi)); + } + else + { + NV_ASSERT_OK_OR_RETURN( + g_opParamsUnmap.opFunc(pWalk, + &g_opParamsUnmap, + pLevel, + pLevelInst, + entryVaLo, + entryVaHi)); + } + + // + // If this entry is still a PDE it means there are reserved sub-levels underneath. + // Mark the entry as a hybrid so that its instance remains pinned appropriately. + // + if (MMU_ENTRY_STATE_IS_PDE == mmuWalkGetEntryState(pLevelInst, entryIndex)) + { + mmuWalkSetEntryHybrid(pLevelInst, entryIndex, NV_TRUE); + } + } + } + } + + // Map the next batch of entry values. + pTarget->MapNextEntries(pWalk->pUserCtx, + pTarget, + pLevelInst->pMemDesc, + entryIndexLo, + entryIndexHi, + &progress); + NV_ASSERT_OR_RETURN(progress == entryIndexHi - entryIndexLo + 1, NV_ERR_INVALID_STATE); + + // Loop over PTEs again to update state tracker. + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++) + { + mmuWalkSetEntryState(pLevelInst, entryIndex, MMU_ENTRY_STATE_IS_PTE); + } + } + + return NV_OK; +} + +/*! + * TODO + */ +static NV_STATUS +_mmuWalkMapSelectSubLevel +( + const void *pOpCtx, + const MMU_WALK_LEVEL *pLevel, + NvU32 *pSubLevel, + NvU64 vaLo, + NvU64 vaHi +) +{ + const MMU_MAP_TARGET *pTarget = (const MMU_MAP_TARGET *) pOpCtx; + NvU32 subLevel; + const MMU_FMT_LEVEL *pSubLevelFmt = NULL; + + // If we've only one sublevel, it's at index 0. 
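    // (With dual sub-levels, index 0 is the big-page table and index 1 the
    //  small-page table; the loop below simply matches the caller's target
    //  format pointer against each sub-level format.)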
+ if (pLevel->pFmt->numSubLevels == 1) + { + *pSubLevel = 0; + return NV_OK; + } + + for (subLevel = 0; subLevel < pLevel->pFmt->numSubLevels; subLevel++) + { + pSubLevelFmt = pLevel->pFmt->subLevels + subLevel; + + if (pSubLevelFmt == pTarget->pLevelFmt) + { + *pSubLevel = subLevel; + return NV_OK; + } + } + // Error if we didn't find a matching page size + return NV_ERR_INVALID_STATE; +} diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_migrate.c b/src/nvidia/src/libraries/mmu/mmu_walk_migrate.c new file mode 100644 index 000000000..3dce6eb53 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_migrate.c @@ -0,0 +1,189 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*--------------------------------Includes------------------------------------*/ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#endif +#include "mmu_walk_private.h" +#include "gpu/mem_mgr/mem_desc.h" + +/*----------------------------Public Functions--------------------------------*/ +NV_STATUS +mmuWalkModifyLevelInstance +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + MMU_WALK_MEMDESC *pNewMem, + const NvU32 newSize, + const NvBool bCopyEntries, + const NvBool bUpdatePde, + const NvBool bIgnoreChannelBusy +) +{ + const MMU_WALK_LEVEL *pLevel = mmuWalkFindLevel(pWalk, pLevelFmt); + MMU_WALK_LEVEL_INST *pLevelInst = NULL; + MMU_WALK_MEMDESC *pOldMem; + NvU32 oldSize; + NvU32 entryIndexHiOld; + NvU32 entryIndexHiNew; + NvU32 entryIndexHiCopy; + NvU32 progress; + + // Validate user callback. + NV_ASSERT_OR_RETURN(NULL != pWalk->pCb->CopyEntries, NV_ERR_NOT_SUPPORTED); + + // Validate level. + NV_ASSERT_OR_RETURN(NULL != pLevel, NV_ERR_INVALID_ARGUMENT); + + // Lookup level instance. + btreeSearch(vaBase, (NODE**)&pLevelInst, &pLevel->pInstances->node); + NV_ASSERT_OR_RETURN(NULL != pLevelInst, NV_ERR_INVALID_ARGUMENT); + + // Temp old memory. + pOldMem = pLevelInst->pMemDesc; + oldSize = pLevelInst->memSize; + + // Save new memory. + pLevelInst->pMemDesc = pNewMem; + pLevelInst->memSize = newSize; + + // + // Enforce alignment. 
+ // This check is applicable to the case of @ref mmuWalkMigrateLevelInstance + // + if (bUpdatePde) + { + NV_ASSERT_OR_RETURN(vaBase == pLevelInst->node.keyStart, NV_ERR_INVALID_ARGUMENT); + } + + entryIndexHiOld = (oldSize / pLevelFmt->entrySize) - 1; + entryIndexHiNew = (newSize / pLevelFmt->entrySize) - 1; + entryIndexHiCopy = NV_MIN(entryIndexHiOld, entryIndexHiNew); + + if (bCopyEntries) + { + // Copy old entries. + pWalk->pCb->CopyEntries(pWalk->pUserCtx, + pLevel->pFmt, + pOldMem, + pNewMem, + 0, + entryIndexHiCopy, + &progress); + NV_ASSERT(progress == entryIndexHiCopy + 1); + + // Fill new entries to invalid. + if (entryIndexHiNew > entryIndexHiOld) + { + pWalk->pCb->FillEntries(pWalk->pUserCtx, + pLevel->pFmt, + pNewMem, + entryIndexHiOld + 1, + entryIndexHiNew, + MMU_WALK_FILL_INVALID, + &progress); + NV_ASSERT(progress == entryIndexHiNew - entryIndexHiOld); + } + } + + // Commit new instance to parent. + if (NULL == pLevel->pParent) + { + // Root case requires PDB update. + NvBool bDone; + bDone = pWalk->pCb->UpdatePdb(pWalk->pUserCtx, pWalk->root.pFmt, + pNewMem, bIgnoreChannelBusy); + NV_ASSERT(bDone); + } + else if (bUpdatePde) + { + // Non-root case requires PDE update. + NvBool bDone; + NvU32 i; + const MMU_WALK_LEVEL *pParent = pLevel->pParent; + MMU_WALK_LEVEL_INST *pParentInst = NULL; + const MMU_WALK_MEMDESC *pSubMemDescs[MMU_FMT_MAX_SUB_LEVELS] = {0}; + const NvU32 entryIndex = mmuFmtVirtAddrToEntryIndex(pParent->pFmt, vaBase); + + // Lookup parent instance. + btreeSearch(vaBase, (NODE**)&pParentInst, &pParent->pInstances->node); + NV_ASSERT(NULL != pParentInst); + + // Collect sub-level memory. + for (i = 0; i < pParent->pFmt->numSubLevels; ++i) + { + const MMU_WALK_LEVEL *pSubLevel = pParent->subLevels + i; + + // Target sub-level picks up new memory. + if (pSubLevel == pLevel) + { + pSubMemDescs[i] = pNewMem; + } + // Other sub-levels need lookup. + else + { + MMU_WALK_LEVEL_INST *pOtherInst = NULL; + btreeSearch(vaBase, (NODE**)&pOtherInst, &pSubLevel->pInstances->node); + if (NULL != pOtherInst) + { + pSubMemDescs[i] = pOtherInst->pMemDesc; + } + } + } + + // Write new PDE value. + bDone = pWalk->pCb->UpdatePde(pWalk->pUserCtx, pParent->pFmt, pParentInst->pMemDesc, + entryIndex, pSubMemDescs); + NV_ASSERT(bDone); + } + + // + // Free old memory, if needed. + // When + // _gmmuWalkCBLevelAlloc_SharedSLI() is removed as part of the FB heap split effort for SLI, + // we will not need (((MEMORY_DESCRIPTOR *)pOldMem)->Allocated > 1). + // + if ((pOldMem != pNewMem) || (((MEMORY_DESCRIPTOR *)pOldMem)->Allocated > 1)) + { + pWalk->pCb->LevelFree(pWalk->pUserCtx, pLevel->pFmt, pLevelInst->node.keyStart, pOldMem); + } + + return NV_OK; +} + +NV_STATUS +mmuWalkMigrateLevelInstance +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaBase, + MMU_WALK_MEMDESC *pNewMem, + const NvU32 newSize, + const NvBool bIgnoreChannelBusy +) +{ + return mmuWalkModifyLevelInstance(pWalk, pLevelFmt, vaBase, pNewMem, + newSize, NV_TRUE, NV_TRUE, bIgnoreChannelBusy); +} diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_private.h b/src/nvidia/src/libraries/mmu/mmu_walk_private.h new file mode 100644 index 000000000..96b918f76 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_private.h @@ -0,0 +1,371 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NV_MMU_WALK_PRIVATE_H_ +#define _NV_MMU_WALK_PRIVATE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* ------------------------ Includes --------------------------------------- */ +#include "mmu/mmu_walk.h" +#include "containers/btree.h" +#include "containers/list.h" + +/* --------------------------- Macros ---------------------------------------- */ +#define HI_PRI_SUBLEVEL_INDEX 0 +#define LO_PRI_SUBLEVEL_INDEX 1 + +/* --------------------------- Datatypes ------------------------------------ */ + +typedef struct MMU_WALK_LEVEL_INST MMU_WALK_LEVEL_INST; +typedef struct MMU_WALK_LEVEL MMU_WALK_LEVEL; +typedef struct MMU_WALK_OP_PARAMS MMU_WALK_OP_PARAMS; + +typedef struct MMU_WALK_PROCESS_PDES_ENTRY MMU_WALK_PROCESS_PDES_ENTRY; +typedef struct MMU_WALK_RELEASE_PDES_ENTRY MMU_WALK_RELEASE_PDES_ENTRY; + +/*! + * Higher-level PDE/PTE states. + */ +typedef enum +{ + /*! + * The entry is not valid and will generate an MMU fault on access. + */ + MMU_ENTRY_STATE_INVALID, + + /*! + * Indicates to drop writes and return 0 on reads + * when this entry is translated, instead of generating a fault. + * + * @note Only supported if MMU_FMT_CAPS.bSparse is set. + */ + MMU_ENTRY_STATE_SPARSE, + + /*! + * Indicates the entry is being used as PTE, mapping to a valid page. + */ + MMU_ENTRY_STATE_IS_PTE, + + /*! + * Indicates the entry is being used as PDE, pointing to one + * or more valid sub-levels. + */ + MMU_ENTRY_STATE_IS_PDE, + + /** + * Indicates that there is no aligned valid 4K PTE for the 64K PTE. + * It is a state only for 64K PTE when ATS is enabled + * + * @note Only supported if pWalk->flags.bAtsEnabled + */ + MMU_ENTRY_STATE_NV4K, + + /*! + * Should be kept at the end of the list so that it can be used + * as a count for the number of states. + */ + MMU_ENTRY_STATE_COUNT +} MMU_ENTRY_STATE; + +typedef struct +{ + NvU8 state : 6; + NvU8 bHybrid : 1; + NvU8 bReserved : 1; +} MMU_ENTRY_INFO; + +/*! + * Describes an entire (horizontal) level of an MMU level hiearchy. + */ +struct MMU_WALK_LEVEL +{ + /*! + * Format of this level. + */ + const MMU_FMT_LEVEL *pFmt; + + /*! + * Back-pointer to parent level. + */ + MMU_WALK_LEVEL *pParent; + + /*! + * Sub-level array of length pFmt->numSubLevels. + */ + MMU_WALK_LEVEL *subLevels; + + /*! + * Level instance tree keyed by VA range. + */ + MMU_WALK_LEVEL_INST *pInstances; + + /*! 
+ * Tree tracking ranges of VA that are reserved (locked down) + * for this level. @see mmuWalkReserveEntries. + */ + NODE *pReservedRanges; +}; + +/*! + * Describes the physical aspects of a single page level instance. + */ +struct MMU_WALK_LEVEL_INST +{ + /*! + * Embedded btree node anchor (must be first). + */ + NODE node; + + /*! + * Memory descriptor for the physical memory backing this page level + */ + MMU_WALK_MEMDESC *pMemDesc; + + /*! + * Current size in bytes of the physical level memory. + */ + NvU32 memSize; + + /*! + * Number of valid entries within this page level. + */ + NvU32 numValid; + + /*! + * Number of sparse entries within this page level. + */ + NvU32 numSparse; + + /*! + * Number of reserved entries (in any state) - @see mmuWalkReserveEntries. + */ + NvU32 numReserved; + + /*! + * Number of hybrid entries - mixed PTE/PDE above reserved sub-levels. + */ + NvU32 numHybrid; + + /** + * Number of NV4K (no aligned valid 4K PTE for a 64K PTE) entries + */ + NvU32 numNv4k; + + /*! + * State tracker for entries of this level instance. + */ + MMU_ENTRY_INFO *pStateTracker; +}; + +struct MMU_WALK +{ + MMU_WALK_USER_CTX *pUserCtx; + const MMU_WALK_CALLBACKS *pCb; + MMU_WALK_FLAGS flags; + MMU_WALK_LEVEL root; + MMU_WALK_MEMDESC *pStagingBuffer; + NvBool bUseStagingBuffer; + NvBool bInvalidateOnReserve; +}; + +/*! + * @brief This function type implements the core operation to be + * performed on the page levels. Typically each operation will expose an + * external function to handle the root case. This external function will call + * into an instance of this function type. + * + * @param[in] pWalkParams Pointer to @ref MMU_WALK. + * @param[in] pOpParams Pointer to @ref MMU_WALK_OP_PARAMS. + * @param[in] pFmt Pointer to @ref MMU_FMT_LEVEL. + * @param[in/out] pDesc Pointer to @ref MMU_WALK_LEVEL_INST. + * @param[in] vaLo Lower end of the operational VA range. + * @param[in] vaHi Higher end of the operational VA range. + * + * @return NV_STATUS_SUCCESS if mapping succeeds. Other errors if not. + */ +typedef NV_STATUS +MmuWalkOp(const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + NvU64 vaLo, + NvU64 vaHi); + +/*! + * @brief This function type is used to select the right sublevel for the operation + * + * @param[in] pOpCtx Pointer to user supplied operation context needed by the + * implementing function. + * @param[in] pFmt Pointer to @ref MMU_FMT_LEVEL of the level processed. + * @param[out] pSubLevel Pointer to @ref MMU_WALK_LEVEL_INST of the sublevel that + * should be selected for processing. + * @param[in] vaLo Lower end of the operational VA range. + * @param[in] vaHi Higher end of the operational VA range. + * + * @return NV_STATUS_SUCCESS of allocation succeeds. Other errors if not. + */ +typedef NV_STATUS +MmuWalkOpSelectSubLevel(const void *pOpCtx, + const MMU_WALK_LEVEL *pLevel, + NvU32 *pSubLevel, + NvU64 vaLo, + NvU64 vaHi); + +/*! + * This structure is used to represent parameteres needed per operation. + */ +struct MMU_WALK_OP_PARAMS +{ + /*! + * The context needed for the page level operation call back. + * Example: Params for callbacks. + */ + const void *pOpCtx; + + /*! + * @copydoc MmuWalkCBOpFunc + */ + MmuWalkOp *opFunc; + + /*! + * @copydoc MmuWalkCBSelectSubLevel + */ + MmuWalkOpSelectSubLevel *selectSubLevel; + + /*! + * Indicates if this is a fill operation. + */ + NvBool bFill : 1; + + /*! + * Indicates if this is a release operation. + */ + NvBool bRelease : 1; + + /*! 
+ * Indicates to ignore sub-level conflicts. + */ + NvBool bIgnoreSubLevelConflicts : 1; + + /*! + * Commit already allocated page directory entries. + * This is needed in suspend-resume scenarios, where + * the page directory contents are lost during suspend. + * During restore, we need to rewrite the PDEs to the original values. + */ + NvBool bCommit : 1; +}; + +/*! + * Op context for fill operations (unmap and sparse). + */ +typedef struct +{ + MMU_ENTRY_STATE entryState; + MMU_WALK_FILL_STATE fillState; +} MMU_FILL_TARGET; + +extern const MMU_WALK_OP_PARAMS g_opParamsSparsify; +extern const MMU_WALK_OP_PARAMS g_opParamsUnmap; + +struct MMU_WALK_PROCESS_PDES_ENTRY +{ + MMU_WALK_LEVEL *pLevel; + MMU_WALK_LEVEL_INST *pLevelInst; + NvU64 vaLo; + NvU64 vaHi; + NvU64 vaLevelBase; + NvU32 entryIndexHi; + NvU32 entryIndex; +}; +MAKE_LIST(PROCESS_PDES_STACK, MMU_WALK_PROCESS_PDES_ENTRY); + +struct MMU_WALK_RELEASE_PDES_ENTRY +{ + MMU_WALK_LEVEL *pLevel; + MMU_WALK_LEVEL_INST *pLevelInst; + NvU64 entryVaLo; + NvU32 entryIndexHi; + NvU32 entryIndex; +}; +MAKE_LIST(RELEASE_PDES_STACK, MMU_WALK_RELEASE_PDES_ENTRY); + +/*----------------------------Private Interface--------------------------------*/ +const MMU_WALK_LEVEL * +mmuWalkFindLevel(const MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt); + +NV_STATUS +mmuWalkProcessPdes(const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + NvU64 vaLo, + NvU64 vaHi); + +NV_STATUS +mmuWalkRootAcquire(MMU_WALK *pWalk, + NvU64 vaLo, + NvU64 vaHi, + NvBool bForceCommit); + +void +mmuWalkRootRelease(MMU_WALK *pWalk); + +MMU_ENTRY_STATE +mmuWalkGetEntryState(MMU_WALK_LEVEL_INST *pLevelInst, NvU32 entryIndex); + +void +mmuWalkSetEntryState(MMU_WALK_LEVEL_INST *pLevelInst, + NvU32 entryIndex, + MMU_ENTRY_STATE state); + +void +mmuWalkSetEntryReserved(MMU_WALK_LEVEL_INST *pLevelInst, + NvU32 entryIndex, + NvBool bReserved); + +void +mmuWalkSetEntryHybrid(MMU_WALK_LEVEL_INST *pLevelInst, + NvU32 entryIndex, + NvBool bHybrid); + +void +mmuFmtCalcAlignedEntryIndices(const MMU_FMT_LEVEL *pPageFmtIn, + const NvU32 indexLoIn, + const NvU32 indexHiIn, + const MMU_FMT_LEVEL *pPageFmtOut, + NvU32 *pIndexLoOut, + NvU32 *pIndexHiOut); + +MmuWalkOp mmuWalkFill; +MmuWalkOpSelectSubLevel mmuWalkFillSelectSubLevel; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_reserve.c b/src/nvidia/src/libraries/mmu/mmu_walk_reserve.c new file mode 100644 index 000000000..36177eeb5 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_reserve.c @@ -0,0 +1,286 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*--------------------------------Includes------------------------------------*/ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#endif +#include "mmu_walk_private.h" + +/* ------------------------ Static Function Prototypes ---------------------- */ +static MmuWalkOp _mmuWalkReserveEntries; +static MmuWalkOp _mmuWalkReleaseEntries; +static MmuWalkOpSelectSubLevel _mmuWalkReserveSelectSubLevel; + +/*----------------------------Public Functions--------------------------------*/ +NV_STATUS +mmuWalkReserveEntries +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaLo, + const NvU64 vaHi, + const NvBool bInvalidate +) +{ + MMU_WALK_OP_PARAMS opParams = {0}; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != mmuWalkFindLevel(pWalk, pLevelFmt), + NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED(vaLo, mmuFmtLevelPageSize(pLevelFmt)), + NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED(vaHi + 1, mmuFmtLevelPageSize(pLevelFmt)), + NV_ERR_INVALID_ARGUMENT); + + // Acquire the root. Call unconditionally to account for change of size + status = mmuWalkRootAcquire(pWalk, vaLo, vaHi, NV_FALSE); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + NV_ASSERT_OR_RETURN(pWalk->bInvalidateOnReserve, NV_ERR_INVALID_STATE); + + // + // Whether to skip invalidation of PTEs during reservation (for example, + // when sparsifying immediately afterwards) + // + pWalk->bInvalidateOnReserve = bInvalidate; + + // Construct the map op params + opParams.pOpCtx = pLevelFmt; + opParams.opFunc = _mmuWalkReserveEntries; + opParams.selectSubLevel = _mmuWalkReserveSelectSubLevel; + opParams.bIgnoreSubLevelConflicts = NV_TRUE; + + // Start reserving from root (only one instance). + if (pWalk->flags.bUseIterative) + { + status = mmuWalkProcessPdes(pWalk, &opParams, &pWalk->root, pWalk->root.pInstances, + vaLo, vaHi); + } + else + { + status = _mmuWalkReserveEntries(pWalk, &opParams, &pWalk->root, pWalk->root.pInstances, + vaLo, vaHi); + } + + if (NV_OK != status) + { + NV_STATUS tmpStatus; + + // Reservation failed, unwind by releasing the VA range + tmpStatus = mmuWalkReleaseEntries(pWalk, pLevelFmt, vaLo, vaHi); + NV_ASSERT(NV_OK == tmpStatus); + } + + // Reset invalidation enable + pWalk->bInvalidateOnReserve = NV_TRUE; + + return status; +} + +NV_STATUS +mmuWalkReleaseEntries +( + MMU_WALK *pWalk, + const MMU_FMT_LEVEL *pLevelFmt, + const NvU64 vaLo, + const NvU64 vaHi +) +{ + MMU_WALK_OP_PARAMS opParams = {0}; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != mmuWalkFindLevel(pWalk, pLevelFmt), + NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED(vaLo, mmuFmtLevelPageSize(pLevelFmt)), + NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NV_IS_ALIGNED(vaHi + 1, mmuFmtLevelPageSize(pLevelFmt)), + NV_ERR_INVALID_ARGUMENT); + + // Start releasing from root (only one instance). 
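+ // The root level keeps a single instance; if it was never allocated there
+ // is nothing to release and NV_OK is returned immediately.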
+ if (NULL != pWalk->root.pInstances) + { + // Construct the map op params + opParams.pOpCtx = pLevelFmt; + opParams.opFunc = _mmuWalkReleaseEntries; + opParams.selectSubLevel = _mmuWalkReserveSelectSubLevel; + opParams.bIgnoreSubLevelConflicts = NV_TRUE; + opParams.bRelease = NV_TRUE; + + if (pWalk->flags.bUseIterative) + { + status = mmuWalkProcessPdes(pWalk, &opParams, &pWalk->root, pWalk->root.pInstances, + vaLo, vaHi); + } + else + { + status = _mmuWalkReleaseEntries(pWalk, &opParams, &pWalk->root, pWalk->root.pInstances, + vaLo, vaHi); + } + + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // Release the root. + mmuWalkRootRelease(pWalk); + } + + return status; +} + +/* ----------------------------- Static Functions---------------------------- */ + +static NV_STATUS +_mmuWalkReserveEntries +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + NvU64 vaLo, + NvU64 vaHi +) +{ + const MMU_FMT_LEVEL *pLevelFmt = (const MMU_FMT_LEVEL *) pOpParams->pOpCtx; + + // If this level is not the targetted page level. + if (pLevel->pFmt != pLevelFmt) + { + NV_ASSERT_OR_RETURN(0 != pLevel->pFmt->numSubLevels, NV_ERR_INVALID_ARGUMENT); + + if (pWalk->flags.bUseIterative) + { + return NV_ERR_MORE_PROCESSING_REQUIRED; + } + else + { + // Process all the page level entries falling within [vaLo, vaHi] + NV_ASSERT_OK_OR_RETURN( + mmuWalkProcessPdes(pWalk, + pOpParams, + pLevel, + pLevelInst, + vaLo, + vaHi)); + } + } + // We have reached the target page level. + else + { + const NvU32 entryIndexLo = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaLo); + const NvU32 entryIndexHi = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaHi); + NvU32 entryIndex; + + // Update the state tracker + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++) + { + mmuWalkSetEntryReserved(pLevelInst, entryIndex, NV_TRUE); + } + } + + return NV_OK; +} + +static NV_STATUS +_mmuWalkReleaseEntries +( + const MMU_WALK *pWalk, + const MMU_WALK_OP_PARAMS *pOpParams, + MMU_WALK_LEVEL *pLevel, + MMU_WALK_LEVEL_INST *pLevelInst, + NvU64 vaLo, + NvU64 vaHi +) +{ + const MMU_FMT_LEVEL *pLevelFmt = (const MMU_FMT_LEVEL *) pOpParams->pOpCtx; + + // If this level is not the targetted page level. + if (pLevel->pFmt != pLevelFmt) + { + NV_ASSERT_OR_RETURN(0 != pLevel->pFmt->numSubLevels, NV_ERR_INVALID_ARGUMENT); + + if (pWalk->flags.bUseIterative) + { + return NV_ERR_MORE_PROCESSING_REQUIRED; + } + else + { + // Process all the page level entries falling within [vaLo, vaHi] + NV_ASSERT_OK_OR_RETURN( + mmuWalkProcessPdes(pWalk, + pOpParams, + pLevel, + pLevelInst, + vaLo, + vaHi)); + } + } + // We have reached the target page level. + else + { + const NvU32 entryIndexLo = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaLo); + const NvU32 entryIndexHi = mmuFmtVirtAddrToEntryIndex(pLevel->pFmt, vaHi); + NvU32 entryIndex; + + // Update the state tracker + for (entryIndex = entryIndexLo; entryIndex <= entryIndexHi; entryIndex++) + { + mmuWalkSetEntryReserved(pLevelInst, entryIndex, NV_FALSE); + } + } + + return NV_OK; +} + +static NV_STATUS +_mmuWalkReserveSelectSubLevel +( + const void *pOpCtx, + const MMU_WALK_LEVEL *pLevel, + NvU32 *pSubLevel, + NvU64 vaLo, + NvU64 vaHi +) +{ + const MMU_FMT_LEVEL *pLevelFmt = (const MMU_FMT_LEVEL *) pOpCtx; + NvU32 subLevel; + const MMU_FMT_LEVEL *pSubLevelFmt = NULL; + + // If we've only one sublevel, it's at index 0. 
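+ // Otherwise pick the sub-level whose format matches the target level
+ // format carried in pOpCtx.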
+ if (pLevel->pFmt->numSubLevels == 1) + { + *pSubLevel = 0; + return NV_OK; + } + + for (subLevel = 0; subLevel < pLevel->pFmt->numSubLevels; subLevel++) + { + pSubLevelFmt = pLevel->pFmt->subLevels + subLevel; + + if (pSubLevelFmt == pLevelFmt) + { + *pSubLevel = subLevel; + return NV_OK; + } + } + // Error if we didn't find a matching page size + return NV_ERR_INVALID_STATE; +} diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_sparse.c b/src/nvidia/src/libraries/mmu/mmu_walk_sparse.c new file mode 100644 index 000000000..5c83ac584 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_sparse.c @@ -0,0 +1,131 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file mmu_walk_sparse.c + * @brief Describes the structures and interfaces used to sparsify N level page tables + */ + +/* ------------------------ Includes --------------------------------------- */ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#endif +#include "mmu_walk_private.h" + +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Static Function Prototypes --------------------- */ +/* ------------------------ Inline Functions ------------------------------- */ +/* ------------------------ Public Functions ------------------------------ */ + +NV_STATUS +mmuWalkSparsify +( + MMU_WALK *pWalk, + const NvU64 vaLo, + const NvU64 vaHi, + const NvBool bUseStagingBuffer +) +{ + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pWalk, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaLo <= vaHi, NV_ERR_INVALID_ARGUMENT); + + // Acquire the root. Call unconditionally to account for change of size + status = mmuWalkRootAcquire(pWalk, vaLo, vaHi, NV_FALSE); + NV_ASSERT_OR_RETURN(NV_OK == status, status); + + // Set field to indicate whether to use staging buffer. + pWalk->bUseStagingBuffer = bUseStagingBuffer; + + // Sparsify + if (pWalk->flags.bUseIterative) + { + status = mmuWalkProcessPdes(pWalk, + &g_opParamsSparsify, + &pWalk->root, pWalk->root.pInstances, + vaLo, vaHi); + } + else + { + status = g_opParamsSparsify.opFunc(pWalk, + &g_opParamsSparsify, + &pWalk->root, pWalk->root.pInstances, + vaLo, vaHi); + } + + if (NV_OK != status) + { + NV_STATUS unmapStatus; + + NV_PRINTF(LEVEL_ERROR, + "Failed to sparsify VA Range 0x%llx to 0x%llx. 
Status = 0x%08x\n", + vaLo, vaHi, status); + NV_ASSERT(0); + + // Mapping failed, unwind by unmapping the VA range + unmapStatus = mmuWalkUnmap(pWalk, + vaLo, + vaHi); + if (NV_OK != unmapStatus) + { + NV_PRINTF(LEVEL_ERROR, + "Unmap failed with status = 0x%08x\n", + unmapStatus); + NV_ASSERT(NV_OK == unmapStatus); + } + } + + // Reset staging buffer usage status + pWalk->bUseStagingBuffer = NV_FALSE; + + return status; +} + +NV_STATUS +mmuWalkUnsparsify +( + MMU_WALK *pWalk, + const NvU64 vaLo, + const NvU64 vaHi +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +static const MMU_FILL_TARGET g_fillTargetSparse = +{ + MMU_ENTRY_STATE_SPARSE, + MMU_WALK_FILL_SPARSE, +}; + +const MMU_WALK_OP_PARAMS g_opParamsSparsify = +{ + &g_fillTargetSparse, + mmuWalkFill, + mmuWalkFillSelectSubLevel, + NV_TRUE, // bFill + NV_FALSE, // bRelease + NV_FALSE, // bIgnoreSubLevelConflicts +}; + diff --git a/src/nvidia/src/libraries/mmu/mmu_walk_unmap.c b/src/nvidia/src/libraries/mmu/mmu_walk_unmap.c new file mode 100644 index 000000000..dac323915 --- /dev/null +++ b/src/nvidia/src/libraries/mmu/mmu_walk_unmap.c @@ -0,0 +1,97 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/*! + * @file mmu_walk_unmap.c + * @brief Describes the structures and interfaces used to map N level page tables + */ + +/* ------------------------ Includes --------------------------------------- */ +#if defined(SRT_BUILD) +#include "shrdebug.h" +#endif +#include "mmu_walk_private.h" + +/* ------------------------ Macros ----------------------------------------- */ +/* ------------------------ Static Function Prototypes --------------------- */ +/* ------------------------ Inline Functions ------------------------------- */ +/* ------------------------ Public Functions ------------------------------ */ + +NV_STATUS +mmuWalkUnmap +( + MMU_WALK *pWalk, + const NvU64 vaLo, + const NvU64 vaHi +) +{ + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pWalk, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(vaLo <= vaHi, NV_ERR_INVALID_ARGUMENT); + + // Unmap starting from root if it exists. 
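+ // A range with no root instance has nothing mapped, so the unmap completes
+ // trivially with NV_OK.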
+ if (NULL != pWalk->root.pInstances) + { + if (pWalk->flags.bUseIterative) + { + status = mmuWalkProcessPdes(pWalk, &g_opParamsUnmap, + &pWalk->root, pWalk->root.pInstances, vaLo, vaHi); + } + else + { + status = g_opParamsUnmap.opFunc(pWalk, &g_opParamsUnmap, + &pWalk->root, pWalk->root.pInstances, vaLo, vaHi); + } + + if (NV_OK != status) + { + NV_PRINTF(LEVEL_ERROR, + "Failed to unmap VA Range 0x%llx to 0x%llx. Status = 0x%08x\n", + vaLo, vaHi, status); + NV_ASSERT(0); + } + + // Release the root. + mmuWalkRootRelease(pWalk); + } + + return status; +} + +static const MMU_FILL_TARGET g_fillTargetUnmap = +{ + MMU_ENTRY_STATE_INVALID, + MMU_WALK_FILL_INVALID, +}; + +const MMU_WALK_OP_PARAMS g_opParamsUnmap = +{ + &g_fillTargetUnmap, + mmuWalkFill, + mmuWalkFillSelectSubLevel, + NV_TRUE, // bFill + NV_FALSE, // bRelease + NV_FALSE, // bIgnoreSubLevelConflicts +}; + diff --git a/src/nvidia/src/libraries/nvbitvector/nvbitvector.c b/src/nvidia/src/libraries/nvbitvector/nvbitvector.c new file mode 100644 index 000000000..75f487673 --- /dev/null +++ b/src/nvidia/src/libraries/nvbitvector/nvbitvector.c @@ -0,0 +1,864 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "utils/nvbitvector.h" + +/** + * @brief Returns the size, in bytes, of this bitvector. + * @note due to the compiler trick of storing the last index within a + * structure pointer in the data, the minimum size of an NV_BITEVECTOR + * will be the size of one pointer on a given architecture. If the + * storage size of the underlying data is changed to something less + * than the size of a pointer on a given architecture, then two + * libraries running on different architectures transferring bitvectors + * between them may disagree on the value of the direct sizeof operator + * on a struct of an NV_BITVECTOR derivative. This version of SizeOf + * should be agreeable to all architectures, and should be used instead + * of sizeof to marshall data between libraries running on different + * architectures. + */ +NvU32 +bitVectorSizeOf_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + return NV_BITVECTOR_BYTE_SIZE(bitVectorLast); +} + +/** + * @brief Clears all flags in pBitVector. 
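+ *
+ * @note The entire NV_BITVECTOR_BYTE_SIZE(bitVectorLast) byte range backing
+ * the vector is zeroed via portMemSet.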
+ */ +NV_STATUS +bitVectorClrAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NvU32 byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + portMemSet(&pBitVector->qword, 0x0, byteSize); + return NV_OK; +} + +/** + * @brief Clears the flag in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorClr_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +) +{ + NvU64 *qword; + NvU16 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(idx < bitVectorLast, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] &= ~NVBIT64(qwordOffset); + return NV_OK; +} + +/** + * @brief Clears all flags within a range in pBitVector + */ +NV_STATUS +bitVectorClrRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = 0x0; + idx += 63; + continue; + } + + status = bitVectorClr_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Sets all flags in pBitVector + */ +NV_STATUS +bitVectorSetAll_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + NvU64 *qword; + NvU32 byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + portMemSet(qword, NV_U8_MAX, byteSize); + qword[arraySize - 1] &= (NV_U64_MAX >> (63 - qwordOffset)); + + return NV_OK; +} + +/** + * @brief Sets the flag in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorSet_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +) +{ + NvU64 *qword; + NvU16 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] |= NVBIT64(qwordOffset); + + return NV_OK; +} + +/** + * @brief Sets all flags within a range in pBitVector + */ +NV_STATUS +bitVectorSetRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = (NV_U64_MAX); + idx += 63; + continue; + } + + status = bitVectorSet_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Toggles the flag 
in pBitVector according to bit index idx + */ +NV_STATUS +bitVectorInv_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +) +{ + NvU64 *qword; + NvU16 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + qword[qwordIdx] ^= NVBIT64(qwordOffset); + + return NV_OK; +} + +/** + * @brief Toggles all flags within a range in pBitVector + */ +NV_STATUS +bitVectorInvRange_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NV_RANGE range +) +{ + NvU64 *qword; + NvU16 idx; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rangeContains(rangeMake(0, bitVectorLast - 1), range), + NV_ERR_INVALID_ARGUMENT); + + qword = (NvU64 *)&pBitVector->qword; + for (idx = (NvU16)range.lo; idx <= (NvU16)range.hi; ++idx) + { + if ((0 == NV_BITVECTOR_OFFSET(idx)) && + (rangeContains(range, rangeMake(idx + 63, idx + 63)))) + { + qword[NV_BITVECTOR_IDX(idx)] = ~qword[NV_BITVECTOR_IDX(idx)]; + idx += 63; + continue; + } + + status = bitVectorInv_IMPL(pBitVector, bitVectorLast, idx); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Initializes a NV_BITVECTOR with the bit indices contained within + * pIndices set. + */ +NV_STATUS +bitVectorFromArrayU16_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 *pIndices, + NvU32 indicesSize +) +{ + NV_STATUS status = NV_OK; + NvU32 i; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pIndices, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(0 != indicesSize, NV_ERR_INVALID_ARGUMENT); + + status = bitVectorClrAll_IMPL(pBitVector, bitVectorLast); + if (NV_OK != status) + { + return status; + } + + for (i = 0; i < indicesSize; ++i) + { + status = bitVectorSet_IMPL(pBitVector, bitVectorLast, pIndices[i]); + if (NV_OK != status) + { + return status; + } + } + + return status; +} + +/** + * @brief Checks if all flags in pBitVector are set + */ +NvBool +bitVectorTestAllSet_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (mask != (qword[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if all flags in pBitVector are cleared + */ +NvBool +bitVectorTestAllCleared_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != (qword[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if two bitVectors are equivalent + */ +NvBool +bitVectorTestEqual_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorALast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorALast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((bitVectorALast == bitVectorBLast), NV_ERR_INVALID_ARGUMENT); + + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if ((qwordA[idx] & mask) != (qwordB[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if the set of set flags in bitVectorA is a subset of the set of + * set flags in bitVectorB. + */ +NvBool +bitVectorTestIsSubset_IMPL +( + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorALast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorALast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN((bitVectorALast == bitVectorBLast), NV_ERR_INVALID_ARGUMENT); + + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (((qwordA[idx] & mask) & (qwordB[idx] & mask)) != (qwordA[idx] & mask)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + +/** + * @brief Checks if the flag according to bit index idx in pBitVector is set + */ +NvBool +bitVectorTest_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + NvU16 idx +) +{ + const NvU64 *qword; + NvU16 qwordIdx = NV_BITVECTOR_IDX(idx); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(idx); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_FALSE); + NV_ASSERT_OR_RETURN(idx < bitVectorLast, NV_FALSE); + + qword = (const NvU64 *)&pBitVector->qword; + return !!(qword[qwordIdx] & NVBIT64(qwordOffset)); +} + +/** + * @brief Computes the intersection of flags in pBitVectorA and pBitVectorB, and + * stores the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorAnd_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] & qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Computes the union of flags in pBitVectorA and pBitVectorB, and stores + * the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorOr_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] | qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Computes the exclusive OR of flags in pBitVectorA and pBitVectorB, and stores + * the result in pBitVectorDst + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorA First operand + * @param[in] pBitVectorB Second operand + * + * @note it is valid for the same bitVector to be both destination and operand + * for this operation + */ +NV_STATUS +bitVectorXor_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorA, + NvU16 bitVectorALast, + const NV_BITVECTOR *pBitVectorB, + NvU16 bitVectorBLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordA; + const NvU64 *qwordB; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorA, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorB, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorALast) && (bitVectorALast == + bitVectorBLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordA = (const NvU64 *)&pBitVectorA->qword; + qwordB = (const NvU64 *)&pBitVectorB->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (qwordA[idx] ^ qwordB[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Causes the set of raised flags in pBitVectorDst to be equal to the + * complement of the set of raised flags in pBitVectorSrc. + * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorSrc Source + * + * @note it is valid for the same bitVector to be both destination and + * source for this operation + */ +NV_STATUS +bitVectorComplement_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +) +{ + NvU64 *qwordDst; + const NvU64 *qwordSrc; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorDstLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorDstLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorSrc, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(((bitVectorDstLast == bitVectorSrcLast)), NV_ERR_INVALID_ARGUMENT); + + qwordDst = (NvU64 *)&pBitVectorDst->qword; + qwordSrc = (const NvU64 *)&pBitVectorSrc->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + qwordDst[idx] = (~qwordSrc[idx]) & mask; + } + + return NV_OK; +} + +/** + * @brief Causes the set of raised flags in pBitVectorDst to be equal to the set + * of raised flags in pBitVectorSrc. 
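+ * The copy is done with portMemCopy over the full backing storage, so both
+ * vectors must be declared with the same bitVectorLast.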
+ * + * @param[out] pBitVectorDst Destination + * @param[in] pBitVectorSrc Source + * + * @note it is \b invalid for the same bitVector to be both destination and + * source for this operation + */ +NV_STATUS +bitVectorCopy_IMPL +( + NV_BITVECTOR *pBitVectorDst, + NvU16 bitVectorDstLast, + const NV_BITVECTOR *pBitVectorSrc, + NvU16 bitVectorSrcLast +) +{ + NvU32 byteSizeDst = NV_BITVECTOR_BYTE_SIZE(bitVectorDstLast); + NvU32 byteSizeSrc = NV_BITVECTOR_BYTE_SIZE(bitVectorSrcLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVectorDst, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pBitVectorSrc, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(bitVectorDstLast == bitVectorSrcLast, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(pBitVectorDst != pBitVectorSrc, NV_WARN_NOTHING_TO_DO); + + portMemCopy(&pBitVectorDst->qword, byteSizeDst, &pBitVectorSrc->qword, byteSizeSrc); + return NV_OK; +} + +/** + * @brief Returns the bit index of the first set flag in pBitVector. + * + * @note in the absence of set flags in pBitVector, the index of the first + * invalid flag is returned. + */ +NvU32 +bitVectorCountTrailingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU32 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != (qword[idx] & mask)) + { + return ((idx * (sizeof(NvU64) * 8)) + + portUtilCountTrailingZeros64(qword[idx] & mask)); + } + } + + return bitVectorLast; +} + +/** + * @brief Returns the bit index of the last set flag in pBitVector. + * + * @note in the absence of set flags in pBitVector, the index of the first + * invalid flag is returned. + */ +NvU32 +bitVectorCountLeadingZeros_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU16 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU16 qwordUnused = 63 - qwordOffset; + NvU64 mask; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + qword = (const NvU64 *)&pBitVector->qword; + for (idx = (arraySize - 1); idx != ((NvU16)-1); idx--) + { + mask = (idx < arraySize - 1) ? NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + + if (0x0 != qword[idx]) + { + // + // We're counting from the MSB, and we have to subtract the unused + // portion of the bitvector from the output + // + return (((arraySize - idx - 1) * (sizeof(NvU64) * 8)) + + portUtilCountLeadingZeros64(qword[idx] & mask)) - + qwordUnused; + } + } + + return bitVectorLast; +} + +/** + * @brief Returns the number of set bits in the bitvector. + */ +NvU32 +bitVectorCountSetBits_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast +) +{ + const NvU64 *qword; + NvU16 idx; + NvU16 arraySize = NV_BITVECTOR_ARRAY_SIZE(bitVectorLast); + NvU16 qwordOffset = NV_BITVECTOR_OFFSET(bitVectorLast - 1); + NvU64 mask; + NvU32 count; + + NV_ASSERT_OR_RETURN(NULL != pBitVector, 0); + + count = 0; + qword = (const NvU64 *)&pBitVector->qword; + for (idx = 0; idx < arraySize; idx++) + { + mask = (idx < arraySize - 1) ? 
NV_U64_MAX : + (NV_U64_MAX >> (63 - qwordOffset)); + count += nvPopCount64(qword[idx] & mask); + } + + return count; +} + +/** + * @brief Exports the bitVector data to an NvU64 raw bitmask array. + */ +NV_STATUS +bitVectorToRaw_IMPL +( + const NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + void *pRawMask, + NvU32 rawMaskSize +) +{ + const NvU32 byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pRawMask, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rawMaskSize >= byteSize, NV_ERR_BUFFER_TOO_SMALL); + + portMemCopy(pRawMask, byteSize, &pBitVector->qword, byteSize); + return NV_OK; +} + +/** + * @brief Imports the bitVector data from an Nvu64 raw bitmask array. + */ +NV_STATUS +bitVectorFromRaw_IMPL +( + NV_BITVECTOR *pBitVector, + NvU16 bitVectorLast, + const void *pRawMask, + NvU32 rawMaskSize +) +{ + const NvU32 byteSize = NV_BITVECTOR_BYTE_SIZE(bitVectorLast); + + NV_ASSERT_OR_RETURN(NULL != pBitVector, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(NULL != pRawMask, NV_ERR_INVALID_ARGUMENT); + NV_ASSERT_OR_RETURN(rawMaskSize >= byteSize, NV_ERR_BUFFER_TOO_SMALL); + + portMemCopy(&pBitVector->qword, byteSize, pRawMask, byteSize); + return NV_OK; +} + diff --git a/src/nvidia/src/libraries/nvoc/src/runtime.c b/src/nvidia/src/libraries/nvoc/src/runtime.c new file mode 100644 index 000000000..29d517a00 --- /dev/null +++ b/src/nvidia/src/libraries/nvoc/src/runtime.c @@ -0,0 +1,297 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "nvport/nvport.h" + +#include "nvtypes.h" + +#include "nvoc/rtti.h" +#include "nvoc/runtime.h" + +#include "nvoc/object.h" + +# include "utils/nvassert.h" + + +static NV_FORCEINLINE Dynamic *__nvoc_fullyDerive_IMPL(Dynamic *pDynamic) +{ + return (Dynamic*)((NvU8*)pDynamic - pDynamic->__nvoc_rtti->offset); +} + +Dynamic *fullyDeriveWrapper(Dynamic *pDynamic) +{ + return __nvoc_fullyDerive_IMPL(pDynamic); +} + +const struct NVOC_RTTI_PROVIDER __nvoc_rtti_provider = { 0 }; + +NVOC_CLASS_ID __nvoc_objGetClassId(Dynamic *pObj) +{ + Dynamic *pDerivedObj = __nvoc_fullyDerive(pObj); + return pDerivedObj->__nvoc_rtti->pClassDef->classInfo.classId; +} + +const NVOC_CLASS_INFO *__nvoc_objGetClassInfo(Dynamic *pObj) +{ + Dynamic *pDerivedObj = __nvoc_fullyDerive(pObj); + return &pDerivedObj->__nvoc_rtti->pClassDef->classInfo; +} + +Dynamic *objFindAncestor_IMPL(Dynamic *pDynamic, NVOC_CLASS_ID classId) +{ + Object *pObj = dynamicCast(pDynamic, Object); + NV_ASSERT(pObj != NULL); + + while ((pObj = pObj->pParent) != NULL) + { + if (objDynamicCastById(pObj, classId) != NULL) return __nvoc_fullyDerive(pObj); + } + + NV_ASSERT(0); + return NULL; +} + +void objAddChild_IMPL(Object *pObj, Object *pChild) +{ + NV_ASSERT(pChild->pParent == NULL); + pChild->pParent = pObj; + pChild->childTree.pSibling = pObj->childTree.pChild; + pObj->childTree.pChild = pChild; +} + +void objRemoveChild_IMPL(Object *pObj, Object *pChild) +{ + Object **ppChild; + + NV_ASSERT(pObj == pChild->pParent); + pChild->pParent = NULL; + ppChild = &pObj->childTree.pChild; + while (*ppChild != NULL) + { + if (*ppChild == pChild) + { + *ppChild = pChild->childTree.pSibling; + return; + } + + ppChild = &(*ppChild)->childTree.pSibling; + } +} + +Object *objGetChild_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->childTree.pChild; +} + +Object *objGetSibling_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->childTree.pSibling; +} + +Object *objGetDirectParent_IMPL(Object *pObj) +{ + NV_ASSERT(pObj != NULL); + return pObj->pParent; +} + +//! Internal backing method for objDelete. +void __nvoc_objDelete(Dynamic *pDynamic) +{ + Dynamic *pDerivedObj; + Object *pObj, *pChild; + + if (pDynamic == NULL) + { + return; + } + + pDynamic->__nvoc_rtti->dtor(pDynamic); + + pObj = dynamicCast(pDynamic, Object); + if (pObj->pParent != NULL) + { + objRemoveChild(pObj->pParent, pObj); + } + + if ((pChild = objGetChild(pObj)) != NULL) + { +#if NV_PRINTF_STRINGS_ALLOWED + portDbgPrintf("NVOC: %s: Child class %s not freed from parent class %s.", + __FUNCTION__, + objGetClassInfo(pChild)->name, + objGetClassInfo(pObj)->name); +#endif + PORT_BREAKPOINT_CHECKED(); + } + + pDerivedObj = __nvoc_fullyDerive(pDynamic); + portMemFree(pDerivedObj); +} + +//! Internal method to fill out an object's RTTI pointers from a class definition. +void __nvoc_initRtti(Dynamic *pNewObject, const struct NVOC_CLASS_DEF *pClassDef) +{ + NvU32 relativeIdx; + for (relativeIdx = 0; relativeIdx < pClassDef->pCastInfo->numRelatives; relativeIdx++) + { + const struct NVOC_RTTI *pRelative = pClassDef->pCastInfo->relatives[relativeIdx]; + const struct NVOC_RTTI **ppRelativeRtti = &((Dynamic*)((NvU8*)pNewObject + pRelative->offset))->__nvoc_rtti; + *ppRelativeRtti = pRelative; + } +} + +//! Internal backing method for objCreateDynamic. +NV_STATUS __nvoc_objCreateDynamic( + Dynamic **ppNewObject, + Dynamic *pParent, + const NVOC_CLASS_INFO *pClassInfo, + NvU32 createFlags, + ...) 
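+// The trailing variadic constructor arguments are packed into a va_list and
+// forwarded to the class-specific objCreatefn.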
+{ + NV_STATUS status; + va_list args; + + const struct NVOC_CLASS_DEF *pClassDef = + (const struct NVOC_CLASS_DEF*)pClassInfo; + + va_start(args, createFlags); + status = pClassDef->objCreatefn(ppNewObject, pParent, createFlags, args); + va_end(args); + + return status; +} + +Dynamic *objDynamicCastById_IMPL(Dynamic *pFromObj, NVOC_CLASS_ID classId) +{ + NvU32 i, numBases; + Dynamic *pDerivedObj; + + const struct NVOC_RTTI *const *bases; + const struct NVOC_RTTI *pFromRtti; + const struct NVOC_RTTI *pDerivedRtti; + + if (pFromObj == NULL) + { + return NULL; + } + + pFromRtti = pFromObj->__nvoc_rtti; + + // fastpath, we're dynamic casting to what we already have + if (classId == pFromRtti->pClassDef->classInfo.classId) + { + return pFromObj; + } + + pDerivedObj = __nvoc_fullyDerive(pFromObj); + pDerivedRtti = pDerivedObj->__nvoc_rtti; + + // fastpath, we're dynamic casting to the fully derived class + if (classId == pDerivedRtti->pClassDef->classInfo.classId) + { + return pDerivedObj; + } + + // slowpath, search all the possibilities for a match + numBases = pDerivedRtti->pClassDef->pCastInfo->numRelatives; + bases = pDerivedRtti->pClassDef->pCastInfo->relatives; + + for (i = 0; i < numBases; i++) + { + if (classId == bases[i]->pClassDef->classInfo.classId) + { + return (Dynamic*)((NvU8*)pDerivedObj + bases[i]->offset); + } + } + + return NULL; +} + +//! Internal backing method for dynamicCast. +Dynamic *__nvoc_dynamicCast(Dynamic *pFromObj, const NVOC_CLASS_INFO *pClassInfo) +{ + return objDynamicCastById(pFromObj, pClassInfo->classId); +} + +/*! + * @brief Internal dummy destructor for non-fully-derived pointers. + * + * Resolves pDynamic to its most derived pointer and then calls the real + * destructor on the fully-derived object. + */ +void __nvoc_destructFromBase(Dynamic *pDynamic) +{ + Dynamic *pDerivedObj = __nvoc_fullyDerive(pDynamic); + pDerivedObj->__nvoc_rtti->dtor(pDerivedObj); +} + + +const struct NVOC_EXPORTED_METHOD_DEF *objGetExportedMethodDef_IMPL(Dynamic *pObj, NvU32 methodId) +{ + const struct NVOC_CASTINFO *const pCastInfo = pObj->__nvoc_rtti->pClassDef->pCastInfo; + const NvU32 numRelatives = pCastInfo->numRelatives; + const struct NVOC_RTTI *const *relatives = pCastInfo->relatives; + NvU32 i; + + + for (i = 0; i < numRelatives; i++) + { + const struct NVOC_RTTI *relative; + const struct NVOC_EXPORT_INFO* exportData; + NvU32 exportLength; + const struct NVOC_EXPORTED_METHOD_DEF *exportArray; + + relative = relatives[i]; + + exportData = relative->pClassDef->pExportInfo; + exportLength = exportData->numEntries; + exportArray = exportData->pExportEntries; + + if (exportArray != NULL && exportLength > 0) + { + // The export array is sorted by methodId, so we can binary search it + NvU32 low = 0; + NvU32 high = exportLength; + while (1) + { + NvU32 mid = (low + high) / 2; + + if (exportArray[mid].methodId == methodId) + return &exportArray[mid]; + + if (high == mid || low == mid) + break; + + if (exportArray[mid].methodId > methodId) + high = mid; + else + low = mid; + } + } + } + + return NULL; +} + diff --git a/src/nvidia/src/libraries/nvport/core/core.c b/src/nvidia/src/libraries/nvport/core/core.c new file mode 100644 index 000000000..3ecce65b0 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/core/core.c @@ -0,0 +1,94 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "nvport/nvport.h" + +typedef struct _PORT_STATE +{ + NvU32 initCount; +} PORT_STATE; +static PORT_STATE portState; + + +#if PORT_IS_MODULE_SUPPORTED(atomic) +#define PORT_DEC(x) portAtomicDecrementS32((volatile NvS32 *)&x) +#define PORT_INC(x) portAtomicIncrementS32((volatile NvS32 *)&x) +#else +#define PORT_DEC(x) --x +#define PORT_INC(x) ++x +#endif + + +/// @todo Add better way to initialize all modules +NV_STATUS portInitialize() +{ + if (PORT_INC(portState.initCount) == 1) + { +#if PORT_IS_MODULE_SUPPORTED(debug) + portDbgInitialize(); +#endif +#if PORT_IS_MODULE_SUPPORTED(sync) + portSyncInitialize(); +#endif +#if PORT_IS_MODULE_SUPPORTED(memory) + portMemInitialize(); +#endif +#if PORT_IS_MODULE_SUPPORTED(crypto) + portCryptoInitialize(); +#endif +#if PORT_IS_MODULE_SUPPORTED(cpu) + portCpuInitialize(); +#endif + } + return NV_OK; +} + +void portShutdown() +{ + if (PORT_DEC(portState.initCount) == 0) + { +#if PORT_IS_MODULE_SUPPORTED(cpu) + portCpuShutdown(); +#endif +#if PORT_IS_MODULE_SUPPORTED(crypto) + portCryptoShutdown(); +#endif +#if PORT_IS_MODULE_SUPPORTED(memory) +#if (!defined(DEBUG) || defined(NV_MODS)) && !NVCPU_IS_RISCV64 + portMemShutdown(NV_TRUE); +#else + portMemShutdown(NV_FALSE); +#endif +#endif +#if PORT_IS_MODULE_SUPPORTED(sync) + portSyncShutdown(); +#endif +#if PORT_IS_MODULE_SUPPORTED(debug) + portDbgShutdown(); +#endif + } +} + +NvBool portIsInitialized() +{ + return portState.initCount > 0; +} diff --git a/src/nvidia/src/libraries/nvport/cpu/cpu_common.c b/src/nvidia/src/libraries/nvport/cpu/cpu_common.c new file mode 100644 index 000000000..11c4ab9b3 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/cpu/cpu_common.c @@ -0,0 +1,61 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief CPU module function implementations which are shared across platforms + * + */ + +#include "nvport/nvport.h" +#include "cpu_common.h" + +void +portCpuInitialize(void) +{ + PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(NV_FALSE); +} + +void +portCpuShutdown(void) +{ + // + // Not returning status to the caller since that seems like a norm in nvort + // for init and shutdown functions + // + if (PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE() == NV_TRUE) + { + // + // If PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE is true then + // portCpuExFreeImcBarDesc will be supported. Adding following check + // to avoid compile time issues + // + #if PORT_IS_FUNC_SUPPORTED(portCpuExFreeImcBarDesc) + if (portCpuExFreeImcBarDesc(PORT_CPU_GET_IMC_BAR_DESC()) != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + } + #endif + } + PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(NV_FALSE); +} diff --git a/src/nvidia/src/libraries/nvport/cpu/cpu_common.h b/src/nvidia/src/libraries/nvport/cpu/cpu_common.h new file mode 100644 index 000000000..a9c7ee335 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/cpu/cpu_common.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief CPU module private defines/interfaces + */ + +#ifndef _NVPORT_CPU_COMMON_H_ +#define _NVPORT_CPU_COMMON_H_ + +#include "nvport/nvport.h" + +// +// Structure representing internal state for CPU +// +typedef struct PORT_CPU_STATE +{ + // BAR descriptor for Integrated Memory controller + PORT_CPU_BAR_DESC imcBarDesc; + + // If init for IMC BAR descriptor is done + NvBool bImcBarDescInit; +} PORT_CPU_STATE; + +PORT_CPU_STATE gCpuPortState; + +#define PORT_CPU_GET_IMC_BAR_DESC() (&(gCpuPortState.imcBarDesc)) + +#define PORT_CPU_GET_IMC_BAR_DESC_INIT_STATE() (gCpuPortState.bImcBarDescInit) + +#define PORT_CPU_SET_IMC_BAR_DESC_INIT_STATE(state) (gCpuPortState.bImcBarDescInit = state) +#endif // _NVPORT_CPU_COMMON_H_ +/// @} diff --git a/src/nvidia/src/libraries/nvport/cpu/cpu_x86_amd64.c b/src/nvidia/src/libraries/nvport/cpu/cpu_x86_amd64.c new file mode 100644 index 000000000..0a2e2cdf9 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/cpu/cpu_x86_amd64.c @@ -0,0 +1,574 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief CPU functions implementations - X86, X86-64. 
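+ *
+ * Illustrative usage (a sketch only; it assumes the PORT_CPU_INFORMATION
+ * layout declared in the NvPort headers):
+ *
+ *     PORT_CPU_INFORMATION cpuInfo;
+ *     if ((portCpuGetInfo(&cpuInfo) == NV_OK) &&
+ *         (cpuInfo.type == PORT_CPU_TYPE_INTEL_X86))
+ *     {
+ *         // e.g. consult cpuInfo.cpu.intel.features.AVX before selecting
+ *         // an AVX-specific code path
+ *     }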
+ */
+
+#include "cpuopsys.h"
+
+#if NVCPU_IS_X86 || NVCPU_IS_X86_64
+
+#include "nvport/nvport.h"
+#include "cpu_x86_amd64.h"
+
+static NV_INLINE NV_STATUS portCpuCpuId(NvU32* pCpuInfo, NvU32 functionId,
+                                        NvU32 subfunctionId)
+{
+    NV_STATUS status;
+#if PORT_IS_FUNC_SUPPORTED(portCpuExCpuId)
+    status = portCpuExCpuId(pCpuInfo, functionId, subfunctionId);
+#else
+    status = NV_ERR_NOT_SUPPORTED;
+#endif
+    return status;
+}
+
+static NV_INLINE NV_STATUS portCpuCheckStdFunSupport(NvU32 function_id)
+{
+    NV_STATUS status;
+    NvU32 cpuInfo[4];
+
+    portMemSet(cpuInfo, 0, sizeof(cpuInfo));
+    status = portCpuCpuId(cpuInfo, PORT_CPU_CPUID_STD_FUN_BASIC,
+                          PORT_CPU_CPUID_STD_SUBFUN_0);
+    if (status != NV_OK)
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return status;
+    }
+
+    if (function_id > cpuInfo[0])
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return NV_ERR_NOT_SUPPORTED;
+    }
+
+    return NV_OK;
+}
+
+static NV_INLINE NV_STATUS portCpuIntelGetVersion(PORT_CPU_INTEL *pCpuVersion)
+{
+    NV_STATUS status;
+    NvU32 cpuInfo[4];
+    NvU32 model;
+    NvU32 family;
+
+    if (pCpuVersion == NULL)
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    status = portCpuCheckStdFunSupport(PORT_CPU_CPUID_STD_FUN_VERSION);
+    if (status != NV_OK)
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return status;
+    }
+
+    portMemSet(cpuInfo, 0U, sizeof(cpuInfo));
+    status = portCpuCpuId(cpuInfo, PORT_CPU_CPUID_STD_FUN_VERSION,
+                          PORT_CPU_CPUID_STD_SUBFUN_0);
+    if (status != NV_OK)
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return status;
+    }
+
+    pCpuVersion->steppingId = PORT_CPU_CPUID_GET_STEPPING_ID(cpuInfo[0]);
+    family = PORT_CPU_CPUID_GET_FAMILY(cpuInfo[0]);
+    model = PORT_CPU_CPUID_GET_MODEL(cpuInfo[0]);
+    if ((family == 0x0FU) || (family == 0x06U))
+    {
+        model |= PORT_CPU_CPUID_GET_EXT_MODEL(cpuInfo[0])
+                 << PORT_CPU_CPUID_MODEL_SHIFT;
+    }
+
+    if (family == 0x0FU)
+    {
+        family += PORT_CPU_CPUID_GET_EXT_FAMILY(cpuInfo[0]);
+    }
+
+    pCpuVersion->family = family;
+    switch (pCpuVersion->family)
+    {
+        case PORT_CPU_INTEL_FAMILY_6:
+            pCpuVersion->model.family6 = model;
+            break;
+        default:
+            // Unknown family, ignore model.
+ break; + } + + pCpuVersion->brandIndex = PORT_CPU_CPUID_GET_BRAND_INDEX(cpuInfo[1]); + pCpuVersion->localApicId = PORT_CPU_CPUID_GET_LOCAL_APIC_ID(cpuInfo[1]); + pCpuVersion->processorType = PORT_CPU_CPUID_GET_PROCESSOR_TYPE(cpuInfo[1]); + + return NV_OK; +} + +static NV_INLINE NV_STATUS portCpuGetIntelFeatures(PORT_CPU_INTEL_X86_FEATURES + *pCpuFeatures) +{ + NV_STATUS status; + NvU32 cpuInfo[4]; + + if (pCpuFeatures == NULL) + { + PORT_BREAKPOINT_DEBUG(); + return NV_ERR_INVALID_ARGUMENT; + } + + status = portCpuCheckStdFunSupport(PORT_CPU_CPUID_STD_FUN_VERSION); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + portMemSet(cpuInfo, 0U, sizeof(cpuInfo)); + status = portCpuCpuId(cpuInfo, PORT_CPU_CPUID_STD_FUN_VERSION, + PORT_CPU_CPUID_STD_SUBFUN_0); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + pCpuFeatures->SSE3 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_SSE3_BIT); + pCpuFeatures->PCLMULQDQ = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_PCLMULQDQ_BIT); + pCpuFeatures->DTES64 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_DTES64_BIT); + pCpuFeatures->MONITOR = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_MONITOR_BIT); + pCpuFeatures->DSCPL = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_DSCPL_BIT); + pCpuFeatures->VMX = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_VMX_BIT); + pCpuFeatures->SMX = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_SMX_BIT); + pCpuFeatures->EIST = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_EIST_BIT); + pCpuFeatures->TM2 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_TM2_BIT); + pCpuFeatures->SSSE3 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_SSSE3_BIT); + pCpuFeatures->CNXTID = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_CNXTID_BIT); + pCpuFeatures->SDBG = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_SDBG_BIT); + pCpuFeatures->FMA = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_FMA_BIT); + pCpuFeatures->CMPXCHG16B = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_CMPXCHG16B_BIT); + pCpuFeatures->xTPRUpdateControl = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_XTPRUPDATECONTROL_BIT); + pCpuFeatures->PDCM = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_PDCM_BIT); + pCpuFeatures->PCID = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_PCID_BIT); + pCpuFeatures->DCA = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_DCA_BIT); + pCpuFeatures->SSE41 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_SSE41_BIT); + pCpuFeatures->SSE42 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_SSE42_BIT); + pCpuFeatures->x2APIC = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_x2APIC_BIT); + pCpuFeatures->MOVBE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_MOVBE_BIT); + pCpuFeatures->POPCNT = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_POPCNT_BIT); + pCpuFeatures->TSCDeadline = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_TSCDEADLINE_BIT); + pCpuFeatures->AES = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_AES_BIT); + pCpuFeatures->XSAVE = 
PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_XSAVE_BIT); + pCpuFeatures->OSXSAVE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_OSXSAVE_BIT); + pCpuFeatures->AVX = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_AVX_BIT); + pCpuFeatures->F16C = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_F16C_BIT); + pCpuFeatures->RDEND = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2], + PORT_CPU_CPUID_FEATURE_RDEND_BIT); + + + pCpuFeatures->FPU = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_FPU_BIT); + pCpuFeatures->VME = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_VME_BIT); + pCpuFeatures->DE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_DE_BIT); + pCpuFeatures->PSE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_PSE_BIT); + pCpuFeatures->TSC = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_TSC_BIT); + pCpuFeatures->MSR = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_MSR_BIT); + pCpuFeatures->PAE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_PAE_BIT); + pCpuFeatures->MCE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_MCE_BIT); + pCpuFeatures->CX8 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_CX8_BIT); + pCpuFeatures->APIC = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_APIC_BIT); + pCpuFeatures->SEP = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_SEP_BIT); + pCpuFeatures->MTRR = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_MTRR_BIT); + pCpuFeatures->PGE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_PGE_BIT); + pCpuFeatures->MCA = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_MCA_BIT); + pCpuFeatures->CMOV = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_CMOV_BIT); + pCpuFeatures->PAT = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_PAT_BIT); + pCpuFeatures->PSE36 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_PSE36_BIT); + pCpuFeatures->PSN = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_PSN_BIT); + pCpuFeatures->CLFSH = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_CLFSH_BIT); + pCpuFeatures->DEBUGS = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_DS_BIT); + pCpuFeatures->ACPI = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_ACPI_BIT); + pCpuFeatures->MMX = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_MMX_BIT); + pCpuFeatures->FXSR = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_FXSR_BIT); + pCpuFeatures->SSE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_SSE_BIT); + pCpuFeatures->SSE2 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_SSE2_BIT); + pCpuFeatures->SELFS = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_SS_BIT); + pCpuFeatures->HTT = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_HTT_BIT); + pCpuFeatures->TM = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_TM_BIT); + pCpuFeatures->PBE = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[3], + PORT_CPU_CPUID_FEATURE_PBE_BIT); + return NV_OK; +} + +static NV_INLINE NV_STATUS portCpuGetIntelArchPerfMonitor( + PORT_CPU_INTEL_ARCH_PERF_MONITOR *pCpuArchPerfMonitor) 
+{ + NV_STATUS status; + NvU32 cpuInfo[4]; + + if (pCpuArchPerfMonitor == NULL) + { + PORT_BREAKPOINT_DEBUG(); + return NV_ERR_INVALID_ARGUMENT; + } + + status = portCpuCheckStdFunSupport(PORT_CPU_CPUID_STD_FUN_TPM); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + portMemSet(cpuInfo, 0U, sizeof(cpuInfo)); + status = portCpuCpuId(cpuInfo, PORT_CPU_CPUID_STD_FUN_ARCH_PERF_MONITOR, + PORT_CPU_CPUID_STD_SUBFUN_0); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + pCpuArchPerfMonitor->versionId = + PORT_CPU_ARCH_PERF_MONITOR_VERSION_ID(cpuInfo[0]); + pCpuArchPerfMonitor->noOfGPPerfMonitoringCounters = + PORT_CPU_ARCH_PERF_MONITOR_COUNTERS(cpuInfo[0]); + pCpuArchPerfMonitor->bitWidthOfGPCounters = + PORT_CPU_ARCH_PERF_MONITOR_COUNTERS_BITWIDTH(cpuInfo[0]); + pCpuArchPerfMonitor->coreCycleEvent = + !PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[1], + PORT_CPU_ARCH_PERF_MONITOR_CORE_CYCLE_EVENT_BIT); + pCpuArchPerfMonitor->instructionRetiredEvent = + !PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[1], + PORT_CPU_ARCH_PERF_MONITOR_INSTR_RET_EVENT_BIT); + pCpuArchPerfMonitor->referenceCycelEvent = + !PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[1], + PORT_CPU_ARCH_PERF_MONITOR_REF_CYCLE_EVENT_BIT); + pCpuArchPerfMonitor->lastLevelCacheRefEvent = + !PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[1], + PORT_CPU_ARCH_PERF_MONITOR_LL_CACHE_REF_EVENT_BIT); + pCpuArchPerfMonitor->lastLevelCacheMissEvent = + !PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[1], + PORT_CPU_ARCH_PERF_MONITOR_LL_CACHE_MIS_EVENT_BIT); + pCpuArchPerfMonitor->branchInstructionRetiredEvent = + !PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[1], + PORT_CPU_ARCH_PERF_MONITOR_BRANCH_INSTR_RET_EVENT_BIT); + pCpuArchPerfMonitor->branchMispredictRetiredEvent = + !PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[1], + PORT_CPU_ARCH_PERF_MONITOR_BRANCH_MISPRE_RET_EVENT_BIT); + pCpuArchPerfMonitor->noOfFixedFuncPerfCounters = + PORT_CPU_ARCH_PERF_MONITOR_FIX_FUN_COUNTERS(cpuInfo[3]); + pCpuArchPerfMonitor->bitWidthOfFixedFuncPerfCounters = + PORT_CPU_ARCH_PERF_MONITOR_FIX_FUN_COUNTERS_BITWIDTH(cpuInfo[3]); + + return NV_OK; +} + +static NV_INLINE NV_STATUS portCpuGetIntelTPMFeatures( + PORT_CPU_INTEL_TPM_FEATURES *pCpuTPMFeatures) +{ + NV_STATUS status; + NvU32 cpuInfo[4]; + + if (pCpuTPMFeatures == NULL) + { + PORT_BREAKPOINT_DEBUG(); + return NV_ERR_INVALID_ARGUMENT; + } + + status = portCpuCheckStdFunSupport(PORT_CPU_CPUID_STD_FUN_TPM); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + portMemSet(cpuInfo, 0U, sizeof(cpuInfo)); + status = portCpuCpuId(cpuInfo, PORT_CPU_CPUID_STD_FUN_TPM, + PORT_CPU_CPUID_STD_SUBFUN_0); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + pCpuTPMFeatures->DTS = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0], + PORT_CPU_CPUID_TPM_FEATURE_DTS_BIT); + pCpuTPMFeatures->IntelTurboBoost = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0], + PORT_CPU_CPUID_TPM_FEATURE_ITB_BIT); + pCpuTPMFeatures->ARAT = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0], + PORT_CPU_CPUID_TPM_FEATURE_ARAT_BIT); + pCpuTPMFeatures->PLN = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0], + PORT_CPU_CPUID_TPM_FEATURE_PLN_BIT); + pCpuTPMFeatures->ECMD = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0], + PORT_CPU_CPUID_TPM_FEATURE_ECMD_BIT); + pCpuTPMFeatures->PTM = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0], + PORT_CPU_CPUID_TPM_FEATURE_PTM_BIT); + pCpuTPMFeatures->HWP = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0], + PORT_CPU_CPUID_TPM_FEATURE_HWP_BIT); + pCpuTPMFeatures->HWPNotification = 
PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_HWP_NOTIFICATION_BIT);
+    pCpuTPMFeatures->HWPActivityWindow = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_HWP_ACTIVITY_WINDOW_BIT);
+    pCpuTPMFeatures->HWPEPP = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_HWPEPP_BIT);
+    pCpuTPMFeatures->HWPPLR = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_HWPPLR_BIT);
+    pCpuTPMFeatures->HDC = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_HDC_BIT);
+    pCpuTPMFeatures->IntelTurboBoostMaxTech30 = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_ITBMT30_BIT);
+    pCpuTPMFeatures->HWPCapabilities = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_HWP_CAPABILITIES_BIT);
+    pCpuTPMFeatures->HWPPECI = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_HWPPECI_BIT);
+    pCpuTPMFeatures->FLEXHWP = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_FLEXHWP_BIT);
+    pCpuTPMFeatures->FAM = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_FAM_BIT);
+    pCpuTPMFeatures->ILPHWPRequest = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[0],
+                                        PORT_CPU_CPUID_TPM_FEATURE_ILPHWP_REQUEST_BIT);
+    pCpuTPMFeatures->NoOfInterruptThresholdsInDTS =
+                                        PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[1],
+                                        PORT_CPU_CPUID_TPM_FEATURE_NFITI_DTS_BIT);
+
+    pCpuTPMFeatures->HCFC = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2],
+                                        PORT_CPU_CPUID_TPM_FEATURE_HCFC_BIT);
+    pCpuTPMFeatures->PEBP = PORT_CPU_CPUID_FEATURE_GET_BIT(cpuInfo[2],
+                                        PORT_CPU_CPUID_TPM_FEATURE_PEBP_BIT);
+
+    return NV_OK;
+}
+
+static NV_INLINE NV_STATUS portCpuGetIntelTopology(PORT_CPU_INTEL* pCpuInfo)
+{
+    NV_STATUS status;
+    NvU32 cpuInfo[4];
+    NvU32 sub = 0;
+    NvBool cond = NV_TRUE;
+
+    status = portCpuCheckStdFunSupport(
+                 PORT_CPU_CPUID_STD_FUN_EXT_TOPOLOGY_ENUM);
+    if (status != NV_OK)
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return status;
+    }
+
+    do
+    {
+        portMemSet(cpuInfo, 0U, sizeof(cpuInfo));
+        status = portCpuCpuId(cpuInfo, PORT_CPU_CPUID_STD_FUN_EXT_TOPOLOGY_ENUM,
+                              sub);
+        if (status != NV_OK)
+        {
+            PORT_BREAKPOINT_DEBUG();
+            return status;
+        }
+
+        switch (PORT_CPU_EXT_TOPOLOGY_ENUM_GET_LEVEL_TYPE(cpuInfo[2]))
+        {
+            case PORT_CPU_EXT_TOPOLOGY_ENUM_LEVEL_TYPE_SMT:
+                pCpuInfo->threadCountPerCore =
+                    PORT_CPU_EXT_TOPOLOGY_ENUM_GET_LOGICAL_PROC_COUNT(cpuInfo[1]);
+                break;
+            case PORT_CPU_EXT_TOPOLOGY_ENUM_LEVEL_TYPE_CORE:
+                pCpuInfo->logicalCoreCount =
+                    PORT_CPU_EXT_TOPOLOGY_ENUM_GET_LOGICAL_PROC_COUNT(cpuInfo[1]);
+                break;
+            default:
+                cond = NV_FALSE;
+                break;
+        }
+
+        pCpuInfo->physicalCoreCount = pCpuInfo->logicalCoreCount / pCpuInfo->threadCountPerCore;
+        sub++;
+    } while (cond == NV_TRUE);
+
+    return NV_OK;
+}
+
+static NV_INLINE NV_STATUS portCpuGetVendorId(char* vendorId, NvLength length,
+                                              NvLength* pVendorIdLength)
+{
+    NV_STATUS status;
+    NvU32 cpuInfo[4];
+
+    if (length < PORT_CPU_CPUID_VENDOR_ID_LENGTH)
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return NV_ERR_BUFFER_TOO_SMALL;
+    }
+
+    if (pVendorIdLength == NULL || vendorId == NULL)
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return NV_ERR_INVALID_ARGUMENT;
+    }
+
+    portMemSet(cpuInfo, 0U, sizeof(cpuInfo));
+    status = portCpuCpuId(cpuInfo, PORT_CPU_CPUID_STD_FUN_BASIC,
+                          PORT_CPU_CPUID_STD_SUBFUN_0);
+    if (status != NV_OK)
+    {
+        PORT_BREAKPOINT_DEBUG();
+        return status;
+    }
+
+    portMemSet(vendorId, 0U, length);
+    NV_SWAP_U32(cpuInfo[2], cpuInfo[3]);
+    portMemCopy(vendorId, length, &cpuInfo[1],
PORT_CPU_CPUID_VENDOR_ID_LENGTH - 1); + *pVendorIdLength = PORT_CPU_CPUID_VENDOR_ID_LENGTH; + + return NV_OK; +} + +NV_STATUS portCpuGetInfo(PORT_CPU_INFORMATION *pCpuInfo) +{ + NV_STATUS status; + + if (pCpuInfo == NULL) + { + PORT_BREAKPOINT_DEBUG(); + return NV_ERR_INVALID_ARGUMENT; + } + + portMemSet(pCpuInfo, 0U, sizeof(PORT_CPU_INFORMATION)); + status = portCpuGetVendorId(pCpuInfo->vendorId, PORT_CPU_VENDOR_ID_LENGTH, + &pCpuInfo->vendorIdLength); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + if (portStringCompare(pCpuInfo->vendorId, + PORT_CPU_CPUID_VENDOR_ID_INTEL, PORT_CPU_CPUID_VENDOR_ID_LENGTH) == 0) + { + pCpuInfo->type = PORT_CPU_TYPE_INTEL_X86; + status = portCpuIntelGetVersion(&pCpuInfo->cpu.intel); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + status = portCpuGetIntelTopology(&pCpuInfo->cpu.intel); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + status = portCpuGetIntelTPMFeatures( + &pCpuInfo->cpu.intel.tpmFeatures); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + status = portCpuGetIntelArchPerfMonitor( + &pCpuInfo->cpu.intel.archPerfMonitor); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + + status = portCpuGetIntelFeatures(&pCpuInfo->cpu.intel.features); + if (status != NV_OK) + { + PORT_BREAKPOINT_DEBUG(); + return status; + } + } + else if (portStringCompare(pCpuInfo->vendorId, + PORT_CPU_CPUID_VENDOR_ID_AMD, PORT_CPU_CPUID_VENDOR_ID_LENGTH) == 0) + { + pCpuInfo->type = PORT_CPU_TYPE_AMD_X86; + } + + return NV_OK; +} +#endif diff --git a/src/nvidia/src/libraries/nvport/cpu/cpu_x86_amd64.h b/src/nvidia/src/libraries/nvport/cpu/cpu_x86_amd64.h new file mode 100644 index 000000000..ec99a53f3 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/cpu/cpu_x86_amd64.h @@ -0,0 +1,266 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief CPUID macros - X86, X86-64. 
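+ *
+ * The masks and shifts below follow the Intel CPUID leaf layouts (leaf 0x01
+ * version/feature information, leaf 0x06 thermal and power management,
+ * leaf 0x0A architectural performance monitoring, leaf 0x0B topology).
+ *
+ * Worked example (illustrative value): for CPUID.01H:EAX = 0x000906EA the
+ * macros yield stepping 0xA, base model 0xE and family 0x6; since the family
+ * is 0x6, portCpuIntelGetVersion() prepends the extended model (0x9), giving
+ * a display model of 0x9E.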
+ */ + +#define PORT_CPU_CPUID_STD_FUN_BASIC 0x00U +#define PORT_CPU_CPUID_STD_FUN_VERSION 0x01U +#define PORT_CPU_CPUID_STD_FUN_CACHE_TLB 0x02U +#define PORT_CPU_CPUID_STD_FUN_SERIAL_NO 0x03U +#define PORT_CPU_CPUID_STD_FUN_DETER_CACHE_PARAMS 0x04U +#define PORT_CPU_CPUID_STD_FUN_MONITOR_MWAIT 0x05U +#define PORT_CPU_CPUID_STD_FUN_TPM 0x06U +#define PORT_CPU_CPUID_STD_FUN_EXT_FEATURE_FLAGS 0x07U +#define PORT_CPU_CPUID_STD_FUN_DIRECT_CACHE_ACCESS 0x09U +#define PORT_CPU_CPUID_STD_FUN_ARCH_PERF_MONITOR 0x0AU +#define PORT_CPU_CPUID_STD_FUN_EXT_TOPOLOGY_ENUM 0x0BU +#define PORT_CPU_CPUID_STD_FUN_EXT_STATE_ENUM 0x0DU + +#define PORT_CPU_CPUID_STD_FUN_IRDT_ENUM 0x0FU +#define PORT_CPU_CPUID_STD_SUBFUN_IRDT_MONITOR_ENUM 0x00U +#define PORT_CPU_CPUID_STD_SUBFUN_IRDT_CAPABILITY_ENUM 0x01U + +#define PORT_CPU_CPUID_STD_FUN_IRDT_ALLOCTION_ENUM 0x10U +#define PORT_CPU_CPUID_STD_FUN_IRDT_L3_CACHE_ALLOC_ENUM 0x10U +#define PORT_CPU_CPUID_STD_SUBFUN_L3_CACHE_ALLOC_ENUM 0x00U +#define PORT_CPU_CPUID_STD_SUBFUN_L3_CACHE_ALLOC_TECH_1_ENUM 0x01U +#define PORT_CPU_CPUID_STD_SUBFUN_L3_CACHE_ALLOC_TECH_2_ENUM 0x02U +#define PORT_CPU_CPUID_STD_SUBFUN_L3_CACHE_ALLOC_TECH_3_ENUM 0x03U + +#define PORT_CPU_CPUID_STD_FUN_ISGX_CAPABILITY_ENUM 0x12U +#define PORT_CPU_CPUID_STD_SUBFUN_ISGX_CAPABILITY_ENUM 0x00U + +#define PORT_CPU_CPUID_STD_FUN_ISGX_ATTRIBUTES_ENUM 0x12U +#define PORT_CPU_CPUID_STD_SUBFUN_ISGX_ATTRIBUTES_ENUM 0x01U + +#define PORT_CPU_CPUID_STD_FUN_ISGX_EPC_ENUM 0x12U +#define PORT_CPU_CPUID_STD_SUBFUN_ISGX_EPC_ENUM 0x02U + +#define PORT_CPU_CPUID_STD_FUN_TRACE_ENUM 0x14U +#define PORT_CPU_CPUID_STD_SUBFUN_TRACE_0_ENUM 0x00U +#define PORT_CPU_CPUID_STD_SUBFUN_TRACE_1_ENUM 0x01U + +#define PORT_CPU_CPUID_STD_FUN_TSC_NCCCI 0x15U +#define PORT_CPU_CPUID_STD_FUN_FREQ_INFO 0x16U + +#define PORT_CPU_CPUID_STD_FUN_SOC_VENDOR_ATTR_ENUM 0x17U +#define PORT_CPU_CPUID_STD_SUBFUN_SOC_VENDOR_ATTR_0_ENUM 0x00U +#define PORT_CPU_CPUID_STD_SUBFUN_SOC_VENDOR_ATTR_1_ENUM 0x01U +#define PORT_CPU_CPUID_STD_SUBFUN_SOC_VENDOR_ATTR_2_ENUM 0x02U +#define PORT_CPU_CPUID_STD_SUBFUN_SOC_VENDOR_ATTR_3_ENUM 0x03U + +#define PORT_CPU_CPUID_STD_FUN_ADDRESS_TRANSLATION_PARAMS 0x18U +#define PORT_CPU_CPUID_STD_FUN_EXTENDED_TOPOLOGY_ENUM 0x1FU + +#define PORT_CPU_CPUID_EXT_FUN_BASIC 0x80000000U +#define PORT_CPU_CPUID_EXT_SUBFUN_0 0x0U + +#define PORT_CPU_CPUID_EXT_FUN_FEATURES 0x80000001U + +#define PORT_CPU_CPUID_STD_SUBFUN_0 0x00U + +#define PORT_CPU_CPUID_STEPPING_ID_MASK 0x0000000FU +#define PORT_CPU_CPUID_STEPPING_ID_SHIFT 0U +#define PORT_CPU_CPUID_GET_STEPPING_ID(a) ((a) \ + & 0x0000000FU) + +#define PORT_CPU_CPUID_MODEL_MASK 0x000000F0U +#define PORT_CPU_CPUID_MODEL_SHIFT 4U +#define PORT_CPU_CPUID_GET_MODEL(a) (((a) \ + & 0x000000F0U)\ + >> 4U) + +#define PORT_CPU_CPUID_FAMILY_ID_MASK 0x00000F00U +#define PORT_CPU_CPUID_FAMILY_ID_SHIFT 8U +#define PORT_CPU_CPUID_GET_FAMILY(a) (((a) \ + & 0x00000F00U)\ + >> 8U) + +#define PORT_CPU_CPUID_EXT_MODEL_MASK 0x000F0000U +#define PORT_CPU_CPUID_EXT_MODEL_SHIFT 16U +#define PORT_CPU_CPUID_GET_EXT_MODEL(a) (((a) \ + & 0x000F0000U)\ + >> 16U) + +#define PORT_CPU_CPUID_EXT_FAMILY_ID_MASK 0x0FF00000U +#define PORT_CPU_CPUID_EXT_FAMILY_ID_SHIFT 20U +#define PORT_CPU_CPUID_GET_EXT_FAMILY(a) (((a) \ + & 0x0FF00000U)\ + >> 20U) + +#define PORT_CPU_CPUID_PROCESSOR_TYPE_MASK 0x00003000U +#define PORT_CPU_CPUID_PROCESSOR_TYPE_SHIFT 12U +#define PORT_CPU_CPUID_GET_PROCESSOR_TYPE(a) (((a) \ + & 0x00003000U)\ + >> 12U) + +#define PORT_CPU_CPUID_BRAND_INDEX_MASK 0x000000FFU +#define 
PORT_CPU_CPUID_BRAND_INDEX_SHIFT 0U +#define PORT_CPU_CPUID_GET_BRAND_INDEX(a) ((a) \ + & 0x000000FFU) + +#define PORT_CPU_CPUID_MAX_LOGICAL_PROC_MASK 0x00FF0000U +#define PORT_CPU_CPUID_MAX_LOGICAL_PROC_SHIFT 16U + +#define PORT_CPU_CPUID_LOCAL_APIC_ID_MASK 0xFF000000U +#define PORT_CPU_CPUID_LOCAL_APIC_ID_SHIFT 24U +#define PORT_CPU_CPUID_GET_LOCAL_APIC_ID(a) (((a) \ + & 0xFF000000U)\ + >> 24U) + +#define PORT_CPU_CPUID_FEATURE_SSE3_BIT (1U << 0U) +#define PORT_CPU_CPUID_FEATURE_PCLMULQDQ_BIT (1U << 1U) +#define PORT_CPU_CPUID_FEATURE_DTES64_BIT (1U << 2U) +#define PORT_CPU_CPUID_FEATURE_MONITOR_BIT (1U << 3U) +#define PORT_CPU_CPUID_FEATURE_DSCPL_BIT (1U << 4U) +#define PORT_CPU_CPUID_FEATURE_VMX_BIT (1U << 5U) +#define PORT_CPU_CPUID_FEATURE_SMX_BIT (1U << 6U) +#define PORT_CPU_CPUID_FEATURE_EIST_BIT (1U << 7U) +#define PORT_CPU_CPUID_FEATURE_TM2_BIT (1U << 8U) +#define PORT_CPU_CPUID_FEATURE_SSSE3_BIT (1U << 9U) +#define PORT_CPU_CPUID_FEATURE_CNXTID_BIT (1U << 10U) +#define PORT_CPU_CPUID_FEATURE_SDBG_BIT (1U << 11U) +#define PORT_CPU_CPUID_FEATURE_FMA_BIT (1U << 12U) +#define PORT_CPU_CPUID_FEATURE_CMPXCHG16B_BIT (1U << 13U) +#define PORT_CPU_CPUID_FEATURE_XTPRUPDATECONTROL_BIT (1U << 14U) +#define PORT_CPU_CPUID_FEATURE_PDCM_BIT (1U << 15U) +#define PORT_CPU_CPUID_FEATURE_PCID_BIT (1U << 17U) +#define PORT_CPU_CPUID_FEATURE_DCA_BIT (1U << 18U) +#define PORT_CPU_CPUID_FEATURE_SSE41_BIT (1U << 19U) +#define PORT_CPU_CPUID_FEATURE_SSE42_BIT (1U << 20U) +#define PORT_CPU_CPUID_FEATURE_x2APIC_BIT (1U << 21U) +#define PORT_CPU_CPUID_FEATURE_MOVBE_BIT (1U << 22U) +#define PORT_CPU_CPUID_FEATURE_POPCNT_BIT (1U << 23U) +#define PORT_CPU_CPUID_FEATURE_TSCDEADLINE_BIT (1U << 24U) +#define PORT_CPU_CPUID_FEATURE_AES_BIT (1U << 25U) +#define PORT_CPU_CPUID_FEATURE_XSAVE_BIT (1U << 26U) +#define PORT_CPU_CPUID_FEATURE_OSXSAVE_BIT (1U << 27U) +#define PORT_CPU_CPUID_FEATURE_AVX_BIT (1U << 28U) +#define PORT_CPU_CPUID_FEATURE_F16C_BIT (1U << 29U) +#define PORT_CPU_CPUID_FEATURE_RDEND_BIT (1U << 30U) + +#define PORT_CPU_CPUID_FEATURE_FPU_BIT (1U << 0U) +#define PORT_CPU_CPUID_FEATURE_VME_BIT (1U << 1U) +#define PORT_CPU_CPUID_FEATURE_DE_BIT (1U << 2U) +#define PORT_CPU_CPUID_FEATURE_PSE_BIT (1U << 3U) +#define PORT_CPU_CPUID_FEATURE_TSC_BIT (1U << 4U) +#define PORT_CPU_CPUID_FEATURE_MSR_BIT (1U << 5U) +#define PORT_CPU_CPUID_FEATURE_PAE_BIT (1U << 6U) +#define PORT_CPU_CPUID_FEATURE_MCE_BIT (1U << 7U) +#define PORT_CPU_CPUID_FEATURE_CX8_BIT (1U << 8U) +#define PORT_CPU_CPUID_FEATURE_APIC_BIT (1U << 9U) +#define PORT_CPU_CPUID_FEATURE_SEP_BIT (1U << 11U) +#define PORT_CPU_CPUID_FEATURE_MTRR_BIT (1U << 12U) +#define PORT_CPU_CPUID_FEATURE_PGE_BIT (1U << 13U) +#define PORT_CPU_CPUID_FEATURE_MCA_BIT (1U << 14U) +#define PORT_CPU_CPUID_FEATURE_CMOV_BIT (1U << 15U) +#define PORT_CPU_CPUID_FEATURE_PAT_BIT (1U << 16U) +#define PORT_CPU_CPUID_FEATURE_PSE36_BIT (1U << 17U) +#define PORT_CPU_CPUID_FEATURE_PSN_BIT (1U << 18U) +#define PORT_CPU_CPUID_FEATURE_CLFSH_BIT (1U << 19U) +#define PORT_CPU_CPUID_FEATURE_DS_BIT (1U << 21U) +#define PORT_CPU_CPUID_FEATURE_ACPI_BIT (1U << 22U) +#define PORT_CPU_CPUID_FEATURE_MMX_BIT (1U << 23U) +#define PORT_CPU_CPUID_FEATURE_FXSR_BIT (1U << 24U) +#define PORT_CPU_CPUID_FEATURE_SSE_BIT (1U << 25U) +#define PORT_CPU_CPUID_FEATURE_SSE2_BIT (1U << 26U) +#define PORT_CPU_CPUID_FEATURE_SS_BIT (1U << 27U) +#define PORT_CPU_CPUID_FEATURE_HTT_BIT (1U << 28U) +#define PORT_CPU_CPUID_FEATURE_TM_BIT (1U << 29U) +#define PORT_CPU_CPUID_FEATURE_PBE_BIT (1U << 31U) + +#define 
PORT_CPU_CPUID_TPM_FEATURE_DTS_BIT (1U << 0U) +#define PORT_CPU_CPUID_TPM_FEATURE_ITB_BIT (1U << 1U) +#define PORT_CPU_CPUID_TPM_FEATURE_ARAT_BIT (1U << 2U) +#define PORT_CPU_CPUID_TPM_FEATURE_PLN_BIT (1U << 4U) +#define PORT_CPU_CPUID_TPM_FEATURE_ECMD_BIT (1U << 5U) +#define PORT_CPU_CPUID_TPM_FEATURE_PTM_BIT (1U << 6U) +#define PORT_CPU_CPUID_TPM_FEATURE_HWP_BIT (1U << 7U) +#define PORT_CPU_CPUID_TPM_FEATURE_HWP_NOTIFICATION_BIT (1U << 8U) +#define PORT_CPU_CPUID_TPM_FEATURE_HWP_ACTIVITY_WINDOW_BIT (1U << 9U) +#define PORT_CPU_CPUID_TPM_FEATURE_HWPEPP_BIT (1U << 10U) +#define PORT_CPU_CPUID_TPM_FEATURE_HWPPLR_BIT (1U << 11U) +#define PORT_CPU_CPUID_TPM_FEATURE_HDC_BIT (1U << 12U) +#define PORT_CPU_CPUID_TPM_FEATURE_ITBMT30_BIT (1U << 13U) +#define PORT_CPU_CPUID_TPM_FEATURE_HWP_CAPABILITIES_BIT (1U << 14U) +#define PORT_CPU_CPUID_TPM_FEATURE_HWPPECI_BIT (1U << 15U) +#define PORT_CPU_CPUID_TPM_FEATURE_FLEXHWP_BIT (1U << 16U) +#define PORT_CPU_CPUID_TPM_FEATURE_FAM_BIT (1U << 17U) +#define PORT_CPU_CPUID_TPM_FEATURE_ILPHWP_REQUEST_BIT (1U << 18U) + +#define PORT_CPU_CPUID_TPM_FEATURE_NFITI_DTS_BIT (1U << 0U) + +#define PORT_CPU_CPUID_TPM_FEATURE_HCFC_BIT (1U << 0U) +#define PORT_CPU_CPUID_TPM_FEATURE_PEBP_BIT (1U << 3U) + +// +// Refer chapter 18 of 64-ia-32-architectures-software-developer-vol-3b-part-2 +// manual for Perf Monitor defines below +// +// Bits 7:0 of CPUID.0AH.EAX indicate the version of the Performance Monitor +#define PORT_CPU_ARCH_PERF_MONITOR_VERSION_ID(a) ((a) & 0xFFU) +// +// Bits 15:8 of CPUID.0AH.EAX indicate the number of performance +// counters available on the logical processor +// +#define PORT_CPU_ARCH_PERF_MONITOR_COUNTERS(a) (((a) & 0xFF00U)\ + >> 8U) +// Bits 23:16 of CPUID.0AH.EAX indicate the width of counters +#define PORT_CPU_ARCH_PERF_MONITOR_COUNTERS_BITWIDTH(a) (((a) & 0xFF0000U)\ + >> 16U) +// +// Bits 4:0 of CPUID.0AH.EDX indicate the number of fixed-function performance +// counters available per thread +// +#define PORT_CPU_ARCH_PERF_MONITOR_FIX_FUN_COUNTERS(a) ((a) & 0x1FU) +// Bits 12:5 of CPUID.0AH.EDX indicate width of fixed-function counters +#define PORT_CPU_ARCH_PERF_MONITOR_FIX_FUN_COUNTERS_BITWIDTH(a) (((a) & 0x1FE0U) \ + >> 5U) + +#define PORT_CPU_ARCH_PERF_MONITOR_CORE_CYCLE_EVENT_BIT (1U << 0U) +#define PORT_CPU_ARCH_PERF_MONITOR_INSTR_RET_EVENT_BIT (1U << 1U) +#define PORT_CPU_ARCH_PERF_MONITOR_REF_CYCLE_EVENT_BIT (1U << 2U) +#define PORT_CPU_ARCH_PERF_MONITOR_LL_CACHE_REF_EVENT_BIT (1U << 3U) +#define PORT_CPU_ARCH_PERF_MONITOR_LL_CACHE_MIS_EVENT_BIT (1U << 4U) +#define PORT_CPU_ARCH_PERF_MONITOR_BRANCH_INSTR_RET_EVENT_BIT (1U << 5U) +#define PORT_CPU_ARCH_PERF_MONITOR_BRANCH_MISPRE_RET_EVENT_BIT (1U << 6U) + +#define PORT_CPU_EXT_TOPOLOGY_ENUM_GET_LOGICAL_PROC_COUNT(a) ((a) & 0xFFFFU) +#define PORT_CPU_EXT_TOPOLOGY_ENUM_GET_LEVEL(a) ((a) & 0xFFU) +#define PORT_CPU_EXT_TOPOLOGY_ENUM_GET_LEVEL_TYPE(a) (((a) & 0xFF00U) >> 8) + +#define PORT_CPU_EXT_TOPOLOGY_ENUM_LEVEL_TYPE_SMT 1 +#define PORT_CPU_EXT_TOPOLOGY_ENUM_LEVEL_TYPE_CORE 2 +#define PORT_CPU_EXT_TOPOLOGY_ENUM_LEVEL_TYPE_MODULE 3 +#define PORT_CPU_EXT_TOPOLOGY_ENUM_LEVEL_TYPE_TILE 4 +#define PORT_CPU_EXT_TOPOLOGY_ENUM_LEVEL_TYPE_DIE 5 + +#define PORT_CPU_CPUID_FEATURE_GET_BIT(featureBitMap, bit) (((featureBitMap) \ + & (bit)) == (bit)) + +#define PORT_CPU_CPUID_VENDOR_ID_LENGTH 13U +#define PORT_CPU_CPUID_VENDOR_ID_INTEL "GenuineIntel" +#define PORT_CPU_CPUID_VENDOR_ID_AMD "AuthenticAMD" diff --git a/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c 
b/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c new file mode 100644 index 000000000..d8be920e4 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/crypto/crypto_random_xorshift.c @@ -0,0 +1,190 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief CRYPTO module PRNG implementation using the xorshift algorithm. + * + * For details about the Xorshift algorithms, see: + * https://en.wikipedia.org/wiki/Xorshift + * + * @note Xorshift algorithms take either 128bit or 1024bit seeds. The algorithm + * author suggests seeding a splitmix64.c with a 64bit value, and using its + * output to seed xorshift. + * See http://xorshift.di.unimi.it/ for details. + * + * @warning Xorshift algorithms are NOT CRYPTOGRAPHICALLY SECURE. They generally + * perform really well on various randomness tests, but are not suitable for + * security sensitive operations such as key generation. If you require a CSRNG + * use @ref portCryptoExTrueRandomGetU32 and family. + */ +#include "nvport/nvport.h" + + +/** + * @brief Number of 64bit words used to store the state of the algorithm. + * xorshift128+ uses 2 qwords of state, and xorshift1024* uses 16 qwords + */ +#define XORSHIFT_STATE_QWORDS 2 + +struct PORT_CRYPTO_PRNG +{ + NvU64 state[XORSHIFT_STATE_QWORDS]; +}; +PORT_CRYPTO_PRNG *portCryptoDefaultGenerator; + +void portCryptoInitialize() +{ + NvU64 seed; +#if defined(PORT_CRYPTO_PRNG_SEED) + seed = PORT_CRYPTO_PRNG_SEED; +#elif PORT_IS_FUNC_SUPPORTED(portCryptoExTrueRandomGetU64) + seed = portCryptoExTrueRandomGetU64(); +#elif PORT_IS_MODULE_SUPPORTED(time) + seed = portTimeGetUptimeNanosecondsHighPrecision(); +#elif defined(NVRM) && !defined(NVWATCH) + { + extern NvU64 osGetTimestamp(void); + seed = osGetTimestamp(); + } +#else + seed = (NvUPtr)&portCryptoDefaultGenerator; +#endif + portCryptoPseudoRandomSetSeed(seed); +} + +void portCryptoShutdown() +{ + portCryptoPseudoRandomGeneratorDestroy(portCryptoDefaultGenerator); + portCryptoDefaultGenerator = NULL; +} + + +/** + * @brief Initializes a xorshift state from a 64bit seed. Performed using a + * splitmix64 PRNG. 
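+ *
+ * Expanding the 64-bit seed through splitmix64 spreads entropy across both
+ * state words, so closely related seeds do not yield correlated streams and
+ * the all-zero state (a fixed point of the xorshift update) is avoided.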
+ * + * Adapted from: http://xorshift.di.unimi.it/splitmix64.c + */ +static void _initState(NvU64 seed64, NvU64 state[XORSHIFT_STATE_QWORDS]) +{ + NvU32 i; + for (i = 0; i < XORSHIFT_STATE_QWORDS; i++) + { + NvU64 z = (seed64 += 0x9E3779B97F4A7C15ULL); + z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL; + z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL; + state[i] = z ^ (z >> 31); + } +} + +/** + * @brief Get the next 64bit value using the xorshift128+ algorithm + * + * Adapted from: http://xorshift.di.unimi.it/xorshift128plus.c + */ +static NvU64 _xorshift128plus_GetU64(NvU64 state[2]) +{ + NvU64 s1 = state[0]; + const NvU64 s0 = state[1]; + state[0] = s0; + s1 ^= s1 << 23; // a + state[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); // b, c + return state[1] + s0; +} + +PORT_CRYPTO_PRNG *portCryptoPseudoRandomGeneratorCreate(NvU64 seed) +{ + PORT_CRYPTO_PRNG *pPrng = portMemAllocNonPaged(sizeof(*pPrng)); + + if (pPrng != NULL) + { + _initState(seed, pPrng->state); + } + return pPrng; +} + +void portCryptoPseudoRandomGeneratorDestroy(PORT_CRYPTO_PRNG *pPrng) +{ + portMemFree(pPrng); +} + +NvU32 portCryptoPseudoRandomGeneratorGetU32(PORT_CRYPTO_PRNG *pPrng) +{ + + return (NvU32) _xorshift128plus_GetU64(pPrng->state); +} +NvU64 portCryptoPseudoRandomGeneratorGetU64(PORT_CRYPTO_PRNG *pPrng) +{ + return _xorshift128plus_GetU64(pPrng->state); +} + +NV_STATUS portCryptoPseudoRandomGeneratorFillBuffer(PORT_CRYPTO_PRNG *pPrng, NvU8 *pBuffer, NvLength bufSize) +{ + NvLength i; + + PORT_ASSERT_CHECKED(pPrng != NULL); + + /** @note Unlike True Random generators which don't have seeds, here we must + * preserve the complete order of bytes across platforms. That means that + * we cannot fill the misaligned section first, then copy aligned qwords, + * and then fill the remainder - That way we lose some bytes + */ + + // Maybe require 64bit alignment for buffers: + // PORT_ASSERT_CHECKED(portUtilCheckAlignment(pBuffer, sizeof(NvU64))); + + if (pBuffer == NULL) + return NV_ERR_INVALID_POINTER; + + for (i = 0; i < bufSize; i+=8) + { + NvU64 x = _xorshift128plus_GetU64(pPrng->state); + portMemCopy(pBuffer+i, bufSize-i, &x, (bufSize-i < 8) ? bufSize-i : 8); + } + + return NV_OK; +} + + +void portCryptoPseudoRandomSetSeed(NvU64 seed) +{ + if (portCryptoDefaultGenerator) + portCryptoPseudoRandomGeneratorDestroy(portCryptoDefaultGenerator); + portCryptoDefaultGenerator = portCryptoPseudoRandomGeneratorCreate(seed); +} + +NvU32 portCryptoPseudoRandomGetU32() +{ + return portCryptoPseudoRandomGeneratorGetU32(portCryptoDefaultGenerator); +} + +NvU64 portCryptoPseudoRandomGetU64() +{ + return portCryptoPseudoRandomGeneratorGetU64(portCryptoDefaultGenerator); +} + +NV_STATUS portCryptoPseudoRandomFillBuffer(NvU8 *pBuffer, NvLength bufSize) +{ + return portCryptoPseudoRandomGeneratorFillBuffer(portCryptoDefaultGenerator, pBuffer, bufSize); +} diff --git a/src/nvidia/src/libraries/nvport/memory/memory_generic.h b/src/nvidia/src/libraries/nvport/memory/memory_generic.h new file mode 100644 index 000000000..037eee605 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/memory/memory_generic.h @@ -0,0 +1,222 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief MEMORY module tracking functions implementation + * + */ + +#include "nvport/nvport.h" + +// Define accessor macros if not already defined +#ifndef PORT_MEM_RD08 +#define PORT_MEM_RD08(p) (*(p)) +#endif +#ifndef PORT_MEM_RD16 +#define PORT_MEM_RD16(p) (*(p)) +#endif +#ifndef PORT_MEM_RD32 +#define PORT_MEM_RD32(p) (*(p)) +#endif +#ifndef PORT_MEM_RD64 +#define PORT_MEM_RD64(p) (*(p)) +#endif +#ifndef PORT_MEM_WR08 +#define PORT_MEM_WR08(p, v) (*(p) = (v)) +#endif +#ifndef PORT_MEM_WR16 +#define PORT_MEM_WR16(p, v) (*(p) = (v)) +#endif +#ifndef PORT_MEM_WR32 +#define PORT_MEM_WR32(p, v) (*(p) = (v)) +#endif +#ifndef PORT_MEM_WR64 +#define PORT_MEM_WR64(p, v) (*(p) = (v)) +#endif + + +#if defined(PORT_MEM_USE_GENERIC_portMemSetPattern) +void * +portMemSetPattern +( + void *pData, + NvLength lengthBytes, + const NvU8 *pPattern, + NvLength patternBytes +) +{ + PORT_ASSERT_CHECKED(pData != NULL); + PORT_ASSERT_CHECKED(pPattern != NULL); + PORT_ASSERT_CHECKED(patternBytes > 0); + + if (lengthBytes > 0) + { + void *p = pData; + while (lengthBytes > patternBytes) + { + portMemCopy(p, patternBytes, pPattern, patternBytes); + p = (NvU8*)p + patternBytes; + lengthBytes -= patternBytes; + } + portMemCopy(p, lengthBytes, pPattern, lengthBytes); + } + return pData; +} +#endif + +#if defined(PORT_MEM_USE_GENERIC_portMemMove) +void * +portMemMove +( + void *pDestination, + NvLength destSize, + const void *pSource, + NvLength srcSize +) +{ + NvU32 *pDst32; + NvU8 *pDst8; + const NvU32 *pSrc32; + const NvU8 *pSrc8; + NvLength dwords = 0; + NvLength bytes = srcSize; + PORT_ASSERT_CHECKED(pDestination != NULL); + PORT_ASSERT_CHECKED(pSource != NULL); + PORT_ASSERT_CHECKED(srcSize <= destSize); + + if (pDestination == NULL || pSource == NULL || srcSize > destSize) + { + return NULL; + } + + if (pDestination == pSource) + { + return pDestination; + } + + if ((((NvUPtr)pSource & 3) == 0) && (((NvUPtr)pDestination & 3) == 0)) + { + dwords = srcSize / sizeof(NvU32); + bytes = srcSize % sizeof(NvU32); + } + + if (pDestination > pSource) + { + pDst8 = (NvU8*)pDestination + srcSize; + pSrc8 = (const NvU8*)pSource + srcSize; + + while (bytes--) + { + PORT_MEM_WR08(--pDst8, PORT_MEM_RD08(--pSrc8)); + } + pDst32 = (NvU32*)pDst8; + pSrc32 = (const NvU32*)pSrc8; + while (dwords--) + { + PORT_MEM_WR32(--pDst32, PORT_MEM_RD32(--pSrc32)); + } + } + else + { + pDst32 = 
(NvU32*)pDestination; + pSrc32 = (const NvU32*)pSource; + + while (dwords--) + { + PORT_MEM_WR32(pDst32++, PORT_MEM_RD32(pSrc32++)); + } + pDst8 = (NvU8*)pDst32; + pSrc8 = (const NvU8*)pSrc32; + while (bytes--) + { + PORT_MEM_WR08(pDst8++, PORT_MEM_RD08(pSrc8++)); + } + } + return pDestination; +} +#endif + +#if defined(PORT_MEM_USE_GENERIC_portMemCopy) +void * +portMemCopy +( + void *pDestination, + NvLength destSize, + const void *pSource, + NvLength srcSize +) +{ + // API guarantees this is a NOP when destSize==0 + if (destSize == 0) + return pDestination; + + PORT_ASSERT_CHECKED(!portUtilCheckOverlap((const NvU8*)pDestination, destSize, + (const NvU8*)pSource, srcSize)); + return portMemMove(pDestination, destSize, pSource, srcSize); +} +#endif + + +#if defined(PORT_MEM_USE_GENERIC_portMemCmp) +NvS32 +portMemCmp +( + const void *pData0, + const void *pData1, + NvLength lengthBytes +) +{ + const NvU8 *p0 = (const NvU8*)pData0; + const NvU8 *p1 = (const NvU8*)pData1; + PORT_ASSERT_CHECKED(pData0 != NULL); + PORT_ASSERT_CHECKED(pData1 != NULL); + PORT_ASSERT_CHECKED(lengthBytes > 0); + while (lengthBytes--) + { + NvU8 u0 = PORT_MEM_RD08(p0++); + NvU8 u1 = PORT_MEM_RD08(p1++); + if (u0 != u1) + return u0 - u1; + } + return 0; +} +#endif + +#if defined(PORT_MEM_USE_GENERIC_portMemSet) +void * +portMemSet +( + void *pData, + NvU8 value, + NvLength lengthBytes +) +{ + NvLength i; + for (i = 0; i < lengthBytes; i++) + { + PORT_MEM_WR08(((NvU8 *)pData)+i, value); + } + return pData; +} +#endif diff --git a/src/nvidia/src/libraries/nvport/memory/memory_tracking.c b/src/nvidia/src/libraries/nvport/memory/memory_tracking.c new file mode 100644 index 000000000..fd569476b --- /dev/null +++ b/src/nvidia/src/libraries/nvport/memory/memory_tracking.c @@ -0,0 +1,1340 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +/** + * @file + * @brief MEMORY module tracking functions implementation + * + */ + +#include "nvport/nvport.h" +#include + +#if !PORT_IS_MODULE_SUPPORTED(debug) +#error "DEBUG module must be present for memory tracking" +#endif + +#if !PORT_IS_MODULE_SUPPORTED(atomic) +#error "ATOMIC module must be present for memory tracking" +#endif + + +struct PORT_MEM_ALLOCATOR_IMPL +{ + PORT_MEM_ALLOCATOR_TRACKING tracking; +}; + +// +// Debug print macros +// +#if PORT_MEM_TRACK_PRINT_LEVEL == PORT_MEM_TRACK_PRINT_LEVEL_SILENT +#define PORT_MEM_PRINT_ERROR(...) +#define PORT_MEM_PRINT_INFO(...) +#elif PORT_MEM_TRACK_PRINT_LEVEL == PORT_MEM_TRACK_PRINT_LEVEL_BASIC +#define PORT_MEM_PRINT_ERROR(...) portDbgPrintf(__VA_ARGS__) +#define PORT_MEM_PRINT_INFO(...) +#else +#define PORT_MEM_PRINT_ERROR(...) portDbgPrintf(__VA_ARGS__) +#define PORT_MEM_PRINT_INFO(...) portDbgPrintf(__VA_ARGS__) +#endif + +// Simple implementation of a spinlock that is going to be used where sync module is not included. +#if !PORT_IS_MODULE_SUPPORTED(sync) +typedef volatile NvU32 PORT_SPINLOCK; +static NvLength portSyncSpinlockSize = sizeof(PORT_SPINLOCK); +static NV_STATUS portSyncSpinlockInitialize(PORT_SPINLOCK *pSpinlock) +{ + *pSpinlock = 0; + return NV_OK; +} +static void portSyncSpinlockAcquire(PORT_SPINLOCK *pSpinlock) +{ + while (!portAtomicCompareAndSwapU32(pSpinlock, 1, 0)); +} +static void portSyncSpinlockRelease(PORT_SPINLOCK *pSpinlock) +{ + portAtomicSetU32(pSpinlock, 0); +} +static void portSyncSpinlockDestroy(PORT_SPINLOCK *pSpinlock) +{ + PORT_UNREFERENCED_VARIABLE(pSpinlock); +} +#endif + +#define PORT_MEM_LOCK_INIT(lock) \ + do { \ + lock = _portMemAllocNonPagedUntracked(portSyncSpinlockSize); \ + portSyncSpinlockInitialize(lock); \ + } while (0) +#define PORT_MEM_LOCK_DESTROY(lock) \ + do { \ + portSyncSpinlockDestroy(lock); \ + _portMemFreeUntracked(lock); \ + } while(0) +#define PORT_MEM_LOCK_ACQUIRE(lock) portSyncSpinlockAcquire(lock) +#define PORT_MEM_LOCK_RELEASE(lock) portSyncSpinlockRelease(lock) + + +// +// List link operation that operates on structures that have pNext and pPrev +// fields. Assumes the root always exists. +// +#define PORT_LOCKED_LIST_LINK(pRoot, pNode, lock) \ + do { \ + PORT_MEM_LOCK_ACQUIRE(lock); \ + (pNode)->pNext = (pRoot); \ + (pNode)->pPrev = (pRoot)->pPrev; \ + (pRoot)->pPrev = (pNode); \ + (pNode)->pPrev->pNext = (pNode); \ + PORT_MEM_LOCK_RELEASE(lock); \ + } while(0) + +#define PORT_LOCKED_LIST_UNLINK(pRoot, pNode, lock) \ + do { \ + PORT_MEM_LOCK_ACQUIRE(lock); \ + (pNode)->pNext->pPrev = (pNode)->pPrev; \ + (pNode)->pPrev->pNext = (pNode)->pNext; \ + PORT_MEM_LOCK_RELEASE(lock); \ + } while (0) + + + +// +// Memory counter implementation +// +#if PORT_MEM_TRACK_USE_COUNTER +static NV_INLINE void +_portMemCounterInit +( + PORT_MEM_COUNTER *pCounter +) +{ + portMemSet(pCounter, 0, sizeof(*pCounter)); +} +static NV_INLINE void +_portMemCounterInc +( + PORT_MEM_COUNTER *pCounter, + NvLength size +) +{ + NvU32 activeAllocs; + NvLength activeSize = 0; + + activeAllocs = portAtomicIncrementU32(&pCounter->activeAllocs); + portAtomicIncrementU32(&pCounter->totalAllocs); + if (PORT_MEM_TRACK_USE_FENCEPOSTS) + { + activeSize = portAtomicAddSize(&pCounter->activeSize, size); + } + portAtomicAddSize(&pCounter->totalSize, size); + + // Atomically compare the peak value with the active, and update if greater. 
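+    // This is the usual lock-free maximum update: re-read the current peak,
+    // stop once the active value no longer exceeds it, and otherwise attempt
+    // to publish the larger value with a compare-and-swap, retrying if
+    // another thread raced in first.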
+ while (1) + { + NvU32 peakAllocs = pCounter->peakAllocs; + if (activeAllocs <= peakAllocs) + break; + portAtomicCompareAndSwapU32(&pCounter->peakAllocs, activeAllocs, peakAllocs); + } + while (1) + { + NvLength peakSize = pCounter->peakSize; + if (activeSize <= peakSize) + break; + portAtomicCompareAndSwapSize(&pCounter->peakSize, activeSize, peakSize); + } +} +static NV_INLINE void +_portMemCounterDec +( + PORT_MEM_COUNTER *pCounter, + void *pMem +) +{ + portAtomicDecrementU32(&pCounter->activeAllocs); + if (PORT_MEM_TRACK_USE_FENCEPOSTS) + { + portAtomicSubSize(&pCounter->activeSize, + ((PORT_MEM_FENCE_HEAD *)pMem-1)->blockSize); + } +} + +#define PORT_MEM_COUNTER_INIT(pCounter) _portMemCounterInit(pCounter) +#define PORT_MEM_COUNTER_INC(pCounter, size) _portMemCounterInc(pCounter, size) +#define PORT_MEM_COUNTER_DEC(pCounter, pMem) _portMemCounterDec(pCounter, pMem) +#else +#define PORT_MEM_COUNTER_INIT(x) +#define PORT_MEM_COUNTER_INC(x, y) +#define PORT_MEM_COUNTER_DEC(x, y) +#endif // COUNTER + + +// +// Memory fenceposts implementation +// +#if PORT_MEM_TRACK_USE_FENCEPOSTS +#define PORT_MEM_FENCE_HEAD_MAGIC 0x68656164 // 'head' +#define PORT_MEM_FENCE_TAIL_MAGIC 0x7461696c // 'tail' + +static NV_INLINE void +_portMemFenceInit +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem, + NvLength size +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_MEM_FOOTER *pTail = (PORT_MEM_FOOTER*)((NvU8*)pMem + size); + + pHead->fence.pAllocator = pAlloc; + pHead->fence.blockSize = size; + pHead->fence.magic = PORT_MEM_FENCE_HEAD_MAGIC; + pTail->fence.magic = PORT_MEM_FENCE_TAIL_MAGIC; +} + +static NV_INLINE void +_portMemFenceCheck +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_MEM_FOOTER *pTail = (PORT_MEM_FOOTER*) + ((NvU8*)pMem + pHead->fence.blockSize); + + if (pHead->fence.magic != PORT_MEM_FENCE_HEAD_MAGIC || + pTail->fence.magic != PORT_MEM_FENCE_TAIL_MAGIC) + { + PORT_MEM_PRINT_ERROR("Memory corruption detected on block %p\n", pMem); + PORT_ASSERT_CHECKED(pHead->fence.magic == PORT_MEM_FENCE_HEAD_MAGIC); + PORT_ASSERT_CHECKED(pTail->fence.magic == PORT_MEM_FENCE_TAIL_MAGIC); + } + if (pHead->fence.pAllocator != pAlloc) + { + PORT_MEM_PRINT_ERROR("Freeing block %p using a wrong allocator (%p instead of %p)\n", + pMem, pAlloc, pHead->fence.pAllocator); + PORT_ASSERT_CHECKED(pHead->fence.pAllocator == pAlloc); + + } +} + +#define PORT_MEM_FENCE_CHECK(pAlloc, pMem) _portMemFenceCheck(pAlloc, pMem) +#define PORT_MEM_FENCE_INIT(pAlloc, pMem, size) _portMemFenceInit(pAlloc, pMem, size) +#else +#define PORT_MEM_FENCE_INIT(x, y, z) +#define PORT_MEM_FENCE_CHECK(x, y) +#endif // FENCEPOSTS + + +// +// Memory allocation lists implementation +// +#if PORT_MEM_TRACK_USE_ALLOCLIST +static NV_INLINE void +_portMemListAdd +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_MEM_LIST *pList = &pHead->list; + pList->pNext = pList; + pList->pPrev = pList; + if (!portAtomicCompareAndSwapSize(&pTracking->pFirstAlloc, pList, NULL)) + { + PORT_LOCKED_LIST_LINK(pTracking->pFirstAlloc, pList, pTracking->listLock); + } +} +static NV_INLINE void +_portMemListRemove +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_MEM_LIST *pList = &pHead->list; + + if (!portAtomicCompareAndSwapSize(&pList->pNext, NULL, pList)) + { + PORT_LOCKED_LIST_UNLINK(pTracking->pFirstAlloc, pList, 
pTracking->listLock); + } + portAtomicCompareAndSwapSize(&pTracking->pFirstAlloc, pList->pNext, pList); +} + +static NV_INLINE PORT_MEM_HEADER * +_portMemListGetHeader +( + PORT_MEM_LIST *pList +) +{ + return (PORT_MEM_HEADER*)((NvU8*)pList - (NvUPtr)(&((PORT_MEM_HEADER*)NULL)->list)); +} +#define PORT_MEM_LIST_INIT(pTracking) \ + do { \ + (pTracking)->pFirstAlloc = NULL; \ + PORT_MEM_LOCK_INIT((pTracking)->listLock); \ + } while (0) +#define PORT_MEM_LIST_DESTROY(pTracking) PORT_MEM_LOCK_DESTROY((pTracking)->listLock) +#define PORT_MEM_LIST_ADD(pTracking, pMem) _portMemListAdd(pTracking, pMem) +#define PORT_MEM_LIST_REMOVE(Tracking, pMem) _portMemListRemove(pTracking, pMem) +#else +#define PORT_MEM_LIST_INIT(x) +#define PORT_MEM_LIST_DESTROY(x) +#define PORT_MEM_LIST_ADD(x, y) +#define PORT_MEM_LIST_REMOVE(x, y) +#endif // ALLOCLIST + + + +// +// Memory allocation-caller info implementation +// +#if PORT_MEM_TRACK_USE_CALLERINFO + +static NV_INLINE void +_portMemCallerInfoInitMem +( + void *pMem, + PORT_MEM_CALLERINFO callerInfo +) +{ + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + portMemCopy(&pHead->callerInfo, sizeof(callerInfo), + &callerInfo, sizeof(callerInfo)); +} +static NV_INLINE void +_portMemCallerInfoInitTracking +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + PORT_MEM_CALLERINFO callerInfo +) +{ + portMemCopy(&pTracking->callerInfo, sizeof(callerInfo), + &callerInfo, sizeof(callerInfo)); +} + +#define PORT_MEM_CALLERINFO_INIT_TRACKING(pTracking) \ + _portMemCallerInfoInitTracking(pTracking, PORT_MEM_CALLERINFO_PARAM) +#define PORT_MEM_CALLERINFO_INIT_MEM(pMem) \ + _portMemCallerInfoInitMem(pMem, PORT_MEM_CALLERINFO_PARAM) + +#if PORT_MEM_TRACK_USE_CALLERINFO_IP +#if NVCPU_IS_RISCV64 +// +// Libos has custom %a format specifier that decodes an instruction pointer into +// a function / file / line reference when the binary output is decoded. +// +#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ %a\n", x +#else +#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ 0x%016x\n", x +#endif // NVCPU_IS_RISCV64 +#else +#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "@ %s:%u (%s)\n", x.file, x.line, x.func +#endif // PORT_MEM_TRACK_USE_CALLERINFO_IP + +#else // PORT_MEM_TRACK_USE_CALLERINFO +#define PORT_MEM_CALLERINFO_INIT_TRACKING(x) +#define PORT_MEM_CALLERINFO_INIT_MEM(x) +#define PORT_MEM_CALLERINFO_PRINT_ARGS(x) "\n" +#endif // PORT_MEM_TRACK_USE_CALLERINFO + + +#if PORT_MEM_TRACK_USE_LOGGING +#include "nvlog/nvlog.h" +/** @brief Single log entry. Uses 64bit values even on 32bit systems. 
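+ *  The fixed-width NvP64/NvU64 fields keep the record layout identical on
+ *  32-bit and 64-bit builds, so one decoder can parse the resulting NvLog
+ *  buffer regardless of the producer's word size.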
 */
+typedef struct PORT_MEM_LOG_ENTRY
+{
+    NvP64 address;
+    NvP64 allocator;
+    NvU64 size;     // if size is 0, it is a free() call, not alloc()
+} PORT_MEM_LOG_ENTRY;
+
+#define PORT_MEM_TRACK_LOG_TAG 0x70726d74
+#define PORT_MEM_LOG_ENTRIES 4096
+
+static void
+_portMemLogInit()
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    nvlogAllocBuffer(PORT_MEM_LOG_ENTRIES * sizeof(PORT_MEM_LOG_ENTRY),
+                     DRF_DEF(LOG, _BUFFER_FLAGS, _FORMAT, _MEMTRACK),
+                     PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+}
+
+static void
+_portMemLogDestroy()
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    nvlogGetBufferHandleFromTag(PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+    nvlogDeallocBuffer(hBuffer);
+}
+
+static void
+_portMemLogAdd
+(
+    PORT_MEM_ALLOCATOR *pAllocator,
+    void               *pMem,
+    NvLength            lengthBytes
+)
+{
+    NVLOG_BUFFER_HANDLE hBuffer;
+    PORT_MEM_LOG_ENTRY entry = {0};
+    entry.address   = NV_PTR_TO_NvP64(pMem);
+    entry.allocator = NV_PTR_TO_NvP64(pAllocator);
+    entry.size      = lengthBytes;
+    nvlogGetBufferHandleFromTag(PORT_MEM_TRACK_LOG_TAG, &hBuffer);
+    nvlogWriteToBuffer(hBuffer, &entry, sizeof(entry));
+}
+
+#define PORT_MEM_LOG_INIT()    _portMemLogInit()
+#define PORT_MEM_LOG_DESTROY() _portMemLogDestroy()
+#define PORT_MEM_LOG_ALLOC(pAlloc, pMem, size) \
+    _portMemLogAdd(pAlloc, pMem, size)
+#define PORT_MEM_LOG_FREE(pAlloc, pMem) \
+    _portMemLogAdd(pAlloc, pMem, 0)
+#else
+#define PORT_MEM_LOG_INIT()
+#define PORT_MEM_LOG_DESTROY()
+#define PORT_MEM_LOG_ALLOC(x, y, z)
+#define PORT_MEM_LOG_FREE(x, y)
+#endif // LOGGING
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Main memory tracking implementation
+//
+////////////////////////////////////////////////////////////////////////////////
+
+//
+// All static function declarations. Definitions are at the end of the file.
+//
+static void *_portMemAllocatorAllocPagedWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void *_portMemAllocatorAllocNonPagedWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void _portMemAllocatorFreeWrapper(PORT_MEM_ALLOCATOR *pAlloc, void *pMem);
+static void _portMemAllocatorReleaseWrapper(PORT_MEM_ALLOCATOR *pAlloc);
+
+static PORT_MEM_ALLOCATOR *_portMemAllocatorCreateOnExistingBlock(void *pAlloc, NvLength blockSizeBytes, void *pSpinlock PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM);
+static void *_portMemAllocatorAllocExistingWrapper(PORT_MEM_ALLOCATOR *pAlloc, NvLength length);
+static void _portMemAllocatorFreeExistingWrapper(PORT_MEM_ALLOCATOR *pAlloc, void *pMem);
+
+static void _portMemTrackingRelease(PORT_MEM_ALLOCATOR_TRACKING *pTracking);
+static void _portMemTrackAlloc(PORT_MEM_ALLOCATOR_TRACKING *pTracking, void *pMem, NvLength size PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM);
+static void _portMemTrackFree(PORT_MEM_ALLOCATOR_TRACKING *pTracking, void *pMem);
+
+
+
+#if PORT_MEM_TRACK_USE_CALLERINFO
+#undef portMemAllocPaged
+#undef portMemAllocNonPaged
+#undef portMemAllocatorCreatePaged
+#undef portMemAllocatorCreateNonPaged
+#undef portMemInitializeAllocatorTracking
+#undef _portMemAllocatorAlloc
+#undef portMemAllocatorCreateOnExistingBlock
+#undef portMemExAllocatorCreateLockedOnExistingBlock
+// These functions have different names if CallerInfo is enabled.
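+//
+// The public names are expected to be macros in the NvPort headers that pass
+// caller information along (hence the #undef block above); the renames below
+// simply route those macros to the _CallerInfo implementations defined in
+// this file.
+//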
+#define portMemAllocPaged portMemAllocPaged_CallerInfo +#define portMemAllocNonPaged portMemAllocNonPaged_CallerInfo +#define portMemAllocatorCreatePaged portMemAllocatorCreatePaged_CallerInfo +#define portMemAllocatorCreateNonPaged portMemAllocatorCreateNonPaged_CallerInfo +#define portMemInitializeAllocatorTracking portMemInitializeAllocatorTracking_CallerInfo +#define _portMemAllocatorAlloc _portMemAllocatorAlloc_CallerInfo +#define portMemAllocatorCreateOnExistingBlock portMemAllocatorCreateOnExistingBlock_CallerInfo +#define portMemExAllocatorCreateLockedOnExistingBlock portMemExAllocatorCreateLockedOnExistingBlock_CallerInfo +#endif + +// +// All memory tracking globals are contained in this structure +// +static struct PORT_MEM_GLOBALS +{ + PORT_MEM_ALLOCATOR_TRACKING mainTracking; + void *trackingLock; + struct + { + PORT_MEM_ALLOCATOR paged; + PORT_MEM_ALLOCATOR nonPaged; + PORT_MEM_ALLOCATOR_IMPL pagedImpl; + PORT_MEM_ALLOCATOR_IMPL nonPagedImpl; + } alloc; + NvU32 initCount; + NvU32 totalAllocators; +} portMemGlobals; + +static NV_INLINE PORT_MEM_ALLOCATOR_TRACKING * +_portMemGetTracking +( + const PORT_MEM_ALLOCATOR *pAlloc +) +{ + if (pAlloc == NULL) + return &portMemGlobals.mainTracking; + else + return pAlloc->pTracking; +} + + +void +portMemInitialize(void) +{ +#if PORT_MEM_TRACK_USE_CALLERINFO + PORT_MEM_CALLERINFO_TYPE_PARAM = PORT_MEM_CALLERINFO_MAKE; +#endif + if (portAtomicIncrementU32(&portMemGlobals.initCount) != 1) + return; + + portMemGlobals.mainTracking.pAllocator = NULL; + portMemGlobals.mainTracking.pNext = &portMemGlobals.mainTracking; + portMemGlobals.mainTracking.pPrev = &portMemGlobals.mainTracking; + PORT_MEM_COUNTER_INIT(&portMemGlobals.mainTracking.counter); + PORT_MEM_LIST_INIT(&portMemGlobals.mainTracking); + PORT_MEM_LOCK_INIT(portMemGlobals.trackingLock); + + portMemGlobals.alloc.paged._portAlloc = _portMemAllocatorAllocPagedWrapper; + portMemGlobals.alloc.nonPaged._portAlloc = _portMemAllocatorAllocNonPagedWrapper; + portMemGlobals.alloc.paged._portFree = _portMemAllocatorFreeWrapper; + portMemGlobals.alloc.nonPaged._portFree = _portMemAllocatorFreeWrapper; + portMemGlobals.alloc.paged._portRelease = NULL; + portMemGlobals.alloc.nonPaged._portRelease = NULL; + + if (PORT_MEM_TRACK_USE_FENCEPOSTS) + { + portMemGlobals.alloc.paged.pImpl = &portMemGlobals.alloc.pagedImpl; + portMemGlobals.alloc.nonPaged.pImpl = &portMemGlobals.alloc.nonPagedImpl; + + portMemInitializeAllocatorTracking(&portMemGlobals.alloc.paged, + &portMemGlobals.alloc.paged.pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + portMemInitializeAllocatorTracking(&portMemGlobals.alloc.nonPaged, + &portMemGlobals.alloc.nonPaged.pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + } + else + { + // Use the same impl for both paged and nonpaged. 
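        // (Without fenceposts the per-allocation header does not record which
        //  allocator a block came from, so paged and nonpaged allocations share
        //  a single tracking structure and are counted together; see
        //  portMemFree below.)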
+ portMemGlobals.alloc.paged.pImpl = &portMemGlobals.alloc.pagedImpl; + portMemGlobals.alloc.nonPaged.pImpl = &portMemGlobals.alloc.pagedImpl; + portMemInitializeAllocatorTracking(NULL, + &portMemGlobals.alloc.pagedImpl.tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + portMemGlobals.alloc.paged.pTracking = &portMemGlobals.alloc.pagedImpl.tracking; + portMemGlobals.alloc.nonPaged.pTracking = &portMemGlobals.alloc.pagedImpl.tracking; + } + PORT_MEM_LOG_INIT(); +} +void +portMemShutdown(NvBool bForceSilent) +{ + PORT_UNREFERENCED_VARIABLE(bForceSilent); + if (portAtomicDecrementU32(&portMemGlobals.initCount) != 0) + return; + +#if (PORT_MEM_TRACK_PRINT_LEVEL > PORT_MEM_TRACK_PRINT_LEVEL_SILENT) + if (!bForceSilent) + { + portMemPrintTrackingInfo(NULL); + } +#endif + PORT_MEM_LOG_DESTROY(); + + if (PORT_MEM_TRACK_USE_FENCEPOSTS) + { + _portMemTrackingRelease(&portMemGlobals.alloc.nonPaged.pImpl->tracking); + _portMemTrackingRelease(&portMemGlobals.alloc.paged.pImpl->tracking); + } + else + { + _portMemTrackingRelease(&portMemGlobals.alloc.pagedImpl.tracking); + } + + PORT_MEM_LOCK_DESTROY(portMemGlobals.trackingLock); + PORT_MEM_LIST_DESTROY(&portMemGlobals.mainTracking); + portMemSet(&portMemGlobals, 0, sizeof(portMemGlobals)); +} + + +void * +portMemAllocPaged +( + NvLength length + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalPaged(); + return _portMemAllocatorAlloc(pAlloc, length PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +void * +portMemAllocNonPaged +( + NvLength length + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_MEM_ALLOCATOR *pAlloc = portMemAllocatorGetGlobalNonPaged(); + return _portMemAllocatorAlloc(pAlloc, length PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +void +portMemFree +( + void *pMem +) +{ + if (pMem != NULL) + { +#if PORT_MEM_TRACK_USE_FENCEPOSTS + PORT_MEM_HEADER *pHead = (PORT_MEM_HEADER*)pMem - 1; + PORT_FREE(pHead->fence.pAllocator, pMem); +#else + // Paged/nonpaged are logged together if we don't have fenceposts. 
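        // (Handing the block to the global paged allocator is safe regardless
        //  of which global allocator produced it: both were given the same
        //  _portMemAllocatorFreeWrapper above, and in this configuration they
        //  also share one tracking structure, so the counters stay consistent.)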
+ PORT_FREE(portMemAllocatorGetGlobalPaged(), pMem); +#endif + } + +#if defined(__COVERITY__) + __coverity_free__(pMem); +#endif +} + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreatePaged(PORT_MEM_CALLERINFO_TYPE_PARAM) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = portMemAllocPaged(PORT_MEM_ALLOCATOR_SIZE + PORT_MEM_CALLERINFO_COMMA_PARAM); + if (pAllocator == NULL) + return NULL; + + pAllocator->pImpl = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1); + pAllocator->_portAlloc = _portMemAllocatorAllocPagedWrapper; + pAllocator->_portFree = _portMemAllocatorFreeWrapper; + pAllocator->_portRelease = _portMemAllocatorReleaseWrapper; + portMemInitializeAllocatorTracking(pAllocator, &pAllocator->pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + + PORT_MEM_PRINT_INFO("Acquired paged allocator %p ", pAllocator); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + + return pAllocator; +} + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreateNonPaged(PORT_MEM_CALLERINFO_TYPE_PARAM) +{ + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = portMemAllocNonPaged(PORT_MEM_ALLOCATOR_SIZE + PORT_MEM_CALLERINFO_COMMA_PARAM); + if (pAllocator == NULL) + return NULL; + + pAllocator->pImpl = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1); + pAllocator->_portAlloc = _portMemAllocatorAllocNonPagedWrapper; + pAllocator->_portFree = _portMemAllocatorFreeWrapper; + pAllocator->_portRelease = _portMemAllocatorReleaseWrapper; + portMemInitializeAllocatorTracking(pAllocator, &pAllocator->pImpl->tracking + PORT_MEM_CALLERINFO_COMMA_PARAM); + + PORT_MEM_PRINT_INFO("Acquired nonpaged allocator %p ", pAllocator); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + return pAllocator; +} + + +PORT_MEM_ALLOCATOR * +portMemAllocatorCreateOnExistingBlock +( + void *pPreallocatedBlock, + NvLength blockSizeBytes + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + return _portMemAllocatorCreateOnExistingBlock(pPreallocatedBlock, blockSizeBytes, + NULL PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +PORT_MEM_ALLOCATOR * +portMemExAllocatorCreateLockedOnExistingBlock +( + void *pPreallocatedBlock, + NvLength blockSizeBytes, + void *pSpinlock + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + return _portMemAllocatorCreateOnExistingBlock(pPreallocatedBlock, blockSizeBytes, + pSpinlock PORT_MEM_CALLERINFO_COMMA_PARAM); +} + +void +portMemAllocatorRelease +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + if (pAllocator == NULL) + { + PORT_BREAKPOINT_CHECKED(); + return; + } + _portMemTrackingRelease(pAllocator->pTracking); + PORT_MEM_PRINT_INFO("Released allocator %p\n", pAllocator); + + if (pAllocator->_portRelease != NULL) + pAllocator->_portRelease(pAllocator); +} + + +PORT_MEM_ALLOCATOR * +portMemAllocatorGetGlobalNonPaged(void) +{ + return &portMemGlobals.alloc.nonPaged; +} +PORT_MEM_ALLOCATOR * +portMemAllocatorGetGlobalPaged(void) +{ + return &portMemGlobals.alloc.paged; +} + +void +portMemInitializeAllocatorTracking +( + PORT_MEM_ALLOCATOR *pAlloc, + PORT_MEM_ALLOCATOR_TRACKING *pTracking + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + if (portMemGlobals.initCount == 0) + { + portMemSet(pTracking, 0, sizeof(*pTracking)); + if (pAlloc != NULL) + pAlloc->pTracking = NULL; + return; + } + + pTracking->pAllocator = pAlloc; + if (pAlloc != NULL) + pAlloc->pTracking = pTracking; + PORT_LOCKED_LIST_LINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock); + PORT_MEM_COUNTER_INIT(&pTracking->counter); + PORT_MEM_LIST_INIT(pTracking); + PORT_MEM_CALLERINFO_INIT_TRACKING(pTracking); + 
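    // The new tracking record is now linked into the circular list rooted at
    // portMemGlobals.mainTracking (guarded by trackingLock); this is the list
    // portMemPrintTrackingInfo(NULL) walks to report every live allocator.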
portAtomicIncrementU32(&portMemGlobals.totalAllocators); +} + +void * +_portMemAllocatorAlloc +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + void *pMem = NULL; + if (pAlloc == NULL) + { + PORT_BREAKPOINT_CHECKED(); + return NULL; + } + if (length > 0) + { + NvLength paddedLength; +// RISCV64 requires 64-bit alignment of structures, and length indicates the alignment of the footer +#if defined(__riscv) + if (PORT_MEM_STAGING_SIZE > 0 && (length & 7)) + { + if (!portSafeAddLength(length & ~7, 8, &length)) + { + return NULL; + } + } +#endif + if (!portSafeAddLength(length, PORT_MEM_STAGING_SIZE, &paddedLength)) + { + return NULL; + } + pMem = pAlloc->_portAlloc(pAlloc, paddedLength); + } + if (pMem != NULL) + { + pMem = PORT_MEM_ADD_HEADER_PTR(pMem); + _portMemTrackAlloc(_portMemGetTracking(pAlloc), pMem, length + PORT_MEM_CALLERINFO_COMMA_PARAM); + } + return pMem; +} +void +_portMemAllocatorFree +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + if (pAlloc == NULL) + { + PORT_BREAKPOINT_CHECKED(); + return; + } + if (pMem != NULL) + { + _portMemTrackFree(_portMemGetTracking(pAlloc), pMem); + pMem = PORT_MEM_SUB_HEADER_PTR(pMem); + pAlloc->_portFree(pAlloc, pMem); + } +} + +void +portMemPrintTrackingInfo +( + const PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + + portDbgPrintf("[NvPort] *************************************************\n"); + + if (pAllocator == NULL) + { + portDbgPrintf("NvPort memory tracking information for all allocations:\n"); + } + + if (pTracking == NULL) + { + portDbgPrintf("Allocator %p initialized before portMemInitialize(); no tracking info.\n", pAllocator); + return; + } + + for (;;) + { + if (pTracking->pAllocator == NULL) + { + portDbgPrintf("NULL allocator for tracker %p:\n", pTracking); + goto next_tracking; + } + + portDbgPrintf("NvPort memory tracking information for allocator %p:\n", + pTracking->pAllocator); + +#if PORT_MEM_TRACK_USE_CALLERINFO + { + portDbgPrintf(" Allocator acquired " + PORT_MEM_CALLERINFO_PRINT_ARGS(pTracking->callerInfo)); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetActiveStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); + + portMemExTrackingGetActiveStats(pTracking->pAllocator, &stats); + + // + // rmtest_gsp test script (dvs_gsp_sanity.sh) depends on this print, so do not change + // format without updating script! 
+ // + portDbgPrintf("ACTIVE: %u allocations, %llu bytes allocated (%llu useful, %llu meta)\n", + stats.numAllocations, + (NvU64) stats.allocatedSize, + (NvU64) stats.usefulSize, + (NvU64) stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetTotalStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); + + portMemExTrackingGetTotalStats(pTracking->pAllocator, &stats); + portDbgPrintf("TOTAL: %u allocations, %llu bytes allocated (%llu useful, %llu meta)\n", + stats.numAllocations, + (NvU64) stats.allocatedSize, + (NvU64) stats.usefulSize, + (NvU64) stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetPeakStats) + { + PORT_MEM_TRACK_ALLOCATOR_STATS stats; + + portMemSet(&stats, 0, sizeof(stats)); + + portMemExTrackingGetPeakStats(pTracking->pAllocator, &stats); + portDbgPrintf("PEAK: %u allocations, %llu bytes allocated (%llu useful, %llu meta)\n", + stats.numAllocations, + (NvU64) stats.allocatedSize, + (NvU64) stats.usefulSize, + (NvU64) stats.metaSize); + } +#endif + +#if PORT_IS_FUNC_SUPPORTED(portMemExTrackingGetNext) + { + PORT_MEM_TRACK_ALLOC_INFO info; + NvBool bPrinted = NV_FALSE; + void *iterator = NULL; + + do + { + if (portMemExTrackingGetNext(pTracking->pAllocator, &info, &iterator) != NV_OK) + { + portDbgPrintf("(no active allocations)\n"); + break; + } + else if (!bPrinted) + { + portDbgPrintf("Currently active allocations:\n"); + bPrinted = NV_TRUE; + } + portDbgPrintf(" - A:%p - 0x%p [%8llu bytes] T=%llu ", + info.pAllocator, + info.pMemory, + (NvU64)info.size, + info.timestamp); + portDbgPrintf(PORT_MEM_CALLERINFO_PRINT_ARGS(info.callerInfo)); + } while (iterator != NULL); + } +#endif + +next_tracking: + portDbgPrintf("[NvPort] *************************************************\n"); + + if ((pAllocator != NULL) || (pTracking->pNext == &portMemGlobals.mainTracking)) + break; + + pTracking = pTracking->pNext; + } +} + +#if portMemExTrackingGetActiveStats_SUPPORTED +NV_STATUS +portMemExTrackingGetActiveStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.activeAllocs; + pStats->usefulSize = pTracking->counter.activeSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif + +#if portMemExTrackingGetTotalStats_SUPPORTED +NV_STATUS +portMemExTrackingGetTotalStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.totalAllocs; + pStats->usefulSize = pTracking->counter.totalSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif + +#if portMemExTrackingGetPeakStats_SUPPORTED +NV_STATUS +portMemExTrackingGetPeakStats +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOCATOR_STATS *pStats +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + pStats->numAllocations = pTracking->counter.peakAllocs; + pStats->usefulSize = 
pTracking->counter.peakSize; + pStats->metaSize = pStats->numAllocations * PORT_MEM_STAGING_SIZE; + pStats->allocatedSize = pStats->usefulSize + pStats->metaSize; + return NV_OK; +} +#endif + +#if portMemExTrackingGetNext_SUPPORTED +NV_STATUS +portMemExTrackingGetNext +( + const PORT_MEM_ALLOCATOR *pAllocator, + PORT_MEM_TRACK_ALLOC_INFO *pInfo, + void **pIterator +) +{ + PORT_MEM_ALLOCATOR_TRACKING *pTracking = _portMemGetTracking(pAllocator); + PORT_MEM_LIST *pList; + PORT_MEM_HEADER *pHead; + + if (pTracking == NULL) + { + return NV_ERR_OBJECT_NOT_FOUND; + } + + if (pTracking->pFirstAlloc == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (*pIterator == NULL) + pList = pTracking->pFirstAlloc; + else + pList = (PORT_MEM_LIST*)(*pIterator); + + pHead = _portMemListGetHeader(pList); + + // Advance iterator + if (pList->pNext == pTracking->pFirstAlloc) + *pIterator = NULL; + else + *pIterator = pList->pNext; + + // Populate pInfo + pInfo->pMemory = pHead + 1; + pInfo->size = pHead->fence.blockSize; + pInfo->pAllocator = pHead->fence.pAllocator; + pInfo->timestamp = 0; + +#if PORT_MEM_TRACK_USE_CALLERINFO + pInfo->callerInfo = pHead->callerInfo; +#endif + + return NV_OK; +} +#endif + +static void +_portMemTrackingRelease +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking +) +{ + if (pTracking == NULL) return; + + if (pTracking->counter.activeAllocs != 0) + { + PORT_MEM_PRINT_ERROR("Allocator %p released with memory allocations\n", pTracking->pAllocator); +#if (PORT_MEM_TRACK_PRINT_LEVEL > PORT_MEM_TRACK_PRINT_LEVEL_SILENT) + portMemPrintTrackingInfo(pTracking->pAllocator); +#endif + } + + PORT_LOCKED_LIST_UNLINK(&portMemGlobals.mainTracking, pTracking, portMemGlobals.trackingLock); + PORT_MEM_LIST_DESTROY(pTracking); + portAtomicDecrementU32(&portMemGlobals.totalAllocators); +} + +static void +_portMemTrackAlloc +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem, + NvLength size + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_UNREFERENCED_VARIABLE(pMem); + if (pTracking == NULL) return; + PORT_MEM_PRINT_INFO("Allocating %u bytes at address %p", size, pMem); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + + PORT_MEM_COUNTER_INC(&pTracking->counter, size); + PORT_MEM_COUNTER_INC(&portMemGlobals.mainTracking.counter, size); + + PORT_MEM_FENCE_INIT(pTracking->pAllocator, pMem, size); + PORT_MEM_LIST_ADD(pTracking, pMem); + PORT_MEM_CALLERINFO_INIT_MEM(pMem); + PORT_MEM_LOG_ALLOC(pTracking->pAllocator, pMem, size); +} + +static void +_portMemTrackFree +( + PORT_MEM_ALLOCATOR_TRACKING *pTracking, + void *pMem +) +{ + if (pTracking == NULL) return; + PORT_MEM_PRINT_INFO("Freeing block at address %p\n", pMem); + + PORT_MEM_COUNTER_DEC(&pTracking->counter, pMem); + PORT_MEM_COUNTER_DEC(&portMemGlobals.mainTracking.counter, pMem); + + PORT_MEM_FENCE_CHECK(pTracking->pAllocator, pMem); + PORT_MEM_LIST_REMOVE(pTracking, pMem); + PORT_MEM_LOG_FREE(pTracking->pAllocator, pMem); +} + + +static void * +_portMemAllocatorAllocPagedWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length +) +{ + PORT_UNREFERENCED_VARIABLE(pAlloc); + return _portMemAllocPagedUntracked(length); +} + +static void * +_portMemAllocatorAllocNonPagedWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length +) +{ + PORT_UNREFERENCED_VARIABLE(pAlloc); + return _portMemAllocNonPagedUntracked(length); +} + +static void +_portMemAllocatorFreeWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + PORT_UNREFERENCED_VARIABLE(pAlloc); + _portMemFreeUntracked(pMem); +} + +static void 
+_portMemAllocatorReleaseWrapper +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + portMemFree(pAllocator); +} + +/// @todo Add these as intrinsics to UTIL module +static NV_INLINE NvBool _isBitSet(NvU32 *vect, NvU32 bit) +{ + return !!(vect[bit/32] & NVBIT32(bit%32)); +} +static NV_INLINE void _setBit(NvU32 *vect, NvU32 bit) +{ + vect[bit/32] |= NVBIT32(bit%32); +} +static NV_INLINE void _clearBit(NvU32 *vect, NvU32 bit) +{ + vect[bit/32] &= ~NVBIT32(bit%32); +} + +static PORT_MEM_ALLOCATOR * +_portMemAllocatorCreateOnExistingBlock +( + void *pPreallocatedBlock, + NvLength blockSizeBytes, + void *pSpinlock + PORT_MEM_CALLERINFO_COMMA_TYPE_PARAM +) +{ + PORT_MEM_ALLOCATOR *pAllocator = (PORT_MEM_ALLOCATOR *)pPreallocatedBlock; + PORT_MEM_BITVECTOR *pBitVector; + PORT_MEM_BITVECTOR_CHUNK *pLastChunkInBlock; + NvU32 bitVectorSize; + + if ((pPreallocatedBlock == NULL) || + (blockSizeBytes < PORT_MEM_PREALLOCATED_BLOCK_MINIMAL_EXTRA_SIZE) || + (blockSizeBytes > NV_S32_MAX)) + { + return NULL; + } + + pAllocator->_portAlloc = _portMemAllocatorAllocExistingWrapper; + pAllocator->_portFree = _portMemAllocatorFreeExistingWrapper; + pAllocator->_portRelease = NULL; + pAllocator->pTracking = NULL; // No tracking for this allocator + pAllocator->pImpl = (PORT_MEM_ALLOCATOR_IMPL*)(pAllocator + 1); + + pBitVector = (PORT_MEM_BITVECTOR*)(pAllocator->pImpl); + pBitVector->pSpinlock = pSpinlock; + + // Calculate total number of chunks available + pBitVector->pChunks = (PORT_MEM_BITVECTOR_CHUNK *)(pBitVector + 1); + pBitVector->pChunks = (void*)NV_ALIGN_UP((NvUPtr)pBitVector->pChunks, + (NvUPtr)PORT_MEM_BITVECTOR_CHUNK_SIZE); + + pLastChunkInBlock = (void*)NV_ALIGN_DOWN((NvUPtr)pPreallocatedBlock + + blockSizeBytes - + PORT_MEM_BITVECTOR_CHUNK_SIZE, + (NvUPtr)PORT_MEM_BITVECTOR_CHUNK_SIZE); + if (pLastChunkInBlock < pBitVector->pChunks) + { + pBitVector->numChunks = 0; + } + else + { + pBitVector->numChunks = (NvU32)(pLastChunkInBlock - pBitVector->pChunks) + 1; + } + bitVectorSize = (NvU32)((NvU8*)pBitVector->pChunks - (NvU8*)pBitVector->bits); + + while (bitVectorSize*8 < pBitVector->numChunks*2) + { + // If too many chunks to track in current bit vector, increase bitvector by one chunk + pBitVector->pChunks++; + pBitVector->numChunks--; + bitVectorSize = (NvU32)((NvU8*)pBitVector->pChunks - (NvU8*)pBitVector->bits); + } + portMemSet(pBitVector->bits, 0, bitVectorSize); + + PORT_MEM_PRINT_INFO("Acquired preallocated block allocator %p (%llu bytes) ", pAllocator, (NvU64)blockSizeBytes); + PORT_MEM_PRINT_INFO(PORT_MEM_CALLERINFO_PRINT_ARGS(PORT_MEM_CALLERINFO_PARAM)); + return pAllocator; +} + +static void * +_portMemAllocatorAllocExistingWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + NvLength length +) +{ + NvU32 chunksNeeded = (NvU32)NV_DIV_AND_CEIL(length, PORT_MEM_BITVECTOR_CHUNK_SIZE); + void *pMem = NULL; + NvU32 chunksFound = 0; + NvU32 i; + PORT_MEM_BITVECTOR *pBitVector = (PORT_MEM_BITVECTOR*)(pAlloc->pImpl); + PORT_SPINLOCK *pSpinlock = (PORT_SPINLOCK*)(pBitVector->pSpinlock); + + if (chunksNeeded > pBitVector->numChunks) + { + return NULL; + } + if (pSpinlock != NULL) + { + portSyncSpinlockAcquire(pSpinlock); + } + for (i = 0; i < pBitVector->numChunks; i++) + { + NvBool bWholeWordSet; + bWholeWordSet = pBitVector->bits[i/32] == ~0U; + if (bWholeWordSet || (_isBitSet(pBitVector->bits, i))) + { + // Chunk not available as whole. + chunksFound = 0; + // Skip fully set words + if (bWholeWordSet) + { + i += 31; + } + if (chunksNeeded > (pBitVector->numChunks - i - (bWholeWordSet ? 
1 : 0))) + { + break; + } + } + else + { + chunksFound++; + if (chunksFound == chunksNeeded) + { + NvU32 j; + NvU32 firstAllocatedChunk = i - chunksFound + 1; + + pMem = pBitVector->pChunks[firstAllocatedChunk]; + // Mark all acquired chunks as occupied + for (j = firstAllocatedChunk; j <= i; j++) + { + _setBit(pBitVector->bits, j); + } + // Mark last chunk of allocation + _setBit(pBitVector->bits, pBitVector->numChunks + i); + break; + } + } + } + if (pSpinlock != NULL) + { + portSyncSpinlockRelease(pSpinlock); + } + return pMem; +} + +static void +_portMemAllocatorFreeExistingWrapper +( + PORT_MEM_ALLOCATOR *pAlloc, + void *pMem +) +{ + PORT_MEM_BITVECTOR_CHUNK *pChunk = (PORT_MEM_BITVECTOR_CHUNK *)pMem; + NvU32 i; + PORT_MEM_BITVECTOR *pBitVector = (PORT_MEM_BITVECTOR*)(pAlloc->pImpl); + PORT_SPINLOCK *pSpinlock = (PORT_SPINLOCK*)(pBitVector->pSpinlock); + + if (((NvUPtr)pMem < (NvUPtr)pBitVector->pChunks) || + ((NvUPtr)pMem > (NvUPtr)(pBitVector->pChunks + pBitVector->numChunks))) + { + // pMem not inside this allocator. + PORT_BREAKPOINT_CHECKED(); + return; + } + + if (pSpinlock != NULL) + { + portSyncSpinlockAcquire(pSpinlock); + } + for (i = (NvU32)(pChunk - pBitVector->pChunks); i < pBitVector->numChunks; i++) + { + // Mark chunk as free + _clearBit(pBitVector->bits, i); + if (_isBitSet(pBitVector->bits, pBitVector->numChunks + i)) + { + // Clear last-allocation-bit and bail + _clearBit(pBitVector->bits, pBitVector->numChunks + i); + break; + } + } + if (pSpinlock != NULL) + { + portSyncSpinlockRelease(pSpinlock); + } +} diff --git a/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c b/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c new file mode 100644 index 000000000..70fb5ca34 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/memory/memory_unix_kernel_os.c @@ -0,0 +1,206 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/** + * @file + * @brief MEMORY module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +/** + * @note All kernel memory in unix is non-paged. 
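 * The paged variant below therefore simply forwards to the non-paged
 * allocation path, and portMemExSafeForPagedAlloc() reports NV_TRUE.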
+ */ +void * +_portMemAllocPagedUntracked +( + NvLength lengthBytes +) +{ + return _portMemAllocNonPagedUntracked(lengthBytes); +} + +void * +_portMemAllocNonPagedUntracked +( + NvLength lengthBytes +) +{ + void *pMem = NULL; + PORT_ASSERT_CHECKED(lengthBytes > 0); + if (lengthBytes > 0) + os_alloc_mem(&pMem, lengthBytes); + return pMem; +} + + + +void +_portMemFreeUntracked +( + void *pData +) +{ + if (pData != NULL) + { + os_free_mem(pData); + } +} + +void * +portMemCopy +( + void *pDestination, + NvLength destSize, + const void *pSource, + NvLength srcSize +) +{ + // API guarantees this is a NOP when destSize==0 + if (destSize == 0) + { + return pDestination; + } + + PORT_ASSERT_CHECKED(pDestination != NULL); + PORT_ASSERT_CHECKED(pSource != NULL); + PORT_ASSERT_CHECKED(srcSize <= destSize); + PORT_ASSERT_CHECKED(!portUtilCheckOverlap(pDestination, destSize, + pSource, srcSize)); + + if ((pSource == NULL) || (pDestination == NULL) || (srcSize > destSize)) + { + return NULL; + } + return os_mem_copy(pDestination, pSource, srcSize); +} + + +void * +portMemSet +( + void *pData, + NvU8 value, + NvLength lengthBytes +) +{ + if (lengthBytes == 0) + { + return pData; + } + if (pData == NULL) + { + return pData; + } + return os_mem_set(pData, value, lengthBytes); +} + +NvS32 +portMemCmp +( + const void *pData0, + const void *pData1, + NvLength lengthBytes +) +{ + if (lengthBytes == 0) + { + return 0; + } + if ((pData0 == NULL) || (pData1 == NULL)) + { + return -1; + } + return os_mem_cmp(pData0, pData1, lengthBytes); +} + + + +#define PORT_MEM_USE_GENERIC_portMemSetPattern +#define PORT_MEM_USE_GENERIC_portMemMove +#include "memory_generic.h" + +NV_STATUS +portMemExCopyFromUser +( + const NvP64 pUser, + void *pKernel, + NvLength lengthBytes +) +{ + if (pKernel == NULL) + { + return NV_ERR_INVALID_POINTER; + } + if (lengthBytes == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + return os_memcpy_from_user(pKernel, NvP64_VALUE(pUser), lengthBytes); +} + +NV_STATUS +portMemExCopyToUser +( + const void *pKernel, + NvP64 pUser, + NvLength lengthBytes +) +{ + if (pKernel == NULL) + { + return NV_ERR_INVALID_POINTER; + } + if (lengthBytes == 0) + { + return NV_ERR_INVALID_ARGUMENT; + } + return os_memcpy_to_user(NvP64_VALUE(pUser), (void*)pKernel, lengthBytes); +} + +NvLength +portMemExGetPageSize(void) +{ + return os_page_size; +} + +// Large allocations (>KMALLOC_LIMIT) will fail, but it is safe to call +NvBool +portMemExSafeForPagedAlloc(void) +{ + return NV_TRUE; +} +NvBool +portMemExSafeForNonPagedAlloc(void) +{ + return NV_TRUE; +} diff --git a/src/nvidia/src/libraries/nvport/string/string_generic.c b/src/nvidia/src/libraries/nvport/string/string_generic.c new file mode 100644 index 000000000..c576ea81c --- /dev/null +++ b/src/nvidia/src/libraries/nvport/string/string_generic.c @@ -0,0 +1,274 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief STRING module implementation for platforms without stdlib support + */ + +#include "nvport/nvport.h" +#include "nvmisc.h" + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringLength +NvLength +portStringLength +( + const char *str +) +{ + const char *begin = str; + + PORT_ASSERT_CHECKED(str != NULL); + + while ('\0' != *str) str++; + + return str - begin; +} + +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringLengthSafe +NvLength +portStringLengthSafe +( + const char *str, + NvLength maxLength +) +{ + const char *begin = str; + + PORT_ASSERT_CHECKED(str != NULL); + + while ((0 != maxLength--) && ('\0' != *str)) + str++; + + return str - begin; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCompare +NvS32 +portStringCompare +( + const char *str1, + const char *str2, + NvLength maxLength +) +{ + NvLength length; + + PORT_ASSERT_CHECKED(str1 != NULL); + PORT_ASSERT_CHECKED(str2 != NULL); + + length = portStringLengthSafe(str1, maxLength); + + // Add 1 for the null terminator. + if (length < maxLength) + length++; + + return portMemCmp(str1, str2, length); +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCopy +NvLength +portStringCopy +( + char *dest, + NvLength destSize, + const char *src, + NvLength srcSize +) +{ + NvLength minCopyLength; + NvLength srcLen; + + PORT_ASSERT_CHECKED(dest != NULL); + PORT_ASSERT_CHECKED(src != NULL); + + PORT_ASSERT_CHECKED(destSize != 0); + PORT_ASSERT_CHECKED(srcSize != 0); + + srcLen = portStringLengthSafe(src, srcSize); + if (srcLen == srcSize) srcLen--; + + minCopyLength = NV_MIN(destSize, srcLen + 1); + + PORT_ASSERT_CHECKED(minCopyLength != 0); + + if (minCopyLength > 1) + portMemCopy(dest, destSize, src, minCopyLength - 1); + + dest[minCopyLength - 1] = '\0'; + + return minCopyLength; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringCat +char * +portStringCat +( + char *str, + NvLength strSize, + const char *cat, + NvLength catSize +) +{ + NvLength strLen; + NvLength catLen; + NvLength minCatLength; + char* begin; + + PORT_ASSERT_CHECKED(str != NULL); + PORT_ASSERT_CHECKED(cat != NULL); + + strLen = portStringLengthSafe(str, strSize); + catLen = portStringLengthSafe(cat, catSize); + + // In case of no NULL terminating char in cat. 
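    // (i.e. portStringLengthSafe found no terminator within catSize, so clamp
    //  catLen to leave room for one. Worked example: with char buf[8] = "abc",
    //  portStringCat(buf, sizeof(buf), "defgh", 6) yields strLen == 3,
    //  catLen == 5 and minCatLength == NV_MIN(8 - 3, 6) == 5, so four
    //  characters are appended and the terminator lands in buf[7],
    //  leaving "abcdefg".)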
+ if (catLen == catSize) catLen--; + + minCatLength = NV_MIN(strSize - strLen, catLen + 1); + if (0 == minCatLength) + return str; + + begin = str; + str = str + strLen; + + // strncat doesn't count NULL char. + if (minCatLength > 1) + portMemCopy(str, strSize, cat, minCatLength - 1); + + begin[strLen + minCatLength - 1] = '\0'; + return begin; +} + +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringConvertAsciiToUtf16 +NvLength +portStringConvertAsciiToUtf16 +( + NvU16 *dest, + NvLength destSize, + const char *src, + NvLength srcSize +) +{ + NvLength i, len; + + PORT_ASSERT_CHECKED(dest != NULL); + PORT_ASSERT_CHECKED(src != NULL); + + if (destSize == 0) + return 0; + + len = portStringLengthSafe(src, srcSize); + if (len >= destSize) + len = destSize - 1; + + i = len; + while (i-- > 0) + dest[i] = src[i]; + + dest[len] = 0; + return len; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringBufferToHex +NvLength +portStringBufferToHex +( + char *str, + NvLength strSize, + const NvU8 *buf, + NvLength bufSize +) +{ + NvLength i, len; + + if (strSize == 0) + return 0; + + PORT_ASSERT_CHECKED(str != NULL); + PORT_ASSERT_CHECKED(buf != NULL); + + len = bufSize * 2; + if (len >= strSize) + len = strSize - 1; + + for (i = 0; i < len; i++) + { + NvU8 n = (i % 2) ? (buf[i/2] & 0xF) : (buf[i/2] >> 4); + str[i] = (n < 0xA) ? ('0' + n) : ('a' + n - 0xA); + } + str[len] = 0; + return len; +} +#endif + +#ifndef NVPORT_STRING_DONT_DEFINE_portStringBufferToHexGroups +NvLength +portStringBufferToHexGroups +( + char *str, + NvLength strSize, + const NvU8 *buf, + NvLength bufSize, + NvLength groupCount, + const NvU32 *groups, + const char *separator +) +{ + NvLength group, sepLength, written; + + if (strSize == 0) + return 0; + + PORT_ASSERT_CHECKED(str != NULL); + PORT_ASSERT_CHECKED(buf != NULL); + PORT_ASSERT_CHECKED(groups != NULL); + PORT_ASSERT_CHECKED(separator != NULL); + + sepLength = portStringLength(separator); + + for (written = 0, group = 0; (group < groupCount) && (written < (strSize - 1)); group++) + { + NvLength groupSize = NV_MIN(groups[group] / 2, bufSize); + written += portStringBufferToHex(str + written, strSize - written, buf, groupSize); + buf += groupSize; + bufSize -= groupSize; + + if (group != groupCount - 1) + { + portMemCopy(str + written, strSize - written, separator, sepLength); + written += sepLength; + } + } + + str[written] = 0; + return written; +} +#endif diff --git a/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h b/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h new file mode 100644 index 000000000..5f956f02e --- /dev/null +++ b/src/nvidia/src/libraries/nvport/sync/inc/sync_rwlock_def.h @@ -0,0 +1,41 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC custom rwlock struct implementations + * + */ + +#ifndef _NVPORT_SYNC_RWLOCK_DEF_H_ +#define _NVPORT_SYNC_RWLOCK_DEF_H_ + +struct PORT_RWLOCK +{ + PORT_SEMAPHORE *pSemRead; + PORT_SEMAPHORE *pSemWrite; + volatile NvS32 numReaders; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +#endif diff --git a/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h b/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h new file mode 100644 index 000000000..a28201d84 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/sync/inc/sync_unix_kernel_os_def.h @@ -0,0 +1,54 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC unix kernel struct implementations + * + */ + +#ifndef _NVPORT_SYNC_UNIX_KERNEL_DEF_H_ +#define _NVPORT_SYNC_UNIX_KERNEL_DEF_H_ + +#include "os-interface.h" + +struct PORT_SPINLOCK +{ + void *lock; + NvU64 oldIrql; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +struct PORT_MUTEX +{ + void *mutex; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +struct PORT_SEMAPHORE +{ + void *sem; + PORT_MEM_ALLOCATOR *pAllocator; +}; + +#endif diff --git a/src/nvidia/src/libraries/nvport/sync/sync_common.h b/src/nvidia/src/libraries/nvport/sync/sync_common.h new file mode 100644 index 000000000..babb90f5a --- /dev/null +++ b/src/nvidia/src/libraries/nvport/sync/sync_common.h @@ -0,0 +1,158 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC common function implementations + * + * The portSyncXxxCreate functions have the same implementation, so they are + * extracted here instead of repeated in every file. + */ + +#define PORT_SYNC_IMPL +#include "nvport/nvport.h" + +#ifdef PORT_SYNC_COMMON_DEFINE_SPINLOCK +PORT_SPINLOCK * +portSyncSpinlockCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_SPINLOCK *pSpinlock; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pSpinlock = PORT_ALLOC(pAllocator, portSyncSpinlockSize); + if (pSpinlock != NULL) + { + if (portSyncSpinlockInitialize(pSpinlock) != NV_OK) + { + PORT_FREE(pAllocator, pSpinlock); + return NULL; + } + pSpinlock->pAllocator = pAllocator; + } + return pSpinlock; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_MUTEX +PORT_MUTEX * +portSyncMutexCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_MUTEX *pMutex; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pMutex = PORT_ALLOC(pAllocator, portSyncMutexSize); + if (pMutex != NULL) + { + if (portSyncMutexInitialize(pMutex) != NV_OK) + { + PORT_FREE(pAllocator, pMutex); + return NULL; + } + pMutex->pAllocator = pAllocator; + } + return pMutex; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_SEMAPHORE +PORT_SEMAPHORE * +portSyncSemaphoreCreate +( + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 startValue +) +{ + PORT_SEMAPHORE *pSemaphore; + PORT_ASSERT_CHECKED(pAllocator != NULL); + pSemaphore = PORT_ALLOC(pAllocator, portSyncSemaphoreSize); + if (pSemaphore != NULL) + { + if (portSyncSemaphoreInitialize(pSemaphore, startValue) != NV_OK) + { + PORT_FREE(pAllocator, pSemaphore); + return NULL; + } + pSemaphore->pAllocator = pAllocator; + } + return pSemaphore; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_RWLOCK +PORT_RWLOCK * +portSyncRwLockCreate +( + PORT_MEM_ALLOCATOR *pAllocator +) +{ + PORT_RWLOCK *pLock; + PORT_ASSERT_CHECKED(pAllocator != NULL); + + pLock = PORT_ALLOC(pAllocator, portSyncRwLockSize); + if (pLock != NULL) + { + if (portSyncRwLockInitialize(pLock) != NV_OK) + { + PORT_FREE(pAllocator, pLock); + return NULL; + } + pLock->pAllocator = pAllocator; + } + return pLock; +} +#endif + +#ifdef PORT_SYNC_COMMON_DEFINE_SYNC_INIT + +NvLength portSyncSpinlockSize; +NvLength portSyncMutexSize; +NvLength portSyncSemaphoreSize; +NvLength portSyncRwLockSize; + +void portSyncInitialize(void) +{ + portSyncSpinlockSize = sizeof(PORT_SPINLOCK); + portSyncMutexSize = sizeof(PORT_MUTEX); + 
portSyncSemaphoreSize = sizeof(PORT_SEMAPHORE); + portSyncRwLockSize = sizeof(PORT_RWLOCK); +#if LOCK_VAL_ENABLED +{ + extern void portSyncInitialize_LOCKVAL(void); + portSyncInitialize_LOCKVAL(); +} +#endif +} + +void portSyncShutdown(void) +{ +#if LOCK_VAL_ENABLED + extern void portSyncShutdown_LOCKVAL(void); + portSyncShutdown_LOCKVAL(); +#endif +} + +#endif // PORT_SYNC_COMMON_DEFINE_SYNC_INIT diff --git a/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c b/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c new file mode 100644 index 000000000..f33144526 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/sync/sync_rwlock.c @@ -0,0 +1,178 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/** + * @file + * @brief Readers-writer lock implementation using PORT_SEMAPHORE and ATOMIC + * module. + */ +#define PORT_SYNC_IMPL +#include "nvport/nvport.h" + + +#if !PORT_IS_MODULE_SUPPORTED(atomic) +#error "NvPort SYNC RWLock implementation requires ATOMIC module to be present." 
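/*
 * (Why ATOMIC is required: the rwlock below keeps its reader count with
 * portAtomicIncrementS32/portAtomicDecrementS32, while two semaphores created
 * with a count of 1, pSemRead and pSemWrite, do the gating: readers briefly
 * take pSemRead to bump the count, the first reader also takes pSemWrite to
 * hold off writers, the last reader releases it, and writers take both in
 * order before releasing pSemRead.)
 */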
+#endif + +#include "inc/sync_rwlock_def.h" + +NV_STATUS +portSyncRwLockInitialize +( + PORT_RWLOCK *pLock +) +{ + PORT_MEM_ALLOCATOR *pAllocator = portMemAllocatorGetGlobalNonPaged(); + if (pLock == NULL) + { + return NV_ERR_INVALID_POINTER; + } + + pLock->pSemRead = portSyncSemaphoreCreate(pAllocator, 1); + if (pLock->pSemRead == NULL) + { + return NV_ERR_NO_MEMORY; + } + pLock->pSemWrite = portSyncSemaphoreCreate(pAllocator, 1); + if (pLock->pSemWrite == NULL) + { + portSyncSemaphoreDestroy(pLock->pSemRead); + return NV_ERR_NO_MEMORY; + } + + pLock->numReaders = 0; + pLock->pAllocator = NULL; + + return NV_OK; +} + + +void +portSyncRwLockAcquireRead +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + portSyncSemaphoreAcquire(pLock->pSemRead); + if (portAtomicIncrementS32(&pLock->numReaders) == 1) + { + portSyncSemaphoreAcquire(pLock->pSemWrite); + } + portSyncSemaphoreRelease(pLock->pSemRead); +} + +NvBool +portSyncRwLockAcquireReadConditional +( + PORT_RWLOCK *pLock +) +{ + NvBool bAcquired; + PORT_ASSERT_CHECKED(pLock != NULL); + bAcquired = portSyncSemaphoreAcquireConditional(pLock->pSemRead); + if (!bAcquired) + { + return NV_FALSE; + } + if (portAtomicIncrementS32(&pLock->numReaders) == 1) + { + bAcquired = portSyncSemaphoreAcquireConditional(pLock->pSemWrite); + if (!bAcquired) + { + portAtomicDecrementS32(&pLock->numReaders); + } + } + portSyncSemaphoreRelease(pLock->pSemRead); + return bAcquired; +} + +void +portSyncRwLockAcquireWrite +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + portSyncSemaphoreAcquire(pLock->pSemRead); + portSyncSemaphoreAcquire(pLock->pSemWrite); + portSyncSemaphoreRelease(pLock->pSemRead); +} + +NvBool +portSyncRwLockAcquireWriteConditional +( + PORT_RWLOCK *pLock +) +{ + NvBool bAcquired; + PORT_ASSERT_CHECKED(pLock != NULL); + bAcquired = portSyncSemaphoreAcquireConditional(pLock->pSemRead); + if (bAcquired) + { + bAcquired = portSyncSemaphoreAcquireConditional(pLock->pSemWrite); + portSyncSemaphoreRelease(pLock->pSemRead); + } + return bAcquired; +} + +void +portSyncRwLockReleaseRead +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + if (portAtomicDecrementS32(&pLock->numReaders) == 0) + { + portSyncSemaphoreRelease(pLock->pSemWrite); + } +} + +void +portSyncRwLockReleaseWrite +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + portSyncSemaphoreRelease(pLock->pSemWrite); +} + +void +portSyncRwLockDestroy +( + PORT_RWLOCK *pLock +) +{ + PORT_ASSERT_CHECKED(pLock != NULL); + portSyncSemaphoreDestroy(pLock->pSemRead); + portSyncSemaphoreDestroy(pLock->pSemWrite); + if (pLock->pAllocator != NULL) + { + PORT_FREE(pLock->pAllocator, pLock); + } +} + +// Include implementations common for all platforms +#define PORT_SYNC_COMMON_DEFINE_RWLOCK +#include "sync_common.h" diff --git a/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c b/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c new file mode 100644 index 000000000..fbe60e805 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/sync/sync_unix_kernel_os.c @@ -0,0 +1,242 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2014-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief SYNC module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#define PORT_SYNC_IMPL +#include "nvport/nvport.h" + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "os-interface.h" + +#include "inc/sync_unix_kernel_os_def.h" +#include "inc/sync_rwlock_def.h" + +NV_STATUS +portSyncSpinlockInitialize +( + PORT_SPINLOCK *pSpinlock +) +{ + if (pSpinlock == NULL) + { + return NV_ERR_INVALID_POINTER; + } + pSpinlock->pAllocator = NULL; + return os_alloc_spinlock(&pSpinlock->lock); +} + +void +portSyncSpinlockDestroy +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + os_free_spinlock(pSpinlock->lock); + if (pSpinlock->pAllocator != NULL) + { + PORT_FREE(pSpinlock->pAllocator, pSpinlock); + } +} + +void +portSyncSpinlockAcquire +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + pSpinlock->oldIrql = os_acquire_spinlock(pSpinlock->lock); +} + +void +portSyncSpinlockRelease +( + PORT_SPINLOCK *pSpinlock +) +{ + PORT_ASSERT_CHECKED(pSpinlock != NULL); + os_release_spinlock(pSpinlock->lock, pSpinlock->oldIrql); +} + + + +NV_STATUS +portSyncMutexInitialize +( + PORT_MUTEX *pMutex +) +{ + if (pMutex == NULL) + { + return NV_ERR_INVALID_POINTER; + } + pMutex->pAllocator = NULL; + return os_alloc_mutex(&pMutex->mutex); +} + +void +portSyncMutexDestroy +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + os_free_mutex(pMutex->mutex); + if (pMutex->pAllocator != NULL) + { + PORT_FREE(pMutex->pAllocator, pMutex); + } +} + +void +portSyncMutexAcquire +( + PORT_MUTEX *pMutex +) +{ + NV_STATUS status; + PORT_ASSERT_CHECKED(pMutex != NULL); + PORT_ASSERT_CHECKED(portSyncExSafeToSleep()); + status = os_acquire_mutex(pMutex->mutex); + PORT_ASSERT(status == NV_OK); +} + +NvBool +portSyncMutexAcquireConditional +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + return os_cond_acquire_mutex(pMutex->mutex) == NV_OK; + +} + +void +portSyncMutexRelease +( + PORT_MUTEX *pMutex +) +{ + PORT_ASSERT_CHECKED(pMutex != NULL); + os_release_mutex(pMutex->mutex); +} + + + +NV_STATUS +portSyncSemaphoreInitialize +( + PORT_SEMAPHORE *pSemaphore, + NvU32 startValue +) +{ + if (pSemaphore 
== NULL) + { + return NV_ERR_INVALID_POINTER; + } + pSemaphore->pAllocator = NULL; + pSemaphore->sem = os_alloc_semaphore(startValue); + return (pSemaphore->sem != NULL) ? NV_OK : NV_ERR_NO_MEMORY; +} + +void +portSyncSemaphoreDestroy +( + PORT_SEMAPHORE *pSemaphore +) +{ + PORT_ASSERT_CHECKED(pSemaphore != NULL); + os_free_semaphore(pSemaphore->sem); + if (pSemaphore->pAllocator != NULL) + { + PORT_FREE(pSemaphore->pAllocator, pSemaphore); + } +} + +void +portSyncSemaphoreAcquire +( + PORT_SEMAPHORE *pSemaphore +) +{ + NV_STATUS status; + PORT_ASSERT_CHECKED(pSemaphore != NULL); + status = os_acquire_semaphore(pSemaphore->sem); + PORT_ASSERT(status == NV_OK); +} + +NvBool +portSyncSemaphoreAcquireConditional +( + PORT_SEMAPHORE *pSemaphore +) +{ + + PORT_ASSERT_CHECKED(pSemaphore != NULL); + return os_cond_acquire_semaphore(pSemaphore->sem) == NV_OK; +} + +void +portSyncSemaphoreRelease +( + PORT_SEMAPHORE *pSemaphore +) +{ + PORT_ASSERT_CHECKED(pSemaphore != NULL); + os_release_semaphore(pSemaphore->sem); +} + + +NvBool portSyncExSafeToSleep() +{ + return os_semaphore_may_sleep(); +} +NvBool portSyncExSafeToWake() +{ + return NV_TRUE; +} +NvU64 portSyncExGetInterruptLevel() +{ + return !os_semaphore_may_sleep(); +} + +// Include implementations common for all platforms +#define PORT_SYNC_COMMON_DEFINE_SPINLOCK +#define PORT_SYNC_COMMON_DEFINE_MUTEX +#define PORT_SYNC_COMMON_DEFINE_SEMAPHORE +#define PORT_SYNC_COMMON_DEFINE_SYNC_INIT +#include "sync_common.h" diff --git a/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c b/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c new file mode 100644 index 000000000..95d25f03b --- /dev/null +++ b/src/nvidia/src/libraries/nvport/thread/thread_unix_kernel_os.c @@ -0,0 +1,60 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + +/** + * @file + * @brief THREAD module implementation for Unix kernelmode + * + * This implementation uses the NVIDIA OS interface into the unix kernels. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif + +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +// Invalid value for thread. +const PORT_THREAD PORT_THREAD_INVALID = {0ULL}; + +// Invalid value for process. 
+const PORT_PROCESS PORT_PROCESS_INVALID = {0ULL}; + +NvU64 portThreadGetCurrentThreadId() +{ + NvU64 tid = 0; + os_get_current_thread(&tid); + return tid; +} + +void portThreadYield() +{ + os_schedule(); +} + diff --git a/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c b/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c new file mode 100644 index 000000000..a4a1e4bc8 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/util/util_gcc_clang.c @@ -0,0 +1,80 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util functions implementations using gcc compiler intrinsics + */ + +#include "nvport/nvport.h" + +#if portUtilExGetStackTrace_SUPPORTED +NV_NOINLINE NvUPtr +portUtilExGetStackTrace +( + NvU32 level +) +{ + switch (level) + { + case 0: return (__builtin_frame_address(0) != 0) ? + (NvUPtr)__builtin_return_address(0) : (NvUPtr)0; + case 1: return (__builtin_frame_address(1) != 0) ? + (NvUPtr)__builtin_return_address(1) : (NvUPtr)0; + case 2: return (__builtin_frame_address(2) != 0) ? + (NvUPtr)__builtin_return_address(2) : (NvUPtr)0; + case 3: return (__builtin_frame_address(3) != 0) ? + (NvUPtr)__builtin_return_address(3) : (NvUPtr)0; + case 4: return (__builtin_frame_address(4) != 0) ? + (NvUPtr)__builtin_return_address(4) : (NvUPtr)0; + case 5: return (__builtin_frame_address(5) != 0) ? + (NvUPtr)__builtin_return_address(5) : (NvUPtr)0; + case 6: return (__builtin_frame_address(6) != 0) ? + (NvUPtr)__builtin_return_address(6) : (NvUPtr)0; + case 7: return (__builtin_frame_address(7) != 0) ? + (NvUPtr)__builtin_return_address(7) : (NvUPtr)0; + case 8: return (__builtin_frame_address(8) != 0) ? + (NvUPtr)__builtin_return_address(8) : (NvUPtr)0; + case 9: return (__builtin_frame_address(9) != 0) ? + (NvUPtr)__builtin_return_address(9) : (NvUPtr)0; + case 10: return (__builtin_frame_address(10) != 0) ? + (NvUPtr)__builtin_return_address(10) : (NvUPtr)0; + case 11: return (__builtin_frame_address(11) != 0) ? + (NvUPtr)__builtin_return_address(11) : (NvUPtr)0; + case 12: return (__builtin_frame_address(12) != 0) ? + (NvUPtr)__builtin_return_address(12) : (NvUPtr)0; + case 13: return (__builtin_frame_address(13) != 0) ? + (NvUPtr)__builtin_return_address(13) : (NvUPtr)0; + case 14: return (__builtin_frame_address(14) != 0) ? 
+ (NvUPtr)__builtin_return_address(14) : (NvUPtr)0; + case 15: return (__builtin_frame_address(15) != 0) ? + (NvUPtr)__builtin_return_address(15) : (NvUPtr)0; + } + return 0; +} +#endif + +NV_NOINLINE NvUPtr portUtilGetIPAddress() +{ + return portUtilGetReturnAddress(); +} diff --git a/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c b/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c new file mode 100644 index 000000000..44a5b6d54 --- /dev/null +++ b/src/nvidia/src/libraries/nvport/util/util_unix_kernel_os.c @@ -0,0 +1,44 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief Util functions implementations for unix based OS. + */ + +#if !PORT_IS_KERNEL_BUILD +#error "This file can only be compiled as part of the kernel build." +#endif + +#if !NVOS_IS_UNIX +#error "This file can only be compiled on Unix." +#endif + +#include "nvport/nvport.h" +#include "os-interface.h" + +NvBool portUtilIsInterruptContext(void) +{ + return os_is_isr(); +} + diff --git a/src/nvidia/src/libraries/poolalloc/poolalloc.c b/src/nvidia/src/libraries/poolalloc/poolalloc.c new file mode 100644 index 000000000..6db3cd2d8 --- /dev/null +++ b/src/nvidia/src/libraries/poolalloc/poolalloc.c @@ -0,0 +1,597 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "poolalloc.h" +#include "nvmisc.h" +#include "utils/nvprintf.h" +#include "utils/nvassert.h" + +// Local defines +#define LOG_ENTER NV_PRINTF(LEVEL_INFO, "--> %s at line %d\n", __FUNCTION__, __LINE__) +#define LOG_EXIT NV_PRINTF(LEVEL_INFO, "<-- %s at line %d\n", __FUNCTION__, __LINE__) + +// Local function declarations. +static NvU32 ntz_64 (NvU64 bits); +static NvU32 countZeros(NvU64 bits); + +// Local helpers. +static NvU32 +ntz_64 +( + NvU64 bits +) +{ + NvU64 bz, b5, b4, b3, b2, b1, b0; + NvU64 y; + + y = bits & (~bits+1); + bz = y ? 0 : 1; + b5 = (y & 0x00000000FFFFFFFFULL) ? 0 : 32; + b4 = (y & 0x0000FFFF0000FFFFULL) ? 0 : 16; + b3 = (y & 0x00FF00FF00FF00FFULL) ? 0 : 8; + b2 = (y & 0x0F0F0F0F0F0F0F0FULL) ? 0 : 4; + b1 = (y & 0x3333333333333333ULL) ? 0 : 2; + b0 = (y & 0x5555555555555555ULL) ? 0 : 1; + + return ((NvU32)(bz + b5 + b4 + b3 + b2 + b1 + b0)); +} + +static void +_setBitmap +( + NvU64 *bitmap, + NvU32 index +) +{ + NvU64 mask; + mask = ((NvU64)1 << index); + *bitmap = ((*bitmap) | mask); +} + +static NvU32 +countZeros +( + NvU64 bits +) +{ + // Flip the bits to covert zeros to ones. + bits = ~bits; + return nvPopCount64(bits); +} + + +static void +poolListDestroy +( + PoolNodeList *pList, + POOLALLOC *pPool +) +{ + POOLNODE *pNode; + + NV_ASSERT(pList != NULL); + + pNode = listHead(pList); + while (pNode != NULL) + { + POOLALLOC_HANDLE freeHandle; + POOLNODE *pNextNode; + + pNextNode = listNext(pList, pNode); + freeHandle.address = pNode->pageAddr; + freeHandle.pMetadata = (pNode->pParent != NULL) ? pNode->pParent : pNode; + + (*((pPool->callBackInfo).freeCb))((pPool->callBackInfo).pUpstreamCtx, + pPool->upstreamPageSize, &freeHandle); + + listRemove(pList, pNode); + PORT_FREE(pPool->pAllocator, pNode); + pNode = pNextNode; + } +} + +#if defined(DEBUG_VERBOSE) +static void +poolListPrint +( + PoolNodeList *pList +) +{ + NvU32 length; + PoolNodeListIter it; + + length = listCount(pList); + NV_PRINTF(LEVEL_NOTICE, "Length = %d\n", length); + + it = listIterAll(pList); + while (listIterNext(&it)) + { + NV_PRINTF_EX(POOLALLOC, LEVEL_NOTICE, + "=> [pageAddr: 0x%llx, bitmap: 0x%llx]", + it.pValue->pageAddr, it.pValue->bitmap); + } + NV_PRINTF_EX(POOLALLOC, LEVEL_NOTICE, "\n"); +} +#endif // defined(DEBUG_VERBOSE) + +static void +allocFreeList +( + POOLALLOC *pPool, + POOLALLOC_HANDLE *pPageHandle +) +{ + POOLNODE *pFirstFree; + + pFirstFree = listHead(&pPool->freeList); + pFirstFree->bitmap = ~((NvU64)1); + + if(ntz_64(pFirstFree->bitmap) >= pPool->ratio) + { + // Move from partial list to full list + listRemove(&pPool->freeList, pFirstFree); + listPrependExisting(&pPool->fullList, pFirstFree); + } + else + { + // Move from free list to partial list. + listRemove(&pPool->freeList, pFirstFree); + listPrependExisting(&pPool->partialList, pFirstFree); + } + + // Construct the page handle. 
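+    // (Bit 0 of the upstream page was claimed just above, so the handle's
+    // address is simply the page base; pMetadata points at the tracking node
+    // so poolFree() can later recover the bitmap and the freed index.)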
+ pPageHandle->address = pFirstFree->pageAddr; + pPageHandle->pMetadata = pFirstFree; +} + +static void +allocPartialList +( + POOLALLOC *pPool, + POOLALLOC_HANDLE *pPageHandle +) +{ + POOLNODE *pFirstPartial; + NvU32 freeIdx; + NvU64 bitmap, mask; + + pFirstPartial = listHead(&pPool->partialList); + bitmap = pFirstPartial->bitmap; + freeIdx = ntz_64(bitmap); + mask = ~((NvU64)1 << freeIdx); + + NV_ASSERT(freeIdx < pPool->ratio); + pFirstPartial->bitmap = bitmap & mask; + if(ntz_64(pFirstPartial->bitmap) >= pPool->ratio) + { + // Move from partial list to full list + listRemove(&pPool->partialList, pFirstPartial); + listPrependExisting(&pPool->fullList, pFirstPartial); + } + + // Construct the page handle + pPageHandle->address = pFirstPartial->pageAddr + + (freeIdx * pPool->allocPageSize); + pPageHandle->pMetadata = pFirstPartial; +} + +void +poolAllocPrint +( + POOLALLOC *pPool +) +{ +#if defined(DEBUG_VERBOSE) + NV_PRINTF(LEVEL_NOTICE, "upstreamPageSize = %dKB, allocPageSize = %d%s\n", + (pPool->upstreamPageSize >> 10), + (pPool->allocPageSize >> 10) ? pPool->allocPageSize >> 10 : + pPool->allocPageSize, + (pPool->allocPageSize >> 10) ? "KB" : "B"); + NV_PRINTF_EX(POOLALLOC, LEVEL_NOTICE, "freeList => "); + poolListPrint(&(pPool->freeList)); + NV_PRINTF_EX(POOLALLOC, LEVEL_NOTICE, "partialList => "); + poolListPrint(&(pPool->partialList)); + NV_PRINTF_EX(POOLALLOC, LEVEL_NOTICE, "fullList => "); + poolListPrint(&(pPool->fullList)); +#endif // defined(DEBUG_VERBOSE) +} + + +POOLALLOC * +poolInitialize +( + NvU32 upstreamPageSize, + NvU32 allocPageSize, + allocCallback_t allocCb, + freeCallback_t freeCb, + void *ctxPtr, + PORT_MEM_ALLOCATOR *pAllocator, + NvU32 flags +) +{ + POOLALLOC *pPool; + LOG_ENTER; + + pPool = PORT_ALLOC(pAllocator, sizeof(*pPool)); + if (pPool == NULL) + { + return NULL; + } + + pPool->upstreamPageSize = upstreamPageSize; + pPool->allocPageSize = allocPageSize; + pPool->ratio = upstreamPageSize / allocPageSize; + pPool->flags = flags; + + (pPool->callBackInfo).allocCb = allocCb; + (pPool->callBackInfo).freeCb = freeCb; + (pPool->callBackInfo).pUpstreamCtx = ctxPtr; + + pPool->pAllocator = pAllocator; + + listInitIntrusive(&pPool->freeList); + listInitIntrusive(&pPool->fullList); + listInitIntrusive(&pPool->partialList); + + NV_PRINTF(LEVEL_INFO, "Initialized pool with upstreamPageSize = %dB, allocPageSize = %dB and autoPopulate %s\n", + pPool->upstreamPageSize, pPool->allocPageSize, + ((pPool->flags & NV_RMPOOL_FLAGS_AUTO_POPULATE_ENABLE) ? 
"enabled" : "disabled")); + LOG_EXIT; + return pPool; +} + + +NV_STATUS +poolReserve +( + POOLALLOC *pPool, + NvU64 numPages +) +{ + NvU64 i, freeLength; + allocCallback_t allocCb; + POOLALLOC_HANDLE pageHandle; + + LOG_ENTER; + + if (pPool == NULL || (pPool->callBackInfo).allocCb == NULL) + { + LOG_EXIT; + return NV_ERR_INVALID_ARGUMENT; + } + + freeLength = listCount(&pPool->freeList); + if (freeLength >= numPages) + { + LOG_EXIT; + return NV_OK; + } + + allocCb = (pPool->callBackInfo).allocCb; + + for (i = 0; i < (numPages - freeLength); i++) + { + if ((*allocCb)((pPool->callBackInfo).pUpstreamCtx, + pPool->upstreamPageSize, &pageHandle) == NV_OK) + { + POOLNODE *pNode; + + pNode = PORT_ALLOC(pPool->pAllocator, sizeof(*pNode)); + listPrependExisting(&pPool->freeList, pNode); + + pNode->pageAddr = pageHandle.address; + pNode->bitmap = NV_U64_MAX; + pNode->pParent = pageHandle.pMetadata; + } + else + { + LOG_EXIT; + return NV_ERR_NO_MEMORY; + } + } + + freeLength = listCount(&pPool->freeList); + NV_ASSERT(freeLength == numPages); + + LOG_EXIT; + return NV_OK; +} + + +void +poolTrim +( + POOLALLOC *pPool, + NvU64 preserveNum +) +{ + NvU64 i, freeLength; + freeCallback_t freeCb; + + LOG_ENTER; + if (pPool == NULL || (pPool->callBackInfo).freeCb == NULL) + { + LOG_EXIT; + return; + } + + freeLength = listCount(&pPool->freeList); + if (freeLength <= preserveNum) + { + LOG_EXIT; + return; + } + + freeCb = (pPool->callBackInfo).freeCb; + + for (i = 0; i < (freeLength - preserveNum); i++) + { + POOLNODE *pNode; + POOLALLOC_HANDLE freeHandle; + + pNode = listHead(&pPool->freeList); + freeHandle.address = pNode->pageAddr; + freeHandle.pMetadata = (pNode->pParent != NULL) ? pNode->pParent : pNode; + + (*freeCb)((pPool->callBackInfo).pUpstreamCtx, + pPool->upstreamPageSize, &freeHandle); + + listRemove(&pPool->freeList, pNode); + PORT_FREE(pPool->pAllocator, pNode); + } + + freeLength = listCount(&pPool->freeList); + NV_ASSERT(freeLength == preserveNum); + LOG_EXIT; +} + + + +// Allocating from partial and free lists will succeed +NV_STATUS +poolAllocate +( + POOLALLOC *pPool, + POOLALLOC_HANDLE *pPageHandle +) +{ + allocCallback_t allocCb; + + LOG_ENTER; + + // Trying allocating from the partial list first. + if (listCount(&pPool->partialList) > 0) + { + allocPartialList(pPool, pPageHandle); + LOG_EXIT; + return NV_OK; + } + + // Nothing left in partial list! Try allocating from free list. + if (listCount(&pPool->freeList) > 0) + { + allocFreeList(pPool, pPageHandle); + LOG_EXIT; + return NV_OK; + } + + allocCb = (pPool->callBackInfo).allocCb; + + // + // Nothing left in free list as well!! Populate the pool if it is configured to be auto-populated + // Once we have free list then allocate from free list. 
+ // + if (FLD_TEST_DRF(_RMPOOL, _FLAGS, _AUTO_POPULATE, _ENABLE, pPool->flags)) + { + if ((*allocCb)(pPool->callBackInfo.pUpstreamCtx, pPool->upstreamPageSize, pPageHandle) == NV_OK) + { + POOLNODE *pNode; + + pNode = PORT_ALLOC(pPool->pAllocator, sizeof(*pNode)); + listPrependExisting(&pPool->freeList, pNode); + + pNode->pageAddr = pPageHandle->address; + pNode->bitmap = NV_U64_MAX; + pNode->pParent = pPageHandle->pMetadata; + + allocFreeList(pPool, pPageHandle); + + LOG_EXIT; + return NV_OK; + } + } + + LOG_EXIT; + + return NV_ERR_NO_MEMORY; +} + + +NV_STATUS +poolAllocateContig +( + POOLALLOC *pPool, + NvU32 numPages, + PoolPageHandleList *pPageHandleList +) +{ + POOLNODE *pFreeNode, *pPartialNode; + POOLALLOC_HANDLE *pPageHandle; + NV_STATUS status = NV_OK; + NvU32 i; + PoolPageHandleListIter it; + NvU64 prevAddr, curAddr; + + LOG_ENTER; + + // can't allocate more than one upstream chunk + NV_ASSERT_OR_RETURN(numPages <= pPool->ratio, NV_ERR_INVALID_ARGUMENT); + + // Make sure free chunk is available + NV_ASSERT_OR_RETURN(listCount(&pPool->freeList) > 0, NV_ERR_INVALID_STATE); + + // allocate first page from free node + pFreeNode = listHead(&pPool->freeList); + pPageHandle = listAppendNew(pPageHandleList); + if (pPageHandle == NULL) + { + return NV_ERR_NO_MEMORY; + } + allocFreeList(pPool, pPageHandle); + prevAddr = pPageHandle->address; + pPageHandle = NULL; + + // allocate subsequent pages from same node + // which is now a partial node + pPartialNode = listHead(&pPool->partialList); + if (pPartialNode != pFreeNode) + { + status = NV_ERR_INVALID_STATE; + goto cleanup; + } + for (i = 1; i < numPages; i++) + { + pPageHandle = listAppendNew(pPageHandleList); + if (pPageHandle == NULL) + { + status = NV_ERR_NO_MEMORY; + goto cleanup; + } + allocPartialList(pPool, pPageHandle); + curAddr = pPageHandle->address; + if (curAddr != (prevAddr + pPool->allocPageSize)) + { + status = NV_ERR_INVALID_STATE; + goto cleanup; + } + prevAddr = curAddr; + } + + return NV_OK; +cleanup: + + it = listIterAll(pPageHandleList); + while (listIterNext(&it)) + { + poolFree(pPool, it.pValue); + } + listClear(pPageHandleList); + return status; +} + +void +poolFree +( + POOLALLOC *pPool, + POOLALLOC_HANDLE *pPageHandle +) +{ + POOLNODE *pNode; + NvU32 freeIdx; + NvU64 address, baseAddr; + + LOG_ENTER; + + address = pPageHandle->address; + pNode = (POOLNODE *) (pPageHandle->pMetadata); + baseAddr = pNode->pageAddr; + + freeIdx = (NvU32)((address - baseAddr) / (pPool->allocPageSize)); + _setBitmap(&(pNode->bitmap), freeIdx); + +#if defined(DEBUG_VERBOSE) + poolAllocPrint(pPool); +#endif // defined(DEBUG_VERBOSE) + + // node was in full list and needs to move out of full list + if ((countZeros(pNode->bitmap) + 1) == pPool->ratio) + { + listRemove(&pPool->fullList, pNode); + + // node needs to move to free list + if (pNode->bitmap == NV_U64_MAX) + { + listPrependExisting(&pPool->freeList, pNode); + } + else + { + listPrependExisting(&pPool->partialList, pNode); + } + } + // Node is in partial list + else + { + // Node needs to move from partial list to free list + if (pNode->bitmap == NV_U64_MAX) + { + listRemove(&pPool->partialList, pNode); + listPrependExisting(&pPool->freeList, pNode); + } + } + + LOG_EXIT; +} + + +void +poolDestroy +( + POOLALLOC *pPool +) +{ + LOG_ENTER; + + // call back to free all the pages + poolListDestroy(&pPool->fullList, pPool); + poolListDestroy(&pPool->partialList, pPool); + poolListDestroy(&pPool->freeList, pPool); + + PORT_FREE(pPool->pAllocator, pPool); + LOG_EXIT; +} + +void 
+poolGetListLength +( + POOLALLOC *pPool, + NvU32 *pFreeListLength, + NvU32 *pPartialListLength, + NvU32 *pFullListLength +) +{ + NV_ASSERT(pPool != NULL); + + if (pFreeListLength != NULL) + { + *pFreeListLength = listCount(&pPool->freeList); + } + if (pPartialListLength != NULL) + { + *pPartialListLength = listCount(&pPool->partialList); + } + if (pFullListLength != NULL) + { + *pFullListLength = listCount(&pPool->fullList); + } +} + diff --git a/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c b/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c new file mode 100644 index 000000000..ab9e7e666 --- /dev/null +++ b/src/nvidia/src/libraries/prereq_tracker/prereq_tracker.c @@ -0,0 +1,347 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* ------------------------ Includes --------------------------------------- */ +#include "prereq_tracker/prereq_tracker.h" + +/* ------------------------ Static Function Prototypes --------------------- */ +static NvBool _prereqValid(PrereqTracker *pTracker, PREREQ_ENTRY *pPrereq); + +/* ------------------------ Public Functions ------------------------------ */ + +/*! + * @brief Construct the prereq tracker object + * + * @param[in] pTracker PrereqTracker object to be constructed + * @param[in] pParent Parent GPU passed into the first parameter of callbacks + * + * @return NV_OK Successfully constructed tracker + * @return NV_ERR_INVALID_STATE If already constructed + */ +NV_STATUS +prereqConstruct_IMPL +( + PrereqTracker *pTracker, + OBJGPU *pParent +) +{ + NV_ASSERT_OR_RETURN(!pTracker->bInitialized, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pParent != NULL, NV_ERR_INVALID_OBJECT_PARENT); + + bitVectorClrAll(&pTracker->satisfied); + + listInit(&pTracker->prereqList, portMemAllocatorGetGlobalNonPaged()); + pTracker->bInitialized = NV_TRUE; + pTracker->pParent = pParent; + + return NV_OK; +} + +/*! + * @brief Destroys the prerequisite tracker object + * + * @param[in] pTracker PrereqTracker object to be destroyed + */ +void +prereqDestruct_IMPL +( + PrereqTracker *pTracker +) +{ + NV_ASSERT_OR_RETURN_VOID(pTracker->bInitialized); + + listDestroy(&pTracker->prereqList); + pTracker->bInitialized = NV_FALSE; +} + +/*! + * @brief Arms a tracking structure to fire the callback when all prerequisites + * are satisfied. May only be called after all prerequisites are specified. 
No + * more prerequisites may be specified after arming. + * + * @param[in] pTracker PrereqTracker object + * @param[in] pPrereq PREREQ_ENTRY object pointer + * + * @return NV_OK Prerequisite successfully armed. + * @return error Errors propagated up from functions called. + */ +static NV_STATUS +_prereqArm +( + PrereqTracker *pTracker, + PREREQ_ENTRY *pPrereq +) +{ + PREREQ_ID_BIT_VECTOR requestedAndSatisfied; + + NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(_prereqValid(pTracker, pPrereq), NV_ERR_INVALID_OBJECT); + NV_ASSERT_OR_RETURN(!pPrereq->bArmed, NV_ERR_INVALID_STATE); + + // + // Set the PREREQ_ENTRY state to bArmed. No more PREREQ_IDs may be added + // after this point. + // + pPrereq->bArmed = NV_TRUE; + + // + // Put together a mask of PREREQ_IDs which are both satisfied and requested + // We do not keep track of satisfied prereqs until armed, so we have no existing + // state to worry about here. + // + NV_ASSERT_OK_OR_RETURN(bitVectorAnd(&requestedAndSatisfied, + &pPrereq->requested, + &pTracker->satisfied)); + + pPrereq->countSatisfied = bitVectorCountSetBits(&requestedAndSatisfied); + + if (PREREQ_IS_SATISFIED(pPrereq)) + { + NV_ASSERT_OK_OR_RETURN(pPrereq->callback(pTracker->pParent, NV_TRUE)); + } + + return NV_OK; +} + +/*! + * @brief Creates, adds IDs to, and Arms a prereq tracking structure into the list. + * Caller gives up all control of the prereq structure to the prereq tracker, which + * will take care of storing the completed, final struct and freeing it once done. + * + * @param[in] pTracker PrereqTracker object + * @param[in] callback Callback function pointer + * First parameter passed will be NVOC parent of pTracker + * @param[in] pDepends Bitvector of prerequisite IDs to add as requirement + * @param[out] ppPrereq PREREQ_ENTRY object pointer created, or NULL if not desired + * + * @return NV_OK Prerequisite successfully armed. + * @return error Errors propagated up from functions called. + */ +NV_STATUS +prereqComposeEntry_IMPL +( + PrereqTracker *pTracker, + GpuPrereqCallback *callback, + PREREQ_ID_BIT_VECTOR *pDepends, + PREREQ_ENTRY **ppPrereq +) +{ + PREREQ_ENTRY *pPrereq; + + NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(callback != NULL, NV_ERR_INVALID_POINTER); + NV_ASSERT_OR_RETURN(pDepends != NULL, NV_ERR_INVALID_POINTER); + + pPrereq = listAppendNew(&pTracker->prereqList); + NV_ASSERT_OR_RETURN(pPrereq != NULL, NV_ERR_NO_MEMORY); + + NV_ASSERT_OK_OR_RETURN(bitVectorCopy(&pPrereq->requested, pDepends)); + + pPrereq->countRequested = bitVectorCountSetBits(&pPrereq->requested); + pPrereq->countSatisfied = 0; + pPrereq->callback = callback; + + NV_ASSERT_OK_OR_RETURN(_prereqArm(pTracker, pPrereq)); + + if (ppPrereq != NULL) + *ppPrereq = pPrereq; + + return NV_OK; +} + +/*! + * @brief Notifies that prerequisite was satisfied. + * + * @param[in] pTracker PrereqTracker object + * @param[in] prereqId Prerequisite ID to add as requirement + * + * @return NV_OK Prerequisite successfully satisfied & all callbacks passed. + * @return error Errors propagated up from functions called. + */ +NV_STATUS +prereqSatisfy_IMPL +( + PrereqTracker *pTracker, + PREREQ_ID prereqId +) +{ + PREREQ_ENTRY *pPrereq; + PrereqListIter it; + + NV_ASSERT_OR_RETURN(pTracker->bInitialized, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN((prereqId < PREREQ_ID_VECTOR_SIZE), + NV_ERR_INVALID_REQUEST); + + // + // The prerequisite can be satisfied only once. 
An attempt to satisfy + // the prerequisite multiple times should indicate bad code design. + // + NV_ASSERT_OR_RETURN(!bitVectorTest(&pTracker->satisfied, prereqId), + NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN(bitVectorSet(&pTracker->satisfied, prereqId)); + + // Broadcast satisfaction of this PREREQ_ID to all armed PREREQ_ENTRY. + it = listIterAll(&pTracker->prereqList); + while (listIterNext(&it)) + { + pPrereq = it.pValue; + if (pPrereq->bArmed && + bitVectorTest(&pPrereq->requested, prereqId)) + { + pPrereq->countSatisfied++; + NV_ASSERT_OR_RETURN(pPrereq->countSatisfied <= pPrereq->countRequested, + NV_ERR_INVALID_STATE); + + if (PREREQ_IS_SATISFIED(pPrereq)) + { + NV_ASSERT_OK_OR_RETURN(pPrereq->callback(pTracker->pParent, NV_TRUE)); + } + } + } + + return NV_OK; +} + +/*! + * @brief Notifies that prerequisite will be retracted. + * + * @param[in] pTracker PrereqTracker object + * @param[in] prereqId Prerequisite ID to add as requirement + * + * @return NV_OK Prerequisite successfully retracted & all callbacks passed. + * @return error Errors propagated up from functions called. + */ +NV_STATUS +prereqRetract_IMPL +( + PrereqTracker *pTracker, + PREREQ_ID prereqId +) +{ + PREREQ_ENTRY *pNode; + PrereqListIter it; + NV_STATUS status = NV_OK; + + NV_ASSERT_OR_RETURN(pTracker->bInitialized, + NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN((prereqId < PREREQ_ID_VECTOR_SIZE), + NV_ERR_INVALID_REQUEST); + + // + // The prerequisite can be retracted even if it was not satisfied. This + // simplifies client code since it no longer need to track if prerequisite + // was satisfied (or not) and allows us avoiding isSatisfied() interface. + // + if (!bitVectorTest(&pTracker->satisfied, prereqId)) + return NV_OK; + + NV_ASSERT_OK_OR_RETURN(bitVectorClr(&pTracker->satisfied, prereqId)); + + it = listIterAll(&pTracker->prereqList); + while (listIterNext(&it)) + { + pNode = it.pValue; + + if (pNode->bArmed && + bitVectorTest(&pNode->requested, prereqId)) + { + if (PREREQ_IS_SATISFIED(pNode)) + { + NV_ASSERT_OK_OR_CAPTURE_FIRST_ERROR(status, pNode->callback(pTracker->pParent, NV_FALSE)); + } + + pNode->countSatisfied--; + if (pNode->countSatisfied < 0) + { + NV_ASSERT(0); + if (status == NV_OK) + { + status = NV_ERR_INVALID_STATE; + } + } + } + } + + return status; +} + +/*! + * @brief Indicates if a prerequisite ID is currently satisfied. + * + * @param[in] pTracker PrereqTracker object pointer + * @param[in] prereqId Prerequisite ID to check + * + * @return NV_TRUE Prerequisite ID is in the satisfied mask. + * NV_FALSE otherwise + */ +NvBool +prereqIdIsSatisfied_IMPL +( + PrereqTracker *pTracker, + PREREQ_ID prereqId +) +{ + NvBool bIsSatisfied; + + if ((pTracker->bInitialized) && + (prereqId < PREREQ_ID_VECTOR_SIZE)) + { + bIsSatisfied = bitVectorTest(&pTracker->satisfied, prereqId); + } + else + { + bIsSatisfied = NV_FALSE; + } + + return bIsSatisfied; +} + +/* ---------------------- Private Static Functions -------------------------- */ +/*! + * Helper function which determines whether a given PREREQ_ENTRY tracking + * structure is valid (i.e. is in the tracker's list at @ref + * PrereqTracker::prereqList). + * + * @param[in] pTracker PrereqTracker object pointer + * @param[in] pPrereq PREREQ_ENTRY object pointer + * + * @return NV_TRUE pPrereq is valid. + * @return NV_FALSE pPrereq is invalid. 
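+ *
+ * @note  This is a linear scan of PrereqTracker::prereqList; it is used as a
+ *        sanity check when arming an entry (see _prereqArm).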
+ */ +static NvBool +_prereqValid +( + PrereqTracker *pTracker, + PREREQ_ENTRY *pPrereq +) +{ + PrereqListIter it = listIterAll(&pTracker->prereqList); + while (listIterNext(&it)) + { + // pPrereq is valid if found in the list. + if (it.pValue == pPrereq) + return NV_TRUE; + } + + return NV_FALSE; +} diff --git a/src/nvidia/src/libraries/resserv/src/rs_access_map.c b/src/nvidia/src/libraries/resserv/src/rs_access_map.c new file mode 100644 index 000000000..2f53c049c --- /dev/null +++ b/src/nvidia/src/libraries/resserv/src/rs_access_map.c @@ -0,0 +1,717 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvstatus.h" +#include "nvtypes.h" + +#include "containers/map.h" +#include "resserv/resserv.h" +#include "resserv/rs_resource.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" +#include "resserv/rs_access_rights.h" +#include "resserv/rs_access_map.h" + +static NV_STATUS +_rsAccessGrantCallback +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pParentRights, + void *pAllocParams, + RsAccessRight accessRight +); + +/*! + * @brief Checks which rights, if any, are being shared with the invoking client by a resource + * This is a static helper function for rsAccessGrantRights. 
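+ * Rights granted by matching Allow policies are accumulated and rights denied
+ * by failing Require policies are then subtracted, over both the resource's
+ * active share list and the server's global internal share policy list.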
+ * + * @param[in] pResourceRef + * @param[in] pInvokingClient + * @param[in] pCallContext May be NULL + * @param[out] pRightsShared The set of access rights shared + * + * @return none + */ +static void +_rsAccessGetSharedRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + CALL_CONTEXT *pCallContext, + RS_ACCESS_MASK *pRightsShared +) +{ + RsShareList *pShareList; + RsShareListIter it; + + RsServer *pServer = NULL; + RsResourceRef *pParentRef = NULL; + + RS_ACCESS_MASK rightsGranted; + RS_ACCESS_MASK rightsDenied; + + portMemSet(&rightsGranted, 0, sizeof(RS_ACCESS_MASK)); + portMemSet(&rightsDenied, 0, sizeof(RS_ACCESS_MASK)); + + RS_ACCESS_MASK_CLEAR(pRightsShared); + + // No meaning to sharing rights with self, skip + if (pInvokingClient == pResourceRef->pClient) + return; + + if (pCallContext != NULL) + { + pServer = pCallContext->pServer; + pParentRef = pCallContext->pContextRef; + } + + pShareList = rsAccessGetActiveShareList(pResourceRef, pServer); + + if (pShareList != NULL) + { + it = listIterAll(pShareList); + while (listIterNext(&it)) + { + RS_SHARE_POLICY *pSharePolicy = it.pValue; + + if (resShareCallback(pResourceRef->pResource, pInvokingClient, pParentRef, pSharePolicy)) + { + // Allow policies give rights on success + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + RS_ACCESS_MASK_UNION(&rightsGranted, &pSharePolicy->accessMask); + } + else + { + // Require policies reject rights on failure + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + RS_ACCESS_MASK_UNION(&rightsDenied, &pSharePolicy->accessMask); + } + } + } + + if (pServer != NULL) + { + it = listIterAll(&pServer->globalInternalSharePolicyList); + while (listIterNext(&it)) + { + RS_SHARE_POLICY *pSharePolicy = it.pValue; + + if (resShareCallback(pResourceRef->pResource, pInvokingClient, pParentRef, pSharePolicy)) + { + // Allow policies give rights on success + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + RS_ACCESS_MASK_UNION(&rightsGranted, &pSharePolicy->accessMask); + } + else + { + // Require policies reject rights on failure + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) + RS_ACCESS_MASK_UNION(&rightsDenied, &pSharePolicy->accessMask); + } + } + } + + RS_ACCESS_MASK_UNION(pRightsShared, &rightsGranted); + RS_ACCESS_MASK_SUBTRACT(pRightsShared, &rightsDenied); +} + +void rsAccessGetAvailableRights +( + RsResourceRef *pResourceRef, + RsClient *pClient, + RS_ACCESS_MASK *pAvailableRights +) +{ + RS_ACCESS_MASK *pTargetRights; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + RS_ACCESS_MASK_CLEAR(pAvailableRights); + + // Look up rights client has on target resource + pTargetRights = rsAccessLookup(pResourceRef, pClient); + if (pTargetRights != NULL) + { + // Client owns the resource, use those rights directly + portMemCopy(pAvailableRights, sizeof(*pAvailableRights), + pTargetRights, sizeof(*pTargetRights)); + } + else + { + // Client does not own the resource, add any rights shared with this client + _rsAccessGetSharedRights(pResourceRef, pClient, pCallContext, pAvailableRights); + } +} + +RS_ACCESS_MASK * +rsAccessLookup +( + RsResourceRef *pResourceRef, + RsClient *pClient +) +{ + if (pResourceRef->pClient == pClient) + return &pResourceRef->accessMask; + + return NULL; +} + +NV_STATUS +rsAccessCheckRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequired +) +{ + RS_ACCESS_MASK ownedRights; + + NV_ASSERT_OR_RETURN(pRightsRequired != NULL, NV_ERR_INVALID_ARGUMENT); + + // 
Return if nothing to check + if (rsAccessMaskIsEmpty(pRightsRequired)) + return NV_OK; + + // Uncached access rights require executing the callback every time + rsAccessUpdateRights(pResourceRef, pInvokingClient, pRightsRequired); + + // Look up updated rights on target resource + rsAccessGetAvailableRights(pResourceRef, pInvokingClient, &ownedRights); + + // Check that rights are sufficient + if (!rsAccessMaskIsSubset(&ownedRights, pRightsRequired)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} + +void rsAccessUpdateRights +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsToUpdate +) +{ + RS_ACCESS_MASK *pTargetRights; + RsAccessRight accessRight; + + // Look up rights on target resource + pTargetRights = rsAccessLookup(pResourceRef, pInvokingClient); + + // + // Nothing to update if the resource is not owned by the client + // (Uncached rights only have meaning for resources owned by the client) + // + if (pTargetRights == NULL) + return; + + // Update access rights owned by the client for any uncached rights + for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++) + { + NV_STATUS status; + const RS_ACCESS_INFO *pAccessRightInfo = &g_rsAccessMetadata[accessRight]; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + if ((pRightsToUpdate != NULL && + !RS_ACCESS_MASK_TEST(pRightsToUpdate, accessRight)) || + !(pAccessRightInfo->flags & RS_ACCESS_FLAG_UNCACHED_CHECK)) + { + continue; + } + + status = _rsAccessGrantCallback(pResourceRef, pCallContext, pInvokingClient, NULL, NULL, accessRight); + + if (status != NV_OK) + { + RS_ACCESS_MASK_REMOVE(pTargetRights, accessRight); + } + else + { + RS_ACCESS_MASK_ADD(pTargetRights, accessRight); + } + } +} + +/*! + * @brief Checks whether two share policies are considered equal and can be merged + * + * This function uses the type and target of a share policy to determine whether + * two share policy entries would match the same clients, in which case they could + * be merged into one policy entry. 
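+ * Two policies are considered equal when their type and their Require flag
+ * agree; the target handle is compared only for RS_SHARE_TYPE_CLIENT policies
+ * and is ignored for every other share type.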
+ * + * @param[in] pSharePolicyA, pSharePolicyB the two policies to compare + * + * @return NV_TRUE if the two policies are equal, + * NV_FALSE otherwise + */ +static NvBool +rsSharePolicyEquals +( + const RS_SHARE_POLICY *pSharePolicyA, + const RS_SHARE_POLICY *pSharePolicyB +) +{ + if (pSharePolicyA == NULL || pSharePolicyB == NULL) + return NV_FALSE; + + if (pSharePolicyA->type != pSharePolicyB->type) + return NV_FALSE; + + if ((pSharePolicyA->action & RS_SHARE_ACTION_FLAG_REQUIRE) != + (pSharePolicyB->action & RS_SHARE_ACTION_FLAG_REQUIRE)) + { + return NV_FALSE; + } + + if (pSharePolicyA->type == RS_SHARE_TYPE_CLIENT) + { + return pSharePolicyA->target == pSharePolicyB->target; + } + + // Otherwise, ignore target entirely + return NV_TRUE; +} + +RS_SHARE_POLICY * +rsShareListLookup +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy +) +{ + RsShareListIter it; + + // + // Need to match a condition instead of just pValue, + // can't just use listLookup directly + // + it = listIterAll(pShareList); + while (listIterNext(&it)) + { + if (rsSharePolicyEquals(it.pValue, pSharePolicy)) + { + return it.pValue; + } + } + + return NULL; +} + +NV_STATUS +rsShareListInsert +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy, + RS_ACCESS_MASK *pAccessMask +) +{ + RS_ACCESS_MASK *pCurrentAccessMask; + RS_SHARE_POLICY *pCurrentPolicy; + RS_SHARE_POLICY *pNewPolicy; + + pCurrentPolicy = rsShareListLookup(pShareList, pSharePolicy); + if (pCurrentPolicy == NULL) + { + // Allocate and insert a share policy entry + pNewPolicy = listAppendValue(pShareList, pSharePolicy); + if (pNewPolicy == NULL) + { + return NV_ERR_NO_MEMORY; + } + + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + &pNewPolicy->accessMask, sizeof(pNewPolicy->accessMask)); + } + } + else + { + // Merge into existing share policy entry + pCurrentAccessMask = &pCurrentPolicy->accessMask; + RS_ACCESS_MASK_UNION(pCurrentAccessMask, &pSharePolicy->accessMask); + + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + pCurrentAccessMask, sizeof(*pCurrentAccessMask)); + } + } + + return NV_OK; +} + +void +rsShareListRemove +( + RsShareList *pShareList, + RS_SHARE_POLICY *pSharePolicy, + RS_ACCESS_MASK *pAccessMask +) +{ + RS_SHARE_POLICY *pCurrentPolicy; + RS_ACCESS_MASK *pCurrentAccessMask; + + pCurrentPolicy = rsShareListLookup(pShareList, pSharePolicy); + if (pCurrentPolicy != NULL) + { + // Revoke specified rights from found mask + pCurrentAccessMask = &pCurrentPolicy->accessMask; + RS_ACCESS_MASK_SUBTRACT(pCurrentAccessMask, &pSharePolicy->accessMask); + + // pCurrentAccessMask may not exist afterwards, so copy output first + if (pAccessMask != NULL) + { + portMemCopy(pAccessMask, sizeof(*pAccessMask), + pCurrentAccessMask, sizeof(*pCurrentAccessMask)); + } + + if (rsAccessMaskIsEmpty(pCurrentAccessMask)) + { + // No more rights shared under this policy, erase it from the list + listRemove(pShareList, pCurrentPolicy); + } + } + else + { + // No match, no rights to revoke, output empty mask + if (pAccessMask != NULL) + { + RS_ACCESS_MASK_CLEAR(pAccessMask); + } + } +} + +NV_STATUS +rsShareListCopy +( + RsShareList *pShareListDst, + RsShareList *pShareListSrc +) +{ + RsShareListIter it; + + if (pShareListSrc == NULL) + return NV_OK; + + it = listIterAll(pShareListSrc); + while (listIterNext(&it)) + { + if (NULL == listAppendValue(pShareListDst, it.pValue)) + return NV_ERR_NO_MEMORY; + } + + return NV_OK; +} + +RsShareList * +rsAccessGetActiveShareList +( + 
RsResourceRef *pResourceRef, + RsServer *pServer +) +{ + RsResourceRef *pSearchRef = pResourceRef; + + // Search up the tree for a resource with an edited share list + while (pSearchRef != NULL) + { + if (pSearchRef->bSharePolicyListModified) + return &pSearchRef->sharePolicyList; + + pSearchRef = pSearchRef->pParentRef; + } + + if (pServer != NULL) + return &pServer->defaultInheritedSharePolicyList; + + return NULL; +} + +/*! + * @brief Checks whether one access right can be granted on a resource + * + * This is a static helper function for rsAccessGrantRights. The pParentRights + * argument is not strictly necessary, but is used to avoid performing multiple + * identical lookups in a map. + * + * @param[in] pResourceRef + * @param[in] pCallContext + * @param[in] pInvokingClient + * @param[in] pParentRights The set of access rights held by the invoking client + * on the resource's parent + * @param[in] accessRight The access right to try to grant + * + * @return NV_OK if the access right can be granted, or an error otherwise + */ +static NV_STATUS +_rsAccessGrantCallback +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pParentRights, + void *pAllocParams, + RsAccessRight accessRight +) +{ + const RS_ACCESS_INFO *pAccessRightInfo; + API_SECURITY_INFO *pSecInfo = NULL; + + NV_ASSERT_OR_RETURN(accessRight < RS_ACCESS_COUNT, NV_ERR_INVALID_ARGUMENT); + + pAccessRightInfo = &g_rsAccessMetadata[accessRight]; + + if (pCallContext != NULL) + { + pSecInfo = &pCallContext->secInfo; + } + else + { + NV_PRINTF(LEVEL_WARNING, "Called with NULL pCallContext, skipping permission checks\n"); + } + + // + // If the parent object has this access right, then we should be able to + // inherit it without doing any other checks + // + if ((pParentRights != NULL) && RS_ACCESS_MASK_TEST(pParentRights, accessRight)) + { + return NV_OK; + } + + if ((pSecInfo != NULL) && ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_PRIVILEGED) != 0)) + { + // Allow admin-privileged contexts + if (pSecInfo->privLevel >= RS_PRIV_LEVEL_USER_ROOT) + { + return NV_OK; + } + } + + if ((pSecInfo != NULL) && ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_KERNEL_PRIVILEGED) != 0)) + { + // Allow kernel-privileged contexts + if (pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) + { + return NV_OK; + } + } + + if ((pAccessRightInfo->flags & RS_ACCESS_FLAG_ALLOW_OWNER) != 0) + { + // Allow client this access right on itself + if (pResourceRef->hResource == pInvokingClient->hClient) + { + return NV_OK; + } + } + + // Finally, invoke the resource's access callback + if (resAccessCallback(pResourceRef->pResource, pInvokingClient, pAllocParams, accessRight)) + { + return NV_OK; + } + + // All attempts to grant access failed + return NV_ERR_INSUFFICIENT_PERMISSIONS; +} + + +/*! + * @brief Computes the list of access rights to attempt to grant on a resource + * + * This is a static helper function for rsAccessGrantRights. 
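+ * When rights were explicitly requested, those are used verbatim; otherwise a
+ * new client (no parent reference) requests every access right, and any other
+ * resource inherits the invoking client's rights on its parent plus the
+ * rights required for this allocation.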
+ * + * @param[in] pResourceRef + * @param[in] pInvokingClient + * @param[in] pRightsRequested The rights specified in the allocation parameters, + * or NULL if no access rights were explicitly requested + * @param[in] pRightsRequired Rights required for the allocation of this object + * to succeed, not used if rights were explicitly requested + * @param[out] pRightsToRequest The set of access rights that should be requested, + * based on input parameters provided + * + * @return NV_TRUE if access rights were explicitly requested, or + * NV_FALSE otherwise + */ +static NvBool +_rsAccessGetRightsToRequest +( + RsResourceRef *pResourceRef, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequested, + const RS_ACCESS_MASK *pRightsRequired, + RS_ACCESS_MASK *pRightsToRequest +) +{ + NvBool bExplicitlyRequested; + + NV_ASSERT(pRightsToRequest != NULL); + RS_ACCESS_MASK_CLEAR(pRightsToRequest); + + if (pRightsRequested != NULL) + { + // A set of access rights was explicitly requested + bExplicitlyRequested = NV_TRUE; + + portMemCopy(pRightsToRequest, sizeof(*pRightsToRequest), + pRightsRequested, sizeof(*pRightsRequested)); + } + else + { + // No rights were explicitly requested + bExplicitlyRequested = NV_FALSE; + + if (pResourceRef->pParentRef == NULL) + { + // Only client resources don't have a parent reference + // Try to request all access rights for new clients + RS_ACCESS_MASK_FILL(pRightsToRequest); + } + else + { + // Inherit access rights from parent reference + RS_ACCESS_MASK *pParentRights = rsAccessLookup(pResourceRef->pParentRef, pInvokingClient); + if (pParentRights != NULL) + { + portMemCopy(pRightsToRequest, sizeof(*pRightsToRequest), + pParentRights, sizeof(*pParentRights)); + } + + // Add any required rights as well + if (pRightsRequired != NULL) + { + RS_ACCESS_MASK_UNION(pRightsToRequest, pRightsRequired); + } + } + } + + return bExplicitlyRequested; +} + +NV_STATUS +rsAccessGrantRights +( + RsResourceRef *pResourceRef, + CALL_CONTEXT *pCallContext, + RsClient *pInvokingClient, + const RS_ACCESS_MASK *pRightsRequested, + const RS_ACCESS_MASK *pRightsRequired, + void *pAllocParams +) +{ + NV_STATUS status; + NvBool bExplicitlyRequested; + RS_ACCESS_MASK rightsToRequest; + RS_ACCESS_MASK rightsShared; + RS_ACCESS_MASK *pResourceRights; + RS_ACCESS_MASK resourceRights; + RS_ACCESS_MASK *pParentRights = NULL; + RsAccessRight accessRight; + + // Determine which rights to request based on pRightsRequested + bExplicitlyRequested = _rsAccessGetRightsToRequest(pResourceRef, pInvokingClient, + pRightsRequested, pRightsRequired, + &rightsToRequest); + + // Return if nothing to grant + if (rsAccessMaskIsEmpty(&rightsToRequest)) + return NV_OK; + + // Find rights on the current resource + pResourceRights = rsAccessLookup(pResourceRef, pInvokingClient); + if (pResourceRights == NULL) + { + // + // When using grant for resources the client doesn't own, we don't modify the + // resource's mask, we only use a local mask to record which rights were available + // + RS_ACCESS_MASK_CLEAR(&resourceRights); + pResourceRights = &resourceRights; + } + + // Explicitly requesting to not get all required rights, cannot possibly succeed + if (bExplicitlyRequested && + (pRightsRequired != NULL) && + !rsAccessMaskIsSubset(&rightsToRequest, pRightsRequired)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + // Get rights on the parent resource to cache for _rsAccessGrantCallback + if (pResourceRef->pParentRef != NULL) + { + pParentRights = rsAccessLookup(pResourceRef->pParentRef, 
pInvokingClient); + } + + // Get any rights shared with this client + _rsAccessGetSharedRights(pResourceRef, pInvokingClient, pCallContext, &rightsShared); + + // Grant each access right in rightsToRequest + for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++) + { + if (!RS_ACCESS_MASK_TEST(&rightsToRequest, accessRight)) + continue; + + if (RS_ACCESS_MASK_TEST(&rightsShared, accessRight)) + { + status = NV_OK; + } + else + { + status = _rsAccessGrantCallback(pResourceRef, pCallContext, pInvokingClient, + pParentRights, pAllocParams, accessRight); + } + + if (status == NV_OK) + { + RS_ACCESS_MASK_ADD(pResourceRights, accessRight); + } + else + { + // + // The default behavior is to silently ignore failure to grant an access right, + // which happens when the requested access rights are not specified. + // + // In contrast, if access rights are explicitly requested (i.e. with + // the NvRmAllocWithAccess API), we return an error code when we fail to + // grant access rights. + // + if (bExplicitlyRequested) + return status; + } + } + + // Fail if could not get all required rights + if ((pRightsRequired != NULL) && + !rsAccessMaskIsSubset(pResourceRights, pRightsRequired)) + { + return NV_ERR_INSUFFICIENT_PERMISSIONS; + } + + return NV_OK; +} diff --git a/src/nvidia/src/libraries/resserv/src/rs_access_rights.c b/src/nvidia/src/libraries/resserv/src/rs_access_rights.c new file mode 100644 index 000000000..b8e2f8328 --- /dev/null +++ b/src/nvidia/src/libraries/resserv/src/rs_access_rights.c @@ -0,0 +1,119 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvstatus.h" +#include "nvtypes.h" +#include "utils/nvassert.h" +#include "nvctassert.h" + +#include "resserv/rs_access_rights.h" + + +// Ensure the number of declared access rights is within the capacity +// provided by the number of limbs used. +// Also, NVOC acces_right is NvU32 currently. 
It requires NVOC change to support 32+ bits +ct_assert(RS_ACCESS_COUNT <= SDK_RS_ACCESS_MAX_COUNT); + + +#if !(RS_STANDALONE_TEST) +const RS_ACCESS_INFO g_rsAccessMetadata[RS_ACCESS_COUNT] = +{ + // RS_ACCESS_DUP_OBJECT + { + RS_ACCESS_FLAG_ALLOW_OWNER + }, + + // RS_ACCESS_NICE + { + RS_ACCESS_FLAG_ALLOW_PRIVILEGED | RS_ACCESS_FLAG_UNCACHED_CHECK + }, + + // RS_ACCESS_DEBUG + { + RS_ACCESS_FLAG_ALLOW_OWNER + }, +}; +#endif /* RS_STANDALONE_TEST */ + + +NvBool +rsAccessMaskIsSubset +( + const RS_ACCESS_MASK *pRightsPresent, + const RS_ACCESS_MASK *pRightsRequired +) +{ + RsAccessRight accessRight; + + for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++) + { + if (RS_ACCESS_MASK_TEST(pRightsRequired, accessRight) && + !RS_ACCESS_MASK_TEST(pRightsPresent, accessRight)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + + +NvBool +rsAccessMaskIsEmpty +( + const RS_ACCESS_MASK *pAccessMask +) +{ + RsAccessRight accessRight; + + for (accessRight = 0; accessRight < RS_ACCESS_COUNT; accessRight++) + { + if (RS_ACCESS_MASK_TEST(pAccessMask, accessRight)) + { + return NV_FALSE; + } + } + + return NV_TRUE; +} + + +void +rsAccessMaskFromArray +( + RS_ACCESS_MASK *pAccessMask, + const RsAccessRight *pRightsArray, + NvLength length +) +{ + NvLength i; + + RS_ACCESS_MASK_CLEAR(pAccessMask); + + NV_ASSERT_OR_RETURN_VOID(pRightsArray != NULL); + + for (i = 0; i < length; i++) + { + RS_ACCESS_MASK_ADD(pAccessMask, pRightsArray[i]); + } +} diff --git a/src/nvidia/src/libraries/resserv/src/rs_client.c b/src/nvidia/src/libraries/resserv/src/rs_client.c new file mode 100644 index 000000000..2ad40a4d6 --- /dev/null +++ b/src/nvidia/src/libraries/resserv/src/rs_client.c @@ -0,0 +1,1741 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + + +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" + +#if !(RS_STANDALONE) +#include "os/os.h" +#include "resserv/rs_access_map.h" +#endif + +typedef enum +{ + ALLOC_NEW_RESOURCE, + ALLOC_SHARED_RESOURCE +} ALLOC_TYPE; + +/** + * Allocate a new or shared resource in RM for this client + * @param[in] pClient This client + * @param[in] pServer The resource server instance + * @param[in] pParams Parameters for the resource allocation + * @param[in,out] phResource Server will assign a handle if it is 0 + */ +static NV_STATUS _clientAllocResourceHelper(RsClient *pClient, RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvHandle *phResource); + +/** + * Add a resource reference to the client's resource hashmap + * @param[in] pClient This client + * @param[in] pServer The resource server that owns the resource ref + * @param[in] pParentRef The resource's parent reference + * @param[in] hResource The resource's handle + * @param[in] classId The resource's class + * @param[out] ppResourceRef The new resource reference + */ +static NV_STATUS _clientConstructResourceRef(RsClient *pClient, RsServer *pServer, RsResourceRef *pParentRef, + NvHandle hResource, NvU32 classId, RsResourceRef **ppResourceRef); + +/** + * Release all CPU address mappings for a resource + * + * @param[in] pClient Client that owns the resource + * @param[in] pCallContext Caller information (which includes the resource reference whose mappings will be freed) + * @param[in] pLockInfo Information about which locks are already held, for recursive calls + */ +static NV_STATUS _clientUnmapResourceRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); + +/** + * Release all CPU address mappings that reference this resource + * + * @param[in] pClient Client that owns the resource + * @param[in] pCallContext Caller information (which includes the resource reference + * whose mapping back references will be freed) + * @param[in] pLockInfo Information about which locks are already held, for recursive calls + */ +static NV_STATUS _clientUnmapBackRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); + +static void _clientUnmapInterMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); +static void _clientUnmapInterBackRefMappings(RsClient *pClient, CALL_CONTEXT *pCallContext, RS_LOCK_INFO *pLockInfo); + +NV_STATUS +clientConstruct_IMPL +( + RsClient *pClient, + PORT_MEM_ALLOCATOR *pAllocator, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status; + CLIENT_TYPE type; + + if (pParams->pSecInfo->privLevel >= RS_PRIV_LEVEL_KERNEL) + type = CLIENT_TYPE_KERNEL; + else + type = CLIENT_TYPE_USER; + + pClient->type = type; + pClient->hClient = pParams->hClient; + + mapInit(&pClient->resourceMap, pAllocator); + listInitIntrusive(&pClient->pendingFreeList); + + listInit(&pClient->accessBackRefList, pAllocator); + + pClient->handleGenIdx = 0; + status = clientSetHandleGenerator(pClient, 0, 0); + if (status != NV_OK) + return status; + + pClient->bActive = NV_TRUE; + + status = clientSetRestrictedRange(pClient, 0, 0); + if (status != NV_OK) + return status; + + return NV_OK; +} + +NV_STATUS +clientSetHandleGenerator_IMPL +( + RsClient *pClient, + NvHandle handleRangeStart, + NvHandle handleRangeSize +) +{ + // + // on vGPU, when client uses RM allocated handles, post allocation of rmclient NV01_ROOT, + // NV01_DEVICE_0 is allocated which increment the handleGenIdx to 
0x1. + // In order to avoid the handle clash, we split the default RM handle ranges between Guest RM + // (0xcaf00000, 0xcaf3ffff) and host RM (0xcaf40000, 0xcaf80000). + // Hence, we should take this overriding into consideration when the ranges over the default + // RM handle ranges. + // + NvBool bShrinkUnusedRange = ((pClient->handleRangeStart == handleRangeStart) && + (pClient->handleGenIdx <= handleRangeSize)); + + if (!((pClient->handleGenIdx == 0) || bShrinkUnusedRange)) + { + return NV_ERR_INVALID_STATE; + } + + if ((handleRangeStart == 0) && (handleRangeSize == 0)) + { + pClient->handleRangeStart = RS_UNIQUE_HANDLE_BASE; + pClient->handleRangeSize = RS_UNIQUE_HANDLE_RANGE; + } + else if ((handleRangeStart != 0) && (handleRangeSize != 0)) + { + pClient->handleRangeStart = handleRangeStart; + pClient->handleRangeSize = handleRangeSize; + } + else + { + return NV_ERR_INVALID_PARAMETER; + } + + return NV_OK; +} + +NV_STATUS clientCanShareResource_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + NV_STATUS status = NV_OK; + + RS_ACCESS_MASK rightsNeeded; + RS_ACCESS_MASK *pRightsHeld; + + // + // If sharing, check that the client has the rights it is trying to share + // Revoking does not require this to allow revoking all rights without checking + // + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE)) + { + status = rsAccessCheckRights(pResourceRef, pClient, &pSharePolicy->accessMask); + if (status == NV_ERR_INSUFFICIENT_PERMISSIONS) + { + // Attempt to grant rights which aren't already owned + portMemCopy(&rightsNeeded, sizeof(rightsNeeded), + &pSharePolicy->accessMask, sizeof(pSharePolicy->accessMask)); + + pRightsHeld = rsAccessLookup(pResourceRef, pClient); + if (pRightsHeld != NULL) + { + // Skip trying to grant rights already held + RS_ACCESS_MASK_SUBTRACT(&rightsNeeded, pRightsHeld); + } + + status = rsAccessGrantRights(pResourceRef, pCallContext, pClient, + &rightsNeeded, // pRightsRequested + NULL, // pRightsRequired + NULL); // pAllocParams + } + } + + return status; +} + +NV_STATUS +clientShareResource_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + RsServer *pServer = NULL; + RsShareList *pActiveList; + NV_STATUS status; + + status = clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext); + if (status != NV_OK) + return status; + + if (!pResourceRef->bSharePolicyListModified) + { + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_COMPOSE) + { + if (pCallContext != NULL) + pServer = pCallContext->pServer; + + pActiveList = rsAccessGetActiveShareList(pResourceRef, pServer); + status = rsShareListCopy(&pResourceRef->sharePolicyList, pActiveList); + if (status != NV_OK) + return status; + } + + pResourceRef->bSharePolicyListModified = NV_TRUE; + } + + if (!(pSharePolicy->action & RS_SHARE_ACTION_FLAG_COMPOSE)) + { + listClear(&pResourceRef->sharePolicyList); + } + + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) + { + rsShareListRemove(&pResourceRef->sharePolicyList, pSharePolicy, NULL); + } + else + { + status = rsShareListInsert(&pResourceRef->sharePolicyList, pSharePolicy, NULL); + } + + return status; +} + +NV_STATUS +clientShareResourceTargetClient_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_SHARE_POLICY *pSharePolicy, + CALL_CONTEXT *pCallContext +) +{ + NV_STATUS status; + RS_ACCESS_MASK *pCurrentRights; + + // Special case: This should only be called 
when share policy is for own client + NV_ASSERT(pSharePolicy->type == RS_SHARE_TYPE_CLIENT); + NV_ASSERT(pSharePolicy->target == pClient->hClient); + + status = clientCanShareResource(pClient, pResourceRef, pSharePolicy, pCallContext); + if (status != NV_OK) + return status; + + pCurrentRights = rsAccessLookup(pResourceRef, pClient); + + if (pSharePolicy->action & RS_SHARE_ACTION_FLAG_REVOKE) + { + RS_ACCESS_MASK_SUBTRACT(pCurrentRights, &pSharePolicy->accessMask); + } + else + { + RS_ACCESS_MASK_UNION(pCurrentRights, &pSharePolicy->accessMask); + } + + return NV_OK; +} + +NV_STATUS +clientSetRestrictedRange_IMPL +( + RsClient *pClient, + NvHandle handleRangeStart, + NvU32 handleRangeSize +) +{ + NvHandle hFirst = handleRangeStart; + NvHandle hLast; + + // Only allow modification if we haven't generated any handles + if (pClient->handleGenIdx != 0) + return NV_ERR_INVALID_STATE; + + if (handleRangeSize == 0) + { + if (handleRangeStart != 0) + return NV_ERR_INVALID_PARAMETER; + + pClient->handleRestrictRange = NV_RANGE_EMPTY; + return NV_OK; + } + + // Wrapping-around the reserved range is not supported + if (!portSafeAddU32(hFirst, handleRangeSize-1, &hLast)) + return NV_ERR_INVALID_REQUEST; + + pClient->handleRestrictRange = rangeMake(hFirst, hLast); + + return NV_OK; +} + +void clientDestruct_IMPL +( + RsClient *pClient +) +{ + NV_ASSERT(mapCount(&pClient->resourceMap) == 0); + mapDestroy(&pClient->resourceMap); + + NV_ASSERT(listCount(&pClient->accessBackRefList) == 0); + listDestroy(&pClient->accessBackRefList); +} + +NV_STATUS +clientGetResource_IMPL +( + RsClient *pClient, + NvHandle hResource, + NvU32 internalClassId, + RsResource **ppResource +) +{ + NV_STATUS status = NV_OK; + RsResourceRef *pResourceRef; + RsResource *pResource; + + pResourceRef = mapFind(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + { + status = NV_ERR_OBJECT_NOT_FOUND; + pResource = NULL; + goto done; + } + + if (pResourceRef->internalClassId != internalClassId) + { + status = NV_ERR_INVALID_CLASS; + pResource = NULL; + goto done; + } + + pResource = pResourceRef->pResource; + +done: + if (ppResource != NULL) + *ppResource = pResource; + + return status; +} + +NV_STATUS +clientGetResourceByRef_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RsResource **ppResource +) +{ + if (ppResource != NULL) + *ppResource = pResourceRef->pResource; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRef_IMPL +( + RsClient *pClient, + NvHandle hResource, + RsResourceRef **ppResourceRef +) +{ + RsResourceRef *pResourceRef; + + pResourceRef = mapFind(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRefWithAccess_IMPL +( + RsClient *pClient, + NvHandle hResource, + const RS_ACCESS_MASK *pRightsRequired, + RsResourceRef **ppResourceRef +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + return status; + + status = rsAccessCheckRights(pResourceRef, pClient, pRightsRequired); + if (status != NV_OK) + return status; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; +} + +NV_STATUS +clientGetResourceRefByType_IMPL +( + RsClient *pClient, + NvHandle hResource, + NvU32 internalClassId, + RsResourceRef **ppResourceRef +) +{ + NV_STATUS status; + RsResourceRef *pResourceRef; + + status = 
clientGetResourceRef(pClient, hResource, &pResourceRef);
+    if (status != NV_OK)
+        return status;
+
+    if (pResourceRef->internalClassId != internalClassId)
+        return NV_ERR_INVALID_OBJECT_HANDLE;
+
+    if (ppResourceRef != NULL)
+        *ppResourceRef = pResourceRef;
+
+    return NV_OK;
+}
+
+NV_STATUS
+clientValidate_IMPL
+(
+    RsClient *pClient,
+    const API_SECURITY_INFO *pSecInfo
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+clientAllocResource_IMPL
+(
+    RsClient *pClient,
+    RsServer *pServer,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    return _clientAllocResourceHelper(pClient, pServer, pParams, &pParams->hResource);
+}
+
+NV_STATUS
+clientCopyResource_IMPL
+(
+    RsClient *pClient,
+    RsServer *pServer,
+    RS_RES_DUP_PARAMS_INTERNAL *pParams
+)
+{
+    RS_RES_ALLOC_PARAMS_INTERNAL params;
+    CALL_CONTEXT callContext;
+    CALL_CONTEXT *pOldContext = NULL;
+
+    RsClient *pClientDst = NULL;
+    RsResourceRef *pParentRef = NULL;
+
+    NV_STATUS status;
+
+    status = serverGetClientUnderLock(pServer, pParams->hClientDst, &pClientDst);
+    if (status != NV_OK)
+        return status;
+
+    status = clientGetResourceRef(pClientDst, pParams->hParentDst, &pParentRef);
+    if (status != NV_OK)
+        return status;
+
+    portMemSet(&callContext, 0, sizeof(callContext));
+    callContext.pServer = pServer;
+    callContext.pClient = pClient;
+    callContext.pResourceRef = pParams->pSrcRef;
+    callContext.pContextRef = pParentRef;
+    callContext.secInfo = *pParams->pSecInfo;
+    callContext.pLockInfo = pParams->pLockInfo;
+
+    resservSwapTlsCallContext(&pOldContext, &callContext);
+
+    //
+    // Kernel clients are allowed to dup anything, unless they request otherwise.
+    // Also, if access rights are disabled, owner client should still be able to dup.
+    // For anything else, check that the client has dup access on the object
+    //
+    if (((pParams->pSecInfo->privLevel < RS_PRIV_LEVEL_KERNEL) ||
+         (pParams->flags & NV04_DUP_HANDLE_FLAGS_REJECT_KERNEL_DUP_PRIVILEGE)) &&
+        (pServer->bRsAccessEnabled || (pParams->pSrcClient->hClient != pClient->hClient)))
+    {
+        RS_ACCESS_MASK rightsRequired;
+
+        portMemSet(&rightsRequired, 0, sizeof(rightsRequired));
+        RS_ACCESS_MASK_ADD(&rightsRequired, RS_ACCESS_DUP_OBJECT);
+
+        status = rsAccessCheckRights(pParams->pSrcRef, pClient, &rightsRequired);
+    }
+    else
+    {
+        // Server's globalInternalSharePolicyList applies Require policies even to kernel
+        RsShareListIter it = listIterAll(&pServer->globalInternalSharePolicyList);
+        while (listIterNext(&it))
+        {
+            RS_SHARE_POLICY *pSharePolicy = it.pValue;
+
+            // We only care about failing Require policies which apply to Dup, ignore everything else
+            if ((pSharePolicy->action & RS_SHARE_ACTION_FLAG_REQUIRE) &&
+                RS_ACCESS_MASK_TEST(&pSharePolicy->accessMask, RS_ACCESS_DUP_OBJECT) &&
+                !resShareCallback(pParams->pSrcRef->pResource, pClient, pParentRef, pSharePolicy))
+            {
+                status = NV_ERR_INVALID_REQUEST;
+                break;
+            }
+        }
+    }
+
+    resservRestoreTlsCallContext(pOldContext);
+
+    if (status != NV_OK)
+        return status;
+
+    portMemSet(&params, 0, sizeof(params));
+
+    params.hClient = pClient->hClient;
+    params.hParent = pParams->hParentDst;
+    params.hResource = pParams->hResourceDst;
+    params.externalClassId = pParams->pSrcRef->externalClassId;
+    params.pSecInfo = pParams->pSecInfo;
+
+    params.pSrcClient = pParams->pSrcClient;
+    params.pSrcRef = pParams->pSrcRef;
+    params.pAllocParams = pParams->pShareParams;
+    params.pLockInfo = pParams->pLockInfo;
+    params.allocFlags = pParams->flags;
+
+    return _clientAllocResourceHelper(pClient, pServer, &params, &pParams->hResourceDst);
+}
+
+static
+NV_STATUS
+_clientAllocResourceHelper
+(
+    RsClient *pClient,
+    RsServer *pServer,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams,
+    NvHandle *phResource
+)
+{
+    NV_STATUS status;
+    NvHandle hResource = *phResource;
+    NvU32 depth = 0;
+    RsResource *pResource = NULL;
+    RsResourceRef *pParentRef = NULL;
+    RsResourceRef *pResourceRef = NULL;
+    CALL_CONTEXT callContext;
+    CALL_CONTEXT *pOldContext = NULL;
+    NvHandle hParent = pParams->hParent;
+
+    status = clientGetResourceRef(pClient, hParent, &pParentRef);
+    if (status != NV_OK && hParent != pClient->hClient && hParent != 0)
+        return status;
+
+    status = _clientConstructResourceRef(pClient, pServer, pParentRef, hResource, pParams->externalClassId, &pResourceRef);
+    if (status != NV_OK)
+        goto fail;
+
+    portMemSet(&callContext, 0, sizeof(callContext));
+    callContext.pServer = pServer;
+    callContext.pClient = pClient;
+    callContext.pResourceRef = pResourceRef;
+    callContext.pContextRef = pParams->pSrcRef;
+    callContext.pLockInfo = pParams->pLockInfo;
+
+    if (pParams->pSecInfo == NULL)
+    {
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto fail;
+    }
+    callContext.secInfo = *pParams->pSecInfo;
+
+    resservSwapTlsCallContext(&pOldContext, &callContext);
+    status = resservResourceFactory(pServer->pAllocator, &callContext, pParams, &pResource);
+    resservRestoreTlsCallContext(pOldContext);
+
+    if (status != NV_OK)
+        goto fail;
+
+    // Clear free params implicitly set by constructor
+    resSetFreeParams(pResource, NULL, NULL);
+    pParams->pResourceRef = pResourceRef;
+
+    //
+    // resConstruct_IMPL sets these fields but we need to set them again until
+    // Bug 2527351 is fixed
+    //
+    pResourceRef->pResource = pResource;
+    pResource->pResourceRef = pResourceRef;
+
+    if (pParentRef != NULL)
+    {
+        depth = pParentRef->depth + 1;
+        pResourceRef->depth = depth;
+
+        // Allow one additional level of depth to offset the depth used up by the RsClientResource at the root
+        // of the object hierarchy
+        if (RS_MAX_RESOURCE_DEPTH + 1 <= depth)
+        {
+            status = NV_ERR_ILLEGAL_ACTION;
+            goto fail;
+        }
+
+        // Add this ref to the parent's child map
+        if (NV_OK != indexAdd(&pParentRef->childRefMap, pResourceRef->internalClassId, pResourceRef))
+        {
+            status = NV_ERR_INSUFFICIENT_RESOURCES;
+            goto fail;
+        }
+    }
+
+    if (pServer->bRsAccessEnabled)
+    {
+        status = rsAccessGrantRights(pResourceRef, &callContext, pClient,
+                                     pParams->pRightsRequested,
+                                     pParams->pRightsRequired,
+                                     pParams->pAllocParams);
+        if (status != NV_OK)
+            goto fail;
+    }
+
+    *phResource = hResource;
+
+    return NV_OK;
+
+fail:
+    if (pResource != NULL)
+    {
+        RS_RES_FREE_PARAMS_INTERNAL params;
+        pOldContext = NULL;
+
+        // First undo dependency tracking since it might access the resource
+        if (pResourceRef->pDependantSession != NULL)
+            sessionRemoveDependency(pResourceRef->pDependantSession, pResourceRef);
+
+        if (pResourceRef->pSession != NULL)
+            sessionRemoveDependant(pResourceRef->pSession, pResourceRef);
+
+        portMemSet(&params, 0, sizeof(params));
+        portMemSet(&callContext, 0, sizeof(callContext));
+        callContext.pServer = pServer;
+        callContext.pClient = pClient;
+        callContext.secInfo = *pParams->pSecInfo;
+        callContext.pResourceRef = pResourceRef;
+        callContext.pLockInfo = pParams->pLockInfo;
+
+        resservSwapTlsCallContext(&pOldContext, &callContext);
+        resSetFreeParams(pResource, &callContext, &params);
+
+        objDelete(pResource);
+        resservRestoreTlsCallContext(pOldContext);
+    }
+
+    if (pResourceRef != NULL)
+    {
+        if (pParentRef != NULL)
+        {
+            indexRemove(&pParentRef->childRefMap, pResourceRef->internalClassId, pResourceRef);
+        }
+
+        
clientDestructResourceRef(pClient, pServer, pResourceRef); + } + + return status; +} + +static +NV_STATUS +_refCleanupDependencies +( + RsResourceRef *pResourceRef +) +{ + RsResourceRef **ppIndepRef; + while (NULL != (ppIndepRef = multimapFirstItem(&pResourceRef->depBackRefMap))) + { + refRemoveDependant(*ppIndepRef, pResourceRef); + } + + return NV_OK; +} + +static +NV_STATUS +_refCleanupDependants +( + RsResourceRef *pResourceRef +) +{ + RsResourceRef **ppDepRef; + while (NULL != (ppDepRef = multimapFirstItem(&pResourceRef->depRefMap))) + { + refRemoveDependant(pResourceRef, *ppDepRef); + } + + return NV_OK; +} + +NV_STATUS +clientFreeResource_IMPL +( + RsClient *pClient, + RsServer *pServer, + RS_RES_FREE_PARAMS_INTERNAL *pParams +) +{ + NV_STATUS status = NV_OK; + NV_STATUS tmpStatus; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + RsResourceRef *pClientRef = NULL; + RsResourceRef *pParentRef = NULL; + RsResourceRef *pResourceRef; + RsResource *pResource; + + pResourceRef = mapFind(&pClient->resourceMap, pParams->hResource); + if (pResourceRef == NULL) + return NV_ERR_OBJECT_NOT_FOUND; + + if (refPendingFree(pResourceRef, pClient)) + listRemove(&pClient->pendingFreeList, pResourceRef); + + pResource = pResourceRef->pResource; + pParentRef = pResourceRef->pParentRef; + + if (!pParams->bInvalidateOnly && pResourceRef->bInvalidated) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pServer = pServer; + callContext.pLockInfo = pParams->pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + resSetFreeParams(pResource, &callContext, pParams); + + resPreDestruct(pResource); + + // Remove all CPU mappings + _clientUnmapResourceRefMappings(pClient, &callContext, pParams->pLockInfo); + _clientUnmapBackRefMappings(pClient, &callContext, pParams->pLockInfo); + + // Remove all inter-mappings + _clientUnmapInterMappings(pClient, &callContext, pParams->pLockInfo); + _clientUnmapInterBackRefMappings(pClient, &callContext, pParams->pLockInfo); + + // Remove this resource as a dependency from other resources + pResourceRef->bInvalidated = NV_TRUE; + _refCleanupDependencies(pResourceRef); + + if (pResourceRef->pDependantSession != NULL) + sessionRemoveDependency(pResourceRef->pDependantSession, pResourceRef); + + if (pResourceRef->pSession != NULL) + sessionRemoveDependant(pResourceRef->pSession, pResourceRef); + + status = serverFreeResourceRpcUnderLock(pServer, pParams); + NV_ASSERT(status == NV_OK); + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Freeing hResource: %x\n", + // pClient->hClient, pResourceRef->hResource); + + objDelete(pResource); + + pResourceRef->pResource = NULL; + + resservRestoreTlsCallContext(pOldContext); + +done: + if (!pParams->bInvalidateOnly) + { + // Remove this ref from its parent's child ref list + if (pParentRef != NULL) + { + multimapRemoveItemByKey(&pParentRef->childRefMap, + pResourceRef->internalClassId, pResourceRef->hResource); + } + + pClientRef = mapFind(&pClient->resourceMap, pClient->hClient); + if (pClientRef != NULL) + refUncacheRef(pClientRef, pResourceRef); + + tmpStatus = clientDestructResourceRef(pClient, pServer, pResourceRef); + NV_ASSERT(tmpStatus == NV_OK); + } + + return status; +} + +NV_STATUS +clientUnmapMemory_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef, + RS_LOCK_INFO *pLockInfo, + 
RsCpuMapping **ppCpuMapping, + API_SECURITY_INFO *pSecInfo +) +{ + NV_STATUS status; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + RsCpuMapping *pCpuMapping = *ppCpuMapping; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. + if (pSecInfo != NULL) + callContext.secInfo = *pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = resUnmap(pResourceRef->pResource, &callContext, pCpuMapping); + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "hClient %x: Failed to unmap cpu mapping: hResource: %x error: 0x%x\n", + pClient->hClient, + pResourceRef->hResource, + status); + + if (pCpuMapping != NULL) + { + NV_PRINTF(LEVEL_ERROR, "hContext: %x\n", + (pCpuMapping->pContextRef == NULL) ? 0 : pCpuMapping->pContextRef->hResource); + } + } + + refRemoveMapping(pResourceRef, pCpuMapping); + *ppCpuMapping = NULL; + + return status; +} + +NV_STATUS +clientInterMap_IMPL +( + RsClient *pClient, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pParams +) +{ + return NV_ERR_INVALID_CLIENT; +} + +void +clientInterUnmap_IMPL +( + RsClient *pClient, + RsResourceRef *pMapperRef, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + return; +} + +NV_STATUS +clientGenResourceHandle_IMPL +( + RsClient *pClient, + NvHandle *pHandle +) +{ + NvHandle hFirst; + NvHandle hResource; + NV_STATUS status; + + NV_ASSERT(pClient->handleRangeStart != 0); + NV_ASSERT(pClient->handleRangeSize != 0); + + hResource = pClient->handleRangeStart + ((pClient->handleGenIdx++) % pClient->handleRangeSize); + status = clientValidateNewResourceHandle(pClient, hResource, NV_FALSE); + if (status == NV_OK) + { + goto done; + } + + hFirst = hResource; + do + { + hResource = pClient->handleRangeStart + ((pClient->handleGenIdx++) % pClient->handleRangeSize); + status = clientValidateNewResourceHandle(pClient, hResource, NV_FALSE); + } while(hResource != hFirst && status != NV_OK); + + if (status != NV_OK) + return NV_ERR_INSUFFICIENT_RESOURCES; + +done: + NV_ASSERT(hResource - pClient->handleRangeStart < pClient->handleRangeSize); + + *pHandle = hResource; + return NV_OK; +} + +NV_STATUS +clientAssignResourceHandle_IMPL +( + RsClient *pClient, + NvHandle *phResource +) +{ + NV_STATUS status; + + if (phResource == NULL) + return NV_ERR_INVALID_ARGUMENT; + + if (*phResource == 0) + { + status = clientGenResourceHandle(pClient, phResource); + } + else + { + status = clientValidateNewResourceHandle(pClient, *phResource, NV_TRUE); + } + + return status; + +} + +static +NV_STATUS +_clientConstructResourceRef +( + RsClient *pClient, + RsServer *pServer, + RsResourceRef *pParentRef, + NvHandle hResource, + NvU32 externalClassId, + RsResourceRef **ppResourceRef +) +{ + PORT_MEM_ALLOCATOR *pAllocator = pServer->pAllocator; + RsResourceRef *pResourceRef = mapInsertNew(&pClient->resourceMap, hResource); + if (pResourceRef == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + if (!pClient->bResourceWarning && (mapCount(&pClient->resourceMap) >= RS_CLIENT_RESOURCE_WARNING_THRESHOLD)) + { + NV_PRINTF(LEVEL_WARNING, "Client 0x%08x has allocated a large number of resources. [Current classid: 0x%04x]\n", pClient->hClient, externalClassId); + NV_PRINTF(LEVEL_WARNING, "The client may be leaking resources. 
This warning can be ignored if the allocations were intentional.\n");
+        pClient->bResourceWarning = NV_TRUE;
+    }
+
+    pResourceRef->pClient = pClient;
+    pResourceRef->pResourceDesc = RsResInfoByExternalClassId(externalClassId);
+    pResourceRef->externalClassId = externalClassId;
+    pResourceRef->internalClassId = RsResInfoGetInternalClassId(pResourceRef->pResourceDesc);
+    pResourceRef->pResource = NULL;
+    pResourceRef->pParentRef = pParentRef;
+    pResourceRef->hResource = hResource;
+    pResourceRef->depth = 0;
+
+    multimapInit(&pResourceRef->childRefMap, pAllocator);
+    multimapInit(&pResourceRef->cachedRefMap, pAllocator);
+    multimapInit(&pResourceRef->depRefMap, pAllocator);
+    multimapInit(&pResourceRef->depBackRefMap, pAllocator);
+    listInit(&pResourceRef->cpuMappings, pAllocator);
+    listInit(&pResourceRef->backRefs, pAllocator);
+    listInit(&pResourceRef->interMappings, pAllocator);
+    listInit(&pResourceRef->interBackRefs, pAllocator);
+    listInit(&pResourceRef->sharePolicyList, pAllocator);
+
+    portAtomicExIncrementU64(&pServer->activeResourceCount);
+
+    *ppResourceRef = pResourceRef;
+    return NV_OK;
+}
+
+NV_STATUS
+clientDestructResourceRef_IMPL
+(
+    RsClient *pClient,
+    RsServer *pServer,
+    RsResourceRef *pResourceRef
+)
+{
+    NV_ASSERT(pResourceRef != NULL);
+    NV_ASSERT(listCount(&pResourceRef->backRefs) == 0);
+    NV_ASSERT(listCount(&pResourceRef->cpuMappings) == 0);
+    NV_ASSERT(listCount(&pResourceRef->interBackRefs) == 0);
+    NV_ASSERT(listCount(&pResourceRef->interMappings) == 0);
+
+    listDestroy(&pResourceRef->backRefs);
+    listDestroy(&pResourceRef->cpuMappings);
+    listDestroy(&pResourceRef->interBackRefs);
+    listDestroy(&pResourceRef->interMappings);
+    listDestroy(&pResourceRef->sharePolicyList);
+
+    // All children should be free
+    NV_ASSERT(0 == multimapCountItems(&pResourceRef->childRefMap));
+    multimapDestroy(&pResourceRef->childRefMap);
+
+    // Nothing should be cached
+    NV_ASSERT(0 == multimapCountItems(&pResourceRef->cachedRefMap));
+    multimapDestroy(&pResourceRef->cachedRefMap);
+
+    _refCleanupDependencies(pResourceRef);
+    multimapDestroy(&pResourceRef->depBackRefMap);
+
+    _refCleanupDependants(pResourceRef);
+    multimapDestroy(&pResourceRef->depRefMap);
+
+    mapRemove(&pClient->resourceMap, pResourceRef);
+
+    portAtomicExDecrementU64(&pServer->activeResourceCount);
+
+    return NV_OK;
+}
+
+NV_STATUS
+_clientUnmapResourceRefMappings
+(
+    RsClient *pClient,
+    CALL_CONTEXT *pCallContext,
+    RS_LOCK_INFO *pLockInfo
+)
+{
+    RsResourceRef *pResourceRef = pCallContext->pResourceRef;
+    RsCpuMapping *pCpuMapping;
+    NV_STATUS status;
+    RS_LOCK_INFO lockInfo;
+    RS_CPU_UNMAP_PARAMS params;
+
+    pCpuMapping = listHead(&pResourceRef->cpuMappings);
+    while(pCpuMapping != NULL)
+    {
+        portMemSet(&params, 0, sizeof(params));
+        portMemSet(&lockInfo, 0, sizeof(lockInfo));
+
+        params.hClient = pClient->hClient;
+        params.hDevice = (pCpuMapping->pContextRef == NULL)
+                         ? pClient->hClient
+                         : pCpuMapping->pContextRef->hResource;
+        params.hMemory = pResourceRef->hResource;
+        params.pLinearAddress = pCpuMapping->pLinearAddress;
+        params.processId = pCpuMapping->processId;
+        params.flags = pCpuMapping->flags;
+        params.pSecInfo = &pCallContext->secInfo;
+        params.pLockInfo = &lockInfo;
+        lockInfo.pClient = pLockInfo->pClient;
+        lockInfo.state = pLockInfo->state;
+
+        // TODO: temp WAR for bug 2840284: deadlock during recursive free operation
+        lockInfo.flags |= RS_LOCK_FLAGS_NO_CLIENT_LOCK;
+
+        status = serverUnmap(pCallContext->pServer, params.hClient, params.hMemory, &params);
+
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap (status=0x%x) hClient %x: hResource: %x\n",
+                      status, pClient->hClient, pResourceRef->hResource);
+            NV_PRINTF(LEVEL_ERROR, "hContext: %x at addr " NvP64_fmt "\n",
+                      params.hDevice, params.pLinearAddress);
+
+            if (pCpuMapping == listHead(&pResourceRef->cpuMappings))
+            {
+#if !(RS_STANDALONE_TEST)
+                NV_ASSERT(0);
+#endif
+                refRemoveMapping(pResourceRef, pCpuMapping);
+            }
+        }
+        pCpuMapping = listHead(&pResourceRef->cpuMappings);
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+_clientUnmapBackRefMappings
+(
+    RsClient *pClient,
+    CALL_CONTEXT *pCallContext,
+    RS_LOCK_INFO *pLockInfo
+)
+{
+    NV_STATUS status;
+    RsResourceRef *pResourceRef = pCallContext->pResourceRef;
+    RS_CPU_MAPPING_BACK_REF *pBackRefItem;
+    RS_LOCK_INFO lockInfo;
+    RS_CPU_UNMAP_PARAMS params;
+
+    pBackRefItem = listHead(&pResourceRef->backRefs);
+    while(pBackRefItem != NULL)
+    {
+        RsCpuMapping *pCpuMapping = pBackRefItem->pCpuMapping;
+        RsResourceRef *pBackRef = pBackRefItem->pBackRef;
+
+        portMemSet(&params, 0, sizeof(params));
+        portMemSet(&lockInfo, 0, sizeof(lockInfo));
+
+        params.hClient = pClient->hClient;
+        params.hDevice = (pCpuMapping->pContextRef == NULL)
+                         ? pClient->hClient
+                         : pCpuMapping->pContextRef->hResource;
+        params.hMemory = pBackRef->hResource;
+        params.pLinearAddress = pCpuMapping->pLinearAddress;
+        params.processId = pCpuMapping->processId;
+        params.flags = pCpuMapping->flags;
+        params.pSecInfo = &pCallContext->secInfo;
+        params.pLockInfo = &lockInfo;
+
+        lockInfo.pClient = pLockInfo->pClient;
+        lockInfo.state = pLockInfo->state;
+
+        status = serverUnmap(pCallContext->pServer, pClient->hClient, pBackRef->hResource, &params);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap backref (status=0x%x) hClient %x: hResource: %x\n",
+                      status, pClient->hClient, pBackRef->hResource);
+            NV_PRINTF(LEVEL_ERROR, "hContext: %x at addr " NvP64_fmt "\n",
+                      params.hDevice, params.pLinearAddress);
+
+            if (pBackRefItem == listHead(&pResourceRef->backRefs))
+            {
+                NV_ASSERT(0);
+                listRemove(&pResourceRef->backRefs, pBackRefItem);
+            }
+        }
+
+        pBackRefItem = listHead(&pResourceRef->backRefs);
+    }
+
+    return NV_OK;
+}
+
+static NV_STATUS
+_unmapInterMapping
+(
+    RsServer *pServer,
+    RsClient *pClient,
+    RsResourceRef *pMapperRef,
+    RsInterMapping *pMapping,
+    RS_LOCK_INFO *pLockInfo,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    RS_INTER_UNMAP_PARAMS params;
+    RS_LOCK_INFO lockInfo;
+    NV_STATUS status;
+
+    portMemSet(&params, 0, sizeof(params));
+    portMemSet(&lockInfo, 0, sizeof(lockInfo));
+
+    params.hClient = pClient->hClient;
+    params.hMapper = pMapperRef->hResource;
+    params.hDevice = pMapping->pContextRef->hResource;
+    params.hMappable = pMapping->pMappableRef->hResource;
+    params.flags = pMapping->flags;
+    params.dmaOffset = pMapping->dmaOffset;
+    params.pMemDesc = pMapping->pMemDesc;
+    params.pSecInfo = pSecInfo;
+    params.pLockInfo = &lockInfo;
+
+    lockInfo.pClient = pLockInfo->pClient;
+    lockInfo.pContextRef = (pLockInfo->pContextRef != NULL)
+                           ? pLockInfo->pContextRef
+                           : pMapping->pContextRef;
+    lockInfo.state = pLockInfo->state;
+
+    status = serverUpdateLockFlagsForInterAutoUnmap(pServer, &params);
+    if (status != NV_OK)
+        return status;
+
+    return serverInterUnmap(pServer, &params);
+}
+
+void
+_clientUnmapInterMappings
+(
+    RsClient *pClient,
+    CALL_CONTEXT *pCallContext,
+    RS_LOCK_INFO *pLockInfo
+)
+{
+    NV_STATUS status;
+    RsResourceRef *pMapperRef = pCallContext->pResourceRef;
+    RsInterMapping *pMapping;
+
+    pMapping = listHead(&pMapperRef->interMappings);
+    while (pMapping != NULL)
+    {
+        status = _unmapInterMapping(pCallContext->pServer, pClient, pMapperRef,
+                                    pMapping, pLockInfo, &pCallContext->secInfo);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap (status=0x%x) hClient %x: hMapper: %x\n",
+                      status, pClient->hClient, pMapperRef->hResource);
+            NV_PRINTF(LEVEL_ERROR, "hMappable: %x hContext: %x\n",
+                      pMapping->pMappableRef->hResource, pMapping->pContextRef->hResource);
+
+            if (pMapping == listHead(&pMapperRef->interMappings))
+            {
+                NV_ASSERT(0);
+                refRemoveInterMapping(pMapperRef, pMapping);
+            }
+        }
+
+        pMapping = listHead(&pMapperRef->interMappings);
+    }
+}
+
+void
+_clientUnmapInterBackRefMappings
+(
+    RsClient *pClient,
+    CALL_CONTEXT *pCallContext,
+    RS_LOCK_INFO *pLockInfo
+)
+{
+    NV_STATUS status;
+    RS_INTER_MAPPING_BACK_REF *pBackRefItem;
+
+    RsResourceRef *pResourceRef = pCallContext->pResourceRef;
+
+    pBackRefItem = listHead(&pResourceRef->interBackRefs);
+    while (pBackRefItem != NULL)
+    {
+        RsResourceRef *pMapperRef = pBackRefItem->pMapperRef;
+        RsInterMapping *pMapping = pBackRefItem->pMapping;
+
+        status = _unmapInterMapping(pCallContext->pServer, pClient, pMapperRef,
+                                    pMapping, pLockInfo, &pCallContext->secInfo);
+        if (status != NV_OK)
+        {
+            NV_PRINTF(LEVEL_ERROR, "Failed to auto-unmap backref (status=0x%x) hClient %x: hMapper: %x\n",
+                      status, pClient->hClient, pMapperRef->hResource);
+            NV_PRINTF(LEVEL_ERROR, "hMappable: %x hContext: %x\n",
+                      pMapping->pMappableRef->hResource, pMapping->pContextRef->hResource);
+
+            if (pBackRefItem == listHead(&pResourceRef->interBackRefs))
+            {
+                NV_ASSERT(0);
+                refRemoveInterMapping(pMapperRef, pMapping);
+            }
+        }
+
+        pBackRefItem = listHead(&pResourceRef->interBackRefs);
+    }
+}
+
+NV_STATUS
+indexAdd
+(
+    RsIndex *pIndex,
+    NvU32 index,
+    RsResourceRef *pResourceRef
+)
+{
+    NV_ASSERT(pResourceRef != NULL && pResourceRef->hResource != 0);
+
+    if (NULL == multimapFindSubmap(pIndex, index))
+    {
+        if (NULL == multimapInsertSubmap(pIndex, index))
+            return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+
+    if (NULL == multimapInsertItemValue(pIndex, index, pResourceRef->hResource,
+                                        &pResourceRef))
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+
+    return NV_OK;
+}
+
+NV_STATUS
+indexRemove
+(
+    RsIndex *pIndex,
+    NvU32 index,
+    RsResourceRef *pResourceRef
+)
+{
+    RsResourceRef **ppResourceRef;
+
+    NV_ASSERT(pResourceRef != NULL && pResourceRef->hResource != 0);
+
+    ppResourceRef = multimapFindItem(pIndex, index, pResourceRef->hResource);
+    if (ppResourceRef == NULL)
+        return NV_ERR_OBJECT_NOT_FOUND;
+
+    multimapRemoveItem(pIndex, ppResourceRef);
+
+    return NV_OK;
+}
+
+NV_STATUS
+clientValidateNewResourceHandle_IMPL
+(
+    RsClient *pClient,
+    NvHandle hResource,
+    NvBool bRestrict
+)
+{
+    //
+    // Resource handle should not be the same as the client handle
+    // because some control calls pass hClient in the hObject field
+    //
+    if (pClient->hClient == hResource || hResource == 0)
+        return NV_ERR_INVALID_OBJECT_HANDLE;
+
+    if (bRestrict && 
!rangeIsEmpty(pClient->handleRestrictRange)) + { + NV_RANGE requestedRange = rangeMake(hResource, hResource); + if (rangeContains(pClient->handleRestrictRange, requestedRange)) + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + if (clientGetResourceRef(pClient, hResource, NULL) == NV_OK) + return NV_ERR_INSERT_DUPLICATE_NAME; + + return NV_OK; +} + +NV_STATUS +clientresConstruct_IMPL +( + RsClientResource *pClientRes, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsClient *pClient = pCallContext->pClient; + RsResourceRef *pResourceRef = pCallContext->pResourceRef; + + // Client proxy resource must have the same handle as its client + if (pClient->hClient != pResourceRef->hResource) + return NV_ERR_INVALID_OBJECT_HANDLE; + + pClientRes->pClient = pCallContext->pClient; + return NV_OK; +} + +void +clientresDestruct_IMPL +( + RsClientResource *pClientRes +) +{ +} + +RsIndexIter +indexRefIter +( + RsIndex *pIndex, + NvU32 index +) +{ + RsIndexIter it; + RsIndexSubmap *pSubmap; + + portMemSet(&it, 0, sizeof(it)); + NV_ASSERT(pIndex); + + pSubmap = multimapFindSubmap(pIndex, index); + if (pSubmap != NULL) + it = multimapSubmapIterItems(pIndex, pSubmap); + + return it; +} + +RsIndexIter +indexRefIterAll +( + RsIndex *pIndex +) +{ + NV_ASSERT(pIndex); + return multimapItemIterAll(pIndex); +} + +NvBool +indexRefIterNext +( + RsIndexIter *pIt +) +{ + return multimapItemIterNext(pIt); +} + +RS_ITERATOR +clientRefIter +( + RsClient *pClient, + RsResourceRef *pScopeRef, + NvU32 internalClassId, + RS_ITER_TYPE type, + NvBool bExactMatch +) +{ + RS_ITERATOR it; + RsIndex *pIndex = NULL; + NvBool bChildrenOnly = (type == RS_ITERATE_CHILDREN); + NvBool bCachedOnly = (type == RS_ITERATE_CACHED); + NvBool bDependantsOnly = (type == RS_ITERATE_DEPENDANTS); + portMemSet(&it, 0, sizeof(it)); + + if (pClient == NULL) + { + NV_ASSERT(0); + return it; + } + + if (pScopeRef == NULL) + { + if (NV_OK != clientGetResourceRef(pClient, pClient->hClient, &pScopeRef)) + return it; + } + + if (bChildrenOnly || bCachedOnly || bDependantsOnly) + { + NvBool bIterAll = (internalClassId == 0) || !bExactMatch; + + if (bChildrenOnly) + { + pIndex = &pScopeRef->childRefMap; + } + else if (bCachedOnly) + { + pIndex = &pScopeRef->cachedRefMap; + } + else if (bDependantsOnly) + { + pIndex = &pScopeRef->depRefMap; + } + + if (!bIterAll && multimapFindSubmap(pIndex, internalClassId) == NULL) + goto done; + + it.idxIt = (bIterAll) + ? 
indexRefIterAll(pIndex) + : indexRefIter(pIndex, internalClassId); + } + else + { + // Match descendants of the scope resource (specific class / any class) + it.mapIt = mapIterAll(&pClient->resourceMap); + } + + it.pClient = pClient; + it.pScopeRef = pScopeRef; + it.internalClassId = internalClassId; + it.type = type; + it.bExactMatch = bExactMatch; + +done: + return it; +} + +RS_ORDERED_ITERATOR +clientRefOrderedIter +( + RsClient *pClient, + RsResourceRef *pScopeRef, + NvU32 internalClassId, + NvBool bExactMatch +) +{ + RS_ORDERED_ITERATOR it; + RsIndex *pIndex = NULL; + portMemSet(&it, 0, sizeof(it)); + + if (pClient == NULL) + { + NV_ASSERT(0); + return it; + } + + if (pScopeRef == NULL) + { + if (NV_OK != clientGetResourceRef(pClient, pClient->hClient, &pScopeRef)) + return it; + } + + it.depth = -1; + pIndex = &pScopeRef->childRefMap; + it.idxIt[0] = indexRefIterAll(pIndex); + + it.pClient = pClient; + it.pScopeRef = pScopeRef; + it.internalClassId = internalClassId; + it.bExactMatch = bExactMatch; + + return it; +} + +NvBool +clientRefOrderedIterNext +( + RsClient *pClient, + RS_ORDERED_ITERATOR *pIt +) +{ + RsResourceRef *pResourceRef; + NvBool bNext; + + if ((pIt == NULL) || (pIt->pClient != pClient) || pIt->pScopeRef == NULL) + { + // Iterator not initialized or nothing to iterate over + NV_ASSERT(pIt != NULL && pIt->pClient == NULL); + return NV_FALSE; + } + + // Iterate over the scope reference if the scope is not the client + if (pIt->depth == -1) + { + pIt->depth = 0; + if ((pIt->pScopeRef->hResource != pIt->pClient->hClient) && + ((pIt->internalClassId == 0) || (pIt->internalClassId == pIt->pScopeRef->internalClassId)) && + (pIt->pScopeRef->pResource != NULL)) + { + pIt->pResourceRef = pIt->pScopeRef; + return NV_TRUE; + } + } + + pIt->pResourceRef = NULL; + + bNext = NV_TRUE; + while (1) + { + // Get the next sibling, or else backtrack to parent and get its next sibling + do + { + if (!bNext) + --pIt->depth; + bNext = indexRefIterNext(&pIt->idxIt[pIt->depth]); + } while (!bNext && pIt->depth != 0); + + if (!bNext) + break; + + pResourceRef = *pIt->idxIt[pIt->depth].pValue; + + // Iterate over this resource's children next (up to max depth) + if (pIt->depth < RS_MAX_RESOURCE_DEPTH) + { + ++pIt->depth; + pIt->idxIt[pIt->depth] = indexRefIterAll(&pResourceRef->childRefMap); + } + + if (refHasAncestor(pResourceRef, pIt->pScopeRef)) + { + NvBool bMatch = NV_TRUE; + if (pIt->internalClassId != 0) + { + if (pIt->bExactMatch && (pIt->internalClassId != pResourceRef->internalClassId)) + bMatch = NV_FALSE; + + if (!pIt->bExactMatch && objDynamicCastById(pResourceRef->pResource, pIt->internalClassId) == NULL) + bMatch = NV_FALSE; + } + + if (bMatch && (pResourceRef->pResource != NULL)) + { + pIt->pResourceRef = pResourceRef; + return NV_TRUE; + } + } + } + + return NV_FALSE; +} + +NvBool +clientRefIterNext +( + RsClient *pClient, + RS_ITERATOR *pIt +) +{ + RsResourceRef *pResourceRef; + NvBool bLoop; + NvBool bUseIdx; + + if ((pIt == NULL) || (pIt->pClient != pClient) || pIt->pScopeRef == NULL) + { + // Iterator not initialized or nothing to iterate over + NV_ASSERT(pIt != NULL && pIt->pClient == NULL); + return NV_FALSE; + } + + bUseIdx = (pIt->type == RS_ITERATE_CACHED) || + (pIt->type == RS_ITERATE_CHILDREN) || + (pIt->type == RS_ITERATE_DEPENDANTS); + + pIt->pResourceRef = NULL; + + bLoop = bUseIdx ? indexRefIterNext(&pIt->idxIt) : mapIterNext(&pIt->mapIt); + while (bLoop) + { + pResourceRef = bUseIdx ? 
*pIt->idxIt.pValue : pIt->mapIt.pValue; + + if (bUseIdx || + ((pResourceRef == pIt->pScopeRef) || + (refHasAncestor(pResourceRef, pIt->pScopeRef)))) + { + NvBool bMatch = NV_TRUE; + if (pIt->internalClassId != 0) + { + if (pIt->bExactMatch && (pIt->internalClassId != pResourceRef->internalClassId)) + bMatch = NV_FALSE; + + if (!pIt->bExactMatch && objDynamicCastById(pResourceRef->pResource, pIt->internalClassId) == NULL) + bMatch = NV_FALSE; + } + + if (bMatch && (pResourceRef->pResource != NULL)) + { + pIt->pResourceRef = pResourceRef; + return NV_TRUE; + } + } + + bLoop = bUseIdx ? indexRefIterNext(&pIt->idxIt) : mapIterNext(&pIt->mapIt); + } + + return NV_FALSE; +} + +NV_STATUS +clientPostProcessPendingFreeList_IMPL +( + RsClient *pClient, + RsResourceRef **ppFirstLowPriRef +) +{ + if (ppFirstLowPriRef != NULL) + *ppFirstLowPriRef = NULL; + + return NV_OK; +} + +NV_STATUS +clientAddAccessBackRef_IMPL +( + RsClient *pClient, + RsResourceRef *pResourceRef +) +{ + AccessBackRef *pAccessBackRef = listPrependNew(&pClient->accessBackRefList);; + + if (pAccessBackRef == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pAccessBackRef->hClient = pResourceRef->pClient->hClient; + pAccessBackRef->hResource = pResourceRef->hResource; + + return NV_OK; +} + +void clientFreeAccessBackRefs_IMPL +( + RsClient *pClient, + RsServer *pServer +) +{ + AccessBackRef *pAccessBackRef; + NV_STATUS status; + + while ((pAccessBackRef = listHead(&pClient->accessBackRefList)) != NULL) + { + RsClient *pSharedClient; + + // + // Remove access rights entry if client/resource pair is still in use + // so that another client doesn't get unauthorized access to them + // + status = serverGetClientUnderLock(pServer, pAccessBackRef->hClient, &pSharedClient); + if (status == NV_OK) + { + RsResourceRef *pResourceRef; + + status = clientGetResourceRef(pSharedClient, pAccessBackRef->hResource, &pResourceRef); + if (status == NV_OK) + { + RS_SHARE_POLICY revokePolicy; + + revokePolicy.type = RS_SHARE_TYPE_CLIENT; + revokePolicy.target = pClient->hClient; + revokePolicy.action = RS_SHARE_ACTION_FLAG_REVOKE; + RS_ACCESS_MASK_FILL(&revokePolicy.accessMask); + + // Check the resource's share policy for matching client policies + rsShareListRemove(&pResourceRef->sharePolicyList, &revokePolicy, NULL); + } + } + + listRemove(&pClient->accessBackRefList, pAccessBackRef); + } +} diff --git a/src/nvidia/src/libraries/resserv/src/rs_domain.c b/src/nvidia/src/libraries/resserv/src/rs_domain.c new file mode 100644 index 000000000..bbbc84d13 --- /dev/null +++ b/src/nvidia/src/libraries/resserv/src/rs_domain.c @@ -0,0 +1,52 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_domain.h" + +#if !(RS_STANDALONE) +#include "os/os.h" +#endif + +NV_STATUS +domainConstruct +( + RsDomain *pDomain, + PORT_MEM_ALLOCATOR *pAllocator, + NvHandle hDomain, + NvHandle hParentDomain, + ACCESS_CONTROL *pAccessControl +) +{ + return NV_OK; +} + +NV_STATUS +domainDestruct +( + RsDomain *pDomain +) +{ + return NV_OK; +} diff --git a/src/nvidia/src/libraries/resserv/src/rs_resource.c b/src/nvidia/src/libraries/resserv/src/rs_resource.c new file mode 100644 index 000000000..81f5ed412 --- /dev/null +++ b/src/nvidia/src/libraries/resserv/src/rs_resource.c @@ -0,0 +1,799 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#define NVOC_RS_RESOURCE_H_PRIVATE_ACCESS_ALLOWED + +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_resource.h" +#include "resserv/rs_client.h" +#include "resserv/rs_server.h" + +#if !(RS_STANDALONE) +#include "os/os.h" +#endif + +NV_STATUS +resConstruct_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams +) +{ + RsResourceRef *pResourceRef; + + if (pCallContext == NULL) + { + return NV_OK; + } + + pResourceRef = pCallContext->pResourceRef; + + pResource->bConstructed = NV_TRUE; + + // Init pResourceRef->pResource so iteration APIs work during ctor + pResourceRef->pResource = pResource; + + // Init back-ref so we can use during ctor + pResource->pResourceRef = pResourceRef; + + // Set context for free in case a chained constructor fails. 
+ resSetFreeParams(pResource, pCallContext, NULL); + + // NV_PRINTF(LEVEL_INFO, "Constructing resource with external class: 0x%x\n", pParams->externalClassId); + + return NV_OK; +} + +void +resPreDestruct_IMPL +( + RsResource *pResource +) +{ +} + +void +resDestruct_IMPL +( + RsResource *pResource +) +{ + if (!pResource->bConstructed) + { + return; + } + + // NV_PRINTF(LEVEL_INFO, "Freeing resource: " NvP64_fmt "\n", NV_PTR_TO_NvP64(pResource)); +} + +NV_STATUS +resSetFreeParams_IMPL(RsResource *pResource, CALL_CONTEXT *pCallContext, RS_RES_FREE_PARAMS_INTERNAL *pParams) +{ + if (!pResource->bConstructed) + { + return NV_OK; + } + + pResource->dtorParams.pFreeContext = pCallContext; + pResource->dtorParams.pFreeParams = pParams; + + return NV_OK; +} + +NV_STATUS +resGetFreeParams_IMPL(RsResource *pResource, CALL_CONTEXT **ppCallContext, RS_RES_FREE_PARAMS_INTERNAL **ppParams) +{ + if (ppCallContext != NULL) + *ppCallContext = pResource->dtorParams.pFreeContext; + + if (ppParams != NULL) + *ppParams = pResource->dtorParams.pFreeParams; + + return NV_OK; +} + +NV_STATUS resControlLookup_IMPL +( + RsResource *pResource, + RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams, + const struct NVOC_EXPORTED_METHOD_DEF **ppEntry +) +{ + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + NvU32 cmd = pRsParams->cmd; + + *ppEntry = NULL; + pEntry = objGetExportedMethodDef(staticCast(objFullyDerive(pResource), Dynamic), cmd); + + if (pEntry == NULL) + return NV_ERR_NOT_SUPPORTED; + + if ((pEntry->paramSize != 0) && (pRsParams->paramsSize != pEntry->paramSize)) + { + NV_PRINTF(LEVEL_NOTICE, + "hObject 0x%08x, cmd 0x%08x: bad paramsize %d, expected %d\n", + RES_GET_HANDLE(pResource), pRsParams->cmd, + (int)pRsParams->paramsSize, + (int)pEntry->paramSize); + + return NV_ERR_INVALID_PARAM_STRUCT; + } + + *ppEntry = pEntry; + return NV_OK; +} + +typedef NV_STATUS (*CONTROL_EXPORT_FNPTR)(void*, void*); +typedef NV_STATUS (*CONTROL_EXPORT_FNPTR_NO_PARAMS)(void*); + +NV_STATUS +resControl_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pRsParams +) +{ + RsServer *pServer = pCallContext->pServer; + const struct NVOC_EXPORTED_METHOD_DEF *pEntry; + NV_STATUS status; + Dynamic *pDynamicObj; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE access = LOCK_ACCESS_WRITE; + + status = resControlLookup(pResource, pRsParams, &pEntry); + if (status != NV_OK) + { + if (status == NV_WARN_NOTHING_TO_DO) + return NV_OK; + return status; + } + + NV_ASSERT_OR_RETURN(pEntry != NULL, NV_ERR_NOT_SUPPORTED); + + // Initialize the execution cookie + serverControl_InitCookie(pEntry, pRsParams->pCookie); + + status = resControlFilter(pResource, pCallContext, pRsParams); + if (status != NV_OK) + return status; + + status = serverControl_Prologue(pServer, pRsParams, &access, &releaseFlags); + if (status != NV_OK) + return status; + + status = resControl_Prologue(pResource, pCallContext, pRsParams); + if ((status != NV_OK) && (status != NV_WARN_NOTHING_TO_DO)) + goto done; + + pDynamicObj = objDynamicCastById(pResource, pEntry->pClassInfo->classId); + + if (status == NV_WARN_NOTHING_TO_DO) + { + // Call handled by the prologue. + status = NV_OK; + } + else + { + // Check the size of paramSize while it is non-zero. 
+ // Zero size means the exported method only have one param (pResource) + if (pEntry->paramSize == 0) + { + CONTROL_EXPORT_FNPTR_NO_PARAMS pFunc = ((CONTROL_EXPORT_FNPTR_NO_PARAMS) pEntry->pFunc); + status = pFunc(pDynamicObj); + } + else + { + CONTROL_EXPORT_FNPTR pFunc = ((CONTROL_EXPORT_FNPTR) pEntry->pFunc); + status = pFunc(pDynamicObj, pRsParams->pParams); + } + } + + resControl_Epilogue(pResource, pCallContext, pRsParams); + +done: + status = serverControl_Epilogue(pServer, pRsParams, access, &releaseFlags, status); + + return status; +} + +NV_STATUS +resControlFilter_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +NV_STATUS +resControl_Prologue_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + return NV_OK; +} + +void +resControl_Epilogue_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams +) +{ + return; +} + +NvU32 resGetRefCount_IMPL +( + RsResource *pResource +) +{ + return 1; +} + +NV_STATUS +resMap_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RS_CPU_MAP_PARAMS *pParams, + RsCpuMapping *pCpuMapping +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +resUnmap_IMPL +( + RsResource *pResource, + CALL_CONTEXT *pCallContext, + RsCpuMapping *pCpuMapping +) +{ + return NV_ERR_NOT_SUPPORTED; +} + +NV_STATUS +resMapTo_IMPL +( + RsResource *pResource, + RS_RES_MAP_TO_PARAMS *pParams +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NV_STATUS +resUnmapFrom_IMPL +( + RsResource *pResource, + RS_RES_UNMAP_FROM_PARAMS *pParams +) +{ + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +NvBool +resCanCopy_IMPL +( + RsResource *pResource +) +{ + return NV_FALSE; +} + +NvBool +resAccessCallback_IMPL +( + RsResource *pResource, + RsClient *pInvokingClient, + void *pAllocParams, + RsAccessRight accessRight +) +{ + return NV_FALSE; +} + +NvBool +resShareCallback_IMPL +( + RsResource *pResource, + RsClient *pInvokingClient, + RsResourceRef *pParentRef, + RS_SHARE_POLICY *pSharePolicy +) +{ + switch (pSharePolicy->type) + { + case RS_SHARE_TYPE_ALL: + return NV_TRUE; + case RS_SHARE_TYPE_CLIENT: + if (pSharePolicy->target == pInvokingClient->hClient) + return NV_TRUE; + break; + } + + return NV_FALSE; +} + +NV_STATUS +refFindCpuMapping +( + RsResourceRef *pResourceRef, + NvP64 pAddress, + RsCpuMapping **ppMapping +) +{ + return refFindCpuMappingWithFilter(pResourceRef, pAddress, NULL, ppMapping); +} + +NV_STATUS +refFindCpuMappingWithFilter +( + RsResourceRef *pResourceRef, + NvP64 pAddress, + NvBool (*fnFilter)(RsCpuMapping*), + RsCpuMapping **ppMapping +) +{ + RsCpuMappingListIter it; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + RsCpuMapping *pMapping = NULL; + + if (pResourceRef == NULL) + { + NV_ASSERT(0); + return status; + } + + it = listIterAll(&pResourceRef->cpuMappings); + while (listIterNext(&it)) + { + pMapping = it.pValue; + if ((pMapping->pLinearAddress == pAddress) && + ((fnFilter == NULL) || fnFilter(pMapping))) + { + status = NV_OK; + break; + } + } + + if (status != NV_OK) + pMapping = NULL; + + if (pMapping != NULL) + *ppMapping = pMapping; + + return status; +} + +NV_STATUS +refFindChildOfType +( + RsResourceRef *pParentRef, + NvU32 internalClassId, + NvBool bExactMatch, + RsResourceRef **ppResourceRef +) +{ + if (bExactMatch) + { + RsIndexIter it = indexRefIter(&pParentRef->childRefMap, internalClassId); + if (indexRefIterNext(&it)) + { + RsResourceRef 
*pResourceRef = *it.pValue; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; + } + } + else + { + RsIndexSupermapIter it = multimapSubmapIterAll(&pParentRef->childRefMap); + while (multimapSubmapIterNext(&it)) + { + RsIndexSubmap *pSubmap = it.pValue; + RsIndexIter subIt = multimapSubmapIterItems(&pParentRef->childRefMap, pSubmap); + if (multimapItemIterNext(&subIt)) + { + RsResourceRef *pResourceRef = *subIt.pValue; + + if (objDynamicCastById(pResourceRef->pResource, internalClassId) == NULL) + continue; + + if (ppResourceRef != NULL) + *ppResourceRef = pResourceRef; + + return NV_OK; + } + } + + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NV_STATUS +refFindAncestorOfType +( + RsResourceRef *pDescendantRef, + NvU32 internalClassId, + RsResourceRef **ppAncestorRef +) +{ + RsResourceRef *pAncestorRef = pDescendantRef->pParentRef; + + while (pAncestorRef != NULL) + { + if (pAncestorRef->internalClassId == internalClassId) + { + if(pAncestorRef->bInvalidated) + return NV_ERR_OBJECT_NOT_FOUND; + + if (ppAncestorRef != NULL) + *ppAncestorRef = pAncestorRef; + + return NV_OK; + } + + pAncestorRef = pAncestorRef->pParentRef; + } + + return NV_ERR_OBJECT_NOT_FOUND; +} + +NvBool +refHasAncestor +( + RsResourceRef *pDescendantRef, + RsResourceRef *pAncestorRef +) +{ + RsResourceRef *pSearchRef = pDescendantRef->pParentRef; + + while (pSearchRef != NULL) + { + if (pSearchRef == pAncestorRef) + return NV_TRUE; + + pSearchRef = pSearchRef->pParentRef; + } + + return NV_FALSE; +} + +NV_STATUS +refAddMapping +( + RsResourceRef *pResourceRef, + RS_CPU_MAP_PARAMS *pParams, + RsResourceRef *pContextRef, + RsCpuMapping **ppMapping +) +{ + NV_STATUS status; + RsCpuMapping *pCpuMapping = listAppendNew(&pResourceRef->cpuMappings); + if (pCpuMapping == NULL) + return NV_ERR_NO_MEMORY; + + status = refAllocCpuMappingPrivate(pParams, pCpuMapping); + if (status != NV_OK) + { + listRemove(&pResourceRef->cpuMappings, pCpuMapping); + return status; + } + + if ((pContextRef != NULL) && + (pContextRef != pResourceRef) && + !refHasAncestor(pResourceRef, pContextRef)) + { + RS_CPU_MAPPING_BACK_REF *pBackRefItem = listAppendNew(&pContextRef->backRefs); + if (pBackRefItem == NULL) + { + refFreeCpuMappingPrivate(pCpuMapping); + listRemove(&pResourceRef->cpuMappings, pCpuMapping); + return NV_ERR_NO_MEMORY; + } + + pBackRefItem->pBackRef = pResourceRef; + pBackRefItem->pCpuMapping = pCpuMapping; + } + + pCpuMapping->offset = pParams->offset; + pCpuMapping->length = pParams->length; + pCpuMapping->flags = pParams->flags; + pCpuMapping->pContextRef = pContextRef; + + if (ppMapping != NULL) + *ppMapping = pCpuMapping; + + return NV_OK; +} + +void +refRemoveMapping +( + RsResourceRef *pResourceRef, + RsCpuMapping *pCpuMapping +) +{ + if ((pCpuMapping->pContextRef != NULL) && + !refHasAncestor(pResourceRef, pCpuMapping->pContextRef)) + { + RS_CPU_MAPPING_BACK_REF *pBackRefItem; + RsCpuMappingBackRefListIter it = listIterAll(&pCpuMapping->pContextRef->backRefs); + + while (listIterNext(&it)) + { + pBackRefItem = it.pValue; + if ((pBackRefItem->pBackRef == pResourceRef) && + (pBackRefItem->pCpuMapping == pCpuMapping)) + { + listRemove(&pCpuMapping->pContextRef->backRefs, pBackRefItem); + break; + } + } + } + + refFreeCpuMappingPrivate(pCpuMapping); + listRemove(&pResourceRef->cpuMappings, pCpuMapping); +} + +#if RS_STANDALONE +NV_STATUS +refAllocCpuMappingPrivate +( + RS_CPU_MAP_PARAMS *pMapParams, + RsCpuMapping *pCpuMapping +) +{ + return NV_OK; +} + +void +refFreeCpuMappingPrivate +( + 
RsCpuMapping *pCpuMapping +) +{ +} +#endif /* RS_STANDALONE */ + +NV_STATUS +refFindInterMapping +( + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RsResourceRef *pContextRef, + NvU64 dmaOffset, + RsInterMapping **ppMapping +) +{ + RsInterMappingListIter it; + NV_STATUS status = NV_ERR_OBJECT_NOT_FOUND; + RsInterMapping *pMapping = NULL; + + NV_ASSERT(pMapperRef != NULL); + + it = listIterAll(&pMapperRef->interMappings); + while (listIterNext(&it)) + { + pMapping = it.pValue; + if ((pMapping->pMappableRef == pMappableRef) && + (pMapping->pContextRef == pContextRef) && + (pMapping->dmaOffset == dmaOffset)) + { + status = NV_OK; + break; + } + } + + if (status != NV_OK) + pMapping = NULL; + + if (pMapping != NULL) + *ppMapping = pMapping; + + return status; +} + +NV_STATUS +refAddInterMapping +( + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RsResourceRef *pContextRef, + RsInterMapping **ppMapping +) +{ + RsInterMapping *pInterMapping; + RS_INTER_MAPPING_BACK_REF *pBackRefItem; + RS_INTER_MAPPING_BACK_REF *pContextBackRefItem; + + NV_ASSERT(pMapperRef != NULL); + NV_ASSERT(pMappableRef != NULL); + NV_ASSERT(pMappableRef != pMapperRef); + + pInterMapping = listAppendNew(&pMapperRef->interMappings); + if (pInterMapping == NULL) + return NV_ERR_NO_MEMORY; + + // Add backref linked to this inter-mapping + pBackRefItem = listAppendNew(&pMappableRef->interBackRefs); + if (pBackRefItem == NULL) + { + listRemove(&pMapperRef->interMappings, pInterMapping); + return NV_ERR_NO_MEMORY; + } + + pBackRefItem->pMapperRef = pMapperRef; + pBackRefItem->pMapping = pInterMapping; + + // + // Either pMapperRef or pMappableRef should be a descendant of pContextRef + // Otherwise, it becomes possible to have a stale reference if hContext is freed first + // If this is not the case, add a backref to pContextRef as well + // + if (!refHasAncestor(pMapperRef, pContextRef) && + !refHasAncestor(pMappableRef, pContextRef)) + { + pContextBackRefItem = listAppendNew(&pContextRef->interBackRefs); + if (pContextBackRefItem == NULL) + { + listRemove(&pMapperRef->interMappings, pInterMapping); + listRemove(&pMappableRef->interBackRefs, pBackRefItem); + return NV_ERR_NO_MEMORY; + } + + pContextBackRefItem->pMapperRef = pMapperRef; + pContextBackRefItem->pMapping = pInterMapping; + } + + pInterMapping->pMappableRef = pMappableRef; + pInterMapping->pContextRef = pContextRef; + + if (ppMapping != NULL) + *ppMapping = pInterMapping; + + return NV_OK; +} + +void +refRemoveInterMapping +( + RsResourceRef *pMapperRef, + RsInterMapping *pMapping +) +{ + RsInterMappingBackRefListIter it; + RS_INTER_MAPPING_BACK_REF *pBackRefItem = NULL; + RsResourceRef *pMappableRef = pMapping->pMappableRef; + RsResourceRef *pContextRef = pMapping->pContextRef; + + // Find and remove the mappable's backref linked to this inter-mapping + it = listIterAll(&pMappableRef->interBackRefs); + while (listIterNext(&it)) + { + pBackRefItem = it.pValue; + if (pBackRefItem->pMapping == pMapping) + { + listRemove(&pMappableRef->interBackRefs, pBackRefItem); + break; + } + } + + // Find and remove the context's backref linked to this inter-mapping, if present + it = listIterAll(&pContextRef->interBackRefs); + while (listIterNext(&it)) + { + pBackRefItem = it.pValue; + if (pBackRefItem->pMapping == pMapping) + { + listRemove(&pContextRef->interBackRefs, pBackRefItem); + break; + } + } + + listRemove(&pMapperRef->interMappings, pMapping); +} + +NV_STATUS +refCacheRef +( + RsResourceRef *pParentRef, + RsResourceRef *pResourceRef +) +{ + 
return indexAdd(&pParentRef->cachedRefMap, pResourceRef->internalClassId, pResourceRef); +} + +NV_STATUS +refUncacheRef +( + RsResourceRef *pParentRef, + RsResourceRef *pResourceRef +) +{ + return indexRemove(&pParentRef->cachedRefMap, pResourceRef->internalClassId, pResourceRef); +} + +NV_STATUS +refAddDependant +( + RsResourceRef *pResourceRef, + RsResourceRef *pDependantRef +) +{ + // dependencies are implicit between a parent resource reference and child resource reference + if (refHasAncestor(pDependantRef, pResourceRef)) + return NV_OK; + + indexAdd(&pDependantRef->depBackRefMap, pResourceRef->internalClassId, pResourceRef); + return indexAdd(&pResourceRef->depRefMap, pDependantRef->internalClassId, pDependantRef); +} + +NV_STATUS +refRemoveDependant +( + RsResourceRef *pResourceRef, + RsResourceRef *pDependantRef +) +{ + indexRemove(&pDependantRef->depBackRefMap, pResourceRef->internalClassId, pResourceRef); + return indexRemove(&pResourceRef->depRefMap, pDependantRef->internalClassId, pDependantRef); +} + +NvBool +refPendingFree +( + RsResourceRef *pResourceRef, + RsClient *pClient +) +{ + return ((pResourceRef->freeNode.pNext != NULL) || + (pResourceRef->freeNode.pPrev != NULL) || + (pResourceRef == listHead(&pClient->pendingFreeList))); +} + +void +resAddAdditionalDependants_IMPL +( + RsClient *pClient, + RsResource *pResource, + RsResourceRef *pReference +) +{ + return; +} diff --git a/src/nvidia/src/libraries/resserv/src/rs_server.c b/src/nvidia/src/libraries/resserv/src/rs_server.c new file mode 100644 index 000000000..a5f00f017 --- /dev/null +++ b/src/nvidia/src/libraries/resserv/src/rs_server.c @@ -0,0 +1,3602 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#define NVOC_RS_SERVER_H_PRIVATE_ACCESS_ALLOWED +#include "nvlog_inc.h" +#include "resserv/resserv.h" +#include "resserv/rs_server.h" +#include "resserv/rs_client.h" +#include "resserv/rs_resource.h" +#include "tls/tls.h" +#include "nv_speculation_barrier.h" + +/** + * Get the RsClient from a client handle without taking locks + * @param[in] pServer + * @param[in] hClient The handle to lookup + * @param[out] ppClient The RsClient associated with the handle + */ +static NV_STATUS _serverFindClient(RsServer *pServer, NvHandle hClient, RsClient **ppClient); + +/** + * Get the CLIENT_ENTRY from a client handle without taking locks + * @param[in] pServer + * @param[in] hClient The handle to lookup + * @param[in] bFindPartial Include entries that have not finished constructing + * @param[out] ppClientEntry The client entry associated with the handle + */ +static NV_STATUS _serverFindClientEntry(RsServer *pServer, NvHandle hClient, NvBool bFindPartial, CLIENT_ENTRY **ppClientEntry); + +/** + * Insert a CLIENT_ENTRY in the server database without taking locks + * @param[in] pServer + * @param[in] pClientEntry The client entry associated with the handle + */ +static NV_STATUS _serverInsertClientEntry(RsServer *pServer, CLIENT_ENTRY *pClientEntry, CLIENT_ENTRY **ppClientNext); + +/** + * Find the next available client handle in bucket. + * @param[in] pServer + * @param[in] hClientIn + * @param[out] pClientOut + */ +static NV_STATUS _serverFindNextAvailableClientHandleInBucket(RsServer *pServer, NvHandle hClientIn, NvHandle *phClientOut, CLIENT_ENTRY ***pppClientNext); + +/** + * Create a client entry and a client lock for a client that does not exist yet. Used during client + * construction. No locks will be taken if this call fails. + * @param[in] pServer + * @param[in] hClient + */ +static NV_STATUS _serverCreateEntryAndLockForNewClient(RsServer *pServer, NvHandle *phClient, NvBool bInternalHandle, CLIENT_ENTRY **ppClientEntry ); + +/** + * Lock and retrieve the RsClient associated with a client handle. + * @param[in] pServer + * @param[in] access + * @param[in] hClient Handle of client to look-up + * @param[out] pClient RsClient associated with the client handle + */ +static NV_STATUS _serverLockClient(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient, RsClient **ppClient); + +/** + * Lock and retrieve the RsClient associated with a client handle, and update lock info. + * @param[in] pServer + * @param[in] access + * @param[in] hClient Handle of client to look-up + * @param[inout] pLockInfo Lock state + * @param[out] pClient RsClient associated with the client handle + */ +static NV_STATUS _serverLockClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags, RsClient **ppClient); + +/** + * Lock and retrieve two RsClient associated with a pair of client handles, and update lock info. 
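+ * Locks are acquired in increasing client-handle order, so two callers that
+ * name the same pair of clients in opposite order cannot deadlock each other.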
+ * @param[in] pServer + * @param[in] access + * @param[in] hClient1, hClient2 Handles of clients to look-up and lock + * @param[inout] pLockInfo Lock state + * @param[out] pClient1, pClient2 RsClient associated with the client handles + */ +static NV_STATUS _serverLockDualClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient1, NvHandle hClient2, RS_LOCK_INFO *pLockInfo, NvU32 *pReleaseFlags, RsClient **ppClient1, RsClient **ppClient2); + +/** + * Unlock a client by handle + * @param[in] pServer + * @param[in] access + * @param[in] hClient Handle of the client to unlock + */ +static NV_STATUS _serverUnlockClient(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient); + +/** + * Unlock a client by handle, and update lock info. + * @param[in] pServer + * @param[in] access + * @param[in] hClient Handle of the client to unlock + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +static NV_STATUS _serverUnlockClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient, RS_LOCK_INFO* pLockInfo, NvU32 *pReleaseFlags); + +/** + * Unlock a client by handle, and update lock info. + * @param[in] pServer + * @param[in] access + * @param[in] hClient1, hClient2 Handles of the clients to unlock + * @param[inout] pLockInfo Lock state + * @param[inout] pReleaseFlags Flags indicating the locks that need to be released + */ +static NV_STATUS _serverUnlockDualClientWithLockInfo(RsServer *pServer, LOCK_ACCESS_TYPE access, NvHandle hClient1, NvHandle hClient2, RS_LOCK_INFO* pLockInfo, NvU32 *pReleaseFlags); + +NV_STATUS serverFreeResourceTreeUnderLock(RsServer *pServer, RS_RES_FREE_PARAMS *pFreeParams) +{ + NV_STATUS status; + RsResourceRef *pResourceRef = pFreeParams->pResourceRef; + RS_LOCK_INFO *pLockInfo = pFreeParams->pLockInfo; + NvU32 releaseFlags = 0; + + NV_ASSERT_OR_RETURN(pResourceRef != NULL, NV_ERR_INVALID_OBJECT_HANDLE); + + status = serverUpdateLockFlagsForFree(pServer, pFreeParams); + if (status != NV_OK) + return status; + + status = serverSessionLock_Prologue(LOCK_ACCESS_WRITE, pResourceRef, pLockInfo, &releaseFlags); + if (status != NV_OK) + return status; + + pLockInfo->flags |= RS_LOCK_FLAGS_FREE_SESSION_LOCK; + pLockInfo->traceOp = RS_LOCK_TRACE_FREE; + pLockInfo->traceClassId = pResourceRef->externalClassId; + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientFreeResource(pResourceRef->pClient, pServer, pFreeParams); + NV_ASSERT(status == NV_OK); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + serverSessionLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + return status; +} + +#if RS_STANDALONE +NV_STATUS +serverInitFreeParams_Recursive(NvHandle hClient, NvHandle hResource, RS_LOCK_INFO* pLockInfo, RS_RES_FREE_PARAMS *pParams) +{ + portMemSet(pParams, 0, sizeof(*pParams)); + pParams->hClient = hClient; + pParams->hResource = hResource; + pParams->pLockInfo = pLockInfo; + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForCopy(RsServer *pServer, RS_RES_DUP_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForFree(RsServer *pServer, RS_RES_FREE_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverUpdateLockFlagsForInterAutoUnmap(RsServer *pServer, RS_INTER_UNMAP_PARAMS *pParams) +{ + return NV_OK; +} + +NV_STATUS serverFreeResourceRpcUnderLock(RsServer *pServer, 
RS_RES_FREE_PARAMS *pParams) +{ + return NV_OK; +} +#endif + + +// +// Client handle format: +// +// fn [ C 1 D/E ] [ *INDEX* ] +// bit 31 20 19 0 +// + +#define RS_CLIENT_HANDLE_DECODE_MASK 0xFFFFF +#define CLIENT_DECODEHANDLE(handle) (handle & RS_CLIENT_HANDLE_DECODE_MASK) + +#define CLIENT_ENCODEHANDLE(index) (RS_CLIENT_HANDLE_BASE | index) +#define CLIENT_ENCODEHANDLE_INTERNAL(internalBase, index) (internalBase | index) + +NV_STATUS +serverConstruct +( + RsServer *pServer, + RS_PRIV_LEVEL privilegeLevel, + NvU32 maxDomains +) +{ + NvU32 i; + PORT_MEM_ALLOCATOR *pAllocator = portMemAllocatorCreateNonPaged(); + + pServer->privilegeLevel = privilegeLevel; + pServer->bConstructed = NV_TRUE; + pServer->pAllocator = pAllocator; + pServer->bDebugFreeList = NV_FALSE; + pServer->bRsAccessEnabled = NV_TRUE; + pServer->internalHandleBase = RS_CLIENT_INTERNAL_HANDLE_BASE; + pServer->activeClientCount = 0; + pServer->activeResourceCount= 0; + pServer->roTopLockApiMask = 0; + /* pServer->bUnlockedParamCopy is set in _rmapiLockAlloc */ + + pServer->pClientSortedList = PORT_ALLOC(pAllocator, sizeof(RsClientList)*RS_CLIENT_HANDLE_BUCKET_COUNT); + if (NULL == pServer->pClientSortedList) + goto fail; + + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + listInit(&pServer->pClientSortedList[i], pAllocator); + } + pServer->clientCurrentHandleIndex = 0; + + pServer->pClientListLock = portSyncRwLockCreate(pAllocator); + if (pServer->pClientListLock == NULL) + goto fail; + +#if RS_STANDALONE + RS_LOCK_VALIDATOR_INIT(&pServer->topLockVal, LOCK_VAL_LOCK_CLASS_API, 0xdead0000); + pServer->pTopLock = portSyncRwLockCreate(pAllocator); + if (pServer->pTopLock == NULL) + goto fail; + + RS_LOCK_VALIDATOR_INIT(&pServer->resLockVal, LOCK_VAL_LOCK_CLASS_GPU, 0xbeef0000); + pServer->pResLock = portSyncRwLockCreate(pAllocator); + if (pServer->pResLock == NULL) + goto fail; + + pServer->topLockOwnerTid = ~0; +#endif + + pServer->pShareMapLock = portSyncSpinlockCreate(pAllocator); + + mapInitIntrusive(&pServer->shareMap); + + listInit(&pServer->defaultInheritedSharePolicyList, pAllocator); + listInit(&pServer->globalInternalSharePolicyList, pAllocator); + + if (NV_OK != serverInitGlobalSharePolicies(pServer)) + { + mapDestroy(&pServer->shareMap); + listDestroy(&pServer->defaultInheritedSharePolicyList); + listDestroy(&pServer->globalInternalSharePolicyList); + goto fail; + } + + return NV_OK; +fail: + +#if RS_STANDALONE + if (pServer->pResLock != NULL) + portSyncRwLockDestroy(pServer->pResLock); + + if (pServer->pTopLock != NULL) + portSyncRwLockDestroy(pServer->pTopLock); +#endif + + if (pServer->pClientListLock != NULL) + portSyncRwLockDestroy(pServer->pClientListLock); + + if (pServer->pShareMapLock != NULL) + portSyncSpinlockDestroy(pServer->pShareMapLock); + + if (pServer->pClientSortedList != NULL) + { + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + listDestroy(&pServer->pClientSortedList[i]); + } + PORT_FREE(pAllocator, pServer->pClientSortedList); + } + + if (pAllocator != NULL) + portMemAllocatorRelease(pAllocator); + + return NV_ERR_INSUFFICIENT_RESOURCES; +} + + +NV_STATUS +serverDestruct +( + RsServer *pServer +) +{ + NvU32 i; + RS_LOCK_INFO lockInfo; + portMemSet(&lockInfo, 0, sizeof(lockInfo)); + + if (!pServer->bConstructed) + return NV_ERR_INVALID_OBJECT; + + for (i = 0; i < RS_CLIENT_HANDLE_BUCKET_COUNT; i++) + { + CLIENT_ENTRY **ppClientEntry; + NvHandle hClient = 0; + + while ((ppClientEntry = listHead(&pServer->pClientSortedList[i])) != NULL) + { + RS_RES_FREE_PARAMS_INTERNAL 
freeParams;
+            lockInfo.pClient = (*ppClientEntry)->pClient;
+            hClient = lockInfo.pClient->hClient;
+            serverInitFreeParams_Recursive(hClient, hClient, &lockInfo, &freeParams);
+            serverFreeResourceTree(pServer, &freeParams);
+        }
+
+        listDestroy(&pServer->pClientSortedList[i]);
+    }
+
+    PORT_FREE(pServer->pAllocator, pServer->pClientSortedList);
+    mapDestroy(&pServer->shareMap);
+    listDestroy(&pServer->defaultInheritedSharePolicyList);
+    listDestroy(&pServer->globalInternalSharePolicyList);
+
+#if RS_STANDALONE
+    portSyncRwLockDestroy(pServer->pResLock);
+    portSyncRwLockDestroy(pServer->pTopLock);
+#endif
+
+    portSyncSpinlockDestroy(pServer->pShareMapLock);
+    portSyncRwLockDestroy(pServer->pClientListLock);
+
+    portMemAllocatorRelease(pServer->pAllocator);
+
+    pServer->bConstructed = NV_FALSE;
+
+    return NV_OK;
+}
+
+static
+NV_STATUS
+_serverFreeClient_underlock
+(
+    RsServer *pServer,
+    RsClient *pClient
+)
+{
+    CLIENT_ENTRY *pClientEntry = NULL;
+    NvHandle hClient;
+    NV_STATUS status;
+    PORT_RWLOCK *pLock = NULL;
+
+    status =_serverFindClientEntry(pServer, pClient->hClient, NV_FALSE, &pClientEntry);
+    if (status != NV_OK)
+    {
+        return status;
+    }
+
+    NV_ASSERT(pClientEntry->pClient != NULL);
+
+    hClient = pClient->hClient;
+    pClientEntry->pClient = NULL;
+    pClientEntry->hClient = 0;
+
+    clientFreeAccessBackRefs(pClient, pServer);
+
+    objDelete(pClient);
+
+    listRemoveFirstByValue(&pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK], &pClientEntry);
+    pLock = pClientEntry->pLock;
+
+    RS_RWLOCK_RELEASE_WRITE_EXT(pLock, &pClientEntry->lockVal, NV_TRUE);
+    portSyncRwLockDestroy(pLock);
+    PORT_FREE(pServer->pAllocator, pClientEntry);
+
+    return NV_OK;
+}
+
+NV_STATUS
+serverAllocDomain
+(
+    RsServer *pServer,
+    NvU32 hParentDomain,
+    ACCESS_CONTROL *pAccessControl,
+    NvHandle *phDomain
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+serverFreeDomain
+(
+    RsServer *pServer,
+    NvHandle hDomain
+)
+{
+    NvU32 bucket;
+    for (bucket = 0; bucket < RS_CLIENT_HANDLE_BUCKET_COUNT; bucket ++)
+    {
+        RsClientList *pClientList = &(pServer->pClientSortedList[bucket]);
+        CLIENT_ENTRY **ppClientEntry = listHead(pClientList);
+        while (ppClientEntry != NULL)
+        {
+            CLIENT_ENTRY *pClientEntry = *ppClientEntry;
+            RS_CLIENT_FREE_PARAMS params;
+
+            portMemSet(&params, 0, sizeof(params));
+            if (pClientEntry == NULL)
+            {
+                ppClientEntry = listNext(pClientList, ppClientEntry);
+                continue;
+            }
+            params.hClient = pClientEntry->hClient;
+
+            serverFreeClient(pServer, &params);
+            ppClientEntry = listHead(pClientList);
+        }
+    }
+    return NV_OK;
+}
+
+NV_STATUS serverValidate
+(
+    RsServer *pServer,
+    NvU32 hDomain,
+    NvHandle hClient
+)
+{
+    return NV_OK;
+}
+
+NV_STATUS
+serverValidateAlloc
+(
+    RsServer *pServer,
+    NvU32 hDomain,
+    NvU32 externalClassId
+)
+{
+    // Placeholder for allocation validation
+    return NV_OK;
+}
+
+NV_STATUS
+serverAllocClient
+(
+    RsServer *pServer,
+    RS_RES_ALLOC_PARAMS_INTERNAL *pParams
+)
+{
+    NV_STATUS status;
+    NvHandle hClient = 0;
+    RsClient *pClient = NULL;
+    CLIENT_ENTRY *pClientEntry = NULL;
+    NvBool bLockedClient = NV_FALSE;
+
+    if (!pServer->bConstructed)
+    {
+        status = NV_ERR_NOT_READY;
+        goto done;
+    }
+
+    // RS-TODO Assert that the RW top lock is held
+
+    hClient = pParams->hClient;
+#if !(RS_COMPATABILITY_MODE)
+    if (hClient != 0)
+    {
+        // Fail if the server supplied a client id
+        status = NV_ERR_INVALID_ARGUMENT;
+        goto done;
+    }
+#endif
+
+    status = _serverCreateEntryAndLockForNewClient(pServer, &hClient, !!(pParams->allocState & ALLOC_STATE_INTERNAL_CLIENT_HANDLE), &pClientEntry);
+
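+    //
+    // On success the new CLIENT_ENTRY is already inserted into its handle
+    // bucket and its per-client lock is held for write; the lock is released
+    // again on the done: path below.
+    //
+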
if (status != NV_OK) + { + goto done; + } + pParams->hClient = hClient; + pParams->hResource = hClient; + bLockedClient = NV_TRUE; + + status = resservClientFactory(pServer->pAllocator, pParams, &pClient); + if (NV_OK != status) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + + pClientEntry->pClient = pClient; + + // Automatically allocate client proxy resource + status = clientAllocResource(pClient, pServer, pParams); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "Allocated hClient: %x\n", hClient); + portAtomicIncrementU32(&pServer->activeClientCount); + +done: + if (bLockedClient) + _serverUnlockClient(pServer, LOCK_ACCESS_WRITE, pParams->hClient); + + if ((status != NV_OK) && (status != NV_ERR_INSERT_DUPLICATE_NAME) && (hClient != 0)) + { + if (_serverFindClientEntry(pServer, hClient, NV_TRUE, &pClientEntry) == NV_OK) + { + listRemoveFirstByValue(&pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK], &pClientEntry); + portSyncRwLockDestroy(pClientEntry->pLock); + PORT_FREE(pServer->pAllocator, pClientEntry); + } + + if (pClient != NULL) + { + objDelete(pClient); + } + } + + return status; +} + +static +NV_STATUS +_serverFreeClient +( + RsServer *pServer, + RS_CLIENT_FREE_PARAMS *pParams +) +{ + NV_STATUS status; + NV_STATUS lockStatus; + NvU32 releaseFlags = 0; + RsClient *pClient; + + lockStatus = _serverLockClient(pServer, LOCK_ACCESS_WRITE, pParams->hClient, &pClient); + if (lockStatus != NV_OK) + { + status = NV_ERR_INVALID_CLIENT; + goto done; + } + releaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pResFreeParams->pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverFreeClient_underlock(pServer, pClient); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "Freeing hClient: %x\n", hClient); + portAtomicDecrementU32(&pServer->activeClientCount); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pResFreeParams->pLockInfo, &releaseFlags); + + if (releaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK) + _serverUnlockClient(pServer, LOCK_ACCESS_WRITE, pParams->hClient); + + return status; +} + +NV_STATUS +serverAllocResource +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pParams +) +{ + NV_STATUS status; + NvU32 releaseFlags = 0; + API_STATE *pApiState; + NvBool bClientAlloc = (pParams->externalClassId == NV01_ROOT || + pParams->externalClassId == NV01_ROOT_CLIENT || + pParams->externalClassId == NV01_ROOT_NON_PRIV); + LOCK_ACCESS_TYPE topLockAccess; + NvU32 initialLockState; + RS_LOCK_INFO *pLockInfo; + RsClient *pSecondClient = NULL; + NvHandle hSecondClient; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + initialLockState = pLockInfo->state; + + status = serverAllocApiCopyIn(pServer, pParams, &pApiState); + if (status != NV_OK) + return status; + + status = serverAllocResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + if ((status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags)) != NV_OK) + goto done; + + if (status == NV_OK) + { + if (bClientAlloc) + { + status = serverAllocClient(pServer, pParams); + } + else + { + status = serverLookupSecondClient(pParams, &hSecondClient); + + if (status != NV_OK) + goto done; + + if (hSecondClient == 0) + { + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + 
pParams->hClient, pLockInfo, + &releaseFlags, &pParams->pClient); + + if (status != NV_OK) + goto done; + + if (!pParams->pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + } + else + { + status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pParams->hClient, hSecondClient, + pLockInfo, &releaseFlags, + &pParams->pClient, &pSecondClient); + + if (status != NV_OK) + goto done; + + if (!pParams->pClient->bActive || !pSecondClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + } + + // The second client's usage is class-dependent and should be validated + // by the class's constructor + status = clientValidate(pParams->pClient, pParams->pSecInfo); + + if (status != NV_OK) + goto done; + + status = serverAllocResourceUnderLock(pServer, pParams); + } + } + + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, + "hParent 0x%08x : hClass 0x%08x allocation failed\n", + pParams->hParent, pParams->externalClassId); + } + + // RS-TODO: Can this be moved before _ResLock? + status = serverAllocEpilogue_WAR(pServer, status, bClientAlloc, pParams); + +done: + + if (!bClientAlloc) + { + if (pSecondClient != NULL) + { + _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + pParams->hClient, pSecondClient->hClient, + pLockInfo, &releaseFlags); + } + else + { + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + pLockInfo, &releaseFlags); + } + } + + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + // copyout as needed, being careful not to overwrite a useful status value + status = serverAllocApiCopyOut(pServer, status, pApiState); + + NV_ASSERT(pLockInfo->state == initialLockState); + + return status; +} + +#if RS_STANDALONE +// RS-TODO rename to UnderClientLock +NV_STATUS +serverAllocResourceUnderLock +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS *pParams +) +{ + NV_STATUS status; + RsClient *pClient = pParams->pClient; + NvHandle hResource = pParams->hResource; + NvU32 releaseFlags = 0; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientAssignResourceHandle(pClient, &hResource); + if (status != NV_OK) + goto done; + + pParams->hResource = hResource; + pParams->hParent = (pParams->hParent == 0) ? 
pParams->hClient : pParams->hParent;
+    status = clientAllocResource(pClient, pServer, pParams);
+    if (status != NV_OK)
+        goto done;
+
+    // NV_PRINTF(LEVEL_INFO, "hClient %x: Allocated hResource %x with class %x\n",
+    //     pParams->hClient, pParams->hResource, pParams->externalClassId);
+
+done:
+    serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags);
+    return status;
+}
+#endif
+
+NV_STATUS
+clientUpdatePendingFreeList_IMPL
+(
+    RsClient *pClient,
+    RsResourceRef *pTargetRef,
+    RsResourceRef *pReference,
+    NvBool bMove
+)
+{
+    RsIndexIter it;
+    NvBool bInList = refPendingFree(pTargetRef, pClient);
+    RS_FREE_STACK *pFs = pClient->pFreeStack;
+    if (bMove)
+    {
+        if (pReference != pTargetRef)
+        {
+            // Basic circular dependency check
+            while (pFs != NULL)
+            {
+                RsResourceRef *pFsRef = pFs->pResourceRef;
+                NV_ASSERT_OR_GOTO(pFsRef != pTargetRef, done);
+
+                pFs = pFs->pPrev;
+            }
+        }
+
+        if (bInList)
+            listRemove(&pClient->pendingFreeList, pTargetRef);
+        listPrependExisting(&pClient->pendingFreeList, pTargetRef);
+    }
+    else if (!bInList)
+    {
+        listPrependExisting(&pClient->pendingFreeList, pTargetRef);
+    }
+
+    //
+    // Recursively add children to the pending free list and move
+    // them to the front of the list
+    //
+    it = indexRefIterAll(&pTargetRef->childRefMap);
+    while (indexRefIterNext(&it))
+    {
+        clientUpdatePendingFreeList(pClient, *it.pValue, pReference, NV_TRUE);
+    }
+
+    //
+    // Recursively add dependencies to the pending free list and
+    // move them to the front of the list
+    //
+    it = indexRefIterAll(&pTargetRef->depRefMap);
+    while (indexRefIterNext(&it))
+    {
+        clientUpdatePendingFreeList(pClient, *it.pValue, pReference, NV_TRUE);
+    }
+
+    if (pTargetRef->pResource != NULL)
+    {
+        // Allow some objects to add more dependants here
+        resAddAdditionalDependants(pClient, pTargetRef->pResource, pReference);
+    }
+
+done:
+    return NV_OK;
+}
+
+NV_STATUS
+serverFreeClientList
+(
+    RsServer *pServer,
+    NvHandle *phClientList,
+    NvU32 numClients,
+    NvU32 freeState,
+    API_SECURITY_INFO *pSecInfo
+)
+{
+    NvU32 i, j;
+
+    //
+    // Call serverFreeClient twice; first for high priority resources
+    // then again for remaining resources
+    //
+    for (i = 0; i < 2; ++i)
+    {
+        for (j = 0; j < numClients; ++j)
+        {
+            RS_CLIENT_FREE_PARAMS params;
+            portMemSet(&params, 0, sizeof(params));
+
+            if (phClientList[j] == 0)
+                continue;
+
+            params.hClient = phClientList[j];
+            params.bHiPriOnly = (i == 0);
+            params.state = freeState;
+            params.pSecInfo = pSecInfo;
+
+            serverFreeClient(pServer, &params);
+        }
+    }
+
+    return NV_OK;
+}
+
+NV_STATUS
+serverFreeResourceTree
+(
+    RsServer *pServer,
+    RS_RES_FREE_PARAMS *pParams
+)
+{
+    RsClient *pClient = NULL;
+    NV_STATUS status;
+    RsResourceRef *pResourceRef = NULL;
+    RsResourceRef *pTargetRef;
+    RsResourceRef *pFirstLowPriRef;
+    NvBool bHiPriOnly = pParams->bHiPriOnly;
+    NvBool bRecursive = NV_FALSE;
+    RS_FREE_STACK freeStack;
+    NvBool bPopFreeStack = NV_FALSE;
+    RS_LOCK_INFO *pLockInfo;
+    NvU32 initialLockState;
+    NvU32 releaseFlags = 0;
+    LOCK_ACCESS_TYPE topLockAccess;
+
+    if (!pServer->bConstructed)
+        return NV_ERR_NOT_READY;
+
+    pLockInfo = pParams->pLockInfo;
+    NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT);
+
+    initialLockState = pLockInfo->state;
+
+    portMemSet(&freeStack, 0, sizeof(freeStack));
+
+    status = serverFreeResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess);
+    if (status != NV_OK)
+        goto done;
+
+    status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags);
+    if (status != NV_OK)
+ goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + if (pClient->pFreeStack != NULL) + freeStack.pPrev = pClient->pFreeStack; + pClient->pFreeStack = &freeStack; + bPopFreeStack = NV_TRUE; + + status = clientGetResourceRef(pClient, pParams->hResource, &pResourceRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_ERROR, "hObject 0x%x not found for client 0x%x\n", + pParams->hResource, + pParams->hClient); +#if (RS_COMPATABILITY_MODE) + status = NV_OK; +#endif + goto done; + } + pParams->pResourceRef = pResourceRef; + freeStack.pResourceRef = pResourceRef; + + if (pParams->bInvalidateOnly && pResourceRef->bInvalidated) + { + status = NV_OK; + goto done; + } + + bRecursive = (freeStack.pPrev != NULL); + status = clientUpdatePendingFreeList(pClient, pResourceRef, pResourceRef, bRecursive); + if (status != NV_OK) + goto done; + + clientPostProcessPendingFreeList(pClient, &pFirstLowPriRef); + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "PENDING FREE LIST START (0x%x)\n", pClient->hClient); + NV_PRINTF(LEVEL_INFO, " _HI_PRIORITY_:\n"); + pTargetRef = listHead(&pClient->pendingFreeList); + while (pTargetRef != NULL) + { + if (pTargetRef == pFirstLowPriRef) + NV_PRINTF(LEVEL_INFO, " _LO_PRIORITY_:\n"); + + NV_PRINTF(LEVEL_INFO, " 0x%08x [%04x]\n", + pTargetRef->hResource, + pTargetRef->externalClassId); + pTargetRef = listNext(&pClient->pendingFreeList, pTargetRef); + } + NV_PRINTF(LEVEL_INFO, "PENDING FREE LIST END (0x%x)\n", pClient->hClient); + } + + while ((pTargetRef = listHead(&pClient->pendingFreeList)) != NULL) + { + NvBool bInvalidateOnly = NV_TRUE; + RS_FREE_STACK *pFs = &freeStack; + RS_RES_FREE_PARAMS_INTERNAL freeParams; + NvHandle hTarget = pTargetRef->hResource; + + if (bHiPriOnly && pTargetRef == pFirstLowPriRef) + goto done; + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "(%08x, %08x)\n", pClient->hClient, hTarget); + } + + if (hTarget == pParams->hResource) + { + // Target resource should always be the last one to be freed + NV_ASSERT((listCount(&pClient->pendingFreeList) == 1) || bRecursive); + status = serverFreeResourceTreeUnderLock(pServer, pParams); + break; + } + + while (pFs != NULL) + { + RsResourceRef *pFsRef = pFs->pResourceRef; + if (refHasAncestor(pTargetRef, pFsRef)) + { + bInvalidateOnly = pParams->bInvalidateOnly; + break; + } + pFs = pFs->pPrev; + } + + serverInitFreeParams_Recursive(pClient->hClient, hTarget, pLockInfo, &freeParams); + freeParams.pResourceRef = pTargetRef; + freeParams.bInvalidateOnly = bInvalidateOnly; + freeParams.pSecInfo = pParams->pSecInfo; + status = serverFreeResourceTreeUnderLock(pServer, &freeParams); + NV_ASSERT(status == NV_OK); + + if (pServer->bDebugFreeList) + { + NV_PRINTF(LEVEL_INFO, "(%08x, %08x) status=0x%x\n", + pClient->hClient, + hTarget, + status); + } + } + + if (bPopFreeStack) + { + if (pClient != NULL) + pClient->pFreeStack = freeStack.pPrev; + bPopFreeStack = NV_FALSE; + } + + if (pParams->hClient == pParams->hResource) + { + pClient->bActive = NV_FALSE; + } + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + + if (pParams->hClient == pParams->hResource) + { + NvBool bReAcquireLock = (topLockAccess != LOCK_ACCESS_WRITE); + RS_CLIENT_FREE_PARAMS_INTERNAL clientFreeParams; + portMemSet(&clientFreeParams, 0, 
sizeof(clientFreeParams)); + clientFreeParams.pResFreeParams = pParams; + clientFreeParams.hClient = pParams->hClient; + + if (bReAcquireLock) + { + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + serverTopLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + _serverFreeClient(pServer, &clientFreeParams); + serverTopLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + initialLockState &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + } + else + { + _serverFreeClient(pServer, &clientFreeParams); + } + + pClient = NULL; + } + +done: + if (bPopFreeStack) + { + if (pClient != NULL) + pClient->pFreeStack = freeStack.pPrev; + bPopFreeStack = NV_FALSE; + } + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + // + // Log any changes to lock state, but ignore the ALLOW_RECURSIVE_LOCKS flag + // as that can be set by serverUpdateLockFlagsForFree() when dealing with + // RPCs to GSP; this would have already printed the relevant message. + // + NV_ASSERT((pLockInfo->state == initialLockState) || + (pLockInfo->state == (initialLockState | RS_LOCK_STATE_ALLOW_RECURSIVE_RES_LOCK))); + + return status; +} + +NV_STATUS +serverControl +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS *pParams +) +{ + NV_STATUS status; + RsClient *pClient; + RsResourceRef *pResourceRef = NULL; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + LOCK_ACCESS_TYPE access = LOCK_ACCESS_WRITE; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverControlLookupLockFlags(pServer, RS_LOCK_TOP, pParams, pParams->pCookie, &access); + if (status != NV_OK) + goto done; + + if (pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyIn(pServer, pParams, pParams->pCookie); + if (status != NV_OK) + goto done; + } + + status = serverTopLock_Prologue(pServer, access, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + if (!pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClient, &pParams->secInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hObject, &pResourceRef); + if (status != NV_OK) + goto done; + pParams->pResourceRef = pResourceRef; + + if (pResourceRef->bInvalidated || pResourceRef->pResource == NULL) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + pLockInfo->flags |= RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK; + + status = serverSessionLock_Prologue(LOCK_ACCESS_WRITE, pResourceRef, + pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + if (pResourceRef->pSession != NULL) + { + if (!pResourceRef->pSession->bValid) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + } + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pResourceRef = pResourceRef; + callContext.pClient = pClient; + callContext.secInfo = pParams->secInfo; + callContext.pServer = pServer; + callContext.pControlParams = pParams; + callContext.pLockInfo = pParams->pLockInfo; + + // RS-TODO removeme + pParams->pLegacyParams = pParams; + + if (pParams->hClient == pParams->hObject) + { + pParams->hParent = pParams->hClient; + } + else + { + 
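+        // For any object other than the client proxy itself, report the
+        // handle of the object's immediate parent as hParent.
+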
pParams->hParent = pResourceRef->pParentRef->hResource; + } + pLockInfo->pContextRef = pResourceRef->pParentRef; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = resControl(pResourceRef->pResource, &callContext, pParams); + resservRestoreTlsCallContext(pOldContext); + +done: + + serverSessionLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, access, pLockInfo, &releaseFlags); + + if (pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyOut(pServer, pParams, pParams->pCookie, status); + } + + return status; +} + +NV_STATUS +serverCopyResource +( + RsServer *pServer, + RS_RES_DUP_PARAMS *pParams +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + RsClient *pClientSrc; + RsClient *pClientDst; + RsResourceRef *pResourceRefSrc; + LOCK_ACCESS_TYPE topLockAccess; + + NvHandle hClientSrc = pParams->hClientSrc; + NvHandle hClientDst = pParams->hClientDst; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverCopyResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + hClientSrc, hClientDst, + pLockInfo, &releaseFlags, + &pClientSrc, &pClientDst); + if (status != NV_OK) + goto done; + + if (!pClientSrc->bActive || !pClientDst->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClientDst, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClientSrc, pParams->hResourceSrc, &pResourceRefSrc); + if (status != NV_OK) + goto done; + + if (pResourceRefSrc->bInvalidated) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + if (!resCanCopy(pResourceRefSrc->pResource)) + { + status = NV_ERR_INVALID_ARGUMENT; + goto done; + } + + status = clientAssignResourceHandle(pClientDst, &pParams->hResourceDst); + if (status != NV_OK) + goto done; + + pParams->pSrcClient = pClientSrc; + pParams->pSrcRef = pResourceRefSrc; + + status = serverUpdateLockFlagsForCopy(pServer, pParams); + if (status != NV_OK) + return status; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientCopyResource(pClientDst, pServer, pParams); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Copied hResource: %x from hClientSrc: %x hResourceSrc: %x\n", + // hClientDst, hResourceDst, hClientSrc, hResourceSrc); + +done: + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pParams->pLockInfo, &releaseFlags); + + _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + hClientSrc, hClientDst, + pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +/** + * Special case of serverShareResourceAccess for sharing with a specific client + * Requires two client locks, so separated into a different function from the normal + * @param[in] pServer + * @param[in] pParams Parameters passed into share function + */ +static NV_STATUS +_serverShareResourceAccessClient +( + RsServer *pServer, + 
RS_RES_SHARE_PARAMS *pParams +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + RsClient *pClientOwner; + RsClient *pClientTarget; + RsResourceRef *pResourceRef; + LOCK_ACCESS_TYPE topLockAccess; + + NvHandle hClientOwner = pParams->hClient; + NvHandle hClientTarget = pParams->pSharePolicy->target; + + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + status = serverShareResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + hClientOwner, hClientTarget, + pLockInfo, &releaseFlags, + &pClientOwner, &pClientTarget); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClientOwner, pParams->hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + if (pResourceRef->bInvalidated) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClientOwner; + callContext.pResourceRef = pResourceRef; + callContext.secInfo = *pParams->pSecInfo; + callContext.pLockInfo = pParams->pLockInfo; + resservSwapTlsCallContext(&pOldContext, &callContext); + + if (hClientOwner == hClientTarget) + { + // + // Special case: RS_SHARE_TYPE_CLIENT with own client + // Allows the caller to directly modify the access map of their object + // + status = clientShareResourceTargetClient(pClientOwner, pResourceRef, pParams->pSharePolicy, &callContext); + if (status != NV_OK) + goto restore_context; + } + + // Add backref into pClientTarget to prevent stale client handles + status = clientAddAccessBackRef(pClientTarget, pResourceRef); + if (status != NV_OK) + goto restore_context; + + status = clientShareResource(pClientOwner, pResourceRef, pParams->pSharePolicy, &callContext); + if (status != NV_OK) + goto restore_context; + +restore_context: + resservRestoreTlsCallContext(pOldContext); + + // NV_PRINTF(LEVEL_INFO, "hClientOwner %x: Shared hResource: %x with hClientTarget: %x\n", + // hClientOwner, pParams->hResource, hClientTarget); + +done: + _serverUnlockDualClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, + hClientOwner, hClientTarget, + pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + + +NV_STATUS +serverShareResourceAccess +( + RsServer *pServer, + RS_RES_SHARE_PARAMS *pParams +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + RsClient *pClient; + RsResourceRef *pResourceRef; + NvU16 shareType; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + LOCK_ACCESS_TYPE topLockAccess; + + if (!pServer->bConstructed) + return NV_ERR_NOT_READY; + + if (!pServer->bRsAccessEnabled) + return NV_ERR_FEATURE_NOT_ENABLED; + + if (pParams->pSharePolicy == NULL) + return NV_ERR_INVALID_ARGUMENT; + + shareType = pParams->pSharePolicy->type; + if (shareType >= RS_SHARE_TYPE_MAX) + return NV_ERR_INVALID_ARGUMENT; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + if (shareType == RS_SHARE_TYPE_CLIENT) + { + // Special case: This requires two locks, so it has its own function + return _serverShareResourceAccessClient(pServer, pParams); + } + + status = 
serverShareResourceLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + if (pResourceRef->bInvalidated) + { + status = NV_ERR_RESOURCE_LOST; + goto done; + } + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pResourceRef; + callContext.secInfo = *pParams->pSecInfo; + callContext.pLockInfo = pParams->pLockInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = clientShareResource(pClient, pResourceRef, pParams->pSharePolicy, &callContext); + resservRestoreTlsCallContext(pOldContext); + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Shared hResource: %x\n", hClient, pParams->hResource); + +done: + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverMap +( + RsServer *pServer, + NvHandle hClient, + NvHandle hResource, + RS_CPU_MAP_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + RsClient *pClient; + RsResourceRef *pResourceRef; + RsResourceRef *pContextRef = NULL; + RsResource *pResource; + RsCpuMapping *pCpuMapping = NULL; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE topLockAccess = LOCK_ACCESS_WRITE; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_GOTO(pLockInfo != NULL, done); + + status = serverMapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + if (!pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + pResource = pResourceRef->pResource; + if (pResource == NULL) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = serverMap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + if (pParams->hContext != 0) + { + status = clientGetResourceRef(pClient, pParams->hContext, &pContextRef); + if (status != NV_OK) + { + NV_PRINTF(LEVEL_INFO, "hClient %x: Cannot find hContext: 0x%x\n", pClient->hClient, pParams->hContext); + goto done; + } + } + + status = refAddMapping(pResourceRef, pParams, pContextRef, &pCpuMapping); + if (status != NV_OK) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pClient = pClient; + 
callContext.pResourceRef = pResourceRef; + callContext.pLockInfo = pParams->pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + status = resMap(pResource, &callContext, pParams, pCpuMapping); + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + goto done; + + // NV_PRINTF(LEVEL_INFO, "hClient %x: Mapped hResource: 0x%x hContext: %x at addr: " NvP64_fmt "\n", + // hClient, hResource, pParams->hContext, pCpuMapping->pAddress); + + if (pParams->ppCpuVirtAddr != NULL) + *pParams->ppCpuVirtAddr = pCpuMapping->pLinearAddress; + +done: + if (status != NV_OK) + { + if (pCpuMapping != NULL) + refRemoveMapping(pResourceRef, pCpuMapping); + } + + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverUnmap +( + RsServer *pServer, + NvHandle hClient, + NvHandle hResource, + RS_CPU_UNMAP_PARAMS *pParams +) +{ + NV_STATUS status = NV_ERR_INVALID_STATE; + RsClient *pClient; + RsResourceRef *pResourceRef; + RsResource *pResource; + RsCpuMapping *pCpuMapping; + RS_LOCK_INFO *pLockInfo; + NvU32 releaseFlags = 0; + LOCK_ACCESS_TYPE topLockAccess = LOCK_ACCESS_WRITE; + + pLockInfo = pParams->pLockInfo; + NV_ASSERT_OR_GOTO(pLockInfo != NULL, done); + + status = serverUnmapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, hResource, &pResourceRef); + if (status != NV_OK) + goto done; + + pResource = pResourceRef->pResource; + if (pResource == NULL) + { + status = NV_ERR_NOT_SUPPORTED; + goto done; + } + + status = serverUnmap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + status = refFindCpuMappingWithFilter(pResourceRef, + pParams->pLinearAddress, + pParams->fnFilter, + &pCpuMapping); + if (status != NV_OK) + goto done; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientUnmapMemory(pClient, pResourceRef, pLockInfo, &pCpuMapping, pParams->pSecInfo); + +done: + serverUnmap_Epilogue(pServer, pParams); + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverInterMap +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pParams +) +{ + RsClient *pClient; + RsResourceRef *pMapperRef; + RsResourceRef *pMappableRef; + RsResourceRef *pContextRef; + RsInterMapping *pMapping = NULL; + LOCK_ACCESS_TYPE topLockAccess; + + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + NvBool bRestoreCallContext = NV_FALSE; + + 
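+    //
+    // The inter-mapping is recorded on the mapper reference before the map is
+    // attempted and removed again on failure; back-references added on the
+    // mappable (and, when needed, the context) let the mapping be found and
+    // torn down when any of those objects is freed first.
+    //
+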
NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + if (pParams->length == 0) + return NV_ERR_INVALID_LIMIT; + + status = serverInterMapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + if (!pClient->bActive) + { + status = NV_ERR_INVALID_STATE; + goto done; + } + + status = clientValidate(pClient, pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMapper, &pMapperRef); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMappable, &pMappableRef); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hDevice, &pContextRef); + if (status != NV_OK) + goto done; + + pLockInfo->pContextRef = pContextRef; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pMapperRef; + callContext.pContextRef = pContextRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + resservSwapTlsCallContext(&pOldContext, &callContext); + bRestoreCallContext = NV_TRUE; + + status = refAddInterMapping(pMapperRef, pMappableRef, pContextRef, &pMapping); + if (status != NV_OK) + goto done; + + // serverResLock_Prologue should be called during serverInterMap_Prologue + status = serverInterMap_Prologue(pServer, pMapperRef, pMappableRef, pParams, &releaseFlags); + if (status != NV_OK) + goto done; + + status = clientInterMap(pClient, pMapperRef, pMappableRef, pParams); + if (status != NV_OK) + goto done; + + pMapping->flags = pParams->flags; + pMapping->dmaOffset = pParams->dmaOffset; + pMapping->pMemDesc = pParams->pMemDesc; + +done: + serverInterMap_Epilogue(pServer, pParams, &releaseFlags); + + if (bRestoreCallContext) + resservRestoreTlsCallContext(pOldContext); + + if (status != NV_OK) + { + if (pMapping != NULL) + refRemoveInterMapping(pMapperRef, pMapping); + } + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverInterUnmap +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pParams +) +{ + RsClient *pClient; + RsResourceRef *pMapperRef; + RsResourceRef *pMappableRef; + RsResourceRef *pContextRef; + RsInterMapping *pMapping; + LOCK_ACCESS_TYPE topLockAccess; + + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + NvU32 releaseFlags = 0; + CALL_CONTEXT callContext; + CALL_CONTEXT *pOldContext = NULL; + NvBool bRestoreCallContext = NV_FALSE; + + NV_ASSERT_OR_RETURN(pLockInfo != NULL, NV_ERR_INVALID_ARGUMENT); + + status = serverInterUnmapLookupLockFlags(pServer, RS_LOCK_TOP, pParams, &topLockAccess); + if (status != NV_OK) + goto done; + + status = serverTopLock_Prologue(pServer, topLockAccess, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = _serverLockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, + pLockInfo, &releaseFlags, &pClient); + if (status != NV_OK) + goto done; + + status = clientValidate(pClient, 
pParams->pSecInfo); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hMapper, &pMapperRef); + if (status != NV_OK) + goto done; + + if ((pMapperRef->bInvalidated) && (pMapperRef->pResource == NULL)) + { + // Object has already been freed and unmapped + goto done; + } + + status = clientGetResourceRef(pClient, pParams->hMappable, &pMappableRef); + if (status != NV_OK) + goto done; + + status = clientGetResourceRef(pClient, pParams->hDevice, &pContextRef); + if (status != NV_OK) + goto done; + + status = refFindInterMapping(pMapperRef, pMappableRef, pContextRef, pParams->dmaOffset, &pMapping); + if (status != NV_OK) + goto done; + + portMemSet(&callContext, 0, sizeof(callContext)); + callContext.pServer = pServer; + callContext.pClient = pClient; + callContext.pResourceRef = pMapperRef; + callContext.pContextRef = pContextRef; + callContext.pLockInfo = pLockInfo; + + // Some MODS tests don't set secInfo. + if (pParams->pSecInfo != NULL) + callContext.secInfo = *pParams->pSecInfo; + + if (pLockInfo->pContextRef == NULL) + pLockInfo->pContextRef = pContextRef; + + resservSwapTlsCallContext(&pOldContext, &callContext); + bRestoreCallContext = NV_TRUE; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + if (status != NV_OK) + goto done; + + status = serverInterUnmap_Prologue(pServer, pParams); + if (status != NV_OK) + goto done; + + clientInterUnmap(pClient, pMapperRef, pParams); + + refRemoveInterMapping(pMapperRef, pMapping); + +done: + serverInterUnmap_Epilogue(pServer, pParams); + + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pLockInfo, &releaseFlags); + + if (bRestoreCallContext) + resservRestoreTlsCallContext(pOldContext); + + _serverUnlockClientWithLockInfo(pServer, LOCK_ACCESS_WRITE, pParams->hClient, pLockInfo, &releaseFlags); + serverTopLock_Epilogue(pServer, topLockAccess, pLockInfo, &releaseFlags); + + return status; +} + +NV_STATUS +serverAcquireClient +( + RsServer *pServer, + NvHandle hClient, + LOCK_ACCESS_TYPE lockAccess, + RsClient **ppClient +) +{ + NV_STATUS status; + RsClient *pClient; + + // NV_PRINTF(LEVEL_INFO, "Acquiring hClient %x\n", hClient); + status = _serverLockClient(pServer, lockAccess, hClient, &pClient); + if (status != NV_OK) + return status; + + if (ppClient != NULL) + *ppClient = pClient; + + return NV_OK; +} + +NV_STATUS +serverGetClientUnderLock +( + RsServer *pServer, + NvHandle hClient, + RsClient **ppClient +) +{ + NV_STATUS status; + RsClient *pClient; + + // NV_PRINTF(LEVEL_INFO, "Acquiring hClient %x (without lock)\n", hClient); + status = _serverFindClient(pServer, hClient, &pClient); + if (status != NV_OK) + { + return status; + } + + if (ppClient != NULL) + *ppClient = pClient; + + return NV_OK; +} + +NV_STATUS +serverReleaseClient +( + RsServer *pServer, + LOCK_ACCESS_TYPE lockAccess, + RsClient *pClient +) +{ + NV_STATUS status; + status = _serverUnlockClient(pServer, lockAccess, pClient->hClient); + return status; +} + +static +NV_STATUS +_serverFindClientEntry +( + RsServer *pServer, + NvHandle hClient, + NvBool bFindPartial, + CLIENT_ENTRY **ppClientEntry +) +{ + RsClientList *pClientList = &(pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK]); + CLIENT_ENTRY **ppClientEntryLoop = listHead(pClientList); + + if (ppClientEntry != NULL) + *ppClientEntry = NULL; + + while (ppClientEntryLoop != NULL) + { + CLIENT_ENTRY *pClientEntry = *ppClientEntryLoop; + ppClientEntryLoop = listNext(pClientList, ppClientEntryLoop); + if (pClientEntry == NULL) + 
{ + continue; + } + else if (pClientEntry->hClient == hClient) + { + // Client may not have finished constructing yet + if (pClientEntry->pClient == NULL && !bFindPartial) + return NV_ERR_INVALID_OBJECT_HANDLE; + + if (ppClientEntry != NULL) + *ppClientEntry = pClientEntry; + + return NV_OK; + } + else if (pClientEntry->hClient > hClient) + { + // Not found in sorted list + return NV_ERR_INVALID_OBJECT; + } + } + + return NV_ERR_INVALID_OBJECT_HANDLE; +} + +static +NV_STATUS +_serverFindClient +( + RsServer *pServer, + NvHandle hClient, + RsClient **ppClient +) +{ + CLIENT_ENTRY *pClientEntry; + NV_STATUS status; + status =_serverFindClientEntry(pServer, hClient, NV_FALSE, &pClientEntry); + if (status != NV_OK) + { + return status; + } + + *ppClient = pClientEntry->pClient; + return NV_OK; +} + +static +NV_STATUS +_serverInsertClientEntry +( + RsServer *pServer, + CLIENT_ENTRY *pClientEntry, + CLIENT_ENTRY **ppClientNext +) +{ + RsClientList *pClientList; + CLIENT_ENTRY **ppClientEntry; + NvHandle hClient = pClientEntry->hClient; + + if (hClient == 0) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + pClientList = &(pServer->pClientSortedList[hClient & RS_CLIENT_HANDLE_BUCKET_MASK]); + + if (ppClientNext == NULL) + { + ppClientEntry = (CLIENT_ENTRY **)listAppendNew(pClientList); + } + else + { + ppClientEntry = (CLIENT_ENTRY **)listInsertNew(pClientList, ppClientNext); + } + *ppClientEntry = pClientEntry; + + return NV_OK; +} + +static +NV_STATUS +_serverFindNextAvailableClientHandleInBucket +( + RsServer *pServer, + NvHandle hClientIn, + NvHandle *phClientOut, + CLIENT_ENTRY ***pppClientNext +) +{ + NvHandle hPrefixIn, hPrefixOut; + RsClientList *pClientList = &(pServer->pClientSortedList[hClientIn & RS_CLIENT_HANDLE_BUCKET_MASK]); + NvHandle hClientOut = hClientIn; + CLIENT_ENTRY **ppClientEntry = listHead(pClientList); + + *pppClientNext = NULL; + if (ppClientEntry == NULL) + { + *phClientOut = hClientOut; + return NV_OK; + } + + // + // The list is ordered by increased client handles + // We need to find a value to insert or change the handle + // + while (ppClientEntry != NULL) + { + CLIENT_ENTRY *pClientEntry = *ppClientEntry; + if ((pClientEntry == NULL) || (pClientEntry->hClient < hClientOut)) + { + ppClientEntry = listNext(pClientList, ppClientEntry); + continue; + } + else if (pClientEntry->hClient == hClientOut) + { + // Increase client handle by one unit in same bucket + hClientOut = hClientOut + RS_CLIENT_HANDLE_BUCKET_COUNT; + NV_ASSERT((hClientIn & RS_CLIENT_HANDLE_BUCKET_MASK) == (hClientOut & RS_CLIENT_HANDLE_BUCKET_MASK)); + } + else // last pClientEntry->hClient > hClientOut + { + break; + } + ppClientEntry = listNext(pClientList, ppClientEntry); + } + + hPrefixIn = hClientIn & ~RS_CLIENT_HANDLE_DECODE_MASK; + hPrefixOut = hClientOut & ~RS_CLIENT_HANDLE_DECODE_MASK; + if (hPrefixIn != hPrefixOut) + return NV_ERR_INSUFFICIENT_RESOURCES; + + *phClientOut = hClientOut; + if (ppClientEntry != NULL) + { + *pppClientNext = ppClientEntry; + } + return NV_OK; +} + + +static +NV_STATUS +_serverCreateEntryAndLockForNewClient +( + RsServer *pServer, + NvHandle *phClient, + NvBool bInternalHandle, + CLIENT_ENTRY **ppClientEntry +) +{ + CLIENT_ENTRY *pClientEntry; + NV_STATUS status = NV_OK; + NvHandle hClient = *phClient; + CLIENT_ENTRY **ppClientNext = 0; + PORT_RWLOCK *pLock=NULL; + + if (hClient == 0) + { + NvU32 clientHandleIndex = pServer->clientCurrentHandleIndex; + NvU16 clientHandleBucketInit = clientHandleIndex & RS_CLIENT_HANDLE_BUCKET_MASK; + do + { + hClient = 
bInternalHandle + ? CLIENT_ENCODEHANDLE_INTERNAL(pServer->internalHandleBase, clientHandleIndex) + : CLIENT_ENCODEHANDLE(clientHandleIndex); + + clientHandleIndex++; + if (clientHandleIndex > RS_CLIENT_HANDLE_DECODE_MASK) + { + // We will override the client base, loop over + clientHandleIndex = 0; + } + if (clientHandleBucketInit == (clientHandleIndex & RS_CLIENT_HANDLE_BUCKET_MASK)) + { + // We looked through all buckets and we did not find any available client (very unlikely) + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto _serverCreateEntryAndLockForNewClient_exit; + } + } + while (_serverFindNextAvailableClientHandleInBucket(pServer, hClient, &hClient, &ppClientNext) != NV_OK); + + pServer->clientCurrentHandleIndex = clientHandleIndex; + } + else + { + NvHandle hClientOut = 0; + +#if !(RS_COMPATABILITY_MODE) + // Re-encode handle so it matches expected format + NvU32 clientIndex = CLIENT_DECODEHANDLE(hClient); + hClient = bInternalHandle + ? CLIENT_ENCODEHANDLE_INTERNAL(clientIndex) + : CLIENT_ENCODEHANDLE(clientIndex); +#endif + + if (_serverFindClientEntry(pServer, hClient, NV_FALSE, NULL) == NV_OK) + { + // The handle already exists + status = NV_ERR_INSERT_DUPLICATE_NAME; + goto _serverCreateEntryAndLockForNewClient_exit; + } + status = _serverFindNextAvailableClientHandleInBucket(pServer, hClient, &hClientOut, &ppClientNext); + if (status != NV_OK) + { + goto _serverCreateEntryAndLockForNewClient_exit; + } + if (hClient != hClientOut) + { + // This should not happen as we checked for duplicates already + NV_PRINTF(LEVEL_ERROR, "Client handle mismatch: %x != %x.\n", hClient, hClientOut); + status = NV_ERR_INVALID_STATE; + goto _serverCreateEntryAndLockForNewClient_exit; + } + } + + pLock = portSyncRwLockCreate(pServer->pAllocator); + if (pLock == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto _serverCreateEntryAndLockForNewClient_exit; + } + + // At this point we have a hClient, we know in which bucket and where in the bucket to insert the entry. + pClientEntry = (CLIENT_ENTRY *)PORT_ALLOC(pServer->pAllocator, sizeof(CLIENT_ENTRY)); + if (pClientEntry == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto _serverCreateEntryAndLockForNewClient_exit; + } + portMemSet(pClientEntry, 0, sizeof(*pClientEntry)); + + pClientEntry->hClient = hClient; + pClientEntry->pLock = pLock; + + + RS_LOCK_VALIDATOR_INIT(&pClientEntry->lockVal, + bInternalHandle ? 
LOCK_VAL_LOCK_CLASS_CLIENT_INTERNAL : LOCK_VAL_LOCK_CLASS_CLIENT, + hClient); + + status = _serverInsertClientEntry(pServer, pClientEntry, ppClientNext); + if (status != NV_OK) + { + PORT_FREE(pServer->pAllocator, pClientEntry); + goto _serverCreateEntryAndLockForNewClient_exit; + } + + RS_RWLOCK_ACQUIRE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal); + pClientEntry->lockOwnerTid = portThreadGetCurrentThreadId(); + + *phClient = hClient; + *ppClientEntry = pClientEntry; + +_serverCreateEntryAndLockForNewClient_exit: + if (status != NV_OK && pLock != NULL) + portSyncRwLockDestroy(pLock); + + return status; +} + + +static +NV_STATUS +_serverLockClient +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient, + RsClient **ppClient +) +{ + RsClient *pClient; + CLIENT_ENTRY *pClientEntry = NULL; + NV_STATUS status = NV_OK; + + status =_serverFindClientEntry(pServer, hClient, NV_FALSE, &pClientEntry); + if (status != NV_OK) + { + return status; + } + + nv_speculation_barrier(); + + if (pClientEntry->pLock == NULL) + { + return NV_ERR_INVALID_OBJECT_HANDLE; + } + + if (access == LOCK_ACCESS_READ) + { + RS_RWLOCK_ACQUIRE_READ(pClientEntry->pLock, &pClientEntry->lockVal); + } + else + { + RS_RWLOCK_ACQUIRE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal); + pClientEntry->lockOwnerTid = portThreadGetCurrentThreadId(); + } + + pClient = pClientEntry->pClient; + NV_ASSERT(pClient->hClient == pClientEntry->hClient); + + if ((pClient == NULL) || (pClient->hClient != hClient)) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pClientEntry->pLock, &pClientEntry->lockVal); + else + RS_RWLOCK_RELEASE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal); + + return NV_ERR_INVALID_OBJECT; + } + + if (ppClient != NULL) + *ppClient = pClient; + + return NV_OK; +} + +static +NV_STATUS +_serverLockClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags, + RsClient **ppClient +) +{ + NV_STATUS status; + if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_CLIENT_LOCK)) + { + status = _serverFindClient(pServer, hClient, ppClient); + return status; + } + + if ((pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED)) + { + CLIENT_ENTRY *pClientEntry; + NV_ASSERT_OK_OR_RETURN(_serverFindClientEntry(pServer, hClient, NV_FALSE, &pClientEntry)); + NV_ASSERT_OR_RETURN(pLockInfo->pClient != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pLockInfo->pClient == pClientEntry->pClient, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pClientEntry->lockOwnerTid == portThreadGetCurrentThreadId(), NV_ERR_INVALID_STATE); + + *ppClient = pLockInfo->pClient; + return NV_OK; + } + + status = _serverLockClient(pServer, access, hClient, ppClient); + if (status != NV_OK) + return status; + + pLockInfo->state |= RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = *ppClient; + *pReleaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK; + + return NV_OK; +} + +static +NV_STATUS +_serverLockDualClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient1, + NvHandle hClient2, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags, + RsClient **ppClient1, + RsClient **ppClient2 +) +{ + NV_STATUS status; + + // 1st and 2nd in handle order, as opposed to fixed 1 and 2 + NvHandle hClient1st; + NvHandle hClient2nd; + RsClient **ppClient1st; + RsClient **ppClient2nd; + + *ppClient1 = NULL; + *ppClient2 = NULL; + + if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_CLIENT_LOCK)) + { + status = _serverFindClient(pServer, hClient1, 
ppClient1); + if (status != NV_OK) + return status; + + if (hClient1 == hClient2) + { + *ppClient2 = *ppClient1; + } + else + { + status = _serverFindClient(pServer, hClient2, ppClient2); + } + + return status; + } + + if (hClient1 <= hClient2) + { + hClient1st = hClient1; + ppClient1st = ppClient1; + + hClient2nd = hClient2; + ppClient2nd = ppClient2; + } + else + { + hClient1st = hClient2; + ppClient1st = ppClient2; + + hClient2nd = hClient1; + ppClient2nd = ppClient1; + } + + if ((pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED)) + { + CLIENT_ENTRY *pClientEntry, *pSecondClientEntry; + + NV_ASSERT_OR_RETURN(pLockInfo->pSecondClient != NULL, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pLockInfo->pClient->hClient == hClient1st, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pLockInfo->pSecondClient->hClient == hClient2nd, NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN(_serverFindClientEntry(pServer, hClient1st, NV_FALSE, &pClientEntry)); + NV_ASSERT_OR_RETURN(pClientEntry->pClient == pLockInfo->pClient, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pClientEntry->lockOwnerTid == portThreadGetCurrentThreadId(), NV_ERR_INVALID_STATE); + + NV_ASSERT_OK_OR_RETURN(_serverFindClientEntry(pServer, hClient2nd, NV_FALSE, &pSecondClientEntry)); + NV_ASSERT_OR_RETURN(pSecondClientEntry->pClient == pLockInfo->pSecondClient, NV_ERR_INVALID_STATE); + NV_ASSERT_OR_RETURN(pSecondClientEntry->lockOwnerTid == pClientEntry->lockOwnerTid, NV_ERR_INVALID_STATE); + + *ppClient1st = pLockInfo->pClient; + *ppClient2nd = pLockInfo->pSecondClient; + return NV_OK; + } + + status = _serverLockClient(pServer, access, hClient1st, ppClient1st); + if (status != NV_OK) + return status; + + if (hClient1 == hClient2) + { + *ppClient2nd = *ppClient1st; + } + else + { + status = _serverLockClient(pServer, access, hClient2nd, ppClient2nd); + if (status != NV_OK) + { + _serverUnlockClient(pServer, access, hClient1st); + return status; + } + } + + pLockInfo->state |= RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = *ppClient1st; + pLockInfo->pSecondClient = *ppClient2nd; + *pReleaseFlags |= RS_LOCK_RELEASE_CLIENT_LOCK; + + return NV_OK; +} + +static +NV_STATUS +_serverUnlockClient +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient +) +{ + CLIENT_ENTRY *pClientEntry = NULL; + NV_STATUS status = NV_OK; + + status =_serverFindClientEntry(pServer, hClient, NV_TRUE, &pClientEntry); + if (status != NV_OK) + { + return status; + } + + if (access == LOCK_ACCESS_READ) + { + RS_RWLOCK_RELEASE_READ(pClientEntry->pLock, &pClientEntry->lockVal); + } + else + { + pClientEntry->lockOwnerTid = ~0; + RS_RWLOCK_RELEASE_WRITE(pClientEntry->pLock, &pClientEntry->lockVal); + } + + return NV_OK; +} + +static +NV_STATUS +_serverUnlockClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + NV_STATUS status; + if (*pReleaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK) + { + status = _serverUnlockClient(pServer, access, hClient); + if (status != NV_OK) + return status; + + pLockInfo->state &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED; + pLockInfo->pClient = NULL; + *pReleaseFlags &= ~RS_LOCK_RELEASE_CLIENT_LOCK; + } + return NV_OK; +} + +static +NV_STATUS +_serverUnlockDualClientWithLockInfo +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + NvHandle hClient1, + NvHandle hClient2, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + // 1st and 2nd in handle order, as opposed to fixed 1 and 2 + NvHandle hClient1st = NV_MIN(hClient1, 
hClient2);
+    NvHandle hClient2nd = NV_MAX(hClient1, hClient2);
+
+    if (*pReleaseFlags & RS_LOCK_RELEASE_CLIENT_LOCK)
+    {
+        // Try to unlock both, even if one fails
+        NV_ASSERT_OK(_serverUnlockClient(pServer, access, hClient2nd));
+        if (hClient1 != hClient2)
+            NV_ASSERT_OK(_serverUnlockClient(pServer, access, hClient1st));
+
+        pLockInfo->state &= ~RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED;
+        pLockInfo->pClient = NULL;
+        pLockInfo->pSecondClient = NULL;
+        *pReleaseFlags &= ~RS_LOCK_RELEASE_CLIENT_LOCK;
+    }
+
+    return NV_OK;
+}
+
+NvU32
+serverGetClientCount(RsServer *pServer)
+{
+    return pServer->activeClientCount;
+}
+
+NvU64
+serverGetResourceCount(RsServer *pServer)
+{
+    return pServer->activeResourceCount;
+}
+
+NV_STATUS
+resservSwapTlsCallContext
+(
+    CALL_CONTEXT **ppOldCallContext,
+    CALL_CONTEXT  *pNewCallContext
+)
+{
+    CALL_CONTEXT **ppTlsCallContext;
+
+    if (ppOldCallContext == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    ppTlsCallContext = (CALL_CONTEXT**)tlsEntryAcquire(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    if (ppTlsCallContext == NULL)
+        return NV_ERR_INVALID_STATE;
+
+    *ppOldCallContext = *ppTlsCallContext;
+    *ppTlsCallContext = pNewCallContext;
+
+    // New call contexts inherit the bDeferredApi flag from the old
+    if ((*ppOldCallContext != NULL) && (pNewCallContext != NULL) &&
+        (pNewCallContext->pControlParams != NULL) &&
+        ((*ppOldCallContext)->pControlParams != NULL))
+    {
+        pNewCallContext->pControlParams->bDeferredApi |=
+            (*ppOldCallContext)->pControlParams->bDeferredApi;
+    }
+
+    return NV_OK;
+}
+
+CALL_CONTEXT *
+resservGetTlsCallContext(void)
+{
+    CALL_CONTEXT *pTlsCallContext = NvP64_VALUE(tlsEntryGet(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT));
+    return pTlsCallContext;
+}
+
+NV_STATUS
+resservRestoreTlsCallContext
+(
+    CALL_CONTEXT *pOldCallContext
+)
+{
+    CALL_CONTEXT **ppTlsCallContext = (CALL_CONTEXT**)tlsEntryAcquire(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    if (ppTlsCallContext == NULL)
+        return NV_ERR_INVALID_ARGUMENT;
+
+    *ppTlsCallContext = pOldCallContext;
+
+    // Release twice: once for the tlsEntryAcquire() above and once to drop the
+    // reference left held by resservSwapTlsCallContext().
+    tlsEntryRelease(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+    tlsEntryRelease(TLS_ENTRY_ID_RESSERV_CALL_CONTEXT);
+
+    return NV_OK;
+}
+
+RsResourceRef *
+resservGetContextRefByType(NvU32 internalClassId, NvBool bSearchAncestors)
+{
+    CALL_CONTEXT *pCallContext = resservGetTlsCallContext();
+    RsResourceRef *pContextRef = NULL;
+
+    if (pCallContext == NULL)
+        return NULL;
+
+    if (pCallContext->pResourceRef != NULL)
+    {
+        if (pCallContext->pResourceRef->internalClassId == internalClassId)
+        {
+            return pCallContext->pResourceRef;
+        }
+        else if (bSearchAncestors &&
+                 (refFindAncestorOfType(pCallContext->pResourceRef, internalClassId, &pContextRef) == NV_OK))
+        {
+            return pContextRef;
+        }
+    }
+
+    if (pCallContext->pContextRef != NULL)
+    {
+        if (pCallContext->pContextRef->internalClassId == internalClassId)
+        {
+            return pCallContext->pContextRef;
+        }
+        else if (bSearchAncestors &&
+                 (refFindAncestorOfType(pCallContext->pContextRef, internalClassId, &pContextRef) == NV_OK))
+        {
+            return pContextRef;
+        }
+    }
+
+    return NULL;
+}
+
+NV_STATUS serverFreeClient(RsServer *pServer, RS_CLIENT_FREE_PARAMS* pParams)
+{
+    RS_RES_FREE_PARAMS params;
+    RS_LOCK_INFO lockInfo;
+
+    portMemSet(&lockInfo, 0, sizeof(lockInfo));
+    portMemSet(&params, 0, sizeof(params));
+    params.hClient = pParams->hClient;
+    params.hResource = pParams->hClient;
+    params.bHiPriOnly = pParams->bHiPriOnly;
+    lockInfo.state = pParams->state;
+    params.pLockInfo = &lockInfo;
+    params.pSecInfo = pParams->pSecInfo;
+
+    return serverFreeResourceTree(pServer, &params);
+}
+
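+//
+// Illustrative sketch (not part of the shipping source): how a caller might
+// use the resservSwapTlsCallContext()/resservRestoreTlsCallContext() pair
+// above to make a CALL_CONTEXT current for the duration of an operation.
+// The local variables and the work function are hypothetical; only the
+// resserv* calls and the CALL_CONTEXT fields referenced elsewhere in this
+// file are taken from the real API.
+//
+//     CALL_CONTEXT  callContext;
+//     CALL_CONTEXT *pOldContext = NULL;
+//     NV_STATUS     status;
+//
+//     portMemSet(&callContext, 0, sizeof(callContext));
+//     callContext.pServer      = pServer;       // hypothetical locals
+//     callContext.pResourceRef = pResourceRef;
+//     callContext.pLockInfo    = pLockInfo;
+//
+//     NV_ASSERT_OK_OR_RETURN(resservSwapTlsCallContext(&pOldContext, &callContext));
+//     status = doSomeResourceWork(pServer);     // hypothetical work under the new context
+//     NV_ASSERT_OK(resservRestoreTlsCallContext(pOldContext));
+//
+
+NV_STATUS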
+shrConstruct_IMPL +( + RsShared *pShare +) +{ + return NV_OK; +} + +void +shrDestruct_IMPL +( + RsShared *pShare +) +{ +} + +NV_STATUS +sessionConstruct_IMPL +( + RsSession *pSession +) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + pSession->bValid = NV_TRUE; + listInit(&pSession->dependencies, pCallContext->pServer->pAllocator); + listInit(&pSession->dependants, pCallContext->pServer->pAllocator); + pSession->pLock = portSyncRwLockCreate(pCallContext->pServer->pAllocator); + + RS_LOCK_VALIDATOR_INIT(&pSession->lockVal, LOCK_VAL_LOCK_CLASS_SESSION, LOCK_VAL_LOCK_GENERATE); + return NV_OK; +} + +void +sessionDestruct_IMPL +( + RsSession *pSession +) +{ + NV_ASSERT(listCount(&pSession->dependencies) == 0); + NV_ASSERT(listCount(&pSession->dependants) == 0); + listDestroy(&pSession->dependencies); + listDestroy(&pSession->dependants); + pSession->pLock = NULL; +} + +NV_STATUS +sessionAddDependant_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + NV_STATUS status; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + status = sessionCheckLocksForAdd(pSession, pResourceRef); + + if (status != NV_OK) + return status; + + if (pResourceRef->pSession == pSession) + return NV_OK; + + NV_ASSERT_OR_RETURN(pResourceRef->pSession == NULL, NV_ERR_INVALID_ARGUMENT); + + if (listAppendValue(&pSession->dependants, &pResourceRef) == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + serverRefShare(pCallContext->pServer, staticCast(pSession, RsShared)); + + pResourceRef->pSession = pSession; + + return NV_OK; +} + +NV_STATUS +sessionAddDependency_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + NV_STATUS status; + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + + status = sessionCheckLocksForAdd(pSession, pResourceRef); + + if (status != NV_OK) + return status; + + if (pResourceRef->pDependantSession == pSession) + return NV_OK; + + NV_ASSERT_OR_RETURN(pResourceRef->pDependantSession == NULL, NV_ERR_INVALID_ARGUMENT); + + if (listAppendValue(&pSession->dependencies, &pResourceRef) == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + serverRefShare(pCallContext->pServer, staticCast(pSession, RsShared)); + + pResourceRef->pDependantSession = pSession; + + return NV_OK; +} + +void +sessionRemoveDependant_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + listRemoveFirstByValue(&pSession->dependants, &pResourceRef); + sessionCheckLocksForRemove(pSession, pResourceRef); + pResourceRef->pSession = NULL; +} + +void +sessionRemoveDependency_IMPL +( + RsSession *pSession, + RsResourceRef *pResourceRef +) +{ + listRemoveFirstByValue(&pSession->dependencies, &pResourceRef); + pSession->bValid = NV_FALSE; + sessionCheckLocksForRemove(pSession, pResourceRef); + pResourceRef->pDependantSession = NULL; +} + +NV_STATUS sessionCheckLocksForAdd_IMPL(RsSession *pSession, RsResourceRef *pResourceRef) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RS_LOCK_INFO *pLockInfo; + + NV_ASSERT_OR_RETURN(pCallContext != NULL, NV_ERR_INVALID_STATE); + pLockInfo = pCallContext->pLockInfo; + + NV_ASSERT_OR_RETURN((pLockInfo != NULL), NV_ERR_INVALID_STATE); + + if (!serverRwApiLockIsOwner(pCallContext->pServer)) + { + // Assert clients locked or RW lock + if (pLockInfo->state & RS_LOCK_STATE_CLIENT_LOCK_ACQUIRED) + { + NV_ASSERT_OR_RETURN((pLockInfo->pClient == pResourceRef->pClient) || + (pLockInfo->pSecondClient == pResourceRef->pClient), + NV_ERR_INVALID_ARGUMENT); + } + else if (pLockInfo->state & 
RS_LOCK_STATE_TOP_LOCK_ACQUIRED) + { + NV_ASSERT_OR_RETURN((pLockInfo->pClient == NULL) && (pLockInfo->pSecondClient == NULL), NV_ERR_INVALID_ARGUMENT); + } + else + { + NV_ASSERT_FAILED("Incorrect locks taken"); + return NV_ERR_INVALID_LOCK_STATE; + } + } + + return NV_OK; +} + +void sessionCheckLocksForRemove_IMPL(RsSession *pSession, RsResourceRef *pResourceRef) +{ + CALL_CONTEXT *pCallContext = resservGetTlsCallContext(); + RS_LOCK_INFO *pLockInfo; + + NV_ASSERT(pCallContext != NULL); + pLockInfo = pCallContext->pLockInfo; + + NV_ASSERT(pLockInfo != NULL); + + if (pLockInfo->flags & RS_LOCK_FLAGS_FREE_SESSION_LOCK) + { + RsShared *pShared = staticCast(pSession, RsShared); + PORT_RWLOCK *pSessionLock = pSession->pLock; + NvBool bDestroy = (pShared->refCount == 1); + + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED) || !bDestroy) + { + serverFreeShare(pCallContext->pServer, pShared); + pLockInfo->flags &= ~RS_LOCK_FLAGS_FREE_SESSION_LOCK; + } + + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED) && bDestroy) + portSyncRwLockDestroy(pSessionLock); + } +} + +NV_STATUS +serverAllocShareWithHalspecParent +( + RsServer *pServer, + const NVOC_CLASS_INFO *pClassInfo, + RsShared **ppShare, + Object *pHalspecParent +) +{ + RsShared *pShare; + NV_STATUS status; + Dynamic *pDynamic = NULL; + NvU32 flags = NVOC_OBJ_CREATE_FLAGS_NONE; + + if (pClassInfo == NULL) + return NV_ERR_INVALID_CLASS; + + if (pHalspecParent != NULL) + flags |= NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY; + + status = objCreateDynamicWithFlags(&pDynamic, + pHalspecParent, + (const NVOC_CLASS_INFO*)(const void*)pClassInfo, + flags); + if (status != NV_OK) + return status; + + if (pDynamic == NULL) + return NV_ERR_INSUFFICIENT_RESOURCES; + + pShare = dynamicCast(pDynamic, RsShared); + if (pShare == NULL) + { + status = NV_ERR_INVALID_CLASS; + goto fail; + } + + pShare->refCount = 1; + + portSyncSpinlockAcquire(pServer->pShareMapLock); + if (mapInsertExisting(&pServer->shareMap, (NvUPtr)pShare, pShare) != NV_TRUE) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + portSyncSpinlockRelease(pServer->pShareMapLock); + goto fail; + } + portSyncSpinlockRelease(pServer->pShareMapLock); + + if (ppShare != NULL) + *ppShare = pShare; + + return NV_OK; + +fail: + if (pShare != NULL) + { + objDelete(pShare); + } + + return status; +} + +NV_STATUS +serverAllocShare +( + RsServer *pServer, + const NVOC_CLASS_INFO *pClassInfo, + RsShared **ppShare +) +{ + return serverAllocShareWithHalspecParent(pServer, pClassInfo, ppShare, NULL); +} + +NvS32 +serverGetShareRefCount +( + RsServer *pServer, + RsShared *pShare +) +{ + return pShare->refCount; +} + +NV_STATUS +serverRefShare +( + RsServer *pServer, + RsShared *pShare +) +{ + portAtomicIncrementS32(&pShare->refCount); + return NV_OK; +} + +NV_STATUS +serverFreeShare +( + RsServer *pServer, + RsShared *pShare +) +{ + if (portAtomicDecrementS32(&pShare->refCount) == 0) + { + portSyncSpinlockAcquire(pServer->pShareMapLock); + mapRemove(&pServer->shareMap, pShare); + portSyncSpinlockRelease(pServer->pShareMapLock); + + objDelete(pShare); + } + return NV_OK; +} + +RS_SHARE_ITERATOR +serverShareIter +( + RsServer *pServer, + NvU32 internalClassId +) +{ + RS_SHARE_ITERATOR it; + portMemSet(&it, 0, sizeof(it)); + it.internalClassId = internalClassId; + it.mapIt = mapIterAll(&pServer->shareMap); + + return it; +} + +NvBool +serverShareIterNext +( + RS_SHARE_ITERATOR* pIt +) +{ + NvBool bLoop = NV_TRUE; + if (pIt == NULL) + return NV_FALSE; + + pIt->pShared = NULL; + bLoop = 
mapIterNext(&pIt->mapIt); + while(bLoop) + { + RsShared *pShared = pIt->mapIt.pValue; + if ((pIt->internalClassId == 0) || (objDynamicCastById(pShared, pIt->internalClassId) != NULL)) + { + pIt->pShared = pShared; + return NV_TRUE; + } + bLoop = mapIterNext(&pIt->mapIt); + } + + return NV_FALSE; +} + +#if (RS_PROVIDES_API_STATE) +NV_STATUS +serverAllocApiCopyIn +( + RsServer *pServer, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams, + API_STATE **ppApiState +) +{ + if (ppApiState != NULL) + *ppApiState = NULL; + + return NV_OK; +} + +NV_STATUS +serverAllocApiCopyOut +( + RsServer *pServer, + NV_STATUS status, + API_STATE *pApiState +) +{ + return status; +} +#endif + +#if (RS_STANDALONE) +NV_STATUS +serverAllocEpilogue_WAR +( + RsServer *pServer, + NV_STATUS status, + NvBool bClientAlloc, + RS_RES_ALLOC_PARAMS_INTERNAL *pAllocParams +) +{ + return status; +} + +NV_STATUS +serverLookupSecondClient +( + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + NvHandle *phClient +) +{ + *phClient = 0; + + return NV_OK; +} + +NV_STATUS serverTopLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if ((pLockInfo->flags & RS_LOCK_FLAGS_NO_TOP_LOCK)) + return NV_OK; + + if (!(pLockInfo->state & RS_LOCK_STATE_TOP_LOCK_ACQUIRED)) + { + if (access == LOCK_ACCESS_READ) + { + RS_RWLOCK_ACQUIRE_READ(pServer->pTopLock, &pServer->topLockVal); + } + else + { + RS_RWLOCK_ACQUIRE_WRITE(pServer->pTopLock, &pServer->topLockVal); + pServer->topLockOwnerTid = portThreadGetCurrentThreadId(); + } + + pLockInfo->state |= RS_LOCK_STATE_TOP_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_TOP_LOCK; + } + else if (access == LOCK_ACCESS_WRITE) + { + NV_ASSERT_OR_RETURN(pServer->topLockOwnerTid == portThreadGetCurrentThreadId(), + NV_ERR_INVALID_LOCK_STATE); + } + + return NV_OK; +} + +void +serverTopLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RS_LOCK_RELEASE_TOP_LOCK) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pServer->pTopLock, &pServer->topLockVal); + else + { + pServer->topLockOwnerTid = ~0; + RS_RWLOCK_RELEASE_WRITE(pServer->pTopLock, &pServer->topLockVal); + } + + pLockInfo->state &= ~RS_LOCK_STATE_TOP_LOCK_ACQUIRED; + *pReleaseFlags &= ~RS_LOCK_RELEASE_TOP_LOCK; + } +} + +NV_STATUS +serverResLock_Prologue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (!(pLockInfo->state & RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED)) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_ACQUIRE_READ(pServer->pResLock, &pServer->resLockVal); + else + RS_RWLOCK_ACQUIRE_WRITE(pServer->pResLock, &pServer->resLockVal); + + pLockInfo->state |= RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_CUSTOM_LOCK_1; + } + + return NV_OK; +} + +void +serverResLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + if (*pReleaseFlags & RS_LOCK_RELEASE_CUSTOM_LOCK_1) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pServer->pResLock, &pServer->resLockVal); + else + RS_RWLOCK_RELEASE_WRITE(pServer->pResLock, &pServer->resLockVal); + + pLockInfo->state &= ~RS_LOCK_STATE_CUSTOM_LOCK_1_ACQUIRED; + *pReleaseFlags &= ~RS_LOCK_RELEASE_CUSTOM_LOCK_1; + } +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverMap_Prologue +( + RsServer *pServer, + RS_CPU_MAP_PARAMS *pMapParams +) +{ + return NV_OK; +} +#endif /* !RS_STANDALONE_TEST 
*/ + +void +serverMap_Epilogue +( + RsServer *pServer, + RS_CPU_MAP_PARAMS *pMapParams +) +{ +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverUnmap_Prologue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ + return NV_OK; +} +#endif /* !RS_STANDALONE_TEST */ + +void +serverUnmap_Epilogue +( + RsServer *pServer, + RS_CPU_UNMAP_PARAMS *pUnmapParams +) +{ +} + +void +serverControl_InitCookie +( + const struct NVOC_EXPORTED_METHOD_DEF *pExportedEntry, + RS_CONTROL_COOKIE *pCookie +) +{ +} + +NV_STATUS +serverInterMap_Prologue +( + RsServer *pServer, + RsResourceRef *pMapperRef, + RsResourceRef *pMappableRef, + RS_INTER_MAP_PARAMS *pMapParams, + NvU32 *pReleaseFlags +) +{ + NV_STATUS status; + + status = serverResLock_Prologue(pServer, LOCK_ACCESS_WRITE, pMapParams->pLockInfo, pReleaseFlags); + + return status; +} + +void +serverInterMap_Epilogue +( + RsServer *pServer, + RS_INTER_MAP_PARAMS *pMapParams, + NvU32 *pReleaseFlags +) +{ + serverResLock_Epilogue(pServer, LOCK_ACCESS_WRITE, pMapParams->pLockInfo, pReleaseFlags); +} + +NV_STATUS +serverInterUnmap_Prologue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pUnmapParams +) +{ + return NV_OK; +} + +void +serverInterUnmap_Epilogue +( + RsServer *pServer, + RS_INTER_UNMAP_PARAMS *pUnmapParams +) +{ +} + +NvBool +serverRwApiLockIsOwner +( + RsServer *pServer +) +{ + return (pServer->topLockOwnerTid == portThreadGetCurrentThreadId()); +} + +NV_STATUS +serverAllocResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_ALLOC_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + if (lock == RS_LOCK_TOP) + { + NvBool bClientAlloc = (pParams->externalClassId == NV01_ROOT || + pParams->externalClassId == NV01_ROOT_CLIENT || + pParams->externalClassId == NV01_ROOT_NON_PRIV); + + if (bClientAlloc) + { + *pAccess = LOCK_ACCESS_WRITE; + return NV_OK; + } + } + + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_ALLOC_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverFreeResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_FREE_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_FREE_RESOURCE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverCopyResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_DUP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_COPY)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverShareResourceLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_SHARE_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_SHARE)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +#if !(RS_STANDALONE_TEST) +NV_STATUS +serverControlLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_CTRL)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} +#endif + +NV_STATUS +serverMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_MAP)) + ? 
LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + return NV_OK; +} + +NV_STATUS +serverUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_CPU_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverInterMapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_MAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_MAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverInterUnmapLookupLockFlags +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_INTER_UNMAP_PARAMS *pParams, + LOCK_ACCESS_TYPE *pAccess +) +{ + *pAccess = (serverSupportsReadOnlyLock(pServer, lock, RS_API_INTER_UNMAP)) + ? LOCK_ACCESS_READ + : LOCK_ACCESS_WRITE; + + return NV_OK; +} + +NV_STATUS +serverControl_ValidateCookie +( + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie +) +{ + return NV_OK; +} + +NV_STATUS +serverControlApiCopyIn +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie +) +{ + return NV_OK; +} + +NV_STATUS +serverControlApiCopyOut +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + RS_CONTROL_COOKIE *pCookie, + NV_STATUS rmStatus +) +{ + return NV_OK; +} + +NV_STATUS +serverInitGlobalSharePolicies +( + RsServer *pServer +) +{ + return NV_OK; +} +#endif + +NV_STATUS +serverSessionLock_Prologue +( + LOCK_ACCESS_TYPE access, + RsResourceRef *pResourceRef, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + RsSession *pSession = pResourceRef->pSession; + RsSession *pDependantSession = pResourceRef->pDependantSession; + + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED)) + { + if (pSession != NULL) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_ACQUIRE_READ(pSession->pLock, &pSession->lockVal); + else + RS_RWLOCK_ACQUIRE_WRITE(pSession->pLock, &pSession->lockVal); + pLockInfo->state |= RS_LOCK_STATE_SESSION_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_SESSION_LOCK; + + pLockInfo->pSession = pSession; + } + } + else + { + NV_ASSERT_OR_RETURN(pLockInfo->pSession == pSession, NV_ERR_INVALID_LOCK_STATE); + } + + if (!(pLockInfo->flags & RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK) && + (pDependantSession != NULL)) + { + if (!(pLockInfo->state & RS_LOCK_STATE_SESSION_LOCK_ACQUIRED)) + { + // + // The only reason we lock the back reference session is if we're freeing the + // resource so take the write lock in all cases + // + RS_RWLOCK_ACQUIRE_WRITE(pDependantSession->pLock, &pDependantSession->lockVal); + + pLockInfo->state |= RS_LOCK_STATE_SESSION_LOCK_ACQUIRED; + *pReleaseFlags |= RS_LOCK_RELEASE_SESSION_LOCK; + + pLockInfo->pSession = pDependantSession; + } + else + { + // + // For now, don't allow a resource to be both depended on and depending on a + // session to keep this locking code simpler. We'll have to revisit if that + // becomes necessary. 
+ // + NV_ASSERT_OR_RETURN(pLockInfo->pSession == pDependantSession, NV_ERR_INVALID_LOCK_STATE); + } + } + + pLockInfo->flags &= ~RS_LOCK_FLAGS_NO_DEPENDANT_SESSION_LOCK; + + return NV_OK; +} + +void +serverSessionLock_Epilogue +( + RsServer *pServer, + LOCK_ACCESS_TYPE access, + RS_LOCK_INFO *pLockInfo, + NvU32 *pReleaseFlags +) +{ + RsSession *pSession = pLockInfo->pSession; + + if ((pSession != NULL) && (*pReleaseFlags & RS_LOCK_RELEASE_SESSION_LOCK)) + { + if (access == LOCK_ACCESS_READ) + RS_RWLOCK_RELEASE_READ(pSession->pLock, &pSession->lockVal); + else + RS_RWLOCK_RELEASE_WRITE(pSession->pLock, &pSession->lockVal); + + pLockInfo->state &= ~RS_LOCK_STATE_SESSION_LOCK_ACQUIRED; + *pReleaseFlags &= ~RS_LOCK_RELEASE_SESSION_LOCK; + + if (pLockInfo->flags & RS_LOCK_FLAGS_FREE_SESSION_LOCK) + { + RsShared *pShared = staticCast(pSession, RsShared); + PORT_RWLOCK *pSessionLock = pSession->pLock; + + serverFreeShare(pServer, pShared); + portSyncRwLockDestroy(pSessionLock); + } + + pLockInfo->pSession = NULL; + } + + pLockInfo->flags &= ~RS_LOCK_FLAGS_FREE_SESSION_LOCK; +} + +NV_STATUS serverControl_Prologue +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE *pAccess, + NvU32* pReleaseFlags +) +{ + NV_STATUS status; + RS_LOCK_INFO *pLockInfo = pParams->pLockInfo; + + status = serverControl_ValidateCookie(pParams, pParams->pCookie); + if (status != NV_OK) + return status; + + serverControlLookupLockFlags(pServer, RS_LOCK_RESOURCE, pParams, pParams->pCookie, pAccess); + if (status != NV_OK) + return status; + + if (!pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyIn(pServer, pParams, pParams->pCookie); + if (status != NV_OK) + return status; + } + + pLockInfo->traceOp = RS_LOCK_TRACE_CTRL; + pLockInfo->traceClassId = pParams->cmd; + status = serverResLock_Prologue(pServer, *pAccess, pParams->pLockInfo, pReleaseFlags); + if (status != NV_OK) + return status; + + return NV_OK; +} + +NV_STATUS +serverControl_Epilogue +( + RsServer *pServer, + RS_RES_CONTROL_PARAMS_INTERNAL *pParams, + LOCK_ACCESS_TYPE access, + NvU32 *pReleaseFlags, + NV_STATUS status +) +{ + serverResLock_Epilogue(pServer, access, pParams->pLockInfo, pReleaseFlags); + + if (!pServer->bUnlockedParamCopy) + { + status = serverControlApiCopyOut(pServer, pParams, pParams->pCookie, status); + } + + return status; +} + +NvBool +serverSupportsReadOnlyLock +( + RsServer *pServer, + RS_LOCK_ENUM lock, + RS_API_ENUM api +) +{ + NV_ASSERT(api < RS_API_MAX); + if (lock == RS_LOCK_TOP) + { + return (!!(pServer->roTopLockApiMask & NVBIT(api))); + } + + return NV_FALSE; +} diff --git a/src/nvidia/src/libraries/tls/tls.c b/src/nvidia/src/libraries/tls/tls.c new file mode 100644 index 000000000..a7867a6a2 --- /dev/null +++ b/src/nvidia/src/libraries/tls/tls.c @@ -0,0 +1,661 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2016-2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "tls/tls.h"
+#include "containers/map.h"
+#include "nvport/nvport.h"
+
+/// @todo Figure out which builds have upward stack. Looks like none?
+#define STACK_GROWS_DOWNWARD 1
+
+
+/**
+ * @brief Entry which counts how many times some data in TLS has been referenced.
+ */
+typedef struct TlsEntry
+{
+    NvU32 refCount;
+    NvP64 pUserData;
+    MapNode node;
+} TlsEntry;
+
+MAKE_INTRUSIVE_MAP(TlsEntryMap, TlsEntry, node);
+
+/**
+ * @brief Single thread's TLS information
+ */
+typedef struct ThreadEntry
+{
+    union {
+        NvU64 threadId; ///< For passive threads
+        NvU64 sp;       ///< For ISR threads
+    } key; /// @todo Use node.key instead?
+    TlsEntryMap map;
+    MapNode node;
+} ThreadEntry;
+
+MAKE_INTRUSIVE_MAP(ThreadEntryMap, ThreadEntry, node);
+
+/**
+ * @brief Stores all necessary data for TLS mechanism.
+ *
+ * @todo Use RW Spinlocks instead. Nice perf boost.
+ */
+typedef struct TlsDatabase
+{
+    /// @brief Allocator which allocates all necessary data for current @ref TlsDatabase.
+    PORT_MEM_ALLOCATOR *pAllocator;
+    /// @brief Last allocated entry id.
+    NvU64 lastEntryId;
+
+    /// @brief Lock for the passive thread entry map
+    PORT_SPINLOCK *pLock;
+    /// @brief Map of thread entries of non ISR threads.
+    ThreadEntryMap threadEntries;
+
+#if TLS_ISR_CAN_USE_LOCK
+    /// @brief Lock which controls access to ISR-specific structures
+    PORT_SPINLOCK *pIsrLock;
+    /// @brief Map of thread entries of ISR threads.
+    ThreadEntryMap isrEntries;
+#else
+#if !defined(TLS_ISR_UNIT_TEST)
+#define TLS_MAX_ISRS 64
+#else
+#define TLS_MAX_ISRS 1024
+#endif
+    struct {
+        volatile NvU64 sp;
+        ThreadEntry *pThreadEntry;
+    } isrEntries[TLS_MAX_ISRS];
+#endif
+
+#if TLS_THREADS_CAN_RAISE_IRQL
+    /**
+     * @brief Number of ISRs / DPCs active on a given CPU.
+     *
+     * Every time an ISR starts, it increments this, and decrements on end.
+     * Since ISRs never get rescheduled, and passive threads will never preempt
+     * them, (isrCount[current_cpu] != 0) will be true IFF we're in ISR/DPC.
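+     *
+     * Illustrative sketch (not shipping code) of the expected bracketing,
+     * based on tlsIsrInit()/tlsIsrDestroy() below; the ISR entry point and
+     * its allocator variable are hypothetical:
+     *
+     *     void hypotheticalIsr(PORT_MEM_ALLOCATOR *pIsrAllocator)
+     *     {
+     *         tlsIsrInit(pIsrAllocator);    // isrCount[cpu]++, entry keyed by pIsrAllocator
+     *         // ... interrupt work; TLS lookups resolve via _tlsIsrEntriesFind() ...
+     *         tlsIsrDestroy(pIsrAllocator); // isrCount[cpu]--, entry removed
+     *     }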
+ */ + NvU32 *isrCount; +#endif + + volatile NvU32 initCount; +} TlsDatabase; + +TlsDatabase tlsDatabase; // Zero initialized + +// Helper function prototypes +static NvBool _tlsIsIsr(void); +static ThreadEntry *_tlsThreadEntryGet(void); +static ThreadEntry *_tlsThreadEntryGetOrAlloc(void); +static NvP64 *_tlsEntryAcquire(ThreadEntry *pThreadEntry, NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator); +static NvU32 _tlsEntryRelease(ThreadEntry *pThreadEntry, TlsEntry *pTlsEntry, PORT_MEM_ALLOCATOR *pCustomAllocator); +static NV_STATUS _tlsIsrEntriesInit(void); +static void _tlsIsrEntriesDestroy(void); +static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry); +static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp); +static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp); +static PORT_MEM_ALLOCATOR *_tlsIsrAllocatorGet(void); +static PORT_MEM_ALLOCATOR *_tlsAllocatorGet(void); + +#if TLS_THREADS_CAN_RAISE_IRQL +/// @todo move to NvPort (bug 1583359) +NvU32 osGetCurrentProcessorNumber(void); +#if defined(NVRM) +NvU32 osGetMaximumCoreCount(void); +#else +#define osGetMaximumCoreCount() 0x0 +#endif +#endif + + +#if !PORT_IS_FUNC_SUPPORTED(portSyncExSafeToSleep) +#define portSyncExSafeToSleep() NV_TRUE +#endif + +#if !PORT_IS_FUNC_SUPPORTED(portMemExSafeForNonPagedAlloc) +#define portMemExSafeForNonPagedAlloc() NV_TRUE +#endif + +#if defined(TLS_PROFILING) +#include "tls_profiling.h" +#endif + + + + +NV_STATUS tlsInitialize() +{ + NV_STATUS status; + + if (portAtomicIncrementU32(&tlsDatabase.initCount) != 1) + { + return NV_OK; /// @todo Maybe return NV_WARN_NOTHING_TO_DO? + } + + status = portInitialize(); + if (status != NV_OK) + return status; + + tlsDatabase.pAllocator = portMemAllocatorCreateNonPaged(); + if (tlsDatabase.pAllocator == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + + tlsDatabase.pLock = portSyncSpinlockCreate(tlsDatabase.pAllocator); + if (tlsDatabase.pLock == NULL) + { + status = NV_ERR_INSUFFICIENT_RESOURCES; + goto done; + } + mapInitIntrusive(&tlsDatabase.threadEntries); + + status = _tlsIsrEntriesInit(); + if (status != NV_OK) + goto done; + + tlsDatabase.lastEntryId = TLS_ENTRY_ID_DYNAMIC; + +#if TLS_THREADS_CAN_RAISE_IRQL +{ + NvU32 maxCoreCount = osGetMaximumCoreCount(); + if (maxCoreCount == 0) + maxCoreCount = 1; // MODS reports only 1 CPU at index 0. + + tlsDatabase.isrCount = PORT_ALLOC(tlsDatabase.pAllocator, maxCoreCount * sizeof(NvU32)); + if (tlsDatabase.isrCount == NULL) + { + status = NV_ERR_NO_MEMORY; + goto done; + } + portMemSet(tlsDatabase.isrCount, 0, maxCoreCount * sizeof(NvU32)); +} +#endif // TLS_THREADS_CAN_RAISE_IRQL + +done: + if (status != NV_OK) + { + tlsShutdown(); + } + return status; +} + +void tlsShutdown() +{ + if (portAtomicDecrementU32(&tlsDatabase.initCount) != 0) + { + return; + } + +#if defined(TLS_PROFILING) + _tlsProfilePrint(); +#endif + + mapDestroy(&tlsDatabase.threadEntries); + if (tlsDatabase.pLock) + portSyncSpinlockDestroy(tlsDatabase.pLock); + + _tlsIsrEntriesDestroy(); + + if (tlsDatabase.pAllocator) + { +#if TLS_THREADS_CAN_RAISE_IRQL + PORT_FREE(tlsDatabase.pAllocator, tlsDatabase.isrCount); +#endif + portMemAllocatorRelease(tlsDatabase.pAllocator); + } + portMemSet(&tlsDatabase, 0, sizeof(tlsDatabase)); + portShutdown(); +} + +void tlsIsrInit(PORT_MEM_ALLOCATOR *pIsrAllocator) +{ + ThreadEntry *pThreadEntry; + NV_ASSERT_OR_RETURN_VOID(tlsDatabase.initCount > 0); + + // + // If TLS_THREADS_CAN_RAISE_IRQL we treat anything that calls tlsIsrInit as + // ISR, and cannot perform this check. 
Will be moved to ASSERT later. + // See CORERM-96 + // + if (!TLS_THREADS_CAN_RAISE_IRQL && !_tlsIsIsr()) + { + static NvBool bAlreadyPrinted = NV_FALSE; + if (!bAlreadyPrinted) + { + NV_PRINTF(LEVEL_WARNING, + "TLS: Unnecessary tlsIsrInit() call at %p. Will stop reporting further violations.\n", + (void*)portUtilGetReturnAddress()); + bAlreadyPrinted = NV_TRUE; + } + return; + } + + pThreadEntry = PORT_ALLOC(pIsrAllocator, sizeof(*pThreadEntry)); + NV_ASSERT_OR_RETURN_VOID(pThreadEntry != NULL); + + pThreadEntry->key.sp = (NvU64)(NvUPtr)pIsrAllocator; + mapInitIntrusive(&pThreadEntry->map); + + _tlsIsrEntriesInsert(pThreadEntry); + +#if TLS_THREADS_CAN_RAISE_IRQL + portAtomicIncrementU32(&tlsDatabase.isrCount[osGetCurrentProcessorNumber()]); +#endif +} + +void tlsIsrDestroy(PORT_MEM_ALLOCATOR *pIsrAllocator) +{ + ThreadEntry *pThreadEntry; + NV_ASSERT_OR_RETURN_VOID(tlsDatabase.initCount > 0); + + if (!_tlsIsIsr()) + { + if (TLS_THREADS_CAN_RAISE_IRQL) + { + NV_PRINTF(LEVEL_ERROR, + "TLS: Calling tlsIsrDestroy() without accompanying tlsIsrInit at %p\n", + (void*)portUtilGetReturnAddress()); + } + return; + } + + pThreadEntry = _tlsIsrEntriesRemove((NvU64)(NvUPtr)pIsrAllocator); + + mapDestroy(&pThreadEntry->map); + PORT_FREE(pIsrAllocator, pThreadEntry); + +#if TLS_THREADS_CAN_RAISE_IRQL + portAtomicDecrementU32(&tlsDatabase.isrCount[osGetCurrentProcessorNumber()]); +#endif +} + +PORT_MEM_ALLOCATOR *tlsIsrAllocatorGet(void) +{ + + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL); + return _tlsIsrAllocatorGet(); +} + +NvU64 tlsEntryAlloc() +{ + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + return portAtomicExIncrementU64(&tlsDatabase.lastEntryId); +} + +NvP64 *tlsEntryAcquire(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL); + + // User tries allocation of unallocated entryId. + NV_ASSERT_OR_RETURN(entryId <= tlsDatabase.lastEntryId || + entryId >= TLS_ENTRY_ID_TAG_START, NULL); + + pThreadEntry = _tlsThreadEntryGetOrAlloc(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, NULL); + + return _tlsEntryAcquire(pThreadEntry, entryId, NULL); +} + +NvP64 *tlsEntryAcquireWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator) +{ + ThreadEntry *pThreadEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NULL); + + // User tries allocation of unallocated entryId. 
+ NV_ASSERT_OR_RETURN(entryId <= tlsDatabase.lastEntryId || + entryId >= TLS_ENTRY_ID_TAG_START, NULL); + NV_ASSERT_OR_RETURN(pCustomAllocator != NULL, NULL); + + pThreadEntry = _tlsThreadEntryGetOrAlloc(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, NULL); + + return _tlsEntryAcquire(pThreadEntry, entryId, pCustomAllocator); +} + +NvU32 tlsEntryRelease(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return _tlsEntryRelease(pThreadEntry, pTlsEntry, NULL); +} + +NvU32 tlsEntryReleaseWithAllocator(NvU64 entryId, PORT_MEM_ALLOCATOR *pCustomAllocator) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + NV_ASSERT_OR_RETURN(pCustomAllocator != NULL, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return _tlsEntryRelease(pThreadEntry, pTlsEntry, pCustomAllocator); +} + +NvP64 tlsEntryGet(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, NvP64_NULL); + + pThreadEntry = _tlsThreadEntryGet(); + if (pThreadEntry == NULL) + return NvP64_NULL; + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + return pTlsEntry ? pTlsEntry->pUserData : NvP64_NULL; +} + +NvU32 tlsEntryReference(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return ++pTlsEntry->refCount; +} + +NvU32 tlsEntryUnreference(NvU64 entryId) +{ + ThreadEntry *pThreadEntry; + TlsEntry *pTlsEntry; + NV_ASSERT_OR_RETURN(tlsDatabase.initCount > 0, TLS_ERROR_VAL); + + pThreadEntry = _tlsThreadEntryGet(); + NV_ASSERT_OR_RETURN(pThreadEntry != NULL, TLS_ERROR_VAL); + + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, TLS_ERROR_VAL); + + return --pTlsEntry->refCount; +} + + +static ThreadEntry * +_tlsThreadEntryGet() +{ + ThreadEntry *pThreadEntry; + + if (_tlsIsIsr()) + { + pThreadEntry = _tlsIsrEntriesFind((NvU64)(NvUPtr)&pThreadEntry); + } + else + { + NvU64 threadId = portThreadGetCurrentThreadId(); + portSyncSpinlockAcquire(tlsDatabase.pLock); + pThreadEntry = mapFind(&tlsDatabase.threadEntries, threadId); + portSyncSpinlockRelease(tlsDatabase.pLock); + } + return pThreadEntry; +} + + +static ThreadEntry * +_tlsThreadEntryGetOrAlloc() +{ + ThreadEntry* pThreadEntry = NULL; + + pThreadEntry = _tlsThreadEntryGet(); + if (pThreadEntry == NULL) // Only non-ISRs can be missing + { + NV_ASSERT(portMemExSafeForNonPagedAlloc()); + pThreadEntry = PORT_ALLOC(tlsDatabase.pAllocator, sizeof(*pThreadEntry)); + if (pThreadEntry != NULL) + { + pThreadEntry->key.threadId = portThreadGetCurrentThreadId(); + mapInitIntrusive(&pThreadEntry->map); + portSyncSpinlockAcquire(tlsDatabase.pLock); + mapInsertExisting(&tlsDatabase.threadEntries, + pThreadEntry->key.threadId, + pThreadEntry); + 
portSyncSpinlockRelease(tlsDatabase.pLock); + } + } + + return pThreadEntry; +} + +static NvP64* +_tlsEntryAcquire +( + ThreadEntry *pThreadEntry, + NvU64 entryId, + PORT_MEM_ALLOCATOR *pCustomAllocator +) +{ + TlsEntry *pTlsEntry; + PORT_MEM_ALLOCATOR *pAllocator; + + pAllocator = (pCustomAllocator != NULL) ? pCustomAllocator : _tlsAllocatorGet(); + pTlsEntry = mapFind(&pThreadEntry->map, entryId); + if (pTlsEntry != NULL) + { + pTlsEntry->refCount++; + } + else + { + pTlsEntry = PORT_ALLOC(pAllocator, sizeof(*pTlsEntry)); + NV_ASSERT_OR_RETURN(pTlsEntry != NULL, NULL); + mapInsertExisting(&pThreadEntry->map, entryId, pTlsEntry); + + pTlsEntry->refCount = 1; + pTlsEntry->pUserData = NvP64_NULL; + } + return &pTlsEntry->pUserData; +} + +static NvU32 +_tlsEntryRelease +( + ThreadEntry* pThreadEntry, + TlsEntry *pTlsEntry, + PORT_MEM_ALLOCATOR *pCustomAllocator +) +{ + NvU32 refCount; + PORT_MEM_ALLOCATOR *pAllocator; + pAllocator = (pCustomAllocator != NULL) ? pCustomAllocator : _tlsAllocatorGet(); + + refCount = --pTlsEntry->refCount; + if (refCount == 0) + { + mapRemove(&pThreadEntry->map, pTlsEntry); + PORT_FREE(pAllocator, pTlsEntry); + // Only non ISR Thread Entry can be deallocated. + if (!_tlsIsIsr() && (mapCount(&pThreadEntry->map) == 0)) + { + NV_ASSERT(portMemExSafeForNonPagedAlloc()); + mapDestroy(&pThreadEntry->map); + portSyncSpinlockAcquire(tlsDatabase.pLock); + mapRemove(&tlsDatabase.threadEntries, pThreadEntry); + portSyncSpinlockRelease(tlsDatabase.pLock); + PORT_FREE(tlsDatabase.pAllocator, pThreadEntry); + } + } + return refCount; +} + +static PORT_MEM_ALLOCATOR *_tlsIsrAllocatorGet(void) +{ + ThreadEntry *pThreadEntry; + + if (!_tlsIsIsr()) { return NULL; } + pThreadEntry = _tlsThreadEntryGet(); + + return (PORT_MEM_ALLOCATOR*)(NvUPtr)pThreadEntry->key.sp; +} + +static PORT_MEM_ALLOCATOR *_tlsAllocatorGet(void) +{ + PORT_MEM_ALLOCATOR *pIsrAllocator = _tlsIsrAllocatorGet(); + return (pIsrAllocator == NULL) ? 
tlsDatabase.pAllocator : pIsrAllocator;
+}
+
+#if TLS_ISR_CAN_USE_LOCK
+
+static NV_STATUS _tlsIsrEntriesInit()
+{
+    tlsDatabase.pIsrLock = portSyncSpinlockCreate(tlsDatabase.pAllocator);
+    if (tlsDatabase.pIsrLock == NULL)
+    {
+        return NV_ERR_INSUFFICIENT_RESOURCES;
+    }
+    mapInitIntrusive(&tlsDatabase.isrEntries);
+    return NV_OK;
+}
+static void _tlsIsrEntriesDestroy()
+{
+    if (tlsDatabase.pIsrLock)
+        portSyncSpinlockDestroy(tlsDatabase.pIsrLock);
+    mapDestroy(&tlsDatabase.isrEntries);
+}
+static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry)
+{
+    portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
+    mapInsertExisting(&tlsDatabase.isrEntries, pThreadEntry->key.sp, pThreadEntry);
+    portSyncSpinlockRelease(tlsDatabase.pIsrLock);
+}
+static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp)
+{
+    ThreadEntry *pThreadEntry;
+    portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
+    pThreadEntry = mapFind(&tlsDatabase.isrEntries, sp);
+    mapRemove(&tlsDatabase.isrEntries, pThreadEntry);
+    portSyncSpinlockRelease(tlsDatabase.pIsrLock);
+    return pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
+{
+    ThreadEntry *pThreadEntry;
+    portSyncSpinlockAcquire(tlsDatabase.pIsrLock);
+#if STACK_GROWS_DOWNWARD
+    pThreadEntry = mapFindGEQ(&tlsDatabase.isrEntries, approxSp);
+#else
+    pThreadEntry = mapFindLEQ(&tlsDatabase.isrEntries, approxSp);
+#endif
+    portSyncSpinlockRelease(tlsDatabase.pIsrLock);
+    return pThreadEntry;
+}
+
+#else // Lockless
+
+static NV_STATUS _tlsIsrEntriesInit()
+{
+    portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
+    return NV_OK;
+}
+static void _tlsIsrEntriesDestroy()
+{
+    portMemSet(tlsDatabase.isrEntries, 0, sizeof(tlsDatabase.isrEntries));
+}
+static void _tlsIsrEntriesInsert(ThreadEntry *pThreadEntry)
+{
+    NvU32 i = 0;
+
+    while (!portAtomicExCompareAndSwapU64(&tlsDatabase.isrEntries[i].sp,
+                                          pThreadEntry->key.sp, 0))
+    {
+        i = (i + 1) % TLS_MAX_ISRS;
+    }
+    tlsDatabase.isrEntries[i].pThreadEntry = pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesRemove(NvU64 sp)
+{
+    ThreadEntry *pThreadEntry;
+    NvU32 i = 0;
+
+    while (tlsDatabase.isrEntries[i].sp != sp)
+    {
+        i++;
+    }
+    pThreadEntry = tlsDatabase.isrEntries[i].pThreadEntry;
+    portAtomicExSetU64(&tlsDatabase.isrEntries[i].sp, 0);
+
+    return pThreadEntry;
+}
+static ThreadEntry *_tlsIsrEntriesFind(NvU64 approxSp)
+{
+    NvU32 i;
+    NvU32 closestIdx = ~0x0;
+    NvU64 closestSp = STACK_GROWS_DOWNWARD ?
~0ULL : 0; + + for (i = 0; i < TLS_MAX_ISRS; i++) + { + NvU64 sp = tlsDatabase.isrEntries[i].sp; +#if STACK_GROWS_DOWNWARD + if (sp != 0 && sp >= approxSp && sp < closestSp) +#else + if (sp != 0 && sp <= approxSp && sp > closestSp) +#endif + { + closestSp = sp; + closestIdx = i; + } + } + NV_ASSERT_OR_RETURN(closestIdx != ~0x0, NULL); + return tlsDatabase.isrEntries[closestIdx].pThreadEntry; +} + +#endif // TLS_ISR_CAN_USE_LOCK + + + +static NvBool _tlsIsIsr() +{ +#if defined (TLS_ISR_UNIT_TEST) + // In unit tests we simulate ISR tests in different ways, so tests define this + extern NvBool tlsTestIsIsr(void); + return tlsTestIsIsr(); +#elif TLS_THREADS_CAN_RAISE_IRQL + NvU64 preempt = portSyncExDisablePreemption(); + NvBool bIsIsr = (tlsDatabase.isrCount[osGetCurrentProcessorNumber()] > 0); + portSyncExRestorePreemption(preempt); + return bIsIsr; +#else // Usermode and most kernelmode platforms + return portUtilIsInterruptContext(); +#endif // TLS_ISR_UNIT_TEST +} diff --git a/src/nvidia/src/libraries/utils/nvassert.c b/src/nvidia/src/libraries/utils/nvassert.c new file mode 100644 index 000000000..69e92a4c5 --- /dev/null +++ b/src/nvidia/src/libraries/utils/nvassert.c @@ -0,0 +1,416 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +/** + * @file + * @brief UTIL module implementation implements helpter functions for + * + */ + +#include "nvport/nvport.h" +#include "utils/nvassert.h" + +#if defined(NVRM) && !defined(NVWATCH) +#include "containers/map.h" +#include "os/os.h" +#include "nvrm_registry.h" +#include "rmconfig.h" +#elif !defined(RMCFG_FEATURE_ENABLED) +#define RMCFG_FEATURE_x 0 +#endif + +#if NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE + +// Hook NV_ASSERT into RCDB. 
+#if NV_JOURNAL_ASSERT_ENABLE +void rcdbRmAssert(NvU32 lineNum, NvU64 ip); +void rcdbRmAssertStatus(NvU32 status, NvU32 lineNum, NvU64 ip); +#define NV_JOURNAL_ASSERT_FAILURE(lineNum, ip) rcdbRmAssert(lineNum, (NvU64)(ip)) +#define NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status) rcdbRmAssertStatus((status), lineNum, (NvU64)(ip)) +#else +#define NV_JOURNAL_ASSERT_FAILURE(lineNum, ip) ((void)0) +#define NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status) ((void)0) +#endif /* NV_JOURNAL_ASSERT_ENABLE*/ + +#if defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +#if NV_JOURNAL_ASSERT_ENABLE +/* + * Helper function for NV_ASSERT_FAILED + */ +void +nvAssertFailed(void) +{ + NV_JOURNAL_ASSERT_FAILURE(NV_RM_ASSERT_UNKNOWN_LINE_NUM, portUtilGetReturnAddress()); +} + +void +nvAssertOkFailed(NvU32 status) +{ + NV_JOURNAL_ASSERT_FAILURE_STATUS(NV_RM_ASSERT_UNKNOWN_LINE_NUM, portUtilGetReturnAddress(), status); +} +#endif + +#else //defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) + +#if NV_ASSERT_FAILED_USES_STRINGS +#define NV_ASSERT_FAILED_PRINTF_FMT "%s @ %s:%d\n" +#define NV_ASSERT_FAILED_PRINTF_PARAM pszExpr, trimFN(pszFileName), lineNum +#else +#define NV_ASSERT_FAILED_PRINTF_FMT "0x%016llx\n" +#define NV_ASSERT_FAILED_PRINTF_PARAM ip +#endif + +#define NV_ASSERT_PRINTF(level, fmt, ...) NV_PRINTF_STRING \ + (NV_PRINTF_MODULE, level, NV_PRINTF_ADD_PREFIX(fmt), ##__VA_ARGS__) + +#define PATH_SEP '/' + +/* + * Trim path from source filename. + */ +#if NV_ASSERT_FAILED_USES_STRINGS +static const char *trimFN(const char *pszFileName) +{ + NvLength i; + + for (i = 0; pszFileName[i] != 0; i++) + ; + + for (; i > 0; i--) + { + if (pszFileName[i] == PATH_SEP) + return &pszFileName[i + 1]; + } + + return pszFileName; +} +#endif + +/* + * Helper function for NV_ASSERT_FAILED + */ +void +nvAssertFailed +( + NV_ASSERT_FAILED_FUNC_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, "Assertion failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed @ 0x%016x", ip); + NV_JOURNAL_ASSERT_FAILURE(lineNum, ip); +} + +/* + * Helper functions for NV_ASSERT_OK_FAILED + */ +void +nvAssertOkFailed +( + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, + "Assertion failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvAssertStatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(LEVEL_ERROR, "Assertion failed: 0x%08X returned from 0x%016llx", + status, ip); + NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status); +} + +/* + * Helper function for NV_CHECK_FAILED + */ +void +nvCheckFailed +( + NvU32 level + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, "Check failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(level, "Check failed @ 0x%016llx", ip); +} + +/* + * Helper function for NV_CHECK_OK_FAILED + */ +void +nvCheckOkFailed +( + NvU32 level, + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, + "Check failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvAssertStatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); + NV_ASSERT_LOG(level, "Check 
failed: 0x%08X returned from 0x%016llx", status, ip); +} + +/* + * Helper function for NV_ASSERT_FAILED + */ +void +nvAssertFailedNoLog +( + NV_ASSERT_FAILED_FUNC_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, "Assertion failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); + NV_JOURNAL_ASSERT_FAILURE(lineNum, ip); +} + +/* + * Helper function for NV_ASSERT_OK_FAILED + */ +void +nvAssertOkFailedNoLog +( + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(LEVEL_ERROR, + "Assertion failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvAssertStatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); + NV_JOURNAL_ASSERT_FAILURE_STATUS(lineNum, ip, status); +} + +/* + * Helper function for NV_CHECK_FAILED + */ +void +nvCheckFailedNoLog +( + NvU32 level + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, "Check failed: " NV_ASSERT_FAILED_PRINTF_FMT, + NV_ASSERT_FAILED_PRINTF_PARAM); +} + +/* + * Helper function for NV_CHECK_OK_FAILED + */ +void +nvCheckOkFailedNoLog +( + NvU32 level, + NvU32 status + NV_ASSERT_FAILED_FUNC_COMMA_TYPE +) +{ + NvU64 ip = portUtilGetReturnAddress(); + PORT_UNREFERENCED_VARIABLE(ip); + + NV_ASSERT_PRINTF(level, + "Check failed: %s (0x%08X) returned from " NV_ASSERT_FAILED_PRINTF_FMT, + nvAssertStatusToString(status), status, NV_ASSERT_FAILED_PRINTF_PARAM); +} + +#endif // defined(GSP_PLUGIN_BUILD) || (defined(NVRM) && NVCPU_IS_RISCV64) +#endif // NV_PRINTF_ENABLED || NV_JOURNAL_ASSERT_ENABLE + +/* + * Temporarily duplicate the nvstatusToString code to nvAssertStatusToString. + * + * Ideally, nvassert.c and nvstatus.c should both be included in shared.nvmk. + * But nvstatus.c is already directly included in projects from multiple module + * branches that also include shared.nvmk. It is going to take some serious + * cross-module magic to move it. + */ + +#if !defined(NV_PRINTF_STRING_SECTION) +#if defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION __attribute__ ((section (".logging"))) +#else // defined(NVRM) && NVCPU_IS_RISCV64 +#define NV_PRINTF_STRING_SECTION +#endif // defined(NVRM) && NVCPU_IS_RISCV64 +#endif // !defined(NV_PRINTF_STRING_SECTION) + +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) static NV_PRINTF_STRING_SECTION \ + const char rm_pvt_##name##_str[] = string " [" #name "]"; +#include "nvstatuscodes.h" + +#undef NV_STATUS_CODE +#undef SDK_NVSTATUSCODES_H +#define NV_STATUS_CODE( name, code, string ) { name, rm_pvt_##name##_str }, +static struct NvStatusCodeString +{ + NV_STATUS statusCode; + const char *statusString; +} g_StatusCodeList[] = { + #include "nvstatuscodes.h" + { 0xffffffff, "Unknown error code!" } // Some compilers don't like the trailing ',' +}; +#undef NV_STATUS_CODE + +/*! + * @brief Given an NV_STATUS code, returns the corresponding status string. 
+ * + * @param[in] nvStatusIn NV_STATUS code for which the string is required + * + * @returns Corresponding status string from the nvstatuscodes.h + * + * TODO: Bug 200025711: convert this to an array-indexed lookup, instead of a linear search + * +*/ +const char *nvAssertStatusToString(NV_STATUS nvStatusIn) +{ + static NV_PRINTF_STRING_SECTION const char rm_pvt_UNKNOWN_str[] = "Unknown error code!"; + NvU32 i; + NvU32 n = ((NvU32)(sizeof(g_StatusCodeList))/(NvU32)(sizeof(g_StatusCodeList[0]))); + for (i = 0; i < n; i++) + { + if (g_StatusCodeList[i].statusCode == nvStatusIn) + { + return g_StatusCodeList[i].statusString; + } + } + + return rm_pvt_UNKNOWN_str; +} + +#if defined(NV_ASSERT_FAILED_BACKTRACE) +MAKE_MAP(AssertedIPMap, NvU8); + +static struct +{ + AssertedIPMap map; + NvU32 mode; + PORT_MUTEX *mtx; + NvBool init; + OS_THREAD_HANDLE tid; +} osAssertInternal; + +void nvAssertInit(void) +{ + if (osAssertInternal.init) + return; + + osAssertInternal.mode = NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE; + + // Map is not thread-safe and osAssertFailed can be called concurrently. + osReadRegistryDword(NULL, NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE, &osAssertInternal.mode); + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE) + { + osAssertInternal.mtx = portSyncMutexCreate(portMemAllocatorGetGlobalNonPaged()); + if (!osAssertInternal.mtx) + { + osAssertInternal.mode = NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_DISABLE; + } + else + { + mapInit(&osAssertInternal.map, portMemAllocatorGetGlobalNonPaged()); + } + } + osAssertInternal.init = NV_TRUE; +} + +static void nvAssertFailedBacktrace(NvU64 ip) +{ + if (!osAssertInternal.init) + return; + + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE) + { + OS_THREAD_HANDLE tid; + if (osGetCurrentThread(&tid) != NV_OK) + return; + + // nvport mutex is not reentrant and will deadlock with nested locking. + // If the next condition holds, we're in a reentrant call. + if (tid == osAssertInternal.tid) + return; + + portSyncMutexAcquire(osAssertInternal.mtx); + osAssertInternal.tid = tid; + + if (!mapFind(&osAssertInternal.map, ip)) + { + // If we're out of memory, do not dump anything to avoid spam + if (mapInsertNew(&osAssertInternal.map, ip)) + osAssertFailed(); + } + + osAssertInternal.tid = 0; + portSyncMutexRelease(osAssertInternal.mtx); + } + else if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_ENABLE) + osAssertFailed(); +} + +void nvAssertDestroy(void) +{ + if (!osAssertInternal.init) + return; + + if (osAssertInternal.mode == NV_REG_STR_RM_PRINT_ASSERT_BACKTRACE_UNIQUE && osAssertInternal.mtx) + { + portSyncMutexDestroy(osAssertInternal.mtx); + mapDestroy(&osAssertInternal.map); + } + osAssertInternal.init = 0; +} + +#elif defined(NVRM) && !defined(NVWATCH) // ignore in nvlog_decoder/nvwatch build + +// We do not expose NV_ASSERT_FAILED_BACKTRACE outside this file. The callers will use these stubs. 
+void nvAssertInit(void) +{ +} + +void nvAssertDestroy(void) +{ +} +#endif /* defined(NV_ASSERT_FAILED_BACKTRACE) */ diff --git a/src/nvidia/srcs.mk b/src/nvidia/srcs.mk new file mode 100644 index 000000000..26f646f51 --- /dev/null +++ b/src/nvidia/srcs.mk @@ -0,0 +1,651 @@ +SRCS ?= +SRCS_CXX ?= + +SRCS += generated/g_access_cntr_buffer_nvoc.c +SRCS += generated/g_all_dcl_pb.c +SRCS += generated/g_binary_api_nvoc.c +SRCS += generated/g_bindata.c +SRCS += generated/g_channel_descendant_nvoc.c +SRCS += generated/g_chips2halspec_nvoc.c +SRCS += generated/g_chipset_nvoc.c +SRCS += generated/g_client_nvoc.c +SRCS += generated/g_client_resource_nvoc.c +SRCS += generated/g_compute_instance_subscription_nvoc.c +SRCS += generated/g_console_mem_nvoc.c +SRCS += generated/g_context_dma_nvoc.c +SRCS += generated/g_dbgbuffer_nvoc.c +SRCS += generated/g_deferred_api_nvoc.c +SRCS += generated/g_device_nvoc.c +SRCS += generated/g_disp_capabilities_nvoc.c +SRCS += generated/g_disp_channel_nvoc.c +SRCS += generated/g_disp_inst_mem_nvoc.c +SRCS += generated/g_disp_objs_nvoc.c +SRCS += generated/g_disp_sf_user_nvoc.c +SRCS += generated/g_dispsw_nvoc.c +SRCS += generated/g_eng_state_nvoc.c +SRCS += generated/g_engines_pb.c +SRCS += generated/g_event_buffer_nvoc.c +SRCS += generated/g_event_nvoc.c +SRCS += generated/g_fabric_nvoc.c +SRCS += generated/g_fabric_vaspace_nvoc.c +SRCS += generated/g_fbsr_nvoc.c +SRCS += generated/g_fla_mem_nvoc.c +SRCS += generated/g_fm_session_api_nvoc.c +SRCS += generated/g_generic_engine_nvoc.c +SRCS += generated/g_gpu_acct_nvoc.c +SRCS += generated/g_gpu_boost_mgr_nvoc.c +SRCS += generated/g_gpu_class_list.c +SRCS += generated/g_gpu_db_nvoc.c +SRCS += generated/g_gpu_group_nvoc.c +SRCS += generated/g_gpu_halspec_nvoc.c +SRCS += generated/g_gpu_instance_subscription_nvoc.c +SRCS += generated/g_gpu_mgmt_api_nvoc.c +SRCS += generated/g_gpu_mgr_nvoc.c +SRCS += generated/g_gpu_nvoc.c +SRCS += generated/g_gpu_resource_nvoc.c +SRCS += generated/g_gpu_vaspace_nvoc.c +SRCS += generated/g_gr_pb.c +SRCS += generated/g_hal_mgr_nvoc.c +SRCS += generated/g_hal_nvoc.c +SRCS += generated/g_hda_codec_api_nvoc.c +SRCS += generated/g_heap_nvoc.c +SRCS += generated/g_host_eng_nvoc.c +SRCS += generated/g_hw_resources_nvoc.c +SRCS += generated/g_i2c_api_nvoc.c +SRCS += generated/g_intr_nvoc.c +SRCS += generated/g_intr_service_nvoc.c +SRCS += generated/g_intrable_nvoc.c +SRCS += generated/g_io_vaspace_nvoc.c +SRCS += generated/g_journal_nvoc.c +SRCS += generated/g_journal_pb.c +SRCS += generated/g_kern_bus_nvoc.c +SRCS += generated/g_kern_disp_nvoc.c +SRCS += generated/g_kern_gmmu_nvoc.c +SRCS += generated/g_kern_mem_sys_nvoc.c +SRCS += generated/g_kern_perf_nvoc.c +SRCS += generated/g_kern_perfbuffer_nvoc.c +SRCS += generated/g_kern_pmu_nvoc.c +SRCS += generated/g_kernel_bif_nvoc.c +SRCS += generated/g_kernel_ce_context_nvoc.c +SRCS += generated/g_kernel_ce_nvoc.c +SRCS += generated/g_kernel_channel_group_api_nvoc.c +SRCS += generated/g_kernel_channel_group_nvoc.c +SRCS += generated/g_kernel_channel_nvoc.c +SRCS += generated/g_kernel_ctxshare_nvoc.c +SRCS += generated/g_kernel_falcon_nvoc.c +SRCS += generated/g_kernel_fifo_nvoc.c +SRCS += generated/g_kernel_graphics_context_nvoc.c +SRCS += generated/g_kernel_graphics_manager_nvoc.c +SRCS += generated/g_kernel_graphics_nvoc.c +SRCS += generated/g_kernel_graphics_object_nvoc.c +SRCS += generated/g_kernel_gsp_nvoc.c +SRCS += generated/g_kernel_head_nvoc.c +SRCS += generated/g_kernel_ioctrl_nvoc.c +SRCS += generated/g_kernel_mc_nvoc.c +SRCS += 
generated/g_kernel_mig_manager_nvoc.c +SRCS += generated/g_kernel_nvdec_ctx_nvoc.c +SRCS += generated/g_kernel_nvdec_nvoc.c +SRCS += generated/g_kernel_nvenc_ctx_nvoc.c +SRCS += generated/g_kernel_nvjpg_ctx_nvoc.c +SRCS += generated/g_kernel_nvlink_nvoc.c +SRCS += generated/g_kernel_ofa_ctx_nvoc.c +SRCS += generated/g_kernel_rc_nvoc.c +SRCS += generated/g_kernel_sched_mgr_nvoc.c +SRCS += generated/g_kernel_sec2_nvoc.c +SRCS += generated/g_kernel_sm_debugger_session_nvoc.c +SRCS += generated/g_mem_fabric_nvoc.c +SRCS += generated/g_mem_mgr_nvoc.c +SRCS += generated/g_mem_nvoc.c +SRCS += generated/g_mig_config_session_nvoc.c +SRCS += generated/g_mig_monitor_session_nvoc.c +SRCS += generated/g_mmu_fault_buffer_nvoc.c +SRCS += generated/g_mps_api_nvoc.c +SRCS += generated/g_no_device_mem_nvoc.c +SRCS += generated/g_nv_debug_dump_nvoc.c +SRCS += generated/g_nvdebug_pb.c +SRCS += generated/g_object_nvoc.c +SRCS += generated/g_objgpumon_nvoc.c +SRCS += generated/g_objsweng_nvoc.c +SRCS += generated/g_objtmr_nvoc.c +SRCS += generated/g_os_desc_mem_nvoc.c +SRCS += generated/g_os_iom.c +SRCS += generated/g_p2p_api_nvoc.c +SRCS += generated/g_phys_mem_nvoc.c +SRCS += generated/g_platform_nvoc.c +SRCS += generated/g_prereq_tracker_nvoc.c +SRCS += generated/g_profiler_v1_nvoc.c +SRCS += generated/g_profiler_v2_nvoc.c +SRCS += generated/g_rc_pb.c +SRCS += generated/g_ref_count_nvoc.c +SRCS += generated/g_reg_mem_nvoc.c +SRCS += generated/g_regs_pb.c +SRCS += generated/g_resource_nvoc.c +SRCS += generated/g_rg_line_callback_nvoc.c +SRCS += generated/g_rmconfig_util.c +SRCS += generated/g_rpc_iom.c +SRCS += generated/g_rs_client_nvoc.c +SRCS += generated/g_rs_resource_nvoc.c +SRCS += generated/g_rs_server_nvoc.c +SRCS += generated/g_standard_mem_nvoc.c +SRCS += generated/g_subdevice_diag_nvoc.c +SRCS += generated/g_subdevice_nvoc.c +SRCS += generated/g_sw_test_nvoc.c +SRCS += generated/g_swintr_nvoc.c +SRCS += generated/g_syncgpuboost_nvoc.c +SRCS += generated/g_system_mem_nvoc.c +SRCS += generated/g_system_nvoc.c +SRCS += generated/g_third_party_p2p_nvoc.c +SRCS += generated/g_timed_sema_nvoc.c +SRCS += generated/g_tmr_nvoc.c +SRCS += generated/g_traceable_nvoc.c +SRCS += generated/g_usermode_api_nvoc.c +SRCS += generated/g_uvm_channel_retainer_nvoc.c +SRCS += generated/g_uvm_nvoc.c +SRCS += generated/g_uvm_sw_nvoc.c +SRCS += generated/g_vaspace_api_nvoc.c +SRCS += generated/g_vaspace_nvoc.c +SRCS += generated/g_vblank_callback_nvoc.c +SRCS += generated/g_video_mem_nvoc.c +SRCS += generated/g_virt_mem_allocator_nvoc.c +SRCS += generated/g_virt_mem_mgr_nvoc.c +SRCS += generated/g_virt_mem_range_nvoc.c +SRCS += generated/g_virtual_mem_nvoc.c +SRCS += generated/g_zbc_api_nvoc.c +SRCS += ../common/shared/msgq/msgq.c +SRCS += ../common/shared/nvstatus/nvstatus.c +SRCS += ../common/src/nvSha256.c +SRCS += ../common/nvlink/kernel/nvlink/core/nvlink_conn_mgmt.c +SRCS += ../common/nvlink/kernel/nvlink/core/nvlink_discovery.c +SRCS += ../common/nvlink/kernel/nvlink/core/nvlink_initialize.c +SRCS += ../common/nvlink/kernel/nvlink/core/nvlink_ioctl.c +SRCS += ../common/nvlink/kernel/nvlink/core/nvlink_link_mgmt.c +SRCS += ../common/nvlink/kernel/nvlink/core/nvlink_logger.c +SRCS += ../common/nvlink/kernel/nvlink/core/nvlink_shutdown.c +SRCS += ../common/nvlink/kernel/nvlink/core/nvlink_training.c +SRCS += ../common/nvlink/kernel/nvlink/interface/nvlink_ioctl_entry.c +SRCS += ../common/nvlink/kernel/nvlink/interface/nvlink_kern_discovery_entry.c +SRCS += 
../common/nvlink/kernel/nvlink/interface/nvlink_kern_initialize_entry.c +SRCS += ../common/nvlink/kernel/nvlink/interface/nvlink_kern_link_mgmt_entry.c +SRCS += ../common/nvlink/kernel/nvlink/interface/nvlink_kern_registration_entry.c +SRCS += ../common/nvlink/kernel/nvlink/interface/nvlink_kern_shutdown_entry.c +SRCS += ../common/nvlink/kernel/nvlink/interface/nvlink_kern_training_entry.c +SRCS += ../common/nvlink/kernel/nvlink/nvlink_lib_mgmt.c +SRCS += ../common/nvlink/kernel/nvlink/nvlink_lock.c +SRCS += ../common/nvswitch/kernel/bios_nvswitch.c +SRCS += ../common/nvswitch/kernel/error_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcn_call_hal_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcn_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcnable_call_hal_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcnable_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcndmem_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcnqueue_dmem_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcnqueue_fb_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcnqueue_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcnqueuerd_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/flcnrtosdebug_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/v03/flcn0300_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/v04/flcn0400_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/v05/flcn0501_nvswitch.c +SRCS += ../common/nvswitch/kernel/flcn/v06/flcn0600_nvswitch.c +SRCS += ../common/nvswitch/kernel/inforom/ifrbbx_nvswitch.c +SRCS += ../common/nvswitch/kernel/inforom/ifrecc_nvswitch.c +SRCS += ../common/nvswitch/kernel/inforom/ifrnvlink_nvswitch.c +SRCS += ../common/nvswitch/kernel/inforom/ifroms_nvswitch.c +SRCS += ../common/nvswitch/kernel/inforom/ifrro_nvswitch.c +SRCS += ../common/nvswitch/kernel/inforom/inforom_nvswitch.c +SRCS += ../common/nvswitch/kernel/ipmi/fru_nvswitch.c +SRCS += ../common/nvswitch/kernel/lr10/clock_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/discovery_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/flcn_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/inforom_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/intr_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/link_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/lr10.c +SRCS += ../common/nvswitch/kernel/lr10/minion_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/pmgr_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/smbpbi_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/soe_lr10.c +SRCS += ../common/nvswitch/kernel/lr10/therm_lr10.c +SRCS += ../common/nvswitch/kernel/nvswitch.c +SRCS += ../common/nvswitch/kernel/pmgr_nvswitch.c +SRCS += ../common/nvswitch/kernel/rom_nvswitch.c +SRCS += ../common/nvswitch/kernel/smbpbi_nvswitch.c +SRCS += ../common/nvswitch/kernel/soe/soe_call_hal_nvswitch.c +SRCS += ../common/nvswitch/kernel/soe/soe_nvswitch.c +SRCS += ../common/nvswitch/kernel/spi_nvswitch.c +SRCS += arch/nvalloc/unix/src/asm/x86/nv_cpuid.c +SRCS += arch/nvalloc/unix/src/asm/x86/nv_rdcr.c +SRCS += arch/nvalloc/unix/src/escape.c +SRCS += arch/nvalloc/unix/src/exports-stubs.c +SRCS += arch/nvalloc/unix/src/gcc_helper.c +SRCS += arch/nvalloc/unix/src/os-hypervisor-stubs.c +SRCS += arch/nvalloc/unix/src/os.c +SRCS += arch/nvalloc/unix/src/osapi.c +SRCS += arch/nvalloc/unix/src/osinit.c +SRCS += arch/nvalloc/unix/src/osmemdesc.c +SRCS += arch/nvalloc/unix/src/osnvlink.c +SRCS += arch/nvalloc/unix/src/osunix.c +SRCS += arch/nvalloc/unix/src/registry.c +SRCS += arch/nvalloc/unix/src/rm-gpu-ops.c +SRCS += 
arch/nvalloc/unix/src/rmobjexportimport.c +SRCS += arch/nvalloc/unix/src/subdevice_ctrl_os_unix.c +SRCS += arch/nvalloc/unix/src/unix_console.c +SRCS += arch/nvalloc/unix/src/unix_intr.c +SRCS += interface/deprecated/rmapi_deprecated_allocmemory.c +SRCS += interface/deprecated/rmapi_deprecated_control.c +SRCS += interface/deprecated/rmapi_deprecated_misc.c +SRCS += interface/deprecated/rmapi_deprecated_utils.c +SRCS += interface/deprecated/rmapi_deprecated_vidheapctrl.c +SRCS += interface/deprecated/rmapi_gss_legacy_control.c +SRCS += interface/rmapi/src/finn_rm_api.c +SRCS += kernel/nvd/nv/dbgbuffer.c +SRCS += kernel/nvd/nv/nvdctrl.c +SRCS += kernel/vgpu/nv/rpc.c +SRCS += src/kernel/compute/fabric.c +SRCS += src/kernel/compute/fm_session_api.c +SRCS += src/kernel/compute/mps_api.c +SRCS += src/kernel/core/bin_data.c +SRCS += src/kernel/core/hal/hal.c +SRCS += src/kernel/core/hal/hals_all.c +SRCS += src/kernel/core/hal/hals_stub.c +SRCS += src/kernel/core/hal/info_block.c +SRCS += src/kernel/core/hal_mgr.c +SRCS += src/kernel/core/locks.c +SRCS += src/kernel/core/locks_common.c +SRCS += src/kernel/core/system.c +SRCS += src/kernel/core/thread_state.c +SRCS += src/kernel/diagnostics/gpu_acct.c +SRCS += src/kernel/diagnostics/journal.c +SRCS += src/kernel/diagnostics/nv_debug_dump.c +SRCS += src/kernel/diagnostics/nvlog.c +SRCS += src/kernel/diagnostics/nvlog_printf.c +SRCS += src/kernel/diagnostics/profiler.c +SRCS += src/kernel/disp/disp_sw.c +SRCS += src/kernel/gpu/arch/ampere/kern_gpu_ga100.c +SRCS += src/kernel/gpu/arch/maxwell/kern_gpu_gm107.c +SRCS += src/kernel/gpu/arch/turing/kern_gpu_tu102.c +SRCS += src/kernel/gpu/audio/hda_codec_api.c +SRCS += src/kernel/gpu/bif/arch/ampere/kernel_bif_ga100.c +SRCS += src/kernel/gpu/bif/arch/maxwell/kernel_bif_gm107.c +SRCS += src/kernel/gpu/bif/arch/turing/kernel_bif_tu102.c +SRCS += src/kernel/gpu/bif/kernel_bif.c +SRCS += src/kernel/gpu/bus/arch/ampere/kern_bus_ga100.c +SRCS += src/kernel/gpu/bus/arch/maxwell/kern_bus_gm107.c +SRCS += src/kernel/gpu/bus/arch/maxwell/kern_bus_gm200.c +SRCS += src/kernel/gpu/bus/arch/pascal/kern_bus_gp100.c +SRCS += src/kernel/gpu/bus/arch/turing/kern_bus_tu102.c +SRCS += src/kernel/gpu/bus/arch/volta/kern_bus_gv100.c +SRCS += src/kernel/gpu/bus/kern_bus.c +SRCS += src/kernel/gpu/bus/kern_bus_ctrl.c +SRCS += src/kernel/gpu/bus/kern_bus_vbar2.c +SRCS += src/kernel/gpu/bus/p2p.c +SRCS += src/kernel/gpu/bus/p2p_api.c +SRCS += src/kernel/gpu/bus/third_party_p2p.c +SRCS += src/kernel/gpu/bus/third_party_p2p_ctrl.c +SRCS += src/kernel/gpu/ce/arch/ampere/kernel_ce_ga100.c +SRCS += src/kernel/gpu/ce/arch/ampere/kernel_ce_ga102.c +SRCS += src/kernel/gpu/ce/arch/pascal/kernel_ce_gp100.c +SRCS += src/kernel/gpu/ce/arch/turing/kernel_ce_tu102.c +SRCS += src/kernel/gpu/ce/arch/volta/kernel_ce_gv100.c +SRCS += src/kernel/gpu/ce/kernel_ce.c +SRCS += src/kernel/gpu/ce/kernel_ce_context.c +SRCS += src/kernel/gpu/ce/kernel_ce_ctrl.c +SRCS += src/kernel/gpu/ce/kernel_ce_shared.c +SRCS += src/kernel/gpu/deferred_api.c +SRCS += src/kernel/gpu/device.c +SRCS += src/kernel/gpu/device_ctrl.c +SRCS += src/kernel/gpu/device_share.c +SRCS += src/kernel/gpu/disp/arch/v03/kern_disp_0300.c +SRCS += src/kernel/gpu/disp/arch/v04/kern_disp_0400.c +SRCS += src/kernel/gpu/disp/arch/v04/kernel_head_gpu.c +SRCS += src/kernel/gpu/disp/disp_capabilities.c +SRCS += src/kernel/gpu/disp/disp_channel.c +SRCS += src/kernel/gpu/disp/disp_common_kern_ctrl_minimal.c +SRCS += src/kernel/gpu/disp/disp_object_kern_ctrl_minimal.c +SRCS += 
src/kernel/gpu/disp/disp_objs.c +SRCS += src/kernel/gpu/disp/disp_sf_user.c +SRCS += src/kernel/gpu/disp/head/kernel_head.c +SRCS += src/kernel/gpu/disp/inst_mem/arch/v03/disp_inst_mem_0300.c +SRCS += src/kernel/gpu/disp/inst_mem/disp_inst_mem.c +SRCS += src/kernel/gpu/disp/kern_disp.c +SRCS += src/kernel/gpu/disp/rg_line_callback/rg_line_callback.c +SRCS += src/kernel/gpu/disp/vblank_callback/vblank.c +SRCS += src/kernel/gpu/disp/vblank_callback/vblank_callback.c +SRCS += src/kernel/gpu/eng_state.c +SRCS += src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga100.c +SRCS += src/kernel/gpu/falcon/arch/ampere/kernel_falcon_ga102.c +SRCS += src/kernel/gpu/falcon/arch/turing/kernel_falcon_tu102.c +SRCS += src/kernel/gpu/falcon/kernel_falcon.c +SRCS += src/kernel/gpu/fifo/arch/ampere/kernel_channel_ga100.c +SRCS += src/kernel/gpu/fifo/arch/ampere/kernel_fifo_ga100.c +SRCS += src/kernel/gpu/fifo/arch/ampere/kernel_fifo_ga102.c +SRCS += src/kernel/gpu/fifo/arch/maxwell/kernel_channel_gm107.c +SRCS += src/kernel/gpu/fifo/arch/maxwell/kernel_channel_group_gm107.c +SRCS += src/kernel/gpu/fifo/arch/maxwell/kernel_fifo_gm107.c +SRCS += src/kernel/gpu/fifo/arch/pascal/kernel_fifo_gp102.c +SRCS += src/kernel/gpu/fifo/arch/turing/kernel_fifo_tu102.c +SRCS += src/kernel/gpu/fifo/arch/volta/kernel_channel_group_gv100.c +SRCS += src/kernel/gpu/fifo/arch/volta/kernel_channel_gv100.c +SRCS += src/kernel/gpu/fifo/arch/volta/kernel_fifo_gv100.c +SRCS += src/kernel/gpu/fifo/arch/volta/usermode_api_gv100.c +SRCS += src/kernel/gpu/fifo/channel_descendant.c +SRCS += src/kernel/gpu/fifo/kernel_channel.c +SRCS += src/kernel/gpu/fifo/kernel_channel_group.c +SRCS += src/kernel/gpu/fifo/kernel_channel_group_api.c +SRCS += src/kernel/gpu/fifo/kernel_ctxshare.c +SRCS += src/kernel/gpu/fifo/kernel_fifo.c +SRCS += src/kernel/gpu/fifo/kernel_fifo_ctrl.c +SRCS += src/kernel/gpu/fifo/kernel_fifo_init.c +SRCS += src/kernel/gpu/fifo/kernel_idle_channels.c +SRCS += src/kernel/gpu/fifo/kernel_sched_mgr.c +SRCS += src/kernel/gpu/fifo/usermode_api.c +SRCS += src/kernel/gpu/fifo/uvm_channel_retainer.c +SRCS += src/kernel/gpu/gpu.c +SRCS += src/kernel/gpu/gpu_access.c +SRCS += src/kernel/gpu/gpu_device_mapping.c +SRCS += src/kernel/gpu/gpu_gspclient.c +SRCS += src/kernel/gpu/gpu_name_kernel.c +SRCS += src/kernel/gpu/gpu_protobuf.c +SRCS += src/kernel/gpu/gpu_register_access_map.c +SRCS += src/kernel/gpu/gpu_registry.c +SRCS += src/kernel/gpu/gpu_resource.c +SRCS += src/kernel/gpu/gpu_resource_desc.c +SRCS += src/kernel/gpu/gpu_rmapi.c +SRCS += src/kernel/gpu/gpu_timeout.c +SRCS += src/kernel/gpu/gpu_uuid.c +SRCS += src/kernel/gpu/gr/arch/maxwell/kgraphics_gm200.c +SRCS += src/kernel/gpu/gr/arch/pascal/kgraphics_gp100.c +SRCS += src/kernel/gpu/gr/arch/turing/kgraphics_tu102.c +SRCS += src/kernel/gpu/gr/fecs_event_list.c +SRCS += src/kernel/gpu/gr/kernel_graphics.c +SRCS += src/kernel/gpu/gr/kernel_graphics_context.c +SRCS += src/kernel/gpu/gr/kernel_graphics_manager.c +SRCS += src/kernel/gpu/gr/kernel_graphics_object.c +SRCS += src/kernel/gpu/gr/kernel_sm_debugger_session.c +SRCS += src/kernel/gpu/gr/kernel_sm_debugger_session_ctrl.c +SRCS += src/kernel/gpu/gsp/arch/ampere/kernel_gsp_falcon_ga102.c +SRCS += src/kernel/gpu/gsp/arch/ampere/kernel_gsp_ga100.c +SRCS += src/kernel/gpu/gsp/arch/ampere/kernel_gsp_ga102.c +SRCS += src/kernel/gpu/gsp/arch/turing/kernel_gsp_booter_tu102.c +SRCS += src/kernel/gpu/gsp/arch/turing/kernel_gsp_falcon_tu102.c +SRCS += src/kernel/gpu/gsp/arch/turing/kernel_gsp_frts_tu102.c +SRCS += 
src/kernel/gpu/gsp/arch/turing/kernel_gsp_tu102.c +SRCS += src/kernel/gpu/gsp/arch/turing/kernel_gsp_vbios_tu102.c +SRCS += src/kernel/gpu/gsp/kernel_gsp.c +SRCS += src/kernel/gpu/gsp/kernel_gsp_booter.c +SRCS += src/kernel/gpu/gsp/kernel_gsp_fwsec.c +SRCS += src/kernel/gpu/gsp/message_queue_cpu.c +SRCS += src/kernel/gpu/host_eng/host_eng.c +SRCS += src/kernel/gpu/hwpm/profiler_v1/kern_profiler_v1.c +SRCS += src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2.c +SRCS += src/kernel/gpu/hwpm/profiler_v2/kern_profiler_v2_ctrl.c +SRCS += src/kernel/gpu/i2c/i2c_api.c +SRCS += src/kernel/gpu/intr/arch/ampere/intr_cpu_ga102.c +SRCS += src/kernel/gpu/intr/arch/ampere/intr_ga100.c +SRCS += src/kernel/gpu/intr/arch/maxwell/intr_gm107.c +SRCS += src/kernel/gpu/intr/arch/pascal/intr_gp100.c +SRCS += src/kernel/gpu/intr/arch/turing/intr_cpu_tu102.c +SRCS += src/kernel/gpu/intr/arch/turing/intr_nonstall_tu102.c +SRCS += src/kernel/gpu/intr/arch/turing/intr_swintr_tu102.c +SRCS += src/kernel/gpu/intr/arch/turing/intr_tu102.c +SRCS += src/kernel/gpu/intr/intr.c +SRCS += src/kernel/gpu/intr/intr_service.c +SRCS += src/kernel/gpu/intr/swintr.c +SRCS += src/kernel/gpu/intrable/intrable.c +SRCS += src/kernel/gpu/mc/arch/ampere/kernel_mc_ga100.c +SRCS += src/kernel/gpu/mc/arch/maxwell/kernel_mc_gm107.c +SRCS += src/kernel/gpu/mc/kernel_mc.c +SRCS += src/kernel/gpu/mem_mgr/arch/ampere/fbsr_ga100.c +SRCS += src/kernel/gpu/mem_mgr/arch/ampere/mem_mgr_ga100.c +SRCS += src/kernel/gpu/mem_mgr/arch/ampere/mem_mgr_ga102.c +SRCS += src/kernel/gpu/mem_mgr/arch/maxwell/fbsr_gm107.c +SRCS += src/kernel/gpu/mem_mgr/arch/maxwell/mem_mgr_gm107.c +SRCS += src/kernel/gpu/mem_mgr/arch/maxwell/mem_mgr_gm200.c +SRCS += src/kernel/gpu/mem_mgr/arch/maxwell/mem_utils_gm107.c +SRCS += src/kernel/gpu/mem_mgr/arch/maxwell/virt_mem_allocator_gm107.c +SRCS += src/kernel/gpu/mem_mgr/arch/pascal/mem_mgr_gp100.c +SRCS += src/kernel/gpu/mem_mgr/arch/pascal/mem_mgr_scrub_gp100.c +SRCS += src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102.c +SRCS += src/kernel/gpu/mem_mgr/arch/turing/mem_mgr_tu102_base.c +SRCS += src/kernel/gpu/mem_mgr/arch/volta/mem_mgr_gv100.c +SRCS += src/kernel/gpu/mem_mgr/context_dma.c +SRCS += src/kernel/gpu/mem_mgr/dma.c +SRCS += src/kernel/gpu/mem_mgr/fbsr.c +SRCS += src/kernel/gpu/mem_mgr/heap.c +SRCS += src/kernel/gpu/mem_mgr/mem_ctrl.c +SRCS += src/kernel/gpu/mem_mgr/mem_desc.c +SRCS += src/kernel/gpu/mem_mgr/mem_mgr.c +SRCS += src/kernel/gpu/mem_mgr/mem_mgr_ctrl.c +SRCS += src/kernel/gpu/mem_mgr/mem_mgr_gsp_client.c +SRCS += src/kernel/gpu/mem_mgr/mem_mgr_regions.c +SRCS += src/kernel/gpu/mem_mgr/mem_scrub.c +SRCS += src/kernel/gpu/mem_mgr/mem_utils.c +SRCS += src/kernel/gpu/mem_mgr/method_notification.c +SRCS += src/kernel/gpu/mem_mgr/objheap.c +SRCS += src/kernel/gpu/mem_mgr/phys_mem_allocator/addrtree.c +SRCS += src/kernel/gpu/mem_mgr/phys_mem_allocator/numa.c +SRCS += src/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator.c +SRCS += src/kernel/gpu/mem_mgr/phys_mem_allocator/phys_mem_allocator_util.c +SRCS += src/kernel/gpu/mem_mgr/phys_mem_allocator/regmap.c +SRCS += src/kernel/gpu/mem_mgr/vaspace_api.c +SRCS += src/kernel/gpu/mem_mgr/virt_mem_allocator.c +SRCS += src/kernel/gpu/mem_mgr/virt_mem_allocator_vgpu.c +SRCS += src/kernel/gpu/mem_sys/arch/ampere/kern_mem_sys_ga100.c +SRCS += src/kernel/gpu/mem_sys/arch/ampere/kern_mem_sys_ga102.c +SRCS += src/kernel/gpu/mem_sys/arch/maxwell/kern_mem_sys_gm107.c +SRCS += src/kernel/gpu/mem_sys/arch/maxwell/kern_mem_sys_gm200.c +SRCS += 
src/kernel/gpu/mem_sys/arch/pascal/kern_mem_sys_gp102.c +SRCS += src/kernel/gpu/mem_sys/arch/turing/kern_mem_sys_tu102.c +SRCS += src/kernel/gpu/mem_sys/arch/volta/kern_mem_sys_gv100.c +SRCS += src/kernel/gpu/mem_sys/kern_mem_sys.c +SRCS += src/kernel/gpu/mem_sys/kern_mem_sys_ctrl.c +SRCS += src/kernel/gpu/mig_mgr/arch/ampere/kmigmgr_ga100.c +SRCS += src/kernel/gpu/mig_mgr/compute_instance_subscription.c +SRCS += src/kernel/gpu/mig_mgr/gpu_instance_subscription.c +SRCS += src/kernel/gpu/mig_mgr/kernel_mig_manager.c +SRCS += src/kernel/gpu/mig_mgr/mig_config_session.c +SRCS += src/kernel/gpu/mig_mgr/mig_monitor_session.c +SRCS += src/kernel/gpu/mmu/arch/ampere/kern_gmmu_fmt_ga10x.c +SRCS += src/kernel/gpu/mmu/arch/ampere/kern_gmmu_ga100.c +SRCS += src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_fmt_gm10x.c +SRCS += src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_fmt_gm20x.c +SRCS += src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_gm107.c +SRCS += src/kernel/gpu/mmu/arch/maxwell/kern_gmmu_gm200.c +SRCS += src/kernel/gpu/mmu/arch/pascal/kern_gmmu_fmt_gp10x.c +SRCS += src/kernel/gpu/mmu/arch/pascal/kern_gmmu_gp100.c +SRCS += src/kernel/gpu/mmu/arch/turing/kern_gmmu_fmt_tu10x.c +SRCS += src/kernel/gpu/mmu/arch/turing/kern_gmmu_tu102.c +SRCS += src/kernel/gpu/mmu/arch/volta/kern_gmmu_gv100.c +SRCS += src/kernel/gpu/mmu/bar2_walk.c +SRCS += src/kernel/gpu/mmu/fault_buffer_ctrl.c +SRCS += src/kernel/gpu/mmu/gmmu_trace.c +SRCS += src/kernel/gpu/mmu/gmmu_walk.c +SRCS += src/kernel/gpu/mmu/kern_gmmu.c +SRCS += src/kernel/gpu/mmu/mmu_fault_buffer.c +SRCS += src/kernel/gpu/mmu/mmu_fault_buffer_ctrl.c +SRCS += src/kernel/gpu/mmu/mmu_trace.c +SRCS += src/kernel/gpu/mmu/uvm_sw.c +SRCS += src/kernel/gpu/nvdec/arch/ampere/kernel_nvdec_ga100.c +SRCS += src/kernel/gpu/nvdec/arch/ampere/kernel_nvdec_ga102.c +SRCS += src/kernel/gpu/nvdec/arch/turing/kernel_nvdec_tu102.c +SRCS += src/kernel/gpu/nvdec/kernel_nvdec.c +SRCS += src/kernel/gpu/nvdec/kernel_nvdec_ctx.c +SRCS += src/kernel/gpu/nvdec/kernel_nvdec_engdesc.c +SRCS += src/kernel/gpu/nvenc/kernel_nvenc_ctx.c +SRCS += src/kernel/gpu/nvenc/kernel_nvenc_engdesc.c +SRCS += src/kernel/gpu/nvjpg/kernel_nvjpg_ctx.c +SRCS += src/kernel/gpu/nvjpg/kernel_nvjpg_engdesc.c +SRCS += src/kernel/gpu/nvlink/arch/ampere/kernel_nvlink_ga100.c +SRCS += src/kernel/gpu/nvlink/arch/pascal/kernel_nvlink_gp100.c +SRCS += src/kernel/gpu/nvlink/arch/turing/kernel_nvlink_tu102.c +SRCS += src/kernel/gpu/nvlink/arch/volta/kernel_minion_gv100.c +SRCS += src/kernel/gpu/nvlink/arch/volta/kernel_nvlink_gv100.c +SRCS += src/kernel/gpu/nvlink/kernel_ioctrl.c +SRCS += src/kernel/gpu/nvlink/kernel_nvlink.c +SRCS += src/kernel/gpu/nvlink/kernel_nvlinkapi.c +SRCS += src/kernel/gpu/nvlink/kernel_nvlinkcorelib.c +SRCS += src/kernel/gpu/nvlink/kernel_nvlinkcorelibcallback.c +SRCS += src/kernel/gpu/nvlink/kernel_nvlinkcorelibtrain.c +SRCS += src/kernel/gpu/nvlink/kernel_nvlinkoverrides.c +SRCS += src/kernel/gpu/nvlink/kernel_nvlinkstate.c +SRCS += src/kernel/gpu/ofa/kernel_ofa_ctx.c +SRCS += src/kernel/gpu/perf/kern_cuda_limit.c +SRCS += src/kernel/gpu/perf/kern_perf.c +SRCS += src/kernel/gpu/perf/kern_perf_1hz.c +SRCS += src/kernel/gpu/perf/kern_perf_boost.c +SRCS += src/kernel/gpu/perf/kern_perf_gpuboostsync.c +SRCS += src/kernel/gpu/perf/kern_perf_pm.c +SRCS += src/kernel/gpu/perf/kern_perf_pwr.c +SRCS += src/kernel/gpu/perf/kern_perfbuffer.c +SRCS += src/kernel/gpu/pmu/kern_pmu.c +SRCS += src/kernel/gpu/rc/kernel_rc.c +SRCS += src/kernel/gpu/rc/kernel_rc_callback.c +SRCS += src/kernel/gpu/rc/kernel_rc_ctrl.c +SRCS 
+= src/kernel/gpu/rc/kernel_rc_misc.c +SRCS += src/kernel/gpu/rc/kernel_rc_notification.c +SRCS += src/kernel/gpu/rc/kernel_rc_watchdog.c +SRCS += src/kernel/gpu/rc/kernel_rc_watchdog_callback.c +SRCS += src/kernel/gpu/rc/kernel_rc_watchdog_ctrl.c +SRCS += src/kernel/gpu/sec2/arch/ampere/kernel_sec2_ga100.c +SRCS += src/kernel/gpu/sec2/arch/ampere/kernel_sec2_ga102.c +SRCS += src/kernel/gpu/sec2/arch/turing/kernel_sec2_tu102.c +SRCS += src/kernel/gpu/sec2/kernel_sec2.c +SRCS += src/kernel/gpu/subdevice/generic_engine.c +SRCS += src/kernel/gpu/subdevice/subdevice.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_event_kernel.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_fla.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_gpu_kernel.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_gpu_regops.c +SRCS += src/kernel/gpu/subdevice/subdevice_ctrl_timer_kernel.c +SRCS += src/kernel/gpu/subdevice/subdevice_diag.c +SRCS += src/kernel/gpu/sw_eng.c +SRCS += src/kernel/gpu/sw_test.c +SRCS += src/kernel/gpu/timed_semaphore.c +SRCS += src/kernel/gpu/timer/arch/ampere/timer_ga100.c +SRCS += src/kernel/gpu/timer/arch/maxwell/timer_gm107.c +SRCS += src/kernel/gpu/timer/arch/maxwell/timer_gm200.c +SRCS += src/kernel/gpu/timer/arch/turing/timer_tu102.c +SRCS += src/kernel/gpu/timer/arch/volta/timer_gv100.c +SRCS += src/kernel/gpu/timer/timer.c +SRCS += src/kernel/gpu/timer/timer_ostimer.c +SRCS += src/kernel/gpu/timer/timer_ptimer.c +SRCS += src/kernel/gpu/uvm/access_cntr_buffer.c +SRCS += src/kernel/gpu/uvm/access_cntr_buffer_ctrl.c +SRCS += src/kernel/gpu/uvm/arch/turing/uvm_tu102.c +SRCS += src/kernel/gpu/uvm/arch/volta/uvm_gv100.c +SRCS += src/kernel/gpu/uvm/uvm.c +SRCS += src/kernel/gpu_mgr/gpu_db.c +SRCS += src/kernel/gpu_mgr/gpu_group.c +SRCS += src/kernel/gpu_mgr/gpu_mgmt_api.c +SRCS += src/kernel/gpu_mgr/gpu_mgr.c +SRCS += src/kernel/mem_mgr/console_mem.c +SRCS += src/kernel/mem_mgr/ctx_buf_pool.c +SRCS += src/kernel/mem_mgr/fabric_vaspace.c +SRCS += src/kernel/mem_mgr/fla_mem.c +SRCS += src/kernel/mem_mgr/gpu_vaspace.c +SRCS += src/kernel/mem_mgr/hw_resources.c +SRCS += src/kernel/mem_mgr/io_vaspace.c +SRCS += src/kernel/mem_mgr/mem.c +SRCS += src/kernel/mem_mgr/mem_fabric.c +SRCS += src/kernel/mem_mgr/no_device_mem.c +SRCS += src/kernel/mem_mgr/os_desc_mem.c +SRCS += src/kernel/mem_mgr/phys_mem.c +SRCS += src/kernel/mem_mgr/pool_alloc.c +SRCS += src/kernel/mem_mgr/reg_mem.c +SRCS += src/kernel/mem_mgr/standard_mem.c +SRCS += src/kernel/mem_mgr/system_mem.c +SRCS += src/kernel/mem_mgr/vaddr_list.c +SRCS += src/kernel/mem_mgr/vaspace.c +SRCS += src/kernel/mem_mgr/video_mem.c +SRCS += src/kernel/mem_mgr/virt_mem_mgr.c +SRCS += src/kernel/mem_mgr/virt_mem_range.c +SRCS += src/kernel/mem_mgr/virtual_mem.c +SRCS += src/kernel/os/os_init.c +SRCS += src/kernel/os/os_sanity.c +SRCS += src/kernel/os/os_stubs.c +SRCS += src/kernel/os/os_timer.c +SRCS += src/kernel/platform/chipset/chipset.c +SRCS += src/kernel/platform/chipset/chipset_info.c +SRCS += src/kernel/platform/chipset/chipset_pcie.c +SRCS += src/kernel/platform/chipset/pci_pbi.c +SRCS += src/kernel/platform/cpu.c +SRCS += src/kernel/platform/hwbc.c +SRCS += src/kernel/platform/p2p/p2p_caps.c +SRCS += src/kernel/platform/platform.c +SRCS += src/kernel/power/gpu_boost_mgr.c +SRCS += src/kernel/rmapi/alloc_free.c +SRCS += src/kernel/rmapi/binary_api.c +SRCS += src/kernel/rmapi/client.c +SRCS += src/kernel/rmapi/client_resource.c +SRCS += src/kernel/rmapi/control.c +SRCS += src/kernel/rmapi/deprecated_context.c +SRCS += 
src/kernel/rmapi/embedded_param_copy.c +SRCS += src/kernel/rmapi/entry_points.c +SRCS += src/kernel/rmapi/event.c +SRCS += src/kernel/rmapi/event_buffer.c +SRCS += src/kernel/rmapi/event_notification.c +SRCS += src/kernel/rmapi/mapping.c +SRCS += src/kernel/rmapi/mapping_cpu.c +SRCS += src/kernel/rmapi/mapping_list.c +SRCS += src/kernel/rmapi/nv_gpu_ops.c +SRCS += src/kernel/rmapi/param_copy.c +SRCS += src/kernel/rmapi/resource.c +SRCS += src/kernel/rmapi/resource_desc.c +SRCS += src/kernel/rmapi/rmapi.c +SRCS += src/kernel/rmapi/rmapi_cache.c +SRCS += src/kernel/rmapi/rmapi_stubs.c +SRCS += src/kernel/rmapi/rmapi_utils.c +SRCS += src/kernel/rmapi/rpc_common.c +SRCS += src/kernel/rmapi/rs_utils.c +SRCS += src/kernel/rmapi/sharing.c +SRCS += src/lib/base_utils.c +SRCS += src/lib/protobuf/prbenc.c +SRCS += src/lib/protobuf/prbutil.c +SRCS += src/lib/ref_count.c +SRCS += src/lib/zlib/inflate.c +SRCS += src/libraries/containers/btree/btree.c +SRCS += src/libraries/containers/eheap/eheap_old.c +SRCS += src/libraries/containers/list.c +SRCS += src/libraries/containers/map.c +SRCS += src/libraries/containers/multimap.c +SRCS += src/libraries/containers/queue.c +SRCS += src/libraries/eventbuffer/eventbufferproducer.c +SRCS += src/libraries/ioaccess/ioaccess.c +SRCS += src/libraries/mmu/gmmu_fmt.c +SRCS += src/libraries/mmu/mmu_fmt.c +SRCS += src/libraries/mmu/mmu_walk.c +SRCS += src/libraries/mmu/mmu_walk_commit.c +SRCS += src/libraries/mmu/mmu_walk_fill.c +SRCS += src/libraries/mmu/mmu_walk_info.c +SRCS += src/libraries/mmu/mmu_walk_map.c +SRCS += src/libraries/mmu/mmu_walk_migrate.c +SRCS += src/libraries/mmu/mmu_walk_reserve.c +SRCS += src/libraries/mmu/mmu_walk_sparse.c +SRCS += src/libraries/mmu/mmu_walk_unmap.c +SRCS += src/libraries/nvbitvector/nvbitvector.c +SRCS += src/libraries/nvoc/src/runtime.c +SRCS += src/libraries/nvport/core/core.c +SRCS += src/libraries/nvport/cpu/cpu_common.c +SRCS += src/libraries/nvport/cpu/cpu_x86_amd64.c +SRCS += src/libraries/nvport/crypto/crypto_random_xorshift.c +SRCS += src/libraries/nvport/memory/memory_tracking.c +SRCS += src/libraries/nvport/memory/memory_unix_kernel_os.c +SRCS += src/libraries/nvport/string/string_generic.c +SRCS += src/libraries/nvport/sync/sync_rwlock.c +SRCS += src/libraries/nvport/sync/sync_unix_kernel_os.c +SRCS += src/libraries/nvport/thread/thread_unix_kernel_os.c +SRCS += src/libraries/nvport/util/util_gcc_clang.c +SRCS += src/libraries/nvport/util/util_unix_kernel_os.c +SRCS += src/libraries/poolalloc/poolalloc.c +SRCS += src/libraries/prereq_tracker/prereq_tracker.c +SRCS += src/libraries/resserv/src/rs_access_map.c +SRCS += src/libraries/resserv/src/rs_access_rights.c +SRCS += src/libraries/resserv/src/rs_client.c +SRCS += src/libraries/resserv/src/rs_domain.c +SRCS += src/libraries/resserv/src/rs_resource.c +SRCS += src/libraries/resserv/src/rs_server.c +SRCS += src/libraries/tls/tls.c +SRCS += src/libraries/utils/nvassert.c +SRCS += ../common/uproc/os/libos-v2.0.0/debug/elf.c +SRCS += ../common/uproc/os/libos-v2.0.0/debug/lines.c +SRCS += ../common/uproc/os/libos-v2.0.0/debug/logdecode.c diff --git a/utils.mk b/utils.mk new file mode 100644 index 000000000..d09807eb2 --- /dev/null +++ b/utils.mk @@ -0,0 +1,558 @@ +# +# Copyright (C) 2008 NVIDIA Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, 
merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +# +# utils.mk: common Makefile fragment used by nvidia-xconfig, +# nvidia-installer, and nvidia-settings +# + + + +############################################################################## +# The calling Makefile (when building as part of the NVIDIA graphics +# driver) may export any of the following variables; we assign default +# values if they are not exported by the caller +############################################################################## + +CC ?= gcc +CXX ?= g++ +LD ?= ld +AR ?= ar +# only set these warnings if CFLAGS is unset +CFLAGS ?= -Wall +# always set these -f CFLAGS +CFLAGS += -fno-strict-aliasing -fno-omit-frame-pointer -Wformat=2 +CC_ONLY_CFLAGS ?= +CXX_ONLY_CFLAGS ?= +LDFLAGS ?= +BIN_LDFLAGS ?= + +STACK_USAGE_WARNING ?= +CFLAGS += $(if $(STACK_USAGE_WARNING),-Wstack-usage=$(STACK_USAGE_WARNING)) + +HOST_CC ?= $(CC) +HOST_LD ?= $(LD) +HOST_CFLAGS ?= $(CFLAGS) +HOST_CC_ONLY_CFLAGS ?= +HOST_CXX_ONLY_CFLAGS ?= +HOST_LDFLAGS ?= $(LDFLAGS) +HOST_BIN_LDFLAGS ?= + +# always disable warnings that will break the build +CC_ONLY_CFLAGS += -Wno-format-zero-length +CFLAGS += -Wno-unused-parameter +HOST_CC_ONLY_CFLAGS += -Wno-format-zero-length +HOST_CFLAGS += -Wno-unused-parameter + +# Treat warnings as errors, if requested +WARNINGS_AS_ERRORS ?= +CFLAGS += $(if $(WARNINGS_AS_ERRORS),-Werror) + +DEBUG ?= +DEVELOP ?= + +ifeq ($(DEBUG),1) + STRIP_CMD ?= true + DO_STRIP ?= + CFLAGS += -O0 -g + CFLAGS += -DDEBUG=1 +else + CFLAGS += -O2 +endif + +ifeq ($(DEVELOP),1) + STRIP_CMD ?= true + DO_STRIP ?= + CFLAGS += -DDEVELOP=1 +endif + +STRIP_CMD ?= strip +DO_STRIP ?= 1 + +INSTALL ?= install +INSTALL_BIN_ARGS ?= -m 755 +INSTALL_LIB_ARGS ?= -m 644 +INSTALL_DOC_ARGS ?= -m 644 + +M4 ?= m4 +SED ?= sed +M4 ?= m4 +ECHO ?= echo +PRINTF ?= printf +MKDIR ?= mkdir -p +RM ?= rm -f +TOUCH ?= touch +HARDLINK ?= ln -f +DATE ?= date +GZIP_CMD ?= gzip +CHMOD ?= chmod +OBJCOPY ?= objcopy +XZ ?= xz +WHOAMI ?= whoami +HOSTNAME ?= hostname + +NV_AUTO_DEPEND ?= 1 +NV_VERBOSE ?= 0 + +ifndef TARGET_OS + TARGET_OS := $(shell uname) +endif + +ifeq ($(TARGET_OS),Linux) + CFLAGS += -DNV_LINUX +endif + +ifeq ($(TARGET_OS),FreeBSD) + CFLAGS += -DNV_BSD +endif + +ifeq ($(TARGET_OS),SunOS) + CFLAGS += -DNV_SUNOS +endif + +ifndef TARGET_ARCH + TARGET_ARCH := $(shell uname -m) + TARGET_ARCH := $(subst i386,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i486,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i586,x86,$(TARGET_ARCH)) + TARGET_ARCH := $(subst i686,x86,$(TARGET_ARCH)) +endif + +ifeq ($(TARGET_ARCH),x86) + CFLAGS += -DNV_X86 -DNV_ARCH_BITS=32 +endif + +ifeq ($(TARGET_ARCH),x86_64) + CFLAGS += -DNV_X86_64 -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_ARCH),armv7l) + CFLAGS += -DNV_ARMV7 -DNV_ARCH_BITS=32 +endif + 
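The NV_LINUX/NV_BSD/NV_SUNOS and NV_X86/NV_X86_64/NV_ARMV7 defines that utils.mk adds to CFLAGS above, together with NV_ARCH_BITS, exist so that C sources can branch on them at compile time. A minimal sketch of a translation unit keying off these defines is shown here; the file and function names are hypothetical, and only the -D flags set by utils.mk are assumed.

/* arch_probe.c: hypothetical example; assumes only the -DNV_* flags added by utils.mk */
#include <stdio.h>

int main(void)
{
#if defined(NV_LINUX)
    const char *os = "Linux";
#elif defined(NV_BSD)
    const char *os = "FreeBSD";
#elif defined(NV_SUNOS)
    const char *os = "SunOS";
#else
    const char *os = "unknown";
#endif

#if defined(NV_X86_64)
    const char *arch = "x86_64";
#elif defined(NV_X86)
    const char *arch = "x86";
#elif defined(NV_ARMV7)
    const char *arch = "armv7l";
#else
    const char *arch = "other";
#endif

    /* NV_ARCH_BITS is defined to 32 or 64 for each architecture utils.mk handles */
#if defined(NV_ARCH_BITS)
    printf("%s %s, %d-bit\n", os, arch, NV_ARCH_BITS);
#else
    printf("%s %s\n", os, arch);
#endif
    return 0;
}

The aarch64 and ppc64le cases that follow extend the same pattern, adding NV_AARCH64 and NV_PPC64LE with NV_ARCH_BITS=64.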
+ifeq ($(TARGET_ARCH),aarch64) + CFLAGS += -DNV_AARCH64 -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_ARCH),ppc64le) + CFLAGS += -DNV_PPC64LE -DNV_ARCH_BITS=64 +endif + +ifeq ($(TARGET_OS),Linux) + LIBDL_LIBS = -ldl +else + LIBDL_LIBS = +endif + +# This variable controls which floating-point ABI is targeted. For ARM, it +# defaults to "gnueabi" for softfp. Another option is "gnueabihf" for +# hard(fp). This is necessary to pick up the correct rtld_test binary. +# All other architectures default to empty. +ifeq ($(TARGET_ARCH),armv7l) + TARGET_ARCH_ABI ?= gnueabi +endif +TARGET_ARCH_ABI ?= + +ifeq ($(TARGET_ARCH_ABI),gnueabi) + CFLAGS += -DNV_GNUEABI +endif + +ifeq ($(TARGET_ARCH_ABI),gnueabihf) + CFLAGS += -DNV_GNUEABIHF +endif + +OUTPUTDIR ?= _out/$(TARGET_OS)_$(TARGET_ARCH) +OUTPUTDIR_ABSOLUTE ?= $(CURDIR)/$(OUTPUTDIR) + +NV_SEPARATE_DEBUG_INFO ?= +NV_KEEP_UNSTRIPPED_BINARIES ?= + +NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX ?= + +NV_GENERATED_HEADERS ?= + +PCIACCESS_CFLAGS ?= +PCIACCESS_LDFLAGS ?= + +############################################################################## +# This makefile uses the $(eval) builtin function, which was added in +# GNU make 3.80. Check that the current make version recognizes it. +# Idea suggested by: http://www.jgc.org/blog/cookbook-sample.pdf +############################################################################## + +_eval_available := +$(eval _eval_available := T) + +ifneq ($(_eval_available),T) + $(error This Makefile requires a GNU Make that supports 'eval'. Please upgrade to GNU make 3.80 or later) +endif + + +############################################################################## +# Test passing $(1) to $(CC). If $(CC) succeeds, then echo $(1). +# +# Because this uses $(shell), it is best to use this to assign simply expanded +# variables (e.g., ":="). +# +# Example usage: +# CONDITIONAL_CFLAGS := $(call TEST_CC_ARG, -ffoo) +############################################################################## + +TEST_CC_ARG = \ + $(shell $(CC) -c -x c /dev/null -Werror $(1) -o /dev/null > /dev/null 2>&1 && \ + $(ECHO) $(1)) + + +############################################################################## +# define variables used when installing the open source utilities from +# the source tarball +############################################################################## + +PREFIX ?= /usr/local + +BINDIR = $(DESTDIR)$(PREFIX)/bin +LIBDIR = $(DESTDIR)$(PREFIX)/lib +MANDIR = $(DESTDIR)$(PREFIX)/share/man/man1 + + +############################################################################## +# default build rule, so that nothing here in utils.mk accidentally +# gets selected as the default rule +############################################################################## + +default build: all + + +############################################################################## +# get the definition of NVIDIA_VERSION from version.mk +# +# version.mk may be in one of two places: either in $(OUTPUTDIR) when +# building as part of the NVIDIA driver build, or directly in the +# source directory when building from the source tarball +# +# Throw an error if one of these two places did not define NVIDIA_VERSION. +############################################################################## + +VERSION_MK_DIR ?= . 
+VERSION_MK := $(wildcard $(OUTPUTDIR)/version.mk $(VERSION_MK_DIR)/version.mk ) +include $(VERSION_MK) + +ifndef NVIDIA_VERSION +$(error NVIDIA_VERSION undefined) +endif + +############################################################################## +# NV_GET_SOURCE_TYPE: if the source file $(1) should be compiled as C, this +# evalutes to "CC"; if the source file $(1) should be compiled as C++, this +# evalutes to "CXX". +############################################################################## + +NV_GET_SOURCE_TYPE = $(strip \ + $(if $(filter %.c, $(1)),CC, \ + $(if $(filter %.cpp, $(1)),CXX, \ + $(error Unrecognized source $(1))))) + + +############################################################################## +# Several of the functions below take an argument that indicates if +# the expression is for the target platform (the system the built +# program is going to run on) or the host platform (the system +# performing the build). The argument is either "HOST" or "TARGET" +# and needs to be converted: +# +# "HOST" -> "HOST_" +# "TARGET" -> "" +############################################################################## + +host_target = $(patsubst HOST,HOST_,$(patsubst TARGET,,$(1))) + + +############################################################################## +# To generate the dependency files: +# +# - Use the compiler's "-MMD" option to generate output of the form +# "foo.o : foo.c foo.h bar.h". +# +# - Also, "-MMD" will cause the compiler to name the target as if it were in the +# current directory ("foo.o: "); use -MT to rename the target in the output +# directory ("_out/Linux_x86/foo.o: ") so that the target actually applies to +# the object files produced in the build. +# +# - Use -MP to generate a phony target for each of those prerequisites (except +# the source file being compiled). E.g., +# "foo.o : foo.c foo.h bar.h +# foo.h: +# bar.h:" +# so that the makefile can handle incremental builds after a prerequisite has +# been deleted from source control. +# +# - Use sed to remove the source file from the list of prerequisties in the +# above, so that the makefile can handle increment builds after the source has +# moved from one directory to another. The DEFINE_OBJECT_RULE macro spells +# out the obj: src dependency, so we don't require it here. 
+############################################################################## + +ifeq ($(NV_AUTO_DEPEND),1) + AUTO_DEP_SUFFIX = -MMD -MF $$(@:.o=.d.to_be_processed) -MP -MT $$@ && \ + $$(SED) -e "1,3s| $$< | |" < $$(@:.o=.d.to_be_processed) > $$(@:.o=.d) +else + AUTO_DEP_SUFFIX = +endif + + +############################################################################## +# echo minimal compile information in the non-NV_VERBOSE case +# +# NV_MODULE_LOGGING_NAME can be set to prepend quiet build output with a +# label of which build component is being built +############################################################################## + +NV_MODULE_LOGGING_NAME ?= + +ifeq ($(NV_VERBOSE),0) + at_if_quiet := @ + quiet_cmd_no_at = $(PRINTF) \ + " $(if $(NV_MODULE_LOGGING_NAME),[ %-17.17s ],%s) $(quiet_$(1))\n" \ + "$(NV_MODULE_LOGGING_NAME)" && $($(1)) + quiet_cmd = @$(quiet_cmd_no_at) +else + at_if_quiet := + quiet_cmd_no_at = $($(1)) + quiet_cmd = $($(1)) +endif + +# define LINK and HOST_LINK to be the same as CC; this is so that, +# even though we use CC to link programs, we can have a different +# quiet rule that uses '$@' as it's arg, rather than '$<' +LINK = $(CC) +HOST_LINK = $(HOST_CC) + +# strip NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX from the target string +define_quiet_cmd = $(1) $(patsubst $(NV_QUIET_COMMAND_REMOVED_TARGET_PREFIX)/%,%,$(2)) + +# define the quiet commands: +quiet_CC = $(call define_quiet_cmd,CC ,$<) +quiet_CXX = $(call define_quiet_cmd,CXX ,$<) +quiet_HOST_CC = $(call define_quiet_cmd,HOST_CC ,$<) +quiet_HOST_CXX = $(call define_quiet_cmd,HOST_CXX ,$<) +quiet_LINK = $(call define_quiet_cmd,LINK ,$@) +quiet_HOST_LINK = $(call define_quiet_cmd,HOST_LINK ,$@) +quiet_M4 = $(call define_quiet_cmd,M4 ,$<) +quiet_STRIP_CMD = $(call define_quiet_cmd,STRIP ,$@) +quiet_HARDLINK = $(call define_quiet_cmd,HARDLINK ,$@) +quiet_LD = $(call define_quiet_cmd,LD ,$@) +quiet_OBJCOPY = $(call define_quiet_cmd,OBJCOPY ,$@) +quiet_AR = $(call define_quiet_cmd,AR ,$@) +quiet_XZ = $(call define_quiet_cmd,XZ ,$@) + +############################################################################## +# Tell gmake to delete the target of a rule if it has changed and its +# commands exit with a nonzero exit status. +############################################################################## +.DELETE_ON_ERROR: + + +############################################################################## +# function to generate a list of object files from their corresponding +# source files using the specified path. The _WITH_DIR variant takes an +# output path as the second argument while the BUILD_OBJECT_LIST defaults +# to using the value of OUTPUTDIR as the output path. example usage: +# +# OBJS = $(call BUILD_OBJECT_LIST_WITH_DIR,$(SRC),$(DIR)) +############################################################################## + +BUILD_OBJECT_LIST_WITH_DIR = \ + $(addprefix $(2)/,$(notdir $(addsuffix .o,$(basename $(1))))) + +BUILD_OBJECT_LIST = \ + $(call BUILD_OBJECT_LIST_WITH_DIR,$(1),$(OUTPUTDIR)) + +$(call BUILD_OBJECT_LIST,nvpci-utils.c): CFLAGS += $(PCIACCESS_CFLAGS) + +############################################################################## +# function to generate a list of dependency files from their +# corresponding source files using the specified path. The _WITH_DIR +# variant takes an output path as the second argument while the +# BUILD_DEPENDENCY_LIST default to using the value of OUTPUTDIR as the +# output path. 
example usage: +# +# DEPS = $(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(SRC),$(DIR)) +############################################################################## + +BUILD_DEPENDENCY_LIST_WITH_DIR = \ + $(addprefix $(2)/,$(notdir $(addsuffix .d,$(basename $(1))))) + +BUILD_DEPENDENCY_LIST = \ + $(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(1),$(OUTPUTDIR)) + + +############################################################################## +# functions to define a rule to build an object file; the first +# argument for all functions is whether the rule is for the target or +# host platform ("HOST" or "TARGET"), the second argument for all +# functions is the source file to compile. +# +# An order-only dependency is added on any generated header files listed in +# $(NV_GENERATED_HEADERS), to ensure they're present before invoking the +# compiler. For incremental builds where the object file already exists, a +# real (not order-only) dependency will be created by automatic dependency +# tracking if needed. +# +# The _WITH_OBJECT_NAME and _WITH_DIR function name suffixes describe +# the third and possibly fourth arguments based on order. The +# _WITH_OBJECT_NAME argument is the object filename to produce while +# the _WITH_DIR argument is the destination path for the object file. +# +# Example usage: +# +# $(eval $(call DEFINE_OBJECT_RULE,TARGET,foo.c)) +# +# Note this also attempts to include the dependency file for this +# source file. +# +# The DEFINE_OBJECT_RULE is functionally equivalent to +# DEFINE_OBJECT_RULE_WITH_OBJECT_NAME, but infers the object file name +# from the source file name (this is normally what you want). +# +# Arguments: +# $(1) : HOST or TARGET +# $(2) : source file +# $(3) : object file +# $(4) : directory +############################################################################## + +define DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR + $(3): NV_SOURCE_TYPE = $$(call NV_GET_SOURCE_TYPE,$(2)) + + # obj: {HOST_,}CFLAGS += $$({HOST_,}{CC,CXX}_ONLY_CFLAGS) + $(3): $$(call host_target,$(1))CFLAGS += \ + $$($(call host_target,$(1))$$(NV_SOURCE_TYPE)_ONLY_CFLAGS) + + $(3): $(2) | $$(NV_GENERATED_HEADERS) + @$(MKDIR) $(4) + $$(call quiet_cmd,$(call host_target,$(1))$$(NV_SOURCE_TYPE)) \ + $$($(call host_target,$(1))CFLAGS) -c $$< -o $$@ \ + $(AUTO_DEP_SUFFIX) + + -include $$(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(3),$(4)) + + # declare empty rule for generating dependency file; we generate the + # dependency files implicitly when compiling the source file (see + # AUTO_DEP_SUFFIX above), so we don't want gmake to spend time searching + # for an explicit rule to generate the dependency file + $$(call BUILD_DEPENDENCY_LIST_WITH_DIR,$(3),$(4)): ; + +endef + +define DEFINE_OBJECT_RULE_WITH_OBJECT_NAME + $$(eval $$(call DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR,$(1),$(2),\ + $(3),$(OUTPUTDIR))) +endef + +define DEFINE_OBJECT_RULE_WITH_DIR + $$(eval $$(call DEFINE_OBJECT_RULE_WITH_OBJECT_NAME_WITH_DIR,$(1),$(2),\ + $$(call BUILD_OBJECT_LIST_WITH_DIR,$(2),$(3)),$(3))) +endef + +define DEFINE_OBJECT_RULE + $$(eval $$(call DEFINE_OBJECT_RULE_WITH_DIR,$(1),$(2),$(OUTPUTDIR))) +endef + +# This is a function that will generate rules to build +# files with separate debug information, if so requested. +# +# It takes one parameter: (1) Name of unstripped binary +# +# When used, the target for linking should be named (1).unstripped +# +# If separate debug information is requested, it will +# generate a rule to build one from the unstripped binary. 
+# If requested, it will also retain the unstripped binary. +define DEBUG_INFO_RULES + $(1): $(1).unstripped + ifneq ($(or $(DO_STRIP),$(NV_SEPARATE_DEBUG_INFO)),) + $$(call quiet_cmd,STRIP_CMD) -o $$@ $$< + else + $$(call quiet_cmd,HARDLINK) $$^ $$@ + endif + ifeq ($(NV_SEPARATE_DEBUG_INFO),1) + $(1).debug: $(1).unstripped + $$(call quiet_cmd,STRIP_CMD) --only-keep-debug -o $$@ $$< + $(1): $(1).debug + endif + ifneq ($(NV_KEEP_UNSTRIPPED_BINARIES),1) + .INTERMEDIATE: $(1).unstripped + endif +endef + +############################################################################## +# Define rule for generating a source file containing identification information +# for the build. +# +# $(1) string name +# $(2) module name +# $(3) prerequisite object files +############################################################################## + +NVIDSTRING = $(OUTPUTDIR)/g_nvid_string.c + +ifeq ($(DEBUG),1) + NVIDSTRING_BUILD_TYPE_STRING = Debug Build +else + NVIDSTRING_BUILD_TYPE_STRING = Release Build +endif + +define GENERATE_NVIDSTRING + # g_nvid_string.c depends on all objects except g_nvid_string.o, and version.mk + $(NVIDSTRING): $$(filter-out $$(call BUILD_OBJECT_LIST,$$(NVIDSTRING)), $(3)) $$(VERSION_MK) + $(at_if_quiet)$$(MKDIR) $$(dir $$@) + $(at_if_quiet)$$(ECHO) "const char $(1)[] = \"nvidia id: NVIDIA $$(strip $(2)) for $$(TARGET_ARCH) $$(NVIDIA_VERSION) $$(NVIDSTRING_BUILD_TYPE_STRING) (`$$(WHOAMI)`@`$$(HOSTNAME)`) `$$(DATE)`\";" > $$@ + $(at_if_quiet)$$(ECHO) "const char *const p$$(strip $(1)) = $(1) + 11;" >> $$@; +endef + + +############################################################################## +# Define rules that can be used for embedding a file into an ELF object that +# contains the raw contents of that file and symbols pointing to the embedded +# data. +# +# Note that objcopy will name the symbols in the resulting object file based on +# the filename specified in $(1). For example, +# +# $(eval $(call $(READ_ONLY_OBJECT_FROM_FILE_RULE),a/b/c)) +# +# will create an object named $(OUTPUTDIR)/c.o with the symbols _binary_c_start, +# _binary_c_end, and _binary_c_size. +# +# Arguments: +# $(1): Path to the file to convert +############################################################################## + +define READ_ONLY_OBJECT_FROM_FILE_RULE + $$(OUTPUTDIR)/$$(notdir $(1)).o: $(1) + $(at_if_quiet)$$(MKDIR) $$(OUTPUTDIR) + $(at_if_quiet)cd $$(dir $(1)); \ + $$(call quiet_cmd_no_at,LD) -r -z noexecstack --format=binary \ + $$(notdir $(1)) -o $$(OUTPUTDIR_ABSOLUTE)/$$(notdir $$@) + $$(call quiet_cmd,OBJCOPY) \ + --rename-section .data=.rodata,contents,alloc,load,data,readonly \ + $$@ +endef diff --git a/version.mk b/version.mk new file mode 100644 index 000000000..c6189e5ca --- /dev/null +++ b/version.mk @@ -0,0 +1,9 @@ +NVIDIA_VERSION = 515.43.04 + +# This file. +VERSION_MK_FILE := $(lastword $(MAKEFILE_LIST)) +$(OUTPUTDIR)/version.h: $(VERSION_MK_FILE) + @$(MKDIR) $(OUTPUTDIR) + @$(ECHO) '#define NVIDIA_VERSION "$(NVIDIA_VERSION)"' > $@ + +NV_GENERATED_HEADERS += $(OUTPUTDIR)/version.h
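For reference, the _binary_<name>_start, _binary_<name>_end, and _binary_<name>_size symbols mentioned in the READ_ONLY_OBJECT_FROM_FILE_RULE comment above are ordinary linker symbols, so C code that consumes an embedded file declares them itself. The sketch below assumes a file named "c" was embedded, as in the rule's own a/b/c example; the function is hypothetical and not part of this tree.

/* Hypothetical consumer of an object produced by READ_ONLY_OBJECT_FROM_FILE_RULE
 * for an input file named "c". Only the symbol addresses are meaningful: the
 * payload lives between _binary_c_start and _binary_c_end, and the objcopy
 * step in the rule has already moved it into a read-only .rodata section. */
#include <stddef.h>
#include <stdio.h>

extern const char _binary_c_start[];
extern const char _binary_c_end[];

static void print_embedded_size(void)
{
    /* Computing end - start avoids relying on the absolute _binary_c_size
     * symbol, whose address (not its contents) encodes the size. */
    size_t size = (size_t)(_binary_c_end - _binary_c_start);
    printf("embedded file: %zu bytes\n", size);
}

Relatedly, the "+ 11" in GENERATE_NVIDSTRING makes the generated p<name> pointer skip the leading "nvidia id: " tag (11 characters) of the identification string written into g_nvid_string.c.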